author     Jiri Kosina <jkosina@suse.cz>  2014-02-20 17:54:28 +0400
committer  Jiri Kosina <jkosina@suse.cz>  2014-02-20 17:54:28 +0400
commit     d4263348f796f29546f90802177865dd4379dd0a (patch)
tree       adcbdaebae584eee2f32fab95e826e8e49eef385 /drivers
parent     be873ac782f5ff5ee6675f83929f4fe6737eead2 (diff)
parent     6d0abeca3242a88cab8232e4acd7e2bf088f3bc2 (diff)
download   linux-d4263348f796f29546f90802177865dd4379dd0a.tar.xz
Merge branch 'master' into for-next
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/ac.c7
-rw-r--r--drivers/acpi/acpi_extlog.c80
-rw-r--r--drivers/acpi/acpi_memhotplug.c27
-rw-r--r--drivers/acpi/acpi_pad.c8
-rw-r--r--drivers/acpi/acpi_processor.c26
-rw-r--r--drivers/acpi/acpica/acdebug.h1
-rw-r--r--drivers/acpi/acpica/acevents.h9
-rw-r--r--drivers/acpi/acpica/acglobal.h32
-rw-r--r--drivers/acpi/acpica/aclocal.h9
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c19
-rw-r--r--drivers/acpi/acpica/dswload.c4
-rw-r--r--drivers/acpi/acpica/evgpeblk.c8
-rw-r--r--drivers/acpi/acpica/evgpeutil.c24
-rw-r--r--drivers/acpi/acpica/exresnte.c3
-rw-r--r--drivers/acpi/acpica/nsxfeval.c23
-rw-r--r--drivers/acpi/acpica/psopinfo.c51
-rw-r--r--drivers/acpi/acpica/tbfadt.c335
-rw-r--r--drivers/acpi/acpica/tbutils.c214
-rw-r--r--drivers/acpi/acpica/utaddress.c19
-rw-r--r--drivers/acpi/acpica/utalloc.c10
-rw-r--r--drivers/acpi/acpica/utcache.c12
-rw-r--r--drivers/acpi/acpica/utdebug.c4
-rw-r--r--drivers/acpi/acpica/utglobal.c4
-rw-r--r--drivers/acpi/acpica/utxfinit.c12
-rw-r--r--drivers/acpi/apei/Kconfig1
-rw-r--r--drivers/acpi/apei/apei-base.c5
-rw-r--r--drivers/acpi/apei/apei-internal.h1
-rw-r--r--drivers/acpi/apei/einj.c59
-rw-r--r--drivers/acpi/apei/erst.c3
-rw-r--r--drivers/acpi/apei/ghes.c40
-rw-r--r--drivers/acpi/battery.c27
-rw-r--r--drivers/acpi/blacklist.c51
-rw-r--r--drivers/acpi/bus.c96
-rw-r--r--drivers/acpi/button.c21
-rw-r--r--drivers/acpi/container.c56
-rw-r--r--drivers/acpi/custom_method.c2
-rw-r--r--drivers/acpi/debugfs.c2
-rw-r--r--drivers/acpi/device_pm.c46
-rw-r--r--drivers/acpi/dock.c21
-rw-r--r--drivers/acpi/ec.c32
-rw-r--r--drivers/acpi/ec_sys.c2
-rw-r--r--drivers/acpi/event.c2
-rw-r--r--drivers/acpi/fan.c3
-rw-r--r--drivers/acpi/glue.c165
-rw-r--r--drivers/acpi/hed.c2
-rw-r--r--drivers/acpi/internal.h19
-rw-r--r--drivers/acpi/numa.c1
-rw-r--r--drivers/acpi/nvs.c3
-rw-r--r--drivers/acpi/osl.c14
-rw-r--r--drivers/acpi/pci_irq.c2
-rw-r--r--drivers/acpi/pci_link.c4
-rw-r--r--drivers/acpi/pci_root.c132
-rw-r--r--drivers/acpi/pci_slot.c1
-rw-r--r--drivers/acpi/power.c3
-rw-r--r--drivers/acpi/proc.c7
-rw-r--r--drivers/acpi/processor_core.c29
-rw-r--r--drivers/acpi/processor_driver.c8
-rw-r--r--drivers/acpi/processor_idle.c50
-rw-r--r--drivers/acpi/processor_perflib.c7
-rw-r--r--drivers/acpi/processor_thermal.c11
-rw-r--r--drivers/acpi/processor_throttling.c7
-rw-r--r--drivers/acpi/sbshc.c3
-rw-r--r--drivers/acpi/scan.c623
-rw-r--r--drivers/acpi/sleep.c14
-rw-r--r--drivers/acpi/sysfs.c4
-rw-r--r--drivers/acpi/tables.c11
-rw-r--r--drivers/acpi/thermal.c7
-rw-r--r--drivers/acpi/utils.c103
-rw-r--r--drivers/acpi/video.c5
-rw-r--r--drivers/acpi/video_detect.c10
-rw-r--r--drivers/acpi/wakeup.c1
-rw-r--r--drivers/amba/bus.c213
-rw-r--r--drivers/ata/ahci.c137
-rw-r--r--drivers/ata/ahci_imx.c241
-rw-r--r--drivers/ata/ata_generic.c7
-rw-r--r--drivers/ata/libahci.c4
-rw-r--r--drivers/ata/libata-acpi.c28
-rw-r--r--drivers/ata/libata-core.c46
-rw-r--r--drivers/ata/libata-eh.c5
-rw-r--r--drivers/ata/libata-scsi.c39
-rw-r--r--drivers/ata/pata_acpi.c5
-rw-r--r--drivers/ata/pata_samsung_cf.c43
-rw-r--r--drivers/ata/sata_highbank.c1
-rw-r--r--drivers/ata/sata_mv.c54
-rw-r--r--drivers/ata/sata_rcar.c118
-rw-r--r--drivers/ata/sata_sis.c4
-rw-r--r--drivers/atm/he.c1
-rw-r--r--drivers/atm/nicstar.c4
-rw-r--r--drivers/atm/solos-pci.c2
-rw-r--r--drivers/base/Makefile4
-rw-r--r--drivers/base/base.h1
-rw-r--r--drivers/base/bus.c13
-rw-r--r--drivers/base/component.c390
-rw-r--r--drivers/base/container.c44
-rw-r--r--drivers/base/core.c7
-rw-r--r--drivers/base/devtmpfs.c2
-rw-r--r--drivers/base/firmware_class.c93
-rw-r--r--drivers/base/init.c1
-rw-r--r--drivers/base/platform.c16
-rw-r--r--drivers/base/power/clock_ops.c30
-rw-r--r--drivers/base/power/generic_ops.c4
-rw-r--r--drivers/base/regmap/regmap-irq.c6
-rw-r--r--drivers/base/regmap/regmap.c72
-rw-r--r--drivers/bcma/Kconfig1
-rw-r--r--drivers/bcma/bcma_private.h2
-rw-r--r--drivers/bcma/driver_chipcommon_sflash.c6
-rw-r--r--drivers/bcma/driver_gpio.c137
-rw-r--r--drivers/bcma/host_pci.c3
-rw-r--r--drivers/bcma/main.c14
-rw-r--r--drivers/block/Kconfig5
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/aoe/aoe.h10
-rw-r--r--drivers/block/aoe/aoecmd.c153
-rw-r--r--drivers/block/brd.c16
-rw-r--r--drivers/block/cciss.c4
-rw-r--r--drivers/block/drbd/drbd_actlog.c2
-rw-r--r--drivers/block/drbd/drbd_bitmap.c2
-rw-r--r--drivers/block/drbd/drbd_main.c27
-rw-r--r--drivers/block/drbd/drbd_receiver.c19
-rw-r--r--drivers/block/drbd/drbd_req.c6
-rw-r--r--drivers/block/drbd/drbd_req.h2
-rw-r--r--drivers/block/drbd/drbd_worker.c8
-rw-r--r--drivers/block/floppy.c52
-rw-r--r--drivers/block/loop.c29
-rw-r--r--drivers/block/mg_disk.c2
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c270
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h14
-rw-r--r--drivers/block/nbd.c14
-rw-r--r--drivers/block/null_blk.c212
-rw-r--r--drivers/block/nvme-core.c752
-rw-r--r--drivers/block/nvme-scsi.c147
-rw-r--r--drivers/block/paride/pg.c2
-rw-r--r--drivers/block/pktcdvd.c186
-rw-r--r--drivers/block/ps3disk.c17
-rw-r--r--drivers/block/ps3vram.c12
-rw-r--r--drivers/block/rbd.c394
-rw-r--r--drivers/block/rsxx/dev.c6
-rw-r--r--drivers/block/rsxx/dma.c15
-rw-r--r--drivers/block/skd_main.c4
-rw-r--r--drivers/block/sx8.c16
-rw-r--r--drivers/block/umem.c53
-rw-r--r--drivers/block/virtio_blk.c7
-rw-r--r--drivers/block/xen-blkback/blkback.c68
-rw-r--r--drivers/block/xen-blkback/common.h5
-rw-r--r--drivers/block/xen-blkback/xenbus.c14
-rw-r--r--drivers/block/xen-blkfront.c17
-rw-r--r--drivers/block/z2ram.c7
-rw-r--r--drivers/block/zram/Kconfig (renamed from drivers/staging/zram/Kconfig)1
-rw-r--r--drivers/block/zram/Makefile (renamed from drivers/staging/zram/Makefile)0
-rw-r--r--drivers/block/zram/zram_drv.c (renamed from drivers/staging/zram/zram_drv.c)161
-rw-r--r--drivers/block/zram/zram_drv.h (renamed from drivers/staging/zram/zram_drv.h)32
-rw-r--r--drivers/bluetooth/ath3k.c6
-rw-r--r--drivers/bluetooth/btmrvl_drv.h25
-rw-r--r--drivers/bluetooth/btmrvl_main.c130
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c9
-rw-r--r--drivers/bluetooth/btmrvl_sdio.h2
-rw-r--r--drivers/bluetooth/btsdio.c6
-rw-r--r--drivers/bluetooth/btusb.c54
-rw-r--r--drivers/bluetooth/hci_vhci.c29
-rw-r--r--drivers/bus/arm-cci.c2
-rw-r--r--drivers/bus/mvebu-mbus.c10
-rw-r--r--drivers/cdrom/gdrom.c4
-rw-r--r--drivers/char/Kconfig1
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/agp/Kconfig5
-rw-r--r--drivers/char/agp/Makefile2
-rw-r--r--drivers/char/agp/agp.h1
-rw-r--r--drivers/char/agp/ali-agp.c4
-rw-r--r--drivers/char/agp/amd-k7-agp.c12
-rw-r--r--drivers/char/agp/amd64-agp.c7
-rw-r--r--drivers/char/agp/ati-agp.c21
-rw-r--r--drivers/char/agp/efficeon-agp.c5
-rw-r--r--drivers/char/agp/generic.c4
-rw-r--r--drivers/char/agp/intel-agp.c53
-rw-r--r--drivers/char/agp/intel-agp.h10
-rw-r--r--drivers/char/agp/intel-gtt.c65
-rw-r--r--drivers/char/agp/nvidia-agp.c9
-rw-r--r--drivers/char/agp/sis-agp.c5
-rw-r--r--drivers/char/agp/via-agp.c13
-rw-r--r--drivers/char/apm-emulation.c11
-rw-r--r--drivers/char/hpet.c7
-rw-r--r--drivers/char/i8k.c360
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c8
-rw-r--r--drivers/char/ipmi/ipmi_kcs_sm.c4
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c51
-rw-r--r--drivers/char/ipmi/ipmi_smic_sm.c2
-rw-r--r--drivers/char/lp.c2
-rw-r--r--drivers/char/mem.c1
-rw-r--r--drivers/char/nwbutton.c5
-rw-r--r--drivers/char/pcmcia/synclink_cs.c4
-rw-r--r--drivers/char/raw.c2
-rw-r--r--drivers/char/tpm/Makefile2
-rw-r--r--drivers/char/tpm/tpm-dev.c213
-rw-r--r--drivers/char/tpm/tpm-interface.c488
-rw-r--r--drivers/char/tpm/tpm-sysfs.c318
-rw-r--r--drivers/char/tpm/tpm.h83
-rw-r--r--drivers/char/tpm/tpm_acpi.c2
-rw-r--r--drivers/char/tpm/tpm_atmel.c28
-rw-r--r--drivers/char/tpm/tpm_i2c_atmel.c44
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c42
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c43
-rw-r--r--drivers/char/tpm/tpm_i2c_stm_st33.c48
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c41
-rw-r--r--drivers/char/tpm/tpm_infineon.c28
-rw-r--r--drivers/char/tpm/tpm_nsc.c28
-rw-r--r--drivers/char/tpm/tpm_ppi.c407
-rw-r--r--drivers/char/tpm/tpm_tis.c49
-rw-r--r--drivers/char/tpm/xen-tpmfront.c49
-rw-r--r--drivers/char/ttyprintk.c2
-rw-r--r--drivers/char/virtio_console.c9
-rw-r--r--drivers/clk/Kconfig22
-rw-r--r--drivers/clk/Makefile74
-rw-r--r--drivers/clk/at91/Makefile11
-rw-r--r--drivers/clk/at91/clk-main.c187
-rw-r--r--drivers/clk/at91/clk-master.c270
-rw-r--r--drivers/clk/at91/clk-peripheral.c410
-rw-r--r--drivers/clk/at91/clk-pll.c531
-rw-r--r--drivers/clk/at91/clk-plldiv.c135
-rw-r--r--drivers/clk/at91/clk-programmable.c366
-rw-r--r--drivers/clk/at91/clk-smd.c171
-rw-r--r--drivers/clk/at91/clk-system.c135
-rw-r--r--drivers/clk/at91/clk-usb.c398
-rw-r--r--drivers/clk/at91/clk-utmi.c159
-rw-r--r--drivers/clk/at91/pmc.c395
-rw-r--r--drivers/clk/at91/pmc.h114
-rw-r--r--drivers/clk/clk-composite.c28
-rw-r--r--drivers/clk/clk-divider.c2
-rw-r--r--drivers/clk/clk-fixed-rate.c43
-rw-r--r--drivers/clk/clk-max77686.c97
-rw-r--r--drivers/clk/clk-s2mps11.c6
-rw-r--r--drivers/clk/clk-si5351.c30
-rw-r--r--drivers/clk/clk-si5351.h14
-rw-r--r--drivers/clk/clk-si570.c531
-rw-r--r--drivers/clk/clk-vt8500.c2
-rw-r--r--drivers/clk/clk.c382
-rw-r--r--drivers/clk/clk.h16
-rw-r--r--drivers/clk/clkdev.c12
-rw-r--r--drivers/clk/hisilicon/Makefile5
-rw-r--r--drivers/clk/hisilicon/clk-hi3620.c242
-rw-r--r--drivers/clk/hisilicon/clk.c171
-rw-r--r--drivers/clk/hisilicon/clk.h103
-rw-r--r--drivers/clk/hisilicon/clkgate-separated.c130
-rw-r--r--drivers/clk/keystone/gate.c12
-rw-r--r--drivers/clk/keystone/pll.c24
-rw-r--r--drivers/clk/mvebu/Kconfig5
-rw-r--r--drivers/clk/mvebu/Makefile1
-rw-r--r--drivers/clk/mvebu/clk-corediv.c223
-rw-r--r--drivers/clk/mvebu/clk-cpu.c2
-rw-r--r--drivers/clk/qcom/Kconfig47
-rw-r--r--drivers/clk/qcom/Makefile14
-rw-r--r--drivers/clk/qcom/clk-branch.c159
-rw-r--r--drivers/clk/qcom/clk-branch.h56
-rw-r--r--drivers/clk/qcom/clk-pll.c222
-rw-r--r--drivers/clk/qcom/clk-pll.h66
-rw-r--r--drivers/clk/qcom/clk-rcg.c517
-rw-r--r--drivers/clk/qcom/clk-rcg.h159
-rw-r--r--drivers/clk/qcom/clk-rcg2.c291
-rw-r--r--drivers/clk/qcom/clk-regmap.c114
-rw-r--r--drivers/clk/qcom/clk-regmap.h45
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c2819
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c2993
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c2694
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c2321
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c2625
-rw-r--r--drivers/clk/qcom/reset.c63
-rw-r--r--drivers/clk/qcom/reset.h37
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c161
-rw-r--r--drivers/clk/samsung/clk-exynos4.c860
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c697
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c650
-rw-r--r--drivers/clk/samsung/clk-exynos5440.c81
-rw-r--r--drivers/clk/samsung/clk-pll.c2
-rw-r--r--drivers/clk/samsung/clk-s3c64xx.c4
-rw-r--r--drivers/clk/shmobile/Makefile7
-rw-r--r--drivers/clk/shmobile/clk-div6.c185
-rw-r--r--drivers/clk/shmobile/clk-emev2.c104
-rw-r--r--drivers/clk/shmobile/clk-mstp.c233
-rw-r--r--drivers/clk/shmobile/clk-rcar-gen2.c298
-rw-r--r--drivers/clk/sirf/Makefile5
-rw-r--r--drivers/clk/sirf/atlas6.h31
-rw-r--r--drivers/clk/sirf/clk-atlas6.c152
-rw-r--r--drivers/clk/sirf/clk-common.c (renamed from drivers/clk/clk-prima2.c)264
-rw-r--r--drivers/clk/sirf/clk-prima2.c151
-rw-r--r--drivers/clk/sirf/prima2.h25
-rw-r--r--drivers/clk/socfpga/clk.c6
-rw-r--r--drivers/clk/spear/clk-frac-synth.c2
-rw-r--r--drivers/clk/sunxi/clk-factors.c67
-rw-r--r--drivers/clk/sunxi/clk-factors.h16
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c493
-rw-r--r--drivers/clk/tegra/Makefile7
-rw-r--r--drivers/clk/tegra/clk-id.h235
-rw-r--r--drivers/clk/tegra/clk-periph-gate.c30
-rw-r--r--drivers/clk/tegra/clk-periph.c74
-rw-r--r--drivers/clk/tegra/clk-pll.c417
-rw-r--r--drivers/clk/tegra/clk-tegra-audio.c215
-rw-r--r--drivers/clk/tegra/clk-tegra-fixed.c111
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c674
-rw-r--r--drivers/clk/tegra/clk-tegra-pmc.c132
-rw-r--r--drivers/clk/tegra/clk-tegra-super-gen4.c149
-rw-r--r--drivers/clk/tegra/clk-tegra114.c1688
-rw-r--r--drivers/clk/tegra/clk-tegra124.c1424
-rw-r--r--drivers/clk/tegra/clk-tegra20.c817
-rw-r--r--drivers/clk/tegra/clk-tegra30.c1504
-rw-r--r--drivers/clk/tegra/clk.c214
-rw-r--r--drivers/clk/tegra/clk.h116
-rw-r--r--drivers/clk/ti/Makefile11
-rw-r--r--drivers/clk/ti/apll.c223
-rw-r--r--drivers/clk/ti/autoidle.c133
-rw-r--r--drivers/clk/ti/clk-33xx.c161
-rw-r--r--drivers/clk/ti/clk-3xxx.c401
-rw-r--r--drivers/clk/ti/clk-43xx.c118
-rw-r--r--drivers/clk/ti/clk-44xx.c316
-rw-r--r--drivers/clk/ti/clk-54xx.c255
-rw-r--r--drivers/clk/ti/clk-7xx.c332
-rw-r--r--drivers/clk/ti/clk.c167
-rw-r--r--drivers/clk/ti/clockdomain.c70
-rw-r--r--drivers/clk/ti/composite.c269
-rw-r--r--drivers/clk/ti/divider.c487
-rw-r--r--drivers/clk/ti/dpll.c558
-rw-r--r--drivers/clk/ti/fixed-factor.c66
-rw-r--r--drivers/clk/ti/gate.c249
-rw-r--r--drivers/clk/ti/interface.c125
-rw-r--r--drivers/clk/ti/mux.c246
-rw-r--r--drivers/clk/ux500/clk-prcmu.c2
-rw-r--r--drivers/clk/versatile/clk-icst.c3
-rw-r--r--drivers/clk/versatile/clk-icst.h1
-rw-r--r--drivers/clk/versatile/clk-impd1.c88
-rw-r--r--drivers/clk/versatile/clk-integrator.c2
-rw-r--r--drivers/clk/versatile/clk-realview.c6
-rw-r--r--drivers/clk/versatile/clk-sp810.c2
-rw-r--r--drivers/clk/zynq/clkc.c18
-rw-r--r--drivers/clocksource/Kconfig5
-rw-r--r--drivers/clocksource/Makefile3
-rw-r--r--drivers/clocksource/arm_global_timer.c4
-rw-r--r--drivers/clocksource/bcm_kona_timer.c54
-rw-r--r--drivers/clocksource/cadence_ttc_timer.c26
-rw-r--r--drivers/clocksource/clksrc-of.c5
-rw-r--r--drivers/clocksource/cs5535-clockevt.c2
-rw-r--r--drivers/clocksource/dw_apb_timer.c3
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c7
-rw-r--r--drivers/clocksource/exynos_mct.c4
-rw-r--r--drivers/clocksource/nomadik-mtu.c25
-rw-r--r--drivers/clocksource/samsung_pwm_timer.c2
-rw-r--r--drivers/clocksource/sh_cmt.c23
-rw-r--r--drivers/clocksource/sh_mtu2.c4
-rw-r--r--drivers/clocksource/sh_tmu.c4
-rw-r--r--drivers/clocksource/sun4i_timer.c14
-rw-r--r--drivers/clocksource/tegra20_timer.c2
-rw-r--r--drivers/clocksource/time-armada-370-xp.c28
-rw-r--r--drivers/clocksource/time-orion.c4
-rw-r--r--drivers/clocksource/timer-sun5i.c192
-rw-r--r--drivers/clocksource/vt8500_timer.c2
-rw-r--r--drivers/cpufreq/Kconfig7
-rw-r--r--drivers/cpufreq/Kconfig.arm27
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c93
-rw-r--r--drivers/cpufreq/arm_big_little.c3
-rw-r--r--drivers/cpufreq/at32ap-cpufreq.c17
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c26
-rw-r--r--drivers/cpufreq/cpufreq.c322
-rw-r--r--drivers/cpufreq/cpufreq_governor.c6
-rw-r--r--drivers/cpufreq/cpufreq_governor.h2
-rw-r--r--drivers/cpufreq/cpufreq_stats.c109
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c16
-rw-r--r--drivers/cpufreq/dbx500-cpufreq.c22
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c28
-rw-r--r--drivers/cpufreq/exynos-cpufreq.h22
-rw-r--r--drivers/cpufreq/exynos4210-cpufreq.c2
-rw-r--r--drivers/cpufreq/exynos4x12-cpufreq.c4
-rw-r--r--drivers/cpufreq/exynos5250-cpufreq.c75
-rw-r--r--drivers/cpufreq/exynos5440-cpufreq.c36
-rw-r--r--drivers/cpufreq/freq_table.c78
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c134
-rw-r--r--drivers/cpufreq/integrator-cpufreq.c1
-rw-r--r--drivers/cpufreq/intel_pstate.c109
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c1
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c15
-rw-r--r--drivers/cpufreq/omap-cpufreq.c34
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c18
-rw-r--r--drivers/cpufreq/powernow-k6.c147
-rw-r--r--drivers/cpufreq/powernow-k8.c7
-rw-r--r--drivers/cpufreq/ppc-corenet-cpufreq.c17
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c1
-rw-r--r--drivers/cpufreq/pxa3xx-cpufreq.c1
-rw-r--r--drivers/cpufreq/s3c2416-cpufreq.c2
-rw-r--r--drivers/cpufreq/s3c2440-cpufreq.c6
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c14
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c35
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c23
-rw-r--r--drivers/cpufreq/sa1100-cpufreq.c2
-rw-r--r--drivers/cpufreq/sa1110-cpufreq.c2
-rw-r--r--drivers/cpufreq/spear-cpufreq.c12
-rw-r--r--drivers/cpufreq/speedstep-smi.c32
-rw-r--r--drivers/cpufreq/tegra-cpufreq.c49
-rw-r--r--drivers/cpufreq/unicore2-cpufreq.c33
-rw-r--r--drivers/cpuidle/Kconfig5
-rw-r--r--drivers/cpuidle/Kconfig.powerpc20
-rw-r--r--drivers/cpuidle/Makefile5
-rw-r--r--drivers/cpuidle/cpuidle-calxeda.c2
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c169
-rw-r--r--drivers/cpuidle/cpuidle-pseries.c267
-rw-r--r--drivers/crypto/Kconfig39
-rw-r--r--drivers/crypto/Makefile33
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c1
-rw-r--r--drivers/crypto/atmel-aes.c143
-rw-r--r--drivers/crypto/atmel-sha.c103
-rw-r--r--drivers/crypto/atmel-tdes.c143
-rw-r--r--drivers/crypto/caam/caamalg.c36
-rw-r--r--drivers/crypto/ccp/Kconfig24
-rw-r--r--drivers/crypto/ccp/Makefile10
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c365
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-xts.c279
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes.c369
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c432
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c517
-rw-r--r--drivers/crypto/ccp/ccp-crypto.h197
-rw-r--r--drivers/crypto/ccp/ccp-dev.c595
-rw-r--r--drivers/crypto/ccp/ccp-dev.h272
-rw-r--r--drivers/crypto/ccp/ccp-ops.c2024
-rw-r--r--drivers/crypto/ccp/ccp-pci.c361
-rw-r--r--drivers/crypto/dcp.c903
-rw-r--r--drivers/crypto/ixp4xx_crypto.c4
-rw-r--r--drivers/crypto/mxs-dcp.c1100
-rw-r--r--drivers/crypto/nx/nx-842.c29
-rw-r--r--drivers/crypto/omap-aes.c16
-rw-r--r--drivers/crypto/omap-sham.c19
-rw-r--r--drivers/crypto/talitos.c23
-rw-r--r--drivers/devfreq/Kconfig2
-rw-r--r--drivers/devfreq/exynos/exynos4_bus.c4
-rw-r--r--drivers/devfreq/exynos/exynos4_bus.h110
-rw-r--r--drivers/dma/Kconfig24
-rw-r--r--drivers/dma/Makefile2
-rw-r--r--drivers/dma/acpi-dma.c36
-rw-r--r--drivers/dma/amba-pl08x.c4
-rw-r--r--drivers/dma/at_hdmac_regs.h4
-rw-r--r--drivers/dma/bcm2835-dma.c707
-rw-r--r--drivers/dma/cppi41.c4
-rw-r--r--drivers/dma/dmaengine.c67
-rw-r--r--drivers/dma/dmatest.c12
-rw-r--r--drivers/dma/dw/core.c35
-rw-r--r--drivers/dma/edma.c6
-rw-r--r--drivers/dma/fsldma.c31
-rw-r--r--drivers/dma/fsldma.h2
-rw-r--r--drivers/dma/imx-sdma.c23
-rw-r--r--drivers/dma/ioat/dma.c11
-rw-r--r--drivers/dma/k3dma.c4
-rw-r--r--drivers/dma/mmp_pdma.c221
-rw-r--r--drivers/dma/mmp_tdma.c28
-rw-r--r--drivers/dma/moxart-dma.c699
-rw-r--r--drivers/dma/mv_xor.c125
-rw-r--r--drivers/dma/of-dma.c15
-rw-r--r--drivers/dma/omap-dma.c4
-rw-r--r--drivers/dma/pl330.c71
-rw-r--r--drivers/dma/ppc4xx/adma.c28
-rw-r--r--drivers/dma/sirf-dma.c20
-rw-r--r--drivers/dma/ste_dma40.c4
-rw-r--r--drivers/dma/tegra20-apb-dma.c114
-rw-r--r--drivers/dma/txx9dmac.c1
-rw-r--r--drivers/dma/virt-dma.h4
-rw-r--r--drivers/edac/amd64_edac.c135
-rw-r--r--drivers/edac/amd76x_edac.c2
-rw-r--r--drivers/edac/e752x_edac.c6
-rw-r--r--drivers/edac/e7xxx_edac.c2
-rw-r--r--drivers/edac/edac_device.c3
-rw-r--r--drivers/edac/edac_mc.c13
-rw-r--r--drivers/edac/edac_mc_sysfs.c12
-rw-r--r--drivers/edac/edac_module.h2
-rw-r--r--drivers/edac/edac_stub.c19
-rw-r--r--drivers/edac/i3000_edac.c2
-rw-r--r--drivers/edac/i3200_edac.c2
-rw-r--r--drivers/edac/i5000_edac.c2
-rw-r--r--drivers/edac/i5100_edac.c2
-rw-r--r--drivers/edac/i5400_edac.c2
-rw-r--r--drivers/edac/i7300_edac.c2
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/edac/i82443bxgx_edac.c2
-rw-r--r--drivers/edac/i82860_edac.c2
-rw-r--r--drivers/edac/i82875p_edac.c2
-rw-r--r--drivers/edac/i82975x_edac.c2
-rw-r--r--drivers/edac/mpc85xx_edac.c98
-rw-r--r--drivers/edac/mpc85xx_edac.h7
-rw-r--r--drivers/edac/r82600_edac.c2
-rw-r--r--drivers/edac/sb_edac.c10
-rw-r--r--drivers/edac/x38_edac.c2
-rw-r--r--drivers/eisa/eisa-bus.c31
-rw-r--r--drivers/extcon/Kconfig10
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/extcon-arizona.c73
-rw-r--r--drivers/extcon/extcon-gpio.c32
-rw-r--r--drivers/extcon/extcon-max14577.c752
-rw-r--r--drivers/extcon/extcon-palmas.c17
-rw-r--r--drivers/firewire/core-transaction.c6
-rw-r--r--drivers/firewire/core.h3
-rw-r--r--drivers/firewire/ohci.c27
-rw-r--r--drivers/firewire/sbp2.c1
-rw-r--r--drivers/firmware/Kconfig5
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/dmi-sysfs.c3
-rw-r--r--drivers/firmware/dmi_scan.c20
-rw-r--r--drivers/firmware/efi/Kconfig15
-rw-r--r--drivers/firmware/efi/Makefile3
-rw-r--r--drivers/firmware/efi/efi-pstore.c1
-rw-r--r--drivers/firmware/efi/efi.c45
-rw-r--r--drivers/firmware/efi/runtime-map.c181
-rw-r--r--drivers/firmware/google/Kconfig3
-rw-r--r--drivers/firmware/memmap.c2
-rw-r--r--drivers/gpio/Kconfig53
-rw-r--r--drivers/gpio/Makefile6
-rw-r--r--drivers/gpio/gpio-74x164.c59
-rw-r--r--drivers/gpio/gpio-adnp.c25
-rw-r--r--drivers/gpio/gpio-adp5520.c2
-rw-r--r--drivers/gpio/gpio-adp5588.c2
-rw-r--r--drivers/gpio/gpio-amd8111.c2
-rw-r--r--drivers/gpio/gpio-arizona.c2
-rw-r--r--drivers/gpio/gpio-bcm-kona.c26
-rw-r--r--drivers/gpio/gpio-bt8xx.c4
-rw-r--r--drivers/gpio/gpio-clps711x.c5
-rw-r--r--drivers/gpio/gpio-da9052.c2
-rw-r--r--drivers/gpio/gpio-da9055.c2
-rw-r--r--drivers/gpio/gpio-davinci.c185
-rw-r--r--drivers/gpio/gpio-em.c28
-rw-r--r--drivers/gpio/gpio-f7188x.c1
-rw-r--r--drivers/gpio/gpio-ich.c2
-rw-r--r--drivers/gpio/gpio-intel-mid.c34
-rw-r--r--drivers/gpio/gpio-kempld.c2
-rw-r--r--drivers/gpio/gpio-ks8695.c2
-rw-r--r--drivers/gpio/gpio-lp3943.c242
-rw-r--r--drivers/gpio/gpio-lpc32xx.c14
-rw-r--r--drivers/gpio/gpio-lynxpoint.c39
-rw-r--r--drivers/gpio/gpio-max730x.c5
-rw-r--r--drivers/gpio/gpio-max732x.c2
-rw-r--r--drivers/gpio/gpio-mc33880.c2
-rw-r--r--drivers/gpio/gpio-mc9s08dz60.c2
-rw-r--r--drivers/gpio/gpio-mcp23s08.c252
-rw-r--r--drivers/gpio/gpio-ml-ioh.c4
-rw-r--r--drivers/gpio/gpio-moxart.c156
-rw-r--r--drivers/gpio/gpio-msic.c9
-rw-r--r--drivers/gpio/gpio-msm-v2.c7
-rw-r--r--drivers/gpio/gpio-mvebu.c4
-rw-r--r--drivers/gpio/gpio-mxc.c2
-rw-r--r--drivers/gpio/gpio-octeon.c2
-rw-r--r--drivers/gpio/gpio-omap.c188
-rw-r--r--drivers/gpio/gpio-palmas.c2
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpio-pcf857x.c2
-rw-r--r--drivers/gpio/gpio-pch.c4
-rw-r--r--drivers/gpio/gpio-pxa.c3
-rw-r--r--drivers/gpio/gpio-rc5t583.c2
-rw-r--r--drivers/gpio/gpio-rcar.c64
-rw-r--r--drivers/gpio/gpio-samsung.c11
-rw-r--r--drivers/gpio/gpio-sch311x.c432
-rw-r--r--drivers/gpio/gpio-sodaville.c10
-rw-r--r--drivers/gpio/gpio-sta2x11.c2
-rw-r--r--drivers/gpio/gpio-stmpe.c2
-rw-r--r--drivers/gpio/gpio-sx150x.c2
-rw-r--r--drivers/gpio/gpio-tb10x.c4
-rw-r--r--drivers/gpio/gpio-tc3589x.c2
-rw-r--r--drivers/gpio/gpio-tegra.c7
-rw-r--r--drivers/gpio/gpio-timberdale.c6
-rw-r--r--drivers/gpio/gpio-tnetv107x.c2
-rw-r--r--drivers/gpio/gpio-tps6586x.c2
-rw-r--r--drivers/gpio/gpio-tps65910.c2
-rw-r--r--drivers/gpio/gpio-tps65912.c2
-rw-r--r--drivers/gpio/gpio-twl4030.c17
-rw-r--r--drivers/gpio/gpio-twl6040.c2
-rw-r--r--drivers/gpio/gpio-ucb1400.c2
-rw-r--r--drivers/gpio/gpio-viperboard.c4
-rw-r--r--drivers/gpio/gpio-vx855.c2
-rw-r--r--drivers/gpio/gpio-wm831x.c2
-rw-r--r--drivers/gpio/gpio-wm8350.c2
-rw-r--r--drivers/gpio/gpio-wm8994.c2
-rw-r--r--drivers/gpio/gpio-xtensa.c179
-rw-r--r--drivers/gpio/gpiolib-acpi.c23
-rw-r--r--drivers/gpio/gpiolib.c255
-rw-r--r--drivers/gpio/gpiolib.h46
-rw-r--r--drivers/gpu/drm/Kconfig8
-rw-r--r--drivers/gpu/drm/Makefile4
-rw-r--r--drivers/gpu/drm/armada/Kconfig1
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h1
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c10
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c20
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c7
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c2
-rw-r--r--drivers/gpu/drm/ast/ast_main.c49
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c20
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c6
-rw-r--r--drivers/gpu/drm/bochs/Kconfig11
-rw-r--r--drivers/gpu/drm/bochs/Makefile4
-rw-r--r--drivers/gpu/drm/bochs/bochs.h164
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c178
-rw-r--r--drivers/gpu/drm/bochs/bochs_fbdev.c215
-rw-r--r--drivers/gpu/drm/bochs/bochs_hw.c177
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c294
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c546
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c5
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c12
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c26
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c35
-rw-r--r--drivers/gpu/drm/drm_buffer.c2
-rw-r--r--drivers/gpu/drm/drm_bufs.c10
-rw-r--r--drivers/gpu/drm/drm_crtc.c25
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c31
-rw-r--r--drivers/gpu/drm/drm_drv.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c295
-rw-r--r--drivers/gpu/drm/drm_edid_load.c4
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c6
-rw-r--r--drivers/gpu/drm/drm_fops.c11
-rw-r--r--drivers/gpu/drm/drm_gem.c45
-rw-r--r--drivers/gpu/drm/drm_info.c16
-rw-r--r--drivers/gpu/drm/drm_irq.c119
-rw-r--r--drivers/gpu/drm/drm_memory.c15
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c315
-rw-r--r--drivers/gpu/drm/drm_modes.c2
-rw-r--r--drivers/gpu/drm/drm_panel.c100
-rw-r--r--drivers/gpu/drm/drm_pci.c38
-rw-r--r--drivers/gpu/drm/drm_platform.c12
-rw-r--r--drivers/gpu/drm/drm_stub.c37
-rw-r--r--drivers/gpu/drm/drm_usb.c2
-rw-r--r--drivers/gpu/drm/drm_vm.c10
-rw-r--r--drivers/gpu/drm/exynos/Kconfig4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c74
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c66
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c6
-rw-r--r--drivers/gpu/drm/gma500/accel_2d.c2
-rw-r--r--drivers/gpu/drm/gma500/backlight.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c39
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c12
-rw-r--r--drivers/gpu/drm/gma500/opregion.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h8
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h5
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c19
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.h2
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c17
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c4
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c2
-rw-r--r--drivers/gpu/drm/i915/Kconfig32
-rw-r--r--drivers/gpu/drm/i915/Makefile6
-rw-r--r--drivers/gpu/drm/i915/dvo_ns2501.c73
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c288
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c73
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c107
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h184
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c153
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c61
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c29
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c97
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c71
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c39
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c166
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h335
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c49
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c49
-rw-r--r--drivers/gpu/drm/i915/i915_ums.c27
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c146
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c37
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h64
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c5
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c156
-rw-r--r--drivers/gpu/drm/i915/intel_display.c623
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c174
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h48
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c193
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h21
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_pll.c119
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c5
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c65
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c87
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c22
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c90
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c57
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c2
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c946
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1670
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c55
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c62
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h40
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c43
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c86
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c320
-rw-r--r--drivers/gpu/drm/mga/mga_dma.c4
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h4
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c1
-rw-r--r--drivers/gpu/drm/mga/mga_irq.c8
-rw-r--r--drivers/gpu/drm/mga/mga_state.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_cursor.c8
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c9
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c7
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c10
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c6
-rw-r--r--drivers/gpu/drm/msm/Kconfig4
-rw-r--r--drivers/gpu/drm/msm/Makefile21
-rw-r--r--drivers/gpu/drm/msm/NOTES20
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h125
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h116
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c190
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h171
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c25
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h9
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h30
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h8
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h8
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c201
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h38
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h83
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c71
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c139
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c157
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h (renamed from drivers/gpu/drm/msm/mdp4/mdp4.xml.h)88
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c (renamed from drivers/gpu/drm/msm/mdp4/mdp4_crtc.c)201
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c (renamed from drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c)6
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c93
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c (renamed from drivers/gpu/drm/msm/mdp4/mdp4_kms.c)61
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h (renamed from drivers/gpu/drm/msm/mdp4/mdp4_kms.h)56
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c (renamed from drivers/gpu/drm/msm/mdp4/mdp4_plane.c)10
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h1036
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c574
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c258
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c111
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c350
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h213
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c389
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c173
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h41
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_common.xml.h78
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_format.c (renamed from drivers/gpu/drm/msm/mdp4/mdp4_format.c)13
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.c144
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.h97
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_irq.c203
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c132
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h64
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c1
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c172
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h5
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c9
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c42
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h6
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c148
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h68
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h47
-rw-r--r--drivers/gpu/drm/nouveau/Makefile7
-rw-r--r--drivers/gpu/drm/nouveau/core/core/engine.c23
-rw-r--r--drivers/gpu/drm/nouveau/core/core/subdev.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nve0.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv04.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv10.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv20.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv30.c20
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv40.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv50.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv04.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/vga.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/falcon.c20
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c37
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c414
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c1408
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc138
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc137
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc542
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h473
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h704
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h766
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h766
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h766
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc382
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc540
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h916
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h1238
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h1238
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h1202
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h1202
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc141
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv108.c236
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c166
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h22
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/graph.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bar.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h66
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h26
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h12
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/devinit.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h18
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/instmem.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/vm.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/priv.h26
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c67
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c107
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/timing.c70
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/base.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/base.c34
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c36
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c41
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c41
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c37
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c41
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h18
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c63
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c63
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c44
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c72
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c18
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c22
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c24
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c614
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/base.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c140
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c44
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c117
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/base.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc393
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc32
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc53
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h467
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h484
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h484
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h533
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h19
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/ic.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/base.c22
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c183
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c160
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c197
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c133
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c23
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c11
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c9
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c86
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c26
-rw-r--r--drivers/gpu/drm/panel/Kconfig19
-rw-r--r--drivers/gpu/drm/panel/Makefile1
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c548
-rw-r--r--drivers/gpu/drm/qxl/Kconfig1
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c11
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c2
-rw-r--r--drivers/gpu/drm/r128/r128_cce.c4
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h4
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c1
-rw-r--r--drivers/gpu/drm/r128/r128_irq.c2
-rw-r--r--drivers/gpu/drm/r128/r128_state.c66
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c139
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c54
-rw-r--r--drivers/gpu/drm/radeon/atombios_i2c.c2
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c52
-rw-r--r--drivers/gpu/drm/radeon/btcd.h4
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c70
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c1
-rw-r--r--drivers/gpu/drm/radeon/cik.c381
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c122
-rw-r--r--drivers/gpu/drm/radeon/cikd.h11
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c15
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c8
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c99
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c5
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h4
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c48
-rw-r--r--drivers/gpu/drm/radeon/ni.c161
-rw-r--r--drivers/gpu/drm/radeon/ni_dma.c77
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c29
-rw-r--r--drivers/gpu/drm/radeon/nid.h1
-rw-r--r--drivers/gpu/drm/radeon/pptable.h4
-rw-r--r--drivers/gpu/drm/radeon/r100.c38
-rw-r--r--drivers/gpu/drm/radeon/r300.c7
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c8
-rw-r--r--drivers/gpu/drm/radeon/r420.c7
-rw-r--r--drivers/gpu/drm/radeon/r520.c5
-rw-r--r--drivers/gpu/drm/radeon/r600.c170
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c25
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c19
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c20
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.h2
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c2
-rw-r--r--drivers/gpu/drm/radeon/r600d.h8
-rw-r--r--drivers/gpu/drm/radeon/radeon.h23
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c91
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h66
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c46
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c23
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c46
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c33
-rw-r--r--drivers/gpu/drm/radeon/radeon_mem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h36
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c158
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c94
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h21
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c197
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c5
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r6001
-rw-r--r--drivers/gpu/drm/radeon/rs400.c7
-rw-r--r--drivers/gpu/drm/radeon/rs600.c7
-rw-r--r--drivers/gpu/drm/radeon/rs690.c17
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c8
-rw-r--r--drivers/gpu/drm/radeon/rv515.c7
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c10
-rw-r--r--drivers/gpu/drm/radeon/rv770.c58
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c46
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.h4
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h7
-rw-r--r--drivers/gpu/drm/radeon/si.c179
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c72
-rw-r--r--drivers/gpu/drm/radeon/si_smc.c1
-rw-r--r--drivers/gpu/drm/radeon/sid.h9
-rw-r--r--drivers/gpu/drm/radeon/sislands_smc.h2
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c23
-rw-r--r--drivers/gpu/drm/radeon/sumo_smc.c1
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c24
-rw-r--r--drivers/gpu/drm/radeon/trinity_smc.c3
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c3
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c28
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h14
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c28
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c21
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c10
-rw-r--r--drivers/gpu/drm/savage/savage_state.c8
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c18
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c4
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c2
-rw-r--r--drivers/gpu/drm/tegra/Kconfig25
-rw-r--r--drivers/gpu/drm/tegra/Makefile2
-rw-r--r--drivers/gpu/drm/tegra/bus.c1
-rw-r--r--drivers/gpu/drm/tegra/dc.c117
-rw-r--r--drivers/gpu/drm/tegra/dc.h9
-rw-r--r--drivers/gpu/drm/tegra/drm.c21
-rw-r--r--drivers/gpu/drm/tegra/drm.h15
-rw-r--r--drivers/gpu/drm/tegra/dsi.c971
-rw-r--r--drivers/gpu/drm/tegra/dsi.h134
-rw-r--r--drivers/gpu/drm/tegra/fb.c45
-rw-r--r--drivers/gpu/drm/tegra/gem.c183
-rw-r--r--drivers/gpu/drm/tegra/gem.h9
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c22
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c78
-rw-r--r--drivers/gpu/drm/tegra/mipi-phy.c138
-rw-r--r--drivers/gpu/drm/tegra/mipi-phy.h65
-rw-r--r--drivers/gpu/drm/tegra/output.c51
-rw-r--r--drivers/gpu/drm/tegra/rgb.c47
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c9
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c38
-rw-r--r--drivers/gpu/drm/ttm/ttm_lock.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c90
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c30
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c12
-rw-r--r--drivers/gpu/drm/via/via_dma.c12
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c20
-rw-r--r--drivers/gpu/drm/via/via_drv.c2
-rw-r--r--drivers/gpu/drm/via/via_drv.h2
-rw-r--r--drivers/gpu/drm/via/via_irq.c10
-rw-r--r--drivers/gpu/drm/via/via_video.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_reg.h742
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_reg.h10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c182
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c633
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c218
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h248
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c1172
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c107
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c160
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c105
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c38
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c653
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c206
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c810
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c475
-rw-r--r--drivers/gpu/host1x/Kconfig2
-rw-r--r--drivers/gpu/host1x/Makefile4
-rw-r--r--drivers/gpu/host1x/bus.c2
-rw-r--r--drivers/gpu/host1x/channel.c5
-rw-r--r--drivers/gpu/host1x/debug.c13
-rw-r--r--drivers/gpu/host1x/dev.c28
-rw-r--r--drivers/gpu/host1x/dev.h2
-rw-r--r--drivers/gpu/host1x/hw/host1x02.c4
-rw-r--r--drivers/gpu/host1x/hw/host1x02_hardware.h142
-rw-r--r--drivers/gpu/host1x/hw/host1x04.c42
-rw-r--r--drivers/gpu/host1x/hw/host1x04.h26
-rw-r--r--drivers/gpu/host1x/hw/host1x04_hardware.h142
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x02_uclass.h6
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x04_channel.h121
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x04_sync.h243
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x04_uclass.h181
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c1
-rw-r--r--drivers/gpu/host1x/job.c6
-rw-r--r--drivers/gpu/host1x/mipi.c275
-rw-r--r--drivers/gpu/host1x/syncpt.c9
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-core.c6
-rw-r--r--drivers/hid/hid-debug.c5
-rw-r--r--drivers/hid/hid-holtek-mouse.c3
-rw-r--r--drivers/hid/hid-ids.h17
-rw-r--r--drivers/hid/hid-input.c2
-rw-r--r--drivers/hid/hid-lg.c2
-rw-r--r--drivers/hid/hid-logitech-dj.c8
-rw-r--r--drivers/hid/hid-microsoft.c1
-rw-r--r--drivers/hid/hid-multitouch.c7
-rw-r--r--drivers/hid/hid-sensor-hub.c45
-rw-r--r--drivers/hid/hid-sony.c594
-rw-r--r--drivers/hid/hidraw.c27
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c28
-rw-r--r--drivers/hid/usbhid/hid-quirks.c9
-rw-r--r--drivers/hid/usbhid/usbkbd.c2
-rw-r--r--drivers/hv/channel.c14
-rw-r--r--drivers/hv/connection.c13
-rw-r--r--drivers/hv/hv.c2
-rw-r--r--drivers/hv/vmbus_drv.c2
-rw-r--r--drivers/hwmon/Kconfig6
-rw-r--r--drivers/hwmon/acpi_power_meter.c3
-rw-r--r--drivers/hwmon/adm1025.c4
-rw-r--r--drivers/hwmon/adm1029.c2
-rw-r--r--drivers/hwmon/adm1031.c2
-rw-r--r--drivers/hwmon/adt7475.c2
-rw-r--r--drivers/hwmon/asus_atk0110.c6
-rw-r--r--drivers/hwmon/coretemp.c60
-rw-r--r--drivers/hwmon/da9052-hwmon.c4
-rw-r--r--drivers/hwmon/da9055-hwmon.c4
-rw-r--r--drivers/hwmon/ds1621.c2
-rw-r--r--drivers/hwmon/emc6w201.c4
-rw-r--r--drivers/hwmon/f71805f.c2
-rw-r--r--drivers/hwmon/fam15h_power.c2
-rw-r--r--drivers/hwmon/gl518sm.c2
-rw-r--r--drivers/hwmon/it87.c108
-rw-r--r--drivers/hwmon/k10temp.c3
-rw-r--r--drivers/hwmon/k8temp.c2
-rw-r--r--drivers/hwmon/lm63.c4
-rw-r--r--drivers/hwmon/lm75.c35
-rw-r--r--drivers/hwmon/lm78.c4
-rw-r--r--drivers/hwmon/lm83.c4
-rw-r--r--drivers/hwmon/lm85.c2
-rw-r--r--drivers/hwmon/lm87.c4
-rw-r--r--drivers/hwmon/lm90.c4
-rw-r--r--drivers/hwmon/lm92.c4
-rw-r--r--drivers/hwmon/lm93.c2
-rw-r--r--drivers/hwmon/max1619.c5
-rw-r--r--drivers/hwmon/max6642.c2
-rw-r--r--drivers/hwmon/nct6775.c40
-rw-r--r--drivers/hwmon/ntc_thermistor.c6
-rw-r--r--drivers/hwmon/pc87360.c4
-rw-r--r--drivers/hwmon/pc87427.c4
-rw-r--r--drivers/hwmon/pcf8591.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c68
-rw-r--r--drivers/hwmon/sis5595.c4
-rw-r--r--drivers/hwmon/smsc47b397.c2
-rw-r--r--drivers/hwmon/smsc47m1.c2
-rw-r--r--drivers/hwmon/tmp102.c19
-rw-r--r--drivers/hwmon/via686a.c2
-rw-r--r--drivers/hwmon/vt8231.c2
-rw-r--r--drivers/hwmon/w83627ehf.c4
-rw-r--r--drivers/hwmon/w83627hf.c2
-rw-r--r--drivers/hwmon/w83781d.c2
-rw-r--r--drivers/hwmon/w83795.c4
-rw-r--r--drivers/hwmon/w83l785ts.c6
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c3
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c1
-rw-r--r--drivers/i2c/algos/i2c-algo-pcf.c1
-rw-r--r--drivers/i2c/busses/Kconfig26
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-acorn.c2
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c1
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c1
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c1
-rw-r--r--drivers/i2c/busses/i2c-amd756-s4882.c4
-rw-r--r--drivers/i2c/busses/i2c-amd756.c1
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c1
-rw-r--r--drivers/i2c/busses/i2c-at91.c3
-rw-r--r--drivers/i2c/busses/i2c-au1550.c1
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c1
-rw-r--r--drivers/i2c/busses/i2c-cpm.c1
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c1
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c1
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c1
-rw-r--r--drivers/i2c/busses/i2c-highlander.c1
-rw-r--r--drivers/i2c/busses/i2c-hydra.c1
-rw-r--r--drivers/i2c/busses/i2c-i801.c5
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c1
-rw-r--r--drivers/i2c/busses/i2c-imx.c2
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c1
-rw-r--r--drivers/i2c/busses/i2c-isch.c8
-rw-r--r--drivers/i2c/busses/i2c-ismt.c38
-rw-r--r--drivers/i2c/busses/i2c-mpc.c1
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c38
-rw-r--r--drivers/i2c/busses/i2c-nforce2-s4985.c4
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c1
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c64
-rw-r--r--drivers/i2c/busses/i2c-ocores.c1
-rw-r--r--drivers/i2c/busses/i2c-octeon.c1
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c4
-rw-r--r--drivers/i2c/busses/i2c-parport.c4
-rw-r--r--drivers/i2c/busses/i2c-parport.h2
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c1
-rw-r--r--drivers/i2c/busses/i2c-piix4.c44
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c1
-rw-r--r--drivers/i2c/busses/i2c-pnx.c64
-rw-r--r--drivers/i2c/busses/i2c-powermac.c1
-rw-r--r--drivers/i2c/busses/i2c-puv3.c1
-rw-r--r--drivers/i2c/busses/i2c-rcar.c22
-rw-r--r--drivers/i2c/busses/i2c-riic.c427
-rw-r--r--drivers/i2c/busses/i2c-robotfuzz-osif.c202
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c76
-rw-r--r--drivers/i2c/busses/i2c-scmi.c1
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c1
-rw-r--r--drivers/i2c/busses/i2c-simtec.c1
-rw-r--r--drivers/i2c/busses/i2c-sis630.c1
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c1
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c4
-rw-r--r--drivers/i2c/busses/i2c-tegra.c13
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c1
-rw-r--r--drivers/i2c/busses/i2c-via.c1
-rw-r--r--drivers/i2c/busses/i2c-viapro.c4
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c16
-rw-r--r--drivers/i2c/busses/i2c-xiic.c90
-rw-r--r--drivers/i2c/busses/i2c-xlr.c1
-rw-r--r--drivers/i2c/busses/scx200_i2c.c1
-rw-r--r--drivers/i2c/i2c-core.c26
-rw-r--r--drivers/i2c/i2c-smbus.c4
-rw-r--r--drivers/i2c/i2c-stub.c2
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c1
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c1
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c1
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c45
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c1
-rw-r--r--drivers/ide/buddha.c2
-rw-r--r--drivers/ide/ide-acpi.c12
-rw-r--r--drivers/ide/ide-cd_verbose.c2
-rw-r--r--drivers/ide/ide-pio-blacklist.c1
-rw-r--r--drivers/idle/intel_idle.c54
-rw-r--r--drivers/iio/Kconfig2
-rw-r--r--drivers/iio/Makefile2
-rw-r--r--drivers/iio/accel/bma180.c23
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c12
-rw-r--r--drivers/iio/adc/ad7266.c21
-rw-r--r--drivers/iio/adc/ad7887.c16
-rw-r--r--drivers/iio/adc/max1363.c10
-rw-r--r--drivers/iio/adc/mcp3422.c9
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c68
-rw-r--r--drivers/iio/adc/viperboard_adc.c20
-rw-r--r--drivers/iio/dac/ad5064.c7
-rw-r--r--drivers/iio/dac/ad5360.c7
-rw-r--r--drivers/iio/dac/ad5380.c7
-rw-r--r--drivers/iio/dac/ad5421.c26
-rw-r--r--drivers/iio/dac/ad5446.c9
-rw-r--r--drivers/iio/dac/ad5449.c7
-rw-r--r--drivers/iio/dac/ad5504.c46
-rw-r--r--drivers/iio/dac/ad5624r_spi.c7
-rw-r--r--drivers/iio/dac/ad5686.c11
-rw-r--r--drivers/iio/dac/ad5755.c21
-rw-r--r--drivers/iio/dac/ad5764.c7
-rw-r--r--drivers/iio/dac/ad5791.c55
-rw-r--r--drivers/iio/dac/max517.c1
-rw-r--r--drivers/iio/dac/mcp4725.c1
-rw-r--r--drivers/iio/gyro/adis16130.c9
-rw-r--r--drivers/iio/gyro/adxrs450.c14
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c11
-rw-r--r--drivers/iio/humidity/Kconfig15
-rw-r--r--drivers/iio/humidity/Makefile5
-rw-r--r--drivers/iio/humidity/dht11.c294
-rw-r--r--drivers/iio/imu/adis16400.h1
-rw-r--r--drivers/iio/imu/adis16400_core.c17
-rw-r--r--drivers/iio/industrialio-buffer.c33
-rw-r--r--drivers/iio/industrialio-core.c102
-rw-r--r--drivers/iio/industrialio-event.c160
-rw-r--r--drivers/iio/industrialio-trigger.c40
-rw-r--r--drivers/iio/kfifo_buf.c23
-rw-r--r--drivers/iio/light/Kconfig11
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/adjd_s311.c7
-rw-r--r--drivers/iio/light/apds9300.c8
-rw-r--r--drivers/iio/light/cm32181.c379
-rw-r--r--drivers/iio/light/cm36651.c37
-rw-r--r--drivers/iio/light/gp2ap020a00f.c8
-rw-r--r--drivers/iio/light/hid-sensor-als.c11
-rw-r--r--drivers/iio/light/tcs3472.c7
-rw-r--r--drivers/iio/light/tsl2563.c28
-rw-r--r--drivers/iio/light/vcnl4000.c11
-rw-r--r--drivers/iio/magnetometer/ak8975.c16
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c12
-rw-r--r--drivers/iio/magnetometer/mag3110.c14
-rw-r--r--drivers/iio/orientation/Kconfig19
-rw-r--r--drivers/iio/orientation/Makefile6
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c428
-rw-r--r--drivers/iio/pressure/Kconfig12
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/mpl3115.c329
-rw-r--r--drivers/infiniband/Kconfig6
-rw-r--r--drivers/infiniband/Makefile1
-rw-r--r--drivers/infiniband/core/Makefile5
-rw-r--r--drivers/infiniband/core/addr.c97
-rw-r--r--drivers/infiniband/core/cm.c52
-rw-r--r--drivers/infiniband/core/cma.c83
-rw-r--r--drivers/infiniband/core/core_priv.h2
-rw-r--r--drivers/infiniband/core/iwcm.c14
-rw-r--r--drivers/infiniband/core/sa_query.c12
-rw-r--r--drivers/infiniband/core/sysfs.c1
-rw-r--r--drivers/infiniband/core/ucma.c18
-rw-r--r--drivers/infiniband/core/uverbs.h10
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c21
-rw-r--r--drivers/infiniband/core/uverbs_main.c27
-rw-r--r--drivers/infiniband/core/verbs.c101
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_intr.c3
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c79
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c4
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_qp.c2
-rw-r--r--drivers/infiniband/hw/mlx4/Kconfig2
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c40
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c9
-rw-r--r--drivers/infiniband/hw/mlx4/main.c802
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h18
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c157
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c8
-rw-r--r--drivers/infiniband/hw/mlx5/Kconfig2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c310
-rw-r--r--drivers/infiniband/hw/mlx5/main.c35
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h4
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c1
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c151
-rw-r--r--drivers/infiniband/hw/mlx5/user.h10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c3
-rw-r--r--drivers/infiniband/hw/nes/nes.c5
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c3
-rw-r--r--drivers/infiniband/hw/ocrdma/Kconfig2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h12
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c21
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.h1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c140
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c9
-rw-r--r--drivers/infiniband/hw/usnic/Kconfig10
-rw-r--r--drivers/infiniband/hw/usnic/Makefile15
-rw-r--r--drivers/infiniband/hw/usnic/usnic.h29
-rw-r--r--drivers/infiniband/hw/usnic/usnic_abi.h73
-rw-r--r--drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h27
-rw-r--r--drivers/infiniband/hw/usnic/usnic_common_util.h68
-rw-r--r--drivers/infiniband/hw/usnic/usnic_debugfs.c154
-rw-r--r--drivers/infiniband/hw/usnic/usnic_debugfs.h29
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.c350
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.h113
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib.h118
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c682
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c761
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h117
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c341
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.h29
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c765
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h72
-rw-r--r--drivers/infiniband/hw/usnic/usnic_log.h58
-rw-r--r--drivers/infiniband/hw/usnic/usnic_transport.c202
-rw-r--r--drivers/infiniband/hw/usnic/usnic_transport.h51
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c604
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.h80
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c236
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h73
-rw-r--r--drivers/infiniband/hw/usnic/usnic_vnic.c467
-rw-r--r--drivers/infiniband/hw/usnic/usnic_vnic.h103
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c3
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c3
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c10
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c245
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h10
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c1
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c14
-rw-r--r--drivers/input/gameport/emu10k1-gp.c1
-rw-r--r--drivers/input/gameport/fm801-gp.c1
-rw-r--r--drivers/input/input.c80
-rw-r--r--drivers/input/joystick/a3d.c1
-rw-r--r--drivers/input/joystick/adi.c1
-rw-r--r--drivers/input/joystick/cobra.c1
-rw-r--r--drivers/input/joystick/gf2k.c1
-rw-r--r--drivers/input/joystick/grip.c1
-rw-r--r--drivers/input/joystick/grip_mp.c1
-rw-r--r--drivers/input/joystick/guillemot.c1
-rw-r--r--drivers/input/joystick/iforce/iforce.h1
-rw-r--r--drivers/input/joystick/interact.c1
-rw-r--r--drivers/input/joystick/joydump.c1
-rw-r--r--drivers/input/joystick/magellan.c1
-rw-r--r--drivers/input/joystick/sidewinder.c1
-rw-r--r--drivers/input/joystick/spaceball.c1
-rw-r--r--drivers/input/joystick/spaceorb.c1
-rw-r--r--drivers/input/joystick/stinger.c1
-rw-r--r--drivers/input/joystick/tmdc.c1
-rw-r--r--drivers/input/joystick/twidjoy.c1
-rw-r--r--drivers/input/joystick/warrior.c1
-rw-r--r--drivers/input/joystick/xpad.c7
-rw-r--r--drivers/input/joystick/zhenhua.c1
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/adp5520-keys.c3
-rw-r--r--drivers/input/keyboard/adp5588-keys.c11
-rw-r--r--drivers/input/keyboard/adp5589-keys.c9
-rw-r--r--drivers/input/keyboard/bf54x-keys.c5
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c15
-rw-r--r--drivers/input/keyboard/davinci_keyscan.c2
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c2
-rw-r--r--drivers/input/keyboard/goldfish_events.c1
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c1
-rw-r--r--drivers/input/keyboard/hil_kbd.c1
-rw-r--r--drivers/input/keyboard/imx_keypad.c4
-rw-r--r--drivers/input/keyboard/jornada680_kbd.c1
-rw-r--r--drivers/input/keyboard/jornada720_kbd.c1
-rw-r--r--drivers/input/keyboard/lkkbd.c1
-rw-r--r--drivers/input/keyboard/lm8323.c2
-rw-r--r--drivers/input/keyboard/lm8333.c3
-rw-r--r--drivers/input/keyboard/matrix_keypad.c1
-rw-r--r--drivers/input/keyboard/max7359_keypad.c3
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c3
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c4
-rw-r--r--drivers/input/keyboard/newtonkbd.c1
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c3
-rw-r--r--drivers/input/keyboard/omap-keypad.c3
-rw-r--r--drivers/input/keyboard/omap4-keypad.c1
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c1
-rw-r--r--drivers/input/keyboard/pxa930_rotary.c4
-rw-r--r--drivers/input/keyboard/qt1070.c1
-rw-r--r--drivers/input/keyboard/qt2160.c1
-rw-r--r--drivers/input/keyboard/samsung-keypad.c37
-rw-r--r--drivers/input/keyboard/sh_keysc.c5
-rw-r--r--drivers/input/keyboard/spear-keyboard.c1
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c1
-rw-r--r--drivers/input/keyboard/stowaway.c1
-rw-r--r--drivers/input/keyboard/sunkbd.c1
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c1
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c2
-rw-r--r--drivers/input/keyboard/tegra-kbc.c13
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c2
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c122
-rw-r--r--drivers/input/keyboard/w90p910_keypad.c3
-rw-r--r--drivers/input/keyboard/xtkbd.c1
-rw-r--r--drivers/input/misc/Kconfig11
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/ad714x.c5
-rw-r--r--drivers/input/misc/adxl34x.c3
-rw-r--r--drivers/input/misc/atlas_btns.c3
-rw-r--r--drivers/input/misc/bfin_rotary.c3
-rw-r--r--drivers/input/misc/bma150.c3
-rw-r--r--drivers/input/misc/cma3000_d0x.c2
-rw-r--r--drivers/input/misc/cobalt_btns.c1
-rw-r--r--drivers/input/misc/da9052_onkey.c1
-rw-r--r--drivers/input/misc/da9055_onkey.c1
-rw-r--r--drivers/input/misc/dm355evm_keys.c1
-rw-r--r--drivers/input/misc/gp2ap002a00f.c2
-rw-r--r--drivers/input/misc/gpio-beeper.c127
-rw-r--r--drivers/input/misc/gpio_tilt_polled.c4
-rw-r--r--drivers/input/misc/keyspan_remote.c1
-rw-r--r--drivers/input/misc/kxtj9.c3
-rw-r--r--drivers/input/misc/max8997_haptic.c1
-rw-r--r--drivers/input/misc/mc13783-pwrbutton.c1
-rw-r--r--drivers/input/misc/mpu3050.c1
-rw-r--r--drivers/input/misc/pcap_keys.c1
-rw-r--r--drivers/input/misc/pcf50633-input.c1
-rw-r--r--drivers/input/misc/pcf8574_keypad.c1
-rw-r--r--drivers/input/misc/pcspkr.c1
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c107
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c106
-rw-r--r--drivers/input/misc/powermate.c1
-rw-r--r--drivers/input/misc/pwm-beeper.c2
-rw-r--r--drivers/input/misc/retu-pwrbutton.c1
-rw-r--r--drivers/input/misc/rotary_encoder.c1
-rw-r--r--drivers/input/misc/sgi_btns.c1
-rw-r--r--drivers/input/misc/sirfsoc-onkey.c1
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c46
-rw-r--r--drivers/input/misc/twl4030-vibra.c6
-rw-r--r--drivers/input/misc/twl6040-vibra.c8
-rw-r--r--drivers/input/misc/wm831x-on.c1
-rw-r--r--drivers/input/misc/xen-kbdfront.c4
-rw-r--r--drivers/input/misc/yealink.c1
-rw-r--r--drivers/input/mouse/alps.c214
-rw-r--r--drivers/input/mouse/alps.h7
-rw-r--r--drivers/input/mouse/appletouch.c1
-rw-r--r--drivers/input/mouse/bcm5974.c1
-rw-r--r--drivers/input/mouse/cypress_ps2.c1
-rw-r--r--drivers/input/mouse/elantech.c45
-rw-r--r--drivers/input/mouse/gpio_mouse.c3
-rw-r--r--drivers/input/mouse/logips2pp.c2
-rw-r--r--drivers/input/mouse/navpoint.c1
-rw-r--r--drivers/input/mouse/pxa930_trkball.c3
-rw-r--r--drivers/input/mouse/sermouse.c1
-rw-r--r--drivers/input/mouse/synaptics_usb.c1
-rw-r--r--drivers/input/mouse/vsxxxaa.c1
-rw-r--r--drivers/input/serio/Kconfig11
-rw-r--r--drivers/input/serio/altera_ps2.c1
-rw-r--r--drivers/input/serio/ambakmi.c3
-rw-r--r--drivers/input/serio/hyperv-keyboard.c4
-rw-r--r--drivers/input/serio/libps2.c1
-rw-r--r--drivers/input/serio/olpc_apsp.c1
-rw-r--r--drivers/input/serio/pcips2.c2
-rw-r--r--drivers/input/serio/q40kbd.c1
-rw-r--r--drivers/input/serio/rpckbd.c1
-rw-r--r--drivers/input/serio/serio_raw.c1
-rw-r--r--drivers/input/serio/serport.c28
-rw-r--r--drivers/input/serio/xilinx_ps2.c1
-rw-r--r--drivers/input/tablet/acecad.c1
-rw-r--r--drivers/input/tablet/aiptek.c1
-rw-r--r--drivers/input/tablet/gtco.c1
-rw-r--r--drivers/input/tablet/hanwang.c1
-rw-r--r--drivers/input/tablet/kbtab.c1
-rw-r--r--drivers/input/tablet/wacom.h1
-rw-r--r--drivers/input/tablet/wacom_sys.c23
-rw-r--r--drivers/input/tablet/wacom_wac.c169
-rw-r--r--drivers/input/tablet/wacom_wac.h13
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c2
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/input/touchscreen/ad7877.c3
-rw-r--r--drivers/input/touchscreen/ad7879.c5
-rw-r--r--drivers/input/touchscreen/ads7846.c86
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c3
-rw-r--r--drivers/input/touchscreen/atmel_tsadcc.c3
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c2
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c4
-rw-r--r--drivers/input/touchscreen/cyttsp_i2c_common.c2
-rw-r--r--drivers/input/touchscreen/da9034-ts.c3
-rw-r--r--drivers/input/touchscreen/dynapro.c1
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c71
-rw-r--r--drivers/input/touchscreen/eeti_ts.c2
-rw-r--r--drivers/input/touchscreen/egalax_ts.c1
-rw-r--r--drivers/input/touchscreen/elo.c1
-rw-r--r--drivers/input/touchscreen/fujitsu_ts.c1
-rw-r--r--drivers/input/touchscreen/gunze.c1
-rw-r--r--drivers/input/touchscreen/hampshire.c1
-rw-r--r--drivers/input/touchscreen/ili210x.c2
-rw-r--r--drivers/input/touchscreen/inexio.c1
-rw-r--r--drivers/input/touchscreen/intel-mid-touch.c1
-rw-r--r--drivers/input/touchscreen/jornada720_ts.c1
-rw-r--r--drivers/input/touchscreen/lpc32xx_ts.c1
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c1
-rw-r--r--drivers/input/touchscreen/max11801_ts.c1
-rw-r--r--drivers/input/touchscreen/mcs5000_ts.c5
-rw-r--r--drivers/input/touchscreen/mms114.c1
-rw-r--r--drivers/input/touchscreen/mtouch.c1
-rw-r--r--drivers/input/touchscreen/pcap_ts.c1
-rw-r--r--drivers/input/touchscreen/penmount.c1
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c3
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c5
-rw-r--r--drivers/input/touchscreen/st1232.c2
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c1
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c5
-rw-r--r--drivers/input/touchscreen/touchit213.c1
-rw-r--r--drivers/input/touchscreen/touchright.c1
-rw-r--r--drivers/input/touchscreen/touchwin.c1
-rw-r--r--drivers/input/touchscreen/tsc2005.c2
-rw-r--r--drivers/input/touchscreen/tsc2007.c230
-rw-r--r--drivers/input/touchscreen/tsc40.c1
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c9
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c1
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c1
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c1
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c2
-rw-r--r--drivers/input/touchscreen/zforce_ts.c24
-rw-r--r--drivers/input/touchscreen/zylonite-wm97xx.c1
-rw-r--r--drivers/iommu/Kconfig1
-rw-r--r--drivers/iommu/amd_iommu.c4
-rw-r--r--drivers/iommu/amd_iommu_init.c1
-rw-r--r--drivers/iommu/arm-smmu.c33
-rw-r--r--drivers/iommu/dmar.c135
-rw-r--r--drivers/iommu/fsl_pamu_domain.c6
-rw-r--r--drivers/iommu/intel-iommu.c218
-rw-r--r--drivers/iommu/intel_irq_remapping.c109
-rw-r--r--drivers/iommu/irq_remapping.c6
-rw-r--r--drivers/iommu/of_iommu.c1
-rw-r--r--drivers/iommu/shmobile-iommu.c3
-rw-r--r--drivers/iommu/shmobile-ipmmu.c10
-rw-r--r--drivers/iommu/shmobile-ipmmu.h2
-rw-r--r--drivers/irqchip/Kconfig8
-rw-r--r--drivers/irqchip/Makefile4
-rw-r--r--drivers/irqchip/exynos-combiner.c15
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c8
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c150
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c8
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c21
-rw-r--r--drivers/irqchip/irq-sirfsoc.c3
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c15
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c164
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c108
-rw-r--r--drivers/irqchip/irq-zevio.c127
-rw-r--r--drivers/isdn/hisax/hfc_pci.c4
-rw-r--r--drivers/isdn/hisax/q931.c2
-rw-r--r--drivers/isdn/hisax/telespci.c4
-rw-r--r--drivers/isdn/i4l/isdn_net.c4
-rw-r--r--drivers/isdn/mISDN/socket.c5
-rw-r--r--drivers/isdn/sc/event.c2
-rw-r--r--drivers/leds/led-triggers.c15
-rw-r--r--drivers/leds/leds-lp5521.c30
-rw-r--r--drivers/leds/leds-lp5523.c32
-rw-r--r--drivers/leds/leds-lp55xx-common.c2
-rw-r--r--drivers/leds/leds-mc13783.c89
-rw-r--r--drivers/leds/leds-pwm.c9
-rw-r--r--drivers/leds/leds-s3c24xx.c4
-rw-r--r--drivers/leds/leds-tca6507.c207
-rw-r--r--drivers/macintosh/Kconfig2
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c2
-rw-r--r--drivers/mailbox/omap-mbox.h2
-rw-r--r--drivers/md/Kconfig11
-rw-r--r--drivers/md/Makefile1
-rw-r--r--drivers/md/bcache/Makefile5
-rw-r--r--drivers/md/bcache/alloc.c91
-rw-r--r--drivers/md/bcache/bcache.h98
-rw-r--r--drivers/md/bcache/bset.c907
-rw-r--r--drivers/md/bcache/bset.h440
-rw-r--r--drivers/md/bcache/btree.c707
-rw-r--r--drivers/md/bcache/btree.h62
-rw-r--r--drivers/md/bcache/closure.c90
-rw-r--r--drivers/md/bcache/closure.h355
-rw-r--r--drivers/md/bcache/debug.c268
-rw-r--r--drivers/md/bcache/debug.h27
-rw-r--r--drivers/md/bcache/extents.c616
-rw-r--r--drivers/md/bcache/extents.h13
-rw-r--r--drivers/md/bcache/io.c196
-rw-r--r--drivers/md/bcache/journal.c87
-rw-r--r--drivers/md/bcache/journal.h1
-rw-r--r--drivers/md/bcache/movinggc.c27
-rw-r--r--drivers/md/bcache/request.c204
-rw-r--r--drivers/md/bcache/request.h21
-rw-r--r--drivers/md/bcache/super.c125
-rw-r--r--drivers/md/bcache/sysfs.c129
-rw-r--r--drivers/md/bcache/util.c12
-rw-r--r--drivers/md/bcache/util.h10
-rw-r--r--drivers/md/bcache/writeback.c59
-rw-r--r--drivers/md/bcache/writeback.h2
-rw-r--r--drivers/md/bitmap.c2
-rw-r--r--drivers/md/bitmap.h2
-rw-r--r--drivers/md/dm-bio-record.h37
-rw-r--r--drivers/md/dm-bufio.c38
-rw-r--r--drivers/md/dm-bufio.h12
-rw-r--r--drivers/md/dm-builtin.c48
-rw-r--r--drivers/md/dm-cache-policy-mq.c74
-rw-r--r--drivers/md/dm-cache-policy.c4
-rw-r--r--drivers/md/dm-cache-policy.h6
-rw-r--r--drivers/md/dm-cache-target.c48
-rw-r--r--drivers/md/dm-crypt.c64
-rw-r--r--drivers/md/dm-delay.c42
-rw-r--r--drivers/md/dm-flakey.c7
-rw-r--r--drivers/md/dm-io.c37
-rw-r--r--drivers/md/dm-linear.c3
-rw-r--r--drivers/md/dm-log-userspace-base.c206
-rw-r--r--drivers/md/dm-raid1.c20
-rw-r--r--drivers/md/dm-region-hash.c3
-rw-r--r--drivers/md/dm-snap-persistent.c87
-rw-r--r--drivers/md/dm-snap.c29
-rw-r--r--drivers/md/dm-stripe.c13
-rw-r--r--drivers/md/dm-switch.c4
-rw-r--r--drivers/md/dm-sysfs.c5
-rw-r--r--drivers/md/dm-table.c22
-rw-r--r--drivers/md/dm-thin-metadata.c20
-rw-r--r--drivers/md/dm-thin-metadata.h4
-rw-r--r--drivers/md/dm-thin.c314
-rw-r--r--drivers/md/dm-verity.c62
-rw-r--r--drivers/md/dm.c204
-rw-r--r--drivers/md/dm.h17
-rw-r--r--drivers/md/faulty.c19
-rw-r--r--drivers/md/linear.c96
-rw-r--r--drivers/md/md.c106
-rw-r--r--drivers/md/md.h13
-rw-r--r--drivers/md/multipath.c13
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c2
-rw-r--r--drivers/md/persistent-data/dm-btree.c33
-rw-r--r--drivers/md/persistent-data/dm-btree.h8
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c6
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c32
-rw-r--r--drivers/md/raid0.c79
-rw-r--r--drivers/md/raid1.c89
-rw-r--r--drivers/md/raid10.c209
-rw-r--r--drivers/md/raid5.c191
-rw-r--r--drivers/media/Kconfig3
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h3
-rw-r--r--drivers/media/dvb-core/dvb_net.c10
-rw-r--r--drivers/media/dvb-frontends/Kconfig7
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/a8293.c2
-rw-r--r--drivers/media/dvb-frontends/cx24117.c131
-rw-r--r--drivers/media/dvb-frontends/dib8000.c590
-rw-r--r--drivers/media/dvb-frontends/drxk.h2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c24
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c1311
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.h114
-rw-r--r--drivers/media/dvb-frontends/m88ds3103_priv.h215
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c172
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.h2
-rw-r--r--drivers/media/dvb-frontends/nxt200x.c4
-rw-r--r--drivers/media/i2c/Kconfig27
-rw-r--r--drivers/media/i2c/Makefile3
-rw-r--r--drivers/media/i2c/ad9389b.c277
-rw-r--r--drivers/media/i2c/adv7511.c64
-rw-r--r--drivers/media/i2c/adv7604.c645
-rw-r--r--drivers/media/i2c/adv7842.c646
-rw-r--r--drivers/media/i2c/lm3560.c34
-rw-r--r--drivers/media/i2c/mt9m032.c16
-rw-r--r--drivers/media/i2c/mt9p031.c28
-rw-r--r--drivers/media/i2c/mt9t001.c26
-rw-r--r--drivers/media/i2c/mt9v032.c264
-rw-r--r--drivers/media/i2c/s5k5baf.c2053
-rw-r--r--drivers/media/i2c/saa6588.c50
-rw-r--r--drivers/media/i2c/saa6752hs.c (renamed from drivers/media/pci/saa7134/saa6752hs.c)19
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c9
-rw-r--r--drivers/media/i2c/soc_camera/mt9m111.c4
-rw-r--r--drivers/media/i2c/tvp5150.c40
-rw-r--r--drivers/media/i2c/vs6624.c2
-rw-r--r--drivers/media/media-entity.c41
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c10
-rw-r--r--drivers/media/pci/bt8xx/bttv-gpio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c5
-rw-r--r--drivers/media/pci/cx25821/cx25821-alsa.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c2
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c4
-rw-r--r--drivers/media/pci/saa7134/Kconfig1
-rw-r--r--drivers/media/pci/saa7134/Makefile2
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c11
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c359
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c11
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c781
-rw-r--r--drivers/media/pci/saa7134/saa7134.h66
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c2
-rw-r--r--drivers/media/platform/Kconfig10
-rw-r--r--drivers/media/platform/Makefile3
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c2
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c2
-rw-r--r--drivers/media/platform/davinci/vpif_display.c2
-rw-r--r--drivers/media/platform/exynos4-is/Kconfig2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c32
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-regs.c36
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-regs.h1
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c29
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite-reg.c4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c29
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c148
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c13
-rw-r--r--drivers/media/platform/fsl-viu.c2
-rw-r--r--drivers/media/platform/m2m-deinterlace.c2
-rw-r--r--drivers/media/platform/mem2mem_testdev.c152
-rw-r--r--drivers/media/platform/omap3isp/isp.c100
-rw-r--r--drivers/media/platform/omap3isp/isp.h6
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c5
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c3
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c3
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c3
-rw-r--r--drivers/media/platform/omap3isp/ispqueue.c2
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.c18
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c2
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c106
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.h2
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c124
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.h1
-rw-r--r--drivers/media/platform/s5p-jpeg/Makefile2
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c1327
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h69
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c279
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h42
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c (renamed from drivers/media/platform/s5p-jpeg/jpeg-hw.h)82
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h63
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-regs.h209
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c28
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h14
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c57
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c26
-rw-r--r--drivers/media/platform/s5p-tv/mixer_drv.c34
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c2
-rw-r--r--drivers/media/platform/s5p-tv/sdo_drv.c39
-rw-r--r--drivers/media/platform/sh_vou.c16
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c179
-rw-r--r--drivers/media/platform/soc_camera/mx2_camera.c2
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c7
-rw-r--r--drivers/media/platform/soc_camera/soc_scale_crop.c4
-rw-r--r--drivers/media/platform/ti-vpe/Makefile2
-rw-r--r--drivers/media/platform/ti-vpe/csc.c196
-rw-r--r--drivers/media/platform/ti-vpe/csc.h68
-rw-r--r--drivers/media/platform/ti-vpe/sc.c311
-rw-r--r--drivers/media/platform/ti-vpe/sc.h208
-rw-r--r--drivers/media/platform/ti-vpe/sc_coeff.h1342
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c40
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.h12
-rw-r--r--drivers/media/platform/ti-vpe/vpdma_priv.h2
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c327
-rw-r--r--drivers/media/platform/ti-vpe/vpe_regs.h187
-rw-r--r--drivers/media/platform/vsp1/Makefile3
-rw-r--r--drivers/media/platform/vsp1/vsp1.h7
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c39
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c7
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h4
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c222
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.h38
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.c252
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.h38
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h16
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c34
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c96
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h10
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c356
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.h41
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c13
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c17
-rw-r--r--drivers/media/radio/Kconfig43
-rw-r--r--drivers/media/radio/Makefile4
-rw-r--r--drivers/media/radio/radio-raremono.c387
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c81
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h1
-rw-r--r--drivers/media/radio/si4713/Kconfig40
-rw-r--r--drivers/media/radio/si4713/Makefile7
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c (renamed from drivers/media/radio/radio-si4713.c)0
-rw-r--r--drivers/media/radio/si4713/radio-usb-si4713.c540
-rw-r--r--drivers/media/radio/si4713/si4713.c (renamed from drivers/media/radio/si4713-i2c.c)279
-rw-r--r--drivers/media/radio/si4713/si4713.h (renamed from drivers/media/radio/si4713-i2c.h)4
-rw-r--r--drivers/media/radio/tea575x.c2
-rw-r--r--drivers/media/rc/imon.c8
-rw-r--r--drivers/media/rc/keymaps/Makefile3
-rw-r--r--drivers/media/rc/keymaps/rc-su3000.c75
-rw-r--r--drivers/media/rc/mceusb.c10
-rw-r--r--drivers/media/rc/rc-main.c20
-rw-r--r--drivers/media/rc/st_rc.c13
-rw-r--r--drivers/media/tuners/Kconfig7
-rw-r--r--drivers/media/tuners/Makefile1
-rw-r--r--drivers/media/tuners/e4000.c16
-rw-r--r--drivers/media/tuners/m88ts2022.c674
-rw-r--r--drivers/media/tuners/m88ts2022.h54
-rw-r--r--drivers/media/tuners/m88ts2022_priv.h34
-rw-r--r--drivers/media/tuners/tuner-xc2028.c38
-rw-r--r--drivers/media/usb/Kconfig1
-rw-r--r--drivers/media/usb/Makefile1
-rw-r--r--drivers/media/usb/au0828/au0828-core.c13
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c116
-rw-r--r--drivers/media/usb/au0828/au0828.h6
-rw-r--r--drivers/media/usb/cx231xx/Kconfig2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c23
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c3
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c59
-rw-r--r--drivers/media/usb/dvb-usb-v2/ec168.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/it913x.c3
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c2
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c21
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c455
-rw-r--r--drivers/media/usb/em28xx/Kconfig8
-rw-r--r--drivers/media/usb/em28xx/Makefile5
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c429
-rw-r--r--drivers/media/usb/em28xx/em28xx-camera.c1
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c553
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c410
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c112
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c199
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c209
-rw-r--r--drivers/media/usb/em28xx/em28xx-reg.h11
-rw-r--r--drivers/media/usb/em28xx/em28xx-v4l.h20
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c1
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c652
-rw-r--r--drivers/media/usb/em28xx/em28xx.h120
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c4
-rw-r--r--drivers/media/usb/pwc/pwc-if.c1
-rw-r--r--drivers/media/v4l2-core/Kconfig11
-rw-r--r--drivers/media/v4l2-core/Makefile1
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c5
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c9
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c126
-rw-r--r--drivers/media/v4l2-core/v4l2-of.c10
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-contig.c12
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c10
-rw-r--r--drivers/media/v4l2-core/videobuf-vmalloc.c10
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c485
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c53
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c30
-rw-r--r--drivers/message/fusion/mptbase.c2
-rw-r--r--drivers/message/fusion/mptsas.c8
-rw-r--r--drivers/message/i2o/i2o_config.c4
-rw-r--r--drivers/mfd/88pm800.c4
-rw-r--r--drivers/mfd/88pm805.c2
-rw-r--r--drivers/mfd/Kconfig30
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/ab8500-core.c16
-rw-r--r--drivers/mfd/ab8500-debugfs.c5
-rw-r--r--drivers/mfd/arizona-core.c8
-rw-r--r--drivers/mfd/as3722.c5
-rw-r--r--drivers/mfd/asic3.c4
-rw-r--r--drivers/mfd/cros_ec.c2
-rw-r--r--drivers/mfd/cros_ec_i2c.c8
-rw-r--r--drivers/mfd/cros_ec_spi.c56
-rw-r--r--drivers/mfd/cs5535-mfd.c2
-rw-r--r--drivers/mfd/da9052-core.c2
-rw-r--r--drivers/mfd/da9055-core.c2
-rw-r--r--drivers/mfd/da9063-core.c2
-rw-r--r--drivers/mfd/db8500-prcmu.c4
-rw-r--r--drivers/mfd/htc-pasic3.c2
-rw-r--r--drivers/mfd/intel_msic.c2
-rw-r--r--drivers/mfd/janz-cmodio.c2
-rw-r--r--drivers/mfd/jz4740-adc.c2
-rw-r--r--drivers/mfd/lp3943.c167
-rw-r--r--drivers/mfd/lp8788.c2
-rw-r--r--drivers/mfd/lpc_ich.c2
-rw-r--r--drivers/mfd/lpc_sch.c2
-rw-r--r--drivers/mfd/max14577.c245
-rw-r--r--drivers/mfd/max77686.c4
-rw-r--r--drivers/mfd/max77693.c10
-rw-r--r--drivers/mfd/max8907.c2
-rw-r--r--drivers/mfd/max8925-core.c8
-rw-r--r--drivers/mfd/max8997.c25
-rw-r--r--drivers/mfd/max8998.c6
-rw-r--r--drivers/mfd/mc13xxx-core.c41
-rw-r--r--drivers/mfd/mc13xxx-i2c.c18
-rw-r--r--drivers/mfd/mc13xxx-spi.c20
-rw-r--r--drivers/mfd/mc13xxx.h6
-rw-r--r--drivers/mfd/omap-usb-host.c8
-rw-r--r--drivers/mfd/omap-usb-tll.c44
-rw-r--r--drivers/mfd/pcf50633-core.c2
-rw-r--r--drivers/mfd/rc5t583.c2
-rw-r--r--drivers/mfd/rdc321x-southbridge.c4
-rw-r--r--drivers/mfd/retu-mfd.c6
-rw-r--r--drivers/mfd/rtl8411.c101
-rw-r--r--drivers/mfd/rtsx_pcr.c17
-rw-r--r--drivers/mfd/rtsx_pcr.h9
-rw-r--r--drivers/mfd/sec-core.c92
-rw-r--r--drivers/mfd/sec-irq.c12
-rw-r--r--drivers/mfd/sm501.c2
-rw-r--r--drivers/mfd/ssbi.c16
-rw-r--r--drivers/mfd/sta2x11-mfd.c6
-rw-r--r--drivers/mfd/stmpe.c10
-rw-r--r--drivers/mfd/stmpe.h2
-rw-r--r--drivers/mfd/tc3589x.c4
-rw-r--r--drivers/mfd/tc6387xb.c2
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c71
-rw-r--r--drivers/mfd/timberdale.c14
-rw-r--r--drivers/mfd/tps6507x.c2
-rw-r--r--drivers/mfd/tps65090.c2
-rw-r--r--drivers/mfd/tps65217.c4
-rw-r--r--drivers/mfd/tps6586x.c52
-rw-r--r--drivers/mfd/tps65910.c2
-rw-r--r--drivers/mfd/tps65912-core.c2
-rw-r--r--drivers/mfd/tps80031.c2
-rw-r--r--drivers/mfd/twl-core.c403
-rw-r--r--drivers/mfd/twl6030-irq.c8
-rw-r--r--drivers/mfd/twl6040.c92
-rw-r--r--drivers/mfd/viperboard.c2
-rw-r--r--drivers/mfd/vx855.c4
-rw-r--r--drivers/mfd/wm5110-tables.c177
-rw-r--r--drivers/mfd/wm831x-core.c14
-rw-r--r--drivers/mfd/wm831x-i2c.c8
-rw-r--r--drivers/mfd/wm831x-spi.c8
-rw-r--r--drivers/mfd/wm8994-core.c4
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/ad525x_dpot.c4
-rw-r--r--drivers/misc/bmp085-i2c.c2
-rw-r--r--drivers/misc/bmp085-spi.c2
-rw-r--r--drivers/misc/bmp085.c39
-rw-r--r--drivers/misc/bmp085.h2
-rw-r--r--drivers/misc/eeprom/eeprom.c2
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c1
-rw-r--r--drivers/misc/genwqe/Kconfig13
-rw-r--r--drivers/misc/genwqe/Makefile7
-rw-r--r--drivers/misc/genwqe/card_base.c1205
-rw-r--r--drivers/misc/genwqe/card_base.h557
-rw-r--r--drivers/misc/genwqe/card_ddcb.c1376
-rw-r--r--drivers/misc/genwqe/card_ddcb.h188
-rw-r--r--drivers/misc/genwqe/card_debugfs.c500
-rw-r--r--drivers/misc/genwqe/card_dev.c1415
-rw-r--r--drivers/misc/genwqe/card_sysfs.c288
-rw-r--r--drivers/misc/genwqe/card_utils.c944
-rw-r--r--drivers/misc/genwqe/genwqe_driver.h77
-rw-r--r--drivers/misc/lkdtm.c7
-rw-r--r--drivers/misc/mei/amthif.c6
-rw-r--r--drivers/misc/mei/client.c38
-rw-r--r--drivers/misc/mei/debugfs.c4
-rw-r--r--drivers/misc/mei/hbm.c239
-rw-r--r--drivers/misc/mei/hbm.h7
-rw-r--r--drivers/misc/mei/hw-me.c40
-rw-r--r--drivers/misc/mei/hw.h3
-rw-r--r--drivers/misc/mei/init.c278
-rw-r--r--drivers/misc/mei/interrupt.c122
-rw-r--r--drivers/misc/mei/main.c2
-rw-r--r--drivers/misc/mei/mei_dev.h33
-rw-r--r--drivers/misc/mei/nfc.c18
-rw-r--r--drivers/misc/mei/pci-me.c27
-rw-r--r--drivers/misc/mei/wd.c1
-rw-r--r--drivers/misc/mic/host/mic_device.h5
-rw-r--r--drivers/misc/mic/host/mic_main.c2
-rw-r--r--drivers/misc/mic/host/mic_virtio.c5
-rw-r--r--drivers/misc/mic/host/mic_x100.c36
-rw-r--r--drivers/misc/sgi-gru/grukdump.c11
-rw-r--r--drivers/misc/sgi-xp/xpc_channel.c5
-rw-r--r--drivers/misc/ti-st/st_core.c2
-rw-r--r--drivers/misc/ti-st/st_kim.c1
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c10
-rw-r--r--drivers/mmc/card/block.c7
-rw-r--r--drivers/mmc/core/core.c3
-rw-r--r--drivers/mmc/core/mmc.c8
-rw-r--r--drivers/mmc/core/quirks.c8
-rw-r--r--drivers/mmc/core/sd.c37
-rw-r--r--drivers/mmc/core/sdio_bus.c2
-rw-r--r--drivers/mmc/core/sdio_irq.c11
-rw-r--r--drivers/mmc/host/Kconfig30
-rw-r--r--drivers/mmc/host/Makefile3
-rw-r--r--drivers/mmc/host/atmel-mci.c20
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c95
-rw-r--r--drivers/mmc/host/dw_mmc.c54
-rw-r--r--drivers/mmc/host/mmci.c2
-rw-r--r--drivers/mmc/host/mxs-mmc.c60
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mmc/host/sdhci-acpi.c31
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c122
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c224
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c321
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.h72
-rw-r--r--drivers/mmc/host/sdhci-pci.c191
-rw-r--r--drivers/mmc/host/sdhci-pci.h78
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c6
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h2
-rw-r--r--drivers/mmc/host/sdhci-tegra.c1
-rw-r--r--drivers/mmc/host/sdhci.c44
-rw-r--r--drivers/mmc/host/sh_mmcif.c100
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c23
-rw-r--r--drivers/mmc/host/tmio_mmc.c9
-rw-r--r--drivers/mmc/host/tmio_mmc.h17
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c38
-rw-r--r--drivers/mtd/Kconfig7
-rw-r--r--drivers/mtd/afs.c3
-rw-r--r--drivers/mtd/ar7part.c3
-rw-r--r--drivers/mtd/bcm47xxpart.c27
-rw-r--r--drivers/mtd/bcm63xxpart.c3
-rw-r--r--drivers/mtd/cmdlinepart.c3
-rw-r--r--drivers/mtd/devices/docg3.c20
-rw-r--r--drivers/mtd/devices/m25p80.c226
-rw-r--r--drivers/mtd/devices/ms02-nv.c2
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c5
-rw-r--r--drivers/mtd/devices/mtdram.c2
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c32
-rw-r--r--drivers/mtd/maps/ixp4xx.c28
-rw-r--r--drivers/mtd/maps/lantiq-flash.c37
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c4
-rw-r--r--drivers/mtd/maps/sun_uflash.c2
-rw-r--r--drivers/mtd/mtdcore.c10
-rw-r--r--drivers/mtd/mtdpart.c9
-rw-r--r--drivers/mtd/nand/Kconfig14
-rw-r--r--drivers/mtd/nand/atmel_nand.c8
-rw-r--r--drivers/mtd/nand/au1550nd.c6
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c1
-rw-r--r--drivers/mtd/nand/cafe_nand.c4
-rw-r--r--drivers/mtd/nand/cmx270_nand.c1
-rw-r--r--drivers/mtd/nand/cs553x_nand.c1
-rw-r--r--drivers/mtd/nand/davinci_nand.c93
-rw-r--r--drivers/mtd/nand/denali.c53
-rw-r--r--drivers/mtd/nand/denali.h4
-rw-r--r--drivers/mtd/nand/denali_dt.c4
-rw-r--r--drivers/mtd/nand/denali_pci.c3
-rw-r--r--drivers/mtd/nand/diskonchip.c13
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c3
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c3
-rw-r--r--drivers/mtd/nand/fsmc_nand.c16
-rw-r--r--drivers/mtd/nand/gpio.c12
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c127
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c207
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h2
-rw-r--r--drivers/mtd/nand/jz4740_nand.c4
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c26
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c19
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c12
-rw-r--r--drivers/mtd/nand/mxc_nand.c25
-rw-r--r--drivers/mtd/nand/nand_base.c183
-rw-r--r--drivers/mtd/nand/nand_ids.c2
-rw-r--r--drivers/mtd/nand/nuc900_nand.c54
-rw-r--r--drivers/mtd/nand/omap2.c8
-rw-r--r--drivers/mtd/nand/orion_nand.c4
-rw-r--r--drivers/mtd/nand/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/plat_nand.c40
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c713
-rw-r--r--drivers/mtd/nand/s3c2410.c38
-rw-r--r--drivers/mtd/nand/sh_flctl.c55
-rw-r--r--drivers/mtd/nand/sharpsl.c6
-rw-r--r--drivers/mtd/nand/tmio_nand.c46
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c5
-rw-r--r--drivers/mtd/ofpart.c19
-rw-r--r--drivers/mtd/onenand/generic.c2
-rw-r--r--drivers/mtd/redboot.c3
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c2
-rw-r--r--drivers/mtd/ubi/attach.c4
-rw-r--r--drivers/mtd/ubi/build.c4
-rw-r--r--drivers/mtd/ubi/io.c54
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/Space.c29
-rw-r--r--drivers/net/arcnet/com20020_cs.c1
-rw-r--r--drivers/net/bonding/Makefile2
-rw-r--r--drivers/net/bonding/bond_3ad.c945
-rw-r--r--drivers/net/bonding/bond_3ad.h2
-rw-r--r--drivers/net/bonding/bond_alb.c49
-rw-r--r--drivers/net/bonding/bond_alb.h3
-rw-r--r--drivers/net/bonding/bond_main.c666
-rw-r--r--drivers/net/bonding/bond_netlink.c464
-rw-r--r--drivers/net/bonding/bond_options.c1191
-rw-r--r--drivers/net/bonding/bond_options.h170
-rw-r--r--drivers/net/bonding/bond_procfs.c37
-rw-r--r--drivers/net/bonding/bond_sysfs.c896
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c144
-rw-r--r--drivers/net/bonding/bonding.h51
-rw-r--r--drivers/net/caif/caif_spi_slave.c1
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/at91_can.c1
-rw-r--r--drivers/net/can/bfin_can.c1
-rw-r--r--drivers/net/can/c_can/c_can.c22
-rw-r--r--drivers/net/can/dev.c18
-rw-r--r--drivers/net/can/flexcan.c7
-rw-r--r--drivers/net/can/janz-ican3.c21
-rw-r--r--drivers/net/can/mcp251x.c121
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c273
-rw-r--r--drivers/net/can/mscan/mscan.c3
-rw-r--r--drivers/net/can/mscan/mscan.h3
-rw-r--r--drivers/net/can/pch_can.c4
-rw-r--r--drivers/net/can/sja1000/ems_pci.c3
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c3
-rw-r--r--drivers/net/can/sja1000/plx_pci.c26
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c3
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c3
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c3
-rw-r--r--drivers/net/can/slcan.c4
-rw-r--r--drivers/net/can/softing/softing_cs.c3
-rw-r--r--drivers/net/can/softing/softing_fw.c3
-rw-r--r--drivers/net/can/softing/softing_main.c4
-rw-r--r--drivers/net/can/ti_hecc.c11
-rw-r--r--drivers/net/can/usb/ems_usb.c4
-rw-r--r--drivers/net/can/usb/esd_usb2.c1
-rw-r--r--drivers/net/can/usb/kvaser_usb.c1
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c3
-rw-r--r--drivers/net/can/usb/usb_8dev.c1
-rw-r--r--drivers/net/can/vcan.c9
-rw-r--r--drivers/net/eql.c95
-rw-r--r--drivers/net/ethernet/3com/3c509.c3
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c1
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c1
-rw-r--r--drivers/net/ethernet/3com/3c59x.c8
-rw-r--r--drivers/net/ethernet/8390/8390.h7
-rw-r--r--drivers/net/ethernet/8390/apne.c61
-rw-r--r--drivers/net/ethernet/8390/ax88796.c23
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c120
-rw-r--r--drivers/net/ethernet/8390/etherh.c53
-rw-r--r--drivers/net/ethernet/8390/hydra.c13
-rw-r--r--drivers/net/ethernet/8390/lib8390.c77
-rw-r--r--drivers/net/ethernet/8390/mac8390.c19
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c9
-rw-r--r--drivers/net/ethernet/8390/ne.c96
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c54
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c63
-rw-r--r--drivers/net/ethernet/8390/smc-ultra.c48
-rw-r--r--drivers/net/ethernet/8390/stnic.c28
-rw-r--r--drivers/net/ethernet/8390/wd.c42
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c26
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c22
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c3
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/alteon/acenic.c1
-rw-r--r--drivers/net/ethernet/amd/7990.c837
-rw-r--r--drivers/net/ethernet/amd/7990.h268
-rw-r--r--drivers/net/ethernet/amd/a2065.c13
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c5
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h6
-rw-r--r--drivers/net/ethernet/amd/ariadne.c13
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c4
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.h3
-rw-r--r--drivers/net/ethernet/amd/hplance.c96
-rw-r--r--drivers/net/ethernet/amd/mvme147.c36
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c1
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c2
-rw-r--r--drivers/net/ethernet/amd/sunlance.c1
-rw-r--r--drivers/net/ethernet/arc/emac.h2
-rw-r--r--drivers/net/ethernet/arc/emac_main.c24
-rw-r--r--drivers/net/ethernet/atheros/alx/alx.h3
-rw-r--r--drivers/net/ethernet/atheros/alx/ethtool.c101
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.c58
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.h71
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c51
-rw-r--r--drivers/net/ethernet/atheros/alx/reg.h52
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h1
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c39
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e.h1
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c30
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c46
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.h1
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c250
-rw-r--r--drivers/net/ethernet/broadcom/b44.h15
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c364
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h94
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c62
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h59
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c129
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h129
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c84
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c160
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c256
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c292
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c441
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h16
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c67
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c1
-rw-r--r--drivers/net/ethernet/broadcom/cnic.h2
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h4
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c249
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h11
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c625
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h8
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c40
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h33
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_enet.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h24
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c58
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_hw_defs.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c251
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h57
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c559
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h26
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c1
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h4
-rw-r--r--drivers/net/ethernet/cadence/macb.c126
-rw-r--r--drivers/net/ethernet/cadence/macb.h1
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cphy.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/elmer0.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/espi.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/espi.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/gmac.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/mv88x201x.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/regs.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/subr.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/common.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h42
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c94
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c242
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h73
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c4
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c11
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.c2
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c24
-rw-r--r--drivers/net/ethernet/dec/tulip/eeprom.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/media.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c1
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h1
-rw-r--r--drivers/net/ethernet/dnet.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h10
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c211
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h33
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c73
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c204
-rw-r--r--drivers/net/ethernet/ethoc.c138
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c7
-rw-r--r--drivers/net/ethernet/freescale/fec.h3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c13
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c17
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-scc.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c1
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c23
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h1
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c99
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c3
-rw-r--r--drivers/net/ethernet/freescale/gianfar_sysfs.c1
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c5
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c1
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c1
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c1
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c1
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h1
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c15
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h3
-rw-r--r--drivers/net/ethernet/icplus/ipg.h1
-rw-r--r--drivers/net/ethernet/intel/Kconfig49
-rw-r--r--drivers/net/ethernet/intel/Makefile1
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c18
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h127
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c237
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h21
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h136
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_alloc.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c666
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c469
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h107
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c316
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c400
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c23
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c432
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c17
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c1754
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c77
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h53
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c662
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h170
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c195
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h64
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h152
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c876
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h11
-rw-r--r--drivers/net/ethernet/intel/i40evf/Makefile33
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c927
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.h106
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h2153
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_alloc.h55
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c254
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_hmc.h238
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h165
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_osdep.h72
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h84
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_register.h4667
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_status.h97
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c1575
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h296
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h1152
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h364
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h321
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c390
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c2353
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c772
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c89
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h16
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h28
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c108
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c303
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h65
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c120
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c115
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c32
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c84
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h15
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h18
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c76
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h96
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c1276
-rw-r--r--drivers/net/ethernet/korina.c1
-rw-r--r--drivers/net/ethernet/lantiq_etop.c6
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c28
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c7
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c387
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c24
-rw-r--r--drivers/net/ethernet/marvell/sky2.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c198
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c142
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c103
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c110
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c5
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c1
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c12
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c1
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c1
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c2
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c5
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c41
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h1
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c37
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.h8
-rw-r--r--drivers/net/ethernet/netx-eth.c3
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c6
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h3
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c17
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c4
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.h3
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/Makefile4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c6
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c8
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h203
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c477
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h44
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c89
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c14
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c49
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c31
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h14
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c204
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c127
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c202
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c453
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c266
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c237
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c133
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c3
-rw-r--r--drivers/net/ethernet/rdc/r6040.c7
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1
-rw-r--r--drivers/net/ethernet/renesas/Kconfig2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c263
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h56
-rw-r--r--drivers/net/ethernet/s6gmac.c1
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c3
-rw-r--r--drivers/net/ethernet/sfc/ef10.c549
-rw-r--r--drivers/net/ethernet/sfc/efx.c201
-rw-r--r--drivers/net/ethernet/sfc/efx.h16
-rw-r--r--drivers/net/ethernet/sfc/enum.h1
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c16
-rw-r--r--drivers/net/ethernet/sfc/falcon.c38
-rw-r--r--drivers/net/ethernet/sfc/farch.c48
-rw-r--r--drivers/net/ethernet/sfc/filter.h17
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c444
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h21
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c76
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h733
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c89
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h75
-rw-r--r--drivers/net/ethernet/sfc/nic.c12
-rw-r--r--drivers/net/ethernet/sfc/nic.h34
-rw-r--r--drivers/net/ethernet/sfc/ptp.c854
-rw-r--r--drivers/net/ethernet/sfc/rx.c24
-rw-r--r--drivers/net/ethernet/sfc/selftest.c2
-rw-r--r--drivers/net/ethernet/sfc/selftest.h1
-rw-r--r--drivers/net/ethernet/sfc/siena.c119
-rw-r--r--drivers/net/ethernet/sfc/tx.c2
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c1
-rw-r--r--drivers/net/ethernet/sgi/meth.c1
-rw-r--r--drivers/net/ethernet/sis/sis900.c2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c4
-rw-r--r--drivers/net/ethernet/smsc/smc911x.h3
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c1
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h3
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c3
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.h3
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c5
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c140
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c495
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c135
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c4
-rw-r--r--drivers/net/ethernet/sun/cassini.c4
-rw-r--r--drivers/net/ethernet/sun/cassini.h4
-rw-r--r--drivers/net/ethernet/sun/niu.c10
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c1
-rw-r--r--drivers/net/ethernet/sun/sungem.c1
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/cpmac.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c155
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c18
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h2
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c7
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c2
-rw-r--r--drivers/net/ethernet/tile/Kconfig12
-rw-r--r--drivers/net/ethernet/tile/tilegx.c41
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c18
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c16
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c1
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.h4
-rw-r--r--drivers/net/ethernet/via/via-rhine.c1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c1
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c3
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c36
-rw-r--r--drivers/net/fddi/defxx.c21
-rw-r--r--drivers/net/fddi/skfp/fplustm.c27
-rw-r--r--drivers/net/fddi/skfp/h/supern_2.h96
-rw-r--r--drivers/net/fddi/skfp/h/targetos.h1
-rw-r--r--drivers/net/fddi/skfp/skfddi.c1
-rw-r--r--drivers/net/fddi/skfp/smt.c2
-rw-r--r--drivers/net/fddi/skfp/srf.c24
-rw-r--r--drivers/net/hamradio/6pack.c3
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hamradio/mkiss.c3
-rw-r--r--drivers/net/hamradio/yam.c1
-rw-r--r--drivers/net/hippi/rrunner.c6
-rw-r--r--drivers/net/hyperv/hyperv_net.h5
-rw-r--r--drivers/net/hyperv/netvsc.c10
-rw-r--r--drivers/net/hyperv/netvsc_drv.c24
-rw-r--r--drivers/net/hyperv/rndis_filter.c3
-rw-r--r--drivers/net/ieee802154/at86rf230.c2
-rw-r--r--drivers/net/ieee802154/mrf24j40.c1
-rw-r--r--drivers/net/irda/Kconfig11
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/au1k_ir.c4
-rw-r--r--drivers/net/irda/ep7211-sir.c70
-rw-r--r--drivers/net/irda/esi-sir.c4
-rw-r--r--drivers/net/irda/irda-usb.c1
-rw-r--r--drivers/net/irda/kingsun-sir.c1
-rw-r--r--drivers/net/irda/ks959-sir.c1
-rw-r--r--drivers/net/irda/ksdazzle-sir.c1
-rw-r--r--drivers/net/irda/litelink-sir.c4
-rw-r--r--drivers/net/irda/ma600-sir.c4
-rw-r--r--drivers/net/irda/mcs7780.c1
-rw-r--r--drivers/net/irda/old_belkin-sir.c4
-rw-r--r--drivers/net/irda/sh_irda.c2
-rw-r--r--drivers/net/irda/sh_sir.c2
-rw-r--r--drivers/net/irda/sir_dongle.c1
-rw-r--r--drivers/net/irda/smsc-ircc2.c4
-rw-r--r--drivers/net/irda/smsc-ircc2.h4
-rw-r--r--drivers/net/irda/stir4200.c1
-rw-r--r--drivers/net/irda/via-ircc.c5
-rw-r--r--drivers/net/irda/via-ircc.h3
-rw-r--r--drivers/net/irda/vlsi_ir.c7
-rw-r--r--drivers/net/irda/vlsi_ir.h4
-rw-r--r--drivers/net/loopback.c1
-rw-r--r--drivers/net/macvlan.c64
-rw-r--r--drivers/net/macvtap.c77
-rw-r--r--drivers/net/mdio.c28
-rw-r--r--drivers/net/phy/cicada.c4
-rw-r--r--drivers/net/phy/davicom.c2
-rw-r--r--drivers/net/phy/dp83640.c23
-rw-r--r--drivers/net/phy/icplus.c2
-rw-r--r--drivers/net/phy/lxt.c4
-rw-r--r--drivers/net/phy/marvell.c22
-rw-r--r--drivers/net/phy/mdio-gpio.c1
-rw-r--r--drivers/net/phy/mdio-moxart.c1
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c1
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c1
-rw-r--r--drivers/net/phy/mdio-octeon.c1
-rw-r--r--drivers/net/phy/mdio-sun4i.c4
-rw-r--r--drivers/net/phy/mdio_bus.c36
-rw-r--r--drivers/net/phy/micrel.c4
-rw-r--r--drivers/net/phy/phy.c435
-rw-r--r--drivers/net/phy/phy_device.c451
-rw-r--r--drivers/net/phy/spi_ks8995.c7
-rw-r--r--drivers/net/plip/plip.c4
-rw-r--r--drivers/net/ppp/ppp_mppe.c3
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/team/team.c9
-rw-r--r--drivers/net/team/team_mode_random.c8
-rw-r--r--drivers/net/tun.c98
-rw-r--r--drivers/net/usb/Kconfig27
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/asix.h4
-rw-r--r--drivers/net/usb/asix_common.c3
-rw-r--r--drivers/net/usb/asix_devices.c3
-rw-r--r--drivers/net/usb/ax88172a.c3
-rw-r--r--drivers/net/usb/ax88179_178a.c3
-rw-r--r--drivers/net/usb/catc.c4
-rw-r--r--drivers/net/usb/cdc_eem.c4
-rw-r--r--drivers/net/usb/cdc_ether.c5
-rw-r--r--drivers/net/usb/cdc_ncm.c1
-rw-r--r--drivers/net/usb/cdc_subset.c4
-rw-r--r--drivers/net/usb/cx82310_eth.c4
-rw-r--r--drivers/net/usb/dm9601.c57
-rw-r--r--drivers/net/usb/gl620a.c4
-rw-r--r--drivers/net/usb/hso.c45
-rw-r--r--drivers/net/usb/int51x1.c3
-rw-r--r--drivers/net/usb/ipheth.c1
-rw-r--r--drivers/net/usb/kalmia.c1
-rw-r--r--drivers/net/usb/kaweth.c4
-rw-r--r--drivers/net/usb/lg-vl600.c3
-rw-r--r--drivers/net/usb/mcs7830.c23
-rw-r--r--drivers/net/usb/net1080.c4
-rw-r--r--drivers/net/usb/plusb.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c927
-rw-r--r--drivers/net/usb/r815x.c8
-rw-r--r--drivers/net/usb/rndis_host.c4
-rw-r--r--drivers/net/usb/rtl8150.c1
-rw-r--r--drivers/net/usb/sierra_net.c3
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/smsc75xx.h3
-rw-r--r--drivers/net/usb/smsc95xx.c4
-rw-r--r--drivers/net/usb/smsc95xx.h3
-rw-r--r--drivers/net/usb/sr9700.c1
-rw-r--r--drivers/net/usb/sr9800.c870
-rw-r--r--drivers/net/usb/sr9800.h202
-rw-r--r--drivers/net/usb/usbnet.c5
-rw-r--r--drivers/net/usb/zaurus.c4
-rw-r--r--drivers/net/virtio_net.c265
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c5
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h1
-rw-r--r--drivers/net/vxlan.c310
-rw-r--r--drivers/net/wan/dlci.c5
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/hd64570.c1
-rw-r--r--drivers/net/wan/hd64570.h4
-rw-r--r--drivers/net/wan/hd64572.c1
-rw-r--r--drivers/net/wan/hd64572.h2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c3
-rw-r--r--drivers/net/wan/pc300too.c1
-rw-r--r--drivers/net/wan/pci200syn.c1
-rw-r--r--drivers/net/wan/sbni.c1
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wireless/adm8211.c4
-rw-r--r--drivers/net/wireless/airo_cs.c1
-rw-r--r--drivers/net/wireless/at76c50x-usb.c3
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c3
-rw-r--r--drivers/net/wireless/ath/ath.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig7
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c53
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c43
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h34
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c160
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c31
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c40
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c677
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c791
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h21
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c408
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h157
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c34
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c11
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c11
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig18
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile14
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h222
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_buffalo_initvals.h126
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c385
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c69
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c297
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c106
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h24
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_wow.c422
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h128
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h401
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h392
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h575
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h1559
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h117
-rw-r--r--drivers/net/wireless/ath/ath9k/ar953x_initvals.h718
-rw-r--r--drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h540
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h85
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p1_initvals.h64
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h572
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h434
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c147
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c632
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h44
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c269
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c89
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c88
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h47
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c607
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h75
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c80
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c67
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c51
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c676
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c134
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c247
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h29
-rw-r--r--drivers/net/wireless/ath/ath9k/spectral.c543
-rw-r--r--drivers/net/wireless/ath/ath9k/spectral.h212
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c272
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c588
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c27
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c1
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c13
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c14
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c1
-rw-r--r--drivers/net/wireless/ath/main.c8
-rw-r--r--drivers/net/wireless/ath/regd.c379
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c65
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h1
-rw-r--r--drivers/net/wireless/atmel.c8
-rw-r--r--drivers/net/wireless/atmel.h4
-rw-r--r--drivers/net/wireless/atmel_cs.c5
-rw-r--r--drivers/net/wireless/atmel_pci.c5
-rw-r--r--drivers/net/wireless/b43/b43.h4
-rw-r--r--drivers/net/wireless/b43/main.c27
-rw-r--r--drivers/net/wireless/b43/xmit.c4
-rw-r--r--drivers/net/wireless/b43legacy/main.c1
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcdc.c375
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcdc.h24
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c737
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c539
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h487
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h44
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c392
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c31
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c36
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c207
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h42
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c1596
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.h54
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c19
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.h61
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h304
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c216
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/nvram.c94
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/nvram.h24
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c52
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/proto.c62
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/proto.h57
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c827
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h39
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h83
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h33
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c9
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c194
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c38
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c67
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c2
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_wifi.h14
-rw-r--r--drivers/net/wireless/cw1200/cw1200_sdio.c4
-rw-r--r--drivers/net/wireless/cw1200/fwio.c1
-rw-r--r--drivers/net/wireless/cw1200/main.c2
-rw-r--r--drivers/net/wireless/cw1200/pm.c11
-rw-r--r--drivers/net/wireless/cw1200/scan.c15
-rw-r--r--drivers/net/wireless/cw1200/sta.c5
-rw-r--r--drivers/net/wireless/cw1200/txrx.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c8
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_tx.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c28
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c6
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c8
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c1
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c5
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c42
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.h1
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c12
-rw-r--r--drivers/net/wireless/iwlegacy/3945-debug.c6
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c11
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c1
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c5
-rw-r--r--drivers/net/wireless/iwlegacy/4965-debug.c6
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c8
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c1
-rw-r--r--drivers/net/wireless/iwlegacy/4965.c1
-rw-r--r--drivers/net/wireless/iwlegacy/common.c13
-rw-r--r--drivers/net/wireless/iwlegacy/debug.c10
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c10
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c18
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h9
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c9
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c43
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h50
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h87
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/binding.c20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c21
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/constants.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c57
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c546
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c689
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.h101
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h13
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h17
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h69
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c32
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/led.c6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c211
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c344
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h63
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c51
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c28
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c400
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power_legacy.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c7
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c2192
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h154
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c77
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sf.c291
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c52
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/testmode.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c96
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c23
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c24
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h65
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c437
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c183
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c172
-rw-r--r--drivers/net/wireless/libertas/README5
-rw-r--r--drivers/net/wireless/libertas/cfg.c7
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c6
-rw-r--r--drivers/net/wireless/libertas/if_spi.c1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c1215
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h18
-rw-r--r--drivers/net/wireless/mwifiex/11n.c2
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c6
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig4
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c70
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c4
-rw-r--r--drivers/net/wireless/mwifiex/decl.h1
-rw-r--r--drivers/net/wireless/mwifiex/fw.h41
-rw-r--r--drivers/net/wireless/mwifiex/init.c3
-rw-r--r--drivers/net/wireless/mwifiex/main.c14
-rw-r--r--drivers/net/wireless/mwifiex/main.h7
-rw-r--r--drivers/net/wireless/mwifiex/scan.c8
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c80
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c38
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c20
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c46
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c1
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c15
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c39
-rw-r--r--drivers/net/wireless/mwifiex/usb.c58
-rw-r--r--drivers/net/wireless/mwifiex/usb.h12
-rw-r--r--drivers/net/wireless/mwifiex/util.c5
-rw-r--r--drivers/net/wireless/mwl8k.c5
-rw-r--r--drivers/net/wireless/orinoco/hermes.c1
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c1
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c1
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c1
-rw-r--r--drivers/net/wireless/p54/eeprom.c1
-rw-r--r--drivers/net/wireless/p54/fwio.c1
-rw-r--r--drivers/net/wireless/p54/led.c1
-rw-r--r--drivers/net/wireless/p54/main.c2
-rw-r--r--drivers/net/wireless/p54/net2280.h3
-rw-r--r--drivers/net/wireless/p54/p54pci.c1
-rw-r--r--drivers/net/wireless/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/p54/txrx.c5
-rw-r--r--drivers/net/wireless/prism54/isl_38xx.c3
-rw-r--r--drivers/net/wireless/prism54/isl_38xx.h3
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c6
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.h3
-rw-r--r--drivers/net/wireless/prism54/isl_oid.h3
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c4
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.h3
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c3
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.h3
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c5
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c3
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.h3
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c3
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.h3
-rw-r--r--drivers/net/wireless/prism54/prismcompat.h3
-rw-r--r--drivers/net/wireless/ray_cs.c3
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c46
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800mmio.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800mmio.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800soc.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dump.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mmio.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mmio.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c24
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/grf5101.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/max2820.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/sa2400.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c3
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8225.c1
-rw-r--r--drivers/net/wireless/rtlwifi/base.c8
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c4
-rw-r--r--drivers/net/wireless/rtlwifi/core.c11
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c6
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c4
-rw-r--r--drivers/net/wireless/rtlwifi/regd.c61
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c327
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c39
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.c9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.h3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.c29
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c17
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/table.c40
-rw-r--r--drivers/net/wireless/rtlwifi/stats.c14
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c8
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h33
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c258
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h26
-rw-r--r--drivers/net/wireless/ti/wl1251/boot.c3
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c58
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.h8
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c46
-rw-r--r--drivers/net/wireless/ti/wl1251/event.h7
-rw-r--r--drivers/net/wireless/ti/wl1251/init.c13
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c153
-rw-r--r--drivers/net/wireless/ti/wl1251/rx.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/tx.c35
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h6
-rw-r--r--drivers/net/wireless/ti/wl12xx/scan.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c14
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c12
-rw-r--r--drivers/net/wireless/wl3501_cs.c5
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_def.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c8
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_al2230.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_al7230b.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_rf2959.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_uw2453.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h3
-rw-r--r--drivers/net/xen-netback/common.h43
-rw-r--r--drivers/net/xen-netback/interface.c57
-rw-r--r--drivers/net/xen-netback/netback.c493
-rw-r--r--drivers/net/xen-netback/xenbus.c3
-rw-r--r--drivers/net/xen-netfront.c191
-rw-r--r--drivers/nfc/Kconfig1
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/mei_phy.c6
-rw-r--r--drivers/nfc/microread/i2c.c4
-rw-r--r--drivers/nfc/microread/mei.c4
-rw-r--r--drivers/nfc/microread/microread.c4
-rw-r--r--drivers/nfc/microread/microread.h4
-rw-r--r--drivers/nfc/nfcmrvl/Kconfig23
-rw-r--r--drivers/nfc/nfcmrvl/Makefile9
-rw-r--r--drivers/nfc/nfcmrvl/main.c165
-rw-r--r--drivers/nfc/nfcmrvl/nfcmrvl.h48
-rw-r--r--drivers/nfc/nfcmrvl/usb.c459
-rw-r--r--drivers/nfc/nfcwilink.c3
-rw-r--r--drivers/nfc/pn533.c7
-rw-r--r--drivers/nfc/pn544/i2c.c4
-rw-r--r--drivers/nfc/pn544/mei.c4
-rw-r--r--drivers/nfc/pn544/pn544.c50
-rw-r--r--drivers/nfc/pn544/pn544.h4
-rw-r--r--drivers/nfc/port100.c1
-rw-r--r--drivers/of/address.c13
-rw-r--r--drivers/of/base.c46
-rw-r--r--drivers/of/device.c3
-rw-r--r--drivers/of/fdt.c12
-rw-r--r--drivers/of/irq.c11
-rw-r--r--drivers/of/of_mdio.c155
-rw-r--r--drivers/of/of_net.c1
-rw-r--r--drivers/parport/parport_mfc3.c2
-rw-r--r--drivers/parport/parport_pc.c20
-rw-r--r--drivers/parport/parport_serial.c5
-rw-r--r--drivers/pci/Kconfig3
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/access.c24
-rw-r--r--drivers/pci/ats.c82
-rw-r--r--drivers/pci/bus.c133
-rw-r--r--drivers/pci/host-bridge.c19
-rw-r--r--drivers/pci/host/pci-exynos.c5
-rw-r--r--drivers/pci/host/pci-imx6.c225
-rw-r--r--drivers/pci/host/pci-mvebu.c99
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c12
-rw-r--r--drivers/pci/host/pci-tegra.c55
-rw-r--r--drivers/pci/host/pcie-designware.c91
-rw-r--r--drivers/pci/host/pcie-designware.h4
-rw-r--r--drivers/pci/hotplug/acpiphp.h5
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c2
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c137
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c3
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c14
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c8
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c13
-rw-r--r--drivers/pci/hotplug/pciehp.h17
-rw-r--r--drivers/pci/hotplug/pciehp_core.c17
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c90
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c380
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c23
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c19
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c4
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c4
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c5
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c18
-rw-r--r--drivers/pci/ioapic.c7
-rw-r--r--drivers/pci/iov.c2
-rw-r--r--drivers/pci/msi.c348
-rw-r--r--drivers/pci/pci-acpi.c40
-rw-r--r--drivers/pci/pci-label.c187
-rw-r--r--drivers/pci/pci-sysfs.c19
-rw-r--r--drivers/pci/pci.c545
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c56
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c95
-rw-r--r--drivers/pci/pcie/aspm.c12
-rw-r--r--drivers/pci/pcie/portdrv_core.c36
-rw-r--r--drivers/pci/probe.c155
-rw-r--r--drivers/pci/quirks.c2
-rw-r--r--drivers/pci/remove.c17
-rw-r--r--drivers/pci/rom.c2
-rw-r--r--drivers/pci/setup-bus.c32
-rw-r--r--drivers/pci/setup-res.c2
-rw-r--r--drivers/pci/slot.c26
-rw-r--r--drivers/pci/vc.c434
-rw-r--r--drivers/pci/xen-pcifront.c12
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c2
-rw-r--r--drivers/pcmcia/cardbus.c7
-rw-r--r--drivers/pcmcia/electra_cf.c2
-rw-r--r--drivers/pcmcia/i82092.c2
-rw-r--r--drivers/pcmcia/yenta_socket.c6
-rw-r--r--drivers/phy/Kconfig17
-rw-r--r--drivers/phy/Makefile2
-rw-r--r--drivers/phy/phy-bcm-kona-usb2.c158
-rw-r--r--drivers/phy/phy-core.c132
-rw-r--r--drivers/phy/phy-mvebu-sata.c137
-rw-r--r--drivers/pinctrl/Kconfig48
-rw-r--r--drivers/pinctrl/Makefile5
-rw-r--r--drivers/pinctrl/core.c8
-rw-r--r--drivers/pinctrl/pinconf-generic.c4
-rw-r--r--drivers/pinctrl/pinconf.c22
-rw-r--r--drivers/pinctrl/pinctrl-abx500.c37
-rw-r--r--drivers/pinctrl/pinctrl-abx500.h12
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c22
-rw-r--r--drivers/pinctrl/pinctrl-at91.c60
-rw-r--r--drivers/pinctrl/pinctrl-baytrail.c36
-rw-r--r--drivers/pinctrl/pinctrl-bcm2835.c2
-rw-r--r--drivers/pinctrl/pinctrl-capri.c1454
-rw-r--r--drivers/pinctrl/pinctrl-imx1-core.c17
-rw-r--r--drivers/pinctrl/pinctrl-imx25.c351
-rw-r--r--drivers/pinctrl/pinctrl-msm.c990
-rw-r--r--drivers/pinctrl/pinctrl-msm.h122
-rw-r--r--drivers/pinctrl/pinctrl-msm8x74.c636
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c304
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.h14
-rw-r--r--drivers/pinctrl/pinctrl-single.c18
-rw-r--r--drivers/pinctrl/pinctrl-st.c8
-rw-r--r--drivers/pinctrl/pinctrl-sunxi-pins.h2
-rw-r--r--drivers/pinctrl/pinctrl-sunxi.c15
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c2
-rw-r--r--drivers/pinctrl/pinctrl-tegra124.c3137
-rw-r--r--drivers/pinctrl/pinctrl-xway.c4
-rw-r--r--drivers/pinctrl/sh-pfc/core.c76
-rw-r--r--drivers/pinctrl/sh-pfc/core.h4
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c24
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a73a4.c17
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7740.c15
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7779.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c752
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c655
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7203.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7264.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7269.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7372.c15
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh73a0.c66
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7720.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7722.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7723.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7724.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7757.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7785.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7786.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-shx3.c2
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h18
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas6.c24
-rw-r--r--drivers/pinctrl/sirf/pinctrl-prima2.c50
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c51
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c17
-rw-r--r--drivers/platform/chrome/Kconfig14
-rw-r--r--drivers/platform/chrome/Makefile1
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c298
-rw-r--r--drivers/platform/chrome/chromeos_pstore.c101
-rw-r--r--drivers/platform/x86/Kconfig19
-rw-r--r--drivers/platform/x86/Makefile2
-rw-r--r--drivers/platform/x86/acer-wmi.c2
-rw-r--r--drivers/platform/x86/asus-laptop.c3
-rw-r--r--drivers/platform/x86/asus-wmi.c53
-rw-r--r--drivers/platform/x86/classmate-laptop.c3
-rw-r--r--drivers/platform/x86/compal-laptop.c121
-rw-r--r--drivers/platform/x86/dell-laptop.c78
-rw-r--r--drivers/platform/x86/dell-wmi-aio.c1
-rw-r--r--drivers/platform/x86/dell-wmi.c1
-rw-r--r--drivers/platform/x86/eeepc-laptop.c57
-rw-r--r--drivers/platform/x86/eeepc-wmi.c2
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c15
-rw-r--r--drivers/platform/x86/hp-wireless.c132
-rw-r--r--drivers/platform/x86/hp_accel.c9
-rw-r--r--drivers/platform/x86/ideapad-laptop.c3
-rw-r--r--drivers/platform/x86/intel-rst.c2
-rw-r--r--drivers/platform/x86/intel-smartconnect.c2
-rw-r--r--drivers/platform/x86/intel_baytrail.c224
-rw-r--r--drivers/platform/x86/intel_baytrail.h90
-rw-r--r--drivers/platform/x86/intel_menlow.c4
-rw-r--r--drivers/platform/x86/intel_oaktrail.c3
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c87
-rw-r--r--drivers/platform/x86/mxm-wmi.c4
-rw-r--r--drivers/platform/x86/panasonic-laptop.c4
-rw-r--r--drivers/platform/x86/pvpanic.c3
-rw-r--r--drivers/platform/x86/samsung-q10.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c6
-rw-r--r--drivers/platform/x86/tc1100-wmi.c4
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c14
-rw-r--r--drivers/platform/x86/toshiba_acpi.c5
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c4
-rw-r--r--drivers/platform/x86/wmi.c2
-rw-r--r--drivers/platform/x86/xo15-ebook.c3
-rw-r--r--drivers/pnp/card.c1
-rw-r--r--drivers/pnp/pnpacpi/core.c33
-rw-r--r--drivers/pnp/pnpacpi/pnpacpi.h1
-rw-r--r--drivers/pnp/pnpbios/core.c12
-rw-r--r--drivers/pnp/resource.c2
-rw-r--r--drivers/power/Kconfig9
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/bq2415x_charger.c121
-rw-r--r--drivers/power/charger-manager.c299
-rw-r--r--drivers/power/ds2782_battery.c2
-rw-r--r--drivers/power/gpio-charger.c19
-rw-r--r--drivers/power/isp1704_charger.c52
-rw-r--r--drivers/power/max14577_charger.c311
-rw-r--r--drivers/power/max17040_battery.c5
-rw-r--r--drivers/power/max17042_battery.c6
-rw-r--r--drivers/power/power_supply_core.c56
-rw-r--r--drivers/power/reset/Kconfig6
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/as3722-poweroff.c96
-rw-r--r--drivers/powercap/intel_rapl.c13
-rw-r--r--drivers/ptp/Kconfig1
-rw-r--r--drivers/pwm/Kconfig24
-rw-r--r--drivers/pwm/Makefile2
-rw-r--r--drivers/pwm/core.c6
-rw-r--r--drivers/pwm/pwm-atmel.c395
-rw-r--r--drivers/pwm/pwm-ep93xx.c4
-rw-r--r--drivers/pwm/pwm-jz4740.c20
-rw-r--r--drivers/pwm/pwm-lp3943.c314
-rw-r--r--drivers/pwm/pwm-pxa.c55
-rw-r--r--drivers/pwm/pwm-tiecap.c1
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c4
-rw-r--r--drivers/pwm/sysfs.c12
-rw-r--r--drivers/regulator/Kconfig19
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/ab3100.c4
-rw-r--r--drivers/regulator/ab8500.c126
-rw-r--r--drivers/regulator/act8865-regulator.c349
-rw-r--r--drivers/regulator/anatop-regulator.c36
-rw-r--r--drivers/regulator/arizona-micsupp.c52
-rw-r--r--drivers/regulator/as3722-regulator.c34
-rw-r--r--drivers/regulator/core.c24
-rw-r--r--drivers/regulator/da9055-regulator.c4
-rw-r--r--drivers/regulator/db8500-prcmu.c20
-rw-r--r--drivers/regulator/gpio-regulator.c17
-rw-r--r--drivers/regulator/lp3971.c43
-rw-r--r--drivers/regulator/lp3972.c41
-rw-r--r--drivers/regulator/max14577.c274
-rw-r--r--drivers/regulator/max77693.c1
-rw-r--r--drivers/regulator/mc13892-regulator.c24
-rw-r--r--drivers/regulator/pcf50633-regulator.c2
-rw-r--r--drivers/regulator/pfuze100-regulator.c41
-rw-r--r--drivers/regulator/s2mps11.c5
-rw-r--r--drivers/regulator/s5m8767.c99
-rw-r--r--drivers/regulator/stw481x-vmmc.c12
-rw-r--r--drivers/regulator/tps51632-regulator.c30
-rw-r--r--drivers/regulator/tps62360-regulator.c2
-rw-r--r--drivers/regulator/tps6586x-regulator.c93
-rw-r--r--drivers/regulator/tps65910-regulator.c40
-rw-r--r--drivers/regulator/twl-regulator.c4
-rw-r--r--drivers/regulator/wm831x-dcdc.c10
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/reset-sunxi.c175
-rw-r--r--drivers/rtc/Kconfig31
-rw-r--r--drivers/rtc/Makefile3
-rw-r--r--drivers/rtc/class.c24
-rw-r--r--drivers/rtc/rtc-as3722.c19
-rw-r--r--drivers/rtc/rtc-cmos.c60
-rw-r--r--drivers/rtc/rtc-ds1305.c1
-rw-r--r--drivers/rtc/rtc-ds1742.c10
-rw-r--r--drivers/rtc/rtc-hym8563.c606
-rw-r--r--drivers/rtc/rtc-isl12057.c310
-rw-r--r--drivers/rtc/rtc-max8907.c11
-rw-r--r--drivers/rtc/rtc-mxc.c10
-rw-r--r--drivers/rtc/rtc-pcf2127.c5
-rw-r--r--drivers/rtc/rtc-rx8581.c81
-rw-r--r--drivers/rtc/rtc-s5m.c2
-rw-r--r--drivers/rtc/rtc-sunxi.c523
-rw-r--r--drivers/rtc/rtc-twl.c38
-rw-r--r--drivers/rtc/rtc-vr41xx.c50
-rw-r--r--drivers/s390/block/dasd.c2
-rw-r--r--drivers/s390/block/dasd_diag.c10
-rw-r--r--drivers/s390/block/dasd_eckd.c48
-rw-r--r--drivers/s390/block/dasd_fba.c26
-rw-r--r--drivers/s390/block/dcssblk.c21
-rw-r--r--drivers/s390/block/scm_blk.c8
-rw-r--r--drivers/s390/block/scm_blk_cluster.c4
-rw-r--r--drivers/s390/block/xpram.c24
-rw-r--r--drivers/s390/char/sclp.h1
-rw-r--r--drivers/s390/char/sclp_cmd.c7
-rw-r--r--drivers/s390/char/sclp_early.c125
-rw-r--r--drivers/s390/char/tty3270.c9
-rw-r--r--drivers/s390/char/vmur.c4
-rw-r--r--drivers/s390/cio/blacklist.c6
-rw-r--r--drivers/s390/cio/ccwgroup.c12
-rw-r--r--drivers/s390/cio/chsc.c73
-rw-r--r--drivers/s390/cio/chsc.h51
-rw-r--r--drivers/s390/cio/cio.c40
-rw-r--r--drivers/s390/cio/css.c26
-rw-r--r--drivers/s390/cio/css.h1
-rw-r--r--drivers/s390/cio/device.c29
-rw-r--r--drivers/s390/cio/qdio.h14
-rw-r--r--drivers/s390/cio/qdio_main.c93
-rw-r--r--drivers/s390/crypto/ap_bus.c31
-rw-r--r--drivers/s390/crypto/ap_bus.h4
-rw-r--r--drivers/s390/crypto/zcrypt_api.c109
-rw-r--r--drivers/s390/crypto/zcrypt_api.h2
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c20
-rw-r--r--drivers/s390/crypto/zcrypt_error.h18
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c12
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c260
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.h2
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c11
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.c12
-rw-r--r--drivers/s390/kvm/virtio_ccw.c11
-rw-r--r--drivers/s390/net/Makefile2
-rw-r--r--drivers/s390/net/netiucv.c8
-rw-r--r--drivers/s390/net/qeth_core.h37
-rw-r--r--drivers/s390/net/qeth_core_main.c209
-rw-r--r--drivers/s390/net/qeth_core_mpc.c2
-rw-r--r--drivers/s390/net/qeth_core_mpc.h150
-rw-r--r--drivers/s390/net/qeth_l2.h15
-rw-r--r--drivers/s390/net/qeth_l2_main.c623
-rw-r--r--drivers/s390/net/qeth_l2_sys.c223
-rw-r--r--drivers/s390/net/qeth_l3_main.c8
-rw-r--r--drivers/sbus/char/bbc_i2c.c1
-rw-r--r--drivers/sbus/char/display7seg.c1
-rw-r--r--drivers/sbus/char/envctrl.c1
-rw-r--r--drivers/sbus/char/flash.c1
-rw-r--r--drivers/sbus/char/uctrl.c1
-rw-r--r--drivers/scsi/Kconfig41
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/a2091.c2
-rw-r--r--drivers/scsi/a3000.c2
-rw-r--r--drivers/scsi/a4000t.c2
-rw-r--r--drivers/scsi/aic7xxx_old.c11149
-rw-r--r--drivers/scsi/aic7xxx_old/aic7xxx.h28
-rw-r--r--drivers/scsi/aic7xxx_old/aic7xxx.reg1401
-rw-r--r--drivers/scsi/aic7xxx_old/aic7xxx.seq1539
-rw-r--r--drivers/scsi/aic7xxx_old/aic7xxx_proc.c270
-rw-r--r--drivers/scsi/aic7xxx_old/aic7xxx_reg.h629
-rw-r--r--drivers/scsi/aic7xxx_old/aic7xxx_seq.c817
-rw-r--r--drivers/scsi/aic7xxx_old/scsi_message.h49
-rw-r--r--drivers/scsi/aic7xxx_old/sequencer.h135
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c8
-rw-r--r--drivers/scsi/bfa/bfa_core.c8
-rw-r--r--drivers/scsi/bfa/bfa_defs.h1
-rw-r--r--drivers/scsi/bfa/bfa_fc.h2
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c15
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c727
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h7
-rw-r--r--drivers/scsi/bfa/bfa_ioc_cb.c23
-rw-r--r--drivers/scsi/bfa/bfa_svc.c2
-rw-r--r--drivers/scsi/bfa/bfad.c97
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c15
-rw-r--r--drivers/scsi/bfa/bfad_bsg.h1
-rw-r--r--drivers/scsi/bfa/bfad_drv.h6
-rw-r--r--drivers/scsi/bfa/bfi.h42
-rw-r--r--drivers/scsi/gvp11.c2
-rw-r--r--drivers/scsi/hosts.c31
-rw-r--r--drivers/scsi/hpsa.c241
-rw-r--r--drivers/scsi/hpsa.h9
-rw-r--r--drivers/scsi/hpsa_cmd.h4
-rw-r--r--drivers/scsi/ipr.c2
-rw-r--r--drivers/scsi/ipr.h4
-rw-r--r--drivers/scsi/libiscsi.c6
-rw-r--r--drivers/scsi/libsas/sas_expander.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c2
-rw-r--r--drivers/scsi/mac_scsi.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c5
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c41
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c39
-rw-r--r--drivers/scsi/osd/osd_initiator.c2
-rw-r--r--drivers/scsi/qla1280.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c118
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c44
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h13
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h34
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h15
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h19
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c82
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c73
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c39
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h12
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c22
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c33
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c368
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c181
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c180
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_bsg.c360
-rw-r--r--drivers/scsi/qla4xxx/ql4_bsg.h13
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h40
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h140
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c51
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c154
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c1474
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi.c9
-rw-r--r--drivers/scsi/scsi_debug.c266
-rw-r--r--drivers/scsi/scsi_error.c229
-rw-r--r--drivers/scsi/scsi_pm.c62
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_sysfs.c36
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c414
-rw-r--r--drivers/scsi/scsi_transport_srp.c95
-rw-r--r--drivers/scsi/sd.c30
-rw-r--r--drivers/scsi/sd_dif.c30
-rw-r--r--drivers/scsi/sr.c37
-rw-r--r--drivers/scsi/st.c5
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c5
-rw-r--r--drivers/scsi/virtio_scsi.c15
-rw-r--r--drivers/scsi/zorro7xx.c2
-rw-r--r--drivers/sfi/sfi_acpi.c4
-rw-r--r--drivers/spi/Kconfig23
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-altera.c2
-rw-r--r--drivers/spi/spi-ath79.c14
-rw-r--r--drivers/spi/spi-atmel.c778
-rw-r--r--drivers/spi/spi-bcm2835.c10
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c475
-rw-r--r--drivers/spi/spi-bcm63xx.c48
-rw-r--r--drivers/spi/spi-bitbang-txrx.h2
-rw-r--r--drivers/spi/spi-clps711x.c23
-rw-r--r--drivers/spi/spi-coldfire-qspi.c53
-rw-r--r--drivers/spi/spi-davinci.c49
-rw-r--r--drivers/spi/spi-dw-mmio.c74
-rw-r--r--drivers/spi/spi-dw-pci.c47
-rw-r--r--drivers/spi/spi-dw.c26
-rw-r--r--drivers/spi/spi-dw.h5
-rw-r--r--drivers/spi/spi-falcon.c12
-rw-r--r--drivers/spi/spi-fsl-dspi.c5
-rw-r--r--drivers/spi/spi-fsl-espi.c63
-rw-r--r--drivers/spi/spi-gpio.c8
-rw-r--r--drivers/spi/spi-imx.c27
-rw-r--r--drivers/spi/spi-mpc512x-psc.c44
-rw-r--r--drivers/spi/spi-mxs.c9
-rw-r--r--drivers/spi/spi-nuc900.c58
-rw-r--r--drivers/spi/spi-oc-tiny.c62
-rw-r--r--drivers/spi/spi-omap-100k.c20
-rw-r--r--drivers/spi/spi-omap2-mcspi.c38
-rw-r--r--drivers/spi/spi-orion.c4
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c2
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-rspi.c363
-rw-r--r--drivers/spi/spi-s3c24xx.c74
-rw-r--r--drivers/spi/spi-s3c64xx.c5
-rw-r--r--drivers/spi/spi-sc18is602.c24
-rw-r--r--drivers/spi/spi-sh-hspi.c4
-rw-r--r--drivers/spi/spi-sh-msiof.c62
-rw-r--r--drivers/spi/spi-sh.c13
-rw-r--r--drivers/spi/spi-sirf.c7
-rw-r--r--drivers/spi/spi-tegra114.c166
-rw-r--r--drivers/spi/spi-tegra20-sflash.c40
-rw-r--r--drivers/spi/spi-tegra20-slink.c163
-rw-r--r--drivers/spi/spi-ti-qspi.c129
-rw-r--r--drivers/spi/spi-topcliff-pch.c8
-rw-r--r--drivers/spi/spi-txx9.c8
-rw-r--r--drivers/spi/spi-xcomm.c12
-rw-r--r--drivers/spi/spi.c80
-rw-r--r--drivers/ssb/Kconfig1
-rw-r--r--drivers/ssb/driver_chipcommon_sflash.c6
-rw-r--r--drivers/ssb/driver_gpio.c306
-rw-r--r--drivers/ssb/main.c12
-rw-r--r--drivers/staging/Kconfig12
-rw-r--r--drivers/staging/Makefile6
-rw-r--r--drivers/staging/android/Kconfig2
-rw-r--r--drivers/staging/android/Makefile2
-rw-r--r--drivers/staging/android/alarm-dev.c8
-rw-r--r--drivers/staging/android/ashmem.c45
-rw-r--r--drivers/staging/android/ion/Kconfig35
-rw-r--r--drivers/staging/android/ion/Makefile10
-rw-r--r--drivers/staging/android/ion/compat_ion.c195
-rw-r--r--drivers/staging/android/ion/compat_ion.h30
-rw-r--r--drivers/staging/android/ion/ion.c1549
-rw-r--r--drivers/staging/android/ion/ion.h204
-rw-r--r--drivers/staging/android/ion/ion_carveout_heap.c194
-rw-r--r--drivers/staging/android/ion/ion_chunk_heap.c195
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c218
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c158
-rw-r--r--drivers/staging/android/ion/ion_heap.c318
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c195
-rw-r--r--drivers/staging/android/ion/ion_priv.h361
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c492
-rw-r--r--drivers/staging/android/ion/ion_test.c282
-rw-r--r--drivers/staging/android/ion/tegra/Makefile1
-rw-r--r--drivers/staging/android/ion/tegra/tegra_ion.c84
-rw-r--r--drivers/staging/android/sw_sync.h17
-rw-r--r--drivers/staging/android/sync.c14
-rw-r--r--drivers/staging/android/sync.h50
-rw-r--r--drivers/staging/android/uapi/ion.h196
-rw-r--r--drivers/staging/android/uapi/ion_test.h70
-rw-r--r--drivers/staging/bcm/Adapter.h2
-rw-r--r--drivers/staging/bcm/Bcmchar.c142
-rw-r--r--drivers/staging/bcm/Bcmnet.c3
-rw-r--r--drivers/staging/bcm/DDRInit.c2042
-rw-r--r--drivers/staging/bcm/InterfaceDld.c140
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.c230
-rw-r--r--drivers/staging/bcm/InterfaceInit.c10
-rw-r--r--drivers/staging/bcm/InterfaceRx.c187
-rw-r--r--drivers/staging/bcm/InterfaceTx.c151
-rw-r--r--drivers/staging/bcm/PHSModule.c4
-rw-r--r--drivers/staging/bcm/Qos.c6
-rw-r--r--drivers/staging/bcm/nvm.c80
-rw-r--r--drivers/staging/btmtk_usb/Kconfig11
-rw-r--r--drivers/staging/btmtk_usb/Makefile1
-rw-r--r--drivers/staging/btmtk_usb/README14
-rw-r--r--drivers/staging/btmtk_usb/TODO10
-rw-r--r--drivers/staging/btmtk_usb/btmtk_usb.c1810
-rw-r--r--drivers/staging/btmtk_usb/btmtk_usb.h138
-rw-r--r--drivers/staging/ced1401/ced_ioc.c3
-rw-r--r--drivers/staging/ced1401/usb1401.c1
-rw-r--r--drivers/staging/comedi/Kconfig6
-rw-r--r--drivers/staging/comedi/Makefile2
-rw-r--r--drivers/staging/comedi/comedi_buf.c99
-rw-r--r--drivers/staging/comedi/comedi_fops.c527
-rw-r--r--drivers/staging/comedi/comedi_internal.h4
-rw-r--r--drivers/staging/comedi/comedidev.h40
-rw-r--r--drivers/staging/comedi/drivers.c38
-rw-r--r--drivers/staging/comedi/drivers/8255.c6
-rw-r--r--drivers/staging/comedi/drivers/8255_pci.c17
-rw-r--r--drivers/staging/comedi/drivers/Makefile2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c6
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_035.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1032.c6
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1516.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_16xx.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2032.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2200.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3120.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3200.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c2
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c2
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7x3x.c2
-rw-r--r--drivers/staging/comedi/drivers/adl_pci8164.c2
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c36
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c89
-rw-r--r--drivers/staging/comedi/drivers/adq12b.c29
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c145
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c2
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1724.c6
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c2
-rw-r--r--drivers/staging/comedi/drivers/aio_aio12_8.c13
-rw-r--r--drivers/staging/comedi/drivers/amcc_s5933.h8
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_common.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_pci.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236.c4
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c62
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c109
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci263.c2
-rw-r--r--drivers/staging/comedi/drivers/c6xdigio.c54
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c57
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c270
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c2
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c19
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c2
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c9
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c2
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c2
-rw-r--r--drivers/staging/comedi/drivers/das08.c75
-rw-r--r--drivers/staging/comedi/drivers/das08_pci.c2
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c107
-rw-r--r--drivers/staging/comedi/drivers/das1800.c139
-rw-r--r--drivers/staging/comedi/drivers/das6402.c11
-rw-r--r--drivers/staging/comedi/drivers/dmm32at.c94
-rw-r--r--drivers/staging/comedi/drivers/dt2801.c109
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c149
-rw-r--r--drivers/staging/comedi/drivers/dt2814.c70
-rw-r--r--drivers/staging/comedi/drivers/dt2815.c19
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c188
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c54
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c1
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c16
-rw-r--r--drivers/staging/comedi/drivers/fl512.c21
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c80
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.c15
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c2
-rw-r--r--drivers/staging/comedi/drivers/ke_counter.c2
-rw-r--r--drivers/staging/comedi/drivers/me4000.c52
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c2
-rw-r--r--drivers/staging/comedi/drivers/mf6x4.c354
-rw-r--r--drivers/staging/comedi/drivers/mite.c145
-rw-r--r--drivers/staging/comedi/drivers/mite.h12
-rw-r--r--drivers/staging/comedi/drivers/mpc624.c19
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c7
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c708
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c111
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio.c15
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio16d.c95
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_pci.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c631
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c179
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c63
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c315
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.h209
-rw-r--r--drivers/staging/comedi/drivers/ni_tio_internal.h431
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c88
-rw-r--r--drivers/staging/comedi/drivers/pcl812.c330
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c195
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c249
-rw-r--r--drivers/staging/comedi/drivers/pcm3724.c6
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c1476
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c428
-rw-r--r--drivers/staging/comedi/drivers/plx9080.h13
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c7
-rw-r--r--drivers/staging/comedi/drivers/s626.c21
-rw-r--r--drivers/staging/comedi/drivers/skel.c2
-rw-r--r--drivers/staging/comedi/drivers/unioxx5.c68
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c1
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c6
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c15
-rw-r--r--drivers/staging/comedi/kcomedilib/Makefile2
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c74
-rw-r--r--drivers/staging/comedi/proc.c6
-rw-r--r--drivers/staging/comedi/range.c9
-rw-r--r--drivers/staging/crystalhd/bc_dts_glob_lnx.h2
-rw-r--r--drivers/staging/crystalhd/crystalhd_cmds.c4
-rw-r--r--drivers/staging/crystalhd/crystalhd_cmds.h2
-rw-r--r--drivers/staging/crystalhd/crystalhd_fw_if.h2
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.c2
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.h6
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.c2
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.h3
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.c4
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.h2
-rw-r--r--drivers/staging/cxt1e1/comet.c60
-rw-r--r--drivers/staging/cxt1e1/comet.h601
-rw-r--r--drivers/staging/cxt1e1/functions.c2
-rw-r--r--drivers/staging/cxt1e1/linux.c15
-rw-r--r--drivers/staging/cxt1e1/musycc.c4
-rw-r--r--drivers/staging/cxt1e1/pmcc4_drv.c10
-rw-r--r--drivers/staging/cxt1e1/pmcc4_private.h2
-rw-r--r--drivers/staging/cxt1e1/sbeid.c323
-rw-r--r--drivers/staging/dgap/Kconfig2
-rw-r--r--drivers/staging/dgap/dgap_conf.h6
-rw-r--r--drivers/staging/dgap/dgap_driver.c18
-rw-r--r--drivers/staging/dgap/dgap_driver.h1
-rw-r--r--drivers/staging/dgap/dgap_fep5.c112
-rw-r--r--drivers/staging/dgap/dgap_parse.c1
-rw-r--r--drivers/staging/dgap/dgap_trace.c17
-rw-r--r--drivers/staging/dgap/dgap_tty.c180
-rw-r--r--drivers/staging/dgap/downld.c168
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c253
-rw-r--r--drivers/staging/dgnc/dgnc_trace.c19
-rw-r--r--drivers/staging/dgrp/dgrp_driver.c1
-rw-r--r--drivers/staging/dgrp/dgrp_tty.c4
-rw-r--r--drivers/staging/dwc2/TODO33
-rw-r--r--drivers/staging/et131x/README4
-rw-r--r--drivers/staging/et131x/et131x.c841
-rw-r--r--drivers/staging/et131x/et131x.h40
-rw-r--r--drivers/staging/frontier/alphatrack.c1
-rw-r--r--drivers/staging/frontier/tranzport.c1
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/boot.h304
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c766
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_debug.c2
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_download.c133
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c1309
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_proc.c14
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.c5
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.h5
-rw-r--r--drivers/staging/fwserial/Kconfig20
-rw-r--r--drivers/staging/fwserial/fwserial.c151
-rw-r--r--drivers/staging/fwserial/fwserial.h24
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c44
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c8
-rw-r--r--drivers/staging/gdm724x/gdm_tty.c1
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c40
-rw-r--r--drivers/staging/gdm72xx/gdm_qos.c2
-rw-r--r--drivers/staging/gdm72xx/gdm_usb.c8
-rw-r--r--drivers/staging/gdm72xx/sdio_boot.c1
-rw-r--r--drivers/staging/goldfish/goldfish_nand.c1
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h6
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c7
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c8
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c16
-rw-r--r--drivers/staging/iio/adc/Kconfig2
-rw-r--r--drivers/staging/iio/adc/ad7280a.c28
-rw-r--r--drivers/staging/iio/adc/ad7291.c8
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c7
-rw-r--r--drivers/staging/iio/adc/ad7816.c12
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c19
-rw-r--r--drivers/staging/iio/adc/lpc32xx_adc.c12
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c307
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c6
-rw-r--r--drivers/staging/iio/addac/adt7316-spi.c6
-rw-r--r--drivers/staging/iio/addac/adt7316.c12
-rw-r--r--drivers/staging/iio/addac/adt7316.h1
-rw-r--r--drivers/staging/iio/cdc/ad7150.c8
-rw-r--r--drivers/staging/iio/cdc/ad7746.c14
-rw-r--r--drivers/staging/iio/frequency/ad9832.h6
-rw-r--r--drivers/staging/iio/frequency/ad9834.h4
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c10
-rw-r--r--drivers/staging/iio/iio_simple_dummy.c8
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c2
-rw-r--r--drivers/staging/iio/light/isl29018.c13
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c40
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c14
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c10
-rw-r--r--drivers/staging/imx-drm/Kconfig6
-rw-r--r--drivers/staging/imx-drm/Makefile1
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c87
-rw-r--r--drivers/staging/imx-drm/imx-hdmi.c1912
-rw-r--r--drivers/staging/imx-drm/imx-hdmi.h1032
-rw-r--r--drivers/staging/imx-drm/imx-ldb.c5
-rw-r--r--drivers/staging/imx-drm/imx-tve.c11
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-common.c33
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c3
-rw-r--r--drivers/staging/imx-drm/ipuv3-plane.c12
-rw-r--r--drivers/staging/imx-drm/parallel-display.c3
-rw-r--r--drivers/staging/keucr/smcommon.h2
-rw-r--r--drivers/staging/keucr/smil.h8
-rw-r--r--drivers/staging/keucr/smilecc.c2
-rw-r--r--drivers/staging/keucr/smilmain.c101
-rw-r--r--drivers/staging/keucr/smilsub.c37
-rw-r--r--drivers/staging/keucr/smscsi.c28
-rw-r--r--drivers/staging/keucr/usb.c1
-rw-r--r--drivers/staging/line6/driver.c7
-rw-r--r--drivers/staging/line6/pcm.c1
-rw-r--r--drivers/staging/line6/usbdefs.h7
-rw-r--r--drivers/staging/lustre/TODO5
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/curproc.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h210
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h64
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/kp30.h150
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-fs.h92
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h1
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h5
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h2
-rw-r--r--drivers/staging/lustre/include/linux/lnet/types.h11
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h4
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c3
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c358
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h6
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c5
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c307
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c124
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c16
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c21
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c39
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c10
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-msg.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c63
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c30
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c7
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c64
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c105
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h8
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c8
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c5
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h5
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c11
-rw-r--r--drivers/staging/lustre/lustre/Kconfig2
-rw-r--r--drivers/staging/lustre/lustre/fid/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c2
-rw-r--r--drivers/staging/lustre/lustre/fld/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c2
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c7
-rw-r--r--drivers/staging/lustre/lustre/fld/lproc_fld.c3
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h6
-rw-r--r--drivers/staging/lustre/lustre/include/dt_object.h2
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_acl.h18
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_debug.h47
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_intent.h2
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_lite.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h8
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h19
-rw-r--r--drivers/staging/lustre/lustre/include/lu_target.h91
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/liblustreapi.h43
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h72
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h45
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustreapi.h310
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_debug.h19
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm_flags.h90
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_ha.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h11
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h13
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h9
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h245
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h7
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h17
-rw-r--r--drivers/staging/lustre/lustre/include/md_object.h4
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h15
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h12
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c18
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_misc.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c45
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c15
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c57
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c9
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c49
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c23
-rw-r--r--drivers/staging/lustre/lustre/libcfs/debug.c42
-rw-r--r--drivers/staging/lustre/lustre/libcfs/hash.c12
-rw-r--r--drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c7
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c6
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c13
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c11
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c36
-rw-r--r--drivers/staging/lustre/lustre/libcfs/lwt.c266
-rw-r--r--drivers/staging/lustre/lustre/libcfs/module.c55
-rw-r--r--drivers/staging/lustre/lustre/libcfs/nidstrings.c34
-rw-r--r--drivers/staging/lustre/lustre/libcfs/tracefile.c22
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c34
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c26
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c657
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h76
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c76
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c29
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c41
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c14
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c61
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c104
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c617
-rw-r--r--drivers/staging/lustre/lustre/lmv/Makefile4
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_fld.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c6
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c5
-rw-r--r--drivers/staging/lustre/lustre/lov/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h16
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c15
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c35
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c20
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c14
-rw-r--r--drivers/staging/lustre/lustre/lov/lproc_lov.c40
-rw-r--r--drivers/staging/lustre/lustre/lvfs/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c760
-rw-r--r--drivers/staging/lustre/lustre/lvfs/lvfs_lib.c2
-rw-r--r--drivers/staging/lustre/lustre/lvfs/lvfs_linux.c1
-rw-r--r--drivers/staging/lustre/lustre/mdc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c3
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h3
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c31
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c113
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c92
-rw-r--r--drivers/staging/lustre/lustre/mgc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/mgc/libmgc.c3
-rw-r--r--drivers/staging/lustre/lustre/mgc/lproc_mgc.c3
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c416
-rw-r--r--drivers/staging/lustre/lustre/obdclass/capa.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c20
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c21
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c214
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_test.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/local_storage.c11
-rw-r--r--drivers/staging/lustre/lustre/obdclass/local_storage.h3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c356
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c30
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c3
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c4
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c14
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c25
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c75
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_asn1.h6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_err.h10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_mech_switch.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c14
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c40
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c73
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_server.c450
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c14
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c81
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c375
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c70
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h16
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c50
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c77
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wirehdr.c47
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c86
-rw-r--r--drivers/staging/media/Kconfig8
-rw-r--r--drivers/staging/media/Makefile5
-rw-r--r--drivers/staging/media/as102/as102_drv.c14
-rw-r--r--drivers/staging/media/as102/as102_drv.h8
-rw-r--r--drivers/staging/media/as102/as102_fe.c37
-rw-r--r--drivers/staging/media/as102/as102_fw.c16
-rw-r--r--drivers/staging/media/as102/as102_usb_drv.c36
-rw-r--r--drivers/staging/media/as102/as10x_cmd.c21
-rw-r--r--drivers/staging/media/as102/as10x_cmd_cfg.c9
-rw-r--r--drivers/staging/media/as102/as10x_cmd_stream.c12
-rw-r--r--drivers/staging/media/bcm2048/Kconfig13
-rw-r--r--drivers/staging/media/bcm2048/Makefile1
-rw-r--r--drivers/staging/media/bcm2048/TODO24
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c2744
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.h30
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c1
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c2
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c3
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c4
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c2
-rw-r--r--drivers/staging/media/go7007/go7007-driver.c1
-rw-r--r--drivers/staging/media/go7007/go7007-fw.c4
-rw-r--r--drivers/staging/media/go7007/go7007-i2c.c1
-rw-r--r--drivers/staging/media/go7007/go7007-loader.c5
-rw-r--r--drivers/staging/media/go7007/go7007-usb.c1
-rw-r--r--drivers/staging/media/go7007/go7007-v4l2.c1
-rw-r--r--drivers/staging/media/go7007/s2250-board.c1
-rw-r--r--drivers/staging/media/go7007/saa7134-go7007.c2
-rw-r--r--drivers/staging/media/go7007/snd-go7007.c1
-rw-r--r--drivers/staging/media/lirc/lirc_igorplugusb.c4
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c12
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.c4
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c1
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c10
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c4
-rw-r--r--drivers/staging/media/omap24xx/Kconfig35
-rw-r--r--drivers/staging/media/omap24xx/Makefile5
-rw-r--r--drivers/staging/media/omap24xx/omap24xxcam-dma.c (renamed from drivers/media/platform/omap24xxcam-dma.c)0
-rw-r--r--drivers/staging/media/omap24xx/omap24xxcam.c (renamed from drivers/media/platform/omap24xxcam.c)0
-rw-r--r--drivers/staging/media/omap24xx/omap24xxcam.h (renamed from drivers/media/platform/omap24xxcam.h)2
-rw-r--r--drivers/staging/media/omap24xx/tcm825x.c (renamed from drivers/media/i2c/tcm825x.c)2
-rw-r--r--drivers/staging/media/omap24xx/tcm825x.h (renamed from drivers/media/i2c/tcm825x.h)2
-rw-r--r--drivers/staging/media/omap24xx/v4l2-int-device.c (renamed from drivers/media/v4l2-core/v4l2-int-device.c)2
-rw-r--r--drivers/staging/media/omap24xx/v4l2-int-device.h305
-rw-r--r--drivers/staging/media/omap4iss/Kconfig12
-rw-r--r--drivers/staging/media/omap4iss/Makefile6
-rw-r--r--drivers/staging/media/omap4iss/TODO4
-rw-r--r--drivers/staging/media/omap4iss/iss.c1563
-rw-r--r--drivers/staging/media/omap4iss/iss.h236
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c1343
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.h158
-rw-r--r--drivers/staging/media/omap4iss/iss_csiphy.c279
-rw-r--r--drivers/staging/media/omap4iss/iss_csiphy.h51
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipe.c570
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipe.h67
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipeif.c849
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipeif.h92
-rw-r--r--drivers/staging/media/omap4iss/iss_regs.h901
-rw-r--r--drivers/staging/media/omap4iss/iss_resizer.c893
-rw-r--r--drivers/staging/media/omap4iss/iss_resizer.h75
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c1226
-rw-r--r--drivers/staging/media/omap4iss/iss_video.h204
-rw-r--r--drivers/staging/media/sn9c102/Kconfig (renamed from drivers/media/usb/sn9c102/Kconfig)9
-rw-r--r--drivers/staging/media/sn9c102/Makefile (renamed from drivers/media/usb/sn9c102/Makefile)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102.h (renamed from drivers/media/usb/sn9c102/sn9c102.h)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102.txt592
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_config.h (renamed from drivers/media/usb/sn9c102/sn9c102_config.h)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_core.c (renamed from drivers/media/usb/sn9c102/sn9c102_core.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_devtable.h (renamed from drivers/media/usb/sn9c102/sn9c102_devtable.h)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_hv7131d.c (renamed from drivers/media/usb/sn9c102/sn9c102_hv7131d.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_hv7131r.c (renamed from drivers/media/usb/sn9c102/sn9c102_hv7131r.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_mi0343.c (renamed from drivers/media/usb/sn9c102/sn9c102_mi0343.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_mi0360.c (renamed from drivers/media/usb/sn9c102/sn9c102_mi0360.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_mt9v111.c (renamed from drivers/media/usb/sn9c102/sn9c102_mt9v111.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_ov7630.c (renamed from drivers/media/usb/sn9c102/sn9c102_ov7630.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_ov7660.c (renamed from drivers/media/usb/sn9c102/sn9c102_ov7660.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_pas106b.c (renamed from drivers/media/usb/sn9c102/sn9c102_pas106b.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_pas202bcb.c (renamed from drivers/media/usb/sn9c102/sn9c102_pas202bcb.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_sensor.h (renamed from drivers/media/usb/sn9c102/sn9c102_sensor.h)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_tas5110c1b.c (renamed from drivers/media/usb/sn9c102/sn9c102_tas5110c1b.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_tas5110d.c (renamed from drivers/media/usb/sn9c102/sn9c102_tas5110d.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_tas5130d1b.c (renamed from drivers/media/usb/sn9c102/sn9c102_tas5130d1b.c)0
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-core.c2
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c2
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-v4l2.c7
-rw-r--r--drivers/staging/media/solo6x10/solo6x10.h2
-rw-r--r--drivers/staging/netlogic/xlr_net.c8
-rw-r--r--drivers/staging/nvec/nvec.c13
-rw-r--r--drivers/staging/nvec/nvec.h5
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c274
-rw-r--r--drivers/staging/octeon/ethernet-mdio.h1
-rw-r--r--drivers/staging/octeon/ethernet-rx.c1
-rw-r--r--drivers/staging/octeon/ethernet-tx.c1
-rw-r--r--drivers/staging/octeon/ethernet.c1
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c6
-rw-r--r--drivers/staging/ozwpan/ozcdev.c2
-rw-r--r--drivers/staging/ozwpan/ozeltbuf.c3
-rw-r--r--drivers/staging/ozwpan/ozhcd.c2
-rw-r--r--drivers/staging/ozwpan/ozpd.c1
-rw-r--r--drivers/staging/ozwpan/ozproto.c11
-rw-r--r--drivers/staging/ozwpan/ozusbsvc.c1
-rw-r--r--drivers/staging/ozwpan/ozusbsvc1.c1
-rw-r--r--drivers/staging/panel/panel.c4
-rw-r--r--drivers/staging/phison/phison.c3
-rw-r--r--drivers/staging/quickstart/quickstart.c2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/dot11d.c132
-rw-r--r--drivers/staging/rtl8187se/ieee80211/dot11d.h66
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h125
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c4
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c13
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c53
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c1
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c39
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c188
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c64
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c13
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c8
-rw-r--r--drivers/staging/rtl8187se/r8180.h35
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c43
-rw-r--r--drivers/staging/rtl8187se/r8180_dm.h2
-rw-r--r--drivers/staging/rtl8187se/r8180_hw.h4
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225.h3
-rw-r--r--drivers/staging/rtl8187se/r8180_wx.c257
-rw-r--r--drivers/staging/rtl8187se/r8185b_init.c17
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c92
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_br_ext.c87
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c67
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_io.c14
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c36
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c163
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c99
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp.c23
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_p2p.c27
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c33
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c71
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sreset.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c122
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c22
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c136
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c292
-rw-r--r--drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c918
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_HWConfig.c361
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_RTL8188E.c163
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c62
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_interface.c102
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c16
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_mp.c40
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c12
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c12
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_ops_linux.c1
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h2
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h4
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h29
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h90
-rw-r--r--drivers/staging/rtl8188eu/include/odm_HWConfig.h4
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h2
-rw-r--r--drivers/staging/rtl8188eu/include/odm_debug.h15
-rw-r--r--drivers/staging/rtl8188eu/include/odm_interface.h54
-rw-r--r--drivers/staging/rtl8188eu/include/odm_precomp.h7
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h143
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h6
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_spec.h70
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_cmd.h105
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_eeprom.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_efuse.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_io.h36
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl_set.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_iol.h8
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_led.h6
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h62
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h14
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_pwrctrl.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_security.h2
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops.h4
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c137
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c16
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c139
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c6
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c24
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c8
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c12
-rw-r--r--drivers/staging/rtl8192e/dot11d.c2
-rw-r--r--drivers/staging/rtl8192e/dot11d.h5
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c13
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c24
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h3
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c10
-rw-r--r--drivers/staging/rtl8192e/rtl819x_Qos.h37
-rw-r--r--drivers/staging/rtl8192e/rtllib.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c8
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_debug.h6
-rw-r--r--drivers/staging/rtl8192e/rtllib_endianfree.h44
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c76
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c32
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c16
-rw-r--r--drivers/staging/rtl8192u/r8192U.h1
-rw-r--r--drivers/staging/rtl8712/os_intfs.c1
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c1
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c1
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c1
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c3
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c1
-rw-r--r--drivers/staging/rtl8712/usb_intf.c5
-rw-r--r--drivers/staging/rtl8821ae/Kconfig11
-rw-r--r--drivers/staging/rtl8821ae/Makefile35
-rw-r--r--drivers/staging/rtl8821ae/TODO10
-rw-r--r--drivers/staging/rtl8821ae/base.c1873
-rw-r--r--drivers/staging/rtl8821ae/base.h159
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c3976
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.h205
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c1614
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.h176
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbt_precomp.h99
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c3891
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.h226
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c4242
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.h162
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c3780
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.h179
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c4104
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.h175
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c4185
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.h145
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c1181
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.h549
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/rtl_btc.c236
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/rtl_btc.h66
-rw-r--r--drivers/staging/rtl8821ae/cam.c354
-rw-r--r--drivers/staging/rtl8821ae/cam.h56
-rw-r--r--drivers/staging/rtl8821ae/compat.h125
-rw-r--r--drivers/staging/rtl8821ae/core.c1464
-rw-r--r--drivers/staging/rtl8821ae/core.h43
-rw-r--r--drivers/staging/rtl8821ae/debug.c988
-rw-r--r--drivers/staging/rtl8821ae/debug.h227
-rw-r--r--drivers/staging/rtl8821ae/efuse.c1285
-rw-r--r--drivers/staging/rtl8821ae/efuse.h130
-rw-r--r--drivers/staging/rtl8821ae/pci.c2549
-rw-r--r--drivers/staging/rtl8821ae/pci.h353
-rw-r--r--drivers/staging/rtl8821ae/ps.c1025
-rw-r--r--drivers/staging/rtl8821ae/ps.h55
-rw-r--r--drivers/staging/rtl8821ae/rc.c309
-rw-r--r--drivers/staging/rtl8821ae/rc.h47
-rw-r--r--drivers/staging/rtl8821ae/regd.c503
-rw-r--r--drivers/staging/rtl8821ae/regd.h75
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/btc.h87
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/def.h442
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/dm.c3045
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/dm.h426
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/fw.c1349
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/fw.h321
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.c519
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h169
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c2069
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/hal_btc.h160
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/hw.c3346
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/hw.h75
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/led.c239
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/led.h40
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/phy.c5525
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/phy.h258
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/pwrseq.c199
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h413
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c140
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.h71
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/reg.h2427
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/rf.c464
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/rf.h46
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/sw.c499
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/sw.h39
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/table.c4002
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/table.h62
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/trx.c1050
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/trx.h641
-rw-r--r--drivers/staging/rtl8821ae/stats.c283
-rw-r--r--drivers/staging/rtl8821ae/stats.h46
-rw-r--r--drivers/staging/rtl8821ae/wifi.h2532
-rw-r--r--drivers/staging/rts5139/ms.c2
-rw-r--r--drivers/staging/rts5139/rts51x.c1
-rw-r--r--drivers/staging/rts5139/rts51x_card.c10
-rw-r--r--drivers/staging/rts5139/rts51x_card.h8
-rw-r--r--drivers/staging/rts5139/rts51x_scsi.c1
-rw-r--r--drivers/staging/rts5208/Kconfig15
-rw-r--r--drivers/staging/rts5208/Makefile6
-rw-r--r--drivers/staging/rts5208/TODO7
-rw-r--r--drivers/staging/rts5208/debug.h43
-rw-r--r--drivers/staging/rts5208/general.c35
-rw-r--r--drivers/staging/rts5208/general.h31
-rw-r--r--drivers/staging/rts5208/ms.c4208
-rw-r--r--drivers/staging/rts5208/ms.h227
-rw-r--r--drivers/staging/rts5208/rtsx.c1071
-rw-r--r--drivers/staging/rts5208/rtsx.h185
-rw-r--r--drivers/staging/rts5208/rtsx_card.c1126
-rw-r--r--drivers/staging/rts5208/rtsx_card.h1098
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c1979
-rw-r--r--drivers/staging/rts5208/rtsx_chip.h1002
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c3370
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.h143
-rw-r--r--drivers/staging/rts5208/rtsx_sys.h50
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c769
-rw-r--r--drivers/staging/rts5208/rtsx_transport.h66
-rw-r--r--drivers/staging/rts5208/sd.c4525
-rw-r--r--drivers/staging/rts5208/sd.h301
-rw-r--r--drivers/staging/rts5208/spi.c877
-rw-r--r--drivers/staging/rts5208/spi.h65
-rw-r--r--drivers/staging/rts5208/trace.h93
-rw-r--r--drivers/staging/rts5208/xd.c2088
-rw-r--r--drivers/staging/rts5208/xd.h188
-rw-r--r--drivers/staging/sb105x/sb_mp_register.h8
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.c4
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.h1
-rw-r--r--drivers/staging/sbe-2t3e3/ctrl.c14
-rw-r--r--drivers/staging/sep/sep_crypto.c3
-rw-r--r--drivers/staging/sep/sep_main.c3
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c18
-rw-r--r--drivers/staging/silicom/bpctl_mod.c2
-rw-r--r--drivers/staging/silicom/bypasslib/bypass.c170
-rw-r--r--drivers/staging/slicoss/README1
-rw-r--r--drivers/staging/slicoss/slicoss.c23
-rw-r--r--drivers/staging/sm7xxfb/sm7xxfb.c4
-rw-r--r--drivers/staging/speakup/main.c2
-rw-r--r--drivers/staging/speakup/serialio.c4
-rw-r--r--drivers/staging/speakup/serialio.h26
-rw-r--r--drivers/staging/tidspbridge/Makefile2
-rw-r--r--drivers/staging/tidspbridge/gen/gh.c148
-rw-r--r--drivers/staging/tidspbridge/gen/uuidutil.c85
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/gh.h12
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/uuidutil.h18
-rw-r--r--drivers/staging/tidspbridge/pmgr/cmm.c7
-rw-r--r--drivers/staging/tidspbridge/pmgr/dbll.c98
-rw-r--r--drivers/staging/tidspbridge/pmgr/dev.c6
-rw-r--r--drivers/staging/tidspbridge/pmgr/dmm.c20
-rw-r--r--drivers/staging/tidspbridge/pmgr/dspapi.c12
-rw-r--r--drivers/staging/tidspbridge/rmgr/dbdcd.c108
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c33
-rw-r--r--drivers/staging/usbip/stub_rx.c20
-rw-r--r--drivers/staging/usbip/usbip_common.c5
-rw-r--r--drivers/staging/usbip/userspace/libsrc/names.c8
-rw-r--r--drivers/staging/usbip/userspace/libsrc/usbip_common.c2
-rw-r--r--drivers/staging/usbip/userspace/libsrc/vhci_driver.c69
-rw-r--r--drivers/staging/usbip/userspace/libsrc/vhci_driver.h2
-rw-r--r--drivers/staging/usbip/userspace/src/Makefile.am2
-rw-r--r--drivers/staging/usbip/userspace/src/usbip.c6
-rw-r--r--drivers/staging/usbip/userspace/src/usbip.h1
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_port.c57
-rw-r--r--drivers/staging/usbip/vhci_hcd.c15
-rw-r--r--drivers/staging/usbip/vhci_sysfs.c3
-rw-r--r--drivers/staging/vme/devices/vme_user.c14
-rw-r--r--drivers/staging/vme/devices/vme_user.h26
-rw-r--r--drivers/staging/vt6655/80211hdr.h2
-rw-r--r--drivers/staging/vt6655/baseband.c4
-rw-r--r--drivers/staging/vt6655/bssdb.c354
-rw-r--r--drivers/staging/vt6655/card.c10
-rw-r--r--drivers/staging/vt6655/channel.c16
-rw-r--r--drivers/staging/vt6655/datarate.c8
-rw-r--r--drivers/staging/vt6655/device.h1
-rw-r--r--drivers/staging/vt6655/device_main.c62
-rw-r--r--drivers/staging/vt6655/dpc.c40
-rw-r--r--drivers/staging/vt6655/hostap.c14
-rw-r--r--drivers/staging/vt6655/iwctl.c4
-rw-r--r--drivers/staging/vt6655/key.c44
-rw-r--r--drivers/staging/vt6655/mac.c4
-rw-r--r--drivers/staging/vt6655/power.c9
-rw-r--r--drivers/staging/vt6655/rf.c2
-rw-r--r--drivers/staging/vt6655/rxtx.c12
-rw-r--r--drivers/staging/vt6655/vntwifi.c6
-rw-r--r--drivers/staging/vt6655/wcmd.c22
-rw-r--r--drivers/staging/vt6655/wctl.c6
-rw-r--r--drivers/staging/vt6655/wmgr.c125
-rw-r--r--drivers/staging/vt6655/wpa.c2
-rw-r--r--drivers/staging/vt6655/wpa2.c18
-rw-r--r--drivers/staging/vt6655/wpactl.c44
-rw-r--r--drivers/staging/vt6655/wpactl.h12
-rw-r--r--drivers/staging/vt6655/wroute.c50
-rw-r--r--drivers/staging/vt6656/Makefile1
-rw-r--r--drivers/staging/vt6656/aes_ccmp.c16
-rw-r--r--drivers/staging/vt6656/baseband.c335
-rw-r--r--drivers/staging/vt6656/bssdb.c2271
-rw-r--r--drivers/staging/vt6656/bssdb.h4
-rw-r--r--drivers/staging/vt6656/card.c108
-rw-r--r--drivers/staging/vt6656/channel.c3
-rw-r--r--drivers/staging/vt6656/datarate.c2
-rw-r--r--drivers/staging/vt6656/datarate.h1
-rw-r--r--drivers/staging/vt6656/desc.h8
-rw-r--r--drivers/staging/vt6656/device.h30
-rw-r--r--drivers/staging/vt6656/device_cfg.h2
-rw-r--r--drivers/staging/vt6656/dpc.c49
-rw-r--r--drivers/staging/vt6656/int.c60
-rw-r--r--drivers/staging/vt6656/iwctl.c20
-rw-r--r--drivers/staging/vt6656/main_usb.c553
-rw-r--r--drivers/staging/vt6656/mib.c489
-rw-r--r--drivers/staging/vt6656/mib.h378
-rw-r--r--drivers/staging/vt6656/rf.c7
-rw-r--r--drivers/staging/vt6656/rndis.h30
-rw-r--r--drivers/staging/vt6656/rxtx.c344
-rw-r--r--drivers/staging/vt6656/rxtx.h12
-rw-r--r--drivers/staging/vt6656/tkip.c4
-rw-r--r--drivers/staging/vt6656/usbpipe.c10
-rw-r--r--drivers/staging/vt6656/wcmd.c1712
-rw-r--r--drivers/staging/vt6656/wmgr.c10
-rw-r--r--drivers/staging/vt6656/wpa.c12
-rw-r--r--drivers/staging/vt6656/wpa2.c16
-rw-r--r--drivers/staging/vt6656/wpactl.c2
-rw-r--r--drivers/staging/winbond/mds.c101
-rw-r--r--drivers/staging/winbond/wbusb.c1
-rw-r--r--drivers/staging/wlags49_h2/debug.h56
-rw-r--r--drivers/staging/wlags49_h2/sta_h25.c6
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.c67
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.h2
-rw-r--r--drivers/staging/wlags49_h2/wl_enc.c15
-rw-r--r--drivers/staging/wlags49_h2/wl_enc.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_main.c155
-rw-r--r--drivers/staging/wlags49_h2/wl_main.h2
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c97
-rw-r--r--drivers/staging/wlags49_h2/wl_pci.c1578
-rw-r--r--drivers/staging/wlags49_h2/wl_pci.h109
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.c134
-rw-r--r--drivers/staging/wlags49_h2/wl_profile.c17
-rw-r--r--drivers/staging/wlags49_h2/wl_util.c59
-rw-r--r--drivers/staging/wlags49_h2/wl_version.h34
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c190
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.h2
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c18
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h4
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c4
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h6
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c6
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c7
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c10
-rw-r--r--drivers/staging/xgifb/XGI_main.h2
-rw-r--r--drivers/staging/xillybus/Kconfig2
-rw-r--r--drivers/staging/xillybus/xillybus_of.c26
-rw-r--r--drivers/staging/xillybus/xillybus_pcie.c4
-rw-r--r--drivers/staging/zram/zram.txt77
-rw-r--r--drivers/staging/zsmalloc/Kconfig11
-rw-r--r--drivers/staging/zsmalloc/Makefile3
-rw-r--r--drivers/staging/zsmalloc/zsmalloc-main.c1072
-rw-r--r--drivers/staging/zsmalloc/zsmalloc.h43
-rw-r--r--drivers/target/Kconfig2
-rw-r--r--drivers/target/iscsi/iscsi_target.c75
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c9
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h2
-rw-r--r--drivers/target/loopback/tcm_loop.c20
-rw-r--r--drivers/target/target_core_alua.c558
-rw-r--r--drivers/target/target_core_alua.h15
-rw-r--r--drivers/target/target_core_configfs.c194
-rw-r--r--drivers/target/target_core_device.c113
-rw-r--r--drivers/target/target_core_fabric_configfs.c2
-rw-r--r--drivers/target/target_core_file.c264
-rw-r--r--drivers/target/target_core_file.h14
-rw-r--r--drivers/target/target_core_iblock.c95
-rw-r--r--drivers/target/target_core_internal.h8
-rw-r--r--drivers/target/target_core_pr.c11
-rw-r--r--drivers/target/target_core_pr.h5
-rw-r--r--drivers/target/target_core_rd.c252
-rw-r--r--drivers/target/target_core_rd.h4
-rw-r--r--drivers/target/target_core_sbc.c260
-rw-r--r--drivers/target/target_core_spc.c114
-rw-r--r--drivers/target/target_core_tpg.c18
-rw-r--r--drivers/target/target_core_transport.c100
-rw-r--r--drivers/target/target_core_ua.c1
-rw-r--r--drivers/target/target_core_xcopy.c4
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c2
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c6
-rw-r--r--drivers/thermal/Kconfig23
-rw-r--r--drivers/thermal/Makefile2
-rw-r--r--drivers/thermal/cpu_cooling.c67
-rw-r--r--drivers/thermal/imx_thermal.c54
-rw-r--r--drivers/thermal/int3403_thermal.c237
-rw-r--r--drivers/thermal/intel_powerclamp.c17
-rw-r--r--drivers/thermal/of-thermal.c849
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.c2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c1
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.c12
-rw-r--r--drivers/thermal/step_wise.c6
-rw-r--r--drivers/thermal/thermal_core.c86
-rw-r--r--drivers/thermal/thermal_core.h9
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c77
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c2
-rw-r--r--drivers/tty/Kconfig2
-rw-r--r--drivers/tty/amiserial.c26
-rw-r--r--drivers/tty/cyclades.c2
-rw-r--r--drivers/tty/goldfish.c1
-rw-r--r--drivers/tty/hvc/hvc_console.c2
-rw-r--r--drivers/tty/hvc/hvc_iucv.c103
-rw-r--r--drivers/tty/hvc/hvc_opal.c8
-rw-r--r--drivers/tty/hvc/hvc_rtas.c12
-rw-r--r--drivers/tty/hvc/hvc_udbg.c9
-rw-r--r--drivers/tty/hvc/hvc_xen.c17
-rw-r--r--drivers/tty/hvc/hvsi_lib.c1
-rw-r--r--drivers/tty/ipwireless/tty.c1
-rw-r--r--drivers/tty/n_gsm.c74
-rw-r--r--drivers/tty/n_r3964.c2
-rw-r--r--drivers/tty/n_tty.c130
-rw-r--r--drivers/tty/rocket.c2
-rw-r--r--drivers/tty/serial/8250/8250_core.c22
-rw-r--r--drivers/tty/serial/8250/8250_dw.c21
-rw-r--r--drivers/tty/serial/8250/8250_em.c1
-rw-r--r--drivers/tty/serial/8250/8250_pci.c12
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c1
-rw-r--r--drivers/tty/serial/8250/serial_cs.c1
-rw-r--r--drivers/tty/serial/Kconfig23
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/amba-pl010.c15
-rw-r--r--drivers/tty/serial/amba-pl011.c84
-rw-r--r--drivers/tty/serial/atmel_serial.c45
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c9
-rw-r--r--drivers/tty/serial/clps711x.c454
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c1
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c1
-rw-r--r--drivers/tty/serial/icom.c4
-rw-r--r--drivers/tty/serial/imx.c8
-rw-r--r--drivers/tty/serial/kgdb_nmi.c1
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c90
-rw-r--r--drivers/tty/serial/mxs-auart.c9
-rw-r--r--drivers/tty/serial/of_serial.c1
-rw-r--r--drivers/tty/serial/omap-serial.c11
-rw-r--r--drivers/tty/serial/pch_uart.c13
-rw-r--r--drivers/tty/serial/rp2.c2
-rw-r--r--drivers/tty/serial/samsung.c8
-rw-r--r--drivers/tty/serial/sc26xx.c749
-rw-r--r--drivers/tty/serial/serial-tegra.c38
-rw-r--r--drivers/tty/serial/serial_core.c8
-rw-r--r--drivers/tty/serial/sh-sci.c425
-rw-r--r--drivers/tty/serial/sh-sci.h2
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c43
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/synclink.c4
-rw-r--r--drivers/tty/synclink_gt.c4
-rw-r--r--drivers/tty/synclinkmp.c4
-rw-r--r--drivers/tty/tty_audit.c2
-rw-r--r--drivers/tty/tty_buffer.c111
-rw-r--r--drivers/tty/tty_io.c25
-rw-r--r--drivers/tty/tty_ldisc.c1
-rw-r--r--drivers/tty/tty_ldsem.c16
-rw-r--r--drivers/tty/tty_port.c1
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/uio/uio_mf624.c2
-rw-r--r--drivers/usb/Kconfig6
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/atm/cxacru.c1
-rw-r--r--drivers/usb/atm/speedtch.c1
-rw-r--r--drivers/usb/atm/ueagle-atm.c1
-rw-r--r--drivers/usb/atm/usbatm.c8
-rw-r--r--drivers/usb/c67x00/Makefile2
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.c2
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.h2
-rw-r--r--drivers/usb/c67x00/c67x00-ll-hpi.c14
-rw-r--r--drivers/usb/c67x00/c67x00-sched.c33
-rw-r--r--drivers/usb/chipidea/Makefile2
-rw-r--r--drivers/usb/chipidea/ci.h89
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c32
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h5
-rw-r--r--drivers/usb/chipidea/ci_hdrc_pci.c2
-rw-r--r--drivers/usb/chipidea/core.c109
-rw-r--r--drivers/usb/chipidea/host.c4
-rw-r--r--drivers/usb/chipidea/otg.h6
-rw-r--r--drivers/usb/chipidea/udc.c26
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c46
-rw-r--r--drivers/usb/class/cdc-acm.c116
-rw-r--r--drivers/usb/class/cdc-acm.h3
-rw-r--r--drivers/usb/class/cdc-wdm.c78
-rw-r--r--drivers/usb/class/usblp.c1
-rw-r--r--drivers/usb/class/usbtmc.c1
-rw-r--r--drivers/usb/core/Makefile2
-rw-r--r--drivers/usb/core/buffer.c2
-rw-r--r--drivers/usb/core/config.c8
-rw-r--r--drivers/usb/core/devio.c2
-rw-r--r--drivers/usb/core/driver.c43
-rw-r--r--drivers/usb/core/hcd-pci.c1
-rw-r--r--drivers/usb/core/hcd.c41
-rw-r--r--drivers/usb/core/hub.c123
-rw-r--r--drivers/usb/core/hub.h2
-rw-r--r--drivers/usb/core/message.c5
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/sysfs.c2
-rw-r--r--drivers/usb/core/urb.c25
-rw-r--r--drivers/usb/core/usb-acpi.c43
-rw-r--r--drivers/usb/core/usb.h1
-rw-r--r--drivers/usb/dwc2/Kconfig (renamed from drivers/staging/dwc2/Kconfig)0
-rw-r--r--drivers/usb/dwc2/Makefile (renamed from drivers/staging/dwc2/Makefile)0
-rw-r--r--drivers/usb/dwc2/core.c (renamed from drivers/staging/dwc2/core.c)378
-rw-r--r--drivers/usb/dwc2/core.h (renamed from drivers/staging/dwc2/core.h)62
-rw-r--r--drivers/usb/dwc2/core_intr.c (renamed from drivers/staging/dwc2/core_intr.c)10
-rw-r--r--drivers/usb/dwc2/hcd.c (renamed from drivers/staging/dwc2/hcd.c)35
-rw-r--r--drivers/usb/dwc2/hcd.h (renamed from drivers/staging/dwc2/hcd.h)4
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c (renamed from drivers/staging/dwc2/hcd_ddma.c)8
-rw-r--r--drivers/usb/dwc2/hcd_intr.c (renamed from drivers/staging/dwc2/hcd_intr.c)6
-rw-r--r--drivers/usb/dwc2/hcd_queue.c (renamed from drivers/staging/dwc2/hcd_queue.c)195
-rw-r--r--drivers/usb/dwc2/hw.h (renamed from drivers/staging/dwc2/hw.h)0
-rw-r--r--drivers/usb/dwc2/pci.c (renamed from drivers/staging/dwc2/pci.c)2
-rw-r--r--drivers/usb/dwc2/platform.c (renamed from drivers/staging/dwc2/platform.c)63
-rw-r--r--drivers/usb/dwc3/Kconfig7
-rw-r--r--drivers/usb/dwc3/Makefile1
-rw-r--r--drivers/usb/dwc3/core.c8
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c1
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c202
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c12
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c3
-rw-r--r--drivers/usb/dwc3/gadget.c10
-rw-r--r--drivers/usb/gadget/Kconfig54
-rw-r--r--drivers/usb/gadget/Makefile9
-rw-r--r--drivers/usb/gadget/acm_ms.c2
-rw-r--r--drivers/usb/gadget/amd5536udc.c18
-rw-r--r--drivers/usb/gadget/at91_udc.c17
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c73
-rw-r--r--drivers/usb/gadget/bcm63xx_udc.c5
-rw-r--r--drivers/usb/gadget/composite.c18
-rw-r--r--drivers/usb/gadget/configfs.c8
-rw-r--r--drivers/usb/gadget/dummy_hcd.c2
-rw-r--r--drivers/usb/gadget/epautoconf.c9
-rw-r--r--drivers/usb/gadget/f_ecm.c73
-rw-r--r--drivers/usb/gadget/f_fs.c1012
-rw-r--r--drivers/usb/gadget/f_hid.c18
-rw-r--r--drivers/usb/gadget/f_loopback.c144
-rw-r--r--drivers/usb/gadget/f_midi.c22
-rw-r--r--drivers/usb/gadget/f_ncm.c2
-rw-r--r--drivers/usb/gadget/f_obex.c2
-rw-r--r--drivers/usb/gadget/f_phonet.c2
-rw-r--r--drivers/usb/gadget/f_rndis.c94
-rw-r--r--drivers/usb/gadget/f_serial.c2
-rw-r--r--drivers/usb/gadget/f_sourcesink.c349
-rw-r--r--drivers/usb/gadget/f_subset.c60
-rw-r--r--drivers/usb/gadget/fotg210-udc.c3
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c5
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c7
-rw-r--r--drivers/usb/gadget/fusb300_udc.c4
-rw-r--r--drivers/usb/gadget/g_ffs.c466
-rw-r--r--drivers/usb/gadget/g_zero.h24
-rw-r--r--drivers/usb/gadget/goku_udc.c17
-rw-r--r--drivers/usb/gadget/gr_udc.c2238
-rw-r--r--drivers/usb/gadget/gr_udc.h220
-rw-r--r--drivers/usb/gadget/lpc32xx_udc.c2
-rw-r--r--drivers/usb/gadget/m66592-udc.c4
-rw-r--r--drivers/usb/gadget/multi.c2
-rw-r--r--drivers/usb/gadget/mv_u3d_core.c5
-rw-r--r--drivers/usb/gadget/mv_udc_core.c5
-rw-r--r--drivers/usb/gadget/net2272.c4
-rw-r--r--drivers/usb/gadget/net2280.c8
-rw-r--r--drivers/usb/gadget/nokia.c6
-rw-r--r--drivers/usb/gadget/omap_udc.c4
-rw-r--r--drivers/usb/gadget/pch_udc.c8
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c5
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c10
-rw-r--r--drivers/usb/gadget/rndis.c8
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c80
-rw-r--r--drivers/usb/gadget/s3c-hsotg.h1
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c1
-rw-r--r--drivers/usb/gadget/u_ether.c2
-rw-r--r--drivers/usb/gadget/u_ether.h43
-rw-r--r--drivers/usb/gadget/u_f.c32
-rw-r--r--drivers/usb/gadget/u_f.h26
-rw-r--r--drivers/usb/gadget/u_fs.h267
-rw-r--r--drivers/usb/gadget/u_rndis.h2
-rw-r--r--drivers/usb/gadget/usbstring.c1
-rw-r--r--drivers/usb/gadget/zero.c8
-rw-r--r--drivers/usb/host/Makefile2
-rw-r--r--drivers/usb/host/ehci-atmel.c1
-rw-r--r--drivers/usb/host/ehci-dbg.c10
-rw-r--r--drivers/usb/host/ehci-exynos.c1
-rw-r--r--drivers/usb/host/ehci-fsl.c33
-rw-r--r--drivers/usb/host/ehci-grlib.c4
-rw-r--r--drivers/usb/host/ehci-hcd.c14
-rw-r--r--drivers/usb/host/ehci-hub.c6
-rw-r--r--drivers/usb/host/ehci-mv.c3
-rw-r--r--drivers/usb/host/ehci-mxc.c1
-rw-r--r--drivers/usb/host/ehci-octeon.c24
-rw-r--r--drivers/usb/host/ehci-omap.c1
-rw-r--r--drivers/usb/host/ehci-orion.c46
-rw-r--r--drivers/usb/host/ehci-platform.c1
-rw-r--r--drivers/usb/host/ehci-pmcmsp.c4
-rw-r--r--drivers/usb/host/ehci-ppc-of.c4
-rw-r--r--drivers/usb/host/ehci-ps3.c1
-rw-r--r--drivers/usb/host/ehci-q.c4
-rw-r--r--drivers/usb/host/ehci-sead3.c1
-rw-r--r--drivers/usb/host/ehci-sh.c1
-rw-r--r--drivers/usb/host/ehci-spear.c1
-rw-r--r--drivers/usb/host/ehci-tegra.c15
-rw-r--r--drivers/usb/host/ehci-tilegx.c1
-rw-r--r--drivers/usb/host/ehci-w90x900.c22
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c7
-rw-r--r--drivers/usb/host/ehci.h26
-rw-r--r--drivers/usb/host/fhci-hcd.c2
-rw-r--r--drivers/usb/host/fotg210-hcd.c96
-rw-r--r--drivers/usb/host/fotg210.h8
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c13
-rw-r--r--drivers/usb/host/fusbh200-hcd.c98
-rw-r--r--drivers/usb/host/fusbh200.h12
-rw-r--r--drivers/usb/host/hwa-hc.c24
-rw-r--r--drivers/usb/host/imx21-dbg.c4
-rw-r--r--drivers/usb/host/imx21-hcd.c7
-rw-r--r--drivers/usb/host/imx21-hcd.h4
-rw-r--r--drivers/usb/host/isp116x-hcd.c5
-rw-r--r--drivers/usb/host/isp1362-hcd.c5
-rw-r--r--drivers/usb/host/isp1760-hcd.c1
-rw-r--r--drivers/usb/host/ohci-at91.c108
-rw-r--r--drivers/usb/host/ohci-da8xx.c73
-rw-r--r--drivers/usb/host/ohci-dbg.c69
-rw-r--r--drivers/usb/host/ohci-exynos.c23
-rw-r--r--drivers/usb/host/ohci-hcd.c29
-rw-r--r--drivers/usb/host/ohci-hub.c6
-rw-r--r--drivers/usb/host/ohci-jz4740.c40
-rw-r--r--drivers/usb/host/ohci-nxp.c30
-rw-r--r--drivers/usb/host/ohci-octeon.c25
-rw-r--r--drivers/usb/host/ohci-omap.c5
-rw-r--r--drivers/usb/host/ohci-omap3.c1
-rw-r--r--drivers/usb/host/ohci-platform.c2
-rw-r--r--drivers/usb/host/ohci-ppc-of.c28
-rw-r--r--drivers/usb/host/ohci-ps3.c1
-rw-r--r--drivers/usb/host/ohci-pxa27x.c42
-rw-r--r--drivers/usb/host/ohci-q.c12
-rw-r--r--drivers/usb/host/ohci-s3c2410.c22
-rw-r--r--drivers/usb/host/ohci-sa1111.c4
-rw-r--r--drivers/usb/host/ohci-sm501.c1
-rw-r--r--drivers/usb/host/ohci-spear.c29
-rw-r--r--drivers/usb/host/ohci-tilegx.c1
-rw-r--r--drivers/usb/host/ohci-tmio.c2
-rw-r--r--drivers/usb/host/ohci.h13
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c6
-rw-r--r--drivers/usb/host/pci-quirks.c1
-rw-r--r--drivers/usb/host/r8a66597-hcd.c8
-rw-r--r--drivers/usb/host/sl811-hcd.c3
-rw-r--r--drivers/usb/host/sl811_cs.c1
-rw-r--r--drivers/usb/host/u132-hcd.c3
-rw-r--r--drivers/usb/host/uhci-debug.c4
-rw-r--r--drivers/usb/host/uhci-grlib.c1
-rw-r--r--drivers/usb/host/uhci-hcd.c44
-rw-r--r--drivers/usb/host/uhci-pci.c2
-rw-r--r--drivers/usb/host/uhci-platform.c1
-rw-r--r--drivers/usb/host/whci/hcd.c1
-rw-r--r--drivers/usb/host/whci/int.c1
-rw-r--r--drivers/usb/host/whci/wusb.c1
-rw-r--r--drivers/usb/host/xhci-dbg.c36
-rw-r--r--drivers/usb/host/xhci-hub.c106
-rw-r--r--drivers/usb/host/xhci-mem.c54
-rw-r--r--drivers/usb/host/xhci-pci.c13
-rw-r--r--drivers/usb/host/xhci-plat.c2
-rw-r--r--drivers/usb/host/xhci-ring.c88
-rw-r--r--drivers/usb/host/xhci-trace.h6
-rw-r--r--drivers/usb/host/xhci.c217
-rw-r--r--drivers/usb/host/xhci.h26
-rw-r--r--drivers/usb/image/mdc800.c2
-rw-r--r--drivers/usb/image/microtek.c1
-rw-r--r--drivers/usb/misc/adutux.c1
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c1
-rw-r--r--drivers/usb/misc/cytherm.c1
-rw-r--r--drivers/usb/misc/emi26.c1
-rw-r--r--drivers/usb/misc/emi62.c1
-rw-r--r--drivers/usb/misc/ezusb.c1
-rw-r--r--drivers/usb/misc/idmouse.c3
-rw-r--r--drivers/usb/misc/iowarrior.c3
-rw-r--r--drivers/usb/misc/ldusb.c1
-rw-r--r--drivers/usb/misc/legousbtower.c1
-rw-r--r--drivers/usb/misc/rio500.c1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.c1
-rw-r--r--drivers/usb/misc/trancevibrator.c1
-rw-r--r--drivers/usb/misc/usblcd.c1
-rw-r--r--drivers/usb/misc/usbled.c1
-rw-r--r--drivers/usb/misc/usbsevseg.c3
-rw-r--r--drivers/usb/misc/usbtest.c42
-rw-r--r--drivers/usb/misc/yurex.c3
-rw-r--r--drivers/usb/musb/Kconfig16
-rw-r--r--drivers/usb/musb/Makefile1
-rw-r--r--drivers/usb/musb/am35x.c1
-rw-r--r--drivers/usb/musb/blackfin.c5
-rw-r--r--drivers/usb/musb/da8xx.c1
-rw-r--r--drivers/usb/musb/davinci.c1
-rw-r--r--drivers/usb/musb/jz4740.c201
-rw-r--r--drivers/usb/musb/musb_am335x.c1
-rw-r--r--drivers/usb/musb/musb_core.c74
-rw-r--r--drivers/usb/musb/musb_core.h3
-rw-r--r--drivers/usb/musb/musb_cppi41.c2
-rw-r--r--drivers/usb/musb/musb_dsps.c118
-rw-r--r--drivers/usb/musb/musb_gadget.c16
-rw-r--r--drivers/usb/musb/musb_host.c13
-rw-r--r--drivers/usb/musb/musb_host.h6
-rw-r--r--drivers/usb/musb/musb_virthub.c70
-rw-r--r--drivers/usb/musb/tusb6010.c1
-rw-r--r--drivers/usb/musb/tusb6010_omap.c1
-rw-r--r--drivers/usb/musb/ux500.c1
-rw-r--r--drivers/usb/musb/ux500_dma.c4
-rw-r--r--drivers/usb/phy/Kconfig50
-rw-r--r--drivers/usb/phy/Makefile7
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c2
-rw-r--r--drivers/usb/phy/phy-am335x-control.c6
-rw-r--r--drivers/usb/phy/phy-am335x.c39
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c8
-rw-r--r--drivers/usb/phy/phy-fsl-usb.h2
-rw-r--r--drivers/usb/phy/phy-fsm-usb.c14
-rw-r--r--drivers/usb/phy/phy-fsm-usb.h236
-rw-r--r--drivers/usb/phy/phy-generic.c1
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c2
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c9
-rw-r--r--drivers/usb/phy/phy-keystone.c136
-rw-r--r--drivers/usb/phy/phy-msm-usb.c36
-rw-r--r--drivers/usb/phy/phy-mv-usb.c7
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c13
-rw-r--r--drivers/usb/phy/phy-omap-control.c19
-rw-r--r--drivers/usb/phy/phy-omap-otg.c169
-rw-r--r--drivers/usb/phy/phy-rcar-gen2-usb.c2
-rw-r--r--drivers/usb/phy/phy-tahvo.c457
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c2
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c5
-rw-r--r--drivers/usb/phy/phy.c12
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c18
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c1
-rw-r--r--drivers/usb/serial/Kconfig29
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/aircable.c10
-rw-r--r--drivers/usb/serial/ark3116.c5
-rw-r--r--drivers/usb/serial/belkin_sa.c3
-rw-r--r--drivers/usb/serial/bus.c4
-rw-r--r--drivers/usb/serial/ch341.c135
-rw-r--r--drivers/usb/serial/console.c3
-rw-r--r--drivers/usb/serial/cp210x.c8
-rw-r--r--drivers/usb/serial/cyberjack.c3
-rw-r--r--drivers/usb/serial/cypress_m8.c67
-rw-r--r--drivers/usb/serial/cypress_m8.h30
-rw-r--r--drivers/usb/serial/digi_acceleport.c1
-rw-r--r--drivers/usb/serial/empeg.c1
-rw-r--r--drivers/usb/serial/f81232.c55
-rw-r--r--drivers/usb/serial/ftdi_sio.c36
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/garmin_gps.c20
-rw-r--r--drivers/usb/serial/io_edgeport.c51
-rw-r--r--drivers/usb/serial/io_ti.c59
-rw-r--r--drivers/usb/serial/ipaq.c3
-rw-r--r--drivers/usb/serial/ipw.c1
-rw-r--r--drivers/usb/serial/ir-usb.c9
-rw-r--r--drivers/usb/serial/iuu_phoenix.c3
-rw-r--r--drivers/usb/serial/keyspan.c11
-rw-r--r--drivers/usb/serial/keyspan_pda.c1
-rw-r--r--drivers/usb/serial/keyspan_usa26msg.h2
-rw-r--r--drivers/usb/serial/kl5kusb105.c17
-rw-r--r--drivers/usb/serial/kobil_sct.c1
-rw-r--r--drivers/usb/serial/mct_u232.c1
-rw-r--r--drivers/usb/serial/metro-usb.c5
-rw-r--r--drivers/usb/serial/mos7720.c37
-rw-r--r--drivers/usb/serial/mos7840.c21
-rw-r--r--drivers/usb/serial/mxuport.c1393
-rw-r--r--drivers/usb/serial/navman.c1
-rw-r--r--drivers/usb/serial/omninet.c1
-rw-r--r--drivers/usb/serial/opticon.c11
-rw-r--r--drivers/usb/serial/option.c20
-rw-r--r--drivers/usb/serial/oti6858.c73
-rw-r--r--drivers/usb/serial/pl2303.c511
-rw-r--r--drivers/usb/serial/qcaux.c3
-rw-r--r--drivers/usb/serial/qcserial.c3
-rw-r--r--drivers/usb/serial/quatech2.c9
-rw-r--r--drivers/usb/serial/safe_serial.c3
-rw-r--r--drivers/usb/serial/sierra.c10
-rw-r--r--drivers/usb/serial/spcp8x5.c1
-rw-r--r--drivers/usb/serial/ssu100.c5
-rw-r--r--drivers/usb/serial/symbolserial.c1
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c27
-rw-r--r--drivers/usb/serial/usb-serial-simple.c4
-rw-r--r--drivers/usb/serial/usb-serial.c2
-rw-r--r--drivers/usb/serial/usb_debug.c1
-rw-r--r--drivers/usb/serial/usb_wwan.c6
-rw-r--r--drivers/usb/serial/visor.c19
-rw-r--r--drivers/usb/serial/visor.h2
-rw-r--r--drivers/usb/serial/whiteheat.c9
-rw-r--r--drivers/usb/serial/wishbone-serial.c1
-rw-r--r--drivers/usb/serial/xsens_mt.c1
-rw-r--r--drivers/usb/serial/zte_ev.c24
-rw-r--r--drivers/usb/storage/Kconfig4
-rw-r--r--drivers/usb/storage/onetouch.c1
-rw-r--r--drivers/usb/storage/protocol.c81
-rw-r--r--drivers/usb/storage/scsiglue.c6
-rw-r--r--drivers/usb/storage/unusual_cypress.h2
-rw-r--r--drivers/usb/storage/unusual_devs.h14
-rw-r--r--drivers/usb/storage/usb.c1
-rw-r--r--drivers/usb/usb-skeleton.c1
-rw-r--r--drivers/usb/wusbcore/cbaf.c22
-rw-r--r--drivers/usb/wusbcore/crypto.c2
-rw-r--r--drivers/usb/wusbcore/devconnect.c4
-rw-r--r--drivers/usb/wusbcore/mmc.c9
-rw-r--r--drivers/usb/wusbcore/pal.c1
-rw-r--r--drivers/usb/wusbcore/reservation.c1
-rw-r--r--drivers/usb/wusbcore/security.c42
-rw-r--r--drivers/usb/wusbcore/wa-hc.h25
-rw-r--r--drivers/usb/wusbcore/wa-nep.c10
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c19
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c513
-rw-r--r--drivers/usb/wusbcore/wusbhc.c17
-rw-r--r--drivers/usb/wusbcore/wusbhc.h7
-rw-r--r--drivers/uwb/beacon.c9
-rw-r--r--drivers/uwb/radio.c6
-rw-r--r--drivers/uwb/rsv.c16
-rw-r--r--drivers/uwb/umc-bus.c2
-rw-r--r--drivers/uwb/umc-dev.c1
-rw-r--r--drivers/vfio/pci/vfio_pci.c33
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c12
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c17
-rw-r--r--drivers/vfio/vfio.c70
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c28
-rw-r--r--drivers/vhost/net.c9
-rw-r--r--drivers/vhost/scsi.c11
-rw-r--r--drivers/vhost/test.c8
-rw-r--r--drivers/vhost/vhost.c4
-rw-r--r--drivers/vhost/vhost.h2
-rw-r--r--drivers/video/Kconfig21
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/amifb.c2
-rw-r--r--drivers/video/arkfb.c2
-rw-r--r--drivers/video/asiliantfb.c1
-rw-r--r--drivers/video/aty/aty128fb.c4
-rw-r--r--drivers/video/backlight/hp680_bl.c6
-rw-r--r--drivers/video/backlight/jornada720_bl.c15
-rw-r--r--drivers/video/backlight/jornada720_lcd.c13
-rw-r--r--drivers/video/backlight/kb3886_bl.c2
-rw-r--r--drivers/video/backlight/l4f00242t03.c6
-rw-r--r--drivers/video/backlight/lcd.c2
-rw-r--r--drivers/video/backlight/lp855x_bl.c2
-rw-r--r--drivers/video/backlight/lp8788_bl.c6
-rw-r--r--drivers/video/backlight/omap1_bl.c14
-rw-r--r--drivers/video/backlight/ot200_bl.c9
-rw-r--r--drivers/video/backlight/pwm_bl.c1
-rw-r--r--drivers/video/backlight/tosa_bl.c7
-rw-r--r--drivers/video/backlight/tosa_lcd.c6
-rw-r--r--drivers/video/cirrusfb.c4
-rw-r--r--drivers/video/console/Kconfig3
-rw-r--r--drivers/video/console/fbcon.c5
-rw-r--r--drivers/video/console/sticore.c2
-rw-r--r--drivers/video/exynos/Kconfig3
-rw-r--r--drivers/video/fbmem.c34
-rw-r--r--drivers/video/i810/i810_main.c4
-rw-r--r--drivers/video/logo/logo.c4
-rw-r--r--drivers/video/macfb.c1
-rw-r--r--drivers/video/mmp/core.c9
-rw-r--r--drivers/video/mx3fb.c2
-rw-r--r--drivers/video/mxsfb.c126
-rw-r--r--drivers/video/nvidia/nvidia.c1
-rw-r--r--drivers/video/ocfb.c440
-rw-r--r--drivers/video/omap2/displays-new/panel-sony-acx565akm.c44
-rw-r--r--drivers/video/omap2/dss/apply.c11
-rw-r--r--drivers/video/omap2/dss/dispc.c72
-rw-r--r--drivers/video/omap2/dss/dispc.h20
-rw-r--r--drivers/video/omap2/dss/display-sysfs.c4
-rw-r--r--drivers/video/omap2/dss/dpi.c18
-rw-r--r--drivers/video/omap2/dss/dsi.c223
-rw-r--r--drivers/video/omap2/dss/dss.c163
-rw-r--r--drivers/video/omap2/dss/dss.h17
-rw-r--r--drivers/video/omap2/dss/dss_features.c1
-rw-r--r--drivers/video/omap2/dss/dss_features.h1
-rw-r--r--drivers/video/omap2/dss/hdmi.h16
-rw-r--r--drivers/video/omap2/dss/hdmi4.c22
-rw-r--r--drivers/video/omap2/dss/hdmi4_core.c18
-rw-r--r--drivers/video/omap2/dss/hdmi_common.c2
-rw-r--r--drivers/video/omap2/dss/hdmi_phy.c2
-rw-r--r--drivers/video/omap2/dss/hdmi_pll.c18
-rw-r--r--drivers/video/omap2/dss/hdmi_wp.c16
-rw-r--r--drivers/video/omap2/dss/overlay.c5
-rw-r--r--drivers/video/omap2/dss/sdi.c24
-rw-r--r--drivers/video/omap2/dss/venc.c3
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c19
-rw-r--r--drivers/video/riva/fbdev.c1
-rw-r--r--drivers/video/s3fb.c2
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c2
-rw-r--r--drivers/video/tgafb.c21
-rw-r--r--drivers/video/valkyriefb.c1
-rw-r--r--drivers/video/vermilion/vermilion.c1
-rw-r--r--drivers/video/vt8623fb.c2
-rw-r--r--drivers/video/xen-fbfront.c6
-rw-r--r--drivers/virtio/virtio_balloon.c4
-rw-r--r--drivers/virtio/virtio_pci.c2
-rw-r--r--drivers/vlynq/vlynq.c3
-rw-r--r--drivers/vme/Kconfig2
-rw-r--r--drivers/vme/boards/vme_vmivme7805.c2
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c6
-rw-r--r--drivers/vme/bridges/vme_tsi148.c6
-rw-r--r--drivers/vme/vme.c25
-rw-r--r--drivers/w1/masters/mxc_w1.c31
-rw-r--r--drivers/w1/masters/w1-gpio.c22
-rw-r--r--drivers/w1/w1_int.c12
-rw-r--r--drivers/watchdog/Kconfig60
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/alim1535_wdt.c2
-rw-r--r--drivers/watchdog/alim7101_wdt.c2
-rw-r--r--drivers/watchdog/at91sam9_wdt.c320
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c368
-rw-r--r--drivers/watchdog/davinci_wdt.c225
-rw-r--r--drivers/watchdog/dw_wdt.c2
-rw-r--r--drivers/watchdog/gpio_wdt.c254
-rw-r--r--drivers/watchdog/hpwdt.c13
-rw-r--r--drivers/watchdog/i6300esb.c2
-rw-r--r--drivers/watchdog/imx2_wdt.c4
-rw-r--r--drivers/watchdog/moxart_wdt.c15
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c110
-rw-r--r--drivers/watchdog/nv_tco.c2
-rw-r--r--drivers/watchdog/pcwd_pci.c2
-rw-r--r--drivers/watchdog/pcwd_usb.c40
-rw-r--r--drivers/watchdog/s3c2410_wdt.c203
-rw-r--r--drivers/watchdog/sirfsoc_wdt.c2
-rw-r--r--drivers/watchdog/sp5100_tco.c2
-rw-r--r--drivers/watchdog/via_wdt.c2
-rw-r--r--drivers/watchdog/w83627hf_wdt.c234
-rw-r--r--drivers/watchdog/watchdog_core.c4
-rw-r--r--drivers/watchdog/wdt_pci.c2
-rw-r--r--drivers/xen/Kconfig1
-rw-r--r--drivers/xen/Makefile4
-rw-r--r--drivers/xen/balloon.c72
-rw-r--r--drivers/xen/dbgp.c2
-rw-r--r--drivers/xen/events/Makefile5
-rw-r--r--drivers/xen/events/events_2l.c372
-rw-r--r--drivers/xen/events/events_base.c (renamed from drivers/xen/events.c)799
-rw-r--r--drivers/xen/events/events_fifo.c428
-rw-r--r--drivers/xen/events/events_internal.h150
-rw-r--r--drivers/xen/evtchn.c2
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/grant-table.c89
-rw-r--r--drivers/xen/pci.c2
-rw-r--r--drivers/xen/platform-pci.c11
-rw-r--r--drivers/xen/privcmd.c9
-rw-r--r--drivers/xen/swiotlb-xen.c22
-rw-r--r--drivers/xen/xen-acpi-cpuhotplug.c11
-rw-r--r--drivers/xen/xen-acpi-memhotplug.c8
-rw-r--r--drivers/xen/xen-acpi-pad.c5
-rw-r--r--drivers/xen/xen-acpi-processor.c4
-rw-r--r--drivers/xen/xen-selfballoon.c22
-rw-r--r--drivers/xen/xenbus/xenbus_client.c3
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c2
-rw-r--r--drivers/xen/xencomm.c219
-rw-r--r--drivers/zorro/Makefile3
-rw-r--r--drivers/zorro/names.c11
-rw-r--r--drivers/zorro/proc.c10
-rw-r--r--drivers/zorro/zorro-driver.c11
-rw-r--r--drivers/zorro/zorro-sysfs.c22
-rw-r--r--drivers/zorro/zorro.c27
-rw-r--r--drivers/zorro/zorro.h5
5109 files changed, 372683 insertions, 126300 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 3cc8214f9b26..8e3b8b06c0b2 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -118,7 +118,7 @@ obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
-obj-$(CONFIG_ARCH_SHMOBILE) += sh/
+obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += sh/
ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
obj-y += clocksource/
endif
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 5d9248526d78..4770de5707b9 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -348,7 +348,6 @@ source "drivers/acpi/apei/Kconfig"
config ACPI_EXTLOG
tristate "Extended Error Log support"
depends on X86_MCE && X86_LOCAL_APIC
- select EFI
select UEFI_CPER
default n
help
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 8711e3797165..e7515aa43d6b 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -32,8 +32,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#define PREFIX "ACPI: "
@@ -207,7 +206,7 @@ static int acpi_ac_probe(struct platform_device *pdev)
goto end;
result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
- ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac);
+ ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac);
if (result) {
power_supply_unregister(&ac->charger);
goto end;
@@ -255,7 +254,7 @@ static int acpi_ac_remove(struct platform_device *pdev)
return -EINVAL;
acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
- ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler);
+ ACPI_ALL_NOTIFY, acpi_ac_notify_handler);
ac = platform_get_drvdata(pdev);
if (ac->charger.dev)
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index a6869e110ce5..c4a5d87ede7e 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -9,9 +9,9 @@
#include <linux/module.h>
#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
#include <linux/cper.h>
#include <linux/ratelimit.h>
+#include <linux/edac.h>
#include <asm/cpu.h>
#include <asm/mce.h>
@@ -20,11 +20,9 @@
#define EXT_ELOG_ENTRY_MASK GENMASK_ULL(51, 0) /* elog entry address mask */
#define EXTLOG_DSM_REV 0x0
-#define EXTLOG_FN_QUERY 0x0
#define EXTLOG_FN_ADDR 0x1
#define FLAG_OS_OPTIN BIT(0)
-#define EXTLOG_QUERY_L1_EXIST BIT(1)
#define ELOG_ENTRY_VALID (1ULL<<63)
#define ELOG_ENTRY_LEN 0x1000
@@ -43,7 +41,9 @@ struct extlog_l1_head {
u8 rev1[12];
};
-static u8 extlog_dsm_uuid[] = "663E35AF-CC10-41A4-88EA-5470AF055295";
+static int old_edac_report_status;
+
+static u8 extlog_dsm_uuid[] __initdata = "663E35AF-CC10-41A4-88EA-5470AF055295";
/* L1 table related physical address */
static u64 elog_base;
@@ -150,65 +150,30 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
rc = print_extlog_rcd(NULL, (struct acpi_generic_status *)elog_buf, cpu);
- return NOTIFY_DONE;
+ return NOTIFY_STOP;
}
-static int extlog_get_dsm(acpi_handle handle, int rev, int func, u64 *ret)
+static bool __init extlog_get_l1addr(void)
{
- struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
- struct acpi_object_list input;
- union acpi_object params[4], *obj;
u8 uuid[16];
- int i;
+ acpi_handle handle;
+ union acpi_object *obj;
acpi_str_to_uuid(extlog_dsm_uuid, uuid);
- input.count = 4;
- input.pointer = params;
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = 16;
- params[0].buffer.pointer = uuid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = rev;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = func;
- params[3].type = ACPI_TYPE_PACKAGE;
- params[3].package.count = 0;
- params[3].package.elements = NULL;
-
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf)))
- return -1;
-
- *ret = 0;
- obj = (union acpi_object *)buf.pointer;
- if (obj->type == ACPI_TYPE_INTEGER) {
- *ret = obj->integer.value;
- } else if (obj->type == ACPI_TYPE_BUFFER) {
- if (obj->buffer.length <= 8) {
- for (i = 0; i < obj->buffer.length; i++)
- *ret |= (obj->buffer.pointer[i] << (i * 8));
- }
- }
- kfree(buf.pointer);
-
- return 0;
-}
-
-static bool extlog_get_l1addr(void)
-{
- acpi_handle handle;
- u64 ret;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
return false;
-
- if (extlog_get_dsm(handle, EXTLOG_DSM_REV, EXTLOG_FN_QUERY, &ret) ||
- !(ret & EXTLOG_QUERY_L1_EXIST))
+ if (!acpi_check_dsm(handle, uuid, EXTLOG_DSM_REV, 1 << EXTLOG_FN_ADDR))
return false;
-
- if (extlog_get_dsm(handle, EXTLOG_DSM_REV, EXTLOG_FN_ADDR, &ret))
+ obj = acpi_evaluate_dsm_typed(handle, uuid, EXTLOG_DSM_REV,
+ EXTLOG_FN_ADDR, NULL, ACPI_TYPE_INTEGER);
+ if (!obj) {
return false;
+ } else {
+ l1_dirbase = obj->integer.value;
+ ACPI_FREE(obj);
+ }
- l1_dirbase = ret;
/* Spec says L1 directory must be 4K aligned, bail out if it isn't */
if (l1_dirbase & ((1 << 12) - 1)) {
pr_warn(FW_BUG "L1 Directory is invalid at physical %llx\n",
@@ -231,8 +196,12 @@ static int __init extlog_init(void)
u64 cap;
int rc;
- rc = -ENODEV;
+ if (get_edac_report_status() == EDAC_REPORTING_FORCE) {
+ pr_warn("Not loading eMCA, error reporting force-enabled through EDAC.\n");
+ return -EPERM;
+ }
+ rc = -ENODEV;
rdmsrl(MSR_IA32_MCG_CAP, cap);
if (!(cap & MCG_ELOG_P))
return rc;
@@ -287,6 +256,12 @@ static int __init extlog_init(void)
if (elog_buf == NULL)
goto err_release_elog;
+ /*
+ * eMCA event report method has higher priority than EDAC method,
+ * unless EDAC event report method is mandatory.
+ */
+ old_edac_report_status = get_edac_report_status();
+ set_edac_report_status(EDAC_REPORTING_DISABLED);
mce_register_decode_chain(&extlog_mce_dec);
/* enable OS to be involved to take over management from BIOS */
((struct extlog_l1_head *)extlog_l1_addr)->flags |= FLAG_OS_OPTIN;
@@ -308,6 +283,7 @@ err:
static void __exit extlog_exit(void)
{
+ set_edac_report_status(old_edac_report_status);
mce_unregister_decode_chain(&extlog_mce_dec);
((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN;
if (extlog_l1_addr)
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 551dad712ffe..b67be85ff0fc 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -180,14 +180,14 @@ static unsigned long acpi_meminfo_end_pfn(struct acpi_memory_info *info)
static int acpi_bind_memblk(struct memory_block *mem, void *arg)
{
- return acpi_bind_one(&mem->dev, (acpi_handle)arg);
+ return acpi_bind_one(&mem->dev, arg);
}
static int acpi_bind_memory_blocks(struct acpi_memory_info *info,
- acpi_handle handle)
+ struct acpi_device *adev)
{
return walk_memory_range(acpi_meminfo_start_pfn(info),
- acpi_meminfo_end_pfn(info), (void *)handle,
+ acpi_meminfo_end_pfn(info), adev,
acpi_bind_memblk);
}
@@ -197,8 +197,7 @@ static int acpi_unbind_memblk(struct memory_block *mem, void *arg)
return 0;
}
-static void acpi_unbind_memory_blocks(struct acpi_memory_info *info,
- acpi_handle handle)
+static void acpi_unbind_memory_blocks(struct acpi_memory_info *info)
{
walk_memory_range(acpi_meminfo_start_pfn(info),
acpi_meminfo_end_pfn(info), NULL, acpi_unbind_memblk);
@@ -242,9 +241,9 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
if (result && result != -EEXIST)
continue;
- result = acpi_bind_memory_blocks(info, handle);
+ result = acpi_bind_memory_blocks(info, mem_device->device);
if (result) {
- acpi_unbind_memory_blocks(info, handle);
+ acpi_unbind_memory_blocks(info);
return -ENODEV;
}
@@ -285,7 +284,7 @@ static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
if (nid == NUMA_NO_NODE)
nid = memory_add_physaddr_to_nid(info->start_addr);
- acpi_unbind_memory_blocks(info, handle);
+ acpi_unbind_memory_blocks(info);
remove_memory(nid, info->start_addr, info->length);
list_del(&info->list);
kfree(info);
@@ -361,7 +360,19 @@ static void acpi_memory_device_remove(struct acpi_device *device)
acpi_memory_device_free(mem_device);
}
+static bool __initdata acpi_no_memhotplug;
+
void __init acpi_memory_hotplug_init(void)
{
+ if (acpi_no_memhotplug)
+ return;
+
acpi_scan_add_handler_with_hotplug(&memory_device_handler, "memory");
}
+
+static int __init disable_acpi_memory_hotplug(char *str)
+{
+ acpi_no_memhotplug = true;
+ return 1;
+}
+__setup("acpi_no_memhotplug", disable_acpi_memory_hotplug);
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index fc6008fbce35..df96a0fe4890 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -28,8 +28,7 @@
#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/slab.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <asm/mwait.h>
#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
@@ -193,10 +192,7 @@ static int power_saving_thread(void *data)
CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
stop_critical_timings();
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __mwait(power_saving_mwait_eax, 1);
+ mwait_idle_with_hints(power_saving_mwait_eax, 1);
start_critical_timings();
if (lapic_marked_unstable)
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 3c1d6b0c09a4..c29c2c3ec0ad 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -212,7 +212,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
struct acpi_processor *pr = acpi_driver_data(device);
- int cpu_index, device_declaration = 0;
+ int apic_id, cpu_index, device_declaration = 0;
acpi_status status = AE_OK;
static int cpu0_initialized;
unsigned long long value;
@@ -258,18 +258,21 @@ static int acpi_processor_get_info(struct acpi_device *device)
device_declaration = 1;
pr->acpi_id = value;
}
- pr->apic_id = acpi_get_apicid(pr->handle, device_declaration,
- pr->acpi_id);
- cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
- /* Handle UP system running SMP kernel, with no LAPIC in MADT */
- if (!cpu0_initialized && (cpu_index == -1) &&
- (num_online_cpus() == 1)) {
- cpu_index = 0;
+ apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
+ if (apic_id < 0) {
+ acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
+ return -ENODEV;
}
+ pr->apic_id = apic_id;
- cpu0_initialized = 1;
-
+ cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
+ if (!cpu0_initialized) {
+ cpu0_initialized = 1;
+ /* Handle UP system running SMP kernel, with no LAPIC in MADT */
+ if ((cpu_index == -1) && (num_online_cpus() == 1))
+ cpu_index = 0;
+ }
pr->id = cpu_index;
/*
@@ -282,6 +285,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
if (ret)
return ret;
}
+
/*
* On some boxes several processors use the same processor bus id.
* But they are located in different scope. For example:
@@ -395,7 +399,7 @@ static int acpi_processor_add(struct acpi_device *device,
goto err;
}
- result = acpi_bind_one(dev, pr->handle);
+ result = acpi_bind_one(dev, device);
if (result)
goto err;
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index a9fd0b872062..2bf3ca2b8a7a 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -113,7 +113,6 @@ void acpi_db_display_handlers(void);
ACPI_HW_DEPENDENT_RETURN_VOID(void
acpi_db_generate_gpe(char *gpe_arg,
char *block_arg))
-
ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_generate_sci(void))
/*
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 41abe552c7a3..0fb0adf435d6 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -71,9 +71,8 @@ acpi_status acpi_ev_init_global_lock_handler(void);
ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
acpi_ev_acquire_global_lock(u16 timeout))
-
ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
- acpi_status acpi_ev_remove_global_lock_handler(void);
+acpi_status acpi_ev_remove_global_lock_handler(void);
/*
* evgpe - Low-level GPE support
@@ -133,7 +132,7 @@ acpi_status acpi_ev_gpe_initialize(void);
ACPI_HW_DEPENDENT_RETURN_VOID(void
acpi_ev_update_gpes(acpi_owner_id table_owner_id))
- acpi_status
+acpi_status
acpi_ev_match_gpe_method(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
@@ -149,7 +148,9 @@ acpi_status
acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block, void *context);
-struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number);
+acpi_status
+acpi_ev_get_gpe_xrupt_block(u32 interrupt_number,
+ struct acpi_gpe_xrupt_info **gpe_xrupt_block);
acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index e9f1fc7f99c7..4ed1aa384df2 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -108,7 +108,7 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
/*
* Optionally enable output from the AML Debug Object.
*/
-bool ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
+u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
/*
* Optionally copy the entire DSDT to local memory (instead of simply
@@ -119,6 +119,24 @@ bool ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
/*
+ * Optionally ignore an XSDT if present and use the RSDT instead.
+ * Although the ACPI specification requires that an XSDT be used instead
+ * of the RSDT, the XSDT has been found to be corrupt or ill-formed on
+ * some machines. Default behavior is to use the XSDT if present.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_do_not_use_xsdt, FALSE);
+
+/*
+ * Optionally use 32-bit FADT addresses if and when there is a conflict
+ * (address mismatch) between the 32-bit and 64-bit versions of the
+ * address. Although ACPICA adheres to the ACPI specification which
+ * requires the use of the corresponding 64-bit address if it is non-zero,
+ * some machines have been found to have a corrupted non-zero 64-bit
+ * address. Default is FALSE, do not favor the 32-bit addresses.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_use32_bit_fadt_addresses, FALSE);
+
+/*
* Optionally truncate I/O addresses to 16 bits. Provides compatibility
* with other ACPI implementations. NOTE: During ACPICA initialization,
* this value is set to TRUE if any Windows OSI strings have been
@@ -484,6 +502,18 @@ ACPI_EXTERN u32 acpi_gbl_size_of_acpi_objects;
/*****************************************************************************
*
+ * Application globals
+ *
+ ****************************************************************************/
+
+#ifdef ACPI_APPLICATION
+
+ACPI_FILE ACPI_INIT_GLOBAL(acpi_gbl_debug_file, NULL);
+
+#endif /* ACPI_APPLICATION */
+
+/*****************************************************************************
+ *
* Info/help support
*
****************************************************************************/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 53ed1a8ba4f0..d95ca5449ace 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -1038,15 +1038,16 @@ struct acpi_external_list {
struct acpi_external_list *next;
u32 value;
u16 length;
+ u16 flags;
u8 type;
- u8 flags;
- u8 resolved;
- u8 emitted;
};
/* Values for Flags field above */
-#define ACPI_IPATH_ALLOCATED 0x01
+#define ACPI_EXT_RESOLVED_REFERENCE 0x01 /* Object was resolved during cross ref */
+#define ACPI_EXT_ORIGIN_FROM_FILE 0x02 /* External came from a file */
+#define ACPI_EXT_INTERNAL_PATH_ALLOCATED 0x04 /* Deallocate internal path on completion */
+#define ACPI_EXT_EXTERNAL_EMITTED 0x08 /* External() statement has been emitted */
struct acpi_external_file {
char *path;
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 2d4c07322576..e7a57c554e84 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -105,7 +105,7 @@ acpi_ds_create_external_region(acpi_status lookup_status,
* operation_region not found. Generate an External for it, and
* insert the name into the namespace.
*/
- acpi_dm_add_to_external_list(op, path, ACPI_TYPE_REGION, 0);
+ acpi_dm_add_op_to_external_list(op, path, ACPI_TYPE_REGION, 0, 0);
status = acpi_ns_lookup(walk_state->scope_info, path, ACPI_TYPE_REGION,
ACPI_IMODE_LOAD_PASS1, ACPI_NS_SEARCH_PARENT,
walk_state, node);
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index ade44e49deb4..d7f53fb2979a 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -727,27 +727,26 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
index++;
}
- index--;
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "NumOperands %d, ArgCount %d, Index %d\n",
+ walk_state->num_operands, arg_count, index));
- /* It is the appropriate order to get objects from the Result stack */
+ /* Create the interpreter arguments, in reverse order */
+ index--;
for (i = 0; i < arg_count; i++) {
arg = arguments[index];
-
- /* Force the filling of the operand stack in inverse order */
-
- walk_state->operand_index = (u8) index;
+ walk_state->operand_index = (u8)index;
status = acpi_ds_create_operand(walk_state, arg, index);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
- index--;
-
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "Arg #%u (%p) done, Arg1=%p\n", index, arg,
- first_arg));
+ "Created Arg #%u (%p) %u args total\n",
+ index, arg, arg_count));
+ index--;
}
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 95e681a36f9c..2dbe109727c8 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -181,8 +181,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
* Target of Scope() not found. Generate an External for it, and
* insert the name into the namespace.
*/
- acpi_dm_add_to_external_list(op, path, ACPI_TYPE_DEVICE,
- 0);
+ acpi_dm_add_op_to_external_list(op, path,
+ ACPI_TYPE_DEVICE, 0, 0);
status =
acpi_ns_lookup(walk_state->scope_info, path,
object_type, ACPI_IMODE_LOAD_PASS1,
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index a9e76bc4ad97..a31e549e64cc 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -87,9 +87,9 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
return_ACPI_STATUS(status);
}
- gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
- if (!gpe_xrupt_block) {
- status = AE_NO_MEMORY;
+ status =
+ acpi_ev_get_gpe_xrupt_block(interrupt_number, &gpe_xrupt_block);
+ if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
}
@@ -112,7 +112,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
unlock_and_exit:
- status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index d3f5e1e2a2b1..4d764e847a08 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -197,8 +197,9 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
* FUNCTION: acpi_ev_get_gpe_xrupt_block
*
* PARAMETERS: interrupt_number - Interrupt for a GPE block
+ * gpe_xrupt_block - Where the block is returned
*
- * RETURN: A GPE interrupt block
+ * RETURN: Status
*
* DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
* block per unique interrupt level used for GPEs. Should be
@@ -207,7 +208,9 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
*
******************************************************************************/
-struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
+acpi_status
+acpi_ev_get_gpe_xrupt_block(u32 interrupt_number,
+ struct acpi_gpe_xrupt_info ** gpe_xrupt_block)
{
struct acpi_gpe_xrupt_info *next_gpe_xrupt;
struct acpi_gpe_xrupt_info *gpe_xrupt;
@@ -221,7 +224,8 @@ struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
while (next_gpe_xrupt) {
if (next_gpe_xrupt->interrupt_number == interrupt_number) {
- return_PTR(next_gpe_xrupt);
+ *gpe_xrupt_block = next_gpe_xrupt;
+ return_ACPI_STATUS(AE_OK);
}
next_gpe_xrupt = next_gpe_xrupt->next;
@@ -231,7 +235,7 @@ struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
if (!gpe_xrupt) {
- return_PTR(NULL);
+ return_ACPI_STATUS(AE_NO_MEMORY);
}
gpe_xrupt->interrupt_number = interrupt_number;
@@ -250,6 +254,7 @@ struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
} else {
acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
}
+
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
/* Install new interrupt handler if not SCI_INT */
@@ -259,14 +264,15 @@ struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
acpi_ev_gpe_xrupt_handler,
gpe_xrupt);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not install GPE interrupt handler at level 0x%X",
- interrupt_number));
- return_PTR(NULL);
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not install GPE interrupt handler at level 0x%X",
+ interrupt_number));
+ return_ACPI_STATUS(status);
}
}
- return_PTR(gpe_xrupt);
+ *gpe_xrupt_block = gpe_xrupt;
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index acd34f599313..7ca6925a87ca 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -124,7 +124,8 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
}
if (!source_desc) {
- ACPI_ERROR((AE_INFO, "No object attached to node %p", node));
+ ACPI_ERROR((AE_INFO, "No object attached to node [%4.4s] %p",
+ node->name.ascii, node));
return_ACPI_STATUS(AE_AML_NO_OPERAND);
}
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index e973e311f856..1f0c28ba50df 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -84,7 +84,7 @@ acpi_evaluate_object_typed(acpi_handle handle,
acpi_object_type return_type)
{
acpi_status status;
- u8 must_free = FALSE;
+ u8 free_buffer_on_error = FALSE;
ACPI_FUNCTION_TRACE(acpi_evaluate_object_typed);
@@ -95,14 +95,13 @@ acpi_evaluate_object_typed(acpi_handle handle,
}
if (return_buffer->length == ACPI_ALLOCATE_BUFFER) {
- must_free = TRUE;
+ free_buffer_on_error = TRUE;
}
/* Evaluate the object */
- status =
- acpi_evaluate_object(handle, pathname, external_params,
- return_buffer);
+ status = acpi_evaluate_object(handle, pathname,
+ external_params, return_buffer);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -135,11 +134,15 @@ acpi_evaluate_object_typed(acpi_handle handle,
pointer)->type),
acpi_ut_get_type_name(return_type)));
- if (must_free) {
-
- /* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */
-
- ACPI_FREE_BUFFER(*return_buffer);
+ if (free_buffer_on_error) {
+ /*
+ * Free a buffer created via ACPI_ALLOCATE_BUFFER.
+ * Note: We use acpi_os_free here because acpi_os_allocate was used
+ * to allocate the buffer. This purposefully bypasses the
+ * (optionally enabled) allocation tracking mechanism since we
+ * only want to track internal allocations.
+ */
+ acpi_os_free(return_buffer->pointer);
return_buffer->pointer = NULL;
}
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index 9ba5301e5751..b0c9787dbe61 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -71,6 +71,10 @@ static const u8 acpi_gbl_argument_count[] =
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
{
+#ifdef ACPI_DEBUG_OUTPUT
+ const char *opcode_name = "Unknown AML opcode";
+#endif
+
ACPI_FUNCTION_NAME(ps_get_opcode_info);
/*
@@ -92,11 +96,54 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
return (&acpi_gbl_aml_op_info
[acpi_gbl_long_op_index[(u8)opcode]]);
}
+#if defined ACPI_ASL_COMPILER && defined ACPI_DEBUG_OUTPUT
+#include "asldefine.h"
+
+ switch (opcode) {
+ case AML_RAW_DATA_BYTE:
+ opcode_name = "-Raw Data Byte-";
+ break;
+
+ case AML_RAW_DATA_WORD:
+ opcode_name = "-Raw Data Word-";
+ break;
+
+ case AML_RAW_DATA_DWORD:
+ opcode_name = "-Raw Data Dword-";
+ break;
+
+ case AML_RAW_DATA_QWORD:
+ opcode_name = "-Raw Data Qword-";
+ break;
+
+ case AML_RAW_DATA_BUFFER:
+ opcode_name = "-Raw Data Buffer-";
+ break;
+
+ case AML_RAW_DATA_CHAIN:
+ opcode_name = "-Raw Data Buffer Chain-";
+ break;
+
+ case AML_PACKAGE_LENGTH:
+ opcode_name = "-Package Length-";
+ break;
+
+ case AML_UNASSIGNED_OPCODE:
+ opcode_name = "-Unassigned Opcode-";
+ break;
+
+ case AML_DEFAULT_ARG_OP:
+ opcode_name = "-Default Arg-";
+ break;
+
+ default:
+ break;
+ }
+#endif
/* Unknown AML opcode */
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Unknown AML opcode [%4.4X]\n", opcode));
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%4.4X]\n", opcode_name, opcode));
return (&acpi_gbl_aml_op_info[_UNK]);
}
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 9d99f2189693..8f89263ac47e 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -56,10 +56,11 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
static void acpi_tb_convert_fadt(void);
-static void acpi_tb_validate_fadt(void);
-
static void acpi_tb_setup_fadt_registers(void);
+static u64
+acpi_tb_select_address(char *register_name, u32 address32, u64 address64);
+
/* Table for conversion of FADT to common internal format and FADT validation */
typedef struct acpi_fadt_info {
@@ -175,6 +176,7 @@ static struct acpi_fadt_pm_info fadt_pm_info_table[] = {
* space_id - ACPI Space ID for this register
* byte_width - Width of this register
* address - Address of the register
+ * register_name - ASCII name of the ACPI register
*
* RETURN: None
*
@@ -220,6 +222,68 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
/*******************************************************************************
*
+ * FUNCTION: acpi_tb_select_address
+ *
+ * PARAMETERS: register_name - ASCII name of the ACPI register
+ * address32 - 32-bit address of the register
+ * address64 - 64-bit address of the register
+ *
+ * RETURN: The resolved 64-bit address
+ *
+ * DESCRIPTION: Select between 32-bit and 64-bit versions of addresses within
+ * the FADT. Used for the FACS and DSDT addresses.
+ *
+ * NOTES:
+ *
+ * Check for FACS and DSDT address mismatches. An address mismatch between
+ * the 32-bit and 64-bit address fields (FIRMWARE_CTRL/X_FIRMWARE_CTRL and
+ * DSDT/X_DSDT) could be a corrupted address field or it might indicate
+ * the presence of two FACS or two DSDT tables.
+ *
+ * November 2013:
+ * By default, as per the ACPI specification, a valid 64-bit address is
+ * used regardless of the value of the 32-bit address. However, this
+ * behavior can be overridden via the acpi_gbl_use32_bit_fadt_addresses flag.
+ *
+ ******************************************************************************/
+
+static u64
+acpi_tb_select_address(char *register_name, u32 address32, u64 address64)
+{
+
+ if (!address64) {
+
+ /* 64-bit address is zero, use 32-bit address */
+
+ return ((u64)address32);
+ }
+
+ if (address32 && (address64 != (u64)address32)) {
+
+ /* Address mismatch between 32-bit and 64-bit versions */
+
+ ACPI_BIOS_WARNING((AE_INFO,
+ "32/64X %s address mismatch in FADT: "
+ "0x%8.8X/0x%8.8X%8.8X, using %u-bit address",
+ register_name, address32,
+ ACPI_FORMAT_UINT64(address64),
+ acpi_gbl_use32_bit_fadt_addresses ? 32 :
+ 64));
+
+ /* 32-bit address override */
+
+ if (acpi_gbl_use32_bit_fadt_addresses) {
+ return ((u64)address32);
+ }
+ }
+
+ /* Default is to use the 64-bit address */
+
+ return (address64);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_tb_parse_fadt
*
* PARAMETERS: table_index - Index for the FADT
@@ -331,10 +395,6 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
acpi_tb_convert_fadt();
- /* Validate FADT values now, before we make any changes */
-
- acpi_tb_validate_fadt();
-
/* Initialize the global ACPI register structures */
acpi_tb_setup_fadt_registers();
@@ -344,66 +404,55 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
*
* FUNCTION: acpi_tb_convert_fadt
*
- * PARAMETERS: None, uses acpi_gbl_FADT
+ * PARAMETERS: none - acpi_gbl_FADT is used.
*
* RETURN: None
*
* DESCRIPTION: Converts all versions of the FADT to a common internal format.
- * Expand 32-bit addresses to 64-bit as necessary.
+ * Expand 32-bit addresses to 64-bit as necessary. Also validate
+ * important fields within the FADT.
*
- * NOTE: acpi_gbl_FADT must be of size (struct acpi_table_fadt),
- * and must contain a copy of the actual FADT.
+ * NOTE: acpi_gbl_FADT must be of size (struct acpi_table_fadt), and must
+ * contain a copy of the actual BIOS-provided FADT.
*
* Notes on 64-bit register addresses:
*
* After this FADT conversion, later ACPICA code will only use the 64-bit "X"
* fields of the FADT for all ACPI register addresses.
*
- * The 64-bit "X" fields are optional extensions to the original 32-bit FADT
+ * The 64-bit X fields are optional extensions to the original 32-bit FADT
* V1.0 fields. Even if they are present in the FADT, they are optional and
* are unused if the BIOS sets them to zero. Therefore, we must copy/expand
- * 32-bit V1.0 fields if the corresponding X field is zero.
+ * 32-bit V1.0 fields to the 64-bit X fields if the 64-bit X field is
+ * originally zero.
*
- * For ACPI 1.0 FADTs, all 32-bit address fields are expanded to the
- * corresponding "X" fields in the internal FADT.
+ * For ACPI 1.0 FADTs (that contain no 64-bit addresses), all 32-bit address
+ * fields are expanded to the corresponding 64-bit X fields in the internal
+ * common FADT.
*
* For ACPI 2.0+ FADTs, all valid (non-zero) 32-bit address fields are expanded
- * to the corresponding 64-bit X fields. For compatibility with other ACPI
- * implementations, we ignore the 64-bit field if the 32-bit field is valid,
- * regardless of whether the host OS is 32-bit or 64-bit.
+ * to the corresponding 64-bit X fields, if the 64-bit field is originally
+ * zero. Adhering to the ACPI specification, we completely ignore the 32-bit
+ * field if the 64-bit field is valid, regardless of whether the host OS is
+ * 32-bit or 64-bit.
+ *
+ * Possible additional checks:
+ * (acpi_gbl_FADT.pm1_event_length >= 4)
+ * (acpi_gbl_FADT.pm1_control_length >= 2)
+ * (acpi_gbl_FADT.pm_timer_length >= 4)
+ * Gpe block lengths must be multiple of 2
*
******************************************************************************/
static void acpi_tb_convert_fadt(void)
{
+ char *name;
struct acpi_generic_address *address64;
u32 address32;
+ u8 length;
u32 i;
/*
- * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
- * Later code will always use the X 64-bit field. Also, check for an
- * address mismatch between the 32-bit and 64-bit address fields
- * (FIRMWARE_CTRL/X_FIRMWARE_CTRL, DSDT/X_DSDT) which would indicate
- * the presence of two FACS or two DSDT tables.
- */
- if (!acpi_gbl_FADT.Xfacs) {
- acpi_gbl_FADT.Xfacs = (u64) acpi_gbl_FADT.facs;
- } else if (acpi_gbl_FADT.facs &&
- (acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) {
- ACPI_WARNING((AE_INFO,
- "32/64 FACS address mismatch in FADT - two FACS tables!"));
- }
-
- if (!acpi_gbl_FADT.Xdsdt) {
- acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
- } else if (acpi_gbl_FADT.dsdt &&
- (acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) {
- ACPI_WARNING((AE_INFO,
- "32/64 DSDT address mismatch in FADT - two DSDT tables!"));
- }
-
- /*
* For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which
* should be zero are indeed zero. This will workaround BIOSs that
* inadvertently place values in these fields.
@@ -421,119 +470,24 @@ static void acpi_tb_convert_fadt(void)
acpi_gbl_FADT.boot_flags = 0;
}
- /* Update the local FADT table header length */
-
- acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
-
/*
- * Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X"
- * generic address structures as necessary. Later code will always use
- * the 64-bit address structures.
- *
- * March 2009:
- * We now always use the 32-bit address if it is valid (non-null). This
- * is not in accordance with the ACPI specification which states that
- * the 64-bit address supersedes the 32-bit version, but we do this for
- * compatibility with other ACPI implementations. Most notably, in the
- * case where both the 32 and 64 versions are non-null, we use the 32-bit
- * version. This is the only address that is guaranteed to have been
- * tested by the BIOS manufacturer.
+ * Now we can update the local FADT length to the length of the
+ * current FADT version as defined by the ACPI specification.
+ * Thus, we will have a common FADT internally.
*/
- for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
- address32 = *ACPI_ADD_PTR(u32,
- &acpi_gbl_FADT,
- fadt_info_table[i].address32);
-
- address64 = ACPI_ADD_PTR(struct acpi_generic_address,
- &acpi_gbl_FADT,
- fadt_info_table[i].address64);
-
- /*
- * If both 32- and 64-bit addresses are valid (non-zero),
- * they must match.
- */
- if (address64->address && address32 &&
- (address64->address != (u64)address32)) {
- ACPI_BIOS_ERROR((AE_INFO,
- "32/64X address mismatch in FADT/%s: "
- "0x%8.8X/0x%8.8X%8.8X, using 32",
- fadt_info_table[i].name, address32,
- ACPI_FORMAT_UINT64(address64->
- address)));
- }
-
- /* Always use 32-bit address if it is valid (non-null) */
-
- if (address32) {
- /*
- * Copy the 32-bit address to the 64-bit GAS structure. The
- * Space ID is always I/O for 32-bit legacy address fields
- */
- acpi_tb_init_generic_address(address64,
- ACPI_ADR_SPACE_SYSTEM_IO,
- *ACPI_ADD_PTR(u8,
- &acpi_gbl_FADT,
- fadt_info_table
- [i].length),
- (u64) address32,
- fadt_info_table[i].name);
- }
- }
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_tb_validate_fadt
- *
- * PARAMETERS: table - Pointer to the FADT to be validated
- *
- * RETURN: None
- *
- * DESCRIPTION: Validate various important fields within the FADT. If a problem
- * is found, issue a message, but no status is returned.
- * Used by both the table manager and the disassembler.
- *
- * Possible additional checks:
- * (acpi_gbl_FADT.pm1_event_length >= 4)
- * (acpi_gbl_FADT.pm1_control_length >= 2)
- * (acpi_gbl_FADT.pm_timer_length >= 4)
- * Gpe block lengths must be multiple of 2
- *
- ******************************************************************************/
-
-static void acpi_tb_validate_fadt(void)
-{
- char *name;
- struct acpi_generic_address *address64;
- u8 length;
- u32 i;
+ acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
/*
- * Check for FACS and DSDT address mismatches. An address mismatch between
- * the 32-bit and 64-bit address fields (FIRMWARE_CTRL/X_FIRMWARE_CTRL and
- * DSDT/X_DSDT) would indicate the presence of two FACS or two DSDT tables.
+ * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
+ * Later ACPICA code will always use the X 64-bit field.
*/
- if (acpi_gbl_FADT.facs &&
- (acpi_gbl_FADT.Xfacs != (u64)acpi_gbl_FADT.facs)) {
- ACPI_BIOS_WARNING((AE_INFO,
- "32/64X FACS address mismatch in FADT - "
- "0x%8.8X/0x%8.8X%8.8X, using 32",
- acpi_gbl_FADT.facs,
- ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs)));
-
- acpi_gbl_FADT.Xfacs = (u64)acpi_gbl_FADT.facs;
- }
-
- if (acpi_gbl_FADT.dsdt &&
- (acpi_gbl_FADT.Xdsdt != (u64)acpi_gbl_FADT.dsdt)) {
- ACPI_BIOS_WARNING((AE_INFO,
- "32/64X DSDT address mismatch in FADT - "
- "0x%8.8X/0x%8.8X%8.8X, using 32",
- acpi_gbl_FADT.dsdt,
- ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt)));
+ acpi_gbl_FADT.Xfacs = acpi_tb_select_address("FACS",
+ acpi_gbl_FADT.facs,
+ acpi_gbl_FADT.Xfacs);
- acpi_gbl_FADT.Xdsdt = (u64)acpi_gbl_FADT.dsdt;
- }
+ acpi_gbl_FADT.Xdsdt = acpi_tb_select_address("DSDT",
+ acpi_gbl_FADT.dsdt,
+ acpi_gbl_FADT.Xdsdt);
/* If Hardware Reduced flag is set, we are all done */
@@ -545,18 +499,95 @@ static void acpi_tb_validate_fadt(void)
for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
/*
- * Generate pointer to the 64-bit address, get the register
- * length (width) and the register name
+ * Get the 32-bit and 64-bit addresses, as well as the register
+ * length and register name.
*/
+ address32 = *ACPI_ADD_PTR(u32,
+ &acpi_gbl_FADT,
+ fadt_info_table[i].address32);
+
address64 = ACPI_ADD_PTR(struct acpi_generic_address,
&acpi_gbl_FADT,
fadt_info_table[i].address64);
- length =
- *ACPI_ADD_PTR(u8, &acpi_gbl_FADT,
- fadt_info_table[i].length);
+
+ length = *ACPI_ADD_PTR(u8,
+ &acpi_gbl_FADT,
+ fadt_info_table[i].length);
+
name = fadt_info_table[i].name;
/*
+ * Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X"
+ * generic address structures as necessary. Later code will always use
+ * the 64-bit address structures.
+ *
+ * November 2013:
+ * Now always use the 64-bit address if it is valid (non-zero), in
+ * accordance with the ACPI specification which states that a 64-bit
+ * address supersedes the 32-bit version. This behavior can be
+ * overridden by the acpi_gbl_use32_bit_fadt_addresses flag.
+ *
+ * During 64-bit address construction and verification,
+ * these cases are handled:
+ *
+ * Address32 zero, Address64 [don't care] - Use Address64
+ *
+ * Address32 non-zero, Address64 zero - Copy/use Address32
+ * Address32 non-zero == Address64 non-zero - Use Address64
+ * Address32 non-zero != Address64 non-zero - Warning, use Address64
+ *
+ * Override: if acpi_gbl_use32_bit_fadt_addresses is TRUE, and:
+ * Address32 non-zero != Address64 non-zero - Warning, copy/use Address32
+ *
+ * Note: space_id is always I/O for 32-bit legacy address fields
+ */
+ if (address32) {
+ if (!address64->address) {
+
+ /* 64-bit address is zero, use 32-bit address */
+
+ acpi_tb_init_generic_address(address64,
+ ACPI_ADR_SPACE_SYSTEM_IO,
+ *ACPI_ADD_PTR(u8,
+ &acpi_gbl_FADT,
+ fadt_info_table
+ [i].
+ length),
+ (u64)address32,
+ name);
+ } else if (address64->address != (u64)address32) {
+
+ /* Address mismatch */
+
+ ACPI_BIOS_WARNING((AE_INFO,
+ "32/64X address mismatch in FADT/%s: "
+ "0x%8.8X/0x%8.8X%8.8X, using %u-bit address",
+ name, address32,
+ ACPI_FORMAT_UINT64
+ (address64->address),
+ acpi_gbl_use32_bit_fadt_addresses
+ ? 32 : 64));
+
+ if (acpi_gbl_use32_bit_fadt_addresses) {
+
+ /* 32-bit address override */
+
+ acpi_tb_init_generic_address(address64,
+ ACPI_ADR_SPACE_SYSTEM_IO,
+ *ACPI_ADD_PTR
+ (u8,
+ &acpi_gbl_FADT,
+ fadt_info_table
+ [i].
+ length),
+ (u64)
+ address32,
+ name);
+ }
+ }
+ }
+
+ /*
* For each extended field, check for length mismatch between the
* legacy length field and the corresponding 64-bit X length field.
* Note: If the legacy length field is > 0xFF bits, ignore this
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 3d6bb83aa7e7..6412d3c301cb 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -49,69 +49,11 @@
ACPI_MODULE_NAME("tbutils")
/* Local prototypes */
+static acpi_status acpi_tb_validate_xsdt(acpi_physical_address address);
+
static acpi_physical_address
acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
-/*******************************************************************************
- *
- * FUNCTION: acpi_tb_check_xsdt
- *
- * PARAMETERS: address - Pointer to the XSDT
- *
- * RETURN: status
- * AE_OK - XSDT is okay
- * AE_NO_MEMORY - can't map XSDT
- * AE_INVALID_TABLE_LENGTH - invalid table length
- * AE_NULL_ENTRY - XSDT has NULL entry
- *
- * DESCRIPTION: validate XSDT
-******************************************************************************/
-
-static acpi_status
-acpi_tb_check_xsdt(acpi_physical_address address)
-{
- struct acpi_table_header *table;
- u32 length;
- u64 xsdt_entry_address;
- u8 *table_entry;
- u32 table_count;
- int i;
-
- table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
- if (!table)
- return AE_NO_MEMORY;
-
- length = table->length;
- acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
- if (length < sizeof(struct acpi_table_header))
- return AE_INVALID_TABLE_LENGTH;
-
- table = acpi_os_map_memory(address, length);
- if (!table)
- return AE_NO_MEMORY;
-
- /* Calculate the number of tables described in XSDT */
- table_count =
- (u32) ((table->length -
- sizeof(struct acpi_table_header)) / sizeof(u64));
- table_entry =
- ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
- for (i = 0; i < table_count; i++) {
- ACPI_MOVE_64_TO_64(&xsdt_entry_address, table_entry);
- if (!xsdt_entry_address) {
- /* XSDT has NULL entry */
- break;
- }
- table_entry += sizeof(u64);
- }
- acpi_os_unmap_memory(table, length);
-
- if (i < table_count)
- return AE_NULL_ENTRY;
- else
- return AE_OK;
-}
-
#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
@@ -383,7 +325,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
* Get the table physical address (32-bit for RSDT, 64-bit for XSDT):
* Note: Addresses are 32-bit aligned (not 64) in both RSDT and XSDT
*/
- if (table_entry_size == sizeof(u32)) {
+ if (table_entry_size == ACPI_RSDT_ENTRY_SIZE) {
/*
* 32-bit platform, RSDT: Return 32-bit table entry
* 64-bit platform, RSDT: Expand 32-bit to 64-bit and return
@@ -415,6 +357,87 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
/*******************************************************************************
*
+ * FUNCTION: acpi_tb_validate_xsdt
+ *
+ * PARAMETERS: address - Physical address of the XSDT (from RSDP)
+ *
+ * RETURN: Status. AE_OK if the table appears to be valid.
+ *
+ * DESCRIPTION: Validate an XSDT to ensure that it is of minimum size and does
+ * not contain any NULL entries. A problem that is seen in the
+ * field is that the XSDT exists, but is actually useless because
+ * of one or more (or all) NULL entries.
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_tb_validate_xsdt(acpi_physical_address xsdt_address)
+{
+ struct acpi_table_header *table;
+ u8 *next_entry;
+ acpi_physical_address address;
+ u32 length;
+ u32 entry_count;
+ acpi_status status;
+ u32 i;
+
+ /* Get the XSDT length */
+
+ table =
+ acpi_os_map_memory(xsdt_address, sizeof(struct acpi_table_header));
+ if (!table) {
+ return (AE_NO_MEMORY);
+ }
+
+ length = table->length;
+ acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+
+ /*
+ * Minimum XSDT length is the size of the standard ACPI header
+ * plus one physical address entry
+ */
+ if (length < (sizeof(struct acpi_table_header) + ACPI_XSDT_ENTRY_SIZE)) {
+ return (AE_INVALID_TABLE_LENGTH);
+ }
+
+ /* Map the entire XSDT */
+
+ table = acpi_os_map_memory(xsdt_address, length);
+ if (!table) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Get the number of entries and pointer to first entry */
+
+ status = AE_OK;
+ next_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header));
+ entry_count = (u32)((table->length - sizeof(struct acpi_table_header)) /
+ ACPI_XSDT_ENTRY_SIZE);
+
+ /* Validate each entry (physical address) within the XSDT */
+
+ for (i = 0; i < entry_count; i++) {
+ address =
+ acpi_tb_get_root_table_entry(next_entry,
+ ACPI_XSDT_ENTRY_SIZE);
+ if (!address) {
+
+ /* Detected a NULL entry, XSDT is invalid */
+
+ status = AE_NULL_ENTRY;
+ break;
+ }
+
+ next_entry += ACPI_XSDT_ENTRY_SIZE;
+ }
+
+ /* Unmap table */
+
+ acpi_os_unmap_memory(table, length);
+ return (status);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_tb_parse_root_table
*
* PARAMETERS: rsdp - Pointer to the RSDP
@@ -438,16 +461,14 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
u32 table_count;
struct acpi_table_header *table;
acpi_physical_address address;
- acpi_physical_address uninitialized_var(rsdt_address);
u32 length;
u8 *table_entry;
acpi_status status;
ACPI_FUNCTION_TRACE(tb_parse_root_table);
- /*
- * Map the entire RSDP and extract the address of the RSDT or XSDT
- */
+ /* Map the entire RSDP and extract the address of the RSDT or XSDT */
+
rsdp = acpi_os_map_memory(rsdp_address, sizeof(struct acpi_table_rsdp));
if (!rsdp) {
return_ACPI_STATUS(AE_NO_MEMORY);
@@ -457,24 +478,22 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
ACPI_CAST_PTR(struct acpi_table_header,
rsdp));
- /* Differentiate between RSDT and XSDT root tables */
+ /* Use XSDT if present and not overridden. Otherwise, use RSDT */
- if (rsdp->revision > 1 && rsdp->xsdt_physical_address
- && !acpi_rsdt_forced) {
+ if ((rsdp->revision > 1) &&
+ rsdp->xsdt_physical_address && !acpi_gbl_do_not_use_xsdt) {
/*
- * Root table is an XSDT (64-bit physical addresses). We must use the
- * XSDT if the revision is > 1 and the XSDT pointer is present, as per
- * the ACPI specification.
+ * RSDP contains an XSDT (64-bit physical addresses). We must use
+ * the XSDT if the revision is > 1 and the XSDT pointer is present,
+ * as per the ACPI specification.
*/
address = (acpi_physical_address) rsdp->xsdt_physical_address;
- table_entry_size = sizeof(u64);
- rsdt_address = (acpi_physical_address)
- rsdp->rsdt_physical_address;
+ table_entry_size = ACPI_XSDT_ENTRY_SIZE;
} else {
/* Root table is an RSDT (32-bit physical addresses) */
address = (acpi_physical_address) rsdp->rsdt_physical_address;
- table_entry_size = sizeof(u32);
+ table_entry_size = ACPI_RSDT_ENTRY_SIZE;
}
/*
@@ -483,15 +502,25 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
*/
acpi_os_unmap_memory(rsdp, sizeof(struct acpi_table_rsdp));
- if (table_entry_size == sizeof(u64)) {
- if (acpi_tb_check_xsdt(address) == AE_NULL_ENTRY) {
- /* XSDT has NULL entry, RSDT is used */
- address = rsdt_address;
- table_entry_size = sizeof(u32);
- ACPI_WARNING((AE_INFO, "BIOS XSDT has NULL entry, "
- "using RSDT"));
+ /*
+ * If it is present and used, validate the XSDT for access/size
+ * and ensure that all table entries are at least non-NULL
+ */
+ if (table_entry_size == ACPI_XSDT_ENTRY_SIZE) {
+ status = acpi_tb_validate_xsdt(address);
+ if (ACPI_FAILURE(status)) {
+ ACPI_BIOS_WARNING((AE_INFO,
+ "XSDT is invalid (%s), using RSDT",
+ acpi_format_exception(status)));
+
+ /* Fall back to the RSDT */
+
+ address =
+ (acpi_physical_address) rsdp->rsdt_physical_address;
+ table_entry_size = ACPI_RSDT_ENTRY_SIZE;
}
}
+
/* Map the RSDT/XSDT table header to get the full table length */
table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
@@ -501,12 +530,14 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
acpi_tb_print_table_header(address, table);
- /* Get the length of the full table, verify length and map entire table */
-
+ /*
+ * Validate length of the table, and map entire table.
+ * Minimum length table must contain at least one entry.
+ */
length = table->length;
acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
- if (length < sizeof(struct acpi_table_header)) {
+ if (length < (sizeof(struct acpi_table_header) + table_entry_size)) {
ACPI_BIOS_ERROR((AE_INFO,
"Invalid table length 0x%X in RSDT/XSDT",
length));
@@ -526,22 +557,21 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
return_ACPI_STATUS(status);
}
- /* Calculate the number of tables described in the root table */
+ /* Get the number of entries and pointer to first entry */
table_count = (u32)((table->length - sizeof(struct acpi_table_header)) /
table_entry_size);
+ table_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header));
+
/*
* First two entries in the table array are reserved for the DSDT
* and FACS, which are not actually present in the RSDT/XSDT - they
* come from the FADT
*/
- table_entry =
- ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
acpi_gbl_root_table_list.current_table_count = 2;
- /*
- * Initialize the root table array from the RSDT/XSDT
- */
+ /* Initialize the root table array from the RSDT/XSDT */
+
for (i = 0; i < table_count; i++) {
if (acpi_gbl_root_table_list.current_table_count >=
acpi_gbl_root_table_list.max_table_count) {
@@ -584,7 +614,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].
address, NULL, i);
- /* Special case for FADT - get the DSDT and FACS */
+ /* Special case for FADT - validate it then get the DSDT and FACS */
if (ACPI_COMPARE_NAME
(&acpi_gbl_root_table_list.tables[i].signature,
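For reference, the NULL-entry scan performed by the new acpi_tb_validate_xsdt() above can be sketched outside the kernel; the header size and ACPI_XSDT_ENTRY_SIZE below are plain stand-in constants, not the ACPICA definitions, and the sample table is fabricated:

/*
 * Stand-alone sketch (not kernel code): scan a root-table image for
 * NULL entries the way acpi_tb_validate_xsdt() does.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDR_SIZE   36          /* stand-in for sizeof(struct acpi_table_header) */
#define XSDT_ENTRY 8           /* stand-in for ACPI_XSDT_ENTRY_SIZE */

static int validate_xsdt(const uint8_t *table, uint32_t length)
{
    uint32_t count, i;
    uint64_t addr;

    if (length < HDR_SIZE + XSDT_ENTRY)
        return -1;                      /* like AE_INVALID_TABLE_LENGTH */

    count = (length - HDR_SIZE) / XSDT_ENTRY;
    for (i = 0; i < count; i++) {
        memcpy(&addr, table + HDR_SIZE + i * XSDT_ENTRY, sizeof(addr));
        if (!addr)
            return -2;                  /* like AE_NULL_ENTRY: fall back to RSDT */
    }
    return 0;                           /* like AE_OK */
}

int main(void)
{
    uint8_t xsdt[HDR_SIZE + 2 * XSDT_ENTRY] = { 0 };
    uint64_t good = 0xFED40000;

    memcpy(xsdt + HDR_SIZE, &good, sizeof(good));   /* entry 0: valid */
    /* entry 1 left as zero: simulates the broken-BIOS case */
    printf("status: %d\n", validate_xsdt(xsdt, sizeof(xsdt)));
    return 0;
}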
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index e0a2e2779c2e..2c2b6ae5dfc4 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -224,10 +224,11 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
while (range_info) {
/*
- * Check if the requested Address/Length overlaps this address_range.
- * Four cases to consider:
+ * Check if the requested address/length overlaps this
+ * address range. There are four cases to consider:
*
- * 1) Input address/length is contained completely in the address range
+ * 1) Input address/length is contained completely in the
+ * address range
* 2) Input address/length overlaps range at the range start
* 3) Input address/length overlaps range at the range end
* 4) Input address/length completely encompasses the range
@@ -244,11 +245,17 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
region_node);
ACPI_WARNING((AE_INFO,
- "0x%p-0x%p %s conflicts with Region %s %d",
+ "%s range 0x%p-0x%p conflicts with OpRegion 0x%p-0x%p (%s)",
+ acpi_ut_get_region_name(space_id),
ACPI_CAST_PTR(void, address),
ACPI_CAST_PTR(void, end_address),
- acpi_ut_get_region_name(space_id),
- pathname, overlap_count));
+ ACPI_CAST_PTR(void,
+ range_info->
+ start_address),
+ ACPI_CAST_PTR(void,
+ range_info->
+ end_address),
+ pathname));
ACPI_FREE(pathname);
}
}
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 814267f52715..1851762fc5b5 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -302,9 +302,13 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
return (AE_BUFFER_OVERFLOW);
case ACPI_ALLOCATE_BUFFER:
-
- /* Allocate a new buffer */
-
+ /*
+	 * Allocate a new buffer. We directly call acpi_os_allocate here to
+ * purposefully bypass the (optionally enabled) internal allocation
+ * tracking mechanism since we only want to track internal
+ * allocations. Note: The caller should use acpi_os_free to free this
+ * buffer created via ACPI_ALLOCATE_BUFFER.
+ */
buffer->pointer = acpi_os_allocate(required_length);
break;
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 366bfec4b770..cacd2fd9e665 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -248,12 +248,12 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
ACPI_FUNCTION_NAME(os_acquire_object);
if (!cache) {
- return (NULL);
+ return_PTR(NULL);
}
status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
if (ACPI_FAILURE(status)) {
- return (NULL);
+ return_PTR(NULL);
}
ACPI_MEM_TRACKING(cache->requests++);
@@ -276,7 +276,7 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
if (ACPI_FAILURE(status)) {
- return (NULL);
+ return_PTR(NULL);
}
/* Clear (zero) the previously used Object */
@@ -299,15 +299,15 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
if (ACPI_FAILURE(status)) {
- return (NULL);
+ return_PTR(NULL);
}
object = ACPI_ALLOCATE_ZEROED(cache->object_size);
if (!object) {
- return (NULL);
+ return_PTR(NULL);
}
}
- return (object);
+ return_PTR(object);
}
#endif /* ACPI_USE_LOCAL_CACHE */
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 03ae8affe48f..d971c8631263 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -194,9 +194,9 @@ acpi_debug_print(u32 requested_debug_level,
*/
acpi_os_printf("%9s-%04ld ", module_name, line_number);
-#ifdef ACPI_EXEC_APP
+#ifdef ACPI_APPLICATION
/*
- * For acpi_exec only, emit the thread ID and nesting level.
+ * For acpi_exec/iASL only, emit the thread ID and nesting level.
* Note: nesting level is really only useful during a single-thread
* execution. Otherwise, multiple threads will keep resetting the
* level.
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 81f9a9584451..030cb0dc673c 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -388,11 +388,7 @@ acpi_status acpi_ut_init_globals(void)
/* Public globals */
ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
-
ACPI_EXPORT_SYMBOL(acpi_dbg_level)
-
ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
-
ACPI_EXPORT_SYMBOL(acpi_gpe_count)
-
ACPI_EXPORT_SYMBOL(acpi_current_gpe_count)
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 75efea0539c1..246ef68681f4 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -122,8 +122,16 @@ acpi_status __init acpi_initialize_subsystem(void)
/* If configured, initialize the AML debugger */
- ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
- return_ACPI_STATUS(status);
+#ifdef ACPI_DEBUGGER
+ status = acpi_db_initialize();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During Debugger initialization"));
+ return_ACPI_STATUS(status);
+ }
+#endif
+
+ return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_subsystem)
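The utxfinit.c hunk trades the ACPI_DEBUGGER_EXEC() side-effect macro for an explicit #ifdef block so a debugger initialization failure is logged and propagated. A minimal user-space illustration of the same pattern, with acpi_db_initialize() mocked:

/* Sketch only: explicit conditional init with error reporting. */
#include <stdio.h>

#define ACPI_DEBUGGER 1

static int acpi_db_initialize(void) { return 0; }   /* stand-in */

static int initialize_subsystem(void)
{
#ifdef ACPI_DEBUGGER
    int status = acpi_db_initialize();

    if (status) {
        fprintf(stderr, "During Debugger initialization: %d\n", status);
        return status;
    }
#endif
    return 0;
}

int main(void)
{
    printf("init: %d\n", initialize_subsystem());
    return 0;
}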
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 786294bb682c..3650b2183227 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -2,7 +2,6 @@ config ACPI_APEI
bool "ACPI Platform Error Interface (APEI)"
select MISC_FILESYSTEMS
select PSTORE
- select EFI
select UEFI_CPER
depends on X86
help
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 6d2c49b86b7f..8678dfe5366b 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -34,13 +34,13 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <linux/acpi_io.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
+#include <asm/unaligned.h>
#include "apei-internal.h"
@@ -567,8 +567,7 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
bit_offset = reg->bit_offset;
access_size_code = reg->access_width;
space_id = reg->space_id;
- /* Handle possible alignment issues */
- memcpy(paddr, &reg->address, sizeof(*paddr));
+ *paddr = get_unaligned(&reg->address);
if (!*paddr) {
pr_warning(FW_BUG APEI_PFX
"Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index 21ba34a73883..e5bcd919d4e6 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -8,7 +8,6 @@
#include <linux/cper.h>
#include <linux/acpi.h>
-#include <linux/acpi_io.h>
struct apei_exec_context;
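Both apei-base.c above and einj.c below switch from an open-coded memcpy() to get_unaligned() when reading the 64-bit GAR address, which may not be naturally aligned. A portable sketch of what such an unaligned read boils down to, with memcpy() used here in place of the kernel helper:

/* Illustration only: read a 64-bit value from an unaligned location. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t read_unaligned_u64(const void *p)
{
    uint64_t v;

    memcpy(&v, p, sizeof(v));       /* safe regardless of alignment */
    return v;
}

int main(void)
{
    /* A buffer where the 64-bit field starts at an odd offset. */
    uint8_t raw[16] = { 0 };
    uint64_t addr = 0x00000000FEE00000ULL;

    memcpy(raw + 3, &addr, sizeof(addr));
    printf("address = 0x%llx\n",
           (unsigned long long)read_unaligned_u64(raw + 3));
    return 0;
}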
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index fb57d03e698b..1be6f5564485 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -33,7 +33,7 @@
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/mm.h>
-#include <acpi/acpi.h>
+#include <asm/unaligned.h>
#include "apei-internal.h"
@@ -216,7 +216,7 @@ static void check_vendor_extension(u64 paddr,
static void *einj_get_parameter_address(void)
{
int i;
- u64 paddrv4 = 0, paddrv5 = 0;
+ u64 pa_v4 = 0, pa_v5 = 0;
struct acpi_whea_header *entry;
entry = EINJ_TAB_ENTRY(einj_tab);
@@ -225,30 +225,28 @@ static void *einj_get_parameter_address(void)
entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
entry->register_region.space_id ==
ACPI_ADR_SPACE_SYSTEM_MEMORY)
- memcpy(&paddrv4, &entry->register_region.address,
- sizeof(paddrv4));
+ pa_v4 = get_unaligned(&entry->register_region.address);
if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS &&
entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
entry->register_region.space_id ==
ACPI_ADR_SPACE_SYSTEM_MEMORY)
- memcpy(&paddrv5, &entry->register_region.address,
- sizeof(paddrv5));
+ pa_v5 = get_unaligned(&entry->register_region.address);
entry++;
}
- if (paddrv5) {
+ if (pa_v5) {
struct set_error_type_with_address *v5param;
- v5param = acpi_os_map_memory(paddrv5, sizeof(*v5param));
+ v5param = acpi_os_map_memory(pa_v5, sizeof(*v5param));
if (v5param) {
acpi5 = 1;
- check_vendor_extension(paddrv5, v5param);
+ check_vendor_extension(pa_v5, v5param);
return v5param;
}
}
- if (param_extension && paddrv4) {
+ if (param_extension && pa_v4) {
struct einj_parameter *v4param;
- v4param = acpi_os_map_memory(paddrv4, sizeof(*v4param));
+ v4param = acpi_os_map_memory(pa_v4, sizeof(*v4param));
if (!v4param)
return NULL;
if (v4param->reserved1 || v4param->reserved2) {
@@ -416,7 +414,8 @@ out:
return rc;
}
-static int __einj_error_inject(u32 type, u64 param1, u64 param2)
+static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
+ u64 param3, u64 param4)
{
struct apei_exec_context ctx;
u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
@@ -446,6 +445,12 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
break;
}
v5param->flags = vendor_flags;
+ } else if (flags) {
+ v5param->flags = flags;
+ v5param->memory_address = param1;
+ v5param->memory_address_range = param2;
+ v5param->apicid = param3;
+ v5param->pcie_sbdf = param4;
} else {
switch (type) {
case ACPI_EINJ_PROCESSOR_CORRECTABLE:
@@ -514,11 +519,17 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
}
/* Inject the specified hardware error */
-static int einj_error_inject(u32 type, u64 param1, u64 param2)
+static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
+ u64 param3, u64 param4)
{
int rc;
unsigned long pfn;
+ /* If user manually set "flags", make sure it is legal */
+ if (flags && (flags &
+ ~(SETWA_FLAGS_APICID|SETWA_FLAGS_MEM|SETWA_FLAGS_PCIE_SBDF)))
+ return -EINVAL;
+
/*
* We need extra sanity checks for memory errors.
* Other types leap directly to injection.
@@ -532,7 +543,7 @@ static int einj_error_inject(u32 type, u64 param1, u64 param2)
if (type & ACPI5_VENDOR_BIT) {
if (vendor_flags != SETWA_FLAGS_MEM)
goto inject;
- } else if (!(type & MEM_ERROR_MASK))
+ } else if (!(type & MEM_ERROR_MASK) && !(flags & SETWA_FLAGS_MEM))
goto inject;
/*
@@ -546,15 +557,18 @@ static int einj_error_inject(u32 type, u64 param1, u64 param2)
inject:
mutex_lock(&einj_mutex);
- rc = __einj_error_inject(type, param1, param2);
+ rc = __einj_error_inject(type, flags, param1, param2, param3, param4);
mutex_unlock(&einj_mutex);
return rc;
}
static u32 error_type;
+static u32 error_flags;
static u64 error_param1;
static u64 error_param2;
+static u64 error_param3;
+static u64 error_param4;
static struct dentry *einj_debug_dir;
static int available_error_type_show(struct seq_file *m, void *v)
@@ -648,7 +662,8 @@ static int error_inject_set(void *data, u64 val)
if (!error_type)
return -EINVAL;
- return einj_error_inject(error_type, error_param1, error_param2);
+ return einj_error_inject(error_type, error_flags, error_param1, error_param2,
+ error_param3, error_param4);
}
DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
@@ -729,6 +744,10 @@ static int __init einj_init(void)
rc = -ENOMEM;
einj_param = einj_get_parameter_address();
if ((param_extension || acpi5) && einj_param) {
+ fentry = debugfs_create_x32("flags", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_flags);
+ if (!fentry)
+ goto err_unmap;
fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
einj_debug_dir, &error_param1);
if (!fentry)
@@ -737,6 +756,14 @@ static int __init einj_init(void)
einj_debug_dir, &error_param2);
if (!fentry)
goto err_unmap;
+ fentry = debugfs_create_x64("param3", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param3);
+ if (!fentry)
+ goto err_unmap;
+ fentry = debugfs_create_x64("param4", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param4);
+ if (!fentry)
+ goto err_unmap;
fentry = debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR,
einj_debug_dir, &notrigger);
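The einj changes add user-settable flags plus param3/param4 so ACPI 5.0 SET_ERROR_TYPE_WITH_ADDRESS injections can be driven directly from debugfs, and illegal flag bits are rejected before injection. A small sketch of that sanity check; the SETWA_* values are reproduced here only for the example:

/* Sketch of the flag sanity check added to einj_error_inject(). */
#include <stdint.h>
#include <stdio.h>

#define SETWA_FLAGS_APICID      0x1
#define SETWA_FLAGS_MEM         0x2
#define SETWA_FLAGS_PCIE_SBDF   0x4

static int flags_are_legal(uint32_t flags)
{
    /* Zero means "derive the fields from the injected error type". */
    if (!flags)
        return 1;

    /* Any bit outside the three defined ones is rejected (-EINVAL). */
    return !(flags & ~(SETWA_FLAGS_APICID | SETWA_FLAGS_MEM |
                       SETWA_FLAGS_PCIE_SBDF));
}

int main(void)
{
    printf("flags 0x2:  %s\n", flags_are_legal(0x2)  ? "ok" : "invalid");
    printf("flags 0x10: %s\n", flags_are_legal(0x10) ? "ok" : "invalid");
    return 0;
}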
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 26311f23c824..ed65e9c4b5b0 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -611,7 +611,7 @@ static void __erst_record_id_cache_compact(void)
if (entries[i] == APEI_ERST_INVALID_RECORD_ID)
continue;
if (wpos != i)
- memcpy(&entries[wpos], &entries[i], sizeof(entries[i]));
+ entries[wpos] = entries[i];
wpos++;
}
erst_record_id_cache.len = wpos;
@@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count,
static struct pstore_info erst_info = {
.owner = THIS_MODULE,
.name = "erst",
+ .flags = PSTORE_FLAGS_FRAGILE,
.open = erst_open_pstore,
.close = erst_close_pstore,
.read = erst_reader,
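The erst.c compaction loop now copies record IDs by plain assignment instead of memcpy(). A stand-alone version of the same loop, with APEI_ERST_INVALID_RECORD_ID approximated by ~0ULL:

/* Sketch of the record-id cache compaction. */
#include <stdint.h>
#include <stdio.h>

#define INVALID_ID (~0ULL)

static int compact(uint64_t *entries, int len)
{
    int i, wpos = 0;

    for (i = 0; i < len; i++) {
        if (entries[i] == INVALID_ID)
            continue;
        if (wpos != i)
            entries[wpos] = entries[i];   /* was memcpy() */
        wpos++;
    }
    return wpos;                          /* new cache length */
}

int main(void)
{
    uint64_t ids[] = { 1, INVALID_ID, 3, INVALID_ID, 5 };
    int n = compact(ids, 5);

    printf("compacted to %d entries, first=%llu last=%llu\n",
           n, (unsigned long long)ids[0], (unsigned long long)ids[n - 1]);
    return 0;
}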
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index a30bc313787b..dab7cb7349df 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -33,7 +33,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <linux/acpi_io.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
@@ -413,27 +412,31 @@ static void ghes_handle_memory_failure(struct acpi_generic_data *gdata, int sev)
{
#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
unsigned long pfn;
+ int flags = -1;
int sec_sev = ghes_severity(gdata->error_severity);
struct cper_sec_mem_err *mem_err;
mem_err = (struct cper_sec_mem_err *)(gdata + 1);
- if (sec_sev == GHES_SEV_CORRECTED &&
- (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED) &&
- (mem_err->validation_bits & CPER_MEM_VALID_PA)) {
- pfn = mem_err->physical_addr >> PAGE_SHIFT;
- if (pfn_valid(pfn))
- memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE);
- else if (printk_ratelimit())
- pr_warn(FW_WARN GHES_PFX
- "Invalid address in generic error data: %#llx\n",
- mem_err->physical_addr);
- }
- if (sev == GHES_SEV_RECOVERABLE &&
- sec_sev == GHES_SEV_RECOVERABLE &&
- mem_err->validation_bits & CPER_MEM_VALID_PA) {
- pfn = mem_err->physical_addr >> PAGE_SHIFT;
- memory_failure_queue(pfn, 0, 0);
+ if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
+ return;
+
+ pfn = mem_err->physical_addr >> PAGE_SHIFT;
+ if (!pfn_valid(pfn)) {
+ pr_warn_ratelimited(FW_WARN GHES_PFX
+ "Invalid address in generic error data: %#llx\n",
+ mem_err->physical_addr);
+ return;
}
+
+	/* Handle the following two cases only if they can be handled properly by now */
+ if (sec_sev == GHES_SEV_CORRECTED &&
+ (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
+ flags = MF_SOFT_OFFLINE;
+ if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
+ flags = 0;
+
+ if (flags != -1)
+ memory_failure_queue(pfn, 0, flags);
#endif
}
@@ -453,8 +456,7 @@ static void ghes_do_proc(struct ghes *ghes,
ghes_edac_report_mem_error(ghes, sev, mem_err);
#ifdef CONFIG_X86_MCE
- apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
- mem_err);
+ apei_mce_report_mem_error(sev, mem_err);
#endif
ghes_handle_memory_failure(gdata, sev);
}
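The reworked ghes_handle_memory_failure() first bails out on a missing or invalid physical address and then chooses the memory_failure_queue() flags in one place: soft offline for corrected errors over threshold, 0 for recoverable ones, and no queueing otherwise. A simplified model of that decision; the severity and flag constants are stand-ins for the kernel definitions:

/* Sketch of the flag selection in ghes_handle_memory_failure(). */
#include <stdio.h>

enum sev { SEV_CORRECTED, SEV_RECOVERABLE, SEV_FATAL };
#define THRESHOLD_EXCEEDED  0x1
#define MF_SOFT_OFFLINE     0x1

static int pick_flags(enum sev sev, enum sev sec_sev, unsigned int gdata_flags)
{
    int flags = -1;   /* -1: do not queue memory_failure() at all */

    if (sec_sev == SEV_CORRECTED && (gdata_flags & THRESHOLD_EXCEEDED))
        flags = MF_SOFT_OFFLINE;
    if (sev == SEV_RECOVERABLE && sec_sev == SEV_RECOVERABLE)
        flags = 0;

    return flags;
}

int main(void)
{
    printf("corrected+threshold -> %d\n",
           pick_flags(SEV_CORRECTED, SEV_CORRECTED, THRESHOLD_EXCEEDED));
    printf("recoverable         -> %d\n",
           pick_flags(SEV_RECOVERABLE, SEV_RECOVERABLE, 0));
    printf("fatal               -> %d\n",
           pick_flags(SEV_FATAL, SEV_FATAL, 0));
    return 0;
}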
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index fbf1aceda8b8..018a42883706 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -36,8 +36,7 @@
#include <linux/suspend.h>
#include <asm/unaligned.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/power_supply.h>
#define PREFIX "ACPI: "
@@ -62,6 +61,7 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
MODULE_DESCRIPTION("ACPI Battery Driver");
MODULE_LICENSE("GPL");
+static int battery_bix_broken_package;
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -416,7 +416,12 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
return -ENODEV;
}
- if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
+
+ if (battery_bix_broken_package)
+ result = extract_package(battery, buffer.pointer,
+ extended_info_offsets + 1,
+ ARRAY_SIZE(extended_info_offsets) - 1);
+ else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
result = extract_package(battery, buffer.pointer,
extended_info_offsets,
ARRAY_SIZE(extended_info_offsets));
@@ -544,7 +549,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
{
unsigned long x;
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
- if (sscanf(buf, "%ld\n", &x) == 1)
+ if (sscanf(buf, "%lu\n", &x) == 1)
battery->alarm = x/1000;
if (acpi_battery_present(battery))
acpi_battery_set_alarm(battery);
@@ -754,6 +759,17 @@ static int battery_notify(struct notifier_block *nb,
return 0;
}
+static struct dmi_system_id bat_dmi_table[] = {
+ {
+ .ident = "NEC LZ750/LS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
+ },
+ },
+ {},
+};
+
static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
@@ -846,6 +862,9 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
{
if (acpi_disabled)
return;
+
+ if (dmi_check_system(bat_dmi_table))
+ battery_bix_broken_package = 1;
acpi_bus_register_driver(&acpi_battery_driver);
}
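The battery driver now consults a DMI quirk table at init time and, on the NEC LZ750/LS, extracts the _BIX package starting from the second element to work around the broken firmware. A toy version of the match-and-set-flag idea; the strings and the matching helper are illustrative only:

/* Sketch of a DMI-style quirk table lookup. */
#include <stdio.h>
#include <string.h>

struct dmi_quirk {
    const char *vendor;
    const char *product;
};

static const struct dmi_quirk bat_dmi_table[] = {
    { "NEC", "PC-LZ750LS" },        /* broken _BIX package */
    { NULL, NULL },
};

static int battery_bix_broken_package;

static void check_dmi(const char *vendor, const char *product)
{
    const struct dmi_quirk *q;

    for (q = bat_dmi_table; q->vendor; q++)
        if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
            battery_bix_broken_package = 1;
}

int main(void)
{
    check_dmi("NEC", "PC-LZ750LS");
    printf("broken _BIX quirk active: %d\n", battery_bix_broken_package);
    return 0;
}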
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 078c4f7fe2dd..10e4964d051a 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
#include <linux/dmi.h>
#include "internal.h"
@@ -323,6 +322,56 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
},
},
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "HP ProBook 2013 models",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "),
+ DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "HP EliteBook 2013 models",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
+ DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "HP ZBook 14",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "HP ZBook 15",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "HP ZBook 17",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "HP EliteBook 8780w",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
+ },
+ },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index bba9b72e25f8..fcb59c21c68d 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -33,12 +33,11 @@
#include <linux/proc_fs.h>
#include <linux/acpi.h>
#include <linux/slab.h>
+#include <linux/regulator/machine.h>
#ifdef CONFIG_X86
#include <asm/mpspec.h>
#endif
#include <linux/pci.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
#include <acpi/apei.h>
#include <linux/dmi.h>
#include <linux/suspend.h>
@@ -52,9 +51,6 @@ struct acpi_device *acpi_root;
struct proc_dir_entry *acpi_root_dir;
EXPORT_SYMBOL(acpi_root_dir);
-#define STRUCT_TO_INT(s) (*((int*)&s))
-
-
#ifdef CONFIG_X86
static int set_copy_dsdt(const struct dmi_system_id *id)
{
@@ -115,18 +111,16 @@ int acpi_bus_get_status(struct acpi_device *device)
if (ACPI_FAILURE(status))
return -ENODEV;
- STRUCT_TO_INT(device->status) = (int) sta;
+ acpi_set_device_status(device, sta);
if (device->status.functional && !device->status.present) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]: "
"functional but not present;\n",
- device->pnp.bus_id,
- (u32) STRUCT_TO_INT(device->status)));
+ device->pnp.bus_id, (u32)sta));
}
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n",
- device->pnp.bus_id,
- (u32) STRUCT_TO_INT(device->status)));
+ device->pnp.bus_id, (u32)sta));
return 0;
}
EXPORT_SYMBOL(acpi_bus_get_status);
@@ -156,6 +150,16 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
}
EXPORT_SYMBOL(acpi_bus_get_private_data);
+void acpi_bus_no_hotplug(acpi_handle handle)
+{
+ struct acpi_device *adev = NULL;
+
+ acpi_bus_get_device(handle, &adev);
+ if (adev)
+ adev->flags.no_hotplug = true;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug);
+
static void acpi_print_osc_error(acpi_handle handle,
struct acpi_osc_context *context, char *error)
{
@@ -329,58 +333,6 @@ static void acpi_bus_osc_support(void)
Notification Handling
-------------------------------------------------------------------------- */
-static void acpi_bus_check_device(acpi_handle handle)
-{
- struct acpi_device *device;
- acpi_status status;
- struct acpi_device_status old_status;
-
- if (acpi_bus_get_device(handle, &device))
- return;
- if (!device)
- return;
-
- old_status = device->status;
-
- /*
- * Make sure this device's parent is present before we go about
- * messing with the device.
- */
- if (device->parent && !device->parent->status.present) {
- device->status = device->parent->status;
- return;
- }
-
- status = acpi_bus_get_status(device);
- if (ACPI_FAILURE(status))
- return;
-
- if (STRUCT_TO_INT(old_status) == STRUCT_TO_INT(device->status))
- return;
-
- /*
- * Device Insertion/Removal
- */
- if ((device->status.present) && !(old_status.present)) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device insertion detected\n"));
- /* TBD: Handle device insertion */
- } else if (!(device->status.present) && (old_status.present)) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device removal detected\n"));
- /* TBD: Handle device removal */
- }
-}
-
-static void acpi_bus_check_scope(acpi_handle handle)
-{
- /* Status Change? */
- acpi_bus_check_device(handle);
-
- /*
- * TBD: Enumerate child devices within this device's scope and
- * run acpi_bus_check_device()'s on them.
- */
-}
-
/**
* acpi_bus_notify
* ---------------
@@ -397,19 +349,11 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
- acpi_bus_check_scope(handle);
- /*
- * TBD: We'll need to outsource certain events to non-ACPI
- * drivers via the device manager (device.c).
- */
+ /* TBD */
break;
case ACPI_NOTIFY_DEVICE_CHECK:
- acpi_bus_check_device(handle);
- /*
- * TBD: We'll need to outsource certain events to non-ACPI
- * drivers via the device manager (device.c).
- */
+ /* TBD */
break;
case ACPI_NOTIFY_DEVICE_WAKE:
@@ -566,6 +510,14 @@ void __init acpi_early_init(void)
goto error0;
}
+ /*
+ * If the system is using ACPI then we can be reasonably
+ * confident that any regulators are managed by the firmware
+ * so tell the regulator core it has everything it needs to
+ * know.
+ */
+ regulator_has_full_constraints();
+
return;
error0:
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index c971929d75c2..11c11f6b8fa1 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -31,8 +31,7 @@
#include <linux/seq_file.h>
#include <linux/input.h>
#include <linux/slab.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <acpi/button.h>
#define PREFIX "ACPI: "
@@ -101,7 +100,6 @@ struct acpi_button {
struct input_dev *input;
char phys[32]; /* for input device */
unsigned long pushed;
- bool wakeup_enabled;
};
static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
@@ -407,16 +405,6 @@ static int acpi_button_add(struct acpi_device *device)
lid_device = device;
}
- if (device->wakeup.flags.valid) {
- /* Button's GPE is run-wake GPE */
- acpi_enable_gpe(device->wakeup.gpe_device,
- device->wakeup.gpe_number);
- if (!device_may_wakeup(&device->dev)) {
- device_set_wakeup_enable(&device->dev, true);
- button->wakeup_enabled = true;
- }
- }
-
printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
return 0;
@@ -433,13 +421,6 @@ static int acpi_button_remove(struct acpi_device *device)
{
struct acpi_button *button = acpi_driver_data(device);
- if (device->wakeup.flags.valid) {
- acpi_disable_gpe(device->wakeup.gpe_device,
- device->wakeup.gpe_number);
- if (button->wakeup_enabled)
- device_set_wakeup_enable(&device->dev, false);
- }
-
acpi_button_remove_fs(device);
input_unregister_device(button->input);
kfree(button);
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index e23151667655..368f9ddb8480 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -27,8 +27,7 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/acpi.h>
-
-#include "internal.h"
+#include <linux/container.h>
#include "internal.h"
@@ -44,19 +43,66 @@ static const struct acpi_device_id container_device_ids[] = {
{"", 0},
};
-static int container_device_attach(struct acpi_device *device,
+static int acpi_container_offline(struct container_dev *cdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&cdev->dev);
+ struct acpi_device *child;
+
+ /* Check all of the dependent devices' physical companions. */
+ list_for_each_entry(child, &adev->children, node)
+ if (!acpi_scan_is_offline(child, false))
+ return -EBUSY;
+
+ return 0;
+}
+
+static void acpi_container_release(struct device *dev)
+{
+ kfree(to_container_dev(dev));
+}
+
+static int container_device_attach(struct acpi_device *adev,
const struct acpi_device_id *not_used)
{
- /* This is necessary for container hotplug to work. */
+ struct container_dev *cdev;
+ struct device *dev;
+ int ret;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ cdev->offline = acpi_container_offline;
+ dev = &cdev->dev;
+ dev->bus = &container_subsys;
+ dev_set_name(dev, "%s", dev_name(&adev->dev));
+ ACPI_COMPANION_SET(dev, adev);
+ dev->release = acpi_container_release;
+ ret = device_register(dev);
+ if (ret) {
+ put_device(dev);
+ return ret;
+ }
+ adev->driver_data = dev;
return 1;
}
+static void container_device_detach(struct acpi_device *adev)
+{
+ struct device *dev = acpi_driver_data(adev);
+
+ adev->driver_data = NULL;
+ if (dev)
+ device_unregister(dev);
+}
+
static struct acpi_scan_handler container_handler = {
.ids = container_device_ids,
.attach = container_device_attach,
+ .detach = container_device_detach,
.hotplug = {
.enabled = true,
- .mode = AHM_CONTAINER,
+ .demand_offline = true,
},
};
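The container scan handler now registers a container_dev whose offline callback refuses to go down while any child device is still online. A toy version of that check, returning -EBUSY like the kernel code; the device structures are simplified placeholders:

/* Sketch of acpi_container_offline(): all children must be offline. */
#include <errno.h>
#include <stdio.h>

struct acpi_child { int offline; };

static int container_offline(const struct acpi_child *children, int n)
{
    int i;

    for (i = 0; i < n; i++)
        if (!children[i].offline)
            return -EBUSY;      /* a dependent device is still online */
    return 0;
}

int main(void)
{
    struct acpi_child kids[] = { { 1 }, { 0 } };

    printf("offline attempt: %d\n", container_offline(kids, 2));
    kids[1].offline = 1;
    printf("offline attempt: %d\n", container_offline(kids, 2));
    return 0;
}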
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
index 12b62f2cdb3f..c68e72414a67 100644
--- a/drivers/acpi/custom_method.c
+++ b/drivers/acpi/custom_method.c
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include "internal.h"
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index b55d6a20dc0e..6b1919f6bd82 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -5,7 +5,7 @@
#include <linux/export.h>
#include <linux/init.h>
#include <linux/debugfs.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("debugfs");
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index b3480cf7db1a..c14a00d3dca6 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -256,6 +256,8 @@ int acpi_bus_init_power(struct acpi_device *device)
return -EINVAL;
device->power.state = ACPI_STATE_UNKNOWN;
+ if (!acpi_device_is_present(device))
+ return 0;
result = acpi_device_get_power(device, &state);
if (result)
@@ -302,15 +304,18 @@ int acpi_device_fix_up_power(struct acpi_device *device)
return ret;
}
-int acpi_bus_update_power(acpi_handle handle, int *state_p)
+int acpi_device_update_power(struct acpi_device *device, int *state_p)
{
- struct acpi_device *device;
int state;
int result;
- result = acpi_bus_get_device(handle, &device);
- if (result)
+ if (device->power.state == ACPI_STATE_UNKNOWN) {
+ result = acpi_bus_init_power(device);
+ if (!result && state_p)
+ *state_p = device->power.state;
+
return result;
+ }
result = acpi_device_get_power(device, &state);
if (result)
@@ -338,6 +343,15 @@ int acpi_bus_update_power(acpi_handle handle, int *state_p)
return 0;
}
+
+int acpi_bus_update_power(acpi_handle handle, int *state_p)
+{
+ struct acpi_device *device;
+ int result;
+
+ result = acpi_bus_get_device(handle, &device);
+ return result ? result : acpi_device_update_power(device, state_p);
+}
EXPORT_SYMBOL_GPL(acpi_bus_update_power);
bool acpi_bus_power_manageable(acpi_handle handle)
@@ -713,18 +727,6 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
#endif /* CONFIG_PM_SLEEP */
/**
- * acpi_dev_pm_get_node - Get ACPI device node for the given physical device.
- * @dev: Device to get the ACPI node for.
- */
-struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
-{
- acpi_handle handle = ACPI_HANDLE(dev);
- struct acpi_device *adev;
-
- return handle && !acpi_bus_get_device(handle, &adev) ? adev : NULL;
-}
-
-/**
* acpi_dev_pm_low_power - Put ACPI device into a low-power state.
* @dev: Device to put into a low-power state.
* @adev: ACPI device node corresponding to @dev.
@@ -764,7 +766,7 @@ static int acpi_dev_pm_full_power(struct acpi_device *adev)
*/
int acpi_dev_runtime_suspend(struct device *dev)
{
- struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ struct acpi_device *adev = ACPI_COMPANION(dev);
bool remote_wakeup;
int error;
@@ -795,7 +797,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_runtime_suspend);
*/
int acpi_dev_runtime_resume(struct device *dev)
{
- struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ struct acpi_device *adev = ACPI_COMPANION(dev);
int error;
if (!adev)
@@ -848,7 +850,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume);
*/
int acpi_dev_suspend_late(struct device *dev)
{
- struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ struct acpi_device *adev = ACPI_COMPANION(dev);
u32 target_state;
bool wakeup;
int error;
@@ -880,7 +882,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_suspend_late);
*/
int acpi_dev_resume_early(struct device *dev)
{
- struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ struct acpi_device *adev = ACPI_COMPANION(dev);
int error;
if (!adev)
@@ -971,7 +973,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
*/
int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
- struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ struct acpi_device *adev = ACPI_COMPANION(dev);
if (!adev)
return -ENODEV;
@@ -1003,7 +1005,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
*/
void acpi_dev_pm_detach(struct device *dev, bool power_off)
{
- struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ struct acpi_device *adev = ACPI_COMPANION(dev);
if (adev && dev->pm_domain == &acpi_general_pm_domain) {
dev->pm_domain = NULL;
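Besides switching the PM helpers to ACPI_COMPANION(), device_pm.c gains acpi_device_update_power(), which falls back to acpi_bus_init_power() when the cached state is still ACPI_STATE_UNKNOWN. A simplified model of that flow; the types and helpers are placeholders, not the kernel ones:

/* Sketch of the acpi_device_update_power() fallback. */
#include <stdio.h>

#define ACPI_STATE_D0        0
#define ACPI_STATE_UNKNOWN  255

struct acpi_dev { int power_state; };

static int init_power(struct acpi_dev *d)        /* stand-in */
{
    d->power_state = ACPI_STATE_D0;
    return 0;
}

static int update_power(struct acpi_dev *d, int *state_p)
{
    if (d->power_state == ACPI_STATE_UNKNOWN) {
        int result = init_power(d);

        if (!result && state_p)
            *state_p = d->power_state;
        return result;
    }
    /* ... otherwise query and, if needed, transition as before ... */
    if (state_p)
        *state_p = d->power_state;
    return 0;
}

int main(void)
{
    struct acpi_dev dev = { .power_state = ACPI_STATE_UNKNOWN };
    int state;

    update_power(&dev, &state);
    printf("state after update: D%d\n", state);
    return 0;
}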
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index dcd73ccb514c..e9b3081c4fe9 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -32,8 +32,8 @@
#include <linux/jiffies.h>
#include <linux/stddef.h>
#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+
+#include "internal.h"
#define PREFIX "ACPI: "
@@ -323,14 +323,11 @@ static int dock_present(struct dock_station *ds)
*/
static void dock_create_acpi_device(acpi_handle handle)
{
- struct acpi_device *device;
+ struct acpi_device *device = NULL;
int ret;
- if (acpi_bus_get_device(handle, &device)) {
- /*
- * no device created for this object,
- * so we should create one.
- */
+ acpi_bus_get_device(handle, &device);
+ if (!acpi_device_enumerated(device)) {
ret = acpi_bus_scan(handle);
if (ret)
pr_debug("error adding bus, %x\n", -ret);
@@ -612,7 +609,7 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
static void dock_notify(struct dock_station *ds, u32 event)
{
acpi_handle handle = ds->handle;
- struct acpi_device *ad;
+ struct acpi_device *adev = NULL;
int surprise_removal = 0;
/*
@@ -635,7 +632,8 @@ static void dock_notify(struct dock_station *ds, u32 event)
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
- if (!dock_in_progress(ds) && acpi_bus_get_device(handle, &ad)) {
+ acpi_bus_get_device(handle, &adev);
+ if (!dock_in_progress(ds) && !acpi_device_enumerated(adev)) {
begin_dock(ds);
dock(ds);
if (!dock_present(ds)) {
@@ -898,9 +896,6 @@ find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
void __init acpi_dock_init(void)
{
- if (acpi_disabled)
- return;
-
/* look for dock stations and bays */
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_dock_and_bay, NULL, NULL, NULL);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index ba5b56db9d27..959d41acc108 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -39,10 +39,9 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
-#include <asm/io.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <asm/io.h>
#include "internal.h"
@@ -91,10 +90,6 @@ static unsigned int ec_storm_threshold __read_mostly = 8;
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC(ec_storm_threshold, "Maximum false GPE numbers not considered as GPE storm");
-/* If we find an EC via the ECDT, we need to keep a ptr to its context */
-/* External interfaces use first EC only, so remember */
-typedef int (*acpi_ec_query_func) (void *data);
-
struct acpi_ec_query_handler {
struct list_head node;
acpi_ec_query_func func;
@@ -387,27 +382,6 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
return acpi_ec_transaction(ec, &t);
}
-/*
- * Externally callable EC access functions. For now, assume 1 EC only
- */
-int ec_burst_enable(void)
-{
- if (!first_ec)
- return -ENODEV;
- return acpi_ec_burst_enable(first_ec);
-}
-
-EXPORT_SYMBOL(ec_burst_enable);
-
-int ec_burst_disable(void)
-{
- if (!first_ec)
- return -ENODEV;
- return acpi_ec_burst_disable(first_ec);
-}
-
-EXPORT_SYMBOL(ec_burst_disable);
-
int ec_read(u8 addr, u8 *val)
{
int err;
@@ -779,9 +753,9 @@ static int ec_install_handlers(struct acpi_ec *ec)
pr_err("Fail in evaluating the _REG object"
" of EC device. Broken bios is suspected.\n");
} else {
+ acpi_disable_gpe(NULL, ec->gpe);
acpi_remove_gpe_handler(NULL, ec->gpe,
&acpi_ec_gpe_handler);
- acpi_disable_gpe(NULL, ec->gpe);
return -ENODEV;
}
}
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 4e7b798900f2..b4c216bab22b 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -105,7 +105,7 @@ static const struct file_operations acpi_ec_io_ops = {
.llseek = default_llseek,
};
-int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
+static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
{
struct dentry *dev_dir;
char name[64];
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index cae3b387b867..ef2d730734dc 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index ba3da88cee45..1fb62900f32a 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -29,8 +29,7 @@
#include <linux/types.h>
#include <asm/uaccess.h>
#include <linux/thermal.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#define PREFIX "ACPI: "
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index a22a295edb69..0c789224d40d 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -37,7 +37,7 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
return -ENODEV;
- if (type && type->match && type->find_device) {
+ if (type && type->match && type->find_companion) {
down_write(&bus_type_sem);
list_add_tail(&type->list, &bus_type_list);
up_write(&bus_type_sem);
@@ -82,109 +82,74 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
#define FIND_CHILD_MIN_SCORE 1
#define FIND_CHILD_MAX_SCORE 2
-static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
- void *not_used, void **ret_p)
-{
- struct acpi_device *adev = NULL;
-
- acpi_bus_get_device(handle, &adev);
- if (adev) {
- *ret_p = handle;
- return AE_CTRL_TERMINATE;
- }
- return AE_OK;
-}
-
-static int do_find_child_checks(acpi_handle handle, bool is_bridge)
+static int find_child_checks(struct acpi_device *adev, bool check_children)
{
bool sta_present = true;
unsigned long long sta;
acpi_status status;
- status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+ status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
if (status == AE_NOT_FOUND)
sta_present = false;
else if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
return -ENODEV;
- if (is_bridge) {
- void *test = NULL;
+ if (check_children && list_empty(&adev->children))
+ return -ENODEV;
- /* Check if this object has at least one child device. */
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
- acpi_dev_present, NULL, NULL, &test);
- if (!test)
- return -ENODEV;
- }
return sta_present ? FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
}
-struct find_child_context {
- u64 addr;
- bool is_bridge;
- acpi_handle ret;
- int ret_score;
-};
-
-static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
- void *data, void **not_used)
-{
- struct find_child_context *context = data;
- unsigned long long addr;
- acpi_status status;
- int score;
-
- status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
- if (ACPI_FAILURE(status) || addr != context->addr)
- return AE_OK;
-
- if (!context->ret) {
- /* This is the first matching object. Save its handle. */
- context->ret = handle;
- return AE_OK;
- }
- /*
- * There is more than one matching object with the same _ADR value.
- * That really is unexpected, so we are kind of beyond the scope of the
- * spec here. We have to choose which one to return, though.
- *
- * First, check if the previously found object is good enough and return
- * its handle if so. Second, check the same for the object that we've
- * just found.
- */
- if (!context->ret_score) {
- score = do_find_child_checks(context->ret, context->is_bridge);
- if (score == FIND_CHILD_MAX_SCORE)
- return AE_CTRL_TERMINATE;
- else
- context->ret_score = score;
- }
- score = do_find_child_checks(handle, context->is_bridge);
- if (score == FIND_CHILD_MAX_SCORE) {
- context->ret = handle;
- return AE_CTRL_TERMINATE;
- } else if (score > context->ret_score) {
- context->ret = handle;
- context->ret_score = score;
- }
- return AE_OK;
-}
-
-acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
+struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
+ u64 address, bool check_children)
{
- if (parent) {
- struct find_child_context context = {
- .addr = addr,
- .is_bridge = is_bridge,
- };
-
- acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
- NULL, &context, NULL);
- return context.ret;
+ struct acpi_device *adev, *ret = NULL;
+ int ret_score = 0;
+
+ if (!parent)
+ return NULL;
+
+ list_for_each_entry(adev, &parent->children, node) {
+ unsigned long long addr;
+ acpi_status status;
+ int score;
+
+ status = acpi_evaluate_integer(adev->handle, METHOD_NAME__ADR,
+ NULL, &addr);
+ if (ACPI_FAILURE(status) || addr != address)
+ continue;
+
+ if (!ret) {
+ /* This is the first matching object. Save it. */
+ ret = adev;
+ continue;
+ }
+ /*
+ * There is more than one matching device object with the same
+ * _ADR value. That really is unexpected, so we are kind of
+ * beyond the scope of the spec here. We have to choose which
+ * one to return, though.
+ *
+ * First, check if the previously found object is good enough
+ * and return it if so. Second, do the same for the object that
+ * we've just found.
+ */
+ if (!ret_score) {
+ ret_score = find_child_checks(ret, check_children);
+ if (ret_score == FIND_CHILD_MAX_SCORE)
+ return ret;
+ }
+ score = find_child_checks(adev, check_children);
+ if (score == FIND_CHILD_MAX_SCORE) {
+ return adev;
+ } else if (score > ret_score) {
+ ret = adev;
+ ret_score = score;
+ }
}
- return NULL;
+ return ret;
}
-EXPORT_SYMBOL_GPL(acpi_find_child);
+EXPORT_SYMBOL_GPL(acpi_find_child_device);
static void acpi_physnode_link_name(char *buf, unsigned int node_id)
{
@@ -195,9 +160,8 @@ static void acpi_physnode_link_name(char *buf, unsigned int node_id)
strcpy(buf, PHYSICAL_NODE_STRING);
}
-int acpi_bind_one(struct device *dev, acpi_handle handle)
+int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
{
- struct acpi_device *acpi_dev = NULL;
struct acpi_device_physical_node *physical_node, *pn;
char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
struct list_head *physnode_list;
@@ -205,14 +169,12 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
int retval = -EINVAL;
if (ACPI_COMPANION(dev)) {
- if (handle) {
+ if (acpi_dev) {
dev_warn(dev, "ACPI companion already set\n");
return -EINVAL;
} else {
acpi_dev = ACPI_COMPANION(dev);
}
- } else {
- acpi_bus_get_device(handle, &acpi_dev);
}
if (!acpi_dev)
return -EINVAL;
@@ -322,29 +284,22 @@ int acpi_unbind_one(struct device *dev)
}
EXPORT_SYMBOL_GPL(acpi_unbind_one);
-void acpi_preset_companion(struct device *dev, acpi_handle parent, u64 addr)
-{
- struct acpi_device *adev;
-
- if (!acpi_bus_get_device(acpi_get_child(parent, addr), &adev))
- ACPI_COMPANION_SET(dev, adev);
-}
-EXPORT_SYMBOL_GPL(acpi_preset_companion);
-
static int acpi_platform_notify(struct device *dev)
{
struct acpi_bus_type *type = acpi_get_bus_type(dev);
- acpi_handle handle;
int ret;
ret = acpi_bind_one(dev, NULL);
if (ret && type) {
- ret = type->find_device(dev, &handle);
- if (ret) {
+ struct acpi_device *adev;
+
+ adev = type->find_companion(dev);
+ if (!adev) {
DBG("Unable to get handle for %s\n", dev_name(dev));
+ ret = -ENODEV;
goto out;
}
- ret = acpi_bind_one(dev, handle);
+ ret = acpi_bind_one(dev, adev);
if (ret)
goto out;
}
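acpi_find_child_device() replaces the namespace walk with a scan of the parent's child list, keeping the first _ADR match and only scoring candidates when duplicates appear. A toy version of that selection loop; the device structure and the scoring are simplified stand-ins:

/* Sketch of the duplicate-_ADR selection in acpi_find_child_device(). */
#include <stdio.h>

#define MIN_SCORE 1
#define MAX_SCORE 2

struct child { unsigned long long adr; int enabled; int has_children; };

static int checks(const struct child *c, int check_children)
{
    if (!c->enabled)
        return -1;              /* the real code returns -ENODEV */
    if (check_children && !c->has_children)
        return -1;
    return MAX_SCORE;           /* the real code also handles missing _STA */
}

static const struct child *find(const struct child *v, int n,
                                unsigned long long addr, int check_children)
{
    const struct child *ret = NULL;
    int ret_score = 0, i;

    for (i = 0; i < n; i++) {
        int score;

        if (v[i].adr != addr)
            continue;
        if (!ret) {             /* first match: remember it */
            ret = &v[i];
            continue;
        }
        if (!ret_score) {
            ret_score = checks(ret, check_children);
            if (ret_score == MAX_SCORE)
                return ret;
        }
        score = checks(&v[i], check_children);
        if (score == MAX_SCORE)
            return &v[i];
        if (score > ret_score) {
            ret = &v[i];
            ret_score = score;
        }
    }
    return ret;
}

int main(void)
{
    struct child kids[] = {
        { 0x1, 0, 0 },          /* duplicate _ADR, disabled */
        { 0x1, 1, 1 },          /* duplicate _ADR, good one */
    };
    const struct child *c = find(kids, 2, 0x1, 0);

    printf("picked child #%d\n", (int)(c - kids));
    return 0;
}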
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index 13b1d39d7cdf..aafe3ca829c2 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -25,8 +25,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
#include <acpi/hed.h>
static struct acpi_device_id acpi_hed_ids[] = {
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index a29739c0ba79..dedbb2d802f1 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -28,7 +28,6 @@ int init_acpi_device_notify(void);
int acpi_scan_init(void);
void acpi_pci_root_init(void);
void acpi_pci_link_init(void);
-void acpi_pci_root_hp_init(void);
void acpi_processor_init(void);
void acpi_platform_init(void);
int acpi_sysfs_init(void);
@@ -73,6 +72,9 @@ void acpi_lpss_init(void);
static inline void acpi_lpss_init(void) {}
#endif
+bool acpi_queue_hotplug_work(struct work_struct *work);
+bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent);
+
/* --------------------------------------------------------------------------
Device Node Initialization / Removal
-------------------------------------------------------------------------- */
@@ -85,9 +87,9 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
int type, unsigned long long sta);
void acpi_device_add_finalize(struct acpi_device *device);
void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
-int acpi_bind_one(struct device *dev, acpi_handle handle);
+int acpi_bind_one(struct device *dev, struct acpi_device *adev);
int acpi_unbind_one(struct device *dev);
-void acpi_bus_device_eject(void *data, u32 ost_src);
+bool acpi_device_is_present(struct acpi_device *adev);
/* --------------------------------------------------------------------------
Power Resource
@@ -105,6 +107,8 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
int acpi_power_on_resources(struct acpi_device *device, int state);
int acpi_power_transition(struct acpi_device *device, int state);
+int acpi_device_update_power(struct acpi_device *device, int *state_p);
+
int acpi_wakeup_device_init(void);
void acpi_early_processor_set_pdc(void);
@@ -127,12 +131,21 @@ struct acpi_ec {
extern struct acpi_ec *first_ec;
+/* If we find an EC via the ECDT, we need to keep a ptr to its context */
+/* External interfaces use first EC only, so remember */
+typedef int (*acpi_ec_query_func) (void *data);
+
int acpi_ec_init(void);
int acpi_ec_ecdt_probe(void);
int acpi_boot_ec_enable(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
void acpi_ec_unblock_transactions_early(void);
+int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
+ acpi_handle handle, acpi_ec_query_func func,
+ void *data);
+void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
+
/*--------------------------------------------------------------------------
Suspend/Resume
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index a2343a1d9e0b..9e6816ef280a 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -29,7 +29,6 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/numa.h>
-#include <acpi/acpi_bus.h>
#define PREFIX "ACPI: "
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index 386a9fe497b4..de4fe03873c5 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -12,7 +12,8 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/acpi.h>
-#include <linux/acpi_io.h>
+
+#include "internal.h"
/* ACPI NVS regions, APEI may use it */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 54a20ff4b864..fc1aa7909690 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -39,7 +39,6 @@
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
-#include <linux/acpi_io.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
@@ -49,9 +48,6 @@
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/processor.h>
#include "internal.h"
#define _COMPONENT ACPI_OS_SERVICES
@@ -544,7 +540,7 @@ static u64 acpi_tables_addr;
static int all_tables_size;
/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
-u8 __init acpi_table_checksum(u8 *buffer, u32 length)
+static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
{
u8 sum = 0;
u8 *end = buffer + length;
@@ -1215,6 +1211,10 @@ acpi_status acpi_hotplug_execute(acpi_hp_callback func, void *data, u32 src)
return AE_OK;
}
+bool acpi_queue_hotplug_work(struct work_struct *work)
+{
+ return queue_work(kacpi_hotplug_wq, work);
+}
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
@@ -1282,7 +1282,7 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
jiffies = MAX_SCHEDULE_TIMEOUT;
else
jiffies = msecs_to_jiffies(timeout);
-
+
ret = down_timeout(sem, jiffies);
if (ret)
status = AE_TIME;
@@ -1794,7 +1794,7 @@ acpi_status __init acpi_os_initialize1(void)
{
kacpid_wq = alloc_workqueue("kacpid", 0, 1);
kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
- kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
+ kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
BUG_ON(!kacpi_hotplug_wq);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 41c5e1b799ef..52d45ea2bc4f 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -37,8 +37,6 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
#define PREFIX "ACPI: "
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 2652a614deeb..9418c7a1f786 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -39,9 +39,9 @@
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include "internal.h"
#define PREFIX "ACPI: "
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 20360e480bd8..c1c4102e6478 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -35,9 +35,7 @@
#include <linux/pci-aspm.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/apei.h>
+#include <acpi/apei.h> /* for acpi_hest_init() */
#include "internal.h"
@@ -51,6 +49,12 @@ static int acpi_pci_root_add(struct acpi_device *device,
const struct acpi_device_id *not_used);
static void acpi_pci_root_remove(struct acpi_device *device);
+static int acpi_pci_root_scan_dependent(struct acpi_device *adev)
+{
+ acpiphp_check_host_bridge(adev->handle);
+ return 0;
+}
+
#define ACPI_PCIE_REQ_SUPPORT (OSC_PCI_EXT_CONFIG_SUPPORT \
| OSC_PCI_ASPM_SUPPORT \
| OSC_PCI_CLOCK_PM_SUPPORT \
@@ -66,7 +70,8 @@ static struct acpi_scan_handler pci_root_handler = {
.attach = acpi_pci_root_add,
.detach = acpi_pci_root_remove,
.hotplug = {
- .ignore = true,
+ .enabled = true,
+ .scan_dependent = acpi_pci_root_scan_dependent,
},
};
@@ -599,7 +604,9 @@ static int acpi_pci_root_add(struct acpi_device *device,
pci_assign_unassigned_root_bus_resources(root->bus);
}
+ pci_lock_rescan_remove();
pci_bus_add_devices(root->bus);
+ pci_unlock_rescan_remove();
return 1;
end:
@@ -611,6 +618,8 @@ static void acpi_pci_root_remove(struct acpi_device *device)
{
struct acpi_pci_root *root = acpi_driver_data(device);
+ pci_lock_rescan_remove();
+
pci_stop_root_bus(root->bus);
device_set_run_wake(root->bus->bridge, false);
@@ -618,122 +627,17 @@ static void acpi_pci_root_remove(struct acpi_device *device)
pci_remove_root_bus(root->bus);
+ pci_unlock_rescan_remove();
+
kfree(root);
}
void __init acpi_pci_root_init(void)
{
acpi_hest_init();
-
- if (!acpi_pci_disabled) {
- pci_acpi_crs_quirks();
- acpi_scan_add_handler(&pci_root_handler);
- }
-}
-/* Support root bridge hotplug */
-
-static void handle_root_bridge_insertion(acpi_handle handle)
-{
- struct acpi_device *device;
-
- if (!acpi_bus_get_device(handle, &device)) {
- dev_printk(KERN_DEBUG, &device->dev,
- "acpi device already exists; ignoring notify\n");
+ if (acpi_pci_disabled)
return;
- }
-
- if (acpi_bus_scan(handle))
- acpi_handle_err(handle, "cannot add bridge to acpi list\n");
-}
-
-static void hotplug_event_root(void *data, u32 type)
-{
- acpi_handle handle = data;
- struct acpi_pci_root *root;
-
- acpi_scan_lock_acquire();
-
- root = acpi_pci_find_root(handle);
-
- switch (type) {
- case ACPI_NOTIFY_BUS_CHECK:
- /* bus enumerate */
- acpi_handle_printk(KERN_DEBUG, handle,
- "Bus check notify on %s\n", __func__);
- if (root)
- acpiphp_check_host_bridge(handle);
- else
- handle_root_bridge_insertion(handle);
-
- break;
-
- case ACPI_NOTIFY_DEVICE_CHECK:
- /* device check */
- acpi_handle_printk(KERN_DEBUG, handle,
- "Device check notify on %s\n", __func__);
- if (!root)
- handle_root_bridge_insertion(handle);
- break;
-
- case ACPI_NOTIFY_EJECT_REQUEST:
- /* request device eject */
- acpi_handle_printk(KERN_DEBUG, handle,
- "Device eject notify on %s\n", __func__);
- if (!root)
- break;
-
- get_device(&root->device->dev);
-
- acpi_scan_lock_release();
-
- acpi_bus_device_eject(root->device, ACPI_NOTIFY_EJECT_REQUEST);
- return;
- default:
- acpi_handle_warn(handle,
- "notify_handler: unknown event type 0x%x\n",
- type);
- break;
- }
-
- acpi_scan_lock_release();
-}
-
-static void handle_hotplug_event_root(acpi_handle handle, u32 type,
- void *context)
-{
- acpi_hotplug_execute(hotplug_event_root, handle, type);
-}
-
-static acpi_status __init
-find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- acpi_status status;
- int *count = (int *)context;
-
- if (!acpi_is_root_bridge(handle))
- return AE_OK;
-
- (*count)++;
-
- status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_root, NULL);
- if (ACPI_FAILURE(status))
- acpi_handle_printk(KERN_DEBUG, handle,
- "notify handler is not installed, exit status: %u\n",
- (unsigned int)status);
- else
- acpi_handle_printk(KERN_DEBUG, handle,
- "notify handler is installed\n");
-
- return AE_OK;
-}
-
-void __init acpi_pci_root_hp_init(void)
-{
- int num = 0;
-
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, find_root_bridges, NULL, &num, NULL);
- printk(KERN_DEBUG "Found %d acpi root devices\n", num);
+ pci_acpi_crs_quirks();
+ acpi_scan_add_handler_with_hotplug(&pci_root_handler, "pci_root");
}
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index d678a180ca2a..139d9e479370 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -35,6 +35,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/pci-acpi.h>
static bool debug;
static int check_sta_before_sun;
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index c2ad391d8041..ad7da686e6e6 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -42,8 +42,7 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include "sleep.h"
#include "internal.h"
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 6a5b152ad4d0..75c28eae8860 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -3,12 +3,11 @@
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/bcd.h>
+#include <linux/acpi.h>
#include <asm/uaccess.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
#include "sleep.h"
+#include "internal.h"
#define _COMPONENT ACPI_SYSTEM_COMPONENT
@@ -61,7 +60,7 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
seq_printf(seq, "%c%-8s %s:%s\n",
dev->wakeup.flags.run_wake ? '*' : ' ',
(device_may_wakeup(&dev->dev) ||
- (ldev && device_may_wakeup(ldev))) ?
+ device_may_wakeup(ldev)) ?
"enabled" : "disabled",
ldev->bus ? ldev->bus->name :
"no-bus", dev_name(ldev));
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index b3171f30b319..a4eea9a508d3 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -10,8 +10,7 @@
#include <linux/export.h>
#include <linux/dmi.h>
#include <linux/slab.h>
-
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <acpi/processor.h>
#include "internal.h"
@@ -45,13 +44,13 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
(struct acpi_madt_local_apic *)entry;
if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
- return 0;
+ return -ENODEV;
if (lapic->processor_id != acpi_id)
- return 0;
+ return -EINVAL;
*apic_id = lapic->id;
- return 1;
+ return 0;
}
static int map_x2apic_id(struct acpi_subtable_header *entry,
@@ -61,14 +60,14 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
(struct acpi_madt_local_x2apic *)entry;
if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
- return 0;
+ return -ENODEV;
if (device_declaration && (apic->uid == acpi_id)) {
*apic_id = apic->local_apic_id;
- return 1;
+ return 0;
}
- return 0;
+ return -EINVAL;
}
static int map_lsapic_id(struct acpi_subtable_header *entry,
@@ -78,16 +77,16 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
(struct acpi_madt_local_sapic *)entry;
if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
- return 0;
+ return -ENODEV;
if (device_declaration) {
if ((entry->length < 16) || (lsapic->uid != acpi_id))
- return 0;
+ return -EINVAL;
} else if (lsapic->processor_id != acpi_id)
- return 0;
+ return -EINVAL;
*apic_id = (lsapic->id << 8) | lsapic->eid;
- return 1;
+ return 0;
}
static int map_madt_entry(int type, u32 acpi_id)
@@ -117,13 +116,13 @@ static int map_madt_entry(int type, u32 acpi_id)
struct acpi_subtable_header *header =
(struct acpi_subtable_header *)entry;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
- if (map_lapic_id(header, acpi_id, &apic_id))
+ if (!map_lapic_id(header, acpi_id, &apic_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
- if (map_x2apic_id(header, type, acpi_id, &apic_id))
+ if (!map_x2apic_id(header, type, acpi_id, &apic_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
- if (map_lsapic_id(header, type, acpi_id, &apic_id))
+ if (!map_lsapic_id(header, type, acpi_id, &apic_id))
break;
}
entry += header->length;
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 146ab7e2b81d..c1c35623550f 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -224,9 +224,9 @@ static int __acpi_processor_start(struct acpi_device *device)
static int acpi_processor_start(struct device *dev)
{
- struct acpi_device *device;
+ struct acpi_device *device = ACPI_COMPANION(dev);
- if (acpi_bus_get_device(ACPI_HANDLE(dev), &device))
+ if (!device)
return -ENODEV;
return __acpi_processor_start(device);
@@ -234,10 +234,10 @@ static int acpi_processor_start(struct device *dev)
static int acpi_processor_stop(struct device *dev)
{
- struct acpi_device *device;
+ struct acpi_device *device = ACPI_COMPANION(dev);
struct acpi_processor *pr;
- if (acpi_bus_get_device(ACPI_HANDLE(dev), &device))
+ if (!device)
return 0;
acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 644516d9bde6..3dca36d4ad26 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -35,6 +35,7 @@
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/syscore_ops.h>
+#include <acpi/processor.h>
/*
* Include the apic definitions for x86 to have the APIC timer related defines
@@ -46,9 +47,6 @@
#include <asm/apic.h>
#endif
-#include <acpi/acpi_bus.h>
-#include <acpi/processor.h>
-
#define PREFIX "ACPI: "
#define ACPI_PROCESSOR_CLASS "processor"
@@ -213,7 +211,7 @@ static int acpi_processor_suspend(void)
static void acpi_processor_resume(void)
{
- u32 resumed_bm_rld;
+ u32 resumed_bm_rld = 0;
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
if (resumed_bm_rld == saved_bm_rld)
@@ -598,7 +596,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
case ACPI_STATE_C2:
if (!cx->address)
break;
- cx->valid = 1;
+ cx->valid = 1;
break;
case ACPI_STATE_C3:
@@ -727,11 +725,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
if (unlikely(!pr))
return -EINVAL;
- if (cx->entry_method == ACPI_CSTATE_FFH) {
- if (current_set_polling_and_test())
- return -EINVAL;
- }
-
lapic_timer_state_broadcast(pr, cx, 1);
acpi_idle_do_entry(cx);
@@ -785,10 +778,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
if (unlikely(!pr))
return -EINVAL;
- if (cx->entry_method == ACPI_CSTATE_FFH) {
- if (current_set_polling_and_test())
- return -EINVAL;
- }
+#ifdef CONFIG_HOTPLUG_CPU
+ if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+ !pr->flags.has_cst &&
+ !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
+ return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
+#endif
/*
* Must be done before busmaster disable as we might need to
@@ -831,6 +826,13 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
if (unlikely(!pr))
return -EINVAL;
+#ifdef CONFIG_HOTPLUG_CPU
+ if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+ !pr->flags.has_cst &&
+ !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
+ return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
+#endif
+
if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
if (drv->safe_state_index >= 0) {
return drv->states[drv->safe_state_index].enter(dev,
@@ -841,11 +843,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
}
}
- if (cx->entry_method == ACPI_CSTATE_FFH) {
- if (current_set_polling_and_test())
- return -EINVAL;
- }
-
acpi_unlazy_tlb(smp_processor_id());
/* Tell the scheduler that we are going deep-idle: */
@@ -932,12 +929,6 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
if (!cx->valid)
continue;
-#ifdef CONFIG_HOTPLUG_CPU
- if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
- !pr->flags.has_cst &&
- !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
- continue;
-#endif
per_cpu(acpi_cstate[count], dev->cpu) = cx;
count++;
@@ -945,8 +936,6 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
break;
}
- dev->state_count = count;
-
if (!count)
return -EINVAL;
@@ -987,13 +976,6 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
if (!cx->valid)
continue;
-#ifdef CONFIG_HOTPLUG_CPU
- if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
- !pr->flags.has_cst &&
- !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
- continue;
-#endif
-
state = &drv->states[count];
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 60a7c28fc167..ff90054f04fd 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -31,15 +31,12 @@
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
-
+#include <linux/acpi.h>
+#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/processor.h>
-
#define PREFIX "ACPI: "
#define ACPI_PROCESSOR_CLASS "processor"
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index d1d2e7fb5b30..e003663b2f8e 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -30,12 +30,9 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
-
-#include <asm/uaccess.h>
-
-#include <acpi/acpi_bus.h>
+#include <linux/acpi.h>
#include <acpi/processor.h>
-#include <acpi/acpi_drivers.h>
+#include <asm/uaccess.h>
#define PREFIX "ACPI: "
@@ -186,14 +183,14 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
#endif
-/* thermal coolign device callbacks */
+/* thermal cooling device callbacks */
static int acpi_processor_max_state(struct acpi_processor *pr)
{
int max_state = 0;
/*
* There exists four states according to
- * cpufreq_thermal_reduction_ptg. 0, 1, 2, 3
+ * cpufreq_thermal_reduction_pctg. 0, 1, 2, 3
*/
max_state += cpufreq_get_max_state(pr->id);
if (pr->flags.throttling)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index e7dd2c1fee79..28baa05b8018 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -32,14 +32,11 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
-
+#include <linux/acpi.h>
+#include <acpi/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/processor.h>
-
#define PREFIX "ACPI: "
#define ACPI_PROCESSOR_CLASS "processor"
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index b78bc605837e..26e5b5060523 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -8,8 +8,7 @@
* the Free Software Foundation version 2.
*/
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fd39459926b1..57b053f424d1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -12,13 +12,12 @@
#include <linux/dmi.h>
#include <linux/nls.h>
-#include <acpi/acpi_drivers.h>
+#include <asm/pgtable.h>
#include "internal.h"
#define _COMPONENT ACPI_BUS_COMPONENT
ACPI_MODULE_NAME("scan");
-#define STRUCT_TO_INT(s) (*((int*)&s))
extern struct acpi_device *acpi_root;
#define ACPI_BUS_CLASS "system_bus"
@@ -27,6 +26,8 @@ extern struct acpi_device *acpi_root;
#define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent)
+#define INVALID_ACPI_HANDLE ((acpi_handle)empty_zero_page)
+
/*
* If set, devices will be hot-removed even if they cannot be put offline
* gracefully (from the kernel's standpoint).
@@ -85,6 +86,9 @@ int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
* Creates hid/cid(s) string needed for modalias and uevent
* e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
* char *modalias: "acpi:IBM0001:ACPI0001"
+ * Return: 0: no _HID and no _CID
+ * -EINVAL: output error
+ * -ENOMEM: output is truncated
*/
static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
int size)
@@ -101,8 +105,10 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
count = snprintf(&modalias[len], size, "%s:", id->id);
- if (count < 0 || count >= size)
- return -EINVAL;
+ if (count < 0)
+ return -EINVAL;
+ if (count >= size)
+ return -ENOMEM;
len += count;
size -= count;
}
@@ -111,20 +117,96 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
return len;
}
+/*
+ * Creates uevent modalias field for ACPI enumerated devices.
+ * Because the other buses do not support ACPI HIDs & CIDs.
+ * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
+ * "acpi:IBM0001:ACPI0001"
+ */
+int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct acpi_device *acpi_dev;
+ int len;
+
+ acpi_dev = ACPI_COMPANION(dev);
+ if (!acpi_dev)
+ return -ENODEV;
+
+ /* Fall back to bus specific way of modalias exporting */
+ if (list_empty(&acpi_dev->pnp.ids))
+ return -ENODEV;
+
+ if (add_uevent_var(env, "MODALIAS="))
+ return -ENOMEM;
+ len = create_modalias(acpi_dev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
+ if (len <= 0)
+ return len;
+ env->buflen += len;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
+
+/*
+ * Creates modalias sysfs attribute for ACPI enumerated devices.
+ * Because the other buses do not support ACPI HIDs & CIDs.
+ * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
+ * "acpi:IBM0001:ACPI0001"
+ */
+int acpi_device_modalias(struct device *dev, char *buf, int size)
+{
+ struct acpi_device *acpi_dev;
+ int len;
+
+ acpi_dev = ACPI_COMPANION(dev);
+ if (!acpi_dev)
+ return -ENODEV;
+
+ /* Fall back to bus specific way of modalias exporting */
+ if (list_empty(&acpi_dev->pnp.ids))
+ return -ENODEV;
+
+ len = create_modalias(acpi_dev, buf, size - 1);
+ if (len <= 0)
+ return len;
+ buf[len++] = '\n';
+ return len;
+}
+EXPORT_SYMBOL_GPL(acpi_device_modalias);
+
static ssize_t
acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
int len;
- /* Device has no HID and no CID or string is >1024 */
len = create_modalias(acpi_dev, buf, 1024);
if (len <= 0)
- return 0;
+ return len;
buf[len++] = '\n';
return len;
}
static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
+bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
+{
+ struct acpi_device_physical_node *pn;
+ bool offline = true;
+
+ mutex_lock(&adev->physical_node_lock);
+
+ list_for_each_entry(pn, &adev->physical_node_list, node)
+ if (device_supports_offline(pn->dev) && !pn->dev->offline) {
+ if (uevent)
+ kobject_uevent(&pn->dev->kobj, KOBJ_CHANGE);
+
+ offline = false;
+ break;
+ }
+
+ mutex_unlock(&adev->physical_node_lock);
+ return offline;
+}
+
static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data,
void **ret_p)
{
@@ -195,19 +277,11 @@ static acpi_status acpi_bus_online(acpi_handle handle, u32 lvl, void *data,
return AE_OK;
}
-static int acpi_scan_hot_remove(struct acpi_device *device)
+static int acpi_scan_try_to_offline(struct acpi_device *device)
{
acpi_handle handle = device->handle;
- struct device *errdev;
+ struct device *errdev = NULL;
acpi_status status;
- unsigned long long sta;
-
- /* If there is no handle, the device node has been unregistered. */
- if (!handle) {
- dev_dbg(&device->dev, "ACPI handle missing\n");
- put_device(&device->dev);
- return -EINVAL;
- }
/*
* Carry out two passes here and ignore errors in the first pass,
@@ -218,7 +292,6 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
*
* If the first pass is successful, the second one isn't needed, though.
*/
- errdev = NULL;
status = acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
NULL, acpi_bus_offline, (void *)false,
(void **)&errdev);
@@ -226,7 +299,6 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
dev_warn(errdev, "Offline disabled.\n");
acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
acpi_bus_online, NULL, NULL, NULL);
- put_device(&device->dev);
return -EPERM;
}
acpi_bus_offline(handle, 0, (void *)false, (void **)&errdev);
@@ -245,20 +317,32 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
acpi_walk_namespace(ACPI_TYPE_ANY, handle,
ACPI_UINT32_MAX, acpi_bus_online,
NULL, NULL, NULL);
- put_device(&device->dev);
return -EBUSY;
}
}
+ return 0;
+}
+
+static int acpi_scan_hot_remove(struct acpi_device *device)
+{
+ acpi_handle handle = device->handle;
+ unsigned long long sta;
+ acpi_status status;
+
+ if (device->handler->hotplug.demand_offline && !acpi_force_hot_remove) {
+ if (!acpi_scan_is_offline(device, true))
+ return -EBUSY;
+ } else {
+ int error = acpi_scan_try_to_offline(device);
+ if (error)
+ return error;
+ }
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Hot-removing device %s...\n", dev_name(&device->dev)));
acpi_bus_trim(device);
- /* Device node has been unregistered. */
- put_device(&device->dev);
- device = NULL;
-
acpi_evaluate_lck(handle, 0);
/*
* TBD: _EJD support.
@@ -285,115 +369,126 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
return 0;
}
-void acpi_bus_device_eject(void *data, u32 ost_src)
+static int acpi_scan_device_not_present(struct acpi_device *adev)
{
- struct acpi_device *device = data;
- acpi_handle handle = device->handle;
- u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
- int error;
+ if (!acpi_device_enumerated(adev)) {
+ dev_warn(&adev->dev, "Still not present\n");
+ return -EALREADY;
+ }
+ acpi_bus_trim(adev);
+ return 0;
+}
- lock_device_hotplug();
- mutex_lock(&acpi_scan_lock);
+static int acpi_scan_device_check(struct acpi_device *adev)
+{
+ int error;
- if (ost_src == ACPI_NOTIFY_EJECT_REQUEST)
- acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
- ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
+ acpi_bus_get_status(adev);
+ if (adev->status.present || adev->status.functional) {
+ /*
+ * This function is only called for device objects for which
+ * matching scan handlers exist. The only situation in which
+ * the scan handler is not attached to this device object yet
+ * is when the device has just appeared (either it wasn't
+ * present at all before or it was removed and then added
+ * again).
+ */
+ if (adev->handler) {
+ dev_warn(&adev->dev, "Already enumerated\n");
+ return -EALREADY;
+ }
+ error = acpi_bus_scan(adev->handle);
+ if (error) {
+ dev_warn(&adev->dev, "Namespace scan failure\n");
+ return error;
+ }
+ if (!adev->handler) {
+ dev_warn(&adev->dev, "Enumeration failure\n");
+ error = -ENODEV;
+ }
+ } else {
+ error = acpi_scan_device_not_present(adev);
+ }
+ return error;
+}
- if (device->handler && device->handler->hotplug.mode == AHM_CONTAINER)
- kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
+static int acpi_scan_bus_check(struct acpi_device *adev)
+{
+ struct acpi_scan_handler *handler = adev->handler;
+ struct acpi_device *child;
+ int error;
- error = acpi_scan_hot_remove(device);
- if (error == -EPERM) {
- goto err_support;
- } else if (error) {
- goto err_out;
+ acpi_bus_get_status(adev);
+ if (!(adev->status.present || adev->status.functional)) {
+ acpi_scan_device_not_present(adev);
+ return 0;
}
+ if (handler && handler->hotplug.scan_dependent)
+ return handler->hotplug.scan_dependent(adev);
- out:
- mutex_unlock(&acpi_scan_lock);
- unlock_device_hotplug();
- return;
-
- err_support:
- ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
- err_out:
- acpi_evaluate_hotplug_ost(handle, ost_src, ost_code, NULL);
- goto out;
+ error = acpi_bus_scan(adev->handle);
+ if (error) {
+ dev_warn(&adev->dev, "Namespace scan failure\n");
+ return error;
+ }
+ list_for_each_entry(child, &adev->children, node) {
+ error = acpi_scan_bus_check(child);
+ if (error)
+ return error;
+ }
+ return 0;
}
-static void acpi_scan_bus_device_check(void *data, u32 ost_source)
+static void acpi_device_hotplug(void *data, u32 src)
{
- acpi_handle handle = data;
- struct acpi_device *device = NULL;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
+ struct acpi_device *adev = data;
int error;
lock_device_hotplug();
mutex_lock(&acpi_scan_lock);
- if (ost_source != ACPI_NOTIFY_BUS_CHECK) {
- acpi_bus_get_device(handle, &device);
- if (device) {
- dev_warn(&device->dev, "Attempt to re-insert\n");
- goto out;
- }
- }
- error = acpi_bus_scan(handle);
- if (error) {
- acpi_handle_warn(handle, "Namespace scan failure\n");
- goto out;
- }
- error = acpi_bus_get_device(handle, &device);
- if (error) {
- acpi_handle_warn(handle, "Missing device node object\n");
+ /*
+ * The device object's ACPI handle cannot become invalid as long as we
+ * are holding acpi_scan_lock, but it may have become invalid before
+ * that lock was acquired.
+ */
+ if (adev->handle == INVALID_ACPI_HANDLE)
goto out;
- }
- ost_code = ACPI_OST_SC_SUCCESS;
- if (device->handler && device->handler->hotplug.mode == AHM_CONTAINER)
- kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
- out:
- acpi_evaluate_hotplug_ost(handle, ost_source, ost_code, NULL);
- mutex_unlock(&acpi_scan_lock);
- unlock_device_hotplug();
-}
-
-static void acpi_hotplug_unsupported(acpi_handle handle, u32 type)
-{
- u32 ost_status;
-
- switch (type) {
+ switch (src) {
case ACPI_NOTIFY_BUS_CHECK:
- acpi_handle_debug(handle,
- "ACPI_NOTIFY_BUS_CHECK event: unsupported\n");
- ost_status = ACPI_OST_SC_INSERT_NOT_SUPPORTED;
+ error = acpi_scan_bus_check(adev);
break;
case ACPI_NOTIFY_DEVICE_CHECK:
- acpi_handle_debug(handle,
- "ACPI_NOTIFY_DEVICE_CHECK event: unsupported\n");
- ost_status = ACPI_OST_SC_INSERT_NOT_SUPPORTED;
+ error = acpi_scan_device_check(adev);
break;
case ACPI_NOTIFY_EJECT_REQUEST:
- acpi_handle_debug(handle,
- "ACPI_NOTIFY_EJECT_REQUEST event: unsupported\n");
- ost_status = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
+ case ACPI_OST_EC_OSPM_EJECT:
+ error = acpi_scan_hot_remove(adev);
break;
default:
- /* non-hotplug event; possibly handled by other handler */
- return;
+ error = -EINVAL;
+ break;
}
+ if (!error)
+ ost_code = ACPI_OST_SC_SUCCESS;
- acpi_evaluate_hotplug_ost(handle, type, ost_status, NULL);
+ out:
+ acpi_evaluate_hotplug_ost(adev->handle, src, ost_code, NULL);
+ put_device(&adev->dev);
+ mutex_unlock(&acpi_scan_lock);
+ unlock_device_hotplug();
}
static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
{
- struct acpi_scan_handler *handler = data;
+ u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
struct acpi_device *adev;
acpi_status status;
- if (!handler->hotplug.enabled)
- return acpi_hotplug_unsupported(handle, type);
+ if (acpi_bus_get_device(handle, &adev))
+ goto err_out;
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
@@ -404,27 +499,30 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
break;
case ACPI_NOTIFY_EJECT_REQUEST:
acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
- if (acpi_bus_get_device(handle, &adev))
+ if (!adev->handler)
goto err_out;
- get_device(&adev->dev);
- status = acpi_hotplug_execute(acpi_bus_device_eject, adev, type);
- if (ACPI_SUCCESS(status))
- return;
-
- put_device(&adev->dev);
- goto err_out;
+ if (!adev->handler->hotplug.enabled) {
+ acpi_handle_err(handle, "Eject disabled\n");
+ ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
+ goto err_out;
+ }
+ acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
+ ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
+ break;
default:
/* non-hotplug event; possibly handled by other handler */
return;
}
- status = acpi_hotplug_execute(acpi_scan_bus_device_check, handle, type);
+ get_device(&adev->dev);
+ status = acpi_hotplug_execute(acpi_device_hotplug, adev, type);
if (ACPI_SUCCESS(status))
return;
+ put_device(&adev->dev);
+
err_out:
- acpi_evaluate_hotplug_ost(handle, type,
- ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
+ acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
}
static ssize_t real_power_state_show(struct device *dev,
@@ -475,7 +573,7 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
get_device(&acpi_device->dev);
- status = acpi_hotplug_execute(acpi_bus_device_eject, acpi_device,
+ status = acpi_hotplug_execute(acpi_device_hotplug, acpi_device,
ACPI_OST_EC_OSPM_EJECT);
if (ACPI_SUCCESS(status))
return count;
@@ -567,6 +665,20 @@ acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ char *buf) {
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ acpi_status status;
+ unsigned long long sta;
+
+ status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return sprintf(buf, "%llu\n", sta);
+}
+static DEVICE_ATTR_RO(status);
+
static int acpi_device_setup_files(struct acpi_device *dev)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
@@ -622,6 +734,12 @@ static int acpi_device_setup_files(struct acpi_device *dev)
dev->pnp.sun = (unsigned long)-1;
}
+ if (acpi_has_method(dev->handle, "_STA")) {
+ result = device_create_file(&dev->dev, &dev_attr_status);
+ if (result)
+ goto end;
+ }
+
/*
* If device has _EJ0, 'eject' file is created that is used to trigger
* hot-removal function from userland.
@@ -677,6 +795,8 @@ static void acpi_device_remove_files(struct acpi_device *dev)
device_remove_file(&dev->dev, &dev_attr_adr);
device_remove_file(&dev->dev, &dev_attr_modalias);
device_remove_file(&dev->dev, &dev_attr_hid);
+ if (acpi_has_method(dev->handle, "_STA"))
+ device_remove_file(&dev->dev, &dev_attr_status);
if (dev->handle)
device_remove_file(&dev->dev, &dev_attr_path);
}
@@ -782,8 +902,8 @@ static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
return -ENOMEM;
len = create_modalias(acpi_dev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
- if (len >= (sizeof(env->buf) - env->buflen))
- return -ENOMEM;
+ if (len <= 0)
+ return len;
env->buflen += len;
return 0;
}
@@ -907,9 +1027,91 @@ struct bus_type acpi_bus_type = {
.uevent = acpi_device_uevent,
};
-static void acpi_bus_data_handler(acpi_handle handle, void *context)
+static void acpi_device_del(struct acpi_device *device)
+{
+ mutex_lock(&acpi_device_lock);
+ if (device->parent)
+ list_del(&device->node);
+
+ list_del(&device->wakeup_list);
+ mutex_unlock(&acpi_device_lock);
+
+ acpi_power_add_remove_device(device, false);
+ acpi_device_remove_files(device);
+ if (device->remove)
+ device->remove(device);
+
+ device_del(&device->dev);
+}
+
+static LIST_HEAD(acpi_device_del_list);
+static DEFINE_MUTEX(acpi_device_del_lock);
+
+static void acpi_device_del_work_fn(struct work_struct *work_not_used)
+{
+ for (;;) {
+ struct acpi_device *adev;
+
+ mutex_lock(&acpi_device_del_lock);
+
+ if (list_empty(&acpi_device_del_list)) {
+ mutex_unlock(&acpi_device_del_lock);
+ break;
+ }
+ adev = list_first_entry(&acpi_device_del_list,
+ struct acpi_device, del_list);
+ list_del(&adev->del_list);
+
+ mutex_unlock(&acpi_device_del_lock);
+
+ acpi_device_del(adev);
+ /*
+ * Drop references to all power resources that might have been
+ * used by the device.
+ */
+ acpi_power_transition(adev, ACPI_STATE_D3_COLD);
+ put_device(&adev->dev);
+ }
+}
+
+/**
+ * acpi_scan_drop_device - Drop an ACPI device object.
+ * @handle: Handle of an ACPI namespace node, not used.
+ * @context: Address of the ACPI device object to drop.
+ *
+ * This is invoked by acpi_ns_delete_node() during the removal of the ACPI
+ * namespace node the device object pointed to by @context is attached to.
+ *
+ * The unregistration is carried out asynchronously to avoid running
+ * acpi_device_del() under ACPICA's namespace mutex, and the list is used to
+ * ensure the correct ordering (the device objects must be unregistered in the
+ * same order in which the corresponding namespace nodes are deleted).
+ */
+static void acpi_scan_drop_device(acpi_handle handle, void *context)
{
- /* Intentionally empty. */
+ static DECLARE_WORK(work, acpi_device_del_work_fn);
+ struct acpi_device *adev = context;
+
+ mutex_lock(&acpi_device_del_lock);
+
+ /*
+ * Use the ACPI hotplug workqueue which is ordered, so this work item
+ * won't run after any hotplug work items submitted subsequently. That
+ * prevents attempts to register device objects identical to those being
+ * deleted from happening concurrently (such attempts result from
+ * hotplug events handled via the ACPI hotplug workqueue). It also will
+ * run after all of the work items submitted previously, which helps
+ * those work items to ensure that they are not accessing stale device
+ * objects.
+ */
+ if (list_empty(&acpi_device_del_list))
+ acpi_queue_hotplug_work(&work);
+
+ list_add_tail(&adev->del_list, &acpi_device_del_list);
+ /* Make acpi_ns_validate_handle() return NULL for this handle. */
+ adev->handle = INVALID_ACPI_HANDLE;
+
+ mutex_unlock(&acpi_device_del_lock);
}
int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
@@ -919,7 +1121,7 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
if (!device)
return -EINVAL;
- status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device);
+ status = acpi_get_data(handle, acpi_scan_drop_device, (void **)device);
if (ACPI_FAILURE(status) || !*device) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
handle));
@@ -939,7 +1141,7 @@ int acpi_device_add(struct acpi_device *device,
if (device->handle) {
acpi_status status;
- status = acpi_attach_data(device->handle, acpi_bus_data_handler,
+ status = acpi_attach_data(device->handle, acpi_scan_drop_device,
device);
if (ACPI_FAILURE(status)) {
acpi_handle_err(device->handle,
@@ -957,6 +1159,7 @@ int acpi_device_add(struct acpi_device *device,
INIT_LIST_HEAD(&device->node);
INIT_LIST_HEAD(&device->wakeup_list);
INIT_LIST_HEAD(&device->physical_node_list);
+ INIT_LIST_HEAD(&device->del_list);
mutex_init(&device->physical_node_lock);
new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
@@ -1020,37 +1223,10 @@ int acpi_device_add(struct acpi_device *device,
mutex_unlock(&acpi_device_lock);
err_detach:
- acpi_detach_data(device->handle, acpi_bus_data_handler);
+ acpi_detach_data(device->handle, acpi_scan_drop_device);
return result;
}
-static void acpi_device_unregister(struct acpi_device *device)
-{
- mutex_lock(&acpi_device_lock);
- if (device->parent)
- list_del(&device->node);
-
- list_del(&device->wakeup_list);
- mutex_unlock(&acpi_device_lock);
-
- acpi_detach_data(device->handle, acpi_bus_data_handler);
-
- acpi_power_add_remove_device(device, false);
- acpi_device_remove_files(device);
- if (device->remove)
- device->remove(device);
-
- device_del(&device->dev);
- /*
- * Transition the device to D3cold to drop the reference counts of all
- * power resources the device depends on and turn off the ones that have
- * no more references.
- */
- acpi_device_set_power(device, ACPI_STATE_D3_COLD);
- device->handle = NULL;
- put_device(&device->dev);
-}
-
/* --------------------------------------------------------------------------
Driver Management
-------------------------------------------------------------------------- */
@@ -1624,11 +1800,13 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
device->device_type = type;
device->handle = handle;
device->parent = acpi_bus_get_parent(handle);
- STRUCT_TO_INT(device->status) = sta;
+ acpi_set_device_status(device, sta);
acpi_device_get_busid(device);
acpi_set_pnp_ids(handle, &device->pnp, type);
acpi_bus_get_flags(device);
device->flags.match_driver = false;
+ device->flags.initialized = true;
+ device->flags.visited = false;
device_initialize(&device->dev);
dev_set_uevent_suppress(&device->dev, true);
}
@@ -1713,6 +1891,15 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
return 0;
}
+bool acpi_device_is_present(struct acpi_device *adev)
+{
+ if (adev->status.present || adev->status.functional)
+ return true;
+
+ adev->flags.initialized = false;
+ return false;
+}
+
static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
char *idstr,
const struct acpi_device_id **matchid)
@@ -1772,7 +1959,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type)
*/
list_for_each_entry(hwid, &pnp.ids, list) {
handler = acpi_scan_match_handler(hwid->id, NULL);
- if (handler && !handler->hotplug.ignore) {
+ if (handler) {
acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
acpi_hotplug_notify_cb, handler);
break;
@@ -1806,18 +1993,6 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
acpi_scan_init_hotplug(handle, type);
- if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
- !(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
- struct acpi_device_wakeup wakeup;
-
- if (acpi_has_method(handle, "_PRW")) {
- acpi_bus_extract_wakeup_device_power_package(handle,
- &wakeup);
- acpi_power_resources_list_free(&wakeup.resources);
- }
- return AE_CTRL_DEPTH;
- }
-
acpi_add_single_object(&device, handle, type, sta);
if (!device)
return AE_CTRL_DEPTH;
@@ -1852,36 +2027,40 @@ static int acpi_scan_attach_handler(struct acpi_device *device)
return ret;
}
-static acpi_status acpi_bus_device_attach(acpi_handle handle, u32 lvl_not_used,
- void *not_used, void **ret_not_used)
+static void acpi_bus_attach(struct acpi_device *device)
{
- struct acpi_device *device;
- unsigned long long sta_not_used;
+ struct acpi_device *child;
int ret;
- /*
- * Ignore errors ignored by acpi_bus_check_add() to avoid terminating
- * namespace walks prematurely.
- */
- if (acpi_bus_type_and_status(handle, &ret, &sta_not_used))
- return AE_OK;
-
- if (acpi_bus_get_device(handle, &device))
- return AE_CTRL_DEPTH;
-
+ acpi_bus_get_status(device);
+ /* Skip devices that are not present. */
+ if (!acpi_device_is_present(device)) {
+ device->flags.visited = false;
+ return;
+ }
if (device->handler)
- return AE_OK;
+ goto ok;
+ if (!device->flags.initialized) {
+ acpi_bus_update_power(device, NULL);
+ device->flags.initialized = true;
+ }
+ device->flags.visited = false;
ret = acpi_scan_attach_handler(device);
if (ret < 0)
- return AE_CTRL_DEPTH;
+ return;
device->flags.match_driver = true;
- if (ret > 0)
- return AE_OK;
+ if (!ret) {
+ ret = device_attach(&device->dev);
+ if (ret < 0)
+ return;
+ }
+ device->flags.visited = true;
- ret = device_attach(&device->dev);
- return ret >= 0 ? AE_OK : AE_CTRL_DEPTH;
+ ok:
+ list_for_each_entry(child, &device->children, node)
+ acpi_bus_attach(child);
}
/**
@@ -1901,75 +2080,49 @@ static acpi_status acpi_bus_device_attach(acpi_handle handle, u32 lvl_not_used,
int acpi_bus_scan(acpi_handle handle)
{
void *device = NULL;
- int error = 0;
if (ACPI_SUCCESS(acpi_bus_check_add(handle, 0, NULL, &device)))
acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
acpi_bus_check_add, NULL, NULL, &device);
- if (!device)
- error = -ENODEV;
- else if (ACPI_SUCCESS(acpi_bus_device_attach(handle, 0, NULL, NULL)))
- acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
- acpi_bus_device_attach, NULL, NULL, NULL);
-
- return error;
-}
-EXPORT_SYMBOL(acpi_bus_scan);
-
-static acpi_status acpi_bus_device_detach(acpi_handle handle, u32 lvl_not_used,
- void *not_used, void **ret_not_used)
-{
- struct acpi_device *device = NULL;
-
- if (!acpi_bus_get_device(handle, &device)) {
- struct acpi_scan_handler *dev_handler = device->handler;
-
- if (dev_handler) {
- if (dev_handler->detach)
- dev_handler->detach(device);
-
- device->handler = NULL;
- } else {
- device_release_driver(&device->dev);
- }
+ if (device) {
+ acpi_bus_attach(device);
+ return 0;
}
- return AE_OK;
-}
-
-static acpi_status acpi_bus_remove(acpi_handle handle, u32 lvl_not_used,
- void *not_used, void **ret_not_used)
-{
- struct acpi_device *device = NULL;
-
- if (!acpi_bus_get_device(handle, &device))
- acpi_device_unregister(device);
-
- return AE_OK;
+ return -ENODEV;
}
+EXPORT_SYMBOL(acpi_bus_scan);
/**
- * acpi_bus_trim - Remove ACPI device node and all of its descendants
- * @start: Root of the ACPI device nodes subtree to remove.
+ * acpi_bus_trim - Detach scan handlers and drivers from ACPI device objects.
+ * @adev: Root of the ACPI namespace scope to walk.
*
* Must be called under acpi_scan_lock.
*/
-void acpi_bus_trim(struct acpi_device *start)
+void acpi_bus_trim(struct acpi_device *adev)
{
+ struct acpi_scan_handler *handler = adev->handler;
+ struct acpi_device *child;
+
+ list_for_each_entry_reverse(child, &adev->children, node)
+ acpi_bus_trim(child);
+
+ adev->flags.match_driver = false;
+ if (handler) {
+ if (handler->detach)
+ handler->detach(adev);
+
+ adev->handler = NULL;
+ } else {
+ device_release_driver(&adev->dev);
+ }
/*
- * Execute acpi_bus_device_detach() as a post-order callback to detach
- * all ACPI drivers from the device nodes being removed.
- */
- acpi_walk_namespace(ACPI_TYPE_ANY, start->handle, ACPI_UINT32_MAX, NULL,
- acpi_bus_device_detach, NULL, NULL);
- acpi_bus_device_detach(start->handle, 0, NULL, NULL);
- /*
- * Execute acpi_bus_remove() as a post-order callback to remove device
- * nodes in the given namespace scope.
+ * Most likely, the device is going away, so put it into D3cold before
+ * that.
*/
- acpi_walk_namespace(ACPI_TYPE_ANY, start->handle, ACPI_UINT32_MAX, NULL,
- acpi_bus_remove, NULL, NULL);
- acpi_bus_remove(start->handle, 0, NULL, NULL);
+ acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
+ adev->flags.initialized = false;
+ adev->flags.visited = false;
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
@@ -2047,14 +2200,14 @@ int __init acpi_scan_init(void)
result = acpi_bus_scan_fixed();
if (result) {
- acpi_device_unregister(acpi_root);
+ acpi_detach_data(acpi_root->handle, acpi_scan_drop_device);
+ acpi_device_del(acpi_root);
+ put_device(&acpi_root->dev);
goto out;
}
acpi_update_all_gpes();
- acpi_pci_root_hp_init();
-
out:
mutex_unlock(&acpi_scan_lock);
return result;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 721e949e606e..b718806657cd 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -18,12 +18,8 @@
#include <linux/reboot.h>
#include <linux/acpi.h>
#include <linux/module.h>
-
#include <asm/io.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
#include "internal.h"
#include "sleep.h"
@@ -670,11 +666,8 @@ static void acpi_hibernation_leave(void)
/* Reprogram control registers */
acpi_leave_sleep_state_prep(ACPI_STATE_S4);
/* Check the hardware signature */
- if (facs && s4_hardware_signature != facs->hardware_signature) {
- printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
- "cannot resume!\n");
- panic("ACPI S4 hardware signature mismatch");
- }
+ if (facs && s4_hardware_signature != facs->hardware_signature)
+ pr_crit("ACPI: Hardware changed while hibernated, success doubtful!\n");
/* Restore the NVS memory area */
suspend_nvs_restore();
/* Allow EC transactions to happen. */
@@ -806,9 +799,6 @@ int __init acpi_sleep_init(void)
char *pos = supported;
int i;
- if (acpi_disabled)
- return 0;
-
acpi_sleep_dmi_check();
sleep_states[ACPI_STATE_S0] = 1;
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 6dbc3ca45223..91a32cefb11f 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -5,7 +5,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include "internal.h"
@@ -226,7 +226,7 @@ module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
/* /sys/modules/acpi/parameters/aml_debug_output */
module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
- bool, 0644);
+ byte, 0644);
MODULE_PARM_DESC(aml_debug_output,
"To enable/disable the ACPI Debug Object output.");
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index d67a1fe07f0e..5837f857ac2e 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -278,12 +278,13 @@ acpi_table_parse_madt(enum acpi_madt_type id,
/**
* acpi_table_parse - find table with @id, run @handler on it
- *
* @id: table id to find
* @handler: handler to run
*
* Scan the ACPI System Description Table (SDT) for a table matching @id,
- * run @handler on it. Return 0 if table found, return on if not.
+ * run @handler on it.
+ *
+ * Return 0 if table found, -errno if not.
*/
int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
{
@@ -293,7 +294,7 @@ int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
if (acpi_disabled)
return -ENODEV;
- if (!handler)
+ if (!id || !handler)
return -EINVAL;
if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
@@ -306,7 +307,7 @@ int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
early_acpi_os_unmap_memory(table, tbl_size);
return 0;
} else
- return 1;
+ return -ENODEV;
}
/*
@@ -351,7 +352,7 @@ int __init acpi_table_init(void)
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
if (ACPI_FAILURE(status))
- return 1;
+ return -EINVAL;
check_multiple_madt();
return 0;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 0d9f46b5ae6d..8349a555b92b 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -41,10 +41,9 @@
#include <linux/kmod.h>
#include <linux/reboot.h>
#include <linux/device.h>
-#include <asm/uaccess.h>
#include <linux/thermal.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
+#include <asm/uaccess.h>
#define PREFIX "ACPI: "
@@ -862,7 +861,7 @@ acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
return acpi_thermal_cooling_device_cb(thermal, cdev, false);
}
-static const struct thermal_zone_device_ops acpi_thermal_zone_ops = {
+static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
.bind = acpi_thermal_bind_cooling_device,
.unbind = acpi_thermal_unbind_cooling_device,
.get_temp = thermal_get_temp,
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 6d408bfbbb1d..85e3b612bdc0 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -30,8 +30,6 @@
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
#include "internal.h"
@@ -101,10 +99,6 @@ acpi_extract_package(union acpi_object *package,
union acpi_object *element = &(package->package.elements[i]);
- if (!element) {
- return AE_BAD_DATA;
- }
-
switch (element->type) {
case ACPI_TYPE_INTEGER:
@@ -574,3 +568,100 @@ acpi_status acpi_evaluate_lck(acpi_handle handle, int lock)
return status;
}
+
+/**
+ * acpi_evaluate_dsm - evaluate device's _DSM method
+ * @handle: ACPI device handle
+ * @uuid: UUID of requested functions, should be 16 bytes
+ * @rev: revision number of requested function
+ * @func: requested function number
+ * @argv4: the function specific parameter
+ *
+ * Evaluate device's _DSM method with specified UUID, revision id and
+ * function number. Caller needs to free the returned object.
+ *
+ * Though ACPI defines that the fourth parameter to _DSM should be a package,
+ * some old BIOSes expect a buffer or an integer instead.
+ */
+union acpi_object *
+acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid, int rev, int func,
+ union acpi_object *argv4)
+{
+ acpi_status ret;
+ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object params[4];
+ struct acpi_object_list input = {
+ .count = 4,
+ .pointer = params,
+ };
+
+ params[0].type = ACPI_TYPE_BUFFER;
+ params[0].buffer.length = 16;
+ params[0].buffer.pointer = (char *)uuid;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = rev;
+ params[2].type = ACPI_TYPE_INTEGER;
+ params[2].integer.value = func;
+ if (argv4) {
+ params[3] = *argv4;
+ } else {
+ params[3].type = ACPI_TYPE_PACKAGE;
+ params[3].package.count = 0;
+ params[3].package.elements = NULL;
+ }
+
+ ret = acpi_evaluate_object(handle, "_DSM", &input, &buf);
+ if (ACPI_SUCCESS(ret))
+ return (union acpi_object *)buf.pointer;
+
+ if (ret != AE_NOT_FOUND)
+ acpi_handle_warn(handle,
+ "failed to evaluate _DSM (0x%x)\n", ret);
+
+ return NULL;
+}
+EXPORT_SYMBOL(acpi_evaluate_dsm);
+
+/**
+ * acpi_check_dsm - check if _DSM method supports requested functions.
+ * @handle: ACPI device handle
+ * @uuid: UUID of requested functions, should be 16 bytes at least
+ * @rev: revision number of requested functions
+ * @funcs: bitmap of requested functions
+ *
+ * Evaluate device's _DSM method to check whether it supports requested
+ * functions. Currently at most 64 functions are supported, which should be
+ * enough for now.
+ */
+bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs)
+{
+ int i;
+ u64 mask = 0;
+ union acpi_object *obj;
+
+ if (funcs == 0)
+ return false;
+
+ obj = acpi_evaluate_dsm(handle, uuid, rev, 0, NULL);
+ if (!obj)
+ return false;
+
+ /* For compatibility, old BIOSes may return an integer */
+ if (obj->type == ACPI_TYPE_INTEGER)
+ mask = obj->integer.value;
+ else if (obj->type == ACPI_TYPE_BUFFER)
+ for (i = 0; i < obj->buffer.length && i < 8; i++)
+ mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
+ ACPI_FREE(obj);
+
+ /*
+ * Bit 0 indicates whether there's support for any functions other than
+ * function 0 for the specified UUID and revision.
+ */
+ if ((mask & 0x1) && (mask & funcs) == funcs)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(acpi_check_dsm);
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 995e91bcb97b..b727d105046d 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -37,12 +37,11 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
#include <linux/dmi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
#include <linux/suspend.h>
+#include <linux/acpi.h>
#include <acpi/video.h>
+#include <asm/uaccess.h>
#include "internal.h"
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 84875fd4c74f..a697b77b8865 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -50,7 +50,7 @@ static bool acpi_video_caps_checked;
static acpi_status
acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
- void **retyurn_value)
+ void **return_value)
{
long *cap = context;
@@ -170,6 +170,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_vendor,
+ .ident = "HP EliteBook Revolve 810",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Revolve 810 G1"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
.ident = "Lenovo Yoga 13",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index 7bfbe40bc43b..1638401ab282 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -5,7 +5,6 @@
#include <linux/init.h>
#include <linux/acpi.h>
-#include <acpi/acpi_drivers.h>
#include <linux/kernel.h>
#include <linux/types.h>
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index c4876ac9151a..9e6029105607 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -83,162 +83,6 @@ static struct device_attribute amba_dev_attrs[] = {
__ATTR_NULL,
};
-#ifdef CONFIG_PM_SLEEP
-
-static int amba_legacy_suspend(struct device *dev, pm_message_t mesg)
-{
- struct amba_driver *adrv = to_amba_driver(dev->driver);
- struct amba_device *adev = to_amba_device(dev);
- int ret = 0;
-
- if (dev->driver && adrv->suspend)
- ret = adrv->suspend(adev, mesg);
-
- return ret;
-}
-
-static int amba_legacy_resume(struct device *dev)
-{
- struct amba_driver *adrv = to_amba_driver(dev->driver);
- struct amba_device *adev = to_amba_device(dev);
- int ret = 0;
-
- if (dev->driver && adrv->resume)
- ret = adrv->resume(adev);
-
- return ret;
-}
-
-#endif /* CONFIG_PM_SLEEP */
-
-#ifdef CONFIG_SUSPEND
-
-static int amba_pm_suspend(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->suspend)
- ret = drv->pm->suspend(dev);
- } else {
- ret = amba_legacy_suspend(dev, PMSG_SUSPEND);
- }
-
- return ret;
-}
-
-static int amba_pm_resume(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->resume)
- ret = drv->pm->resume(dev);
- } else {
- ret = amba_legacy_resume(dev);
- }
-
- return ret;
-}
-
-#else /* !CONFIG_SUSPEND */
-
-#define amba_pm_suspend NULL
-#define amba_pm_resume NULL
-
-#endif /* !CONFIG_SUSPEND */
-
-#ifdef CONFIG_HIBERNATE_CALLBACKS
-
-static int amba_pm_freeze(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->freeze)
- ret = drv->pm->freeze(dev);
- } else {
- ret = amba_legacy_suspend(dev, PMSG_FREEZE);
- }
-
- return ret;
-}
-
-static int amba_pm_thaw(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->thaw)
- ret = drv->pm->thaw(dev);
- } else {
- ret = amba_legacy_resume(dev);
- }
-
- return ret;
-}
-
-static int amba_pm_poweroff(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->poweroff)
- ret = drv->pm->poweroff(dev);
- } else {
- ret = amba_legacy_suspend(dev, PMSG_HIBERNATE);
- }
-
- return ret;
-}
-
-static int amba_pm_restore(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->restore)
- ret = drv->pm->restore(dev);
- } else {
- ret = amba_legacy_resume(dev);
- }
-
- return ret;
-}
-
-#else /* !CONFIG_HIBERNATE_CALLBACKS */
-
-#define amba_pm_freeze NULL
-#define amba_pm_thaw NULL
-#define amba_pm_poweroff NULL
-#define amba_pm_restore NULL
-
-#endif /* !CONFIG_HIBERNATE_CALLBACKS */
-
#ifdef CONFIG_PM_RUNTIME
/*
* Hooks to provide runtime PM of the pclk (bus clock). It is safe to
@@ -251,7 +95,7 @@ static int amba_pm_runtime_suspend(struct device *dev)
int ret = pm_generic_runtime_suspend(dev);
if (ret == 0 && dev->driver)
- clk_disable(pcdev->pclk);
+ clk_disable_unprepare(pcdev->pclk);
return ret;
}
@@ -262,7 +106,7 @@ static int amba_pm_runtime_resume(struct device *dev)
int ret;
if (dev->driver) {
- ret = clk_enable(pcdev->pclk);
+ ret = clk_prepare_enable(pcdev->pclk);
/* Failure is probably fatal to the system, but... */
if (ret)
return ret;
@@ -272,15 +116,13 @@ static int amba_pm_runtime_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
-
static const struct dev_pm_ops amba_pm = {
- .suspend = amba_pm_suspend,
- .resume = amba_pm_resume,
- .freeze = amba_pm_freeze,
- .thaw = amba_pm_thaw,
- .poweroff = amba_pm_poweroff,
- .restore = amba_pm_restore,
+ .suspend = pm_generic_suspend,
+ .resume = pm_generic_resume,
+ .freeze = pm_generic_freeze,
+ .thaw = pm_generic_thaw,
+ .poweroff = pm_generic_poweroff,
+ .restore = pm_generic_restore,
SET_RUNTIME_PM_OPS(
amba_pm_runtime_suspend,
amba_pm_runtime_resume,
@@ -288,14 +130,6 @@ static const struct dev_pm_ops amba_pm = {
)
};
-#define AMBA_PM (&amba_pm)
-
-#else /* !CONFIG_PM */
-
-#define AMBA_PM NULL
-
-#endif /* !CONFIG_PM */
-
/*
* Primecells are part of the Advanced Microcontroller Bus Architecture,
* so we call the bus "amba".
@@ -305,7 +139,7 @@ struct bus_type amba_bustype = {
.dev_attrs = amba_dev_attrs,
.match = amba_match,
.uevent = amba_uevent,
- .pm = AMBA_PM,
+ .pm = &amba_pm,
};
static int __init amba_init(void)
@@ -317,36 +151,23 @@ postcore_initcall(amba_init);
static int amba_get_enable_pclk(struct amba_device *pcdev)
{
- struct clk *pclk = clk_get(&pcdev->dev, "apb_pclk");
int ret;
- pcdev->pclk = pclk;
-
- if (IS_ERR(pclk))
- return PTR_ERR(pclk);
+ pcdev->pclk = clk_get(&pcdev->dev, "apb_pclk");
+ if (IS_ERR(pcdev->pclk))
+ return PTR_ERR(pcdev->pclk);
- ret = clk_prepare(pclk);
- if (ret) {
- clk_put(pclk);
- return ret;
- }
-
- ret = clk_enable(pclk);
- if (ret) {
- clk_unprepare(pclk);
- clk_put(pclk);
- }
+ ret = clk_prepare_enable(pcdev->pclk);
+ if (ret)
+ clk_put(pcdev->pclk);
return ret;
}
static void amba_put_disable_pclk(struct amba_device *pcdev)
{
- struct clk *pclk = pcdev->pclk;
-
- clk_disable(pclk);
- clk_unprepare(pclk);
- clk_put(pclk);
+ clk_disable_unprepare(pcdev->pclk);
+ clk_put(pcdev->pclk);
}
/*
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 14f1e9506338..dc2756fb6f33 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -83,6 +83,8 @@ enum board_ids {
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
+static void ahci_mcp89_apple_enable(struct pci_dev *pdev);
+static bool is_mcp89_apple(struct pci_dev *pdev);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
#ifdef CONFIG_PM
@@ -427,6 +429,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs }, /* 88se9128 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
.driver_data = board_ahci_yes_fbs }, /* 88se9125 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
+ PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9170 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
@@ -661,6 +666,10 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
if (rc)
return rc;
+ /* Apple BIOS helpfully mangles the registers on resume */
+ if (is_mcp89_apple(pdev))
+ ahci_mcp89_apple_enable(pdev);
+
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
rc = ahci_pci_reset_controller(host);
if (rc)
@@ -777,6 +786,48 @@ static void ahci_p5wdh_workaround(struct ata_host *host)
}
}
+/*
+ * Macbook7,1 firmware forcibly disables MCP89 AHCI and changes PCI ID when
+ * booting in BIOS compatibility mode. We restore the registers but not ID.
+ */
+static void ahci_mcp89_apple_enable(struct pci_dev *pdev)
+{
+ u32 val;
+
+ printk(KERN_INFO "ahci: enabling MCP89 AHCI mode\n");
+
+ pci_read_config_dword(pdev, 0xf8, &val);
+ val |= 1 << 0x1b;
+ /* the following changes the device ID, but appears not to affect function */
+ /* val = (val & ~0xf0000000) | 0x80000000; */
+ pci_write_config_dword(pdev, 0xf8, val);
+
+ pci_read_config_dword(pdev, 0x54c, &val);
+ val |= 1 << 0xc;
+ pci_write_config_dword(pdev, 0x54c, val);
+
+ pci_read_config_dword(pdev, 0x4a4, &val);
+ val &= 0xff;
+ val |= 0x01060100;
+ pci_write_config_dword(pdev, 0x4a4, val);
+
+ pci_read_config_dword(pdev, 0x54c, &val);
+ val &= ~(1 << 0xc);
+ pci_write_config_dword(pdev, 0x54c, val);
+
+ pci_read_config_dword(pdev, 0xf8, &val);
+ val &= ~(1 << 0x1b);
+ pci_write_config_dword(pdev, 0xf8, val);
+}
+
+static bool is_mcp89_apple(struct pci_dev *pdev)
+{
+ return pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
+ pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA &&
+ pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
+ pdev->subsystem_device == 0xcb89;
+}
+
/* only some SB600 ahci controllers can do 64bit DMA */
static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
{
@@ -1097,26 +1148,40 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
{}
#endif
-int ahci_init_interrupts(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
+static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
+ struct ahci_host_priv *hpriv)
{
- int rc;
- unsigned int maxvec;
+ int rc, nvec;
- if (!(hpriv->flags & AHCI_HFLAG_NO_MSI)) {
- rc = pci_enable_msi_block_auto(pdev, &maxvec);
- if (rc > 0) {
- if ((rc == maxvec) || (rc == 1))
- return rc;
- /*
- * Assume that advantage of multipe MSIs is negated,
- * so fallback to single MSI mode to save resources
- */
- pci_disable_msi(pdev);
- if (!pci_enable_msi(pdev))
- return 1;
- }
- }
+ if (hpriv->flags & AHCI_HFLAG_NO_MSI)
+ goto intx;
+
+ rc = pci_msi_vec_count(pdev);
+ if (rc < 0)
+ goto intx;
+
+ /*
+ * If the number of MSIs is less than the number of ports then Sharing Last
+ * Message mode could be enforced. In this case assume that the advantage
+ * of multiple MSIs is negated and use single MSI mode instead.
+ */
+ if (rc < n_ports)
+ goto single_msi;
+
+ nvec = rc;
+ rc = pci_enable_msi_block(pdev, nvec);
+ if (rc)
+ goto intx;
+
+ return nvec;
+
+single_msi:
+ rc = pci_enable_msi(pdev);
+ if (rc)
+ goto intx;
+ return 1;
+intx:
pci_intx(pdev, 1);
return 0;
}
@@ -1209,15 +1274,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
return -ENODEV;
- /*
- * For some reason, MCP89 on MacBook 7,1 doesn't work with
- * ahci, use ata_generic instead.
- */
- if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
- pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA &&
- pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
- pdev->subsystem_device == 0xcb89)
- return -ENODEV;
+ /* Apple BIOS on MCP89 prevents us using AHCI */
+ if (is_mcp89_apple(pdev))
+ ahci_mcp89_apple_enable(pdev);
/* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode.
* At the moment, we can only use the AHCI mode. Let the users know
@@ -1238,15 +1297,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* AHCI controllers often implement SFF compatible interface.
- * Grab all PCI BARs just in case.
- */
- rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
- if (rc == -EBUSY)
- pcim_pin_device(pdev);
- if (rc)
- return rc;
-
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == 0x2652 || pdev->device == 0x2653)) {
u8 map;
@@ -1263,6 +1313,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+ /* AHCI controllers often implement SFF compatible interface.
+ * Grab all PCI BARs just in case.
+ */
+ rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+ return rc;
+
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
@@ -1283,10 +1342,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
- n_msis = ahci_init_interrupts(pdev, hpriv);
- if (n_msis > 1)
- hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
-
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
@@ -1341,6 +1396,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+ n_msis = ahci_init_interrupts(pdev, n_ports, hpriv);
+ if (n_msis > 1)
+ hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
+
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host)
return -ENOMEM;
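ahci_init_interrupts() above now takes the port count and walks a fallback ladder: per-port multi-MSI when the device advertises enough vectors, otherwise a single shared MSI, otherwise legacy INTx. A minimal sketch of that ladder, assuming the PCI/MSI API of this kernel generation (pci_enable_msi_block() was later superseded by pci_alloc_irq_vectors()); example_setup_irqs() is a hypothetical name:

#include <linux/pci.h>

static int example_setup_irqs(struct pci_dev *pdev, unsigned int n_ports)
{
	int nvec = pci_msi_vec_count(pdev);	/* < 0 means no MSI capability */

	if (nvec >= (int)n_ports) {
		if (pci_enable_msi_block(pdev, nvec) == 0)
			return nvec;		/* one vector per port */
	} else if (nvec > 0) {
		if (pci_enable_msi(pdev) == 0)
			return 1;		/* shared single MSI */
	}

	pci_intx(pdev, 1);			/* legacy INTx as the last resort */
	return 0;
}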
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index ae2d73fe321e..dd4d6f74d7bd 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -34,10 +34,21 @@ enum {
HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
};
+enum ahci_imx_type {
+ AHCI_IMX53,
+ AHCI_IMX6Q,
+};
+
struct imx_ahci_priv {
struct platform_device *ahci_pdev;
+ enum ahci_imx_type type;
+
+ /* i.MX53 clock */
+ struct clk *sata_gate_clk;
+ /* Common clock */
struct clk *sata_ref_clk;
struct clk *ahb_clk;
+
struct regmap *gpr;
bool no_device;
bool first_time;
@@ -47,6 +58,59 @@ static int ahci_imx_hotplug;
module_param_named(hotplug, ahci_imx_hotplug, int, 0644);
MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support)");
+static int imx_sata_clock_enable(struct device *dev)
+{
+ struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+ int ret;
+
+ if (imxpriv->type == AHCI_IMX53) {
+ ret = clk_prepare_enable(imxpriv->sata_gate_clk);
+ if (ret < 0) {
+ dev_err(dev, "prepare-enable sata_gate clock err:%d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = clk_prepare_enable(imxpriv->sata_ref_clk);
+ if (ret < 0) {
+ dev_err(dev, "prepare-enable sata_ref clock err:%d\n",
+ ret);
+ goto clk_err;
+ }
+
+ if (imxpriv->type == AHCI_IMX6Q) {
+ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+ }
+
+ usleep_range(1000, 2000);
+
+ return 0;
+
+clk_err:
+ if (imxpriv->type == AHCI_IMX53)
+ clk_disable_unprepare(imxpriv->sata_gate_clk);
+ return ret;
+}
+
+static void imx_sata_clock_disable(struct device *dev)
+{
+ struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+
+ if (imxpriv->type == AHCI_IMX6Q) {
+ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+ !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+ }
+
+ clk_disable_unprepare(imxpriv->sata_ref_clk);
+
+ if (imxpriv->type == AHCI_IMX53)
+ clk_disable_unprepare(imxpriv->sata_gate_clk);
+}
+
static void ahci_imx_error_handler(struct ata_port *ap)
{
u32 reg_val;
@@ -72,16 +136,29 @@ static void ahci_imx_error_handler(struct ata_port *ap)
*/
reg_val = readl(mmio + PORT_PHY_CTL);
writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
- regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
- IMX6Q_GPR13_SATA_MPLL_CLK_EN,
- !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
- clk_disable_unprepare(imxpriv->sata_ref_clk);
+ imx_sata_clock_disable(ap->dev);
imxpriv->no_device = true;
}
+static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct imx_ahci_priv *imxpriv = dev_get_drvdata(ap->dev->parent);
+ int ret = -EIO;
+
+ if (imxpriv->type == AHCI_IMX53)
+ ret = ahci_pmp_retry_srst_ops.softreset(link, class, deadline);
+ else if (imxpriv->type == AHCI_IMX6Q)
+ ret = ahci_ops.softreset(link, class, deadline);
+
+ return ret;
+}
+
static struct ata_port_operations ahci_imx_ops = {
.inherits = &ahci_platform_ops,
.error_handler = ahci_imx_error_handler,
+ .softreset = ahci_imx_softreset,
};
static const struct ata_port_info ahci_imx_port_info = {
@@ -91,51 +168,15 @@ static const struct ata_port_info ahci_imx_port_info = {
.port_ops = &ahci_imx_ops,
};
-static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
+static int imx_sata_init(struct device *dev, void __iomem *mmio)
{
int ret = 0;
unsigned int reg_val;
struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
- imxpriv->gpr =
- syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
- if (IS_ERR(imxpriv->gpr)) {
- dev_err(dev, "failed to find fsl,imx6q-iomux-gpr regmap\n");
- return PTR_ERR(imxpriv->gpr);
- }
-
- ret = clk_prepare_enable(imxpriv->sata_ref_clk);
- if (ret < 0) {
- dev_err(dev, "prepare-enable sata_ref clock err:%d\n", ret);
+ ret = imx_sata_clock_enable(dev);
+ if (ret < 0)
return ret;
- }
-
- /*
- * set PHY Paremeters, two steps to configure the GPR13,
- * one write for rest of parameters, mask of first write
- * is 0x07fffffd, and the other one write for setting
- * the mpll_clk_en.
- */
- regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
- | IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK
- | IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK
- | IMX6Q_GPR13_SATA_SPD_MODE_MASK
- | IMX6Q_GPR13_SATA_MPLL_SS_EN
- | IMX6Q_GPR13_SATA_TX_ATTEN_MASK
- | IMX6Q_GPR13_SATA_TX_BOOST_MASK
- | IMX6Q_GPR13_SATA_TX_LVL_MASK
- | IMX6Q_GPR13_SATA_TX_EDGE_RATE
- , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
- | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
- | IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F
- | IMX6Q_GPR13_SATA_SPD_MODE_3P0G
- | IMX6Q_GPR13_SATA_MPLL_SS_EN
- | IMX6Q_GPR13_SATA_TX_ATTEN_9_16
- | IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB
- | IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
- regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_MPLL_CLK_EN,
- IMX6Q_GPR13_SATA_MPLL_CLK_EN);
- usleep_range(100, 200);
/*
* Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
@@ -161,13 +202,9 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
return 0;
}
-static void imx6q_sata_exit(struct device *dev)
+static void imx_sata_exit(struct device *dev)
{
- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
-
- regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_MPLL_CLK_EN,
- !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
- clk_disable_unprepare(imxpriv->sata_ref_clk);
+ imx_sata_clock_disable(dev);
}
static int imx_ahci_suspend(struct device *dev)
@@ -178,12 +215,8 @@ static int imx_ahci_suspend(struct device *dev)
* If no_device is set, The CLKs had been gated off in the
* initialization so don't do it again here.
*/
- if (!imxpriv->no_device) {
- regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
- IMX6Q_GPR13_SATA_MPLL_CLK_EN,
- !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
- clk_disable_unprepare(imxpriv->sata_ref_clk);
- }
+ if (!imxpriv->no_device)
+ imx_sata_clock_disable(dev);
return 0;
}
@@ -191,34 +224,26 @@ static int imx_ahci_suspend(struct device *dev)
static int imx_ahci_resume(struct device *dev)
{
struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
- int ret;
-
- if (!imxpriv->no_device) {
- ret = clk_prepare_enable(imxpriv->sata_ref_clk);
- if (ret < 0) {
- dev_err(dev, "pre-enable sata_ref clock err:%d\n", ret);
- return ret;
- }
+ int ret = 0;
- regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
- IMX6Q_GPR13_SATA_MPLL_CLK_EN,
- IMX6Q_GPR13_SATA_MPLL_CLK_EN);
- usleep_range(1000, 2000);
- }
+ if (!imxpriv->no_device)
+ ret = imx_sata_clock_enable(dev);
- return 0;
+ return ret;
}
-static struct ahci_platform_data imx6q_sata_pdata = {
- .init = imx6q_sata_init,
- .exit = imx6q_sata_exit,
- .ata_port_info = &ahci_imx_port_info,
- .suspend = imx_ahci_suspend,
- .resume = imx_ahci_resume,
+static struct ahci_platform_data imx_sata_pdata = {
+ .init = imx_sata_init,
+ .exit = imx_sata_exit,
+ .ata_port_info = &ahci_imx_port_info,
+ .suspend = imx_ahci_suspend,
+ .resume = imx_ahci_resume,
+
};
static const struct of_device_id imx_ahci_of_match[] = {
- { .compatible = "fsl,imx6q-ahci", .data = &imx6q_sata_pdata},
+ { .compatible = "fsl,imx53-ahci", .data = (void *)AHCI_IMX53 },
+ { .compatible = "fsl,imx6q-ahci", .data = (void *)AHCI_IMX6Q },
{},
};
MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
@@ -228,12 +253,20 @@ static int imx_ahci_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *mem, *irq, res[2];
const struct of_device_id *of_id;
+ enum ahci_imx_type type;
const struct ahci_platform_data *pdata = NULL;
struct imx_ahci_priv *imxpriv;
struct device *ahci_dev;
struct platform_device *ahci_pdev;
int ret;
+ of_id = of_match_device(imx_ahci_of_match, dev);
+ if (!of_id)
+ return -EINVAL;
+
+ type = (enum ahci_imx_type)of_id->data;
+ pdata = &imx_sata_pdata;
+
imxpriv = devm_kzalloc(dev, sizeof(*imxpriv), GFP_KERNEL);
if (!imxpriv) {
dev_err(dev, "can't alloc ahci_host_priv\n");
@@ -249,6 +282,8 @@ static int imx_ahci_probe(struct platform_device *pdev)
imxpriv->no_device = false;
imxpriv->first_time = true;
+ imxpriv->type = type;
+
imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(imxpriv->ahb_clk)) {
dev_err(dev, "can't get ahb clock.\n");
@@ -256,6 +291,15 @@ static int imx_ahci_probe(struct platform_device *pdev)
goto err_out;
}
+ if (type == AHCI_IMX53) {
+ imxpriv->sata_gate_clk = devm_clk_get(dev, "sata_gate");
+ if (IS_ERR(imxpriv->sata_gate_clk)) {
+ dev_err(dev, "can't get sata_gate clock.\n");
+ ret = PTR_ERR(imxpriv->sata_gate_clk);
+ goto err_out;
+ }
+ }
+
imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
if (IS_ERR(imxpriv->sata_ref_clk)) {
dev_err(dev, "can't get sata_ref clock.\n");
@@ -266,14 +310,6 @@ static int imx_ahci_probe(struct platform_device *pdev)
imxpriv->ahci_pdev = ahci_pdev;
platform_set_drvdata(pdev, imxpriv);
- of_id = of_match_device(imx_ahci_of_match, dev);
- if (of_id) {
- pdata = of_id->data;
- } else {
- ret = -EINVAL;
- goto err_out;
- }
-
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!mem || !irq) {
@@ -289,6 +325,43 @@ static int imx_ahci_probe(struct platform_device *pdev)
ahci_dev->dma_mask = &ahci_dev->coherent_dma_mask;
ahci_dev->of_node = dev->of_node;
+ if (type == AHCI_IMX6Q) {
+ imxpriv->gpr = syscon_regmap_lookup_by_compatible(
+ "fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imxpriv->gpr)) {
+ dev_err(dev,
+ "failed to find fsl,imx6q-iomux-gpr regmap\n");
+ ret = PTR_ERR(imxpriv->gpr);
+ goto err_out;
+ }
+
+ /*
+ * Set PHY Parameters, two steps to configure the GPR13:
+ * one write for the rest of the parameters (the mask of the first
+ * write is 0x07fffffe), and the other write, which sets
+ * the mpll_clk_en, happens in imx_sata_clock_enable().
+ */
+ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+ IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK |
+ IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK |
+ IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK |
+ IMX6Q_GPR13_SATA_SPD_MODE_MASK |
+ IMX6Q_GPR13_SATA_MPLL_SS_EN |
+ IMX6Q_GPR13_SATA_TX_ATTEN_MASK |
+ IMX6Q_GPR13_SATA_TX_BOOST_MASK |
+ IMX6Q_GPR13_SATA_TX_LVL_MASK |
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN |
+ IMX6Q_GPR13_SATA_TX_EDGE_RATE,
+ IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB |
+ IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
+ IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
+ IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
+ IMX6Q_GPR13_SATA_MPLL_SS_EN |
+ IMX6Q_GPR13_SATA_TX_ATTEN_9_16 |
+ IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB |
+ IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
+ }
+
ret = platform_device_add_resources(ahci_pdev, res, 2);
if (ret)
goto err_out;
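The reworked probe above keys the SoC variant off the OF match table: the enum value travels in the .data member and is read back with of_match_device(). A minimal sketch of that idiom with hypothetical names (the compatible strings are the ones added by the patch):

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

enum example_soc { EXAMPLE_IMX53, EXAMPLE_IMX6Q };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "fsl,imx53-ahci", .data = (void *)EXAMPLE_IMX53 },
	{ .compatible = "fsl,imx6q-ahci", .data = (void *)EXAMPLE_IMX6Q },
	{ /* sentinel */ },
};

static int example_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
		of_match_device(example_of_match, &pdev->dev);
	enum example_soc soc;

	if (!of_id)
		return -EINVAL;		/* probed without a DT match */

	soc = (enum example_soc)of_id->data;	/* variant rides along in .data */
	dev_info(&pdev->dev, "probing SoC variant %d\n", soc);
	return 0;
}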
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index f8f38a08abc5..7d196656adb5 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -221,13 +221,6 @@ static struct pci_device_id ata_generic[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), },
{ PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE),
.driver_data = ATA_GEN_FORCE_DMA },
- /*
- * For some reason, MCP89 on MacBook 7,1 doesn't work with
- * ahci, use ata_generic instead.
- */
- { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA,
- PCI_VENDOR_ID_APPLE, 0xcb89,
- .driver_data = ATA_GEN_FORCE_DMA },
#if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE)
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index c482f8cadd7a..36605abe5a67 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1764,7 +1764,7 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
}
}
-void ahci_port_intr(struct ata_port *ap)
+static void ahci_port_intr(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 status;
@@ -1797,7 +1797,7 @@ irqreturn_t ahci_thread_fn(int irq, void *dev_instance)
}
EXPORT_SYMBOL_GPL(ahci_thread_fn);
-void ahci_hw_port_interrupt(struct ata_port *ap)
+static void ahci_hw_port_interrupt(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_port_priv *pp = ap->private_data;
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 4372cfa883c9..9e69a5308693 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -20,8 +20,6 @@
#include <scsi/scsi_device.h>
#include "libata.h"
-#include <acpi/acpi_bus.h>
-
unsigned int ata_acpi_gtf_filter = ATA_ACPI_FILTER_DEFAULT;
module_param_named(acpi_gtf_filter, ata_acpi_gtf_filter, int, 0644);
MODULE_PARM_DESC(acpi_gtf_filter, "filter mask for ACPI _GTF commands, set to filter out (0x1=set xfermode, 0x2=lock/freeze lock, 0x4=DIPM, 0x8=FPDMA non-zero offset, 0x10=FPDMA DMA Setup FIS auto-activate)");
@@ -180,12 +178,12 @@ static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
/* bind acpi handle to pata port */
void ata_acpi_bind_port(struct ata_port *ap)
{
- acpi_handle host_handle = ACPI_HANDLE(ap->host->dev);
+ struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev);
- if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_handle)
+ if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_companion)
return;
- acpi_preset_companion(&ap->tdev, host_handle, ap->port_no);
+ acpi_preset_companion(&ap->tdev, host_companion, ap->port_no);
if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
@@ -198,17 +196,17 @@ void ata_acpi_bind_port(struct ata_port *ap)
void ata_acpi_bind_dev(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
- acpi_handle port_handle = ACPI_HANDLE(&ap->tdev);
- acpi_handle host_handle = ACPI_HANDLE(ap->host->dev);
- acpi_handle parent_handle;
+ struct acpi_device *port_companion = ACPI_COMPANION(&ap->tdev);
+ struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev);
+ struct acpi_device *parent;
u64 adr;
/*
- * For both sata/pata devices, host handle is required.
- * For pata device, port handle is also required.
+ * For both sata/pata devices, host companion device is required.
+ * For pata device, port companion device is also required.
*/
- if (libata_noacpi || !host_handle ||
- (!(ap->flags & ATA_FLAG_ACPI_SATA) && !port_handle))
+ if (libata_noacpi || !host_companion ||
+ (!(ap->flags & ATA_FLAG_ACPI_SATA) && !port_companion))
return;
if (ap->flags & ATA_FLAG_ACPI_SATA) {
@@ -216,13 +214,13 @@ void ata_acpi_bind_dev(struct ata_device *dev)
adr = SATA_ADR(ap->port_no, NO_PORT_MULT);
else
adr = SATA_ADR(ap->port_no, dev->link->pmp);
- parent_handle = host_handle;
+ parent = host_companion;
} else {
adr = dev->devno;
- parent_handle = port_handle;
+ parent = port_companion;
}
- acpi_preset_companion(&dev->tdev, parent_handle, adr);
+ acpi_preset_companion(&dev->tdev, parent, adr);
register_hotplug_dock_device(ata_dev_acpi_handle(dev),
&ata_acpi_dev_dock_ops, dev, NULL, NULL);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 1274720e6bb9..a440958d34e4 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev,
"failed to get NCQ Send/Recv Log Emask 0x%x\n",
err_mask);
} else {
+ u8 *cmds = dev->ncq_send_recv_cmds;
+
dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
- memcpy(dev->ncq_send_recv_cmds, ap->sector_buf,
- ATA_LOG_NCQ_SEND_RECV_SIZE);
+ memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
+
+ if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
+ ata_dev_dbg(dev, "disabling queued TRIM support\n");
+ cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
+ ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
+ }
}
}
@@ -2215,6 +2222,16 @@ int ata_dev_configure(struct ata_device *dev)
if (rc)
return rc;
+ /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
+ if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
+ (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
+ dev->horkage |= ATA_HORKAGE_NOLPM;
+
+ if (dev->horkage & ATA_HORKAGE_NOLPM) {
+ ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
+ dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
+ }
+
/* let ACPI work its magic */
rc = ata_acpi_on_devcfg(dev);
if (rc)
@@ -4156,6 +4173,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
+ /* Seagate Momentus SpinPoint M8 seems to have FPDMA_AA issues */
+ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
{ "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
@@ -4202,6 +4222,27 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ /* devices that don't properly handle queued TRIM commands */
+ { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+
+ /*
+ * Some WD SATA-I drives spin up and down erratically when the link
+ * is put into the slumber mode. We don't have a full list of the
+ * affected devices. Disable LPM if the device matches one of the
+ * known prefixes and is SATA-1. As a side effect, LPM partial is
+ * lost too.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=57211
+ */
+ { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+
/* End Marker */
{ }
};
@@ -6519,6 +6560,7 @@ static int __init ata_parse_force_one(char **cur,
{ "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
{ "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
{ "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
+ { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
};
char *start = *cur, *p = *cur;
char *id, *val, *endp;
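The new "disable" keyword extends the libata.force= boot parameter so that a misbehaving device can be kept from being configured at all. Assuming the usual PORT.DEVICE addressing of that parameter, a hypothetical invocation for device 0 on port 1 would look like libata.force=1.00:disable on the kernel command line.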
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 92d7797223be..6d8757008318 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2402,7 +2402,7 @@ static void ata_eh_link_report(struct ata_link *link)
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
const char *frozen, *desc;
- char tries_buf[6];
+ char tries_buf[6] = "";
int tag, nr_failed = 0;
if (ehc->i.flags & ATA_EHI_QUIET)
@@ -2433,9 +2433,8 @@ static void ata_eh_link_report(struct ata_link *link)
if (ap->pflags & ATA_PFLAG_FROZEN)
frozen = " frozen";
- memset(tries_buf, 0, sizeof(tries_buf));
if (ap->eh_tries < ATA_EH_MAX_TRIES)
- snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
+ snprintf(tries_buf, sizeof(tries_buf), " t%d",
ap->eh_tries);
if (ehc->i.dev) {
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index ab58556d347c..ef8567de6a75 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -111,12 +111,14 @@ static const char *ata_lpm_policy_names[] = {
[ATA_LPM_MIN_POWER] = "min_power",
};
-static ssize_t ata_scsi_lpm_store(struct device *dev,
+static ssize_t ata_scsi_lpm_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct Scsi_Host *shost = class_to_shost(dev);
+ struct Scsi_Host *shost = class_to_shost(device);
struct ata_port *ap = ata_shost_to_port(shost);
+ struct ata_link *link;
+ struct ata_device *dev;
enum ata_lpm_policy policy;
unsigned long flags;
@@ -132,10 +134,20 @@ static ssize_t ata_scsi_lpm_store(struct device *dev,
return -EINVAL;
spin_lock_irqsave(ap->lock, flags);
+
+ ata_for_each_link(link, ap, EDGE) {
+ ata_for_each_dev(dev, &ap->link, ENABLED) {
+ if (dev->horkage & ATA_HORKAGE_NOLPM) {
+ count = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+ }
+ }
+
ap->target_lpm_policy = policy;
ata_port_schedule_eh(ap);
+out_unlock:
spin_unlock_irqrestore(ap->lock, flags);
-
return count;
}
@@ -3872,6 +3884,27 @@ void ata_scsi_hotplug(struct work_struct *work)
return;
}
+ /*
+ * XXX - UGLY HACK
+ *
+ * The block layer suspend/resume path is fundamentally broken due
+ * to freezable kthreads and workqueues and may deadlock if a block
+ * device gets removed while resume is in progress. I don't know
+ * what the solution is short of removing freezable kthreads and
+ * workqueues altogether.
+ *
+ * The following is an ugly hack to avoid kicking off device
+ * removal while freezer is active. This is a joke but does avoid
+ * this particular deadlock scenario.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=62801
+ * http://marc.info/?l=linux-kernel&m=138695698516487
+ */
+#ifdef CONFIG_FREEZER
+ while (pm_freezing)
+ msleep(10);
+#endif
+
DPRINTK("ENTER\n");
mutex_lock(&ap->scsi_scan_mutex);
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 73212c9c6d5b..62c9ac80c6e9 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -12,11 +12,10 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gfp.h>
-#include <scsi/scsi_host.h>
-#include <acpi/acpi_bus.h>
-
+#include <linux/acpi.h>
#include <linux/libata.h>
#include <linux/ata.h>
+#include <scsi/scsi_host.h>
#define DRV_NAME "pata_acpi"
#define DRV_VERSION "0.2.3"
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 898e544a7ae8..a79566d05666 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -24,11 +24,34 @@
#include <linux/slab.h>
#include <linux/platform_data/ata-samsung_cf.h>
-#include <plat/regs-ata.h>
#define DRV_NAME "pata_samsung_cf"
#define DRV_VERSION "0.1"
+#define S3C_CFATA_REG(x) (x)
+#define S3C_CFATA_MUX S3C_CFATA_REG(0x0)
+#define S3C_ATA_CTRL S3C_CFATA_REG(0x0)
+#define S3C_ATA_CMD S3C_CFATA_REG(0x8)
+#define S3C_ATA_IRQ S3C_CFATA_REG(0x10)
+#define S3C_ATA_IRQ_MSK S3C_CFATA_REG(0x14)
+#define S3C_ATA_CFG S3C_CFATA_REG(0x18)
+
+#define S3C_ATA_PIO_TIME S3C_CFATA_REG(0x2c)
+#define S3C_ATA_PIO_DTR S3C_CFATA_REG(0x54)
+#define S3C_ATA_PIO_FED S3C_CFATA_REG(0x58)
+#define S3C_ATA_PIO_SCR S3C_CFATA_REG(0x5c)
+#define S3C_ATA_PIO_LLR S3C_CFATA_REG(0x60)
+#define S3C_ATA_PIO_LMR S3C_CFATA_REG(0x64)
+#define S3C_ATA_PIO_LHR S3C_CFATA_REG(0x68)
+#define S3C_ATA_PIO_DVR S3C_CFATA_REG(0x6c)
+#define S3C_ATA_PIO_CSD S3C_CFATA_REG(0x70)
+#define S3C_ATA_PIO_DAD S3C_CFATA_REG(0x74)
+#define S3C_ATA_PIO_RDATA S3C_CFATA_REG(0x7c)
+
+#define S3C_CFATA_MUX_TRUEIDE 0x01
+#define S3C_ATA_CFG_SWAP 0x40
+#define S3C_ATA_CFG_IORDYEN 0x02
+
enum s3c_cpu_type {
TYPE_S3C64XX,
TYPE_S5PC100,
@@ -495,22 +518,10 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
info->irq = platform_get_irq(pdev, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(dev, "failed to get mem resource\n");
- return -EINVAL;
- }
-
- if (!devm_request_mem_region(dev, res->start,
- resource_size(res), DRV_NAME)) {
- dev_err(dev, "error requesting register region\n");
- return -EBUSY;
- }
- info->ide_addr = devm_ioremap(dev, res->start, resource_size(res));
- if (!info->ide_addr) {
- dev_err(dev, "failed to map IO base address\n");
- return -ENOMEM;
- }
+ info->ide_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(info->ide_addr))
+ return PTR_ERR(info->ide_addr);
info->clk = devm_clk_get(&pdev->dev, "cfcon");
if (IS_ERR(info->clk)) {
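pata_samsung_cf now leans on devm_ioremap_resource(), which folds the NULL-resource check, request_mem_region() and ioremap() (plus the error prints) into one device-managed call. A minimal sketch of the idiom in a hypothetical probe:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);	/* checks, requests, maps */
	if (IS_ERR(base))
		return PTR_ERR(base);	/* the helper already logged the reason */

	/* ... use base; the mapping is released automatically on detach ... */
	return 0;
}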
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index ea3b3dc10f33..870b11eadc6d 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -29,7 +29,6 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
-#include <linux/ahci_platform.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 56be31819897..52b8181ddafd 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -60,6 +60,7 @@
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
@@ -304,6 +305,7 @@ enum {
MV5_LTMODE = 0x30,
MV5_PHY_CTL = 0x0C,
SATA_IFCFG = 0x050,
+ LP_PHY_CTL = 0x058,
MV_M2_PREAMP_MASK = 0x7e0,
@@ -431,6 +433,7 @@ enum {
MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */
MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */
+ MV_HP_FIX_LP_PHY_CTL = (1 << 13), /* fix speed in LP_PHY_CTL ? */
/* Port private flags (pp_flags) */
MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
@@ -563,6 +566,12 @@ struct mv_host_priv {
struct clk *clk;
struct clk **port_clks;
/*
+ * Some devices have a SATA PHY which can be enabled/disabled
+ * in order to save power. These are optional: if the platform
+ * device does not have any PHY, none will be used.
+ */
+ struct phy **port_phys;
+ /*
* These consistent DMA memory pools give us guaranteed
* alignment for hardware-accessed data structures,
* and less memory waste in accomplishing the alignment.
@@ -1358,6 +1367,7 @@ static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
if (ofs != 0xffffffffU) {
void __iomem *addr = mv_ap_base(link->ap) + ofs;
+ struct mv_host_priv *hpriv = link->ap->host->private_data;
if (sc_reg_in == SCR_CONTROL) {
/*
* Workaround for 88SX60x1 FEr SATA#26:
@@ -1374,6 +1384,18 @@ static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
*/
if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
val |= 0xf000;
+
+ if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
+ void __iomem *lp_phy_addr =
+ mv_ap_base(link->ap) + LP_PHY_CTL;
+ /*
+ * Set PHY speed according to SControl speed.
+ */
+ if ((val & 0xf0) == 0x10)
+ writelfl(0x7, lp_phy_addr);
+ else
+ writelfl(0x227, lp_phy_addr);
+ }
}
writelfl(val, addr);
return 0;
@@ -4076,6 +4098,11 @@ static int mv_platform_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!hpriv->port_clks)
return -ENOMEM;
+ hpriv->port_phys = devm_kzalloc(&pdev->dev,
+ sizeof(struct phy *) * n_ports,
+ GFP_KERNEL);
+ if (!hpriv->port_phys)
+ return -ENOMEM;
host->private_data = hpriv;
hpriv->n_ports = n_ports;
hpriv->board_idx = chip_soc;
@@ -4097,6 +4124,19 @@ static int mv_platform_probe(struct platform_device *pdev)
hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
if (!IS_ERR(hpriv->port_clks[port]))
clk_prepare_enable(hpriv->port_clks[port]);
+
+ sprintf(port_number, "port%d", port);
+ hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
+ port_number);
+ if (IS_ERR(hpriv->port_phys[port])) {
+ rc = PTR_ERR(hpriv->port_phys[port]);
+ hpriv->port_phys[port] = NULL;
+ if (rc != -EPROBE_DEFER)
+ dev_warn(&pdev->dev, "error getting phy %d",
+ rc);
+ goto err;
+ } else
+ phy_power_on(hpriv->port_phys[port]);
}
/*
@@ -4110,6 +4150,15 @@ static int mv_platform_probe(struct platform_device *pdev)
if (rc)
goto err;
+ /*
+ * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
+ * updated in the LP_PHY_CTL register.
+ */
+ if (pdev->dev.of_node &&
+ of_device_is_compatible(pdev->dev.of_node,
+ "marvell,armada-370-sata"))
+ hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
+
/* initialize adapter */
rc = mv_init_host(host);
if (rc)
@@ -4132,6 +4181,8 @@ err:
clk_disable_unprepare(hpriv->port_clks[port]);
clk_put(hpriv->port_clks[port]);
}
+ if (hpriv->port_phys[port])
+ phy_power_off(hpriv->port_phys[port]);
}
return rc;
@@ -4161,6 +4212,8 @@ static int mv_platform_remove(struct platform_device *pdev)
clk_disable_unprepare(hpriv->port_clks[port]);
clk_put(hpriv->port_clks[port]);
}
+ if (hpriv->port_phys[port])
+ phy_power_off(hpriv->port_phys[port]);
}
return 0;
}
@@ -4209,6 +4262,7 @@ static int mv_platform_resume(struct platform_device *pdev)
#ifdef CONFIG_OF
static struct of_device_id mv_sata_dt_ids[] = {
+ { .compatible = "marvell,armada-370-sata", },
{ .compatible = "marvell,orion-sata", },
{},
};
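sata_mv now picks up an optional per-port PHY with devm_phy_optional_get(): a missing PHY is not an error, and only a real failure (such as -EPROBE_DEFER) aborts the probe. A minimal sketch of the pattern, assuming the generic PHY framework treats an absent (NULL) optional PHY as a no-op, with a hypothetical helper:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static int example_enable_port_phy(struct platform_device *pdev, int port)
{
	struct phy *phy;
	char name[16];

	snprintf(name, sizeof(name), "port%d", port);
	phy = devm_phy_optional_get(&pdev->dev, name);
	if (IS_ERR(phy))
		return PTR_ERR(phy);	/* defer or genuine lookup failure */

	return phy_power_on(phy);	/* harmless when no PHY was described */
}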
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 1dae9a9009f7..2b25bd83fc9d 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/ata.h>
#include <linux/libata.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -123,12 +124,37 @@
#define SATA_RCAR_DMA_BOUNDARY 0x1FFFFFFEUL
+/* Gen2 Physical Layer Control Registers */
+#define RCAR_GEN2_PHY_CTL1_REG 0x1704
+#define RCAR_GEN2_PHY_CTL1 0x34180002
+#define RCAR_GEN2_PHY_CTL1_SS 0xC180 /* Spread Spectrum */
+
+#define RCAR_GEN2_PHY_CTL2_REG 0x170C
+#define RCAR_GEN2_PHY_CTL2 0x00002303
+
+#define RCAR_GEN2_PHY_CTL3_REG 0x171C
+#define RCAR_GEN2_PHY_CTL3 0x000B0194
+
+#define RCAR_GEN2_PHY_CTL4_REG 0x1724
+#define RCAR_GEN2_PHY_CTL4 0x00030994
+
+#define RCAR_GEN2_PHY_CTL5_REG 0x1740
+#define RCAR_GEN2_PHY_CTL5 0x03004001
+#define RCAR_GEN2_PHY_CTL5_DC BIT(1) /* DC connection */
+#define RCAR_GEN2_PHY_CTL5_TR BIT(2) /* Termination Resistor */
+
+enum sata_rcar_type {
+ RCAR_GEN1_SATA,
+ RCAR_GEN2_SATA,
+};
+
struct sata_rcar_priv {
void __iomem *base;
struct clk *clk;
+ enum sata_rcar_type type;
};
-static void sata_rcar_phy_initialize(struct sata_rcar_priv *priv)
+static void sata_rcar_gen1_phy_preinit(struct sata_rcar_priv *priv)
{
void __iomem *base = priv->base;
@@ -141,8 +167,8 @@ static void sata_rcar_phy_initialize(struct sata_rcar_priv *priv)
iowrite32(0, base + SATAPHYRESET_REG);
}
-static void sata_rcar_phy_write(struct sata_rcar_priv *priv, u16 reg, u32 val,
- int group)
+static void sata_rcar_gen1_phy_write(struct sata_rcar_priv *priv, u16 reg,
+ u32 val, int group)
{
void __iomem *base = priv->base;
int timeout;
@@ -170,6 +196,29 @@ static void sata_rcar_phy_write(struct sata_rcar_priv *priv, u16 reg, u32 val,
iowrite32(0, base + SATAPHYADDR_REG);
}
+static void sata_rcar_gen1_phy_init(struct sata_rcar_priv *priv)
+{
+ sata_rcar_gen1_phy_preinit(priv);
+ sata_rcar_gen1_phy_write(priv, SATAPCTLR1_REG, 0x00200188, 0);
+ sata_rcar_gen1_phy_write(priv, SATAPCTLR1_REG, 0x00200188, 1);
+ sata_rcar_gen1_phy_write(priv, SATAPCTLR3_REG, 0x0000A061, 0);
+ sata_rcar_gen1_phy_write(priv, SATAPCTLR2_REG, 0x20000000, 0);
+ sata_rcar_gen1_phy_write(priv, SATAPCTLR2_REG, 0x20000000, 1);
+ sata_rcar_gen1_phy_write(priv, SATAPCTLR4_REG, 0x28E80000, 0);
+}
+
+static void sata_rcar_gen2_phy_init(struct sata_rcar_priv *priv)
+{
+ void __iomem *base = priv->base;
+
+ iowrite32(RCAR_GEN2_PHY_CTL1, base + RCAR_GEN2_PHY_CTL1_REG);
+ iowrite32(RCAR_GEN2_PHY_CTL2, base + RCAR_GEN2_PHY_CTL2_REG);
+ iowrite32(RCAR_GEN2_PHY_CTL3, base + RCAR_GEN2_PHY_CTL3_REG);
+ iowrite32(RCAR_GEN2_PHY_CTL4, base + RCAR_GEN2_PHY_CTL4_REG);
+ iowrite32(RCAR_GEN2_PHY_CTL5 | RCAR_GEN2_PHY_CTL5_DC |
+ RCAR_GEN2_PHY_CTL5_TR, base + RCAR_GEN2_PHY_CTL5_REG);
+}
+
static void sata_rcar_freeze(struct ata_port *ap)
{
struct sata_rcar_priv *priv = ap->host->private_data;
@@ -738,13 +787,17 @@ static void sata_rcar_init_controller(struct ata_host *host)
u32 val;
/* reset and setup phy */
- sata_rcar_phy_initialize(priv);
- sata_rcar_phy_write(priv, SATAPCTLR1_REG, 0x00200188, 0);
- sata_rcar_phy_write(priv, SATAPCTLR1_REG, 0x00200188, 1);
- sata_rcar_phy_write(priv, SATAPCTLR3_REG, 0x0000A061, 0);
- sata_rcar_phy_write(priv, SATAPCTLR2_REG, 0x20000000, 0);
- sata_rcar_phy_write(priv, SATAPCTLR2_REG, 0x20000000, 1);
- sata_rcar_phy_write(priv, SATAPCTLR4_REG, 0x28E80000, 0);
+ switch (priv->type) {
+ case RCAR_GEN1_SATA:
+ sata_rcar_gen1_phy_init(priv);
+ break;
+ case RCAR_GEN2_SATA:
+ sata_rcar_gen2_phy_init(priv);
+ break;
+ default:
+ dev_warn(host->dev, "SATA phy is not initialized\n");
+ break;
+ }
/* SATA-IP reset state */
val = ioread32(base + ATAPI_CONTROL1_REG);
@@ -770,8 +823,40 @@ static void sata_rcar_init_controller(struct ata_host *host)
iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG);
}
+static struct of_device_id sata_rcar_match[] = {
+ {
+ /* Deprecated by "renesas,sata-r8a7779" */
+ .compatible = "renesas,rcar-sata",
+ .data = (void *)RCAR_GEN1_SATA,
+ },
+ {
+ .compatible = "renesas,sata-r8a7779",
+ .data = (void *)RCAR_GEN1_SATA,
+ },
+ {
+ .compatible = "renesas,sata-r8a7790",
+ .data = (void *)RCAR_GEN2_SATA
+ },
+ {
+ .compatible = "renesas,sata-r8a7791",
+ .data = (void *)RCAR_GEN2_SATA
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sata_rcar_match);
+
+static const struct platform_device_id sata_rcar_id_table[] = {
+ { "sata_rcar", RCAR_GEN1_SATA }, /* Deprecated by "sata-r8a7779" */
+ { "sata-r8a7779", RCAR_GEN1_SATA },
+ { "sata-r8a7790", RCAR_GEN2_SATA },
+ { "sata-r8a7791", RCAR_GEN2_SATA },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, sata_rcar_id_table);
+
static int sata_rcar_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id;
struct ata_host *host;
struct sata_rcar_priv *priv;
struct resource *mem;
@@ -787,6 +872,12 @@ static int sata_rcar_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ of_id = of_match_device(sata_rcar_match, &pdev->dev);
+ if (of_id)
+ priv->type = (enum sata_rcar_type)of_id->data;
+ else
+ priv->type = platform_get_device_id(pdev)->driver_data;
+
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "failed to get access to sata clock\n");
@@ -892,15 +983,10 @@ static const struct dev_pm_ops sata_rcar_pm_ops = {
};
#endif
-static struct of_device_id sata_rcar_match[] = {
- { .compatible = "renesas,rcar-sata", },
- {},
-};
-MODULE_DEVICE_TABLE(of, sata_rcar_match);
-
static struct platform_driver sata_rcar_driver = {
.probe = sata_rcar_probe,
.remove = sata_rcar_remove,
+ .id_table = sata_rcar_id_table,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index fe3ca0989b14..1ad2f62d34b9 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -83,6 +83,10 @@ static struct pci_driver sis_pci_driver = {
.id_table = sis_pci_tbl,
.probe = sis_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
};
static struct scsi_host_template sis_sht = {
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 8557adcd34ee..aa6be2698669 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -419,7 +419,6 @@ static void he_remove_one(struct pci_dev *pci_dev)
atm_dev_deregister(atm_dev);
kfree(he_dev);
- pci_set_drvdata(pci_dev, NULL);
pci_disable_device(pci_dev);
}
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 5aca5f4c5458..9587e959ce1a 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -52,6 +52,7 @@
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
+#include <linux/etherdevice.h>
#include "nicstar.h"
#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
#include "suni.h"
@@ -781,8 +782,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) {
nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
card->atmdev->esi, 6);
- if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
- 0) {
+ if (ether_addr_equal(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00")) {
nicstar_read_eprom(card->membase,
NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT,
card->atmdev->esi, 6);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 32784d18d1f7..e3fb496c7163 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -1335,7 +1335,6 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
out_unmap_both:
kfree(card->dma_bounce);
- pci_set_drvdata(dev, NULL);
pci_iounmap(dev, card->buffers);
out_unmap_config:
pci_iounmap(dev, card->config_regs);
@@ -1457,7 +1456,6 @@ static void fpga_remove(struct pci_dev *dev)
pci_release_regions(dev);
pci_disable_device(dev);
- pci_set_drvdata(dev, NULL);
kfree(card);
}
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 94e8a80e87f8..04b314e0fa51 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -1,10 +1,10 @@
# Makefile for the Linux device tree
-obj-y := core.o bus.o dd.o syscore.o \
+obj-y := component.o core.o bus.o dd.o syscore.o \
driver.o class.o platform.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o \
- topology.o
+ topology.o container.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
obj-y += power/
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 2cbc6774f4cd..24f424249d9b 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -100,6 +100,7 @@ static inline int hypervisor_init(void) { return 0; }
#endif
extern int platform_bus_init(void);
extern void cpu_dev_init(void);
+extern void container_dev_init(void);
struct kobject *virtual_device_parent(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 1db22d3c4036..83e910a57563 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -146,8 +146,19 @@ void bus_remove_file(struct bus_type *bus, struct bus_attribute *attr)
}
EXPORT_SYMBOL_GPL(bus_remove_file);
+static void bus_release(struct kobject *kobj)
+{
+ struct subsys_private *priv =
+ container_of(kobj, typeof(*priv), subsys.kobj);
+ struct bus_type *bus = priv->bus;
+
+ kfree(priv);
+ bus->p = NULL;
+}
+
static struct kobj_type bus_ktype = {
.sysfs_ops = &bus_sysfs_ops,
+ .release = bus_release,
};
static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
@@ -953,8 +964,6 @@ void bus_unregister(struct bus_type *bus)
kset_unregister(bus->p->devices_kset);
bus_remove_file(bus, &bus_attr_uevent);
kset_unregister(&bus->p->subsys);
- kfree(bus->p);
- bus->p = NULL;
}
EXPORT_SYMBOL_GPL(bus_unregister);
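The bus.c change moves freeing of the subsys_private structure out of bus_unregister() and into the kobject release callback, so the allocation stays valid until the last sysfs reference is dropped. A minimal sketch of that kobj_type pattern with hypothetical names:

#include <linux/kobject.h>
#include <linux/slab.h>

struct example_priv {
	struct kobject kobj;
	/* ... whatever the subsystem needs ... */
};

static void example_release(struct kobject *kobj)
{
	/* runs only after every reference (open sysfs files etc.) is gone */
	kfree(container_of(kobj, struct example_priv, kobj));
}

static struct kobj_type example_ktype = {
	.release = example_release,
};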
diff --git a/drivers/base/component.c b/drivers/base/component.c
new file mode 100644
index 000000000000..c4778995cd72
--- /dev/null
+++ b/drivers/base/component.c
@@ -0,0 +1,390 @@
+/*
+ * Componentized device handling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is work in progress. We gather up the component devices into a list,
+ * and bind them when instructed. At the moment, we're specific to the DRM
+ * subsystem, and only handles one master device, but this doesn't have to be
+ * the case.
+ */
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+struct master {
+ struct list_head node;
+ struct list_head components;
+ bool bound;
+
+ const struct component_master_ops *ops;
+ struct device *dev;
+};
+
+struct component {
+ struct list_head node;
+ struct list_head master_node;
+ struct master *master;
+ bool bound;
+
+ const struct component_ops *ops;
+ struct device *dev;
+};
+
+static DEFINE_MUTEX(component_mutex);
+static LIST_HEAD(component_list);
+static LIST_HEAD(masters);
+
+static struct master *__master_find(struct device *dev,
+ const struct component_master_ops *ops)
+{
+ struct master *m;
+
+ list_for_each_entry(m, &masters, node)
+ if (m->dev == dev && (!ops || m->ops == ops))
+ return m;
+
+ return NULL;
+}
+
+/* Attach an unattached component to a master. */
+static void component_attach_master(struct master *master, struct component *c)
+{
+ c->master = master;
+
+ list_add_tail(&c->master_node, &master->components);
+}
+
+/* Detach a component from a master. */
+static void component_detach_master(struct master *master, struct component *c)
+{
+ list_del(&c->master_node);
+
+ c->master = NULL;
+}
+
+int component_master_add_child(struct master *master,
+ int (*compare)(struct device *, void *), void *compare_data)
+{
+ struct component *c;
+ int ret = -ENXIO;
+
+ list_for_each_entry(c, &component_list, node) {
+ if (c->master)
+ continue;
+
+ if (compare(c->dev, compare_data)) {
+ component_attach_master(master, c);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(component_master_add_child);
+
+/* Detach all attached components from this master */
+static void master_remove_components(struct master *master)
+{
+ while (!list_empty(&master->components)) {
+ struct component *c = list_first_entry(&master->components,
+ struct component, master_node);
+
+ WARN_ON(c->master != master);
+
+ component_detach_master(master, c);
+ }
+}
+
+/*
+ * Try to bring up a master. If component is NULL, we're interested in
+ * this master, otherwise it's a component which must be present to try
+ * and bring up the master.
+ *
+ * Returns 1 for successful bringup, 0 if not ready, or -ve errno.
+ */
+static int try_to_bring_up_master(struct master *master,
+ struct component *component)
+{
+ int ret = 0;
+
+ if (!master->bound) {
+ /*
+ * Search the list of components, looking for components that
+ * belong to this master, and attach them to the master.
+ */
+ if (master->ops->add_components(master->dev, master)) {
+ /* Failed to find all components */
+ master_remove_components(master);
+ ret = 0;
+ goto out;
+ }
+
+ if (component && component->master != master) {
+ master_remove_components(master);
+ ret = 0;
+ goto out;
+ }
+
+ if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Found all components */
+ ret = master->ops->bind(master->dev);
+ if (ret < 0) {
+ devres_release_group(master->dev, NULL);
+ dev_info(master->dev, "master bind failed: %d\n", ret);
+ master_remove_components(master);
+ goto out;
+ }
+
+ master->bound = true;
+ ret = 1;
+ }
+out:
+
+ return ret;
+}
+
+static int try_to_bring_up_masters(struct component *component)
+{
+ struct master *m;
+ int ret = 0;
+
+ list_for_each_entry(m, &masters, node) {
+ ret = try_to_bring_up_master(m, component);
+ if (ret != 0)
+ break;
+ }
+
+ return ret;
+}
+
+static void take_down_master(struct master *master)
+{
+ if (master->bound) {
+ master->ops->unbind(master->dev);
+ devres_release_group(master->dev, NULL);
+ master->bound = false;
+ }
+
+ master_remove_components(master);
+}
+
+int component_master_add(struct device *dev,
+ const struct component_master_ops *ops)
+{
+ struct master *master;
+ int ret;
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->dev = dev;
+ master->ops = ops;
+ INIT_LIST_HEAD(&master->components);
+
+ /* Add to the list of available masters. */
+ mutex_lock(&component_mutex);
+ list_add(&master->node, &masters);
+
+ ret = try_to_bring_up_master(master, NULL);
+
+ if (ret < 0) {
+ /* Delete off the list if we weren't successful */
+ list_del(&master->node);
+ kfree(master);
+ }
+ mutex_unlock(&component_mutex);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(component_master_add);
+
+void component_master_del(struct device *dev,
+ const struct component_master_ops *ops)
+{
+ struct master *master;
+
+ mutex_lock(&component_mutex);
+ master = __master_find(dev, ops);
+ if (master) {
+ take_down_master(master);
+
+ list_del(&master->node);
+ kfree(master);
+ }
+ mutex_unlock(&component_mutex);
+}
+EXPORT_SYMBOL_GPL(component_master_del);
+
+static void component_unbind(struct component *component,
+ struct master *master, void *data)
+{
+ WARN_ON(!component->bound);
+
+ component->ops->unbind(component->dev, master->dev, data);
+ component->bound = false;
+
+ /* Release all resources claimed in the binding of this component */
+ devres_release_group(component->dev, component);
+}
+
+void component_unbind_all(struct device *master_dev, void *data)
+{
+ struct master *master;
+ struct component *c;
+
+ WARN_ON(!mutex_is_locked(&component_mutex));
+
+ master = __master_find(master_dev, NULL);
+ if (!master)
+ return;
+
+ list_for_each_entry_reverse(c, &master->components, master_node)
+ component_unbind(c, master, data);
+}
+EXPORT_SYMBOL_GPL(component_unbind_all);
+
+static int component_bind(struct component *component, struct master *master,
+ void *data)
+{
+ int ret;
+
+ /*
+ * Each component initialises inside its own devres group.
+ * This allows us to roll-back a failed component without
+ * affecting anything else.
+ */
+ if (!devres_open_group(master->dev, NULL, GFP_KERNEL))
+ return -ENOMEM;
+
+ /*
+ * Also open a group for the device itself: this allows us
+ * to release the resources claimed against the sub-device
+ * at the appropriate moment.
+ */
+ if (!devres_open_group(component->dev, component, GFP_KERNEL)) {
+ devres_release_group(master->dev, NULL);
+ return -ENOMEM;
+ }
+
+ dev_dbg(master->dev, "binding %s (ops %ps)\n",
+ dev_name(component->dev), component->ops);
+
+ ret = component->ops->bind(component->dev, master->dev, data);
+ if (!ret) {
+ component->bound = true;
+
+ /*
+ * Close the component device's group so that resources
+ * allocated in the binding are encapsulated for removal
+ * at unbind. Remove the group on the DRM device as we
+ * can clean those resources up independently.
+ */
+ devres_close_group(component->dev, NULL);
+ devres_remove_group(master->dev, NULL);
+
+ dev_info(master->dev, "bound %s (ops %ps)\n",
+ dev_name(component->dev), component->ops);
+ } else {
+ devres_release_group(component->dev, NULL);
+ devres_release_group(master->dev, NULL);
+
+ dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
+ dev_name(component->dev), component->ops, ret);
+ }
+
+ return ret;
+}
+
+int component_bind_all(struct device *master_dev, void *data)
+{
+ struct master *master;
+ struct component *c;
+ int ret = 0;
+
+ WARN_ON(!mutex_is_locked(&component_mutex));
+
+ master = __master_find(master_dev, NULL);
+ if (!master)
+ return -EINVAL;
+
+ list_for_each_entry(c, &master->components, master_node) {
+ ret = component_bind(c, master, data);
+ if (ret)
+ break;
+ }
+
+ if (ret != 0) {
+ list_for_each_entry_continue_reverse(c, &master->components,
+ master_node)
+ component_unbind(c, master, data);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(component_bind_all);
+
+int component_add(struct device *dev, const struct component_ops *ops)
+{
+ struct component *component;
+ int ret;
+
+ component = kzalloc(sizeof(*component), GFP_KERNEL);
+ if (!component)
+ return -ENOMEM;
+
+ component->ops = ops;
+ component->dev = dev;
+
+ dev_dbg(dev, "adding component (ops %ps)\n", ops);
+
+ mutex_lock(&component_mutex);
+ list_add_tail(&component->node, &component_list);
+
+ ret = try_to_bring_up_masters(component);
+ if (ret < 0) {
+ list_del(&component->node);
+
+ kfree(component);
+ }
+ mutex_unlock(&component_mutex);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(component_add);
+
+void component_del(struct device *dev, const struct component_ops *ops)
+{
+ struct component *c, *component = NULL;
+
+ mutex_lock(&component_mutex);
+ list_for_each_entry(c, &component_list, node)
+ if (c->dev == dev && c->ops == ops) {
+ list_del(&c->node);
+ component = c;
+ break;
+ }
+
+ if (component && component->master)
+ take_down_master(component->master);
+
+ mutex_unlock(&component_mutex);
+
+ WARN_ON(!component);
+ kfree(component);
+}
+EXPORT_SYMBOL_GPL(component_del);
+
+MODULE_LICENSE("GPL v2");
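component.c above introduces the matching machinery: sub-devices register themselves with component_add(), the master registers with component_master_add(), and the master's add_components() callback claims its children via component_master_add_child(). A minimal sketch of how a hypothetical component driver would plug into this API, using only the callbacks defined in the new file (all example_* names are made up):

#include <linux/component.h>
#include <linux/device.h>
#include <linux/platform_device.h>

static int example_bind(struct device *dev, struct device *master, void *data)
{
	/* Real setup happens here, once the master driver is ready. */
	dev_info(dev, "bound to master %s\n", dev_name(master));
	return 0;
}

static void example_unbind(struct device *dev, struct device *master,
			   void *data)
{
	dev_info(dev, "unbound from %s\n", dev_name(master));
}

static const struct component_ops example_component_ops = {
	.bind	= example_bind,
	.unbind	= example_unbind,
};

static int example_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &example_component_ops);
}

static int example_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &example_component_ops);
	return 0;
}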
diff --git a/drivers/base/container.c b/drivers/base/container.c
new file mode 100644
index 000000000000..ecbfbe2e908f
--- /dev/null
+++ b/drivers/base/container.c
@@ -0,0 +1,44 @@
+/*
+ * System bus type for containers.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/container.h>
+
+#include "base.h"
+
+#define CONTAINER_BUS_NAME "container"
+
+static int trivial_online(struct device *dev)
+{
+ return 0;
+}
+
+static int container_offline(struct device *dev)
+{
+ struct container_dev *cdev = to_container_dev(dev);
+
+ return cdev->offline ? cdev->offline(cdev) : 0;
+}
+
+struct bus_type container_subsys = {
+ .name = CONTAINER_BUS_NAME,
+ .dev_name = CONTAINER_BUS_NAME,
+ .online = trivial_online,
+ .offline = container_offline,
+};
+
+void __init container_dev_init(void)
+{
+ int ret;
+
+ ret = subsys_system_register(&container_subsys, NULL);
+ if (ret)
+ pr_err("%s() failed: %d\n", __func__, ret);
+}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 67b180d855b2..2b567177ef78 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -491,11 +491,13 @@ static int device_add_attrs(struct device *dev)
if (device_supports_offline(dev) && !dev->offline_disabled) {
error = device_create_file(dev, &dev_attr_online);
if (error)
- goto err_remove_type_groups;
+ goto err_remove_dev_groups;
}
return 0;
+ err_remove_dev_groups:
+ device_remove_groups(dev, dev->groups);
err_remove_type_groups:
if (type)
device_remove_groups(dev, type->groups);
@@ -1603,6 +1605,7 @@ device_create_groups_vargs(struct class *class, struct device *parent,
goto error;
}
+ device_initialize(dev);
dev->devt = devt;
dev->class = class;
dev->parent = parent;
@@ -1614,7 +1617,7 @@ device_create_groups_vargs(struct class *class, struct device *parent,
if (retval)
goto error;
- retval = device_register(dev);
+ retval = device_add(dev);
if (retval)
goto error;
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 0f3820121e02..25798db14553 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -299,7 +299,7 @@ static int handle_remove(const char *nodename, struct device *dev)
{
struct path parent;
struct dentry *dentry;
- int deleted = 1;
+ int deleted = 0;
int err;
dentry = kern_path_locked(nodename, &parent);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index eb8fb94ae2c5..8a97ddfa6122 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -96,6 +96,15 @@ static inline long firmware_loading_timeout(void)
return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
}
+/* firmware behavior options */
+#define FW_OPT_UEVENT (1U << 0)
+#define FW_OPT_NOWAIT (1U << 1)
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+#define FW_OPT_FALLBACK (1U << 2)
+#else
+#define FW_OPT_FALLBACK 0
+#endif
+
struct firmware_cache {
/* firmware_buf instance will be added into the below list */
spinlock_t lock;
@@ -219,6 +228,7 @@ static int fw_lookup_and_allocate_buf(const char *fw_name,
}
static void __fw_free_buf(struct kref *ref)
+ __releases(&fwc->lock)
{
struct firmware_buf *buf = to_fwbuf(ref);
struct firmware_cache *fwc = buf->fwc;
@@ -270,21 +280,21 @@ module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
/* Don't inline this: 'struct kstat' is biggish */
-static noinline_for_stack long fw_file_size(struct file *file)
+static noinline_for_stack int fw_file_size(struct file *file)
{
struct kstat st;
if (vfs_getattr(&file->f_path, &st))
return -1;
if (!S_ISREG(st.mode))
return -1;
- if (st.size != (long)st.size)
+ if (st.size != (int)st.size)
return -1;
return st.size;
}
static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
{
- long size;
+ int size;
char *buf;
int rc;
@@ -820,7 +830,7 @@ static void firmware_class_timeout_work(struct work_struct *work)
static struct firmware_priv *
fw_create_instance(struct firmware *firmware, const char *fw_name,
- struct device *device, bool uevent, bool nowait)
+ struct device *device, unsigned int opt_flags)
{
struct firmware_priv *fw_priv;
struct device *f_dev;
@@ -832,7 +842,7 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
goto exit;
}
- fw_priv->nowait = nowait;
+ fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
fw_priv->fw = firmware;
INIT_DELAYED_WORK(&fw_priv->timeout_work,
firmware_class_timeout_work);
@@ -848,8 +858,8 @@ exit:
}
/* load a firmware via user helper */
-static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
- long timeout)
+static int _request_firmware_load(struct firmware_priv *fw_priv,
+ unsigned int opt_flags, long timeout)
{
int retval = 0;
struct device *f_dev = &fw_priv->dev;
@@ -885,7 +895,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
goto err_del_bin_attr;
}
- if (uevent) {
+ if (opt_flags & FW_OPT_UEVENT) {
buf->need_uevent = true;
dev_set_uevent_suppress(f_dev, false);
dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
@@ -911,16 +921,16 @@ err_put_dev:
static int fw_load_from_user_helper(struct firmware *firmware,
const char *name, struct device *device,
- bool uevent, bool nowait, long timeout)
+ unsigned int opt_flags, long timeout)
{
struct firmware_priv *fw_priv;
- fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
+ fw_priv = fw_create_instance(firmware, name, device, opt_flags);
if (IS_ERR(fw_priv))
return PTR_ERR(fw_priv);
fw_priv->buf = firmware->priv;
- return _request_firmware_load(fw_priv, uevent, timeout);
+ return _request_firmware_load(fw_priv, opt_flags, timeout);
}
#ifdef CONFIG_PM_SLEEP
@@ -942,7 +952,7 @@ static void kill_requests_without_uevent(void)
#else /* CONFIG_FW_LOADER_USER_HELPER */
static inline int
fw_load_from_user_helper(struct firmware *firmware, const char *name,
- struct device *device, bool uevent, bool nowait,
+ struct device *device, unsigned int opt_flags,
long timeout)
{
return -ENOENT;
@@ -1023,7 +1033,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
}
static int assign_firmware_buf(struct firmware *fw, struct device *device,
- bool skip_cache)
+ unsigned int opt_flags)
{
struct firmware_buf *buf = fw->priv;
@@ -1040,7 +1050,8 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
* device may has been deleted already, but the problem
* should be fixed in devres or driver core.
*/
- if (device && !skip_cache)
+ /* don't cache firmware handled without uevent */
+ if (device && (opt_flags & FW_OPT_UEVENT))
fw_add_devm_name(device, buf->fw_id);
/*
@@ -1061,7 +1072,7 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
- struct device *device, bool uevent, bool nowait)
+ struct device *device, unsigned int opt_flags)
{
struct firmware *fw;
long timeout;
@@ -1076,7 +1087,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
ret = 0;
timeout = firmware_loading_timeout();
- if (nowait) {
+ if (opt_flags & FW_OPT_NOWAIT) {
timeout = usermodehelper_read_lock_wait(timeout);
if (!timeout) {
dev_dbg(device, "firmware: %s loading timed out\n",
@@ -1095,16 +1106,18 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
ret = fw_get_filesystem_firmware(device, fw->priv);
if (ret) {
- dev_warn(device, "Direct firmware load failed with error %d\n",
- ret);
- dev_warn(device, "Falling back to user helper\n");
- ret = fw_load_from_user_helper(fw, name, device,
- uevent, nowait, timeout);
+ if (opt_flags & FW_OPT_FALLBACK) {
+ dev_warn(device,
+ "Direct firmware load failed with error %d\n",
+ ret);
+ dev_warn(device, "Falling back to user helper\n");
+ ret = fw_load_from_user_helper(fw, name, device,
+ opt_flags, timeout);
+ }
}
- /* don't cache firmware handled without uevent */
if (!ret)
- ret = assign_firmware_buf(fw, device, !uevent);
+ ret = assign_firmware_buf(fw, device, opt_flags);
usermodehelper_read_unlock();
@@ -1146,12 +1159,37 @@ request_firmware(const struct firmware **firmware_p, const char *name,
/* Need to pin this module until return */
__module_get(THIS_MODULE);
- ret = _request_firmware(firmware_p, name, device, true, false);
+ ret = _request_firmware(firmware_p, name, device,
+ FW_OPT_UEVENT | FW_OPT_FALLBACK);
module_put(THIS_MODULE);
return ret;
}
EXPORT_SYMBOL(request_firmware);
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+/**
+ * request_firmware_direct: - load firmware directly without usermode helper
+ * @firmware_p: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ *
+ * This function works pretty much like request_firmware(), but it doesn't
+ * fall back to the usermode helper even if the firmware couldn't be loaded
+ * directly from the filesystem. Hence it's useful for loading optional
+ * firmware images that aren't always present, without the long udev timeouts.
+ **/
+int request_firmware_direct(const struct firmware **firmware_p,
+ const char *name, struct device *device)
+{
+ int ret;
+ __module_get(THIS_MODULE);
+ ret = _request_firmware(firmware_p, name, device, FW_OPT_UEVENT);
+ module_put(THIS_MODULE);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(request_firmware_direct);
+#endif
+
/**
* release_firmware: - release the resource associated with a firmware image
* @fw: firmware resource to release
@@ -1174,7 +1212,7 @@ struct firmware_work {
struct device *device;
void *context;
void (*cont)(const struct firmware *fw, void *context);
- bool uevent;
+ unsigned int opt_flags;
};
static void request_firmware_work_func(struct work_struct *work)
@@ -1185,7 +1223,7 @@ static void request_firmware_work_func(struct work_struct *work)
fw_work = container_of(work, struct firmware_work, work);
_request_firmware(&fw, fw_work->name, fw_work->device,
- fw_work->uevent, true);
+ fw_work->opt_flags);
fw_work->cont(fw, fw_work->context);
put_device(fw_work->device); /* taken in request_firmware_nowait() */
@@ -1233,7 +1271,8 @@ request_firmware_nowait(
fw_work->device = device;
fw_work->context = context;
fw_work->cont = cont;
- fw_work->uevent = uevent;
+ fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
+ (uevent ? FW_OPT_UEVENT : 0);
if (!try_module_get(module)) {
kfree(fw_work);
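For orientation, a minimal sketch of how a driver might use the request_firmware_direct() interface introduced above; the device pointer, firmware file name, and helper name are illustrative only, and error handling is kept to the bare minimum:

#include <linux/device.h>
#include <linux/firmware.h>

/* Hypothetical helper: try an optional firmware image without triggering
 * the usermode-helper fallback, so a missing file fails fast instead of
 * waiting out the loading timeout. */
static int example_load_optional_fw(struct device *dev)
{
	const struct firmware *fw;
	int err;

	err = request_firmware_direct(&fw, "vendor/optional.bin", dev);
	if (err)
		return 0;	/* image is optional; carry on without it */

	/* ... program fw->data (fw->size bytes) into the hardware ... */

	release_firmware(fw);
	return 0;
}

By contrast, request_firmware() still passes FW_OPT_FALLBACK and so may invoke the usermode helper when the direct filesystem load fails.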
diff --git a/drivers/base/init.c b/drivers/base/init.c
index c16f0b808a17..da033d3bab3c 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -33,4 +33,5 @@ void __init driver_init(void)
platform_bus_init();
cpu_dev_init();
memory_dev_init();
+ container_dev_init();
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 3a94b799f166..bc78848dd59a 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -677,7 +677,17 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
- int len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
+ int len;
+
+ len = of_device_get_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
+
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
+
+ len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
@@ -699,6 +709,10 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
if (rc != -ENODEV)
return rc;
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+
add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
pdev->name);
return 0;
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 9d8fde709390..e870bbe9ec4e 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -33,6 +33,21 @@ struct pm_clock_entry {
};
/**
+ * __pm_clk_enable - Enable a clock, reporting any errors
+ * @dev: The device for the given clock
+ * @clk: The clock being enabled.
+ */
+static inline int __pm_clk_enable(struct device *dev, struct clk *clk)
+{
+ int ret = clk_enable(clk);
+ if (ret)
+ dev_err(dev, "%s: failed to enable clk %p, error %d\n",
+ __func__, clk, ret);
+
+ return ret;
+}
+
+/**
* pm_clk_acquire - Acquire a device clock.
* @dev: Device whose clock is to be acquired.
* @ce: PM clock entry corresponding to the clock.
@@ -43,6 +58,7 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
if (IS_ERR(ce->clk)) {
ce->status = PCE_STATUS_ERROR;
} else {
+ clk_prepare(ce->clk);
ce->status = PCE_STATUS_ACQUIRED;
dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
}
@@ -99,10 +115,12 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
if (ce->status < PCE_STATUS_ERROR) {
if (ce->status == PCE_STATUS_ENABLED)
- clk_disable_unprepare(ce->clk);
+ clk_disable(ce->clk);
- if (ce->status >= PCE_STATUS_ACQUIRED)
+ if (ce->status >= PCE_STATUS_ACQUIRED) {
+ clk_unprepare(ce->clk);
clk_put(ce->clk);
+ }
}
kfree(ce->con_id);
@@ -249,6 +267,7 @@ int pm_clk_resume(struct device *dev)
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -259,8 +278,9 @@ int pm_clk_resume(struct device *dev)
list_for_each_entry(ce, &psd->clock_list, node) {
if (ce->status < PCE_STATUS_ERROR) {
- clk_enable(ce->clk);
- ce->status = PCE_STATUS_ENABLED;
+ ret = __pm_clk_enable(dev, ce->clk);
+ if (!ret)
+ ce->status = PCE_STATUS_ENABLED;
}
}
@@ -376,7 +396,7 @@ int pm_clk_resume(struct device *dev)
spin_lock_irqsave(&psd->lock, flags);
list_for_each_entry(ce, &psd->clock_list, node)
- clk_enable(ce->clk);
+ __pm_clk_enable(dev, ce->clk);
spin_unlock_irqrestore(&psd->lock, flags);
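As a rough illustration of the pattern the clk_prepare()/clk_enable() split above supports, here is a hedged sketch of a consumer driver using the pm_clk helpers; the connection ID "fck" and the probe function are assumptions, not taken from this patch:

#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
	int err;

	err = pm_clk_create(&pdev->dev);
	if (err)
		return err;

	/* clk_get() and (with this change) clk_prepare() happen here,
	 * in sleepable context ... */
	err = pm_clk_add(&pdev->dev, "fck");	/* "fck" is an assumed con_id */
	if (err) {
		pm_clk_destroy(&pdev->dev);
		return err;
	}

	/* ... so runtime suspend/resume only needs clk_disable()/clk_enable(),
	 * which are safe with interrupts off. */
	pm_runtime_enable(&pdev->dev);
	return 0;
}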
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 5ee030a864f9..a2e55bfdf572 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -10,7 +10,7 @@
#include <linux/pm_runtime.h>
#include <linux/export.h>
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
/**
* pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
* @dev: Device to suspend.
@@ -48,7 +48,7 @@ int pm_generic_runtime_resume(struct device *dev)
return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/**
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 763c60d3d277..82692068d3cb 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -113,7 +113,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
* OR if there is masked interrupt which hasn't been Acked,
* it'll be ignored in irq handler, then may introduce irq storm
*/
- if (d->mask_buf[i] && d->chip->ack_base) {
+ if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
reg = d->chip->ack_base +
(i * map->reg_stride * d->irq_reg_stride);
ret = regmap_write(map, reg, d->mask_buf[i]);
@@ -271,7 +271,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
for (i = 0; i < data->chip->num_regs; i++) {
data->status_buf[i] &= ~data->mask_buf[i];
- if (data->status_buf[i] && chip->ack_base) {
+ if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
reg = chip->ack_base +
(i * map->reg_stride * data->irq_reg_stride);
ret = regmap_write(map, reg, data->status_buf[i]);
@@ -448,7 +448,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
goto err_alloc;
}
- if (d->status_buf[i] && chip->ack_base) {
+ if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
reg = chip->ack_base +
(i * map->reg_stride * d->irq_reg_stride);
ret = regmap_write(map, reg,
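The ack_base-or-use_ack test above lets a chip whose acknowledge register lives at offset 0 still be acked (previously ack_base == 0 was read as "no ack register"). A hedged sketch of such a chip description follows; all register offsets and the IRQ layout are made up for illustration:

#include <linux/regmap.h>

static const struct regmap_irq example_irqs[] = {
	{ .reg_offset = 0, .mask = BIT(0) },	/* hypothetical "done" IRQ */
};

static const struct regmap_irq_chip example_irq_chip = {
	.name		= "example",
	.status_base	= 0x00,
	.mask_base	= 0x04,
	.ack_base	= 0x00,	/* offset 0 used to be treated as "no ack reg" */
	.use_ack	= true,	/* force the ack write anyway */
	.num_regs	= 1,
	.irqs		= example_irqs,
	.num_irqs	= ARRAY_SIZE(example_irqs),
};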
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index c2e002100949..6a19515f8a45 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1514,21 +1514,49 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
{
int ret = 0, i;
size_t val_bytes = map->format.val_bytes;
- void *wval;
- if (!map->bus)
- return -EINVAL;
- if (!map->format.parse_inplace)
+ if (map->bus && !map->format.parse_inplace)
return -EINVAL;
if (reg % map->reg_stride)
return -EINVAL;
map->lock(map->lock_arg);
+ /*
+ * Some devices don't support bulk write, for
+ * them we have a series of single write operations.
+ */
+ if (!map->bus || map->use_single_rw) {
+ for (i = 0; i < val_count; i++) {
+ unsigned int ival;
+
+ switch (val_bytes) {
+ case 1:
+ ival = *(u8 *)(val + (i * val_bytes));
+ break;
+ case 2:
+ ival = *(u16 *)(val + (i * val_bytes));
+ break;
+ case 4:
+ ival = *(u32 *)(val + (i * val_bytes));
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ ival = *(u64 *)(val + (i * val_bytes));
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
- /* No formatting is require if val_byte is 1 */
- if (val_bytes == 1) {
- wval = (void *)val;
+ ret = _regmap_write(map, reg + (i * map->reg_stride),
+ ival);
+ if (ret != 0)
+ goto out;
+ }
} else {
+ void *wval;
+
wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
if (!wval) {
ret = -ENOMEM;
@@ -1537,27 +1565,11 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
}
for (i = 0; i < val_count * val_bytes; i += val_bytes)
map->format.parse_inplace(wval + i);
- }
- /*
- * Some devices does not support bulk write, for
- * them we have a series of single write operations.
- */
- if (map->use_single_rw) {
- for (i = 0; i < val_count; i++) {
- ret = _regmap_raw_write(map,
- reg + (i * map->reg_stride),
- val + (i * val_bytes),
- val_bytes);
- if (ret != 0)
- goto out;
- }
- } else {
+
ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
- }
- if (val_bytes != 1)
kfree(wval);
-
+ }
out:
map->unlock(map->lock_arg);
return ret;
@@ -1897,14 +1909,10 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_bytes = map->format.val_bytes;
bool vol = regmap_volatile_range(map, reg, val_count);
- if (!map->bus)
- return -EINVAL;
- if (!map->format.parse_inplace)
- return -EINVAL;
if (reg % map->reg_stride)
return -EINVAL;
- if (vol || map->cache_type == REGCACHE_NONE) {
+ if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
/*
* Some devices does not support bulk read, for
* them we have a series of single read operations.
@@ -2173,6 +2181,10 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
int i, ret;
bool bypass;
+ if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
+ num_regs))
+ return 0;
+
map->lock(map->lock_arg);
bypass = map->cache_bypass;
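A hedged usage sketch for the reworked regmap_bulk_write() path: on a regmap with use_single_rw set (or one with no bus at all), the call now decomposes into one regmap_write() per register, as the hunk above shows. The register base and values are invented for illustration:

#include <linux/kernel.h>
#include <linux/regmap.h>

static int example_write_coeffs(struct regmap *map)
{
	/* Four 16-bit coefficients in consecutive registers starting at 0x10;
	 * with use_single_rw these become four individual register writes. */
	static const u16 coeffs[] = { 0x0001, 0x0203, 0x0405, 0x0607 };

	return regmap_bulk_write(map, 0x10, coeffs, ARRAY_SIZE(coeffs));
}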
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 7c081b38ef3e..0ee48be23837 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -75,6 +75,7 @@ config BCMA_DRIVER_GMAC_CMN
config BCMA_DRIVER_GPIO
bool "BCMA GPIO driver"
depends on BCMA && GPIOLIB
+ select IRQ_DOMAIN if BCMA_HOST_SOC
help
Driver to provide access to the GPIO pins of the bcma bus.
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 0215f9ad755c..09b632ad0fe2 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -33,8 +33,6 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
int bcma_bus_suspend(struct bcma_bus *bus);
int bcma_bus_resume(struct bcma_bus *bus);
#endif
-struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
- u8 unit);
/* scan.c */
int bcma_bus_scan(struct bcma_bus *bus);
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index 4d07cce9c5d9..7e11ef4cb7db 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -38,7 +38,7 @@ static const struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
{ "M25P32", 0x15, 0x10000, 64, },
{ "M25P64", 0x16, 0x10000, 128, },
{ "M25FL128", 0x17, 0x10000, 256, },
- { 0 },
+ { NULL },
};
static const struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
@@ -56,7 +56,7 @@ static const struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
{ "SST25VF016", 0x41, 0x1000, 512, },
{ "SST25VF032", 0x4a, 0x1000, 1024, },
{ "SST25VF064", 0x4b, 0x1000, 2048, },
- { 0 },
+ { NULL },
};
static const struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
@@ -67,7 +67,7 @@ static const struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
{ "AT45DB161", 0x2c, 512, 4096, },
{ "AT45DB321", 0x34, 512, 8192, },
{ "AT45DB642", 0x3c, 1024, 8192, },
- { 0 },
+ { NULL },
};
static void bcma_sflash_cmd(struct bcma_drv_cc *cc, u32 opcode)
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 45f0996a3752..25f9887a35d0 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -9,6 +9,9 @@
*/
#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/bcma/bcma.h>
@@ -73,19 +76,136 @@ static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
bcma_chipco_gpio_pullup(cc, 1 << gpio, 0);
}
+#if IS_BUILTIN(CONFIG_BCMA_HOST_SOC)
static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
{
struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
- return bcma_core_irq(cc->core);
+ return irq_find_mapping(cc->irq_domain, gpio);
else
return -EINVAL;
}
+static void bcma_gpio_irq_unmask(struct irq_data *d)
+{
+ struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+ int gpio = irqd_to_hwirq(d);
+ u32 val = bcma_chipco_gpio_in(cc, BIT(gpio));
+
+ bcma_chipco_gpio_polarity(cc, BIT(gpio), val);
+ bcma_chipco_gpio_intmask(cc, BIT(gpio), BIT(gpio));
+}
+
+static void bcma_gpio_irq_mask(struct irq_data *d)
+{
+ struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+ int gpio = irqd_to_hwirq(d);
+
+ bcma_chipco_gpio_intmask(cc, BIT(gpio), 0);
+}
+
+static struct irq_chip bcma_gpio_irq_chip = {
+ .name = "BCMA-GPIO",
+ .irq_mask = bcma_gpio_irq_mask,
+ .irq_unmask = bcma_gpio_irq_unmask,
+};
+
+static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
+{
+ struct bcma_drv_cc *cc = dev_id;
+ u32 val = bcma_cc_read32(cc, BCMA_CC_GPIOIN);
+ u32 mask = bcma_cc_read32(cc, BCMA_CC_GPIOIRQ);
+ u32 pol = bcma_cc_read32(cc, BCMA_CC_GPIOPOL);
+ unsigned long irqs = (val ^ pol) & mask;
+ int gpio;
+
+ if (!irqs)
+ return IRQ_NONE;
+
+ for_each_set_bit(gpio, &irqs, cc->gpio.ngpio)
+ generic_handle_irq(bcma_gpio_to_irq(&cc->gpio, gpio));
+ bcma_chipco_gpio_polarity(cc, irqs, val & irqs);
+
+ return IRQ_HANDLED;
+}
+
+static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+{
+ struct gpio_chip *chip = &cc->gpio;
+ int gpio, hwirq, err;
+
+ if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
+ return 0;
+
+ cc->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
+ &irq_domain_simple_ops, cc);
+ if (!cc->irq_domain) {
+ err = -ENODEV;
+ goto err_irq_domain;
+ }
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_create_mapping(cc->irq_domain, gpio);
+
+ irq_set_chip_data(irq, cc);
+ irq_set_chip_and_handler(irq, &bcma_gpio_irq_chip,
+ handle_simple_irq);
+ }
+
+ hwirq = bcma_core_irq(cc->core);
+ err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio",
+ cc);
+ if (err)
+ goto err_req_irq;
+
+ bcma_chipco_gpio_intmask(cc, ~0, 0);
+ bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO);
+
+ return 0;
+
+err_req_irq:
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_find_mapping(cc->irq_domain, gpio);
+
+ irq_dispose_mapping(irq);
+ }
+ irq_domain_remove(cc->irq_domain);
+err_irq_domain:
+ return err;
+}
+
+static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+{
+ struct gpio_chip *chip = &cc->gpio;
+ int gpio;
+
+ if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
+ return;
+
+ bcma_cc_mask32(cc, BCMA_CC_IRQMASK, ~BCMA_CC_IRQ_GPIO);
+ free_irq(bcma_core_irq(cc->core), cc);
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_find_mapping(cc->irq_domain, gpio);
+
+ irq_dispose_mapping(irq);
+ }
+ irq_domain_remove(cc->irq_domain);
+}
+#else
+static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+{
+ return 0;
+}
+
+static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+{
+}
+#endif
+
int bcma_gpio_init(struct bcma_drv_cc *cc)
{
struct gpio_chip *chip = &cc->gpio;
+ int err;
chip->label = "bcma_gpio";
chip->owner = THIS_MODULE;
@@ -95,7 +215,9 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->set = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
+#if IS_BUILTIN(CONFIG_BCMA_HOST_SOC)
chip->to_irq = bcma_gpio_to_irq;
+#endif
chip->ngpio = 16;
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
@@ -105,10 +227,21 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
else
chip->base = -1;
- return gpiochip_add(chip);
+ err = bcma_gpio_irq_domain_init(cc);
+ if (err)
+ return err;
+
+ err = gpiochip_add(chip);
+ if (err) {
+ bcma_gpio_irq_domain_exit(cc);
+ return err;
+ }
+
+ return 0;
}
int bcma_gpio_unregister(struct bcma_drv_cc *cc)
{
+ bcma_gpio_irq_domain_exit(cc);
return gpiochip_remove(&cc->gpio);
}
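A rough sketch of what the new per-pin IRQ domain buys a consumer on an SoC-hosted bus: each bcma GPIO now has its own Linux interrupt, obtainable through gpio_to_irq(). The GPIO number, handler, and label below are assumptions for illustration only:

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t example_button_isr(int irq, void *data)
{
	/* the shared bcma handler has already handled polarity/ack */
	return IRQ_HANDLED;
}

static int example_request_button_irq(void)
{
	int gpio = 7;			/* board-specific, assumed */
	int irq = gpio_to_irq(gpio);	/* irq_find_mapping() under the hood */

	if (irq < 0)
		return irq;

	return request_irq(irq, example_button_isr, 0, "example-button", NULL);
}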
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 6fb98b53533f..e333305363aa 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -238,7 +238,6 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
pci_release_regions(dev);
pci_disable_device(dev);
kfree(bus);
- pci_set_drvdata(dev, NULL);
}
#ifdef CONFIG_PM_SLEEP
@@ -270,7 +269,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
#endif /* CONFIG_PM_SLEEP */
-static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
+static const struct pci_device_id bcma_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index e15430a82e90..34ea4c588d36 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -78,18 +78,6 @@ static u16 bcma_cc_core_id(struct bcma_bus *bus)
return BCMA_CORE_CHIPCOMMON;
}
-struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
-{
- struct bcma_device *core;
-
- list_for_each_entry(core, &bus->cores, list) {
- if (core->id.id == coreid)
- return core;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(bcma_find_core);
-
struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
u8 unit)
{
@@ -101,6 +89,7 @@ struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
}
return NULL;
}
+EXPORT_SYMBOL_GPL(bcma_find_core_unit);
bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
int timeout)
@@ -176,6 +165,7 @@ static int bcma_register_cores(struct bcma_bus *bus)
bcma_err(bus,
"Could not register dev for core 0x%03X\n",
core->id.id);
+ put_device(&core->dev);
continue;
}
core->dev_registered = true;
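With bcma_find_core() removed here and bcma_find_core_unit() exported instead, callers look a core up by ID plus an explicit unit number; a unit-0 convenience wrapper presumably remains in the bcma header, which is outside this diff. A hedged example, with the caller invented for illustration:

#include <linux/bcma/bcma.h>

/* Hypothetical lookup of the second 802.11 core on a dual-band SoC */
static struct bcma_device *example_second_wifi_core(struct bcma_bus *bus)
{
	return bcma_find_core_unit(bus, BCMA_CORE_80211, 1);
}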
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 86b9f37d102e..014a1cfc41c5 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -108,6 +108,8 @@ source "drivers/block/paride/Kconfig"
source "drivers/block/mtip32xx/Kconfig"
+source "drivers/block/zram/Kconfig"
+
config BLK_CPQ_DA
tristate "Compaq SMART2 support"
depends on PCI && VIRT_TO_BUS && 0
@@ -368,7 +370,8 @@ config BLK_DEV_RAM
For details, read <file:Documentation/blockdev/ramdisk.txt>.
To compile this driver as a module, choose M here: the
- module will be called rd.
+ module will be called brd. An alias "rd" has been defined
+ for historical reasons.
Most normal users won't need the RAM disk functionality, and can
thus say N here.
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 816d979c3266..02b688d1438d 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
+obj-$(CONFIG_ZRAM) += zram/
nvme-y := nvme-core.o nvme-scsi.o
skd-y := skd_main.o
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 14a9d1912318..9220f8e833d0 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -100,11 +100,8 @@ enum {
struct buf {
ulong nframesout;
- ulong resid;
- ulong bv_resid;
- sector_t sector;
struct bio *bio;
- struct bio_vec *bv;
+ struct bvec_iter iter;
struct request *rq;
};
@@ -120,13 +117,10 @@ struct frame {
ulong waited;
ulong waited_total;
struct aoetgt *t; /* parent target I belong to */
- sector_t lba;
struct sk_buff *skb; /* command skb freed on module exit */
struct sk_buff *r_skb; /* response skb for async processing */
struct buf *buf;
- struct bio_vec *bv;
- ulong bcnt;
- ulong bv_off;
+ struct bvec_iter iter;
char flags;
};
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d2515435e23f..8184451b57c0 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -196,8 +196,7 @@ aoe_freetframe(struct frame *f)
t = f->t;
f->buf = NULL;
- f->lba = 0;
- f->bv = NULL;
+ memset(&f->iter, 0, sizeof(f->iter));
f->r_skb = NULL;
f->flags = 0;
list_add(&f->head, &t->ffree);
@@ -295,21 +294,14 @@ newframe(struct aoedev *d)
}
static void
-skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
+skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
{
int frag = 0;
- ulong fcnt;
-loop:
- fcnt = bv->bv_len - (off - bv->bv_offset);
- if (fcnt > cnt)
- fcnt = cnt;
- skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
- cnt -= fcnt;
- if (cnt <= 0)
- return;
- bv++;
- off = bv->bv_offset;
- goto loop;
+ struct bio_vec bv;
+
+ __bio_for_each_segment(bv, bio, iter, iter)
+ skb_fill_page_desc(skb, frag++, bv.bv_page,
+ bv.bv_offset, bv.bv_len);
}
static void
@@ -346,12 +338,10 @@ ata_rw_frameinit(struct frame *f)
t->nout++;
f->waited = 0;
f->waited_total = 0;
- if (f->buf)
- f->lba = f->buf->sector;
/* set up ata header */
- ah->scnt = f->bcnt >> 9;
- put_lba(ah, f->lba);
+ ah->scnt = f->iter.bi_size >> 9;
+ put_lba(ah, f->iter.bi_sector);
if (t->d->flags & DEVFL_EXT) {
ah->aflags |= AOEAFL_EXT;
} else {
@@ -360,11 +350,11 @@ ata_rw_frameinit(struct frame *f)
ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */
}
if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
- skb_fillup(skb, f->bv, f->bv_off, f->bcnt);
+ skb_fillup(skb, f->buf->bio, f->iter);
ah->aflags |= AOEAFL_WRITE;
- skb->len += f->bcnt;
- skb->data_len = f->bcnt;
- skb->truesize += f->bcnt;
+ skb->len += f->iter.bi_size;
+ skb->data_len = f->iter.bi_size;
+ skb->truesize += f->iter.bi_size;
t->wpkts++;
} else {
t->rpkts++;
@@ -382,7 +372,6 @@ aoecmd_ata_rw(struct aoedev *d)
struct buf *buf;
struct sk_buff *skb;
struct sk_buff_head queue;
- ulong bcnt, fbcnt;
buf = nextbuf(d);
if (buf == NULL)
@@ -390,39 +379,22 @@ aoecmd_ata_rw(struct aoedev *d)
f = newframe(d);
if (f == NULL)
return 0;
- bcnt = d->maxbcnt;
- if (bcnt == 0)
- bcnt = DEFAULTBCNT;
- if (bcnt > buf->resid)
- bcnt = buf->resid;
- fbcnt = bcnt;
- f->bv = buf->bv;
- f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
- do {
- if (fbcnt < buf->bv_resid) {
- buf->bv_resid -= fbcnt;
- buf->resid -= fbcnt;
- break;
- }
- fbcnt -= buf->bv_resid;
- buf->resid -= buf->bv_resid;
- if (buf->resid == 0) {
- d->ip.buf = NULL;
- break;
- }
- buf->bv++;
- buf->bv_resid = buf->bv->bv_len;
- WARN_ON(buf->bv_resid == 0);
- } while (fbcnt);
/* initialize the headers & frame */
f->buf = buf;
- f->bcnt = bcnt;
- ata_rw_frameinit(f);
+ f->iter = buf->iter;
+ f->iter.bi_size = min_t(unsigned long,
+ d->maxbcnt ?: DEFAULTBCNT,
+ f->iter.bi_size);
+ bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);
+
+ if (!buf->iter.bi_size)
+ d->ip.buf = NULL;
/* mark all tracking fields and load out */
buf->nframesout += 1;
- buf->sector += bcnt >> 9;
+
+ ata_rw_frameinit(f);
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
@@ -613,10 +585,7 @@ reassign_frame(struct frame *f)
skb = nf->skb;
nf->skb = f->skb;
nf->buf = f->buf;
- nf->bcnt = f->bcnt;
- nf->lba = f->lba;
- nf->bv = f->bv;
- nf->bv_off = f->bv_off;
+ nf->iter = f->iter;
nf->waited = 0;
nf->waited_total = f->waited_total;
nf->sent = f->sent;
@@ -648,19 +617,19 @@ probe(struct aoetgt *t)
}
f->flags |= FFL_PROBE;
ifrotate(t);
- f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
+ f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
ata_rw_frameinit(f);
skb = f->skb;
- for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) {
+ for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
if (n < PAGE_SIZE)
m = n;
else
m = PAGE_SIZE;
skb_fill_page_desc(skb, frag, empty_page, 0, m);
}
- skb->len += f->bcnt;
- skb->data_len = f->bcnt;
- skb->truesize += f->bcnt;
+ skb->len += f->iter.bi_size;
+ skb->data_len = f->iter.bi_size;
+ skb->truesize += f->iter.bi_size;
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
@@ -897,15 +866,15 @@ rqbiocnt(struct request *r)
static void
bio_pageinc(struct bio *bio)
{
- struct bio_vec *bv;
+ struct bio_vec bv;
struct page *page;
- int i;
+ struct bvec_iter iter;
- bio_for_each_segment(bv, bio, i) {
+ bio_for_each_segment(bv, bio, iter) {
/* Non-zero page count for non-head members of
* compound pages is no longer allowed by the kernel.
*/
- page = compound_trans_head(bv->bv_page);
+ page = compound_trans_head(bv.bv_page);
atomic_inc(&page->_count);
}
}
@@ -913,12 +882,12 @@ bio_pageinc(struct bio *bio)
static void
bio_pagedec(struct bio *bio)
{
- struct bio_vec *bv;
struct page *page;
- int i;
+ struct bio_vec bv;
+ struct bvec_iter iter;
- bio_for_each_segment(bv, bio, i) {
- page = compound_trans_head(bv->bv_page);
+ bio_for_each_segment(bv, bio, iter) {
+ page = compound_trans_head(bv.bv_page);
atomic_dec(&page->_count);
}
}
@@ -929,12 +898,8 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
memset(buf, 0, sizeof(*buf));
buf->rq = rq;
buf->bio = bio;
- buf->resid = bio->bi_size;
- buf->sector = bio->bi_sector;
+ buf->iter = bio->bi_iter;
bio_pageinc(bio);
- buf->bv = bio_iovec(bio);
- buf->bv_resid = buf->bv->bv_len;
- WARN_ON(buf->bv_resid == 0);
}
static struct buf *
@@ -1119,24 +1084,18 @@ gettgt(struct aoedev *d, char *addr)
}
static void
-bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
+bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
{
- ulong fcnt;
- char *p;
int soff = 0;
-loop:
- fcnt = bv->bv_len - (off - bv->bv_offset);
- if (fcnt > cnt)
- fcnt = cnt;
- p = page_address(bv->bv_page) + off;
- skb_copy_bits(skb, soff, p, fcnt);
- soff += fcnt;
- cnt -= fcnt;
- if (cnt <= 0)
- return;
- bv++;
- off = bv->bv_offset;
- goto loop;
+ struct bio_vec bv;
+
+ iter.bi_size = cnt;
+
+ __bio_for_each_segment(bv, bio, iter, iter) {
+ char *p = page_address(bv.bv_page) + bv.bv_offset;
+ skb_copy_bits(skb, soff, p, bv.bv_len);
+ soff += bv.bv_len;
+ }
}
void
@@ -1152,7 +1111,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
do {
bio = rq->bio;
bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
- } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));
+ } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
/* cf. http://lkml.org/lkml/2006/10/31/28 */
if (!fastfail)
@@ -1229,7 +1188,15 @@ noskb: if (buf)
clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
break;
}
- bvcpy(f->bv, f->bv_off, skb, n);
+ if (n > f->iter.bi_size) {
+ pr_err_ratelimited("%s e%ld.%d. bytes=%ld need=%u\n",
+ "aoe: too-large data size in read from",
+ (long) d->aoemajor, d->aoeminor,
+ n, f->iter.bi_size);
+ clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
+ break;
+ }
+ bvcpy(skb, f->buf->bio, f->iter, n);
case ATA_CMD_PIO_WRITE:
case ATA_CMD_PIO_WRITE_EXT:
spin_lock_irq(&d->lock);
@@ -1272,7 +1239,7 @@ out:
aoe_freetframe(f);
- if (buf && --buf->nframesout == 0 && buf->resid == 0)
+ if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
aoe_end_buf(d, buf);
spin_unlock_irq(&d->lock);
@@ -1727,7 +1694,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
{
if (buf == NULL)
return;
- buf->resid = 0;
+ buf->iter.bi_size = 0;
clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
if (buf->nframesout == 0)
aoe_end_buf(d, buf);
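The aoe rework above (and most of the block-driver changes that follow) moves to the immutable-biovec style: bio_for_each_segment() now yields struct bio_vec by value and keeps its position in a struct bvec_iter, which can be copied and advanced independently of the bio. A minimal sketch of the new iteration idiom; the function itself is illustrative, not part of the patch:

#include <linux/bio.h>

static unsigned int example_bio_bytes(struct bio *bio)
{
	struct bio_vec bv;		/* a copy, not a pointer into the bio */
	struct bvec_iter iter;
	unsigned int bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;

	return bytes;	/* matches bio->bi_iter.bi_size for a fresh bio */
}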
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index d91f1a56e861..e73b85cf0756 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -328,18 +328,18 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
struct block_device *bdev = bio->bi_bdev;
struct brd_device *brd = bdev->bd_disk->private_data;
int rw;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
sector_t sector;
- int i;
+ struct bvec_iter iter;
int err = -EIO;
- sector = bio->bi_sector;
+ sector = bio->bi_iter.bi_sector;
if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
goto out;
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
err = 0;
- discard_from_brd(brd, sector, bio->bi_size);
+ discard_from_brd(brd, sector, bio->bi_iter.bi_size);
goto out;
}
@@ -347,10 +347,10 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
if (rw == READA)
rw = READ;
- bio_for_each_segment(bvec, bio, i) {
- unsigned int len = bvec->bv_len;
- err = brd_do_bvec(brd, bvec->bv_page, len,
- bvec->bv_offset, rw, sector);
+ bio_for_each_segment(bvec, bio, iter) {
+ unsigned int len = bvec.bv_len;
+ err = brd_do_bvec(brd, bvec.bv_page, len,
+ bvec.bv_offset, rw, sector);
if (err)
break;
sector += len >> SECTOR_SHIFT;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index b35fc4f5237c..036e8ab86c71 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -5004,7 +5004,7 @@ reinit_after_soft_reset:
i = alloc_cciss_hba(pdev);
if (i < 0)
- return -1;
+ return -ENOMEM;
h = hba[i];
h->pdev = pdev;
@@ -5205,7 +5205,7 @@ clean_no_release_regions:
*/
pci_set_drvdata(pdev, NULL);
free_hba(h);
- return -1;
+ return -ENODEV;
}
static void cciss_shutdown(struct pci_dev *pdev)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 28c73ca320a8..a9b13f2cc420 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -159,7 +159,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev;
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
err = -EIO;
if (bio_add_page(bio, page, size, 0) != size)
goto out;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index b12c11ec4bd2..597f111df67b 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1028,7 +1028,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
} else
page = b->bm_pages[page_nr];
bio->bi_bdev = mdev->ldev->md_bdev;
- bio->bi_sector = on_disk_sector;
+ bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
bio_add_page(bio, page, len, 0);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9e3818b1bc83..929468e1512a 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1537,15 +1537,17 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
{
- struct bio_vec *bvec;
- int i;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+
/* hint all but last page with MSG_MORE */
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment(bvec, bio, iter) {
int err;
- err = _drbd_no_send_page(mdev, bvec->bv_page,
- bvec->bv_offset, bvec->bv_len,
- i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+ err = _drbd_no_send_page(mdev, bvec.bv_page,
+ bvec.bv_offset, bvec.bv_len,
+ bio_iter_last(bvec, iter)
+ ? 0 : MSG_MORE);
if (err)
return err;
}
@@ -1554,15 +1556,16 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
{
- struct bio_vec *bvec;
- int i;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+
/* hint all but last page with MSG_MORE */
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment(bvec, bio, iter) {
int err;
- err = _drbd_send_page(mdev, bvec->bv_page,
- bvec->bv_offset, bvec->bv_len,
- i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+ err = _drbd_send_page(mdev, bvec.bv_page,
+ bvec.bv_offset, bvec.bv_len,
+ bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
if (err)
return err;
}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6fa6673b36b3..d073305ffd5e 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1333,7 +1333,7 @@ next_bio:
goto fail;
}
/* > peer_req->i.sector, unless this is the first bio */
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio->bi_bdev = mdev->ldev->backing_bdev;
bio->bi_rw = rw;
bio->bi_private = peer_req;
@@ -1353,7 +1353,7 @@ next_bio:
dev_err(DEV,
"bio_add_page failed for len=%u, "
"bi_vcnt=0 (bi_sector=%llu)\n",
- len, (unsigned long long)bio->bi_sector);
+ len, (uint64_t)bio->bi_iter.bi_sector);
err = -ENOSPC;
goto fail;
}
@@ -1595,9 +1595,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
sector_t sector, int data_size)
{
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
struct bio *bio;
- int dgs, err, i, expect;
+ int dgs, err, expect;
void *dig_in = mdev->tconn->int_dig_in;
void *dig_vv = mdev->tconn->int_dig_vv;
@@ -1615,13 +1616,13 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
mdev->recv_cnt += data_size>>9;
bio = req->master_bio;
- D_ASSERT(sector == bio->bi_sector);
+ D_ASSERT(sector == bio->bi_iter.bi_sector);
- bio_for_each_segment(bvec, bio, i) {
- void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
- expect = min_t(int, data_size, bvec->bv_len);
+ bio_for_each_segment(bvec, bio, iter) {
+ void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
+ expect = min_t(int, data_size, bvec.bv_len);
err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
- kunmap(bvec->bv_page);
+ kunmap(bvec.bv_page);
if (err)
return err;
data_size -= expect;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index fec7bef44994..104a040f24de 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -77,8 +77,8 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
req->epoch = 0;
drbd_clear_interval(&req->i);
- req->i.sector = bio_src->bi_sector;
- req->i.size = bio_src->bi_size;
+ req->i.sector = bio_src->bi_iter.bi_sector;
+ req->i.size = bio_src->bi_iter.bi_size;
req->i.local = true;
req->i.waiting = false;
@@ -1280,7 +1280,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
/*
* what we "blindly" assume:
*/
- D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
+ D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512));
inc_ap_bio(mdev);
__drbd_make_request(mdev, bio, start_time);
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 978cb1addc98..28e15d91197a 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -269,7 +269,7 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi
/* Short lived temporary struct on the stack.
* We could squirrel the error to be returned into
- * bio->bi_size, or similar. But that would be too ugly. */
+ * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
struct bio_and_error {
struct bio *bio;
int error;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 891c0ecaa292..84d3175d493a 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -313,8 +313,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
{
struct hash_desc desc;
struct scatterlist sg;
- struct bio_vec *bvec;
- int i;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
desc.tfm = tfm;
desc.flags = 0;
@@ -322,8 +322,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
sg_init_table(&sg, 1);
crypto_hash_init(&desc);
- bio_for_each_segment(bvec, bio, i) {
- sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
+ bio_for_each_segment(bvec, bio, iter) {
+ sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
crypto_hash_update(&desc, &sg, sg.length);
}
crypto_hash_final(&desc, digest);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 000abe2f105c..2023043ce7c0 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2351,7 +2351,7 @@ static void rw_interrupt(void)
/* Compute maximal contiguous buffer size. */
static int buffer_chain_size(void)
{
- struct bio_vec *bv;
+ struct bio_vec bv;
int size;
struct req_iterator iter;
char *base;
@@ -2360,10 +2360,10 @@ static int buffer_chain_size(void)
size = 0;
rq_for_each_segment(bv, current_req, iter) {
- if (page_address(bv->bv_page) + bv->bv_offset != base + size)
+ if (page_address(bv.bv_page) + bv.bv_offset != base + size)
break;
- size += bv->bv_len;
+ size += bv.bv_len;
}
return size >> 9;
@@ -2389,7 +2389,7 @@ static int transfer_size(int ssize, int max_sector, int max_size)
static void copy_buffer(int ssize, int max_sector, int max_sector_2)
{
int remaining; /* number of transferred 512-byte sectors */
- struct bio_vec *bv;
+ struct bio_vec bv;
char *buffer;
char *dma_buffer;
int size;
@@ -2427,10 +2427,10 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
if (!remaining)
break;
- size = bv->bv_len;
+ size = bv.bv_len;
SUPBOUND(size, remaining);
- buffer = page_address(bv->bv_page) + bv->bv_offset;
+ buffer = page_address(bv.bv_page) + bv.bv_offset;
if (dma_buffer + size >
floppy_track_buffer + (max_buffer_sectors << 10) ||
dma_buffer < floppy_track_buffer) {
@@ -3691,9 +3691,12 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (!(mode & FMODE_NDELAY)) {
if (mode & (FMODE_READ|FMODE_WRITE)) {
UDRS->last_checked = 0;
+ clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
check_disk_change(bdev);
if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
goto out;
+ if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+ goto out;
}
res = -EROFS;
if ((mode & FMODE_WRITE) &&
@@ -3746,17 +3749,29 @@ static unsigned int floppy_check_events(struct gendisk *disk,
* a disk in the drive, and whether that disk is writable.
*/
-static void floppy_rb0_complete(struct bio *bio, int err)
+struct rb0_cbdata {
+ int drive;
+ struct completion complete;
+};
+
+static void floppy_rb0_cb(struct bio *bio, int err)
{
- complete((struct completion *)bio->bi_private);
+ struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
+ int drive = cbdata->drive;
+
+ if (err) {
+ pr_info("floppy: error %d while reading block 0", err);
+ set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+ }
+ complete(&cbdata->complete);
}
-static int __floppy_read_block_0(struct block_device *bdev)
+static int __floppy_read_block_0(struct block_device *bdev, int drive)
{
struct bio bio;
struct bio_vec bio_vec;
- struct completion complete;
struct page *page;
+ struct rb0_cbdata cbdata;
size_t size;
page = alloc_page(GFP_NOIO);
@@ -3769,23 +3784,26 @@ static int __floppy_read_block_0(struct block_device *bdev)
if (!size)
size = 1024;
+ cbdata.drive = drive;
+
bio_init(&bio);
bio.bi_io_vec = &bio_vec;
bio_vec.bv_page = page;
bio_vec.bv_len = size;
bio_vec.bv_offset = 0;
bio.bi_vcnt = 1;
- bio.bi_size = size;
+ bio.bi_iter.bi_size = size;
bio.bi_bdev = bdev;
- bio.bi_sector = 0;
+ bio.bi_iter.bi_sector = 0;
bio.bi_flags = (1 << BIO_QUIET);
- init_completion(&complete);
- bio.bi_private = &complete;
- bio.bi_end_io = floppy_rb0_complete;
+ bio.bi_private = &cbdata;
+ bio.bi_end_io = floppy_rb0_cb;
submit_bio(READ, &bio);
process_fd_request();
- wait_for_completion(&complete);
+
+ init_completion(&cbdata.complete);
+ wait_for_completion(&cbdata.complete);
__free_page(page);
@@ -3827,7 +3845,7 @@ static int floppy_revalidate(struct gendisk *disk)
UDRS->generation++;
if (drive_no_geom(drive)) {
/* auto-sensing */
- res = __floppy_read_block_0(opened_bdev[drive]);
+ res = __floppy_read_block_0(opened_bdev[drive], drive);
} else {
if (cf)
poll_drive(false, FD_RAW_NEED_DISK);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c8dac7305244..66e8c3b94ef3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -288,9 +288,10 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
{
int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
struct page *page);
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
struct page *page = NULL;
- int i, ret = 0;
+ int ret = 0;
if (lo->transfer != transfer_none) {
page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
@@ -302,11 +303,11 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
do_lo_send = do_lo_send_direct_write;
}
- bio_for_each_segment(bvec, bio, i) {
- ret = do_lo_send(lo, bvec, pos, page);
+ bio_for_each_segment(bvec, bio, iter) {
+ ret = do_lo_send(lo, &bvec, pos, page);
if (ret < 0)
break;
- pos += bvec->bv_len;
+ pos += bvec.bv_len;
}
if (page) {
kunmap(page);
@@ -392,20 +393,20 @@ do_lo_receive(struct loop_device *lo,
static int
lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
{
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
ssize_t s;
- int i;
- bio_for_each_segment(bvec, bio, i) {
- s = do_lo_receive(lo, bvec, bsize, pos);
+ bio_for_each_segment(bvec, bio, iter) {
+ s = do_lo_receive(lo, &bvec, bsize, pos);
if (s < 0)
return s;
- if (s != bvec->bv_len) {
+ if (s != bvec.bv_len) {
zero_fill_bio(bio);
break;
}
- pos += bvec->bv_len;
+ pos += bvec.bv_len;
}
return 0;
}
@@ -415,7 +416,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
loff_t pos;
int ret;
- pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+ pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;
if (bio_rw(bio) == WRITE) {
struct file *file = lo->lo_backing_file;
@@ -444,7 +445,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
goto out;
}
ret = file->f_op->fallocate(file, mode, pos,
- bio->bi_size);
+ bio->bi_iter.bi_size);
if (unlikely(ret && ret != -EINVAL &&
ret != -EOPNOTSUPP))
ret = -EIO;
@@ -798,7 +799,7 @@ static void loop_config_discard(struct loop_device *lo)
/*
* We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do support discard if
+ * image a.k.a. discard. However we do not support discard if
* encryption is enabled, because it may give an attacker
* useful information.
*/
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 7bc363f1ee82..eb59b1241366 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -915,7 +915,7 @@ static int mg_probe(struct platform_device *plat_dev)
/* disk reset */
if (prv_data->dev_attr == MG_STORAGE_DEV) {
- /* If POR seq. not yet finised, wait */
+ /* If POR seq. not yet finished, wait */
err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT);
if (err)
goto probe_err_3b;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 050c71267f14..516026954be6 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -41,10 +41,31 @@
#include "mtip32xx.h"
#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
-#define HW_CMD_TBL_SZ (AHCI_CMD_TBL_HDR_SZ + (MTIP_MAX_SG * 16))
-#define HW_CMD_TBL_AR_SZ (HW_CMD_TBL_SZ * MTIP_MAX_COMMAND_SLOTS)
-#define HW_PORT_PRIV_DMA_SZ \
- (HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)
+
+/* DMA region containing RX Fis, Identify, RLE10, and SMART buffers */
+#define AHCI_RX_FIS_SZ 0x100
+#define AHCI_RX_FIS_OFFSET 0x0
+#define AHCI_IDFY_SZ ATA_SECT_SIZE
+#define AHCI_IDFY_OFFSET 0x400
+#define AHCI_SECTBUF_SZ ATA_SECT_SIZE
+#define AHCI_SECTBUF_OFFSET 0x800
+#define AHCI_SMARTBUF_SZ ATA_SECT_SIZE
+#define AHCI_SMARTBUF_OFFSET 0xC00
+/* 0x100 + 0x200 + 0x200 + 0x200 is smaller than 4k but we pad it out */
+#define BLOCK_DMA_ALLOC_SZ 4096
+
+/* DMA region containing command table (should be 8192 bytes) */
+#define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr)
+#define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
+#define AHCI_CMD_TBL_OFFSET 0x0
+
+/* DMA region per command (contains header and SGL) */
+#define AHCI_CMD_TBL_HDR_SZ 0x80
+#define AHCI_CMD_TBL_HDR_OFFSET 0x0
+#define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
+#define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
+#define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
+
#define HOST_CAP_NZDMA (1 << 19)
#define HOST_HSORG 0xFC
@@ -899,8 +920,9 @@ static void mtip_handle_tfe(struct driver_data *dd)
fail_reason = "thermal shutdown";
}
if (buf[288] == 0xBF) {
+ set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
dev_info(&dd->pdev->dev,
- "Drive indicates rebuild has failed.\n");
+ "Drive indicates rebuild has failed. Secure erase required.\n");
fail_all_ncq_cmds = 1;
fail_reason = "rebuild failed";
}
@@ -1566,6 +1588,12 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
}
#endif
+ /* Check security locked state */
+ if (port->identify[128] & 0x4)
+ set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+ else
+ clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+
#ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
/* Demux ID.DRAT & ID.RZAT to determine trim support */
if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
@@ -1887,6 +1915,10 @@ static void mtip_dump_identify(struct mtip_port *port)
strlcpy(cbuf, (char *)(port->identify+27), 41);
dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
+ dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
+ port->identify[128],
+ port->identify[128] & 0x4 ? "(LOCKED)" : "");
+
if (mtip_hw_get_capacity(port->dd, &sectors))
dev_info(&port->dd->pdev->dev,
"Capacity: %llu sectors (%llu MB)\n",
@@ -3313,6 +3345,118 @@ st_out:
}
/*
+ * DMA region teardown
+ *
+ * @dd Pointer to driver_data structure
+ *
+ * return value
+ * None
+ */
+static void mtip_dma_free(struct driver_data *dd)
+{
+ int i;
+ struct mtip_port *port = dd->port;
+
+ if (port->block1)
+ dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+ port->block1, port->block1_dma);
+
+ if (port->command_list) {
+ dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
+ port->command_list, port->command_list_dma);
+ }
+
+ for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) {
+ if (port->commands[i].command)
+ dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
+ port->commands[i].command,
+ port->commands[i].command_dma);
+ }
+}
+
+/*
+ * DMA region setup
+ *
+ * @dd Pointer to driver_data structure
+ *
+ * return value
+ * -ENOMEM Not enough free DMA region space to initialize driver
+ */
+static int mtip_dma_alloc(struct driver_data *dd)
+{
+ struct mtip_port *port = dd->port;
+ int i, rv = 0;
+ u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
+
+ /* Allocate dma memory for RX Fis, Identify, and Sector Buffer */
+ port->block1 =
+ dmam_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+ &port->block1_dma, GFP_KERNEL);
+ if (!port->block1)
+ return -ENOMEM;
+ memset(port->block1, 0, BLOCK_DMA_ALLOC_SZ);
+
+ /* Allocate dma memory for command list */
+ port->command_list =
+ dmam_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
+ &port->command_list_dma, GFP_KERNEL);
+ if (!port->command_list) {
+ dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+ port->block1, port->block1_dma);
+ port->block1 = NULL;
+ port->block1_dma = 0;
+ return -ENOMEM;
+ }
+ memset(port->command_list, 0, AHCI_CMD_TBL_SZ);
+
+ /* Setup all pointers into first DMA region */
+ port->rxfis = port->block1 + AHCI_RX_FIS_OFFSET;
+ port->rxfis_dma = port->block1_dma + AHCI_RX_FIS_OFFSET;
+ port->identify = port->block1 + AHCI_IDFY_OFFSET;
+ port->identify_dma = port->block1_dma + AHCI_IDFY_OFFSET;
+ port->log_buf = port->block1 + AHCI_SECTBUF_OFFSET;
+ port->log_buf_dma = port->block1_dma + AHCI_SECTBUF_OFFSET;
+ port->smart_buf = port->block1 + AHCI_SMARTBUF_OFFSET;
+ port->smart_buf_dma = port->block1_dma + AHCI_SMARTBUF_OFFSET;
+
+ /* Setup per command SGL DMA region */
+
+ /* Point the command headers at the command tables */
+ for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) {
+ port->commands[i].command =
+ dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
+ &port->commands[i].command_dma, GFP_KERNEL);
+ if (!port->commands[i].command) {
+ rv = -ENOMEM;
+ mtip_dma_free(dd);
+ return rv;
+ }
+ memset(port->commands[i].command, 0, CMD_DMA_ALLOC_SZ);
+
+ port->commands[i].command_header = port->command_list +
+ (sizeof(struct mtip_cmd_hdr) * i);
+ port->commands[i].command_header_dma =
+ dd->port->command_list_dma +
+ (sizeof(struct mtip_cmd_hdr) * i);
+
+ if (host_cap_64)
+ port->commands[i].command_header->ctbau =
+ __force_bit2int cpu_to_le32(
+ (port->commands[i].command_dma >> 16) >> 16);
+
+ port->commands[i].command_header->ctba =
+ __force_bit2int cpu_to_le32(
+ port->commands[i].command_dma & 0xFFFFFFFF);
+
+ sg_init_table(port->commands[i].sg, MTIP_MAX_SG);
+
+ /* Mark command as currently inactive */
+ atomic_set(&dd->port->commands[i].active, 0);
+ }
+ return 0;
+}
+
+/*
* Called once for each card.
*
* @dd Pointer to the driver data structure.
@@ -3370,83 +3514,10 @@ static int mtip_hw_init(struct driver_data *dd)
dd->port->mmio = dd->mmio + PORT_OFFSET;
dd->port->dd = dd;
- /* Allocate memory for the command list. */
- dd->port->command_list =
- dmam_alloc_coherent(&dd->pdev->dev,
- HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
- &dd->port->command_list_dma,
- GFP_KERNEL);
- if (!dd->port->command_list) {
- dev_err(&dd->pdev->dev,
- "Memory allocation: command list\n");
- rv = -ENOMEM;
+ /* DMA allocations */
+ rv = mtip_dma_alloc(dd);
+ if (rv < 0)
goto out1;
- }
-
- /* Clear the memory we have allocated. */
- memset(dd->port->command_list,
- 0,
- HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4));
-
- /* Setup the addresse of the RX FIS. */
- dd->port->rxfis = dd->port->command_list + HW_CMD_SLOT_SZ;
- dd->port->rxfis_dma = dd->port->command_list_dma + HW_CMD_SLOT_SZ;
-
- /* Setup the address of the command tables. */
- dd->port->command_table = dd->port->rxfis + AHCI_RX_FIS_SZ;
- dd->port->command_tbl_dma = dd->port->rxfis_dma + AHCI_RX_FIS_SZ;
-
- /* Setup the address of the identify data. */
- dd->port->identify = dd->port->command_table +
- HW_CMD_TBL_AR_SZ;
- dd->port->identify_dma = dd->port->command_tbl_dma +
- HW_CMD_TBL_AR_SZ;
-
- /* Setup the address of the sector buffer - for some non-ncq cmds */
- dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE;
- dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;
-
- /* Setup the address of the log buf - for read log command */
- dd->port->log_buf = (void *)dd->port->sector_buffer + ATA_SECT_SIZE;
- dd->port->log_buf_dma = dd->port->sector_buffer_dma + ATA_SECT_SIZE;
-
- /* Setup the address of the smart buf - for smart read data command */
- dd->port->smart_buf = (void *)dd->port->log_buf + ATA_SECT_SIZE;
- dd->port->smart_buf_dma = dd->port->log_buf_dma + ATA_SECT_SIZE;
-
-
- /* Point the command headers at the command tables. */
- for (i = 0; i < num_command_slots; i++) {
- dd->port->commands[i].command_header =
- dd->port->command_list +
- (sizeof(struct mtip_cmd_hdr) * i);
- dd->port->commands[i].command_header_dma =
- dd->port->command_list_dma +
- (sizeof(struct mtip_cmd_hdr) * i);
-
- dd->port->commands[i].command =
- dd->port->command_table + (HW_CMD_TBL_SZ * i);
- dd->port->commands[i].command_dma =
- dd->port->command_tbl_dma + (HW_CMD_TBL_SZ * i);
-
- if (readl(dd->mmio + HOST_CAP) & HOST_CAP_64)
- dd->port->commands[i].command_header->ctbau =
- __force_bit2int cpu_to_le32(
- (dd->port->commands[i].command_dma >> 16) >> 16);
- dd->port->commands[i].command_header->ctba =
- __force_bit2int cpu_to_le32(
- dd->port->commands[i].command_dma & 0xFFFFFFFF);
-
- /*
- * If this is not done, a bug is reported by the stock
- * FC11 i386. Due to the fact that it has lots of kernel
- * debugging enabled.
- */
- sg_init_table(dd->port->commands[i].sg, MTIP_MAX_SG);
-
- /* Mark all commands as currently inactive.*/
- atomic_set(&dd->port->commands[i].active, 0);
- }
/* Setup the pointers to the extended s_active and CI registers. */
for (i = 0; i < dd->slot_groups; i++) {
@@ -3594,12 +3665,8 @@ out3:
out2:
mtip_deinit_port(dd->port);
+ mtip_dma_free(dd);
- /* Free the command/command header memory. */
- dmam_free_coherent(&dd->pdev->dev,
- HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
- dd->port->command_list,
- dd->port->command_list_dma);
out1:
/* Free the memory allocated for the for structure. */
kfree(dd->port);
@@ -3622,7 +3689,8 @@ static int mtip_hw_exit(struct driver_data *dd)
* saves its state.
*/
if (!dd->sr) {
- if (!test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
+ if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
+ !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
if (mtip_standby_immediate(dd->port))
dev_warn(&dd->pdev->dev,
"STANDBY IMMEDIATE failed\n");
@@ -3641,11 +3709,9 @@ static int mtip_hw_exit(struct driver_data *dd)
irq_set_affinity_hint(dd->pdev->irq, NULL);
devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
- /* Free the command/command header memory. */
- dmam_free_coherent(&dd->pdev->dev,
- HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
- dd->port->command_list,
- dd->port->command_list_dma);
+ /* Free dma regions */
+ mtip_dma_free(dd);
+
/* Free the memory allocated for the for structure. */
kfree(dd->port);
dd->port = NULL;
@@ -3962,8 +4028,9 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
{
struct driver_data *dd = queue->queuedata;
struct scatterlist *sg;
- struct bio_vec *bvec;
- int i, nents = 0;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ int nents = 0;
int tag = 0, unaligned = 0;
if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3993,7 +4060,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
}
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
- bio_endio(bio, mtip_send_trim(dd, bio->bi_sector,
+ bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector,
bio_sectors(bio)));
return;
}
@@ -4006,7 +4073,8 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
dd->unal_qdepth) {
- if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */
+ if (bio->bi_iter.bi_sector % 8 != 0)
+ /* Unaligned on 4k boundaries */
unaligned = 1;
else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
unaligned = 1;
@@ -4025,17 +4093,17 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
}
/* Create the scatter list for this bio. */
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment(bvec, bio, iter) {
sg_set_page(&sg[nents],
- bvec->bv_page,
- bvec->bv_len,
- bvec->bv_offset);
+ bvec.bv_page,
+ bvec.bv_len,
+ bvec.bv_offset);
nents++;
}
/* Issue the read/write. */
mtip_hw_submit_io(dd,
- bio->bi_sector,
+ bio->bi_iter.bi_sector,
bio_sectors(bio),
nents,
tag,
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 9be7a1582ad3..b52e9a6d6aad 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -69,7 +69,7 @@
* Maximum number of scatter gather entries
* a single command may have.
*/
-#define MTIP_MAX_SG 128
+#define MTIP_MAX_SG 504
/*
* Maximum number of slot groups (Command Issue & s_active registers)
@@ -92,7 +92,7 @@
/* Driver name and version strings */
#define MTIP_DRV_NAME "mtip32xx"
-#define MTIP_DRV_VERSION "1.2.6os3"
+#define MTIP_DRV_VERSION "1.3.0"
/* Maximum number of minor device numbers per device. */
#define MTIP_MAX_MINORS 16
@@ -391,15 +391,13 @@ struct mtip_port {
*/
dma_addr_t rxfis_dma;
/*
- * Pointer to the beginning of the command table memory as used
- * by the driver.
+ * Pointer to the DMA region for RX Fis, Identify, RLE10, and SMART
*/
- void *command_table;
+ void *block1;
/*
- * Pointer to the beginning of the command table memory as used
- * by the DMA.
+ * DMA address of region for RX Fis, Identify, RLE10, and SMART
*/
- dma_addr_t command_tbl_dma;
+ dma_addr_t block1_dma;
/*
* Pointer to the beginning of the identify data memory as used
* by the driver.
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2dc3b5153f0d..55298db36b2d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -271,18 +271,18 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
if (nbd_cmd(req) == NBD_CMD_WRITE) {
struct req_iterator iter;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
/*
* we are really probing at internals to determine
* whether to set MSG_MORE or not...
*/
rq_for_each_segment(bvec, req, iter) {
flags = 0;
- if (!rq_iter_last(req, iter))
+ if (!rq_iter_last(bvec, iter))
flags = MSG_MORE;
dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
- nbd->disk->disk_name, req, bvec->bv_len);
- result = sock_send_bvec(nbd, bvec, flags);
+ nbd->disk->disk_name, req, bvec.bv_len);
+ result = sock_send_bvec(nbd, &bvec, flags);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
@@ -378,10 +378,10 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
nbd->disk->disk_name, req);
if (nbd_cmd(req) == NBD_CMD_READ) {
struct req_iterator iter;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
rq_for_each_segment(bvec, req, iter) {
- result = sock_recv_bvec(nbd, bvec);
+ result = sock_recv_bvec(nbd, &bvec);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
result);
@@ -389,7 +389,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
return req;
}
dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
- nbd->disk->disk_name, req, bvec->bv_len);
+ nbd->disk->disk_name, req, bvec.bv_len);
}
}
return req;
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index f370fc13aea5..091b9ea14feb 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1,4 +1,5 @@
#include <linux/module.h>
+
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
@@ -59,13 +60,15 @@ enum {
NULL_IRQ_NONE = 0,
NULL_IRQ_SOFTIRQ = 1,
NULL_IRQ_TIMER = 2,
+};
+enum {
NULL_Q_BIO = 0,
NULL_Q_RQ = 1,
NULL_Q_MQ = 2,
};
-static int submit_queues = 1;
+static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");
@@ -101,9 +104,9 @@ static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
-static bool use_per_node_hctx = true;
+static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
-MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
+MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
@@ -171,18 +174,20 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
static void end_cmd(struct nullb_cmd *cmd)
{
- if (cmd->rq) {
- if (queue_mode == NULL_Q_MQ)
- blk_mq_end_io(cmd->rq, 0);
- else {
- INIT_LIST_HEAD(&cmd->rq->queuelist);
- blk_end_request_all(cmd->rq, 0);
- }
- } else if (cmd->bio)
+ switch (queue_mode) {
+ case NULL_Q_MQ:
+ blk_mq_end_io(cmd->rq, 0);
+ return;
+ case NULL_Q_RQ:
+ INIT_LIST_HEAD(&cmd->rq->queuelist);
+ blk_end_request_all(cmd->rq, 0);
+ break;
+ case NULL_Q_BIO:
bio_endio(cmd->bio, 0);
+ break;
+ }
- if (queue_mode != NULL_Q_MQ)
- free_cmd(cmd);
+ free_cmd(cmd);
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
@@ -194,6 +199,7 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
cq = &per_cpu(completion_queues, smp_processor_id());
while ((entry = llist_del_all(&cq->list)) != NULL) {
+ entry = llist_reverse_order(entry);
do {
cmd = container_of(entry, struct nullb_cmd, ll_list);
end_cmd(cmd);
@@ -220,61 +226,31 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
static void null_softirq_done_fn(struct request *rq)
{
- blk_end_request_all(rq, 0);
-}
-
-#ifdef CONFIG_SMP
-
-static void null_ipi_cmd_end_io(void *data)
-{
- struct completion_queue *cq;
- struct llist_node *entry, *next;
- struct nullb_cmd *cmd;
-
- cq = &per_cpu(completion_queues, smp_processor_id());
-
- entry = llist_del_all(&cq->list);
-
- while (entry) {
- next = entry->next;
- cmd = llist_entry(entry, struct nullb_cmd, ll_list);
- end_cmd(cmd);
- entry = next;
- }
-}
-
-static void null_cmd_end_ipi(struct nullb_cmd *cmd)
-{
- struct call_single_data *data = &cmd->csd;
- int cpu = get_cpu();
- struct completion_queue *cq = &per_cpu(completion_queues, cpu);
-
- cmd->ll_list.next = NULL;
-
- if (llist_add(&cmd->ll_list, &cq->list)) {
- data->func = null_ipi_cmd_end_io;
- data->flags = 0;
- __smp_call_function_single(cpu, data, 0);
- }
-
- put_cpu();
+ end_cmd(rq->special);
}
-#endif /* CONFIG_SMP */
-
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
/* Complete IO by inline, softirq or timer */
switch (irqmode) {
- case NULL_IRQ_NONE:
- end_cmd(cmd);
- break;
case NULL_IRQ_SOFTIRQ:
-#ifdef CONFIG_SMP
- null_cmd_end_ipi(cmd);
-#else
+ switch (queue_mode) {
+ case NULL_Q_MQ:
+ blk_mq_complete_request(cmd->rq);
+ break;
+ case NULL_Q_RQ:
+ blk_complete_request(cmd->rq);
+ break;
+ case NULL_Q_BIO:
+ /*
+ * XXX: no proper submitting cpu information available.
+ */
+ end_cmd(cmd);
+ break;
+ }
+ break;
+ case NULL_IRQ_NONE:
end_cmd(cmd);
-#endif
break;
case NULL_IRQ_TIMER:
null_cmd_end_timer(cmd);
@@ -346,8 +322,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
{
- return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
- hctx_index);
+ int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
+ int tip = (reg->nr_hw_queues % nr_online_nodes);
+ int node = 0, i, n;
+
+ /*
+ * Split submit queues evenly wrt the number of nodes. If uneven,
+ * fill the first buckets with one extra, until the rest is filled with
+ * no extra.
+ */
+ for (i = 0, n = 1; i < hctx_index; i++, n++) {
+ if (n % b_size == 0) {
+ n = 0;
+ node++;
+
+ tip--;
+ if (!tip)
+ b_size = reg->nr_hw_queues / nr_online_nodes;
+ }
+ }
+
+ /*
+ * A node might not be online, therefore map the relative node id to the
+ * real node id.
+ */
+ for_each_online_node(n) {
+ if (!node)
+ break;
+ node--;
+ }
+
+ return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
}
static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
@@ -355,16 +360,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
kfree(hctx);
}
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+ BUG_ON(!nullb);
+ BUG_ON(!nq);
+
+ init_waitqueue_head(&nq->wait);
+ nq->queue_depth = nullb->queue_depth;
+}
+
static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int index)
{
struct nullb *nullb = data;
struct nullb_queue *nq = &nullb->queues[index];
- init_waitqueue_head(&nq->wait);
- nq->queue_depth = nullb->queue_depth;
- nullb->nr_queues++;
hctx->driver_data = nq;
+ null_init_queue(nullb, nq);
+ nullb->nr_queues++;
return 0;
}
@@ -373,6 +386,7 @@ static struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.map_queue = blk_mq_map_queue,
.init_hctx = null_init_hctx,
+ .complete = null_softirq_done_fn,
};
static struct blk_mq_reg null_mq_reg = {
@@ -387,10 +401,7 @@ static void null_del_dev(struct nullb *nullb)
list_del_init(&nullb->list);
del_gendisk(nullb->disk);
- if (queue_mode == NULL_Q_MQ)
- blk_mq_free_queue(nullb->q);
- else
- blk_cleanup_queue(nullb->q);
+ blk_cleanup_queue(nullb->q);
put_disk(nullb->disk);
kfree(nullb);
}
@@ -417,13 +428,13 @@ static int setup_commands(struct nullb_queue *nq)
nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
if (!nq->cmds)
- return 1;
+ return -ENOMEM;
tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
if (!nq->tag_map) {
kfree(nq->cmds);
- return 1;
+ return -ENOMEM;
}
for (i = 0; i < nq->queue_depth; i++) {
@@ -454,33 +465,37 @@ static void cleanup_queues(struct nullb *nullb)
static int setup_queues(struct nullb *nullb)
{
- struct nullb_queue *nq;
- int i;
-
- nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+ nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
+ GFP_KERNEL);
if (!nullb->queues)
- return 1;
+ return -ENOMEM;
nullb->nr_queues = 0;
nullb->queue_depth = hw_queue_depth;
- if (queue_mode == NULL_Q_MQ)
- return 0;
+ return 0;
+}
+
+static int init_driver_queues(struct nullb *nullb)
+{
+ struct nullb_queue *nq;
+ int i, ret = 0;
for (i = 0; i < submit_queues; i++) {
nq = &nullb->queues[i];
- init_waitqueue_head(&nq->wait);
- nq->queue_depth = hw_queue_depth;
- if (setup_commands(nq))
- break;
+
+ null_init_queue(nullb, nq);
+
+ ret = setup_commands(nq);
+ if (ret)
+ goto err_queue;
nullb->nr_queues++;
}
- if (i == submit_queues)
- return 0;
-
+ return 0;
+err_queue:
cleanup_queues(nullb);
- return 1;
+ return ret;
}
static int null_add_dev(void)
@@ -518,11 +533,13 @@ static int null_add_dev(void)
} else if (queue_mode == NULL_Q_BIO) {
nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
blk_queue_make_request(nullb->q, null_queue_bio);
+ init_driver_queues(nullb);
} else {
nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
if (nullb->q)
blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+ init_driver_queues(nullb);
}
if (!nullb->q)
@@ -534,10 +551,7 @@ static int null_add_dev(void)
disk = nullb->disk = alloc_disk_node(1, home_node);
if (!disk) {
queue_fail:
- if (queue_mode == NULL_Q_MQ)
- blk_mq_free_queue(nullb->q);
- else
- blk_cleanup_queue(nullb->q);
+ blk_cleanup_queue(nullb->q);
cleanup_queues(nullb);
err:
kfree(nullb);
@@ -571,15 +585,19 @@ static int __init null_init(void)
{
unsigned int i;
-#if !defined(CONFIG_SMP)
- if (irqmode == NULL_IRQ_SOFTIRQ) {
- pr_warn("null_blk: softirq completions not available.\n");
- pr_warn("null_blk: using direct completions.\n");
- irqmode = NULL_IRQ_NONE;
+ if (bs > PAGE_SIZE) {
+ pr_warn("null_blk: invalid block size\n");
+ pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
+ bs = PAGE_SIZE;
}
-#endif
- if (submit_queues > nr_cpu_ids)
+ if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
+ if (submit_queues < nr_online_nodes) {
+ pr_warn("null_blk: submit_queues param is set to %u.",
+ nr_online_nodes);
+ submit_queues = nr_online_nodes;
+ }
+ } else if (submit_queues > nr_cpu_ids)
submit_queues = nr_cpu_ids;
else if (!submit_queues)
submit_queues = 1;
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 26d03fa0bf26..51824d1f23ea 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -46,7 +46,6 @@
#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
-#define NVME_MINORS 64
#define ADMIN_TIMEOUT (60 * HZ)
static int nvme_major;
@@ -58,6 +57,17 @@ module_param(use_threaded_interrupts, int, 0);
static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
+static struct workqueue_struct *nvme_workq;
+
+static void nvme_reset_failed_dev(struct work_struct *ws);
+
+struct async_cmd_info {
+ struct kthread_work work;
+ struct kthread_worker *worker;
+ u32 result;
+ int status;
+ void *ctx;
+};
/*
* An NVM Express queue. Each device has at least two (one for admin
@@ -66,6 +76,7 @@ static struct task_struct *nvme_thread;
struct nvme_queue {
struct device *q_dmadev;
struct nvme_dev *dev;
+ char irqname[24]; /* nvme4294967295-65535\0 */
spinlock_t q_lock;
struct nvme_command *sq_cmds;
volatile struct nvme_completion *cqes;
@@ -80,9 +91,11 @@ struct nvme_queue {
u16 sq_head;
u16 sq_tail;
u16 cq_head;
+ u16 qid;
u8 cq_phase;
u8 cqe_seen;
u8 q_suspended;
+ struct async_cmd_info cmdinfo;
unsigned long cmdid_data[];
};
@@ -97,6 +110,7 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
@@ -111,6 +125,7 @@ struct nvme_cmd_info {
nvme_completion_fn fn;
void *ctx;
unsigned long timeout;
+ int aborted;
};
static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
@@ -154,6 +169,7 @@ static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
info[cmdid].fn = handler;
info[cmdid].ctx = ctx;
info[cmdid].timeout = jiffies + timeout;
+ info[cmdid].aborted = 0;
return cmdid;
}
@@ -172,6 +188,7 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
+#define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE)
static void special_completion(struct nvme_dev *dev, void *ctx,
struct nvme_completion *cqe)
@@ -180,6 +197,10 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
return;
if (ctx == CMD_CTX_FLUSH)
return;
+ if (ctx == CMD_CTX_ABORT) {
+ ++dev->abort_limit;
+ return;
+ }
if (ctx == CMD_CTX_COMPLETED) {
dev_warn(&dev->pci_dev->dev,
"completed id %d twice on queue %d\n",
@@ -196,6 +217,15 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
}
+static void async_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ struct async_cmd_info *cmdinfo = ctx;
+ cmdinfo->result = le32_to_cpup(&cqe->result);
+ cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+ queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
+}
+
/*
* Called with local interrupts disabled and the q_lock held. May not sleep.
*/
@@ -441,104 +471,19 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
return total_len;
}
-struct nvme_bio_pair {
- struct bio b1, b2, *parent;
- struct bio_vec *bv1, *bv2;
- int err;
- atomic_t cnt;
-};
-
-static void nvme_bio_pair_endio(struct bio *bio, int err)
-{
- struct nvme_bio_pair *bp = bio->bi_private;
-
- if (err)
- bp->err = err;
-
- if (atomic_dec_and_test(&bp->cnt)) {
- bio_endio(bp->parent, bp->err);
- kfree(bp->bv1);
- kfree(bp->bv2);
- kfree(bp);
- }
-}
-
-static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
- int len, int offset)
-{
- struct nvme_bio_pair *bp;
-
- BUG_ON(len > bio->bi_size);
- BUG_ON(idx > bio->bi_vcnt);
-
- bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
- if (!bp)
- return NULL;
- bp->err = 0;
-
- bp->b1 = *bio;
- bp->b2 = *bio;
-
- bp->b1.bi_size = len;
- bp->b2.bi_size -= len;
- bp->b1.bi_vcnt = idx;
- bp->b2.bi_idx = idx;
- bp->b2.bi_sector += len >> 9;
-
- if (offset) {
- bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
- GFP_ATOMIC);
- if (!bp->bv1)
- goto split_fail_1;
-
- bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
- GFP_ATOMIC);
- if (!bp->bv2)
- goto split_fail_2;
-
- memcpy(bp->bv1, bio->bi_io_vec,
- bio->bi_max_vecs * sizeof(struct bio_vec));
- memcpy(bp->bv2, bio->bi_io_vec,
- bio->bi_max_vecs * sizeof(struct bio_vec));
-
- bp->b1.bi_io_vec = bp->bv1;
- bp->b2.bi_io_vec = bp->bv2;
- bp->b2.bi_io_vec[idx].bv_offset += offset;
- bp->b2.bi_io_vec[idx].bv_len -= offset;
- bp->b1.bi_io_vec[idx].bv_len = offset;
- bp->b1.bi_vcnt++;
- } else
- bp->bv1 = bp->bv2 = NULL;
-
- bp->b1.bi_private = bp;
- bp->b2.bi_private = bp;
-
- bp->b1.bi_end_io = nvme_bio_pair_endio;
- bp->b2.bi_end_io = nvme_bio_pair_endio;
-
- bp->parent = bio;
- atomic_set(&bp->cnt, 2);
-
- return bp;
-
- split_fail_2:
- kfree(bp->bv1);
- split_fail_1:
- kfree(bp);
- return NULL;
-}
-
static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
- int idx, int len, int offset)
+ int len)
{
- struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
- if (!bp)
+ struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL);
+ if (!split)
return -ENOMEM;
+ bio_chain(split, bio);
+
if (bio_list_empty(&nvmeq->sq_cong))
add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
- bio_list_add(&nvmeq->sq_cong, &bp->b1);
- bio_list_add(&nvmeq->sq_cong, &bp->b2);
+ bio_list_add(&nvmeq->sq_cong, split);
+ bio_list_add(&nvmeq->sq_cong, bio);
return 0;
}
@@ -550,41 +495,44 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
- struct bio_vec *bvec, *bvprv = NULL;
+ struct bio_vec bvec, bvprv;
+ struct bvec_iter iter;
struct scatterlist *sg = NULL;
- int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+ int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
+ int first = 1;
if (nvmeq->dev->stripe_size)
split_len = nvmeq->dev->stripe_size -
- ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
+ ((bio->bi_iter.bi_sector << 9) &
+ (nvmeq->dev->stripe_size - 1));
sg_init_table(iod->sg, psegs);
- bio_for_each_segment(bvec, bio, i) {
- if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
- sg->length += bvec->bv_len;
+ bio_for_each_segment(bvec, bio, iter) {
+ if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
+ sg->length += bvec.bv_len;
} else {
- if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
- return nvme_split_and_submit(bio, nvmeq, i,
- length, 0);
+ if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
+ return nvme_split_and_submit(bio, nvmeq,
+ length);
sg = sg ? sg + 1 : iod->sg;
- sg_set_page(sg, bvec->bv_page, bvec->bv_len,
- bvec->bv_offset);
+ sg_set_page(sg, bvec.bv_page,
+ bvec.bv_len, bvec.bv_offset);
nsegs++;
}
- if (split_len - length < bvec->bv_len)
- return nvme_split_and_submit(bio, nvmeq, i, split_len,
- split_len - length);
- length += bvec->bv_len;
+ if (split_len - length < bvec.bv_len)
+ return nvme_split_and_submit(bio, nvmeq, split_len);
+ length += bvec.bv_len;
bvprv = bvec;
+ first = 0;
}
iod->nents = nsegs;
sg_mark_end(sg);
if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
return -ENOMEM;
- BUG_ON(length != bio->bi_size);
+ BUG_ON(length != bio->bi_iter.bi_size);
return length;
}
@@ -608,8 +556,8 @@ static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
iod->npages = 0;
range->cattr = cpu_to_le32(0);
- range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
- range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+ range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
+ range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
@@ -674,7 +622,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
}
result = -ENOMEM;
- iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+ iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
if (!iod)
goto nomem;
iod->private = bio;
@@ -723,7 +671,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
GFP_ATOMIC);
- cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+ cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
cmnd->rw.control = cpu_to_le16(control);
cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
@@ -775,7 +723,7 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
return 0;
- writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
+ writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
nvmeq->cq_head = head;
nvmeq->cq_phase = phase;
@@ -886,12 +834,34 @@ int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
return cmdinfo.status;
}
+static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd,
+ struct async_cmd_info *cmdinfo, unsigned timeout)
+{
+ int cmdid;
+
+ cmdid = alloc_cmdid_killable(nvmeq, cmdinfo, async_completion, timeout);
+ if (cmdid < 0)
+ return cmdid;
+ cmdinfo->status = -EINTR;
+ cmd->common.command_id = cmdid;
+ nvme_submit_cmd(nvmeq, cmd);
+ return 0;
+}
+
int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
u32 *result)
{
return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}
+static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
+ struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
+{
+ return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo,
+ ADMIN_TIMEOUT);
+}
+
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
int status;
@@ -1002,6 +972,56 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
}
/**
+ * nvme_abort_cmd - Attempt aborting a command
+ * @cmdid: Command id of a timed out IO
+ * @nvmeq: The queue with timed out IO
+ *
+ * Schedule controller reset if the command was already aborted once before and
+ * still hasn't been returned to the driver, or if this is the admin queue.
+ */
+static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
+{
+ int a_cmdid;
+ struct nvme_command cmd;
+ struct nvme_dev *dev = nvmeq->dev;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+
+ if (!nvmeq->qid || info[cmdid].aborted) {
+ if (work_busy(&dev->reset_work))
+ return;
+ list_del_init(&dev->node);
+ dev_warn(&dev->pci_dev->dev,
+ "I/O %d QID %d timeout, reset controller\n", cmdid,
+ nvmeq->qid);
+ PREPARE_WORK(&dev->reset_work, nvme_reset_failed_dev);
+ queue_work(nvme_workq, &dev->reset_work);
+ return;
+ }
+
+ if (!dev->abort_limit)
+ return;
+
+ a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion,
+ ADMIN_TIMEOUT);
+ if (a_cmdid < 0)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abort.opcode = nvme_admin_abort_cmd;
+ cmd.abort.cid = cmdid;
+ cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
+ cmd.abort.command_id = a_cmdid;
+
+ --dev->abort_limit;
+ info[cmdid].aborted = 1;
+ info[cmdid].timeout = jiffies + ADMIN_TIMEOUT;
+
+ dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
+ nvmeq->qid);
+ nvme_submit_cmd(dev->queues[0], &cmd);
+}
+
+/**
* nvme_cancel_ios - Cancel outstanding I/Os
* @queue: The queue to cancel I/Os on
* @timeout: True to only cancel I/Os which have timed out
@@ -1024,7 +1044,12 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
continue;
if (info[cmdid].ctx == CMD_CTX_CANCELLED)
continue;
- dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
+ if (timeout && nvmeq->dev->initialized) {
+ nvme_abort_cmd(cmdid, nvmeq);
+ continue;
+ }
+ dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
+ nvmeq->qid);
ctx = cancel_cmdid(nvmeq, cmdid, &fn);
fn(nvmeq->dev, ctx, &cqe);
}
@@ -1046,26 +1071,31 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
kfree(nvmeq);
}
-static void nvme_free_queues(struct nvme_dev *dev)
+static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
int i;
- for (i = dev->queue_count - 1; i >= 0; i--) {
+ for (i = dev->queue_count - 1; i >= lowest; i--) {
nvme_free_queue(dev->queues[i]);
dev->queue_count--;
dev->queues[i] = NULL;
}
}
-static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+/**
+ * nvme_suspend_queue - put queue into suspended state
+ * @nvmeq: queue to suspend
+ *
+ * Returns 1 if already suspended, 0 otherwise.
+ */
+static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
- struct nvme_queue *nvmeq = dev->queues[qid];
- int vector = dev->entry[nvmeq->cq_vector].vector;
+ int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
spin_lock_irq(&nvmeq->q_lock);
if (nvmeq->q_suspended) {
spin_unlock_irq(&nvmeq->q_lock);
- return;
+ return 1;
}
nvmeq->q_suspended = 1;
spin_unlock_irq(&nvmeq->q_lock);
@@ -1073,18 +1103,35 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
irq_set_affinity_hint(vector, NULL);
free_irq(vector, nvmeq);
- /* Don't tell the adapter to delete the admin queue */
- if (qid) {
- adapter_delete_sq(dev, qid);
- adapter_delete_cq(dev, qid);
- }
+ return 0;
+}
+static void nvme_clear_queue(struct nvme_queue *nvmeq)
+{
spin_lock_irq(&nvmeq->q_lock);
nvme_process_cq(nvmeq);
nvme_cancel_ios(nvmeq, false);
spin_unlock_irq(&nvmeq->q_lock);
}
+static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+{
+ struct nvme_queue *nvmeq = dev->queues[qid];
+
+ if (!nvmeq)
+ return;
+ if (nvme_suspend_queue(nvmeq))
+ return;
+
+ /* Don't tell the adapter to delete the admin queue.
+ * Don't tell a removed adapter to delete IO queues. */
+ if (qid && readl(&dev->bar->csts) != -1) {
+ adapter_delete_sq(dev, qid);
+ adapter_delete_cq(dev, qid);
+ }
+ nvme_clear_queue(nvmeq);
+}
+
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
int depth, int vector)
{
@@ -1107,15 +1154,18 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_dmadev = dmadev;
nvmeq->dev = dev;
+ snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
+ dev->instance, qid);
spin_lock_init(&nvmeq->q_lock);
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
init_waitqueue_head(&nvmeq->sq_full);
init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
bio_list_init(&nvmeq->sq_cong);
- nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+ nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth;
nvmeq->cq_vector = vector;
+ nvmeq->qid = qid;
nvmeq->q_suspended = 1;
dev->queue_count++;
@@ -1134,11 +1184,10 @@ static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
{
if (use_threaded_interrupts)
return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
- nvme_irq_check, nvme_irq,
- IRQF_DISABLED | IRQF_SHARED,
+ nvme_irq_check, nvme_irq, IRQF_SHARED,
name, nvmeq);
return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
- IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
+ IRQF_SHARED, name, nvmeq);
}
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
@@ -1149,7 +1198,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
nvmeq->sq_tail = 0;
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
- nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+ nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
memset(nvmeq->cmdid_data, 0, extra);
memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
nvme_cancel_ios(nvmeq, false);
@@ -1169,13 +1218,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
if (result < 0)
goto release_cq;
- result = queue_request_irq(dev, nvmeq, "nvme");
+ result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result < 0)
goto release_sq;
- spin_lock(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->q_lock);
nvme_init_queue(nvmeq, qid);
- spin_unlock(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->q_lock);
return result;
@@ -1287,13 +1336,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
if (result)
return result;
- result = queue_request_irq(dev, nvmeq, "nvme admin");
+ result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result)
return result;
- spin_lock(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->q_lock);
nvme_init_queue(nvmeq, 0);
- spin_unlock(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->q_lock);
return result;
}
@@ -1569,10 +1618,47 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
}
}
+#ifdef CONFIG_COMPAT
+static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+ switch (cmd) {
+ case SG_IO:
+ return nvme_sg_io32(ns, arg);
+ }
+ return nvme_ioctl(bdev, mode, cmd, arg);
+}
+#else
+#define nvme_compat_ioctl NULL
+#endif
+
+static int nvme_open(struct block_device *bdev, fmode_t mode)
+{
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+ struct nvme_dev *dev = ns->dev;
+
+ kref_get(&dev->kref);
+ return 0;
+}
+
+static void nvme_free_dev(struct kref *kref);
+
+static void nvme_release(struct gendisk *disk, fmode_t mode)
+{
+ struct nvme_ns *ns = disk->private_data;
+ struct nvme_dev *dev = ns->dev;
+
+ kref_put(&dev->kref, nvme_free_dev);
+}
+
static const struct block_device_operations nvme_fops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
- .compat_ioctl = nvme_ioctl,
+ .compat_ioctl = nvme_compat_ioctl,
+ .open = nvme_open,
+ .release = nvme_release,
};
static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
@@ -1596,13 +1682,25 @@ static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
static int nvme_kthread(void *data)
{
- struct nvme_dev *dev;
+ struct nvme_dev *dev, *next;
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
spin_lock(&dev_list_lock);
- list_for_each_entry(dev, &dev_list, node) {
+ list_for_each_entry_safe(dev, next, &dev_list, node) {
int i;
+ if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
+ dev->initialized) {
+ if (work_busy(&dev->reset_work))
+ continue;
+ list_del_init(&dev->node);
+ dev_warn(&dev->pci_dev->dev,
+ "Failed status, reset controller\n");
+ PREPARE_WORK(&dev->reset_work,
+ nvme_reset_failed_dev);
+ queue_work(nvme_workq, &dev->reset_work);
+ continue;
+ }
for (i = 0; i < dev->queue_count; i++) {
struct nvme_queue *nvmeq = dev->queues[i];
if (!nvmeq)
@@ -1623,33 +1721,6 @@ static int nvme_kthread(void *data)
return 0;
}
-static DEFINE_IDA(nvme_index_ida);
-
-static int nvme_get_ns_idx(void)
-{
- int index, error;
-
- do {
- if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
- return -1;
-
- spin_lock(&dev_list_lock);
- error = ida_get_new(&nvme_index_ida, &index);
- spin_unlock(&dev_list_lock);
- } while (error == -EAGAIN);
-
- if (error)
- index = -1;
- return index;
-}
-
-static void nvme_put_ns_idx(int index)
-{
- spin_lock(&dev_list_lock);
- ida_remove(&nvme_index_ida, index);
- spin_unlock(&dev_list_lock);
-}
-
static void nvme_config_discard(struct nvme_ns *ns)
{
u32 logical_block_size = queue_logical_block_size(ns->queue);
@@ -1683,7 +1754,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
ns->dev = dev;
ns->queue->queuedata = ns;
- disk = alloc_disk(NVME_MINORS);
+ disk = alloc_disk(0);
if (!disk)
goto out_free_queue;
ns->ns_id = nsid;
@@ -1696,12 +1767,12 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
disk->major = nvme_major;
- disk->minors = NVME_MINORS;
- disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
+ disk->first_minor = 0;
disk->fops = &nvme_fops;
disk->private_data = ns;
disk->queue = ns->queue;
disk->driverfs_dev = &dev->pci_dev->dev;
+ disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
@@ -1717,15 +1788,6 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
return NULL;
}
-static void nvme_ns_free(struct nvme_ns *ns)
-{
- int index = ns->disk->first_minor / NVME_MINORS;
- put_disk(ns->disk);
- nvme_put_ns_idx(index);
- blk_cleanup_queue(ns->queue);
- kfree(ns);
-}
-
static int set_queue_count(struct nvme_dev *dev, int count)
{
int status;
@@ -1741,11 +1803,12 @@ static int set_queue_count(struct nvme_dev *dev, int count)
static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
- return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+ return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
}
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
+ struct nvme_queue *adminq = dev->queues[0];
struct pci_dev *pdev = dev->pci_dev;
int result, cpu, i, vecs, nr_io_queues, size, q_depth;
@@ -1772,7 +1835,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
}
/* Deregister the admin queue's interrupt */
- free_irq(dev->entry[0].vector, dev->queues[0]);
+ free_irq(dev->entry[0].vector, adminq);
vecs = nr_io_queues;
for (i = 0; i < vecs; i++)
@@ -1810,9 +1873,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
*/
nr_io_queues = vecs;
- result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+ result = queue_request_irq(dev, adminq, adminq->irqname);
if (result) {
- dev->queues[0]->q_suspended = 1;
+ adminq->q_suspended = 1;
goto free_queues;
}
@@ -1821,9 +1884,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
struct nvme_queue *nvmeq = dev->queues[i];
- spin_lock(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->q_lock);
nvme_cancel_ios(nvmeq, false);
- spin_unlock(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->q_lock);
nvme_free_queue(nvmeq);
dev->queue_count--;
@@ -1864,7 +1927,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
return 0;
free_queues:
- nvme_free_queues(dev);
+ nvme_free_queues(dev, 1);
return result;
}
@@ -1876,6 +1939,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
*/
static int nvme_dev_add(struct nvme_dev *dev)
{
+ struct pci_dev *pdev = dev->pci_dev;
int res;
unsigned nn, i;
struct nvme_ns *ns;
@@ -1885,8 +1949,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
dma_addr_t dma_addr;
int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
- mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
- GFP_KERNEL);
+ mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
if (!mem)
return -ENOMEM;
@@ -1899,13 +1962,14 @@ static int nvme_dev_add(struct nvme_dev *dev)
ctrl = mem;
nn = le32_to_cpup(&ctrl->nn);
dev->oncs = le16_to_cpup(&ctrl->oncs);
+ dev->abort_limit = ctrl->acl + 1;
memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
if (ctrl->mdts)
dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
- if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
- (dev->pci_dev->device == 0x0953) && ctrl->vs[3])
+ if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+ (pdev->device == 0x0953) && ctrl->vs[3])
dev->stripe_size = 1 << (ctrl->vs[3] + shift);
id_ns = mem;
@@ -1953,16 +2017,21 @@ static int nvme_dev_map(struct nvme_dev *dev)
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
goto disable;
- pci_set_drvdata(pdev, dev);
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
if (!dev->bar)
goto disable;
-
- dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap));
+ if (readl(&dev->bar->csts) == -1) {
+ result = -ENODEV;
+ goto unmap;
+ }
+ dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
dev->dbs = ((void __iomem *)dev->bar) + 4096;
return 0;
+ unmap:
+ iounmap(dev->bar);
+ dev->bar = NULL;
disable:
pci_release_regions(pdev);
disable_pci:
@@ -1980,37 +2049,183 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
if (dev->bar) {
iounmap(dev->bar);
dev->bar = NULL;
+ pci_release_regions(dev->pci_dev);
}
- pci_release_regions(dev->pci_dev);
if (pci_is_enabled(dev->pci_dev))
pci_disable_device(dev->pci_dev);
}
+struct nvme_delq_ctx {
+ struct task_struct *waiter;
+ struct kthread_worker *worker;
+ atomic_t refcount;
+};
+
+static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
+{
+ dq->waiter = current;
+ mb();
+
+ for (;;) {
+ set_current_state(TASK_KILLABLE);
+ if (!atomic_read(&dq->refcount))
+ break;
+ if (!schedule_timeout(ADMIN_TIMEOUT) ||
+ fatal_signal_pending(current)) {
+ set_current_state(TASK_RUNNING);
+
+ nvme_disable_ctrl(dev, readq(&dev->bar->cap));
+ nvme_disable_queue(dev, 0);
+
+ send_sig(SIGKILL, dq->worker->task, 1);
+ flush_kthread_worker(dq->worker);
+ return;
+ }
+ }
+ set_current_state(TASK_RUNNING);
+}
+
+static void nvme_put_dq(struct nvme_delq_ctx *dq)
+{
+ atomic_dec(&dq->refcount);
+ if (dq->waiter)
+ wake_up_process(dq->waiter);
+}
+
+static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
+{
+ atomic_inc(&dq->refcount);
+ return dq;
+}
+
+static void nvme_del_queue_end(struct nvme_queue *nvmeq)
+{
+ struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
+
+ nvme_clear_queue(nvmeq);
+ nvme_put_dq(dq);
+}
+
+static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
+ kthread_work_func_t fn)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.delete_queue.opcode = opcode;
+ c.delete_queue.qid = cpu_to_le16(nvmeq->qid);
+
+ init_kthread_work(&nvmeq->cmdinfo.work, fn);
+ return nvme_submit_admin_cmd_async(nvmeq->dev, &c, &nvmeq->cmdinfo);
+}
+
+static void nvme_del_cq_work_handler(struct kthread_work *work)
+{
+ struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+ cmdinfo.work);
+ nvme_del_queue_end(nvmeq);
+}
+
+static int nvme_delete_cq(struct nvme_queue *nvmeq)
+{
+ return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq,
+ nvme_del_cq_work_handler);
+}
+
+static void nvme_del_sq_work_handler(struct kthread_work *work)
+{
+ struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+ cmdinfo.work);
+ int status = nvmeq->cmdinfo.status;
+
+ if (!status)
+ status = nvme_delete_cq(nvmeq);
+ if (status)
+ nvme_del_queue_end(nvmeq);
+}
+
+static int nvme_delete_sq(struct nvme_queue *nvmeq)
+{
+ return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq,
+ nvme_del_sq_work_handler);
+}
+
+static void nvme_del_queue_start(struct kthread_work *work)
+{
+ struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+ cmdinfo.work);
+ allow_signal(SIGKILL);
+ if (nvme_delete_sq(nvmeq))
+ nvme_del_queue_end(nvmeq);
+}
+
+static void nvme_disable_io_queues(struct nvme_dev *dev)
+{
+ int i;
+ DEFINE_KTHREAD_WORKER_ONSTACK(worker);
+ struct nvme_delq_ctx dq;
+ struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
+ &worker, "nvme%d", dev->instance);
+
+ if (IS_ERR(kworker_task)) {
+ dev_err(&dev->pci_dev->dev,
+ "Failed to create queue del task\n");
+ for (i = dev->queue_count - 1; i > 0; i--)
+ nvme_disable_queue(dev, i);
+ return;
+ }
+
+ dq.waiter = NULL;
+ atomic_set(&dq.refcount, 0);
+ dq.worker = &worker;
+ for (i = dev->queue_count - 1; i > 0; i--) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+
+ if (nvme_suspend_queue(nvmeq))
+ continue;
+ nvmeq->cmdinfo.ctx = nvme_get_dq(&dq);
+ nvmeq->cmdinfo.worker = dq.worker;
+ init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start);
+ queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work);
+ }
+ nvme_wait_dq(&dq, dev);
+ kthread_stop(kworker_task);
+}
+
static void nvme_dev_shutdown(struct nvme_dev *dev)
{
int i;
- for (i = dev->queue_count - 1; i >= 0; i--)
- nvme_disable_queue(dev, i);
+ dev->initialized = 0;
spin_lock(&dev_list_lock);
list_del_init(&dev->node);
spin_unlock(&dev_list_lock);
- if (dev->bar)
+ if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
+ for (i = dev->queue_count - 1; i >= 0; i--) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+ nvme_suspend_queue(nvmeq);
+ nvme_clear_queue(nvmeq);
+ }
+ } else {
+ nvme_disable_io_queues(dev);
nvme_shutdown_ctrl(dev);
+ nvme_disable_queue(dev, 0);
+ }
nvme_dev_unmap(dev);
}
static void nvme_dev_remove(struct nvme_dev *dev)
{
- struct nvme_ns *ns, *next;
+ struct nvme_ns *ns;
- list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
- list_del(&ns->list);
- del_gendisk(ns->disk);
- nvme_ns_free(ns);
+ list_for_each_entry(ns, &dev->namespaces, list) {
+ if (ns->disk->flags & GENHD_FL_UP)
+ del_gendisk(ns->disk);
+ if (!blk_queue_dying(ns->queue))
+ blk_cleanup_queue(ns->queue);
}
}
@@ -2067,14 +2282,22 @@ static void nvme_release_instance(struct nvme_dev *dev)
spin_unlock(&dev_list_lock);
}
+static void nvme_free_namespaces(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns, *next;
+
+ list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+ list_del(&ns->list);
+ put_disk(ns->disk);
+ kfree(ns);
+ }
+}
+
static void nvme_free_dev(struct kref *kref)
{
struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
- nvme_dev_remove(dev);
- nvme_dev_shutdown(dev);
- nvme_free_queues(dev);
- nvme_release_instance(dev);
- nvme_release_prp_pools(dev);
+
+ nvme_free_namespaces(dev);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -2138,6 +2361,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
return result;
disable:
+ nvme_disable_queue(dev, 0);
spin_lock(&dev_list_lock);
list_del_init(&dev->node);
spin_unlock(&dev_list_lock);
@@ -2146,6 +2370,71 @@ static int nvme_dev_start(struct nvme_dev *dev)
return result;
}
+static int nvme_remove_dead_ctrl(void *arg)
+{
+ struct nvme_dev *dev = (struct nvme_dev *)arg;
+ struct pci_dev *pdev = dev->pci_dev;
+
+ if (pci_get_drvdata(pdev))
+ pci_stop_and_remove_bus_device(pdev);
+ kref_put(&dev->kref, nvme_free_dev);
+ return 0;
+}
+
+static void nvme_remove_disks(struct work_struct *ws)
+{
+ int i;
+ struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
+
+ nvme_dev_remove(dev);
+ spin_lock(&dev_list_lock);
+ for (i = dev->queue_count - 1; i > 0; i--) {
+ BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
+ nvme_free_queue(dev->queues[i]);
+ dev->queue_count--;
+ dev->queues[i] = NULL;
+ }
+ spin_unlock(&dev_list_lock);
+}
+
+static int nvme_dev_resume(struct nvme_dev *dev)
+{
+ int ret;
+
+ ret = nvme_dev_start(dev);
+ if (ret && ret != -EBUSY)
+ return ret;
+ if (ret == -EBUSY) {
+ spin_lock(&dev_list_lock);
+ PREPARE_WORK(&dev->reset_work, nvme_remove_disks);
+ queue_work(nvme_workq, &dev->reset_work);
+ spin_unlock(&dev_list_lock);
+ }
+ dev->initialized = 1;
+ return 0;
+}
+
+static void nvme_dev_reset(struct nvme_dev *dev)
+{
+ nvme_dev_shutdown(dev);
+ if (nvme_dev_resume(dev)) {
+ dev_err(&dev->pci_dev->dev, "Device failed to resume\n");
+ kref_get(&dev->kref);
+ if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
+ dev->instance))) {
+ dev_err(&dev->pci_dev->dev,
+ "Failed to start controller remove task\n");
+ kref_put(&dev->kref, nvme_free_dev);
+ }
+ }
+}
+
+static void nvme_reset_failed_dev(struct work_struct *ws)
+{
+ struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
+ nvme_dev_reset(dev);
+}
+
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int result = -ENOMEM;
@@ -2164,8 +2453,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto free;
INIT_LIST_HEAD(&dev->namespaces);
+ INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
dev->pci_dev = pdev;
-
+ pci_set_drvdata(pdev, dev);
result = nvme_set_instance(dev);
if (result)
goto free;
@@ -2181,6 +2471,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto release_pools;
}
+ kref_init(&dev->kref);
result = nvme_dev_add(dev);
if (result)
goto shutdown;
@@ -2195,15 +2486,16 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto remove;
- kref_init(&dev->kref);
+ dev->initialized = 1;
return 0;
remove:
nvme_dev_remove(dev);
+ nvme_free_namespaces(dev);
shutdown:
nvme_dev_shutdown(dev);
release_pools:
- nvme_free_queues(dev);
+ nvme_free_queues(dev, 0);
nvme_release_prp_pools(dev);
release:
nvme_release_instance(dev);
@@ -2214,10 +2506,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return result;
}
+static void nvme_shutdown(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+ nvme_dev_shutdown(dev);
+}
+
static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ spin_lock(&dev_list_lock);
+ list_del_init(&dev->node);
+ spin_unlock(&dev_list_lock);
+
+ pci_set_drvdata(pdev, NULL);
+ flush_work(&dev->reset_work);
misc_deregister(&dev->miscdev);
+ nvme_dev_remove(dev);
+ nvme_dev_shutdown(dev);
+ nvme_free_queues(dev, 0);
+ nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
kref_put(&dev->kref, nvme_free_dev);
}
@@ -2241,13 +2551,12 @@ static int nvme_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct nvme_dev *ndev = pci_get_drvdata(pdev);
- int ret;
- ret = nvme_dev_start(ndev);
- /* XXX: should remove gendisks if resume fails */
- if (ret)
- nvme_free_queues(ndev);
- return ret;
+ if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
+ PREPARE_WORK(&ndev->reset_work, nvme_reset_failed_dev);
+ queue_work(nvme_workq, &ndev->reset_work);
+ }
+ return 0;
}
static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
@@ -2274,6 +2583,7 @@ static struct pci_driver nvme_driver = {
.id_table = nvme_id_table,
.probe = nvme_probe,
.remove = nvme_remove,
+ .shutdown = nvme_shutdown,
.driver = {
.pm = &nvme_dev_pm_ops,
},
@@ -2288,9 +2598,14 @@ static int __init nvme_init(void)
if (IS_ERR(nvme_thread))
return PTR_ERR(nvme_thread);
+ result = -ENOMEM;
+ nvme_workq = create_singlethread_workqueue("nvme");
+ if (!nvme_workq)
+ goto kill_kthread;
+
result = register_blkdev(nvme_major, "nvme");
if (result < 0)
- goto kill_kthread;
+ goto kill_workq;
else if (result > 0)
nvme_major = result;
@@ -2301,6 +2616,8 @@ static int __init nvme_init(void)
unregister_blkdev:
unregister_blkdev(nvme_major, "nvme");
+ kill_workq:
+ destroy_workqueue(nvme_workq);
kill_kthread:
kthread_stop(nvme_thread);
return result;
@@ -2310,6 +2627,7 @@ static void __exit nvme_exit(void)
{
pci_unregister_driver(&nvme_driver);
unregister_blkdev(nvme_major, "nvme");
+ destroy_workqueue(nvme_workq);
kthread_stop(nvme_thread);
}
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 4a4ff4eb8e23..4a0ceb64e269 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -25,6 +25,7 @@
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
+#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
@@ -3038,6 +3039,152 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
return retcode;
}
+#ifdef CONFIG_COMPAT
+typedef struct sg_io_hdr32 {
+ compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
+ compat_int_t dxfer_direction; /* [i] data transfer direction */
+ unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */
+ unsigned char mx_sb_len; /* [i] max length to write to sbp */
+ unsigned short iovec_count; /* [i] 0 implies no scatter gather */
+ compat_uint_t dxfer_len; /* [i] byte count of data transfer */
+ compat_uint_t dxferp; /* [i], [*io] points to data transfer memory
+ or scatter gather list */
+ compat_uptr_t cmdp; /* [i], [*i] points to command to perform */
+ compat_uptr_t sbp; /* [i], [*o] points to sense_buffer memory */
+ compat_uint_t timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */
+ compat_uint_t flags; /* [i] 0 -> default, see SG_FLAG... */
+ compat_int_t pack_id; /* [i->o] unused internally (normally) */
+ compat_uptr_t usr_ptr; /* [i->o] unused internally */
+ unsigned char status; /* [o] scsi status */
+ unsigned char masked_status; /* [o] shifted, masked scsi status */
+ unsigned char msg_status; /* [o] messaging level data (optional) */
+ unsigned char sb_len_wr; /* [o] byte count actually written to sbp */
+ unsigned short host_status; /* [o] errors from host adapter */
+ unsigned short driver_status; /* [o] errors from software driver */
+ compat_int_t resid; /* [o] dxfer_len - actual_transferred */
+ compat_uint_t duration; /* [o] time taken by cmd (unit: millisec) */
+ compat_uint_t info; /* [o] auxiliary information */
+} sg_io_hdr32_t; /* 64 bytes long (on sparc32) */
+
+typedef struct sg_iovec32 {
+ compat_uint_t iov_base;
+ compat_uint_t iov_len;
+} sg_iovec32_t;
+
+static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count)
+{
+ sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1);
+ sg_iovec32_t __user *iov32 = dxferp;
+ int i;
+
+ for (i = 0; i < iovec_count; i++) {
+ u32 base, len;
+
+ if (get_user(base, &iov32[i].iov_base) ||
+ get_user(len, &iov32[i].iov_len) ||
+ put_user(compat_ptr(base), &iov[i].iov_base) ||
+ put_user(len, &iov[i].iov_len))
+ return -EFAULT;
+ }
+
+ if (put_user(iov, &sgio->dxferp))
+ return -EFAULT;
+ return 0;
+}
+
+int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg)
+{
+ sg_io_hdr32_t __user *sgio32 = (sg_io_hdr32_t __user *)arg;
+ sg_io_hdr_t __user *sgio;
+ u16 iovec_count;
+ u32 data;
+ void __user *dxferp;
+ int err;
+ int interface_id;
+
+ if (get_user(interface_id, &sgio32->interface_id))
+ return -EFAULT;
+ if (interface_id != 'S')
+ return -EINVAL;
+
+ if (get_user(iovec_count, &sgio32->iovec_count))
+ return -EFAULT;
+
+ {
+ void __user *top = compat_alloc_user_space(0);
+ void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) +
+ (iovec_count * sizeof(sg_iovec_t)));
+ if (new > top)
+ return -EINVAL;
+
+ sgio = new;
+ }
+
+ /* Ok, now construct. */
+ if (copy_in_user(&sgio->interface_id, &sgio32->interface_id,
+ (2 * sizeof(int)) +
+ (2 * sizeof(unsigned char)) +
+ (1 * sizeof(unsigned short)) +
+ (1 * sizeof(unsigned int))))
+ return -EFAULT;
+
+ if (get_user(data, &sgio32->dxferp))
+ return -EFAULT;
+ dxferp = compat_ptr(data);
+ if (iovec_count) {
+ if (sg_build_iovec(sgio, dxferp, iovec_count))
+ return -EFAULT;
+ } else {
+ if (put_user(dxferp, &sgio->dxferp))
+ return -EFAULT;
+ }
+
+ {
+ unsigned char __user *cmdp;
+ unsigned char __user *sbp;
+
+ if (get_user(data, &sgio32->cmdp))
+ return -EFAULT;
+ cmdp = compat_ptr(data);
+
+ if (get_user(data, &sgio32->sbp))
+ return -EFAULT;
+ sbp = compat_ptr(data);
+
+ if (put_user(cmdp, &sgio->cmdp) ||
+ put_user(sbp, &sgio->sbp))
+ return -EFAULT;
+ }
+
+ if (copy_in_user(&sgio->timeout, &sgio32->timeout,
+ 3 * sizeof(int)))
+ return -EFAULT;
+
+ if (get_user(data, &sgio32->usr_ptr))
+ return -EFAULT;
+ if (put_user(compat_ptr(data), &sgio->usr_ptr))
+ return -EFAULT;
+
+ err = nvme_sg_io(ns, sgio);
+ if (err >= 0) {
+ void __user *datap;
+
+ if (copy_in_user(&sgio32->pack_id, &sgio->pack_id,
+ sizeof(int)) ||
+ get_user(datap, &sgio->usr_ptr) ||
+ put_user((u32)(unsigned long)datap,
+ &sgio32->usr_ptr) ||
+ copy_in_user(&sgio32->status, &sgio->status,
+ (4 * sizeof(unsigned char)) +
+ (2 * sizeof(unsigned short)) +
+ (3 * sizeof(int))))
+ err = -EFAULT;
+ }
+
+ return err;
+}
+#endif
+
int nvme_sg_get_version_num(int __user *ip)
{
return put_user(sg_version_num, ip);
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 4a27b1de5fcb..2ce3dfd7e6b9 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -581,7 +581,7 @@ static ssize_t pg_write(struct file *filp, const char __user *buf, size_t count,
if (hdr.magic != PG_MAGIC)
return -EINVAL;
- if (hdr.dlen > PG_MAX_DATA)
+ if (hdr.dlen < 0 || hdr.dlen > PG_MAX_DATA)
return -EINVAL;
if ((count - hs) > PG_MAX_DATA)
return -EINVAL;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ff8668c5efb1..a2af73db187b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -651,7 +651,7 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
for (;;) {
tmp = rb_entry(n, struct pkt_rb_node, rb_node);
- if (s <= tmp->bio->bi_sector)
+ if (s <= tmp->bio->bi_iter.bi_sector)
next = n->rb_left;
else
next = n->rb_right;
@@ -660,12 +660,12 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
n = next;
}
- if (s > tmp->bio->bi_sector) {
+ if (s > tmp->bio->bi_iter.bi_sector) {
tmp = pkt_rbtree_next(tmp);
if (!tmp)
return NULL;
}
- BUG_ON(s > tmp->bio->bi_sector);
+ BUG_ON(s > tmp->bio->bi_iter.bi_sector);
return tmp;
}
@@ -676,13 +676,13 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
{
struct rb_node **p = &pd->bio_queue.rb_node;
struct rb_node *parent = NULL;
- sector_t s = node->bio->bi_sector;
+ sector_t s = node->bio->bi_iter.bi_sector;
struct pkt_rb_node *tmp;
while (*p) {
parent = *p;
tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
- if (s < tmp->bio->bi_sector)
+ if (s < tmp->bio->bi_iter.bi_sector)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
@@ -706,7 +706,9 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
WRITE : READ, __GFP_WAIT);
if (cgc->buflen) {
- if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT))
+ ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
+ __GFP_WAIT);
+ if (ret)
goto out;
}
@@ -857,7 +859,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
spin_lock(&pd->iosched.lock);
bio = bio_list_peek(&pd->iosched.write_queue);
spin_unlock(&pd->iosched.lock);
- if (bio && (bio->bi_sector == pd->iosched.last_write))
+ if (bio && (bio->bi_iter.bi_sector ==
+ pd->iosched.last_write))
need_write_seek = 0;
if (need_write_seek && reads_queued) {
if (atomic_read(&pd->cdrw.pending_bios) > 0) {
@@ -888,7 +891,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
continue;
if (bio_data_dir(bio) == READ)
- pd->iosched.successive_reads += bio->bi_size >> 10;
+ pd->iosched.successive_reads +=
+ bio->bi_iter.bi_size >> 10;
else {
pd->iosched.successive_reads = 0;
pd->iosched.last_write = bio_end_sector(bio);
@@ -978,7 +982,7 @@ static void pkt_end_io_read(struct bio *bio, int err)
pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
bio, (unsigned long long)pkt->sector,
- (unsigned long long)bio->bi_sector, err);
+ (unsigned long long)bio->bi_iter.bi_sector, err);
if (err)
atomic_inc(&pkt->io_errors);
@@ -1026,8 +1030,9 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
memset(written, 0, sizeof(written));
spin_lock(&pkt->lock);
bio_list_for_each(bio, &pkt->orig_bios) {
- int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
- int num_frames = bio->bi_size / CD_FRAMESIZE;
+ int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
+ (CD_FRAMESIZE >> 9);
+ int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
BUG_ON(first_frame < 0);
BUG_ON(first_frame + num_frames > pkt->frames);
@@ -1053,7 +1058,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
bio = pkt->r_bios[f];
bio_reset(bio);
- bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+ bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio->bi_bdev = pd->bdev;
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
@@ -1150,8 +1155,8 @@ static int pkt_start_recovery(struct packet_data *pkt)
bio_reset(pkt->bio);
pkt->bio->bi_bdev = pd->bdev;
pkt->bio->bi_rw = REQ_WRITE;
- pkt->bio->bi_sector = new_sector;
- pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
+ pkt->bio->bi_iter.bi_sector = new_sector;
+ pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
pkt->bio->bi_vcnt = pkt->frames;
pkt->bio->bi_end_io = pkt_end_io_packet_write;
@@ -1213,7 +1218,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
node = first_node;
while (node) {
bio = node->bio;
- zone = get_zone(bio->bi_sector, pd);
+ zone = get_zone(bio->bi_iter.bi_sector, pd);
list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
if (p->sector == zone) {
bio = NULL;
@@ -1252,14 +1257,14 @@ try_next_bio:
pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
bio = node->bio;
- pkt_dbg(2, pd, "found zone=%llx\n",
- (unsigned long long)get_zone(bio->bi_sector, pd));
- if (get_zone(bio->bi_sector, pd) != zone)
+ pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
+ get_zone(bio->bi_iter.bi_sector, pd));
+ if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
break;
pkt_rbtree_erase(pd, node);
spin_lock(&pkt->lock);
bio_list_add(&pkt->orig_bios, bio);
- pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+ pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
spin_unlock(&pkt->lock);
}
/* check write congestion marks, and if bio_queue_size is
@@ -1293,7 +1298,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
bio_reset(pkt->w_bio);
- pkt->w_bio->bi_sector = pkt->sector;
+ pkt->w_bio->bi_iter.bi_sector = pkt->sector;
pkt->w_bio->bi_bdev = pd->bdev;
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
@@ -2335,75 +2340,29 @@ static void pkt_end_io_read_cloned(struct bio *bio, int err)
pkt_bio_finished(pd);
}
-static void pkt_make_request(struct request_queue *q, struct bio *bio)
+static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
- struct pktcdvd_device *pd;
- char b[BDEVNAME_SIZE];
+ struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
+ struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
+
+ psd->pd = pd;
+ psd->bio = bio;
+ cloned_bio->bi_bdev = pd->bdev;
+ cloned_bio->bi_private = psd;
+ cloned_bio->bi_end_io = pkt_end_io_read_cloned;
+ pd->stats.secs_r += bio_sectors(bio);
+ pkt_queue_bio(pd, cloned_bio);
+}
+
+static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
+{
+ struct pktcdvd_device *pd = q->queuedata;
sector_t zone;
struct packet_data *pkt;
int was_empty, blocked_bio;
struct pkt_rb_node *node;
- pd = q->queuedata;
- if (!pd) {
- pr_err("%s incorrect request queue\n",
- bdevname(bio->bi_bdev, b));
- goto end_io;
- }
-
- /*
- * Clone READ bios so we can have our own bi_end_io callback.
- */
- if (bio_data_dir(bio) == READ) {
- struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
- struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
-
- psd->pd = pd;
- psd->bio = bio;
- cloned_bio->bi_bdev = pd->bdev;
- cloned_bio->bi_private = psd;
- cloned_bio->bi_end_io = pkt_end_io_read_cloned;
- pd->stats.secs_r += bio_sectors(bio);
- pkt_queue_bio(pd, cloned_bio);
- return;
- }
-
- if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
- pkt_notice(pd, "WRITE for ro device (%llu)\n",
- (unsigned long long)bio->bi_sector);
- goto end_io;
- }
-
- if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
- pkt_err(pd, "wrong bio size\n");
- goto end_io;
- }
-
- blk_queue_bounce(q, &bio);
-
- zone = get_zone(bio->bi_sector, pd);
- pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
- (unsigned long long)bio->bi_sector,
- (unsigned long long)bio_end_sector(bio));
-
- /* Check if we have to split the bio */
- {
- struct bio_pair *bp;
- sector_t last_zone;
- int first_sectors;
-
- last_zone = get_zone(bio_end_sector(bio) - 1, pd);
- if (last_zone != zone) {
- BUG_ON(last_zone != zone + pd->settings.size);
- first_sectors = last_zone - bio->bi_sector;
- bp = bio_split(bio, first_sectors);
- BUG_ON(!bp);
- pkt_make_request(q, &bp->bio1);
- pkt_make_request(q, &bp->bio2);
- bio_pair_release(bp);
- return;
- }
- }
+ zone = get_zone(bio->bi_iter.bi_sector, pd);
/*
* If we find a matching packet in state WAITING or READ_WAIT, we can
@@ -2417,7 +2376,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
if ((pkt->state == PACKET_WAITING_STATE) ||
(pkt->state == PACKET_READ_WAIT_STATE)) {
bio_list_add(&pkt->orig_bios, bio);
- pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+ pkt->write_size +=
+ bio->bi_iter.bi_size / CD_FRAMESIZE;
if ((pkt->write_size >= pkt->frames) &&
(pkt->state == PACKET_WAITING_STATE)) {
atomic_inc(&pkt->run_sm);
@@ -2476,6 +2436,64 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
*/
wake_up(&pd->wqueue);
}
+}
+
+static void pkt_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct pktcdvd_device *pd;
+ char b[BDEVNAME_SIZE];
+ struct bio *split;
+
+ pd = q->queuedata;
+ if (!pd) {
+ pr_err("%s incorrect request queue\n",
+ bdevname(bio->bi_bdev, b));
+ goto end_io;
+ }
+
+ pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
+ (unsigned long long)bio->bi_iter.bi_sector,
+ (unsigned long long)bio_end_sector(bio));
+
+ /*
+ * Clone READ bios so we can have our own bi_end_io callback.
+ */
+ if (bio_data_dir(bio) == READ) {
+ pkt_make_request_read(pd, bio);
+ return;
+ }
+
+ if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
+ pkt_notice(pd, "WRITE for ro device (%llu)\n",
+ (unsigned long long)bio->bi_iter.bi_sector);
+ goto end_io;
+ }
+
+ if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
+ pkt_err(pd, "wrong bio size\n");
+ goto end_io;
+ }
+
+ blk_queue_bounce(q, &bio);
+
+ do {
+ sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
+ sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
+
+ if (last_zone != zone) {
+ BUG_ON(last_zone != zone + pd->settings.size);
+
+ split = bio_split(bio, last_zone -
+ bio->bi_iter.bi_sector,
+ GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
+
+ pkt_make_request_write(q, split);
+ } while (split != bio);
+
return;
end_io:
bio_io_error(bio);
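The new pkt_make_request() above relies on the bio_split()/bio_chain() pair rather than the old bio_pair machinery; a minimal sketch of that loop in generic form follows. CHUNK_SECTORS and the demo_* names are hypothetical; only bio_split(), bio_chain(), bio_sectors() and fs_bio_set are the kernel API the patch actually uses.

#include <linux/bio.h>
#include <linux/blkdev.h>

#define CHUNK_SECTORS 128	/* hypothetical fixed chunk size, in sectors */

static void demo_handle_chunk(struct request_queue *q, struct bio *bio)
{
	/* hypothetical per-chunk submission path */
}

static void demo_split_and_submit(struct request_queue *q, struct bio *bio)
{
	struct bio *split;

	do {
		if (bio_sectors(bio) > CHUNK_SECTORS) {
			/* carve a chunk off the front into its own bio */
			split = bio_split(bio, CHUNK_SECTORS, GFP_NOIO,
					  fs_bio_set);
			/* parent completes only after all chained pieces do */
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		demo_handle_chunk(q, split);
	} while (split != bio);
}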
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index d754a88d7585..c120d70d3fb3 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -94,26 +94,25 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
{
unsigned int offset = 0;
struct req_iterator iter;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
unsigned int i = 0;
size_t size;
void *buf;
rq_for_each_segment(bvec, req, iter) {
unsigned long flags;
- dev_dbg(&dev->sbd.core,
- "%s:%u: bio %u: %u segs %u sectors from %lu\n",
- __func__, __LINE__, i, bio_segments(iter.bio),
- bio_sectors(iter.bio), iter.bio->bi_sector);
+ dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
+ __func__, __LINE__, i, bio_sectors(iter.bio),
+ iter.bio->bi_iter.bi_sector);
- size = bvec->bv_len;
- buf = bvec_kmap_irq(bvec, &flags);
+ size = bvec.bv_len;
+ buf = bvec_kmap_irq(&bvec, &flags);
if (gather)
memcpy(dev->bounce_buf+offset, buf, size);
else
memcpy(buf, dev->bounce_buf+offset, size);
offset += size;
- flush_kernel_dcache_page(bvec->bv_page);
+ flush_kernel_dcache_page(bvec.bv_page);
bvec_kunmap_irq(buf, &flags);
i++;
}
@@ -130,7 +129,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
#ifdef DEBUG
unsigned int n = 0;
- struct bio_vec *bv;
+ struct bio_vec bv;
struct req_iterator iter;
rq_for_each_segment(bv, req, iter)
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 06a2e53e5f37..ef45cfb98fd2 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -553,16 +553,16 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
int write = bio_data_dir(bio) == WRITE;
const char *op = write ? "write" : "read";
- loff_t offset = bio->bi_sector << 9;
+ loff_t offset = bio->bi_iter.bi_sector << 9;
int error = 0;
- struct bio_vec *bvec;
- unsigned int i;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
struct bio *next;
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment(bvec, bio, iter) {
/* PS3 is ppc64, so we don't handle highmem */
- char *ptr = page_address(bvec->bv_page) + bvec->bv_offset;
- size_t len = bvec->bv_len, retlen;
+ char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
+ size_t len = bvec.bv_len, retlen;
dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op,
len, offset);
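These conversions all follow the same immutable-biovec pattern: the segment (struct bio_vec) is copied by value, a struct bvec_iter carries the position, and the starting sector moves into bio->bi_iter. A minimal sketch of the walk, assuming lowmem pages so page_address() is safe (as in ps3vram above); the demo_* name is hypothetical.

#include <linux/bio.h>
#include <linux/mm.h>

static void demo_walk_bio(struct bio *bio)
{
	struct bio_vec bvec;		/* segment, copied per iteration */
	struct bvec_iter iter;		/* replaces the old integer index */
	sector_t sector = bio->bi_iter.bi_sector;

	bio_for_each_segment(bvec, bio, iter) {
		void *ptr = page_address(bvec.bv_page) + bvec.bv_offset;

		/* transfer bvec.bv_len bytes to/from 'sector' here */
		(void)ptr;
		sector += bvec.bv_len >> 9;
	}

	bio_endio(bio, 0);	/* two-argument form, as in this kernel */
}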
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index cb1db2979d3d..b365e0dfccb6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -41,6 +41,7 @@
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
+#include <linux/idr.h>
#include "rbd_types.h"
@@ -89,9 +90,9 @@ static int atomic_dec_return_safe(atomic_t *v)
}
#define RBD_DRV_NAME "rbd"
-#define RBD_DRV_NAME_LONG "rbd (rados block device)"
-#define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
+#define RBD_MINORS_PER_MAJOR 256
+#define RBD_SINGLE_MAJOR_PART_SHIFT 4
#define RBD_SNAP_DEV_NAME_PREFIX "snap_"
#define RBD_MAX_SNAP_NAME_LEN \
@@ -323,6 +324,7 @@ struct rbd_device {
int dev_id; /* blkdev unique id */
int major; /* blkdev assigned major */
+ int minor;
struct gendisk *disk; /* blkdev's gendisk and rq */
u32 image_format; /* Either 1 or 2 */
@@ -386,6 +388,17 @@ static struct kmem_cache *rbd_img_request_cache;
static struct kmem_cache *rbd_obj_request_cache;
static struct kmem_cache *rbd_segment_name_cache;
+static int rbd_major;
+static DEFINE_IDA(rbd_dev_id_ida);
+
+/*
+ * Default to false for now, as single-major requires version >= 0.75 of
+ * the userspace rbd utility.
+ */
+static bool single_major = false;
+module_param(single_major, bool, S_IRUGO);
+MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
+
static int rbd_img_request_submit(struct rbd_img_request *img_request);
static void rbd_dev_device_release(struct device *dev);
@@ -394,18 +407,52 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf,
size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
size_t count);
+static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
+ size_t count);
+static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
+ size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);
+static int rbd_dev_id_to_minor(int dev_id)
+{
+ return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
+}
+
+static int minor_to_rbd_dev_id(int minor)
+{
+ return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
+}
+
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
+static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
+static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
static struct attribute *rbd_bus_attrs[] = {
&bus_attr_add.attr,
&bus_attr_remove.attr,
+ &bus_attr_add_single_major.attr,
+ &bus_attr_remove_single_major.attr,
NULL,
};
-ATTRIBUTE_GROUPS(rbd_bus);
+
+static umode_t rbd_bus_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ if (!single_major &&
+ (attr == &bus_attr_add_single_major.attr ||
+ attr == &bus_attr_remove_single_major.attr))
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group rbd_bus_group = {
+ .attrs = rbd_bus_attrs,
+ .is_visible = rbd_bus_is_visible,
+};
+__ATTRIBUTE_GROUPS(rbd_bus);
static struct bus_type rbd_bus_type = {
.name = "rbd",
@@ -1041,9 +1088,9 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
name_format = "%s.%012llx";
if (rbd_dev->image_format == 2)
name_format = "%s.%016llx";
- ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
+ ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
rbd_dev->header.object_prefix, segment);
- if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
+ if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
pr_err("error formatting segment name for #%llu (%d)\n",
segment, ret);
kfree(name);
@@ -1109,23 +1156,23 @@ static void bio_chain_put(struct bio *chain)
*/
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
- struct bio_vec *bv;
+ struct bio_vec bv;
+ struct bvec_iter iter;
unsigned long flags;
void *buf;
- int i;
int pos = 0;
while (chain) {
- bio_for_each_segment(bv, chain, i) {
- if (pos + bv->bv_len > start_ofs) {
+ bio_for_each_segment(bv, chain, iter) {
+ if (pos + bv.bv_len > start_ofs) {
int remainder = max(start_ofs - pos, 0);
- buf = bvec_kmap_irq(bv, &flags);
+ buf = bvec_kmap_irq(&bv, &flags);
memset(buf + remainder, 0,
- bv->bv_len - remainder);
- flush_dcache_page(bv->bv_page);
+ bv.bv_len - remainder);
+ flush_dcache_page(bv.bv_page);
bvec_kunmap_irq(buf, &flags);
}
- pos += bv->bv_len;
+ pos += bv.bv_len;
}
chain = chain->bi_next;
@@ -1173,74 +1220,14 @@ static struct bio *bio_clone_range(struct bio *bio_src,
unsigned int len,
gfp_t gfpmask)
{
- struct bio_vec *bv;
- unsigned int resid;
- unsigned short idx;
- unsigned int voff;
- unsigned short end_idx;
- unsigned short vcnt;
struct bio *bio;
- /* Handle the easy case for the caller */
-
- if (!offset && len == bio_src->bi_size)
- return bio_clone(bio_src, gfpmask);
-
- if (WARN_ON_ONCE(!len))
- return NULL;
- if (WARN_ON_ONCE(len > bio_src->bi_size))
- return NULL;
- if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
- return NULL;
-
- /* Find first affected segment... */
-
- resid = offset;
- bio_for_each_segment(bv, bio_src, idx) {
- if (resid < bv->bv_len)
- break;
- resid -= bv->bv_len;
- }
- voff = resid;
-
- /* ...and the last affected segment */
-
- resid += len;
- __bio_for_each_segment(bv, bio_src, end_idx, idx) {
- if (resid <= bv->bv_len)
- break;
- resid -= bv->bv_len;
- }
- vcnt = end_idx - idx + 1;
-
- /* Build the clone */
-
- bio = bio_alloc(gfpmask, (unsigned int) vcnt);
+ bio = bio_clone(bio_src, gfpmask);
if (!bio)
return NULL; /* ENOMEM */
- bio->bi_bdev = bio_src->bi_bdev;
- bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
- bio->bi_rw = bio_src->bi_rw;
- bio->bi_flags |= 1 << BIO_CLONED;
-
- /*
- * Copy over our part of the bio_vec, then update the first
- * and last (or only) entries.
- */
- memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
- vcnt * sizeof (struct bio_vec));
- bio->bi_io_vec[0].bv_offset += voff;
- if (vcnt > 1) {
- bio->bi_io_vec[0].bv_len -= voff;
- bio->bi_io_vec[vcnt - 1].bv_len = resid;
- } else {
- bio->bi_io_vec[0].bv_len = len;
- }
-
- bio->bi_vcnt = vcnt;
- bio->bi_size = len;
- bio->bi_idx = 0;
+ bio_advance(bio, offset);
+ bio->bi_iter.bi_size = len;
return bio;
}
@@ -1271,7 +1258,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
/* Build up a chain of clone bios up to the limit */
- if (!bi || off >= bi->bi_size || !len)
+ if (!bi || off >= bi->bi_iter.bi_size || !len)
return NULL; /* Nothing to clone */
end = &chain;
@@ -1283,7 +1270,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
rbd_warn(NULL, "bio_chain exhausted with %u left", len);
goto out_err; /* EINVAL; ran out of bio's */
}
- bi_size = min_t(unsigned int, bi->bi_size - off, len);
+ bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
bio = bio_clone_range(bi, off, bi_size, gfpmask);
if (!bio)
goto out_err; /* ENOMEM */
@@ -1292,7 +1279,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
end = &bio->bi_next;
off += bi_size;
- if (off == bi->bi_size) {
+ if (off == bi->bi_iter.bi_size) {
bi = bi->bi_next;
off = 0;
}
@@ -1761,11 +1748,8 @@ static struct ceph_osd_request *rbd_osd_req_create(
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
- osd_req->r_oid_len = strlen(obj_request->object_name);
- rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
- memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
-
- osd_req->r_file_layout = rbd_dev->layout; /* struct */
+ osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
+ ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
return osd_req;
}
@@ -1802,11 +1786,8 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
- osd_req->r_oid_len = strlen(obj_request->object_name);
- rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
- memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
-
- osd_req->r_file_layout = rbd_dev->layout; /* struct */
+ osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
+ ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
return osd_req;
}
@@ -2186,7 +2167,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
if (type == OBJ_REQUEST_BIO) {
bio_list = data_desc;
- rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
+ rbd_assert(img_offset ==
+ bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
} else {
rbd_assert(type == OBJ_REQUEST_PAGES);
pages = data_desc;
@@ -2866,7 +2848,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
* Request sync osd watch/unwatch. The value of "start" determines
* whether a watch request is being initiated or torn down.
*/
-static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
+static int __rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
@@ -2941,6 +2923,22 @@ out_cancel:
return ret;
}
+static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
+{
+ return __rbd_dev_header_watch_sync(rbd_dev, true);
+}
+
+static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
+{
+ int ret;
+
+ ret = __rbd_dev_header_watch_sync(rbd_dev, false);
+ if (ret) {
+ rbd_warn(rbd_dev, "unable to tear down watch request: %d\n",
+ ret);
+ }
+}
+
/*
* Synchronous osd object method call. Returns the number of bytes
* returned in the outbound buffer, or a negative error code.
@@ -3388,14 +3386,18 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
u64 segment_size;
/* create gendisk info */
- disk = alloc_disk(RBD_MINORS_PER_MAJOR);
+ disk = alloc_disk(single_major ?
+ (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
+ RBD_MINORS_PER_MAJOR);
if (!disk)
return -ENOMEM;
snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
rbd_dev->dev_id);
disk->major = rbd_dev->major;
- disk->first_minor = 0;
+ disk->first_minor = rbd_dev->minor;
+ if (single_major)
+ disk->flags |= GENHD_FL_EXT_DEVT;
disk->fops = &rbd_bd_ops;
disk->private_data = rbd_dev;
@@ -3467,7 +3469,14 @@ static ssize_t rbd_major_show(struct device *dev,
return sprintf(buf, "%d\n", rbd_dev->major);
return sprintf(buf, "(none)\n");
+}
+
+static ssize_t rbd_minor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+ return sprintf(buf, "%d\n", rbd_dev->minor);
}
static ssize_t rbd_client_id_show(struct device *dev,
@@ -3589,6 +3598,7 @@ static ssize_t rbd_image_refresh(struct device *dev,
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
+static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
@@ -3602,6 +3612,7 @@ static struct attribute *rbd_attrs[] = {
&dev_attr_size.attr,
&dev_attr_features.attr,
&dev_attr_major.attr,
+ &dev_attr_minor.attr,
&dev_attr_client_id.attr,
&dev_attr_pool.attr,
&dev_attr_pool_id.attr,
@@ -4372,21 +4383,29 @@ static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
device_unregister(&rbd_dev->dev);
}
-static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
-
/*
* Get a unique rbd identifier for the given new rbd_dev, and add
- * the rbd_dev to the global list. The minimum rbd id is 1.
+ * the rbd_dev to the global list.
*/
-static void rbd_dev_id_get(struct rbd_device *rbd_dev)
+static int rbd_dev_id_get(struct rbd_device *rbd_dev)
{
- rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
+ int new_dev_id;
+
+ new_dev_id = ida_simple_get(&rbd_dev_id_ida,
+ 0, minor_to_rbd_dev_id(1 << MINORBITS),
+ GFP_KERNEL);
+ if (new_dev_id < 0)
+ return new_dev_id;
+
+ rbd_dev->dev_id = new_dev_id;
spin_lock(&rbd_dev_list_lock);
list_add_tail(&rbd_dev->node, &rbd_dev_list);
spin_unlock(&rbd_dev_list_lock);
- dout("rbd_dev %p given dev id %llu\n", rbd_dev,
- (unsigned long long) rbd_dev->dev_id);
+
+ dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
+
+ return 0;
}
/*
@@ -4395,49 +4414,13 @@ static void rbd_dev_id_get(struct rbd_device *rbd_dev)
*/
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
- struct list_head *tmp;
- int rbd_id = rbd_dev->dev_id;
- int max_id;
-
- rbd_assert(rbd_id > 0);
-
- dout("rbd_dev %p released dev id %llu\n", rbd_dev,
- (unsigned long long) rbd_dev->dev_id);
spin_lock(&rbd_dev_list_lock);
list_del_init(&rbd_dev->node);
-
- /*
- * If the id being "put" is not the current maximum, there
- * is nothing special we need to do.
- */
- if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
- spin_unlock(&rbd_dev_list_lock);
- return;
- }
-
- /*
- * We need to update the current maximum id. Search the
- * list to find out what it is. We're more likely to find
- * the maximum at the end, so search the list backward.
- */
- max_id = 0;
- list_for_each_prev(tmp, &rbd_dev_list) {
- struct rbd_device *rbd_dev;
-
- rbd_dev = list_entry(tmp, struct rbd_device, node);
- if (rbd_dev->dev_id > max_id)
- max_id = rbd_dev->dev_id;
- }
spin_unlock(&rbd_dev_list_lock);
- /*
- * The max id could have been updated by rbd_dev_id_get(), in
- * which case it now accurately reflects the new maximum.
- * Be careful not to overwrite the maximum value in that
- * case.
- */
- atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
- dout(" max dev id has been reset\n");
+ ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+
+ dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
}
/*
@@ -4860,20 +4843,29 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
int ret;
- /* generate unique id: find highest unique id, add one */
- rbd_dev_id_get(rbd_dev);
+ /* Get an id and fill in device name. */
+
+ ret = rbd_dev_id_get(rbd_dev);
+ if (ret)
+ return ret;
- /* Fill in the device name, now that we have its id. */
BUILD_BUG_ON(DEV_NAME_LEN
< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
- /* Get our block major device number. */
+ /* Record our major and minor device numbers. */
- ret = register_blkdev(0, rbd_dev->name);
- if (ret < 0)
- goto err_out_id;
- rbd_dev->major = ret;
+ if (!single_major) {
+ ret = register_blkdev(0, rbd_dev->name);
+ if (ret < 0)
+ goto err_out_id;
+
+ rbd_dev->major = ret;
+ rbd_dev->minor = 0;
+ } else {
+ rbd_dev->major = rbd_major;
+ rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
+ }
/* Set up the blkdev mapping. */
@@ -4905,7 +4897,8 @@ err_out_mapping:
err_out_disk:
rbd_free_disk(rbd_dev);
err_out_blkdev:
- unregister_blkdev(rbd_dev->major, rbd_dev->name);
+ if (!single_major)
+ unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
rbd_dev_id_put(rbd_dev);
rbd_dev_mapping_clear(rbd_dev);
@@ -4961,7 +4954,6 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
int ret;
- int tmp;
/*
* Get the id from the image id object. Unless there's an
@@ -4980,7 +4972,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
goto err_out_format;
if (mapping) {
- ret = rbd_dev_header_watch_sync(rbd_dev, true);
+ ret = rbd_dev_header_watch_sync(rbd_dev);
if (ret)
goto out_header_name;
}
@@ -5007,12 +4999,8 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
err_out_probe:
rbd_dev_unprobe(rbd_dev);
err_out_watch:
- if (mapping) {
- tmp = rbd_dev_header_watch_sync(rbd_dev, false);
- if (tmp)
- rbd_warn(rbd_dev, "unable to tear down "
- "watch request (%d)\n", tmp);
- }
+ if (mapping)
+ rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
kfree(rbd_dev->header_name);
rbd_dev->header_name = NULL;
@@ -5026,9 +5014,9 @@ err_out_format:
return ret;
}
-static ssize_t rbd_add(struct bus_type *bus,
- const char *buf,
- size_t count)
+static ssize_t do_rbd_add(struct bus_type *bus,
+ const char *buf,
+ size_t count)
{
struct rbd_device *rbd_dev = NULL;
struct ceph_options *ceph_opts = NULL;
@@ -5090,6 +5078,12 @@ static ssize_t rbd_add(struct bus_type *bus,
rc = rbd_dev_device_setup(rbd_dev);
if (rc) {
+ /*
+ * rbd_dev_header_unwatch_sync() can't be moved into
+ * rbd_dev_image_release() without refactoring, see
+ * commit 1f3ef78861ac.
+ */
+ rbd_dev_header_unwatch_sync(rbd_dev);
rbd_dev_image_release(rbd_dev);
goto err_out_module;
}
@@ -5110,6 +5104,23 @@ err_out_module:
return (ssize_t)rc;
}
+static ssize_t rbd_add(struct bus_type *bus,
+ const char *buf,
+ size_t count)
+{
+ if (single_major)
+ return -EINVAL;
+
+ return do_rbd_add(bus, buf, count);
+}
+
+static ssize_t rbd_add_single_major(struct bus_type *bus,
+ const char *buf,
+ size_t count)
+{
+ return do_rbd_add(bus, buf, count);
+}
+
static void rbd_dev_device_release(struct device *dev)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
@@ -5117,8 +5128,8 @@ static void rbd_dev_device_release(struct device *dev)
rbd_free_disk(rbd_dev);
clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
rbd_dev_mapping_clear(rbd_dev);
- unregister_blkdev(rbd_dev->major, rbd_dev->name);
- rbd_dev->major = 0;
+ if (!single_major)
+ unregister_blkdev(rbd_dev->major, rbd_dev->name);
rbd_dev_id_put(rbd_dev);
rbd_dev_mapping_clear(rbd_dev);
}
@@ -5149,9 +5160,9 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
}
}
-static ssize_t rbd_remove(struct bus_type *bus,
- const char *buf,
- size_t count)
+static ssize_t do_rbd_remove(struct bus_type *bus,
+ const char *buf,
+ size_t count)
{
struct rbd_device *rbd_dev = NULL;
struct list_head *tmp;
@@ -5191,16 +5202,14 @@ static ssize_t rbd_remove(struct bus_type *bus,
if (ret < 0 || already)
return ret;
- ret = rbd_dev_header_watch_sync(rbd_dev, false);
- if (ret)
- rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
-
+ rbd_dev_header_unwatch_sync(rbd_dev);
/*
* flush remaining watch callbacks - these must be complete
* before the osd_client is shutdown
*/
dout("%s: flushing notifies", __func__);
ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
+
/*
* Don't free anything from rbd_dev->disk until after all
* notifies are completely processed. Otherwise
@@ -5214,6 +5223,23 @@ static ssize_t rbd_remove(struct bus_type *bus,
return count;
}
+static ssize_t rbd_remove(struct bus_type *bus,
+ const char *buf,
+ size_t count)
+{
+ if (single_major)
+ return -EINVAL;
+
+ return do_rbd_remove(bus, buf, count);
+}
+
+static ssize_t rbd_remove_single_major(struct bus_type *bus,
+ const char *buf,
+ size_t count)
+{
+ return do_rbd_remove(bus, buf, count);
+}
+
/*
* create control files in sysfs
* /sys/bus/rbd/...
@@ -5259,7 +5285,7 @@ static int rbd_slab_init(void)
rbd_assert(!rbd_segment_name_cache);
rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
- MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
+ CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
if (rbd_segment_name_cache)
return 0;
out_err:
@@ -5295,24 +5321,45 @@ static int __init rbd_init(void)
if (!libceph_compatible(NULL)) {
rbd_warn(NULL, "libceph incompatibility (quitting)");
-
return -EINVAL;
}
+
rc = rbd_slab_init();
if (rc)
return rc;
+
+ if (single_major) {
+ rbd_major = register_blkdev(0, RBD_DRV_NAME);
+ if (rbd_major < 0) {
+ rc = rbd_major;
+ goto err_out_slab;
+ }
+ }
+
rc = rbd_sysfs_init();
if (rc)
- rbd_slab_exit();
+ goto err_out_blkdev;
+
+ if (single_major)
+ pr_info("loaded (major %d)\n", rbd_major);
else
- pr_info("loaded " RBD_DRV_NAME_LONG "\n");
+ pr_info("loaded\n");
+
+ return 0;
+err_out_blkdev:
+ if (single_major)
+ unregister_blkdev(rbd_major, RBD_DRV_NAME);
+err_out_slab:
+ rbd_slab_exit();
return rc;
}
static void __exit rbd_exit(void)
{
rbd_sysfs_cleanup();
+ if (single_major)
+ unregister_blkdev(rbd_major, RBD_DRV_NAME);
rbd_slab_exit();
}
@@ -5322,9 +5369,8 @@ module_exit(rbd_exit);
MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
-MODULE_DESCRIPTION("rados block device");
-
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
+MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");
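A sketch of the id/minor scheme rbd adopts above: device ids come from an IDA and map to first minors by a fixed shift, so each device owns a power-of-two minor range under the shared major and the allocation bound keeps every id inside the minor space. DEMO_PART_SHIFT and the demo_* names are hypothetical; ida_simple_get()/ida_simple_remove() and MINORBITS are the kernel API the patch uses.

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>	/* MINORBITS */

#define DEMO_PART_SHIFT 4	/* 16 minors (partitions) per device */

static DEFINE_IDA(demo_id_ida);

static int demo_id_to_minor(int id)
{
	return id << DEMO_PART_SHIFT;
}

static int demo_id_get(void)
{
	/* bound the id so id << DEMO_PART_SHIFT still fits in MINORBITS */
	return ida_simple_get(&demo_id_ida, 0,
			      (1 << MINORBITS) >> DEMO_PART_SHIFT, GFP_KERNEL);
}

static void demo_id_put(int id)
{
	ida_simple_remove(&demo_id_ida, id);
}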
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 2284f5d3a54a..2839d37e5af7 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -174,7 +174,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
if (!card)
goto req_err;
- if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk))
+ if (bio_end_sector(bio) > get_capacity(card->gendisk))
goto req_err;
if (unlikely(card->halt)) {
@@ -187,7 +187,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
goto req_err;
}
- if (bio->bi_size == 0) {
+ if (bio->bi_iter.bi_size == 0) {
dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
goto req_err;
}
@@ -208,7 +208,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
bio_data_dir(bio) ? 'W' : 'R', bio_meta,
- (u64)bio->bi_sector << 9, bio->bi_size);
+ (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
bio_dma_done_cb, bio_meta);
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index fc88ba3e1bd2..cf8cd293abb5 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -684,7 +684,8 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
void *cb_data)
{
struct list_head dma_list[RSXX_MAX_TARGETS];
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
unsigned long long addr8;
unsigned int laddr;
unsigned int bv_len;
@@ -696,7 +697,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
int st;
int i;
- addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
+ addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
atomic_set(n_dmas, 0);
for (i = 0; i < card->n_targets; i++) {
@@ -705,7 +706,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
}
if (bio->bi_rw & REQ_DISCARD) {
- bv_len = bio->bi_size;
+ bv_len = bio->bi_iter.bi_size;
while (bv_len > 0) {
tgt = rsxx_get_dma_tgt(card, addr8);
@@ -722,9 +723,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
bv_len -= RSXX_HW_BLK_SIZE;
}
} else {
- bio_for_each_segment(bvec, bio, i) {
- bv_len = bvec->bv_len;
- bv_off = bvec->bv_offset;
+ bio_for_each_segment(bvec, bio, iter) {
+ bv_len = bvec.bv_len;
+ bv_off = bvec.bv_offset;
while (bv_len > 0) {
tgt = rsxx_get_dma_tgt(card, addr8);
@@ -736,7 +737,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
st = rsxx_queue_dma(card, &dma_list[tgt],
bio_data_dir(bio),
dma_off, dma_len,
- laddr, bvec->bv_page,
+ laddr, bvec.bv_page,
bv_off, cb, cb_data);
if (st)
goto bvec_err;
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 9199c93be926..eb6e1e0e8db2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
}
}
-const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
+static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
switch (state) {
case SKD_MSG_STATE_IDLE:
@@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
}
}
-const char *skd_skreq_state_to_str(enum skd_req_state state)
+static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
switch (state) {
case SKD_REQ_STATE_IDLE:
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 3fb6ab4c8b4e..d5e2d12b9d9e 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1744,20 +1744,6 @@ static void carm_remove_one (struct pci_dev *pdev)
kfree(host);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
-static int __init carm_init(void)
-{
- return pci_register_driver(&carm_driver);
-}
-
-static void __exit carm_exit(void)
-{
- pci_unregister_driver(&carm_driver);
-}
-
-module_init(carm_init);
-module_exit(carm_exit);
-
-
+module_pci_driver(carm_driver);
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index ad70868f8a96..4cf81b5bf0f7 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -108,8 +108,7 @@ struct cardinfo {
* have been written
*/
struct bio *bio, *currentbio, **biotail;
- int current_idx;
- sector_t current_sector;
+ struct bvec_iter current_iter;
struct request_queue *queue;
@@ -118,7 +117,7 @@ struct cardinfo {
struct mm_dma_desc *desc;
int cnt, headcnt;
struct bio *bio, **biotail;
- int idx;
+ struct bvec_iter iter;
} mm_pages[2];
#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
@@ -344,16 +343,13 @@ static int add_bio(struct cardinfo *card)
dma_addr_t dma_handle;
int offset;
struct bio *bio;
- struct bio_vec *vec;
- int idx;
+ struct bio_vec vec;
int rw;
- int len;
bio = card->currentbio;
if (!bio && card->bio) {
card->currentbio = card->bio;
- card->current_idx = card->bio->bi_idx;
- card->current_sector = card->bio->bi_sector;
+ card->current_iter = card->bio->bi_iter;
card->bio = card->bio->bi_next;
if (card->bio == NULL)
card->biotail = &card->bio;
@@ -362,18 +358,17 @@ static int add_bio(struct cardinfo *card)
}
if (!bio)
return 0;
- idx = card->current_idx;
rw = bio_rw(bio);
if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
return 0;
- vec = bio_iovec_idx(bio, idx);
- len = vec->bv_len;
+ vec = bio_iter_iovec(bio, card->current_iter);
+
dma_handle = pci_map_page(card->dev,
- vec->bv_page,
- vec->bv_offset,
- len,
+ vec.bv_page,
+ vec.bv_offset,
+ vec.bv_len,
(rw == READ) ?
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
@@ -381,7 +376,7 @@ static int add_bio(struct cardinfo *card)
desc = &p->desc[p->cnt];
p->cnt++;
if (p->bio == NULL)
- p->idx = idx;
+ p->iter = card->current_iter;
if ((p->biotail) != &bio->bi_next) {
*(p->biotail) = bio;
p->biotail = &(bio->bi_next);
@@ -391,8 +386,8 @@ static int add_bio(struct cardinfo *card)
desc->data_dma_handle = dma_handle;
desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
- desc->local_addr = cpu_to_le64(card->current_sector << 9);
- desc->transfer_size = cpu_to_le32(len);
+ desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9);
+ desc->transfer_size = cpu_to_le32(vec.bv_len);
offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc));
desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
desc->zero1 = desc->zero2 = 0;
@@ -407,10 +402,9 @@ static int add_bio(struct cardinfo *card)
desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
desc->sem_control_bits = desc->control_bits;
- card->current_sector += (len >> 9);
- idx++;
- card->current_idx = idx;
- if (idx >= bio->bi_vcnt)
+
+ bio_advance_iter(bio, &card->current_iter, vec.bv_len);
+ if (!card->current_iter.bi_size)
card->currentbio = NULL;
return 1;
@@ -439,23 +433,25 @@ static void process_page(unsigned long data)
struct mm_dma_desc *desc = &page->desc[page->headcnt];
int control = le32_to_cpu(desc->sem_control_bits);
int last = 0;
- int idx;
+ struct bio_vec vec;
if (!(control & DMASCR_DMA_COMPLETE)) {
control = dma_status;
last = 1;
}
+
page->headcnt++;
- idx = page->idx;
- page->idx++;
- if (page->idx >= bio->bi_vcnt) {
+ vec = bio_iter_iovec(bio, page->iter);
+ bio_advance_iter(bio, &page->iter, vec.bv_len);
+
+ if (!page->iter.bi_size) {
page->bio = bio->bi_next;
if (page->bio)
- page->idx = page->bio->bi_idx;
+ page->iter = page->bio->bi_iter;
}
pci_unmap_page(card->dev, desc->data_dma_handle,
- bio_iovec_idx(bio, idx)->bv_len,
+ vec.bv_len,
(control & DMASCR_TRANSFER_READ) ?
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
if (control & DMASCR_HARD_ERROR) {
@@ -532,7 +528,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
{
struct cardinfo *card = q->queuedata;
pr_debug("mm_make_request %llu %u\n",
- (unsigned long long)bio->bi_sector, bio->bi_size);
+ (unsigned long long)bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size);
spin_lock_irq(&card->lock);
*card->biotail = bio;
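The umem change above drops the hand-rolled index/sector bookkeeping in favour of a saved struct bvec_iter; a minimal sketch of that pattern, with hypothetical demo_* names: bio_iter_iovec() yields the segment at the saved position and bio_advance_iter() advances both the remaining byte count and the sector.

#include <linux/bio.h>

/* Returns true while more segments remain after the one just consumed. */
static bool demo_consume_one_segment(struct bio *bio, struct bvec_iter *pos)
{
	struct bio_vec vec = bio_iter_iovec(bio, *pos);
	sector_t sector = pos->bi_sector;	/* device sector for this piece */

	/* ... set up DMA for vec.bv_page + vec.bv_offset, vec.bv_len bytes ... */
	(void)sector;

	bio_advance_iter(bio, pos, vec.bv_len);

	return pos->bi_size != 0;
}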
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6a680d4de7f1..b1cb3f4c4db4 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -110,9 +110,9 @@ static int __virtblk_add_req(struct virtqueue *vq,
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
-static inline void virtblk_request_done(struct virtblk_req *vbr)
+static inline void virtblk_request_done(struct request *req)
{
- struct request *req = vbr->req;
+ struct virtblk_req *vbr = req->special;
int error = virtblk_result(vbr);
if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -138,7 +138,7 @@ static void virtblk_done(struct virtqueue *vq)
do {
virtqueue_disable_cb(vq);
while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
- virtblk_request_done(vbr);
+ blk_mq_complete_request(vbr->req);
req_done = true;
}
if (unlikely(virtqueue_is_broken(vq)))
@@ -479,6 +479,7 @@ static struct blk_mq_ops virtio_mq_ops = {
.map_queue = blk_mq_map_queue,
.alloc_hctx = blk_mq_alloc_single_hw_queue,
.free_hctx = blk_mq_free_single_hw_queue,
+ .complete = virtblk_request_done,
};
static struct blk_mq_reg virtio_mq_reg = {
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 6620b73d0490..64c60edcdfbc 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -299,7 +299,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
BUG_ON(num != 0);
}
-static void unmap_purged_grants(struct work_struct *work)
+void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -375,7 +375,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
- INIT_LIST_HEAD(&blkif->persistent_purge_list);
+ BUG_ON(!list_empty(&blkif->persistent_purge_list));
root = &blkif->persistent_gnts;
purge_list:
foreach_grant_safe(persistent_gnt, n, root, node) {
@@ -420,7 +420,6 @@ finished:
blkif->vbd.overflow_max_grants = 0;
/* We can defer this work */
- INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
schedule_work(&blkif->persistent_purge_work);
pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
return;
@@ -625,9 +624,23 @@ purge_gnt_list:
print_stats(blkif);
}
- /* Since we are shutting down remove all pages from the buffer */
- shrink_free_pagepool(blkif, 0 /* All */);
+ /* Drain pending purge work */
+ flush_work(&blkif->persistent_purge_work);
+ if (log_stats)
+ print_stats(blkif);
+
+ blkif->xenblkd = NULL;
+ xen_blkif_put(blkif);
+
+ return 0;
+}
+
+/*
+ * Remove persistent grants and empty the pool of free pages
+ */
+void xen_blkbk_free_caches(struct xen_blkif *blkif)
+{
/* Free all persistent grant pages */
if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
free_persistent_gnts(blkif, &blkif->persistent_gnts,
@@ -636,13 +649,8 @@ purge_gnt_list:
BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
blkif->persistent_gnt_c = 0;
- if (log_stats)
- print_stats(blkif);
-
- blkif->xenblkd = NULL;
- xen_blkif_put(blkif);
-
- return 0;
+ /* Since we are shutting down remove all pages from the buffer */
+ shrink_free_pagepool(blkif, 0 /* All */);
}
/*
@@ -838,7 +846,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
struct grant_page **pages = pending_req->indirect_pages;
struct xen_blkif *blkif = pending_req->blkif;
int indirect_grefs, rc, n, nseg, i;
- struct blkif_request_segment_aligned *segments = NULL;
+ struct blkif_request_segment *segments = NULL;
nseg = pending_req->nr_pages;
indirect_grefs = INDIRECT_PAGES(nseg);
@@ -934,9 +942,7 @@ static void xen_blk_drain_io(struct xen_blkif *blkif)
{
atomic_set(&blkif->drain, 1);
do {
- /* The initial value is one, and one refcnt taken at the
- * start of the xen_blkif_schedule thread. */
- if (atomic_read(&blkif->refcnt) <= 2)
+ if (atomic_read(&blkif->inflight) == 0)
break;
wait_for_completion_interruptible_timeout(
&blkif->drain_complete, HZ);
@@ -976,17 +982,30 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
* the proper response on the ring.
*/
if (atomic_dec_and_test(&pending_req->pendcnt)) {
- xen_blkbk_unmap(pending_req->blkif,
+ struct xen_blkif *blkif = pending_req->blkif;
+
+ xen_blkbk_unmap(blkif,
pending_req->segments,
pending_req->nr_pages);
- make_response(pending_req->blkif, pending_req->id,
+ make_response(blkif, pending_req->id,
pending_req->operation, pending_req->status);
- xen_blkif_put(pending_req->blkif);
- if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
- if (atomic_read(&pending_req->blkif->drain))
- complete(&pending_req->blkif->drain_complete);
+ free_req(blkif, pending_req);
+ /*
+ * Make sure the request is freed before releasing blkif,
+ * or there could be a race between free_req and the
+ * cleanup done in xen_blkif_free during shutdown.
+ *
+ * NB: the fact that we might try to wake up pending_free_wq
+ * before drain_complete (in case there's a drain going on) is
+ * not a problem with the current implementation, because we
+ * can assure there's no thread waiting on pending_free_wq
+ * while a drain is going on; but it has to be taken into
+ * account if the current model is changed.
+ */
+ if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
+ complete(&blkif->drain_complete);
}
- free_req(pending_req->blkif, pending_req);
+ xen_blkif_put(blkif);
}
}
@@ -1240,6 +1259,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
* below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
*/
xen_blkif_get(blkif);
+ atomic_inc(&blkif->inflight);
for (i = 0; i < nseg; i++) {
while ((bio == NULL) ||
@@ -1257,7 +1277,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
bio->bi_bdev = preq.bdev;
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
- bio->bi_sector = preq.sector_number;
+ bio->bi_iter.bi_sector = preq.sector_number;
}
preq.sector_number += seg[i].nsec;
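The drain logic above stops inferring idleness from the refcount and instead counts in-flight requests directly; the shape of that accounting, sketched with a hypothetical demo_backend structure (the atomics and completion calls are the real API):

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/jiffies.h>	/* HZ */

struct demo_backend {
	atomic_t inflight;		/* issued but not yet completed */
	atomic_t drain;			/* a drain is in progress */
	struct completion drain_complete;
};

static void demo_init(struct demo_backend *be)
{
	atomic_set(&be->inflight, 0);
	atomic_set(&be->drain, 0);
	init_completion(&be->drain_complete);
}

static void demo_submit(struct demo_backend *be)
{
	atomic_inc(&be->inflight);
	/* ... issue the request ... */
}

static void demo_complete(struct demo_backend *be)
{
	/* ... finish the request and send the response ... */
	if (atomic_dec_and_test(&be->inflight) && atomic_read(&be->drain))
		complete(&be->drain_complete);
}

static void demo_drain(struct demo_backend *be)
{
	atomic_set(&be->drain, 1);
	while (atomic_read(&be->inflight) != 0)
		wait_for_completion_interruptible_timeout(&be->drain_complete,
							  HZ);
	atomic_set(&be->drain, 0);
}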
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 8d8807563d99..be052773ad03 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -57,7 +57,7 @@
#define MAX_INDIRECT_SEGMENTS 256
#define SEGS_PER_INDIRECT_FRAME \
- (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+ (PAGE_SIZE/sizeof(struct blkif_request_segment))
#define MAX_INDIRECT_PAGES \
((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) \
@@ -278,6 +278,7 @@ struct xen_blkif {
/* for barrier (drain) requests */
struct completion drain_complete;
atomic_t drain;
+ atomic_t inflight;
/* One thread per one blkif. */
struct task_struct *xenblkd;
unsigned int waiting_reqs;
@@ -376,6 +377,7 @@ int xen_blkif_xenbus_init(void);
irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
+void xen_blkbk_free_caches(struct xen_blkif *blkif);
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
struct backend_info *be, int state);
@@ -383,6 +385,7 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
int xen_blkbk_barrier(struct xenbus_transaction xbt,
struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
+void xen_blkbk_unmap_purged_grants(struct work_struct *work);
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
struct blkif_x86_32_request *src)
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index c2014a0aa206..9a547e6b6ebf 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -125,8 +125,11 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
blkif->persistent_gnts.rb_node = NULL;
spin_lock_init(&blkif->free_pages_lock);
INIT_LIST_HEAD(&blkif->free_pages);
+ INIT_LIST_HEAD(&blkif->persistent_purge_list);
blkif->free_pages_num = 0;
atomic_set(&blkif->persistent_gnt_in_use, 0);
+ atomic_set(&blkif->inflight, 0);
+ INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
INIT_LIST_HEAD(&blkif->pending_free);
@@ -259,6 +262,17 @@ static void xen_blkif_free(struct xen_blkif *blkif)
if (!atomic_dec_and_test(&blkif->refcnt))
BUG();
+ /* Remove all persistent grants and the cache of ballooned pages. */
+ xen_blkbk_free_caches(blkif);
+
+ /* Make sure everything is drained before shutting down */
+ BUG_ON(blkif->persistent_gnt_c != 0);
+ BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
+ BUG_ON(blkif->free_pages_num != 0);
+ BUG_ON(!list_empty(&blkif->persistent_purge_list));
+ BUG_ON(!list_empty(&blkif->free_pages));
+ BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
+
/* Check that there is no request in use */
list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
list_del(&req->free_list);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c4a4c9006288..efe1b4761735 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(minor_lock);
#define DEV_NAME "xvd" /* name in /dev */
#define SEGS_PER_INDIRECT_FRAME \
- (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+ (PAGE_SIZE/sizeof(struct blkif_request_segment))
#define INDIRECT_GREFS(_segs) \
((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
@@ -393,7 +393,7 @@ static int blkif_queue_request(struct request *req)
unsigned long id;
unsigned int fsect, lsect;
int i, ref, n;
- struct blkif_request_segment_aligned *segments = NULL;
+ struct blkif_request_segment *segments = NULL;
/*
* Used to store if we are able to queue the request by just using
@@ -550,7 +550,7 @@ static int blkif_queue_request(struct request *req)
} else {
n = i % SEGS_PER_INDIRECT_FRAME;
segments[n] =
- (struct blkif_request_segment_aligned) {
+ (struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
.last_sect = lsect };
@@ -1356,7 +1356,7 @@ static int blkfront_probe(struct xenbus_device *dev,
char *type;
int len;
/* no unplug has been done: do not hook devices != xen vbds */
- if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
+ if (xen_has_pv_and_legacy_disk_devices()) {
int major;
if (!VDEV_IS_EXTENDED(vdevice))
@@ -1547,7 +1547,7 @@ static int blkif_recover(struct blkfront_info *info)
for (i = 0; i < pending; i++) {
offset = (i * segs * PAGE_SIZE) >> 9;
size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
- (unsigned int)(bio->bi_size >> 9) - offset);
+ (unsigned int)bio_sectors(bio) - offset);
cloned_bio = bio_clone(bio, GFP_NOIO);
BUG_ON(cloned_bio == NULL);
bio_trim(cloned_bio, offset, size);
@@ -1904,13 +1904,16 @@ static void blkback_changed(struct xenbus_device *dev,
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
- case XenbusStateClosed:
break;
case XenbusStateConnected:
blkfront_connect(info);
break;
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's Closing state -- fallthrough */
case XenbusStateClosing:
blkfront_closing(info);
break;
@@ -2079,7 +2082,7 @@ static int __init xlblk_init(void)
if (!xen_domain())
return -ENODEV;
- if (xen_hvm_domain() && !xen_platform_pci_unplug)
+ if (!xen_has_pv_disk_devices())
return -ENODEV;
if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 5a95baf4b104..27de5046708a 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -43,9 +43,6 @@
#include <linux/zorro.h>
-extern int m68k_realnum_memory;
-extern struct mem_info m68k_memory[NUM_MEMINFO];
-
#define Z2MINOR_COMBINED (0)
#define Z2MINOR_Z2ONLY (1)
#define Z2MINOR_CHIPONLY (2)
@@ -116,8 +113,8 @@ get_z2ram( void )
if ( test_bit( i, zorro_unused_z2ram ) )
{
z2_count++;
- z2ram_map[ z2ram_size++ ] =
- ZTWO_VADDR( Z2RAM_START ) + ( i << Z2RAM_CHUNKSHIFT );
+ z2ram_map[z2ram_size++] = (unsigned long)ZTWO_VADDR(Z2RAM_START) +
+ (i << Z2RAM_CHUNKSHIFT);
clear_bit( i, zorro_unused_z2ram );
}
}
diff --git a/drivers/staging/zram/Kconfig b/drivers/block/zram/Kconfig
index 983314c41349..3450be850399 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -14,7 +14,6 @@ config ZRAM
disks and maybe many more.
See zram.txt for more information.
- Project home: <https://compcache.googlecode.com/>
config ZRAM_DEBUG
bool "Compressed RAM block device debug support"
diff --git a/drivers/staging/zram/Makefile b/drivers/block/zram/Makefile
index cb0f9ced6a93..cb0f9ced6a93 100644
--- a/drivers/staging/zram/Makefile
+++ b/drivers/block/zram/Makefile
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 3277d9838f4e..011e55d820b1 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -2,6 +2,7 @@
* Compressed RAM block device
*
* Copyright (C) 2008, 2009, 2010 Nitin Gupta
+ * 2012, 2013 Minchan Kim
*
* This code is released using a dual license strategy: BSD/GPL
* You can choose the licence that better fits your requirements.
@@ -9,7 +10,6 @@
* Released under the terms of 3-clause BSD License
* Released under the terms of GNU General Public License Version 2.0
*
- * Project home: http://compcache.googlecode.com
*/
#define KMSG_COMPONENT "zram"
@@ -104,7 +104,7 @@ static ssize_t zero_pages_show(struct device *dev,
{
struct zram *zram = dev_to_zram(dev);
- return sprintf(buf, "%u\n", zram->stats.pages_zero);
+ return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
}
static ssize_t orig_data_size_show(struct device *dev,
@@ -113,7 +113,7 @@ static ssize_t orig_data_size_show(struct device *dev,
struct zram *zram = dev_to_zram(dev);
return sprintf(buf, "%llu\n",
- (u64)(zram->stats.pages_stored) << PAGE_SHIFT);
+ (u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}
static ssize_t compr_data_size_show(struct device *dev,
@@ -140,6 +140,7 @@ static ssize_t mem_used_total_show(struct device *dev,
return sprintf(buf, "%llu\n", val);
}
+/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
@@ -171,13 +172,14 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
u64 start, end, bound;
/* unaligned request */
- if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+ if (unlikely(bio->bi_iter.bi_sector &
+ (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
return 0;
- if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+ if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
return 0;
- start = bio->bi_sector;
- end = start + (bio->bi_size >> SECTOR_SHIFT);
+ start = bio->bi_iter.bi_sector;
+ end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
bound = zram->disksize >> SECTOR_SHIFT;
/* out of range */
if (unlikely(start >= bound || end > bound || start > end))
@@ -227,6 +229,8 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
goto free_table;
}
+ rwlock_init(&meta->tb_lock);
+ mutex_init(&meta->buffer_lock);
return meta;
free_table:
@@ -279,6 +283,7 @@ static void handle_zero_page(struct bio_vec *bvec)
flush_dcache_page(page);
}
+/* NOTE: caller should hold meta->tb_lock write-locked */
static void zram_free_page(struct zram *zram, size_t index)
{
struct zram_meta *meta = zram->meta;
@@ -292,21 +297,21 @@ static void zram_free_page(struct zram *zram, size_t index)
*/
if (zram_test_flag(meta, index, ZRAM_ZERO)) {
zram_clear_flag(meta, index, ZRAM_ZERO);
- zram->stats.pages_zero--;
+ atomic_dec(&zram->stats.pages_zero);
}
return;
}
if (unlikely(size > max_zpage_size))
- zram->stats.bad_compress--;
+ atomic_dec(&zram->stats.bad_compress);
zs_free(meta->mem_pool, handle);
if (size <= PAGE_SIZE / 2)
- zram->stats.good_compress--;
+ atomic_dec(&zram->stats.good_compress);
atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
- zram->stats.pages_stored--;
+ atomic_dec(&zram->stats.pages_stored);
meta->table[index].handle = 0;
meta->table[index].size = 0;
@@ -318,20 +323,26 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
size_t clen = PAGE_SIZE;
unsigned char *cmem;
struct zram_meta *meta = zram->meta;
- unsigned long handle = meta->table[index].handle;
+ unsigned long handle;
+ u16 size;
+
+ read_lock(&meta->tb_lock);
+ handle = meta->table[index].handle;
+ size = meta->table[index].size;
if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+ read_unlock(&meta->tb_lock);
clear_page(mem);
return 0;
}
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
- if (meta->table[index].size == PAGE_SIZE)
+ if (size == PAGE_SIZE)
copy_page(mem, cmem);
else
- ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
- mem, &clen);
+ ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
zs_unmap_object(meta->mem_pool, handle);
+ read_unlock(&meta->tb_lock);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret != LZO_E_OK)) {
@@ -352,11 +363,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
struct zram_meta *meta = zram->meta;
page = bvec->bv_page;
+ read_lock(&meta->tb_lock);
if (unlikely(!meta->table[index].handle) ||
zram_test_flag(meta, index, ZRAM_ZERO)) {
+ read_unlock(&meta->tb_lock);
handle_zero_page(bvec);
return 0;
}
+ read_unlock(&meta->tb_lock);
if (is_partial_io(bvec))
/* Use a temporary buffer to decompress the page */
@@ -399,6 +413,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
struct page *page;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
struct zram_meta *meta = zram->meta;
+ bool locked = false;
page = bvec->bv_page;
src = meta->compress_buffer;
@@ -418,6 +433,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
goto out;
}
+ mutex_lock(&meta->buffer_lock);
+ locked = true;
user_mem = kmap_atomic(page);
if (is_partial_io(bvec)) {
@@ -432,25 +449,18 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
if (page_zero_filled(uncmem)) {
kunmap_atomic(user_mem);
/* Free memory associated with this sector now. */
+ write_lock(&zram->meta->tb_lock);
zram_free_page(zram, index);
-
- zram->stats.pages_zero++;
zram_set_flag(meta, index, ZRAM_ZERO);
+ write_unlock(&zram->meta->tb_lock);
+
+ atomic_inc(&zram->stats.pages_zero);
ret = 0;
goto out;
}
- /*
- * zram_slot_free_notify could miss free so that let's
- * double check.
- */
- if (unlikely(meta->table[index].handle ||
- zram_test_flag(meta, index, ZRAM_ZERO)))
- zram_free_page(zram, index);
-
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
meta->compress_workmem);
-
if (!is_partial_io(bvec)) {
kunmap_atomic(user_mem);
user_mem = NULL;
@@ -463,7 +473,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
}
if (unlikely(clen > max_zpage_size)) {
- zram->stats.bad_compress++;
+ atomic_inc(&zram->stats.bad_compress);
clen = PAGE_SIZE;
src = NULL;
if (is_partial_io(bvec))
@@ -493,18 +503,22 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
* Free memory associated with this sector
* before overwriting unused sectors.
*/
+ write_lock(&zram->meta->tb_lock);
zram_free_page(zram, index);
meta->table[index].handle = handle;
meta->table[index].size = clen;
+ write_unlock(&zram->meta->tb_lock);
/* Update stats */
atomic64_add(clen, &zram->stats.compr_size);
- zram->stats.pages_stored++;
+ atomic_inc(&zram->stats.pages_stored);
if (clen <= PAGE_SIZE / 2)
- zram->stats.good_compress++;
+ atomic_inc(&zram->stats.good_compress);
out:
+ if (locked)
+ mutex_unlock(&meta->buffer_lock);
if (is_partial_io(bvec))
kfree(uncmem);
@@ -513,36 +527,15 @@ out:
return ret;
}
-static void handle_pending_slot_free(struct zram *zram)
-{
- struct zram_slot_free *free_rq;
-
- spin_lock(&zram->slot_free_lock);
- while (zram->slot_free_rq) {
- free_rq = zram->slot_free_rq;
- zram->slot_free_rq = free_rq->next;
- zram_free_page(zram, free_rq->index);
- kfree(free_rq);
- }
- spin_unlock(&zram->slot_free_lock);
-}
-
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset, struct bio *bio, int rw)
{
int ret;
- if (rw == READ) {
- down_read(&zram->lock);
- handle_pending_slot_free(zram);
+ if (rw == READ)
ret = zram_bvec_read(zram, bvec, index, offset, bio);
- up_read(&zram->lock);
- } else {
- down_write(&zram->lock);
- handle_pending_slot_free(zram);
+ else
ret = zram_bvec_write(zram, bvec, index, offset);
- up_write(&zram->lock);
- }
return ret;
}
@@ -552,8 +545,6 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
size_t index;
struct zram_meta *meta;
- flush_work(&zram->free_work);
-
down_write(&zram->init_lock);
if (!zram->init_done) {
up_write(&zram->init_lock);
@@ -680,9 +671,10 @@ out:
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
- int i, offset;
+ int offset;
u32 index;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
switch (rw) {
case READ:
@@ -693,36 +685,37 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
break;
}
- index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
- offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
+ index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
+ offset = (bio->bi_iter.bi_sector &
+ (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment(bvec, bio, iter) {
int max_transfer_size = PAGE_SIZE - offset;
- if (bvec->bv_len > max_transfer_size) {
+ if (bvec.bv_len > max_transfer_size) {
/*
* zram_bvec_rw() can only make operation on a single
* zram page. Split the bio vector.
*/
struct bio_vec bv;
- bv.bv_page = bvec->bv_page;
+ bv.bv_page = bvec.bv_page;
bv.bv_len = max_transfer_size;
- bv.bv_offset = bvec->bv_offset;
+ bv.bv_offset = bvec.bv_offset;
if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
goto out;
- bv.bv_len = bvec->bv_len - max_transfer_size;
+ bv.bv_len = bvec.bv_len - max_transfer_size;
bv.bv_offset += max_transfer_size;
if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
goto out;
} else
- if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
+ if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
< 0)
goto out;
- update_position(&index, &offset, bvec);
+ update_position(&index, &offset, &bvec);
}
set_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -759,40 +752,19 @@ error:
bio_io_error(bio);
}
-static void zram_slot_free(struct work_struct *work)
-{
- struct zram *zram;
-
- zram = container_of(work, struct zram, free_work);
- down_write(&zram->lock);
- handle_pending_slot_free(zram);
- up_write(&zram->lock);
-}
-
-static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
-{
- spin_lock(&zram->slot_free_lock);
- free_rq->next = zram->slot_free_rq;
- zram->slot_free_rq = free_rq;
- spin_unlock(&zram->slot_free_lock);
-}
-
static void zram_slot_free_notify(struct block_device *bdev,
unsigned long index)
{
struct zram *zram;
- struct zram_slot_free *free_rq;
+ struct zram_meta *meta;
zram = bdev->bd_disk->private_data;
- atomic64_inc(&zram->stats.notify_free);
-
- free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
- if (!free_rq)
- return;
+ meta = zram->meta;
- free_rq->index = index;
- add_slot_free(zram, free_rq);
- schedule_work(&zram->free_work);
+ write_lock(&meta->tb_lock);
+ zram_free_page(zram, index);
+ write_unlock(&meta->tb_lock);
+ atomic64_inc(&zram->stats.notify_free);
}
static const struct block_device_operations zram_devops = {
@@ -836,13 +808,8 @@ static int create_device(struct zram *zram, int device_id)
{
int ret = -ENOMEM;
- init_rwsem(&zram->lock);
init_rwsem(&zram->init_lock);
- INIT_WORK(&zram->free_work, zram_slot_free);
- spin_lock_init(&zram->slot_free_lock);
- zram->slot_free_rq = NULL;
-
zram->queue = blk_alloc_queue(GFP_KERNEL);
if (!zram->queue) {
pr_err("Error allocating disk queue for device %d\n",
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 97a3acf6ab76..ad8aa35bae00 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -2,6 +2,7 @@
* Compressed RAM block device
*
* Copyright (C) 2008, 2009, 2010 Nitin Gupta
+ * 2012, 2013 Minchan Kim
*
* This code is released using a dual license strategy: BSD/GPL
* You can choose the licence that better fits your requirements.
@@ -9,7 +10,6 @@
* Released under the terms of 3-clause BSD License
* Released under the terms of GNU General Public License Version 2.0
*
- * Project home: http://compcache.googlecode.com
*/
#ifndef _ZRAM_DRV_H_
@@ -17,8 +17,7 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
-
-#include "../zsmalloc/zsmalloc.h"
+#include <linux/zsmalloc.h>
/*
* Some arbitrary value. This is just to catch
@@ -69,10 +68,6 @@ struct table {
u8 flags;
} __aligned(4);
-/*
- * All 64bit fields should only be manipulated by 64bit atomic accessors.
- * All modifications to 32bit counter should be protected by zram->lock.
- */
struct zram_stats {
atomic64_t compr_size; /* compressed size of pages stored */
atomic64_t num_reads; /* failed + successful */
@@ -81,33 +76,23 @@ struct zram_stats {
atomic64_t failed_writes; /* can happen when memory is too low */
atomic64_t invalid_io; /* non-page-aligned I/O requests */
atomic64_t notify_free; /* no. of swap slot free notifications */
- u32 pages_zero; /* no. of zero filled pages */
- u32 pages_stored; /* no. of pages currently stored */
- u32 good_compress; /* % of pages with compression ratio<=50% */
- u32 bad_compress; /* % of pages with compression ratio>=75% */
+ atomic_t pages_zero; /* no. of zero filled pages */
+ atomic_t pages_stored; /* no. of pages currently stored */
+ atomic_t good_compress; /* % of pages with compression ratio<=50% */
+ atomic_t bad_compress; /* % of pages with compression ratio>=75% */
};
struct zram_meta {
+ rwlock_t tb_lock; /* protect table */
void *compress_workmem;
void *compress_buffer;
struct table *table;
struct zs_pool *mem_pool;
-};
-
-struct zram_slot_free {
- unsigned long index;
- struct zram_slot_free *next;
+ struct mutex buffer_lock; /* protect compress buffers */
};
struct zram {
struct zram_meta *meta;
- struct rw_semaphore lock; /* protect compression buffers, table,
- * 32bit stat counters against concurrent
- * notifications, reads and writes */
-
- struct work_struct free_work; /* handle pending free request */
- struct zram_slot_free *slot_free_rq; /* list head of free request */
-
struct request_queue *queue;
struct gendisk *disk;
int init_done;
@@ -118,7 +103,6 @@ struct zram {
* we can store in a disk.
*/
u64 disksize; /* bytes */
- spinlock_t slot_free_lock;
struct zram_stats stats;
};
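The zram rework above replaces the old coarse per-device rw_semaphore with finer-grained protection: meta->tb_lock (an rwlock) guards the index table, meta->buffer_lock (a mutex) serializes the shared compression buffers on the write path, and the former 32-bit counters become atomic_t so they no longer need the big lock at all. A minimal sketch of the resulting locking rules, using only names introduced in the hunks above (not the full driver paths):

static void sketch_free_slot(struct zram *zram, unsigned long index)
{
	struct zram_meta *meta = zram->meta;

	write_lock(&meta->tb_lock);		/* table updates take tb_lock for write */
	zram_free_page(zram, index);
	write_unlock(&meta->tb_lock);

	atomic64_inc(&zram->stats.notify_free);	/* stats need no extra lock now */
}

static bool sketch_slot_in_use(struct zram *zram, u32 index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle;

	read_lock(&meta->tb_lock);		/* lookups can run concurrently */
	handle = meta->table[index].handle;
	read_unlock(&meta->tb_lock);

	return handle != 0;
}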
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 6bfc1bb318f6..106d1d8e16ad 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -83,10 +83,12 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x3005) },
{ USB_DEVICE(0x04CA, 0x3006) },
{ USB_DEVICE(0x04CA, 0x3008) },
+ { USB_DEVICE(0x04CA, 0x300b) },
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x0CF3, 0xE004) },
{ USB_DEVICE(0x0CF3, 0xE005) },
{ USB_DEVICE(0x0930, 0x0219) },
+ { USB_DEVICE(0x0930, 0x0220) },
{ USB_DEVICE(0x0489, 0xe057) },
{ USB_DEVICE(0x13d3, 0x3393) },
{ USB_DEVICE(0x0489, 0xe04e) },
@@ -96,6 +98,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3402) },
{ USB_DEVICE(0x0cf3, 0x3121) },
{ USB_DEVICE(0x0cf3, 0xe003) },
+ { USB_DEVICE(0x0489, 0xe05f) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
@@ -125,10 +128,12 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
@@ -138,6 +143,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
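The three new IDs above (04ca:300b, 0930:0220, 0489:e05f) follow the usual AR3012 pattern: ath3k_table makes the firmware loader bind to the device at all, while ath3k_blist_tbl tags it with BTUSB_ATH3012 so the patch and sysconfig images get pushed; the btusb hunk further below adds the same IDs so the two drivers coordinate on the same hardware. A hedged sketch of how such per-device flags are typically recovered from a table (usb_match_id() is the standard core helper; the function name here is illustrative):

#include <linux/usb.h>

/* Illustrative: look up the .driver_info flags a table such as
 * ath3k_blist_tbl above attaches to a matching VID:PID pair.
 */
static unsigned long sketch_device_flags(struct usb_interface *intf,
					 const struct usb_device_id *table)
{
	const struct usb_device_id *match = usb_match_id(intf, table);

	return match ? match->driver_info : 0;
}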
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index f9d183387f45..7399303d7d99 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -23,8 +23,6 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <net/bluetooth/bluetooth.h>
-#include <linux/ctype.h>
-#include <linux/firmware.h>
#define BTM_HEADER_LEN 4
#define BTM_UPLD_SIZE 2312
@@ -43,8 +41,6 @@ struct btmrvl_thread {
struct btmrvl_device {
void *card;
struct hci_dev *hcidev;
- struct device *dev;
- const char *cal_data;
u8 dev_type;
@@ -90,12 +86,12 @@ struct btmrvl_private {
#define MRVL_VENDOR_PKT 0xFE
-/* Bluetooth commands */
-#define BT_CMD_AUTO_SLEEP_MODE 0x23
-#define BT_CMD_HOST_SLEEP_CONFIG 0x59
-#define BT_CMD_HOST_SLEEP_ENABLE 0x5A
-#define BT_CMD_MODULE_CFG_REQ 0x5B
-#define BT_CMD_LOAD_CONFIG_DATA 0x61
+/* Vendor specific Bluetooth commands */
+#define BT_CMD_AUTO_SLEEP_MODE 0xFC23
+#define BT_CMD_HOST_SLEEP_CONFIG 0xFC59
+#define BT_CMD_HOST_SLEEP_ENABLE 0xFC5A
+#define BT_CMD_MODULE_CFG_REQ 0xFC5B
+#define BT_CMD_LOAD_CONFIG_DATA 0xFC61
/* Sub-commands: Module Bringup/Shutdown Request/Response */
#define MODULE_BRINGUP_REQ 0xF1
@@ -104,6 +100,11 @@ struct btmrvl_private {
#define MODULE_SHUTDOWN_REQ 0xF2
+/* Vendor specific Bluetooth events */
+#define BT_EVENT_AUTO_SLEEP_MODE 0x23
+#define BT_EVENT_HOST_SLEEP_CONFIG 0x59
+#define BT_EVENT_HOST_SLEEP_ENABLE 0x5A
+#define BT_EVENT_MODULE_CFG_REQ 0x5B
#define BT_EVENT_POWER_STATE 0x20
/* Bluetooth Power States */
@@ -111,8 +112,6 @@ struct btmrvl_private {
#define BT_PS_DISABLE 0x03
#define BT_PS_SLEEP 0x01
-#define OGF 0x3F
-
/* Host Sleep states */
#define HS_ACTIVATED 0x01
#define HS_DEACTIVATED 0x00
@@ -121,7 +120,7 @@ struct btmrvl_private {
#define PS_SLEEP 0x01
#define PS_AWAKE 0x00
-#define BT_CMD_DATA_SIZE 32
+#define BT_CAL_HDR_LEN 4
#define BT_CAL_DATA_SIZE 28
struct btmrvl_event {
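The constants above change meaning rather than just value: the old BT_CMD_* defines were bare OCFs that btmrvl_send_sync_cmd() combined with the vendor OGF 0x3F, while the new ones are complete HCI opcodes, with separate BT_EVENT_* defines kept for the event payloads that still carry only the OCF. The encoding is the standard HCI packing, e.g. (0x3F << 10) | 0x23 = 0xFC23 for AUTO_SLEEP_MODE; a small check with the hci.h helpers:

#include <net/bluetooth/hci.h>

/* Worked example: the new command defines are the old OCFs packed with
 * the vendor OGF 0x3F, so callers now pass a ready-made opcode.
 */
static void sketch_opcode_packing(void)
{
	u16 opcode = hci_opcode_pack(0x3f, 0x23);	/* == 0xfc23 */

	BUILD_BUG_ON(hci_opcode_pack(0x3f, 0x23) != 0xfc23);
	WARN_ON(hci_opcode_ogf(opcode) != 0x3f);	/* vendor OGF */
	WARN_ON(hci_opcode_ocf(opcode) != 0x23);	/* original OCF */
}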
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 5cf31c4fe6d1..1e0320af00c6 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -19,7 +19,7 @@
**/
#include <linux/module.h>
-
+#include <linux/of.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -50,12 +50,10 @@ bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
if (hdr->evt == HCI_EV_CMD_COMPLETE) {
struct hci_ev_cmd_complete *ec;
- u16 opcode, ocf, ogf;
+ u16 opcode;
ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE);
opcode = __le16_to_cpu(ec->opcode);
- ocf = hci_opcode_ocf(opcode);
- ogf = hci_opcode_ogf(opcode);
if (priv->btmrvl_dev.sendcmdflag) {
priv->btmrvl_dev.sendcmdflag = false;
@@ -63,9 +61,8 @@ bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
wake_up_interruptible(&priv->adapter->cmd_wait_q);
}
- if (ogf == OGF) {
- BT_DBG("vendor event skipped: ogf 0x%4.4x ocf 0x%4.4x",
- ogf, ocf);
+ if (hci_opcode_ogf(opcode) == 0x3F) {
+ BT_DBG("vendor event skipped: opcode=%#4.4x", opcode);
kfree_skb(skb);
return false;
}
@@ -89,7 +86,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
}
switch (event->data[0]) {
- case BT_CMD_AUTO_SLEEP_MODE:
+ case BT_EVENT_AUTO_SLEEP_MODE:
if (!event->data[2]) {
if (event->data[1] == BT_PS_ENABLE)
adapter->psmode = 1;
@@ -102,7 +99,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
}
break;
- case BT_CMD_HOST_SLEEP_CONFIG:
+ case BT_EVENT_HOST_SLEEP_CONFIG:
if (!event->data[3])
BT_DBG("gpio=%x, gap=%x", event->data[1],
event->data[2]);
@@ -110,7 +107,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
BT_DBG("HSCFG command failed");
break;
- case BT_CMD_HOST_SLEEP_ENABLE:
+ case BT_EVENT_HOST_SLEEP_ENABLE:
if (!event->data[1]) {
adapter->hs_state = HS_ACTIVATED;
if (adapter->psmode)
@@ -121,7 +118,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
}
break;
- case BT_CMD_MODULE_CFG_REQ:
+ case BT_EVENT_MODULE_CFG_REQ:
if (priv->btmrvl_dev.sendcmdflag &&
event->data[1] == MODULE_BRINGUP_REQ) {
BT_DBG("EVENT:%s",
@@ -166,7 +163,7 @@ exit:
}
EXPORT_SYMBOL_GPL(btmrvl_process_event);
-static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 cmd_no,
+static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
const void *param, u8 len)
{
struct sk_buff *skb;
@@ -179,7 +176,7 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 cmd_no,
}
hdr = (struct hci_command_hdr *)skb_put(skb, HCI_COMMAND_HDR_SIZE);
- hdr->opcode = cpu_to_le16(hci_opcode_pack(OGF, cmd_no));
+ hdr->opcode = cpu_to_le16(opcode);
hdr->plen = len;
if (len)
@@ -417,127 +414,62 @@ static int btmrvl_open(struct hci_dev *hdev)
return 0;
}
-/*
- * This function parses provided calibration data input. It should contain
- * hex bytes separated by space or new line character. Here is an example.
- * 00 1C 01 37 FF FF FF FF 02 04 7F 01
- * CE BA 00 00 00 2D C6 C0 00 00 00 00
- * 00 F0 00 00
- */
-static int btmrvl_parse_cal_cfg(const u8 *src, u32 len, u8 *dst, u32 dst_size)
+static int btmrvl_download_cal_data(struct btmrvl_private *priv,
+ u8 *data, int len)
{
- const u8 *s = src;
- u8 *d = dst;
int ret;
- u8 tmp[3];
-
- tmp[2] = '\0';
- while ((s - src) <= len - 2) {
- if (isspace(*s)) {
- s++;
- continue;
- }
-
- if (isxdigit(*s)) {
- if ((d - dst) >= dst_size) {
- BT_ERR("calibration data file too big!!!");
- return -EINVAL;
- }
-
- memcpy(tmp, s, 2);
-
- ret = kstrtou8(tmp, 16, d++);
- if (ret < 0)
- return ret;
-
- s += 2;
- } else {
- return -EINVAL;
- }
- }
- if (d == dst)
- return -EINVAL;
-
- return 0;
-}
-
-static int btmrvl_load_cal_data(struct btmrvl_private *priv,
- u8 *config_data)
-{
- int i, ret;
- u8 data[BT_CMD_DATA_SIZE];
data[0] = 0x00;
data[1] = 0x00;
data[2] = 0x00;
- data[3] = BT_CMD_DATA_SIZE - 4;
-
- /* Swap cal-data bytes. Each four bytes are swapped. Considering 4
- * byte SDIO header offset, mapping of input and output bytes will be
- * {3, 2, 1, 0} -> {0+4, 1+4, 2+4, 3+4},
- * {7, 6, 5, 4} -> {4+4, 5+4, 6+4, 7+4} */
- for (i = 4; i < BT_CMD_DATA_SIZE; i++)
- data[i] = config_data[(i / 4) * 8 - 1 - i];
+ data[3] = len;
print_hex_dump_bytes("Calibration data: ",
- DUMP_PREFIX_OFFSET, data, BT_CMD_DATA_SIZE);
+ DUMP_PREFIX_OFFSET, data, BT_CAL_HDR_LEN + len);
ret = btmrvl_send_sync_cmd(priv, BT_CMD_LOAD_CONFIG_DATA, data,
- BT_CMD_DATA_SIZE);
+ BT_CAL_HDR_LEN + len);
if (ret)
BT_ERR("Failed to download caibration data\n");
return 0;
}
-static int
-btmrvl_process_cal_cfg(struct btmrvl_private *priv, u8 *data, u32 size)
+static int btmrvl_cal_data_dt(struct btmrvl_private *priv)
{
- u8 cal_data[BT_CAL_DATA_SIZE];
+ struct device_node *dt_node;
+ u8 cal_data[BT_CAL_HDR_LEN + BT_CAL_DATA_SIZE];
+ const char name[] = "btmrvl_caldata";
+ const char property[] = "btmrvl,caldata";
int ret;
- ret = btmrvl_parse_cal_cfg(data, size, cal_data, sizeof(cal_data));
+ dt_node = of_find_node_by_name(NULL, name);
+ if (!dt_node)
+ return -ENODEV;
+
+ ret = of_property_read_u8_array(dt_node, property,
+ cal_data + BT_CAL_HDR_LEN,
+ BT_CAL_DATA_SIZE);
if (ret)
return ret;
- ret = btmrvl_load_cal_data(priv, cal_data);
+ BT_DBG("Use cal data from device tree");
+ ret = btmrvl_download_cal_data(priv, cal_data, BT_CAL_DATA_SIZE);
if (ret) {
- BT_ERR("Fail to load calibrate data");
+ BT_ERR("Fail to download calibrate data");
return ret;
}
return 0;
}
-static int btmrvl_cal_data_config(struct btmrvl_private *priv)
-{
- const struct firmware *cfg;
- int ret;
- const char *cal_data = priv->btmrvl_dev.cal_data;
-
- if (!cal_data)
- return 0;
-
- ret = request_firmware(&cfg, cal_data, priv->btmrvl_dev.dev);
- if (ret < 0) {
- BT_DBG("Failed to get %s file, skipping cal data download",
- cal_data);
- return 0;
- }
-
- ret = btmrvl_process_cal_cfg(priv, (u8 *)cfg->data, cfg->size);
- release_firmware(cfg);
- return ret;
-}
-
static int btmrvl_setup(struct hci_dev *hdev)
{
struct btmrvl_private *priv = hci_get_drvdata(hdev);
btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
- if (btmrvl_cal_data_config(priv))
- BT_ERR("Set cal data failed");
+ btmrvl_cal_data_dt(priv);
priv->btmrvl_dev.psmode = 1;
btmrvl_enable_ps(priv);
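With the firmware-file parser removed, the calibration bytes now come out of the device tree as a plain binary property, so no hex-text parsing is needed and the old loader's byte-swap loop goes away with it. A minimal sketch of the expected node and of reading it with the same OF helpers used in btmrvl_cal_data_dt() above (node and property names from that function; the consumer shown here is illustrative):

#include <linux/of.h>

/* Expected DT fragment (illustrative):
 *
 *	btmrvl_caldata {
 *		btmrvl,caldata = /bits/ 8 <... 28 calibration bytes ...>;
 *	};
 */
static int sketch_read_caldata(u8 *buf, size_t len)
{
	struct device_node *np;
	int ret;

	np = of_find_node_by_name(NULL, "btmrvl_caldata");
	if (!np)
		return -ENODEV;			/* no calibration data provided */

	/* Binary property: bytes land directly in buf, nothing to parse. */
	ret = of_property_read_u8_array(np, "btmrvl,caldata", buf, len);
	of_node_put(np);
	return ret;
}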
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index fabcf5bb48af..1b52c9f5230d 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -18,6 +18,7 @@
* this warranty disclaimer.
**/
+#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/mmc/sdio_ids.h>
@@ -101,7 +102,6 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
.helper = "mrvl/sd8688_helper.bin",
.firmware = "mrvl/sd8688.bin",
- .cal_data = NULL,
.reg = &btmrvl_reg_8688,
.sd_blksz_fw_dl = 64,
};
@@ -109,7 +109,6 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
.helper = NULL,
.firmware = "mrvl/sd8787_uapsta.bin",
- .cal_data = NULL,
.reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
@@ -117,7 +116,6 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
.helper = NULL,
.firmware = "mrvl/sd8797_uapsta.bin",
- .cal_data = "mrvl/sd8797_caldata.conf",
.reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
@@ -125,7 +123,6 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
.helper = NULL,
.firmware = "mrvl/sd8897_uapsta.bin",
- .cal_data = NULL,
.reg = &btmrvl_reg_88xx,
.sd_blksz_fw_dl = 256,
};
@@ -1007,7 +1004,6 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
struct btmrvl_sdio_device *data = (void *) id->driver_data;
card->helper = data->helper;
card->firmware = data->firmware;
- card->cal_data = data->cal_data;
card->reg = data->reg;
card->sd_blksz_fw_dl = data->sd_blksz_fw_dl;
}
@@ -1036,8 +1032,6 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
}
card->priv = priv;
- priv->btmrvl_dev.dev = &card->func->dev;
- priv->btmrvl_dev.cal_data = card->cal_data;
/* Initialize the interface specific function pointers */
priv->hw_host_to_card = btmrvl_sdio_host_to_card;
@@ -1220,5 +1214,4 @@ MODULE_FIRMWARE("mrvl/sd8688_helper.bin");
MODULE_FIRMWARE("mrvl/sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
-MODULE_FIRMWARE("mrvl/sd8797_caldata.conf");
MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h
index 6872d9ecac07..43d35a609ca9 100644
--- a/drivers/bluetooth/btmrvl_sdio.h
+++ b/drivers/bluetooth/btmrvl_sdio.h
@@ -85,7 +85,6 @@ struct btmrvl_sdio_card {
u32 ioport;
const char *helper;
const char *firmware;
- const char *cal_data;
const struct btmrvl_sdio_card_reg *reg;
u16 sd_blksz_fw_dl;
u8 rx_unit;
@@ -95,7 +94,6 @@ struct btmrvl_sdio_card {
struct btmrvl_sdio_device {
const char *helper;
const char *firmware;
- const char *cal_data;
const struct btmrvl_sdio_card_reg *reg;
u16 sd_blksz_fw_dl;
};
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index b61440aaee65..83f6437dd91d 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -73,6 +73,7 @@ struct btsdio_data {
#define REG_CL_INTRD 0x13 /* Interrupt Clear */
#define REG_EN_INTRD 0x14 /* Interrupt Enable */
#define REG_MD_STAT 0x20 /* Bluetooth Mode Status */
+#define REG_MD_SET 0x20 /* Bluetooth Mode Set */
static int btsdio_tx_packet(struct btsdio_data *data, struct sk_buff *skb)
{
@@ -212,7 +213,7 @@ static int btsdio_open(struct hci_dev *hdev)
}
if (data->func->class == SDIO_CLASS_BT_B)
- sdio_writeb(data->func, 0x00, REG_MD_STAT, NULL);
+ sdio_writeb(data->func, 0x00, REG_MD_SET, NULL);
sdio_writeb(data->func, 0x01, REG_EN_INTRD, NULL);
@@ -333,6 +334,9 @@ static int btsdio_probe(struct sdio_func *func,
hdev->flush = btsdio_flush;
hdev->send = btsdio_send_frame;
+ if (func->vendor == 0x0104 && func->device == 0x00c5)
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+
err = hci_register_dev(hdev);
if (err < 0) {
hci_free_dev(hdev);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c0ff34f2d2df..baeaaed299e4 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -150,10 +150,12 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
@@ -163,6 +165,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -223,6 +226,7 @@ static const struct usb_device_id blacklist_table[] = {
/* Intel Bluetooth device */
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
+ { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
{ } /* Terminating entry */
};
@@ -961,6 +965,45 @@ static int btusb_setup_bcm92035(struct hci_dev *hdev)
return 0;
}
+static int btusb_setup_csr(struct hci_dev *hdev)
+{
+ struct hci_rp_read_local_version *rp;
+ struct sk_buff *skb;
+ int ret;
+
+ BT_DBG("%s", hdev->name);
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("Reading local version failed (%ld)", -PTR_ERR(skb));
+ return -PTR_ERR(skb);
+ }
+
+ rp = (struct hci_rp_read_local_version *) skb->data;
+
+ if (!rp->status) {
+ if (le16_to_cpu(rp->manufacturer) != 10) {
+ /* Clear the reset quirk since this is not an actual
+ * early Bluetooth 1.1 device from CSR.
+ */
+ clear_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+
+ /* These fake CSR controllers have all a broken
+ * stored link key handling and so just disable it.
+ */
+ set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY,
+ &hdev->quirks);
+ }
+ }
+
+ ret = -bt_to_errno(rp->status);
+
+ kfree_skb(skb);
+
+ return ret;
+}
+
struct intel_version {
u8 status;
u8 hw_platform;
@@ -1435,8 +1478,10 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_BCM92035)
hdev->setup = btusb_setup_bcm92035;
- if (id->driver_info & BTUSB_INTEL)
+ if (id->driver_info & BTUSB_INTEL) {
+ usb_enable_autosuspend(data->udev);
hdev->setup = btusb_setup_intel;
+ }
/* Interface numbers are hardcoded in the specification */
data->isoc = usb_ifnum_to_if(data->udev, 1);
@@ -1459,10 +1504,15 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_CSR) {
struct usb_device *udev = data->udev;
+ u16 bcdDevice = le16_to_cpu(udev->descriptor.bcdDevice);
/* Old firmware would otherwise execute USB reset */
- if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117)
+ if (bcdDevice < 0x117)
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+
+ /* Fake CSR devices with broken commands */
+ if (bcdDevice <= 0x100)
+ hdev->setup = btusb_setup_csr;
}
if (id->driver_info & BTUSB_SNIFFER) {
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 7b167385a1c4..1ef6990a5c7e 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -141,22 +141,28 @@ static int vhci_create_device(struct vhci_data *data, __u8 dev_type)
}
static inline ssize_t vhci_get_user(struct vhci_data *data,
- const char __user *buf, size_t count)
+ const struct iovec *iov,
+ unsigned long count)
{
+ size_t len = iov_length(iov, count);
struct sk_buff *skb;
__u8 pkt_type, dev_type;
+ unsigned long i;
int ret;
- if (count < 2 || count > HCI_MAX_FRAME_SIZE)
+ if (len < 2 || len > HCI_MAX_FRAME_SIZE)
return -EINVAL;
- skb = bt_skb_alloc(count, GFP_KERNEL);
+ skb = bt_skb_alloc(len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
- if (copy_from_user(skb_put(skb, count), buf, count)) {
- kfree_skb(skb);
- return -EFAULT;
+ for (i = 0; i < count; i++) {
+ if (copy_from_user(skb_put(skb, iov[i].iov_len),
+ iov[i].iov_base, iov[i].iov_len)) {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
}
pkt_type = *((__u8 *) skb->data);
@@ -205,7 +211,7 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
return -EINVAL;
}
- return (ret < 0) ? ret : count;
+ return (ret < 0) ? ret : len;
}
static inline ssize_t vhci_put_user(struct vhci_data *data,
@@ -272,12 +278,13 @@ static ssize_t vhci_read(struct file *file,
return ret;
}
-static ssize_t vhci_write(struct file *file,
- const char __user *buf, size_t count, loff_t *pos)
+static ssize_t vhci_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long count, loff_t pos)
{
+ struct file *file = iocb->ki_filp;
struct vhci_data *data = file->private_data;
- return vhci_get_user(data, buf, count);
+ return vhci_get_user(data, iov, count);
}
static unsigned int vhci_poll(struct file *file, poll_table *wait)
@@ -342,7 +349,7 @@ static int vhci_release(struct inode *inode, struct file *file)
static const struct file_operations vhci_fops = {
.owner = THIS_MODULE,
.read = vhci_read,
- .write = vhci_write,
+ .aio_write = vhci_write,
.poll = vhci_poll,
.open = vhci_open,
.release = vhci_release,
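Moving vhci_write() to the .aio_write vector interface means an injected packet no longer has to be contiguous in user memory: the packet-type indicator and the payload can sit in separate buffers and go in as one gather write, with iov_length() providing the overall size check. A user-space sketch of driving the device that way (it assumes the node is /dev/vhci and that the vendor-packet create handshake works as in vhci_get_user() above; the HCI byte values follow the Bluetooth spec):

/* Userspace sketch: create a virtual controller, then inject one event,
 * each as a two-segment gather write.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	uint8_t vendor_pkt = 0xff, bredr = 0x00;	/* HCI_VENDOR_PKT, BR/EDR type */
	uint8_t event_pkt = 0x04;			/* HCI event packet indicator */
	uint8_t vendor_evt[] = { 0xff, 0x01, 0x00 };	/* vendor event, 1 data byte */
	struct iovec create[2] = {
		{ &vendor_pkt, 1 }, { &bredr, 1 },
	};
	struct iovec inject[2] = {
		{ &event_pkt, 1 }, { vendor_evt, sizeof(vendor_evt) },
	};
	int fd = open("/dev/vhci", O_RDWR);

	if (fd < 0)
		return 1;
	if (writev(fd, create, 2) < 0 ||	/* creates the hci device */
	    writev(fd, inject, 2) < 0)		/* delivered via hci_recv_frame() */
		return 1;
	close(fd);
	return 0;
}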
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index b6739cb78e32..962fd35cbd8d 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -979,7 +979,7 @@ static int cci_probe(void)
nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
- ports = kcalloc(sizeof(*ports), nb_cci_ports, GFP_KERNEL);
+ ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
if (!ports)
return -ENOMEM;
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 2394e9753ef5..725c46162bbd 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -588,12 +588,6 @@ static const struct mvebu_mbus_soc_data mv78xx0_mbus_data = {
.show_cpu_target = mvebu_sdram_debug_show_orion,
};
-/*
- * The driver doesn't yet have a DT binding because the details of
- * this DT binding still need to be sorted out. However, as a
- * preparation, we already use of_device_id to match a SoC description
- * string against the SoC specific details of this driver.
- */
static const struct of_device_id of_mvebu_mbus_ids[] = {
{ .compatible = "marvell,armada370-mbus",
.data = &armada_370_xp_mbus_data, },
@@ -734,11 +728,11 @@ int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
{
const struct of_device_id *of_id;
- for (of_id = of_mvebu_mbus_ids; of_id->compatible; of_id++)
+ for (of_id = of_mvebu_mbus_ids; of_id->compatible[0]; of_id++)
if (!strcmp(of_id->compatible, soc))
break;
- if (!of_id->compatible) {
+ if (!of_id->compatible[0]) {
pr_err("could not find a matching SoC family\n");
return -ENODEV;
}
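The mvebu-mbus fix is about what an of_device_id sentinel looks like: compatible is a fixed-size char array embedded in the struct (char compatible[128] in mod_devicetable.h), not a pointer, so of_id->compatible is always a non-NULL address and the old loop could never see the terminating entry. Checking the first character is the reliable way to stop on the all-zero sentinel:

#include <linux/mod_devicetable.h>
#include <linux/string.h>

/* Illustrative sentinel-aware lookup over a { } terminated table. */
static const struct of_device_id *
sketch_match_soc(const struct of_device_id *table, const char *soc)
{
	const struct of_device_id *id;

	for (id = table; id->compatible[0]; id++)	/* [0] == '\0' ends it */
		if (!strcmp(id->compatible, soc))
			return id;

	return NULL;	/* reached the empty terminator without a match */
}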
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 5980cb9af857..51e75ad96422 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -561,11 +561,11 @@ static int gdrom_set_interrupt_handlers(void)
int err;
err = request_irq(HW_EVENT_GDROM_CMD, gdrom_command_interrupt,
- IRQF_DISABLED, "gdrom_command", &gd);
+ 0, "gdrom_command", &gd);
if (err)
return err;
err = request_irq(HW_EVENT_GDROM_DMA, gdrom_dma_interrupt,
- IRQF_DISABLED, "gdrom_dma", &gd);
+ 0, "gdrom_dma", &gd);
if (err)
free_irq(HW_EVENT_GDROM_CMD, &gd);
return err;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index fa3243d71c76..1386749b48ff 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -499,6 +499,7 @@ config RAW_DRIVER
config MAX_RAW_DEVS
int "Maximum number of RAW devices to support (1-65536)"
depends on RAW_DRIVER
+ range 1 65536
default "256"
help
The maximum number of RAW devices that are supported.
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 290fe5b7fd32..a324f9303e36 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_GPIO_TB0219) += tb0219.o
obj-$(CONFIG_TELCLOCK) += tlclk.o
obj-$(CONFIG_MWAVE) += mwave/
-obj-$(CONFIG_AGP) += agp/
+obj-y += agp/
obj-$(CONFIG_PCMCIA) += pcmcia/
obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index d8b1b576556c..c528f96ee204 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -68,6 +68,7 @@ config AGP_AMD64
config AGP_INTEL
tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support"
depends on AGP && X86
+ select INTEL_GTT
help
This option gives you AGP support for the GLX component of X
on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
@@ -155,3 +156,7 @@ config AGP_SGI_TIOCA
This option gives you AGP GART support for the SGI TIO chipset
for IA64 processors.
+config INTEL_GTT
+ tristate
+ depends on X86 && PCI
+
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 8eb56e273e75..604489bcdbf9 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
obj-$(CONFIG_AGP_PARISC) += parisc-agp.o
obj-$(CONFIG_AGP_I460) += i460-agp.o
obj-$(CONFIG_AGP_INTEL) += intel-agp.o
-obj-$(CONFIG_AGP_INTEL) += intel-gtt.o
+obj-$(CONFIG_INTEL_GTT) += intel-gtt.o
obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
obj-$(CONFIG_AGP_SGI_TIOCA) += sgi-agp.o
obj-$(CONFIG_AGP_SIS) += sis-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 923f99df4f1c..b709749c8639 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -239,6 +239,7 @@ long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
/* Chipset independent registers (from AGP Spec) */
#define AGP_APBASE 0x10
+#define AGP_APERTURE_BAR 0
#define AGPSTAT 0x4
#define AGPCMD 0x8
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 443cd6751ca2..19db03667650 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -85,8 +85,8 @@ static int ali_configure(void)
pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010));
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
#if 0
if (agp_bridge->type == ALI_M1541) {
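This ali-agp hunk is the first of a long run below (amd-k7, amd64, ati, efficeon, generic, intel, nvidia, sis, via) making the same substitution: rather than reading a BAR out of config space and masking it by hand, the drivers ask the PCI core for an address in the right space. pci_bus_address() yields the bus address the GART/aperture registers must be programmed with and copes with 64-bit BARs and host-bridge translation, while pci_resource_start() is used where the value feeds ioremap(), since that wants a CPU physical address. A short sketch of the idiom (BAR 0 matches the new AGP_APERTURE_BAR define):

#include <linux/pci.h>

/* Sketch: the two address flavours used throughout the AGP conversion. */
static void sketch_aperture_addresses(struct pci_dev *pdev)
{
	/* Bus address: what the bridge/GART hardware gets programmed with. */
	dma_addr_t bus_addr = pci_bus_address(pdev, 0);

	/* CPU physical address: what the kernel would hand to ioremap(). */
	resource_size_t cpu_addr = pci_resource_start(pdev, 0);

	dev_info(&pdev->dev, "aperture bus %pad, cpu %pa\n",
		 &bus_addr, &cpu_addr);
}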
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 779f0ab845a9..3661a51e93e2 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -11,7 +11,7 @@
#include <linux/slab.h>
#include "agp.h"
-#define AMD_MMBASE 0x14
+#define AMD_MMBASE_BAR 1
#define AMD_APSIZE 0xac
#define AMD_MODECNTL 0xb0
#define AMD_MODECNTL2 0xb2
@@ -126,7 +126,6 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
unsigned long __iomem *cur_gatt;
unsigned long addr;
int retval;
- u32 temp;
int i;
value = A_SIZE_LVL2(agp_bridge->current_size);
@@ -149,8 +148,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
* used to program the agp master not the cpu
*/
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR);
agp_bridge->gart_bus_addr = addr;
/* Calculate the agp offset */
@@ -207,6 +205,7 @@ static int amd_irongate_fetch_size(void)
static int amd_irongate_configure(void)
{
struct aper_size_info_lvl2 *current_size;
+ phys_addr_t reg;
u32 temp;
u16 enable_reg;
@@ -214,9 +213,8 @@ static int amd_irongate_configure(void)
if (!amd_irongate_private.registers) {
/* Get the memory mapped registers */
- pci_read_config_dword(agp_bridge->dev, AMD_MMBASE, &temp);
- temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
+ reg = pci_resource_start(agp_bridge->dev, AMD_MMBASE_BAR);
+ amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(reg, 4096);
if (!amd_irongate_private.registers)
return -ENOMEM;
}
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index d79d692d05b8..3b47ed0310e1 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -269,7 +269,6 @@ static int agp_aperture_valid(u64 aper, u32 size)
*/
static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
{
- u32 aper_low, aper_hi;
u64 aper, nb_aper;
int order = 0;
u32 nb_order, nb_base;
@@ -295,9 +294,7 @@ static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
apsize |= 0xf00;
order = 7 - hweight16(apsize);
- pci_read_config_dword(agp, 0x10, &aper_low);
- pci_read_config_dword(agp, 0x14, &aper_hi);
- aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32);
+ aper = pci_bus_address(agp, AGP_APERTURE_BAR);
/*
* On some sick chips APSIZE is 0. This means it wants 4G
@@ -735,7 +732,7 @@ static struct pci_device_id agp_amd64_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
-static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
+static const struct pci_device_id agp_amd64_pci_promisc_table[] = {
{ PCI_DEVICE_CLASS(0, 0) },
{ }
};
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 03c1dc1ab552..18a7a6baa304 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -12,7 +12,7 @@
#include <asm/agp.h>
#include "agp.h"
-#define ATI_GART_MMBASE_ADDR 0x14
+#define ATI_GART_MMBASE_BAR 1
#define ATI_RS100_APSIZE 0xac
#define ATI_RS100_IG_AGPMODE 0xb0
#define ATI_RS300_APSIZE 0xf8
@@ -196,12 +196,12 @@ static void ati_cleanup(void)
static int ati_configure(void)
{
+ phys_addr_t reg;
u32 temp;
/* Get the memory mapped registers */
- pci_read_config_dword(agp_bridge->dev, ATI_GART_MMBASE_ADDR, &temp);
- temp = (temp & 0xfffff000);
- ati_generic_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
+ reg = pci_resource_start(agp_bridge->dev, ATI_GART_MMBASE_BAR);
+ ati_generic_private.registers = (volatile u8 __iomem *) ioremap(reg, 4096);
if (!ati_generic_private.registers)
return -ENOMEM;
@@ -211,18 +211,18 @@ static int ati_configure(void)
else
pci_write_config_dword(agp_bridge->dev, ATI_RS300_IG_AGPMODE, 0x20000);
- /* address to map too */
+ /* address to map to */
/*
- pci_read_config_dword(agp_bridge.dev, AGP_APBASE, &temp);
- agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge.gart_bus_addr = pci_bus_address(agp_bridge.dev,
+ AGP_APERTURE_BAR);
printk(KERN_INFO PFX "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr);
*/
writel(0x60000, ati_generic_private.registers+ATI_GART_FEATURE_ID);
readl(ati_generic_private.registers+ATI_GART_FEATURE_ID); /* PCI Posting.*/
/* SIGNALED_SYSTEM_ERROR @ NB_STATUS */
- pci_read_config_dword(agp_bridge->dev, 4, &temp);
- pci_write_config_dword(agp_bridge->dev, 4, temp | (1<<14));
+ pci_read_config_dword(agp_bridge->dev, PCI_COMMAND, &temp);
+ pci_write_config_dword(agp_bridge->dev, PCI_COMMAND, temp | (1<<14));
/* Write out the address of the gatt table */
writel(agp_bridge->gatt_bus_addr, ati_generic_private.registers+ATI_GART_BASE);
@@ -385,8 +385,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
* This is a bus address even on the alpha, b/c its
* used to program the agp master not the cpu
*/
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR);
agp_bridge->gart_bus_addr = addr;
/* Calculate the agp offset */
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 6974d5032053..533cb6d229b8 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -128,7 +128,6 @@ static void efficeon_cleanup(void)
static int efficeon_configure(void)
{
- u32 temp;
u16 temp2;
struct aper_size_info_lvl2 *current_size;
@@ -141,8 +140,8 @@ static int efficeon_configure(void)
current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* agpctrl */
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index a0df182f6f7d..f39437addb58 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -1396,8 +1396,8 @@ int agp3_generic_configure(void)
current_size = A_SIZE_16(agp_bridge->current_size);
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* set aperture size */
pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index a426ee1f57a6..f9b9ca5d31b7 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -14,9 +14,6 @@
#include "intel-agp.h"
#include <drm/intel-gtt.h>
-int intel_agp_enabled;
-EXPORT_SYMBOL(intel_agp_enabled);
-
static int intel_fetch_size(void)
{
int i;
@@ -118,7 +115,6 @@ static void intel_8xx_cleanup(void)
static int intel_configure(void)
{
- u32 temp;
u16 temp2;
struct aper_size_info_16 *current_size;
@@ -128,8 +124,8 @@ static int intel_configure(void)
pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
@@ -148,7 +144,7 @@ static int intel_configure(void)
static int intel_815_configure(void)
{
- u32 temp, addr;
+ u32 addr;
u8 temp2;
struct aper_size_info_8 *current_size;
@@ -167,8 +163,8 @@ static int intel_815_configure(void)
current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
pci_read_config_dword(agp_bridge->dev, INTEL_ATTBASE, &addr);
addr &= INTEL_815_ATTBASE_MASK;
@@ -208,7 +204,6 @@ static void intel_820_cleanup(void)
static int intel_820_configure(void)
{
- u32 temp;
u8 temp2;
struct aper_size_info_8 *current_size;
@@ -218,8 +213,8 @@ static int intel_820_configure(void)
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
@@ -239,7 +234,6 @@ static int intel_820_configure(void)
static int intel_840_configure(void)
{
- u32 temp;
u16 temp2;
struct aper_size_info_8 *current_size;
@@ -249,8 +243,8 @@ static int intel_840_configure(void)
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
@@ -268,7 +262,6 @@ static int intel_840_configure(void)
static int intel_845_configure(void)
{
- u32 temp;
u8 temp2;
struct aper_size_info_8 *current_size;
@@ -282,9 +275,9 @@ static int intel_845_configure(void)
agp_bridge->apbase_config);
} else {
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- agp_bridge->apbase_config = temp;
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+ agp_bridge->apbase_config = agp_bridge->gart_bus_addr;
}
/* attbase - aperture base */
@@ -303,7 +296,6 @@ static int intel_845_configure(void)
static int intel_850_configure(void)
{
- u32 temp;
u16 temp2;
struct aper_size_info_8 *current_size;
@@ -313,8 +305,8 @@ static int intel_850_configure(void)
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
@@ -332,7 +324,6 @@ static int intel_850_configure(void)
static int intel_860_configure(void)
{
- u32 temp;
u16 temp2;
struct aper_size_info_8 *current_size;
@@ -342,8 +333,8 @@ static int intel_860_configure(void)
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
@@ -361,7 +352,6 @@ static int intel_860_configure(void)
static int intel_830mp_configure(void)
{
- u32 temp;
u16 temp2;
struct aper_size_info_8 *current_size;
@@ -371,8 +361,8 @@ static int intel_830mp_configure(void)
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
@@ -390,7 +380,6 @@ static int intel_830mp_configure(void)
static int intel_7505_configure(void)
{
- u32 temp;
u16 temp2;
struct aper_size_info_8 *current_size;
@@ -400,8 +389,8 @@ static int intel_7505_configure(void)
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
@@ -814,8 +803,6 @@ static int agp_intel_probe(struct pci_dev *pdev,
found_gmch:
pci_set_drvdata(pdev, bridge);
err = agp_add_bridge(bridge);
- if (!err)
- intel_agp_enabled = 1;
return err;
}
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 1042c1b90376..fda073dcd967 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -55,8 +55,8 @@
#define INTEL_I860_ERRSTS 0xc8
/* Intel i810 registers */
-#define I810_GMADDR 0x10
-#define I810_MMADDR 0x14
+#define I810_GMADR_BAR 0
+#define I810_MMADR_BAR 1
#define I810_PTE_BASE 0x10000
#define I810_PTE_MAIN_UNCACHED 0x00000000
#define I810_PTE_LOCAL 0x00000002
@@ -113,9 +113,9 @@
#define INTEL_I850_ERRSTS 0xc8
/* intel 915G registers */
-#define I915_GMADDR 0x18
-#define I915_MMADDR 0x10
-#define I915_PTEADDR 0x1C
+#define I915_GMADR_BAR 2
+#define I915_MMADR_BAR 0
+#define I915_PTE_BAR 3
#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index b8e2014cb9cb..5c85350f4c3d 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -64,7 +64,7 @@ static struct _intel_private {
struct pci_dev *pcidev; /* device one */
struct pci_dev *bridge_dev;
u8 __iomem *registers;
- phys_addr_t gtt_bus_addr;
+ phys_addr_t gtt_phys_addr;
u32 PGETBL_save;
u32 __iomem *gtt; /* I915G */
bool clear_fake_agp; /* on first access via agp, fill with scratch */
@@ -94,6 +94,7 @@ static struct _intel_private {
#define IS_IRONLAKE intel_private.driver->is_ironlake
#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
+#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_gtt_map_memory(struct page **pages,
unsigned int num_entries,
struct sg_table *st)
@@ -168,11 +169,12 @@ static void i8xx_destroy_pages(struct page *page)
__free_pages(page, 2);
atomic_dec(&agp_bridge->current_memory_agp);
}
+#endif
#define I810_GTT_ORDER 4
static int i810_setup(void)
{
- u32 reg_addr;
+ phys_addr_t reg_addr;
char *gtt_table;
/* i81x does not preallocate the gtt. It's always 64kb in size. */
@@ -181,8 +183,7 @@ static int i810_setup(void)
return -ENOMEM;
intel_private.i81x_gtt_table = gtt_table;
- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
- reg_addr &= 0xfff80000;
+ reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);
intel_private.registers = ioremap(reg_addr, KB(64));
if (!intel_private.registers)
@@ -191,7 +192,7 @@ static int i810_setup(void)
writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
intel_private.registers+I810_PGETBL_CTL);
- intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
+ intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;
if ((readl(intel_private.registers+I810_DRAM_CTL)
& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
@@ -209,6 +210,7 @@ static void i810_cleanup(void)
free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
}
+#if IS_ENABLED(CONFIG_AGP_INTEL)
static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
@@ -289,6 +291,7 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
}
kfree(curr);
}
+#endif
static int intel_gtt_setup_scratch_page(void)
{
@@ -608,9 +611,8 @@ static bool intel_gtt_can_wc(void)
static int intel_gtt_init(void)
{
- u32 gma_addr;
u32 gtt_map_size;
- int ret;
+ int ret, bar;
ret = intel_private.driver->setup();
if (ret != 0)
@@ -636,10 +638,10 @@ static int intel_gtt_init(void)
intel_private.gtt = NULL;
if (intel_gtt_can_wc())
- intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
+ intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr,
gtt_map_size);
if (intel_private.gtt == NULL)
- intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
+ intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
gtt_map_size);
if (intel_private.gtt == NULL) {
intel_private.driver->cleanup();
@@ -647,7 +649,9 @@ static int intel_gtt_init(void)
return -ENOMEM;
}
+#if IS_ENABLED(CONFIG_AGP_INTEL)
global_cache_flush(); /* FIXME: ? */
+#endif
intel_private.stolen_size = intel_gtt_stolen_size();
@@ -660,17 +664,15 @@ static int intel_gtt_init(void)
}
if (INTEL_GTT_GEN <= 2)
- pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
- &gma_addr);
+ bar = I810_GMADR_BAR;
else
- pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
- &gma_addr);
-
- intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+ bar = I915_GMADR_BAR;
+ intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
return 0;
}
+#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_fetch_size(void)
{
int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
@@ -689,6 +691,7 @@ static int intel_fake_agp_fetch_size(void)
return 0;
}
+#endif
static void i830_cleanup(void)
{
@@ -787,20 +790,20 @@ EXPORT_SYMBOL(intel_enable_gtt);
static int i830_setup(void)
{
- u32 reg_addr;
+ phys_addr_t reg_addr;
- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
- reg_addr &= 0xfff80000;
+ reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);
intel_private.registers = ioremap(reg_addr, KB(64));
if (!intel_private.registers)
return -ENOMEM;
- intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
+ intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;
return 0;
}
+#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
agp_bridge->gatt_table_real = NULL;
@@ -825,6 +828,7 @@ static int intel_fake_agp_configure(void)
return 0;
}
+#endif
static bool i830_check_flags(unsigned int flags)
{
@@ -863,6 +867,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
+#if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gtt_insert_pages(unsigned int first_entry,
unsigned int num_entries,
struct page **pages,
@@ -928,6 +933,7 @@ out_err:
mem->is_flushed = true;
return ret;
}
+#endif
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
@@ -941,6 +947,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
}
EXPORT_SYMBOL(intel_gtt_clear_range);
+#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
off_t pg_start, int type)
{
@@ -982,6 +989,7 @@ static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
/* always return NULL for other allocation types for now */
return NULL;
}
+#endif
static int intel_alloc_chipset_flush_resource(void)
{
@@ -1108,12 +1116,10 @@ static void i965_write_entry(dma_addr_t addr,
static int i9xx_setup(void)
{
- u32 reg_addr, gtt_addr;
+ phys_addr_t reg_addr;
int size = KB(512);
- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
-
- reg_addr &= 0xfff80000;
+ reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);
intel_private.registers = ioremap(reg_addr, size);
if (!intel_private.registers)
@@ -1121,15 +1127,14 @@ static int i9xx_setup(void)
switch (INTEL_GTT_GEN) {
case 3:
- pci_read_config_dword(intel_private.pcidev,
- I915_PTEADDR, &gtt_addr);
- intel_private.gtt_bus_addr = gtt_addr;
+ intel_private.gtt_phys_addr =
+ pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
break;
case 5:
- intel_private.gtt_bus_addr = reg_addr + MB(2);
+ intel_private.gtt_phys_addr = reg_addr + MB(2);
break;
default:
- intel_private.gtt_bus_addr = reg_addr + KB(512);
+ intel_private.gtt_phys_addr = reg_addr + KB(512);
break;
}
@@ -1138,6 +1143,7 @@ static int i9xx_setup(void)
return 0;
}
+#if IS_ENABLED(CONFIG_AGP_INTEL)
static const struct agp_bridge_driver intel_fake_agp_driver = {
.owner = THIS_MODULE,
.size_type = FIXED_APER_SIZE,
@@ -1159,6 +1165,7 @@ static const struct agp_bridge_driver intel_fake_agp_driver = {
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
};
+#endif
static const struct intel_gtt_driver i81x_gtt_driver = {
.gen = 1,
@@ -1376,11 +1383,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
intel_private.refcount++;
+#if IS_ENABLED(CONFIG_AGP_INTEL)
if (bridge) {
bridge->driver = &intel_fake_agp_driver;
bridge->dev_private_data = &intel_private;
bridge->dev = bridge_pdev;
}
+#endif
intel_private.bridge_dev = pci_dev_get(bridge_pdev);
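The intel-gtt changes fence the fake-AGP glue off behind CONFIG_AGP_INTEL now that intel-gtt.o can be built for INTEL_GTT alone (see the agp Kconfig and Makefile hunks earlier), and IS_ENABLED() is the right test because it covers both built-in and modular AGP. A short reminder of how that macro behaves:

#include <linux/kconfig.h>

/* IS_ENABLED(CONFIG_FOO) evaluates to 1 when CONFIG_FOO=y or =m and to 0
 * otherwise; it works in #if blocks as well as in ordinary C expressions.
 */
#if IS_ENABLED(CONFIG_AGP_INTEL)
static void sketch_agp_only_helper(void)
{
	/* compiled only when the AGP frontend is part of the build */
}
#endif

static inline bool sketch_have_agp_frontend(void)
{
	return IS_ENABLED(CONFIG_AGP_INTEL);	/* also usable at run time */
}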
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index be42a2312dc9..a1861b75eb31 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -106,6 +106,7 @@ static int nvidia_configure(void)
{
int i, rc, num_dirs;
u32 apbase, aplimit;
+ phys_addr_t apbase_phys;
struct aper_size_info_8 *current_size;
u32 temp;
@@ -115,9 +116,8 @@ static int nvidia_configure(void)
pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE,
current_size->size_value);
- /* address to map to */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &apbase);
- apbase &= PCI_BASE_ADDRESS_MEM_MASK;
+ /* address to map to */
+ apbase = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR);
agp_bridge->gart_bus_addr = apbase;
aplimit = apbase + (current_size->size * 1024 * 1024) - 1;
pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APBASE, apbase);
@@ -153,8 +153,9 @@ static int nvidia_configure(void)
pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp | 0x100);
/* map aperture */
+ apbase_phys = pci_resource_start(agp_bridge->dev, AGP_APERTURE_BAR);
nvidia_private.aperture =
- (volatile u32 __iomem *) ioremap(apbase, 33 * PAGE_SIZE);
+ (volatile u32 __iomem *) ioremap(apbase_phys, 33 * PAGE_SIZE);
if (!nvidia_private.aperture)
return -ENOMEM;
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 79c838c434bc..2c74038da459 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -50,13 +50,12 @@ static void sis_tlbflush(struct agp_memory *mem)
static int sis_configure(void)
{
- u32 temp;
struct aper_size_info_8 *current_size;
current_size = A_SIZE_8(agp_bridge->current_size);
pci_write_config_byte(agp_bridge->dev, SIS_TLBCNTRL, 0x05);
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
pci_write_config_dword(agp_bridge->dev, SIS_ATTBASE,
agp_bridge->gatt_bus_addr);
pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index 74d3aa3773bf..228f20cddc05 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -43,16 +43,15 @@ static int via_fetch_size(void)
static int via_configure(void)
{
- u32 temp;
struct aper_size_info_8 *current_size;
current_size = A_SIZE_8(agp_bridge->current_size);
/* aperture size */
pci_write_config_byte(agp_bridge->dev, VIA_APSIZE,
current_size->size_value);
- /* address to map too */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ /* address to map to */
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* GART control register */
pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, 0x0000000f);
@@ -132,9 +131,9 @@ static int via_configure_agp3(void)
current_size = A_SIZE_16(agp_bridge->current_size);
- /* address to map too */
- pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ /* address to map to */
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
/* attbase - aperture GATT base */
pci_write_config_dword(agp_bridge->dev, VIA_AGP3_ATTBASE,
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 46118f845948..dd9dfa15e9d1 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -531,6 +531,7 @@ static int apm_suspend_notifier(struct notifier_block *nb,
{
struct apm_user *as;
int err;
+ unsigned long apm_event;
/* short-cut emergency suspends */
if (atomic_read(&userspace_notification_inhibit))
@@ -538,6 +539,9 @@ static int apm_suspend_notifier(struct notifier_block *nb,
switch (event) {
case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ apm_event = (event == PM_SUSPEND_PREPARE) ?
+ APM_USER_SUSPEND : APM_USER_HIBERNATION;
/*
* Queue an event to all "writer" users that we want
* to suspend and need their ack.
@@ -550,7 +554,7 @@ static int apm_suspend_notifier(struct notifier_block *nb,
as->writer && as->suser) {
as->suspend_state = SUSPEND_PENDING;
atomic_inc(&suspend_acks_pending);
- queue_add_event(&as->queue, APM_USER_SUSPEND);
+ queue_add_event(&as->queue, apm_event);
}
}
@@ -601,11 +605,14 @@ static int apm_suspend_notifier(struct notifier_block *nb,
return notifier_from_errno(err);
case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ apm_event = (event == PM_POST_SUSPEND) ?
+ APM_NORMAL_RESUME : APM_HIBERNATION_RESUME;
/*
* Anyone on the APM queues will think we're still suspended.
* Send a message so everyone knows we're now awake again.
*/
- queue_event(APM_NORMAL_RESUME);
+ queue_event(apm_event);
/*
* Finally, wake up anyone who is sleeping on the suspend.
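apm-emulation now forwards hibernation transitions to its clients as well: the one PM notifier picks APM_USER_SUSPEND or APM_USER_HIBERNATION on the way down and APM_NORMAL_RESUME or APM_HIBERNATION_RESUME on the way back, depending on which PM event arrived. A hedged sketch of the general pattern for a driver that wants to see both families of events (notifier API from linux/suspend.h; the handler name is illustrative):

#include <linux/notifier.h>
#include <linux/suspend.h>

/* Sketch: one PM notifier distinguishing suspend from hibernation. */
static int sketch_pm_notify(struct notifier_block *nb,
			    unsigned long event, void *unused)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:	/* system suspend about to start */
	case PM_HIBERNATION_PREPARE:	/* hibernation image about to be made */
		/* quiesce; branch on 'event' for a mode-specific action */
		break;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		/* resume-side counterpart */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block sketch_pm_nb = {
	.notifier_call = sketch_pm_notify,
};

static int __init sketch_init(void)
{
	return register_pm_notifier(&sketch_pm_nb);
}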
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 5d9c31dfc905..d5d4cd82b9f7 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -34,15 +34,12 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/io.h>
-
+#include <linux/acpi.h>
+#include <linux/hpet.h>
#include <asm/current.h>
#include <asm/irq.h>
#include <asm/div64.h>
-#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <linux/hpet.h>
-
/*
* The High Precision Event Timer driver.
* This driver is closely modelled after the rtc.c driver.
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index e6939e13e338..d915707d2ba1 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -1,12 +1,11 @@
/*
* i8k.c -- Linux driver for accessing the SMM BIOS on Dell laptops.
- * See http://www.debian.org/~dz/i8k/ for more information
- * and for latest version of this driver.
*
* Copyright (C) 2001 Massimo Dal Zotto <dz@debian.org>
*
* Hwmon integration:
- * Copyright (C) 2011 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2011 Jean Delvare <jdelvare@suse.de>
+ * Copyright (C) 2013 Guenter Roeck <linux@roeck-us.net>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -19,6 +18,8 @@
* General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -29,13 +30,12 @@
#include <linux/mutex.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/sched.h>
#include <linux/i8k.h>
-#define I8K_VERSION "1.14 21/02/2005"
-
#define I8K_SMM_FN_STATUS 0x0025
#define I8K_SMM_POWER_STATUS 0x0069
#define I8K_SMM_SET_FAN 0x01a3
@@ -44,7 +44,6 @@
#define I8K_SMM_GET_TEMP 0x10a3
#define I8K_SMM_GET_DELL_SIG1 0xfea3
#define I8K_SMM_GET_DELL_SIG2 0xffa3
-#define I8K_SMM_BIOS_VERSION 0x00a6
#define I8K_FAN_MULT 30
#define I8K_MAX_TEMP 127
@@ -64,6 +63,15 @@
static DEFINE_MUTEX(i8k_mutex);
static char bios_version[4];
static struct device *i8k_hwmon_dev;
+static u32 i8k_hwmon_flags;
+static int i8k_fan_mult;
+
+#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
+#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
+#define I8K_HWMON_HAVE_TEMP3 (1 << 2)
+#define I8K_HWMON_HAVE_TEMP4 (1 << 3)
+#define I8K_HWMON_HAVE_FAN1 (1 << 4)
+#define I8K_HWMON_HAVE_FAN2 (1 << 5)
MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
MODULE_DESCRIPTION("Driver for accessing SMM BIOS on Dell laptops");
@@ -103,11 +111,11 @@ static const struct file_operations i8k_fops = {
struct smm_regs {
unsigned int eax;
- unsigned int ebx __attribute__ ((packed));
- unsigned int ecx __attribute__ ((packed));
- unsigned int edx __attribute__ ((packed));
- unsigned int esi __attribute__ ((packed));
- unsigned int edi __attribute__ ((packed));
+ unsigned int ebx __packed;
+ unsigned int ecx __packed;
+ unsigned int edx __packed;
+ unsigned int esi __packed;
+ unsigned int edi __packed;
};
static inline const char *i8k_get_dmi_data(int field)
@@ -124,6 +132,17 @@ static int i8k_smm(struct smm_regs *regs)
{
int rc;
int eax = regs->eax;
+ cpumask_var_t old_mask;
+
+ /* SMM requires CPU 0 */
+ if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_copy(old_mask, &current->cpus_allowed);
+ set_cpus_allowed_ptr(current, cpumask_of(0));
+ if (smp_processor_id() != 0) {
+ rc = -EBUSY;
+ goto out;
+ }
#if defined(CONFIG_X86_64)
asm volatile("pushq %%rax\n\t"
@@ -148,7 +167,7 @@ static int i8k_smm(struct smm_regs *regs)
"pushfq\n\t"
"popq %%rax\n\t"
"andl $1,%%eax\n"
- :"=a"(rc)
+ : "=a"(rc)
: "a"(regs)
: "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
#else
@@ -174,25 +193,17 @@ static int i8k_smm(struct smm_regs *regs)
"lahf\n\t"
"shrl $8,%%eax\n\t"
"andl $1,%%eax\n"
- :"=a"(rc)
+ : "=a"(rc)
: "a"(regs)
: "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
#endif
if (rc != 0 || (regs->eax & 0xffff) == 0xffff || regs->eax == eax)
- return -EINVAL;
+ rc = -EINVAL;
- return 0;
-}
-
-/*
- * Read the bios version. Return the version as an integer corresponding
- * to the ascii value, for example "A17" is returned as 0x00413137.
- */
-static int i8k_get_bios_version(void)
-{
- struct smm_regs regs = { .eax = I8K_SMM_BIOS_VERSION, };
-
- return i8k_smm(&regs) ? : regs.eax;
+out:
+ set_cpus_allowed_ptr(current, old_mask);
+ free_cpumask_var(old_mask);
+ return rc;
}
/*
@@ -203,7 +214,8 @@ static int i8k_get_fn_status(void)
struct smm_regs regs = { .eax = I8K_SMM_FN_STATUS, };
int rc;
- if ((rc = i8k_smm(&regs)) < 0)
+ rc = i8k_smm(&regs);
+ if (rc < 0)
return rc;
switch ((regs.eax >> I8K_FN_SHIFT) & I8K_FN_MASK) {
@@ -226,7 +238,8 @@ static int i8k_get_power_status(void)
struct smm_regs regs = { .eax = I8K_SMM_POWER_STATUS, };
int rc;
- if ((rc = i8k_smm(&regs)) < 0)
+ rc = i8k_smm(&regs);
+ if (rc < 0)
return rc;
return (regs.eax & 0xff) == I8K_POWER_AC ? I8K_AC : I8K_BATTERY;
@@ -251,7 +264,7 @@ static int i8k_get_fan_speed(int fan)
struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, };
regs.ebx = fan & 0xff;
- return i8k_smm(&regs) ? : (regs.eax & 0xffff) * fan_mult;
+ return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult;
}
/*
@@ -277,10 +290,11 @@ static int i8k_get_temp(int sensor)
int temp;
#ifdef I8K_TEMPERATURE_BUG
- static int prev;
+ static int prev[4];
#endif
regs.ebx = sensor & 0xff;
- if ((rc = i8k_smm(&regs)) < 0)
+ rc = i8k_smm(&regs);
+ if (rc < 0)
return rc;
temp = regs.eax & 0xff;
@@ -294,10 +308,10 @@ static int i8k_get_temp(int sensor)
# 1003655139 00000054 00005c52
*/
if (temp > I8K_MAX_TEMP) {
- temp = prev;
- prev = I8K_MAX_TEMP;
+ temp = prev[sensor];
+ prev[sensor] = I8K_MAX_TEMP;
} else {
- prev = temp;
+ prev[sensor] = temp;
}
#endif
@@ -309,7 +323,8 @@ static int i8k_get_dell_signature(int req_fn)
struct smm_regs regs = { .eax = req_fn, };
int rc;
- if ((rc = i8k_smm(&regs)) < 0)
+ rc = i8k_smm(&regs);
+ if (rc < 0)
return rc;
return regs.eax == 1145651527 && regs.edx == 1145392204 ? 0 : -1;
@@ -328,12 +343,14 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
switch (cmd) {
case I8K_BIOS_VERSION:
- val = i8k_get_bios_version();
+ val = (bios_version[0] << 16) |
+ (bios_version[1] << 8) | bios_version[2];
break;
case I8K_MACHINE_ID:
memset(buff, 0, 16);
- strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL), sizeof(buff));
+ strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+ sizeof(buff));
break;
case I8K_FN_STATUS:
@@ -470,12 +487,13 @@ static ssize_t i8k_hwmon_show_temp(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
- int cpu_temp;
+ int index = to_sensor_dev_attr(devattr)->index;
+ int temp;
- cpu_temp = i8k_get_temp(0);
- if (cpu_temp < 0)
- return cpu_temp;
- return sprintf(buf, "%d\n", cpu_temp * 1000);
+ temp = i8k_get_temp(index);
+ if (temp < 0)
+ return temp;
+ return sprintf(buf, "%d\n", temp * 1000);
}
static ssize_t i8k_hwmon_show_fan(struct device *dev,
@@ -491,12 +509,44 @@ static ssize_t i8k_hwmon_show_fan(struct device *dev,
return sprintf(buf, "%d\n", fan_speed);
}
+static ssize_t i8k_hwmon_show_pwm(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ int status;
+
+ status = i8k_get_fan_status(index);
+ if (status < 0)
+ return -EIO;
+ return sprintf(buf, "%d\n", clamp_val(status * 128, 0, 255));
+}
+
+static ssize_t i8k_hwmon_set_pwm(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int index = to_sensor_dev_attr(attr)->index;
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 128), 0, 2);
+
+ mutex_lock(&i8k_mutex);
+ err = i8k_set_fan(index, val);
+ mutex_unlock(&i8k_mutex);
+
+ return err < 0 ? -EIO : count;
+}
+
static ssize_t i8k_hwmon_show_label(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
- static const char *labels[4] = {
- "i8k",
+ static const char *labels[3] = {
"CPU",
"Left Fan",
"Right Fan",
@@ -506,108 +556,108 @@ static ssize_t i8k_hwmon_show_label(struct device *dev,
return sprintf(buf, "%s\n", labels[index]);
}
-static DEVICE_ATTR(temp1_input, S_IRUGO, i8k_hwmon_show_temp, NULL);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 3);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
I8K_FAN_LEFT);
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm,
+ i8k_hwmon_set_pwm, I8K_FAN_LEFT);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
I8K_FAN_RIGHT);
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, i8k_hwmon_show_label, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm,
+ i8k_hwmon_set_pwm, I8K_FAN_RIGHT);
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_label, NULL, 2);
+
+static struct attribute *i8k_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr, /* 0 */
+ &sensor_dev_attr_temp1_label.dev_attr.attr, /* 1 */
+ &sensor_dev_attr_temp2_input.dev_attr.attr, /* 2 */
+ &sensor_dev_attr_temp3_input.dev_attr.attr, /* 3 */
+ &sensor_dev_attr_temp4_input.dev_attr.attr, /* 4 */
+ &sensor_dev_attr_fan1_input.dev_attr.attr, /* 5 */
+ &sensor_dev_attr_pwm1.dev_attr.attr, /* 6 */
+ &sensor_dev_attr_fan1_label.dev_attr.attr, /* 7 */
+ &sensor_dev_attr_fan2_input.dev_attr.attr, /* 8 */
+ &sensor_dev_attr_pwm2.dev_attr.attr, /* 9 */
+ &sensor_dev_attr_fan2_label.dev_attr.attr, /* 10 */
+ NULL
+};
-static void i8k_hwmon_remove_files(struct device *dev)
+static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
+ int index)
{
- device_remove_file(dev, &dev_attr_temp1_input);
- device_remove_file(dev, &sensor_dev_attr_fan1_input.dev_attr);
- device_remove_file(dev, &sensor_dev_attr_fan2_input.dev_attr);
- device_remove_file(dev, &sensor_dev_attr_temp1_label.dev_attr);
- device_remove_file(dev, &sensor_dev_attr_fan1_label.dev_attr);
- device_remove_file(dev, &sensor_dev_attr_fan2_label.dev_attr);
- device_remove_file(dev, &sensor_dev_attr_name.dev_attr);
+ if ((index == 0 || index == 1) &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
+ return 0;
+ if (index == 2 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP2))
+ return 0;
+ if (index == 3 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP3))
+ return 0;
+ if (index == 4 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP4))
+ return 0;
+ if (index >= 5 && index <= 7 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN1))
+ return 0;
+ if (index >= 8 && index <= 10 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2))
+ return 0;
+
+ return attr->mode;
}
+static const struct attribute_group i8k_group = {
+ .attrs = i8k_attrs,
+ .is_visible = i8k_is_visible,
+};
+__ATTRIBUTE_GROUPS(i8k);
+
static int __init i8k_init_hwmon(void)
{
int err;
- i8k_hwmon_dev = hwmon_device_register(NULL);
- if (IS_ERR(i8k_hwmon_dev)) {
- err = PTR_ERR(i8k_hwmon_dev);
- i8k_hwmon_dev = NULL;
- printk(KERN_ERR "i8k: hwmon registration failed (%d)\n", err);
- return err;
- }
-
- /* Required name attribute */
- err = device_create_file(i8k_hwmon_dev,
- &sensor_dev_attr_name.dev_attr);
- if (err)
- goto exit_unregister;
+ i8k_hwmon_flags = 0;
/* CPU temperature attributes, if temperature reading is OK */
err = i8k_get_temp(0);
- if (err < 0) {
- dev_dbg(i8k_hwmon_dev,
- "Not creating temperature attributes (%d)\n", err);
- } else {
- err = device_create_file(i8k_hwmon_dev, &dev_attr_temp1_input);
- if (err)
- goto exit_remove_files;
- err = device_create_file(i8k_hwmon_dev,
- &sensor_dev_attr_temp1_label.dev_attr);
- if (err)
- goto exit_remove_files;
- }
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP1;
+ /* check for additional temperature sensors */
+ err = i8k_get_temp(1);
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP2;
+ err = i8k_get_temp(2);
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP3;
+ err = i8k_get_temp(3);
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
/* Left fan attributes, if left fan is present */
err = i8k_get_fan_status(I8K_FAN_LEFT);
- if (err < 0) {
- dev_dbg(i8k_hwmon_dev,
- "Not creating %s fan attributes (%d)\n", "left", err);
- } else {
- err = device_create_file(i8k_hwmon_dev,
- &sensor_dev_attr_fan1_input.dev_attr);
- if (err)
- goto exit_remove_files;
- err = device_create_file(i8k_hwmon_dev,
- &sensor_dev_attr_fan1_label.dev_attr);
- if (err)
- goto exit_remove_files;
- }
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1;
/* Right fan attributes, if right fan is present */
err = i8k_get_fan_status(I8K_FAN_RIGHT);
- if (err < 0) {
- dev_dbg(i8k_hwmon_dev,
- "Not creating %s fan attributes (%d)\n", "right", err);
- } else {
- err = device_create_file(i8k_hwmon_dev,
- &sensor_dev_attr_fan2_input.dev_attr);
- if (err)
- goto exit_remove_files;
- err = device_create_file(i8k_hwmon_dev,
- &sensor_dev_attr_fan2_label.dev_attr);
- if (err)
- goto exit_remove_files;
- }
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
+ i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "i8k", NULL,
+ i8k_groups);
+ if (IS_ERR(i8k_hwmon_dev)) {
+ err = PTR_ERR(i8k_hwmon_dev);
+ i8k_hwmon_dev = NULL;
+ pr_err("hwmon registration failed (%d)\n", err);
+ return err;
+ }
return 0;
-
- exit_remove_files:
- i8k_hwmon_remove_files(i8k_hwmon_dev);
- exit_unregister:
- hwmon_device_unregister(i8k_hwmon_dev);
- return err;
}
-static void __exit i8k_exit_hwmon(void)
-{
- i8k_hwmon_remove_files(i8k_hwmon_dev);
- hwmon_device_unregister(i8k_hwmon_dev);
-}
-
-static struct dmi_system_id __initdata i8k_dmi_table[] = {
+static struct dmi_system_id i8k_dmi_table[] __initdata = {
{
.ident = "Dell Inspiron",
.matches = {
@@ -671,7 +721,23 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"),
},
},
- { }
+ {
+ .ident = "Dell Studio",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Studio"),
+ },
+ .driver_data = (void *)1, /* fan multiplier override */
+ },
+ {
+ .ident = "Dell XPS M140",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MXC051"),
+ },
+ .driver_data = (void *)1, /* fan multiplier override */
+ },
+ { }
};
/*
@@ -679,8 +745,7 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = {
*/
static int __init i8k_probe(void)
{
- char buff[4];
- int version;
+ const struct dmi_system_id *id;
/*
* Get DMI information
@@ -689,49 +754,30 @@ static int __init i8k_probe(void)
if (!ignore_dmi && !force)
return -ENODEV;
- printk(KERN_INFO "i8k: not running on a supported Dell system.\n");
- printk(KERN_INFO "i8k: vendor=%s, model=%s, version=%s\n",
+ pr_info("not running on a supported Dell system.\n");
+ pr_info("vendor=%s, model=%s, version=%s\n",
i8k_get_dmi_data(DMI_SYS_VENDOR),
i8k_get_dmi_data(DMI_PRODUCT_NAME),
i8k_get_dmi_data(DMI_BIOS_VERSION));
}
- strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION), sizeof(bios_version));
+ strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
+ sizeof(bios_version));
/*
* Get SMM Dell signature
*/
if (i8k_get_dell_signature(I8K_SMM_GET_DELL_SIG1) &&
i8k_get_dell_signature(I8K_SMM_GET_DELL_SIG2)) {
- printk(KERN_ERR "i8k: unable to get SMM Dell signature\n");
+ pr_err("unable to get SMM Dell signature\n");
if (!force)
return -ENODEV;
}
- /*
- * Get SMM BIOS version.
- */
- version = i8k_get_bios_version();
- if (version <= 0) {
- printk(KERN_WARNING "i8k: unable to get SMM BIOS version\n");
- } else {
- buff[0] = (version >> 16) & 0xff;
- buff[1] = (version >> 8) & 0xff;
- buff[2] = (version) & 0xff;
- buff[3] = '\0';
- /*
- * If DMI BIOS version is unknown use SMM BIOS version.
- */
- if (!dmi_get_system_info(DMI_BIOS_VERSION))
- strlcpy(bios_version, buff, sizeof(bios_version));
-
- /*
- * Check if the two versions match.
- */
- if (strncmp(buff, bios_version, sizeof(bios_version)) != 0)
- printk(KERN_WARNING "i8k: BIOS version mismatch: %s != %s\n",
- buff, bios_version);
- }
+ i8k_fan_mult = fan_mult;
+ id = dmi_first_match(i8k_dmi_table);
+ if (id && fan_mult == I8K_FAN_MULT && id->driver_data)
+ i8k_fan_mult = (unsigned long)id->driver_data;
return 0;
}
@@ -754,10 +800,6 @@ static int __init i8k_init(void)
if (err)
goto exit_remove_proc;
- printk(KERN_INFO
- "Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n",
- I8K_VERSION);
-
return 0;
exit_remove_proc:
@@ -767,7 +809,7 @@ static int __init i8k_init(void)
static void __exit i8k_exit(void)
{
- i8k_exit_hwmon();
+ hwmon_device_unregister(i8k_hwmon_dev);
remove_proc_entry("i8k", NULL);
}
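The i8k changes above replace per-attribute device_create_file() calls with a static attribute list filtered by .is_visible and registered through hwmon_device_register_with_groups(). A stripped-down sketch of that pattern with hypothetical demo_* names, assuming a single temperature sensor:

#include <linux/kernel.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>

static ssize_t demo_show_temp(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42000);	/* millidegrees Celsius */
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, demo_show_temp, NULL, 0);

static struct attribute *demo_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

static umode_t demo_is_visible(struct kobject *kobj, struct attribute *attr,
			       int index)
{
	return attr->mode;	/* return 0 here to hide a probed-out sensor */
}

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
	.is_visible = demo_is_visible,
};
__ATTRIBUTE_GROUPS(demo);

static struct device *demo_register(struct device *parent, void *drvdata)
{
	/* the hwmon core creates the mandatory "name" attribute itself */
	return hwmon_device_register_with_groups(parent, "demo", drvdata,
						 demo_groups);
}

Because the core now supplies the "name" attribute, the old sensor_dev_attr_name entry and the hand-rolled remove/unregister error paths can be dropped, as the hunks above do.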
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index a22a7a502740..f5e4cd7617f6 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -201,7 +201,7 @@ static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
}
bt->state = BT_STATE_IDLE; /* start here */
bt->complete = BT_STATE_IDLE; /* end here */
- bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * 1000000;
+ bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC;
bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
/* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
return 3; /* We claim 3 bytes of space; ought to check SPMI table */
@@ -613,7 +613,7 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
HOST2BMC(42); /* Sequence number */
HOST2BMC(3); /* Cmd == Soft reset */
BT_CONTROL(BT_H2B_ATN);
- bt->timeout = BT_RESET_DELAY * 1000000;
+ bt->timeout = BT_RESET_DELAY * USEC_PER_SEC;
BT_STATE_CHANGE(BT_STATE_RESET3,
SI_SM_CALL_WITH_DELAY);
@@ -651,14 +651,14 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
bt_init_data(bt, bt->io);
if ((i == 8) && !BT_CAP[2]) {
bt->BT_CAP_outreqs = BT_CAP[3];
- bt->BT_CAP_req2rsp = BT_CAP[6] * 1000000;
+ bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
bt->BT_CAP_retries = BT_CAP[7];
} else
printk(KERN_WARNING "IPMI BT: using default values\n");
if (!bt->BT_CAP_outreqs)
bt->BT_CAP_outreqs = 1;
printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
- bt->BT_CAP_req2rsp / 1000000L, bt->BT_CAP_retries);
+ bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
bt->timeout = bt->BT_CAP_req2rsp;
return SI_SM_CALL_WITHOUT_DELAY;
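The BT, KCS and SMIC hunks in this series all make the same substitution; a trivial sketch of the idiom (hypothetical name):

#include <linux/time.h>		/* USEC_PER_SEC and friends */

#define DEMO_RETRY_TIMEOUT	(5 * USEC_PER_SEC)	/* 5 s, in microseconds */

Spelling the unit out also makes later conversions such as "req2rsp / USEC_PER_SEC" self-documenting.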
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index e53fc24c6af3..6a4bdc18955a 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -118,8 +118,8 @@ enum kcs_states {
#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
/* Timeouts in microseconds. */
-#define IBF_RETRY_TIMEOUT 5000000
-#define OBF_RETRY_TIMEOUT 5000000
+#define IBF_RETRY_TIMEOUT (5*USEC_PER_SEC)
+#define OBF_RETRY_TIMEOUT (5*USEC_PER_SEC)
#define MAX_ERROR_RETRIES 10
#define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 15e4a6031934..03f41896d090 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1358,7 +1358,7 @@ static int std_irq_setup(struct smi_info *info)
if (info->si_type == SI_BT) {
rv = request_irq(info->irq,
si_bt_irq_handler,
- IRQF_SHARED | IRQF_DISABLED,
+ IRQF_SHARED,
DEVICE_NAME,
info);
if (!rv)
@@ -1368,7 +1368,7 @@ static int std_irq_setup(struct smi_info *info)
} else
rv = request_irq(info->irq,
si_irq_handler,
- IRQF_SHARED | IRQF_DISABLED,
+ IRQF_SHARED,
DEVICE_NAME,
info);
if (rv) {
@@ -1849,11 +1849,15 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
info->irq_setup = std_irq_setup;
info->slave_addr = ipmb;
- if (!add_smi(info)) {
- if (try_smi_init(info))
- cleanup_one_si(info);
- } else {
+ rv = add_smi(info);
+ if (rv) {
kfree(info);
+ goto out;
+ }
+ rv = try_smi_init(info);
+ if (rv) {
+ cleanup_one_si(info);
+ goto out;
}
} else {
/* remove */
@@ -2067,6 +2071,7 @@ struct SPMITable {
static int try_init_spmi(struct SPMITable *spmi)
{
struct smi_info *info;
+ int rv;
if (spmi->IPMIlegacy != 1) {
printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
@@ -2141,10 +2146,11 @@ static int try_init_spmi(struct SPMITable *spmi)
info->io.addr_data, info->io.regsize, info->io.regspacing,
info->irq);
- if (add_smi(info))
+ rv = add_smi(info);
+ if (rv)
kfree(info);
- return 0;
+ return rv;
}
static void spmi_find_bmc(void)
@@ -2178,6 +2184,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
acpi_handle handle;
acpi_status status;
unsigned long long tmp;
+ int rv;
acpi_dev = pnp_acpi_device(dev);
if (!acpi_dev)
@@ -2259,10 +2266,11 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
res, info->io.regsize, info->io.regspacing,
info->irq);
- if (add_smi(info))
- goto err_free;
+ rv = add_smi(info);
+ if (rv)
+ kfree(info);
- return 0;
+ return rv;
err_free:
kfree(info);
@@ -2566,16 +2574,20 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
&pdev->resource[0], info->io.regsize, info->io.regspacing,
info->irq);
- if (add_smi(info))
+ rv = add_smi(info);
+ if (rv) {
kfree(info);
+ pci_disable_device(pdev);
+ }
- return 0;
+ return rv;
}
static void ipmi_pci_remove(struct pci_dev *pdev)
{
struct smi_info *info = pci_get_drvdata(pdev);
cleanup_one_si(info);
+ pci_disable_device(pdev);
}
static struct pci_device_id ipmi_pci_devices[] = {
@@ -2670,9 +2682,10 @@ static int ipmi_probe(struct platform_device *dev)
dev_set_drvdata(&dev->dev, info);
- if (add_smi(info)) {
+ ret = add_smi(info);
+ if (ret) {
kfree(info);
- return -EBUSY;
+ return ret;
}
#endif
return 0;
@@ -2711,6 +2724,7 @@ static struct platform_driver ipmi_driver = {
static int ipmi_parisc_probe(struct parisc_device *dev)
{
struct smi_info *info;
+ int rv;
info = smi_info_alloc();
@@ -2736,9 +2750,10 @@ static int ipmi_parisc_probe(struct parisc_device *dev)
dev_set_drvdata(&dev->dev, info);
- if (add_smi(info)) {
+ rv = add_smi(info);
+ if (rv) {
kfree(info);
- return -EBUSY;
+ return rv;
}
return 0;
@@ -2773,7 +2788,7 @@ static int wait_for_msg_done(struct smi_info *smi_info)
smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
schedule_timeout_uninterruptible(1);
smi_result = smi_info->handlers->event(
- smi_info->si_sm, 100);
+ smi_info->si_sm, jiffies_to_usecs(1));
} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
smi_result = smi_info->handlers->event(
smi_info->si_sm, 0);
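The ipmi_si probe paths above converge on one error-handling shape: keep the return value of add_smi()/try_smi_init(), undo any setup, and propagate the error instead of collapsing it to 0 or -EBUSY. A condensed sketch as it would read inside ipmi_si_intf.c (function names from the driver, body illustrative only):

#include <linux/slab.h>

static int demo_probe_one(struct smi_info *info)
{
	int rv;

	rv = add_smi(info);
	if (rv) {
		kfree(info);		/* registration failed, nothing to clean */
		return rv;
	}

	rv = try_smi_init(info);
	if (rv)
		cleanup_one_si(info);	/* unwind the partially set-up interface */

	return rv;
}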
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
index faed92971907..c8e77afa8b96 100644
--- a/drivers/char/ipmi/ipmi_smic_sm.c
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -80,7 +80,7 @@ enum smic_states {
#define SMIC_MAX_ERROR_RETRIES 3
/* Timeouts in microseconds. */
-#define SMIC_RETRY_TIMEOUT 2000000
+#define SMIC_RETRY_TIMEOUT (2*USEC_PER_SEC)
/* SMIC Flags Register Bits */
#define SMIC_RX_DATA_READY 0x80
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 0913d79424d3..c4094c4e22c1 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -587,6 +587,8 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
return -ENODEV;
switch ( cmd ) {
case LPTIME:
+ if (arg > UINT_MAX / HZ)
+ return -EINVAL;
LP_TIME(minor) = arg * HZ/100;
break;
case LPCHAR:
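The LPTIME guard above exists because LP_TIME stores arg * HZ / 100 in an unsigned int, so the multiplication can overflow before the division. A sketch of the arithmetic being protected (hypothetical helper name):

#include <linux/kernel.h>
#include <linux/jiffies.h>

static int demo_set_lp_time(unsigned long arg, unsigned int *out)
{
	if (arg > UINT_MAX / HZ)	/* arg * HZ would wrap */
		return -EINVAL;
	*out = arg * HZ / 100;		/* centiseconds to jiffies */
	return 0;
}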
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index f895a8c8a244..92c5937f80c3 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -22,7 +22,6 @@
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
-#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index 1fd00dc06897..76c490fa0511 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -168,7 +168,10 @@ static irqreturn_t button_handler (int irq, void *dev_id)
static int button_read (struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
- interruptible_sleep_on (&button_wait_queue);
+ DEFINE_WAIT(wait);
+ prepare_to_wait(&button_wait_queue, &wait, TASK_INTERRUPTIBLE);
+ schedule();
+ finish_wait(&button_wait_queue, &wait);
return (copy_to_user (buffer, &button_output_buffer, bcount))
? -EFAULT : bcount;
}
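A sketch of the open-coded wait that replaces interruptible_sleep_on() above (hypothetical names). Like the call it replaces, it still has no condition to re-check on wake-up; wait_event_interruptible() is the usual choice when such a condition exists:

#include <linux/wait.h>
#include <linux/sched.h>

static void demo_wait(wait_queue_head_t *wq)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);	/* enqueue before sleeping */
	schedule();					/* sleep until woken or signalled */
	finish_wait(wq, &wait);				/* dequeue and restore task state */
}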
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index d39cca659a3f..8320abd1ef14 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2511,8 +2511,8 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
/* If port is closing, signal caller to try again */
if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
- if (port->flags & ASYNC_CLOSING)
- interruptible_sleep_on(&port->close_wait);
+ wait_event_interruptible_tty(tty, port->close_wait,
+ !(port->flags & ASYNC_CLOSING));
retval = ((port->flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS);
goto cleanup;
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index f3223aac4df1..6e8d65e9b1d3 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -190,7 +190,7 @@ static int bind_get(int number, dev_t *dev)
struct raw_device_data *rawdev;
struct block_device *bdev;
- if (number <= 0 || number >= MAX_RAW_MINORS)
+ if (number <= 0 || number >= max_raw_minors)
return -EINVAL;
rawdev = &raw_devices[number];
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index b80a4000daee..4d85dd681b81 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -2,7 +2,7 @@
# Makefile for the kernel tpm device drivers.
#
obj-$(CONFIG_TCG_TPM) += tpm.o
-tpm-y := tpm-interface.o
+tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o
tpm-$(CONFIG_ACPI) += tpm_ppi.o
ifdef CONFIG_ACPI
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
new file mode 100644
index 000000000000..d9b774e02a1f
--- /dev/null
+++ b/drivers/char/tpm/tpm-dev.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Copyright (C) 2013 Obsidian Research Corp
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * Device file system interface to the TPM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "tpm.h"
+
+struct file_priv {
+ struct tpm_chip *chip;
+
+ /* Data passed to and from the tpm via the read/write calls */
+ atomic_t data_pending;
+ struct mutex buffer_mutex;
+
+ struct timer_list user_read_timer; /* user needs to claim result */
+ struct work_struct work;
+
+ u8 data_buffer[TPM_BUFSIZE];
+};
+
+static void user_reader_timeout(unsigned long ptr)
+{
+ struct file_priv *priv = (struct file_priv *)ptr;
+
+ schedule_work(&priv->work);
+}
+
+static void timeout_work(struct work_struct *work)
+{
+ struct file_priv *priv = container_of(work, struct file_priv, work);
+
+ mutex_lock(&priv->buffer_mutex);
+ atomic_set(&priv->data_pending, 0);
+ memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
+ mutex_unlock(&priv->buffer_mutex);
+}
+
+static int tpm_open(struct inode *inode, struct file *file)
+{
+ struct miscdevice *misc = file->private_data;
+ struct tpm_chip *chip = container_of(misc, struct tpm_chip,
+ vendor.miscdev);
+ struct file_priv *priv;
+
+ /* It's assured that the chip will be opened just once,
+ * by the check of is_open variable, which is protected
+ * by driver_lock. */
+ if (test_and_set_bit(0, &chip->is_open)) {
+ dev_dbg(chip->dev, "Another process owns this TPM\n");
+ return -EBUSY;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL) {
+ clear_bit(0, &chip->is_open);
+ return -ENOMEM;
+ }
+
+ priv->chip = chip;
+ atomic_set(&priv->data_pending, 0);
+ mutex_init(&priv->buffer_mutex);
+ setup_timer(&priv->user_read_timer, user_reader_timeout,
+ (unsigned long)priv);
+ INIT_WORK(&priv->work, timeout_work);
+
+ file->private_data = priv;
+ get_device(chip->dev);
+ return 0;
+}
+
+static ssize_t tpm_read(struct file *file, char __user *buf,
+ size_t size, loff_t *off)
+{
+ struct file_priv *priv = file->private_data;
+ ssize_t ret_size;
+ int rc;
+
+ del_singleshot_timer_sync(&priv->user_read_timer);
+ flush_work(&priv->work);
+ ret_size = atomic_read(&priv->data_pending);
+ if (ret_size > 0) { /* relay data */
+ ssize_t orig_ret_size = ret_size;
+ if (size < ret_size)
+ ret_size = size;
+
+ mutex_lock(&priv->buffer_mutex);
+ rc = copy_to_user(buf, priv->data_buffer, ret_size);
+ memset(priv->data_buffer, 0, orig_ret_size);
+ if (rc)
+ ret_size = -EFAULT;
+
+ mutex_unlock(&priv->buffer_mutex);
+ }
+
+ atomic_set(&priv->data_pending, 0);
+
+ return ret_size;
+}
+
+static ssize_t tpm_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *off)
+{
+ struct file_priv *priv = file->private_data;
+ size_t in_size = size;
+ ssize_t out_size;
+
+ /* cannot perform a write until the read has cleared
+ either via tpm_read or a user_read_timer timeout.
+ This also prevents splitted buffered writes from blocking here.
+ */
+ if (atomic_read(&priv->data_pending) != 0)
+ return -EBUSY;
+
+ if (in_size > TPM_BUFSIZE)
+ return -E2BIG;
+
+ mutex_lock(&priv->buffer_mutex);
+
+ if (copy_from_user
+ (priv->data_buffer, (void __user *) buf, in_size)) {
+ mutex_unlock(&priv->buffer_mutex);
+ return -EFAULT;
+ }
+
+ /* atomic tpm command send and result receive */
+ out_size = tpm_transmit(priv->chip, priv->data_buffer,
+ sizeof(priv->data_buffer));
+ if (out_size < 0) {
+ mutex_unlock(&priv->buffer_mutex);
+ return out_size;
+ }
+
+ atomic_set(&priv->data_pending, out_size);
+ mutex_unlock(&priv->buffer_mutex);
+
+ /* Set a timeout by which the reader must come claim the result */
+ mod_timer(&priv->user_read_timer, jiffies + (60 * HZ));
+
+ return in_size;
+}
+
+/*
+ * Called on file close
+ */
+static int tpm_release(struct inode *inode, struct file *file)
+{
+ struct file_priv *priv = file->private_data;
+
+ del_singleshot_timer_sync(&priv->user_read_timer);
+ flush_work(&priv->work);
+ file->private_data = NULL;
+ atomic_set(&priv->data_pending, 0);
+ clear_bit(0, &priv->chip->is_open);
+ put_device(priv->chip->dev);
+ kfree(priv);
+ return 0;
+}
+
+static const struct file_operations tpm_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = tpm_open,
+ .read = tpm_read,
+ .write = tpm_write,
+ .release = tpm_release,
+};
+
+int tpm_dev_add_device(struct tpm_chip *chip)
+{
+ int rc;
+
+ chip->vendor.miscdev.fops = &tpm_fops;
+ if (chip->dev_num == 0)
+ chip->vendor.miscdev.minor = TPM_MINOR;
+ else
+ chip->vendor.miscdev.minor = MISC_DYNAMIC_MINOR;
+
+ chip->vendor.miscdev.name = chip->devname;
+ chip->vendor.miscdev.parent = chip->dev;
+
+ rc = misc_register(&chip->vendor.miscdev);
+ if (rc) {
+ chip->vendor.miscdev.name = NULL;
+ dev_err(chip->dev,
+ "unable to misc_register %s, minor %d err=%d\n",
+ chip->vendor.miscdev.name,
+ chip->vendor.miscdev.minor, rc);
+ }
+ return rc;
+}
+
+void tpm_dev_del_device(struct tpm_chip *chip)
+{
+ if (chip->vendor.miscdev.name)
+ misc_deregister(&chip->vendor.miscdev);
+}
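One design choice worth noting in tpm-dev.c above: the 60-second "reader must claim the result" timeout is enforced by a timer that only schedules a work item, because the cleanup needs buffer_mutex and timer callbacks run in atomic context. A reduced sketch with hypothetical demo_* names, using the same old-style timer API as the file above:

#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_priv {
	struct timer_list timer;
	struct work_struct work;
};

static void demo_timer_fn(unsigned long ptr)
{
	struct demo_priv *p = (struct demo_priv *)ptr;

	schedule_work(&p->work);	/* defer mutex-taking cleanup to process context */
}

static void demo_init(struct demo_priv *p, work_func_t cleanup)
{
	setup_timer(&p->timer, demo_timer_fn, (unsigned long)p);
	INIT_WORK(&p->work, cleanup);
}

static void demo_arm(struct demo_priv *p)
{
	mod_timer(&p->timer, jiffies + 60 * HZ);	/* reader has 60 s to collect */
}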
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 6ae41d337630..62e10fd1e1cb 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -32,13 +32,6 @@
#include "tpm.h"
#include "tpm_eventlog.h"
-enum tpm_duration {
- TPM_SHORT = 0,
- TPM_MEDIUM = 1,
- TPM_LONG = 2,
- TPM_UNDEFINED,
-};
-
#define TPM_MAX_ORDINAL 243
#define TSC_MAX_ORDINAL 12
#define TPM_PROTECTED_COMMAND 0x00
@@ -312,23 +305,6 @@ static const u8 tpm_ordinal_duration[TPM_MAX_ORDINAL] = {
TPM_MEDIUM,
};
-static void user_reader_timeout(unsigned long ptr)
-{
- struct tpm_chip *chip = (struct tpm_chip *) ptr;
-
- schedule_work(&chip->work);
-}
-
-static void timeout_work(struct work_struct *work)
-{
- struct tpm_chip *chip = container_of(work, struct tpm_chip, work);
-
- mutex_lock(&chip->buffer_mutex);
- atomic_set(&chip->data_pending, 0);
- memset(chip->data_buffer, 0, TPM_BUFSIZE);
- mutex_unlock(&chip->buffer_mutex);
-}
-
/*
* Returns max number of jiffies to wait
*/
@@ -355,8 +331,8 @@ EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
/*
* Internal kernel interface to transmit TPM commands
*/
-static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
- size_t bufsiz)
+ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
+ size_t bufsiz)
{
ssize_t rc;
u32 count, ordinal;
@@ -377,7 +353,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
mutex_lock(&chip->tpm_mutex);
- rc = chip->vendor.send(chip, (u8 *) buf, count);
+ rc = chip->ops->send(chip, (u8 *) buf, count);
if (rc < 0) {
dev_err(chip->dev,
"tpm_transmit: tpm_send: error %zd\n", rc);
@@ -389,12 +365,12 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
do {
- u8 status = chip->vendor.status(chip);
- if ((status & chip->vendor.req_complete_mask) ==
- chip->vendor.req_complete_val)
+ u8 status = chip->ops->status(chip);
+ if ((status & chip->ops->req_complete_mask) ==
+ chip->ops->req_complete_val)
goto out_recv;
- if (chip->vendor.req_canceled(chip, status)) {
+ if (chip->ops->req_canceled(chip, status)) {
dev_err(chip->dev, "Operation Canceled\n");
rc = -ECANCELED;
goto out;
@@ -404,13 +380,13 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
rmb();
} while (time_before(jiffies, stop));
- chip->vendor.cancel(chip);
+ chip->ops->cancel(chip);
dev_err(chip->dev, "Operation Timed out\n");
rc = -ETIME;
goto out;
out_recv:
- rc = chip->vendor.recv(chip, (u8 *) buf, bufsiz);
+ rc = chip->ops->recv(chip, (u8 *) buf, bufsiz);
if (rc < 0)
dev_err(chip->dev,
"tpm_transmit: tpm_recv: error %zd\n", rc);
@@ -422,24 +398,6 @@ out:
#define TPM_DIGEST_SIZE 20
#define TPM_RET_CODE_IDX 6
-enum tpm_capabilities {
- TPM_CAP_FLAG = cpu_to_be32(4),
- TPM_CAP_PROP = cpu_to_be32(5),
- CAP_VERSION_1_1 = cpu_to_be32(0x06),
- CAP_VERSION_1_2 = cpu_to_be32(0x1A)
-};
-
-enum tpm_sub_capabilities {
- TPM_CAP_PROP_PCR = cpu_to_be32(0x101),
- TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103),
- TPM_CAP_FLAG_PERM = cpu_to_be32(0x108),
- TPM_CAP_FLAG_VOL = cpu_to_be32(0x109),
- TPM_CAP_PROP_OWNER = cpu_to_be32(0x111),
- TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115),
- TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120),
-
-};
-
static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
int len, const char *desc)
{
@@ -459,7 +417,6 @@ static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
}
#define TPM_INTERNAL_RESULT_SIZE 200
-#define TPM_TAG_RQU_COMMAND cpu_to_be16(193)
#define TPM_ORD_GET_CAP cpu_to_be32(101)
#define TPM_ORD_GET_RANDOM cpu_to_be32(70)
@@ -659,70 +616,6 @@ static int tpm_continue_selftest(struct tpm_chip *chip)
return rc;
}
-ssize_t tpm_show_enabled(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- cap_t cap;
- ssize_t rc;
-
- rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
- "attempting to determine the permanent enabled state");
- if (rc)
- return 0;
-
- rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
- return rc;
-}
-EXPORT_SYMBOL_GPL(tpm_show_enabled);
-
-ssize_t tpm_show_active(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- cap_t cap;
- ssize_t rc;
-
- rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
- "attempting to determine the permanent active state");
- if (rc)
- return 0;
-
- rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
- return rc;
-}
-EXPORT_SYMBOL_GPL(tpm_show_active);
-
-ssize_t tpm_show_owned(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- cap_t cap;
- ssize_t rc;
-
- rc = tpm_getcap(dev, TPM_CAP_PROP_OWNER, &cap,
- "attempting to determine the owner state");
- if (rc)
- return 0;
-
- rc = sprintf(buf, "%d\n", cap.owned);
- return rc;
-}
-EXPORT_SYMBOL_GPL(tpm_show_owned);
-
-ssize_t tpm_show_temp_deactivated(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- cap_t cap;
- ssize_t rc;
-
- rc = tpm_getcap(dev, TPM_CAP_FLAG_VOL, &cap,
- "attempting to determine the temporary state");
- if (rc)
- return 0;
-
- rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
- return rc;
-}
-EXPORT_SYMBOL_GPL(tpm_show_temp_deactivated);
-
/*
* tpm_chip_find_get - return tpm_chip for given chip number
*/
@@ -752,7 +645,7 @@ static struct tpm_input_header pcrread_header = {
.ordinal = TPM_ORDINAL_PCRREAD
};
-static int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
+int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
int rc;
struct tpm_cmd_t cmd;
@@ -787,7 +680,7 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
chip = tpm_chip_find_get(chip_num);
if (chip == NULL)
return -ENODEV;
- rc = __tpm_pcr_read(chip, pcr_idx, res_buf);
+ rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf);
tpm_chip_put(chip);
return rc;
}
@@ -911,196 +804,15 @@ int tpm_send(u32 chip_num, void *cmd, size_t buflen)
}
EXPORT_SYMBOL_GPL(tpm_send);
-ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- cap_t cap;
- u8 digest[TPM_DIGEST_SIZE];
- ssize_t rc;
- int i, j, num_pcrs;
- char *str = buf;
- struct tpm_chip *chip = dev_get_drvdata(dev);
-
- rc = tpm_getcap(dev, TPM_CAP_PROP_PCR, &cap,
- "attempting to determine the number of PCRS");
- if (rc)
- return 0;
-
- num_pcrs = be32_to_cpu(cap.num_pcrs);
- for (i = 0; i < num_pcrs; i++) {
- rc = __tpm_pcr_read(chip, i, digest);
- if (rc)
- break;
- str += sprintf(str, "PCR-%02d: ", i);
- for (j = 0; j < TPM_DIGEST_SIZE; j++)
- str += sprintf(str, "%02X ", digest[j]);
- str += sprintf(str, "\n");
- }
- return str - buf;
-}
-EXPORT_SYMBOL_GPL(tpm_show_pcrs);
-
-#define READ_PUBEK_RESULT_SIZE 314
-#define TPM_ORD_READPUBEK cpu_to_be32(124)
-static struct tpm_input_header tpm_readpubek_header = {
- .tag = TPM_TAG_RQU_COMMAND,
- .length = cpu_to_be32(30),
- .ordinal = TPM_ORD_READPUBEK
-};
-
-ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- u8 *data;
- struct tpm_cmd_t tpm_cmd;
- ssize_t err;
- int i, rc;
- char *str = buf;
-
- struct tpm_chip *chip = dev_get_drvdata(dev);
-
- tpm_cmd.header.in = tpm_readpubek_header;
- err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
- "attempting to read the PUBEK");
- if (err)
- goto out;
-
- /*
- ignore header 10 bytes
- algorithm 32 bits (1 == RSA )
- encscheme 16 bits
- sigscheme 16 bits
- parameters (RSA 12->bytes: keybit, #primes, expbit)
- keylenbytes 32 bits
- 256 byte modulus
- ignore checksum 20 bytes
- */
- data = tpm_cmd.params.readpubek_out_buffer;
- str +=
- sprintf(str,
- "Algorithm: %02X %02X %02X %02X\n"
- "Encscheme: %02X %02X\n"
- "Sigscheme: %02X %02X\n"
- "Parameters: %02X %02X %02X %02X "
- "%02X %02X %02X %02X "
- "%02X %02X %02X %02X\n"
- "Modulus length: %d\n"
- "Modulus:\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5],
- data[6], data[7],
- data[12], data[13], data[14], data[15],
- data[16], data[17], data[18], data[19],
- data[20], data[21], data[22], data[23],
- be32_to_cpu(*((__be32 *) (data + 24))));
-
- for (i = 0; i < 256; i++) {
- str += sprintf(str, "%02X ", data[i + 28]);
- if ((i + 1) % 16 == 0)
- str += sprintf(str, "\n");
- }
-out:
- rc = str - buf;
- return rc;
-}
-EXPORT_SYMBOL_GPL(tpm_show_pubek);
-
-
-ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- cap_t cap;
- ssize_t rc;
- char *str = buf;
-
- rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
- "attempting to determine the manufacturer");
- if (rc)
- return 0;
- str += sprintf(str, "Manufacturer: 0x%x\n",
- be32_to_cpu(cap.manufacturer_id));
-
- /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
- rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap,
- "attempting to determine the 1.2 version");
- if (!rc) {
- str += sprintf(str,
- "TCG version: %d.%d\nFirmware version: %d.%d\n",
- cap.tpm_version_1_2.Major,
- cap.tpm_version_1_2.Minor,
- cap.tpm_version_1_2.revMajor,
- cap.tpm_version_1_2.revMinor);
- } else {
- /* Otherwise just use TPM_STRUCT_VER */
- rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
- "attempting to determine the 1.1 version");
- if (rc)
- return 0;
- str += sprintf(str,
- "TCG version: %d.%d\nFirmware version: %d.%d\n",
- cap.tpm_version.Major,
- cap.tpm_version.Minor,
- cap.tpm_version.revMajor,
- cap.tpm_version.revMinor);
- }
-
- return str - buf;
-}
-EXPORT_SYMBOL_GPL(tpm_show_caps);
-
-ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
-
- if (chip->vendor.duration[TPM_LONG] == 0)
- return 0;
-
- return sprintf(buf, "%d %d %d [%s]\n",
- jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
- jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
- jiffies_to_usecs(chip->vendor.duration[TPM_LONG]),
- chip->vendor.duration_adjusted
- ? "adjusted" : "original");
-}
-EXPORT_SYMBOL_GPL(tpm_show_durations);
-
-ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d %d %d %d [%s]\n",
- jiffies_to_usecs(chip->vendor.timeout_a),
- jiffies_to_usecs(chip->vendor.timeout_b),
- jiffies_to_usecs(chip->vendor.timeout_c),
- jiffies_to_usecs(chip->vendor.timeout_d),
- chip->vendor.timeout_adjusted
- ? "adjusted" : "original");
-}
-EXPORT_SYMBOL_GPL(tpm_show_timeouts);
-
-ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- if (chip == NULL)
- return 0;
-
- chip->vendor.cancel(chip);
- return count;
-}
-EXPORT_SYMBOL_GPL(tpm_store_cancel);
-
static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
bool check_cancel, bool *canceled)
{
- u8 status = chip->vendor.status(chip);
+ u8 status = chip->ops->status(chip);
*canceled = false;
if ((status & mask) == mask)
return true;
- if (check_cancel && chip->vendor.req_canceled(chip, status)) {
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
*canceled = true;
return true;
}
@@ -1116,7 +828,7 @@ int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
bool canceled = false;
/* check current status */
- status = chip->vendor.status(chip);
+ status = chip->ops->status(chip);
if ((status & mask) == mask)
return 0;
@@ -1143,7 +855,7 @@ again:
} else {
do {
msleep(TPM_TIMEOUT);
- status = chip->vendor.status(chip);
+ status = chip->ops->status(chip);
if ((status & mask) == mask)
return 0;
} while (time_before(jiffies, stop));
@@ -1151,127 +863,6 @@ again:
return -ETIME;
}
EXPORT_SYMBOL_GPL(wait_for_tpm_stat);
-/*
- * Device file system interface to the TPM
- *
- * It's assured that the chip will be opened just once,
- * by the check of is_open variable, which is protected
- * by driver_lock.
- */
-int tpm_open(struct inode *inode, struct file *file)
-{
- struct miscdevice *misc = file->private_data;
- struct tpm_chip *chip = container_of(misc, struct tpm_chip,
- vendor.miscdev);
-
- if (test_and_set_bit(0, &chip->is_open)) {
- dev_dbg(chip->dev, "Another process owns this TPM\n");
- return -EBUSY;
- }
-
- chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL);
- if (chip->data_buffer == NULL) {
- clear_bit(0, &chip->is_open);
- return -ENOMEM;
- }
-
- atomic_set(&chip->data_pending, 0);
-
- file->private_data = chip;
- get_device(chip->dev);
- return 0;
-}
-EXPORT_SYMBOL_GPL(tpm_open);
-
-/*
- * Called on file close
- */
-int tpm_release(struct inode *inode, struct file *file)
-{
- struct tpm_chip *chip = file->private_data;
-
- del_singleshot_timer_sync(&chip->user_read_timer);
- flush_work(&chip->work);
- file->private_data = NULL;
- atomic_set(&chip->data_pending, 0);
- kzfree(chip->data_buffer);
- clear_bit(0, &chip->is_open);
- put_device(chip->dev);
- return 0;
-}
-EXPORT_SYMBOL_GPL(tpm_release);
-
-ssize_t tpm_write(struct file *file, const char __user *buf,
- size_t size, loff_t *off)
-{
- struct tpm_chip *chip = file->private_data;
- size_t in_size = size;
- ssize_t out_size;
-
- /* cannot perform a write until the read has cleared
- either via tpm_read or a user_read_timer timeout.
- This also prevents splitted buffered writes from blocking here.
- */
- if (atomic_read(&chip->data_pending) != 0)
- return -EBUSY;
-
- if (in_size > TPM_BUFSIZE)
- return -E2BIG;
-
- mutex_lock(&chip->buffer_mutex);
-
- if (copy_from_user
- (chip->data_buffer, (void __user *) buf, in_size)) {
- mutex_unlock(&chip->buffer_mutex);
- return -EFAULT;
- }
-
- /* atomic tpm command send and result receive */
- out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
- if (out_size < 0) {
- mutex_unlock(&chip->buffer_mutex);
- return out_size;
- }
-
- atomic_set(&chip->data_pending, out_size);
- mutex_unlock(&chip->buffer_mutex);
-
- /* Set a timeout by which the reader must come claim the result */
- mod_timer(&chip->user_read_timer, jiffies + (60 * HZ));
-
- return in_size;
-}
-EXPORT_SYMBOL_GPL(tpm_write);
-
-ssize_t tpm_read(struct file *file, char __user *buf,
- size_t size, loff_t *off)
-{
- struct tpm_chip *chip = file->private_data;
- ssize_t ret_size;
- int rc;
-
- del_singleshot_timer_sync(&chip->user_read_timer);
- flush_work(&chip->work);
- ret_size = atomic_read(&chip->data_pending);
- if (ret_size > 0) { /* relay data */
- ssize_t orig_ret_size = ret_size;
- if (size < ret_size)
- ret_size = size;
-
- mutex_lock(&chip->buffer_mutex);
- rc = copy_to_user(buf, chip->data_buffer, ret_size);
- memset(chip->data_buffer, 0, orig_ret_size);
- if (rc)
- ret_size = -EFAULT;
-
- mutex_unlock(&chip->buffer_mutex);
- }
-
- atomic_set(&chip->data_pending, 0);
-
- return ret_size;
-}
-EXPORT_SYMBOL_GPL(tpm_read);
void tpm_remove_hardware(struct device *dev)
{
@@ -1287,8 +878,8 @@ void tpm_remove_hardware(struct device *dev)
spin_unlock(&driver_lock);
synchronize_rcu();
- misc_deregister(&chip->vendor.miscdev);
- sysfs_remove_group(&dev->kobj, chip->vendor.attr_group);
+ tpm_dev_del_device(chip);
+ tpm_sysfs_del_device(chip);
tpm_remove_ppi(&dev->kobj);
tpm_bios_log_teardown(chip->bios_dir);
@@ -1436,9 +1027,6 @@ void tpm_dev_vendor_release(struct tpm_chip *chip)
if (!chip)
return;
- if (chip->vendor.release)
- chip->vendor.release(chip->dev);
-
clear_bit(chip->dev_num, dev_mask);
}
EXPORT_SYMBOL_GPL(tpm_dev_vendor_release);
@@ -1448,7 +1036,7 @@ EXPORT_SYMBOL_GPL(tpm_dev_vendor_release);
* Once all references to platform device are down to 0,
* release all allocated structures.
*/
-void tpm_dev_release(struct device *dev)
+static void tpm_dev_release(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
@@ -1460,7 +1048,6 @@ void tpm_dev_release(struct device *dev)
chip->release(dev);
kfree(chip);
}
-EXPORT_SYMBOL_GPL(tpm_dev_release);
/*
* Called from tpm_<specific>.c probe function only for devices
@@ -1470,7 +1057,7 @@ EXPORT_SYMBOL_GPL(tpm_dev_release);
* pci_disable_device
*/
struct tpm_chip *tpm_register_hardware(struct device *dev,
- const struct tpm_vendor_specific *entry)
+ const struct tpm_class_ops *ops)
{
struct tpm_chip *chip;
@@ -1480,56 +1067,35 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
if (chip == NULL)
return NULL;
- mutex_init(&chip->buffer_mutex);
mutex_init(&chip->tpm_mutex);
INIT_LIST_HEAD(&chip->list);
- INIT_WORK(&chip->work, timeout_work);
-
- setup_timer(&chip->user_read_timer, user_reader_timeout,
- (unsigned long)chip);
-
- memcpy(&chip->vendor, entry, sizeof(struct tpm_vendor_specific));
-
+ chip->ops = ops;
chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES);
if (chip->dev_num >= TPM_NUM_DEVICES) {
dev_err(dev, "No available tpm device numbers\n");
goto out_free;
- } else if (chip->dev_num == 0)
- chip->vendor.miscdev.minor = TPM_MINOR;
- else
- chip->vendor.miscdev.minor = MISC_DYNAMIC_MINOR;
+ }
set_bit(chip->dev_num, dev_mask);
scnprintf(chip->devname, sizeof(chip->devname), "%s%d", "tpm",
chip->dev_num);
- chip->vendor.miscdev.name = chip->devname;
- chip->vendor.miscdev.parent = dev;
chip->dev = get_device(dev);
chip->release = dev->release;
dev->release = tpm_dev_release;
dev_set_drvdata(dev, chip);
- if (misc_register(&chip->vendor.miscdev)) {
- dev_err(chip->dev,
- "unable to misc_register %s, minor %d\n",
- chip->vendor.miscdev.name,
- chip->vendor.miscdev.minor);
+ if (tpm_dev_add_device(chip))
goto put_device;
- }
- if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
- misc_deregister(&chip->vendor.miscdev);
- goto put_device;
- }
+ if (tpm_sysfs_add_device(chip))
+ goto del_misc;
- if (tpm_add_ppi(&dev->kobj)) {
- misc_deregister(&chip->vendor.miscdev);
- goto put_device;
- }
+ if (tpm_add_ppi(&dev->kobj))
+ goto del_misc;
chip->bios_dir = tpm_bios_log_setup(chip->devname);
@@ -1540,6 +1106,8 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
return chip;
+del_misc:
+ tpm_dev_del_device(chip);
put_device:
put_device(chip->dev);
out_free:
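The tpm-interface.c changes above switch every callback from chip->vendor.* to a const chip->ops table. A sketch of what a driver now hands to tpm_register_hardware(), with stub callbacks and hypothetical demo_* names; the real struct tpm_class_ops is introduced elsewhere in this series:

#include "tpm.h"

static int demo_recv(struct tpm_chip *chip, u8 *buf, size_t len) { return 0; }
static int demo_send(struct tpm_chip *chip, u8 *buf, size_t len) { return 0; }
static void demo_cancel(struct tpm_chip *chip) { }
static u8 demo_status(struct tpm_chip *chip) { return 0; }
static bool demo_req_canceled(struct tpm_chip *chip, u8 status) { return false; }

static const struct tpm_class_ops demo_tpm_ops = {
	.recv		= demo_recv,
	.send		= demo_send,
	.cancel		= demo_cancel,
	.status		= demo_status,
	.req_canceled	= demo_req_canceled,
	/* tpm_transmit() polls until (status & mask) == val */
	.req_complete_mask = 0,
	.req_complete_val  = 0,
};

/* chip = tpm_register_hardware(dev, &demo_tpm_ops); */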
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
new file mode 100644
index 000000000000..01730a27ae07
--- /dev/null
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Copyright (C) 2013 Obsidian Research Corp
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * sysfs filesystem inspection interface to the TPM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+#include <linux/device.h>
+#include "tpm.h"
+
+/* XXX for now this helper is duplicated in tpm-interface.c */
+static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
+ int len, const char *desc)
+{
+ int err;
+
+ len = tpm_transmit(chip, (u8 *) cmd, len);
+ if (len < 0)
+ return len;
+ else if (len < TPM_HEADER_SIZE)
+ return -EFAULT;
+
+ err = be32_to_cpu(cmd->header.out.return_code);
+ if (err != 0 && desc)
+ dev_err(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
+
+ return err;
+}
+
+#define READ_PUBEK_RESULT_SIZE 314
+#define TPM_ORD_READPUBEK cpu_to_be32(124)
+static struct tpm_input_header tpm_readpubek_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(30),
+ .ordinal = TPM_ORD_READPUBEK
+};
+static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u8 *data;
+ struct tpm_cmd_t tpm_cmd;
+ ssize_t err;
+ int i, rc;
+ char *str = buf;
+
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ tpm_cmd.header.in = tpm_readpubek_header;
+ err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
+ "attempting to read the PUBEK");
+ if (err)
+ goto out;
+
+ /*
+ ignore header 10 bytes
+ algorithm 32 bits (1 == RSA )
+ encscheme 16 bits
+ sigscheme 16 bits
+ parameters (RSA 12->bytes: keybit, #primes, expbit)
+ keylenbytes 32 bits
+ 256 byte modulus
+ ignore checksum 20 bytes
+ */
+ data = tpm_cmd.params.readpubek_out_buffer;
+ str +=
+ sprintf(str,
+ "Algorithm: %02X %02X %02X %02X\n"
+ "Encscheme: %02X %02X\n"
+ "Sigscheme: %02X %02X\n"
+ "Parameters: %02X %02X %02X %02X "
+ "%02X %02X %02X %02X "
+ "%02X %02X %02X %02X\n"
+ "Modulus length: %d\n"
+ "Modulus:\n",
+ data[0], data[1], data[2], data[3],
+ data[4], data[5],
+ data[6], data[7],
+ data[12], data[13], data[14], data[15],
+ data[16], data[17], data[18], data[19],
+ data[20], data[21], data[22], data[23],
+ be32_to_cpu(*((__be32 *) (data + 24))));
+
+ for (i = 0; i < 256; i++) {
+ str += sprintf(str, "%02X ", data[i + 28]);
+ if ((i + 1) % 16 == 0)
+ str += sprintf(str, "\n");
+ }
+out:
+ rc = str - buf;
+ return rc;
+}
+static DEVICE_ATTR_RO(pubek);
+
+static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ cap_t cap;
+ u8 digest[TPM_DIGEST_SIZE];
+ ssize_t rc;
+ int i, j, num_pcrs;
+ char *str = buf;
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ rc = tpm_getcap(dev, TPM_CAP_PROP_PCR, &cap,
+ "attempting to determine the number of PCRS");
+ if (rc)
+ return 0;
+
+ num_pcrs = be32_to_cpu(cap.num_pcrs);
+ for (i = 0; i < num_pcrs; i++) {
+ rc = tpm_pcr_read_dev(chip, i, digest);
+ if (rc)
+ break;
+ str += sprintf(str, "PCR-%02d: ", i);
+ for (j = 0; j < TPM_DIGEST_SIZE; j++)
+ str += sprintf(str, "%02X ", digest[j]);
+ str += sprintf(str, "\n");
+ }
+ return str - buf;
+}
+static DEVICE_ATTR_RO(pcrs);
+
+static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ cap_t cap;
+ ssize_t rc;
+
+ rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
+ "attempting to determine the permanent enabled state");
+ if (rc)
+ return 0;
+
+ rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
+ return rc;
+}
+static DEVICE_ATTR_RO(enabled);
+
+static ssize_t active_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ cap_t cap;
+ ssize_t rc;
+
+ rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
+ "attempting to determine the permanent active state");
+ if (rc)
+ return 0;
+
+ rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
+ return rc;
+}
+static DEVICE_ATTR_RO(active);
+
+static ssize_t owned_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ cap_t cap;
+ ssize_t rc;
+
+ rc = tpm_getcap(dev, TPM_CAP_PROP_OWNER, &cap,
+ "attempting to determine the owner state");
+ if (rc)
+ return 0;
+
+ rc = sprintf(buf, "%d\n", cap.owned);
+ return rc;
+}
+static DEVICE_ATTR_RO(owned);
+
+static ssize_t temp_deactivated_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ cap_t cap;
+ ssize_t rc;
+
+ rc = tpm_getcap(dev, TPM_CAP_FLAG_VOL, &cap,
+ "attempting to determine the temporary state");
+ if (rc)
+ return 0;
+
+ rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
+ return rc;
+}
+static DEVICE_ATTR_RO(temp_deactivated);
+
+static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ cap_t cap;
+ ssize_t rc;
+ char *str = buf;
+
+ rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
+ "attempting to determine the manufacturer");
+ if (rc)
+ return 0;
+ str += sprintf(str, "Manufacturer: 0x%x\n",
+ be32_to_cpu(cap.manufacturer_id));
+
+ /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
+ rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap,
+ "attempting to determine the 1.2 version");
+ if (!rc) {
+ str += sprintf(str,
+ "TCG version: %d.%d\nFirmware version: %d.%d\n",
+ cap.tpm_version_1_2.Major,
+ cap.tpm_version_1_2.Minor,
+ cap.tpm_version_1_2.revMajor,
+ cap.tpm_version_1_2.revMinor);
+ } else {
+ /* Otherwise just use TPM_STRUCT_VER */
+ rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version");
+ if (rc)
+ return 0;
+ str += sprintf(str,
+ "TCG version: %d.%d\nFirmware version: %d.%d\n",
+ cap.tpm_version.Major,
+ cap.tpm_version.Minor,
+ cap.tpm_version.revMajor,
+ cap.tpm_version.revMinor);
+ }
+
+ return str - buf;
+}
+static DEVICE_ATTR_RO(caps);
+
+static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ if (chip == NULL)
+ return 0;
+
+ chip->ops->cancel(chip);
+ return count;
+}
+static DEVICE_ATTR_WO(cancel);
+
+static ssize_t durations_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ if (chip->vendor.duration[TPM_LONG] == 0)
+ return 0;
+
+ return sprintf(buf, "%d %d %d [%s]\n",
+ jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
+ jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
+ jiffies_to_usecs(chip->vendor.duration[TPM_LONG]),
+ chip->vendor.duration_adjusted
+ ? "adjusted" : "original");
+}
+static DEVICE_ATTR_RO(durations);
+
+static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d %d %d %d [%s]\n",
+ jiffies_to_usecs(chip->vendor.timeout_a),
+ jiffies_to_usecs(chip->vendor.timeout_b),
+ jiffies_to_usecs(chip->vendor.timeout_c),
+ jiffies_to_usecs(chip->vendor.timeout_d),
+ chip->vendor.timeout_adjusted
+ ? "adjusted" : "original");
+}
+static DEVICE_ATTR_RO(timeouts);
+
+static struct attribute *tpm_dev_attrs[] = {
+ &dev_attr_pubek.attr,
+ &dev_attr_pcrs.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_active.attr,
+ &dev_attr_owned.attr,
+ &dev_attr_temp_deactivated.attr,
+ &dev_attr_caps.attr,
+ &dev_attr_cancel.attr,
+ &dev_attr_durations.attr,
+ &dev_attr_timeouts.attr,
+ NULL,
+};
+
+static const struct attribute_group tpm_dev_group = {
+ .attrs = tpm_dev_attrs,
+};
+
+int tpm_sysfs_add_device(struct tpm_chip *chip)
+{
+ int err;
+ err = sysfs_create_group(&chip->dev->kobj,
+ &tpm_dev_group);
+
+ if (err)
+ dev_err(chip->dev,
+ "failed to create sysfs attributes, %d\n", err);
+ return err;
+}
+
+void tpm_sysfs_del_device(struct tpm_chip *chip)
+{
+ sysfs_remove_group(&chip->dev->kobj, &tpm_dev_group);
+}
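For orientation, a minimal sketch of how the two new helpers are expected to be wired up from the core registration path (tpm_register_hardware() lives in tpm-interface.c, outside this hunk, so the call sites and the unwind labels below are assumptions):

	/* sketch only -- assumed order inside tpm_register_hardware() */
	chip->ops = ops;			/* the driver's tpm_class_ops */
	rc = tpm_dev_add_device(chip);		/* register /dev/tpm%d */
	if (rc)
		goto out_free;			/* placeholder label */
	rc = tpm_sysfs_add_device(chip);	/* pubek, pcrs, caps, ... */
	if (rc)
		goto out_del_dev;		/* placeholder label */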
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index f32847872193..e4d0888d2eab 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -46,6 +46,14 @@ enum tpm_addr {
TPM_ADDR = 0x4E,
};
+/* Indexes the duration array */
+enum tpm_duration {
+ TPM_SHORT = 0,
+ TPM_MEDIUM = 1,
+ TPM_LONG = 2,
+ TPM_UNDEFINED,
+};
+
#define TPM_WARN_RETRY 0x800
#define TPM_WARN_DOING_SELFTEST 0x802
#define TPM_ERR_DEACTIVATED 0x6
@@ -53,33 +61,9 @@ enum tpm_addr {
#define TPM_ERR_INVALID_POSTINIT 38
#define TPM_HEADER_SIZE 10
-extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr,
- char *);
-extern ssize_t tpm_show_pcrs(struct device *, struct device_attribute *attr,
- char *);
-extern ssize_t tpm_show_caps(struct device *, struct device_attribute *attr,
- char *);
-extern ssize_t tpm_store_cancel(struct device *, struct device_attribute *attr,
- const char *, size_t);
-extern ssize_t tpm_show_enabled(struct device *, struct device_attribute *attr,
- char *);
-extern ssize_t tpm_show_active(struct device *, struct device_attribute *attr,
- char *);
-extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
- char *);
-extern ssize_t tpm_show_temp_deactivated(struct device *,
- struct device_attribute *attr, char *);
-extern ssize_t tpm_show_durations(struct device *,
- struct device_attribute *attr, char *);
-extern ssize_t tpm_show_timeouts(struct device *,
- struct device_attribute *attr, char *);
-
struct tpm_chip;
struct tpm_vendor_specific {
- const u8 req_complete_mask;
- const u8 req_complete_val;
- bool (*req_canceled)(struct tpm_chip *chip, u8 status);
void __iomem *iobase; /* ioremapped address */
unsigned long base; /* TPM base address */
@@ -89,13 +73,7 @@ struct tpm_vendor_specific {
int region_size;
int have_region;
- int (*recv) (struct tpm_chip *, u8 *, size_t);
- int (*send) (struct tpm_chip *, u8 *, size_t);
- void (*cancel) (struct tpm_chip *);
- u8 (*status) (struct tpm_chip *);
- void (*release) (struct device *);
struct miscdevice miscdev;
- struct attribute_group *attr_group;
struct list_head list;
int locality;
unsigned long timeout_a, timeout_b, timeout_c, timeout_d; /* jiffies */
@@ -118,19 +96,13 @@ struct tpm_vendor_specific {
struct tpm_chip {
struct device *dev; /* Device stuff */
+ const struct tpm_class_ops *ops;
int dev_num; /* /dev/tpm# */
char devname[7];
unsigned long is_open; /* only one allowed */
int time_expired;
- /* Data passed to and from the tpm via the read/write calls */
- u8 *data_buffer;
- atomic_t data_pending;
- struct mutex buffer_mutex;
-
- struct timer_list user_read_timer; /* user needs to claim result */
- struct work_struct work;
struct mutex tpm_mutex; /* tpm is processing */
struct tpm_vendor_specific vendor;
@@ -171,6 +143,8 @@ struct tpm_output_header {
__be32 return_code;
} __packed;
+#define TPM_TAG_RQU_COMMAND cpu_to_be16(193)
+
struct stclear_flags_t {
__be16 tag;
u8 deactivated;
@@ -244,6 +218,24 @@ typedef union {
struct duration_t duration;
} cap_t;
+enum tpm_capabilities {
+ TPM_CAP_FLAG = cpu_to_be32(4),
+ TPM_CAP_PROP = cpu_to_be32(5),
+ CAP_VERSION_1_1 = cpu_to_be32(0x06),
+ CAP_VERSION_1_2 = cpu_to_be32(0x1A)
+};
+
+enum tpm_sub_capabilities {
+ TPM_CAP_PROP_PCR = cpu_to_be32(0x101),
+ TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103),
+ TPM_CAP_FLAG_PERM = cpu_to_be32(0x108),
+ TPM_CAP_FLAG_VOL = cpu_to_be32(0x109),
+ TPM_CAP_PROP_OWNER = cpu_to_be32(0x111),
+ TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115),
+ TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120),
+
+};
+
struct tpm_getcap_params_in {
__be32 cap;
__be32 subcap_size;
@@ -323,25 +315,28 @@ struct tpm_cmd_t {
ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *);
+ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
+ size_t bufsiz);
extern int tpm_get_timeouts(struct tpm_chip *);
extern void tpm_gen_interrupt(struct tpm_chip *);
extern int tpm_do_selftest(struct tpm_chip *);
extern unsigned long tpm_calc_ordinal_duration(struct tpm_chip *, u32);
extern struct tpm_chip* tpm_register_hardware(struct device *,
- const struct tpm_vendor_specific *);
-extern int tpm_open(struct inode *, struct file *);
-extern int tpm_release(struct inode *, struct file *);
-extern void tpm_dev_release(struct device *dev);
+ const struct tpm_class_ops *ops);
extern void tpm_dev_vendor_release(struct tpm_chip *);
-extern ssize_t tpm_write(struct file *, const char __user *, size_t,
- loff_t *);
-extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *);
extern void tpm_remove_hardware(struct device *);
extern int tpm_pm_suspend(struct device *);
extern int tpm_pm_resume(struct device *);
extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
wait_queue_head_t *, bool);
+int tpm_dev_add_device(struct tpm_chip *chip);
+void tpm_dev_del_device(struct tpm_chip *chip);
+int tpm_sysfs_add_device(struct tpm_chip *chip);
+void tpm_sysfs_del_device(struct tpm_chip *chip);
+
+int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
+
#ifdef CONFIG_ACPI
extern int tpm_add_ppi(struct kobject *);
extern void tpm_remove_ppi(struct kobject *);
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
index 64420b3396a2..b9a57fa4b710 100644
--- a/drivers/char/tpm/tpm_acpi.c
+++ b/drivers/char/tpm/tpm_acpi.c
@@ -23,7 +23,7 @@
#include <linux/security.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <acpi/acpi.h>
+#include <linux/acpi.h>
#include "tpm.h"
#include "tpm_eventlog.h"
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index c9a528d25d22..6069d13ae4ac 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -121,31 +121,7 @@ static bool tpm_atml_req_canceled(struct tpm_chip *chip, u8 status)
return (status == ATML_STATUS_READY);
}
-static const struct file_operations atmel_ops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = tpm_open,
- .read = tpm_read,
- .write = tpm_write,
- .release = tpm_release,
-};
-
-static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
-
-static struct attribute* atmel_attrs[] = {
- &dev_attr_pubek.attr,
- &dev_attr_pcrs.attr,
- &dev_attr_caps.attr,
- &dev_attr_cancel.attr,
- NULL,
-};
-
-static struct attribute_group atmel_attr_grp = { .attrs = atmel_attrs };
-
-static const struct tpm_vendor_specific tpm_atmel = {
+static const struct tpm_class_ops tpm_atmel = {
.recv = tpm_atml_recv,
.send = tpm_atml_send,
.cancel = tpm_atml_cancel,
@@ -153,8 +129,6 @@ static const struct tpm_vendor_specific tpm_atmel = {
.req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
.req_complete_val = ATML_STATUS_DATA_AVAIL,
.req_canceled = tpm_atml_req_canceled,
- .attr_group = &atmel_attr_grp,
- .miscdev = { .fops = &atmel_ops, },
};
static struct platform_device *pdev;
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index c3cd7fe481a1..77272925dee6 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -135,50 +135,12 @@ static u8 i2c_atmel_read_status(struct tpm_chip *chip)
return ATMEL_STS_OK;
}
-static const struct file_operations i2c_atmel_ops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = tpm_open,
- .read = tpm_read,
- .write = tpm_write,
- .release = tpm_release,
-};
-
-static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
-static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
-static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
-static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
-static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
-static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
-
-static struct attribute *i2c_atmel_attrs[] = {
- &dev_attr_pubek.attr,
- &dev_attr_pcrs.attr,
- &dev_attr_enabled.attr,
- &dev_attr_active.attr,
- &dev_attr_owned.attr,
- &dev_attr_temp_deactivated.attr,
- &dev_attr_caps.attr,
- &dev_attr_cancel.attr,
- &dev_attr_durations.attr,
- &dev_attr_timeouts.attr,
- NULL,
-};
-
-static struct attribute_group i2c_atmel_attr_grp = {
- .attrs = i2c_atmel_attrs
-};
-
static bool i2c_atmel_req_canceled(struct tpm_chip *chip, u8 status)
{
- return 0;
+ return false;
}
-static const struct tpm_vendor_specific i2c_atmel = {
+static const struct tpm_class_ops i2c_atmel = {
.status = i2c_atmel_read_status,
.recv = i2c_atmel_recv,
.send = i2c_atmel_send,
@@ -186,8 +148,6 @@ static const struct tpm_vendor_specific i2c_atmel = {
.req_complete_mask = ATMEL_STS_OK,
.req_complete_val = ATMEL_STS_OK,
.req_canceled = i2c_atmel_req_canceled,
- .attr_group = &i2c_atmel_attr_grp,
- .miscdev.fops = &i2c_atmel_ops,
};
static int i2c_atmel_probe(struct i2c_client *client,
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index fefd2aa5c81e..52b9b2b2f300 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -566,45 +566,7 @@ static bool tpm_tis_i2c_req_canceled(struct tpm_chip *chip, u8 status)
return (status == TPM_STS_COMMAND_READY);
}
-static const struct file_operations tis_ops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = tpm_open,
- .read = tpm_read,
- .write = tpm_write,
- .release = tpm_release,
-};
-
-static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
-static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
-static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
-static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
-static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
-static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
-
-static struct attribute *tis_attrs[] = {
- &dev_attr_pubek.attr,
- &dev_attr_pcrs.attr,
- &dev_attr_enabled.attr,
- &dev_attr_active.attr,
- &dev_attr_owned.attr,
- &dev_attr_temp_deactivated.attr,
- &dev_attr_caps.attr,
- &dev_attr_cancel.attr,
- &dev_attr_durations.attr,
- &dev_attr_timeouts.attr,
- NULL,
-};
-
-static struct attribute_group tis_attr_grp = {
- .attrs = tis_attrs
-};
-
-static struct tpm_vendor_specific tpm_tis_i2c = {
+static const struct tpm_class_ops tpm_tis_i2c = {
.status = tpm_tis_i2c_status,
.recv = tpm_tis_i2c_recv,
.send = tpm_tis_i2c_send,
@@ -612,8 +574,6 @@ static struct tpm_vendor_specific tpm_tis_i2c = {
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_canceled = tpm_tis_i2c_req_canceled,
- .attr_group = &tis_attr_grp,
- .miscdev.fops = &tis_ops,
};
static int tpm_tis_i2c_init(struct device *dev)
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index 6276fea01ff0..7b158efd49f7 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -178,7 +178,6 @@ static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value,
{
if (chip->vendor.irq && queue) {
s32 rc;
- DEFINE_WAIT(wait);
struct priv_data *priv = chip->vendor.priv;
unsigned int cur_intrs = priv->intrs;
@@ -456,45 +455,7 @@ static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status)
return (status == TPM_STS_COMMAND_READY);
}
-static const struct file_operations i2c_nuvoton_ops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = tpm_open,
- .read = tpm_read,
- .write = tpm_write,
- .release = tpm_release,
-};
-
-static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
-static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
-static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
-static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
-static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
-static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
-
-static struct attribute *i2c_nuvoton_attrs[] = {
- &dev_attr_pubek.attr,
- &dev_attr_pcrs.attr,
- &dev_attr_enabled.attr,
- &dev_attr_active.attr,
- &dev_attr_owned.attr,
- &dev_attr_temp_deactivated.attr,
- &dev_attr_caps.attr,
- &dev_attr_cancel.attr,
- &dev_attr_durations.attr,
- &dev_attr_timeouts.attr,
- NULL,
-};
-
-static struct attribute_group i2c_nuvoton_attr_grp = {
- .attrs = i2c_nuvoton_attrs
-};
-
-static const struct tpm_vendor_specific tpm_i2c = {
+static const struct tpm_class_ops tpm_i2c = {
.status = i2c_nuvoton_read_status,
.recv = i2c_nuvoton_recv,
.send = i2c_nuvoton_send,
@@ -502,8 +463,6 @@ static const struct tpm_vendor_specific tpm_i2c = {
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_canceled = i2c_nuvoton_req_canceled,
- .attr_group = &i2c_nuvoton_attr_grp,
- .miscdev.fops = &i2c_nuvoton_ops,
};
/* The only purpose for the handler is to signal to any waiting threads that
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
index a0d6ceb5d005..5b0dd8ef74c0 100644
--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
+++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
@@ -410,6 +410,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
&chip->vendor.read_queue)
== 0) {
burstcnt = get_burstcount(chip);
+ if (burstcnt < 0)
+ return burstcnt;
len = min_t(int, burstcnt, count - size);
I2C_READ_DATA(client, TPM_DATA_FIFO, buf + size, len);
size += len;
@@ -451,7 +453,8 @@ static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
size_t len)
{
- u32 status, burstcnt = 0, i, size;
+ u32 status, i, size;
+ int burstcnt = 0;
int ret;
u8 data;
struct i2c_client *client;
@@ -482,6 +485,8 @@ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
for (i = 0; i < len - 1;) {
burstcnt = get_burstcount(chip);
+ if (burstcnt < 0)
+ return burstcnt;
size = min_t(int, len - i - 1, burstcnt);
ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf, size);
if (ret < 0)
@@ -559,7 +564,7 @@ static int tpm_stm_i2c_recv(struct tpm_chip *chip, unsigned char *buf,
}
out:
- chip->vendor.cancel(chip);
+ chip->ops->cancel(chip);
release_locality(chip);
return size;
}
@@ -569,40 +574,7 @@ static bool tpm_st33_i2c_req_canceled(struct tpm_chip *chip, u8 status)
return (status == TPM_STS_COMMAND_READY);
}
-static const struct file_operations tpm_st33_i2c_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = tpm_read,
- .write = tpm_write,
- .open = tpm_open,
- .release = tpm_release,
-};
-
-static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
-static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
-static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
-static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
-
-static struct attribute *stm_tpm_attrs[] = {
- &dev_attr_pubek.attr,
- &dev_attr_pcrs.attr,
- &dev_attr_enabled.attr,
- &dev_attr_active.attr,
- &dev_attr_owned.attr,
- &dev_attr_temp_deactivated.attr,
- &dev_attr_caps.attr,
- &dev_attr_cancel.attr, NULL,
-};
-
-static struct attribute_group stm_tpm_attr_grp = {
- .attrs = stm_tpm_attrs
-};
-
-static struct tpm_vendor_specific st_i2c_tpm = {
+static const struct tpm_class_ops st_i2c_tpm = {
.send = tpm_stm_i2c_send,
.recv = tpm_stm_i2c_recv,
.cancel = tpm_stm_i2c_cancel,
@@ -610,8 +582,6 @@ static struct tpm_vendor_specific st_i2c_tpm = {
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_canceled = tpm_st33_i2c_req_canceled,
- .attr_group = &stm_tpm_attr_grp,
- .miscdev = {.fops = &tpm_st33_i2c_fops,},
};
static int interrupts;
@@ -837,7 +807,7 @@ static int tpm_st33_i2c_pm_resume(struct device *dev)
if (power_mgt) {
gpio_set_value(pin_infos->io_lpcpd, 1);
ret = wait_for_serirq_timeout(chip,
- (chip->vendor.status(chip) &
+ (chip->ops->status(chip) &
TPM_STS_VALID) == TPM_STS_VALID,
chip->vendor.timeout_b);
} else {
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 2783a42aa732..af74c57e5090 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -403,43 +403,7 @@ static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
return (status == 0);
}
-static const struct file_operations ibmvtpm_ops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = tpm_open,
- .read = tpm_read,
- .write = tpm_write,
- .release = tpm_release,
-};
-
-static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
-static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
-static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
-static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
- NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
-static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
-static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
-
-static struct attribute *ibmvtpm_attrs[] = {
- &dev_attr_pubek.attr,
- &dev_attr_pcrs.attr,
- &dev_attr_enabled.attr,
- &dev_attr_active.attr,
- &dev_attr_owned.attr,
- &dev_attr_temp_deactivated.attr,
- &dev_attr_caps.attr,
- &dev_attr_cancel.attr,
- &dev_attr_durations.attr,
- &dev_attr_timeouts.attr, NULL,
-};
-
-static struct attribute_group ibmvtpm_attr_grp = { .attrs = ibmvtpm_attrs };
-
-static const struct tpm_vendor_specific tpm_ibmvtpm = {
+static const struct tpm_class_ops tpm_ibmvtpm = {
.recv = tpm_ibmvtpm_recv,
.send = tpm_ibmvtpm_send,
.cancel = tpm_ibmvtpm_cancel,
@@ -447,8 +411,6 @@ static const struct tpm_vendor_specific tpm_ibmvtpm = {
.req_complete_mask = 0,
.req_complete_val = 0,
.req_canceled = tpm_ibmvtpm_req_canceled,
- .attr_group = &ibmvtpm_attr_grp,
- .miscdev = { .fops = &ibmvtpm_ops, },
};
static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
@@ -507,7 +469,6 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
return;
}
- return;
case IBMVTPM_VALID_CMD:
switch (crq->msg) {
case VTPM_GET_RTCE_BUFFER_SIZE_RES:
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 2b480c2960bb..dc0a2554034e 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -371,39 +371,13 @@ static u8 tpm_inf_status(struct tpm_chip *chip)
return tpm_data_in(STAT);
}
-static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
-
-static struct attribute *inf_attrs[] = {
- &dev_attr_pubek.attr,
- &dev_attr_pcrs.attr,
- &dev_attr_caps.attr,
- &dev_attr_cancel.attr,
- NULL,
-};
-
-static struct attribute_group inf_attr_grp = {.attrs = inf_attrs };
-
-static const struct file_operations inf_ops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = tpm_open,
- .read = tpm_read,
- .write = tpm_write,
- .release = tpm_release,
-};
-
-static const struct tpm_vendor_specific tpm_inf = {
+static const struct tpm_class_ops tpm_inf = {
.recv = tpm_inf_recv,
.send = tpm_inf_send,
.cancel = tpm_inf_cancel,
.status = tpm_inf_status,
.req_complete_mask = 0,
.req_complete_val = 0,
- .attr_group = &inf_attr_grp,
- .miscdev = {.fops = &inf_ops,},
};
static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 770c46f8eb30..3179ec9cffdc 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -232,31 +232,7 @@ static bool tpm_nsc_req_canceled(struct tpm_chip *chip, u8 status)
return (status == NSC_STATUS_RDY);
}
-static const struct file_operations nsc_ops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = tpm_open,
- .read = tpm_read,
- .write = tpm_write,
- .release = tpm_release,
-};
-
-static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-static DEVICE_ATTR(cancel, S_IWUSR|S_IWGRP, NULL, tpm_store_cancel);
-
-static struct attribute * nsc_attrs[] = {
- &dev_attr_pubek.attr,
- &dev_attr_pcrs.attr,
- &dev_attr_caps.attr,
- &dev_attr_cancel.attr,
- NULL,
-};
-
-static struct attribute_group nsc_attr_grp = { .attrs = nsc_attrs };
-
-static const struct tpm_vendor_specific tpm_nsc = {
+static const struct tpm_class_ops tpm_nsc = {
.recv = tpm_nsc_recv,
.send = tpm_nsc_send,
.cancel = tpm_nsc_cancel,
@@ -264,8 +240,6 @@ static const struct tpm_vendor_specific tpm_nsc = {
.req_complete_mask = NSC_STATUS_OBF,
.req_complete_val = NSC_STATUS_OBF,
.req_canceled = tpm_nsc_req_canceled,
- .attr_group = &nsc_attr_grp,
- .miscdev = { .fops = &nsc_ops, },
};
static struct platform_device *pdev = NULL;
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 8e562dc65601..b3ea223585bd 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -1,16 +1,6 @@
#include <linux/acpi.h>
-#include <acpi/acpi_drivers.h>
#include "tpm.h"
-static const u8 tpm_ppi_uuid[] = {
- 0xA6, 0xFA, 0xDD, 0x3D,
- 0x1B, 0x36,
- 0xB4, 0x4E,
- 0xA4, 0x24,
- 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53
-};
-static char *tpm_device_name = "TPM";
-
#define TPM_PPI_REVISION_ID 1
#define TPM_PPI_FN_VERSION 1
#define TPM_PPI_FN_SUBREQ 2
@@ -24,247 +14,178 @@ static char *tpm_device_name = "TPM";
#define PPI_VS_REQ_END 255
#define PPI_VERSION_LEN 3
+static const u8 tpm_ppi_uuid[] = {
+ 0xA6, 0xFA, 0xDD, 0x3D,
+ 0x1B, 0x36,
+ 0xB4, 0x4E,
+ 0xA4, 0x24,
+ 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53
+};
+
+static char tpm_ppi_version[PPI_VERSION_LEN + 1];
+static acpi_handle tpm_ppi_handle;
+
static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context,
void **return_value)
{
- acpi_status status;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
- if (strstr(buffer.pointer, context) != NULL) {
- *return_value = handle;
- kfree(buffer.pointer);
- return AE_CTRL_TERMINATE;
+ union acpi_object *obj;
+
+ if (!acpi_check_dsm(handle, tpm_ppi_uuid, TPM_PPI_REVISION_ID,
+ 1 << TPM_PPI_FN_VERSION))
+ return AE_OK;
+
+ /* Cache version string */
+ obj = acpi_evaluate_dsm_typed(handle, tpm_ppi_uuid,
+ TPM_PPI_REVISION_ID, TPM_PPI_FN_VERSION,
+ NULL, ACPI_TYPE_STRING);
+ if (obj) {
+ strlcpy(tpm_ppi_version, obj->string.pointer,
+ PPI_VERSION_LEN + 1);
+ ACPI_FREE(obj);
}
- return AE_OK;
+
+ *return_value = handle;
+
+ return AE_CTRL_TERMINATE;
}
-static inline void ppi_assign_params(union acpi_object params[4],
- u64 function_num)
+static inline union acpi_object *
+tpm_eval_dsm(int func, acpi_object_type type, union acpi_object *argv4)
{
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(tpm_ppi_uuid);
- params[0].buffer.pointer = (char *)tpm_ppi_uuid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = TPM_PPI_REVISION_ID;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = function_num;
- params[3].type = ACPI_TYPE_PACKAGE;
- params[3].package.count = 0;
- params[3].package.elements = NULL;
+ BUG_ON(!tpm_ppi_handle);
+ return acpi_evaluate_dsm_typed(tpm_ppi_handle, tpm_ppi_uuid,
+ TPM_PPI_REVISION_ID, func, argv4, type);
}
static ssize_t tpm_show_ppi_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
- acpi_handle handle;
- acpi_status status;
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object params[4];
- union acpi_object *obj;
-
- input.count = 4;
- ppi_assign_params(params, TPM_PPI_FN_VERSION);
- input.pointer = params;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, ppi_callback, NULL,
- tpm_device_name, &handle);
- if (ACPI_FAILURE(status))
- return -ENXIO;
-
- status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
- ACPI_TYPE_STRING);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
- obj = (union acpi_object *)output.pointer;
- status = scnprintf(buf, PAGE_SIZE, "%s\n", obj->string.pointer);
- kfree(output.pointer);
- return status;
+ return scnprintf(buf, PAGE_SIZE, "%s\n", tpm_ppi_version);
}
static ssize_t tpm_show_ppi_request(struct device *dev,
struct device_attribute *attr, char *buf)
{
- acpi_handle handle;
- acpi_status status;
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object params[4];
- union acpi_object *ret_obj;
-
- input.count = 4;
- ppi_assign_params(params, TPM_PPI_FN_GETREQ);
- input.pointer = params;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, ppi_callback, NULL,
- tpm_device_name, &handle);
- if (ACPI_FAILURE(status))
+ ssize_t size = -EINVAL;
+ union acpi_object *obj;
+
+ obj = tpm_eval_dsm(TPM_PPI_FN_GETREQ, ACPI_TYPE_PACKAGE, NULL);
+ if (!obj)
return -ENXIO;
- status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
- ACPI_TYPE_PACKAGE);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
/*
* output.pointer should be of package type, including two integers.
* The first is function return code, 0 means success and 1 means
* error. The second is pending TPM operation requested by the OS, 0
* means none and >0 means operation value.
*/
- ret_obj = ((union acpi_object *)output.pointer)->package.elements;
- if (ret_obj->type == ACPI_TYPE_INTEGER) {
- if (ret_obj->integer.value) {
- status = -EFAULT;
- goto cleanup;
- }
- ret_obj++;
- if (ret_obj->type == ACPI_TYPE_INTEGER)
- status = scnprintf(buf, PAGE_SIZE, "%llu\n",
- ret_obj->integer.value);
+ if (obj->package.count == 2 &&
+ obj->package.elements[0].type == ACPI_TYPE_INTEGER &&
+ obj->package.elements[1].type == ACPI_TYPE_INTEGER) {
+ if (obj->package.elements[0].integer.value)
+ size = -EFAULT;
else
- status = -EINVAL;
- } else {
- status = -EINVAL;
+ size = scnprintf(buf, PAGE_SIZE, "%llu\n",
+ obj->package.elements[1].integer.value);
}
-cleanup:
- kfree(output.pointer);
- return status;
+
+ ACPI_FREE(obj);
+
+ return size;
}
static ssize_t tpm_store_ppi_request(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- char version[PPI_VERSION_LEN + 1];
- acpi_handle handle;
- acpi_status status;
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object params[4];
- union acpi_object obj;
u32 req;
u64 ret;
+ int func = TPM_PPI_FN_SUBREQ;
+ union acpi_object *obj, tmp;
+ union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp);
- input.count = 4;
- ppi_assign_params(params, TPM_PPI_FN_VERSION);
- input.pointer = params;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, ppi_callback, NULL,
- tpm_device_name, &handle);
- if (ACPI_FAILURE(status))
- return -ENXIO;
-
- status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
- ACPI_TYPE_STRING);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
- strlcpy(version,
- ((union acpi_object *)output.pointer)->string.pointer,
- PPI_VERSION_LEN + 1);
- kfree(output.pointer);
- output.length = ACPI_ALLOCATE_BUFFER;
- output.pointer = NULL;
/*
* the function to submit TPM operation request to pre-os environment
* is updated with function index from SUBREQ to SUBREQ2 since PPI
* version 1.1
*/
- if (strcmp(version, "1.1") == -1)
- params[2].integer.value = TPM_PPI_FN_SUBREQ;
- else
- params[2].integer.value = TPM_PPI_FN_SUBREQ2;
+ if (acpi_check_dsm(tpm_ppi_handle, tpm_ppi_uuid, TPM_PPI_REVISION_ID,
+ 1 << TPM_PPI_FN_SUBREQ2))
+ func = TPM_PPI_FN_SUBREQ2;
+
/*
* PPI spec defines params[3].type as ACPI_TYPE_PACKAGE. Some BIOS
* accept buffer/string/integer type, but some BIOS accept buffer/
* string/package type. For PPI version 1.0 and 1.1, use buffer type
* for compatibility, and use package type since 1.2 according to spec.
*/
- if (strcmp(version, "1.2") == -1) {
- params[3].type = ACPI_TYPE_BUFFER;
- params[3].buffer.length = sizeof(req);
- sscanf(buf, "%d", &req);
- params[3].buffer.pointer = (char *)&req;
+ if (strcmp(tpm_ppi_version, "1.2") < 0) {
+ if (sscanf(buf, "%d", &req) != 1)
+ return -EINVAL;
+ argv4.type = ACPI_TYPE_BUFFER;
+ argv4.buffer.length = sizeof(req);
+ argv4.buffer.pointer = (u8 *)&req;
} else {
- params[3].package.count = 1;
- obj.type = ACPI_TYPE_INTEGER;
- sscanf(buf, "%llu", &obj.integer.value);
- params[3].package.elements = &obj;
+ tmp.type = ACPI_TYPE_INTEGER;
+ if (sscanf(buf, "%llu", &tmp.integer.value) != 1)
+ return -EINVAL;
+ }
+
+ obj = tpm_eval_dsm(func, ACPI_TYPE_INTEGER, &argv4);
+ if (!obj) {
+ return -ENXIO;
+ } else {
+ ret = obj->integer.value;
+ ACPI_FREE(obj);
}
- status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
- ACPI_TYPE_INTEGER);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
- ret = ((union acpi_object *)output.pointer)->integer.value;
if (ret == 0)
- status = (acpi_status)count;
- else if (ret == 1)
- status = -EPERM;
- else
- status = -EFAULT;
- kfree(output.pointer);
- return status;
+ return (acpi_status)count;
+
+ return (ret == 1) ? -EPERM : -EFAULT;
}
static ssize_t tpm_show_ppi_transition_action(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- char version[PPI_VERSION_LEN + 1];
- acpi_handle handle;
- acpi_status status;
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object params[4];
u32 ret;
- char *info[] = {
+ acpi_status status;
+ union acpi_object *obj = NULL;
+ union acpi_object tmp = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = 0,
+ .buffer.pointer = NULL
+ };
+
+ static char *info[] = {
"None",
"Shutdown",
"Reboot",
"OS Vendor-specific",
"Error",
};
- input.count = 4;
- ppi_assign_params(params, TPM_PPI_FN_VERSION);
- input.pointer = params;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, ppi_callback, NULL,
- tpm_device_name, &handle);
- if (ACPI_FAILURE(status))
- return -ENXIO;
- status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
- ACPI_TYPE_STRING);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
- strlcpy(version,
- ((union acpi_object *)output.pointer)->string.pointer,
- PPI_VERSION_LEN + 1);
/*
* PPI spec defines params[3].type as empty package, but some platforms
* (e.g. Capella with PPI 1.0) need integer/string/buffer type, so for
* compatibility, define params[3].type as buffer, if PPI version < 1.2
*/
- if (strcmp(version, "1.2") == -1) {
- params[3].type = ACPI_TYPE_BUFFER;
- params[3].buffer.length = 0;
- params[3].buffer.pointer = NULL;
+ if (strcmp(tpm_ppi_version, "1.2") < 0)
+ obj = &tmp;
+ obj = tpm_eval_dsm(TPM_PPI_FN_GETACT, ACPI_TYPE_INTEGER, obj);
+ if (!obj) {
+ return -ENXIO;
+ } else {
+ ret = obj->integer.value;
+ ACPI_FREE(obj);
}
- params[2].integer.value = TPM_PPI_FN_GETACT;
- kfree(output.pointer);
- output.length = ACPI_ALLOCATE_BUFFER;
- output.pointer = NULL;
- status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
- ACPI_TYPE_INTEGER);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
- ret = ((union acpi_object *)output.pointer)->integer.value;
+
if (ret < ARRAY_SIZE(info) - 1)
status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, info[ret]);
else
status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret,
info[ARRAY_SIZE(info)-1]);
- kfree(output.pointer);
return status;
}
@@ -272,27 +193,14 @@ static ssize_t tpm_show_ppi_response(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- acpi_handle handle;
- acpi_status status;
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object params[4];
- union acpi_object *ret_obj;
- u64 req;
-
- input.count = 4;
- ppi_assign_params(params, TPM_PPI_FN_GETRSP);
- input.pointer = params;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, ppi_callback, NULL,
- tpm_device_name, &handle);
- if (ACPI_FAILURE(status))
+ acpi_status status = -EINVAL;
+ union acpi_object *obj, *ret_obj;
+ u64 req, res;
+
+ obj = tpm_eval_dsm(TPM_PPI_FN_GETRSP, ACPI_TYPE_PACKAGE, NULL);
+ if (!obj)
return -ENXIO;
- status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
- ACPI_TYPE_PACKAGE);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
/*
* parameter output.pointer should be of package type, including
* 3 integers. The first means function return code, the second means
@@ -300,115 +208,82 @@ static ssize_t tpm_show_ppi_response(struct device *dev,
* the most recent TPM operation request. Only if the first is 0, and
* the second integer is not 0, the response makes sense.
*/
- ret_obj = ((union acpi_object *)output.pointer)->package.elements;
- if (ret_obj->type != ACPI_TYPE_INTEGER) {
- status = -EINVAL;
+ ret_obj = obj->package.elements;
+ if (obj->package.count < 3 ||
+ ret_obj[0].type != ACPI_TYPE_INTEGER ||
+ ret_obj[1].type != ACPI_TYPE_INTEGER ||
+ ret_obj[2].type != ACPI_TYPE_INTEGER)
goto cleanup;
- }
- if (ret_obj->integer.value) {
+
+ if (ret_obj[0].integer.value) {
status = -EFAULT;
goto cleanup;
}
- ret_obj++;
- if (ret_obj->type != ACPI_TYPE_INTEGER) {
- status = -EINVAL;
- goto cleanup;
- }
- if (ret_obj->integer.value) {
- req = ret_obj->integer.value;
- ret_obj++;
- if (ret_obj->type != ACPI_TYPE_INTEGER) {
- status = -EINVAL;
- goto cleanup;
- }
- if (ret_obj->integer.value == 0)
+
+ req = ret_obj[1].integer.value;
+ res = ret_obj[2].integer.value;
+ if (req) {
+ if (res == 0)
status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
"0: Success");
- else if (ret_obj->integer.value == 0xFFFFFFF0)
+ else if (res == 0xFFFFFFF0)
status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
"0xFFFFFFF0: User Abort");
- else if (ret_obj->integer.value == 0xFFFFFFF1)
+ else if (res == 0xFFFFFFF1)
status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
"0xFFFFFFF1: BIOS Failure");
- else if (ret_obj->integer.value >= 1 &&
- ret_obj->integer.value <= 0x00000FFF)
+ else if (res >= 1 && res <= 0x00000FFF)
status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
- req, ret_obj->integer.value,
- "Corresponding TPM error");
+ req, res, "Corresponding TPM error");
else
status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
- req, ret_obj->integer.value,
- "Error");
+ req, res, "Error");
} else {
status = scnprintf(buf, PAGE_SIZE, "%llu: %s\n",
- ret_obj->integer.value, "No Recent Request");
+ req, "No Recent Request");
}
+
cleanup:
- kfree(output.pointer);
+ ACPI_FREE(obj);
return status;
}
static ssize_t show_ppi_operations(char *buf, u32 start, u32 end)
{
- char *str = buf;
- char version[PPI_VERSION_LEN + 1];
- acpi_handle handle;
- acpi_status status;
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object params[4];
- union acpi_object obj;
int i;
u32 ret;
- char *info[] = {
+ char *str = buf;
+ union acpi_object *obj, tmp;
+ union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp);
+
+ static char *info[] = {
"Not implemented",
"BIOS only",
"Blocked for OS by BIOS",
"User required",
"User not required",
};
- input.count = 4;
- ppi_assign_params(params, TPM_PPI_FN_VERSION);
- input.pointer = params;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, ppi_callback, NULL,
- tpm_device_name, &handle);
- if (ACPI_FAILURE(status))
- return -ENXIO;
- status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
- ACPI_TYPE_STRING);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
-
- strlcpy(version,
- ((union acpi_object *)output.pointer)->string.pointer,
- PPI_VERSION_LEN + 1);
- kfree(output.pointer);
- output.length = ACPI_ALLOCATE_BUFFER;
- output.pointer = NULL;
- if (strcmp(version, "1.2") == -1)
+ if (!acpi_check_dsm(tpm_ppi_handle, tpm_ppi_uuid, TPM_PPI_REVISION_ID,
+ 1 << TPM_PPI_FN_GETOPR))
return -EPERM;
- params[2].integer.value = TPM_PPI_FN_GETOPR;
- params[3].package.count = 1;
- obj.type = ACPI_TYPE_INTEGER;
- params[3].package.elements = &obj;
+ tmp.integer.type = ACPI_TYPE_INTEGER;
for (i = start; i <= end; i++) {
- obj.integer.value = i;
- status = acpi_evaluate_object_typed(handle, "_DSM",
- &input, &output, ACPI_TYPE_INTEGER);
- if (ACPI_FAILURE(status))
+ tmp.integer.value = i;
+ obj = tpm_eval_dsm(TPM_PPI_FN_GETOPR, ACPI_TYPE_INTEGER, &argv);
+ if (!obj) {
return -ENOMEM;
+ } else {
+ ret = obj->integer.value;
+ ACPI_FREE(obj);
+ }
- ret = ((union acpi_object *)output.pointer)->integer.value;
if (ret > 0 && ret < ARRAY_SIZE(info))
str += scnprintf(str, PAGE_SIZE, "%d %d: %s\n",
i, ret, info[ret]);
- kfree(output.pointer);
- output.length = ACPI_ALLOCATE_BUFFER;
- output.pointer = NULL;
}
+
return str - buf;
}
@@ -450,6 +325,12 @@ static struct attribute_group ppi_attr_grp = {
int tpm_add_ppi(struct kobject *parent)
{
+ /* Cache TPM ACPI handle and version string */
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+ ppi_callback, NULL, NULL, &tpm_ppi_handle);
+ if (tpm_ppi_handle == NULL)
+ return -ENODEV;
+
return sysfs_create_group(parent, &ppi_attr_grp);
}
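The rewrite above replaces the open-coded four-element _DSM argument list with the generic helpers from linux/acpi.h. A condensed sketch of that calling convention, using the same names as the code above (the function index and argument value are placeholders):

	union acpi_object arg, *out;
	union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &arg);

	arg.type = ACPI_TYPE_INTEGER;
	arg.integer.value = 1;			/* placeholder operation index */

	if (!acpi_check_dsm(tpm_ppi_handle, tpm_ppi_uuid, TPM_PPI_REVISION_ID,
			    1 << TPM_PPI_FN_GETOPR))
		return -EPERM;			/* firmware lacks this function */

	out = acpi_evaluate_dsm_typed(tpm_ppi_handle, tpm_ppi_uuid,
				      TPM_PPI_REVISION_ID, TPM_PPI_FN_GETOPR,
				      &argv4, ACPI_TYPE_INTEGER);
	if (!out)
		return -ENXIO;			/* evaluation failed or wrong type */
	/* ... use out->integer.value ... */
	ACPI_FREE(out);				/* result buffer is ACPI-allocated */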
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 1b74459c0723..a9ed2270c25d 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -432,45 +432,7 @@ static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
}
}
-static const struct file_operations tis_ops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = tpm_open,
- .read = tpm_read,
- .write = tpm_write,
- .release = tpm_release,
-};
-
-static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
-static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
-static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
-static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
- NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
-static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
-static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
-
-static struct attribute *tis_attrs[] = {
- &dev_attr_pubek.attr,
- &dev_attr_pcrs.attr,
- &dev_attr_enabled.attr,
- &dev_attr_active.attr,
- &dev_attr_owned.attr,
- &dev_attr_temp_deactivated.attr,
- &dev_attr_caps.attr,
- &dev_attr_cancel.attr,
- &dev_attr_durations.attr,
- &dev_attr_timeouts.attr, NULL,
-};
-
-static struct attribute_group tis_attr_grp = {
- .attrs = tis_attrs
-};
-
-static struct tpm_vendor_specific tpm_tis = {
+static const struct tpm_class_ops tpm_tis = {
.status = tpm_tis_status,
.recv = tpm_tis_recv,
.send = tpm_tis_send,
@@ -478,9 +440,6 @@ static struct tpm_vendor_specific tpm_tis = {
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_canceled = tpm_tis_req_canceled,
- .attr_group = &tis_attr_grp,
- .miscdev = {
- .fops = &tis_ops,},
};
static irqreturn_t tis_int_probe(int irq, void *dev_id)
@@ -743,7 +702,7 @@ out_err:
return rc;
}
-#if defined(CONFIG_PNP) || defined(CONFIG_PM_SLEEP)
+#ifdef CONFIG_PM_SLEEP
static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
{
u32 intmask;
@@ -764,9 +723,7 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
iowrite32(intmask,
chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
}
-#endif
-#ifdef CONFIG_PM_SLEEP
static int tpm_tis_resume(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
@@ -835,11 +792,9 @@ static struct pnp_driver tis_pnp_driver = {
.id_table = tpm_pnp_tbl,
.probe = tpm_tis_pnp_init,
.remove = tpm_tis_pnp_remove,
-#ifdef CONFIG_PM_SLEEP
.driver = {
.pm = &tpm_tis_pm,
},
-#endif
};
#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
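Dropping the #ifdef CONFIG_PM_SLEEP guard around the .driver.pm assignment relies on tpm_tis_pm being declared with SIMPLE_DEV_PM_OPS (its declaration sits outside this hunk, so that is an assumption). The macro always emits the dev_pm_ops object and only fills in the sleep callbacks when CONFIG_PM_SLEEP is enabled, roughly:

	/* approximate expansion of
	 *   static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
	 * (SET_SYSTEM_SLEEP_PM_OPS also wires freeze/thaw/poweroff/restore)
	 */
	static const struct dev_pm_ops tpm_tis_pm = {
	#ifdef CONFIG_PM_SLEEP
		.suspend = tpm_pm_suspend,
		.resume  = tpm_tis_resume,
	#endif
	};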
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index c8ff4df81779..2064b4527040 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -17,6 +17,7 @@
#include <xen/xenbus.h>
#include <xen/page.h>
#include "tpm.h"
+#include <xen/platform_pci.h>
struct tpm_private {
struct tpm_chip *chip;
@@ -143,46 +144,7 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return length;
}
-static const struct file_operations vtpm_ops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = tpm_open,
- .read = tpm_read,
- .write = tpm_write,
- .release = tpm_release,
-};
-
-static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
-static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
-static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
-static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
- NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
-static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
-static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
-
-static struct attribute *vtpm_attrs[] = {
- &dev_attr_pubek.attr,
- &dev_attr_pcrs.attr,
- &dev_attr_enabled.attr,
- &dev_attr_active.attr,
- &dev_attr_owned.attr,
- &dev_attr_temp_deactivated.attr,
- &dev_attr_caps.attr,
- &dev_attr_cancel.attr,
- &dev_attr_durations.attr,
- &dev_attr_timeouts.attr,
- NULL,
-};
-
-static struct attribute_group vtpm_attr_grp = {
- .attrs = vtpm_attrs,
-};
-
-static const struct tpm_vendor_specific tpm_vtpm = {
+static const struct tpm_class_ops tpm_vtpm = {
.status = vtpm_status,
.recv = vtpm_recv,
.send = vtpm_send,
@@ -190,10 +152,6 @@ static const struct tpm_vendor_specific tpm_vtpm = {
.req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
.req_complete_val = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
.req_canceled = vtpm_req_canceled,
- .attr_group = &vtpm_attr_grp,
- .miscdev = {
- .fops = &vtpm_ops,
- },
};
static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
@@ -421,6 +379,9 @@ static int __init xen_tpmfront_init(void)
if (!xen_domain())
return -ENODEV;
+ if (!xen_has_pv_devices())
+ return -ENODEV;
+
return xenbus_register_frontend(&tpmfront_driver);
}
module_init(xen_tpmfront_init);
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index d5d2e4a985aa..daea84c41743 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -216,4 +216,4 @@ error:
ttyprintk_driver = NULL;
return ret;
}
-module_init(ttyprintk_init);
+device_initcall(ttyprintk_init);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index feea87cc6b8f..6928d094451d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -890,12 +890,10 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
} else {
 		/* Fall back to copying a page */
struct page *page = alloc_page(GFP_KERNEL);
- char *src = buf->ops->map(pipe, buf, 1);
- char *dst;
+ char *src;
if (!page)
return -ENOMEM;
- dst = kmap(page);
offset = sd->pos & ~PAGE_MASK;
@@ -903,9 +901,8 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
if (len + offset > PAGE_SIZE)
len = PAGE_SIZE - offset;
- memcpy(dst + offset, src + buf->offset, len);
-
- kunmap(page);
+ src = buf->ops->map(pipe, buf, 1);
+ memcpy(page_address(page) + offset, src + buf->offset, len);
buf->ops->unmap(pipe, buf, src);
sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 5c51115081b3..7641965d208d 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -23,16 +23,6 @@ config COMMON_CLK
menu "Common Clock Framework"
depends on COMMON_CLK
-config COMMON_CLK_DEBUG
- bool "DebugFS representation of clock tree"
- select DEBUG_FS
- ---help---
- Creates a directory hierarchy in debugfs for visualizing the clk
- tree structure. Each directory contains read-only members
- that export information specific to that clk node: clk_rate,
- clk_flags, clk_prepare_count, clk_enable_count &
- clk_notifier_count.
-
config COMMON_CLK_WM831X
tristate "Clock driver for WM831x/2x PMICs"
depends on MFD_WM831X
@@ -64,6 +54,16 @@ config COMMON_CLK_SI5351
This driver supports Silicon Labs 5351A/B/C programmable clock
generators.
+config COMMON_CLK_SI570
+ tristate "Clock driver for SiLabs 570 and compatible devices"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+	help
+ This driver supports Silicon Labs 570/571/598/599 programmable
+ clock generators.
+
config COMMON_CLK_S2MPS11
tristate "Clock driver for S2MPS11 MFD"
depends on MFD_SEC_CORE
@@ -107,6 +107,8 @@ config COMMON_CLK_KEYSTONE
Supports clock drivers for Keystone based SOCs. These SOCs have local
a power sleep control module that gate the clock to the IPs and PLLs.
+source "drivers/clk/qcom/Kconfig"
+
endmenu
source "drivers/clk/mvebu/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 7a10bc9a23e7..a367a9831717 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -9,40 +9,44 @@ obj-$(CONFIG_COMMON_CLK) += clk-gate.o
obj-$(CONFIG_COMMON_CLK) += clk-mux.o
obj-$(CONFIG_COMMON_CLK) += clk-composite.o
-# SoCs specific
-obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
-obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
-obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
-obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
-obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
-obj-$(CONFIG_ARCH_MXS) += mxs/
-obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
-obj-$(CONFIG_PLAT_SPEAR) += spear/
-obj-$(CONFIG_ARCH_U300) += clk-u300.o
-obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
-obj-$(CONFIG_ARCH_SIRF) += clk-prima2.o
-obj-$(CONFIG_PLAT_ORION) += mvebu/
+# hardware specific clock types
+# please keep this section sorted lexicographically by file/directory path name
+obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
+obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
+obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
+obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
+obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o
+obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
+obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
+obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
+obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o
+obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
+obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
+obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
+obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
+obj-$(CONFIG_ARCH_U300) += clk-u300.o
+obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
+obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
+obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
+obj-$(CONFIG_COMMON_CLK_AT91) += at91/
+obj-$(CONFIG_ARCH_HI3xxx) += hisilicon/
+obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/
ifeq ($(CONFIG_COMMON_CLK), y)
-obj-$(CONFIG_ARCH_MMP) += mmp/
+obj-$(CONFIG_ARCH_MMP) += mmp/
endif
-obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o
-obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
-obj-$(CONFIG_ARCH_SUNXI) += sunxi/
-obj-$(CONFIG_ARCH_U8500) += ux500/
-obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
-obj-$(CONFIG_ARCH_ZYNQ) += zynq/
-obj-$(CONFIG_ARCH_TEGRA) += tegra/
-obj-$(CONFIG_PLAT_SAMSUNG) += samsung/
-obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
-obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/
-
-obj-$(CONFIG_X86) += x86/
-
-# Chip specific
-obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
-obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
-obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
-obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
-obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
-obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
-obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o
+obj-$(CONFIG_PLAT_ORION) += mvebu/
+obj-$(CONFIG_ARCH_MXS) += mxs/
+obj-$(CONFIG_COMMON_CLK_QCOM) += qcom/
+obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
+obj-$(CONFIG_PLAT_SAMSUNG) += samsung/
+obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += shmobile/
+obj-$(CONFIG_ARCH_SIRF) += sirf/
+obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
+obj-$(CONFIG_PLAT_SPEAR) += spear/
+obj-$(CONFIG_ARCH_SUNXI) += sunxi/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
+obj-$(CONFIG_ARCH_OMAP2PLUS) += ti/
+obj-$(CONFIG_ARCH_U8500) += ux500/
+obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
+obj-$(CONFIG_X86) += x86/
+obj-$(CONFIG_ARCH_ZYNQ) += zynq/
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
new file mode 100644
index 000000000000..46c1d3d0d66b
--- /dev/null
+++ b/drivers/clk/at91/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for at91 specific clk
+#
+
+obj-y += pmc.o
+obj-y += clk-main.o clk-pll.o clk-plldiv.o clk-master.o
+obj-y += clk-system.o clk-peripheral.o clk-programmable.o
+
+obj-$(CONFIG_HAVE_AT91_UTMI) += clk-utmi.o
+obj-$(CONFIG_HAVE_AT91_USB_CLK) += clk-usb.o
+obj-$(CONFIG_HAVE_AT91_SMD) += clk-smd.o
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
new file mode 100644
index 000000000000..8e9e8cc0412d
--- /dev/null
+++ b/drivers/clk/at91/clk-main.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include "pmc.h"
+
+#define SLOW_CLOCK_FREQ 32768
+#define MAINF_DIV 16
+#define MAINFRDY_TIMEOUT (((MAINF_DIV + 1) * USEC_PER_SEC) / \
+ SLOW_CLOCK_FREQ)
+#define MAINF_LOOP_MIN_WAIT (USEC_PER_SEC / SLOW_CLOCK_FREQ)
+#define MAINF_LOOP_MAX_WAIT MAINFRDY_TIMEOUT
+
+struct clk_main {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned long rate;
+ unsigned int irq;
+ wait_queue_head_t wait;
+};
+
+#define to_clk_main(hw) container_of(hw, struct clk_main, hw)
+
+static irqreturn_t clk_main_irq_handler(int irq, void *dev_id)
+{
+ struct clk_main *clkmain = (struct clk_main *)dev_id;
+
+ wake_up(&clkmain->wait);
+ disable_irq_nosync(clkmain->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_main_prepare(struct clk_hw *hw)
+{
+ struct clk_main *clkmain = to_clk_main(hw);
+ struct at91_pmc *pmc = clkmain->pmc;
+ unsigned long halt_time, timeout;
+ u32 tmp;
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS)) {
+ enable_irq(clkmain->irq);
+ wait_event(clkmain->wait,
+ pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS);
+ }
+
+ if (clkmain->rate)
+ return 0;
+
+ timeout = jiffies + usecs_to_jiffies(MAINFRDY_TIMEOUT);
+ do {
+ halt_time = jiffies;
+ tmp = pmc_read(pmc, AT91_CKGR_MCFR);
+ if (tmp & AT91_PMC_MAINRDY)
+ return 0;
+ usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
+ } while (time_before(halt_time, timeout));
+
+ return 0;
+}
+
+static int clk_main_is_prepared(struct clk_hw *hw)
+{
+ struct clk_main *clkmain = to_clk_main(hw);
+
+ return !!(pmc_read(clkmain->pmc, AT91_PMC_SR) & AT91_PMC_MOSCS);
+}
+
+static unsigned long clk_main_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ struct clk_main *clkmain = to_clk_main(hw);
+ struct at91_pmc *pmc = clkmain->pmc;
+
+ if (clkmain->rate)
+ return clkmain->rate;
+
+ tmp = pmc_read(pmc, AT91_CKGR_MCFR) & AT91_PMC_MAINF;
+ clkmain->rate = (tmp * parent_rate) / MAINF_DIV;
+
+ return clkmain->rate;
+}
+
+static const struct clk_ops main_ops = {
+ .prepare = clk_main_prepare,
+ .is_prepared = clk_main_is_prepared,
+ .recalc_rate = clk_main_recalc_rate,
+};
+
+static struct clk * __init
+at91_clk_register_main(struct at91_pmc *pmc,
+ unsigned int irq,
+ const char *name,
+ const char *parent_name,
+ unsigned long rate)
+{
+ int ret;
+ struct clk_main *clkmain;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !irq || !name)
+ return ERR_PTR(-EINVAL);
+
+ if (!rate && !parent_name)
+ return ERR_PTR(-EINVAL);
+
+ clkmain = kzalloc(sizeof(*clkmain), GFP_KERNEL);
+ if (!clkmain)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &main_ops;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+ init.flags = parent_name ? 0 : CLK_IS_ROOT;
+
+ clkmain->hw.init = &init;
+ clkmain->rate = rate;
+ clkmain->pmc = pmc;
+ clkmain->irq = irq;
+ init_waitqueue_head(&clkmain->wait);
+ irq_set_status_flags(clkmain->irq, IRQ_NOAUTOEN);
+ ret = request_irq(clkmain->irq, clk_main_irq_handler,
+ IRQF_TRIGGER_HIGH, "clk-main", clkmain);
+ if (ret)
+ return ERR_PTR(ret);
+
+ clk = clk_register(NULL, &clkmain->hw);
+ if (IS_ERR(clk)) {
+ free_irq(clkmain->irq, clkmain);
+ kfree(clkmain);
+ }
+
+ return clk;
+}
+
+
+
+static void __init
+of_at91_clk_main_setup(struct device_node *np, struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ unsigned int irq;
+ const char *parent_name;
+ const char *name = np->name;
+ u32 rate = 0;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ of_property_read_string(np, "clock-output-names", &name);
+ of_property_read_u32(np, "clock-frequency", &rate);
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ return;
+
+ clk = at91_clk_register_main(pmc, irq, name, parent_name, rate);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+void __init of_at91rm9200_clk_main_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_main_setup(np, pmc);
+}
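A quick sanity check on clk_main_recalc_rate(): AT91_PMC_MAINF counts main-oscillator cycles over 16 slow-clock periods (MAINF_DIV), and the parent here is typically the 32768 Hz slow clock, so a hypothetical reading of MAINF = 5859 would give

	rate = (5859 * 32768) / 16 = 11999232 Hz	/* roughly a 12 MHz crystal */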
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
new file mode 100644
index 000000000000..bd313f7816a8
--- /dev/null
+++ b/drivers/clk/at91/clk-master.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "pmc.h"
+
+#define MASTER_SOURCE_MAX 4
+
+#define MASTER_PRES_MASK 0x7
+#define MASTER_PRES_MAX MASTER_PRES_MASK
+#define MASTER_DIV_SHIFT 8
+#define MASTER_DIV_MASK 0x3
+
+struct clk_master_characteristics {
+ struct clk_range output;
+ u32 divisors[4];
+ u8 have_div3_pres;
+};
+
+struct clk_master_layout {
+ u32 mask;
+ u8 pres_shift;
+};
+
+#define to_clk_master(hw) container_of(hw, struct clk_master, hw)
+
+struct clk_master {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+ const struct clk_master_layout *layout;
+ const struct clk_master_characteristics *characteristics;
+};
+
+static irqreturn_t clk_master_irq_handler(int irq, void *dev_id)
+{
+ struct clk_master *master = (struct clk_master *)dev_id;
+
+ wake_up(&master->wait);
+ disable_irq_nosync(master->irq);
+
+ return IRQ_HANDLED;
+}
+static int clk_master_prepare(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ struct at91_pmc *pmc = master->pmc;
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY)) {
+ enable_irq(master->irq);
+ wait_event(master->wait,
+ pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY);
+ }
+
+ return 0;
+}
+
+static int clk_master_is_prepared(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+
+ return !!(pmc_read(master->pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY);
+}
+
+static unsigned long clk_master_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u8 pres;
+ u8 div;
+ unsigned long rate = parent_rate;
+ struct clk_master *master = to_clk_master(hw);
+ struct at91_pmc *pmc = master->pmc;
+ const struct clk_master_layout *layout = master->layout;
+ const struct clk_master_characteristics *characteristics =
+ master->characteristics;
+ u32 tmp;
+
+ pmc_lock(pmc);
+ tmp = pmc_read(pmc, AT91_PMC_MCKR) & layout->mask;
+ pmc_unlock(pmc);
+
+ pres = (tmp >> layout->pres_shift) & MASTER_PRES_MASK;
+ div = (tmp >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+
+ if (characteristics->have_div3_pres && pres == MASTER_PRES_MAX)
+ rate /= 3;
+ else
+ rate >>= pres;
+
+ rate /= characteristics->divisors[div];
+
+	if (rate < characteristics->output.min)
+		pr_warn("master clk is underclocked\n");
+	else if (rate > characteristics->output.max)
+		pr_warn("master clk is overclocked\n");
+
+ return rate;
+}
+
+static u8 clk_master_get_parent(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ struct at91_pmc *pmc = master->pmc;
+
+ return pmc_read(pmc, AT91_PMC_MCKR) & AT91_PMC_CSS;
+}
+
+static const struct clk_ops master_ops = {
+ .prepare = clk_master_prepare,
+ .is_prepared = clk_master_is_prepared,
+ .recalc_rate = clk_master_recalc_rate,
+ .get_parent = clk_master_get_parent,
+};
+
+static struct clk * __init
+at91_clk_register_master(struct at91_pmc *pmc, unsigned int irq,
+ const char *name, int num_parents,
+ const char **parent_names,
+ const struct clk_master_layout *layout,
+ const struct clk_master_characteristics *characteristics)
+{
+ int ret;
+ struct clk_master *master;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !irq || !name || !num_parents || !parent_names)
+ return ERR_PTR(-EINVAL);
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &master_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = 0;
+
+ master->hw.init = &init;
+ master->layout = layout;
+ master->characteristics = characteristics;
+ master->pmc = pmc;
+ master->irq = irq;
+ init_waitqueue_head(&master->wait);
+ irq_set_status_flags(master->irq, IRQ_NOAUTOEN);
+ ret = request_irq(master->irq, clk_master_irq_handler,
+ IRQF_TRIGGER_HIGH, "clk-master", master);
+	if (ret) {
+		kfree(master);
+		return ERR_PTR(ret);
+	}
+
+	clk = clk_register(NULL, &master->hw);
+	if (IS_ERR(clk)) {
+		free_irq(master->irq, master);
+		kfree(master);
+	}
+
+ return clk;
+}
+
+static const struct clk_master_layout at91rm9200_master_layout = {
+ .mask = 0x31F,
+ .pres_shift = 2,
+};
+
+static const struct clk_master_layout at91sam9x5_master_layout = {
+ .mask = 0x373,
+ .pres_shift = 4,
+};
+
+static struct clk_master_characteristics * __init
+of_at91_clk_master_get_characteristics(struct device_node *np)
+{
+ struct clk_master_characteristics *characteristics;
+
+ characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL);
+ if (!characteristics)
+ return NULL;
+
+ if (of_at91_get_clk_range(np, "atmel,clk-output-range", &characteristics->output))
+ goto out_free_characteristics;
+
+ of_property_read_u32_array(np, "atmel,clk-divisors",
+ characteristics->divisors, 4);
+
+ characteristics->have_div3_pres =
+ of_property_read_bool(np, "atmel,master-clk-have-div3-pres");
+
+ return characteristics;
+
+out_free_characteristics:
+ kfree(characteristics);
+ return NULL;
+}
+
+static void __init
+of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc,
+ const struct clk_master_layout *layout)
+{
+ struct clk *clk;
+ int num_parents;
+ int i;
+ unsigned int irq;
+ const char *parent_names[MASTER_SOURCE_MAX];
+ const char *name = np->name;
+ struct clk_master_characteristics *characteristics;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (num_parents <= 0 || num_parents > MASTER_SOURCE_MAX)
+ return;
+
+ for (i = 0; i < num_parents; ++i) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ characteristics = of_at91_clk_master_get_characteristics(np);
+ if (!characteristics)
+ return;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ return;
+
+ clk = at91_clk_register_master(pmc, irq, name, num_parents,
+ parent_names, layout,
+ characteristics);
+ if (IS_ERR(clk))
+ goto out_free_characteristics;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ return;
+
+out_free_characteristics:
+ kfree(characteristics);
+}
+
+void __init of_at91rm9200_clk_master_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_master_setup(np, pmc, &at91rm9200_master_layout);
+}
+
+void __init of_at91sam9x5_clk_master_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_master_setup(np, pmc, &at91sam9x5_master_layout);
+}
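
To make the MCKR decoding in clk_master_recalc_rate() concrete, here is a stand-alone sketch of the same arithmetic. The register value, divisor table and source rate are made-up examples, not values from a datasheet:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned long divisors[4] = {1, 2, 4, 3};	/* hypothetical MDIV table */
	uint32_t mckr = 0x0212;		/* example MCKR value, sam9x5-style layout */
	unsigned int pres_shift = 4;	/* PRES field at bits 4..6 */
	unsigned long rate = 800000000;	/* example selected source (CSS) rate */

	unsigned int pres = (mckr >> pres_shift) & 0x7;
	unsigned int div = (mckr >> 8) & 0x3;

	if (pres == 0x7)		/* MASTER_PRES_MAX can mean "divide by 3" */
		rate /= 3;
	else
		rate >>= pres;		/* otherwise a power-of-two prescaler */

	rate /= divisors[div];
	printf("MCK = %lu Hz (pres=%u, mdiv index=%u)\n", rate, pres, div);
	return 0;
}
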
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
new file mode 100644
index 000000000000..597fed423d7d
--- /dev/null
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -0,0 +1,410 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "pmc.h"
+
+#define PERIPHERAL_MAX 64
+
+#define PERIPHERAL_AT91RM9200 0
+#define PERIPHERAL_AT91SAM9X5 1
+
+#define PERIPHERAL_ID_MIN 2
+#define PERIPHERAL_ID_MAX 31
+#define PERIPHERAL_MASK(id) (1 << ((id) & PERIPHERAL_ID_MAX))
+
+#define PERIPHERAL_RSHIFT_MASK 0x3
+#define PERIPHERAL_RSHIFT(val) (((val) >> 16) & PERIPHERAL_RSHIFT_MASK)
+
+#define PERIPHERAL_MAX_SHIFT 4
+
+struct clk_peripheral {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ u32 id;
+};
+
+#define to_clk_peripheral(hw) container_of(hw, struct clk_peripheral, hw)
+
+struct clk_sam9x5_peripheral {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ struct clk_range range;
+ u32 id;
+ u32 div;
+ bool auto_div;
+};
+
+#define to_clk_sam9x5_peripheral(hw) \
+ container_of(hw, struct clk_sam9x5_peripheral, hw)
+
+static int clk_peripheral_enable(struct clk_hw *hw)
+{
+ struct clk_peripheral *periph = to_clk_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+ int offset = AT91_PMC_PCER;
+ u32 id = periph->id;
+
+ if (id < PERIPHERAL_ID_MIN)
+ return 0;
+ if (id > PERIPHERAL_ID_MAX)
+ offset = AT91_PMC_PCER1;
+ pmc_write(pmc, offset, PERIPHERAL_MASK(id));
+ return 0;
+}
+
+static void clk_peripheral_disable(struct clk_hw *hw)
+{
+ struct clk_peripheral *periph = to_clk_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+ int offset = AT91_PMC_PCDR;
+ u32 id = periph->id;
+
+ if (id < PERIPHERAL_ID_MIN)
+ return;
+ if (id > PERIPHERAL_ID_MAX)
+ offset = AT91_PMC_PCDR1;
+ pmc_write(pmc, offset, PERIPHERAL_MASK(id));
+}
+
+static int clk_peripheral_is_enabled(struct clk_hw *hw)
+{
+ struct clk_peripheral *periph = to_clk_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+ int offset = AT91_PMC_PCSR;
+ u32 id = periph->id;
+
+ if (id < PERIPHERAL_ID_MIN)
+ return 1;
+ if (id > PERIPHERAL_ID_MAX)
+ offset = AT91_PMC_PCSR1;
+ return !!(pmc_read(pmc, offset) & PERIPHERAL_MASK(id));
+}
+
+static const struct clk_ops peripheral_ops = {
+ .enable = clk_peripheral_enable,
+ .disable = clk_peripheral_disable,
+ .is_enabled = clk_peripheral_is_enabled,
+};
+
+static struct clk * __init
+at91_clk_register_peripheral(struct at91_pmc *pmc, const char *name,
+ const char *parent_name, u32 id)
+{
+ struct clk_peripheral *periph;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !name || !parent_name || id > PERIPHERAL_ID_MAX)
+ return ERR_PTR(-EINVAL);
+
+ periph = kzalloc(sizeof(*periph), GFP_KERNEL);
+ if (!periph)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &peripheral_ops;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ init.flags = 0;
+
+ periph->id = id;
+ periph->hw.init = &init;
+ periph->pmc = pmc;
+
+ clk = clk_register(NULL, &periph->hw);
+ if (IS_ERR(clk))
+ kfree(periph);
+
+ return clk;
+}
+
+static void clk_sam9x5_peripheral_autodiv(struct clk_sam9x5_peripheral *periph)
+{
+ struct clk *parent;
+ unsigned long parent_rate;
+ int shift = 0;
+
+ if (!periph->auto_div)
+ return;
+
+ if (periph->range.max) {
+ parent = clk_get_parent_by_index(periph->hw.clk, 0);
+ parent_rate = __clk_get_rate(parent);
+ if (!parent_rate)
+ return;
+
+ for (; shift < PERIPHERAL_MAX_SHIFT; shift++) {
+ if (parent_rate >> shift <= periph->range.max)
+ break;
+ }
+ }
+
+ periph->auto_div = false;
+ periph->div = shift;
+}
+
+static int clk_sam9x5_peripheral_enable(struct clk_hw *hw)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+
+ if (periph->id < PERIPHERAL_ID_MIN)
+ return 0;
+
+ pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID) |
+ AT91_PMC_PCR_CMD |
+ AT91_PMC_PCR_DIV(periph->div) |
+ AT91_PMC_PCR_EN);
+ return 0;
+}
+
+static void clk_sam9x5_peripheral_disable(struct clk_hw *hw)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+
+ if (periph->id < PERIPHERAL_ID_MIN)
+ return;
+
+ pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID) |
+ AT91_PMC_PCR_CMD);
+}
+
+static int clk_sam9x5_peripheral_is_enabled(struct clk_hw *hw)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+ int ret;
+
+ if (periph->id < PERIPHERAL_ID_MIN)
+ return 1;
+
+ pmc_lock(pmc);
+ pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID));
+ ret = !!(pmc_read(pmc, AT91_PMC_PCR) & AT91_PMC_PCR_EN);
+ pmc_unlock(pmc);
+
+ return ret;
+}
+
+static unsigned long
+clk_sam9x5_peripheral_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+ u32 tmp;
+
+ if (periph->id < PERIPHERAL_ID_MIN)
+ return parent_rate;
+
+ pmc_lock(pmc);
+ pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID));
+ tmp = pmc_read(pmc, AT91_PMC_PCR);
+ pmc_unlock(pmc);
+
+ if (tmp & AT91_PMC_PCR_EN) {
+ periph->div = PERIPHERAL_RSHIFT(tmp);
+ periph->auto_div = false;
+ } else {
+ clk_sam9x5_peripheral_autodiv(periph);
+ }
+
+ return parent_rate >> periph->div;
+}
+
+static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ int shift = 0;
+ unsigned long best_rate;
+ unsigned long best_diff;
+ unsigned long cur_rate = *parent_rate;
+ unsigned long cur_diff;
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
+ return *parent_rate;
+
+ if (periph->range.max) {
+ for (; shift < PERIPHERAL_MAX_SHIFT; shift++) {
+ cur_rate = *parent_rate >> shift;
+ if (cur_rate <= periph->range.max)
+ break;
+ }
+ }
+
+ if (rate >= cur_rate)
+ return cur_rate;
+
+ best_diff = cur_rate - rate;
+ best_rate = cur_rate;
+ for (; shift < PERIPHERAL_MAX_SHIFT; shift++) {
+ cur_rate = *parent_rate >> shift;
+ if (cur_rate < rate)
+ cur_diff = rate - cur_rate;
+ else
+ cur_diff = cur_rate - rate;
+
+ if (cur_diff < best_diff) {
+ best_diff = cur_diff;
+ best_rate = cur_rate;
+ }
+
+ if (!best_diff || cur_rate < rate)
+ break;
+ }
+
+ return best_rate;
+}
+
+static int clk_sam9x5_peripheral_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ int shift;
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
+ if (parent_rate == rate)
+ return 0;
+ else
+ return -EINVAL;
+ }
+
+ if (periph->range.max && rate > periph->range.max)
+ return -EINVAL;
+
+ for (shift = 0; shift < PERIPHERAL_MAX_SHIFT; shift++) {
+ if (parent_rate >> shift == rate) {
+ periph->auto_div = false;
+ periph->div = shift;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static const struct clk_ops sam9x5_peripheral_ops = {
+ .enable = clk_sam9x5_peripheral_enable,
+ .disable = clk_sam9x5_peripheral_disable,
+ .is_enabled = clk_sam9x5_peripheral_is_enabled,
+ .recalc_rate = clk_sam9x5_peripheral_recalc_rate,
+ .round_rate = clk_sam9x5_peripheral_round_rate,
+ .set_rate = clk_sam9x5_peripheral_set_rate,
+};
+
+static struct clk * __init
+at91_clk_register_sam9x5_peripheral(struct at91_pmc *pmc, const char *name,
+ const char *parent_name, u32 id,
+ const struct clk_range *range)
+{
+ struct clk_sam9x5_peripheral *periph;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !name || !parent_name)
+ return ERR_PTR(-EINVAL);
+
+ periph = kzalloc(sizeof(*periph), GFP_KERNEL);
+ if (!periph)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &sam9x5_peripheral_ops;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ init.flags = 0;
+
+ periph->id = id;
+ periph->hw.init = &init;
+ periph->div = 0;
+ periph->pmc = pmc;
+ periph->auto_div = true;
+ periph->range = *range;
+
+ clk = clk_register(NULL, &periph->hw);
+ if (IS_ERR(clk))
+ kfree(periph);
+ else
+ clk_sam9x5_peripheral_autodiv(periph);
+
+ return clk;
+}
+
+static void __init
+of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type)
+{
+ int num;
+ u32 id;
+ struct clk *clk;
+ const char *parent_name;
+ const char *name;
+ struct device_node *periphclknp;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ num = of_get_child_count(np);
+ if (!num || num > PERIPHERAL_MAX)
+ return;
+
+ for_each_child_of_node(np, periphclknp) {
+ if (of_property_read_u32(periphclknp, "reg", &id))
+ continue;
+
+ if (id >= PERIPHERAL_MAX)
+ continue;
+
+ if (of_property_read_string(np, "clock-output-names", &name))
+ name = periphclknp->name;
+
+ if (type == PERIPHERAL_AT91RM9200) {
+ clk = at91_clk_register_peripheral(pmc, name,
+ parent_name, id);
+ } else {
+ struct clk_range range = CLK_RANGE(0, 0);
+
+ of_at91_get_clk_range(periphclknp,
+ "atmel,clk-output-range",
+ &range);
+
+ clk = at91_clk_register_sam9x5_peripheral(pmc, name,
+ parent_name,
+ id, &range);
+ }
+
+ if (IS_ERR(clk))
+ continue;
+
+ of_clk_add_provider(periphclknp, of_clk_src_simple_get, clk);
+ }
+}
+
+void __init of_at91rm9200_clk_periph_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_periph_setup(np, pmc, PERIPHERAL_AT91RM9200);
+}
+
+void __init of_at91sam9x5_clk_periph_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_periph_setup(np, pmc, PERIPHERAL_AT91SAM9X5);
+}
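
The automatic divider chosen by clk_sam9x5_peripheral_autodiv() is simply the smallest power-of-two shift that keeps the peripheral clock at or below the maximum given in atmel,clk-output-range. A small stand-alone sketch with made-up rates:

#include <stdio.h>

#define PERIPHERAL_MAX_SHIFT 4

int main(void)
{
	unsigned long parent_rate = 132000000;	/* hypothetical MCK */
	unsigned long range_max = 50000000;	/* hypothetical range maximum */
	int shift;

	for (shift = 0; shift < PERIPHERAL_MAX_SHIFT; shift++)
		if ((parent_rate >> shift) <= range_max)
			break;

	printf("div = %d (rate %lu Hz)\n", 1 << shift, parent_rate >> shift);
	return 0;
}
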
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
new file mode 100644
index 000000000000..cf6ed023504c
--- /dev/null
+++ b/drivers/clk/at91/clk-pll.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "pmc.h"
+
+#define PLL_STATUS_MASK(id) (1 << (1 + (id)))
+#define PLL_REG(id) (AT91_CKGR_PLLAR + ((id) * 4))
+#define PLL_DIV_MASK 0xff
+#define PLL_DIV_MAX PLL_DIV_MASK
+#define PLL_DIV(reg) ((reg) & PLL_DIV_MASK)
+#define PLL_MUL(reg, layout) (((reg) >> (layout)->mul_shift) & \
+ (layout)->mul_mask)
+#define PLL_ICPR_SHIFT(id) ((id) * 16)
+#define PLL_ICPR_MASK(id) (0xffff << PLL_ICPR_SHIFT(id))
+#define PLL_MAX_COUNT 0x3ff
+#define PLL_COUNT_SHIFT 8
+#define PLL_OUT_SHIFT 14
+#define PLL_MAX_ID 1
+
+struct clk_pll_characteristics {
+ struct clk_range input;
+ int num_output;
+ struct clk_range *output;
+ u16 *icpll;
+ u8 *out;
+};
+
+struct clk_pll_layout {
+ u32 pllr_mask;
+ u16 mul_mask;
+ u8 mul_shift;
+};
+
+#define to_clk_pll(hw) container_of(hw, struct clk_pll, hw)
+
+struct clk_pll {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+ u8 id;
+ u8 div;
+ u8 range;
+ u16 mul;
+ const struct clk_pll_layout *layout;
+ const struct clk_pll_characteristics *characteristics;
+};
+
+static irqreturn_t clk_pll_irq_handler(int irq, void *dev_id)
+{
+ struct clk_pll *pll = (struct clk_pll *)dev_id;
+
+ wake_up(&pll->wait);
+ disable_irq_nosync(pll->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_pll_prepare(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ struct at91_pmc *pmc = pll->pmc;
+ const struct clk_pll_layout *layout = pll->layout;
+ const struct clk_pll_characteristics *characteristics =
+ pll->characteristics;
+ u8 id = pll->id;
+ u32 mask = PLL_STATUS_MASK(id);
+ int offset = PLL_REG(id);
+ u8 out = 0;
+ u32 pllr, icpr;
+ u8 div;
+ u16 mul;
+
+ pllr = pmc_read(pmc, offset);
+ div = PLL_DIV(pllr);
+ mul = PLL_MUL(pllr, layout);
+
+ if ((pmc_read(pmc, AT91_PMC_SR) & mask) &&
+ (div == pll->div && mul == pll->mul))
+ return 0;
+
+ if (characteristics->out)
+ out = characteristics->out[pll->range];
+ if (characteristics->icpll) {
+ icpr = pmc_read(pmc, AT91_PMC_PLLICPR) & ~PLL_ICPR_MASK(id);
+ icpr |= (characteristics->icpll[pll->range] <<
+ PLL_ICPR_SHIFT(id));
+ pmc_write(pmc, AT91_PMC_PLLICPR, icpr);
+ }
+
+ pllr &= ~layout->pllr_mask;
+ pllr |= layout->pllr_mask &
+ (pll->div | (PLL_MAX_COUNT << PLL_COUNT_SHIFT) |
+ (out << PLL_OUT_SHIFT) |
+ ((pll->mul & layout->mul_mask) << layout->mul_shift));
+ pmc_write(pmc, offset, pllr);
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) {
+ enable_irq(pll->irq);
+ wait_event(pll->wait,
+ pmc_read(pmc, AT91_PMC_SR) & mask);
+ }
+
+ return 0;
+}
+
+static int clk_pll_is_prepared(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ struct at91_pmc *pmc = pll->pmc;
+
+ return !!(pmc_read(pmc, AT91_PMC_SR) &
+ PLL_STATUS_MASK(pll->id));
+}
+
+static void clk_pll_unprepare(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ struct at91_pmc *pmc = pll->pmc;
+ const struct clk_pll_layout *layout = pll->layout;
+ int offset = PLL_REG(pll->id);
+ u32 tmp = pmc_read(pmc, offset) & ~(layout->pllr_mask);
+
+ pmc_write(pmc, offset, tmp);
+}
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ const struct clk_pll_layout *layout = pll->layout;
+ struct at91_pmc *pmc = pll->pmc;
+ int offset = PLL_REG(pll->id);
+ u32 tmp = pmc_read(pmc, offset) & layout->pllr_mask;
+ u8 div = PLL_DIV(tmp);
+ u16 mul = PLL_MUL(tmp, layout);
+ if (!div || !mul)
+ return 0;
+
+ return (parent_rate * (mul + 1)) / div;
+}
+
+static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
+ unsigned long parent_rate,
+ u32 *div, u32 *mul,
+ u32 *index) {
+ unsigned long maxrate;
+ unsigned long minrate;
+ unsigned long divrate;
+ unsigned long bestdiv = 1;
+ unsigned long bestmul;
+ unsigned long tmpdiv;
+ unsigned long roundup;
+ unsigned long rounddown;
+ unsigned long remainder;
+ unsigned long bestremainder;
+ unsigned long maxmul;
+ unsigned long maxdiv;
+ unsigned long mindiv;
+ int i = 0;
+ const struct clk_pll_layout *layout = pll->layout;
+ const struct clk_pll_characteristics *characteristics =
+ pll->characteristics;
+
+ /* Minimum divider = 1 */
+ /* Maximum multiplier = max_mul */
+ maxmul = layout->mul_mask + 1;
+ maxrate = (parent_rate * maxmul) / 1;
+
+ /* Maximum divider = max_div */
+ /* Minimum multiplier = 2 */
+ maxdiv = PLL_DIV_MAX;
+ minrate = (parent_rate * 2) / maxdiv;
+
+	if (parent_rate < characteristics->input.min ||
+	    parent_rate > characteristics->input.max)
+ return -ERANGE;
+
+	if (rate < minrate || rate > maxrate)
+		return -ERANGE;
+
+	for (i = 0; i < characteristics->num_output; i++) {
+		if (rate >= characteristics->output[i].min &&
+		    rate <= characteristics->output[i].max)
+			break;
+	}
+
+ if (i >= characteristics->num_output)
+ return -ERANGE;
+
+ bestmul = rate / parent_rate;
+ rounddown = parent_rate % rate;
+ roundup = rate - rounddown;
+ bestremainder = roundup < rounddown ? roundup : rounddown;
+
+ if (!bestremainder) {
+ if (div)
+ *div = bestdiv;
+ if (mul)
+ *mul = bestmul;
+ if (index)
+ *index = i;
+ return rate;
+ }
+
+ maxdiv = 255 / (bestmul + 1);
+ if (parent_rate / maxdiv < characteristics->input.min)
+ maxdiv = parent_rate / characteristics->input.min;
+ mindiv = parent_rate / characteristics->input.max;
+ if (parent_rate % characteristics->input.max)
+ mindiv++;
+
+ for (tmpdiv = mindiv; tmpdiv < maxdiv; tmpdiv++) {
+ divrate = parent_rate / tmpdiv;
+
+ rounddown = rate % divrate;
+ roundup = divrate - rounddown;
+ remainder = roundup < rounddown ? roundup : rounddown;
+
+ if (remainder < bestremainder) {
+ bestremainder = remainder;
+ bestmul = rate / divrate;
+ bestdiv = tmpdiv;
+ }
+
+ if (!remainder)
+ break;
+ }
+
+ rate = (parent_rate / bestdiv) * bestmul;
+
+ if (div)
+ *div = bestdiv;
+ if (mul)
+ *mul = bestmul;
+ if (index)
+ *index = i;
+
+ return rate;
+}
+
+static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ return clk_pll_get_best_div_mul(pll, rate, *parent_rate,
+ NULL, NULL, NULL);
+}
+
+static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ long ret;
+ u32 div;
+ u32 mul;
+ u32 index;
+
+ ret = clk_pll_get_best_div_mul(pll, rate, parent_rate,
+ &div, &mul, &index);
+ if (ret < 0)
+ return ret;
+
+ pll->range = index;
+ pll->div = div;
+ pll->mul = mul;
+
+ return 0;
+}
+
+static const struct clk_ops pll_ops = {
+ .prepare = clk_pll_prepare,
+ .unprepare = clk_pll_unprepare,
+ .is_prepared = clk_pll_is_prepared,
+ .recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_round_rate,
+ .set_rate = clk_pll_set_rate,
+};
+
+static struct clk * __init
+at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name,
+ const char *parent_name, u8 id,
+ const struct clk_pll_layout *layout,
+ const struct clk_pll_characteristics *characteristics)
+{
+ struct clk_pll *pll;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+ int ret;
+ int offset = PLL_REG(id);
+ u32 tmp;
+
+ if (id > PLL_MAX_ID)
+ return ERR_PTR(-EINVAL);
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &pll_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_SET_RATE_GATE;
+
+ pll->id = id;
+ pll->hw.init = &init;
+ pll->layout = layout;
+ pll->characteristics = characteristics;
+ pll->pmc = pmc;
+ pll->irq = irq;
+ tmp = pmc_read(pmc, offset) & layout->pllr_mask;
+ pll->div = PLL_DIV(tmp);
+ pll->mul = PLL_MUL(tmp, layout);
+ init_waitqueue_head(&pll->wait);
+ irq_set_status_flags(pll->irq, IRQ_NOAUTOEN);
+ ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH,
+ id ? "clk-pllb" : "clk-plla", pll);
+	if (ret) {
+		kfree(pll);
+		return ERR_PTR(ret);
+	}
+
+	clk = clk_register(NULL, &pll->hw);
+	if (IS_ERR(clk)) {
+		free_irq(pll->irq, pll);
+		kfree(pll);
+	}
+
+ return clk;
+}
+
+static const struct clk_pll_layout at91rm9200_pll_layout = {
+ .pllr_mask = 0x7FFFFFF,
+ .mul_shift = 16,
+ .mul_mask = 0x7FF,
+};
+
+static const struct clk_pll_layout at91sam9g45_pll_layout = {
+ .pllr_mask = 0xFFFFFF,
+ .mul_shift = 16,
+ .mul_mask = 0xFF,
+};
+
+static const struct clk_pll_layout at91sam9g20_pllb_layout = {
+ .pllr_mask = 0x3FFFFF,
+ .mul_shift = 16,
+ .mul_mask = 0x3F,
+};
+
+static const struct clk_pll_layout sama5d3_pll_layout = {
+ .pllr_mask = 0x1FFFFFF,
+ .mul_shift = 18,
+ .mul_mask = 0x7F,
+};
+
+static struct clk_pll_characteristics * __init
+of_at91_clk_pll_get_characteristics(struct device_node *np)
+{
+ int i;
+ int offset;
+ u32 tmp;
+ int num_output;
+ u32 num_cells;
+ struct clk_range input;
+ struct clk_range *output;
+ u8 *out = NULL;
+ u16 *icpll = NULL;
+ struct clk_pll_characteristics *characteristics;
+
+ if (of_at91_get_clk_range(np, "atmel,clk-input-range", &input))
+ return NULL;
+
+ if (of_property_read_u32(np, "#atmel,pll-clk-output-range-cells",
+ &num_cells))
+ return NULL;
+
+ if (num_cells < 2 || num_cells > 4)
+ return NULL;
+
+ if (!of_get_property(np, "atmel,pll-clk-output-ranges", &tmp))
+ return NULL;
+ num_output = tmp / (sizeof(u32) * num_cells);
+
+ characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL);
+ if (!characteristics)
+ return NULL;
+
+ output = kzalloc(sizeof(*output) * num_output, GFP_KERNEL);
+ if (!output)
+ goto out_free_characteristics;
+
+ if (num_cells > 2) {
+ out = kzalloc(sizeof(*out) * num_output, GFP_KERNEL);
+ if (!out)
+ goto out_free_output;
+ }
+
+ if (num_cells > 3) {
+ icpll = kzalloc(sizeof(*icpll) * num_output, GFP_KERNEL);
+ if (!icpll)
+ goto out_free_output;
+ }
+
+ for (i = 0; i < num_output; i++) {
+ offset = i * num_cells;
+ if (of_property_read_u32_index(np,
+ "atmel,pll-clk-output-ranges",
+ offset, &tmp))
+ goto out_free_output;
+ output[i].min = tmp;
+ if (of_property_read_u32_index(np,
+ "atmel,pll-clk-output-ranges",
+ offset + 1, &tmp))
+ goto out_free_output;
+ output[i].max = tmp;
+
+ if (num_cells == 2)
+ continue;
+
+ if (of_property_read_u32_index(np,
+ "atmel,pll-clk-output-ranges",
+ offset + 2, &tmp))
+ goto out_free_output;
+ out[i] = tmp;
+
+ if (num_cells == 3)
+ continue;
+
+ if (of_property_read_u32_index(np,
+ "atmel,pll-clk-output-ranges",
+ offset + 3, &tmp))
+ goto out_free_output;
+ icpll[i] = tmp;
+ }
+
+ characteristics->input = input;
+ characteristics->num_output = num_output;
+ characteristics->output = output;
+ characteristics->out = out;
+ characteristics->icpll = icpll;
+ return characteristics;
+
+out_free_output:
+ kfree(icpll);
+ kfree(out);
+ kfree(output);
+out_free_characteristics:
+ kfree(characteristics);
+ return NULL;
+}
+
+static void __init
+of_at91_clk_pll_setup(struct device_node *np, struct at91_pmc *pmc,
+ const struct clk_pll_layout *layout)
+{
+ u32 id;
+ unsigned int irq;
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+ struct clk_pll_characteristics *characteristics;
+
+ if (of_property_read_u32(np, "reg", &id))
+ return;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ characteristics = of_at91_clk_pll_get_characteristics(np);
+ if (!characteristics)
+ return;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ return;
+
+ clk = at91_clk_register_pll(pmc, irq, name, parent_name, id, layout,
+ characteristics);
+ if (IS_ERR(clk))
+ goto out_free_characteristics;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ return;
+
+out_free_characteristics:
+ kfree(characteristics);
+}
+
+void __init of_at91rm9200_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_pll_setup(np, pmc, &at91rm9200_pll_layout);
+}
+
+void __init of_at91sam9g45_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_pll_setup(np, pmc, &at91sam9g45_pll_layout);
+}
+
+void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_pll_setup(np, pmc, &at91sam9g20_pllb_layout);
+}
+
+void __init of_sama5d3_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_pll_setup(np, pmc, &sama5d3_pll_layout);
+}
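
As a worked example of the formula used by clk_pll_recalc_rate(), output = parent * (MUL + 1) / DIV, the sketch below plugs in illustrative field values (not taken from any particular board):

#include <stdio.h>

int main(void)
{
	unsigned long parent_rate = 12000000;	/* e.g. a 12 MHz main crystal */
	unsigned int div = 1;			/* PLL DIV field */
	unsigned int mul = 65;			/* PLL MUL field */

	unsigned long rate = (parent_rate * (mul + 1)) / div;
	printf("PLL output = %lu Hz\n", rate);	/* 792 MHz in this example */
	return 0;
}
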
diff --git a/drivers/clk/at91/clk-plldiv.c b/drivers/clk/at91/clk-plldiv.c
new file mode 100644
index 000000000000..ea226562bb40
--- /dev/null
+++ b/drivers/clk/at91/clk-plldiv.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "pmc.h"
+
+#define to_clk_plldiv(hw) container_of(hw, struct clk_plldiv, hw)
+
+struct clk_plldiv {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+};
+
+static unsigned long clk_plldiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_plldiv *plldiv = to_clk_plldiv(hw);
+ struct at91_pmc *pmc = plldiv->pmc;
+
+ if (pmc_read(pmc, AT91_PMC_MCKR) & AT91_PMC_PLLADIV2)
+ return parent_rate / 2;
+
+ return parent_rate;
+}
+
+static long clk_plldiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long div;
+
+ if (rate > *parent_rate)
+ return *parent_rate;
+ div = *parent_rate / 2;
+ if (rate < div)
+ return div;
+
+ if (rate - div < *parent_rate - rate)
+ return div;
+
+ return *parent_rate;
+}
+
+static int clk_plldiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_plldiv *plldiv = to_clk_plldiv(hw);
+ struct at91_pmc *pmc = plldiv->pmc;
+ u32 tmp;
+
+ if (parent_rate != rate && (parent_rate / 2) != rate)
+ return -EINVAL;
+
+ pmc_lock(pmc);
+ tmp = pmc_read(pmc, AT91_PMC_MCKR) & ~AT91_PMC_PLLADIV2;
+ if ((parent_rate / 2) == rate)
+ tmp |= AT91_PMC_PLLADIV2;
+ pmc_write(pmc, AT91_PMC_MCKR, tmp);
+ pmc_unlock(pmc);
+
+ return 0;
+}
+
+static const struct clk_ops plldiv_ops = {
+ .recalc_rate = clk_plldiv_recalc_rate,
+ .round_rate = clk_plldiv_round_rate,
+ .set_rate = clk_plldiv_set_rate,
+};
+
+static struct clk * __init
+at91_clk_register_plldiv(struct at91_pmc *pmc, const char *name,
+ const char *parent_name)
+{
+ struct clk_plldiv *plldiv;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ plldiv = kzalloc(sizeof(*plldiv), GFP_KERNEL);
+ if (!plldiv)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &plldiv_ops;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+ init.flags = CLK_SET_RATE_GATE;
+
+ plldiv->hw.init = &init;
+ plldiv->pmc = pmc;
+
+ clk = clk_register(NULL, &plldiv->hw);
+
+ if (IS_ERR(clk))
+ kfree(plldiv);
+
+ return clk;
+}
+
+static void __init
+of_at91_clk_plldiv_setup(struct device_node *np, struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91_clk_register_plldiv(pmc, name, parent_name);
+
+ if (IS_ERR(clk))
+ return;
+
+	of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+void __init of_at91sam9x5_clk_plldiv_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_plldiv_setup(np, pmc);
+}
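
clk_plldiv_round_rate() can only deliver the PLLA rate or half of it, so a requested rate is snapped to whichever of the two is closer. A stand-alone sketch of that decision, with example rates:

#include <stdio.h>

static unsigned long plldiv_round(unsigned long rate, unsigned long parent)
{
	unsigned long half = parent / 2;

	if (rate > parent)
		return parent;
	if (rate < half)
		return half;
	/* between half and parent: pick the nearer endpoint */
	return (rate - half < parent - rate) ? half : parent;
}

int main(void)
{
	printf("%lu\n", plldiv_round(500000000, 792000000));	/* -> 396000000 */
	printf("%lu\n", plldiv_round(700000000, 792000000));	/* -> 792000000 */
	return 0;
}
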
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
new file mode 100644
index 000000000000..fd792b203eaf
--- /dev/null
+++ b/drivers/clk/at91/clk-programmable.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "pmc.h"
+
+#define PROG_SOURCE_MAX 5
+#define PROG_ID_MAX 7
+
+#define PROG_STATUS_MASK(id) (1 << ((id) + 8))
+#define PROG_PRES_MASK 0x7
+#define PROG_MAX_RM9200_CSS 3
+
+struct clk_programmable_layout {
+ u8 pres_shift;
+ u8 css_mask;
+ u8 have_slck_mck;
+};
+
+struct clk_programmable {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+ u8 id;
+ u8 css;
+ u8 pres;
+ u8 slckmck;
+ const struct clk_programmable_layout *layout;
+};
+
+#define to_clk_programmable(hw) container_of(hw, struct clk_programmable, hw)
+
+static irqreturn_t clk_programmable_irq_handler(int irq, void *dev_id)
+{
+ struct clk_programmable *prog = (struct clk_programmable *)dev_id;
+
+ wake_up(&prog->wait);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_programmable_prepare(struct clk_hw *hw)
+{
+ u32 tmp;
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ struct at91_pmc *pmc = prog->pmc;
+ const struct clk_programmable_layout *layout = prog->layout;
+ u8 id = prog->id;
+ u32 mask = PROG_STATUS_MASK(id);
+
+ tmp = prog->css | (prog->pres << layout->pres_shift);
+ if (layout->have_slck_mck && prog->slckmck)
+ tmp |= AT91_PMC_CSSMCK_MCK;
+
+ pmc_write(pmc, AT91_PMC_PCKR(id), tmp);
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & mask))
+ wait_event(prog->wait, pmc_read(pmc, AT91_PMC_SR) & mask);
+
+ return 0;
+}
+
+static int clk_programmable_is_ready(struct clk_hw *hw)
+{
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ struct at91_pmc *pmc = prog->pmc;
+
+	return !!(pmc_read(pmc, AT91_PMC_SR) & PROG_STATUS_MASK(prog->id));
+}
+
+static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ struct at91_pmc *pmc = prog->pmc;
+ const struct clk_programmable_layout *layout = prog->layout;
+
+ tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id));
+ prog->pres = (tmp >> layout->pres_shift) & PROG_PRES_MASK;
+
+ return parent_rate >> prog->pres;
+}
+
+static long clk_programmable_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long best_rate = *parent_rate;
+ unsigned long best_diff;
+ unsigned long new_diff;
+ unsigned long cur_rate;
+ int shift = shift;
+
+ if (rate > *parent_rate)
+ return *parent_rate;
+ else
+ best_diff = *parent_rate - rate;
+
+ if (!best_diff)
+ return best_rate;
+
+ for (shift = 1; shift < PROG_PRES_MASK; shift++) {
+ cur_rate = *parent_rate >> shift;
+
+ if (cur_rate > rate)
+ new_diff = cur_rate - rate;
+ else
+ new_diff = rate - cur_rate;
+
+ if (!new_diff)
+ return cur_rate;
+
+ if (new_diff < best_diff) {
+ best_diff = new_diff;
+ best_rate = cur_rate;
+ }
+
+ if (rate > cur_rate)
+ break;
+ }
+
+ return best_rate;
+}
+
+static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ const struct clk_programmable_layout *layout = prog->layout;
+ if (index > layout->css_mask) {
+ if (index > PROG_MAX_RM9200_CSS && layout->have_slck_mck) {
+ prog->css = 0;
+ prog->slckmck = 1;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ prog->css = index;
+ return 0;
+}
+
+static u8 clk_programmable_get_parent(struct clk_hw *hw)
+{
+ u32 tmp;
+ u8 ret;
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ struct at91_pmc *pmc = prog->pmc;
+ const struct clk_programmable_layout *layout = prog->layout;
+
+ tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id));
+ prog->css = tmp & layout->css_mask;
+ ret = prog->css;
+ if (layout->have_slck_mck) {
+ prog->slckmck = !!(tmp & AT91_PMC_CSSMCK_MCK);
+ if (prog->slckmck && !ret)
+ ret = PROG_MAX_RM9200_CSS + 1;
+ }
+
+ return ret;
+}
+
+static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ unsigned long best_rate = parent_rate;
+ unsigned long best_diff;
+ unsigned long new_diff;
+ unsigned long cur_rate;
+ int shift = 0;
+
+ if (rate > parent_rate)
+ return parent_rate;
+ else
+ best_diff = parent_rate - rate;
+
+ if (!best_diff) {
+ prog->pres = shift;
+ return 0;
+ }
+
+ for (shift = 1; shift < PROG_PRES_MASK; shift++) {
+ cur_rate = parent_rate >> shift;
+
+ if (cur_rate > rate)
+ new_diff = cur_rate - rate;
+ else
+ new_diff = rate - cur_rate;
+
+ if (!new_diff)
+ break;
+
+ if (new_diff < best_diff) {
+ best_diff = new_diff;
+ best_rate = cur_rate;
+ }
+
+ if (rate > cur_rate)
+ break;
+ }
+
+ prog->pres = shift;
+ return 0;
+}
+
+static const struct clk_ops programmable_ops = {
+ .prepare = clk_programmable_prepare,
+ .is_prepared = clk_programmable_is_ready,
+ .recalc_rate = clk_programmable_recalc_rate,
+ .round_rate = clk_programmable_round_rate,
+ .get_parent = clk_programmable_get_parent,
+ .set_parent = clk_programmable_set_parent,
+ .set_rate = clk_programmable_set_rate,
+};
+
+static struct clk * __init
+at91_clk_register_programmable(struct at91_pmc *pmc, unsigned int irq,
+ const char *name, const char **parent_names,
+ u8 num_parents, u8 id,
+ const struct clk_programmable_layout *layout)
+{
+ int ret;
+ struct clk_programmable *prog;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+ char irq_name[11];
+
+ if (id > PROG_ID_MAX)
+ return ERR_PTR(-EINVAL);
+
+ prog = kzalloc(sizeof(*prog), GFP_KERNEL);
+ if (!prog)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &programmable_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ prog->id = id;
+ prog->layout = layout;
+ prog->hw.init = &init;
+ prog->pmc = pmc;
+ prog->irq = irq;
+ init_waitqueue_head(&prog->wait);
+ irq_set_status_flags(prog->irq, IRQ_NOAUTOEN);
+ snprintf(irq_name, sizeof(irq_name), "clk-prog%d", id);
+ ret = request_irq(prog->irq, clk_programmable_irq_handler,
+ IRQF_TRIGGER_HIGH, irq_name, prog);
+	if (ret) {
+		kfree(prog);
+		return ERR_PTR(ret);
+	}
+
+	clk = clk_register(NULL, &prog->hw);
+	if (IS_ERR(clk)) {
+		free_irq(prog->irq, prog);
+		kfree(prog);
+	}
+
+ return clk;
+}
+
+static const struct clk_programmable_layout at91rm9200_programmable_layout = {
+ .pres_shift = 2,
+ .css_mask = 0x3,
+ .have_slck_mck = 0,
+};
+
+static const struct clk_programmable_layout at91sam9g45_programmable_layout = {
+ .pres_shift = 2,
+ .css_mask = 0x3,
+ .have_slck_mck = 1,
+};
+
+static const struct clk_programmable_layout at91sam9x5_programmable_layout = {
+ .pres_shift = 4,
+ .css_mask = 0x7,
+ .have_slck_mck = 0,
+};
+
+static void __init
+of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
+ const struct clk_programmable_layout *layout)
+{
+ int num;
+ u32 id;
+ int i;
+ unsigned int irq;
+ struct clk *clk;
+ int num_parents;
+ const char *parent_names[PROG_SOURCE_MAX];
+ const char *name;
+ struct device_node *progclknp;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (num_parents <= 0 || num_parents > PROG_SOURCE_MAX)
+ return;
+
+ for (i = 0; i < num_parents; ++i) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ num = of_get_child_count(np);
+ if (!num || num > (PROG_ID_MAX + 1))
+ return;
+
+ for_each_child_of_node(np, progclknp) {
+ if (of_property_read_u32(progclknp, "reg", &id))
+ continue;
+
+ if (of_property_read_string(np, "clock-output-names", &name))
+ name = progclknp->name;
+
+ irq = irq_of_parse_and_map(progclknp, 0);
+ if (!irq)
+ continue;
+
+ clk = at91_clk_register_programmable(pmc, irq, name,
+ parent_names, num_parents,
+ id, layout);
+ if (IS_ERR(clk))
+ continue;
+
+ of_clk_add_provider(progclknp, of_clk_src_simple_get, clk);
+ }
+}
+
+void __init of_at91rm9200_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_prog_setup(np, pmc, &at91rm9200_programmable_layout);
+}
+
+void __init of_at91sam9g45_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_prog_setup(np, pmc, &at91sam9g45_programmable_layout);
+}
+
+void __init of_at91sam9x5_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_prog_setup(np, pmc, &at91sam9x5_programmable_layout);
+}
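
The rounding loop in clk_programmable_round_rate() walks the available power-of-two prescalers and keeps the closest achievable output. The same search as a stand-alone sketch, with example rates:

#include <stdio.h>

#define PROG_PRES_MASK 0x7

static unsigned long prog_round(unsigned long rate, unsigned long parent)
{
	unsigned long best = parent, best_diff, cur, diff;
	int shift;

	if (rate >= parent)
		return parent;
	best_diff = parent - rate;

	for (shift = 1; shift < PROG_PRES_MASK; shift++) {
		cur = parent >> shift;
		diff = (cur > rate) ? cur - rate : rate - cur;
		if (diff < best_diff) {
			best_diff = diff;
			best = cur;
		}
		if (!best_diff || cur < rate)
			break;
	}
	return best;
}

int main(void)
{
	printf("%lu\n", prog_round(10000000, 132000000));	/* -> 132000000 >> 4 = 8250000 */
	return 0;
}
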
diff --git a/drivers/clk/at91/clk-smd.c b/drivers/clk/at91/clk-smd.c
new file mode 100644
index 000000000000..144d47ecfe63
--- /dev/null
+++ b/drivers/clk/at91/clk-smd.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "pmc.h"
+
+#define SMD_SOURCE_MAX 2
+
+#define SMD_DIV_SHIFT 8
+#define SMD_MAX_DIV 0xf
+
+struct at91sam9x5_clk_smd {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+};
+
+#define to_at91sam9x5_clk_smd(hw) \
+ container_of(hw, struct at91sam9x5_clk_smd, hw)
+
+static unsigned long at91sam9x5_clk_smd_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ u8 smddiv;
+ struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
+ struct at91_pmc *pmc = smd->pmc;
+
+ tmp = pmc_read(pmc, AT91_PMC_SMD);
+ smddiv = (tmp & AT91_PMC_SMD_DIV) >> SMD_DIV_SHIFT;
+ return parent_rate / (smddiv + 1);
+}
+
+static long at91sam9x5_clk_smd_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long div;
+ unsigned long bestrate;
+ unsigned long tmp;
+
+ if (rate >= *parent_rate)
+ return *parent_rate;
+
+ div = *parent_rate / rate;
+ if (div > SMD_MAX_DIV)
+ return *parent_rate / (SMD_MAX_DIV + 1);
+
+ bestrate = *parent_rate / div;
+ tmp = *parent_rate / (div + 1);
+ if (bestrate - rate > rate - tmp)
+ bestrate = tmp;
+
+ return bestrate;
+}
+
+static int at91sam9x5_clk_smd_set_parent(struct clk_hw *hw, u8 index)
+{
+ u32 tmp;
+ struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
+ struct at91_pmc *pmc = smd->pmc;
+
+ if (index > 1)
+ return -EINVAL;
+ tmp = pmc_read(pmc, AT91_PMC_SMD) & ~AT91_PMC_SMDS;
+ if (index)
+ tmp |= AT91_PMC_SMDS;
+ pmc_write(pmc, AT91_PMC_SMD, tmp);
+ return 0;
+}
+
+static u8 at91sam9x5_clk_smd_get_parent(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
+ struct at91_pmc *pmc = smd->pmc;
+
+ return pmc_read(pmc, AT91_PMC_SMD) & AT91_PMC_SMDS;
+}
+
+static int at91sam9x5_clk_smd_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
+ struct at91_pmc *pmc = smd->pmc;
+ unsigned long div = parent_rate / rate;
+
+ if (parent_rate % rate || div < 1 || div > (SMD_MAX_DIV + 1))
+ return -EINVAL;
+ tmp = pmc_read(pmc, AT91_PMC_SMD) & ~AT91_PMC_SMD_DIV;
+ tmp |= (div - 1) << SMD_DIV_SHIFT;
+ pmc_write(pmc, AT91_PMC_SMD, tmp);
+
+ return 0;
+}
+
+static const struct clk_ops at91sam9x5_smd_ops = {
+ .recalc_rate = at91sam9x5_clk_smd_recalc_rate,
+ .round_rate = at91sam9x5_clk_smd_round_rate,
+ .get_parent = at91sam9x5_clk_smd_get_parent,
+ .set_parent = at91sam9x5_clk_smd_set_parent,
+ .set_rate = at91sam9x5_clk_smd_set_rate,
+};
+
+static struct clk * __init
+at91sam9x5_clk_register_smd(struct at91_pmc *pmc, const char *name,
+ const char **parent_names, u8 num_parents)
+{
+ struct at91sam9x5_clk_smd *smd;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ smd = kzalloc(sizeof(*smd), GFP_KERNEL);
+ if (!smd)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &at91sam9x5_smd_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ smd->hw.init = &init;
+ smd->pmc = pmc;
+
+ clk = clk_register(NULL, &smd->hw);
+ if (IS_ERR(clk))
+ kfree(smd);
+
+ return clk;
+}
+
+void __init of_at91sam9x5_clk_smd_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ int i;
+ int num_parents;
+ const char *parent_names[SMD_SOURCE_MAX];
+ const char *name = np->name;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (num_parents <= 0 || num_parents > SMD_SOURCE_MAX)
+ return;
+
+ for (i = 0; i < num_parents; i++) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91sam9x5_clk_register_smd(pmc, name, parent_names,
+ num_parents);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
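
at91sam9x5_clk_smd_round_rate() produces parent / (DIV + 1) with DIV in 0..15, so it only has to compare the two candidate dividers surrounding the request. A stand-alone sketch with example rates:

#include <stdio.h>

#define SMD_MAX_DIV 0xf

static unsigned long smd_round(unsigned long rate, unsigned long parent)
{
	unsigned long div, best, lower;

	if (rate >= parent)
		return parent;

	div = parent / rate;
	if (div > SMD_MAX_DIV)
		return parent / (SMD_MAX_DIV + 1);

	best = parent / div;		/* candidate at or just above the request */
	lower = parent / (div + 1);	/* candidate just below the request */
	return (best - rate > rate - lower) ? lower : best;
}

int main(void)
{
	printf("%lu\n", smd_round(48000000, 480000000));	/* exact: 480 MHz / 10 */
	printf("%lu\n", smd_round(12000000, 480000000));	/* clamped to 480 MHz / 16 */
	return 0;
}
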
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
new file mode 100644
index 000000000000..8f7c0434a09f
--- /dev/null
+++ b/drivers/clk/at91/clk-system.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "pmc.h"
+
+#define SYSTEM_MAX_ID 31
+
+#define SYSTEM_MAX_NAME_SZ 32
+
+#define to_clk_system(hw) container_of(hw, struct clk_system, hw)
+struct clk_system {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ u8 id;
+};
+
+static int clk_system_enable(struct clk_hw *hw)
+{
+ struct clk_system *sys = to_clk_system(hw);
+ struct at91_pmc *pmc = sys->pmc;
+
+ pmc_write(pmc, AT91_PMC_SCER, 1 << sys->id);
+ return 0;
+}
+
+static void clk_system_disable(struct clk_hw *hw)
+{
+ struct clk_system *sys = to_clk_system(hw);
+ struct at91_pmc *pmc = sys->pmc;
+
+ pmc_write(pmc, AT91_PMC_SCDR, 1 << sys->id);
+}
+
+static int clk_system_is_enabled(struct clk_hw *hw)
+{
+ struct clk_system *sys = to_clk_system(hw);
+ struct at91_pmc *pmc = sys->pmc;
+
+ return !!(pmc_read(pmc, AT91_PMC_SCSR) & (1 << sys->id));
+}
+
+static const struct clk_ops system_ops = {
+ .enable = clk_system_enable,
+ .disable = clk_system_disable,
+ .is_enabled = clk_system_is_enabled,
+};
+
+static struct clk * __init
+at91_clk_register_system(struct at91_pmc *pmc, const char *name,
+ const char *parent_name, u8 id)
+{
+ struct clk_system *sys;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!parent_name || id > SYSTEM_MAX_ID)
+ return ERR_PTR(-EINVAL);
+
+ sys = kzalloc(sizeof(*sys), GFP_KERNEL);
+ if (!sys)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &system_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ /*
+ * CLK_IGNORE_UNUSED is used to avoid ddrck switch off.
+ * TODO : we should implement a driver supporting at91 ddr controller
+ * (see drivers/memory) which would request and enable the ddrck clock.
+ * When this is done we will be able to remove CLK_IGNORE_UNUSED flag.
+ */
+ init.flags = CLK_IGNORE_UNUSED;
+
+ sys->id = id;
+ sys->hw.init = &init;
+ sys->pmc = pmc;
+
+ clk = clk_register(NULL, &sys->hw);
+ if (IS_ERR(clk))
+ kfree(sys);
+
+ return clk;
+}
+
+static void __init
+of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc)
+{
+ int num;
+ u32 id;
+ struct clk *clk;
+ const char *name;
+ struct device_node *sysclknp;
+ const char *parent_name;
+
+ num = of_get_child_count(np);
+ if (num > (SYSTEM_MAX_ID + 1))
+ return;
+
+ for_each_child_of_node(np, sysclknp) {
+ if (of_property_read_u32(sysclknp, "reg", &id))
+ continue;
+
+ if (of_property_read_string(np, "clock-output-names", &name))
+ name = sysclknp->name;
+
+ parent_name = of_clk_get_parent_name(sysclknp, 0);
+
+ clk = at91_clk_register_system(pmc, name, parent_name, id);
+ if (IS_ERR(clk))
+ continue;
+
+ of_clk_add_provider(sysclknp, of_clk_src_simple_get, clk);
+ }
+}
+
+void __init of_at91rm9200_clk_sys_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_sys_setup(np, pmc);
+}
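
The system clocks rely on the PMC's write-one-to-set/clear scheme: SCER enables, SCDR disables and SCSR reports status, so no read-modify-write of an enable register is needed. A minimal model of that scheme (the register struct and the clock id below are illustrative only):

#include <stdio.h>
#include <stdint.h>

struct fake_pmc {
	uint32_t scsr;
};

static void sys_enable(struct fake_pmc *pmc, unsigned int id)
{
	pmc->scsr |= 1u << id;		/* models a write to AT91_PMC_SCER */
}

static void sys_disable(struct fake_pmc *pmc, unsigned int id)
{
	pmc->scsr &= ~(1u << id);	/* models a write to AT91_PMC_SCDR */
}

int main(void)
{
	struct fake_pmc pmc = { 0 };

	sys_enable(&pmc, 2);	/* an example system clock id */
	printf("id 2 enabled: %d\n", !!(pmc.scsr & (1u << 2)));
	sys_disable(&pmc, 2);
	printf("id 2 enabled: %d\n", !!(pmc.scsr & (1u << 2)));
	return 0;
}
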
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
new file mode 100644
index 000000000000..7d1d26a4bd04
--- /dev/null
+++ b/drivers/clk/at91/clk-usb.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "pmc.h"
+
+#define USB_SOURCE_MAX 2
+
+#define SAM9X5_USB_DIV_SHIFT 8
+#define SAM9X5_USB_MAX_DIV 0xf
+
+#define RM9200_USB_DIV_SHIFT 28
+#define RM9200_USB_DIV_TAB_SIZE 4
+
+struct at91sam9x5_clk_usb {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+};
+
+#define to_at91sam9x5_clk_usb(hw) \
+ container_of(hw, struct at91sam9x5_clk_usb, hw)
+
+struct at91rm9200_clk_usb {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ u32 divisors[4];
+};
+
+#define to_at91rm9200_clk_usb(hw) \
+ container_of(hw, struct at91rm9200_clk_usb, hw)
+
+static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ u8 usbdiv;
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ tmp = pmc_read(pmc, AT91_PMC_USB);
+ usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT;
+ return parent_rate / (usbdiv + 1);
+}
+
+static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long div;
+ unsigned long bestrate;
+ unsigned long tmp;
+
+ if (rate >= *parent_rate)
+ return *parent_rate;
+
+ div = *parent_rate / rate;
+ if (div >= SAM9X5_USB_MAX_DIV)
+ return *parent_rate / (SAM9X5_USB_MAX_DIV + 1);
+
+ bestrate = *parent_rate / div;
+ tmp = *parent_rate / (div + 1);
+ if (bestrate - rate > rate - tmp)
+ bestrate = tmp;
+
+ return bestrate;
+}
+
+static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
+{
+ u32 tmp;
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ if (index > 1)
+ return -EINVAL;
+ tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS;
+ if (index)
+ tmp |= AT91_PMC_USBS;
+ pmc_write(pmc, AT91_PMC_USB, tmp);
+ return 0;
+}
+
+static u8 at91sam9x5_clk_usb_get_parent(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ return pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS;
+}
+
+static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+ unsigned long div = parent_rate / rate;
+
+ if (parent_rate % rate || div < 1 || div >= SAM9X5_USB_MAX_DIV)
+ return -EINVAL;
+
+ tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV;
+ tmp |= (div - 1) << SAM9X5_USB_DIV_SHIFT;
+ pmc_write(pmc, AT91_PMC_USB, tmp);
+
+ return 0;
+}
+
+static const struct clk_ops at91sam9x5_usb_ops = {
+ .recalc_rate = at91sam9x5_clk_usb_recalc_rate,
+ .round_rate = at91sam9x5_clk_usb_round_rate,
+ .get_parent = at91sam9x5_clk_usb_get_parent,
+ .set_parent = at91sam9x5_clk_usb_set_parent,
+ .set_rate = at91sam9x5_clk_usb_set_rate,
+};
+
+static int at91sam9n12_clk_usb_enable(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ pmc_write(pmc, AT91_PMC_USB,
+ pmc_read(pmc, AT91_PMC_USB) | AT91_PMC_USBS);
+ return 0;
+}
+
+static void at91sam9n12_clk_usb_disable(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ pmc_write(pmc, AT91_PMC_USB,
+ pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS);
+}
+
+static int at91sam9n12_clk_usb_is_enabled(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ return !!(pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS);
+}
+
+static const struct clk_ops at91sam9n12_usb_ops = {
+ .enable = at91sam9n12_clk_usb_enable,
+ .disable = at91sam9n12_clk_usb_disable,
+ .is_enabled = at91sam9n12_clk_usb_is_enabled,
+ .recalc_rate = at91sam9x5_clk_usb_recalc_rate,
+ .round_rate = at91sam9x5_clk_usb_round_rate,
+ .set_rate = at91sam9x5_clk_usb_set_rate,
+};
+
+static struct clk * __init
+at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
+ const char **parent_names, u8 num_parents)
+{
+ struct at91sam9x5_clk_usb *usb;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ usb = kzalloc(sizeof(*usb), GFP_KERNEL);
+ if (!usb)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &at91sam9x5_usb_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ usb->hw.init = &init;
+ usb->pmc = pmc;
+
+ clk = clk_register(NULL, &usb->hw);
+ if (IS_ERR(clk))
+ kfree(usb);
+
+ return clk;
+}
+
+static struct clk * __init
+at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
+ const char *parent_name)
+{
+ struct at91sam9x5_clk_usb *usb;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ usb = kzalloc(sizeof(*usb), GFP_KERNEL);
+ if (!usb)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &at91sam9n12_usb_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_SET_RATE_GATE;
+
+ usb->hw.init = &init;
+ usb->pmc = pmc;
+
+ clk = clk_register(NULL, &usb->hw);
+ if (IS_ERR(clk))
+ kfree(usb);
+
+ return clk;
+}
+
+static unsigned long at91rm9200_clk_usb_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+ u32 tmp;
+ u8 usbdiv;
+
+ tmp = pmc_read(pmc, AT91_CKGR_PLLBR);
+ usbdiv = (tmp & AT91_PMC_USBDIV) >> RM9200_USB_DIV_SHIFT;
+ if (usb->divisors[usbdiv])
+ return parent_rate / usb->divisors[usbdiv];
+
+ return 0;
+}
+
+static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
+ unsigned long bestrate = 0;
+ int bestdiff = -1;
+ unsigned long tmprate;
+ int tmpdiff;
+ int i = 0;
+
+ for (i = 0; i < 4; i++) {
+ if (!usb->divisors[i])
+ continue;
+ tmprate = *parent_rate / usb->divisors[i];
+ if (tmprate < rate)
+ tmpdiff = rate - tmprate;
+ else
+ tmpdiff = tmprate - rate;
+
+ if (bestdiff < 0 || bestdiff > tmpdiff) {
+ bestrate = tmprate;
+ bestdiff = tmpdiff;
+ }
+
+ if (!bestdiff)
+ break;
+ }
+
+ return bestrate;
+}
+
+static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ int i;
+ struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+ unsigned long div = parent_rate / rate;
+
+ if (parent_rate % rate)
+ return -EINVAL;
+ for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) {
+ if (usb->divisors[i] == div) {
+ tmp = pmc_read(pmc, AT91_CKGR_PLLBR) &
+ ~AT91_PMC_USBDIV;
+ tmp |= i << RM9200_USB_DIV_SHIFT;
+ pmc_write(pmc, AT91_CKGR_PLLBR, tmp);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static const struct clk_ops at91rm9200_usb_ops = {
+ .recalc_rate = at91rm9200_clk_usb_recalc_rate,
+ .round_rate = at91rm9200_clk_usb_round_rate,
+ .set_rate = at91rm9200_clk_usb_set_rate,
+};
+
+static struct clk * __init
+at91rm9200_clk_register_usb(struct at91_pmc *pmc, const char *name,
+ const char *parent_name, const u32 *divisors)
+{
+ struct at91rm9200_clk_usb *usb;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ usb = kzalloc(sizeof(*usb), GFP_KERNEL);
+ if (!usb)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &at91rm9200_usb_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ usb->hw.init = &init;
+ usb->pmc = pmc;
+ memcpy(usb->divisors, divisors, sizeof(usb->divisors));
+
+ clk = clk_register(NULL, &usb->hw);
+ if (IS_ERR(clk))
+ kfree(usb);
+
+ return clk;
+}
+
+void __init of_at91sam9x5_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ int i;
+ int num_parents;
+ const char *parent_names[USB_SOURCE_MAX];
+ const char *name = np->name;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (num_parents <= 0 || num_parents > USB_SOURCE_MAX)
+ return;
+
+ for (i = 0; i < num_parents; i++) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91sam9x5_clk_register_usb(pmc, name, parent_names, num_parents);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+void __init of_at91sam9n12_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91sam9n12_clk_register_usb(pmc, name, parent_name);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+void __init of_at91rm9200_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+ u32 divisors[4] = {0, 0, 0, 0};
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ of_property_read_u32_array(np, "atmel,clk-divisors", divisors, 4);
+ if (!divisors[0])
+ return;
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91rm9200_clk_register_usb(pmc, name, parent_name, divisors);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
new file mode 100644
index 000000000000..ae3263bc1476
--- /dev/null
+++ b/drivers/clk/at91/clk-utmi.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include "pmc.h"
+
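+/*
+ * The UTMI PLL applies a fixed x40 multiplier to its parent.  With a typical
+ * 12 MHz main oscillator (an illustrative, board-dependent value) this gives
+ * 12 MHz * 40 = 480 MHz, the rate expected by the high-speed USB PHY.
+ */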
+#define UTMI_FIXED_MUL 40
+
+struct clk_utmi {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+};
+
+#define to_clk_utmi(hw) container_of(hw, struct clk_utmi, hw)
+
+static irqreturn_t clk_utmi_irq_handler(int irq, void *dev_id)
+{
+ struct clk_utmi *utmi = (struct clk_utmi *)dev_id;
+
+ wake_up(&utmi->wait);
+ disable_irq_nosync(utmi->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_utmi_prepare(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+ struct at91_pmc *pmc = utmi->pmc;
+ u32 tmp = at91_pmc_read(AT91_CKGR_UCKR) | AT91_PMC_UPLLEN |
+ AT91_PMC_UPLLCOUNT | AT91_PMC_BIASEN;
+
+ pmc_write(pmc, AT91_CKGR_UCKR, tmp);
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU)) {
+ enable_irq(utmi->irq);
+ wait_event(utmi->wait,
+ pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU);
+ }
+
+ return 0;
+}
+
+static int clk_utmi_is_prepared(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+ struct at91_pmc *pmc = utmi->pmc;
+
+ return !!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU);
+}
+
+static void clk_utmi_unprepare(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+ struct at91_pmc *pmc = utmi->pmc;
+ u32 tmp = at91_pmc_read(AT91_CKGR_UCKR) & ~AT91_PMC_UPLLEN;
+
+ pmc_write(pmc, AT91_CKGR_UCKR, tmp);
+}
+
+static unsigned long clk_utmi_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ /* the UTMI clock rate is the parent rate times a fixed multiplier */
+ return parent_rate * UTMI_FIXED_MUL;
+}
+
+static const struct clk_ops utmi_ops = {
+ .prepare = clk_utmi_prepare,
+ .unprepare = clk_utmi_unprepare,
+ .is_prepared = clk_utmi_is_prepared,
+ .recalc_rate = clk_utmi_recalc_rate,
+};
+
+static struct clk * __init
+at91_clk_register_utmi(struct at91_pmc *pmc, unsigned int irq,
+ const char *name, const char *parent_name)
+{
+ int ret;
+ struct clk_utmi *utmi;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ utmi = kzalloc(sizeof(*utmi), GFP_KERNEL);
+ if (!utmi)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &utmi_ops;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+ init.flags = CLK_SET_RATE_GATE;
+
+ utmi->hw.init = &init;
+ utmi->pmc = pmc;
+ utmi->irq = irq;
+ init_waitqueue_head(&utmi->wait);
+ irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN);
+ ret = request_irq(utmi->irq, clk_utmi_irq_handler,
+ IRQF_TRIGGER_HIGH, "clk-utmi", utmi);
+ if (ret) {
+ kfree(utmi); /* avoid leaking utmi if the IRQ cannot be requested */
+ return ERR_PTR(ret);
+ }
+
+ clk = clk_register(NULL, &utmi->hw);
+ if (IS_ERR(clk))
+ kfree(utmi);
+
+ return clk;
+}
+
+static void __init
+of_at91_clk_utmi_setup(struct device_node *np, struct at91_pmc *pmc)
+{
+ unsigned int irq;
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ return;
+
+ clk = at91_clk_register_utmi(pmc, irq, name, parent_name);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ return;
+}
+
+void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_utmi_setup(np, pmc);
+}
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
new file mode 100644
index 000000000000..6a61477a57e0
--- /dev/null
+++ b/drivers/clk/at91/pmc.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+
+#include <asm/proc-fns.h>
+
+#include "pmc.h"
+
+void __iomem *at91_pmc_base;
+EXPORT_SYMBOL_GPL(at91_pmc_base);
+
+void at91sam9_idle(void)
+{
+ at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK);
+ cpu_do_idle();
+}
+
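+/*
+ * Parses a two-cell <min max> property into a clk_range.  Illustrative
+ * (assumed) device tree usage:
+ *
+ *	atmel,clk-output-range = <0 166000000>;
+ */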
+int of_at91_get_clk_range(struct device_node *np, const char *propname,
+ struct clk_range *range)
+{
+ u32 min, max;
+ int ret;
+
+ ret = of_property_read_u32_index(np, propname, 0, &min);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(np, propname, 1, &max);
+ if (ret)
+ return ret;
+
+ if (range) {
+ range->min = min;
+ range->max = max;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_at91_get_clk_range);
+
+static void pmc_irq_mask(struct irq_data *d)
+{
+ struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
+
+ pmc_write(pmc, AT91_PMC_IDR, 1 << d->hwirq);
+}
+
+static void pmc_irq_unmask(struct irq_data *d)
+{
+ struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
+
+ pmc_write(pmc, AT91_PMC_IER, 1 << d->hwirq);
+}
+
+static int pmc_irq_set_type(struct irq_data *d, unsigned type)
+{
+ if (type != IRQ_TYPE_LEVEL_HIGH) {
+ pr_warn("PMC: type not supported (support only IRQ_TYPE_LEVEL_HIGH type)\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct irq_chip pmc_irq = {
+ .name = "PMC",
+ .irq_disable = pmc_irq_mask,
+ .irq_mask = pmc_irq_mask,
+ .irq_unmask = pmc_irq_unmask,
+ .irq_set_type = pmc_irq_set_type,
+};
+
+static struct lock_class_key pmc_lock_class;
+
+static int pmc_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct at91_pmc *pmc = h->host_data;
+
+ irq_set_lockdep_class(virq, &pmc_lock_class);
+
+ irq_set_chip_and_handler(virq, &pmc_irq,
+ handle_level_irq);
+ set_irq_flags(virq, IRQF_VALID);
+ irq_set_chip_data(virq, pmc);
+
+ return 0;
+}
+
+static int pmc_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_type)
+{
+ struct at91_pmc *pmc = d->host_data;
+ const struct at91_pmc_caps *caps = pmc->caps;
+
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+
+ *out_hwirq = intspec[0];
+
+ if (!(caps->available_irqs & (1 << *out_hwirq)))
+ return -EINVAL;
+
+ *out_type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
+static struct irq_domain_ops pmc_irq_ops = {
+ .map = pmc_irq_map,
+ .xlate = pmc_irq_domain_xlate,
+};
+
+static irqreturn_t pmc_irq_handler(int irq, void *data)
+{
+ struct at91_pmc *pmc = (struct at91_pmc *)data;
+ unsigned long sr;
+ int n;
+
+ sr = pmc_read(pmc, AT91_PMC_SR) & pmc_read(pmc, AT91_PMC_IMR);
+ if (!sr)
+ return IRQ_NONE;
+
+ for_each_set_bit(n, &sr, BITS_PER_LONG)
+ generic_handle_irq(irq_find_mapping(pmc->irqdomain, n));
+
+ return IRQ_HANDLED;
+}
+
+static const struct at91_pmc_caps at91rm9200_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
+ AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY |
+ AT91_PMC_PCK3RDY,
+};
+
+static const struct at91_pmc_caps at91sam9260_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
+ AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY,
+};
+
+static const struct at91_pmc_caps at91sam9g45_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
+ AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY,
+};
+
+static const struct at91_pmc_caps at91sam9n12_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
+ AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS |
+ AT91_PMC_MOSCRCS | AT91_PMC_CFDEV,
+};
+
+static const struct at91_pmc_caps at91sam9x5_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
+ AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS |
+ AT91_PMC_MOSCRCS | AT91_PMC_CFDEV,
+};
+
+static const struct at91_pmc_caps sama5d3_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
+ AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY |
+ AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS |
+ AT91_PMC_CFDEV,
+};
+
+static struct at91_pmc *__init at91_pmc_init(struct device_node *np,
+ void __iomem *regbase, int virq,
+ const struct at91_pmc_caps *caps)
+{
+ struct at91_pmc *pmc;
+
+ if (!regbase || !virq || !caps)
+ return NULL;
+
+ at91_pmc_base = regbase;
+
+ pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
+ if (!pmc)
+ return NULL;
+
+ spin_lock_init(&pmc->lock);
+ pmc->regbase = regbase;
+ pmc->virq = virq;
+ pmc->caps = caps;
+
+ pmc->irqdomain = irq_domain_add_linear(np, 32, &pmc_irq_ops, pmc);
+
+ if (!pmc->irqdomain)
+ goto out_free_pmc;
+
+ pmc_write(pmc, AT91_PMC_IDR, 0xffffffff);
+ if (request_irq(pmc->virq, pmc_irq_handler, IRQF_SHARED, "pmc", pmc))
+ goto out_remove_irqdomain;
+
+ return pmc;
+
+out_remove_irqdomain:
+ irq_domain_remove(pmc->irqdomain);
+out_free_pmc:
+ kfree(pmc);
+
+ return NULL;
+}
+
+static const struct of_device_id pmc_clk_ids[] __initconst = {
+ /* Main clock */
+ {
+ .compatible = "atmel,at91rm9200-clk-main",
+ .data = of_at91rm9200_clk_main_setup,
+ },
+ /* PLL clocks */
+ {
+ .compatible = "atmel,at91rm9200-clk-pll",
+ .data = of_at91rm9200_clk_pll_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9g45-clk-pll",
+ .data = of_at91sam9g45_clk_pll_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9g20-clk-pllb",
+ .data = of_at91sam9g20_clk_pllb_setup,
+ },
+ {
+ .compatible = "atmel,sama5d3-clk-pll",
+ .data = of_sama5d3_clk_pll_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-plldiv",
+ .data = of_at91sam9x5_clk_plldiv_setup,
+ },
+ /* Master clock */
+ {
+ .compatible = "atmel,at91rm9200-clk-master",
+ .data = of_at91rm9200_clk_master_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-master",
+ .data = of_at91sam9x5_clk_master_setup,
+ },
+ /* System clocks */
+ {
+ .compatible = "atmel,at91rm9200-clk-system",
+ .data = of_at91rm9200_clk_sys_setup,
+ },
+ /* Peripheral clocks */
+ {
+ .compatible = "atmel,at91rm9200-clk-peripheral",
+ .data = of_at91rm9200_clk_periph_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-peripheral",
+ .data = of_at91sam9x5_clk_periph_setup,
+ },
+ /* Programmable clocks */
+ {
+ .compatible = "atmel,at91rm9200-clk-programmable",
+ .data = of_at91rm9200_clk_prog_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9g45-clk-programmable",
+ .data = of_at91sam9g45_clk_prog_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-programmable",
+ .data = of_at91sam9x5_clk_prog_setup,
+ },
+ /* UTMI clock */
+#if defined(CONFIG_HAVE_AT91_UTMI)
+ {
+ .compatible = "atmel,at91sam9x5-clk-utmi",
+ .data = of_at91sam9x5_clk_utmi_setup,
+ },
+#endif
+ /* USB clock */
+#if defined(CONFIG_HAVE_AT91_USB_CLK)
+ {
+ .compatible = "atmel,at91rm9200-clk-usb",
+ .data = of_at91rm9200_clk_usb_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-usb",
+ .data = of_at91sam9x5_clk_usb_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9n12-clk-usb",
+ .data = of_at91sam9n12_clk_usb_setup,
+ },
+#endif
+ /* SMD clock */
+#if defined(CONFIG_HAVE_AT91_SMD)
+ {
+ .compatible = "atmel,at91sam9x5-clk-smd",
+ .data = of_at91sam9x5_clk_smd_setup,
+ },
+#endif
+ { /*sentinel*/ }
+};
+
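+/*
+ * of_at91_pmc_setup() walks the PMC node's children and invokes the matching
+ * setup routine from pmc_clk_ids for each one.  Illustrative (assumed)
+ * device tree layout:
+ *
+ *	pmc: pmc@fffffc00 {
+ *		compatible = "atmel,at91sam9x5-pmc";
+ *		interrupts = <...>;
+ *
+ *		main: mainck {
+ *			compatible = "atmel,at91rm9200-clk-main";
+ *			...
+ *		};
+ *	};
+ */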
+static void __init of_at91_pmc_setup(struct device_node *np,
+ const struct at91_pmc_caps *caps)
+{
+ struct at91_pmc *pmc;
+ struct device_node *childnp;
+ void (*clk_setup)(struct device_node *, struct at91_pmc *);
+ const struct of_device_id *clk_id;
+ void __iomem *regbase = of_iomap(np, 0);
+ int virq;
+
+ if (!regbase)
+ return;
+
+ virq = irq_of_parse_and_map(np, 0);
+ if (!virq)
+ return;
+
+ pmc = at91_pmc_init(np, regbase, virq, caps);
+ if (!pmc)
+ return;
+ for_each_child_of_node(np, childnp) {
+ clk_id = of_match_node(pmc_clk_ids, childnp);
+ if (!clk_id)
+ continue;
+ clk_setup = clk_id->data;
+ clk_setup(childnp, pmc);
+ }
+}
+
+static void __init of_at91rm9200_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &at91rm9200_caps);
+}
+CLK_OF_DECLARE(at91rm9200_clk_pmc, "atmel,at91rm9200-pmc",
+ of_at91rm9200_pmc_setup);
+
+static void __init of_at91sam9260_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &at91sam9260_caps);
+}
+CLK_OF_DECLARE(at91sam9260_clk_pmc, "atmel,at91sam9260-pmc",
+ of_at91sam9260_pmc_setup);
+
+static void __init of_at91sam9g45_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &at91sam9g45_caps);
+}
+CLK_OF_DECLARE(at91sam9g45_clk_pmc, "atmel,at91sam9g45-pmc",
+ of_at91sam9g45_pmc_setup);
+
+static void __init of_at91sam9n12_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &at91sam9n12_caps);
+}
+CLK_OF_DECLARE(at91sam9n12_clk_pmc, "atmel,at91sam9n12-pmc",
+ of_at91sam9n12_pmc_setup);
+
+static void __init of_at91sam9x5_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &at91sam9x5_caps);
+}
+CLK_OF_DECLARE(at91sam9x5_clk_pmc, "atmel,at91sam9x5-pmc",
+ of_at91sam9x5_pmc_setup);
+
+static void __init of_sama5d3_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &sama5d3_caps);
+}
+CLK_OF_DECLARE(sama5d3_clk_pmc, "atmel,sama5d3-pmc",
+ of_sama5d3_pmc_setup);
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
new file mode 100644
index 000000000000..441350983ccb
--- /dev/null
+++ b/drivers/clk/at91/pmc.h
@@ -0,0 +1,114 @@
+/*
+ * drivers/clk/at91/pmc.h
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __PMC_H_
+#define __PMC_H_
+
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/spinlock.h>
+
+struct clk_range {
+ unsigned long min;
+ unsigned long max;
+};
+
+#define CLK_RANGE(MIN, MAX) {.min = MIN, .max = MAX,}
+
+struct at91_pmc_caps {
+ u32 available_irqs;
+};
+
+struct at91_pmc {
+ void __iomem *regbase;
+ int virq;
+ spinlock_t lock;
+ const struct at91_pmc_caps *caps;
+ struct irq_domain *irqdomain;
+};
+
+static inline void pmc_lock(struct at91_pmc *pmc)
+{
+ spin_lock(&pmc->lock);
+}
+
+static inline void pmc_unlock(struct at91_pmc *pmc)
+{
+ spin_unlock(&pmc->lock);
+}
+
+static inline u32 pmc_read(struct at91_pmc *pmc, int offset)
+{
+ return readl(pmc->regbase + offset);
+}
+
+static inline void pmc_write(struct at91_pmc *pmc, int offset, u32 value)
+{
+ writel(value, pmc->regbase + offset);
+}
+
+int of_at91_get_clk_range(struct device_node *np, const char *propname,
+ struct clk_range *range);
+
+extern void __init of_at91rm9200_clk_main_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9g45_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_sama5d3_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_plldiv_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_master_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_master_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_sys_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_periph_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_periph_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9g45_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+#if defined(CONFIG_HAVE_AT91_UTMI)
+extern void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+#endif
+
+#if defined(CONFIG_HAVE_AT91_USB_CLK)
+extern void __init of_at91rm9200_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9n12_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+#endif
+
+#if defined(CONFIG_HAVE_AT91_SMD)
+extern void __init of_at91sam9x5_clk_smd_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+#endif
+
+#endif /* __PMC_H_ */
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index a33f46f20a41..57a078e06efe 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -55,6 +55,30 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
return rate_ops->recalc_rate(rate_hw, parent_rate);
}
+static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate,
+ struct clk **best_parent_p)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *rate_ops = composite->rate_ops;
+ const struct clk_ops *mux_ops = composite->mux_ops;
+ struct clk_hw *rate_hw = composite->rate_hw;
+ struct clk_hw *mux_hw = composite->mux_hw;
+
+ if (rate_hw && rate_ops && rate_ops->determine_rate) {
+ rate_hw->clk = hw->clk;
+ return rate_ops->determine_rate(rate_hw, rate, best_parent_rate,
+ best_parent_p);
+ } else if (mux_hw && mux_ops && mux_ops->determine_rate) {
+ mux_hw->clk = hw->clk;
+ return mux_ops->determine_rate(mux_hw, rate, best_parent_rate,
+ best_parent_p);
+ } else {
+ pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
+ return 0;
+ }
+}
+
static long clk_composite_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
@@ -147,6 +171,8 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
composite->mux_ops = mux_ops;
clk_composite_ops->get_parent = clk_composite_get_parent;
clk_composite_ops->set_parent = clk_composite_set_parent;
+ if (mux_ops->determine_rate)
+ clk_composite_ops->determine_rate = clk_composite_determine_rate;
}
if (rate_hw && rate_ops) {
@@ -170,6 +196,8 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
composite->rate_hw = rate_hw;
composite->rate_ops = rate_ops;
clk_composite_ops->recalc_rate = clk_composite_recalc_rate;
+ if (rate_ops->determine_rate)
+ clk_composite_ops->determine_rate = clk_composite_determine_rate;
}
if (gate_hw && gate_ops) {
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 8d3009e44fba..5543b7df8e16 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -87,7 +87,7 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
return 0;
}
-static unsigned int _get_val(struct clk_divider *divider, u8 div)
+static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
{
if (divider->flags & CLK_DIVIDER_ONE_BASED)
return div;
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 1ed591ab8b1d..0fc56ab6e844 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -34,22 +34,31 @@ static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
return to_clk_fixed_rate(hw)->fixed_rate;
}
+static unsigned long clk_fixed_rate_recalc_accuracy(struct clk_hw *hw,
+ unsigned long parent_accuracy)
+{
+ return to_clk_fixed_rate(hw)->fixed_accuracy;
+}
+
const struct clk_ops clk_fixed_rate_ops = {
.recalc_rate = clk_fixed_rate_recalc_rate,
+ .recalc_accuracy = clk_fixed_rate_recalc_accuracy,
};
EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
/**
- * clk_register_fixed_rate - register fixed-rate clock with the clock framework
+ * clk_register_fixed_rate_with_accuracy - register fixed-rate clock with the
+ * clock framework
* @dev: device that is registering this clock
* @name: name of this clock
* @parent_name: name of clock's parent
* @flags: framework-specific flags
* @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
*/
-struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
- unsigned long fixed_rate)
+struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ unsigned long fixed_rate, unsigned long fixed_accuracy)
{
struct clk_fixed_rate *fixed;
struct clk *clk;
@@ -70,16 +79,33 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
/* struct clk_fixed_rate assignments */
fixed->fixed_rate = fixed_rate;
+ fixed->fixed_accuracy = fixed_accuracy;
fixed->hw.init = &init;
/* register the clock */
clk = clk_register(dev, &fixed->hw);
-
if (IS_ERR(clk))
kfree(fixed);
return clk;
}
+EXPORT_SYMBOL_GPL(clk_register_fixed_rate_with_accuracy);
+
+/**
+ * clk_register_fixed_rate - register fixed-rate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned long fixed_rate)
+{
+ return clk_register_fixed_rate_with_accuracy(dev, name, parent_name,
+ flags, fixed_rate, 0);
+}
EXPORT_SYMBOL_GPL(clk_register_fixed_rate);
#ifdef CONFIG_OF
@@ -91,13 +117,18 @@ void of_fixed_clk_setup(struct device_node *node)
struct clk *clk;
const char *clk_name = node->name;
u32 rate;
+ u32 accuracy = 0;
if (of_property_read_u32(node, "clock-frequency", &rate))
return;
+ of_property_read_u32(node, "clock-accuracy", &accuracy);
+
of_property_read_string(node, "clock-output-names", &clk_name);
- clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
+ clk = clk_register_fixed_rate_with_accuracy(NULL, clk_name, NULL,
+ CLK_IS_ROOT, rate,
+ accuracy);
if (!IS_ERR(clk))
of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index 9f57bc37cd60..3d7e8dd8fd58 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -66,7 +66,7 @@ static void max77686_clk_unprepare(struct clk_hw *hw)
MAX77686_REG_32KHZ, max77686->mask, ~max77686->mask);
}
-static int max77686_clk_is_enabled(struct clk_hw *hw)
+static int max77686_clk_is_prepared(struct clk_hw *hw)
{
struct max77686_clk *max77686 = to_max77686_clk(hw);
int ret;
@@ -81,10 +81,17 @@ static int max77686_clk_is_enabled(struct clk_hw *hw)
return val & max77686->mask;
}
+static unsigned long max77686_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return 32768;
+}
+
static struct clk_ops max77686_clk_ops = {
.prepare = max77686_clk_prepare,
.unprepare = max77686_clk_unprepare,
- .is_enabled = max77686_clk_is_enabled,
+ .is_prepared = max77686_clk_is_prepared,
+ .recalc_rate = max77686_recalc_rate,
};
static struct clk_init_data max77686_clks_init[MAX77686_CLKS_NUM] = {
@@ -105,38 +112,38 @@ static struct clk_init_data max77686_clks_init[MAX77686_CLKS_NUM] = {
},
};
-static int max77686_clk_register(struct device *dev,
+static struct clk *max77686_clk_register(struct device *dev,
struct max77686_clk *max77686)
{
struct clk *clk;
struct clk_hw *hw = &max77686->hw;
clk = clk_register(dev, hw);
-
if (IS_ERR(clk))
- return -ENOMEM;
+ return clk;
max77686->lookup = kzalloc(sizeof(struct clk_lookup), GFP_KERNEL);
if (!max77686->lookup)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
max77686->lookup->con_id = hw->init->name;
max77686->lookup->clk = clk;
clkdev_add(max77686->lookup);
- return 0;
+ return clk;
}
static int max77686_clk_probe(struct platform_device *pdev)
{
struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct max77686_clk **max77686_clks;
+ struct max77686_clk *max77686_clks[MAX77686_CLKS_NUM];
+ struct clk **clocks;
int i, ret;
- max77686_clks = devm_kzalloc(&pdev->dev, sizeof(struct max77686_clk *)
+ clocks = devm_kzalloc(&pdev->dev, sizeof(struct clk *)
* MAX77686_CLKS_NUM, GFP_KERNEL);
- if (!max77686_clks)
+ if (!clocks)
return -ENOMEM;
for (i = 0; i < MAX77686_CLKS_NUM; i++) {
@@ -151,47 +158,63 @@ static int max77686_clk_probe(struct platform_device *pdev)
max77686_clks[i]->mask = 1 << i;
max77686_clks[i]->hw.init = &max77686_clks_init[i];
- ret = max77686_clk_register(&pdev->dev, max77686_clks[i]);
+ clocks[i] = max77686_clk_register(&pdev->dev, max77686_clks[i]);
+ if (IS_ERR(clocks[i])) {
+ ret = PTR_ERR(clocks[i]);
+ dev_err(&pdev->dev, "failed to register %s\n",
+ max77686_clks[i]->hw.init->name);
+ goto err_clocks;
+ }
+ }
+
+ platform_set_drvdata(pdev, clocks);
+
+ if (iodev->dev->of_node) {
+ struct clk_onecell_data *of_data;
+
+ of_data = devm_kzalloc(&pdev->dev,
+ sizeof(*of_data), GFP_KERNEL);
+ if (!of_data) {
+ ret = -ENOMEM;
+ goto err_clocks;
+ }
+
+ of_data->clks = clocks;
+ of_data->clk_num = MAX77686_CLKS_NUM;
+ ret = of_clk_add_provider(iodev->dev->of_node,
+ of_clk_src_onecell_get, of_data);
if (ret) {
- switch (i) {
- case MAX77686_CLK_AP:
- dev_err(&pdev->dev, "Fail to register CLK_AP\n");
- goto err_clk_ap;
- break;
- case MAX77686_CLK_CP:
- dev_err(&pdev->dev, "Fail to register CLK_CP\n");
- goto err_clk_cp;
- break;
- case MAX77686_CLK_PMIC:
- dev_err(&pdev->dev, "Fail to register CLK_PMIC\n");
- goto err_clk_pmic;
- }
+ dev_err(&pdev->dev, "failed to register OF clock provider\n");
+ goto err_clocks;
}
}
- platform_set_drvdata(pdev, max77686_clks);
+ return 0;
- goto out;
+err_clocks:
+ for (--i; i >= 0; --i) {
+ clkdev_drop(max77686_clks[i]->lookup);
+ clk_unregister(max77686_clks[i]->hw.clk);
+ }
-err_clk_pmic:
- clkdev_drop(max77686_clks[MAX77686_CLK_CP]->lookup);
- kfree(max77686_clks[MAX77686_CLK_CP]->hw.clk);
-err_clk_cp:
- clkdev_drop(max77686_clks[MAX77686_CLK_AP]->lookup);
- kfree(max77686_clks[MAX77686_CLK_AP]->hw.clk);
-err_clk_ap:
-out:
return ret;
}
static int max77686_clk_remove(struct platform_device *pdev)
{
- struct max77686_clk **max77686_clks = platform_get_drvdata(pdev);
+ struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct clk **clocks = platform_get_drvdata(pdev);
int i;
+ if (iodev->dev->of_node)
+ of_clk_del_provider(iodev->dev->of_node);
+
for (i = 0; i < MAX77686_CLKS_NUM; i++) {
- clkdev_drop(max77686_clks[i]->lookup);
- kfree(max77686_clks[i]->hw.clk);
+ struct clk_hw *hw = __clk_get_hw(clocks[i]);
+ struct max77686_clk *max77686 = to_max77686_clk(hw);
+
+ clkdev_drop(max77686->lookup);
+ clk_unregister(clocks[i]);
}
return 0;
}
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 7be41e676a64..00a3abe103a5 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -60,7 +60,7 @@ static int s2mps11_clk_prepare(struct clk_hw *hw)
struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
int ret;
- ret = regmap_update_bits(s2mps11->iodev->regmap,
+ ret = regmap_update_bits(s2mps11->iodev->regmap_pmic,
S2MPS11_REG_RTC_CTRL,
s2mps11->mask, s2mps11->mask);
if (!ret)
@@ -74,7 +74,7 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw)
struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
int ret;
- ret = regmap_update_bits(s2mps11->iodev->regmap, S2MPS11_REG_RTC_CTRL,
+ ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL,
s2mps11->mask, ~s2mps11->mask);
if (!ret)
@@ -174,7 +174,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
s2mps11_clk->hw.init = &s2mps11_clks_init[i];
s2mps11_clk->mask = 1 << i;
- ret = regmap_read(s2mps11_clk->iodev->regmap,
+ ret = regmap_read(s2mps11_clk->iodev->regmap_pmic,
S2MPS11_REG_RTC_CTRL, &val);
if (ret < 0)
goto err_reg;
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index c50e83744b0a..3b2a66f78755 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -1111,11 +1111,11 @@ static const struct of_device_id si5351_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, si5351_dt_ids);
-static int si5351_dt_parse(struct i2c_client *client)
+static int si5351_dt_parse(struct i2c_client *client,
+ enum si5351_variant variant)
{
struct device_node *child, *np = client->dev.of_node;
struct si5351_platform_data *pdata;
- const struct of_device_id *match;
struct property *prop;
const __be32 *p;
int num = 0;
@@ -1124,15 +1124,10 @@ static int si5351_dt_parse(struct i2c_client *client)
if (np == NULL)
return 0;
- match = of_match_node(si5351_dt_ids, np);
- if (match == NULL)
- return -EINVAL;
-
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- pdata->variant = (enum si5351_variant)match->data;
pdata->clk_xtal = of_clk_get(np, 0);
if (!IS_ERR(pdata->clk_xtal))
clk_put(pdata->clk_xtal);
@@ -1163,7 +1158,7 @@ static int si5351_dt_parse(struct i2c_client *client)
pdata->pll_src[num] = SI5351_PLL_SRC_XTAL;
break;
case 1:
- if (pdata->variant != SI5351_VARIANT_C) {
+ if (variant != SI5351_VARIANT_C) {
dev_err(&client->dev,
"invalid parent %d for pll %d\n",
val, num);
@@ -1187,7 +1182,7 @@ static int si5351_dt_parse(struct i2c_client *client)
}
if (num >= 8 ||
- (pdata->variant == SI5351_VARIANT_A3 && num >= 3)) {
+ (variant == SI5351_VARIANT_A3 && num >= 3)) {
dev_err(&client->dev, "invalid clkout %d\n", num);
return -EINVAL;
}
@@ -1226,7 +1221,7 @@ static int si5351_dt_parse(struct i2c_client *client)
SI5351_CLKOUT_SRC_XTAL;
break;
case 3:
- if (pdata->variant != SI5351_VARIANT_C) {
+ if (variant != SI5351_VARIANT_C) {
dev_err(&client->dev,
"invalid parent %d for clkout %d\n",
val, num);
@@ -1298,7 +1293,7 @@ static int si5351_dt_parse(struct i2c_client *client)
return 0;
}
#else
-static int si5351_dt_parse(struct i2c_client *client)
+static int si5351_dt_parse(struct i2c_client *client, enum si5351_variant variant)
{
return 0;
}
@@ -1307,6 +1302,7 @@ static int si5351_dt_parse(struct i2c_client *client)
static int si5351_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ enum si5351_variant variant = (enum si5351_variant)id->driver_data;
struct si5351_platform_data *pdata;
struct si5351_driver_data *drvdata;
struct clk_init_data init;
@@ -1315,7 +1311,7 @@ static int si5351_i2c_probe(struct i2c_client *client,
u8 num_parents, num_clocks;
int ret, n;
- ret = si5351_dt_parse(client);
+ ret = si5351_dt_parse(client, variant);
if (ret)
return ret;
@@ -1331,7 +1327,7 @@ static int si5351_i2c_probe(struct i2c_client *client,
i2c_set_clientdata(client, drvdata);
drvdata->client = client;
- drvdata->variant = pdata->variant;
+ drvdata->variant = variant;
drvdata->pxtal = pdata->clk_xtal;
drvdata->pclkin = pdata->clk_clkin;
@@ -1568,10 +1564,10 @@ static int si5351_i2c_probe(struct i2c_client *client,
}
static const struct i2c_device_id si5351_i2c_ids[] = {
- { "si5351a", 0 },
- { "si5351a-msop", 0 },
- { "si5351b", 0 },
- { "si5351c", 0 },
+ { "si5351a", SI5351_VARIANT_A },
+ { "si5351a-msop", SI5351_VARIANT_A3 },
+ { "si5351b", SI5351_VARIANT_B },
+ { "si5351c", SI5351_VARIANT_C },
{ }
};
MODULE_DEVICE_TABLE(i2c, si5351_i2c_ids);
diff --git a/drivers/clk/clk-si5351.h b/drivers/clk/clk-si5351.h
index c0dbf2676872..4d0746b50c32 100644
--- a/drivers/clk/clk-si5351.h
+++ b/drivers/clk/clk-si5351.h
@@ -153,4 +153,18 @@
#define SI5351_XTAL_ENABLE (1<<6)
#define SI5351_MULTISYNTH_ENABLE (1<<4)
+/**
+ * enum si5351_variant - SiLabs Si5351 chip variant
+ * @SI5351_VARIANT_A: Si5351A (8 output clocks, XTAL input)
+ * @SI5351_VARIANT_A3: Si5351A MSOP10 (3 output clocks, XTAL input)
+ * @SI5351_VARIANT_B: Si5351B (8 output clocks, XTAL/VXCO input)
+ * @SI5351_VARIANT_C: Si5351C (8 output clocks, XTAL/CLKIN input)
+ */
+enum si5351_variant {
+ SI5351_VARIANT_A = 1,
+ SI5351_VARIANT_A3 = 2,
+ SI5351_VARIANT_B = 3,
+ SI5351_VARIANT_C = 4,
+};
+
#endif
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
new file mode 100644
index 000000000000..4bbbe32585ec
--- /dev/null
+++ b/drivers/clk/clk-si570.c
@@ -0,0 +1,531 @@
+/*
+ * Driver for Silicon Labs Si570/Si571 Programmable XO/VCXO
+ *
+ * Copyright (C) 2010, 2011 Ericsson AB.
+ * Copyright (C) 2011 Guenter Roeck.
+ * Copyright (C) 2011 - 2013 Xilinx Inc.
+ *
+ * Author: Guenter Roeck <guenter.roeck@ericsson.com>
+ * Sören Brinkmann <soren.brinkmann@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* Si570 registers */
+#define SI570_REG_HS_N1 7
+#define SI570_REG_N1_RFREQ0 8
+#define SI570_REG_RFREQ1 9
+#define SI570_REG_RFREQ2 10
+#define SI570_REG_RFREQ3 11
+#define SI570_REG_RFREQ4 12
+#define SI570_REG_CONTROL 135
+#define SI570_REG_FREEZE_DCO 137
+#define SI570_DIV_OFFSET_7PPM 6
+
+#define HS_DIV_SHIFT 5
+#define HS_DIV_MASK 0xe0
+#define HS_DIV_OFFSET 4
+#define N1_6_2_MASK 0x1f
+#define N1_1_0_MASK 0xc0
+#define RFREQ_37_32_MASK 0x3f
+
+#define SI570_MIN_FREQ 10000000L
+#define SI570_MAX_FREQ 1417500000L
+#define SI598_MAX_FREQ 525000000L
+
+#define FDCO_MIN 4850000000LL
+#define FDCO_MAX 5670000000LL
+
+#define SI570_CNTRL_RECALL (1 << 0)
+#define SI570_CNTRL_FREEZE_M (1 << 5)
+#define SI570_CNTRL_NEWFREQ (1 << 6)
+
+#define SI570_FREEZE_DCO (1 << 4)
+
+/**
+ * struct clk_si570:
+ * @hw: Clock hw struct
+ * @regmap: Device's regmap
+ * @div_offset: Register offset for dividers
+ * @max_freq: Maximum frequency for this device
+ * @fxtal: Factory xtal frequency
+ * @n1: Clock divider N1
+ * @hs_div: Clock divider HSDIV
+ * @rfreq: Clock multiplier RFREQ
+ * @frequency: Current output frequency
+ * @i2c_client: I2C client pointer
+ */
+struct clk_si570 {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ unsigned int div_offset;
+ u64 max_freq;
+ u64 fxtal;
+ unsigned int n1;
+ unsigned int hs_div;
+ u64 rfreq;
+ u64 frequency;
+ struct i2c_client *i2c_client;
+};
+#define to_clk_si570(_hw) container_of(_hw, struct clk_si570, hw)
+
+enum clk_si570_variant {
+ si57x,
+ si59x
+};
+
+/**
+ * si570_get_divs() - Read clock dividers from HW
+ * @data: Pointer to struct clk_si570
+ * @rfreq: Fractional multiplier (output)
+ * @n1: Divider N1 (output)
+ * @hs_div: Divider HSDIV (output)
+ * Returns 0 on success, negative errno otherwise.
+ *
+ * Retrieve clock dividers and multipliers from the HW.
+ */
+static int si570_get_divs(struct clk_si570 *data, u64 *rfreq,
+ unsigned int *n1, unsigned int *hs_div)
+{
+ int err;
+ u8 reg[6];
+ u64 tmp;
+
+ err = regmap_bulk_read(data->regmap, SI570_REG_HS_N1 + data->div_offset,
+ reg, ARRAY_SIZE(reg));
+ if (err)
+ return err;
+
+ *hs_div = ((reg[0] & HS_DIV_MASK) >> HS_DIV_SHIFT) + HS_DIV_OFFSET;
+ *n1 = ((reg[0] & N1_6_2_MASK) << 2) + ((reg[1] & N1_1_0_MASK) >> 6) + 1;
+ /* Handle invalid cases */
+ if (*n1 > 1)
+ *n1 &= ~1;
+
+ tmp = reg[1] & RFREQ_37_32_MASK;
+ tmp = (tmp << 8) + reg[2];
+ tmp = (tmp << 8) + reg[3];
+ tmp = (tmp << 8) + reg[4];
+ tmp = (tmp << 8) + reg[5];
+ *rfreq = tmp;
+
+ return 0;
+}
+
+/**
+ * si570_get_defaults() - Get default values
+ * @data: Driver data structure
+ * @fout: Factory frequency output
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int si570_get_defaults(struct clk_si570 *data, u64 fout)
+{
+ int err;
+ u64 fdco;
+
+ regmap_write(data->regmap, SI570_REG_CONTROL, SI570_CNTRL_RECALL);
+
+ err = si570_get_divs(data, &data->rfreq, &data->n1, &data->hs_div);
+ if (err)
+ return err;
+
+ /*
+ * Accept optional precision loss to avoid arithmetic overflows.
+ * Acceptable per Silicon Labs Application Note AN334.
+ */
+ fdco = fout * data->n1 * data->hs_div;
+ if (fdco >= (1LL << 36))
+ data->fxtal = div64_u64(fdco << 24, data->rfreq >> 4);
+ else
+ data->fxtal = div64_u64(fdco << 28, data->rfreq);
+
+ data->frequency = fout;
+
+ return 0;
+}
+
+/**
+ * si570_update_rfreq() - Update clock multiplier
+ * @data: Driver data structure
+ * Passes on regmap_bulk_write() return value.
+ */
+static int si570_update_rfreq(struct clk_si570 *data)
+{
+ u8 reg[5];
+
+ reg[0] = ((data->n1 - 1) << 6) |
+ ((data->rfreq >> 32) & RFREQ_37_32_MASK);
+ reg[1] = (data->rfreq >> 24) & 0xff;
+ reg[2] = (data->rfreq >> 16) & 0xff;
+ reg[3] = (data->rfreq >> 8) & 0xff;
+ reg[4] = data->rfreq & 0xff;
+
+ return regmap_bulk_write(data->regmap, SI570_REG_N1_RFREQ0 +
+ data->div_offset, reg, ARRAY_SIZE(reg));
+}
+
+/**
+ * si570_calc_divs() - Calculate clock dividers
+ * @frequency: Target frequency
+ * @data: Driver data structure
+ * @out_rfreq: RFREQ fractional multiplier (output)
+ * @out_n1: Clock divider N1 (output)
+ * @out_hs_div: Clock divider HSDIV (output)
+ * Returns 0 on success, negative errno otherwise.
+ *
+ * Calculate the clock dividers (@out_hs_div, @out_n1) and clock multiplier
+ * (@out_rfreq) for a given target @frequency.
+ */
+static int si570_calc_divs(unsigned long frequency, struct clk_si570 *data,
+ u64 *out_rfreq, unsigned int *out_n1, unsigned int *out_hs_div)
+{
+ int i;
+ unsigned int n1, hs_div;
+ u64 fdco, best_fdco = ULLONG_MAX;
+ static const uint8_t si570_hs_div_values[] = { 11, 9, 7, 6, 5, 4 };
+
+ for (i = 0; i < ARRAY_SIZE(si570_hs_div_values); i++) {
+ hs_div = si570_hs_div_values[i];
+ /* Calculate lowest possible value for n1 */
+ n1 = div_u64(div_u64(FDCO_MIN, hs_div), frequency);
+ if (!n1 || (n1 & 1))
+ n1++;
+ while (n1 <= 128) {
+ fdco = (u64)frequency * (u64)hs_div * (u64)n1;
+ if (fdco > FDCO_MAX)
+ break;
+ if (fdco >= FDCO_MIN && fdco < best_fdco) {
+ *out_n1 = n1;
+ *out_hs_div = hs_div;
+ *out_rfreq = div64_u64(fdco << 28, data->fxtal);
+ best_fdco = fdco;
+ }
+ n1 += (n1 == 1 ? 1 : 2);
+ }
+ }
+
+ if (best_fdco == ULLONG_MAX)
+ return -EINVAL;
+
+ return 0;
+}
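+
+/*
+ * Worked example (illustrative): for a 156.25 MHz target, the search above
+ * settles on HSDIV = 4 and N1 = 8, giving fDCO = 156.25 MHz * 4 * 8 = 5.0 GHz,
+ * the lowest fDCO inside the 4.85-5.67 GHz window; RFREQ then follows as
+ * fDCO * 2^28 / fxtal.
+ */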
+
+static unsigned long si570_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ int err;
+ u64 rfreq, rate;
+ unsigned int n1, hs_div;
+ struct clk_si570 *data = to_clk_si570(hw);
+
+ err = si570_get_divs(data, &rfreq, &n1, &hs_div);
+ if (err) {
+ dev_err(&data->i2c_client->dev, "unable to recalc rate\n");
+ return data->frequency;
+ }
+
+ rfreq = div_u64(rfreq, hs_div * n1);
+ rate = (data->fxtal * rfreq) >> 28;
+
+ return rate;
+}
+
+static long si570_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ int err;
+ u64 rfreq;
+ unsigned int n1, hs_div;
+ struct clk_si570 *data = to_clk_si570(hw);
+
+ if (!rate)
+ return 0;
+
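+ /*
+ * A relative change below 35/10000 (the 3,500 ppm small-change threshold)
+ * reuses the current dividers and only rescales RFREQ; larger changes
+ * recompute the dividers via si570_calc_divs().
+ */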
+ if (div64_u64(abs(rate - data->frequency) * 10000LL,
+ data->frequency) < 35) {
+ rfreq = div64_u64((data->rfreq * rate) +
+ div64_u64(data->frequency, 2), data->frequency);
+ n1 = data->n1;
+ hs_div = data->hs_div;
+
+ } else {
+ err = si570_calc_divs(rate, data, &rfreq, &n1, &hs_div);
+ if (err) {
+ dev_err(&data->i2c_client->dev,
+ "unable to round rate\n");
+ return 0;
+ }
+ }
+
+ return rate;
+}
+
+/**
+ * si570_set_frequency() - Adjust output frequency
+ * @data: Driver data structure
+ * @frequency: Target frequency
+ * Returns 0 on success.
+ *
+ * Update output frequency for big frequency changes (> 3,500 ppm).
+ */
+static int si570_set_frequency(struct clk_si570 *data, unsigned long frequency)
+{
+ int err;
+
+ err = si570_calc_divs(frequency, data, &data->rfreq, &data->n1,
+ &data->hs_div);
+ if (err)
+ return err;
+
+ /*
+ * The DCO reg should be accessed with a read-modify-write operation
+ * per AN334
+ */
+ regmap_write(data->regmap, SI570_REG_FREEZE_DCO, SI570_FREEZE_DCO);
+ regmap_write(data->regmap, SI570_REG_HS_N1 + data->div_offset,
+ ((data->hs_div - HS_DIV_OFFSET) << HS_DIV_SHIFT) |
+ (((data->n1 - 1) >> 2) & N1_6_2_MASK));
+ si570_update_rfreq(data);
+ regmap_write(data->regmap, SI570_REG_FREEZE_DCO, 0);
+ regmap_write(data->regmap, SI570_REG_CONTROL, SI570_CNTRL_NEWFREQ);
+
+ /* Applying a new frequency can take up to 10ms */
+ usleep_range(10000, 12000);
+
+ return 0;
+}
+
+/**
+ * si570_set_frequency_small() - Adjust output frequency
+ * @data: Driver data structure
+ * @frequency: Target frequency
+ * Returns 0 on success.
+ *
+ * Update output frequency for small frequency changes (< 3,500 ppm).
+ */
+static int si570_set_frequency_small(struct clk_si570 *data,
+ unsigned long frequency)
+{
+ /*
+ * This is a re-implementation of DIV_ROUND_CLOSEST
+ * using the div64_u64 function in lieu of letting the compiler
+ * insert EABI calls
+ */
+ data->rfreq = div64_u64((data->rfreq * frequency) +
+ div_u64(data->frequency, 2), data->frequency);
+ regmap_write(data->regmap, SI570_REG_CONTROL, SI570_CNTRL_FREEZE_M);
+ si570_update_rfreq(data);
+ regmap_write(data->regmap, SI570_REG_CONTROL, 0);
+
+ /* Applying a new frequency (small change) can take up to 100us */
+ usleep_range(100, 200);
+
+ return 0;
+}
+
+static int si570_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_si570 *data = to_clk_si570(hw);
+ struct i2c_client *client = data->i2c_client;
+ int err;
+
+ if (rate < SI570_MIN_FREQ || rate > data->max_freq) {
+ dev_err(&client->dev,
+ "requested frequency %lu Hz is out of range\n", rate);
+ return -EINVAL;
+ }
+
+ if (div64_u64(abs(rate - data->frequency) * 10000LL,
+ data->frequency) < 35)
+ err = si570_set_frequency_small(data, rate);
+ else
+ err = si570_set_frequency(data, rate);
+
+ if (err)
+ return err;
+
+ data->frequency = rate;
+
+ return 0;
+}
+
+static const struct clk_ops si570_clk_ops = {
+ .recalc_rate = si570_recalc_rate,
+ .round_rate = si570_round_rate,
+ .set_rate = si570_set_rate,
+};
+
+static bool si570_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SI570_REG_CONTROL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool si570_regmap_is_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SI570_REG_HS_N1 ... (SI570_REG_RFREQ4 + SI570_DIV_OFFSET_7PPM):
+ case SI570_REG_CONTROL:
+ case SI570_REG_FREEZE_DCO:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct regmap_config si570_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = 137,
+ .writeable_reg = si570_regmap_is_writeable,
+ .volatile_reg = si570_regmap_is_volatile,
+};
+
+static int si570_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct clk_si570 *data;
+ struct clk_init_data init;
+ struct clk *clk;
+ u32 initial_fout, factory_fout, stability;
+ int err;
+ enum clk_si570_variant variant = id->driver_data;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ init.ops = &si570_clk_ops;
+ init.flags = CLK_IS_ROOT;
+ init.num_parents = 0;
+ data->hw.init = &init;
+ data->i2c_client = client;
+
+ if (variant == si57x) {
+ err = of_property_read_u32(client->dev.of_node,
+ "temperature-stability", &stability);
+ if (err) {
+ dev_err(&client->dev,
+ "'temperature-stability' property missing\n");
+ return err;
+ }
+ /* adjust register offsets for 7ppm devices */
+ if (stability == 7)
+ data->div_offset = SI570_DIV_OFFSET_7PPM;
+
+ data->max_freq = SI570_MAX_FREQ;
+ } else {
+ data->max_freq = SI598_MAX_FREQ;
+ }
+
+ if (of_property_read_string(client->dev.of_node, "clock-output-names",
+ &init.name))
+ init.name = client->dev.of_node->name;
+
+ err = of_property_read_u32(client->dev.of_node, "factory-fout",
+ &factory_fout);
+ if (err) {
+ dev_err(&client->dev, "'factory-fout' property missing\n");
+ return err;
+ }
+
+ data->regmap = devm_regmap_init_i2c(client, &si570_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(&client->dev, "failed to allocate register map\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ i2c_set_clientdata(client, data);
+ err = si570_get_defaults(data, factory_fout);
+ if (err)
+ return err;
+
+ clk = devm_clk_register(&client->dev, &data->hw);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "clock registration failed\n");
+ return PTR_ERR(clk);
+ }
+ err = of_clk_add_provider(client->dev.of_node, of_clk_src_simple_get,
+ clk);
+ if (err) {
+ dev_err(&client->dev, "unable to add clk provider\n");
+ return err;
+ }
+
+ /* Read the requested initial output frequency from device tree */
+ if (!of_property_read_u32(client->dev.of_node, "clock-frequency",
+ &initial_fout)) {
+ err = clk_set_rate(clk, initial_fout);
+ if (err) {
+ of_clk_del_provider(client->dev.of_node);
+ return err;
+ }
+ }
+
+ /* Display a message indicating that we've successfully registered */
+ dev_info(&client->dev, "registered, current frequency %llu Hz\n",
+ data->frequency);
+
+ return 0;
+}
+
+static int si570_remove(struct i2c_client *client)
+{
+ of_clk_del_provider(client->dev.of_node);
+ return 0;
+}
+
+static const struct i2c_device_id si570_id[] = {
+ { "si570", si57x },
+ { "si571", si57x },
+ { "si598", si59x },
+ { "si599", si59x },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si570_id);
+
+static const struct of_device_id clk_si570_of_match[] = {
+ { .compatible = "silabs,si570" },
+ { .compatible = "silabs,si571" },
+ { .compatible = "silabs,si598" },
+ { .compatible = "silabs,si599" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, clk_si570_of_match);
+
+static struct i2c_driver si570_driver = {
+ .driver = {
+ .name = "si570",
+ .of_match_table = clk_si570_of_match,
+ },
+ .probe = si570_probe,
+ .remove = si570_remove,
+ .id_table = si570_id,
+};
+module_i2c_driver(si570_driver);
+
+MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com");
+MODULE_DESCRIPTION("Si570 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index 7fd5c5e9e25d..37e928846ec5 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -641,7 +641,7 @@ static unsigned long vtwm_pll_recalc_rate(struct clk_hw *hw,
return pll_freq;
}
-const struct clk_ops vtwm_pll_ops = {
+static const struct clk_ops vtwm_pll_ops = {
.round_rate = vtwm_pll_round_rate,
.set_rate = vtwm_pll_set_rate,
.recalc_rate = vtwm_pll_recalc_rate,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 2cf2ea6b77a1..5517944495d8 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -21,6 +21,8 @@
#include <linux/init.h>
#include <linux/sched.h>
+#include "clk.h"
+
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
@@ -92,7 +94,7 @@ static void clk_enable_unlock(unsigned long flags)
/*** debugfs support ***/
-#ifdef CONFIG_COMMON_CLK_DEBUG
+#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
static struct dentry *rootdir;
@@ -104,10 +106,11 @@ static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
if (!c)
return;
- seq_printf(s, "%*s%-*s %-11d %-12d %-10lu",
+ seq_printf(s, "%*s%-*s %-11d %-12d %-10lu %-11lu",
level * 3 + 1, "",
30 - level * 3, c->name,
- c->enable_count, c->prepare_count, clk_get_rate(c));
+ c->enable_count, c->prepare_count, clk_get_rate(c),
+ clk_get_accuracy(c));
seq_printf(s, "\n");
}
@@ -129,8 +132,8 @@ static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk *c;
- seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
- seq_printf(s, "---------------------------------------------------------------------\n");
+ seq_printf(s, " clock enable_cnt prepare_cnt rate accuracy\n");
+ seq_printf(s, "---------------------------------------------------------------------------------\n");
clk_prepare_lock();
@@ -167,6 +170,7 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
seq_printf(s, "\"enable_count\": %d,", c->enable_count);
seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
seq_printf(s, "\"rate\": %lu", clk_get_rate(c));
+ seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c));
}
static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
@@ -248,6 +252,11 @@ static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
if (!d)
goto err_out;
+ d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry,
+ (u32 *)&clk->accuracy);
+ if (!d)
+ goto err_out;
+
d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
(u32 *)&clk->flags);
if (!d)
@@ -272,7 +281,8 @@ static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
goto out;
err_out:
- debugfs_remove(clk->dentry);
+ debugfs_remove_recursive(clk->dentry);
+ clk->dentry = NULL;
out:
return ret;
}
@@ -342,6 +352,21 @@ out:
return ret;
}
+ /**
+ * clk_debug_unregister - remove a clk node from the debugfs clk tree
+ * @clk: the clk being removed from the debugfs clk tree
+ *
+ * Dynamically removes a clk and all its child clk nodes from the
+ * debugfs clk tree if clk->dentry points to debugfs created by
+ * clk_debug_register in __clk_init.
+ *
+ * Caller must hold prepare_lock.
+ */
+static void clk_debug_unregister(struct clk *clk)
+{
+ debugfs_remove_recursive(clk->dentry);
+}
+
/**
* clk_debug_reparent - reparent clk node in the debugfs clk tree
* @clk: the clk being reparented
@@ -432,6 +457,9 @@ static inline int clk_debug_register(struct clk *clk) { return 0; }
static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
{
}
+static inline void clk_debug_unregister(struct clk *clk)
+{
+}
#endif
/* caller must hold prepare_lock */
@@ -547,16 +575,19 @@ struct clk_hw *__clk_get_hw(struct clk *clk)
{
return !clk ? NULL : clk->hw;
}
+EXPORT_SYMBOL_GPL(__clk_get_hw);
u8 __clk_get_num_parents(struct clk *clk)
{
return !clk ? 0 : clk->num_parents;
}
+EXPORT_SYMBOL_GPL(__clk_get_num_parents);
struct clk *__clk_get_parent(struct clk *clk)
{
return !clk ? NULL : clk->parent;
}
+EXPORT_SYMBOL_GPL(__clk_get_parent);
struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
{
@@ -570,6 +601,7 @@ struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
else
return clk->parents[index];
}
+EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
unsigned int __clk_get_enable_count(struct clk *clk)
{
@@ -601,6 +633,15 @@ unsigned long __clk_get_rate(struct clk *clk)
out:
return ret;
}
+EXPORT_SYMBOL_GPL(__clk_get_rate);
+
+unsigned long __clk_get_accuracy(struct clk *clk)
+{
+ if (!clk)
+ return 0;
+
+ return clk->accuracy;
+}
unsigned long __clk_get_flags(struct clk *clk)
{
@@ -649,6 +690,7 @@ bool __clk_is_enabled(struct clk *clk)
out:
return !!ret;
}
+EXPORT_SYMBOL_GPL(__clk_is_enabled);
static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
@@ -740,6 +782,7 @@ out:
return best;
}
+EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
/*** clk api ***/
@@ -1016,6 +1059,59 @@ static int __clk_notify(struct clk *clk, unsigned long msg,
}
/**
+ * __clk_recalc_accuracies
+ * @clk: first clk in the subtree
+ *
+ * Walks the subtree of clks starting with clk and recalculates accuracies as
+ * it goes. Note that if a clk does not implement the .recalc_accuracy
+ * callback then it is assumed that the clock will take on the accuracy of its
+ * parent.
+ *
+ * Caller must hold prepare_lock.
+ */
+static void __clk_recalc_accuracies(struct clk *clk)
+{
+ unsigned long parent_accuracy = 0;
+ struct clk *child;
+
+ if (clk->parent)
+ parent_accuracy = clk->parent->accuracy;
+
+ if (clk->ops->recalc_accuracy)
+ clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
+ parent_accuracy);
+ else
+ clk->accuracy = parent_accuracy;
+
+ hlist_for_each_entry(child, &clk->children, child_node)
+ __clk_recalc_accuracies(child);
+}
+
+/**
+ * clk_get_accuracy - return the accuracy of clk
+ * @clk: the clk whose accuracy is being returned
+ *
+ * Simply returns the cached accuracy of the clk, unless
+ * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
+ * issued.
+ * If clk is NULL then returns 0.
+ */
+long clk_get_accuracy(struct clk *clk)
+{
+ unsigned long accuracy;
+
+ clk_prepare_lock();
+ if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
+ __clk_recalc_accuracies(clk);
+
+ accuracy = __clk_get_accuracy(clk);
+ clk_prepare_unlock();
+
+ return accuracy;
+}
+EXPORT_SYMBOL_GPL(clk_get_accuracy);
+
+/**
* __clk_recalc_rates
* @clk: first clk in the subtree
* @msg: notification type (see include/linux/clk.h)
@@ -1129,10 +1225,9 @@ static void clk_reparent(struct clk *clk, struct clk *new_parent)
clk->parent = new_parent;
}
-static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
{
unsigned long flags;
- int ret = 0;
struct clk *old_parent = clk->parent;
/*
@@ -1163,6 +1258,34 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
clk_reparent(clk, parent);
clk_enable_unlock(flags);
+ return old_parent;
+}
+
+static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
+ struct clk *old_parent)
+{
+ /*
+ * Finish the migration of prepare state and undo the changes done
+ * for preventing a race with clk_enable().
+ */
+ if (clk->prepare_count) {
+ clk_disable(clk);
+ clk_disable(old_parent);
+ __clk_unprepare(old_parent);
+ }
+
+ /* update debugfs with new clk tree topology */
+ clk_debug_reparent(clk, parent);
+}
+
+static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+{
+ unsigned long flags;
+ int ret = 0;
+ struct clk *old_parent;
+
+ old_parent = __clk_set_parent_before(clk, parent);
+
/* change clock input source */
if (parent && clk->ops->set_parent)
ret = clk->ops->set_parent(clk->hw, p_index);
@@ -1180,18 +1303,8 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
return ret;
}
- /*
- * Finish the migration of prepare state and undo the changes done
- * for preventing a race with clk_enable().
- */
- if (clk->prepare_count) {
- clk_disable(clk);
- clk_disable(old_parent);
- __clk_unprepare(old_parent);
- }
+ __clk_set_parent_after(clk, parent, old_parent);
- /* update debugfs with new clk tree topology */
- clk_debug_reparent(clk, parent);
return 0;
}
@@ -1376,17 +1489,32 @@ static void clk_change_rate(struct clk *clk)
struct clk *child;
unsigned long old_rate;
unsigned long best_parent_rate = 0;
+ bool skip_set_rate = false;
+ struct clk *old_parent;
old_rate = clk->rate;
- /* set parent */
- if (clk->new_parent && clk->new_parent != clk->parent)
- __clk_set_parent(clk, clk->new_parent, clk->new_parent_index);
-
- if (clk->parent)
+ if (clk->new_parent)
+ best_parent_rate = clk->new_parent->rate;
+ else if (clk->parent)
best_parent_rate = clk->parent->rate;
- if (clk->ops->set_rate)
+ if (clk->new_parent && clk->new_parent != clk->parent) {
+ old_parent = __clk_set_parent_before(clk, clk->new_parent);
+
+ if (clk->ops->set_rate_and_parent) {
+ skip_set_rate = true;
+ clk->ops->set_rate_and_parent(clk->hw, clk->new_rate,
+ best_parent_rate,
+ clk->new_parent_index);
+ } else if (clk->ops->set_parent) {
+ clk->ops->set_parent(clk->hw, clk->new_parent_index);
+ }
+
+ __clk_set_parent_after(clk, clk->new_parent, old_parent);
+ }
+
+ if (!skip_set_rate && clk->ops->set_rate)
clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
if (clk->ops->recalc_rate)
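
A provider-side sketch of the new .set_rate_and_parent callback that
clk_change_rate() prefers above (hypothetical hardware; struct foo_clk,
FOO_MUX_SHIFT and the omitted foo_set_parent/foo_set_rate/foo_recalc_rate
helpers are invented for illustration). It suits muxes whose parent select and
divider live in one register and can be committed atomically; as the
__clk_init() check added below requires, .set_parent and .set_rate must still
be implemented separately.

struct foo_clk {
	struct clk_hw hw;
	void __iomem *reg;
};
#define to_foo_clk(_hw) container_of(_hw, struct foo_clk, hw)
#define FOO_MUX_SHIFT 8		/* made-up register layout */

static int foo_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate, u8 index)
{
	struct foo_clk *foo = to_foo_clk(hw);
	u32 div = DIV_ROUND_UP(parent_rate, rate);
	u32 val = (index << FOO_MUX_SHIFT) | (div - 1);

	writel(val, foo->reg);	/* one write switches parent and divider */
	return 0;
}

static const struct clk_ops foo_ops = {
	.set_rate_and_parent	= foo_set_rate_and_parent,
	.set_parent		= foo_set_parent,	/* still required; definition omitted */
	.set_rate		= foo_set_rate,		/* still required; definition omitted */
	.recalc_rate		= foo_recalc_rate,	/* definition omitted */
};
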
@@ -1551,6 +1679,7 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
clk_reparent(clk, new_parent);
clk_debug_reparent(clk, new_parent);
+ __clk_recalc_accuracies(clk);
__clk_recalc_rates(clk, POST_RATE_CHANGE);
}
@@ -1621,11 +1750,13 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
/* do the re-parent */
ret = __clk_set_parent(clk, parent, p_index);
- /* propagate rate recalculation accordingly */
- if (ret)
+ /* propagate rate and accuracy recalculation accordingly */
+ if (ret) {
__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
- else
+ } else {
__clk_recalc_rates(clk, POST_RATE_CHANGE);
+ __clk_recalc_accuracies(clk);
+ }
out:
clk_prepare_unlock();
@@ -1678,6 +1809,14 @@ int __clk_init(struct device *dev, struct clk *clk)
goto out;
}
+ if (clk->ops->set_rate_and_parent &&
+ !(clk->ops->set_parent && clk->ops->set_rate)) {
+ pr_warn("%s: %s must implement .set_parent & .set_rate\n",
+ __func__, clk->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
/* throw a WARN if any entries in parent_names are NULL */
for (i = 0; i < clk->num_parents; i++)
WARN(!clk->parent_names[i],
@@ -1730,6 +1869,21 @@ int __clk_init(struct device *dev, struct clk *clk)
hlist_add_head(&clk->child_node, &clk_orphan_list);
/*
+ * Set clk's accuracy. The preferred method is to use
+ * .recalc_accuracy. For simple clocks and lazy developers the default
+ * fallback is to use the parent's accuracy. If a clock doesn't have a
+ * parent (or is orphaned) then accuracy is set to zero (perfect
+ * clock).
+ */
+ if (clk->ops->recalc_accuracy)
+ clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
+ __clk_get_accuracy(clk->parent));
+ else if (clk->parent)
+ clk->accuracy = clk->parent->accuracy;
+ else
+ clk->accuracy = 0;
+
+ /*
* Set clk's rate. The preferred method is to use .recalc_rate. For
* simple clocks and lazy developers the default fallback is to use the
* parent's rate. If a clock doesn't have a parent (or is orphaned)
@@ -1743,6 +1897,7 @@ int __clk_init(struct device *dev, struct clk *clk)
else
clk->rate = 0;
+ clk_debug_register(clk);
/*
* walk the list of orphan clocks and reparent any that are children of
* this clock
@@ -1773,8 +1928,7 @@ int __clk_init(struct device *dev, struct clk *clk)
if (clk->ops->init)
clk->ops->init(clk->hw);
- clk_debug_register(clk);
-
+ kref_init(&clk->ref);
out:
clk_prepare_unlock();
@@ -1810,6 +1964,10 @@ struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
clk->flags = hw->init->flags;
clk->parent_names = hw->init->parent_names;
clk->num_parents = hw->init->num_parents;
+ if (dev && dev->driver)
+ clk->owner = dev->driver->owner;
+ else
+ clk->owner = NULL;
ret = __clk_init(dev, clk);
if (ret)
@@ -1830,6 +1988,8 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
goto fail_name;
}
clk->ops = hw->init->ops;
+ if (dev && dev->driver)
+ clk->owner = dev->driver->owner;
clk->hw = hw;
clk->flags = hw->init->flags;
clk->num_parents = hw->init->num_parents;
@@ -1904,13 +2064,104 @@ fail_out:
}
EXPORT_SYMBOL_GPL(clk_register);
+/*
+ * Free memory allocated for a clock.
+ * Caller must hold prepare_lock.
+ */
+static void __clk_release(struct kref *ref)
+{
+ struct clk *clk = container_of(ref, struct clk, ref);
+ int i = clk->num_parents;
+
+ kfree(clk->parents);
+ while (--i >= 0)
+ kfree(clk->parent_names[i]);
+
+ kfree(clk->parent_names);
+ kfree(clk->name);
+ kfree(clk);
+}
+
+/*
+ * Empty clk_ops for unregistered clocks. These are used temporarily
+ * after clk_unregister() has been called on a clock and until the last
+ * clock consumer calls clk_put() and the struct clk object is freed.
+ */
+static int clk_nodrv_prepare_enable(struct clk_hw *hw)
+{
+ return -ENXIO;
+}
+
+static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return -ENXIO;
+}
+
+static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
+{
+ return -ENXIO;
+}
+
+static const struct clk_ops clk_nodrv_ops = {
+ .enable = clk_nodrv_prepare_enable,
+ .disable = clk_nodrv_disable_unprepare,
+ .prepare = clk_nodrv_prepare_enable,
+ .unprepare = clk_nodrv_disable_unprepare,
+ .set_rate = clk_nodrv_set_rate,
+ .set_parent = clk_nodrv_set_parent,
+};
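
What a consumer that still holds a reference observes once these ops are
installed, as a rough sketch (stale_clk is a hypothetical reference obtained
with clk_get() before the provider was unregistered):

static void use_stale_clock(struct clk *stale_clk)
{
	int ret;

	ret = clk_prepare(stale_clk);	/* -ENXIO via clk_nodrv_prepare_enable() */
	if (ret)
		pr_info("clock backing driver is gone: %d\n", ret);

	clk_put(stale_clk);	/* if this was the last reference, the clk is freed */
}
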
+
/**
* clk_unregister - unregister a currently registered clock
* @clk: clock to unregister
- *
- * Currently unimplemented.
*/
-void clk_unregister(struct clk *clk) {}
+void clk_unregister(struct clk *clk)
+{
+ unsigned long flags;
+
+ if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
+ return;
+
+ clk_prepare_lock();
+
+ if (clk->ops == &clk_nodrv_ops) {
+ pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
+ goto out;
+ }
+ /*
+ * Assign empty clock ops for consumers that might still hold
+ * a reference to this clock.
+ */
+ flags = clk_enable_lock();
+ clk->ops = &clk_nodrv_ops;
+ clk_enable_unlock(flags);
+
+ if (!hlist_empty(&clk->children)) {
+ struct clk *child;
+
+ /* Reparent all children to the orphan list. */
+ hlist_for_each_entry(child, &clk->children, child_node)
+ clk_set_parent(child, NULL);
+ }
+
+ clk_debug_unregister(clk);
+
+ hlist_del_init(&clk->child_node);
+
+ if (clk->prepare_count)
+ pr_warn("%s: unregistering prepared clock: %s\n",
+ __func__, clk->name);
+
+ kref_put(&clk->ref, __clk_release);
+out:
+ clk_prepare_unlock();
+}
EXPORT_SYMBOL_GPL(clk_unregister);
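
A hedged sketch of the intended pairing in a removable clock provider's
teardown path (driver structure and names are invented; providers registered
with devm_clk_register() get the equivalent via devm_clk_unregister()):

/* Hypothetical provider driver: undo clk_register() on device removal. */
static int foo_clk_remove(struct platform_device *pdev)
{
	struct foo_clk_priv *priv = platform_get_drvdata(pdev);
	int i;

	of_clk_del_provider(pdev->dev.of_node);	/* stop handing out new clocks */
	for (i = 0; i < priv->num_clks; i++)
		clk_unregister(priv->clks[i]);	/* stale consumers see clk_nodrv_ops */
	return 0;
}
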
static void devm_clk_release(struct device *dev, void *res)
@@ -1970,6 +2221,31 @@ void devm_clk_unregister(struct device *dev, struct clk *clk)
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);
+/*
+ * clkdev helpers
+ */
+int __clk_get(struct clk *clk)
+{
+ if (clk && !try_module_get(clk->owner))
+ return 0;
+
+ kref_get(&clk->ref);
+ return 1;
+}
+
+void __clk_put(struct clk *clk)
+{
+ if (WARN_ON_ONCE(IS_ERR(clk)))
+ return;
+
+ clk_prepare_lock();
+ kref_put(&clk->ref, __clk_release);
+ clk_prepare_unlock();
+
+ if (clk)
+ module_put(clk->owner);
+}
+
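
These helpers are called from the clkdev layer rather than by drivers; the
consumer-visible effect is sketched below (the "bus" con_id and the probe
fragment are made up):

static int foo_probe_fragment(struct device *dev)
{
	struct clk *clk;

	clk = clk_get(dev, "bus");	/* __clk_get(): try_module_get() + kref_get() */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* ... use the clock ... */

	clk_put(clk);			/* __clk_put(): kref_put() + module_put() */
	return 0;
}
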
/*** clk rate change notifiers ***/
/**
@@ -2104,13 +2380,22 @@ struct of_clk_provider {
void *data;
};
-extern struct of_device_id __clk_of_table[];
-
static const struct of_device_id __clk_of_table_sentinel
__used __section(__clk_of_table_end);
static LIST_HEAD(of_clk_providers);
-static DEFINE_MUTEX(of_clk_lock);
+static DEFINE_MUTEX(of_clk_mutex);
+
+/* of_clk_provider list locking helpers */
+void of_clk_lock(void)
+{
+ mutex_lock(&of_clk_mutex);
+}
+
+void of_clk_unlock(void)
+{
+ mutex_unlock(&of_clk_mutex);
+}
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data)
@@ -2154,9 +2439,9 @@ int of_clk_add_provider(struct device_node *np,
cp->data = data;
cp->get = clk_src_get;
- mutex_lock(&of_clk_lock);
+ mutex_lock(&of_clk_mutex);
list_add(&cp->link, &of_clk_providers);
- mutex_unlock(&of_clk_lock);
+ mutex_unlock(&of_clk_mutex);
pr_debug("Added clock from %s\n", np->full_name);
return 0;
@@ -2171,7 +2456,7 @@ void of_clk_del_provider(struct device_node *np)
{
struct of_clk_provider *cp;
- mutex_lock(&of_clk_lock);
+ mutex_lock(&of_clk_mutex);
list_for_each_entry(cp, &of_clk_providers, link) {
if (cp->node == np) {
list_del(&cp->link);
@@ -2180,24 +2465,33 @@ void of_clk_del_provider(struct device_node *np)
break;
}
}
- mutex_unlock(&of_clk_lock);
+ mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);
-struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
+struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
struct of_clk_provider *provider;
struct clk *clk = ERR_PTR(-ENOENT);
/* Check if we have such a provider in our array */
- mutex_lock(&of_clk_lock);
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np)
clk = provider->get(clkspec, provider->data);
if (!IS_ERR(clk))
break;
}
- mutex_unlock(&of_clk_lock);
+
+ return clk;
+}
+
+struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
+{
+ struct clk *clk;
+
+ mutex_lock(&of_clk_mutex);
+ clk = __of_clk_get_from_provider(clkspec);
+ mutex_unlock(&of_clk_mutex);
return clk;
}
@@ -2245,7 +2539,7 @@ void __init of_clk_init(const struct of_device_id *matches)
struct device_node *np;
if (!matches)
- matches = __clk_of_table;
+ matches = &__clk_of_table;
for_each_matching_node_and_match(np, matches, &match) {
of_clk_init_cb_t clk_init_cb = match->data;
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
new file mode 100644
index 000000000000..795cc9f0dac0
--- /dev/null
+++ b/drivers/clk/clk.h
@@ -0,0 +1,16 @@
+/*
+ * linux/drivers/clk/clk.h
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec);
+void of_clk_lock(void);
+void of_clk_unlock(void);
+#endif
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 442a31363873..48f67218247c 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -21,6 +21,8 @@
#include <linux/clkdev.h>
#include <linux/of.h>
+#include "clk.h"
+
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
@@ -39,7 +41,13 @@ struct clk *of_clk_get(struct device_node *np, int index)
if (rc)
return ERR_PTR(rc);
- clk = of_clk_get_from_provider(&clkspec);
+ of_clk_lock();
+ clk = __of_clk_get_from_provider(&clkspec);
+
+ if (!IS_ERR(clk) && !__clk_get(clk))
+ clk = ERR_PTR(-ENOENT);
+
+ of_clk_unlock();
of_node_put(clkspec.np);
return clk;
}
@@ -157,7 +165,7 @@ struct clk *clk_get(struct device *dev, const char *con_id)
if (dev) {
clk = of_clk_get_by_name(dev->of_node, con_id);
- if (!IS_ERR(clk) && __clk_get(clk))
+ if (!IS_ERR(clk))
return clk;
}
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
new file mode 100644
index 000000000000..a049108341fc
--- /dev/null
+++ b/drivers/clk/hisilicon/Makefile
@@ -0,0 +1,5 @@
+#
+# Hisilicon Clock specific Makefile
+#
+
+obj-y += clk.o clkgate-separated.o clk-hi3620.o
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
new file mode 100644
index 000000000000..f24ad6a3a797
--- /dev/null
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -0,0 +1,242 @@
+/*
+ * Hisilicon Hi3620 clock driver
+ *
+ * Copyright (c) 2012-2013 Hisilicon Limited.
+ * Copyright (c) 2012-2013 Linaro Limited.
+ *
+ * Author: Haojian Zhuang <haojian.zhuang@linaro.org>
+ * Xin Li <li.xin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#include <dt-bindings/clock/hi3620-clock.h>
+
+#include "clk.h"
+
+/* clock parent list */
+static const char *timer0_mux_p[] __initdata = { "osc32k", "timerclk01", };
+static const char *timer1_mux_p[] __initdata = { "osc32k", "timerclk01", };
+static const char *timer2_mux_p[] __initdata = { "osc32k", "timerclk23", };
+static const char *timer3_mux_p[] __initdata = { "osc32k", "timerclk23", };
+static const char *timer4_mux_p[] __initdata = { "osc32k", "timerclk45", };
+static const char *timer5_mux_p[] __initdata = { "osc32k", "timerclk45", };
+static const char *timer6_mux_p[] __initdata = { "osc32k", "timerclk67", };
+static const char *timer7_mux_p[] __initdata = { "osc32k", "timerclk67", };
+static const char *timer8_mux_p[] __initdata = { "osc32k", "timerclk89", };
+static const char *timer9_mux_p[] __initdata = { "osc32k", "timerclk89", };
+static const char *uart0_mux_p[] __initdata = { "osc26m", "pclk", };
+static const char *uart1_mux_p[] __initdata = { "osc26m", "pclk", };
+static const char *uart2_mux_p[] __initdata = { "osc26m", "pclk", };
+static const char *uart3_mux_p[] __initdata = { "osc26m", "pclk", };
+static const char *uart4_mux_p[] __initdata = { "osc26m", "pclk", };
+static const char *spi0_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
+static const char *spi1_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
+static const char *spi2_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
+/* share axi parent */
+static const char *saxi_mux_p[] __initdata = { "armpll3", "armpll2", };
+static const char *pwm0_mux_p[] __initdata = { "osc32k", "osc26m", };
+static const char *pwm1_mux_p[] __initdata = { "osc32k", "osc26m", };
+static const char *sd_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *mmc1_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *mmc1_mux2_p[] __initdata = { "osc26m", "mmc1_div", };
+static const char *g2d_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *venc_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *vdec_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *vpp_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *edc0_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *ldi0_mux_p[] __initdata = { "armpll2", "armpll4",
+ "armpll3", "armpll5", };
+static const char *edc1_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *ldi1_mux_p[] __initdata = { "armpll2", "armpll4",
+ "armpll3", "armpll5", };
+static const char *rclk_hsic_p[] __initdata = { "armpll3", "armpll2", };
+static const char *mmc2_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *mmc3_mux_p[] __initdata = { "armpll2", "armpll3", };
+
+
+/* fixed rate clocks */
+static struct hisi_fixed_rate_clock hi3620_fixed_rate_clks[] __initdata = {
+ { HI3620_OSC32K, "osc32k", NULL, CLK_IS_ROOT, 32768, },
+ { HI3620_OSC26M, "osc26m", NULL, CLK_IS_ROOT, 26000000, },
+ { HI3620_PCLK, "pclk", NULL, CLK_IS_ROOT, 26000000, },
+ { HI3620_PLL_ARM0, "armpll0", NULL, CLK_IS_ROOT, 1600000000, },
+ { HI3620_PLL_ARM1, "armpll1", NULL, CLK_IS_ROOT, 1600000000, },
+ { HI3620_PLL_PERI, "armpll2", NULL, CLK_IS_ROOT, 1440000000, },
+ { HI3620_PLL_USB, "armpll3", NULL, CLK_IS_ROOT, 1440000000, },
+ { HI3620_PLL_HDMI, "armpll4", NULL, CLK_IS_ROOT, 1188000000, },
+ { HI3620_PLL_GPU, "armpll5", NULL, CLK_IS_ROOT, 1300000000, },
+};
+
+/* fixed factor clocks */
+static struct hisi_fixed_factor_clock hi3620_fixed_factor_clks[] __initdata = {
+ { HI3620_RCLK_TCXO, "rclk_tcxo", "osc26m", 1, 4, 0, },
+ { HI3620_RCLK_CFGAXI, "rclk_cfgaxi", "armpll2", 1, 30, 0, },
+ { HI3620_RCLK_PICO, "rclk_pico", "hsic_div", 1, 40, 0, },
+};
+
+static struct hisi_mux_clock hi3620_mux_clks[] __initdata = {
+ { HI3620_TIMER0_MUX, "timer0_mux", timer0_mux_p, ARRAY_SIZE(timer0_mux_p), CLK_SET_RATE_PARENT, 0, 15, 2, 0, },
+ { HI3620_TIMER1_MUX, "timer1_mux", timer1_mux_p, ARRAY_SIZE(timer1_mux_p), CLK_SET_RATE_PARENT, 0, 17, 2, 0, },
+ { HI3620_TIMER2_MUX, "timer2_mux", timer2_mux_p, ARRAY_SIZE(timer2_mux_p), CLK_SET_RATE_PARENT, 0, 19, 2, 0, },
+ { HI3620_TIMER3_MUX, "timer3_mux", timer3_mux_p, ARRAY_SIZE(timer3_mux_p), CLK_SET_RATE_PARENT, 0, 21, 2, 0, },
+ { HI3620_TIMER4_MUX, "timer4_mux", timer4_mux_p, ARRAY_SIZE(timer4_mux_p), CLK_SET_RATE_PARENT, 0x18, 0, 2, 0, },
+ { HI3620_TIMER5_MUX, "timer5_mux", timer5_mux_p, ARRAY_SIZE(timer5_mux_p), CLK_SET_RATE_PARENT, 0x18, 2, 2, 0, },
+ { HI3620_TIMER6_MUX, "timer6_mux", timer6_mux_p, ARRAY_SIZE(timer6_mux_p), CLK_SET_RATE_PARENT, 0x18, 4, 2, 0, },
+ { HI3620_TIMER7_MUX, "timer7_mux", timer7_mux_p, ARRAY_SIZE(timer7_mux_p), CLK_SET_RATE_PARENT, 0x18, 6, 2, 0, },
+ { HI3620_TIMER8_MUX, "timer8_mux", timer8_mux_p, ARRAY_SIZE(timer8_mux_p), CLK_SET_RATE_PARENT, 0x18, 8, 2, 0, },
+ { HI3620_TIMER9_MUX, "timer9_mux", timer9_mux_p, ARRAY_SIZE(timer9_mux_p), CLK_SET_RATE_PARENT, 0x18, 10, 2, 0, },
+ { HI3620_UART0_MUX, "uart0_mux", uart0_mux_p, ARRAY_SIZE(uart0_mux_p), CLK_SET_RATE_PARENT, 0x100, 7, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_UART1_MUX, "uart1_mux", uart1_mux_p, ARRAY_SIZE(uart1_mux_p), CLK_SET_RATE_PARENT, 0x100, 8, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_UART2_MUX, "uart2_mux", uart2_mux_p, ARRAY_SIZE(uart2_mux_p), CLK_SET_RATE_PARENT, 0x100, 9, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_UART3_MUX, "uart3_mux", uart3_mux_p, ARRAY_SIZE(uart3_mux_p), CLK_SET_RATE_PARENT, 0x100, 10, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_UART4_MUX, "uart4_mux", uart4_mux_p, ARRAY_SIZE(uart4_mux_p), CLK_SET_RATE_PARENT, 0x100, 11, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_SPI0_MUX, "spi0_mux", spi0_mux_p, ARRAY_SIZE(spi0_mux_p), CLK_SET_RATE_PARENT, 0x100, 12, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_SPI1_MUX, "spi1_mux", spi1_mux_p, ARRAY_SIZE(spi1_mux_p), CLK_SET_RATE_PARENT, 0x100, 13, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_SPI2_MUX, "spi2_mux", spi2_mux_p, ARRAY_SIZE(spi2_mux_p), CLK_SET_RATE_PARENT, 0x100, 14, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_SAXI_MUX, "saxi_mux", saxi_mux_p, ARRAY_SIZE(saxi_mux_p), CLK_SET_RATE_PARENT, 0x100, 15, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_PWM0_MUX, "pwm0_mux", pwm0_mux_p, ARRAY_SIZE(pwm0_mux_p), CLK_SET_RATE_PARENT, 0x104, 10, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_PWM1_MUX, "pwm1_mux", pwm1_mux_p, ARRAY_SIZE(pwm1_mux_p), CLK_SET_RATE_PARENT, 0x104, 11, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_SD_MUX, "sd_mux", sd_mux_p, ARRAY_SIZE(sd_mux_p), CLK_SET_RATE_PARENT, 0x108, 4, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_MMC1_MUX, "mmc1_mux", mmc1_mux_p, ARRAY_SIZE(mmc1_mux_p), CLK_SET_RATE_PARENT, 0x108, 9, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_MMC1_MUX2, "mmc1_mux2", mmc1_mux2_p, ARRAY_SIZE(mmc1_mux2_p), CLK_SET_RATE_PARENT, 0x108, 10, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_G2D_MUX, "g2d_mux", g2d_mux_p, ARRAY_SIZE(g2d_mux_p), CLK_SET_RATE_PARENT, 0x10c, 5, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_VENC_MUX, "venc_mux", venc_mux_p, ARRAY_SIZE(venc_mux_p), CLK_SET_RATE_PARENT, 0x10c, 11, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_VDEC_MUX, "vdec_mux", vdec_mux_p, ARRAY_SIZE(vdec_mux_p), CLK_SET_RATE_PARENT, 0x110, 5, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_VPP_MUX, "vpp_mux", vpp_mux_p, ARRAY_SIZE(vpp_mux_p), CLK_SET_RATE_PARENT, 0x110, 11, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_EDC0_MUX, "edc0_mux", edc0_mux_p, ARRAY_SIZE(edc0_mux_p), CLK_SET_RATE_PARENT, 0x114, 6, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_LDI0_MUX, "ldi0_mux", ldi0_mux_p, ARRAY_SIZE(ldi0_mux_p), CLK_SET_RATE_PARENT, 0x114, 13, 2, CLK_MUX_HIWORD_MASK, },
+ { HI3620_EDC1_MUX, "edc1_mux", edc1_mux_p, ARRAY_SIZE(edc1_mux_p), CLK_SET_RATE_PARENT, 0x118, 6, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_LDI1_MUX, "ldi1_mux", ldi1_mux_p, ARRAY_SIZE(ldi1_mux_p), CLK_SET_RATE_PARENT, 0x118, 14, 2, CLK_MUX_HIWORD_MASK, },
+ { HI3620_RCLK_HSIC, "rclk_hsic", rclk_hsic_p, ARRAY_SIZE(rclk_hsic_p), CLK_SET_RATE_PARENT, 0x130, 2, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_MMC2_MUX, "mmc2_mux", mmc2_mux_p, ARRAY_SIZE(mmc2_mux_p), CLK_SET_RATE_PARENT, 0x140, 4, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_MMC3_MUX, "mmc3_mux", mmc3_mux_p, ARRAY_SIZE(mmc3_mux_p), CLK_SET_RATE_PARENT, 0x140, 9, 1, CLK_MUX_HIWORD_MASK, },
+};
+
+static struct hisi_divider_clock hi3620_div_clks[] __initdata = {
+ { HI3620_SHAREAXI_DIV, "saxi_div", "saxi_mux", 0, 0x100, 0, 5, CLK_DIVIDER_HIWORD_MASK, NULL, },
+ { HI3620_CFGAXI_DIV, "cfgaxi_div", "saxi_div", 0, 0x100, 5, 2, CLK_DIVIDER_HIWORD_MASK, NULL, },
+ { HI3620_SD_DIV, "sd_div", "sd_mux", 0, 0x108, 0, 4, CLK_DIVIDER_HIWORD_MASK, NULL, },
+ { HI3620_MMC1_DIV, "mmc1_div", "mmc1_mux", 0, 0x108, 5, 4, CLK_DIVIDER_HIWORD_MASK, NULL, },
+ { HI3620_HSIC_DIV, "hsic_div", "rclk_hsic", 0, 0x130, 0, 2, CLK_DIVIDER_HIWORD_MASK, NULL, },
+ { HI3620_MMC2_DIV, "mmc2_div", "mmc2_mux", 0, 0x140, 0, 4, CLK_DIVIDER_HIWORD_MASK, NULL, },
+ { HI3620_MMC3_DIV, "mmc3_div", "mmc3_mux", 0, 0x140, 5, 4, CLK_DIVIDER_HIWORD_MASK, NULL, },
+};
+
+static struct hisi_gate_clock hi3620_seperated_gate_clks[] __initdata = {
+ { HI3620_TIMERCLK01, "timerclk01", "timer_rclk01", CLK_SET_RATE_PARENT, 0x20, 0, 0, },
+ { HI3620_TIMER_RCLK01, "timer_rclk01", "rclk_tcxo", CLK_SET_RATE_PARENT, 0x20, 1, 0, },
+ { HI3620_TIMERCLK23, "timerclk23", "timer_rclk23", CLK_SET_RATE_PARENT, 0x20, 2, 0, },
+ { HI3620_TIMER_RCLK23, "timer_rclk23", "rclk_tcxo", CLK_SET_RATE_PARENT, 0x20, 3, 0, },
+ { HI3620_RTCCLK, "rtcclk", "pclk", CLK_SET_RATE_PARENT, 0x20, 5, 0, },
+ { HI3620_KPC_CLK, "kpc_clk", "pclk", CLK_SET_RATE_PARENT, 0x20, 6, 0, },
+ { HI3620_GPIOCLK0, "gpioclk0", "pclk", CLK_SET_RATE_PARENT, 0x20, 8, 0, },
+ { HI3620_GPIOCLK1, "gpioclk1", "pclk", CLK_SET_RATE_PARENT, 0x20, 9, 0, },
+ { HI3620_GPIOCLK2, "gpioclk2", "pclk", CLK_SET_RATE_PARENT, 0x20, 10, 0, },
+ { HI3620_GPIOCLK3, "gpioclk3", "pclk", CLK_SET_RATE_PARENT, 0x20, 11, 0, },
+ { HI3620_GPIOCLK4, "gpioclk4", "pclk", CLK_SET_RATE_PARENT, 0x20, 12, 0, },
+ { HI3620_GPIOCLK5, "gpioclk5", "pclk", CLK_SET_RATE_PARENT, 0x20, 13, 0, },
+ { HI3620_GPIOCLK6, "gpioclk6", "pclk", CLK_SET_RATE_PARENT, 0x20, 14, 0, },
+ { HI3620_GPIOCLK7, "gpioclk7", "pclk", CLK_SET_RATE_PARENT, 0x20, 15, 0, },
+ { HI3620_GPIOCLK8, "gpioclk8", "pclk", CLK_SET_RATE_PARENT, 0x20, 16, 0, },
+ { HI3620_GPIOCLK9, "gpioclk9", "pclk", CLK_SET_RATE_PARENT, 0x20, 17, 0, },
+ { HI3620_GPIOCLK10, "gpioclk10", "pclk", CLK_SET_RATE_PARENT, 0x20, 18, 0, },
+ { HI3620_GPIOCLK11, "gpioclk11", "pclk", CLK_SET_RATE_PARENT, 0x20, 19, 0, },
+ { HI3620_GPIOCLK12, "gpioclk12", "pclk", CLK_SET_RATE_PARENT, 0x20, 20, 0, },
+ { HI3620_GPIOCLK13, "gpioclk13", "pclk", CLK_SET_RATE_PARENT, 0x20, 21, 0, },
+ { HI3620_GPIOCLK14, "gpioclk14", "pclk", CLK_SET_RATE_PARENT, 0x20, 22, 0, },
+ { HI3620_GPIOCLK15, "gpioclk15", "pclk", CLK_SET_RATE_PARENT, 0x20, 23, 0, },
+ { HI3620_GPIOCLK16, "gpioclk16", "pclk", CLK_SET_RATE_PARENT, 0x20, 24, 0, },
+ { HI3620_GPIOCLK17, "gpioclk17", "pclk", CLK_SET_RATE_PARENT, 0x20, 25, 0, },
+ { HI3620_GPIOCLK18, "gpioclk18", "pclk", CLK_SET_RATE_PARENT, 0x20, 26, 0, },
+ { HI3620_GPIOCLK19, "gpioclk19", "pclk", CLK_SET_RATE_PARENT, 0x20, 27, 0, },
+ { HI3620_GPIOCLK20, "gpioclk20", "pclk", CLK_SET_RATE_PARENT, 0x20, 28, 0, },
+ { HI3620_GPIOCLK21, "gpioclk21", "pclk", CLK_SET_RATE_PARENT, 0x20, 29, 0, },
+ { HI3620_DPHY0_CLK, "dphy0_clk", "osc26m", CLK_SET_RATE_PARENT, 0x30, 15, 0, },
+ { HI3620_DPHY1_CLK, "dphy1_clk", "osc26m", CLK_SET_RATE_PARENT, 0x30, 16, 0, },
+ { HI3620_DPHY2_CLK, "dphy2_clk", "osc26m", CLK_SET_RATE_PARENT, 0x30, 17, 0, },
+ { HI3620_USBPHY_CLK, "usbphy_clk", "rclk_pico", CLK_SET_RATE_PARENT, 0x30, 24, 0, },
+ { HI3620_ACP_CLK, "acp_clk", "rclk_cfgaxi", CLK_SET_RATE_PARENT, 0x30, 28, 0, },
+ { HI3620_TIMERCLK45, "timerclk45", "rclk_tcxo", CLK_SET_RATE_PARENT, 0x40, 3, 0, },
+ { HI3620_TIMERCLK67, "timerclk67", "rclk_tcxo", CLK_SET_RATE_PARENT, 0x40, 4, 0, },
+ { HI3620_TIMERCLK89, "timerclk89", "rclk_tcxo", CLK_SET_RATE_PARENT, 0x40, 5, 0, },
+ { HI3620_PWMCLK0, "pwmclk0", "pwm0_mux", CLK_SET_RATE_PARENT, 0x40, 7, 0, },
+ { HI3620_PWMCLK1, "pwmclk1", "pwm1_mux", CLK_SET_RATE_PARENT, 0x40, 8, 0, },
+ { HI3620_UARTCLK0, "uartclk0", "uart0_mux", CLK_SET_RATE_PARENT, 0x40, 16, 0, },
+ { HI3620_UARTCLK1, "uartclk1", "uart1_mux", CLK_SET_RATE_PARENT, 0x40, 17, 0, },
+ { HI3620_UARTCLK2, "uartclk2", "uart2_mux", CLK_SET_RATE_PARENT, 0x40, 18, 0, },
+ { HI3620_UARTCLK3, "uartclk3", "uart3_mux", CLK_SET_RATE_PARENT, 0x40, 19, 0, },
+ { HI3620_UARTCLK4, "uartclk4", "uart4_mux", CLK_SET_RATE_PARENT, 0x40, 20, 0, },
+ { HI3620_SPICLK0, "spiclk0", "spi0_mux", CLK_SET_RATE_PARENT, 0x40, 21, 0, },
+ { HI3620_SPICLK1, "spiclk1", "spi1_mux", CLK_SET_RATE_PARENT, 0x40, 22, 0, },
+ { HI3620_SPICLK2, "spiclk2", "spi2_mux", CLK_SET_RATE_PARENT, 0x40, 23, 0, },
+ { HI3620_I2CCLK0, "i2cclk0", "pclk", CLK_SET_RATE_PARENT, 0x40, 24, 0, },
+ { HI3620_I2CCLK1, "i2cclk1", "pclk", CLK_SET_RATE_PARENT, 0x40, 25, 0, },
+ { HI3620_SCI_CLK, "sci_clk", "osc26m", CLK_SET_RATE_PARENT, 0x40, 26, 0, },
+ { HI3620_I2CCLK2, "i2cclk2", "pclk", CLK_SET_RATE_PARENT, 0x40, 28, 0, },
+ { HI3620_I2CCLK3, "i2cclk3", "pclk", CLK_SET_RATE_PARENT, 0x40, 29, 0, },
+ { HI3620_DDRC_PER_CLK, "ddrc_per_clk", "rclk_cfgaxi", CLK_SET_RATE_PARENT, 0x50, 9, 0, },
+ { HI3620_DMAC_CLK, "dmac_clk", "rclk_cfgaxi", CLK_SET_RATE_PARENT, 0x50, 10, 0, },
+ { HI3620_USB2DVC_CLK, "usb2dvc_clk", "rclk_cfgaxi", CLK_SET_RATE_PARENT, 0x50, 17, 0, },
+ { HI3620_SD_CLK, "sd_clk", "sd_div", CLK_SET_RATE_PARENT, 0x50, 20, 0, },
+ { HI3620_MMC_CLK1, "mmc_clk1", "mmc1_mux2", CLK_SET_RATE_PARENT, 0x50, 21, 0, },
+ { HI3620_MMC_CLK2, "mmc_clk2", "mmc2_div", CLK_SET_RATE_PARENT, 0x50, 22, 0, },
+ { HI3620_MMC_CLK3, "mmc_clk3", "mmc3_div", CLK_SET_RATE_PARENT, 0x50, 23, 0, },
+ { HI3620_MCU_CLK, "mcu_clk", "acp_clk", CLK_SET_RATE_PARENT, 0x50, 24, 0, },
+};
+
+static void __init hi3620_clk_init(struct device_node *np)
+{
+ void __iomem *base;
+
+ if (np) {
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("failed to map Hi3620 clock registers\n");
+ return;
+ }
+ } else {
+ pr_err("failed to find Hi3620 clock node in DTS\n");
+ return;
+ }
+
+ hisi_clk_init(np, HI3620_NR_CLKS);
+
+ hisi_clk_register_fixed_rate(hi3620_fixed_rate_clks,
+ ARRAY_SIZE(hi3620_fixed_rate_clks),
+ base);
+ hisi_clk_register_fixed_factor(hi3620_fixed_factor_clks,
+ ARRAY_SIZE(hi3620_fixed_factor_clks),
+ base);
+ hisi_clk_register_mux(hi3620_mux_clks, ARRAY_SIZE(hi3620_mux_clks),
+ base);
+ hisi_clk_register_divider(hi3620_div_clks, ARRAY_SIZE(hi3620_div_clks),
+ base);
+ hisi_clk_register_gate_sep(hi3620_seperated_gate_clks,
+ ARRAY_SIZE(hi3620_seperated_gate_clks),
+ base);
+}
+CLK_OF_DECLARE(hi3620_clk, "hisilicon,hi3620-clock", hi3620_clk_init);
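
For reference, a hedged sketch of a peripheral driver on this SoC consuming
one of the clocks exposed above (the driver is invented; the device tree is
assumed to carry something like "clocks = <&clock HI3620_UARTCLK0>;" so the
lookup resolves through of_clk_src_onecell_get() registered in
hisi_clk_init()):

static int foo_uart_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}
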
diff --git a/drivers/clk/hisilicon/clk.c b/drivers/clk/hisilicon/clk.c
new file mode 100644
index 000000000000..a3a7152c92d9
--- /dev/null
+++ b/drivers/clk/hisilicon/clk.c
@@ -0,0 +1,171 @@
+/*
+ * Hisilicon clock driver
+ *
+ * Copyright (c) 2012-2013 Hisilicon Limited.
+ * Copyright (c) 2012-2013 Linaro Limited.
+ *
+ * Author: Haojian Zhuang <haojian.zhuang@linaro.org>
+ * Xin Li <li.xin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#include "clk.h"
+
+static DEFINE_SPINLOCK(hisi_clk_lock);
+static struct clk **clk_table;
+static struct clk_onecell_data clk_data;
+
+void __init hisi_clk_init(struct device_node *np, int nr_clks)
+{
+ clk_table = kzalloc(sizeof(struct clk *) * nr_clks, GFP_KERNEL);
+ if (!clk_table) {
+ pr_err("%s: could not allocate clock lookup table\n", __func__);
+ return;
+ }
+ clk_data.clks = clk_table;
+ clk_data.clk_num = nr_clks;
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+}
+
+void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *clks,
+ int nums, void __iomem *base)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = clk_register_fixed_rate(NULL, clks[i].name,
+ clks[i].parent_name,
+ clks[i].flags,
+ clks[i].fixed_rate);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ }
+}
+
+void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *clks,
+ int nums, void __iomem *base)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = clk_register_fixed_factor(NULL, clks[i].name,
+ clks[i].parent_name,
+ clks[i].flags, clks[i].mult,
+ clks[i].div);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ }
+}
+
+void __init hisi_clk_register_mux(struct hisi_mux_clock *clks,
+ int nums, void __iomem *base)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = clk_register_mux(NULL, clks[i].name, clks[i].parent_names,
+ clks[i].num_parents, clks[i].flags,
+ base + clks[i].offset, clks[i].shift,
+ clks[i].width, clks[i].mux_flags,
+ &hisi_clk_lock);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+
+ if (clks[i].alias)
+ clk_register_clkdev(clk, clks[i].alias, NULL);
+
+ clk_table[clks[i].id] = clk;
+ }
+}
+
+void __init hisi_clk_register_divider(struct hisi_divider_clock *clks,
+ int nums, void __iomem *base)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = clk_register_divider_table(NULL, clks[i].name,
+ clks[i].parent_name,
+ clks[i].flags,
+ base + clks[i].offset,
+ clks[i].shift, clks[i].width,
+ clks[i].div_flags,
+ clks[i].table,
+ &hisi_clk_lock);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+
+ if (clks[i].alias)
+ clk_register_clkdev(clk, clks[i].alias, NULL);
+
+ clk_table[clks[i].id] = clk;
+ }
+}
+
+void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *clks,
+ int nums, void __iomem *base)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = hisi_register_clkgate_sep(NULL, clks[i].name,
+ clks[i].parent_name,
+ clks[i].flags,
+ base + clks[i].offset,
+ clks[i].bit_idx,
+ clks[i].gate_flags,
+ &hisi_clk_lock);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+
+ if (clks[i].alias)
+ clk_register_clkdev(clk, clks[i].alias, NULL);
+
+ clk_table[clks[i].id] = clk;
+ }
+}
diff --git a/drivers/clk/hisilicon/clk.h b/drivers/clk/hisilicon/clk.h
new file mode 100644
index 000000000000..4a6beebefb7a
--- /dev/null
+++ b/drivers/clk/hisilicon/clk.h
@@ -0,0 +1,103 @@
+/*
+ * Hisilicon Hi3620 clock gate driver
+ *
+ * Copyright (c) 2012-2013 Hisilicon Limited.
+ * Copyright (c) 2012-2013 Linaro Limited.
+ *
+ * Author: Haojian Zhuang <haojian.zhuang@linaro.org>
+ * Xin Li <li.xin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __HISI_CLK_H
+#define __HISI_CLK_H
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+struct hisi_fixed_rate_clock {
+ unsigned int id;
+ char *name;
+ const char *parent_name;
+ unsigned long flags;
+ unsigned long fixed_rate;
+};
+
+struct hisi_fixed_factor_clock {
+ unsigned int id;
+ char *name;
+ const char *parent_name;
+ unsigned long mult;
+ unsigned long div;
+ unsigned long flags;
+};
+
+struct hisi_mux_clock {
+ unsigned int id;
+ const char *name;
+ const char **parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long offset;
+ u8 shift;
+ u8 width;
+ u8 mux_flags;
+ const char *alias;
+};
+
+struct hisi_divider_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ unsigned long flags;
+ unsigned long offset;
+ u8 shift;
+ u8 width;
+ u8 div_flags;
+ struct clk_div_table *table;
+ const char *alias;
+};
+
+struct hisi_gate_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ unsigned long flags;
+ unsigned long offset;
+ u8 bit_idx;
+ u8 gate_flags;
+ const char *alias;
+};
+
+struct clk *hisi_register_clkgate_sep(struct device *, const char *,
+ const char *, unsigned long,
+ void __iomem *, u8,
+ u8, spinlock_t *);
+
+void __init hisi_clk_init(struct device_node *, int);
+void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *,
+ int, void __iomem *);
+void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *,
+ int, void __iomem *);
+void __init hisi_clk_register_mux(struct hisi_mux_clock *, int,
+ void __iomem *);
+void __init hisi_clk_register_divider(struct hisi_divider_clock *,
+ int, void __iomem *);
+void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *,
+ int, void __iomem *);
+#endif /* __HISI_CLK_H */
diff --git a/drivers/clk/hisilicon/clkgate-separated.c b/drivers/clk/hisilicon/clkgate-separated.c
new file mode 100644
index 000000000000..b03d5a7246f9
--- /dev/null
+++ b/drivers/clk/hisilicon/clkgate-separated.c
@@ -0,0 +1,130 @@
+/*
+ * Hisilicon clock separated gate driver
+ *
+ * Copyright (c) 2012-2013 Hisilicon Limited.
+ * Copyright (c) 2012-2013 Linaro Limited.
+ *
+ * Author: Haojian Zhuang <haojian.zhuang@linaro.org>
+ * Xin Li <li.xin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#include "clk.h"
+
+/* clock separated gate register offset */
+#define CLKGATE_SEPERATED_ENABLE 0x0
+#define CLKGATE_SEPERATED_DISABLE 0x4
+#define CLKGATE_SEPERATED_STATUS 0x8
+
+struct clkgate_separated {
+ struct clk_hw hw;
+ void __iomem *enable; /* enable register */
+ u8 bit_idx; /* bits in enable/disable register */
+ u8 flags;
+ spinlock_t *lock;
+};
+
+static int clkgate_separated_enable(struct clk_hw *hw)
+{
+ struct clkgate_separated *sclk;
+ unsigned long flags = 0;
+ u32 reg;
+
+ sclk = container_of(hw, struct clkgate_separated, hw);
+ if (sclk->lock)
+ spin_lock_irqsave(sclk->lock, flags);
+ reg = BIT(sclk->bit_idx);
+ writel_relaxed(reg, sclk->enable);
+ readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ if (sclk->lock)
+ spin_unlock_irqrestore(sclk->lock, flags);
+ return 0;
+}
+
+static void clkgate_separated_disable(struct clk_hw *hw)
+{
+ struct clkgate_separated *sclk;
+ unsigned long flags = 0;
+ u32 reg;
+
+ sclk = container_of(hw, struct clkgate_separated, hw);
+ if (sclk->lock)
+ spin_lock_irqsave(sclk->lock, flags);
+ reg = BIT(sclk->bit_idx);
+ writel_relaxed(reg, sclk->enable + CLKGATE_SEPERATED_DISABLE);
+ readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ if (sclk->lock)
+ spin_unlock_irqrestore(sclk->lock, flags);
+}
+
+static int clkgate_separated_is_enabled(struct clk_hw *hw)
+{
+ struct clkgate_separated *sclk;
+ u32 reg;
+
+ sclk = container_of(hw, struct clkgate_separated, hw);
+ reg = readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ reg &= BIT(sclk->bit_idx);
+
+ return reg ? 1 : 0;
+}
+
+static struct clk_ops clkgate_separated_ops = {
+ .enable = clkgate_separated_enable,
+ .disable = clkgate_separated_disable,
+ .is_enabled = clkgate_separated_is_enabled,
+};
+
+struct clk *hisi_register_clkgate_sep(struct device *dev, const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock)
+{
+ struct clkgate_separated *sclk;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ sclk = kzalloc(sizeof(*sclk), GFP_KERNEL);
+ if (!sclk) {
+ pr_err("%s: failed to allocate separated gated clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &clkgate_separated_ops;
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ sclk->enable = reg + CLKGATE_SEPERATED_ENABLE;
+ sclk->bit_idx = bit_idx;
+ sclk->flags = clk_gate_flags;
+ sclk->hw.init = &init;
+
+ clk = clk_register(dev, &sclk->hw);
+ if (IS_ERR(clk))
+ kfree(sclk);
+ return clk;
+}
diff --git a/drivers/clk/keystone/gate.c b/drivers/clk/keystone/gate.c
index 1f333bcfc22e..17a598398a53 100644
--- a/drivers/clk/keystone/gate.c
+++ b/drivers/clk/keystone/gate.c
@@ -223,8 +223,7 @@ static void __init of_psc_clk_init(struct device_node *node, spinlock_t *lock)
data->domain_base = of_iomap(node, i);
if (!data->domain_base) {
pr_err("%s: domain ioremap failed\n", __func__);
- iounmap(data->control_base);
- goto out;
+ goto unmap_ctrl;
}
of_property_read_u32(node, "domain-id", &data->domain_id);
@@ -237,16 +236,21 @@ static void __init of_psc_clk_init(struct device_node *node, spinlock_t *lock)
parent_name = of_clk_get_parent_name(node, 0);
if (!parent_name) {
pr_err("%s: Parent clock not found\n", __func__);
- goto out;
+ goto unmap_domain;
}
clk = clk_register_psc(NULL, clk_name, parent_name, data, lock);
- if (clk) {
+ if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
return;
}
pr_err("%s: error registering clk %s\n", __func__, node->name);
+
+unmap_domain:
+ iounmap(data->domain_base);
+unmap_ctrl:
+ iounmap(data->control_base);
out:
kfree(data);
return;
diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
index 47a1bd9f1726..0dd8a4b12747 100644
--- a/drivers/clk/keystone/pll.c
+++ b/drivers/clk/keystone/pll.c
@@ -24,6 +24,8 @@
#define MAIN_PLLM_HIGH_MASK 0x7f000
#define PLLM_HIGH_SHIFT 6
#define PLLD_MASK 0x3f
+#define CLKOD_MASK 0x780000
+#define CLKOD_SHIFT 19
/**
* struct clk_pll_data - pll data structure
@@ -41,7 +43,10 @@
* @pllm_upper_mask: multiplier upper mask
* @pllm_upper_shift: multiplier upper shift
* @plld_mask: divider mask
- * @postdiv: Post divider
+ * @clkod_mask: output divider mask
+ * @clkod_shift: output divider shift
+ * @postdiv: Fixed post divider
*/
struct clk_pll_data {
bool has_pllctrl;
@@ -53,6 +58,8 @@ struct clk_pll_data {
u32 pllm_upper_mask;
u32 pllm_upper_shift;
u32 plld_mask;
+ u32 clkod_mask;
+ u32 clkod_shift;
u32 postdiv;
};
@@ -90,7 +97,13 @@ static unsigned long clk_pllclk_recalc(struct clk_hw *hw,
mult |= ((val & pll_data->pllm_upper_mask)
>> pll_data->pllm_upper_shift);
prediv = (val & pll_data->plld_mask);
- postdiv = pll_data->postdiv;
+
+ if (!pll_data->has_pllctrl)
+ /* read post divider from OD bits */
+ postdiv = ((val & pll_data->clkod_mask) >>
+ pll_data->clkod_shift) + 1;
+ else
+ postdiv = pll_data->postdiv;
rate /= (prediv + 1);
rate = (rate * (mult + 1));
@@ -155,8 +168,11 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
}
parent_name = of_clk_get_parent_name(node, 0);
- if (of_property_read_u32(node, "fixed-postdiv", &pll_data->postdiv))
- goto out;
+ if (of_property_read_u32(node, "fixed-postdiv", &pll_data->postdiv)) {
+ /* assume the PLL has output divider register bits */
+ pll_data->clkod_mask = CLKOD_MASK;
+ pll_data->clkod_shift = CLKOD_SHIFT;
+ }
i = of_property_match_string(node, "reg-names", "control");
pll_data->pll_ctl0 = of_iomap(node, i);
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
index 0b0f3e729cf7..c339b829d3e3 100644
--- a/drivers/clk/mvebu/Kconfig
+++ b/drivers/clk/mvebu/Kconfig
@@ -4,15 +4,20 @@ config MVEBU_CLK_COMMON
config MVEBU_CLK_CPU
bool
+config MVEBU_CLK_COREDIV
+ bool
+
config ARMADA_370_CLK
bool
select MVEBU_CLK_COMMON
select MVEBU_CLK_CPU
+ select MVEBU_CLK_COREDIV
config ARMADA_XP_CLK
bool
select MVEBU_CLK_COMMON
select MVEBU_CLK_CPU
+ select MVEBU_CLK_COREDIV
config DOVE_CLK
bool
diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile
index 1c7e70c63fb2..21bbfb4a9f42 100644
--- a/drivers/clk/mvebu/Makefile
+++ b/drivers/clk/mvebu/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_MVEBU_CLK_COMMON) += common.o
obj-$(CONFIG_MVEBU_CLK_CPU) += clk-cpu.o
+obj-$(CONFIG_MVEBU_CLK_COREDIV) += clk-corediv.o
obj-$(CONFIG_ARMADA_370_CLK) += armada-370.o
obj-$(CONFIG_ARMADA_XP_CLK) += armada-xp.o
diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c
new file mode 100644
index 000000000000..7162615bcdcd
--- /dev/null
+++ b/drivers/clk/mvebu/clk-corediv.c
@@ -0,0 +1,223 @@
+/*
+ * MVEBU Core divider clock
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "common.h"
+
+#define CORE_CLK_DIV_RATIO_MASK 0xff
+#define CORE_CLK_DIV_RATIO_RELOAD BIT(8)
+#define CORE_CLK_DIV_ENABLE_OFFSET 24
+#define CORE_CLK_DIV_RATIO_OFFSET 0x8
+
+struct clk_corediv_desc {
+ unsigned int mask;
+ unsigned int offset;
+ unsigned int fieldbit;
+};
+
+struct clk_corediv {
+ struct clk_hw hw;
+ void __iomem *reg;
+ struct clk_corediv_desc desc;
+ spinlock_t lock;
+};
+
+static struct clk_onecell_data clk_data;
+
+static const struct clk_corediv_desc mvebu_corediv_desc[] __initconst = {
+ { .mask = 0x3f, .offset = 8, .fieldbit = 1 }, /* NAND clock */
+};
+
+#define to_corediv_clk(p) container_of(p, struct clk_corediv, hw)
+
+static int clk_corediv_is_enabled(struct clk_hw *hwclk)
+{
+ struct clk_corediv *corediv = to_corediv_clk(hwclk);
+ struct clk_corediv_desc *desc = &corediv->desc;
+ u32 enable_mask = BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET;
+
+ return !!(readl(corediv->reg) & enable_mask);
+}
+
+static int clk_corediv_enable(struct clk_hw *hwclk)
+{
+ struct clk_corediv *corediv = to_corediv_clk(hwclk);
+ struct clk_corediv_desc *desc = &corediv->desc;
+ unsigned long flags = 0;
+ u32 reg;
+
+ spin_lock_irqsave(&corediv->lock, flags);
+
+ reg = readl(corediv->reg);
+ reg |= (BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET);
+ writel(reg, corediv->reg);
+
+ spin_unlock_irqrestore(&corediv->lock, flags);
+
+ return 0;
+}
+
+static void clk_corediv_disable(struct clk_hw *hwclk)
+{
+ struct clk_corediv *corediv = to_corediv_clk(hwclk);
+ struct clk_corediv_desc *desc = &corediv->desc;
+ unsigned long flags = 0;
+ u32 reg;
+
+ spin_lock_irqsave(&corediv->lock, flags);
+
+ reg = readl(corediv->reg);
+ reg &= ~(BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET);
+ writel(reg, corediv->reg);
+
+ spin_unlock_irqrestore(&corediv->lock, flags);
+}
+
+static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct clk_corediv *corediv = to_corediv_clk(hwclk);
+ struct clk_corediv_desc *desc = &corediv->desc;
+ u32 reg, div;
+
+ reg = readl(corediv->reg + CORE_CLK_DIV_RATIO_OFFSET);
+ div = (reg >> desc->offset) & desc->mask;
+ return parent_rate / div;
+}
+
+static long clk_corediv_round_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /* Valid ratios are 1:4, 1:5, 1:6 and 1:8 */
+ u32 div;
+
+ div = *parent_rate / rate;
+ if (div < 4)
+ div = 4;
+ else if (div > 6)
+ div = 8;
+
+ return *parent_rate / div;
+}
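
A quick worked example of the rounding above (numbers purely illustrative,
parent fixed at 2 GHz), showing how requests snap to the supported 1:4, 1:5,
1:6 and 1:8 ratios:

/*
 *   requested 700000000 -> div = 2  -> clamped to 4 -> returns 500000000
 *   requested 300000000 -> div = 6  -> kept         -> returns 333333333
 *   requested 200000000 -> div = 10 -> clamped to 8 -> returns 250000000
 */
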
+
+static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_corediv *corediv = to_corediv_clk(hwclk);
+ struct clk_corediv_desc *desc = &corediv->desc;
+ unsigned long flags = 0;
+ u32 reg, div;
+
+ div = parent_rate / rate;
+
+ spin_lock_irqsave(&corediv->lock, flags);
+
+ /* Write new divider to the divider ratio register */
+ reg = readl(corediv->reg + CORE_CLK_DIV_RATIO_OFFSET);
+ reg &= ~(desc->mask << desc->offset);
+ reg |= (div & desc->mask) << desc->offset;
+ writel(reg, corediv->reg + CORE_CLK_DIV_RATIO_OFFSET);
+
+ /* Set reload-force for this clock */
+ reg = readl(corediv->reg) | BIT(desc->fieldbit);
+ writel(reg, corediv->reg);
+
+ /* Now trigger the clock update */
+ reg = readl(corediv->reg) | CORE_CLK_DIV_RATIO_RELOAD;
+ writel(reg, corediv->reg);
+
+ /*
+ * Wait for the clocks to settle down, and then clear all the
+ * ratio requests and the reload request.
+ */
+ udelay(1000);
+ reg &= ~(CORE_CLK_DIV_RATIO_MASK | CORE_CLK_DIV_RATIO_RELOAD);
+ writel(reg, corediv->reg);
+ udelay(1000);
+
+ spin_unlock_irqrestore(&corediv->lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops corediv_ops = {
+ .enable = clk_corediv_enable,
+ .disable = clk_corediv_disable,
+ .is_enabled = clk_corediv_is_enabled,
+ .recalc_rate = clk_corediv_recalc_rate,
+ .round_rate = clk_corediv_round_rate,
+ .set_rate = clk_corediv_set_rate,
+};
+
+static void __init mvebu_corediv_clk_init(struct device_node *node)
+{
+ struct clk_init_data init;
+ struct clk_corediv *corediv;
+ struct clk **clks;
+ void __iomem *base;
+ const char *parent_name;
+ const char *clk_name;
+ int i;
+
+ base = of_iomap(node, 0);
+ if (WARN_ON(!base))
+ return;
+
+ parent_name = of_clk_get_parent_name(node, 0);
+
+ clk_data.clk_num = ARRAY_SIZE(mvebu_corediv_desc);
+
+ /* clks holds the clock array */
+ clks = kcalloc(clk_data.clk_num, sizeof(struct clk *),
+ GFP_KERNEL);
+ if (WARN_ON(!clks))
+ goto err_unmap;
+ /* corediv holds the clock specific array */
+ corediv = kcalloc(clk_data.clk_num, sizeof(struct clk_corediv),
+ GFP_KERNEL);
+ if (WARN_ON(!corediv))
+ goto err_free_clks;
+
+ spin_lock_init(&corediv->lock);
+
+ for (i = 0; i < clk_data.clk_num; i++) {
+ of_property_read_string_index(node, "clock-output-names",
+ i, &clk_name);
+ init.num_parents = 1;
+ init.parent_names = &parent_name;
+ init.name = clk_name;
+ init.ops = &corediv_ops;
+ init.flags = 0;
+
+ corediv[i].desc = mvebu_corediv_desc[i];
+ corediv[i].reg = base;
+ corediv[i].hw.init = &init;
+
+ clks[i] = clk_register(NULL, &corediv[i].hw);
+ WARN_ON(IS_ERR(clks[i]));
+ }
+
+ clk_data.clks = clks;
+ of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
+ return;
+
+err_free_clks:
+ kfree(clks);
+err_unmap:
+ iounmap(base);
+}
+CLK_OF_DECLARE(mvebu_corediv_clk, "marvell,armada-370-corediv-clock",
+ mvebu_corediv_clk_init);
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index 1466865b0743..8ebf757d29e2 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -101,7 +101,7 @@ static const struct clk_ops cpu_ops = {
.set_rate = clk_cpu_set_rate,
};
-void __init of_cpu_clk_setup(struct device_node *node)
+static void __init of_cpu_clk_setup(struct device_node *node)
{
struct cpu_clk *cpuclk;
void __iomem *clock_complex_base = of_iomap(node, 0);
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
new file mode 100644
index 000000000000..995bcfa021a4
--- /dev/null
+++ b/drivers/clk/qcom/Kconfig
@@ -0,0 +1,47 @@
+config COMMON_CLK_QCOM
+ tristate "Support for Qualcomm's clock controllers"
+ depends on OF
+ select REGMAP_MMIO
+ select RESET_CONTROLLER
+
+config MSM_GCC_8660
+ tristate "MSM8660 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8660 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config MSM_GCC_8960
+ tristate "MSM8960 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8960 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config MSM_MMCC_8960
+ tristate "MSM8960 Multimedia Clock Controller"
+ select MSM_GCC_8960
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8960 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_8974
+ tristate "MSM8974 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8974 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config MSM_MMCC_8974
+ tristate "MSM8974 Multimedia Clock Controller"
+ select MSM_GCC_8974
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8974 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
new file mode 100644
index 000000000000..f60db2ef1aee
--- /dev/null
+++ b/drivers/clk/qcom/Makefile
@@ -0,0 +1,14 @@
+obj-$(CONFIG_COMMON_CLK_QCOM) += clk-qcom.o
+
+clk-qcom-y += clk-regmap.o
+clk-qcom-y += clk-pll.o
+clk-qcom-y += clk-rcg.o
+clk-qcom-y += clk-rcg2.o
+clk-qcom-y += clk-branch.o
+clk-qcom-y += reset.o
+
+obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
+obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
+obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
+obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
+obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
new file mode 100644
index 000000000000..6b4d2bcb1a53
--- /dev/null
+++ b/drivers/clk/qcom/clk-branch.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include "clk-branch.h"
+
+static bool clk_branch_in_hwcg_mode(const struct clk_branch *br)
+{
+ u32 val;
+
+ if (!br->hwcg_reg)
+ return 0;
+
+ regmap_read(br->clkr.regmap, br->hwcg_reg, &val);
+
+ return !!(val & BIT(br->hwcg_bit));
+}
+
+static bool clk_branch_check_halt(const struct clk_branch *br, bool enabling)
+{
+ bool invert = (br->halt_check == BRANCH_HALT_ENABLE);
+ u32 val;
+
+ regmap_read(br->clkr.regmap, br->halt_reg, &val);
+
+ val &= BIT(br->halt_bit);
+ if (invert)
+ val = !val;
+
+ return !!val == !enabling;
+}
+
+#define BRANCH_CLK_OFF BIT(31)
+#define BRANCH_NOC_FSM_STATUS_SHIFT 28
+#define BRANCH_NOC_FSM_STATUS_MASK 0x7
+#define BRANCH_NOC_FSM_STATUS_ON (0x2 << BRANCH_NOC_FSM_STATUS_SHIFT)
+
+static bool clk_branch2_check_halt(const struct clk_branch *br, bool enabling)
+{
+ u32 val;
+ u32 mask;
+
+ mask = BRANCH_NOC_FSM_STATUS_MASK << BRANCH_NOC_FSM_STATUS_SHIFT;
+ mask |= BRANCH_CLK_OFF;
+
+ regmap_read(br->clkr.regmap, br->halt_reg, &val);
+
+ if (enabling) {
+ val &= mask;
+ return (val & BRANCH_CLK_OFF) == 0 ||
+ val == BRANCH_NOC_FSM_STATUS_ON;
+ } else {
+ return val & BRANCH_CLK_OFF;
+ }
+}
+
+static int clk_branch_wait(const struct clk_branch *br, bool enabling,
+ bool (check_halt)(const struct clk_branch *, bool))
+{
+ bool voted = br->halt_check & BRANCH_VOTED;
+ const char *name = __clk_get_name(br->clkr.hw.clk);
+
+ /* Skip checking halt bit if the clock is in hardware gated mode */
+ if (clk_branch_in_hwcg_mode(br))
+ return 0;
+
+ if (br->halt_check == BRANCH_HALT_DELAY || (!enabling && voted)) {
+ udelay(10);
+ } else if (br->halt_check == BRANCH_HALT_ENABLE ||
+ br->halt_check == BRANCH_HALT ||
+ (enabling && voted)) {
+ int count = 200;
+
+ while (count-- > 0) {
+ if (check_halt(br, enabling))
+ return 0;
+ udelay(1);
+ }
+ WARN(1, "%s status stuck at 'o%s'", name,
+ enabling ? "ff" : "n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int clk_branch_toggle(struct clk_hw *hw, bool en,
+ bool (check_halt)(const struct clk_branch *, bool))
+{
+ struct clk_branch *br = to_clk_branch(hw);
+ int ret;
+
+ if (en) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ } else {
+ clk_disable_regmap(hw);
+ }
+
+ return clk_branch_wait(br, en, check_halt);
+}
+
+static int clk_branch_enable(struct clk_hw *hw)
+{
+ return clk_branch_toggle(hw, true, clk_branch_check_halt);
+}
+
+static void clk_branch_disable(struct clk_hw *hw)
+{
+ clk_branch_toggle(hw, false, clk_branch_check_halt);
+}
+
+const struct clk_ops clk_branch_ops = {
+ .enable = clk_branch_enable,
+ .disable = clk_branch_disable,
+ .is_enabled = clk_is_enabled_regmap,
+};
+EXPORT_SYMBOL_GPL(clk_branch_ops);
+
+static int clk_branch2_enable(struct clk_hw *hw)
+{
+ return clk_branch_toggle(hw, true, clk_branch2_check_halt);
+}
+
+static void clk_branch2_disable(struct clk_hw *hw)
+{
+ clk_branch_toggle(hw, false, clk_branch2_check_halt);
+}
+
+const struct clk_ops clk_branch2_ops = {
+ .enable = clk_branch2_enable,
+ .disable = clk_branch2_disable,
+ .is_enabled = clk_is_enabled_regmap,
+};
+EXPORT_SYMBOL_GPL(clk_branch2_ops);
+
+const struct clk_ops clk_branch_simple_ops = {
+ .enable = clk_enable_regmap,
+ .disable = clk_disable_regmap,
+ .is_enabled = clk_is_enabled_regmap,
+};
+EXPORT_SYMBOL_GPL(clk_branch_simple_ops);
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
new file mode 100644
index 000000000000..284df3f3c55f
--- /dev/null
+++ b/drivers/clk/qcom/clk-branch.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_BRANCH_H__
+#define __QCOM_CLK_BRANCH_H__
+
+#include <linux/clk-provider.h>
+
+#include "clk-regmap.h"
+
+/**
+ * struct clk_branch - gating clock with status bit and dynamic hardware gating
+ *
+ * @hwcg_reg: dynamic hardware clock gating register
+ * @hwcg_bit: ORed with @hwcg_reg to enable dynamic hardware clock gating
+ * @halt_reg: halt register
+ * @halt_bit: ANDed with @halt_reg to test for clock halted
+ * @halt_check: type of halt checking to perform
+ * @clkr: handle between common and hardware-specific interfaces
+ *
+ * Clock which can gate its output.
+ */
+struct clk_branch {
+ u32 hwcg_reg;
+ u32 halt_reg;
+ u8 hwcg_bit;
+ u8 halt_bit;
+ u8 halt_check;
+#define BRANCH_VOTED BIT(7) /* Delay on disable */
+#define BRANCH_HALT 0 /* pol: 1 = halt */
+#define BRANCH_HALT_VOTED (BRANCH_HALT | BRANCH_VOTED)
+#define BRANCH_HALT_ENABLE 1 /* pol: 0 = halt */
+#define BRANCH_HALT_ENABLE_VOTED (BRANCH_HALT_ENABLE | BRANCH_VOTED)
+#define BRANCH_HALT_DELAY 2 /* No bit to check; just delay */
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_branch_ops;
+extern const struct clk_ops clk_branch2_ops;
+extern const struct clk_ops clk_branch_simple_ops;
+
+#define to_clk_branch(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_branch, clkr)
+
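+/*
+ * A minimal sketch of how a clock controller driver describes a branch
+ * clock with these ops.  The register offset and bit positions below are
+ * placeholders, not values from a real controller:
+ *
+ *	static struct clk_branch foo_clk = {
+ *		.halt_reg = 0x1234,
+ *		.halt_bit = 10,
+ *		.halt_check = BRANCH_HALT,
+ *		.clkr = {
+ *			.enable_reg = 0x1230,
+ *			.enable_mask = BIT(9),
+ *			.hw.init = &(struct clk_init_data){
+ *				.name = "foo_clk",
+ *				.parent_names = (const char *[]){ "foo_src" },
+ *				.num_parents = 1,
+ *				.ops = &clk_branch_ops,
+ *			},
+ *		},
+ *	};
+ */
+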
+#endif
diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c
new file mode 100644
index 000000000000..0f927c538613
--- /dev/null
+++ b/drivers/clk/qcom/clk-pll.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <asm/div64.h>
+
+#include "clk-pll.h"
+
+#define PLL_OUTCTRL BIT(0)
+#define PLL_BYPASSNL BIT(1)
+#define PLL_RESET_N BIT(2)
+#define PLL_LOCK_COUNT_SHIFT 8
+#define PLL_LOCK_COUNT_MASK 0x3f
+#define PLL_BIAS_COUNT_SHIFT 14
+#define PLL_BIAS_COUNT_MASK 0x3f
+#define PLL_VOTE_FSM_ENA BIT(20)
+#define PLL_VOTE_FSM_RESET BIT(21)
+
+static int clk_pll_enable(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ int ret;
+ u32 mask, val;
+
+ mask = PLL_OUTCTRL | PLL_RESET_N | PLL_BYPASSNL;
+ ret = regmap_read(pll->clkr.regmap, pll->mode_reg, &val);
+ if (ret)
+ return ret;
+
+ /* Skip if already enabled or in FSM mode */
+ if ((val & mask) == mask || val & PLL_VOTE_FSM_ENA)
+ return 0;
+
+ /* Disable PLL bypass mode. */
+ ret = regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_BYPASSNL,
+ PLL_BYPASSNL);
+ if (ret)
+ return ret;
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset. Delay 10us just to be safe.
+ */
+ udelay(10);
+
+ /* De-assert active-low PLL reset. */
+ ret = regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_RESET_N,
+ PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ /* Wait until PLL is locked. */
+ udelay(50);
+
+ /* Enable PLL output. */
+ ret = regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_OUTCTRL,
+ PLL_OUTCTRL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void clk_pll_disable(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ u32 mask;
+ u32 val;
+
+ regmap_read(pll->clkr.regmap, pll->mode_reg, &val);
+ /* Skip if in FSM mode */
+ if (val & PLL_VOTE_FSM_ENA)
+ return;
+ mask = PLL_OUTCTRL | PLL_RESET_N | PLL_BYPASSNL;
+ regmap_update_bits(pll->clkr.regmap, pll->mode_reg, mask, 0);
+}
+
+static unsigned long
+clk_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ u32 l, m, n;
+ unsigned long rate;
+ u64 tmp;
+
+ regmap_read(pll->clkr.regmap, pll->l_reg, &l);
+ regmap_read(pll->clkr.regmap, pll->m_reg, &m);
+ regmap_read(pll->clkr.regmap, pll->n_reg, &n);
+
+ l &= 0x3ff;
+ m &= 0x7ffff;
+ n &= 0x7ffff;
+
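+ /* PLL output frequency = (L + M/N) * parent_rate */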
+ rate = parent_rate * l;
+ if (n) {
+ tmp = parent_rate;
+ tmp *= m;
+ do_div(tmp, n);
+ rate += tmp;
+ }
+ return rate;
+}
+
+const struct clk_ops clk_pll_ops = {
+ .enable = clk_pll_enable,
+ .disable = clk_pll_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_pll_ops);
+
+static int wait_for_pll(struct clk_pll *pll)
+{
+ u32 val;
+ int count;
+ int ret;
+ const char *name = __clk_get_name(pll->clkr.hw.clk);
+
+ /* Wait for pll to enable. */
+ for (count = 200; count > 0; count--) {
+ ret = regmap_read(pll->clkr.regmap, pll->status_reg, &val);
+ if (ret)
+ return ret;
+ if (val & BIT(pll->status_bit))
+ return 0;
+ udelay(1);
+ }
+
+ WARN(1, "%s didn't enable after voting for it!\n", name);
+ return -ETIMEDOUT;
+}
+
+static int clk_pll_vote_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_pll *p = to_clk_pll(__clk_get_hw(__clk_get_parent(hw->clk)));
+
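+ /* Assert our vote bit first, then wait for the parent PLL to lock */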
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+
+ return wait_for_pll(p);
+}
+
+const struct clk_ops clk_pll_vote_ops = {
+ .enable = clk_pll_vote_enable,
+ .disable = clk_disable_regmap,
+};
+EXPORT_SYMBOL_GPL(clk_pll_vote_ops);
+
+static void
+clk_pll_set_fsm_mode(struct clk_pll *pll, struct regmap *regmap)
+{
+ u32 val;
+ u32 mask;
+
+ /* De-assert reset to FSM */
+ regmap_update_bits(regmap, pll->mode_reg, PLL_VOTE_FSM_RESET, 0);
+
+ /* Program bias count and lock count */
+ val = 1 << PLL_BIAS_COUNT_SHIFT;
+ mask = PLL_BIAS_COUNT_MASK << PLL_BIAS_COUNT_SHIFT;
+ mask |= PLL_LOCK_COUNT_MASK << PLL_LOCK_COUNT_SHIFT;
+ regmap_update_bits(regmap, pll->mode_reg, mask, val);
+
+ /* Enable PLL FSM voting */
+ regmap_update_bits(regmap, pll->mode_reg, PLL_VOTE_FSM_ENA,
+ PLL_VOTE_FSM_ENA);
+}
+
+static void clk_pll_configure(struct clk_pll *pll, struct regmap *regmap,
+ const struct pll_config *config)
+{
+ u32 val;
+ u32 mask;
+
+ regmap_write(regmap, pll->l_reg, config->l);
+ regmap_write(regmap, pll->m_reg, config->m);
+ regmap_write(regmap, pll->n_reg, config->n);
+
+ val = config->vco_val;
+ val |= config->pre_div_val;
+ val |= config->post_div_val;
+ val |= config->mn_ena_mask;
+ val |= config->main_output_mask;
+ val |= config->aux_output_mask;
+
+ mask = config->vco_mask;
+ mask |= config->pre_div_mask;
+ mask |= config->post_div_mask;
+ mask |= config->mn_ena_mask;
+ mask |= config->main_output_mask;
+ mask |= config->aux_output_mask;
+
+ regmap_update_bits(regmap, pll->config_reg, mask, val);
+}
+
+void clk_pll_configure_sr_hpm_lp(struct clk_pll *pll, struct regmap *regmap,
+ const struct pll_config *config, bool fsm_mode)
+{
+ clk_pll_configure(pll, regmap, config);
+ if (fsm_mode)
+ clk_pll_set_fsm_mode(pll, regmap);
+}
+EXPORT_SYMBOL_GPL(clk_pll_configure_sr_hpm_lp);
diff --git a/drivers/clk/qcom/clk-pll.h b/drivers/clk/qcom/clk-pll.h
new file mode 100644
index 000000000000..0775a99ca768
--- /dev/null
+++ b/drivers/clk/qcom/clk-pll.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_PLL_H__
+#define __QCOM_CLK_PLL_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+/**
+ * struct clk_pll - phase locked loop (PLL)
+ * @l_reg: L register
+ * @m_reg: M register
+ * @n_reg: N register
+ * @config_reg: config register
+ * @mode_reg: mode register
+ * @status_reg: status register
+ * @status_bit: ANDed with @status_reg to determine if PLL is enabled
+ * @clkr: handle between common and hardware-specific interfaces
+ */
+struct clk_pll {
+ u32 l_reg;
+ u32 m_reg;
+ u32 n_reg;
+ u32 config_reg;
+ u32 mode_reg;
+ u32 status_reg;
+ u8 status_bit;
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_pll_ops;
+extern const struct clk_ops clk_pll_vote_ops;
+
+#define to_clk_pll(_hw) container_of(to_clk_regmap(_hw), struct clk_pll, clkr)
+
+struct pll_config {
+ u16 l;
+ u32 m;
+ u32 n;
+ u32 vco_val;
+ u32 vco_mask;
+ u32 pre_div_val;
+ u32 pre_div_mask;
+ u32 post_div_val;
+ u32 post_div_mask;
+ u32 mn_ena_mask;
+ u32 main_output_mask;
+ u32 aux_output_mask;
+};
+
+void clk_pll_configure_sr_hpm_lp(struct clk_pll *pll, struct regmap *regmap,
+ const struct pll_config *config, bool fsm_mode);
+
+#endif
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
new file mode 100644
index 000000000000..abfc2b675aea
--- /dev/null
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -0,0 +1,517 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <asm/div64.h>
+
+#include "clk-rcg.h"
+
+static u32 ns_to_src(struct src_sel *s, u32 ns)
+{
+ ns >>= s->src_sel_shift;
+ ns &= SRC_SEL_MASK;
+ return ns;
+}
+
+static u32 src_to_ns(struct src_sel *s, u8 src, u32 ns)
+{
+ u32 mask;
+
+ mask = SRC_SEL_MASK;
+ mask <<= s->src_sel_shift;
+ ns &= ~mask;
+
+ ns |= src << s->src_sel_shift;
+ return ns;
+}
+
+static u8 clk_rcg_get_parent(struct clk_hw *hw)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ int num_parents = __clk_get_num_parents(hw->clk);
+ u32 ns;
+ int i;
+
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ ns = ns_to_src(&rcg->s, ns);
+ for (i = 0; i < num_parents; i++)
+ if (ns == rcg->s.parent_map[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static int reg_to_bank(struct clk_dyn_rcg *rcg, u32 bank)
+{
+ bank &= BIT(rcg->mux_sel_bit);
+ return !!bank;
+}
+
+static u8 clk_dyn_rcg_get_parent(struct clk_hw *hw)
+{
+ struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
+ int num_parents = __clk_get_num_parents(hw->clk);
+ u32 ns, ctl;
+ int bank;
+ int i;
+ struct src_sel *s;
+
+ regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &ctl);
+ bank = reg_to_bank(rcg, ctl);
+ s = &rcg->s[bank];
+
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ ns = ns_to_src(s, ns);
+
+ for (i = 0; i < num_parents; i++)
+ if (ns == s->parent_map[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static int clk_rcg_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ u32 ns;
+
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ ns = src_to_ns(&rcg->s, rcg->s.parent_map[index], ns);
+ regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
+
+ return 0;
+}
+
+static u32 md_to_m(struct mn *mn, u32 md)
+{
+ md >>= mn->m_val_shift;
+ md &= BIT(mn->width) - 1;
+ return md;
+}
+
+static u32 ns_to_pre_div(struct pre_div *p, u32 ns)
+{
+ ns >>= p->pre_div_shift;
+ ns &= BIT(p->pre_div_width) - 1;
+ return ns;
+}
+
+static u32 pre_div_to_ns(struct pre_div *p, u8 pre_div, u32 ns)
+{
+ u32 mask;
+
+ mask = BIT(p->pre_div_width) - 1;
+ mask <<= p->pre_div_shift;
+ ns &= ~mask;
+
+ ns |= pre_div << p->pre_div_shift;
+ return ns;
+}
+
+static u32 mn_to_md(struct mn *mn, u32 m, u32 n, u32 md)
+{
+ u32 mask, mask_w;
+
+ mask_w = BIT(mn->width) - 1;
+ mask = (mask_w << mn->m_val_shift) | mask_w;
+ md &= ~mask;
+
+ if (n) {
+ m <<= mn->m_val_shift;
+ md |= m;
+ md |= ~n & mask_w;
+ }
+
+ return md;
+}
+
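+/*
+ * The NS register stores N as the one's complement of (N - M); undo that
+ * encoding using the M value read from the MD register.
+ */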
+static u32 ns_m_to_n(struct mn *mn, u32 ns, u32 m)
+{
+ ns = ~ns >> mn->n_val_shift;
+ ns &= BIT(mn->width) - 1;
+ return ns + m;
+}
+
+static u32 reg_to_mnctr_mode(struct mn *mn, u32 val)
+{
+ val >>= mn->mnctr_mode_shift;
+ val &= MNCTR_MODE_MASK;
+ return val;
+}
+
+static u32 mn_to_ns(struct mn *mn, u32 m, u32 n, u32 ns)
+{
+ u32 mask;
+
+ mask = BIT(mn->width) - 1;
+ mask <<= mn->n_val_shift;
+ ns &= ~mask;
+
+ if (n) {
+ n = n - m;
+ n = ~n;
+ n &= BIT(mn->width) - 1;
+ n <<= mn->n_val_shift;
+ ns |= n;
+ }
+
+ return ns;
+}
+
+static u32 mn_to_reg(struct mn *mn, u32 m, u32 n, u32 val)
+{
+ u32 mask;
+
+ mask = MNCTR_MODE_MASK << mn->mnctr_mode_shift;
+ mask |= BIT(mn->mnctr_en_bit);
+ val &= ~mask;
+
+ if (n) {
+ val |= BIT(mn->mnctr_en_bit);
+ val |= MNCTR_MODE_DUAL << mn->mnctr_mode_shift;
+ }
+
+ return val;
+}
+
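+/*
+ * Program the currently unused register bank with the new source and M/N
+ * (or pre-divider) settings and then flip the glitch-free mux over to it.
+ * If the clock is disabled there is no need to switch banks, so the active
+ * bank is reprogrammed in place.
+ */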
+static void configure_bank(struct clk_dyn_rcg *rcg, const struct freq_tbl *f)
+{
+ u32 ns, md, ctl, *regp;
+ int bank, new_bank;
+ struct mn *mn;
+ struct pre_div *p;
+ struct src_sel *s;
+ bool enabled;
+ u32 md_reg;
+ u32 bank_reg;
+ bool banked_mn = !!rcg->mn[1].width;
+ struct clk_hw *hw = &rcg->clkr.hw;
+
+ enabled = __clk_is_enabled(hw->clk);
+
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &ctl);
+
+ if (banked_mn) {
+ regp = &ctl;
+ bank_reg = rcg->clkr.enable_reg;
+ } else {
+ regp = &ns;
+ bank_reg = rcg->ns_reg;
+ }
+
+ bank = reg_to_bank(rcg, *regp);
+ new_bank = enabled ? !bank : bank;
+
+ if (banked_mn) {
+ mn = &rcg->mn[new_bank];
+ md_reg = rcg->md_reg[new_bank];
+
+ ns |= BIT(mn->mnctr_reset_bit);
+ regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
+
+ regmap_read(rcg->clkr.regmap, md_reg, &md);
+ md = mn_to_md(mn, f->m, f->n, md);
+ regmap_write(rcg->clkr.regmap, md_reg, md);
+
+ ns = mn_to_ns(mn, f->m, f->n, ns);
+ regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
+
+ ctl = mn_to_reg(mn, f->m, f->n, ctl);
+ regmap_write(rcg->clkr.regmap, rcg->clkr.enable_reg, ctl);
+
+ ns &= ~BIT(mn->mnctr_reset_bit);
+ regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
+ } else {
+ p = &rcg->p[new_bank];
+ ns = pre_div_to_ns(p, f->pre_div - 1, ns);
+ }
+
+ s = &rcg->s[new_bank];
+ ns = src_to_ns(s, s->parent_map[f->src], ns);
+ regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
+
+ if (enabled) {
+ *regp ^= BIT(rcg->mux_sel_bit);
+ regmap_write(rcg->clkr.regmap, bank_reg, *regp);
+ }
+}
+
+static int clk_dyn_rcg_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
+ u32 ns, ctl, md, reg;
+ int bank;
+ struct freq_tbl f = { 0 };
+ bool banked_mn = !!rcg->mn[1].width;
+
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &ctl);
+ reg = banked_mn ? ctl : ns;
+
+ bank = reg_to_bank(rcg, reg);
+
+ if (banked_mn) {
+ regmap_read(rcg->clkr.regmap, rcg->md_reg[bank], &md);
+ f.m = md_to_m(&rcg->mn[bank], md);
+ f.n = ns_m_to_n(&rcg->mn[bank], ns, f.m);
+ } else {
+ f.pre_div = ns_to_pre_div(&rcg->p[bank], ns) + 1;
+ }
+ f.src = index;
+
+ configure_bank(rcg, &f);
+
+ return 0;
+}
+
+/*
+ * Calculate m/n:d rate
+ *
+ *          parent_rate    m
+ *   rate = ----------- x ---
+ *            pre_div      n
+ */
+static unsigned long
+calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 pre_div)
+{
+ if (pre_div)
+ rate /= pre_div + 1;
+
+ if (mode) {
+ u64 tmp = rate;
+ tmp *= m;
+ do_div(tmp, n);
+ rate = tmp;
+ }
+
+ return rate;
+}
+
+static unsigned long
+clk_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ u32 pre_div, m = 0, n = 0, ns, md, mode = 0;
+ struct mn *mn = &rcg->mn;
+
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ pre_div = ns_to_pre_div(&rcg->p, ns);
+
+ if (rcg->mn.width) {
+ regmap_read(rcg->clkr.regmap, rcg->md_reg, &md);
+ m = md_to_m(mn, md);
+ n = ns_m_to_n(mn, ns, m);
+ /* MN counter mode is in hw.enable_reg sometimes */
+ if (rcg->clkr.enable_reg != rcg->ns_reg)
+ regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &mode);
+ else
+ mode = ns;
+ mode = reg_to_mnctr_mode(mn, mode);
+ }
+
+ return calc_rate(parent_rate, m, n, mode, pre_div);
+}
+
+static unsigned long
+clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
+ u32 m, n, pre_div, ns, md, mode, reg;
+ int bank;
+ struct mn *mn;
+ bool banked_mn = !!rcg->mn[1].width;
+
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+
+ if (banked_mn)
+ regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &reg);
+ else
+ reg = ns;
+
+ bank = reg_to_bank(rcg, reg);
+
+ if (banked_mn) {
+ mn = &rcg->mn[bank];
+ regmap_read(rcg->clkr.regmap, rcg->md_reg[bank], &md);
+ m = md_to_m(mn, md);
+ n = ns_m_to_n(mn, ns, m);
+ mode = reg_to_mnctr_mode(mn, reg);
+ return calc_rate(parent_rate, m, n, mode, 0);
+ } else {
+ pre_div = ns_to_pre_div(&rcg->p[bank], ns);
+ return calc_rate(parent_rate, 0, 0, 0, pre_div);
+ }
+}
+
+static const
+struct freq_tbl *find_freq(const struct freq_tbl *f, unsigned long rate)
+{
+ if (!f)
+ return NULL;
+
+ for (; f->freq; f++)
+ if (rate <= f->freq)
+ return f;
+
+ return NULL;
+}
+
+static long _freq_tbl_determine_rate(struct clk_hw *hw,
+ const struct freq_tbl *f, unsigned long rate,
+ unsigned long *p_rate, struct clk **p)
+{
+ unsigned long clk_flags;
+
+ f = find_freq(f, rate);
+ if (!f)
+ return -EINVAL;
+
+ clk_flags = __clk_get_flags(hw->clk);
+ *p = clk_get_parent_by_index(hw->clk, f->src);
+ if (clk_flags & CLK_SET_RATE_PARENT) {
+ rate = rate * f->pre_div;
+ if (f->n) {
+ u64 tmp = rate;
+ tmp = tmp * f->n;
+ do_div(tmp, f->m);
+ rate = tmp;
+ }
+ } else {
+ rate = __clk_get_rate(*p);
+ }
+ *p_rate = rate;
+
+ return f->freq;
+}
+
+static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *p_rate, struct clk **p)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+}
+
+static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *p_rate, struct clk **p)
+{
+ struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+}
+
+static int clk_rcg_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ const struct freq_tbl *f;
+ u32 ns, md, ctl;
+ struct mn *mn = &rcg->mn;
+ u32 mask = 0;
+ unsigned int reset_reg;
+
+ f = find_freq(rcg->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ if (rcg->mn.reset_in_cc)
+ reset_reg = rcg->clkr.enable_reg;
+ else
+ reset_reg = rcg->ns_reg;
+
+ if (rcg->mn.width) {
+ mask = BIT(mn->mnctr_reset_bit);
+ regmap_update_bits(rcg->clkr.regmap, reset_reg, mask, mask);
+
+ regmap_read(rcg->clkr.regmap, rcg->md_reg, &md);
+ md = mn_to_md(mn, f->m, f->n, md);
+ regmap_write(rcg->clkr.regmap, rcg->md_reg, md);
+
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ /* MN counter mode is in hw.enable_reg sometimes */
+ if (rcg->clkr.enable_reg != rcg->ns_reg) {
+ regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &ctl);
+ ctl = mn_to_reg(mn, f->m, f->n, ctl);
+ regmap_write(rcg->clkr.regmap, rcg->clkr.enable_reg, ctl);
+ } else {
+ ns = mn_to_reg(mn, f->m, f->n, ns);
+ }
+ ns = mn_to_ns(mn, f->m, f->n, ns);
+ } else {
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ }
+
+ ns = pre_div_to_ns(&rcg->p, f->pre_div - 1, ns);
+ regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
+
+ regmap_update_bits(rcg->clkr.regmap, reset_reg, mask, 0);
+
+ return 0;
+}
+
+static int __clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate)
+{
+ struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
+ const struct freq_tbl *f;
+
+ f = find_freq(rcg->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ configure_bank(rcg, f);
+
+ return 0;
+}
+
+static int clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_dyn_rcg_set_rate(hw, rate);
+}
+
+static int clk_dyn_rcg_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_dyn_rcg_set_rate(hw, rate);
+}
+
+const struct clk_ops clk_rcg_ops = {
+ .enable = clk_enable_regmap,
+ .disable = clk_disable_regmap,
+ .get_parent = clk_rcg_get_parent,
+ .set_parent = clk_rcg_set_parent,
+ .recalc_rate = clk_rcg_recalc_rate,
+ .determine_rate = clk_rcg_determine_rate,
+ .set_rate = clk_rcg_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_rcg_ops);
+
+const struct clk_ops clk_dyn_rcg_ops = {
+ .enable = clk_enable_regmap,
+ .is_enabled = clk_is_enabled_regmap,
+ .disable = clk_disable_regmap,
+ .get_parent = clk_dyn_rcg_get_parent,
+ .set_parent = clk_dyn_rcg_set_parent,
+ .recalc_rate = clk_dyn_rcg_recalc_rate,
+ .determine_rate = clk_dyn_rcg_determine_rate,
+ .set_rate = clk_dyn_rcg_set_rate,
+ .set_rate_and_parent = clk_dyn_rcg_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_dyn_rcg_ops);
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
new file mode 100644
index 000000000000..1d6b6dece328
--- /dev/null
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_RCG_H__
+#define __QCOM_CLK_RCG_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+struct freq_tbl {
+ unsigned long freq;
+ u8 src;
+ u8 pre_div;
+ u16 m;
+ u16 n;
+};
+
+/**
+ * struct mn - M/N:D counter
+ * @mnctr_en_bit: bit to enable mn counter
+ * @mnctr_reset_bit: bit to assert mn counter reset
+ * @mnctr_mode_shift: lowest bit of mn counter mode field
+ * @n_val_shift: lowest bit of n value field
+ * @m_val_shift: lowest bit of m value field
+ * @width: number of bits in m/n/d values
+ * @reset_in_cc: true if the mnctr_reset_bit is in the CC register
+ */
+struct mn {
+ u8 mnctr_en_bit;
+ u8 mnctr_reset_bit;
+ u8 mnctr_mode_shift;
+#define MNCTR_MODE_DUAL 0x2
+#define MNCTR_MODE_MASK 0x3
+ u8 n_val_shift;
+ u8 m_val_shift;
+ u8 width;
+ bool reset_in_cc;
+};
+
+/**
+ * struct pre_div - pre-divider
+ * @pre_div_shift: lowest bit of pre divider field
+ * @pre_div_width: number of bits in predivider
+ */
+struct pre_div {
+ u8 pre_div_shift;
+ u8 pre_div_width;
+};
+
+/**
+ * struct src_sel - source selector
+ * @src_sel_shift: lowest bit of source selection field
+ * @parent_map: map from software's parent index to hardware's src_sel field
+ */
+struct src_sel {
+ u8 src_sel_shift;
+#define SRC_SEL_MASK 0x7
+ const u8 *parent_map;
+};
+
+/**
+ * struct clk_rcg - root clock generator
+ *
+ * @ns_reg: NS register
+ * @md_reg: MD register
+ * @mn: mn counter
+ * @p: pre divider
+ * @s: source selector
+ * @freq_tbl: frequency table
+ * @clkr: regmap clock handle
+ *
+ */
+struct clk_rcg {
+ u32 ns_reg;
+ u32 md_reg;
+
+ struct mn mn;
+ struct pre_div p;
+ struct src_sel s;
+
+ const struct freq_tbl *freq_tbl;
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_rcg_ops;
+
+#define to_clk_rcg(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg, clkr)
+
+/**
+ * struct clk_dyn_rcg - root clock generator with glitch free mux
+ *
+ * @mux_sel_bit: bit to switch glitch free mux
+ * @ns_reg: NS register
+ * @md_reg: MD0 and MD1 register
+ * @mn: mn counter (banked)
+ * @p: pre divider (banked)
+ * @s: source selector (banked)
+ * @freq_tbl: frequency table
+ * @clkr: regmap clock handle
+ *
+ */
+struct clk_dyn_rcg {
+ u32 ns_reg;
+ u32 md_reg[2];
+
+ u8 mux_sel_bit;
+
+ struct mn mn[2];
+ struct pre_div p[2];
+ struct src_sel s[2];
+
+ const struct freq_tbl *freq_tbl;
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_dyn_rcg_ops;
+
+#define to_clk_dyn_rcg(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_dyn_rcg, clkr)
+
+/**
+ * struct clk_rcg2 - root clock generator
+ *
+ * @cmd_rcgr: corresponds to *_CMD_RCGR
+ * @mnd_width: number of bits in m/n/d values
+ * @hid_width: number of bits in half integer divider
+ * @parent_map: map from software's parent index to hardware's src_sel field
+ * @freq_tbl: frequency table
+ * @clkr: regmap clock handle
+ *
+ */
+struct clk_rcg2 {
+ u32 cmd_rcgr;
+ u8 mnd_width;
+ u8 hid_width;
+ const u8 *parent_map;
+ const struct freq_tbl *freq_tbl;
+ struct clk_regmap clkr;
+};
+
+#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
+
+extern const struct clk_ops clk_rcg2_ops;
+
+#endif
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
new file mode 100644
index 000000000000..00f878a04d3f
--- /dev/null
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+
+#include <asm/div64.h>
+
+#include "clk-rcg.h"
+
+#define CMD_REG 0x0
+#define CMD_UPDATE BIT(0)
+#define CMD_ROOT_EN BIT(1)
+#define CMD_DIRTY_CFG BIT(4)
+#define CMD_DIRTY_N BIT(5)
+#define CMD_DIRTY_M BIT(6)
+#define CMD_DIRTY_D BIT(7)
+#define CMD_ROOT_OFF BIT(31)
+
+#define CFG_REG 0x4
+#define CFG_SRC_DIV_SHIFT 0
+#define CFG_SRC_SEL_SHIFT 8
+#define CFG_SRC_SEL_MASK (0x7 << CFG_SRC_SEL_SHIFT)
+#define CFG_MODE_SHIFT 12
+#define CFG_MODE_MASK (0x3 << CFG_MODE_SHIFT)
+#define CFG_MODE_DUAL_EDGE (0x2 << CFG_MODE_SHIFT)
+
+#define M_REG 0x8
+#define N_REG 0xc
+#define D_REG 0x10
+
+static int clk_rcg2_is_enabled(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cmd;
+ int ret;
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
+ if (ret)
+ return ret;
+
+ /* The root is enabled when the CMD_ROOT_OFF status bit is clear */
+ return (cmd & CMD_ROOT_OFF) == 0;
+}
+
+static u8 clk_rcg2_get_parent(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int num_parents = __clk_get_num_parents(hw->clk);
+ u32 cfg;
+ int i, ret;
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ if (ret)
+ return ret;
+
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == rcg->parent_map[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static int update_config(struct clk_rcg2 *rcg)
+{
+ int count, ret;
+ u32 cmd;
+ struct clk_hw *hw = &rcg->clkr.hw;
+ const char *name = __clk_get_name(hw->clk);
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_UPDATE, CMD_UPDATE);
+ if (ret)
+ return ret;
+
+ /* Wait for update to take effect */
+ for (count = 500; count > 0; count--) {
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
+ if (ret)
+ return ret;
+ if (!(cmd & CMD_UPDATE))
+ return 0;
+ udelay(1);
+ }
+
+ WARN(1, "%s: rcg didn't update its configuration.", name);
+ return 0;
+}
+
+static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int ret;
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ CFG_SRC_SEL_MASK,
+ rcg->parent_map[index] << CFG_SRC_SEL_SHIFT);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+/*
+ * Calculate m/n:d rate
+ *
+ *          parent_rate    m
+ *   rate = ----------- x ---
+ *            hid_div      n
+ */
+static unsigned long
+calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
+{
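+ /* The hid_div field encodes (2 * divider - 1), allowing half-integer dividers */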
+ if (hid_div) {
+ rate *= 2;
+ rate /= hid_div + 1;
+ }
+
+ if (mode) {
+ u64 tmp = rate;
+ tmp *= m;
+ do_div(tmp, n);
+ rate = tmp;
+ }
+
+ return rate;
+}
+
+static unsigned long
+clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+
+ if (rcg->mnd_width) {
+ mask = BIT(rcg->mnd_width) - 1;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &m);
+ m &= mask;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &n);
+ n = ~n;
+ n &= mask;
+ n += m;
+ mode = cfg & CFG_MODE_MASK;
+ mode >>= CFG_MODE_SHIFT;
+ }
+
+ mask = BIT(rcg->hid_width) - 1;
+ hid_div = cfg >> CFG_SRC_DIV_SHIFT;
+ hid_div &= mask;
+
+ return calc_rate(parent_rate, m, n, mode, hid_div);
+}
+
+static const
+struct freq_tbl *find_freq(const struct freq_tbl *f, unsigned long rate)
+{
+ if (!f)
+ return NULL;
+
+ for (; f->freq; f++)
+ if (rate <= f->freq)
+ return f;
+
+ return NULL;
+}
+
+static long _freq_tbl_determine_rate(struct clk_hw *hw,
+ const struct freq_tbl *f, unsigned long rate,
+ unsigned long *p_rate, struct clk **p)
+{
+ unsigned long clk_flags;
+
+ f = find_freq(f, rate);
+ if (!f)
+ return -EINVAL;
+
+ clk_flags = __clk_get_flags(hw->clk);
+ *p = clk_get_parent_by_index(hw->clk, f->src);
+ if (clk_flags & CLK_SET_RATE_PARENT) {
+ if (f->pre_div) {
+ rate /= 2;
+ rate *= f->pre_div + 1;
+ }
+
+ if (f->n) {
+ u64 tmp = rate;
+ tmp = tmp * f->n;
+ do_div(tmp, f->m);
+ rate = tmp;
+ }
+ } else {
+ rate = __clk_get_rate(*p);
+ }
+ *p_rate = rate;
+
+ return f->freq;
+}
+
+static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *p_rate, struct clk **p)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+}
+
+static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f;
+ u32 cfg, mask;
+ int ret;
+
+ f = find_freq(rcg->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ if (rcg->mnd_width && f->n) {
+ mask = BIT(rcg->mnd_width) - 1;
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG,
+ mask, f->m);
+ if (ret)
+ return ret;
+
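+ /* Program N as the one's complement of (N - M) and D as the one's complement of N */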
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG,
+ mask, ~(f->n - f->m));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + D_REG,
+ mask, ~f->n);
+ if (ret)
+ return ret;
+ }
+
+ mask = BIT(rcg->hid_width) - 1;
+ mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK;
+ cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
+ cfg |= rcg->parent_map[f->src] << CFG_SRC_SEL_SHIFT;
+ if (rcg->mnd_width && f->n)
+ cfg |= CFG_MODE_DUAL_EDGE;
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, mask,
+ cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_set_rate(hw, rate);
+}
+
+static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_set_rate(hw, rate);
+}
+
+const struct clk_ops clk_rcg2_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_set_rate,
+ .set_rate_and_parent = clk_rcg2_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_ops);
diff --git a/drivers/clk/qcom/clk-regmap.c b/drivers/clk/qcom/clk-regmap.c
new file mode 100644
index 000000000000..a58ba39a900c
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/export.h>
+
+#include "clk-regmap.h"
+
+/**
+ * clk_is_enabled_regmap - standard is_enabled() for regmap users
+ *
+ * @hw: clk to operate on
+ *
+ * Clocks that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their struct clk_regmap and then use
+ * this as their is_enabled operation, saving some code.
+ */
+int clk_is_enabled_regmap(struct clk_hw *hw)
+{
+ struct clk_regmap *rclk = to_clk_regmap(hw);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rclk->regmap, rclk->enable_reg, &val);
+ if (ret != 0)
+ return ret;
+
+ if (rclk->enable_is_inverted)
+ return (val & rclk->enable_mask) == 0;
+ else
+ return (val & rclk->enable_mask) != 0;
+}
+EXPORT_SYMBOL_GPL(clk_is_enabled_regmap);
+
+/**
+ * clk_enable_regmap - standard enable() for regmap users
+ *
+ * @hw: clk to operate on
+ *
+ * Clocks that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their struct clk_regmap and then use
+ * this as their enable() operation, saving some code.
+ */
+int clk_enable_regmap(struct clk_hw *hw)
+{
+ struct clk_regmap *rclk = to_clk_regmap(hw);
+ unsigned int val;
+
+ if (rclk->enable_is_inverted)
+ val = 0;
+ else
+ val = rclk->enable_mask;
+
+ return regmap_update_bits(rclk->regmap, rclk->enable_reg,
+ rclk->enable_mask, val);
+}
+EXPORT_SYMBOL_GPL(clk_enable_regmap);
+
+/**
+ * clk_disable_regmap - standard disable() for regmap users
+ *
+ * @hw: clk to operate on
+ *
+ * Clocks that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their struct clk_regmap and then use
+ * this as their disable() operation, saving some code.
+ */
+void clk_disable_regmap(struct clk_hw *hw)
+{
+ struct clk_regmap *rclk = to_clk_regmap(hw);
+ unsigned int val;
+
+ if (rclk->enable_is_inverted)
+ val = rclk->enable_mask;
+ else
+ val = 0;
+
+ regmap_update_bits(rclk->regmap, rclk->enable_reg, rclk->enable_mask,
+ val);
+}
+EXPORT_SYMBOL_GPL(clk_disable_regmap);
+
+/**
+ * devm_clk_register_regmap - register a clk_regmap clock
+ *
+ * @dev: device registering the clock
+ * @rclk: clk to operate on
+ *
+ * Clocks that use regmap for their register I/O should register their
+ * clk_regmap struct via this function so that the regmap is initialized
+ * and so that the clock is registered with the common clock framework.
+ */
+struct clk *devm_clk_register_regmap(struct device *dev,
+ struct clk_regmap *rclk)
+{
+ if (dev && dev_get_regmap(dev, NULL))
+ rclk->regmap = dev_get_regmap(dev, NULL);
+ else if (dev && dev->parent)
+ rclk->regmap = dev_get_regmap(dev->parent, NULL);
+
+ return devm_clk_register(dev, &rclk->hw);
+}
+EXPORT_SYMBOL_GPL(devm_clk_register_regmap);
diff --git a/drivers/clk/qcom/clk-regmap.h b/drivers/clk/qcom/clk-regmap.h
new file mode 100644
index 000000000000..491a63d537df
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_H__
+#define __QCOM_CLK_REGMAP_H__
+
+#include <linux/clk-provider.h>
+
+struct regmap;
+
+/**
+ * struct clk_regmap - regmap supporting clock
+ * @hw: handle between common and hardware-specific interfaces
+ * @regmap: regmap to use for regmap helpers and/or by providers
+ * @enable_reg: register when using regmap enable/disable ops
+ * @enable_mask: mask when using regmap enable/disable ops
+ * @enable_is_inverted: flag indicating that the @enable_mask bits must be
+ *                      cleared to enable the clock (and set to disable it)
+ *                      when using clk_enable_regmap and friends.
+ */
+struct clk_regmap {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ unsigned int enable_reg;
+ unsigned int enable_mask;
+ bool enable_is_inverted;
+};
+#define to_clk_regmap(_hw) container_of(_hw, struct clk_regmap, hw)
+
+int clk_is_enabled_regmap(struct clk_hw *hw);
+int clk_enable_regmap(struct clk_hw *hw);
+void clk_disable_regmap(struct clk_hw *hw);
+struct clk *
+devm_clk_register_regmap(struct device *dev, struct clk_regmap *rclk);
+
+#endif
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
new file mode 100644
index 000000000000..bc0b7f1fcfbe
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -0,0 +1,2819 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8660.h>
+#include <dt-bindings/reset/qcom,gcc-msm8660.h>
+
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+static struct clk_pll pll8 = {
+ .l_reg = 0x3144,
+ .m_reg = 0x3148,
+ .n_reg = 0x314c,
+ .config_reg = 0x3154,
+ .mode_reg = 0x3140,
+ .status_reg = 0x3158,
+ .status_bit = 16,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pll8",
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
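+/*
+ * pll8 is enabled by voting: setting bit 8 of the voting register requests
+ * the PLL, and clk_pll_vote_ops then waits for pll8 itself to report lock.
+ */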
+static struct clk_regmap pll8_vote = {
+ .enable_reg = 0x34c0,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "pll8_vote",
+ .parent_names = (const char *[]){ "pll8" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+#define P_PXO 0
+#define P_PLL8 1
+#define P_CXO 2
+
+static const u8 gcc_pxo_pll8_map[] = {
+ [P_PXO] = 0,
+ [P_PLL8] = 3,
+};
+
+static const char *gcc_pxo_pll8[] = {
+ "pxo",
+ "pll8_vote",
+};
+
+static const u8 gcc_pxo_pll8_cxo_map[] = {
+ [P_PXO] = 0,
+ [P_PLL8] = 3,
+ [P_CXO] = 5,
+};
+
+static const char *gcc_pxo_pll8_cxo[] = {
+ "pxo",
+ "pll8_vote",
+ "cxo",
+};
+
+static struct freq_tbl clk_tbl_gsbi_uart[] = {
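+ /* columns: freq (Hz), src, pre_div, m, n */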
+ { 1843200, P_PLL8, 2, 6, 625 },
+ { 3686400, P_PLL8, 2, 12, 625 },
+ { 7372800, P_PLL8, 2, 24, 625 },
+ { 14745600, P_PLL8, 2, 48, 625 },
+ { 16000000, P_PLL8, 4, 1, 6 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 32000000, P_PLL8, 4, 1, 3 },
+ { 40000000, P_PLL8, 1, 5, 48 },
+ { 46400000, P_PLL8, 1, 29, 240 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 51200000, P_PLL8, 1, 2, 15 },
+ { 56000000, P_PLL8, 1, 7, 48 },
+ { 58982400, P_PLL8, 1, 96, 625 },
+ { 64000000, P_PLL8, 2, 1, 3 },
+ { }
+};
+
+static struct clk_rcg gsbi1_uart_src = {
+ .ns_reg = 0x29d4,
+ .md_reg = 0x29d0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x29d4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi1_uart_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x29d4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi1_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi2_uart_src = {
+ .ns_reg = 0x29f4,
+ .md_reg = 0x29f0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x29f4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi2_uart_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x29f4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi2_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi3_uart_src = {
+ .ns_reg = 0x2a14,
+ .md_reg = 0x2a10,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a14,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi3_uart_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x2a14,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi3_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi4_uart_src = {
+ .ns_reg = 0x2a34,
+ .md_reg = 0x2a30,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a34,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi4_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 26,
+ .clkr = {
+ .enable_reg = 0x2a34,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi4_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi5_uart_src = {
+ .ns_reg = 0x2a54,
+ .md_reg = 0x2a50,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a54,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi5_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 22,
+ .clkr = {
+ .enable_reg = 0x2a54,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi5_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi6_uart_src = {
+ .ns_reg = 0x2a74,
+ .md_reg = 0x2a70,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a74,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi6_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 18,
+ .clkr = {
+ .enable_reg = 0x2a74,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi6_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi7_uart_src = {
+ .ns_reg = 0x2a94,
+ .md_reg = 0x2a90,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a94,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi7_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x2a94,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi7_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi8_uart_src = {
+ .ns_reg = 0x2ab4,
+ .md_reg = 0x2ab0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2ab4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi8_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x2ab4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_uart_clk",
+ .parent_names = (const char *[]){ "gsbi8_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi9_uart_src = {
+ .ns_reg = 0x2ad4,
+ .md_reg = 0x2ad0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2ad4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi9_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x2ad4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_uart_clk",
+ .parent_names = (const char *[]){ "gsbi9_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi10_uart_src = {
+ .ns_reg = 0x2af4,
+ .md_reg = 0x2af0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2af4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi10_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x2af4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_uart_clk",
+ .parent_names = (const char *[]){ "gsbi10_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi11_uart_src = {
+ .ns_reg = 0x2b14,
+ .md_reg = 0x2b10,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2b14,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi11_uart_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 17,
+ .clkr = {
+ .enable_reg = 0x2b14,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_uart_clk",
+ .parent_names = (const char *[]){ "gsbi11_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi12_uart_src = {
+ .ns_reg = 0x2b34,
+ .md_reg = 0x2b30,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2b34,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi12_uart_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x2b34,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_uart_clk",
+ .parent_names = (const char *[]){ "gsbi12_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_gsbi_qup[] = {
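+ /* columns: freq (Hz), src, pre_div, m, n */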
+ { 1100000, P_PXO, 1, 2, 49 },
+ { 5400000, P_PXO, 1, 1, 5 },
+ { 10800000, P_PXO, 1, 2, 5 },
+ { 15060000, P_PLL8, 1, 2, 51 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 25600000, P_PLL8, 1, 1, 15 },
+ { 27000000, P_PXO, 1, 0, 0 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 51200000, P_PLL8, 1, 2, 15 },
+ { }
+};
+
+static struct clk_rcg gsbi1_qup_src = {
+ .ns_reg = 0x29cc,
+ .md_reg = 0x29c8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x29cc,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi1_qup_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x29cc,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_qup_clk",
+ .parent_names = (const char *[]){ "gsbi1_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi2_qup_src = {
+ .ns_reg = 0x29ec,
+ .md_reg = 0x29e8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x29ec,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi2_qup_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x29ec,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_qup_clk",
+ .parent_names = (const char *[]){ "gsbi2_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi3_qup_src = {
+ .ns_reg = 0x2a0c,
+ .md_reg = 0x2a08,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a0c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi3_qup_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x2a0c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_qup_clk",
+ .parent_names = (const char *[]){ "gsbi3_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi4_qup_src = {
+ .ns_reg = 0x2a2c,
+ .md_reg = 0x2a28,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a2c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi4_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 24,
+ .clkr = {
+ .enable_reg = 0x2a2c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_qup_clk",
+ .parent_names = (const char *[]){ "gsbi4_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi5_qup_src = {
+ .ns_reg = 0x2a4c,
+ .md_reg = 0x2a48,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a4c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi5_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 20,
+ .clkr = {
+ .enable_reg = 0x2a4c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_qup_clk",
+ .parent_names = (const char *[]){ "gsbi5_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi6_qup_src = {
+ .ns_reg = 0x2a6c,
+ .md_reg = 0x2a68,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a6c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi6_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 16,
+ .clkr = {
+ .enable_reg = 0x2a6c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_qup_clk",
+ .parent_names = (const char *[]){ "gsbi6_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi7_qup_src = {
+ .ns_reg = 0x2a8c,
+ .md_reg = 0x2a88,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a8c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi7_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x2a8c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_qup_clk",
+ .parent_names = (const char *[]){ "gsbi7_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi8_qup_src = {
+ .ns_reg = 0x2aac,
+ .md_reg = 0x2aa8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2aac,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi8_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 8,
+ .clkr = {
+ .enable_reg = 0x2aac,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_qup_clk",
+ .parent_names = (const char *[]){ "gsbi8_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi9_qup_src = {
+ .ns_reg = 0x2acc,
+ .md_reg = 0x2ac8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2acc,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi9_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x2acc,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_qup_clk",
+ .parent_names = (const char *[]){ "gsbi9_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi10_qup_src = {
+ .ns_reg = 0x2aec,
+ .md_reg = 0x2ae8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2aec,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi10_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x2aec,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_qup_clk",
+ .parent_names = (const char *[]){ "gsbi10_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi11_qup_src = {
+ .ns_reg = 0x2b0c,
+ .md_reg = 0x2b08,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2b0c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi11_qup_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x2b0c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_qup_clk",
+ .parent_names = (const char *[]){ "gsbi11_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi12_qup_src = {
+ .ns_reg = 0x2b2c,
+ .md_reg = 0x2b28,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2b2c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi12_qup_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x2b2c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_qup_clk",
+ .parent_names = (const char *[]){ "gsbi12_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
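+/*
+ * Frequency table entries are { rate (Hz), source (P_*), pre-divider, M, N }.
+ * The output rate is parent_rate / pre_div * M / N; entries with M = N = 0
+ * bypass the MN counter.
+ */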
+static const struct freq_tbl clk_tbl_gp[] = {
+ { 9600000, P_CXO, 2, 0, 0 },
+ { 13500000, P_PXO, 2, 0, 0 },
+ { 19200000, P_CXO, 1, 0, 0 },
+ { 27000000, P_PXO, 1, 0, 0 },
+ { 64000000, P_PLL8, 2, 1, 3 },
+ { 76800000, P_PLL8, 1, 1, 5 },
+ { 96000000, P_PLL8, 4, 0, 0 },
+ { 128000000, P_PLL8, 3, 0, 0 },
+ { 192000000, P_PLL8, 2, 0, 0 },
+ { }
+};
+
+static struct clk_rcg gp0_src = {
+ .ns_reg = 0x2d24,
+ .md_reg = 0x2d00,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_cxo_map,
+ },
+ .freq_tbl = clk_tbl_gp,
+ .clkr = {
+ .enable_reg = 0x2d24,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_src",
+ .parent_names = gcc_pxo_pll8_cxo,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ }
+};
+
+static struct clk_branch gp0_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x2d24,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_clk",
+ .parent_names = (const char *[]){ "gp0_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gp1_src = {
+ .ns_reg = 0x2d44,
+ .md_reg = 0x2d40,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_cxo_map,
+ },
+ .freq_tbl = clk_tbl_gp,
+ .clkr = {
+ .enable_reg = 0x2d44,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_src",
+ .parent_names = gcc_pxo_pll8_cxo,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch gp1_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x2d44,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_clk",
+ .parent_names = (const char *[]){ "gp1_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gp2_src = {
+ .ns_reg = 0x2d64,
+ .md_reg = 0x2d60,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_cxo_map,
+ },
+ .freq_tbl = clk_tbl_gp,
+ .clkr = {
+ .enable_reg = 0x2d64,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp2_src",
+ .parent_names = gcc_pxo_pll8_cxo,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch gp2_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x2d64,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp2_clk",
+ .parent_names = (const char *[]){ "gp2_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch pmem_clk = {
+ .hwcg_reg = 0x25a0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 20,
+ .clkr = {
+ .enable_reg = 0x25a0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmem_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_rcg prng_src = {
+ .ns_reg = 0x2e80,
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 4,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .clkr.hw = {
+ .init = &(struct clk_init_data){
+ .name = "prng_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch prng_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "prng_clk",
+ .parent_names = (const char *[]){ "prng_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_sdc[] = {
+ { 144000, P_PXO, 3, 2, 125 },
+ { 400000, P_PLL8, 4, 1, 240 },
+ { 16000000, P_PLL8, 4, 1, 6 },
+ { 17070000, P_PLL8, 1, 2, 45 },
+ { 20210000, P_PLL8, 1, 1, 19 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { }
+};
+
+static struct clk_rcg sdc1_src = {
+ .ns_reg = 0x282c,
+ .md_reg = 0x2828,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x282c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc1_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc1_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x282c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc1_clk",
+ .parent_names = (const char *[]){ "sdc1_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc2_src = {
+ .ns_reg = 0x284c,
+ .md_reg = 0x2848,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x284c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc2_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc2_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x284c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc2_clk",
+ .parent_names = (const char *[]){ "sdc2_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc3_src = {
+ .ns_reg = 0x286c,
+ .md_reg = 0x2868,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x286c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc3_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc3_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x286c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc3_clk",
+ .parent_names = (const char *[]){ "sdc3_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc4_src = {
+ .ns_reg = 0x288c,
+ .md_reg = 0x2888,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x288c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc4_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc4_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x288c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc4_clk",
+ .parent_names = (const char *[]){ "sdc4_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc5_src = {
+ .ns_reg = 0x28ac,
+ .md_reg = 0x28a8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x28ac,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc5_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc5_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x28ac,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc5_clk",
+ .parent_names = (const char *[]){ "sdc5_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_tsif_ref[] = {
+ { 105000, P_PXO, 1, 1, 256 },
+ { }
+};
+
+static struct clk_rcg tsif_ref_src = {
+ .ns_reg = 0x2710,
+ .md_reg = 0x270c,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_tsif_ref,
+ .clkr = {
+ .enable_reg = 0x2710,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "tsif_ref_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch tsif_ref_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x2710,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "tsif_ref_clk",
+ .parent_names = (const char *[]){ "tsif_ref_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_usb[] = {
+ { 60000000, P_PLL8, 1, 5, 32 },
+ { }
+};
+
+static struct clk_rcg usb_hs1_xcvr_src = {
+ .ns_reg = 0x290c,
+ .md_reg = 0x2908,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb,
+ .clkr = {
+ .enable_reg = 0x290c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hs1_xcvr_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch usb_hs1_xcvr_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x290c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hs1_xcvr_clk",
+ .parent_names = (const char *[]){ "usb_hs1_xcvr_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg usb_fs1_xcvr_fs_src = {
+ .ns_reg = 0x2968,
+ .md_reg = 0x2964,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb,
+ .clkr = {
+ .enable_reg = 0x2968,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs1_xcvr_fs_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static const char *usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" };
+
+static struct clk_branch usb_fs1_xcvr_fs_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x2968,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs1_xcvr_fs_clk",
+ .parent_names = usb_fs1_xcvr_fs_src_p,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch usb_fs1_system_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 16,
+ .clkr = {
+ .enable_reg = 0x296c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = usb_fs1_xcvr_fs_src_p,
+ .num_parents = 1,
+ .name = "usb_fs1_system_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg usb_fs2_xcvr_fs_src = {
+ .ns_reg = 0x2988,
+ .md_reg = 0x2984,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb,
+ .clkr = {
+ .enable_reg = 0x2988,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs2_xcvr_fs_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static const char *usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" };
+
+static struct clk_branch usb_fs2_xcvr_fs_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x2988,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs2_xcvr_fs_clk",
+ .parent_names = usb_fs2_xcvr_fs_src_p,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch usb_fs2_system_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x298c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs2_system_clk",
+ .parent_names = usb_fs2_xcvr_fs_src_p,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gsbi1_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x29c0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi2_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x29e0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi3_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x2a00,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi4_h_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 27,
+ .clkr = {
+ .enable_reg = 0x2a20,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi5_h_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 23,
+ .clkr = {
+ .enable_reg = 0x2a40,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi6_h_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 19,
+ .clkr = {
+ .enable_reg = 0x2a60,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi7_h_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x2a80,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi8_h_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x2aa0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi9_h_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x2ac0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi10_h_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x2ae0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi11_h_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 18,
+ .clkr = {
+ .enable_reg = 0x2b00,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi12_h_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x2b20,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch tsif_h_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x2700,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "tsif_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch usb_fs1_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 17,
+ .clkr = {
+ .enable_reg = 0x2960,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch usb_fs2_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x2980,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs2_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch usb_hs1_h_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2900,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hs1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc1_h_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x2820,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc2_h_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x2840,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc2_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc3_h_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x2860,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc3_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc4_h_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 8,
+ .clkr = {
+ .enable_reg = 0x2880,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc4_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc5_h_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x28a0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc5_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch adm0_clk = {
+ .halt_reg = 0x2fdc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "adm0_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch adm0_pbus_clk = {
+ .halt_reg = 0x2fdc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "adm0_pbus_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch adm1_clk = {
+ .halt_reg = 0x2fdc,
+ .halt_bit = 12,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "adm1_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch adm1_pbus_clk = {
+ .halt_reg = 0x2fdc,
+ .halt_bit = 11,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "adm1_pbus_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch modem_ahb1_h_clk = {
+ .halt_reg = 0x2fdc,
+ .halt_bit = 8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "modem_ahb1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch modem_ahb2_h_clk = {
+ .halt_reg = 0x2fdc,
+ .halt_bit = 7,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "modem_ahb2_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch pmic_arb0_h_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 22,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmic_arb0_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch pmic_arb1_h_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 21,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmic_arb1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch pmic_ssbi2_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 23,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmic_ssbi2_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch rpm_msg_ram_h_clk = {
+ .hwcg_reg = 0x27e0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "rpm_msg_ram_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_regmap *gcc_msm8660_clks[] = {
+ [PLL8] = &pll8.clkr,
+ [PLL8_VOTE] = &pll8_vote,
+ [GSBI1_UART_SRC] = &gsbi1_uart_src.clkr,
+ [GSBI1_UART_CLK] = &gsbi1_uart_clk.clkr,
+ [GSBI2_UART_SRC] = &gsbi2_uart_src.clkr,
+ [GSBI2_UART_CLK] = &gsbi2_uart_clk.clkr,
+ [GSBI3_UART_SRC] = &gsbi3_uart_src.clkr,
+ [GSBI3_UART_CLK] = &gsbi3_uart_clk.clkr,
+ [GSBI4_UART_SRC] = &gsbi4_uart_src.clkr,
+ [GSBI4_UART_CLK] = &gsbi4_uart_clk.clkr,
+ [GSBI5_UART_SRC] = &gsbi5_uart_src.clkr,
+ [GSBI5_UART_CLK] = &gsbi5_uart_clk.clkr,
+ [GSBI6_UART_SRC] = &gsbi6_uart_src.clkr,
+ [GSBI6_UART_CLK] = &gsbi6_uart_clk.clkr,
+ [GSBI7_UART_SRC] = &gsbi7_uart_src.clkr,
+ [GSBI7_UART_CLK] = &gsbi7_uart_clk.clkr,
+ [GSBI8_UART_SRC] = &gsbi8_uart_src.clkr,
+ [GSBI8_UART_CLK] = &gsbi8_uart_clk.clkr,
+ [GSBI9_UART_SRC] = &gsbi9_uart_src.clkr,
+ [GSBI9_UART_CLK] = &gsbi9_uart_clk.clkr,
+ [GSBI10_UART_SRC] = &gsbi10_uart_src.clkr,
+ [GSBI10_UART_CLK] = &gsbi10_uart_clk.clkr,
+ [GSBI11_UART_SRC] = &gsbi11_uart_src.clkr,
+ [GSBI11_UART_CLK] = &gsbi11_uart_clk.clkr,
+ [GSBI12_UART_SRC] = &gsbi12_uart_src.clkr,
+ [GSBI12_UART_CLK] = &gsbi12_uart_clk.clkr,
+ [GSBI1_QUP_SRC] = &gsbi1_qup_src.clkr,
+ [GSBI1_QUP_CLK] = &gsbi1_qup_clk.clkr,
+ [GSBI2_QUP_SRC] = &gsbi2_qup_src.clkr,
+ [GSBI2_QUP_CLK] = &gsbi2_qup_clk.clkr,
+ [GSBI3_QUP_SRC] = &gsbi3_qup_src.clkr,
+ [GSBI3_QUP_CLK] = &gsbi3_qup_clk.clkr,
+ [GSBI4_QUP_SRC] = &gsbi4_qup_src.clkr,
+ [GSBI4_QUP_CLK] = &gsbi4_qup_clk.clkr,
+ [GSBI5_QUP_SRC] = &gsbi5_qup_src.clkr,
+ [GSBI5_QUP_CLK] = &gsbi5_qup_clk.clkr,
+ [GSBI6_QUP_SRC] = &gsbi6_qup_src.clkr,
+ [GSBI6_QUP_CLK] = &gsbi6_qup_clk.clkr,
+ [GSBI7_QUP_SRC] = &gsbi7_qup_src.clkr,
+ [GSBI7_QUP_CLK] = &gsbi7_qup_clk.clkr,
+ [GSBI8_QUP_SRC] = &gsbi8_qup_src.clkr,
+ [GSBI8_QUP_CLK] = &gsbi8_qup_clk.clkr,
+ [GSBI9_QUP_SRC] = &gsbi9_qup_src.clkr,
+ [GSBI9_QUP_CLK] = &gsbi9_qup_clk.clkr,
+ [GSBI10_QUP_SRC] = &gsbi10_qup_src.clkr,
+ [GSBI10_QUP_CLK] = &gsbi10_qup_clk.clkr,
+ [GSBI11_QUP_SRC] = &gsbi11_qup_src.clkr,
+ [GSBI11_QUP_CLK] = &gsbi11_qup_clk.clkr,
+ [GSBI12_QUP_SRC] = &gsbi12_qup_src.clkr,
+ [GSBI12_QUP_CLK] = &gsbi12_qup_clk.clkr,
+ [GP0_SRC] = &gp0_src.clkr,
+ [GP0_CLK] = &gp0_clk.clkr,
+ [GP1_SRC] = &gp1_src.clkr,
+ [GP1_CLK] = &gp1_clk.clkr,
+ [GP2_SRC] = &gp2_src.clkr,
+ [GP2_CLK] = &gp2_clk.clkr,
+ [PMEM_CLK] = &pmem_clk.clkr,
+ [PRNG_SRC] = &prng_src.clkr,
+ [PRNG_CLK] = &prng_clk.clkr,
+ [SDC1_SRC] = &sdc1_src.clkr,
+ [SDC1_CLK] = &sdc1_clk.clkr,
+ [SDC2_SRC] = &sdc2_src.clkr,
+ [SDC2_CLK] = &sdc2_clk.clkr,
+ [SDC3_SRC] = &sdc3_src.clkr,
+ [SDC3_CLK] = &sdc3_clk.clkr,
+ [SDC4_SRC] = &sdc4_src.clkr,
+ [SDC4_CLK] = &sdc4_clk.clkr,
+ [SDC5_SRC] = &sdc5_src.clkr,
+ [SDC5_CLK] = &sdc5_clk.clkr,
+ [TSIF_REF_SRC] = &tsif_ref_src.clkr,
+ [TSIF_REF_CLK] = &tsif_ref_clk.clkr,
+ [USB_HS1_XCVR_SRC] = &usb_hs1_xcvr_src.clkr,
+ [USB_HS1_XCVR_CLK] = &usb_hs1_xcvr_clk.clkr,
+ [USB_FS1_XCVR_FS_SRC] = &usb_fs1_xcvr_fs_src.clkr,
+ [USB_FS1_XCVR_FS_CLK] = &usb_fs1_xcvr_fs_clk.clkr,
+ [USB_FS1_SYSTEM_CLK] = &usb_fs1_system_clk.clkr,
+ [USB_FS2_XCVR_FS_SRC] = &usb_fs2_xcvr_fs_src.clkr,
+ [USB_FS2_XCVR_FS_CLK] = &usb_fs2_xcvr_fs_clk.clkr,
+ [USB_FS2_SYSTEM_CLK] = &usb_fs2_system_clk.clkr,
+ [GSBI1_H_CLK] = &gsbi1_h_clk.clkr,
+ [GSBI2_H_CLK] = &gsbi2_h_clk.clkr,
+ [GSBI3_H_CLK] = &gsbi3_h_clk.clkr,
+ [GSBI4_H_CLK] = &gsbi4_h_clk.clkr,
+ [GSBI5_H_CLK] = &gsbi5_h_clk.clkr,
+ [GSBI6_H_CLK] = &gsbi6_h_clk.clkr,
+ [GSBI7_H_CLK] = &gsbi7_h_clk.clkr,
+ [GSBI8_H_CLK] = &gsbi8_h_clk.clkr,
+ [GSBI9_H_CLK] = &gsbi9_h_clk.clkr,
+ [GSBI10_H_CLK] = &gsbi10_h_clk.clkr,
+ [GSBI11_H_CLK] = &gsbi11_h_clk.clkr,
+ [GSBI12_H_CLK] = &gsbi12_h_clk.clkr,
+ [TSIF_H_CLK] = &tsif_h_clk.clkr,
+ [USB_FS1_H_CLK] = &usb_fs1_h_clk.clkr,
+ [USB_FS2_H_CLK] = &usb_fs2_h_clk.clkr,
+ [USB_HS1_H_CLK] = &usb_hs1_h_clk.clkr,
+ [SDC1_H_CLK] = &sdc1_h_clk.clkr,
+ [SDC2_H_CLK] = &sdc2_h_clk.clkr,
+ [SDC3_H_CLK] = &sdc3_h_clk.clkr,
+ [SDC4_H_CLK] = &sdc4_h_clk.clkr,
+ [SDC5_H_CLK] = &sdc5_h_clk.clkr,
+ [ADM0_CLK] = &adm0_clk.clkr,
+ [ADM0_PBUS_CLK] = &adm0_pbus_clk.clkr,
+ [ADM1_CLK] = &adm1_clk.clkr,
+ [ADM1_PBUS_CLK] = &adm1_pbus_clk.clkr,
+ [MODEM_AHB1_H_CLK] = &modem_ahb1_h_clk.clkr,
+ [MODEM_AHB2_H_CLK] = &modem_ahb2_h_clk.clkr,
+ [PMIC_ARB0_H_CLK] = &pmic_arb0_h_clk.clkr,
+ [PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
+ [PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
+ [RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_msm8660_resets[] = {
+ [AFAB_CORE_RESET] = { 0x2080, 7 },
+ [SCSS_SYS_RESET] = { 0x20b4, 1 },
+ [SCSS_SYS_POR_RESET] = { 0x20b4 },
+ [AFAB_SMPSS_S_RESET] = { 0x20b8, 2 },
+ [AFAB_SMPSS_M1_RESET] = { 0x20b8, 1 },
+ [AFAB_SMPSS_M0_RESET] = { 0x20b8 },
+ [AFAB_EBI1_S_RESET] = { 0x20c0, 7 },
+ [SFAB_CORE_RESET] = { 0x2120, 7 },
+ [SFAB_ADM0_M0_RESET] = { 0x21e0, 7 },
+ [SFAB_ADM0_M1_RESET] = { 0x21e4, 7 },
+ [SFAB_ADM0_M2_RESET] = { 0x21e4, 7 },
+ [ADM0_C2_RESET] = { 0x220c, 4 },
+ [ADM0_C1_RESET] = { 0x220c, 3 },
+ [ADM0_C0_RESET] = { 0x220c, 2 },
+ [ADM0_PBUS_RESET] = { 0x220c, 1 },
+ [ADM0_RESET] = { 0x220c },
+ [SFAB_ADM1_M0_RESET] = { 0x2220, 7 },
+ [SFAB_ADM1_M1_RESET] = { 0x2224, 7 },
+ [SFAB_ADM1_M2_RESET] = { 0x2228, 7 },
+ [MMFAB_ADM1_M3_RESET] = { 0x2240, 7 },
+ [ADM1_C3_RESET] = { 0x226c, 5 },
+ [ADM1_C2_RESET] = { 0x226c, 4 },
+ [ADM1_C1_RESET] = { 0x226c, 3 },
+ [ADM1_C0_RESET] = { 0x226c, 2 },
+ [ADM1_PBUS_RESET] = { 0x226c, 1 },
+ [ADM1_RESET] = { 0x226c },
+ [IMEM0_RESET] = { 0x2280, 7 },
+ [SFAB_LPASS_Q6_RESET] = { 0x23a0, 7 },
+ [SFAB_AFAB_M_RESET] = { 0x23e0, 7 },
+ [AFAB_SFAB_M0_RESET] = { 0x2420, 7 },
+ [AFAB_SFAB_M1_RESET] = { 0x2424, 7 },
+ [DFAB_CORE_RESET] = { 0x24ac, 7 },
+ [SFAB_DFAB_M_RESET] = { 0x2500, 7 },
+ [DFAB_SFAB_M_RESET] = { 0x2520, 7 },
+ [DFAB_SWAY0_RESET] = { 0x2540, 7 },
+ [DFAB_SWAY1_RESET] = { 0x2544, 7 },
+ [DFAB_ARB0_RESET] = { 0x2560, 7 },
+ [DFAB_ARB1_RESET] = { 0x2564, 7 },
+ [PPSS_PROC_RESET] = { 0x2594, 1 },
+ [PPSS_RESET] = { 0x2594 },
+ [PMEM_RESET] = { 0x25a0, 7 },
+ [DMA_BAM_RESET] = { 0x25c0, 7 },
+ [SIC_RESET] = { 0x25e0, 7 },
+ [SPS_TIC_RESET] = { 0x2600, 7 },
+ [CFBP0_RESET] = { 0x2650, 7 },
+ [CFBP1_RESET] = { 0x2654, 7 },
+ [CFBP2_RESET] = { 0x2658, 7 },
+ [EBI2_RESET] = { 0x2664, 7 },
+ [SFAB_CFPB_M_RESET] = { 0x2680, 7 },
+ [CFPB_MASTER_RESET] = { 0x26a0, 7 },
+ [SFAB_CFPB_S_RESET] = { 0x26c0, 7 },
+ [CFPB_SPLITTER_RESET] = { 0x26e0, 7 },
+ [TSIF_RESET] = { 0x2700, 7 },
+ [CE1_RESET] = { 0x2720, 7 },
+ [CE2_RESET] = { 0x2740, 7 },
+ [SFAB_SFPB_M_RESET] = { 0x2780, 7 },
+ [SFAB_SFPB_S_RESET] = { 0x27a0, 7 },
+ [RPM_PROC_RESET] = { 0x27c0, 7 },
+ [RPM_BUS_RESET] = { 0x27c4, 7 },
+ [RPM_MSG_RAM_RESET] = { 0x27e0, 7 },
+ [PMIC_ARB0_RESET] = { 0x2800, 7 },
+ [PMIC_ARB1_RESET] = { 0x2804, 7 },
+ [PMIC_SSBI2_RESET] = { 0x280c, 12 },
+ [SDC1_RESET] = { 0x2830 },
+ [SDC2_RESET] = { 0x2850 },
+ [SDC3_RESET] = { 0x2870 },
+ [SDC4_RESET] = { 0x2890 },
+ [SDC5_RESET] = { 0x28b0 },
+ [USB_HS1_RESET] = { 0x2910 },
+ [USB_HS2_XCVR_RESET] = { 0x2934, 1 },
+ [USB_HS2_RESET] = { 0x2934 },
+ [USB_FS1_XCVR_RESET] = { 0x2974, 1 },
+ [USB_FS1_RESET] = { 0x2974 },
+ [USB_FS2_XCVR_RESET] = { 0x2994, 1 },
+ [USB_FS2_RESET] = { 0x2994 },
+ [GSBI1_RESET] = { 0x29dc },
+ [GSBI2_RESET] = { 0x29fc },
+ [GSBI3_RESET] = { 0x2a1c },
+ [GSBI4_RESET] = { 0x2a3c },
+ [GSBI5_RESET] = { 0x2a5c },
+ [GSBI6_RESET] = { 0x2a7c },
+ [GSBI7_RESET] = { 0x2a9c },
+ [GSBI8_RESET] = { 0x2abc },
+ [GSBI9_RESET] = { 0x2adc },
+ [GSBI10_RESET] = { 0x2afc },
+ [GSBI11_RESET] = { 0x2b1c },
+ [GSBI12_RESET] = { 0x2b3c },
+ [SPDM_RESET] = { 0x2b6c },
+ [SEC_CTRL_RESET] = { 0x2b80, 7 },
+ [TLMM_H_RESET] = { 0x2ba0, 7 },
+ [TLMM_RESET] = { 0x2ba4, 7 },
+ [MARRM_PWRON_RESET] = { 0x2bd4, 1 },
+ [MARM_RESET] = { 0x2bd4 },
+ [MAHB1_RESET] = { 0x2be4, 7 },
+ [SFAB_MSS_S_RESET] = { 0x2c00, 7 },
+ [MAHB2_RESET] = { 0x2c20, 7 },
+ [MODEM_SW_AHB_RESET] = { 0x2c48, 1 },
+ [MODEM_RESET] = { 0x2c48 },
+ [SFAB_MSS_MDM1_RESET] = { 0x2c4c, 1 },
+ [SFAB_MSS_MDM0_RESET] = { 0x2c4c },
+ [MSS_SLP_RESET] = { 0x2c60, 7 },
+ [MSS_MARM_SAW_RESET] = { 0x2c68, 1 },
+ [MSS_WDOG_RESET] = { 0x2c68 },
+ [TSSC_RESET] = { 0x2ca0, 7 },
+ [PDM_RESET] = { 0x2cc0, 12 },
+ [SCSS_CORE0_RESET] = { 0x2d60, 1 },
+ [SCSS_CORE0_POR_RESET] = { 0x2d60 },
+ [SCSS_CORE1_RESET] = { 0x2d80, 1 },
+ [SCSS_CORE1_POR_RESET] = { 0x2d80 },
+ [MPM_RESET] = { 0x2da4, 1 },
+ [EBI1_1X_DIV_RESET] = { 0x2dec, 9 },
+ [EBI1_RESET] = { 0x2dec, 7 },
+ [SFAB_SMPSS_S_RESET] = { 0x2e00, 7 },
+ [USB_PHY0_RESET] = { 0x2e20 },
+ [USB_PHY1_RESET] = { 0x2e40 },
+ [PRNG_RESET] = { 0x2e80, 12 },
+};
+
+static const struct regmap_config gcc_msm8660_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x363c,
+ .fast_io = true,
+};
+
+static const struct of_device_id gcc_msm8660_match_table[] = {
+ { .compatible = "qcom,gcc-msm8660" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8660_match_table);
+
+struct qcom_cc {
+ struct qcom_reset_controller reset;
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+
+static int gcc_msm8660_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct resource *res;
+ int i, ret;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ struct clk_onecell_data *data;
+ struct clk **clks;
+ struct regmap *regmap;
+ size_t num_clks;
+ struct qcom_reset_controller *reset;
+ struct qcom_cc *cc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &gcc_msm8660_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ num_clks = ARRAY_SIZE(gcc_msm8660_clks);
+ cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*clks) * num_clks,
+ GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ clks = cc->clks;
+ data = &cc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ /* Temporary until RPM clocks supported */
+ clk = clk_register_fixed_rate(dev, "cxo", NULL, CLK_IS_ROOT, 19200000);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clk = clk_register_fixed_rate(dev, "pxo", NULL, CLK_IS_ROOT, 27000000);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ for (i = 0; i < num_clks; i++) {
+ if (!gcc_msm8660_clks[i])
+ continue;
+ clk = devm_clk_register_regmap(dev, gcc_msm8660_clks[i]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+ if (ret)
+ return ret;
+
+ reset = &cc->reset;
+ reset->rcdev.of_node = dev->of_node;
+ reset->rcdev.ops = &qcom_reset_ops;
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.nr_resets = ARRAY_SIZE(gcc_msm8660_resets);
+ reset->regmap = regmap;
+ reset->reset_map = gcc_msm8660_resets;
+ platform_set_drvdata(pdev, &reset->rcdev);
+
+ ret = reset_controller_register(&reset->rcdev);
+ if (ret)
+ of_clk_del_provider(dev->of_node);
+
+ return ret;
+}
+
+static int gcc_msm8660_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ reset_controller_unregister(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static struct platform_driver gcc_msm8660_driver = {
+ .probe = gcc_msm8660_probe,
+ .remove = gcc_msm8660_remove,
+ .driver = {
+ .name = "gcc-msm8660",
+ .owner = THIS_MODULE,
+ .of_match_table = gcc_msm8660_match_table,
+ },
+};
+
+static int __init gcc_msm8660_init(void)
+{
+ return platform_driver_register(&gcc_msm8660_driver);
+}
+core_initcall(gcc_msm8660_init);
+
+static void __exit gcc_msm8660_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8660_driver);
+}
+module_exit(gcc_msm8660_exit);
+
+MODULE_DESCRIPTION("GCC MSM 8660 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-msm8660");
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
new file mode 100644
index 000000000000..fd446ab2fd98
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -0,0 +1,2993 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8960.h>
+#include <dt-bindings/reset/qcom,gcc-msm8960.h>
+
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+static struct clk_pll pll3 = {
+ .l_reg = 0x3164,
+ .m_reg = 0x3168,
+ .n_reg = 0x316c,
+ .config_reg = 0x3174,
+ .mode_reg = 0x3160,
+ .status_reg = 0x3178,
+ .status_bit = 16,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pll3",
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_pll pll8 = {
+ .l_reg = 0x3144,
+ .m_reg = 0x3148,
+ .n_reg = 0x314c,
+ .config_reg = 0x3154,
+ .mode_reg = 0x3140,
+ .status_reg = 0x3158,
+ .status_bit = 16,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pll8",
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap pll8_vote = {
+ .enable_reg = 0x34c0,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "pll8_vote",
+ .parent_names = (const char *[]){ "pll8" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll pll14 = {
+ .l_reg = 0x31c4,
+ .m_reg = 0x31c8,
+ .n_reg = 0x31cc,
+ .config_reg = 0x31d4,
+ .mode_reg = 0x31c0,
+ .status_reg = 0x31d8,
+ .status_bit = 16,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pll14",
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap pll14_vote = {
+ .enable_reg = 0x34c0,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "pll14_vote",
+ .parent_names = (const char *[]){ "pll14" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+#define P_PXO 0
+#define P_PLL8 1
+#define P_CXO 2
+
+static const u8 gcc_pxo_pll8_map[] = {
+ [P_PXO] = 0,
+ [P_PLL8] = 3,
+};
+
+static const char *gcc_pxo_pll8[] = {
+ "pxo",
+ "pll8_vote",
+};
+
+static const u8 gcc_pxo_pll8_cxo_map[] = {
+ [P_PXO] = 0,
+ [P_PLL8] = 3,
+ [P_CXO] = 5,
+};
+
+static const char *gcc_pxo_pll8_cxo[] = {
+ "pxo",
+ "pll8_vote",
+ "cxo",
+};
+
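+/*
+ * Frequency table entries are { rate (Hz), source (P_*), pre-divider, M, N }.
+ * The output rate is parent_rate / pre_div * M / N; entries with M = N = 0
+ * bypass the MN counter.
+ */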
+static struct freq_tbl clk_tbl_gsbi_uart[] = {
+ { 1843200, P_PLL8, 2, 6, 625 },
+ { 3686400, P_PLL8, 2, 12, 625 },
+ { 7372800, P_PLL8, 2, 24, 625 },
+ { 14745600, P_PLL8, 2, 48, 625 },
+ { 16000000, P_PLL8, 4, 1, 6 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 32000000, P_PLL8, 4, 1, 3 },
+ { 40000000, P_PLL8, 1, 5, 48 },
+ { 46400000, P_PLL8, 1, 29, 240 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 51200000, P_PLL8, 1, 2, 15 },
+ { 56000000, P_PLL8, 1, 7, 48 },
+ { 58982400, P_PLL8, 1, 96, 625 },
+ { 64000000, P_PLL8, 2, 1, 3 },
+ { }
+};
+
+static struct clk_rcg gsbi1_uart_src = {
+ .ns_reg = 0x29d4,
+ .md_reg = 0x29d0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x29d4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi1_uart_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x29d4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi1_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi2_uart_src = {
+ .ns_reg = 0x29f4,
+ .md_reg = 0x29f0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x29f4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi2_uart_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x29f4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi2_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi3_uart_src = {
+ .ns_reg = 0x2a14,
+ .md_reg = 0x2a10,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a14,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi3_uart_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x2a14,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi3_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi4_uart_src = {
+ .ns_reg = 0x2a34,
+ .md_reg = 0x2a30,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a34,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi4_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 26,
+ .clkr = {
+ .enable_reg = 0x2a34,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi4_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi5_uart_src = {
+ .ns_reg = 0x2a54,
+ .md_reg = 0x2a50,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a54,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi5_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 22,
+ .clkr = {
+ .enable_reg = 0x2a54,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi5_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi6_uart_src = {
+ .ns_reg = 0x2a74,
+ .md_reg = 0x2a70,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a74,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi6_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 18,
+ .clkr = {
+ .enable_reg = 0x2a74,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi6_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi7_uart_src = {
+ .ns_reg = 0x2a94,
+ .md_reg = 0x2a90,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a94,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi7_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x2a94,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi7_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi8_uart_src = {
+ .ns_reg = 0x2ab4,
+ .md_reg = 0x2ab0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2ab4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi8_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x2ab4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_uart_clk",
+ .parent_names = (const char *[]){ "gsbi8_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi9_uart_src = {
+ .ns_reg = 0x2ad4,
+ .md_reg = 0x2ad0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2ad4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi9_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x2ad4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_uart_clk",
+ .parent_names = (const char *[]){ "gsbi9_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi10_uart_src = {
+ .ns_reg = 0x2af4,
+ .md_reg = 0x2af0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2af4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi10_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x2af4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_uart_clk",
+ .parent_names = (const char *[]){ "gsbi10_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi11_uart_src = {
+ .ns_reg = 0x2b14,
+ .md_reg = 0x2b10,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2b14,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi11_uart_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 17,
+ .clkr = {
+ .enable_reg = 0x2b14,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_uart_clk",
+ .parent_names = (const char *[]){ "gsbi11_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi12_uart_src = {
+ .ns_reg = 0x2b34,
+ .md_reg = 0x2b30,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2b34,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi12_uart_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x2b34,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_uart_clk",
+ .parent_names = (const char *[]){ "gsbi12_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_gsbi_qup[] = {
+ { 1100000, P_PXO, 1, 2, 49 },
+ { 5400000, P_PXO, 1, 1, 5 },
+ { 10800000, P_PXO, 1, 2, 5 },
+ { 15060000, P_PLL8, 1, 2, 51 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 25600000, P_PLL8, 1, 1, 15 },
+ { 27000000, P_PXO, 1, 0, 0 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 51200000, P_PLL8, 1, 2, 15 },
+ { }
+};
+
+static struct clk_rcg gsbi1_qup_src = {
+ .ns_reg = 0x29cc,
+ .md_reg = 0x29c8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x29cc,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi1_qup_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x29cc,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_qup_clk",
+ .parent_names = (const char *[]){ "gsbi1_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi2_qup_src = {
+ .ns_reg = 0x29ec,
+ .md_reg = 0x29e8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x29ec,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi2_qup_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x29ec,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_qup_clk",
+ .parent_names = (const char *[]){ "gsbi2_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi3_qup_src = {
+ .ns_reg = 0x2a0c,
+ .md_reg = 0x2a08,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a0c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi3_qup_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x2a0c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_qup_clk",
+ .parent_names = (const char *[]){ "gsbi3_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi4_qup_src = {
+ .ns_reg = 0x2a2c,
+ .md_reg = 0x2a28,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a2c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi4_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 24,
+ .clkr = {
+ .enable_reg = 0x2a2c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_qup_clk",
+ .parent_names = (const char *[]){ "gsbi4_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi5_qup_src = {
+ .ns_reg = 0x2a4c,
+ .md_reg = 0x2a48,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a4c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi5_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 20,
+ .clkr = {
+ .enable_reg = 0x2a4c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_qup_clk",
+ .parent_names = (const char *[]){ "gsbi5_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi6_qup_src = {
+ .ns_reg = 0x2a6c,
+ .md_reg = 0x2a68,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a6c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi6_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 16,
+ .clkr = {
+ .enable_reg = 0x2a6c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_qup_clk",
+ .parent_names = (const char *[]){ "gsbi6_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi7_qup_src = {
+ .ns_reg = 0x2a8c,
+ .md_reg = 0x2a88,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a8c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi7_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x2a8c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_qup_clk",
+ .parent_names = (const char *[]){ "gsbi7_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi8_qup_src = {
+ .ns_reg = 0x2aac,
+ .md_reg = 0x2aa8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2aac,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi8_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 8,
+ .clkr = {
+ .enable_reg = 0x2aac,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_qup_clk",
+ .parent_names = (const char *[]){ "gsbi8_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi9_qup_src = {
+ .ns_reg = 0x2acc,
+ .md_reg = 0x2ac8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2acc,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi9_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x2acc,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_qup_clk",
+ .parent_names = (const char *[]){ "gsbi9_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi10_qup_src = {
+ .ns_reg = 0x2aec,
+ .md_reg = 0x2ae8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2aec,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi10_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x2aec,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_qup_clk",
+ .parent_names = (const char *[]){ "gsbi10_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi11_qup_src = {
+ .ns_reg = 0x2b0c,
+ .md_reg = 0x2b08,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2b0c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi11_qup_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x2b0c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_qup_clk",
+ .parent_names = (const char *[]){ "gsbi11_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi12_qup_src = {
+ .ns_reg = 0x2b2c,
+ .md_reg = 0x2b28,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2b2c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi12_qup_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x2b2c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_qup_clk",
+ .parent_names = (const char *[]){ "gsbi12_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_gp[] = {
+ { 9600000, P_CXO, 2, 0, 0 },
+ { 13500000, P_PXO, 2, 0, 0 },
+ { 19200000, P_CXO, 1, 0, 0 },
+ { 27000000, P_PXO, 1, 0, 0 },
+ { 64000000, P_PLL8, 2, 1, 3 },
+ { 76800000, P_PLL8, 1, 1, 5 },
+ { 96000000, P_PLL8, 4, 0, 0 },
+ { 128000000, P_PLL8, 3, 0, 0 },
+ { 192000000, P_PLL8, 2, 0, 0 },
+ { }
+};
+
+static struct clk_rcg gp0_src = {
+ .ns_reg = 0x2d24,
+ .md_reg = 0x2d00,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_cxo_map,
+ },
+ .freq_tbl = clk_tbl_gp,
+ .clkr = {
+ .enable_reg = 0x2d24,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_src",
+ .parent_names = gcc_pxo_pll8_cxo,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ }
+};
+
+static struct clk_branch gp0_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x2d24,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_clk",
+ .parent_names = (const char *[]){ "gp0_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gp1_src = {
+ .ns_reg = 0x2d44,
+ .md_reg = 0x2d40,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_cxo_map,
+ },
+ .freq_tbl = clk_tbl_gp,
+ .clkr = {
+ .enable_reg = 0x2d44,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_src",
+ .parent_names = gcc_pxo_pll8_cxo,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch gp1_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x2d44,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_clk",
+ .parent_names = (const char *[]){ "gp1_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gp2_src = {
+ .ns_reg = 0x2d64,
+ .md_reg = 0x2d60,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_cxo_map,
+ },
+ .freq_tbl = clk_tbl_gp,
+ .clkr = {
+ .enable_reg = 0x2d64,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp2_src",
+ .parent_names = gcc_pxo_pll8_cxo,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch gp2_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x2d64,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp2_clk",
+ .parent_names = (const char *[]){ "gp2_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch pmem_clk = {
+ .hwcg_reg = 0x25a0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 20,
+ .clkr = {
+ .enable_reg = 0x25a0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmem_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_rcg prng_src = {
+ .ns_reg = 0x2e80,
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 4,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "prng_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch prng_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "prng_clk",
+ .parent_names = (const char *[]){ "prng_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_sdc[] = {
+ { 144000, P_PXO, 3, 2, 125 },
+ { 400000, P_PLL8, 4, 1, 240 },
+ { 16000000, P_PLL8, 4, 1, 6 },
+ { 17070000, P_PLL8, 1, 2, 45 },
+ { 20210000, P_PLL8, 1, 1, 19 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 64000000, P_PLL8, 3, 1, 2 },
+ { 96000000, P_PLL8, 4, 0, 0 },
+ { 192000000, P_PLL8, 2, 0, 0 },
+ { }
+};
+
+static struct clk_rcg sdc1_src = {
+ .ns_reg = 0x282c,
+ .md_reg = 0x2828,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x282c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc1_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc1_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x282c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc1_clk",
+ .parent_names = (const char *[]){ "sdc1_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc2_src = {
+ .ns_reg = 0x284c,
+ .md_reg = 0x2848,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x284c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc2_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc2_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x284c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc2_clk",
+ .parent_names = (const char *[]){ "sdc2_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc3_src = {
+ .ns_reg = 0x286c,
+ .md_reg = 0x2868,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x286c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc3_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc3_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x286c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc3_clk",
+ .parent_names = (const char *[]){ "sdc3_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc4_src = {
+ .ns_reg = 0x288c,
+ .md_reg = 0x2888,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x288c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc4_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc4_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x288c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc4_clk",
+ .parent_names = (const char *[]){ "sdc4_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc5_src = {
+ .ns_reg = 0x28ac,
+ .md_reg = 0x28a8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x28ac,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc5_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc5_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x28ac,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc5_clk",
+ .parent_names = (const char *[]){ "sdc5_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_tsif_ref[] = {
+ { 105000, P_PXO, 1, 1, 256 },
+ { }
+};
+
+static struct clk_rcg tsif_ref_src = {
+ .ns_reg = 0x2710,
+ .md_reg = 0x270c,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_tsif_ref,
+ .clkr = {
+ .enable_reg = 0x2710,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "tsif_ref_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch tsif_ref_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x2710,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "tsif_ref_clk",
+ .parent_names = (const char *[]){ "tsif_ref_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_usb[] = {
+ { 60000000, P_PLL8, 1, 5, 32 },
+ { }
+};
+
+static struct clk_rcg usb_hs1_xcvr_src = {
+ .ns_reg = 0x290c,
+ .md_reg = 0x2908,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb,
+ .clkr = {
+ .enable_reg = 0x290c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hs1_xcvr_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch usb_hs1_xcvr_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x290c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hs1_xcvr_clk",
+ .parent_names = (const char *[]){ "usb_hs1_xcvr_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg usb_hsic_xcvr_fs_src = {
+ .ns_reg = 0x2928,
+ .md_reg = 0x2924,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb,
+ .clkr = {
+ .enable_reg = 0x2928,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_xcvr_fs_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static const char *usb_hsic_xcvr_fs_src_p[] = { "usb_hsic_xcvr_fs_src" };
+
+static struct clk_branch usb_hsic_xcvr_fs_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x2928,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_xcvr_fs_clk",
+ .parent_names = usb_hsic_xcvr_fs_src_p,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch usb_hsic_system_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 24,
+ .clkr = {
+ .enable_reg = 0x292c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = usb_hsic_xcvr_fs_src_p,
+ .num_parents = 1,
+ .name = "usb_hsic_system_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch usb_hsic_hsic_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 19,
+ .clkr = {
+ .enable_reg = 0x2b44,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pll14_vote" },
+ .num_parents = 1,
+ .name = "usb_hsic_hsic_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch usb_hsic_hsio_cal_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 23,
+ .clkr = {
+ .enable_reg = 0x2b48,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_hsio_cal_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_rcg usb_fs1_xcvr_fs_src = {
+ .ns_reg = 0x2968,
+ .md_reg = 0x2964,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb,
+ .clkr = {
+ .enable_reg = 0x2968,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs1_xcvr_fs_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static const char *usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" };
+
+static struct clk_branch usb_fs1_xcvr_fs_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x2968,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs1_xcvr_fs_clk",
+ .parent_names = usb_fs1_xcvr_fs_src_p,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch usb_fs1_system_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 16,
+ .clkr = {
+ .enable_reg = 0x296c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = usb_fs1_xcvr_fs_src_p,
+ .num_parents = 1,
+ .name = "usb_fs1_system_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg usb_fs2_xcvr_fs_src = {
+ .ns_reg = 0x2988,
+ .md_reg = 0x2984,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb,
+ .clkr = {
+ .enable_reg = 0x2988,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs2_xcvr_fs_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static const char *usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" };
+
+static struct clk_branch usb_fs2_xcvr_fs_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x2988,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs2_xcvr_fs_clk",
+ .parent_names = usb_fs2_xcvr_fs_src_p,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch usb_fs2_system_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x298c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs2_system_clk",
+ .parent_names = usb_fs2_xcvr_fs_src_p,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch ce1_core_clk = {
+ .hwcg_reg = 0x2724,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd4,
+ .halt_bit = 27,
+ .clkr = {
+ .enable_reg = 0x2724,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "ce1_core_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch ce1_h_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2720,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "ce1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch dma_bam_h_clk = {
+ .hwcg_reg = 0x25c0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x25c0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "dma_bam_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi1_h_clk = {
+ .hwcg_reg = 0x29c0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fcc,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x29c0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi2_h_clk = {
+ .hwcg_reg = 0x29e0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fcc,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x29e0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi3_h_clk = {
+ .hwcg_reg = 0x2a00,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fcc,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x2a00,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi4_h_clk = {
+ .hwcg_reg = 0x2a20,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd0,
+ .halt_bit = 27,
+ .clkr = {
+ .enable_reg = 0x2a20,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi5_h_clk = {
+ .hwcg_reg = 0x2a40,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd0,
+ .halt_bit = 23,
+ .clkr = {
+ .enable_reg = 0x2a40,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi6_h_clk = {
+ .hwcg_reg = 0x2a60,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd0,
+ .halt_bit = 19,
+ .clkr = {
+ .enable_reg = 0x2a60,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi7_h_clk = {
+ .hwcg_reg = 0x2a80,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd0,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x2a80,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi8_h_clk = {
+ .hwcg_reg = 0x2aa0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd0,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x2aa0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi9_h_clk = {
+ .hwcg_reg = 0x2ac0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd0,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x2ac0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi10_h_clk = {
+ .hwcg_reg = 0x2ae0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd0,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x2ae0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi11_h_clk = {
+ .hwcg_reg = 0x2b00,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd4,
+ .halt_bit = 18,
+ .clkr = {
+ .enable_reg = 0x2b00,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi12_h_clk = {
+ .hwcg_reg = 0x2b20,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd4,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x2b20,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch tsif_h_clk = {
+ .hwcg_reg = 0x2700,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd4,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x2700,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "tsif_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch usb_fs1_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 17,
+ .clkr = {
+ .enable_reg = 0x2960,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch usb_fs2_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x2980,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs2_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch usb_hs1_h_clk = {
+ .hwcg_reg = 0x2900,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2900,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hs1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch usb_hsic_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 28,
+ .clkr = {
+ .enable_reg = 0x2920,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc1_h_clk = {
+ .hwcg_reg = 0x2820,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x2820,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc2_h_clk = {
+ .hwcg_reg = 0x2840,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x2840,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc2_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc3_h_clk = {
+ .hwcg_reg = 0x2860,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x2860,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc3_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc4_h_clk = {
+ .hwcg_reg = 0x2880,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 8,
+ .clkr = {
+ .enable_reg = 0x2880,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc4_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc5_h_clk = {
+ .hwcg_reg = 0x28a0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x28a0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc5_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch adm0_clk = {
+ .halt_reg = 0x2fdc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "adm0_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch adm0_pbus_clk = {
+ .hwcg_reg = 0x2208,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fdc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "adm0_pbus_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch pmic_arb0_h_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 22,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmic_arb0_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch pmic_arb1_h_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 21,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmic_arb1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch pmic_ssbi2_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 23,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmic_ssbi2_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch rpm_msg_ram_h_clk = {
+ .hwcg_reg = 0x27e0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "rpm_msg_ram_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_regmap *gcc_msm8960_clks[] = {
+ [PLL3] = &pll3.clkr,
+ [PLL8] = &pll8.clkr,
+ [PLL8_VOTE] = &pll8_vote,
+ [PLL14] = &pll14.clkr,
+ [PLL14_VOTE] = &pll14_vote,
+ [GSBI1_UART_SRC] = &gsbi1_uart_src.clkr,
+ [GSBI1_UART_CLK] = &gsbi1_uart_clk.clkr,
+ [GSBI2_UART_SRC] = &gsbi2_uart_src.clkr,
+ [GSBI2_UART_CLK] = &gsbi2_uart_clk.clkr,
+ [GSBI3_UART_SRC] = &gsbi3_uart_src.clkr,
+ [GSBI3_UART_CLK] = &gsbi3_uart_clk.clkr,
+ [GSBI4_UART_SRC] = &gsbi4_uart_src.clkr,
+ [GSBI4_UART_CLK] = &gsbi4_uart_clk.clkr,
+ [GSBI5_UART_SRC] = &gsbi5_uart_src.clkr,
+ [GSBI5_UART_CLK] = &gsbi5_uart_clk.clkr,
+ [GSBI6_UART_SRC] = &gsbi6_uart_src.clkr,
+ [GSBI6_UART_CLK] = &gsbi6_uart_clk.clkr,
+ [GSBI7_UART_SRC] = &gsbi7_uart_src.clkr,
+ [GSBI7_UART_CLK] = &gsbi7_uart_clk.clkr,
+ [GSBI8_UART_SRC] = &gsbi8_uart_src.clkr,
+ [GSBI8_UART_CLK] = &gsbi8_uart_clk.clkr,
+ [GSBI9_UART_SRC] = &gsbi9_uart_src.clkr,
+ [GSBI9_UART_CLK] = &gsbi9_uart_clk.clkr,
+ [GSBI10_UART_SRC] = &gsbi10_uart_src.clkr,
+ [GSBI10_UART_CLK] = &gsbi10_uart_clk.clkr,
+ [GSBI11_UART_SRC] = &gsbi11_uart_src.clkr,
+ [GSBI11_UART_CLK] = &gsbi11_uart_clk.clkr,
+ [GSBI12_UART_SRC] = &gsbi12_uart_src.clkr,
+ [GSBI12_UART_CLK] = &gsbi12_uart_clk.clkr,
+ [GSBI1_QUP_SRC] = &gsbi1_qup_src.clkr,
+ [GSBI1_QUP_CLK] = &gsbi1_qup_clk.clkr,
+ [GSBI2_QUP_SRC] = &gsbi2_qup_src.clkr,
+ [GSBI2_QUP_CLK] = &gsbi2_qup_clk.clkr,
+ [GSBI3_QUP_SRC] = &gsbi3_qup_src.clkr,
+ [GSBI3_QUP_CLK] = &gsbi3_qup_clk.clkr,
+ [GSBI4_QUP_SRC] = &gsbi4_qup_src.clkr,
+ [GSBI4_QUP_CLK] = &gsbi4_qup_clk.clkr,
+ [GSBI5_QUP_SRC] = &gsbi5_qup_src.clkr,
+ [GSBI5_QUP_CLK] = &gsbi5_qup_clk.clkr,
+ [GSBI6_QUP_SRC] = &gsbi6_qup_src.clkr,
+ [GSBI6_QUP_CLK] = &gsbi6_qup_clk.clkr,
+ [GSBI7_QUP_SRC] = &gsbi7_qup_src.clkr,
+ [GSBI7_QUP_CLK] = &gsbi7_qup_clk.clkr,
+ [GSBI8_QUP_SRC] = &gsbi8_qup_src.clkr,
+ [GSBI8_QUP_CLK] = &gsbi8_qup_clk.clkr,
+ [GSBI9_QUP_SRC] = &gsbi9_qup_src.clkr,
+ [GSBI9_QUP_CLK] = &gsbi9_qup_clk.clkr,
+ [GSBI10_QUP_SRC] = &gsbi10_qup_src.clkr,
+ [GSBI10_QUP_CLK] = &gsbi10_qup_clk.clkr,
+ [GSBI11_QUP_SRC] = &gsbi11_qup_src.clkr,
+ [GSBI11_QUP_CLK] = &gsbi11_qup_clk.clkr,
+ [GSBI12_QUP_SRC] = &gsbi12_qup_src.clkr,
+ [GSBI12_QUP_CLK] = &gsbi12_qup_clk.clkr,
+ [GP0_SRC] = &gp0_src.clkr,
+ [GP0_CLK] = &gp0_clk.clkr,
+ [GP1_SRC] = &gp1_src.clkr,
+ [GP1_CLK] = &gp1_clk.clkr,
+ [GP2_SRC] = &gp2_src.clkr,
+ [GP2_CLK] = &gp2_clk.clkr,
+ [PMEM_A_CLK] = &pmem_clk.clkr,
+ [PRNG_SRC] = &prng_src.clkr,
+ [PRNG_CLK] = &prng_clk.clkr,
+ [SDC1_SRC] = &sdc1_src.clkr,
+ [SDC1_CLK] = &sdc1_clk.clkr,
+ [SDC2_SRC] = &sdc2_src.clkr,
+ [SDC2_CLK] = &sdc2_clk.clkr,
+ [SDC3_SRC] = &sdc3_src.clkr,
+ [SDC3_CLK] = &sdc3_clk.clkr,
+ [SDC4_SRC] = &sdc4_src.clkr,
+ [SDC4_CLK] = &sdc4_clk.clkr,
+ [SDC5_SRC] = &sdc5_src.clkr,
+ [SDC5_CLK] = &sdc5_clk.clkr,
+ [TSIF_REF_SRC] = &tsif_ref_src.clkr,
+ [TSIF_REF_CLK] = &tsif_ref_clk.clkr,
+ [USB_HS1_XCVR_SRC] = &usb_hs1_xcvr_src.clkr,
+ [USB_HS1_XCVR_CLK] = &usb_hs1_xcvr_clk.clkr,
+ [USB_HSIC_XCVR_FS_SRC] = &usb_hsic_xcvr_fs_src.clkr,
+ [USB_HSIC_XCVR_FS_CLK] = &usb_hsic_xcvr_fs_clk.clkr,
+ [USB_HSIC_SYSTEM_CLK] = &usb_hsic_system_clk.clkr,
+ [USB_HSIC_HSIC_CLK] = &usb_hsic_hsic_clk.clkr,
+ [USB_HSIC_HSIO_CAL_CLK] = &usb_hsic_hsio_cal_clk.clkr,
+ [USB_FS1_XCVR_FS_SRC] = &usb_fs1_xcvr_fs_src.clkr,
+ [USB_FS1_XCVR_FS_CLK] = &usb_fs1_xcvr_fs_clk.clkr,
+ [USB_FS1_SYSTEM_CLK] = &usb_fs1_system_clk.clkr,
+ [USB_FS2_XCVR_FS_SRC] = &usb_fs2_xcvr_fs_src.clkr,
+ [USB_FS2_XCVR_FS_CLK] = &usb_fs2_xcvr_fs_clk.clkr,
+ [USB_FS2_SYSTEM_CLK] = &usb_fs2_system_clk.clkr,
+ [CE1_CORE_CLK] = &ce1_core_clk.clkr,
+ [CE1_H_CLK] = &ce1_h_clk.clkr,
+ [DMA_BAM_H_CLK] = &dma_bam_h_clk.clkr,
+ [GSBI1_H_CLK] = &gsbi1_h_clk.clkr,
+ [GSBI2_H_CLK] = &gsbi2_h_clk.clkr,
+ [GSBI3_H_CLK] = &gsbi3_h_clk.clkr,
+ [GSBI4_H_CLK] = &gsbi4_h_clk.clkr,
+ [GSBI5_H_CLK] = &gsbi5_h_clk.clkr,
+ [GSBI6_H_CLK] = &gsbi6_h_clk.clkr,
+ [GSBI7_H_CLK] = &gsbi7_h_clk.clkr,
+ [GSBI8_H_CLK] = &gsbi8_h_clk.clkr,
+ [GSBI9_H_CLK] = &gsbi9_h_clk.clkr,
+ [GSBI10_H_CLK] = &gsbi10_h_clk.clkr,
+ [GSBI11_H_CLK] = &gsbi11_h_clk.clkr,
+ [GSBI12_H_CLK] = &gsbi12_h_clk.clkr,
+ [TSIF_H_CLK] = &tsif_h_clk.clkr,
+ [USB_FS1_H_CLK] = &usb_fs1_h_clk.clkr,
+ [USB_FS2_H_CLK] = &usb_fs2_h_clk.clkr,
+ [USB_HS1_H_CLK] = &usb_hs1_h_clk.clkr,
+ [USB_HSIC_H_CLK] = &usb_hsic_h_clk.clkr,
+ [SDC1_H_CLK] = &sdc1_h_clk.clkr,
+ [SDC2_H_CLK] = &sdc2_h_clk.clkr,
+ [SDC3_H_CLK] = &sdc3_h_clk.clkr,
+ [SDC4_H_CLK] = &sdc4_h_clk.clkr,
+ [SDC5_H_CLK] = &sdc5_h_clk.clkr,
+ [ADM0_CLK] = &adm0_clk.clkr,
+ [ADM0_PBUS_CLK] = &adm0_pbus_clk.clkr,
+ [PMIC_ARB0_H_CLK] = &pmic_arb0_h_clk.clkr,
+ [PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
+ [PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
+ [RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_msm8960_resets[] = {
+ [SFAB_MSS_Q6_SW_RESET] = { 0x2040, 7 },
+ [SFAB_MSS_Q6_FW_RESET] = { 0x2044, 7 },
+ [QDSS_STM_RESET] = { 0x2060, 6 },
+ [AFAB_SMPSS_S_RESET] = { 0x20b8, 2 },
+ [AFAB_SMPSS_M1_RESET] = { 0x20b8, 1 },
+ [AFAB_SMPSS_M0_RESET] = { 0x20b8 },
+ [AFAB_EBI1_CH0_RESET] = { 0x20c0, 7 },
+	[AFAB_EBI1_CH1_RESET] = { 0x20c4, 7 },
+ [SFAB_ADM0_M0_RESET] = { 0x21e0, 7 },
+ [SFAB_ADM0_M1_RESET] = { 0x21e4, 7 },
+ [SFAB_ADM0_M2_RESET] = { 0x21e8, 7 },
+	[ADM0_C2_RESET] = { 0x220c, 4 },
+	[ADM0_C1_RESET] = { 0x220c, 3 },
+	[ADM0_C0_RESET] = { 0x220c, 2 },
+ [ADM0_PBUS_RESET] = { 0x220c, 1 },
+ [ADM0_RESET] = { 0x220c },
+ [QDSS_CLKS_SW_RESET] = { 0x2260, 5 },
+ [QDSS_POR_RESET] = { 0x2260, 4 },
+ [QDSS_TSCTR_RESET] = { 0x2260, 3 },
+ [QDSS_HRESET_RESET] = { 0x2260, 2 },
+ [QDSS_AXI_RESET] = { 0x2260, 1 },
+ [QDSS_DBG_RESET] = { 0x2260 },
+ [PCIE_A_RESET] = { 0x22c0, 7 },
+ [PCIE_AUX_RESET] = { 0x22c8, 7 },
+ [PCIE_H_RESET] = { 0x22d0, 7 },
+ [SFAB_PCIE_M_RESET] = { 0x22d4, 1 },
+ [SFAB_PCIE_S_RESET] = { 0x22d4 },
+ [SFAB_MSS_M_RESET] = { 0x2340, 7 },
+ [SFAB_USB3_M_RESET] = { 0x2360, 7 },
+ [SFAB_RIVA_M_RESET] = { 0x2380, 7 },
+ [SFAB_LPASS_RESET] = { 0x23a0, 7 },
+ [SFAB_AFAB_M_RESET] = { 0x23e0, 7 },
+ [AFAB_SFAB_M0_RESET] = { 0x2420, 7 },
+ [AFAB_SFAB_M1_RESET] = { 0x2424, 7 },
+ [SFAB_SATA_S_RESET] = { 0x2480, 7 },
+ [SFAB_DFAB_M_RESET] = { 0x2500, 7 },
+ [DFAB_SFAB_M_RESET] = { 0x2520, 7 },
+ [DFAB_SWAY0_RESET] = { 0x2540, 7 },
+ [DFAB_SWAY1_RESET] = { 0x2544, 7 },
+ [DFAB_ARB0_RESET] = { 0x2560, 7 },
+ [DFAB_ARB1_RESET] = { 0x2564, 7 },
+ [PPSS_PROC_RESET] = { 0x2594, 1 },
+	[PPSS_RESET] = { 0x2594 },
+ [DMA_BAM_RESET] = { 0x25c0, 7 },
+ [SIC_TIC_RESET] = { 0x2600, 7 },
+ [SLIMBUS_H_RESET] = { 0x2620, 7 },
+ [SFAB_CFPB_M_RESET] = { 0x2680, 7 },
+ [SFAB_CFPB_S_RESET] = { 0x26c0, 7 },
+ [TSIF_H_RESET] = { 0x2700, 7 },
+ [CE1_H_RESET] = { 0x2720, 7 },
+ [CE1_CORE_RESET] = { 0x2724, 7 },
+ [CE1_SLEEP_RESET] = { 0x2728, 7 },
+ [CE2_H_RESET] = { 0x2740, 7 },
+ [CE2_CORE_RESET] = { 0x2744, 7 },
+ [SFAB_SFPB_M_RESET] = { 0x2780, 7 },
+ [SFAB_SFPB_S_RESET] = { 0x27a0, 7 },
+ [RPM_PROC_RESET] = { 0x27c0, 7 },
+ [PMIC_SSBI2_RESET] = { 0x270c, 12 },
+ [SDC1_RESET] = { 0x2830 },
+ [SDC2_RESET] = { 0x2850 },
+ [SDC3_RESET] = { 0x2870 },
+ [SDC4_RESET] = { 0x2890 },
+ [SDC5_RESET] = { 0x28b0 },
+ [DFAB_A2_RESET] = { 0x28c0, 7 },
+ [USB_HS1_RESET] = { 0x2910 },
+ [USB_HSIC_RESET] = { 0x2934 },
+ [USB_FS1_XCVR_RESET] = { 0x2974, 1 },
+ [USB_FS1_RESET] = { 0x2974 },
+ [USB_FS2_XCVR_RESET] = { 0x2994, 1 },
+ [USB_FS2_RESET] = { 0x2994 },
+ [GSBI1_RESET] = { 0x29dc },
+ [GSBI2_RESET] = { 0x29fc },
+ [GSBI3_RESET] = { 0x2a1c },
+ [GSBI4_RESET] = { 0x2a3c },
+ [GSBI5_RESET] = { 0x2a5c },
+ [GSBI6_RESET] = { 0x2a7c },
+ [GSBI7_RESET] = { 0x2a9c },
+ [GSBI8_RESET] = { 0x2abc },
+ [GSBI9_RESET] = { 0x2adc },
+ [GSBI10_RESET] = { 0x2afc },
+ [GSBI11_RESET] = { 0x2b1c },
+ [GSBI12_RESET] = { 0x2b3c },
+ [SPDM_RESET] = { 0x2b6c },
+ [TLMM_H_RESET] = { 0x2ba0, 7 },
+ [SFAB_MSS_S_RESET] = { 0x2c00, 7 },
+ [MSS_SLP_RESET] = { 0x2c60, 7 },
+ [MSS_Q6SW_JTAG_RESET] = { 0x2c68, 7 },
+ [MSS_Q6FW_JTAG_RESET] = { 0x2c6c, 7 },
+ [MSS_RESET] = { 0x2c64 },
+ [SATA_H_RESET] = { 0x2c80, 7 },
+ [SATA_RXOOB_RESE] = { 0x2c8c, 7 },
+ [SATA_PMALIVE_RESET] = { 0x2c90, 7 },
+ [SATA_SFAB_M_RESET] = { 0x2c98, 7 },
+ [TSSC_RESET] = { 0x2ca0, 7 },
+ [PDM_RESET] = { 0x2cc0, 12 },
+ [MPM_H_RESET] = { 0x2da0, 7 },
+ [MPM_RESET] = { 0x2da4 },
+ [SFAB_SMPSS_S_RESET] = { 0x2e00, 7 },
+ [PRNG_RESET] = { 0x2e80, 12 },
+ [RIVA_RESET] = { 0x35e0 },
+};
+
+static const struct regmap_config gcc_msm8960_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x3660,
+ .fast_io = true,
+};
+
+static const struct of_device_id gcc_msm8960_match_table[] = {
+ { .compatible = "qcom,gcc-msm8960" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8960_match_table);
+
+struct qcom_cc {
+ struct qcom_reset_controller reset;
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+
+static int gcc_msm8960_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct resource *res;
+ int i, ret;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ struct clk_onecell_data *data;
+ struct clk **clks;
+ struct regmap *regmap;
+ size_t num_clks;
+ struct qcom_reset_controller *reset;
+ struct qcom_cc *cc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &gcc_msm8960_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ num_clks = ARRAY_SIZE(gcc_msm8960_clks);
+ cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*clks) * num_clks,
+ GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ clks = cc->clks;
+ data = &cc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ /* Temporary until RPM clocks supported */
+ clk = clk_register_fixed_rate(dev, "cxo", NULL, CLK_IS_ROOT, 19200000);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clk = clk_register_fixed_rate(dev, "pxo", NULL, CLK_IS_ROOT, 27000000);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ for (i = 0; i < num_clks; i++) {
+ if (!gcc_msm8960_clks[i])
+ continue;
+ clk = devm_clk_register_regmap(dev, gcc_msm8960_clks[i]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+ if (ret)
+ return ret;
+
+ reset = &cc->reset;
+ reset->rcdev.of_node = dev->of_node;
+	reset->rcdev.ops = &qcom_reset_ops;
+	reset->rcdev.owner = THIS_MODULE;
+	reset->rcdev.nr_resets = ARRAY_SIZE(gcc_msm8960_resets);
+	reset->regmap = regmap;
+	reset->reset_map = gcc_msm8960_resets;
+ platform_set_drvdata(pdev, &reset->rcdev);
+
+ ret = reset_controller_register(&reset->rcdev);
+ if (ret)
+ of_clk_del_provider(dev->of_node);
+
+ return ret;
+}
+
+static int gcc_msm8960_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ reset_controller_unregister(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static struct platform_driver gcc_msm8960_driver = {
+ .probe = gcc_msm8960_probe,
+ .remove = gcc_msm8960_remove,
+ .driver = {
+ .name = "gcc-msm8960",
+ .owner = THIS_MODULE,
+ .of_match_table = gcc_msm8960_match_table,
+ },
+};
+
+static int __init gcc_msm8960_init(void)
+{
+ return platform_driver_register(&gcc_msm8960_driver);
+}
+core_initcall(gcc_msm8960_init);
+
+static void __exit gcc_msm8960_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8960_driver);
+}
+module_exit(gcc_msm8960_exit);
+
+MODULE_DESCRIPTION("QCOM GCC MSM8960 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-msm8960");
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
new file mode 100644
index 000000000000..51d457e2b959
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -0,0 +1,2694 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8974.h>
+#include <dt-bindings/reset/qcom,gcc-msm8974.h>
+
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+#define P_XO 0
+#define P_GPLL0 1
+#define P_GPLL1 1
+
+static const u8 gcc_xo_gpll0_map[] = {
+ [P_XO] = 0,
+ [P_GPLL0] = 1,
+};
+
+static const char *gcc_xo_gpll0[] = {
+ "xo",
+ "gpll0_vote",
+};
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+static struct clk_pll gpll0 = {
+ .l_reg = 0x0004,
+ .m_reg = 0x0008,
+ .n_reg = 0x000c,
+ .config_reg = 0x0014,
+ .mode_reg = 0x0000,
+ .status_reg = 0x001c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll0_vote = {
+ .enable_reg = 0x1480,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_vote",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_rcg2 config_noc_clk_src = {
+ .cmd_rcgr = 0x0150,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "config_noc_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 periph_noc_clk_src = {
+ .cmd_rcgr = 0x0190,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "periph_noc_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 system_noc_clk_src = {
+ .cmd_rcgr = 0x0120,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "system_noc_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_pll gpll1 = {
+ .l_reg = 0x0044,
+ .m_reg = 0x0048,
+ .n_reg = 0x004c,
+ .config_reg = 0x0054,
+ .mode_reg = 0x0040,
+ .status_reg = 0x005c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll1_vote = {
+ .enable_reg = 0x1480,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1_vote",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_master_clk[] = {
+ F(125000000, P_GPLL0, 1, 5, 24),
+ { }
+};
+
+static struct clk_rcg2 usb30_master_clk_src = {
+ .cmd_rcgr = 0x03d4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb30_master_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_master_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(37500000, P_GPLL0, 16, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0660,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x064c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x06e0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x06cc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0760,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x074c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x07e0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x07cc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0860,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x084c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x08e0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x08cc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_2_uart1_6_apps_clk[] = {
+ F(3686400, P_GPLL0, 1, 96, 15625),
+ F(7372800, P_GPLL0, 1, 192, 15625),
+ F(14745600, P_GPLL0, 1, 384, 15625),
+ F(16000000, P_GPLL0, 5, 2, 15),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 5, 1, 5),
+ F(32000000, P_GPLL0, 1, 4, 75),
+ F(40000000, P_GPLL0, 15, 0, 0),
+ F(46400000, P_GPLL0, 1, 29, 375),
+ F(48000000, P_GPLL0, 12.5, 0, 0),
+ F(51200000, P_GPLL0, 1, 32, 375),
+ F(56000000, P_GPLL0, 1, 7, 75),
+ F(58982400, P_GPLL0, 1, 1536, 15625),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ F(63160000, P_GPLL0, 9.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x068c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x070c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x078c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart3_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x080c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart4_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
+ .cmd_rcgr = 0x088c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart5_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
+ .cmd_rcgr = 0x090c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart6_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x09a0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x098c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0a20,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0a0c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0aa0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0a8c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0b20,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0b0c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0ba0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup5_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0b8c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup5_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0c20,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup6_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0c0c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup6_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x09cc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x0a4c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x0acc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart3_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x0b4c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart4_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart5_apps_clk_src = {
+ .cmd_rcgr = 0x0bcc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart5_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart6_apps_clk_src = {
+ .cmd_rcgr = 0x0c4c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart6_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ce1_clk[] = {
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ce1_clk_src = {
+ .cmd_rcgr = 0x1050,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_ce1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ce1_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ce2_clk[] = {
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ce2_clk_src = {
+ .cmd_rcgr = 0x1090,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_ce2_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ce2_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp_clk[] = {
+ F(4800000, P_XO, 4, 0, 0),
+ F(6000000, P_GPLL0, 10, 1, 10),
+ F(6750000, P_GPLL0, 1, 1, 89),
+ F(8000000, P_GPLL0, 15, 1, 5),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16000000, P_GPLL0, 1, 2, 75),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 5, 1, 5),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x1904,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_gp_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp1_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x1944,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_gp_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp2_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x1984,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_gp_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp3_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk[] = {
+ F(60000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x0cd0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_pdm2_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pdm2_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_4_apps_clk[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 15, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x04d0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x0510,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 sdcc3_apps_clk_src = {
+ .cmd_rcgr = 0x0550,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc3_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x0590,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc4_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_tsif_ref_clk[] = {
+ F(105000, P_XO, 2, 1, 91),
+ { }
+};
+
+static struct clk_rcg2 tsif_ref_clk_src = {
+ .cmd_rcgr = 0x0d90,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_tsif_ref_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "tsif_ref_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk[] = {
+ F(60000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x03e8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb30_mock_utmi_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_mock_utmi_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
+ F(60000000, P_GPLL0, 10, 0, 0),
+ F(75000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hs_system_clk_src = {
+ .cmd_rcgr = 0x0490,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb_hs_system_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_hs_system_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hsic_clk[] = {
+ F(480000000, P_GPLL1, 1, 0, 0),
+ { }
+};
+
+static u8 usb_hsic_clk_src_map[] = {
+ [P_XO] = 0,
+ [P_GPLL1] = 4,
+};
+
+static struct clk_rcg2 usb_hsic_clk_src = {
+ .cmd_rcgr = 0x0440,
+ .hid_width = 5,
+ .parent_map = usb_hsic_clk_src_map,
+ .freq_tbl = ftbl_gcc_usb_hsic_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_clk_src",
+ .parent_names = (const char *[]){
+ "xo",
+ "gpll1_vote",
+ },
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hsic_io_cal_clk[] = {
+ F(9600000, P_XO, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hsic_io_cal_clk_src = {
+ .cmd_rcgr = 0x0458,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb_hsic_io_cal_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_io_cal_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hsic_system_clk[] = {
+ F(60000000, P_GPLL0, 10, 0, 0),
+ F(75000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hsic_system_clk_src = {
+ .cmd_rcgr = 0x041c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb_hsic_system_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_system_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap gcc_mmss_gpll0_clk_src = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_gpll0_vote",
+ .parent_names = (const char *[]){
+ "gpll0_vote",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_simple_ops,
+ },
+};
+
+static struct clk_branch gcc_bam_dma_ahb_clk = {
+ .halt_reg = 0x0d44,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bam_dma_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x05c4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x0648,
+ .clkr = {
+ .enable_reg = 0x0648,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x0644,
+ .clkr = {
+ .enable_reg = 0x0644,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x06c8,
+ .clkr = {
+ .enable_reg = 0x06c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x06c4,
+ .clkr = {
+ .enable_reg = 0x06c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x0748,
+ .clkr = {
+ .enable_reg = 0x0748,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x0744,
+ .clkr = {
+ .enable_reg = 0x0744,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x07c8,
+ .clkr = {
+ .enable_reg = 0x07c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x07c4,
+ .clkr = {
+ .enable_reg = 0x07c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+ .halt_reg = 0x0848,
+ .clkr = {
+ .enable_reg = 0x0848,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup5_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+ .halt_reg = 0x0844,
+ .clkr = {
+ .enable_reg = 0x0844,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup5_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
+ .halt_reg = 0x08c8,
+ .clkr = {
+ .enable_reg = 0x08c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup6_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+ .halt_reg = 0x08c4,
+ .clkr = {
+ .enable_reg = 0x08c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup6_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x0684,
+ .clkr = {
+ .enable_reg = 0x0684,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x0704,
+ .clkr = {
+ .enable_reg = 0x0704,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x0784,
+ .clkr = {
+ .enable_reg = 0x0784,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+ .halt_reg = 0x0804,
+ .clkr = {
+ .enable_reg = 0x0804,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart4_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart5_apps_clk = {
+ .halt_reg = 0x0884,
+ .clkr = {
+ .enable_reg = 0x0884,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart5_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart5_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart6_apps_clk = {
+ .halt_reg = 0x0904,
+ .clkr = {
+ .enable_reg = 0x0904,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart6_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart6_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_ahb_clk = {
+ .halt_reg = 0x05c4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
+ .halt_reg = 0x0988,
+ .clkr = {
+ .enable_reg = 0x0988,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
+ .halt_reg = 0x0984,
+ .clkr = {
+ .enable_reg = 0x0984,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
+ .halt_reg = 0x0a08,
+ .clkr = {
+ .enable_reg = 0x0a08,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
+ .halt_reg = 0x0a04,
+ .clkr = {
+ .enable_reg = 0x0a04,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
+ .halt_reg = 0x0a88,
+ .clkr = {
+ .enable_reg = 0x0a88,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
+ .halt_reg = 0x0a84,
+ .clkr = {
+ .enable_reg = 0x0a84,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+ .halt_reg = 0x0b08,
+ .clkr = {
+ .enable_reg = 0x0b08,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+ .halt_reg = 0x0b04,
+ .clkr = {
+ .enable_reg = 0x0b04,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
+ .halt_reg = 0x0b88,
+ .clkr = {
+ .enable_reg = 0x0b88,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup5_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup5_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
+ .halt_reg = 0x0b84,
+ .clkr = {
+ .enable_reg = 0x0b84,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup5_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup5_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
+ .halt_reg = 0x0c08,
+ .clkr = {
+ .enable_reg = 0x0c08,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup6_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup6_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
+ .halt_reg = 0x0c04,
+ .clkr = {
+ .enable_reg = 0x0c04,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup6_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup6_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart1_apps_clk = {
+ .halt_reg = 0x09c4,
+ .clkr = {
+ .enable_reg = 0x09c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart2_apps_clk = {
+ .halt_reg = 0x0a44,
+ .clkr = {
+ .enable_reg = 0x0a44,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart3_apps_clk = {
+ .halt_reg = 0x0ac4,
+ .clkr = {
+ .enable_reg = 0x0ac4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart3_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart4_apps_clk = {
+ .halt_reg = 0x0b44,
+ .clkr = {
+ .enable_reg = 0x0b44,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart4_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart5_apps_clk = {
+ .halt_reg = 0x0bc4,
+ .clkr = {
+ .enable_reg = 0x0bc4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart5_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart5_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart6_apps_clk = {
+ .halt_reg = 0x0c44,
+ .clkr = {
+ .enable_reg = 0x0c44,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart6_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart6_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x0e04,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .parent_names = (const char *[]){
+ "config_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x104c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_ahb_clk",
+ .parent_names = (const char *[]){
+ "config_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x1048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_axi_clk",
+ .parent_names = (const char *[]){
+ "system_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x1050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_clk",
+ .parent_names = (const char *[]){
+ "ce1_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce2_ahb_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce2_ahb_clk",
+ .parent_names = (const char *[]){
+ "config_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce2_axi_clk = {
+ .halt_reg = 0x1088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce2_axi_clk",
+ .parent_names = (const char *[]){
+ "system_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce2_clk = {
+ .halt_reg = 0x1090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce2_clk",
+ .parent_names = (const char *[]){
+ "ce2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x1900,
+ .clkr = {
+ .enable_reg = 0x1900,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x1940,
+ .clkr = {
+ .enable_reg = 0x1940,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gp2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x1980,
+ .clkr = {
+ .enable_reg = 0x1980,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gp3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_q6_axi_clk = {
+ .halt_reg = 0x11c0,
+ .clkr = {
+ .enable_reg = 0x11c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_q6_axi_clk",
+ .parent_names = (const char *[]){
+ "system_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
+ .halt_reg = 0x024c,
+ .clkr = {
+ .enable_reg = 0x024c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_noc_cfg_ahb_clk",
+ .parent_names = (const char *[]){
+ "config_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ },
+};
+
+static struct clk_branch gcc_ocmem_noc_cfg_ahb_clk = {
+ .halt_reg = 0x0248,
+ .clkr = {
+ .enable_reg = 0x0248,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ocmem_noc_cfg_ahb_clk",
+ .parent_names = (const char *[]){
+ "config_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x0280,
+ .clkr = {
+ .enable_reg = 0x0280,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .parent_names = (const char *[]){
+ "config_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x0284,
+ .clkr = {
+ .enable_reg = 0x0284,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .flags = CLK_IS_ROOT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x0ccc,
+ .clkr = {
+ .enable_reg = 0x0ccc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]){
+ "pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x0cc4,
+ .clkr = {
+ .enable_reg = 0x0cc4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x0d04,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x04c8,
+ .clkr = {
+ .enable_reg = 0x04c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x04c4,
+ .clkr = {
+ .enable_reg = 0x04c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x0508,
+ .clkr = {
+ .enable_reg = 0x0508,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x0504,
+ .clkr = {
+ .enable_reg = 0x0504,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc3_ahb_clk = {
+ .halt_reg = 0x0548,
+ .clkr = {
+ .enable_reg = 0x0548,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc3_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc3_apps_clk = {
+ .halt_reg = 0x0544,
+ .clkr = {
+ .enable_reg = 0x0544,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc3_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x0588,
+ .clkr = {
+ .enable_reg = 0x0588,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x0584,
+ .clkr = {
+ .enable_reg = 0x0584,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_usb3_axi_clk = {
+ .halt_reg = 0x0108,
+ .clkr = {
+ .enable_reg = 0x0108,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_usb3_axi_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ahb_clk = {
+ .halt_reg = 0x0d84,
+ .clkr = {
+ .enable_reg = 0x0d84,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+ .halt_reg = 0x0d88,
+ .clkr = {
+ .enable_reg = 0x0d88,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk",
+ .parent_names = (const char *[]){
+ "tsif_ref_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2a_phy_sleep_clk = {
+ .halt_reg = 0x04ac,
+ .clkr = {
+ .enable_reg = 0x04ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb2a_phy_sleep_clk",
+ .parent_names = (const char *[]){
+ "sleep_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2b_phy_sleep_clk = {
+ .halt_reg = 0x04b4,
+ .clkr = {
+ .enable_reg = 0x04b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb2b_phy_sleep_clk",
+ .parent_names = (const char *[]){
+ "sleep_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0x03c8,
+ .clkr = {
+ .enable_reg = 0x03c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0x03d0,
+ .clkr = {
+ .enable_reg = 0x03d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "usb30_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+ .halt_reg = 0x03cc,
+ .clkr = {
+ .enable_reg = 0x03cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sleep_clk",
+ .parent_names = (const char *[]){
+ "sleep_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_ahb_clk = {
+ .halt_reg = 0x0488,
+ .clkr = {
+ .enable_reg = 0x0488,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_system_clk = {
+ .halt_reg = 0x0484,
+ .clkr = {
+ .enable_reg = 0x0484,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_system_clk",
+ .parent_names = (const char *[]){
+ "usb_hs_system_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hsic_ahb_clk = {
+ .halt_reg = 0x0408,
+ .clkr = {
+ .enable_reg = 0x0408,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hsic_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hsic_clk = {
+ .halt_reg = 0x0410,
+ .clkr = {
+ .enable_reg = 0x0410,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hsic_clk",
+ .parent_names = (const char *[]){
+ "usb_hsic_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hsic_io_cal_clk = {
+ .halt_reg = 0x0414,
+ .clkr = {
+ .enable_reg = 0x0414,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hsic_io_cal_clk",
+ .parent_names = (const char *[]){
+ "usb_hsic_io_cal_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hsic_io_cal_sleep_clk = {
+ .halt_reg = 0x0418,
+ .clkr = {
+ .enable_reg = 0x0418,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hsic_io_cal_sleep_clk",
+ .parent_names = (const char *[]){
+ "sleep_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hsic_system_clk = {
+ .halt_reg = 0x040c,
+ .clkr = {
+ .enable_reg = 0x040c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hsic_system_clk",
+ .parent_names = (const char *[]){
+ "usb_hsic_system_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *gcc_msm8974_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_VOTE] = &gpll0_vote,
+ [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
+ [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
+ [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
+ [GPLL1] = &gpll1.clkr,
+ [GPLL1_VOTE] = &gpll1_vote,
+ [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+ [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr,
+ [BLSP1_UART4_APPS_CLK_SRC] = &blsp1_uart4_apps_clk_src.clkr,
+ [BLSP1_UART5_APPS_CLK_SRC] = &blsp1_uart5_apps_clk_src.clkr,
+ [BLSP1_UART6_APPS_CLK_SRC] = &blsp1_uart6_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+ [BLSP2_QUP5_I2C_APPS_CLK_SRC] = &blsp2_qup5_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP5_SPI_APPS_CLK_SRC] = &blsp2_qup5_spi_apps_clk_src.clkr,
+ [BLSP2_QUP6_I2C_APPS_CLK_SRC] = &blsp2_qup6_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP6_SPI_APPS_CLK_SRC] = &blsp2_qup6_spi_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [BLSP2_UART3_APPS_CLK_SRC] = &blsp2_uart3_apps_clk_src.clkr,
+ [BLSP2_UART4_APPS_CLK_SRC] = &blsp2_uart4_apps_clk_src.clkr,
+ [BLSP2_UART5_APPS_CLK_SRC] = &blsp2_uart5_apps_clk_src.clkr,
+ [BLSP2_UART6_APPS_CLK_SRC] = &blsp2_uart6_apps_clk_src.clkr,
+ [CE1_CLK_SRC] = &ce1_clk_src.clkr,
+ [CE2_CLK_SRC] = &ce2_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [SDCC3_APPS_CLK_SRC] = &sdcc3_apps_clk_src.clkr,
+ [SDCC4_APPS_CLK_SRC] = &sdcc4_apps_clk_src.clkr,
+ [TSIF_REF_CLK_SRC] = &tsif_ref_clk_src.clkr,
+ [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [USB_HSIC_CLK_SRC] = &usb_hsic_clk_src.clkr,
+ [USB_HSIC_IO_CAL_CLK_SRC] = &usb_hsic_io_cal_clk_src.clkr,
+ [USB_HSIC_SYSTEM_CLK_SRC] = &usb_hsic_system_clk_src.clkr,
+ [GCC_BAM_DMA_AHB_CLK] = &gcc_bam_dma_ahb_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+ [GCC_BLSP1_UART5_APPS_CLK] = &gcc_blsp1_uart5_apps_clk.clkr,
+ [GCC_BLSP1_UART6_APPS_CLK] = &gcc_blsp1_uart6_apps_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP5_I2C_APPS_CLK] = &gcc_blsp2_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP5_SPI_APPS_CLK] = &gcc_blsp2_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP6_I2C_APPS_CLK] = &gcc_blsp2_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP6_SPI_APPS_CLK] = &gcc_blsp2_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_BLSP2_UART3_APPS_CLK] = &gcc_blsp2_uart3_apps_clk.clkr,
+ [GCC_BLSP2_UART4_APPS_CLK] = &gcc_blsp2_uart4_apps_clk.clkr,
+ [GCC_BLSP2_UART5_APPS_CLK] = &gcc_blsp2_uart5_apps_clk.clkr,
+ [GCC_BLSP2_UART6_APPS_CLK] = &gcc_blsp2_uart6_apps_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CE2_AHB_CLK] = &gcc_ce2_ahb_clk.clkr,
+ [GCC_CE2_AXI_CLK] = &gcc_ce2_axi_clk.clkr,
+ [GCC_CE2_CLK] = &gcc_ce2_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_LPASS_Q6_AXI_CLK] = &gcc_lpass_q6_axi_clk.clkr,
+ [GCC_MMSS_NOC_CFG_AHB_CLK] = &gcc_mmss_noc_cfg_ahb_clk.clkr,
+ [GCC_OCMEM_NOC_CFG_AHB_CLK] = &gcc_ocmem_noc_cfg_ahb_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC3_AHB_CLK] = &gcc_sdcc3_ahb_clk.clkr,
+ [GCC_SDCC3_APPS_CLK] = &gcc_sdcc3_apps_clk.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SYS_NOC_USB3_AXI_CLK] = &gcc_sys_noc_usb3_axi_clk.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+ [GCC_USB2B_PHY_SLEEP_CLK] = &gcc_usb2b_phy_sleep_clk.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+ [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+ [GCC_USB_HSIC_AHB_CLK] = &gcc_usb_hsic_ahb_clk.clkr,
+ [GCC_USB_HSIC_CLK] = &gcc_usb_hsic_clk.clkr,
+ [GCC_USB_HSIC_IO_CAL_CLK] = &gcc_usb_hsic_io_cal_clk.clkr,
+ [GCC_USB_HSIC_IO_CAL_SLEEP_CLK] = &gcc_usb_hsic_io_cal_sleep_clk.clkr,
+ [GCC_USB_HSIC_SYSTEM_CLK] = &gcc_usb_hsic_system_clk.clkr,
+ [GCC_MMSS_GPLL0_CLK_SRC] = &gcc_mmss_gpll0_clk_src,
+};
+
+static const struct qcom_reset_map gcc_msm8974_resets[] = {
+ [GCC_SYSTEM_NOC_BCR] = { 0x0100 },
+ [GCC_CONFIG_NOC_BCR] = { 0x0140 },
+ [GCC_PERIPH_NOC_BCR] = { 0x0180 },
+ [GCC_IMEM_BCR] = { 0x0200 },
+ [GCC_MMSS_BCR] = { 0x0240 },
+ [GCC_QDSS_BCR] = { 0x0300 },
+ [GCC_USB_30_BCR] = { 0x03c0 },
+ [GCC_USB3_PHY_BCR] = { 0x03fc },
+ [GCC_USB_HS_HSIC_BCR] = { 0x0400 },
+ [GCC_USB_HS_BCR] = { 0x0480 },
+ [GCC_USB2A_PHY_BCR] = { 0x04a8 },
+ [GCC_USB2B_PHY_BCR] = { 0x04b0 },
+ [GCC_SDCC1_BCR] = { 0x04c0 },
+ [GCC_SDCC2_BCR] = { 0x0500 },
+ [GCC_SDCC3_BCR] = { 0x0540 },
+ [GCC_SDCC4_BCR] = { 0x0580 },
+ [GCC_BLSP1_BCR] = { 0x05c0 },
+ [GCC_BLSP1_QUP1_BCR] = { 0x0640 },
+ [GCC_BLSP1_UART1_BCR] = { 0x0680 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x06c0 },
+ [GCC_BLSP1_UART2_BCR] = { 0x0700 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x0740 },
+ [GCC_BLSP1_UART3_BCR] = { 0x0780 },
+ [GCC_BLSP1_QUP4_BCR] = { 0x07c0 },
+ [GCC_BLSP1_UART4_BCR] = { 0x0800 },
+ [GCC_BLSP1_QUP5_BCR] = { 0x0840 },
+ [GCC_BLSP1_UART5_BCR] = { 0x0880 },
+ [GCC_BLSP1_QUP6_BCR] = { 0x08c0 },
+ [GCC_BLSP1_UART6_BCR] = { 0x0900 },
+ [GCC_BLSP2_BCR] = { 0x0940 },
+ [GCC_BLSP2_QUP1_BCR] = { 0x0980 },
+ [GCC_BLSP2_UART1_BCR] = { 0x09c0 },
+ [GCC_BLSP2_QUP2_BCR] = { 0x0a00 },
+ [GCC_BLSP2_UART2_BCR] = { 0x0a40 },
+ [GCC_BLSP2_QUP3_BCR] = { 0x0a80 },
+ [GCC_BLSP2_UART3_BCR] = { 0x0ac0 },
+ [GCC_BLSP2_QUP4_BCR] = { 0x0b00 },
+ [GCC_BLSP2_UART4_BCR] = { 0x0b40 },
+ [GCC_BLSP2_QUP5_BCR] = { 0x0b80 },
+ [GCC_BLSP2_UART5_BCR] = { 0x0bc0 },
+ [GCC_BLSP2_QUP6_BCR] = { 0x0c00 },
+ [GCC_BLSP2_UART6_BCR] = { 0x0c40 },
+ [GCC_PDM_BCR] = { 0x0cc0 },
+ [GCC_BAM_DMA_BCR] = { 0x0d40 },
+ [GCC_TSIF_BCR] = { 0x0d80 },
+ [GCC_TCSR_BCR] = { 0x0dc0 },
+ [GCC_BOOT_ROM_BCR] = { 0x0e00 },
+ [GCC_MSG_RAM_BCR] = { 0x0e40 },
+ [GCC_TLMM_BCR] = { 0x0e80 },
+ [GCC_MPM_BCR] = { 0x0ec0 },
+ [GCC_SEC_CTRL_BCR] = { 0x0f40 },
+ [GCC_SPMI_BCR] = { 0x0fc0 },
+ [GCC_SPDM_BCR] = { 0x1000 },
+ [GCC_CE1_BCR] = { 0x1040 },
+ [GCC_CE2_BCR] = { 0x1080 },
+ [GCC_BIMC_BCR] = { 0x1100 },
+ [GCC_MPM_NON_AHB_RESET] = { 0x0ec4, 2 },
+ [GCC_MPM_AHB_RESET] = { 0x0ec4, 1 },
+ [GCC_SNOC_BUS_TIMEOUT0_BCR] = { 0x1240 },
+ [GCC_SNOC_BUS_TIMEOUT2_BCR] = { 0x1248 },
+ [GCC_PNOC_BUS_TIMEOUT0_BCR] = { 0x1280 },
+ [GCC_PNOC_BUS_TIMEOUT1_BCR] = { 0x1288 },
+ [GCC_PNOC_BUS_TIMEOUT2_BCR] = { 0x1290 },
+ [GCC_PNOC_BUS_TIMEOUT3_BCR] = { 0x1298 },
+ [GCC_PNOC_BUS_TIMEOUT4_BCR] = { 0x12a0 },
+ [GCC_CNOC_BUS_TIMEOUT0_BCR] = { 0x12c0 },
+ [GCC_CNOC_BUS_TIMEOUT1_BCR] = { 0x12c8 },
+ [GCC_CNOC_BUS_TIMEOUT2_BCR] = { 0x12d0 },
+ [GCC_CNOC_BUS_TIMEOUT3_BCR] = { 0x12d8 },
+ [GCC_CNOC_BUS_TIMEOUT4_BCR] = { 0x12e0 },
+ [GCC_CNOC_BUS_TIMEOUT5_BCR] = { 0x12e8 },
+ [GCC_CNOC_BUS_TIMEOUT6_BCR] = { 0x12f0 },
+ [GCC_DEHR_BCR] = { 0x1300 },
+ [GCC_RBCPR_BCR] = { 0x1380 },
+ [GCC_MSS_RESTART] = { 0x1680 },
+ [GCC_LPASS_RESTART] = { 0x16c0 },
+ [GCC_WCSS_RESTART] = { 0x1700 },
+ [GCC_VENUS_RESTART] = { 0x1740 },
+};
+
+static const struct regmap_config gcc_msm8974_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1fc0,
+ .fast_io = true,
+};
+
+static const struct of_device_id gcc_msm8974_match_table[] = {
+ { .compatible = "qcom,gcc-msm8974" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8974_match_table);
+
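+/*
+ * Bundles the reset controller, the clk_onecell_data handed to the DT clock
+ * provider and the table of registered clks (flexible array member) in a
+ * single allocation.
+ */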
+struct qcom_cc {
+ struct qcom_reset_controller reset;
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+
+static int gcc_msm8974_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct resource *res;
+ int i, ret;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ struct clk_onecell_data *data;
+ struct clk **clks;
+ struct regmap *regmap;
+ size_t num_clks;
+ struct qcom_reset_controller *reset;
+ struct qcom_cc *cc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &gcc_msm8974_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ num_clks = ARRAY_SIZE(gcc_msm8974_clocks);
+ cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*clks) * num_clks,
+ GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ clks = cc->clks;
+ data = &cc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ /* Temporary until RPM clocks supported */
+ clk = clk_register_fixed_rate(dev, "xo", NULL, CLK_IS_ROOT, 19200000);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ /* Should move to DT node? */
+ clk = clk_register_fixed_rate(dev, "sleep_clk_src", NULL,
+ CLK_IS_ROOT, 32768);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ for (i = 0; i < num_clks; i++) {
+ if (!gcc_msm8974_clocks[i])
+ continue;
+ clk = devm_clk_register_regmap(dev, gcc_msm8974_clocks[i]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+ if (ret)
+ return ret;
+
+ reset = &cc->reset;
+ reset->rcdev.of_node = dev->of_node;
+ reset->rcdev.ops = &qcom_reset_ops;
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.nr_resets = ARRAY_SIZE(gcc_msm8974_resets);
+ reset->regmap = regmap;
+ reset->reset_map = gcc_msm8974_resets;
+ platform_set_drvdata(pdev, &reset->rcdev);
+
+ ret = reset_controller_register(&reset->rcdev);
+ if (ret)
+ of_clk_del_provider(dev->of_node);
+
+ return ret;
+}
+
+static int gcc_msm8974_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ reset_controller_unregister(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static struct platform_driver gcc_msm8974_driver = {
+ .probe = gcc_msm8974_probe,
+ .remove = gcc_msm8974_remove,
+ .driver = {
+ .name = "gcc-msm8974",
+ .owner = THIS_MODULE,
+ .of_match_table = gcc_msm8974_match_table,
+ },
+};
+
+static int __init gcc_msm8974_init(void)
+{
+ return platform_driver_register(&gcc_msm8974_driver);
+}
+core_initcall(gcc_msm8974_init);
+
+static void __exit gcc_msm8974_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8974_driver);
+}
+module_exit(gcc_msm8974_exit);
+
+MODULE_DESCRIPTION("QCOM GCC MSM8974 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-msm8974");
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
new file mode 100644
index 000000000000..f9b59c7e48e9
--- /dev/null
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -0,0 +1,2321 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,mmcc-msm8960.h>
+#include <dt-bindings/reset/qcom,mmcc-msm8960.h>
+
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+#define P_PXO 0
+#define P_PLL8 1
+#define P_PLL2 2
+#define P_PLL3 3
+
+static u8 mmcc_pxo_pll8_pll2_map[] = {
+ [P_PXO] = 0,
+ [P_PLL8] = 2,
+ [P_PLL2] = 1,
+};
+
+static const char *mmcc_pxo_pll8_pll2[] = {
+ "pxo",
+ "pll8_vote",
+ "pll2",
+};
+
+static u8 mmcc_pxo_pll8_pll2_pll3_map[] = {
+ [P_PXO] = 0,
+ [P_PLL8] = 2,
+ [P_PLL2] = 1,
+ [P_PLL3] = 3,
+};
+
+static const char *mmcc_pxo_pll8_pll2_pll3[] = {
+ "pxo",
+ "pll8_vote",
+ "pll2",
+ "pll3",
+};
+
+static struct clk_pll pll2 = {
+ .l_reg = 0x320,
+ .m_reg = 0x324,
+ .n_reg = 0x328,
+ .config_reg = 0x32c,
+ .mode_reg = 0x31c,
+ .status_reg = 0x334,
+ .status_bit = 16,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pll2",
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
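+/*
+ * Frequency tables list { rate, source, pre-divider, M, N }.
+ * M = N = 0 means the M/N counter is bypassed for that rate.
+ */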
+static struct freq_tbl clk_tbl_cam[] = {
+ { 6000000, P_PLL8, 4, 1, 16 },
+ { 8000000, P_PLL8, 4, 1, 12 },
+ { 12000000, P_PLL8, 4, 1, 8 },
+ { 16000000, P_PLL8, 4, 1, 6 },
+ { 19200000, P_PLL8, 4, 1, 5 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 32000000, P_PLL8, 4, 1, 3 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 64000000, P_PLL8, 3, 1, 2 },
+ { 96000000, P_PLL8, 4, 0, 0 },
+ { 128000000, P_PLL8, 3, 0, 0 },
+ { }
+};
+
+static struct clk_rcg camclk0_src = {
+ .ns_reg = 0x0148,
+ .md_reg = 0x0144,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 8,
+ .reset_in_cc = true,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_cam,
+ .clkr = {
+ .enable_reg = 0x0140,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "camclk0_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
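+/*
+ * Branch (gate) clocks: enable_reg/enable_mask controls the gate and
+ * halt_reg/halt_bit is the status bit used to confirm the branch state.
+ */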
+static struct clk_branch camclk0_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x0140,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camclk0_clk",
+ .parent_names = (const char *[]){ "camclk0_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_rcg camclk1_src = {
+ .ns_reg = 0x015c,
+ .md_reg = 0x0158,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 8,
+ .reset_in_cc = true,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_cam,
+ .clkr = {
+ .enable_reg = 0x0154,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "camclk1_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch camclk1_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 16,
+ .clkr = {
+ .enable_reg = 0x0154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camclk1_clk",
+ .parent_names = (const char *[]){ "camclk1_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_rcg camclk2_src = {
+ .ns_reg = 0x0228,
+ .md_reg = 0x0224,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 8,
+ .reset_in_cc = true,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_cam,
+ .clkr = {
+ .enable_reg = 0x0220,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "camclk2_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch camclk2_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 16,
+ .clkr = {
+ .enable_reg = 0x0220,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camclk2_clk",
+ .parent_names = (const char *[]){ "camclk2_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_csi[] = {
+ { 27000000, P_PXO, 1, 0, 0 },
+ { 85330000, P_PLL8, 1, 2, 9 },
+ { 177780000, P_PLL2, 1, 2, 9 },
+ { }
+};
+
+static struct clk_rcg csi0_src = {
+ .ns_reg = 0x0048,
+ .md_reg = 0x0044,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_csi,
+ .clkr = {
+ .enable_reg = 0x0040,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi0_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch csi0_clk = {
+ .halt_reg = 0x01cc,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x0040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "csi0_src" },
+ .num_parents = 1,
+ .name = "csi0_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch csi0_phy_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x0040,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "csi0_src" },
+ .num_parents = 1,
+ .name = "csi0_phy_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg csi1_src = {
+ .ns_reg = 0x0010,
+ .md_reg = 0x0028,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_csi,
+ .clkr = {
+ .enable_reg = 0x0024,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi1_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch csi1_clk = {
+ .halt_reg = 0x01cc,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x0024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "csi1_src" },
+ .num_parents = 1,
+ .name = "csi1_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch csi1_phy_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x0024,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "csi1_src" },
+ .num_parents = 1,
+ .name = "csi1_phy_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg csi2_src = {
+ .ns_reg = 0x0234,
+ .md_reg = 0x022c,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_csi,
+ .clkr = {
+ .enable_reg = 0x022c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi2_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch csi2_clk = {
+ .halt_reg = 0x01cc,
+ .halt_bit = 29,
+ .clkr = {
+ .enable_reg = 0x022c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "csi2_src" },
+ .num_parents = 1,
+ .name = "csi2_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch csi2_phy_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 29,
+ .clkr = {
+ .enable_reg = 0x022c,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "csi2_src" },
+ .num_parents = 1,
+ .name = "csi2_phy_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
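+/*
+ * The pix/rdi clocks are 3-input muxes built from two cascaded selects:
+ * s_reg/s_mask chooses between csi0 and csi1, and s2_reg/s2_mask chooses
+ * between that result and csi2.
+ */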
+struct clk_pix_rdi {
+ u32 s_reg;
+ u32 s_mask;
+ u32 s2_reg;
+ u32 s2_mask;
+ struct clk_regmap clkr;
+};
+
+#define to_clk_pix_rdi(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_pix_rdi, clkr)
+
+static int pix_rdi_set_parent(struct clk_hw *hw, u8 index)
+{
+ int i;
+ int ret = 0;
+ u32 val;
+ struct clk_pix_rdi *rdi = to_clk_pix_rdi(hw);
+ struct clk *clk = hw->clk;
+ int num_parents = __clk_get_num_parents(hw->clk);
+
+ /*
+ * These clocks select three inputs via two muxes. One mux selects
+ * between csi0 and csi1 and the second mux selects between that mux's
+ * output and csi2. The source and destination selections for each
+ * mux must be clocking for the switch to succeed so just turn on
+ * all three sources because it's easier than figuring out what source
+ * needs to be on at what time.
+ */
+ for (i = 0; i < num_parents; i++) {
+ ret = clk_prepare_enable(clk_get_parent_by_index(clk, i));
+ if (ret)
+ goto err;
+ }
+
+ if (index == 2)
+ val = rdi->s2_mask;
+ else
+ val = 0;
+ regmap_update_bits(rdi->clkr.regmap, rdi->s2_reg, rdi->s2_mask, val);
+ /*
+ * Wait at least 6 cycles of slowest clock
+ * for the glitch-free MUX to fully switch sources.
+ */
+ udelay(1);
+
+ if (index == 1)
+ val = rdi->s_mask;
+ else
+ val = 0;
+ regmap_update_bits(rdi->clkr.regmap, rdi->s_reg, rdi->s_mask, val);
+ /*
+ * Wait at least 6 cycles of slowest clock
+ * for the glitch-free MUX to fully switch sources.
+ */
+ udelay(1);
+
+err:
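+ /* Drop the temporary parent enables taken above (error path included) */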
+ for (i--; i >= 0; i--)
+ clk_disable_unprepare(clk_get_parent_by_index(clk, i));
+
+ return ret;
+}
+
+static u8 pix_rdi_get_parent(struct clk_hw *hw)
+{
+ u32 val;
+ struct clk_pix_rdi *rdi = to_clk_pix_rdi(hw);
+
+
+ regmap_read(rdi->clkr.regmap, rdi->s2_reg, &val);
+ if (val & rdi->s2_mask)
+ return 2;
+
+ regmap_read(rdi->clkr.regmap, rdi->s_reg, &val);
+ if (val & rdi->s_mask)
+ return 1;
+
+ return 0;
+}
+
+static const struct clk_ops clk_ops_pix_rdi = {
+ .enable = clk_enable_regmap,
+ .disable = clk_disable_regmap,
+ .set_parent = pix_rdi_set_parent,
+ .get_parent = pix_rdi_get_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+static const char *pix_rdi_parents[] = {
+ "csi0_clk",
+ "csi1_clk",
+ "csi2_clk",
+};
+
+static struct clk_pix_rdi csi_pix_clk = {
+ .s_reg = 0x0058,
+ .s_mask = BIT(25),
+ .s2_reg = 0x0238,
+ .s2_mask = BIT(13),
+ .clkr = {
+ .enable_reg = 0x0058,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi_pix_clk",
+ .parent_names = pix_rdi_parents,
+ .num_parents = 3,
+ .ops = &clk_ops_pix_rdi,
+ },
+ },
+};
+
+static struct clk_pix_rdi csi_pix1_clk = {
+ .s_reg = 0x0238,
+ .s_mask = BIT(8),
+ .s2_reg = 0x0238,
+ .s2_mask = BIT(9),
+ .clkr = {
+ .enable_reg = 0x0238,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi_pix1_clk",
+ .parent_names = pix_rdi_parents,
+ .num_parents = 3,
+ .ops = &clk_ops_pix_rdi,
+ },
+ },
+};
+
+static struct clk_pix_rdi csi_rdi_clk = {
+ .s_reg = 0x0058,
+ .s_mask = BIT(12),
+ .s2_reg = 0x0238,
+ .s2_mask = BIT(12),
+ .clkr = {
+ .enable_reg = 0x0058,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi_rdi_clk",
+ .parent_names = pix_rdi_parents,
+ .num_parents = 3,
+ .ops = &clk_ops_pix_rdi,
+ },
+ },
+};
+
+static struct clk_pix_rdi csi_rdi1_clk = {
+ .s_reg = 0x0238,
+ .s_mask = BIT(0),
+ .s2_reg = 0x0238,
+ .s2_mask = BIT(1),
+ .clkr = {
+ .enable_reg = 0x0238,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi_rdi1_clk",
+ .parent_names = pix_rdi_parents,
+ .num_parents = 3,
+ .ops = &clk_ops_pix_rdi,
+ },
+ },
+};
+
+static struct clk_pix_rdi csi_rdi2_clk = {
+ .s_reg = 0x0238,
+ .s_mask = BIT(4),
+ .s2_reg = 0x0238,
+ .s2_mask = BIT(5),
+ .clkr = {
+ .enable_reg = 0x0238,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi_rdi2_clk",
+ .parent_names = pix_rdi_parents,
+ .num_parents = 3,
+ .ops = &clk_ops_pix_rdi,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_csiphytimer[] = {
+ { 85330000, P_PLL8, 1, 2, 9 },
+ { 177780000, P_PLL2, 1, 2, 9 },
+ { }
+};
+
+static struct clk_rcg csiphytimer_src = {
+ .ns_reg = 0x0168,
+ .md_reg = 0x0164,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 8,
+ .reset_in_cc = true,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_csiphytimer,
+ .clkr = {
+ .enable_reg = 0x0160,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "csiphytimer_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static const char *csixphy_timer_src[] = { "csiphytimer_src" };
+
+static struct clk_branch csiphy0_timer_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 17,
+ .clkr = {
+ .enable_reg = 0x0160,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = csixphy_timer_src,
+ .num_parents = 1,
+ .name = "csiphy0_timer_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch csiphy1_timer_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 18,
+ .clkr = {
+ .enable_reg = 0x0160,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = csixphy_timer_src,
+ .num_parents = 1,
+ .name = "csiphy1_timer_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch csiphy2_timer_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 30,
+ .clkr = {
+ .enable_reg = 0x0160,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = csixphy_timer_src,
+ .num_parents = 1,
+ .name = "csiphy2_timer_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_gfx2d[] = {
+ { 27000000, P_PXO, 1, 0 },
+ { 48000000, P_PLL8, 1, 8 },
+ { 54857000, P_PLL8, 1, 7 },
+ { 64000000, P_PLL8, 1, 6 },
+ { 76800000, P_PLL8, 1, 5 },
+ { 96000000, P_PLL8, 1, 4 },
+ { 128000000, P_PLL8, 1, 3 },
+ { 145455000, P_PLL2, 2, 11 },
+ { 160000000, P_PLL2, 1, 5 },
+ { 177778000, P_PLL2, 2, 9 },
+ { 200000000, P_PLL2, 1, 4 },
+ { 228571000, P_PLL2, 2, 7 },
+ { }
+};
+
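+/*
+ * Dual-banked ("dynamic") root clock generators: two sets of M/N and source
+ * fields, with mux_sel_bit selecting the active bank so the inactive bank
+ * can be reprogrammed before switching.
+ */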
+static struct clk_dyn_rcg gfx2d0_src = {
+ .ns_reg = 0x0070,
+ .md_reg[0] = 0x0064,
+ .md_reg[1] = 0x0068,
+ .mn[0] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 25,
+ .mnctr_mode_shift = 9,
+ .n_val_shift = 20,
+ .m_val_shift = 4,
+ .width = 4,
+ },
+ .mn[1] = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 24,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 16,
+ .m_val_shift = 4,
+ .width = 4,
+ },
+ .s[0] = {
+ .src_sel_shift = 3,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .mux_sel_bit = 11,
+ .freq_tbl = clk_tbl_gfx2d,
+ .clkr = {
+ .enable_reg = 0x0060,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx2d0_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch gfx2d0_clk = {
+ .halt_reg = 0x01c8,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x0060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx2d0_clk",
+ .parent_names = (const char *[]){ "gfx2d0_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_dyn_rcg gfx2d1_src = {
+ .ns_reg = 0x007c,
+ .md_reg[0] = 0x0078,
+ .md_reg[1] = 0x006c,
+ .mn[0] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 25,
+ .mnctr_mode_shift = 9,
+ .n_val_shift = 20,
+ .m_val_shift = 4,
+ .width = 4,
+ },
+ .mn[1] = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 24,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 16,
+ .m_val_shift = 4,
+ .width = 4,
+ },
+ .s[0] = {
+ .src_sel_shift = 3,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .mux_sel_bit = 11,
+ .freq_tbl = clk_tbl_gfx2d,
+ .clkr = {
+ .enable_reg = 0x0074,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx2d1_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch gfx2d1_clk = {
+ .halt_reg = 0x01c8,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x0074,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx2d1_clk",
+ .parent_names = (const char *[]){ "gfx2d1_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_gfx3d[] = {
+ { 27000000, P_PXO, 1, 0 },
+ { 48000000, P_PLL8, 1, 8 },
+ { 54857000, P_PLL8, 1, 7 },
+ { 64000000, P_PLL8, 1, 6 },
+ { 76800000, P_PLL8, 1, 5 },
+ { 96000000, P_PLL8, 1, 4 },
+ { 128000000, P_PLL8, 1, 3 },
+ { 145455000, P_PLL2, 2, 11 },
+ { 160000000, P_PLL2, 1, 5 },
+ { 177778000, P_PLL2, 2, 9 },
+ { 200000000, P_PLL2, 1, 4 },
+ { 228571000, P_PLL2, 2, 7 },
+ { 266667000, P_PLL2, 1, 3 },
+ { 300000000, P_PLL3, 1, 4 },
+ { 320000000, P_PLL2, 2, 5 },
+ { 400000000, P_PLL2, 1, 2 },
+ { }
+};
+
+static struct clk_dyn_rcg gfx3d_src = {
+ .ns_reg = 0x008c,
+ .md_reg[0] = 0x0084,
+ .md_reg[1] = 0x0088,
+ .mn[0] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 25,
+ .mnctr_mode_shift = 9,
+ .n_val_shift = 18,
+ .m_val_shift = 4,
+ .width = 4,
+ },
+ .mn[1] = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 24,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 14,
+ .m_val_shift = 4,
+ .width = 4,
+ },
+ .s[0] = {
+ .src_sel_shift = 3,
+ .parent_map = mmcc_pxo_pll8_pll2_pll3_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_pll3_map,
+ },
+ .mux_sel_bit = 11,
+ .freq_tbl = clk_tbl_gfx3d,
+ .clkr = {
+ .enable_reg = 0x0080,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx3d_src",
+ .parent_names = mmcc_pxo_pll8_pll2_pll3,
+ .num_parents = 4,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch gfx3d_clk = {
+ .halt_reg = 0x01c8,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x0080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk",
+ .parent_names = (const char *[]){ "gfx3d_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_ijpeg[] = {
+ { 27000000, P_PXO, 1, 0, 0 },
+ { 36570000, P_PLL8, 1, 2, 21 },
+ { 54860000, P_PLL8, 7, 0, 0 },
+ { 96000000, P_PLL8, 4, 0, 0 },
+ { 109710000, P_PLL8, 1, 2, 7 },
+ { 128000000, P_PLL8, 3, 0, 0 },
+ { 153600000, P_PLL8, 1, 2, 5 },
+ { 200000000, P_PLL2, 4, 0, 0 },
+ { 228571000, P_PLL2, 1, 2, 7 },
+ { 266667000, P_PLL2, 1, 1, 3 },
+ { 320000000, P_PLL2, 1, 2, 5 },
+ { }
+};
+
+static struct clk_rcg ijpeg_src = {
+ .ns_reg = 0x00a0,
+ .md_reg = 0x009c,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 16,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 12,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_ijpeg,
+ .clkr = {
+ .enable_reg = 0x0098,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "ijpeg_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch ijpeg_clk = {
+ .halt_reg = 0x01c8,
+ .halt_bit = 24,
+ .clkr = {
+ .enable_reg = 0x0098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "ijpeg_clk",
+ .parent_names = (const char *[]){ "ijpeg_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_jpegd[] = {
+ { 64000000, P_PLL8, 6 },
+ { 76800000, P_PLL8, 5 },
+ { 96000000, P_PLL8, 4 },
+ { 160000000, P_PLL2, 5 },
+ { 200000000, P_PLL2, 4 },
+ { }
+};
+
+static struct clk_rcg jpegd_src = {
+ .ns_reg = 0x00ac,
+ .p = {
+ .pre_div_shift = 12,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_jpegd,
+ .clkr = {
+ .enable_reg = 0x00a4,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "jpegd_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch jpegd_clk = {
+ .halt_reg = 0x01c8,
+ .halt_bit = 19,
+ .clkr = {
+ .enable_reg = 0x00a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "jpegd_clk",
+ .parent_names = (const char *[]){ "jpegd_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_mdp[] = {
+ { 9600000, P_PLL8, 1, 1, 40 },
+ { 13710000, P_PLL8, 1, 1, 28 },
+ { 27000000, P_PXO, 1, 0, 0 },
+ { 29540000, P_PLL8, 1, 1, 13 },
+ { 34910000, P_PLL8, 1, 1, 11 },
+ { 38400000, P_PLL8, 1, 1, 10 },
+ { 59080000, P_PLL8, 1, 2, 13 },
+ { 76800000, P_PLL8, 1, 1, 5 },
+ { 85330000, P_PLL8, 1, 2, 9 },
+ { 96000000, P_PLL8, 1, 1, 4 },
+ { 128000000, P_PLL8, 1, 1, 3 },
+ { 160000000, P_PLL2, 1, 1, 5 },
+ { 177780000, P_PLL2, 1, 2, 9 },
+ { 200000000, P_PLL2, 1, 1, 4 },
+ { 228571000, P_PLL2, 1, 2, 7 },
+ { 266667000, P_PLL2, 1, 1, 3 },
+ { }
+};
+
+static struct clk_dyn_rcg mdp_src = {
+ .ns_reg = 0x00d0,
+ .md_reg[0] = 0x00c4,
+ .md_reg[1] = 0x00c8,
+ .mn[0] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 31,
+ .mnctr_mode_shift = 9,
+ .n_val_shift = 22,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .mn[1] = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 30,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 14,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .s[0] = {
+ .src_sel_shift = 3,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .mux_sel_bit = 11,
+ .freq_tbl = clk_tbl_mdp,
+ .clkr = {
+ .enable_reg = 0x00c0,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdp_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch mdp_clk = {
+ .halt_reg = 0x01d0,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x00c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdp_clk",
+ .parent_names = (const char *[]){ "mdp_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdp_lut_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x016c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "mdp_clk" },
+ .num_parents = 1,
+ .name = "mdp_lut_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdp_vsync_clk = {
+ .halt_reg = 0x01cc,
+ .halt_bit = 22,
+ .clkr = {
+ .enable_reg = 0x0058,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdp_vsync_clk",
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_rot[] = {
+ { 27000000, P_PXO, 1 },
+ { 29540000, P_PLL8, 13 },
+ { 32000000, P_PLL8, 12 },
+ { 38400000, P_PLL8, 10 },
+ { 48000000, P_PLL8, 8 },
+ { 54860000, P_PLL8, 7 },
+ { 64000000, P_PLL8, 6 },
+ { 76800000, P_PLL8, 5 },
+ { 96000000, P_PLL8, 4 },
+ { 100000000, P_PLL2, 8 },
+ { 114290000, P_PLL2, 7 },
+ { 133330000, P_PLL2, 6 },
+ { 160000000, P_PLL2, 5 },
+ { 200000000, P_PLL2, 4 },
+ { }
+};
+
+static struct clk_dyn_rcg rot_src = {
+ .ns_reg = 0x00e8,
+ .p[0] = {
+ .pre_div_shift = 22,
+ .pre_div_width = 4,
+ },
+ .p[1] = {
+ .pre_div_shift = 26,
+ .pre_div_width = 4,
+ },
+ .s[0] = {
+ .src_sel_shift = 16,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 19,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .mux_sel_bit = 30,
+ .freq_tbl = clk_tbl_rot,
+ .clkr = {
+ .enable_reg = 0x00e0,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "rot_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch rot_clk = {
+ .halt_reg = 0x01d0,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x00e0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "rot_clk",
+ .parent_names = (const char *[]){ "rot_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+#define P_HDMI_PLL 1
+
+static u8 mmcc_pxo_hdmi_map[] = {
+ [P_PXO] = 0,
+ [P_HDMI_PLL] = 2,
+};
+
+static const char *mmcc_pxo_hdmi[] = {
+ "pxo",
+ "hdmi_pll",
+};
+
+static struct freq_tbl clk_tbl_tv[] = {
+ { 25200000, P_HDMI_PLL, 1, 0, 0 },
+ { 27000000, P_HDMI_PLL, 1, 0, 0 },
+ { 27030000, P_HDMI_PLL, 1, 0, 0 },
+ { 74250000, P_HDMI_PLL, 1, 0, 0 },
+ { 108000000, P_HDMI_PLL, 1, 0, 0 },
+ { 148500000, P_HDMI_PLL, 1, 0, 0 },
+ { }
+};
+
+static struct clk_rcg tv_src = {
+ .ns_reg = 0x00f4,
+ .md_reg = 0x00f0,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 16,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_hdmi_map,
+ },
+ .freq_tbl = clk_tbl_tv,
+ .clkr = {
+ .enable_reg = 0x00ec,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "tv_src",
+ .parent_names = mmcc_pxo_hdmi,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const char *tv_src_name[] = { "tv_src" };
+
+static struct clk_branch tv_enc_clk = {
+ .halt_reg = 0x01d4,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x00ec,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = tv_src_name,
+ .num_parents = 1,
+ .name = "tv_enc_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch tv_dac_clk = {
+ .halt_reg = 0x01d4,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x00ec,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = tv_src_name,
+ .num_parents = 1,
+ .name = "tv_dac_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdp_tv_clk = {
+ .halt_reg = 0x01d4,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x00ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = tv_src_name,
+ .num_parents = 1,
+ .name = "mdp_tv_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch hdmi_tv_clk = {
+ .halt_reg = 0x01d4,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x00ec,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = tv_src_name,
+ .num_parents = 1,
+ .name = "hdmi_tv_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch hdmi_app_clk = {
+ .halt_reg = 0x01cc,
+ .halt_bit = 25,
+ .clkr = {
+ .enable_reg = 0x005c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hdmi_app_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_vcodec[] = {
+ { 27000000, P_PXO, 1, 0 },
+ { 32000000, P_PLL8, 1, 12 },
+ { 48000000, P_PLL8, 1, 8 },
+ { 54860000, P_PLL8, 1, 7 },
+ { 96000000, P_PLL8, 1, 4 },
+ { 133330000, P_PLL2, 1, 6 },
+ { 200000000, P_PLL2, 1, 4 },
+ { 228570000, P_PLL2, 2, 7 },
+ { 266670000, P_PLL2, 1, 3 },
+ { }
+};
+
+static struct clk_dyn_rcg vcodec_src = {
+ .ns_reg = 0x0100,
+ .md_reg[0] = 0x00fc,
+ .md_reg[1] = 0x0128,
+ .mn[0] = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 31,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 11,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .mn[1] = {
+ .mnctr_en_bit = 10,
+ .mnctr_reset_bit = 30,
+ .mnctr_mode_shift = 11,
+ .n_val_shift = 19,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .s[0] = {
+ .src_sel_shift = 27,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .mux_sel_bit = 13,
+ .freq_tbl = clk_tbl_vcodec,
+ .clkr = {
+ .enable_reg = 0x00f8,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "vcodec_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch vcodec_clk = {
+ .halt_reg = 0x01d0,
+ .halt_bit = 29,
+ .clkr = {
+ .enable_reg = 0x00f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "vcodec_clk",
+ .parent_names = (const char *[]){ "vcodec_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_vpe[] = {
+ { 27000000, P_PXO, 1 },
+ { 34909000, P_PLL8, 11 },
+ { 38400000, P_PLL8, 10 },
+ { 64000000, P_PLL8, 6 },
+ { 76800000, P_PLL8, 5 },
+ { 96000000, P_PLL8, 4 },
+ { 100000000, P_PLL2, 8 },
+ { 160000000, P_PLL2, 5 },
+ { }
+};
+
+static struct clk_rcg vpe_src = {
+ .ns_reg = 0x0118,
+ .p = {
+ .pre_div_shift = 12,
+ .pre_div_width = 4,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_vpe,
+ .clkr = {
+ .enable_reg = 0x0110,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "vpe_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch vpe_clk = {
+ .halt_reg = 0x01c8,
+ .halt_bit = 28,
+ .clkr = {
+ .enable_reg = 0x0110,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "vpe_clk",
+ .parent_names = (const char *[]){ "vpe_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_vfe[] = {
+ { 13960000, P_PLL8, 1, 2, 55 },
+ { 27000000, P_PXO, 1, 0, 0 },
+ { 36570000, P_PLL8, 1, 2, 21 },
+ { 38400000, P_PLL8, 2, 1, 5 },
+ { 45180000, P_PLL8, 1, 2, 17 },
+ { 48000000, P_PLL8, 2, 1, 4 },
+ { 54860000, P_PLL8, 1, 1, 7 },
+ { 64000000, P_PLL8, 2, 1, 3 },
+ { 76800000, P_PLL8, 1, 1, 5 },
+ { 96000000, P_PLL8, 2, 1, 2 },
+ { 109710000, P_PLL8, 1, 2, 7 },
+ { 128000000, P_PLL8, 1, 1, 3 },
+ { 153600000, P_PLL8, 1, 2, 5 },
+ { 200000000, P_PLL2, 2, 1, 2 },
+ { 228570000, P_PLL2, 1, 2, 7 },
+ { 266667000, P_PLL2, 1, 1, 3 },
+ { 320000000, P_PLL2, 1, 2, 5 },
+ { }
+};
+
+static struct clk_rcg vfe_src = {
+ .ns_reg = 0x0108,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 16,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 10,
+ .pre_div_width = 1,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_vfe,
+ .clkr = {
+ .enable_reg = 0x0104,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "vfe_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch vfe_clk = {
+ .halt_reg = 0x01cc,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x0104,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "vfe_clk",
+ .parent_names = (const char *[]){ "vfe_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch vfe_csi_clk = {
+ .halt_reg = 0x01cc,
+ .halt_bit = 8,
+ .clkr = {
+ .enable_reg = 0x0104,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "vfe_src" },
+ .num_parents = 1,
+ .name = "vfe_csi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gmem_axi_clk = {
+ .halt_reg = 0x01d8,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x0018,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gmem_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch ijpeg_axi_clk = {
+ .hwcg_reg = 0x0018,
+ .hwcg_bit = 11,
+ .halt_reg = 0x01d8,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x0018,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "ijpeg_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch mmss_imem_axi_clk = {
+ .hwcg_reg = 0x0018,
+ .hwcg_bit = 15,
+ .halt_reg = 0x01d8,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x0018,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_imem_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch jpegd_axi_clk = {
+ .halt_reg = 0x01d8,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x0018,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "jpegd_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch vcodec_axi_b_clk = {
+ .hwcg_reg = 0x0114,
+ .hwcg_bit = 22,
+ .halt_reg = 0x01e8,
+ .halt_bit = 25,
+ .clkr = {
+ .enable_reg = 0x0114,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "vcodec_axi_b_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch vcodec_axi_a_clk = {
+ .hwcg_reg = 0x0114,
+ .hwcg_bit = 24,
+ .halt_reg = 0x01e8,
+ .halt_bit = 26,
+ .clkr = {
+ .enable_reg = 0x0114,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "vcodec_axi_a_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch vcodec_axi_clk = {
+ .hwcg_reg = 0x0018,
+ .hwcg_bit = 13,
+ .halt_reg = 0x01d8,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x0018,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "vcodec_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch vfe_axi_clk = {
+ .halt_reg = 0x01d8,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x0018,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "vfe_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch mdp_axi_clk = {
+ .hwcg_reg = 0x0018,
+ .hwcg_bit = 16,
+ .halt_reg = 0x01d8,
+ .halt_bit = 8,
+ .clkr = {
+ .enable_reg = 0x0018,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdp_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch rot_axi_clk = {
+ .hwcg_reg = 0x0020,
+ .hwcg_bit = 25,
+ .halt_reg = 0x01d8,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x0020,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "rot_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch vpe_axi_clk = {
+ .hwcg_reg = 0x0020,
+ .hwcg_bit = 27,
+ .halt_reg = 0x01d8,
+ .halt_bit = 1,
+ .clkr = {
+ .enable_reg = 0x0020,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "vpe_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gfx3d_axi_clk = {
+ .hwcg_reg = 0x0244,
+ .hwcg_bit = 24,
+ .halt_reg = 0x0240,
+ .halt_bit = 30,
+ .clkr = {
+ .enable_reg = 0x0244,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx3d_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch amp_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 18,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "amp_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch csi_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 16,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT
+ },
+ },
+};
+
+static struct clk_branch dsi_m_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 19,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi_m_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch dsi_s_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 20,
+ .halt_reg = 0x01dc,
+ .halt_bit = 21,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi_s_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch dsi2_m_ahb_clk = {
+ .halt_reg = 0x01d8,
+ .halt_bit = 18,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi2_m_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT
+ },
+ },
+};
+
+static struct clk_branch dsi2_s_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 15,
+ .halt_reg = 0x01dc,
+ .halt_bit = 20,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi2_s_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gfx2d0_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 28,
+ .halt_reg = 0x01dc,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx2d0_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gfx2d1_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 29,
+ .halt_reg = 0x01dc,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx2d1_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gfx3d_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 27,
+ .halt_reg = 0x01dc,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx3d_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch hdmi_m_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 21,
+ .halt_reg = 0x01dc,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_m_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch hdmi_s_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 22,
+ .halt_reg = 0x01dc,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_s_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch ijpeg_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "ijpeg_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT
+ },
+ },
+};
+
+static struct clk_branch mmss_imem_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 12,
+ .halt_reg = 0x01dc,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_imem_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT
+ },
+ },
+};
+
+static struct clk_branch jpegd_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "jpegd_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch mdp_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdp_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch rot_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "rot_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT
+ },
+ },
+};
+
+static struct clk_branch smmu_ahb_clk = {
+ .hwcg_reg = 0x0008,
+ .hwcg_bit = 26,
+ .halt_reg = 0x01dc,
+ .halt_bit = 22,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "smmu_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch tv_enc_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 23,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "tv_enc_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch vcodec_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 26,
+ .halt_reg = 0x01dc,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "vcodec_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch vfe_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "vfe_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch vpe_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "vpe_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_regmap *mmcc_msm8960_clks[] = {
+ [TV_ENC_AHB_CLK] = &tv_enc_ahb_clk.clkr,
+ [AMP_AHB_CLK] = &amp_ahb_clk.clkr,
+ [DSI2_S_AHB_CLK] = &dsi2_s_ahb_clk.clkr,
+ [JPEGD_AHB_CLK] = &jpegd_ahb_clk.clkr,
+ [GFX2D0_AHB_CLK] = &gfx2d0_ahb_clk.clkr,
+ [DSI_S_AHB_CLK] = &dsi_s_ahb_clk.clkr,
+ [DSI2_M_AHB_CLK] = &dsi2_m_ahb_clk.clkr,
+ [VPE_AHB_CLK] = &vpe_ahb_clk.clkr,
+ [SMMU_AHB_CLK] = &smmu_ahb_clk.clkr,
+ [HDMI_M_AHB_CLK] = &hdmi_m_ahb_clk.clkr,
+ [VFE_AHB_CLK] = &vfe_ahb_clk.clkr,
+ [ROT_AHB_CLK] = &rot_ahb_clk.clkr,
+ [VCODEC_AHB_CLK] = &vcodec_ahb_clk.clkr,
+ [MDP_AHB_CLK] = &mdp_ahb_clk.clkr,
+ [DSI_M_AHB_CLK] = &dsi_m_ahb_clk.clkr,
+ [CSI_AHB_CLK] = &csi_ahb_clk.clkr,
+ [MMSS_IMEM_AHB_CLK] = &mmss_imem_ahb_clk.clkr,
+ [IJPEG_AHB_CLK] = &ijpeg_ahb_clk.clkr,
+ [HDMI_S_AHB_CLK] = &hdmi_s_ahb_clk.clkr,
+ [GFX3D_AHB_CLK] = &gfx3d_ahb_clk.clkr,
+ [GFX2D1_AHB_CLK] = &gfx2d1_ahb_clk.clkr,
+ [JPEGD_AXI_CLK] = &jpegd_axi_clk.clkr,
+ [GMEM_AXI_CLK] = &gmem_axi_clk.clkr,
+ [MDP_AXI_CLK] = &mdp_axi_clk.clkr,
+ [MMSS_IMEM_AXI_CLK] = &mmss_imem_axi_clk.clkr,
+ [IJPEG_AXI_CLK] = &ijpeg_axi_clk.clkr,
+ [GFX3D_AXI_CLK] = &gfx3d_axi_clk.clkr,
+ [VCODEC_AXI_CLK] = &vcodec_axi_clk.clkr,
+ [VFE_AXI_CLK] = &vfe_axi_clk.clkr,
+ [VPE_AXI_CLK] = &vpe_axi_clk.clkr,
+ [ROT_AXI_CLK] = &rot_axi_clk.clkr,
+ [VCODEC_AXI_A_CLK] = &vcodec_axi_a_clk.clkr,
+ [VCODEC_AXI_B_CLK] = &vcodec_axi_b_clk.clkr,
+ [CSI0_SRC] = &csi0_src.clkr,
+ [CSI0_CLK] = &csi0_clk.clkr,
+ [CSI0_PHY_CLK] = &csi0_phy_clk.clkr,
+ [CSI1_SRC] = &csi1_src.clkr,
+ [CSI1_CLK] = &csi1_clk.clkr,
+ [CSI1_PHY_CLK] = &csi1_phy_clk.clkr,
+ [CSI2_SRC] = &csi2_src.clkr,
+ [CSI2_CLK] = &csi2_clk.clkr,
+ [CSI2_PHY_CLK] = &csi2_phy_clk.clkr,
+ [CSI_PIX_CLK] = &csi_pix_clk.clkr,
+ [CSI_RDI_CLK] = &csi_rdi_clk.clkr,
+ [MDP_VSYNC_CLK] = &mdp_vsync_clk.clkr,
+ [HDMI_APP_CLK] = &hdmi_app_clk.clkr,
+ [CSI_PIX1_CLK] = &csi_pix1_clk.clkr,
+ [CSI_RDI2_CLK] = &csi_rdi2_clk.clkr,
+ [CSI_RDI1_CLK] = &csi_rdi1_clk.clkr,
+ [GFX2D0_SRC] = &gfx2d0_src.clkr,
+ [GFX2D0_CLK] = &gfx2d0_clk.clkr,
+ [GFX2D1_SRC] = &gfx2d1_src.clkr,
+ [GFX2D1_CLK] = &gfx2d1_clk.clkr,
+ [GFX3D_SRC] = &gfx3d_src.clkr,
+ [GFX3D_CLK] = &gfx3d_clk.clkr,
+ [IJPEG_SRC] = &ijpeg_src.clkr,
+ [IJPEG_CLK] = &ijpeg_clk.clkr,
+ [JPEGD_SRC] = &jpegd_src.clkr,
+ [JPEGD_CLK] = &jpegd_clk.clkr,
+ [MDP_SRC] = &mdp_src.clkr,
+ [MDP_CLK] = &mdp_clk.clkr,
+ [MDP_LUT_CLK] = &mdp_lut_clk.clkr,
+ [ROT_SRC] = &rot_src.clkr,
+ [ROT_CLK] = &rot_clk.clkr,
+ [TV_ENC_CLK] = &tv_enc_clk.clkr,
+ [TV_DAC_CLK] = &tv_dac_clk.clkr,
+ [HDMI_TV_CLK] = &hdmi_tv_clk.clkr,
+ [MDP_TV_CLK] = &mdp_tv_clk.clkr,
+ [TV_SRC] = &tv_src.clkr,
+ [VCODEC_SRC] = &vcodec_src.clkr,
+ [VCODEC_CLK] = &vcodec_clk.clkr,
+ [VFE_SRC] = &vfe_src.clkr,
+ [VFE_CLK] = &vfe_clk.clkr,
+ [VFE_CSI_CLK] = &vfe_csi_clk.clkr,
+ [VPE_SRC] = &vpe_src.clkr,
+ [VPE_CLK] = &vpe_clk.clkr,
+ [CAMCLK0_SRC] = &camclk0_src.clkr,
+ [CAMCLK0_CLK] = &camclk0_clk.clkr,
+ [CAMCLK1_SRC] = &camclk1_src.clkr,
+ [CAMCLK1_CLK] = &camclk1_clk.clkr,
+ [CAMCLK2_SRC] = &camclk2_src.clkr,
+ [CAMCLK2_CLK] = &camclk2_clk.clkr,
+ [CSIPHYTIMER_SRC] = &csiphytimer_src.clkr,
+ [CSIPHY2_TIMER_CLK] = &csiphy2_timer_clk.clkr,
+ [CSIPHY1_TIMER_CLK] = &csiphy1_timer_clk.clkr,
+ [CSIPHY0_TIMER_CLK] = &csiphy0_timer_clk.clkr,
+ [PLL2] = &pll2.clkr,
+};
+
+static const struct qcom_reset_map mmcc_msm8960_resets[] = {
+ [VPE_AXI_RESET] = { 0x0208, 15 },
+ [IJPEG_AXI_RESET] = { 0x0208, 14 },
+ [MPD_AXI_RESET] = { 0x0208, 13 },
+ [VFE_AXI_RESET] = { 0x0208, 9 },
+ [SP_AXI_RESET] = { 0x0208, 8 },
+ [VCODEC_AXI_RESET] = { 0x0208, 7 },
+ [ROT_AXI_RESET] = { 0x0208, 6 },
+ [VCODEC_AXI_A_RESET] = { 0x0208, 5 },
+ [VCODEC_AXI_B_RESET] = { 0x0208, 4 },
+ [FAB_S3_AXI_RESET] = { 0x0208, 3 },
+ [FAB_S2_AXI_RESET] = { 0x0208, 2 },
+ [FAB_S1_AXI_RESET] = { 0x0208, 1 },
+ [FAB_S0_AXI_RESET] = { 0x0208 },
+ [SMMU_GFX3D_ABH_RESET] = { 0x020c, 31 },
+ [SMMU_VPE_AHB_RESET] = { 0x020c, 30 },
+ [SMMU_VFE_AHB_RESET] = { 0x020c, 29 },
+ [SMMU_ROT_AHB_RESET] = { 0x020c, 28 },
+ [SMMU_VCODEC_B_AHB_RESET] = { 0x020c, 27 },
+ [SMMU_VCODEC_A_AHB_RESET] = { 0x020c, 26 },
+ [SMMU_MDP1_AHB_RESET] = { 0x020c, 25 },
+ [SMMU_MDP0_AHB_RESET] = { 0x020c, 24 },
+ [SMMU_JPEGD_AHB_RESET] = { 0x020c, 23 },
+ [SMMU_IJPEG_AHB_RESET] = { 0x020c, 22 },
+ [SMMU_GFX2D0_AHB_RESET] = { 0x020c, 21 },
+ [SMMU_GFX2D1_AHB_RESET] = { 0x020c, 20 },
+ [APU_AHB_RESET] = { 0x020c, 18 },
+ [CSI_AHB_RESET] = { 0x020c, 17 },
+ [TV_ENC_AHB_RESET] = { 0x020c, 15 },
+ [VPE_AHB_RESET] = { 0x020c, 14 },
+ [FABRIC_AHB_RESET] = { 0x020c, 13 },
+ [GFX2D0_AHB_RESET] = { 0x020c, 12 },
+ [GFX2D1_AHB_RESET] = { 0x020c, 11 },
+ [GFX3D_AHB_RESET] = { 0x020c, 10 },
+ [HDMI_AHB_RESET] = { 0x020c, 9 },
+ [MSSS_IMEM_AHB_RESET] = { 0x020c, 8 },
+ [IJPEG_AHB_RESET] = { 0x020c, 7 },
+ [DSI_M_AHB_RESET] = { 0x020c, 6 },
+ [DSI_S_AHB_RESET] = { 0x020c, 5 },
+ [JPEGD_AHB_RESET] = { 0x020c, 4 },
+ [MDP_AHB_RESET] = { 0x020c, 3 },
+ [ROT_AHB_RESET] = { 0x020c, 2 },
+ [VCODEC_AHB_RESET] = { 0x020c, 1 },
+ [VFE_AHB_RESET] = { 0x020c, 0 },
+ [DSI2_M_AHB_RESET] = { 0x0210, 31 },
+ [DSI2_S_AHB_RESET] = { 0x0210, 30 },
+ [CSIPHY2_RESET] = { 0x0210, 29 },
+ [CSI_PIX1_RESET] = { 0x0210, 28 },
+ [CSIPHY0_RESET] = { 0x0210, 27 },
+ [CSIPHY1_RESET] = { 0x0210, 26 },
+ [DSI2_RESET] = { 0x0210, 25 },
+ [VFE_CSI_RESET] = { 0x0210, 24 },
+ [MDP_RESET] = { 0x0210, 21 },
+ [AMP_RESET] = { 0x0210, 20 },
+ [JPEGD_RESET] = { 0x0210, 19 },
+ [CSI1_RESET] = { 0x0210, 18 },
+ [VPE_RESET] = { 0x0210, 17 },
+ [MMSS_FABRIC_RESET] = { 0x0210, 16 },
+ [VFE_RESET] = { 0x0210, 15 },
+ [GFX2D0_RESET] = { 0x0210, 14 },
+ [GFX2D1_RESET] = { 0x0210, 13 },
+ [GFX3D_RESET] = { 0x0210, 12 },
+ [HDMI_RESET] = { 0x0210, 11 },
+ [MMSS_IMEM_RESET] = { 0x0210, 10 },
+ [IJPEG_RESET] = { 0x0210, 9 },
+ [CSI0_RESET] = { 0x0210, 8 },
+ [DSI_RESET] = { 0x0210, 7 },
+ [VCODEC_RESET] = { 0x0210, 6 },
+ [MDP_TV_RESET] = { 0x0210, 4 },
+ [MDP_VSYNC_RESET] = { 0x0210, 3 },
+ [ROT_RESET] = { 0x0210, 2 },
+ [TV_HDMI_RESET] = { 0x0210, 1 },
+ [TV_ENC_RESET] = { 0x0210 },
+ [CSI2_RESET] = { 0x0214, 2 },
+ [CSI_RDI1_RESET] = { 0x0214, 1 },
+ [CSI_RDI2_RESET] = { 0x0214 },
+};
+
+static const struct regmap_config mmcc_msm8960_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x334,
+ .fast_io = true,
+};
+
+static const struct of_device_id mmcc_msm8960_match_table[] = {
+ { .compatible = "qcom,mmcc-msm8960" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mmcc_msm8960_match_table);
+
+struct qcom_cc {
+ struct qcom_reset_controller reset;
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+
+static int mmcc_msm8960_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct resource *res;
+ int i, ret;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ struct clk_onecell_data *data;
+ struct clk **clks;
+ struct regmap *regmap;
+ size_t num_clks;
+ struct qcom_reset_controller *reset;
+ struct qcom_cc *cc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &mmcc_msm8960_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ num_clks = ARRAY_SIZE(mmcc_msm8960_clks);
+ cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*clks) * num_clks,
+ GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ clks = cc->clks;
+ data = &cc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!mmcc_msm8960_clks[i])
+ continue;
+ clk = devm_clk_register_regmap(dev, mmcc_msm8960_clks[i]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+ if (ret)
+ return ret;
+
+ reset = &cc->reset;
+ reset->rcdev.of_node = dev->of_node;
+ reset->rcdev.ops = &qcom_reset_ops,
+ reset->rcdev.owner = THIS_MODULE,
+ reset->rcdev.nr_resets = ARRAY_SIZE(mmcc_msm8960_resets),
+ reset->regmap = regmap;
+ reset->reset_map = mmcc_msm8960_resets,
+ platform_set_drvdata(pdev, &reset->rcdev);
+
+ ret = reset_controller_register(&reset->rcdev);
+ if (ret)
+ of_clk_del_provider(dev->of_node);
+
+ return ret;
+}
+
+static int mmcc_msm8960_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ reset_controller_unregister(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static struct platform_driver mmcc_msm8960_driver = {
+ .probe = mmcc_msm8960_probe,
+ .remove = mmcc_msm8960_remove,
+ .driver = {
+ .name = "mmcc-msm8960",
+ .owner = THIS_MODULE,
+ .of_match_table = mmcc_msm8960_match_table,
+ },
+};
+
+module_platform_driver(mmcc_msm8960_driver);
+
+MODULE_DESCRIPTION("QCOM MMCC MSM8960 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mmcc-msm8960");
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
new file mode 100644
index 000000000000..c95774514b81
--- /dev/null
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -0,0 +1,2625 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,mmcc-msm8974.h>
+#include <dt-bindings/reset/qcom,mmcc-msm8974.h>
+
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+#define P_XO 0
+#define P_MMPLL0 1
+#define P_EDPLINK 1
+#define P_MMPLL1 2
+#define P_HDMIPLL 2
+#define P_GPLL0 3
+#define P_EDPVCO 3
+#define P_GPLL1 4
+#define P_DSI0PLL 4
+#define P_MMPLL2 4
+#define P_MMPLL3 4
+#define P_DSI1PLL 5
+
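+/*
+ * Each parent_map[] below is indexed by the P_* source identifiers used in
+ * the frequency tables and holds the hardware source-select value for that
+ * parent; the matching *_names[] array lists the parent clock names in the
+ * same P_* order.
+ */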
+static const u8 mmcc_xo_mmpll0_mmpll1_gpll0_map[] = {
+ [P_XO] = 0,
+ [P_MMPLL0] = 1,
+ [P_MMPLL1] = 2,
+ [P_GPLL0] = 5,
+};
+
+static const char *mmcc_xo_mmpll0_mmpll1_gpll0[] = {
+ "xo",
+ "mmpll0_vote",
+ "mmpll1_vote",
+ "mmss_gpll0_vote",
+};
+
+static const u8 mmcc_xo_mmpll0_dsi_hdmi_gpll0_map[] = {
+ [P_XO] = 0,
+ [P_MMPLL0] = 1,
+ [P_HDMIPLL] = 4,
+ [P_GPLL0] = 5,
+ [P_DSI0PLL] = 2,
+ [P_DSI1PLL] = 3,
+};
+
+static const char *mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
+ "xo",
+ "mmpll0_vote",
+ "hdmipll",
+ "mmss_gpll0_vote",
+ "dsi0pll",
+ "dsi1pll",
+};
+
+static const u8 mmcc_xo_mmpll0_1_2_gpll0_map[] = {
+ [P_XO] = 0,
+ [P_MMPLL0] = 1,
+ [P_MMPLL1] = 2,
+ [P_GPLL0] = 5,
+ [P_MMPLL2] = 3,
+};
+
+static const char *mmcc_xo_mmpll0_1_2_gpll0[] = {
+ "xo",
+ "mmpll0_vote",
+ "mmpll1_vote",
+ "mmss_gpll0_vote",
+ "mmpll2",
+};
+
+static const u8 mmcc_xo_mmpll0_1_3_gpll0_map[] = {
+ [P_XO] = 0,
+ [P_MMPLL0] = 1,
+ [P_MMPLL1] = 2,
+ [P_GPLL0] = 5,
+ [P_MMPLL3] = 3,
+};
+
+static const char *mmcc_xo_mmpll0_1_3_gpll0[] = {
+ "xo",
+ "mmpll0_vote",
+ "mmpll1_vote",
+ "mmss_gpll0_vote",
+ "mmpll3",
+};
+
+static const u8 mmcc_xo_mmpll0_1_gpll1_0_map[] = {
+ [P_XO] = 0,
+ [P_MMPLL0] = 1,
+ [P_MMPLL1] = 2,
+ [P_GPLL0] = 5,
+ [P_GPLL1] = 4,
+};
+
+static const char *mmcc_xo_mmpll0_1_gpll1_0[] = {
+ "xo",
+ "mmpll0_vote",
+ "mmpll1_vote",
+ "mmss_gpll0_vote",
+ "gpll1_vote",
+};
+
+static const u8 mmcc_xo_dsi_hdmi_edp_map[] = {
+ [P_XO] = 0,
+ [P_EDPLINK] = 4,
+ [P_HDMIPLL] = 3,
+ [P_EDPVCO] = 5,
+ [P_DSI0PLL] = 1,
+ [P_DSI1PLL] = 2,
+};
+
+static const char *mmcc_xo_dsi_hdmi_edp[] = {
+ "xo",
+ "edp_link_clk",
+ "hdmipll",
+ "edp_vco_div",
+ "dsi0pll",
+ "dsi1pll",
+};
+
+static const u8 mmcc_xo_dsi_hdmi_edp_gpll0_map[] = {
+ [P_XO] = 0,
+ [P_EDPLINK] = 4,
+ [P_HDMIPLL] = 3,
+ [P_GPLL0] = 5,
+ [P_DSI0PLL] = 1,
+ [P_DSI1PLL] = 2,
+};
+
+static const char *mmcc_xo_dsi_hdmi_edp_gpll0[] = {
+ "xo",
+ "edp_link_clk",
+ "hdmipll",
+ "gpll0_vote",
+ "dsi0pll",
+ "dsi1pll",
+};
+
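+/*
+ * The RCG pre-divider field encodes a divider D as 2 * D - 1, which is how
+ * half-integer dividers are supported; F() takes the real divider (e.g.
+ * 2.5) and stores the encoded value together with the source and the M/N
+ * counter settings.
+ */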
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+static struct clk_pll mmpll0 = {
+ .l_reg = 0x0004,
+ .m_reg = 0x0008,
+ .n_reg = 0x000c,
+ .config_reg = 0x0014,
+ .mode_reg = 0x0000,
+ .status_reg = 0x001c,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll0",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
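+/*
+ * The *_vote clocks enable their PLL through a bit in the shared voting
+ * register at 0x0100 (one bit per PLL) rather than via the PLL mode
+ * register itself.
+ */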
+static struct clk_regmap mmpll0_vote = {
+ .enable_reg = 0x0100,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll0_vote",
+ .parent_names = (const char *[]){ "mmpll0" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll mmpll1 = {
+ .l_reg = 0x0044,
+ .m_reg = 0x0048,
+ .n_reg = 0x004c,
+ .config_reg = 0x0054,
+ .mode_reg = 0x0040,
+ .status_reg = 0x005c,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll1",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap mmpll1_vote = {
+ .enable_reg = 0x0100,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll1_vote",
+ .parent_names = (const char *[]){ "mmpll1" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll mmpll2 = {
+ .l_reg = 0x4104,
+ .m_reg = 0x4108,
+ .n_reg = 0x410c,
+ .config_reg = 0x4114,
+ .mode_reg = 0x4100,
+ .status_reg = 0x411c,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll2",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_pll mmpll3 = {
+ .l_reg = 0x0084,
+ .m_reg = 0x0088,
+ .n_reg = 0x008c,
+ .config_reg = 0x0094,
+ .mode_reg = 0x0080,
+ .status_reg = 0x009c,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll3",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_rcg2 mmss_ahb_clk_src = {
+ .cmd_rcgr = 0x5000,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmss_ahb_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mmss_axi_clk[] = {
+ F( 19200000, P_XO, 1, 0, 0),
+ F( 37500000, P_GPLL0, 16, 0, 0),
+ F( 50000000, P_GPLL0, 12, 0, 0),
+ F( 75000000, P_GPLL0, 8, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(291750000, P_MMPLL1, 4, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ F(466800000, P_MMPLL1, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 mmss_axi_clk_src = {
+ .cmd_rcgr = 0x5040,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_mmss_axi_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmss_axi_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_ocmemnoc_clk[] = {
+ F( 19200000, P_XO, 1, 0, 0),
+ F( 37500000, P_GPLL0, 16, 0, 0),
+ F( 50000000, P_GPLL0, 12, 0, 0),
+ F( 75000000, P_GPLL0, 8, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(291750000, P_MMPLL1, 4, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 ocmemnoc_clk_src = {
+ .cmd_rcgr = 0x5090,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_ocmemnoc_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ocmemnoc_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_camss_csi0_3_clk[] = {
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_MMPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+ .cmd_rcgr = 0x3090,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_csi0_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+ .cmd_rcgr = 0x3100,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_csi0_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi2_clk_src = {
+ .cmd_rcgr = 0x3160,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_csi0_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi3_clk_src = {
+ .cmd_rcgr = 0x31c0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_csi0_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi3_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = {
+ F(37500000, P_GPLL0, 16, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ F(80000000, P_GPLL0, 7.5, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(109090000, P_GPLL0, 5.5, 0, 0),
+ F(133330000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(228570000, P_MMPLL0, 3.5, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ F(465000000, P_MMPLL3, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+ .cmd_rcgr = 0x3600,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_vfe_vfe0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe0_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 vfe1_clk_src = {
+ .cmd_rcgr = 0x3620,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_vfe_vfe0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe1_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_mdp_clk[] = {
+ F(37500000, P_GPLL0, 16, 0, 0),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(85710000, P_GPLL0, 7, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(133330000, P_MMPLL0, 6, 0, 0),
+ F(160000000, P_MMPLL0, 5, 0, 0),
+ F(200000000, P_MMPLL0, 4, 0, 0),
+ F(228570000, P_MMPLL0, 3.5, 0, 0),
+ F(240000000, P_GPLL0, 2.5, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x2040,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_dsi_hdmi_gpll0_map,
+ .freq_tbl = ftbl_mdss_mdp_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mdp_clk_src",
+ .parent_names = mmcc_xo_mmpll0_dsi_hdmi_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x4000,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_1_2_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk_src",
+ .parent_names = mmcc_xo_mmpll0_1_2_gpll0,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = {
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(133330000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(228570000, P_MMPLL0, 3.5, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+ .cmd_rcgr = 0x3500,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_jpeg_jpeg0_2_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg0_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 jpeg1_clk_src = {
+ .cmd_rcgr = 0x3520,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_jpeg_jpeg0_2_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg1_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 jpeg2_clk_src = {
+ .cmd_rcgr = 0x3540,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_jpeg_jpeg0_2_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg2_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_pclk0_clk[] = {
+ F(125000000, P_DSI0PLL, 2, 0, 0),
+ F(250000000, P_DSI0PLL, 1, 0, 0),
+ { }
+};
+
+static struct freq_tbl ftbl_mdss_pclk1_clk[] = {
+ F(125000000, P_DSI1PLL, 2, 0, 0),
+ F(250000000, P_DSI1PLL, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x2000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+ .freq_tbl = ftbl_mdss_pclk0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk0_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 pclk1_clk_src = {
+ .cmd_rcgr = 0x2020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+ .freq_tbl = ftbl_mdss_pclk1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk1_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_venus0_vcodec0_clk[] = {
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(133330000, P_MMPLL0, 6, 0, 0),
+ F(200000000, P_MMPLL0, 4, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ F(465000000, P_MMPLL3, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vcodec0_clk_src = {
+ .cmd_rcgr = 0x1000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_1_3_gpll0_map,
+ .freq_tbl = ftbl_venus0_vcodec0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vcodec0_clk_src",
+ .parent_names = mmcc_xo_mmpll0_1_3_gpll0,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_camss_cci_cci_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+ .cmd_rcgr = 0x3300,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_cci_cci_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cci_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_camss_gp0_1_clk[] = {
+ F(10000, P_XO, 16, 1, 120),
+ F(24000, P_XO, 16, 1, 50),
+ F(6000000, P_GPLL0, 10, 1, 10),
+ F(12000000, P_GPLL0, 10, 1, 5),
+ F(13000000, P_GPLL0, 4, 13, 150),
+ F(24000000, P_GPLL0, 5, 1, 5),
+ { }
+};
+
+static struct clk_rcg2 camss_gp0_clk_src = {
+ .cmd_rcgr = 0x3420,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_1_gpll1_0_map,
+ .freq_tbl = ftbl_camss_gp0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp0_clk_src",
+ .parent_names = mmcc_xo_mmpll0_1_gpll1_0,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camss_gp1_clk_src = {
+ .cmd_rcgr = 0x3450,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_1_gpll1_0_map,
+ .freq_tbl = ftbl_camss_gp0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp1_clk_src",
+ .parent_names = mmcc_xo_mmpll0_1_gpll1_0,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_camss_mclk0_3_clk[] = {
+ F(4800000, P_XO, 4, 0, 0),
+ F(6000000, P_GPLL0, 10, 1, 10),
+ F(8000000, P_GPLL0, 15, 1, 5),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16000000, P_GPLL0, 12.5, 1, 3),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 5, 1, 5),
+ F(32000000, P_MMPLL0, 5, 1, 5),
+ F(48000000, P_GPLL0, 12.5, 0, 0),
+ F(64000000, P_MMPLL0, 12.5, 0, 0),
+ F(66670000, P_GPLL0, 9, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+ .cmd_rcgr = 0x3360,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_mclk0_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk0_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+ .cmd_rcgr = 0x3390,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_mclk0_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk1_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk2_clk_src = {
+ .cmd_rcgr = 0x33c0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_mclk0_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk2_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk3_clk_src = {
+ .cmd_rcgr = 0x33f0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_mclk0_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk3_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_camss_phy0_2_csi0_2phytimer_clk[] = {
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_MMPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x3000,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_phy0_2_csi0_2phytimer_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0phytimer_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x3030,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_phy0_2_csi0_2phytimer_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1phytimer_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x3060,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_phy0_2_csi0_2phytimer_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2phytimer_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_camss_vfe_cpp_clk[] = {
+ F(133330000, P_GPLL0, 4.5, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ F(465000000, P_MMPLL3, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+ .cmd_rcgr = 0x3640,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_vfe_cpp_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cpp_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_byte0_clk[] = {
+ F(93750000, P_DSI0PLL, 8, 0, 0),
+ F(187500000, P_DSI0PLL, 4, 0, 0),
+ { }
+};
+
+static struct freq_tbl ftbl_mdss_byte1_clk[] = {
+ F(93750000, P_DSI1PLL, 8, 0, 0),
+ F(187500000, P_DSI1PLL, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x2120,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+ .freq_tbl = ftbl_mdss_byte0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte0_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 byte1_clk_src = {
+ .cmd_rcgr = 0x2140,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+ .freq_tbl = ftbl_mdss_byte1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte1_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_edpaux_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 edpaux_clk_src = {
+ .cmd_rcgr = 0x20e0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_mdss_edpaux_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "edpaux_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_edplink_clk[] = {
+ F(135000000, P_EDPLINK, 2, 0, 0),
+ F(270000000, P_EDPLINK, 11, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 edplink_clk_src = {
+ .cmd_rcgr = 0x20c0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+ .freq_tbl = ftbl_mdss_edplink_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "edplink_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_edppixel_clk[] = {
+ F(175000000, P_EDPVCO, 2, 0, 0),
+ F(350000000, P_EDPVCO, 11, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 edppixel_clk_src = {
+ .cmd_rcgr = 0x20a0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_map,
+ .freq_tbl = ftbl_mdss_edppixel_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "edppixel_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x2160,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+ .freq_tbl = ftbl_mdss_esc0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc0_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 esc1_clk_src = {
+ .cmd_rcgr = 0x2180,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+ .freq_tbl = ftbl_mdss_esc0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc1_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_extpclk_clk[] = {
+ F(25200000, P_HDMIPLL, 1, 0, 0),
+ F(27000000, P_HDMIPLL, 1, 0, 0),
+ F(27030000, P_HDMIPLL, 1, 0, 0),
+ F(65000000, P_HDMIPLL, 1, 0, 0),
+ F(74250000, P_HDMIPLL, 1, 0, 0),
+ F(108000000, P_HDMIPLL, 1, 0, 0),
+ F(148500000, P_HDMIPLL, 1, 0, 0),
+ F(268500000, P_HDMIPLL, 1, 0, 0),
+ F(297000000, P_HDMIPLL, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 extpclk_clk_src = {
+ .cmd_rcgr = 0x2060,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+ .freq_tbl = ftbl_mdss_extpclk_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "extpclk_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_hdmi_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 hdmi_clk_src = {
+ .cmd_rcgr = 0x2100,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_mdss_hdmi_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hdmi_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_vsync_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x2080,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_mdss_vsync_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vsync_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
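+/*
+ * The branch clocks below follow one pattern: enable_reg/enable_mask gate
+ * the clock and halt_reg is polled to confirm the branch really started or
+ * stopped; CLK_SET_RATE_PARENT is set on branches that should forward rate
+ * requests to their RCG parent.
+ */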
+static struct clk_branch camss_cci_cci_ahb_clk = {
+ .halt_reg = 0x3348,
+ .clkr = {
+ .enable_reg = 0x3348,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cci_cci_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cci_cci_clk = {
+ .halt_reg = 0x3344,
+ .clkr = {
+ .enable_reg = 0x3344,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cci_cci_clk",
+ .parent_names = (const char *[]){
+ "cci_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0_ahb_clk = {
+ .halt_reg = 0x30bc,
+ .clkr = {
+ .enable_reg = 0x30bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0_clk = {
+ .halt_reg = 0x30b4,
+ .clkr = {
+ .enable_reg = 0x30b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0_clk",
+ .parent_names = (const char *[]){
+ "csi0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0phy_clk = {
+ .halt_reg = 0x30c4,
+ .clkr = {
+ .enable_reg = 0x30c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0phy_clk",
+ .parent_names = (const char *[]){
+ "csi0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0pix_clk = {
+ .halt_reg = 0x30e4,
+ .clkr = {
+ .enable_reg = 0x30e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0pix_clk",
+ .parent_names = (const char *[]){
+ "csi0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0rdi_clk = {
+ .halt_reg = 0x30d4,
+ .clkr = {
+ .enable_reg = 0x30d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0rdi_clk",
+ .parent_names = (const char *[]){
+ "csi0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1_ahb_clk = {
+ .halt_reg = 0x3128,
+ .clkr = {
+ .enable_reg = 0x3128,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1_clk = {
+ .halt_reg = 0x3124,
+ .clkr = {
+ .enable_reg = 0x3124,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1_clk",
+ .parent_names = (const char *[]){
+ "csi1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1phy_clk = {
+ .halt_reg = 0x3134,
+ .clkr = {
+ .enable_reg = 0x3134,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1phy_clk",
+ .parent_names = (const char *[]){
+ "csi1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1pix_clk = {
+ .halt_reg = 0x3154,
+ .clkr = {
+ .enable_reg = 0x3154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1pix_clk",
+ .parent_names = (const char *[]){
+ "csi1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1rdi_clk = {
+ .halt_reg = 0x3144,
+ .clkr = {
+ .enable_reg = 0x3144,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1rdi_clk",
+ .parent_names = (const char *[]){
+ "csi1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2_ahb_clk = {
+ .halt_reg = 0x3188,
+ .clkr = {
+ .enable_reg = 0x3188,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2_clk = {
+ .halt_reg = 0x3184,
+ .clkr = {
+ .enable_reg = 0x3184,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2_clk",
+ .parent_names = (const char *[]){
+ "csi2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2phy_clk = {
+ .halt_reg = 0x3194,
+ .clkr = {
+ .enable_reg = 0x3194,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2phy_clk",
+ .parent_names = (const char *[]){
+ "csi2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2pix_clk = {
+ .halt_reg = 0x31b4,
+ .clkr = {
+ .enable_reg = 0x31b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2pix_clk",
+ .parent_names = (const char *[]){
+ "csi2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2rdi_clk = {
+ .halt_reg = 0x31a4,
+ .clkr = {
+ .enable_reg = 0x31a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2rdi_clk",
+ .parent_names = (const char *[]){
+ "csi2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3_ahb_clk = {
+ .halt_reg = 0x31e8,
+ .clkr = {
+ .enable_reg = 0x31e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3_clk = {
+ .halt_reg = 0x31e4,
+ .clkr = {
+ .enable_reg = 0x31e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3_clk",
+ .parent_names = (const char *[]){
+ "csi3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3phy_clk = {
+ .halt_reg = 0x31f4,
+ .clkr = {
+ .enable_reg = 0x31f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3phy_clk",
+ .parent_names = (const char *[]){
+ "csi3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3pix_clk = {
+ .halt_reg = 0x3214,
+ .clkr = {
+ .enable_reg = 0x3214,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3pix_clk",
+ .parent_names = (const char *[]){
+ "csi3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3rdi_clk = {
+ .halt_reg = 0x3204,
+ .clkr = {
+ .enable_reg = 0x3204,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3rdi_clk",
+ .parent_names = (const char *[]){
+ "csi3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi_vfe0_clk = {
+ .halt_reg = 0x3704,
+ .clkr = {
+ .enable_reg = 0x3704,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi_vfe0_clk",
+ .parent_names = (const char *[]){
+ "vfe0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi_vfe1_clk = {
+ .halt_reg = 0x3714,
+ .clkr = {
+ .enable_reg = 0x3714,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi_vfe1_clk",
+ .parent_names = (const char *[]){
+ "vfe1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_gp0_clk = {
+ .halt_reg = 0x3444,
+ .clkr = {
+ .enable_reg = 0x3444,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_gp0_clk",
+ .parent_names = (const char *[]){
+ "camss_gp0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_gp1_clk = {
+ .halt_reg = 0x3474,
+ .clkr = {
+ .enable_reg = 0x3474,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_gp1_clk",
+ .parent_names = (const char *[]){
+ "camss_gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_ispif_ahb_clk = {
+ .halt_reg = 0x3224,
+ .clkr = {
+ .enable_reg = 0x3224,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_ispif_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg0_clk = {
+ .halt_reg = 0x35a8,
+ .clkr = {
+ .enable_reg = 0x35a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg0_clk",
+ .parent_names = (const char *[]){
+ "jpeg0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg1_clk = {
+ .halt_reg = 0x35ac,
+ .clkr = {
+ .enable_reg = 0x35ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg1_clk",
+ .parent_names = (const char *[]){
+ "jpeg1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg2_clk = {
+ .halt_reg = 0x35b0,
+ .clkr = {
+ .enable_reg = 0x35b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg2_clk",
+ .parent_names = (const char *[]){
+ "jpeg2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg_ahb_clk = {
+ .halt_reg = 0x35b4,
+ .clkr = {
+ .enable_reg = 0x35b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg_axi_clk = {
+ .halt_reg = 0x35b8,
+ .clkr = {
+ .enable_reg = 0x35b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg_axi_clk",
+ .parent_names = (const char *[]){
+ "mmss_axi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg_ocmemnoc_clk = {
+ .halt_reg = 0x35bc,
+ .clkr = {
+ .enable_reg = 0x35bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg_ocmemnoc_clk",
+ .parent_names = (const char *[]){
+ "ocmemnoc_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk0_clk = {
+ .halt_reg = 0x3384,
+ .clkr = {
+ .enable_reg = 0x3384,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk0_clk",
+ .parent_names = (const char *[]){
+ "mclk0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk1_clk = {
+ .halt_reg = 0x33b4,
+ .clkr = {
+ .enable_reg = 0x33b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk1_clk",
+ .parent_names = (const char *[]){
+ "mclk1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk2_clk = {
+ .halt_reg = 0x33e4,
+ .clkr = {
+ .enable_reg = 0x33e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk2_clk",
+ .parent_names = (const char *[]){
+ "mclk2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk3_clk = {
+ .halt_reg = 0x3414,
+ .clkr = {
+ .enable_reg = 0x3414,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk3_clk",
+ .parent_names = (const char *[]){
+ "mclk3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_micro_ahb_clk = {
+ .halt_reg = 0x3494,
+ .clkr = {
+ .enable_reg = 0x3494,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_micro_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_phy0_csi0phytimer_clk = {
+ .halt_reg = 0x3024,
+ .clkr = {
+ .enable_reg = 0x3024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_phy0_csi0phytimer_clk",
+ .parent_names = (const char *[]){
+ "csi0phytimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_phy1_csi1phytimer_clk = {
+ .halt_reg = 0x3054,
+ .clkr = {
+ .enable_reg = 0x3054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_phy1_csi1phytimer_clk",
+ .parent_names = (const char *[]){
+ "csi1phytimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_phy2_csi2phytimer_clk = {
+ .halt_reg = 0x3084,
+ .clkr = {
+ .enable_reg = 0x3084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_phy2_csi2phytimer_clk",
+ .parent_names = (const char *[]){
+ "csi2phytimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_top_ahb_clk = {
+ .halt_reg = 0x3484,
+ .clkr = {
+ .enable_reg = 0x3484,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_top_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_cpp_ahb_clk = {
+ .halt_reg = 0x36b4,
+ .clkr = {
+ .enable_reg = 0x36b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_cpp_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_cpp_clk = {
+ .halt_reg = 0x36b0,
+ .clkr = {
+ .enable_reg = 0x36b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_cpp_clk",
+ .parent_names = (const char *[]){
+ "cpp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vfe0_clk = {
+ .halt_reg = 0x36a8,
+ .clkr = {
+ .enable_reg = 0x36a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vfe0_clk",
+ .parent_names = (const char *[]){
+ "vfe0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vfe1_clk = {
+ .halt_reg = 0x36ac,
+ .clkr = {
+ .enable_reg = 0x36ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vfe1_clk",
+ .parent_names = (const char *[]){
+ "vfe1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vfe_ahb_clk = {
+ .halt_reg = 0x36b8,
+ .clkr = {
+ .enable_reg = 0x36b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vfe_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vfe_axi_clk = {
+ .halt_reg = 0x36bc,
+ .clkr = {
+ .enable_reg = 0x36bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vfe_axi_clk",
+ .parent_names = (const char *[]){
+ "mmss_axi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vfe_ocmemnoc_clk = {
+ .halt_reg = 0x36c0,
+ .clkr = {
+ .enable_reg = 0x36c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vfe_ocmemnoc_clk",
+ .parent_names = (const char *[]){
+ "ocmemnoc_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_ahb_clk = {
+ .halt_reg = 0x2308,
+ .clkr = {
+ .enable_reg = 0x2308,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_axi_clk = {
+ .halt_reg = 0x2310,
+ .clkr = {
+ .enable_reg = 0x2310,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_axi_clk",
+ .parent_names = (const char *[]){
+ "mmss_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte0_clk = {
+ .halt_reg = 0x233c,
+ .clkr = {
+ .enable_reg = 0x233c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte0_clk",
+ .parent_names = (const char *[]){
+ "byte0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte1_clk = {
+ .halt_reg = 0x2340,
+ .clkr = {
+ .enable_reg = 0x2340,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte1_clk",
+ .parent_names = (const char *[]){
+ "byte1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_edpaux_clk = {
+ .halt_reg = 0x2334,
+ .clkr = {
+ .enable_reg = 0x2334,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_edpaux_clk",
+ .parent_names = (const char *[]){
+ "edpaux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_edplink_clk = {
+ .halt_reg = 0x2330,
+ .clkr = {
+ .enable_reg = 0x2330,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_edplink_clk",
+ .parent_names = (const char *[]){
+ "edplink_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_edppixel_clk = {
+ .halt_reg = 0x232c,
+ .clkr = {
+ .enable_reg = 0x232c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_edppixel_clk",
+ .parent_names = (const char *[]){
+ "edppixel_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_esc0_clk = {
+ .halt_reg = 0x2344,
+ .clkr = {
+ .enable_reg = 0x2344,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_esc0_clk",
+ .parent_names = (const char *[]){
+ "esc0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_esc1_clk = {
+ .halt_reg = 0x2348,
+ .clkr = {
+ .enable_reg = 0x2348,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_esc1_clk",
+ .parent_names = (const char *[]){
+ "esc1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_extpclk_clk = {
+ .halt_reg = 0x2324,
+ .clkr = {
+ .enable_reg = 0x2324,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_extpclk_clk",
+ .parent_names = (const char *[]){
+ "extpclk_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_hdmi_ahb_clk = {
+ .halt_reg = 0x230c,
+ .clkr = {
+ .enable_reg = 0x230c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_hdmi_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_hdmi_clk = {
+ .halt_reg = 0x2338,
+ .clkr = {
+ .enable_reg = 0x2338,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_hdmi_clk",
+ .parent_names = (const char *[]){
+ "hdmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_mdp_clk = {
+ .halt_reg = 0x231c,
+ .clkr = {
+ .enable_reg = 0x231c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_mdp_clk",
+ .parent_names = (const char *[]){
+ "mdp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_mdp_lut_clk = {
+ .halt_reg = 0x2320,
+ .clkr = {
+ .enable_reg = 0x2320,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_mdp_lut_clk",
+ .parent_names = (const char *[]){
+ "mdp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_pclk0_clk = {
+ .halt_reg = 0x2314,
+ .clkr = {
+ .enable_reg = 0x2314,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_pclk0_clk",
+ .parent_names = (const char *[]){
+ "pclk0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_pclk1_clk = {
+ .halt_reg = 0x2318,
+ .clkr = {
+ .enable_reg = 0x2318,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_pclk1_clk",
+ .parent_names = (const char *[]){
+ "pclk1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_vsync_clk = {
+ .halt_reg = 0x2328,
+ .clkr = {
+ .enable_reg = 0x2328,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_vsync_clk",
+ .parent_names = (const char *[]){
+ "vsync_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_misc_ahb_clk = {
+ .halt_reg = 0x502c,
+ .clkr = {
+ .enable_reg = 0x502c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_misc_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_mmssnoc_ahb_clk = {
+ .halt_reg = 0x5024,
+ .clkr = {
+ .enable_reg = 0x5024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_mmssnoc_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ },
+};
+
+static struct clk_branch mmss_mmssnoc_bto_ahb_clk = {
+ .halt_reg = 0x5028,
+ .clkr = {
+ .enable_reg = 0x5028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_mmssnoc_bto_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ },
+};
+
+static struct clk_branch mmss_mmssnoc_axi_clk = {
+ .halt_reg = 0x506c,
+ .clkr = {
+ .enable_reg = 0x506c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_mmssnoc_axi_clk",
+ .parent_names = (const char *[]){
+ "mmss_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_s0_axi_clk = {
+ .halt_reg = 0x5064,
+ .clkr = {
+ .enable_reg = 0x5064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_s0_axi_clk",
+ .parent_names = (const char *[]){
+ "mmss_axi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ },
+};
+
+static struct clk_branch ocmemcx_ahb_clk = {
+ .halt_reg = 0x405c,
+ .clkr = {
+ .enable_reg = 0x405c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "ocmemcx_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ocmemcx_ocmemnoc_clk = {
+ .halt_reg = 0x4058,
+ .clkr = {
+ .enable_reg = 0x4058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "ocmemcx_ocmemnoc_clk",
+ .parent_names = (const char *[]){
+ "ocmemnoc_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch oxili_ocmemgx_clk = {
+ .halt_reg = 0x402c,
+ .clkr = {
+ .enable_reg = 0x402c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "oxili_ocmemgx_clk",
+ .parent_names = (const char *[]){
+ "gfx3d_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ocmemnoc_clk = {
+ .halt_reg = 0x50b4,
+ .clkr = {
+ .enable_reg = 0x50b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "ocmemnoc_clk",
+ .parent_names = (const char *[]){
+ "ocmemnoc_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch oxili_gfx3d_clk = {
+ .halt_reg = 0x4028,
+ .clkr = {
+ .enable_reg = 0x4028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "oxili_gfx3d_clk",
+ .parent_names = (const char *[]){
+ "gfx3d_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch oxilicx_ahb_clk = {
+ .halt_reg = 0x403c,
+ .clkr = {
+ .enable_reg = 0x403c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "oxilicx_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch oxilicx_axi_clk = {
+ .halt_reg = 0x4038,
+ .clkr = {
+ .enable_reg = 0x4038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "oxilicx_axi_clk",
+ .parent_names = (const char *[]){
+ "mmss_axi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch venus0_ahb_clk = {
+ .halt_reg = 0x1030,
+ .clkr = {
+ .enable_reg = 0x1030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "venus0_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch venus0_axi_clk = {
+ .halt_reg = 0x1034,
+ .clkr = {
+ .enable_reg = 0x1034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "venus0_axi_clk",
+ .parent_names = (const char *[]){
+ "mmss_axi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch venus0_ocmemnoc_clk = {
+ .halt_reg = 0x1038,
+ .clkr = {
+ .enable_reg = 0x1038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "venus0_ocmemnoc_clk",
+ .parent_names = (const char *[]){
+ "ocmemnoc_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch venus0_vcodec0_clk = {
+ .halt_reg = 0x1028,
+ .clkr = {
+ .enable_reg = 0x1028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "venus0_vcodec0_clk",
+ .parent_names = (const char *[]){
+ "vcodec0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
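+/*
+ * Initial L/M/N and VCO/divider settings for MMPLL1 and MMPLL3; these are
+ * presumably written to the PLL registers at probe time rather than being
+ * left to the bootloader (the probe code sits further down in this file).
+ */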
+static const struct pll_config mmpll1_config = {
+ .l = 60,
+ .m = 25,
+ .n = 32,
+ .vco_val = 0x0,
+ .vco_mask = 0x3 << 20,
+ .pre_div_val = 0x0,
+ .pre_div_mask = 0x3 << 12,
+ .post_div_val = 0x0,
+ .post_div_mask = 0x3 << 8,
+ .mn_ena_mask = BIT(24),
+ .main_output_mask = BIT(0),
+};
+
+static struct pll_config mmpll3_config = {
+ .l = 48,
+ .m = 7,
+ .n = 16,
+ .vco_val = 0x0,
+ .vco_mask = 0x3 << 20,
+ .pre_div_val = 0x0,
+ .pre_div_mask = 0x3 << 12,
+ .post_div_val = 0x0,
+ .post_div_mask = 0x3 << 8,
+ .mn_ena_mask = BIT(24),
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+};
+
+static struct clk_regmap *mmcc_msm8974_clocks[] = {
+ [MMSS_AHB_CLK_SRC] = &mmss_ahb_clk_src.clkr,
+ [MMSS_AXI_CLK_SRC] = &mmss_axi_clk_src.clkr,
+ [OCMEMNOC_CLK_SRC] = &ocmemnoc_clk_src.clkr,
+ [MMPLL0] = &mmpll0.clkr,
+ [MMPLL0_VOTE] = &mmpll0_vote,
+ [MMPLL1] = &mmpll1.clkr,
+ [MMPLL1_VOTE] = &mmpll1_vote,
+ [MMPLL2] = &mmpll2.clkr,
+ [MMPLL3] = &mmpll3.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+ [CSI3_CLK_SRC] = &csi3_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [JPEG1_CLK_SRC] = &jpeg1_clk_src.clkr,
+ [JPEG2_CLK_SRC] = &jpeg2_clk_src.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+ [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+ [MCLK3_CLK_SRC] = &mclk3_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [CSI2PHYTIMER_CLK_SRC] = &csi2phytimer_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+ [EDPAUX_CLK_SRC] = &edpaux_clk_src.clkr,
+ [EDPLINK_CLK_SRC] = &edplink_clk_src.clkr,
+ [EDPPIXEL_CLK_SRC] = &edppixel_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+ [EXTPCLK_CLK_SRC] = &extpclk_clk_src.clkr,
+ [HDMI_CLK_SRC] = &hdmi_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [CAMSS_CCI_CCI_AHB_CLK] = &camss_cci_cci_ahb_clk.clkr,
+ [CAMSS_CCI_CCI_CLK] = &camss_cci_cci_clk.clkr,
+ [CAMSS_CSI0_AHB_CLK] = &camss_csi0_ahb_clk.clkr,
+ [CAMSS_CSI0_CLK] = &camss_csi0_clk.clkr,
+ [CAMSS_CSI0PHY_CLK] = &camss_csi0phy_clk.clkr,
+ [CAMSS_CSI0PIX_CLK] = &camss_csi0pix_clk.clkr,
+ [CAMSS_CSI0RDI_CLK] = &camss_csi0rdi_clk.clkr,
+ [CAMSS_CSI1_AHB_CLK] = &camss_csi1_ahb_clk.clkr,
+ [CAMSS_CSI1_CLK] = &camss_csi1_clk.clkr,
+ [CAMSS_CSI1PHY_CLK] = &camss_csi1phy_clk.clkr,
+ [CAMSS_CSI1PIX_CLK] = &camss_csi1pix_clk.clkr,
+ [CAMSS_CSI1RDI_CLK] = &camss_csi1rdi_clk.clkr,
+ [CAMSS_CSI2_AHB_CLK] = &camss_csi2_ahb_clk.clkr,
+ [CAMSS_CSI2_CLK] = &camss_csi2_clk.clkr,
+ [CAMSS_CSI2PHY_CLK] = &camss_csi2phy_clk.clkr,
+ [CAMSS_CSI2PIX_CLK] = &camss_csi2pix_clk.clkr,
+ [CAMSS_CSI2RDI_CLK] = &camss_csi2rdi_clk.clkr,
+ [CAMSS_CSI3_AHB_CLK] = &camss_csi3_ahb_clk.clkr,
+ [CAMSS_CSI3_CLK] = &camss_csi3_clk.clkr,
+ [CAMSS_CSI3PHY_CLK] = &camss_csi3phy_clk.clkr,
+ [CAMSS_CSI3PIX_CLK] = &camss_csi3pix_clk.clkr,
+ [CAMSS_CSI3RDI_CLK] = &camss_csi3rdi_clk.clkr,
+ [CAMSS_CSI_VFE0_CLK] = &camss_csi_vfe0_clk.clkr,
+ [CAMSS_CSI_VFE1_CLK] = &camss_csi_vfe1_clk.clkr,
+ [CAMSS_GP0_CLK] = &camss_gp0_clk.clkr,
+ [CAMSS_GP1_CLK] = &camss_gp1_clk.clkr,
+ [CAMSS_ISPIF_AHB_CLK] = &camss_ispif_ahb_clk.clkr,
+ [CAMSS_JPEG_JPEG0_CLK] = &camss_jpeg_jpeg0_clk.clkr,
+ [CAMSS_JPEG_JPEG1_CLK] = &camss_jpeg_jpeg1_clk.clkr,
+ [CAMSS_JPEG_JPEG2_CLK] = &camss_jpeg_jpeg2_clk.clkr,
+ [CAMSS_JPEG_JPEG_AHB_CLK] = &camss_jpeg_jpeg_ahb_clk.clkr,
+ [CAMSS_JPEG_JPEG_AXI_CLK] = &camss_jpeg_jpeg_axi_clk.clkr,
+ [CAMSS_JPEG_JPEG_OCMEMNOC_CLK] = &camss_jpeg_jpeg_ocmemnoc_clk.clkr,
+ [CAMSS_MCLK0_CLK] = &camss_mclk0_clk.clkr,
+ [CAMSS_MCLK1_CLK] = &camss_mclk1_clk.clkr,
+ [CAMSS_MCLK2_CLK] = &camss_mclk2_clk.clkr,
+ [CAMSS_MCLK3_CLK] = &camss_mclk3_clk.clkr,
+ [CAMSS_MICRO_AHB_CLK] = &camss_micro_ahb_clk.clkr,
+ [CAMSS_PHY0_CSI0PHYTIMER_CLK] = &camss_phy0_csi0phytimer_clk.clkr,
+ [CAMSS_PHY1_CSI1PHYTIMER_CLK] = &camss_phy1_csi1phytimer_clk.clkr,
+ [CAMSS_PHY2_CSI2PHYTIMER_CLK] = &camss_phy2_csi2phytimer_clk.clkr,
+ [CAMSS_TOP_AHB_CLK] = &camss_top_ahb_clk.clkr,
+ [CAMSS_VFE_CPP_AHB_CLK] = &camss_vfe_cpp_ahb_clk.clkr,
+ [CAMSS_VFE_CPP_CLK] = &camss_vfe_cpp_clk.clkr,
+ [CAMSS_VFE_VFE0_CLK] = &camss_vfe_vfe0_clk.clkr,
+ [CAMSS_VFE_VFE1_CLK] = &camss_vfe_vfe1_clk.clkr,
+ [CAMSS_VFE_VFE_AHB_CLK] = &camss_vfe_vfe_ahb_clk.clkr,
+ [CAMSS_VFE_VFE_AXI_CLK] = &camss_vfe_vfe_axi_clk.clkr,
+ [CAMSS_VFE_VFE_OCMEMNOC_CLK] = &camss_vfe_vfe_ocmemnoc_clk.clkr,
+ [MDSS_AHB_CLK] = &mdss_ahb_clk.clkr,
+ [MDSS_AXI_CLK] = &mdss_axi_clk.clkr,
+ [MDSS_BYTE0_CLK] = &mdss_byte0_clk.clkr,
+ [MDSS_BYTE1_CLK] = &mdss_byte1_clk.clkr,
+ [MDSS_EDPAUX_CLK] = &mdss_edpaux_clk.clkr,
+ [MDSS_EDPLINK_CLK] = &mdss_edplink_clk.clkr,
+ [MDSS_EDPPIXEL_CLK] = &mdss_edppixel_clk.clkr,
+ [MDSS_ESC0_CLK] = &mdss_esc0_clk.clkr,
+ [MDSS_ESC1_CLK] = &mdss_esc1_clk.clkr,
+ [MDSS_EXTPCLK_CLK] = &mdss_extpclk_clk.clkr,
+ [MDSS_HDMI_AHB_CLK] = &mdss_hdmi_ahb_clk.clkr,
+ [MDSS_HDMI_CLK] = &mdss_hdmi_clk.clkr,
+ [MDSS_MDP_CLK] = &mdss_mdp_clk.clkr,
+ [MDSS_MDP_LUT_CLK] = &mdss_mdp_lut_clk.clkr,
+ [MDSS_PCLK0_CLK] = &mdss_pclk0_clk.clkr,
+ [MDSS_PCLK1_CLK] = &mdss_pclk1_clk.clkr,
+ [MDSS_VSYNC_CLK] = &mdss_vsync_clk.clkr,
+ [MMSS_MISC_AHB_CLK] = &mmss_misc_ahb_clk.clkr,
+ [MMSS_MMSSNOC_AHB_CLK] = &mmss_mmssnoc_ahb_clk.clkr,
+ [MMSS_MMSSNOC_BTO_AHB_CLK] = &mmss_mmssnoc_bto_ahb_clk.clkr,
+ [MMSS_MMSSNOC_AXI_CLK] = &mmss_mmssnoc_axi_clk.clkr,
+ [MMSS_S0_AXI_CLK] = &mmss_s0_axi_clk.clkr,
+ [OCMEMCX_AHB_CLK] = &ocmemcx_ahb_clk.clkr,
+ [OCMEMCX_OCMEMNOC_CLK] = &ocmemcx_ocmemnoc_clk.clkr,
+ [OXILI_OCMEMGX_CLK] = &oxili_ocmemgx_clk.clkr,
+ [OCMEMNOC_CLK] = &ocmemnoc_clk.clkr,
+ [OXILI_GFX3D_CLK] = &oxili_gfx3d_clk.clkr,
+ [OXILICX_AHB_CLK] = &oxilicx_ahb_clk.clkr,
+ [OXILICX_AXI_CLK] = &oxilicx_axi_clk.clkr,
+ [VENUS0_AHB_CLK] = &venus0_ahb_clk.clkr,
+ [VENUS0_AXI_CLK] = &venus0_axi_clk.clkr,
+ [VENUS0_OCMEMNOC_CLK] = &venus0_ocmemnoc_clk.clkr,
+ [VENUS0_VCODEC0_CLK] = &venus0_vcodec0_clk.clkr,
+};
+
+static const struct qcom_reset_map mmcc_msm8974_resets[] = {
+ [SPDM_RESET] = { 0x0200 },
+ [SPDM_RM_RESET] = { 0x0300 },
+ [VENUS0_RESET] = { 0x1020 },
+ [MDSS_RESET] = { 0x2300 },
+ [CAMSS_PHY0_RESET] = { 0x3020 },
+ [CAMSS_PHY1_RESET] = { 0x3050 },
+ [CAMSS_PHY2_RESET] = { 0x3080 },
+ [CAMSS_CSI0_RESET] = { 0x30b0 },
+ [CAMSS_CSI0PHY_RESET] = { 0x30c0 },
+ [CAMSS_CSI0RDI_RESET] = { 0x30d0 },
+ [CAMSS_CSI0PIX_RESET] = { 0x30e0 },
+ [CAMSS_CSI1_RESET] = { 0x3120 },
+ [CAMSS_CSI1PHY_RESET] = { 0x3130 },
+ [CAMSS_CSI1RDI_RESET] = { 0x3140 },
+ [CAMSS_CSI1PIX_RESET] = { 0x3150 },
+ [CAMSS_CSI2_RESET] = { 0x3180 },
+ [CAMSS_CSI2PHY_RESET] = { 0x3190 },
+ [CAMSS_CSI2RDI_RESET] = { 0x31a0 },
+ [CAMSS_CSI2PIX_RESET] = { 0x31b0 },
+ [CAMSS_CSI3_RESET] = { 0x31e0 },
+ [CAMSS_CSI3PHY_RESET] = { 0x31f0 },
+ [CAMSS_CSI3RDI_RESET] = { 0x3200 },
+ [CAMSS_CSI3PIX_RESET] = { 0x3210 },
+ [CAMSS_ISPIF_RESET] = { 0x3220 },
+ [CAMSS_CCI_RESET] = { 0x3340 },
+ [CAMSS_MCLK0_RESET] = { 0x3380 },
+ [CAMSS_MCLK1_RESET] = { 0x33b0 },
+ [CAMSS_MCLK2_RESET] = { 0x33e0 },
+ [CAMSS_MCLK3_RESET] = { 0x3410 },
+ [CAMSS_GP0_RESET] = { 0x3440 },
+ [CAMSS_GP1_RESET] = { 0x3470 },
+ [CAMSS_TOP_RESET] = { 0x3480 },
+ [CAMSS_MICRO_RESET] = { 0x3490 },
+ [CAMSS_JPEG_RESET] = { 0x35a0 },
+ [CAMSS_VFE_RESET] = { 0x36a0 },
+ [CAMSS_CSI_VFE0_RESET] = { 0x3700 },
+ [CAMSS_CSI_VFE1_RESET] = { 0x3710 },
+ [OXILI_RESET] = { 0x4020 },
+ [OXILICX_RESET] = { 0x4030 },
+ [OCMEMCX_RESET] = { 0x4050 },
+ [MMSS_RBCRP_RESET] = { 0x4080 },
+ [MMSSNOCAHB_RESET] = { 0x5020 },
+ [MMSSNOCAXI_RESET] = { 0x5060 },
+ [OCMEMNOC_RESET] = { 0x50b0 },
+};
+
+static const struct regmap_config mmcc_msm8974_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x5104,
+ .fast_io = true,
+};
+
+static const struct of_device_id mmcc_msm8974_match_table[] = {
+ { .compatible = "qcom,mmcc-msm8974" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mmcc_msm8974_match_table);
+
+struct qcom_cc {
+ struct qcom_reset_controller reset;
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+
+static int mmcc_msm8974_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct resource *res;
+ int i, ret;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ struct clk_onecell_data *data;
+ struct clk **clks;
+ struct regmap *regmap;
+ size_t num_clks;
+ struct qcom_reset_controller *reset;
+ struct qcom_cc *cc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &mmcc_msm8974_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ num_clks = ARRAY_SIZE(mmcc_msm8974_clocks);
+ cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*clks) * num_clks,
+ GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ clks = cc->clks;
+ data = &cc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ clk_pll_configure_sr_hpm_lp(&mmpll1, regmap, &mmpll1_config, true);
+ clk_pll_configure_sr_hpm_lp(&mmpll3, regmap, &mmpll3_config, false);
+
+ for (i = 0; i < num_clks; i++) {
+ if (!mmcc_msm8974_clocks[i])
+ continue;
+ clk = devm_clk_register_regmap(dev, mmcc_msm8974_clocks[i]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+ if (ret)
+ return ret;
+
+ reset = &cc->reset;
+ reset->rcdev.of_node = dev->of_node;
+	reset->rcdev.ops = &qcom_reset_ops;
+	reset->rcdev.owner = THIS_MODULE;
+	reset->rcdev.nr_resets = ARRAY_SIZE(mmcc_msm8974_resets);
+	reset->regmap = regmap;
+	reset->reset_map = mmcc_msm8974_resets;
+ platform_set_drvdata(pdev, &reset->rcdev);
+
+ ret = reset_controller_register(&reset->rcdev);
+ if (ret)
+ of_clk_del_provider(dev->of_node);
+
+ return ret;
+}
+
+static int mmcc_msm8974_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ reset_controller_unregister(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static struct platform_driver mmcc_msm8974_driver = {
+ .probe = mmcc_msm8974_probe,
+ .remove = mmcc_msm8974_remove,
+ .driver = {
+ .name = "mmcc-msm8974",
+ .owner = THIS_MODULE,
+ .of_match_table = mmcc_msm8974_match_table,
+ },
+};
+module_platform_driver(mmcc_msm8974_driver);
+
+MODULE_DESCRIPTION("QCOM MMCC MSM8974 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mmcc-msm8974");
diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
new file mode 100644
index 000000000000..6c977d3a8590
--- /dev/null
+++ b/drivers/clk/qcom/reset.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/export.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/delay.h>
+
+#include "reset.h"
+
+static int qcom_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ rcdev->ops->assert(rcdev, id);
+ udelay(1);
+ rcdev->ops->deassert(rcdev, id);
+ return 0;
+}
+
+static int
+qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct qcom_reset_controller *rst;
+ const struct qcom_reset_map *map;
+ u32 mask;
+
+ rst = to_qcom_reset_controller(rcdev);
+ map = &rst->reset_map[id];
+ mask = BIT(map->bit);
+
+ return regmap_update_bits(rst->regmap, map->reg, mask, mask);
+}
+
+static int
+qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct qcom_reset_controller *rst;
+ const struct qcom_reset_map *map;
+ u32 mask;
+
+ rst = to_qcom_reset_controller(rcdev);
+ map = &rst->reset_map[id];
+ mask = BIT(map->bit);
+
+ return regmap_update_bits(rst->regmap, map->reg, mask, 0);
+}
+
+struct reset_control_ops qcom_reset_ops = {
+ .reset = qcom_reset,
+ .assert = qcom_reset_assert,
+ .deassert = qcom_reset_deassert,
+};
+EXPORT_SYMBOL_GPL(qcom_reset_ops);
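A hypothetical consumer sketch of the ops above (assuming a device node with a "resets = <&mmcc ...>" phandle; the function name is made up). reset_control_reset() lands in qcom_reset(), which asserts the mapped bit, waits about 1 us and deasserts it.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_pulse_reset(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* assert -> udelay(1) -> deassert, via qcom_reset_ops */
	return reset_control_reset(rst);
}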
diff --git a/drivers/clk/qcom/reset.h b/drivers/clk/qcom/reset.h
new file mode 100644
index 000000000000..0e11e2130f97
--- /dev/null
+++ b/drivers/clk/qcom/reset.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_RESET_H__
+#define __QCOM_CLK_RESET_H__
+
+#include <linux/reset-controller.h>
+
+struct qcom_reset_map {
+ unsigned int reg;
+ u8 bit;
+};
+
+struct regmap;
+
+struct qcom_reset_controller {
+ const struct qcom_reset_map *reset_map;
+ struct regmap *regmap;
+ struct reset_controller_dev rcdev;
+};
+
+#define to_qcom_reset_controller(r) \
+	container_of(r, struct qcom_reset_controller, rcdev)
+
+extern struct reset_control_ops qcom_reset_ops;
+
+#endif
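To make the mapping concrete, a worked example (illustrative only) of how one entry of the mmcc_msm8974_resets table above translates into register accesses through the regmap:

/*
 * With [VENUS0_RESET] = { 0x1020 } and .bit left at its default of 0,
 * qcom_reset_assert() performs
 *
 *	regmap_update_bits(rst->regmap, 0x1020, BIT(0), BIT(0));
 *
 * and qcom_reset_deassert() clears the same bit:
 *
 *	regmap_update_bits(rst->regmap, 0x1020, BIT(0), 0);
 */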
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 39b40aaede2b..884187fbfe00 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -14,9 +14,17 @@
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <dt-bindings/clk/exynos-audss-clk.h>
+enum exynos_audss_clk_type {
+ TYPE_EXYNOS4210,
+ TYPE_EXYNOS5250,
+ TYPE_EXYNOS5420,
+};
+
static DEFINE_SPINLOCK(lock);
static struct clk **clk_table;
static void __iomem *reg_base;
@@ -26,17 +34,13 @@ static struct clk_onecell_data clk_data;
#define ASS_CLK_DIV 0x4
#define ASS_CLK_GATE 0x8
+#ifdef CONFIG_PM_SLEEP
static unsigned long reg_save[][2] = {
{ASS_CLK_SRC, 0},
{ASS_CLK_DIV, 0},
{ASS_CLK_GATE, 0},
};
-/* list of all parent clock list */
-static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
-static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
-
-#ifdef CONFIG_PM_SLEEP
static int exynos_audss_clk_suspend(void)
{
int i;
@@ -61,31 +65,69 @@ static struct syscore_ops exynos_audss_clk_syscore_ops = {
};
#endif /* CONFIG_PM_SLEEP */
+static const struct of_device_id exynos_audss_clk_of_match[] = {
+ { .compatible = "samsung,exynos4210-audss-clock",
+ .data = (void *)TYPE_EXYNOS4210, },
+ { .compatible = "samsung,exynos5250-audss-clock",
+ .data = (void *)TYPE_EXYNOS5250, },
+ { .compatible = "samsung,exynos5420-audss-clock",
+ .data = (void *)TYPE_EXYNOS5420, },
+ {},
+};
+
/* register exynos_audss clocks */
-static void __init exynos_audss_clk_init(struct device_node *np)
+static int exynos_audss_clk_probe(struct platform_device *pdev)
{
- reg_base = of_iomap(np, 0);
- if (!reg_base) {
- pr_err("%s: failed to map audss registers\n", __func__);
- return;
+ int i, ret = 0;
+ struct resource *res;
+ const char *mout_audss_p[] = {"fin_pll", "fout_epll"};
+ const char *mout_i2s_p[] = {"mout_audss", "cdclk0", "sclk_audio0"};
+ const char *sclk_pcm_p = "sclk_pcm0";
+ struct clk *pll_ref, *pll_in, *cdclk, *sclk_audio, *sclk_pcm_in;
+ const struct of_device_id *match;
+ enum exynos_audss_clk_type variant;
+
+ match = of_match_node(exynos_audss_clk_of_match, pdev->dev.of_node);
+ if (!match)
+ return -EINVAL;
+ variant = (enum exynos_audss_clk_type)match->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(reg_base)) {
+ dev_err(&pdev->dev, "failed to map audss registers\n");
+ return PTR_ERR(reg_base);
}
- clk_table = kzalloc(sizeof(struct clk *) * EXYNOS_AUDSS_MAX_CLKS,
+ clk_table = devm_kzalloc(&pdev->dev,
+ sizeof(struct clk *) * EXYNOS_AUDSS_MAX_CLKS,
GFP_KERNEL);
- if (!clk_table) {
- pr_err("%s: could not allocate clk lookup table\n", __func__);
- return;
- }
+ if (!clk_table)
+ return -ENOMEM;
clk_data.clks = clk_table;
- clk_data.clk_num = EXYNOS_AUDSS_MAX_CLKS;
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
-
+ if (variant == TYPE_EXYNOS5420)
+ clk_data.clk_num = EXYNOS_AUDSS_MAX_CLKS;
+ else
+ clk_data.clk_num = EXYNOS_AUDSS_MAX_CLKS - 1;
+
+ pll_ref = devm_clk_get(&pdev->dev, "pll_ref");
+ pll_in = devm_clk_get(&pdev->dev, "pll_in");
+ if (!IS_ERR(pll_ref))
+ mout_audss_p[0] = __clk_get_name(pll_ref);
+ if (!IS_ERR(pll_in))
+ mout_audss_p[1] = __clk_get_name(pll_in);
clk_table[EXYNOS_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
mout_audss_p, ARRAY_SIZE(mout_audss_p),
CLK_SET_RATE_NO_REPARENT,
reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);
+ cdclk = devm_clk_get(&pdev->dev, "cdclk");
+ sclk_audio = devm_clk_get(&pdev->dev, "sclk_audio");
+ if (!IS_ERR(cdclk))
+ mout_i2s_p[1] = __clk_get_name(cdclk);
+ if (!IS_ERR(sclk_audio))
+ mout_i2s_p[2] = __clk_get_name(sclk_audio);
clk_table[EXYNOS_MOUT_I2S] = clk_register_mux(NULL, "mout_i2s",
mout_i2s_p, ARRAY_SIZE(mout_i2s_p),
CLK_SET_RATE_NO_REPARENT,
@@ -119,17 +161,88 @@ static void __init exynos_audss_clk_init(struct device_node *np)
"sclk_pcm", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 4, 0, &lock);
+ sclk_pcm_in = devm_clk_get(&pdev->dev, "sclk_pcm_in");
+ if (!IS_ERR(sclk_pcm_in))
+ sclk_pcm_p = __clk_get_name(sclk_pcm_in);
clk_table[EXYNOS_SCLK_PCM] = clk_register_gate(NULL, "sclk_pcm",
- "div_pcm0", CLK_SET_RATE_PARENT,
+ sclk_pcm_p, CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 5, 0, &lock);
+ if (variant == TYPE_EXYNOS5420) {
+ clk_table[EXYNOS_ADMA] = clk_register_gate(NULL, "adma",
+ "dout_srp", CLK_SET_RATE_PARENT,
+ reg_base + ASS_CLK_GATE, 9, 0, &lock);
+ }
+
+ for (i = 0; i < clk_data.clk_num; i++) {
+ if (IS_ERR(clk_table[i])) {
+ dev_err(&pdev->dev, "failed to register clock %d\n", i);
+ ret = PTR_ERR(clk_table[i]);
+ goto unregister;
+ }
+ }
+
+ ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+ &clk_data);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add clock provider\n");
+ goto unregister;
+ }
+
#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&exynos_audss_clk_syscore_ops);
#endif
- pr_info("Exynos: Audss: clock setup completed\n");
+ dev_info(&pdev->dev, "setup completed\n");
+
+ return 0;
+
+unregister:
+ for (i = 0; i < clk_data.clk_num; i++) {
+ if (!IS_ERR(clk_table[i]))
+ clk_unregister(clk_table[i]);
+ }
+
+ return ret;
+}
+
+static int exynos_audss_clk_remove(struct platform_device *pdev)
+{
+ int i;
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ for (i = 0; i < clk_data.clk_num; i++) {
+ if (!IS_ERR(clk_table[i]))
+ clk_unregister(clk_table[i]);
+ }
+
+ return 0;
}
-CLK_OF_DECLARE(exynos4210_audss_clk, "samsung,exynos4210-audss-clock",
- exynos_audss_clk_init);
-CLK_OF_DECLARE(exynos5250_audss_clk, "samsung,exynos5250-audss-clock",
- exynos_audss_clk_init);
+
+static struct platform_driver exynos_audss_clk_driver = {
+ .driver = {
+ .name = "exynos-audss-clk",
+ .owner = THIS_MODULE,
+ .of_match_table = exynos_audss_clk_of_match,
+ },
+ .probe = exynos_audss_clk_probe,
+ .remove = exynos_audss_clk_remove,
+};
+
+static int __init exynos_audss_clk_init(void)
+{
+ return platform_driver_register(&exynos_audss_clk_driver);
+}
+core_initcall(exynos_audss_clk_init);
+
+static void __exit exynos_audss_clk_exit(void)
+{
+ platform_driver_unregister(&exynos_audss_clk_driver);
+}
+module_exit(exynos_audss_clk_exit);
+
+MODULE_AUTHOR("Padmavathi Venna <padma.v@samsung.com>");
+MODULE_DESCRIPTION("Exynos Audio Subsystem Clock Controller");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:exynos-audss-clk");
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index ad5ff50c5f28..010f071af883 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -10,6 +10,7 @@
* Common Clock Framework support for all Exynos4 SoCs.
*/
+#include <dt-bindings/clock/exynos4.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
@@ -39,7 +40,7 @@
#define SRC_TOP1 0xc214
#define SRC_CAM 0xc220
#define SRC_TV 0xc224
-#define SRC_MFC 0xcc28
+#define SRC_MFC 0xc228
#define SRC_G3D 0xc22c
#define E4210_SRC_IMAGE 0xc230
#define SRC_LCD0 0xc234
@@ -130,68 +131,6 @@ enum exynos4_plls {
};
/*
- * Let each supported clock get a unique id. This id is used to lookup the clock
- * for device tree based platforms. The clocks are categorized into three
- * sections: core, sclk gate and bus interface gate clocks.
- *
- * When adding a new clock to this list, it is advised to choose a clock
- * category and add it to the end of that category. That is because the the
- * device tree source file is referring to these ids and any change in the
- * sequence number of existing clocks will require corresponding change in the
- * device tree files. This limitation would go away when pre-processor support
- * for dtc would be available.
- */
-enum exynos4_clks {
- none,
-
- /* core clocks */
- xxti, xusbxti, fin_pll, fout_apll, fout_mpll, fout_epll, fout_vpll,
- sclk_apll, sclk_mpll, sclk_epll, sclk_vpll, arm_clk, aclk200, aclk100,
- aclk160, aclk133, mout_mpll_user_t, mout_mpll_user_c, mout_core,
- mout_apll, /* 20 */
-
- /* gate for special clocks (sclk) */
- sclk_fimc0 = 128, sclk_fimc1, sclk_fimc2, sclk_fimc3, sclk_cam0,
- sclk_cam1, sclk_csis0, sclk_csis1, sclk_hdmi, sclk_mixer, sclk_dac,
- sclk_pixel, sclk_fimd0, sclk_mdnie0, sclk_mdnie_pwm0, sclk_mipi0,
- sclk_audio0, sclk_mmc0, sclk_mmc1, sclk_mmc2, sclk_mmc3, sclk_mmc4,
- sclk_sata, sclk_uart0, sclk_uart1, sclk_uart2, sclk_uart3, sclk_uart4,
- sclk_audio1, sclk_audio2, sclk_spdif, sclk_spi0, sclk_spi1, sclk_spi2,
- sclk_slimbus, sclk_fimd1, sclk_mipi1, sclk_pcm1, sclk_pcm2, sclk_i2s1,
- sclk_i2s2, sclk_mipihsi, sclk_mfc, sclk_pcm0, sclk_g3d, sclk_pwm_isp,
- sclk_spi0_isp, sclk_spi1_isp, sclk_uart_isp, sclk_fimg2d,
-
- /* gate clocks */
- fimc0 = 256, fimc1, fimc2, fimc3, csis0, csis1, jpeg, smmu_fimc0,
- smmu_fimc1, smmu_fimc2, smmu_fimc3, smmu_jpeg, vp, mixer, tvenc, hdmi,
- smmu_tv, mfc, smmu_mfcl, smmu_mfcr, g3d, g2d, rotator, mdma, smmu_g2d,
- smmu_rotator, smmu_mdma, fimd0, mie0, mdnie0, dsim0, smmu_fimd0, fimd1,
- mie1, dsim1, smmu_fimd1, pdma0, pdma1, pcie_phy, sata_phy, tsi, sdmmc0,
- sdmmc1, sdmmc2, sdmmc3, sdmmc4, sata, sromc, usb_host, usb_device, pcie,
- onenand, nfcon, smmu_pcie, gps, smmu_gps, uart0, uart1, uart2, uart3,
- uart4, i2c0, i2c1, i2c2, i2c3, i2c4, i2c5, i2c6, i2c7, i2c_hdmi, tsadc,
- spi0, spi1, spi2, i2s1, i2s2, pcm0, i2s0, pcm1, pcm2, pwm, slimbus,
- spdif, ac97, modemif, chipid, sysreg, hdmi_cec, mct, wdt, rtc, keyif,
- audss, mipi_hsi, mdma2, pixelasyncm0, pixelasyncm1, fimc_lite0,
- fimc_lite1, ppmuispx, ppmuispmx, fimc_isp, fimc_drc, fimc_fd, mcuisp,
- gicisp, smmu_isp, smmu_drc, smmu_fd, smmu_lite0, smmu_lite1, mcuctl_isp,
- mpwm_isp, i2c0_isp, i2c1_isp, mtcadc_isp, pwm_isp, wdt_isp, uart_isp,
- asyncaxim, smmu_ispcx, spi0_isp, spi1_isp, pwm_isp_sclk, spi0_isp_sclk,
- spi1_isp_sclk, uart_isp_sclk, tmu_apbif,
-
- /* mux clocks */
- mout_fimc0 = 384, mout_fimc1, mout_fimc2, mout_fimc3, mout_cam0,
- mout_cam1, mout_csis0, mout_csis1, mout_g3d0, mout_g3d1, mout_g3d,
- aclk400_mcuisp,
-
- /* div clocks */
- div_isp0 = 450, div_isp1, div_mcuisp0, div_mcuisp1, div_aclk200,
- div_aclk400_mcuisp,
-
- nr_clks,
-};
-
-/*
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
*/
@@ -347,255 +286,256 @@ PNAME(mout_user_aclk266_gps_p4x12) = {"fin_pll", "div_aclk266_gps", };
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata = {
- FRATE(xxti, "xxti", NULL, CLK_IS_ROOT, 0),
- FRATE(xusbxti, "xusbxti", NULL, CLK_IS_ROOT, 0),
+ FRATE(CLK_XXTI, "xxti", NULL, CLK_IS_ROOT, 0),
+ FRATE(CLK_XUSBXTI, "xusbxti", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks generated inside the soc */
static struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
- FRATE(none, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000),
- FRATE(none, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
- FRATE(none, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(0, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000),
+ FRATE(0, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
+ FRATE(0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
};
static struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initdata = {
- FRATE(none, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(0, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000),
};
/* list of mux clocks supported in all exynos4 soc's */
static struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
- MUX_FA(mout_apll, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+ MUX_FA(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
CLK_SET_RATE_PARENT, 0, "mout_apll"),
- MUX(none, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
- MUX(none, "mout_mfc1", sclk_evpll_p, SRC_MFC, 4, 1),
- MUX(none, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1),
- MUX_F(mout_g3d1, "mout_g3d1", sclk_evpll_p, SRC_G3D, 4, 1,
+ MUX(0, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
+ MUX(0, "mout_mfc1", sclk_evpll_p, SRC_MFC, 4, 1),
+ MUX(0, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1),
+ MUX_F(CLK_MOUT_G3D1, "mout_g3d1", sclk_evpll_p, SRC_G3D, 4, 1,
CLK_SET_RATE_PARENT, 0),
- MUX_F(mout_g3d, "mout_g3d", mout_g3d_p, SRC_G3D, 8, 1,
+ MUX_F(CLK_MOUT_G3D, "mout_g3d", mout_g3d_p, SRC_G3D, 8, 1,
CLK_SET_RATE_PARENT, 0),
- MUX(none, "mout_spdif", mout_spdif_p, SRC_PERIL1, 8, 2),
- MUX(none, "mout_onenand1", mout_onenand1_p, SRC_TOP0, 0, 1),
- MUX(sclk_epll, "sclk_epll", mout_epll_p, SRC_TOP0, 4, 1),
- MUX(none, "mout_onenand", mout_onenand_p, SRC_TOP0, 28, 1),
+ MUX(0, "mout_spdif", mout_spdif_p, SRC_PERIL1, 8, 2),
+ MUX(0, "mout_onenand1", mout_onenand1_p, SRC_TOP0, 0, 1),
+ MUX(CLK_SCLK_EPLL, "sclk_epll", mout_epll_p, SRC_TOP0, 4, 1),
+ MUX(0, "mout_onenand", mout_onenand_p, SRC_TOP0, 28, 1),
};
/* list of mux clocks supported in exynos4210 soc */
static struct samsung_mux_clock exynos4210_mux_early[] __initdata = {
- MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
+ MUX(0, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
};
static struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
- MUX(none, "mout_aclk200", sclk_ampll_p4210, SRC_TOP0, 12, 1),
- MUX(none, "mout_aclk100", sclk_ampll_p4210, SRC_TOP0, 16, 1),
- MUX(none, "mout_aclk160", sclk_ampll_p4210, SRC_TOP0, 20, 1),
- MUX(none, "mout_aclk133", sclk_ampll_p4210, SRC_TOP0, 24, 1),
- MUX(none, "mout_mixer", mout_mixer_p4210, SRC_TV, 4, 1),
- MUX(none, "mout_dac", mout_dac_p4210, SRC_TV, 8, 1),
- MUX(none, "mout_g2d0", sclk_ampll_p4210, E4210_SRC_IMAGE, 0, 1),
- MUX(none, "mout_g2d1", sclk_evpll_p, E4210_SRC_IMAGE, 4, 1),
- MUX(none, "mout_g2d", mout_g2d_p, E4210_SRC_IMAGE, 8, 1),
- MUX(none, "mout_fimd1", group1_p4210, E4210_SRC_LCD1, 0, 4),
- MUX(none, "mout_mipi1", group1_p4210, E4210_SRC_LCD1, 12, 4),
- MUX(sclk_mpll, "sclk_mpll", mout_mpll_p, SRC_CPU, 8, 1),
- MUX(mout_core, "mout_core", mout_core_p4210, SRC_CPU, 16, 1),
- MUX(sclk_vpll, "sclk_vpll", sclk_vpll_p4210, SRC_TOP0, 8, 1),
- MUX(mout_fimc0, "mout_fimc0", group1_p4210, SRC_CAM, 0, 4),
- MUX(mout_fimc1, "mout_fimc1", group1_p4210, SRC_CAM, 4, 4),
- MUX(mout_fimc2, "mout_fimc2", group1_p4210, SRC_CAM, 8, 4),
- MUX(mout_fimc3, "mout_fimc3", group1_p4210, SRC_CAM, 12, 4),
- MUX(mout_cam0, "mout_cam0", group1_p4210, SRC_CAM, 16, 4),
- MUX(mout_cam1, "mout_cam1", group1_p4210, SRC_CAM, 20, 4),
- MUX(mout_csis0, "mout_csis0", group1_p4210, SRC_CAM, 24, 4),
- MUX(mout_csis1, "mout_csis1", group1_p4210, SRC_CAM, 28, 4),
- MUX(none, "mout_mfc0", sclk_ampll_p4210, SRC_MFC, 0, 1),
- MUX_F(mout_g3d0, "mout_g3d0", sclk_ampll_p4210, SRC_G3D, 0, 1,
+ MUX(0, "mout_aclk200", sclk_ampll_p4210, SRC_TOP0, 12, 1),
+ MUX(0, "mout_aclk100", sclk_ampll_p4210, SRC_TOP0, 16, 1),
+ MUX(0, "mout_aclk160", sclk_ampll_p4210, SRC_TOP0, 20, 1),
+ MUX(0, "mout_aclk133", sclk_ampll_p4210, SRC_TOP0, 24, 1),
+ MUX(0, "mout_mixer", mout_mixer_p4210, SRC_TV, 4, 1),
+ MUX(0, "mout_dac", mout_dac_p4210, SRC_TV, 8, 1),
+ MUX(0, "mout_g2d0", sclk_ampll_p4210, E4210_SRC_IMAGE, 0, 1),
+ MUX(0, "mout_g2d1", sclk_evpll_p, E4210_SRC_IMAGE, 4, 1),
+ MUX(0, "mout_g2d", mout_g2d_p, E4210_SRC_IMAGE, 8, 1),
+ MUX(0, "mout_fimd1", group1_p4210, E4210_SRC_LCD1, 0, 4),
+ MUX(0, "mout_mipi1", group1_p4210, E4210_SRC_LCD1, 12, 4),
+ MUX(CLK_SCLK_MPLL, "sclk_mpll", mout_mpll_p, SRC_CPU, 8, 1),
+ MUX(CLK_MOUT_CORE, "mout_core", mout_core_p4210, SRC_CPU, 16, 1),
+ MUX(CLK_SCLK_VPLL, "sclk_vpll", sclk_vpll_p4210, SRC_TOP0, 8, 1),
+ MUX(CLK_MOUT_FIMC0, "mout_fimc0", group1_p4210, SRC_CAM, 0, 4),
+ MUX(CLK_MOUT_FIMC1, "mout_fimc1", group1_p4210, SRC_CAM, 4, 4),
+ MUX(CLK_MOUT_FIMC2, "mout_fimc2", group1_p4210, SRC_CAM, 8, 4),
+ MUX(CLK_MOUT_FIMC3, "mout_fimc3", group1_p4210, SRC_CAM, 12, 4),
+ MUX(CLK_MOUT_CAM0, "mout_cam0", group1_p4210, SRC_CAM, 16, 4),
+ MUX(CLK_MOUT_CAM1, "mout_cam1", group1_p4210, SRC_CAM, 20, 4),
+ MUX(CLK_MOUT_CSIS0, "mout_csis0", group1_p4210, SRC_CAM, 24, 4),
+ MUX(CLK_MOUT_CSIS1, "mout_csis1", group1_p4210, SRC_CAM, 28, 4),
+ MUX(0, "mout_mfc0", sclk_ampll_p4210, SRC_MFC, 0, 1),
+ MUX_F(CLK_MOUT_G3D0, "mout_g3d0", sclk_ampll_p4210, SRC_G3D, 0, 1,
CLK_SET_RATE_PARENT, 0),
- MUX(none, "mout_fimd0", group1_p4210, SRC_LCD0, 0, 4),
- MUX(none, "mout_mipi0", group1_p4210, SRC_LCD0, 12, 4),
- MUX(none, "mout_audio0", mout_audio0_p4210, SRC_MAUDIO, 0, 4),
- MUX(none, "mout_mmc0", group1_p4210, SRC_FSYS, 0, 4),
- MUX(none, "mout_mmc1", group1_p4210, SRC_FSYS, 4, 4),
- MUX(none, "mout_mmc2", group1_p4210, SRC_FSYS, 8, 4),
- MUX(none, "mout_mmc3", group1_p4210, SRC_FSYS, 12, 4),
- MUX(none, "mout_mmc4", group1_p4210, SRC_FSYS, 16, 4),
- MUX(none, "mout_sata", sclk_ampll_p4210, SRC_FSYS, 24, 1),
- MUX(none, "mout_uart0", group1_p4210, SRC_PERIL0, 0, 4),
- MUX(none, "mout_uart1", group1_p4210, SRC_PERIL0, 4, 4),
- MUX(none, "mout_uart2", group1_p4210, SRC_PERIL0, 8, 4),
- MUX(none, "mout_uart3", group1_p4210, SRC_PERIL0, 12, 4),
- MUX(none, "mout_uart4", group1_p4210, SRC_PERIL0, 16, 4),
- MUX(none, "mout_audio1", mout_audio1_p4210, SRC_PERIL1, 0, 4),
- MUX(none, "mout_audio2", mout_audio2_p4210, SRC_PERIL1, 4, 4),
- MUX(none, "mout_spi0", group1_p4210, SRC_PERIL1, 16, 4),
- MUX(none, "mout_spi1", group1_p4210, SRC_PERIL1, 20, 4),
- MUX(none, "mout_spi2", group1_p4210, SRC_PERIL1, 24, 4),
+ MUX(0, "mout_fimd0", group1_p4210, SRC_LCD0, 0, 4),
+ MUX(0, "mout_mipi0", group1_p4210, SRC_LCD0, 12, 4),
+ MUX(0, "mout_audio0", mout_audio0_p4210, SRC_MAUDIO, 0, 4),
+ MUX(0, "mout_mmc0", group1_p4210, SRC_FSYS, 0, 4),
+ MUX(0, "mout_mmc1", group1_p4210, SRC_FSYS, 4, 4),
+ MUX(0, "mout_mmc2", group1_p4210, SRC_FSYS, 8, 4),
+ MUX(0, "mout_mmc3", group1_p4210, SRC_FSYS, 12, 4),
+ MUX(0, "mout_mmc4", group1_p4210, SRC_FSYS, 16, 4),
+ MUX(0, "mout_sata", sclk_ampll_p4210, SRC_FSYS, 24, 1),
+ MUX(0, "mout_uart0", group1_p4210, SRC_PERIL0, 0, 4),
+ MUX(0, "mout_uart1", group1_p4210, SRC_PERIL0, 4, 4),
+ MUX(0, "mout_uart2", group1_p4210, SRC_PERIL0, 8, 4),
+ MUX(0, "mout_uart3", group1_p4210, SRC_PERIL0, 12, 4),
+ MUX(0, "mout_uart4", group1_p4210, SRC_PERIL0, 16, 4),
+ MUX(0, "mout_audio1", mout_audio1_p4210, SRC_PERIL1, 0, 4),
+ MUX(0, "mout_audio2", mout_audio2_p4210, SRC_PERIL1, 4, 4),
+ MUX(0, "mout_spi0", group1_p4210, SRC_PERIL1, 16, 4),
+ MUX(0, "mout_spi1", group1_p4210, SRC_PERIL1, 20, 4),
+ MUX(0, "mout_spi2", group1_p4210, SRC_PERIL1, 24, 4),
};
/* list of mux clocks supported in exynos4x12 soc */
static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
- MUX(mout_mpll_user_c, "mout_mpll_user_c", mout_mpll_user_p4x12,
+ MUX(CLK_MOUT_MPLL_USER_C, "mout_mpll_user_c", mout_mpll_user_p4x12,
SRC_CPU, 24, 1),
- MUX(none, "mout_aclk266_gps", aclk_p4412, SRC_TOP1, 4, 1),
- MUX(none, "mout_aclk400_mcuisp", aclk_p4412, SRC_TOP1, 8, 1),
- MUX(mout_mpll_user_t, "mout_mpll_user_t", mout_mpll_user_p4x12,
+ MUX(0, "mout_aclk266_gps", aclk_p4412, SRC_TOP1, 4, 1),
+ MUX(0, "mout_aclk400_mcuisp", aclk_p4412, SRC_TOP1, 8, 1),
+ MUX(CLK_MOUT_MPLL_USER_T, "mout_mpll_user_t", mout_mpll_user_p4x12,
SRC_TOP1, 12, 1),
- MUX(none, "mout_user_aclk266_gps", mout_user_aclk266_gps_p4x12,
+ MUX(0, "mout_user_aclk266_gps", mout_user_aclk266_gps_p4x12,
SRC_TOP1, 16, 1),
- MUX(aclk200, "aclk200", mout_user_aclk200_p4x12, SRC_TOP1, 20, 1),
- MUX(aclk400_mcuisp, "aclk400_mcuisp", mout_user_aclk400_mcuisp_p4x12,
- SRC_TOP1, 24, 1),
- MUX(none, "mout_aclk200", aclk_p4412, SRC_TOP0, 12, 1),
- MUX(none, "mout_aclk100", aclk_p4412, SRC_TOP0, 16, 1),
- MUX(none, "mout_aclk160", aclk_p4412, SRC_TOP0, 20, 1),
- MUX(none, "mout_aclk133", aclk_p4412, SRC_TOP0, 24, 1),
- MUX(none, "mout_mdnie0", group1_p4x12, SRC_LCD0, 4, 4),
- MUX(none, "mout_mdnie_pwm0", group1_p4x12, SRC_LCD0, 8, 4),
- MUX(none, "mout_sata", sclk_ampll_p4x12, SRC_FSYS, 24, 1),
- MUX(none, "mout_jpeg0", sclk_ampll_p4x12, E4X12_SRC_CAM1, 0, 1),
- MUX(none, "mout_jpeg1", sclk_evpll_p, E4X12_SRC_CAM1, 4, 1),
- MUX(none, "mout_jpeg", mout_jpeg_p, E4X12_SRC_CAM1, 8, 1),
- MUX(sclk_mpll, "sclk_mpll", mout_mpll_p, SRC_DMC, 12, 1),
- MUX(sclk_vpll, "sclk_vpll", mout_vpll_p, SRC_TOP0, 8, 1),
- MUX(mout_core, "mout_core", mout_core_p4x12, SRC_CPU, 16, 1),
- MUX(mout_fimc0, "mout_fimc0", group1_p4x12, SRC_CAM, 0, 4),
- MUX(mout_fimc1, "mout_fimc1", group1_p4x12, SRC_CAM, 4, 4),
- MUX(mout_fimc2, "mout_fimc2", group1_p4x12, SRC_CAM, 8, 4),
- MUX(mout_fimc3, "mout_fimc3", group1_p4x12, SRC_CAM, 12, 4),
- MUX(mout_cam0, "mout_cam0", group1_p4x12, SRC_CAM, 16, 4),
- MUX(mout_cam1, "mout_cam1", group1_p4x12, SRC_CAM, 20, 4),
- MUX(mout_csis0, "mout_csis0", group1_p4x12, SRC_CAM, 24, 4),
- MUX(mout_csis1, "mout_csis1", group1_p4x12, SRC_CAM, 28, 4),
- MUX(none, "mout_mfc0", sclk_ampll_p4x12, SRC_MFC, 0, 1),
- MUX_F(mout_g3d0, "mout_g3d0", sclk_ampll_p4x12, SRC_G3D, 0, 1,
+ MUX(CLK_ACLK200, "aclk200", mout_user_aclk200_p4x12, SRC_TOP1, 20, 1),
+ MUX(CLK_ACLK400_MCUISP, "aclk400_mcuisp",
+ mout_user_aclk400_mcuisp_p4x12, SRC_TOP1, 24, 1),
+ MUX(0, "mout_aclk200", aclk_p4412, SRC_TOP0, 12, 1),
+ MUX(0, "mout_aclk100", aclk_p4412, SRC_TOP0, 16, 1),
+ MUX(0, "mout_aclk160", aclk_p4412, SRC_TOP0, 20, 1),
+ MUX(0, "mout_aclk133", aclk_p4412, SRC_TOP0, 24, 1),
+ MUX(0, "mout_mdnie0", group1_p4x12, SRC_LCD0, 4, 4),
+ MUX(0, "mout_mdnie_pwm0", group1_p4x12, SRC_LCD0, 8, 4),
+ MUX(0, "mout_sata", sclk_ampll_p4x12, SRC_FSYS, 24, 1),
+ MUX(0, "mout_jpeg0", sclk_ampll_p4x12, E4X12_SRC_CAM1, 0, 1),
+ MUX(0, "mout_jpeg1", sclk_evpll_p, E4X12_SRC_CAM1, 4, 1),
+ MUX(0, "mout_jpeg", mout_jpeg_p, E4X12_SRC_CAM1, 8, 1),
+ MUX(CLK_SCLK_MPLL, "sclk_mpll", mout_mpll_p, SRC_DMC, 12, 1),
+ MUX(CLK_SCLK_VPLL, "sclk_vpll", mout_vpll_p, SRC_TOP0, 8, 1),
+ MUX(CLK_MOUT_CORE, "mout_core", mout_core_p4x12, SRC_CPU, 16, 1),
+ MUX(CLK_MOUT_FIMC0, "mout_fimc0", group1_p4x12, SRC_CAM, 0, 4),
+ MUX(CLK_MOUT_FIMC1, "mout_fimc1", group1_p4x12, SRC_CAM, 4, 4),
+ MUX(CLK_MOUT_FIMC2, "mout_fimc2", group1_p4x12, SRC_CAM, 8, 4),
+ MUX(CLK_MOUT_FIMC3, "mout_fimc3", group1_p4x12, SRC_CAM, 12, 4),
+ MUX(CLK_MOUT_CAM0, "mout_cam0", group1_p4x12, SRC_CAM, 16, 4),
+ MUX(CLK_MOUT_CAM1, "mout_cam1", group1_p4x12, SRC_CAM, 20, 4),
+ MUX(CLK_MOUT_CSIS0, "mout_csis0", group1_p4x12, SRC_CAM, 24, 4),
+ MUX(CLK_MOUT_CSIS1, "mout_csis1", group1_p4x12, SRC_CAM, 28, 4),
+ MUX(0, "mout_mfc0", sclk_ampll_p4x12, SRC_MFC, 0, 1),
+ MUX_F(CLK_MOUT_G3D0, "mout_g3d0", sclk_ampll_p4x12, SRC_G3D, 0, 1,
CLK_SET_RATE_PARENT, 0),
- MUX(none, "mout_fimd0", group1_p4x12, SRC_LCD0, 0, 4),
- MUX(none, "mout_mipi0", group1_p4x12, SRC_LCD0, 12, 4),
- MUX(none, "mout_audio0", mout_audio0_p4x12, SRC_MAUDIO, 0, 4),
- MUX(none, "mout_mmc0", group1_p4x12, SRC_FSYS, 0, 4),
- MUX(none, "mout_mmc1", group1_p4x12, SRC_FSYS, 4, 4),
- MUX(none, "mout_mmc2", group1_p4x12, SRC_FSYS, 8, 4),
- MUX(none, "mout_mmc3", group1_p4x12, SRC_FSYS, 12, 4),
- MUX(none, "mout_mmc4", group1_p4x12, SRC_FSYS, 16, 4),
- MUX(none, "mout_mipihsi", aclk_p4412, SRC_FSYS, 24, 1),
- MUX(none, "mout_uart0", group1_p4x12, SRC_PERIL0, 0, 4),
- MUX(none, "mout_uart1", group1_p4x12, SRC_PERIL0, 4, 4),
- MUX(none, "mout_uart2", group1_p4x12, SRC_PERIL0, 8, 4),
- MUX(none, "mout_uart3", group1_p4x12, SRC_PERIL0, 12, 4),
- MUX(none, "mout_uart4", group1_p4x12, SRC_PERIL0, 16, 4),
- MUX(none, "mout_audio1", mout_audio1_p4x12, SRC_PERIL1, 0, 4),
- MUX(none, "mout_audio2", mout_audio2_p4x12, SRC_PERIL1, 4, 4),
- MUX(none, "mout_spi0", group1_p4x12, SRC_PERIL1, 16, 4),
- MUX(none, "mout_spi1", group1_p4x12, SRC_PERIL1, 20, 4),
- MUX(none, "mout_spi2", group1_p4x12, SRC_PERIL1, 24, 4),
- MUX(none, "mout_pwm_isp", group1_p4x12, E4X12_SRC_ISP, 0, 4),
- MUX(none, "mout_spi0_isp", group1_p4x12, E4X12_SRC_ISP, 4, 4),
- MUX(none, "mout_spi1_isp", group1_p4x12, E4X12_SRC_ISP, 8, 4),
- MUX(none, "mout_uart_isp", group1_p4x12, E4X12_SRC_ISP, 12, 4),
- MUX(none, "mout_g2d0", sclk_ampll_p4210, SRC_DMC, 20, 1),
- MUX(none, "mout_g2d1", sclk_evpll_p, SRC_DMC, 24, 1),
- MUX(none, "mout_g2d", mout_g2d_p, SRC_DMC, 28, 1),
+ MUX(0, "mout_fimd0", group1_p4x12, SRC_LCD0, 0, 4),
+ MUX(0, "mout_mipi0", group1_p4x12, SRC_LCD0, 12, 4),
+ MUX(0, "mout_audio0", mout_audio0_p4x12, SRC_MAUDIO, 0, 4),
+ MUX(0, "mout_mmc0", group1_p4x12, SRC_FSYS, 0, 4),
+ MUX(0, "mout_mmc1", group1_p4x12, SRC_FSYS, 4, 4),
+ MUX(0, "mout_mmc2", group1_p4x12, SRC_FSYS, 8, 4),
+ MUX(0, "mout_mmc3", group1_p4x12, SRC_FSYS, 12, 4),
+ MUX(0, "mout_mmc4", group1_p4x12, SRC_FSYS, 16, 4),
+ MUX(0, "mout_mipihsi", aclk_p4412, SRC_FSYS, 24, 1),
+ MUX(0, "mout_uart0", group1_p4x12, SRC_PERIL0, 0, 4),
+ MUX(0, "mout_uart1", group1_p4x12, SRC_PERIL0, 4, 4),
+ MUX(0, "mout_uart2", group1_p4x12, SRC_PERIL0, 8, 4),
+ MUX(0, "mout_uart3", group1_p4x12, SRC_PERIL0, 12, 4),
+ MUX(0, "mout_uart4", group1_p4x12, SRC_PERIL0, 16, 4),
+ MUX(0, "mout_audio1", mout_audio1_p4x12, SRC_PERIL1, 0, 4),
+ MUX(0, "mout_audio2", mout_audio2_p4x12, SRC_PERIL1, 4, 4),
+ MUX(0, "mout_spi0", group1_p4x12, SRC_PERIL1, 16, 4),
+ MUX(0, "mout_spi1", group1_p4x12, SRC_PERIL1, 20, 4),
+ MUX(0, "mout_spi2", group1_p4x12, SRC_PERIL1, 24, 4),
+ MUX(0, "mout_pwm_isp", group1_p4x12, E4X12_SRC_ISP, 0, 4),
+ MUX(0, "mout_spi0_isp", group1_p4x12, E4X12_SRC_ISP, 4, 4),
+ MUX(0, "mout_spi1_isp", group1_p4x12, E4X12_SRC_ISP, 8, 4),
+ MUX(0, "mout_uart_isp", group1_p4x12, E4X12_SRC_ISP, 12, 4),
+ MUX(0, "mout_g2d0", sclk_ampll_p4210, SRC_DMC, 20, 1),
+ MUX(0, "mout_g2d1", sclk_evpll_p, SRC_DMC, 24, 1),
+ MUX(0, "mout_g2d", mout_g2d_p, SRC_DMC, 28, 1),
};
/* list of divider clocks supported in all exynos4 soc's */
static struct samsung_div_clock exynos4_div_clks[] __initdata = {
- DIV(none, "div_core", "mout_core", DIV_CPU0, 0, 3),
- DIV(none, "div_core2", "div_core", DIV_CPU0, 28, 3),
- DIV(none, "div_fimc0", "mout_fimc0", DIV_CAM, 0, 4),
- DIV(none, "div_fimc1", "mout_fimc1", DIV_CAM, 4, 4),
- DIV(none, "div_fimc2", "mout_fimc2", DIV_CAM, 8, 4),
- DIV(none, "div_fimc3", "mout_fimc3", DIV_CAM, 12, 4),
- DIV(none, "div_cam0", "mout_cam0", DIV_CAM, 16, 4),
- DIV(none, "div_cam1", "mout_cam1", DIV_CAM, 20, 4),
- DIV(none, "div_csis0", "mout_csis0", DIV_CAM, 24, 4),
- DIV(none, "div_csis1", "mout_csis1", DIV_CAM, 28, 4),
- DIV(sclk_mfc, "sclk_mfc", "mout_mfc", DIV_MFC, 0, 4),
- DIV_F(none, "div_g3d", "mout_g3d", DIV_G3D, 0, 4,
+ DIV(0, "div_core", "mout_core", DIV_CPU0, 0, 3),
+ DIV(0, "div_core2", "div_core", DIV_CPU0, 28, 3),
+ DIV(0, "div_fimc0", "mout_fimc0", DIV_CAM, 0, 4),
+ DIV(0, "div_fimc1", "mout_fimc1", DIV_CAM, 4, 4),
+ DIV(0, "div_fimc2", "mout_fimc2", DIV_CAM, 8, 4),
+ DIV(0, "div_fimc3", "mout_fimc3", DIV_CAM, 12, 4),
+ DIV(0, "div_cam0", "mout_cam0", DIV_CAM, 16, 4),
+ DIV(0, "div_cam1", "mout_cam1", DIV_CAM, 20, 4),
+ DIV(0, "div_csis0", "mout_csis0", DIV_CAM, 24, 4),
+ DIV(0, "div_csis1", "mout_csis1", DIV_CAM, 28, 4),
+ DIV(CLK_SCLK_MFC, "sclk_mfc", "mout_mfc", DIV_MFC, 0, 4),
+ DIV_F(0, "div_g3d", "mout_g3d", DIV_G3D, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(0, "div_fimd0", "mout_fimd0", DIV_LCD0, 0, 4),
+ DIV(0, "div_mipi0", "mout_mipi0", DIV_LCD0, 16, 4),
+ DIV(0, "div_audio0", "mout_audio0", DIV_MAUDIO, 0, 4),
+ DIV(CLK_SCLK_PCM0, "sclk_pcm0", "sclk_audio0", DIV_MAUDIO, 4, 8),
+ DIV(0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
+ DIV(0, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
+ DIV(0, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4),
+ DIV(0, "div_mmc3", "mout_mmc3", DIV_FSYS2, 16, 4),
+ DIV(CLK_SCLK_PIXEL, "sclk_pixel", "sclk_vpll", DIV_TV, 0, 4),
+ DIV(CLK_ACLK100, "aclk100", "mout_aclk100", DIV_TOP, 4, 4),
+ DIV(CLK_ACLK160, "aclk160", "mout_aclk160", DIV_TOP, 8, 3),
+ DIV(CLK_ACLK133, "aclk133", "mout_aclk133", DIV_TOP, 12, 3),
+ DIV(0, "div_onenand", "mout_onenand1", DIV_TOP, 16, 3),
+ DIV(CLK_SCLK_SLIMBUS, "sclk_slimbus", "sclk_epll", DIV_PERIL3, 4, 4),
+ DIV(CLK_SCLK_PCM1, "sclk_pcm1", "sclk_audio1", DIV_PERIL4, 4, 8),
+ DIV(CLK_SCLK_PCM2, "sclk_pcm2", "sclk_audio2", DIV_PERIL4, 20, 8),
+ DIV(CLK_SCLK_I2S1, "sclk_i2s1", "sclk_audio1", DIV_PERIL5, 0, 6),
+ DIV(CLK_SCLK_I2S2, "sclk_i2s2", "sclk_audio2", DIV_PERIL5, 8, 6),
+ DIV(0, "div_mmc4", "mout_mmc4", DIV_FSYS3, 0, 4),
+ DIV_F(0, "div_mmc_pre4", "div_mmc4", DIV_FSYS3, 8, 8,
CLK_SET_RATE_PARENT, 0),
- DIV(none, "div_fimd0", "mout_fimd0", DIV_LCD0, 0, 4),
- DIV(none, "div_mipi0", "mout_mipi0", DIV_LCD0, 16, 4),
- DIV(none, "div_audio0", "mout_audio0", DIV_MAUDIO, 0, 4),
- DIV(sclk_pcm0, "sclk_pcm0", "sclk_audio0", DIV_MAUDIO, 4, 8),
- DIV(none, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
- DIV(none, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
- DIV(none, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4),
- DIV(none, "div_mmc3", "mout_mmc3", DIV_FSYS2, 16, 4),
- DIV(sclk_pixel, "sclk_pixel", "sclk_vpll", DIV_TV, 0, 4),
- DIV(aclk100, "aclk100", "mout_aclk100", DIV_TOP, 4, 4),
- DIV(aclk160, "aclk160", "mout_aclk160", DIV_TOP, 8, 3),
- DIV(aclk133, "aclk133", "mout_aclk133", DIV_TOP, 12, 3),
- DIV(none, "div_onenand", "mout_onenand1", DIV_TOP, 16, 3),
- DIV(sclk_slimbus, "sclk_slimbus", "sclk_epll", DIV_PERIL3, 4, 4),
- DIV(sclk_pcm1, "sclk_pcm1", "sclk_audio1", DIV_PERIL4, 4, 8),
- DIV(sclk_pcm2, "sclk_pcm2", "sclk_audio2", DIV_PERIL4, 20, 8),
- DIV(sclk_i2s1, "sclk_i2s1", "sclk_audio1", DIV_PERIL5, 0, 6),
- DIV(sclk_i2s2, "sclk_i2s2", "sclk_audio2", DIV_PERIL5, 8, 6),
- DIV(none, "div_mmc4", "mout_mmc4", DIV_FSYS3, 0, 4),
- DIV(none, "div_mmc_pre4", "div_mmc4", DIV_FSYS3, 8, 8),
- DIV(none, "div_uart0", "mout_uart0", DIV_PERIL0, 0, 4),
- DIV(none, "div_uart1", "mout_uart1", DIV_PERIL0, 4, 4),
- DIV(none, "div_uart2", "mout_uart2", DIV_PERIL0, 8, 4),
- DIV(none, "div_uart3", "mout_uart3", DIV_PERIL0, 12, 4),
- DIV(none, "div_uart4", "mout_uart4", DIV_PERIL0, 16, 4),
- DIV(none, "div_spi0", "mout_spi0", DIV_PERIL1, 0, 4),
- DIV(none, "div_spi_pre0", "div_spi0", DIV_PERIL1, 8, 8),
- DIV(none, "div_spi1", "mout_spi1", DIV_PERIL1, 16, 4),
- DIV(none, "div_spi_pre1", "div_spi1", DIV_PERIL1, 24, 8),
- DIV(none, "div_spi2", "mout_spi2", DIV_PERIL2, 0, 4),
- DIV(none, "div_spi_pre2", "div_spi2", DIV_PERIL2, 8, 8),
- DIV(none, "div_audio1", "mout_audio1", DIV_PERIL4, 0, 4),
- DIV(none, "div_audio2", "mout_audio2", DIV_PERIL4, 16, 4),
- DIV(arm_clk, "arm_clk", "div_core2", DIV_CPU0, 28, 3),
- DIV(sclk_apll, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
- DIV_F(none, "div_mipi_pre0", "div_mipi0", DIV_LCD0, 20, 4,
+ DIV(0, "div_uart0", "mout_uart0", DIV_PERIL0, 0, 4),
+ DIV(0, "div_uart1", "mout_uart1", DIV_PERIL0, 4, 4),
+ DIV(0, "div_uart2", "mout_uart2", DIV_PERIL0, 8, 4),
+ DIV(0, "div_uart3", "mout_uart3", DIV_PERIL0, 12, 4),
+ DIV(0, "div_uart4", "mout_uart4", DIV_PERIL0, 16, 4),
+ DIV(0, "div_spi0", "mout_spi0", DIV_PERIL1, 0, 4),
+ DIV(0, "div_spi_pre0", "div_spi0", DIV_PERIL1, 8, 8),
+ DIV(0, "div_spi1", "mout_spi1", DIV_PERIL1, 16, 4),
+ DIV(0, "div_spi_pre1", "div_spi1", DIV_PERIL1, 24, 8),
+ DIV(0, "div_spi2", "mout_spi2", DIV_PERIL2, 0, 4),
+ DIV(0, "div_spi_pre2", "div_spi2", DIV_PERIL2, 8, 8),
+ DIV(0, "div_audio1", "mout_audio1", DIV_PERIL4, 0, 4),
+ DIV(0, "div_audio2", "mout_audio2", DIV_PERIL4, 16, 4),
+ DIV(CLK_ARM_CLK, "arm_clk", "div_core2", DIV_CPU0, 28, 3),
+ DIV(CLK_SCLK_APLL, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
+ DIV_F(0, "div_mipi_pre0", "div_mipi0", DIV_LCD0, 20, 4,
CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_mmc_pre0", "div_mmc0", DIV_FSYS1, 8, 8,
+ DIV_F(0, "div_mmc_pre0", "div_mmc0", DIV_FSYS1, 8, 8,
CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_mmc_pre1", "div_mmc1", DIV_FSYS1, 24, 8,
+ DIV_F(0, "div_mmc_pre1", "div_mmc1", DIV_FSYS1, 24, 8,
CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_mmc_pre2", "div_mmc2", DIV_FSYS2, 8, 8,
+ DIV_F(0, "div_mmc_pre2", "div_mmc2", DIV_FSYS2, 8, 8,
CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_mmc_pre3", "div_mmc3", DIV_FSYS2, 24, 8,
+ DIV_F(0, "div_mmc_pre3", "div_mmc3", DIV_FSYS2, 24, 8,
CLK_SET_RATE_PARENT, 0),
};
/* list of divider clocks supported in exynos4210 soc */
static struct samsung_div_clock exynos4210_div_clks[] __initdata = {
- DIV(aclk200, "aclk200", "mout_aclk200", DIV_TOP, 0, 3),
- DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_IMAGE, 0, 4),
- DIV(none, "div_fimd1", "mout_fimd1", E4210_DIV_LCD1, 0, 4),
- DIV(none, "div_mipi1", "mout_mipi1", E4210_DIV_LCD1, 16, 4),
- DIV(none, "div_sata", "mout_sata", DIV_FSYS0, 20, 4),
- DIV_F(none, "div_mipi_pre1", "div_mipi1", E4210_DIV_LCD1, 20, 4,
+ DIV(CLK_ACLK200, "aclk200", "mout_aclk200", DIV_TOP, 0, 3),
+ DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_IMAGE, 0, 4),
+ DIV(0, "div_fimd1", "mout_fimd1", E4210_DIV_LCD1, 0, 4),
+ DIV(0, "div_mipi1", "mout_mipi1", E4210_DIV_LCD1, 16, 4),
+ DIV(0, "div_sata", "mout_sata", DIV_FSYS0, 20, 4),
+ DIV_F(0, "div_mipi_pre1", "div_mipi1", E4210_DIV_LCD1, 20, 4,
CLK_SET_RATE_PARENT, 0),
};
/* list of divider clocks supported in exynos4x12 soc */
static struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
- DIV(none, "div_mdnie0", "mout_mdnie0", DIV_LCD0, 4, 4),
- DIV(none, "div_mdnie_pwm0", "mout_mdnie_pwm0", DIV_LCD0, 8, 4),
- DIV(none, "div_mdnie_pwm_pre0", "div_mdnie_pwm0", DIV_LCD0, 12, 4),
- DIV(none, "div_mipihsi", "mout_mipihsi", DIV_FSYS0, 20, 4),
- DIV(none, "div_jpeg", "mout_jpeg", E4X12_DIV_CAM1, 0, 4),
- DIV(div_aclk200, "div_aclk200", "mout_aclk200", DIV_TOP, 0, 3),
- DIV(none, "div_aclk266_gps", "mout_aclk266_gps", DIV_TOP, 20, 3),
- DIV(div_aclk400_mcuisp, "div_aclk400_mcuisp", "mout_aclk400_mcuisp",
+ DIV(0, "div_mdnie0", "mout_mdnie0", DIV_LCD0, 4, 4),
+ DIV(0, "div_mdnie_pwm0", "mout_mdnie_pwm0", DIV_LCD0, 8, 4),
+ DIV(0, "div_mdnie_pwm_pre0", "div_mdnie_pwm0", DIV_LCD0, 12, 4),
+ DIV(0, "div_mipihsi", "mout_mipihsi", DIV_FSYS0, 20, 4),
+ DIV(0, "div_jpeg", "mout_jpeg", E4X12_DIV_CAM1, 0, 4),
+ DIV(CLK_DIV_ACLK200, "div_aclk200", "mout_aclk200", DIV_TOP, 0, 3),
+ DIV(0, "div_aclk266_gps", "mout_aclk266_gps", DIV_TOP, 20, 3),
+ DIV(CLK_DIV_ACLK400_MCUISP, "div_aclk400_mcuisp", "mout_aclk400_mcuisp",
DIV_TOP, 24, 3),
- DIV(none, "div_pwm_isp", "mout_pwm_isp", E4X12_DIV_ISP, 0, 4),
- DIV(none, "div_spi0_isp", "mout_spi0_isp", E4X12_DIV_ISP, 4, 4),
- DIV(none, "div_spi0_isp_pre", "div_spi0_isp", E4X12_DIV_ISP, 8, 8),
- DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4),
- DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8),
- DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4),
- DIV_F(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3,
+ DIV(0, "div_pwm_isp", "mout_pwm_isp", E4X12_DIV_ISP, 0, 4),
+ DIV(0, "div_spi0_isp", "mout_spi0_isp", E4X12_DIV_ISP, 4, 4),
+ DIV(0, "div_spi0_isp_pre", "div_spi0_isp", E4X12_DIV_ISP, 8, 8),
+ DIV(0, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4),
+ DIV(0, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8),
+ DIV(0, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4),
+ DIV_F(CLK_DIV_ISP0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3,
CLK_GET_RATE_NOCACHE, 0),
- DIV_F(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3,
+ DIV_F(CLK_DIV_ISP1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3,
CLK_GET_RATE_NOCACHE, 0),
- DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
- DIV_F(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1,
+ DIV(0, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
+ DIV_F(CLK_DIV_MCUISP0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1,
4, 3, CLK_GET_RATE_NOCACHE, 0),
- DIV_F(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
+ DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
8, 3, CLK_GET_RATE_NOCACHE, 0),
- DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
+ DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
};
/* list of gate clocks supported in all exynos4 soc's */
@@ -605,333 +545,341 @@ static struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
* the device name and clock alias names specified below for some
* of the clocks can be removed.
*/
- GATE(sclk_hdmi, "sclk_hdmi", "mout_hdmi", SRC_MASK_TV, 0, 0, 0),
- GATE(sclk_spdif, "sclk_spdif", "mout_spdif", SRC_MASK_PERIL1, 8, 0, 0),
- GATE(jpeg, "jpeg", "aclk160", GATE_IP_CAM, 6, 0, 0),
- GATE(mie0, "mie0", "aclk160", GATE_IP_LCD0, 1, 0, 0),
- GATE(dsim0, "dsim0", "aclk160", GATE_IP_LCD0, 3, 0, 0),
- GATE(fimd1, "fimd1", "aclk160", E4210_GATE_IP_LCD1, 0, 0, 0),
- GATE(mie1, "mie1", "aclk160", E4210_GATE_IP_LCD1, 1, 0, 0),
- GATE(dsim1, "dsim1", "aclk160", E4210_GATE_IP_LCD1, 3, 0, 0),
- GATE(smmu_fimd1, "smmu_fimd1", "aclk160", E4210_GATE_IP_LCD1, 4, 0, 0),
- GATE(tsi, "tsi", "aclk133", GATE_IP_FSYS, 4, 0, 0),
- GATE(sromc, "sromc", "aclk133", GATE_IP_FSYS, 11, 0, 0),
- GATE(sclk_g3d, "sclk_g3d", "div_g3d", GATE_IP_G3D, 0,
+ GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi", SRC_MASK_TV, 0, 0, 0),
+ GATE(CLK_SCLK_SPDIF, "sclk_spdif", "mout_spdif", SRC_MASK_PERIL1, 8, 0,
+ 0),
+ GATE(CLK_JPEG, "jpeg", "aclk160", GATE_IP_CAM, 6, 0, 0),
+ GATE(CLK_MIE0, "mie0", "aclk160", GATE_IP_LCD0, 1, 0, 0),
+ GATE(CLK_DSIM0, "dsim0", "aclk160", GATE_IP_LCD0, 3, 0, 0),
+ GATE(CLK_FIMD1, "fimd1", "aclk160", E4210_GATE_IP_LCD1, 0, 0, 0),
+ GATE(CLK_MIE1, "mie1", "aclk160", E4210_GATE_IP_LCD1, 1, 0, 0),
+ GATE(CLK_DSIM1, "dsim1", "aclk160", E4210_GATE_IP_LCD1, 3, 0, 0),
+ GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "aclk160", E4210_GATE_IP_LCD1, 4, 0,
+ 0),
+ GATE(CLK_TSI, "tsi", "aclk133", GATE_IP_FSYS, 4, 0, 0),
+ GATE(CLK_SROMC, "sromc", "aclk133", GATE_IP_FSYS, 11, 0, 0),
+ GATE(CLK_SCLK_G3D, "sclk_g3d", "div_g3d", GATE_IP_G3D, 0,
CLK_SET_RATE_PARENT, 0),
- GATE(usb_device, "usb_device", "aclk133", GATE_IP_FSYS, 13, 0, 0),
- GATE(onenand, "onenand", "aclk133", GATE_IP_FSYS, 15, 0, 0),
- GATE(nfcon, "nfcon", "aclk133", GATE_IP_FSYS, 16, 0, 0),
- GATE(gps, "gps", "aclk133", GATE_IP_GPS, 0, 0, 0),
- GATE(smmu_gps, "smmu_gps", "aclk133", GATE_IP_GPS, 1, 0, 0),
- GATE(slimbus, "slimbus", "aclk100", GATE_IP_PERIL, 25, 0, 0),
- GATE(sclk_cam0, "sclk_cam0", "div_cam0", GATE_SCLK_CAM, 4,
+ GATE(CLK_USB_DEVICE, "usb_device", "aclk133", GATE_IP_FSYS, 13, 0, 0),
+ GATE(CLK_ONENAND, "onenand", "aclk133", GATE_IP_FSYS, 15, 0, 0),
+ GATE(CLK_NFCON, "nfcon", "aclk133", GATE_IP_FSYS, 16, 0, 0),
+ GATE(CLK_GPS, "gps", "aclk133", GATE_IP_GPS, 0, 0, 0),
+ GATE(CLK_SMMU_GPS, "smmu_gps", "aclk133", GATE_IP_GPS, 1, 0, 0),
+ GATE(CLK_SLIMBUS, "slimbus", "aclk100", GATE_IP_PERIL, 25, 0, 0),
+ GATE(CLK_SCLK_CAM0, "sclk_cam0", "div_cam0", GATE_SCLK_CAM, 4,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_cam1, "sclk_cam1", "div_cam1", GATE_SCLK_CAM, 5,
+ GATE(CLK_SCLK_CAM1, "sclk_cam1", "div_cam1", GATE_SCLK_CAM, 5,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mipi0, "sclk_mipi0", "div_mipi_pre0",
+ GATE(CLK_SCLK_MIPI0, "sclk_mipi0", "div_mipi_pre0",
SRC_MASK_LCD0, 12, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_audio0, "sclk_audio0", "div_audio0", SRC_MASK_MAUDIO, 0,
+ GATE(CLK_SCLK_AUDIO0, "sclk_audio0", "div_audio0", SRC_MASK_MAUDIO, 0,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_audio1, "sclk_audio1", "div_audio1", SRC_MASK_PERIL1, 0,
+ GATE(CLK_SCLK_AUDIO1, "sclk_audio1", "div_audio1", SRC_MASK_PERIL1, 0,
CLK_SET_RATE_PARENT, 0),
- GATE(vp, "vp", "aclk160", GATE_IP_TV, 0, 0, 0),
- GATE(mixer, "mixer", "aclk160", GATE_IP_TV, 1, 0, 0),
- GATE(hdmi, "hdmi", "aclk160", GATE_IP_TV, 3, 0, 0),
- GATE(pwm, "pwm", "aclk100", GATE_IP_PERIL, 24, 0, 0),
- GATE(sdmmc4, "sdmmc4", "aclk133", GATE_IP_FSYS, 9, 0, 0),
- GATE(usb_host, "usb_host", "aclk133", GATE_IP_FSYS, 12, 0, 0),
- GATE(sclk_fimc0, "sclk_fimc0", "div_fimc0", SRC_MASK_CAM, 0,
+ GATE(CLK_VP, "vp", "aclk160", GATE_IP_TV, 0, 0, 0),
+ GATE(CLK_MIXER, "mixer", "aclk160", GATE_IP_TV, 1, 0, 0),
+ GATE(CLK_HDMI, "hdmi", "aclk160", GATE_IP_TV, 3, 0, 0),
+ GATE(CLK_PWM, "pwm", "aclk100", GATE_IP_PERIL, 24, 0, 0),
+ GATE(CLK_SDMMC4, "sdmmc4", "aclk133", GATE_IP_FSYS, 9, 0, 0),
+ GATE(CLK_USB_HOST, "usb_host", "aclk133", GATE_IP_FSYS, 12, 0, 0),
+ GATE(CLK_SCLK_FIMC0, "sclk_fimc0", "div_fimc0", SRC_MASK_CAM, 0,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_fimc1, "sclk_fimc1", "div_fimc1", SRC_MASK_CAM, 4,
+ GATE(CLK_SCLK_FIMC1, "sclk_fimc1", "div_fimc1", SRC_MASK_CAM, 4,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_fimc2, "sclk_fimc2", "div_fimc2", SRC_MASK_CAM, 8,
+ GATE(CLK_SCLK_FIMC2, "sclk_fimc2", "div_fimc2", SRC_MASK_CAM, 8,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_fimc3, "sclk_fimc3", "div_fimc3", SRC_MASK_CAM, 12,
+ GATE(CLK_SCLK_FIMC3, "sclk_fimc3", "div_fimc3", SRC_MASK_CAM, 12,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_csis0, "sclk_csis0", "div_csis0", SRC_MASK_CAM, 24,
+ GATE(CLK_SCLK_CSIS0, "sclk_csis0", "div_csis0", SRC_MASK_CAM, 24,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_csis1, "sclk_csis1", "div_csis1", SRC_MASK_CAM, 28,
+ GATE(CLK_SCLK_CSIS1, "sclk_csis1", "div_csis1", SRC_MASK_CAM, 28,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_fimd0, "sclk_fimd0", "div_fimd0", SRC_MASK_LCD0, 0,
+ GATE(CLK_SCLK_FIMD0, "sclk_fimd0", "div_fimd0", SRC_MASK_LCD0, 0,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc0, "sclk_mmc0", "div_mmc_pre0", SRC_MASK_FSYS, 0,
+ GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc_pre0", SRC_MASK_FSYS, 0,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc1, "sclk_mmc1", "div_mmc_pre1", SRC_MASK_FSYS, 4,
+ GATE(CLK_SCLK_MMC1, "sclk_mmc1", "div_mmc_pre1", SRC_MASK_FSYS, 4,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc2, "sclk_mmc2", "div_mmc_pre2", SRC_MASK_FSYS, 8,
+ GATE(CLK_SCLK_MMC2, "sclk_mmc2", "div_mmc_pre2", SRC_MASK_FSYS, 8,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc3, "sclk_mmc3", "div_mmc_pre3", SRC_MASK_FSYS, 12,
+ GATE(CLK_SCLK_MMC3, "sclk_mmc3", "div_mmc_pre3", SRC_MASK_FSYS, 12,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc4, "sclk_mmc4", "div_mmc_pre4", SRC_MASK_FSYS, 16,
+ GATE(CLK_SCLK_MMC4, "sclk_mmc4", "div_mmc_pre4", SRC_MASK_FSYS, 16,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart0, "uclk0", "div_uart0", SRC_MASK_PERIL0, 0,
+ GATE(CLK_SCLK_UART0, "uclk0", "div_uart0", SRC_MASK_PERIL0, 0,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart1, "uclk1", "div_uart1", SRC_MASK_PERIL0, 4,
+ GATE(CLK_SCLK_UART1, "uclk1", "div_uart1", SRC_MASK_PERIL0, 4,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart2, "uclk2", "div_uart2", SRC_MASK_PERIL0, 8,
+ GATE(CLK_SCLK_UART2, "uclk2", "div_uart2", SRC_MASK_PERIL0, 8,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart3, "uclk3", "div_uart3", SRC_MASK_PERIL0, 12,
+ GATE(CLK_SCLK_UART3, "uclk3", "div_uart3", SRC_MASK_PERIL0, 12,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart4, "uclk4", "div_uart4", SRC_MASK_PERIL0, 16,
+ GATE(CLK_SCLK_UART4, "uclk4", "div_uart4", SRC_MASK_PERIL0, 16,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_audio2, "sclk_audio2", "div_audio2", SRC_MASK_PERIL1, 4,
+ GATE(CLK_SCLK_AUDIO2, "sclk_audio2", "div_audio2", SRC_MASK_PERIL1, 4,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi0, "sclk_spi0", "div_spi_pre0", SRC_MASK_PERIL1, 16,
+ GATE(CLK_SCLK_SPI0, "sclk_spi0", "div_spi_pre0", SRC_MASK_PERIL1, 16,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi1, "sclk_spi1", "div_spi_pre1", SRC_MASK_PERIL1, 20,
+ GATE(CLK_SCLK_SPI1, "sclk_spi1", "div_spi_pre1", SRC_MASK_PERIL1, 20,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi2, "sclk_spi2", "div_spi_pre2", SRC_MASK_PERIL1, 24,
+ GATE(CLK_SCLK_SPI2, "sclk_spi2", "div_spi_pre2", SRC_MASK_PERIL1, 24,
CLK_SET_RATE_PARENT, 0),
- GATE(fimc0, "fimc0", "aclk160", GATE_IP_CAM, 0,
+ GATE(CLK_FIMC0, "fimc0", "aclk160", GATE_IP_CAM, 0,
0, 0),
- GATE(fimc1, "fimc1", "aclk160", GATE_IP_CAM, 1,
+ GATE(CLK_FIMC1, "fimc1", "aclk160", GATE_IP_CAM, 1,
0, 0),
- GATE(fimc2, "fimc2", "aclk160", GATE_IP_CAM, 2,
+ GATE(CLK_FIMC2, "fimc2", "aclk160", GATE_IP_CAM, 2,
0, 0),
- GATE(fimc3, "fimc3", "aclk160", GATE_IP_CAM, 3,
+ GATE(CLK_FIMC3, "fimc3", "aclk160", GATE_IP_CAM, 3,
0, 0),
- GATE(csis0, "csis0", "aclk160", GATE_IP_CAM, 4,
+ GATE(CLK_CSIS0, "csis0", "aclk160", GATE_IP_CAM, 4,
0, 0),
- GATE(csis1, "csis1", "aclk160", GATE_IP_CAM, 5,
+ GATE(CLK_CSIS1, "csis1", "aclk160", GATE_IP_CAM, 5,
0, 0),
- GATE(smmu_fimc0, "smmu_fimc0", "aclk160", GATE_IP_CAM, 7,
+ GATE(CLK_SMMU_FIMC0, "smmu_fimc0", "aclk160", GATE_IP_CAM, 7,
0, 0),
- GATE(smmu_fimc1, "smmu_fimc1", "aclk160", GATE_IP_CAM, 8,
+ GATE(CLK_SMMU_FIMC1, "smmu_fimc1", "aclk160", GATE_IP_CAM, 8,
0, 0),
- GATE(smmu_fimc2, "smmu_fimc2", "aclk160", GATE_IP_CAM, 9,
+ GATE(CLK_SMMU_FIMC2, "smmu_fimc2", "aclk160", GATE_IP_CAM, 9,
0, 0),
- GATE(smmu_fimc3, "smmu_fimc3", "aclk160", GATE_IP_CAM, 10,
+ GATE(CLK_SMMU_FIMC3, "smmu_fimc3", "aclk160", GATE_IP_CAM, 10,
0, 0),
- GATE(smmu_jpeg, "smmu_jpeg", "aclk160", GATE_IP_CAM, 11,
+ GATE(CLK_SMMU_JPEG, "smmu_jpeg", "aclk160", GATE_IP_CAM, 11,
0, 0),
- GATE(pixelasyncm0, "pxl_async0", "aclk160", GATE_IP_CAM, 17, 0, 0),
- GATE(pixelasyncm1, "pxl_async1", "aclk160", GATE_IP_CAM, 18, 0, 0),
- GATE(smmu_tv, "smmu_tv", "aclk160", GATE_IP_TV, 4,
+ GATE(CLK_PIXELASYNCM0, "pxl_async0", "aclk160", GATE_IP_CAM, 17, 0, 0),
+ GATE(CLK_PIXELASYNCM1, "pxl_async1", "aclk160", GATE_IP_CAM, 18, 0, 0),
+ GATE(CLK_SMMU_TV, "smmu_tv", "aclk160", GATE_IP_TV, 4,
0, 0),
- GATE(mfc, "mfc", "aclk100", GATE_IP_MFC, 0, 0, 0),
- GATE(smmu_mfcl, "smmu_mfcl", "aclk100", GATE_IP_MFC, 1,
+ GATE(CLK_MFC, "mfc", "aclk100", GATE_IP_MFC, 0, 0, 0),
+ GATE(CLK_SMMU_MFCL, "smmu_mfcl", "aclk100", GATE_IP_MFC, 1,
0, 0),
- GATE(smmu_mfcr, "smmu_mfcr", "aclk100", GATE_IP_MFC, 2,
+ GATE(CLK_SMMU_MFCR, "smmu_mfcr", "aclk100", GATE_IP_MFC, 2,
0, 0),
- GATE(fimd0, "fimd0", "aclk160", GATE_IP_LCD0, 0,
+ GATE(CLK_FIMD0, "fimd0", "aclk160", GATE_IP_LCD0, 0,
0, 0),
- GATE(smmu_fimd0, "smmu_fimd0", "aclk160", GATE_IP_LCD0, 4,
+ GATE(CLK_SMMU_FIMD0, "smmu_fimd0", "aclk160", GATE_IP_LCD0, 4,
0, 0),
- GATE(pdma0, "pdma0", "aclk133", GATE_IP_FSYS, 0,
+ GATE(CLK_PDMA0, "pdma0", "aclk133", GATE_IP_FSYS, 0,
0, 0),
- GATE(pdma1, "pdma1", "aclk133", GATE_IP_FSYS, 1,
+ GATE(CLK_PDMA1, "pdma1", "aclk133", GATE_IP_FSYS, 1,
0, 0),
- GATE(sdmmc0, "sdmmc0", "aclk133", GATE_IP_FSYS, 5,
+ GATE(CLK_SDMMC0, "sdmmc0", "aclk133", GATE_IP_FSYS, 5,
0, 0),
- GATE(sdmmc1, "sdmmc1", "aclk133", GATE_IP_FSYS, 6,
+ GATE(CLK_SDMMC1, "sdmmc1", "aclk133", GATE_IP_FSYS, 6,
0, 0),
- GATE(sdmmc2, "sdmmc2", "aclk133", GATE_IP_FSYS, 7,
+ GATE(CLK_SDMMC2, "sdmmc2", "aclk133", GATE_IP_FSYS, 7,
0, 0),
- GATE(sdmmc3, "sdmmc3", "aclk133", GATE_IP_FSYS, 8,
+ GATE(CLK_SDMMC3, "sdmmc3", "aclk133", GATE_IP_FSYS, 8,
0, 0),
- GATE(uart0, "uart0", "aclk100", GATE_IP_PERIL, 0,
+ GATE(CLK_UART0, "uart0", "aclk100", GATE_IP_PERIL, 0,
0, 0),
- GATE(uart1, "uart1", "aclk100", GATE_IP_PERIL, 1,
+ GATE(CLK_UART1, "uart1", "aclk100", GATE_IP_PERIL, 1,
0, 0),
- GATE(uart2, "uart2", "aclk100", GATE_IP_PERIL, 2,
+ GATE(CLK_UART2, "uart2", "aclk100", GATE_IP_PERIL, 2,
0, 0),
- GATE(uart3, "uart3", "aclk100", GATE_IP_PERIL, 3,
+ GATE(CLK_UART3, "uart3", "aclk100", GATE_IP_PERIL, 3,
0, 0),
- GATE(uart4, "uart4", "aclk100", GATE_IP_PERIL, 4,
+ GATE(CLK_UART4, "uart4", "aclk100", GATE_IP_PERIL, 4,
0, 0),
- GATE(i2c0, "i2c0", "aclk100", GATE_IP_PERIL, 6,
+ GATE(CLK_I2C0, "i2c0", "aclk100", GATE_IP_PERIL, 6,
0, 0),
- GATE(i2c1, "i2c1", "aclk100", GATE_IP_PERIL, 7,
+ GATE(CLK_I2C1, "i2c1", "aclk100", GATE_IP_PERIL, 7,
0, 0),
- GATE(i2c2, "i2c2", "aclk100", GATE_IP_PERIL, 8,
+ GATE(CLK_I2C2, "i2c2", "aclk100", GATE_IP_PERIL, 8,
0, 0),
- GATE(i2c3, "i2c3", "aclk100", GATE_IP_PERIL, 9,
+ GATE(CLK_I2C3, "i2c3", "aclk100", GATE_IP_PERIL, 9,
0, 0),
- GATE(i2c4, "i2c4", "aclk100", GATE_IP_PERIL, 10,
+ GATE(CLK_I2C4, "i2c4", "aclk100", GATE_IP_PERIL, 10,
0, 0),
- GATE(i2c5, "i2c5", "aclk100", GATE_IP_PERIL, 11,
+ GATE(CLK_I2C5, "i2c5", "aclk100", GATE_IP_PERIL, 11,
0, 0),
- GATE(i2c6, "i2c6", "aclk100", GATE_IP_PERIL, 12,
+ GATE(CLK_I2C6, "i2c6", "aclk100", GATE_IP_PERIL, 12,
0, 0),
- GATE(i2c7, "i2c7", "aclk100", GATE_IP_PERIL, 13,
+ GATE(CLK_I2C7, "i2c7", "aclk100", GATE_IP_PERIL, 13,
0, 0),
- GATE(i2c_hdmi, "i2c-hdmi", "aclk100", GATE_IP_PERIL, 14,
+ GATE(CLK_I2C_HDMI, "i2c-hdmi", "aclk100", GATE_IP_PERIL, 14,
0, 0),
- GATE(spi0, "spi0", "aclk100", GATE_IP_PERIL, 16,
+ GATE(CLK_SPI0, "spi0", "aclk100", GATE_IP_PERIL, 16,
0, 0),
- GATE(spi1, "spi1", "aclk100", GATE_IP_PERIL, 17,
+ GATE(CLK_SPI1, "spi1", "aclk100", GATE_IP_PERIL, 17,
0, 0),
- GATE(spi2, "spi2", "aclk100", GATE_IP_PERIL, 18,
+ GATE(CLK_SPI2, "spi2", "aclk100", GATE_IP_PERIL, 18,
0, 0),
- GATE(i2s1, "i2s1", "aclk100", GATE_IP_PERIL, 20,
+ GATE(CLK_I2S1, "i2s1", "aclk100", GATE_IP_PERIL, 20,
0, 0),
- GATE(i2s2, "i2s2", "aclk100", GATE_IP_PERIL, 21,
+ GATE(CLK_I2S2, "i2s2", "aclk100", GATE_IP_PERIL, 21,
0, 0),
- GATE(pcm1, "pcm1", "aclk100", GATE_IP_PERIL, 22,
+ GATE(CLK_PCM1, "pcm1", "aclk100", GATE_IP_PERIL, 22,
0, 0),
- GATE(pcm2, "pcm2", "aclk100", GATE_IP_PERIL, 23,
+ GATE(CLK_PCM2, "pcm2", "aclk100", GATE_IP_PERIL, 23,
0, 0),
- GATE(spdif, "spdif", "aclk100", GATE_IP_PERIL, 26,
+ GATE(CLK_SPDIF, "spdif", "aclk100", GATE_IP_PERIL, 26,
0, 0),
- GATE(ac97, "ac97", "aclk100", GATE_IP_PERIL, 27,
+ GATE(CLK_AC97, "ac97", "aclk100", GATE_IP_PERIL, 27,
0, 0),
};
/* list of gate clocks supported in exynos4210 soc */
static struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
- GATE(tvenc, "tvenc", "aclk160", GATE_IP_TV, 2, 0, 0),
- GATE(g2d, "g2d", "aclk200", E4210_GATE_IP_IMAGE, 0, 0, 0),
- GATE(rotator, "rotator", "aclk200", E4210_GATE_IP_IMAGE, 1, 0, 0),
- GATE(mdma, "mdma", "aclk200", E4210_GATE_IP_IMAGE, 2, 0, 0),
- GATE(smmu_g2d, "smmu_g2d", "aclk200", E4210_GATE_IP_IMAGE, 3, 0, 0),
- GATE(smmu_mdma, "smmu_mdma", "aclk200", E4210_GATE_IP_IMAGE, 5, 0, 0),
- GATE(pcie_phy, "pcie_phy", "aclk133", GATE_IP_FSYS, 2, 0, 0),
- GATE(sata_phy, "sata_phy", "aclk133", GATE_IP_FSYS, 3, 0, 0),
- GATE(sata, "sata", "aclk133", GATE_IP_FSYS, 10, 0, 0),
- GATE(pcie, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0),
- GATE(smmu_pcie, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0),
- GATE(modemif, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0),
- GATE(chipid, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
- GATE(sysreg, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0,
+ GATE(CLK_TVENC, "tvenc", "aclk160", GATE_IP_TV, 2, 0, 0),
+ GATE(CLK_G2D, "g2d", "aclk200", E4210_GATE_IP_IMAGE, 0, 0, 0),
+ GATE(CLK_ROTATOR, "rotator", "aclk200", E4210_GATE_IP_IMAGE, 1, 0, 0),
+ GATE(CLK_MDMA, "mdma", "aclk200", E4210_GATE_IP_IMAGE, 2, 0, 0),
+ GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk200", E4210_GATE_IP_IMAGE, 3, 0, 0),
+ GATE(CLK_SMMU_MDMA, "smmu_mdma", "aclk200", E4210_GATE_IP_IMAGE, 5, 0,
+ 0),
+ GATE(CLK_PCIE_PHY, "pcie_phy", "aclk133", GATE_IP_FSYS, 2, 0, 0),
+ GATE(CLK_SATA_PHY, "sata_phy", "aclk133", GATE_IP_FSYS, 3, 0, 0),
+ GATE(CLK_SATA, "sata", "aclk133", GATE_IP_FSYS, 10, 0, 0),
+ GATE(CLK_PCIE, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0),
+ GATE(CLK_SMMU_PCIE, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0),
+ GATE(CLK_MODEMIF, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0),
+ GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
+ GATE(CLK_SYSREG, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0,
CLK_IGNORE_UNUSED, 0),
- GATE(hdmi_cec, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, 0),
- GATE(smmu_rotator, "smmu_rotator", "aclk200",
+ GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0,
+ 0),
+ GATE(CLK_SMMU_ROTATOR, "smmu_rotator", "aclk200",
E4210_GATE_IP_IMAGE, 4, 0, 0),
- GATE(sclk_mipi1, "sclk_mipi1", "div_mipi_pre1",
+ GATE(CLK_SCLK_MIPI1, "sclk_mipi1", "div_mipi_pre1",
E4210_SRC_MASK_LCD1, 12, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_sata, "sclk_sata", "div_sata",
+ GATE(CLK_SCLK_SATA, "sclk_sata", "div_sata",
SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mixer, "sclk_mixer", "mout_mixer", SRC_MASK_TV, 4, 0, 0),
- GATE(sclk_dac, "sclk_dac", "mout_dac", SRC_MASK_TV, 8, 0, 0),
- GATE(tsadc, "tsadc", "aclk100", GATE_IP_PERIL, 15,
+ GATE(CLK_SCLK_MIXER, "sclk_mixer", "mout_mixer", SRC_MASK_TV, 4, 0, 0),
+ GATE(CLK_SCLK_DAC, "sclk_dac", "mout_dac", SRC_MASK_TV, 8, 0, 0),
+ GATE(CLK_TSADC, "tsadc", "aclk100", GATE_IP_PERIL, 15,
0, 0),
- GATE(mct, "mct", "aclk100", E4210_GATE_IP_PERIR, 13,
+ GATE(CLK_MCT, "mct", "aclk100", E4210_GATE_IP_PERIR, 13,
0, 0),
- GATE(wdt, "watchdog", "aclk100", E4210_GATE_IP_PERIR, 14,
+ GATE(CLK_WDT, "watchdog", "aclk100", E4210_GATE_IP_PERIR, 14,
0, 0),
- GATE(rtc, "rtc", "aclk100", E4210_GATE_IP_PERIR, 15,
+ GATE(CLK_RTC, "rtc", "aclk100", E4210_GATE_IP_PERIR, 15,
0, 0),
- GATE(keyif, "keyif", "aclk100", E4210_GATE_IP_PERIR, 16,
+ GATE(CLK_KEYIF, "keyif", "aclk100", E4210_GATE_IP_PERIR, 16,
0, 0),
- GATE(sclk_fimd1, "sclk_fimd1", "div_fimd1", E4210_SRC_MASK_LCD1, 0,
+ GATE(CLK_SCLK_FIMD1, "sclk_fimd1", "div_fimd1", E4210_SRC_MASK_LCD1, 0,
CLK_SET_RATE_PARENT, 0),
- GATE(tmu_apbif, "tmu_apbif", "aclk100", E4210_GATE_IP_PERIR, 17, 0, 0),
+ GATE(CLK_TMU_APBIF, "tmu_apbif", "aclk100", E4210_GATE_IP_PERIR, 17, 0,
+ 0),
};
/* list of gate clocks supported in exynos4x12 soc */
static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
- GATE(audss, "audss", "sclk_epll", E4X12_GATE_IP_MAUDIO, 0, 0, 0),
- GATE(mdnie0, "mdnie0", "aclk160", GATE_IP_LCD0, 2, 0, 0),
- GATE(rotator, "rotator", "aclk200", E4X12_GATE_IP_IMAGE, 1, 0, 0),
- GATE(mdma2, "mdma2", "aclk200", E4X12_GATE_IP_IMAGE, 2, 0, 0),
- GATE(smmu_mdma, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0, 0),
- GATE(mipi_hsi, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
- GATE(chipid, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
- GATE(sysreg, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
+ GATE(CLK_AUDSS, "audss", "sclk_epll", E4X12_GATE_IP_MAUDIO, 0, 0, 0),
+ GATE(CLK_MDNIE0, "mdnie0", "aclk160", GATE_IP_LCD0, 2, 0, 0),
+ GATE(CLK_ROTATOR, "rotator", "aclk200", E4X12_GATE_IP_IMAGE, 1, 0, 0),
+ GATE(CLK_MDMA2, "mdma2", "aclk200", E4X12_GATE_IP_IMAGE, 2, 0, 0),
+ GATE(CLK_SMMU_MDMA, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0,
+ 0),
+ GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
+ GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
+ GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
CLK_IGNORE_UNUSED, 0),
- GATE(hdmi_cec, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, 0),
- GATE(sclk_mdnie0, "sclk_mdnie0", "div_mdnie0",
+ GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0,
+ 0),
+ GATE(CLK_SCLK_MDNIE0, "sclk_mdnie0", "div_mdnie0",
SRC_MASK_LCD0, 4, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mdnie_pwm0, "sclk_mdnie_pwm0", "div_mdnie_pwm_pre0",
+ GATE(CLK_SCLK_MDNIE_PWM0, "sclk_mdnie_pwm0", "div_mdnie_pwm_pre0",
SRC_MASK_LCD0, 8, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mipihsi, "sclk_mipihsi", "div_mipihsi",
+ GATE(CLK_SCLK_MIPIHSI, "sclk_mipihsi", "div_mipihsi",
SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
- GATE(smmu_rotator, "smmu_rotator", "aclk200",
+ GATE(CLK_SMMU_ROTATOR, "smmu_rotator", "aclk200",
E4X12_GATE_IP_IMAGE, 4, 0, 0),
- GATE(mct, "mct", "aclk100", E4X12_GATE_IP_PERIR, 13,
+ GATE(CLK_MCT, "mct", "aclk100", E4X12_GATE_IP_PERIR, 13,
0, 0),
- GATE(rtc, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15,
+ GATE(CLK_RTC, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15,
0, 0),
- GATE(keyif, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0),
- GATE(sclk_pwm_isp, "sclk_pwm_isp", "div_pwm_isp",
+ GATE(CLK_KEYIF, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0),
+ GATE(CLK_SCLK_PWM_ISP, "sclk_pwm_isp", "div_pwm_isp",
E4X12_SRC_MASK_ISP, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi0_isp, "sclk_spi0_isp", "div_spi0_isp_pre",
+ GATE(CLK_SCLK_SPI0_ISP, "sclk_spi0_isp", "div_spi0_isp_pre",
E4X12_SRC_MASK_ISP, 4, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi1_isp, "sclk_spi1_isp", "div_spi1_isp_pre",
+ GATE(CLK_SCLK_SPI1_ISP, "sclk_spi1_isp", "div_spi1_isp_pre",
E4X12_SRC_MASK_ISP, 8, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart_isp, "sclk_uart_isp", "div_uart_isp",
+ GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "div_uart_isp",
E4X12_SRC_MASK_ISP, 12, CLK_SET_RATE_PARENT, 0),
- GATE(pwm_isp_sclk, "pwm_isp_sclk", "sclk_pwm_isp",
+ GATE(CLK_PWM_ISP_SCLK, "pwm_isp_sclk", "sclk_pwm_isp",
E4X12_GATE_IP_ISP, 0, 0, 0),
- GATE(spi0_isp_sclk, "spi0_isp_sclk", "sclk_spi0_isp",
+ GATE(CLK_SPI0_ISP_SCLK, "spi0_isp_sclk", "sclk_spi0_isp",
E4X12_GATE_IP_ISP, 1, 0, 0),
- GATE(spi1_isp_sclk, "spi1_isp_sclk", "sclk_spi1_isp",
+ GATE(CLK_SPI1_ISP_SCLK, "spi1_isp_sclk", "sclk_spi1_isp",
E4X12_GATE_IP_ISP, 2, 0, 0),
- GATE(uart_isp_sclk, "uart_isp_sclk", "sclk_uart_isp",
+ GATE(CLK_UART_ISP_SCLK, "uart_isp_sclk", "sclk_uart_isp",
E4X12_GATE_IP_ISP, 3, 0, 0),
- GATE(wdt, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0),
- GATE(pcm0, "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2,
+ GATE(CLK_WDT, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0),
+ GATE(CLK_PCM0, "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2,
0, 0),
- GATE(i2s0, "i2s0", "aclk100", E4X12_GATE_IP_MAUDIO, 3,
+ GATE(CLK_I2S0, "i2s0", "aclk100", E4X12_GATE_IP_MAUDIO, 3,
0, 0),
- GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0,
+ GATE(CLK_FIMC_ISP, "isp", "aclk200", E4X12_GATE_ISP0, 0,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1,
+ GATE(CLK_FIMC_DRC, "drc", "aclk200", E4X12_GATE_ISP0, 1,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2,
+ GATE(CLK_FIMC_FD, "fd", "aclk200", E4X12_GATE_ISP0, 2,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3,
+ GATE(CLK_FIMC_LITE0, "lite0", "aclk200", E4X12_GATE_ISP0, 3,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4,
+ GATE(CLK_FIMC_LITE1, "lite1", "aclk200", E4X12_GATE_ISP0, 4,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5,
+ GATE(CLK_MCUISP, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7,
+ GATE(CLK_GICISP, "gicisp", "aclk200", E4X12_GATE_ISP0, 7,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8,
+ GATE(CLK_SMMU_ISP, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9,
+ GATE(CLK_SMMU_DRC, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10,
+ GATE(CLK_SMMU_FD, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
+ GATE(CLK_SMMU_LITE0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
+ GATE(CLK_SMMU_LITE1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
+ GATE(CLK_PPMUISPMX, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
+ GATE(CLK_PPMUISPX, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
+ GATE(CLK_MCUCTL_ISP, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
+ GATE(CLK_MPWM_ISP, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
+ GATE(CLK_I2C0_ISP, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
+ GATE(CLK_I2C1_ISP, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
+ GATE(CLK_MTCADC_ISP, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28,
+ GATE(CLK_PWM_ISP, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30,
+ GATE(CLK_WDT_ISP, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
+ GATE(CLK_UART_ISP, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
+ GATE(CLK_ASYNCAXIM, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
+ GATE(CLK_SMMU_ISPCX, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
+ GATE(CLK_SPI0_ISP, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
+ GATE(CLK_SPI1_ISP, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
- GATE(tmu_apbif, "tmu_apbif", "aclk100", E4X12_GATE_IP_PERIR, 17, 0, 0),
+ GATE(CLK_G2D, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
+ GATE(CLK_TMU_APBIF, "tmu_apbif", "aclk100", E4X12_GATE_IP_PERIR, 17, 0,
+ 0),
};
static struct samsung_clock_alias exynos4_aliases[] __initdata = {
- ALIAS(mout_core, NULL, "moutcore"),
- ALIAS(arm_clk, NULL, "armclk"),
- ALIAS(sclk_apll, NULL, "mout_apll"),
+ ALIAS(CLK_MOUT_CORE, NULL, "moutcore"),
+ ALIAS(CLK_ARM_CLK, NULL, "armclk"),
+ ALIAS(CLK_SCLK_APLL, NULL, "mout_apll"),
};
static struct samsung_clock_alias exynos4210_aliases[] __initdata = {
- ALIAS(sclk_mpll, NULL, "mout_mpll"),
+ ALIAS(CLK_SCLK_MPLL, NULL, "mout_mpll"),
};
static struct samsung_clock_alias exynos4x12_aliases[] __initdata = {
- ALIAS(mout_mpll_user_c, NULL, "mout_mpll"),
+ ALIAS(CLK_MOUT_MPLL_USER_C, NULL, "mout_mpll"),
};
/*
@@ -977,7 +925,7 @@ static void __init exynos4_clk_register_finpll(unsigned long xom)
finpll_f = clk_get_rate(clk);
}
- fclk.id = fin_pll;
+ fclk.id = CLK_FIN_PLL;
fclk.name = "fin_pll";
fclk.parent_name = NULL;
fclk.flags = CLK_IS_ROOT;
@@ -1067,24 +1015,24 @@ static struct samsung_pll_rate_table exynos4x12_vpll_rates[] __initdata = {
};
static struct samsung_pll_clock exynos4210_plls[nr_plls] __initdata = {
- [apll] = PLL_A(pll_4508, fout_apll, "fout_apll", "fin_pll", APLL_LOCK,
- APLL_CON0, "fout_apll", NULL),
- [mpll] = PLL_A(pll_4508, fout_mpll, "fout_mpll", "fin_pll",
+ [apll] = PLL_A(pll_4508, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+ APLL_LOCK, APLL_CON0, "fout_apll", NULL),
+ [mpll] = PLL_A(pll_4508, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
E4210_MPLL_LOCK, E4210_MPLL_CON0, "fout_mpll", NULL),
- [epll] = PLL_A(pll_4600, fout_epll, "fout_epll", "fin_pll", EPLL_LOCK,
- EPLL_CON0, "fout_epll", NULL),
- [vpll] = PLL_A(pll_4650c, fout_vpll, "fout_vpll", "mout_vpllsrc",
+ [epll] = PLL_A(pll_4600, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+ EPLL_LOCK, EPLL_CON0, "fout_epll", NULL),
+ [vpll] = PLL_A(pll_4650c, CLK_FOUT_VPLL, "fout_vpll", "mout_vpllsrc",
VPLL_LOCK, VPLL_CON0, "fout_vpll", NULL),
};
static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
- [apll] = PLL(pll_35xx, fout_apll, "fout_apll", "fin_pll",
+ [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
APLL_LOCK, APLL_CON0, NULL),
- [mpll] = PLL(pll_35xx, fout_mpll, "fout_mpll", "fin_pll",
+ [mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
E4X12_MPLL_LOCK, E4X12_MPLL_CON0, NULL),
- [epll] = PLL(pll_36xx, fout_epll, "fout_epll", "fin_pll",
+ [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
EPLL_LOCK, EPLL_CON0, NULL),
- [vpll] = PLL(pll_36xx, fout_vpll, "fout_vpll", "fin_pll",
+ [vpll] = PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll",
VPLL_LOCK, VPLL_CON0, NULL),
};
@@ -1098,11 +1046,11 @@ static void __init exynos4_clk_init(struct device_node *np,
panic("%s: failed to map registers\n", __func__);
if (exynos4_soc == EXYNOS4210)
- samsung_clk_init(np, reg_base, nr_clks,
+ samsung_clk_init(np, reg_base, CLK_NR_CLKS,
exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs),
exynos4210_clk_save, ARRAY_SIZE(exynos4210_clk_save));
else
- samsung_clk_init(np, reg_base, nr_clks,
+ samsung_clk_init(np, reg_base, CLK_NR_CLKS,
exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs),
exynos4x12_clk_save, ARRAY_SIZE(exynos4x12_clk_save));
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index adf32343c9f9..ff4beebe1f0b 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -10,6 +10,7 @@
* Common Clock Framework support for Exynos5250 SoC.
*/
+#include <dt-bindings/clock/exynos5250.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
@@ -25,6 +26,7 @@
#define MPLL_LOCK 0x4000
#define MPLL_CON0 0x4100
#define SRC_CORE1 0x4204
+#define GATE_IP_ACP 0x8800
#define CPLL_LOCK 0x10020
#define EPLL_LOCK 0x10030
#define VPLL_LOCK 0x10040
@@ -35,6 +37,7 @@
#define GPLL_CON0 0x10150
#define SRC_TOP0 0x10210
#define SRC_TOP2 0x10218
+#define SRC_TOP3 0x1021c
#define SRC_GSCL 0x10220
#define SRC_DISP1_0 0x1022c
#define SRC_MAU 0x10240
@@ -65,6 +68,7 @@
#define DIV_PERIC4 0x10568
#define DIV_PERIC5 0x1056c
#define GATE_IP_GSCL 0x10920
+#define GATE_IP_DISP1 0x10928
#define GATE_IP_MFC 0x1092c
#define GATE_IP_GEN 0x10934
#define GATE_IP_FSYS 0x10944
@@ -74,8 +78,6 @@
#define BPLL_CON0 0x20110
#define SRC_CDREX 0x20200
#define PLL_DIV2_SEL 0x20a24
-#define GATE_IP_DISP1 0x10928
-#define GATE_IP_ACP 0x10000
/* list of PLLs to be registered */
enum exynos5250_plls {
@@ -84,51 +86,6 @@ enum exynos5250_plls {
};
/*
- * Let each supported clock get a unique id. This id is used to lookup the clock
- * for device tree based platforms. The clocks are categorized into three
- * sections: core, sclk gate and bus interface gate clocks.
- *
- * When adding a new clock to this list, it is advised to choose a clock
- * category and add it to the end of that category. That is because the the
- * device tree source file is referring to these ids and any change in the
- * sequence number of existing clocks will require corresponding change in the
- * device tree files. This limitation would go away when pre-processor support
- * for dtc would be available.
- */
-enum exynos5250_clks {
- none,
-
- /* core clocks */
- fin_pll, fout_apll, fout_mpll, fout_bpll, fout_gpll, fout_cpll,
- fout_epll, fout_vpll,
-
- /* gate for special clocks (sclk) */
- sclk_cam_bayer = 128, sclk_cam0, sclk_cam1, sclk_gscl_wa, sclk_gscl_wb,
- sclk_fimd1, sclk_mipi1, sclk_dp, sclk_hdmi, sclk_pixel, sclk_audio0,
- sclk_mmc0, sclk_mmc1, sclk_mmc2, sclk_mmc3, sclk_sata, sclk_usb3,
- sclk_jpeg, sclk_uart0, sclk_uart1, sclk_uart2, sclk_uart3, sclk_pwm,
- sclk_audio1, sclk_audio2, sclk_spdif, sclk_spi0, sclk_spi1, sclk_spi2,
- div_i2s1, div_i2s2, sclk_hdmiphy,
-
- /* gate clocks */
- gscl0 = 256, gscl1, gscl2, gscl3, gscl_wa, gscl_wb, smmu_gscl0,
- smmu_gscl1, smmu_gscl2, smmu_gscl3, mfc, smmu_mfcl, smmu_mfcr, rotator,
- jpeg, mdma1, smmu_rotator, smmu_jpeg, smmu_mdma1, pdma0, pdma1, sata,
- usbotg, mipi_hsi, sdmmc0, sdmmc1, sdmmc2, sdmmc3, sromc, usb2, usb3,
- sata_phyctrl, sata_phyi2c, uart0, uart1, uart2, uart3, uart4, i2c0,
- i2c1, i2c2, i2c3, i2c4, i2c5, i2c6, i2c7, i2c_hdmi, adc, spi0, spi1,
- spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2,
- hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1,
- tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct,
- wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d,
-
- /* mux clocks */
- mout_hdmi = 1024,
-
- nr_clks,
-};
-
-/*
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
*/
@@ -138,6 +95,7 @@ static unsigned long exynos5250_clk_regs[] __initdata = {
SRC_CORE1,
SRC_TOP0,
SRC_TOP2,
+ SRC_TOP3,
SRC_GSCL,
SRC_DISP1_0,
SRC_MAU,
@@ -181,7 +139,7 @@ static unsigned long exynos5250_clk_regs[] __initdata = {
/* list of all parent clock list */
PNAME(mout_apll_p) = { "fin_pll", "fout_apll", };
-PNAME(mout_cpu_p) = { "mout_apll", "sclk_mpll", };
+PNAME(mout_cpu_p) = { "mout_apll", "mout_mpll", };
PNAME(mout_mpll_fout_p) = { "fout_mplldiv2", "fout_mpll" };
PNAME(mout_mpll_p) = { "fin_pll", "mout_mpll_fout" };
PNAME(mout_bpll_fout_p) = { "fout_bplldiv2", "fout_bpll" };
@@ -190,308 +148,432 @@ PNAME(mout_vpllsrc_p) = { "fin_pll", "sclk_hdmi27m" };
PNAME(mout_vpll_p) = { "mout_vpllsrc", "fout_vpll" };
PNAME(mout_cpll_p) = { "fin_pll", "fout_cpll" };
PNAME(mout_epll_p) = { "fin_pll", "fout_epll" };
-PNAME(mout_mpll_user_p) = { "fin_pll", "sclk_mpll" };
-PNAME(mout_bpll_user_p) = { "fin_pll", "sclk_bpll" };
-PNAME(mout_aclk166_p) = { "sclk_cpll", "sclk_mpll_user" };
-PNAME(mout_aclk200_p) = { "sclk_mpll_user", "sclk_bpll_user" };
+PNAME(mout_mpll_user_p) = { "fin_pll", "mout_mpll" };
+PNAME(mout_bpll_user_p) = { "fin_pll", "mout_bpll" };
+PNAME(mout_aclk166_p) = { "mout_cpll", "mout_mpll_user" };
+PNAME(mout_aclk200_p) = { "mout_mpll_user", "mout_bpll_user" };
+PNAME(mout_aclk200_sub_p) = { "fin_pll", "div_aclk200" };
+PNAME(mout_aclk266_sub_p) = { "fin_pll", "div_aclk266" };
+PNAME(mout_aclk333_sub_p) = { "fin_pll", "div_aclk333" };
PNAME(mout_hdmi_p) = { "div_hdmi_pixel", "sclk_hdmiphy" };
-PNAME(mout_usb3_p) = { "sclk_mpll_user", "sclk_cpll" };
+PNAME(mout_usb3_p) = { "mout_mpll_user", "mout_cpll" };
PNAME(mout_group1_p) = { "fin_pll", "fin_pll", "sclk_hdmi27m",
"sclk_dptxphy", "sclk_uhostphy", "sclk_hdmiphy",
- "sclk_mpll_user", "sclk_epll", "sclk_vpll",
- "sclk_cpll" };
+ "mout_mpll_user", "mout_epll", "mout_vpll",
+ "mout_cpll", "none", "none",
+ "none", "none", "none",
+ "none" };
PNAME(mout_audio0_p) = { "cdclk0", "fin_pll", "sclk_hdmi27m", "sclk_dptxphy",
- "sclk_uhostphy", "sclk_hdmiphy",
- "sclk_mpll_user", "sclk_epll", "sclk_vpll",
- "sclk_cpll" };
+ "sclk_uhostphy", "fin_pll",
+ "mout_mpll_user", "mout_epll", "mout_vpll",
+ "mout_cpll", "none", "none",
+ "none", "none", "none",
+ "none" };
PNAME(mout_audio1_p) = { "cdclk1", "fin_pll", "sclk_hdmi27m", "sclk_dptxphy",
- "sclk_uhostphy", "sclk_hdmiphy",
- "sclk_mpll_user", "sclk_epll", "sclk_vpll",
- "sclk_cpll" };
+ "sclk_uhostphy", "fin_pll",
+ "mout_mpll_user", "mout_epll", "mout_vpll",
+ "mout_cpll", "none", "none",
+ "none", "none", "none",
+ "none" };
PNAME(mout_audio2_p) = { "cdclk2", "fin_pll", "sclk_hdmi27m", "sclk_dptxphy",
- "sclk_uhostphy", "sclk_hdmiphy",
- "sclk_mpll_user", "sclk_epll", "sclk_vpll",
- "sclk_cpll" };
+ "sclk_uhostphy", "fin_pll",
+ "mout_mpll_user", "mout_epll", "mout_vpll",
+ "mout_cpll", "none", "none",
+ "none", "none", "none",
+ "none" };
PNAME(mout_spdif_p) = { "sclk_audio0", "sclk_audio1", "sclk_audio2",
"spdif_extclk" };
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock exynos5250_fixed_rate_ext_clks[] __initdata = {
- FRATE(fin_pll, "fin_pll", NULL, CLK_IS_ROOT, 0),
+ FRATE(CLK_FIN_PLL, "fin_pll", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks generated inside the soc */
static struct samsung_fixed_rate_clock exynos5250_fixed_rate_clks[] __initdata = {
- FRATE(sclk_hdmiphy, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
- FRATE(none, "sclk_hdmi27m", NULL, CLK_IS_ROOT, 27000000),
- FRATE(none, "sclk_dptxphy", NULL, CLK_IS_ROOT, 24000000),
- FRATE(none, "sclk_uhostphy", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
+ FRATE(0, "sclk_hdmi27m", NULL, CLK_IS_ROOT, 27000000),
+ FRATE(0, "sclk_dptxphy", NULL, CLK_IS_ROOT, 24000000),
+ FRATE(0, "sclk_uhostphy", NULL, CLK_IS_ROOT, 48000000),
};
static struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
- FFACTOR(none, "fout_mplldiv2", "fout_mpll", 1, 2, 0),
- FFACTOR(none, "fout_bplldiv2", "fout_bpll", 1, 2, 0),
+ FFACTOR(0, "fout_mplldiv2", "fout_mpll", 1, 2, 0),
+ FFACTOR(0, "fout_bplldiv2", "fout_bpll", 1, 2, 0),
};
static struct samsung_mux_clock exynos5250_pll_pmux_clks[] __initdata = {
- MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
+ MUX(0, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
};
static struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
- MUX_A(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, "mout_apll"),
- MUX_A(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
- MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1),
- MUX_A(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
- MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
- MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1),
- MUX(none, "sclk_vpll", mout_vpll_p, SRC_TOP2, 16, 1),
- MUX(none, "sclk_epll", mout_epll_p, SRC_TOP2, 12, 1),
- MUX(none, "sclk_cpll", mout_cpll_p, SRC_TOP2, 8, 1),
- MUX(none, "sclk_mpll_user", mout_mpll_user_p, SRC_TOP2, 20, 1),
- MUX(none, "sclk_bpll_user", mout_bpll_user_p, SRC_TOP2, 24, 1),
- MUX(none, "mout_aclk166", mout_aclk166_p, SRC_TOP0, 8, 1),
- MUX(none, "mout_aclk333", mout_aclk166_p, SRC_TOP0, 16, 1),
- MUX(none, "mout_aclk200", mout_aclk200_p, SRC_TOP0, 12, 1),
- MUX(none, "mout_cam_bayer", mout_group1_p, SRC_GSCL, 12, 4),
- MUX(none, "mout_cam0", mout_group1_p, SRC_GSCL, 16, 4),
- MUX(none, "mout_cam1", mout_group1_p, SRC_GSCL, 20, 4),
- MUX(none, "mout_gscl_wa", mout_group1_p, SRC_GSCL, 24, 4),
- MUX(none, "mout_gscl_wb", mout_group1_p, SRC_GSCL, 28, 4),
- MUX(none, "mout_fimd1", mout_group1_p, SRC_DISP1_0, 0, 4),
- MUX(none, "mout_mipi1", mout_group1_p, SRC_DISP1_0, 12, 4),
- MUX(none, "mout_dp", mout_group1_p, SRC_DISP1_0, 16, 4),
- MUX(mout_hdmi, "mout_hdmi", mout_hdmi_p, SRC_DISP1_0, 20, 1),
- MUX(none, "mout_audio0", mout_audio0_p, SRC_MAU, 0, 4),
- MUX(none, "mout_mmc0", mout_group1_p, SRC_FSYS, 0, 4),
- MUX(none, "mout_mmc1", mout_group1_p, SRC_FSYS, 4, 4),
- MUX(none, "mout_mmc2", mout_group1_p, SRC_FSYS, 8, 4),
- MUX(none, "mout_mmc3", mout_group1_p, SRC_FSYS, 12, 4),
- MUX(none, "mout_sata", mout_aclk200_p, SRC_FSYS, 24, 1),
- MUX(none, "mout_usb3", mout_usb3_p, SRC_FSYS, 28, 1),
- MUX(none, "mout_jpeg", mout_group1_p, SRC_GEN, 0, 4),
- MUX(none, "mout_uart0", mout_group1_p, SRC_PERIC0, 0, 4),
- MUX(none, "mout_uart1", mout_group1_p, SRC_PERIC0, 4, 4),
- MUX(none, "mout_uart2", mout_group1_p, SRC_PERIC0, 8, 4),
- MUX(none, "mout_uart3", mout_group1_p, SRC_PERIC0, 12, 4),
- MUX(none, "mout_pwm", mout_group1_p, SRC_PERIC0, 24, 4),
- MUX(none, "mout_audio1", mout_audio1_p, SRC_PERIC1, 0, 4),
- MUX(none, "mout_audio2", mout_audio2_p, SRC_PERIC1, 4, 4),
- MUX(none, "mout_spdif", mout_spdif_p, SRC_PERIC1, 8, 2),
- MUX(none, "mout_spi0", mout_group1_p, SRC_PERIC1, 16, 4),
- MUX(none, "mout_spi1", mout_group1_p, SRC_PERIC1, 20, 4),
- MUX(none, "mout_spi2", mout_group1_p, SRC_PERIC1, 24, 4),
+ /*
+ * NOTE: Following table is sorted by (clock domain, register address,
+ * bitfield shift) triplet in ascending order. When adding new entries,
+ * please make sure that the order is kept, to avoid merge conflicts
+ * and make further work with defined data easier.
+ */
+
+ /*
+ * CMU_CPU
+ */
+ MUX_FA(0, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+ CLK_SET_RATE_PARENT, 0, "mout_apll"),
+ MUX_A(0, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
+
+ /*
+ * CMU_CORE
+ */
+ MUX_A(0, "mout_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
+
+ /*
+ * CMU_TOP
+ */
+ MUX(0, "mout_aclk166", mout_aclk166_p, SRC_TOP0, 8, 1),
+ MUX(0, "mout_aclk200", mout_aclk200_p, SRC_TOP0, 12, 1),
+ MUX(0, "mout_aclk333", mout_aclk166_p, SRC_TOP0, 16, 1),
+
+ MUX(0, "mout_cpll", mout_cpll_p, SRC_TOP2, 8, 1),
+ MUX(0, "mout_epll", mout_epll_p, SRC_TOP2, 12, 1),
+ MUX(0, "mout_vpll", mout_vpll_p, SRC_TOP2, 16, 1),
+ MUX(0, "mout_mpll_user", mout_mpll_user_p, SRC_TOP2, 20, 1),
+ MUX(0, "mout_bpll_user", mout_bpll_user_p, SRC_TOP2, 24, 1),
+
+ MUX(0, "mout_aclk200_disp1_sub", mout_aclk200_sub_p, SRC_TOP3, 4, 1),
+ MUX(0, "mout_aclk266_gscl_sub", mout_aclk266_sub_p, SRC_TOP3, 8, 1),
+ MUX(0, "mout_aclk333_sub", mout_aclk333_sub_p, SRC_TOP3, 24, 1),
+
+ MUX(0, "mout_cam_bayer", mout_group1_p, SRC_GSCL, 12, 4),
+ MUX(0, "mout_cam0", mout_group1_p, SRC_GSCL, 16, 4),
+ MUX(0, "mout_cam1", mout_group1_p, SRC_GSCL, 20, 4),
+ MUX(0, "mout_gscl_wa", mout_group1_p, SRC_GSCL, 24, 4),
+ MUX(0, "mout_gscl_wb", mout_group1_p, SRC_GSCL, 28, 4),
+
+ MUX(0, "mout_fimd1", mout_group1_p, SRC_DISP1_0, 0, 4),
+ MUX(0, "mout_mipi1", mout_group1_p, SRC_DISP1_0, 12, 4),
+ MUX(0, "mout_dp", mout_group1_p, SRC_DISP1_0, 16, 4),
+ MUX(CLK_MOUT_HDMI, "mout_hdmi", mout_hdmi_p, SRC_DISP1_0, 20, 1),
+
+ MUX(0, "mout_audio0", mout_audio0_p, SRC_MAU, 0, 4),
+
+ MUX(0, "mout_mmc0", mout_group1_p, SRC_FSYS, 0, 4),
+ MUX(0, "mout_mmc1", mout_group1_p, SRC_FSYS, 4, 4),
+ MUX(0, "mout_mmc2", mout_group1_p, SRC_FSYS, 8, 4),
+ MUX(0, "mout_mmc3", mout_group1_p, SRC_FSYS, 12, 4),
+ MUX(0, "mout_sata", mout_aclk200_p, SRC_FSYS, 24, 1),
+ MUX(0, "mout_usb3", mout_usb3_p, SRC_FSYS, 28, 1),
+
+ MUX(0, "mout_jpeg", mout_group1_p, SRC_GEN, 0, 4),
+
+ MUX(0, "mout_uart0", mout_group1_p, SRC_PERIC0, 0, 4),
+ MUX(0, "mout_uart1", mout_group1_p, SRC_PERIC0, 4, 4),
+ MUX(0, "mout_uart2", mout_group1_p, SRC_PERIC0, 8, 4),
+ MUX(0, "mout_uart3", mout_group1_p, SRC_PERIC0, 12, 4),
+ MUX(0, "mout_pwm", mout_group1_p, SRC_PERIC0, 24, 4),
+
+ MUX(0, "mout_audio1", mout_audio1_p, SRC_PERIC1, 0, 4),
+ MUX(0, "mout_audio2", mout_audio2_p, SRC_PERIC1, 4, 4),
+ MUX(0, "mout_spdif", mout_spdif_p, SRC_PERIC1, 8, 2),
+ MUX(0, "mout_spi0", mout_group1_p, SRC_PERIC1, 16, 4),
+ MUX(0, "mout_spi1", mout_group1_p, SRC_PERIC1, 20, 4),
+ MUX(0, "mout_spi2", mout_group1_p, SRC_PERIC1, 24, 4),
+
+ /*
+ * CMU_CDREX
+ */
+ MUX(0, "mout_bpll", mout_bpll_p, SRC_CDREX, 0, 1),
+
+ MUX(0, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1),
+ MUX(0, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
};
static struct samsung_div_clock exynos5250_div_clks[] __initdata = {
- DIV(none, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
- DIV(none, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
- DIV(none, "aclk66_pre", "sclk_mpll_user", DIV_TOP1, 24, 3),
- DIV(none, "aclk66", "aclk66_pre", DIV_TOP0, 0, 3),
- DIV(none, "aclk266", "sclk_mpll_user", DIV_TOP0, 16, 3),
- DIV(none, "aclk166", "mout_aclk166", DIV_TOP0, 8, 3),
- DIV(none, "aclk333", "mout_aclk333", DIV_TOP0, 20, 3),
- DIV(none, "aclk200", "mout_aclk200", DIV_TOP0, 12, 3),
- DIV(none, "div_cam_bayer", "mout_cam_bayer", DIV_GSCL, 12, 4),
- DIV(none, "div_cam0", "mout_cam0", DIV_GSCL, 16, 4),
- DIV(none, "div_cam1", "mout_cam1", DIV_GSCL, 20, 4),
- DIV(none, "div_gscl_wa", "mout_gscl_wa", DIV_GSCL, 24, 4),
- DIV(none, "div_gscl_wb", "mout_gscl_wb", DIV_GSCL, 28, 4),
- DIV(none, "div_fimd1", "mout_fimd1", DIV_DISP1_0, 0, 4),
- DIV(none, "div_mipi1", "mout_mipi1", DIV_DISP1_0, 16, 4),
- DIV(none, "div_dp", "mout_dp", DIV_DISP1_0, 24, 4),
- DIV(none, "div_jpeg", "mout_jpeg", DIV_GEN, 4, 4),
- DIV(none, "div_audio0", "mout_audio0", DIV_MAU, 0, 4),
- DIV(none, "div_pcm0", "sclk_audio0", DIV_MAU, 4, 8),
- DIV(none, "div_sata", "mout_sata", DIV_FSYS0, 20, 4),
- DIV(none, "div_usb3", "mout_usb3", DIV_FSYS0, 24, 4),
- DIV(none, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
- DIV(none, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
- DIV(none, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4),
- DIV(none, "div_mmc3", "mout_mmc3", DIV_FSYS2, 16, 4),
- DIV(none, "div_uart0", "mout_uart0", DIV_PERIC0, 0, 4),
- DIV(none, "div_uart1", "mout_uart1", DIV_PERIC0, 4, 4),
- DIV(none, "div_uart2", "mout_uart2", DIV_PERIC0, 8, 4),
- DIV(none, "div_uart3", "mout_uart3", DIV_PERIC0, 12, 4),
- DIV(none, "div_spi0", "mout_spi0", DIV_PERIC1, 0, 4),
- DIV(none, "div_spi1", "mout_spi1", DIV_PERIC1, 16, 4),
- DIV(none, "div_spi2", "mout_spi2", DIV_PERIC2, 0, 4),
- DIV(none, "div_pwm", "mout_pwm", DIV_PERIC3, 0, 4),
- DIV(none, "div_audio1", "mout_audio1", DIV_PERIC4, 0, 4),
- DIV(none, "div_pcm1", "sclk_audio1", DIV_PERIC4, 4, 8),
- DIV(none, "div_audio2", "mout_audio2", DIV_PERIC4, 16, 4),
- DIV(none, "div_pcm2", "sclk_audio2", DIV_PERIC4, 20, 8),
- DIV(div_i2s1, "div_i2s1", "sclk_audio1", DIV_PERIC5, 0, 6),
- DIV(div_i2s2, "div_i2s2", "sclk_audio2", DIV_PERIC5, 8, 6),
- DIV(sclk_pixel, "div_hdmi_pixel", "sclk_vpll", DIV_DISP1_0, 28, 4),
- DIV_A(none, "armclk", "div_arm", DIV_CPU0, 28, 3, "armclk"),
- DIV_F(none, "div_mipi1_pre", "div_mipi1",
+ /*
+ * NOTE: Following table is sorted by (clock domain, register address,
+ * bitfield shift) triplet in ascending order. When adding new entries,
+ * please make sure that the order is kept, to avoid merge conflicts
+ * and make further work with defined data easier.
+ */
+
+ /*
+ * CMU_CPU
+ */
+ DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
+ DIV(0, "div_apll", "mout_apll", DIV_CPU0, 24, 3),
+ DIV_A(0, "div_arm2", "div_arm", DIV_CPU0, 28, 3, "armclk"),
+
+ /*
+ * CMU_TOP
+ */
+ DIV(0, "div_aclk66", "div_aclk66_pre", DIV_TOP0, 0, 3),
+ DIV(0, "div_aclk166", "mout_aclk166", DIV_TOP0, 8, 3),
+ DIV(0, "div_aclk200", "mout_aclk200", DIV_TOP0, 12, 3),
+ DIV(0, "div_aclk266", "mout_mpll_user", DIV_TOP0, 16, 3),
+ DIV(0, "div_aclk333", "mout_aclk333", DIV_TOP0, 20, 3),
+
+ DIV(0, "div_aclk66_pre", "mout_mpll_user", DIV_TOP1, 24, 3),
+
+ DIV(0, "div_cam_bayer", "mout_cam_bayer", DIV_GSCL, 12, 4),
+ DIV(0, "div_cam0", "mout_cam0", DIV_GSCL, 16, 4),
+ DIV(0, "div_cam1", "mout_cam1", DIV_GSCL, 20, 4),
+ DIV(0, "div_gscl_wa", "mout_gscl_wa", DIV_GSCL, 24, 4),
+ DIV(0, "div_gscl_wb", "mout_gscl_wb", DIV_GSCL, 28, 4),
+
+ DIV(0, "div_fimd1", "mout_fimd1", DIV_DISP1_0, 0, 4),
+ DIV(0, "div_mipi1", "mout_mipi1", DIV_DISP1_0, 16, 4),
+ DIV_F(0, "div_mipi1_pre", "div_mipi1",
DIV_DISP1_0, 20, 4, CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_mmc_pre0", "div_mmc0",
+ DIV(0, "div_dp", "mout_dp", DIV_DISP1_0, 24, 4),
+ DIV(CLK_SCLK_PIXEL, "div_hdmi_pixel", "mout_vpll", DIV_DISP1_0, 28, 4),
+
+ DIV(0, "div_jpeg", "mout_jpeg", DIV_GEN, 4, 4),
+
+ DIV(0, "div_audio0", "mout_audio0", DIV_MAU, 0, 4),
+ DIV(CLK_DIV_PCM0, "div_pcm0", "sclk_audio0", DIV_MAU, 4, 8),
+
+ DIV(0, "div_sata", "mout_sata", DIV_FSYS0, 20, 4),
+ DIV(0, "div_usb3", "mout_usb3", DIV_FSYS0, 24, 4),
+
+ DIV(0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
+ DIV_F(0, "div_mmc_pre0", "div_mmc0",
DIV_FSYS1, 8, 8, CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_mmc_pre1", "div_mmc1",
+ DIV(0, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
+ DIV_F(0, "div_mmc_pre1", "div_mmc1",
DIV_FSYS1, 24, 8, CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_mmc_pre2", "div_mmc2",
+
+ DIV(0, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4),
+ DIV_F(0, "div_mmc_pre2", "div_mmc2",
DIV_FSYS2, 8, 8, CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_mmc_pre3", "div_mmc3",
+ DIV(0, "div_mmc3", "mout_mmc3", DIV_FSYS2, 16, 4),
+ DIV_F(0, "div_mmc_pre3", "div_mmc3",
DIV_FSYS2, 24, 8, CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_spi_pre0", "div_spi0",
+
+ DIV(0, "div_uart0", "mout_uart0", DIV_PERIC0, 0, 4),
+ DIV(0, "div_uart1", "mout_uart1", DIV_PERIC0, 4, 4),
+ DIV(0, "div_uart2", "mout_uart2", DIV_PERIC0, 8, 4),
+ DIV(0, "div_uart3", "mout_uart3", DIV_PERIC0, 12, 4),
+
+ DIV(0, "div_spi0", "mout_spi0", DIV_PERIC1, 0, 4),
+ DIV_F(0, "div_spi_pre0", "div_spi0",
DIV_PERIC1, 8, 8, CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_spi_pre1", "div_spi1",
+ DIV(0, "div_spi1", "mout_spi1", DIV_PERIC1, 16, 4),
+ DIV_F(0, "div_spi_pre1", "div_spi1",
DIV_PERIC1, 24, 8, CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_spi_pre2", "div_spi2",
+
+ DIV(0, "div_spi2", "mout_spi2", DIV_PERIC2, 0, 4),
+ DIV_F(0, "div_spi_pre2", "div_spi2",
DIV_PERIC2, 8, 8, CLK_SET_RATE_PARENT, 0),
+
+ DIV(0, "div_pwm", "mout_pwm", DIV_PERIC3, 0, 4),
+
+ DIV(0, "div_audio1", "mout_audio1", DIV_PERIC4, 0, 4),
+ DIV(0, "div_pcm1", "sclk_audio1", DIV_PERIC4, 4, 8),
+ DIV(0, "div_audio2", "mout_audio2", DIV_PERIC4, 16, 4),
+ DIV(0, "div_pcm2", "sclk_audio2", DIV_PERIC4, 20, 8),
+
+ DIV(CLK_DIV_I2S1, "div_i2s1", "sclk_audio1", DIV_PERIC5, 0, 6),
+ DIV(CLK_DIV_I2S2, "div_i2s2", "sclk_audio2", DIV_PERIC5, 8, 6),
};
static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
- GATE(gscl0, "gscl0", "none", GATE_IP_GSCL, 0, 0, 0),
- GATE(gscl1, "gscl1", "none", GATE_IP_GSCL, 1, 0, 0),
- GATE(gscl2, "gscl2", "aclk266", GATE_IP_GSCL, 2, 0, 0),
- GATE(gscl3, "gscl3", "aclk266", GATE_IP_GSCL, 3, 0, 0),
- GATE(gscl_wa, "gscl_wa", "div_gscl_wa", GATE_IP_GSCL, 5, 0, 0),
- GATE(gscl_wb, "gscl_wb", "div_gscl_wb", GATE_IP_GSCL, 6, 0, 0),
- GATE(smmu_gscl0, "smmu_gscl0", "aclk266", GATE_IP_GSCL, 7, 0, 0),
- GATE(smmu_gscl1, "smmu_gscl1", "aclk266", GATE_IP_GSCL, 8, 0, 0),
- GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0),
- GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0),
- GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
- GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0),
- GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0),
- GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
- GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0),
- GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
- GATE(smmu_rotator, "smmu_rotator", "aclk266", GATE_IP_GEN, 6, 0, 0),
- GATE(smmu_jpeg, "smmu_jpeg", "aclk166", GATE_IP_GEN, 7, 0, 0),
- GATE(smmu_mdma1, "smmu_mdma1", "aclk266", GATE_IP_GEN, 9, 0, 0),
- GATE(pdma0, "pdma0", "aclk200", GATE_IP_FSYS, 1, 0, 0),
- GATE(pdma1, "pdma1", "aclk200", GATE_IP_FSYS, 2, 0, 0),
- GATE(sata, "sata", "aclk200", GATE_IP_FSYS, 6, 0, 0),
- GATE(usbotg, "usbotg", "aclk200", GATE_IP_FSYS, 7, 0, 0),
- GATE(mipi_hsi, "mipi_hsi", "aclk200", GATE_IP_FSYS, 8, 0, 0),
- GATE(sdmmc0, "sdmmc0", "aclk200", GATE_IP_FSYS, 12, 0, 0),
- GATE(sdmmc1, "sdmmc1", "aclk200", GATE_IP_FSYS, 13, 0, 0),
- GATE(sdmmc2, "sdmmc2", "aclk200", GATE_IP_FSYS, 14, 0, 0),
- GATE(sdmmc3, "sdmmc3", "aclk200", GATE_IP_FSYS, 15, 0, 0),
- GATE(sromc, "sromc", "aclk200", GATE_IP_FSYS, 17, 0, 0),
- GATE(usb2, "usb2", "aclk200", GATE_IP_FSYS, 18, 0, 0),
- GATE(usb3, "usb3", "aclk200", GATE_IP_FSYS, 19, 0, 0),
- GATE(sata_phyctrl, "sata_phyctrl", "aclk200", GATE_IP_FSYS, 24, 0, 0),
- GATE(sata_phyi2c, "sata_phyi2c", "aclk200", GATE_IP_FSYS, 25, 0, 0),
- GATE(uart0, "uart0", "aclk66", GATE_IP_PERIC, 0, 0, 0),
- GATE(uart1, "uart1", "aclk66", GATE_IP_PERIC, 1, 0, 0),
- GATE(uart2, "uart2", "aclk66", GATE_IP_PERIC, 2, 0, 0),
- GATE(uart3, "uart3", "aclk66", GATE_IP_PERIC, 3, 0, 0),
- GATE(uart4, "uart4", "aclk66", GATE_IP_PERIC, 4, 0, 0),
- GATE(i2c0, "i2c0", "aclk66", GATE_IP_PERIC, 6, 0, 0),
- GATE(i2c1, "i2c1", "aclk66", GATE_IP_PERIC, 7, 0, 0),
- GATE(i2c2, "i2c2", "aclk66", GATE_IP_PERIC, 8, 0, 0),
- GATE(i2c3, "i2c3", "aclk66", GATE_IP_PERIC, 9, 0, 0),
- GATE(i2c4, "i2c4", "aclk66", GATE_IP_PERIC, 10, 0, 0),
- GATE(i2c5, "i2c5", "aclk66", GATE_IP_PERIC, 11, 0, 0),
- GATE(i2c6, "i2c6", "aclk66", GATE_IP_PERIC, 12, 0, 0),
- GATE(i2c7, "i2c7", "aclk66", GATE_IP_PERIC, 13, 0, 0),
- GATE(i2c_hdmi, "i2c_hdmi", "aclk66", GATE_IP_PERIC, 14, 0, 0),
- GATE(adc, "adc", "aclk66", GATE_IP_PERIC, 15, 0, 0),
- GATE(spi0, "spi0", "aclk66", GATE_IP_PERIC, 16, 0, 0),
- GATE(spi1, "spi1", "aclk66", GATE_IP_PERIC, 17, 0, 0),
- GATE(spi2, "spi2", "aclk66", GATE_IP_PERIC, 18, 0, 0),
- GATE(i2s1, "i2s1", "aclk66", GATE_IP_PERIC, 20, 0, 0),
- GATE(i2s2, "i2s2", "aclk66", GATE_IP_PERIC, 21, 0, 0),
- GATE(pcm1, "pcm1", "aclk66", GATE_IP_PERIC, 22, 0, 0),
- GATE(pcm2, "pcm2", "aclk66", GATE_IP_PERIC, 23, 0, 0),
- GATE(pwm, "pwm", "aclk66", GATE_IP_PERIC, 24, 0, 0),
- GATE(spdif, "spdif", "aclk66", GATE_IP_PERIC, 26, 0, 0),
- GATE(ac97, "ac97", "aclk66", GATE_IP_PERIC, 27, 0, 0),
- GATE(hsi2c0, "hsi2c0", "aclk66", GATE_IP_PERIC, 28, 0, 0),
- GATE(hsi2c1, "hsi2c1", "aclk66", GATE_IP_PERIC, 29, 0, 0),
- GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0),
- GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
- GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
- GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
- GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
- GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
- GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
- GATE(tzpc2, "tzpc2", "aclk66", GATE_IP_PERIS, 8, 0, 0),
- GATE(tzpc3, "tzpc3", "aclk66", GATE_IP_PERIS, 9, 0, 0),
- GATE(tzpc4, "tzpc4", "aclk66", GATE_IP_PERIS, 10, 0, 0),
- GATE(tzpc5, "tzpc5", "aclk66", GATE_IP_PERIS, 11, 0, 0),
- GATE(tzpc6, "tzpc6", "aclk66", GATE_IP_PERIS, 12, 0, 0),
- GATE(tzpc7, "tzpc7", "aclk66", GATE_IP_PERIS, 13, 0, 0),
- GATE(tzpc8, "tzpc8", "aclk66", GATE_IP_PERIS, 14, 0, 0),
- GATE(tzpc9, "tzpc9", "aclk66", GATE_IP_PERIS, 15, 0, 0),
- GATE(hdmi_cec, "hdmi_cec", "aclk66", GATE_IP_PERIS, 16, 0, 0),
- GATE(mct, "mct", "aclk66", GATE_IP_PERIS, 18, 0, 0),
- GATE(wdt, "wdt", "aclk66", GATE_IP_PERIS, 19, 0, 0),
- GATE(rtc, "rtc", "aclk66", GATE_IP_PERIS, 20, 0, 0),
- GATE(tmu, "tmu", "aclk66", GATE_IP_PERIS, 21, 0, 0),
- GATE(cmu_top, "cmu_top", "aclk66",
- GATE_IP_PERIS, 3, CLK_IGNORE_UNUSED, 0),
- GATE(cmu_core, "cmu_core", "aclk66",
- GATE_IP_PERIS, 4, CLK_IGNORE_UNUSED, 0),
- GATE(cmu_mem, "cmu_mem", "aclk66",
- GATE_IP_PERIS, 5, CLK_IGNORE_UNUSED, 0),
- GATE(sclk_cam_bayer, "sclk_cam_bayer", "div_cam_bayer",
+ /*
+ * NOTE: Following table is sorted by (clock domain, register address,
+ * bitfield shift) triplet in ascending order. When adding new entries,
+ * please make sure that the order is kept, to avoid merge conflicts
+ * and make further work with defined data easier.
+ */
+
+ /*
+ * CMU_ACP
+ */
+ GATE(CLK_MDMA0, "mdma0", "div_aclk266", GATE_IP_ACP, 1, 0, 0),
+ GATE(CLK_G2D, "g2d", "div_aclk200", GATE_IP_ACP, 3, 0, 0),
+ GATE(CLK_SMMU_MDMA0, "smmu_mdma0", "div_aclk266", GATE_IP_ACP, 5, 0, 0),
+
+ /*
+ * CMU_TOP
+ */
+ GATE(CLK_SCLK_CAM_BAYER, "sclk_cam_bayer", "div_cam_bayer",
SRC_MASK_GSCL, 12, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_cam0, "sclk_cam0", "div_cam0",
+ GATE(CLK_SCLK_CAM0, "sclk_cam0", "div_cam0",
SRC_MASK_GSCL, 16, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_cam1, "sclk_cam1", "div_cam1",
+ GATE(CLK_SCLK_CAM1, "sclk_cam1", "div_cam1",
SRC_MASK_GSCL, 20, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_gscl_wa, "sclk_gscl_wa", "div_gscl_wa",
+ GATE(CLK_SCLK_GSCL_WA, "sclk_gscl_wa", "div_gscl_wa",
SRC_MASK_GSCL, 24, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_gscl_wb, "sclk_gscl_wb", "div_gscl_wb",
+ GATE(CLK_SCLK_GSCL_WB, "sclk_gscl_wb", "div_gscl_wb",
SRC_MASK_GSCL, 28, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_fimd1, "sclk_fimd1", "div_fimd1",
+
+ GATE(CLK_SCLK_FIMD1, "sclk_fimd1", "div_fimd1",
SRC_MASK_DISP1_0, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mipi1, "sclk_mipi1", "div_mipi1",
+ GATE(CLK_SCLK_MIPI1, "sclk_mipi1", "div_mipi1",
SRC_MASK_DISP1_0, 12, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_dp, "sclk_dp", "div_dp",
+ GATE(CLK_SCLK_DP, "sclk_dp", "div_dp",
SRC_MASK_DISP1_0, 16, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_hdmi, "sclk_hdmi", "mout_hdmi",
+ GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi",
SRC_MASK_DISP1_0, 20, 0, 0),
- GATE(sclk_audio0, "sclk_audio0", "div_audio0",
+
+ GATE(CLK_SCLK_AUDIO0, "sclk_audio0", "div_audio0",
SRC_MASK_MAU, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc0, "sclk_mmc0", "div_mmc_pre0",
+
+ GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc_pre0",
SRC_MASK_FSYS, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc1, "sclk_mmc1", "div_mmc_pre1",
+ GATE(CLK_SCLK_MMC1, "sclk_mmc1", "div_mmc_pre1",
SRC_MASK_FSYS, 4, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc2, "sclk_mmc2", "div_mmc_pre2",
+ GATE(CLK_SCLK_MMC2, "sclk_mmc2", "div_mmc_pre2",
SRC_MASK_FSYS, 8, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc3, "sclk_mmc3", "div_mmc_pre3",
+ GATE(CLK_SCLK_MMC3, "sclk_mmc3", "div_mmc_pre3",
SRC_MASK_FSYS, 12, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_sata, "sclk_sata", "div_sata",
+ GATE(CLK_SCLK_SATA, "sclk_sata", "div_sata",
SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_usb3, "sclk_usb3", "div_usb3",
+ GATE(CLK_SCLK_USB3, "sclk_usb3", "div_usb3",
SRC_MASK_FSYS, 28, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_jpeg, "sclk_jpeg", "div_jpeg",
+
+ GATE(CLK_SCLK_JPEG, "sclk_jpeg", "div_jpeg",
SRC_MASK_GEN, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart0, "sclk_uart0", "div_uart0",
+
+ GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0",
SRC_MASK_PERIC0, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart1, "sclk_uart1", "div_uart1",
+ GATE(CLK_SCLK_UART1, "sclk_uart1", "div_uart1",
SRC_MASK_PERIC0, 4, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart2, "sclk_uart2", "div_uart2",
+ GATE(CLK_SCLK_UART2, "sclk_uart2", "div_uart2",
SRC_MASK_PERIC0, 8, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart3, "sclk_uart3", "div_uart3",
+ GATE(CLK_SCLK_UART3, "sclk_uart3", "div_uart3",
SRC_MASK_PERIC0, 12, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_pwm, "sclk_pwm", "div_pwm",
+ GATE(CLK_SCLK_PWM, "sclk_pwm", "div_pwm",
SRC_MASK_PERIC0, 24, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_audio1, "sclk_audio1", "div_audio1",
+
+ GATE(CLK_SCLK_AUDIO1, "sclk_audio1", "div_audio1",
SRC_MASK_PERIC1, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_audio2, "sclk_audio2", "div_audio2",
+ GATE(CLK_SCLK_AUDIO2, "sclk_audio2", "div_audio2",
SRC_MASK_PERIC1, 4, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spdif, "sclk_spdif", "mout_spdif",
+ GATE(CLK_SCLK_SPDIF, "sclk_spdif", "mout_spdif",
SRC_MASK_PERIC1, 4, 0, 0),
- GATE(sclk_spi0, "sclk_spi0", "div_spi_pre0",
+ GATE(CLK_SCLK_SPI0, "sclk_spi0", "div_spi_pre0",
SRC_MASK_PERIC1, 16, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi1, "sclk_spi1", "div_spi_pre1",
+ GATE(CLK_SCLK_SPI1, "sclk_spi1", "div_spi_pre1",
SRC_MASK_PERIC1, 20, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi2, "sclk_spi2", "div_spi_pre2",
+ GATE(CLK_SCLK_SPI2, "sclk_spi2", "div_spi_pre2",
SRC_MASK_PERIC1, 24, CLK_SET_RATE_PARENT, 0),
- GATE(fimd1, "fimd1", "aclk200", GATE_IP_DISP1, 0, 0, 0),
- GATE(mie1, "mie1", "aclk200", GATE_IP_DISP1, 1, 0, 0),
- GATE(dsim0, "dsim0", "aclk200", GATE_IP_DISP1, 3, 0, 0),
- GATE(dp, "dp", "aclk200", GATE_IP_DISP1, 4, 0, 0),
- GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
- GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
- GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0),
+
+ GATE(CLK_GSCL0, "gscl0", "mout_aclk266_gscl_sub", GATE_IP_GSCL, 0, 0,
+ 0),
+ GATE(CLK_GSCL1, "gscl1", "mout_aclk266_gscl_sub", GATE_IP_GSCL, 1, 0,
+ 0),
+ GATE(CLK_GSCL2, "gscl2", "mout_aclk266_gscl_sub", GATE_IP_GSCL, 2, 0,
+ 0),
+ GATE(CLK_GSCL3, "gscl3", "mout_aclk266_gscl_sub", GATE_IP_GSCL, 3, 0,
+ 0),
+ GATE(CLK_GSCL_WA, "gscl_wa", "div_gscl_wa", GATE_IP_GSCL, 5, 0, 0),
+ GATE(CLK_GSCL_WB, "gscl_wb", "div_gscl_wb", GATE_IP_GSCL, 6, 0, 0),
+ GATE(CLK_SMMU_GSCL0, "smmu_gscl0", "mout_aclk266_gscl_sub",
+ GATE_IP_GSCL, 7, 0, 0),
+ GATE(CLK_SMMU_GSCL1, "smmu_gscl1", "mout_aclk266_gscl_sub",
+ GATE_IP_GSCL, 8, 0, 0),
+ GATE(CLK_SMMU_GSCL2, "smmu_gscl2", "mout_aclk266_gscl_sub",
+ GATE_IP_GSCL, 9, 0, 0),
+ GATE(CLK_SMMU_GSCL3, "smmu_gscl3", "mout_aclk266_gscl_sub",
+ GATE_IP_GSCL, 10, 0, 0),
+
+ GATE(CLK_FIMD1, "fimd1", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 0, 0,
+ 0),
+ GATE(CLK_MIE1, "mie1", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 1, 0,
+ 0),
+ GATE(CLK_DSIM0, "dsim0", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 3, 0,
+ 0),
+ GATE(CLK_DP, "dp", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 4, 0, 0),
+ GATE(CLK_MIXER, "mixer", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 5, 0,
+ 0),
+ GATE(CLK_HDMI, "hdmi", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 6, 0,
+ 0),
+
+ GATE(CLK_MFC, "mfc", "mout_aclk333_sub", GATE_IP_MFC, 0, 0, 0),
+ GATE(CLK_SMMU_MFCR, "smmu_mfcr", "mout_aclk333_sub", GATE_IP_MFC, 1, 0,
+ 0),
+ GATE(CLK_SMMU_MFCL, "smmu_mfcl", "mout_aclk333_sub", GATE_IP_MFC, 2, 0,
+ 0),
+
+ GATE(CLK_ROTATOR, "rotator", "div_aclk266", GATE_IP_GEN, 1, 0, 0),
+ GATE(CLK_JPEG, "jpeg", "div_aclk166", GATE_IP_GEN, 2, 0, 0),
+ GATE(CLK_MDMA1, "mdma1", "div_aclk266", GATE_IP_GEN, 4, 0, 0),
+ GATE(CLK_SMMU_ROTATOR, "smmu_rotator", "div_aclk266", GATE_IP_GEN, 6, 0,
+ 0),
+ GATE(CLK_SMMU_JPEG, "smmu_jpeg", "div_aclk166", GATE_IP_GEN, 7, 0, 0),
+ GATE(CLK_SMMU_MDMA1, "smmu_mdma1", "div_aclk266", GATE_IP_GEN, 9, 0, 0),
+
+ GATE(CLK_PDMA0, "pdma0", "div_aclk200", GATE_IP_FSYS, 1, 0, 0),
+ GATE(CLK_PDMA1, "pdma1", "div_aclk200", GATE_IP_FSYS, 2, 0, 0),
+ GATE(CLK_SATA, "sata", "div_aclk200", GATE_IP_FSYS, 6, 0, 0),
+ GATE(CLK_USBOTG, "usbotg", "div_aclk200", GATE_IP_FSYS, 7, 0, 0),
+ GATE(CLK_MIPI_HSI, "mipi_hsi", "div_aclk200", GATE_IP_FSYS, 8, 0, 0),
+ GATE(CLK_SDMMC0, "sdmmc0", "div_aclk200", GATE_IP_FSYS, 12, 0, 0),
+ GATE(CLK_SDMMC1, "sdmmc1", "div_aclk200", GATE_IP_FSYS, 13, 0, 0),
+ GATE(CLK_SDMMC2, "sdmmc2", "div_aclk200", GATE_IP_FSYS, 14, 0, 0),
+ GATE(CLK_SDMMC3, "sdmmc3", "div_aclk200", GATE_IP_FSYS, 15, 0, 0),
+ GATE(CLK_SROMC, "sromc", "div_aclk200", GATE_IP_FSYS, 17, 0, 0),
+ GATE(CLK_USB2, "usb2", "div_aclk200", GATE_IP_FSYS, 18, 0, 0),
+ GATE(CLK_USB3, "usb3", "div_aclk200", GATE_IP_FSYS, 19, 0, 0),
+ GATE(CLK_SATA_PHYCTRL, "sata_phyctrl", "div_aclk200",
+ GATE_IP_FSYS, 24, 0, 0),
+ GATE(CLK_SATA_PHYI2C, "sata_phyi2c", "div_aclk200", GATE_IP_FSYS, 25, 0,
+ 0),
+
+ GATE(CLK_UART0, "uart0", "div_aclk66", GATE_IP_PERIC, 0, 0, 0),
+ GATE(CLK_UART1, "uart1", "div_aclk66", GATE_IP_PERIC, 1, 0, 0),
+ GATE(CLK_UART2, "uart2", "div_aclk66", GATE_IP_PERIC, 2, 0, 0),
+ GATE(CLK_UART3, "uart3", "div_aclk66", GATE_IP_PERIC, 3, 0, 0),
+ GATE(CLK_UART4, "uart4", "div_aclk66", GATE_IP_PERIC, 4, 0, 0),
+ GATE(CLK_I2C0, "i2c0", "div_aclk66", GATE_IP_PERIC, 6, 0, 0),
+ GATE(CLK_I2C1, "i2c1", "div_aclk66", GATE_IP_PERIC, 7, 0, 0),
+ GATE(CLK_I2C2, "i2c2", "div_aclk66", GATE_IP_PERIC, 8, 0, 0),
+ GATE(CLK_I2C3, "i2c3", "div_aclk66", GATE_IP_PERIC, 9, 0, 0),
+ GATE(CLK_I2C4, "i2c4", "div_aclk66", GATE_IP_PERIC, 10, 0, 0),
+ GATE(CLK_I2C5, "i2c5", "div_aclk66", GATE_IP_PERIC, 11, 0, 0),
+ GATE(CLK_I2C6, "i2c6", "div_aclk66", GATE_IP_PERIC, 12, 0, 0),
+ GATE(CLK_I2C7, "i2c7", "div_aclk66", GATE_IP_PERIC, 13, 0, 0),
+ GATE(CLK_I2C_HDMI, "i2c_hdmi", "div_aclk66", GATE_IP_PERIC, 14, 0, 0),
+ GATE(CLK_ADC, "adc", "div_aclk66", GATE_IP_PERIC, 15, 0, 0),
+ GATE(CLK_SPI0, "spi0", "div_aclk66", GATE_IP_PERIC, 16, 0, 0),
+ GATE(CLK_SPI1, "spi1", "div_aclk66", GATE_IP_PERIC, 17, 0, 0),
+ GATE(CLK_SPI2, "spi2", "div_aclk66", GATE_IP_PERIC, 18, 0, 0),
+ GATE(CLK_I2S1, "i2s1", "div_aclk66", GATE_IP_PERIC, 20, 0, 0),
+ GATE(CLK_I2S2, "i2s2", "div_aclk66", GATE_IP_PERIC, 21, 0, 0),
+ GATE(CLK_PCM1, "pcm1", "div_aclk66", GATE_IP_PERIC, 22, 0, 0),
+ GATE(CLK_PCM2, "pcm2", "div_aclk66", GATE_IP_PERIC, 23, 0, 0),
+ GATE(CLK_PWM, "pwm", "div_aclk66", GATE_IP_PERIC, 24, 0, 0),
+ GATE(CLK_SPDIF, "spdif", "div_aclk66", GATE_IP_PERIC, 26, 0, 0),
+ GATE(CLK_AC97, "ac97", "div_aclk66", GATE_IP_PERIC, 27, 0, 0),
+ GATE(CLK_HSI2C0, "hsi2c0", "div_aclk66", GATE_IP_PERIC, 28, 0, 0),
+ GATE(CLK_HSI2C1, "hsi2c1", "div_aclk66", GATE_IP_PERIC, 29, 0, 0),
+ GATE(CLK_HSI2C2, "hsi2c2", "div_aclk66", GATE_IP_PERIC, 30, 0, 0),
+ GATE(CLK_HSI2C3, "hsi2c3", "div_aclk66", GATE_IP_PERIC, 31, 0, 0),
+
+ GATE(CLK_CHIPID, "chipid", "div_aclk66", GATE_IP_PERIS, 0, 0, 0),
+ GATE(CLK_SYSREG, "sysreg", "div_aclk66",
+ GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PMU, "pmu", "div_aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED,
+ 0),
+ GATE(CLK_CMU_TOP, "cmu_top", "div_aclk66",
+ GATE_IP_PERIS, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CMU_CORE, "cmu_core", "div_aclk66",
+ GATE_IP_PERIS, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CMU_MEM, "cmu_mem", "div_aclk66",
+ GATE_IP_PERIS, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC0, "tzpc0", "div_aclk66", GATE_IP_PERIS, 6, 0, 0),
+ GATE(CLK_TZPC1, "tzpc1", "div_aclk66", GATE_IP_PERIS, 7, 0, 0),
+ GATE(CLK_TZPC2, "tzpc2", "div_aclk66", GATE_IP_PERIS, 8, 0, 0),
+ GATE(CLK_TZPC3, "tzpc3", "div_aclk66", GATE_IP_PERIS, 9, 0, 0),
+ GATE(CLK_TZPC4, "tzpc4", "div_aclk66", GATE_IP_PERIS, 10, 0, 0),
+ GATE(CLK_TZPC5, "tzpc5", "div_aclk66", GATE_IP_PERIS, 11, 0, 0),
+ GATE(CLK_TZPC6, "tzpc6", "div_aclk66", GATE_IP_PERIS, 12, 0, 0),
+ GATE(CLK_TZPC7, "tzpc7", "div_aclk66", GATE_IP_PERIS, 13, 0, 0),
+ GATE(CLK_TZPC8, "tzpc8", "div_aclk66", GATE_IP_PERIS, 14, 0, 0),
+ GATE(CLK_TZPC9, "tzpc9", "div_aclk66", GATE_IP_PERIS, 15, 0, 0),
+ GATE(CLK_HDMI_CEC, "hdmi_cec", "div_aclk66", GATE_IP_PERIS, 16, 0, 0),
+ GATE(CLK_MCT, "mct", "div_aclk66", GATE_IP_PERIS, 18, 0, 0),
+ GATE(CLK_WDT, "wdt", "div_aclk66", GATE_IP_PERIS, 19, 0, 0),
+ GATE(CLK_RTC, "rtc", "div_aclk66", GATE_IP_PERIS, 20, 0, 0),
+ GATE(CLK_TMU, "tmu", "div_aclk66", GATE_IP_PERIS, 21, 0, 0),
};
static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
@@ -517,20 +599,41 @@ static struct samsung_pll_rate_table epll_24mhz_tbl[] __initdata = {
{ },
};
+static struct samsung_pll_rate_table apll_24mhz_tbl[] __initdata = {
+ /* sorted in descending order */
+ /* PLL_35XX_RATE(rate, m, p, s) */
+ PLL_35XX_RATE(1700000000, 425, 6, 0),
+ PLL_35XX_RATE(1600000000, 200, 3, 0),
+ PLL_35XX_RATE(1500000000, 250, 4, 0),
+ PLL_35XX_RATE(1400000000, 175, 3, 0),
+ PLL_35XX_RATE(1300000000, 325, 6, 0),
+ PLL_35XX_RATE(1200000000, 200, 4, 0),
+ PLL_35XX_RATE(1100000000, 275, 6, 0),
+ PLL_35XX_RATE(1000000000, 125, 3, 0),
+ PLL_35XX_RATE(900000000, 150, 4, 0),
+ PLL_35XX_RATE(800000000, 100, 3, 0),
+ PLL_35XX_RATE(700000000, 175, 3, 1),
+ PLL_35XX_RATE(600000000, 200, 4, 1),
+ PLL_35XX_RATE(500000000, 125, 3, 1),
+ PLL_35XX_RATE(400000000, 100, 3, 1),
+ PLL_35XX_RATE(300000000, 200, 4, 2),
+ PLL_35XX_RATE(200000000, 100, 3, 2),
+};
+
static struct samsung_pll_clock exynos5250_plls[nr_plls] __initdata = {
- [apll] = PLL_A(pll_35xx, fout_apll, "fout_apll", "fin_pll", APLL_LOCK,
- APLL_CON0, "fout_apll", NULL),
- [mpll] = PLL_A(pll_35xx, fout_mpll, "fout_mpll", "fin_pll", MPLL_LOCK,
- MPLL_CON0, "fout_mpll", NULL),
- [bpll] = PLL(pll_35xx, fout_bpll, "fout_bpll", "fin_pll", BPLL_LOCK,
+ [apll] = PLL_A(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+ APLL_LOCK, APLL_CON0, "fout_apll", NULL),
+ [mpll] = PLL_A(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
+ MPLL_LOCK, MPLL_CON0, "fout_mpll", NULL),
+ [bpll] = PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll", BPLL_LOCK,
BPLL_CON0, NULL),
- [gpll] = PLL(pll_35xx, fout_gpll, "fout_gpll", "fin_pll", GPLL_LOCK,
+ [gpll] = PLL(pll_35xx, CLK_FOUT_GPLL, "fout_gpll", "fin_pll", GPLL_LOCK,
GPLL_CON0, NULL),
- [cpll] = PLL(pll_35xx, fout_cpll, "fout_cpll", "fin_pll", CPLL_LOCK,
+ [cpll] = PLL(pll_35xx, CLK_FOUT_CPLL, "fout_cpll", "fin_pll", CPLL_LOCK,
CPLL_CON0, NULL),
- [epll] = PLL(pll_36xx, fout_epll, "fout_epll", "fin_pll", EPLL_LOCK,
+ [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll", EPLL_LOCK,
EPLL_CON0, NULL),
- [vpll] = PLL(pll_36xx, fout_vpll, "fout_vpll", "mout_vpllsrc",
+ [vpll] = PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "mout_vpllsrc",
VPLL_LOCK, VPLL_CON0, NULL),
};
@@ -552,7 +655,7 @@ static void __init exynos5250_clk_init(struct device_node *np)
panic("%s: unable to determine soc\n", __func__);
}
- samsung_clk_init(np, reg_base, nr_clks,
+ samsung_clk_init(np, reg_base, CLK_NR_CLKS,
exynos5250_clk_regs, ARRAY_SIZE(exynos5250_clk_regs),
NULL, 0);
samsung_clk_of_register_fixed_ext(exynos5250_fixed_rate_ext_clks,
@@ -561,8 +664,10 @@ static void __init exynos5250_clk_init(struct device_node *np)
samsung_clk_register_mux(exynos5250_pll_pmux_clks,
ARRAY_SIZE(exynos5250_pll_pmux_clks));
- if (_get_rate("fin_pll") == 24 * MHZ)
+ if (_get_rate("fin_pll") == 24 * MHZ) {
exynos5250_plls[epll].rate_table = epll_24mhz_tbl;
+ exynos5250_plls[apll].rate_table = apll_24mhz_tbl;
+ }
if (_get_rate("mout_vpllsrc") == 24 * MHZ)
exynos5250_plls[vpll].rate_table = vpll_24mhz_tbl;
@@ -581,6 +686,6 @@ static void __init exynos5250_clk_init(struct device_node *np)
ARRAY_SIZE(exynos5250_gate_clks));
pr_info("Exynos5250: clock setup completed, armclk=%ld\n",
- _get_rate("armclk"));
+ _get_rate("div_arm2"));
}
CLK_OF_DECLARE(exynos5250_clk, "samsung,exynos5250-clock", exynos5250_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 48c4a9350b91..ab4f2f7d88ef 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -10,6 +10,7 @@
* Common Clock Framework support for Exynos5420 SoC.
*/
+#include <dt-bindings/clock/exynos5420.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
@@ -107,48 +108,6 @@ enum exynos5420_plls {
nr_plls /* number of PLLs */
};
-enum exynos5420_clks {
- none,
-
- /* core clocks */
- fin_pll, fout_apll, fout_cpll, fout_dpll, fout_epll, fout_rpll,
- fout_ipll, fout_spll, fout_vpll, fout_mpll, fout_bpll, fout_kpll,
-
- /* gate for special clocks (sclk) */
- sclk_uart0 = 128, sclk_uart1, sclk_uart2, sclk_uart3, sclk_mmc0,
- sclk_mmc1, sclk_mmc2, sclk_spi0, sclk_spi1, sclk_spi2, sclk_i2s1,
- sclk_i2s2, sclk_pcm1, sclk_pcm2, sclk_spdif, sclk_hdmi, sclk_pixel,
- sclk_dp1, sclk_mipi1, sclk_fimd1, sclk_maudio0, sclk_maupcm0,
- sclk_usbd300, sclk_usbd301, sclk_usbphy300, sclk_usbphy301, sclk_unipro,
- sclk_pwm, sclk_gscl_wa, sclk_gscl_wb, sclk_hdmiphy,
-
- /* gate clocks */
- aclk66_peric = 256, uart0, uart1, uart2, uart3, i2c0, i2c1, i2c2, i2c3,
- i2c4, i2c5, i2c6, i2c7, i2c_hdmi, tsadc, spi0, spi1, spi2, keyif, i2s1,
- i2s2, pcm1, pcm2, pwm, spdif, i2c8, i2c9, i2c10, aclk66_psgen = 300,
- chipid, sysreg, tzpc0, tzpc1, tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7,
- tzpc8, tzpc9, hdmi_cec, seckey, mct, wdt, rtc, tmu, tmu_gpu,
- pclk66_gpio = 330, aclk200_fsys2 = 350, mmc0, mmc1, mmc2, sromc, ufs,
- aclk200_fsys = 360, tsi, pdma0, pdma1, rtic, usbh20, usbd300, usbd301,
- aclk400_mscl = 380, mscl0, mscl1, mscl2, smmu_mscl0, smmu_mscl1,
- smmu_mscl2, aclk333 = 400, mfc, smmu_mfcl, smmu_mfcr,
- aclk200_disp1 = 410, dsim1, dp1, hdmi, aclk300_disp1 = 420, fimd1,
- smmu_fimd1, aclk166 = 430, mixer, aclk266 = 440, rotator, mdma1,
- smmu_rotator, smmu_mdma1, aclk300_jpeg = 450, jpeg, jpeg2, smmu_jpeg,
- aclk300_gscl = 460, smmu_gscl0, smmu_gscl1, gscl_wa, gscl_wb, gscl0,
- gscl1, clk_3aa, aclk266_g2d = 470, sss, slim_sss, mdma0,
- aclk333_g2d = 480, g2d, aclk333_432_gscl = 490, smmu_3aa, smmu_fimcl0,
- smmu_fimcl1, smmu_fimcl3, fimc_lite3, aclk_g3d = 500, g3d, smmu_mixer,
-
- /* mux clocks */
- mout_hdmi = 640,
-
- /* divider clocks */
- dout_pixel = 768,
-
- nr_clks,
-};
-
/*
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
@@ -298,225 +257,226 @@ PNAME(maudio0_p) = { "fin_pll", "maudio_clk", "sclk_dpll", "sclk_mpll",
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock exynos5420_fixed_rate_ext_clks[] __initdata = {
- FRATE(fin_pll, "fin_pll", NULL, CLK_IS_ROOT, 0),
+ FRATE(CLK_FIN_PLL, "fin_pll", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks generated inside the soc */
static struct samsung_fixed_rate_clock exynos5420_fixed_rate_clks[] __initdata = {
- FRATE(sclk_hdmiphy, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
- FRATE(none, "sclk_pwi", NULL, CLK_IS_ROOT, 24000000),
- FRATE(none, "sclk_usbh20", NULL, CLK_IS_ROOT, 48000000),
- FRATE(none, "mphy_refclk_ixtal24", NULL, CLK_IS_ROOT, 48000000),
- FRATE(none, "sclk_usbh20_scan_clk", NULL, CLK_IS_ROOT, 480000000),
+ FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
+ FRATE(0, "sclk_pwi", NULL, CLK_IS_ROOT, 24000000),
+ FRATE(0, "sclk_usbh20", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(0, "mphy_refclk_ixtal24", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(0, "sclk_usbh20_scan_clk", NULL, CLK_IS_ROOT, 480000000),
};
static struct samsung_fixed_factor_clock exynos5420_fixed_factor_clks[] __initdata = {
- FFACTOR(none, "sclk_hsic_12m", "fin_pll", 1, 2, 0),
+ FFACTOR(0, "sclk_hsic_12m", "fin_pll", 1, 2, 0),
};
static struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
- MUX(none, "mout_mspll_kfc", mspll_cpu_p, SRC_TOP7, 8, 2),
- MUX(none, "mout_mspll_cpu", mspll_cpu_p, SRC_TOP7, 12, 2),
- MUX(none, "mout_apll", apll_p, SRC_CPU, 0, 1),
- MUX(none, "mout_cpu", cpu_p, SRC_CPU, 16, 1),
- MUX(none, "mout_kpll", kpll_p, SRC_KFC, 0, 1),
- MUX(none, "mout_cpu_kfc", kfc_p, SRC_KFC, 16, 1),
+ MUX(0, "mout_mspll_kfc", mspll_cpu_p, SRC_TOP7, 8, 2),
+ MUX(0, "mout_mspll_cpu", mspll_cpu_p, SRC_TOP7, 12, 2),
+ MUX(0, "mout_apll", apll_p, SRC_CPU, 0, 1),
+ MUX(0, "mout_cpu", cpu_p, SRC_CPU, 16, 1),
+ MUX(0, "mout_kpll", kpll_p, SRC_KFC, 0, 1),
+ MUX(0, "mout_cpu_kfc", kfc_p, SRC_KFC, 16, 1),
- MUX(none, "sclk_bpll", bpll_p, SRC_CDREX, 0, 1),
+ MUX(0, "sclk_bpll", bpll_p, SRC_CDREX, 0, 1),
- MUX_A(none, "mout_aclk400_mscl", group1_p,
+ MUX_A(0, "mout_aclk400_mscl", group1_p,
SRC_TOP0, 4, 2, "aclk400_mscl"),
- MUX(none, "mout_aclk200", group1_p, SRC_TOP0, 8, 2),
- MUX(none, "mout_aclk200_fsys2", group1_p, SRC_TOP0, 12, 2),
- MUX(none, "mout_aclk200_fsys", group1_p, SRC_TOP0, 28, 2),
-
- MUX(none, "mout_aclk333_432_gscl", group4_p, SRC_TOP1, 0, 2),
- MUX(none, "mout_aclk66", group1_p, SRC_TOP1, 8, 2),
- MUX(none, "mout_aclk266", group1_p, SRC_TOP1, 20, 2),
- MUX(none, "mout_aclk166", group1_p, SRC_TOP1, 24, 2),
- MUX(none, "mout_aclk333", group1_p, SRC_TOP1, 28, 2),
-
- MUX(none, "mout_aclk333_g2d", group1_p, SRC_TOP2, 8, 2),
- MUX(none, "mout_aclk266_g2d", group1_p, SRC_TOP2, 12, 2),
- MUX(none, "mout_aclk_g3d", group5_p, SRC_TOP2, 16, 1),
- MUX(none, "mout_aclk300_jpeg", group1_p, SRC_TOP2, 20, 2),
- MUX(none, "mout_aclk300_disp1", group1_p, SRC_TOP2, 24, 2),
- MUX(none, "mout_aclk300_gscl", group1_p, SRC_TOP2, 28, 2),
-
- MUX(none, "mout_user_aclk400_mscl", user_aclk400_mscl_p,
+ MUX(0, "mout_aclk200", group1_p, SRC_TOP0, 8, 2),
+ MUX(0, "mout_aclk200_fsys2", group1_p, SRC_TOP0, 12, 2),
+ MUX(0, "mout_aclk200_fsys", group1_p, SRC_TOP0, 28, 2),
+
+ MUX(0, "mout_aclk333_432_gscl", group4_p, SRC_TOP1, 0, 2),
+ MUX(0, "mout_aclk66", group1_p, SRC_TOP1, 8, 2),
+ MUX(0, "mout_aclk266", group1_p, SRC_TOP1, 20, 2),
+ MUX(0, "mout_aclk166", group1_p, SRC_TOP1, 24, 2),
+ MUX(0, "mout_aclk333", group1_p, SRC_TOP1, 28, 2),
+
+ MUX(0, "mout_aclk333_g2d", group1_p, SRC_TOP2, 8, 2),
+ MUX(0, "mout_aclk266_g2d", group1_p, SRC_TOP2, 12, 2),
+ MUX(0, "mout_aclk_g3d", group5_p, SRC_TOP2, 16, 1),
+ MUX(0, "mout_aclk300_jpeg", group1_p, SRC_TOP2, 20, 2),
+ MUX(0, "mout_aclk300_disp1", group1_p, SRC_TOP2, 24, 2),
+ MUX(0, "mout_aclk300_gscl", group1_p, SRC_TOP2, 28, 2),
+
+ MUX(0, "mout_user_aclk400_mscl", user_aclk400_mscl_p,
SRC_TOP3, 4, 1),
- MUX_A(none, "mout_aclk200_disp1", aclk200_disp1_p,
+ MUX_A(0, "mout_aclk200_disp1", aclk200_disp1_p,
SRC_TOP3, 8, 1, "aclk200_disp1"),
- MUX(none, "mout_user_aclk200_fsys2", user_aclk200_fsys2_p,
+ MUX(0, "mout_user_aclk200_fsys2", user_aclk200_fsys2_p,
SRC_TOP3, 12, 1),
- MUX(none, "mout_user_aclk200_fsys", user_aclk200_fsys_p,
+ MUX(0, "mout_user_aclk200_fsys", user_aclk200_fsys_p,
SRC_TOP3, 28, 1),
- MUX(none, "mout_user_aclk333_432_gscl", user_aclk333_432_gscl_p,
+ MUX(0, "mout_user_aclk333_432_gscl", user_aclk333_432_gscl_p,
SRC_TOP4, 0, 1),
- MUX(none, "mout_aclk66_peric", aclk66_peric_p, SRC_TOP4, 8, 1),
- MUX(none, "mout_user_aclk266", user_aclk266_p, SRC_TOP4, 20, 1),
- MUX(none, "mout_user_aclk166", user_aclk166_p, SRC_TOP4, 24, 1),
- MUX(none, "mout_user_aclk333", user_aclk333_p, SRC_TOP4, 28, 1),
-
- MUX(none, "mout_aclk66_psgen", aclk66_peric_p, SRC_TOP5, 4, 1),
- MUX(none, "mout_user_aclk333_g2d", user_aclk333_g2d_p, SRC_TOP5, 8, 1),
- MUX(none, "mout_user_aclk266_g2d", user_aclk266_g2d_p, SRC_TOP5, 12, 1),
- MUX_A(none, "mout_user_aclk_g3d", user_aclk_g3d_p,
+ MUX(0, "mout_aclk66_peric", aclk66_peric_p, SRC_TOP4, 8, 1),
+ MUX(0, "mout_user_aclk266", user_aclk266_p, SRC_TOP4, 20, 1),
+ MUX(0, "mout_user_aclk166", user_aclk166_p, SRC_TOP4, 24, 1),
+ MUX(0, "mout_user_aclk333", user_aclk333_p, SRC_TOP4, 28, 1),
+
+ MUX(0, "mout_aclk66_psgen", aclk66_peric_p, SRC_TOP5, 4, 1),
+ MUX(0, "mout_user_aclk333_g2d", user_aclk333_g2d_p, SRC_TOP5, 8, 1),
+ MUX(0, "mout_user_aclk266_g2d", user_aclk266_g2d_p, SRC_TOP5, 12, 1),
+ MUX_A(0, "mout_user_aclk_g3d", user_aclk_g3d_p,
SRC_TOP5, 16, 1, "aclkg3d"),
- MUX(none, "mout_user_aclk300_jpeg", user_aclk300_jpeg_p,
+ MUX(0, "mout_user_aclk300_jpeg", user_aclk300_jpeg_p,
SRC_TOP5, 20, 1),
- MUX(none, "mout_user_aclk300_disp1", user_aclk300_disp1_p,
+ MUX(0, "mout_user_aclk300_disp1", user_aclk300_disp1_p,
SRC_TOP5, 24, 1),
- MUX(none, "mout_user_aclk300_gscl", user_aclk300_gscl_p,
+ MUX(0, "mout_user_aclk300_gscl", user_aclk300_gscl_p,
SRC_TOP5, 28, 1),
- MUX(none, "sclk_mpll", mpll_p, SRC_TOP6, 0, 1),
- MUX(none, "sclk_vpll", vpll_p, SRC_TOP6, 4, 1),
- MUX(none, "sclk_spll", spll_p, SRC_TOP6, 8, 1),
- MUX(none, "sclk_ipll", ipll_p, SRC_TOP6, 12, 1),
- MUX(none, "sclk_rpll", rpll_p, SRC_TOP6, 16, 1),
- MUX(none, "sclk_epll", epll_p, SRC_TOP6, 20, 1),
- MUX(none, "sclk_dpll", dpll_p, SRC_TOP6, 24, 1),
- MUX(none, "sclk_cpll", cpll_p, SRC_TOP6, 28, 1),
-
- MUX(none, "mout_sw_aclk400_mscl", sw_aclk400_mscl_p, SRC_TOP10, 4, 1),
- MUX(none, "mout_sw_aclk200", sw_aclk200_p, SRC_TOP10, 8, 1),
- MUX(none, "mout_sw_aclk200_fsys2", sw_aclk200_fsys2_p,
+ MUX(0, "sclk_mpll", mpll_p, SRC_TOP6, 0, 1),
+ MUX(0, "sclk_vpll", vpll_p, SRC_TOP6, 4, 1),
+ MUX(0, "sclk_spll", spll_p, SRC_TOP6, 8, 1),
+ MUX(0, "sclk_ipll", ipll_p, SRC_TOP6, 12, 1),
+ MUX(0, "sclk_rpll", rpll_p, SRC_TOP6, 16, 1),
+ MUX(0, "sclk_epll", epll_p, SRC_TOP6, 20, 1),
+ MUX(0, "sclk_dpll", dpll_p, SRC_TOP6, 24, 1),
+ MUX(0, "sclk_cpll", cpll_p, SRC_TOP6, 28, 1),
+
+ MUX(0, "mout_sw_aclk400_mscl", sw_aclk400_mscl_p, SRC_TOP10, 4, 1),
+ MUX(0, "mout_sw_aclk200", sw_aclk200_p, SRC_TOP10, 8, 1),
+ MUX(0, "mout_sw_aclk200_fsys2", sw_aclk200_fsys2_p,
SRC_TOP10, 12, 1),
- MUX(none, "mout_sw_aclk200_fsys", sw_aclk200_fsys_p, SRC_TOP10, 28, 1),
+ MUX(0, "mout_sw_aclk200_fsys", sw_aclk200_fsys_p, SRC_TOP10, 28, 1),
- MUX(none, "mout_sw_aclk333_432_gscl", sw_aclk333_432_gscl_p,
+ MUX(0, "mout_sw_aclk333_432_gscl", sw_aclk333_432_gscl_p,
SRC_TOP11, 0, 1),
- MUX(none, "mout_sw_aclk66", sw_aclk66_p, SRC_TOP11, 8, 1),
- MUX(none, "mout_sw_aclk266", sw_aclk266_p, SRC_TOP11, 20, 1),
- MUX(none, "mout_sw_aclk166", sw_aclk166_p, SRC_TOP11, 24, 1),
- MUX(none, "mout_sw_aclk333", sw_aclk333_p, SRC_TOP11, 28, 1),
-
- MUX(none, "mout_sw_aclk333_g2d", sw_aclk333_g2d_p, SRC_TOP12, 8, 1),
- MUX(none, "mout_sw_aclk266_g2d", sw_aclk266_g2d_p, SRC_TOP12, 12, 1),
- MUX(none, "mout_sw_aclk_g3d", sw_aclk_g3d_p, SRC_TOP12, 16, 1),
- MUX(none, "mout_sw_aclk300_jpeg", sw_aclk300_jpeg_p, SRC_TOP12, 20, 1),
- MUX(none, "mout_sw_aclk300_disp1", sw_aclk300_disp1_p,
+ MUX(0, "mout_sw_aclk66", sw_aclk66_p, SRC_TOP11, 8, 1),
+ MUX(0, "mout_sw_aclk266", sw_aclk266_p, SRC_TOP11, 20, 1),
+ MUX(0, "mout_sw_aclk166", sw_aclk166_p, SRC_TOP11, 24, 1),
+ MUX(0, "mout_sw_aclk333", sw_aclk333_p, SRC_TOP11, 28, 1),
+
+ MUX(0, "mout_sw_aclk333_g2d", sw_aclk333_g2d_p, SRC_TOP12, 8, 1),
+ MUX(0, "mout_sw_aclk266_g2d", sw_aclk266_g2d_p, SRC_TOP12, 12, 1),
+ MUX(0, "mout_sw_aclk_g3d", sw_aclk_g3d_p, SRC_TOP12, 16, 1),
+ MUX(0, "mout_sw_aclk300_jpeg", sw_aclk300_jpeg_p, SRC_TOP12, 20, 1),
+ MUX(0, "mout_sw_aclk300_disp1", sw_aclk300_disp1_p,
SRC_TOP12, 24, 1),
- MUX(none, "mout_sw_aclk300_gscl", sw_aclk300_gscl_p, SRC_TOP12, 28, 1),
+ MUX(0, "mout_sw_aclk300_gscl", sw_aclk300_gscl_p, SRC_TOP12, 28, 1),
/* DISP1 Block */
- MUX(none, "mout_fimd1", group3_p, SRC_DISP10, 4, 1),
- MUX(none, "mout_mipi1", group2_p, SRC_DISP10, 16, 3),
- MUX(none, "mout_dp1", group2_p, SRC_DISP10, 20, 3),
- MUX(none, "mout_pixel", group2_p, SRC_DISP10, 24, 3),
- MUX(mout_hdmi, "mout_hdmi", hdmi_p, SRC_DISP10, 28, 1),
+ MUX(0, "mout_fimd1", group3_p, SRC_DISP10, 4, 1),
+ MUX(0, "mout_mipi1", group2_p, SRC_DISP10, 16, 3),
+ MUX(0, "mout_dp1", group2_p, SRC_DISP10, 20, 3),
+ MUX(0, "mout_pixel", group2_p, SRC_DISP10, 24, 3),
+ MUX(CLK_MOUT_HDMI, "mout_hdmi", hdmi_p, SRC_DISP10, 28, 1),
/* MAU Block */
- MUX(none, "mout_maudio0", maudio0_p, SRC_MAU, 28, 3),
+ MUX(0, "mout_maudio0", maudio0_p, SRC_MAU, 28, 3),
/* FSYS Block */
- MUX(none, "mout_usbd301", group2_p, SRC_FSYS, 4, 3),
- MUX(none, "mout_mmc0", group2_p, SRC_FSYS, 8, 3),
- MUX(none, "mout_mmc1", group2_p, SRC_FSYS, 12, 3),
- MUX(none, "mout_mmc2", group2_p, SRC_FSYS, 16, 3),
- MUX(none, "mout_usbd300", group2_p, SRC_FSYS, 20, 3),
- MUX(none, "mout_unipro", group2_p, SRC_FSYS, 24, 3),
+ MUX(0, "mout_usbd301", group2_p, SRC_FSYS, 4, 3),
+ MUX(0, "mout_mmc0", group2_p, SRC_FSYS, 8, 3),
+ MUX(0, "mout_mmc1", group2_p, SRC_FSYS, 12, 3),
+ MUX(0, "mout_mmc2", group2_p, SRC_FSYS, 16, 3),
+ MUX(0, "mout_usbd300", group2_p, SRC_FSYS, 20, 3),
+ MUX(0, "mout_unipro", group2_p, SRC_FSYS, 24, 3),
/* PERIC Block */
- MUX(none, "mout_uart0", group2_p, SRC_PERIC0, 4, 3),
- MUX(none, "mout_uart1", group2_p, SRC_PERIC0, 8, 3),
- MUX(none, "mout_uart2", group2_p, SRC_PERIC0, 12, 3),
- MUX(none, "mout_uart3", group2_p, SRC_PERIC0, 16, 3),
- MUX(none, "mout_pwm", group2_p, SRC_PERIC0, 24, 3),
- MUX(none, "mout_spdif", spdif_p, SRC_PERIC0, 28, 3),
- MUX(none, "mout_audio0", audio0_p, SRC_PERIC1, 8, 3),
- MUX(none, "mout_audio1", audio1_p, SRC_PERIC1, 12, 3),
- MUX(none, "mout_audio2", audio2_p, SRC_PERIC1, 16, 3),
- MUX(none, "mout_spi0", group2_p, SRC_PERIC1, 20, 3),
- MUX(none, "mout_spi1", group2_p, SRC_PERIC1, 24, 3),
- MUX(none, "mout_spi2", group2_p, SRC_PERIC1, 28, 3),
+ MUX(0, "mout_uart0", group2_p, SRC_PERIC0, 4, 3),
+ MUX(0, "mout_uart1", group2_p, SRC_PERIC0, 8, 3),
+ MUX(0, "mout_uart2", group2_p, SRC_PERIC0, 12, 3),
+ MUX(0, "mout_uart3", group2_p, SRC_PERIC0, 16, 3),
+ MUX(0, "mout_pwm", group2_p, SRC_PERIC0, 24, 3),
+ MUX(0, "mout_spdif", spdif_p, SRC_PERIC0, 28, 3),
+ MUX(0, "mout_audio0", audio0_p, SRC_PERIC1, 8, 3),
+ MUX(0, "mout_audio1", audio1_p, SRC_PERIC1, 12, 3),
+ MUX(0, "mout_audio2", audio2_p, SRC_PERIC1, 16, 3),
+ MUX(0, "mout_spi0", group2_p, SRC_PERIC1, 20, 3),
+ MUX(0, "mout_spi1", group2_p, SRC_PERIC1, 24, 3),
+ MUX(0, "mout_spi2", group2_p, SRC_PERIC1, 28, 3),
};
static struct samsung_div_clock exynos5420_div_clks[] __initdata = {
- DIV(none, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
- DIV(none, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
- DIV(none, "armclk2", "div_arm", DIV_CPU0, 28, 3),
- DIV(none, "div_kfc", "mout_cpu_kfc", DIV_KFC0, 0, 3),
- DIV(none, "sclk_kpll", "mout_kpll", DIV_KFC0, 24, 3),
-
- DIV(none, "dout_aclk400_mscl", "mout_aclk400_mscl", DIV_TOP0, 4, 3),
- DIV(none, "dout_aclk200", "mout_aclk200", DIV_TOP0, 8, 3),
- DIV(none, "dout_aclk200_fsys2", "mout_aclk200_fsys2", DIV_TOP0, 12, 3),
- DIV(none, "dout_pclk200_fsys", "mout_pclk200_fsys", DIV_TOP0, 24, 3),
- DIV(none, "dout_aclk200_fsys", "mout_aclk200_fsys", DIV_TOP0, 28, 3),
-
- DIV(none, "dout_aclk333_432_gscl", "mout_aclk333_432_gscl",
+ DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
+ DIV(0, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
+ DIV(0, "armclk2", "div_arm", DIV_CPU0, 28, 3),
+ DIV(0, "div_kfc", "mout_cpu_kfc", DIV_KFC0, 0, 3),
+ DIV(0, "sclk_kpll", "mout_kpll", DIV_KFC0, 24, 3),
+
+ DIV(0, "dout_aclk400_mscl", "mout_aclk400_mscl", DIV_TOP0, 4, 3),
+ DIV(0, "dout_aclk200", "mout_aclk200", DIV_TOP0, 8, 3),
+ DIV(0, "dout_aclk200_fsys2", "mout_aclk200_fsys2", DIV_TOP0, 12, 3),
+ DIV(0, "dout_pclk200_fsys", "mout_pclk200_fsys", DIV_TOP0, 24, 3),
+ DIV(0, "dout_aclk200_fsys", "mout_aclk200_fsys", DIV_TOP0, 28, 3),
+
+ DIV(0, "dout_aclk333_432_gscl", "mout_aclk333_432_gscl",
DIV_TOP1, 0, 3),
- DIV(none, "dout_aclk66", "mout_aclk66", DIV_TOP1, 8, 6),
- DIV(none, "dout_aclk266", "mout_aclk266", DIV_TOP1, 20, 3),
- DIV(none, "dout_aclk166", "mout_aclk166", DIV_TOP1, 24, 3),
- DIV(none, "dout_aclk333", "mout_aclk333", DIV_TOP1, 28, 3),
-
- DIV(none, "dout_aclk333_g2d", "mout_aclk333_g2d", DIV_TOP2, 8, 3),
- DIV(none, "dout_aclk266_g2d", "mout_aclk266_g2d", DIV_TOP2, 12, 3),
- DIV(none, "dout_aclk_g3d", "mout_aclk_g3d", DIV_TOP2, 16, 3),
- DIV(none, "dout_aclk300_jpeg", "mout_aclk300_jpeg", DIV_TOP2, 20, 3),
- DIV_A(none, "dout_aclk300_disp1", "mout_aclk300_disp1",
+ DIV(0, "dout_aclk66", "mout_aclk66", DIV_TOP1, 8, 6),
+ DIV(0, "dout_aclk266", "mout_aclk266", DIV_TOP1, 20, 3),
+ DIV(0, "dout_aclk166", "mout_aclk166", DIV_TOP1, 24, 3),
+ DIV(0, "dout_aclk333", "mout_aclk333", DIV_TOP1, 28, 3),
+
+ DIV(0, "dout_aclk333_g2d", "mout_aclk333_g2d", DIV_TOP2, 8, 3),
+ DIV(0, "dout_aclk266_g2d", "mout_aclk266_g2d", DIV_TOP2, 12, 3),
+ DIV(0, "dout_aclk_g3d", "mout_aclk_g3d", DIV_TOP2, 16, 3),
+ DIV(0, "dout_aclk300_jpeg", "mout_aclk300_jpeg", DIV_TOP2, 20, 3),
+ DIV_A(0, "dout_aclk300_disp1", "mout_aclk300_disp1",
DIV_TOP2, 24, 3, "aclk300_disp1"),
- DIV(none, "dout_aclk300_gscl", "mout_aclk300_gscl", DIV_TOP2, 28, 3),
+ DIV(0, "dout_aclk300_gscl", "mout_aclk300_gscl", DIV_TOP2, 28, 3),
/* DISP1 Block */
- DIV(none, "dout_fimd1", "mout_fimd1", DIV_DISP10, 0, 4),
- DIV(none, "dout_mipi1", "mout_mipi1", DIV_DISP10, 16, 8),
- DIV(none, "dout_dp1", "mout_dp1", DIV_DISP10, 24, 4),
- DIV(dout_pixel, "dout_hdmi_pixel", "mout_pixel", DIV_DISP10, 28, 4),
+ DIV(0, "dout_fimd1", "mout_fimd1", DIV_DISP10, 0, 4),
+ DIV(0, "dout_mipi1", "mout_mipi1", DIV_DISP10, 16, 8),
+ DIV(0, "dout_dp1", "mout_dp1", DIV_DISP10, 24, 4),
+ DIV(CLK_DOUT_PIXEL, "dout_hdmi_pixel", "mout_pixel", DIV_DISP10, 28, 4),
/* Audio Block */
- DIV(none, "dout_maudio0", "mout_maudio0", DIV_MAU, 20, 4),
- DIV(none, "dout_maupcm0", "dout_maudio0", DIV_MAU, 24, 8),
+ DIV(0, "dout_maudio0", "mout_maudio0", DIV_MAU, 20, 4),
+ DIV(0, "dout_maupcm0", "dout_maudio0", DIV_MAU, 24, 8),
/* USB3.0 */
- DIV(none, "dout_usbphy301", "mout_usbd301", DIV_FSYS0, 12, 4),
- DIV(none, "dout_usbphy300", "mout_usbd300", DIV_FSYS0, 16, 4),
- DIV(none, "dout_usbd301", "mout_usbd301", DIV_FSYS0, 20, 4),
- DIV(none, "dout_usbd300", "mout_usbd300", DIV_FSYS0, 24, 4),
+ DIV(0, "dout_usbphy301", "mout_usbd301", DIV_FSYS0, 12, 4),
+ DIV(0, "dout_usbphy300", "mout_usbd300", DIV_FSYS0, 16, 4),
+ DIV(0, "dout_usbd301", "mout_usbd301", DIV_FSYS0, 20, 4),
+ DIV(0, "dout_usbd300", "mout_usbd300", DIV_FSYS0, 24, 4),
/* MMC */
- DIV(none, "dout_mmc0", "mout_mmc0", DIV_FSYS1, 0, 10),
- DIV(none, "dout_mmc1", "mout_mmc1", DIV_FSYS1, 10, 10),
- DIV(none, "dout_mmc2", "mout_mmc2", DIV_FSYS1, 20, 10),
+ DIV(0, "dout_mmc0", "mout_mmc0", DIV_FSYS1, 0, 10),
+ DIV(0, "dout_mmc1", "mout_mmc1", DIV_FSYS1, 10, 10),
+ DIV(0, "dout_mmc2", "mout_mmc2", DIV_FSYS1, 20, 10),
- DIV(none, "dout_unipro", "mout_unipro", DIV_FSYS2, 24, 8),
+ DIV(0, "dout_unipro", "mout_unipro", DIV_FSYS2, 24, 8),
/* UART and PWM */
- DIV(none, "dout_uart0", "mout_uart0", DIV_PERIC0, 8, 4),
- DIV(none, "dout_uart1", "mout_uart1", DIV_PERIC0, 12, 4),
- DIV(none, "dout_uart2", "mout_uart2", DIV_PERIC0, 16, 4),
- DIV(none, "dout_uart3", "mout_uart3", DIV_PERIC0, 20, 4),
- DIV(none, "dout_pwm", "mout_pwm", DIV_PERIC0, 28, 4),
+ DIV(0, "dout_uart0", "mout_uart0", DIV_PERIC0, 8, 4),
+ DIV(0, "dout_uart1", "mout_uart1", DIV_PERIC0, 12, 4),
+ DIV(0, "dout_uart2", "mout_uart2", DIV_PERIC0, 16, 4),
+ DIV(0, "dout_uart3", "mout_uart3", DIV_PERIC0, 20, 4),
+ DIV(0, "dout_pwm", "mout_pwm", DIV_PERIC0, 28, 4),
/* SPI */
- DIV(none, "dout_spi0", "mout_spi0", DIV_PERIC1, 20, 4),
- DIV(none, "dout_spi1", "mout_spi1", DIV_PERIC1, 24, 4),
- DIV(none, "dout_spi2", "mout_spi2", DIV_PERIC1, 28, 4),
+ DIV(0, "dout_spi0", "mout_spi0", DIV_PERIC1, 20, 4),
+ DIV(0, "dout_spi1", "mout_spi1", DIV_PERIC1, 24, 4),
+ DIV(0, "dout_spi2", "mout_spi2", DIV_PERIC1, 28, 4),
/* PCM */
- DIV(none, "dout_pcm1", "dout_audio1", DIV_PERIC2, 16, 8),
- DIV(none, "dout_pcm2", "dout_audio2", DIV_PERIC2, 24, 8),
+ DIV(0, "dout_pcm1", "dout_audio1", DIV_PERIC2, 16, 8),
+ DIV(0, "dout_pcm2", "dout_audio2", DIV_PERIC2, 24, 8),
/* Audio - I2S */
- DIV(none, "dout_i2s1", "dout_audio1", DIV_PERIC3, 6, 6),
- DIV(none, "dout_i2s2", "dout_audio2", DIV_PERIC3, 12, 6),
- DIV(none, "dout_audio0", "mout_audio0", DIV_PERIC3, 20, 4),
- DIV(none, "dout_audio1", "mout_audio1", DIV_PERIC3, 24, 4),
- DIV(none, "dout_audio2", "mout_audio2", DIV_PERIC3, 28, 4),
+ DIV(0, "dout_i2s1", "dout_audio1", DIV_PERIC3, 6, 6),
+ DIV(0, "dout_i2s2", "dout_audio2", DIV_PERIC3, 12, 6),
+ DIV(0, "dout_audio0", "mout_audio0", DIV_PERIC3, 20, 4),
+ DIV(0, "dout_audio1", "mout_audio1", DIV_PERIC3, 24, 4),
+ DIV(0, "dout_audio2", "mout_audio2", DIV_PERIC3, 28, 4),
/* SPI Pre-Ratio */
- DIV(none, "dout_pre_spi0", "dout_spi0", DIV_PERIC4, 8, 8),
- DIV(none, "dout_pre_spi1", "dout_spi1", DIV_PERIC4, 16, 8),
- DIV(none, "dout_pre_spi2", "dout_spi2", DIV_PERIC4, 24, 8),
+ DIV(0, "dout_pre_spi0", "dout_spi0", DIV_PERIC4, 8, 8),
+ DIV(0, "dout_pre_spi1", "dout_spi1", DIV_PERIC4, 16, 8),
+ DIV(0, "dout_pre_spi2", "dout_spi2", DIV_PERIC4, 24, 8),
};
static struct samsung_gate_clock exynos5420_gate_clks[] __initdata = {
/* TODO: Re-verify the CG bits for all the gate clocks */
- GATE_A(mct, "pclk_st", "aclk66_psgen", GATE_BUS_PERIS1, 2, 0, 0, "mct"),
+ GATE_A(CLK_MCT, "pclk_st", "aclk66_psgen", GATE_BUS_PERIS1, 2, 0, 0,
+ "mct"),
GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0),
@@ -545,217 +505,227 @@ static struct samsung_gate_clock exynos5420_gate_clks[] __initdata = {
GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
/* sclk */
- GATE(sclk_uart0, "sclk_uart0", "dout_uart0",
+ GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_uart0",
GATE_TOP_SCLK_PERIC, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart1, "sclk_uart1", "dout_uart1",
+ GATE(CLK_SCLK_UART1, "sclk_uart1", "dout_uart1",
GATE_TOP_SCLK_PERIC, 1, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart2, "sclk_uart2", "dout_uart2",
+ GATE(CLK_SCLK_UART2, "sclk_uart2", "dout_uart2",
GATE_TOP_SCLK_PERIC, 2, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart3, "sclk_uart3", "dout_uart3",
+ GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_uart3",
GATE_TOP_SCLK_PERIC, 3, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi0, "sclk_spi0", "dout_pre_spi0",
+ GATE(CLK_SCLK_SPI0, "sclk_spi0", "dout_pre_spi0",
GATE_TOP_SCLK_PERIC, 6, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi1, "sclk_spi1", "dout_pre_spi1",
+ GATE(CLK_SCLK_SPI1, "sclk_spi1", "dout_pre_spi1",
GATE_TOP_SCLK_PERIC, 7, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi2, "sclk_spi2", "dout_pre_spi2",
+ GATE(CLK_SCLK_SPI2, "sclk_spi2", "dout_pre_spi2",
GATE_TOP_SCLK_PERIC, 8, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spdif, "sclk_spdif", "mout_spdif",
+ GATE(CLK_SCLK_SPDIF, "sclk_spdif", "mout_spdif",
GATE_TOP_SCLK_PERIC, 9, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_pwm, "sclk_pwm", "dout_pwm",
+ GATE(CLK_SCLK_PWM, "sclk_pwm", "dout_pwm",
GATE_TOP_SCLK_PERIC, 11, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_pcm1, "sclk_pcm1", "dout_pcm1",
+ GATE(CLK_SCLK_PCM1, "sclk_pcm1", "dout_pcm1",
GATE_TOP_SCLK_PERIC, 15, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_pcm2, "sclk_pcm2", "dout_pcm2",
+ GATE(CLK_SCLK_PCM2, "sclk_pcm2", "dout_pcm2",
GATE_TOP_SCLK_PERIC, 16, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_i2s1, "sclk_i2s1", "dout_i2s1",
+ GATE(CLK_SCLK_I2S1, "sclk_i2s1", "dout_i2s1",
GATE_TOP_SCLK_PERIC, 17, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_i2s2, "sclk_i2s2", "dout_i2s2",
+ GATE(CLK_SCLK_I2S2, "sclk_i2s2", "dout_i2s2",
GATE_TOP_SCLK_PERIC, 18, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc0, "sclk_mmc0", "dout_mmc0",
+ GATE(CLK_SCLK_MMC0, "sclk_mmc0", "dout_mmc0",
GATE_TOP_SCLK_FSYS, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc1, "sclk_mmc1", "dout_mmc1",
+ GATE(CLK_SCLK_MMC1, "sclk_mmc1", "dout_mmc1",
GATE_TOP_SCLK_FSYS, 1, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc2, "sclk_mmc2", "dout_mmc2",
+ GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_mmc2",
GATE_TOP_SCLK_FSYS, 2, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_usbphy301, "sclk_usbphy301", "dout_usbphy301",
+ GATE(CLK_SCLK_USBPHY301, "sclk_usbphy301", "dout_usbphy301",
GATE_TOP_SCLK_FSYS, 7, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_usbphy300, "sclk_usbphy300", "dout_usbphy300",
+ GATE(CLK_SCLK_USBPHY300, "sclk_usbphy300", "dout_usbphy300",
GATE_TOP_SCLK_FSYS, 8, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_usbd300, "sclk_usbd300", "dout_usbd300",
+ GATE(CLK_SCLK_USBD300, "sclk_usbd300", "dout_usbd300",
GATE_TOP_SCLK_FSYS, 9, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_usbd301, "sclk_usbd301", "dout_usbd301",
+ GATE(CLK_SCLK_USBD301, "sclk_usbd301", "dout_usbd301",
GATE_TOP_SCLK_FSYS, 10, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_usbd301, "sclk_unipro", "dout_unipro",
+ GATE(CLK_SCLK_USBD301, "sclk_unipro", "dout_unipro",
SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_gscl_wa, "sclk_gscl_wa", "aclK333_432_gscl",
+	GATE(CLK_SCLK_GSCL_WA, "sclk_gscl_wa", "aclk333_432_gscl",
GATE_TOP_SCLK_GSCL, 6, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_gscl_wb, "sclk_gscl_wb", "aclk333_432_gscl",
+ GATE(CLK_SCLK_GSCL_WB, "sclk_gscl_wb", "aclk333_432_gscl",
GATE_TOP_SCLK_GSCL, 7, CLK_SET_RATE_PARENT, 0),
/* Display */
- GATE(sclk_fimd1, "sclk_fimd1", "dout_fimd1",
+ GATE(CLK_SCLK_FIMD1, "sclk_fimd1", "dout_fimd1",
GATE_TOP_SCLK_DISP1, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mipi1, "sclk_mipi1", "dout_mipi1",
+ GATE(CLK_SCLK_MIPI1, "sclk_mipi1", "dout_mipi1",
GATE_TOP_SCLK_DISP1, 3, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_hdmi, "sclk_hdmi", "mout_hdmi",
+ GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi",
GATE_TOP_SCLK_DISP1, 9, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_pixel, "sclk_pixel", "dout_hdmi_pixel",
+ GATE(CLK_SCLK_PIXEL, "sclk_pixel", "dout_hdmi_pixel",
GATE_TOP_SCLK_DISP1, 10, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_dp1, "sclk_dp1", "dout_dp1",
+ GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1",
GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0),
/* Maudio Block */
- GATE(sclk_maudio0, "sclk_maudio0", "dout_maudio0",
+ GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_maupcm0, "sclk_maupcm0", "dout_maupcm0",
+ GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
/* FSYS */
- GATE(tsi, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0),
- GATE(pdma0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0),
- GATE(pdma1, "pdma1", "aclk200_fsys", GATE_BUS_FSYS0, 2, 0, 0),
- GATE(ufs, "ufs", "aclk200_fsys2", GATE_BUS_FSYS0, 3, 0, 0),
- GATE(rtic, "rtic", "aclk200_fsys", GATE_BUS_FSYS0, 5, 0, 0),
- GATE(mmc0, "mmc0", "aclk200_fsys2", GATE_BUS_FSYS0, 12, 0, 0),
- GATE(mmc1, "mmc1", "aclk200_fsys2", GATE_BUS_FSYS0, 13, 0, 0),
- GATE(mmc2, "mmc2", "aclk200_fsys2", GATE_BUS_FSYS0, 14, 0, 0),
- GATE(sromc, "sromc", "aclk200_fsys2",
+ GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0),
+ GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0),
+ GATE(CLK_PDMA1, "pdma1", "aclk200_fsys", GATE_BUS_FSYS0, 2, 0, 0),
+ GATE(CLK_UFS, "ufs", "aclk200_fsys2", GATE_BUS_FSYS0, 3, 0, 0),
+ GATE(CLK_RTIC, "rtic", "aclk200_fsys", GATE_BUS_FSYS0, 5, 0, 0),
+ GATE(CLK_MMC0, "mmc0", "aclk200_fsys2", GATE_BUS_FSYS0, 12, 0, 0),
+ GATE(CLK_MMC1, "mmc1", "aclk200_fsys2", GATE_BUS_FSYS0, 13, 0, 0),
+ GATE(CLK_MMC2, "mmc2", "aclk200_fsys2", GATE_BUS_FSYS0, 14, 0, 0),
+ GATE(CLK_SROMC, "sromc", "aclk200_fsys2",
GATE_BUS_FSYS0, 19, CLK_IGNORE_UNUSED, 0),
- GATE(usbh20, "usbh20", "aclk200_fsys", GATE_BUS_FSYS0, 20, 0, 0),
- GATE(usbd300, "usbd300", "aclk200_fsys", GATE_BUS_FSYS0, 21, 0, 0),
- GATE(usbd301, "usbd301", "aclk200_fsys", GATE_BUS_FSYS0, 28, 0, 0),
+ GATE(CLK_USBH20, "usbh20", "aclk200_fsys", GATE_BUS_FSYS0, 20, 0, 0),
+ GATE(CLK_USBD300, "usbd300", "aclk200_fsys", GATE_BUS_FSYS0, 21, 0, 0),
+ GATE(CLK_USBD301, "usbd301", "aclk200_fsys", GATE_BUS_FSYS0, 28, 0, 0),
/* UART */
- GATE(uart0, "uart0", "aclk66_peric", GATE_BUS_PERIC, 4, 0, 0),
- GATE(uart1, "uart1", "aclk66_peric", GATE_BUS_PERIC, 5, 0, 0),
- GATE_A(uart2, "uart2", "aclk66_peric",
+ GATE(CLK_UART0, "uart0", "aclk66_peric", GATE_BUS_PERIC, 4, 0, 0),
+ GATE(CLK_UART1, "uart1", "aclk66_peric", GATE_BUS_PERIC, 5, 0, 0),
+ GATE_A(CLK_UART2, "uart2", "aclk66_peric",
GATE_BUS_PERIC, 6, CLK_IGNORE_UNUSED, 0, "uart2"),
- GATE(uart3, "uart3", "aclk66_peric", GATE_BUS_PERIC, 7, 0, 0),
+ GATE(CLK_UART3, "uart3", "aclk66_peric", GATE_BUS_PERIC, 7, 0, 0),
/* I2C */
- GATE(i2c0, "i2c0", "aclk66_peric", GATE_BUS_PERIC, 9, 0, 0),
- GATE(i2c1, "i2c1", "aclk66_peric", GATE_BUS_PERIC, 10, 0, 0),
- GATE(i2c2, "i2c2", "aclk66_peric", GATE_BUS_PERIC, 11, 0, 0),
- GATE(i2c3, "i2c3", "aclk66_peric", GATE_BUS_PERIC, 12, 0, 0),
- GATE(i2c4, "i2c4", "aclk66_peric", GATE_BUS_PERIC, 13, 0, 0),
- GATE(i2c5, "i2c5", "aclk66_peric", GATE_BUS_PERIC, 14, 0, 0),
- GATE(i2c6, "i2c6", "aclk66_peric", GATE_BUS_PERIC, 15, 0, 0),
- GATE(i2c7, "i2c7", "aclk66_peric", GATE_BUS_PERIC, 16, 0, 0),
- GATE(i2c_hdmi, "i2c_hdmi", "aclk66_peric", GATE_BUS_PERIC, 17, 0, 0),
- GATE(tsadc, "tsadc", "aclk66_peric", GATE_BUS_PERIC, 18, 0, 0),
+ GATE(CLK_I2C0, "i2c0", "aclk66_peric", GATE_BUS_PERIC, 9, 0, 0),
+ GATE(CLK_I2C1, "i2c1", "aclk66_peric", GATE_BUS_PERIC, 10, 0, 0),
+ GATE(CLK_I2C2, "i2c2", "aclk66_peric", GATE_BUS_PERIC, 11, 0, 0),
+ GATE(CLK_I2C3, "i2c3", "aclk66_peric", GATE_BUS_PERIC, 12, 0, 0),
+ GATE(CLK_I2C4, "i2c4", "aclk66_peric", GATE_BUS_PERIC, 13, 0, 0),
+ GATE(CLK_I2C5, "i2c5", "aclk66_peric", GATE_BUS_PERIC, 14, 0, 0),
+ GATE(CLK_I2C6, "i2c6", "aclk66_peric", GATE_BUS_PERIC, 15, 0, 0),
+ GATE(CLK_I2C7, "i2c7", "aclk66_peric", GATE_BUS_PERIC, 16, 0, 0),
+ GATE(CLK_I2C_HDMI, "i2c_hdmi", "aclk66_peric", GATE_BUS_PERIC, 17, 0,
+ 0),
+ GATE(CLK_TSADC, "tsadc", "aclk66_peric", GATE_BUS_PERIC, 18, 0, 0),
/* SPI */
- GATE(spi0, "spi0", "aclk66_peric", GATE_BUS_PERIC, 19, 0, 0),
- GATE(spi1, "spi1", "aclk66_peric", GATE_BUS_PERIC, 20, 0, 0),
- GATE(spi2, "spi2", "aclk66_peric", GATE_BUS_PERIC, 21, 0, 0),
- GATE(keyif, "keyif", "aclk66_peric", GATE_BUS_PERIC, 22, 0, 0),
+ GATE(CLK_SPI0, "spi0", "aclk66_peric", GATE_BUS_PERIC, 19, 0, 0),
+ GATE(CLK_SPI1, "spi1", "aclk66_peric", GATE_BUS_PERIC, 20, 0, 0),
+ GATE(CLK_SPI2, "spi2", "aclk66_peric", GATE_BUS_PERIC, 21, 0, 0),
+ GATE(CLK_KEYIF, "keyif", "aclk66_peric", GATE_BUS_PERIC, 22, 0, 0),
/* I2S */
- GATE(i2s1, "i2s1", "aclk66_peric", GATE_BUS_PERIC, 23, 0, 0),
- GATE(i2s2, "i2s2", "aclk66_peric", GATE_BUS_PERIC, 24, 0, 0),
+ GATE(CLK_I2S1, "i2s1", "aclk66_peric", GATE_BUS_PERIC, 23, 0, 0),
+ GATE(CLK_I2S2, "i2s2", "aclk66_peric", GATE_BUS_PERIC, 24, 0, 0),
/* PCM */
- GATE(pcm1, "pcm1", "aclk66_peric", GATE_BUS_PERIC, 25, 0, 0),
- GATE(pcm2, "pcm2", "aclk66_peric", GATE_BUS_PERIC, 26, 0, 0),
+ GATE(CLK_PCM1, "pcm1", "aclk66_peric", GATE_BUS_PERIC, 25, 0, 0),
+ GATE(CLK_PCM2, "pcm2", "aclk66_peric", GATE_BUS_PERIC, 26, 0, 0),
/* PWM */
- GATE(pwm, "pwm", "aclk66_peric", GATE_BUS_PERIC, 27, 0, 0),
+ GATE(CLK_PWM, "pwm", "aclk66_peric", GATE_BUS_PERIC, 27, 0, 0),
/* SPDIF */
- GATE(spdif, "spdif", "aclk66_peric", GATE_BUS_PERIC, 29, 0, 0),
+ GATE(CLK_SPDIF, "spdif", "aclk66_peric", GATE_BUS_PERIC, 29, 0, 0),
- GATE(i2c8, "i2c8", "aclk66_peric", GATE_BUS_PERIC1, 0, 0, 0),
- GATE(i2c9, "i2c9", "aclk66_peric", GATE_BUS_PERIC1, 1, 0, 0),
- GATE(i2c10, "i2c10", "aclk66_peric", GATE_BUS_PERIC1, 2, 0, 0),
+ GATE(CLK_I2C8, "i2c8", "aclk66_peric", GATE_BUS_PERIC1, 0, 0, 0),
+ GATE(CLK_I2C9, "i2c9", "aclk66_peric", GATE_BUS_PERIC1, 1, 0, 0),
+ GATE(CLK_I2C10, "i2c10", "aclk66_peric", GATE_BUS_PERIC1, 2, 0, 0),
- GATE(chipid, "chipid", "aclk66_psgen",
+ GATE(CLK_CHIPID, "chipid", "aclk66_psgen",
GATE_BUS_PERIS0, 12, CLK_IGNORE_UNUSED, 0),
- GATE(sysreg, "sysreg", "aclk66_psgen",
+ GATE(CLK_SYSREG, "sysreg", "aclk66_psgen",
GATE_BUS_PERIS0, 13, CLK_IGNORE_UNUSED, 0),
- GATE(tzpc0, "tzpc0", "aclk66_psgen", GATE_BUS_PERIS0, 18, 0, 0),
- GATE(tzpc1, "tzpc1", "aclk66_psgen", GATE_BUS_PERIS0, 19, 0, 0),
- GATE(tzpc2, "tzpc2", "aclk66_psgen", GATE_BUS_PERIS0, 20, 0, 0),
- GATE(tzpc3, "tzpc3", "aclk66_psgen", GATE_BUS_PERIS0, 21, 0, 0),
- GATE(tzpc4, "tzpc4", "aclk66_psgen", GATE_BUS_PERIS0, 22, 0, 0),
- GATE(tzpc5, "tzpc5", "aclk66_psgen", GATE_BUS_PERIS0, 23, 0, 0),
- GATE(tzpc6, "tzpc6", "aclk66_psgen", GATE_BUS_PERIS0, 24, 0, 0),
- GATE(tzpc7, "tzpc7", "aclk66_psgen", GATE_BUS_PERIS0, 25, 0, 0),
- GATE(tzpc8, "tzpc8", "aclk66_psgen", GATE_BUS_PERIS0, 26, 0, 0),
- GATE(tzpc9, "tzpc9", "aclk66_psgen", GATE_BUS_PERIS0, 27, 0, 0),
-
- GATE(hdmi_cec, "hdmi_cec", "aclk66_psgen", GATE_BUS_PERIS1, 0, 0, 0),
- GATE(seckey, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
- GATE(wdt, "wdt", "aclk66_psgen", GATE_BUS_PERIS1, 3, 0, 0),
- GATE(rtc, "rtc", "aclk66_psgen", GATE_BUS_PERIS1, 4, 0, 0),
- GATE(tmu, "tmu", "aclk66_psgen", GATE_BUS_PERIS1, 5, 0, 0),
- GATE(tmu_gpu, "tmu_gpu", "aclk66_psgen", GATE_BUS_PERIS1, 6, 0, 0),
-
- GATE(gscl0, "gscl0", "aclk300_gscl", GATE_IP_GSCL0, 0, 0, 0),
- GATE(gscl1, "gscl1", "aclk300_gscl", GATE_IP_GSCL0, 1, 0, 0),
- GATE(clk_3aa, "clk_3aa", "aclk300_gscl", GATE_IP_GSCL0, 4, 0, 0),
-
- GATE(smmu_3aa, "smmu_3aa", "aclk333_432_gscl", GATE_IP_GSCL1, 2, 0, 0),
- GATE(smmu_fimcl0, "smmu_fimcl0", "aclk333_432_gscl",
+ GATE(CLK_TZPC0, "tzpc0", "aclk66_psgen", GATE_BUS_PERIS0, 18, 0, 0),
+ GATE(CLK_TZPC1, "tzpc1", "aclk66_psgen", GATE_BUS_PERIS0, 19, 0, 0),
+ GATE(CLK_TZPC2, "tzpc2", "aclk66_psgen", GATE_BUS_PERIS0, 20, 0, 0),
+ GATE(CLK_TZPC3, "tzpc3", "aclk66_psgen", GATE_BUS_PERIS0, 21, 0, 0),
+ GATE(CLK_TZPC4, "tzpc4", "aclk66_psgen", GATE_BUS_PERIS0, 22, 0, 0),
+ GATE(CLK_TZPC5, "tzpc5", "aclk66_psgen", GATE_BUS_PERIS0, 23, 0, 0),
+ GATE(CLK_TZPC6, "tzpc6", "aclk66_psgen", GATE_BUS_PERIS0, 24, 0, 0),
+ GATE(CLK_TZPC7, "tzpc7", "aclk66_psgen", GATE_BUS_PERIS0, 25, 0, 0),
+ GATE(CLK_TZPC8, "tzpc8", "aclk66_psgen", GATE_BUS_PERIS0, 26, 0, 0),
+ GATE(CLK_TZPC9, "tzpc9", "aclk66_psgen", GATE_BUS_PERIS0, 27, 0, 0),
+
+ GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk66_psgen", GATE_BUS_PERIS1, 0, 0,
+ 0),
+ GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
+ GATE(CLK_WDT, "wdt", "aclk66_psgen", GATE_BUS_PERIS1, 3, 0, 0),
+ GATE(CLK_RTC, "rtc", "aclk66_psgen", GATE_BUS_PERIS1, 4, 0, 0),
+ GATE(CLK_TMU, "tmu", "aclk66_psgen", GATE_BUS_PERIS1, 5, 0, 0),
+ GATE(CLK_TMU_GPU, "tmu_gpu", "aclk66_psgen", GATE_BUS_PERIS1, 6, 0, 0),
+
+ GATE(CLK_GSCL0, "gscl0", "aclk300_gscl", GATE_IP_GSCL0, 0, 0, 0),
+ GATE(CLK_GSCL1, "gscl1", "aclk300_gscl", GATE_IP_GSCL0, 1, 0, 0),
+ GATE(CLK_CLK_3AA, "clk_3aa", "aclk300_gscl", GATE_IP_GSCL0, 4, 0, 0),
+
+ GATE(CLK_SMMU_3AA, "smmu_3aa", "aclk333_432_gscl", GATE_IP_GSCL1, 2, 0,
+ 0),
+ GATE(CLK_SMMU_FIMCL0, "smmu_fimcl0", "aclk333_432_gscl",
GATE_IP_GSCL1, 3, 0, 0),
- GATE(smmu_fimcl1, "smmu_fimcl1", "aclk333_432_gscl",
+ GATE(CLK_SMMU_FIMCL1, "smmu_fimcl1", "aclk333_432_gscl",
GATE_IP_GSCL1, 4, 0, 0),
- GATE(smmu_gscl0, "smmu_gscl0", "aclk300_gscl", GATE_IP_GSCL1, 6, 0, 0),
- GATE(smmu_gscl1, "smmu_gscl1", "aclk300_gscl", GATE_IP_GSCL1, 7, 0, 0),
- GATE(gscl_wa, "gscl_wa", "aclk300_gscl", GATE_IP_GSCL1, 12, 0, 0),
- GATE(gscl_wb, "gscl_wb", "aclk300_gscl", GATE_IP_GSCL1, 13, 0, 0),
- GATE(smmu_fimcl3, "smmu_fimcl3,", "aclk333_432_gscl",
+ GATE(CLK_SMMU_GSCL0, "smmu_gscl0", "aclk300_gscl", GATE_IP_GSCL1, 6, 0,
+ 0),
+ GATE(CLK_SMMU_GSCL1, "smmu_gscl1", "aclk300_gscl", GATE_IP_GSCL1, 7, 0,
+ 0),
+ GATE(CLK_GSCL_WA, "gscl_wa", "aclk300_gscl", GATE_IP_GSCL1, 12, 0, 0),
+ GATE(CLK_GSCL_WB, "gscl_wb", "aclk300_gscl", GATE_IP_GSCL1, 13, 0, 0),
+	GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3", "aclk333_432_gscl",
GATE_IP_GSCL1, 16, 0, 0),
- GATE(fimc_lite3, "fimc_lite3", "aclk333_432_gscl",
+ GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
GATE_IP_GSCL1, 17, 0, 0),
- GATE(fimd1, "fimd1", "aclk300_disp1", GATE_IP_DISP1, 0, 0, 0),
- GATE(dsim1, "dsim1", "aclk200_disp1", GATE_IP_DISP1, 3, 0, 0),
- GATE(dp1, "dp1", "aclk200_disp1", GATE_IP_DISP1, 4, 0, 0),
- GATE(mixer, "mixer", "aclk166", GATE_IP_DISP1, 5, 0, 0),
- GATE(hdmi, "hdmi", "aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
- GATE(smmu_fimd1, "smmu_fimd1", "aclk300_disp1", GATE_IP_DISP1, 8, 0, 0),
-
- GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
- GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0),
- GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0),
-
- GATE(g3d, "g3d", "aclkg3d", GATE_IP_G3D, 9, 0, 0),
-
- GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
- GATE(jpeg, "jpeg", "aclk300_jpeg", GATE_IP_GEN, 2, 0, 0),
- GATE(jpeg2, "jpeg2", "aclk300_jpeg", GATE_IP_GEN, 3, 0, 0),
- GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
- GATE(smmu_rotator, "smmu_rotator", "aclk266", GATE_IP_GEN, 6, 0, 0),
- GATE(smmu_jpeg, "smmu_jpeg", "aclk300_jpeg", GATE_IP_GEN, 7, 0, 0),
- GATE(smmu_mdma1, "smmu_mdma1", "aclk266", GATE_IP_GEN, 9, 0, 0),
-
- GATE(mscl0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
- GATE(mscl1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
- GATE(mscl2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
- GATE(smmu_mscl0, "smmu_mscl0", "aclk400_mscl", GATE_IP_MSCL, 8, 0, 0),
- GATE(smmu_mscl1, "smmu_mscl1", "aclk400_mscl", GATE_IP_MSCL, 9, 0, 0),
- GATE(smmu_mscl2, "smmu_mscl2", "aclk400_mscl", GATE_IP_MSCL, 10, 0, 0),
- GATE(smmu_mixer, "smmu_mixer", "aclk200_disp1", GATE_IP_DISP1, 9, 0, 0),
+ GATE(CLK_FIMD1, "fimd1", "aclk300_disp1", GATE_IP_DISP1, 0, 0, 0),
+ GATE(CLK_DSIM1, "dsim1", "aclk200_disp1", GATE_IP_DISP1, 3, 0, 0),
+ GATE(CLK_DP1, "dp1", "aclk200_disp1", GATE_IP_DISP1, 4, 0, 0),
+ GATE(CLK_MIXER, "mixer", "aclk166", GATE_IP_DISP1, 5, 0, 0),
+ GATE(CLK_HDMI, "hdmi", "aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
+ GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "aclk300_disp1", GATE_IP_DISP1, 8, 0,
+ 0),
+
+ GATE(CLK_MFC, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
+ GATE(CLK_SMMU_MFCL, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0),
+ GATE(CLK_SMMU_MFCR, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0),
+
+ GATE(CLK_G3D, "g3d", "aclkg3d", GATE_IP_G3D, 9, 0, 0),
+
+ GATE(CLK_ROTATOR, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
+ GATE(CLK_JPEG, "jpeg", "aclk300_jpeg", GATE_IP_GEN, 2, 0, 0),
+ GATE(CLK_JPEG2, "jpeg2", "aclk300_jpeg", GATE_IP_GEN, 3, 0, 0),
+ GATE(CLK_MDMA1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
+ GATE(CLK_SMMU_ROTATOR, "smmu_rotator", "aclk266", GATE_IP_GEN, 6, 0, 0),
+ GATE(CLK_SMMU_JPEG, "smmu_jpeg", "aclk300_jpeg", GATE_IP_GEN, 7, 0, 0),
+ GATE(CLK_SMMU_MDMA1, "smmu_mdma1", "aclk266", GATE_IP_GEN, 9, 0, 0),
+
+ GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
+ GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
+ GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
+ GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "aclk400_mscl", GATE_IP_MSCL, 8, 0,
+ 0),
+ GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "aclk400_mscl", GATE_IP_MSCL, 9, 0,
+ 0),
+ GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "aclk400_mscl", GATE_IP_MSCL, 10, 0,
+ 0),
+ GATE(CLK_SMMU_MIXER, "smmu_mixer", "aclk200_disp1", GATE_IP_DISP1, 9, 0,
+ 0),
};
static struct samsung_pll_clock exynos5420_plls[nr_plls] __initdata = {
- [apll] = PLL(pll_2550, fout_apll, "fout_apll", "fin_pll", APLL_LOCK,
+ [apll] = PLL(pll_2550, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK,
APLL_CON0, NULL),
- [cpll] = PLL(pll_2550, fout_mpll, "fout_mpll", "fin_pll", MPLL_LOCK,
- MPLL_CON0, NULL),
- [dpll] = PLL(pll_2550, fout_dpll, "fout_dpll", "fin_pll", DPLL_LOCK,
+ [cpll] = PLL(pll_2550, CLK_FOUT_CPLL, "fout_cpll", "fin_pll", CPLL_LOCK,
+ CPLL_CON0, NULL),
+ [dpll] = PLL(pll_2550, CLK_FOUT_DPLL, "fout_dpll", "fin_pll", DPLL_LOCK,
DPLL_CON0, NULL),
- [epll] = PLL(pll_2650, fout_epll, "fout_epll", "fin_pll", EPLL_LOCK,
+ [epll] = PLL(pll_2650, CLK_FOUT_EPLL, "fout_epll", "fin_pll", EPLL_LOCK,
EPLL_CON0, NULL),
- [rpll] = PLL(pll_2650, fout_rpll, "fout_rpll", "fin_pll", RPLL_LOCK,
+ [rpll] = PLL(pll_2650, CLK_FOUT_RPLL, "fout_rpll", "fin_pll", RPLL_LOCK,
RPLL_CON0, NULL),
- [ipll] = PLL(pll_2550, fout_ipll, "fout_ipll", "fin_pll", IPLL_LOCK,
+ [ipll] = PLL(pll_2550, CLK_FOUT_IPLL, "fout_ipll", "fin_pll", IPLL_LOCK,
IPLL_CON0, NULL),
- [spll] = PLL(pll_2550, fout_spll, "fout_spll", "fin_pll", SPLL_LOCK,
+ [spll] = PLL(pll_2550, CLK_FOUT_SPLL, "fout_spll", "fin_pll", SPLL_LOCK,
SPLL_CON0, NULL),
- [vpll] = PLL(pll_2550, fout_vpll, "fout_vpll", "fin_pll", VPLL_LOCK,
+ [vpll] = PLL(pll_2550, CLK_FOUT_VPLL, "fout_vpll", "fin_pll", VPLL_LOCK,
VPLL_CON0, NULL),
- [mpll] = PLL(pll_2550, fout_mpll, "fout_mpll", "fin_pll", MPLL_LOCK,
+ [mpll] = PLL(pll_2550, CLK_FOUT_MPLL, "fout_mpll", "fin_pll", MPLL_LOCK,
MPLL_CON0, NULL),
- [bpll] = PLL(pll_2550, fout_bpll, "fout_bpll", "fin_pll", BPLL_LOCK,
+ [bpll] = PLL(pll_2550, CLK_FOUT_BPLL, "fout_bpll", "fin_pll", BPLL_LOCK,
BPLL_CON0, NULL),
- [kpll] = PLL(pll_2550, fout_kpll, "fout_kpll", "fin_pll", KPLL_LOCK,
+ [kpll] = PLL(pll_2550, CLK_FOUT_KPLL, "fout_kpll", "fin_pll", KPLL_LOCK,
KPLL_CON0, NULL),
};
@@ -777,7 +747,7 @@ static void __init exynos5420_clk_init(struct device_node *np)
panic("%s: unable to determine soc\n", __func__);
}
- samsung_clk_init(np, reg_base, nr_clks,
+ samsung_clk_init(np, reg_base, CLK_NR_CLKS,
exynos5420_clk_regs, ARRAY_SIZE(exynos5420_clk_regs),
NULL, 0);
samsung_clk_of_register_fixed_ext(exynos5420_fixed_rate_ext_clks,
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index f8658945bfd2..cbc15b56891d 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -9,6 +9,7 @@
* Common Clock Framework support for Exynos5440 SoC.
*/
+#include <dt-bindings/clock/exynos5440.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
@@ -22,79 +23,65 @@
#define CPU_CLK_STATUS 0xfc
#define MISC_DOUT1 0x558
-/*
- * Let each supported clock get a unique id. This id is used to lookup the clock
- * for device tree based platforms.
- */
-enum exynos5440_clks {
- none, xtal, arm_clk,
-
- spi_baud = 16, pb0_250, pr0_250, pr1_250, b_250, b_125, b_200, sata,
- usb, gmac0, cs250, pb0_250_o, pr0_250_o, pr1_250_o, b_250_o, b_125_o,
- b_200_o, sata_o, usb_o, gmac0_o, cs250_o,
-
- nr_clks,
-};
-
/* parent clock name list */
PNAME(mout_armclk_p) = { "cplla", "cpllb" };
PNAME(mout_spi_p) = { "div125", "div200" };
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock exynos5440_fixed_rate_ext_clks[] __initdata = {
- FRATE(none, "xtal", NULL, CLK_IS_ROOT, 0),
+ FRATE(0, "xtal", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks */
static struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata = {
- FRATE(none, "ppll", NULL, CLK_IS_ROOT, 1000000000),
- FRATE(none, "usb_phy0", NULL, CLK_IS_ROOT, 60000000),
- FRATE(none, "usb_phy1", NULL, CLK_IS_ROOT, 60000000),
- FRATE(none, "usb_ohci12", NULL, CLK_IS_ROOT, 12000000),
- FRATE(none, "usb_ohci48", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(0, "ppll", NULL, CLK_IS_ROOT, 1000000000),
+ FRATE(0, "usb_phy0", NULL, CLK_IS_ROOT, 60000000),
+ FRATE(0, "usb_phy1", NULL, CLK_IS_ROOT, 60000000),
+ FRATE(0, "usb_ohci12", NULL, CLK_IS_ROOT, 12000000),
+ FRATE(0, "usb_ohci48", NULL, CLK_IS_ROOT, 48000000),
};
/* fixed factor clocks */
static struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __initdata = {
- FFACTOR(none, "div250", "ppll", 1, 4, 0),
- FFACTOR(none, "div200", "ppll", 1, 5, 0),
- FFACTOR(none, "div125", "div250", 1, 2, 0),
+ FFACTOR(0, "div250", "ppll", 1, 4, 0),
+ FFACTOR(0, "div200", "ppll", 1, 5, 0),
+ FFACTOR(0, "div125", "div250", 1, 2, 0),
};
/* mux clocks */
static struct samsung_mux_clock exynos5440_mux_clks[] __initdata = {
- MUX(none, "mout_spi", mout_spi_p, MISC_DOUT1, 5, 1),
- MUX_A(arm_clk, "arm_clk", mout_armclk_p,
+ MUX(0, "mout_spi", mout_spi_p, MISC_DOUT1, 5, 1),
+ MUX_A(CLK_ARM_CLK, "arm_clk", mout_armclk_p,
CPU_CLK_STATUS, 0, 1, "armclk"),
};
/* divider clocks */
static struct samsung_div_clock exynos5440_div_clks[] __initdata = {
- DIV(spi_baud, "div_spi", "mout_spi", MISC_DOUT1, 3, 2),
+ DIV(CLK_SPI_BAUD, "div_spi", "mout_spi", MISC_DOUT1, 3, 2),
};
/* gate clocks */
static struct samsung_gate_clock exynos5440_gate_clks[] __initdata = {
- GATE(pb0_250, "pb0_250", "div250", CLKEN_OV_VAL, 3, 0, 0),
- GATE(pr0_250, "pr0_250", "div250", CLKEN_OV_VAL, 4, 0, 0),
- GATE(pr1_250, "pr1_250", "div250", CLKEN_OV_VAL, 5, 0, 0),
- GATE(b_250, "b_250", "div250", CLKEN_OV_VAL, 9, 0, 0),
- GATE(b_125, "b_125", "div125", CLKEN_OV_VAL, 10, 0, 0),
- GATE(b_200, "b_200", "div200", CLKEN_OV_VAL, 11, 0, 0),
- GATE(sata, "sata", "div200", CLKEN_OV_VAL, 12, 0, 0),
- GATE(usb, "usb", "div200", CLKEN_OV_VAL, 13, 0, 0),
- GATE(gmac0, "gmac0", "div200", CLKEN_OV_VAL, 14, 0, 0),
- GATE(cs250, "cs250", "div250", CLKEN_OV_VAL, 19, 0, 0),
- GATE(pb0_250_o, "pb0_250_o", "pb0_250", CLKEN_OV_VAL, 3, 0, 0),
- GATE(pr0_250_o, "pr0_250_o", "pr0_250", CLKEN_OV_VAL, 4, 0, 0),
- GATE(pr1_250_o, "pr1_250_o", "pr1_250", CLKEN_OV_VAL, 5, 0, 0),
- GATE(b_250_o, "b_250_o", "b_250", CLKEN_OV_VAL, 9, 0, 0),
- GATE(b_125_o, "b_125_o", "b_125", CLKEN_OV_VAL, 10, 0, 0),
- GATE(b_200_o, "b_200_o", "b_200", CLKEN_OV_VAL, 11, 0, 0),
- GATE(sata_o, "sata_o", "sata", CLKEN_OV_VAL, 12, 0, 0),
- GATE(usb_o, "usb_o", "usb", CLKEN_OV_VAL, 13, 0, 0),
- GATE(gmac0_o, "gmac0_o", "gmac", CLKEN_OV_VAL, 14, 0, 0),
- GATE(cs250_o, "cs250_o", "cs250", CLKEN_OV_VAL, 19, 0, 0),
+ GATE(CLK_PB0_250, "pb0_250", "div250", CLKEN_OV_VAL, 3, 0, 0),
+ GATE(CLK_PR0_250, "pr0_250", "div250", CLKEN_OV_VAL, 4, 0, 0),
+ GATE(CLK_PR1_250, "pr1_250", "div250", CLKEN_OV_VAL, 5, 0, 0),
+ GATE(CLK_B_250, "b_250", "div250", CLKEN_OV_VAL, 9, 0, 0),
+ GATE(CLK_B_125, "b_125", "div125", CLKEN_OV_VAL, 10, 0, 0),
+ GATE(CLK_B_200, "b_200", "div200", CLKEN_OV_VAL, 11, 0, 0),
+ GATE(CLK_SATA, "sata", "div200", CLKEN_OV_VAL, 12, 0, 0),
+ GATE(CLK_USB, "usb", "div200", CLKEN_OV_VAL, 13, 0, 0),
+ GATE(CLK_GMAC0, "gmac0", "div200", CLKEN_OV_VAL, 14, 0, 0),
+ GATE(CLK_CS250, "cs250", "div250", CLKEN_OV_VAL, 19, 0, 0),
+ GATE(CLK_PB0_250_O, "pb0_250_o", "pb0_250", CLKEN_OV_VAL, 3, 0, 0),
+ GATE(CLK_PR0_250_O, "pr0_250_o", "pr0_250", CLKEN_OV_VAL, 4, 0, 0),
+ GATE(CLK_PR1_250_O, "pr1_250_o", "pr1_250", CLKEN_OV_VAL, 5, 0, 0),
+ GATE(CLK_B_250_O, "b_250_o", "b_250", CLKEN_OV_VAL, 9, 0, 0),
+ GATE(CLK_B_125_O, "b_125_o", "b_125", CLKEN_OV_VAL, 10, 0, 0),
+ GATE(CLK_B_200_O, "b_200_o", "b_200", CLKEN_OV_VAL, 11, 0, 0),
+ GATE(CLK_SATA_O, "sata_o", "sata", CLKEN_OV_VAL, 12, 0, 0),
+ GATE(CLK_USB_O, "usb_o", "usb", CLKEN_OV_VAL, 13, 0, 0),
+ GATE(CLK_GMAC0_O, "gmac0_o", "gmac", CLKEN_OV_VAL, 14, 0, 0),
+ GATE(CLK_CS250_O, "cs250_o", "cs250", CLKEN_OV_VAL, 19, 0, 0),
};
static struct of_device_id ext_clk_match[] __initdata = {
@@ -114,7 +101,7 @@ static void __init exynos5440_clk_init(struct device_node *np)
return;
}
- samsung_clk_init(np, reg_base, nr_clks, NULL, 0, NULL, 0);
+ samsung_clk_init(np, reg_base, CLK_NR_CLKS, NULL, 0, NULL, 0);
samsung_clk_of_register_fixed_ext(exynos5440_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5440_fixed_rate_ext_clks), ext_clk_match);
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 529e11dc2c6b..81e6d2f49aa0 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -375,7 +375,7 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
break;
default:
break;
- };
+ }
/* Set new configuration. */
__raw_writel(con1, pll->con_reg + 0x4);
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index 7d2c84265947..8e27aee6887e 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -331,8 +331,8 @@ static struct samsung_clock_alias s3c64xx_clock_aliases[] = {
ALIAS(HCLK_HSMMC1, "s3c-sdhci.1", "mmc_busclk.0"),
ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "hsmmc"),
ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "mmc_busclk.0"),
- ALIAS(HCLK_DMA1, NULL, "dma1"),
- ALIAS(HCLK_DMA0, NULL, "dma0"),
+ ALIAS(HCLK_DMA1, "dma-pl080s.1", "apb_pclk"),
+ ALIAS(HCLK_DMA0, "dma-pl080s.0", "apb_pclk"),
ALIAS(HCLK_CAMIF, "s3c-camif", "camif"),
ALIAS(HCLK_LCD, "s3c-fb", "lcd"),
ALIAS(PCLK_SPI1, "s3c6410-spi.1", "spi"),
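The s3c64xx hunk above re-points the two DMA clock aliases at the PL080S AMBA devices under the "apb_pclk" connection ID; that is the name the AMBA bus core looks up before it can read a primecell's ID registers, so without the alias the dma-pl080s devices cannot be probed. A minimal consumer-side sketch of the lookup the alias is meant to satisfy (illustrative only, not a copy of drivers/amba/bus.c; the function name is made up):

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int example_enable_apb_pclk(struct device *dev)
	{
		struct clk *pclk;
		int ret;

		/* Resolved through the ALIAS(HCLK_DMA0, "dma-pl080s.0",
		 * "apb_pclk") entry added above when dev is the matching
		 * AMBA device.
		 */
		pclk = clk_get(dev, "apb_pclk");
		if (IS_ERR(pclk))
			return PTR_ERR(pclk);

		ret = clk_prepare_enable(pclk);
		if (ret)
			clk_put(pclk);

		return ret;
	}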
diff --git a/drivers/clk/shmobile/Makefile b/drivers/clk/shmobile/Makefile
new file mode 100644
index 000000000000..9ecef140dba7
--- /dev/null
+++ b/drivers/clk/shmobile/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o
+obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o
+obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o
+obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += clk-div6.o
+obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += clk-mstp.o
+# for empty built-in.o
+obj-n := dummy
diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c
new file mode 100644
index 000000000000..aac4756ec52e
--- /dev/null
+++ b/drivers/clk/shmobile/clk-div6.c
@@ -0,0 +1,185 @@
+/*
+ * r8a7790 Common Clock Framework support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#define CPG_DIV6_CKSTP BIT(8)
+#define CPG_DIV6_DIV(d) ((d) & 0x3f)
+#define CPG_DIV6_DIV_MASK 0x3f
+
+/**
+ * struct div6_clock - CPG 6 bit divider clock
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: IO-remapped register
+ * @div: divisor value (1-64)
+ */
+struct div6_clock {
+ struct clk_hw hw;
+ void __iomem *reg;
+ unsigned int div;
+};
+
+#define to_div6_clock(_hw) container_of(_hw, struct div6_clock, hw)
+
+static int cpg_div6_clock_enable(struct clk_hw *hw)
+{
+ struct div6_clock *clock = to_div6_clock(hw);
+
+ clk_writel(CPG_DIV6_DIV(clock->div - 1), clock->reg);
+
+ return 0;
+}
+
+static void cpg_div6_clock_disable(struct clk_hw *hw)
+{
+ struct div6_clock *clock = to_div6_clock(hw);
+
+ /* DIV6 clocks require the divisor field to be non-zero when stopping
+ * the clock.
+ */
+ clk_writel(CPG_DIV6_CKSTP | CPG_DIV6_DIV(CPG_DIV6_DIV_MASK),
+ clock->reg);
+}
+
+static int cpg_div6_clock_is_enabled(struct clk_hw *hw)
+{
+ struct div6_clock *clock = to_div6_clock(hw);
+
+ return !(clk_readl(clock->reg) & CPG_DIV6_CKSTP);
+}
+
+static unsigned long cpg_div6_clock_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct div6_clock *clock = to_div6_clock(hw);
+ unsigned int div = (clk_readl(clock->reg) & CPG_DIV6_DIV_MASK) + 1;
+
+ return parent_rate / div;
+}
+
+static unsigned int cpg_div6_clock_calc_div(unsigned long rate,
+ unsigned long parent_rate)
+{
+ unsigned int div;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ return clamp_t(unsigned int, div, 1, 64);
+}
+
+static long cpg_div6_clock_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned int div = cpg_div6_clock_calc_div(rate, *parent_rate);
+
+ return *parent_rate / div;
+}
+
+static int cpg_div6_clock_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct div6_clock *clock = to_div6_clock(hw);
+ unsigned int div = cpg_div6_clock_calc_div(rate, parent_rate);
+
+ clock->div = div;
+
+ /* Only program the new divisor if the clock isn't stopped. */
+ if (!(clk_readl(clock->reg) & CPG_DIV6_CKSTP))
+ clk_writel(CPG_DIV6_DIV(clock->div - 1), clock->reg);
+
+ return 0;
+}
+
+static const struct clk_ops cpg_div6_clock_ops = {
+ .enable = cpg_div6_clock_enable,
+ .disable = cpg_div6_clock_disable,
+ .is_enabled = cpg_div6_clock_is_enabled,
+ .recalc_rate = cpg_div6_clock_recalc_rate,
+ .round_rate = cpg_div6_clock_round_rate,
+ .set_rate = cpg_div6_clock_set_rate,
+};
+
+static void __init cpg_div6_clock_init(struct device_node *np)
+{
+ struct clk_init_data init;
+ struct div6_clock *clock;
+ const char *parent_name;
+ const char *name;
+ struct clk *clk;
+ int ret;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ pr_err("%s: failed to allocate %s DIV6 clock\n",
+ __func__, np->name);
+ return;
+ }
+
+ /* Remap the clock register and read the divisor. Disabling the
+ * clock overwrites the divisor, so we need to cache its value for the
+ * enable operation.
+ */
+ clock->reg = of_iomap(np, 0);
+ if (clock->reg == NULL) {
+ pr_err("%s: failed to map %s DIV6 clock register\n",
+ __func__, np->name);
+ goto error;
+ }
+
+ clock->div = (clk_readl(clock->reg) & CPG_DIV6_DIV_MASK) + 1;
+
+ /* Parse the DT properties. */
+ ret = of_property_read_string(np, "clock-output-names", &name);
+ if (ret < 0) {
+ pr_err("%s: failed to get %s DIV6 clock output name\n",
+ __func__, np->name);
+ goto error;
+ }
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (parent_name == NULL) {
+ pr_err("%s: failed to get %s DIV6 clock parent name\n",
+ __func__, np->name);
+ goto error;
+ }
+
+ /* Register the clock. */
+ init.name = name;
+ init.ops = &cpg_div6_clock_ops;
+ init.flags = CLK_IS_BASIC;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clock->hw.init = &init;
+
+ clk = clk_register(NULL, &clock->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register %s DIV6 clock (%ld)\n",
+ __func__, np->name, PTR_ERR(clk));
+ goto error;
+ }
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+
+ return;
+
+error:
+ if (clock->reg)
+ iounmap(clock->reg);
+ kfree(clock);
+}
+CLK_OF_DECLARE(cpg_div6_clk, "renesas,cpg-div6-clock", cpg_div6_clock_init);
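The divisor selection above (cpg_div6_clock_calc_div() plus the round_rate/set_rate callbacks) rounds parent_rate/rate to the nearest integer and clamps it to the 1-64 range a 6-bit divisor field can encode, with the hardware register storing div - 1. A standalone userspace sketch of that arithmetic, with the kernel helpers re-implemented locally so it builds outside the tree (behaviour is intended to match the driver, but this is an illustration, not the driver itself):

	/* Standalone sketch of the DIV6 divisor selection shown above. */
	#include <stdio.h>

	static unsigned int calc_div(unsigned long rate, unsigned long parent)
	{
		/* DIV_ROUND_CLOSEST() equivalent for positive values */
		unsigned int div = (parent + rate / 2) / rate;

		if (div < 1)
			div = 1;
		if (div > 64)
			div = 64;	/* 6-bit field: divisors 1..64 */
		return div;
	}

	int main(void)
	{
		/* e.g. 800 MHz parent, 90 MHz requested -> div 9 -> ~88.9 MHz */
		unsigned long parent = 800000000, rate = 90000000;
		unsigned int div = calc_div(rate, parent);

		printf("div=%u, actual=%lu Hz\n", div, parent / div);
		return 0;
	}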
diff --git a/drivers/clk/shmobile/clk-emev2.c b/drivers/clk/shmobile/clk-emev2.c
new file mode 100644
index 000000000000..6c7c929c7765
--- /dev/null
+++ b/drivers/clk/shmobile/clk-emev2.c
@@ -0,0 +1,104 @@
+/*
+ * EMMA Mobile EV2 common clock framework support
+ *
+ * Copyright (C) 2013 Takashi Yoshii <takashi.yoshii.ze@renesas.com>
+ * Copyright (C) 2012 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+/* EMEV2 SMU registers */
+#define USIAU0_RSTCTRL 0x094
+#define USIBU1_RSTCTRL 0x0ac
+#define USIBU2_RSTCTRL 0x0b0
+#define USIBU3_RSTCTRL 0x0b4
+#define STI_RSTCTRL 0x124
+#define STI_CLKSEL 0x688
+
+static DEFINE_SPINLOCK(lock);
+
+/* not pretty, but hey */
+void __iomem *smu_base;
+
+static void __init emev2_smu_write(unsigned long value, int offs)
+{
+ BUG_ON(!smu_base || (offs >= PAGE_SIZE));
+ writel_relaxed(value, smu_base + offs);
+}
+
+static const struct of_device_id smu_id[] __initconst = {
+ { .compatible = "renesas,emev2-smu", },
+ {},
+};
+
+static void __init emev2_smu_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, smu_id);
+ BUG_ON(!np);
+ smu_base = of_iomap(np, 0);
+ BUG_ON(!smu_base);
+ of_node_put(np);
+
+ /* setup STI timer to run on 32.768 kHz and deassert reset */
+ emev2_smu_write(0, STI_CLKSEL);
+ emev2_smu_write(1, STI_RSTCTRL);
+
+ /* deassert reset for UART0->UART3 */
+ emev2_smu_write(2, USIAU0_RSTCTRL);
+ emev2_smu_write(2, USIBU1_RSTCTRL);
+ emev2_smu_write(2, USIBU2_RSTCTRL);
+ emev2_smu_write(2, USIBU3_RSTCTRL);
+}
+
+static void __init emev2_smu_clkdiv_init(struct device_node *np)
+{
+ u32 reg[2];
+ struct clk *clk;
+ const char *parent_name = of_clk_get_parent_name(np, 0);
+ if (WARN_ON(of_property_read_u32_array(np, "reg", reg, 2)))
+ return;
+ if (!smu_base)
+ emev2_smu_init();
+ clk = clk_register_divider(NULL, np->name, parent_name, 0,
+ smu_base + reg[0], reg[1], 8, 0, &lock);
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, np->name, NULL);
+ pr_debug("## %s %s %p\n", __func__, np->name, clk);
+}
+CLK_OF_DECLARE(emev2_smu_clkdiv, "renesas,emev2-smu-clkdiv",
+ emev2_smu_clkdiv_init);
+
+static void __init emev2_smu_gclk_init(struct device_node *np)
+{
+ u32 reg[2];
+ struct clk *clk;
+ const char *parent_name = of_clk_get_parent_name(np, 0);
+ if (WARN_ON(of_property_read_u32_array(np, "reg", reg, 2)))
+ return;
+ if (!smu_base)
+ emev2_smu_init();
+ clk = clk_register_gate(NULL, np->name, parent_name, 0,
+ smu_base + reg[0], reg[1], 0, &lock);
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, np->name, NULL);
+ pr_debug("## %s %s %p\n", __func__, np->name, clk);
+}
+CLK_OF_DECLARE(emev2_smu_gclk, "renesas,emev2-smu-gclk", emev2_smu_gclk_init);
diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/shmobile/clk-mstp.c
new file mode 100644
index 000000000000..42d5912b1d25
--- /dev/null
+++ b/drivers/clk/shmobile/clk-mstp.c
@@ -0,0 +1,233 @@
+/*
+ * R-Car MSTP clocks
+ *
+ * Copyright (C) 2013 Ideas On Board SPRL
+ *
+ * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+
+/*
+ * MSTP clocks. We can't use standard gate clocks as we need to poll on the
+ * status register when enabling the clock.
+ */
+
+#define MSTP_MAX_CLOCKS 32
+
+/**
+ * struct mstp_clock_group - MSTP gating clocks group
+ *
+ * @data: clocks in this group
+ * @smstpcr: module stop control register
+ * @mstpsr: module stop status register (optional)
+ * @lock: protects writes to SMSTPCR
+ */
+struct mstp_clock_group {
+ struct clk_onecell_data data;
+ void __iomem *smstpcr;
+ void __iomem *mstpsr;
+ spinlock_t lock;
+};
+
+/**
+ * struct mstp_clock - MSTP gating clock
+ * @hw: handle between common and hardware-specific interfaces
+ * @bit_index: control bit index
+ * @group: MSTP clocks group
+ */
+struct mstp_clock {
+ struct clk_hw hw;
+ u32 bit_index;
+ struct mstp_clock_group *group;
+};
+
+#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)
+
+static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
+{
+ struct mstp_clock *clock = to_mstp_clock(hw);
+ struct mstp_clock_group *group = clock->group;
+ u32 bitmask = BIT(clock->bit_index);
+ unsigned long flags;
+ unsigned int i;
+ u32 value;
+
+ spin_lock_irqsave(&group->lock, flags);
+
+ value = clk_readl(group->smstpcr);
+ if (enable)
+ value &= ~bitmask;
+ else
+ value |= bitmask;
+ clk_writel(value, group->smstpcr);
+
+ spin_unlock_irqrestore(&group->lock, flags);
+
+ if (!enable || !group->mstpsr)
+ return 0;
+
+ for (i = 1000; i > 0; --i) {
+ if (!(clk_readl(group->mstpsr) & bitmask))
+ break;
+ cpu_relax();
+ }
+
+ if (!i) {
+ pr_err("%s: failed to enable %p[%d]\n", __func__,
+ group->smstpcr, clock->bit_index);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int cpg_mstp_clock_enable(struct clk_hw *hw)
+{
+ return cpg_mstp_clock_endisable(hw, true);
+}
+
+static void cpg_mstp_clock_disable(struct clk_hw *hw)
+{
+ cpg_mstp_clock_endisable(hw, false);
+}
+
+static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
+{
+ struct mstp_clock *clock = to_mstp_clock(hw);
+ struct mstp_clock_group *group = clock->group;
+ u32 value;
+
+ if (group->mstpsr)
+ value = clk_readl(group->mstpsr);
+ else
+ value = clk_readl(group->smstpcr);
+
+ return !!(value & BIT(clock->bit_index));
+}
+
+static const struct clk_ops cpg_mstp_clock_ops = {
+ .enable = cpg_mstp_clock_enable,
+ .disable = cpg_mstp_clock_disable,
+ .is_enabled = cpg_mstp_clock_is_enabled,
+};
+
+static struct clk * __init
+cpg_mstp_clock_register(const char *name, const char *parent_name,
+ unsigned int index, struct mstp_clock_group *group)
+{
+ struct clk_init_data init;
+ struct mstp_clock *clock;
+ struct clk *clk;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ pr_err("%s: failed to allocate MSTP clock.\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &cpg_mstp_clock_ops;
+ init.flags = CLK_IS_BASIC;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clock->bit_index = index;
+ clock->group = group;
+ clock->hw.init = &init;
+
+ clk = clk_register(NULL, &clock->hw);
+
+ if (IS_ERR(clk))
+ kfree(clock);
+
+ return clk;
+}
+
+static void __init cpg_mstp_clocks_init(struct device_node *np)
+{
+ struct mstp_clock_group *group;
+ struct clk **clks;
+ unsigned int i;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ clks = kmalloc(MSTP_MAX_CLOCKS * sizeof(*clks), GFP_KERNEL);
+ if (group == NULL || clks == NULL) {
+ kfree(group);
+ kfree(clks);
+ pr_err("%s: failed to allocate group\n", __func__);
+ return;
+ }
+
+ spin_lock_init(&group->lock);
+ group->data.clks = clks;
+
+ group->smstpcr = of_iomap(np, 0);
+ group->mstpsr = of_iomap(np, 1);
+
+ if (group->smstpcr == NULL) {
+ pr_err("%s: failed to remap SMSTPCR\n", __func__);
+ kfree(group);
+ kfree(clks);
+ return;
+ }
+
+ for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
+ clks[i] = ERR_PTR(-ENOENT);
+
+ for (i = 0; i < MSTP_MAX_CLOCKS; ++i) {
+ const char *parent_name;
+ const char *name;
+ u32 clkidx;
+ int ret;
+
+ /* Skip clocks with no name. */
+ ret = of_property_read_string_index(np, "clock-output-names",
+ i, &name);
+ if (ret < 0 || strlen(name) == 0)
+ continue;
+
+ parent_name = of_clk_get_parent_name(np, i);
+ ret = of_property_read_u32_index(np, "renesas,clock-indices", i,
+ &clkidx);
+ if (parent_name == NULL || ret < 0)
+ break;
+
+ if (clkidx >= MSTP_MAX_CLOCKS) {
+ pr_err("%s: invalid clock %s %s index %u)\n",
+ __func__, np->name, name, clkidx);
+ continue;
+ }
+
+ clks[clkidx] = cpg_mstp_clock_register(name, parent_name,
+ clkidx, group);
+ if (!IS_ERR(clks[clkidx])) {
+ group->data.clk_num = max(group->data.clk_num,
+ clkidx + 1);
+ /*
+ * Register a clkdev to let board code retrieve the
+ * clock by name and register aliases for non-DT
+ * devices.
+ *
+ * FIXME: Remove this when all devices that require a
+ * clock will be instantiated from DT.
+ */
+ clk_register_clkdev(clks[clkidx], name, NULL);
+ } else {
+ pr_err("%s: failed to register %s %s clock (%ld)\n",
+ __func__, np->name, name, PTR_ERR(clks[clkidx]));
+ }
+ }
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &group->data);
+}
+CLK_OF_DECLARE(cpg_mstp_clks, "renesas,cpg-mstp-clocks", cpg_mstp_clocks_init);
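Because the MSTP gates are published through of_clk_add_provider() (and, temporarily, through the clkdev aliases noted in the FIXME above), a DT consumer only needs the generic clk API. A minimal sketch, assuming a hypothetical consumer device whose DT node carries a clocks phandle to one of these gates:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer; the device and its DT "clocks" phandle are assumed. */
static int example_enable_mstp_clock(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* enable() clears the SMSTPCR bit and polls MSTPSR as implemented above */
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* ... use the module, then ... */
	clk_disable_unprepare(clk);
	return 0;
}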
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c
new file mode 100644
index 000000000000..a59ec217a124
--- /dev/null
+++ b/drivers/clk/shmobile/clk-rcar-gen2.c
@@ -0,0 +1,298 @@
+/*
+ * rcar_gen2 Core CPG Clocks
+ *
+ * Copyright (C) 2013 Ideas On Board SPRL
+ *
+ * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/shmobile.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+
+struct rcar_gen2_cpg {
+ struct clk_onecell_data data;
+ spinlock_t lock;
+ void __iomem *reg;
+};
+
+#define CPG_SDCKCR 0x00000074
+#define CPG_PLL0CR 0x000000d8
+#define CPG_FRQCRC 0x000000e0
+#define CPG_FRQCRC_ZFC_MASK (0x1f << 8)
+#define CPG_FRQCRC_ZFC_SHIFT 8
+
+/* -----------------------------------------------------------------------------
+ * Z Clock
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is adjustable. clk->rate = parent->rate * mult / 32
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+struct cpg_z_clk {
+ struct clk_hw hw;
+ void __iomem *reg;
+};
+
+#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
+
+static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ unsigned int val;
+
+ val = (clk_readl(zclk->reg) & CPG_FRQCRC_ZFC_MASK)
+ >> CPG_FRQCRC_ZFC_SHIFT;
+ mult = 32 - val;
+
+ return div_u64((u64)parent_rate * mult, 32);
+}
+
+static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long prate = *parent_rate;
+ unsigned int mult;
+
+ if (!prate)
+ prate = 1;
+
+ mult = div_u64((u64)rate * 32, prate);
+ mult = clamp(mult, 1U, 32U);
+
+ return *parent_rate / 32 * mult;
+}
+
+static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ u32 val;
+
+ mult = div_u64((u64)rate * 32, parent_rate);
+ mult = clamp(mult, 1U, 32U);
+
+ val = clk_readl(zclk->reg);
+ val &= ~CPG_FRQCRC_ZFC_MASK;
+ val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT;
+ clk_writel(val, zclk->reg);
+
+ return 0;
+}
+
+static const struct clk_ops cpg_z_clk_ops = {
+ .recalc_rate = cpg_z_clk_recalc_rate,
+ .round_rate = cpg_z_clk_round_rate,
+ .set_rate = cpg_z_clk_set_rate,
+};
+
+static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg)
+{
+ static const char *parent_name = "pll0";
+ struct clk_init_data init;
+ struct cpg_z_clk *zclk;
+ struct clk *clk;
+
+ zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
+ if (!zclk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = "z";
+ init.ops = &cpg_z_clk_ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ zclk->reg = cpg->reg + CPG_FRQCRC;
+ zclk->hw.init = &init;
+
+ clk = clk_register(NULL, &zclk->hw);
+ if (IS_ERR(clk))
+ kfree(zclk);
+
+ return clk;
+}
+
+/* -----------------------------------------------------------------------------
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3
+ * 14 13 19 (MHz) *1 *1
+ *---------------------------------------------------
+ * 0 0 0 15 x 1 x172/2 x208/2 x106
+ * 0 0 1 15 x 1 x172/2 x208/2 x88
+ * 0 1 0 20 x 1 x130/2 x156/2 x80
+ * 0 1 1 20 x 1 x130/2 x156/2 x66
+ * 1 0 0 26 / 2 x200/2 x240/2 x122
+ * 1 0 1 26 / 2 x200/2 x240/2 x102
+ * 1 1 0 30 / 2 x172/2 x208/2 x106
+ * 1 1 1 30 / 2 x172/2 x208/2 x88
+ *
+ * *1 : Table 7.6 indicates VCO output (PLLx = VCO/2)
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
+ (((md) & BIT(13)) >> 12) | \
+ (((md) & BIT(19)) >> 19))
+struct cpg_pll_config {
+ unsigned int extal_div;
+ unsigned int pll1_mult;
+ unsigned int pll3_mult;
+};
+
+static const struct cpg_pll_config cpg_pll_configs[8] __initconst = {
+ { 1, 208, 106 }, { 1, 208, 88 }, { 1, 156, 80 }, { 1, 156, 66 },
+ { 2, 240, 122 }, { 2, 240, 102 }, { 2, 208, 106 }, { 2, 208, 88 },
+};
+
+/* SDHI divisors */
+static const struct clk_div_table cpg_sdh_div_table[] = {
+ { 0, 2 }, { 1, 3 }, { 2, 4 }, { 3, 6 },
+ { 4, 8 }, { 5, 12 }, { 6, 16 }, { 7, 18 },
+ { 8, 24 }, { 10, 36 }, { 11, 48 }, { 0, 0 },
+};
+
+static const struct clk_div_table cpg_sd01_div_table[] = {
+ { 5, 12 }, { 6, 16 }, { 7, 18 }, { 8, 24 },
+ { 10, 36 }, { 11, 48 }, { 12, 10 }, { 0, 0 },
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+static u32 cpg_mode __initdata;
+
+static struct clk * __init
+rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
+ const struct cpg_pll_config *config,
+ const char *name)
+{
+ const struct clk_div_table *table = NULL;
+ const char *parent_name = "main";
+ unsigned int shift;
+ unsigned int mult = 1;
+ unsigned int div = 1;
+
+ if (!strcmp(name, "main")) {
+ parent_name = of_clk_get_parent_name(np, 0);
+ div = config->extal_div;
+ } else if (!strcmp(name, "pll0")) {
+ /* PLL0 is a configurable multiplier clock. Register it as a
+ * fixed factor clock for now as there's no generic multiplier
+ * clock implementation and we currently have no need to change
+ * the multiplier value.
+ */
+ u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+ mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
+ } else if (!strcmp(name, "pll1")) {
+ mult = config->pll1_mult / 2;
+ } else if (!strcmp(name, "pll3")) {
+ mult = config->pll3_mult;
+ } else if (!strcmp(name, "lb")) {
+ div = cpg_mode & BIT(18) ? 36 : 24;
+ } else if (!strcmp(name, "qspi")) {
+ div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2)
+ ? 16 : 20;
+ } else if (!strcmp(name, "sdh")) {
+ table = cpg_sdh_div_table;
+ shift = 8;
+ } else if (!strcmp(name, "sd0")) {
+ table = cpg_sd01_div_table;
+ shift = 4;
+ } else if (!strcmp(name, "sd1")) {
+ table = cpg_sd01_div_table;
+ shift = 0;
+ } else if (!strcmp(name, "z")) {
+ return cpg_z_clk_register(cpg);
+ } else {
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!table)
+ return clk_register_fixed_factor(NULL, name, parent_name, 0,
+ mult, div);
+ else
+ return clk_register_divider_table(NULL, name, parent_name, 0,
+ cpg->reg + CPG_SDCKCR, shift,
+ 4, 0, table, &cpg->lock);
+}
+
+static void __init rcar_gen2_cpg_clocks_init(struct device_node *np)
+{
+ const struct cpg_pll_config *config;
+ struct rcar_gen2_cpg *cpg;
+ struct clk **clks;
+ unsigned int i;
+ int num_clks;
+
+ num_clks = of_property_count_strings(np, "clock-output-names");
+ if (num_clks < 0) {
+ pr_err("%s: failed to count clocks\n", __func__);
+ return;
+ }
+
+ cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+ clks = kzalloc(num_clks * sizeof(*clks), GFP_KERNEL);
+ if (cpg == NULL || clks == NULL) {
+ /* We're leaking memory on purpose; there's no point in cleaning
+ * up as the system won't boot anyway.
+ */
+ pr_err("%s: failed to allocate cpg\n", __func__);
+ return;
+ }
+
+ spin_lock_init(&cpg->lock);
+
+ cpg->data.clks = clks;
+ cpg->data.clk_num = num_clks;
+
+ cpg->reg = of_iomap(np, 0);
+ if (WARN_ON(cpg->reg == NULL))
+ return;
+
+ config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ for (i = 0; i < num_clks; ++i) {
+ const char *name;
+ struct clk *clk;
+
+ of_property_read_string_index(np, "clock-output-names", i,
+ &name);
+
+ clk = rcar_gen2_cpg_register_clock(np, cpg, config, name);
+ if (IS_ERR(clk))
+ pr_err("%s: failed to register %s %s clock (%ld)\n",
+ __func__, np->name, name, PTR_ERR(clk));
+ else
+ cpg->data.clks[i] = clk;
+ }
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+}
+CLK_OF_DECLARE(rcar_gen2_cpg_clks, "renesas,rcar-gen2-cpg-clocks",
+ rcar_gen2_cpg_clocks_init);
+
+void __init rcar_gen2_clocks_init(u32 mode)
+{
+ cpg_mode = mode;
+
+ of_clk_init(NULL);
+}
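CPG_PLL_CONFIG_INDEX() folds mode pins MD14, MD13 and MD19 into a 3-bit row index for cpg_pll_configs[], in the same column order as the table above. A small worked example, with the mode value chosen purely for illustration:

#include <linux/bitops.h>
#include <linux/types.h>

/* Hypothetical MD pin sample: MD14 = 0, MD13 = 1, MD19 = 1 */
static unsigned int example_pll_config_index(void)
{
	u32 mode = BIT(13) | BIT(19);
	unsigned int idx = ((mode & BIT(14)) >> 12) |
			   ((mode & BIT(13)) >> 12) |
			   ((mode & BIT(19)) >> 19);

	/*
	 * idx == 3, so cpg_pll_configs[3] = { 1, 156, 66 } applies:
	 * a 20 MHz EXTAL divided by 1, PLL1 = EXTAL x156/2, PLL3 = EXTAL x66,
	 * matching the MD14/MD13/MD19 = 0/1/1 row of the table above.
	 */
	return idx;
}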
diff --git a/drivers/clk/sirf/Makefile b/drivers/clk/sirf/Makefile
new file mode 100644
index 000000000000..36b8e203f6e7
--- /dev/null
+++ b/drivers/clk/sirf/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for sirf specific clk
+#
+
+obj-$(CONFIG_ARCH_SIRF) += clk-prima2.o clk-atlas6.o
diff --git a/drivers/clk/sirf/atlas6.h b/drivers/clk/sirf/atlas6.h
new file mode 100644
index 000000000000..376217f3bf8f
--- /dev/null
+++ b/drivers/clk/sirf/atlas6.h
@@ -0,0 +1,31 @@
+#define SIRFSOC_CLKC_CLK_EN0 0x0000
+#define SIRFSOC_CLKC_CLK_EN1 0x0004
+#define SIRFSOC_CLKC_REF_CFG 0x0020
+#define SIRFSOC_CLKC_CPU_CFG 0x0024
+#define SIRFSOC_CLKC_MEM_CFG 0x0028
+#define SIRFSOC_CLKC_MEMDIV_CFG 0x002C
+#define SIRFSOC_CLKC_SYS_CFG 0x0030
+#define SIRFSOC_CLKC_IO_CFG 0x0034
+#define SIRFSOC_CLKC_DSP_CFG 0x0038
+#define SIRFSOC_CLKC_GFX_CFG 0x003c
+#define SIRFSOC_CLKC_MM_CFG 0x0040
+#define SIRFSOC_CLKC_GFX2D_CFG 0x0040
+#define SIRFSOC_CLKC_LCD_CFG 0x0044
+#define SIRFSOC_CLKC_MMC01_CFG 0x0048
+#define SIRFSOC_CLKC_MMC23_CFG 0x004C
+#define SIRFSOC_CLKC_MMC45_CFG 0x0050
+#define SIRFSOC_CLKC_NAND_CFG 0x0054
+#define SIRFSOC_CLKC_NANDDIV_CFG 0x0058
+#define SIRFSOC_CLKC_PLL1_CFG0 0x0080
+#define SIRFSOC_CLKC_PLL2_CFG0 0x0084
+#define SIRFSOC_CLKC_PLL3_CFG0 0x0088
+#define SIRFSOC_CLKC_PLL1_CFG1 0x008c
+#define SIRFSOC_CLKC_PLL2_CFG1 0x0090
+#define SIRFSOC_CLKC_PLL3_CFG1 0x0094
+#define SIRFSOC_CLKC_PLL1_CFG2 0x0098
+#define SIRFSOC_CLKC_PLL2_CFG2 0x009c
+#define SIRFSOC_CLKC_PLL3_CFG2 0x00A0
+#define SIRFSOC_USBPHY_PLL_CTRL 0x0008
+#define SIRFSOC_USBPHY_PLL_POWERDOWN BIT(1)
+#define SIRFSOC_USBPHY_PLL_BYPASS BIT(2)
+#define SIRFSOC_USBPHY_PLL_LOCK BIT(3)
diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c
new file mode 100644
index 000000000000..f9f4a15a64ab
--- /dev/null
+++ b/drivers/clk/sirf/clk-atlas6.c
@@ -0,0 +1,152 @@
+/*
+ * Clock tree for CSR SiRFatlasVI
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+
+#include "atlas6.h"
+#include "clk-common.c"
+
+static struct clk_dmn clk_mmc01 = {
+ .regofs = SIRFSOC_CLKC_MMC01_CFG,
+ .enable_bit = 59,
+ .hw = {
+ .init = &clk_mmc01_init,
+ },
+};
+
+static struct clk_dmn clk_mmc23 = {
+ .regofs = SIRFSOC_CLKC_MMC23_CFG,
+ .enable_bit = 60,
+ .hw = {
+ .init = &clk_mmc23_init,
+ },
+};
+
+static struct clk_dmn clk_mmc45 = {
+ .regofs = SIRFSOC_CLKC_MMC45_CFG,
+ .enable_bit = 61,
+ .hw = {
+ .init = &clk_mmc45_init,
+ },
+};
+
+static struct clk_init_data clk_nand_init = {
+ .name = "nand",
+ .ops = &dmn_ops,
+ .parent_names = dmn_clk_parents,
+ .num_parents = ARRAY_SIZE(dmn_clk_parents),
+};
+
+static struct clk_dmn clk_nand = {
+ .regofs = SIRFSOC_CLKC_NAND_CFG,
+ .enable_bit = 34,
+ .hw = {
+ .init = &clk_nand_init,
+ },
+};
+
+enum atlas6_clk_index {
+ /* 0 1 2 3 4 5 6 7 8 9 */
+ rtc, osc, pll1, pll2, pll3, mem, sys, security, dsp, gps,
+ mf, io, cpu, uart0, uart1, uart2, tsc, i2c0, i2c1, spi0,
+ spi1, pwmc, efuse, pulse, dmac0, dmac1, nand, audio, usp0, usp1,
+ usp2, vip, gfx, gfx2d, lcd, vpp, mmc01, mmc23, mmc45, usbpll,
+ usb0, usb1, cphif, maxclk,
+};
+
+static __initdata struct clk_hw *atlas6_clk_hw_array[maxclk] = {
+ NULL, /* dummy */
+ NULL,
+ &clk_pll1.hw,
+ &clk_pll2.hw,
+ &clk_pll3.hw,
+ &clk_mem.hw,
+ &clk_sys.hw,
+ &clk_security.hw,
+ &clk_dsp.hw,
+ &clk_gps.hw,
+ &clk_mf.hw,
+ &clk_io.hw,
+ &clk_cpu.hw,
+ &clk_uart0.hw,
+ &clk_uart1.hw,
+ &clk_uart2.hw,
+ &clk_tsc.hw,
+ &clk_i2c0.hw,
+ &clk_i2c1.hw,
+ &clk_spi0.hw,
+ &clk_spi1.hw,
+ &clk_pwmc.hw,
+ &clk_efuse.hw,
+ &clk_pulse.hw,
+ &clk_dmac0.hw,
+ &clk_dmac1.hw,
+ &clk_nand.hw,
+ &clk_audio.hw,
+ &clk_usp0.hw,
+ &clk_usp1.hw,
+ &clk_usp2.hw,
+ &clk_vip.hw,
+ &clk_gfx.hw,
+ &clk_gfx2d.hw,
+ &clk_lcd.hw,
+ &clk_vpp.hw,
+ &clk_mmc01.hw,
+ &clk_mmc23.hw,
+ &clk_mmc45.hw,
+ &usb_pll_clk_hw,
+ &clk_usb0.hw,
+ &clk_usb1.hw,
+ &clk_cphif.hw,
+};
+
+static struct clk *atlas6_clks[maxclk];
+
+static void __init atlas6_clk_init(struct device_node *np)
+{
+ struct device_node *rscnp;
+ int i;
+
+ rscnp = of_find_compatible_node(NULL, NULL, "sirf,prima2-rsc");
+ sirfsoc_rsc_vbase = of_iomap(rscnp, 0);
+ if (!sirfsoc_rsc_vbase)
+ panic("unable to map rsc registers\n");
+ of_node_put(rscnp);
+
+ sirfsoc_clk_vbase = of_iomap(np, 0);
+ if (!sirfsoc_clk_vbase)
+ panic("unable to map clkc registers\n");
+
+ /* These are always available (RTC and 26 MHz OSC) */
+ atlas6_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL,
+ CLK_IS_ROOT, 32768);
+ atlas6_clks[osc] = clk_register_fixed_rate(NULL, "osc", NULL,
+ CLK_IS_ROOT, 26000000);
+
+ for (i = pll1; i < maxclk; i++) {
+ atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]);
+ BUG_ON(!atlas6_clks[i]);
+ }
+ clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu");
+ clk_register_clkdev(atlas6_clks[io], NULL, "io");
+ clk_register_clkdev(atlas6_clks[mem], NULL, "mem");
+ clk_register_clkdev(atlas6_clks[mem], NULL, "osc");
+
+ clk_data.clks = atlas6_clks;
+ clk_data.clk_num = maxclk;
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+}
+CLK_OF_DECLARE(atlas6_clk, "sirf,atlas6-clkc", atlas6_clk_init);
diff --git a/drivers/clk/clk-prima2.c b/drivers/clk/sirf/clk-common.c
index 6c15e3316137..7dde6a82f514 100644
--- a/drivers/clk/clk-prima2.c
+++ b/drivers/clk/sirf/clk-common.c
@@ -1,51 +1,18 @@
/*
- * Clock tree for CSR SiRFprimaII
+ * Common clock module for all SiRF SoCs
*
* Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
*
* Licensed under GPLv2 or later.
*/
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
-
-#define SIRFSOC_CLKC_CLK_EN0 0x0000
-#define SIRFSOC_CLKC_CLK_EN1 0x0004
-#define SIRFSOC_CLKC_REF_CFG 0x0014
-#define SIRFSOC_CLKC_CPU_CFG 0x0018
-#define SIRFSOC_CLKC_MEM_CFG 0x001c
-#define SIRFSOC_CLKC_SYS_CFG 0x0020
-#define SIRFSOC_CLKC_IO_CFG 0x0024
-#define SIRFSOC_CLKC_DSP_CFG 0x0028
-#define SIRFSOC_CLKC_GFX_CFG 0x002c
-#define SIRFSOC_CLKC_MM_CFG 0x0030
-#define SIRFSOC_CLKC_LCD_CFG 0x0034
-#define SIRFSOC_CLKC_MMC_CFG 0x0038
-#define SIRFSOC_CLKC_PLL1_CFG0 0x0040
-#define SIRFSOC_CLKC_PLL2_CFG0 0x0044
-#define SIRFSOC_CLKC_PLL3_CFG0 0x0048
-#define SIRFSOC_CLKC_PLL1_CFG1 0x004c
-#define SIRFSOC_CLKC_PLL2_CFG1 0x0050
-#define SIRFSOC_CLKC_PLL3_CFG1 0x0054
-#define SIRFSOC_CLKC_PLL1_CFG2 0x0058
-#define SIRFSOC_CLKC_PLL2_CFG2 0x005c
-#define SIRFSOC_CLKC_PLL3_CFG2 0x0060
-#define SIRFSOC_USBPHY_PLL_CTRL 0x0008
-#define SIRFSOC_USBPHY_PLL_POWERDOWN BIT(1)
-#define SIRFSOC_USBPHY_PLL_BYPASS BIT(2)
-#define SIRFSOC_USBPHY_PLL_LOCK BIT(3)
-
-static void *sirfsoc_clk_vbase, *sirfsoc_rsc_vbase;
-
#define KHZ 1000
#define MHZ (KHZ * KHZ)
+static void *sirfsoc_clk_vbase;
+static void *sirfsoc_rsc_vbase;
+static struct clk_onecell_data clk_data;
+
/*
* SiRFprimaII clock controller
* - 2 oscillators: osc-26MHz, rtc-32.768KHz
@@ -127,6 +94,7 @@ static long pll_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
unsigned long fin, nf, nr, od;
+ u64 dividend;
/*
* fout = fin * nf / (nr * od);
@@ -147,7 +115,10 @@ static long pll_clk_round_rate(struct clk_hw *hw, unsigned long rate,
nr = BIT(6);
od = 1;
- return fin * nf / (nr * od);
+ dividend = (u64)fin * nf;
+ do_div(dividend, nr * od);
+
+ return (long)dividend;
}
static int pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -186,6 +157,30 @@ static int pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static long cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /*
+ * The SiRF SoC has no CPU clock control,
+ * so bypass to its parent PLL.
+ */
+ struct clk *parent_clk = clk_get_parent(hw->clk);
+ struct clk *pll_parent_clk = clk_get_parent(parent_clk);
+ unsigned long pll_parent_rate = clk_get_rate(pll_parent_clk);
+ return pll_clk_round_rate(__clk_get_hw(parent_clk), rate, &pll_parent_rate);
+}
+
+static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ /*
+ * The SiRF SoC has no CPU clock control,
+ * so return the parent PLL rate.
+ */
+ struct clk *parent_clk = clk_get_parent(hw->clk);
+ return __clk_get_rate(parent_clk);
+}
+
static struct clk_ops std_pll_ops = {
.recalc_rate = pll_clk_recalc_rate,
.round_rate = pll_clk_round_rate,
@@ -403,6 +398,42 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static int cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int ret1, ret2;
+ struct clk *cur_parent;
+
+ if (rate == clk_get_rate(clk_pll1.hw.clk)) {
+ ret1 = clk_set_parent(hw->clk, clk_pll1.hw.clk);
+ return ret1;
+ }
+
+ if (rate == clk_get_rate(clk_pll2.hw.clk)) {
+ ret1 = clk_set_parent(hw->clk, clk_pll2.hw.clk);
+ return ret1;
+ }
+
+ if (rate == clk_get_rate(clk_pll3.hw.clk)) {
+ ret1 = clk_set_parent(hw->clk, clk_pll3.hw.clk);
+ return ret1;
+ }
+
+ cur_parent = clk_get_parent(hw->clk);
+
+ /* switch to tmp pll before setting parent clock's rate */
+ if (cur_parent == clk_pll1.hw.clk) {
+ ret1 = clk_set_parent(hw->clk, clk_pll2.hw.clk);
+ BUG_ON(ret1);
+ }
+
+ ret2 = clk_set_rate(clk_pll1.hw.clk, rate);
+
+ ret1 = clk_set_parent(hw->clk, clk_pll1.hw.clk);
+
+ return ret2 ? ret2 : ret1;
+}
+
static struct clk_ops msi_ops = {
.set_rate = dmn_clk_set_rate,
.round_rate = dmn_clk_round_rate,
@@ -457,6 +488,9 @@ static struct clk_dmn clk_io = {
static struct clk_ops cpu_ops = {
.set_parent = dmn_clk_set_parent,
.get_parent = dmn_clk_get_parent,
+ .set_rate = cpu_clk_set_rate,
+ .round_rate = cpu_clk_round_rate,
+ .recalc_rate = cpu_clk_recalc_rate,
};
static struct clk_init_data clk_cpu_init = {
@@ -532,6 +566,11 @@ static struct clk_dmn clk_mm = {
},
};
+/*
+ * for atlas6, gfx2d holds the bit of prima2's clk_mm
+ */
+#define clk_gfx2d clk_mm
+
static struct clk_init_data clk_lcd_init = {
.name = "lcd",
.ops = &dmn_ops,
@@ -569,14 +608,6 @@ static struct clk_init_data clk_mmc01_init = {
.num_parents = ARRAY_SIZE(dmn_clk_parents),
};
-static struct clk_dmn clk_mmc01 = {
- .regofs = SIRFSOC_CLKC_MMC_CFG,
- .enable_bit = 59,
- .hw = {
- .init = &clk_mmc01_init,
- },
-};
-
static struct clk_init_data clk_mmc23_init = {
.name = "mmc23",
.ops = &dmn_ops,
@@ -584,14 +615,6 @@ static struct clk_init_data clk_mmc23_init = {
.num_parents = ARRAY_SIZE(dmn_clk_parents),
};
-static struct clk_dmn clk_mmc23 = {
- .regofs = SIRFSOC_CLKC_MMC_CFG,
- .enable_bit = 60,
- .hw = {
- .init = &clk_mmc23_init,
- },
-};
-
static struct clk_init_data clk_mmc45_init = {
.name = "mmc45",
.ops = &dmn_ops,
@@ -599,14 +622,6 @@ static struct clk_init_data clk_mmc45_init = {
.num_parents = ARRAY_SIZE(dmn_clk_parents),
};
-static struct clk_dmn clk_mmc45 = {
- .regofs = SIRFSOC_CLKC_MMC_CFG,
- .enable_bit = 61,
- .hw = {
- .init = &clk_mmc45_init,
- },
-};
-
/*
* peripheral controllers in io domain
*/
@@ -667,6 +682,20 @@ static struct clk_ops ios_ops = {
.disable = std_clk_disable,
};
+static struct clk_init_data clk_cphif_init = {
+ .name = "cphif",
+ .ops = &ios_ops,
+ .parent_names = std_clk_io_parents,
+ .num_parents = ARRAY_SIZE(std_clk_io_parents),
+};
+
+static struct clk_std clk_cphif = {
+ .enable_bit = 20,
+ .hw = {
+ .init = &clk_cphif_init,
+ },
+};
+
static struct clk_init_data clk_dmac0_init = {
.name = "dmac0",
.ops = &ios_ops,
@@ -695,20 +724,6 @@ static struct clk_std clk_dmac1 = {
},
};
-static struct clk_init_data clk_nand_init = {
- .name = "nand",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_nand = {
- .enable_bit = 34,
- .hw = {
- .init = &clk_nand_init,
- },
-};
-
static struct clk_init_data clk_audio_init = {
.name = "audio",
.ops = &ios_ops,
@@ -970,7 +985,7 @@ static const char *std_clk_sys_parents[] = {
};
static struct clk_init_data clk_security_init = {
- .name = "mf",
+ .name = "security",
.ops = &ios_ops,
.parent_names = std_clk_sys_parents,
.num_parents = ARRAY_SIZE(std_clk_sys_parents),
@@ -1014,96 +1029,3 @@ static struct clk_std clk_usb1 = {
.init = &clk_usb1_init,
},
};
-
-enum prima2_clk_index {
- /* 0 1 2 3 4 5 6 7 8 9 */
- rtc, osc, pll1, pll2, pll3, mem, sys, security, dsp, gps,
- mf, io, cpu, uart0, uart1, uart2, tsc, i2c0, i2c1, spi0,
- spi1, pwmc, efuse, pulse, dmac0, dmac1, nand, audio, usp0, usp1,
- usp2, vip, gfx, mm, lcd, vpp, mmc01, mmc23, mmc45, usbpll,
- usb0, usb1, maxclk,
-};
-
-static struct clk_hw *prima2_clk_hw_array[maxclk] __initdata = {
- NULL, /* dummy */
- NULL,
- &clk_pll1.hw,
- &clk_pll2.hw,
- &clk_pll3.hw,
- &clk_mem.hw,
- &clk_sys.hw,
- &clk_security.hw,
- &clk_dsp.hw,
- &clk_gps.hw,
- &clk_mf.hw,
- &clk_io.hw,
- &clk_cpu.hw,
- &clk_uart0.hw,
- &clk_uart1.hw,
- &clk_uart2.hw,
- &clk_tsc.hw,
- &clk_i2c0.hw,
- &clk_i2c1.hw,
- &clk_spi0.hw,
- &clk_spi1.hw,
- &clk_pwmc.hw,
- &clk_efuse.hw,
- &clk_pulse.hw,
- &clk_dmac0.hw,
- &clk_dmac1.hw,
- &clk_nand.hw,
- &clk_audio.hw,
- &clk_usp0.hw,
- &clk_usp1.hw,
- &clk_usp2.hw,
- &clk_vip.hw,
- &clk_gfx.hw,
- &clk_mm.hw,
- &clk_lcd.hw,
- &clk_vpp.hw,
- &clk_mmc01.hw,
- &clk_mmc23.hw,
- &clk_mmc45.hw,
- &usb_pll_clk_hw,
- &clk_usb0.hw,
- &clk_usb1.hw,
-};
-
-static struct clk *prima2_clks[maxclk];
-static struct clk_onecell_data clk_data;
-
-static void __init sirfsoc_clk_init(struct device_node *np)
-{
- struct device_node *rscnp;
- int i;
-
- rscnp = of_find_compatible_node(NULL, NULL, "sirf,prima2-rsc");
- sirfsoc_rsc_vbase = of_iomap(rscnp, 0);
- if (!sirfsoc_rsc_vbase)
- panic("unable to map rsc registers\n");
- of_node_put(rscnp);
-
- sirfsoc_clk_vbase = of_iomap(np, 0);
- if (!sirfsoc_clk_vbase)
- panic("unable to map clkc registers\n");
-
- /* These are always available (RTC and 26MHz OSC)*/
- prima2_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL,
- CLK_IS_ROOT, 32768);
- prima2_clks[osc]= clk_register_fixed_rate(NULL, "osc", NULL,
- CLK_IS_ROOT, 26000000);
-
- for (i = pll1; i < maxclk; i++) {
- prima2_clks[i] = clk_register(NULL, prima2_clk_hw_array[i]);
- BUG_ON(IS_ERR(prima2_clks[i]));
- }
- clk_register_clkdev(prima2_clks[cpu], NULL, "cpu");
- clk_register_clkdev(prima2_clks[io], NULL, "io");
- clk_register_clkdev(prima2_clks[mem], NULL, "mem");
-
- clk_data.clks = prima2_clks;
- clk_data.clk_num = maxclk;
-
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
-}
-CLK_OF_DECLARE(sirfsoc_clk, "sirf,prima2-clkc", sirfsoc_clk_init);
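The do_div() conversion in pll_clk_round_rate() above exists to avoid a 32-bit overflow in fin * nf. A quick bound, where the maximum nf is an assumption chosen for illustration rather than a value taken from this hunk:

#include <linux/types.h>
#include <asm/div64.h>

/*
 * Assumed worst case (not from this hunk): fin = 26 MHz and nf = 4096.
 * fin * nf = 106,496,000,000, which overflows a 32-bit product (max ~4.29e9),
 * so the multiplication must be widened to u64 before dividing by nr * od.
 */
static unsigned long example_pll_round_overflow(void)
{
	u64 dividend = (u64)26000000 * 4096;

	do_div(dividend, 64);			/* example nr * od */
	return (unsigned long)dividend;		/* 1,664,000,000 -- fits in 32 bits */
}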
diff --git a/drivers/clk/sirf/clk-prima2.c b/drivers/clk/sirf/clk-prima2.c
new file mode 100644
index 000000000000..7adc5c70c7ff
--- /dev/null
+++ b/drivers/clk/sirf/clk-prima2.c
@@ -0,0 +1,151 @@
+/*
+ * Clock tree for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+
+#include "prima2.h"
+#include "clk-common.c"
+
+static struct clk_dmn clk_mmc01 = {
+ .regofs = SIRFSOC_CLKC_MMC_CFG,
+ .enable_bit = 59,
+ .hw = {
+ .init = &clk_mmc01_init,
+ },
+};
+
+static struct clk_dmn clk_mmc23 = {
+ .regofs = SIRFSOC_CLKC_MMC_CFG,
+ .enable_bit = 60,
+ .hw = {
+ .init = &clk_mmc23_init,
+ },
+};
+
+static struct clk_dmn clk_mmc45 = {
+ .regofs = SIRFSOC_CLKC_MMC_CFG,
+ .enable_bit = 61,
+ .hw = {
+ .init = &clk_mmc45_init,
+ },
+};
+
+static struct clk_init_data clk_nand_init = {
+ .name = "nand",
+ .ops = &ios_ops,
+ .parent_names = std_clk_io_parents,
+ .num_parents = ARRAY_SIZE(std_clk_io_parents),
+};
+
+static struct clk_std clk_nand = {
+ .enable_bit = 34,
+ .hw = {
+ .init = &clk_nand_init,
+ },
+};
+
+enum prima2_clk_index {
+ /* 0 1 2 3 4 5 6 7 8 9 */
+ rtc, osc, pll1, pll2, pll3, mem, sys, security, dsp, gps,
+ mf, io, cpu, uart0, uart1, uart2, tsc, i2c0, i2c1, spi0,
+ spi1, pwmc, efuse, pulse, dmac0, dmac1, nand, audio, usp0, usp1,
+ usp2, vip, gfx, mm, lcd, vpp, mmc01, mmc23, mmc45, usbpll,
+ usb0, usb1, cphif, maxclk,
+};
+
+static __initdata struct clk_hw *prima2_clk_hw_array[maxclk] = {
+ NULL, /* dummy */
+ NULL,
+ &clk_pll1.hw,
+ &clk_pll2.hw,
+ &clk_pll3.hw,
+ &clk_mem.hw,
+ &clk_sys.hw,
+ &clk_security.hw,
+ &clk_dsp.hw,
+ &clk_gps.hw,
+ &clk_mf.hw,
+ &clk_io.hw,
+ &clk_cpu.hw,
+ &clk_uart0.hw,
+ &clk_uart1.hw,
+ &clk_uart2.hw,
+ &clk_tsc.hw,
+ &clk_i2c0.hw,
+ &clk_i2c1.hw,
+ &clk_spi0.hw,
+ &clk_spi1.hw,
+ &clk_pwmc.hw,
+ &clk_efuse.hw,
+ &clk_pulse.hw,
+ &clk_dmac0.hw,
+ &clk_dmac1.hw,
+ &clk_nand.hw,
+ &clk_audio.hw,
+ &clk_usp0.hw,
+ &clk_usp1.hw,
+ &clk_usp2.hw,
+ &clk_vip.hw,
+ &clk_gfx.hw,
+ &clk_mm.hw,
+ &clk_lcd.hw,
+ &clk_vpp.hw,
+ &clk_mmc01.hw,
+ &clk_mmc23.hw,
+ &clk_mmc45.hw,
+ &usb_pll_clk_hw,
+ &clk_usb0.hw,
+ &clk_usb1.hw,
+ &clk_cphif.hw,
+};
+
+static struct clk *prima2_clks[maxclk];
+
+static void __init prima2_clk_init(struct device_node *np)
+{
+ struct device_node *rscnp;
+ int i;
+
+ rscnp = of_find_compatible_node(NULL, NULL, "sirf,prima2-rsc");
+ sirfsoc_rsc_vbase = of_iomap(rscnp, 0);
+ if (!sirfsoc_rsc_vbase)
+ panic("unable to map rsc registers\n");
+ of_node_put(rscnp);
+
+ sirfsoc_clk_vbase = of_iomap(np, 0);
+ if (!sirfsoc_clk_vbase)
+ panic("unable to map clkc registers\n");
+
+ /* These are always available (RTC and 26 MHz OSC) */
+ prima2_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL,
+ CLK_IS_ROOT, 32768);
+ prima2_clks[osc] = clk_register_fixed_rate(NULL, "osc", NULL,
+ CLK_IS_ROOT, 26000000);
+
+ for (i = pll1; i < maxclk; i++) {
+ prima2_clks[i] = clk_register(NULL, prima2_clk_hw_array[i]);
+ BUG_ON(!prima2_clks[i]);
+ }
+ clk_register_clkdev(prima2_clks[cpu], NULL, "cpu");
+ clk_register_clkdev(prima2_clks[io], NULL, "io");
+ clk_register_clkdev(prima2_clks[mem], NULL, "mem");
+ clk_register_clkdev(prima2_clks[mem], NULL, "osc");
+
+ clk_data.clks = prima2_clks;
+ clk_data.clk_num = maxclk;
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+}
+CLK_OF_DECLARE(prima2_clk, "sirf,prima2-clkc", prima2_clk_init);
diff --git a/drivers/clk/sirf/prima2.h b/drivers/clk/sirf/prima2.h
new file mode 100644
index 000000000000..01bc3854a058
--- /dev/null
+++ b/drivers/clk/sirf/prima2.h
@@ -0,0 +1,25 @@
+#define SIRFSOC_CLKC_CLK_EN0 0x0000
+#define SIRFSOC_CLKC_CLK_EN1 0x0004
+#define SIRFSOC_CLKC_REF_CFG 0x0014
+#define SIRFSOC_CLKC_CPU_CFG 0x0018
+#define SIRFSOC_CLKC_MEM_CFG 0x001c
+#define SIRFSOC_CLKC_SYS_CFG 0x0020
+#define SIRFSOC_CLKC_IO_CFG 0x0024
+#define SIRFSOC_CLKC_DSP_CFG 0x0028
+#define SIRFSOC_CLKC_GFX_CFG 0x002c
+#define SIRFSOC_CLKC_MM_CFG 0x0030
+#define SIRFSOC_CLKC_LCD_CFG 0x0034
+#define SIRFSOC_CLKC_MMC_CFG 0x0038
+#define SIRFSOC_CLKC_PLL1_CFG0 0x0040
+#define SIRFSOC_CLKC_PLL2_CFG0 0x0044
+#define SIRFSOC_CLKC_PLL3_CFG0 0x0048
+#define SIRFSOC_CLKC_PLL1_CFG1 0x004c
+#define SIRFSOC_CLKC_PLL2_CFG1 0x0050
+#define SIRFSOC_CLKC_PLL3_CFG1 0x0054
+#define SIRFSOC_CLKC_PLL1_CFG2 0x0058
+#define SIRFSOC_CLKC_PLL2_CFG2 0x005c
+#define SIRFSOC_CLKC_PLL3_CFG2 0x0060
+#define SIRFSOC_USBPHY_PLL_CTRL 0x0008
+#define SIRFSOC_USBPHY_PLL_POWERDOWN BIT(1)
+#define SIRFSOC_USBPHY_PLL_BYPASS BIT(2)
+#define SIRFSOC_USBPHY_PLL_LOCK BIT(3)
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
index 81dd31a686df..5983a26a8c5f 100644
--- a/drivers/clk/socfpga/clk.c
+++ b/drivers/clk/socfpga/clk.c
@@ -121,9 +121,7 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
int rc;
u32 fixed_div;
- rc = of_property_read_u32(node, "reg", &reg);
- if (WARN_ON(rc))
- return NULL;
+ of_property_read_u32(node, "reg", &reg);
socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
if (WARN_ON(!socfpga_clk))
@@ -292,7 +290,7 @@ static void __init socfpga_gate_clk_init(struct device_node *node,
socfpga_clk->shift = div_reg[1];
socfpga_clk->width = div_reg[2];
} else {
- socfpga_clk->div_reg = 0;
+ socfpga_clk->div_reg = NULL;
}
of_property_read_string(node, "clock-output-names", &clk_name);
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index 958aa3ad1d60..dffd4ce6c8b5 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -116,7 +116,7 @@ static int clk_frac_set_rate(struct clk_hw *hw, unsigned long drate,
return 0;
}
-struct clk_ops clk_frac_ops = {
+static struct clk_ops clk_frac_ops = {
.recalc_rate = clk_frac_recalc_rate,
.round_rate = clk_frac_round_rate,
.set_rate = clk_frac_set_rate,
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
index 88523f91d9b7..9e232644f07e 100644
--- a/drivers/clk/sunxi/clk-factors.c
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -30,17 +30,9 @@
* parent - fixed parent. No clk_set_parent support
*/
-struct clk_factors {
- struct clk_hw hw;
- void __iomem *reg;
- struct clk_factors_config *config;
- void (*get_factors) (u32 *rate, u32 parent, u8 *n, u8 *k, u8 *m, u8 *p);
- spinlock_t *lock;
-};
-
#define to_clk_factors(_hw) container_of(_hw, struct clk_factors, hw)
-#define SETMASK(len, pos) (((-1U) >> (31-len)) << (pos))
+#define SETMASK(len, pos) (((1U << (len)) - 1) << (pos))
#define CLRMASK(len, pos) (~(SETMASK(len, pos)))
#define FACTOR_GET(bit, len, reg) (((reg) & SETMASK(len, bit)) >> (bit))
@@ -88,7 +80,7 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- u8 n, k, m, p;
+ u8 n = 0, k = 0, m = 0, p = 0;
u32 reg;
struct clk_factors *factors = to_clk_factors(hw);
struct clk_factors_config *config = factors->config;
@@ -120,61 +112,8 @@ static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static const struct clk_ops clk_factors_ops = {
+const struct clk_ops clk_factors_ops = {
.recalc_rate = clk_factors_recalc_rate,
.round_rate = clk_factors_round_rate,
.set_rate = clk_factors_set_rate,
};
-
-/**
- * clk_register_factors - register a factors clock with
- * the clock framework
- * @dev: device registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @reg: register address to adjust factors
- * @config: shift and width of factors n, k, m and p
- * @get_factors: function to calculate the factors for a given frequency
- * @lock: shared register lock for this clock
- */
-struct clk *clk_register_factors(struct device *dev, const char *name,
- const char *parent_name,
- unsigned long flags, void __iomem *reg,
- struct clk_factors_config *config,
- void (*get_factors)(u32 *rate, u32 parent,
- u8 *n, u8 *k, u8 *m, u8 *p),
- spinlock_t *lock)
-{
- struct clk_factors *factors;
- struct clk *clk;
- struct clk_init_data init;
-
- /* allocate the factors */
- factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL);
- if (!factors) {
- pr_err("%s: could not allocate factors clk\n", __func__);
- return ERR_PTR(-ENOMEM);
- }
-
- init.name = name;
- init.ops = &clk_factors_ops;
- init.flags = flags;
- init.parent_names = (parent_name ? &parent_name : NULL);
- init.num_parents = (parent_name ? 1 : 0);
-
- /* struct clk_factors assignments */
- factors->reg = reg;
- factors->config = config;
- factors->lock = lock;
- factors->hw.init = &init;
- factors->get_factors = get_factors;
-
- /* register the clock */
- clk = clk_register(dev, &factors->hw);
-
- if (IS_ERR(clk))
- kfree(factors);
-
- return clk;
-}
diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h
index f49851cc4380..02e1a43ebac7 100644
--- a/drivers/clk/sunxi/clk-factors.h
+++ b/drivers/clk/sunxi/clk-factors.h
@@ -17,11 +17,13 @@ struct clk_factors_config {
u8 pwidth;
};
-struct clk *clk_register_factors(struct device *dev, const char *name,
- const char *parent_name,
- unsigned long flags, void __iomem *reg,
- struct clk_factors_config *config,
- void (*get_factors) (u32 *rate, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p),
- spinlock_t *lock);
+struct clk_factors {
+ struct clk_hw hw;
+ void __iomem *reg;
+ struct clk_factors_config *config;
+ void (*get_factors) (u32 *rate, u32 parent, u8 *n, u8 *k, u8 *m, u8 *p);
+ spinlock_t *lock;
+};
+
+extern const struct clk_ops clk_factors_ops;
#endif
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 9bbd03514540..abb6c5ac8a10 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -23,6 +23,9 @@
static DEFINE_SPINLOCK(clk_lock);
+/* Maximum number of parents our clocks have */
+#define SUNXI_MAX_PARENTS 5
+
/**
* sun4i_osc_clk_setup() - Setup function for gatable oscillator
*/
@@ -37,18 +40,16 @@ static void __init sun4i_osc_clk_setup(struct device_node *node)
const char *clk_name = node->name;
u32 rate;
+ if (of_property_read_u32(node, "clock-frequency", &rate))
+ return;
+
/* allocate fixed-rate and gate clock structs */
fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
if (!fixed)
return;
gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
- if (!gate) {
- kfree(fixed);
- return;
- }
-
- if (of_property_read_u32(node, "clock-frequency", &rate))
- return;
+ if (!gate)
+ goto err_free_fixed;
/* set up gate and fixed rate properties */
gate->reg = of_iomap(node, 0);
@@ -63,10 +64,18 @@ static void __init sun4i_osc_clk_setup(struct device_node *node)
&gate->hw, &clk_gate_ops,
CLK_IS_ROOT);
- if (!IS_ERR(clk)) {
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
- }
+ if (IS_ERR(clk))
+ goto err_free_gate;
+
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, clk_name, NULL);
+
+ return;
+
+err_free_gate:
+ kfree(gate);
+err_free_fixed:
+ kfree(fixed);
}
CLK_OF_DECLARE(sun4i_osc, "allwinner,sun4i-osc-clk", sun4i_osc_clk_setup);
@@ -209,6 +218,40 @@ static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
}
/**
+ * sun4i_get_pll5_factors() - calculates n, k factors for PLL5
+ * PLL5 rate is calculated as follows
+ * rate = parent_rate * n * (k + 1)
+ * parent_rate is always 24 MHz
+ */
+
+static void sun4i_get_pll5_factors(u32 *freq, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p)
+{
+ u8 div;
+
+ /* Normalize value to a parent_rate multiple (24M) */
+ div = *freq / parent_rate;
+ *freq = parent_rate * div;
+
+ /* we were called to round the frequency, we can now return */
+ if (n == NULL)
+ return;
+
+ if (div < 31)
+ *k = 0;
+ else if (div / 2 < 31)
+ *k = 1;
+ else if (div / 3 < 31)
+ *k = 2;
+ else
+ *k = 3;
+
+ *n = DIV_ROUND_UP(div, (*k+1));
+}
+
+
+
+/**
* sun4i_get_apb1_factors() - calculates m, p factors for APB1
* APB1 rate is calculated as follows
* rate = (parent_rate >> p) / (m + 1);
@@ -252,10 +295,96 @@ static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
/**
+ * sun4i_get_mod0_factors() - calculates m, p factors for MOD0-style clocks
+ * MOD0 rate is calculated as follows
+ * rate = (parent_rate >> p) / (m + 1);
+ */
+
+static void sun4i_get_mod0_factors(u32 *freq, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p)
+{
+ u8 div, calcm, calcp;
+
+ /* These clocks can only divide, so we will never be able to achieve
+ * frequencies higher than the parent frequency */
+ if (*freq > parent_rate)
+ *freq = parent_rate;
+
+ div = parent_rate / *freq;
+
+ if (div < 16)
+ calcp = 0;
+ else if (div / 2 < 16)
+ calcp = 1;
+ else if (div / 4 < 16)
+ calcp = 2;
+ else
+ calcp = 3;
+
+ calcm = DIV_ROUND_UP(div, 1 << calcp);
+
+ *freq = (parent_rate >> calcp) / calcm;
+
+ /* we were called to round the frequency, we can now return */
+ if (n == NULL)
+ return;
+
+ *m = calcm - 1;
+ *p = calcp;
+}
+
+
+
+/**
+ * sun7i_a20_get_out_factors() - calculates m, p factors for CLK_OUT_A/B
+ * CLK_OUT rate is calculated as follows
+ * rate = (parent_rate >> p) / (m + 1);
+ */
+
+static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p)
+{
+ u8 div, calcm, calcp;
+
+ /* These clocks can only divide, so we will never be able to achieve
+ * frequencies higher than the parent frequency */
+ if (*freq > parent_rate)
+ *freq = parent_rate;
+
+ div = parent_rate / *freq;
+
+ if (div < 32)
+ calcp = 0;
+ else if (div / 2 < 32)
+ calcp = 1;
+ else if (div / 4 < 32)
+ calcp = 2;
+ else
+ calcp = 3;
+
+ calcm = DIV_ROUND_UP(div, 1 << calcp);
+
+ *freq = (parent_rate >> calcp) / calcm;
+
+ /* we were called to round the frequency, we can now return */
+ if (n == NULL)
+ return;
+
+ *m = calcm - 1;
+ *p = calcp;
+}
+
+
+
+/**
* sunxi_factors_clk_setup() - Setup function for factor clocks
*/
+#define SUNXI_FACTORS_MUX_MASK 0x3
+
struct factors_data {
+ int enable;
+ int mux;
struct clk_factors_config *table;
void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
};
@@ -280,6 +409,13 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
.mwidth = 2,
};
+static struct clk_factors_config sun4i_pll5_config = {
+ .nshift = 8,
+ .nwidth = 5,
+ .kshift = 4,
+ .kwidth = 2,
+};
+
static struct clk_factors_config sun4i_apb1_config = {
.mshift = 0,
.mwidth = 5,
@@ -287,40 +423,143 @@ static struct clk_factors_config sun4i_apb1_config = {
.pwidth = 2,
};
+/* user manual says "n" but it's really "p" */
+static struct clk_factors_config sun4i_mod0_config = {
+ .mshift = 0,
+ .mwidth = 4,
+ .pshift = 16,
+ .pwidth = 2,
+};
+
+/* user manual says "n" but it's really "p" */
+static struct clk_factors_config sun7i_a20_out_config = {
+ .mshift = 8,
+ .mwidth = 5,
+ .pshift = 20,
+ .pwidth = 2,
+};
+
static const struct factors_data sun4i_pll1_data __initconst = {
+ .enable = 31,
.table = &sun4i_pll1_config,
.getter = sun4i_get_pll1_factors,
};
static const struct factors_data sun6i_a31_pll1_data __initconst = {
+ .enable = 31,
.table = &sun6i_a31_pll1_config,
.getter = sun6i_a31_get_pll1_factors,
};
+static const struct factors_data sun4i_pll5_data __initconst = {
+ .enable = 31,
+ .table = &sun4i_pll5_config,
+ .getter = sun4i_get_pll5_factors,
+};
+
static const struct factors_data sun4i_apb1_data __initconst = {
.table = &sun4i_apb1_config,
.getter = sun4i_get_apb1_factors,
};
-static void __init sunxi_factors_clk_setup(struct device_node *node,
- struct factors_data *data)
+static const struct factors_data sun4i_mod0_data __initconst = {
+ .enable = 31,
+ .mux = 24,
+ .table = &sun4i_mod0_config,
+ .getter = sun4i_get_mod0_factors,
+};
+
+static const struct factors_data sun7i_a20_out_data __initconst = {
+ .enable = 31,
+ .mux = 24,
+ .table = &sun7i_a20_out_config,
+ .getter = sun7i_a20_get_out_factors,
+};
+
+static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
+ const struct factors_data *data)
{
struct clk *clk;
+ struct clk_factors *factors;
+ struct clk_gate *gate = NULL;
+ struct clk_mux *mux = NULL;
+ struct clk_hw *gate_hw = NULL;
+ struct clk_hw *mux_hw = NULL;
const char *clk_name = node->name;
- const char *parent;
+ const char *parents[SUNXI_MAX_PARENTS];
void *reg;
+ int i = 0;
reg = of_iomap(node, 0);
- parent = of_clk_get_parent_name(node, 0);
+ /* if we have a mux, we will have >1 parents */
+ while (i < SUNXI_MAX_PARENTS &&
+ (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
+ i++;
+
+ /* Nodes should be providing the name via clock-output-names
+ * but originally our dts didn't, and so we used node->name.
+ * The new, better nodes look like clk@deadbeef, so we pull the
+ * name just in this case */
+ if (!strcmp("clk", clk_name)) {
+ of_property_read_string_index(node, "clock-output-names",
+ 0, &clk_name);
+ }
+
+ factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL);
+ if (!factors)
+ return NULL;
+
+ /* Add a gate if this factor clock can be gated */
+ if (data->enable) {
+ gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
+ if (!gate) {
+ kfree(factors);
+ return NULL;
+ }
+
+ /* set up gate properties */
+ gate->reg = reg;
+ gate->bit_idx = data->enable;
+ gate->lock = &clk_lock;
+ gate_hw = &gate->hw;
+ }
+
+ /* Add a mux if this factor clock can be muxed */
+ if (data->mux) {
+ mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
+ if (!mux) {
+ kfree(factors);
+ kfree(gate);
+ return NULL;
+ }
+
+ /* set up mux properties */
+ mux->reg = reg;
+ mux->shift = data->mux;
+ mux->mask = SUNXI_FACTORS_MUX_MASK;
+ mux->lock = &clk_lock;
+ mux_hw = &mux->hw;
+ }
- clk = clk_register_factors(NULL, clk_name, parent, 0, reg,
- data->table, data->getter, &clk_lock);
+ /* set up factors properties */
+ factors->reg = reg;
+ factors->config = data->table;
+ factors->get_factors = data->getter;
+ factors->lock = &clk_lock;
+
+ clk = clk_register_composite(NULL, clk_name,
+ parents, i,
+ mux_hw, &clk_mux_ops,
+ &factors->hw, &clk_factors_ops,
+ gate_hw, &clk_gate_ops, 0);
if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
clk_register_clkdev(clk, clk_name, NULL);
}
+
+ return clk;
}
@@ -352,13 +591,14 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
{
struct clk *clk;
const char *clk_name = node->name;
- const char *parents[5];
+ const char *parents[SUNXI_MAX_PARENTS];
void *reg;
int i = 0;
reg = of_iomap(node, 0);
- while (i < 5 && (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
+ while (i < SUNXI_MAX_PARENTS &&
+ (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
i++;
clk = clk_register_mux(NULL, clk_name, parents, i,
@@ -555,11 +795,186 @@ static void __init sunxi_gates_clk_setup(struct device_node *node,
of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
}
+
+
+/**
+ * sunxi_divs_clk_setup() helper data
+ */
+
+#define SUNXI_DIVS_MAX_QTY 2
+#define SUNXI_DIVISOR_WIDTH 2
+
+struct divs_data {
+ const struct factors_data *factors; /* data for the factor clock */
+ struct {
+ u8 fixed; /* is it a fixed divisor? if not... */
+ struct clk_div_table *table; /* is it a table based divisor? */
+ u8 shift; /* otherwise it's a normal divisor with this shift */
+ u8 pow; /* is it power-of-two based? */
+ u8 gate; /* is it independently gateable? */
+ } div[SUNXI_DIVS_MAX_QTY];
+};
+
+static struct clk_div_table pll6_sata_tbl[] = {
+ { .val = 0, .div = 6, },
+ { .val = 1, .div = 12, },
+ { .val = 2, .div = 18, },
+ { .val = 3, .div = 24, },
+ { } /* sentinel */
+};
+
+static const struct divs_data pll5_divs_data __initconst = {
+ .factors = &sun4i_pll5_data,
+ .div = {
+ { .shift = 0, .pow = 0, }, /* M, DDR */
+ { .shift = 16, .pow = 1, }, /* P, other */
+ }
+};
+
+static const struct divs_data pll6_divs_data __initconst = {
+ .factors = &sun4i_pll5_data,
+ .div = {
+ { .shift = 0, .table = pll6_sata_tbl, .gate = 14 }, /* M, SATA */
+ { .fixed = 2 }, /* P, other */
+ }
+};
+
+/**
+ * sunxi_divs_clk_setup() - Setup function for leaf divisors on clocks
+ *
+ * These clocks look something like this
+ * ________________________
+ * | ___divisor 1---|----> to consumer
+ * parent >--| pll___/___divisor 2---|----> to consumer
+ * | \_______________|____> to consumer
+ * |________________________|
+ */
+
+static void __init sunxi_divs_clk_setup(struct device_node *node,
+ struct divs_data *data)
+{
+ struct clk_onecell_data *clk_data;
+ const char *parent = node->name;
+ const char *clk_name;
+ struct clk **clks, *pclk;
+ struct clk_hw *gate_hw, *rate_hw;
+ const struct clk_ops *rate_ops;
+ struct clk_gate *gate = NULL;
+ struct clk_fixed_factor *fix_factor;
+ struct clk_divider *divider;
+ void *reg;
+ int i = 0;
+ int flags, clkflags;
+
+ /* Set up factor clock that we will be dividing */
+ pclk = sunxi_factors_clk_setup(node, data->factors);
+
+ reg = of_iomap(node, 0);
+
+ clk_data = kmalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clks = kzalloc((SUNXI_DIVS_MAX_QTY+1) * sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ goto free_clkdata;
+
+ clk_data->clks = clks;
+
+ /* It's not a good idea to have automatic reparenting changing
+ * our RAM clock! */
+ clkflags = !strcmp("pll5", parent) ? 0 : CLK_SET_RATE_PARENT;
+
+ for (i = 0; i < SUNXI_DIVS_MAX_QTY; i++) {
+ if (of_property_read_string_index(node, "clock-output-names",
+ i, &clk_name) != 0)
+ break;
+
+ gate_hw = NULL;
+ rate_hw = NULL;
+ rate_ops = NULL;
+
+ /* If this leaf clock can be gated, create a gate */
+ if (data->div[i].gate) {
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ goto free_clks;
+
+ gate->reg = reg;
+ gate->bit_idx = data->div[i].gate;
+ gate->lock = &clk_lock;
+
+ gate_hw = &gate->hw;
+ }
+
+ /* Leaves can be fixed or configurable divisors */
+ if (data->div[i].fixed) {
+ fix_factor = kzalloc(sizeof(*fix_factor), GFP_KERNEL);
+ if (!fix_factor)
+ goto free_gate;
+
+ fix_factor->mult = 1;
+ fix_factor->div = data->div[i].fixed;
+
+ rate_hw = &fix_factor->hw;
+ rate_ops = &clk_fixed_factor_ops;
+ } else {
+ divider = kzalloc(sizeof(*divider), GFP_KERNEL);
+ if (!divider)
+ goto free_gate;
+
+ flags = data->div[i].pow ? CLK_DIVIDER_POWER_OF_TWO : 0;
+
+ divider->reg = reg;
+ divider->shift = data->div[i].shift;
+ divider->width = SUNXI_DIVISOR_WIDTH;
+ divider->flags = flags;
+ divider->lock = &clk_lock;
+ divider->table = data->div[i].table;
+
+ rate_hw = &divider->hw;
+ rate_ops = &clk_divider_ops;
+ }
+
+ /* Wrap the (potential) gate and the divisor on a composite
+ * clock to unify them */
+ clks[i] = clk_register_composite(NULL, clk_name, &parent, 1,
+ NULL, NULL,
+ rate_hw, rate_ops,
+ gate_hw, &clk_gate_ops,
+ clkflags);
+
+ WARN_ON(IS_ERR(clk_data->clks[i]));
+ clk_register_clkdev(clks[i], clk_name, NULL);
+ }
+
+ /* The last clock available on the getter is the parent */
+ clks[i++] = pclk;
+
+ /* Adjust to the real max */
+ clk_data->clk_num = i;
+
+ of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ return;
+
+free_gate:
+ kfree(gate);
+free_clks:
+ kfree(clks);
+free_clkdata:
+ kfree(clk_data);
+}
+
+
+
/* Matches for factors clocks */
static const struct of_device_id clk_factors_match[] __initconst = {
{.compatible = "allwinner,sun4i-pll1-clk", .data = &sun4i_pll1_data,},
{.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
{.compatible = "allwinner,sun4i-apb1-clk", .data = &sun4i_apb1_data,},
+ {.compatible = "allwinner,sun4i-mod0-clk", .data = &sun4i_mod0_data,},
+ {.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,},
{}
};
@@ -572,6 +987,13 @@ static const struct of_device_id clk_div_match[] __initconst = {
{}
};
+/* Matches for divided outputs */
+static const struct of_device_id clk_divs_match[] __initconst = {
+ {.compatible = "allwinner,sun4i-pll5-clk", .data = &pll5_divs_data,},
+ {.compatible = "allwinner,sun4i-pll6-clk", .data = &pll6_divs_data,},
+ {}
+};
+
/* Matches for mux clocks */
static const struct of_device_id clk_mux_match[] __initconst = {
{.compatible = "allwinner,sun4i-cpu-clk", .data = &sun4i_cpu_mux_data,},
@@ -616,7 +1038,32 @@ static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_mat
}
}
-static void __init sunxi_init_clocks(struct device_node *np)
+/**
+ * System clock protection
+ *
+ * By enabling these critical clocks, we prevent their accidental gating
+ * by the framework
+ */
+static void __init sunxi_clock_protect(void)
+{
+ struct clk *clk;
+
+ /* memory bus clock - sun5i+ */
+ clk = clk_get(NULL, "mbus");
+ if (!IS_ERR(clk)) {
+ clk_prepare_enable(clk);
+ clk_put(clk);
+ }
+
+ /* DDR clock - sun4i+ */
+ clk = clk_get(NULL, "pll5_ddr");
+ if (!IS_ERR(clk)) {
+ clk_prepare_enable(clk);
+ clk_put(clk);
+ }
+}
+
+static void __init sunxi_init_clocks(void)
{
/* Register factor clocks */
of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup);
@@ -624,11 +1071,17 @@ static void __init sunxi_init_clocks(struct device_node *np)
/* Register divider clocks */
of_sunxi_table_clock_setup(clk_div_match, sunxi_divider_clk_setup);
+ /* Register divided output clocks */
+ of_sunxi_table_clock_setup(clk_divs_match, sunxi_divs_clk_setup);
+
/* Register mux clocks */
of_sunxi_table_clock_setup(clk_mux_match, sunxi_mux_clk_setup);
/* Register gate clocks */
of_sunxi_table_clock_setup(clk_gates_match, sunxi_gates_clk_setup);
+
+ /* Enable core system clocks */
+ sunxi_clock_protect();
}
CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sunxi_init_clocks);
CLK_OF_DECLARE(sun5i_a10s_clk_init, "allwinner,sun5i-a10s", sunxi_init_clocks);
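The factor getters added above (sun4i_get_pll5_factors(), sun4i_get_mod0_factors(), sun7i_a20_get_out_factors()) all treat a NULL n pointer as a round-only request, which is how clk_factors_round_rate() uses them. A minimal sketch of the two calling modes, written as if placed inside clk-sunxi.c where the static getters and kernel headers are already visible; the rates are assumptions:

/* Illustrative only: round-only vs. full factor calculation. */
static void example_mod0_getter_calls(void)
{
	u32 freq = 50000000;		/* requested rate (hypothetical) */
	u8 n, k, m, p;

	/* Round-only call: NULL factor pointers, only *freq is adjusted. */
	sun4i_get_mod0_factors(&freq, 24000000, NULL, NULL, NULL, NULL);

	/* Full call: *freq is rounded and m/p are filled in for the register
	 * write (n and k are left untouched by the MOD0 getter). */
	sun4i_get_mod0_factors(&freq, 24000000, &n, &k, &m, &p);
}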
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index f49fac2d193a..f7dfb72884a4 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -6,7 +6,12 @@ obj-y += clk-periph-gate.o
obj-y += clk-pll.o
obj-y += clk-pll-out.o
obj-y += clk-super.o
-
+obj-y += clk-tegra-audio.o
+obj-y += clk-tegra-periph.o
+obj-y += clk-tegra-pmc.o
+obj-y += clk-tegra-fixed.o
+obj-y += clk-tegra-super-gen4.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o
obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o
+obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
new file mode 100644
index 000000000000..cf0c323f2c36
--- /dev/null
+++ b/drivers/clk/tegra/clk-id.h
@@ -0,0 +1,235 @@
+/*
+ * This header provides IDs for clocks common between several Tegra SoCs
+ */
+#ifndef _TEGRA_CLK_ID_H
+#define _TEGRA_CLK_ID_H
+
+enum clk_id {
+ tegra_clk_actmon,
+ tegra_clk_adx,
+ tegra_clk_adx1,
+ tegra_clk_afi,
+ tegra_clk_amx,
+ tegra_clk_amx1,
+ tegra_clk_apbdma,
+ tegra_clk_apbif,
+ tegra_clk_audio0,
+ tegra_clk_audio0_2x,
+ tegra_clk_audio0_mux,
+ tegra_clk_audio1,
+ tegra_clk_audio1_2x,
+ tegra_clk_audio1_mux,
+ tegra_clk_audio2,
+ tegra_clk_audio2_2x,
+ tegra_clk_audio2_mux,
+ tegra_clk_audio3,
+ tegra_clk_audio3_2x,
+ tegra_clk_audio3_mux,
+ tegra_clk_audio4,
+ tegra_clk_audio4_2x,
+ tegra_clk_audio4_mux,
+ tegra_clk_blink,
+ tegra_clk_bsea,
+ tegra_clk_bsev,
+ tegra_clk_cclk_g,
+ tegra_clk_cclk_lp,
+ tegra_clk_cilab,
+ tegra_clk_cilcd,
+ tegra_clk_cile,
+ tegra_clk_clk_32k,
+ tegra_clk_clk72Mhz,
+ tegra_clk_clk_m,
+ tegra_clk_clk_m_div2,
+ tegra_clk_clk_m_div4,
+ tegra_clk_clk_out_1,
+ tegra_clk_clk_out_1_mux,
+ tegra_clk_clk_out_2,
+ tegra_clk_clk_out_2_mux,
+ tegra_clk_clk_out_3,
+ tegra_clk_clk_out_3_mux,
+ tegra_clk_cml0,
+ tegra_clk_cml1,
+ tegra_clk_csi,
+ tegra_clk_csite,
+ tegra_clk_csus,
+ tegra_clk_cve,
+ tegra_clk_dam0,
+ tegra_clk_dam1,
+ tegra_clk_dam2,
+ tegra_clk_d_audio,
+ tegra_clk_dds,
+ tegra_clk_dfll_ref,
+ tegra_clk_dfll_soc,
+ tegra_clk_disp1,
+ tegra_clk_disp2,
+ tegra_clk_dp2,
+ tegra_clk_dpaux,
+ tegra_clk_dsia,
+ tegra_clk_dsialp,
+ tegra_clk_dsia_mux,
+ tegra_clk_dsib,
+ tegra_clk_dsiblp,
+ tegra_clk_dsib_mux,
+ tegra_clk_dtv,
+ tegra_clk_emc,
+ tegra_clk_entropy,
+ tegra_clk_epp,
+ tegra_clk_epp_8,
+ tegra_clk_extern1,
+ tegra_clk_extern2,
+ tegra_clk_extern3,
+ tegra_clk_fuse,
+ tegra_clk_fuse_burn,
+ tegra_clk_gpu,
+ tegra_clk_gr2d,
+ tegra_clk_gr2d_8,
+ tegra_clk_gr3d,
+ tegra_clk_gr3d_8,
+ tegra_clk_hclk,
+ tegra_clk_hda,
+ tegra_clk_hda2codec_2x,
+ tegra_clk_hda2hdmi,
+ tegra_clk_hdmi,
+ tegra_clk_hdmi_audio,
+ tegra_clk_host1x,
+ tegra_clk_host1x_8,
+ tegra_clk_i2c1,
+ tegra_clk_i2c2,
+ tegra_clk_i2c3,
+ tegra_clk_i2c4,
+ tegra_clk_i2c5,
+ tegra_clk_i2c6,
+ tegra_clk_i2cslow,
+ tegra_clk_i2s0,
+ tegra_clk_i2s0_sync,
+ tegra_clk_i2s1,
+ tegra_clk_i2s1_sync,
+ tegra_clk_i2s2,
+ tegra_clk_i2s2_sync,
+ tegra_clk_i2s3,
+ tegra_clk_i2s3_sync,
+ tegra_clk_i2s4,
+ tegra_clk_i2s4_sync,
+ tegra_clk_isp,
+ tegra_clk_isp_8,
+ tegra_clk_ispb,
+ tegra_clk_kbc,
+ tegra_clk_kfuse,
+ tegra_clk_la,
+ tegra_clk_mipi,
+ tegra_clk_mipi_cal,
+ tegra_clk_mpe,
+ tegra_clk_mselect,
+ tegra_clk_msenc,
+ tegra_clk_ndflash,
+ tegra_clk_ndflash_8,
+ tegra_clk_ndspeed,
+ tegra_clk_ndspeed_8,
+ tegra_clk_nor,
+ tegra_clk_owr,
+ tegra_clk_pcie,
+ tegra_clk_pclk,
+ tegra_clk_pll_a,
+ tegra_clk_pll_a_out0,
+ tegra_clk_pll_c,
+ tegra_clk_pll_c2,
+ tegra_clk_pll_c3,
+ tegra_clk_pll_c4,
+ tegra_clk_pll_c_out1,
+ tegra_clk_pll_d,
+ tegra_clk_pll_d2,
+ tegra_clk_pll_d2_out0,
+ tegra_clk_pll_d_out0,
+ tegra_clk_pll_dp,
+ tegra_clk_pll_e_out0,
+ tegra_clk_pll_m,
+ tegra_clk_pll_m_out1,
+ tegra_clk_pll_p,
+ tegra_clk_pll_p_out1,
+ tegra_clk_pll_p_out2,
+ tegra_clk_pll_p_out2_int,
+ tegra_clk_pll_p_out3,
+ tegra_clk_pll_p_out4,
+ tegra_clk_pll_p_out5,
+ tegra_clk_pll_ref,
+ tegra_clk_pll_re_out,
+ tegra_clk_pll_re_vco,
+ tegra_clk_pll_u,
+ tegra_clk_pll_u_12m,
+ tegra_clk_pll_u_480m,
+ tegra_clk_pll_u_48m,
+ tegra_clk_pll_u_60m,
+ tegra_clk_pll_x,
+ tegra_clk_pll_x_out0,
+ tegra_clk_pwm,
+ tegra_clk_rtc,
+ tegra_clk_sata,
+ tegra_clk_sata_cold,
+ tegra_clk_sata_oob,
+ tegra_clk_sbc1,
+ tegra_clk_sbc1_8,
+ tegra_clk_sbc2,
+ tegra_clk_sbc2_8,
+ tegra_clk_sbc3,
+ tegra_clk_sbc3_8,
+ tegra_clk_sbc4,
+ tegra_clk_sbc4_8,
+ tegra_clk_sbc5,
+ tegra_clk_sbc5_8,
+ tegra_clk_sbc6,
+ tegra_clk_sbc6_8,
+ tegra_clk_sclk,
+ tegra_clk_sdmmc1,
+ tegra_clk_sdmmc2,
+ tegra_clk_sdmmc3,
+ tegra_clk_sdmmc4,
+ tegra_clk_se,
+ tegra_clk_soc_therm,
+ tegra_clk_sor0,
+ tegra_clk_sor0_lvds,
+ tegra_clk_spdif,
+ tegra_clk_spdif_2x,
+ tegra_clk_spdif_in,
+ tegra_clk_spdif_in_sync,
+ tegra_clk_spdif_mux,
+ tegra_clk_spdif_out,
+ tegra_clk_timer,
+ tegra_clk_trace,
+ tegra_clk_tsec,
+ tegra_clk_tsensor,
+ tegra_clk_tvdac,
+ tegra_clk_tvo,
+ tegra_clk_uarta,
+ tegra_clk_uartb,
+ tegra_clk_uartc,
+ tegra_clk_uartd,
+ tegra_clk_uarte,
+ tegra_clk_usb2,
+ tegra_clk_usb3,
+ tegra_clk_usbd,
+ tegra_clk_vcp,
+ tegra_clk_vde,
+ tegra_clk_vde_8,
+ tegra_clk_vfir,
+ tegra_clk_vi,
+ tegra_clk_vi_8,
+ tegra_clk_vi_9,
+ tegra_clk_vic03,
+ tegra_clk_vim2_clk,
+ tegra_clk_vimclk_sync,
+ tegra_clk_vi_sensor,
+ tegra_clk_vi_sensor2,
+ tegra_clk_vi_sensor_8,
+ tegra_clk_xusb_dev,
+ tegra_clk_xusb_dev_src,
+ tegra_clk_xusb_falcon_src,
+ tegra_clk_xusb_fs_src,
+ tegra_clk_xusb_host,
+ tegra_clk_xusb_host_src,
+ tegra_clk_xusb_hs_src,
+ tegra_clk_xusb_ss,
+ tegra_clk_xusb_ss_src,
+ tegra_clk_max,
+};
+
+#endif /* _TEGRA_CLK_ID_H */
diff --git a/drivers/clk/tegra/clk-periph-gate.c b/drivers/clk/tegra/clk-periph-gate.c
index bafee9895a24..507015314827 100644
--- a/drivers/clk/tegra/clk-periph-gate.c
+++ b/drivers/clk/tegra/clk-periph-gate.c
@@ -36,8 +36,6 @@ static DEFINE_SPINLOCK(periph_ref_lock);
#define read_rst(gate) \
readl_relaxed(gate->clk_base + (gate->regs->rst_reg))
-#define write_rst_set(val, gate) \
- writel_relaxed(val, gate->clk_base + (gate->regs->rst_set_reg))
#define write_rst_clr(val, gate) \
writel_relaxed(val, gate->clk_base + (gate->regs->rst_clr_reg))
@@ -123,26 +121,6 @@ static void clk_periph_disable(struct clk_hw *hw)
spin_unlock_irqrestore(&periph_ref_lock, flags);
}
-void tegra_periph_reset(struct tegra_clk_periph_gate *gate, bool assert)
-{
- if (gate->flags & TEGRA_PERIPH_NO_RESET)
- return;
-
- if (assert) {
- /*
- * If peripheral is in the APB bus then read the APB bus to
- * flush the write operation in apb bus. This will avoid the
- * peripheral access after disabling clock
- */
- if (gate->flags & TEGRA_PERIPH_ON_APB)
- tegra_read_chipid();
-
- write_rst_set(periph_clk_to_bit(gate), gate);
- } else {
- write_rst_clr(periph_clk_to_bit(gate), gate);
- }
-}
-
const struct clk_ops tegra_clk_periph_gate_ops = {
.is_enabled = clk_periph_is_enabled,
.enable = clk_periph_enable,
@@ -151,12 +129,16 @@ const struct clk_ops tegra_clk_periph_gate_ops = {
struct clk *tegra_clk_register_periph_gate(const char *name,
const char *parent_name, u8 gate_flags, void __iomem *clk_base,
- unsigned long flags, int clk_num,
- struct tegra_clk_periph_regs *pregs, int *enable_refcnt)
+ unsigned long flags, int clk_num, int *enable_refcnt)
{
struct tegra_clk_periph_gate *gate;
struct clk *clk;
struct clk_init_data init;
+ struct tegra_clk_periph_regs *pregs;
+
+ pregs = get_reg_bank(clk_num);
+ if (!pregs)
+ return ERR_PTR(-EINVAL);
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate) {
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index b2309d37a963..356e9b804421 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -111,46 +111,6 @@ static void clk_periph_disable(struct clk_hw *hw)
gate_ops->disable(gate_hw);
}
-void tegra_periph_reset_deassert(struct clk *c)
-{
- struct clk_hw *hw = __clk_get_hw(c);
- struct tegra_clk_periph *periph = to_clk_periph(hw);
- struct tegra_clk_periph_gate *gate;
-
- if (periph->magic != TEGRA_CLK_PERIPH_MAGIC) {
- gate = to_clk_periph_gate(hw);
- if (gate->magic != TEGRA_CLK_PERIPH_GATE_MAGIC) {
- WARN_ON(1);
- return;
- }
- } else {
- gate = &periph->gate;
- }
-
- tegra_periph_reset(gate, 0);
-}
-EXPORT_SYMBOL(tegra_periph_reset_deassert);
-
-void tegra_periph_reset_assert(struct clk *c)
-{
- struct clk_hw *hw = __clk_get_hw(c);
- struct tegra_clk_periph *periph = to_clk_periph(hw);
- struct tegra_clk_periph_gate *gate;
-
- if (periph->magic != TEGRA_CLK_PERIPH_MAGIC) {
- gate = to_clk_periph_gate(hw);
- if (gate->magic != TEGRA_CLK_PERIPH_GATE_MAGIC) {
- WARN_ON(1);
- return;
- }
- } else {
- gate = &periph->gate;
- }
-
- tegra_periph_reset(gate, 1);
-}
-EXPORT_SYMBOL(tegra_periph_reset_assert);
-
const struct clk_ops tegra_clk_periph_ops = {
.get_parent = clk_periph_get_parent,
.set_parent = clk_periph_set_parent,
@@ -162,7 +122,7 @@ const struct clk_ops tegra_clk_periph_ops = {
.disable = clk_periph_disable,
};
-const struct clk_ops tegra_clk_periph_nodiv_ops = {
+static const struct clk_ops tegra_clk_periph_nodiv_ops = {
.get_parent = clk_periph_get_parent,
.set_parent = clk_periph_set_parent,
.is_enabled = clk_periph_is_enabled,
@@ -170,27 +130,50 @@ const struct clk_ops tegra_clk_periph_nodiv_ops = {
.disable = clk_periph_disable,
};
+const struct clk_ops tegra_clk_periph_no_gate_ops = {
+ .get_parent = clk_periph_get_parent,
+ .set_parent = clk_periph_set_parent,
+ .recalc_rate = clk_periph_recalc_rate,
+ .round_rate = clk_periph_round_rate,
+ .set_rate = clk_periph_set_rate,
+};
+
static struct clk *_tegra_clk_register_periph(const char *name,
const char **parent_names, int num_parents,
struct tegra_clk_periph *periph,
- void __iomem *clk_base, u32 offset, bool div,
+ void __iomem *clk_base, u32 offset,
unsigned long flags)
{
struct clk *clk;
struct clk_init_data init;
+ struct tegra_clk_periph_regs *bank;
+ bool div = !(periph->gate.flags & TEGRA_PERIPH_NO_DIV);
+
+ if (periph->gate.flags & TEGRA_PERIPH_NO_DIV) {
+ flags |= CLK_SET_RATE_PARENT;
+ init.ops = &tegra_clk_periph_nodiv_ops;
+ } else if (periph->gate.flags & TEGRA_PERIPH_NO_GATE)
+ init.ops = &tegra_clk_periph_no_gate_ops;
+ else
+ init.ops = &tegra_clk_periph_ops;
init.name = name;
- init.ops = div ? &tegra_clk_periph_ops : &tegra_clk_periph_nodiv_ops;
init.flags = flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
+ bank = get_reg_bank(periph->gate.clk_num);
+ if (!bank)
+ return ERR_PTR(-EINVAL);
+
/* Data in .init is copied by clk_register(), so stack variable OK */
periph->hw.init = &init;
periph->magic = TEGRA_CLK_PERIPH_MAGIC;
periph->mux.reg = clk_base + offset;
periph->divider.reg = div ? (clk_base + offset) : NULL;
periph->gate.clk_base = clk_base;
+ periph->gate.regs = bank;
+ periph->gate.enable_refcnt = periph_clk_enb_refcnt;
clk = clk_register(NULL, &periph->hw);
if (IS_ERR(clk))
@@ -209,7 +192,7 @@ struct clk *tegra_clk_register_periph(const char *name,
u32 offset, unsigned long flags)
{
return _tegra_clk_register_periph(name, parent_names, num_parents,
- periph, clk_base, offset, true, flags);
+ periph, clk_base, offset, flags);
}
struct clk *tegra_clk_register_periph_nodiv(const char *name,
@@ -217,6 +200,7 @@ struct clk *tegra_clk_register_periph_nodiv(const char *name,
struct tegra_clk_periph *periph, void __iomem *clk_base,
u32 offset)
{
+ periph->gate.flags |= TEGRA_PERIPH_NO_DIV;
return _tegra_clk_register_periph(name, parent_names, num_parents,
- periph, clk_base, offset, false, CLK_SET_RATE_PARENT);
+ periph, clk_base, offset, CLK_SET_RATE_PARENT);
}
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 197074a57754..0d20241e0770 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -77,7 +77,23 @@
#define PLLE_MISC_SETUP_VALUE (7 << PLLE_MISC_SETUP_BASE_SHIFT)
#define PLLE_SS_CTRL 0x68
-#define PLLE_SS_DISABLE (7 << 10)
+#define PLLE_SS_CNTL_BYPASS_SS BIT(10)
+#define PLLE_SS_CNTL_INTERP_RESET BIT(11)
+#define PLLE_SS_CNTL_SSC_BYP BIT(12)
+#define PLLE_SS_CNTL_CENTER BIT(14)
+#define PLLE_SS_CNTL_INVERT BIT(15)
+#define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\
+ PLLE_SS_CNTL_SSC_BYP)
+#define PLLE_SS_MAX_MASK 0x1ff
+#define PLLE_SS_MAX_VAL 0x25
+#define PLLE_SS_INC_MASK (0xff << 16)
+#define PLLE_SS_INC_VAL (0x1 << 16)
+#define PLLE_SS_INCINTRV_MASK (0x3f << 24)
+#define PLLE_SS_INCINTRV_VAL (0x20 << 24)
+#define PLLE_SS_COEFFICIENTS_MASK \
+ (PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK)
+#define PLLE_SS_COEFFICIENTS_VAL \
+ (PLLE_SS_MAX_VAL | PLLE_SS_INC_VAL | PLLE_SS_INCINTRV_VAL)
#define PLLE_AUX_PLLP_SEL BIT(2)
#define PLLE_AUX_ENABLE_SWCTL BIT(4)
@@ -121,6 +137,36 @@
#define PMC_SATA_PWRGT_PLLE_IDDQ_VALUE BIT(5)
#define PMC_SATA_PWRGT_PLLE_IDDQ_SWCTL BIT(4)
+#define PLLSS_MISC_KCP 0
+#define PLLSS_MISC_KVCO 0
+#define PLLSS_MISC_SETUP 0
+#define PLLSS_EN_SDM 0
+#define PLLSS_EN_SSC 0
+#define PLLSS_EN_DITHER2 0
+#define PLLSS_EN_DITHER 1
+#define PLLSS_SDM_RESET 0
+#define PLLSS_CLAMP 0
+#define PLLSS_SDM_SSC_MAX 0
+#define PLLSS_SDM_SSC_MIN 0
+#define PLLSS_SDM_SSC_STEP 0
+#define PLLSS_SDM_DIN 0
+#define PLLSS_MISC_DEFAULT ((PLLSS_MISC_KCP << 25) | \
+ (PLLSS_MISC_KVCO << 24) | \
+ PLLSS_MISC_SETUP)
+#define PLLSS_CFG_DEFAULT ((PLLSS_EN_SDM << 31) | \
+ (PLLSS_EN_SSC << 30) | \
+ (PLLSS_EN_DITHER2 << 29) | \
+ (PLLSS_EN_DITHER << 28) | \
+ (PLLSS_SDM_RESET) << 27 | \
+ (PLLSS_CLAMP << 22))
+#define PLLSS_CTRL1_DEFAULT \
+ ((PLLSS_SDM_SSC_MAX << 16) | PLLSS_SDM_SSC_MIN)
+#define PLLSS_CTRL2_DEFAULT \
+ ((PLLSS_SDM_SSC_STEP << 16) | PLLSS_SDM_DIN)
+#define PLLSS_LOCK_OVERRIDE BIT(24)
+#define PLLSS_REF_SRC_SEL_SHIFT 25
+#define PLLSS_REF_SRC_SEL_MASK (3 << PLLSS_REF_SRC_SEL_SHIFT)
+
#define pll_readl(offset, p) readl_relaxed(p->clk_base + offset)
#define pll_readl_base(p) pll_readl(p->params->base_reg, p)
#define pll_readl_misc(p) pll_readl(p->params->misc_reg, p)
@@ -134,7 +180,7 @@
#define mask(w) ((1 << (w)) - 1)
#define divm_mask(p) mask(p->params->div_nmp->divm_width)
#define divn_mask(p) mask(p->params->div_nmp->divn_width)
-#define divp_mask(p) (p->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK : \
+#define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\
mask(p->params->div_nmp->divp_width))
#define divm_max(p) (divm_mask(p))
@@ -154,10 +200,10 @@ static void clk_pll_enable_lock(struct tegra_clk_pll *pll)
{
u32 val;
- if (!(pll->flags & TEGRA_PLL_USE_LOCK))
+ if (!(pll->params->flags & TEGRA_PLL_USE_LOCK))
return;
- if (!(pll->flags & TEGRA_PLL_HAS_LOCK_ENABLE))
+ if (!(pll->params->flags & TEGRA_PLL_HAS_LOCK_ENABLE))
return;
val = pll_readl_misc(pll);
@@ -171,13 +217,13 @@ static int clk_pll_wait_for_lock(struct tegra_clk_pll *pll)
u32 val, lock_mask;
void __iomem *lock_addr;
- if (!(pll->flags & TEGRA_PLL_USE_LOCK)) {
+ if (!(pll->params->flags & TEGRA_PLL_USE_LOCK)) {
udelay(pll->params->lock_delay);
return 0;
}
lock_addr = pll->clk_base;
- if (pll->flags & TEGRA_PLL_LOCK_MISC)
+ if (pll->params->flags & TEGRA_PLL_LOCK_MISC)
lock_addr += pll->params->misc_reg;
else
lock_addr += pll->params->base_reg;
@@ -204,7 +250,7 @@ static int clk_pll_is_enabled(struct clk_hw *hw)
struct tegra_clk_pll *pll = to_clk_pll(hw);
u32 val;
- if (pll->flags & TEGRA_PLLM) {
+ if (pll->params->flags & TEGRA_PLLM) {
val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
if (val & PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE)
return val & PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE ? 1 : 0;
@@ -223,12 +269,12 @@ static void _clk_pll_enable(struct clk_hw *hw)
clk_pll_enable_lock(pll);
val = pll_readl_base(pll);
- if (pll->flags & TEGRA_PLL_BYPASS)
+ if (pll->params->flags & TEGRA_PLL_BYPASS)
val &= ~PLL_BASE_BYPASS;
val |= PLL_BASE_ENABLE;
pll_writel_base(val, pll);
- if (pll->flags & TEGRA_PLLM) {
+ if (pll->params->flags & TEGRA_PLLM) {
val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
val |= PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
writel_relaxed(val, pll->pmc + PMC_PLLP_WB0_OVERRIDE);
@@ -241,12 +287,12 @@ static void _clk_pll_disable(struct clk_hw *hw)
u32 val;
val = pll_readl_base(pll);
- if (pll->flags & TEGRA_PLL_BYPASS)
+ if (pll->params->flags & TEGRA_PLL_BYPASS)
val &= ~PLL_BASE_BYPASS;
val &= ~PLL_BASE_ENABLE;
pll_writel_base(val, pll);
- if (pll->flags & TEGRA_PLLM) {
+ if (pll->params->flags & TEGRA_PLLM) {
val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
val &= ~PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
writel_relaxed(val, pll->pmc + PMC_PLLP_WB0_OVERRIDE);
@@ -326,7 +372,7 @@ static int _get_table_rate(struct clk_hw *hw,
struct tegra_clk_pll *pll = to_clk_pll(hw);
struct tegra_clk_pll_freq_table *sel;
- for (sel = pll->freq_table; sel->input_rate != 0; sel++)
+ for (sel = pll->params->freq_table; sel->input_rate != 0; sel++)
if (sel->input_rate == parent_rate &&
sel->output_rate == rate)
break;
@@ -389,12 +435,11 @@ static int _calc_rate(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
if (cfg->m > divm_max(pll) || cfg->n > divn_max(pll) ||
(1 << p_div) > divp_max(pll)
|| cfg->output_rate > pll->params->vco_max) {
- pr_err("%s: Failed to set %s rate %lu\n",
- __func__, __clk_get_name(hw->clk), rate);
- WARN_ON(1);
return -EINVAL;
}
+ cfg->output_rate >>= p_div;
+
if (pll->params->pdiv_tohw) {
ret = _p_div_to_hw(hw, 1 << p_div);
if (ret < 0)
@@ -414,7 +459,7 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
struct tegra_clk_pll_params *params = pll->params;
struct div_nmp *div_nmp = params->div_nmp;
- if ((pll->flags & TEGRA_PLLM) &&
+ if ((params->flags & TEGRA_PLLM) &&
(pll_override_readl(PMC_PLLP_WB0_OVERRIDE, pll) &
PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE)) {
val = pll_override_readl(params->pmc_divp_reg, pll);
@@ -450,7 +495,7 @@ static void _get_pll_mnp(struct tegra_clk_pll *pll,
struct tegra_clk_pll_params *params = pll->params;
struct div_nmp *div_nmp = params->div_nmp;
- if ((pll->flags & TEGRA_PLLM) &&
+ if ((params->flags & TEGRA_PLLM) &&
(pll_override_readl(PMC_PLLP_WB0_OVERRIDE, pll) &
PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE)) {
val = pll_override_readl(params->pmc_divp_reg, pll);
@@ -479,11 +524,11 @@ static void _update_pll_cpcon(struct tegra_clk_pll *pll,
val &= ~(PLL_MISC_CPCON_MASK << PLL_MISC_CPCON_SHIFT);
val |= cfg->cpcon << PLL_MISC_CPCON_SHIFT;
- if (pll->flags & TEGRA_PLL_SET_LFCON) {
+ if (pll->params->flags & TEGRA_PLL_SET_LFCON) {
val &= ~(PLL_MISC_LFCON_MASK << PLL_MISC_LFCON_SHIFT);
if (cfg->n >= PLLDU_LFCON_SET_DIVN)
val |= 1 << PLL_MISC_LFCON_SHIFT;
- } else if (pll->flags & TEGRA_PLL_SET_DCCON) {
+ } else if (pll->params->flags & TEGRA_PLL_SET_DCCON) {
val &= ~(1 << PLL_MISC_DCCON_SHIFT);
if (rate >= (pll->params->vco_max >> 1))
val |= 1 << PLL_MISC_DCCON_SHIFT;
@@ -505,7 +550,7 @@ static int _program_pll(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
_update_pll_mnp(pll, cfg);
- if (pll->flags & TEGRA_PLL_HAS_CPCON)
+ if (pll->params->flags & TEGRA_PLL_HAS_CPCON)
_update_pll_cpcon(pll, cfg, rate);
if (state) {
@@ -524,11 +569,11 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long flags = 0;
int ret = 0;
- if (pll->flags & TEGRA_PLL_FIXED) {
- if (rate != pll->fixed_rate) {
+ if (pll->params->flags & TEGRA_PLL_FIXED) {
+ if (rate != pll->params->fixed_rate) {
pr_err("%s: Can not change %s fixed rate %lu to %lu\n",
__func__, __clk_get_name(hw->clk),
- pll->fixed_rate, rate);
+ pll->params->fixed_rate, rate);
return -EINVAL;
}
return 0;
@@ -536,6 +581,8 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
if (_get_table_rate(hw, &cfg, rate, parent_rate) &&
_calc_rate(hw, &cfg, rate, parent_rate)) {
+ pr_err("%s: Failed to set %s rate %lu\n", __func__,
+ __clk_get_name(hw->clk), rate);
WARN_ON(1);
return -EINVAL;
}
@@ -559,18 +606,16 @@ static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
struct tegra_clk_pll *pll = to_clk_pll(hw);
struct tegra_clk_pll_freq_table cfg;
- if (pll->flags & TEGRA_PLL_FIXED)
- return pll->fixed_rate;
+ if (pll->params->flags & TEGRA_PLL_FIXED)
+ return pll->params->fixed_rate;
/* PLLM is used for memory; we do not change rate */
- if (pll->flags & TEGRA_PLLM)
+ if (pll->params->flags & TEGRA_PLLM)
return __clk_get_rate(hw->clk);
if (_get_table_rate(hw, &cfg, rate, *prate) &&
- _calc_rate(hw, &cfg, rate, *prate)) {
- WARN_ON(1);
+ _calc_rate(hw, &cfg, rate, *prate))
return -EINVAL;
- }
return cfg.output_rate;
}
@@ -586,17 +631,19 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
val = pll_readl_base(pll);
- if ((pll->flags & TEGRA_PLL_BYPASS) && (val & PLL_BASE_BYPASS))
+ if ((pll->params->flags & TEGRA_PLL_BYPASS) && (val & PLL_BASE_BYPASS))
return parent_rate;
- if ((pll->flags & TEGRA_PLL_FIXED) && !(val & PLL_BASE_OVERRIDE)) {
+ if ((pll->params->flags & TEGRA_PLL_FIXED) &&
+ !(val & PLL_BASE_OVERRIDE)) {
struct tegra_clk_pll_freq_table sel;
- if (_get_table_rate(hw, &sel, pll->fixed_rate, parent_rate)) {
+ if (_get_table_rate(hw, &sel, pll->params->fixed_rate,
+ parent_rate)) {
pr_err("Clock %s has unknown fixed frequency\n",
__clk_get_name(hw->clk));
BUG();
}
- return pll->fixed_rate;
+ return pll->params->fixed_rate;
}
_get_pll_mnp(pll, &cfg);
@@ -664,7 +711,7 @@ static int clk_plle_enable(struct clk_hw *hw)
u32 val;
int err;
- if (_get_table_rate(hw, &sel, pll->fixed_rate, input_rate))
+ if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
return -EINVAL;
clk_pll_disable(hw);
@@ -680,7 +727,7 @@ static int clk_plle_enable(struct clk_hw *hw)
return err;
}
- if (pll->flags & TEGRA_PLLE_CONFIGURE) {
+ if (pll->params->flags & TEGRA_PLLE_CONFIGURE) {
/* configure dividers */
val = pll_readl_base(pll);
val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
@@ -744,7 +791,7 @@ const struct clk_ops tegra_clk_plle_ops = {
.enable = clk_plle_enable,
};
-#ifdef CONFIG_ARCH_TEGRA_114_SOC
+#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC)
static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
unsigned long parent_rate)
@@ -755,6 +802,48 @@ static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
return 1;
}
+static unsigned long _clip_vco_min(unsigned long vco_min,
+ unsigned long parent_rate)
+{
+ return DIV_ROUND_UP(vco_min, parent_rate) * parent_rate;
+}
+
+static int _setup_dynamic_ramp(struct tegra_clk_pll_params *pll_params,
+ void __iomem *clk_base,
+ unsigned long parent_rate)
+{
+ u32 val;
+ u32 step_a, step_b;
+
+ switch (parent_rate) {
+ case 12000000:
+ case 13000000:
+ case 26000000:
+ step_a = 0x2B;
+ step_b = 0x0B;
+ break;
+ case 16800000:
+ step_a = 0x1A;
+ step_b = 0x09;
+ break;
+ case 19200000:
+ step_a = 0x12;
+ step_b = 0x08;
+ break;
+ default:
+ pr_err("%s: Unexpected reference rate %lu\n",
+ __func__, parent_rate);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ val = step_a << pll_params->stepa_shift;
+ val |= step_b << pll_params->stepb_shift;
+ writel_relaxed(val, clk_base + pll_params->dyn_ramp_reg);
+
+ return 0;
+}
+
static int clk_pll_iddq_enable(struct clk_hw *hw)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
@@ -1173,7 +1262,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
unsigned long flags = 0;
unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
- if (_get_table_rate(hw, &sel, pll->fixed_rate, input_rate))
+ if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
return -EINVAL;
if (pll->lock)
@@ -1217,6 +1306,18 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
if (ret < 0)
goto out;
+ val = pll_readl(PLLE_SS_CTRL, pll);
+ val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
+ val &= ~PLLE_SS_COEFFICIENTS_MASK;
+ val |= PLLE_SS_COEFFICIENTS_VAL;
+ pll_writel(val, PLLE_SS_CTRL, pll);
+ val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
+ pll_writel(val, PLLE_SS_CTRL, pll);
+ udelay(1);
+ val &= ~PLLE_SS_CNTL_INTERP_RESET;
+ pll_writel(val, PLLE_SS_CTRL, pll);
+ udelay(1);
+
/* TODO: enable hw control of xusb brick pll */
out:
@@ -1248,9 +1349,8 @@ static void clk_plle_tegra114_disable(struct clk_hw *hw)
#endif
static struct tegra_clk_pll *_tegra_init_pll(void __iomem *clk_base,
- void __iomem *pmc, unsigned long fixed_rate,
- struct tegra_clk_pll_params *pll_params, u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
+ void __iomem *pmc, struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock)
{
struct tegra_clk_pll *pll;
@@ -1261,10 +1361,7 @@ static struct tegra_clk_pll *_tegra_init_pll(void __iomem *clk_base,
pll->clk_base = clk_base;
pll->pmc = pmc;
- pll->freq_table = freq_table;
pll->params = pll_params;
- pll->fixed_rate = fixed_rate;
- pll->flags = pll_flags;
pll->lock = lock;
if (!pll_params->div_nmp)
@@ -1293,17 +1390,15 @@ static struct clk *_tegra_clk_register_pll(struct tegra_clk_pll *pll,
struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
- struct tegra_clk_pll_params *pll_params, u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
+ unsigned long flags, struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock)
{
struct tegra_clk_pll *pll;
struct clk *clk;
- pll_flags |= TEGRA_PLL_BYPASS;
- pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
- pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
- freq_table, lock);
+ pll_params->flags |= TEGRA_PLL_BYPASS;
+ pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+ pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
@@ -1317,17 +1412,15 @@ struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
- struct tegra_clk_pll_params *pll_params, u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
+ unsigned long flags, struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock)
{
struct tegra_clk_pll *pll;
struct clk *clk;
- pll_flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS;
- pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
- pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
- freq_table, lock);
+ pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS;
+ pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+ pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
@@ -1339,8 +1432,8 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
return clk;
}
-#ifdef CONFIG_ARCH_TEGRA_114_SOC
-const struct clk_ops tegra_clk_pllxc_ops = {
+#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC)
+static const struct clk_ops tegra_clk_pllxc_ops = {
.is_enabled = clk_pll_is_enabled,
.enable = clk_pll_iddq_enable,
.disable = clk_pll_iddq_disable,
@@ -1349,7 +1442,7 @@ const struct clk_ops tegra_clk_pllxc_ops = {
.set_rate = clk_pllxc_set_rate,
};
-const struct clk_ops tegra_clk_pllm_ops = {
+static const struct clk_ops tegra_clk_pllm_ops = {
.is_enabled = clk_pll_is_enabled,
.enable = clk_pll_iddq_enable,
.disable = clk_pll_iddq_disable,
@@ -1358,7 +1451,7 @@ const struct clk_ops tegra_clk_pllm_ops = {
.set_rate = clk_pllm_set_rate,
};
-const struct clk_ops tegra_clk_pllc_ops = {
+static const struct clk_ops tegra_clk_pllc_ops = {
.is_enabled = clk_pll_is_enabled,
.enable = clk_pllc_enable,
.disable = clk_pllc_disable,
@@ -1367,7 +1460,7 @@ const struct clk_ops tegra_clk_pllc_ops = {
.set_rate = clk_pllc_set_rate,
};
-const struct clk_ops tegra_clk_pllre_ops = {
+static const struct clk_ops tegra_clk_pllre_ops = {
.is_enabled = clk_pll_is_enabled,
.enable = clk_pll_iddq_enable,
.disable = clk_pll_iddq_disable,
@@ -1376,7 +1469,7 @@ const struct clk_ops tegra_clk_pllre_ops = {
.set_rate = clk_pllre_set_rate,
};
-const struct clk_ops tegra_clk_plle_tegra114_ops = {
+static const struct clk_ops tegra_clk_plle_tegra114_ops = {
.is_enabled = clk_pll_is_enabled,
.enable = clk_plle_tegra114_enable,
.disable = clk_plle_tegra114_disable,
@@ -1386,21 +1479,46 @@ const struct clk_ops tegra_clk_plle_tegra114_ops = {
struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
+ unsigned long flags,
struct tegra_clk_pll_params *pll_params,
- u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock)
{
struct tegra_clk_pll *pll;
- struct clk *clk;
+ struct clk *clk, *parent;
+ unsigned long parent_rate;
+ int err;
+ u32 val, val_iddq;
+
+ parent = __clk_lookup(parent_name);
+ if (!parent) {
+ WARN(1, "parent clk %s of %s must be registered first\n",
+ name, parent_name);
+ return ERR_PTR(-EINVAL);
+ }
if (!pll_params->pdiv_tohw)
return ERR_PTR(-EINVAL);
- pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
- pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
- freq_table, lock);
+ parent_rate = __clk_get_rate(parent);
+
+ pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
+
+ err = _setup_dynamic_ramp(pll_params, clk_base, parent_rate);
+ if (err)
+ return ERR_PTR(err);
+
+ val = readl_relaxed(clk_base + pll_params->base_reg);
+ val_iddq = readl_relaxed(clk_base + pll_params->iddq_reg);
+
+ if (val & PLL_BASE_ENABLE)
+ WARN_ON(val_iddq & BIT(pll_params->iddq_bit_idx));
+ else {
+ val_iddq |= BIT(pll_params->iddq_bit_idx);
+ writel_relaxed(val_iddq, clk_base + pll_params->iddq_reg);
+ }
+
+ pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+ pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
@@ -1414,19 +1532,19 @@ struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
+ unsigned long flags,
struct tegra_clk_pll_params *pll_params,
- u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock, unsigned long parent_rate)
{
u32 val;
struct tegra_clk_pll *pll;
struct clk *clk;
- pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_LOCK_MISC;
- pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
- freq_table, lock);
+ pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_LOCK_MISC;
+
+ pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
+
+ pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
@@ -1461,23 +1579,32 @@ struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
+ unsigned long flags,
struct tegra_clk_pll_params *pll_params,
- u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock)
{
struct tegra_clk_pll *pll;
- struct clk *clk;
+ struct clk *clk, *parent;
+ unsigned long parent_rate;
if (!pll_params->pdiv_tohw)
return ERR_PTR(-EINVAL);
- pll_flags |= TEGRA_PLL_BYPASS;
- pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
- pll_flags |= TEGRA_PLLM;
- pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
- freq_table, lock);
+ parent = __clk_lookup(parent_name);
+ if (!parent) {
+ WARN(1, "parent clk %s of %s must be registered first\n",
+ name, parent_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ parent_rate = __clk_get_rate(parent);
+
+ pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
+
+ pll_params->flags |= TEGRA_PLL_BYPASS;
+ pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+ pll_params->flags |= TEGRA_PLLM;
+ pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
@@ -1491,10 +1618,8 @@ struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name,
struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
+ unsigned long flags,
struct tegra_clk_pll_params *pll_params,
- u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock)
{
struct clk *parent, *clk;
@@ -1507,20 +1632,21 @@ struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
return ERR_PTR(-EINVAL);
parent = __clk_lookup(parent_name);
- if (IS_ERR(parent)) {
+ if (!parent) {
WARN(1, "parent clk %s of %s must be registered first\n",
name, parent_name);
return ERR_PTR(-EINVAL);
}
- pll_flags |= TEGRA_PLL_BYPASS;
- pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
- freq_table, lock);
+ parent_rate = __clk_get_rate(parent);
+
+ pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
+
+ pll_params->flags |= TEGRA_PLL_BYPASS;
+ pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
- parent_rate = __clk_get_rate(parent);
-
/*
* Most of PLLC register fields are shadowed, and can not be read
* directly from PLL h/w. Hence, actual PLLC boot state is unknown.
@@ -1567,17 +1693,15 @@ struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
struct clk *tegra_clk_register_plle_tegra114(const char *name,
const char *parent_name,
void __iomem *clk_base, unsigned long flags,
- unsigned long fixed_rate,
struct tegra_clk_pll_params *pll_params,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock)
{
struct tegra_clk_pll *pll;
struct clk *clk;
u32 val, val_aux;
- pll = _tegra_init_pll(clk_base, NULL, fixed_rate, pll_params,
- TEGRA_PLL_HAS_LOCK_ENABLE, freq_table, lock);
+ pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+ pll = _tegra_init_pll(clk_base, NULL, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
@@ -1587,11 +1711,13 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
val_aux = pll_readl(pll_params->aux_reg, pll);
if (val & PLL_BASE_ENABLE) {
- if (!(val_aux & PLLE_AUX_PLLRE_SEL))
+ if ((val_aux & PLLE_AUX_PLLRE_SEL) ||
+ (val_aux & PLLE_AUX_PLLP_SEL))
WARN(1, "pll_e enabled with unsupported parent %s\n",
- (val & PLLE_AUX_PLLP_SEL) ? "pllp_out0" : "pll_ref");
+ (val_aux & PLLE_AUX_PLLP_SEL) ? "pllp_out0" :
+ "pll_re_vco");
} else {
- val_aux |= PLLE_AUX_PLLRE_SEL;
+ val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
pll_writel(val, pll_params->aux_reg, pll);
}
@@ -1603,3 +1729,92 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
return clk;
}
#endif
+
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+static const struct clk_ops tegra_clk_pllss_ops = {
+ .is_enabled = clk_pll_is_enabled,
+ .enable = clk_pll_iddq_enable,
+ .disable = clk_pll_iddq_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_ramp_round_rate,
+ .set_rate = clk_pllxc_set_rate,
+};
+
+struct clk *tegra_clk_register_pllss(const char *name, const char *parent_name,
+ void __iomem *clk_base, unsigned long flags,
+ struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock)
+{
+ struct tegra_clk_pll *pll;
+ struct clk *clk, *parent;
+ struct tegra_clk_pll_freq_table cfg;
+ unsigned long parent_rate;
+ u32 val;
+ int i;
+
+ if (!pll_params->div_nmp)
+ return ERR_PTR(-EINVAL);
+
+ parent = __clk_lookup(parent_name);
+ if (!parent) {
+ WARN(1, "parent clk %s of %s must be registered first\n",
+ name, parent_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pll_params->flags = TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_USE_LOCK;
+ pll = _tegra_init_pll(clk_base, NULL, pll_params, lock);
+ if (IS_ERR(pll))
+ return ERR_CAST(pll);
+
+ val = pll_readl_base(pll);
+ val &= ~PLLSS_REF_SRC_SEL_MASK;
+ pll_writel_base(val, pll);
+
+ parent_rate = __clk_get_rate(parent);
+
+ pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
+
+ /* initialize PLL to minimum rate */
+
+ cfg.m = _pll_fixed_mdiv(pll_params, parent_rate);
+ cfg.n = cfg.m * pll_params->vco_min / parent_rate;
+
+ for (i = 0; pll_params->pdiv_tohw[i].pdiv; i++)
+ ;
+ if (!i) {
+ kfree(pll);
+ return ERR_PTR(-EINVAL);
+ }
+
+ cfg.p = pll_params->pdiv_tohw[i-1].hw_val;
+
+ _update_pll_mnp(pll, &cfg);
+
+ pll_writel_misc(PLLSS_MISC_DEFAULT, pll);
+ pll_writel(PLLSS_CFG_DEFAULT, pll_params->ext_misc_reg[0], pll);
+ pll_writel(PLLSS_CTRL1_DEFAULT, pll_params->ext_misc_reg[1], pll);
+ pll_writel(PLLSS_CTRL1_DEFAULT, pll_params->ext_misc_reg[2], pll);
+
+ val = pll_readl_base(pll);
+ if (val & PLL_BASE_ENABLE) {
+ if (val & BIT(pll_params->iddq_bit_idx)) {
+ WARN(1, "%s is on but IDDQ set\n", name);
+ kfree(pll);
+ return ERR_PTR(-EINVAL);
+ }
+ } else
+ val |= BIT(pll_params->iddq_bit_idx);
+
+ val &= ~PLLSS_LOCK_OVERRIDE;
+ pll_writel_base(val, pll);
+
+ clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
+ &tegra_clk_pllss_ops);
+
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+#endif
diff --git a/drivers/clk/tegra/clk-tegra-audio.c b/drivers/clk/tegra/clk-tegra-audio.c
new file mode 100644
index 000000000000..5c38aab2c5b8
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra-audio.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/clk/tegra.h>
+
+#include "clk.h"
+#include "clk-id.h"
+
+#define AUDIO_SYNC_CLK_I2S0 0x4a0
+#define AUDIO_SYNC_CLK_I2S1 0x4a4
+#define AUDIO_SYNC_CLK_I2S2 0x4a8
+#define AUDIO_SYNC_CLK_I2S3 0x4ac
+#define AUDIO_SYNC_CLK_I2S4 0x4b0
+#define AUDIO_SYNC_CLK_SPDIF 0x4b4
+
+#define AUDIO_SYNC_DOUBLER 0x49c
+
+#define PLLA_OUT 0xb4
+
+struct tegra_sync_source_initdata {
+ char *name;
+ unsigned long rate;
+ unsigned long max_rate;
+ int clk_id;
+};
+
+#define SYNC(_name) \
+ {\
+ .name = #_name,\
+ .rate = 24000000,\
+ .max_rate = 24000000,\
+ .clk_id = tegra_clk_ ## _name,\
+ }
+
+struct tegra_audio_clk_initdata {
+ char *gate_name;
+ char *mux_name;
+ u32 offset;
+ int gate_clk_id;
+ int mux_clk_id;
+};
+
+#define AUDIO(_name, _offset) \
+ {\
+ .gate_name = #_name,\
+ .mux_name = #_name"_mux",\
+ .offset = _offset,\
+ .gate_clk_id = tegra_clk_ ## _name,\
+ .mux_clk_id = tegra_clk_ ## _name ## _mux,\
+ }
+
+struct tegra_audio2x_clk_initdata {
+ char *parent;
+ char *gate_name;
+ char *name_2x;
+ char *div_name;
+ int clk_id;
+ int clk_num;
+ u8 div_offset;
+};
+
+#define AUDIO2X(_name, _num, _offset) \
+ {\
+ .parent = #_name,\
+ .gate_name = #_name"_2x",\
+ .name_2x = #_name"_doubler",\
+ .div_name = #_name"_div",\
+ .clk_id = tegra_clk_ ## _name ## _2x,\
+ .clk_num = _num,\
+ .div_offset = _offset,\
+ }
+
+static DEFINE_SPINLOCK(clk_doubler_lock);
+
+static const char *mux_audio_sync_clk[] = { "spdif_in_sync", "i2s0_sync",
+ "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync",
+};
+
+static struct tegra_sync_source_initdata sync_source_clks[] __initdata = {
+ SYNC(spdif_in_sync),
+ SYNC(i2s0_sync),
+ SYNC(i2s1_sync),
+ SYNC(i2s2_sync),
+ SYNC(i2s3_sync),
+ SYNC(i2s4_sync),
+ SYNC(vimclk_sync),
+};
+
+static struct tegra_audio_clk_initdata audio_clks[] = {
+ AUDIO(audio0, AUDIO_SYNC_CLK_I2S0),
+ AUDIO(audio1, AUDIO_SYNC_CLK_I2S1),
+ AUDIO(audio2, AUDIO_SYNC_CLK_I2S2),
+ AUDIO(audio3, AUDIO_SYNC_CLK_I2S3),
+ AUDIO(audio4, AUDIO_SYNC_CLK_I2S4),
+ AUDIO(spdif, AUDIO_SYNC_CLK_SPDIF),
+};
+
+static struct tegra_audio2x_clk_initdata audio2x_clks[] = {
+ AUDIO2X(audio0, 113, 24),
+ AUDIO2X(audio1, 114, 25),
+ AUDIO2X(audio2, 115, 26),
+ AUDIO2X(audio3, 116, 27),
+ AUDIO2X(audio4, 117, 28),
+ AUDIO2X(spdif, 118, 29),
+};
+
+void __init tegra_audio_clk_init(void __iomem *clk_base,
+ void __iomem *pmc_base, struct tegra_clk *tegra_clks,
+ struct tegra_clk_pll_params *pll_a_params)
+{
+ struct clk *clk;
+ struct clk **dt_clk;
+ int i;
+
+ /* PLLA */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_pll_a, tegra_clks);
+ if (dt_clk) {
+ clk = tegra_clk_register_pll("pll_a", "pll_p_out1", clk_base,
+ pmc_base, 0, pll_a_params, NULL);
+ *dt_clk = clk;
+ }
+
+ /* PLLA_OUT0 */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_pll_a_out0, tegra_clks);
+ if (dt_clk) {
+ clk = tegra_clk_register_divider("pll_a_out0_div", "pll_a",
+ clk_base + PLLA_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_a_out0", "pll_a_out0_div",
+ clk_base + PLLA_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ *dt_clk = clk;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sync_source_clks); i++) {
+ struct tegra_sync_source_initdata *data;
+
+ data = &sync_source_clks[i];
+
+ dt_clk = tegra_lookup_dt_id(data->clk_id, tegra_clks);
+ if (!dt_clk)
+ continue;
+
+ clk = tegra_clk_register_sync_source(data->name,
+ data->rate, data->max_rate);
+ *dt_clk = clk;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(audio_clks); i++) {
+ struct tegra_audio_clk_initdata *data;
+
+ data = &audio_clks[i];
+ dt_clk = tegra_lookup_dt_id(data->mux_clk_id, tegra_clks);
+
+ if (!dt_clk)
+ continue;
+ clk = clk_register_mux(NULL, data->mux_name, mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
+ clk_base + data->offset, 0, 3, 0,
+ NULL);
+ *dt_clk = clk;
+
+ dt_clk = tegra_lookup_dt_id(data->gate_clk_id, tegra_clks);
+ if (!dt_clk)
+ continue;
+
+ clk = clk_register_gate(NULL, data->gate_name, data->mux_name,
+ 0, clk_base + data->offset, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ *dt_clk = clk;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(audio2x_clks); i++) {
+ struct tegra_audio2x_clk_initdata *data;
+
+ data = &audio2x_clks[i];
+ dt_clk = tegra_lookup_dt_id(data->clk_id, tegra_clks);
+ if (!dt_clk)
+ continue;
+
+ clk = clk_register_fixed_factor(NULL, data->name_2x,
+ data->parent, CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider(data->div_name,
+ data->name_2x, clk_base + AUDIO_SYNC_DOUBLER,
+ 0, 0, data->div_offset, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate(data->gate_name,
+ data->div_name, TEGRA_PERIPH_NO_RESET,
+ clk_base, CLK_SET_RATE_PARENT, data->clk_num,
+ periph_clk_enb_refcnt);
+ *dt_clk = clk;
+ }
+}
+
diff --git a/drivers/clk/tegra/clk-tegra-fixed.c b/drivers/clk/tegra/clk-tegra-fixed.c
new file mode 100644
index 000000000000..f3b773833429
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra-fixed.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/clk/tegra.h>
+
+#include "clk.h"
+#include "clk-id.h"
+
+#define OSC_CTRL 0x50
+#define OSC_CTRL_OSC_FREQ_SHIFT 28
+#define OSC_CTRL_PLL_REF_DIV_SHIFT 26
+
+int __init tegra_osc_clk_init(void __iomem *clk_base,
+ struct tegra_clk *tegra_clks,
+ unsigned long *input_freqs, int num,
+ unsigned long *osc_freq,
+ unsigned long *pll_ref_freq)
+{
+ struct clk *clk;
+ struct clk **dt_clk;
+ u32 val, pll_ref_div;
+ unsigned osc_idx;
+
+ val = readl_relaxed(clk_base + OSC_CTRL);
+ osc_idx = val >> OSC_CTRL_OSC_FREQ_SHIFT;
+
+ if (osc_idx < num)
+ *osc_freq = input_freqs[osc_idx];
+ else
+ *osc_freq = 0;
+
+ if (!*osc_freq) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m, tegra_clks);
+ if (!dt_clk)
+ return 0;
+
+ clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT,
+ *osc_freq);
+ *dt_clk = clk;
+
+ /* pll_ref */
+ val = (val >> OSC_CTRL_PLL_REF_DIV_SHIFT) & 3;
+ pll_ref_div = 1 << val;
+ dt_clk = tegra_lookup_dt_id(tegra_clk_pll_ref, tegra_clks);
+ if (!dt_clk)
+ return 0;
+
+ clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
+ 0, 1, pll_ref_div);
+ *dt_clk = clk;
+
+ if (pll_ref_freq)
+ *pll_ref_freq = *osc_freq / pll_ref_div;
+
+ return 0;
+}
+
+void __init tegra_fixed_clk_init(struct tegra_clk *tegra_clks)
+{
+ struct clk *clk;
+ struct clk **dt_clk;
+
+ /* clk_32k */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_clk_32k, tegra_clks);
+ if (dt_clk) {
+ clk = clk_register_fixed_rate(NULL, "clk_32k", NULL,
+ CLK_IS_ROOT, 32768);
+ *dt_clk = clk;
+ }
+
+ /* clk_m_div2 */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m_div2, tegra_clks);
+ if (dt_clk) {
+ clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m",
+ CLK_SET_RATE_PARENT, 1, 2);
+ *dt_clk = clk;
+ }
+
+ /* clk_m_div4 */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m_div4, tegra_clks);
+ if (dt_clk) {
+ clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m",
+ CLK_SET_RATE_PARENT, 1, 4);
+ *dt_clk = clk;
+ }
+}
+
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
new file mode 100644
index 000000000000..5c35885f4a7c
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -0,0 +1,674 @@
+/*
+ * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/clk/tegra.h>
+
+#include "clk.h"
+#include "clk-id.h"
+
+#define CLK_SOURCE_I2S0 0x1d8
+#define CLK_SOURCE_I2S1 0x100
+#define CLK_SOURCE_I2S2 0x104
+#define CLK_SOURCE_NDFLASH 0x160
+#define CLK_SOURCE_I2S3 0x3bc
+#define CLK_SOURCE_I2S4 0x3c0
+#define CLK_SOURCE_SPDIF_OUT 0x108
+#define CLK_SOURCE_SPDIF_IN 0x10c
+#define CLK_SOURCE_PWM 0x110
+#define CLK_SOURCE_ADX 0x638
+#define CLK_SOURCE_ADX1 0x670
+#define CLK_SOURCE_AMX 0x63c
+#define CLK_SOURCE_AMX1 0x674
+#define CLK_SOURCE_HDA 0x428
+#define CLK_SOURCE_HDA2CODEC_2X 0x3e4
+#define CLK_SOURCE_SBC1 0x134
+#define CLK_SOURCE_SBC2 0x118
+#define CLK_SOURCE_SBC3 0x11c
+#define CLK_SOURCE_SBC4 0x1b4
+#define CLK_SOURCE_SBC5 0x3c8
+#define CLK_SOURCE_SBC6 0x3cc
+#define CLK_SOURCE_SATA_OOB 0x420
+#define CLK_SOURCE_SATA 0x424
+#define CLK_SOURCE_NDSPEED 0x3f8
+#define CLK_SOURCE_VFIR 0x168
+#define CLK_SOURCE_SDMMC1 0x150
+#define CLK_SOURCE_SDMMC2 0x154
+#define CLK_SOURCE_SDMMC3 0x1bc
+#define CLK_SOURCE_SDMMC4 0x164
+#define CLK_SOURCE_CVE 0x140
+#define CLK_SOURCE_TVO 0x188
+#define CLK_SOURCE_TVDAC 0x194
+#define CLK_SOURCE_VDE 0x1c8
+#define CLK_SOURCE_CSITE 0x1d4
+#define CLK_SOURCE_LA 0x1f8
+#define CLK_SOURCE_TRACE 0x634
+#define CLK_SOURCE_OWR 0x1cc
+#define CLK_SOURCE_NOR 0x1d0
+#define CLK_SOURCE_MIPI 0x174
+#define CLK_SOURCE_I2C1 0x124
+#define CLK_SOURCE_I2C2 0x198
+#define CLK_SOURCE_I2C3 0x1b8
+#define CLK_SOURCE_I2C4 0x3c4
+#define CLK_SOURCE_I2C5 0x128
+#define CLK_SOURCE_I2C6 0x65c
+#define CLK_SOURCE_UARTA 0x178
+#define CLK_SOURCE_UARTB 0x17c
+#define CLK_SOURCE_UARTC 0x1a0
+#define CLK_SOURCE_UARTD 0x1c0
+#define CLK_SOURCE_UARTE 0x1c4
+#define CLK_SOURCE_3D 0x158
+#define CLK_SOURCE_2D 0x15c
+#define CLK_SOURCE_MPE 0x170
+#define CLK_SOURCE_UARTE 0x1c4
+#define CLK_SOURCE_VI_SENSOR 0x1a8
+#define CLK_SOURCE_VI 0x148
+#define CLK_SOURCE_EPP 0x16c
+#define CLK_SOURCE_MSENC 0x1f0
+#define CLK_SOURCE_TSEC 0x1f4
+#define CLK_SOURCE_HOST1X 0x180
+#define CLK_SOURCE_HDMI 0x18c
+#define CLK_SOURCE_DISP1 0x138
+#define CLK_SOURCE_DISP2 0x13c
+#define CLK_SOURCE_CILAB 0x614
+#define CLK_SOURCE_CILCD 0x618
+#define CLK_SOURCE_CILE 0x61c
+#define CLK_SOURCE_DSIALP 0x620
+#define CLK_SOURCE_DSIBLP 0x624
+#define CLK_SOURCE_TSENSOR 0x3b8
+#define CLK_SOURCE_D_AUDIO 0x3d0
+#define CLK_SOURCE_DAM0 0x3d8
+#define CLK_SOURCE_DAM1 0x3dc
+#define CLK_SOURCE_DAM2 0x3e0
+#define CLK_SOURCE_ACTMON 0x3e8
+#define CLK_SOURCE_EXTERN1 0x3ec
+#define CLK_SOURCE_EXTERN2 0x3f0
+#define CLK_SOURCE_EXTERN3 0x3f4
+#define CLK_SOURCE_I2CSLOW 0x3fc
+#define CLK_SOURCE_SE 0x42c
+#define CLK_SOURCE_MSELECT 0x3b4
+#define CLK_SOURCE_DFLL_REF 0x62c
+#define CLK_SOURCE_DFLL_SOC 0x630
+#define CLK_SOURCE_SOC_THERM 0x644
+#define CLK_SOURCE_XUSB_HOST_SRC 0x600
+#define CLK_SOURCE_XUSB_FALCON_SRC 0x604
+#define CLK_SOURCE_XUSB_FS_SRC 0x608
+#define CLK_SOURCE_XUSB_SS_SRC 0x610
+#define CLK_SOURCE_XUSB_DEV_SRC 0x60c
+#define CLK_SOURCE_ISP 0x144
+#define CLK_SOURCE_SOR0 0x414
+#define CLK_SOURCE_DPAUX 0x418
+#define CLK_SOURCE_SATA_OOB 0x420
+#define CLK_SOURCE_SATA 0x424
+#define CLK_SOURCE_ENTROPY 0x628
+#define CLK_SOURCE_VI_SENSOR2 0x658
+#define CLK_SOURCE_HDMI_AUDIO 0x668
+#define CLK_SOURCE_VIC03 0x678
+#define CLK_SOURCE_CLK72MHZ 0x66c
+
+#define MASK(x) (BIT(x) - 1)
+
+#define MUX(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ 30, MASK(2), 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, \
+ _clk_num, _gate_flags, _clk_id, _parents##_idx, 0,\
+ NULL)
+
+#define MUX_FLAGS(_name, _parents, _offset,\
+ _clk_num, _gate_flags, _clk_id, flags)\
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ 30, MASK(2), 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP,\
+ _clk_num, _gate_flags, _clk_id, _parents##_idx, flags,\
+ NULL)
+
+#define MUX8(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP,\
+ _clk_num, _gate_flags, _clk_id, _parents##_idx, 0,\
+ NULL)
+
+#define MUX8_NOGATE_LOCK(_name, _parents, _offset, _clk_id, _lock) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset, \
+ 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP,\
+ 0, TEGRA_PERIPH_NO_GATE, _clk_id,\
+ _parents##_idx, 0, _lock)
+
+#define INT(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ 30, MASK(2), 0, 0, 8, 1, TEGRA_DIVIDER_INT| \
+ TEGRA_DIVIDER_ROUND_UP, _clk_num, _gate_flags,\
+ _clk_id, _parents##_idx, 0, NULL)
+
+#define INT_FLAGS(_name, _parents, _offset,\
+ _clk_num, _gate_flags, _clk_id, flags)\
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ 30, MASK(2), 0, 0, 8, 1, TEGRA_DIVIDER_INT| \
+ TEGRA_DIVIDER_ROUND_UP, _clk_num, _gate_flags,\
+ _clk_id, _parents##_idx, flags, NULL)
+
+#define INT8(_name, _parents, _offset,\
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_INT| \
+ TEGRA_DIVIDER_ROUND_UP, _clk_num, _gate_flags,\
+ _clk_id, _parents##_idx, 0, NULL)
+
+#define UART(_name, _parents, _offset,\
+ _clk_num, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ 30, MASK(2), 0, 0, 16, 1, TEGRA_DIVIDER_UART| \
+ TEGRA_DIVIDER_ROUND_UP, _clk_num, 0, _clk_id,\
+ _parents##_idx, 0, NULL)
+
+#define I2C(_name, _parents, _offset,\
+ _clk_num, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ 30, MASK(2), 0, 0, 16, 0, TEGRA_DIVIDER_ROUND_UP,\
+ _clk_num, 0, _clk_id, _parents##_idx, 0, NULL)
+
+#define XUSB(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset, \
+ 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_INT| \
+ TEGRA_DIVIDER_ROUND_UP, _clk_num, _gate_flags,\
+ _clk_id, _parents##_idx, 0, NULL)
+
+#define AUDIO(_name, _offset, _clk_num,\
+ _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, mux_d_audio_clk, \
+ _offset, 16, 0xE01F, 0, 0, 8, 1, \
+ TEGRA_DIVIDER_ROUND_UP, _clk_num, _gate_flags, \
+ _clk_id, mux_d_audio_clk_idx, 0, NULL)
+
+#define NODIV(_name, _parents, _offset, \
+ _mux_shift, _mux_mask, _clk_num, \
+ _gate_flags, _clk_id, _lock) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ _mux_shift, _mux_mask, 0, 0, 0, 0, 0,\
+ _clk_num, (_gate_flags) | TEGRA_PERIPH_NO_DIV,\
+ _clk_id, _parents##_idx, 0, _lock)
+
+#define GATE(_name, _parent_name, \
+ _clk_num, _gate_flags, _clk_id, _flags) \
+ { \
+ .name = _name, \
+ .clk_id = _clk_id, \
+ .p.parent_name = _parent_name, \
+ .periph = TEGRA_CLK_PERIPH(0, 0, 0, 0, 0, 0, 0, \
+ _clk_num, _gate_flags, 0, NULL), \
+ .flags = _flags \
+ }
+
+#define PLLP_BASE 0xa0
+#define PLLP_MISC 0xac
+#define PLLP_OUTA 0xa4
+#define PLLP_OUTB 0xa8
+#define PLLP_OUTC 0x67c
+
+#define PLL_BASE_LOCK BIT(27)
+#define PLL_MISC_LOCK_ENABLE 18
+
+static DEFINE_SPINLOCK(PLLP_OUTA_lock);
+static DEFINE_SPINLOCK(PLLP_OUTB_lock);
+static DEFINE_SPINLOCK(PLLP_OUTC_lock);
+static DEFINE_SPINLOCK(sor0_lock);
+
+#define MUX_I2S_SPDIF(_id) \
+static const char *mux_pllaout0_##_id##_2x_pllp_clkm[] = { "pll_a_out0", \
+ #_id, "pll_p",\
+ "clk_m"};
+MUX_I2S_SPDIF(audio0)
+MUX_I2S_SPDIF(audio1)
+MUX_I2S_SPDIF(audio2)
+MUX_I2S_SPDIF(audio3)
+MUX_I2S_SPDIF(audio4)
+MUX_I2S_SPDIF(audio)
+
+#define mux_pllaout0_audio0_2x_pllp_clkm_idx NULL
+#define mux_pllaout0_audio1_2x_pllp_clkm_idx NULL
+#define mux_pllaout0_audio2_2x_pllp_clkm_idx NULL
+#define mux_pllaout0_audio3_2x_pllp_clkm_idx NULL
+#define mux_pllaout0_audio4_2x_pllp_clkm_idx NULL
+#define mux_pllaout0_audio_2x_pllp_clkm_idx NULL
+
+static const char *mux_pllp_pllc_pllm_clkm[] = {
+ "pll_p", "pll_c", "pll_m", "clk_m"
+};
+#define mux_pllp_pllc_pllm_clkm_idx NULL
+
+static const char *mux_pllp_pllc_pllm[] = { "pll_p", "pll_c", "pll_m" };
+#define mux_pllp_pllc_pllm_idx NULL
+
+static const char *mux_pllp_pllc_clk32_clkm[] = {
+ "pll_p", "pll_c", "clk_32k", "clk_m"
+};
+#define mux_pllp_pllc_clk32_clkm_idx NULL
+
+static const char *mux_plla_pllc_pllp_clkm[] = {
+ "pll_a_out0", "pll_c", "pll_p", "clk_m"
+};
+#define mux_plla_pllc_pllp_clkm_idx mux_pllp_pllc_pllm_clkm_idx
+
+static const char *mux_pllp_pllc2_c_c3_pllm_clkm[] = {
+ "pll_p", "pll_c2", "pll_c", "pll_c3", "pll_m", "clk_m"
+};
+static u32 mux_pllp_pllc2_c_c3_pllm_clkm_idx[] = {
+ [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6,
+};
+
+static const char *mux_pllp_clkm[] = {
+ "pll_p", "clk_m"
+};
+static u32 mux_pllp_clkm_idx[] = {
+ [0] = 0, [1] = 3,
+};
+
+static const char *mux_pllm_pllc2_c_c3_pllp_plla[] = {
+ "pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0"
+};
+#define mux_pllm_pllc2_c_c3_pllp_plla_idx mux_pllp_pllc2_c_c3_pllm_clkm_idx
+
+static const char *mux_pllp_pllm_plld_plla_pllc_plld2_clkm[] = {
+ "pll_p", "pll_m", "pll_d_out0", "pll_a_out0", "pll_c",
+ "pll_d2_out0", "clk_m"
+};
+#define mux_pllp_pllm_plld_plla_pllc_plld2_clkm_idx NULL
+
+static const char *mux_pllm_pllc_pllp_plla[] = {
+ "pll_m", "pll_c", "pll_p", "pll_a_out0"
+};
+#define mux_pllm_pllc_pllp_plla_idx mux_pllp_pllc_pllm_clkm_idx
+
+static const char *mux_pllp_pllc_clkm[] = {
+ "pll_p", "pll_c", "pll_m"
+};
+static u32 mux_pllp_pllc_clkm_idx[] = {
+ [0] = 0, [1] = 1, [2] = 3,
+};
+
+static const char *mux_pllp_pllc_clkm_clk32[] = {
+ "pll_p", "pll_c", "clk_m", "clk_32k"
+};
+#define mux_pllp_pllc_clkm_clk32_idx NULL
+
+static const char *mux_plla_clk32_pllp_clkm_plle[] = {
+ "pll_a_out0", "clk_32k", "pll_p", "clk_m", "pll_e_out0"
+};
+#define mux_plla_clk32_pllp_clkm_plle_idx NULL
+
+static const char *mux_clkm_pllp_pllc_pllre[] = {
+ "clk_m", "pll_p", "pll_c", "pll_re_out"
+};
+static u32 mux_clkm_pllp_pllc_pllre_idx[] = {
+ [0] = 0, [1] = 1, [2] = 3, [3] = 5,
+};
+
+static const char *mux_clkm_48M_pllp_480M[] = {
+ "clk_m", "pll_u_48M", "pll_p", "pll_u_480M"
+};
+#define mux_clkm_48M_pllp_480M_idx NULL
+
+static const char *mux_clkm_pllre_clk32_480M_pllc_ref[] = {
+ "clk_m", "pll_re_out", "clk_32k", "pll_u_480M", "pll_c", "pll_ref"
+};
+static u32 mux_clkm_pllre_clk32_480M_pllc_ref_idx[] = {
+ [0] = 0, [1] = 1, [2] = 3, [3] = 3, [4] = 4, [5] = 7,
+};
+
+static const char *mux_d_audio_clk[] = {
+ "pll_a_out0", "pll_p", "clk_m", "spdif_in_sync", "i2s0_sync",
+ "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync",
+};
+static u32 mux_d_audio_clk_idx[] = {
+ [0] = 0, [1] = 0x8000, [2] = 0xc000, [3] = 0xE000, [4] = 0xE001,
+ [5] = 0xE002, [6] = 0xE003, [7] = 0xE004, [8] = 0xE005, [9] = 0xE007,
+};
+
+static const char *mux_pllp_plld_pllc_clkm[] = {
+ "pll_p", "pll_d_out0", "pll_c", "clk_m"
+};
+#define mux_pllp_plld_pllc_clkm_idx NULL
+static const char *mux_pllm_pllc_pllp_plla_clkm_pllc4[] = {
+ "pll_m", "pll_c", "pll_p", "pll_a_out0", "clk_m", "pll_c4",
+};
+static u32 mux_pllm_pllc_pllp_plla_clkm_pllc4_idx[] = {
+ [0] = 0, [1] = 1, [2] = 3, [3] = 3, [4] = 6, [5] = 7,
+};
+
+static const char *mux_pllp_clkm1[] = {
+ "pll_p", "clk_m",
+};
+#define mux_pllp_clkm1_idx NULL
+
+static const char *mux_pllp3_pllc_clkm[] = {
+ "pll_p_out3", "pll_c", "pll_c2", "clk_m",
+};
+#define mux_pllp3_pllc_clkm_idx NULL
+
+static const char *mux_pllm_pllc_pllp_plla_pllc2_c3_clkm[] = {
+ "pll_m", "pll_c", "pll_p", "pll_a", "pll_c2", "pll_c3", "clk_m"
+};
+static u32 mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx[] = {
+ [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6,
+};
+
+static const char *mux_pllm_pllc2_c_c3_pllp_plla_pllc4[] = {
+ "pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0", "pll_c4",
+};
+static u32 mux_pllm_pllc2_c_c3_pllp_plla_pllc4_idx[] = {
+ [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6, [6] = 7,
+};
+
+static const char *mux_clkm_plldp_sor0lvds[] = {
+ "clk_m", "pll_dp", "sor0_lvds",
+};
+#define mux_clkm_plldp_sor0lvds_idx NULL
+
+static struct tegra_periph_init_data periph_clks[] = {
+ AUDIO("d_audio", CLK_SOURCE_D_AUDIO, 106, TEGRA_PERIPH_ON_APB, tegra_clk_d_audio),
+ AUDIO("dam0", CLK_SOURCE_DAM0, 108, TEGRA_PERIPH_ON_APB, tegra_clk_dam0),
+ AUDIO("dam1", CLK_SOURCE_DAM1, 109, TEGRA_PERIPH_ON_APB, tegra_clk_dam1),
+ AUDIO("dam2", CLK_SOURCE_DAM2, 110, TEGRA_PERIPH_ON_APB, tegra_clk_dam2),
+ I2C("i2c1", mux_pllp_clkm, CLK_SOURCE_I2C1, 12, tegra_clk_i2c1),
+ I2C("i2c2", mux_pllp_clkm, CLK_SOURCE_I2C2, 54, tegra_clk_i2c2),
+ I2C("i2c3", mux_pllp_clkm, CLK_SOURCE_I2C3, 67, tegra_clk_i2c3),
+ I2C("i2c4", mux_pllp_clkm, CLK_SOURCE_I2C4, 103, tegra_clk_i2c4),
+ I2C("i2c5", mux_pllp_clkm, CLK_SOURCE_I2C5, 47, tegra_clk_i2c5),
+ INT("vde", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_VDE, 61, 0, tegra_clk_vde),
+ INT("vi", mux_pllm_pllc_pllp_plla, CLK_SOURCE_VI, 20, 0, tegra_clk_vi),
+ INT("epp", mux_pllm_pllc_pllp_plla, CLK_SOURCE_EPP, 19, 0, tegra_clk_epp),
+ INT("host1x", mux_pllm_pllc_pllp_plla, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x),
+ INT("mpe", mux_pllm_pllc_pllp_plla, CLK_SOURCE_MPE, 60, 0, tegra_clk_mpe),
+ INT("2d", mux_pllm_pllc_pllp_plla, CLK_SOURCE_2D, 21, 0, tegra_clk_gr2d),
+ INT("3d", mux_pllm_pllc_pllp_plla, CLK_SOURCE_3D, 24, 0, tegra_clk_gr3d),
+ INT8("vde", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_VDE, 61, 0, tegra_clk_vde_8),
+ INT8("vi", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI, 20, 0, tegra_clk_vi_8),
+ INT8("vi", mux_pllm_pllc2_c_c3_pllp_plla_pllc4, CLK_SOURCE_VI, 20, 0, tegra_clk_vi_9),
+ INT8("epp", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_EPP, 19, 0, tegra_clk_epp_8),
+ INT8("msenc", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_MSENC, 91, TEGRA_PERIPH_WAR_1005168, tegra_clk_msenc),
+ INT8("tsec", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_TSEC, 83, 0, tegra_clk_tsec),
+ INT8("host1x", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_8),
+ INT8("se", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se),
+ INT8("2d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_2D, 21, 0, tegra_clk_gr2d_8),
+ INT8("3d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_3D, 24, 0, tegra_clk_gr3d_8),
+ INT8("vic03", mux_pllm_pllc_pllp_plla_pllc2_c3_clkm, CLK_SOURCE_VIC03, 178, 0, tegra_clk_vic03),
+ INT_FLAGS("mselect", mux_pllp_clkm, CLK_SOURCE_MSELECT, 99, 0, tegra_clk_mselect, CLK_IGNORE_UNUSED),
+ MUX("i2s0", mux_pllaout0_audio0_2x_pllp_clkm, CLK_SOURCE_I2S0, 30, TEGRA_PERIPH_ON_APB, tegra_clk_i2s0),
+ MUX("i2s1", mux_pllaout0_audio1_2x_pllp_clkm, CLK_SOURCE_I2S1, 11, TEGRA_PERIPH_ON_APB, tegra_clk_i2s1),
+ MUX("i2s2", mux_pllaout0_audio2_2x_pllp_clkm, CLK_SOURCE_I2S2, 18, TEGRA_PERIPH_ON_APB, tegra_clk_i2s2),
+ MUX("i2s3", mux_pllaout0_audio3_2x_pllp_clkm, CLK_SOURCE_I2S3, 101, TEGRA_PERIPH_ON_APB, tegra_clk_i2s3),
+ MUX("i2s4", mux_pllaout0_audio4_2x_pllp_clkm, CLK_SOURCE_I2S4, 102, TEGRA_PERIPH_ON_APB, tegra_clk_i2s4),
+ MUX("spdif_out", mux_pllaout0_audio_2x_pllp_clkm, CLK_SOURCE_SPDIF_OUT, 10, TEGRA_PERIPH_ON_APB, tegra_clk_spdif_out),
+ MUX("spdif_in", mux_pllp_pllc_pllm, CLK_SOURCE_SPDIF_IN, 10, TEGRA_PERIPH_ON_APB, tegra_clk_spdif_in),
+ MUX("pwm", mux_pllp_pllc_clk32_clkm, CLK_SOURCE_PWM, 17, TEGRA_PERIPH_ON_APB, tegra_clk_pwm),
+ MUX("adx", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX, 154, TEGRA_PERIPH_ON_APB, tegra_clk_adx),
+ MUX("amx", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX, 153, TEGRA_PERIPH_ON_APB, tegra_clk_amx),
+ MUX("hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA, 125, TEGRA_PERIPH_ON_APB, tegra_clk_hda),
+ MUX("hda2codec_2x", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, TEGRA_PERIPH_ON_APB, tegra_clk_hda2codec_2x),
+ MUX("vfir", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_VFIR, 7, TEGRA_PERIPH_ON_APB, tegra_clk_vfir),
+ MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1),
+ MUX("sdmmc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2),
+ MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3),
+ MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4),
+ MUX("la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, TEGRA_PERIPH_ON_APB, tegra_clk_la),
+ MUX("trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, TEGRA_PERIPH_ON_APB, tegra_clk_trace),
+ MUX("owr", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, TEGRA_PERIPH_ON_APB, tegra_clk_owr),
+ MUX("nor", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_NOR, 42, 0, tegra_clk_nor),
+ MUX("mipi", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_MIPI, 50, TEGRA_PERIPH_ON_APB, tegra_clk_mipi),
+ MUX("vi_sensor", mux_pllm_pllc_pllp_plla, CLK_SOURCE_VI_SENSOR, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor),
+ MUX("cilab", mux_pllp_pllc_clkm, CLK_SOURCE_CILAB, 144, 0, tegra_clk_cilab),
+ MUX("cilcd", mux_pllp_pllc_clkm, CLK_SOURCE_CILCD, 145, 0, tegra_clk_cilcd),
+ MUX("cile", mux_pllp_pllc_clkm, CLK_SOURCE_CILE, 146, 0, tegra_clk_cile),
+ MUX("dsialp", mux_pllp_pllc_clkm, CLK_SOURCE_DSIALP, 147, 0, tegra_clk_dsialp),
+ MUX("dsiblp", mux_pllp_pllc_clkm, CLK_SOURCE_DSIBLP, 148, 0, tegra_clk_dsiblp),
+ MUX("tsensor", mux_pllp_pllc_clkm_clk32, CLK_SOURCE_TSENSOR, 100, TEGRA_PERIPH_ON_APB, tegra_clk_tsensor),
+ MUX("actmon", mux_pllp_pllc_clk32_clkm, CLK_SOURCE_ACTMON, 119, 0, tegra_clk_actmon),
+ MUX("dfll_ref", mux_pllp_clkm, CLK_SOURCE_DFLL_REF, 155, TEGRA_PERIPH_ON_APB, tegra_clk_dfll_ref),
+ MUX("dfll_soc", mux_pllp_clkm, CLK_SOURCE_DFLL_SOC, 155, TEGRA_PERIPH_ON_APB, tegra_clk_dfll_soc),
+ MUX("i2cslow", mux_pllp_pllc_clk32_clkm, CLK_SOURCE_I2CSLOW, 81, TEGRA_PERIPH_ON_APB, tegra_clk_i2cslow),
+ MUX("sbc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1),
+ MUX("sbc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2),
+ MUX("sbc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3),
+ MUX("sbc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC4, 68, TEGRA_PERIPH_ON_APB, tegra_clk_sbc4),
+ MUX("sbc5", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC5, 104, TEGRA_PERIPH_ON_APB, tegra_clk_sbc5),
+ MUX("sbc6", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC6, 105, TEGRA_PERIPH_ON_APB, tegra_clk_sbc6),
+ MUX("cve", mux_pllp_plld_pllc_clkm, CLK_SOURCE_CVE, 49, 0, tegra_clk_cve),
+ MUX("tvo", mux_pllp_plld_pllc_clkm, CLK_SOURCE_TVO, 49, 0, tegra_clk_tvo),
+ MUX("tvdac", mux_pllp_plld_pllc_clkm, CLK_SOURCE_TVDAC, 53, 0, tegra_clk_tvdac),
+ MUX("ndflash", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_NDFLASH, 13, TEGRA_PERIPH_ON_APB, tegra_clk_ndflash),
+ MUX("ndspeed", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_NDSPEED, 80, TEGRA_PERIPH_ON_APB, tegra_clk_ndspeed),
+ MUX("sata_oob", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SATA_OOB, 123, TEGRA_PERIPH_ON_APB, tegra_clk_sata_oob),
+ MUX("sata", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SATA, 124, TEGRA_PERIPH_ON_APB, tegra_clk_sata),
+ MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
+ MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
+ MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
+ MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8),
+ MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8),
+ MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8),
+ MUX8("sbc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC4, 68, TEGRA_PERIPH_ON_APB, tegra_clk_sbc4_8),
+ MUX8("sbc5", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC5, 104, TEGRA_PERIPH_ON_APB, tegra_clk_sbc5_8),
+ MUX8("sbc6", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC6, 105, TEGRA_PERIPH_ON_APB, tegra_clk_sbc6_8),
+ MUX8("ndflash", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_NDFLASH, 13, TEGRA_PERIPH_ON_APB, tegra_clk_ndflash_8),
+ MUX8("ndspeed", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_NDSPEED, 80, TEGRA_PERIPH_ON_APB, tegra_clk_ndspeed_8),
+ MUX8("hdmi", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_HDMI, 51, 0, tegra_clk_hdmi),
+ MUX8("extern1", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN1, 120, 0, tegra_clk_extern1),
+ MUX8("extern2", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN2, 121, 0, tegra_clk_extern2),
+ MUX8("extern3", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN3, 122, 0, tegra_clk_extern3),
+ MUX8("soc_therm", mux_pllm_pllc_pllp_plla, CLK_SOURCE_SOC_THERM, 78, TEGRA_PERIPH_ON_APB, tegra_clk_soc_therm),
+ MUX8("vi_sensor", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor_8),
+ MUX8("isp", mux_pllm_pllc_pllp_plla_clkm_pllc4, CLK_SOURCE_ISP, 23, TEGRA_PERIPH_ON_APB, tegra_clk_isp_8),
+ MUX8("entropy", mux_pllp_clkm1, CLK_SOURCE_ENTROPY, 149, 0, tegra_clk_entropy),
+ MUX8("hdmi_audio", mux_pllp3_pllc_clkm, CLK_SOURCE_HDMI_AUDIO, 176, TEGRA_PERIPH_NO_RESET, tegra_clk_hdmi_audio),
+ MUX8("clk72mhz", mux_pllp3_pllc_clkm, CLK_SOURCE_CLK72MHZ, 177, TEGRA_PERIPH_NO_RESET, tegra_clk_clk72Mhz),
+ MUX8_NOGATE_LOCK("sor0_lvds", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_SOR0, tegra_clk_sor0_lvds, &sor0_lock),
+ MUX_FLAGS("csite", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_CSITE, 73, TEGRA_PERIPH_ON_APB, tegra_clk_csite, CLK_IGNORE_UNUSED),
+ NODIV("disp1", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_DISP1, 29, 7, 27, 0, tegra_clk_disp1, NULL),
+ NODIV("disp2", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_DISP2, 29, 7, 26, 0, tegra_clk_disp2, NULL),
+ NODIV("sor0", mux_clkm_plldp_sor0lvds, CLK_SOURCE_SOR0, 14, 3, 182, 0, tegra_clk_sor0, &sor0_lock),
+ UART("uarta", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTA, 6, tegra_clk_uarta),
+ UART("uartb", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, tegra_clk_uartb),
+ UART("uartc", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, tegra_clk_uartc),
+ UART("uartd", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTD, 65, tegra_clk_uartd),
+ UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 65, tegra_clk_uarte),
+ XUSB("xusb_host_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_HOST_SRC, 143, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_host_src),
+ XUSB("xusb_falcon_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_FALCON_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_falcon_src),
+ XUSB("xusb_fs_src", mux_clkm_48M_pllp_480M, CLK_SOURCE_XUSB_FS_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_fs_src),
+ XUSB("xusb_ss_src", mux_clkm_pllre_clk32_480M_pllc_ref, CLK_SOURCE_XUSB_SS_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_ss_src),
+ XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src),
+};
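/*
 * A minimal sketch of what one periph_clks[] entry encodes, using the
 * "sdmmc1" line above (field meanings assumed from the per-SoC
 * TEGRA_INIT_DATA_* macros that this shared table replaces):
 *
 *   MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0,
 *       tegra_clk_sdmmc1)
 *
 * - parents:           pll_p / pll_c / pll_m / clk_m (2-bit mux field)
 * - CLK_SOURCE_SDMMC1: register holding the mux and the 8-bit divider
 * - 14:                bit number in the clock-enable/reset banks
 * - 0:                 extra gate flags (none here)
 * - tegra_clk_sdmmc1:  generic id used by the DT lookup in periph_clk_init()
 */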
+
+static struct tegra_periph_init_data gate_clks[] = {
+ GATE("rtc", "clk_32k", 4, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_rtc, 0),
+ GATE("timer", "clk_m", 5, 0, tegra_clk_timer, 0),
+ GATE("isp", "clk_m", 23, 0, tegra_clk_isp, 0),
+ GATE("vcp", "clk_m", 29, 0, tegra_clk_vcp, 0),
+ GATE("apbdma", "clk_m", 34, 0, tegra_clk_apbdma, 0),
+ GATE("kbc", "clk_32k", 36, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_kbc, 0),
+ GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, 0),
+ GATE("fuse_burn", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse_burn, 0),
+ GATE("kfuse", "clk_m", 40, TEGRA_PERIPH_ON_APB, tegra_clk_kfuse, 0),
+ GATE("apbif", "clk_m", 107, TEGRA_PERIPH_ON_APB, tegra_clk_apbif, 0),
+ GATE("hda2hdmi", "clk_m", 128, TEGRA_PERIPH_ON_APB, tegra_clk_hda2hdmi, 0),
+ GATE("bsea", "clk_m", 62, 0, tegra_clk_bsea, 0),
+ GATE("bsev", "clk_m", 63, 0, tegra_clk_bsev, 0),
+ GATE("mipi-cal", "clk_m", 56, 0, tegra_clk_mipi_cal, 0),
+ GATE("usbd", "clk_m", 22, 0, tegra_clk_usbd, 0),
+ GATE("usb2", "clk_m", 58, 0, tegra_clk_usb2, 0),
+ GATE("usb3", "clk_m", 59, 0, tegra_clk_usb3, 0),
+ GATE("csi", "pll_p_out3", 52, 0, tegra_clk_csi, 0),
+ GATE("afi", "clk_m", 72, 0, tegra_clk_afi, 0),
+ GATE("csus", "clk_m", 92, TEGRA_PERIPH_NO_RESET, tegra_clk_csus, 0),
+ GATE("dds", "clk_m", 150, TEGRA_PERIPH_ON_APB, tegra_clk_dds, 0),
+ GATE("dp2", "clk_m", 152, TEGRA_PERIPH_ON_APB, tegra_clk_dp2, 0),
+ GATE("dtv", "clk_m", 79, TEGRA_PERIPH_ON_APB, tegra_clk_dtv, 0),
+ GATE("xusb_host", "xusb_host_src", 89, 0, tegra_clk_xusb_host, 0),
+ GATE("xusb_ss", "xusb_ss_src", 156, 0, tegra_clk_xusb_ss, 0),
+ GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0),
+ GATE("dsia", "dsia_mux", 48, 0, tegra_clk_dsia, 0),
+ GATE("dsib", "dsib_mux", 82, 0, tegra_clk_dsib, 0),
+ GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED),
+ GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0),
+ GATE("ispb", "clk_m", 3, 0, tegra_clk_ispb, 0),
+ GATE("vim2_clk", "clk_m", 11, 0, tegra_clk_vim2_clk, 0),
+ GATE("pcie", "clk_m", 70, 0, tegra_clk_pcie, 0),
+ GATE("dpaux", "clk_m", 181, 0, tegra_clk_dpaux, 0),
+ GATE("gpu", "pll_ref", 184, 0, tegra_clk_gpu, 0),
+};
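/*
 * gate_clks[] entries, by contrast, have a fixed parent and no CLK_SOURCE_*
 * register: only the enable/reset bit is managed.  For example the "fuse"
 * line above gates "clk_m" with enable bit 39 and TEGRA_PERIPH_ON_APB, and
 * is registered through tegra_clk_register_periph_gate() in gate_clk_init()
 * below.
 */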
+
+struct pll_out_data {
+ char *div_name;
+ char *pll_out_name;
+ u32 offset;
+ int clk_id;
+ u8 div_shift;
+ u8 div_flags;
+ u8 rst_shift;
+ spinlock_t *lock;
+};
+
+#define PLL_OUT(_num, _offset, _div_shift, _div_flags, _rst_shift, _id) \
+ {\
+ .div_name = "pll_p_out" #_num "_div",\
+ .pll_out_name = "pll_p_out" #_num,\
+ .offset = _offset,\
+ .div_shift = _div_shift,\
+ .div_flags = _div_flags | TEGRA_DIVIDER_FIXED |\
+ TEGRA_DIVIDER_ROUND_UP,\
+ .rst_shift = _rst_shift,\
+ .clk_id = tegra_clk_ ## _id,\
+ .lock = &_offset ##_lock,\
+ }
+
+static struct pll_out_data pllp_out_clks[] = {
+ PLL_OUT(1, PLLP_OUTA, 8, 0, 0, pll_p_out1),
+ PLL_OUT(2, PLLP_OUTA, 24, 0, 16, pll_p_out2),
+ PLL_OUT(2, PLLP_OUTA, 24, TEGRA_DIVIDER_INT, 16, pll_p_out2_int),
+ PLL_OUT(3, PLLP_OUTB, 8, 0, 0, pll_p_out3),
+ PLL_OUT(4, PLLP_OUTB, 24, 0, 16, pll_p_out4),
+ PLL_OUT(5, PLLP_OUTC, 24, 0, 16, pll_p_out5),
+};
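/*
 * For reference, PLL_OUT(1, PLLP_OUTA, 8, 0, 0, pll_p_out1) above expands
 * mechanically (per the macro definition) to roughly the initializer below;
 * PLLP_OUTA_lock comes from the ## paste of the offset name and is assumed
 * to be a spinlock defined earlier in this file, outside this hunk:
 */
	{
		.div_name = "pll_p_out1_div",
		.pll_out_name = "pll_p_out1",
		.offset = PLLP_OUTA,
		.div_shift = 8,
		.div_flags = TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
		.rst_shift = 0,
		.clk_id = tegra_clk_pll_p_out1,
		.lock = &PLLP_OUTA_lock,
	},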
+
+static void __init periph_clk_init(void __iomem *clk_base,
+ struct tegra_clk *tegra_clks)
+{
+ int i;
+ struct clk *clk;
+ struct clk **dt_clk;
+
+ for (i = 0; i < ARRAY_SIZE(periph_clks); i++) {
+ struct tegra_clk_periph_regs *bank;
+ struct tegra_periph_init_data *data;
+
+ data = periph_clks + i;
+
+ dt_clk = tegra_lookup_dt_id(data->clk_id, tegra_clks);
+ if (!dt_clk)
+ continue;
+
+ bank = get_reg_bank(data->periph.gate.clk_num);
+ if (!bank)
+ continue;
+
+ data->periph.gate.regs = bank;
+ clk = tegra_clk_register_periph(data->name,
+ data->p.parent_names, data->num_parents,
+ &data->periph, clk_base, data->offset,
+ data->flags);
+ *dt_clk = clk;
+ }
+}
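/*
 * The lookup-then-register pattern above relies on tegra_lookup_dt_id()
 * returning the output slot for a clock the current SoC actually declares,
 * and NULL otherwise, so table entries the SoC does not mark as present are
 * skipped silently.  A minimal sketch of that assumed contract (the backing
 * array and the real implementation are not part of this patch):
 */
struct clk **tegra_lookup_dt_id(int clk_id, struct tegra_clk *tegra_clks)
{
	if (tegra_clks[clk_id].present)
		return &clks[tegra_clks[clk_id].dt_id];	/* driver-wide clk array, assumed */
	return NULL;
}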
+
+static void __init gate_clk_init(void __iomem *clk_base,
+ struct tegra_clk *tegra_clks)
+{
+ int i;
+ struct clk *clk;
+ struct clk **dt_clk;
+
+ for (i = 0; i < ARRAY_SIZE(gate_clks); i++) {
+ struct tegra_periph_init_data *data;
+
+ data = gate_clks + i;
+
+ dt_clk = tegra_lookup_dt_id(data->clk_id, tegra_clks);
+ if (!dt_clk)
+ continue;
+
+ clk = tegra_clk_register_periph_gate(data->name,
+ data->p.parent_name, data->periph.gate.flags,
+ clk_base, data->flags,
+ data->periph.gate.clk_num,
+ periph_clk_enb_refcnt);
+ *dt_clk = clk;
+ }
+}
+
+static void __init init_pllp(void __iomem *clk_base, void __iomem *pmc_base,
+ struct tegra_clk *tegra_clks,
+ struct tegra_clk_pll_params *pll_params)
+{
+ struct clk *clk;
+ struct clk **dt_clk;
+ int i;
+
+ dt_clk = tegra_lookup_dt_id(tegra_clk_pll_p, tegra_clks);
+ if (dt_clk) {
+ /* PLLP */
+ clk = tegra_clk_register_pll("pll_p", "pll_ref", clk_base,
+ pmc_base, 0, pll_params, NULL);
+ clk_register_clkdev(clk, "pll_p", NULL);
+ *dt_clk = clk;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pllp_out_clks); i++) {
+ struct pll_out_data *data;
+
+ data = pllp_out_clks + i;
+
+ dt_clk = tegra_lookup_dt_id(data->clk_id, tegra_clks);
+ if (!dt_clk)
+ continue;
+
+ clk = tegra_clk_register_divider(data->div_name, "pll_p",
+ clk_base + data->offset, 0, data->div_flags,
+ data->div_shift, 8, 1, data->lock);
+ clk = tegra_clk_register_pll_out(data->pll_out_name,
+ data->div_name, clk_base + data->offset,
+ data->rst_shift + 1, data->rst_shift,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ data->lock);
+ *dt_clk = clk;
+ }
+}
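/*
 * Substituting the PLL_OUT(2, PLLP_OUTA, 24, 0, 16, pll_p_out2) entry into
 * the loop above yields the following pair of registrations: an 8-bit
 * divider at bits 24..31 of PLLP_OUTA feeding a pll_out gate whose reset
 * bit is 16 and whose (assumed) enable bit is 17, both serialised by the
 * same per-register lock:
 */
	clk = tegra_clk_register_divider("pll_p_out2_div", "pll_p",
			clk_base + PLLP_OUTA, 0,
			TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
			24, 8, 1, &PLLP_OUTA_lock);
	clk = tegra_clk_register_pll_out("pll_p_out2", "pll_p_out2_div",
			clk_base + PLLP_OUTA, 17, 16,
			CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
			&PLLP_OUTA_lock);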
+
+void __init tegra_periph_clk_init(void __iomem *clk_base,
+ void __iomem *pmc_base, struct tegra_clk *tegra_clks,
+ struct tegra_clk_pll_params *pll_params)
+{
+ init_pllp(clk_base, pmc_base, tegra_clks, pll_params);
+ periph_clk_init(clk_base, tegra_clks);
+ gate_clk_init(clk_base, tegra_clks);
+}
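/*
 * A minimal usage sketch: a per-SoC init routine (tegra114 in this series)
 * is assumed to call this entry point once the fixed/osc clocks exist, so
 * that pll_p and its outputs are registered before the peripherals that
 * list them as parents, e.g.:
 *
 *	tegra_periph_clk_init(clk_base, pmc_base, tegra114_clks,
 *			      &pll_p_params);
 */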
diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c
new file mode 100644
index 000000000000..08b21c1ee867
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra-pmc.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/clk/tegra.h>
+
+#include "clk.h"
+#include "clk-id.h"
+
+#define PMC_CLK_OUT_CNTRL 0x1a8
+#define PMC_DPD_PADS_ORIDE 0x1c
+#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
+#define PMC_CTRL 0
+#define PMC_CTRL_BLINK_ENB 7
+#define PMC_BLINK_TIMER 0x40
+
+struct pmc_clk_init_data {
+ char *mux_name;
+ char *gate_name;
+ const char **parents;
+ int num_parents;
+ int mux_id;
+ int gate_id;
+ char *dev_name;
+ u8 mux_shift;
+ u8 gate_shift;
+};
+
+#define PMC_CLK(_num, _mux_shift, _gate_shift)\
+ {\
+ .mux_name = "clk_out_" #_num "_mux",\
+ .gate_name = "clk_out_" #_num,\
+ .parents = clk_out ##_num ##_parents,\
+ .num_parents = ARRAY_SIZE(clk_out ##_num ##_parents),\
+ .mux_id = tegra_clk_clk_out_ ##_num ##_mux,\
+ .gate_id = tegra_clk_clk_out_ ##_num,\
+ .dev_name = "extern" #_num,\
+ .mux_shift = _mux_shift,\
+ .gate_shift = _gate_shift,\
+ }
+
+static DEFINE_SPINLOCK(clk_out_lock);
+
+static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern1",
+};
+
+static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern2",
+};
+
+static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern3",
+};
+
+static struct pmc_clk_init_data pmc_clks[] = {
+ PMC_CLK(1, 6, 2),
+ PMC_CLK(2, 14, 10),
+ PMC_CLK(3, 22, 18),
+};
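/*
 * PMC_CLK(1, 6, 2) above expands mechanically (per the macro definition) to:
 */
	{
		.mux_name = "clk_out_1_mux",
		.gate_name = "clk_out_1",
		.parents = clk_out1_parents,
		.num_parents = ARRAY_SIZE(clk_out1_parents),
		.mux_id = tegra_clk_clk_out_1_mux,
		.gate_id = tegra_clk_clk_out_1,
		.dev_name = "extern1",
		.mux_shift = 6,
		.gate_shift = 2,
	},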
+
+void __init tegra_pmc_clk_init(void __iomem *pmc_base,
+ struct tegra_clk *tegra_clks)
+{
+ struct clk *clk;
+ struct clk **dt_clk;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmc_clks); i++) {
+ struct pmc_clk_init_data *data;
+
+ data = pmc_clks + i;
+
+ dt_clk = tegra_lookup_dt_id(data->mux_id, tegra_clks);
+ if (!dt_clk)
+ continue;
+
+ clk = clk_register_mux(NULL, data->mux_name, data->parents,
+ data->num_parents, CLK_SET_RATE_NO_REPARENT,
+ pmc_base + PMC_CLK_OUT_CNTRL, data->mux_shift,
+ 3, 0, &clk_out_lock);
+ *dt_clk = clk;
+
+
+ dt_clk = tegra_lookup_dt_id(data->gate_id, tegra_clks);
+ if (!dt_clk)
+ continue;
+
+ clk = clk_register_gate(NULL, data->gate_name, data->mux_name,
+ 0, pmc_base + PMC_CLK_OUT_CNTRL,
+ data->gate_shift, 0, &clk_out_lock);
+ *dt_clk = clk;
+ clk_register_clkdev(clk, data->dev_name, data->gate_name);
+ }
+
+ /* blink */
+ writel_relaxed(0, pmc_base + PMC_BLINK_TIMER);
+ clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
+ pmc_base + PMC_DPD_PADS_ORIDE,
+ PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
+
+ dt_clk = tegra_lookup_dt_id(tegra_clk_blink, tegra_clks);
+ if (!dt_clk)
+ return;
+
+ clk = clk_register_gate(NULL, "blink", "blink_override", 0,
+ pmc_base + PMC_CTRL,
+ PMC_CTRL_BLINK_ENB, 0, NULL);
+ clk_register_clkdev(clk, "blink", NULL);
+ *dt_clk = clk;
+}
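/*
 * Each extern output is therefore a 3-bit mux plus a gate in
 * PMC_CLK_OUT_CNTRL ("clk_out_1": mux at bit 6, gate at bit 2), with a
 * clkdev alias so that a legacy lookup by device "clk_out_1" / connection
 * "extern1" still resolves.  The blink chain gates clk_32k first through
 * PMC_DPD_PADS_ORIDE bit 20 ("blink_override") and then PMC_CTRL bit 7
 * ("blink").  A hypothetical consumer lookup, as a sketch only:
 */
	struct clk *c = clk_get_sys("clk_out_1", "extern1");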
+
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
new file mode 100644
index 000000000000..05dce4aa2c11
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/clk/tegra.h>
+
+#include "clk.h"
+#include "clk-id.h"
+
+#define PLLX_BASE 0xe0
+#define PLLX_MISC 0xe4
+#define PLLX_MISC2 0x514
+#define PLLX_MISC3 0x518
+
+#define CCLKG_BURST_POLICY 0x368
+#define CCLKLP_BURST_POLICY 0x370
+#define SCLK_BURST_POLICY 0x028
+#define SYSTEM_CLK_RATE 0x030
+
+static DEFINE_SPINLOCK(sysrate_lock);
+
+static const char *sclk_parents[] = { "clk_m", "pll_c_out1", "pll_p_out4",
+ "pll_p", "pll_p_out2", "unused",
+ "clk_32k", "pll_m_out1" };
+
+static const char *cclk_g_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
+ "pll_p", "pll_p_out4", "unused",
+ "unused", "pll_x" };
+
+static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
+ "pll_p", "pll_p_out4", "unused",
+ "unused", "pll_x", "pll_x_out0" };
+
+static void __init tegra_sclk_init(void __iomem *clk_base,
+ struct tegra_clk *tegra_clks)
+{
+ struct clk *clk;
+ struct clk **dt_clk;
+
+ /* SCLK */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_sclk, tegra_clks);
+ if (dt_clk) {
+ clk = tegra_clk_register_super_mux("sclk", sclk_parents,
+ ARRAY_SIZE(sclk_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + SCLK_BURST_POLICY,
+ 0, 4, 0, 0, NULL);
+ *dt_clk = clk;
+ }
+
+ /* HCLK */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_hclk, tegra_clks);
+ if (dt_clk) {
+ clk = clk_register_divider(NULL, "hclk_div", "sclk", 0,
+ clk_base + SYSTEM_CLK_RATE, 4, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "hclk", "hclk_div",
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ clk_base + SYSTEM_CLK_RATE,
+ 7, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ *dt_clk = clk;
+ }
+
+ /* PCLK */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_pclk, tegra_clks);
+ if (!dt_clk)
+ return;
+
+ clk = clk_register_divider(NULL, "pclk_div", "hclk", 0,
+ clk_base + SYSTEM_CLK_RATE, 0, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT |
+ CLK_IGNORE_UNUSED, clk_base + SYSTEM_CLK_RATE,
+ 3, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ *dt_clk = clk;
+}
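/*
 * The registrations above imply this SYSTEM_CLK_RATE layout (a sketch: bit
 * positions and widths are taken from the calls, divider semantics assumed
 * to follow the common clk-divider default of "value N divides by N + 1"):
 *
 *   bits 0..1  PCLK divider off HCLK
 *   bit  3     PCLK disable (CLK_GATE_SET_TO_DISABLE: set means gated)
 *   bits 4..5  HCLK divider off SCLK
 *   bit  7     HCLK disable
 */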
+
+void __init tegra_super_clk_gen4_init(void __iomem *clk_base,
+ void __iomem *pmc_base,
+ struct tegra_clk *tegra_clks,
+ struct tegra_clk_pll_params *params)
+{
+ struct clk *clk;
+ struct clk **dt_clk;
+
+ /* CCLKG */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_cclk_g, tegra_clks);
+ if (dt_clk) {
+ clk = tegra_clk_register_super_mux("cclk_g", cclk_g_parents,
+ ARRAY_SIZE(cclk_g_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + CCLKG_BURST_POLICY,
+ 0, 4, 0, 0, NULL);
+ *dt_clk = clk;
+ }
+
+ /* CCLKLP */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_cclk_lp, tegra_clks);
+ if (dt_clk) {
+ clk = tegra_clk_register_super_mux("cclk_lp", cclk_lp_parents,
+ ARRAY_SIZE(cclk_lp_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + CCLKLP_BURST_POLICY,
+ 0, 4, 8, 9, NULL);
+ *dt_clk = clk;
+ }
+
+ tegra_sclk_init(clk_base, tegra_clks);
+
+#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC)
+ /* PLLX */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_pll_x, tegra_clks);
+ if (!dt_clk)
+ return;
+
+ clk = tegra_clk_register_pllxc("pll_x", "pll_ref", clk_base,
+ pmc_base, CLK_IGNORE_UNUSED, params, NULL);
+ *dt_clk = clk;
+
+ /* PLLX_OUT0 */
+
+ dt_clk = tegra_lookup_dt_id(tegra_clk_pll_x_out0, tegra_clks);
+ if (!dt_clk)
+ return;
+ clk = clk_register_fixed_factor(NULL, "pll_x_out0", "pll_x",
+ CLK_SET_RATE_PARENT, 1, 2);
+ *dt_clk = clk;
+#endif
+}
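/*
 * The PLLX branch is compiled in only for Tegra114/124; pll_x_out0 is just
 * a fixed 1:2 factor of pll_x, i.e. for any rate
 *
 *	clk_get_rate(pll_x_out0) == clk_get_rate(pll_x) / 2
 *
 * which follows directly from the clk_register_fixed_factor(..., 1, 2)
 * call above.
 */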
+
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 9467da7dee49..90d9d25f2228 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -23,30 +23,15 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/clk/tegra.h>
+#include <dt-bindings/clock/tegra114-car.h>
#include "clk.h"
+#include "clk-id.h"
-#define RST_DEVICES_L 0x004
-#define RST_DEVICES_H 0x008
-#define RST_DEVICES_U 0x00C
#define RST_DFLL_DVCO 0x2F4
-#define RST_DEVICES_V 0x358
-#define RST_DEVICES_W 0x35C
-#define RST_DEVICES_X 0x28C
-#define RST_DEVICES_SET_L 0x300
-#define RST_DEVICES_CLR_L 0x304
-#define RST_DEVICES_SET_H 0x308
-#define RST_DEVICES_CLR_H 0x30c
-#define RST_DEVICES_SET_U 0x310
-#define RST_DEVICES_CLR_U 0x314
-#define RST_DEVICES_SET_V 0x430
-#define RST_DEVICES_CLR_V 0x434
-#define RST_DEVICES_SET_W 0x438
-#define RST_DEVICES_CLR_W 0x43c
#define CPU_FINETRIM_SELECT 0x4d4 /* override default prop dlys */
#define CPU_FINETRIM_DR 0x4d8 /* rise->rise prop dly A */
#define CPU_FINETRIM_R 0x4e4 /* rise->rise prop dly inc A */
-#define RST_DEVICES_NUM 5
/* RST_DFLL_DVCO bitfields */
#define DVFS_DFLL_RESET_SHIFT 0
@@ -73,25 +58,7 @@
#define CPU_FINETRIM_R_FCPU_6_SHIFT 10 /* ftop */
#define CPU_FINETRIM_R_FCPU_6_MASK (0x3 << CPU_FINETRIM_R_FCPU_6_SHIFT)
-#define CLK_OUT_ENB_L 0x010
-#define CLK_OUT_ENB_H 0x014
-#define CLK_OUT_ENB_U 0x018
-#define CLK_OUT_ENB_V 0x360
-#define CLK_OUT_ENB_W 0x364
-#define CLK_OUT_ENB_X 0x280
-#define CLK_OUT_ENB_SET_L 0x320
-#define CLK_OUT_ENB_CLR_L 0x324
-#define CLK_OUT_ENB_SET_H 0x328
-#define CLK_OUT_ENB_CLR_H 0x32c
-#define CLK_OUT_ENB_SET_U 0x330
-#define CLK_OUT_ENB_CLR_U 0x334
-#define CLK_OUT_ENB_SET_V 0x440
-#define CLK_OUT_ENB_CLR_V 0x444
-#define CLK_OUT_ENB_SET_W 0x448
-#define CLK_OUT_ENB_CLR_W 0x44c
-#define CLK_OUT_ENB_SET_X 0x284
-#define CLK_OUT_ENB_CLR_X 0x288
-#define CLK_OUT_ENB_NUM 6
+#define TEGRA114_CLK_PERIPH_BANKS 5
#define PLLC_BASE 0x80
#define PLLC_MISC2 0x88
@@ -139,25 +106,6 @@
#define PLLE_AUX 0x48c
#define PLLC_OUT 0x84
#define PLLM_OUT 0x94
-#define PLLP_OUTA 0xa4
-#define PLLP_OUTB 0xa8
-#define PLLA_OUT 0xb4
-
-#define AUDIO_SYNC_CLK_I2S0 0x4a0
-#define AUDIO_SYNC_CLK_I2S1 0x4a4
-#define AUDIO_SYNC_CLK_I2S2 0x4a8
-#define AUDIO_SYNC_CLK_I2S3 0x4ac
-#define AUDIO_SYNC_CLK_I2S4 0x4b0
-#define AUDIO_SYNC_CLK_SPDIF 0x4b4
-
-#define AUDIO_SYNC_DOUBLER 0x49c
-
-#define PMC_CLK_OUT_CNTRL 0x1a8
-#define PMC_DPD_PADS_ORIDE 0x1c
-#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
-#define PMC_CTRL 0
-#define PMC_CTRL_BLINK_ENB 7
-#define PMC_BLINK_TIMER 0x40
#define OSC_CTRL 0x50
#define OSC_CTRL_OSC_FREQ_SHIFT 28
@@ -166,9 +114,6 @@
#define PLLXC_SW_MAX_P 6
#define CCLKG_BURST_POLICY 0x368
-#define CCLKLP_BURST_POLICY 0x370
-#define SCLK_BURST_POLICY 0x028
-#define SYSTEM_CLK_RATE 0x030
#define UTMIP_PLL_CFG2 0x488
#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6)
@@ -196,91 +141,8 @@
#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE BIT(1)
#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL BIT(0)
-#define CLK_SOURCE_I2S0 0x1d8
-#define CLK_SOURCE_I2S1 0x100
-#define CLK_SOURCE_I2S2 0x104
-#define CLK_SOURCE_NDFLASH 0x160
-#define CLK_SOURCE_I2S3 0x3bc
-#define CLK_SOURCE_I2S4 0x3c0
-#define CLK_SOURCE_SPDIF_OUT 0x108
-#define CLK_SOURCE_SPDIF_IN 0x10c
-#define CLK_SOURCE_PWM 0x110
-#define CLK_SOURCE_ADX 0x638
-#define CLK_SOURCE_AMX 0x63c
-#define CLK_SOURCE_HDA 0x428
-#define CLK_SOURCE_HDA2CODEC_2X 0x3e4
-#define CLK_SOURCE_SBC1 0x134
-#define CLK_SOURCE_SBC2 0x118
-#define CLK_SOURCE_SBC3 0x11c
-#define CLK_SOURCE_SBC4 0x1b4
-#define CLK_SOURCE_SBC5 0x3c8
-#define CLK_SOURCE_SBC6 0x3cc
-#define CLK_SOURCE_SATA_OOB 0x420
-#define CLK_SOURCE_SATA 0x424
-#define CLK_SOURCE_NDSPEED 0x3f8
-#define CLK_SOURCE_VFIR 0x168
-#define CLK_SOURCE_SDMMC1 0x150
-#define CLK_SOURCE_SDMMC2 0x154
-#define CLK_SOURCE_SDMMC3 0x1bc
-#define CLK_SOURCE_SDMMC4 0x164
-#define CLK_SOURCE_VDE 0x1c8
#define CLK_SOURCE_CSITE 0x1d4
-#define CLK_SOURCE_LA 0x1f8
-#define CLK_SOURCE_TRACE 0x634
-#define CLK_SOURCE_OWR 0x1cc
-#define CLK_SOURCE_NOR 0x1d0
-#define CLK_SOURCE_MIPI 0x174
-#define CLK_SOURCE_I2C1 0x124
-#define CLK_SOURCE_I2C2 0x198
-#define CLK_SOURCE_I2C3 0x1b8
-#define CLK_SOURCE_I2C4 0x3c4
-#define CLK_SOURCE_I2C5 0x128
-#define CLK_SOURCE_UARTA 0x178
-#define CLK_SOURCE_UARTB 0x17c
-#define CLK_SOURCE_UARTC 0x1a0
-#define CLK_SOURCE_UARTD 0x1c0
-#define CLK_SOURCE_UARTE 0x1c4
-#define CLK_SOURCE_UARTA_DBG 0x178
-#define CLK_SOURCE_UARTB_DBG 0x17c
-#define CLK_SOURCE_UARTC_DBG 0x1a0
-#define CLK_SOURCE_UARTD_DBG 0x1c0
-#define CLK_SOURCE_UARTE_DBG 0x1c4
-#define CLK_SOURCE_3D 0x158
-#define CLK_SOURCE_2D 0x15c
-#define CLK_SOURCE_VI_SENSOR 0x1a8
-#define CLK_SOURCE_VI 0x148
-#define CLK_SOURCE_EPP 0x16c
-#define CLK_SOURCE_MSENC 0x1f0
-#define CLK_SOURCE_TSEC 0x1f4
-#define CLK_SOURCE_HOST1X 0x180
-#define CLK_SOURCE_HDMI 0x18c
-#define CLK_SOURCE_DISP1 0x138
-#define CLK_SOURCE_DISP2 0x13c
-#define CLK_SOURCE_CILAB 0x614
-#define CLK_SOURCE_CILCD 0x618
-#define CLK_SOURCE_CILE 0x61c
-#define CLK_SOURCE_DSIALP 0x620
-#define CLK_SOURCE_DSIBLP 0x624
-#define CLK_SOURCE_TSENSOR 0x3b8
-#define CLK_SOURCE_D_AUDIO 0x3d0
-#define CLK_SOURCE_DAM0 0x3d8
-#define CLK_SOURCE_DAM1 0x3dc
-#define CLK_SOURCE_DAM2 0x3e0
-#define CLK_SOURCE_ACTMON 0x3e8
-#define CLK_SOURCE_EXTERN1 0x3ec
-#define CLK_SOURCE_EXTERN2 0x3f0
-#define CLK_SOURCE_EXTERN3 0x3f4
-#define CLK_SOURCE_I2CSLOW 0x3fc
-#define CLK_SOURCE_SE 0x42c
-#define CLK_SOURCE_MSELECT 0x3b4
-#define CLK_SOURCE_DFLL_REF 0x62c
-#define CLK_SOURCE_DFLL_SOC 0x630
-#define CLK_SOURCE_SOC_THERM 0x644
-#define CLK_SOURCE_XUSB_HOST_SRC 0x600
-#define CLK_SOURCE_XUSB_FALCON_SRC 0x604
-#define CLK_SOURCE_XUSB_FS_SRC 0x608
#define CLK_SOURCE_XUSB_SS_SRC 0x610
-#define CLK_SOURCE_XUSB_DEV_SRC 0x60c
#define CLK_SOURCE_EMC 0x19c
/* PLLM override registers */
@@ -298,19 +160,13 @@ static struct cpu_clk_suspend_context {
} tegra114_cpu_clk_sctx;
#endif
-static int periph_clk_enb_refcnt[CLK_OUT_ENB_NUM * 32];
-
static void __iomem *clk_base;
static void __iomem *pmc_base;
static DEFINE_SPINLOCK(pll_d_lock);
static DEFINE_SPINLOCK(pll_d2_lock);
static DEFINE_SPINLOCK(pll_u_lock);
-static DEFINE_SPINLOCK(pll_div_lock);
static DEFINE_SPINLOCK(pll_re_lock);
-static DEFINE_SPINLOCK(clk_doubler_lock);
-static DEFINE_SPINLOCK(clk_out_lock);
-static DEFINE_SPINLOCK(sysrate_lock);
static struct div_nmp pllxc_nmp = {
.divm_shift = 0,
@@ -370,6 +226,8 @@ static struct tegra_clk_pll_params pll_c_params = {
.stepb_shift = 9,
.pdiv_tohw = pllxc_p,
.div_nmp = &pllxc_nmp,
+ .freq_table = pll_c_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
};
static struct div_nmp pllcx_nmp = {
@@ -417,6 +275,8 @@ static struct tegra_clk_pll_params pll_c2_params = {
.ext_misc_reg[0] = 0x4f0,
.ext_misc_reg[1] = 0x4f4,
.ext_misc_reg[2] = 0x4f8,
+ .freq_table = pll_cx_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_params pll_c3_params = {
@@ -437,6 +297,8 @@ static struct tegra_clk_pll_params pll_c3_params = {
.ext_misc_reg[0] = 0x504,
.ext_misc_reg[1] = 0x508,
.ext_misc_reg[2] = 0x50c,
+ .freq_table = pll_cx_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
};
static struct div_nmp pllm_nmp = {
@@ -483,6 +345,8 @@ static struct tegra_clk_pll_params pll_m_params = {
.div_nmp = &pllm_nmp,
.pmc_divnm_reg = PMC_PLLM_WB0_OVERRIDE,
.pmc_divp_reg = PMC_PLLM_WB0_OVERRIDE_2,
+ .freq_table = pll_m_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
};
static struct div_nmp pllp_nmp = {
@@ -516,6 +380,9 @@ static struct tegra_clk_pll_params pll_p_params = {
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
.div_nmp = &pllp_nmp,
+ .freq_table = pll_p_freq_table,
+ .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK,
+ .fixed_rate = 408000000,
};
static struct tegra_clk_pll_freq_table pll_a_freq_table[] = {
@@ -543,6 +410,8 @@ static struct tegra_clk_pll_params pll_a_params = {
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
.div_nmp = &pllp_nmp,
+ .freq_table = pll_a_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
@@ -579,6 +448,9 @@ static struct tegra_clk_pll_params pll_d_params = {
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
.div_nmp = &pllp_nmp,
+ .freq_table = pll_d_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_params pll_d2_params = {
@@ -594,6 +466,9 @@ static struct tegra_clk_pll_params pll_d2_params = {
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
.div_nmp = &pllp_nmp,
+ .freq_table = pll_d_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK,
};
static struct pdiv_map pllu_p[] = {
@@ -634,6 +509,9 @@ static struct tegra_clk_pll_params pll_u_params = {
.lock_delay = 1000,
.pdiv_tohw = pllu_p,
.div_nmp = &pllu_nmp,
+ .freq_table = pll_u_freq_table,
+ .flags = TEGRA_PLLU | TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_freq_table pll_x_freq_table[] = {
@@ -667,12 +545,15 @@ static struct tegra_clk_pll_params pll_x_params = {
.stepb_shift = 24,
.pdiv_tohw = pllxc_p,
.div_nmp = &pllxc_nmp,
+ .freq_table = pll_x_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
/* PLLE special case: use cpcon field to store cml divider value */
{336000000, 100000000, 100, 21, 16, 11},
{312000000, 100000000, 200, 26, 24, 13},
+ {12000000, 100000000, 200, 1, 24, 13},
{0, 0, 0, 0, 0, 0},
};
@@ -699,6 +580,9 @@ static struct tegra_clk_pll_params pll_e_params = {
.lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
.lock_delay = 300,
.div_nmp = &plle_nmp,
+ .freq_table = pll_e_freq_table,
+ .flags = TEGRA_PLL_FIXED,
+ .fixed_rate = 100000000,
};
static struct div_nmp pllre_nmp = {
@@ -725,53 +609,7 @@ static struct tegra_clk_pll_params pll_re_vco_params = {
.iddq_reg = PLLRE_MISC,
.iddq_bit_idx = PLLRE_IDDQ_BIT,
.div_nmp = &pllre_nmp,
-};
-
-/* Peripheral clock registers */
-
-static struct tegra_clk_periph_regs periph_l_regs = {
- .enb_reg = CLK_OUT_ENB_L,
- .enb_set_reg = CLK_OUT_ENB_SET_L,
- .enb_clr_reg = CLK_OUT_ENB_CLR_L,
- .rst_reg = RST_DEVICES_L,
- .rst_set_reg = RST_DEVICES_SET_L,
- .rst_clr_reg = RST_DEVICES_CLR_L,
-};
-
-static struct tegra_clk_periph_regs periph_h_regs = {
- .enb_reg = CLK_OUT_ENB_H,
- .enb_set_reg = CLK_OUT_ENB_SET_H,
- .enb_clr_reg = CLK_OUT_ENB_CLR_H,
- .rst_reg = RST_DEVICES_H,
- .rst_set_reg = RST_DEVICES_SET_H,
- .rst_clr_reg = RST_DEVICES_CLR_H,
-};
-
-static struct tegra_clk_periph_regs periph_u_regs = {
- .enb_reg = CLK_OUT_ENB_U,
- .enb_set_reg = CLK_OUT_ENB_SET_U,
- .enb_clr_reg = CLK_OUT_ENB_CLR_U,
- .rst_reg = RST_DEVICES_U,
- .rst_set_reg = RST_DEVICES_SET_U,
- .rst_clr_reg = RST_DEVICES_CLR_U,
-};
-
-static struct tegra_clk_periph_regs periph_v_regs = {
- .enb_reg = CLK_OUT_ENB_V,
- .enb_set_reg = CLK_OUT_ENB_SET_V,
- .enb_clr_reg = CLK_OUT_ENB_CLR_V,
- .rst_reg = RST_DEVICES_V,
- .rst_set_reg = RST_DEVICES_SET_V,
- .rst_clr_reg = RST_DEVICES_CLR_V,
-};
-
-static struct tegra_clk_periph_regs periph_w_regs = {
- .enb_reg = CLK_OUT_ENB_W,
- .enb_set_reg = CLK_OUT_ENB_SET_W,
- .enb_clr_reg = CLK_OUT_ENB_CLR_W,
- .rst_reg = RST_DEVICES_W,
- .rst_set_reg = RST_DEVICES_SET_W,
- .rst_clr_reg = RST_DEVICES_CLR_W,
+ .flags = TEGRA_PLL_USE_LOCK,
};
/* possible OSC frequencies in Hz */
@@ -787,120 +625,6 @@ static unsigned long tegra114_input_freq[] = {
#define MASK(x) (BIT(x) - 1)
-#define TEGRA_INIT_DATA_MUX(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- 30, MASK(2), 0, 0, 8, 1, 0, _regs, _clk_num, \
- periph_clk_enb_refcnt, _gate_flags, _clk_id, \
- _parents##_idx, 0)
-
-#define TEGRA_INIT_DATA_MUX_FLAGS(_name, _con_id, _dev_id, _parents, _offset,\
- _clk_num, _regs, _gate_flags, _clk_id, flags)\
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- 30, MASK(2), 0, 0, 8, 1, 0, _regs, _clk_num, \
- periph_clk_enb_refcnt, _gate_flags, _clk_id, \
- _parents##_idx, flags)
-
-#define TEGRA_INIT_DATA_MUX8(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- 29, MASK(3), 0, 0, 8, 1, 0, _regs, _clk_num, \
- periph_clk_enb_refcnt, _gate_flags, _clk_id, \
- _parents##_idx, 0)
-
-#define TEGRA_INIT_DATA_INT(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- 30, MASK(2), 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs,\
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
- _clk_id, _parents##_idx, 0)
-
-#define TEGRA_INIT_DATA_INT_FLAGS(_name, _con_id, _dev_id, _parents, _offset,\
- _clk_num, _regs, _gate_flags, _clk_id, flags)\
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- 30, MASK(2), 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs,\
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
- _clk_id, _parents##_idx, flags)
-
-#define TEGRA_INIT_DATA_INT8(_name, _con_id, _dev_id, _parents, _offset,\
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs,\
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
- _clk_id, _parents##_idx, 0)
-
-#define TEGRA_INIT_DATA_UART(_name, _con_id, _dev_id, _parents, _offset,\
- _clk_num, _regs, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- 30, MASK(2), 0, 0, 16, 1, TEGRA_DIVIDER_UART, _regs,\
- _clk_num, periph_clk_enb_refcnt, 0, _clk_id, \
- _parents##_idx, 0)
-
-#define TEGRA_INIT_DATA_I2C(_name, _con_id, _dev_id, _parents, _offset,\
- _clk_num, _regs, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- 30, MASK(2), 0, 0, 16, 0, 0, _regs, _clk_num, \
- periph_clk_enb_refcnt, 0, _clk_id, _parents##_idx, 0)
-
-#define TEGRA_INIT_DATA_NODIV(_name, _con_id, _dev_id, _parents, _offset, \
- _mux_shift, _mux_mask, _clk_num, _regs, \
- _gate_flags, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- _mux_shift, _mux_mask, 0, 0, 0, 0, 0, _regs, \
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
- _clk_id, _parents##_idx, 0)
-
-#define TEGRA_INIT_DATA_XUSB(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset, \
- 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs, \
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
- _clk_id, _parents##_idx, 0)
-
-#define TEGRA_INIT_DATA_AUDIO(_name, _con_id, _dev_id, _offset, _clk_num,\
- _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, mux_d_audio_clk, \
- _offset, 16, 0xE01F, 0, 0, 8, 1, 0, _regs, _clk_num, \
- periph_clk_enb_refcnt, _gate_flags , _clk_id, \
- mux_d_audio_clk_idx, 0)
-
-enum tegra114_clk {
- rtc = 4, timer = 5, uarta = 6, sdmmc2 = 9, i2s1 = 11, i2c1 = 12,
- ndflash = 13, sdmmc1 = 14, sdmmc4 = 15, pwm = 17, i2s2 = 18, epp = 19,
- gr_2d = 21, usbd = 22, isp = 23, gr_3d = 24, disp2 = 26, disp1 = 27,
- host1x = 28, vcp = 29, i2s0 = 30, apbdma = 34, kbc = 36, kfuse = 40,
- sbc1 = 41, nor = 42, sbc2 = 44, sbc3 = 46, i2c5 = 47, dsia = 48,
- mipi = 50, hdmi = 51, csi = 52, i2c2 = 54, uartc = 55, mipi_cal = 56,
- emc, usb2, usb3, vde = 61, bsea = 62, bsev = 63, uartd = 65,
- i2c3 = 67, sbc4 = 68, sdmmc3 = 69, owr = 71, csite = 73,
- la = 76, trace = 77, soc_therm = 78, dtv = 79, ndspeed = 80,
- i2cslow = 81, dsib = 82, tsec = 83, xusb_host = 89, msenc = 91,
- csus = 92, mselect = 99, tsensor = 100, i2s3 = 101, i2s4 = 102,
- i2c4 = 103, sbc5 = 104, sbc6 = 105, d_audio, apbif = 107, dam0, dam1,
- dam2, hda2codec_2x = 111, audio0_2x = 113, audio1_2x, audio2_2x,
- audio3_2x, audio4_2x, spdif_2x, actmon = 119, extern1 = 120,
- extern2 = 121, extern3 = 122, hda = 125, se = 127, hda2hdmi = 128,
- cilab = 144, cilcd = 145, cile = 146, dsialp = 147, dsiblp = 148,
- dds = 150, dp2 = 152, amx = 153, adx = 154, xusb_ss = 156, uartb = 192,
- vfir, spdif_in, spdif_out, vi, vi_sensor, fuse, fuse_burn, clk_32k,
- clk_m, clk_m_div2, clk_m_div4, pll_ref, pll_c, pll_c_out1, pll_c2,
- pll_c3, pll_m, pll_m_out1, pll_p, pll_p_out1, pll_p_out2, pll_p_out3,
- pll_p_out4, pll_a, pll_a_out0, pll_d, pll_d_out0, pll_d2, pll_d2_out0,
- pll_u, pll_u_480M, pll_u_60M, pll_u_48M, pll_u_12M, pll_x, pll_x_out0,
- pll_re_vco, pll_re_out, pll_e_out0, spdif_in_sync, i2s0_sync,
- i2s1_sync, i2s2_sync, i2s3_sync, i2s4_sync, vimclk_sync, audio0,
- audio1, audio2, audio3, audio4, spdif, clk_out_1, clk_out_2, clk_out_3,
- blink, xusb_host_src = 252, xusb_falcon_src, xusb_fs_src, xusb_ss_src,
- xusb_dev_src, xusb_dev, xusb_hs_src, sclk, hclk, pclk, cclk_g, cclk_lp,
- dfll_ref = 264, dfll_soc,
-
- /* Mux clocks */
-
- audio0_mux = 300, audio1_mux, audio2_mux, audio3_mux, audio4_mux,
- spdif_mux, clk_out_1_mux, clk_out_2_mux, clk_out_3_mux, dsia_mux,
- dsib_mux, clk_max,
-};
-
struct utmi_clk_param {
/* Oscillator Frequency in KHz */
u32 osc_frequency;
@@ -934,122 +658,11 @@ static const struct utmi_clk_param utmi_parameters[] = {
/* peripheral mux definitions */
-#define MUX_I2S_SPDIF(_id) \
-static const char *mux_pllaout0_##_id##_2x_pllp_clkm[] = { "pll_a_out0", \
- #_id, "pll_p",\
- "clk_m"};
-MUX_I2S_SPDIF(audio0)
-MUX_I2S_SPDIF(audio1)
-MUX_I2S_SPDIF(audio2)
-MUX_I2S_SPDIF(audio3)
-MUX_I2S_SPDIF(audio4)
-MUX_I2S_SPDIF(audio)
-
-#define mux_pllaout0_audio0_2x_pllp_clkm_idx NULL
-#define mux_pllaout0_audio1_2x_pllp_clkm_idx NULL
-#define mux_pllaout0_audio2_2x_pllp_clkm_idx NULL
-#define mux_pllaout0_audio3_2x_pllp_clkm_idx NULL
-#define mux_pllaout0_audio4_2x_pllp_clkm_idx NULL
-#define mux_pllaout0_audio_2x_pllp_clkm_idx NULL
-
-static const char *mux_pllp_pllc_pllm_clkm[] = {
- "pll_p", "pll_c", "pll_m", "clk_m"
-};
-#define mux_pllp_pllc_pllm_clkm_idx NULL
-
-static const char *mux_pllp_pllc_pllm[] = { "pll_p", "pll_c", "pll_m" };
-#define mux_pllp_pllc_pllm_idx NULL
-
-static const char *mux_pllp_pllc_clk32_clkm[] = {
- "pll_p", "pll_c", "clk_32k", "clk_m"
-};
-#define mux_pllp_pllc_clk32_clkm_idx NULL
-
-static const char *mux_plla_pllc_pllp_clkm[] = {
- "pll_a_out0", "pll_c", "pll_p", "clk_m"
-};
-#define mux_plla_pllc_pllp_clkm_idx mux_pllp_pllc_pllm_clkm_idx
-
-static const char *mux_pllp_pllc2_c_c3_pllm_clkm[] = {
- "pll_p", "pll_c2", "pll_c", "pll_c3", "pll_m", "clk_m"
-};
-static u32 mux_pllp_pllc2_c_c3_pllm_clkm_idx[] = {
- [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6,
-};
-
-static const char *mux_pllp_clkm[] = {
- "pll_p", "clk_m"
-};
-static u32 mux_pllp_clkm_idx[] = {
- [0] = 0, [1] = 3,
-};
-
-static const char *mux_pllm_pllc2_c_c3_pllp_plla[] = {
- "pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0"
-};
-#define mux_pllm_pllc2_c_c3_pllp_plla_idx mux_pllp_pllc2_c_c3_pllm_clkm_idx
-
-static const char *mux_pllp_pllm_plld_plla_pllc_plld2_clkm[] = {
- "pll_p", "pll_m", "pll_d_out0", "pll_a_out0", "pll_c",
- "pll_d2_out0", "clk_m"
-};
-#define mux_pllp_pllm_plld_plla_pllc_plld2_clkm_idx NULL
-
-static const char *mux_pllm_pllc_pllp_plla[] = {
- "pll_m", "pll_c", "pll_p", "pll_a_out0"
-};
-#define mux_pllm_pllc_pllp_plla_idx mux_pllp_pllc_pllm_clkm_idx
-
-static const char *mux_pllp_pllc_clkm[] = {
- "pll_p", "pll_c", "pll_m"
-};
-static u32 mux_pllp_pllc_clkm_idx[] = {
- [0] = 0, [1] = 1, [2] = 3,
-};
-
-static const char *mux_pllp_pllc_clkm_clk32[] = {
- "pll_p", "pll_c", "clk_m", "clk_32k"
-};
-#define mux_pllp_pllc_clkm_clk32_idx NULL
-
-static const char *mux_plla_clk32_pllp_clkm_plle[] = {
- "pll_a_out0", "clk_32k", "pll_p", "clk_m", "pll_e_out0"
-};
-#define mux_plla_clk32_pllp_clkm_plle_idx NULL
-
-static const char *mux_clkm_pllp_pllc_pllre[] = {
- "clk_m", "pll_p", "pll_c", "pll_re_out"
-};
-static u32 mux_clkm_pllp_pllc_pllre_idx[] = {
- [0] = 0, [1] = 1, [2] = 3, [3] = 5,
-};
-
-static const char *mux_clkm_48M_pllp_480M[] = {
- "clk_m", "pll_u_48M", "pll_p", "pll_u_480M"
-};
-#define mux_clkm_48M_pllp_480M_idx NULL
-
-static const char *mux_clkm_pllre_clk32_480M_pllc_ref[] = {
- "clk_m", "pll_re_out", "clk_32k", "pll_u_480M", "pll_c", "pll_ref"
-};
-static u32 mux_clkm_pllre_clk32_480M_pllc_ref_idx[] = {
- [0] = 0, [1] = 1, [2] = 3, [3] = 3, [4] = 4, [5] = 7,
-};
-
static const char *mux_plld_out0_plld2_out0[] = {
"pll_d_out0", "pll_d2_out0",
};
#define mux_plld_out0_plld2_out0_idx NULL
-static const char *mux_d_audio_clk[] = {
- "pll_a_out0", "pll_p", "clk_m", "spdif_in_sync", "i2s0_sync",
- "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync",
-};
-static u32 mux_d_audio_clk_idx[] = {
- [0] = 0, [1] = 0x8000, [2] = 0xc000, [3] = 0xE000, [4] = 0xE001,
- [5] = 0xE002, [6] = 0xE003, [7] = 0xE004, [8] = 0xE005, [9] = 0xE007,
-};
-
static const char *mux_pllmcp_clkm[] = {
"pll_m_out0", "pll_c_out0", "pll_p_out0", "clk_m", "pll_m_ud",
};
@@ -1064,8 +677,253 @@ static const struct clk_div_table pll_re_div_table[] = {
{ .val = 0, .div = 0 },
};
-static struct clk *clks[clk_max];
-static struct clk_onecell_data clk_data;
+static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
+ [tegra_clk_rtc] = { .dt_id = TEGRA114_CLK_RTC, .present = true },
+ [tegra_clk_timer] = { .dt_id = TEGRA114_CLK_TIMER, .present = true },
+ [tegra_clk_uarta] = { .dt_id = TEGRA114_CLK_UARTA, .present = true },
+ [tegra_clk_uartd] = { .dt_id = TEGRA114_CLK_UARTD, .present = true },
+ [tegra_clk_sdmmc2] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true },
+ [tegra_clk_i2s1] = { .dt_id = TEGRA114_CLK_I2S1, .present = true },
+ [tegra_clk_i2c1] = { .dt_id = TEGRA114_CLK_I2C1, .present = true },
+ [tegra_clk_ndflash] = { .dt_id = TEGRA114_CLK_NDFLASH, .present = true },
+ [tegra_clk_sdmmc1] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true },
+ [tegra_clk_sdmmc4] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true },
+ [tegra_clk_pwm] = { .dt_id = TEGRA114_CLK_PWM, .present = true },
+ [tegra_clk_i2s0] = { .dt_id = TEGRA114_CLK_I2S0, .present = true },
+ [tegra_clk_i2s2] = { .dt_id = TEGRA114_CLK_I2S2, .present = true },
+ [tegra_clk_epp_8] = { .dt_id = TEGRA114_CLK_EPP, .present = true },
+ [tegra_clk_gr2d_8] = { .dt_id = TEGRA114_CLK_GR2D, .present = true },
+ [tegra_clk_usbd] = { .dt_id = TEGRA114_CLK_USBD, .present = true },
+ [tegra_clk_isp] = { .dt_id = TEGRA114_CLK_ISP, .present = true },
+ [tegra_clk_gr3d_8] = { .dt_id = TEGRA114_CLK_GR3D, .present = true },
+ [tegra_clk_disp2] = { .dt_id = TEGRA114_CLK_DISP2, .present = true },
+ [tegra_clk_disp1] = { .dt_id = TEGRA114_CLK_DISP1, .present = true },
+ [tegra_clk_host1x_8] = { .dt_id = TEGRA114_CLK_HOST1X, .present = true },
+ [tegra_clk_vcp] = { .dt_id = TEGRA114_CLK_VCP, .present = true },
+ [tegra_clk_apbdma] = { .dt_id = TEGRA114_CLK_APBDMA, .present = true },
+ [tegra_clk_kbc] = { .dt_id = TEGRA114_CLK_KBC, .present = true },
+ [tegra_clk_kfuse] = { .dt_id = TEGRA114_CLK_KFUSE, .present = true },
+ [tegra_clk_sbc1_8] = { .dt_id = TEGRA114_CLK_SBC1, .present = true },
+ [tegra_clk_nor] = { .dt_id = TEGRA114_CLK_NOR, .present = true },
+ [tegra_clk_sbc2_8] = { .dt_id = TEGRA114_CLK_SBC2, .present = true },
+ [tegra_clk_sbc3_8] = { .dt_id = TEGRA114_CLK_SBC3, .present = true },
+ [tegra_clk_i2c5] = { .dt_id = TEGRA114_CLK_I2C5, .present = true },
+ [tegra_clk_dsia] = { .dt_id = TEGRA114_CLK_DSIA, .present = true },
+ [tegra_clk_mipi] = { .dt_id = TEGRA114_CLK_MIPI, .present = true },
+ [tegra_clk_hdmi] = { .dt_id = TEGRA114_CLK_HDMI, .present = true },
+ [tegra_clk_csi] = { .dt_id = TEGRA114_CLK_CSI, .present = true },
+ [tegra_clk_i2c2] = { .dt_id = TEGRA114_CLK_I2C2, .present = true },
+ [tegra_clk_uartc] = { .dt_id = TEGRA114_CLK_UARTC, .present = true },
+ [tegra_clk_mipi_cal] = { .dt_id = TEGRA114_CLK_MIPI_CAL, .present = true },
+ [tegra_clk_emc] = { .dt_id = TEGRA114_CLK_EMC, .present = true },
+ [tegra_clk_usb2] = { .dt_id = TEGRA114_CLK_USB2, .present = true },
+ [tegra_clk_usb3] = { .dt_id = TEGRA114_CLK_USB3, .present = true },
+ [tegra_clk_vde_8] = { .dt_id = TEGRA114_CLK_VDE, .present = true },
+ [tegra_clk_bsea] = { .dt_id = TEGRA114_CLK_BSEA, .present = true },
+ [tegra_clk_bsev] = { .dt_id = TEGRA114_CLK_BSEV, .present = true },
+ [tegra_clk_i2c3] = { .dt_id = TEGRA114_CLK_I2C3, .present = true },
+ [tegra_clk_sbc4_8] = { .dt_id = TEGRA114_CLK_SBC4, .present = true },
+ [tegra_clk_sdmmc3] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true },
+ [tegra_clk_owr] = { .dt_id = TEGRA114_CLK_OWR, .present = true },
+ [tegra_clk_csite] = { .dt_id = TEGRA114_CLK_CSITE, .present = true },
+ [tegra_clk_la] = { .dt_id = TEGRA114_CLK_LA, .present = true },
+ [tegra_clk_trace] = { .dt_id = TEGRA114_CLK_TRACE, .present = true },
+ [tegra_clk_soc_therm] = { .dt_id = TEGRA114_CLK_SOC_THERM, .present = true },
+ [tegra_clk_dtv] = { .dt_id = TEGRA114_CLK_DTV, .present = true },
+ [tegra_clk_ndspeed] = { .dt_id = TEGRA114_CLK_NDSPEED, .present = true },
+ [tegra_clk_i2cslow] = { .dt_id = TEGRA114_CLK_I2CSLOW, .present = true },
+ [tegra_clk_dsib] = { .dt_id = TEGRA114_CLK_DSIB, .present = true },
+ [tegra_clk_tsec] = { .dt_id = TEGRA114_CLK_TSEC, .present = true },
+ [tegra_clk_xusb_host] = { .dt_id = TEGRA114_CLK_XUSB_HOST, .present = true },
+ [tegra_clk_msenc] = { .dt_id = TEGRA114_CLK_MSENC, .present = true },
+ [tegra_clk_csus] = { .dt_id = TEGRA114_CLK_CSUS, .present = true },
+ [tegra_clk_mselect] = { .dt_id = TEGRA114_CLK_MSELECT, .present = true },
+ [tegra_clk_tsensor] = { .dt_id = TEGRA114_CLK_TSENSOR, .present = true },
+ [tegra_clk_i2s3] = { .dt_id = TEGRA114_CLK_I2S3, .present = true },
+ [tegra_clk_i2s4] = { .dt_id = TEGRA114_CLK_I2S4, .present = true },
+ [tegra_clk_i2c4] = { .dt_id = TEGRA114_CLK_I2C4, .present = true },
+ [tegra_clk_sbc5_8] = { .dt_id = TEGRA114_CLK_SBC5, .present = true },
+ [tegra_clk_sbc6_8] = { .dt_id = TEGRA114_CLK_SBC6, .present = true },
+ [tegra_clk_d_audio] = { .dt_id = TEGRA114_CLK_D_AUDIO, .present = true },
+ [tegra_clk_apbif] = { .dt_id = TEGRA114_CLK_APBIF, .present = true },
+ [tegra_clk_dam0] = { .dt_id = TEGRA114_CLK_DAM0, .present = true },
+ [tegra_clk_dam1] = { .dt_id = TEGRA114_CLK_DAM1, .present = true },
+ [tegra_clk_dam2] = { .dt_id = TEGRA114_CLK_DAM2, .present = true },
+ [tegra_clk_hda2codec_2x] = { .dt_id = TEGRA114_CLK_HDA2CODEC_2X, .present = true },
+ [tegra_clk_audio0_2x] = { .dt_id = TEGRA114_CLK_AUDIO0_2X, .present = true },
+ [tegra_clk_audio1_2x] = { .dt_id = TEGRA114_CLK_AUDIO1_2X, .present = true },
+ [tegra_clk_audio2_2x] = { .dt_id = TEGRA114_CLK_AUDIO2_2X, .present = true },
+ [tegra_clk_audio3_2x] = { .dt_id = TEGRA114_CLK_AUDIO3_2X, .present = true },
+ [tegra_clk_audio4_2x] = { .dt_id = TEGRA114_CLK_AUDIO4_2X, .present = true },
+ [tegra_clk_spdif_2x] = { .dt_id = TEGRA114_CLK_SPDIF_2X, .present = true },
+ [tegra_clk_actmon] = { .dt_id = TEGRA114_CLK_ACTMON, .present = true },
+ [tegra_clk_extern1] = { .dt_id = TEGRA114_CLK_EXTERN1, .present = true },
+ [tegra_clk_extern2] = { .dt_id = TEGRA114_CLK_EXTERN2, .present = true },
+ [tegra_clk_extern3] = { .dt_id = TEGRA114_CLK_EXTERN3, .present = true },
+ [tegra_clk_hda] = { .dt_id = TEGRA114_CLK_HDA, .present = true },
+ [tegra_clk_se] = { .dt_id = TEGRA114_CLK_SE, .present = true },
+ [tegra_clk_hda2hdmi] = { .dt_id = TEGRA114_CLK_HDA2HDMI, .present = true },
+ [tegra_clk_cilab] = { .dt_id = TEGRA114_CLK_CILAB, .present = true },
+ [tegra_clk_cilcd] = { .dt_id = TEGRA114_CLK_CILCD, .present = true },
+ [tegra_clk_cile] = { .dt_id = TEGRA114_CLK_CILE, .present = true },
+ [tegra_clk_dsialp] = { .dt_id = TEGRA114_CLK_DSIALP, .present = true },
+ [tegra_clk_dsiblp] = { .dt_id = TEGRA114_CLK_DSIBLP, .present = true },
+ [tegra_clk_dds] = { .dt_id = TEGRA114_CLK_DDS, .present = true },
+ [tegra_clk_dp2] = { .dt_id = TEGRA114_CLK_DP2, .present = true },
+ [tegra_clk_amx] = { .dt_id = TEGRA114_CLK_AMX, .present = true },
+ [tegra_clk_adx] = { .dt_id = TEGRA114_CLK_ADX, .present = true },
+ [tegra_clk_xusb_ss] = { .dt_id = TEGRA114_CLK_XUSB_SS, .present = true },
+ [tegra_clk_uartb] = { .dt_id = TEGRA114_CLK_UARTB, .present = true },
+ [tegra_clk_vfir] = { .dt_id = TEGRA114_CLK_VFIR, .present = true },
+ [tegra_clk_spdif_in] = { .dt_id = TEGRA114_CLK_SPDIF_IN, .present = true },
+ [tegra_clk_spdif_out] = { .dt_id = TEGRA114_CLK_SPDIF_OUT, .present = true },
+ [tegra_clk_vi_8] = { .dt_id = TEGRA114_CLK_VI, .present = true },
+ [tegra_clk_vi_sensor_8] = { .dt_id = TEGRA114_CLK_VI_SENSOR, .present = true },
+ [tegra_clk_fuse] = { .dt_id = TEGRA114_CLK_FUSE, .present = true },
+ [tegra_clk_fuse_burn] = { .dt_id = TEGRA114_CLK_FUSE_BURN, .present = true },
+ [tegra_clk_clk_32k] = { .dt_id = TEGRA114_CLK_CLK_32K, .present = true },
+ [tegra_clk_clk_m] = { .dt_id = TEGRA114_CLK_CLK_M, .present = true },
+ [tegra_clk_clk_m_div2] = { .dt_id = TEGRA114_CLK_CLK_M_DIV2, .present = true },
+ [tegra_clk_clk_m_div4] = { .dt_id = TEGRA114_CLK_CLK_M_DIV4, .present = true },
+ [tegra_clk_pll_ref] = { .dt_id = TEGRA114_CLK_PLL_REF, .present = true },
+ [tegra_clk_pll_c] = { .dt_id = TEGRA114_CLK_PLL_C, .present = true },
+ [tegra_clk_pll_c_out1] = { .dt_id = TEGRA114_CLK_PLL_C_OUT1, .present = true },
+ [tegra_clk_pll_c2] = { .dt_id = TEGRA114_CLK_PLL_C2, .present = true },
+ [tegra_clk_pll_c3] = { .dt_id = TEGRA114_CLK_PLL_C3, .present = true },
+ [tegra_clk_pll_m] = { .dt_id = TEGRA114_CLK_PLL_M, .present = true },
+ [tegra_clk_pll_m_out1] = { .dt_id = TEGRA114_CLK_PLL_M_OUT1, .present = true },
+ [tegra_clk_pll_p] = { .dt_id = TEGRA114_CLK_PLL_P, .present = true },
+ [tegra_clk_pll_p_out1] = { .dt_id = TEGRA114_CLK_PLL_P_OUT1, .present = true },
+ [tegra_clk_pll_p_out2_int] = { .dt_id = TEGRA114_CLK_PLL_P_OUT2, .present = true },
+ [tegra_clk_pll_p_out3] = { .dt_id = TEGRA114_CLK_PLL_P_OUT3, .present = true },
+ [tegra_clk_pll_p_out4] = { .dt_id = TEGRA114_CLK_PLL_P_OUT4, .present = true },
+ [tegra_clk_pll_a] = { .dt_id = TEGRA114_CLK_PLL_A, .present = true },
+ [tegra_clk_pll_a_out0] = { .dt_id = TEGRA114_CLK_PLL_A_OUT0, .present = true },
+ [tegra_clk_pll_d] = { .dt_id = TEGRA114_CLK_PLL_D, .present = true },
+ [tegra_clk_pll_d_out0] = { .dt_id = TEGRA114_CLK_PLL_D_OUT0, .present = true },
+ [tegra_clk_pll_d2] = { .dt_id = TEGRA114_CLK_PLL_D2, .present = true },
+ [tegra_clk_pll_d2_out0] = { .dt_id = TEGRA114_CLK_PLL_D2_OUT0, .present = true },
+ [tegra_clk_pll_u] = { .dt_id = TEGRA114_CLK_PLL_U, .present = true },
+ [tegra_clk_pll_u_480m] = { .dt_id = TEGRA114_CLK_PLL_U_480M, .present = true },
+ [tegra_clk_pll_u_60m] = { .dt_id = TEGRA114_CLK_PLL_U_60M, .present = true },
+ [tegra_clk_pll_u_48m] = { .dt_id = TEGRA114_CLK_PLL_U_48M, .present = true },
+ [tegra_clk_pll_u_12m] = { .dt_id = TEGRA114_CLK_PLL_U_12M, .present = true },
+ [tegra_clk_pll_x] = { .dt_id = TEGRA114_CLK_PLL_X, .present = true },
+ [tegra_clk_pll_x_out0] = { .dt_id = TEGRA114_CLK_PLL_X_OUT0, .present = true },
+ [tegra_clk_pll_re_vco] = { .dt_id = TEGRA114_CLK_PLL_RE_VCO, .present = true },
+ [tegra_clk_pll_re_out] = { .dt_id = TEGRA114_CLK_PLL_RE_OUT, .present = true },
+ [tegra_clk_pll_e_out0] = { .dt_id = TEGRA114_CLK_PLL_E_OUT0, .present = true },
+ [tegra_clk_spdif_in_sync] = { .dt_id = TEGRA114_CLK_SPDIF_IN_SYNC, .present = true },
+ [tegra_clk_i2s0_sync] = { .dt_id = TEGRA114_CLK_I2S0_SYNC, .present = true },
+ [tegra_clk_i2s1_sync] = { .dt_id = TEGRA114_CLK_I2S1_SYNC, .present = true },
+ [tegra_clk_i2s2_sync] = { .dt_id = TEGRA114_CLK_I2S2_SYNC, .present = true },
+ [tegra_clk_i2s3_sync] = { .dt_id = TEGRA114_CLK_I2S3_SYNC, .present = true },
+ [tegra_clk_i2s4_sync] = { .dt_id = TEGRA114_CLK_I2S4_SYNC, .present = true },
+ [tegra_clk_vimclk_sync] = { .dt_id = TEGRA114_CLK_VIMCLK_SYNC, .present = true },
+ [tegra_clk_audio0] = { .dt_id = TEGRA114_CLK_AUDIO0, .present = true },
+ [tegra_clk_audio1] = { .dt_id = TEGRA114_CLK_AUDIO1, .present = true },
+ [tegra_clk_audio2] = { .dt_id = TEGRA114_CLK_AUDIO2, .present = true },
+ [tegra_clk_audio3] = { .dt_id = TEGRA114_CLK_AUDIO3, .present = true },
+ [tegra_clk_audio4] = { .dt_id = TEGRA114_CLK_AUDIO4, .present = true },
+ [tegra_clk_spdif] = { .dt_id = TEGRA114_CLK_SPDIF, .present = true },
+ [tegra_clk_clk_out_1] = { .dt_id = TEGRA114_CLK_CLK_OUT_1, .present = true },
+ [tegra_clk_clk_out_2] = { .dt_id = TEGRA114_CLK_CLK_OUT_2, .present = true },
+ [tegra_clk_clk_out_3] = { .dt_id = TEGRA114_CLK_CLK_OUT_3, .present = true },
+ [tegra_clk_blink] = { .dt_id = TEGRA114_CLK_BLINK, .present = true },
+ [tegra_clk_xusb_host_src] = { .dt_id = TEGRA114_CLK_XUSB_HOST_SRC, .present = true },
+ [tegra_clk_xusb_falcon_src] = { .dt_id = TEGRA114_CLK_XUSB_FALCON_SRC, .present = true },
+ [tegra_clk_xusb_fs_src] = { .dt_id = TEGRA114_CLK_XUSB_FS_SRC, .present = true },
+ [tegra_clk_xusb_ss_src] = { .dt_id = TEGRA114_CLK_XUSB_SS_SRC, .present = true },
+ [tegra_clk_xusb_dev_src] = { .dt_id = TEGRA114_CLK_XUSB_DEV_SRC, .present = true },
+ [tegra_clk_xusb_dev] = { .dt_id = TEGRA114_CLK_XUSB_DEV, .present = true },
+ [tegra_clk_xusb_hs_src] = { .dt_id = TEGRA114_CLK_XUSB_HS_SRC, .present = true },
+ [tegra_clk_sclk] = { .dt_id = TEGRA114_CLK_SCLK, .present = true },
+ [tegra_clk_hclk] = { .dt_id = TEGRA114_CLK_HCLK, .present = true },
+ [tegra_clk_pclk] = { .dt_id = TEGRA114_CLK_PCLK, .present = true },
+ [tegra_clk_cclk_g] = { .dt_id = TEGRA114_CLK_CCLK_G, .present = true },
+ [tegra_clk_cclk_lp] = { .dt_id = TEGRA114_CLK_CCLK_LP, .present = true },
+ [tegra_clk_dfll_ref] = { .dt_id = TEGRA114_CLK_DFLL_REF, .present = true },
+ [tegra_clk_dfll_soc] = { .dt_id = TEGRA114_CLK_DFLL_SOC, .present = true },
+ [tegra_clk_audio0_mux] = { .dt_id = TEGRA114_CLK_AUDIO0_MUX, .present = true },
+ [tegra_clk_audio1_mux] = { .dt_id = TEGRA114_CLK_AUDIO1_MUX, .present = true },
+ [tegra_clk_audio2_mux] = { .dt_id = TEGRA114_CLK_AUDIO2_MUX, .present = true },
+ [tegra_clk_audio3_mux] = { .dt_id = TEGRA114_CLK_AUDIO3_MUX, .present = true },
+ [tegra_clk_audio4_mux] = { .dt_id = TEGRA114_CLK_AUDIO4_MUX, .present = true },
+ [tegra_clk_spdif_mux] = { .dt_id = TEGRA114_CLK_SPDIF_MUX, .present = true },
+ [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_1_MUX, .present = true },
+ [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_2_MUX, .present = true },
+ [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_3_MUX, .present = true },
+ [tegra_clk_dsia_mux] = { .dt_id = TEGRA114_CLK_DSIA_MUX, .present = true },
+ [tegra_clk_dsib_mux] = { .dt_id = TEGRA114_CLK_DSIB_MUX, .present = true },
+};
+
+static struct tegra_devclk devclks[] __initdata = {
+ { .con_id = "clk_m", .dt_id = TEGRA114_CLK_CLK_M },
+ { .con_id = "pll_ref", .dt_id = TEGRA114_CLK_PLL_REF },
+ { .con_id = "clk_32k", .dt_id = TEGRA114_CLK_CLK_32K },
+ { .con_id = "clk_m_div2", .dt_id = TEGRA114_CLK_CLK_M_DIV2 },
+ { .con_id = "clk_m_div4", .dt_id = TEGRA114_CLK_CLK_M_DIV4 },
+ { .con_id = "pll_c", .dt_id = TEGRA114_CLK_PLL_C },
+ { .con_id = "pll_c_out1", .dt_id = TEGRA114_CLK_PLL_C_OUT1 },
+ { .con_id = "pll_c2", .dt_id = TEGRA114_CLK_PLL_C2 },
+ { .con_id = "pll_c3", .dt_id = TEGRA114_CLK_PLL_C3 },
+ { .con_id = "pll_p", .dt_id = TEGRA114_CLK_PLL_P },
+ { .con_id = "pll_p_out1", .dt_id = TEGRA114_CLK_PLL_P_OUT1 },
+ { .con_id = "pll_p_out2", .dt_id = TEGRA114_CLK_PLL_P_OUT2 },
+ { .con_id = "pll_p_out3", .dt_id = TEGRA114_CLK_PLL_P_OUT3 },
+ { .con_id = "pll_p_out4", .dt_id = TEGRA114_CLK_PLL_P_OUT4 },
+ { .con_id = "pll_m", .dt_id = TEGRA114_CLK_PLL_M },
+ { .con_id = "pll_m_out1", .dt_id = TEGRA114_CLK_PLL_M_OUT1 },
+ { .con_id = "pll_x", .dt_id = TEGRA114_CLK_PLL_X },
+ { .con_id = "pll_x_out0", .dt_id = TEGRA114_CLK_PLL_X_OUT0 },
+ { .con_id = "pll_u", .dt_id = TEGRA114_CLK_PLL_U },
+ { .con_id = "pll_u_480M", .dt_id = TEGRA114_CLK_PLL_U_480M },
+ { .con_id = "pll_u_60M", .dt_id = TEGRA114_CLK_PLL_U_60M },
+ { .con_id = "pll_u_48M", .dt_id = TEGRA114_CLK_PLL_U_48M },
+ { .con_id = "pll_u_12M", .dt_id = TEGRA114_CLK_PLL_U_12M },
+ { .con_id = "pll_d", .dt_id = TEGRA114_CLK_PLL_D },
+ { .con_id = "pll_d_out0", .dt_id = TEGRA114_CLK_PLL_D_OUT0 },
+ { .con_id = "pll_d2", .dt_id = TEGRA114_CLK_PLL_D2 },
+ { .con_id = "pll_d2_out0", .dt_id = TEGRA114_CLK_PLL_D2_OUT0 },
+ { .con_id = "pll_a", .dt_id = TEGRA114_CLK_PLL_A },
+ { .con_id = "pll_a_out0", .dt_id = TEGRA114_CLK_PLL_A_OUT0 },
+ { .con_id = "pll_re_vco", .dt_id = TEGRA114_CLK_PLL_RE_VCO },
+ { .con_id = "pll_re_out", .dt_id = TEGRA114_CLK_PLL_RE_OUT },
+ { .con_id = "pll_e_out0", .dt_id = TEGRA114_CLK_PLL_E_OUT0 },
+ { .con_id = "spdif_in_sync", .dt_id = TEGRA114_CLK_SPDIF_IN_SYNC },
+ { .con_id = "i2s0_sync", .dt_id = TEGRA114_CLK_I2S0_SYNC },
+ { .con_id = "i2s1_sync", .dt_id = TEGRA114_CLK_I2S1_SYNC },
+ { .con_id = "i2s2_sync", .dt_id = TEGRA114_CLK_I2S2_SYNC },
+ { .con_id = "i2s3_sync", .dt_id = TEGRA114_CLK_I2S3_SYNC },
+ { .con_id = "i2s4_sync", .dt_id = TEGRA114_CLK_I2S4_SYNC },
+ { .con_id = "vimclk_sync", .dt_id = TEGRA114_CLK_VIMCLK_SYNC },
+ { .con_id = "audio0", .dt_id = TEGRA114_CLK_AUDIO0 },
+ { .con_id = "audio1", .dt_id = TEGRA114_CLK_AUDIO1 },
+ { .con_id = "audio2", .dt_id = TEGRA114_CLK_AUDIO2 },
+ { .con_id = "audio3", .dt_id = TEGRA114_CLK_AUDIO3 },
+ { .con_id = "audio4", .dt_id = TEGRA114_CLK_AUDIO4 },
+ { .con_id = "spdif", .dt_id = TEGRA114_CLK_SPDIF },
+ { .con_id = "audio0_2x", .dt_id = TEGRA114_CLK_AUDIO0_2X },
+ { .con_id = "audio1_2x", .dt_id = TEGRA114_CLK_AUDIO1_2X },
+ { .con_id = "audio2_2x", .dt_id = TEGRA114_CLK_AUDIO2_2X },
+ { .con_id = "audio3_2x", .dt_id = TEGRA114_CLK_AUDIO3_2X },
+ { .con_id = "audio4_2x", .dt_id = TEGRA114_CLK_AUDIO4_2X },
+ { .con_id = "spdif_2x", .dt_id = TEGRA114_CLK_SPDIF_2X },
+ { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA114_CLK_EXTERN1 },
+ { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA114_CLK_EXTERN2 },
+ { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA114_CLK_EXTERN3 },
+ { .con_id = "blink", .dt_id = TEGRA114_CLK_BLINK },
+ { .con_id = "cclk_g", .dt_id = TEGRA114_CLK_CCLK_G },
+ { .con_id = "cclk_lp", .dt_id = TEGRA114_CLK_CCLK_LP },
+ { .con_id = "sclk", .dt_id = TEGRA114_CLK_SCLK },
+ { .con_id = "hclk", .dt_id = TEGRA114_CLK_HCLK },
+ { .con_id = "pclk", .dt_id = TEGRA114_CLK_PCLK },
+ { .con_id = "fuse", .dt_id = TEGRA114_CLK_FUSE },
+ { .dev_id = "rtc-tegra", .dt_id = TEGRA114_CLK_RTC },
+ { .dev_id = "timer", .dt_id = TEGRA114_CLK_TIMER },
+};
+
+static struct clk **clks;
static unsigned long osc_freq;
static unsigned long pll_ref_freq;
@@ -1086,16 +944,14 @@ static int __init tegra114_osc_clk_init(void __iomem *clk_base)
/* clk_m */
clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT,
osc_freq);
- clk_register_clkdev(clk, "clk_m", NULL);
- clks[clk_m] = clk;
+ clks[TEGRA114_CLK_CLK_M] = clk;
/* pll_ref */
val = (val >> OSC_CTRL_PLL_REF_DIV_SHIFT) & 3;
pll_ref_div = 1 << val;
clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
CLK_SET_RATE_PARENT, 1, pll_ref_div);
- clk_register_clkdev(clk, "pll_ref", NULL);
- clks[pll_ref] = clk;
+ clks[TEGRA114_CLK_PLL_REF] = clk;
pll_ref_freq = osc_freq / pll_ref_div;
@@ -1109,20 +965,17 @@ static void __init tegra114_fixed_clk_init(void __iomem *clk_base)
/* clk_32k */
clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, CLK_IS_ROOT,
32768);
- clk_register_clkdev(clk, "clk_32k", NULL);
- clks[clk_32k] = clk;
+ clks[TEGRA114_CLK_CLK_32K] = clk;
/* clk_m_div2 */
clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m",
CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "clk_m_div2", NULL);
- clks[clk_m_div2] = clk;
+ clks[TEGRA114_CLK_CLK_M_DIV2] = clk;
/* clk_m_div4 */
clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m",
CLK_SET_RATE_PARENT, 1, 4);
- clk_register_clkdev(clk, "clk_m_div4", NULL);
- clks[clk_m_div4] = clk;
+ clks[TEGRA114_CLK_CLK_M_DIV4] = clk;
}
@@ -1208,63 +1061,6 @@ static __init void tegra114_utmi_param_configure(void __iomem *clk_base)
writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
}
-static void __init _clip_vco_min(struct tegra_clk_pll_params *pll_params)
-{
- pll_params->vco_min =
- DIV_ROUND_UP(pll_params->vco_min, pll_ref_freq) * pll_ref_freq;
-}
-
-static int __init _setup_dynamic_ramp(struct tegra_clk_pll_params *pll_params,
- void __iomem *clk_base)
-{
- u32 val;
- u32 step_a, step_b;
-
- switch (pll_ref_freq) {
- case 12000000:
- case 13000000:
- case 26000000:
- step_a = 0x2B;
- step_b = 0x0B;
- break;
- case 16800000:
- step_a = 0x1A;
- step_b = 0x09;
- break;
- case 19200000:
- step_a = 0x12;
- step_b = 0x08;
- break;
- default:
- pr_err("%s: Unexpected reference rate %lu\n",
- __func__, pll_ref_freq);
- WARN_ON(1);
- return -EINVAL;
- }
-
- val = step_a << pll_params->stepa_shift;
- val |= step_b << pll_params->stepb_shift;
- writel_relaxed(val, clk_base + pll_params->dyn_ramp_reg);
-
- return 0;
-}
-
-static void __init _init_iddq(struct tegra_clk_pll_params *pll_params,
- void __iomem *clk_base)
-{
- u32 val, val_iddq;
-
- val = readl_relaxed(clk_base + pll_params->base_reg);
- val_iddq = readl_relaxed(clk_base + pll_params->iddq_reg);
-
- if (val & BIT(30))
- WARN_ON(val_iddq & BIT(pll_params->iddq_bit_idx));
- else {
- val_iddq |= BIT(pll_params->iddq_bit_idx);
- writel_relaxed(val_iddq, clk_base + pll_params->iddq_reg);
- }
-}
-
static void __init tegra114_pll_init(void __iomem *clk_base,
void __iomem *pmc)
{
@@ -1272,104 +1068,34 @@ static void __init tegra114_pll_init(void __iomem *clk_base,
struct clk *clk;
/* PLLC */
- _clip_vco_min(&pll_c_params);
- if (_setup_dynamic_ramp(&pll_c_params, clk_base) >= 0) {
- _init_iddq(&pll_c_params, clk_base);
- clk = tegra_clk_register_pllxc("pll_c", "pll_ref", clk_base,
- pmc, 0, 0, &pll_c_params, TEGRA_PLL_USE_LOCK,
- pll_c_freq_table, NULL);
- clk_register_clkdev(clk, "pll_c", NULL);
- clks[pll_c] = clk;
-
- /* PLLC_OUT1 */
- clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
- clk_base + PLLC_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
- 8, 8, 1, NULL);
- clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
- clk_base + PLLC_OUT, 1, 0,
- CLK_SET_RATE_PARENT, 0, NULL);
- clk_register_clkdev(clk, "pll_c_out1", NULL);
- clks[pll_c_out1] = clk;
- }
+ clk = tegra_clk_register_pllxc("pll_c", "pll_ref", clk_base,
+ pmc, 0, &pll_c_params, NULL);
+ clks[TEGRA114_CLK_PLL_C] = clk;
+
+ /* PLLC_OUT1 */
+ clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
+ clk_base + PLLC_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
+ clk_base + PLLC_OUT, 1, 0,
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clks[TEGRA114_CLK_PLL_C_OUT1] = clk;
/* PLLC2 */
- _clip_vco_min(&pll_c2_params);
- clk = tegra_clk_register_pllc("pll_c2", "pll_ref", clk_base, pmc, 0, 0,
- &pll_c2_params, TEGRA_PLL_USE_LOCK,
- pll_cx_freq_table, NULL);
- clk_register_clkdev(clk, "pll_c2", NULL);
- clks[pll_c2] = clk;
+ clk = tegra_clk_register_pllc("pll_c2", "pll_ref", clk_base, pmc, 0,
+ &pll_c2_params, NULL);
+ clks[TEGRA114_CLK_PLL_C2] = clk;
/* PLLC3 */
- _clip_vco_min(&pll_c3_params);
- clk = tegra_clk_register_pllc("pll_c3", "pll_ref", clk_base, pmc, 0, 0,
- &pll_c3_params, TEGRA_PLL_USE_LOCK,
- pll_cx_freq_table, NULL);
- clk_register_clkdev(clk, "pll_c3", NULL);
- clks[pll_c3] = clk;
-
- /* PLLP */
- clk = tegra_clk_register_pll("pll_p", "pll_ref", clk_base, pmc, 0,
- 408000000, &pll_p_params,
- TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK,
- pll_p_freq_table, NULL);
- clk_register_clkdev(clk, "pll_p", NULL);
- clks[pll_p] = clk;
-
- /* PLLP_OUT1 */
- clk = tegra_clk_register_divider("pll_p_out1_div", "pll_p",
- clk_base + PLLP_OUTA, 0, TEGRA_DIVIDER_FIXED |
- TEGRA_DIVIDER_ROUND_UP, 8, 8, 1, &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out1", "pll_p_out1_div",
- clk_base + PLLP_OUTA, 1, 0,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out1", NULL);
- clks[pll_p_out1] = clk;
-
- /* PLLP_OUT2 */
- clk = tegra_clk_register_divider("pll_p_out2_div", "pll_p",
- clk_base + PLLP_OUTA, 0, TEGRA_DIVIDER_FIXED |
- TEGRA_DIVIDER_ROUND_UP | TEGRA_DIVIDER_INT, 24,
- 8, 1, &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out2", "pll_p_out2_div",
- clk_base + PLLP_OUTA, 17, 16,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out2", NULL);
- clks[pll_p_out2] = clk;
-
- /* PLLP_OUT3 */
- clk = tegra_clk_register_divider("pll_p_out3_div", "pll_p",
- clk_base + PLLP_OUTB, 0, TEGRA_DIVIDER_FIXED |
- TEGRA_DIVIDER_ROUND_UP, 8, 8, 1, &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out3", "pll_p_out3_div",
- clk_base + PLLP_OUTB, 1, 0,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out3", NULL);
- clks[pll_p_out3] = clk;
-
- /* PLLP_OUT4 */
- clk = tegra_clk_register_divider("pll_p_out4_div", "pll_p",
- clk_base + PLLP_OUTB, 0, TEGRA_DIVIDER_FIXED |
- TEGRA_DIVIDER_ROUND_UP, 24, 8, 1,
- &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out4", "pll_p_out4_div",
- clk_base + PLLP_OUTB, 17, 16,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out4", NULL);
- clks[pll_p_out4] = clk;
+ clk = tegra_clk_register_pllc("pll_c3", "pll_ref", clk_base, pmc, 0,
+ &pll_c3_params, NULL);
+ clks[TEGRA114_CLK_PLL_C3] = clk;
/* PLLM */
- _clip_vco_min(&pll_m_params);
clk = tegra_clk_register_pllm("pll_m", "pll_ref", clk_base, pmc,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, 0,
- &pll_m_params, TEGRA_PLL_USE_LOCK,
- pll_m_freq_table, NULL);
- clk_register_clkdev(clk, "pll_m", NULL);
- clks[pll_m] = clk;
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+ &pll_m_params, NULL);
+ clks[TEGRA114_CLK_PLL_M] = clk;
/* PLLM_OUT1 */
clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
@@ -1378,41 +1104,20 @@ static void __init tegra114_pll_init(void __iomem *clk_base,
clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
CLK_SET_RATE_PARENT, 0, NULL);
- clk_register_clkdev(clk, "pll_m_out1", NULL);
- clks[pll_m_out1] = clk;
+ clks[TEGRA114_CLK_PLL_M_OUT1] = clk;
/* PLLM_UD */
clk = clk_register_fixed_factor(NULL, "pll_m_ud", "pll_m",
CLK_SET_RATE_PARENT, 1, 1);
- /* PLLX */
- _clip_vco_min(&pll_x_params);
- if (_setup_dynamic_ramp(&pll_x_params, clk_base) >= 0) {
- _init_iddq(&pll_x_params, clk_base);
- clk = tegra_clk_register_pllxc("pll_x", "pll_ref", clk_base,
- pmc, CLK_IGNORE_UNUSED, 0, &pll_x_params,
- TEGRA_PLL_USE_LOCK, pll_x_freq_table, NULL);
- clk_register_clkdev(clk, "pll_x", NULL);
- clks[pll_x] = clk;
- }
-
- /* PLLX_OUT0 */
- clk = clk_register_fixed_factor(NULL, "pll_x_out0", "pll_x",
- CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "pll_x_out0", NULL);
- clks[pll_x_out0] = clk;
-
/* PLLU */
val = readl(clk_base + pll_u_params.base_reg);
val &= ~BIT(24); /* disable PLLU_OVERRIDE */
writel(val, clk_base + pll_u_params.base_reg);
clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc, 0,
- 0, &pll_u_params, TEGRA_PLLU |
- TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
- TEGRA_PLL_USE_LOCK, pll_u_freq_table, &pll_u_lock);
- clk_register_clkdev(clk, "pll_u", NULL);
- clks[pll_u] = clk;
+ &pll_u_params, &pll_u_lock);
+ clks[TEGRA114_CLK_PLL_U] = clk;
tegra114_utmi_param_configure(clk_base);
@@ -1420,731 +1125,97 @@ static void __init tegra114_pll_init(void __iomem *clk_base,
clk = clk_register_gate(NULL, "pll_u_480M", "pll_u",
CLK_SET_RATE_PARENT, clk_base + PLLU_BASE,
22, 0, &pll_u_lock);
- clk_register_clkdev(clk, "pll_u_480M", NULL);
- clks[pll_u_480M] = clk;
+ clks[TEGRA114_CLK_PLL_U_480M] = clk;
/* PLLU_60M */
clk = clk_register_fixed_factor(NULL, "pll_u_60M", "pll_u",
CLK_SET_RATE_PARENT, 1, 8);
- clk_register_clkdev(clk, "pll_u_60M", NULL);
- clks[pll_u_60M] = clk;
+ clks[TEGRA114_CLK_PLL_U_60M] = clk;
/* PLLU_48M */
clk = clk_register_fixed_factor(NULL, "pll_u_48M", "pll_u",
CLK_SET_RATE_PARENT, 1, 10);
- clk_register_clkdev(clk, "pll_u_48M", NULL);
- clks[pll_u_48M] = clk;
+ clks[TEGRA114_CLK_PLL_U_48M] = clk;
/* PLLU_12M */
clk = clk_register_fixed_factor(NULL, "pll_u_12M", "pll_u",
CLK_SET_RATE_PARENT, 1, 40);
- clk_register_clkdev(clk, "pll_u_12M", NULL);
- clks[pll_u_12M] = clk;
+ clks[TEGRA114_CLK_PLL_U_12M] = clk;
/* PLLD */
clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, pmc, 0,
- 0, &pll_d_params,
- TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
- TEGRA_PLL_USE_LOCK, pll_d_freq_table, &pll_d_lock);
- clk_register_clkdev(clk, "pll_d", NULL);
- clks[pll_d] = clk;
+ &pll_d_params, &pll_d_lock);
+ clks[TEGRA114_CLK_PLL_D] = clk;
/* PLLD_OUT0 */
clk = clk_register_fixed_factor(NULL, "pll_d_out0", "pll_d",
CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "pll_d_out0", NULL);
- clks[pll_d_out0] = clk;
+ clks[TEGRA114_CLK_PLL_D_OUT0] = clk;
/* PLLD2 */
clk = tegra_clk_register_pll("pll_d2", "pll_ref", clk_base, pmc, 0,
- 0, &pll_d2_params,
- TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
- TEGRA_PLL_USE_LOCK, pll_d_freq_table, &pll_d2_lock);
- clk_register_clkdev(clk, "pll_d2", NULL);
- clks[pll_d2] = clk;
+ &pll_d2_params, &pll_d2_lock);
+ clks[TEGRA114_CLK_PLL_D2] = clk;
/* PLLD2_OUT0 */
clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2",
CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "pll_d2_out0", NULL);
- clks[pll_d2_out0] = clk;
-
- /* PLLA */
- clk = tegra_clk_register_pll("pll_a", "pll_p_out1", clk_base, pmc, 0,
- 0, &pll_a_params, TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_USE_LOCK, pll_a_freq_table, NULL);
- clk_register_clkdev(clk, "pll_a", NULL);
- clks[pll_a] = clk;
-
- /* PLLA_OUT0 */
- clk = tegra_clk_register_divider("pll_a_out0_div", "pll_a",
- clk_base + PLLA_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
- 8, 8, 1, NULL);
- clk = tegra_clk_register_pll_out("pll_a_out0", "pll_a_out0_div",
- clk_base + PLLA_OUT, 1, 0, CLK_IGNORE_UNUSED |
- CLK_SET_RATE_PARENT, 0, NULL);
- clk_register_clkdev(clk, "pll_a_out0", NULL);
- clks[pll_a_out0] = clk;
+ clks[TEGRA114_CLK_PLL_D2_OUT0] = clk;
/* PLLRE */
- _clip_vco_min(&pll_re_vco_params);
clk = tegra_clk_register_pllre("pll_re_vco", "pll_ref", clk_base, pmc,
- 0, 0, &pll_re_vco_params, TEGRA_PLL_USE_LOCK,
- NULL, &pll_re_lock, pll_ref_freq);
- clk_register_clkdev(clk, "pll_re_vco", NULL);
- clks[pll_re_vco] = clk;
+ 0, &pll_re_vco_params, &pll_re_lock, pll_ref_freq);
+ clks[TEGRA114_CLK_PLL_RE_VCO] = clk;
clk = clk_register_divider_table(NULL, "pll_re_out", "pll_re_vco", 0,
clk_base + PLLRE_BASE, 16, 4, 0,
pll_re_div_table, &pll_re_lock);
- clk_register_clkdev(clk, "pll_re_out", NULL);
- clks[pll_re_out] = clk;
+ clks[TEGRA114_CLK_PLL_RE_OUT] = clk;
/* PLLE */
- clk = tegra_clk_register_plle_tegra114("pll_e_out0", "pll_re_vco",
- clk_base, 0, 100000000, &pll_e_params,
- pll_e_freq_table, NULL);
- clk_register_clkdev(clk, "pll_e_out0", NULL);
- clks[pll_e_out0] = clk;
-}
-
-static const char *mux_audio_sync_clk[] = { "spdif_in_sync", "i2s0_sync",
- "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync",
-};
-
-static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern1",
-};
-
-static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern2",
-};
-
-static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern3",
-};
-
-static void __init tegra114_audio_clk_init(void __iomem *clk_base)
-{
- struct clk *clk;
-
- /* spdif_in_sync */
- clk = tegra_clk_register_sync_source("spdif_in_sync", 24000000,
- 24000000);
- clk_register_clkdev(clk, "spdif_in_sync", NULL);
- clks[spdif_in_sync] = clk;
-
- /* i2s0_sync */
- clk = tegra_clk_register_sync_source("i2s0_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s0_sync", NULL);
- clks[i2s0_sync] = clk;
-
- /* i2s1_sync */
- clk = tegra_clk_register_sync_source("i2s1_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s1_sync", NULL);
- clks[i2s1_sync] = clk;
-
- /* i2s2_sync */
- clk = tegra_clk_register_sync_source("i2s2_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s2_sync", NULL);
- clks[i2s2_sync] = clk;
-
- /* i2s3_sync */
- clk = tegra_clk_register_sync_source("i2s3_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s3_sync", NULL);
- clks[i2s3_sync] = clk;
-
- /* i2s4_sync */
- clk = tegra_clk_register_sync_source("i2s4_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s4_sync", NULL);
- clks[i2s4_sync] = clk;
-
- /* vimclk_sync */
- clk = tegra_clk_register_sync_source("vimclk_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "vimclk_sync", NULL);
- clks[vimclk_sync] = clk;
-
- /* audio0 */
- clk = clk_register_mux(NULL, "audio0_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S0, 0, 3, 0,
- NULL);
- clks[audio0_mux] = clk;
- clk = clk_register_gate(NULL, "audio0", "audio0_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S0, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio0", NULL);
- clks[audio0] = clk;
-
- /* audio1 */
- clk = clk_register_mux(NULL, "audio1_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S1, 0, 3, 0,
- NULL);
- clks[audio1_mux] = clk;
- clk = clk_register_gate(NULL, "audio1", "audio1_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S1, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio1", NULL);
- clks[audio1] = clk;
-
- /* audio2 */
- clk = clk_register_mux(NULL, "audio2_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S2, 0, 3, 0,
- NULL);
- clks[audio2_mux] = clk;
- clk = clk_register_gate(NULL, "audio2", "audio2_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S2, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio2", NULL);
- clks[audio2] = clk;
-
- /* audio3 */
- clk = clk_register_mux(NULL, "audio3_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S3, 0, 3, 0,
- NULL);
- clks[audio3_mux] = clk;
- clk = clk_register_gate(NULL, "audio3", "audio3_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S3, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio3", NULL);
- clks[audio3] = clk;
-
- /* audio4 */
- clk = clk_register_mux(NULL, "audio4_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S4, 0, 3, 0,
- NULL);
- clks[audio4_mux] = clk;
- clk = clk_register_gate(NULL, "audio4", "audio4_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S4, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio4", NULL);
- clks[audio4] = clk;
-
- /* spdif */
- clk = clk_register_mux(NULL, "spdif_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_SPDIF, 0, 3, 0,
- NULL);
- clks[spdif_mux] = clk;
- clk = clk_register_gate(NULL, "spdif", "spdif_mux", 0,
- clk_base + AUDIO_SYNC_CLK_SPDIF, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "spdif", NULL);
- clks[spdif] = clk;
-
- /* audio0_2x */
- clk = clk_register_fixed_factor(NULL, "audio0_doubler", "audio0",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio0_div", "audio0_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 24, 1,
- 0, &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio0_2x", "audio0_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 113, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio0_2x", NULL);
- clks[audio0_2x] = clk;
-
- /* audio1_2x */
- clk = clk_register_fixed_factor(NULL, "audio1_doubler", "audio1",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio1_div", "audio1_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 25, 1,
- 0, &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio1_2x", "audio1_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 114, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio1_2x", NULL);
- clks[audio1_2x] = clk;
-
- /* audio2_2x */
- clk = clk_register_fixed_factor(NULL, "audio2_doubler", "audio2",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio2_div", "audio2_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 26, 1,
- 0, &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio2_2x", "audio2_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 115, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio2_2x", NULL);
- clks[audio2_2x] = clk;
-
- /* audio3_2x */
- clk = clk_register_fixed_factor(NULL, "audio3_doubler", "audio3",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio3_div", "audio3_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 27, 1,
- 0, &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio3_2x", "audio3_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 116, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio3_2x", NULL);
- clks[audio3_2x] = clk;
-
- /* audio4_2x */
- clk = clk_register_fixed_factor(NULL, "audio4_doubler", "audio4",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio4_div", "audio4_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 28, 1,
- 0, &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio4_2x", "audio4_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 117, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio4_2x", NULL);
- clks[audio4_2x] = clk;
-
- /* spdif_2x */
- clk = clk_register_fixed_factor(NULL, "spdif_doubler", "spdif",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("spdif_div", "spdif_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 29, 1,
- 0, &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("spdif_2x", "spdif_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 118,
- &periph_v_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "spdif_2x", NULL);
- clks[spdif_2x] = clk;
-}
-
-static void __init tegra114_pmc_clk_init(void __iomem *pmc_base)
-{
- struct clk *clk;
-
- /* clk_out_1 */
- clk = clk_register_mux(NULL, "clk_out_1_mux", clk_out1_parents,
- ARRAY_SIZE(clk_out1_parents),
- CLK_SET_RATE_NO_REPARENT,
- pmc_base + PMC_CLK_OUT_CNTRL, 6, 3, 0,
- &clk_out_lock);
- clks[clk_out_1_mux] = clk;
- clk = clk_register_gate(NULL, "clk_out_1", "clk_out_1_mux", 0,
- pmc_base + PMC_CLK_OUT_CNTRL, 2, 0,
- &clk_out_lock);
- clk_register_clkdev(clk, "extern1", "clk_out_1");
- clks[clk_out_1] = clk;
-
- /* clk_out_2 */
- clk = clk_register_mux(NULL, "clk_out_2_mux", clk_out2_parents,
- ARRAY_SIZE(clk_out2_parents),
- CLK_SET_RATE_NO_REPARENT,
- pmc_base + PMC_CLK_OUT_CNTRL, 14, 3, 0,
- &clk_out_lock);
- clks[clk_out_2_mux] = clk;
- clk = clk_register_gate(NULL, "clk_out_2", "clk_out_2_mux", 0,
- pmc_base + PMC_CLK_OUT_CNTRL, 10, 0,
- &clk_out_lock);
- clk_register_clkdev(clk, "extern2", "clk_out_2");
- clks[clk_out_2] = clk;
-
- /* clk_out_3 */
- clk = clk_register_mux(NULL, "clk_out_3_mux", clk_out3_parents,
- ARRAY_SIZE(clk_out3_parents),
- CLK_SET_RATE_NO_REPARENT,
- pmc_base + PMC_CLK_OUT_CNTRL, 22, 3, 0,
- &clk_out_lock);
- clks[clk_out_3_mux] = clk;
- clk = clk_register_gate(NULL, "clk_out_3", "clk_out_3_mux", 0,
- pmc_base + PMC_CLK_OUT_CNTRL, 18, 0,
- &clk_out_lock);
- clk_register_clkdev(clk, "extern3", "clk_out_3");
- clks[clk_out_3] = clk;
-
- /* blink */
- /* clear the blink timer register to directly output clk_32k */
- writel_relaxed(0, pmc_base + PMC_BLINK_TIMER);
- clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
- pmc_base + PMC_DPD_PADS_ORIDE,
- PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
- clk = clk_register_gate(NULL, "blink", "blink_override", 0,
- pmc_base + PMC_CTRL,
- PMC_CTRL_BLINK_ENB, 0, NULL);
- clk_register_clkdev(clk, "blink", NULL);
- clks[blink] = clk;
-
+ clk = tegra_clk_register_plle_tegra114("pll_e_out0", "pll_ref",
+ clk_base, 0, &pll_e_params, NULL);
+ clks[TEGRA114_CLK_PLL_E_OUT0] = clk;
}
-static const char *sclk_parents[] = { "clk_m", "pll_c_out1", "pll_p_out4",
- "pll_p", "pll_p_out2", "unused",
- "clk_32k", "pll_m_out1" };
-
-static const char *cclk_g_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
- "pll_p", "pll_p_out4", "unused",
- "unused", "pll_x" };
-
-static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
- "pll_p", "pll_p_out4", "unused",
- "unused", "pll_x", "pll_x_out0" };
-
-static void __init tegra114_super_clk_init(void __iomem *clk_base)
+static __init void tegra114_periph_clk_init(void __iomem *clk_base,
+ void __iomem *pmc_base)
{
struct clk *clk;
+ u32 val;
- /* CCLKG */
- clk = tegra_clk_register_super_mux("cclk_g", cclk_g_parents,
- ARRAY_SIZE(cclk_g_parents),
- CLK_SET_RATE_PARENT,
- clk_base + CCLKG_BURST_POLICY,
- 0, 4, 0, 0, NULL);
- clk_register_clkdev(clk, "cclk_g", NULL);
- clks[cclk_g] = clk;
-
- /* CCLKLP */
- clk = tegra_clk_register_super_mux("cclk_lp", cclk_lp_parents,
- ARRAY_SIZE(cclk_lp_parents),
- CLK_SET_RATE_PARENT,
- clk_base + CCLKLP_BURST_POLICY,
- 0, 4, 8, 9, NULL);
- clk_register_clkdev(clk, "cclk_lp", NULL);
- clks[cclk_lp] = clk;
-
- /* SCLK */
- clk = tegra_clk_register_super_mux("sclk", sclk_parents,
- ARRAY_SIZE(sclk_parents),
- CLK_SET_RATE_PARENT,
- clk_base + SCLK_BURST_POLICY,
- 0, 4, 0, 0, NULL);
- clk_register_clkdev(clk, "sclk", NULL);
- clks[sclk] = clk;
-
- /* HCLK */
- clk = clk_register_divider(NULL, "hclk_div", "sclk", 0,
- clk_base + SYSTEM_CLK_RATE, 4, 2, 0,
- &sysrate_lock);
- clk = clk_register_gate(NULL, "hclk", "hclk_div", CLK_SET_RATE_PARENT |
- CLK_IGNORE_UNUSED, clk_base + SYSTEM_CLK_RATE,
- 7, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
- clk_register_clkdev(clk, "hclk", NULL);
- clks[hclk] = clk;
-
- /* PCLK */
- clk = clk_register_divider(NULL, "pclk_div", "hclk", 0,
- clk_base + SYSTEM_CLK_RATE, 0, 2, 0,
- &sysrate_lock);
- clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT |
- CLK_IGNORE_UNUSED, clk_base + SYSTEM_CLK_RATE,
- 3, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
- clk_register_clkdev(clk, "pclk", NULL);
- clks[pclk] = clk;
-}
-
-static struct tegra_periph_init_data tegra_periph_clk_list[] = {
- TEGRA_INIT_DATA_MUX("i2s0", NULL, "tegra30-i2s.0", mux_pllaout0_audio0_2x_pllp_clkm, CLK_SOURCE_I2S0, 30, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s0),
- TEGRA_INIT_DATA_MUX("i2s1", NULL, "tegra30-i2s.1", mux_pllaout0_audio1_2x_pllp_clkm, CLK_SOURCE_I2S1, 11, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s1),
- TEGRA_INIT_DATA_MUX("i2s2", NULL, "tegra30-i2s.2", mux_pllaout0_audio2_2x_pllp_clkm, CLK_SOURCE_I2S2, 18, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s2),
- TEGRA_INIT_DATA_MUX("i2s3", NULL, "tegra30-i2s.3", mux_pllaout0_audio3_2x_pllp_clkm, CLK_SOURCE_I2S3, 101, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2s3),
- TEGRA_INIT_DATA_MUX("i2s4", NULL, "tegra30-i2s.4", mux_pllaout0_audio4_2x_pllp_clkm, CLK_SOURCE_I2S4, 102, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2s4),
- TEGRA_INIT_DATA_MUX("spdif_out", "spdif_out", "tegra30-spdif", mux_pllaout0_audio_2x_pllp_clkm, CLK_SOURCE_SPDIF_OUT, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_out),
- TEGRA_INIT_DATA_MUX("spdif_in", "spdif_in", "tegra30-spdif", mux_pllp_pllc_pllm, CLK_SOURCE_SPDIF_IN, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_in),
- TEGRA_INIT_DATA_MUX("pwm", NULL, "pwm", mux_pllp_pllc_clk32_clkm, CLK_SOURCE_PWM, 17, &periph_l_regs, TEGRA_PERIPH_ON_APB, pwm),
- TEGRA_INIT_DATA_MUX("adx", NULL, "adx", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX, 154, &periph_w_regs, TEGRA_PERIPH_ON_APB, adx),
- TEGRA_INIT_DATA_MUX("amx", NULL, "amx", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX, 153, &periph_w_regs, TEGRA_PERIPH_ON_APB, amx),
- TEGRA_INIT_DATA_MUX("hda", "hda", "tegra30-hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA, 125, &periph_v_regs, TEGRA_PERIPH_ON_APB, hda),
- TEGRA_INIT_DATA_MUX("hda2codec_2x", "hda2codec", "tegra30-hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, &periph_v_regs, TEGRA_PERIPH_ON_APB, hda2codec_2x),
- TEGRA_INIT_DATA_MUX("sbc1", NULL, "tegra11-spi.0", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC1, 41, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc1),
- TEGRA_INIT_DATA_MUX("sbc2", NULL, "tegra11-spi.1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC2, 44, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc2),
- TEGRA_INIT_DATA_MUX("sbc3", NULL, "tegra11-spi.2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC3, 46, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc3),
- TEGRA_INIT_DATA_MUX("sbc4", NULL, "tegra11-spi.3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC4, 68, &periph_u_regs, TEGRA_PERIPH_ON_APB, sbc4),
- TEGRA_INIT_DATA_MUX("sbc5", NULL, "tegra11-spi.4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC5, 104, &periph_v_regs, TEGRA_PERIPH_ON_APB, sbc5),
- TEGRA_INIT_DATA_MUX("sbc6", NULL, "tegra11-spi.5", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC6, 105, &periph_v_regs, TEGRA_PERIPH_ON_APB, sbc6),
- TEGRA_INIT_DATA_MUX8("ndflash", NULL, "tegra_nand", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_NDFLASH, 13, &periph_u_regs, TEGRA_PERIPH_ON_APB, ndspeed),
- TEGRA_INIT_DATA_MUX8("ndspeed", NULL, "tegra_nand_speed", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_NDSPEED, 80, &periph_u_regs, TEGRA_PERIPH_ON_APB, ndspeed),
- TEGRA_INIT_DATA_MUX("vfir", NULL, "vfir", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_VFIR, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, vfir),
- TEGRA_INIT_DATA_MUX("sdmmc1", NULL, "sdhci-tegra.0", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, &periph_l_regs, 0, sdmmc1),
- TEGRA_INIT_DATA_MUX("sdmmc2", NULL, "sdhci-tegra.1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, &periph_l_regs, 0, sdmmc2),
- TEGRA_INIT_DATA_MUX("sdmmc3", NULL, "sdhci-tegra.2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, &periph_u_regs, 0, sdmmc3),
- TEGRA_INIT_DATA_MUX("sdmmc4", NULL, "sdhci-tegra.3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, &periph_l_regs, 0, sdmmc4),
- TEGRA_INIT_DATA_INT("vde", NULL, "vde", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_VDE, 61, &periph_h_regs, 0, vde),
- TEGRA_INIT_DATA_MUX_FLAGS("csite", NULL, "csite", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_CSITE, 73, &periph_u_regs, TEGRA_PERIPH_ON_APB, csite, CLK_IGNORE_UNUSED),
- TEGRA_INIT_DATA_MUX("la", NULL, "la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, &periph_u_regs, TEGRA_PERIPH_ON_APB, la),
- TEGRA_INIT_DATA_MUX("trace", NULL, "trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, &periph_u_regs, TEGRA_PERIPH_ON_APB, trace),
- TEGRA_INIT_DATA_MUX("owr", NULL, "tegra_w1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, &periph_u_regs, TEGRA_PERIPH_ON_APB, owr),
- TEGRA_INIT_DATA_MUX("nor", NULL, "tegra-nor", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_NOR, 42, &periph_h_regs, 0, nor),
- TEGRA_INIT_DATA_MUX("mipi", NULL, "mipi", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_MIPI, 50, &periph_h_regs, TEGRA_PERIPH_ON_APB, mipi),
- TEGRA_INIT_DATA_I2C("i2c1", "div-clk", "tegra11-i2c.0", mux_pllp_clkm, CLK_SOURCE_I2C1, 12, &periph_l_regs, i2c1),
- TEGRA_INIT_DATA_I2C("i2c2", "div-clk", "tegra11-i2c.1", mux_pllp_clkm, CLK_SOURCE_I2C2, 54, &periph_h_regs, i2c2),
- TEGRA_INIT_DATA_I2C("i2c3", "div-clk", "tegra11-i2c.2", mux_pllp_clkm, CLK_SOURCE_I2C3, 67, &periph_u_regs, i2c3),
- TEGRA_INIT_DATA_I2C("i2c4", "div-clk", "tegra11-i2c.3", mux_pllp_clkm, CLK_SOURCE_I2C4, 103, &periph_v_regs, i2c4),
- TEGRA_INIT_DATA_I2C("i2c5", "div-clk", "tegra11-i2c.4", mux_pllp_clkm, CLK_SOURCE_I2C5, 47, &periph_h_regs, i2c5),
- TEGRA_INIT_DATA_UART("uarta", NULL, "tegra_uart.0", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTA, 6, &periph_l_regs, uarta),
- TEGRA_INIT_DATA_UART("uartb", NULL, "tegra_uart.1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, &periph_l_regs, uartb),
- TEGRA_INIT_DATA_UART("uartc", NULL, "tegra_uart.2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, &periph_h_regs, uartc),
- TEGRA_INIT_DATA_UART("uartd", NULL, "tegra_uart.3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTD, 65, &periph_u_regs, uartd),
- TEGRA_INIT_DATA_INT("3d", NULL, "3d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_3D, 24, &periph_l_regs, 0, gr_3d),
- TEGRA_INIT_DATA_INT("2d", NULL, "2d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_2D, 21, &periph_l_regs, 0, gr_2d),
- TEGRA_INIT_DATA_MUX("vi_sensor", "vi_sensor", "tegra_camera", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR, 20, &periph_l_regs, TEGRA_PERIPH_NO_RESET, vi_sensor),
- TEGRA_INIT_DATA_INT8("vi", "vi", "tegra_camera", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI, 20, &periph_l_regs, 0, vi),
- TEGRA_INIT_DATA_INT8("epp", NULL, "epp", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_EPP, 19, &periph_l_regs, 0, epp),
- TEGRA_INIT_DATA_INT8("msenc", NULL, "msenc", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_MSENC, 91, &periph_u_regs, TEGRA_PERIPH_WAR_1005168, msenc),
- TEGRA_INIT_DATA_INT8("tsec", NULL, "tsec", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_TSEC, 83, &periph_u_regs, 0, tsec),
- TEGRA_INIT_DATA_INT8("host1x", NULL, "host1x", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_HOST1X, 28, &periph_l_regs, 0, host1x),
- TEGRA_INIT_DATA_MUX8("hdmi", NULL, "hdmi", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_HDMI, 51, &periph_h_regs, 0, hdmi),
- TEGRA_INIT_DATA_MUX("cilab", "cilab", "tegra_camera", mux_pllp_pllc_clkm, CLK_SOURCE_CILAB, 144, &periph_w_regs, 0, cilab),
- TEGRA_INIT_DATA_MUX("cilcd", "cilcd", "tegra_camera", mux_pllp_pllc_clkm, CLK_SOURCE_CILCD, 145, &periph_w_regs, 0, cilcd),
- TEGRA_INIT_DATA_MUX("cile", "cile", "tegra_camera", mux_pllp_pllc_clkm, CLK_SOURCE_CILE, 146, &periph_w_regs, 0, cile),
- TEGRA_INIT_DATA_MUX("dsialp", "dsialp", "tegradc.0", mux_pllp_pllc_clkm, CLK_SOURCE_DSIALP, 147, &periph_w_regs, 0, dsialp),
- TEGRA_INIT_DATA_MUX("dsiblp", "dsiblp", "tegradc.1", mux_pllp_pllc_clkm, CLK_SOURCE_DSIBLP, 148, &periph_w_regs, 0, dsiblp),
- TEGRA_INIT_DATA_MUX("tsensor", NULL, "tegra-tsensor", mux_pllp_pllc_clkm_clk32, CLK_SOURCE_TSENSOR, 100, &periph_v_regs, TEGRA_PERIPH_ON_APB, tsensor),
- TEGRA_INIT_DATA_MUX("actmon", NULL, "actmon", mux_pllp_pllc_clk32_clkm, CLK_SOURCE_ACTMON, 119, &periph_v_regs, 0, actmon),
- TEGRA_INIT_DATA_MUX8("extern1", NULL, "extern1", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN1, 120, &periph_v_regs, 0, extern1),
- TEGRA_INIT_DATA_MUX8("extern2", NULL, "extern2", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN2, 121, &periph_v_regs, 0, extern2),
- TEGRA_INIT_DATA_MUX8("extern3", NULL, "extern3", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN3, 122, &periph_v_regs, 0, extern3),
- TEGRA_INIT_DATA_MUX("i2cslow", NULL, "i2cslow", mux_pllp_pllc_clk32_clkm, CLK_SOURCE_I2CSLOW, 81, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2cslow),
- TEGRA_INIT_DATA_INT8("se", NULL, "se", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SE, 127, &periph_v_regs, TEGRA_PERIPH_ON_APB, se),
- TEGRA_INIT_DATA_INT_FLAGS("mselect", NULL, "mselect", mux_pllp_clkm, CLK_SOURCE_MSELECT, 99, &periph_v_regs, 0, mselect, CLK_IGNORE_UNUSED),
- TEGRA_INIT_DATA_MUX("dfll_ref", "ref", "t114_dfll", mux_pllp_clkm, CLK_SOURCE_DFLL_REF, 155, &periph_w_regs, TEGRA_PERIPH_ON_APB, dfll_ref),
- TEGRA_INIT_DATA_MUX("dfll_soc", "soc", "t114_dfll", mux_pllp_clkm, CLK_SOURCE_DFLL_SOC, 155, &periph_w_regs, TEGRA_PERIPH_ON_APB, dfll_soc),
- TEGRA_INIT_DATA_MUX8("soc_therm", NULL, "soc_therm", mux_pllm_pllc_pllp_plla, CLK_SOURCE_SOC_THERM, 78, &periph_u_regs, TEGRA_PERIPH_ON_APB, soc_therm),
- TEGRA_INIT_DATA_XUSB("xusb_host_src", "host_src", "tegra_xhci", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_HOST_SRC, 143, &periph_w_regs, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, xusb_host_src),
- TEGRA_INIT_DATA_XUSB("xusb_falcon_src", "falcon_src", "tegra_xhci", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_FALCON_SRC, 143, &periph_w_regs, TEGRA_PERIPH_NO_RESET, xusb_falcon_src),
- TEGRA_INIT_DATA_XUSB("xusb_fs_src", "fs_src", "tegra_xhci", mux_clkm_48M_pllp_480M, CLK_SOURCE_XUSB_FS_SRC, 143, &periph_w_regs, TEGRA_PERIPH_NO_RESET, xusb_fs_src),
- TEGRA_INIT_DATA_XUSB("xusb_ss_src", "ss_src", "tegra_xhci", mux_clkm_pllre_clk32_480M_pllc_ref, CLK_SOURCE_XUSB_SS_SRC, 143, &periph_w_regs, TEGRA_PERIPH_NO_RESET, xusb_ss_src),
- TEGRA_INIT_DATA_XUSB("xusb_dev_src", "dev_src", "tegra_xhci", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, &periph_u_regs, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, xusb_dev_src),
- TEGRA_INIT_DATA_AUDIO("d_audio", "d_audio", "tegra30-ahub", CLK_SOURCE_D_AUDIO, 106, &periph_v_regs, TEGRA_PERIPH_ON_APB, d_audio),
- TEGRA_INIT_DATA_AUDIO("dam0", NULL, "tegra30-dam.0", CLK_SOURCE_DAM0, 108, &periph_v_regs, TEGRA_PERIPH_ON_APB, dam0),
- TEGRA_INIT_DATA_AUDIO("dam1", NULL, "tegra30-dam.1", CLK_SOURCE_DAM1, 109, &periph_v_regs, TEGRA_PERIPH_ON_APB, dam1),
- TEGRA_INIT_DATA_AUDIO("dam2", NULL, "tegra30-dam.2", CLK_SOURCE_DAM2, 110, &periph_v_regs, TEGRA_PERIPH_ON_APB, dam2),
-};
-
-static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
- TEGRA_INIT_DATA_NODIV("disp1", NULL, "tegradc.0", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_DISP1, 29, 7, 27, &periph_l_regs, 0, disp1),
- TEGRA_INIT_DATA_NODIV("disp2", NULL, "tegradc.1", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_DISP2, 29, 7, 26, &periph_l_regs, 0, disp2),
-};
+ /* xusb_hs_src */
+ val = readl(clk_base + CLK_SOURCE_XUSB_SS_SRC);
+ val |= BIT(25); /* always select PLLU_60M */
+ writel(val, clk_base + CLK_SOURCE_XUSB_SS_SRC);
-static __init void tegra114_periph_clk_init(void __iomem *clk_base)
-{
- struct tegra_periph_init_data *data;
- struct clk *clk;
- int i;
- u32 val;
+ clk = clk_register_fixed_factor(NULL, "xusb_hs_src", "pll_u_60M", 0,
+ 1, 1);
+ clks[TEGRA114_CLK_XUSB_HS_SRC] = clk;
- /* apbdma */
- clk = tegra_clk_register_periph_gate("apbdma", "clk_m", 0, clk_base,
- 0, 34, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[apbdma] = clk;
-
- /* rtc */
- clk = tegra_clk_register_periph_gate("rtc", "clk_32k",
- TEGRA_PERIPH_ON_APB |
- TEGRA_PERIPH_NO_RESET, clk_base,
- 0, 4, &periph_l_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "rtc-tegra");
- clks[rtc] = clk;
-
- /* kbc */
- clk = tegra_clk_register_periph_gate("kbc", "clk_32k",
- TEGRA_PERIPH_ON_APB |
- TEGRA_PERIPH_NO_RESET, clk_base,
- 0, 36, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[kbc] = clk;
-
- /* timer */
- clk = tegra_clk_register_periph_gate("timer", "clk_m", 0, clk_base,
- 0, 5, &periph_l_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "timer");
- clks[timer] = clk;
-
- /* kfuse */
- clk = tegra_clk_register_periph_gate("kfuse", "clk_m",
- TEGRA_PERIPH_ON_APB, clk_base, 0, 40,
- &periph_h_regs, periph_clk_enb_refcnt);
- clks[kfuse] = clk;
-
- /* fuse */
- clk = tegra_clk_register_periph_gate("fuse", "clk_m",
- TEGRA_PERIPH_ON_APB, clk_base, 0, 39,
- &periph_h_regs, periph_clk_enb_refcnt);
- clks[fuse] = clk;
-
- /* fuse_burn */
- clk = tegra_clk_register_periph_gate("fuse_burn", "clk_m",
- TEGRA_PERIPH_ON_APB, clk_base, 0, 39,
- &periph_h_regs, periph_clk_enb_refcnt);
- clks[fuse_burn] = clk;
-
- /* apbif */
- clk = tegra_clk_register_periph_gate("apbif", "clk_m",
- TEGRA_PERIPH_ON_APB, clk_base, 0, 107,
- &periph_v_regs, periph_clk_enb_refcnt);
- clks[apbif] = clk;
-
- /* hda2hdmi */
- clk = tegra_clk_register_periph_gate("hda2hdmi", "clk_m",
- TEGRA_PERIPH_ON_APB, clk_base, 0, 128,
- &periph_w_regs, periph_clk_enb_refcnt);
- clks[hda2hdmi] = clk;
-
- /* vcp */
- clk = tegra_clk_register_periph_gate("vcp", "clk_m", 0, clk_base, 0,
- 29, &periph_l_regs,
- periph_clk_enb_refcnt);
- clks[vcp] = clk;
-
- /* bsea */
- clk = tegra_clk_register_periph_gate("bsea", "clk_m", 0, clk_base,
- 0, 62, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[bsea] = clk;
-
- /* bsev */
- clk = tegra_clk_register_periph_gate("bsev", "clk_m", 0, clk_base,
- 0, 63, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[bsev] = clk;
-
- /* mipi-cal */
- clk = tegra_clk_register_periph_gate("mipi-cal", "clk_m", 0, clk_base,
- 0, 56, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[mipi_cal] = clk;
-
- /* usbd */
- clk = tegra_clk_register_periph_gate("usbd", "clk_m", 0, clk_base,
- 0, 22, &periph_l_regs,
- periph_clk_enb_refcnt);
- clks[usbd] = clk;
-
- /* usb2 */
- clk = tegra_clk_register_periph_gate("usb2", "clk_m", 0, clk_base,
- 0, 58, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[usb2] = clk;
-
- /* usb3 */
- clk = tegra_clk_register_periph_gate("usb3", "clk_m", 0, clk_base,
- 0, 59, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[usb3] = clk;
-
- /* csi */
- clk = tegra_clk_register_periph_gate("csi", "pll_p_out3", 0, clk_base,
- 0, 52, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[csi] = clk;
-
- /* isp */
- clk = tegra_clk_register_periph_gate("isp", "clk_m", 0, clk_base, 0,
- 23, &periph_l_regs,
- periph_clk_enb_refcnt);
- clks[isp] = clk;
-
- /* csus */
- clk = tegra_clk_register_periph_gate("csus", "clk_m",
- TEGRA_PERIPH_NO_RESET, clk_base, 0, 92,
- &periph_u_regs, periph_clk_enb_refcnt);
- clks[csus] = clk;
-
- /* dds */
- clk = tegra_clk_register_periph_gate("dds", "clk_m",
- TEGRA_PERIPH_ON_APB, clk_base, 0, 150,
- &periph_w_regs, periph_clk_enb_refcnt);
- clks[dds] = clk;
-
- /* dp2 */
- clk = tegra_clk_register_periph_gate("dp2", "clk_m",
- TEGRA_PERIPH_ON_APB, clk_base, 0, 152,
- &periph_w_regs, periph_clk_enb_refcnt);
- clks[dp2] = clk;
-
- /* dtv */
- clk = tegra_clk_register_periph_gate("dtv", "clk_m",
- TEGRA_PERIPH_ON_APB, clk_base, 0, 79,
- &periph_u_regs, periph_clk_enb_refcnt);
- clks[dtv] = clk;
-
- /* dsia */
+ /* dsia mux */
clk = clk_register_mux(NULL, "dsia_mux", mux_plld_out0_plld2_out0,
ARRAY_SIZE(mux_plld_out0_plld2_out0),
CLK_SET_RATE_NO_REPARENT,
clk_base + PLLD_BASE, 25, 1, 0, &pll_d_lock);
- clks[dsia_mux] = clk;
- clk = tegra_clk_register_periph_gate("dsia", "dsia_mux", 0, clk_base,
- 0, 48, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[dsia] = clk;
+ clks[TEGRA114_CLK_DSIA_MUX] = clk;
- /* dsib */
+ /* dsib mux */
clk = clk_register_mux(NULL, "dsib_mux", mux_plld_out0_plld2_out0,
ARRAY_SIZE(mux_plld_out0_plld2_out0),
CLK_SET_RATE_NO_REPARENT,
clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
- clks[dsib_mux] = clk;
- clk = tegra_clk_register_periph_gate("dsib", "dsib_mux", 0, clk_base,
- 0, 82, &periph_u_regs,
- periph_clk_enb_refcnt);
- clks[dsib] = clk;
+ clks[TEGRA114_CLK_DSIB_MUX] = clk;
- /* xusb_hs_src */
- val = readl(clk_base + CLK_SOURCE_XUSB_SS_SRC);
- val |= BIT(25); /* always select PLLU_60M */
- writel(val, clk_base + CLK_SOURCE_XUSB_SS_SRC);
-
- clk = clk_register_fixed_factor(NULL, "xusb_hs_src", "pll_u_60M", 0,
- 1, 1);
- clks[xusb_hs_src] = clk;
-
- /* xusb_host */
- clk = tegra_clk_register_periph_gate("xusb_host", "xusb_host_src", 0,
- clk_base, 0, 89, &periph_u_regs,
- periph_clk_enb_refcnt);
- clks[xusb_host] = clk;
-
- /* xusb_ss */
- clk = tegra_clk_register_periph_gate("xusb_ss", "xusb_ss_src", 0,
- clk_base, 0, 156, &periph_w_regs,
- periph_clk_enb_refcnt);
- clks[xusb_host] = clk;
-
- /* xusb_dev */
- clk = tegra_clk_register_periph_gate("xusb_dev", "xusb_dev_src", 0,
- clk_base, 0, 95, &periph_u_regs,
- periph_clk_enb_refcnt);
- clks[xusb_dev] = clk;
-
- /* emc */
+ /* emc mux */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
ARRAY_SIZE(mux_pllmcp_clkm),
CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
29, 3, 0, NULL);
- clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base,
- CLK_IGNORE_UNUSED, 57, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[emc] = clk;
-
- for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
- data = &tegra_periph_clk_list[i];
- clk = tegra_clk_register_periph(data->name, data->parent_names,
- data->num_parents, &data->periph,
- clk_base, data->offset, data->flags);
- clks[data->clk_id] = clk;
- }
- for (i = 0; i < ARRAY_SIZE(tegra_periph_nodiv_clk_list); i++) {
- data = &tegra_periph_nodiv_clk_list[i];
- clk = tegra_clk_register_periph_nodiv(data->name,
- data->parent_names, data->num_parents,
- &data->periph, clk_base, data->offset);
- clks[data->clk_id] = clk;
- }
+ tegra_periph_clk_init(clk_base, pmc_base, tegra114_clks,
+ &pll_p_params);
}
/* Tegra114 CPU clock and reset control functions */
@@ -2207,28 +1278,37 @@ static const struct of_device_id pmc_match[] __initconst = {
* breaks
*/
static struct tegra_clk_init_table init_table[] __initdata = {
- {uarta, pll_p, 408000000, 0},
- {uartb, pll_p, 408000000, 0},
- {uartc, pll_p, 408000000, 0},
- {uartd, pll_p, 408000000, 0},
- {pll_a, clk_max, 564480000, 1},
- {pll_a_out0, clk_max, 11289600, 1},
- {extern1, pll_a_out0, 0, 1},
- {clk_out_1_mux, extern1, 0, 1},
- {clk_out_1, clk_max, 0, 1},
- {i2s0, pll_a_out0, 11289600, 0},
- {i2s1, pll_a_out0, 11289600, 0},
- {i2s2, pll_a_out0, 11289600, 0},
- {i2s3, pll_a_out0, 11289600, 0},
- {i2s4, pll_a_out0, 11289600, 0},
- {dfll_soc, pll_p, 51000000, 1},
- {dfll_ref, pll_p, 51000000, 1},
- {clk_max, clk_max, 0, 0}, /* This MUST be the last entry. */
+ {TEGRA114_CLK_UARTA, TEGRA114_CLK_PLL_P, 408000000, 0},
+ {TEGRA114_CLK_UARTB, TEGRA114_CLK_PLL_P, 408000000, 0},
+ {TEGRA114_CLK_UARTC, TEGRA114_CLK_PLL_P, 408000000, 0},
+ {TEGRA114_CLK_UARTD, TEGRA114_CLK_PLL_P, 408000000, 0},
+ {TEGRA114_CLK_PLL_A, TEGRA114_CLK_CLK_MAX, 564480000, 1},
+ {TEGRA114_CLK_PLL_A_OUT0, TEGRA114_CLK_CLK_MAX, 11289600, 1},
+ {TEGRA114_CLK_EXTERN1, TEGRA114_CLK_PLL_A_OUT0, 0, 1},
+ {TEGRA114_CLK_CLK_OUT_1_MUX, TEGRA114_CLK_EXTERN1, 0, 1},
+ {TEGRA114_CLK_CLK_OUT_1, TEGRA114_CLK_CLK_MAX, 0, 1},
+ {TEGRA114_CLK_I2S0, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA114_CLK_I2S1, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA114_CLK_I2S2, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA114_CLK_I2S3, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA114_CLK_I2S4, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA114_CLK_HOST1X, TEGRA114_CLK_PLL_P, 136000000, 0},
+ {TEGRA114_CLK_DFLL_SOC, TEGRA114_CLK_PLL_P, 51000000, 1},
+ {TEGRA114_CLK_DFLL_REF, TEGRA114_CLK_PLL_P, 51000000, 1},
+ {TEGRA114_CLK_DISP1, TEGRA114_CLK_PLL_P, 0, 0},
+ {TEGRA114_CLK_DISP2, TEGRA114_CLK_PLL_P, 0, 0},
+ {TEGRA114_CLK_GR2D, TEGRA114_CLK_PLL_C2, 300000000, 0},
+ {TEGRA114_CLK_GR3D, TEGRA114_CLK_PLL_C2, 300000000, 0},
+ {TEGRA114_CLK_DSIALP, TEGRA114_CLK_PLL_P, 68000000, 0},
+ {TEGRA114_CLK_DSIBLP, TEGRA114_CLK_PLL_P, 68000000, 0},
+
+ /* This MUST be the last entry. */
+ {TEGRA114_CLK_CLK_MAX, TEGRA114_CLK_CLK_MAX, 0, 0},
};
static void __init tegra114_clock_apply_init_table(void)
{
- tegra_init_from_table(init_table, clks, clk_max);
+ tegra_init_from_table(init_table, clks, TEGRA114_CLK_CLK_MAX);
}
@@ -2359,7 +1439,6 @@ EXPORT_SYMBOL(tegra114_clock_deassert_dfll_dvco_reset);
static void __init tegra114_clock_init(struct device_node *np)
{
struct device_node *node;
- int i;
clk_base = of_iomap(np, 0);
if (!clk_base) {
@@ -2381,29 +1460,24 @@ static void __init tegra114_clock_init(struct device_node *np)
return;
}
+ clks = tegra_clk_init(clk_base, TEGRA114_CLK_CLK_MAX,
+ TEGRA114_CLK_PERIPH_BANKS);
+ if (!clks)
+ return;
+
if (tegra114_osc_clk_init(clk_base) < 0)
return;
tegra114_fixed_clk_init(clk_base);
tegra114_pll_init(clk_base, pmc_base);
- tegra114_periph_clk_init(clk_base);
- tegra114_audio_clk_init(clk_base);
- tegra114_pmc_clk_init(pmc_base);
- tegra114_super_clk_init(clk_base);
-
- for (i = 0; i < ARRAY_SIZE(clks); i++) {
- if (IS_ERR(clks[i])) {
- pr_err
- ("Tegra114 clk %d: register failed with %ld\n",
- i, PTR_ERR(clks[i]));
- }
- if (!clks[i])
- clks[i] = ERR_PTR(-EINVAL);
- }
-
- clk_data.clks = clks;
- clk_data.clk_num = ARRAY_SIZE(clks);
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ tegra114_periph_clk_init(clk_base, pmc_base);
+ tegra_audio_clk_init(clk_base, pmc_base, tegra114_clks, &pll_a_params);
+ tegra_pmc_clk_init(pmc_base, tegra114_clks);
+ tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks,
+ &pll_x_params);
+
+ tegra_add_of_provider(np);
+ tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
tegra_clk_apply_init_table = tegra114_clock_apply_init_table;
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
new file mode 100644
index 000000000000..aff86b5bc745
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -0,0 +1,1424 @@
+/*
+ * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/clk/tegra.h>
+#include <dt-bindings/clock/tegra124-car.h>
+
+#include "clk.h"
+#include "clk-id.h"
+
+#define CLK_SOURCE_CSITE 0x1d4
+#define CLK_SOURCE_EMC 0x19c
+#define CLK_SOURCE_XUSB_SS_SRC 0x610
+
+#define PLLC_BASE 0x80
+#define PLLC_OUT 0x84
+#define PLLC_MISC2 0x88
+#define PLLC_MISC 0x8c
+#define PLLC2_BASE 0x4e8
+#define PLLC2_MISC 0x4ec
+#define PLLC3_BASE 0x4fc
+#define PLLC3_MISC 0x500
+#define PLLM_BASE 0x90
+#define PLLM_OUT 0x94
+#define PLLM_MISC 0x9c
+#define PLLP_BASE 0xa0
+#define PLLP_MISC 0xac
+#define PLLA_BASE 0xb0
+#define PLLA_MISC 0xbc
+#define PLLD_BASE 0xd0
+#define PLLD_MISC 0xdc
+#define PLLU_BASE 0xc0
+#define PLLU_MISC 0xcc
+#define PLLX_BASE 0xe0
+#define PLLX_MISC 0xe4
+#define PLLX_MISC2 0x514
+#define PLLX_MISC3 0x518
+#define PLLE_BASE 0xe8
+#define PLLE_MISC 0xec
+#define PLLD2_BASE 0x4b8
+#define PLLD2_MISC 0x4bc
+#define PLLE_AUX 0x48c
+#define PLLRE_BASE 0x4c4
+#define PLLRE_MISC 0x4c8
+#define PLLDP_BASE 0x590
+#define PLLDP_MISC 0x594
+#define PLLC4_BASE 0x5a4
+#define PLLC4_MISC 0x5a8
+
+#define PLLC_IDDQ_BIT 26
+#define PLLRE_IDDQ_BIT 16
+#define PLLSS_IDDQ_BIT 19
+
+#define PLL_BASE_LOCK BIT(27)
+#define PLLE_MISC_LOCK BIT(11)
+#define PLLRE_MISC_LOCK BIT(24)
+
+#define PLL_MISC_LOCK_ENABLE 18
+#define PLLC_MISC_LOCK_ENABLE 24
+#define PLLDU_MISC_LOCK_ENABLE 22
+#define PLLE_MISC_LOCK_ENABLE 9
+#define PLLRE_MISC_LOCK_ENABLE 30
+#define PLLSS_MISC_LOCK_ENABLE 30
+
+#define PLLXC_SW_MAX_P 6
+
+#define PMC_PLLM_WB0_OVERRIDE 0x1dc
+#define PMC_PLLM_WB0_OVERRIDE_2 0x2b0
+
+#define UTMIP_PLL_CFG2 0x488
+#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6)
+#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4)
+
+#define UTMIP_PLL_CFG1 0x484
+#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 6)
+#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
+#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP BIT(17)
+#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP BIT(15)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12)
+
+#define UTMIPLL_HW_PWRDN_CFG0 0x52c
+#define UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE BIT(25)
+#define UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE BIT(24)
+#define UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET BIT(6)
+#define UTMIPLL_HW_PWRDN_CFG0_SEQ_RESET_INPUT_VALUE BIT(5)
+#define UTMIPLL_HW_PWRDN_CFG0_SEQ_IN_SWCTL BIT(4)
+#define UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL BIT(2)
+#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE BIT(1)
+#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL BIT(0)
+
+/* Tegra CPU clock and reset control regs */
+#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS 0x470
+
+#ifdef CONFIG_PM_SLEEP
+static struct cpu_clk_suspend_context {
+ u32 clk_csite_src;
+} tegra124_cpu_clk_sctx;
+#endif
+
+static void __iomem *clk_base;
+static void __iomem *pmc_base;
+
+static unsigned long osc_freq;
+static unsigned long pll_ref_freq;
+
+static DEFINE_SPINLOCK(pll_d_lock);
+static DEFINE_SPINLOCK(pll_d2_lock);
+static DEFINE_SPINLOCK(pll_e_lock);
+static DEFINE_SPINLOCK(pll_re_lock);
+static DEFINE_SPINLOCK(pll_u_lock);
+
+/* possible OSC frequencies in Hz */
+static unsigned long tegra124_input_freq[] = {
+ [0] = 13000000,
+ [1] = 16800000,
+ [4] = 19200000,
+ [5] = 38400000,
+ [8] = 12000000,
+ [9] = 48000000,
+ [12] = 260000000,
+};
+
+static const char *mux_plld_out0_plld2_out0[] = {
+ "pll_d_out0", "pll_d2_out0",
+};
+#define mux_plld_out0_plld2_out0_idx NULL
+
+static const char *mux_pllmcp_clkm[] = {
+ "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3",
+};
+#define mux_pllmcp_clkm_idx NULL
+
+static struct div_nmp pllxc_nmp = {
+ .divm_shift = 0,
+ .divm_width = 8,
+ .divn_shift = 8,
+ .divn_width = 8,
+ .divp_shift = 20,
+ .divp_width = 4,
+};
+
+static struct pdiv_map pllxc_p[] = {
+ { .pdiv = 1, .hw_val = 0 },
+ { .pdiv = 2, .hw_val = 1 },
+ { .pdiv = 3, .hw_val = 2 },
+ { .pdiv = 4, .hw_val = 3 },
+ { .pdiv = 5, .hw_val = 4 },
+ { .pdiv = 6, .hw_val = 5 },
+ { .pdiv = 8, .hw_val = 6 },
+ { .pdiv = 10, .hw_val = 7 },
+ { .pdiv = 12, .hw_val = 8 },
+ { .pdiv = 16, .hw_val = 9 },
+ { .pdiv = 12, .hw_val = 10 },
+ { .pdiv = 16, .hw_val = 11 },
+ { .pdiv = 20, .hw_val = 12 },
+ { .pdiv = 24, .hw_val = 13 },
+ { .pdiv = 32, .hw_val = 14 },
+ { .pdiv = 0, .hw_val = 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_x_freq_table[] = {
+ /* 1 GHz */
+ {12000000, 1000000000, 83, 0, 1}, /* actual: 996.0 MHz */
+ {13000000, 1000000000, 76, 0, 1}, /* actual: 988.0 MHz */
+ {16800000, 1000000000, 59, 0, 1}, /* actual: 991.2 MHz */
+ {19200000, 1000000000, 52, 0, 1}, /* actual: 998.4 MHz */
+ {26000000, 1000000000, 76, 1, 1}, /* actual: 988.0 MHz */
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_x_params = {
+ .input_min = 12000000,
+ .input_max = 800000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 50 MHz */
+ .vco_min = 700000000,
+ .vco_max = 3000000000UL,
+ .base_reg = PLLX_BASE,
+ .misc_reg = PLLX_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .iddq_reg = PLLX_MISC3,
+ .iddq_bit_idx = 3,
+ .max_p = 6,
+ .dyn_ramp_reg = PLLX_MISC2,
+ .stepa_shift = 16,
+ .stepb_shift = 24,
+ .pdiv_tohw = pllxc_p,
+ .div_nmp = &pllxc_nmp,
+ .freq_table = pll_x_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
+};
+
+static struct tegra_clk_pll_freq_table pll_c_freq_table[] = {
+ { 12000000, 624000000, 104, 1, 2},
+ { 12000000, 600000000, 100, 1, 2},
+ { 13000000, 600000000, 92, 1, 2}, /* actual: 598.0 MHz */
+ { 16800000, 600000000, 71, 1, 2}, /* actual: 596.4 MHz */
+ { 19200000, 600000000, 62, 1, 2}, /* actual: 595.2 MHz */
+ { 26000000, 600000000, 92, 2, 2}, /* actual: 598.0 MHz */
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_params pll_c_params = {
+ .input_min = 12000000,
+ .input_max = 800000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 50 MHz */
+ .vco_min = 600000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLC_BASE,
+ .misc_reg = PLLC_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLC_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .iddq_reg = PLLC_MISC,
+ .iddq_bit_idx = PLLC_IDDQ_BIT,
+ .max_p = PLLXC_SW_MAX_P,
+ .dyn_ramp_reg = PLLC_MISC2,
+ .stepa_shift = 17,
+ .stepb_shift = 9,
+ .pdiv_tohw = pllxc_p,
+ .div_nmp = &pllxc_nmp,
+ .freq_table = pll_c_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
+};
+
+static struct div_nmp pllcx_nmp = {
+ .divm_shift = 0,
+ .divm_width = 2,
+ .divn_shift = 8,
+ .divn_width = 8,
+ .divp_shift = 20,
+ .divp_width = 3,
+};
+
+static struct pdiv_map pllc_p[] = {
+ { .pdiv = 1, .hw_val = 0 },
+ { .pdiv = 2, .hw_val = 1 },
+ { .pdiv = 3, .hw_val = 2 },
+ { .pdiv = 4, .hw_val = 3 },
+ { .pdiv = 6, .hw_val = 4 },
+ { .pdiv = 8, .hw_val = 5 },
+ { .pdiv = 12, .hw_val = 6 },
+ { .pdiv = 16, .hw_val = 7 },
+ { .pdiv = 0, .hw_val = 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_cx_freq_table[] = {
+ {12000000, 600000000, 100, 1, 2},
+ {13000000, 600000000, 92, 1, 2}, /* actual: 598.0 MHz */
+ {16800000, 600000000, 71, 1, 2}, /* actual: 596.4 MHz */
+ {19200000, 600000000, 62, 1, 2}, /* actual: 595.2 MHz */
+ {26000000, 600000000, 92, 2, 2}, /* actual: 598.0 MHz */
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_c2_params = {
+ .input_min = 12000000,
+ .input_max = 48000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000,
+ .vco_min = 600000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLC2_BASE,
+ .misc_reg = PLLC2_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .pdiv_tohw = pllc_p,
+ .div_nmp = &pllcx_nmp,
+ .max_p = 7,
+ .ext_misc_reg[0] = 0x4f0,
+ .ext_misc_reg[1] = 0x4f4,
+ .ext_misc_reg[2] = 0x4f8,
+ .freq_table = pll_cx_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
+};
+
+static struct tegra_clk_pll_params pll_c3_params = {
+ .input_min = 12000000,
+ .input_max = 48000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000,
+ .vco_min = 600000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLC3_BASE,
+ .misc_reg = PLLC3_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .pdiv_tohw = pllc_p,
+ .div_nmp = &pllcx_nmp,
+ .max_p = 7,
+ .ext_misc_reg[0] = 0x504,
+ .ext_misc_reg[1] = 0x508,
+ .ext_misc_reg[2] = 0x50c,
+ .freq_table = pll_cx_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
+};
+
+static struct div_nmp pllss_nmp = {
+ .divm_shift = 0,
+ .divm_width = 8,
+ .divn_shift = 8,
+ .divn_width = 8,
+ .divp_shift = 20,
+ .divp_width = 4,
+};
+
+static struct pdiv_map pll12g_ssd_esd_p[] = {
+ { .pdiv = 1, .hw_val = 0 },
+ { .pdiv = 2, .hw_val = 1 },
+ { .pdiv = 3, .hw_val = 2 },
+ { .pdiv = 4, .hw_val = 3 },
+ { .pdiv = 5, .hw_val = 4 },
+ { .pdiv = 6, .hw_val = 5 },
+ { .pdiv = 8, .hw_val = 6 },
+ { .pdiv = 10, .hw_val = 7 },
+ { .pdiv = 12, .hw_val = 8 },
+ { .pdiv = 16, .hw_val = 9 },
+ { .pdiv = 12, .hw_val = 10 },
+ { .pdiv = 16, .hw_val = 11 },
+ { .pdiv = 20, .hw_val = 12 },
+ { .pdiv = 24, .hw_val = 13 },
+ { .pdiv = 32, .hw_val = 14 },
+ { .pdiv = 0, .hw_val = 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_c4_freq_table[] = {
+ { 12000000, 600000000, 100, 1, 1},
+ { 13000000, 600000000, 92, 1, 1}, /* actual: 598.0 MHz */
+ { 16800000, 600000000, 71, 1, 1}, /* actual: 596.4 MHz */
+ { 19200000, 600000000, 62, 1, 1}, /* actual: 595.2 MHz */
+ { 26000000, 600000000, 92, 2, 1}, /* actual: 598.0 MHz */
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_params pll_c4_params = {
+ .input_min = 12000000,
+ .input_max = 1000000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 38 MHz */
+ .vco_min = 600000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLC4_BASE,
+ .misc_reg = PLLC4_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .iddq_reg = PLLC4_BASE,
+ .iddq_bit_idx = PLLSS_IDDQ_BIT,
+ .pdiv_tohw = pll12g_ssd_esd_p,
+ .div_nmp = &pllss_nmp,
+ .ext_misc_reg[0] = 0x5ac,
+ .ext_misc_reg[1] = 0x5b0,
+ .ext_misc_reg[2] = 0x5b4,
+ .freq_table = pll_c4_freq_table,
+};
+
+static struct pdiv_map pllm_p[] = {
+ { .pdiv = 1, .hw_val = 0 },
+ { .pdiv = 2, .hw_val = 1 },
+ { .pdiv = 0, .hw_val = 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_m_freq_table[] = {
+ {12000000, 800000000, 66, 1, 1}, /* actual: 792.0 MHz */
+ {13000000, 800000000, 61, 1, 1}, /* actual: 793.0 MHz */
+ {16800000, 800000000, 47, 1, 1}, /* actual: 789.6 MHz */
+ {19200000, 800000000, 41, 1, 1}, /* actual: 787.2 MHz */
+ {26000000, 800000000, 61, 2, 1}, /* actual: 793.0 MHz */
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct div_nmp pllm_nmp = {
+ .divm_shift = 0,
+ .divm_width = 8,
+ .override_divm_shift = 0,
+ .divn_shift = 8,
+ .divn_width = 8,
+ .override_divn_shift = 8,
+ .divp_shift = 20,
+ .divp_width = 1,
+ .override_divp_shift = 27,
+};
+
+static struct tegra_clk_pll_params pll_m_params = {
+ .input_min = 12000000,
+ .input_max = 500000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 50 MHz */
+ .vco_min = 400000000,
+ .vco_max = 1066000000,
+ .base_reg = PLLM_BASE,
+ .misc_reg = PLLM_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .max_p = 2,
+ .pdiv_tohw = pllm_p,
+ .div_nmp = &pllm_nmp,
+ .pmc_divnm_reg = PMC_PLLM_WB0_OVERRIDE,
+ .pmc_divp_reg = PMC_PLLM_WB0_OVERRIDE_2,
+ .freq_table = pll_m_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
+};
+
+static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
+ /* PLLE special case: use cpcon field to store cml divider value */
+ {336000000, 100000000, 100, 21, 16, 11},
+ {312000000, 100000000, 200, 26, 24, 13},
+ {13000000, 100000000, 200, 1, 26, 13},
+ {12000000, 100000000, 200, 1, 24, 13},
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct div_nmp plle_nmp = {
+ .divm_shift = 0,
+ .divm_width = 8,
+ .divn_shift = 8,
+ .divn_width = 8,
+ .divp_shift = 24,
+ .divp_width = 4,
+};
+
+static struct tegra_clk_pll_params pll_e_params = {
+ .input_min = 12000000,
+ .input_max = 1000000000,
+ .cf_min = 12000000,
+ .cf_max = 75000000,
+ .vco_min = 1600000000,
+ .vco_max = 2400000000U,
+ .base_reg = PLLE_BASE,
+ .misc_reg = PLLE_MISC,
+ .aux_reg = PLLE_AUX,
+ .lock_mask = PLLE_MISC_LOCK,
+ .lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .div_nmp = &plle_nmp,
+ .freq_table = pll_e_freq_table,
+ .flags = TEGRA_PLL_FIXED,
+ .fixed_rate = 100000000,
+};
+
+static const struct clk_div_table pll_re_div_table[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 3 },
+ { .val = 3, .div = 4 },
+ { .val = 4, .div = 5 },
+ { .val = 5, .div = 6 },
+ { .val = 0, .div = 0 },
+};
+
+static struct div_nmp pllre_nmp = {
+ .divm_shift = 0,
+ .divm_width = 8,
+ .divn_shift = 8,
+ .divn_width = 8,
+ .divp_shift = 16,
+ .divp_width = 4,
+};
+
+static struct tegra_clk_pll_params pll_re_vco_params = {
+ .input_min = 12000000,
+ .input_max = 1000000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 38 MHz */
+ .vco_min = 300000000,
+ .vco_max = 600000000,
+ .base_reg = PLLRE_BASE,
+ .misc_reg = PLLRE_MISC,
+ .lock_mask = PLLRE_MISC_LOCK,
+ .lock_enable_bit_idx = PLLRE_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .iddq_reg = PLLRE_MISC,
+ .iddq_bit_idx = PLLRE_IDDQ_BIT,
+ .div_nmp = &pllre_nmp,
+ .flags = TEGRA_PLL_USE_LOCK,
+};
+
+static struct div_nmp pllp_nmp = {
+ .divm_shift = 0,
+ .divm_width = 5,
+ .divn_shift = 8,
+ .divn_width = 10,
+ .divp_shift = 20,
+ .divp_width = 3,
+};
+
+static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
+ {12000000, 216000000, 432, 12, 1, 8},
+ {13000000, 216000000, 432, 13, 1, 8},
+ {16800000, 216000000, 360, 14, 1, 8},
+ {19200000, 216000000, 360, 16, 1, 8},
+ {26000000, 216000000, 432, 26, 1, 8},
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_p_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 200000000,
+ .vco_max = 700000000,
+ .base_reg = PLLP_BASE,
+ .misc_reg = PLLP_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .div_nmp = &pllp_nmp,
+ .freq_table = pll_p_freq_table,
+ .fixed_rate = 408000000,
+ .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK,
+};
+
+static struct tegra_clk_pll_freq_table pll_a_freq_table[] = {
+ {9600000, 282240000, 147, 5, 0, 4},
+ {9600000, 368640000, 192, 5, 0, 4},
+ {9600000, 240000000, 200, 8, 0, 8},
+
+ {28800000, 282240000, 245, 25, 0, 8},
+ {28800000, 368640000, 320, 25, 0, 8},
+ {28800000, 240000000, 200, 24, 0, 8},
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_a_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 200000000,
+ .vco_max = 700000000,
+ .base_reg = PLLA_BASE,
+ .misc_reg = PLLA_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .div_nmp = &pllp_nmp,
+ .freq_table = pll_a_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
+};
+
+static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
+ {12000000, 216000000, 864, 12, 4, 12},
+ {13000000, 216000000, 864, 13, 4, 12},
+ {16800000, 216000000, 720, 14, 4, 12},
+ {19200000, 216000000, 720, 16, 4, 12},
+ {26000000, 216000000, 864, 26, 4, 12},
+
+ {12000000, 594000000, 594, 12, 1, 12},
+ {13000000, 594000000, 594, 13, 1, 12},
+ {16800000, 594000000, 495, 14, 1, 12},
+ {19200000, 594000000, 495, 16, 1, 12},
+ {26000000, 594000000, 594, 26, 1, 12},
+
+ {12000000, 1000000000, 1000, 12, 1, 12},
+ {13000000, 1000000000, 1000, 13, 1, 12},
+ {19200000, 1000000000, 625, 12, 1, 12},
+ {26000000, 1000000000, 1000, 26, 1, 12},
+
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_d_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 500000000,
+ .vco_max = 1000000000,
+ .base_reg = PLLD_BASE,
+ .misc_reg = PLLD_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+ .div_nmp = &pllp_nmp,
+ .freq_table = pll_d_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK,
+};
+
+static struct tegra_clk_pll_freq_table tegra124_pll_d2_freq_table[] = {
+ { 12000000, 148500000, 99, 1, 8},
+ { 12000000, 594000000, 99, 1, 1},
+ { 13000000, 594000000, 91, 1, 1}, /* actual: 591.5 MHz */
+ { 16800000, 594000000, 71, 1, 1}, /* actual: 596.4 MHz */
+ { 19200000, 594000000, 62, 1, 1}, /* actual: 595.2 MHz */
+ { 26000000, 594000000, 91, 2, 1}, /* actual: 591.5 MHz */
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_params tegra124_pll_d2_params = {
+ .input_min = 12000000,
+ .input_max = 1000000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 38 MHz */
+ .vco_min = 600000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLD2_BASE,
+ .misc_reg = PLLD2_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .iddq_reg = PLLD2_BASE,
+ .iddq_bit_idx = PLLSS_IDDQ_BIT,
+ .pdiv_tohw = pll12g_ssd_esd_p,
+ .div_nmp = &pllss_nmp,
+ .ext_misc_reg[0] = 0x570,
+ .ext_misc_reg[1] = 0x574,
+ .ext_misc_reg[2] = 0x578,
+ .max_p = 15,
+ .freq_table = tegra124_pll_d2_freq_table,
+};
+
+static struct tegra_clk_pll_freq_table pll_dp_freq_table[] = {
+ { 12000000, 600000000, 100, 1, 1},
+ { 13000000, 600000000, 92, 1, 1}, /* actual: 598.0 MHz */
+ { 16800000, 600000000, 71, 1, 1}, /* actual: 596.4 MHz */
+ { 19200000, 600000000, 62, 1, 1}, /* actual: 595.2 MHz */
+ { 26000000, 600000000, 92, 2, 1}, /* actual: 598.0 MHz */
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_params pll_dp_params = {
+ .input_min = 12000000,
+ .input_max = 1000000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 38 MHz */
+ .vco_min = 600000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLDP_BASE,
+ .misc_reg = PLLDP_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .iddq_reg = PLLDP_BASE,
+ .iddq_bit_idx = PLLSS_IDDQ_BIT,
+ .pdiv_tohw = pll12g_ssd_esd_p,
+ .div_nmp = &pllss_nmp,
+ .ext_misc_reg[0] = 0x598,
+ .ext_misc_reg[1] = 0x59c,
+ .ext_misc_reg[2] = 0x5a0,
+ .max_p = 5,
+ .freq_table = pll_dp_freq_table,
+};
+
+static struct pdiv_map pllu_p[] = {
+ { .pdiv = 1, .hw_val = 1 },
+ { .pdiv = 2, .hw_val = 0 },
+ { .pdiv = 0, .hw_val = 0 },
+};
+
+static struct div_nmp pllu_nmp = {
+ .divm_shift = 0,
+ .divm_width = 5,
+ .divn_shift = 8,
+ .divn_width = 10,
+ .divp_shift = 20,
+ .divp_width = 1,
+};
+
+static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
+ {12000000, 480000000, 960, 12, 2, 12},
+ {13000000, 480000000, 960, 13, 2, 12},
+ {16800000, 480000000, 400, 7, 2, 5},
+ {19200000, 480000000, 200, 4, 2, 3},
+ {26000000, 480000000, 960, 26, 2, 12},
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_u_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 480000000,
+ .vco_max = 960000000,
+ .base_reg = PLLU_BASE,
+ .misc_reg = PLLU_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+ .pdiv_tohw = pllu_p,
+ .div_nmp = &pllu_nmp,
+ .freq_table = pll_u_freq_table,
+ .flags = TEGRA_PLLU | TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK,
+};
+
+struct utmi_clk_param {
+ /* Oscillator frequency in Hz */
+ u32 osc_frequency;
+ /* UTMIP PLL Enable Delay Count */
+ u8 enable_delay_count;
+ /* UTMIP PLL Stable count */
+ u8 stable_count;
+ /* UTMIP PLL Active delay count */
+ u8 active_delay_count;
+ /* UTMIP PLL Xtal frequency count */
+ u8 xtal_freq_count;
+};
+
+static const struct utmi_clk_param utmi_parameters[] = {
+ {.osc_frequency = 13000000, .enable_delay_count = 0x02,
+ .stable_count = 0x33, .active_delay_count = 0x05,
+ .xtal_freq_count = 0x7F},
+ {.osc_frequency = 19200000, .enable_delay_count = 0x03,
+ .stable_count = 0x4B, .active_delay_count = 0x06,
+ .xtal_freq_count = 0xBB},
+ {.osc_frequency = 12000000, .enable_delay_count = 0x02,
+ .stable_count = 0x2F, .active_delay_count = 0x04,
+ .xtal_freq_count = 0x76},
+ {.osc_frequency = 26000000, .enable_delay_count = 0x04,
+ .stable_count = 0x66, .active_delay_count = 0x09,
+ .xtal_freq_count = 0xFE},
+ {.osc_frequency = 16800000, .enable_delay_count = 0x03,
+ .stable_count = 0x41, .active_delay_count = 0x0A,
+ .xtal_freq_count = 0xA4},
+};
+
+static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
+ [tegra_clk_ispb] = { .dt_id = TEGRA124_CLK_ISPB, .present = true },
+ [tegra_clk_rtc] = { .dt_id = TEGRA124_CLK_RTC, .present = true },
+ [tegra_clk_timer] = { .dt_id = TEGRA124_CLK_TIMER, .present = true },
+ [tegra_clk_uarta] = { .dt_id = TEGRA124_CLK_UARTA, .present = true },
+ [tegra_clk_sdmmc2] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
+ [tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true },
+ [tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true },
+ [tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true },
+ [tegra_clk_sdmmc1] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
+ [tegra_clk_sdmmc4] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
+ [tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true },
+ [tegra_clk_i2s2] = { .dt_id = TEGRA124_CLK_I2S2, .present = true },
+ [tegra_clk_gr2d] = { .dt_id = TEGRA124_CLK_GR_2D, .present = true },
+ [tegra_clk_usbd] = { .dt_id = TEGRA124_CLK_USBD, .present = true },
+ [tegra_clk_isp_8] = { .dt_id = TEGRA124_CLK_ISP, .present = true },
+ [tegra_clk_gr3d] = { .dt_id = TEGRA124_CLK_GR_3D, .present = true },
+ [tegra_clk_disp2] = { .dt_id = TEGRA124_CLK_DISP2, .present = true },
+ [tegra_clk_disp1] = { .dt_id = TEGRA124_CLK_DISP1, .present = true },
+ [tegra_clk_host1x] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true },
+ [tegra_clk_vcp] = { .dt_id = TEGRA124_CLK_VCP, .present = true },
+ [tegra_clk_i2s0] = { .dt_id = TEGRA124_CLK_I2S0, .present = true },
+ [tegra_clk_apbdma] = { .dt_id = TEGRA124_CLK_APBDMA, .present = true },
+ [tegra_clk_kbc] = { .dt_id = TEGRA124_CLK_KBC, .present = true },
+ [tegra_clk_kfuse] = { .dt_id = TEGRA124_CLK_KFUSE, .present = true },
+ [tegra_clk_sbc1] = { .dt_id = TEGRA124_CLK_SBC1, .present = true },
+ [tegra_clk_nor] = { .dt_id = TEGRA124_CLK_NOR, .present = true },
+ [tegra_clk_sbc2] = { .dt_id = TEGRA124_CLK_SBC2, .present = true },
+ [tegra_clk_sbc3] = { .dt_id = TEGRA124_CLK_SBC3, .present = true },
+ [tegra_clk_i2c5] = { .dt_id = TEGRA124_CLK_I2C5, .present = true },
+ [tegra_clk_dsia] = { .dt_id = TEGRA124_CLK_DSIA, .present = true },
+ [tegra_clk_mipi] = { .dt_id = TEGRA124_CLK_MIPI, .present = true },
+ [tegra_clk_hdmi] = { .dt_id = TEGRA124_CLK_HDMI, .present = true },
+ [tegra_clk_csi] = { .dt_id = TEGRA124_CLK_CSI, .present = true },
+ [tegra_clk_i2c2] = { .dt_id = TEGRA124_CLK_I2C2, .present = true },
+ [tegra_clk_uartc] = { .dt_id = TEGRA124_CLK_UARTC, .present = true },
+ [tegra_clk_mipi_cal] = { .dt_id = TEGRA124_CLK_MIPI_CAL, .present = true },
+ [tegra_clk_emc] = { .dt_id = TEGRA124_CLK_EMC, .present = true },
+ [tegra_clk_usb2] = { .dt_id = TEGRA124_CLK_USB2, .present = true },
+ [tegra_clk_usb3] = { .dt_id = TEGRA124_CLK_USB3, .present = true },
+ [tegra_clk_vde_8] = { .dt_id = TEGRA124_CLK_VDE, .present = true },
+ [tegra_clk_bsea] = { .dt_id = TEGRA124_CLK_BSEA, .present = true },
+ [tegra_clk_bsev] = { .dt_id = TEGRA124_CLK_BSEV, .present = true },
+ [tegra_clk_uartd] = { .dt_id = TEGRA124_CLK_UARTD, .present = true },
+ [tegra_clk_i2c3] = { .dt_id = TEGRA124_CLK_I2C3, .present = true },
+ [tegra_clk_sbc4] = { .dt_id = TEGRA124_CLK_SBC4, .present = true },
+ [tegra_clk_sdmmc3] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true },
+ [tegra_clk_pcie] = { .dt_id = TEGRA124_CLK_PCIE, .present = true },
+ [tegra_clk_owr] = { .dt_id = TEGRA124_CLK_OWR, .present = true },
+ [tegra_clk_afi] = { .dt_id = TEGRA124_CLK_AFI, .present = true },
+ [tegra_clk_csite] = { .dt_id = TEGRA124_CLK_CSITE, .present = true },
+ [tegra_clk_la] = { .dt_id = TEGRA124_CLK_LA, .present = true },
+ [tegra_clk_trace] = { .dt_id = TEGRA124_CLK_TRACE, .present = true },
+ [tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true },
+ [tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true },
+ [tegra_clk_ndspeed] = { .dt_id = TEGRA124_CLK_NDSPEED, .present = true },
+ [tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true },
+ [tegra_clk_dsib] = { .dt_id = TEGRA124_CLK_DSIB, .present = true },
+ [tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true },
+ [tegra_clk_xusb_host] = { .dt_id = TEGRA124_CLK_XUSB_HOST, .present = true },
+ [tegra_clk_msenc] = { .dt_id = TEGRA124_CLK_MSENC, .present = true },
+ [tegra_clk_csus] = { .dt_id = TEGRA124_CLK_CSUS, .present = true },
+ [tegra_clk_mselect] = { .dt_id = TEGRA124_CLK_MSELECT, .present = true },
+ [tegra_clk_tsensor] = { .dt_id = TEGRA124_CLK_TSENSOR, .present = true },
+ [tegra_clk_i2s3] = { .dt_id = TEGRA124_CLK_I2S3, .present = true },
+ [tegra_clk_i2s4] = { .dt_id = TEGRA124_CLK_I2S4, .present = true },
+ [tegra_clk_i2c4] = { .dt_id = TEGRA124_CLK_I2C4, .present = true },
+ [tegra_clk_sbc5] = { .dt_id = TEGRA124_CLK_SBC5, .present = true },
+ [tegra_clk_sbc6] = { .dt_id = TEGRA124_CLK_SBC6, .present = true },
+ [tegra_clk_d_audio] = { .dt_id = TEGRA124_CLK_D_AUDIO, .present = true },
+ [tegra_clk_apbif] = { .dt_id = TEGRA124_CLK_APBIF, .present = true },
+ [tegra_clk_dam0] = { .dt_id = TEGRA124_CLK_DAM0, .present = true },
+ [tegra_clk_dam1] = { .dt_id = TEGRA124_CLK_DAM1, .present = true },
+ [tegra_clk_dam2] = { .dt_id = TEGRA124_CLK_DAM2, .present = true },
+ [tegra_clk_hda2codec_2x] = { .dt_id = TEGRA124_CLK_HDA2CODEC_2X, .present = true },
+ [tegra_clk_audio0_2x] = { .dt_id = TEGRA124_CLK_AUDIO0_2X, .present = true },
+ [tegra_clk_audio1_2x] = { .dt_id = TEGRA124_CLK_AUDIO1_2X, .present = true },
+ [tegra_clk_audio2_2x] = { .dt_id = TEGRA124_CLK_AUDIO2_2X, .present = true },
+ [tegra_clk_audio3_2x] = { .dt_id = TEGRA124_CLK_AUDIO3_2X, .present = true },
+ [tegra_clk_audio4_2x] = { .dt_id = TEGRA124_CLK_AUDIO4_2X, .present = true },
+ [tegra_clk_spdif_2x] = { .dt_id = TEGRA124_CLK_SPDIF_2X, .present = true },
+ [tegra_clk_actmon] = { .dt_id = TEGRA124_CLK_ACTMON, .present = true },
+ [tegra_clk_extern1] = { .dt_id = TEGRA124_CLK_EXTERN1, .present = true },
+ [tegra_clk_extern2] = { .dt_id = TEGRA124_CLK_EXTERN2, .present = true },
+ [tegra_clk_extern3] = { .dt_id = TEGRA124_CLK_EXTERN3, .present = true },
+ [tegra_clk_sata_oob] = { .dt_id = TEGRA124_CLK_SATA_OOB, .present = true },
+ [tegra_clk_sata] = { .dt_id = TEGRA124_CLK_SATA, .present = true },
+ [tegra_clk_hda] = { .dt_id = TEGRA124_CLK_HDA, .present = true },
+ [tegra_clk_se] = { .dt_id = TEGRA124_CLK_SE, .present = true },
+ [tegra_clk_hda2hdmi] = { .dt_id = TEGRA124_CLK_HDA2HDMI, .present = true },
+ [tegra_clk_sata_cold] = { .dt_id = TEGRA124_CLK_SATA_COLD, .present = true },
+ [tegra_clk_cilab] = { .dt_id = TEGRA124_CLK_CILAB, .present = true },
+ [tegra_clk_cilcd] = { .dt_id = TEGRA124_CLK_CILCD, .present = true },
+ [tegra_clk_cile] = { .dt_id = TEGRA124_CLK_CILE, .present = true },
+ [tegra_clk_dsialp] = { .dt_id = TEGRA124_CLK_DSIALP, .present = true },
+ [tegra_clk_dsiblp] = { .dt_id = TEGRA124_CLK_DSIBLP, .present = true },
+ [tegra_clk_entropy] = { .dt_id = TEGRA124_CLK_ENTROPY, .present = true },
+ [tegra_clk_dds] = { .dt_id = TEGRA124_CLK_DDS, .present = true },
+ [tegra_clk_dp2] = { .dt_id = TEGRA124_CLK_DP2, .present = true },
+ [tegra_clk_amx] = { .dt_id = TEGRA124_CLK_AMX, .present = true },
+ [tegra_clk_adx] = { .dt_id = TEGRA124_CLK_ADX, .present = true },
+ [tegra_clk_xusb_ss] = { .dt_id = TEGRA124_CLK_XUSB_SS, .present = true },
+ [tegra_clk_i2c6] = { .dt_id = TEGRA124_CLK_I2C6, .present = true },
+ [tegra_clk_vim2_clk] = { .dt_id = TEGRA124_CLK_VIM2_CLK, .present = true },
+ [tegra_clk_hdmi_audio] = { .dt_id = TEGRA124_CLK_HDMI_AUDIO, .present = true },
+ [tegra_clk_clk72Mhz] = { .dt_id = TEGRA124_CLK_CLK72MHZ, .present = true },
+ [tegra_clk_vic03] = { .dt_id = TEGRA124_CLK_VIC03, .present = true },
+ [tegra_clk_adx1] = { .dt_id = TEGRA124_CLK_ADX1, .present = true },
+ [tegra_clk_dpaux] = { .dt_id = TEGRA124_CLK_DPAUX, .present = true },
+ [tegra_clk_sor0] = { .dt_id = TEGRA124_CLK_SOR0, .present = true },
+ [tegra_clk_sor0_lvds] = { .dt_id = TEGRA124_CLK_SOR0_LVDS, .present = true },
+ [tegra_clk_gpu] = { .dt_id = TEGRA124_CLK_GPU, .present = true },
+ [tegra_clk_amx1] = { .dt_id = TEGRA124_CLK_AMX1, .present = true },
+ [tegra_clk_uartb] = { .dt_id = TEGRA124_CLK_UARTB, .present = true },
+ [tegra_clk_vfir] = { .dt_id = TEGRA124_CLK_VFIR, .present = true },
+ [tegra_clk_spdif_in] = { .dt_id = TEGRA124_CLK_SPDIF_IN, .present = true },
+ [tegra_clk_spdif_out] = { .dt_id = TEGRA124_CLK_SPDIF_OUT, .present = true },
+ [tegra_clk_vi_9] = { .dt_id = TEGRA124_CLK_VI, .present = true },
+ [tegra_clk_vi_sensor] = { .dt_id = TEGRA124_CLK_VI_SENSOR, .present = true },
+ [tegra_clk_fuse] = { .dt_id = TEGRA124_CLK_FUSE, .present = true },
+ [tegra_clk_fuse_burn] = { .dt_id = TEGRA124_CLK_FUSE_BURN, .present = true },
+ [tegra_clk_clk_32k] = { .dt_id = TEGRA124_CLK_CLK_32K, .present = true },
+ [tegra_clk_clk_m] = { .dt_id = TEGRA124_CLK_CLK_M, .present = true },
+ [tegra_clk_clk_m_div2] = { .dt_id = TEGRA124_CLK_CLK_M_DIV2, .present = true },
+ [tegra_clk_clk_m_div4] = { .dt_id = TEGRA124_CLK_CLK_M_DIV4, .present = true },
+ [tegra_clk_pll_ref] = { .dt_id = TEGRA124_CLK_PLL_REF, .present = true },
+ [tegra_clk_pll_c] = { .dt_id = TEGRA124_CLK_PLL_C, .present = true },
+ [tegra_clk_pll_c_out1] = { .dt_id = TEGRA124_CLK_PLL_C_OUT1, .present = true },
+ [tegra_clk_pll_c2] = { .dt_id = TEGRA124_CLK_PLL_C2, .present = true },
+ [tegra_clk_pll_c3] = { .dt_id = TEGRA124_CLK_PLL_C3, .present = true },
+ [tegra_clk_pll_m] = { .dt_id = TEGRA124_CLK_PLL_M, .present = true },
+ [tegra_clk_pll_m_out1] = { .dt_id = TEGRA124_CLK_PLL_M_OUT1, .present = true },
+ [tegra_clk_pll_p] = { .dt_id = TEGRA124_CLK_PLL_P, .present = true },
+ [tegra_clk_pll_p_out1] = { .dt_id = TEGRA124_CLK_PLL_P_OUT1, .present = true },
+ [tegra_clk_pll_p_out2] = { .dt_id = TEGRA124_CLK_PLL_P_OUT2, .present = true },
+ [tegra_clk_pll_p_out3] = { .dt_id = TEGRA124_CLK_PLL_P_OUT3, .present = true },
+ [tegra_clk_pll_p_out4] = { .dt_id = TEGRA124_CLK_PLL_P_OUT4, .present = true },
+ [tegra_clk_pll_a] = { .dt_id = TEGRA124_CLK_PLL_A, .present = true },
+ [tegra_clk_pll_a_out0] = { .dt_id = TEGRA124_CLK_PLL_A_OUT0, .present = true },
+ [tegra_clk_pll_d] = { .dt_id = TEGRA124_CLK_PLL_D, .present = true },
+ [tegra_clk_pll_d_out0] = { .dt_id = TEGRA124_CLK_PLL_D_OUT0, .present = true },
+ [tegra_clk_pll_d2] = { .dt_id = TEGRA124_CLK_PLL_D2, .present = true },
+ [tegra_clk_pll_d2_out0] = { .dt_id = TEGRA124_CLK_PLL_D2_OUT0, .present = true },
+ [tegra_clk_pll_u] = { .dt_id = TEGRA124_CLK_PLL_U, .present = true },
+ [tegra_clk_pll_u_480m] = { .dt_id = TEGRA124_CLK_PLL_U_480M, .present = true },
+ [tegra_clk_pll_u_60m] = { .dt_id = TEGRA124_CLK_PLL_U_60M, .present = true },
+ [tegra_clk_pll_u_48m] = { .dt_id = TEGRA124_CLK_PLL_U_48M, .present = true },
+ [tegra_clk_pll_u_12m] = { .dt_id = TEGRA124_CLK_PLL_U_12M, .present = true },
+ [tegra_clk_pll_x] = { .dt_id = TEGRA124_CLK_PLL_X, .present = true },
+ [tegra_clk_pll_x_out0] = { .dt_id = TEGRA124_CLK_PLL_X_OUT0, .present = true },
+ [tegra_clk_pll_re_vco] = { .dt_id = TEGRA124_CLK_PLL_RE_VCO, .present = true },
+ [tegra_clk_pll_re_out] = { .dt_id = TEGRA124_CLK_PLL_RE_OUT, .present = true },
+ [tegra_clk_spdif_in_sync] = { .dt_id = TEGRA124_CLK_SPDIF_IN_SYNC, .present = true },
+ [tegra_clk_i2s0_sync] = { .dt_id = TEGRA124_CLK_I2S0_SYNC, .present = true },
+ [tegra_clk_i2s1_sync] = { .dt_id = TEGRA124_CLK_I2S1_SYNC, .present = true },
+ [tegra_clk_i2s2_sync] = { .dt_id = TEGRA124_CLK_I2S2_SYNC, .present = true },
+ [tegra_clk_i2s3_sync] = { .dt_id = TEGRA124_CLK_I2S3_SYNC, .present = true },
+ [tegra_clk_i2s4_sync] = { .dt_id = TEGRA124_CLK_I2S4_SYNC, .present = true },
+ [tegra_clk_vimclk_sync] = { .dt_id = TEGRA124_CLK_VIMCLK_SYNC, .present = true },
+ [tegra_clk_audio0] = { .dt_id = TEGRA124_CLK_AUDIO0, .present = true },
+ [tegra_clk_audio1] = { .dt_id = TEGRA124_CLK_AUDIO1, .present = true },
+ [tegra_clk_audio2] = { .dt_id = TEGRA124_CLK_AUDIO2, .present = true },
+ [tegra_clk_audio3] = { .dt_id = TEGRA124_CLK_AUDIO3, .present = true },
+ [tegra_clk_audio4] = { .dt_id = TEGRA124_CLK_AUDIO4, .present = true },
+ [tegra_clk_spdif] = { .dt_id = TEGRA124_CLK_SPDIF, .present = true },
+ [tegra_clk_clk_out_1] = { .dt_id = TEGRA124_CLK_CLK_OUT_1, .present = true },
+ [tegra_clk_clk_out_2] = { .dt_id = TEGRA124_CLK_CLK_OUT_2, .present = true },
+ [tegra_clk_clk_out_3] = { .dt_id = TEGRA124_CLK_CLK_OUT_3, .present = true },
+ [tegra_clk_blink] = { .dt_id = TEGRA124_CLK_BLINK, .present = true },
+ [tegra_clk_xusb_host_src] = { .dt_id = TEGRA124_CLK_XUSB_HOST_SRC, .present = true },
+ [tegra_clk_xusb_falcon_src] = { .dt_id = TEGRA124_CLK_XUSB_FALCON_SRC, .present = true },
+ [tegra_clk_xusb_fs_src] = { .dt_id = TEGRA124_CLK_XUSB_FS_SRC, .present = true },
+ [tegra_clk_xusb_ss_src] = { .dt_id = TEGRA124_CLK_XUSB_SS_SRC, .present = true },
+ [tegra_clk_xusb_dev_src] = { .dt_id = TEGRA124_CLK_XUSB_DEV_SRC, .present = true },
+ [tegra_clk_xusb_dev] = { .dt_id = TEGRA124_CLK_XUSB_DEV, .present = true },
+ [tegra_clk_xusb_hs_src] = { .dt_id = TEGRA124_CLK_XUSB_HS_SRC, .present = true },
+ [tegra_clk_sclk] = { .dt_id = TEGRA124_CLK_SCLK, .present = true },
+ [tegra_clk_hclk] = { .dt_id = TEGRA124_CLK_HCLK, .present = true },
+ [tegra_clk_pclk] = { .dt_id = TEGRA124_CLK_PCLK, .present = true },
+ [tegra_clk_cclk_g] = { .dt_id = TEGRA124_CLK_CCLK_G, .present = true },
+ [tegra_clk_cclk_lp] = { .dt_id = TEGRA124_CLK_CCLK_LP, .present = true },
+ [tegra_clk_dfll_ref] = { .dt_id = TEGRA124_CLK_DFLL_REF, .present = true },
+ [tegra_clk_dfll_soc] = { .dt_id = TEGRA124_CLK_DFLL_SOC, .present = true },
+ [tegra_clk_vi_sensor2] = { .dt_id = TEGRA124_CLK_VI_SENSOR2, .present = true },
+ [tegra_clk_pll_p_out5] = { .dt_id = TEGRA124_CLK_PLL_P_OUT5, .present = true },
+ [tegra_clk_pll_c4] = { .dt_id = TEGRA124_CLK_PLL_C4, .present = true },
+ [tegra_clk_pll_dp] = { .dt_id = TEGRA124_CLK_PLL_DP, .present = true },
+ [tegra_clk_audio0_mux] = { .dt_id = TEGRA124_CLK_AUDIO0_MUX, .present = true },
+ [tegra_clk_audio1_mux] = { .dt_id = TEGRA124_CLK_AUDIO1_MUX, .present = true },
+ [tegra_clk_audio2_mux] = { .dt_id = TEGRA124_CLK_AUDIO2_MUX, .present = true },
+ [tegra_clk_audio3_mux] = { .dt_id = TEGRA124_CLK_AUDIO3_MUX, .present = true },
+ [tegra_clk_audio4_mux] = { .dt_id = TEGRA124_CLK_AUDIO4_MUX, .present = true },
+ [tegra_clk_spdif_mux] = { .dt_id = TEGRA124_CLK_SPDIF_MUX, .present = true },
+ [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_1_MUX, .present = true },
+ [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_2_MUX, .present = true },
+ [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true },
+ [tegra_clk_dsia_mux] = { .dt_id = TEGRA124_CLK_DSIA_MUX, .present = true },
+ [tegra_clk_dsib_mux] = { .dt_id = TEGRA124_CLK_DSIB_MUX, .present = true },
+ [tegra_clk_uarte] = { .dt_id = TEGRA124_CLK_UARTE, .present = true },
+};
+
+static struct tegra_devclk devclks[] __initdata = {
+ { .con_id = "clk_m", .dt_id = TEGRA124_CLK_CLK_M },
+ { .con_id = "pll_ref", .dt_id = TEGRA124_CLK_PLL_REF },
+ { .con_id = "clk_32k", .dt_id = TEGRA124_CLK_CLK_32K },
+ { .con_id = "clk_m_div2", .dt_id = TEGRA124_CLK_CLK_M_DIV2 },
+ { .con_id = "clk_m_div4", .dt_id = TEGRA124_CLK_CLK_M_DIV4 },
+ { .con_id = "pll_c", .dt_id = TEGRA124_CLK_PLL_C },
+ { .con_id = "pll_c_out1", .dt_id = TEGRA124_CLK_PLL_C_OUT1 },
+ { .con_id = "pll_c2", .dt_id = TEGRA124_CLK_PLL_C2 },
+ { .con_id = "pll_c3", .dt_id = TEGRA124_CLK_PLL_C3 },
+ { .con_id = "pll_p", .dt_id = TEGRA124_CLK_PLL_P },
+ { .con_id = "pll_p_out1", .dt_id = TEGRA124_CLK_PLL_P_OUT1 },
+ { .con_id = "pll_p_out2", .dt_id = TEGRA124_CLK_PLL_P_OUT2 },
+ { .con_id = "pll_p_out3", .dt_id = TEGRA124_CLK_PLL_P_OUT3 },
+ { .con_id = "pll_p_out4", .dt_id = TEGRA124_CLK_PLL_P_OUT4 },
+ { .con_id = "pll_m", .dt_id = TEGRA124_CLK_PLL_M },
+ { .con_id = "pll_m_out1", .dt_id = TEGRA124_CLK_PLL_M_OUT1 },
+ { .con_id = "pll_x", .dt_id = TEGRA124_CLK_PLL_X },
+ { .con_id = "pll_x_out0", .dt_id = TEGRA124_CLK_PLL_X_OUT0 },
+ { .con_id = "pll_u", .dt_id = TEGRA124_CLK_PLL_U },
+ { .con_id = "pll_u_480M", .dt_id = TEGRA124_CLK_PLL_U_480M },
+ { .con_id = "pll_u_60M", .dt_id = TEGRA124_CLK_PLL_U_60M },
+ { .con_id = "pll_u_48M", .dt_id = TEGRA124_CLK_PLL_U_48M },
+ { .con_id = "pll_u_12M", .dt_id = TEGRA124_CLK_PLL_U_12M },
+ { .con_id = "pll_d", .dt_id = TEGRA124_CLK_PLL_D },
+ { .con_id = "pll_d_out0", .dt_id = TEGRA124_CLK_PLL_D_OUT0 },
+ { .con_id = "pll_d2", .dt_id = TEGRA124_CLK_PLL_D2 },
+ { .con_id = "pll_d2_out0", .dt_id = TEGRA124_CLK_PLL_D2_OUT0 },
+ { .con_id = "pll_a", .dt_id = TEGRA124_CLK_PLL_A },
+ { .con_id = "pll_a_out0", .dt_id = TEGRA124_CLK_PLL_A_OUT0 },
+ { .con_id = "pll_re_vco", .dt_id = TEGRA124_CLK_PLL_RE_VCO },
+ { .con_id = "pll_re_out", .dt_id = TEGRA124_CLK_PLL_RE_OUT },
+ { .con_id = "spdif_in_sync", .dt_id = TEGRA124_CLK_SPDIF_IN_SYNC },
+ { .con_id = "i2s0_sync", .dt_id = TEGRA124_CLK_I2S0_SYNC },
+ { .con_id = "i2s1_sync", .dt_id = TEGRA124_CLK_I2S1_SYNC },
+ { .con_id = "i2s2_sync", .dt_id = TEGRA124_CLK_I2S2_SYNC },
+ { .con_id = "i2s3_sync", .dt_id = TEGRA124_CLK_I2S3_SYNC },
+ { .con_id = "i2s4_sync", .dt_id = TEGRA124_CLK_I2S4_SYNC },
+ { .con_id = "vimclk_sync", .dt_id = TEGRA124_CLK_VIMCLK_SYNC },
+ { .con_id = "audio0", .dt_id = TEGRA124_CLK_AUDIO0 },
+ { .con_id = "audio1", .dt_id = TEGRA124_CLK_AUDIO1 },
+ { .con_id = "audio2", .dt_id = TEGRA124_CLK_AUDIO2 },
+ { .con_id = "audio3", .dt_id = TEGRA124_CLK_AUDIO3 },
+ { .con_id = "audio4", .dt_id = TEGRA124_CLK_AUDIO4 },
+ { .con_id = "spdif", .dt_id = TEGRA124_CLK_SPDIF },
+ { .con_id = "audio0_2x", .dt_id = TEGRA124_CLK_AUDIO0_2X },
+ { .con_id = "audio1_2x", .dt_id = TEGRA124_CLK_AUDIO1_2X },
+ { .con_id = "audio2_2x", .dt_id = TEGRA124_CLK_AUDIO2_2X },
+ { .con_id = "audio3_2x", .dt_id = TEGRA124_CLK_AUDIO3_2X },
+ { .con_id = "audio4_2x", .dt_id = TEGRA124_CLK_AUDIO4_2X },
+ { .con_id = "spdif_2x", .dt_id = TEGRA124_CLK_SPDIF_2X },
+ { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA124_CLK_EXTERN1 },
+ { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA124_CLK_EXTERN2 },
+ { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA124_CLK_EXTERN3 },
+ { .con_id = "blink", .dt_id = TEGRA124_CLK_BLINK },
+ { .con_id = "cclk_g", .dt_id = TEGRA124_CLK_CCLK_G },
+ { .con_id = "cclk_lp", .dt_id = TEGRA124_CLK_CCLK_LP },
+ { .con_id = "sclk", .dt_id = TEGRA124_CLK_SCLK },
+ { .con_id = "hclk", .dt_id = TEGRA124_CLK_HCLK },
+ { .con_id = "pclk", .dt_id = TEGRA124_CLK_PCLK },
+ { .con_id = "fuse", .dt_id = TEGRA124_CLK_FUSE },
+ { .dev_id = "rtc-tegra", .dt_id = TEGRA124_CLK_RTC },
+ { .dev_id = "timer", .dt_id = TEGRA124_CLK_TIMER },
+};
+
+static struct clk **clks;
+
+static void tegra124_utmi_param_configure(void __iomem *clk_base)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) {
+ if (osc_freq == utmi_parameters[i].osc_frequency)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(utmi_parameters)) {
+ pr_err("%s: Unexpected oscillator freq %lu\n", __func__,
+ osc_freq);
+ return;
+ }
+
+ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2);
+
+ /* Program UTMIP PLL stable and active counts */
+ /* [FIXME] arclk_rst.h says WRONG! This should be 1ms -> 0x50 Check! */
+ reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0);
+ reg |= UTMIP_PLL_CFG2_STABLE_COUNT(utmi_parameters[i].stable_count);
+
+ reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0);
+
+ reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(utmi_parameters[i].
+ active_delay_count);
+
+ /* Remove power downs from UTMIP PLL control bits */
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN;
+
+ writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2);
+
+ /* Program UTMIP PLL delay and oscillator frequency counts */
+ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
+ reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
+
+ reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(utmi_parameters[i].
+ enable_delay_count);
+
+ reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0);
+ reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(utmi_parameters[i].
+ xtal_freq_count);
+
+ /* Remove power downs from UTMIP PLL control bits */
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN;
+ writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
+
+ /* Setup HW control of UTMIPLL */
+ reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
+ reg |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET;
+ reg &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL;
+ reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE;
+ writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
+
+ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
+ writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
+
+ udelay(1);
+
+ /* Setup SW override of UTMIPLL assuming USB2.0
+ ports are assigned to USB2 */
+ reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
+ reg |= UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL;
+ reg &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE;
+ writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
+
+ udelay(1);
+
+ /* Enable HW control UTMIPLL */
+ reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
+ reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE;
+ writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
+}
+
+static __init void tegra124_periph_clk_init(void __iomem *clk_base,
+ void __iomem *pmc_base)
+{
+ struct clk *clk;
+ u32 val;
+
+ /* xusb_hs_src */
+ val = readl(clk_base + CLK_SOURCE_XUSB_SS_SRC);
+ val |= BIT(25); /* always select PLLU_60M */
+ writel(val, clk_base + CLK_SOURCE_XUSB_SS_SRC);
+
+ clk = clk_register_fixed_factor(NULL, "xusb_hs_src", "pll_u_60M", 0,
+ 1, 1);
+ clks[TEGRA124_CLK_XUSB_HS_SRC] = clk;
+
+ /* dsia mux */
+ clk = clk_register_mux(NULL, "dsia_mux", mux_plld_out0_plld2_out0,
+ ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
+ clk_base + PLLD_BASE, 25, 1, 0, &pll_d_lock);
+ clks[TEGRA124_CLK_DSIA_MUX] = clk;
+
+ /* dsib mux */
+ clk = clk_register_mux(NULL, "dsib_mux", mux_plld_out0_plld2_out0,
+ ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
+ clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
+ clks[TEGRA124_CLK_DSIB_MUX] = clk;
+
+ /* emc mux */
+ clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
+ ARRAY_SIZE(mux_pllmcp_clkm), 0,
+ clk_base + CLK_SOURCE_EMC,
+ 29, 3, 0, NULL);
+
+ /* cml0 */
+ clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX,
+ 0, 0, &pll_e_lock);
+ clk_register_clkdev(clk, "cml0", NULL);
+ clks[TEGRA124_CLK_CML0] = clk;
+
+ /* cml1 */
+ clk = clk_register_gate(NULL, "cml1", "pll_e", 0, clk_base + PLLE_AUX,
+ 1, 0, &pll_e_lock);
+ clk_register_clkdev(clk, "cml1", NULL);
+ clks[TEGRA124_CLK_CML1] = clk;
+
+ tegra_periph_clk_init(clk_base, pmc_base, tegra124_clks, &pll_p_params);
+}
+
+static void __init tegra124_pll_init(void __iomem *clk_base,
+ void __iomem *pmc)
+{
+ u32 val;
+ struct clk *clk;
+
+ /* PLLC */
+ clk = tegra_clk_register_pllxc("pll_c", "pll_ref", clk_base,
+ pmc, 0, &pll_c_params, NULL);
+ clk_register_clkdev(clk, "pll_c", NULL);
+ clks[TEGRA124_CLK_PLL_C] = clk;
+
+ /* PLLC_OUT1 */
+ clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
+ clk_base + PLLC_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
+ clk_base + PLLC_OUT, 1, 0,
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_c_out1", NULL);
+ clks[TEGRA124_CLK_PLL_C_OUT1] = clk;
+
+ /* PLLC2 */
+ clk = tegra_clk_register_pllc("pll_c2", "pll_ref", clk_base, pmc, 0,
+ &pll_c2_params, NULL);
+ clk_register_clkdev(clk, "pll_c2", NULL);
+ clks[TEGRA124_CLK_PLL_C2] = clk;
+
+ /* PLLC3 */
+ clk = tegra_clk_register_pllc("pll_c3", "pll_ref", clk_base, pmc, 0,
+ &pll_c3_params, NULL);
+ clk_register_clkdev(clk, "pll_c3", NULL);
+ clks[TEGRA124_CLK_PLL_C3] = clk;
+
+ /* PLLM */
+ clk = tegra_clk_register_pllm("pll_m", "pll_ref", clk_base, pmc,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+ &pll_m_params, NULL);
+ clk_register_clkdev(clk, "pll_m", NULL);
+ clks[TEGRA124_CLK_PLL_M] = clk;
+
+ /* PLLM_OUT1 */
+ clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
+ clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
+ clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_m_out1", NULL);
+ clks[TEGRA124_CLK_PLL_M_OUT1] = clk;
+
+ /* PLLM_UD */
+ clk = clk_register_fixed_factor(NULL, "pll_m_ud", "pll_m",
+ CLK_SET_RATE_PARENT, 1, 1);
+
+ /* PLLU */
+ val = readl(clk_base + pll_u_params.base_reg);
+ val &= ~BIT(24); /* disable PLLU_OVERRIDE */
+ writel(val, clk_base + pll_u_params.base_reg);
+
+ clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc, 0,
+ &pll_u_params, &pll_u_lock);
+ clk_register_clkdev(clk, "pll_u", NULL);
+ clks[TEGRA124_CLK_PLL_U] = clk;
+
+ tegra124_utmi_param_configure(clk_base);
+
+ /* PLLU_480M */
+ clk = clk_register_gate(NULL, "pll_u_480M", "pll_u",
+ CLK_SET_RATE_PARENT, clk_base + PLLU_BASE,
+ 22, 0, &pll_u_lock);
+ clk_register_clkdev(clk, "pll_u_480M", NULL);
+ clks[TEGRA124_CLK_PLL_U_480M] = clk;
+
+ /* PLLU_60M */
+ clk = clk_register_fixed_factor(NULL, "pll_u_60M", "pll_u",
+ CLK_SET_RATE_PARENT, 1, 8);
+ clk_register_clkdev(clk, "pll_u_60M", NULL);
+ clks[TEGRA124_CLK_PLL_U_60M] = clk;
+
+ /* PLLU_48M */
+ clk = clk_register_fixed_factor(NULL, "pll_u_48M", "pll_u",
+ CLK_SET_RATE_PARENT, 1, 10);
+ clk_register_clkdev(clk, "pll_u_48M", NULL);
+ clks[TEGRA124_CLK_PLL_U_48M] = clk;
+
+ /* PLLU_12M */
+ clk = clk_register_fixed_factor(NULL, "pll_u_12M", "pll_u",
+ CLK_SET_RATE_PARENT, 1, 40);
+ clk_register_clkdev(clk, "pll_u_12M", NULL);
+ clks[TEGRA124_CLK_PLL_U_12M] = clk;
+
+ /* PLLD */
+ clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, pmc, 0,
+ &pll_d_params, &pll_d_lock);
+ clk_register_clkdev(clk, "pll_d", NULL);
+ clks[TEGRA124_CLK_PLL_D] = clk;
+
+ /* PLLD_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_d_out0", "pll_d",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_d_out0", NULL);
+ clks[TEGRA124_CLK_PLL_D_OUT0] = clk;
+
+ /* PLLRE */
+ clk = tegra_clk_register_pllre("pll_re_vco", "pll_ref", clk_base, pmc,
+ 0, &pll_re_vco_params, &pll_re_lock, pll_ref_freq);
+ clk_register_clkdev(clk, "pll_re_vco", NULL);
+ clks[TEGRA124_CLK_PLL_RE_VCO] = clk;
+
+ clk = clk_register_divider_table(NULL, "pll_re_out", "pll_re_vco", 0,
+ clk_base + PLLRE_BASE, 16, 4, 0,
+ pll_re_div_table, &pll_re_lock);
+ clk_register_clkdev(clk, "pll_re_out", NULL);
+ clks[TEGRA124_CLK_PLL_RE_OUT] = clk;
+
+ /* PLLE */
+ clk = tegra_clk_register_plle_tegra114("pll_e", "pll_ref",
+ clk_base, 0, &pll_e_params, NULL);
+ clk_register_clkdev(clk, "pll_e", NULL);
+ clks[TEGRA124_CLK_PLL_E] = clk;
+
+ /* PLLC4 */
+ clk = tegra_clk_register_pllss("pll_c4", "pll_ref", clk_base, 0,
+ &pll_c4_params, NULL);
+ clk_register_clkdev(clk, "pll_c4", NULL);
+ clks[TEGRA124_CLK_PLL_C4] = clk;
+
+ /* PLLDP */
+ clk = tegra_clk_register_pllss("pll_dp", "pll_ref", clk_base, 0,
+ &pll_dp_params, NULL);
+ clk_register_clkdev(clk, "pll_dp", NULL);
+ clks[TEGRA124_CLK_PLL_DP] = clk;
+
+ /* PLLD2 */
+ clk = tegra_clk_register_pllss("pll_d2", "pll_ref", clk_base, 0,
+ &tegra124_pll_d2_params, NULL);
+ clk_register_clkdev(clk, "pll_d2", NULL);
+ clks[TEGRA124_CLK_PLL_D2] = clk;
+
+ /* PLLD2_OUT0 ?? */
+ clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_d2_out0", NULL);
+ clks[TEGRA124_CLK_PLL_D2_OUT0] = clk;
+
+}
+
+/* Tegra124 CPU clock and reset control functions */
+static void tegra124_wait_cpu_in_reset(u32 cpu)
+{
+ unsigned int reg;
+
+ do {
+ reg = readl(clk_base + CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
+ cpu_relax();
+ } while (!(reg & (1 << cpu))); /* wait until the CPU is held in reset */
+}
+
+static void tegra124_disable_cpu_clock(u32 cpu)
+{
+ /* the flow controller takes care of this in the power sequence */
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void tegra124_cpu_clock_suspend(void)
+{
+ /* switch CoreSight (csite) to clk_m, saving the original source */
+ tegra124_cpu_clk_sctx.clk_csite_src =
+ readl(clk_base + CLK_SOURCE_CSITE);
+ writel(3 << 30, clk_base + CLK_SOURCE_CSITE);
+}
+
+static void tegra124_cpu_clock_resume(void)
+{
+ writel(tegra124_cpu_clk_sctx.clk_csite_src,
+ clk_base + CLK_SOURCE_CSITE);
+}
+#endif
+
+static struct tegra_cpu_car_ops tegra124_cpu_car_ops = {
+ .wait_for_reset = tegra124_wait_cpu_in_reset,
+ .disable_clock = tegra124_disable_cpu_clock,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = tegra124_cpu_clock_suspend,
+ .resume = tegra124_cpu_clock_resume,
+#endif
+};
+
+static const struct of_device_id pmc_match[] __initconst = {
+ { .compatible = "nvidia,tegra124-pmc" },
+ {},
+};
+
+static struct tegra_clk_init_table init_table[] __initdata = {
+ {TEGRA124_CLK_UARTA, TEGRA124_CLK_PLL_P, 408000000, 0},
+ {TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0},
+ {TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0},
+ {TEGRA124_CLK_UARTD, TEGRA124_CLK_PLL_P, 408000000, 0},
+ {TEGRA124_CLK_PLL_A, TEGRA124_CLK_CLK_MAX, 564480000, 1},
+ {TEGRA124_CLK_PLL_A_OUT0, TEGRA124_CLK_CLK_MAX, 11289600, 1},
+ {TEGRA124_CLK_EXTERN1, TEGRA124_CLK_PLL_A_OUT0, 0, 1},
+ {TEGRA124_CLK_CLK_OUT_1_MUX, TEGRA124_CLK_EXTERN1, 0, 1},
+ {TEGRA124_CLK_CLK_OUT_1, TEGRA124_CLK_CLK_MAX, 0, 1},
+ {TEGRA124_CLK_I2S0, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA124_CLK_I2S1, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA124_CLK_I2S2, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA124_CLK_I2S3, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA124_CLK_I2S4, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA124_CLK_VDE, TEGRA124_CLK_PLL_P, 0, 0},
+ {TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1},
+ {TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 1},
+ {TEGRA124_CLK_DFLL_SOC, TEGRA124_CLK_PLL_P, 51000000, 1},
+ {TEGRA124_CLK_DFLL_REF, TEGRA124_CLK_PLL_P, 51000000, 1},
+ {TEGRA124_CLK_PLL_C, TEGRA124_CLK_CLK_MAX, 768000000, 0},
+ {TEGRA124_CLK_PLL_C_OUT1, TEGRA124_CLK_CLK_MAX, 100000000, 0},
+ {TEGRA124_CLK_SBC4, TEGRA124_CLK_PLL_P, 12000000, 1},
+ {TEGRA124_CLK_TSEC, TEGRA124_CLK_PLL_C3, 0, 0},
+ {TEGRA124_CLK_MSENC, TEGRA124_CLK_PLL_C3, 0, 0},
+ /* This MUST be the last entry. */
+ {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
+};
+
+static void __init tegra124_clock_apply_init_table(void)
+{
+ tegra_init_from_table(init_table, clks, TEGRA124_CLK_CLK_MAX);
+}
+
+static void __init tegra124_clock_init(struct device_node *np)
+{
+ struct device_node *node;
+
+ clk_base = of_iomap(np, 0);
+ if (!clk_base) {
+ pr_err("ioremap tegra124 CAR failed\n");
+ return;
+ }
+
+ node = of_find_matching_node(NULL, pmc_match);
+ if (!node) {
+ pr_err("Failed to find pmc node\n");
+ WARN_ON(1);
+ return;
+ }
+
+ pmc_base = of_iomap(node, 0);
+ if (!pmc_base) {
+ pr_err("Can't map pmc registers\n");
+ WARN_ON(1);
+ return;
+ }
+
+ clks = tegra_clk_init(clk_base, TEGRA124_CLK_CLK_MAX, 6);
+ if (!clks)
+ return;
+
+ if (tegra_osc_clk_init(clk_base, tegra124_clks, tegra124_input_freq,
+ ARRAY_SIZE(tegra124_input_freq), &osc_freq, &pll_ref_freq) < 0)
+ return;
+
+ tegra_fixed_clk_init(tegra124_clks);
+ tegra124_pll_init(clk_base, pmc_base);
+ tegra124_periph_clk_init(clk_base, pmc_base);
+ tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks, &pll_a_params);
+ tegra_pmc_clk_init(pmc_base, tegra124_clks);
+
+ tegra_super_clk_gen4_init(clk_base, pmc_base, tegra124_clks,
+ &pll_x_params);
+ tegra_add_of_provider(np);
+ tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
+
+ tegra_clk_apply_init_table = tegra124_clock_apply_init_table;
+
+ tegra_cpu_car_ops = &tegra124_cpu_car_ops;
+}
+CLK_OF_DECLARE(tegra124, "nvidia,tegra124-car", tegra124_clock_init);
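
For context, a minimal sketch of how a consumer would use the clocks registered by the driver above through the common clk API; the consumer driver, its example_probe() function, and its device-tree wiring are assumed for illustration only (the "nvidia,tegra124-car" node declared above is the provider that resolves the consumer's "clocks" phandle):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *clk;
		int err;

		/* Resolved via the of_clk provider added by tegra_add_of_provider();
		 * a NULL con_id picks the first entry of the node's "clocks" property. */
		clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* prepare + enable in one call */
		err = clk_prepare_enable(clk);
		if (err)
			return err;

		dev_info(&pdev->dev, "clock rate: %lu Hz\n", clk_get_rate(clk));
		return 0;
	}
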
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 056f649d0d89..dbace152b2fa 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -22,30 +22,10 @@
#include <linux/of_address.h>
#include <linux/clk/tegra.h>
#include <linux/delay.h>
+#include <dt-bindings/clock/tegra20-car.h>
#include "clk.h"
-
-#define RST_DEVICES_L 0x004
-#define RST_DEVICES_H 0x008
-#define RST_DEVICES_U 0x00c
-#define RST_DEVICES_SET_L 0x300
-#define RST_DEVICES_CLR_L 0x304
-#define RST_DEVICES_SET_H 0x308
-#define RST_DEVICES_CLR_H 0x30c
-#define RST_DEVICES_SET_U 0x310
-#define RST_DEVICES_CLR_U 0x314
-#define RST_DEVICES_NUM 3
-
-#define CLK_OUT_ENB_L 0x010
-#define CLK_OUT_ENB_H 0x014
-#define CLK_OUT_ENB_U 0x018
-#define CLK_OUT_ENB_SET_L 0x320
-#define CLK_OUT_ENB_CLR_L 0x324
-#define CLK_OUT_ENB_SET_H 0x328
-#define CLK_OUT_ENB_CLR_H 0x32c
-#define CLK_OUT_ENB_SET_U 0x330
-#define CLK_OUT_ENB_CLR_U 0x334
-#define CLK_OUT_ENB_NUM 3
+#include "clk-id.h"
#define OSC_CTRL 0x50
#define OSC_CTRL_OSC_FREQ_MASK (3<<30)
@@ -67,6 +47,8 @@
#define OSC_FREQ_DET_BUSY (1<<31)
#define OSC_FREQ_DET_CNT_MASK 0xFFFF
+#define TEGRA20_CLK_PERIPH_BANKS 3
+
#define PLLS_BASE 0xf0
#define PLLS_MISC 0xf4
#define PLLC_BASE 0x80
@@ -114,34 +96,15 @@
#define CLK_SOURCE_I2S1 0x100
#define CLK_SOURCE_I2S2 0x104
-#define CLK_SOURCE_SPDIF_OUT 0x108
-#define CLK_SOURCE_SPDIF_IN 0x10c
#define CLK_SOURCE_PWM 0x110
#define CLK_SOURCE_SPI 0x114
-#define CLK_SOURCE_SBC1 0x134
-#define CLK_SOURCE_SBC2 0x118
-#define CLK_SOURCE_SBC3 0x11c
-#define CLK_SOURCE_SBC4 0x1b4
#define CLK_SOURCE_XIO 0x120
#define CLK_SOURCE_TWC 0x12c
#define CLK_SOURCE_IDE 0x144
-#define CLK_SOURCE_NDFLASH 0x160
-#define CLK_SOURCE_VFIR 0x168
-#define CLK_SOURCE_SDMMC1 0x150
-#define CLK_SOURCE_SDMMC2 0x154
-#define CLK_SOURCE_SDMMC3 0x1bc
-#define CLK_SOURCE_SDMMC4 0x164
-#define CLK_SOURCE_CVE 0x140
-#define CLK_SOURCE_TVO 0x188
-#define CLK_SOURCE_TVDAC 0x194
#define CLK_SOURCE_HDMI 0x18c
#define CLK_SOURCE_DISP1 0x138
#define CLK_SOURCE_DISP2 0x13c
#define CLK_SOURCE_CSITE 0x1d4
-#define CLK_SOURCE_LA 0x1f8
-#define CLK_SOURCE_OWR 0x1cc
-#define CLK_SOURCE_NOR 0x1d0
-#define CLK_SOURCE_MIPI 0x174
#define CLK_SOURCE_I2C1 0x124
#define CLK_SOURCE_I2C2 0x198
#define CLK_SOURCE_I2C3 0x1b8
@@ -151,24 +114,10 @@
#define CLK_SOURCE_UARTC 0x1a0
#define CLK_SOURCE_UARTD 0x1c0
#define CLK_SOURCE_UARTE 0x1c4
-#define CLK_SOURCE_3D 0x158
-#define CLK_SOURCE_2D 0x15c
-#define CLK_SOURCE_MPE 0x170
-#define CLK_SOURCE_EPP 0x16c
-#define CLK_SOURCE_HOST1X 0x180
-#define CLK_SOURCE_VDE 0x1c8
-#define CLK_SOURCE_VI 0x148
-#define CLK_SOURCE_VI_SENSOR 0x1a8
#define CLK_SOURCE_EMC 0x19c
#define AUDIO_SYNC_CLK 0x38
-#define PMC_CTRL 0x0
-#define PMC_CTRL_BLINK_ENB 7
-#define PMC_DPD_PADS_ORIDE 0x1c
-#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
-#define PMC_BLINK_TIMER 0x40
-
/* Tegra CPU clock and reset control regs */
#define TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX 0x4c
#define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET 0x340
@@ -188,64 +137,32 @@ static struct cpu_clk_suspend_context {
} tegra20_cpu_clk_sctx;
#endif
-static int periph_clk_enb_refcnt[CLK_OUT_ENB_NUM * 32];
-
static void __iomem *clk_base;
static void __iomem *pmc_base;
-static DEFINE_SPINLOCK(pll_div_lock);
-static DEFINE_SPINLOCK(sysrate_lock);
-
-#define TEGRA_INIT_DATA_MUX(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+#define TEGRA_INIT_DATA_MUX(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, \
- _regs, _clk_num, periph_clk_enb_refcnt, \
+ _clk_num, \
_gate_flags, _clk_id)
-#define TEGRA_INIT_DATA_INT(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs, \
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
- _clk_id)
-
-#define TEGRA_INIT_DATA_DIV16(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- 30, 2, 0, 0, 16, 0, TEGRA_DIVIDER_ROUND_UP, _regs, \
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+#define TEGRA_INIT_DATA_DIV16(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
+ 30, 2, 0, 0, 16, 0, TEGRA_DIVIDER_ROUND_UP, \
+ _clk_num, _gate_flags, \
_clk_id)
-#define TEGRA_INIT_DATA_NODIV(_name, _con_id, _dev_id, _parents, _offset, \
- _mux_shift, _mux_width, _clk_num, _regs, \
+#define TEGRA_INIT_DATA_NODIV(_name, _parents, _offset, \
+ _mux_shift, _mux_width, _clk_num, \
_gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- _mux_shift, _mux_width, 0, 0, 0, 0, 0, _regs, \
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
+ _mux_shift, _mux_width, 0, 0, 0, 0, 0, \
+ _clk_num, _gate_flags, \
_clk_id)
-/* IDs assigned here must be in sync with DT bindings definition
- * for Tegra20 clocks .
- */
-enum tegra20_clk {
- cpu, ac97 = 3, rtc, timer, uarta, gpio = 8, sdmmc2, i2s1 = 11, i2c1,
- ndflash, sdmmc1, sdmmc4, twc, pwm, i2s2, epp, gr2d = 21, usbd, isp,
- gr3d, ide, disp2, disp1, host1x, vcp, cache2 = 31, mem, ahbdma, apbdma,
- kbc = 36, stat_mon, pmc, fuse, kfuse, sbc1, nor, spi, sbc2, xio, sbc3,
- dvc, dsi, mipi = 50, hdmi, csi, tvdac, i2c2, uartc, emc = 57, usb2,
- usb3, mpe, vde, bsea, bsev, speedo, uartd, uarte, i2c3, sbc4, sdmmc3,
- pex, owr, afi, csite, pcie_xclk, avpucq = 75, la, irama = 84, iramb,
- iramc, iramd, cram2, audio_2x, clk_d, csus = 92, cdev2, cdev1,
- uartb = 96, vfir, spdif_in, spdif_out, vi, vi_sensor, tvo, cve,
- osc, clk_32k, clk_m, sclk, cclk, hclk, pclk, blink, pll_a, pll_a_out0,
- pll_c, pll_c_out1, pll_d, pll_d_out0, pll_e, pll_m, pll_m_out1,
- pll_p, pll_p_out1, pll_p_out2, pll_p_out3, pll_p_out4, pll_s, pll_u,
- pll_x, cop, audio, pll_ref, twd, clk_max,
-};
-
-static struct clk *clks[clk_max];
-static struct clk_onecell_data clk_data;
+static struct clk **clks;
static struct tegra_clk_pll_freq_table pll_c_freq_table[] = {
{ 12000000, 600000000, 600, 12, 0, 8 },
@@ -383,6 +300,8 @@ static struct tegra_clk_pll_params pll_c_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_c_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON,
};
static struct tegra_clk_pll_params pll_m_params = {
@@ -397,6 +316,8 @@ static struct tegra_clk_pll_params pll_m_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_m_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON,
};
static struct tegra_clk_pll_params pll_p_params = {
@@ -411,6 +332,9 @@ static struct tegra_clk_pll_params pll_p_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_p_freq_table,
+ .flags = TEGRA_PLL_FIXED | TEGRA_PLL_HAS_CPCON,
+ .fixed_rate = 216000000,
};
static struct tegra_clk_pll_params pll_a_params = {
@@ -425,6 +349,8 @@ static struct tegra_clk_pll_params pll_a_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_a_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON,
};
static struct tegra_clk_pll_params pll_d_params = {
@@ -439,6 +365,8 @@ static struct tegra_clk_pll_params pll_d_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
+ .freq_table = pll_d_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON,
};
static struct pdiv_map pllu_p[] = {
@@ -460,6 +388,8 @@ static struct tegra_clk_pll_params pll_u_params = {
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
.pdiv_tohw = pllu_p,
+ .freq_table = pll_u_freq_table,
+ .flags = TEGRA_PLLU | TEGRA_PLL_HAS_CPCON,
};
static struct tegra_clk_pll_params pll_x_params = {
@@ -474,6 +404,8 @@ static struct tegra_clk_pll_params pll_x_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_x_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON,
};
static struct tegra_clk_pll_params pll_e_params = {
@@ -488,34 +420,160 @@ static struct tegra_clk_pll_params pll_e_params = {
.lock_mask = PLLE_MISC_LOCK,
.lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
.lock_delay = 0,
+ .freq_table = pll_e_freq_table,
+ .flags = TEGRA_PLL_FIXED,
+ .fixed_rate = 100000000,
};
-/* Peripheral clock registers */
-static struct tegra_clk_periph_regs periph_l_regs = {
- .enb_reg = CLK_OUT_ENB_L,
- .enb_set_reg = CLK_OUT_ENB_SET_L,
- .enb_clr_reg = CLK_OUT_ENB_CLR_L,
- .rst_reg = RST_DEVICES_L,
- .rst_set_reg = RST_DEVICES_SET_L,
- .rst_clr_reg = RST_DEVICES_CLR_L,
-};
-
-static struct tegra_clk_periph_regs periph_h_regs = {
- .enb_reg = CLK_OUT_ENB_H,
- .enb_set_reg = CLK_OUT_ENB_SET_H,
- .enb_clr_reg = CLK_OUT_ENB_CLR_H,
- .rst_reg = RST_DEVICES_H,
- .rst_set_reg = RST_DEVICES_SET_H,
- .rst_clr_reg = RST_DEVICES_CLR_H,
+static struct tegra_devclk devclks[] __initdata = {
+ { .con_id = "pll_c", .dt_id = TEGRA20_CLK_PLL_C },
+ { .con_id = "pll_c_out1", .dt_id = TEGRA20_CLK_PLL_C_OUT1 },
+ { .con_id = "pll_p", .dt_id = TEGRA20_CLK_PLL_P },
+ { .con_id = "pll_p_out1", .dt_id = TEGRA20_CLK_PLL_P_OUT1 },
+ { .con_id = "pll_p_out2", .dt_id = TEGRA20_CLK_PLL_P_OUT2 },
+ { .con_id = "pll_p_out3", .dt_id = TEGRA20_CLK_PLL_P_OUT3 },
+ { .con_id = "pll_p_out4", .dt_id = TEGRA20_CLK_PLL_P_OUT4 },
+ { .con_id = "pll_m", .dt_id = TEGRA20_CLK_PLL_M },
+ { .con_id = "pll_m_out1", .dt_id = TEGRA20_CLK_PLL_M_OUT1 },
+ { .con_id = "pll_x", .dt_id = TEGRA20_CLK_PLL_X },
+ { .con_id = "pll_u", .dt_id = TEGRA20_CLK_PLL_U },
+ { .con_id = "pll_d", .dt_id = TEGRA20_CLK_PLL_D },
+ { .con_id = "pll_d_out0", .dt_id = TEGRA20_CLK_PLL_D_OUT0 },
+ { .con_id = "pll_a", .dt_id = TEGRA20_CLK_PLL_A },
+ { .con_id = "pll_a_out0", .dt_id = TEGRA20_CLK_PLL_A_OUT0 },
+ { .con_id = "pll_e", .dt_id = TEGRA20_CLK_PLL_E },
+ { .con_id = "cclk", .dt_id = TEGRA20_CLK_CCLK },
+ { .con_id = "sclk", .dt_id = TEGRA20_CLK_SCLK },
+ { .con_id = "hclk", .dt_id = TEGRA20_CLK_HCLK },
+ { .con_id = "pclk", .dt_id = TEGRA20_CLK_PCLK },
+ { .con_id = "fuse", .dt_id = TEGRA20_CLK_FUSE },
+ { .con_id = "twd", .dt_id = TEGRA20_CLK_TWD },
+ { .con_id = "audio", .dt_id = TEGRA20_CLK_AUDIO },
+ { .con_id = "audio_2x", .dt_id = TEGRA20_CLK_AUDIO_2X },
+ { .dev_id = "tegra20-ac97", .dt_id = TEGRA20_CLK_AC97 },
+ { .dev_id = "tegra-apbdma", .dt_id = TEGRA20_CLK_APBDMA },
+ { .dev_id = "rtc-tegra", .dt_id = TEGRA20_CLK_RTC },
+ { .dev_id = "timer", .dt_id = TEGRA20_CLK_TIMER },
+ { .dev_id = "tegra-kbc", .dt_id = TEGRA20_CLK_KBC },
+ { .con_id = "csus", .dev_id = "tegra_camera", .dt_id = TEGRA20_CLK_CSUS },
+ { .con_id = "vcp", .dev_id = "tegra-avp", .dt_id = TEGRA20_CLK_VCP },
+ { .con_id = "bsea", .dev_id = "tegra-avp", .dt_id = TEGRA20_CLK_BSEA },
+ { .con_id = "bsev", .dev_id = "tegra-aes", .dt_id = TEGRA20_CLK_BSEV },
+ { .con_id = "emc", .dt_id = TEGRA20_CLK_EMC },
+ { .dev_id = "fsl-tegra-udc", .dt_id = TEGRA20_CLK_USBD },
+ { .dev_id = "tegra-ehci.1", .dt_id = TEGRA20_CLK_USB2 },
+ { .dev_id = "tegra-ehci.2", .dt_id = TEGRA20_CLK_USB3 },
+ { .dev_id = "dsi", .dt_id = TEGRA20_CLK_DSI },
+ { .con_id = "csi", .dev_id = "tegra_camera", .dt_id = TEGRA20_CLK_CSI },
+ { .con_id = "isp", .dev_id = "tegra_camera", .dt_id = TEGRA20_CLK_ISP },
+ { .con_id = "pex", .dt_id = TEGRA20_CLK_PEX },
+ { .con_id = "afi", .dt_id = TEGRA20_CLK_AFI },
+ { .con_id = "cdev1", .dt_id = TEGRA20_CLK_CDEV1 },
+ { .con_id = "cdev2", .dt_id = TEGRA20_CLK_CDEV2 },
+ { .con_id = "clk_32k", .dt_id = TEGRA20_CLK_CLK_32K },
+ { .con_id = "blink", .dt_id = TEGRA20_CLK_BLINK },
+ { .con_id = "clk_m", .dt_id = TEGRA20_CLK_CLK_M },
+ { .con_id = "pll_ref", .dt_id = TEGRA20_CLK_PLL_REF },
+ { .dev_id = "tegra20-i2s.0", .dt_id = TEGRA20_CLK_I2S1 },
+ { .dev_id = "tegra20-i2s.1", .dt_id = TEGRA20_CLK_I2S2 },
+ { .con_id = "spdif_out", .dev_id = "tegra20-spdif", .dt_id = TEGRA20_CLK_SPDIF_OUT },
+ { .con_id = "spdif_in", .dev_id = "tegra20-spdif", .dt_id = TEGRA20_CLK_SPDIF_IN },
+ { .dev_id = "spi_tegra.0", .dt_id = TEGRA20_CLK_SBC1 },
+ { .dev_id = "spi_tegra.1", .dt_id = TEGRA20_CLK_SBC2 },
+ { .dev_id = "spi_tegra.2", .dt_id = TEGRA20_CLK_SBC3 },
+ { .dev_id = "spi_tegra.3", .dt_id = TEGRA20_CLK_SBC4 },
+ { .dev_id = "spi", .dt_id = TEGRA20_CLK_SPI },
+ { .dev_id = "xio", .dt_id = TEGRA20_CLK_XIO },
+ { .dev_id = "twc", .dt_id = TEGRA20_CLK_TWC },
+ { .dev_id = "ide", .dt_id = TEGRA20_CLK_IDE },
+ { .dev_id = "tegra_nand", .dt_id = TEGRA20_CLK_NDFLASH },
+ { .dev_id = "vfir", .dt_id = TEGRA20_CLK_VFIR },
+ { .dev_id = "csite", .dt_id = TEGRA20_CLK_CSITE },
+ { .dev_id = "la", .dt_id = TEGRA20_CLK_LA },
+ { .dev_id = "tegra_w1", .dt_id = TEGRA20_CLK_OWR },
+ { .dev_id = "mipi", .dt_id = TEGRA20_CLK_MIPI },
+ { .dev_id = "vde", .dt_id = TEGRA20_CLK_VDE },
+ { .con_id = "vi", .dev_id = "tegra_camera", .dt_id = TEGRA20_CLK_VI },
+ { .dev_id = "epp", .dt_id = TEGRA20_CLK_EPP },
+ { .dev_id = "mpe", .dt_id = TEGRA20_CLK_MPE },
+ { .dev_id = "host1x", .dt_id = TEGRA20_CLK_HOST1X },
+ { .dev_id = "3d", .dt_id = TEGRA20_CLK_GR3D },
+ { .dev_id = "2d", .dt_id = TEGRA20_CLK_GR2D },
+ { .dev_id = "tegra-nor", .dt_id = TEGRA20_CLK_NOR },
+ { .dev_id = "sdhci-tegra.0", .dt_id = TEGRA20_CLK_SDMMC1 },
+ { .dev_id = "sdhci-tegra.1", .dt_id = TEGRA20_CLK_SDMMC2 },
+ { .dev_id = "sdhci-tegra.2", .dt_id = TEGRA20_CLK_SDMMC3 },
+ { .dev_id = "sdhci-tegra.3", .dt_id = TEGRA20_CLK_SDMMC4 },
+ { .dev_id = "cve", .dt_id = TEGRA20_CLK_CVE },
+ { .dev_id = "tvo", .dt_id = TEGRA20_CLK_TVO },
+ { .dev_id = "tvdac", .dt_id = TEGRA20_CLK_TVDAC },
+ { .con_id = "vi_sensor", .dev_id = "tegra_camera", .dt_id = TEGRA20_CLK_VI_SENSOR },
+ { .dev_id = "hdmi", .dt_id = TEGRA20_CLK_HDMI },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.0", .dt_id = TEGRA20_CLK_I2C1 },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.1", .dt_id = TEGRA20_CLK_I2C2 },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.2", .dt_id = TEGRA20_CLK_I2C3 },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.3", .dt_id = TEGRA20_CLK_DVC },
+ { .dev_id = "tegra-pwm", .dt_id = TEGRA20_CLK_PWM },
+ { .dev_id = "tegra_uart.0", .dt_id = TEGRA20_CLK_UARTA },
+ { .dev_id = "tegra_uart.1", .dt_id = TEGRA20_CLK_UARTB },
+ { .dev_id = "tegra_uart.2", .dt_id = TEGRA20_CLK_UARTC },
+ { .dev_id = "tegra_uart.3", .dt_id = TEGRA20_CLK_UARTD },
+ { .dev_id = "tegra_uart.4", .dt_id = TEGRA20_CLK_UARTE },
+ { .dev_id = "tegradc.0", .dt_id = TEGRA20_CLK_DISP1 },
+ { .dev_id = "tegradc.1", .dt_id = TEGRA20_CLK_DISP2 },
};
-static struct tegra_clk_periph_regs periph_u_regs = {
- .enb_reg = CLK_OUT_ENB_U,
- .enb_set_reg = CLK_OUT_ENB_SET_U,
- .enb_clr_reg = CLK_OUT_ENB_CLR_U,
- .rst_reg = RST_DEVICES_U,
- .rst_set_reg = RST_DEVICES_SET_U,
- .rst_clr_reg = RST_DEVICES_CLR_U,
+static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
+ [tegra_clk_spdif_out] = { .dt_id = TEGRA20_CLK_SPDIF_OUT, .present = true },
+ [tegra_clk_spdif_in] = { .dt_id = TEGRA20_CLK_SPDIF_IN, .present = true },
+ [tegra_clk_sdmmc1] = { .dt_id = TEGRA20_CLK_SDMMC1, .present = true },
+ [tegra_clk_sdmmc2] = { .dt_id = TEGRA20_CLK_SDMMC2, .present = true },
+ [tegra_clk_sdmmc3] = { .dt_id = TEGRA20_CLK_SDMMC3, .present = true },
+ [tegra_clk_sdmmc4] = { .dt_id = TEGRA20_CLK_SDMMC4, .present = true },
+ [tegra_clk_la] = { .dt_id = TEGRA20_CLK_LA, .present = true },
+ [tegra_clk_csite] = { .dt_id = TEGRA20_CLK_CSITE, .present = true },
+ [tegra_clk_vfir] = { .dt_id = TEGRA20_CLK_VFIR, .present = true },
+ [tegra_clk_mipi] = { .dt_id = TEGRA20_CLK_MIPI, .present = true },
+ [tegra_clk_nor] = { .dt_id = TEGRA20_CLK_NOR, .present = true },
+ [tegra_clk_rtc] = { .dt_id = TEGRA20_CLK_RTC, .present = true },
+ [tegra_clk_timer] = { .dt_id = TEGRA20_CLK_TIMER, .present = true },
+ [tegra_clk_kbc] = { .dt_id = TEGRA20_CLK_KBC, .present = true },
+ [tegra_clk_csus] = { .dt_id = TEGRA20_CLK_CSUS, .present = true },
+ [tegra_clk_vcp] = { .dt_id = TEGRA20_CLK_VCP, .present = true },
+ [tegra_clk_bsea] = { .dt_id = TEGRA20_CLK_BSEA, .present = true },
+ [tegra_clk_bsev] = { .dt_id = TEGRA20_CLK_BSEV, .present = true },
+ [tegra_clk_usbd] = { .dt_id = TEGRA20_CLK_USBD, .present = true },
+ [tegra_clk_usb2] = { .dt_id = TEGRA20_CLK_USB2, .present = true },
+ [tegra_clk_usb3] = { .dt_id = TEGRA20_CLK_USB3, .present = true },
+ [tegra_clk_csi] = { .dt_id = TEGRA20_CLK_CSI, .present = true },
+ [tegra_clk_isp] = { .dt_id = TEGRA20_CLK_ISP, .present = true },
+ [tegra_clk_clk_32k] = { .dt_id = TEGRA20_CLK_CLK_32K, .present = true },
+ [tegra_clk_blink] = { .dt_id = TEGRA20_CLK_BLINK, .present = true },
+ [tegra_clk_hclk] = { .dt_id = TEGRA20_CLK_HCLK, .present = true },
+ [tegra_clk_pclk] = { .dt_id = TEGRA20_CLK_PCLK, .present = true },
+ [tegra_clk_pll_p_out1] = { .dt_id = TEGRA20_CLK_PLL_P_OUT1, .present = true },
+ [tegra_clk_pll_p_out2] = { .dt_id = TEGRA20_CLK_PLL_P_OUT2, .present = true },
+ [tegra_clk_pll_p_out3] = { .dt_id = TEGRA20_CLK_PLL_P_OUT3, .present = true },
+ [tegra_clk_pll_p_out4] = { .dt_id = TEGRA20_CLK_PLL_P_OUT4, .present = true },
+ [tegra_clk_pll_p] = { .dt_id = TEGRA20_CLK_PLL_P, .present = true },
+ [tegra_clk_owr] = { .dt_id = TEGRA20_CLK_OWR, .present = true },
+ [tegra_clk_sbc1] = { .dt_id = TEGRA20_CLK_SBC1, .present = true },
+ [tegra_clk_sbc2] = { .dt_id = TEGRA20_CLK_SBC2, .present = true },
+ [tegra_clk_sbc3] = { .dt_id = TEGRA20_CLK_SBC3, .present = true },
+ [tegra_clk_sbc4] = { .dt_id = TEGRA20_CLK_SBC4, .present = true },
+ [tegra_clk_vde] = { .dt_id = TEGRA20_CLK_VDE, .present = true },
+ [tegra_clk_vi] = { .dt_id = TEGRA20_CLK_VI, .present = true },
+ [tegra_clk_epp] = { .dt_id = TEGRA20_CLK_EPP, .present = true },
+ [tegra_clk_mpe] = { .dt_id = TEGRA20_CLK_MPE, .present = true },
+ [tegra_clk_host1x] = { .dt_id = TEGRA20_CLK_HOST1X, .present = true },
+ [tegra_clk_gr2d] = { .dt_id = TEGRA20_CLK_GR2D, .present = true },
+ [tegra_clk_gr3d] = { .dt_id = TEGRA20_CLK_GR3D, .present = true },
+ [tegra_clk_ndflash] = { .dt_id = TEGRA20_CLK_NDFLASH, .present = true },
+ [tegra_clk_cve] = { .dt_id = TEGRA20_CLK_CVE, .present = true },
+ [tegra_clk_tvo] = { .dt_id = TEGRA20_CLK_TVO, .present = true },
+ [tegra_clk_tvdac] = { .dt_id = TEGRA20_CLK_TVDAC, .present = true },
+ [tegra_clk_vi_sensor] = { .dt_id = TEGRA20_CLK_VI_SENSOR, .present = true },
+ [tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true },
};
static unsigned long tegra20_clk_measure_input_freq(void)
@@ -577,10 +635,8 @@ static void tegra20_pll_init(void)
/* PLLC */
clk = tegra_clk_register_pll("pll_c", "pll_ref", clk_base, NULL, 0,
- 0, &pll_c_params, TEGRA_PLL_HAS_CPCON,
- pll_c_freq_table, NULL);
- clk_register_clkdev(clk, "pll_c", NULL);
- clks[pll_c] = clk;
+ &pll_c_params, NULL);
+ clks[TEGRA20_CLK_PLL_C] = clk;
/* PLLC_OUT1 */
clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
@@ -589,71 +645,13 @@ static void tegra20_pll_init(void)
clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
clk_base + PLLC_OUT, 1, 0, CLK_SET_RATE_PARENT,
0, NULL);
- clk_register_clkdev(clk, "pll_c_out1", NULL);
- clks[pll_c_out1] = clk;
-
- /* PLLP */
- clk = tegra_clk_register_pll("pll_p", "pll_ref", clk_base, NULL, 0,
- 216000000, &pll_p_params, TEGRA_PLL_FIXED |
- TEGRA_PLL_HAS_CPCON, pll_p_freq_table, NULL);
- clk_register_clkdev(clk, "pll_p", NULL);
- clks[pll_p] = clk;
-
- /* PLLP_OUT1 */
- clk = tegra_clk_register_divider("pll_p_out1_div", "pll_p",
- clk_base + PLLP_OUTA, 0,
- TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
- 8, 8, 1, &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out1", "pll_p_out1_div",
- clk_base + PLLP_OUTA, 1, 0,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out1", NULL);
- clks[pll_p_out1] = clk;
-
- /* PLLP_OUT2 */
- clk = tegra_clk_register_divider("pll_p_out2_div", "pll_p",
- clk_base + PLLP_OUTA, 0,
- TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
- 24, 8, 1, &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out2", "pll_p_out2_div",
- clk_base + PLLP_OUTA, 17, 16,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out2", NULL);
- clks[pll_p_out2] = clk;
-
- /* PLLP_OUT3 */
- clk = tegra_clk_register_divider("pll_p_out3_div", "pll_p",
- clk_base + PLLP_OUTB, 0,
- TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
- 8, 8, 1, &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out3", "pll_p_out3_div",
- clk_base + PLLP_OUTB, 1, 0,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out3", NULL);
- clks[pll_p_out3] = clk;
-
- /* PLLP_OUT4 */
- clk = tegra_clk_register_divider("pll_p_out4_div", "pll_p",
- clk_base + PLLP_OUTB, 0,
- TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
- 24, 8, 1, &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out4", "pll_p_out4_div",
- clk_base + PLLP_OUTB, 17, 16,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out4", NULL);
- clks[pll_p_out4] = clk;
+ clks[TEGRA20_CLK_PLL_C_OUT1] = clk;
/* PLLM */
clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, NULL,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, 0,
- &pll_m_params, TEGRA_PLL_HAS_CPCON,
- pll_m_freq_table, NULL);
- clk_register_clkdev(clk, "pll_m", NULL);
- clks[pll_m] = clk;
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+ &pll_m_params, NULL);
+ clks[TEGRA20_CLK_PLL_M] = clk;
/* PLLM_OUT1 */
clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
@@ -662,42 +660,32 @@ static void tegra20_pll_init(void)
clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
CLK_SET_RATE_PARENT, 0, NULL);
- clk_register_clkdev(clk, "pll_m_out1", NULL);
- clks[pll_m_out1] = clk;
+ clks[TEGRA20_CLK_PLL_M_OUT1] = clk;
/* PLLX */
clk = tegra_clk_register_pll("pll_x", "pll_ref", clk_base, NULL, 0,
- 0, &pll_x_params, TEGRA_PLL_HAS_CPCON,
- pll_x_freq_table, NULL);
- clk_register_clkdev(clk, "pll_x", NULL);
- clks[pll_x] = clk;
+ &pll_x_params, NULL);
+ clks[TEGRA20_CLK_PLL_X] = clk;
/* PLLU */
clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, NULL, 0,
- 0, &pll_u_params, TEGRA_PLLU | TEGRA_PLL_HAS_CPCON,
- pll_u_freq_table, NULL);
- clk_register_clkdev(clk, "pll_u", NULL);
- clks[pll_u] = clk;
+ &pll_u_params, NULL);
+ clks[TEGRA20_CLK_PLL_U] = clk;
/* PLLD */
clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, NULL, 0,
- 0, &pll_d_params, TEGRA_PLL_HAS_CPCON,
- pll_d_freq_table, NULL);
- clk_register_clkdev(clk, "pll_d", NULL);
- clks[pll_d] = clk;
+ &pll_d_params, NULL);
+ clks[TEGRA20_CLK_PLL_D] = clk;
/* PLLD_OUT0 */
clk = clk_register_fixed_factor(NULL, "pll_d_out0", "pll_d",
CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "pll_d_out0", NULL);
- clks[pll_d_out0] = clk;
+ clks[TEGRA20_CLK_PLL_D_OUT0] = clk;
/* PLLA */
clk = tegra_clk_register_pll("pll_a", "pll_p_out1", clk_base, NULL, 0,
- 0, &pll_a_params, TEGRA_PLL_HAS_CPCON,
- pll_a_freq_table, NULL);
- clk_register_clkdev(clk, "pll_a", NULL);
- clks[pll_a] = clk;
+ &pll_a_params, NULL);
+ clks[TEGRA20_CLK_PLL_A] = clk;
/* PLLA_OUT0 */
clk = tegra_clk_register_divider("pll_a_out0_div", "pll_a",
@@ -706,15 +694,12 @@ static void tegra20_pll_init(void)
clk = tegra_clk_register_pll_out("pll_a_out0", "pll_a_out0_div",
clk_base + PLLA_OUT, 1, 0, CLK_IGNORE_UNUSED |
CLK_SET_RATE_PARENT, 0, NULL);
- clk_register_clkdev(clk, "pll_a_out0", NULL);
- clks[pll_a_out0] = clk;
+ clks[TEGRA20_CLK_PLL_A_OUT0] = clk;
/* PLLE */
clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, pmc_base,
- 0, 100000000, &pll_e_params,
- 0, pll_e_freq_table, NULL);
- clk_register_clkdev(clk, "pll_e", NULL);
- clks[pll_e] = clk;
+ 0, &pll_e_params, NULL);
+ clks[TEGRA20_CLK_PLL_E] = clk;
}
static const char *cclk_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
@@ -732,40 +717,17 @@ static void tegra20_super_clk_init(void)
clk = tegra_clk_register_super_mux("cclk", cclk_parents,
ARRAY_SIZE(cclk_parents), CLK_SET_RATE_PARENT,
clk_base + CCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
- clk_register_clkdev(clk, "cclk", NULL);
- clks[cclk] = clk;
+ clks[TEGRA20_CLK_CCLK] = clk;
/* SCLK */
clk = tegra_clk_register_super_mux("sclk", sclk_parents,
ARRAY_SIZE(sclk_parents), CLK_SET_RATE_PARENT,
clk_base + SCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
- clk_register_clkdev(clk, "sclk", NULL);
- clks[sclk] = clk;
-
- /* HCLK */
- clk = clk_register_divider(NULL, "hclk_div", "sclk", 0,
- clk_base + CLK_SYSTEM_RATE, 4, 2, 0,
- &sysrate_lock);
- clk = clk_register_gate(NULL, "hclk", "hclk_div", CLK_SET_RATE_PARENT,
- clk_base + CLK_SYSTEM_RATE, 7,
- CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
- clk_register_clkdev(clk, "hclk", NULL);
- clks[hclk] = clk;
-
- /* PCLK */
- clk = clk_register_divider(NULL, "pclk_div", "hclk", 0,
- clk_base + CLK_SYSTEM_RATE, 0, 2, 0,
- &sysrate_lock);
- clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT,
- clk_base + CLK_SYSTEM_RATE, 3,
- CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
- clk_register_clkdev(clk, "pclk", NULL);
- clks[pclk] = clk;
+ clks[TEGRA20_CLK_SCLK] = clk;
/* twd */
clk = clk_register_fixed_factor(NULL, "twd", "cclk", 0, 1, 4);
- clk_register_clkdev(clk, "twd", NULL);
- clks[twd] = clk;
+ clks[TEGRA20_CLK_TWD] = clk;
}
static const char *audio_parents[] = {"spdif_in", "i2s1", "i2s2", "unused",
@@ -784,18 +746,16 @@ static void __init tegra20_audio_clk_init(void)
clk = clk_register_gate(NULL, "audio", "audio_mux", 0,
clk_base + AUDIO_SYNC_CLK, 4,
CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio", NULL);
- clks[audio] = clk;
+ clks[TEGRA20_CLK_AUDIO] = clk;
/* audio_2x */
clk = clk_register_fixed_factor(NULL, "audio_doubler", "audio",
CLK_SET_RATE_PARENT, 2, 1);
clk = tegra_clk_register_periph_gate("audio_2x", "audio_doubler",
TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 89, &periph_u_regs,
+ CLK_SET_RATE_PARENT, 89,
periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio_2x", NULL);
- clks[audio_2x] = clk;
+ clks[TEGRA20_CLK_AUDIO_2X] = clk;
}
@@ -803,68 +763,36 @@ static const char *i2s1_parents[] = {"pll_a_out0", "audio_2x", "pll_p",
"clk_m"};
static const char *i2s2_parents[] = {"pll_a_out0", "audio_2x", "pll_p",
"clk_m"};
-static const char *spdif_out_parents[] = {"pll_a_out0", "audio_2x", "pll_p",
- "clk_m"};
-static const char *spdif_in_parents[] = {"pll_p", "pll_c", "pll_m"};
static const char *pwm_parents[] = {"pll_p", "pll_c", "audio", "clk_m",
"clk_32k"};
static const char *mux_pllpcm_clkm[] = {"pll_p", "pll_c", "pll_m", "clk_m"};
-static const char *mux_pllmcpa[] = {"pll_m", "pll_c", "pll_c", "pll_a"};
static const char *mux_pllpdc_clkm[] = {"pll_p", "pll_d_out0", "pll_c",
"clk_m"};
static const char *mux_pllmcp_clkm[] = {"pll_m", "pll_c", "pll_p", "clk_m"};
static struct tegra_periph_init_data tegra_periph_clk_list[] = {
- TEGRA_INIT_DATA_MUX("i2s1", NULL, "tegra20-i2s.0", i2s1_parents, CLK_SOURCE_I2S1, 11, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s1),
- TEGRA_INIT_DATA_MUX("i2s2", NULL, "tegra20-i2s.1", i2s2_parents, CLK_SOURCE_I2S2, 18, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s2),
- TEGRA_INIT_DATA_MUX("spdif_out", "spdif_out", "tegra20-spdif", spdif_out_parents, CLK_SOURCE_SPDIF_OUT, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_out),
- TEGRA_INIT_DATA_MUX("spdif_in", "spdif_in", "tegra20-spdif", spdif_in_parents, CLK_SOURCE_SPDIF_IN, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_in),
- TEGRA_INIT_DATA_MUX("sbc1", NULL, "spi_tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SBC1, 41, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc1),
- TEGRA_INIT_DATA_MUX("sbc2", NULL, "spi_tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SBC2, 44, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc2),
- TEGRA_INIT_DATA_MUX("sbc3", NULL, "spi_tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SBC3, 46, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc3),
- TEGRA_INIT_DATA_MUX("sbc4", NULL, "spi_tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SBC4, 68, &periph_u_regs, TEGRA_PERIPH_ON_APB, sbc4),
- TEGRA_INIT_DATA_MUX("spi", NULL, "spi", mux_pllpcm_clkm, CLK_SOURCE_SPI, 43, &periph_h_regs, TEGRA_PERIPH_ON_APB, spi),
- TEGRA_INIT_DATA_MUX("xio", NULL, "xio", mux_pllpcm_clkm, CLK_SOURCE_XIO, 45, &periph_h_regs, 0, xio),
- TEGRA_INIT_DATA_MUX("twc", NULL, "twc", mux_pllpcm_clkm, CLK_SOURCE_TWC, 16, &periph_l_regs, TEGRA_PERIPH_ON_APB, twc),
- TEGRA_INIT_DATA_MUX("ide", NULL, "ide", mux_pllpcm_clkm, CLK_SOURCE_XIO, 25, &periph_l_regs, 0, ide),
- TEGRA_INIT_DATA_MUX("ndflash", NULL, "tegra_nand", mux_pllpcm_clkm, CLK_SOURCE_NDFLASH, 13, &periph_l_regs, 0, ndflash),
- TEGRA_INIT_DATA_MUX("vfir", NULL, "vfir", mux_pllpcm_clkm, CLK_SOURCE_VFIR, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, vfir),
- TEGRA_INIT_DATA_MUX("csite", NULL, "csite", mux_pllpcm_clkm, CLK_SOURCE_CSITE, 73, &periph_u_regs, 0, csite),
- TEGRA_INIT_DATA_MUX("la", NULL, "la", mux_pllpcm_clkm, CLK_SOURCE_LA, 76, &periph_u_regs, 0, la),
- TEGRA_INIT_DATA_MUX("owr", NULL, "tegra_w1", mux_pllpcm_clkm, CLK_SOURCE_OWR, 71, &periph_u_regs, TEGRA_PERIPH_ON_APB, owr),
- TEGRA_INIT_DATA_MUX("mipi", NULL, "mipi", mux_pllpcm_clkm, CLK_SOURCE_MIPI, 50, &periph_h_regs, TEGRA_PERIPH_ON_APB, mipi),
- TEGRA_INIT_DATA_MUX("vde", NULL, "vde", mux_pllpcm_clkm, CLK_SOURCE_VDE, 61, &periph_h_regs, 0, vde),
- TEGRA_INIT_DATA_MUX("vi", "vi", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI, 20, &periph_l_regs, 0, vi),
- TEGRA_INIT_DATA_MUX("epp", NULL, "epp", mux_pllmcpa, CLK_SOURCE_EPP, 19, &periph_l_regs, 0, epp),
- TEGRA_INIT_DATA_MUX("mpe", NULL, "mpe", mux_pllmcpa, CLK_SOURCE_MPE, 60, &periph_h_regs, 0, mpe),
- TEGRA_INIT_DATA_MUX("host1x", NULL, "host1x", mux_pllmcpa, CLK_SOURCE_HOST1X, 28, &periph_l_regs, 0, host1x),
- TEGRA_INIT_DATA_MUX("3d", NULL, "3d", mux_pllmcpa, CLK_SOURCE_3D, 24, &periph_l_regs, TEGRA_PERIPH_MANUAL_RESET, gr3d),
- TEGRA_INIT_DATA_MUX("2d", NULL, "2d", mux_pllmcpa, CLK_SOURCE_2D, 21, &periph_l_regs, 0, gr2d),
- TEGRA_INIT_DATA_MUX("nor", NULL, "tegra-nor", mux_pllpcm_clkm, CLK_SOURCE_NOR, 42, &periph_h_regs, 0, nor),
- TEGRA_INIT_DATA_MUX("sdmmc1", NULL, "sdhci-tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SDMMC1, 14, &periph_l_regs, 0, sdmmc1),
- TEGRA_INIT_DATA_MUX("sdmmc2", NULL, "sdhci-tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SDMMC2, 9, &periph_l_regs, 0, sdmmc2),
- TEGRA_INIT_DATA_MUX("sdmmc3", NULL, "sdhci-tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SDMMC3, 69, &periph_u_regs, 0, sdmmc3),
- TEGRA_INIT_DATA_MUX("sdmmc4", NULL, "sdhci-tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SDMMC4, 15, &periph_l_regs, 0, sdmmc4),
- TEGRA_INIT_DATA_MUX("cve", NULL, "cve", mux_pllpdc_clkm, CLK_SOURCE_CVE, 49, &periph_h_regs, 0, cve),
- TEGRA_INIT_DATA_MUX("tvo", NULL, "tvo", mux_pllpdc_clkm, CLK_SOURCE_TVO, 49, &periph_h_regs, 0, tvo),
- TEGRA_INIT_DATA_MUX("tvdac", NULL, "tvdac", mux_pllpdc_clkm, CLK_SOURCE_TVDAC, 53, &periph_h_regs, 0, tvdac),
- TEGRA_INIT_DATA_MUX("vi_sensor", "vi_sensor", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI_SENSOR, 20, &periph_l_regs, TEGRA_PERIPH_NO_RESET, vi_sensor),
- TEGRA_INIT_DATA_DIV16("i2c1", "div-clk", "tegra-i2c.0", mux_pllpcm_clkm, CLK_SOURCE_I2C1, 12, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2c1),
- TEGRA_INIT_DATA_DIV16("i2c2", "div-clk", "tegra-i2c.1", mux_pllpcm_clkm, CLK_SOURCE_I2C2, 54, &periph_h_regs, TEGRA_PERIPH_ON_APB, i2c2),
- TEGRA_INIT_DATA_DIV16("i2c3", "div-clk", "tegra-i2c.2", mux_pllpcm_clkm, CLK_SOURCE_I2C3, 67, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2c3),
- TEGRA_INIT_DATA_DIV16("dvc", "div-clk", "tegra-i2c.3", mux_pllpcm_clkm, CLK_SOURCE_DVC, 47, &periph_h_regs, TEGRA_PERIPH_ON_APB, dvc),
- TEGRA_INIT_DATA_MUX("hdmi", NULL, "hdmi", mux_pllpdc_clkm, CLK_SOURCE_HDMI, 51, &periph_h_regs, 0, hdmi),
- TEGRA_INIT_DATA("pwm", NULL, "tegra-pwm", pwm_parents, CLK_SOURCE_PWM, 28, 3, 0, 0, 8, 1, 0, &periph_l_regs, 17, periph_clk_enb_refcnt, TEGRA_PERIPH_ON_APB, pwm),
+ TEGRA_INIT_DATA_MUX("i2s1", i2s1_parents, CLK_SOURCE_I2S1, 11, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_I2S1),
+ TEGRA_INIT_DATA_MUX("i2s2", i2s2_parents, CLK_SOURCE_I2S2, 18, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_I2S2),
+ TEGRA_INIT_DATA_MUX("spi", mux_pllpcm_clkm, CLK_SOURCE_SPI, 43, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_SPI),
+ TEGRA_INIT_DATA_MUX("xio", mux_pllpcm_clkm, CLK_SOURCE_XIO, 45, 0, TEGRA20_CLK_XIO),
+ TEGRA_INIT_DATA_MUX("twc", mux_pllpcm_clkm, CLK_SOURCE_TWC, 16, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_TWC),
+ TEGRA_INIT_DATA_MUX("ide", mux_pllpcm_clkm, CLK_SOURCE_XIO, 25, 0, TEGRA20_CLK_IDE),
+ TEGRA_INIT_DATA_DIV16("dvc", mux_pllpcm_clkm, CLK_SOURCE_DVC, 47, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_DVC),
+ TEGRA_INIT_DATA_DIV16("i2c1", mux_pllpcm_clkm, CLK_SOURCE_I2C1, 12, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_I2C1),
+ TEGRA_INIT_DATA_DIV16("i2c2", mux_pllpcm_clkm, CLK_SOURCE_I2C2, 54, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_I2C2),
+ TEGRA_INIT_DATA_DIV16("i2c3", mux_pllpcm_clkm, CLK_SOURCE_I2C3, 67, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_I2C3),
+ TEGRA_INIT_DATA_MUX("hdmi", mux_pllpdc_clkm, CLK_SOURCE_HDMI, 51, 0, TEGRA20_CLK_HDMI),
+ TEGRA_INIT_DATA("pwm", NULL, NULL, pwm_parents, CLK_SOURCE_PWM, 28, 3, 0, 0, 8, 1, 0, 17, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_PWM),
};
static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
- TEGRA_INIT_DATA_NODIV("uarta", NULL, "tegra_uart.0", mux_pllpcm_clkm, CLK_SOURCE_UARTA, 30, 2, 6, &periph_l_regs, TEGRA_PERIPH_ON_APB, uarta),
- TEGRA_INIT_DATA_NODIV("uartb", NULL, "tegra_uart.1", mux_pllpcm_clkm, CLK_SOURCE_UARTB, 30, 2, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, uartb),
- TEGRA_INIT_DATA_NODIV("uartc", NULL, "tegra_uart.2", mux_pllpcm_clkm, CLK_SOURCE_UARTC, 30, 2, 55, &periph_h_regs, TEGRA_PERIPH_ON_APB, uartc),
- TEGRA_INIT_DATA_NODIV("uartd", NULL, "tegra_uart.3", mux_pllpcm_clkm, CLK_SOURCE_UARTD, 30, 2, 65, &periph_u_regs, TEGRA_PERIPH_ON_APB, uartd),
- TEGRA_INIT_DATA_NODIV("uarte", NULL, "tegra_uart.4", mux_pllpcm_clkm, CLK_SOURCE_UARTE, 30, 2, 66, &periph_u_regs, TEGRA_PERIPH_ON_APB, uarte),
- TEGRA_INIT_DATA_NODIV("disp1", NULL, "tegradc.0", mux_pllpdc_clkm, CLK_SOURCE_DISP1, 30, 2, 27, &periph_l_regs, 0, disp1),
- TEGRA_INIT_DATA_NODIV("disp2", NULL, "tegradc.1", mux_pllpdc_clkm, CLK_SOURCE_DISP2, 30, 2, 26, &periph_l_regs, 0, disp2),
+ TEGRA_INIT_DATA_NODIV("uarta", mux_pllpcm_clkm, CLK_SOURCE_UARTA, 30, 2, 6, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_UARTA),
+ TEGRA_INIT_DATA_NODIV("uartb", mux_pllpcm_clkm, CLK_SOURCE_UARTB, 30, 2, 7, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_UARTB),
+ TEGRA_INIT_DATA_NODIV("uartc", mux_pllpcm_clkm, CLK_SOURCE_UARTC, 30, 2, 55, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_UARTC),
+ TEGRA_INIT_DATA_NODIV("uartd", mux_pllpcm_clkm, CLK_SOURCE_UARTD, 30, 2, 65, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_UARTD),
+ TEGRA_INIT_DATA_NODIV("uarte", mux_pllpcm_clkm, CLK_SOURCE_UARTE, 30, 2, 66, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_UARTE),
+ TEGRA_INIT_DATA_NODIV("disp1", mux_pllpdc_clkm, CLK_SOURCE_DISP1, 30, 2, 27, 0, TEGRA20_CLK_DISP1),
+ TEGRA_INIT_DATA_NODIV("disp2", mux_pllpdc_clkm, CLK_SOURCE_DISP2, 30, 2, 26, 0, TEGRA20_CLK_DISP2),
};
static void __init tegra20_periph_clk_init(void)
@@ -876,69 +804,13 @@ static void __init tegra20_periph_clk_init(void)
/* ac97 */
clk = tegra_clk_register_periph_gate("ac97", "pll_a_out0",
TEGRA_PERIPH_ON_APB,
- clk_base, 0, 3, &periph_l_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra20-ac97");
- clks[ac97] = clk;
+ clk_base, 0, 3, periph_clk_enb_refcnt);
+ clks[TEGRA20_CLK_AC97] = clk;
/* apbdma */
clk = tegra_clk_register_periph_gate("apbdma", "pclk", 0, clk_base,
- 0, 34, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra-apbdma");
- clks[apbdma] = clk;
-
- /* rtc */
- clk = tegra_clk_register_periph_gate("rtc", "clk_32k",
- TEGRA_PERIPH_NO_RESET,
- clk_base, 0, 4, &periph_l_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "rtc-tegra");
- clks[rtc] = clk;
-
- /* timer */
- clk = tegra_clk_register_periph_gate("timer", "clk_m", 0, clk_base,
- 0, 5, &periph_l_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "timer");
- clks[timer] = clk;
-
- /* kbc */
- clk = tegra_clk_register_periph_gate("kbc", "clk_32k",
- TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
- clk_base, 0, 36, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra-kbc");
- clks[kbc] = clk;
-
- /* csus */
- clk = tegra_clk_register_periph_gate("csus", "clk_m",
- TEGRA_PERIPH_NO_RESET,
- clk_base, 0, 92, &periph_u_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "csus", "tengra_camera");
- clks[csus] = clk;
-
- /* vcp */
- clk = tegra_clk_register_periph_gate("vcp", "clk_m", 0,
- clk_base, 0, 29, &periph_l_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "vcp", "tegra-avp");
- clks[vcp] = clk;
-
- /* bsea */
- clk = tegra_clk_register_periph_gate("bsea", "clk_m", 0,
- clk_base, 0, 62, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "bsea", "tegra-avp");
- clks[bsea] = clk;
-
- /* bsev */
- clk = tegra_clk_register_periph_gate("bsev", "clk_m", 0,
- clk_base, 0, 63, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "bsev", "tegra-aes");
- clks[bsev] = clk;
+ 0, 34, periph_clk_enb_refcnt);
+ clks[TEGRA20_CLK_APBDMA] = clk;
/* emc */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
@@ -947,130 +819,52 @@ static void __init tegra20_periph_clk_init(void)
clk_base + CLK_SOURCE_EMC,
30, 2, 0, NULL);
clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
- 57, &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "emc", NULL);
- clks[emc] = clk;
-
- /* usbd */
- clk = tegra_clk_register_periph_gate("usbd", "clk_m", 0, clk_base, 0,
- 22, &periph_l_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "fsl-tegra-udc");
- clks[usbd] = clk;
-
- /* usb2 */
- clk = tegra_clk_register_periph_gate("usb2", "clk_m", 0, clk_base, 0,
- 58, &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra-ehci.1");
- clks[usb2] = clk;
-
- /* usb3 */
- clk = tegra_clk_register_periph_gate("usb3", "clk_m", 0, clk_base, 0,
- 59, &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra-ehci.2");
- clks[usb3] = clk;
+ 57, periph_clk_enb_refcnt);
+ clks[TEGRA20_CLK_EMC] = clk;
/* dsi */
clk = tegra_clk_register_periph_gate("dsi", "pll_d", 0, clk_base, 0,
- 48, &periph_h_regs, periph_clk_enb_refcnt);
+ 48, periph_clk_enb_refcnt);
clk_register_clkdev(clk, NULL, "dsi");
- clks[dsi] = clk;
-
- /* csi */
- clk = tegra_clk_register_periph_gate("csi", "pll_p_out3", 0, clk_base,
- 0, 52, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "csi", "tegra_camera");
- clks[csi] = clk;
-
- /* isp */
- clk = tegra_clk_register_periph_gate("isp", "clk_m", 0, clk_base, 0, 23,
- &periph_l_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "isp", "tegra_camera");
- clks[isp] = clk;
+ clks[TEGRA20_CLK_DSI] = clk;
/* pex */
clk = tegra_clk_register_periph_gate("pex", "clk_m", 0, clk_base, 0, 70,
- &periph_u_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "pex", NULL);
- clks[pex] = clk;
-
- /* afi */
- clk = tegra_clk_register_periph_gate("afi", "clk_m", 0, clk_base, 0, 72,
- &periph_u_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "afi", NULL);
- clks[afi] = clk;
-
- /* pcie_xclk */
- clk = tegra_clk_register_periph_gate("pcie_xclk", "clk_m", 0, clk_base,
- 0, 74, &periph_u_regs,
periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "pcie_xclk", NULL);
- clks[pcie_xclk] = clk;
+ clks[TEGRA20_CLK_PEX] = clk;
/* cdev1 */
clk = clk_register_fixed_rate(NULL, "cdev1_fixed", NULL, CLK_IS_ROOT,
26000000);
clk = tegra_clk_register_periph_gate("cdev1", "cdev1_fixed", 0,
- clk_base, 0, 94, &periph_u_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "cdev1", NULL);
- clks[cdev1] = clk;
+ clk_base, 0, 94, periph_clk_enb_refcnt);
+ clks[TEGRA20_CLK_CDEV1] = clk;
/* cdev2 */
clk = clk_register_fixed_rate(NULL, "cdev2_fixed", NULL, CLK_IS_ROOT,
26000000);
clk = tegra_clk_register_periph_gate("cdev2", "cdev2_fixed", 0,
- clk_base, 0, 93, &periph_u_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "cdev2", NULL);
- clks[cdev2] = clk;
+ clk_base, 0, 93, periph_clk_enb_refcnt);
+ clks[TEGRA20_CLK_CDEV2] = clk;
for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
data = &tegra_periph_clk_list[i];
- clk = tegra_clk_register_periph(data->name, data->parent_names,
+ clk = tegra_clk_register_periph(data->name, data->p.parent_names,
data->num_parents, &data->periph,
clk_base, data->offset, data->flags);
- clk_register_clkdev(clk, data->con_id, data->dev_id);
clks[data->clk_id] = clk;
}
for (i = 0; i < ARRAY_SIZE(tegra_periph_nodiv_clk_list); i++) {
data = &tegra_periph_nodiv_clk_list[i];
clk = tegra_clk_register_periph_nodiv(data->name,
- data->parent_names,
+ data->p.parent_names,
data->num_parents, &data->periph,
clk_base, data->offset);
- clk_register_clkdev(clk, data->con_id, data->dev_id);
clks[data->clk_id] = clk;
}
-}
-
-
-static void __init tegra20_fixed_clk_init(void)
-{
- struct clk *clk;
-
- /* clk_32k */
- clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, CLK_IS_ROOT,
- 32768);
- clk_register_clkdev(clk, "clk_32k", NULL);
- clks[clk_32k] = clk;
-}
-
-static void __init tegra20_pmc_clk_init(void)
-{
- struct clk *clk;
- /* blink */
- writel_relaxed(0, pmc_base + PMC_BLINK_TIMER);
- clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
- pmc_base + PMC_DPD_PADS_ORIDE,
- PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
- clk = clk_register_gate(NULL, "blink", "blink_override", 0,
- pmc_base + PMC_CTRL,
- PMC_CTRL_BLINK_ENB, 0, NULL);
- clk_register_clkdev(clk, "blink", NULL);
- clks[blink] = clk;
+ tegra_periph_clk_init(clk_base, pmc_base, tegra20_clks, &pll_p_params);
}
static void __init tegra20_osc_clk_init(void)
@@ -1084,15 +878,13 @@ static void __init tegra20_osc_clk_init(void)
/* clk_m */
clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT |
CLK_IGNORE_UNUSED, input_freq);
- clk_register_clkdev(clk, "clk_m", NULL);
- clks[clk_m] = clk;
+ clks[TEGRA20_CLK_CLK_M] = clk;
/* pll_ref */
pll_ref_div = tegra20_get_pll_ref_div();
clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
CLK_SET_RATE_PARENT, 1, pll_ref_div);
- clk_register_clkdev(clk, "pll_ref", NULL);
- clks[pll_ref] = clk;
+ clks[TEGRA20_CLK_PLL_REF] = clk;
}
/* Tegra20 CPU clock and reset control functions */
@@ -1226,49 +1018,49 @@ static struct tegra_cpu_car_ops tegra20_cpu_car_ops = {
};
static struct tegra_clk_init_table init_table[] __initdata = {
- {pll_p, clk_max, 216000000, 1},
- {pll_p_out1, clk_max, 28800000, 1},
- {pll_p_out2, clk_max, 48000000, 1},
- {pll_p_out3, clk_max, 72000000, 1},
- {pll_p_out4, clk_max, 24000000, 1},
- {pll_c, clk_max, 600000000, 1},
- {pll_c_out1, clk_max, 120000000, 1},
- {sclk, pll_c_out1, 0, 1},
- {hclk, clk_max, 0, 1},
- {pclk, clk_max, 60000000, 1},
- {csite, clk_max, 0, 1},
- {emc, clk_max, 0, 1},
- {cclk, clk_max, 0, 1},
- {uarta, pll_p, 0, 0},
- {uartb, pll_p, 0, 0},
- {uartc, pll_p, 0, 0},
- {uartd, pll_p, 0, 0},
- {uarte, pll_p, 0, 0},
- {pll_a, clk_max, 56448000, 1},
- {pll_a_out0, clk_max, 11289600, 1},
- {cdev1, clk_max, 0, 1},
- {blink, clk_max, 32768, 1},
- {i2s1, pll_a_out0, 11289600, 0},
- {i2s2, pll_a_out0, 11289600, 0},
- {sdmmc1, pll_p, 48000000, 0},
- {sdmmc3, pll_p, 48000000, 0},
- {sdmmc4, pll_p, 48000000, 0},
- {spi, pll_p, 20000000, 0},
- {sbc1, pll_p, 100000000, 0},
- {sbc2, pll_p, 100000000, 0},
- {sbc3, pll_p, 100000000, 0},
- {sbc4, pll_p, 100000000, 0},
- {host1x, pll_c, 150000000, 0},
- {disp1, pll_p, 600000000, 0},
- {disp2, pll_p, 600000000, 0},
- {gr2d, pll_c, 300000000, 0},
- {gr3d, pll_c, 300000000, 0},
- {clk_max, clk_max, 0, 0}, /* This MUST be the last entry */
+ {TEGRA20_CLK_PLL_P, TEGRA20_CLK_CLK_MAX, 216000000, 1},
+ {TEGRA20_CLK_PLL_P_OUT1, TEGRA20_CLK_CLK_MAX, 28800000, 1},
+ {TEGRA20_CLK_PLL_P_OUT2, TEGRA20_CLK_CLK_MAX, 48000000, 1},
+ {TEGRA20_CLK_PLL_P_OUT3, TEGRA20_CLK_CLK_MAX, 72000000, 1},
+ {TEGRA20_CLK_PLL_P_OUT4, TEGRA20_CLK_CLK_MAX, 24000000, 1},
+ {TEGRA20_CLK_PLL_C, TEGRA20_CLK_CLK_MAX, 600000000, 1},
+ {TEGRA20_CLK_PLL_C_OUT1, TEGRA20_CLK_CLK_MAX, 120000000, 1},
+ {TEGRA20_CLK_SCLK, TEGRA20_CLK_PLL_C_OUT1, 0, 1},
+ {TEGRA20_CLK_HCLK, TEGRA20_CLK_CLK_MAX, 0, 1},
+ {TEGRA20_CLK_PCLK, TEGRA20_CLK_CLK_MAX, 60000000, 1},
+ {TEGRA20_CLK_CSITE, TEGRA20_CLK_CLK_MAX, 0, 1},
+ {TEGRA20_CLK_EMC, TEGRA20_CLK_CLK_MAX, 0, 1},
+ {TEGRA20_CLK_CCLK, TEGRA20_CLK_CLK_MAX, 0, 1},
+ {TEGRA20_CLK_UARTA, TEGRA20_CLK_PLL_P, 0, 0},
+ {TEGRA20_CLK_UARTB, TEGRA20_CLK_PLL_P, 0, 0},
+ {TEGRA20_CLK_UARTC, TEGRA20_CLK_PLL_P, 0, 0},
+ {TEGRA20_CLK_UARTD, TEGRA20_CLK_PLL_P, 0, 0},
+ {TEGRA20_CLK_UARTE, TEGRA20_CLK_PLL_P, 0, 0},
+ {TEGRA20_CLK_PLL_A, TEGRA20_CLK_CLK_MAX, 56448000, 1},
+ {TEGRA20_CLK_PLL_A_OUT0, TEGRA20_CLK_CLK_MAX, 11289600, 1},
+ {TEGRA20_CLK_CDEV1, TEGRA20_CLK_CLK_MAX, 0, 1},
+ {TEGRA20_CLK_BLINK, TEGRA20_CLK_CLK_MAX, 32768, 1},
+ {TEGRA20_CLK_I2S1, TEGRA20_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA20_CLK_I2S2, TEGRA20_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA20_CLK_SDMMC1, TEGRA20_CLK_PLL_P, 48000000, 0},
+ {TEGRA20_CLK_SDMMC3, TEGRA20_CLK_PLL_P, 48000000, 0},
+ {TEGRA20_CLK_SDMMC4, TEGRA20_CLK_PLL_P, 48000000, 0},
+ {TEGRA20_CLK_SPI, TEGRA20_CLK_PLL_P, 20000000, 0},
+ {TEGRA20_CLK_SBC1, TEGRA20_CLK_PLL_P, 100000000, 0},
+ {TEGRA20_CLK_SBC2, TEGRA20_CLK_PLL_P, 100000000, 0},
+ {TEGRA20_CLK_SBC3, TEGRA20_CLK_PLL_P, 100000000, 0},
+ {TEGRA20_CLK_SBC4, TEGRA20_CLK_PLL_P, 100000000, 0},
+ {TEGRA20_CLK_HOST1X, TEGRA20_CLK_PLL_C, 150000000, 0},
+ {TEGRA20_CLK_DISP1, TEGRA20_CLK_PLL_P, 600000000, 0},
+ {TEGRA20_CLK_DISP2, TEGRA20_CLK_PLL_P, 600000000, 0},
+ {TEGRA20_CLK_GR2D, TEGRA20_CLK_PLL_C, 300000000, 0},
+ {TEGRA20_CLK_GR3D, TEGRA20_CLK_PLL_C, 300000000, 0},
+ {TEGRA20_CLK_CLK_MAX, TEGRA20_CLK_CLK_MAX, 0, 0}, /* This MUST be the last entry */
};
static void __init tegra20_clock_apply_init_table(void)
{
- tegra_init_from_table(init_table, clks, clk_max);
+ tegra_init_from_table(init_table, clks, TEGRA20_CLK_CLK_MAX);
}
/*
@@ -1277,11 +1069,11 @@ static void __init tegra20_clock_apply_init_table(void)
* table under two names.
*/
static struct tegra_clk_duplicate tegra_clk_duplicates[] = {
- TEGRA_CLK_DUPLICATE(usbd, "utmip-pad", NULL),
- TEGRA_CLK_DUPLICATE(usbd, "tegra-ehci.0", NULL),
- TEGRA_CLK_DUPLICATE(usbd, "tegra-otg", NULL),
- TEGRA_CLK_DUPLICATE(cclk, NULL, "cpu"),
- TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL), /* Must be the last entry */
+ TEGRA_CLK_DUPLICATE(TEGRA20_CLK_USBD, "utmip-pad", NULL),
+ TEGRA_CLK_DUPLICATE(TEGRA20_CLK_USBD, "tegra-ehci.0", NULL),
+ TEGRA_CLK_DUPLICATE(TEGRA20_CLK_USBD, "tegra-otg", NULL),
+ TEGRA_CLK_DUPLICATE(TEGRA20_CLK_CCLK, NULL, "cpu"),
+ TEGRA_CLK_DUPLICATE(TEGRA20_CLK_CLK_MAX, NULL, NULL), /* Must be the last entry */
};
static const struct of_device_id pmc_match[] __initconst = {
@@ -1291,7 +1083,6 @@ static const struct of_device_id pmc_match[] __initconst = {
static void __init tegra20_clock_init(struct device_node *np)
{
- int i;
struct device_node *node;
clk_base = of_iomap(np, 0);
@@ -1312,30 +1103,24 @@ static void __init tegra20_clock_init(struct device_node *np)
BUG();
}
+ clks = tegra_clk_init(clk_base, TEGRA20_CLK_CLK_MAX,
+ TEGRA20_CLK_PERIPH_BANKS);
+ if (!clks)
+ return;
+
tegra20_osc_clk_init();
- tegra20_pmc_clk_init();
- tegra20_fixed_clk_init();
+ tegra_fixed_clk_init(tegra20_clks);
tegra20_pll_init();
tegra20_super_clk_init();
+ tegra_super_clk_gen4_init(clk_base, pmc_base, tegra20_clks, NULL);
tegra20_periph_clk_init();
tegra20_audio_clk_init();
+ tegra_pmc_clk_init(pmc_base, tegra20_clks);
+ tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA20_CLK_CLK_MAX);
- for (i = 0; i < ARRAY_SIZE(clks); i++) {
- if (IS_ERR(clks[i])) {
- pr_err("Tegra20 clk %d: register failed with %ld\n",
- i, PTR_ERR(clks[i]));
- BUG();
- }
- if (!clks[i])
- clks[i] = ERR_PTR(-EINVAL);
- }
-
- tegra_init_dup_clks(tegra_clk_duplicates, clks, clk_max);
-
- clk_data.clks = clks;
- clk_data.clk_num = ARRAY_SIZE(clks);
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ tegra_add_of_provider(np);
+ tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
tegra_clk_apply_init_table = tegra20_clock_apply_init_table;
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index dbe7c8003c5c..8b10c38b6e3c 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -23,42 +23,9 @@
#include <linux/of_address.h>
#include <linux/clk/tegra.h>
#include <linux/tegra-powergate.h>
-
+#include <dt-bindings/clock/tegra30-car.h>
#include "clk.h"
-
-#define RST_DEVICES_L 0x004
-#define RST_DEVICES_H 0x008
-#define RST_DEVICES_U 0x00c
-#define RST_DEVICES_V 0x358
-#define RST_DEVICES_W 0x35c
-#define RST_DEVICES_SET_L 0x300
-#define RST_DEVICES_CLR_L 0x304
-#define RST_DEVICES_SET_H 0x308
-#define RST_DEVICES_CLR_H 0x30c
-#define RST_DEVICES_SET_U 0x310
-#define RST_DEVICES_CLR_U 0x314
-#define RST_DEVICES_SET_V 0x430
-#define RST_DEVICES_CLR_V 0x434
-#define RST_DEVICES_SET_W 0x438
-#define RST_DEVICES_CLR_W 0x43c
-#define RST_DEVICES_NUM 5
-
-#define CLK_OUT_ENB_L 0x010
-#define CLK_OUT_ENB_H 0x014
-#define CLK_OUT_ENB_U 0x018
-#define CLK_OUT_ENB_V 0x360
-#define CLK_OUT_ENB_W 0x364
-#define CLK_OUT_ENB_SET_L 0x320
-#define CLK_OUT_ENB_CLR_L 0x324
-#define CLK_OUT_ENB_SET_H 0x328
-#define CLK_OUT_ENB_CLR_H 0x32c
-#define CLK_OUT_ENB_SET_U 0x330
-#define CLK_OUT_ENB_CLR_U 0x334
-#define CLK_OUT_ENB_SET_V 0x440
-#define CLK_OUT_ENB_CLR_V 0x444
-#define CLK_OUT_ENB_SET_W 0x448
-#define CLK_OUT_ENB_CLR_W 0x44c
-#define CLK_OUT_ENB_NUM 5
+#include "clk-id.h"
#define OSC_CTRL 0x50
#define OSC_CTRL_OSC_FREQ_MASK (0xF<<28)
@@ -92,6 +59,8 @@
#define SYSTEM_CLK_RATE 0x030
+#define TEGRA30_CLK_PERIPH_BANKS 5
+
#define PLLC_BASE 0x80
#define PLLC_MISC 0x8c
#define PLLM_BASE 0x90
@@ -132,88 +101,21 @@
#define AUDIO_SYNC_CLK_I2S4 0x4b0
#define AUDIO_SYNC_CLK_SPDIF 0x4b4
-#define PMC_CLK_OUT_CNTRL 0x1a8
-
-#define CLK_SOURCE_I2S0 0x1d8
-#define CLK_SOURCE_I2S1 0x100
-#define CLK_SOURCE_I2S2 0x104
-#define CLK_SOURCE_I2S3 0x3bc
-#define CLK_SOURCE_I2S4 0x3c0
#define CLK_SOURCE_SPDIF_OUT 0x108
-#define CLK_SOURCE_SPDIF_IN 0x10c
#define CLK_SOURCE_PWM 0x110
#define CLK_SOURCE_D_AUDIO 0x3d0
#define CLK_SOURCE_DAM0 0x3d8
#define CLK_SOURCE_DAM1 0x3dc
#define CLK_SOURCE_DAM2 0x3e0
-#define CLK_SOURCE_HDA 0x428
-#define CLK_SOURCE_HDA2CODEC_2X 0x3e4
-#define CLK_SOURCE_SBC1 0x134
-#define CLK_SOURCE_SBC2 0x118
-#define CLK_SOURCE_SBC3 0x11c
-#define CLK_SOURCE_SBC4 0x1b4
-#define CLK_SOURCE_SBC5 0x3c8
-#define CLK_SOURCE_SBC6 0x3cc
-#define CLK_SOURCE_SATA_OOB 0x420
-#define CLK_SOURCE_SATA 0x424
-#define CLK_SOURCE_NDFLASH 0x160
-#define CLK_SOURCE_NDSPEED 0x3f8
-#define CLK_SOURCE_VFIR 0x168
-#define CLK_SOURCE_SDMMC1 0x150
-#define CLK_SOURCE_SDMMC2 0x154
-#define CLK_SOURCE_SDMMC3 0x1bc
-#define CLK_SOURCE_SDMMC4 0x164
-#define CLK_SOURCE_VDE 0x1c8
-#define CLK_SOURCE_CSITE 0x1d4
-#define CLK_SOURCE_LA 0x1f8
-#define CLK_SOURCE_OWR 0x1cc
-#define CLK_SOURCE_NOR 0x1d0
-#define CLK_SOURCE_MIPI 0x174
-#define CLK_SOURCE_I2C1 0x124
-#define CLK_SOURCE_I2C2 0x198
-#define CLK_SOURCE_I2C3 0x1b8
-#define CLK_SOURCE_I2C4 0x3c4
-#define CLK_SOURCE_I2C5 0x128
-#define CLK_SOURCE_UARTA 0x178
-#define CLK_SOURCE_UARTB 0x17c
-#define CLK_SOURCE_UARTC 0x1a0
-#define CLK_SOURCE_UARTD 0x1c0
-#define CLK_SOURCE_UARTE 0x1c4
-#define CLK_SOURCE_VI 0x148
-#define CLK_SOURCE_VI_SENSOR 0x1a8
-#define CLK_SOURCE_3D 0x158
#define CLK_SOURCE_3D2 0x3b0
#define CLK_SOURCE_2D 0x15c
-#define CLK_SOURCE_EPP 0x16c
-#define CLK_SOURCE_MPE 0x170
-#define CLK_SOURCE_HOST1X 0x180
-#define CLK_SOURCE_CVE 0x140
-#define CLK_SOURCE_TVO 0x188
-#define CLK_SOURCE_DTV 0x1dc
#define CLK_SOURCE_HDMI 0x18c
-#define CLK_SOURCE_TVDAC 0x194
-#define CLK_SOURCE_DISP1 0x138
-#define CLK_SOURCE_DISP2 0x13c
#define CLK_SOURCE_DSIB 0xd0
-#define CLK_SOURCE_TSENSOR 0x3b8
-#define CLK_SOURCE_ACTMON 0x3e8
-#define CLK_SOURCE_EXTERN1 0x3ec
-#define CLK_SOURCE_EXTERN2 0x3f0
-#define CLK_SOURCE_EXTERN3 0x3f4
-#define CLK_SOURCE_I2CSLOW 0x3fc
#define CLK_SOURCE_SE 0x42c
-#define CLK_SOURCE_MSELECT 0x3b4
#define CLK_SOURCE_EMC 0x19c
#define AUDIO_SYNC_DOUBLER 0x49c
-#define PMC_CTRL 0
-#define PMC_CTRL_BLINK_ENB 7
-
-#define PMC_DPD_PADS_ORIDE 0x1c
-#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
-#define PMC_BLINK_TIMER 0x40
-
#define UTMIP_PLL_CFG2 0x488
#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6)
#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18)
@@ -266,89 +168,41 @@ static struct cpu_clk_suspend_context {
} tegra30_cpu_clk_sctx;
#endif
-static int periph_clk_enb_refcnt[CLK_OUT_ENB_NUM * 32];
-
static void __iomem *clk_base;
static void __iomem *pmc_base;
static unsigned long input_freq;
-static DEFINE_SPINLOCK(clk_doubler_lock);
-static DEFINE_SPINLOCK(clk_out_lock);
-static DEFINE_SPINLOCK(pll_div_lock);
static DEFINE_SPINLOCK(cml_lock);
static DEFINE_SPINLOCK(pll_d_lock);
-static DEFINE_SPINLOCK(sysrate_lock);
-
-#define TEGRA_INIT_DATA_MUX(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- 30, 2, 0, 0, 8, 1, 0, _regs, _clk_num, \
- periph_clk_enb_refcnt, _gate_flags, _clk_id)
-
-#define TEGRA_INIT_DATA_DIV16(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- 30, 2, 0, 0, 16, 0, TEGRA_DIVIDER_ROUND_UP, \
- _regs, _clk_num, periph_clk_enb_refcnt, \
- _gate_flags, _clk_id)
-
-#define TEGRA_INIT_DATA_MUX8(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- 29, 3, 0, 0, 8, 1, 0, _regs, _clk_num, \
- periph_clk_enb_refcnt, _gate_flags, _clk_id)
-
-#define TEGRA_INIT_DATA_INT(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs, \
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
- _clk_id)
-#define TEGRA_INIT_DATA_UART(_name, _con_id, _dev_id, _parents, _offset,\
- _clk_num, _regs, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- 30, 2, 0, 0, 16, 1, TEGRA_DIVIDER_UART, _regs, \
- _clk_num, periph_clk_enb_refcnt, 0, _clk_id)
+#define TEGRA_INIT_DATA_MUX(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
+ 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, \
+ _clk_num, _gate_flags, _clk_id)
+
+#define TEGRA_INIT_DATA_MUX8(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
+ 29, 3, 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, \
+ _clk_num, _gate_flags, _clk_id)
+
+#define TEGRA_INIT_DATA_INT(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
+ 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_INT | \
+ TEGRA_DIVIDER_ROUND_UP, _clk_num, \
+ _gate_flags, _clk_id)
-#define TEGRA_INIT_DATA_NODIV(_name, _con_id, _dev_id, _parents, _offset, \
- _mux_shift, _mux_width, _clk_num, _regs, \
+#define TEGRA_INIT_DATA_NODIV(_name, _parents, _offset, \
+ _mux_shift, _mux_width, _clk_num, \
_gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- _mux_shift, _mux_width, 0, 0, 0, 0, 0, _regs, \
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
+ _mux_shift, _mux_width, 0, 0, 0, 0, 0,\
+ _clk_num, _gate_flags, \
_clk_id)
-/*
- * IDs assigned here must be in sync with DT bindings definition
- * for Tegra30 clocks.
- */
-enum tegra30_clk {
- cpu, rtc = 4, timer, uarta, gpio = 8, sdmmc2, i2s1 = 11, i2c1, ndflash,
- sdmmc1, sdmmc4, pwm = 17, i2s2, epp, gr2d = 21, usbd, isp, gr3d,
- disp2 = 26, disp1, host1x, vcp, i2s0, cop_cache, mc, ahbdma, apbdma,
- kbc = 36, statmon, pmc, kfuse = 40, sbc1, nor, sbc2 = 44, sbc3 = 46,
- i2c5, dsia, mipi = 50, hdmi, csi, tvdac, i2c2, uartc, emc = 57, usb2,
- usb3, mpe, vde, bsea, bsev, speedo, uartd, uarte, i2c3, sbc4, sdmmc3,
- pcie, owr, afi, csite, pciex, avpucq, la, dtv = 79, ndspeed, i2cslow,
- dsib, irama = 84, iramb, iramc, iramd, cram2, audio_2x = 90, csus = 92,
- cdev2, cdev1, cpu_g = 96, cpu_lp, gr3d2, mselect, tsensor, i2s3, i2s4,
- i2c4, sbc5, sbc6, d_audio, apbif, dam0, dam1, dam2, hda2codec_2x,
- atomics, audio0_2x, audio1_2x, audio2_2x, audio3_2x, audio4_2x,
- spdif_2x, actmon, extern1, extern2, extern3, sata_oob, sata, hda,
- se = 127, hda2hdmi, sata_cold, uartb = 160, vfir, spdif_in, spdif_out,
- vi, vi_sensor, fuse, fuse_burn, cve, tvo, clk_32k, clk_m, clk_m_div2,
- clk_m_div4, pll_ref, pll_c, pll_c_out1, pll_m, pll_m_out1, pll_p,
- pll_p_out1, pll_p_out2, pll_p_out3, pll_p_out4, pll_a, pll_a_out0,
- pll_d, pll_d_out0, pll_d2, pll_d2_out0, pll_u, pll_x, pll_x_out0, pll_e,
- spdif_in_sync, i2s0_sync, i2s1_sync, i2s2_sync, i2s3_sync, i2s4_sync,
- vimclk_sync, audio0, audio1, audio2, audio3, audio4, spdif, clk_out_1,
- clk_out_2, clk_out_3, sclk, blink, cclk_g, cclk_lp, twd, cml0, cml1,
- hclk, pclk, clk_out_1_mux = 300, clk_max
-};
-
-static struct clk *clks[clk_max];
-static struct clk_onecell_data clk_data;
+static struct clk **clks;
/*
* Structure defining the fields for USB UTMI clocks Parameters.
@@ -564,6 +418,8 @@ static struct tegra_clk_pll_params pll_c_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_c_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
};
static struct div_nmp pllm_nmp = {
@@ -593,6 +449,9 @@ static struct tegra_clk_pll_params pll_m_params = {
.div_nmp = &pllm_nmp,
.pmc_divnm_reg = PMC_PLLM_WB0_OVERRIDE,
.pmc_divp_reg = PMC_PLLM_WB0_OVERRIDE,
+ .freq_table = pll_m_freq_table,
+ .flags = TEGRA_PLLM | TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_DCCON | TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_params pll_p_params = {
@@ -607,6 +466,9 @@ static struct tegra_clk_pll_params pll_p_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_p_freq_table,
+ .flags = TEGRA_PLL_FIXED | TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
+ .fixed_rate = 408000000,
};
static struct tegra_clk_pll_params pll_a_params = {
@@ -621,6 +483,8 @@ static struct tegra_clk_pll_params pll_a_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_a_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_params pll_d_params = {
@@ -635,6 +499,10 @@ static struct tegra_clk_pll_params pll_d_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
+ .freq_table = pll_d_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK,
+
};
static struct tegra_clk_pll_params pll_d2_params = {
@@ -649,6 +517,9 @@ static struct tegra_clk_pll_params pll_d2_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
+ .freq_table = pll_d_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_params pll_u_params = {
@@ -664,6 +535,8 @@ static struct tegra_clk_pll_params pll_u_params = {
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
.pdiv_tohw = pllu_p,
+ .freq_table = pll_u_freq_table,
+ .flags = TEGRA_PLLU | TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON,
};
static struct tegra_clk_pll_params pll_x_params = {
@@ -678,6 +551,9 @@ static struct tegra_clk_pll_params pll_x_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_x_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_DCCON |
+ TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_params pll_e_params = {
@@ -692,116 +568,299 @@ static struct tegra_clk_pll_params pll_e_params = {
.lock_mask = PLLE_MISC_LOCK,
.lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_e_freq_table,
+ .flags = TEGRA_PLLE_CONFIGURE | TEGRA_PLL_FIXED,
+ .fixed_rate = 100000000,
};
-/* Peripheral clock registers */
-static struct tegra_clk_periph_regs periph_l_regs = {
- .enb_reg = CLK_OUT_ENB_L,
- .enb_set_reg = CLK_OUT_ENB_SET_L,
- .enb_clr_reg = CLK_OUT_ENB_CLR_L,
- .rst_reg = RST_DEVICES_L,
- .rst_set_reg = RST_DEVICES_SET_L,
- .rst_clr_reg = RST_DEVICES_CLR_L,
+static unsigned long tegra30_input_freq[] = {
+ [0] = 13000000,
+ [1] = 16800000,
+ [4] = 19200000,
+ [5] = 38400000,
+ [8] = 12000000,
+ [9] = 48000000,
+ [12] = 260000000,
};
-static struct tegra_clk_periph_regs periph_h_regs = {
- .enb_reg = CLK_OUT_ENB_H,
- .enb_set_reg = CLK_OUT_ENB_SET_H,
- .enb_clr_reg = CLK_OUT_ENB_CLR_H,
- .rst_reg = RST_DEVICES_H,
- .rst_set_reg = RST_DEVICES_SET_H,
- .rst_clr_reg = RST_DEVICES_CLR_H,
+static struct tegra_devclk devclks[] __initdata = {
+ { .con_id = "pll_c", .dt_id = TEGRA30_CLK_PLL_C },
+ { .con_id = "pll_c_out1", .dt_id = TEGRA30_CLK_PLL_C_OUT1 },
+ { .con_id = "pll_p", .dt_id = TEGRA30_CLK_PLL_P },
+ { .con_id = "pll_p_out1", .dt_id = TEGRA30_CLK_PLL_P_OUT1 },
+ { .con_id = "pll_p_out2", .dt_id = TEGRA30_CLK_PLL_P_OUT2 },
+ { .con_id = "pll_p_out3", .dt_id = TEGRA30_CLK_PLL_P_OUT3 },
+ { .con_id = "pll_p_out4", .dt_id = TEGRA30_CLK_PLL_P_OUT4 },
+ { .con_id = "pll_m", .dt_id = TEGRA30_CLK_PLL_M },
+ { .con_id = "pll_m_out1", .dt_id = TEGRA30_CLK_PLL_M_OUT1 },
+ { .con_id = "pll_x", .dt_id = TEGRA30_CLK_PLL_X },
+ { .con_id = "pll_x_out0", .dt_id = TEGRA30_CLK_PLL_X_OUT0 },
+ { .con_id = "pll_u", .dt_id = TEGRA30_CLK_PLL_U },
+ { .con_id = "pll_d", .dt_id = TEGRA30_CLK_PLL_D },
+ { .con_id = "pll_d_out0", .dt_id = TEGRA30_CLK_PLL_D_OUT0 },
+ { .con_id = "pll_d2", .dt_id = TEGRA30_CLK_PLL_D2 },
+ { .con_id = "pll_d2_out0", .dt_id = TEGRA30_CLK_PLL_D2_OUT0 },
+ { .con_id = "pll_a", .dt_id = TEGRA30_CLK_PLL_A },
+ { .con_id = "pll_a_out0", .dt_id = TEGRA30_CLK_PLL_A_OUT0 },
+ { .con_id = "pll_e", .dt_id = TEGRA30_CLK_PLL_E },
+ { .con_id = "spdif_in_sync", .dt_id = TEGRA30_CLK_SPDIF_IN_SYNC },
+ { .con_id = "i2s0_sync", .dt_id = TEGRA30_CLK_I2S0_SYNC },
+ { .con_id = "i2s1_sync", .dt_id = TEGRA30_CLK_I2S1_SYNC },
+ { .con_id = "i2s2_sync", .dt_id = TEGRA30_CLK_I2S2_SYNC },
+ { .con_id = "i2s3_sync", .dt_id = TEGRA30_CLK_I2S3_SYNC },
+ { .con_id = "i2s4_sync", .dt_id = TEGRA30_CLK_I2S4_SYNC },
+ { .con_id = "vimclk_sync", .dt_id = TEGRA30_CLK_VIMCLK_SYNC },
+ { .con_id = "audio0", .dt_id = TEGRA30_CLK_AUDIO0 },
+ { .con_id = "audio1", .dt_id = TEGRA30_CLK_AUDIO1 },
+ { .con_id = "audio2", .dt_id = TEGRA30_CLK_AUDIO2 },
+ { .con_id = "audio3", .dt_id = TEGRA30_CLK_AUDIO3 },
+ { .con_id = "audio4", .dt_id = TEGRA30_CLK_AUDIO4 },
+ { .con_id = "spdif", .dt_id = TEGRA30_CLK_SPDIF },
+ { .con_id = "audio0_2x", .dt_id = TEGRA30_CLK_AUDIO0_2X },
+ { .con_id = "audio1_2x", .dt_id = TEGRA30_CLK_AUDIO1_2X },
+ { .con_id = "audio2_2x", .dt_id = TEGRA30_CLK_AUDIO2_2X },
+ { .con_id = "audio3_2x", .dt_id = TEGRA30_CLK_AUDIO3_2X },
+ { .con_id = "audio4_2x", .dt_id = TEGRA30_CLK_AUDIO4_2X },
+ { .con_id = "spdif_2x", .dt_id = TEGRA30_CLK_SPDIF_2X },
+ { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA30_CLK_EXTERN1 },
+ { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA30_CLK_EXTERN2 },
+ { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA30_CLK_EXTERN3 },
+ { .con_id = "blink", .dt_id = TEGRA30_CLK_BLINK },
+ { .con_id = "cclk_g", .dt_id = TEGRA30_CLK_CCLK_G },
+ { .con_id = "cclk_lp", .dt_id = TEGRA30_CLK_CCLK_LP },
+ { .con_id = "sclk", .dt_id = TEGRA30_CLK_SCLK },
+ { .con_id = "hclk", .dt_id = TEGRA30_CLK_HCLK },
+ { .con_id = "pclk", .dt_id = TEGRA30_CLK_PCLK },
+ { .con_id = "twd", .dt_id = TEGRA30_CLK_TWD },
+ { .con_id = "emc", .dt_id = TEGRA30_CLK_EMC },
+ { .con_id = "clk_32k", .dt_id = TEGRA30_CLK_CLK_32K },
+ { .con_id = "clk_m_div2", .dt_id = TEGRA30_CLK_CLK_M_DIV2 },
+ { .con_id = "clk_m_div4", .dt_id = TEGRA30_CLK_CLK_M_DIV4 },
+ { .con_id = "cml0", .dt_id = TEGRA30_CLK_CML0 },
+ { .con_id = "cml1", .dt_id = TEGRA30_CLK_CML1 },
+ { .con_id = "clk_m", .dt_id = TEGRA30_CLK_CLK_M },
+ { .con_id = "pll_ref", .dt_id = TEGRA30_CLK_PLL_REF },
+ { .con_id = "csus", .dev_id = "tengra_camera", .dt_id = TEGRA30_CLK_CSUS },
+ { .con_id = "vcp", .dev_id = "tegra-avp", .dt_id = TEGRA30_CLK_VCP },
+ { .con_id = "bsea", .dev_id = "tegra-avp", .dt_id = TEGRA30_CLK_BSEA },
+ { .con_id = "bsev", .dev_id = "tegra-aes", .dt_id = TEGRA30_CLK_BSEV },
+ { .con_id = "dsia", .dev_id = "tegradc.0", .dt_id = TEGRA30_CLK_DSIA },
+ { .con_id = "csi", .dev_id = "tegra_camera", .dt_id = TEGRA30_CLK_CSI },
+ { .con_id = "isp", .dev_id = "tegra_camera", .dt_id = TEGRA30_CLK_ISP },
+ { .con_id = "pcie", .dev_id = "tegra-pcie", .dt_id = TEGRA30_CLK_PCIE },
+ { .con_id = "afi", .dev_id = "tegra-pcie", .dt_id = TEGRA30_CLK_AFI },
+ { .con_id = "fuse", .dt_id = TEGRA30_CLK_FUSE },
+ { .con_id = "fuse_burn", .dev_id = "fuse-tegra", .dt_id = TEGRA30_CLK_FUSE_BURN },
+ { .con_id = "apbif", .dev_id = "tegra30-ahub", .dt_id = TEGRA30_CLK_APBIF },
+ { .con_id = "hda2hdmi", .dev_id = "tegra30-hda", .dt_id = TEGRA30_CLK_HDA2HDMI },
+ { .dev_id = "tegra-apbdma", .dt_id = TEGRA30_CLK_APBDMA },
+ { .dev_id = "rtc-tegra", .dt_id = TEGRA30_CLK_RTC },
+ { .dev_id = "timer", .dt_id = TEGRA30_CLK_TIMER },
+ { .dev_id = "tegra-kbc", .dt_id = TEGRA30_CLK_KBC },
+ { .dev_id = "fsl-tegra-udc", .dt_id = TEGRA30_CLK_USBD },
+ { .dev_id = "tegra-ehci.1", .dt_id = TEGRA30_CLK_USB2 },
+	{ .dev_id = "tegra-ehci.2", .dt_id = TEGRA30_CLK_USB3 },
+ { .dev_id = "kfuse-tegra", .dt_id = TEGRA30_CLK_KFUSE },
+ { .dev_id = "tegra_sata_cold", .dt_id = TEGRA30_CLK_SATA_COLD },
+ { .dev_id = "dtv", .dt_id = TEGRA30_CLK_DTV },
+ { .dev_id = "tegra30-i2s.0", .dt_id = TEGRA30_CLK_I2S0 },
+ { .dev_id = "tegra30-i2s.1", .dt_id = TEGRA30_CLK_I2S1 },
+ { .dev_id = "tegra30-i2s.2", .dt_id = TEGRA30_CLK_I2S2 },
+ { .dev_id = "tegra30-i2s.3", .dt_id = TEGRA30_CLK_I2S3 },
+ { .dev_id = "tegra30-i2s.4", .dt_id = TEGRA30_CLK_I2S4 },
+ { .con_id = "spdif_out", .dev_id = "tegra30-spdif", .dt_id = TEGRA30_CLK_SPDIF_OUT },
+ { .con_id = "spdif_in", .dev_id = "tegra30-spdif", .dt_id = TEGRA30_CLK_SPDIF_IN },
+ { .con_id = "d_audio", .dev_id = "tegra30-ahub", .dt_id = TEGRA30_CLK_D_AUDIO },
+ { .dev_id = "tegra30-dam.0", .dt_id = TEGRA30_CLK_DAM0 },
+ { .dev_id = "tegra30-dam.1", .dt_id = TEGRA30_CLK_DAM1 },
+ { .dev_id = "tegra30-dam.2", .dt_id = TEGRA30_CLK_DAM2 },
+ { .con_id = "hda", .dev_id = "tegra30-hda", .dt_id = TEGRA30_CLK_HDA },
+ { .con_id = "hda2codec", .dev_id = "tegra30-hda", .dt_id = TEGRA30_CLK_HDA2CODEC_2X },
+ { .dev_id = "spi_tegra.0", .dt_id = TEGRA30_CLK_SBC1 },
+ { .dev_id = "spi_tegra.1", .dt_id = TEGRA30_CLK_SBC2 },
+ { .dev_id = "spi_tegra.2", .dt_id = TEGRA30_CLK_SBC3 },
+ { .dev_id = "spi_tegra.3", .dt_id = TEGRA30_CLK_SBC4 },
+ { .dev_id = "spi_tegra.4", .dt_id = TEGRA30_CLK_SBC5 },
+ { .dev_id = "spi_tegra.5", .dt_id = TEGRA30_CLK_SBC6 },
+ { .dev_id = "tegra_sata_oob", .dt_id = TEGRA30_CLK_SATA_OOB },
+ { .dev_id = "tegra_sata", .dt_id = TEGRA30_CLK_SATA },
+ { .dev_id = "tegra_nand", .dt_id = TEGRA30_CLK_NDFLASH },
+ { .dev_id = "tegra_nand_speed", .dt_id = TEGRA30_CLK_NDSPEED },
+ { .dev_id = "vfir", .dt_id = TEGRA30_CLK_VFIR },
+ { .dev_id = "csite", .dt_id = TEGRA30_CLK_CSITE },
+ { .dev_id = "la", .dt_id = TEGRA30_CLK_LA },
+ { .dev_id = "tegra_w1", .dt_id = TEGRA30_CLK_OWR },
+ { .dev_id = "mipi", .dt_id = TEGRA30_CLK_MIPI },
+ { .dev_id = "tegra-tsensor", .dt_id = TEGRA30_CLK_TSENSOR },
+ { .dev_id = "i2cslow", .dt_id = TEGRA30_CLK_I2CSLOW },
+ { .dev_id = "vde", .dt_id = TEGRA30_CLK_VDE },
+ { .con_id = "vi", .dev_id = "tegra_camera", .dt_id = TEGRA30_CLK_VI },
+ { .dev_id = "epp", .dt_id = TEGRA30_CLK_EPP },
+ { .dev_id = "mpe", .dt_id = TEGRA30_CLK_MPE },
+ { .dev_id = "host1x", .dt_id = TEGRA30_CLK_HOST1X },
+ { .dev_id = "3d", .dt_id = TEGRA30_CLK_GR3D },
+ { .dev_id = "3d2", .dt_id = TEGRA30_CLK_GR3D2 },
+ { .dev_id = "2d", .dt_id = TEGRA30_CLK_GR2D },
+ { .dev_id = "se", .dt_id = TEGRA30_CLK_SE },
+ { .dev_id = "mselect", .dt_id = TEGRA30_CLK_MSELECT },
+ { .dev_id = "tegra-nor", .dt_id = TEGRA30_CLK_NOR },
+ { .dev_id = "sdhci-tegra.0", .dt_id = TEGRA30_CLK_SDMMC1 },
+ { .dev_id = "sdhci-tegra.1", .dt_id = TEGRA30_CLK_SDMMC2 },
+ { .dev_id = "sdhci-tegra.2", .dt_id = TEGRA30_CLK_SDMMC3 },
+ { .dev_id = "sdhci-tegra.3", .dt_id = TEGRA30_CLK_SDMMC4 },
+ { .dev_id = "cve", .dt_id = TEGRA30_CLK_CVE },
+ { .dev_id = "tvo", .dt_id = TEGRA30_CLK_TVO },
+ { .dev_id = "tvdac", .dt_id = TEGRA30_CLK_TVDAC },
+ { .dev_id = "actmon", .dt_id = TEGRA30_CLK_ACTMON },
+ { .con_id = "vi_sensor", .dev_id = "tegra_camera", .dt_id = TEGRA30_CLK_VI_SENSOR },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.0", .dt_id = TEGRA30_CLK_I2C1 },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.1", .dt_id = TEGRA30_CLK_I2C2 },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.2", .dt_id = TEGRA30_CLK_I2C3 },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.3", .dt_id = TEGRA30_CLK_I2C4 },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.4", .dt_id = TEGRA30_CLK_I2C5 },
+ { .dev_id = "tegra_uart.0", .dt_id = TEGRA30_CLK_UARTA },
+ { .dev_id = "tegra_uart.1", .dt_id = TEGRA30_CLK_UARTB },
+ { .dev_id = "tegra_uart.2", .dt_id = TEGRA30_CLK_UARTC },
+ { .dev_id = "tegra_uart.3", .dt_id = TEGRA30_CLK_UARTD },
+ { .dev_id = "tegra_uart.4", .dt_id = TEGRA30_CLK_UARTE },
+ { .dev_id = "hdmi", .dt_id = TEGRA30_CLK_HDMI },
+ { .dev_id = "extern1", .dt_id = TEGRA30_CLK_EXTERN1 },
+ { .dev_id = "extern2", .dt_id = TEGRA30_CLK_EXTERN2 },
+ { .dev_id = "extern3", .dt_id = TEGRA30_CLK_EXTERN3 },
+ { .dev_id = "pwm", .dt_id = TEGRA30_CLK_PWM },
+ { .dev_id = "tegradc.0", .dt_id = TEGRA30_CLK_DISP1 },
+ { .dev_id = "tegradc.1", .dt_id = TEGRA30_CLK_DISP2 },
+ { .dev_id = "tegradc.1", .dt_id = TEGRA30_CLK_DSIB },
};
-static struct tegra_clk_periph_regs periph_u_regs = {
- .enb_reg = CLK_OUT_ENB_U,
- .enb_set_reg = CLK_OUT_ENB_SET_U,
- .enb_clr_reg = CLK_OUT_ENB_CLR_U,
- .rst_reg = RST_DEVICES_U,
- .rst_set_reg = RST_DEVICES_SET_U,
- .rst_clr_reg = RST_DEVICES_CLR_U,
-};
+static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = {
+ [tegra_clk_clk_32k] = { .dt_id = TEGRA30_CLK_CLK_32K, .present = true },
+ [tegra_clk_clk_m] = { .dt_id = TEGRA30_CLK_CLK_M, .present = true },
+ [tegra_clk_clk_m_div2] = { .dt_id = TEGRA30_CLK_CLK_M_DIV2, .present = true },
+ [tegra_clk_clk_m_div4] = { .dt_id = TEGRA30_CLK_CLK_M_DIV4, .present = true },
+ [tegra_clk_pll_ref] = { .dt_id = TEGRA30_CLK_PLL_REF, .present = true },
+ [tegra_clk_spdif_in_sync] = { .dt_id = TEGRA30_CLK_SPDIF_IN_SYNC, .present = true },
+ [tegra_clk_i2s0_sync] = { .dt_id = TEGRA30_CLK_I2S0_SYNC, .present = true },
+ [tegra_clk_i2s1_sync] = { .dt_id = TEGRA30_CLK_I2S1_SYNC, .present = true },
+ [tegra_clk_i2s2_sync] = { .dt_id = TEGRA30_CLK_I2S2_SYNC, .present = true },
+ [tegra_clk_i2s3_sync] = { .dt_id = TEGRA30_CLK_I2S3_SYNC, .present = true },
+ [tegra_clk_i2s4_sync] = { .dt_id = TEGRA30_CLK_I2S4_SYNC, .present = true },
+ [tegra_clk_vimclk_sync] = { .dt_id = TEGRA30_CLK_VIMCLK_SYNC, .present = true },
+ [tegra_clk_audio0] = { .dt_id = TEGRA30_CLK_AUDIO0, .present = true },
+ [tegra_clk_audio1] = { .dt_id = TEGRA30_CLK_AUDIO1, .present = true },
+ [tegra_clk_audio2] = { .dt_id = TEGRA30_CLK_AUDIO2, .present = true },
+ [tegra_clk_audio3] = { .dt_id = TEGRA30_CLK_AUDIO3, .present = true },
+ [tegra_clk_audio4] = { .dt_id = TEGRA30_CLK_AUDIO4, .present = true },
+ [tegra_clk_spdif] = { .dt_id = TEGRA30_CLK_SPDIF, .present = true },
+ [tegra_clk_audio0_mux] = { .dt_id = TEGRA30_CLK_AUDIO0_MUX, .present = true },
+ [tegra_clk_audio1_mux] = { .dt_id = TEGRA30_CLK_AUDIO1_MUX, .present = true },
+ [tegra_clk_audio2_mux] = { .dt_id = TEGRA30_CLK_AUDIO2_MUX, .present = true },
+ [tegra_clk_audio3_mux] = { .dt_id = TEGRA30_CLK_AUDIO3_MUX, .present = true },
+ [tegra_clk_audio4_mux] = { .dt_id = TEGRA30_CLK_AUDIO4_MUX, .present = true },
+ [tegra_clk_spdif_mux] = { .dt_id = TEGRA30_CLK_SPDIF_MUX, .present = true },
+ [tegra_clk_audio0_2x] = { .dt_id = TEGRA30_CLK_AUDIO0_2X, .present = true },
+ [tegra_clk_audio1_2x] = { .dt_id = TEGRA30_CLK_AUDIO1_2X, .present = true },
+ [tegra_clk_audio2_2x] = { .dt_id = TEGRA30_CLK_AUDIO2_2X, .present = true },
+ [tegra_clk_audio3_2x] = { .dt_id = TEGRA30_CLK_AUDIO3_2X, .present = true },
+ [tegra_clk_audio4_2x] = { .dt_id = TEGRA30_CLK_AUDIO4_2X, .present = true },
+ [tegra_clk_spdif_2x] = { .dt_id = TEGRA30_CLK_SPDIF_2X, .present = true },
+ [tegra_clk_clk_out_1] = { .dt_id = TEGRA30_CLK_CLK_OUT_1, .present = true },
+ [tegra_clk_clk_out_2] = { .dt_id = TEGRA30_CLK_CLK_OUT_2, .present = true },
+ [tegra_clk_clk_out_3] = { .dt_id = TEGRA30_CLK_CLK_OUT_3, .present = true },
+ [tegra_clk_blink] = { .dt_id = TEGRA30_CLK_BLINK, .present = true },
+ [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA30_CLK_CLK_OUT_1_MUX, .present = true },
+ [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA30_CLK_CLK_OUT_2_MUX, .present = true },
+ [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA30_CLK_CLK_OUT_3_MUX, .present = true },
+ [tegra_clk_hclk] = { .dt_id = TEGRA30_CLK_HCLK, .present = true },
+ [tegra_clk_pclk] = { .dt_id = TEGRA30_CLK_PCLK, .present = true },
+ [tegra_clk_i2s0] = { .dt_id = TEGRA30_CLK_I2S0, .present = true },
+ [tegra_clk_i2s1] = { .dt_id = TEGRA30_CLK_I2S1, .present = true },
+ [tegra_clk_i2s2] = { .dt_id = TEGRA30_CLK_I2S2, .present = true },
+ [tegra_clk_i2s3] = { .dt_id = TEGRA30_CLK_I2S3, .present = true },
+ [tegra_clk_i2s4] = { .dt_id = TEGRA30_CLK_I2S4, .present = true },
+ [tegra_clk_spdif_in] = { .dt_id = TEGRA30_CLK_SPDIF_IN, .present = true },
+ [tegra_clk_hda] = { .dt_id = TEGRA30_CLK_HDA, .present = true },
+ [tegra_clk_hda2codec_2x] = { .dt_id = TEGRA30_CLK_HDA2CODEC_2X, .present = true },
+ [tegra_clk_sbc1] = { .dt_id = TEGRA30_CLK_SBC1, .present = true },
+ [tegra_clk_sbc2] = { .dt_id = TEGRA30_CLK_SBC2, .present = true },
+ [tegra_clk_sbc3] = { .dt_id = TEGRA30_CLK_SBC3, .present = true },
+ [tegra_clk_sbc4] = { .dt_id = TEGRA30_CLK_SBC4, .present = true },
+ [tegra_clk_sbc5] = { .dt_id = TEGRA30_CLK_SBC5, .present = true },
+ [tegra_clk_sbc6] = { .dt_id = TEGRA30_CLK_SBC6, .present = true },
+ [tegra_clk_ndflash] = { .dt_id = TEGRA30_CLK_NDFLASH, .present = true },
+ [tegra_clk_ndspeed] = { .dt_id = TEGRA30_CLK_NDSPEED, .present = true },
+ [tegra_clk_vfir] = { .dt_id = TEGRA30_CLK_VFIR, .present = true },
+ [tegra_clk_la] = { .dt_id = TEGRA30_CLK_LA, .present = true },
+ [tegra_clk_csite] = { .dt_id = TEGRA30_CLK_CSITE, .present = true },
+ [tegra_clk_owr] = { .dt_id = TEGRA30_CLK_OWR, .present = true },
+ [tegra_clk_mipi] = { .dt_id = TEGRA30_CLK_MIPI, .present = true },
+ [tegra_clk_tsensor] = { .dt_id = TEGRA30_CLK_TSENSOR, .present = true },
+ [tegra_clk_i2cslow] = { .dt_id = TEGRA30_CLK_I2CSLOW, .present = true },
+ [tegra_clk_vde] = { .dt_id = TEGRA30_CLK_VDE, .present = true },
+ [tegra_clk_vi] = { .dt_id = TEGRA30_CLK_VI, .present = true },
+ [tegra_clk_epp] = { .dt_id = TEGRA30_CLK_EPP, .present = true },
+ [tegra_clk_mpe] = { .dt_id = TEGRA30_CLK_MPE, .present = true },
+ [tegra_clk_host1x] = { .dt_id = TEGRA30_CLK_HOST1X, .present = true },
+ [tegra_clk_gr2d] = { .dt_id = TEGRA30_CLK_GR2D, .present = true },
+ [tegra_clk_gr3d] = { .dt_id = TEGRA30_CLK_GR3D, .present = true },
+ [tegra_clk_mselect] = { .dt_id = TEGRA30_CLK_MSELECT, .present = true },
+ [tegra_clk_nor] = { .dt_id = TEGRA30_CLK_NOR, .present = true },
+ [tegra_clk_sdmmc1] = { .dt_id = TEGRA30_CLK_SDMMC1, .present = true },
+ [tegra_clk_sdmmc2] = { .dt_id = TEGRA30_CLK_SDMMC2, .present = true },
+ [tegra_clk_sdmmc3] = { .dt_id = TEGRA30_CLK_SDMMC3, .present = true },
+ [tegra_clk_sdmmc4] = { .dt_id = TEGRA30_CLK_SDMMC4, .present = true },
+ [tegra_clk_cve] = { .dt_id = TEGRA30_CLK_CVE, .present = true },
+ [tegra_clk_tvo] = { .dt_id = TEGRA30_CLK_TVO, .present = true },
+ [tegra_clk_tvdac] = { .dt_id = TEGRA30_CLK_TVDAC, .present = true },
+ [tegra_clk_actmon] = { .dt_id = TEGRA30_CLK_ACTMON, .present = true },
+ [tegra_clk_vi_sensor] = { .dt_id = TEGRA30_CLK_VI_SENSOR, .present = true },
+ [tegra_clk_i2c1] = { .dt_id = TEGRA30_CLK_I2C1, .present = true },
+ [tegra_clk_i2c2] = { .dt_id = TEGRA30_CLK_I2C2, .present = true },
+ [tegra_clk_i2c3] = { .dt_id = TEGRA30_CLK_I2C3, .present = true },
+ [tegra_clk_i2c4] = { .dt_id = TEGRA30_CLK_I2C4, .present = true },
+ [tegra_clk_i2c5] = { .dt_id = TEGRA30_CLK_I2C5, .present = true },
+ [tegra_clk_uarta] = { .dt_id = TEGRA30_CLK_UARTA, .present = true },
+ [tegra_clk_uartb] = { .dt_id = TEGRA30_CLK_UARTB, .present = true },
+ [tegra_clk_uartc] = { .dt_id = TEGRA30_CLK_UARTC, .present = true },
+ [tegra_clk_uartd] = { .dt_id = TEGRA30_CLK_UARTD, .present = true },
+ [tegra_clk_uarte] = { .dt_id = TEGRA30_CLK_UARTE, .present = true },
+ [tegra_clk_extern1] = { .dt_id = TEGRA30_CLK_EXTERN1, .present = true },
+ [tegra_clk_extern2] = { .dt_id = TEGRA30_CLK_EXTERN2, .present = true },
+ [tegra_clk_extern3] = { .dt_id = TEGRA30_CLK_EXTERN3, .present = true },
+ [tegra_clk_disp1] = { .dt_id = TEGRA30_CLK_DISP1, .present = true },
+ [tegra_clk_disp2] = { .dt_id = TEGRA30_CLK_DISP2, .present = true },
+ [tegra_clk_apbdma] = { .dt_id = TEGRA30_CLK_APBDMA, .present = true },
+ [tegra_clk_rtc] = { .dt_id = TEGRA30_CLK_RTC, .present = true },
+ [tegra_clk_timer] = { .dt_id = TEGRA30_CLK_TIMER, .present = true },
+ [tegra_clk_kbc] = { .dt_id = TEGRA30_CLK_KBC, .present = true },
+ [tegra_clk_csus] = { .dt_id = TEGRA30_CLK_CSUS, .present = true },
+ [tegra_clk_vcp] = { .dt_id = TEGRA30_CLK_VCP, .present = true },
+ [tegra_clk_bsea] = { .dt_id = TEGRA30_CLK_BSEA, .present = true },
+ [tegra_clk_bsev] = { .dt_id = TEGRA30_CLK_BSEV, .present = true },
+ [tegra_clk_usbd] = { .dt_id = TEGRA30_CLK_USBD, .present = true },
+ [tegra_clk_usb2] = { .dt_id = TEGRA30_CLK_USB2, .present = true },
+ [tegra_clk_usb3] = { .dt_id = TEGRA30_CLK_USB3, .present = true },
+ [tegra_clk_csi] = { .dt_id = TEGRA30_CLK_CSI, .present = true },
+ [tegra_clk_isp] = { .dt_id = TEGRA30_CLK_ISP, .present = true },
+ [tegra_clk_kfuse] = { .dt_id = TEGRA30_CLK_KFUSE, .present = true },
+ [tegra_clk_fuse] = { .dt_id = TEGRA30_CLK_FUSE, .present = true },
+ [tegra_clk_fuse_burn] = { .dt_id = TEGRA30_CLK_FUSE_BURN, .present = true },
+ [tegra_clk_apbif] = { .dt_id = TEGRA30_CLK_APBIF, .present = true },
+ [tegra_clk_hda2hdmi] = { .dt_id = TEGRA30_CLK_HDA2HDMI, .present = true },
+ [tegra_clk_sata_cold] = { .dt_id = TEGRA30_CLK_SATA_COLD, .present = true },
+ [tegra_clk_sata_oob] = { .dt_id = TEGRA30_CLK_SATA_OOB, .present = true },
+ [tegra_clk_sata] = { .dt_id = TEGRA30_CLK_SATA, .present = true },
+ [tegra_clk_dtv] = { .dt_id = TEGRA30_CLK_DTV, .present = true },
+ [tegra_clk_pll_p] = { .dt_id = TEGRA30_CLK_PLL_P, .present = true },
+ [tegra_clk_pll_p_out1] = { .dt_id = TEGRA30_CLK_PLL_P_OUT1, .present = true },
+ [tegra_clk_pll_p_out2] = { .dt_id = TEGRA30_CLK_PLL_P_OUT2, .present = true },
+ [tegra_clk_pll_p_out3] = { .dt_id = TEGRA30_CLK_PLL_P_OUT3, .present = true },
+ [tegra_clk_pll_p_out4] = { .dt_id = TEGRA30_CLK_PLL_P_OUT4, .present = true },
+ [tegra_clk_pll_a] = { .dt_id = TEGRA30_CLK_PLL_A, .present = true },
+ [tegra_clk_pll_a_out0] = { .dt_id = TEGRA30_CLK_PLL_A_OUT0, .present = true },
-static struct tegra_clk_periph_regs periph_v_regs = {
- .enb_reg = CLK_OUT_ENB_V,
- .enb_set_reg = CLK_OUT_ENB_SET_V,
- .enb_clr_reg = CLK_OUT_ENB_CLR_V,
- .rst_reg = RST_DEVICES_V,
- .rst_set_reg = RST_DEVICES_SET_V,
- .rst_clr_reg = RST_DEVICES_CLR_V,
};
-static struct tegra_clk_periph_regs periph_w_regs = {
- .enb_reg = CLK_OUT_ENB_W,
- .enb_set_reg = CLK_OUT_ENB_SET_W,
- .enb_clr_reg = CLK_OUT_ENB_CLR_W,
- .rst_reg = RST_DEVICES_W,
- .rst_set_reg = RST_DEVICES_SET_W,
- .rst_clr_reg = RST_DEVICES_CLR_W,
-};
-
-static void tegra30_clk_measure_input_freq(void)
-{
- u32 osc_ctrl = readl_relaxed(clk_base + OSC_CTRL);
- u32 auto_clk_control = osc_ctrl & OSC_CTRL_OSC_FREQ_MASK;
- u32 pll_ref_div = osc_ctrl & OSC_CTRL_PLL_REF_DIV_MASK;
-
- switch (auto_clk_control) {
- case OSC_CTRL_OSC_FREQ_12MHZ:
- BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
- input_freq = 12000000;
- break;
- case OSC_CTRL_OSC_FREQ_13MHZ:
- BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
- input_freq = 13000000;
- break;
- case OSC_CTRL_OSC_FREQ_19_2MHZ:
- BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
- input_freq = 19200000;
- break;
- case OSC_CTRL_OSC_FREQ_26MHZ:
- BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
- input_freq = 26000000;
- break;
- case OSC_CTRL_OSC_FREQ_16_8MHZ:
- BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
- input_freq = 16800000;
- break;
- case OSC_CTRL_OSC_FREQ_38_4MHZ:
- BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_2);
- input_freq = 38400000;
- break;
- case OSC_CTRL_OSC_FREQ_48MHZ:
- BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_4);
- input_freq = 48000000;
- break;
- default:
- pr_err("Unexpected auto clock control value %d",
- auto_clk_control);
- BUG();
- return;
- }
-}
-
-static unsigned int tegra30_get_pll_ref_div(void)
-{
- u32 pll_ref_div = readl_relaxed(clk_base + OSC_CTRL) &
- OSC_CTRL_PLL_REF_DIV_MASK;
-
- switch (pll_ref_div) {
- case OSC_CTRL_PLL_REF_DIV_1:
- return 1;
- case OSC_CTRL_PLL_REF_DIV_2:
- return 2;
- case OSC_CTRL_PLL_REF_DIV_4:
- return 4;
- default:
- pr_err("Invalid pll ref divider %d", pll_ref_div);
- BUG();
- }
- return 0;
-}
-
static void tegra30_utmi_param_configure(void)
{
u32 reg;
@@ -863,11 +922,8 @@ static void __init tegra30_pll_init(void)
/* PLLC */
clk = tegra_clk_register_pll("pll_c", "pll_ref", clk_base, pmc_base, 0,
- 0, &pll_c_params,
- TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
- pll_c_freq_table, NULL);
- clk_register_clkdev(clk, "pll_c", NULL);
- clks[pll_c] = clk;
+ &pll_c_params, NULL);
+ clks[TEGRA30_CLK_PLL_C] = clk;
/* PLLC_OUT1 */
clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
@@ -876,73 +932,13 @@ static void __init tegra30_pll_init(void)
clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
clk_base + PLLC_OUT, 1, 0, CLK_SET_RATE_PARENT,
0, NULL);
- clk_register_clkdev(clk, "pll_c_out1", NULL);
- clks[pll_c_out1] = clk;
-
- /* PLLP */
- clk = tegra_clk_register_pll("pll_p", "pll_ref", clk_base, pmc_base, 0,
- 408000000, &pll_p_params,
- TEGRA_PLL_FIXED | TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_USE_LOCK, pll_p_freq_table, NULL);
- clk_register_clkdev(clk, "pll_p", NULL);
- clks[pll_p] = clk;
-
- /* PLLP_OUT1 */
- clk = tegra_clk_register_divider("pll_p_out1_div", "pll_p",
- clk_base + PLLP_OUTA, 0, TEGRA_DIVIDER_FIXED |
- TEGRA_DIVIDER_ROUND_UP, 8, 8, 1,
- &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out1", "pll_p_out1_div",
- clk_base + PLLP_OUTA, 1, 0,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out1", NULL);
- clks[pll_p_out1] = clk;
-
- /* PLLP_OUT2 */
- clk = tegra_clk_register_divider("pll_p_out2_div", "pll_p",
- clk_base + PLLP_OUTA, 0, TEGRA_DIVIDER_FIXED |
- TEGRA_DIVIDER_ROUND_UP, 24, 8, 1,
- &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out2", "pll_p_out2_div",
- clk_base + PLLP_OUTA, 17, 16,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out2", NULL);
- clks[pll_p_out2] = clk;
-
- /* PLLP_OUT3 */
- clk = tegra_clk_register_divider("pll_p_out3_div", "pll_p",
- clk_base + PLLP_OUTB, 0, TEGRA_DIVIDER_FIXED |
- TEGRA_DIVIDER_ROUND_UP, 8, 8, 1,
- &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out3", "pll_p_out3_div",
- clk_base + PLLP_OUTB, 1, 0,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out3", NULL);
- clks[pll_p_out3] = clk;
-
- /* PLLP_OUT4 */
- clk = tegra_clk_register_divider("pll_p_out4_div", "pll_p",
- clk_base + PLLP_OUTB, 0, TEGRA_DIVIDER_FIXED |
- TEGRA_DIVIDER_ROUND_UP, 24, 8, 1,
- &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out4", "pll_p_out4_div",
- clk_base + PLLP_OUTB, 17, 16,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out4", NULL);
- clks[pll_p_out4] = clk;
+ clks[TEGRA30_CLK_PLL_C_OUT1] = clk;
/* PLLM */
clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, pmc_base,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, 0,
- &pll_m_params, TEGRA_PLLM | TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_SET_DCCON | TEGRA_PLL_USE_LOCK,
- pll_m_freq_table, NULL);
- clk_register_clkdev(clk, "pll_m", NULL);
- clks[pll_m] = clk;
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+ &pll_m_params, NULL);
+ clks[TEGRA30_CLK_PLL_M] = clk;
/* PLLM_OUT1 */
clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
@@ -951,78 +947,44 @@ static void __init tegra30_pll_init(void)
clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
CLK_SET_RATE_PARENT, 0, NULL);
- clk_register_clkdev(clk, "pll_m_out1", NULL);
- clks[pll_m_out1] = clk;
+ clks[TEGRA30_CLK_PLL_M_OUT1] = clk;
/* PLLX */
clk = tegra_clk_register_pll("pll_x", "pll_ref", clk_base, pmc_base, 0,
- 0, &pll_x_params, TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_SET_DCCON | TEGRA_PLL_USE_LOCK,
- pll_x_freq_table, NULL);
- clk_register_clkdev(clk, "pll_x", NULL);
- clks[pll_x] = clk;
+ &pll_x_params, NULL);
+ clks[TEGRA30_CLK_PLL_X] = clk;
/* PLLX_OUT0 */
clk = clk_register_fixed_factor(NULL, "pll_x_out0", "pll_x",
CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "pll_x_out0", NULL);
- clks[pll_x_out0] = clk;
+ clks[TEGRA30_CLK_PLL_X_OUT0] = clk;
/* PLLU */
clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc_base, 0,
- 0, &pll_u_params, TEGRA_PLLU | TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_SET_LFCON,
- pll_u_freq_table,
- NULL);
- clk_register_clkdev(clk, "pll_u", NULL);
- clks[pll_u] = clk;
+ &pll_u_params, NULL);
+ clks[TEGRA30_CLK_PLL_U] = clk;
tegra30_utmi_param_configure();
/* PLLD */
clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, pmc_base, 0,
- 0, &pll_d_params, TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK,
- pll_d_freq_table, &pll_d_lock);
- clk_register_clkdev(clk, "pll_d", NULL);
- clks[pll_d] = clk;
+ &pll_d_params, &pll_d_lock);
+ clks[TEGRA30_CLK_PLL_D] = clk;
/* PLLD_OUT0 */
clk = clk_register_fixed_factor(NULL, "pll_d_out0", "pll_d",
CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "pll_d_out0", NULL);
- clks[pll_d_out0] = clk;
+ clks[TEGRA30_CLK_PLL_D_OUT0] = clk;
/* PLLD2 */
clk = tegra_clk_register_pll("pll_d2", "pll_ref", clk_base, pmc_base, 0,
- 0, &pll_d2_params, TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK,
- pll_d_freq_table, NULL);
- clk_register_clkdev(clk, "pll_d2", NULL);
- clks[pll_d2] = clk;
+ &pll_d2_params, NULL);
+ clks[TEGRA30_CLK_PLL_D2] = clk;
/* PLLD2_OUT0 */
clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2",
CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "pll_d2_out0", NULL);
- clks[pll_d2_out0] = clk;
-
- /* PLLA */
- clk = tegra_clk_register_pll("pll_a", "pll_p_out1", clk_base, pmc_base,
- 0, 0, &pll_a_params, TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_USE_LOCK, pll_a_freq_table, NULL);
- clk_register_clkdev(clk, "pll_a", NULL);
- clks[pll_a] = clk;
-
- /* PLLA_OUT0 */
- clk = tegra_clk_register_divider("pll_a_out0_div", "pll_a",
- clk_base + PLLA_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
- 8, 8, 1, NULL);
- clk = tegra_clk_register_pll_out("pll_a_out0", "pll_a_out0_div",
- clk_base + PLLA_OUT, 1, 0, CLK_IGNORE_UNUSED |
- CLK_SET_RATE_PARENT, 0, NULL);
- clk_register_clkdev(clk, "pll_a_out0", NULL);
- clks[pll_a_out0] = clk;
+ clks[TEGRA30_CLK_PLL_D2_OUT0] = clk;
/* PLLE */
clk = clk_register_mux(NULL, "pll_e_mux", pll_e_parents,
@@ -1030,258 +992,8 @@ static void __init tegra30_pll_init(void)
CLK_SET_RATE_NO_REPARENT,
clk_base + PLLE_AUX, 2, 1, 0, NULL);
clk = tegra_clk_register_plle("pll_e", "pll_e_mux", clk_base, pmc_base,
- CLK_GET_RATE_NOCACHE, 100000000, &pll_e_params,
- TEGRA_PLLE_CONFIGURE, pll_e_freq_table, NULL);
- clk_register_clkdev(clk, "pll_e", NULL);
- clks[pll_e] = clk;
-}
-
-static const char *mux_audio_sync_clk[] = { "spdif_in_sync", "i2s0_sync",
- "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync",};
-static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern1", };
-static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern2", };
-static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern3", };
-
-static void __init tegra30_audio_clk_init(void)
-{
- struct clk *clk;
-
- /* spdif_in_sync */
- clk = tegra_clk_register_sync_source("spdif_in_sync", 24000000,
- 24000000);
- clk_register_clkdev(clk, "spdif_in_sync", NULL);
- clks[spdif_in_sync] = clk;
-
- /* i2s0_sync */
- clk = tegra_clk_register_sync_source("i2s0_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s0_sync", NULL);
- clks[i2s0_sync] = clk;
-
- /* i2s1_sync */
- clk = tegra_clk_register_sync_source("i2s1_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s1_sync", NULL);
- clks[i2s1_sync] = clk;
-
- /* i2s2_sync */
- clk = tegra_clk_register_sync_source("i2s2_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s2_sync", NULL);
- clks[i2s2_sync] = clk;
-
- /* i2s3_sync */
- clk = tegra_clk_register_sync_source("i2s3_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s3_sync", NULL);
- clks[i2s3_sync] = clk;
-
- /* i2s4_sync */
- clk = tegra_clk_register_sync_source("i2s4_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s4_sync", NULL);
- clks[i2s4_sync] = clk;
-
- /* vimclk_sync */
- clk = tegra_clk_register_sync_source("vimclk_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "vimclk_sync", NULL);
- clks[vimclk_sync] = clk;
-
- /* audio0 */
- clk = clk_register_mux(NULL, "audio0_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S0, 0, 3, 0, NULL);
- clk = clk_register_gate(NULL, "audio0", "audio0_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S0, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio0", NULL);
- clks[audio0] = clk;
-
- /* audio1 */
- clk = clk_register_mux(NULL, "audio1_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S1, 0, 3, 0, NULL);
- clk = clk_register_gate(NULL, "audio1", "audio1_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S1, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio1", NULL);
- clks[audio1] = clk;
-
- /* audio2 */
- clk = clk_register_mux(NULL, "audio2_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S2, 0, 3, 0, NULL);
- clk = clk_register_gate(NULL, "audio2", "audio2_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S2, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio2", NULL);
- clks[audio2] = clk;
-
- /* audio3 */
- clk = clk_register_mux(NULL, "audio3_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S3, 0, 3, 0, NULL);
- clk = clk_register_gate(NULL, "audio3", "audio3_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S3, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio3", NULL);
- clks[audio3] = clk;
-
- /* audio4 */
- clk = clk_register_mux(NULL, "audio4_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S4, 0, 3, 0, NULL);
- clk = clk_register_gate(NULL, "audio4", "audio4_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S4, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio4", NULL);
- clks[audio4] = clk;
-
- /* spdif */
- clk = clk_register_mux(NULL, "spdif_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_SPDIF, 0, 3, 0, NULL);
- clk = clk_register_gate(NULL, "spdif", "spdif_mux", 0,
- clk_base + AUDIO_SYNC_CLK_SPDIF, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "spdif", NULL);
- clks[spdif] = clk;
-
- /* audio0_2x */
- clk = clk_register_fixed_factor(NULL, "audio0_doubler", "audio0",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio0_div", "audio0_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 24, 1, 0,
- &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio0_2x", "audio0_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 113, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio0_2x", NULL);
- clks[audio0_2x] = clk;
-
- /* audio1_2x */
- clk = clk_register_fixed_factor(NULL, "audio1_doubler", "audio1",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio1_div", "audio1_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 25, 1, 0,
- &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio1_2x", "audio1_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 114, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio1_2x", NULL);
- clks[audio1_2x] = clk;
-
- /* audio2_2x */
- clk = clk_register_fixed_factor(NULL, "audio2_doubler", "audio2",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio2_div", "audio2_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 26, 1, 0,
- &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio2_2x", "audio2_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 115, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio2_2x", NULL);
- clks[audio2_2x] = clk;
-
- /* audio3_2x */
- clk = clk_register_fixed_factor(NULL, "audio3_doubler", "audio3",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio3_div", "audio3_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 27, 1, 0,
- &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio3_2x", "audio3_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 116, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio3_2x", NULL);
- clks[audio3_2x] = clk;
-
- /* audio4_2x */
- clk = clk_register_fixed_factor(NULL, "audio4_doubler", "audio4",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio4_div", "audio4_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 28, 1, 0,
- &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio4_2x", "audio4_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 117, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio4_2x", NULL);
- clks[audio4_2x] = clk;
-
- /* spdif_2x */
- clk = clk_register_fixed_factor(NULL, "spdif_doubler", "spdif",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("spdif_div", "spdif_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 29, 1, 0,
- &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("spdif_2x", "spdif_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 118, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "spdif_2x", NULL);
- clks[spdif_2x] = clk;
-}
-
-static void __init tegra30_pmc_clk_init(void)
-{
- struct clk *clk;
-
- /* clk_out_1 */
- clk = clk_register_mux(NULL, "clk_out_1_mux", clk_out1_parents,
- ARRAY_SIZE(clk_out1_parents),
- CLK_SET_RATE_NO_REPARENT,
- pmc_base + PMC_CLK_OUT_CNTRL, 6, 3, 0,
- &clk_out_lock);
- clks[clk_out_1_mux] = clk;
- clk = clk_register_gate(NULL, "clk_out_1", "clk_out_1_mux", 0,
- pmc_base + PMC_CLK_OUT_CNTRL, 2, 0,
- &clk_out_lock);
- clk_register_clkdev(clk, "extern1", "clk_out_1");
- clks[clk_out_1] = clk;
-
- /* clk_out_2 */
- clk = clk_register_mux(NULL, "clk_out_2_mux", clk_out2_parents,
- ARRAY_SIZE(clk_out2_parents),
- CLK_SET_RATE_NO_REPARENT,
- pmc_base + PMC_CLK_OUT_CNTRL, 14, 3, 0,
- &clk_out_lock);
- clk = clk_register_gate(NULL, "clk_out_2", "clk_out_2_mux", 0,
- pmc_base + PMC_CLK_OUT_CNTRL, 10, 0,
- &clk_out_lock);
- clk_register_clkdev(clk, "extern2", "clk_out_2");
- clks[clk_out_2] = clk;
-
- /* clk_out_3 */
- clk = clk_register_mux(NULL, "clk_out_3_mux", clk_out3_parents,
- ARRAY_SIZE(clk_out3_parents),
- CLK_SET_RATE_NO_REPARENT,
- pmc_base + PMC_CLK_OUT_CNTRL, 22, 3, 0,
- &clk_out_lock);
- clk = clk_register_gate(NULL, "clk_out_3", "clk_out_3_mux", 0,
- pmc_base + PMC_CLK_OUT_CNTRL, 18, 0,
- &clk_out_lock);
- clk_register_clkdev(clk, "extern3", "clk_out_3");
- clks[clk_out_3] = clk;
-
- /* blink */
- writel_relaxed(0, pmc_base + PMC_BLINK_TIMER);
- clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
- pmc_base + PMC_DPD_PADS_ORIDE,
- PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
- clk = clk_register_gate(NULL, "blink", "blink_override", 0,
- pmc_base + PMC_CTRL,
- PMC_CTRL_BLINK_ENB, 0, NULL);
- clk_register_clkdev(clk, "blink", NULL);
- clks[blink] = clk;
-
+ CLK_GET_RATE_NOCACHE, &pll_e_params, NULL);
+ clks[TEGRA30_CLK_PLL_E] = clk;
}
static const char *cclk_g_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
@@ -1332,8 +1044,7 @@ static void __init tegra30_super_clk_init(void)
CLK_SET_RATE_PARENT,
clk_base + CCLKG_BURST_POLICY,
0, 4, 0, 0, NULL);
- clk_register_clkdev(clk, "cclk_g", NULL);
- clks[cclk_g] = clk;
+ clks[TEGRA30_CLK_CCLK_G] = clk;
/*
* Clock input to cclk_lp divided from pll_p using
@@ -1369,8 +1080,7 @@ static void __init tegra30_super_clk_init(void)
clk_base + CCLKLP_BURST_POLICY,
TEGRA_DIVIDER_2, 4, 8, 9,
NULL);
- clk_register_clkdev(clk, "cclk_lp", NULL);
- clks[cclk_lp] = clk;
+ clks[TEGRA30_CLK_CCLK_LP] = clk;
/* SCLK */
clk = tegra_clk_register_super_mux("sclk", sclk_parents,
@@ -1378,142 +1088,44 @@ static void __init tegra30_super_clk_init(void)
CLK_SET_RATE_PARENT,
clk_base + SCLK_BURST_POLICY,
0, 4, 0, 0, NULL);
- clk_register_clkdev(clk, "sclk", NULL);
- clks[sclk] = clk;
-
- /* HCLK */
- clk = clk_register_divider(NULL, "hclk_div", "sclk", 0,
- clk_base + SYSTEM_CLK_RATE, 4, 2, 0,
- &sysrate_lock);
- clk = clk_register_gate(NULL, "hclk", "hclk_div", CLK_SET_RATE_PARENT,
- clk_base + SYSTEM_CLK_RATE, 7,
- CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
- clk_register_clkdev(clk, "hclk", NULL);
- clks[hclk] = clk;
-
- /* PCLK */
- clk = clk_register_divider(NULL, "pclk_div", "hclk", 0,
- clk_base + SYSTEM_CLK_RATE, 0, 2, 0,
- &sysrate_lock);
- clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT,
- clk_base + SYSTEM_CLK_RATE, 3,
- CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
- clk_register_clkdev(clk, "pclk", NULL);
- clks[pclk] = clk;
+ clks[TEGRA30_CLK_SCLK] = clk;
/* twd */
clk = clk_register_fixed_factor(NULL, "twd", "cclk_g",
CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "twd", NULL);
- clks[twd] = clk;
+ clks[TEGRA30_CLK_TWD] = clk;
+
+ tegra_super_clk_gen4_init(clk_base, pmc_base, tegra30_clks, NULL);
}
static const char *mux_pllacp_clkm[] = { "pll_a_out0", "unused", "pll_p",
"clk_m" };
static const char *mux_pllpcm_clkm[] = { "pll_p", "pll_c", "pll_m", "clk_m" };
static const char *mux_pllmcp_clkm[] = { "pll_m", "pll_c", "pll_p", "clk_m" };
-static const char *i2s0_parents[] = { "pll_a_out0", "audio0_2x", "pll_p",
- "clk_m" };
-static const char *i2s1_parents[] = { "pll_a_out0", "audio1_2x", "pll_p",
- "clk_m" };
-static const char *i2s2_parents[] = { "pll_a_out0", "audio2_2x", "pll_p",
- "clk_m" };
-static const char *i2s3_parents[] = { "pll_a_out0", "audio3_2x", "pll_p",
- "clk_m" };
-static const char *i2s4_parents[] = { "pll_a_out0", "audio4_2x", "pll_p",
- "clk_m" };
static const char *spdif_out_parents[] = { "pll_a_out0", "spdif_2x", "pll_p",
"clk_m" };
-static const char *spdif_in_parents[] = { "pll_p", "pll_c", "pll_m" };
-static const char *mux_pllpc_clk32k_clkm[] = { "pll_p", "pll_c", "clk_32k",
- "clk_m" };
-static const char *mux_pllpc_clkm_clk32k[] = { "pll_p", "pll_c", "clk_m",
- "clk_32k" };
static const char *mux_pllmcpa[] = { "pll_m", "pll_c", "pll_p", "pll_a_out0" };
-static const char *mux_pllpdc_clkm[] = { "pll_p", "pll_d_out0", "pll_c",
- "clk_m" };
-static const char *mux_pllp_clkm[] = { "pll_p", "unused", "unused", "clk_m" };
static const char *mux_pllpmdacd2_clkm[] = { "pll_p", "pll_m", "pll_d_out0",
"pll_a_out0", "pll_c",
"pll_d2_out0", "clk_m" };
-static const char *mux_plla_clk32k_pllp_clkm_plle[] = { "pll_a_out0",
- "clk_32k", "pll_p",
- "clk_m", "pll_e" };
static const char *mux_plld_out0_plld2_out0[] = { "pll_d_out0",
"pll_d2_out0" };
+static const char *pwm_parents[] = { "pll_p", "pll_c", "clk_32k", "clk_m" };
static struct tegra_periph_init_data tegra_periph_clk_list[] = {
- TEGRA_INIT_DATA_MUX("i2s0", NULL, "tegra30-i2s.0", i2s0_parents, CLK_SOURCE_I2S0, 30, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s0),
- TEGRA_INIT_DATA_MUX("i2s1", NULL, "tegra30-i2s.1", i2s1_parents, CLK_SOURCE_I2S1, 11, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s1),
- TEGRA_INIT_DATA_MUX("i2s2", NULL, "tegra30-i2s.2", i2s2_parents, CLK_SOURCE_I2S2, 18, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s2),
- TEGRA_INIT_DATA_MUX("i2s3", NULL, "tegra30-i2s.3", i2s3_parents, CLK_SOURCE_I2S3, 101, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2s3),
- TEGRA_INIT_DATA_MUX("i2s4", NULL, "tegra30-i2s.4", i2s4_parents, CLK_SOURCE_I2S4, 102, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2s4),
- TEGRA_INIT_DATA_MUX("spdif_out", "spdif_out", "tegra30-spdif", spdif_out_parents, CLK_SOURCE_SPDIF_OUT, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_out),
- TEGRA_INIT_DATA_MUX("spdif_in", "spdif_in", "tegra30-spdif", spdif_in_parents, CLK_SOURCE_SPDIF_IN, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_in),
- TEGRA_INIT_DATA_MUX("d_audio", "d_audio", "tegra30-ahub", mux_pllacp_clkm, CLK_SOURCE_D_AUDIO, 106, &periph_v_regs, 0, d_audio),
- TEGRA_INIT_DATA_MUX("dam0", NULL, "tegra30-dam.0", mux_pllacp_clkm, CLK_SOURCE_DAM0, 108, &periph_v_regs, 0, dam0),
- TEGRA_INIT_DATA_MUX("dam1", NULL, "tegra30-dam.1", mux_pllacp_clkm, CLK_SOURCE_DAM1, 109, &periph_v_regs, 0, dam1),
- TEGRA_INIT_DATA_MUX("dam2", NULL, "tegra30-dam.2", mux_pllacp_clkm, CLK_SOURCE_DAM2, 110, &periph_v_regs, 0, dam2),
- TEGRA_INIT_DATA_MUX("hda", "hda", "tegra30-hda", mux_pllpcm_clkm, CLK_SOURCE_HDA, 125, &periph_v_regs, 0, hda),
- TEGRA_INIT_DATA_MUX("hda2codec_2x", "hda2codec", "tegra30-hda", mux_pllpcm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, &periph_v_regs, 0, hda2codec_2x),
- TEGRA_INIT_DATA_MUX("sbc1", NULL, "spi_tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SBC1, 41, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc1),
- TEGRA_INIT_DATA_MUX("sbc2", NULL, "spi_tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SBC2, 44, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc2),
- TEGRA_INIT_DATA_MUX("sbc3", NULL, "spi_tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SBC3, 46, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc3),
- TEGRA_INIT_DATA_MUX("sbc4", NULL, "spi_tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SBC4, 68, &periph_u_regs, TEGRA_PERIPH_ON_APB, sbc4),
- TEGRA_INIT_DATA_MUX("sbc5", NULL, "spi_tegra.4", mux_pllpcm_clkm, CLK_SOURCE_SBC5, 104, &periph_v_regs, TEGRA_PERIPH_ON_APB, sbc5),
- TEGRA_INIT_DATA_MUX("sbc6", NULL, "spi_tegra.5", mux_pllpcm_clkm, CLK_SOURCE_SBC6, 105, &periph_v_regs, TEGRA_PERIPH_ON_APB, sbc6),
- TEGRA_INIT_DATA_MUX("sata_oob", NULL, "tegra_sata_oob", mux_pllpcm_clkm, CLK_SOURCE_SATA_OOB, 123, &periph_v_regs, TEGRA_PERIPH_ON_APB, sata_oob),
- TEGRA_INIT_DATA_MUX("sata", NULL, "tegra_sata", mux_pllpcm_clkm, CLK_SOURCE_SATA, 124, &periph_v_regs, TEGRA_PERIPH_ON_APB, sata),
- TEGRA_INIT_DATA_MUX("ndflash", NULL, "tegra_nand", mux_pllpcm_clkm, CLK_SOURCE_NDFLASH, 13, &periph_l_regs, TEGRA_PERIPH_ON_APB, ndflash),
- TEGRA_INIT_DATA_MUX("ndspeed", NULL, "tegra_nand_speed", mux_pllpcm_clkm, CLK_SOURCE_NDSPEED, 80, &periph_u_regs, TEGRA_PERIPH_ON_APB, ndspeed),
- TEGRA_INIT_DATA_MUX("vfir", NULL, "vfir", mux_pllpcm_clkm, CLK_SOURCE_VFIR, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, vfir),
- TEGRA_INIT_DATA_MUX("csite", NULL, "csite", mux_pllpcm_clkm, CLK_SOURCE_CSITE, 73, &periph_u_regs, TEGRA_PERIPH_ON_APB, csite),
- TEGRA_INIT_DATA_MUX("la", NULL, "la", mux_pllpcm_clkm, CLK_SOURCE_LA, 76, &periph_u_regs, TEGRA_PERIPH_ON_APB, la),
- TEGRA_INIT_DATA_MUX("owr", NULL, "tegra_w1", mux_pllpcm_clkm, CLK_SOURCE_OWR, 71, &periph_u_regs, TEGRA_PERIPH_ON_APB, owr),
- TEGRA_INIT_DATA_MUX("mipi", NULL, "mipi", mux_pllpcm_clkm, CLK_SOURCE_MIPI, 50, &periph_h_regs, TEGRA_PERIPH_ON_APB, mipi),
- TEGRA_INIT_DATA_MUX("tsensor", NULL, "tegra-tsensor", mux_pllpc_clkm_clk32k, CLK_SOURCE_TSENSOR, 100, &periph_v_regs, TEGRA_PERIPH_ON_APB, tsensor),
- TEGRA_INIT_DATA_MUX("i2cslow", NULL, "i2cslow", mux_pllpc_clk32k_clkm, CLK_SOURCE_I2CSLOW, 81, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2cslow),
- TEGRA_INIT_DATA_INT("vde", NULL, "vde", mux_pllpcm_clkm, CLK_SOURCE_VDE, 61, &periph_h_regs, 0, vde),
- TEGRA_INIT_DATA_INT("vi", "vi", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI, 20, &periph_l_regs, 0, vi),
- TEGRA_INIT_DATA_INT("epp", NULL, "epp", mux_pllmcpa, CLK_SOURCE_EPP, 19, &periph_l_regs, 0, epp),
- TEGRA_INIT_DATA_INT("mpe", NULL, "mpe", mux_pllmcpa, CLK_SOURCE_MPE, 60, &periph_h_regs, 0, mpe),
- TEGRA_INIT_DATA_INT("host1x", NULL, "host1x", mux_pllmcpa, CLK_SOURCE_HOST1X, 28, &periph_l_regs, 0, host1x),
- TEGRA_INIT_DATA_INT("3d", NULL, "3d", mux_pllmcpa, CLK_SOURCE_3D, 24, &periph_l_regs, TEGRA_PERIPH_MANUAL_RESET, gr3d),
- TEGRA_INIT_DATA_INT("3d2", NULL, "3d2", mux_pllmcpa, CLK_SOURCE_3D2, 98, &periph_v_regs, TEGRA_PERIPH_MANUAL_RESET, gr3d2),
- TEGRA_INIT_DATA_INT("2d", NULL, "2d", mux_pllmcpa, CLK_SOURCE_2D, 21, &periph_l_regs, 0, gr2d),
- TEGRA_INIT_DATA_INT("se", NULL, "se", mux_pllpcm_clkm, CLK_SOURCE_SE, 127, &periph_v_regs, 0, se),
- TEGRA_INIT_DATA_MUX("mselect", NULL, "mselect", mux_pllp_clkm, CLK_SOURCE_MSELECT, 99, &periph_v_regs, 0, mselect),
- TEGRA_INIT_DATA_MUX("nor", NULL, "tegra-nor", mux_pllpcm_clkm, CLK_SOURCE_NOR, 42, &periph_h_regs, 0, nor),
- TEGRA_INIT_DATA_MUX("sdmmc1", NULL, "sdhci-tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SDMMC1, 14, &periph_l_regs, 0, sdmmc1),
- TEGRA_INIT_DATA_MUX("sdmmc2", NULL, "sdhci-tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SDMMC2, 9, &periph_l_regs, 0, sdmmc2),
- TEGRA_INIT_DATA_MUX("sdmmc3", NULL, "sdhci-tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SDMMC3, 69, &periph_u_regs, 0, sdmmc3),
- TEGRA_INIT_DATA_MUX("sdmmc4", NULL, "sdhci-tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SDMMC4, 15, &periph_l_regs, 0, sdmmc4),
- TEGRA_INIT_DATA_MUX("cve", NULL, "cve", mux_pllpdc_clkm, CLK_SOURCE_CVE, 49, &periph_h_regs, 0, cve),
- TEGRA_INIT_DATA_MUX("tvo", NULL, "tvo", mux_pllpdc_clkm, CLK_SOURCE_TVO, 49, &periph_h_regs, 0, tvo),
- TEGRA_INIT_DATA_MUX("tvdac", NULL, "tvdac", mux_pllpdc_clkm, CLK_SOURCE_TVDAC, 53, &periph_h_regs, 0, tvdac),
- TEGRA_INIT_DATA_MUX("actmon", NULL, "actmon", mux_pllpc_clk32k_clkm, CLK_SOURCE_ACTMON, 119, &periph_v_regs, 0, actmon),
- TEGRA_INIT_DATA_MUX("vi_sensor", "vi_sensor", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI_SENSOR, 20, &periph_l_regs, TEGRA_PERIPH_NO_RESET, vi_sensor),
- TEGRA_INIT_DATA_DIV16("i2c1", "div-clk", "tegra-i2c.0", mux_pllp_clkm, CLK_SOURCE_I2C1, 12, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2c1),
- TEGRA_INIT_DATA_DIV16("i2c2", "div-clk", "tegra-i2c.1", mux_pllp_clkm, CLK_SOURCE_I2C2, 54, &periph_h_regs, TEGRA_PERIPH_ON_APB, i2c2),
- TEGRA_INIT_DATA_DIV16("i2c3", "div-clk", "tegra-i2c.2", mux_pllp_clkm, CLK_SOURCE_I2C3, 67, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2c3),
- TEGRA_INIT_DATA_DIV16("i2c4", "div-clk", "tegra-i2c.3", mux_pllp_clkm, CLK_SOURCE_I2C4, 103, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2c4),
- TEGRA_INIT_DATA_DIV16("i2c5", "div-clk", "tegra-i2c.4", mux_pllp_clkm, CLK_SOURCE_I2C5, 47, &periph_h_regs, TEGRA_PERIPH_ON_APB, i2c5),
- TEGRA_INIT_DATA_UART("uarta", NULL, "tegra_uart.0", mux_pllpcm_clkm, CLK_SOURCE_UARTA, 6, &periph_l_regs, uarta),
- TEGRA_INIT_DATA_UART("uartb", NULL, "tegra_uart.1", mux_pllpcm_clkm, CLK_SOURCE_UARTB, 7, &periph_l_regs, uartb),
- TEGRA_INIT_DATA_UART("uartc", NULL, "tegra_uart.2", mux_pllpcm_clkm, CLK_SOURCE_UARTC, 55, &periph_h_regs, uartc),
- TEGRA_INIT_DATA_UART("uartd", NULL, "tegra_uart.3", mux_pllpcm_clkm, CLK_SOURCE_UARTD, 65, &periph_u_regs, uartd),
- TEGRA_INIT_DATA_UART("uarte", NULL, "tegra_uart.4", mux_pllpcm_clkm, CLK_SOURCE_UARTE, 66, &periph_u_regs, uarte),
- TEGRA_INIT_DATA_MUX8("hdmi", NULL, "hdmi", mux_pllpmdacd2_clkm, CLK_SOURCE_HDMI, 51, &periph_h_regs, 0, hdmi),
- TEGRA_INIT_DATA_MUX8("extern1", NULL, "extern1", mux_plla_clk32k_pllp_clkm_plle, CLK_SOURCE_EXTERN1, 120, &periph_v_regs, 0, extern1),
- TEGRA_INIT_DATA_MUX8("extern2", NULL, "extern2", mux_plla_clk32k_pllp_clkm_plle, CLK_SOURCE_EXTERN2, 121, &periph_v_regs, 0, extern2),
- TEGRA_INIT_DATA_MUX8("extern3", NULL, "extern3", mux_plla_clk32k_pllp_clkm_plle, CLK_SOURCE_EXTERN3, 122, &periph_v_regs, 0, extern3),
- TEGRA_INIT_DATA("pwm", NULL, "pwm", mux_pllpc_clk32k_clkm, CLK_SOURCE_PWM, 28, 2, 0, 0, 8, 1, 0, &periph_l_regs, 17, periph_clk_enb_refcnt, 0, pwm),
+ TEGRA_INIT_DATA_MUX("spdif_out", spdif_out_parents, CLK_SOURCE_SPDIF_OUT, 10, TEGRA_PERIPH_ON_APB, TEGRA30_CLK_SPDIF_OUT),
+ TEGRA_INIT_DATA_MUX("d_audio", mux_pllacp_clkm, CLK_SOURCE_D_AUDIO, 106, 0, TEGRA30_CLK_D_AUDIO),
+ TEGRA_INIT_DATA_MUX("dam0", mux_pllacp_clkm, CLK_SOURCE_DAM0, 108, 0, TEGRA30_CLK_DAM0),
+ TEGRA_INIT_DATA_MUX("dam1", mux_pllacp_clkm, CLK_SOURCE_DAM1, 109, 0, TEGRA30_CLK_DAM1),
+ TEGRA_INIT_DATA_MUX("dam2", mux_pllacp_clkm, CLK_SOURCE_DAM2, 110, 0, TEGRA30_CLK_DAM2),
+ TEGRA_INIT_DATA_INT("3d2", mux_pllmcpa, CLK_SOURCE_3D2, 98, TEGRA_PERIPH_MANUAL_RESET, TEGRA30_CLK_GR3D2),
+ TEGRA_INIT_DATA_INT("se", mux_pllpcm_clkm, CLK_SOURCE_SE, 127, 0, TEGRA30_CLK_SE),
+ TEGRA_INIT_DATA_MUX8("hdmi", mux_pllpmdacd2_clkm, CLK_SOURCE_HDMI, 51, 0, TEGRA30_CLK_HDMI),
+ TEGRA_INIT_DATA("pwm", NULL, NULL, pwm_parents, CLK_SOURCE_PWM, 28, 2, 0, 0, 8, 1, 0, 17, TEGRA_PERIPH_ON_APB, TEGRA30_CLK_PWM),
};
static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
- TEGRA_INIT_DATA_NODIV("disp1", NULL, "tegradc.0", mux_pllpmdacd2_clkm, CLK_SOURCE_DISP1, 29, 3, 27, &periph_l_regs, 0, disp1),
- TEGRA_INIT_DATA_NODIV("disp2", NULL, "tegradc.1", mux_pllpmdacd2_clkm, CLK_SOURCE_DISP2, 29, 3, 26, &periph_l_regs, 0, disp2),
- TEGRA_INIT_DATA_NODIV("dsib", NULL, "tegradc.1", mux_plld_out0_plld2_out0, CLK_SOURCE_DSIB, 25, 1, 82, &periph_u_regs, 0, dsib),
+ TEGRA_INIT_DATA_NODIV("dsib", mux_plld_out0_plld2_out0, CLK_SOURCE_DSIB, 25, 1, 82, 0, TEGRA30_CLK_DSIB),
};
static void __init tegra30_periph_clk_init(void)
@@ -1522,170 +1134,20 @@ static void __init tegra30_periph_clk_init(void)
struct clk *clk;
int i;
- /* apbdma */
- clk = tegra_clk_register_periph_gate("apbdma", "clk_m", 0, clk_base, 0, 34,
- &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra-apbdma");
- clks[apbdma] = clk;
-
- /* rtc */
- clk = tegra_clk_register_periph_gate("rtc", "clk_32k",
- TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
- clk_base, 0, 4, &periph_l_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "rtc-tegra");
- clks[rtc] = clk;
-
- /* timer */
- clk = tegra_clk_register_periph_gate("timer", "clk_m", 0, clk_base, 0,
- 5, &periph_l_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "timer");
- clks[timer] = clk;
-
- /* kbc */
- clk = tegra_clk_register_periph_gate("kbc", "clk_32k",
- TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
- clk_base, 0, 36, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra-kbc");
- clks[kbc] = clk;
-
- /* csus */
- clk = tegra_clk_register_periph_gate("csus", "clk_m",
- TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
- clk_base, 0, 92, &periph_u_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "csus", "tengra_camera");
- clks[csus] = clk;
-
- /* vcp */
- clk = tegra_clk_register_periph_gate("vcp", "clk_m", 0, clk_base, 0, 29,
- &periph_l_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "vcp", "tegra-avp");
- clks[vcp] = clk;
-
- /* bsea */
- clk = tegra_clk_register_periph_gate("bsea", "clk_m", 0, clk_base, 0,
- 62, &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "bsea", "tegra-avp");
- clks[bsea] = clk;
-
- /* bsev */
- clk = tegra_clk_register_periph_gate("bsev", "clk_m", 0, clk_base, 0,
- 63, &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "bsev", "tegra-aes");
- clks[bsev] = clk;
-
- /* usbd */
- clk = tegra_clk_register_periph_gate("usbd", "clk_m", 0, clk_base, 0,
- 22, &periph_l_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "fsl-tegra-udc");
- clks[usbd] = clk;
-
- /* usb2 */
- clk = tegra_clk_register_periph_gate("usb2", "clk_m", 0, clk_base, 0,
- 58, &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra-ehci.1");
- clks[usb2] = clk;
-
- /* usb3 */
- clk = tegra_clk_register_periph_gate("usb3", "clk_m", 0, clk_base, 0,
- 59, &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra-ehci.2");
- clks[usb3] = clk;
-
/* dsia */
clk = tegra_clk_register_periph_gate("dsia", "pll_d_out0", 0, clk_base,
- 0, 48, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "dsia", "tegradc.0");
- clks[dsia] = clk;
-
- /* csi */
- clk = tegra_clk_register_periph_gate("csi", "pll_p_out3", 0, clk_base,
- 0, 52, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "csi", "tegra_camera");
- clks[csi] = clk;
-
- /* isp */
- clk = tegra_clk_register_periph_gate("isp", "clk_m", 0, clk_base, 0, 23,
- &periph_l_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "isp", "tegra_camera");
- clks[isp] = clk;
+ 0, 48, periph_clk_enb_refcnt);
+ clks[TEGRA30_CLK_DSIA] = clk;
/* pcie */
clk = tegra_clk_register_periph_gate("pcie", "clk_m", 0, clk_base, 0,
- 70, &periph_u_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "pcie", "tegra-pcie");
- clks[pcie] = clk;
+ 70, periph_clk_enb_refcnt);
+ clks[TEGRA30_CLK_PCIE] = clk;
/* afi */
clk = tegra_clk_register_periph_gate("afi", "clk_m", 0, clk_base, 0, 72,
- &periph_u_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "afi", "tegra-pcie");
- clks[afi] = clk;
-
- /* pciex */
- clk = tegra_clk_register_periph_gate("pciex", "pll_e", 0, clk_base, 0,
- 74, &periph_u_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "pciex", "tegra-pcie");
- clks[pciex] = clk;
-
- /* kfuse */
- clk = tegra_clk_register_periph_gate("kfuse", "clk_m",
- TEGRA_PERIPH_ON_APB,
- clk_base, 0, 40, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "kfuse-tegra");
- clks[kfuse] = clk;
-
- /* fuse */
- clk = tegra_clk_register_periph_gate("fuse", "clk_m",
- TEGRA_PERIPH_ON_APB,
- clk_base, 0, 39, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "fuse", "fuse-tegra");
- clks[fuse] = clk;
-
- /* fuse_burn */
- clk = tegra_clk_register_periph_gate("fuse_burn", "clk_m",
- TEGRA_PERIPH_ON_APB,
- clk_base, 0, 39, &periph_h_regs,
periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "fuse_burn", "fuse-tegra");
- clks[fuse_burn] = clk;
-
- /* apbif */
- clk = tegra_clk_register_periph_gate("apbif", "clk_m", 0,
- clk_base, 0, 107, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "apbif", "tegra30-ahub");
- clks[apbif] = clk;
-
- /* hda2hdmi */
- clk = tegra_clk_register_periph_gate("hda2hdmi", "clk_m",
- TEGRA_PERIPH_ON_APB,
- clk_base, 0, 128, &periph_w_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "hda2hdmi", "tegra30-hda");
- clks[hda2hdmi] = clk;
-
- /* sata_cold */
- clk = tegra_clk_register_periph_gate("sata_cold", "clk_m",
- TEGRA_PERIPH_ON_APB,
- clk_base, 0, 129, &periph_w_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra_sata_cold");
- clks[sata_cold] = clk;
-
- /* dtv */
- clk = tegra_clk_register_periph_gate("dtv", "clk_m",
- TEGRA_PERIPH_ON_APB,
- clk_base, 0, 79, &periph_u_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "dtv");
- clks[dtv] = clk;
+ clks[TEGRA30_CLK_AFI] = clk;
/* emc */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
@@ -1694,84 +1156,37 @@ static void __init tegra30_periph_clk_init(void)
clk_base + CLK_SOURCE_EMC,
30, 2, 0, NULL);
clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
- 57, &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "emc", NULL);
- clks[emc] = clk;
+ 57, periph_clk_enb_refcnt);
+ clks[TEGRA30_CLK_EMC] = clk;
+
+ /* cml0 */
+ clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX,
+ 0, 0, &cml_lock);
+ clks[TEGRA30_CLK_CML0] = clk;
+
+ /* cml1 */
+ clk = clk_register_gate(NULL, "cml1", "pll_e", 0, clk_base + PLLE_AUX,
+ 1, 0, &cml_lock);
+ clks[TEGRA30_CLK_CML1] = clk;
for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
data = &tegra_periph_clk_list[i];
- clk = tegra_clk_register_periph(data->name, data->parent_names,
+ clk = tegra_clk_register_periph(data->name, data->p.parent_names,
data->num_parents, &data->periph,
clk_base, data->offset, data->flags);
- clk_register_clkdev(clk, data->con_id, data->dev_id);
clks[data->clk_id] = clk;
}
for (i = 0; i < ARRAY_SIZE(tegra_periph_nodiv_clk_list); i++) {
data = &tegra_periph_nodiv_clk_list[i];
clk = tegra_clk_register_periph_nodiv(data->name,
- data->parent_names,
+ data->p.parent_names,
data->num_parents, &data->periph,
clk_base, data->offset);
- clk_register_clkdev(clk, data->con_id, data->dev_id);
clks[data->clk_id] = clk;
}
-}
-
-static void __init tegra30_fixed_clk_init(void)
-{
- struct clk *clk;
-
- /* clk_32k */
- clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, CLK_IS_ROOT,
- 32768);
- clk_register_clkdev(clk, "clk_32k", NULL);
- clks[clk_32k] = clk;
- /* clk_m_div2 */
- clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m",
- CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "clk_m_div2", NULL);
- clks[clk_m_div2] = clk;
-
- /* clk_m_div4 */
- clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m",
- CLK_SET_RATE_PARENT, 1, 4);
- clk_register_clkdev(clk, "clk_m_div4", NULL);
- clks[clk_m_div4] = clk;
-
- /* cml0 */
- clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX,
- 0, 0, &cml_lock);
- clk_register_clkdev(clk, "cml0", NULL);
- clks[cml0] = clk;
-
- /* cml1 */
- clk = clk_register_gate(NULL, "cml1", "pll_e", 0, clk_base + PLLE_AUX,
- 1, 0, &cml_lock);
- clk_register_clkdev(clk, "cml1", NULL);
- clks[cml1] = clk;
-}
-
-static void __init tegra30_osc_clk_init(void)
-{
- struct clk *clk;
- unsigned int pll_ref_div;
-
- tegra30_clk_measure_input_freq();
-
- /* clk_m */
- clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT,
- input_freq);
- clk_register_clkdev(clk, "clk_m", NULL);
- clks[clk_m] = clk;
-
- /* pll_ref */
- pll_ref_div = tegra30_get_pll_ref_div();
- clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
- CLK_SET_RATE_PARENT, 1, pll_ref_div);
- clk_register_clkdev(clk, "pll_ref", NULL);
- clks[pll_ref] = clk;
+ tegra_periph_clk_init(clk_base, pmc_base, tegra30_clks, &pll_p_params);
}
/* Tegra30 CPU clock and reset control functions */
@@ -1913,48 +1328,49 @@ static struct tegra_cpu_car_ops tegra30_cpu_car_ops = {
};
static struct tegra_clk_init_table init_table[] __initdata = {
- {uarta, pll_p, 408000000, 0},
- {uartb, pll_p, 408000000, 0},
- {uartc, pll_p, 408000000, 0},
- {uartd, pll_p, 408000000, 0},
- {uarte, pll_p, 408000000, 0},
- {pll_a, clk_max, 564480000, 1},
- {pll_a_out0, clk_max, 11289600, 1},
- {extern1, pll_a_out0, 0, 1},
- {clk_out_1_mux, extern1, 0, 0},
- {clk_out_1, clk_max, 0, 1},
- {blink, clk_max, 0, 1},
- {i2s0, pll_a_out0, 11289600, 0},
- {i2s1, pll_a_out0, 11289600, 0},
- {i2s2, pll_a_out0, 11289600, 0},
- {i2s3, pll_a_out0, 11289600, 0},
- {i2s4, pll_a_out0, 11289600, 0},
- {sdmmc1, pll_p, 48000000, 0},
- {sdmmc2, pll_p, 48000000, 0},
- {sdmmc3, pll_p, 48000000, 0},
- {pll_m, clk_max, 0, 1},
- {pclk, clk_max, 0, 1},
- {csite, clk_max, 0, 1},
- {emc, clk_max, 0, 1},
- {mselect, clk_max, 0, 1},
- {sbc1, pll_p, 100000000, 0},
- {sbc2, pll_p, 100000000, 0},
- {sbc3, pll_p, 100000000, 0},
- {sbc4, pll_p, 100000000, 0},
- {sbc5, pll_p, 100000000, 0},
- {sbc6, pll_p, 100000000, 0},
- {host1x, pll_c, 150000000, 0},
- {disp1, pll_p, 600000000, 0},
- {disp2, pll_p, 600000000, 0},
- {twd, clk_max, 0, 1},
- {gr2d, pll_c, 300000000, 0},
- {gr3d, pll_c, 300000000, 0},
- {clk_max, clk_max, 0, 0}, /* This MUST be the last entry. */
+ {TEGRA30_CLK_UARTA, TEGRA30_CLK_PLL_P, 408000000, 0},
+ {TEGRA30_CLK_UARTB, TEGRA30_CLK_PLL_P, 408000000, 0},
+ {TEGRA30_CLK_UARTC, TEGRA30_CLK_PLL_P, 408000000, 0},
+ {TEGRA30_CLK_UARTD, TEGRA30_CLK_PLL_P, 408000000, 0},
+ {TEGRA30_CLK_UARTE, TEGRA30_CLK_PLL_P, 408000000, 0},
+ {TEGRA30_CLK_PLL_A, TEGRA30_CLK_CLK_MAX, 564480000, 1},
+ {TEGRA30_CLK_PLL_A_OUT0, TEGRA30_CLK_CLK_MAX, 11289600, 1},
+ {TEGRA30_CLK_EXTERN1, TEGRA30_CLK_PLL_A_OUT0, 0, 1},
+ {TEGRA30_CLK_CLK_OUT_1_MUX, TEGRA30_CLK_EXTERN1, 0, 0},
+ {TEGRA30_CLK_CLK_OUT_1, TEGRA30_CLK_CLK_MAX, 0, 1},
+ {TEGRA30_CLK_BLINK, TEGRA30_CLK_CLK_MAX, 0, 1},
+ {TEGRA30_CLK_I2S0, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA30_CLK_I2S1, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA30_CLK_I2S2, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA30_CLK_I2S3, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA30_CLK_I2S4, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA30_CLK_SDMMC1, TEGRA30_CLK_PLL_P, 48000000, 0},
+ {TEGRA30_CLK_SDMMC2, TEGRA30_CLK_PLL_P, 48000000, 0},
+ {TEGRA30_CLK_SDMMC3, TEGRA30_CLK_PLL_P, 48000000, 0},
+ {TEGRA30_CLK_PLL_M, TEGRA30_CLK_CLK_MAX, 0, 1},
+ {TEGRA30_CLK_PCLK, TEGRA30_CLK_CLK_MAX, 0, 1},
+ {TEGRA30_CLK_CSITE, TEGRA30_CLK_CLK_MAX, 0, 1},
+ {TEGRA30_CLK_EMC, TEGRA30_CLK_CLK_MAX, 0, 1},
+ {TEGRA30_CLK_MSELECT, TEGRA30_CLK_CLK_MAX, 0, 1},
+ {TEGRA30_CLK_SBC1, TEGRA30_CLK_PLL_P, 100000000, 0},
+ {TEGRA30_CLK_SBC2, TEGRA30_CLK_PLL_P, 100000000, 0},
+ {TEGRA30_CLK_SBC3, TEGRA30_CLK_PLL_P, 100000000, 0},
+ {TEGRA30_CLK_SBC4, TEGRA30_CLK_PLL_P, 100000000, 0},
+ {TEGRA30_CLK_SBC5, TEGRA30_CLK_PLL_P, 100000000, 0},
+ {TEGRA30_CLK_SBC6, TEGRA30_CLK_PLL_P, 100000000, 0},
+ {TEGRA30_CLK_HOST1X, TEGRA30_CLK_PLL_C, 150000000, 0},
+ {TEGRA30_CLK_DISP1, TEGRA30_CLK_PLL_P, 600000000, 0},
+ {TEGRA30_CLK_DISP2, TEGRA30_CLK_PLL_P, 600000000, 0},
+ {TEGRA30_CLK_TWD, TEGRA30_CLK_CLK_MAX, 0, 1},
+ {TEGRA30_CLK_GR2D, TEGRA30_CLK_PLL_C, 300000000, 0},
+ {TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0},
+ {TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0},
+ {TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0}, /* This MUST be the last entry. */
};
static void __init tegra30_clock_apply_init_table(void)
{
- tegra_init_from_table(init_table, clks, clk_max);
+ tegra_init_from_table(init_table, clks, TEGRA30_CLK_CLK_MAX);
}
/*
@@ -1963,19 +1379,18 @@ static void __init tegra30_clock_apply_init_table(void)
* table under two names.
*/
static struct tegra_clk_duplicate tegra_clk_duplicates[] = {
- TEGRA_CLK_DUPLICATE(usbd, "utmip-pad", NULL),
- TEGRA_CLK_DUPLICATE(usbd, "tegra-ehci.0", NULL),
- TEGRA_CLK_DUPLICATE(usbd, "tegra-otg", NULL),
- TEGRA_CLK_DUPLICATE(bsev, "tegra-avp", "bsev"),
- TEGRA_CLK_DUPLICATE(bsev, "nvavp", "bsev"),
- TEGRA_CLK_DUPLICATE(vde, "tegra-aes", "vde"),
- TEGRA_CLK_DUPLICATE(bsea, "tegra-aes", "bsea"),
- TEGRA_CLK_DUPLICATE(bsea, "nvavp", "bsea"),
- TEGRA_CLK_DUPLICATE(cml1, "tegra_sata_cml", NULL),
- TEGRA_CLK_DUPLICATE(cml0, "tegra_pcie", "cml"),
- TEGRA_CLK_DUPLICATE(pciex, "tegra_pcie", "pciex"),
- TEGRA_CLK_DUPLICATE(vcp, "nvavp", "vcp"),
- TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL), /* MUST be the last entry */
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_USBD, "utmip-pad", NULL),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_USBD, "tegra-ehci.0", NULL),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_USBD, "tegra-otg", NULL),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_BSEV, "tegra-avp", "bsev"),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_BSEV, "nvavp", "bsev"),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_VDE, "tegra-aes", "vde"),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_BSEA, "tegra-aes", "bsea"),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_BSEA, "nvavp", "bsea"),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_CML1, "tegra_sata_cml", NULL),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_CML0, "tegra_pcie", "cml"),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_VCP, "nvavp", "vcp"),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_CLK_MAX, NULL, NULL), /* MUST be the last entry */
};
static const struct of_device_id pmc_match[] __initconst = {
@@ -1986,7 +1401,6 @@ static const struct of_device_id pmc_match[] __initconst = {
static void __init tegra30_clock_init(struct device_node *np)
{
struct device_node *node;
- int i;
clk_base = of_iomap(np, 0);
if (!clk_base) {
@@ -2006,29 +1420,27 @@ static void __init tegra30_clock_init(struct device_node *np)
BUG();
}
- tegra30_osc_clk_init();
- tegra30_fixed_clk_init();
+ clks = tegra_clk_init(clk_base, TEGRA30_CLK_CLK_MAX,
+ TEGRA30_CLK_PERIPH_BANKS);
+ if (!clks)
+ return;
+
+ if (tegra_osc_clk_init(clk_base, tegra30_clks, tegra30_input_freq,
+ ARRAY_SIZE(tegra30_input_freq), &input_freq, NULL) < 0)
+ return;
+
+ tegra_fixed_clk_init(tegra30_clks);
tegra30_pll_init();
tegra30_super_clk_init();
tegra30_periph_clk_init();
- tegra30_audio_clk_init();
- tegra30_pmc_clk_init();
-
- for (i = 0; i < ARRAY_SIZE(clks); i++) {
- if (IS_ERR(clks[i])) {
- pr_err("Tegra30 clk %d: register failed with %ld\n",
- i, PTR_ERR(clks[i]));
- BUG();
- }
- if (!clks[i])
- clks[i] = ERR_PTR(-EINVAL);
- }
+ tegra_audio_clk_init(clk_base, pmc_base, tegra30_clks, &pll_a_params);
+ tegra_pmc_clk_init(pmc_base, tegra30_clks);
- tegra_init_dup_clks(tegra_clk_duplicates, clks, clk_max);
+ tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX);
- clk_data.clks = clks;
- clk_data.clk_num = ARRAY_SIZE(clks);
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ tegra_add_of_provider(np);
+ tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
tegra_clk_apply_init_table = tegra30_clock_apply_init_table;
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 86581ac1fd69..c0a7d7723510 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -18,13 +18,175 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/clk/tegra.h>
+#include <linux/reset-controller.h>
+#include <linux/tegra-soc.h>
#include "clk.h"
+#define CLK_OUT_ENB_L 0x010
+#define CLK_OUT_ENB_H 0x014
+#define CLK_OUT_ENB_U 0x018
+#define CLK_OUT_ENB_V 0x360
+#define CLK_OUT_ENB_W 0x364
+#define CLK_OUT_ENB_X 0x280
+#define CLK_OUT_ENB_SET_L 0x320
+#define CLK_OUT_ENB_CLR_L 0x324
+#define CLK_OUT_ENB_SET_H 0x328
+#define CLK_OUT_ENB_CLR_H 0x32c
+#define CLK_OUT_ENB_SET_U 0x330
+#define CLK_OUT_ENB_CLR_U 0x334
+#define CLK_OUT_ENB_SET_V 0x440
+#define CLK_OUT_ENB_CLR_V 0x444
+#define CLK_OUT_ENB_SET_W 0x448
+#define CLK_OUT_ENB_CLR_W 0x44c
+#define CLK_OUT_ENB_SET_X 0x284
+#define CLK_OUT_ENB_CLR_X 0x288
+
+#define RST_DEVICES_L 0x004
+#define RST_DEVICES_H 0x008
+#define RST_DEVICES_U 0x00C
+#define RST_DFLL_DVCO 0x2F4
+#define RST_DEVICES_V 0x358
+#define RST_DEVICES_W 0x35C
+#define RST_DEVICES_X 0x28C
+#define RST_DEVICES_SET_L 0x300
+#define RST_DEVICES_CLR_L 0x304
+#define RST_DEVICES_SET_H 0x308
+#define RST_DEVICES_CLR_H 0x30c
+#define RST_DEVICES_SET_U 0x310
+#define RST_DEVICES_CLR_U 0x314
+#define RST_DEVICES_SET_V 0x430
+#define RST_DEVICES_CLR_V 0x434
+#define RST_DEVICES_SET_W 0x438
+#define RST_DEVICES_CLR_W 0x43c
+#define RST_DEVICES_SET_X 0x290
+#define RST_DEVICES_CLR_X 0x294
+
/* Global data of Tegra CPU CAR ops */
static struct tegra_cpu_car_ops dummy_car_ops;
struct tegra_cpu_car_ops *tegra_cpu_car_ops = &dummy_car_ops;
+int *periph_clk_enb_refcnt;
+static int periph_banks;
+static struct clk **clks;
+static int clk_num;
+static struct clk_onecell_data clk_data;
+
+static struct tegra_clk_periph_regs periph_regs[] = {
+ [0] = {
+ .enb_reg = CLK_OUT_ENB_L,
+ .enb_set_reg = CLK_OUT_ENB_SET_L,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_L,
+ .rst_reg = RST_DEVICES_L,
+ .rst_set_reg = RST_DEVICES_SET_L,
+ .rst_clr_reg = RST_DEVICES_CLR_L,
+ },
+ [1] = {
+ .enb_reg = CLK_OUT_ENB_H,
+ .enb_set_reg = CLK_OUT_ENB_SET_H,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_H,
+ .rst_reg = RST_DEVICES_H,
+ .rst_set_reg = RST_DEVICES_SET_H,
+ .rst_clr_reg = RST_DEVICES_CLR_H,
+ },
+ [2] = {
+ .enb_reg = CLK_OUT_ENB_U,
+ .enb_set_reg = CLK_OUT_ENB_SET_U,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_U,
+ .rst_reg = RST_DEVICES_U,
+ .rst_set_reg = RST_DEVICES_SET_U,
+ .rst_clr_reg = RST_DEVICES_CLR_U,
+ },
+ [3] = {
+ .enb_reg = CLK_OUT_ENB_V,
+ .enb_set_reg = CLK_OUT_ENB_SET_V,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_V,
+ .rst_reg = RST_DEVICES_V,
+ .rst_set_reg = RST_DEVICES_SET_V,
+ .rst_clr_reg = RST_DEVICES_CLR_V,
+ },
+ [4] = {
+ .enb_reg = CLK_OUT_ENB_W,
+ .enb_set_reg = CLK_OUT_ENB_SET_W,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_W,
+ .rst_reg = RST_DEVICES_W,
+ .rst_set_reg = RST_DEVICES_SET_W,
+ .rst_clr_reg = RST_DEVICES_CLR_W,
+ },
+ [5] = {
+ .enb_reg = CLK_OUT_ENB_X,
+ .enb_set_reg = CLK_OUT_ENB_SET_X,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_X,
+ .rst_reg = RST_DEVICES_X,
+ .rst_set_reg = RST_DEVICES_SET_X,
+ .rst_clr_reg = RST_DEVICES_CLR_X,
+ },
+};
+
+static void __iomem *clk_base;
+
+static int tegra_clk_rst_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ /*
+ * If the peripheral is on the APB bus, we must read from the APB bus to
+ * flush the posted write before asserting the reset; this prevents the
+ * peripheral from being accessed after its clock has been disabled.
+ * Since the reset driver has no knowledge of which reset IDs represent
+ * which devices, simply do this all the time.
+ */
+ tegra_read_chipid();
+
+ writel_relaxed(BIT(id % 32),
+ clk_base + periph_regs[id / 32].rst_set_reg);
+
+ return 0;
+}
+
+static int tegra_clk_rst_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ writel_relaxed(BIT(id % 32),
+ clk_base + periph_regs[id / 32].rst_clr_reg);
+
+ return 0;
+}
+
+struct tegra_clk_periph_regs *get_reg_bank(int clkid)
+{
+ int reg_bank = clkid / 32;
+
+ if (reg_bank < periph_banks)
+ return &periph_regs[reg_bank];
+ else {
+ WARN_ON(1);
+ return NULL;
+ }
+}
+
+struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks)
+{
+ clk_base = regs;
+
+ if (WARN_ON(banks > ARRAY_SIZE(periph_regs)))
+ return NULL;
+
+ periph_clk_enb_refcnt = kzalloc(32 * banks *
+ sizeof(*periph_clk_enb_refcnt), GFP_KERNEL);
+ if (!periph_clk_enb_refcnt)
+ return NULL;
+
+ periph_banks = banks;
+
+ clks = kzalloc(num * sizeof(struct clk *), GFP_KERNEL);
+ if (!clks)
+ kfree(periph_clk_enb_refcnt);
+
+ clk_num = num;
+
+ return clks;
+}
+
void __init tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
struct clk *clks[], int clk_max)
{
@@ -74,6 +236,58 @@ void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
}
}
+static struct reset_control_ops rst_ops = {
+ .assert = tegra_clk_rst_assert,
+ .deassert = tegra_clk_rst_deassert,
+};
+
+static struct reset_controller_dev rst_ctlr = {
+ .ops = &rst_ops,
+ .owner = THIS_MODULE,
+ .of_reset_n_cells = 1,
+};
+
+void __init tegra_add_of_provider(struct device_node *np)
+{
+ int i;
+
+ for (i = 0; i < clk_num; i++) {
+ if (IS_ERR(clks[i])) {
+ pr_err("Tegra clk %d: register failed with %ld\n",
+        i, PTR_ERR(clks[i]));
+ }
+ if (!clks[i])
+ clks[i] = ERR_PTR(-EINVAL);
+ }
+
+ clk_data.clks = clks;
+ clk_data.clk_num = clk_num;
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ rst_ctlr.of_node = np;
+ rst_ctlr.nr_resets = clk_num * 32;
+ reset_controller_register(&rst_ctlr);
+}
+
+void __init tegra_register_devclks(struct tegra_devclk *dev_clks, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++, dev_clks++)
+ clk_register_clkdev(clks[dev_clks->dt_id], dev_clks->con_id,
+ dev_clks->dev_id);
+}
+
+struct clk ** __init tegra_lookup_dt_id(int clk_id,
+ struct tegra_clk *tegra_clk)
+{
+ if (tegra_clk[clk_id].present)
+ return &clks[tegra_clk[clk_id].dt_id];
+ else
+ return NULL;
+}
+
tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
void __init tegra_clocks_apply_init_table(void)
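The clk.c changes above also register the CAR as a reset controller (rst_ctlr with tegra_clk_rst_assert/deassert). As a rough illustration only, not part of this patch, a peripheral driver could then toggle its reset line through the generic reset framework; the device and the DT reset index it would reference are hypothetical:

/*
 * Hedged sketch (not part of this patch): consumer side of the reset
 * controller registered by tegra_add_of_provider(). Assumes a hypothetical
 * platform device whose DT node carries something like
 * "resets = <&tegra_car N>;" -- the index is illustrative only.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int example_periph_hw_reset(struct platform_device *pdev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* lands in tegra_clk_rst_assert(): BIT(id % 32) -> RST_DEVICES_SET_* */
	reset_control_assert(rst);
	udelay(2);
	/* lands in tegra_clk_rst_deassert(): BIT(id % 32) -> RST_DEVICES_CLR_* */
	reset_control_deassert(rst);

	return 0;
}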
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 07cfacd91686..16ec8d6bb87f 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -37,6 +37,8 @@ struct tegra_clk_sync_source {
container_of(_hw, struct tegra_clk_sync_source, hw)
extern const struct clk_ops tegra_clk_sync_source_ops;
+extern int *periph_clk_enb_refcnt;
+
struct clk *tegra_clk_register_sync_source(const char *name,
unsigned long fixed_rate, unsigned long max_rate);
@@ -188,12 +190,15 @@ struct tegra_clk_pll_params {
u32 ext_misc_reg[3];
u32 pmc_divnm_reg;
u32 pmc_divp_reg;
+ u32 flags;
int stepa_shift;
int stepb_shift;
int lock_delay;
int max_p;
struct pdiv_map *pdiv_tohw;
struct div_nmp *div_nmp;
+ struct tegra_clk_pll_freq_table *freq_table;
+ unsigned long fixed_rate;
};
/**
@@ -233,10 +238,7 @@ struct tegra_clk_pll {
struct clk_hw hw;
void __iomem *clk_base;
void __iomem *pmc;
- u32 flags;
- unsigned long fixed_rate;
spinlock_t *lock;
- struct tegra_clk_pll_freq_table *freq_table;
struct tegra_clk_pll_params *params;
};
@@ -258,56 +260,49 @@ extern const struct clk_ops tegra_clk_pll_ops;
extern const struct clk_ops tegra_clk_plle_ops;
struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
- struct tegra_clk_pll_params *pll_params, u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock);
+ unsigned long flags, struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock);
struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
- struct tegra_clk_pll_params *pll_params, u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock);
+ unsigned long flags, struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock);
struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
+ unsigned long flags,
struct tegra_clk_pll_params *pll_params,
- u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock);
struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
+ unsigned long flags,
struct tegra_clk_pll_params *pll_params,
- u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock);
struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
+ unsigned long flags,
struct tegra_clk_pll_params *pll_params,
- u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock);
struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
+ unsigned long flags,
struct tegra_clk_pll_params *pll_params,
- u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock, unsigned long parent_rate);
struct clk *tegra_clk_register_plle_tegra114(const char *name,
const char *parent_name,
void __iomem *clk_base, unsigned long flags,
- unsigned long fixed_rate,
struct tegra_clk_pll_params *pll_params,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock);
+struct clk *tegra_clk_register_pllss(const char *name, const char *parent_name,
+ void __iomem *clk_base, unsigned long flags,
+ struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock);
+
/**
* struct tegra_clk_pll_out - PLL divider down clock
*
@@ -395,13 +390,13 @@ struct tegra_clk_periph_gate {
#define TEGRA_PERIPH_MANUAL_RESET BIT(1)
#define TEGRA_PERIPH_ON_APB BIT(2)
#define TEGRA_PERIPH_WAR_1005168 BIT(3)
+#define TEGRA_PERIPH_NO_DIV BIT(4)
+#define TEGRA_PERIPH_NO_GATE BIT(5)
-void tegra_periph_reset(struct tegra_clk_periph_gate *gate, bool assert);
extern const struct clk_ops tegra_clk_periph_gate_ops;
struct clk *tegra_clk_register_periph_gate(const char *name,
const char *parent_name, u8 gate_flags, void __iomem *clk_base,
- unsigned long flags, int clk_num,
- struct tegra_clk_periph_regs *pregs, int *enable_refcnt);
+ unsigned long flags, int clk_num, int *enable_refcnt);
/**
* struct clk-periph - peripheral clock
@@ -443,26 +438,26 @@ struct clk *tegra_clk_register_periph_nodiv(const char *name,
#define TEGRA_CLK_PERIPH(_mux_shift, _mux_mask, _mux_flags, \
_div_shift, _div_width, _div_frac_width, \
- _div_flags, _clk_num, _enb_refcnt, _regs, \
- _gate_flags, _table) \
+ _div_flags, _clk_num,\
+ _gate_flags, _table, _lock) \
{ \
.mux = { \
.flags = _mux_flags, \
.shift = _mux_shift, \
.mask = _mux_mask, \
.table = _table, \
+ .lock = _lock, \
}, \
.divider = { \
.flags = _div_flags, \
.shift = _div_shift, \
.width = _div_width, \
.frac_width = _div_frac_width, \
+ .lock = _lock, \
}, \
.gate = { \
.flags = _gate_flags, \
.clk_num = _clk_num, \
- .enable_refcnt = _enb_refcnt, \
- .regs = _regs, \
}, \
.mux_ops = &clk_mux_ops, \
.div_ops = &tegra_clk_frac_div_ops, \
@@ -472,7 +467,10 @@ struct clk *tegra_clk_register_periph_nodiv(const char *name,
struct tegra_periph_init_data {
const char *name;
int clk_id;
- const char **parent_names;
+ union {
+ const char **parent_names;
+ const char *parent_name;
+ } p;
int num_parents;
struct tegra_clk_periph periph;
u32 offset;
@@ -483,20 +481,19 @@ struct tegra_periph_init_data {
#define TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parent_names, _offset,\
_mux_shift, _mux_mask, _mux_flags, _div_shift, \
- _div_width, _div_frac_width, _div_flags, _regs, \
- _clk_num, _enb_refcnt, _gate_flags, _clk_id, _table,\
- _flags) \
+ _div_width, _div_frac_width, _div_flags, \
+ _clk_num, _gate_flags, _clk_id, _table, \
+ _flags, _lock) \
{ \
.name = _name, \
.clk_id = _clk_id, \
- .parent_names = _parent_names, \
+ .p.parent_names = _parent_names, \
.num_parents = ARRAY_SIZE(_parent_names), \
.periph = TEGRA_CLK_PERIPH(_mux_shift, _mux_mask, \
_mux_flags, _div_shift, \
_div_width, _div_frac_width, \
_div_flags, _clk_num, \
- _enb_refcnt, _regs, \
- _gate_flags, _table), \
+ _gate_flags, _table, _lock), \
.offset = _offset, \
.con_id = _con_id, \
.dev_id = _dev_id, \
@@ -505,13 +502,13 @@ struct tegra_periph_init_data {
#define TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parent_names, _offset,\
_mux_shift, _mux_width, _mux_flags, _div_shift, \
- _div_width, _div_frac_width, _div_flags, _regs, \
- _clk_num, _enb_refcnt, _gate_flags, _clk_id) \
+ _div_width, _div_frac_width, _div_flags, \
+ _clk_num, _gate_flags, _clk_id) \
TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parent_names, _offset,\
_mux_shift, BIT(_mux_width) - 1, _mux_flags, \
_div_shift, _div_width, _div_frac_width, _div_flags, \
- _regs, _clk_num, _enb_refcnt, _gate_flags, _clk_id,\
- NULL, 0)
+ _clk_num, _gate_flags, _clk_id,\
+ NULL, 0, NULL)
/**
* struct clk_super_mux - super clock
@@ -581,12 +578,49 @@ struct tegra_clk_duplicate {
}, \
}
+struct tegra_clk {
+ int dt_id;
+ bool present;
+};
+
+struct tegra_devclk {
+ int dt_id;
+ char *dev_id;
+ char *con_id;
+};
+
void tegra_init_from_table(struct tegra_clk_init_table *tbl,
struct clk *clks[], int clk_max);
void tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
struct clk *clks[], int clk_max);
+struct tegra_clk_periph_regs *get_reg_bank(int clkid);
+struct clk **tegra_clk_init(void __iomem *clk_base, int num, int periph_banks);
+
+struct clk **tegra_lookup_dt_id(int clk_id, struct tegra_clk *tegra_clk);
+
+void tegra_add_of_provider(struct device_node *np);
+void tegra_register_devclks(struct tegra_devclk *dev_clks, int num);
+
+void tegra_audio_clk_init(void __iomem *clk_base,
+ void __iomem *pmc_base, struct tegra_clk *tegra_clks,
+ struct tegra_clk_pll_params *pll_params);
+
+void tegra_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base,
+ struct tegra_clk *tegra_clks,
+ struct tegra_clk_pll_params *pll_params);
+
+void tegra_pmc_clk_init(void __iomem *pmc_base, struct tegra_clk *tegra_clks);
+void tegra_fixed_clk_init(struct tegra_clk *tegra_clks);
+int tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *tegra_clks,
+ unsigned long *input_freqs, int num,
+ unsigned long *osc_freq,
+ unsigned long *pll_ref_freq);
+void tegra_super_clk_gen4_init(void __iomem *clk_base,
+ void __iomem *pmc_base, struct tegra_clk *tegra_clks,
+ struct tegra_clk_pll_params *pll_params);
+
void tegra114_clock_tune_cpu_trimmers_high(void);
void tegra114_clock_tune_cpu_trimmers_low(void);
void tegra114_clock_tune_cpu_trimmers_init(void);
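For illustration only (not taken from this patch), the clkdev alias table consumed by the new tegra_register_devclks() helper could be declared per SoC roughly like this; the dev_id/con_id strings below are hypothetical, while the DT IDs are existing Tegra30 ones:

/*
 * Hedged sketch: per-SoC clkdev alias table for tegra_register_devclks().
 * The dev_id/con_id values are illustrative only.
 */
static struct tegra_devclk example_devclks[] __initdata = {
	{ .con_id = "pll_p", .dt_id = TEGRA30_CLK_PLL_P },
	{ .dev_id = "sdhci-tegra.0", .dt_id = TEGRA30_CLK_SDMMC1 },
};

/*
 * ...after the clocks themselves have been registered:
 *
 *	tegra_register_devclks(example_devclks, ARRAY_SIZE(example_devclks));
 */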
diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile
new file mode 100644
index 000000000000..4319d4031aa3
--- /dev/null
+++ b/drivers/clk/ti/Makefile
@@ -0,0 +1,11 @@
+ifneq ($(CONFIG_OF),)
+obj-y += clk.o autoidle.o clockdomain.o
+clk-common = dpll.o composite.o divider.o gate.o \
+ fixed-factor.o mux.o apll.o
+obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o
+obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o clk-3xxx.o
+obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o
+obj-$(CONFIG_SOC_OMAP5) += $(clk-common) clk-54xx.o
+obj-$(CONFIG_SOC_DRA7XX) += $(clk-common) clk-7xx.o
+obj-$(CONFIG_SOC_AM43XX) += $(clk-common) clk-43xx.o
+endif
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
new file mode 100644
index 000000000000..b986f61f5a77
--- /dev/null
+++ b/drivers/clk/ti/apll.c
@@ -0,0 +1,223 @@
+/*
+ * OMAP APLL clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * J Keerthy <j-keerthy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/log2.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include <linux/delay.h>
+
+#define APLL_FORCE_LOCK 0x1
+#define APLL_AUTO_IDLE 0x2
+#define MAX_APLL_WAIT_TRIES 1000000
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static int dra7_apll_enable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ int r = 0, i = 0;
+ struct dpll_data *ad;
+ const char *clk_name;
+ u8 state = 1;
+ u32 v;
+
+ ad = clk->dpll_data;
+ if (!ad)
+ return -EINVAL;
+
+ clk_name = __clk_get_name(clk->hw.clk);
+
+ state <<= __ffs(ad->idlest_mask);
+
+ /* Check whether the APLL is already locked */
+ v = ti_clk_ll_ops->clk_readl(ad->idlest_reg);
+
+ if ((v & ad->idlest_mask) == state)
+ return r;
+
+ v = ti_clk_ll_ops->clk_readl(ad->control_reg);
+ v &= ~ad->enable_mask;
+ v |= APLL_FORCE_LOCK << __ffs(ad->enable_mask);
+ ti_clk_ll_ops->clk_writel(v, ad->control_reg);
+
+ state <<= __ffs(ad->idlest_mask);
+
+ while (1) {
+ v = ti_clk_ll_ops->clk_readl(ad->idlest_reg);
+ if ((v & ad->idlest_mask) == state)
+ break;
+ if (i >= MAX_APLL_WAIT_TRIES)
+ break;
+ i++;
+ udelay(1);
+ }
+
+ if (i == MAX_APLL_WAIT_TRIES) {
+ pr_warn("clock: %s failed transition to '%s'\n",
+ clk_name, (state) ? "locked" : "bypassed");
+ } else {
+ pr_debug("clock: %s transition to '%s' in %d loops\n",
+ clk_name, (state) ? "locked" : "bypassed", i);
+
+ r = 0;
+ }
+
+ return r;
+}
+
+static void dra7_apll_disable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *ad;
+ u8 state = 1;
+ u32 v;
+
+ ad = clk->dpll_data;
+
+ state <<= __ffs(ad->idlest_mask);
+
+ v = ti_clk_ll_ops->clk_readl(ad->control_reg);
+ v &= ~ad->enable_mask;
+ v |= APLL_AUTO_IDLE << __ffs(ad->enable_mask);
+ ti_clk_ll_ops->clk_writel(v, ad->control_reg);
+}
+
+static int dra7_apll_is_enabled(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *ad;
+ u32 v;
+
+ ad = clk->dpll_data;
+
+ v = ti_clk_ll_ops->clk_readl(ad->control_reg);
+ v &= ad->enable_mask;
+
+ v >>= __ffs(ad->enable_mask);
+
+ return v == APLL_AUTO_IDLE ? 0 : 1;
+}
+
+static u8 dra7_init_apll_parent(struct clk_hw *hw)
+{
+ return 0;
+}
+
+static const struct clk_ops apll_ck_ops = {
+ .enable = &dra7_apll_enable,
+ .disable = &dra7_apll_disable,
+ .is_enabled = &dra7_apll_is_enabled,
+ .get_parent = &dra7_init_apll_parent,
+};
+
+static void __init omap_clk_register_apll(struct clk_hw *hw,
+ struct device_node *node)
+{
+ struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
+ struct dpll_data *ad = clk_hw->dpll_data;
+ struct clk *clk;
+
+ ad->clk_ref = of_clk_get(node, 0);
+ ad->clk_bypass = of_clk_get(node, 1);
+
+ if (IS_ERR(ad->clk_ref) || IS_ERR(ad->clk_bypass)) {
+ pr_debug("clk-ref or clk-bypass for %s not ready, retry\n",
+ node->name);
+ if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
+ return;
+
+ goto cleanup;
+ }
+
+ clk = clk_register(NULL, &clk_hw->hw);
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ kfree(clk_hw->hw.init->parent_names);
+ kfree(clk_hw->hw.init);
+ return;
+ }
+
+cleanup:
+ kfree(clk_hw->dpll_data);
+ kfree(clk_hw->hw.init->parent_names);
+ kfree(clk_hw->hw.init);
+ kfree(clk_hw);
+}
+
+static void __init of_dra7_apll_setup(struct device_node *node)
+{
+ struct dpll_data *ad = NULL;
+ struct clk_hw_omap *clk_hw = NULL;
+ struct clk_init_data *init = NULL;
+ const char **parent_names = NULL;
+ int i;
+
+ ad = kzalloc(sizeof(*ad), GFP_KERNEL);
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ init = kzalloc(sizeof(*init), GFP_KERNEL);
+ if (!ad || !clk_hw || !init)
+ goto cleanup;
+
+ clk_hw->dpll_data = ad;
+ clk_hw->hw.init = init;
+ clk_hw->flags = MEMMAP_ADDRESSING;
+
+ init->name = node->name;
+ init->ops = &apll_ck_ops;
+
+ init->num_parents = of_clk_get_parent_count(node);
+ if (init->num_parents < 1) {
+ pr_err("dra7 apll %s must have parent(s)\n", node->name);
+ goto cleanup;
+ }
+
+ parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL);
+ if (!parent_names)
+ goto cleanup;
+
+ for (i = 0; i < init->num_parents; i++)
+ parent_names[i] = of_clk_get_parent_name(node, i);
+
+ init->parent_names = parent_names;
+
+ ad->control_reg = ti_clk_get_reg_addr(node, 0);
+ ad->idlest_reg = ti_clk_get_reg_addr(node, 1);
+
+ if (!ad->control_reg || !ad->idlest_reg)
+ goto cleanup;
+
+ ad->idlest_mask = 0x1;
+ ad->enable_mask = 0x3;
+
+ omap_clk_register_apll(&clk_hw->hw, node);
+ return;
+
+cleanup:
+ kfree(parent_names);
+ kfree(ad);
+ kfree(clk_hw);
+ kfree(init);
+}
+CLK_OF_DECLARE(dra7_apll_clock, "ti,dra7-apll-clock", of_dra7_apll_setup);
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c
new file mode 100644
index 000000000000..8912ff80af34
--- /dev/null
+++ b/drivers/clk/ti/autoidle.c
@@ -0,0 +1,133 @@
+/*
+ * TI clock autoidle support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+struct clk_ti_autoidle {
+ void __iomem *reg;
+ u8 shift;
+ u8 flags;
+ const char *name;
+ struct list_head node;
+};
+
+#define AUTOIDLE_LOW 0x1
+
+static LIST_HEAD(autoidle_clks);
+
+static void ti_allow_autoidle(struct clk_ti_autoidle *clk)
+{
+ u32 val;
+
+ val = ti_clk_ll_ops->clk_readl(clk->reg);
+
+ if (clk->flags & AUTOIDLE_LOW)
+ val &= ~(1 << clk->shift);
+ else
+ val |= (1 << clk->shift);
+
+ ti_clk_ll_ops->clk_writel(val, clk->reg);
+}
+
+static void ti_deny_autoidle(struct clk_ti_autoidle *clk)
+{
+ u32 val;
+
+ val = ti_clk_ll_ops->clk_readl(clk->reg);
+
+ if (clk->flags & AUTOIDLE_LOW)
+ val |= (1 << clk->shift);
+ else
+ val &= ~(1 << clk->shift);
+
+ ti_clk_ll_ops->clk_writel(val, clk->reg);
+}
+
+/**
+ * of_ti_clk_allow_autoidle_all - enable autoidle for all clocks
+ *
+ * Enables hardware autoidle for all registered DT clocks that have
+ * the feature.
+ */
+void of_ti_clk_allow_autoidle_all(void)
+{
+ struct clk_ti_autoidle *c;
+
+ list_for_each_entry(c, &autoidle_clks, node)
+ ti_allow_autoidle(c);
+}
+
+/**
+ * of_ti_clk_deny_autoidle_all - disable autoidle for all clocks
+ *
+ * Disables hardware autoidle for all registered DT clocks that have
+ * the feature.
+ */
+void of_ti_clk_deny_autoidle_all(void)
+{
+ struct clk_ti_autoidle *c;
+
+ list_for_each_entry(c, &autoidle_clks, node)
+ ti_deny_autoidle(c);
+}
+
+/**
+ * of_ti_clk_autoidle_setup - sets up hardware autoidle for a clock
+ * @node: pointer to the clock device node
+ *
+ * Checks whether a clock has hardware autoidle support (i.e. whether the
+ * 'ti,autoidle-shift' property is present in the device tree node) and
+ * sets up the hardware autoidle feature for the clock
+ * if available. If autoidle is available, the clock is also added
+ * to the autoidle list for later processing. Returns 0 on success,
+ * negative error value on failure.
+ */
+int __init of_ti_clk_autoidle_setup(struct device_node *node)
+{
+ u32 shift;
+ struct clk_ti_autoidle *clk;
+
+ /* Check if this clock has autoidle support or not */
+ if (of_property_read_u32(node, "ti,autoidle-shift", &shift))
+ return 0;
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+
+ if (!clk)
+ return -ENOMEM;
+
+ clk->shift = shift;
+ clk->name = node->name;
+ clk->reg = ti_clk_get_reg_addr(node, 0);
+
+ if (!clk->reg) {
+ kfree(clk);
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(node, "ti,invert-autoidle-bit"))
+ clk->flags |= AUTOIDLE_LOW;
+
+ list_add(&clk->node, &autoidle_clks);
+
+ return 0;
+}
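As a rough illustration only (not part of this patch), a clock setup path could hook into the autoidle support above as follows; the registration function name is hypothetical, and the allow/deny calls mirror the kerneldoc comments:

/*
 * Hedged sketch (not part of this patch): using the autoidle helpers above
 * from a hypothetical clock setup function, assuming their declarations
 * from this series are visible.
 */
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/clk/ti.h>

static void __init example_clk_setup(struct device_node *node)
{
	/* ...register the clock itself first (omitted)... */

	if (of_ti_clk_autoidle_setup(node))
		pr_warn("failed to set up autoidle for %s\n", node->name);
}

/*
 * Later, e.g. while reprogramming DPLLs, autoidle can be toggled for every
 * clock that registered itself:
 *
 *	of_ti_clk_deny_autoidle_all();
 *	...reconfigure...
 *	of_ti_clk_allow_autoidle_all();
 */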
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
new file mode 100644
index 000000000000..776ee4594bd4
--- /dev/null
+++ b/drivers/clk/ti/clk-33xx.c
@@ -0,0 +1,161 @@
+/*
+ * AM33XX Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+static struct ti_dt_clk am33xx_clks[] = {
+ DT_CLK(NULL, "clk_32768_ck", "clk_32768_ck"),
+ DT_CLK(NULL, "clk_rc32k_ck", "clk_rc32k_ck"),
+ DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+ DT_CLK(NULL, "virt_24000000_ck", "virt_24000000_ck"),
+ DT_CLK(NULL, "virt_25000000_ck", "virt_25000000_ck"),
+ DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+ DT_CLK(NULL, "sys_clkin_ck", "sys_clkin_ck"),
+ DT_CLK(NULL, "tclkin_ck", "tclkin_ck"),
+ DT_CLK(NULL, "dpll_core_ck", "dpll_core_ck"),
+ DT_CLK(NULL, "dpll_core_x2_ck", "dpll_core_x2_ck"),
+ DT_CLK(NULL, "dpll_core_m4_ck", "dpll_core_m4_ck"),
+ DT_CLK(NULL, "dpll_core_m5_ck", "dpll_core_m5_ck"),
+ DT_CLK(NULL, "dpll_core_m6_ck", "dpll_core_m6_ck"),
+ DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
+ DT_CLK("cpu0", NULL, "dpll_mpu_ck"),
+ DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
+ DT_CLK(NULL, "dpll_ddr_ck", "dpll_ddr_ck"),
+ DT_CLK(NULL, "dpll_ddr_m2_ck", "dpll_ddr_m2_ck"),
+ DT_CLK(NULL, "dpll_ddr_m2_div2_ck", "dpll_ddr_m2_div2_ck"),
+ DT_CLK(NULL, "dpll_disp_ck", "dpll_disp_ck"),
+ DT_CLK(NULL, "dpll_disp_m2_ck", "dpll_disp_m2_ck"),
+ DT_CLK(NULL, "dpll_per_ck", "dpll_per_ck"),
+ DT_CLK(NULL, "dpll_per_m2_ck", "dpll_per_m2_ck"),
+ DT_CLK(NULL, "dpll_per_m2_div4_wkupdm_ck", "dpll_per_m2_div4_wkupdm_ck"),
+ DT_CLK(NULL, "dpll_per_m2_div4_ck", "dpll_per_m2_div4_ck"),
+ DT_CLK(NULL, "adc_tsc_fck", "adc_tsc_fck"),
+ DT_CLK(NULL, "cefuse_fck", "cefuse_fck"),
+ DT_CLK(NULL, "clkdiv32k_ck", "clkdiv32k_ck"),
+ DT_CLK(NULL, "clkdiv32k_ick", "clkdiv32k_ick"),
+ DT_CLK(NULL, "dcan0_fck", "dcan0_fck"),
+ DT_CLK("481cc000.d_can", NULL, "dcan0_fck"),
+ DT_CLK(NULL, "dcan1_fck", "dcan1_fck"),
+ DT_CLK("481d0000.d_can", NULL, "dcan1_fck"),
+ DT_CLK(NULL, "pruss_ocp_gclk", "pruss_ocp_gclk"),
+ DT_CLK(NULL, "mcasp0_fck", "mcasp0_fck"),
+ DT_CLK(NULL, "mcasp1_fck", "mcasp1_fck"),
+ DT_CLK(NULL, "mmu_fck", "mmu_fck"),
+ DT_CLK(NULL, "smartreflex0_fck", "smartreflex0_fck"),
+ DT_CLK(NULL, "smartreflex1_fck", "smartreflex1_fck"),
+ DT_CLK(NULL, "sha0_fck", "sha0_fck"),
+ DT_CLK(NULL, "aes0_fck", "aes0_fck"),
+ DT_CLK(NULL, "rng_fck", "rng_fck"),
+ DT_CLK(NULL, "timer1_fck", "timer1_fck"),
+ DT_CLK(NULL, "timer2_fck", "timer2_fck"),
+ DT_CLK(NULL, "timer3_fck", "timer3_fck"),
+ DT_CLK(NULL, "timer4_fck", "timer4_fck"),
+ DT_CLK(NULL, "timer5_fck", "timer5_fck"),
+ DT_CLK(NULL, "timer6_fck", "timer6_fck"),
+ DT_CLK(NULL, "timer7_fck", "timer7_fck"),
+ DT_CLK(NULL, "usbotg_fck", "usbotg_fck"),
+ DT_CLK(NULL, "ieee5000_fck", "ieee5000_fck"),
+ DT_CLK(NULL, "wdt1_fck", "wdt1_fck"),
+ DT_CLK(NULL, "l4_rtc_gclk", "l4_rtc_gclk"),
+ DT_CLK(NULL, "l3_gclk", "l3_gclk"),
+ DT_CLK(NULL, "dpll_core_m4_div2_ck", "dpll_core_m4_div2_ck"),
+ DT_CLK(NULL, "l4hs_gclk", "l4hs_gclk"),
+ DT_CLK(NULL, "l3s_gclk", "l3s_gclk"),
+ DT_CLK(NULL, "l4fw_gclk", "l4fw_gclk"),
+ DT_CLK(NULL, "l4ls_gclk", "l4ls_gclk"),
+ DT_CLK(NULL, "clk_24mhz", "clk_24mhz"),
+ DT_CLK(NULL, "sysclk_div_ck", "sysclk_div_ck"),
+ DT_CLK(NULL, "cpsw_125mhz_gclk", "cpsw_125mhz_gclk"),
+ DT_CLK(NULL, "cpsw_cpts_rft_clk", "cpsw_cpts_rft_clk"),
+ DT_CLK(NULL, "gpio0_dbclk_mux_ck", "gpio0_dbclk_mux_ck"),
+ DT_CLK(NULL, "gpio0_dbclk", "gpio0_dbclk"),
+ DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
+ DT_CLK(NULL, "gpio2_dbclk", "gpio2_dbclk"),
+ DT_CLK(NULL, "gpio3_dbclk", "gpio3_dbclk"),
+ DT_CLK(NULL, "lcd_gclk", "lcd_gclk"),
+ DT_CLK(NULL, "mmc_clk", "mmc_clk"),
+ DT_CLK(NULL, "gfx_fclk_clksel_ck", "gfx_fclk_clksel_ck"),
+ DT_CLK(NULL, "gfx_fck_div_ck", "gfx_fck_div_ck"),
+ DT_CLK(NULL, "sysclkout_pre_ck", "sysclkout_pre_ck"),
+ DT_CLK(NULL, "clkout2_div_ck", "clkout2_div_ck"),
+ DT_CLK(NULL, "timer_32k_ck", "clkdiv32k_ick"),
+ DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK(NULL, "dbg_sysclk_ck", "dbg_sysclk_ck"),
+ DT_CLK(NULL, "dbg_clka_ck", "dbg_clka_ck"),
+ DT_CLK(NULL, "stm_pmd_clock_mux_ck", "stm_pmd_clock_mux_ck"),
+ DT_CLK(NULL, "trace_pmd_clk_mux_ck", "trace_pmd_clk_mux_ck"),
+ DT_CLK(NULL, "stm_clk_div_ck", "stm_clk_div_ck"),
+ DT_CLK(NULL, "trace_clk_div_ck", "trace_clk_div_ck"),
+ DT_CLK(NULL, "clkout2_ck", "clkout2_ck"),
+ DT_CLK("48300200.ehrpwm", "tbclk", "ehrpwm0_tbclk"),
+ DT_CLK("48302200.ehrpwm", "tbclk", "ehrpwm1_tbclk"),
+ DT_CLK("48304200.ehrpwm", "tbclk", "ehrpwm2_tbclk"),
+ { .node_name = NULL },
+};
+
+static const char *enable_init_clks[] = {
+ "dpll_ddr_m2_ck",
+ "dpll_mpu_m2_ck",
+ "l3_gclk",
+ "l4hs_gclk",
+ "l4fw_gclk",
+ "l4ls_gclk",
+ /* Required for external peripherals like audio codecs */
+ "clkout2_ck",
+};
+
+int __init am33xx_dt_clk_init(void)
+{
+ struct clk *clk1, *clk2;
+
+ ti_dt_clocks_register(am33xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
+ /* TRM ERRATA: The default parent of timers 3 & 6 (TCLKIN) may not
+ * always be physically present; in that case HWMOD enabling of the
+ * clock fails with the default parent, while the timer probe assumes
+ * the clock is already enabled, leading to a crash when the timer 3 & 6
+ * registers are accessed in probe. Fix this by setting the parent of
+ * both timers to the master oscillator clock.
+ */
+
+ clk1 = clk_get_sys(NULL, "sys_clkin_ck");
+ clk2 = clk_get_sys(NULL, "timer3_fck");
+ clk_set_parent(clk2, clk1);
+
+ clk2 = clk_get_sys(NULL, "timer6_fck");
+ clk_set_parent(clk2, clk1);
+ /*
+ * The on-chip 32K RC oscillator is not an accurate clock source per the
+ * design/spec; as a result a timer programmed to expire at 60 s may, for
+ * example, fire at around 40 s, which no use case expects. Switch the
+ * WDT1 clock source to the PRCM 32 kHz clock instead.
+ */
+ clk1 = clk_get_sys(NULL, "wdt1_fck");
+ clk2 = clk_get_sys(NULL, "clkdiv32k_ick");
+ clk_set_parent(clk1, clk2);
+
+ return 0;
+}
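For illustration only (not part of this patch), the clk_get_sys()/clk_set_parent() reparenting done in am33xx_dt_clk_init() could be wrapped with error checking like this; the helper name is hypothetical, the clock names are the ones used above:

/*
 * Hedged sketch (not part of this patch): the reparenting pattern from
 * am33xx_dt_clk_init() with error checking added.
 */
#include <linux/clk.h>
#include <linux/err.h>

static int __init example_reparent(const char *child, const char *parent)
{
	struct clk *c = clk_get_sys(NULL, child);
	struct clk *p = clk_get_sys(NULL, parent);

	if (IS_ERR(c) || IS_ERR(p))
		return -ENODEV;

	return clk_set_parent(c, p);
}

/* e.g.: example_reparent("timer3_fck", "sys_clkin_ck"); */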
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
new file mode 100644
index 000000000000..d3230234f07b
--- /dev/null
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -0,0 +1,401 @@
+/*
+ * OMAP3 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+
+static struct ti_dt_clk omap3xxx_clks[] = {
+ DT_CLK(NULL, "apb_pclk", "dummy_apb_pclk"),
+ DT_CLK(NULL, "omap_32k_fck", "omap_32k_fck"),
+ DT_CLK(NULL, "virt_12m_ck", "virt_12m_ck"),
+ DT_CLK(NULL, "virt_13m_ck", "virt_13m_ck"),
+ DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+ DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+ DT_CLK(NULL, "virt_38_4m_ck", "virt_38_4m_ck"),
+ DT_CLK(NULL, "osc_sys_ck", "osc_sys_ck"),
+ DT_CLK("twl", "fck", "osc_sys_ck"),
+ DT_CLK(NULL, "sys_ck", "sys_ck"),
+ DT_CLK(NULL, "omap_96m_alwon_fck", "omap_96m_alwon_fck"),
+ DT_CLK("etb", "emu_core_alwon_ck", "emu_core_alwon_ck"),
+ DT_CLK(NULL, "sys_altclk", "sys_altclk"),
+ DT_CLK(NULL, "mcbsp_clks", "mcbsp_clks"),
+ DT_CLK(NULL, "sys_clkout1", "sys_clkout1"),
+ DT_CLK(NULL, "dpll1_ck", "dpll1_ck"),
+ DT_CLK(NULL, "dpll1_x2_ck", "dpll1_x2_ck"),
+ DT_CLK(NULL, "dpll1_x2m2_ck", "dpll1_x2m2_ck"),
+ DT_CLK(NULL, "dpll3_ck", "dpll3_ck"),
+ DT_CLK(NULL, "core_ck", "core_ck"),
+ DT_CLK(NULL, "dpll3_x2_ck", "dpll3_x2_ck"),
+ DT_CLK(NULL, "dpll3_m2_ck", "dpll3_m2_ck"),
+ DT_CLK(NULL, "dpll3_m2x2_ck", "dpll3_m2x2_ck"),
+ DT_CLK(NULL, "dpll3_m3_ck", "dpll3_m3_ck"),
+ DT_CLK(NULL, "dpll3_m3x2_ck", "dpll3_m3x2_ck"),
+ DT_CLK(NULL, "dpll4_ck", "dpll4_ck"),
+ DT_CLK(NULL, "dpll4_x2_ck", "dpll4_x2_ck"),
+ DT_CLK(NULL, "omap_96m_fck", "omap_96m_fck"),
+ DT_CLK(NULL, "cm_96m_fck", "cm_96m_fck"),
+ DT_CLK(NULL, "omap_54m_fck", "omap_54m_fck"),
+ DT_CLK(NULL, "omap_48m_fck", "omap_48m_fck"),
+ DT_CLK(NULL, "omap_12m_fck", "omap_12m_fck"),
+ DT_CLK(NULL, "dpll4_m2_ck", "dpll4_m2_ck"),
+ DT_CLK(NULL, "dpll4_m2x2_ck", "dpll4_m2x2_ck"),
+ DT_CLK(NULL, "dpll4_m3_ck", "dpll4_m3_ck"),
+ DT_CLK(NULL, "dpll4_m3x2_ck", "dpll4_m3x2_ck"),
+ DT_CLK(NULL, "dpll4_m4_ck", "dpll4_m4_ck"),
+ DT_CLK(NULL, "dpll4_m4x2_ck", "dpll4_m4x2_ck"),
+ DT_CLK(NULL, "dpll4_m5_ck", "dpll4_m5_ck"),
+ DT_CLK(NULL, "dpll4_m5x2_ck", "dpll4_m5x2_ck"),
+ DT_CLK(NULL, "dpll4_m6_ck", "dpll4_m6_ck"),
+ DT_CLK(NULL, "dpll4_m6x2_ck", "dpll4_m6x2_ck"),
+ DT_CLK("etb", "emu_per_alwon_ck", "emu_per_alwon_ck"),
+ DT_CLK(NULL, "clkout2_src_ck", "clkout2_src_ck"),
+ DT_CLK(NULL, "sys_clkout2", "sys_clkout2"),
+ DT_CLK(NULL, "corex2_fck", "corex2_fck"),
+ DT_CLK(NULL, "dpll1_fck", "dpll1_fck"),
+ DT_CLK(NULL, "mpu_ck", "mpu_ck"),
+ DT_CLK(NULL, "arm_fck", "arm_fck"),
+ DT_CLK("etb", "emu_mpu_alwon_ck", "emu_mpu_alwon_ck"),
+ DT_CLK(NULL, "l3_ick", "l3_ick"),
+ DT_CLK(NULL, "l4_ick", "l4_ick"),
+ DT_CLK(NULL, "rm_ick", "rm_ick"),
+ DT_CLK(NULL, "gpt10_fck", "gpt10_fck"),
+ DT_CLK(NULL, "gpt11_fck", "gpt11_fck"),
+ DT_CLK(NULL, "core_96m_fck", "core_96m_fck"),
+ DT_CLK(NULL, "mmchs2_fck", "mmchs2_fck"),
+ DT_CLK(NULL, "mmchs1_fck", "mmchs1_fck"),
+ DT_CLK(NULL, "i2c3_fck", "i2c3_fck"),
+ DT_CLK(NULL, "i2c2_fck", "i2c2_fck"),
+ DT_CLK(NULL, "i2c1_fck", "i2c1_fck"),
+ DT_CLK(NULL, "mcbsp5_fck", "mcbsp5_fck"),
+ DT_CLK(NULL, "mcbsp1_fck", "mcbsp1_fck"),
+ DT_CLK(NULL, "core_48m_fck", "core_48m_fck"),
+ DT_CLK(NULL, "mcspi4_fck", "mcspi4_fck"),
+ DT_CLK(NULL, "mcspi3_fck", "mcspi3_fck"),
+ DT_CLK(NULL, "mcspi2_fck", "mcspi2_fck"),
+ DT_CLK(NULL, "mcspi1_fck", "mcspi1_fck"),
+ DT_CLK(NULL, "uart2_fck", "uart2_fck"),
+ DT_CLK(NULL, "uart1_fck", "uart1_fck"),
+ DT_CLK(NULL, "core_12m_fck", "core_12m_fck"),
+ DT_CLK("omap_hdq.0", "fck", "hdq_fck"),
+ DT_CLK(NULL, "hdq_fck", "hdq_fck"),
+ DT_CLK(NULL, "core_l3_ick", "core_l3_ick"),
+ DT_CLK(NULL, "sdrc_ick", "sdrc_ick"),
+ DT_CLK(NULL, "gpmc_fck", "gpmc_fck"),
+ DT_CLK(NULL, "core_l4_ick", "core_l4_ick"),
+ DT_CLK("omap_hsmmc.1", "ick", "mmchs2_ick"),
+ DT_CLK("omap_hsmmc.0", "ick", "mmchs1_ick"),
+ DT_CLK(NULL, "mmchs2_ick", "mmchs2_ick"),
+ DT_CLK(NULL, "mmchs1_ick", "mmchs1_ick"),
+ DT_CLK("omap_hdq.0", "ick", "hdq_ick"),
+ DT_CLK(NULL, "hdq_ick", "hdq_ick"),
+ DT_CLK("omap2_mcspi.4", "ick", "mcspi4_ick"),
+ DT_CLK("omap2_mcspi.3", "ick", "mcspi3_ick"),
+ DT_CLK("omap2_mcspi.2", "ick", "mcspi2_ick"),
+ DT_CLK("omap2_mcspi.1", "ick", "mcspi1_ick"),
+ DT_CLK(NULL, "mcspi4_ick", "mcspi4_ick"),
+ DT_CLK(NULL, "mcspi3_ick", "mcspi3_ick"),
+ DT_CLK(NULL, "mcspi2_ick", "mcspi2_ick"),
+ DT_CLK(NULL, "mcspi1_ick", "mcspi1_ick"),
+ DT_CLK("omap_i2c.3", "ick", "i2c3_ick"),
+ DT_CLK("omap_i2c.2", "ick", "i2c2_ick"),
+ DT_CLK("omap_i2c.1", "ick", "i2c1_ick"),
+ DT_CLK(NULL, "i2c3_ick", "i2c3_ick"),
+ DT_CLK(NULL, "i2c2_ick", "i2c2_ick"),
+ DT_CLK(NULL, "i2c1_ick", "i2c1_ick"),
+ DT_CLK(NULL, "uart2_ick", "uart2_ick"),
+ DT_CLK(NULL, "uart1_ick", "uart1_ick"),
+ DT_CLK(NULL, "gpt11_ick", "gpt11_ick"),
+ DT_CLK(NULL, "gpt10_ick", "gpt10_ick"),
+ DT_CLK("omap-mcbsp.5", "ick", "mcbsp5_ick"),
+ DT_CLK("omap-mcbsp.1", "ick", "mcbsp1_ick"),
+ DT_CLK(NULL, "mcbsp5_ick", "mcbsp5_ick"),
+ DT_CLK(NULL, "mcbsp1_ick", "mcbsp1_ick"),
+ DT_CLK(NULL, "omapctrl_ick", "omapctrl_ick"),
+ DT_CLK(NULL, "dss_tv_fck", "dss_tv_fck"),
+ DT_CLK(NULL, "dss_96m_fck", "dss_96m_fck"),
+ DT_CLK(NULL, "dss2_alwon_fck", "dss2_alwon_fck"),
+ DT_CLK(NULL, "utmi_p1_gfclk", "dummy_ck"),
+ DT_CLK(NULL, "utmi_p2_gfclk", "dummy_ck"),
+ DT_CLK(NULL, "xclk60mhsp1_ck", "dummy_ck"),
+ DT_CLK(NULL, "xclk60mhsp2_ck", "dummy_ck"),
+ DT_CLK(NULL, "init_60m_fclk", "dummy_ck"),
+ DT_CLK(NULL, "gpt1_fck", "gpt1_fck"),
+ DT_CLK(NULL, "aes2_ick", "aes2_ick"),
+ DT_CLK(NULL, "wkup_32k_fck", "wkup_32k_fck"),
+ DT_CLK(NULL, "gpio1_dbck", "gpio1_dbck"),
+ DT_CLK(NULL, "sha12_ick", "sha12_ick"),
+ DT_CLK(NULL, "wdt2_fck", "wdt2_fck"),
+ DT_CLK("omap_wdt", "ick", "wdt2_ick"),
+ DT_CLK(NULL, "wdt2_ick", "wdt2_ick"),
+ DT_CLK(NULL, "wdt1_ick", "wdt1_ick"),
+ DT_CLK(NULL, "gpio1_ick", "gpio1_ick"),
+ DT_CLK(NULL, "omap_32ksync_ick", "omap_32ksync_ick"),
+ DT_CLK(NULL, "gpt12_ick", "gpt12_ick"),
+ DT_CLK(NULL, "gpt1_ick", "gpt1_ick"),
+ DT_CLK(NULL, "per_96m_fck", "per_96m_fck"),
+ DT_CLK(NULL, "per_48m_fck", "per_48m_fck"),
+ DT_CLK(NULL, "uart3_fck", "uart3_fck"),
+ DT_CLK(NULL, "gpt2_fck", "gpt2_fck"),
+ DT_CLK(NULL, "gpt3_fck", "gpt3_fck"),
+ DT_CLK(NULL, "gpt4_fck", "gpt4_fck"),
+ DT_CLK(NULL, "gpt5_fck", "gpt5_fck"),
+ DT_CLK(NULL, "gpt6_fck", "gpt6_fck"),
+ DT_CLK(NULL, "gpt7_fck", "gpt7_fck"),
+ DT_CLK(NULL, "gpt8_fck", "gpt8_fck"),
+ DT_CLK(NULL, "gpt9_fck", "gpt9_fck"),
+ DT_CLK(NULL, "per_32k_alwon_fck", "per_32k_alwon_fck"),
+ DT_CLK(NULL, "gpio6_dbck", "gpio6_dbck"),
+ DT_CLK(NULL, "gpio5_dbck", "gpio5_dbck"),
+ DT_CLK(NULL, "gpio4_dbck", "gpio4_dbck"),
+ DT_CLK(NULL, "gpio3_dbck", "gpio3_dbck"),
+ DT_CLK(NULL, "gpio2_dbck", "gpio2_dbck"),
+ DT_CLK(NULL, "wdt3_fck", "wdt3_fck"),
+ DT_CLK(NULL, "per_l4_ick", "per_l4_ick"),
+ DT_CLK(NULL, "gpio6_ick", "gpio6_ick"),
+ DT_CLK(NULL, "gpio5_ick", "gpio5_ick"),
+ DT_CLK(NULL, "gpio4_ick", "gpio4_ick"),
+ DT_CLK(NULL, "gpio3_ick", "gpio3_ick"),
+ DT_CLK(NULL, "gpio2_ick", "gpio2_ick"),
+ DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
+ DT_CLK(NULL, "uart3_ick", "uart3_ick"),
+ DT_CLK(NULL, "uart4_ick", "uart4_ick"),
+ DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
+ DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
+ DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
+ DT_CLK(NULL, "gpt6_ick", "gpt6_ick"),
+ DT_CLK(NULL, "gpt5_ick", "gpt5_ick"),
+ DT_CLK(NULL, "gpt4_ick", "gpt4_ick"),
+ DT_CLK(NULL, "gpt3_ick", "gpt3_ick"),
+ DT_CLK(NULL, "gpt2_ick", "gpt2_ick"),
+ DT_CLK("omap-mcbsp.2", "ick", "mcbsp2_ick"),
+ DT_CLK("omap-mcbsp.3", "ick", "mcbsp3_ick"),
+ DT_CLK("omap-mcbsp.4", "ick", "mcbsp4_ick"),
+ DT_CLK(NULL, "mcbsp4_ick", "mcbsp2_ick"),
+ DT_CLK(NULL, "mcbsp3_ick", "mcbsp3_ick"),
+ DT_CLK(NULL, "mcbsp2_ick", "mcbsp4_ick"),
+ DT_CLK(NULL, "mcbsp2_fck", "mcbsp2_fck"),
+ DT_CLK(NULL, "mcbsp3_fck", "mcbsp3_fck"),
+ DT_CLK(NULL, "mcbsp4_fck", "mcbsp4_fck"),
+ DT_CLK("etb", "emu_src_ck", "emu_src_ck"),
+ DT_CLK(NULL, "emu_src_ck", "emu_src_ck"),
+ DT_CLK(NULL, "pclk_fck", "pclk_fck"),
+ DT_CLK(NULL, "pclkx2_fck", "pclkx2_fck"),
+ DT_CLK(NULL, "atclk_fck", "atclk_fck"),
+ DT_CLK(NULL, "traceclk_src_fck", "traceclk_src_fck"),
+ DT_CLK(NULL, "traceclk_fck", "traceclk_fck"),
+ DT_CLK(NULL, "secure_32k_fck", "secure_32k_fck"),
+ DT_CLK(NULL, "gpt12_fck", "gpt12_fck"),
+ DT_CLK(NULL, "wdt1_fck", "wdt1_fck"),
+ DT_CLK(NULL, "timer_32k_ck", "omap_32k_fck"),
+ DT_CLK(NULL, "timer_sys_ck", "sys_ck"),
+ DT_CLK(NULL, "cpufreq_ck", "dpll1_ck"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap34xx_omap36xx_clks[] = {
+ DT_CLK(NULL, "aes1_ick", "aes1_ick"),
+ DT_CLK("omap_rng", "ick", "rng_ick"),
+ DT_CLK("omap3-rom-rng", "ick", "rng_ick"),
+ DT_CLK(NULL, "sha11_ick", "sha11_ick"),
+ DT_CLK(NULL, "des1_ick", "des1_ick"),
+ DT_CLK(NULL, "cam_mclk", "cam_mclk"),
+ DT_CLK(NULL, "cam_ick", "cam_ick"),
+ DT_CLK(NULL, "csi2_96m_fck", "csi2_96m_fck"),
+ DT_CLK(NULL, "security_l3_ick", "security_l3_ick"),
+ DT_CLK(NULL, "pka_ick", "pka_ick"),
+ DT_CLK(NULL, "icr_ick", "icr_ick"),
+ DT_CLK("omap-aes", "ick", "aes2_ick"),
+ DT_CLK("omap-sham", "ick", "sha12_ick"),
+ DT_CLK(NULL, "des2_ick", "des2_ick"),
+ DT_CLK(NULL, "mspro_ick", "mspro_ick"),
+ DT_CLK(NULL, "mailboxes_ick", "mailboxes_ick"),
+ DT_CLK(NULL, "ssi_l4_ick", "ssi_l4_ick"),
+ DT_CLK(NULL, "sr1_fck", "sr1_fck"),
+ DT_CLK(NULL, "sr2_fck", "sr2_fck"),
+ DT_CLK(NULL, "sr_l4_ick", "sr_l4_ick"),
+ DT_CLK(NULL, "security_l4_ick2", "security_l4_ick2"),
+ DT_CLK(NULL, "wkup_l4_ick", "wkup_l4_ick"),
+ DT_CLK(NULL, "dpll2_fck", "dpll2_fck"),
+ DT_CLK(NULL, "iva2_ck", "iva2_ck"),
+ DT_CLK(NULL, "modem_fck", "modem_fck"),
+ DT_CLK(NULL, "sad2d_ick", "sad2d_ick"),
+ DT_CLK(NULL, "mad2d_ick", "mad2d_ick"),
+ DT_CLK(NULL, "mspro_fck", "mspro_fck"),
+ DT_CLK(NULL, "dpll2_ck", "dpll2_ck"),
+ DT_CLK(NULL, "dpll2_m2_ck", "dpll2_m2_ck"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap36xx_omap3430es2plus_clks[] = {
+ DT_CLK(NULL, "ssi_ssr_fck", "ssi_ssr_fck_3430es2"),
+ DT_CLK(NULL, "ssi_sst_fck", "ssi_sst_fck_3430es2"),
+ DT_CLK("musb-omap2430", "ick", "hsotgusb_ick_3430es2"),
+ DT_CLK(NULL, "hsotgusb_ick", "hsotgusb_ick_3430es2"),
+ DT_CLK(NULL, "ssi_ick", "ssi_ick_3430es2"),
+ DT_CLK(NULL, "usim_fck", "usim_fck"),
+ DT_CLK(NULL, "usim_ick", "usim_ick"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap3430es1_clks[] = {
+ DT_CLK(NULL, "gfx_l3_ck", "gfx_l3_ck"),
+ DT_CLK(NULL, "gfx_l3_fck", "gfx_l3_fck"),
+ DT_CLK(NULL, "gfx_l3_ick", "gfx_l3_ick"),
+ DT_CLK(NULL, "gfx_cg1_ck", "gfx_cg1_ck"),
+ DT_CLK(NULL, "gfx_cg2_ck", "gfx_cg2_ck"),
+ DT_CLK(NULL, "d2d_26m_fck", "d2d_26m_fck"),
+ DT_CLK(NULL, "fshostusb_fck", "fshostusb_fck"),
+ DT_CLK(NULL, "ssi_ssr_fck", "ssi_ssr_fck_3430es1"),
+ DT_CLK(NULL, "ssi_sst_fck", "ssi_sst_fck_3430es1"),
+ DT_CLK("musb-omap2430", "ick", "hsotgusb_ick_3430es1"),
+ DT_CLK(NULL, "hsotgusb_ick", "hsotgusb_ick_3430es1"),
+ DT_CLK(NULL, "fac_ick", "fac_ick"),
+ DT_CLK(NULL, "ssi_ick", "ssi_ick_3430es1"),
+ DT_CLK(NULL, "usb_l4_ick", "usb_l4_ick"),
+ DT_CLK(NULL, "dss1_alwon_fck", "dss1_alwon_fck_3430es1"),
+ DT_CLK("omapdss_dss", "ick", "dss_ick_3430es1"),
+ DT_CLK(NULL, "dss_ick", "dss_ick_3430es1"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap36xx_am35xx_omap3430es2plus_clks[] = {
+ DT_CLK(NULL, "virt_16_8m_ck", "virt_16_8m_ck"),
+ DT_CLK(NULL, "dpll5_ck", "dpll5_ck"),
+ DT_CLK(NULL, "dpll5_m2_ck", "dpll5_m2_ck"),
+ DT_CLK(NULL, "sgx_fck", "sgx_fck"),
+ DT_CLK(NULL, "sgx_ick", "sgx_ick"),
+ DT_CLK(NULL, "cpefuse_fck", "cpefuse_fck"),
+ DT_CLK(NULL, "ts_fck", "ts_fck"),
+ DT_CLK(NULL, "usbtll_fck", "usbtll_fck"),
+ DT_CLK(NULL, "usbtll_ick", "usbtll_ick"),
+ DT_CLK("omap_hsmmc.2", "ick", "mmchs3_ick"),
+ DT_CLK(NULL, "mmchs3_ick", "mmchs3_ick"),
+ DT_CLK(NULL, "mmchs3_fck", "mmchs3_fck"),
+ DT_CLK(NULL, "dss1_alwon_fck", "dss1_alwon_fck_3430es2"),
+ DT_CLK("omapdss_dss", "ick", "dss_ick_3430es2"),
+ DT_CLK(NULL, "dss_ick", "dss_ick_3430es2"),
+ DT_CLK(NULL, "usbhost_120m_fck", "usbhost_120m_fck"),
+ DT_CLK(NULL, "usbhost_48m_fck", "usbhost_48m_fck"),
+ DT_CLK(NULL, "usbhost_ick", "usbhost_ick"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk am35xx_clks[] = {
+ DT_CLK(NULL, "ipss_ick", "ipss_ick"),
+ DT_CLK(NULL, "rmii_ck", "rmii_ck"),
+ DT_CLK(NULL, "pclk_ck", "pclk_ck"),
+ DT_CLK(NULL, "emac_ick", "emac_ick"),
+ DT_CLK(NULL, "emac_fck", "emac_fck"),
+ DT_CLK("davinci_emac.0", NULL, "emac_ick"),
+ DT_CLK("davinci_mdio.0", NULL, "emac_fck"),
+ DT_CLK("vpfe-capture", "master", "vpfe_ick"),
+ DT_CLK("vpfe-capture", "slave", "vpfe_fck"),
+ DT_CLK(NULL, "hsotgusb_ick", "hsotgusb_ick_am35xx"),
+ DT_CLK(NULL, "hsotgusb_fck", "hsotgusb_fck_am35xx"),
+ DT_CLK(NULL, "hecc_ck", "hecc_ck"),
+ DT_CLK(NULL, "uart4_ick", "uart4_ick_am35xx"),
+ DT_CLK(NULL, "uart4_fck", "uart4_fck_am35xx"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap36xx_clks[] = {
+ DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"),
+ DT_CLK(NULL, "uart4_fck", "uart4_fck"),
+ { .node_name = NULL },
+};
+
+static const char *enable_init_clks[] = {
+ "sdrc_ick",
+ "gpmc_fck",
+ "omapctrl_ick",
+};
+
+enum {
+ OMAP3_SOC_AM35XX,
+ OMAP3_SOC_OMAP3430_ES1,
+ OMAP3_SOC_OMAP3430_ES2_PLUS,
+ OMAP3_SOC_OMAP3630,
+ OMAP3_SOC_TI81XX,
+};
+
+static int __init omap3xxx_dt_clk_init(int soc_type)
+{
+ if (soc_type == OMAP3_SOC_AM35XX || soc_type == OMAP3_SOC_OMAP3630 ||
+ soc_type == OMAP3_SOC_OMAP3430_ES1 ||
+ soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS)
+ ti_dt_clocks_register(omap3xxx_clks);
+
+ if (soc_type == OMAP3_SOC_AM35XX)
+ ti_dt_clocks_register(am35xx_clks);
+
+ if (soc_type == OMAP3_SOC_OMAP3630 || soc_type == OMAP3_SOC_AM35XX ||
+ soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS)
+ ti_dt_clocks_register(omap36xx_am35xx_omap3430es2plus_clks);
+
+ if (soc_type == OMAP3_SOC_OMAP3430_ES1)
+ ti_dt_clocks_register(omap3430es1_clks);
+
+ if (soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS ||
+ soc_type == OMAP3_SOC_OMAP3630)
+ ti_dt_clocks_register(omap36xx_omap3430es2plus_clks);
+
+ if (soc_type == OMAP3_SOC_OMAP3430_ES1 ||
+ soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS ||
+ soc_type == OMAP3_SOC_OMAP3630)
+ ti_dt_clocks_register(omap34xx_omap36xx_clks);
+
+ if (soc_type == OMAP3_SOC_OMAP3630)
+ ti_dt_clocks_register(omap36xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
+ pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
+ (clk_get_rate(clk_get_sys(NULL, "osc_sys_ck")) / 1000000),
+ (clk_get_rate(clk_get_sys(NULL, "osc_sys_ck")) / 100000) % 10,
+ (clk_get_rate(clk_get_sys(NULL, "core_ck")) / 1000000),
+ (clk_get_rate(clk_get_sys(NULL, "arm_fck")) / 1000000));
+
+ if (soc_type != OMAP3_SOC_TI81XX && soc_type != OMAP3_SOC_OMAP3430_ES1)
+ omap3_clk_lock_dpll5();
+
+ return 0;
+}
+
+int __init omap3430_dt_clk_init(void)
+{
+ return omap3xxx_dt_clk_init(OMAP3_SOC_OMAP3430_ES2_PLUS);
+}
+
+int __init omap3630_dt_clk_init(void)
+{
+ return omap3xxx_dt_clk_init(OMAP3_SOC_OMAP3630);
+}
+
+int __init am35xx_dt_clk_init(void)
+{
+ return omap3xxx_dt_clk_init(OMAP3_SOC_AM35XX);
+}
+
+int __init ti81xx_dt_clk_init(void)
+{
+ return omap3xxx_dt_clk_init(OMAP3_SOC_TI81XX);
+}
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
new file mode 100644
index 000000000000..67c8de572c50
--- /dev/null
+++ b/drivers/clk/ti/clk-43xx.c
@@ -0,0 +1,118 @@
+/*
+ * AM43XX Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+static struct ti_dt_clk am43xx_clks[] = {
+ DT_CLK(NULL, "clk_32768_ck", "clk_32768_ck"),
+ DT_CLK(NULL, "clk_rc32k_ck", "clk_rc32k_ck"),
+ DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+ DT_CLK(NULL, "virt_24000000_ck", "virt_24000000_ck"),
+ DT_CLK(NULL, "virt_25000000_ck", "virt_25000000_ck"),
+ DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+ DT_CLK(NULL, "sys_clkin_ck", "sys_clkin_ck"),
+ DT_CLK(NULL, "tclkin_ck", "tclkin_ck"),
+ DT_CLK(NULL, "dpll_core_ck", "dpll_core_ck"),
+ DT_CLK(NULL, "dpll_core_x2_ck", "dpll_core_x2_ck"),
+ DT_CLK(NULL, "dpll_core_m4_ck", "dpll_core_m4_ck"),
+ DT_CLK(NULL, "dpll_core_m5_ck", "dpll_core_m5_ck"),
+ DT_CLK(NULL, "dpll_core_m6_ck", "dpll_core_m6_ck"),
+ DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
+ DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
+ DT_CLK(NULL, "dpll_ddr_ck", "dpll_ddr_ck"),
+ DT_CLK(NULL, "dpll_ddr_m2_ck", "dpll_ddr_m2_ck"),
+ DT_CLK(NULL, "dpll_disp_ck", "dpll_disp_ck"),
+ DT_CLK(NULL, "dpll_disp_m2_ck", "dpll_disp_m2_ck"),
+ DT_CLK(NULL, "dpll_per_ck", "dpll_per_ck"),
+ DT_CLK(NULL, "dpll_per_m2_ck", "dpll_per_m2_ck"),
+ DT_CLK(NULL, "dpll_per_m2_div4_wkupdm_ck", "dpll_per_m2_div4_wkupdm_ck"),
+ DT_CLK(NULL, "dpll_per_m2_div4_ck", "dpll_per_m2_div4_ck"),
+ DT_CLK(NULL, "adc_tsc_fck", "adc_tsc_fck"),
+ DT_CLK(NULL, "clkdiv32k_ck", "clkdiv32k_ck"),
+ DT_CLK(NULL, "clkdiv32k_ick", "clkdiv32k_ick"),
+ DT_CLK(NULL, "dcan0_fck", "dcan0_fck"),
+ DT_CLK(NULL, "dcan1_fck", "dcan1_fck"),
+ DT_CLK(NULL, "pruss_ocp_gclk", "pruss_ocp_gclk"),
+ DT_CLK(NULL, "mcasp0_fck", "mcasp0_fck"),
+ DT_CLK(NULL, "mcasp1_fck", "mcasp1_fck"),
+ DT_CLK(NULL, "smartreflex0_fck", "smartreflex0_fck"),
+ DT_CLK(NULL, "smartreflex1_fck", "smartreflex1_fck"),
+ DT_CLK(NULL, "sha0_fck", "sha0_fck"),
+ DT_CLK(NULL, "aes0_fck", "aes0_fck"),
+ DT_CLK(NULL, "timer1_fck", "timer1_fck"),
+ DT_CLK(NULL, "timer2_fck", "timer2_fck"),
+ DT_CLK(NULL, "timer3_fck", "timer3_fck"),
+ DT_CLK(NULL, "timer4_fck", "timer4_fck"),
+ DT_CLK(NULL, "timer5_fck", "timer5_fck"),
+ DT_CLK(NULL, "timer6_fck", "timer6_fck"),
+ DT_CLK(NULL, "timer7_fck", "timer7_fck"),
+ DT_CLK(NULL, "wdt1_fck", "wdt1_fck"),
+ DT_CLK(NULL, "l3_gclk", "l3_gclk"),
+ DT_CLK(NULL, "dpll_core_m4_div2_ck", "dpll_core_m4_div2_ck"),
+ DT_CLK(NULL, "l4hs_gclk", "l4hs_gclk"),
+ DT_CLK(NULL, "l3s_gclk", "l3s_gclk"),
+ DT_CLK(NULL, "l4ls_gclk", "l4ls_gclk"),
+ DT_CLK(NULL, "clk_24mhz", "clk_24mhz"),
+ DT_CLK(NULL, "cpsw_125mhz_gclk", "cpsw_125mhz_gclk"),
+ DT_CLK(NULL, "cpsw_cpts_rft_clk", "cpsw_cpts_rft_clk"),
+ DT_CLK(NULL, "gpio0_dbclk_mux_ck", "gpio0_dbclk_mux_ck"),
+ DT_CLK(NULL, "gpio0_dbclk", "gpio0_dbclk"),
+ DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
+ DT_CLK(NULL, "gpio2_dbclk", "gpio2_dbclk"),
+ DT_CLK(NULL, "gpio3_dbclk", "gpio3_dbclk"),
+ DT_CLK(NULL, "gpio4_dbclk", "gpio4_dbclk"),
+ DT_CLK(NULL, "gpio5_dbclk", "gpio5_dbclk"),
+ DT_CLK(NULL, "mmc_clk", "mmc_clk"),
+ DT_CLK(NULL, "gfx_fclk_clksel_ck", "gfx_fclk_clksel_ck"),
+ DT_CLK(NULL, "gfx_fck_div_ck", "gfx_fck_div_ck"),
+ DT_CLK(NULL, "timer_32k_ck", "clkdiv32k_ick"),
+ DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK(NULL, "sysclk_div", "sysclk_div"),
+ DT_CLK(NULL, "disp_clk", "disp_clk"),
+ DT_CLK(NULL, "clk_32k_mosc_ck", "clk_32k_mosc_ck"),
+ DT_CLK(NULL, "clk_32k_tpm_ck", "clk_32k_tpm_ck"),
+ DT_CLK(NULL, "dpll_extdev_ck", "dpll_extdev_ck"),
+ DT_CLK(NULL, "dpll_extdev_m2_ck", "dpll_extdev_m2_ck"),
+ DT_CLK(NULL, "mux_synctimer32k_ck", "mux_synctimer32k_ck"),
+ DT_CLK(NULL, "synctimer_32kclk", "synctimer_32kclk"),
+ DT_CLK(NULL, "timer8_fck", "timer8_fck"),
+ DT_CLK(NULL, "timer9_fck", "timer9_fck"),
+ DT_CLK(NULL, "timer10_fck", "timer10_fck"),
+ DT_CLK(NULL, "timer11_fck", "timer11_fck"),
+ DT_CLK(NULL, "cpsw_50m_clkdiv", "cpsw_50m_clkdiv"),
+ DT_CLK(NULL, "cpsw_5m_clkdiv", "cpsw_5m_clkdiv"),
+ DT_CLK(NULL, "dpll_ddr_x2_ck", "dpll_ddr_x2_ck"),
+ DT_CLK(NULL, "dpll_ddr_m4_ck", "dpll_ddr_m4_ck"),
+ DT_CLK(NULL, "dpll_per_clkdcoldo", "dpll_per_clkdcoldo"),
+ DT_CLK(NULL, "dll_aging_clk_div", "dll_aging_clk_div"),
+ DT_CLK(NULL, "div_core_25m_ck", "div_core_25m_ck"),
+ DT_CLK(NULL, "func_12m_clk", "func_12m_clk"),
+ DT_CLK(NULL, "vtp_clk_div", "vtp_clk_div"),
+ DT_CLK(NULL, "usbphy_32khz_clkmux", "usbphy_32khz_clkmux"),
+ { .node_name = NULL },
+};
+
+int __init am43xx_dt_clk_init(void)
+{
+ ti_dt_clocks_register(am43xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ return 0;
+}
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
new file mode 100644
index 000000000000..ae00218b5da3
--- /dev/null
+++ b/drivers/clk/ti/clk-44xx.c
@@ -0,0 +1,316 @@
+/*
+ * OMAP4 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-private.h>
+#include <linux/clkdev.h>
+#include <linux/clk/ti.h>
+
+/*
+ * OMAP4 ABE DPLL default frequency. In OMAP4460 TRM version V, section
+ * "3.6.3.2.3 CM1_ABE Clock Generator" states that the "DPLL_ABE_X2_CLK
+ * must be set to 196.608 MHz" and hence, the DPLL locked frequency is
+ * half of this value.
+ */
+#define OMAP4_DPLL_ABE_DEFFREQ 98304000
+
+/*
+ * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section
+ * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred
+ * locked frequency for the USB DPLL is 960MHz.
+ */
+#define OMAP4_DPLL_USB_DEFFREQ 960000000
+
+static struct ti_dt_clk omap44xx_clks[] = {
+ DT_CLK(NULL, "extalt_clkin_ck", "extalt_clkin_ck"),
+ DT_CLK(NULL, "pad_clks_src_ck", "pad_clks_src_ck"),
+ DT_CLK(NULL, "pad_clks_ck", "pad_clks_ck"),
+ DT_CLK(NULL, "pad_slimbus_core_clks_ck", "pad_slimbus_core_clks_ck"),
+ DT_CLK(NULL, "secure_32k_clk_src_ck", "secure_32k_clk_src_ck"),
+ DT_CLK(NULL, "slimbus_src_clk", "slimbus_src_clk"),
+ DT_CLK(NULL, "slimbus_clk", "slimbus_clk"),
+ DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
+ DT_CLK(NULL, "virt_12000000_ck", "virt_12000000_ck"),
+ DT_CLK(NULL, "virt_13000000_ck", "virt_13000000_ck"),
+ DT_CLK(NULL, "virt_16800000_ck", "virt_16800000_ck"),
+ DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+ DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+ DT_CLK(NULL, "virt_27000000_ck", "virt_27000000_ck"),
+ DT_CLK(NULL, "virt_38400000_ck", "virt_38400000_ck"),
+ DT_CLK(NULL, "sys_clkin_ck", "sys_clkin_ck"),
+ DT_CLK(NULL, "tie_low_clock_ck", "tie_low_clock_ck"),
+ DT_CLK(NULL, "utmi_phy_clkout_ck", "utmi_phy_clkout_ck"),
+ DT_CLK(NULL, "xclk60mhsp1_ck", "xclk60mhsp1_ck"),
+ DT_CLK(NULL, "xclk60mhsp2_ck", "xclk60mhsp2_ck"),
+ DT_CLK(NULL, "xclk60motg_ck", "xclk60motg_ck"),
+ DT_CLK(NULL, "abe_dpll_bypass_clk_mux_ck", "abe_dpll_bypass_clk_mux_ck"),
+ DT_CLK(NULL, "abe_dpll_refclk_mux_ck", "abe_dpll_refclk_mux_ck"),
+ DT_CLK(NULL, "dpll_abe_ck", "dpll_abe_ck"),
+ DT_CLK(NULL, "dpll_abe_x2_ck", "dpll_abe_x2_ck"),
+ DT_CLK(NULL, "dpll_abe_m2x2_ck", "dpll_abe_m2x2_ck"),
+ DT_CLK(NULL, "abe_24m_fclk", "abe_24m_fclk"),
+ DT_CLK(NULL, "abe_clk", "abe_clk"),
+ DT_CLK(NULL, "aess_fclk", "aess_fclk"),
+ DT_CLK(NULL, "dpll_abe_m3x2_ck", "dpll_abe_m3x2_ck"),
+ DT_CLK(NULL, "core_hsd_byp_clk_mux_ck", "core_hsd_byp_clk_mux_ck"),
+ DT_CLK(NULL, "dpll_core_ck", "dpll_core_ck"),
+ DT_CLK(NULL, "dpll_core_x2_ck", "dpll_core_x2_ck"),
+ DT_CLK(NULL, "dpll_core_m6x2_ck", "dpll_core_m6x2_ck"),
+ DT_CLK(NULL, "dbgclk_mux_ck", "dbgclk_mux_ck"),
+ DT_CLK(NULL, "dpll_core_m2_ck", "dpll_core_m2_ck"),
+ DT_CLK(NULL, "ddrphy_ck", "ddrphy_ck"),
+ DT_CLK(NULL, "dpll_core_m5x2_ck", "dpll_core_m5x2_ck"),
+ DT_CLK(NULL, "div_core_ck", "div_core_ck"),
+ DT_CLK(NULL, "div_iva_hs_clk", "div_iva_hs_clk"),
+ DT_CLK(NULL, "div_mpu_hs_clk", "div_mpu_hs_clk"),
+ DT_CLK(NULL, "dpll_core_m4x2_ck", "dpll_core_m4x2_ck"),
+ DT_CLK(NULL, "dll_clk_div_ck", "dll_clk_div_ck"),
+ DT_CLK(NULL, "dpll_abe_m2_ck", "dpll_abe_m2_ck"),
+ DT_CLK(NULL, "dpll_core_m3x2_ck", "dpll_core_m3x2_ck"),
+ DT_CLK(NULL, "dpll_core_m7x2_ck", "dpll_core_m7x2_ck"),
+ DT_CLK(NULL, "iva_hsd_byp_clk_mux_ck", "iva_hsd_byp_clk_mux_ck"),
+ DT_CLK(NULL, "dpll_iva_ck", "dpll_iva_ck"),
+ DT_CLK(NULL, "dpll_iva_x2_ck", "dpll_iva_x2_ck"),
+ DT_CLK(NULL, "dpll_iva_m4x2_ck", "dpll_iva_m4x2_ck"),
+ DT_CLK(NULL, "dpll_iva_m5x2_ck", "dpll_iva_m5x2_ck"),
+ DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
+ DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
+ DT_CLK(NULL, "per_hs_clk_div_ck", "per_hs_clk_div_ck"),
+ DT_CLK(NULL, "per_hsd_byp_clk_mux_ck", "per_hsd_byp_clk_mux_ck"),
+ DT_CLK(NULL, "dpll_per_ck", "dpll_per_ck"),
+ DT_CLK(NULL, "dpll_per_m2_ck", "dpll_per_m2_ck"),
+ DT_CLK(NULL, "dpll_per_x2_ck", "dpll_per_x2_ck"),
+ DT_CLK(NULL, "dpll_per_m2x2_ck", "dpll_per_m2x2_ck"),
+ DT_CLK(NULL, "dpll_per_m3x2_ck", "dpll_per_m3x2_ck"),
+ DT_CLK(NULL, "dpll_per_m4x2_ck", "dpll_per_m4x2_ck"),
+ DT_CLK(NULL, "dpll_per_m5x2_ck", "dpll_per_m5x2_ck"),
+ DT_CLK(NULL, "dpll_per_m6x2_ck", "dpll_per_m6x2_ck"),
+ DT_CLK(NULL, "dpll_per_m7x2_ck", "dpll_per_m7x2_ck"),
+ DT_CLK(NULL, "usb_hs_clk_div_ck", "usb_hs_clk_div_ck"),
+ DT_CLK(NULL, "dpll_usb_ck", "dpll_usb_ck"),
+ DT_CLK(NULL, "dpll_usb_clkdcoldo_ck", "dpll_usb_clkdcoldo_ck"),
+ DT_CLK(NULL, "dpll_usb_m2_ck", "dpll_usb_m2_ck"),
+ DT_CLK(NULL, "ducati_clk_mux_ck", "ducati_clk_mux_ck"),
+ DT_CLK(NULL, "func_12m_fclk", "func_12m_fclk"),
+ DT_CLK(NULL, "func_24m_clk", "func_24m_clk"),
+ DT_CLK(NULL, "func_24mc_fclk", "func_24mc_fclk"),
+ DT_CLK(NULL, "func_48m_fclk", "func_48m_fclk"),
+ DT_CLK(NULL, "func_48mc_fclk", "func_48mc_fclk"),
+ DT_CLK(NULL, "func_64m_fclk", "func_64m_fclk"),
+ DT_CLK(NULL, "func_96m_fclk", "func_96m_fclk"),
+ DT_CLK(NULL, "init_60m_fclk", "init_60m_fclk"),
+ DT_CLK(NULL, "l3_div_ck", "l3_div_ck"),
+ DT_CLK(NULL, "l4_div_ck", "l4_div_ck"),
+ DT_CLK(NULL, "lp_clk_div_ck", "lp_clk_div_ck"),
+ DT_CLK(NULL, "l4_wkup_clk_mux_ck", "l4_wkup_clk_mux_ck"),
+ DT_CLK("smp_twd", NULL, "mpu_periphclk"),
+ DT_CLK(NULL, "ocp_abe_iclk", "ocp_abe_iclk"),
+ DT_CLK(NULL, "per_abe_24m_fclk", "per_abe_24m_fclk"),
+ DT_CLK(NULL, "per_abe_nc_fclk", "per_abe_nc_fclk"),
+ DT_CLK(NULL, "syc_clk_div_ck", "syc_clk_div_ck"),
+ DT_CLK(NULL, "aes1_fck", "aes1_fck"),
+ DT_CLK(NULL, "aes2_fck", "aes2_fck"),
+ DT_CLK(NULL, "dmic_sync_mux_ck", "dmic_sync_mux_ck"),
+ DT_CLK(NULL, "func_dmic_abe_gfclk", "func_dmic_abe_gfclk"),
+ DT_CLK(NULL, "dss_sys_clk", "dss_sys_clk"),
+ DT_CLK(NULL, "dss_tv_clk", "dss_tv_clk"),
+ DT_CLK(NULL, "dss_dss_clk", "dss_dss_clk"),
+ DT_CLK(NULL, "dss_48mhz_clk", "dss_48mhz_clk"),
+ DT_CLK(NULL, "dss_fck", "dss_fck"),
+ DT_CLK("omapdss_dss", "ick", "dss_fck"),
+ DT_CLK(NULL, "fdif_fck", "fdif_fck"),
+ DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
+ DT_CLK(NULL, "gpio2_dbclk", "gpio2_dbclk"),
+ DT_CLK(NULL, "gpio3_dbclk", "gpio3_dbclk"),
+ DT_CLK(NULL, "gpio4_dbclk", "gpio4_dbclk"),
+ DT_CLK(NULL, "gpio5_dbclk", "gpio5_dbclk"),
+ DT_CLK(NULL, "gpio6_dbclk", "gpio6_dbclk"),
+ DT_CLK(NULL, "sgx_clk_mux", "sgx_clk_mux"),
+ DT_CLK(NULL, "hsi_fck", "hsi_fck"),
+ DT_CLK(NULL, "iss_ctrlclk", "iss_ctrlclk"),
+ DT_CLK(NULL, "mcasp_sync_mux_ck", "mcasp_sync_mux_ck"),
+ DT_CLK(NULL, "func_mcasp_abe_gfclk", "func_mcasp_abe_gfclk"),
+ DT_CLK(NULL, "mcbsp1_sync_mux_ck", "mcbsp1_sync_mux_ck"),
+ DT_CLK(NULL, "func_mcbsp1_gfclk", "func_mcbsp1_gfclk"),
+ DT_CLK(NULL, "mcbsp2_sync_mux_ck", "mcbsp2_sync_mux_ck"),
+ DT_CLK(NULL, "func_mcbsp2_gfclk", "func_mcbsp2_gfclk"),
+ DT_CLK(NULL, "mcbsp3_sync_mux_ck", "mcbsp3_sync_mux_ck"),
+ DT_CLK(NULL, "func_mcbsp3_gfclk", "func_mcbsp3_gfclk"),
+ DT_CLK(NULL, "mcbsp4_sync_mux_ck", "mcbsp4_sync_mux_ck"),
+ DT_CLK(NULL, "per_mcbsp4_gfclk", "per_mcbsp4_gfclk"),
+ DT_CLK(NULL, "hsmmc1_fclk", "hsmmc1_fclk"),
+ DT_CLK(NULL, "hsmmc2_fclk", "hsmmc2_fclk"),
+ DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "ocp2scp_usb_phy_phy_48m"),
+ DT_CLK(NULL, "sha2md5_fck", "sha2md5_fck"),
+ DT_CLK(NULL, "slimbus1_fclk_1", "slimbus1_fclk_1"),
+ DT_CLK(NULL, "slimbus1_fclk_0", "slimbus1_fclk_0"),
+ DT_CLK(NULL, "slimbus1_fclk_2", "slimbus1_fclk_2"),
+ DT_CLK(NULL, "slimbus1_slimbus_clk", "slimbus1_slimbus_clk"),
+ DT_CLK(NULL, "slimbus2_fclk_1", "slimbus2_fclk_1"),
+ DT_CLK(NULL, "slimbus2_fclk_0", "slimbus2_fclk_0"),
+ DT_CLK(NULL, "slimbus2_slimbus_clk", "slimbus2_slimbus_clk"),
+ DT_CLK(NULL, "smartreflex_core_fck", "smartreflex_core_fck"),
+ DT_CLK(NULL, "smartreflex_iva_fck", "smartreflex_iva_fck"),
+ DT_CLK(NULL, "smartreflex_mpu_fck", "smartreflex_mpu_fck"),
+ DT_CLK(NULL, "dmt1_clk_mux", "dmt1_clk_mux"),
+ DT_CLK(NULL, "cm2_dm10_mux", "cm2_dm10_mux"),
+ DT_CLK(NULL, "cm2_dm11_mux", "cm2_dm11_mux"),
+ DT_CLK(NULL, "cm2_dm2_mux", "cm2_dm2_mux"),
+ DT_CLK(NULL, "cm2_dm3_mux", "cm2_dm3_mux"),
+ DT_CLK(NULL, "cm2_dm4_mux", "cm2_dm4_mux"),
+ DT_CLK(NULL, "timer5_sync_mux", "timer5_sync_mux"),
+ DT_CLK(NULL, "timer6_sync_mux", "timer6_sync_mux"),
+ DT_CLK(NULL, "timer7_sync_mux", "timer7_sync_mux"),
+ DT_CLK(NULL, "timer8_sync_mux", "timer8_sync_mux"),
+ DT_CLK(NULL, "cm2_dm9_mux", "cm2_dm9_mux"),
+ DT_CLK(NULL, "usb_host_fs_fck", "usb_host_fs_fck"),
+ DT_CLK("usbhs_omap", "fs_fck", "usb_host_fs_fck"),
+ DT_CLK(NULL, "utmi_p1_gfclk", "utmi_p1_gfclk"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "usb_host_hs_utmi_p1_clk"),
+ DT_CLK(NULL, "utmi_p2_gfclk", "utmi_p2_gfclk"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "usb_host_hs_utmi_p2_clk"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "usb_host_hs_utmi_p3_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "usb_host_hs_hsic480m_p1_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "usb_host_hs_hsic60m_p1_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "usb_host_hs_hsic60m_p2_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "usb_host_hs_hsic480m_p2_clk"),
+ DT_CLK(NULL, "usb_host_hs_func48mclk", "usb_host_hs_func48mclk"),
+ DT_CLK(NULL, "usb_host_hs_fck", "usb_host_hs_fck"),
+ DT_CLK("usbhs_omap", "hs_fck", "usb_host_hs_fck"),
+ DT_CLK(NULL, "otg_60m_gfclk", "otg_60m_gfclk"),
+ DT_CLK(NULL, "usb_otg_hs_xclk", "usb_otg_hs_xclk"),
+ DT_CLK(NULL, "usb_otg_hs_ick", "usb_otg_hs_ick"),
+ DT_CLK("musb-omap2430", "ick", "usb_otg_hs_ick"),
+ DT_CLK(NULL, "usb_phy_cm_clk32k", "usb_phy_cm_clk32k"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "usb_tll_hs_usb_ch2_clk"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "usb_tll_hs_usb_ch0_clk"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "usb_tll_hs_usb_ch1_clk"),
+ DT_CLK(NULL, "usb_tll_hs_ick", "usb_tll_hs_ick"),
+ DT_CLK("usbhs_omap", "usbtll_ick", "usb_tll_hs_ick"),
+ DT_CLK("usbhs_tll", "usbtll_ick", "usb_tll_hs_ick"),
+ DT_CLK(NULL, "usim_ck", "usim_ck"),
+ DT_CLK(NULL, "usim_fclk", "usim_fclk"),
+ DT_CLK(NULL, "pmd_stm_clock_mux_ck", "pmd_stm_clock_mux_ck"),
+ DT_CLK(NULL, "pmd_trace_clk_mux_ck", "pmd_trace_clk_mux_ck"),
+ DT_CLK(NULL, "stm_clk_div_ck", "stm_clk_div_ck"),
+ DT_CLK(NULL, "trace_clk_div_ck", "trace_clk_div_ck"),
+ DT_CLK(NULL, "auxclk0_src_ck", "auxclk0_src_ck"),
+ DT_CLK(NULL, "auxclk0_ck", "auxclk0_ck"),
+ DT_CLK(NULL, "auxclkreq0_ck", "auxclkreq0_ck"),
+ DT_CLK(NULL, "auxclk1_src_ck", "auxclk1_src_ck"),
+ DT_CLK(NULL, "auxclk1_ck", "auxclk1_ck"),
+ DT_CLK(NULL, "auxclkreq1_ck", "auxclkreq1_ck"),
+ DT_CLK(NULL, "auxclk2_src_ck", "auxclk2_src_ck"),
+ DT_CLK(NULL, "auxclk2_ck", "auxclk2_ck"),
+ DT_CLK(NULL, "auxclkreq2_ck", "auxclkreq2_ck"),
+ DT_CLK(NULL, "auxclk3_src_ck", "auxclk3_src_ck"),
+ DT_CLK(NULL, "auxclk3_ck", "auxclk3_ck"),
+ DT_CLK(NULL, "auxclkreq3_ck", "auxclkreq3_ck"),
+ DT_CLK(NULL, "auxclk4_src_ck", "auxclk4_src_ck"),
+ DT_CLK(NULL, "auxclk4_ck", "auxclk4_ck"),
+ DT_CLK(NULL, "auxclkreq4_ck", "auxclkreq4_ck"),
+ DT_CLK(NULL, "auxclk5_src_ck", "auxclk5_src_ck"),
+ DT_CLK(NULL, "auxclk5_ck", "auxclk5_ck"),
+ DT_CLK(NULL, "auxclkreq5_ck", "auxclkreq5_ck"),
+ DT_CLK("50000000.gpmc", "fck", "dummy_ck"),
+ DT_CLK("omap_i2c.1", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.2", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.3", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.4", "ick", "dummy_ck"),
+ DT_CLK(NULL, "mailboxes_ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.0", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.1", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.2", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.3", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.4", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.1", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.2", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.3", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.4", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.1", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.2", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.3", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.4", "ick", "dummy_ck"),
+ DT_CLK(NULL, "uart1_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart2_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart3_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart4_ick", "dummy_ck"),
+ DT_CLK("usbhs_omap", "usbhost_ick", "dummy_ck"),
+ DT_CLK("usbhs_omap", "usbtll_fck", "dummy_ck"),
+ DT_CLK("usbhs_tll", "usbtll_fck", "dummy_ck"),
+ DT_CLK("omap_wdt", "ick", "dummy_ck"),
+ DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
+ DT_CLK("omap_timer.1", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("omap_timer.2", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("omap_timer.3", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("omap_timer.4", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("omap_timer.9", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("omap_timer.10", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("omap_timer.11", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("omap_timer.5", "timer_sys_ck", "syc_clk_div_ck"),
+ DT_CLK("omap_timer.6", "timer_sys_ck", "syc_clk_div_ck"),
+ DT_CLK("omap_timer.7", "timer_sys_ck", "syc_clk_div_ck"),
+ DT_CLK("omap_timer.8", "timer_sys_ck", "syc_clk_div_ck"),
+ DT_CLK("4a318000.timer", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("48032000.timer", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("48034000.timer", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("48036000.timer", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("4803e000.timer", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("48086000.timer", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("48088000.timer", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("40138000.timer", "timer_sys_ck", "syc_clk_div_ck"),
+ DT_CLK("4013a000.timer", "timer_sys_ck", "syc_clk_div_ck"),
+ DT_CLK("4013c000.timer", "timer_sys_ck", "syc_clk_div_ck"),
+ DT_CLK("4013e000.timer", "timer_sys_ck", "syc_clk_div_ck"),
+ DT_CLK(NULL, "cpufreq_ck", "dpll_mpu_ck"),
+ DT_CLK(NULL, "bandgap_fclk", "bandgap_fclk"),
+ DT_CLK(NULL, "div_ts_ck", "div_ts_ck"),
+ DT_CLK(NULL, "bandgap_ts_fclk", "bandgap_ts_fclk"),
+ { .node_name = NULL },
+};
+
+int __init omap4xxx_dt_clk_init(void)
+{
+ int rc;
+ struct clk *abe_dpll_ref, *abe_dpll, *sys_32k_ck, *usb_dpll;
+
+ ti_dt_clocks_register(omap44xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ /*
+ * Lock USB DPLL on OMAP4 devices so that the L3INIT power
+ * domain can transition to retention state when not in use.
+ */
+ usb_dpll = clk_get_sys(NULL, "dpll_usb_ck");
+ rc = clk_set_rate(usb_dpll, OMAP4_DPLL_USB_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure USB DPLL!\n", __func__);
+
+ /*
+ * On OMAP4460 the ABE DPLL fails to turn on if it is in the idle
+ * low-power state when the ABE clock domain is switched on. Work
+ * around this by locking the ABE DPLL at boot.
+ * Lock the ABE DPLL in any case to avoid issues with audio.
+ */
+ abe_dpll_ref = clk_get_sys(NULL, "abe_dpll_refclk_mux_ck");
+ sys_32k_ck = clk_get_sys(NULL, "sys_32k_ck");
+ rc = clk_set_parent(abe_dpll_ref, sys_32k_ck);
+ abe_dpll = clk_get_sys(NULL, "dpll_abe_ck");
+ if (!rc)
+ rc = clk_set_rate(abe_dpll, OMAP4_DPLL_ABE_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+
+ return 0;
+}
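
The DT_CLK() table above only registers clkdev aliases so that legacy (dev-id, con-id) lookups resolve to the device-tree clock nodes; the clock data itself comes from the DT. A minimal consumer-side sketch (not taken from this series — the function name and error handling are hypothetical, and it assumes <linux/clk.h> and <linux/err.h>; only the alias it resolves comes from the table above):

static int example_consumer_enable_ick(struct device *dev)
{
        struct clk *ick;
        int ret;

        /* For dev "omapdss_dss" this resolves via DT_CLK("omapdss_dss", "ick", "dss_fck"). */
        ick = clk_get(dev, "ick");
        if (IS_ERR(ick))
                return PTR_ERR(ick);

        ret = clk_prepare_enable(ick);
        if (ret)
                clk_put(ick);

        return ret;
}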
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
new file mode 100644
index 000000000000..0ef9f581286b
--- /dev/null
+++ b/drivers/clk/ti/clk-54xx.c
@@ -0,0 +1,255 @@
+/*
+ * OMAP5 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-private.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/clk/ti.h>
+
+#define OMAP5_DPLL_ABE_DEFFREQ 98304000
+
+/*
+ * OMAP543x TRM, section "3.6.3.9.5 DPLL_USB Preferred Settings",
+ * states that the USB DPLL must be locked at 960 MHz.
+ */
+#define OMAP5_DPLL_USB_DEFFREQ 960000000
+
+static struct ti_dt_clk omap54xx_clks[] = {
+ DT_CLK(NULL, "pad_clks_src_ck", "pad_clks_src_ck"),
+ DT_CLK(NULL, "pad_clks_ck", "pad_clks_ck"),
+ DT_CLK(NULL, "secure_32k_clk_src_ck", "secure_32k_clk_src_ck"),
+ DT_CLK(NULL, "slimbus_src_clk", "slimbus_src_clk"),
+ DT_CLK(NULL, "slimbus_clk", "slimbus_clk"),
+ DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
+ DT_CLK(NULL, "virt_12000000_ck", "virt_12000000_ck"),
+ DT_CLK(NULL, "virt_13000000_ck", "virt_13000000_ck"),
+ DT_CLK(NULL, "virt_16800000_ck", "virt_16800000_ck"),
+ DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+ DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+ DT_CLK(NULL, "virt_27000000_ck", "virt_27000000_ck"),
+ DT_CLK(NULL, "virt_38400000_ck", "virt_38400000_ck"),
+ DT_CLK(NULL, "sys_clkin", "sys_clkin"),
+ DT_CLK(NULL, "xclk60mhsp1_ck", "xclk60mhsp1_ck"),
+ DT_CLK(NULL, "xclk60mhsp2_ck", "xclk60mhsp2_ck"),
+ DT_CLK(NULL, "abe_dpll_bypass_clk_mux", "abe_dpll_bypass_clk_mux"),
+ DT_CLK(NULL, "abe_dpll_clk_mux", "abe_dpll_clk_mux"),
+ DT_CLK(NULL, "dpll_abe_ck", "dpll_abe_ck"),
+ DT_CLK(NULL, "dpll_abe_x2_ck", "dpll_abe_x2_ck"),
+ DT_CLK(NULL, "dpll_abe_m2x2_ck", "dpll_abe_m2x2_ck"),
+ DT_CLK(NULL, "abe_24m_fclk", "abe_24m_fclk"),
+ DT_CLK(NULL, "abe_clk", "abe_clk"),
+ DT_CLK(NULL, "abe_iclk", "abe_iclk"),
+ DT_CLK(NULL, "abe_lp_clk_div", "abe_lp_clk_div"),
+ DT_CLK(NULL, "dpll_abe_m3x2_ck", "dpll_abe_m3x2_ck"),
+ DT_CLK(NULL, "dpll_core_ck", "dpll_core_ck"),
+ DT_CLK(NULL, "dpll_core_x2_ck", "dpll_core_x2_ck"),
+ DT_CLK(NULL, "dpll_core_h21x2_ck", "dpll_core_h21x2_ck"),
+ DT_CLK(NULL, "c2c_fclk", "c2c_fclk"),
+ DT_CLK(NULL, "c2c_iclk", "c2c_iclk"),
+ DT_CLK(NULL, "custefuse_sys_gfclk_div", "custefuse_sys_gfclk_div"),
+ DT_CLK(NULL, "dpll_core_h11x2_ck", "dpll_core_h11x2_ck"),
+ DT_CLK(NULL, "dpll_core_h12x2_ck", "dpll_core_h12x2_ck"),
+ DT_CLK(NULL, "dpll_core_h13x2_ck", "dpll_core_h13x2_ck"),
+ DT_CLK(NULL, "dpll_core_h14x2_ck", "dpll_core_h14x2_ck"),
+ DT_CLK(NULL, "dpll_core_h22x2_ck", "dpll_core_h22x2_ck"),
+ DT_CLK(NULL, "dpll_core_h23x2_ck", "dpll_core_h23x2_ck"),
+ DT_CLK(NULL, "dpll_core_h24x2_ck", "dpll_core_h24x2_ck"),
+ DT_CLK(NULL, "dpll_core_m2_ck", "dpll_core_m2_ck"),
+ DT_CLK(NULL, "dpll_core_m3x2_ck", "dpll_core_m3x2_ck"),
+ DT_CLK(NULL, "iva_dpll_hs_clk_div", "iva_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_iva_ck", "dpll_iva_ck"),
+ DT_CLK(NULL, "dpll_iva_x2_ck", "dpll_iva_x2_ck"),
+ DT_CLK(NULL, "dpll_iva_h11x2_ck", "dpll_iva_h11x2_ck"),
+ DT_CLK(NULL, "dpll_iva_h12x2_ck", "dpll_iva_h12x2_ck"),
+ DT_CLK(NULL, "mpu_dpll_hs_clk_div", "mpu_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
+ DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
+ DT_CLK(NULL, "per_dpll_hs_clk_div", "per_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_per_ck", "dpll_per_ck"),
+ DT_CLK(NULL, "dpll_per_x2_ck", "dpll_per_x2_ck"),
+ DT_CLK(NULL, "dpll_per_h11x2_ck", "dpll_per_h11x2_ck"),
+ DT_CLK(NULL, "dpll_per_h12x2_ck", "dpll_per_h12x2_ck"),
+ DT_CLK(NULL, "dpll_per_h14x2_ck", "dpll_per_h14x2_ck"),
+ DT_CLK(NULL, "dpll_per_m2_ck", "dpll_per_m2_ck"),
+ DT_CLK(NULL, "dpll_per_m2x2_ck", "dpll_per_m2x2_ck"),
+ DT_CLK(NULL, "dpll_per_m3x2_ck", "dpll_per_m3x2_ck"),
+ DT_CLK(NULL, "dpll_unipro1_ck", "dpll_unipro1_ck"),
+ DT_CLK(NULL, "dpll_unipro1_clkdcoldo", "dpll_unipro1_clkdcoldo"),
+ DT_CLK(NULL, "dpll_unipro1_m2_ck", "dpll_unipro1_m2_ck"),
+ DT_CLK(NULL, "dpll_unipro2_ck", "dpll_unipro2_ck"),
+ DT_CLK(NULL, "dpll_unipro2_clkdcoldo", "dpll_unipro2_clkdcoldo"),
+ DT_CLK(NULL, "dpll_unipro2_m2_ck", "dpll_unipro2_m2_ck"),
+ DT_CLK(NULL, "usb_dpll_hs_clk_div", "usb_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_usb_ck", "dpll_usb_ck"),
+ DT_CLK(NULL, "dpll_usb_clkdcoldo", "dpll_usb_clkdcoldo"),
+ DT_CLK(NULL, "dpll_usb_m2_ck", "dpll_usb_m2_ck"),
+ DT_CLK(NULL, "dss_syc_gfclk_div", "dss_syc_gfclk_div"),
+ DT_CLK(NULL, "func_128m_clk", "func_128m_clk"),
+ DT_CLK(NULL, "func_12m_fclk", "func_12m_fclk"),
+ DT_CLK(NULL, "func_24m_clk", "func_24m_clk"),
+ DT_CLK(NULL, "func_48m_fclk", "func_48m_fclk"),
+ DT_CLK(NULL, "func_96m_fclk", "func_96m_fclk"),
+ DT_CLK(NULL, "l3_iclk_div", "l3_iclk_div"),
+ DT_CLK(NULL, "gpu_l3_iclk", "gpu_l3_iclk"),
+ DT_CLK(NULL, "l3init_60m_fclk", "l3init_60m_fclk"),
+ DT_CLK(NULL, "wkupaon_iclk_mux", "wkupaon_iclk_mux"),
+ DT_CLK(NULL, "l3instr_ts_gclk_div", "l3instr_ts_gclk_div"),
+ DT_CLK(NULL, "l4_root_clk_div", "l4_root_clk_div"),
+ DT_CLK(NULL, "dss_32khz_clk", "dss_32khz_clk"),
+ DT_CLK(NULL, "dss_48mhz_clk", "dss_48mhz_clk"),
+ DT_CLK(NULL, "dss_dss_clk", "dss_dss_clk"),
+ DT_CLK(NULL, "dss_sys_clk", "dss_sys_clk"),
+ DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
+ DT_CLK(NULL, "gpio2_dbclk", "gpio2_dbclk"),
+ DT_CLK(NULL, "gpio3_dbclk", "gpio3_dbclk"),
+ DT_CLK(NULL, "gpio4_dbclk", "gpio4_dbclk"),
+ DT_CLK(NULL, "gpio5_dbclk", "gpio5_dbclk"),
+ DT_CLK(NULL, "gpio6_dbclk", "gpio6_dbclk"),
+ DT_CLK(NULL, "gpio7_dbclk", "gpio7_dbclk"),
+ DT_CLK(NULL, "gpio8_dbclk", "gpio8_dbclk"),
+ DT_CLK(NULL, "iss_ctrlclk", "iss_ctrlclk"),
+ DT_CLK(NULL, "lli_txphy_clk", "lli_txphy_clk"),
+ DT_CLK(NULL, "lli_txphy_ls_clk", "lli_txphy_ls_clk"),
+ DT_CLK(NULL, "mmc1_32khz_clk", "mmc1_32khz_clk"),
+ DT_CLK(NULL, "sata_ref_clk", "sata_ref_clk"),
+ DT_CLK(NULL, "slimbus1_slimbus_clk", "slimbus1_slimbus_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "usb_host_hs_hsic480m_p1_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "usb_host_hs_hsic480m_p2_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p3_clk", "usb_host_hs_hsic480m_p3_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "usb_host_hs_hsic60m_p1_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "usb_host_hs_hsic60m_p2_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p3_clk", "usb_host_hs_hsic60m_p3_clk"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "usb_host_hs_utmi_p1_clk"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "usb_host_hs_utmi_p2_clk"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "usb_host_hs_utmi_p3_clk"),
+ DT_CLK(NULL, "usb_otg_ss_refclk960m", "usb_otg_ss_refclk960m"),
+ DT_CLK(NULL, "usb_phy_cm_clk32k", "usb_phy_cm_clk32k"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "usb_tll_hs_usb_ch0_clk"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "usb_tll_hs_usb_ch1_clk"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "usb_tll_hs_usb_ch2_clk"),
+ DT_CLK(NULL, "aess_fclk", "aess_fclk"),
+ DT_CLK(NULL, "dmic_sync_mux_ck", "dmic_sync_mux_ck"),
+ DT_CLK(NULL, "dmic_gfclk", "dmic_gfclk"),
+ DT_CLK(NULL, "fdif_fclk", "fdif_fclk"),
+ DT_CLK(NULL, "gpu_core_gclk_mux", "gpu_core_gclk_mux"),
+ DT_CLK(NULL, "gpu_hyd_gclk_mux", "gpu_hyd_gclk_mux"),
+ DT_CLK(NULL, "hsi_fclk", "hsi_fclk"),
+ DT_CLK(NULL, "mcasp_sync_mux_ck", "mcasp_sync_mux_ck"),
+ DT_CLK(NULL, "mcasp_gfclk", "mcasp_gfclk"),
+ DT_CLK(NULL, "mcbsp1_sync_mux_ck", "mcbsp1_sync_mux_ck"),
+ DT_CLK(NULL, "mcbsp1_gfclk", "mcbsp1_gfclk"),
+ DT_CLK(NULL, "mcbsp2_sync_mux_ck", "mcbsp2_sync_mux_ck"),
+ DT_CLK(NULL, "mcbsp2_gfclk", "mcbsp2_gfclk"),
+ DT_CLK(NULL, "mcbsp3_sync_mux_ck", "mcbsp3_sync_mux_ck"),
+ DT_CLK(NULL, "mcbsp3_gfclk", "mcbsp3_gfclk"),
+ DT_CLK(NULL, "mmc1_fclk_mux", "mmc1_fclk_mux"),
+ DT_CLK(NULL, "mmc1_fclk", "mmc1_fclk"),
+ DT_CLK(NULL, "mmc2_fclk_mux", "mmc2_fclk_mux"),
+ DT_CLK(NULL, "mmc2_fclk", "mmc2_fclk"),
+ DT_CLK(NULL, "timer10_gfclk_mux", "timer10_gfclk_mux"),
+ DT_CLK(NULL, "timer11_gfclk_mux", "timer11_gfclk_mux"),
+ DT_CLK(NULL, "timer1_gfclk_mux", "timer1_gfclk_mux"),
+ DT_CLK(NULL, "timer2_gfclk_mux", "timer2_gfclk_mux"),
+ DT_CLK(NULL, "timer3_gfclk_mux", "timer3_gfclk_mux"),
+ DT_CLK(NULL, "timer4_gfclk_mux", "timer4_gfclk_mux"),
+ DT_CLK(NULL, "timer5_gfclk_mux", "timer5_gfclk_mux"),
+ DT_CLK(NULL, "timer6_gfclk_mux", "timer6_gfclk_mux"),
+ DT_CLK(NULL, "timer7_gfclk_mux", "timer7_gfclk_mux"),
+ DT_CLK(NULL, "timer8_gfclk_mux", "timer8_gfclk_mux"),
+ DT_CLK(NULL, "timer9_gfclk_mux", "timer9_gfclk_mux"),
+ DT_CLK(NULL, "utmi_p1_gfclk", "utmi_p1_gfclk"),
+ DT_CLK(NULL, "utmi_p2_gfclk", "utmi_p2_gfclk"),
+ DT_CLK(NULL, "auxclk0_src_ck", "auxclk0_src_ck"),
+ DT_CLK(NULL, "auxclk0_ck", "auxclk0_ck"),
+ DT_CLK(NULL, "auxclkreq0_ck", "auxclkreq0_ck"),
+ DT_CLK(NULL, "auxclk1_src_ck", "auxclk1_src_ck"),
+ DT_CLK(NULL, "auxclk1_ck", "auxclk1_ck"),
+ DT_CLK(NULL, "auxclkreq1_ck", "auxclkreq1_ck"),
+ DT_CLK(NULL, "auxclk2_src_ck", "auxclk2_src_ck"),
+ DT_CLK(NULL, "auxclk2_ck", "auxclk2_ck"),
+ DT_CLK(NULL, "auxclkreq2_ck", "auxclkreq2_ck"),
+ DT_CLK(NULL, "auxclk3_src_ck", "auxclk3_src_ck"),
+ DT_CLK(NULL, "auxclk3_ck", "auxclk3_ck"),
+ DT_CLK(NULL, "auxclkreq3_ck", "auxclkreq3_ck"),
+ DT_CLK(NULL, "gpmc_ck", "dummy_ck"),
+ DT_CLK("omap_i2c.1", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.2", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.3", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.4", "ick", "dummy_ck"),
+ DT_CLK(NULL, "mailboxes_ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.0", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.1", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.2", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.3", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.4", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.1", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.2", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.3", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.4", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.1", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.2", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.3", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.4", "ick", "dummy_ck"),
+ DT_CLK(NULL, "uart1_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart2_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart3_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart4_ick", "dummy_ck"),
+ DT_CLK("usbhs_omap", "usbhost_ick", "dummy_ck"),
+ DT_CLK("usbhs_omap", "usbtll_fck", "dummy_ck"),
+ DT_CLK("omap_wdt", "ick", "dummy_ck"),
+ DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
+ DT_CLK("omap_timer.1", "sys_ck", "sys_clkin"),
+ DT_CLK("omap_timer.2", "sys_ck", "sys_clkin"),
+ DT_CLK("omap_timer.3", "sys_ck", "sys_clkin"),
+ DT_CLK("omap_timer.4", "sys_ck", "sys_clkin"),
+ DT_CLK("omap_timer.9", "sys_ck", "sys_clkin"),
+ DT_CLK("omap_timer.10", "sys_ck", "sys_clkin"),
+ DT_CLK("omap_timer.11", "sys_ck", "sys_clkin"),
+ DT_CLK("omap_timer.5", "sys_ck", "dss_syc_gfclk_div"),
+ DT_CLK("omap_timer.6", "sys_ck", "dss_syc_gfclk_div"),
+ DT_CLK("omap_timer.7", "sys_ck", "dss_syc_gfclk_div"),
+ DT_CLK("omap_timer.8", "sys_ck", "dss_syc_gfclk_div"),
+ { .node_name = NULL },
+};
+
+int __init omap5xxx_dt_clk_init(void)
+{
+ int rc;
+ struct clk *abe_dpll_ref, *abe_dpll, *sys_32k_ck, *usb_dpll;
+
+ ti_dt_clocks_register(omap54xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ abe_dpll_ref = clk_get_sys(NULL, "abe_dpll_clk_mux");
+ sys_32k_ck = clk_get_sys(NULL, "sys_32k_ck");
+ rc = clk_set_parent(abe_dpll_ref, sys_32k_ck);
+ abe_dpll = clk_get_sys(NULL, "dpll_abe_ck");
+ if (!rc)
+ rc = clk_set_rate(abe_dpll, OMAP5_DPLL_ABE_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+
+ usb_dpll = clk_get_sys(NULL, "dpll_usb_ck");
+ rc = clk_set_rate(usb_dpll, OMAP5_DPLL_USB_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure USB DPLL!\n", __func__);
+
+ usb_dpll = clk_get_sys(NULL, "dpll_usb_m2_ck");
+ rc = clk_set_rate(usb_dpll, OMAP5_DPLL_USB_DEFFREQ/2);
+ if (rc)
+ pr_err("%s: failed to set USB_DPLL M2 OUT\n", __func__);
+
+ return 0;
+}
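
omap4xxx_dt_clk_init() above, omap5xxx_dt_clk_init() here and dra7xx_dt_clk_init() below all open-code the same clk_get_sys()/clk_set_rate() sequence when locking a DPLL at its default rate. A possible helper capturing that pattern — purely a refactoring sketch, the name is made up and, unlike the in-tree code, it also checks the clk_get_sys() result:

static int __init ti_dt_lock_dpll(const char *name, unsigned long rate)
{
        struct clk *clk;
        int rc;

        clk = clk_get_sys(NULL, name);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        rc = clk_set_rate(clk, rate);
        if (rc)
                pr_err("%s: failed to lock %s at %lu Hz (%d)\n",
                       __func__, name, rate, rc);
        return rc;
}

With it, the USB DPLL block in omap5xxx_dt_clk_init() would reduce to ti_dt_lock_dpll("dpll_usb_ck", OMAP5_DPLL_USB_DEFFREQ) followed by ti_dt_lock_dpll("dpll_usb_m2_ck", OMAP5_DPLL_USB_DEFFREQ / 2), i.e. a 480 MHz M2 output.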
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
new file mode 100644
index 000000000000..9977653f2d63
--- /dev/null
+++ b/drivers/clk/ti/clk-7xx.c
@@ -0,0 +1,332 @@
+/*
+ * DRA7 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-private.h>
+#include <linux/clkdev.h>
+#include <linux/clk/ti.h>
+
+#define DRA7_DPLL_ABE_DEFFREQ 361267200
+#define DRA7_DPLL_GMAC_DEFFREQ 1000000000
+
+
+static struct ti_dt_clk dra7xx_clks[] = {
+ DT_CLK(NULL, "atl_clkin0_ck", "atl_clkin0_ck"),
+ DT_CLK(NULL, "atl_clkin1_ck", "atl_clkin1_ck"),
+ DT_CLK(NULL, "atl_clkin2_ck", "atl_clkin2_ck"),
+ DT_CLK(NULL, "atlclkin3_ck", "atlclkin3_ck"),
+ DT_CLK(NULL, "hdmi_clkin_ck", "hdmi_clkin_ck"),
+ DT_CLK(NULL, "mlb_clkin_ck", "mlb_clkin_ck"),
+ DT_CLK(NULL, "mlbp_clkin_ck", "mlbp_clkin_ck"),
+ DT_CLK(NULL, "pciesref_acs_clk_ck", "pciesref_acs_clk_ck"),
+ DT_CLK(NULL, "ref_clkin0_ck", "ref_clkin0_ck"),
+ DT_CLK(NULL, "ref_clkin1_ck", "ref_clkin1_ck"),
+ DT_CLK(NULL, "ref_clkin2_ck", "ref_clkin2_ck"),
+ DT_CLK(NULL, "ref_clkin3_ck", "ref_clkin3_ck"),
+ DT_CLK(NULL, "rmii_clk_ck", "rmii_clk_ck"),
+ DT_CLK(NULL, "sdvenc_clkin_ck", "sdvenc_clkin_ck"),
+ DT_CLK(NULL, "secure_32k_clk_src_ck", "secure_32k_clk_src_ck"),
+ DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
+ DT_CLK(NULL, "virt_12000000_ck", "virt_12000000_ck"),
+ DT_CLK(NULL, "virt_13000000_ck", "virt_13000000_ck"),
+ DT_CLK(NULL, "virt_16800000_ck", "virt_16800000_ck"),
+ DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+ DT_CLK(NULL, "virt_20000000_ck", "virt_20000000_ck"),
+ DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+ DT_CLK(NULL, "virt_27000000_ck", "virt_27000000_ck"),
+ DT_CLK(NULL, "virt_38400000_ck", "virt_38400000_ck"),
+ DT_CLK(NULL, "sys_clkin1", "sys_clkin1"),
+ DT_CLK(NULL, "sys_clkin2", "sys_clkin2"),
+ DT_CLK(NULL, "usb_otg_clkin_ck", "usb_otg_clkin_ck"),
+ DT_CLK(NULL, "video1_clkin_ck", "video1_clkin_ck"),
+ DT_CLK(NULL, "video1_m2_clkin_ck", "video1_m2_clkin_ck"),
+ DT_CLK(NULL, "video2_clkin_ck", "video2_clkin_ck"),
+ DT_CLK(NULL, "video2_m2_clkin_ck", "video2_m2_clkin_ck"),
+ DT_CLK(NULL, "abe_dpll_sys_clk_mux", "abe_dpll_sys_clk_mux"),
+ DT_CLK(NULL, "abe_dpll_bypass_clk_mux", "abe_dpll_bypass_clk_mux"),
+ DT_CLK(NULL, "abe_dpll_clk_mux", "abe_dpll_clk_mux"),
+ DT_CLK(NULL, "dpll_abe_ck", "dpll_abe_ck"),
+ DT_CLK(NULL, "dpll_abe_x2_ck", "dpll_abe_x2_ck"),
+ DT_CLK(NULL, "dpll_abe_m2x2_ck", "dpll_abe_m2x2_ck"),
+ DT_CLK(NULL, "abe_24m_fclk", "abe_24m_fclk"),
+ DT_CLK(NULL, "abe_clk", "abe_clk"),
+ DT_CLK(NULL, "aess_fclk", "aess_fclk"),
+ DT_CLK(NULL, "abe_giclk_div", "abe_giclk_div"),
+ DT_CLK(NULL, "abe_lp_clk_div", "abe_lp_clk_div"),
+ DT_CLK(NULL, "abe_sys_clk_div", "abe_sys_clk_div"),
+ DT_CLK(NULL, "adc_gfclk_mux", "adc_gfclk_mux"),
+ DT_CLK(NULL, "dpll_pcie_ref_ck", "dpll_pcie_ref_ck"),
+ DT_CLK(NULL, "dpll_pcie_ref_m2ldo_ck", "dpll_pcie_ref_m2ldo_ck"),
+ DT_CLK(NULL, "apll_pcie_ck", "apll_pcie_ck"),
+ DT_CLK(NULL, "apll_pcie_clkvcoldo", "apll_pcie_clkvcoldo"),
+ DT_CLK(NULL, "apll_pcie_clkvcoldo_div", "apll_pcie_clkvcoldo_div"),
+ DT_CLK(NULL, "apll_pcie_m2_ck", "apll_pcie_m2_ck"),
+ DT_CLK(NULL, "sys_clk1_dclk_div", "sys_clk1_dclk_div"),
+ DT_CLK(NULL, "sys_clk2_dclk_div", "sys_clk2_dclk_div"),
+ DT_CLK(NULL, "dpll_abe_m2_ck", "dpll_abe_m2_ck"),
+ DT_CLK(NULL, "per_abe_x1_dclk_div", "per_abe_x1_dclk_div"),
+ DT_CLK(NULL, "dpll_abe_m3x2_ck", "dpll_abe_m3x2_ck"),
+ DT_CLK(NULL, "dpll_core_ck", "dpll_core_ck"),
+ DT_CLK(NULL, "dpll_core_x2_ck", "dpll_core_x2_ck"),
+ DT_CLK(NULL, "dpll_core_h12x2_ck", "dpll_core_h12x2_ck"),
+ DT_CLK(NULL, "mpu_dpll_hs_clk_div", "mpu_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
+ DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
+ DT_CLK(NULL, "mpu_dclk_div", "mpu_dclk_div"),
+ DT_CLK(NULL, "dsp_dpll_hs_clk_div", "dsp_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_dsp_ck", "dpll_dsp_ck"),
+ DT_CLK(NULL, "dpll_dsp_m2_ck", "dpll_dsp_m2_ck"),
+ DT_CLK(NULL, "dsp_gclk_div", "dsp_gclk_div"),
+ DT_CLK(NULL, "iva_dpll_hs_clk_div", "iva_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_iva_ck", "dpll_iva_ck"),
+ DT_CLK(NULL, "dpll_iva_m2_ck", "dpll_iva_m2_ck"),
+ DT_CLK(NULL, "iva_dclk", "iva_dclk"),
+ DT_CLK(NULL, "dpll_gpu_ck", "dpll_gpu_ck"),
+ DT_CLK(NULL, "dpll_gpu_m2_ck", "dpll_gpu_m2_ck"),
+ DT_CLK(NULL, "gpu_dclk", "gpu_dclk"),
+ DT_CLK(NULL, "dpll_core_m2_ck", "dpll_core_m2_ck"),
+ DT_CLK(NULL, "core_dpll_out_dclk_div", "core_dpll_out_dclk_div"),
+ DT_CLK(NULL, "dpll_ddr_ck", "dpll_ddr_ck"),
+ DT_CLK(NULL, "dpll_ddr_m2_ck", "dpll_ddr_m2_ck"),
+ DT_CLK(NULL, "emif_phy_dclk_div", "emif_phy_dclk_div"),
+ DT_CLK(NULL, "dpll_gmac_ck", "dpll_gmac_ck"),
+ DT_CLK(NULL, "dpll_gmac_m2_ck", "dpll_gmac_m2_ck"),
+ DT_CLK(NULL, "gmac_250m_dclk_div", "gmac_250m_dclk_div"),
+ DT_CLK(NULL, "video2_dclk_div", "video2_dclk_div"),
+ DT_CLK(NULL, "video1_dclk_div", "video1_dclk_div"),
+ DT_CLK(NULL, "hdmi_dclk_div", "hdmi_dclk_div"),
+ DT_CLK(NULL, "per_dpll_hs_clk_div", "per_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_per_ck", "dpll_per_ck"),
+ DT_CLK(NULL, "dpll_per_m2_ck", "dpll_per_m2_ck"),
+ DT_CLK(NULL, "func_96m_aon_dclk_div", "func_96m_aon_dclk_div"),
+ DT_CLK(NULL, "usb_dpll_hs_clk_div", "usb_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_usb_ck", "dpll_usb_ck"),
+ DT_CLK(NULL, "dpll_usb_m2_ck", "dpll_usb_m2_ck"),
+ DT_CLK(NULL, "l3init_480m_dclk_div", "l3init_480m_dclk_div"),
+ DT_CLK(NULL, "usb_otg_dclk_div", "usb_otg_dclk_div"),
+ DT_CLK(NULL, "sata_dclk_div", "sata_dclk_div"),
+ DT_CLK(NULL, "dpll_pcie_ref_m2_ck", "dpll_pcie_ref_m2_ck"),
+ DT_CLK(NULL, "pcie2_dclk_div", "pcie2_dclk_div"),
+ DT_CLK(NULL, "pcie_dclk_div", "pcie_dclk_div"),
+ DT_CLK(NULL, "emu_dclk_div", "emu_dclk_div"),
+ DT_CLK(NULL, "secure_32k_dclk_div", "secure_32k_dclk_div"),
+ DT_CLK(NULL, "eve_dpll_hs_clk_div", "eve_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_eve_ck", "dpll_eve_ck"),
+ DT_CLK(NULL, "dpll_eve_m2_ck", "dpll_eve_m2_ck"),
+ DT_CLK(NULL, "eve_dclk_div", "eve_dclk_div"),
+ DT_CLK(NULL, "clkoutmux0_clk_mux", "clkoutmux0_clk_mux"),
+ DT_CLK(NULL, "clkoutmux1_clk_mux", "clkoutmux1_clk_mux"),
+ DT_CLK(NULL, "clkoutmux2_clk_mux", "clkoutmux2_clk_mux"),
+ DT_CLK(NULL, "custefuse_sys_gfclk_div", "custefuse_sys_gfclk_div"),
+ DT_CLK(NULL, "dpll_core_h13x2_ck", "dpll_core_h13x2_ck"),
+ DT_CLK(NULL, "dpll_core_h14x2_ck", "dpll_core_h14x2_ck"),
+ DT_CLK(NULL, "dpll_core_h22x2_ck", "dpll_core_h22x2_ck"),
+ DT_CLK(NULL, "dpll_core_h23x2_ck", "dpll_core_h23x2_ck"),
+ DT_CLK(NULL, "dpll_core_h24x2_ck", "dpll_core_h24x2_ck"),
+ DT_CLK(NULL, "dpll_ddr_x2_ck", "dpll_ddr_x2_ck"),
+ DT_CLK(NULL, "dpll_ddr_h11x2_ck", "dpll_ddr_h11x2_ck"),
+ DT_CLK(NULL, "dpll_dsp_x2_ck", "dpll_dsp_x2_ck"),
+ DT_CLK(NULL, "dpll_dsp_m3x2_ck", "dpll_dsp_m3x2_ck"),
+ DT_CLK(NULL, "dpll_gmac_x2_ck", "dpll_gmac_x2_ck"),
+ DT_CLK(NULL, "dpll_gmac_h11x2_ck", "dpll_gmac_h11x2_ck"),
+ DT_CLK(NULL, "dpll_gmac_h12x2_ck", "dpll_gmac_h12x2_ck"),
+ DT_CLK(NULL, "dpll_gmac_h13x2_ck", "dpll_gmac_h13x2_ck"),
+ DT_CLK(NULL, "dpll_gmac_m3x2_ck", "dpll_gmac_m3x2_ck"),
+ DT_CLK(NULL, "dpll_per_x2_ck", "dpll_per_x2_ck"),
+ DT_CLK(NULL, "dpll_per_h11x2_ck", "dpll_per_h11x2_ck"),
+ DT_CLK(NULL, "dpll_per_h12x2_ck", "dpll_per_h12x2_ck"),
+ DT_CLK(NULL, "dpll_per_h13x2_ck", "dpll_per_h13x2_ck"),
+ DT_CLK(NULL, "dpll_per_h14x2_ck", "dpll_per_h14x2_ck"),
+ DT_CLK(NULL, "dpll_per_m2x2_ck", "dpll_per_m2x2_ck"),
+ DT_CLK(NULL, "dpll_usb_clkdcoldo", "dpll_usb_clkdcoldo"),
+ DT_CLK(NULL, "eve_clk", "eve_clk"),
+ DT_CLK(NULL, "func_128m_clk", "func_128m_clk"),
+ DT_CLK(NULL, "func_12m_fclk", "func_12m_fclk"),
+ DT_CLK(NULL, "func_24m_clk", "func_24m_clk"),
+ DT_CLK(NULL, "func_48m_fclk", "func_48m_fclk"),
+ DT_CLK(NULL, "func_96m_fclk", "func_96m_fclk"),
+ DT_CLK(NULL, "gmii_m_clk_div", "gmii_m_clk_div"),
+ DT_CLK(NULL, "hdmi_clk2_div", "hdmi_clk2_div"),
+ DT_CLK(NULL, "hdmi_div_clk", "hdmi_div_clk"),
+ DT_CLK(NULL, "hdmi_dpll_clk_mux", "hdmi_dpll_clk_mux"),
+ DT_CLK(NULL, "l3_iclk_div", "l3_iclk_div"),
+ DT_CLK(NULL, "l3init_60m_fclk", "l3init_60m_fclk"),
+ DT_CLK(NULL, "l4_root_clk_div", "l4_root_clk_div"),
+ DT_CLK(NULL, "mlb_clk", "mlb_clk"),
+ DT_CLK(NULL, "mlbp_clk", "mlbp_clk"),
+ DT_CLK(NULL, "per_abe_x1_gfclk2_div", "per_abe_x1_gfclk2_div"),
+ DT_CLK(NULL, "timer_sys_clk_div", "timer_sys_clk_div"),
+ DT_CLK(NULL, "video1_clk2_div", "video1_clk2_div"),
+ DT_CLK(NULL, "video1_div_clk", "video1_div_clk"),
+ DT_CLK(NULL, "video1_dpll_clk_mux", "video1_dpll_clk_mux"),
+ DT_CLK(NULL, "video2_clk2_div", "video2_clk2_div"),
+ DT_CLK(NULL, "video2_div_clk", "video2_div_clk"),
+ DT_CLK(NULL, "video2_dpll_clk_mux", "video2_dpll_clk_mux"),
+ DT_CLK(NULL, "wkupaon_iclk_mux", "wkupaon_iclk_mux"),
+ DT_CLK(NULL, "dss_32khz_clk", "dss_32khz_clk"),
+ DT_CLK(NULL, "dss_48mhz_clk", "dss_48mhz_clk"),
+ DT_CLK(NULL, "dss_dss_clk", "dss_dss_clk"),
+ DT_CLK(NULL, "dss_hdmi_clk", "dss_hdmi_clk"),
+ DT_CLK(NULL, "dss_video1_clk", "dss_video1_clk"),
+ DT_CLK(NULL, "dss_video2_clk", "dss_video2_clk"),
+ DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
+ DT_CLK(NULL, "gpio2_dbclk", "gpio2_dbclk"),
+ DT_CLK(NULL, "gpio3_dbclk", "gpio3_dbclk"),
+ DT_CLK(NULL, "gpio4_dbclk", "gpio4_dbclk"),
+ DT_CLK(NULL, "gpio5_dbclk", "gpio5_dbclk"),
+ DT_CLK(NULL, "gpio6_dbclk", "gpio6_dbclk"),
+ DT_CLK(NULL, "gpio7_dbclk", "gpio7_dbclk"),
+ DT_CLK(NULL, "gpio8_dbclk", "gpio8_dbclk"),
+ DT_CLK(NULL, "mmc1_clk32k", "mmc1_clk32k"),
+ DT_CLK(NULL, "mmc2_clk32k", "mmc2_clk32k"),
+ DT_CLK(NULL, "mmc3_clk32k", "mmc3_clk32k"),
+ DT_CLK(NULL, "mmc4_clk32k", "mmc4_clk32k"),
+ DT_CLK(NULL, "sata_ref_clk", "sata_ref_clk"),
+ DT_CLK(NULL, "usb_otg_ss1_refclk960m", "usb_otg_ss1_refclk960m"),
+ DT_CLK(NULL, "usb_otg_ss2_refclk960m", "usb_otg_ss2_refclk960m"),
+ DT_CLK(NULL, "usb_phy1_always_on_clk32k", "usb_phy1_always_on_clk32k"),
+ DT_CLK(NULL, "usb_phy2_always_on_clk32k", "usb_phy2_always_on_clk32k"),
+ DT_CLK(NULL, "usb_phy3_always_on_clk32k", "usb_phy3_always_on_clk32k"),
+ DT_CLK(NULL, "atl_dpll_clk_mux", "atl_dpll_clk_mux"),
+ DT_CLK(NULL, "atl_gfclk_mux", "atl_gfclk_mux"),
+ DT_CLK(NULL, "dcan1_sys_clk_mux", "dcan1_sys_clk_mux"),
+ DT_CLK(NULL, "gmac_gmii_ref_clk_div", "gmac_gmii_ref_clk_div"),
+ DT_CLK(NULL, "gmac_rft_clk_mux", "gmac_rft_clk_mux"),
+ DT_CLK(NULL, "gpu_core_gclk_mux", "gpu_core_gclk_mux"),
+ DT_CLK(NULL, "gpu_hyd_gclk_mux", "gpu_hyd_gclk_mux"),
+ DT_CLK(NULL, "ipu1_gfclk_mux", "ipu1_gfclk_mux"),
+ DT_CLK(NULL, "l3instr_ts_gclk_div", "l3instr_ts_gclk_div"),
+ DT_CLK(NULL, "mcasp1_ahclkr_mux", "mcasp1_ahclkr_mux"),
+ DT_CLK(NULL, "mcasp1_ahclkx_mux", "mcasp1_ahclkx_mux"),
+ DT_CLK(NULL, "mcasp1_aux_gfclk_mux", "mcasp1_aux_gfclk_mux"),
+ DT_CLK(NULL, "mcasp2_ahclkr_mux", "mcasp2_ahclkr_mux"),
+ DT_CLK(NULL, "mcasp2_ahclkx_mux", "mcasp2_ahclkx_mux"),
+ DT_CLK(NULL, "mcasp2_aux_gfclk_mux", "mcasp2_aux_gfclk_mux"),
+ DT_CLK(NULL, "mcasp3_ahclkx_mux", "mcasp3_ahclkx_mux"),
+ DT_CLK(NULL, "mcasp3_aux_gfclk_mux", "mcasp3_aux_gfclk_mux"),
+ DT_CLK(NULL, "mcasp4_ahclkx_mux", "mcasp4_ahclkx_mux"),
+ DT_CLK(NULL, "mcasp4_aux_gfclk_mux", "mcasp4_aux_gfclk_mux"),
+ DT_CLK(NULL, "mcasp5_ahclkx_mux", "mcasp5_ahclkx_mux"),
+ DT_CLK(NULL, "mcasp5_aux_gfclk_mux", "mcasp5_aux_gfclk_mux"),
+ DT_CLK(NULL, "mcasp6_ahclkx_mux", "mcasp6_ahclkx_mux"),
+ DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "mcasp6_aux_gfclk_mux"),
+ DT_CLK(NULL, "mcasp7_ahclkx_mux", "mcasp7_ahclkx_mux"),
+ DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "mcasp7_aux_gfclk_mux"),
+ DT_CLK(NULL, "mcasp8_ahclk_mux", "mcasp8_ahclk_mux"),
+ DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "mcasp8_aux_gfclk_mux"),
+ DT_CLK(NULL, "mmc1_fclk_mux", "mmc1_fclk_mux"),
+ DT_CLK(NULL, "mmc1_fclk_div", "mmc1_fclk_div"),
+ DT_CLK(NULL, "mmc2_fclk_mux", "mmc2_fclk_mux"),
+ DT_CLK(NULL, "mmc2_fclk_div", "mmc2_fclk_div"),
+ DT_CLK(NULL, "mmc3_gfclk_mux", "mmc3_gfclk_mux"),
+ DT_CLK(NULL, "mmc3_gfclk_div", "mmc3_gfclk_div"),
+ DT_CLK(NULL, "mmc4_gfclk_mux", "mmc4_gfclk_mux"),
+ DT_CLK(NULL, "mmc4_gfclk_div", "mmc4_gfclk_div"),
+ DT_CLK(NULL, "qspi_gfclk_mux", "qspi_gfclk_mux"),
+ DT_CLK(NULL, "qspi_gfclk_div", "qspi_gfclk_div"),
+ DT_CLK(NULL, "timer10_gfclk_mux", "timer10_gfclk_mux"),
+ DT_CLK(NULL, "timer11_gfclk_mux", "timer11_gfclk_mux"),
+ DT_CLK(NULL, "timer13_gfclk_mux", "timer13_gfclk_mux"),
+ DT_CLK(NULL, "timer14_gfclk_mux", "timer14_gfclk_mux"),
+ DT_CLK(NULL, "timer15_gfclk_mux", "timer15_gfclk_mux"),
+ DT_CLK(NULL, "timer16_gfclk_mux", "timer16_gfclk_mux"),
+ DT_CLK(NULL, "timer1_gfclk_mux", "timer1_gfclk_mux"),
+ DT_CLK(NULL, "timer2_gfclk_mux", "timer2_gfclk_mux"),
+ DT_CLK(NULL, "timer3_gfclk_mux", "timer3_gfclk_mux"),
+ DT_CLK(NULL, "timer4_gfclk_mux", "timer4_gfclk_mux"),
+ DT_CLK(NULL, "timer5_gfclk_mux", "timer5_gfclk_mux"),
+ DT_CLK(NULL, "timer6_gfclk_mux", "timer6_gfclk_mux"),
+ DT_CLK(NULL, "timer7_gfclk_mux", "timer7_gfclk_mux"),
+ DT_CLK(NULL, "timer8_gfclk_mux", "timer8_gfclk_mux"),
+ DT_CLK(NULL, "timer9_gfclk_mux", "timer9_gfclk_mux"),
+ DT_CLK(NULL, "uart10_gfclk_mux", "uart10_gfclk_mux"),
+ DT_CLK(NULL, "uart1_gfclk_mux", "uart1_gfclk_mux"),
+ DT_CLK(NULL, "uart2_gfclk_mux", "uart2_gfclk_mux"),
+ DT_CLK(NULL, "uart3_gfclk_mux", "uart3_gfclk_mux"),
+ DT_CLK(NULL, "uart4_gfclk_mux", "uart4_gfclk_mux"),
+ DT_CLK(NULL, "uart5_gfclk_mux", "uart5_gfclk_mux"),
+ DT_CLK(NULL, "uart6_gfclk_mux", "uart6_gfclk_mux"),
+ DT_CLK(NULL, "uart7_gfclk_mux", "uart7_gfclk_mux"),
+ DT_CLK(NULL, "uart8_gfclk_mux", "uart8_gfclk_mux"),
+ DT_CLK(NULL, "uart9_gfclk_mux", "uart9_gfclk_mux"),
+ DT_CLK(NULL, "vip1_gclk_mux", "vip1_gclk_mux"),
+ DT_CLK(NULL, "vip2_gclk_mux", "vip2_gclk_mux"),
+ DT_CLK(NULL, "vip3_gclk_mux", "vip3_gclk_mux"),
+ DT_CLK(NULL, "gpmc_ck", "dummy_ck"),
+ DT_CLK("omap_i2c.1", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.2", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.3", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.4", "ick", "dummy_ck"),
+ DT_CLK(NULL, "mailboxes_ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.0", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.1", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.2", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.3", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.4", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.1", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.2", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.3", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.4", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.1", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.2", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.3", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.4", "ick", "dummy_ck"),
+ DT_CLK(NULL, "uart1_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart2_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart3_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart4_ick", "dummy_ck"),
+ DT_CLK("usbhs_omap", "usbhost_ick", "dummy_ck"),
+ DT_CLK("usbhs_omap", "usbtll_fck", "dummy_ck"),
+ DT_CLK("omap_wdt", "ick", "dummy_ck"),
+ DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
+ DT_CLK("4ae18000.timer", "timer_sys_ck", "sys_clkin2"),
+ DT_CLK("48032000.timer", "timer_sys_ck", "sys_clkin2"),
+ DT_CLK("48034000.timer", "timer_sys_ck", "sys_clkin2"),
+ DT_CLK("48036000.timer", "timer_sys_ck", "sys_clkin2"),
+ DT_CLK("4803e000.timer", "timer_sys_ck", "sys_clkin2"),
+ DT_CLK("48086000.timer", "timer_sys_ck", "sys_clkin2"),
+ DT_CLK("48088000.timer", "timer_sys_ck", "sys_clkin2"),
+ DT_CLK("48820000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK("48822000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK("48824000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK("48826000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK(NULL, "sys_clkin", "sys_clkin1"),
+ { .node_name = NULL },
+};
+
+int __init dra7xx_dt_clk_init(void)
+{
+ int rc;
+ struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck;
+
+ ti_dt_clocks_register(dra7xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ abe_dpll_mux = clk_get_sys(NULL, "abe_dpll_sys_clk_mux");
+ sys_clkin2 = clk_get_sys(NULL, "sys_clkin2");
+ dpll_ck = clk_get_sys(NULL, "dpll_abe_ck");
+
+ rc = clk_set_parent(abe_dpll_mux, sys_clkin2);
+ if (!rc)
+ rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+
+ dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
+ rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure GMAC DPLL!\n", __func__);
+
+ return rc;
+}
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
new file mode 100644
index 000000000000..b1a6f7144f3f
--- /dev/null
+++ b/drivers/clk/ti/clk.c
@@ -0,0 +1,167 @@
+/*
+ * TI clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/ti.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/list.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static int ti_dt_clk_memmap_index;
+struct ti_clk_ll_ops *ti_clk_ll_ops;
+
+/**
+ * ti_dt_clocks_register - register DT alias clocks during boot
+ * @oclks: list of clocks to register
+ *
+ * Register alias or non-standard DT clock entries during boot. By
+ * default, DT clocks are found based on their node name. If any
+ * additional con-id / dev-id -> clock mapping is required, use this
+ * function to list these.
+ */
+void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
+{
+ struct ti_dt_clk *c;
+ struct device_node *node;
+ struct clk *clk;
+ struct of_phandle_args clkspec;
+
+ for (c = oclks; c->node_name != NULL; c++) {
+ node = of_find_node_by_name(NULL, c->node_name);
+ clkspec.np = node;
+ clk = of_clk_get_from_provider(&clkspec);
+
+ if (!IS_ERR(clk)) {
+ c->lk.clk = clk;
+ clkdev_add(&c->lk);
+ } else {
+ pr_warn("failed to lookup clock node %s\n",
+ c->node_name);
+ }
+ }
+}
+
+struct clk_init_item {
+ struct device_node *node;
+ struct clk_hw *hw;
+ ti_of_clk_init_cb_t func;
+ struct list_head link;
+};
+
+static LIST_HEAD(retry_list);
+
+/**
+ * ti_clk_retry_init - retries a failed clock init at a later phase
+ * @node: device node for the clock
+ * @hw: partially initialized clk_hw struct for the clock
+ * @func: init function to be called for the clock
+ *
+ * Adds a failed clock init to the retry list. The retry list is parsed
+ * once all the other clocks have been initialized.
+ */
+int __init ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
+ ti_of_clk_init_cb_t func)
+{
+ struct clk_init_item *retry;
+
+ pr_debug("%s: adding to retry list...\n", node->name);
+ retry = kzalloc(sizeof(*retry), GFP_KERNEL);
+ if (!retry)
+ return -ENOMEM;
+
+ retry->node = node;
+ retry->func = func;
+ retry->hw = hw;
+ list_add(&retry->link, &retry_list);
+
+ return 0;
+}
+
+/**
+ * ti_clk_get_reg_addr - get register address for a clock register
+ * @node: device node for the clock
+ * @index: register index from the clock node
+ *
+ * Builds the clock register address from device tree information. The
+ * returned value is not a mapped address but a struct clk_omap_reg
+ * cookie (memmap index plus register offset) packed into the pointer,
+ * to be decoded again by the platform's low-level register ops.
+ */
+void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index)
+{
+ struct clk_omap_reg *reg;
+ u32 val;
+ u32 tmp;
+
+ reg = (struct clk_omap_reg *)&tmp;
+ reg->index = ti_dt_clk_memmap_index;
+
+ if (of_property_read_u32_index(node, "reg", index, &val)) {
+ pr_err("%s must have reg[%d]!\n", node->name, index);
+ return NULL;
+ }
+
+ reg->offset = val;
+
+ return (void __iomem *)tmp;
+}
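
A hedged sketch of how a platform's ti_clk_ll_ops could decode that cookie when reading a register; the function name and the clk_memmaps[] array of ioremapped bases are assumptions, not part of this patch, and it assumes <linux/io.h>:

static u32 example_clk_readl(void __iomem *reg)
{
        /* Recover the cookie packed by ti_clk_get_reg_addr() above. */
        struct clk_omap_reg *r = (struct clk_omap_reg *)&reg;

        return readl_relaxed(clk_memmaps[r->index] + r->offset);
}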
+
+/**
+ * ti_dt_clk_init_provider - init master clock provider
+ * @parent: master node
+ * @index: internal index for clk_reg_ops
+ *
+ * Initializes a master clock IP block and its child clock nodes.
+ * The memmap index is recorded so that register accesses for the
+ * IP block and all the clocks under it resolve to the right register space.
+ */
+void ti_dt_clk_init_provider(struct device_node *parent, int index)
+{
+ const struct of_device_id *match;
+ struct device_node *np;
+ struct device_node *clocks;
+ of_clk_init_cb_t clk_init_cb;
+ struct clk_init_item *retry;
+ struct clk_init_item *tmp;
+
+ ti_dt_clk_memmap_index = index;
+
+ /* get clocks for this parent */
+ clocks = of_get_child_by_name(parent, "clocks");
+ if (!clocks) {
+ pr_err("%s missing 'clocks' child node.\n", parent->name);
+ return;
+ }
+
+ for_each_child_of_node(clocks, np) {
+ match = of_match_node(&__clk_of_table, np);
+ if (!match)
+ continue;
+ clk_init_cb = (of_clk_init_cb_t)match->data;
+ pr_debug("%s: initializing: %s\n", __func__, np->name);
+ clk_init_cb(np);
+ }
+
+ list_for_each_entry_safe(retry, tmp, &retry_list, link) {
+ pr_debug("retry-init: %s\n", retry->node->name);
+ retry->func(retry->hw, retry->node);
+ list_del(&retry->link);
+ kfree(retry);
+ }
+}
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
new file mode 100644
index 000000000000..f1e0038d76ac
--- /dev/null
+++ b/drivers/clk/ti/clockdomain.c
@@ -0,0 +1,70 @@
+/*
+ * OMAP clockdomain support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static void __init of_ti_clockdomain_setup(struct device_node *node)
+{
+ struct clk *clk;
+ struct clk_hw *clk_hw;
+ const char *clkdm_name = node->name;
+ int i;
+ int num_clks;
+
+ num_clks = of_count_phandle_with_args(node, "clocks", "#clock-cells");
+
+ for (i = 0; i < num_clks; i++) {
+ clk = of_clk_get(node, i);
+ if (__clk_get_flags(clk) & CLK_IS_BASIC) {
+ pr_warn("can't setup clkdm for basic clk %s\n",
+ __clk_get_name(clk));
+ continue;
+ }
+ clk_hw = __clk_get_hw(clk);
+ to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name;
+ omap2_init_clk_clkdm(clk_hw);
+ }
+}
+
+static struct of_device_id ti_clkdm_match_table[] __initdata = {
+ { .compatible = "ti,clockdomain" },
+ { }
+};
+
+/**
+ * ti_dt_clockdomains_setup - setup device tree clockdomains
+ *
+ * Initializes clockdomain nodes for a SoC. This parses through all the
+ * nodes with compatible = "ti,clockdomain", and adds the clockdomain
+ * info for all the clocks listed under these. This function shall be
+ * called after the rest of the DT clock init has completed and all
+ * clock nodes have been registered.
+ */
+void __init ti_dt_clockdomains_setup(void)
+{
+ struct device_node *np;
+ for_each_matching_node(np, ti_clkdm_match_table) {
+ of_ti_clockdomain_setup(np);
+ }
+}
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
new file mode 100644
index 000000000000..19d8980ba458
--- /dev/null
+++ b/drivers/clk/ti/composite.c
@@ -0,0 +1,269 @@
+/*
+ * TI composite clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include <linux/list.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
+
+static unsigned long ti_composite_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return ti_clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static long ti_composite_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return -EINVAL;
+}
+
+static int ti_composite_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return -EINVAL;
+}
+
+static const struct clk_ops ti_composite_divider_ops = {
+ .recalc_rate = &ti_composite_recalc_rate,
+ .round_rate = &ti_composite_round_rate,
+ .set_rate = &ti_composite_set_rate,
+};
+
+static const struct clk_ops ti_composite_gate_ops = {
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+};
+
+struct component_clk {
+ int num_parents;
+ const char **parent_names;
+ struct device_node *node;
+ int type;
+ struct clk_hw *hw;
+ struct list_head link;
+};
+
+static const char * __initconst component_clk_types[] = {
+ "gate", "divider", "mux"
+};
+
+static LIST_HEAD(component_clks);
+
+static struct device_node *_get_component_node(struct device_node *node, int i)
+{
+ int rc;
+ struct of_phandle_args clkspec;
+
+ rc = of_parse_phandle_with_args(node, "clocks", "#clock-cells", i,
+ &clkspec);
+ if (rc)
+ return NULL;
+
+ return clkspec.np;
+}
+
+static struct component_clk *_lookup_component(struct device_node *node)
+{
+ struct component_clk *comp;
+
+ list_for_each_entry(comp, &component_clks, link) {
+ if (comp->node == node)
+ return comp;
+ }
+ return NULL;
+}
+
+struct clk_hw_omap_comp {
+ struct clk_hw hw;
+ struct device_node *comp_nodes[CLK_COMPONENT_TYPE_MAX];
+ struct component_clk *comp_clks[CLK_COMPONENT_TYPE_MAX];
+};
+
+static inline struct clk_hw *_get_hw(struct clk_hw_omap_comp *clk, int idx)
+{
+ if (!clk)
+ return NULL;
+
+ if (!clk->comp_clks[idx])
+ return NULL;
+
+ return clk->comp_clks[idx]->hw;
+}
+
+#define to_clk_hw_comp(_hw) container_of(_hw, struct clk_hw_omap_comp, hw)
+
+static void __init ti_clk_register_composite(struct clk_hw *hw,
+ struct device_node *node)
+{
+ struct clk *clk;
+ struct clk_hw_omap_comp *cclk = to_clk_hw_comp(hw);
+ struct component_clk *comp;
+ int num_parents = 0;
+ const char **parent_names = NULL;
+ int i;
+
+ /* Check for presence of each component clock */
+ for (i = 0; i < CLK_COMPONENT_TYPE_MAX; i++) {
+ if (!cclk->comp_nodes[i])
+ continue;
+
+ comp = _lookup_component(cclk->comp_nodes[i]);
+ if (!comp) {
+ pr_debug("component %s not ready for %s, retry\n",
+ cclk->comp_nodes[i]->name, node->name);
+ if (!ti_clk_retry_init(node, hw,
+ ti_clk_register_composite))
+ return;
+
+ goto cleanup;
+ }
+ if (cclk->comp_clks[comp->type] != NULL) {
+ pr_err("duplicate component types for %s (%s)!\n",
+ node->name, component_clk_types[comp->type]);
+ goto cleanup;
+ }
+
+ cclk->comp_clks[comp->type] = comp;
+
+ /* Mark this node as found */
+ cclk->comp_nodes[i] = NULL;
+ }
+
+ /* All components exist, proceed with registration */
+ for (i = CLK_COMPONENT_TYPE_MAX - 1; i >= 0; i--) {
+ comp = cclk->comp_clks[i];
+ if (!comp)
+ continue;
+ if (comp->num_parents) {
+ num_parents = comp->num_parents;
+ parent_names = comp->parent_names;
+ break;
+ }
+ }
+
+ if (!num_parents) {
+ pr_err("%s: no parents found for %s!\n", __func__, node->name);
+ goto cleanup;
+ }
+
+ clk = clk_register_composite(NULL, node->name,
+ parent_names, num_parents,
+ _get_hw(cclk, CLK_COMPONENT_TYPE_MUX),
+ &ti_clk_mux_ops,
+ _get_hw(cclk, CLK_COMPONENT_TYPE_DIVIDER),
+ &ti_composite_divider_ops,
+ _get_hw(cclk, CLK_COMPONENT_TYPE_GATE),
+ &ti_composite_gate_ops, 0);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+cleanup:
+ /* Free component clock list entries */
+ for (i = 0; i < CLK_COMPONENT_TYPE_MAX; i++) {
+ if (!cclk->comp_clks[i])
+ continue;
+ list_del(&cclk->comp_clks[i]->link);
+ kfree(cclk->comp_clks[i]);
+ }
+
+ kfree(cclk);
+}
+
+static void __init of_ti_composite_clk_setup(struct device_node *node)
+{
+ int num_clks;
+ int i;
+ struct clk_hw_omap_comp *cclk;
+
+ /* Number of component clocks to be put inside this clock */
+ num_clks = of_clk_get_parent_count(node);
+
+ if (num_clks < 1) {
+ pr_err("composite clk %s must have component(s)\n", node->name);
+ return;
+ }
+
+ cclk = kzalloc(sizeof(*cclk), GFP_KERNEL);
+ if (!cclk)
+ return;
+
+ /* Get device node pointers for each component clock */
+ for (i = 0; i < num_clks; i++)
+ cclk->comp_nodes[i] = _get_component_node(node, i);
+
+ ti_clk_register_composite(&cclk->hw, node);
+}
+CLK_OF_DECLARE(ti_composite_clock, "ti,composite-clock",
+ of_ti_composite_clk_setup);
+
+/**
+ * ti_clk_add_component - add a component clock to the pool
+ * @node: device node of the component clock
+ * @hw: hardware clock definition for the component clock
+ * @type: type of the component clock
+ *
+ * Adds a component clock to the list of available components, so that
+ * it can be registered by a composite clock.
+ */
+int __init ti_clk_add_component(struct device_node *node, struct clk_hw *hw,
+ int type)
+{
+ int num_parents;
+ const char **parent_names;
+ struct component_clk *clk;
+ int i;
+
+ num_parents = of_clk_get_parent_count(node);
+
+ if (num_parents < 1) {
+ pr_err("component-clock %s must have parent(s)\n", node->name);
+ return -EINVAL;
+ }
+
+ parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
+
+ for (i = 0; i < num_parents; i++)
+ parent_names[i] = of_clk_get_parent_name(node, i);
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk) {
+ kfree(parent_names);
+ return -ENOMEM;
+ }
+
+ clk->num_parents = num_parents;
+ clk->parent_names = parent_names;
+ clk->hw = hw;
+ clk->node = node;
+ clk->type = type;
+ list_add(&clk->link, &component_clks);
+
+ return 0;
+}
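
ti_clk_add_component() is expected to be called from the setup routines of the individual gate/divider/mux component clocks added elsewhere in this series. A hedged sketch of such a caller — the function name, the struct usage and the elided DT parsing are illustrative assumptions; only ti_clk_add_component() and CLK_COMPONENT_TYPE_GATE come from the code above:

static void __init example_composite_gate_setup(struct device_node *node)
{
        struct clk_hw_omap *gate;

        gate = kzalloc(sizeof(*gate), GFP_KERNEL);
        if (!gate)
                return;

        /* ... fill in the enable register and bit shift from the DT node ... */

        if (ti_clk_add_component(node, &gate->hw, CLK_COMPONENT_TYPE_GATE))
                kfree(gate);
}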
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
new file mode 100644
index 000000000000..a15e445570b2
--- /dev/null
+++ b/drivers/clk/ti/divider.c
@@ -0,0 +1,487 @@
+/*
+ * TI Divider Clock
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
+
+#define div_mask(d) ((1 << ((d)->width)) - 1)
+
+static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
+{
+ unsigned int maxdiv = 0;
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->div > maxdiv)
+ maxdiv = clkt->div;
+ return maxdiv;
+}
+
+static unsigned int _get_maxdiv(struct clk_divider *divider)
+{
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ return div_mask(divider);
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return 1 << div_mask(divider);
+ if (divider->table)
+ return _get_table_maxdiv(divider->table);
+ return div_mask(divider) + 1;
+}
+
+static unsigned int _get_table_div(const struct clk_div_table *table,
+ unsigned int val)
+{
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->val == val)
+ return clkt->div;
+ return 0;
+}
+
+static unsigned int _get_div(struct clk_divider *divider, unsigned int val)
+{
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ return val;
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return 1 << val;
+ if (divider->table)
+ return _get_table_div(divider->table, val);
+ return val + 1;
+}
+
+static unsigned int _get_table_val(const struct clk_div_table *table,
+ unsigned int div)
+{
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->div == div)
+ return clkt->val;
+ return 0;
+}
+
+static unsigned int _get_val(struct clk_divider *divider, u8 div)
+{
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ return div;
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return __ffs(div);
+ if (divider->table)
+ return _get_table_val(divider->table, div);
+ return div - 1;
+}
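
/*
 * Worked example (illustrative): with none of the divider flags set, a
 * 4-bit field value of 3 decodes to a divisor of 3 + 1 = 4, so recalc
 * turns a 768 MHz parent into 192 MHz, and _get_val(divider, 4) writes 3
 * back. With CLK_DIVIDER_ONE_BASED the raw field value and the divisor
 * are equal.
 */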
+
+static unsigned long ti_clk_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int div, val;
+
+ val = ti_clk_ll_ops->clk_readl(divider->reg) >> divider->shift;
+ val &= div_mask(divider);
+
+ div = _get_div(divider, val);
+ if (!div) {
+ WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
+ "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
+ __clk_get_name(hw->clk));
+ return parent_rate;
+ }
+
+ return parent_rate / div;
+}
+
+/*
+ * The reverse of DIV_ROUND_UP: the maximum number which,
+ * when divided by m, still gives r
+ */
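+/* e.g. MULT_ROUND_UP(100, 4) = 403, and 403 / 4 == 100 in integer division */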
+#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
+
+static bool _is_valid_table_div(const struct clk_div_table *table,
+ unsigned int div)
+{
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->div == div)
+ return true;
+ return false;
+}
+
+static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
+{
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return is_power_of_2(div);
+ if (divider->table)
+ return _is_valid_table_div(divider->table, div);
+ return true;
+}
+
+static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ int i, bestdiv = 0;
+ unsigned long parent_rate, best = 0, now, maxdiv;
+ unsigned long parent_rate_saved = *best_parent_rate;
+
+ if (!rate)
+ rate = 1;
+
+ maxdiv = _get_maxdiv(divider);
+
+ if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
+ parent_rate = *best_parent_rate;
+ bestdiv = DIV_ROUND_UP(parent_rate, rate);
+ bestdiv = bestdiv == 0 ? 1 : bestdiv;
+ bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
+ return bestdiv;
+ }
+
+ /*
+ * The maximum divider we can use without overflowing
+ * unsigned long in rate * i below
+ */
+ maxdiv = min(ULONG_MAX / rate, maxdiv);
+
+ for (i = 1; i <= maxdiv; i++) {
+ if (!_is_valid_div(divider, i))
+ continue;
+ if (rate * i == parent_rate_saved) {
+ /*
+ * It's the ideal case if the requested rate can be
+ * divided exactly from the parent clock without needing to
+ * change the parent rate, so return the divider immediately.
+ */
+ *best_parent_rate = parent_rate_saved;
+ return i;
+ }
+ parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
+ MULT_ROUND_UP(rate, i));
+ now = parent_rate / i;
+ if (now <= rate && now > best) {
+ bestdiv = i;
+ best = now;
+ *best_parent_rate = parent_rate;
+ }
+ }
+
+ if (!bestdiv) {
+ bestdiv = _get_maxdiv(divider);
+ *best_parent_rate =
+ __clk_round_rate(__clk_get_parent(hw->clk), 1);
+ }
+
+ return bestdiv;
+}
+
+static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int div;
+ div = ti_clk_divider_bestdiv(hw, rate, prate);
+
+ return *prate / div;
+}
+
+static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int div, value;
+ unsigned long flags = 0;
+ u32 val;
+
+ div = parent_rate / rate;
+ value = _get_val(divider, div);
+
+ if (value > div_mask(divider))
+ value = div_mask(divider);
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+
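+ /*
+ * With CLK_DIVIDER_HIWORD_MASK the upper 16 bits of the register act as
+ * a write-enable mask for the lower 16 bits, so the divider field can be
+ * updated without a read-modify-write cycle.
+ */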
+ if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
+ val = div_mask(divider) << (divider->shift + 16);
+ } else {
+ val = ti_clk_ll_ops->clk_readl(divider->reg);
+ val &= ~(div_mask(divider) << divider->shift);
+ }
+ val |= value << divider->shift;
+ ti_clk_ll_ops->clk_writel(val, divider->reg);
+
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+
+ return 0;
+}
+
+const struct clk_ops ti_clk_divider_ops = {
+ .recalc_rate = ti_clk_divider_recalc_rate,
+ .round_rate = ti_clk_divider_round_rate,
+ .set_rate = ti_clk_divider_set_rate,
+};
+
+static struct clk *_register_divider(struct device *dev, const char *name,
+ const char *parent_name,
+ unsigned long flags, void __iomem *reg,
+ u8 shift, u8 width, u8 clk_divider_flags,
+ const struct clk_div_table *table,
+ spinlock_t *lock)
+{
+ struct clk_divider *div;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
+ if (width + shift > 16) {
+ pr_warn("divider value exceeds LOWORD field\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* allocate the divider */
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div) {
+ pr_err("%s: could not allocate divider clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &ti_clk_divider_ops;
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_divider assignments */
+ div->reg = reg;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+ div->table = table;
+
+ /* register the clock */
+ clk = clk_register(dev, &div->hw);
+
+ if (IS_ERR(clk))
+ kfree(div);
+
+ return clk;
+}
+
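+/*
+ * Example: "ti,dividers" = <4 8 0 16> yields a table of
+ * { .div = 4, .val = 0 }, { .div = 8, .val = 1 }, { .div = 16, .val = 3 };
+ * zero entries mark register values that have no valid divider.
+ */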
+static struct clk_div_table
+__init *ti_clk_get_div_table(struct device_node *node)
+{
+ struct clk_div_table *table;
+ const __be32 *divspec;
+ u32 val;
+ u32 num_div;
+ u32 valid_div;
+ int i;
+
+ divspec = of_get_property(node, "ti,dividers", &num_div);
+
+ if (!divspec)
+ return NULL;
+
+ num_div /= 4;
+
+ valid_div = 0;
+
+ /* Determine required size for divider table */
+ for (i = 0; i < num_div; i++) {
+ of_property_read_u32_index(node, "ti,dividers", i, &val);
+ if (val)
+ valid_div++;
+ }
+
+ if (!valid_div) {
+ pr_err("no valid dividers for %s table\n", node->name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ table = kzalloc(sizeof(*table) * (valid_div + 1), GFP_KERNEL);
+
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ valid_div = 0;
+
+ for (i = 0; i < num_div; i++) {
+ of_property_read_u32_index(node, "ti,dividers", i, &val);
+ if (val) {
+ table[valid_div].div = val;
+ table[valid_div].val = i;
+ valid_div++;
+ }
+ }
+
+ return table;
+}
+
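+/*
+ * Width examples: "ti,max-div" = <16> with a zero-based index needs
+ * fls(15) = 4 bits, with "ti,index-starts-at-one" it needs fls(16) = 5 bits,
+ * and with "ti,index-power-of-two" only the exponent is stored, so
+ * fls(4) = 3 bits suffice.
+ */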
+static int _get_divider_width(struct device_node *node,
+ const struct clk_div_table *table,
+ u8 flags)
+{
+ u32 min_div;
+ u32 max_div;
+ u32 val = 0;
+ u32 div;
+
+ if (!table) {
+ /* Clk divider table not provided, determine min/max divs */
+ if (of_property_read_u32(node, "ti,min-div", &min_div))
+ min_div = 1;
+
+ if (of_property_read_u32(node, "ti,max-div", &max_div)) {
+ pr_err("no max-div for %s!\n", node->name);
+ return -EINVAL;
+ }
+
+ /* Determine bit width for the field */
+ if (flags & CLK_DIVIDER_ONE_BASED)
+ val = 1;
+
+ div = min_div;
+
+ while (div < max_div) {
+ if (flags & CLK_DIVIDER_POWER_OF_TWO)
+ div <<= 1;
+ else
+ div++;
+ val++;
+ }
+ } else {
+ div = 0;
+
+ while (table[div].div) {
+ val = table[div].val;
+ div++;
+ }
+ }
+
+ return fls(val);
+}
+
+static int __init ti_clk_divider_populate(struct device_node *node,
+ void __iomem **reg, const struct clk_div_table **table,
+ u32 *flags, u8 *div_flags, u8 *width, u8 *shift)
+{
+ u32 val;
+
+ *reg = ti_clk_get_reg_addr(node, 0);
+ if (!*reg)
+ return -EINVAL;
+
+ if (!of_property_read_u32(node, "ti,bit-shift", &val))
+ *shift = val;
+ else
+ *shift = 0;
+
+ *flags = 0;
+ *div_flags = 0;
+
+ if (of_property_read_bool(node, "ti,index-starts-at-one"))
+ *div_flags |= CLK_DIVIDER_ONE_BASED;
+
+ if (of_property_read_bool(node, "ti,index-power-of-two"))
+ *div_flags |= CLK_DIVIDER_POWER_OF_TWO;
+
+ if (of_property_read_bool(node, "ti,set-rate-parent"))
+ *flags |= CLK_SET_RATE_PARENT;
+
+ *table = ti_clk_get_div_table(node);
+
+ if (IS_ERR(*table))
+ return PTR_ERR(*table);
+
+ *width = _get_divider_width(node, *table, *div_flags);
+
+ return 0;
+}
+
+/**
+ * of_ti_divider_clk_setup - Setup function for simple div rate clock
+ * @node: device node for this clock
+ *
+ * Sets up a basic divider clock.
+ */
+static void __init of_ti_divider_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ const char *parent_name;
+ void __iomem *reg;
+ u8 clk_divider_flags = 0;
+ u8 width = 0;
+ u8 shift = 0;
+ const struct clk_div_table *table = NULL;
+ u32 flags = 0;
+
+ parent_name = of_clk_get_parent_name(node, 0);
+
+ if (ti_clk_divider_populate(node, &reg, &table, &flags,
+ &clk_divider_flags, &width, &shift))
+ goto cleanup;
+
+ clk = _register_divider(NULL, node->name, parent_name, flags, reg,
+ shift, width, clk_divider_flags, table, NULL);
+
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ of_ti_clk_autoidle_setup(node);
+ return;
+ }
+
+cleanup:
+ kfree(table);
+}
+CLK_OF_DECLARE(divider_clk, "ti,divider-clock", of_ti_divider_clk_setup);
+
+static void __init of_ti_composite_divider_clk_setup(struct device_node *node)
+{
+ struct clk_divider *div;
+ u32 val;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return;
+
+ if (ti_clk_divider_populate(node, &div->reg, &div->table, &val,
+ &div->flags, &div->width, &div->shift) < 0)
+ goto cleanup;
+
+ if (!ti_clk_add_component(node, &div->hw, CLK_COMPONENT_TYPE_DIVIDER))
+ return;
+
+cleanup:
+ kfree(div->table);
+ kfree(div);
+}
+CLK_OF_DECLARE(ti_composite_divider_clk, "ti,composite-divider-clock",
+ of_ti_composite_divider_clk_setup);
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
new file mode 100644
index 000000000000..7e498a44f97d
--- /dev/null
+++ b/drivers/clk/ti/dpll.c
@@ -0,0 +1,558 @@
+/*
+ * OMAP DPLL clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#define DPLL_HAS_AUTOIDLE 0x1
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+ defined(CONFIG_SOC_DRA7XX)
+static const struct clk_ops dpll_m4xen_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .recalc_rate = &omap4_dpll_regm4xen_recalc,
+ .round_rate = &omap4_dpll_regm4xen_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .get_parent = &omap2_init_dpll_parent,
+};
+#endif
+
+static const struct clk_ops dpll_core_ck_ops = {
+ .recalc_rate = &omap3_dpll_recalc,
+ .get_parent = &omap2_init_dpll_parent,
+};
+
+#ifdef CONFIG_ARCH_OMAP3
+static const struct clk_ops omap3_dpll_core_ck_ops = {
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+};
+#endif
+
+static const struct clk_ops dpll_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .recalc_rate = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .get_parent = &omap2_init_dpll_parent,
+};
+
+static const struct clk_ops dpll_no_gate_ck_ops = {
+ .recalc_rate = &omap3_dpll_recalc,
+ .get_parent = &omap2_init_dpll_parent,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+};
+
+#ifdef CONFIG_ARCH_OMAP3
+static const struct clk_ops omap3_dpll_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap3_dpll_recalc,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .round_rate = &omap2_dpll_round_rate,
+};
+
+static const struct clk_ops omap3_dpll_per_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap3_dpll_recalc,
+ .set_rate = &omap3_dpll4_set_rate,
+ .round_rate = &omap2_dpll_round_rate,
+};
+#endif
+
+static const struct clk_ops dpll_x2_ck_ops = {
+ .recalc_rate = &omap3_clkoutx2_recalc,
+};
+
+/**
+ * ti_clk_register_dpll - low level registration of a DPLL clock
+ * @hw: hardware clock definition for the clock
+ * @node: device node for the clock
+ *
+ * Finalizes the DPLL registration process. If clk-ref or clk-bypass
+ * is missing, the clock is added to the retry list and the
+ * initialization is retried at a later stage.
+ */
+static void __init ti_clk_register_dpll(struct clk_hw *hw,
+ struct device_node *node)
+{
+ struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
+ struct dpll_data *dd = clk_hw->dpll_data;
+ struct clk *clk;
+
+ dd->clk_ref = of_clk_get(node, 0);
+ dd->clk_bypass = of_clk_get(node, 1);
+
+ if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) {
+ pr_debug("clk-ref or clk-bypass missing for %s, retry later\n",
+ node->name);
+ if (!ti_clk_retry_init(node, hw, ti_clk_register_dpll))
+ return;
+
+ goto cleanup;
+ }
+
+ /* register the clock */
+ clk = clk_register(NULL, &clk_hw->hw);
+
+ if (!IS_ERR(clk)) {
+ omap2_init_clk_hw_omap_clocks(clk);
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ kfree(clk_hw->hw.init->parent_names);
+ kfree(clk_hw->hw.init);
+ return;
+ }
+
+cleanup:
+ kfree(clk_hw->dpll_data);
+ kfree(clk_hw->hw.init->parent_names);
+ kfree(clk_hw->hw.init);
+ kfree(clk_hw);
+}
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+ defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX)
+/**
+ * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock
+ * @node: device node for this clock
+ * @ops: clk_ops for this clock
+ * @hw_ops: clk_hw_ops for this clock
+ *
+ * Initializes a DPLL x 2 clock from device tree data.
+ */
+static void ti_clk_register_dpll_x2(struct device_node *node,
+ const struct clk_ops *ops,
+ const struct clk_hw_omap_ops *hw_ops)
+{
+ struct clk *clk;
+ struct clk_init_data init = { NULL };
+ struct clk_hw_omap *clk_hw;
+ const char *name = node->name;
+ const char *parent_name;
+
+ parent_name = of_clk_get_parent_name(node, 0);
+ if (!parent_name) {
+ pr_err("%s must have parent\n", node->name);
+ return;
+ }
+
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ if (!clk_hw)
+ return;
+
+ clk_hw->ops = hw_ops;
+ clk_hw->hw.init = &init;
+
+ init.name = name;
+ init.ops = ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ /* register the clock */
+ clk = clk_register(NULL, &clk_hw->hw);
+
+ if (IS_ERR(clk)) {
+ kfree(clk_hw);
+ } else {
+ omap2_init_clk_hw_omap_clocks(clk);
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ }
+}
+#endif
+
+/**
+ * of_ti_dpll_setup - Setup function for OMAP DPLL clocks
+ * @node: device node containing the DPLL info
+ * @ops: ops for the DPLL
+ * @ddt: DPLL data template to use
+ * @init_flags: flags for controlling init types
+ *
+ * Initializes a DPLL clock from device tree data.
+ */
+static void __init of_ti_dpll_setup(struct device_node *node,
+ const struct clk_ops *ops,
+ const struct dpll_data *ddt,
+ u8 init_flags)
+{
+ struct clk_hw_omap *clk_hw = NULL;
+ struct clk_init_data *init = NULL;
+ const char **parent_names = NULL;
+ struct dpll_data *dd = NULL;
+ int i;
+ u8 dpll_mode = 0;
+
+ dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ init = kzalloc(sizeof(*init), GFP_KERNEL);
+ if (!dd || !clk_hw || !init)
+ goto cleanup;
+
+ memcpy(dd, ddt, sizeof(*dd));
+
+ clk_hw->dpll_data = dd;
+ clk_hw->ops = &clkhwops_omap3_dpll;
+ clk_hw->hw.init = init;
+ clk_hw->flags = MEMMAP_ADDRESSING;
+
+ init->name = node->name;
+ init->ops = ops;
+
+ init->num_parents = of_clk_get_parent_count(node);
+ if (init->num_parents < 1) {
+ pr_err("%s must have parent(s)\n", node->name);
+ goto cleanup;
+ }
+
+ parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL);
+ if (!parent_names)
+ goto cleanup;
+
+ for (i = 0; i < init->num_parents; i++)
+ parent_names[i] = of_clk_get_parent_name(node, i);
+
+ init->parent_names = parent_names;
+
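+ /*
+ * The "reg" entries map, in order, to the control, idlest and
+ * mult_div1 registers, plus autoidle when DPLL_HAS_AUTOIDLE is set.
+ */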
+ dd->control_reg = ti_clk_get_reg_addr(node, 0);
+ dd->idlest_reg = ti_clk_get_reg_addr(node, 1);
+ dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2);
+
+ if (!dd->control_reg || !dd->idlest_reg || !dd->mult_div1_reg)
+ goto cleanup;
+
+ if (init_flags & DPLL_HAS_AUTOIDLE) {
+ dd->autoidle_reg = ti_clk_get_reg_addr(node, 3);
+ if (!dd->autoidle_reg)
+ goto cleanup;
+ }
+
+ if (of_property_read_bool(node, "ti,low-power-stop"))
+ dpll_mode |= 1 << DPLL_LOW_POWER_STOP;
+
+ if (of_property_read_bool(node, "ti,low-power-bypass"))
+ dpll_mode |= 1 << DPLL_LOW_POWER_BYPASS;
+
+ if (of_property_read_bool(node, "ti,lock"))
+ dpll_mode |= 1 << DPLL_LOCKED;
+
+ if (dpll_mode)
+ dd->modes = dpll_mode;
+
+ ti_clk_register_dpll(&clk_hw->hw, node);
+ return;
+
+cleanup:
+ kfree(dd);
+ kfree(parent_names);
+ kfree(init);
+ kfree(clk_hw);
+}
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+ defined(CONFIG_SOC_DRA7XX)
+static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
+{
+ ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
+}
+CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
+ of_ti_omap4_dpll_x2_setup);
+#endif
+
+#ifdef CONFIG_SOC_AM33XX
+static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
+{
+ ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
+}
+CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
+ of_ti_am3_dpll_x2_setup);
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+static void __init of_ti_omap3_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .freqsel_mask = 0xf0,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
+ of_ti_omap3_dpll_setup);
+
+static void __init of_ti_omap3_core_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 16,
+ .div1_mask = 0x7f << 8,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .freqsel_mask = 0xf0,
+ };
+
+ of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap3_core_dpll_clock, "ti,omap3-dpll-core-clock",
+ of_ti_omap3_core_dpll_setup);
+
+static void __init of_ti_omap3_per_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1 << 1,
+ .enable_mask = 0x7 << 16,
+ .autoidle_mask = 0x7 << 3,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .freqsel_mask = 0xf00000,
+ .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap3_per_dpll_clock, "ti,omap3-dpll-per-clock",
+ of_ti_omap3_per_dpll_setup);
+
+static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1 << 1,
+ .enable_mask = 0x7 << 16,
+ .autoidle_mask = 0x7 << 3,
+ .mult_mask = 0xfff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 4095,
+ .max_divider = 128,
+ .min_divider = 1,
+ .sddiv_mask = 0xff << 24,
+ .dco_mask = 0xe << 20,
+ .flags = DPLL_J_TYPE,
+ .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap3_per_jtype_dpll_clock, "ti,omap3-dpll-per-j-type-clock",
+ of_ti_omap3_per_jtype_dpll_setup);
+#endif
+
+static void __init of_ti_omap4_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap4_dpll_clock, "ti,omap4-dpll-clock",
+ of_ti_omap4_dpll_setup);
+
+static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap4_core_dpll_clock, "ti,omap4-dpll-core-clock",
+ of_ti_omap4_core_dpll_setup);
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+ defined(CONFIG_SOC_DRA7XX)
+static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .m4xen_mask = 0x800,
+ .lpmode_mask = 1 << 10,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap4_m4xen_dpll_clock, "ti,omap4-dpll-m4xen-clock",
+ of_ti_omap4_m4xen_dpll_setup);
+
+static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0xfff << 8,
+ .div1_mask = 0xff,
+ .max_multiplier = 4095,
+ .max_divider = 256,
+ .min_divider = 1,
+ .sddiv_mask = 0xff << 24,
+ .flags = DPLL_J_TYPE,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap4_jtype_dpll_clock, "ti,omap4-dpll-j-type-clock",
+ of_ti_omap4_jtype_dpll_setup);
+#endif
+
+static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd, 0);
+}
+CLK_OF_DECLARE(ti_am3_no_gate_dpll_clock, "ti,am3-dpll-no-gate-clock",
+ of_ti_am3_no_gate_dpll_setup);
+
+static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 4095,
+ .max_divider = 256,
+ .min_divider = 2,
+ .flags = DPLL_J_TYPE,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_ck_ops, &dd, 0);
+}
+CLK_OF_DECLARE(ti_am3_jtype_dpll_clock, "ti,am3-dpll-j-type-clock",
+ of_ti_am3_jtype_dpll_setup);
+
+static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .flags = DPLL_J_TYPE,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd, 0);
+}
+CLK_OF_DECLARE(ti_am3_no_gate_jtype_dpll_clock,
+ "ti,am3-dpll-no-gate-j-type-clock",
+ of_ti_am3_no_gate_jtype_dpll_setup);
+
+static void __init of_ti_am3_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_ck_ops, &dd, 0);
+}
+CLK_OF_DECLARE(ti_am3_dpll_clock, "ti,am3-dpll-clock", of_ti_am3_dpll_setup);
+
+static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd, 0);
+}
+CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock",
+ of_ti_am3_core_dpll_setup);
diff --git a/drivers/clk/ti/fixed-factor.c b/drivers/clk/ti/fixed-factor.c
new file mode 100644
index 000000000000..c2c8a287408c
--- /dev/null
+++ b/drivers/clk/ti/fixed-factor.c
@@ -0,0 +1,66 @@
+/*
+ * TI Fixed Factor Clock
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+/**
+ * of_ti_fixed_factor_clk_setup - Setup function for TI fixed factor clock
+ * @node: device node for this clock
+ *
+ * Sets up a simple fixed factor clock based on device tree info.
+ */
+static void __init of_ti_fixed_factor_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ const char *clk_name = node->name;
+ const char *parent_name;
+ u32 div, mult;
+ u32 flags = 0;
+
+ if (of_property_read_u32(node, "ti,clock-div", &div)) {
+ pr_err("%s must have a clock-div property\n", node->name);
+ return;
+ }
+
+ if (of_property_read_u32(node, "ti,clock-mult", &mult)) {
+ pr_err("%s must have a clock-mult property\n", node->name);
+ return;
+ }
+
+ if (of_property_read_bool(node, "ti,set-rate-parent"))
+ flags |= CLK_SET_RATE_PARENT;
+
+ parent_name = of_clk_get_parent_name(node, 0);
+
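+ /*
+ * The resulting rate is parent_rate * mult / div, e.g. "ti,clock-mult"
+ * = <1> with "ti,clock-div" = <2> halves the parent rate.
+ */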
+ clk = clk_register_fixed_factor(NULL, clk_name, parent_name, flags,
+ mult, div);
+
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ of_ti_clk_autoidle_setup(node);
+ }
+}
+CLK_OF_DECLARE(ti_fixed_factor_clk, "ti,fixed-factor-clock",
+ of_ti_fixed_factor_clk_setup);
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
new file mode 100644
index 000000000000..3e2999d11d15
--- /dev/null
+++ b/drivers/clk/ti/gate.c
@@ -0,0 +1,249 @@
+/*
+ * OMAP gate clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk);
+
+static const struct clk_ops omap_gate_clkdm_clk_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_clkops_enable_clkdm,
+ .disable = &omap2_clkops_disable_clkdm,
+};
+
+static const struct clk_ops omap_gate_clk_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+};
+
+static const struct clk_ops omap_gate_clk_hsdiv_restore_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap36xx_gate_clk_enable_with_hsdiv_restore,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+};
+
+/**
+ * omap36xx_gate_clk_enable_with_hsdiv_restore - enable clocks suffering
+ * from the HSDivider PWRDN problem. Implements Errata ID: i556.
+ * @clk: DPLL output struct clk
+ *
+ * 3630 only: the dpll3_m3_ck, dpll4_m2_ck, dpll4_m3_ck, dpll4_m4_ck,
+ * dpll4_m5_ck & dpll4_m6_ck dividers get loaded with the reset
+ * value after their respective PWRDN bits are set. Any dummy write
+ * (any value different from the read value) to the
+ * corresponding CM_CLKSEL register will refresh the dividers.
+ */
+static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
+{
+ struct clk_divider *parent;
+ struct clk_hw *parent_hw;
+ u32 dummy_v, orig_v;
+ int ret;
+
+ /* Clear PWRDN bit of HSDIVIDER */
+ ret = omap2_dflt_clk_enable(clk);
+
+ /* Parent is the x2 node, get parent of parent for the m2 div */
+ parent_hw = __clk_get_hw(__clk_get_parent(__clk_get_parent(clk->clk)));
+ parent = to_clk_divider(parent_hw);
+
+ /* Restore the dividers */
+ if (!ret) {
+ orig_v = ti_clk_ll_ops->clk_readl(parent->reg);
+ dummy_v = orig_v;
+
+ /* Write any other value different from the Read value */
+ dummy_v ^= (1 << parent->shift);
+ ti_clk_ll_ops->clk_writel(dummy_v, parent->reg);
+
+ /* Write the original divider */
+ ti_clk_ll_ops->clk_writel(orig_v, parent->reg);
+ }
+
+ return ret;
+}
+
+static void __init _of_ti_gate_clk_setup(struct device_node *node,
+ const struct clk_ops *ops,
+ const struct clk_hw_omap_ops *hw_ops)
+{
+ struct clk *clk;
+ struct clk_init_data init = { NULL };
+ struct clk_hw_omap *clk_hw;
+ const char *clk_name = node->name;
+ const char *parent_name;
+ u32 val;
+
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ if (!clk_hw)
+ return;
+
+ clk_hw->hw.init = &init;
+
+ init.name = clk_name;
+ init.ops = ops;
+
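+ /* Clockdomain-level gates have no enable register of their own */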
+ if (ops != &omap_gate_clkdm_clk_ops) {
+ clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0);
+ if (!clk_hw->enable_reg)
+ goto cleanup;
+
+ if (!of_property_read_u32(node, "ti,bit-shift", &val))
+ clk_hw->enable_bit = val;
+ }
+
+ clk_hw->ops = hw_ops;
+
+ clk_hw->flags = MEMMAP_ADDRESSING;
+
+ if (of_clk_get_parent_count(node) != 1) {
+ pr_err("%s must have 1 parent\n", clk_name);
+ goto cleanup;
+ }
+
+ parent_name = of_clk_get_parent_name(node, 0);
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ if (of_property_read_bool(node, "ti,set-rate-parent"))
+ init.flags |= CLK_SET_RATE_PARENT;
+
+ if (of_property_read_bool(node, "ti,set-bit-to-disable"))
+ clk_hw->flags |= INVERT_ENABLE;
+
+ clk = clk_register(NULL, &clk_hw->hw);
+
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ return;
+ }
+
+cleanup:
+ kfree(clk_hw);
+}
+
+static void __init
+_of_ti_composite_gate_clk_setup(struct device_node *node,
+ const struct clk_hw_omap_ops *hw_ops)
+{
+ struct clk_hw_omap *gate;
+ u32 val = 0;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return;
+
+ gate->enable_reg = ti_clk_get_reg_addr(node, 0);
+ if (!gate->enable_reg)
+ goto cleanup;
+
+ of_property_read_u32(node, "ti,bit-shift", &val);
+
+ gate->enable_bit = val;
+ gate->ops = hw_ops;
+ gate->flags = MEMMAP_ADDRESSING;
+
+ if (!ti_clk_add_component(node, &gate->hw, CLK_COMPONENT_TYPE_GATE))
+ return;
+
+cleanup:
+ kfree(gate);
+}
+
+static void __init
+of_ti_composite_no_wait_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_composite_gate_clk_setup(node, NULL);
+}
+CLK_OF_DECLARE(ti_composite_no_wait_gate_clk, "ti,composite-no-wait-gate-clock",
+ of_ti_composite_no_wait_gate_clk_setup);
+
+#ifdef CONFIG_ARCH_OMAP3
+static void __init of_ti_composite_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_composite_gate_clk_setup(node, &clkhwops_iclk_wait);
+}
+CLK_OF_DECLARE(ti_composite_interface_clk, "ti,composite-interface-clock",
+ of_ti_composite_interface_clk_setup);
+#endif
+
+static void __init of_ti_composite_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_composite_gate_clk_setup(node, &clkhwops_wait);
+}
+CLK_OF_DECLARE(ti_composite_gate_clk, "ti,composite-gate-clock",
+ of_ti_composite_gate_clk_setup);
+
+
+static void __init of_ti_clkdm_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clkdm_clk_ops, NULL);
+}
+CLK_OF_DECLARE(ti_clkdm_gate_clk, "ti,clkdm-gate-clock",
+ of_ti_clkdm_gate_clk_setup);
+
+static void __init of_ti_hsdiv_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clk_hsdiv_restore_ops,
+ &clkhwops_wait);
+}
+CLK_OF_DECLARE(ti_hsdiv_gate_clk, "ti,hsdiv-gate-clock",
+ of_ti_hsdiv_gate_clk_setup);
+
+static void __init of_ti_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clk_ops, NULL);
+}
+CLK_OF_DECLARE(ti_gate_clk, "ti,gate-clock", of_ti_gate_clk_setup);
+
+static void __init of_ti_wait_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clk_ops, &clkhwops_wait);
+}
+CLK_OF_DECLARE(ti_wait_gate_clk, "ti,wait-gate-clock",
+ of_ti_wait_gate_clk_setup);
+
+#ifdef CONFIG_ARCH_OMAP3
+static void __init of_ti_am35xx_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clk_ops,
+ &clkhwops_am35xx_ipss_module_wait);
+}
+CLK_OF_DECLARE(ti_am35xx_gate_clk, "ti,am35xx-gate-clock",
+ of_ti_am35xx_gate_clk_setup);
+
+static void __init of_ti_dss_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clk_ops,
+ &clkhwops_omap3430es2_dss_usbhost_wait);
+}
+CLK_OF_DECLARE(ti_dss_gate_clk, "ti,dss-gate-clock",
+ of_ti_dss_gate_clk_setup);
+#endif
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
new file mode 100644
index 000000000000..320a2b168bb2
--- /dev/null
+++ b/drivers/clk/ti/interface.c
@@ -0,0 +1,125 @@
+/*
+ * OMAP interface clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static const struct clk_ops ti_interface_clk_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+};
+
+static void __init _of_ti_interface_clk_setup(struct device_node *node,
+ const struct clk_hw_omap_ops *ops)
+{
+ struct clk *clk;
+ struct clk_init_data init = { NULL };
+ struct clk_hw_omap *clk_hw;
+ const char *parent_name;
+ u32 val;
+
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ if (!clk_hw)
+ return;
+
+ clk_hw->hw.init = &init;
+ clk_hw->ops = ops;
+ clk_hw->flags = MEMMAP_ADDRESSING;
+
+ clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0);
+ if (!clk_hw->enable_reg)
+ goto cleanup;
+
+ if (!of_property_read_u32(node, "ti,bit-shift", &val))
+ clk_hw->enable_bit = val;
+
+ init.name = node->name;
+ init.ops = &ti_interface_clk_ops;
+ init.flags = 0;
+
+ parent_name = of_clk_get_parent_name(node, 0);
+ if (!parent_name) {
+ pr_err("%s must have a parent\n", node->name);
+ goto cleanup;
+ }
+
+ init.num_parents = 1;
+ init.parent_names = &parent_name;
+
+ clk = clk_register(NULL, &clk_hw->hw);
+
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ omap2_init_clk_hw_omap_clocks(clk);
+ return;
+ }
+
+cleanup:
+ kfree(clk_hw);
+}
+
+static void __init of_ti_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node, &clkhwops_iclk_wait);
+}
+CLK_OF_DECLARE(ti_interface_clk, "ti,omap3-interface-clock",
+ of_ti_interface_clk_setup);
+
+static void __init of_ti_no_wait_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node, &clkhwops_iclk);
+}
+CLK_OF_DECLARE(ti_no_wait_interface_clk, "ti,omap3-no-wait-interface-clock",
+ of_ti_no_wait_interface_clk_setup);
+
+static void __init of_ti_hsotgusb_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node,
+ &clkhwops_omap3430es2_iclk_hsotgusb_wait);
+}
+CLK_OF_DECLARE(ti_hsotgusb_interface_clk, "ti,omap3-hsotgusb-interface-clock",
+ of_ti_hsotgusb_interface_clk_setup);
+
+static void __init of_ti_dss_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node,
+ &clkhwops_omap3430es2_iclk_dss_usbhost_wait);
+}
+CLK_OF_DECLARE(ti_dss_interface_clk, "ti,omap3-dss-interface-clock",
+ of_ti_dss_interface_clk_setup);
+
+static void __init of_ti_ssi_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node, &clkhwops_omap3430es2_iclk_ssi_wait);
+}
+CLK_OF_DECLARE(ti_ssi_interface_clk, "ti,omap3-ssi-interface-clock",
+ of_ti_ssi_interface_clk_setup);
+
+static void __init of_ti_am35xx_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node, &clkhwops_am35xx_ipss_wait);
+}
+CLK_OF_DECLARE(ti_am35xx_interface_clk, "ti,am35xx-interface-clock",
+ of_ti_am35xx_interface_clk_setup);
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
new file mode 100644
index 000000000000..0197a478720c
--- /dev/null
+++ b/drivers/clk/ti/mux.c
@@ -0,0 +1,246 @@
+/*
+ * TI Multiplexer Clock
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
+
+static u8 ti_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ int num_parents = __clk_get_num_parents(hw->clk);
+ u32 val;
+
+ /*
+ * FIXME need a mux-specific flag to determine if val is bitwise or
+ * numeric. e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges
+ * from 0x1 to 0x7 (index starts at one)
+ * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
+ * val = 0x4 really means "bit 2, index starts at bit 0"
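+ * (here 0x4 with CLK_MUX_INDEX_BIT decodes to parent index ffs(4) - 1 = 2)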
+ */
+ val = ti_clk_ll_ops->clk_readl(mux->reg) >> mux->shift;
+ val &= mux->mask;
+
+ if (mux->table) {
+ int i;
+
+ for (i = 0; i < num_parents; i++)
+ if (mux->table[i] == val)
+ return i;
+ return -EINVAL;
+ }
+
+ if (val && (mux->flags & CLK_MUX_INDEX_BIT))
+ val = ffs(val) - 1;
+
+ if (val && (mux->flags & CLK_MUX_INDEX_ONE))
+ val--;
+
+ if (val >= num_parents)
+ return -EINVAL;
+
+ return val;
+}
+
+static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val;
+ unsigned long flags = 0;
+
+ if (mux->table) {
+ index = mux->table[index];
+ } else {
+ if (mux->flags & CLK_MUX_INDEX_BIT)
+ index = (1 << ffs(index));
+
+ if (mux->flags & CLK_MUX_INDEX_ONE)
+ index++;
+ }
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+
+ if (mux->flags & CLK_MUX_HIWORD_MASK) {
+ val = mux->mask << (mux->shift + 16);
+ } else {
+ val = ti_clk_ll_ops->clk_readl(mux->reg);
+ val &= ~(mux->mask << mux->shift);
+ }
+ val |= index << mux->shift;
+ ti_clk_ll_ops->clk_writel(val, mux->reg);
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+
+ return 0;
+}
+
+const struct clk_ops ti_clk_mux_ops = {
+ .get_parent = ti_clk_mux_get_parent,
+ .set_parent = ti_clk_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+static struct clk *_register_mux(struct device *dev, const char *name,
+ const char **parent_names, u8 num_parents,
+ unsigned long flags, void __iomem *reg,
+ u8 shift, u32 mask, u8 clk_mux_flags,
+ u32 *table, spinlock_t *lock)
+{
+ struct clk_mux *mux;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ /* allocate the mux */
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux) {
+ pr_err("%s: could not allocate mux clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &ti_clk_mux_ops;
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ /* struct clk_mux assignments */
+ mux->reg = reg;
+ mux->shift = shift;
+ mux->mask = mask;
+ mux->flags = clk_mux_flags;
+ mux->lock = lock;
+ mux->table = table;
+ mux->hw.init = &init;
+
+ clk = clk_register(dev, &mux->hw);
+
+ if (IS_ERR(clk))
+ kfree(mux);
+
+ return clk;
+}
+
+/**
+ * of_mux_clk_setup - Setup function for simple mux rate clock
+ * @node: DT node for the clock
+ *
+ * Sets up a basic clock multiplexer.
+ */
+static void of_mux_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ void __iomem *reg;
+ int num_parents;
+ const char **parent_names;
+ int i;
+ u8 clk_mux_flags = 0;
+ u32 mask = 0;
+ u32 shift = 0;
+ u32 flags = 0;
+
+ num_parents = of_clk_get_parent_count(node);
+ if (num_parents < 2) {
+ pr_err("mux-clock %s must have parents\n", node->name);
+ return;
+ }
+ parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL);
+ if (!parent_names)
+ goto cleanup;
+
+ for (i = 0; i < num_parents; i++)
+ parent_names[i] = of_clk_get_parent_name(node, i);
+
+ reg = ti_clk_get_reg_addr(node, 0);
+
+ if (!reg)
+ goto cleanup;
+
+ of_property_read_u32(node, "ti,bit-shift", &shift);
+
+ if (of_property_read_bool(node, "ti,index-starts-at-one"))
+ clk_mux_flags |= CLK_MUX_INDEX_ONE;
+
+ if (of_property_read_bool(node, "ti,set-rate-parent"))
+ flags |= CLK_SET_RATE_PARENT;
+
+ /* Generate bit-mask based on parent info */
+ mask = num_parents;
+ if (!(clk_mux_flags & CLK_MUX_INDEX_ONE))
+ mask--;
+
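+ /* e.g. 5 parents with a zero-based index -> values 0..4 -> mask = 0x7 */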
+ mask = (1 << fls(mask)) - 1;
+
+ clk = _register_mux(NULL, node->name, parent_names, num_parents, flags,
+ reg, shift, mask, clk_mux_flags, NULL, NULL);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+cleanup:
+ kfree(parent_names);
+}
+CLK_OF_DECLARE(mux_clk, "ti,mux-clock", of_mux_clk_setup);
+
+static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
+{
+ struct clk_mux *mux;
+ int num_parents;
+ u32 val;
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return;
+
+ mux->reg = ti_clk_get_reg_addr(node, 0);
+
+ if (!mux->reg)
+ goto cleanup;
+
+ if (!of_property_read_u32(node, "ti,bit-shift", &val))
+ mux->shift = val;
+
+ if (of_property_read_bool(node, "ti,index-starts-at-one"))
+ mux->flags |= CLK_MUX_INDEX_ONE;
+
+ num_parents = of_clk_get_parent_count(node);
+
+ if (num_parents < 2) {
+ pr_err("%s must have parents\n", node->name);
+ goto cleanup;
+ }
+
+ mux->mask = num_parents - 1;
+ mux->mask = (1 << fls(mux->mask)) - 1;
+
+ if (!ti_clk_add_component(node, &mux->hw, CLK_COMPONENT_TYPE_MUX))
+ return;
+
+cleanup:
+ kfree(mux);
+}
+CLK_OF_DECLARE(ti_composite_mux_clk_setup, "ti,composite-mux-clock",
+ of_ti_composite_mux_clk_setup);
diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c
index 293a28854417..e2d63bc47436 100644
--- a/drivers/clk/ux500/clk-prcmu.c
+++ b/drivers/clk/ux500/clk-prcmu.c
@@ -36,7 +36,7 @@ static int clk_prcmu_prepare(struct clk_hw *hw)
if (!ret)
clk->is_prepared = 1;
- return ret;;
+ return ret;
}
static void clk_prcmu_unprepare(struct clk_hw *hw)
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index f5e4c21b301f..8cbfcf88fae3 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -119,6 +119,7 @@ static const struct clk_ops icst_ops = {
struct clk *icst_clk_register(struct device *dev,
const struct clk_icst_desc *desc,
+ const char *name,
void __iomem *base)
{
struct clk *clk;
@@ -130,7 +131,7 @@ struct clk *icst_clk_register(struct device *dev,
pr_err("could not allocate ICST clock!\n");
return ERR_PTR(-ENOMEM);
}
- init.name = "icst";
+ init.name = name;
init.ops = &icst_ops;
init.flags = CLK_IS_ROOT;
init.parent_names = NULL;
diff --git a/drivers/clk/versatile/clk-icst.h b/drivers/clk/versatile/clk-icst.h
index dad51b6ffd00..be99dd0da785 100644
--- a/drivers/clk/versatile/clk-icst.h
+++ b/drivers/clk/versatile/clk-icst.h
@@ -15,4 +15,5 @@ struct clk_icst_desc {
struct clk *icst_clk_register(struct device *dev,
const struct clk_icst_desc *desc,
+ const char *name,
void __iomem *base);
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
index 369139af2a3b..844f8d711a12 100644
--- a/drivers/clk/versatile/clk-impd1.c
+++ b/drivers/clk/versatile/clk-impd1.c
@@ -1,6 +1,6 @@
/*
* Clock driver for the ARM Integrator/IM-PD1 board
- * Copyright (C) 2012 Linus Walleij
+ * Copyright (C) 2012-2013 Linus Walleij
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,20 +18,28 @@
#include "clk-icst.h"
struct impd1_clk {
- struct clk *vcoclk;
+ char *vco1name;
+ struct clk *vco1clk;
+ char *vco2name;
+ struct clk *vco2clk;
+ struct clk *mmciclk;
+ char *uartname;
struct clk *uartclk;
- struct clk_lookup *clks[3];
+ char *spiname;
+ struct clk *spiclk;
+ char *scname;
+ struct clk *scclk;
+ struct clk_lookup *clks[6];
};
+/* One entry for each connected IM-PD1 LM */
static struct impd1_clk impd1_clks[4];
/*
- * There are two VCO's on the IM-PD1 but only one is used by the
- * kernel, that is why we are only implementing the control of
- * IMPD1_OSC1 here.
+ * There are two VCOs on the IM-PD1
*/
-static const struct icst_params impd1_vco_params = {
+static const struct icst_params impd1_vco1_params = {
.ref = 24000000, /* 24 MHz */
.vco_max = ICST525_VCO_MAX_3V,
.vco_min = ICST525_VCO_MIN,
@@ -44,11 +52,29 @@ static const struct icst_params impd1_vco_params = {
};
static const struct clk_icst_desc impd1_icst1_desc = {
- .params = &impd1_vco_params,
+ .params = &impd1_vco1_params,
.vco_offset = IMPD1_OSC1,
.lock_offset = IMPD1_LOCK,
};
+static const struct icst_params impd1_vco2_params = {
+ .ref = 24000000, /* 24 MHz */
+ .vco_max = ICST525_VCO_MAX_3V,
+ .vco_min = ICST525_VCO_MIN,
+ .vd_min = 12,
+ .vd_max = 519,
+ .rd_min = 3,
+ .rd_max = 120,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
+};
+
+static const struct clk_icst_desc impd1_icst2_desc = {
+ .params = &impd1_vco2_params,
+ .vco_offset = IMPD1_OSC2,
+ .lock_offset = IMPD1_LOCK,
+};
+
/**
* integrator_impd1_clk_init() - set up the integrator clock tree
* @base: base address of the logic module (LM)
@@ -66,16 +92,39 @@ void integrator_impd1_clk_init(void __iomem *base, unsigned int id)
}
imc = &impd1_clks[id];
- clk = icst_clk_register(NULL, &impd1_icst1_desc, base);
- imc->vcoclk = clk;
+ imc->vco1name = kasprintf(GFP_KERNEL, "lm%x-vco1", id);
+ clk = icst_clk_register(NULL, &impd1_icst1_desc, imc->vco1name, base);
+ imc->vco1clk = clk;
imc->clks[0] = clkdev_alloc(clk, NULL, "lm%x:01000", id);
- /* UART reference clock */
- clk = clk_register_fixed_rate(NULL, "uartclk", NULL, CLK_IS_ROOT,
- 14745600);
+ /* VCO2 is also called "CLK2" */
+ imc->vco2name = kasprintf(GFP_KERNEL, "lm%x-vco2", id);
+ clk = icst_clk_register(NULL, &impd1_icst2_desc, imc->vco2name, base);
+ imc->vco2clk = clk;
+
+ /* MMCI uses CLK2 right off */
+ imc->clks[1] = clkdev_alloc(clk, NULL, "lm%x:00700", id);
+
+ /* UART reference clock divides CLK2 by a fixed factor of 4 */
+ imc->uartname = kasprintf(GFP_KERNEL, "lm%x-uartclk", id);
+ clk = clk_register_fixed_factor(NULL, imc->uartname, imc->vco2name,
+ CLK_IGNORE_UNUSED, 1, 4);
imc->uartclk = clk;
- imc->clks[1] = clkdev_alloc(clk, NULL, "lm%x:00100", id);
- imc->clks[2] = clkdev_alloc(clk, NULL, "lm%x:00200", id);
+ imc->clks[2] = clkdev_alloc(clk, NULL, "lm%x:00100", id);
+ imc->clks[3] = clkdev_alloc(clk, NULL, "lm%x:00200", id);
+
+ /* SPI PL022 clock divides CLK2 by a fixed factor of 64 */
+ imc->spiname = kasprintf(GFP_KERNEL, "lm%x-spiclk", id);
+ clk = clk_register_fixed_factor(NULL, imc->spiname, imc->vco2name,
+ CLK_IGNORE_UNUSED, 1, 64);
+ imc->spiclk = clk;
+ imc->clks[4] = clkdev_alloc(clk, NULL, "lm%x:00300", id);
+
+ /* Smart Card clock divides CLK2 by a fixed factor of 4 */
+ imc->scname = kasprintf(GFP_KERNEL, "lm%x-scclk", id);
+ clk = clk_register_fixed_factor(NULL, imc->scname, imc->vco2name,
+ CLK_IGNORE_UNUSED, 1, 4);
+ imc->scclk = clk;
+ imc->clks[5] = clkdev_alloc(clk, NULL, "lm%x:00600", id);
for (i = 0; i < ARRAY_SIZE(imc->clks); i++)
clkdev_add(imc->clks[i]);
@@ -92,6 +141,13 @@ void integrator_impd1_clk_exit(unsigned int id)
for (i = 0; i < ARRAY_SIZE(imc->clks); i++)
clkdev_drop(imc->clks[i]);
+ clk_unregister(imc->spiclk);
clk_unregister(imc->uartclk);
- clk_unregister(imc->vcoclk);
+ clk_unregister(imc->vco2clk);
+ clk_unregister(imc->vco1clk);
+ kfree(imc->scname);
+ kfree(imc->spiname);
+ kfree(imc->uartname);
+ kfree(imc->vco2name);
+ kfree(imc->vco1name);
}
diff --git a/drivers/clk/versatile/clk-integrator.c b/drivers/clk/versatile/clk-integrator.c
index 08593b4ee2c9..bda8967e09c2 100644
--- a/drivers/clk/versatile/clk-integrator.c
+++ b/drivers/clk/versatile/clk-integrator.c
@@ -78,7 +78,7 @@ void __init integrator_clk_init(bool is_cp)
clk_register_clkdev(clk, NULL, "sp804");
/* ICST VCO clock used on the Integrator/CP CLCD */
- clk = icst_clk_register(NULL, &cp_icst_desc,
+ clk = icst_clk_register(NULL, &cp_icst_desc, "icst",
__io_address(INTEGRATOR_HDR_BASE));
clk_register_clkdev(clk, NULL, "clcd");
}
diff --git a/drivers/clk/versatile/clk-realview.c b/drivers/clk/versatile/clk-realview.c
index cda07e70a408..747e7b31117c 100644
--- a/drivers/clk/versatile/clk-realview.c
+++ b/drivers/clk/versatile/clk-realview.c
@@ -84,9 +84,11 @@ void __init realview_clk_init(void __iomem *sysbase, bool is_pb1176)
/* ICST VCO clock */
if (is_pb1176)
- clk = icst_clk_register(NULL, &realview_osc0_desc, sysbase);
+ clk = icst_clk_register(NULL, &realview_osc0_desc,
+ "osc0", sysbase);
else
- clk = icst_clk_register(NULL, &realview_osc4_desc, sysbase);
+ clk = icst_clk_register(NULL, &realview_osc4_desc,
+ "osc4", sysbase);
clk_register_clkdev(clk, NULL, "dev:clcd");
clk_register_clkdev(clk, NULL, "issp:clcd");
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
index bf9b15a585e1..c6e86a9a2aa3 100644
--- a/drivers/clk/versatile/clk-sp810.c
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -123,7 +123,7 @@ static const struct clk_ops clk_sp810_timerclken_ops = {
.set_parent = clk_sp810_timerclken_set_parent,
};
-struct clk *clk_sp810_timerclken_of_get(struct of_phandle_args *clkspec,
+static struct clk *clk_sp810_timerclken_of_get(struct of_phandle_args *clkspec,
void *data)
{
struct clk_sp810 *sp810 = data;
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 10772aa72e4e..09dd0173ea0a 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -102,9 +102,10 @@ static const char *swdt_ext_clk_input_names[] __initdata = {"swdt_ext_clk"};
static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
const char *clk_name, void __iomem *fclk_ctrl_reg,
- const char **parents)
+ const char **parents, int enable)
{
struct clk *clk;
+ u32 enable_reg;
char *mux_name;
char *div0_name;
char *div1_name;
@@ -147,6 +148,12 @@ static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
clks[fclk] = clk_register_gate(NULL, clk_name,
div1_name, CLK_SET_RATE_PARENT, fclk_gate_reg,
0, CLK_GATE_SET_TO_DISABLE, fclk_gate_lock);
+ enable_reg = readl(fclk_gate_reg) & 1;
+ if (enable && !enable_reg) {
+ if (clk_prepare_enable(clks[fclk]))
+ pr_warn("%s: FCLK%u enable failed\n", __func__,
+ fclk - fclk0);
+ }
kfree(mux_name);
kfree(div0_name);
kfree(div1_name);
@@ -213,6 +220,7 @@ static void __init zynq_clk_setup(struct device_node *np)
int ret;
struct clk *clk;
char *clk_name;
+ unsigned int fclk_enable = 0;
const char *clk_output_name[clk_max];
const char *cpu_parents[4];
const char *periph_parents[4];
@@ -238,6 +246,8 @@ static void __init zynq_clk_setup(struct device_node *np)
periph_parents[2] = clk_output_name[armpll];
periph_parents[3] = clk_output_name[ddrpll];
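+ /*
+ * "fclk-enable" is a bitmask: bit n set requests that FCLKn be left
+ * enabled, e.g. <0x5> covers FCLK0 and FCLK2.
+ */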
+ of_property_read_u32(np, "fclk-enable", &fclk_enable);
+
/* ps_clk */
ret = of_property_read_u32(np, "ps-clk-frequency", &tmp);
if (ret) {
@@ -340,10 +350,12 @@ static void __init zynq_clk_setup(struct device_node *np)
clk_prepare_enable(clks[dci]);
/* Peripheral clocks */
- for (i = fclk0; i <= fclk3; i++)
+ for (i = fclk0; i <= fclk3; i++) {
+ int enable = !!(fclk_enable & BIT(i - fclk0));
zynq_clk_register_fclk(i, clk_output_name[i],
SLCR_FPGA0_CLK_CTRL + 0x10 * (i - fclk0),
- periph_parents);
+ periph_parents, enable);
+ }
zynq_clk_register_periph_clk(lqspi, 0, clk_output_name[lqspi], NULL,
SLCR_LQSPI_CLK_CTRL, periph_parents, 0);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 5c07a56962db..cd6950fd8caf 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -37,6 +37,10 @@ config SUN4I_TIMER
select CLKSRC_MMIO
bool
+config SUN5I_HSTIMER
+ select CLKSRC_MMIO
+ bool
+
config VT8500_TIMER
bool
@@ -75,6 +79,7 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
config CLKSRC_EFM32
bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
+ select CLKSRC_MMIO
default ARCH_EFM32
help
Support to use the timers of EFM32 SoCs as clock source and clock
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 33621efb9148..c7ca50a9c232 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -22,10 +22,11 @@ obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o
obj-$(CONFIG_ARCH_MXS) += mxs_timer.o
obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o
obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
+obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o
obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o
-obj-$(CONFIG_ARCH_BCM) += bcm_kona_timer.o
+obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm_kona_timer.o
obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o
obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o
obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index c639b1a9e996..0fc31d029e52 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -202,7 +202,7 @@ static struct clocksource gt_clocksource = {
};
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
-static u32 notrace gt_sched_clock_read(void)
+static u64 notrace gt_sched_clock_read(void)
{
return gt_counter_read();
}
@@ -217,7 +217,7 @@ static void __init gt_clocksource_init(void)
writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
- setup_sched_clock(gt_sched_clock_read, 32, gt_clk_rate);
+ sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate);
#endif
clocksource_register_hz(&gt_clocksource, gt_clk_rate);
}
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 0d7d8c3ed6b2..0595dc6c453e 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -17,6 +17,7 @@
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/types.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <asm/mach/time.h>
@@ -98,30 +99,6 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
return;
}
-static const struct of_device_id bcm_timer_ids[] __initconst = {
- {.compatible = "brcm,kona-timer"},
- {.compatible = "bcm,kona-timer"}, /* deprecated name */
- {},
-};
-
-static void __init kona_timers_init(struct device_node *node)
-{
- u32 freq;
-
- if (!of_property_read_u32(node, "clock-frequency", &freq))
- arch_timer_rate = freq;
- else
- panic("clock-frequency not set in the .dts file");
-
- /* Setup IRQ numbers */
- timers.tmr_irq = irq_of_parse_and_map(node, 0);
-
- /* Setup IO addresses */
- timers.tmr_regs = of_iomap(node, 0);
-
- kona_timer_disable_and_clear(timers.tmr_regs);
-}
-
static int kona_timer_set_next_event(unsigned long clc,
struct clock_event_device *unused)
{
@@ -196,7 +173,34 @@ static struct irqaction kona_timer_irq = {
static void __init kona_timer_init(struct device_node *node)
{
- kona_timers_init(node);
+ u32 freq;
+ struct clk *external_clk;
+
+ if (!of_device_is_available(node)) {
+ pr_info("Kona Timer v1 marked as disabled in device tree\n");
+ return;
+ }
+
+ external_clk = of_clk_get_by_name(node, NULL);
+
+ if (!IS_ERR(external_clk)) {
+ arch_timer_rate = clk_get_rate(external_clk);
+ clk_prepare_enable(external_clk);
+ } else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
+ arch_timer_rate = freq;
+ } else {
+ pr_err("Kona Timer v1 unable to determine clock-frequency");
+ return;
+ }
+
+ /* Setup IRQ numbers */
+ timers.tmr_irq = irq_of_parse_and_map(node, 0);
+
+ /* Setup IO addresses */
+ timers.tmr_regs = of_iomap(node, 0);
+
+ kona_timer_disable_and_clear(timers.tmr_regs);
+
kona_timer_clockevents_init();
setup_irq(timers.tmr_irq, &kona_timer_irq);
kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
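The Kona rework above illustrates a common binding pattern: prefer a clock referenced from the device tree and fall back to a legacy clock-frequency property. A hedged sketch of that fallback, with illustrative demo_* names and error handling abridged:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/of.h>

/* Return the timer rate in Hz, or 0 if neither source is usable. */
static unsigned long __init demo_timer_get_rate(struct device_node *node)
{
	struct clk *clk = of_clk_get_by_name(node, NULL);
	u32 freq;

	if (!IS_ERR(clk)) {
		clk_prepare_enable(clk);
		return clk_get_rate(clk);
	}

	if (!of_property_read_u32(node, "clock-frequency", &freq))
		return freq;

	return 0;
}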
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index b2bb3a4bc205..63f176de0d02 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -67,11 +67,13 @@
* struct ttc_timer - This definition defines local timer structure
*
* @base_addr: Base address of timer
+ * @freq: Timer input clock frequency
* @clk: Associated clock source
* @clk_rate_change_nb Notifier block for clock rate changes
*/
struct ttc_timer {
void __iomem *base_addr;
+ unsigned long freq;
struct clk *clk;
struct notifier_block clk_rate_change_nb;
};
@@ -158,7 +160,7 @@ static cycle_t __ttc_clocksource_read(struct clocksource *cs)
TTC_COUNT_VAL_OFFSET);
}
-static u32 notrace ttc_sched_clock_read(void)
+static u64 notrace ttc_sched_clock_read(void)
{
return __raw_readl(ttc_sched_clock_val_reg);
}
@@ -196,9 +198,8 @@ static void ttc_set_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- ttc_set_interval(timer,
- DIV_ROUND_CLOSEST(clk_get_rate(ttce->ttc.clk),
- PRESCALE * HZ));
+ ttc_set_interval(timer, DIV_ROUND_CLOSEST(ttce->ttc.freq,
+ PRESCALE * HZ));
break;
case CLOCK_EVT_MODE_ONESHOT:
case CLOCK_EVT_MODE_UNUSED:
@@ -273,6 +274,8 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
return;
}
+ ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk);
+
ttccs->ttc.clk_rate_change_nb.notifier_call =
ttc_rate_change_clocksource_cb;
ttccs->ttc.clk_rate_change_nb.next = NULL;
@@ -298,16 +301,14 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
__raw_writel(CNT_CNTRL_RESET,
ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
- err = clocksource_register_hz(&ttccs->cs,
- clk_get_rate(ttccs->ttc.clk) / PRESCALE);
+ err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
if (WARN_ON(err)) {
kfree(ttccs);
return;
}
ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
- setup_sched_clock(ttc_sched_clock_read, 16,
- clk_get_rate(ttccs->ttc.clk) / PRESCALE);
+ sched_clock_register(ttc_sched_clock_read, 16, ttccs->ttc.freq / PRESCALE);
}
static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
@@ -334,6 +335,9 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
ndata->new_rate / PRESCALE);
local_irq_restore(flags);
+ /* update cached frequency */
+ ttc->freq = ndata->new_rate;
+
/* fall through */
}
case PRE_RATE_CHANGE:
@@ -367,6 +371,7 @@ static void __init ttc_setup_clockevent(struct clk *clk,
if (clk_notifier_register(ttcce->ttc.clk,
&ttcce->ttc.clk_rate_change_nb))
pr_warn("Unable to register clock notifier.\n");
+ ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
ttcce->ttc.base_addr = base;
ttcce->ce.name = "ttc_clockevent";
@@ -388,15 +393,14 @@ static void __init ttc_setup_clockevent(struct clk *clk,
__raw_writel(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET);
err = request_irq(irq, ttc_clock_event_interrupt,
- IRQF_DISABLED | IRQF_TIMER,
- ttcce->ce.name, ttcce);
+ IRQF_TIMER, ttcce->ce.name, ttcce);
if (WARN_ON(err)) {
kfree(ttcce);
return;
}
clockevents_config_and_register(&ttcce->ce,
- clk_get_rate(ttcce->ttc.clk) / PRESCALE, 1, 0xfffe);
+ ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
}
/**
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index 35639cf4e5a2..ae2e4278c42a 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -28,6 +28,7 @@ void __init clocksource_of_init(void)
struct device_node *np;
const struct of_device_id *match;
clocksource_of_init_fn init_func;
+ unsigned clocksources = 0;
for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
if (!of_device_is_available(np))
@@ -35,6 +36,8 @@ void __init clocksource_of_init(void)
init_func = match->data;
init_func(np);
- of_node_put(np);
+ clocksources++;
}
+ if (!clocksources)
+ pr_crit("%s: no matching clocksources found\n", __func__);
}
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index ea210482dd20..db2105290898 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -131,7 +131,7 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
static struct irqaction mfgptirq = {
.handler = mfgpt_tick,
- .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED,
+ .flags = IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED,
.name = DRV_NAME,
};
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index e54ca1062d8e..f3656a6b0382 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -243,8 +243,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
dw_ced->irqaction.dev_id = &dw_ced->ced;
dw_ced->irqaction.irq = irq;
dw_ced->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL |
- IRQF_NOBALANCING |
- IRQF_DISABLED;
+ IRQF_NOBALANCING;
dw_ced->eoi = apbt_eoi;
err = setup_irq(irq, &dw_ced->irqaction);
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 45ba8aecc729..2a2ea2717f3a 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -108,12 +108,11 @@ static void __init add_clocksource(struct device_node *source_timer)
static u64 read_sched_clock(void)
{
- return __raw_readl(sched_io_base);
+ return ~__raw_readl(sched_io_base);
}
static const struct of_device_id sptimer_ids[] __initconst = {
{ .compatible = "picochip,pc3x2-rtc" },
- { .compatible = "snps,dw-apb-timer-sp" },
{ /* Sentinel */ },
};
@@ -151,4 +150,6 @@ static void __init dw_apb_timer_init(struct device_node *timer)
num_called++;
}
CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
-CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init);
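Two details are worth noting in the dw_apb change: a down-counting timer is exposed to sched_clock by inverting the raw register value (sched_clock must increase monotonically), and the same init function is now attached to several compatible strings via separate CLOCKSOURCE_OF_DECLARE() entries. A tiny sketch of the inversion idiom, using a hypothetical demo_* counter:

#include <linux/io.h>
#include <linux/sched_clock.h>

static void __iomem *demo_down_counter;	/* hypothetical down-counting timer */

/*
 * The hardware counts down from 0xffffffff; inverting the value turns it
 * into the monotonically increasing quantity sched_clock expects.
 */
static u64 notrace demo_downcount_read(void)
{
	return ~readl_relaxed(demo_down_counter);
}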
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 62b0de6a1837..48f76bc05da0 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -71,6 +71,10 @@ enum {
MCT_L1_IRQ,
MCT_L2_IRQ,
MCT_L3_IRQ,
+ MCT_L4_IRQ,
+ MCT_L5_IRQ,
+ MCT_L6_IRQ,
+ MCT_L7_IRQ,
MCT_NR_IRQS,
};
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index ed7b73b508e0..a709cfa49d85 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -20,7 +20,6 @@
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/platform_data/clocksource-nomadik-mtu.h>
#include <linux/sched_clock.h>
#include <asm/mach/time.h>
@@ -103,7 +102,7 @@ static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev)
return 0;
}
-void nmdk_clkevt_reset(void)
+static void nmdk_clkevt_reset(void)
{
if (clkevt_periodic) {
/* Timer: configure load and background-load, and fire it up */
@@ -144,7 +143,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
}
}
-void nmdk_clksrc_reset(void)
+static void nmdk_clksrc_reset(void)
{
/* Disable */
writel(0, mtu_base + MTU_CR(0));
@@ -187,13 +186,13 @@ static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id)
static struct irqaction nmdk_timer_irq = {
.name = "Nomadik Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER,
+ .flags = IRQF_TIMER,
.handler = nmdk_timer_interrupt,
.dev_id = &nmdk_clkevt,
};
-static void __init __nmdk_timer_init(void __iomem *base, int irq,
- struct clk *pclk, struct clk *clk)
+static void __init nmdk_timer_init(void __iomem *base, int irq,
+ struct clk *pclk, struct clk *clk)
{
unsigned long rate;
@@ -245,18 +244,6 @@ static void __init __nmdk_timer_init(void __iomem *base, int irq,
register_current_timer_delay(&mtu_delay_timer);
}
-void __init nmdk_timer_init(void __iomem *base, int irq)
-{
- struct clk *clk0, *pclk0;
-
- pclk0 = clk_get_sys("mtu0", "apb_pclk");
- BUG_ON(IS_ERR(pclk0));
- clk0 = clk_get_sys("mtu0", NULL);
- BUG_ON(IS_ERR(clk0));
-
- __nmdk_timer_init(base, irq, pclk0, clk0);
-}
-
static void __init nmdk_timer_of_init(struct device_node *node)
{
struct clk *pclk;
@@ -280,7 +267,7 @@ static void __init nmdk_timer_of_init(struct device_node *node)
if (irq <= 0)
panic("Can't parse IRQ");
- __nmdk_timer_init(base, irq, pclk, clk);
+ nmdk_timer_init(base, irq, pclk, clk);
}
CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu",
nmdk_timer_of_init);
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 85082e8d3052..5645cfc90c41 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -264,7 +264,7 @@ static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id)
static struct irqaction samsung_clock_event_irq = {
.name = "samsung_time_irq",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = samsung_clock_event_isr,
.dev_id = &time_event_device,
};
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 0965e9848b3d..0b1836a6c539 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -634,12 +634,18 @@ static int sh_cmt_clock_event_next(unsigned long delta,
static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
- pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev);
+ struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
+
+ pm_genpd_syscore_poweroff(&p->pdev->dev);
+ clk_unprepare(p->clk);
}
static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
- pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev);
+ struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
+
+ clk_prepare(p->clk);
+ pm_genpd_syscore_poweron(&p->pdev->dev);
}
static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
@@ -726,8 +732,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_cmt_interrupt;
p->irqaction.dev_id = p;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
- IRQF_IRQPOLL | IRQF_NOBALANCING;
+ p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, "cmt_fck");
@@ -737,6 +742,10 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
goto err2;
}
+ ret = clk_prepare(p->clk);
+ if (ret < 0)
+ goto err3;
+
if (res2 && (resource_size(res2) == 4)) {
/* assume both CMSTR and CMCSR to be 32-bit */
p->read_control = sh_cmt_read32;
@@ -773,19 +782,21 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
cfg->clocksource_rating);
if (ret) {
dev_err(&p->pdev->dev, "registration failed\n");
- goto err3;
+ goto err4;
}
p->cs_enabled = false;
ret = setup_irq(irq, &p->irqaction);
if (ret) {
dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
- goto err3;
+ goto err4;
}
platform_set_drvdata(pdev, p);
return 0;
+err4:
+ clk_unprepare(p->clk);
err3:
clk_put(p->clk);
err2:
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 3cf12834681e..e30d76e0a6fa 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -302,8 +302,7 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
p->irqaction.handler = sh_mtu2_interrupt;
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
- IRQF_IRQPOLL | IRQF_NOBALANCING;
+ p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
@@ -358,7 +357,6 @@ static int sh_mtu2_probe(struct platform_device *pdev)
ret = sh_mtu2_setup(p, pdev);
if (ret) {
kfree(p);
- platform_set_drvdata(pdev, NULL);
pm_runtime_idle(&pdev->dev);
return ret;
}
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 63557cda0a7d..ecd7b60bfdfa 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -462,8 +462,7 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
p->irqaction.handler = sh_tmu_interrupt;
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
- IRQF_IRQPOLL | IRQF_NOBALANCING;
+ p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, "tmu_fck");
@@ -523,7 +522,6 @@ static int sh_tmu_probe(struct platform_device *pdev)
ret = sh_tmu_setup(p, pdev);
if (ret) {
kfree(p);
- platform_set_drvdata(pdev, NULL);
pm_runtime_idle(&pdev->dev);
return ret;
}
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 2fb4695a28d8..bf497afba9ad 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -114,7 +114,7 @@ static int sun4i_clkevt_next_event(unsigned long evt,
static struct clock_event_device sun4i_clockevent = {
.name = "sun4i_tick",
- .rating = 300,
+ .rating = 350,
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_mode = sun4i_clkevt_mode,
.set_next_event = sun4i_clkevt_next_event,
@@ -138,7 +138,7 @@ static struct irqaction sun4i_timer_irq = {
.dev_id = &sun4i_clockevent,
};
-static u32 sun4i_timer_sched_read(void)
+static u64 notrace sun4i_timer_sched_read(void)
{
return ~readl(timer_base + TIMER_CNTVAL_REG(1));
}
@@ -170,15 +170,18 @@ static void __init sun4i_timer_init(struct device_node *node)
TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
timer_base + TIMER_CTL_REG(1));
- setup_sched_clock(sun4i_timer_sched_read, 32, rate);
+ sched_clock_register(sun4i_timer_sched_read, 32, rate);
clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
- rate, 300, 32, clocksource_mmio_readl_down);
+ rate, 350, 32, clocksource_mmio_readl_down);
ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
timer_base + TIMER_CTL_REG(0));
+ /* Make sure timer is stopped before playing with interrupts */
+ sun4i_clkevt_time_stop(0);
+
ret = setup_irq(irq, &sun4i_timer_irq);
if (ret)
pr_warn("failed to setup irq %d\n", irq);
@@ -187,7 +190,8 @@ static void __init sun4i_timer_init(struct device_node *node)
val = readl(timer_base + TIMER_IRQ_EN_REG);
writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
- sun4i_clockevent.cpumask = cpumask_of(0);
+ sun4i_clockevent.cpumask = cpu_possible_mask;
+ sun4i_clockevent.irq = irq;
clockevents_config_and_register(&sun4i_clockevent, rate,
TIMER_SYNC_TICKS, 0xffffffff);
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index 642849256d82..d1869f02051c 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -149,7 +149,7 @@ static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id)
static struct irqaction tegra_timer_irq = {
.name = "timer0",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_HIGH,
+ .flags = IRQF_TIMER | IRQF_TRIGGER_HIGH,
.handler = tegra_timer_interrupt,
.dev_id = &tegra_clockevent,
};
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index d8e47e502785..ee8691b89944 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -76,6 +76,7 @@
static void __iomem *timer_base, *local_base;
static unsigned int timer_clk;
static bool timer25Mhz = true;
+static u32 enable_mask;
/*
* Number of timer ticks per jiffy.
@@ -121,8 +122,7 @@ armada_370_xp_clkevt_next_event(unsigned long delta,
/*
* Enable the timer.
*/
- local_timer_ctrl_clrset(TIMER0_RELOAD_EN,
- TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT));
+ local_timer_ctrl_clrset(TIMER0_RELOAD_EN, enable_mask);
return 0;
}
@@ -141,9 +141,7 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode,
/*
* Enable timer.
*/
- local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN |
- TIMER0_EN |
- TIMER0_DIV(TIMER_DIVIDER_SHIFT));
+ local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
} else {
/*
* Disable timer.
@@ -240,10 +238,13 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
WARN_ON(!timer_base);
local_base = of_iomap(np, 1);
- if (timer25Mhz)
+ if (timer25Mhz) {
set = TIMER0_25MHZ;
- else
+ enable_mask = TIMER0_EN;
+ } else {
clr = TIMER0_25MHZ;
+ enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
+ }
timer_ctrl_clrset(clr, set);
local_timer_ctrl_clrset(clr, set);
@@ -256,19 +257,18 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
/*
- * Set scale and timer for sched_clock.
- */
- sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
-
- /*
* Setup free-running clocksource timer (interrupts
* disabled).
*/
writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
- timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN |
- TIMER0_DIV(TIMER_DIVIDER_SHIFT));
+ timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
+
+ /*
+ * Set scale and timer for sched_clock.
+ */
+ sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
"armada_370_xp_clocksource",
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c
index 9c7f018a67ca..20066222f3f2 100644
--- a/drivers/clocksource/time-orion.c
+++ b/drivers/clocksource/time-orion.c
@@ -53,7 +53,7 @@ EXPORT_SYMBOL(orion_timer_ctrl_clrset);
/*
* Free-running clocksource handling.
*/
-static u32 notrace orion_read_sched_clock(void)
+static u64 notrace orion_read_sched_clock(void)
{
return ~readl(timer_base + TIMER0_VAL);
}
@@ -135,7 +135,7 @@ static void __init orion_timer_init(struct device_node *np)
clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
clk_get_rate(clk), 300, 32,
clocksource_mmio_readl_down);
- setup_sched_clock(orion_read_sched_clock, 32, clk_get_rate(clk));
+ sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk));
/* setup timer1 as clockevent timer */
if (setup_irq(irq, &orion_clkevt_irq))
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
new file mode 100644
index 000000000000..deebcd6469fc
--- /dev/null
+++ b/drivers/clocksource/timer-sun5i.c
@@ -0,0 +1,192 @@
+/*
+ * Allwinner SoCs hstimer driver.
+ *
+ * Copyright (C) 2013 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define TIMER_IRQ_EN_REG 0x00
+#define TIMER_IRQ_EN(val) BIT(val)
+#define TIMER_IRQ_ST_REG 0x04
+#define TIMER_CTL_REG(val) (0x20 * (val) + 0x10)
+#define TIMER_CTL_ENABLE BIT(0)
+#define TIMER_CTL_RELOAD BIT(1)
+#define TIMER_CTL_CLK_PRES(val) (((val) & 0x7) << 4)
+#define TIMER_CTL_ONESHOT BIT(7)
+#define TIMER_INTVAL_LO_REG(val) (0x20 * (val) + 0x14)
+#define TIMER_INTVAL_HI_REG(val) (0x20 * (val) + 0x18)
+#define TIMER_CNTVAL_LO_REG(val) (0x20 * (val) + 0x1c)
+#define TIMER_CNTVAL_HI_REG(val) (0x20 * (val) + 0x20)
+
+#define TIMER_SYNC_TICKS 3
+
+static void __iomem *timer_base;
+static u32 ticks_per_jiffy;
+
+/*
+ * When we disable a timer, we need to wait for at least 2 cycles of
+ * the timer source clock. For that we use the clocksource timer that is
+ * already set up, runs at the same frequency as the other timers, and
+ * will never be disabled.
+ */
+static void sun5i_clkevt_sync(void)
+{
+ u32 old = readl(timer_base + TIMER_CNTVAL_LO_REG(1));
+
+ while ((old - readl(timer_base + TIMER_CNTVAL_LO_REG(1))) < TIMER_SYNC_TICKS)
+ cpu_relax();
+}
+
+static void sun5i_clkevt_time_stop(u8 timer)
+{
+ u32 val = readl(timer_base + TIMER_CTL_REG(timer));
+ writel(val & ~TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG(timer));
+
+ sun5i_clkevt_sync();
+}
+
+static void sun5i_clkevt_time_setup(u8 timer, u32 delay)
+{
+ writel(delay, timer_base + TIMER_INTVAL_LO_REG(timer));
+}
+
+static void sun5i_clkevt_time_start(u8 timer, bool periodic)
+{
+ u32 val = readl(timer_base + TIMER_CTL_REG(timer));
+
+ if (periodic)
+ val &= ~TIMER_CTL_ONESHOT;
+ else
+ val |= TIMER_CTL_ONESHOT;
+
+ writel(val | TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
+ timer_base + TIMER_CTL_REG(timer));
+}
+
+static void sun5i_clkevt_mode(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ sun5i_clkevt_time_stop(0);
+ sun5i_clkevt_time_setup(0, ticks_per_jiffy);
+ sun5i_clkevt_time_start(0, true);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ sun5i_clkevt_time_stop(0);
+ sun5i_clkevt_time_start(0, false);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ default:
+ sun5i_clkevt_time_stop(0);
+ break;
+ }
+}
+
+static int sun5i_clkevt_next_event(unsigned long evt,
+ struct clock_event_device *unused)
+{
+ sun5i_clkevt_time_stop(0);
+ sun5i_clkevt_time_setup(0, evt - TIMER_SYNC_TICKS);
+ sun5i_clkevt_time_start(0, false);
+
+ return 0;
+}
+
+static struct clock_event_device sun5i_clockevent = {
+ .name = "sun5i_tick",
+ .rating = 340,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = sun5i_clkevt_mode,
+ .set_next_event = sun5i_clkevt_next_event,
+};
+
+
+static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+
+ writel(0x1, timer_base + TIMER_IRQ_ST_REG);
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction sun5i_timer_irq = {
+ .name = "sun5i_timer0",
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = sun5i_timer_interrupt,
+ .dev_id = &sun5i_clockevent,
+};
+
+static u64 sun5i_timer_sched_read(void)
+{
+ return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1));
+}
+
+static void __init sun5i_timer_init(struct device_node *node)
+{
+ unsigned long rate;
+ struct clk *clk;
+ int ret, irq;
+ u32 val;
+
+ timer_base = of_iomap(node, 0);
+ if (!timer_base)
+ panic("Can't map registers");
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0)
+ panic("Can't parse IRQ");
+
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk))
+ panic("Can't get timer clock");
+ clk_prepare_enable(clk);
+ rate = clk_get_rate(clk);
+
+ writel(~0, timer_base + TIMER_INTVAL_LO_REG(1));
+ writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
+ timer_base + TIMER_CTL_REG(1));
+
+ sched_clock_register(sun5i_timer_sched_read, 32, rate);
+ clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
+ rate, 340, 32, clocksource_mmio_readl_down);
+
+ ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
+
+ ret = setup_irq(irq, &sun5i_timer_irq);
+ if (ret)
+ pr_warn("failed to setup irq %d\n", irq);
+
+ /* Enable timer0 interrupt */
+ val = readl(timer_base + TIMER_IRQ_EN_REG);
+ writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
+
+ sun5i_clockevent.cpumask = cpu_possible_mask;
+ sun5i_clockevent.irq = irq;
+
+ clockevents_config_and_register(&sun5i_clockevent, rate,
+ TIMER_SYNC_TICKS, 0xffffffff);
+}
+CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
+ sun5i_timer_init);
+CLOCKSOURCE_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer",
+ sun5i_timer_init);
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index ad3c0e83a779..1098ed3b9b89 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -124,7 +124,7 @@ static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id)
static struct irqaction irq = {
.name = "vt8500_timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = vt8500_timer_interrupt,
.dev_id = &clockevent,
};
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 38093e272377..4b029c0944af 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -20,6 +20,10 @@ if CPU_FREQ
config CPU_FREQ_GOV_COMMON
bool
+config CPU_FREQ_BOOST_SW
+ bool
+ depends on THERMAL
+
config CPU_FREQ_STAT
tristate "CPU frequency translation statistics"
default y
@@ -181,7 +185,8 @@ config CPU_FREQ_GOV_CONSERVATIVE
config GENERIC_CPUFREQ_CPU0
tristate "Generic CPU0 cpufreq driver"
- depends on HAVE_CLK && REGULATOR && PM_OPP && OF
+ depends on HAVE_CLK && REGULATOR && OF && THERMAL && CPU_THERMAL
+ select PM_OPP
help
This adds a generic cpufreq driver for CPU0 frequency management.
It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index ce52ed949249..31297499a60a 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -4,7 +4,8 @@
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
- depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
+ depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
+ select PM_OPP
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
@@ -54,7 +55,8 @@ config ARM_EXYNOS5250_CPUFREQ
config ARM_EXYNOS5440_CPUFREQ
bool "SAMSUNG EXYNOS5440"
depends on SOC_EXYNOS5440
- depends on HAVE_CLK && PM_OPP && OF
+ depends on HAVE_CLK && OF
+ select PM_OPP
default y
help
This adds the CPUFreq driver for Samsung EXYNOS5440
@@ -64,6 +66,21 @@ config ARM_EXYNOS5440_CPUFREQ
If in doubt, say N.
+config ARM_EXYNOS_CPU_FREQ_BOOST_SW
+ bool "EXYNOS Frequency Overclocking - Software"
+ depends on ARM_EXYNOS_CPUFREQ
+ select CPU_FREQ_BOOST_SW
+ select EXYNOS_THERMAL
+ help
+ This driver supports software managed overclocking (BOOST).
+ It allows usage of special frequencies for Samsung Exynos
+ processors if thermal conditions are appropriate.
+
+ It requires, for safe operation, a thermal framework with properly
+ defined trip points.
+
+ If in doubt, say N.
+
config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
depends on ARCH_HIGHBANK
@@ -79,11 +96,11 @@ config ARM_HIGHBANK_CPUFREQ
If in doubt, say N.
config ARM_IMX6Q_CPUFREQ
- tristate "Freescale i.MX6Q cpufreq support"
- depends on SOC_IMX6Q
+ tristate "Freescale i.MX6 cpufreq support"
+ depends on ARCH_MXC
depends on REGULATOR_ANATOP
help
- This adds cpufreq driver support for Freescale i.MX6Q SOC.
+ This adds cpufreq driver support for Freescale i.MX6 series SoCs.
If in doubt, say N.
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index caf41ebea184..18448a7e9f86 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -80,7 +80,6 @@ static struct acpi_processor_performance __percpu *acpi_perf_data;
static struct cpufreq_driver acpi_cpufreq_driver;
static unsigned int acpi_pstate_strict;
-static bool boost_enabled, boost_supported;
static struct msr __percpu *msrs;
static bool boost_state(unsigned int cpu)
@@ -133,49 +132,16 @@ static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
wrmsr_on_cpus(cpumask, msr_addr, msrs);
}
-static ssize_t _store_boost(const char *buf, size_t count)
+static int _store_boost(int val)
{
- int ret;
- unsigned long val = 0;
-
- if (!boost_supported)
- return -EINVAL;
-
- ret = kstrtoul(buf, 10, &val);
- if (ret || (val > 1))
- return -EINVAL;
-
- if ((val && boost_enabled) || (!val && !boost_enabled))
- return count;
-
get_online_cpus();
-
boost_set_msrs(val, cpu_online_mask);
-
put_online_cpus();
-
- boost_enabled = val;
pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
- return count;
-}
-
-static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- return _store_boost(buf, count);
-}
-
-static ssize_t show_global_boost(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- return sprintf(buf, "%u\n", boost_enabled);
+ return 0;
}
-static struct global_attr global_boost = __ATTR(boost, 0644,
- show_global_boost,
- store_global_boost);
-
static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
@@ -186,15 +152,32 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
cpufreq_freq_attr_ro(freqdomain_cpus);
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+static ssize_t store_boost(const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val = 0;
+
+ if (!acpi_cpufreq_driver.boost_supported)
+ return -EINVAL;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret || (val > 1))
+ return -EINVAL;
+
+ _store_boost((int) val);
+
+ return count;
+}
+
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
size_t count)
{
- return _store_boost(buf, count);
+ return store_boost(buf, count);
}
static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
- return sprintf(buf, "%u\n", boost_enabled);
+ return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
}
cpufreq_freq_attr_rw(cpb);
@@ -554,7 +537,7 @@ static int boost_notify(struct notifier_block *nb, unsigned long action,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- boost_set_msrs(boost_enabled, cpumask);
+ boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
break;
case CPU_DOWN_PREPARE:
@@ -911,6 +894,7 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
.resume = acpi_cpufreq_resume,
.name = "acpi-cpufreq",
.attr = acpi_cpufreq_attr,
+ .set_boost = _store_boost,
};
static void __init acpi_cpufreq_boost_init(void)
@@ -921,33 +905,22 @@ static void __init acpi_cpufreq_boost_init(void)
if (!msrs)
return;
- boost_supported = true;
- boost_enabled = boost_state(0);
-
+ acpi_cpufreq_driver.boost_supported = true;
+ acpi_cpufreq_driver.boost_enabled = boost_state(0);
get_online_cpus();
/* Force all MSRs to the same value */
- boost_set_msrs(boost_enabled, cpu_online_mask);
+ boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
+ cpu_online_mask);
register_cpu_notifier(&boost_nb);
put_online_cpus();
- } else
- global_boost.attr.mode = 0444;
-
- /* We create the boost file in any case, though for systems without
- * hardware support it will be read-only and hardwired to return 0.
- */
- if (cpufreq_sysfs_create_file(&(global_boost.attr)))
- pr_warn(PFX "could not register global boost sysfs file\n");
- else
- pr_debug("registered global boost sysfs file\n");
+ }
}
-static void __exit acpi_cpufreq_boost_exit(void)
+static void acpi_cpufreq_boost_exit(void)
{
- cpufreq_sysfs_remove_file(&(global_boost.attr));
-
if (msrs) {
unregister_cpu_notifier(&boost_nb);
@@ -993,13 +966,13 @@ static int __init acpi_cpufreq_init(void)
*iter = &cpb;
}
#endif
+ acpi_cpufreq_boost_init();
ret = cpufreq_register_driver(&acpi_cpufreq_driver);
- if (ret)
+ if (ret) {
free_acpi_perf_data();
- else
- acpi_cpufreq_boost_init();
-
+ acpi_cpufreq_boost_exit();
+ }
return ret;
}
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 5519933813ea..72f87e9317e3 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -488,7 +488,8 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver bL_cpufreq_driver = {
.name = "arm-big-little",
.flags = CPUFREQ_STICKY |
- CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = bL_cpufreq_set_target,
.get = bL_cpufreq_get_rate,
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
index 7c03dd84f66a..a1c79f549edb 100644
--- a/drivers/cpufreq/at32ap-cpufreq.c
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -21,17 +21,8 @@
#include <linux/export.h>
#include <linux/slab.h>
-static struct clk *cpuclk;
static struct cpufreq_frequency_table *freq_table;
-static unsigned int at32_get_speed(unsigned int cpu)
-{
- /* No SMP support */
- if (cpu)
- return 0;
- return (unsigned int)((clk_get_rate(cpuclk) + 500) / 1000);
-}
-
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
@@ -39,7 +30,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned int old_freq, new_freq;
- old_freq = at32_get_speed(0);
+ old_freq = policy->cur;
new_freq = freq_table[index].frequency;
if (!ref_freq) {
@@ -50,7 +41,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
if (old_freq < new_freq)
boot_cpu_data.loops_per_jiffy = cpufreq_scale(
loops_per_jiffy_ref, ref_freq, new_freq);
- clk_set_rate(cpuclk, new_freq * 1000);
+ clk_set_rate(policy->clk, new_freq * 1000);
if (new_freq < old_freq)
boot_cpu_data.loops_per_jiffy = cpufreq_scale(
loops_per_jiffy_ref, ref_freq, new_freq);
@@ -61,6 +52,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
{
unsigned int frequency, rate, min_freq;
+ static struct clk *cpuclk;
int retval, steps, i;
if (policy->cpu != 0)
@@ -103,6 +95,7 @@ static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
frequency /= 2;
}
+ policy->clk = cpuclk;
freq_table[steps - 1].frequency = CPUFREQ_TABLE_END;
retval = cpufreq_table_validate_and_show(policy, freq_table);
@@ -123,7 +116,7 @@ static struct cpufreq_driver at32_driver = {
.init = at32_cpufreq_driver_init,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = at32_set_target,
- .get = at32_get_speed,
+ .get = cpufreq_generic_get,
.flags = CPUFREQ_STICKY,
};
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index d4585ce2346c..0c12ffc0ebcb 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -13,7 +13,9 @@
#include <linux/clk.h>
#include <linux/cpu.h>
+#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -21,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/thermal.h>
static unsigned int transition_latency;
static unsigned int voltage_tolerance; /* in percentage */
@@ -29,11 +32,7 @@ static struct device *cpu_dev;
static struct clk *cpu_clk;
static struct regulator *cpu_reg;
static struct cpufreq_frequency_table *freq_table;
-
-static unsigned int cpu0_get_speed(unsigned int cpu)
-{
- return clk_get_rate(cpu_clk) / 1000;
-}
+static struct thermal_cooling_device *cdev;
static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
{
@@ -44,7 +43,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
int ret;
freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
- if (freq_Hz < 0)
+ if (freq_Hz <= 0)
freq_Hz = freq_table[index].frequency * 1000;
freq_exact = freq_Hz;
@@ -100,6 +99,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
{
+ policy->clk = cpu_clk;
return cpufreq_generic_init(policy, freq_table, transition_latency);
}
@@ -107,7 +107,7 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
.flags = CPUFREQ_STICKY,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = cpu0_set_target,
- .get = cpu0_get_speed,
+ .get = cpufreq_generic_get,
.init = cpu0_cpufreq_init,
.exit = cpufreq_generic_exit,
.name = "generic_cpu0",
@@ -201,6 +201,17 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
goto out_free_table;
}
+ /*
+ * For now, just loading the cooling device;
+ * thermal DT code takes care of matching them.
+ */
+ if (of_find_property(np, "#cooling-cells", NULL)) {
+ cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
+ if (IS_ERR(cdev))
+ pr_err("running cpufreq without cooling device: %ld\n",
+ PTR_ERR(cdev));
+ }
+
of_node_put(np);
return 0;
@@ -213,6 +224,7 @@ out_put_node:
static int cpu0_cpufreq_remove(struct platform_device *pdev)
{
+ cpufreq_cooling_unregister(cdev);
cpufreq_unregister_driver(&cpu0_cpufreq_driver);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
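The cpu0 and at32ap changes rely on the new common helper added in the cpufreq.c hunk below: once a driver stores its CPU clock in policy->clk, it can drop its private ->get() callback and use cpufreq_generic_get() instead. A hedged sketch of a driver wired up this way; names are illustrative (demo_*), and the ->target_index() callback plus frequency-table setup are omitted for brevity:

#include <linux/clk.h>
#include <linux/cpufreq.h>

static struct clk *demo_cpu_clk;
static struct cpufreq_frequency_table *demo_freq_table;

static int demo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* stash the clock so cpufreq_generic_get() can read its rate */
	policy->clk = demo_cpu_clk;
	return cpufreq_generic_init(policy, demo_freq_table, CPUFREQ_ETERNAL);
}

static struct cpufreq_driver demo_cpufreq_driver = {
	.name	= "demo-cpufreq",
	.flags	= CPUFREQ_STICKY,
	.verify	= cpufreq_generic_frequency_table_verify,
	.get	= cpufreq_generic_get,	/* clk_get_rate(policy->clk) / 1000 */
	.init	= demo_cpufreq_init,
	.exit	= cpufreq_generic_exit,
};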
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 02d534da22dd..08ca8c9f41cd 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -39,7 +39,7 @@ static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
-static DEFINE_MUTEX(cpufreq_governor_lock);
+DEFINE_MUTEX(cpufreq_governor_lock);
static LIST_HEAD(cpufreq_policy_list);
#ifdef CONFIG_HOTPLUG_CPU
@@ -176,6 +176,20 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
+unsigned int cpufreq_generic_get(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+ if (!policy || IS_ERR(policy->clk)) {
+ pr_err("%s: No %s associated to cpu: %d\n", __func__,
+ policy ? "clk" : "policy", cpu);
+ return 0;
+ }
+
+ return clk_get_rate(policy->clk) / 1000;
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_get);
+
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
struct cpufreq_policy *policy = NULL;
@@ -320,10 +334,51 @@ void cpufreq_notify_transition(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
+/* Do post notifications when there are chances that transition has failed */
+void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, int transition_failed)
+{
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
+ if (!transition_failed)
+ return;
+
+ swap(freqs->old, freqs->new);
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
+}
+EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
+
/*********************************************************************
* SYSFS INTERFACE *
*********************************************************************/
+ssize_t show_boost(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+}
+
+static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret, enable;
+
+ ret = sscanf(buf, "%d", &enable);
+ if (ret != 1 || enable < 0 || enable > 1)
+ return -EINVAL;
+
+ if (cpufreq_boost_trigger_state(enable)) {
+ pr_err("%s: Cannot %s BOOST!\n", __func__,
+ enable ? "enable" : "disable");
+ return -EINVAL;
+ }
+
+ pr_debug("%s: cpufreq BOOST %s\n", __func__,
+ enable ? "enabled" : "disabled");
+
+ return count;
+}
+define_one_global_rw(boost);
static struct cpufreq_governor *__find_governor(const char *str_governor)
{
@@ -828,14 +883,17 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
int ret = 0;
memcpy(&new_policy, policy, sizeof(*policy));
+
+ /* Use the default policy if it's valid. */
+ if (cpufreq_driver->setpolicy)
+ cpufreq_parse_governor(policy->governor->name,
+ &new_policy.policy, NULL);
+
/* assure that the starting sequence is run in cpufreq_set_policy */
policy->governor = NULL;
/* set default policy */
ret = cpufreq_set_policy(policy, &new_policy);
- policy->user_policy.policy = policy->policy;
- policy->user_policy.governor = policy->governor;
-
if (ret) {
pr_debug("setting policy failed\n");
if (cpufreq_driver->exit)
@@ -845,8 +903,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
- unsigned int cpu, struct device *dev,
- bool frozen)
+ unsigned int cpu, struct device *dev)
{
int ret = 0;
unsigned long flags;
@@ -877,11 +934,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
}
}
- /* Don't touch sysfs links during light-weight init */
- if (!frozen)
- ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
-
- return ret;
+ return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
#endif
@@ -926,6 +979,30 @@ err_free_policy:
return NULL;
}
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+{
+ struct kobject *kobj;
+ struct completion *cmp;
+
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_REMOVE_POLICY, policy);
+
+ down_read(&policy->rwsem);
+ kobj = &policy->kobj;
+ cmp = &policy->kobj_unregister;
+ up_read(&policy->rwsem);
+ kobject_put(kobj);
+
+ /*
+ * We need to make sure that the underlying kobj is
+ * actually not referenced anymore by anybody before we
+ * proceed with unloading.
+ */
+ pr_debug("waiting for dropping of refcount\n");
+ wait_for_completion(cmp);
+ pr_debug("wait complete\n");
+}
+
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
free_cpumask_var(policy->related_cpus);
@@ -986,7 +1063,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
+ ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
up_read(&cpufreq_rwsem);
return ret;
}
@@ -994,15 +1071,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
- if (frozen)
- /* Restore the saved policy when doing light-weight init */
- policy = cpufreq_policy_restore(cpu);
- else
+ /*
+ * Restore the saved policy when doing light-weight init and fall back
+ * to the full init if that fails.
+ */
+ policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
+ if (!policy) {
+ frozen = false;
policy = cpufreq_policy_alloc();
-
- if (!policy)
- goto nomem_out;
-
+ if (!policy)
+ goto nomem_out;
+ }
/*
* In the resume path, since we restore a saved policy, the assignment
@@ -1030,6 +1109,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
goto err_set_policy_cpu;
}
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ for_each_cpu(j, policy->cpus)
+ per_cpu(cpufreq_cpu_data, j) = policy;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
if (cpufreq_driver->get) {
policy->cur = cpufreq_driver->get(policy->cpu);
if (!policy->cur) {
@@ -1038,6 +1122,46 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
}
}
+ /*
+ * Sometimes boot loaders set the CPU frequency to a value outside of the
+ * frequency table known to the cpufreq core. In such cases the CPU might
+ * be unstable if it has to run at that frequency for a long time, so it
+ * is better to set it to a frequency listed in the freq-table. This also
+ * makes cpufreq stats inconsistent, as cpufreq-stats would fail to
+ * register because the current CPU frequency isn't found in the
+ * freq-table.
+ *
+ * Because we don't want this change to affect the boot process badly, we go
+ * for the next freq which is >= policy->cur ('cur' must be set by now,
+ * otherwise we will end up setting freq to lowest of the table as 'cur'
+ * is initialized to zero).
+ *
+ * We are passing target-freq as "policy->cur - 1" otherwise
+ * __cpufreq_driver_target() would simply fail, as policy->cur will be
+ * equal to target-freq.
+ */
+ if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
+ && has_target()) {
+ /* Are we running at unknown frequency ? */
+ ret = cpufreq_frequency_table_get_index(policy, policy->cur);
+ if (ret == -EINVAL) {
+ /* Warn user and fix it */
+ pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
+ __func__, policy->cpu, policy->cur);
+ ret = __cpufreq_driver_target(policy, policy->cur - 1,
+ CPUFREQ_RELATION_L);
+
+ /*
+ * Reaching here a few seconds after boot does not mean
+ * the system will remain stable at the "unknown"
+ * frequency for much longer. Hence, a BUG_ON().
+ */
+ BUG_ON(ret);
+ pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
+ __func__, policy->cpu, policy->cur);
+ }
+ }
+
+ /* related cpus should at least have policy->cpus */
cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
@@ -1047,8 +1171,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
*/
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
- policy->user_policy.min = policy->min;
- policy->user_policy.max = policy->max;
+ if (!frozen) {
+ policy->user_policy.min = policy->min;
+ policy->user_policy.max = policy->max;
+ }
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
@@ -1062,15 +1188,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
}
#endif
- write_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_cpu(j, policy->cpus)
- per_cpu(cpufreq_cpu_data, j) = policy;
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
if (!frozen) {
ret = cpufreq_add_dev_interface(policy, dev);
if (ret)
goto err_out_unregister;
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_CREATE_POLICY, policy);
}
write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -1079,6 +1202,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
cpufreq_init_policy(policy);
+ if (!frozen) {
+ policy->user_policy.policy = policy->policy;
+ policy->user_policy.governor = policy->governor;
+ }
+
kobject_uevent(&policy->kobj, KOBJ_ADD);
up_read(&cpufreq_rwsem);
@@ -1087,16 +1215,22 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
return 0;
err_out_unregister:
+err_get_freq:
write_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus)
per_cpu(cpufreq_cpu_data, j) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-err_get_freq:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
err_set_policy_cpu:
+ if (frozen) {
+ /* Do not leave stale fallback data behind. */
+ per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
+ cpufreq_policy_put_kobj(policy);
+ }
cpufreq_policy_free(policy);
+
nomem_out:
up_read(&cpufreq_rwsem);
@@ -1118,7 +1252,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
}
static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
- unsigned int old_cpu, bool frozen)
+ unsigned int old_cpu)
{
struct device *cpu_dev;
int ret;
@@ -1126,10 +1260,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
/* first sibling now owns the new sysfs dir */
cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
- /* Don't touch sysfs files during light-weight tear-down */
- if (frozen)
- return cpu_dev->id;
-
sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
if (ret) {
@@ -1196,7 +1326,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
if (!frozen)
sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
- new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
+ new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
if (new_cpu >= 0) {
update_policy_cpu(policy, new_cpu);
@@ -1218,8 +1348,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
int ret;
unsigned long flags;
struct cpufreq_policy *policy;
- struct kobject *kobj;
- struct completion *cmp;
read_lock_irqsave(&cpufreq_driver_lock, flags);
policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1249,22 +1377,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
}
}
- if (!frozen) {
- down_read(&policy->rwsem);
- kobj = &policy->kobj;
- cmp = &policy->kobj_unregister;
- up_read(&policy->rwsem);
- kobject_put(kobj);
-
- /*
- * We need to make sure that the underlying kobj is
- * actually not referenced anymore by anybody before we
- * proceed with unloading.
- */
- pr_debug("waiting for dropping of refcount\n");
- wait_for_completion(cmp);
- pr_debug("wait complete\n");
- }
+ if (!frozen)
+ cpufreq_policy_put_kobj(policy);
/*
* Perform the ->exit() even during light-weight tear-down,
@@ -1711,17 +1825,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
pr_err("%s: Failed to change cpu frequency: %d\n",
__func__, retval);
- if (notify) {
- /*
- * Notify with old freq in case we failed to change
- * frequency
- */
- if (retval)
- freqs.new = freqs.old;
-
- cpufreq_notify_transition(policy, &freqs,
- CPUFREQ_POSTCHANGE);
- }
+ if (notify)
+ cpufreq_notify_post_transition(policy, &freqs, retval);
}
out:
@@ -2106,6 +2211,73 @@ static struct notifier_block __refdata cpufreq_cpu_notifier = {
};
/*********************************************************************
+ * BOOST *
+ *********************************************************************/
+static int cpufreq_boost_set_sw(int state)
+{
+ struct cpufreq_frequency_table *freq_table;
+ struct cpufreq_policy *policy;
+ int ret = -EINVAL;
+
+ list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+ freq_table = cpufreq_frequency_get_table(policy->cpu);
+ if (freq_table) {
+ ret = cpufreq_frequency_table_cpuinfo(policy,
+ freq_table);
+ if (ret) {
+ pr_err("%s: Policy frequency update failed\n",
+ __func__);
+ break;
+ }
+ policy->user_policy.max = policy->max;
+ __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ }
+ }
+
+ return ret;
+}
+
+int cpufreq_boost_trigger_state(int state)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (cpufreq_driver->boost_enabled == state)
+ return 0;
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver->boost_enabled = state;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ ret = cpufreq_driver->set_boost(state);
+ if (ret) {
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver->boost_enabled = !state;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ pr_err("%s: Cannot %s BOOST\n", __func__,
+ state ? "enable" : "disable");
+ }
+
+ return ret;
+}
+
+int cpufreq_boost_supported(void)
+{
+ if (likely(cpufreq_driver))
+ return cpufreq_driver->boost_supported;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
+
+int cpufreq_boost_enabled(void)
+{
+ return cpufreq_driver->boost_enabled;
+}
+EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
+
+/*********************************************************************
* REGISTER / UNREGISTER CPUFREQ DRIVER *
*********************************************************************/
@@ -2145,9 +2317,25 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ if (cpufreq_boost_supported()) {
+ /*
+ * Check if driver provides function to enable boost -
+ * if not, use cpufreq_boost_set_sw as default
+ */
+ if (!cpufreq_driver->set_boost)
+ cpufreq_driver->set_boost = cpufreq_boost_set_sw;
+
+ ret = cpufreq_sysfs_create_file(&boost.attr);
+ if (ret) {
+ pr_err("%s: cannot register global BOOST sysfs file\n",
+ __func__);
+ goto err_null_driver;
+ }
+ }
+
ret = subsys_interface_register(&cpufreq_interface);
if (ret)
- goto err_null_driver;
+ goto err_boost_unreg;
if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
int i;
@@ -2174,6 +2362,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
return 0;
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
+err_boost_unreg:
+ if (cpufreq_boost_supported())
+ cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
@@ -2200,6 +2391,9 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
pr_debug("unregistering driver %s\n", driver->name);
subsys_interface_unregister(&cpufreq_interface);
+ if (cpufreq_boost_supported())
+ cpufreq_sysfs_remove_file(&boost.attr);
+
unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
down_write(&cpufreq_rwsem);
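With the core boost plumbing above, a driver opts into frequency boost by flagging boost_supported (and, where appropriate, boost_enabled) before registration; if it supplies no ->set_boost() callback the core falls back to cpufreq_boost_set_sw(), and the global "boost" sysfs attribute is created by cpufreq_register_driver() itself. A hedged driver-side sketch with illustrative demo_* names; the usual ->init/->verify/->target callbacks are omitted:

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int demo_set_boost(int state)
{
	/* program the hardware boost state here; return 0 on success */
	pr_debug("demo: boost %sabled\n", state ? "en" : "dis");
	return 0;
}

static struct cpufreq_driver demo_boost_driver = {
	.name		= "demo-boost",
	.set_boost	= demo_set_boost,	/* omit to use cpufreq_boost_set_sw() */
};

static int __init demo_boost_init(void)
{
	/* advertise boost support before registering so the sysfs file is created */
	demo_boost_driver.boost_supported = true;
	demo_boost_driver.boost_enabled = false;
	return cpufreq_register_driver(&demo_boost_driver);
}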
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index e6be63561fa6..ba43991ba98a 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -119,8 +119,9 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
{
int i;
+ mutex_lock(&cpufreq_governor_lock);
if (!policy->governor_enabled)
- return;
+ goto out_unlock;
if (!all_cpus) {
/*
@@ -135,6 +136,9 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
for_each_cpu(i, policy->cpus)
__gov_queue_work(i, dbs_data, delay);
}
+
+out_unlock:
+ mutex_unlock(&cpufreq_governor_lock);
}
EXPORT_SYMBOL_GPL(gov_queue_work);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index b5f2b8618949..bfb9ae14142c 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -257,6 +257,8 @@ static ssize_t show_sampling_rate_min_gov_pol \
return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
}
+extern struct mutex cpufreq_governor_lock;
+
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
unsigned int sampling_rate);
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 4cf0d2805cb2..5793e1447fb1 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -151,44 +151,36 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
return -1;
}
-/* should be called late in the CPU removal sequence so that the stats
- * memory is still available in case someone tries to use it.
- */
-static void cpufreq_stats_free_table(unsigned int cpu)
+static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
{
- struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
+ struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
- if (stat) {
- pr_debug("%s: Free stat table\n", __func__);
- kfree(stat->time_in_state);
- kfree(stat);
- per_cpu(cpufreq_stats_table, cpu) = NULL;
- }
+ if (!stat)
+ return;
+
+ pr_debug("%s: Free stat table\n", __func__);
+
+ sysfs_remove_group(&policy->kobj, &stats_attr_group);
+ kfree(stat->time_in_state);
+ kfree(stat);
+ per_cpu(cpufreq_stats_table, policy->cpu) = NULL;
}
-/* must be called early in the CPU removal sequence (before
- * cpufreq_remove_dev) so that policy is still valid.
- */
-static void cpufreq_stats_free_sysfs(unsigned int cpu)
+static void cpufreq_stats_free_table(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy;
+ policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
- if (!cpufreq_frequency_get_table(cpu))
- goto put_ref;
-
- if (!policy_is_shared(policy)) {
- pr_debug("%s: Free sysfs stat\n", __func__);
- sysfs_remove_group(&policy->kobj, &stats_attr_group);
- }
+ if (cpufreq_frequency_get_table(policy->cpu))
+ __cpufreq_stats_free_table(policy);
-put_ref:
cpufreq_cpu_put(policy);
}
-static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
+static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
unsigned int i, j, count = 0, ret = 0;
@@ -261,6 +253,26 @@ error_get_fail:
return ret;
}
+static void cpufreq_stats_create_table(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ struct cpufreq_frequency_table *table;
+
+ /*
+ * "likely(!policy)" because normally cpufreq_stats will be registered
+ * before cpufreq driver
+ */
+ policy = cpufreq_cpu_get(cpu);
+ if (likely(!policy))
+ return;
+
+ table = cpufreq_frequency_get_table(policy->cpu);
+ if (likely(table))
+ __cpufreq_stats_create_table(policy, table);
+
+ cpufreq_cpu_put(policy);
+}
+
static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
{
struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
@@ -277,7 +289,7 @@ static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
unsigned long val, void *data)
{
- int ret;
+ int ret = 0;
struct cpufreq_policy *policy = data;
struct cpufreq_frequency_table *table;
unsigned int cpu = policy->cpu;
@@ -287,15 +299,16 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
return 0;
}
- if (val != CPUFREQ_NOTIFY)
- return 0;
table = cpufreq_frequency_get_table(cpu);
if (!table)
return 0;
- ret = cpufreq_stats_create_table(policy, table);
- if (ret)
- return ret;
- return 0;
+
+ if (val == CPUFREQ_CREATE_POLICY)
+ ret = __cpufreq_stats_create_table(policy, table);
+ else if (val == CPUFREQ_REMOVE_POLICY)
+ __cpufreq_stats_free_table(policy);
+
+ return ret;
}
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
@@ -334,29 +347,6 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
return 0;
}
-static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_DOWN_PREPARE:
- cpufreq_stats_free_sysfs(cpu);
- break;
- case CPU_DEAD:
- cpufreq_stats_free_table(cpu);
- break;
- }
- return NOTIFY_OK;
-}
-
-/* priority=1 so this will get called before cpufreq_remove_dev */
-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
- .notifier_call = cpufreq_stat_cpu_callback,
- .priority = 1,
-};
-
static struct notifier_block notifier_policy_block = {
.notifier_call = cpufreq_stat_notifier_policy
};
@@ -376,14 +366,14 @@ static int __init cpufreq_stats_init(void)
if (ret)
return ret;
- register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
+ for_each_online_cpu(cpu)
+ cpufreq_stats_create_table(cpu);
ret = cpufreq_register_notifier(&notifier_trans_block,
CPUFREQ_TRANSITION_NOTIFIER);
if (ret) {
cpufreq_unregister_notifier(&notifier_policy_block,
CPUFREQ_POLICY_NOTIFIER);
- unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
for_each_online_cpu(cpu)
cpufreq_stats_free_table(cpu);
return ret;
@@ -399,11 +389,8 @@ static void __exit cpufreq_stats_exit(void)
CPUFREQ_POLICY_NOTIFIER);
cpufreq_unregister_notifier(&notifier_trans_block,
CPUFREQ_TRANSITION_NOTIFIER);
- unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
- for_each_online_cpu(cpu) {
+ for_each_online_cpu(cpu)
cpufreq_stats_free_table(cpu);
- cpufreq_stats_free_sysfs(cpu);
- }
}
MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
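
The cpufreq_stats rework above replaces the CPU hotplug notifier with the policy notifier: the stats table and its per-policy sysfs group are now created on CPUFREQ_CREATE_POLICY and torn down on CPUFREQ_REMOVE_POLICY, and module init walks the already-online CPUs so policies that existed before the module loaded get a table as well. Condensed from the hunks, the dispatch becomes:

	static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
						unsigned long val, void *data)
	{
		int ret = 0;
		struct cpufreq_policy *policy = data;
		struct cpufreq_frequency_table *table;

		table = cpufreq_frequency_get_table(policy->cpu);
		if (!table)
			return 0;

		if (val == CPUFREQ_CREATE_POLICY)
			ret = __cpufreq_stats_create_table(policy, table);
		else if (val == CPUFREQ_REMOVE_POLICY)
			__cpufreq_stats_free_table(policy);

		return ret;
	}
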
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 5e8a854381b7..2cf33848d86e 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -58,14 +58,6 @@ static int davinci_verify_speed(struct cpufreq_policy *policy)
return 0;
}
-static unsigned int davinci_getspeed(unsigned int cpu)
-{
- if (cpu)
- return 0;
-
- return clk_get_rate(cpufreq.armclk) / 1000;
-}
-
static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
{
struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
@@ -73,7 +65,7 @@ static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
unsigned int old_freq, new_freq;
int ret = 0;
- old_freq = davinci_getspeed(0);
+ old_freq = policy->cur;
new_freq = pdata->freq_table[idx].frequency;
/* if moving to higher frequency, up the voltage beforehand */
@@ -116,6 +108,8 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
return result;
}
+ policy->clk = cpufreq.armclk;
+
/*
* Time measurement across the target() function yields ~1500-1800us
* time taken with no drivers on notification list.
@@ -126,10 +120,10 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver davinci_driver = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = davinci_verify_speed,
.target_index = davinci_target,
- .get = davinci_getspeed,
+ .get = cpufreq_generic_get,
.init = davinci_cpu_init,
.exit = cpufreq_generic_exit,
.name = "davinci",
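
From here on, a recurring pattern in this series: drivers drop their private ->get() callbacks, set policy->clk in ->init(), and point ->get at cpufreq_generic_get(), which reads the current rate from the common clock framework; CPUFREQ_NEED_INITIAL_FREQ_CHECK is added so the core validates policy->cur against the frequency table at init time. A minimal sketch of the behaviour the drivers now rely on (the real helper lives in drivers/cpufreq/cpufreq.c and looks the policy up from per-CPU data; the lookup below is only illustrative):

	unsigned int cpufreq_generic_get(unsigned int cpu)
	{
		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);	/* illustrative lookup */
		unsigned int freq = 0;

		if (policy && policy->clk && !IS_ERR(policy->clk))
			freq = clk_get_rate(policy->clk) / 1000;	/* Hz -> kHz */
		if (policy)
			cpufreq_cpu_put(policy);

		return freq;
	}
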
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 0e67ab96321a..412a78bb0c94 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -26,32 +26,18 @@ static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
return clk_set_rate(armss_clk, freq_table[index].frequency * 1000);
}
-static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
-{
- int i = 0;
- unsigned long freq = clk_get_rate(armss_clk) / 1000;
-
- /* The value is rounded to closest frequency in the defined table. */
- while (freq_table[i + 1].frequency != CPUFREQ_TABLE_END) {
- if (freq < freq_table[i].frequency +
- (freq_table[i + 1].frequency - freq_table[i].frequency) / 2)
- return freq_table[i].frequency;
- i++;
- }
-
- return freq_table[i].frequency;
-}
-
static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
{
+ policy->clk = armss_clk;
return cpufreq_generic_init(policy, freq_table, 20 * 1000);
}
static struct cpufreq_driver dbx500_cpufreq_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
+ .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = dbx500_cpufreq_target,
- .get = dbx500_cpufreq_getspeed,
+ .get = cpufreq_generic_get,
.init = dbx500_cpufreq_init,
.name = "DBX500",
.attr = cpufreq_generic_attr,
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index f3c22874da75..fcd2914d081a 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <linux/cpufreq.h>
#include <linux/suspend.h>
+#include <linux/platform_device.h>
#include <plat/cpu.h>
@@ -30,11 +31,6 @@ static unsigned int locking_frequency;
static bool frequency_locked;
static DEFINE_MUTEX(cpufreq_lock);
-static unsigned int exynos_getspeed(unsigned int cpu)
-{
- return clk_get_rate(exynos_info->cpu_clk) / 1000;
-}
-
static int exynos_cpufreq_get_index(unsigned int freq)
{
struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
@@ -214,25 +210,29 @@ static struct notifier_block exynos_cpufreq_nb = {
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
+ policy->clk = exynos_info->cpu_clk;
return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
}
static struct cpufreq_driver exynos_driver = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = exynos_target,
- .get = exynos_getspeed,
+ .get = cpufreq_generic_get,
.init = exynos_cpufreq_cpu_init,
.exit = cpufreq_generic_exit,
.name = "exynos_cpufreq",
.attr = cpufreq_generic_attr,
+#ifdef CONFIG_ARM_EXYNOS_CPU_FREQ_BOOST_SW
+ .boost_supported = true,
+#endif
#ifdef CONFIG_PM
.suspend = exynos_cpufreq_suspend,
.resume = exynos_cpufreq_resume,
#endif
};
-static int __init exynos_cpufreq_init(void)
+static int exynos_cpufreq_probe(struct platform_device *pdev)
{
int ret = -EINVAL;
@@ -263,7 +263,7 @@ static int __init exynos_cpufreq_init(void)
goto err_vdd_arm;
}
- locking_frequency = exynos_getspeed(0);
+ locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;
register_pm_notifier(&exynos_cpufreq_nb);
@@ -281,4 +281,12 @@ err_vdd_arm:
kfree(exynos_info);
return -EINVAL;
}
-late_initcall(exynos_cpufreq_init);
+
+static struct platform_driver exynos_cpufreq_platdrv = {
+ .driver = {
+ .name = "exynos-cpufreq",
+ .owner = THIS_MODULE,
+ },
+ .probe = exynos_cpufreq_probe,
+};
+module_platform_driver(exynos_cpufreq_platdrv);
diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h
index 7f25cee8cec2..3ddade8a5125 100644
--- a/drivers/cpufreq/exynos-cpufreq.h
+++ b/drivers/cpufreq/exynos-cpufreq.h
@@ -67,3 +67,25 @@ static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
return -EOPNOTSUPP;
}
#endif
+
+#include <plat/cpu.h>
+#include <mach/map.h>
+
+#define EXYNOS4_CLKSRC_CPU (S5P_VA_CMU + 0x14200)
+#define EXYNOS4_CLKMUX_STATCPU (S5P_VA_CMU + 0x14400)
+
+#define EXYNOS4_CLKDIV_CPU (S5P_VA_CMU + 0x14500)
+#define EXYNOS4_CLKDIV_CPU1 (S5P_VA_CMU + 0x14504)
+#define EXYNOS4_CLKDIV_STATCPU (S5P_VA_CMU + 0x14600)
+#define EXYNOS4_CLKDIV_STATCPU1 (S5P_VA_CMU + 0x14604)
+
+#define EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT (16)
+#define EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK (0x7 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)
+
+#define EXYNOS5_APLL_LOCK (S5P_VA_CMU + 0x00000)
+#define EXYNOS5_APLL_CON0 (S5P_VA_CMU + 0x00100)
+#define EXYNOS5_CLKMUX_STATCPU (S5P_VA_CMU + 0x00400)
+#define EXYNOS5_CLKDIV_CPU0 (S5P_VA_CMU + 0x00500)
+#define EXYNOS5_CLKDIV_CPU1 (S5P_VA_CMU + 0x00504)
+#define EXYNOS5_CLKDIV_STATCPU0 (S5P_VA_CMU + 0x00600)
+#define EXYNOS5_CLKDIV_STATCPU1 (S5P_VA_CMU + 0x00604)
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index dfd1643b0b2f..40d84c43d8f4 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -17,8 +17,6 @@
#include <linux/slab.h>
#include <linux/cpufreq.h>
-#include <mach/regs-clock.h>
-
#include "exynos-cpufreq.h"
static struct clk *cpu_clk;
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index efad5e657f6f..7c11ace3b3fc 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -17,8 +17,6 @@
#include <linux/slab.h>
#include <linux/cpufreq.h>
-#include <mach/regs-clock.h>
-
#include "exynos-cpufreq.h"
static struct clk *cpu_clk;
@@ -32,7 +30,7 @@ static unsigned int exynos4x12_volt_table[] = {
};
static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
- {L0, CPUFREQ_ENTRY_INVALID},
+ {CPUFREQ_BOOST_FREQ, 1500 * 1000},
{L1, 1400 * 1000},
{L2, 1300 * 1000},
{L3, 1200 * 1000},
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index 8feda86fe42c..5f90b82a4082 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -18,7 +18,6 @@
#include <linux/cpufreq.h>
#include <mach/map.h>
-#include <mach/regs-clock.h>
#include "exynos-cpufreq.h"
@@ -102,12 +101,12 @@ static void set_clkdiv(unsigned int div_index)
cpu_relax();
}
-static void set_apll(unsigned int new_index,
- unsigned int old_index)
+static void set_apll(unsigned int index)
{
- unsigned int tmp, pdiv;
+ unsigned int tmp;
+ unsigned int freq = apll_freq_5250[index].freq;
- /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+ /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
clk_set_parent(moutcore, mout_mpll);
do {
@@ -116,24 +115,9 @@ static void set_apll(unsigned int new_index,
tmp &= 0x7;
} while (tmp != 0x2);
- /* 2. Set APLL Lock time */
- pdiv = ((apll_freq_5250[new_index].mps >> 8) & 0x3f);
-
- __raw_writel((pdiv * 250), EXYNOS5_APLL_LOCK);
+ clk_set_rate(mout_apll, freq * 1000);
- /* 3. Change PLL PMS values */
- tmp = __raw_readl(EXYNOS5_APLL_CON0);
- tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= apll_freq_5250[new_index].mps;
- __raw_writel(tmp, EXYNOS5_APLL_CON0);
-
- /* 4. wait_lock_time */
- do {
- cpu_relax();
- tmp = __raw_readl(EXYNOS5_APLL_CON0);
- } while (!(tmp & (0x1 << 29)));
-
- /* 5. MUX_CORE_SEL = APLL */
+ /* MUX_CORE_SEL = APLL */
clk_set_parent(moutcore, mout_apll);
do {
@@ -141,55 +125,17 @@ static void set_apll(unsigned int new_index,
tmp = __raw_readl(EXYNOS5_CLKMUX_STATCPU);
tmp &= (0x7 << 16);
} while (tmp != (0x1 << 16));
-
-}
-
-static bool exynos5250_pms_change(unsigned int old_index, unsigned int new_index)
-{
- unsigned int old_pm = apll_freq_5250[old_index].mps >> 8;
- unsigned int new_pm = apll_freq_5250[new_index].mps >> 8;
-
- return (old_pm == new_pm) ? 0 : 1;
}
static void exynos5250_set_frequency(unsigned int old_index,
unsigned int new_index)
{
- unsigned int tmp;
-
if (old_index > new_index) {
- if (!exynos5250_pms_change(old_index, new_index)) {
- /* 1. Change the system clock divider values */
- set_clkdiv(new_index);
- /* 2. Change just s value in apll m,p,s value */
- tmp = __raw_readl(EXYNOS5_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= apll_freq_5250[new_index].mps & 0x7;
- __raw_writel(tmp, EXYNOS5_APLL_CON0);
-
- } else {
- /* Clock Configuration Procedure */
- /* 1. Change the system clock divider values */
- set_clkdiv(new_index);
- /* 2. Change the apll m,p,s value */
- set_apll(new_index, old_index);
- }
+ set_clkdiv(new_index);
+ set_apll(new_index);
} else if (old_index < new_index) {
- if (!exynos5250_pms_change(old_index, new_index)) {
- /* 1. Change just s value in apll m,p,s value */
- tmp = __raw_readl(EXYNOS5_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= apll_freq_5250[new_index].mps & 0x7;
- __raw_writel(tmp, EXYNOS5_APLL_CON0);
- /* 2. Change the system clock divider values */
- set_clkdiv(new_index);
- } else {
- /* Clock Configuration Procedure */
- /* 1. Change the apll m,p,s value */
- set_apll(new_index, old_index);
- /* 2. Change the system clock divider values */
- set_clkdiv(new_index);
- }
+ set_apll(new_index);
+ set_clkdiv(new_index);
}
}
@@ -222,7 +168,6 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
info->volt_table = exynos5250_volt_table;
info->freq_table = exynos5250_freq_table;
info->set_freq = exynos5250_set_frequency;
- info->need_apll_change = exynos5250_pms_change;
return 0;
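
With the APLL reprogrammed through clk_set_rate() on mout_apll, the manual M/P/S register writes and the pms_change() special case disappear, so the need_apll_change hook is no longer needed. What remains is pure ordering: dividers first when moving to a higher frequency (lower table index), PLL first when moving to a lower one. The resulting transition path, as left by the hunk:

	static void exynos5250_set_frequency(unsigned int old_index,
					     unsigned int new_index)
	{
		if (old_index > new_index) {		/* scaling up */
			set_clkdiv(new_index);
			set_apll(new_index);
		} else if (old_index < new_index) {	/* scaling down */
			set_apll(new_index);
			set_clkdiv(new_index);
		}
	}
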
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index 76bef8b078cb..49b756015316 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -100,7 +100,6 @@ struct exynos_dvfs_data {
struct resource *mem;
int irq;
struct clk *cpu_clk;
- unsigned int cur_frequency;
unsigned int latency;
struct cpufreq_frequency_table *freq_table;
unsigned int freq_count;
@@ -165,7 +164,7 @@ static int init_div_table(void)
return 0;
}
-static void exynos_enable_dvfs(void)
+static void exynos_enable_dvfs(unsigned int cur_frequency)
{
unsigned int tmp, i, cpu;
struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
@@ -184,18 +183,18 @@ static void exynos_enable_dvfs(void)
/* Set initial performance index */
for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
- if (freq_table[i].frequency == dvfs_info->cur_frequency)
+ if (freq_table[i].frequency == cur_frequency)
break;
if (freq_table[i].frequency == CPUFREQ_TABLE_END) {
dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
/* Assign the highest frequency */
i = 0;
- dvfs_info->cur_frequency = freq_table[i].frequency;
+ cur_frequency = freq_table[i].frequency;
}
dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ",
- dvfs_info->cur_frequency);
+ cur_frequency);
for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
@@ -209,11 +208,6 @@ static void exynos_enable_dvfs(void)
dvfs_info->base + XMU_DVFS_CTRL);
}
-static unsigned int exynos_getspeed(unsigned int cpu)
-{
- return dvfs_info->cur_frequency;
-}
-
static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned int tmp;
@@ -222,7 +216,7 @@ static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
mutex_lock(&cpufreq_lock);
- freqs.old = dvfs_info->cur_frequency;
+ freqs.old = policy->cur;
freqs.new = freq_table[index].frequency;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
@@ -250,7 +244,7 @@ static void exynos_cpufreq_work(struct work_struct *work)
goto skip_work;
mutex_lock(&cpufreq_lock);
- freqs.old = dvfs_info->cur_frequency;
+ freqs.old = policy->cur;
cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
@@ -260,10 +254,9 @@ static void exynos_cpufreq_work(struct work_struct *work)
if (likely(index < dvfs_info->freq_count)) {
freqs.new = freq_table[index].frequency;
- dvfs_info->cur_frequency = freqs.new;
} else {
dev_crit(dvfs_info->dev, "New frequency out of range\n");
- freqs.new = dvfs_info->cur_frequency;
+ freqs.new = freqs.old;
}
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
@@ -307,15 +300,17 @@ static void exynos_sort_descend_freq_table(void)
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
+ policy->clk = dvfs_info->cpu_clk;
return cpufreq_generic_init(policy, dvfs_info->freq_table,
dvfs_info->latency);
}
static struct cpufreq_driver exynos_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION,
+ .flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = exynos_target,
- .get = exynos_getspeed,
+ .get = cpufreq_generic_get,
.init = exynos_cpufreq_cpu_init,
.exit = cpufreq_generic_exit,
.name = CPUFREQ_NAME,
@@ -335,6 +330,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
int ret = -EINVAL;
struct device_node *np;
struct resource res;
+ unsigned int cur_frequency;
np = pdev->dev.of_node;
if (!np)
@@ -391,13 +387,13 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
goto err_free_table;
}
- dvfs_info->cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
- if (!dvfs_info->cur_frequency) {
+ cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
+ if (!cur_frequency) {
dev_err(dvfs_info->dev, "Failed to get clock rate\n");
ret = -EINVAL;
goto err_free_table;
}
- dvfs_info->cur_frequency /= 1000;
+ cur_frequency /= 1000;
INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
@@ -414,7 +410,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
goto err_free_table;
}
- exynos_enable_dvfs();
+ exynos_enable_dvfs(cur_frequency);
ret = cpufreq_register_driver(&exynos_driver);
if (ret) {
dev_err(dvfs_info->dev,
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 3458d27f63b4..8e54f97899ba 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -32,6 +32,10 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
continue;
}
+ if (!cpufreq_boost_enabled()
+ && table[i].driver_data == CPUFREQ_BOOST_FREQ)
+ continue;
+
pr_debug("table entry %u: %u kHz, %u driver_data\n",
i, freq, table[i].driver_data);
if (freq < min_freq)
@@ -178,11 +182,34 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
+int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
+ unsigned int freq)
+{
+ struct cpufreq_frequency_table *table;
+ int i;
+
+ table = cpufreq_frequency_get_table(policy->cpu);
+ if (unlikely(!table)) {
+ pr_debug("%s: Unable to find frequency table\n", __func__);
+ return -ENOENT;
+ }
+
+ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ if (table[i].frequency == freq)
+ return i;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index);
+
static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
+
/**
* show_available_freqs - show available frequencies for the specified CPU
*/
-static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
+static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
+ bool show_boost)
{
unsigned int i = 0;
unsigned int cpu = policy->cpu;
@@ -197,6 +224,20 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
continue;
+ /*
+ * show_boost = true and driver_data = BOOST freq
+ * display BOOST freqs
+ *
+ * show_boost = false and driver_data = BOOST freq
+ * show_boost = true and driver_data != BOOST freq
+ * continue - do not display anything
+ *
+ * show_boost = false and driver_data != BOOST freq
+ * display NON BOOST freqs
+ */
+ if (show_boost ^ (table[i].driver_data == CPUFREQ_BOOST_FREQ))
+ continue;
+
count += sprintf(&buf[count], "%d ", table[i].frequency);
}
count += sprintf(&buf[count], "\n");
@@ -205,16 +246,39 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
}
-struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
- .attr = { .name = "scaling_available_frequencies",
- .mode = 0444,
- },
- .show = show_available_freqs,
-};
+#define cpufreq_attr_available_freq(_name) \
+struct freq_attr cpufreq_freq_attr_##_name##_freqs = \
+__ATTR_RO(_name##_frequencies)
+
+/**
+ * show_scaling_available_frequencies - show available normal frequencies for
+ * the specified CPU
+ */
+static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
+ char *buf)
+{
+ return show_available_freqs(policy, buf, false);
+}
+cpufreq_attr_available_freq(scaling_available);
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
+/**
+ * show_available_boost_freqs - show available boost frequencies for
+ * the specified CPU
+ */
+static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
+ char *buf)
+{
+ return show_available_freqs(policy, buf, true);
+}
+cpufreq_attr_available_freq(scaling_boost);
+EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
+
struct freq_attr *cpufreq_generic_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
+#ifdef CONFIG_CPU_FREQ_BOOST_SW
+ &cpufreq_freq_attr_scaling_boost_freqs,
+#endif
NULL,
};
EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
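
Boost handling in the common frequency-table code works by tagging boost entries with CPUFREQ_BOOST_FREQ in driver_data: cpuinfo limits skip them while boost is disabled, and the XOR in show_available_freqs() splits them between scaling_available_frequencies and the new scaling_boost_frequencies attribute. The predicate behind that XOR, pulled out into a hypothetical helper for clarity (this helper does not exist in the patch):

	/* Hypothetical helper: print an entry only when its boost-ness matches. */
	static bool freq_entry_visible(const struct cpufreq_frequency_table *pos,
				       bool show_boost)
	{
		bool is_boost = (pos->driver_data == CPUFREQ_BOOST_FREQ);

		return show_boost == is_boost;	/* skip when they differ (the XOR) */
	}
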
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 4b3f18e5f36b..ce69059be1fc 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -35,10 +35,8 @@ static struct device *cpu_dev;
static struct cpufreq_frequency_table *freq_table;
static unsigned int transition_latency;
-static unsigned int imx6q_get_speed(unsigned int cpu)
-{
- return clk_get_rate(arm_clk) / 1000;
-}
+static u32 *imx6_soc_volt;
+static u32 soc_opp_count;
static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
{
@@ -69,23 +67,22 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
/* scaling up? scale voltage before frequency */
if (new_freq > old_freq) {
+ ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
+ return ret;
+ }
+ ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_err(cpu_dev, "failed to scale vddsoc up: %d\n", ret);
+ return ret;
+ }
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
if (ret) {
dev_err(cpu_dev,
"failed to scale vddarm up: %d\n", ret);
return ret;
}
-
- /*
- * Need to increase vddpu and vddsoc for safety
- * if we are about to run at 1.2 GHz.
- */
- if (new_freq == FREQ_1P2_GHZ / 1000) {
- regulator_set_voltage_tol(pu_reg,
- PU_SOC_VOLTAGE_HIGH, 0);
- regulator_set_voltage_tol(soc_reg,
- PU_SOC_VOLTAGE_HIGH, 0);
- }
}
/*
@@ -120,12 +117,15 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
"failed to scale vddarm down: %d\n", ret);
ret = 0;
}
-
- if (old_freq == FREQ_1P2_GHZ / 1000) {
- regulator_set_voltage_tol(pu_reg,
- PU_SOC_VOLTAGE_NORMAL, 0);
- regulator_set_voltage_tol(soc_reg,
- PU_SOC_VOLTAGE_NORMAL, 0);
+ ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret);
+ ret = 0;
+ }
+ ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
+ ret = 0;
}
}
@@ -134,13 +134,15 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
+ policy->clk = arm_clk;
return cpufreq_generic_init(policy, freq_table, transition_latency);
}
static struct cpufreq_driver imx6q_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = imx6q_set_target,
- .get = imx6q_get_speed,
+ .get = cpufreq_generic_get,
.init = imx6q_cpufreq_init,
.exit = cpufreq_generic_exit,
.name = "imx6q-cpufreq",
@@ -153,6 +155,9 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
struct dev_pm_opp *opp;
unsigned long min_volt, max_volt;
int num, ret;
+ const struct property *prop;
+ const __be32 *val;
+ u32 nr, i, j;
cpu_dev = get_cpu_device(0);
if (!cpu_dev) {
@@ -187,12 +192,25 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_node;
}
- /* We expect an OPP table supplied by platform */
+ /*
+	 * We expect an OPP table supplied by the platform.
+	 * Just in case the platform did not supply the OPP
+	 * table, try to initialise one from the device tree.
+ */
num = dev_pm_opp_get_opp_count(cpu_dev);
if (num < 0) {
- ret = num;
- dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
- goto put_node;
+ ret = of_init_opp_table(cpu_dev);
+ if (ret < 0) {
+ dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
+ goto put_node;
+ }
+
+ num = dev_pm_opp_get_opp_count(cpu_dev);
+ if (num < 0) {
+ ret = num;
+ dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
+ goto put_node;
+ }
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
@@ -201,10 +219,62 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_node;
}
+ /* Make imx6_soc_volt array's size same as arm opp number */
+ imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num, GFP_KERNEL);
+ if (imx6_soc_volt == NULL) {
+ ret = -ENOMEM;
+ goto free_freq_table;
+ }
+
+ prop = of_find_property(np, "fsl,soc-operating-points", NULL);
+ if (!prop || !prop->value)
+ goto soc_opp_out;
+
+ /*
+ * Each OPP is a set of tuples consisting of frequency and
+ * voltage like <freq-kHz vol-uV>.
+ */
+ nr = prop->length / sizeof(u32);
+ if (nr % 2 || (nr / 2) < num)
+ goto soc_opp_out;
+
+ for (j = 0; j < num; j++) {
+ val = prop->value;
+ for (i = 0; i < nr / 2; i++) {
+ unsigned long freq = be32_to_cpup(val++);
+ unsigned long volt = be32_to_cpup(val++);
+ if (freq_table[j].frequency == freq) {
+ imx6_soc_volt[soc_opp_count++] = volt;
+ break;
+ }
+ }
+ }
+
+soc_opp_out:
+ /* use fixed soc opp volt if no valid soc opp info found in dtb */
+ if (soc_opp_count != num) {
+ dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n");
+ for (j = 0; j < num; j++)
+ imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL;
+ if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ)
+ imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH;
+ }
+
if (of_property_read_u32(np, "clock-latency", &transition_latency))
transition_latency = CPUFREQ_ETERNAL;
/*
+ * Calculate the ramp time for max voltage change in the
+ * VDDSOC and VDDPU regulators.
+ */
+ ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+ ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+
+ /*
* OPP is maintained in order of increasing frequency, and
* freq_table initialised from OPP is therefore sorted in the
* same order.
@@ -221,18 +291,6 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
if (ret > 0)
transition_latency += ret * 1000;
- /* Count vddpu and vddsoc latency in for 1.2 GHz support */
- if (freq_table[num].frequency == FREQ_1P2_GHZ / 1000) {
- ret = regulator_set_voltage_time(pu_reg, PU_SOC_VOLTAGE_NORMAL,
- PU_SOC_VOLTAGE_HIGH);
- if (ret > 0)
- transition_latency += ret * 1000;
- ret = regulator_set_voltage_time(soc_reg, PU_SOC_VOLTAGE_NORMAL,
- PU_SOC_VOLTAGE_HIGH);
- if (ret > 0)
- transition_latency += ret * 1000;
- }
-
ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
if (ret) {
dev_err(cpu_dev, "failed register driver: %d\n", ret);
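
Instead of bumping VDDPU/VDDSOC to a fixed high voltage only for the 1.2 GHz operating point, the probe now builds a per-OPP voltage array from the optional fsl,soc-operating-points property, which holds <frequency-kHz voltage-uV> pairs decoded with be32_to_cpup(); if the property is missing or does not cover every ARM OPP, all entries fall back to PU_SOC_VOLTAGE_NORMAL, with the top entry raised to PU_SOC_VOLTAGE_HIGH when it is the 1.2 GHz point. Purely for illustration (the frequencies and voltages below are made up, not taken from any board file), the decoded pairs amount to something like:

	/* Hypothetical decoded contents of "fsl,soc-operating-points". */
	static const struct {
		unsigned long freq_khz;
		unsigned long volt_uv;
	} soc_opp_example[] = {
		{ 1200000, 1275000 },
		{  996000, 1175000 },
		{  792000, 1175000 },
		{  396000, 1175000 },
	};
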
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index 7d8ab000d317..0e27844e8c2d 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -190,6 +190,7 @@ static int integrator_cpufreq_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver integrator_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = integrator_verify_policy,
.target = integrator_set_target,
.get = integrator_get,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 5f1cbae36961..c788abf1c457 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -35,6 +35,7 @@
#define SAMPLE_COUNT 3
#define BYT_RATIOS 0x66a
+#define BYT_VIDS 0x66b
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
@@ -54,6 +55,7 @@ struct sample {
int32_t core_pct_busy;
u64 aperf;
u64 mperf;
+ unsigned long long tsc;
int freq;
};
@@ -64,6 +66,12 @@ struct pstate_data {
int turbo_pstate;
};
+struct vid_data {
+ int32_t min;
+ int32_t max;
+ int32_t ratio;
+};
+
struct _pid {
int setpoint;
int32_t integral;
@@ -82,12 +90,12 @@ struct cpudata {
struct timer_list timer;
struct pstate_data pstate;
+ struct vid_data vid;
struct _pid pid;
- int min_pstate_count;
-
u64 prev_aperf;
u64 prev_mperf;
+ unsigned long long prev_tsc;
int sample_ptr;
struct sample samples[SAMPLE_COUNT];
};
@@ -106,7 +114,8 @@ struct pstate_funcs {
int (*get_max)(void);
int (*get_min)(void);
int (*get_turbo)(void);
- void (*set)(int pstate);
+ void (*set)(struct cpudata*, int pstate);
+ void (*get_vid)(struct cpudata *);
};
struct cpu_defaults {
@@ -358,6 +367,42 @@ static int byt_get_max_pstate(void)
return (value >> 16) & 0xFF;
}
+static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+{
+ u64 val;
+ int32_t vid_fp;
+ u32 vid;
+
+ val = pstate << 8;
+ if (limits.no_turbo)
+ val |= (u64)1 << 32;
+
+ vid_fp = cpudata->vid.min + mul_fp(
+ int_tofp(pstate - cpudata->pstate.min_pstate),
+ cpudata->vid.ratio);
+
+ vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
+ vid = fp_toint(vid_fp);
+
+ val |= vid;
+
+ wrmsrl(MSR_IA32_PERF_CTL, val);
+}
+
+static void byt_get_vid(struct cpudata *cpudata)
+{
+ u64 value;
+
+ rdmsrl(BYT_VIDS, value);
+ cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
+ cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
+ cpudata->vid.ratio = div_fp(
+ cpudata->vid.max - cpudata->vid.min,
+ int_tofp(cpudata->pstate.max_pstate -
+ cpudata->pstate.min_pstate));
+}
+
+
static int core_get_min_pstate(void)
{
u64 value;
@@ -384,7 +429,7 @@ static int core_get_turbo_pstate(void)
return ret;
}
-static void core_set_pstate(int pstate)
+static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
u64 val;
@@ -425,7 +470,8 @@ static struct cpu_defaults byt_params = {
.get_max = byt_get_max_pstate,
.get_min = byt_get_min_pstate,
.get_turbo = byt_get_max_pstate,
- .set = core_set_pstate,
+ .set = byt_set_pstate,
+ .get_vid = byt_get_vid,
},
};
@@ -462,7 +508,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
cpu->pstate.current_pstate = pstate;
- pstate_funcs.set(pstate);
+ pstate_funcs.set(cpu, pstate);
}
static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
@@ -488,6 +534,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
cpu->pstate.max_pstate = pstate_funcs.get_max();
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
+ if (pstate_funcs.get_vid)
+ pstate_funcs.get_vid(cpu);
+
/*
* goto max pstate so we don't slow up boot if we are built-in if we are
* a module we will take care of it during normal operation
@@ -499,29 +548,41 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
struct sample *sample)
{
u64 core_pct;
- core_pct = div64_u64(int_tofp(sample->aperf * 100),
- sample->mperf);
- sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
+ u64 c0_pct;
+
+ core_pct = div64_u64(sample->aperf * 100, sample->mperf);
- sample->core_pct_busy = core_pct;
+ c0_pct = div64_u64(sample->mperf * 100, sample->tsc);
+ sample->freq = fp_toint(
+ mul_fp(int_tofp(cpu->pstate.max_pstate),
+ int_tofp(core_pct * 1000)));
+
+ sample->core_pct_busy = mul_fp(int_tofp(core_pct),
+ div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
}
static inline void intel_pstate_sample(struct cpudata *cpu)
{
u64 aperf, mperf;
+ unsigned long long tsc;
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
+ tsc = native_read_tsc();
+
cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
cpu->samples[cpu->sample_ptr].aperf = aperf;
cpu->samples[cpu->sample_ptr].mperf = mperf;
+ cpu->samples[cpu->sample_ptr].tsc = tsc;
cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+ cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;
intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
cpu->prev_aperf = aperf;
cpu->prev_mperf = mperf;
+ cpu->prev_tsc = tsc;
}
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
@@ -556,6 +617,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
ctl = pid_calc(pid, busy_scaled);
steps = abs(ctl);
+
if (ctl < 0)
intel_pstate_pstate_increase(cpu, steps);
else
@@ -565,23 +627,27 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
static void intel_pstate_timer_func(unsigned long __data)
{
struct cpudata *cpu = (struct cpudata *) __data;
+ struct sample *sample;
intel_pstate_sample(cpu);
+
+ sample = &cpu->samples[cpu->sample_ptr];
+
intel_pstate_adjust_busy_pstate(cpu);
- if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
- cpu->min_pstate_count++;
- if (!(cpu->min_pstate_count % 5)) {
- intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
- }
- } else
- cpu->min_pstate_count = 0;
+ trace_pstate_sample(fp_toint(sample->core_pct_busy),
+ fp_toint(intel_pstate_get_scaled_busy(cpu)),
+ cpu->pstate.current_pstate,
+ sample->mperf,
+ sample->aperf,
+ sample->freq);
intel_pstate_set_sample_time(cpu);
}
#define ICPU(model, policy) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
+ (unsigned long)&policy }
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(0x2a, core_params),
@@ -614,6 +680,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
cpu = all_cpu_data[cpunum];
intel_pstate_get_cpu_pstates(cpu);
+ if (!cpu->pstate.current_pstate) {
+ all_cpu_data[cpunum] = NULL;
+ kfree(cpu);
+ return -ENODATA;
+ }
cpu->cpu = cpunum;
@@ -776,6 +847,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
pstate_funcs.get_min = funcs->get_min;
pstate_funcs.get_turbo = funcs->get_turbo;
pstate_funcs.set = funcs->set;
+ pstate_funcs.get_vid = funcs->get_vid;
}
#if IS_ENABLED(CONFIG_ACPI)
@@ -884,6 +956,7 @@ static int __init intel_pstate_init(void)
intel_pstate_debug_expose_params();
intel_pstate_sysfs_expose_params();
+
return rc;
out:
get_online_cpus();
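
The Baytrail additions give the driver a ->set() callback that writes voltage as well as P-state: MSR 0x66a/0x66b (BYT_RATIOS/BYT_VIDS) supply the ratio and VID ranges, and byt_set_pstate() interpolates the VID linearly across the P-state range before packing (pstate << 8) | vid into MSR_IA32_PERF_CTL, with bit 32 set when turbo is disabled. Stripped of the driver's 8-bit fixed-point helpers, the interpolation reduces to the following (illustrative helper, not part of the patch):

	static int byt_vid_for_pstate(int pstate, int min_pstate, int max_pstate,
				      int vid_min, int vid_max)
	{
		int vid;

		if (max_pstate <= min_pstate)
			return vid_min;

		vid = vid_min + (pstate - min_pstate) * (vid_max - vid_min) /
				(max_pstate - min_pstate);

		return clamp(vid, vid_min, vid_max);	/* keep within the reported range */
	}
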
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 0767a4e29dfe..eb7abe345b50 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -97,6 +97,7 @@ static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver kirkwood_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.get = kirkwood_cpufreq_get_cpu_frequency,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = kirkwood_cpufreq_target,
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index a43609218105..b6581abc9207 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -24,8 +24,6 @@
static uint nowait;
-static struct clk *cpuclk;
-
static void (*saved_cpu_wait) (void);
static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
@@ -44,11 +42,6 @@ static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
return 0;
}
-static unsigned int loongson2_cpufreq_get(unsigned int cpu)
-{
- return clk_get_rate(cpuclk);
-}
-
/*
* Here we notify other drivers of the proposed change and the final change.
*/
@@ -69,13 +62,14 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
set_cpus_allowed_ptr(current, &cpus_allowed);
/* setting the cpu frequency */
- clk_set_rate(cpuclk, freq);
+ clk_set_rate(policy->clk, freq);
return 0;
}
static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
+ static struct clk *cpuclk;
int i;
unsigned long rate;
int ret;
@@ -104,13 +98,14 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
return ret;
}
+ policy->clk = cpuclk;
return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0);
}
static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
- clk_put(cpuclk);
+ clk_put(policy->clk);
return 0;
}
@@ -119,7 +114,7 @@ static struct cpufreq_driver loongson2_cpufreq_driver = {
.init = loongson2_cpufreq_cpu_init,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = loongson2_cpufreq_target,
- .get = loongson2_cpufreq_get,
+ .get = cpufreq_generic_get,
.exit = loongson2_cpufreq_exit,
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index a0acd0bfba40..590f5b66d181 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -36,21 +36,9 @@
static struct cpufreq_frequency_table *freq_table;
static atomic_t freq_table_users = ATOMIC_INIT(0);
-static struct clk *mpu_clk;
static struct device *mpu_dev;
static struct regulator *mpu_reg;
-static unsigned int omap_getspeed(unsigned int cpu)
-{
- unsigned long rate;
-
- if (cpu >= NR_CPUS)
- return 0;
-
- rate = clk_get_rate(mpu_clk) / 1000;
- return rate;
-}
-
static int omap_target(struct cpufreq_policy *policy, unsigned int index)
{
int r, ret;
@@ -58,11 +46,11 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
unsigned long freq, volt = 0, volt_old = 0, tol = 0;
unsigned int old_freq, new_freq;
- old_freq = omap_getspeed(policy->cpu);
+ old_freq = policy->cur;
new_freq = freq_table[index].frequency;
freq = new_freq * 1000;
- ret = clk_round_rate(mpu_clk, freq);
+ ret = clk_round_rate(policy->clk, freq);
if (IS_ERR_VALUE(ret)) {
dev_warn(mpu_dev,
"CPUfreq: Cannot find matching frequency for %lu\n",
@@ -100,7 +88,7 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
}
}
- ret = clk_set_rate(mpu_clk, new_freq * 1000);
+ ret = clk_set_rate(policy->clk, new_freq * 1000);
/* scaling down? scale voltage after frequency */
if (mpu_reg && (new_freq < old_freq)) {
@@ -108,7 +96,7 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
if (r < 0) {
dev_warn(mpu_dev, "%s: unable to scale voltage down.\n",
__func__);
- clk_set_rate(mpu_clk, old_freq * 1000);
+ clk_set_rate(policy->clk, old_freq * 1000);
return r;
}
}
@@ -126,9 +114,9 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
{
int result;
- mpu_clk = clk_get(NULL, "cpufreq_ck");
- if (IS_ERR(mpu_clk))
- return PTR_ERR(mpu_clk);
+ policy->clk = clk_get(NULL, "cpufreq_ck");
+ if (IS_ERR(policy->clk))
+ return PTR_ERR(policy->clk);
if (!freq_table) {
result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table);
@@ -149,7 +137,7 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
freq_table_free();
fail:
- clk_put(mpu_clk);
+ clk_put(policy->clk);
return result;
}
@@ -157,15 +145,15 @@ static int omap_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
freq_table_free();
- clk_put(mpu_clk);
+ clk_put(policy->clk);
return 0;
}
static struct cpufreq_driver omap_driver = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = omap_target,
- .get = omap_getspeed,
+ .get = cpufreq_generic_get,
.init = omap_cpu_init,
.exit = omap_cpu_exit,
.name = "omap",
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index e2b4f40ff69a..1c0f1067af73 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -213,6 +213,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
cpu, target_freq,
(pcch_virt_addr + pcc_cpu_data->input_offset));
+ freqs.old = policy->cur;
freqs.new = target_freq;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
@@ -228,25 +229,20 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
status = ioread16(&pcch_hdr->status);
+ iowrite16(0, &pcch_hdr->status);
+
+ cpufreq_notify_post_transition(policy, &freqs, status != CMD_COMPLETE);
+ spin_unlock(&pcc_lock);
+
if (status != CMD_COMPLETE) {
pr_debug("target: FAILED for cpu %d, with status: 0x%x\n",
cpu, status);
- goto cmd_incomplete;
+ return -EINVAL;
}
- iowrite16(0, &pcch_hdr->status);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu);
- spin_unlock(&pcc_lock);
return 0;
-
-cmd_incomplete:
- freqs.new = freqs.old;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- iowrite16(0, &pcch_hdr->status);
- spin_unlock(&pcc_lock);
- return -EINVAL;
}
static int pcc_get_offset(int cpu)
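
The error path shrinks because cpufreq_notify_post_transition() takes over the bookkeeping: it always sends the POSTCHANGE notification and, when the transition failed, replays the PRE/POSTCHANGE pair with old and new swapped so listeners end up back at the old frequency. Roughly, as a sketch of the core helper (from memory of drivers/cpufreq/cpufreq.c, not from this diff):

	void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
					    struct cpufreq_freqs *freqs,
					    int transition_failed)
	{
		cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
		if (!transition_failed)
			return;

		/* Roll back: report that we stayed at the old frequency. */
		swap(freqs->old, freqs->new);
		cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
		cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	}
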
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 643e7952cad3..b9a444e358b5 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -26,41 +26,108 @@
static unsigned int busfreq; /* FSB, in 10 kHz */
static unsigned int max_multiplier;
+static unsigned int param_busfreq = 0;
+static unsigned int param_max_multiplier = 0;
+
+module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO);
+MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)");
+
+module_param_named(bus_frequency, param_busfreq, uint, S_IRUGO);
+MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz");
/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
static struct cpufreq_frequency_table clock_ratio[] = {
- {45, /* 000 -> 4.5x */ 0},
+ {60, /* 110 -> 6.0x */ 0},
+ {55, /* 011 -> 5.5x */ 0},
{50, /* 001 -> 5.0x */ 0},
+ {45, /* 000 -> 4.5x */ 0},
{40, /* 010 -> 4.0x */ 0},
- {55, /* 011 -> 5.5x */ 0},
- {20, /* 100 -> 2.0x */ 0},
- {30, /* 101 -> 3.0x */ 0},
- {60, /* 110 -> 6.0x */ 0},
{35, /* 111 -> 3.5x */ 0},
+ {30, /* 101 -> 3.0x */ 0},
+ {20, /* 100 -> 2.0x */ 0},
{0, CPUFREQ_TABLE_END}
};
+static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
+static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };
+
+static const struct {
+ unsigned freq;
+ unsigned mult;
+} usual_frequency_table[] = {
+ { 400000, 40 }, // 100 * 4
+ { 450000, 45 }, // 100 * 4.5
+ { 475000, 50 }, // 95 * 5
+ { 500000, 50 }, // 100 * 5
+ { 506250, 45 }, // 112.5 * 4.5
+ { 533500, 55 }, // 97 * 5.5
+ { 550000, 55 }, // 100 * 5.5
+ { 562500, 50 }, // 112.5 * 5
+ { 570000, 60 }, // 95 * 6
+ { 600000, 60 }, // 100 * 6
+ { 618750, 55 }, // 112.5 * 5.5
+ { 660000, 55 }, // 120 * 5.5
+ { 675000, 60 }, // 112.5 * 6
+ { 720000, 60 }, // 120 * 6
+};
+
+#define FREQ_RANGE 3000
/**
* powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
*
- * Returns the current setting of the frequency multiplier. Core clock
+ * Returns the current setting of the frequency multiplier. Core clock
* speed is frequency of the Front-Side Bus multiplied with this value.
*/
static int powernow_k6_get_cpu_multiplier(void)
{
- u64 invalue = 0;
+ unsigned long invalue = 0;
u32 msrval;
+ local_irq_disable();
+
msrval = POWERNOW_IOPORT + 0x1;
wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
invalue = inl(POWERNOW_IOPORT + 0x8);
msrval = POWERNOW_IOPORT + 0x0;
wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
- return clock_ratio[(invalue >> 5)&7].driver_data;
+ local_irq_enable();
+
+ return clock_ratio[register_to_index[(invalue >> 5)&7]].driver_data;
}
+static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
+{
+ unsigned long outvalue, invalue;
+ unsigned long msrval;
+ unsigned long cr0;
+
+ /* we now need to transform best_i to the BVC format, see AMD#23446 */
+
+ /*
+ * The processor doesn't respond to inquiry cycles while changing the
+ * frequency, so we must disable cache.
+ */
+ local_irq_disable();
+ cr0 = read_cr0();
+ write_cr0(cr0 | X86_CR0_CD);
+ wbinvd();
+
+ outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5);
+
+ msrval = POWERNOW_IOPORT + 0x1;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
+ invalue = inl(POWERNOW_IOPORT + 0x8);
+ invalue = invalue & 0x1f;
+ outvalue = outvalue | invalue;
+ outl(outvalue, (POWERNOW_IOPORT + 0x8));
+ msrval = POWERNOW_IOPORT + 0x0;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
+
+ write_cr0(cr0);
+ local_irq_enable();
+}
/**
* powernow_k6_target - set the PowerNow! multiplier
@@ -71,8 +138,6 @@ static int powernow_k6_get_cpu_multiplier(void)
static int powernow_k6_target(struct cpufreq_policy *policy,
unsigned int best_i)
{
- unsigned long outvalue = 0, invalue = 0;
- unsigned long msrval;
struct cpufreq_freqs freqs;
if (clock_ratio[best_i].driver_data > max_multiplier) {
@@ -85,35 +150,63 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- /* we now need to transform best_i to the BVC format, see AMD#23446 */
-
- outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5);
-
- msrval = POWERNOW_IOPORT + 0x1;
- wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
- invalue = inl(POWERNOW_IOPORT + 0x8);
- invalue = invalue & 0xf;
- outvalue = outvalue | invalue;
- outl(outvalue , (POWERNOW_IOPORT + 0x8));
- msrval = POWERNOW_IOPORT + 0x0;
- wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
+ powernow_k6_set_cpu_multiplier(best_i);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return 0;
}
-
static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i, f;
+ unsigned khz;
if (policy->cpu != 0)
return -ENODEV;
- /* get frequencies */
- max_multiplier = powernow_k6_get_cpu_multiplier();
- busfreq = cpu_khz / max_multiplier;
+ max_multiplier = 0;
+ khz = cpu_khz;
+ for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) {
+ if (khz >= usual_frequency_table[i].freq - FREQ_RANGE &&
+ khz <= usual_frequency_table[i].freq + FREQ_RANGE) {
+ khz = usual_frequency_table[i].freq;
+ max_multiplier = usual_frequency_table[i].mult;
+ break;
+ }
+ }
+ if (param_max_multiplier) {
+ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+ if (clock_ratio[i].driver_data == param_max_multiplier) {
+ max_multiplier = param_max_multiplier;
+ goto have_max_multiplier;
+ }
+ }
+ printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
+ return -EINVAL;
+ }
+
+ if (!max_multiplier) {
+ printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz);
+ printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n");
+ return -EOPNOTSUPP;
+ }
+
+have_max_multiplier:
+ param_max_multiplier = max_multiplier;
+
+ if (param_busfreq) {
+ if (param_busfreq >= 50000 && param_busfreq <= 150000) {
+ busfreq = param_busfreq / 10;
+ goto have_busfreq;
+ }
+ printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
+ return -EINVAL;
+ }
+
+ busfreq = khz / max_multiplier;
+have_busfreq:
+ param_busfreq = busfreq * 10;
/* table init */
for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
@@ -125,7 +218,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
}
/* cpuinfo and default policy values */
- policy->cpuinfo.transition_latency = 200000;
+ policy->cpuinfo.transition_latency = 500000;
return cpufreq_table_validate_and_show(policy, clock_ratio);
}
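
Two things change shape here. First, clock_ratio[] is reordered by descending multiplier, so a table index no longer equals the 3-bit BVC code from AMD#23446; index_to_register[] and register_to_index[] translate in each direction and must be exact inverses of one another. Second, the multiplier write is hardened: the EPMR port accesses run with interrupts off, and the cache is disabled (CR0.CD plus wbinvd) around the change because the CPU does not answer inquiry cycles while switching. A tiny self-check for the first point (illustrative only, not part of the patch):

	static void powernow_k6_check_mapping(void)
	{
		int i;

		/* the two lookup tables must round-trip every index 0..7 */
		for (i = 0; i < 8; i++)
			BUG_ON(register_to_index[index_to_register[i]] != i);
	}
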
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 0023c7d40a51..e10b646634d7 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -964,14 +964,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
cpufreq_cpu_put(policy);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
res = transition_fid_vid(data, fid, vid);
- if (res)
- freqs.new = freqs.old;
- else
- freqs.new = find_khz_freq_from_fid(data->currfid);
+ cpufreq_notify_post_transition(policy, &freqs, res);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return res;
}
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 3f7be46d2b27..051000f44ca2 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -24,12 +24,10 @@
/**
* struct cpu_data - per CPU data struct
- * @clk: the clk of CPU
* @parent: the parent node of cpu clock
* @table: frequency table
*/
struct cpu_data {
- struct clk *clk;
struct device_node *parent;
struct cpufreq_frequency_table *table;
};
@@ -81,13 +79,6 @@ static inline const struct cpumask *cpu_core_mask(int cpu)
}
#endif
-static unsigned int corenet_cpufreq_get_speed(unsigned int cpu)
-{
- struct cpu_data *data = per_cpu(cpu_data, cpu);
-
- return clk_get_rate(data->clk) / 1000;
-}
-
/* reduce the duplicated frequencies in frequency table */
static void freq_table_redup(struct cpufreq_frequency_table *freq_table,
int count)
@@ -158,8 +149,8 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto err_np;
}
- data->clk = of_clk_get(np, 0);
- if (IS_ERR(data->clk)) {
+ policy->clk = of_clk_get(np, 0);
+ if (IS_ERR(policy->clk)) {
pr_err("%s: no clock information\n", __func__);
goto err_nomem2;
}
@@ -255,7 +246,7 @@ static int corenet_cpufreq_target(struct cpufreq_policy *policy,
struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
parent = of_clk_get(data->parent, data->table[index].driver_data);
- return clk_set_parent(data->clk, parent);
+ return clk_set_parent(policy->clk, parent);
}
static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
@@ -265,7 +256,7 @@ static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
.exit = __exit_p(corenet_cpufreq_cpu_exit),
.verify = cpufreq_generic_frequency_table_verify,
.target_index = corenet_cpufreq_target,
- .get = corenet_cpufreq_get_speed,
+ .get = cpufreq_generic_get,
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 0a0f4369636a..a9195a86b069 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -423,6 +423,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver pxa_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = pxa_set_target,
.init = pxa_cpufreq_init,
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
index 93840048dd11..3785687e9d70 100644
--- a/drivers/cpufreq/pxa3xx-cpufreq.c
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -201,6 +201,7 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver pxa3xx_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = pxa3xx_cpufreq_set,
.init = pxa3xx_cpufreq_init,
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 8d904a00027b..826b8be23099 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -481,7 +481,7 @@ err_hclk:
}
static struct cpufreq_driver s3c2416_cpufreq_driver = {
- .flags = 0,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = s3c2416_cpufreq_set_target,
.get = s3c2416_cpufreq_get_speed,
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
index 72b2cc8a5a85..f84ed10755b5 100644
--- a/drivers/cpufreq/s3c2440-cpufreq.c
+++ b/drivers/cpufreq/s3c2440-cpufreq.c
@@ -22,8 +22,6 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <mach/hardware.h>
-
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -55,7 +53,7 @@ static inline int within_khz(unsigned long a, unsigned long b)
* specified in @cfg. The values are stored in @cfg for later use
* by the relevant set routine if the request settings can be reached.
*/
-int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
+static int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
{
unsigned int hdiv, pdiv;
unsigned long hclk, fclk, armclk;
@@ -242,7 +240,7 @@ static int s3c2440_cpufreq_calctable(struct s3c_cpufreq_config *cfg,
return ret;
}
-struct s3c_cpufreq_info s3c2440_cpufreq_info = {
+static struct s3c_cpufreq_info s3c2440_cpufreq_info = {
.max = {
.fclk = 400000000,
.hclk = 133333333,
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 485088253358..25069741b507 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -355,11 +355,6 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
return -EINVAL;
}
-static unsigned int s3c_cpufreq_get(unsigned int cpu)
-{
- return clk_get_rate(clk_arm) / 1000;
-}
-
struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
{
struct clk *clk;
@@ -373,6 +368,7 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
static int s3c_cpufreq_init(struct cpufreq_policy *policy)
{
+ policy->clk = clk_arm;
return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
}
@@ -408,7 +404,7 @@ static int s3c_cpufreq_suspend(struct cpufreq_policy *policy)
{
suspend_pll.frequency = clk_get_rate(_clk_mpll);
suspend_pll.driver_data = __raw_readl(S3C2410_MPLLCON);
- suspend_freq = s3c_cpufreq_get(0) * 1000;
+ suspend_freq = clk_get_rate(clk_arm);
return 0;
}
@@ -448,9 +444,9 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
#endif
static struct cpufreq_driver s3c24xx_driver = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.target = s3c_cpufreq_target,
- .get = s3c_cpufreq_get,
+ .get = cpufreq_generic_get,
.init = s3c_cpufreq_init,
.suspend = s3c_cpufreq_suspend,
.resume = s3c_cpufreq_resume,
@@ -509,7 +505,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
return 0;
}
-int __init s3c_cpufreq_auto_io(void)
+static int __init s3c_cpufreq_auto_io(void)
{
int ret;
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 67e302eeefec..c4226de079ab 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -19,7 +19,6 @@
#include <linux/regulator/consumer.h>
#include <linux/module.h>
-static struct clk *armclk;
static struct regulator *vddarm;
static unsigned long regulator_latency;
@@ -54,14 +53,6 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
};
#endif
-static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
-{
- if (cpu != 0)
- return 0;
-
- return clk_get_rate(armclk) / 1000;
-}
-
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
@@ -69,7 +60,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int old_freq, new_freq;
int ret;
- old_freq = clk_get_rate(armclk) / 1000;
+ old_freq = clk_get_rate(policy->clk) / 1000;
new_freq = s3c64xx_freq_table[index].frequency;
dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
@@ -86,7 +77,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
}
#endif
- ret = clk_set_rate(armclk, new_freq * 1000);
+ ret = clk_set_rate(policy->clk, new_freq * 1000);
if (ret < 0) {
pr_err("Failed to set rate %dkHz: %d\n",
new_freq, ret);
@@ -101,7 +92,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
if (ret != 0) {
pr_err("Failed to set VDDARM for %dkHz: %d\n",
new_freq, ret);
- if (clk_set_rate(armclk, old_freq * 1000) < 0)
+ if (clk_set_rate(policy->clk, old_freq * 1000) < 0)
pr_err("Failed to restore original clock rate\n");
return ret;
@@ -110,7 +101,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
#endif
pr_debug("Set actual frequency %lukHz\n",
- clk_get_rate(armclk) / 1000);
+ clk_get_rate(policy->clk) / 1000);
return 0;
}
@@ -169,11 +160,11 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- armclk = clk_get(NULL, "armclk");
- if (IS_ERR(armclk)) {
+ policy->clk = clk_get(NULL, "armclk");
+ if (IS_ERR(policy->clk)) {
pr_err("Unable to obtain ARMCLK: %ld\n",
- PTR_ERR(armclk));
- return PTR_ERR(armclk);
+ PTR_ERR(policy->clk));
+ return PTR_ERR(policy->clk);
}
#ifdef CONFIG_REGULATOR
@@ -193,7 +184,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
unsigned long r;
/* Check for frequencies we can generate */
- r = clk_round_rate(armclk, freq->frequency * 1000);
+ r = clk_round_rate(policy->clk, freq->frequency * 1000);
r /= 1000;
if (r != freq->frequency) {
pr_debug("%dkHz unsupported by clock\n",
@@ -203,7 +194,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
/* If we have no regulator then assume startup
* frequency is the maximum we can support. */
- if (!vddarm && freq->frequency > s3c64xx_cpufreq_get_speed(0))
+ if (!vddarm && freq->frequency > clk_get_rate(policy->clk) / 1000)
freq->frequency = CPUFREQ_ENTRY_INVALID;
freq++;
@@ -219,17 +210,17 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
pr_err("Failed to configure frequency table: %d\n",
ret);
regulator_put(vddarm);
- clk_put(armclk);
+ clk_put(policy->clk);
}
return ret;
}
static struct cpufreq_driver s3c64xx_cpufreq_driver = {
- .flags = 0,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = s3c64xx_cpufreq_set_target,
- .get = s3c64xx_cpufreq_get_speed,
+ .get = cpufreq_generic_get,
.init = s3c64xx_cpufreq_driver_init,
.name = "s3c",
};
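The hunks above replace driver-private ->get() callbacks with cpufreq_generic_get() once the ARM clock is stored in policy->clk. As a rough sketch of what such a policy->clk based helper boils down to (illustrative only, not the core's exact implementation; assumes <linux/cpufreq.h>, <linux/clk.h> and <linux/err.h>):

/* Sketch only: a policy->clk based ->get() helper, assuming the driver
 * stored its clock in policy->clk from ->init(), as the hunks above do.
 */
static unsigned int example_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq = 0;

	if (!policy)
		return 0;

	if (!IS_ERR_OR_NULL(policy->clk))
		freq = clk_get_rate(policy->clk) / 1000;	/* Hz -> kHz */

	cpufreq_cpu_put(policy);
	return freq;
}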
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index e3973dae28a7..55a8e9fa9435 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -23,7 +23,6 @@
#include <mach/map.h>
#include <mach/regs-clock.h>
-static struct clk *cpu_clk;
static struct clk *dmc0_clk;
static struct clk *dmc1_clk;
static DEFINE_MUTEX(set_freq_lock);
@@ -164,14 +163,6 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
__raw_writel(tmp1, reg);
}
-static unsigned int s5pv210_getspeed(unsigned int cpu)
-{
- if (cpu)
- return 0;
-
- return clk_get_rate(cpu_clk) / 1000;
-}
-
static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned long reg;
@@ -193,7 +184,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
goto exit;
}
- old_freq = s5pv210_getspeed(0);
+ old_freq = policy->cur;
new_freq = s5pv210_freq_table[index].frequency;
/* Finding current running level index */
@@ -471,9 +462,9 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
unsigned long mem_type;
int ret;
- cpu_clk = clk_get(NULL, "armclk");
- if (IS_ERR(cpu_clk))
- return PTR_ERR(cpu_clk);
+ policy->clk = clk_get(NULL, "armclk");
+ if (IS_ERR(policy->clk))
+ return PTR_ERR(policy->clk);
dmc0_clk = clk_get(NULL, "sclk_dmc0");
if (IS_ERR(dmc0_clk)) {
@@ -516,7 +507,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
out_dmc1:
clk_put(dmc0_clk);
out_dmc0:
- clk_put(cpu_clk);
+ clk_put(policy->clk);
return ret;
}
@@ -560,10 +551,10 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
}
static struct cpufreq_driver s5pv210_driver = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = s5pv210_target,
- .get = s5pv210_getspeed,
+ .get = cpufreq_generic_get,
.init = s5pv210_cpu_init,
.name = "s5pv210",
#ifdef CONFIG_PM
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
index 623da742f8e7..728eab77e8e0 100644
--- a/drivers/cpufreq/sa1100-cpufreq.c
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -201,7 +201,7 @@ static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver sa1100_driver __refdata = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sa1100_target,
.get = sa11x0_getspeed,
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 2c2b2e601d13..546376719d8f 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -312,7 +312,7 @@ static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
/* sa1110_driver needs __refdata because it must remain after init registers
* it with cpufreq_register_driver() */
static struct cpufreq_driver sa1110_driver __refdata = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sa1110_target,
.get = sa11x0_getspeed,
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index d02ccd19c9c4..5c86e3fa5593 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -30,11 +30,6 @@ static struct {
u32 cnt;
} spear_cpufreq;
-static unsigned int spear_cpufreq_get(unsigned int cpu)
-{
- return clk_get_rate(spear_cpufreq.clk) / 1000;
-}
-
static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq)
{
struct clk *sys_pclk;
@@ -138,7 +133,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
}
newfreq = clk_round_rate(srcclk, newfreq * mult);
- if (newfreq < 0) {
+ if (newfreq <= 0) {
pr_err("clk_round_rate failed for cpu src clock\n");
return newfreq;
}
@@ -156,16 +151,17 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
static int spear_cpufreq_init(struct cpufreq_policy *policy)
{
+ policy->clk = spear_cpufreq.clk;
return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl,
spear_cpufreq.transition_latency);
}
static struct cpufreq_driver spear_cpufreq_driver = {
.name = "cpufreq-spear",
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = spear_cpufreq_target,
- .get = spear_cpufreq_get,
+ .get = cpufreq_generic_get,
.init = spear_cpufreq_init,
.exit = cpufreq_generic_exit,
.attr = cpufreq_generic_attr,
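Most of the cpufreq driver structures in this series also gain CPUFREQ_NEED_INITIAL_FREQ_CHECK. The flag asks the core to compare the boot frequency against the driver's frequency table at initialization time and, if it is not a listed rate, to move the CPU onto one instead of registering with an out-of-table frequency. A hedged sketch of such a driver declaration (the callback names are placeholders, not taken from any driver above):

/* Illustrative driver declaration; example_target_index()/example_init()
 * are hypothetical callbacks, and ->init() is expected to set policy->clk
 * so that cpufreq_generic_get() can work.
 */
static struct cpufreq_driver example_driver = {
	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.get		= cpufreq_generic_get,
	.init		= example_init,
};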
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 0f5326d6f79f..998c17b42200 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -141,38 +141,6 @@ static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high)
}
/**
- * speedstep_get_state - set the SpeedStep state
- * @state: processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
- *
- */
-static int speedstep_get_state(void)
-{
- u32 function = GET_SPEEDSTEP_STATE;
- u32 result, state, edi, command, dummy;
-
- command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
-
- pr_debug("trying to determine current setting with command %x "
- "at port %x\n", command, smi_port);
-
- __asm__ __volatile__(
- "push %%ebp\n"
- "out %%al, (%%dx)\n"
- "pop %%ebp\n"
- : "=a" (result),
- "=b" (state), "=D" (edi),
- "=c" (dummy), "=d" (dummy), "=S" (dummy)
- : "a" (command), "b" (function), "c" (0),
- "d" (smi_port), "S" (0), "D" (0)
- );
-
- pr_debug("state is %x, result is %x\n", state, result);
-
- return state & 1;
-}
-
-
-/**
* speedstep_set_state - set the SpeedStep state
* @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
*
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index b7309c37033d..e652c1bd8d0f 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -47,21 +47,9 @@ static struct clk *pll_x_clk;
static struct clk *pll_p_clk;
static struct clk *emc_clk;
-static unsigned long target_cpu_speed[NUM_CPUS];
static DEFINE_MUTEX(tegra_cpu_lock);
static bool is_suspended;
-static unsigned int tegra_getspeed(unsigned int cpu)
-{
- unsigned long rate;
-
- if (cpu >= NUM_CPUS)
- return 0;
-
- rate = clk_get_rate(cpu_clk) / 1000;
- return rate;
-}
-
static int tegra_cpu_clk_set_rate(unsigned long rate)
{
int ret;
@@ -103,9 +91,6 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
{
int ret = 0;
- if (tegra_getspeed(0) == rate)
- return ret;
-
/*
* Vote on memory bus frequency based on cpu frequency
* This sets the minimum frequency, display or avp may request higher
@@ -125,33 +110,16 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
return ret;
}
-static unsigned long tegra_cpu_highest_speed(void)
-{
- unsigned long rate = 0;
- int i;
-
- for_each_online_cpu(i)
- rate = max(rate, target_cpu_speed[i]);
- return rate;
-}
-
static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int freq;
- int ret = 0;
+ int ret = -EBUSY;
mutex_lock(&tegra_cpu_lock);
- if (is_suspended)
- goto out;
-
- freq = freq_table[index].frequency;
+ if (!is_suspended)
+ ret = tegra_update_cpu_speed(policy,
+ freq_table[index].frequency);
- target_cpu_speed[policy->cpu] = freq;
-
- ret = tegra_update_cpu_speed(policy, tegra_cpu_highest_speed());
-
-out:
mutex_unlock(&tegra_cpu_lock);
return ret;
}
@@ -165,7 +133,8 @@ static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
is_suspended = true;
pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
freq_table[0].frequency);
- tegra_update_cpu_speed(policy, freq_table[0].frequency);
+ if (clk_get_rate(cpu_clk) / 1000 != freq_table[0].frequency)
+ tegra_update_cpu_speed(policy, freq_table[0].frequency);
cpufreq_cpu_put(policy);
} else if (event == PM_POST_SUSPEND) {
is_suspended = false;
@@ -189,8 +158,6 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
clk_prepare_enable(emc_clk);
clk_prepare_enable(cpu_clk);
- target_cpu_speed[policy->cpu] = tegra_getspeed(policy->cpu);
-
/* FIXME: what's the actual transition time? */
ret = cpufreq_generic_init(policy, freq_table, 300 * 1000);
if (ret) {
@@ -202,6 +169,7 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
if (policy->cpu == 0)
register_pm_notifier(&tegra_cpu_pm_notifier);
+ policy->clk = cpu_clk;
return 0;
}
@@ -214,9 +182,10 @@ static int tegra_cpu_exit(struct cpufreq_policy *policy)
}
static struct cpufreq_driver tegra_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra_target,
- .get = tegra_getspeed,
+ .get = cpufreq_generic_get,
.init = tegra_cpu_init,
.exit = tegra_cpu_exit,
.name = "tegra",
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 653ae2955b55..36cc330b8747 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -11,6 +11,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -33,42 +34,34 @@ static int ucv2_verify_speed(struct cpufreq_policy *policy)
return 0;
}
-static unsigned int ucv2_getspeed(unsigned int cpu)
-{
- struct clk *mclk = clk_get(NULL, "MAIN_CLK");
-
- if (cpu)
- return 0;
- return clk_get_rate(mclk)/1000;
-}
-
static int ucv2_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
- unsigned int cur = ucv2_getspeed(0);
struct cpufreq_freqs freqs;
- struct clk *mclk = clk_get(NULL, "MAIN_CLK");
+ int ret;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ freqs.old = policy->cur;
+ freqs.new = target_freq;
- if (!clk_set_rate(mclk, target_freq * 1000)) {
- freqs.old = cur;
- freqs.new = target_freq;
- }
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ ret = clk_set_rate(policy->mclk, target_freq * 1000);
+ cpufreq_notify_post_transition(policy, &freqs, ret);
- return 0;
+ return ret;
}
static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
{
if (policy->cpu != 0)
return -EINVAL;
+
policy->min = policy->cpuinfo.min_freq = 250000;
policy->max = policy->cpuinfo.max_freq = 1000000;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ policy->clk = clk_get(NULL, "MAIN_CLK");
+ if (IS_ERR(policy->clk))
+ return PTR_ERR(policy->clk);
return 0;
}
@@ -76,7 +69,7 @@ static struct cpufreq_driver ucv2_driver = {
.flags = CPUFREQ_STICKY,
.verify = ucv2_verify_speed,
.target = ucv2_target,
- .get = ucv2_getspeed,
+ .get = cpufreq_generic_get,
.init = ucv2_cpu_init,
.name = "UniCore-II",
};
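The unicore2 hunk keeps explicit transition notification but pairs it with cpufreq_notify_post_transition(), so that a failed clk_set_rate() is reported back to the core. A minimal sketch of that pattern (the function below is illustrative, not part of the driver):

/* Sketch of the notify-around-clk_set_rate() pattern used above. */
static int example_change_freq(struct cpufreq_policy *policy,
			       unsigned int target_khz)
{
	struct cpufreq_freqs freqs;
	int ret;

	freqs.old = policy->cur;
	freqs.new = target_khz;

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	ret = clk_set_rate(policy->clk, target_khz * 1000);
	/* Non-zero ret tells the core the switch failed. */
	cpufreq_notify_post_transition(policy, &freqs, ret);

	return ret;
}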
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index b3fb81d7cf04..f04e25f6c98d 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -35,6 +35,11 @@ depends on ARM
source "drivers/cpuidle/Kconfig.arm"
endmenu
+menu "POWERPC CPU Idle Drivers"
+depends on PPC
+source "drivers/cpuidle/Kconfig.powerpc"
+endmenu
+
endif
config ARCH_NEEDS_CPU_IDLE_COUPLED
diff --git a/drivers/cpuidle/Kconfig.powerpc b/drivers/cpuidle/Kconfig.powerpc
new file mode 100644
index 000000000000..66c3a09574e9
--- /dev/null
+++ b/drivers/cpuidle/Kconfig.powerpc
@@ -0,0 +1,20 @@
+#
+# POWERPC CPU Idle Drivers
+#
+config PSERIES_CPUIDLE
+ bool "Cpuidle driver for pSeries platforms"
+ depends on CPU_IDLE
+ depends on PPC_PSERIES
+ default y
+ help
+ Select this option to enable processor idle state management
+	  through the cpuidle subsystem.
+
+config POWERNV_CPUIDLE
+ bool "Cpuidle driver for powernv platforms"
+ depends on CPU_IDLE
+ depends on PPC_POWERNV
+ default y
+ help
+ Select this option to enable processor idle state management
+	  through the cpuidle subsystem.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 527be28e5c1e..f71ae1b373c5 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -13,3 +13,8 @@ obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o
obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o
obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
+
+###############################################################################
+# POWERPC drivers
+obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o
+obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index 36795639df0d..6e51114057d0 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -65,7 +65,7 @@ static struct cpuidle_driver calxeda_idle_driver = {
.state_count = 2,
};
-static int __init calxeda_cpuidle_probe(struct platform_device *pdev)
+static int calxeda_cpuidle_probe(struct platform_device *pdev)
{
return cpuidle_register(&calxeda_idle_driver, NULL);
}
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
new file mode 100644
index 000000000000..78fd174c57e8
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -0,0 +1,169 @@
+/*
+ * cpuidle-powernv - idle state cpuidle driver.
+ * Adapted from drivers/cpuidle/cpuidle-pseries
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+
+struct cpuidle_driver powernv_idle_driver = {
+ .name = "powernv_idle",
+ .owner = THIS_MODULE,
+};
+
+static int max_idle_state;
+static struct cpuidle_state *cpuidle_state_table;
+
+static int snooze_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ local_irq_enable();
+ set_thread_flag(TIF_POLLING_NRFLAG);
+
+ while (!need_resched()) {
+ HMT_low();
+ HMT_very_low();
+ }
+
+ HMT_medium();
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ smp_mb();
+ return index;
+}
+
+static int nap_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ power7_idle();
+ return index;
+}
+
+/*
+ * States for dedicated partition case.
+ */
+static struct cpuidle_state powernv_states[] = {
+ { /* Snooze */
+ .name = "snooze",
+ .desc = "snooze",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 0,
+ .target_residency = 0,
+ .enter = &snooze_loop },
+ { /* NAP */
+ .name = "NAP",
+ .desc = "NAP",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 10,
+ .target_residency = 100,
+ .enter = &nap_loop },
+};
+
+static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n,
+ unsigned long action, void *hcpu)
+{
+ int hotcpu = (unsigned long)hcpu;
+ struct cpuidle_device *dev =
+ per_cpu(cpuidle_devices, hotcpu);
+
+ if (dev && cpuidle_get_driver()) {
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ cpuidle_pause_and_lock();
+ cpuidle_enable_device(dev);
+ cpuidle_resume_and_unlock();
+ break;
+
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ cpuidle_pause_and_lock();
+ cpuidle_disable_device(dev);
+ cpuidle_resume_and_unlock();
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block setup_hotplug_notifier = {
+ .notifier_call = powernv_cpuidle_add_cpu_notifier,
+};
+
+/*
+ * powernv_cpuidle_driver_init()
+ */
+static int powernv_cpuidle_driver_init(void)
+{
+ int idle_state;
+ struct cpuidle_driver *drv = &powernv_idle_driver;
+
+ drv->state_count = 0;
+
+ for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
+ /* Is the state not enabled? */
+ if (cpuidle_state_table[idle_state].enter == NULL)
+ continue;
+
+ drv->states[drv->state_count] = /* structure copy */
+ cpuidle_state_table[idle_state];
+
+ drv->state_count += 1;
+ }
+
+ return 0;
+}
+
+/*
+ * powernv_idle_probe()
+ * Choose state table for shared versus dedicated partition
+ */
+static int powernv_idle_probe(void)
+{
+
+ if (cpuidle_disable != IDLE_NO_OVERRIDE)
+ return -ENODEV;
+
+ if (firmware_has_feature(FW_FEATURE_OPALv3)) {
+ cpuidle_state_table = powernv_states;
+ max_idle_state = ARRAY_SIZE(powernv_states);
+ } else
+ return -ENODEV;
+
+ return 0;
+}
+
+static int __init powernv_processor_idle_init(void)
+{
+ int retval;
+
+ retval = powernv_idle_probe();
+ if (retval)
+ return retval;
+
+ powernv_cpuidle_driver_init();
+ retval = cpuidle_register(&powernv_idle_driver, NULL);
+ if (retval) {
+ printk(KERN_DEBUG "Registration of powernv driver failed.\n");
+ return retval;
+ }
+
+ register_cpu_notifier(&setup_hotplug_notifier);
+ printk(KERN_DEBUG "powernv_idle_driver registered\n");
+ return 0;
+}
+
+device_initcall(powernv_processor_idle_init);
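snooze_loop() above is the usual polling-idle idiom: announce TIF_POLLING_NRFLAG so the scheduler can skip the resched IPI, spin until need_resched() fires, then clear the flag behind a barrier. HMT_low()/HMT_very_low() are the POWER-specific way of dropping SMT thread priority; a generic sketch of the same loop might look like this (illustrative only, not a replacement for the driver code):

/* Sketch of a polling idle state; cpu_relax() stands in for the
 * architecture-specific priority hints used above.
 */
static int example_poll_idle(struct cpuidle_device *dev,
			     struct cpuidle_driver *drv, int index)
{
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);	/* wakers may skip the IPI */

	while (!need_resched())
		cpu_relax();

	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();	/* full barrier after clearing the flag, as above */
	return index;
}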
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
new file mode 100644
index 000000000000..7ab564aa0b1c
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -0,0 +1,267 @@
+/*
+ * cpuidle-pseries - idle state cpuidle driver.
+ * Adapted from drivers/idle/intel_idle.c and
+ * drivers/acpi/processor_idle.c
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+
+#include <asm/paca.h>
+#include <asm/reg.h>
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+#include <asm/plpar_wrappers.h>
+
+struct cpuidle_driver pseries_idle_driver = {
+ .name = "pseries_idle",
+ .owner = THIS_MODULE,
+};
+
+static int max_idle_state;
+static struct cpuidle_state *cpuidle_state_table;
+
+static inline void idle_loop_prolog(unsigned long *in_purr)
+{
+ *in_purr = mfspr(SPRN_PURR);
+ /*
+ * Indicate to the HV that we are idle. Now would be
+ * a good time to find other work to dispatch.
+ */
+ get_lppaca()->idle = 1;
+}
+
+static inline void idle_loop_epilog(unsigned long in_purr)
+{
+ u64 wait_cycles;
+
+ wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
+ wait_cycles += mfspr(SPRN_PURR) - in_purr;
+ get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
+ get_lppaca()->idle = 0;
+}
+
+static int snooze_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ unsigned long in_purr;
+
+ idle_loop_prolog(&in_purr);
+ local_irq_enable();
+ set_thread_flag(TIF_POLLING_NRFLAG);
+
+ while (!need_resched()) {
+ HMT_low();
+ HMT_very_low();
+ }
+
+ HMT_medium();
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ smp_mb();
+
+ idle_loop_epilog(in_purr);
+
+ return index;
+}
+
+static void check_and_cede_processor(void)
+{
+ /*
+ * Ensure our interrupt state is properly tracked,
+ * also checks if no interrupt has occurred while we
+ * were soft-disabled
+ */
+ if (prep_irq_for_idle()) {
+ cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Ensure that H_CEDE returns with IRQs on */
+ if (WARN_ON(!(mfmsr() & MSR_EE)))
+ __hard_irq_enable();
+#endif
+ }
+}
+
+static int dedicated_cede_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ unsigned long in_purr;
+
+ idle_loop_prolog(&in_purr);
+ get_lppaca()->donate_dedicated_cpu = 1;
+
+ HMT_medium();
+ check_and_cede_processor();
+
+ get_lppaca()->donate_dedicated_cpu = 0;
+
+ idle_loop_epilog(in_purr);
+
+ return index;
+}
+
+static int shared_cede_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ unsigned long in_purr;
+
+ idle_loop_prolog(&in_purr);
+
+ /*
+ * Yield the processor to the hypervisor. We return if
+ * an external interrupt occurs (which are driven prior
+ * to returning here) or if a prod occurs from another
+ * processor. When returning here, external interrupts
+ * are enabled.
+ */
+ check_and_cede_processor();
+
+ idle_loop_epilog(in_purr);
+
+ return index;
+}
+
+/*
+ * States for dedicated partition case.
+ */
+static struct cpuidle_state dedicated_states[] = {
+ { /* Snooze */
+ .name = "snooze",
+ .desc = "snooze",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 0,
+ .target_residency = 0,
+ .enter = &snooze_loop },
+ { /* CEDE */
+ .name = "CEDE",
+ .desc = "CEDE",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 10,
+ .target_residency = 100,
+ .enter = &dedicated_cede_loop },
+};
+
+/*
+ * States for shared partition case.
+ */
+static struct cpuidle_state shared_states[] = {
+ { /* Shared Cede */
+ .name = "Shared Cede",
+ .desc = "Shared Cede",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 0,
+ .target_residency = 0,
+ .enter = &shared_cede_loop },
+};
+
+static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
+ unsigned long action, void *hcpu)
+{
+ int hotcpu = (unsigned long)hcpu;
+ struct cpuidle_device *dev =
+ per_cpu(cpuidle_devices, hotcpu);
+
+ if (dev && cpuidle_get_driver()) {
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ cpuidle_pause_and_lock();
+ cpuidle_enable_device(dev);
+ cpuidle_resume_and_unlock();
+ break;
+
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ cpuidle_pause_and_lock();
+ cpuidle_disable_device(dev);
+ cpuidle_resume_and_unlock();
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block setup_hotplug_notifier = {
+ .notifier_call = pseries_cpuidle_add_cpu_notifier,
+};
+
+/*
+ * pseries_cpuidle_driver_init()
+ */
+static int pseries_cpuidle_driver_init(void)
+{
+ int idle_state;
+ struct cpuidle_driver *drv = &pseries_idle_driver;
+
+ drv->state_count = 0;
+
+ for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
+ /* Is the state not enabled? */
+ if (cpuidle_state_table[idle_state].enter == NULL)
+ continue;
+
+ drv->states[drv->state_count] = /* structure copy */
+ cpuidle_state_table[idle_state];
+
+ drv->state_count += 1;
+ }
+
+ return 0;
+}
+
+/*
+ * pseries_idle_probe()
+ * Choose state table for shared versus dedicated partition
+ */
+static int pseries_idle_probe(void)
+{
+
+ if (cpuidle_disable != IDLE_NO_OVERRIDE)
+ return -ENODEV;
+
+ if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+ if (lppaca_shared_proc(get_lppaca())) {
+ cpuidle_state_table = shared_states;
+ max_idle_state = ARRAY_SIZE(shared_states);
+ } else {
+ cpuidle_state_table = dedicated_states;
+ max_idle_state = ARRAY_SIZE(dedicated_states);
+ }
+ } else
+ return -ENODEV;
+
+ return 0;
+}
+
+static int __init pseries_processor_idle_init(void)
+{
+ int retval;
+
+ retval = pseries_idle_probe();
+ if (retval)
+ return retval;
+
+ pseries_cpuidle_driver_init();
+ retval = cpuidle_register(&pseries_idle_driver, NULL);
+ if (retval) {
+ printk(KERN_DEBUG "Registration of pseries driver failed.\n");
+ return retval;
+ }
+
+ register_cpu_notifier(&setup_hotplug_notifier);
+ printk(KERN_DEBUG "pseries_idle_driver registered\n");
+ return 0;
+}
+
+device_initcall(pseries_processor_idle_init);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index f4fd837bcb82..13857f5d28f7 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -289,16 +289,6 @@ config CRYPTO_DEV_SAHARA
This option enables support for the SAHARA HW crypto accelerator
found in some Freescale i.MX chips.
-config CRYPTO_DEV_DCP
- tristate "Support for the DCP engine"
- depends on ARCH_MXS && OF
- select CRYPTO_BLKCIPHER
- select CRYPTO_AES
- select CRYPTO_CBC
- help
- This options enables support for the hardware crypto-acceleration
- capabilities of the DCP co-processor
-
config CRYPTO_DEV_S5P
tristate "Support for Samsung S5PV210 crypto accelerator"
depends on ARCH_S5PV210
@@ -399,4 +389,33 @@ config CRYPTO_DEV_ATMEL_SHA
To compile this driver as a module, choose M here: the module
will be called atmel-sha.
+config CRYPTO_DEV_CCP
+ bool "Support for AMD Cryptographic Coprocessor"
+ depends on X86 && PCI
+ default n
+ help
+ The AMD Cryptographic Coprocessor provides hardware support
+ for encryption, hashing and related operations.
+
+if CRYPTO_DEV_CCP
+ source "drivers/crypto/ccp/Kconfig"
+endif
+
+config CRYPTO_DEV_MXS_DCP
+ tristate "Support for Freescale MXS DCP"
+ depends on ARCH_MXS
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_AES
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_ALGAPI
+ help
+	  The Freescale i.MX23/i.MX28 has a SHA1/SHA256 and AES128 CBC/ECB
+	  co-processor on the die.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mxs-dcp.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index b4946ddd2550..0bc6aa0a54d7 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,24 +1,25 @@
-obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
-obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
+obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
+obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
-obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
-n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
-obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
-obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
-obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
-obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
+obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
+obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
+n2_crypto-y := n2_core.o n2_asm.o
+obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
+obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
+obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
-obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
-obj-$(CONFIG_CRYPTO_DEV_DCP) += dcp.o
+obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
+obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
+obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
-obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
-obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
-obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
-obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
-obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index efaf6302405f..37f9cc98ba17 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -724,7 +724,6 @@ static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
crypto4xx_destroy_pdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
crypto4xx_destroy_sdr(core_dev->dev);
- dev_set_drvdata(core_dev->device, NULL);
iounmap(core_dev->dev->ce_base);
kfree(core_dev->dev);
kfree(core_dev);
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index c1efd910d97b..d7c9e317423c 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -30,6 +30,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
@@ -39,6 +40,7 @@
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
+#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"
#define CFB8_BLOCK_SIZE 1
@@ -747,59 +749,50 @@ static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
struct crypto_platform_data *pdata)
{
int err = -ENOMEM;
- dma_cap_mask_t mask_in, mask_out;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Try to grab 2 DMA channels */
+ dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
+ atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
+ if (!dd->dma_lch_in.chan)
+ goto err_dma_in;
+
+ dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+ dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+ AES_IDATAR(0);
+ dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_in.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.device_fc = false;
+
+ dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
+ atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
+ if (!dd->dma_lch_out.chan)
+ goto err_dma_out;
+
+ dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
+ dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
+ AES_ODATAR(0);
+ dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_out.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.device_fc = false;
- if (pdata && pdata->dma_slave->txdata.dma_dev &&
- pdata->dma_slave->rxdata.dma_dev) {
-
- /* Try to grab 2 DMA channels */
- dma_cap_zero(mask_in);
- dma_cap_set(DMA_SLAVE, mask_in);
-
- dd->dma_lch_in.chan = dma_request_channel(mask_in,
- atmel_aes_filter, &pdata->dma_slave->rxdata);
-
- if (!dd->dma_lch_in.chan)
- goto err_dma_in;
-
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
- dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
- AES_IDATAR(0);
- dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
- dd->dma_lch_in.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_in.dma_conf.device_fc = false;
-
- dma_cap_zero(mask_out);
- dma_cap_set(DMA_SLAVE, mask_out);
- dd->dma_lch_out.chan = dma_request_channel(mask_out,
- atmel_aes_filter, &pdata->dma_slave->txdata);
-
- if (!dd->dma_lch_out.chan)
- goto err_dma_out;
-
- dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
- dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
- AES_ODATAR(0);
- dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
- dd->dma_lch_out.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_out.dma_conf.device_fc = false;
-
- return 0;
- } else {
- return -ENODEV;
- }
+ return 0;
err_dma_out:
dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
+ dev_warn(dd->dev, "no DMA channel available\n");
return err;
}
@@ -1261,6 +1254,47 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
}
}
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_aes_dt_ids[] = {
+ { .compatible = "atmel,at91sam9g46-aes" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
+
+static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct crypto_platform_data *pdata;
+
+ if (!np) {
+ dev_err(&pdev->dev, "device node not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->dma_slave = devm_kzalloc(&pdev->dev,
+ sizeof(*(pdata->dma_slave)),
+ GFP_KERNEL);
+ if (!pdata->dma_slave) {
+ dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
+ devm_kfree(&pdev->dev, pdata);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pdata;
+}
+#else
+static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
static int atmel_aes_probe(struct platform_device *pdev)
{
struct atmel_aes_dev *aes_dd;
@@ -1272,6 +1306,14 @@ static int atmel_aes_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
if (!pdata) {
+ pdata = atmel_aes_of_init(pdev);
+ if (IS_ERR(pdata)) {
+ err = PTR_ERR(pdata);
+ goto aes_dd_err;
+ }
+ }
+
+ if (!pdata->dma_slave) {
err = -ENXIO;
goto aes_dd_err;
}
@@ -1358,7 +1400,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
if (err)
goto err_algs;
- dev_info(dev, "Atmel AES\n");
+ dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
+ dma_chan_name(aes_dd->dma_lch_in.chan),
+ dma_chan_name(aes_dd->dma_lch_out.chan));
return 0;
@@ -1424,6 +1468,7 @@ static struct platform_driver atmel_aes_driver = {
.driver = {
.name = "atmel_aes",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_aes_dt_ids),
},
};
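The Atmel AES/SHA/TDES hunks all switch from dma_request_channel() to dma_request_slave_channel_compat(), which asks the DMA core for a named channel (device tree / ACPI) first and, roughly speaking, only falls back to the mask-plus-filter lookup when that fails. A hedged sketch of that request path (the filter callback and its parameter are placeholders standing in for the legacy platform-data case):

/* Illustration of the DT-first, filter-fallback channel request used above. */
static struct dma_chan *example_get_tx_chan(struct device *dev,
					    dma_filter_fn my_filter,
					    void *my_param)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Tries the named "tx" channel from firmware data first, then
	 * falls back to the capability mask + filter function path. */
	return dma_request_slave_channel_compat(mask, my_filter, my_param,
						dev, "tx");
}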
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index eaed8bf183bc..0618be06b9fb 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -30,6 +30,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
@@ -1263,32 +1264,29 @@ static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
int err = -ENOMEM;
dma_cap_mask_t mask_in;
- if (pdata && pdata->dma_slave->rxdata.dma_dev) {
- /* Try to grab DMA channel */
- dma_cap_zero(mask_in);
- dma_cap_set(DMA_SLAVE, mask_in);
+ /* Try to grab DMA channel */
+ dma_cap_zero(mask_in);
+ dma_cap_set(DMA_SLAVE, mask_in);
- dd->dma_lch_in.chan = dma_request_channel(mask_in,
- atmel_sha_filter, &pdata->dma_slave->rxdata);
-
- if (!dd->dma_lch_in.chan)
- return err;
-
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
- dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
- SHA_REG_DIN(0);
- dd->dma_lch_in.dma_conf.src_maxburst = 1;
- dd->dma_lch_in.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_in.dma_conf.dst_maxburst = 1;
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_in.dma_conf.device_fc = false;
-
- return 0;
+ dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
+ atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
+ if (!dd->dma_lch_in.chan) {
+ dev_warn(dd->dev, "no DMA channel available\n");
+ return err;
}
- return -ENODEV;
+ dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+ dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+ SHA_REG_DIN(0);
+ dd->dma_lch_in.dma_conf.src_maxburst = 1;
+ dd->dma_lch_in.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.device_fc = false;
+
+ return 0;
}
static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
@@ -1326,6 +1324,48 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
}
}
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_sha_dt_ids[] = {
+ { .compatible = "atmel,at91sam9g46-sha" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
+
+static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct crypto_platform_data *pdata;
+
+ if (!np) {
+ dev_err(&pdev->dev, "device node not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->dma_slave = devm_kzalloc(&pdev->dev,
+ sizeof(*(pdata->dma_slave)),
+ GFP_KERNEL);
+ if (!pdata->dma_slave) {
+ dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
+ devm_kfree(&pdev->dev, pdata);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pdata;
+}
+#else /* CONFIG_OF */
+static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
static int atmel_sha_probe(struct platform_device *pdev)
{
struct atmel_sha_dev *sha_dd;
@@ -1402,13 +1442,23 @@ static int atmel_sha_probe(struct platform_device *pdev)
if (sha_dd->caps.has_dma) {
pdata = pdev->dev.platform_data;
if (!pdata) {
- dev_err(&pdev->dev, "platform data not available\n");
+ pdata = atmel_sha_of_init(pdev);
+ if (IS_ERR(pdata)) {
+ dev_err(&pdev->dev, "platform data not available\n");
+ err = PTR_ERR(pdata);
+ goto err_pdata;
+ }
+ }
+ if (!pdata->dma_slave) {
err = -ENXIO;
goto err_pdata;
}
err = atmel_sha_dma_init(sha_dd, pdata);
if (err)
goto err_sha_dma;
+
+ dev_info(dev, "using %s for DMA transfers\n",
+ dma_chan_name(sha_dd->dma_lch_in.chan));
}
spin_lock(&atmel_sha.lock);
@@ -1419,7 +1469,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
if (err)
goto err_algs;
- dev_info(dev, "Atmel SHA1/SHA256\n");
+ dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
+ sha_dd->caps.has_sha224 ? "/SHA224" : "",
+ sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");
return 0;
@@ -1483,6 +1535,7 @@ static struct platform_driver atmel_sha_driver = {
.driver = {
.name = "atmel_sha",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_sha_dt_ids),
},
};
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 4a99564a08e6..6cde5b530c69 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -30,6 +30,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
@@ -716,59 +717,50 @@ static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
struct crypto_platform_data *pdata)
{
int err = -ENOMEM;
- dma_cap_mask_t mask_in, mask_out;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Try to grab 2 DMA channels */
+ dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
+ atmel_tdes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
+ if (!dd->dma_lch_in.chan)
+ goto err_dma_in;
+
+ dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+ dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+ TDES_IDATA1R;
+ dd->dma_lch_in.dma_conf.src_maxburst = 1;
+ dd->dma_lch_in.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.device_fc = false;
+
+ dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
+ atmel_tdes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
+ if (!dd->dma_lch_out.chan)
+ goto err_dma_out;
+
+ dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
+ dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
+ TDES_ODATA1R;
+ dd->dma_lch_out.dma_conf.src_maxburst = 1;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_out.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.device_fc = false;
- if (pdata && pdata->dma_slave->txdata.dma_dev &&
- pdata->dma_slave->rxdata.dma_dev) {
-
- /* Try to grab 2 DMA channels */
- dma_cap_zero(mask_in);
- dma_cap_set(DMA_SLAVE, mask_in);
-
- dd->dma_lch_in.chan = dma_request_channel(mask_in,
- atmel_tdes_filter, &pdata->dma_slave->rxdata);
-
- if (!dd->dma_lch_in.chan)
- goto err_dma_in;
-
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
- dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
- TDES_IDATA1R;
- dd->dma_lch_in.dma_conf.src_maxburst = 1;
- dd->dma_lch_in.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_in.dma_conf.dst_maxburst = 1;
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_in.dma_conf.device_fc = false;
-
- dma_cap_zero(mask_out);
- dma_cap_set(DMA_SLAVE, mask_out);
- dd->dma_lch_out.chan = dma_request_channel(mask_out,
- atmel_tdes_filter, &pdata->dma_slave->txdata);
-
- if (!dd->dma_lch_out.chan)
- goto err_dma_out;
-
- dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
- dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
- TDES_ODATA1R;
- dd->dma_lch_out.dma_conf.src_maxburst = 1;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_out.dma_conf.dst_maxburst = 1;
- dd->dma_lch_out.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_out.dma_conf.device_fc = false;
-
- return 0;
- } else {
- return -ENODEV;
- }
+ return 0;
err_dma_out:
dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
+ dev_warn(dd->dev, "no DMA channel available\n");
return err;
}
@@ -1317,6 +1309,47 @@ static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
}
}
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_tdes_dt_ids[] = {
+ { .compatible = "atmel,at91sam9g46-tdes" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
+
+static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct crypto_platform_data *pdata;
+
+ if (!np) {
+ dev_err(&pdev->dev, "device node not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->dma_slave = devm_kzalloc(&pdev->dev,
+ sizeof(*(pdata->dma_slave)),
+ GFP_KERNEL);
+ if (!pdata->dma_slave) {
+ dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
+ devm_kfree(&pdev->dev, pdata);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pdata;
+}
+#else /* CONFIG_OF */
+static inline struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
static int atmel_tdes_probe(struct platform_device *pdev)
{
struct atmel_tdes_dev *tdes_dd;
@@ -1399,13 +1432,24 @@ static int atmel_tdes_probe(struct platform_device *pdev)
if (tdes_dd->caps.has_dma) {
pdata = pdev->dev.platform_data;
if (!pdata) {
- dev_err(&pdev->dev, "platform data not available\n");
+ pdata = atmel_tdes_of_init(pdev);
+ if (IS_ERR(pdata)) {
+ dev_err(&pdev->dev, "platform data not available\n");
+ err = PTR_ERR(pdata);
+ goto err_pdata;
+ }
+ }
+ if (!pdata->dma_slave) {
err = -ENXIO;
goto err_pdata;
}
err = atmel_tdes_dma_init(tdes_dd, pdata);
if (err)
goto err_tdes_dma;
+
+ dev_info(dev, "using %s, %s for DMA transfers\n",
+ dma_chan_name(tdes_dd->dma_lch_in.chan),
+ dma_chan_name(tdes_dd->dma_lch_out.chan));
}
spin_lock(&atmel_tdes.lock);
@@ -1487,6 +1531,7 @@ static struct platform_driver atmel_tdes_driver = {
.driver = {
.name = "atmel_tdes",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_tdes_dt_ids),
},
};
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 4cf5dec826e1..b71f2fd749df 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -467,24 +467,10 @@ static int aead_setkey(struct crypto_aead *aead,
static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- struct rtattr *rta = (void *)key;
- struct crypto_authenc_key_param *param;
- unsigned int authkeylen;
- unsigned int enckeylen;
+ struct crypto_authenc_keys keys;
int ret = 0;
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
-
- if (keylen < enckeylen)
- goto badkey;
-
- authkeylen = keylen - enckeylen;
-
- if (keylen > CAAM_MAX_KEY_SIZE)
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
/* Pick class 2 key length from algorithm submask */
@@ -492,25 +478,29 @@ static int aead_setkey(struct crypto_aead *aead,
OP_ALG_ALGSEL_SHIFT] * 2;
ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+ if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
#ifdef DEBUG
printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
- keylen, enckeylen, authkeylen);
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
ctx->split_key_len, ctx->split_key_pad_len);
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
- ret = gen_split_aead_key(ctx, key, authkeylen);
+ ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
if (ret) {
goto badkey;
}
/* postpend encryption key to auth split key */
- memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
+ memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
- enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
@@ -518,15 +508,15 @@ static int aead_setkey(struct crypto_aead *aead,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len + enckeylen, 1);
+ ctx->split_key_pad_len + keys.enckeylen, 1);
#endif
- ctx->enckeylen = enckeylen;
+ ctx->enckeylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
if (ret) {
dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
- enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, DMA_TO_DEVICE);
}
return ret;
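The caamalg.c hunk drops the open-coded rtattr parsing in favour of crypto_authenc_extractkeys(), which splits the authenc()-encoded blob into its authentication and encryption parts. A hedged sketch of how a setkey path consumes that helper (field names follow struct crypto_authenc_keys from <crypto/authenc.h>; the size limits below are illustrative, not the driver's):

/* Sketch: splitting an authenc() key blob, as the hunk above now does. */
static int example_authenc_setkey(const u8 *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		return -EINVAL;		/* malformed rtattr-encoded blob */

	/* keys.authkey/keys.authkeylen feed the hash; keys.enckey/
	 * keys.enckeylen feed the cipher. Enforce any device limits
	 * before copying the material out (placeholder bounds here). */
	if (keys.enckeylen > 32 || keys.authkeylen > 128)
		return -EINVAL;

	return 0;
}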
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
new file mode 100644
index 000000000000..7639ffc36c68
--- /dev/null
+++ b/drivers/crypto/ccp/Kconfig
@@ -0,0 +1,24 @@
+config CRYPTO_DEV_CCP_DD
+ tristate "Cryptographic Coprocessor device driver"
+ depends on CRYPTO_DEV_CCP
+ default m
+ select HW_RANDOM
+ help
+ Provides the interface to use the AMD Cryptographic Coprocessor
+ which can be used to accelerate or offload encryption operations
+ such as SHA, AES and more. If you choose 'M' here, this module
+ will be called ccp.
+
+config CRYPTO_DEV_CCP_CRYPTO
+ tristate "Encryption and hashing acceleration support"
+ depends on CRYPTO_DEV_CCP_DD
+ default m
+ select CRYPTO_ALGAPI
+ select CRYPTO_HASH
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AUTHENC
+ help
+ Support for using the cryptographic API with the AMD Cryptographic
+ Coprocessor. This module supports acceleration and offload of SHA
+ and AES algorithms. If you choose 'M' here, this module will be
+ called ccp_crypto.
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
new file mode 100644
index 000000000000..d3505a018720
--- /dev/null
+++ b/drivers/crypto/ccp/Makefile
@@ -0,0 +1,10 @@
+obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
+ccp-objs := ccp-dev.o ccp-ops.o
+ccp-objs += ccp-pci.o
+
+obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
+ccp-crypto-objs := ccp-crypto-main.o \
+ ccp-crypto-aes.o \
+ ccp-crypto-aes-cmac.o \
+ ccp-crypto-aes-xts.o \
+ ccp-crypto-sha.o
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
new file mode 100644
index 000000000000..8e162ad82085
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -0,0 +1,365 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+
+static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
+ int ret)
+{
+ struct ahash_request *req = ahash_request_cast(async_req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ unsigned int digest_size = crypto_ahash_digestsize(tfm);
+
+ if (ret)
+ goto e_free;
+
+ if (rctx->hash_rem) {
+ /* Save remaining data to buffer */
+ unsigned int offset = rctx->nbytes - rctx->hash_rem;
+ scatterwalk_map_and_copy(rctx->buf, rctx->src,
+ offset, rctx->hash_rem, 0);
+ rctx->buf_count = rctx->hash_rem;
+ } else
+ rctx->buf_count = 0;
+
+ /* Update result area if supplied */
+ if (req->result)
+ memcpy(req->result, rctx->iv, digest_size);
+
+e_free:
+ sg_free_table(&rctx->data_sg);
+
+ return ret;
+}
+
+static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
+ unsigned int final)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct scatterlist *sg, *cmac_key_sg = NULL;
+ unsigned int block_size =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ unsigned int need_pad, sg_count;
+ gfp_t gfp;
+ u64 len;
+ int ret;
+
+ if (!ctx->u.aes.key_len)
+ return -EINVAL;
+
+ if (nbytes)
+ rctx->null_msg = 0;
+
+ len = (u64)rctx->buf_count + (u64)nbytes;
+
+ if (!final && (len <= block_size)) {
+ scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
+ 0, nbytes, 0);
+ rctx->buf_count += nbytes;
+
+ return 0;
+ }
+
+ rctx->src = req->src;
+ rctx->nbytes = nbytes;
+
+ rctx->final = final;
+ rctx->hash_rem = final ? 0 : len & (block_size - 1);
+ rctx->hash_cnt = len - rctx->hash_rem;
+ if (!final && !rctx->hash_rem) {
+ /* CCP can't do zero length final, so keep some data around */
+ rctx->hash_cnt -= block_size;
+ rctx->hash_rem = block_size;
+ }
+
+ if (final && (rctx->null_msg || (len & (block_size - 1))))
+ need_pad = 1;
+ else
+ need_pad = 0;
+
+ sg_init_one(&rctx->iv_sg, rctx->iv, sizeof(rctx->iv));
+
+ /* Build the data scatterlist table - allocate enough entries for all
+ * possible data pieces (buffer, input data, padding)
+ */
+ sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
+ gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC;
+ ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
+ if (ret)
+ return ret;
+
+ sg = NULL;
+ if (rctx->buf_count) {
+ sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
+ sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
+ }
+
+ if (nbytes)
+ sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
+
+ if (need_pad) {
+ int pad_length = block_size - (len & (block_size - 1));
+
+ rctx->hash_cnt += pad_length;
+
+ memset(rctx->pad, 0, sizeof(rctx->pad));
+ rctx->pad[0] = 0x80;
+ sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
+ sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
+ }
+ if (sg) {
+ sg_mark_end(sg);
+ sg = rctx->data_sg.sgl;
+ }
+
+ /* Initialize the K1/K2 scatterlist */
+ if (final)
+ cmac_key_sg = (need_pad) ? &ctx->u.aes.k2_sg
+ : &ctx->u.aes.k1_sg;
+
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_AES;
+ rctx->cmd.u.aes.type = ctx->u.aes.type;
+ rctx->cmd.u.aes.mode = ctx->u.aes.mode;
+ rctx->cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
+ rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
+ rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
+ rctx->cmd.u.aes.iv = &rctx->iv_sg;
+ rctx->cmd.u.aes.iv_len = AES_BLOCK_SIZE;
+ rctx->cmd.u.aes.src = sg;
+ rctx->cmd.u.aes.src_len = rctx->hash_cnt;
+ rctx->cmd.u.aes.dst = NULL;
+ rctx->cmd.u.aes.cmac_key = cmac_key_sg;
+ rctx->cmd.u.aes.cmac_key_len = ctx->u.aes.kn_len;
+ rctx->cmd.u.aes.cmac_final = final;
+
+ ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+ return ret;
+}
+
+static int ccp_aes_cmac_init(struct ahash_request *req)
+{
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+
+ memset(rctx, 0, sizeof(*rctx));
+
+ rctx->null_msg = 1;
+
+ return 0;
+}
+
+static int ccp_aes_cmac_update(struct ahash_request *req)
+{
+ return ccp_do_cmac_update(req, req->nbytes, 0);
+}
+
+static int ccp_aes_cmac_final(struct ahash_request *req)
+{
+ return ccp_do_cmac_update(req, 0, 1);
+}
+
+static int ccp_aes_cmac_finup(struct ahash_request *req)
+{
+ return ccp_do_cmac_update(req, req->nbytes, 1);
+}
+
+static int ccp_aes_cmac_digest(struct ahash_request *req)
+{
+ int ret;
+
+ ret = ccp_aes_cmac_init(req);
+ if (ret)
+ return ret;
+
+ return ccp_aes_cmac_finup(req);
+}
+
+static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct ccp_crypto_ahash_alg *alg =
+ ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
+ u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo;
+ u64 rb_hi = 0x00, rb_lo = 0x87;
+ __be64 *gk;
+ int ret;
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ ctx->u.aes.type = CCP_AES_TYPE_128;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->u.aes.type = CCP_AES_TYPE_192;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->u.aes.type = CCP_AES_TYPE_256;
+ break;
+ default:
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->u.aes.mode = alg->mode;
+
+ /* Set to zero until complete */
+ ctx->u.aes.key_len = 0;
+
+ /* Set the key for the AES cipher used to generate the keys */
+ ret = crypto_cipher_setkey(ctx->u.aes.tfm_cipher, key, key_len);
+ if (ret)
+ return ret;
+
+ /* Encrypt a block of zeroes - use key area in context */
+ memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
+ crypto_cipher_encrypt_one(ctx->u.aes.tfm_cipher, ctx->u.aes.key,
+ ctx->u.aes.key);
+
+ /* Generate K1 and K2 */
+ k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key));
+ k0_lo = be64_to_cpu(*((__be64 *)ctx->u.aes.key + 1));
+
+ k1_hi = (k0_hi << 1) | (k0_lo >> 63);
+ k1_lo = k0_lo << 1;
+ if (ctx->u.aes.key[0] & 0x80) {
+ k1_hi ^= rb_hi;
+ k1_lo ^= rb_lo;
+ }
+ gk = (__be64 *)ctx->u.aes.k1;
+ *gk = cpu_to_be64(k1_hi);
+ gk++;
+ *gk = cpu_to_be64(k1_lo);
+
+ k2_hi = (k1_hi << 1) | (k1_lo >> 63);
+ k2_lo = k1_lo << 1;
+ if (ctx->u.aes.k1[0] & 0x80) {
+ k2_hi ^= rb_hi;
+ k2_lo ^= rb_lo;
+ }
+ gk = (__be64 *)ctx->u.aes.k2;
+ *gk = cpu_to_be64(k2_hi);
+ gk++;
+ *gk = cpu_to_be64(k2_lo);
+
+ ctx->u.aes.kn_len = sizeof(ctx->u.aes.k1);
+ sg_init_one(&ctx->u.aes.k1_sg, ctx->u.aes.k1, sizeof(ctx->u.aes.k1));
+ sg_init_one(&ctx->u.aes.k2_sg, ctx->u.aes.k2, sizeof(ctx->u.aes.k2));
+
+ /* Save the supplied key */
+ memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
+ memcpy(ctx->u.aes.key, key, key_len);
+ ctx->u.aes.key_len = key_len;
+ sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
+
+ return ret;
+}
+
+static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct crypto_cipher *cipher_tfm;
+
+ ctx->complete = ccp_aes_cmac_complete;
+ ctx->u.aes.key_len = 0;
+
+ crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));
+
+ cipher_tfm = crypto_alloc_cipher("aes", 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(cipher_tfm)) {
+ pr_warn("could not load aes cipher driver\n");
+ return PTR_ERR(cipher_tfm);
+ }
+ ctx->u.aes.tfm_cipher = cipher_tfm;
+
+ return 0;
+}
+
+static void ccp_aes_cmac_cra_exit(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->u.aes.tfm_cipher)
+ crypto_free_cipher(ctx->u.aes.tfm_cipher);
+ ctx->u.aes.tfm_cipher = NULL;
+}
+
+int ccp_register_aes_cmac_algs(struct list_head *head)
+{
+ struct ccp_crypto_ahash_alg *ccp_alg;
+ struct ahash_alg *alg;
+ struct hash_alg_common *halg;
+ struct crypto_alg *base;
+ int ret;
+
+ ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+ if (!ccp_alg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ccp_alg->entry);
+ ccp_alg->mode = CCP_AES_MODE_CMAC;
+
+ alg = &ccp_alg->alg;
+ alg->init = ccp_aes_cmac_init;
+ alg->update = ccp_aes_cmac_update;
+ alg->final = ccp_aes_cmac_final;
+ alg->finup = ccp_aes_cmac_finup;
+ alg->digest = ccp_aes_cmac_digest;
+ alg->setkey = ccp_aes_cmac_setkey;
+
+ halg = &alg->halg;
+ halg->digestsize = AES_BLOCK_SIZE;
+
+ base = &halg->base;
+ snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
+ snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
+ base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK;
+ base->cra_blocksize = AES_BLOCK_SIZE;
+ base->cra_ctxsize = sizeof(struct ccp_ctx);
+ base->cra_priority = CCP_CRA_PRIORITY;
+ base->cra_type = &crypto_ahash_type;
+ base->cra_init = ccp_aes_cmac_cra_init;
+ base->cra_exit = ccp_aes_cmac_cra_exit;
+ base->cra_module = THIS_MODULE;
+
+ ret = crypto_register_ahash(alg);
+ if (ret) {
+ pr_err("%s ahash algorithm registration error (%d)\n",
+ base->cra_name, ret);
+ kfree(ccp_alg);
+ return ret;
+ }
+
+ list_add(&ccp_alg->entry, head);
+
+ return 0;
+}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
new file mode 100644
index 000000000000..0237ab58f242
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -0,0 +1,279 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+
+struct ccp_aes_xts_def {
+ const char *name;
+ const char *drv_name;
+};
+
+static struct ccp_aes_xts_def aes_xts_algs[] = {
+ {
+ .name = "xts(aes)",
+ .drv_name = "xts-aes-ccp",
+ },
+};
+
+struct ccp_unit_size_map {
+ unsigned int size;
+ u32 value;
+};
+
+static struct ccp_unit_size_map unit_size_map[] = {
+ {
+ .size = 4096,
+ .value = CCP_XTS_AES_UNIT_SIZE_4096,
+ },
+ {
+ .size = 2048,
+ .value = CCP_XTS_AES_UNIT_SIZE_2048,
+ },
+ {
+ .size = 1024,
+ .value = CCP_XTS_AES_UNIT_SIZE_1024,
+ },
+ {
+ .size = 512,
+ .value = CCP_XTS_AES_UNIT_SIZE_512,
+ },
+ {
+ .size = 256,
+ .value = CCP_XTS_AES_UNIT_SIZE__LAST,
+ },
+ {
+ .size = 128,
+ .value = CCP_XTS_AES_UNIT_SIZE__LAST,
+ },
+ {
+ .size = 64,
+ .value = CCP_XTS_AES_UNIT_SIZE__LAST,
+ },
+ {
+ .size = 32,
+ .value = CCP_XTS_AES_UNIT_SIZE__LAST,
+ },
+ {
+ .size = 16,
+ .value = CCP_XTS_AES_UNIT_SIZE_16,
+ },
+ {
+ .size = 1,
+ .value = CCP_XTS_AES_UNIT_SIZE__LAST,
+ },
+};
+
+static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
+{
+ struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+
+ if (ret)
+ return ret;
+
+ memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
+
+ return 0;
+}
+
+static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+
+ /* Only support 128-bit AES key with a 128-bit Tweak key,
+ * otherwise use the fallback
+ */
+ switch (key_len) {
+ case AES_KEYSIZE_128 * 2:
+ memcpy(ctx->u.aes.key, key, key_len);
+ break;
+ }
+ ctx->u.aes.key_len = key_len / 2;
+ sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
+
+ return crypto_ablkcipher_setkey(ctx->u.aes.tfm_ablkcipher, key,
+ key_len);
+}
+
+static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+ unsigned int encrypt)
+{
+ struct crypto_tfm *tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ unsigned int unit;
+ int ret;
+
+ if (!ctx->u.aes.key_len)
+ return -EINVAL;
+
+ if (req->nbytes & (AES_BLOCK_SIZE - 1))
+ return -EINVAL;
+
+ if (!req->info)
+ return -EINVAL;
+
+ for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
+ if (!(req->nbytes & (unit_size_map[unit].size - 1)))
+ break;
+
+ if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+ (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
+ /* Use the fallback to process the request for any
+ * unsupported unit sizes or key sizes
+ */
+ ablkcipher_request_set_tfm(req, ctx->u.aes.tfm_ablkcipher);
+ ret = (encrypt) ? crypto_ablkcipher_encrypt(req) :
+ crypto_ablkcipher_decrypt(req);
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+
+ return ret;
+ }
+
+ memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
+ sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);
+
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
+ rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
+ : CCP_AES_ACTION_DECRYPT;
+ rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
+ rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
+ rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
+ rctx->cmd.u.xts.iv = &rctx->iv_sg;
+ rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
+ rctx->cmd.u.xts.src = req->src;
+ rctx->cmd.u.xts.src_len = req->nbytes;
+ rctx->cmd.u.xts.dst = req->dst;
+
+ ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+ return ret;
+}
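Editorial note on the unit-size lookup above: every entry in unit_size_map is a power of two, so (req->nbytes & (size - 1)) == 0 tests divisibility, and because the table is ordered largest-first the first hit is the largest unit size that divides the request. If that entry maps to CCP_XTS_AES_UNIT_SIZE__LAST, or the key is not 128-bit, the request is handed to the fallback ablkcipher instead. A minimal sketch of the same selection, illustrative only:

    /* Pick the index of the largest power-of-two unit size that evenly
     * divides the request length; 'count' is returned when nothing fits. */
    static unsigned int pick_unit_size(const unsigned int *sizes,
    				   unsigned int count, unsigned int nbytes)
    {
    	unsigned int i;

    	for (i = 0; i < count; i++)
    		if (!(nbytes & (sizes[i] - 1)))
    			return i;	/* first (largest) divisor wins */

    	return count;
    }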
+
+static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
+{
+ return ccp_aes_xts_crypt(req, 1);
+}
+
+static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
+{
+ return ccp_aes_xts_crypt(req, 0);
+}
+
+static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_ablkcipher *fallback_tfm;
+
+ ctx->complete = ccp_aes_xts_complete;
+ ctx->u.aes.key_len = 0;
+
+ fallback_tfm = crypto_alloc_ablkcipher(tfm->__crt_alg->cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback_tfm)) {
+ pr_warn("could not load fallback driver %s\n",
+ tfm->__crt_alg->cra_name);
+ return PTR_ERR(fallback_tfm);
+ }
+ ctx->u.aes.tfm_ablkcipher = fallback_tfm;
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx) +
+ fallback_tfm->base.crt_ablkcipher.reqsize;
+
+ return 0;
+}
+
+static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->u.aes.tfm_ablkcipher)
+ crypto_free_ablkcipher(ctx->u.aes.tfm_ablkcipher);
+ ctx->u.aes.tfm_ablkcipher = NULL;
+}
+
+
+static int ccp_register_aes_xts_alg(struct list_head *head,
+ const struct ccp_aes_xts_def *def)
+{
+ struct ccp_crypto_ablkcipher_alg *ccp_alg;
+ struct crypto_alg *alg;
+ int ret;
+
+ ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+ if (!ccp_alg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ccp_alg->entry);
+
+ alg = &ccp_alg->alg;
+
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->drv_name);
+ alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK;
+ alg->cra_blocksize = AES_BLOCK_SIZE;
+ alg->cra_ctxsize = sizeof(struct ccp_ctx);
+ alg->cra_priority = CCP_CRA_PRIORITY;
+ alg->cra_type = &crypto_ablkcipher_type;
+ alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
+ alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
+ alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
+ alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
+ alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
+ alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
+ alg->cra_init = ccp_aes_xts_cra_init;
+ alg->cra_exit = ccp_aes_xts_cra_exit;
+ alg->cra_module = THIS_MODULE;
+
+ ret = crypto_register_alg(alg);
+ if (ret) {
+ pr_err("%s ablkcipher algorithm registration error (%d)\n",
+ alg->cra_name, ret);
+ kfree(ccp_alg);
+ return ret;
+ }
+
+ list_add(&ccp_alg->entry, head);
+
+ return 0;
+}
+
+int ccp_register_aes_xts_algs(struct list_head *head)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) {
+ ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
new file mode 100644
index 000000000000..e46490db0f63
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -0,0 +1,369 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) AES crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+
+static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
+{
+ struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+
+ if (ret)
+ return ret;
+
+ if (ctx->u.aes.mode != CCP_AES_MODE_ECB)
+ memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
+
+ return 0;
+}
+
+static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+ struct ccp_crypto_ablkcipher_alg *alg =
+ ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ ctx->u.aes.type = CCP_AES_TYPE_128;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->u.aes.type = CCP_AES_TYPE_192;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->u.aes.type = CCP_AES_TYPE_256;
+ break;
+ default:
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->u.aes.mode = alg->mode;
+ ctx->u.aes.key_len = key_len;
+
+ memcpy(ctx->u.aes.key, key, key_len);
+ sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
+
+ return 0;
+}
+
+static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct scatterlist *iv_sg = NULL;
+ unsigned int iv_len = 0;
+ int ret;
+
+ if (!ctx->u.aes.key_len)
+ return -EINVAL;
+
+ if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
+ (ctx->u.aes.mode == CCP_AES_MODE_CBC) ||
+ (ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
+ (req->nbytes & (AES_BLOCK_SIZE - 1)))
+ return -EINVAL;
+
+ if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
+ if (!req->info)
+ return -EINVAL;
+
+ memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
+ iv_sg = &rctx->iv_sg;
+ iv_len = AES_BLOCK_SIZE;
+ sg_init_one(iv_sg, rctx->iv, iv_len);
+ }
+
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_AES;
+ rctx->cmd.u.aes.type = ctx->u.aes.type;
+ rctx->cmd.u.aes.mode = ctx->u.aes.mode;
+ rctx->cmd.u.aes.action =
+ (encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT;
+ rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
+ rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
+ rctx->cmd.u.aes.iv = iv_sg;
+ rctx->cmd.u.aes.iv_len = iv_len;
+ rctx->cmd.u.aes.src = req->src;
+ rctx->cmd.u.aes.src_len = req->nbytes;
+ rctx->cmd.u.aes.dst = req->dst;
+
+ ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+ return ret;
+}
+
+static int ccp_aes_encrypt(struct ablkcipher_request *req)
+{
+ return ccp_aes_crypt(req, true);
+}
+
+static int ccp_aes_decrypt(struct ablkcipher_request *req)
+{
+ return ccp_aes_crypt(req, false);
+}
+
+static int ccp_aes_cra_init(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->complete = ccp_aes_complete;
+ ctx->u.aes.key_len = 0;
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+
+ return 0;
+}
+
+static void ccp_aes_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
+ int ret)
+{
+ struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+
+ /* Restore the original pointer */
+ req->info = rctx->rfc3686_info;
+
+ return ccp_aes_complete(async_req, ret);
+}
+
+static int ccp_aes_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+
+ if (key_len < CTR_RFC3686_NONCE_SIZE)
+ return -EINVAL;
+
+ key_len -= CTR_RFC3686_NONCE_SIZE;
+ memcpy(ctx->u.aes.nonce, key + key_len, CTR_RFC3686_NONCE_SIZE);
+
+ return ccp_aes_setkey(tfm, key, key_len);
+}
+
+static int ccp_aes_rfc3686_crypt(struct ablkcipher_request *req, bool encrypt)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ u8 *iv;
+
+ /* Initialize the CTR block */
+ iv = rctx->rfc3686_iv;
+ memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE);
+
+ iv += CTR_RFC3686_NONCE_SIZE;
+ memcpy(iv, req->info, CTR_RFC3686_IV_SIZE);
+
+ iv += CTR_RFC3686_IV_SIZE;
+ *(__be32 *)iv = cpu_to_be32(1);
+
+ /* Point to the new IV */
+ rctx->rfc3686_info = req->info;
+ req->info = rctx->rfc3686_iv;
+
+ return ccp_aes_crypt(req, encrypt);
+}
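Editorial note: the counter block built above follows RFC 3686: a 4-byte nonce (saved from the key tail in ccp_aes_rfc3686_setkey()), the caller's 8-byte IV, and a 4-byte big-endian counter initialised to 1, together filling one 16-byte AES block. A standalone sketch of the same layout, illustrative only:

    #include <string.h>

    /* Assemble an RFC 3686 counter block: nonce (4) || iv (8) || ctr=1 (4). */
    static void rfc3686_ctr_block(unsigned char blk[16],
    			      const unsigned char nonce[4],
    			      const unsigned char iv[8])
    {
    	memcpy(blk, nonce, 4);		/* nonce from the key tail */
    	memcpy(blk + 4, iv, 8);		/* per-request IV */
    	blk[12] = 0;			/* 32-bit big-endian counter ... */
    	blk[13] = 0;
    	blk[14] = 0;
    	blk[15] = 1;			/* ... starting at 1 */
    }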
+
+static int ccp_aes_rfc3686_encrypt(struct ablkcipher_request *req)
+{
+ return ccp_aes_rfc3686_crypt(req, true);
+}
+
+static int ccp_aes_rfc3686_decrypt(struct ablkcipher_request *req)
+{
+ return ccp_aes_rfc3686_crypt(req, false);
+}
+
+static int ccp_aes_rfc3686_cra_init(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->complete = ccp_aes_rfc3686_complete;
+ ctx->u.aes.key_len = 0;
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+
+ return 0;
+}
+
+static void ccp_aes_rfc3686_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static struct crypto_alg ccp_aes_defaults = {
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ccp_ctx),
+ .cra_priority = CCP_CRA_PRIORITY,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = ccp_aes_cra_init,
+ .cra_exit = ccp_aes_cra_exit,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = ccp_aes_setkey,
+ .encrypt = ccp_aes_encrypt,
+ .decrypt = ccp_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+};
+
+static struct crypto_alg ccp_aes_rfc3686_defaults = {
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = CTR_RFC3686_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ccp_ctx),
+ .cra_priority = CCP_CRA_PRIORITY,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = ccp_aes_rfc3686_cra_init,
+ .cra_exit = ccp_aes_rfc3686_cra_exit,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = ccp_aes_rfc3686_setkey,
+ .encrypt = ccp_aes_rfc3686_encrypt,
+ .decrypt = ccp_aes_rfc3686_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ },
+};
+
+struct ccp_aes_def {
+ enum ccp_aes_mode mode;
+ const char *name;
+ const char *driver_name;
+ unsigned int blocksize;
+ unsigned int ivsize;
+ struct crypto_alg *alg_defaults;
+};
+
+static struct ccp_aes_def aes_algs[] = {
+ {
+ .mode = CCP_AES_MODE_ECB,
+ .name = "ecb(aes)",
+ .driver_name = "ecb-aes-ccp",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = 0,
+ .alg_defaults = &ccp_aes_defaults,
+ },
+ {
+ .mode = CCP_AES_MODE_CBC,
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-ccp",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .alg_defaults = &ccp_aes_defaults,
+ },
+ {
+ .mode = CCP_AES_MODE_CFB,
+ .name = "cfb(aes)",
+ .driver_name = "cfb-aes-ccp",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .alg_defaults = &ccp_aes_defaults,
+ },
+ {
+ .mode = CCP_AES_MODE_OFB,
+ .name = "ofb(aes)",
+ .driver_name = "ofb-aes-ccp",
+ .blocksize = 1,
+ .ivsize = AES_BLOCK_SIZE,
+ .alg_defaults = &ccp_aes_defaults,
+ },
+ {
+ .mode = CCP_AES_MODE_CTR,
+ .name = "ctr(aes)",
+ .driver_name = "ctr-aes-ccp",
+ .blocksize = 1,
+ .ivsize = AES_BLOCK_SIZE,
+ .alg_defaults = &ccp_aes_defaults,
+ },
+ {
+ .mode = CCP_AES_MODE_CTR,
+ .name = "rfc3686(ctr(aes))",
+ .driver_name = "rfc3686-ctr-aes-ccp",
+ .blocksize = 1,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .alg_defaults = &ccp_aes_rfc3686_defaults,
+ },
+};
+
+static int ccp_register_aes_alg(struct list_head *head,
+ const struct ccp_aes_def *def)
+{
+ struct ccp_crypto_ablkcipher_alg *ccp_alg;
+ struct crypto_alg *alg;
+ int ret;
+
+ ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+ if (!ccp_alg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ccp_alg->entry);
+
+ ccp_alg->mode = def->mode;
+
+ /* Copy the defaults and override as necessary */
+ alg = &ccp_alg->alg;
+ *alg = *def->alg_defaults;
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->driver_name);
+ alg->cra_blocksize = def->blocksize;
+ alg->cra_ablkcipher.ivsize = def->ivsize;
+
+ ret = crypto_register_alg(alg);
+ if (ret) {
+ pr_err("%s ablkcipher algorithm registration error (%d)\n",
+ alg->cra_name, ret);
+ kfree(ccp_alg);
+ return ret;
+ }
+
+ list_add(&ccp_alg->entry, head);
+
+ return 0;
+}
+
+int ccp_register_aes_algs(struct list_head *head)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+ ret = ccp_register_aes_alg(head, &aes_algs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
new file mode 100644
index 000000000000..2636f044789d
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -0,0 +1,432 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/ccp.h>
+#include <linux/scatterlist.h>
+#include <crypto/internal/hash.h>
+
+#include "ccp-crypto.h"
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.0");
+MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");
+
+
+/* List heads for the supported algorithms */
+static LIST_HEAD(hash_algs);
+static LIST_HEAD(cipher_algs);
+
+/* For any tfm, requests for that tfm on the same CPU must be returned
+ * in the order received. With multiple queues available, the CCP can
+ * process more than one cmd at a time. Therefore we must maintain
+ * a cmd list to ensure the proper ordering of requests on a given tfm/cpu
+ * combination.
+ */
+struct ccp_crypto_cpu_queue {
+ struct list_head cmds;
+ struct list_head *backlog;
+ unsigned int cmd_count;
+};
+#define CCP_CRYPTO_MAX_QLEN 50
+
+struct ccp_crypto_percpu_queue {
+ struct ccp_crypto_cpu_queue __percpu *cpu_queue;
+};
+static struct ccp_crypto_percpu_queue req_queue;
+
+struct ccp_crypto_cmd {
+ struct list_head entry;
+
+ struct ccp_cmd *cmd;
+
+ /* Save the crypto_tfm and crypto_async_request addresses
+ * separately to avoid any reference to a possibly invalid
+ * crypto_async_request structure after invoking the request
+ * callback
+ */
+ struct crypto_async_request *req;
+ struct crypto_tfm *tfm;
+
+ /* Used for held command processing to determine state */
+ int ret;
+
+ int cpu;
+};
+
+struct ccp_crypto_cpu {
+ struct work_struct work;
+ struct completion completion;
+ struct ccp_crypto_cmd *crypto_cmd;
+ int err;
+};
+
+
+static inline bool ccp_crypto_success(int err)
+{
+ if (err && (err != -EINPROGRESS) && (err != -EBUSY))
+ return false;
+
+ return true;
+}
+
+/*
+ * ccp_crypto_cmd_complete must be called while running on the appropriate
+ * cpu and the caller must have done a get_cpu to disable preemption
+ */
+static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
+ struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
+{
+ struct ccp_crypto_cpu_queue *cpu_queue;
+ struct ccp_crypto_cmd *held = NULL, *tmp;
+
+ *backlog = NULL;
+
+ cpu_queue = this_cpu_ptr(req_queue.cpu_queue);
+
+ /* Held cmds will be after the current cmd in the queue, so start
+ * searching for a cmd with a matching tfm for submission.
+ */
+ tmp = crypto_cmd;
+ list_for_each_entry_continue(tmp, &cpu_queue->cmds, entry) {
+ if (crypto_cmd->tfm != tmp->tfm)
+ continue;
+ held = tmp;
+ break;
+ }
+
+ /* Process the backlog:
+ * Because cmds can be executed from any point in the cmd list,
+ * special precautions have to be taken when handling the backlog.
+ */
+ if (cpu_queue->backlog != &cpu_queue->cmds) {
+ /* Skip over this cmd if it is the next backlog cmd */
+ if (cpu_queue->backlog == &crypto_cmd->entry)
+ cpu_queue->backlog = crypto_cmd->entry.next;
+
+ *backlog = container_of(cpu_queue->backlog,
+ struct ccp_crypto_cmd, entry);
+ cpu_queue->backlog = cpu_queue->backlog->next;
+
+ /* Skip over this cmd if it is now the next backlog cmd */
+ if (cpu_queue->backlog == &crypto_cmd->entry)
+ cpu_queue->backlog = crypto_cmd->entry.next;
+ }
+
+ /* Remove the cmd entry from the list of cmds */
+ cpu_queue->cmd_count--;
+ list_del(&crypto_cmd->entry);
+
+ return held;
+}
+
+static void ccp_crypto_complete_on_cpu(struct work_struct *work)
+{
+ struct ccp_crypto_cpu *cpu_work =
+ container_of(work, struct ccp_crypto_cpu, work);
+ struct ccp_crypto_cmd *crypto_cmd = cpu_work->crypto_cmd;
+ struct ccp_crypto_cmd *held, *next, *backlog;
+ struct crypto_async_request *req = crypto_cmd->req;
+ struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
+ int cpu, ret;
+
+ cpu = get_cpu();
+
+ if (cpu_work->err == -EINPROGRESS) {
+ /* Only propagate the -EINPROGRESS if necessary */
+ if (crypto_cmd->ret == -EBUSY) {
+ crypto_cmd->ret = -EINPROGRESS;
+ req->complete(req, -EINPROGRESS);
+ }
+
+ goto e_cpu;
+ }
+
+ /* Operation has completed - update the queue before invoking
+ * the completion callbacks and retrieve the next cmd (cmd with
+ * a matching tfm) that can be submitted to the CCP.
+ */
+ held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
+ if (backlog) {
+ backlog->ret = -EINPROGRESS;
+ backlog->req->complete(backlog->req, -EINPROGRESS);
+ }
+
+ /* Transition the state from -EBUSY to -EINPROGRESS first */
+ if (crypto_cmd->ret == -EBUSY)
+ req->complete(req, -EINPROGRESS);
+
+ /* Completion callbacks */
+ ret = cpu_work->err;
+ if (ctx->complete)
+ ret = ctx->complete(req, ret);
+ req->complete(req, ret);
+
+ /* Submit the next cmd */
+ while (held) {
+ ret = ccp_enqueue_cmd(held->cmd);
+ if (ccp_crypto_success(ret))
+ break;
+
+ /* Error occurred, report it and get the next entry */
+ held->req->complete(held->req, ret);
+
+ next = ccp_crypto_cmd_complete(held, &backlog);
+ if (backlog) {
+ backlog->ret = -EINPROGRESS;
+ backlog->req->complete(backlog->req, -EINPROGRESS);
+ }
+
+ kfree(held);
+ held = next;
+ }
+
+ kfree(crypto_cmd);
+
+e_cpu:
+ put_cpu();
+
+ complete(&cpu_work->completion);
+}
+
+static void ccp_crypto_complete(void *data, int err)
+{
+ struct ccp_crypto_cmd *crypto_cmd = data;
+ struct ccp_crypto_cpu cpu_work;
+
+ INIT_WORK(&cpu_work.work, ccp_crypto_complete_on_cpu);
+ init_completion(&cpu_work.completion);
+ cpu_work.crypto_cmd = crypto_cmd;
+ cpu_work.err = err;
+
+ schedule_work_on(crypto_cmd->cpu, &cpu_work.work);
+
+ /* Keep the completion call synchronous */
+ wait_for_completion(&cpu_work.completion);
+}
+
+static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
+{
+ struct ccp_crypto_cpu_queue *cpu_queue;
+ struct ccp_crypto_cmd *active = NULL, *tmp;
+ int cpu, ret;
+
+ cpu = get_cpu();
+ crypto_cmd->cpu = cpu;
+
+ cpu_queue = this_cpu_ptr(req_queue.cpu_queue);
+
+ /* Check if the cmd can/should be queued */
+ if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
+ ret = -EBUSY;
+ if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
+ goto e_cpu;
+ }
+
+ /* Look for an entry with the same tfm. If there is a cmd
+ * with the same tfm in the list for this cpu then the current
+ * cmd cannot be submitted to the CCP yet.
+ */
+ list_for_each_entry(tmp, &cpu_queue->cmds, entry) {
+ if (crypto_cmd->tfm != tmp->tfm)
+ continue;
+ active = tmp;
+ break;
+ }
+
+ ret = -EINPROGRESS;
+ if (!active) {
+ ret = ccp_enqueue_cmd(crypto_cmd->cmd);
+ if (!ccp_crypto_success(ret))
+ goto e_cpu;
+ }
+
+ if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
+ ret = -EBUSY;
+ if (cpu_queue->backlog == &cpu_queue->cmds)
+ cpu_queue->backlog = &crypto_cmd->entry;
+ }
+ crypto_cmd->ret = ret;
+
+ cpu_queue->cmd_count++;
+ list_add_tail(&crypto_cmd->entry, &cpu_queue->cmds);
+
+e_cpu:
+ put_cpu();
+
+ return ret;
+}
+
+/**
+ * ccp_crypto_enqueue_request - queue a crypto async request for processing
+ * by the CCP
+ *
+ * @req: crypto_async_request struct to be processed
+ * @cmd: ccp_cmd struct to be sent to the CCP
+ */
+int ccp_crypto_enqueue_request(struct crypto_async_request *req,
+ struct ccp_cmd *cmd)
+{
+ struct ccp_crypto_cmd *crypto_cmd;
+ gfp_t gfp;
+ int ret;
+
+ gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+
+ crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
+ if (!crypto_cmd)
+ return -ENOMEM;
+
+ /* The tfm pointer must be saved and not referenced from the
+ * crypto_async_request (req) pointer because it is used after
+ * the completion callback for the request, and the req pointer
+ * might not be valid anymore.
+ */
+ crypto_cmd->cmd = cmd;
+ crypto_cmd->req = req;
+ crypto_cmd->tfm = req->tfm;
+
+ cmd->callback = ccp_crypto_complete;
+ cmd->data = crypto_cmd;
+
+ if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ cmd->flags |= CCP_CMD_MAY_BACKLOG;
+ else
+ cmd->flags &= ~CCP_CMD_MAY_BACKLOG;
+
+ ret = ccp_crypto_enqueue_cmd(crypto_cmd);
+ if (!ccp_crypto_success(ret))
+ kfree(crypto_cmd);
+
+ return ret;
+}
+
+struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
+ struct scatterlist *sg_add)
+{
+ struct scatterlist *sg, *sg_last = NULL;
+
+ for (sg = table->sgl; sg; sg = sg_next(sg))
+ if (!sg_page(sg))
+ break;
+ BUG_ON(!sg);
+
+ for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
+ sg_set_page(sg, sg_page(sg_add), sg_add->length,
+ sg_add->offset);
+ sg_last = sg;
+ }
+ BUG_ON(sg_add);
+
+ return sg_last;
+}
+
+static int ccp_register_algs(void)
+{
+ int ret;
+
+ ret = ccp_register_aes_algs(&cipher_algs);
+ if (ret)
+ return ret;
+
+ ret = ccp_register_aes_cmac_algs(&hash_algs);
+ if (ret)
+ return ret;
+
+ ret = ccp_register_aes_xts_algs(&cipher_algs);
+ if (ret)
+ return ret;
+
+ ret = ccp_register_sha_algs(&hash_algs);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ccp_unregister_algs(void)
+{
+ struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
+ struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
+
+ list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
+ crypto_unregister_ahash(&ahash_alg->alg);
+ list_del(&ahash_alg->entry);
+ kfree(ahash_alg);
+ }
+
+ list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
+ crypto_unregister_alg(&ablk_alg->alg);
+ list_del(&ablk_alg->entry);
+ kfree(ablk_alg);
+ }
+}
+
+static int ccp_init_queues(void)
+{
+ struct ccp_crypto_cpu_queue *cpu_queue;
+ int cpu;
+
+ req_queue.cpu_queue = alloc_percpu(struct ccp_crypto_cpu_queue);
+ if (!req_queue.cpu_queue)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
+ INIT_LIST_HEAD(&cpu_queue->cmds);
+ cpu_queue->backlog = &cpu_queue->cmds;
+ cpu_queue->cmd_count = 0;
+ }
+
+ return 0;
+}
+
+static void ccp_fini_queue(void)
+{
+ struct ccp_crypto_cpu_queue *cpu_queue;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
+ BUG_ON(!list_empty(&cpu_queue->cmds));
+ }
+ free_percpu(req_queue.cpu_queue);
+}
+
+static int ccp_crypto_init(void)
+{
+ int ret;
+
+ ret = ccp_init_queues();
+ if (ret)
+ return ret;
+
+ ret = ccp_register_algs();
+ if (ret) {
+ ccp_unregister_algs();
+ ccp_fini_queue();
+ }
+
+ return ret;
+}
+
+static void ccp_crypto_exit(void)
+{
+ ccp_unregister_algs();
+ ccp_fini_queue();
+}
+
+module_init(ccp_crypto_init);
+module_exit(ccp_crypto_exit);
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
new file mode 100644
index 000000000000..3867290b3531
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -0,0 +1,517 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+
+struct ccp_sha_result {
+ struct completion completion;
+ int err;
+};
+
+static void ccp_sync_hash_complete(struct crypto_async_request *req, int err)
+{
+ struct ccp_sha_result *result = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ result->err = err;
+ complete(&result->completion);
+}
+
+static int ccp_sync_hash(struct crypto_ahash *tfm, u8 *buf,
+ struct scatterlist *sg, unsigned int len)
+{
+ struct ccp_sha_result result;
+ struct ahash_request *req;
+ int ret;
+
+ init_completion(&result.completion);
+
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ ccp_sync_hash_complete, &result);
+ ahash_request_set_crypt(req, sg, buf, len);
+
+ ret = crypto_ahash_digest(req);
+ if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
+ ret = wait_for_completion_interruptible(&result.completion);
+ if (!ret)
+ ret = result.err;
+ }
+
+ ahash_request_free(req);
+
+ return ret;
+}
+
+static int ccp_sha_finish_hmac(struct crypto_async_request *async_req)
+{
+ struct ahash_request *req = ahash_request_cast(async_req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct scatterlist sg[2];
+ unsigned int block_size =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ unsigned int digest_size = crypto_ahash_digestsize(tfm);
+
+ sg_init_table(sg, ARRAY_SIZE(sg));
+ sg_set_buf(&sg[0], ctx->u.sha.opad, block_size);
+ sg_set_buf(&sg[1], rctx->ctx, digest_size);
+
+ return ccp_sync_hash(ctx->u.sha.hmac_tfm, req->result, sg,
+ block_size + digest_size);
+}
+
+static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
+{
+ struct ahash_request *req = ahash_request_cast(async_req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ unsigned int digest_size = crypto_ahash_digestsize(tfm);
+
+ if (ret)
+ goto e_free;
+
+ if (rctx->hash_rem) {
+ /* Save remaining data to buffer */
+ unsigned int offset = rctx->nbytes - rctx->hash_rem;
+ scatterwalk_map_and_copy(rctx->buf, rctx->src,
+ offset, rctx->hash_rem, 0);
+ rctx->buf_count = rctx->hash_rem;
+ } else
+ rctx->buf_count = 0;
+
+ /* Update result area if supplied */
+ if (req->result)
+ memcpy(req->result, rctx->ctx, digest_size);
+
+ /* If we're doing an HMAC, we need to perform that on the final op */
+ if (rctx->final && ctx->u.sha.key_len)
+ ret = ccp_sha_finish_hmac(async_req);
+
+e_free:
+ sg_free_table(&rctx->data_sg);
+
+ return ret;
+}
+
+static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
+ unsigned int final)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct scatterlist *sg;
+ unsigned int block_size =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ unsigned int sg_count;
+ gfp_t gfp;
+ u64 len;
+ int ret;
+
+ len = (u64)rctx->buf_count + (u64)nbytes;
+
+ if (!final && (len <= block_size)) {
+ scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
+ 0, nbytes, 0);
+ rctx->buf_count += nbytes;
+
+ return 0;
+ }
+
+ rctx->src = req->src;
+ rctx->nbytes = nbytes;
+
+ rctx->final = final;
+ rctx->hash_rem = final ? 0 : len & (block_size - 1);
+ rctx->hash_cnt = len - rctx->hash_rem;
+ if (!final && !rctx->hash_rem) {
+ /* CCP can't do zero length final, so keep some data around */
+ rctx->hash_cnt -= block_size;
+ rctx->hash_rem = block_size;
+ }
+
+ /* Initialize the context scatterlist */
+ sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));
+
+ sg = NULL;
+ if (rctx->buf_count && nbytes) {
+ /* Build the data scatterlist table - allocate enough entries
+ * for both data pieces (buffer and input data)
+ */
+ gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC;
+ sg_count = sg_nents(req->src) + 1;
+ ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
+ if (ret)
+ return ret;
+
+ sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
+ sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
+ sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
+ sg_mark_end(sg);
+
+ sg = rctx->data_sg.sgl;
+ } else if (rctx->buf_count) {
+ sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
+
+ sg = &rctx->buf_sg;
+ } else if (nbytes) {
+ sg = req->src;
+ }
+
+ rctx->msg_bits += (rctx->hash_cnt << 3); /* Total in bits */
+
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_SHA;
+ rctx->cmd.u.sha.type = rctx->type;
+ rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
+ rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
+ rctx->cmd.u.sha.src = sg;
+ rctx->cmd.u.sha.src_len = rctx->hash_cnt;
+ rctx->cmd.u.sha.final = rctx->final;
+ rctx->cmd.u.sha.msg_bits = rctx->msg_bits;
+
+ rctx->first = 0;
+
+ ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+ return ret;
+}
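Editorial note, a worked example of the buffering logic above (assuming a 64-byte block size): with 10 bytes already buffered and a 150-byte non-final update, len = 160, so hash_rem = 160 mod 64 = 32 and hash_cnt = 128; 128 bytes are hashed now and 32 are carried over in the buffer. If a non-final update brings len to an exact multiple of the block size (say 128), one full block is held back instead (hash_cnt = 64, hash_rem = 64) so that the eventual final operation never has zero bytes of data, which the CCP cannot handle.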
+
+static int ccp_sha_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_crypto_ahash_alg *alg =
+ ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
+ unsigned int block_size =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+ memset(rctx, 0, sizeof(*rctx));
+
+ memcpy(rctx->ctx, alg->init, sizeof(rctx->ctx));
+ rctx->type = alg->type;
+ rctx->first = 1;
+
+ if (ctx->u.sha.key_len) {
+ /* Buffer the HMAC key for first update */
+ memcpy(rctx->buf, ctx->u.sha.ipad, block_size);
+ rctx->buf_count = block_size;
+ }
+
+ return 0;
+}
+
+static int ccp_sha_update(struct ahash_request *req)
+{
+ return ccp_do_sha_update(req, req->nbytes, 0);
+}
+
+static int ccp_sha_final(struct ahash_request *req)
+{
+ return ccp_do_sha_update(req, 0, 1);
+}
+
+static int ccp_sha_finup(struct ahash_request *req)
+{
+ return ccp_do_sha_update(req, req->nbytes, 1);
+}
+
+static int ccp_sha_digest(struct ahash_request *req)
+{
+ int ret;
+
+ ret = ccp_sha_init(req);
+ if (ret)
+ return ret;
+
+ return ccp_sha_finup(req);
+}
+
+static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct scatterlist sg;
+ unsigned int block_size =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ unsigned int digest_size = crypto_ahash_digestsize(tfm);
+ int i, ret;
+
+ /* Set to zero until complete */
+ ctx->u.sha.key_len = 0;
+
+ /* Clear key area to provide zero padding for keys smaller
+ * than the block size
+ */
+ memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key));
+
+ if (key_len > block_size) {
+ /* Must hash the input key */
+ sg_init_one(&sg, key, key_len);
+ ret = ccp_sync_hash(tfm, ctx->u.sha.key, &sg, key_len);
+ if (ret) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ key_len = digest_size;
+ } else
+ memcpy(ctx->u.sha.key, key, key_len);
+
+ for (i = 0; i < block_size; i++) {
+ ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36;
+ ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c;
+ }
+
+ ctx->u.sha.key_len = key_len;
+
+ return 0;
+}
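Editorial note: the ipad/opad preparation above, together with ccp_sha_finish_hmac() earlier in this file, implements the standard HMAC construction (RFC 2104): HMAC(K, m) = H((K0 ^ opad) || H((K0 ^ ipad) || m)), where K0 is the key, hashed down if longer than a block and zero padded to the block size. The inner hash runs on the CCP (the ipad is buffered as the first block in ccp_sha_init()); the outer hash is done through ccp_sync_hash(). A self-contained sketch of the construction with the hash abstracted behind a callback; the callback type and the omitted error handling are assumptions for illustration only:

    #include <stdlib.h>
    #include <string.h>

    typedef void (*hash_fn)(const unsigned char *msg, size_t len,
    			unsigned char *digest);

    /* key0 is the key, already hashed down (if longer than a block) and
     * zero padded to block_size, exactly as ccp_sha_setkey() prepares it.
     * Allocation error handling omitted for brevity. */
    static void hmac_sketch(hash_fn hash, size_t block_size, size_t digest_size,
    			const unsigned char *key0,
    			const unsigned char *msg, size_t msg_len,
    			unsigned char *out)
    {
    	unsigned char *inner = malloc(block_size + msg_len);
    	unsigned char *outer = malloc(block_size + digest_size);
    	size_t i;

    	for (i = 0; i < block_size; i++) {
    		inner[i] = key0[i] ^ 0x36;	/* ipad */
    		outer[i] = key0[i] ^ 0x5c;	/* opad */
    	}
    	memcpy(inner + block_size, msg, msg_len);
    	hash(inner, block_size + msg_len, outer + block_size);	/* inner hash */
    	hash(outer, block_size + digest_size, out);		/* outer hash */

    	free(inner);
    	free(outer);
    }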
+
+static int ccp_sha_cra_init(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+
+ ctx->complete = ccp_sha_complete;
+ ctx->u.sha.key_len = 0;
+
+ crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx));
+
+ return 0;
+}
+
+static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
+ struct crypto_ahash *hmac_tfm;
+
+ hmac_tfm = crypto_alloc_ahash(alg->child_alg,
+ CRYPTO_ALG_TYPE_AHASH, 0);
+ if (IS_ERR(hmac_tfm)) {
+ pr_warn("could not load driver %s need for HMAC support\n",
+ alg->child_alg);
+ return PTR_ERR(hmac_tfm);
+ }
+
+ ctx->u.sha.hmac_tfm = hmac_tfm;
+
+ return ccp_sha_cra_init(tfm);
+}
+
+static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->u.sha.hmac_tfm)
+ crypto_free_ahash(ctx->u.sha.hmac_tfm);
+
+ ccp_sha_cra_exit(tfm);
+}
+
+static const __be32 sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+ cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
+ cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
+ cpu_to_be32(SHA1_H4), 0, 0, 0,
+};
+
+static const __be32 sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+ cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
+ cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
+ cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
+ cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
+};
+
+static const __be32 sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+ cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
+ cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
+ cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
+ cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
+};
+
+struct ccp_sha_def {
+ const char *name;
+ const char *drv_name;
+ const __be32 *init;
+ enum ccp_sha_type type;
+ u32 digest_size;
+ u32 block_size;
+};
+
+static struct ccp_sha_def sha_algs[] = {
+ {
+ .name = "sha1",
+ .drv_name = "sha1-ccp",
+ .init = sha1_init,
+ .type = CCP_SHA_TYPE_1,
+ .digest_size = SHA1_DIGEST_SIZE,
+ .block_size = SHA1_BLOCK_SIZE,
+ },
+ {
+ .name = "sha224",
+ .drv_name = "sha224-ccp",
+ .init = sha224_init,
+ .type = CCP_SHA_TYPE_224,
+ .digest_size = SHA224_DIGEST_SIZE,
+ .block_size = SHA224_BLOCK_SIZE,
+ },
+ {
+ .name = "sha256",
+ .drv_name = "sha256-ccp",
+ .init = sha256_init,
+ .type = CCP_SHA_TYPE_256,
+ .digest_size = SHA256_DIGEST_SIZE,
+ .block_size = SHA256_BLOCK_SIZE,
+ },
+};
+
+static int ccp_register_hmac_alg(struct list_head *head,
+ const struct ccp_sha_def *def,
+ const struct ccp_crypto_ahash_alg *base_alg)
+{
+ struct ccp_crypto_ahash_alg *ccp_alg;
+ struct ahash_alg *alg;
+ struct hash_alg_common *halg;
+ struct crypto_alg *base;
+ int ret;
+
+ ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+ if (!ccp_alg)
+ return -ENOMEM;
+
+ /* Copy the base algorithm and only change what's necessary */
+ *ccp_alg = *base_alg;
+ INIT_LIST_HEAD(&ccp_alg->entry);
+
+ strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);
+
+ alg = &ccp_alg->alg;
+ alg->setkey = ccp_sha_setkey;
+
+ halg = &alg->halg;
+
+ base = &halg->base;
+ snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name);
+ snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s",
+ def->drv_name);
+ base->cra_init = ccp_hmac_sha_cra_init;
+ base->cra_exit = ccp_hmac_sha_cra_exit;
+
+ ret = crypto_register_ahash(alg);
+ if (ret) {
+ pr_err("%s ahash algorithm registration error (%d)\n",
+ base->cra_name, ret);
+ kfree(ccp_alg);
+ return ret;
+ }
+
+ list_add(&ccp_alg->entry, head);
+
+ return ret;
+}
+
+static int ccp_register_sha_alg(struct list_head *head,
+ const struct ccp_sha_def *def)
+{
+ struct ccp_crypto_ahash_alg *ccp_alg;
+ struct ahash_alg *alg;
+ struct hash_alg_common *halg;
+ struct crypto_alg *base;
+ int ret;
+
+ ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+ if (!ccp_alg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ccp_alg->entry);
+
+ ccp_alg->init = def->init;
+ ccp_alg->type = def->type;
+
+ alg = &ccp_alg->alg;
+ alg->init = ccp_sha_init;
+ alg->update = ccp_sha_update;
+ alg->final = ccp_sha_final;
+ alg->finup = ccp_sha_finup;
+ alg->digest = ccp_sha_digest;
+
+ halg = &alg->halg;
+ halg->digestsize = def->digest_size;
+
+ base = &halg->base;
+ snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->drv_name);
+ base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK;
+ base->cra_blocksize = def->block_size;
+ base->cra_ctxsize = sizeof(struct ccp_ctx);
+ base->cra_priority = CCP_CRA_PRIORITY;
+ base->cra_type = &crypto_ahash_type;
+ base->cra_init = ccp_sha_cra_init;
+ base->cra_exit = ccp_sha_cra_exit;
+ base->cra_module = THIS_MODULE;
+
+ ret = crypto_register_ahash(alg);
+ if (ret) {
+ pr_err("%s ahash algorithm registration error (%d)\n",
+ base->cra_name, ret);
+ kfree(ccp_alg);
+ return ret;
+ }
+
+ list_add(&ccp_alg->entry, head);
+
+ ret = ccp_register_hmac_alg(head, def, ccp_alg);
+
+ return ret;
+}
+
+int ccp_register_sha_algs(struct list_head *head)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
+ ret = ccp_register_sha_alg(head, &sha_algs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
new file mode 100644
index 000000000000..b222231b6169
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -0,0 +1,197 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CCP_CRYPTO_H__
+#define __CCP_CRYPTO_H__
+
+
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/ccp.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+
+
+#define CCP_CRA_PRIORITY 300
+
+struct ccp_crypto_ablkcipher_alg {
+ struct list_head entry;
+
+ u32 mode;
+
+ struct crypto_alg alg;
+};
+
+struct ccp_crypto_ahash_alg {
+ struct list_head entry;
+
+ const __be32 *init;
+ u32 type;
+ u32 mode;
+
+ /* Child algorithm used for HMAC, CMAC, etc */
+ char child_alg[CRYPTO_MAX_ALG_NAME];
+
+ struct ahash_alg alg;
+};
+
+static inline struct ccp_crypto_ablkcipher_alg *
+ ccp_crypto_ablkcipher_alg(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+
+ return container_of(alg, struct ccp_crypto_ablkcipher_alg, alg);
+}
+
+static inline struct ccp_crypto_ahash_alg *
+ ccp_crypto_ahash_alg(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct ahash_alg *ahash_alg;
+
+ ahash_alg = container_of(alg, struct ahash_alg, halg.base);
+
+ return container_of(ahash_alg, struct ccp_crypto_ahash_alg, alg);
+}
+
+
+/***** AES related defines *****/
+struct ccp_aes_ctx {
+ /* Fallback cipher for XTS with unsupported unit sizes */
+ struct crypto_ablkcipher *tfm_ablkcipher;
+
+ /* Cipher used to generate CMAC K1/K2 keys */
+ struct crypto_cipher *tfm_cipher;
+
+ enum ccp_engine engine;
+ enum ccp_aes_type type;
+ enum ccp_aes_mode mode;
+
+ struct scatterlist key_sg;
+ unsigned int key_len;
+ u8 key[AES_MAX_KEY_SIZE];
+
+ u8 nonce[CTR_RFC3686_NONCE_SIZE];
+
+ /* CMAC key structures */
+ struct scatterlist k1_sg;
+ struct scatterlist k2_sg;
+ unsigned int kn_len;
+ u8 k1[AES_BLOCK_SIZE];
+ u8 k2[AES_BLOCK_SIZE];
+};
+
+struct ccp_aes_req_ctx {
+ struct scatterlist iv_sg;
+ u8 iv[AES_BLOCK_SIZE];
+
+ /* Fields used for RFC3686 requests */
+ u8 *rfc3686_info;
+ u8 rfc3686_iv[AES_BLOCK_SIZE];
+
+ struct ccp_cmd cmd;
+};
+
+struct ccp_aes_cmac_req_ctx {
+ unsigned int null_msg;
+ unsigned int final;
+
+ struct scatterlist *src;
+ unsigned int nbytes;
+
+ u64 hash_cnt;
+ unsigned int hash_rem;
+
+ struct sg_table data_sg;
+
+ struct scatterlist iv_sg;
+ u8 iv[AES_BLOCK_SIZE];
+
+ struct scatterlist buf_sg;
+ unsigned int buf_count;
+ u8 buf[AES_BLOCK_SIZE];
+
+ struct scatterlist pad_sg;
+ unsigned int pad_count;
+ u8 pad[AES_BLOCK_SIZE];
+
+ struct ccp_cmd cmd;
+};
+
+/***** SHA related defines *****/
+#define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE
+#define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
+struct ccp_sha_ctx {
+ unsigned int key_len;
+ u8 key[MAX_SHA_BLOCK_SIZE];
+ u8 ipad[MAX_SHA_BLOCK_SIZE];
+ u8 opad[MAX_SHA_BLOCK_SIZE];
+ struct crypto_ahash *hmac_tfm;
+};
+
+struct ccp_sha_req_ctx {
+ enum ccp_sha_type type;
+
+ u64 msg_bits;
+
+ unsigned int first;
+ unsigned int final;
+
+ struct scatterlist *src;
+ unsigned int nbytes;
+
+ u64 hash_cnt;
+ unsigned int hash_rem;
+
+ struct sg_table data_sg;
+
+ struct scatterlist ctx_sg;
+ u8 ctx[MAX_SHA_CONTEXT_SIZE];
+
+ struct scatterlist buf_sg;
+ unsigned int buf_count;
+ u8 buf[MAX_SHA_BLOCK_SIZE];
+
+ /* HMAC support field */
+ struct scatterlist pad_sg;
+
+ /* CCP driver command */
+ struct ccp_cmd cmd;
+};
+
+/***** Common Context Structure *****/
+struct ccp_ctx {
+ int (*complete)(struct crypto_async_request *req, int ret);
+
+ union {
+ struct ccp_aes_ctx aes;
+ struct ccp_sha_ctx sha;
+ } u;
+};
+
+int ccp_crypto_enqueue_request(struct crypto_async_request *req,
+ struct ccp_cmd *cmd);
+struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
+ struct scatterlist *sg_add);
+
+int ccp_register_aes_algs(struct list_head *head);
+int ccp_register_aes_cmac_algs(struct list_head *head);
+int ccp_register_aes_xts_algs(struct list_head *head);
+int ccp_register_sha_algs(struct list_head *head);
+
+#endif
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
new file mode 100644
index 000000000000..c3bc21264600
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -0,0 +1,595 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/cpu.h>
+#include <asm/cpu_device_id.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.0");
+MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");
+
+
+static struct ccp_device *ccp_dev;
+static inline struct ccp_device *ccp_get_device(void)
+{
+ return ccp_dev;
+}
+
+static inline void ccp_add_device(struct ccp_device *ccp)
+{
+ ccp_dev = ccp;
+}
+
+static inline void ccp_del_device(struct ccp_device *ccp)
+{
+ ccp_dev = NULL;
+}
+
+/**
+ * ccp_enqueue_cmd - queue an operation for processing by the CCP
+ *
+ * @cmd: ccp_cmd struct to be processed
+ *
+ * Queue a cmd to be processed by the CCP. If queueing the cmd
+ * would exceed the defined length of the cmd queue, the cmd will
+ * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will
+ * result in a return code of -EBUSY.
+ *
+ * The callback routine specified in the ccp_cmd struct will be
+ * called to notify the caller of completion (if the cmd was not
+ * backlogged) or advancement out of the backlog. If the cmd has
+ * advanced out of the backlog the "err" value of the callback
+ * will be -EINPROGRESS. Any other "err" value during callback is
+ * the result of the operation.
+ *
+ * The cmd has been successfully queued if:
+ * the return code is -EINPROGRESS or
+ * the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set
+ */
+int ccp_enqueue_cmd(struct ccp_cmd *cmd)
+{
+ struct ccp_device *ccp = ccp_get_device();
+ unsigned long flags;
+ unsigned int i;
+ int ret;
+
+ if (!ccp)
+ return -ENODEV;
+
+ /* Caller must supply a callback routine */
+ if (!cmd->callback)
+ return -EINVAL;
+
+ cmd->ccp = ccp;
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ i = ccp->cmd_q_count;
+
+ if (ccp->cmd_count >= MAX_CMD_QLEN) {
+ ret = -EBUSY;
+ if (cmd->flags & CCP_CMD_MAY_BACKLOG)
+ list_add_tail(&cmd->entry, &ccp->backlog);
+ } else {
+ ret = -EINPROGRESS;
+ ccp->cmd_count++;
+ list_add_tail(&cmd->entry, &ccp->cmd);
+
+ /* Find an idle queue */
+ if (!ccp->suspending) {
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ if (ccp->cmd_q[i].active)
+ continue;
+
+ break;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ /* If we found an idle queue, wake it up */
+ if (i < ccp->cmd_q_count)
+ wake_up_process(ccp->cmd_q[i].kthread);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
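Editorial note: a minimal caller sketch for the queueing contract documented above, based on the kernel-doc semantics and on the glue code in ccp-crypto-main.c; struct my_ctx, my_done() and my_submit() are hypothetical names for illustration, not part of the patch:

    /* Kernel context; needs <linux/completion.h> and <linux/ccp.h>. */
    struct my_ctx {
    	struct completion completion;
    	int result;
    };

    static void my_done(void *data, int err)
    {
    	struct my_ctx *ctx = data;

    	if (err == -EINPROGRESS)
    		return;			/* cmd advanced out of the backlog */

    	ctx->result = err;		/* final status of the operation */
    	complete(&ctx->completion);
    }

    static int my_submit(struct ccp_cmd *cmd, struct my_ctx *ctx)
    {
    	int ret;

    	init_completion(&ctx->completion);
    	cmd->callback = my_done;
    	cmd->data = ctx;
    	cmd->flags |= CCP_CMD_MAY_BACKLOG;

    	ret = ccp_enqueue_cmd(cmd);
    	if ((ret != -EINPROGRESS) && (ret != -EBUSY))
    		return ret;		/* not queued at all */

    	wait_for_completion(&ctx->completion);
    	return ctx->result;
    }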
+
+static void ccp_do_cmd_backlog(struct work_struct *work)
+{
+ struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
+ struct ccp_device *ccp = cmd->ccp;
+ unsigned long flags;
+ unsigned int i;
+
+ cmd->callback(cmd->data, -EINPROGRESS);
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ ccp->cmd_count++;
+ list_add_tail(&cmd->entry, &ccp->cmd);
+
+ /* Find an idle queue */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ if (ccp->cmd_q[i].active)
+ continue;
+
+ break;
+ }
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ /* If we found an idle queue, wake it up */
+ if (i < ccp->cmd_q_count)
+ wake_up_process(ccp->cmd_q[i].kthread);
+}
+
+static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
+{
+ struct ccp_device *ccp = cmd_q->ccp;
+ struct ccp_cmd *cmd = NULL;
+ struct ccp_cmd *backlog = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ cmd_q->active = 0;
+
+ if (ccp->suspending) {
+ cmd_q->suspended = 1;
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+ wake_up_interruptible(&ccp->suspend_queue);
+
+ return NULL;
+ }
+
+ if (ccp->cmd_count) {
+ cmd_q->active = 1;
+
+ cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
+ list_del(&cmd->entry);
+
+ ccp->cmd_count--;
+ }
+
+ if (!list_empty(&ccp->backlog)) {
+ backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
+ entry);
+ list_del(&backlog->entry);
+ }
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ if (backlog) {
+ INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
+ schedule_work(&backlog->work);
+ }
+
+ return cmd;
+}
+
+static void ccp_do_cmd_complete(struct work_struct *work)
+{
+ struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
+
+ cmd->callback(cmd->data, cmd->ret);
+}
+
+static int ccp_cmd_queue_thread(void *data)
+{
+ struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
+ struct ccp_cmd *cmd;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ cmd = ccp_dequeue_cmd(cmd_q);
+ if (!cmd)
+ continue;
+
+ __set_current_state(TASK_RUNNING);
+
+ /* Execute the command */
+ cmd->ret = ccp_run_cmd(cmd_q, cmd);
+
+ /* Schedule the completion callback */
+ INIT_WORK(&cmd->work, ccp_do_cmd_complete);
+ schedule_work(&cmd->work);
+ }
+
+ __set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
+ u32 trng_value;
+ int len = min_t(int, sizeof(trng_value), max);
+
+ /*
+ * Locking is provided by the caller so we can update device
+ * hwrng-related fields safely
+ */
+ trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
+ if (!trng_value) {
+ /* Zero is returned if no data is available or if a
+ * bad-entropy error is present. Assume an error if
+ * we exceed TRNG_RETRIES reads of zero.
+ */
+ if (ccp->hwrng_retries++ > TRNG_RETRIES)
+ return -EIO;
+
+ return 0;
+ }
+
+ /* Reset the counter and save the rng value */
+ ccp->hwrng_retries = 0;
+ memcpy(data, &trng_value, len);
+
+ return len;
+}
+
+/**
+ * ccp_alloc_struct - allocate and initialize the ccp_device struct
+ *
+ * @dev: device struct of the CCP
+ */
+struct ccp_device *ccp_alloc_struct(struct device *dev)
+{
+ struct ccp_device *ccp;
+
+ ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
+ if (ccp == NULL) {
+ dev_err(dev, "unable to allocate device struct\n");
+ return NULL;
+ }
+ ccp->dev = dev;
+
+ INIT_LIST_HEAD(&ccp->cmd);
+ INIT_LIST_HEAD(&ccp->backlog);
+
+ spin_lock_init(&ccp->cmd_lock);
+ mutex_init(&ccp->req_mutex);
+ mutex_init(&ccp->ksb_mutex);
+ ccp->ksb_count = KSB_COUNT;
+ ccp->ksb_start = 0;
+
+ return ccp;
+}
+
+/**
+ * ccp_init - initialize the CCP device
+ *
+ * @ccp: ccp_device struct
+ */
+int ccp_init(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+ struct ccp_cmd_queue *cmd_q;
+ struct dma_pool *dma_pool;
+ char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+ unsigned int qmr, qim, i;
+ int ret;
+
+ /* Find available queues */
+ qim = 0;
+ qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+ for (i = 0; i < MAX_HW_QUEUES; i++) {
+ if (!(qmr & (1 << i)))
+ continue;
+
+ /* Allocate a dma pool for this queue */
+ snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
+ dma_pool = dma_pool_create(dma_pool_name, dev,
+ CCP_DMAPOOL_MAX_SIZE,
+ CCP_DMAPOOL_ALIGN, 0);
+ if (!dma_pool) {
+ dev_err(dev, "unable to allocate dma pool\n");
+ ret = -ENOMEM;
+ goto e_pool;
+ }
+
+ cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
+ ccp->cmd_q_count++;
+
+ cmd_q->ccp = ccp;
+ cmd_q->id = i;
+ cmd_q->dma_pool = dma_pool;
+
+ /* Reserve 2 KSB regions for the queue */
+ cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
+ cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
+ ccp->ksb_count -= 2;
+
+ /* Preset some register values and masks that are queue
+ * number dependent
+ */
+ cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
+ (CMD_Q_STATUS_INCR * i);
+ cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
+ (CMD_Q_STATUS_INCR * i);
+ cmd_q->int_ok = 1 << (i * 2);
+ cmd_q->int_err = 1 << ((i * 2) + 1);
+
+ cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
+
+ init_waitqueue_head(&cmd_q->int_queue);
+
+ /* Build queue interrupt mask (two interrupts per queue) */
+ qim |= cmd_q->int_ok | cmd_q->int_err;
+
+ dev_dbg(dev, "queue #%u available\n", i);
+ }
+ if (ccp->cmd_q_count == 0) {
+ dev_notice(dev, "no command queues available\n");
+ ret = -EIO;
+ goto e_pool;
+ }
+ dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
+
+ /* Disable and clear interrupts until ready */
+ iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ ioread32(cmd_q->reg_int_status);
+ ioread32(cmd_q->reg_status);
+ }
+ iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+
+ /* Request an irq */
+ ret = ccp->get_irq(ccp);
+ if (ret) {
+ dev_err(dev, "unable to allocate an IRQ\n");
+ goto e_pool;
+ }
+
+ /* Initialize the queues used to wait for KSB space and suspend */
+ init_waitqueue_head(&ccp->ksb_queue);
+ init_waitqueue_head(&ccp->suspend_queue);
+
+ /* Create a kthread for each queue */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ struct task_struct *kthread;
+
+ cmd_q = &ccp->cmd_q[i];
+
+ kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
+ "ccp-q%u", cmd_q->id);
+ if (IS_ERR(kthread)) {
+ dev_err(dev, "error creating queue thread (%ld)\n",
+ PTR_ERR(kthread));
+ ret = PTR_ERR(kthread);
+ goto e_kthread;
+ }
+
+ cmd_q->kthread = kthread;
+ wake_up_process(kthread);
+ }
+
+ /* Register the RNG */
+ ccp->hwrng.name = "ccp-rng";
+ ccp->hwrng.read = ccp_trng_read;
+ ret = hwrng_register(&ccp->hwrng);
+ if (ret) {
+ dev_err(dev, "error registering hwrng (%d)\n", ret);
+ goto e_kthread;
+ }
+
+ /* Make the device struct available before enabling interrupts */
+ ccp_add_device(ccp);
+
+ /* Enable interrupts */
+ iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+
+ return 0;
+
+e_kthread:
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ if (ccp->cmd_q[i].kthread)
+ kthread_stop(ccp->cmd_q[i].kthread);
+
+ ccp->free_irq(ccp);
+
+e_pool:
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+ return ret;
+}
+
+/**
+ * ccp_destroy - tear down the CCP device
+ *
+ * @ccp: ccp_device struct
+ */
+void ccp_destroy(struct ccp_device *ccp)
+{
+ struct ccp_cmd_queue *cmd_q;
+ struct ccp_cmd *cmd;
+ unsigned int qim, i;
+
+ /* Remove general access to the device struct */
+ ccp_del_device(ccp);
+
+ /* Unregister the RNG */
+ hwrng_unregister(&ccp->hwrng);
+
+ /* Stop the queue kthreads */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ if (ccp->cmd_q[i].kthread)
+ kthread_stop(ccp->cmd_q[i].kthread);
+
+ /* Build queue interrupt mask (two interrupt masks per queue) */
+ qim = 0;
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+ qim |= cmd_q->int_ok | cmd_q->int_err;
+ }
+
+ /* Disable and clear interrupts */
+ iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ ioread32(cmd_q->reg_int_status);
+ ioread32(cmd_q->reg_status);
+ }
+ iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+
+ ccp->free_irq(ccp);
+
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+ /* Flush the cmd and backlog queue */
+ while (!list_empty(&ccp->cmd)) {
+ /* Invoke the callback directly with an error code */
+ cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
+ list_del(&cmd->entry);
+ cmd->callback(cmd->data, -ENODEV);
+ }
+ while (!list_empty(&ccp->backlog)) {
+ /* Invoke the callback directly with an error code */
+ cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
+ list_del(&cmd->entry);
+ cmd->callback(cmd->data, -ENODEV);
+ }
+}
+
+/**
+ * ccp_irq_handler - handle interrupts generated by the CCP device
+ *
+ * @irq: the irq associated with the interrupt
+ * @data: the data value supplied when the irq was created
+ */
+irqreturn_t ccp_irq_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+ struct ccp_cmd_queue *cmd_q;
+ u32 q_int, status;
+ unsigned int i;
+
+ status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+ if (q_int) {
+ cmd_q->int_status = status;
+ cmd_q->q_status = ioread32(cmd_q->reg_status);
+ cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+ /* On error, only save the first error value */
+ if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+ cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+ cmd_q->int_rcvd = 1;
+
+ /* Acknowledge the interrupt and wake the kthread */
+ iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+ wake_up_interruptible(&cmd_q->int_queue);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PM
+bool ccp_queues_suspended(struct ccp_device *ccp)
+{
+ unsigned int suspended = 0;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ if (ccp->cmd_q[i].suspended)
+ suspended++;
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ return ccp->cmd_q_count == suspended;
+}
+#endif
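+
+/* Suspend flow sketch (assumed bus-layer usage, e.g. a PCI suspend handler;
+ * the actual caller is outside this file): the handler sets ccp->suspending,
+ * nudges the queue kthreads so each one parks and sets its ->suspended flag,
+ * and then blocks until ccp_queues_suspended() reports all queues idle:
+ *
+ * ccp->suspending = 1;
+ * ... wake each cmd_q[i].kthread so it notices the flag ...
+ * ret = wait_event_interruptible(ccp->suspend_queue,
+ * ccp_queues_suspended(ccp));
+ */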
+
+static const struct x86_cpu_id ccp_support[] = {
+ { X86_VENDOR_AMD, 22, },
+ { },
+};
+
+static int __init ccp_mod_init(void)
+{
+ struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
+ int ret;
+
+ if (!x86_match_cpu(ccp_support))
+ return -ENODEV;
+
+ switch (cpuinfo->x86) {
+ case 22:
+ if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
+ return -ENODEV;
+
+ ret = ccp_pci_init();
+ if (ret)
+ return ret;
+
+ /* Don't leave the driver loaded if init failed */
+ if (!ccp_get_device()) {
+ ccp_pci_exit();
+ return -ENODEV;
+ }
+
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static void __exit ccp_mod_exit(void)
+{
+ struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
+
+ switch (cpuinfo->x86) {
+ case 22:
+ ccp_pci_exit();
+ break;
+ }
+}
+
+module_init(ccp_mod_init);
+module_exit(ccp_mod_exit);
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
new file mode 100644
index 000000000000..7ec536e702ec
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -0,0 +1,272 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CCP_DEV_H__
+#define __CCP_DEV_H__
+
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+#include <linux/hw_random.h>
+
+
+#define IO_OFFSET 0x20000
+
+#define MAX_DMAPOOL_NAME_LEN 32
+
+#define MAX_HW_QUEUES 5
+#define MAX_CMD_QLEN 100
+
+#define TRNG_RETRIES 10
+
+
+/****** Register Mappings ******/
+#define Q_MASK_REG 0x000
+#define TRNG_OUT_REG 0x00c
+#define IRQ_MASK_REG 0x040
+#define IRQ_STATUS_REG 0x200
+
+#define DEL_CMD_Q_JOB 0x124
+#define DEL_Q_ACTIVE 0x00000200
+#define DEL_Q_ID_SHIFT 6
+
+#define CMD_REQ0 0x180
+#define CMD_REQ_INCR 0x04
+
+#define CMD_Q_STATUS_BASE 0x210
+#define CMD_Q_INT_STATUS_BASE 0x214
+#define CMD_Q_STATUS_INCR 0x20
+
+#define CMD_Q_CACHE 0x228
+#define CMD_Q_CACHE_INC 0x20
+
+#define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f)
+#define CMD_Q_DEPTH(__qs) (((__qs) >> 12) & 0x0000000f)
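+
+/* Example: decoding a command queue status word. The low 6 bits hold the
+ * error code and bits 12-15 the free command slot count, mirroring how
+ * ccp_init() and ccp_irq_handler() use these macros:
+ *
+ * status = ioread32(cmd_q->reg_status);
+ * cmd_q->free_slots = CMD_Q_DEPTH(status);
+ * cmd_q->cmd_error = CMD_Q_ERROR(status);
+ */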
+
+/****** REQ0 Related Values ******/
+#define REQ0_WAIT_FOR_WRITE 0x00000004
+#define REQ0_INT_ON_COMPLETE 0x00000002
+#define REQ0_STOP_ON_COMPLETE 0x00000001
+
+#define REQ0_CMD_Q_SHIFT 9
+#define REQ0_JOBID_SHIFT 3
+
+/****** REQ1 Related Values ******/
+#define REQ1_PROTECT_SHIFT 27
+#define REQ1_ENGINE_SHIFT 23
+#define REQ1_KEY_KSB_SHIFT 2
+
+#define REQ1_EOM 0x00000002
+#define REQ1_INIT 0x00000001
+
+/* AES Related Values */
+#define REQ1_AES_TYPE_SHIFT 21
+#define REQ1_AES_MODE_SHIFT 18
+#define REQ1_AES_ACTION_SHIFT 17
+#define REQ1_AES_CFB_SIZE_SHIFT 10
+
+/* XTS-AES Related Values */
+#define REQ1_XTS_AES_SIZE_SHIFT 10
+
+/* SHA Related Values */
+#define REQ1_SHA_TYPE_SHIFT 21
+
+/* RSA Related Values */
+#define REQ1_RSA_MOD_SIZE_SHIFT 10
+
+/* Pass-Through Related Values */
+#define REQ1_PT_BW_SHIFT 12
+#define REQ1_PT_BS_SHIFT 10
+
+/* ECC Related Values */
+#define REQ1_ECC_AFFINE_CONVERT 0x00200000
+#define REQ1_ECC_FUNCTION_SHIFT 18
+
+/****** REQ4 Related Values ******/
+#define REQ4_KSB_SHIFT 18
+#define REQ4_MEMTYPE_SHIFT 16
+
+/****** REQ6 Related Values ******/
+#define REQ6_MEMTYPE_SHIFT 16
+
+
+/****** Key Storage Block ******/
+#define KSB_START 77
+#define KSB_END 127
+#define KSB_COUNT (KSB_END - KSB_START + 1)
+#define CCP_KSB_BITS 256
+#define CCP_KSB_BYTES 32
+
+#define CCP_JOBID_MASK 0x0000003f
+
+#define CCP_DMAPOOL_MAX_SIZE 64
+#define CCP_DMAPOOL_ALIGN (1 << 5)
+
+#define CCP_REVERSE_BUF_SIZE 64
+
+#define CCP_AES_KEY_KSB_COUNT 1
+#define CCP_AES_CTX_KSB_COUNT 1
+
+#define CCP_XTS_AES_KEY_KSB_COUNT 1
+#define CCP_XTS_AES_CTX_KSB_COUNT 1
+
+#define CCP_SHA_KSB_COUNT 1
+
+#define CCP_RSA_MAX_WIDTH 4096
+
+#define CCP_PASSTHRU_BLOCKSIZE 256
+#define CCP_PASSTHRU_MASKSIZE 32
+#define CCP_PASSTHRU_KSB_COUNT 1
+
+#define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */
+#define CCP_ECC_MAX_OPERANDS 6
+#define CCP_ECC_MAX_OUTPUTS 3
+#define CCP_ECC_SRC_BUF_SIZE 448
+#define CCP_ECC_DST_BUF_SIZE 192
+#define CCP_ECC_OPERAND_SIZE 64
+#define CCP_ECC_OUTPUT_SIZE 64
+#define CCP_ECC_RESULT_OFFSET 60
+#define CCP_ECC_RESULT_SUCCESS 0x0001
+
+
+struct ccp_device;
+struct ccp_cmd;
+
+struct ccp_cmd_queue {
+ struct ccp_device *ccp;
+
+ /* Queue identifier */
+ u32 id;
+
+ /* Queue dma pool */
+ struct dma_pool *dma_pool;
+
+ /* Queue reserved KSB regions */
+ u32 ksb_key;
+ u32 ksb_ctx;
+
+ /* Queue processing thread */
+ struct task_struct *kthread;
+ unsigned int active;
+ unsigned int suspended;
+
+ /* Number of free command slots available */
+ unsigned int free_slots;
+
+ /* Interrupt masks */
+ u32 int_ok;
+ u32 int_err;
+
+ /* Register addresses for queue */
+ void __iomem *reg_status;
+ void __iomem *reg_int_status;
+
+ /* Status values from job */
+ u32 int_status;
+ u32 q_status;
+ u32 q_int_status;
+ u32 cmd_error;
+
+ /* Interrupt wait queue */
+ wait_queue_head_t int_queue;
+ unsigned int int_rcvd;
+} ____cacheline_aligned;
+
+struct ccp_device {
+ struct device *dev;
+
+ /*
+ * Bus specific device information
+ */
+ void *dev_specific;
+ int (*get_irq)(struct ccp_device *ccp);
+ void (*free_irq)(struct ccp_device *ccp);
+
+ /*
+ * I/O area used for device communication. The register mapping
+ * starts at an offset into the mapped bar.
+ * The CMD_REQx registers and the Delete_Cmd_Queue_Job register
+ * need to be protected while a command queue thread is accessing
+ * them.
+ */
+ struct mutex req_mutex ____cacheline_aligned;
+ void __iomem *io_map;
+ void __iomem *io_regs;
+
+ /*
+ * Master lists that all cmds are queued on. Because there can be
+ * more than one CCP command queue that can process a cmd a separate
+ * backlog list is needed so that the backlog completion call
+ * completes before the cmd is available for execution.
+ */
+ spinlock_t cmd_lock ____cacheline_aligned;
+ unsigned int cmd_count;
+ struct list_head cmd;
+ struct list_head backlog;
+
+ /*
+ * The command queues. These represent the queues on the CCP that
+ * are available for processing cmds
+ */
+ struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES];
+ unsigned int cmd_q_count;
+
+ /*
+ * Support for the CCP True RNG
+ */
+ struct hwrng hwrng;
+ unsigned int hwrng_retries;
+
+ /*
+ * A counter used to generate job-ids for cmds submitted to the CCP
+ */
+ atomic_t current_id ____cacheline_aligned;
+
+ /*
+ * The CCP uses key storage blocks (KSB) to maintain context for certain
+ * operations. To prevent multiple cmds from using the same KSB range
+ * a command queue reserves a KSB range for the duration of the cmd.
+ * Each queue will, however, reserve 2 KSB blocks for operations that
+ * only require single KSB entries (e.g. AES context/iv and key) in order
+ * to avoid allocation contention. This will reserve at most 10 KSB
+ * entries, leaving 40 KSB entries available for dynamic allocation.
+ */
+ struct mutex ksb_mutex ____cacheline_aligned;
+ DECLARE_BITMAP(ksb, KSB_COUNT);
+ wait_queue_head_t ksb_queue;
+ unsigned int ksb_avail;
+ unsigned int ksb_count;
+ u32 ksb_start;
+
+ /* Suspend support */
+ unsigned int suspending;
+ wait_queue_head_t suspend_queue;
+};
+
+
+int ccp_pci_init(void);
+void ccp_pci_exit(void);
+
+struct ccp_device *ccp_alloc_struct(struct device *dev);
+int ccp_init(struct ccp_device *ccp);
+void ccp_destroy(struct ccp_device *ccp);
+bool ccp_queues_suspended(struct ccp_device *ccp);
+
+irqreturn_t ccp_irq_handler(int irq, void *data);
+
+int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
+
+#endif
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
new file mode 100644
index 000000000000..71ed3ade7e12
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -0,0 +1,2024 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/ccp.h>
+#include <linux/scatterlist.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-dev.h"
+
+
+enum ccp_memtype {
+ CCP_MEMTYPE_SYSTEM = 0,
+ CCP_MEMTYPE_KSB,
+ CCP_MEMTYPE_LOCAL,
+ CCP_MEMTYPE__LAST,
+};
+
+struct ccp_dma_info {
+ dma_addr_t address;
+ unsigned int offset;
+ unsigned int length;
+ enum dma_data_direction dir;
+};
+
+struct ccp_dm_workarea {
+ struct device *dev;
+ struct dma_pool *dma_pool;
+ unsigned int length;
+
+ u8 *address;
+ struct ccp_dma_info dma;
+};
+
+struct ccp_sg_workarea {
+ struct scatterlist *sg;
+ unsigned int nents;
+ unsigned int length;
+
+ struct scatterlist *dma_sg;
+ struct device *dma_dev;
+ unsigned int dma_count;
+ enum dma_data_direction dma_dir;
+
+ unsigned int sg_used;
+
+ u64 bytes_left;
+};
+
+struct ccp_data {
+ struct ccp_sg_workarea sg_wa;
+ struct ccp_dm_workarea dm_wa;
+};
+
+struct ccp_mem {
+ enum ccp_memtype type;
+ union {
+ struct ccp_dma_info dma;
+ u32 ksb;
+ } u;
+};
+
+struct ccp_aes_op {
+ enum ccp_aes_type type;
+ enum ccp_aes_mode mode;
+ enum ccp_aes_action action;
+};
+
+struct ccp_xts_aes_op {
+ enum ccp_aes_action action;
+ enum ccp_xts_aes_unit_size unit_size;
+};
+
+struct ccp_sha_op {
+ enum ccp_sha_type type;
+ u64 msg_bits;
+};
+
+struct ccp_rsa_op {
+ u32 mod_size;
+ u32 input_len;
+};
+
+struct ccp_passthru_op {
+ enum ccp_passthru_bitwise bit_mod;
+ enum ccp_passthru_byteswap byte_swap;
+};
+
+struct ccp_ecc_op {
+ enum ccp_ecc_function function;
+};
+
+struct ccp_op {
+ struct ccp_cmd_queue *cmd_q;
+
+ u32 jobid;
+ u32 ioc;
+ u32 soc;
+ u32 ksb_key;
+ u32 ksb_ctx;
+ u32 init;
+ u32 eom;
+
+ struct ccp_mem src;
+ struct ccp_mem dst;
+
+ union {
+ struct ccp_aes_op aes;
+ struct ccp_xts_aes_op xts;
+ struct ccp_sha_op sha;
+ struct ccp_rsa_op rsa;
+ struct ccp_passthru_op passthru;
+ struct ccp_ecc_op ecc;
+ } u;
+};
+
+/* The CCP cannot perform zero-length sha operations so the caller
+ * is required to buffer data for the final operation. However, a
+ * sha operation for a message with a total length of zero is valid
+ * so known values are required to supply the result.
+ */
+static const u8 ccp_sha1_zero[CCP_SHA_CTXSIZE] = {
+ 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+ 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+ 0xaf, 0xd8, 0x07, 0x09, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ccp_sha224_zero[CCP_SHA_CTXSIZE] = {
+ 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9,
+ 0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4,
+ 0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a,
+ 0xc5, 0xb3, 0xe4, 0x2f, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ccp_sha256_zero[CCP_SHA_CTXSIZE] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+ 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+ 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+ 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
+};
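+
+/* The three tables above are the standard SHA-1/224/256 digests of the
+ * empty message; ccp_run_sha_cmd() copies the matching table directly into
+ * the caller's context when a final operation is requested with a total
+ * message length of zero.
+ */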
+
+static u32 ccp_addr_lo(struct ccp_dma_info *info)
+{
+ return lower_32_bits(info->address + info->offset);
+}
+
+static u32 ccp_addr_hi(struct ccp_dma_info *info)
+{
+ return upper_32_bits(info->address + info->offset) & 0x0000ffff;
+}
+
+static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
+{
+ struct ccp_cmd_queue *cmd_q = op->cmd_q;
+ struct ccp_device *ccp = cmd_q->ccp;
+ void __iomem *cr_addr;
+ u32 cr0, cmd;
+ unsigned int i;
+ int ret = 0;
+
+ /* We could read a status register to see how many free slots
+ * are actually available, but reading that register resets it
+ * and you could lose some error information.
+ */
+ cmd_q->free_slots--;
+
+ cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
+ | (op->jobid << REQ0_JOBID_SHIFT)
+ | REQ0_WAIT_FOR_WRITE;
+
+ if (op->soc)
+ cr0 |= REQ0_STOP_ON_COMPLETE
+ | REQ0_INT_ON_COMPLETE;
+
+ if (op->ioc || !cmd_q->free_slots)
+ cr0 |= REQ0_INT_ON_COMPLETE;
+
+ /* Start at CMD_REQ1 */
+ cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;
+
+ mutex_lock(&ccp->req_mutex);
+
+ /* Write CMD_REQ1 through CMD_REQx first */
+ for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
+ iowrite32(*(cr + i), cr_addr);
+
+ /* Tell the CCP to start */
+ wmb();
+ iowrite32(cr0, ccp->io_regs + CMD_REQ0);
+
+ mutex_unlock(&ccp->req_mutex);
+
+ if (cr0 & REQ0_INT_ON_COMPLETE) {
+ /* Wait for the job to complete */
+ ret = wait_event_interruptible(cmd_q->int_queue,
+ cmd_q->int_rcvd);
+ if (ret || cmd_q->cmd_error) {
+ /* On error delete all related jobs from the queue */
+ cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
+ | op->jobid;
+
+ iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
+
+ if (!ret)
+ ret = -EIO;
+ } else if (op->soc) {
+ /* Delete just head job from the queue on SoC */
+ cmd = DEL_Q_ACTIVE
+ | (cmd_q->id << DEL_Q_ID_SHIFT)
+ | op->jobid;
+
+ iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
+ }
+
+ cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);
+
+ cmd_q->int_rcvd = 0;
+ }
+
+ return ret;
+}
+
+static int ccp_perform_aes(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
+ | (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
+ | (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
+ | (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
+ | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ if (op->u.aes.mode == CCP_AES_MODE_CFB)
+ cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);
+
+ if (op->eom)
+ cr[0] |= REQ1_EOM;
+
+ if (op->init)
+ cr[0] |= REQ1_INIT;
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_xts_aes(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
+ | (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
+ | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
+ | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ if (op->eom)
+ cr[0] |= REQ1_EOM;
+
+ if (op->init)
+ cr[0] |= REQ1_INIT;
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_sha(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
+ | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
+ | REQ1_INIT;
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+
+ if (op->eom) {
+ cr[0] |= REQ1_EOM;
+ cr[4] = lower_32_bits(op->u.sha.msg_bits);
+ cr[5] = upper_32_bits(op->u.sha.msg_bits);
+ } else {
+ cr[4] = 0;
+ cr[5] = 0;
+ }
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_rsa(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
+ | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
+ | (op->ksb_key << REQ1_KEY_KSB_SHIFT)
+ | REQ1_EOM;
+ cr[1] = op->u.rsa.input_len - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_passthru(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
+ | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
+ | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);
+
+ if (op->src.type == CCP_MEMTYPE_SYSTEM)
+ cr[1] = op->src.u.dma.length - 1;
+ else
+ cr[1] = op->dst.u.dma.length - 1;
+
+ if (op->src.type == CCP_MEMTYPE_SYSTEM) {
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+
+ if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+ cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
+ } else {
+ cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
+ cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
+ }
+
+ if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+ } else {
+ cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
+ cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
+ }
+
+ if (op->eom)
+ cr[0] |= REQ1_EOM;
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_ecc(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = REQ1_ECC_AFFINE_CONVERT
+ | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
+ | (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
+ | REQ1_EOM;
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count)
+{
+ int start;
+
+ for (;;) {
+ mutex_lock(&ccp->ksb_mutex);
+
+ start = (u32)bitmap_find_next_zero_area(ccp->ksb,
+ ccp->ksb_count,
+ ccp->ksb_start,
+ count, 0);
+ if (start <= ccp->ksb_count) {
+ bitmap_set(ccp->ksb, start, count);
+
+ mutex_unlock(&ccp->ksb_mutex);
+ break;
+ }
+
+ ccp->ksb_avail = 0;
+
+ mutex_unlock(&ccp->ksb_mutex);
+
+ /* Wait for KSB entries to become available */
+ if (wait_event_interruptible(ccp->ksb_queue, ccp->ksb_avail))
+ return 0;
+ }
+
+ return KSB_START + start;
+}
+
+static void ccp_free_ksb(struct ccp_device *ccp, unsigned int start,
+ unsigned int count)
+{
+ if (!start)
+ return;
+
+ mutex_lock(&ccp->ksb_mutex);
+
+ bitmap_clear(ccp->ksb, start - KSB_START, count);
+
+ ccp->ksb_avail = 1;
+
+ mutex_unlock(&ccp->ksb_mutex);
+
+ wake_up_interruptible_all(&ccp->ksb_queue);
+}
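+
+/* Reservation pattern: a cmd that needs more than the two per-queue KSB
+ * entries allocates a contiguous range for the lifetime of that cmd and
+ * frees it afterwards, as the RSA path below does:
+ *
+ * op.ksb_key = ccp_alloc_ksb(cmd_q->ccp, ksb_count);
+ * if (!op.ksb_key)
+ * return -EIO;
+ * ...
+ * ccp_free_ksb(cmd_q->ccp, op.ksb_key, ksb_count);
+ */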
+
+static u32 ccp_gen_jobid(struct ccp_device *ccp)
+{
+ return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
+}
+
+static void ccp_sg_free(struct ccp_sg_workarea *wa)
+{
+ if (wa->dma_count)
+ dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);
+
+ wa->dma_count = 0;
+}
+
+static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
+ struct scatterlist *sg, u64 len,
+ enum dma_data_direction dma_dir)
+{
+ memset(wa, 0, sizeof(*wa));
+
+ wa->sg = sg;
+ if (!sg)
+ return 0;
+
+ wa->nents = sg_nents(sg);
+ wa->length = sg->length;
+ wa->bytes_left = len;
+ wa->sg_used = 0;
+
+ if (len == 0)
+ return 0;
+
+ if (dma_dir == DMA_NONE)
+ return 0;
+
+ wa->dma_sg = sg;
+ wa->dma_dev = dev;
+ wa->dma_dir = dma_dir;
+ wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
+ if (!wa->dma_count)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
+{
+ unsigned int nbytes = min_t(u64, len, wa->bytes_left);
+
+ if (!wa->sg)
+ return;
+
+ wa->sg_used += nbytes;
+ wa->bytes_left -= nbytes;
+ if (wa->sg_used == wa->sg->length) {
+ wa->sg = sg_next(wa->sg);
+ wa->sg_used = 0;
+ }
+}
+
+static void ccp_dm_free(struct ccp_dm_workarea *wa)
+{
+ if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
+ if (wa->address)
+ dma_pool_free(wa->dma_pool, wa->address,
+ wa->dma.address);
+ } else {
+ if (wa->dma.address)
+ dma_unmap_single(wa->dev, wa->dma.address, wa->length,
+ wa->dma.dir);
+ kfree(wa->address);
+ }
+
+ wa->address = NULL;
+ wa->dma.address = 0;
+}
+
+static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
+ struct ccp_cmd_queue *cmd_q,
+ unsigned int len,
+ enum dma_data_direction dir)
+{
+ memset(wa, 0, sizeof(*wa));
+
+ if (!len)
+ return 0;
+
+ wa->dev = cmd_q->ccp->dev;
+ wa->length = len;
+
+ if (len <= CCP_DMAPOOL_MAX_SIZE) {
+ wa->dma_pool = cmd_q->dma_pool;
+
+ wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
+ &wa->dma.address);
+ if (!wa->address)
+ return -ENOMEM;
+
+ wa->dma.length = CCP_DMAPOOL_MAX_SIZE;
+
+ memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
+ } else {
+ wa->address = kzalloc(len, GFP_KERNEL);
+ if (!wa->address)
+ return -ENOMEM;
+
+ wa->dma.address = dma_map_single(wa->dev, wa->address, len,
+ dir);
+ if (!wa->dma.address)
+ return -ENOMEM;
+
+ wa->dma.length = len;
+ }
+ wa->dma.dir = dir;
+
+ return 0;
+}
+
+static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+ struct scatterlist *sg, unsigned int sg_offset,
+ unsigned int len)
+{
+ WARN_ON(!wa->address);
+
+ scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
+ 0);
+}
+
+static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+ struct scatterlist *sg, unsigned int sg_offset,
+ unsigned int len)
+{
+ WARN_ON(!wa->address);
+
+ scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
+ 1);
+}
+
+static void ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
+ struct scatterlist *sg,
+ unsigned int len, unsigned int se_len,
+ bool sign_extend)
+{
+ unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
+ u8 buffer[CCP_REVERSE_BUF_SIZE];
+
+ BUG_ON(se_len > sizeof(buffer));
+
+ sg_offset = len;
+ dm_offset = 0;
+ nbytes = len;
+ while (nbytes) {
+ ksb_len = min_t(unsigned int, nbytes, se_len);
+ sg_offset -= ksb_len;
+
+ scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 0);
+ for (i = 0; i < ksb_len; i++)
+ wa->address[dm_offset + i] = buffer[ksb_len - i - 1];
+
+ dm_offset += ksb_len;
+ nbytes -= ksb_len;
+
+ if ((ksb_len != se_len) && sign_extend) {
+ /* Must sign-extend to nearest sign-extend length */
+ if (wa->address[dm_offset - 1] & 0x80)
+ memset(wa->address + dm_offset, 0xff,
+ se_len - ksb_len);
+ }
+ }
+}
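+
+/* Worked example: an 8-byte big endian operand reversed into a 32-byte
+ * (CCP_KSB_BYTES) area. Input 01 02 03 04 05 06 07 08 is written as
+ * 08 07 06 05 04 03 02 01 at the start of wa->address; the remainder of
+ * the zero-initialized area stays 0x00, or is filled with 0xff when
+ * sign_extend is set and the most significant input byte has its top
+ * bit set.
+ */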
+
+static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
+ struct scatterlist *sg,
+ unsigned int len)
+{
+ unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
+ u8 buffer[CCP_REVERSE_BUF_SIZE];
+
+ sg_offset = 0;
+ dm_offset = len;
+ nbytes = len;
+ while (nbytes) {
+ ksb_len = min_t(unsigned int, nbytes, sizeof(buffer));
+ dm_offset -= ksb_len;
+
+ for (i = 0; i < ksb_len; i++)
+ buffer[ksb_len - i - 1] = wa->address[dm_offset + i];
+ scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 1);
+
+ sg_offset += ksb_len;
+ nbytes -= ksb_len;
+ }
+}
+
+static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
+{
+ ccp_dm_free(&data->dm_wa);
+ ccp_sg_free(&data->sg_wa);
+}
+
+static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
+ struct scatterlist *sg, u64 sg_len,
+ unsigned int dm_len,
+ enum dma_data_direction dir)
+{
+ int ret;
+
+ memset(data, 0, sizeof(*data));
+
+ ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
+ dir);
+ if (ret)
+ goto e_err;
+
+ ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
+ if (ret)
+ goto e_err;
+
+ return 0;
+
+e_err:
+ ccp_free_data(data, cmd_q);
+
+ return ret;
+}
+
+static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
+{
+ struct ccp_sg_workarea *sg_wa = &data->sg_wa;
+ struct ccp_dm_workarea *dm_wa = &data->dm_wa;
+ unsigned int buf_count, nbytes;
+
+ /* Clear the buffer if setting it */
+ if (!from)
+ memset(dm_wa->address, 0, dm_wa->length);
+
+ if (!sg_wa->sg)
+ return 0;
+
+ /* Perform the copy operation
+ * nbytes will always be <= UINT_MAX because dm_wa->length is
+ * an unsigned int
+ */
+ nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
+ scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
+ nbytes, from);
+
+ /* Update the structures and generate the count */
+ buf_count = 0;
+ while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
+ nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
+ dm_wa->length - buf_count);
+ nbytes = min_t(u64, sg_wa->bytes_left, nbytes);
+
+ buf_count += nbytes;
+ ccp_update_sg_workarea(sg_wa, nbytes);
+ }
+
+ return buf_count;
+}
+
+static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
+{
+ return ccp_queue_buf(data, 0);
+}
+
+static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
+{
+ return ccp_queue_buf(data, 1);
+}
+
+static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
+ struct ccp_op *op, unsigned int block_size,
+ bool blocksize_op)
+{
+ unsigned int sg_src_len, sg_dst_len, op_len;
+
+ /* The CCP can only DMA from/to a single source and a single destination
+ * address per operation, so we must use the smaller of the contiguous
+ * areas available in the source and the destination. The resulting len
+ * values will always be <= UINT_MAX
+ * because the dma length is an unsigned int.
+ */
+ sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
+ sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);
+
+ if (dst) {
+ sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
+ sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
+ op_len = min(sg_src_len, sg_dst_len);
+ } else
+ op_len = sg_src_len;
+
+ /* The data operation length will be at least block_size in length
+ * or the smaller of available sg room remaining for the source or
+ * the destination
+ */
+ op_len = max(op_len, block_size);
+
+ /* Unless we have to buffer data, there's no reason to wait */
+ op->soc = 0;
+
+ if (sg_src_len < block_size) {
+ /* Not enough data in the sg element, so it
+ * needs to be buffered into a blocksize chunk
+ */
+ int cp_len = ccp_fill_queue_buf(src);
+
+ op->soc = 1;
+ op->src.u.dma.address = src->dm_wa.dma.address;
+ op->src.u.dma.offset = 0;
+ op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
+ } else {
+ /* Enough data in the sg element, but we need to
+ * adjust for any previously copied data
+ */
+ op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
+ op->src.u.dma.offset = src->sg_wa.sg_used;
+ op->src.u.dma.length = op_len & ~(block_size - 1);
+
+ ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
+ }
+
+ if (dst) {
+ if (sg_dst_len < block_size) {
+ /* Not enough room in the sg element or we're on the
+ * last piece of data (when using padding), so the
+ * output needs to be buffered into a blocksize chunk
+ */
+ op->soc = 1;
+ op->dst.u.dma.address = dst->dm_wa.dma.address;
+ op->dst.u.dma.offset = 0;
+ op->dst.u.dma.length = op->src.u.dma.length;
+ } else {
+ /* Enough room in the sg element, but we need to
+ * adjust for any previously used area
+ */
+ op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
+ op->dst.u.dma.offset = dst->sg_wa.sg_used;
+ op->dst.u.dma.length = op->src.u.dma.length;
+ }
+ }
+}
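+
+/* Example with AES (block_size = 16): if the current source sg entry still
+ * has 20 unprocessed bytes, the operation DMAs 16 of them (20 & ~15)
+ * straight from the scatterlist. If it only has 10 bytes left, those are
+ * staged through the dm_wa bounce buffer (together with bytes from the
+ * following sg entry) and op->soc is set so the CCP stops after the job,
+ * letting the driver reuse the bounce buffer safely.
+ */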
+
+static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
+ struct ccp_op *op)
+{
+ op->init = 0;
+
+ if (dst) {
+ if (op->dst.u.dma.address == dst->dm_wa.dma.address)
+ ccp_empty_queue_buf(dst);
+ else
+ ccp_update_sg_workarea(&dst->sg_wa,
+ op->dst.u.dma.length);
+ }
+}
+
+static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
+ struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
+ u32 byte_swap, bool from)
+{
+ struct ccp_op op;
+
+ memset(&op, 0, sizeof(op));
+
+ op.cmd_q = cmd_q;
+ op.jobid = jobid;
+ op.eom = 1;
+
+ if (from) {
+ op.soc = 1;
+ op.src.type = CCP_MEMTYPE_KSB;
+ op.src.u.ksb = ksb;
+ op.dst.type = CCP_MEMTYPE_SYSTEM;
+ op.dst.u.dma.address = wa->dma.address;
+ op.dst.u.dma.length = wa->length;
+ } else {
+ op.src.type = CCP_MEMTYPE_SYSTEM;
+ op.src.u.dma.address = wa->dma.address;
+ op.src.u.dma.length = wa->length;
+ op.dst.type = CCP_MEMTYPE_KSB;
+ op.dst.u.ksb = ksb;
+ }
+
+ op.u.passthru.byte_swap = byte_swap;
+
+ return ccp_perform_passthru(&op);
+}
+
+static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q,
+ struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
+ u32 byte_swap)
+{
+ return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, false);
+}
+
+static int ccp_copy_from_ksb(struct ccp_cmd_queue *cmd_q,
+ struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
+ u32 byte_swap)
+{
+ return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, true);
+}
+
+static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
+ struct ccp_cmd *cmd)
+{
+ struct ccp_aes_engine *aes = &cmd->u.aes;
+ struct ccp_dm_workarea key, ctx;
+ struct ccp_data src;
+ struct ccp_op op;
+ unsigned int dm_offset;
+ int ret;
+
+ if (!((aes->key_len == AES_KEYSIZE_128) ||
+ (aes->key_len == AES_KEYSIZE_192) ||
+ (aes->key_len == AES_KEYSIZE_256)))
+ return -EINVAL;
+
+ if (aes->src_len & (AES_BLOCK_SIZE - 1))
+ return -EINVAL;
+
+ if (aes->iv_len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (!aes->key || !aes->iv || !aes->src)
+ return -EINVAL;
+
+ if (aes->cmac_final) {
+ if (aes->cmac_key_len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (!aes->cmac_key)
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
+ BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);
+
+ ret = -EIO;
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+ op.ksb_key = cmd_q->ksb_key;
+ op.ksb_ctx = cmd_q->ksb_ctx;
+ op.init = 1;
+ op.u.aes.type = aes->type;
+ op.u.aes.mode = aes->mode;
+ op.u.aes.action = aes->action;
+
+ /* All supported key sizes fit in a single (32-byte) KSB entry
+ * and must be in little endian format. Use the 256-bit byte
+ * swap passthru option to convert from big endian to little
+ * endian.
+ */
+ ret = ccp_init_dm_workarea(&key, cmd_q,
+ CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ dm_offset = CCP_KSB_BYTES - aes->key_len;
+ ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_key;
+ }
+
+ /* The AES context fits in a single (32-byte) KSB entry and
+ * must be in little endian format. Use the 256-bit byte swap
+ * passthru option to convert from big endian to little endian.
+ */
+ ret = ccp_init_dm_workarea(&ctx, cmd_q,
+ CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto e_key;
+
+ dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+ ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_ctx;
+ }
+
+ /* Send data to the CCP AES engine */
+ ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ if (ret)
+ goto e_ctx;
+
+ while (src.sg_wa.bytes_left) {
+ ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
+ if (aes->cmac_final && !src.sg_wa.bytes_left) {
+ op.eom = 1;
+
+ /* Push the K1/K2 key to the CCP now */
+ ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid,
+ op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_src;
+ }
+
+ ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
+ aes->cmac_key_len);
+ ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_src;
+ }
+ }
+
+ ret = ccp_perform_aes(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_src;
+ }
+
+ ccp_process_data(&src, NULL, &op);
+ }
+
+ /* Retrieve the AES context - convert from LE to BE using
+ * 32-byte (256-bit) byteswapping
+ */
+ ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_src;
+ }
+
+ /* ...but we only need AES_BLOCK_SIZE bytes */
+ dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+ ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+
+e_src:
+ ccp_free_data(&src, cmd_q);
+
+e_ctx:
+ ccp_dm_free(&ctx);
+
+e_key:
+ ccp_dm_free(&key);
+
+ return ret;
+}
+
+static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+ struct ccp_aes_engine *aes = &cmd->u.aes;
+ struct ccp_dm_workarea key, ctx;
+ struct ccp_data src, dst;
+ struct ccp_op op;
+ unsigned int dm_offset;
+ bool in_place = false;
+ int ret;
+
+ if (aes->mode == CCP_AES_MODE_CMAC)
+ return ccp_run_aes_cmac_cmd(cmd_q, cmd);
+
+ if (!((aes->key_len == AES_KEYSIZE_128) ||
+ (aes->key_len == AES_KEYSIZE_192) ||
+ (aes->key_len == AES_KEYSIZE_256)))
+ return -EINVAL;
+
+ if (((aes->mode == CCP_AES_MODE_ECB) ||
+ (aes->mode == CCP_AES_MODE_CBC) ||
+ (aes->mode == CCP_AES_MODE_CFB)) &&
+ (aes->src_len & (AES_BLOCK_SIZE - 1)))
+ return -EINVAL;
+
+ if (!aes->key || !aes->src || !aes->dst)
+ return -EINVAL;
+
+ if (aes->mode != CCP_AES_MODE_ECB) {
+ if (aes->iv_len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (!aes->iv)
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
+ BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);
+
+ ret = -EIO;
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+ op.ksb_key = cmd_q->ksb_key;
+ op.ksb_ctx = cmd_q->ksb_ctx;
+ op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
+ op.u.aes.type = aes->type;
+ op.u.aes.mode = aes->mode;
+ op.u.aes.action = aes->action;
+
+ /* All supported key sizes fit in a single (32-byte) KSB entry
+ * and must be in little endian format. Use the 256-bit byte
+ * swap passthru option to convert from big endian to little
+ * endian.
+ */
+ ret = ccp_init_dm_workarea(&key, cmd_q,
+ CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ dm_offset = CCP_KSB_BYTES - aes->key_len;
+ ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_key;
+ }
+
+ /* The AES context fits in a single (32-byte) KSB entry and
+ * must be in little endian format. Use the 256-bit byte swap
+ * passthru option to convert from big endian to little endian.
+ */
+ ret = ccp_init_dm_workarea(&ctx, cmd_q,
+ CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto e_key;
+
+ if (aes->mode != CCP_AES_MODE_ECB) {
+ /* Load the AES context - convert to LE */
+ dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+ ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_ctx;
+ }
+ }
+
+ /* Prepare the input and output data workareas. For in-place
+ * operations we need to set the dma direction to BIDIRECTIONAL
+ * and copy the src workarea to the dst workarea.
+ */
+ if (sg_virt(aes->src) == sg_virt(aes->dst))
+ in_place = true;
+
+ ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
+ AES_BLOCK_SIZE,
+ in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ if (ret)
+ goto e_ctx;
+
+ if (in_place)
+ dst = src;
+ else {
+ ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
+ AES_BLOCK_SIZE, DMA_FROM_DEVICE);
+ if (ret)
+ goto e_src;
+ }
+
+ /* Send data to the CCP AES engine */
+ while (src.sg_wa.bytes_left) {
+ ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
+ if (!src.sg_wa.bytes_left) {
+ op.eom = 1;
+
+ /* Since we don't retrieve the AES context in ECB
+ * mode we have to wait for the operation to complete
+ * on the last piece of data
+ */
+ if (aes->mode == CCP_AES_MODE_ECB)
+ op.soc = 1;
+ }
+
+ ret = ccp_perform_aes(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ ccp_process_data(&src, &dst, &op);
+ }
+
+ if (aes->mode != CCP_AES_MODE_ECB) {
+ /* Retrieve the AES context - convert from LE to BE using
+ * 32-byte (256-bit) byteswapping
+ */
+ ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ /* ...but we only need AES_BLOCK_SIZE bytes */
+ dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+ ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ }
+
+e_dst:
+ if (!in_place)
+ ccp_free_data(&dst, cmd_q);
+
+e_src:
+ ccp_free_data(&src, cmd_q);
+
+e_ctx:
+ ccp_dm_free(&ctx);
+
+e_key:
+ ccp_dm_free(&key);
+
+ return ret;
+}
+
+static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+ struct ccp_cmd *cmd)
+{
+ struct ccp_xts_aes_engine *xts = &cmd->u.xts;
+ struct ccp_dm_workarea key, ctx;
+ struct ccp_data src, dst;
+ struct ccp_op op;
+ unsigned int unit_size, dm_offset;
+ bool in_place = false;
+ int ret;
+
+ switch (xts->unit_size) {
+ case CCP_XTS_AES_UNIT_SIZE_16:
+ unit_size = 16;
+ break;
+ case CCP_XTS_AES_UNIT_SIZE_512:
+ unit_size = 512;
+ break;
+ case CCP_XTS_AES_UNIT_SIZE_1024:
+ unit_size = 1024;
+ break;
+ case CCP_XTS_AES_UNIT_SIZE_2048:
+ unit_size = 2048;
+ break;
+ case CCP_XTS_AES_UNIT_SIZE_4096:
+ unit_size = 4096;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (xts->key_len != AES_KEYSIZE_128)
+ return -EINVAL;
+
+ if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
+ return -EINVAL;
+
+ if (xts->iv_len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (!xts->key || !xts->iv || !xts->src || !xts->dst)
+ return -EINVAL;
+
+ BUILD_BUG_ON(CCP_XTS_AES_KEY_KSB_COUNT != 1);
+ BUILD_BUG_ON(CCP_XTS_AES_CTX_KSB_COUNT != 1);
+
+ ret = -EIO;
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+ op.ksb_key = cmd_q->ksb_key;
+ op.ksb_ctx = cmd_q->ksb_ctx;
+ op.init = 1;
+ op.u.xts.action = xts->action;
+ op.u.xts.unit_size = xts->unit_size;
+
+ /* All supported key sizes fit in a single (32-byte) KSB entry
+ * and must be in little endian format. Use the 256-bit byte
+ * swap passthru option to convert from big endian to little
+ * endian.
+ */
+ ret = ccp_init_dm_workarea(&key, cmd_q,
+ CCP_XTS_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ dm_offset = CCP_KSB_BYTES - AES_KEYSIZE_128;
+ ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+ ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
+ ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_key;
+ }
+
+ /* The AES context fits in a single (32-byte) KSB entry and
+ * for XTS is already in little endian format so no byte swapping
+ * is needed.
+ */
+ ret = ccp_init_dm_workarea(&ctx, cmd_q,
+ CCP_XTS_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto e_key;
+
+ ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
+ ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_ctx;
+ }
+
+ /* Prepare the input and output data workareas. For in-place
+ * operations we need to set the dma direction to BIDIRECTIONAL
+ * and copy the src workarea to the dst workarea.
+ */
+ if (sg_virt(xts->src) == sg_virt(xts->dst))
+ in_place = true;
+
+ ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
+ unit_size,
+ in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ if (ret)
+ goto e_ctx;
+
+ if (in_place)
+ dst = src;
+ else {
+ ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
+ unit_size, DMA_FROM_DEVICE);
+ if (ret)
+ goto e_src;
+ }
+
+ /* Send data to the CCP AES engine */
+ while (src.sg_wa.bytes_left) {
+ ccp_prepare_data(&src, &dst, &op, unit_size, true);
+ if (!src.sg_wa.bytes_left)
+ op.eom = 1;
+
+ ret = ccp_perform_xts_aes(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ ccp_process_data(&src, &dst, &op);
+ }
+
+ /* Retrieve the AES context - convert from LE to BE using
+ * 32-byte (256-bit) byteswapping
+ */
+ ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ /* ...but we only need AES_BLOCK_SIZE bytes */
+ dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+ ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);
+
+e_dst:
+ if (!in_place)
+ ccp_free_data(&dst, cmd_q);
+
+e_src:
+ ccp_free_data(&src, cmd_q);
+
+e_ctx:
+ ccp_dm_free(&ctx);
+
+e_key:
+ ccp_dm_free(&key);
+
+ return ret;
+}
+
+static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+ struct ccp_sha_engine *sha = &cmd->u.sha;
+ struct ccp_dm_workarea ctx;
+ struct ccp_data src;
+ struct ccp_op op;
+ int ret;
+
+ if (sha->ctx_len != CCP_SHA_CTXSIZE)
+ return -EINVAL;
+
+ if (!sha->ctx)
+ return -EINVAL;
+
+ if (!sha->final && (sha->src_len & (CCP_SHA_BLOCKSIZE - 1)))
+ return -EINVAL;
+
+ if (!sha->src_len) {
+ const u8 *sha_zero;
+
+ /* Not final, just return */
+ if (!sha->final)
+ return 0;
+
+ /* CCP can't do a zero length sha operation so the caller
+ * must buffer the data.
+ */
+ if (sha->msg_bits)
+ return -EINVAL;
+
+ /* A sha operation for a message with a total length of zero,
+ * return known result.
+ */
+ switch (sha->type) {
+ case CCP_SHA_TYPE_1:
+ sha_zero = ccp_sha1_zero;
+ break;
+ case CCP_SHA_TYPE_224:
+ sha_zero = ccp_sha224_zero;
+ break;
+ case CCP_SHA_TYPE_256:
+ sha_zero = ccp_sha256_zero;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
+ sha->ctx_len, 1);
+
+ return 0;
+ }
+
+ if (!sha->src)
+ return -EINVAL;
+
+ BUILD_BUG_ON(CCP_SHA_KSB_COUNT != 1);
+
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+ op.ksb_ctx = cmd_q->ksb_ctx;
+ op.u.sha.type = sha->type;
+ op.u.sha.msg_bits = sha->msg_bits;
+
+ /* The SHA context fits in a single (32-byte) KSB entry and
+ * must be in little endian format. Use the 256-bit byte swap
+ * passthru option to convert from big endian to little endian.
+ */
+ ret = ccp_init_dm_workarea(&ctx, cmd_q,
+ CCP_SHA_KSB_COUNT * CCP_KSB_BYTES,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ return ret;
+
+ ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+ ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_ctx;
+ }
+
+ /* Send data to the CCP SHA engine */
+ ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
+ CCP_SHA_BLOCKSIZE, DMA_TO_DEVICE);
+ if (ret)
+ goto e_ctx;
+
+ while (src.sg_wa.bytes_left) {
+ ccp_prepare_data(&src, NULL, &op, CCP_SHA_BLOCKSIZE, false);
+ if (sha->final && !src.sg_wa.bytes_left)
+ op.eom = 1;
+
+ ret = ccp_perform_sha(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_data;
+ }
+
+ ccp_process_data(&src, NULL, &op);
+ }
+
+ /* Retrieve the SHA context - convert from LE to BE using
+ * 32-byte (256-bit) byteswapping
+ */
+ ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_data;
+ }
+
+ ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+
+e_data:
+ ccp_free_data(&src, cmd_q);
+
+e_ctx:
+ ccp_dm_free(&ctx);
+
+ return ret;
+}
+
+static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+ struct ccp_rsa_engine *rsa = &cmd->u.rsa;
+ struct ccp_dm_workarea exp, src;
+ struct ccp_data dst;
+ struct ccp_op op;
+ unsigned int ksb_count, i_len, o_len;
+ int ret;
+
+ if (rsa->key_size > CCP_RSA_MAX_WIDTH)
+ return -EINVAL;
+
+ if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
+ return -EINVAL;
+
+ /* The RSA modulus must precede the message being acted upon, so
+ * it must be copied to a DMA area where the message and the
+ * modulus can be concatenated. Therefore the input buffer
+ * length required is twice the output buffer length (which
+ * must be a multiple of 256-bits).
+ */
+ o_len = ((rsa->key_size + 255) / 256) * 32;
+ i_len = o_len * 2;
+
+ ksb_count = o_len / CCP_KSB_BYTES;
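+ /* For example, a 2048-bit key gives o_len = 8 * 32 = 256 bytes,
+ * i_len = 512 bytes and ksb_count = 8 KSB entries for the exponent.
+ */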
+
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+ op.ksb_key = ccp_alloc_ksb(cmd_q->ccp, ksb_count);
+ if (!op.ksb_key)
+ return -EIO;
+
+ /* The RSA exponent may span multiple (32-byte) KSB entries and must
+ * be in little endian format. Reverse copy each 32-byte chunk
+ * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
+ * and each byte within that chunk and do not perform any byte swap
+ * operations on the passthru operation.
+ */
+ ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
+ if (ret)
+ goto e_ksb;
+
+ ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES,
+ true);
+ ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_exp;
+ }
+
+ /* Concatenate the modulus and the message. Both the modulus and
+ * the operands must be in little endian format. Since the input
+ * is in big endian format it must be converted.
+ */
+ ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
+ if (ret)
+ goto e_exp;
+
+ ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES,
+ true);
+ src.address += o_len; /* Adjust the address for the copy operation */
+ ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES,
+ true);
+ src.address -= o_len; /* Reset the address to original value */
+
+ /* Prepare the output area for the operation */
+ ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
+ o_len, DMA_FROM_DEVICE);
+ if (ret)
+ goto e_src;
+
+ op.soc = 1;
+ op.src.u.dma.address = src.dma.address;
+ op.src.u.dma.offset = 0;
+ op.src.u.dma.length = i_len;
+ op.dst.u.dma.address = dst.dm_wa.dma.address;
+ op.dst.u.dma.offset = 0;
+ op.dst.u.dma.length = o_len;
+
+ op.u.rsa.mod_size = rsa->key_size;
+ op.u.rsa.input_len = i_len;
+
+ ret = ccp_perform_rsa(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ ccp_reverse_get_dm_area(&dst.dm_wa, rsa->dst, rsa->mod_len);
+
+e_dst:
+ ccp_free_data(&dst, cmd_q);
+
+e_src:
+ ccp_dm_free(&src);
+
+e_exp:
+ ccp_dm_free(&exp);
+
+e_ksb:
+ ccp_free_ksb(cmd_q->ccp, op.ksb_key, ksb_count);
+
+ return ret;
+}
+
+static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
+ struct ccp_cmd *cmd)
+{
+ struct ccp_passthru_engine *pt = &cmd->u.passthru;
+ struct ccp_dm_workarea mask;
+ struct ccp_data src, dst;
+ struct ccp_op op;
+ bool in_place = false;
+ unsigned int i;
+ int ret;
+
+ if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
+ return -EINVAL;
+
+ if (!pt->src || !pt->dst)
+ return -EINVAL;
+
+ if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
+ if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
+ return -EINVAL;
+ if (!pt->mask)
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);
+
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+
+ if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
+ /* Load the mask */
+ op.ksb_key = cmd_q->ksb_key;
+
+ ret = ccp_init_dm_workarea(&mask, cmd_q,
+ CCP_PASSTHRU_KSB_COUNT *
+ CCP_KSB_BYTES,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
+ ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_mask;
+ }
+ }
+
+ /* Prepare the input and output data workareas. For in-place
+ * operations we need to set the dma direction to BIDIRECTIONAL
+ * and copy the src workarea to the dst workarea.
+ */
+ if (sg_virt(pt->src) == sg_virt(pt->dst))
+ in_place = true;
+
+ ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
+ CCP_PASSTHRU_MASKSIZE,
+ in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ if (ret)
+ goto e_mask;
+
+ if (in_place)
+ dst = src;
+ else {
+ ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
+ CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
+ if (ret)
+ goto e_src;
+ }
+
+ /* Send data to the CCP Passthru engine
+ * Because the CCP engine works on a single source and destination
+ * dma address at a time, the length of each entry in the source
+ * scatterlist (after the dma_map_sg call) must be less than or equal
+ * to the (remaining) length of the destination scatterlist entry, and
+ * that length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
+ */
+ dst.sg_wa.sg_used = 0;
+ for (i = 1; i <= src.sg_wa.dma_count; i++) {
+ if (!dst.sg_wa.sg ||
+ (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
+ ret = -EINVAL;
+ goto e_dst;
+ }
+
+ if (i == src.sg_wa.dma_count) {
+ op.eom = 1;
+ op.soc = 1;
+ }
+
+ op.src.type = CCP_MEMTYPE_SYSTEM;
+ op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
+ op.src.u.dma.offset = 0;
+ op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);
+
+ op.dst.type = CCP_MEMTYPE_SYSTEM;
+ op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
+ op.dst.u.dma.offset = dst.sg_wa.sg_used;
+ op.dst.u.dma.length = op.src.u.dma.length;
+
+ ret = ccp_perform_passthru(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ dst.sg_wa.sg_used += src.sg_wa.sg->length;
+ if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
+ dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
+ dst.sg_wa.sg_used = 0;
+ }
+ src.sg_wa.sg = sg_next(src.sg_wa.sg);
+ }
+
+e_dst:
+ if (!in_place)
+ ccp_free_data(&dst, cmd_q);
+
+e_src:
+ ccp_free_data(&src, cmd_q);
+
+e_mask:
+ if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+ ccp_dm_free(&mask);
+
+ return ret;
+}
+
+static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+ struct ccp_ecc_engine *ecc = &cmd->u.ecc;
+ struct ccp_dm_workarea src, dst;
+ struct ccp_op op;
+ int ret;
+ u8 *save;
+
+ if (!ecc->u.mm.operand_1 ||
+ (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+
+ if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
+ if (!ecc->u.mm.operand_2 ||
+ (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+
+ if (!ecc->u.mm.result ||
+ (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+
+ /* Concatenate the modulus and the operands. Both the modulus and
+ * the operands must be in little endian format. Since the input
+ * is in big endian format it must be converted and placed in a
+ * fixed length buffer.
+ */
+ ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ /* Save the workarea address since it is updated in order to perform
+ * the concatenation
+ */
+ save = src.address;
+
+ /* Copy the ECC modulus */
+ ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+
+ /* Copy the first operand */
+ ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
+ ecc->u.mm.operand_1_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+
+ if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
+ /* Copy the second operand */
+ ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
+ ecc->u.mm.operand_2_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+ }
+
+ /* Restore the workarea address */
+ src.address = save;
+
+ /* Prepare the output area for the operation */
+ ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (ret)
+ goto e_src;
+
+ op.soc = 1;
+ op.src.u.dma.address = src.dma.address;
+ op.src.u.dma.offset = 0;
+ op.src.u.dma.length = src.length;
+ op.dst.u.dma.address = dst.dma.address;
+ op.dst.u.dma.offset = 0;
+ op.dst.u.dma.length = dst.length;
+
+ op.u.ecc.function = cmd->u.ecc.function;
+
+ ret = ccp_perform_ecc(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ ecc->ecc_result = le16_to_cpup(
+ (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
+ if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
+ ret = -EIO;
+ goto e_dst;
+ }
+
+ /* Save the ECC result */
+ ccp_reverse_get_dm_area(&dst, ecc->u.mm.result, CCP_ECC_MODULUS_BYTES);
+
+e_dst:
+ ccp_dm_free(&dst);
+
+e_src:
+ ccp_dm_free(&src);
+
+ return ret;
+}
+
+static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+ struct ccp_ecc_engine *ecc = &cmd->u.ecc;
+ struct ccp_dm_workarea src, dst;
+ struct ccp_op op;
+ int ret;
+ u8 *save;
+
+ if (!ecc->u.pm.point_1.x ||
+ (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
+ !ecc->u.pm.point_1.y ||
+ (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+
+ if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
+ if (!ecc->u.pm.point_2.x ||
+ (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
+ !ecc->u.pm.point_2.y ||
+ (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+ } else {
+ if (!ecc->u.pm.domain_a ||
+ (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+
+ if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
+ if (!ecc->u.pm.scalar ||
+ (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+ }
+
+ if (!ecc->u.pm.result.x ||
+ (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
+ !ecc->u.pm.result.y ||
+ (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+
+ /* Concatenate the modulus and the operands. Both the modulus and
+ * the operands must be in little endian format. Since the input
+ * is in big endian format it must be converted and placed in a
+ * fixed length buffer.
+ */
+ ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ /* Save the workarea address since it is updated in order to perform
+ * the concatenation
+ */
+ save = src.address;
+
+ /* Copy the ECC modulus */
+ ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+
+ /* Copy the first point X and Y coordinate */
+ ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
+ ecc->u.pm.point_1.x_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+ ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
+ ecc->u.pm.point_1.y_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+
+ /* Set the first point Z coordinate to 1 */
+ *(src.address) = 0x01;
+ src.address += CCP_ECC_OPERAND_SIZE;
+
+ if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
+ /* Copy the second point X and Y coordinate */
+ ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
+ ecc->u.pm.point_2.x_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+ ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
+ ecc->u.pm.point_2.y_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+
+ /* Set the second point Z coordinate to 1 */
+ *(src.address) = 0x01;
+ src.address += CCP_ECC_OPERAND_SIZE;
+ } else {
+ /* Copy the Domain "a" parameter */
+ ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
+ ecc->u.pm.domain_a_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+
+ if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
+ /* Copy the scalar value */
+ ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
+ ecc->u.pm.scalar_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+ }
+ }
+
+ /* Restore the workarea address */
+ src.address = save;
+
+ /* Prepare the output area for the operation */
+ ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (ret)
+ goto e_src;
+
+ op.soc = 1;
+ op.src.u.dma.address = src.dma.address;
+ op.src.u.dma.offset = 0;
+ op.src.u.dma.length = src.length;
+ op.dst.u.dma.address = dst.dma.address;
+ op.dst.u.dma.offset = 0;
+ op.dst.u.dma.length = dst.length;
+
+ op.u.ecc.function = cmd->u.ecc.function;
+
+ ret = ccp_perform_ecc(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ ecc->ecc_result = le16_to_cpup(
+ (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
+ if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
+ ret = -EIO;
+ goto e_dst;
+ }
+
+ /* Save the workarea address since it is updated as we walk through
+ * to copy the point math result
+ */
+ save = dst.address;
+
+ /* Save the ECC result X and Y coordinates */
+ ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.x,
+ CCP_ECC_MODULUS_BYTES);
+ dst.address += CCP_ECC_OUTPUT_SIZE;
+ ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.y,
+ CCP_ECC_MODULUS_BYTES);
+ dst.address += CCP_ECC_OUTPUT_SIZE;
+
+ /* Restore the workarea address */
+ dst.address = save;
+
+e_dst:
+ ccp_dm_free(&dst);
+
+e_src:
+ ccp_dm_free(&src);
+
+ return ret;
+}
+
+static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+ struct ccp_ecc_engine *ecc = &cmd->u.ecc;
+
+ ecc->ecc_result = 0;
+
+ if (!ecc->mod ||
+ (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+
+ switch (ecc->function) {
+ case CCP_ECC_FUNCTION_MMUL_384BIT:
+ case CCP_ECC_FUNCTION_MADD_384BIT:
+ case CCP_ECC_FUNCTION_MINV_384BIT:
+ return ccp_run_ecc_mm_cmd(cmd_q, cmd);
+
+ case CCP_ECC_FUNCTION_PADD_384BIT:
+ case CCP_ECC_FUNCTION_PMUL_384BIT:
+ case CCP_ECC_FUNCTION_PDBL_384BIT:
+ return ccp_run_ecc_pm_cmd(cmd_q, cmd);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+ int ret;
+
+ cmd->engine_error = 0;
+ cmd_q->cmd_error = 0;
+ cmd_q->int_rcvd = 0;
+ cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
+
+ switch (cmd->engine) {
+ case CCP_ENGINE_AES:
+ ret = ccp_run_aes_cmd(cmd_q, cmd);
+ break;
+ case CCP_ENGINE_XTS_AES_128:
+ ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
+ break;
+ case CCP_ENGINE_SHA:
+ ret = ccp_run_sha_cmd(cmd_q, cmd);
+ break;
+ case CCP_ENGINE_RSA:
+ ret = ccp_run_rsa_cmd(cmd_q, cmd);
+ break;
+ case CCP_ENGINE_PASSTHRU:
+ ret = ccp_run_passthru_cmd(cmd_q, cmd);
+ break;
+ case CCP_ENGINE_ECC:
+ ret = ccp_run_ecc_cmd(cmd_q, cmd);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
new file mode 100644
index 000000000000..93319f9db753
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -0,0 +1,361 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+#define IO_BAR 2
+#define MSIX_VECTORS 2
+
+struct ccp_msix {
+ u32 vector;
+ char name[16];
+};
+
+struct ccp_pci {
+ int msix_count;
+ struct ccp_msix msix[MSIX_VECTORS];
+};
+
+static int ccp_get_msix_irqs(struct ccp_device *ccp)
+{
+ struct ccp_pci *ccp_pci = ccp->dev_specific;
+ struct device *dev = ccp->dev;
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct msix_entry msix_entry[MSIX_VECTORS];
+ unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
+ int v, ret;
+
+ for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
+ msix_entry[v].entry = v;
+
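+ /*
+ * A positive return from pci_enable_msix() is the number of vectors
+ * that could have been allocated, so retry with that smaller count.
+ */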
+ while ((ret = pci_enable_msix(pdev, msix_entry, v)) > 0)
+ v = ret;
+ if (ret)
+ return ret;
+
+ ccp_pci->msix_count = v;
+ for (v = 0; v < ccp_pci->msix_count; v++) {
+ /* Set the interrupt names and request the irqs */
+ snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v);
+ ccp_pci->msix[v].vector = msix_entry[v].vector;
+ ret = request_irq(ccp_pci->msix[v].vector, ccp_irq_handler,
+ 0, ccp_pci->msix[v].name, dev);
+ if (ret) {
+ dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
+ ret);
+ goto e_irq;
+ }
+ }
+
+ return 0;
+
+e_irq:
+ while (v--)
+ free_irq(ccp_pci->msix[v].vector, dev);
+
+ pci_disable_msix(pdev);
+
+ ccp_pci->msix_count = 0;
+
+ return ret;
+}
+
+static int ccp_get_msi_irq(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ int ret;
+
+ ret = pci_enable_msi(pdev);
+ if (ret)
+ return ret;
+
+ ret = request_irq(pdev->irq, ccp_irq_handler, 0, "ccp", dev);
+ if (ret) {
+ dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
+ goto e_msi;
+ }
+
+ return 0;
+
+e_msi:
+ pci_disable_msi(pdev);
+
+ return ret;
+}
+
+static int ccp_get_irqs(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+ int ret;
+
+ ret = ccp_get_msix_irqs(ccp);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI-X vectors, try MSI */
+ dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
+ ret = ccp_get_msi_irq(ccp);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI interrupt */
+ dev_notice(dev, "could not enable MSI (%d)\n", ret);
+
+ return ret;
+}
+
+static void ccp_free_irqs(struct ccp_device *ccp)
+{
+ struct ccp_pci *ccp_pci = ccp->dev_specific;
+ struct device *dev = ccp->dev;
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+
+ if (ccp_pci->msix_count) {
+ while (ccp_pci->msix_count--)
+ free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
+ dev);
+ pci_disable_msix(pdev);
+ } else {
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+ }
+}
+
+static int ccp_find_mmio_area(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ resource_size_t io_len;
+ unsigned long io_flags;
+ int bar;
+
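+ /* Check the expected BAR first, then scan for any BAR that is large enough. */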
+ io_flags = pci_resource_flags(pdev, IO_BAR);
+ io_len = pci_resource_len(pdev, IO_BAR);
+ if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
+ return IO_BAR;
+
+ for (bar = 0; bar < PCI_STD_RESOURCE_END; bar++) {
+ io_flags = pci_resource_flags(pdev, bar);
+ io_len = pci_resource_len(pdev, bar);
+ if ((io_flags & IORESOURCE_MEM) &&
+ (io_len >= (IO_OFFSET + 0x800)))
+ return bar;
+ }
+
+ return -EIO;
+}
+
+static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ccp_device *ccp;
+ struct ccp_pci *ccp_pci;
+ struct device *dev = &pdev->dev;
+ unsigned int bar;
+ int ret;
+
+ ret = -ENOMEM;
+ ccp = ccp_alloc_struct(dev);
+ if (!ccp)
+ goto e_err;
+
+ ccp_pci = kzalloc(sizeof(*ccp_pci), GFP_KERNEL);
+ if (!ccp_pci) {
+ ret = -ENOMEM;
+ goto e_free1;
+ }
+ ccp->dev_specific = ccp_pci;
+ ccp->get_irq = ccp_get_irqs;
+ ccp->free_irq = ccp_free_irqs;
+
+ ret = pci_request_regions(pdev, "ccp");
+ if (ret) {
+ dev_err(dev, "pci_request_regions failed (%d)\n", ret);
+ goto e_free2;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pci_enable_device failed (%d)\n", ret);
+ goto e_regions;
+ }
+
+ pci_set_master(pdev);
+
+ ret = ccp_find_mmio_area(ccp);
+ if (ret < 0)
+ goto e_device;
+ bar = ret;
+
+ ret = -EIO;
+ ccp->io_map = pci_iomap(pdev, bar, 0);
+ if (ccp->io_map == NULL) {
+ dev_err(dev, "pci_iomap failed\n");
+ goto e_device;
+ }
+ ccp->io_regs = ccp->io_map + IO_OFFSET;
+
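+ /* Prefer a 48-bit DMA mask; fall back to 32-bit if the device rejects it. */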
+ ret = dma_set_mask(dev, DMA_BIT_MASK(48));
+ if (ret == 0) {
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(dev,
+ "pci_set_consistent_dma_mask failed (%d)\n",
+ ret);
+ goto e_bar0;
+ }
+ } else {
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "pci_set_dma_mask failed (%d)\n", ret);
+ goto e_bar0;
+ }
+ }
+
+ dev_set_drvdata(dev, ccp);
+
+ ret = ccp_init(ccp);
+ if (ret)
+ goto e_bar0;
+
+ dev_notice(dev, "enabled\n");
+
+ return 0;
+
+e_bar0:
+ pci_iounmap(pdev, ccp->io_map);
+
+e_device:
+ pci_disable_device(pdev);
+
+e_regions:
+ pci_release_regions(pdev);
+
+e_free2:
+ kfree(ccp_pci);
+
+e_free1:
+ kfree(ccp);
+
+e_err:
+ dev_notice(dev, "initialization failed\n");
+ return ret;
+}
+
+static void ccp_pci_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+
+ if (!ccp)
+ return;
+
+ ccp_destroy(ccp);
+
+ pci_iounmap(pdev, ccp->io_map);
+
+ pci_disable_device(pdev);
+
+ pci_release_regions(pdev);
+
+ kfree(ccp);
+
+ dev_notice(dev, "disabled\n");
+}
+
+#ifdef CONFIG_PM
+static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct device *dev = &pdev->dev;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ ccp->suspending = 1;
+
+ /* Wake all the queue kthreads to prepare for suspend */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ wake_up_process(ccp->cmd_q[i].kthread);
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ /* Wait for all queue kthreads to say they're done */
+ while (!ccp_queues_suspended(ccp))
+ wait_event_interruptible(ccp->suspend_queue,
+ ccp_queues_suspended(ccp));
+
+ return 0;
+}
+
+static int ccp_pci_resume(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ ccp->suspending = 0;
+
+ /* Wake up all the kthreads */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ ccp->cmd_q[i].suspended = 0;
+ wake_up_process(ccp->cmd_q[i].kthread);
+ }
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ return 0;
+}
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(ccp_pci_table) = {
+ { PCI_VDEVICE(AMD, 0x1537), },
+ /* Last entry must be zero */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ccp_pci_table);
+
+static struct pci_driver ccp_pci_driver = {
+ .name = "AMD Cryptographic Coprocessor",
+ .id_table = ccp_pci_table,
+ .probe = ccp_pci_probe,
+ .remove = ccp_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = ccp_pci_suspend,
+ .resume = ccp_pci_resume,
+#endif
+};
+
+int ccp_pci_init(void)
+{
+ return pci_register_driver(&ccp_pci_driver);
+}
+
+void ccp_pci_exit(void)
+{
+ pci_unregister_driver(&ccp_pci_driver);
+}
diff --git a/drivers/crypto/dcp.c b/drivers/crypto/dcp.c
deleted file mode 100644
index 247ab8048f5b..000000000000
--- a/drivers/crypto/dcp.c
+++ /dev/null
@@ -1,903 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Support for DCP cryptographic accelerator.
- *
- * Copyright (c) 2013
- * Author: Tobias Rauter <tobias.rauter@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Based on tegra-aes.c, dcp.c (from freescale SDK) and sahara.c
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/mutex.h>
-#include <linux/interrupt.h>
-#include <linux/completion.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-#include <linux/crypto.h>
-#include <linux/miscdevice.h>
-
-#include <crypto/scatterwalk.h>
-#include <crypto/aes.h>
-
-
-/* IOCTL for DCP OTP Key AES - taken from Freescale's SDK*/
-#define DBS_IOCTL_BASE 'd'
-#define DBS_ENC _IOW(DBS_IOCTL_BASE, 0x00, uint8_t[16])
-#define DBS_DEC _IOW(DBS_IOCTL_BASE, 0x01, uint8_t[16])
-
-/* DCP channel used for AES */
-#define USED_CHANNEL 1
-/* Ring Buffers' maximum size */
-#define DCP_MAX_PKG 20
-
-/* Control Register */
-#define DCP_REG_CTRL 0x000
-#define DCP_CTRL_SFRST (1<<31)
-#define DCP_CTRL_CLKGATE (1<<30)
-#define DCP_CTRL_CRYPTO_PRESENT (1<<29)
-#define DCP_CTRL_SHA_PRESENT (1<<28)
-#define DCP_CTRL_GATHER_RES_WRITE (1<<23)
-#define DCP_CTRL_ENABLE_CONTEXT_CACHE (1<<22)
-#define DCP_CTRL_ENABLE_CONTEXT_SWITCH (1<<21)
-#define DCP_CTRL_CH_IRQ_E_0 0x01
-#define DCP_CTRL_CH_IRQ_E_1 0x02
-#define DCP_CTRL_CH_IRQ_E_2 0x04
-#define DCP_CTRL_CH_IRQ_E_3 0x08
-
-/* Status register */
-#define DCP_REG_STAT 0x010
-#define DCP_STAT_OTP_KEY_READY (1<<28)
-#define DCP_STAT_CUR_CHANNEL(stat) ((stat>>24)&0x0F)
-#define DCP_STAT_READY_CHANNEL(stat) ((stat>>16)&0x0F)
-#define DCP_STAT_IRQ(stat) (stat&0x0F)
-#define DCP_STAT_CHAN_0 (0x01)
-#define DCP_STAT_CHAN_1 (0x02)
-#define DCP_STAT_CHAN_2 (0x04)
-#define DCP_STAT_CHAN_3 (0x08)
-
-/* Channel Control Register */
-#define DCP_REG_CHAN_CTRL 0x020
-#define DCP_CHAN_CTRL_CH0_IRQ_MERGED (1<<16)
-#define DCP_CHAN_CTRL_HIGH_PRIO_0 (0x0100)
-#define DCP_CHAN_CTRL_HIGH_PRIO_1 (0x0200)
-#define DCP_CHAN_CTRL_HIGH_PRIO_2 (0x0400)
-#define DCP_CHAN_CTRL_HIGH_PRIO_3 (0x0800)
-#define DCP_CHAN_CTRL_ENABLE_0 (0x01)
-#define DCP_CHAN_CTRL_ENABLE_1 (0x02)
-#define DCP_CHAN_CTRL_ENABLE_2 (0x04)
-#define DCP_CHAN_CTRL_ENABLE_3 (0x08)
-
-/*
- * Channel Registers:
- * The DCP has 4 channels. Each of this channels
- * has 4 registers (command pointer, semaphore, status and options).
- * The address of register REG of channel CHAN is obtained by
- * dcp_chan_reg(REG, CHAN)
- */
-#define DCP_REG_CHAN_PTR 0x00000100
-#define DCP_REG_CHAN_SEMA 0x00000110
-#define DCP_REG_CHAN_STAT 0x00000120
-#define DCP_REG_CHAN_OPT 0x00000130
-
-#define DCP_CHAN_STAT_NEXT_CHAIN_IS_0 0x010000
-#define DCP_CHAN_STAT_NO_CHAIN 0x020000
-#define DCP_CHAN_STAT_CONTEXT_ERROR 0x030000
-#define DCP_CHAN_STAT_PAYLOAD_ERROR 0x040000
-#define DCP_CHAN_STAT_INVALID_MODE 0x050000
-#define DCP_CHAN_STAT_PAGEFAULT 0x40
-#define DCP_CHAN_STAT_DST 0x20
-#define DCP_CHAN_STAT_SRC 0x10
-#define DCP_CHAN_STAT_PACKET 0x08
-#define DCP_CHAN_STAT_SETUP 0x04
-#define DCP_CHAN_STAT_MISMATCH 0x02
-
-/* hw packet control*/
-
-#define DCP_PKT_PAYLOAD_KEY (1<<11)
-#define DCP_PKT_OTP_KEY (1<<10)
-#define DCP_PKT_CIPHER_INIT (1<<9)
-#define DCP_PKG_CIPHER_ENCRYPT (1<<8)
-#define DCP_PKT_CIPHER_ENABLE (1<<5)
-#define DCP_PKT_DECR_SEM (1<<1)
-#define DCP_PKT_CHAIN (1<<2)
-#define DCP_PKT_IRQ 1
-
-#define DCP_PKT_MODE_CBC (1<<4)
-#define DCP_PKT_KEYSELECT_OTP (0xFF<<8)
-
-/* cipher flags */
-#define DCP_ENC 0x0001
-#define DCP_DEC 0x0002
-#define DCP_ECB 0x0004
-#define DCP_CBC 0x0008
-#define DCP_CBC_INIT 0x0010
-#define DCP_NEW_KEY 0x0040
-#define DCP_OTP_KEY 0x0080
-#define DCP_AES 0x1000
-
-/* DCP Flags */
-#define DCP_FLAG_BUSY 0x01
-#define DCP_FLAG_PRODUCING 0x02
-
-/* clock defines */
-#define CLOCK_ON 1
-#define CLOCK_OFF 0
-
-struct dcp_dev_req_ctx {
- int mode;
-};
-
-struct dcp_op {
- unsigned int flags;
- u8 key[AES_KEYSIZE_128];
- int keylen;
-
- struct ablkcipher_request *req;
- struct crypto_ablkcipher *fallback;
-
- uint32_t stat;
- uint32_t pkt1;
- uint32_t pkt2;
- struct ablkcipher_walk walk;
-};
-
-struct dcp_dev {
- struct device *dev;
- void __iomem *dcp_regs_base;
-
- int dcp_vmi_irq;
- int dcp_irq;
-
- spinlock_t queue_lock;
- struct crypto_queue queue;
-
- uint32_t pkt_produced;
- uint32_t pkt_consumed;
-
- struct dcp_hw_packet *hw_pkg[DCP_MAX_PKG];
- dma_addr_t hw_phys_pkg;
-
- /* [KEY][IV] Both with 16 Bytes */
- u8 *payload_base;
- dma_addr_t payload_base_dma;
-
-
- struct tasklet_struct done_task;
- struct tasklet_struct queue_task;
- struct timer_list watchdog;
-
- unsigned long flags;
-
- struct dcp_op *ctx;
-
- struct miscdevice dcp_bootstream_misc;
-};
-
-struct dcp_hw_packet {
- uint32_t next;
- uint32_t pkt1;
- uint32_t pkt2;
- uint32_t src;
- uint32_t dst;
- uint32_t size;
- uint32_t payload;
- uint32_t stat;
-};
-
-static struct dcp_dev *global_dev;
-
-static inline u32 dcp_chan_reg(u32 reg, int chan)
-{
- return reg + (chan) * 0x40;
-}
-
-static inline void dcp_write(struct dcp_dev *dev, u32 data, u32 reg)
-{
- writel(data, dev->dcp_regs_base + reg);
-}
-
-static inline void dcp_set(struct dcp_dev *dev, u32 data, u32 reg)
-{
- writel(data, dev->dcp_regs_base + (reg | 0x04));
-}
-
-static inline void dcp_clear(struct dcp_dev *dev, u32 data, u32 reg)
-{
- writel(data, dev->dcp_regs_base + (reg | 0x08));
-}
-
-static inline void dcp_toggle(struct dcp_dev *dev, u32 data, u32 reg)
-{
- writel(data, dev->dcp_regs_base + (reg | 0x0C));
-}
-
-static inline unsigned int dcp_read(struct dcp_dev *dev, u32 reg)
-{
- return readl(dev->dcp_regs_base + reg);
-}
-
-static void dcp_dma_unmap(struct dcp_dev *dev, struct dcp_hw_packet *pkt)
-{
- dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
- dma_unmap_page(dev->dev, pkt->dst, pkt->size, DMA_FROM_DEVICE);
- dev_dbg(dev->dev, "unmap packet %x", (unsigned int) pkt);
-}
-
-static int dcp_dma_map(struct dcp_dev *dev,
- struct ablkcipher_walk *walk, struct dcp_hw_packet *pkt)
-{
- dev_dbg(dev->dev, "map packet %x", (unsigned int) pkt);
- /* align to length = 16 */
- pkt->size = walk->nbytes - (walk->nbytes % 16);
-
- pkt->src = dma_map_page(dev->dev, walk->src.page, walk->src.offset,
- pkt->size, DMA_TO_DEVICE);
-
- if (pkt->src == 0) {
- dev_err(dev->dev, "Unable to map src");
- return -ENOMEM;
- }
-
- pkt->dst = dma_map_page(dev->dev, walk->dst.page, walk->dst.offset,
- pkt->size, DMA_FROM_DEVICE);
-
- if (pkt->dst == 0) {
- dev_err(dev->dev, "Unable to map dst");
- dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void dcp_op_one(struct dcp_dev *dev, struct dcp_hw_packet *pkt,
- uint8_t last)
-{
- struct dcp_op *ctx = dev->ctx;
- pkt->pkt1 = ctx->pkt1;
- pkt->pkt2 = ctx->pkt2;
-
- pkt->payload = (u32) dev->payload_base_dma;
- pkt->stat = 0;
-
- if (ctx->flags & DCP_CBC_INIT) {
- pkt->pkt1 |= DCP_PKT_CIPHER_INIT;
- ctx->flags &= ~DCP_CBC_INIT;
- }
-
- mod_timer(&dev->watchdog, jiffies + msecs_to_jiffies(500));
- pkt->pkt1 |= DCP_PKT_IRQ;
- if (!last)
- pkt->pkt1 |= DCP_PKT_CHAIN;
-
- dev->pkt_produced++;
-
- dcp_write(dev, 1,
- dcp_chan_reg(DCP_REG_CHAN_SEMA, USED_CHANNEL));
-}
-
-static void dcp_op_proceed(struct dcp_dev *dev)
-{
- struct dcp_op *ctx = dev->ctx;
- struct dcp_hw_packet *pkt;
-
- while (ctx->walk.nbytes) {
- int err = 0;
-
- pkt = dev->hw_pkg[dev->pkt_produced % DCP_MAX_PKG];
- err = dcp_dma_map(dev, &ctx->walk, pkt);
- if (err) {
- dev->ctx->stat |= err;
- /* start timer to wait for already set up calls */
- mod_timer(&dev->watchdog,
- jiffies + msecs_to_jiffies(500));
- break;
- }
-
-
- err = ctx->walk.nbytes - pkt->size;
- ablkcipher_walk_done(dev->ctx->req, &dev->ctx->walk, err);
-
- dcp_op_one(dev, pkt, ctx->walk.nbytes == 0);
- /* we have to wait if no space is left in buffer */
- if (dev->pkt_produced - dev->pkt_consumed == DCP_MAX_PKG)
- break;
- }
- clear_bit(DCP_FLAG_PRODUCING, &dev->flags);
-}
-
-static void dcp_op_start(struct dcp_dev *dev, uint8_t use_walk)
-{
- struct dcp_op *ctx = dev->ctx;
-
- if (ctx->flags & DCP_NEW_KEY) {
- memcpy(dev->payload_base, ctx->key, ctx->keylen);
- ctx->flags &= ~DCP_NEW_KEY;
- }
-
- ctx->pkt1 = 0;
- ctx->pkt1 |= DCP_PKT_CIPHER_ENABLE;
- ctx->pkt1 |= DCP_PKT_DECR_SEM;
-
- if (ctx->flags & DCP_OTP_KEY)
- ctx->pkt1 |= DCP_PKT_OTP_KEY;
- else
- ctx->pkt1 |= DCP_PKT_PAYLOAD_KEY;
-
- if (ctx->flags & DCP_ENC)
- ctx->pkt1 |= DCP_PKG_CIPHER_ENCRYPT;
-
- ctx->pkt2 = 0;
- if (ctx->flags & DCP_CBC)
- ctx->pkt2 |= DCP_PKT_MODE_CBC;
-
- dev->pkt_produced = 0;
- dev->pkt_consumed = 0;
-
- ctx->stat = 0;
- dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
- dcp_write(dev, (u32) dev->hw_phys_pkg,
- dcp_chan_reg(DCP_REG_CHAN_PTR, USED_CHANNEL));
-
- set_bit(DCP_FLAG_PRODUCING, &dev->flags);
-
- if (use_walk) {
- ablkcipher_walk_init(&ctx->walk, ctx->req->dst,
- ctx->req->src, ctx->req->nbytes);
- ablkcipher_walk_phys(ctx->req, &ctx->walk);
- dcp_op_proceed(dev);
- } else {
- dcp_op_one(dev, dev->hw_pkg[0], 1);
- clear_bit(DCP_FLAG_PRODUCING, &dev->flags);
- }
-}
-
-static void dcp_done_task(unsigned long data)
-{
- struct dcp_dev *dev = (struct dcp_dev *)data;
- struct dcp_hw_packet *last_packet;
- int fin;
- fin = 0;
-
- for (last_packet = dev->hw_pkg[(dev->pkt_consumed) % DCP_MAX_PKG];
- last_packet->stat == 1;
- last_packet =
- dev->hw_pkg[++(dev->pkt_consumed) % DCP_MAX_PKG]) {
-
- dcp_dma_unmap(dev, last_packet);
- last_packet->stat = 0;
- fin++;
- }
- /* the last call of this function already consumed this IRQ's packet */
- if (fin == 0)
- return;
-
- dev_dbg(dev->dev,
- "Packet(s) done with status %x; finished: %d, produced:%d, complete consumed: %d",
- dev->ctx->stat, fin, dev->pkt_produced, dev->pkt_consumed);
-
- last_packet = dev->hw_pkg[(dev->pkt_consumed - 1) % DCP_MAX_PKG];
- if (!dev->ctx->stat && last_packet->pkt1 & DCP_PKT_CHAIN) {
- if (!test_and_set_bit(DCP_FLAG_PRODUCING, &dev->flags))
- dcp_op_proceed(dev);
- return;
- }
-
- while (unlikely(dev->pkt_consumed < dev->pkt_produced)) {
- dcp_dma_unmap(dev,
- dev->hw_pkg[dev->pkt_consumed++ % DCP_MAX_PKG]);
- }
-
- if (dev->ctx->flags & DCP_OTP_KEY) {
- /* we used the miscdevice, no walk to finish */
- clear_bit(DCP_FLAG_BUSY, &dev->flags);
- return;
- }
-
- ablkcipher_walk_complete(&dev->ctx->walk);
- dev->ctx->req->base.complete(&dev->ctx->req->base,
- dev->ctx->stat);
- dev->ctx->req = NULL;
- /* in case there are other requests in the queue */
- tasklet_schedule(&dev->queue_task);
-}
-
-static void dcp_watchdog(unsigned long data)
-{
- struct dcp_dev *dev = (struct dcp_dev *)data;
- dev->ctx->stat |= dcp_read(dev,
- dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
-
- dev_err(dev->dev, "Timeout, Channel status: %x", dev->ctx->stat);
-
- if (!dev->ctx->stat)
- dev->ctx->stat = -ETIMEDOUT;
-
- dcp_done_task(data);
-}
-
-
-static irqreturn_t dcp_common_irq(int irq, void *context)
-{
- u32 msk;
- struct dcp_dev *dev = (struct dcp_dev *) context;
-
- del_timer(&dev->watchdog);
-
- msk = DCP_STAT_IRQ(dcp_read(dev, DCP_REG_STAT));
- dcp_clear(dev, msk, DCP_REG_STAT);
- if (msk == 0)
- return IRQ_NONE;
-
- dev->ctx->stat |= dcp_read(dev,
- dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
-
- if (msk & DCP_STAT_CHAN_1)
- tasklet_schedule(&dev->done_task);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t dcp_vmi_irq(int irq, void *context)
-{
- return dcp_common_irq(irq, context);
-}
-
-static irqreturn_t dcp_irq(int irq, void *context)
-{
- return dcp_common_irq(irq, context);
-}
-
-static void dcp_crypt(struct dcp_dev *dev, struct dcp_op *ctx)
-{
- dev->ctx = ctx;
-
- if ((ctx->flags & DCP_CBC) && ctx->req->info) {
- ctx->flags |= DCP_CBC_INIT;
- memcpy(dev->payload_base + AES_KEYSIZE_128,
- ctx->req->info, AES_KEYSIZE_128);
- }
-
- dcp_op_start(dev, 1);
-}
-
-static void dcp_queue_task(unsigned long data)
-{
- struct dcp_dev *dev = (struct dcp_dev *) data;
- struct crypto_async_request *async_req, *backlog;
- struct crypto_ablkcipher *tfm;
- struct dcp_op *ctx;
- struct dcp_dev_req_ctx *rctx;
- struct ablkcipher_request *req;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->queue_lock, flags);
-
- backlog = crypto_get_backlog(&dev->queue);
- async_req = crypto_dequeue_request(&dev->queue);
-
- spin_unlock_irqrestore(&dev->queue_lock, flags);
-
- if (!async_req)
- goto ret_nothing_done;
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- req = ablkcipher_request_cast(async_req);
- tfm = crypto_ablkcipher_reqtfm(req);
- rctx = ablkcipher_request_ctx(req);
- ctx = crypto_ablkcipher_ctx(tfm);
-
- if (!req->src || !req->dst)
- goto ret_nothing_done;
-
- ctx->flags |= rctx->mode;
- ctx->req = req;
-
- dcp_crypt(dev, ctx);
-
- return;
-
-ret_nothing_done:
- clear_bit(DCP_FLAG_BUSY, &dev->flags);
-}
-
-
-static int dcp_cra_init(struct crypto_tfm *tfm)
-{
- const char *name = tfm->__crt_alg->cra_name;
- struct dcp_op *ctx = crypto_tfm_ctx(tfm);
-
- tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_dev_req_ctx);
-
- ctx->fallback = crypto_alloc_ablkcipher(name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
-
- if (IS_ERR(ctx->fallback)) {
- dev_err(global_dev->dev, "Error allocating fallback algo %s\n",
- name);
- return PTR_ERR(ctx->fallback);
- }
-
- return 0;
-}
-
-static void dcp_cra_exit(struct crypto_tfm *tfm)
-{
- struct dcp_op *ctx = crypto_tfm_ctx(tfm);
-
- if (ctx->fallback)
- crypto_free_ablkcipher(ctx->fallback);
-
- ctx->fallback = NULL;
-}
-
-/* async interface */
-static int dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int len)
-{
- struct dcp_op *ctx = crypto_ablkcipher_ctx(tfm);
- unsigned int ret = 0;
- ctx->keylen = len;
- ctx->flags = 0;
- if (len == AES_KEYSIZE_128) {
- if (memcmp(ctx->key, key, AES_KEYSIZE_128)) {
- memcpy(ctx->key, key, len);
- ctx->flags |= DCP_NEW_KEY;
- }
- return 0;
- }
-
- ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- ctx->fallback->base.crt_flags |=
- (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_ablkcipher_setkey(ctx->fallback, key, len);
- if (ret) {
- struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);
-
- tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm_aux->crt_flags |=
- (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
- }
- return ret;
-}
-
-static int dcp_aes_cbc_crypt(struct ablkcipher_request *req, int mode)
-{
- struct dcp_dev_req_ctx *rctx = ablkcipher_request_ctx(req);
- struct dcp_dev *dev = global_dev;
- unsigned long flags;
- int err = 0;
-
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
- return -EINVAL;
-
- rctx->mode = mode;
-
- spin_lock_irqsave(&dev->queue_lock, flags);
- err = ablkcipher_enqueue_request(&dev->queue, req);
- spin_unlock_irqrestore(&dev->queue_lock, flags);
-
- flags = test_and_set_bit(DCP_FLAG_BUSY, &dev->flags);
-
- if (!(flags & DCP_FLAG_BUSY))
- tasklet_schedule(&dev->queue_task);
-
- return err;
-}
-
-static int dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
- struct dcp_op *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
-
- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- int err = 0;
- ablkcipher_request_set_tfm(req, ctx->fallback);
- err = crypto_ablkcipher_encrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
- return err;
- }
-
- return dcp_aes_cbc_crypt(req, DCP_AES | DCP_ENC | DCP_CBC);
-}
-
-static int dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
- struct dcp_op *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
-
- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- int err = 0;
- ablkcipher_request_set_tfm(req, ctx->fallback);
- err = crypto_ablkcipher_decrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
- return err;
- }
- return dcp_aes_cbc_crypt(req, DCP_AES | DCP_DEC | DCP_CBC);
-}
-
-static struct crypto_alg algs[] = {
- {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "dcp-cbc-aes",
- .cra_alignmask = 3,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_KEYSIZE_128,
- .cra_type = &crypto_ablkcipher_type,
- .cra_priority = 300,
- .cra_u.ablkcipher = {
- .min_keysize = AES_KEYSIZE_128,
- .max_keysize = AES_KEYSIZE_128,
- .setkey = dcp_aes_setkey,
- .encrypt = dcp_aes_cbc_encrypt,
- .decrypt = dcp_aes_cbc_decrypt,
- .ivsize = AES_KEYSIZE_128,
- }
-
- },
-};
-
-/* DCP bootstream verification interface: uses OTP key for crypto */
-static int dcp_bootstream_open(struct inode *inode, struct file *file)
-{
- file->private_data = container_of((file->private_data),
- struct dcp_dev, dcp_bootstream_misc);
- return 0;
-}
-
-static long dcp_bootstream_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct dcp_dev *dev = (struct dcp_dev *) file->private_data;
- void __user *argp = (void __user *)arg;
- int ret;
-
- if (dev == NULL)
- return -EBADF;
-
- if (cmd != DBS_ENC && cmd != DBS_DEC)
- return -EINVAL;
-
- if (copy_from_user(dev->payload_base, argp, 16))
- return -EFAULT;
-
- if (test_and_set_bit(DCP_FLAG_BUSY, &dev->flags))
- return -EAGAIN;
-
- dev->ctx = kzalloc(sizeof(struct dcp_op), GFP_KERNEL);
- if (!dev->ctx) {
- dev_err(dev->dev,
- "cannot allocate context for OTP crypto");
- clear_bit(DCP_FLAG_BUSY, &dev->flags);
- return -ENOMEM;
- }
-
- dev->ctx->flags = DCP_AES | DCP_ECB | DCP_OTP_KEY | DCP_CBC_INIT;
- dev->ctx->flags |= (cmd == DBS_ENC) ? DCP_ENC : DCP_DEC;
- dev->hw_pkg[0]->src = dev->payload_base_dma;
- dev->hw_pkg[0]->dst = dev->payload_base_dma;
- dev->hw_pkg[0]->size = 16;
-
- dcp_op_start(dev, 0);
-
- while (test_bit(DCP_FLAG_BUSY, &dev->flags))
- cpu_relax();
-
- ret = dev->ctx->stat;
- if (!ret && copy_to_user(argp, dev->payload_base, 16))
- ret = -EFAULT;
-
- kfree(dev->ctx);
-
- return ret;
-}
-
-static const struct file_operations dcp_bootstream_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = dcp_bootstream_ioctl,
- .open = dcp_bootstream_open,
-};
-
-static int dcp_probe(struct platform_device *pdev)
-{
- struct dcp_dev *dev = NULL;
- struct resource *r;
- int i, ret, j;
-
- dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- global_dev = dev;
- dev->dev = &pdev->dev;
-
- platform_set_drvdata(pdev, dev);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->dcp_regs_base = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(dev->dcp_regs_base))
- return PTR_ERR(dev->dcp_regs_base);
-
- dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL);
- udelay(10);
- dcp_clear(dev, DCP_CTRL_SFRST | DCP_CTRL_CLKGATE, DCP_REG_CTRL);
-
- dcp_write(dev, DCP_CTRL_GATHER_RES_WRITE |
- DCP_CTRL_ENABLE_CONTEXT_CACHE | DCP_CTRL_CH_IRQ_E_1,
- DCP_REG_CTRL);
-
- dcp_write(dev, DCP_CHAN_CTRL_ENABLE_1, DCP_REG_CHAN_CTRL);
-
- for (i = 0; i < 4; i++)
- dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, i));
-
- dcp_clear(dev, -1, DCP_REG_STAT);
-
-
- r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!r) {
- dev_err(&pdev->dev, "can't get IRQ resource (0)\n");
- return -EIO;
- }
- dev->dcp_vmi_irq = r->start;
- ret = devm_request_irq(&pdev->dev, dev->dcp_vmi_irq, dcp_vmi_irq, 0,
- "dcp", dev);
- if (ret != 0) {
- dev_err(&pdev->dev, "can't request_irq (0)\n");
- return -EIO;
- }
-
- r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
- if (!r) {
- dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
- return -EIO;
- }
- dev->dcp_irq = r->start;
- ret = devm_request_irq(&pdev->dev, dev->dcp_irq, dcp_irq, 0, "dcp",
- dev);
- if (ret != 0) {
- dev_err(&pdev->dev, "can't request_irq (1)\n");
- return -EIO;
- }
-
- dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev,
- DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
- &dev->hw_phys_pkg,
- GFP_KERNEL);
- if (!dev->hw_pkg[0]) {
- dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
- return -ENOMEM;
- }
-
- for (i = 1; i < DCP_MAX_PKG; i++) {
- dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg
- + i * sizeof(struct dcp_hw_packet);
- dev->hw_pkg[i] = dev->hw_pkg[i - 1] + 1;
- }
- dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg;
-
-
- dev->payload_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
- &dev->payload_base_dma, GFP_KERNEL);
- if (!dev->payload_base) {
- dev_err(&pdev->dev, "Could not allocate memory for key\n");
- ret = -ENOMEM;
- goto err_free_hw_packet;
- }
- tasklet_init(&dev->queue_task, dcp_queue_task,
- (unsigned long) dev);
- tasklet_init(&dev->done_task, dcp_done_task,
- (unsigned long) dev);
- spin_lock_init(&dev->queue_lock);
-
- crypto_init_queue(&dev->queue, 10);
-
- init_timer(&dev->watchdog);
- dev->watchdog.function = &dcp_watchdog;
- dev->watchdog.data = (unsigned long)dev;
-
- dev->dcp_bootstream_misc.minor = MISC_DYNAMIC_MINOR,
- dev->dcp_bootstream_misc.name = "dcpboot",
- dev->dcp_bootstream_misc.fops = &dcp_bootstream_fops,
- ret = misc_register(&dev->dcp_bootstream_misc);
- if (ret != 0) {
- dev_err(dev->dev, "Unable to register misc device\n");
- goto err_free_key_iv;
- }
-
- for (i = 0; i < ARRAY_SIZE(algs); i++) {
- algs[i].cra_priority = 300;
- algs[i].cra_ctxsize = sizeof(struct dcp_op);
- algs[i].cra_module = THIS_MODULE;
- algs[i].cra_init = dcp_cra_init;
- algs[i].cra_exit = dcp_cra_exit;
- if (crypto_register_alg(&algs[i])) {
- dev_err(&pdev->dev, "register algorithm failed\n");
- ret = -ENOMEM;
- goto err_unregister;
- }
- }
- dev_notice(&pdev->dev, "DCP crypto enabled.!\n");
-
- return 0;
-
-err_unregister:
- for (j = 0; j < i; j++)
- crypto_unregister_alg(&algs[j]);
-err_free_key_iv:
- tasklet_kill(&dev->done_task);
- tasklet_kill(&dev->queue_task);
- dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
- dev->payload_base_dma);
-err_free_hw_packet:
- dma_free_coherent(&pdev->dev, DCP_MAX_PKG *
- sizeof(struct dcp_hw_packet), dev->hw_pkg[0],
- dev->hw_phys_pkg);
-
- return ret;
-}
-
-static int dcp_remove(struct platform_device *pdev)
-{
- struct dcp_dev *dev;
- int j;
- dev = platform_get_drvdata(pdev);
-
- misc_deregister(&dev->dcp_bootstream_misc);
-
- for (j = 0; j < ARRAY_SIZE(algs); j++)
- crypto_unregister_alg(&algs[j]);
-
- tasklet_kill(&dev->done_task);
- tasklet_kill(&dev->queue_task);
-
- dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
- dev->payload_base_dma);
-
- dma_free_coherent(&pdev->dev,
- DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
- dev->hw_pkg[0], dev->hw_phys_pkg);
-
- return 0;
-}
-
-static struct of_device_id fs_dcp_of_match[] = {
- { .compatible = "fsl-dcp"},
- {},
-};
-
-static struct platform_driver fs_dcp_driver = {
- .probe = dcp_probe,
- .remove = dcp_remove,
- .driver = {
- .name = "fsl-dcp",
- .owner = THIS_MODULE,
- .of_match_table = fs_dcp_of_match
- }
-};
-
-module_platform_driver(fs_dcp_driver);
-
-
-MODULE_AUTHOR("Tobias Rauter <tobias.rauter@gmail.com>");
-MODULE_DESCRIPTION("Freescale DCP Crypto Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 9dd6e01eac33..f757a0f428bd 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1410,14 +1410,12 @@ static const struct platform_device_info ixp_dev_info __initdata = {
static int __init ixp_module_init(void)
{
int num = ARRAY_SIZE(ixp4xx_algos);
- int i, err ;
+ int i, err;
pdev = platform_device_register_full(&ixp_dev_info);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
- dev = &pdev->dev;
-
spin_lock_init(&desc_lock);
spin_lock_init(&emerg_lock);
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
new file mode 100644
index 000000000000..a6db7fa6f891
--- /dev/null
+++ b/drivers/crypto/mxs-dcp.c
@@ -0,0 +1,1100 @@
+/*
+ * Freescale i.MX23/i.MX28 Data Co-Processor driver
+ *
+ * Copyright (C) 2013 Marek Vasut <marex@denx.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/stmp_device.h>
+
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/internal/hash.h>
+
+#define DCP_MAX_CHANS 4
+#define DCP_BUF_SZ PAGE_SIZE
+
+/* DCP DMA descriptor. */
+struct dcp_dma_desc {
+ uint32_t next_cmd_addr;
+ uint32_t control0;
+ uint32_t control1;
+ uint32_t source;
+ uint32_t destination;
+ uint32_t size;
+ uint32_t payload;
+ uint32_t status;
+};
+
+/* Coherent aligned block for bounce buffering. */
+struct dcp_coherent_block {
+ uint8_t aes_in_buf[DCP_BUF_SZ];
+ uint8_t aes_out_buf[DCP_BUF_SZ];
+ uint8_t sha_in_buf[DCP_BUF_SZ];
+
+ uint8_t aes_key[2 * AES_KEYSIZE_128];
+ uint8_t sha_digest[SHA256_DIGEST_SIZE];
+
+ struct dcp_dma_desc desc[DCP_MAX_CHANS];
+};
+
+struct dcp {
+ struct device *dev;
+ void __iomem *base;
+
+ uint32_t caps;
+
+ struct dcp_coherent_block *coh;
+
+ struct completion completion[DCP_MAX_CHANS];
+ struct mutex mutex[DCP_MAX_CHANS];
+ struct task_struct *thread[DCP_MAX_CHANS];
+ struct crypto_queue queue[DCP_MAX_CHANS];
+};
+
+enum dcp_chan {
+ DCP_CHAN_HASH_SHA = 0,
+ DCP_CHAN_CRYPTO = 2,
+};
+
+struct dcp_async_ctx {
+ /* Common context */
+ enum dcp_chan chan;
+ uint32_t fill;
+
+ /* SHA Hash-specific context */
+ struct mutex mutex;
+ uint32_t alg;
+ unsigned int hot:1;
+
+ /* Crypto-specific context */
+ unsigned int enc:1;
+ unsigned int ecb:1;
+ struct crypto_ablkcipher *fallback;
+ unsigned int key_len;
+ uint8_t key[AES_KEYSIZE_128];
+};
+
+struct dcp_sha_req_ctx {
+ unsigned int init:1;
+ unsigned int fini:1;
+};
+
+/*
+ * There can be only one instance of the MXS DCP due to the
+ * design of the Linux Crypto API.
+ */
+static struct dcp *global_sdcp;
+static DEFINE_MUTEX(global_mutex);
+
+/* DCP register layout. */
+#define MXS_DCP_CTRL 0x00
+#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES (1 << 23)
+#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING (1 << 22)
+
+#define MXS_DCP_STAT 0x10
+#define MXS_DCP_STAT_CLR 0x18
+#define MXS_DCP_STAT_IRQ_MASK 0xf
+
+#define MXS_DCP_CHANNELCTRL 0x20
+#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK 0xff
+
+#define MXS_DCP_CAPABILITY1 0x40
+#define MXS_DCP_CAPABILITY1_SHA256 (4 << 16)
+#define MXS_DCP_CAPABILITY1_SHA1 (1 << 16)
+#define MXS_DCP_CAPABILITY1_AES128 (1 << 0)
+
+#define MXS_DCP_CONTEXT 0x50
+
+#define MXS_DCP_CH_N_CMDPTR(n) (0x100 + ((n) * 0x40))
+
+#define MXS_DCP_CH_N_SEMA(n) (0x110 + ((n) * 0x40))
+
+#define MXS_DCP_CH_N_STAT(n) (0x120 + ((n) * 0x40))
+#define MXS_DCP_CH_N_STAT_CLR(n) (0x128 + ((n) * 0x40))
+
+/* DMA descriptor bits. */
+#define MXS_DCP_CONTROL0_HASH_TERM (1 << 13)
+#define MXS_DCP_CONTROL0_HASH_INIT (1 << 12)
+#define MXS_DCP_CONTROL0_PAYLOAD_KEY (1 << 11)
+#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT (1 << 8)
+#define MXS_DCP_CONTROL0_CIPHER_INIT (1 << 9)
+#define MXS_DCP_CONTROL0_ENABLE_HASH (1 << 6)
+#define MXS_DCP_CONTROL0_ENABLE_CIPHER (1 << 5)
+#define MXS_DCP_CONTROL0_DECR_SEMAPHORE (1 << 1)
+#define MXS_DCP_CONTROL0_INTERRUPT (1 << 0)
+
+#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256 (2 << 16)
+#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1 (0 << 16)
+#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC (1 << 4)
+#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB (0 << 4)
+#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 (0 << 0)
+
+static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
+{
+ struct dcp *sdcp = global_sdcp;
+ const int chan = actx->chan;
+ uint32_t stat;
+ int ret;
+ struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
+
+ dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
+ DMA_TO_DEVICE);
+
+ reinit_completion(&sdcp->completion[chan]);
+
+ /* Clear status register. */
+ writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
+
+ /* Load the DMA descriptor. */
+ writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
+
+ /* Increment the semaphore to start the DMA transfer. */
+ writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));
+
+ ret = wait_for_completion_timeout(&sdcp->completion[chan],
+ msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
+ chan, readl(sdcp->base + MXS_DCP_STAT));
+ return -ETIMEDOUT;
+ }
+
+ stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
+ if (stat & 0xff) {
+ dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
+ chan, stat);
+ return -EINVAL;
+ }
+
+ dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);
+
+ return 0;
+}
+
+/*
+ * Encryption (AES128)
+ */
+static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, int init)
+{
+ struct dcp *sdcp = global_sdcp;
+ struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
+ int ret;
+
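+ /* Map the key/IV payload and the input/output bounce buffers for DMA. */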
+ dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
+ 2 * AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
+ dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
+ DCP_BUF_SZ, DMA_TO_DEVICE);
+ dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
+ DCP_BUF_SZ, DMA_FROM_DEVICE);
+
+ /* Fill in the DMA descriptor. */
+ desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
+ MXS_DCP_CONTROL0_INTERRUPT |
+ MXS_DCP_CONTROL0_ENABLE_CIPHER;
+
+ /* Payload contains the key. */
+ desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
+
+ if (actx->enc)
+ desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
+ if (init)
+ desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;
+
+ desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;
+
+ if (actx->ecb)
+ desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
+ else
+ desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
+
+ desc->next_cmd_addr = 0;
+ desc->source = src_phys;
+ desc->destination = dst_phys;
+ desc->size = actx->fill;
+ desc->payload = key_phys;
+ desc->status = 0;
+
+ ret = mxs_dcp_start_dma(actx);
+
+ dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
+ dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
+ dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
+
+ return ret;
+}
+
+static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
+{
+ struct dcp *sdcp = global_sdcp;
+
+ struct ablkcipher_request *req = ablkcipher_request_cast(arq);
+ struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
+
+ struct scatterlist *dst = req->dst;
+ struct scatterlist *src = req->src;
+ const int nents = sg_nents(req->src);
+
+ const int out_off = DCP_BUF_SZ;
+ uint8_t *in_buf = sdcp->coh->aes_in_buf;
+ uint8_t *out_buf = sdcp->coh->aes_out_buf;
+
+ uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
+ uint32_t dst_off = 0;
+
+ uint8_t *key = sdcp->coh->aes_key;
+
+ int ret = 0;
+ int split = 0;
+ unsigned int i, len, clen, rem = 0;
+ int init = 0;
+
+ actx->fill = 0;
+
+ /* Copy the key from the temporary location. */
+ memcpy(key, actx->key, actx->key_len);
+
+ if (!actx->ecb) {
+ /* Copy the CBC IV just past the key. */
+ memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
+ /* CBC needs the INIT set. */
+ init = 1;
+ } else {
+ memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
+ }
+
+ for_each_sg(req->src, src, nents, i) {
+ src_buf = sg_virt(src);
+ len = sg_dma_len(src);
+
+ do {
+ if (actx->fill + len > out_off)
+ clen = out_off - actx->fill;
+ else
+ clen = len;
+
+ memcpy(in_buf + actx->fill, src_buf, clen);
+ len -= clen;
+ src_buf += clen;
+ actx->fill += clen;
+
+ /*
+ * If we filled the buffer or this is the last SG,
+ * submit the buffer.
+ */
+ if (actx->fill == out_off || sg_is_last(src)) {
+ ret = mxs_dcp_run_aes(actx, init);
+ if (ret)
+ return ret;
+ init = 0;
+
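+ /*
+ * Copy the output bounce buffer back into the destination
+ * scatterlist, splitting across entries as each one fills up.
+ */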
+ out_tmp = out_buf;
+ while (dst && actx->fill) {
+ if (!split) {
+ dst_buf = sg_virt(dst);
+ dst_off = 0;
+ }
+ rem = min(sg_dma_len(dst) - dst_off,
+ actx->fill);
+
+ memcpy(dst_buf + dst_off, out_tmp, rem);
+ out_tmp += rem;
+ dst_off += rem;
+ actx->fill -= rem;
+
+ if (dst_off == sg_dma_len(dst)) {
+ dst = sg_next(dst);
+ split = 0;
+ } else {
+ split = 1;
+ }
+ }
+ }
+ } while (len);
+ }
+
+ return ret;
+}
+
+static int dcp_chan_thread_aes(void *data)
+{
+ struct dcp *sdcp = global_sdcp;
+ const int chan = DCP_CHAN_CRYPTO;
+
+ struct crypto_async_request *backlog;
+ struct crypto_async_request *arq;
+
+ int ret;
+
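+ /* Sleep until woken, then drain the channel queue one request at a time. */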
+ do {
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ mutex_lock(&sdcp->mutex[chan]);
+ backlog = crypto_get_backlog(&sdcp->queue[chan]);
+ arq = crypto_dequeue_request(&sdcp->queue[chan]);
+ mutex_unlock(&sdcp->mutex[chan]);
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ if (arq) {
+ ret = mxs_dcp_aes_block_crypt(arq);
+ arq->complete(arq, ret);
+ continue;
+ }
+
+ schedule();
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
+{
+ struct crypto_tfm *tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ int ret;
+
+ ablkcipher_request_set_tfm(req, ctx->fallback);
+
+ if (enc)
+ ret = crypto_ablkcipher_encrypt(req);
+ else
+ ret = crypto_ablkcipher_decrypt(req);
+
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+
+ return ret;
+}
+
+static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
+{
+ struct dcp *sdcp = global_sdcp;
+ struct crypto_async_request *arq = &req->base;
+ struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
+ int ret;
+
+ if (unlikely(actx->key_len != AES_KEYSIZE_128))
+ return mxs_dcp_block_fallback(req, enc);
+
+ actx->enc = enc;
+ actx->ecb = ecb;
+ actx->chan = DCP_CHAN_CRYPTO;
+
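+ /* Queue the request and wake the per-channel kthread to process it. */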
+ mutex_lock(&sdcp->mutex[actx->chan]);
+ ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
+ mutex_unlock(&sdcp->mutex[actx->chan]);
+
+ wake_up_process(sdcp->thread[actx->chan]);
+
+ return -EINPROGRESS;
+}
+
+static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return mxs_dcp_aes_enqueue(req, 0, 1);
+}
+
+static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return mxs_dcp_aes_enqueue(req, 1, 1);
+}
+
+static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return mxs_dcp_aes_enqueue(req, 0, 0);
+}
+
+static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return mxs_dcp_aes_enqueue(req, 1, 0);
+}
+
+static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
+ unsigned int ret;
+
+ /*
+ * AES-128 is supported by the hardware, so store the key in the
+ * temporary buffer and exit. We must use the temporary buffer here,
+ * since there can still be an operation in progress.
+ */
+ actx->key_len = len;
+ if (len == AES_KEYSIZE_128) {
+ memcpy(actx->key, key, len);
+ return 0;
+ }
+
+ /* Check if the key size is supported by the kernel at all. */
+ if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+ tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
+ /*
+ * If the requested AES key size is not supported by the hardware,
+ * but is supported by the in-kernel software implementation, use
+ * the software fallback.
+ */
+ actx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ actx->fallback->base.crt_flags |=
+ tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK;
+
+ ret = crypto_ablkcipher_setkey(actx->fallback, key, len);
+ if (!ret)
+ return 0;
+
+ tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->base.crt_flags |=
+ actx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK;
+
+ return ret;
+}
+
+static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
+{
+ const char *name = tfm->__crt_alg->cra_name;
+ const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
+ struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+ struct crypto_ablkcipher *blk;
+
+ blk = crypto_alloc_ablkcipher(name, 0, flags);
+ if (IS_ERR(blk))
+ return PTR_ERR(blk);
+
+ actx->fallback = blk;
+ tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_async_ctx);
+ return 0;
+}
+
+static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
+{
+ struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ablkcipher(actx->fallback);
+ actx->fallback = NULL;
+}
+
+/*
+ * Hashing (SHA1/SHA256)
+ */
+static int mxs_dcp_run_sha(struct ahash_request *req)
+{
+ struct dcp *sdcp = global_sdcp;
+ int ret;
+
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+ struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
+ dma_addr_t digest_phys = dma_map_single(sdcp->dev,
+ sdcp->coh->sha_digest,
+ SHA256_DIGEST_SIZE,
+ DMA_FROM_DEVICE);
+
+ dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
+ DCP_BUF_SZ, DMA_TO_DEVICE);
+
+ /* Fill in the DMA descriptor. */
+ desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
+ MXS_DCP_CONTROL0_INTERRUPT |
+ MXS_DCP_CONTROL0_ENABLE_HASH;
+ if (rctx->init)
+ desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;
+
+ desc->control1 = actx->alg;
+ desc->next_cmd_addr = 0;
+ desc->source = buf_phys;
+ desc->destination = 0;
+ desc->size = actx->fill;
+ desc->payload = 0;
+ desc->status = 0;
+
+ /* Set HASH_TERM bit for last transfer block. */
+ if (rctx->fini) {
+ desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
+ desc->payload = digest_phys;
+ }
+
+ ret = mxs_dcp_start_dma(actx);
+
+ dma_unmap_single(sdcp->dev, digest_phys, SHA256_DIGEST_SIZE,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
+{
+ struct dcp *sdcp = global_sdcp;
+
+ struct ahash_request *req = ahash_request_cast(arq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+ struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
+ const int nents = sg_nents(req->src);
+
+ uint8_t *digest = sdcp->coh->sha_digest;
+ uint8_t *in_buf = sdcp->coh->sha_in_buf;
+
+ uint8_t *src_buf;
+
+ struct scatterlist *src;
+
+ unsigned int i, len, clen;
+ int ret;
+
+ int fin = rctx->fini;
+ if (fin)
+ rctx->fini = 0;
+
+ for_each_sg(req->src, src, nents, i) {
+ src_buf = sg_virt(src);
+ len = sg_dma_len(src);
+
+ do {
+ if (actx->fill + len > DCP_BUF_SZ)
+ clen = DCP_BUF_SZ - actx->fill;
+ else
+ clen = len;
+
+ memcpy(in_buf + actx->fill, src_buf, clen);
+ len -= clen;
+ src_buf += clen;
+ actx->fill += clen;
+
+ /*
+ * If we filled the buffer and still have some
+ * more data, submit the buffer.
+ */
+ if (len && actx->fill == DCP_BUF_SZ) {
+ ret = mxs_dcp_run_sha(req);
+ if (ret)
+ return ret;
+ actx->fill = 0;
+ rctx->init = 0;
+ }
+ } while (len);
+ }
+
+ if (fin) {
+ rctx->fini = 1;
+
+ /* Submit whatever is left. */
+ ret = mxs_dcp_run_sha(req);
+ if (ret || !req->result)
+ return ret;
+ actx->fill = 0;
+
+ /* For some reason, the result is flipped. */
+ for (i = 0; i < halg->digestsize; i++)
+ req->result[i] = digest[halg->digestsize - i - 1];
+ }
+
+ return 0;
+}
+
+static int dcp_chan_thread_sha(void *data)
+{
+ struct dcp *sdcp = global_sdcp;
+ const int chan = DCP_CHAN_HASH_SHA;
+
+ struct crypto_async_request *backlog;
+ struct crypto_async_request *arq;
+
+ struct dcp_sha_req_ctx *rctx;
+
+ struct ahash_request *req;
+ int ret, fini;
+
+ do {
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ mutex_lock(&sdcp->mutex[chan]);
+ backlog = crypto_get_backlog(&sdcp->queue[chan]);
+ arq = crypto_dequeue_request(&sdcp->queue[chan]);
+ mutex_unlock(&sdcp->mutex[chan]);
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ if (arq) {
+ req = ahash_request_cast(arq);
+ rctx = ahash_request_ctx(req);
+
+ ret = dcp_sha_req_to_buf(arq);
+ fini = rctx->fini;
+ arq->complete(arq, ret);
+ if (!fini)
+ continue;
+ }
+
+ schedule();
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+static int dcp_sha_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+
+ struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
+
+ /*
+ * Start hashing session. The code below only inits the
+ * hashing session context, nothing more.
+ */
+ memset(actx, 0, sizeof(*actx));
+
+ if (strcmp(halg->base.cra_name, "sha1") == 0)
+ actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
+ else
+ actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;
+
+ actx->fill = 0;
+ actx->hot = 0;
+ actx->chan = DCP_CHAN_HASH_SHA;
+
+ mutex_init(&actx->mutex);
+
+ return 0;
+}
+
+static int dcp_sha_update_fx(struct ahash_request *req, int fini)
+{
+ struct dcp *sdcp = global_sdcp;
+
+ struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+
+ int ret;
+
+ /*
+ * Ignore requests that have no data in them and are not
+ * the trailing requests in the stream of requests.
+ */
+ if (!req->nbytes && !fini)
+ return 0;
+
+ mutex_lock(&actx->mutex);
+
+ rctx->fini = fini;
+
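+ /* The first update of a session is marked so that HASH_INIT gets set. */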
+ if (!actx->hot) {
+ actx->hot = 1;
+ rctx->init = 1;
+ }
+
+ mutex_lock(&sdcp->mutex[actx->chan]);
+ ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
+ mutex_unlock(&sdcp->mutex[actx->chan]);
+
+ wake_up_process(sdcp->thread[actx->chan]);
+ mutex_unlock(&actx->mutex);
+
+ return -EINPROGRESS;
+}
+
+static int dcp_sha_update(struct ahash_request *req)
+{
+ return dcp_sha_update_fx(req, 0);
+}
+
+static int dcp_sha_final(struct ahash_request *req)
+{
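+ /* The final step carries no new data; just flush whatever is buffered. */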
+ ahash_request_set_crypt(req, NULL, req->result, 0);
+ req->nbytes = 0;
+ return dcp_sha_update_fx(req, 1);
+}
+
+static int dcp_sha_finup(struct ahash_request *req)
+{
+ return dcp_sha_update_fx(req, 1);
+}
+
+static int dcp_sha_digest(struct ahash_request *req)
+{
+ int ret;
+
+ ret = dcp_sha_init(req);
+ if (ret)
+ return ret;
+
+ return dcp_sha_finup(req);
+}
+
+static int dcp_sha_cra_init(struct crypto_tfm *tfm)
+{
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct dcp_sha_req_ctx));
+ return 0;
+}
+
+static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+/* AES 128 ECB and AES 128 CBC */
+static struct crypto_alg dcp_aes_algs[] = {
+ {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-dcp",
+ .cra_priority = 400,
+ .cra_alignmask = 15,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_init = mxs_dcp_aes_fallback_init,
+ .cra_exit = mxs_dcp_aes_fallback_exit,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mxs_dcp_aes_setkey,
+ .encrypt = mxs_dcp_aes_ecb_encrypt,
+ .decrypt = mxs_dcp_aes_ecb_decrypt
+ },
+ },
+ }, {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-dcp",
+ .cra_priority = 400,
+ .cra_alignmask = 15,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_init = mxs_dcp_aes_fallback_init,
+ .cra_exit = mxs_dcp_aes_fallback_exit,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mxs_dcp_aes_setkey,
+ .encrypt = mxs_dcp_aes_cbc_encrypt,
+ .decrypt = mxs_dcp_aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ },
+ },
+};
+
+/* SHA1 */
+static struct ahash_alg dcp_sha1_alg = {
+ .init = dcp_sha_init,
+ .update = dcp_sha_update,
+ .final = dcp_sha_final,
+ .finup = dcp_sha_finup,
+ .digest = dcp_sha_digest,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-dcp",
+ .cra_priority = 400,
+ .cra_alignmask = 63,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = dcp_sha_cra_init,
+ .cra_exit = dcp_sha_cra_exit,
+ },
+ },
+};
+
+/* SHA256 */
+static struct ahash_alg dcp_sha256_alg = {
+ .init = dcp_sha_init,
+ .update = dcp_sha_update,
+ .final = dcp_sha_final,
+ .finup = dcp_sha_finup,
+ .digest = dcp_sha_digest,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-dcp",
+ .cra_priority = 400,
+ .cra_alignmask = 63,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = dcp_sha_cra_init,
+ .cra_exit = dcp_sha_cra_exit,
+ },
+ },
+};
+
+static irqreturn_t mxs_dcp_irq(int irq, void *context)
+{
+ struct dcp *sdcp = context;
+ uint32_t stat;
+ int i;
+
+ stat = readl(sdcp->base + MXS_DCP_STAT);
+ stat &= MXS_DCP_STAT_IRQ_MASK;
+ if (!stat)
+ return IRQ_NONE;
+
+ /* Clear the interrupts. */
+ writel(stat, sdcp->base + MXS_DCP_STAT_CLR);
+
+ /* Complete the DMA requests that finished. */
+ for (i = 0; i < DCP_MAX_CHANS; i++)
+ if (stat & (1 << i))
+ complete(&sdcp->completion[i]);
+
+ return IRQ_HANDLED;
+}
+
+static int mxs_dcp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dcp *sdcp = NULL;
+ int i, ret;
+
+ struct resource *iores;
+ int dcp_vmi_irq, dcp_irq;
+
+ mutex_lock(&global_mutex);
+ if (global_sdcp) {
+ dev_err(dev, "Only one DCP instance allowed!\n");
+ ret = -ENODEV;
+ goto err_mutex;
+ }
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dcp_vmi_irq = platform_get_irq(pdev, 0);
+ dcp_irq = platform_get_irq(pdev, 1);
+ if (dcp_vmi_irq < 0 || dcp_irq < 0) {
+ ret = -EINVAL;
+ goto err_mutex;
+ }
+
+ sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
+ if (!sdcp) {
+ ret = -ENOMEM;
+ goto err_mutex;
+ }
+
+ sdcp->dev = dev;
+ sdcp->base = devm_ioremap_resource(dev, iores);
+ if (IS_ERR(sdcp->base)) {
+ ret = PTR_ERR(sdcp->base);
+ goto err_mutex;
+ }
+
+ ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
+ "dcp-vmi-irq", sdcp);
+ if (ret) {
+ dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
+ goto err_mutex;
+ }
+
+ ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
+ "dcp-irq", sdcp);
+ if (ret) {
+ dev_err(dev, "Failed to claim DCP IRQ!\n");
+ goto err_mutex;
+ }
+
+ /* Allocate coherent helper block. */
+ sdcp->coh = kzalloc(sizeof(struct dcp_coherent_block), GFP_KERNEL);
+ if (!sdcp->coh) {
+ dev_err(dev, "Error allocating coherent block\n");
+ ret = -ENOMEM;
+ goto err_mutex;
+ }
+
+ /* Restart the DCP block. */
+ stmp_reset_block(sdcp->base);
+
+ /* Initialize control register. */
+ writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
+ MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
+ sdcp->base + MXS_DCP_CTRL);
+
+ /* Enable all DCP DMA channels. */
+ writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
+ sdcp->base + MXS_DCP_CHANNELCTRL);
+
+ /*
+ * We do not enable context switching. Give the context buffer a
+ * pointer to an illegal address so if context switching is
+ * inadvertently enabled, the DCP will return an error instead of
+ * trashing good memory. The DCP DMA cannot access ROM, so any ROM
+ * address will do.
+ */
+ writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
+ for (i = 0; i < DCP_MAX_CHANS; i++)
+ writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
+ writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);
+
+ global_sdcp = sdcp;
+
+ platform_set_drvdata(pdev, sdcp);
+
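+	/* Per-channel locking, completion and a 50-entry request backlog. */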
+ for (i = 0; i < DCP_MAX_CHANS; i++) {
+ mutex_init(&sdcp->mutex[i]);
+ init_completion(&sdcp->completion[i]);
+ crypto_init_queue(&sdcp->queue[i], 50);
+ }
+
+ /* Create the SHA and AES handler threads. */
+ sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
+ NULL, "mxs_dcp_chan/sha");
+ if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
+ dev_err(dev, "Error starting SHA thread!\n");
+ ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
+ goto err_free_coherent;
+ }
+
+ sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
+ NULL, "mxs_dcp_chan/aes");
+ if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
+		dev_err(dev, "Error starting AES thread!\n");
+ ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
+ goto err_destroy_sha_thread;
+ }
+
+ /* Register the various crypto algorithms. */
+ sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
+
+ if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
+ ret = crypto_register_algs(dcp_aes_algs,
+ ARRAY_SIZE(dcp_aes_algs));
+ if (ret) {
+ /* Failed to register algorithm. */
+ dev_err(dev, "Failed to register AES crypto!\n");
+ goto err_destroy_aes_thread;
+ }
+ }
+
+ if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
+ ret = crypto_register_ahash(&dcp_sha1_alg);
+ if (ret) {
+ dev_err(dev, "Failed to register %s hash!\n",
+ dcp_sha1_alg.halg.base.cra_name);
+ goto err_unregister_aes;
+ }
+ }
+
+ if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
+ ret = crypto_register_ahash(&dcp_sha256_alg);
+ if (ret) {
+ dev_err(dev, "Failed to register %s hash!\n",
+ dcp_sha256_alg.halg.base.cra_name);
+ goto err_unregister_sha1;
+ }
+ }
+
+ return 0;
+
+err_unregister_sha1:
+ if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
+ crypto_unregister_ahash(&dcp_sha1_alg);
+
+err_unregister_aes:
+ if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
+ crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+
+err_destroy_aes_thread:
+ kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
+
+err_destroy_sha_thread:
+ kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
+
+err_free_coherent:
+ kfree(sdcp->coh);
+err_mutex:
+ mutex_unlock(&global_mutex);
+ return ret;
+}
+
+static int mxs_dcp_remove(struct platform_device *pdev)
+{
+ struct dcp *sdcp = platform_get_drvdata(pdev);
+
+ kfree(sdcp->coh);
+
+ if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
+ crypto_unregister_ahash(&dcp_sha256_alg);
+
+ if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
+ crypto_unregister_ahash(&dcp_sha1_alg);
+
+ if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
+ crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+
+ kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
+ kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
+
+ platform_set_drvdata(pdev, NULL);
+
+ mutex_lock(&global_mutex);
+ global_sdcp = NULL;
+ mutex_unlock(&global_mutex);
+
+ return 0;
+}
+
+static const struct of_device_id mxs_dcp_dt_ids[] = {
+ { .compatible = "fsl,imx23-dcp", .data = NULL, },
+ { .compatible = "fsl,imx28-dcp", .data = NULL, },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
+
+static struct platform_driver mxs_dcp_driver = {
+ .probe = mxs_dcp_probe,
+ .remove = mxs_dcp_remove,
+ .driver = {
+ .name = "mxs-dcp",
+ .owner = THIS_MODULE,
+ .of_match_table = mxs_dcp_dt_ids,
+ },
+};
+
+module_platform_driver(mxs_dcp_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("Freescale MXS DCP Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-dcp");
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 6c4c000671c5..1e5481d88a26 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -158,6 +158,15 @@ static inline unsigned long nx842_get_scatterlist_size(
return sl->entry_nr * sizeof(struct nx842_slentry);
}
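+/*
+ * Return the physical address of a buffer that may live either in lowmem
+ * (plain __pa) or in vmalloc space (translate through the backing page).
+ */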
+static inline unsigned long nx842_get_pa(void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ return page_to_phys(vmalloc_to_page(addr))
+ + offset_in_page(addr);
+ else
+ return __pa(addr);
+}
+
static int nx842_build_scatterlist(unsigned long buf, int len,
struct nx842_scatterlist *sl)
{
@@ -168,7 +177,7 @@ static int nx842_build_scatterlist(unsigned long buf, int len,
entry = sl->entries;
while (len) {
- entry->ptr = __pa(buf);
+ entry->ptr = nx842_get_pa((void *)buf);
nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE);
if (nextpage < buf + len) {
/* we aren't at the end yet */
@@ -370,8 +379,8 @@ int nx842_compress(const unsigned char *in, unsigned int inlen,
op.flags = NX842_OP_COMPRESS;
csbcpb = &workmem->csbcpb;
memset(csbcpb, 0, sizeof(*csbcpb));
- op.csbcpb = __pa(csbcpb);
- op.out = __pa(slout.entries);
+ op.csbcpb = nx842_get_pa(csbcpb);
+ op.out = nx842_get_pa(slout.entries);
for (i = 0; i < hdr->blocks_nr; i++) {
/*
@@ -401,13 +410,13 @@ int nx842_compress(const unsigned char *in, unsigned int inlen,
*/
if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
/* Create direct DDE */
- op.in = __pa(inbuf);
+ op.in = nx842_get_pa((void *)inbuf);
op.inlen = max_sync_size;
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(inbuf, max_sync_size, &slin);
- op.in = __pa(slin.entries);
+ op.in = nx842_get_pa(slin.entries);
op.inlen = -nx842_get_scatterlist_size(&slin);
}
@@ -565,7 +574,7 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
op.flags = NX842_OP_DECOMPRESS;
csbcpb = &workmem->csbcpb;
memset(csbcpb, 0, sizeof(*csbcpb));
- op.csbcpb = __pa(csbcpb);
+ op.csbcpb = nx842_get_pa(csbcpb);
/*
* max_sync_size may have changed since compression,
@@ -597,12 +606,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
if (likely((inbuf & NX842_HW_PAGE_MASK) ==
((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) {
/* Create direct DDE */
- op.in = __pa(inbuf);
+ op.in = nx842_get_pa((void *)inbuf);
op.inlen = hdr->sizes[i];
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin);
- op.in = __pa(slin.entries);
+ op.in = nx842_get_pa(slin.entries);
op.inlen = -nx842_get_scatterlist_size(&slin);
}
@@ -613,12 +622,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
*/
if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
/* Create direct DDE */
- op.out = __pa(outbuf);
+ op.out = nx842_get_pa((void *)outbuf);
op.outlen = max_sync_size;
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(outbuf, max_sync_size, &slout);
- op.out = __pa(slout.entries);
+ op.out = nx842_get_pa(slout.entries);
op.outlen = -nx842_get_scatterlist_size(&slout);
}
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a9ccbf14096e..dde41f1df608 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -784,6 +784,7 @@ static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
struct omap_aes_dev *dd = NULL;
+ int err;
/* Find AES device, currently picks the first device */
spin_lock_bh(&list_lock);
@@ -792,7 +793,13 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm)
}
spin_unlock_bh(&list_lock);
- pm_runtime_get_sync(dd->dev);
+ err = pm_runtime_get_sync(dd->dev);
+ if (err < 0) {
+ dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
+ __func__, err);
+ return err;
+ }
+
tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
return 0;
@@ -1182,7 +1189,12 @@ static int omap_aes_probe(struct platform_device *pdev)
dd->phys_base = res.start;
pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "%s: failed to get_sync(%d)\n",
+ __func__, err);
+ goto err_res;
+ }
omap_aes_dma_stop(dd);
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index e45aaaf0db30..a727a6a59653 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -789,10 +789,13 @@ static int omap_sham_update_cpu(struct omap_sham_dev *dd)
dev_dbg(dd->dev, "cpu: bufcnt: %u, digcnt: %d, final: %d\n",
ctx->bufcnt, ctx->digcnt, final);
- bufcnt = ctx->bufcnt;
- ctx->bufcnt = 0;
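+	/*
+	 * Push data only when finalizing, or when the buffer is full and
+	 * more input is still pending.
+	 */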
+ if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
+ bufcnt = ctx->bufcnt;
+ ctx->bufcnt = 0;
+ return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final);
+ }
- return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final);
+ return 0;
}
static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
@@ -1103,6 +1106,9 @@ static int omap_sham_update(struct ahash_request *req)
return 0;
}
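+	/* In polling mode the CPU moves the data, so force the CPU transfer path. */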
+ if (dd->polling_mode)
+ ctx->flags |= BIT(FLAGS_CPU);
+
return omap_sham_enqueue(req, OP_UPDATE);
}
@@ -1970,7 +1976,8 @@ err_algs:
crypto_unregister_ahash(
&dd->pdata->algs_info[i].algs_list[j]);
pm_runtime_disable(dev);
- dma_release_channel(dd->dma_lch);
+ if (dd->dma_lch)
+ dma_release_channel(dd->dma_lch);
data_err:
dev_err(dev, "initialization failed.\n");
@@ -1994,7 +2001,9 @@ static int omap_sham_remove(struct platform_device *pdev)
&dd->pdata->algs_info[i].algs_list[j]);
tasklet_kill(&dd->done_task);
pm_runtime_disable(&pdev->dev);
- dma_release_channel(dd->dma_lch);
+
+ if (dd->dma_lch)
+ dma_release_channel(dd->dma_lch);
return 0;
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index b44f4ddc565c..5967667e1a8f 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -338,20 +338,29 @@ DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
static u32 current_desc_hdr(struct device *dev, int ch)
{
struct talitos_private *priv = dev_get_drvdata(dev);
- int tail = priv->chan[ch].tail;
+ int tail, iter;
dma_addr_t cur_desc;
- cur_desc = in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
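+	/* The current descriptor pointer spans two 32-bit registers; combine them. */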
+ cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
+ cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
- while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
- tail = (tail + 1) & (priv->fifo_len - 1);
- if (tail == priv->chan[ch].tail) {
+ if (!cur_desc) {
+ dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
+ return 0;
+ }
+
+ tail = priv->chan[ch].tail;
+
+ iter = tail;
+ while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
+ iter = (iter + 1) & (priv->fifo_len - 1);
+ if (iter == tail) {
dev_err(dev, "couldn't locate current descriptor\n");
return 0;
}
}
- return priv->chan[ch].fifo[tail].desc->hdr;
+ return priv->chan[ch].fifo[iter].desc->hdr;
}
/*
@@ -2486,8 +2495,6 @@ static int talitos_remove(struct platform_device *ofdev)
iounmap(priv->reg);
- dev_set_drvdata(dev, NULL);
-
kfree(priv);
return 0;
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 31f3adba4cf3..7d2f43550700 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -67,7 +67,7 @@ comment "DEVFREQ Drivers"
config ARM_EXYNOS4_BUS_DEVFREQ
bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
- depends on CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412
+ depends on (CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412) && !ARCH_MULTIPLATFORM
select ARCH_HAS_OPP
select DEVFREQ_GOV_SIMPLE_ONDEMAND
help
diff --git a/drivers/devfreq/exynos/exynos4_bus.c b/drivers/devfreq/exynos/exynos4_bus.c
index bbbfe6853b18..e07b0c68c715 100644
--- a/drivers/devfreq/exynos/exynos4_bus.c
+++ b/drivers/devfreq/exynos/exynos4_bus.c
@@ -30,9 +30,9 @@
extern unsigned int exynos_result_of_asv;
#endif
-#include <mach/regs-clock.h>
+#include <mach/map.h>
-#include <plat/map-s5p.h>
+#include "exynos4_bus.h"
#define MAX_SAFEVOLT 1200000 /* 1.2V */
diff --git a/drivers/devfreq/exynos/exynos4_bus.h b/drivers/devfreq/exynos/exynos4_bus.h
new file mode 100644
index 000000000000..94c73c18d28c
--- /dev/null
+++ b/drivers/devfreq/exynos/exynos4_bus.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS4 BUS header
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __DEVFREQ_EXYNOS4_BUS_H
+#define __DEVFREQ_EXYNOS4_BUS_H __FILE__
+
+#include <mach/map.h>
+
+#define EXYNOS4_CLKDIV_LEFTBUS (S5P_VA_CMU + 0x04500)
+#define EXYNOS4_CLKDIV_STAT_LEFTBUS (S5P_VA_CMU + 0x04600)
+
+#define EXYNOS4_CLKDIV_RIGHTBUS (S5P_VA_CMU + 0x08500)
+#define EXYNOS4_CLKDIV_STAT_RIGHTBUS (S5P_VA_CMU + 0x08600)
+
+#define EXYNOS4_CLKDIV_TOP (S5P_VA_CMU + 0x0C510)
+#define EXYNOS4_CLKDIV_CAM (S5P_VA_CMU + 0x0C520)
+#define EXYNOS4_CLKDIV_MFC (S5P_VA_CMU + 0x0C528)
+
+#define EXYNOS4_CLKDIV_STAT_TOP (S5P_VA_CMU + 0x0C610)
+#define EXYNOS4_CLKDIV_STAT_MFC (S5P_VA_CMU + 0x0C628)
+
+#define EXYNOS4210_CLKGATE_IP_IMAGE (S5P_VA_CMU + 0x0C930)
+#define EXYNOS4212_CLKGATE_IP_IMAGE (S5P_VA_CMU + 0x04930)
+
+#define EXYNOS4_CLKDIV_DMC0 (S5P_VA_CMU + 0x10500)
+#define EXYNOS4_CLKDIV_DMC1 (S5P_VA_CMU + 0x10504)
+#define EXYNOS4_CLKDIV_STAT_DMC0 (S5P_VA_CMU + 0x10600)
+#define EXYNOS4_CLKDIV_STAT_DMC1 (S5P_VA_CMU + 0x10604)
+
+#define EXYNOS4_DMC_PAUSE_CTRL (S5P_VA_CMU + 0x11094)
+#define EXYNOS4_DMC_PAUSE_ENABLE (1 << 0)
+
+#define EXYNOS4_CLKDIV_DMC0_ACP_SHIFT (0)
+#define EXYNOS4_CLKDIV_DMC0_ACP_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_ACP_SHIFT)
+#define EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT (4)
+#define EXYNOS4_CLKDIV_DMC0_ACPPCLK_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT)
+#define EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT (8)
+#define EXYNOS4_CLKDIV_DMC0_DPHY_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT)
+#define EXYNOS4_CLKDIV_DMC0_DMC_SHIFT (12)
+#define EXYNOS4_CLKDIV_DMC0_DMC_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_DMC_SHIFT)
+#define EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT (16)
+#define EXYNOS4_CLKDIV_DMC0_DMCD_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT)
+#define EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT (20)
+#define EXYNOS4_CLKDIV_DMC0_DMCP_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT)
+#define EXYNOS4_CLKDIV_DMC0_COPY2_SHIFT (24)
+#define EXYNOS4_CLKDIV_DMC0_COPY2_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_COPY2_SHIFT)
+#define EXYNOS4_CLKDIV_DMC0_CORETI_SHIFT (28)
+#define EXYNOS4_CLKDIV_DMC0_CORETI_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_CORETI_SHIFT)
+
+#define EXYNOS4_CLKDIV_DMC1_G2D_ACP_SHIFT (0)
+#define EXYNOS4_CLKDIV_DMC1_G2D_ACP_MASK (0xf << EXYNOS4_CLKDIV_DMC1_G2D_ACP_SHIFT)
+#define EXYNOS4_CLKDIV_DMC1_C2C_SHIFT (4)
+#define EXYNOS4_CLKDIV_DMC1_C2C_MASK (0x7 << EXYNOS4_CLKDIV_DMC1_C2C_SHIFT)
+#define EXYNOS4_CLKDIV_DMC1_PWI_SHIFT (8)
+#define EXYNOS4_CLKDIV_DMC1_PWI_MASK (0xf << EXYNOS4_CLKDIV_DMC1_PWI_SHIFT)
+#define EXYNOS4_CLKDIV_DMC1_C2CACLK_SHIFT (12)
+#define EXYNOS4_CLKDIV_DMC1_C2CACLK_MASK (0x7 << EXYNOS4_CLKDIV_DMC1_C2CACLK_SHIFT)
+#define EXYNOS4_CLKDIV_DMC1_DVSEM_SHIFT (16)
+#define EXYNOS4_CLKDIV_DMC1_DVSEM_MASK (0x7f << EXYNOS4_CLKDIV_DMC1_DVSEM_SHIFT)
+#define EXYNOS4_CLKDIV_DMC1_DPM_SHIFT (24)
+#define EXYNOS4_CLKDIV_DMC1_DPM_MASK (0x7f << EXYNOS4_CLKDIV_DMC1_DPM_SHIFT)
+
+#define EXYNOS4_CLKDIV_MFC_SHIFT (0)
+#define EXYNOS4_CLKDIV_MFC_MASK (0x7 << EXYNOS4_CLKDIV_MFC_SHIFT)
+
+#define EXYNOS4_CLKDIV_TOP_ACLK200_SHIFT (0)
+#define EXYNOS4_CLKDIV_TOP_ACLK200_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK200_SHIFT)
+#define EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT (4)
+#define EXYNOS4_CLKDIV_TOP_ACLK100_MASK (0xF << EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT)
+#define EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT (8)
+#define EXYNOS4_CLKDIV_TOP_ACLK160_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT)
+#define EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT (12)
+#define EXYNOS4_CLKDIV_TOP_ACLK133_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT)
+#define EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT (16)
+#define EXYNOS4_CLKDIV_TOP_ONENAND_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT)
+#define EXYNOS4_CLKDIV_TOP_ACLK266_GPS_SHIFT (20)
+#define EXYNOS4_CLKDIV_TOP_ACLK266_GPS_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK266_GPS_SHIFT)
+#define EXYNOS4_CLKDIV_TOP_ACLK400_MCUISP_SHIFT (24)
+#define EXYNOS4_CLKDIV_TOP_ACLK400_MCUISP_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK400_MCUISP_SHIFT)
+
+#define EXYNOS4_CLKDIV_BUS_GDLR_SHIFT (0)
+#define EXYNOS4_CLKDIV_BUS_GDLR_MASK (0x7 << EXYNOS4_CLKDIV_BUS_GDLR_SHIFT)
+#define EXYNOS4_CLKDIV_BUS_GPLR_SHIFT (4)
+#define EXYNOS4_CLKDIV_BUS_GPLR_MASK (0x7 << EXYNOS4_CLKDIV_BUS_GPLR_SHIFT)
+
+#define EXYNOS4_CLKDIV_CAM_FIMC0_SHIFT (0)
+#define EXYNOS4_CLKDIV_CAM_FIMC0_MASK (0xf << EXYNOS4_CLKDIV_CAM_FIMC0_SHIFT)
+#define EXYNOS4_CLKDIV_CAM_FIMC1_SHIFT (4)
+#define EXYNOS4_CLKDIV_CAM_FIMC1_MASK (0xf << EXYNOS4_CLKDIV_CAM_FIMC1_SHIFT)
+#define EXYNOS4_CLKDIV_CAM_FIMC2_SHIFT (8)
+#define EXYNOS4_CLKDIV_CAM_FIMC2_MASK (0xf << EXYNOS4_CLKDIV_CAM_FIMC2_SHIFT)
+#define EXYNOS4_CLKDIV_CAM_FIMC3_SHIFT (12)
+#define EXYNOS4_CLKDIV_CAM_FIMC3_MASK (0xf << EXYNOS4_CLKDIV_CAM_FIMC3_SHIFT)
+
+#define EXYNOS4_CLKDIV_CAM1 (S5P_VA_CMU + 0x0C568)
+
+#define EXYNOS4_CLKDIV_STAT_CAM1 (S5P_VA_CMU + 0x0C668)
+
+#define EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT (0)
+#define EXYNOS4_CLKDIV_CAM1_JPEG_MASK (0xf << EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT)
+
+#endif /* __DEVFREQ_EXYNOS4_BUS_H */
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 446687cc2334..605b016bcea4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -62,6 +62,7 @@ config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86
select DMA_ENGINE
+ select DMA_ENGINE_RAID
select DCA
help
Enable support for the Intel(R) I/OAT DMA engine present
@@ -112,6 +113,7 @@ config MV_XOR
bool "Marvell XOR engine support"
depends on PLAT_ORION
select DMA_ENGINE
+ select DMA_ENGINE_RAID
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
---help---
Enable support for the Marvell XOR engine.
@@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
tristate "AMCC PPC440SPe ADMA support"
depends on 440SPe || 440SP
select DMA_ENGINE
+ select DMA_ENGINE_RAID
select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
help
@@ -289,9 +292,11 @@ config MMP_TDMA
bool "MMP Two-Channel DMA support"
depends on ARCH_MMP
select DMA_ENGINE
+ select MMP_SRAM
help
Support the MMP Two-Channel DMA engine.
This engine used for MMP Audio DMA and pxa910 SQU.
+	  It needs the SRAM driver under mach-mmp.
Say Y here if you enabled MMP ADMA, otherwise say N.
@@ -301,6 +306,12 @@ config DMA_OMAP
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
+config DMA_BCM2835
+ tristate "BCM2835 DMA engine support"
+ depends on (ARCH_BCM2835 || MACH_BCM2708)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+
config TI_CPPI41
tristate "AM33xx CPPI41 DMA support"
depends on ARCH_OMAP
@@ -331,6 +342,15 @@ config K3_DMA
Support the DMA engine for Hisilicon K3 platform
devices.
+config MOXART_DMA
+ tristate "MOXART DMA support"
+ depends on ARCH_MOXART
+ select DMA_ENGINE
+ select DMA_OF
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the MOXA ART SoC DMA controller.
+
config DMA_ENGINE
bool
@@ -352,6 +372,7 @@ config NET_DMA
bool "Network: TCP receive copy offload"
depends on DMA_ENGINE && NET
default (INTEL_IOATDMA || FSL_DMA)
+ depends on BROKEN
help
This enables the use of DMA engines in the network stack to
offload receive copy-to-user operations, freeing CPU cycles.
@@ -377,4 +398,7 @@ config DMATEST
Simple DMA test client. Say N unless you're debugging a
DMA Device driver.
+config DMA_ENGINE_RAID
+ bool
+
endif
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0ce2da97e429..a029d0f4a1be 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -38,7 +38,9 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index e69b03c0fa50..1e506afa33f5 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -30,11 +30,12 @@ static DEFINE_MUTEX(acpi_dma_lock);
* @adev: ACPI device to match with
* @adma: struct acpi_dma of the given DMA controller
*
- * Returns 1 on success, 0 when no information is available, or appropriate
- * errno value on error.
- *
* In order to match a device from DSDT table to the corresponding CSRT device
* we use MMIO address and IRQ.
+ *
+ * Return:
+ * 1 on success, 0 when no information is available, or appropriate errno value
+ * on error.
*/
static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
struct acpi_device *adev, struct acpi_dma *adma)
@@ -101,7 +102,6 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
*
* We are using this table to get the request line range of the specific DMA
* controller to be used later.
- *
*/
static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
{
@@ -141,10 +141,11 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
* @data pointer to controller specific data to be used by
* translation function
*
- * Returns 0 on success or appropriate errno value on error.
- *
* Allocated memory should be freed with appropriate acpi_dma_controller_free()
* call.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
*/
int acpi_dma_controller_register(struct device *dev,
struct dma_chan *(*acpi_dma_xlate)
@@ -188,6 +189,9 @@ EXPORT_SYMBOL_GPL(acpi_dma_controller_register);
* @dev: struct device of DMA controller
*
* Memory allocated by acpi_dma_controller_register() is freed here.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
*/
int acpi_dma_controller_free(struct device *dev)
{
@@ -225,6 +229,9 @@ static void devm_acpi_dma_release(struct device *dev, void *res)
* Managed acpi_dma_controller_register(). DMA controller registered by this
* function are automatically freed on driver detach. See
* acpi_dma_controller_register() for more information.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
*/
int devm_acpi_dma_controller_register(struct device *dev,
struct dma_chan *(*acpi_dma_xlate)
@@ -267,8 +274,6 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
* @adma: struct acpi_dma of DMA controller
* @dma_spec: dma specifier to update
*
- * Returns 0, if no information is avaiable, -1 on mismatch, and 1 otherwise.
- *
* Accordingly to ACPI 5.0 Specification Table 6-170 "Fixed DMA Resource
* Descriptor":
* DMA Request Line bits is a platform-relative number uniquely
@@ -276,6 +281,9 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
* mapping is done in a controller-specific OS driver.
* That's why we can safely adjust slave_id when the appropriate controller is
* found.
+ *
+ * Return:
+ * 0, if no information is available, -1 on mismatch, and 1 otherwise.
*/
static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
struct acpi_dma_spec *dma_spec)
@@ -334,7 +342,8 @@ static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data)
* @dev: struct device to get DMA request from
* @index: index of FixedDMA descriptor for @dev
*
- * Returns pointer to appropriate dma channel on success or NULL on error.
+ * Return:
+ * Pointer to appropriate dma channel on success or NULL on error.
*/
struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
size_t index)
@@ -403,7 +412,8 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
* translate the names "tx" and "rx" here based on the most common case where
* the first FixedDMA descriptor is TX and second is RX.
*
- * Returns pointer to appropriate dma channel on success or NULL on error.
+ * Return:
+ * Pointer to appropriate dma channel on success or NULL on error.
*/
struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
const char *name)
@@ -427,8 +437,10 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name);
* @adma: pointer to ACPI DMA controller data
*
* A simple translation function for ACPI based devices. Passes &struct
- * dma_spec to the DMA controller driver provided filter function. Returns
- * pointer to the channel if found or %NULL otherwise.
+ * dma_spec to the DMA controller driver provided filter function.
+ *
+ * Return:
+ * Pointer to the channel if found or %NULL otherwise.
*/
struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
struct acpi_dma *adma)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index ec4ee5c1fe9d..8114731a1c62 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -83,6 +83,7 @@
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -1771,6 +1772,7 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
return false;
}
+EXPORT_SYMBOL_GPL(pl08x_filter_id);
/*
* Just check that the device is there and active
@@ -2167,7 +2169,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
/* Register slave channels */
ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
pl08x->pd->num_slave_channels, true);
- if (ret <= 0) {
+ if (ret < 0) {
dev_warn(&pl08x->adev->dev,
"%s failed to enumerate slave channels - %d\n",
__func__, ret);
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index f31d647acdfa..2787aba60c6b 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
{
return &chan->dev->device;
}
-static struct device *chan2parent(struct dma_chan *chan)
-{
- return chan->dev->device.parent;
-}
#if defined(VERBOSE_DEBUG)
static void vdbg_dump_regs(struct at_dma_chan *atchan)
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
new file mode 100644
index 000000000000..a03602164e3e
--- /dev/null
+++ b/drivers/dma/bcm2835-dma.c
@@ -0,0 +1,707 @@
+/*
+ * BCM2835 DMA engine support
+ *
+ * This driver only supports cyclic DMA transfers
+ * as needed for the I2S module.
+ *
+ * Author: Florian Meier <florian.meier@koalo.de>
+ * Copyright 2013
+ *
+ * Based on
+ * OMAP DMAengine support by Russell King
+ *
+ * BCM2708 DMA Driver
+ * Copyright (C) 2010 Broadcom
+ *
+ * Raspberry Pi PCM I2S ALSA Driver
+ * Copyright (c) by Phil Poole 2013
+ *
+ * MARVELL MMP Peripheral DMA Driver
+ * Copyright 2012 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+struct bcm2835_dmadev {
+ struct dma_device ddev;
+ spinlock_t lock;
+ void __iomem *base;
+ struct device_dma_parameters dma_parms;
+};
+
+struct bcm2835_dma_cb {
+ uint32_t info;
+ uint32_t src;
+ uint32_t dst;
+ uint32_t length;
+ uint32_t stride;
+ uint32_t next;
+ uint32_t pad[2];
+};
+
+struct bcm2835_chan {
+ struct virt_dma_chan vc;
+ struct list_head node;
+
+ struct dma_slave_config cfg;
+ bool cyclic;
+ unsigned int dreq;
+
+ int ch;
+ struct bcm2835_desc *desc;
+
+ void __iomem *chan_base;
+ int irq_number;
+};
+
+struct bcm2835_desc {
+ struct virt_dma_desc vd;
+ enum dma_transfer_direction dir;
+
+ unsigned int control_block_size;
+ struct bcm2835_dma_cb *control_block_base;
+ dma_addr_t control_block_base_phys;
+
+ unsigned int frames;
+ size_t size;
+};
+
+#define BCM2835_DMA_CS 0x00
+#define BCM2835_DMA_ADDR 0x04
+#define BCM2835_DMA_SOURCE_AD 0x0c
+#define BCM2835_DMA_DEST_AD 0x10
+#define BCM2835_DMA_NEXTCB 0x1C
+
+/* DMA CS Control and Status bits */
+#define BCM2835_DMA_ACTIVE BIT(0)
+#define BCM2835_DMA_INT BIT(2)
+#define BCM2835_DMA_ISPAUSED BIT(4) /* Pause requested or not active */
+#define BCM2835_DMA_ISHELD BIT(5) /* Is held by DREQ flow control */
+#define BCM2835_DMA_ERR BIT(8)
+#define BCM2835_DMA_ABORT BIT(30) /* Stop current CB, go to next, WO */
+#define BCM2835_DMA_RESET BIT(31) /* WO, self clearing */
+
+#define BCM2835_DMA_INT_EN BIT(0)
+#define BCM2835_DMA_D_INC BIT(4)
+#define BCM2835_DMA_D_DREQ BIT(6)
+#define BCM2835_DMA_S_INC BIT(8)
+#define BCM2835_DMA_S_DREQ BIT(10)
+
+#define BCM2835_DMA_PER_MAP(x) ((x) << 16)
+
+#define BCM2835_DMA_DATA_TYPE_S8 1
+#define BCM2835_DMA_DATA_TYPE_S16 2
+#define BCM2835_DMA_DATA_TYPE_S32 4
+#define BCM2835_DMA_DATA_TYPE_S128 16
+
+#define BCM2835_DMA_BULK_MASK BIT(0)
+#define BCM2835_DMA_FIQ_MASK (BIT(2) | BIT(3))
+
+/* Valid only for channels 0 - 14, 15 has its own base address */
+#define BCM2835_DMA_CHAN(n) ((n) << 8) /* Base address */
+#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))
+
+static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
+{
+ return container_of(d, struct bcm2835_dmadev, ddev);
+}
+
+static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct bcm2835_chan, vc.chan);
+}
+
+static inline struct bcm2835_desc *to_bcm2835_dma_desc(
+ struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct bcm2835_desc, vd.tx);
+}
+
+static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
+{
+ struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
+ dma_free_coherent(desc->vd.tx.chan->device->dev,
+ desc->control_block_size,
+ desc->control_block_base,
+ desc->control_block_base_phys);
+ kfree(desc);
+}
+
+static int bcm2835_dma_abort(void __iomem *chan_base)
+{
+ unsigned long cs;
+ long int timeout = 10000;
+
+ cs = readl(chan_base + BCM2835_DMA_CS);
+ if (!(cs & BCM2835_DMA_ACTIVE))
+ return 0;
+
+ /* Write 0 to the active bit - Pause the DMA */
+ writel(0, chan_base + BCM2835_DMA_CS);
+
+ /* Wait for any current AXI transfer to complete */
+ while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+ cpu_relax();
+ cs = readl(chan_base + BCM2835_DMA_CS);
+ }
+
+	/* We'll un-pause when we set up our next DMA */
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ if (!(cs & BCM2835_DMA_ACTIVE))
+ return 0;
+
+ /* Terminate the control block chain */
+ writel(0, chan_base + BCM2835_DMA_NEXTCB);
+
+ /* Abort the whole DMA */
+ writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
+ chan_base + BCM2835_DMA_CS);
+
+ return 0;
+}
+
+static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+ struct bcm2835_desc *d;
+
+ if (!vd) {
+ c->desc = NULL;
+ return;
+ }
+
+ list_del(&vd->node);
+
+ c->desc = d = to_bcm2835_dma_desc(&vd->tx);
+
+ writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
+ writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+}
+
+static irqreturn_t bcm2835_dma_callback(int irq, void *data)
+{
+ struct bcm2835_chan *c = data;
+ struct bcm2835_desc *d;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+
+ /* Acknowledge interrupt */
+ writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+
+ d = c->desc;
+
+ if (d) {
+ /* TODO Only works for cyclic DMA */
+ vchan_cyclic_callback(&d->vd);
+ }
+
+ /* Keep the DMA engine running */
+ writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+ dev_dbg(c->vc.chan.device->dev,
+ "Allocating DMA channel %d\n", c->ch);
+
+ return request_irq(c->irq_number,
+ bcm2835_dma_callback, 0, "DMA IRQ", c);
+}
+
+static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+ vchan_free_chan_resources(&c->vc);
+ free_irq(c->irq_number, c);
+
+ dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
+}
+
+static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d)
+{
+ return d->size;
+}
+
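+/*
+ * Compute the residue: bytes still to be transferred in descriptor @d,
+ * starting from the current hardware address @addr.
+ */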
+static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
+{
+ unsigned int i;
+ size_t size;
+
+ for (size = i = 0; i < d->frames; i++) {
+ struct bcm2835_dma_cb *control_block =
+ &d->control_block_base[i];
+ size_t this_size = control_block->length;
+ dma_addr_t dma;
+
+ if (d->dir == DMA_DEV_TO_MEM)
+ dma = control_block->dst;
+ else
+ dma = control_block->src;
+
+ if (size)
+ size += this_size;
+ else if (addr >= dma && addr < dma + this_size)
+ size += dma + this_size - addr;
+ }
+
+ return size;
+}
+
+static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vd = vchan_find_desc(&c->vc, cookie);
+ if (vd) {
+ txstate->residue =
+ bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
+ } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
+ struct bcm2835_desc *d = c->desc;
+ dma_addr_t pos;
+
+ if (d->dir == DMA_MEM_TO_DEV)
+ pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
+ else if (d->dir == DMA_DEV_TO_MEM)
+ pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
+ else
+ pos = 0;
+
+ txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
+ } else {
+ txstate->residue = 0;
+ }
+
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+
+ return ret;
+}
+
+static void bcm2835_dma_issue_pending(struct dma_chan *chan)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+ unsigned long flags;
+
+ c->cyclic = true; /* Nothing else is implemented */
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ if (vchan_issue_pending(&c->vc) && !c->desc)
+ bcm2835_dma_start_desc(c);
+
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+ enum dma_slave_buswidth dev_width;
+ struct bcm2835_desc *d;
+ dma_addr_t dev_addr;
+ unsigned int es, sync_type;
+ unsigned int frame;
+
+ /* Grab configuration */
+ if (!is_slave_direction(direction)) {
+ dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (direction == DMA_DEV_TO_MEM) {
+ dev_addr = c->cfg.src_addr;
+ dev_width = c->cfg.src_addr_width;
+ sync_type = BCM2835_DMA_S_DREQ;
+ } else {
+ dev_addr = c->cfg.dst_addr;
+ dev_width = c->cfg.dst_addr_width;
+ sync_type = BCM2835_DMA_D_DREQ;
+ }
+
+ /* Bus width translates to the element size (ES) */
+ switch (dev_width) {
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ es = BCM2835_DMA_DATA_TYPE_S32;
+ break;
+ default:
+ return NULL;
+ }
+
+ /* Now allocate and setup the descriptor. */
+ d = kzalloc(sizeof(*d), GFP_NOWAIT);
+ if (!d)
+ return NULL;
+
+ d->dir = direction;
+ d->frames = buf_len / period_len;
+
+ /* Allocate memory for control blocks */
+ d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
+ d->control_block_base = dma_zalloc_coherent(chan->device->dev,
+ d->control_block_size, &d->control_block_base_phys,
+ GFP_NOWAIT);
+
+ if (!d->control_block_base) {
+ kfree(d);
+ return NULL;
+ }
+
+ /*
+ * Iterate over all frames, create a control block
+ * for each frame and link them together.
+ */
+ for (frame = 0; frame < d->frames; frame++) {
+ struct bcm2835_dma_cb *control_block =
+ &d->control_block_base[frame];
+
+		/* Setup addresses */
+ if (d->dir == DMA_DEV_TO_MEM) {
+ control_block->info = BCM2835_DMA_D_INC;
+ control_block->src = dev_addr;
+ control_block->dst = buf_addr + frame * period_len;
+ } else {
+ control_block->info = BCM2835_DMA_S_INC;
+ control_block->src = buf_addr + frame * period_len;
+ control_block->dst = dev_addr;
+ }
+
+ /* Enable interrupt */
+ control_block->info |= BCM2835_DMA_INT_EN;
+
+ /* Setup synchronization */
+ if (sync_type != 0)
+ control_block->info |= sync_type;
+
+ /* Setup DREQ channel */
+ if (c->dreq != 0)
+ control_block->info |=
+ BCM2835_DMA_PER_MAP(c->dreq);
+
+ /* Length of a frame */
+ control_block->length = period_len;
+ d->size += control_block->length;
+
+ /*
+ * Next block is the next frame.
+ * This DMA engine driver currently only supports cyclic DMA.
+		 * Therefore, wrap around at the number of frames.
+ */
+ control_block->next = d->control_block_base_phys +
+ sizeof(struct bcm2835_dma_cb)
+ * ((frame + 1) % d->frames);
+ }
+
+ return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
+static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
+ struct dma_slave_config *cfg)
+{
+ if ((cfg->direction == DMA_DEV_TO_MEM &&
+ cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+ (cfg->direction == DMA_MEM_TO_DEV &&
+ cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+ !is_slave_direction(cfg->direction)) {
+ return -EINVAL;
+ }
+
+ c->cfg = *cfg;
+
+ return 0;
+}
+
+static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
+{
+ struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
+ unsigned long flags;
+ int timeout = 10000;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+
+	/* Prevent this channel from being scheduled */
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+
+ /*
+ * Stop DMA activity: we assume the callback will not be called
+	 * after bcm2835_dma_abort() returns (even if it is, it will see
+	 * that c->desc is NULL and exit).
+ */
+ if (c->desc) {
+ c->desc = NULL;
+ bcm2835_dma_abort(c->chan_base);
+
+ /* Wait for stopping */
+ while (--timeout) {
+ if (!(readl(c->chan_base + BCM2835_DMA_CS) &
+ BCM2835_DMA_ACTIVE))
+ break;
+
+ cpu_relax();
+ }
+
+ if (!timeout)
+ dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+ }
+
+ vchan_get_all_descriptors(&c->vc, &head);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
+
+ return 0;
+}
+
+static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ return bcm2835_dma_slave_config(c,
+ (struct dma_slave_config *)arg);
+
+ case DMA_TERMINATE_ALL:
+ return bcm2835_dma_terminate_all(c);
+
+ default:
+ return -ENXIO;
+ }
+}
+
+static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
+{
+ struct bcm2835_chan *c;
+
+ c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ c->vc.desc_free = bcm2835_dma_desc_free;
+ vchan_init(&c->vc, &d->ddev);
+ INIT_LIST_HEAD(&c->node);
+
+ d->ddev.chancnt++;
+
+ c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
+ c->ch = chan_id;
+ c->irq_number = irq;
+
+ return 0;
+}
+
+static void bcm2835_dma_free(struct bcm2835_dmadev *od)
+{
+ struct bcm2835_chan *c, *next;
+
+ list_for_each_entry_safe(c, next, &od->ddev.channels,
+ vc.chan.device_node) {
+ list_del(&c->vc.chan.device_node);
+ tasklet_kill(&c->vc.task);
+ }
+}
+
+static const struct of_device_id bcm2835_dma_of_match[] = {
+ { .compatible = "brcm,bcm2835-dma", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);
+
+static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
+ struct of_dma *ofdma)
+{
+ struct bcm2835_dmadev *d = ofdma->of_dma_data;
+ struct dma_chan *chan;
+
+ chan = dma_get_any_slave_channel(&d->ddev);
+ if (!chan)
+ return NULL;
+
+ /* Set DREQ from param */
+ to_bcm2835_dma_chan(chan)->dreq = spec->args[0];
+
+ return chan;
+}
+
+static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = false;
+ caps->cmd_terminate = true;
+
+ return 0;
+}
+
+static int bcm2835_dma_probe(struct platform_device *pdev)
+{
+ struct bcm2835_dmadev *od;
+ struct resource *res;
+ void __iomem *base;
+ int rc;
+ int i;
+ int irq;
+ uint32_t chans_available;
+
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
+ if (!od)
+ return -ENOMEM;
+
+ pdev->dev.dma_parms = &od->dma_parms;
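+	/* Cap DMA segments at just under 1 GiB (0x3FFFFFFF bytes). */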
+ dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ od->base = base;
+
+ dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+ dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+ od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
+ od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
+ od->ddev.device_tx_status = bcm2835_dma_tx_status;
+ od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
+ od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps;
+ od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
+ od->ddev.device_control = bcm2835_dma_control;
+ od->ddev.dev = &pdev->dev;
+ INIT_LIST_HEAD(&od->ddev.channels);
+ spin_lock_init(&od->lock);
+
+ platform_set_drvdata(pdev, od);
+
+ /* Request DMA channel mask from device tree */
+ if (of_property_read_u32(pdev->dev.of_node,
+ "brcm,dma-channel-mask",
+ &chans_available)) {
+ dev_err(&pdev->dev, "Failed to get channel mask\n");
+ rc = -EINVAL;
+ goto err_no_dma;
+ }
+
+ /*
+ * Do not use the FIQ and BULK channels,
+ * because they are used by the GPU.
+ */
+ chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK);
+
+ for (i = 0; i < pdev->num_resources; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ break;
+
+ if (chans_available & (1 << i)) {
+ rc = bcm2835_dma_chan_init(od, i, irq);
+ if (rc)
+ goto err_no_dma;
+ }
+ }
+
+ dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);
+
+ /* Device-tree DMA controller registration */
+ rc = of_dma_controller_register(pdev->dev.of_node,
+ bcm2835_dma_xlate, od);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to register DMA controller\n");
+ goto err_no_dma;
+ }
+
+ rc = dma_async_device_register(&od->ddev);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Failed to register slave DMA engine device: %d\n", rc);
+ goto err_no_dma;
+ }
+
+ dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");
+
+ return 0;
+
+err_no_dma:
+ bcm2835_dma_free(od);
+ return rc;
+}
+
+static int bcm2835_dma_remove(struct platform_device *pdev)
+{
+ struct bcm2835_dmadev *od = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&od->ddev);
+ bcm2835_dma_free(od);
+
+ return 0;
+}
+
+static struct platform_driver bcm2835_dma_driver = {
+ .probe = bcm2835_dma_probe,
+ .remove = bcm2835_dma_remove,
+ .driver = {
+ .name = "bcm2835-dma",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(bcm2835_dma_of_match),
+ },
+};
+
+module_platform_driver(bcm2835_dma_driver);
+
+MODULE_ALIAS("platform:bcm2835-dma");
+MODULE_DESCRIPTION("BCM2835 DMA engine driver");
+MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index c29dacff66fa..c18aebf7d5aa 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -972,8 +972,10 @@ static int cppi41_dma_probe(struct platform_device *pdev)
goto err_chans;
irq = irq_of_parse_and_map(dev->of_node, 0);
- if (!irq)
+ if (!irq) {
+ ret = -EINVAL;
goto err_irq;
+ }
cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ea806bdc12ef..ed610b497518 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -535,11 +535,41 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);
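+/**
+ * dma_get_any_slave_channel - try to get any unused DMA_SLAVE capable channel
+ * @device: DMA device to look for a free channel on
+ *
+ * Returns pointer to a channel on success or NULL if none is available.
+ */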
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ int err;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* lock against __dma_request_channel */
+ mutex_lock(&dma_list_mutex);
+
+ chan = private_candidate(&mask, device, NULL, NULL);
+ if (chan) {
+ err = dma_chan_get(chan);
+ if (err) {
+ pr_debug("%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
+ chan = NULL;
+ }
+ }
+
+ mutex_unlock(&dma_list_mutex);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
+
/**
* __dma_request_channel - try to allocate an exclusive channel
* @mask: capabilities that the channel must satisfy
* @fn: optional callback to disposition available channels
* @fn_param: opaque parameter to pass to dma_filter_fn
+ *
+ * Returns pointer to appropriate DMA channel on success or NULL.
*/
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
dma_filter_fn fn, void *fn_param)
@@ -591,18 +621,43 @@ EXPORT_SYMBOL_GPL(__dma_request_channel);
* dma_request_slave_channel - try to allocate an exclusive slave channel
* @dev: pointer to client device structure
* @name: slave channel name
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
*/
-struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
+struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
+ const char *name)
{
+ struct dma_chan *chan;
+
/* If device-tree is present get slave info from here */
if (dev->of_node)
return of_dma_request_slave_channel(dev->of_node, name);
/* If device was enumerated by ACPI get slave info from here */
- if (ACPI_HANDLE(dev))
- return acpi_dma_request_slave_chan_by_name(dev, name);
+ if (ACPI_HANDLE(dev)) {
+ chan = acpi_dma_request_slave_chan_by_name(dev, name);
+ if (chan)
+ return chan;
+ }
- return NULL;
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
+
+/**
+ * dma_request_slave_channel - try to allocate an exclusive slave channel
+ * @dev: pointer to client device structure
+ * @name: slave channel name
+ *
+ * Returns pointer to appropriate DMA channel on success or NULL.
+ */
+struct dma_chan *dma_request_slave_channel(struct device *dev,
+ const char *name)
+{
+ struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
+ if (IS_ERR(ch))
+ return NULL;
+ return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
@@ -912,7 +967,7 @@ struct dmaengine_unmap_pool {
#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
__UNMAP_POOL(2),
- #if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
+ #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
__UNMAP_POOL(16),
__UNMAP_POOL(128),
__UNMAP_POOL(256),
@@ -1054,7 +1109,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
dma_cookie_t cookie;
unsigned long flags;
- unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
+ unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
if (!unmap)
return -ENOMEM;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 20f9a3aaf926..05b6dea770a4 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -31,7 +31,7 @@ module_param_string(channel, test_channel, sizeof(test_channel),
S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
-static char test_device[20];
+static char test_device[32];
module_param_string(device, test_device, sizeof(test_device),
S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
@@ -89,7 +89,7 @@ MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
struct dmatest_params {
unsigned int buf_size;
char channel[20];
- char device[20];
+ char device[32];
unsigned int threads_per_chan;
unsigned int max_channels;
unsigned int iterations;
@@ -539,9 +539,9 @@ static int dmatest_func(void *data)
um->len = params->buf_size;
for (i = 0; i < src_cnt; i++) {
- unsigned long buf = (unsigned long) thread->srcs[i];
+ void *buf = thread->srcs[i];
struct page *pg = virt_to_page(buf);
- unsigned pg_off = buf & ~PAGE_MASK;
+ unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
um->len, DMA_TO_DEVICE);
@@ -559,9 +559,9 @@ static int dmatest_func(void *data)
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
dsts = &um->addr[src_cnt];
for (i = 0; i < dst_cnt; i++) {
- unsigned long buf = (unsigned long) thread->dsts[i];
+ void *buf = thread->dsts[i];
struct page *pg = virt_to_page(buf);
- unsigned pg_off = buf & ~PAGE_MASK;
+ unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
DMA_BIDIRECTIONAL);
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 7516be4677cf..13ac3f240e79 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -218,8 +218,10 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
u32 ctllo;
- /* Software emulation of LLP mode relies on interrupts to continue
- * multi block transfer. */
+ /*
+ * Software emulation of LLP mode relies on interrupts to continue
+ * multi block transfer.
+ */
ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
channel_writel(dwc, SAR, desc->lli.sar);
@@ -253,8 +255,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
&dwc->flags);
if (was_soft_llp) {
dev_err(chan2dev(&dwc->chan),
- "BUG: Attempted to start new LLP transfer "
- "inside ongoing one\n");
+ "BUG: Attempted to start new LLP transfer inside ongoing one\n");
return;
}
@@ -420,8 +421,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
return;
}
- dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
- (unsigned long long)llp);
+ dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
/* Initial residue value */
@@ -567,9 +567,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
unlikely(status_xfer & dwc->mask)) {
int i;
- dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
- "interrupt, stopping DMA transfer\n",
- status_xfer ? "xfer" : "error");
+ dev_err(chan2dev(&dwc->chan),
+ "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
+ status_xfer ? "xfer" : "error");
spin_lock_irqsave(&dwc->lock, flags);
@@ -711,9 +711,8 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
u32 ctllo;
dev_vdbg(chan2dev(chan),
- "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
- (unsigned long long)dest, (unsigned long long)src,
- len, flags);
+ "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
+ &dest, &src, len, flags);
if (unlikely(!len)) {
dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
@@ -1401,9 +1400,9 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
/* Let's make a cyclic list */
last->lli.llp = cdesc->desc[0]->txd.phys;
- dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
- "period %zu periods %d\n", (unsigned long long)buf_addr,
- buf_len, period_len, periods);
+ dev_dbg(chan2dev(&dwc->chan),
+ "cyclic prepared buf %pad len %zu period %zu periods %d\n",
+ &buf_addr, buf_len, period_len, periods);
cdesc->periods = periods;
dwc->cdesc = cdesc;
@@ -1603,9 +1602,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
dwc_params);
- /* Decode maximum block size for given channel. The
+ /*
+ * Decode maximum block size for given channel. The
* stored 4 bit value represents blocks from 0x00 for 3
- * up to 0x0a for 4095. */
+ * up to 0x0a for 4095.
+ */
dwc->block_size =
(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
dwc->nollp =
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 2539ea0cbc63..cd8da451d199 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -699,8 +699,8 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
echan->alloced = true;
echan->slot[0] = echan->ch_num;
- dev_info(dev, "allocated channel for %u:%u\n",
- EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
+ dev_dbg(dev, "allocated channel for %u:%u\n",
+ EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
return 0;
@@ -736,7 +736,7 @@ static void edma_free_chan_resources(struct dma_chan *chan)
echan->alloced = false;
}
- dev_info(dev, "freeing channel for %u\n", echan->ch_num);
+ dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
}
/* Send pending descriptor to hardware */
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 7086a16a55f2..f157c6f76b32 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
hw->count = CPU_TO_DMA(chan, count, 32);
}
-static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
-{
- return DMA_TO_CPU(chan, desc->hw.count, 32);
-}
-
static void set_desc_src(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}
-static dma_addr_t get_desc_src(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
-{
- u64 snoop_bits;
-
- snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
- ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
- return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
-}
-
static void set_desc_dst(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}
-static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
-{
- u64 snoop_bits;
-
- snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
- ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
- return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
-}
-
static void set_desc_next(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
struct fsl_desc_sw *child;
unsigned long flags;
- dma_cookie_t cookie;
+ dma_cookie_t cookie = -EINVAL;
spin_lock_irqsave(&chan->desc_lock, flags);
@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
struct fsl_desc_sw *desc)
{
struct dma_async_tx_descriptor *txd = &desc->async_tx;
- struct device *dev = chan->common.device->dev;
- dma_addr_t src = get_desc_src(chan, desc);
- dma_addr_t dst = get_desc_dst(chan, desc);
- u32 len = get_desc_cnt(chan, desc);
/* Run the link descriptor callback function */
if (txd->callback) {
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 1ffc24484d23..d56e83599825 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -41,7 +41,7 @@
* channel is allowed to transfer before the DMA engine pauses
* the current channel and switches to the next channel
*/
-#define FSL_DMA_MR_BWC 0x08000000
+#define FSL_DMA_MR_BWC 0x0A000000
/* Special MR definition for MPC8349 */
#define FSL_DMA_MR_EOTIE 0x00000080
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index c75679d42028..4e7918339b12 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -323,6 +323,7 @@ struct sdma_engine {
struct clk *clk_ipg;
struct clk *clk_ahb;
spinlock_t channel_0_lock;
+ u32 script_number;
struct sdma_script_start_addrs *script_addrs;
const struct sdma_driver_data *drvdata;
};
@@ -724,6 +725,10 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
per_2_emi = sdma->script_addrs->app_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_app_addr;
break;
+ case IMX_DMATYPE_SSI_DUAL:
+ per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
+ break;
case IMX_DMATYPE_SSI_SP:
case IMX_DMATYPE_MMC:
case IMX_DMATYPE_SDHC:
@@ -1238,6 +1243,7 @@ static void sdma_issue_pending(struct dma_chan *chan)
}
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
static void sdma_add_scripts(struct sdma_engine *sdma,
const struct sdma_script_start_addrs *addr)
@@ -1246,7 +1252,11 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
s32 *saddr_arr = (u32 *)sdma->script_addrs;
int i;
- for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+ /* use the default firmware in ROM if missing external firmware */
+ if (!sdma->script_number)
+ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+
+ for (i = 0; i < sdma->script_number; i++)
if (addr_arr[i] > 0)
saddr_arr[i] = addr_arr[i];
}
@@ -1272,6 +1282,17 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
goto err_firmware;
if (header->ram_code_start + header->ram_code_size > fw->size)
goto err_firmware;
+ switch (header->version_major) {
+ case 1:
+ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+ break;
+ case 2:
+ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
+ break;
+ default:
+ dev_err(sdma->dev, "unknown firmware version\n");
+ goto err_firmware;
+ }
addr = (void *)header + header->script_addrs_start;
ram_code = (void *)header + header->ram_code_start;
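
The imx-sdma change above sizes the script-address table from the firmware header's major version (34 entries for v1, 38 for v2), rejects unknown versions, and falls back to the v1 size when only the ROM scripts are present. Below is a standalone sketch of the selection and merge; the address values and array layout are made-up stand-ins for sdma_script_start_addrs.

/* Standalone sketch of the version-dependent script-table merge shown above. */
#include <stdio.h>

#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38

/* returns 0 for an unknown firmware version */
static int script_number_for(unsigned int version_major)
{
	switch (version_major) {
	case 1:
		return SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
	case 2:
		return SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
	default:
		return 0;
	}
}

int main(void)
{
	/* made-up stand-ins for the ROM defaults and the firmware table */
	int rom_addrs[SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2] = { [0] = 0x0642, [5] = 0x06ad };
	int fw_addrs[SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2] = { [0] = 0x1800, [37] = 0x1b40 };
	int n = script_number_for(2);
	int i;

	if (!n) {
		fprintf(stderr, "unknown firmware version\n");
		return 1;
	}

	/* copy only the entries the external firmware actually provides */
	for (i = 0; i < n; i++)
		if (fw_addrs[i] > 0)
			rom_addrs[i] = fw_addrs[i];

	printf("entry 0: 0x%04x, entry 5: 0x%04x, entry 37: 0x%04x\n",
	       rom_addrs[0], rom_addrs[5], rom_addrs[37]);
	return 0;
}
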
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1a49c777607c..87529181efcc 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -817,7 +817,15 @@ int ioat_dma_self_test(struct ioatdma_device *device)
}
dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_src)) {
+ dev_err(dev, "mapping src buffer failed\n");
+ goto free_resources;
+ }
dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_dest)) {
+ dev_err(dev, "mapping dest buffer failed\n");
+ goto unmap_src;
+ }
flags = DMA_PREP_INTERRUPT;
tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
IOAT_TEST_SIZE, flags);
@@ -855,8 +863,9 @@ int ioat_dma_self_test(struct ioatdma_device *device)
}
unmap_dma:
- dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+unmap_src:
+ dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
dma->device_free_chan_resources(dma_chan);
out:
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index e26075408e9b..a1f911aaf220 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -477,7 +477,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
dma_addr_t addr, src = 0, dst = 0;
int num = sglen, i;
- if (sgl == 0)
+ if (sgl == NULL)
return NULL;
for_each_sg(sgl, sg, sglen, i) {
@@ -817,7 +817,7 @@ static int k3_dma_resume(struct device *dev)
return 0;
}
-SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
+static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
static struct platform_driver k3_pdma_driver = {
.driver = {
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 8869500ab92b..b439679f4126 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -5,6 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -32,38 +33,37 @@
#define DTADR 0x0208
#define DCMD 0x020c
-#define DCSR_RUN (1 << 31) /* Run Bit (read / write) */
-#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */
-#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */
-#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */
-#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */
-#define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */
-#define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */
-#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */
-
-#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */
-#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */
-#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */
-#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */
-#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */
-#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */
-#define DCSR_EORINTR (1 << 9) /* The end of Receive */
-
-#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + \
- (((n) & 0x3f) << 2))
-#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */
-#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
+#define DCSR_RUN BIT(31) /* Run Bit (read / write) */
+#define DCSR_NODESC BIT(30) /* No-Descriptor Fetch (read / write) */
+#define DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (read / write) */
+#define DCSR_REQPEND BIT(8) /* Request Pending (read-only) */
+#define DCSR_STOPSTATE BIT(3) /* Stop State (read-only) */
+#define DCSR_ENDINTR BIT(2) /* End Interrupt (read / write) */
+#define DCSR_STARTINTR BIT(1) /* Start Interrupt (read / write) */
+#define DCSR_BUSERR BIT(0) /* Bus Error Interrupt (read / write) */
+
+#define DCSR_EORIRQEN BIT(28) /* End of Receive Interrupt Enable (R/W) */
+#define DCSR_EORJMPEN BIT(27) /* Jump to next descriptor on EOR */
+#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */
+#define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */
+#define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */
+#define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */
+#define DCSR_EORINTR BIT(9) /* The end of Receive */
+
+#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
+#define DRCMR_MAPVLD BIT(7) /* Map Valid (read / write) */
+#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */
-#define DDADR_STOP (1 << 0) /* Stop (read / write) */
-
-#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */
-#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */
-#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */
-#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */
-#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */
-#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */
-#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */
+#define DDADR_STOP BIT(0) /* Stop (read / write) */
+
+#define DCMD_INCSRCADDR BIT(31) /* Source Address Increment Setting. */
+#define DCMD_INCTRGADDR BIT(30) /* Target Address Increment Setting. */
+#define DCMD_FLOWSRC BIT(29) /* Flow Control by the source. */
+#define DCMD_FLOWTRG BIT(28) /* Flow Control by the target. */
+#define DCMD_STARTIRQEN BIT(22) /* Start Interrupt Enable */
+#define DCMD_ENDIRQEN BIT(21) /* End Interrupt Enable */
+#define DCMD_ENDIAN BIT(18) /* Device Endian-ness. */
#define DCMD_BURST8 (1 << 16) /* 8 byte burst */
#define DCMD_BURST16 (2 << 16) /* 16 byte burst */
#define DCMD_BURST32 (3 << 16) /* 32 byte burst */
@@ -132,10 +132,14 @@ struct mmp_pdma_device {
spinlock_t phy_lock; /* protect alloc/free phy channels */
};
-#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
-#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
-#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
-#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)
+#define tx_to_mmp_pdma_desc(tx) \
+ container_of(tx, struct mmp_pdma_desc_sw, async_tx)
+#define to_mmp_pdma_desc(lh) \
+ container_of(lh, struct mmp_pdma_desc_sw, node)
+#define to_mmp_pdma_chan(dchan) \
+ container_of(dchan, struct mmp_pdma_chan, chan)
+#define to_mmp_pdma_dev(dmadev) \
+ container_of(dmadev, struct mmp_pdma_device, device)
static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
@@ -162,19 +166,18 @@ static void enable_chan(struct mmp_pdma_phy *phy)
writel(dalgn, phy->base + DALGN);
reg = (phy->idx << 2) + DCSR;
- writel(readl(phy->base + reg) | DCSR_RUN,
- phy->base + reg);
+ writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}
static void disable_chan(struct mmp_pdma_phy *phy)
{
u32 reg;
- if (phy) {
- reg = (phy->idx << 2) + DCSR;
- writel(readl(phy->base + reg) & ~DCSR_RUN,
- phy->base + reg);
- }
+ if (!phy)
+ return;
+
+ reg = (phy->idx << 2) + DCSR;
+ writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
}
static int clear_chan_irq(struct mmp_pdma_phy *phy)
@@ -183,26 +186,27 @@ static int clear_chan_irq(struct mmp_pdma_phy *phy)
u32 dint = readl(phy->base + DINT);
u32 reg = (phy->idx << 2) + DCSR;
- if (dint & BIT(phy->idx)) {
- /* clear irq */
- dcsr = readl(phy->base + reg);
- writel(dcsr, phy->base + reg);
- if ((dcsr & DCSR_BUSERR) && (phy->vchan))
- dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
- return 0;
- }
- return -EAGAIN;
+ if (!(dint & BIT(phy->idx)))
+ return -EAGAIN;
+
+ /* clear irq */
+ dcsr = readl(phy->base + reg);
+ writel(dcsr, phy->base + reg);
+ if ((dcsr & DCSR_BUSERR) && (phy->vchan))
+ dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
+
+ return 0;
}
static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
struct mmp_pdma_phy *phy = dev_id;
- if (clear_chan_irq(phy) == 0) {
- tasklet_schedule(&phy->vchan->tasklet);
- return IRQ_HANDLED;
- } else
+ if (clear_chan_irq(phy) != 0)
return IRQ_NONE;
+
+ tasklet_schedule(&phy->vchan->tasklet);
+ return IRQ_HANDLED;
}
static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
@@ -224,8 +228,8 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
if (irq_num)
return IRQ_HANDLED;
- else
- return IRQ_NONE;
+
+ return IRQ_NONE;
}
/* lookup free phy channel as descending priority */
@@ -245,9 +249,9 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
*/
spin_lock_irqsave(&pdev->phy_lock, flags);
- for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
+ for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
for (i = 0; i < pdev->dma_channels; i++) {
- if (prio != ((i & 0xf) >> 2))
+ if (prio != (i & 0xf) >> 2)
continue;
phy = &pdev->phy[i];
if (!phy->vchan) {
@@ -389,14 +393,16 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
if (chan->desc_pool)
return 1;
- chan->desc_pool =
- dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
- sizeof(struct mmp_pdma_desc_sw),
- __alignof__(struct mmp_pdma_desc_sw), 0);
+ chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
+ chan->dev,
+ sizeof(struct mmp_pdma_desc_sw),
+ __alignof__(struct mmp_pdma_desc_sw),
+ 0);
if (!chan->desc_pool) {
dev_err(chan->dev, "unable to allocate descriptor pool\n");
return -ENOMEM;
}
+
mmp_pdma_free_phy(chan);
chan->idle = true;
chan->dev_addr = 0;
@@ -404,7 +410,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
}
static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
- struct list_head *list)
+ struct list_head *list)
{
struct mmp_pdma_desc_sw *desc, *_desc;
@@ -434,8 +440,8 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
- dma_addr_t dma_dst, dma_addr_t dma_src,
- size_t len, unsigned long flags)
+ dma_addr_t dma_dst, dma_addr_t dma_src,
+ size_t len, unsigned long flags)
{
struct mmp_pdma_chan *chan;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
@@ -515,8 +521,8 @@ fail:
static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction dir,
- unsigned long flags, void *context)
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
{
struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
@@ -591,10 +597,11 @@ fail:
return NULL;
}
-static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
- struct dma_chan *dchan, dma_addr_t buf_addr, size_t len,
- size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+static struct dma_async_tx_descriptor *
+mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
+ dma_addr_t buf_addr, size_t len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
{
struct mmp_pdma_chan *chan;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
@@ -636,8 +643,8 @@ static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
goto fail;
}
- new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
- (DCMD_LENGTH & period_len);
+ new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
+ (DCMD_LENGTH & period_len));
new->desc.dsadr = dma_src;
new->desc.dtadr = dma_dst;
@@ -677,12 +684,11 @@ fail:
}
static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
- unsigned long arg)
+ unsigned long arg)
{
struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
struct dma_slave_config *cfg = (void *)arg;
unsigned long flags;
- int ret = 0;
u32 maxburst = 0, addr = 0;
enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
@@ -739,11 +745,12 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
return -ENOSYS;
}
- return ret;
+ return 0;
}
static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
- dma_cookie_t cookie, struct dma_tx_state *txstate)
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
return dma_cookie_status(dchan, cookie, txstate);
}
@@ -845,15 +852,14 @@ static int mmp_pdma_remove(struct platform_device *op)
return 0;
}
-static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
- int idx, int irq)
+static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
{
struct mmp_pdma_phy *phy = &pdev->phy[idx];
struct mmp_pdma_chan *chan;
int ret;
- chan = devm_kzalloc(pdev->dev,
- sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+ chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan),
+ GFP_KERNEL);
if (chan == NULL)
return -ENOMEM;
@@ -861,8 +867,8 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
phy->base = pdev->base;
if (irq) {
- ret = devm_request_irq(pdev->dev, irq,
- mmp_pdma_chan_handler, 0, "pdma", phy);
+ ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0,
+ "pdma", phy);
if (ret) {
dev_err(pdev->dev, "channel request irq fail!\n");
return ret;
@@ -877,8 +883,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
INIT_LIST_HEAD(&chan->chain_running);
/* register virt channel to dma engine */
- list_add_tail(&chan->chan.device_node,
- &pdev->device.channels);
+ list_add_tail(&chan->chan.device_node, &pdev->device.channels);
return 0;
}
@@ -893,33 +898,15 @@ static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct mmp_pdma_device *d = ofdma->of_dma_data;
- struct dma_chan *chan, *candidate;
-
-retry:
- candidate = NULL;
+ struct dma_chan *chan;
- /* walk the list of channels registered with the current instance and
- * find one that is currently unused */
- list_for_each_entry(chan, &d->device.channels, device_node)
- if (chan->client_count == 0) {
- candidate = chan;
- break;
- }
-
- if (!candidate)
+ chan = dma_get_any_slave_channel(&d->device);
+ if (!chan)
return NULL;
- /* dma_get_slave_channel will return NULL if we lost a race between
- * the lookup and the reservation */
- chan = dma_get_slave_channel(candidate);
+ to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];
- if (chan) {
- struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
- c->drcmr = dma_spec->args[0];
- return chan;
- }
-
- goto retry;
+ return chan;
}
static int mmp_pdma_probe(struct platform_device *op)
@@ -934,6 +921,7 @@ static int mmp_pdma_probe(struct platform_device *op)
pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
if (!pdev)
return -ENOMEM;
+
pdev->dev = &op->dev;
spin_lock_init(&pdev->phy_lock);
@@ -945,8 +933,8 @@ static int mmp_pdma_probe(struct platform_device *op)
of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
if (of_id)
- of_property_read_u32(pdev->dev->of_node,
- "#dma-channels", &dma_channels);
+ of_property_read_u32(pdev->dev->of_node, "#dma-channels",
+ &dma_channels);
else if (pdata && pdata->dma_channels)
dma_channels = pdata->dma_channels;
else
@@ -958,8 +946,9 @@ static int mmp_pdma_probe(struct platform_device *op)
irq_num++;
}
- pdev->phy = devm_kzalloc(pdev->dev,
- dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+ pdev->phy = devm_kcalloc(pdev->dev,
+ dma_channels, sizeof(struct mmp_pdma_chan),
+ GFP_KERNEL);
if (pdev->phy == NULL)
return -ENOMEM;
@@ -968,8 +957,8 @@ static int mmp_pdma_probe(struct platform_device *op)
if (irq_num != dma_channels) {
/* all chan share one irq, demux inside */
irq = platform_get_irq(op, 0);
- ret = devm_request_irq(pdev->dev, irq,
- mmp_pdma_int_handler, 0, "pdma", pdev);
+ ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0,
+ "pdma", pdev);
if (ret)
return ret;
}
@@ -1045,7 +1034,7 @@ bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
if (chan->device->dev->driver != &mmp_pdma_driver.driver)
return false;
- c->drcmr = *(unsigned int *) param;
+ c->drcmr = *(unsigned int *)param;
return true;
}
@@ -1053,6 +1042,6 @@ EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
module_platform_driver(mmp_pdma_driver);
-MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver");
+MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");
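
Among the BIT() conversions above, DRCMR(n) is also compacted onto one line: request lines 0-63 map into a register bank at offset 0x0100 and lines 64 and up into a second bank at 0x1100, with ((n) & 0x3f) << 2 selecting the 4-byte slot within the bank. A standalone sketch of the offset arithmetic follows; the sample line numbers are arbitrary.

/* Standalone sketch of the DRCMR(n) offset calculation shown above. */
#include <stdio.h>

#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))

int main(void)
{
	unsigned int lines[] = { 0, 5, 63, 64, 75 };
	unsigned int i;

	for (i = 0; i < sizeof(lines) / sizeof(lines[0]); i++)
		printf("request line %2u -> DRCMR register offset 0x%04x\n",
		       lines[i], (unsigned int)DRCMR(lines[i]));
	return 0;
}
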
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 3ddacc14a736..33f96aaa80c7 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -121,11 +121,13 @@ struct mmp_tdma_chan {
int idx;
enum mmp_tdma_type type;
int irq;
- unsigned long reg_base;
+ void __iomem *reg_base;
size_t buf_len;
size_t period_len;
size_t pos;
+
+ struct gen_pool *pool;
};
#define TDMA_CHANNEL_NUM 2
@@ -182,7 +184,7 @@ static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac)
static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
{
- unsigned int tdcr;
+ unsigned int tdcr = 0;
mmp_tdma_disable_chan(tdmac);
@@ -324,7 +326,7 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
struct gen_pool *gpool;
int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
- gpool = sram_get_gpool("asram");
+ gpool = tdmac->pool;
if (tdmac->desc_arr)
gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
size);
@@ -374,7 +376,7 @@ struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
struct gen_pool *gpool;
int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
- gpool = sram_get_gpool("asram");
+ gpool = tdmac->pool;
if (!gpool)
return NULL;
@@ -505,7 +507,8 @@ static int mmp_tdma_remove(struct platform_device *pdev)
}
static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
- int idx, int irq, int type)
+ int idx, int irq,
+ int type, struct gen_pool *pool)
{
struct mmp_tdma_chan *tdmac;
@@ -526,7 +529,8 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
tdmac->chan.device = &tdev->device;
tdmac->idx = idx;
tdmac->type = type;
- tdmac->reg_base = (unsigned long)tdev->base + idx * 4;
+ tdmac->reg_base = tdev->base + idx * 4;
+ tdmac->pool = pool;
tdmac->status = DMA_COMPLETE;
tdev->tdmac[tdmac->idx] = tdmac;
tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
@@ -553,6 +557,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
int i, ret;
int irq = 0, irq_num = 0;
int chan_num = TDMA_CHANNEL_NUM;
+ struct gen_pool *pool;
of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
if (of_id)
@@ -579,6 +584,15 @@ static int mmp_tdma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&tdev->device.channels);
+ if (pdev->dev.of_node)
+ pool = of_get_named_gen_pool(pdev->dev.of_node, "asram", 0);
+ else
+ pool = sram_get_gpool("asram");
+ if (!pool) {
+ dev_err(&pdev->dev, "asram pool not available\n");
+ return -ENOMEM;
+ }
+
if (irq_num != chan_num) {
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, irq,
@@ -590,7 +604,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
/* initialize channel parameters */
for (i = 0; i < chan_num; i++) {
irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
- ret = mmp_tdma_chan_init(tdev, i, irq, type);
+ ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);
if (ret)
return ret;
}
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 000000000000..3258e484e4f6
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,699 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define APB_DMA_MAX_CHANNEL 4
+
+#define REG_OFF_ADDRESS_SOURCE 0
+#define REG_OFF_ADDRESS_DEST 4
+#define REG_OFF_CYCLES 8
+#define REG_OFF_CTRL 12
+#define REG_OFF_CHAN_SIZE 16
+
+#define APB_DMA_ENABLE BIT(0)
+#define APB_DMA_FIN_INT_STS BIT(1)
+#define APB_DMA_FIN_INT_EN BIT(2)
+#define APB_DMA_BURST_MODE BIT(3)
+#define APB_DMA_ERR_INT_STS BIT(4)
+#define APB_DMA_ERR_INT_EN BIT(5)
+
+/*
+ * Unset: APB
+ * Set: AHB
+ */
+#define APB_DMA_SOURCE_SELECT 0x40
+#define APB_DMA_DEST_SELECT 0x80
+
+#define APB_DMA_SOURCE 0x100
+#define APB_DMA_DEST 0x1000
+
+#define APB_DMA_SOURCE_MASK 0x700
+#define APB_DMA_DEST_MASK 0x7000
+
+/*
+ * 000: No increment
+ * 001: +1 (Burst=0), +4 (Burst=1)
+ * 010: +2 (Burst=0), +8 (Burst=1)
+ * 011: +4 (Burst=0), +16 (Burst=1)
+ * 101: -1 (Burst=0), -4 (Burst=1)
+ * 110: -2 (Burst=0), -8 (Burst=1)
+ * 111: -4 (Burst=0), -16 (Burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0 0
+#define APB_DMA_SOURCE_INC_1_4 0x100
+#define APB_DMA_SOURCE_INC_2_8 0x200
+#define APB_DMA_SOURCE_INC_4_16 0x300
+#define APB_DMA_SOURCE_DEC_1_4 0x500
+#define APB_DMA_SOURCE_DEC_2_8 0x600
+#define APB_DMA_SOURCE_DEC_4_16 0x700
+#define APB_DMA_DEST_INC_0 0
+#define APB_DMA_DEST_INC_1_4 0x1000
+#define APB_DMA_DEST_INC_2_8 0x2000
+#define APB_DMA_DEST_INC_4_16 0x3000
+#define APB_DMA_DEST_DEC_1_4 0x5000
+#define APB_DMA_DEST_DEC_2_8 0x6000
+#define APB_DMA_DEST_DEC_4_16 0x7000
+
+/*
+ * Request signal select source/destination address for DMA hardware handshake.
+ *
+ * The request line number is a property of the DMA controller itself,
+ * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
+ *
+ * 0: No request / Grant signal
+ * 1-15: Request / Grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO 0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK 0xf000000
+#define APB_DMA_DEST_REQ_NO 0x10000
+#define APB_DMA_DEST_REQ_NO_MASK 0xf0000
+
+#define APB_DMA_DATA_WIDTH 0x100000
+#define APB_DMA_DATA_WIDTH_MASK 0x300000
+/*
+ * Data width of transfer:
+ *
+ * 00: Word
+ * 01: Half
+ * 10: Byte
+ */
+#define APB_DMA_DATA_WIDTH_4 0
+#define APB_DMA_DATA_WIDTH_2 0x100000
+#define APB_DMA_DATA_WIDTH_1 0x200000
+
+#define APB_DMA_CYCLES_MASK 0x00ffffff
+
+#define MOXART_DMA_DATA_TYPE_S8 0x00
+#define MOXART_DMA_DATA_TYPE_S16 0x01
+#define MOXART_DMA_DATA_TYPE_S32 0x02
+
+struct moxart_sg {
+ dma_addr_t addr;
+ uint32_t len;
+};
+
+struct moxart_desc {
+ enum dma_transfer_direction dma_dir;
+ dma_addr_t dev_addr;
+ unsigned int sglen;
+ unsigned int dma_cycles;
+ struct virt_dma_desc vd;
+ uint8_t es;
+ struct moxart_sg sg[0];
+};
+
+struct moxart_chan {
+ struct virt_dma_chan vc;
+
+ void __iomem *base;
+ struct moxart_desc *desc;
+
+ struct dma_slave_config cfg;
+
+ bool allocated;
+ bool error;
+ int ch_num;
+ unsigned int line_reqno;
+ unsigned int sgidx;
+};
+
+struct moxart_dmadev {
+ struct dma_device dma_slave;
+ struct moxart_chan slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_filter_data {
+ struct moxart_dmadev *mdc;
+ struct of_phandle_args *dma_spec;
+};
+
+static const unsigned int es_bytes[] = {
+ [MOXART_DMA_DATA_TYPE_S8] = 1,
+ [MOXART_DMA_DATA_TYPE_S16] = 2,
+ [MOXART_DMA_DATA_TYPE_S32] = 4,
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct moxart_chan, vc.chan);
+}
+
+static inline struct moxart_desc *to_moxart_dma_desc(
+ struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct moxart_desc, vd.tx);
+}
+
+static void moxart_dma_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(container_of(vd, struct moxart_desc, vd));
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+ struct moxart_chan *ch = to_moxart_dma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+ u32 ctrl;
+
+ dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+ spin_lock_irqsave(&ch->vc.lock, flags);
+
+ if (ch->desc)
+ ch->desc = NULL;
+
+ ctrl = readl(ch->base + REG_OFF_CTRL);
+ ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(ctrl, ch->base + REG_OFF_CTRL);
+
+ vchan_get_all_descriptors(&ch->vc, &head);
+ spin_unlock_irqrestore(&ch->vc.lock, flags);
+ vchan_dma_desc_free_list(&ch->vc, &head);
+
+ return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct moxart_chan *ch = to_moxart_dma_chan(chan);
+ u32 ctrl;
+
+ ch->cfg = *cfg;
+
+ ctrl = readl(ch->base + REG_OFF_CTRL);
+ ctrl |= APB_DMA_BURST_MODE;
+ ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+ ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+ switch (ch->cfg.src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ ctrl |= APB_DMA_DATA_WIDTH_1;
+ if (ch->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_1_4;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_1_4;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ ctrl |= APB_DMA_DATA_WIDTH_2;
+ if (ch->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_2_8;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_2_8;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ ctrl &= ~APB_DMA_DATA_WIDTH;
+ if (ch->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_4_16;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_4_16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ch->cfg.direction == DMA_MEM_TO_DEV) {
+ ctrl &= ~APB_DMA_DEST_SELECT;
+ ctrl |= APB_DMA_SOURCE_SELECT;
+ ctrl |= (ch->line_reqno << 16 &
+ APB_DMA_DEST_REQ_NO_MASK);
+ } else {
+ ctrl |= APB_DMA_DEST_SELECT;
+ ctrl &= ~APB_DMA_SOURCE_SELECT;
+ ctrl |= (ch->line_reqno << 24 &
+ APB_DMA_SOURCE_REQ_NO_MASK);
+ }
+
+ writel(ctrl, ch->base + REG_OFF_CTRL);
+
+ return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case DMA_PAUSE:
+ case DMA_RESUME:
+ return -EINVAL;
+ case DMA_TERMINATE_ALL:
+ moxart_terminate_all(chan);
+ break;
+ case DMA_SLAVE_CONFIG:
+ ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ struct moxart_chan *ch = to_moxart_dma_chan(chan);
+ struct moxart_desc *d;
+ enum dma_slave_buswidth dev_width;
+ dma_addr_t dev_addr;
+ struct scatterlist *sgent;
+ unsigned int es;
+ unsigned int i;
+
+ if (!is_slave_direction(dir)) {
+ dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
+ __func__);
+ return NULL;
+ }
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_addr = ch->cfg.src_addr;
+ dev_width = ch->cfg.src_addr_width;
+ } else {
+ dev_addr = ch->cfg.dst_addr;
+ dev_width = ch->cfg.dst_addr_width;
+ }
+
+ switch (dev_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ es = MOXART_DMA_DATA_TYPE_S8;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ es = MOXART_DMA_DATA_TYPE_S16;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ es = MOXART_DMA_DATA_TYPE_S32;
+ break;
+ default:
+ dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
+ __func__, dev_width);
+ return NULL;
+ }
+
+ d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
+ if (!d)
+ return NULL;
+
+ d->dma_dir = dir;
+ d->dev_addr = dev_addr;
+ d->es = es;
+
+ for_each_sg(sgl, sgent, sg_len, i) {
+ d->sg[i].addr = sg_dma_address(sgent);
+ d->sg[i].len = sg_dma_len(sgent);
+ }
+
+ d->sglen = sg_len;
+
+ ch->error = 0;
+
+ return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
+}
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct moxart_dmadev *mdc = ofdma->of_dma_data;
+ struct dma_chan *chan;
+ struct moxart_chan *ch;
+
+ chan = dma_get_any_slave_channel(&mdc->dma_slave);
+ if (!chan)
+ return NULL;
+
+ ch = to_moxart_dma_chan(chan);
+ ch->line_reqno = dma_spec->args[0];
+
+ return chan;
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+ dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+ __func__, ch->ch_num);
+ ch->allocated = 1;
+
+ return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+ vchan_free_chan_resources(&ch->vc);
+
+ dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+ __func__, ch->ch_num);
+ ch->allocated = 0;
+}
+
+static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
+ dma_addr_t dst_addr)
+{
+ writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
+ writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
+}
+
+static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
+{
+ struct moxart_desc *d = ch->desc;
+ unsigned int sglen_div = es_bytes[d->es];
+
+ d->dma_cycles = len >> sglen_div;
+
+ /*
+	 * There are 4 cycles for every 64 bytes copied, i.e. one cycle copies
+	 * 16 bytes (when the width is APB_DMA_DATA_WIDTH_4).
+ */
+ writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
+
+ dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
+ __func__, d->dma_cycles, len);
+}
+
+static void moxart_start_dma(struct moxart_chan *ch)
+{
+ u32 ctrl;
+
+ ctrl = readl(ch->base + REG_OFF_CTRL);
+ ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(ctrl, ch->base + REG_OFF_CTRL);
+}
+
+static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
+{
+ struct moxart_desc *d = ch->desc;
+ struct moxart_sg *sg = ch->desc->sg + idx;
+
+ if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
+ moxart_dma_set_params(ch, sg->addr, d->dev_addr);
+ else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
+ moxart_dma_set_params(ch, d->dev_addr, sg->addr);
+
+ moxart_set_transfer_params(ch, sg->len);
+
+ moxart_start_dma(ch);
+}
+
+static void moxart_dma_start_desc(struct dma_chan *chan)
+{
+ struct moxart_chan *ch = to_moxart_dma_chan(chan);
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&ch->vc);
+
+ if (!vd) {
+ ch->desc = NULL;
+ return;
+ }
+
+ list_del(&vd->node);
+
+ ch->desc = to_moxart_dma_desc(&vd->tx);
+ ch->sgidx = 0;
+
+ moxart_dma_start_sg(ch, 0);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+ struct moxart_chan *ch = to_moxart_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->vc.lock, flags);
+ if (vchan_issue_pending(&ch->vc) && !ch->desc)
+ moxart_dma_start_desc(chan);
+ spin_unlock_irqrestore(&ch->vc.lock, flags);
+}
+
+static size_t moxart_dma_desc_size(struct moxart_desc *d,
+ unsigned int completed_sgs)
+{
+ unsigned int i;
+ size_t size;
+
+ for (size = i = completed_sgs; i < d->sglen; i++)
+ size += d->sg[i].len;
+
+ return size;
+}
+
+static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
+{
+ size_t size;
+ unsigned int completed_cycles, cycles;
+
+ size = moxart_dma_desc_size(ch->desc, ch->sgidx);
+ cycles = readl(ch->base + REG_OFF_CYCLES);
+ completed_cycles = (ch->desc->dma_cycles - cycles);
+ size -= completed_cycles << es_bytes[ch->desc->es];
+
+ dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);
+
+ return size;
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct moxart_chan *ch = to_moxart_dma_chan(chan);
+ struct virt_dma_desc *vd;
+ struct moxart_desc *d;
+ enum dma_status ret;
+ unsigned long flags;
+
+ /*
+ * dma_cookie_status() assigns initial residue value.
+ */
+ ret = dma_cookie_status(chan, cookie, txstate);
+
+ spin_lock_irqsave(&ch->vc.lock, flags);
+ vd = vchan_find_desc(&ch->vc, cookie);
+ if (vd) {
+ d = to_moxart_dma_desc(&vd->tx);
+ txstate->residue = moxart_dma_desc_size(d, 0);
+ } else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
+ txstate->residue = moxart_dma_desc_size_in_flight(ch);
+ }
+ spin_unlock_irqrestore(&ch->vc.lock, flags);
+
+ if (ch->error)
+ return DMA_ERROR;
+
+ return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+ dma->device_prep_slave_sg = moxart_prep_slave_sg;
+ dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
+ dma->device_free_chan_resources = moxart_free_chan_resources;
+ dma->device_issue_pending = moxart_issue_pending;
+ dma->device_tx_status = moxart_tx_status;
+ dma->device_control = moxart_control;
+ dma->dev = dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+ struct moxart_dmadev *mc = devid;
+ struct moxart_chan *ch = &mc->slave_chans[0];
+ unsigned int i;
+ unsigned long flags;
+ u32 ctrl;
+
+ dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+ if (!ch->allocated)
+ continue;
+
+ ctrl = readl(ch->base + REG_OFF_CTRL);
+
+ dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
+ __func__, ch, ch->base, ctrl);
+
+ if (ctrl & APB_DMA_FIN_INT_STS) {
+ ctrl &= ~APB_DMA_FIN_INT_STS;
+ if (ch->desc) {
+ spin_lock_irqsave(&ch->vc.lock, flags);
+ if (++ch->sgidx < ch->desc->sglen) {
+ moxart_dma_start_sg(ch, ch->sgidx);
+ } else {
+ vchan_cookie_complete(&ch->desc->vd);
+ moxart_dma_start_desc(&ch->vc.chan);
+ }
+ spin_unlock_irqrestore(&ch->vc.lock, flags);
+ }
+ }
+
+ if (ctrl & APB_DMA_ERR_INT_STS) {
+ ctrl &= ~APB_DMA_ERR_INT_STS;
+ ch->error = 1;
+ }
+
+ writel(ctrl, ch->base + REG_OFF_CTRL);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int moxart_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ static void __iomem *dma_base_addr;
+ int ret, i;
+ unsigned int irq;
+ struct moxart_chan *ch;
+ struct moxart_dmadev *mdc;
+
+ mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+ if (!mdc) {
+ dev_err(dev, "can't allocate DMA container\n");
+ return -ENOMEM;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq == NO_IRQ) {
+ dev_err(dev, "no IRQ resource\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dma_base_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dma_base_addr))
+ return PTR_ERR(dma_base_addr);
+
+ dma_cap_zero(mdc->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+ dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);
+
+ moxart_dma_init(&mdc->dma_slave, dev);
+
+ ch = &mdc->slave_chans[0];
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+ ch->ch_num = i;
+ ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
+ ch->allocated = 0;
+
+ ch->vc.desc_free = moxart_dma_desc_free;
+ vchan_init(&ch->vc, &mdc->dma_slave);
+
+ dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
+ __func__, i, ch->ch_num, ch->base);
+ }
+
+ platform_set_drvdata(pdev, mdc);
+
+ ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+ "moxart-dma-engine", mdc);
+ if (ret) {
+ dev_err(dev, "devm_request_irq failed\n");
+ return ret;
+ }
+
+ ret = dma_async_device_register(&mdc->dma_slave);
+ if (ret) {
+ dev_err(dev, "dma_async_device_register failed\n");
+ return ret;
+ }
+
+ ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+ if (ret) {
+ dev_err(dev, "of_dma_controller_register failed\n");
+ dma_async_device_unregister(&mdc->dma_slave);
+ return ret;
+ }
+
+ dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+ return 0;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+ struct moxart_dmadev *m = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&m->dma_slave);
+
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+ { .compatible = "moxa,moxart-dma" },
+ { }
+};
+
+static struct platform_driver moxart_driver = {
+ .probe = moxart_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "moxart-dma-engine",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_dma_match,
+ },
+};
+
+static int moxart_init(void)
+{
+ return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+ platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
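
In the new moxart-dma driver above, moxart_set_transfer_params programs REG_OFF_CYCLES with len >> es_bytes[width], and moxart_dma_desc_size_in_flight converts completed cycles back to bytes with the same shift; per the in-code comment, the 4-byte width moves 16 bytes per cycle. Below is a standalone sketch of that arithmetic; the sample length and readback value are made up.

/* Standalone sketch of the moxart cycle/residue arithmetic: the value from
 * es_bytes[] is used as a shift count, so the 4-byte ("S32") case divides
 * the length by 16 bytes per cycle. */
#include <stdio.h>

enum { S8, S16, S32 };
static const unsigned int es_bytes[] = { [S8] = 1, [S16] = 2, [S32] = 4 };

int main(void)
{
	unsigned int len = 64;			/* bytes in the current sg entry (made up) */
	unsigned int shift = es_bytes[S32];	/* used as a shift count by the driver */
	unsigned int dma_cycles = len >> shift;	/* 64 >> 4 = 4 cycles programmed */
	unsigned int cycles_left = 1;		/* pretend REG_OFF_CYCLES reads back 1 */
	unsigned int done = (dma_cycles - cycles_left) << shift;

	printf("%u cycles programmed, %u bytes already moved, %u bytes left\n",
	       dma_cycles, done, len - done);
	return 0;
}
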
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 7807f0ef4e20..766b68ed505c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
hw_desc->desc_command = (1 << 31);
}
-static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
-{
- struct mv_xor_desc *hw_desc = desc->hw_desc;
- return hw_desc->phy_dest_addr;
-}
-
static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
u32 byte_count)
{
@@ -503,8 +497,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
if (!mv_can_chain(grp_start))
goto submit_done;
- dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
- old_chain_tail->async_tx.phys);
+ dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
+ &old_chain_tail->async_tx.phys);
/* fix up the hardware chain */
mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
@@ -533,7 +527,8 @@ submit_done:
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
- char *hw_desc;
+ void *virt_desc;
+ dma_addr_t dma_desc;
int idx;
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
struct mv_xor_desc_slot *slot = NULL;
@@ -548,17 +543,16 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
" %d descriptor slots", idx);
break;
}
- hw_desc = (char *) mv_chan->dma_desc_pool_virt;
- slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+ virt_desc = mv_chan->dma_desc_pool_virt;
+ slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
dma_async_tx_descriptor_init(&slot->async_tx, chan);
slot->async_tx.tx_submit = mv_xor_tx_submit;
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
INIT_LIST_HEAD(&slot->tx_list);
- hw_desc = (char *) mv_chan->dma_desc_pool;
- slot->async_tx.phys =
- (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+ dma_desc = mv_chan->dma_desc_pool;
+ slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
slot->idx = idx++;
spin_lock_bh(&mv_chan->lock);
@@ -588,8 +582,8 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
int slot_cnt;
dev_dbg(mv_chan_to_devp(mv_chan),
- "%s dest: %x src %x len: %u flags: %ld\n",
- __func__, dest, src, len, flags);
+ "%s dest: %pad src %pad len: %u flags: %ld\n",
+ __func__, &dest, &src, len, flags);
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
@@ -632,8 +626,8 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
dev_dbg(mv_chan_to_devp(mv_chan),
- "%s src_cnt: %d len: dest %x %u flags: %ld\n",
- __func__, src_cnt, len, dest, flags);
+ "%s src_cnt: %d len: %u dest %pad flags: %ld\n",
+ __func__, src_cnt, len, &dest, flags);
spin_lock_bh(&mv_chan->lock);
slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
@@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
/*
* Perform a transaction to verify the HW works.
*/
-#define MV_XOR_TEST_SIZE 2000
static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
@@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
struct dma_chan *dma_chan;
dma_cookie_t cookie;
struct dma_async_tx_descriptor *tx;
+ struct dmaengine_unmap_data *unmap;
int err = 0;
- src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+ src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
if (!src)
return -ENOMEM;
- dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+ dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
if (!dest) {
kfree(src);
return -ENOMEM;
}
/* Fill in src buffer */
- for (i = 0; i < MV_XOR_TEST_SIZE; i++)
+ for (i = 0; i < PAGE_SIZE; i++)
((u8 *) src)[i] = (u8)i;
dma_chan = &mv_chan->dmachan;
@@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
goto out;
}
- dest_dma = dma_map_single(dma_chan->device->dev, dest,
- MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+ unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
+ if (!unmap) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
+
+ src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
+ PAGE_SIZE, DMA_TO_DEVICE);
+ unmap->to_cnt = 1;
+ unmap->addr[0] = src_dma;
- src_dma = dma_map_single(dma_chan->device->dev, src,
- MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
+ dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ unmap->from_cnt = 1;
+ unmap->addr[1] = dest_dma;
+
+ unmap->len = PAGE_SIZE;
tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
- MV_XOR_TEST_SIZE, 0);
+ PAGE_SIZE, 0);
cookie = mv_xor_tx_submit(tx);
mv_xor_issue_pending(dma_chan);
async_tx_ack(tx);
@@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
}
dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
- MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
- if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (memcmp(src, dest, PAGE_SIZE)) {
dev_err(dma_chan->device->dev,
"Self-test copy failed compare, disabling\n");
err = -ENODEV;
@@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
}
free_resources:
+ dmaengine_unmap_put(unmap);
mv_xor_free_chan_resources(dma_chan);
out:
kfree(src);
@@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
dma_addr_t dest_dma;
struct dma_async_tx_descriptor *tx;
+ struct dmaengine_unmap_data *unmap;
struct dma_chan *dma_chan;
dma_cookie_t cookie;
u8 cmp_byte = 0;
u32 cmp_word;
int err = 0;
+ int src_count = MV_XOR_NUM_SRC_TEST;
- for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+ for (src_idx = 0; src_idx < src_count; src_idx++) {
xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
if (!xor_srcs[src_idx]) {
while (src_idx--)
@@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
}
/* Fill in src buffers */
- for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+ for (src_idx = 0; src_idx < src_count; src_idx++) {
u8 *ptr = page_address(xor_srcs[src_idx]);
for (i = 0; i < PAGE_SIZE; i++)
ptr[i] = (1 << src_idx);
}
- for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
+ for (src_idx = 0; src_idx < src_count; src_idx++)
cmp_byte ^= (u8) (1 << src_idx);
cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
goto out;
}
+ unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
+ GFP_KERNEL);
+ if (!unmap) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
+
/* test xor */
- dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ for (i = 0; i < src_count; i++) {
+ unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+ 0, PAGE_SIZE, DMA_TO_DEVICE);
+ dma_srcs[i] = unmap->addr[i];
+ unmap->to_cnt++;
+ }
- for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
- dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
- 0, PAGE_SIZE, DMA_TO_DEVICE);
+ unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ dest_dma = unmap->addr[src_count];
+ unmap->from_cnt = 1;
+ unmap->len = PAGE_SIZE;
tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
- MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
+ src_count, PAGE_SIZE, 0);
cookie = mv_xor_tx_submit(tx);
mv_xor_issue_pending(dma_chan);
@@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
}
free_resources:
+ dmaengine_unmap_put(unmap);
mv_xor_free_chan_resources(dma_chan);
out:
- src_idx = MV_XOR_NUM_SRC_TEST;
+ src_idx = src_count;
while (src_idx--)
__free_page(xor_srcs[src_idx]);
__free_page(dest);
@@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev)
int i = 0;
for_each_child_of_node(pdev->dev.of_node, np) {
+ struct mv_xor_chan *chan;
dma_cap_mask_t cap_mask;
int irq;
@@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev)
goto err_channel_add;
}
- xordev->channels[i] =
- mv_xor_channel_add(xordev, pdev, i,
- cap_mask, irq);
- if (IS_ERR(xordev->channels[i])) {
- ret = PTR_ERR(xordev->channels[i]);
- xordev->channels[i] = NULL;
+ chan = mv_xor_channel_add(xordev, pdev, i,
+ cap_mask, irq);
+ if (IS_ERR(chan)) {
+ ret = PTR_ERR(chan);
irq_dispose_mapping(irq);
goto err_channel_add;
}
+ xordev->channels[i] = chan;
i++;
}
} else if (pdata && pdata->channels) {
for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
struct mv_xor_channel_data *cd;
+ struct mv_xor_chan *chan;
int irq;
cd = &pdata->channels[i];
@@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev)
goto err_channel_add;
}
- xordev->channels[i] =
- mv_xor_channel_add(xordev, pdev, i,
- cd->cap_mask, irq);
- if (IS_ERR(xordev->channels[i])) {
- ret = PTR_ERR(xordev->channels[i]);
+ chan = mv_xor_channel_add(xordev, pdev, i,
+ cd->cap_mask, irq);
+ if (IS_ERR(chan)) {
+ ret = PTR_ERR(chan);
goto err_channel_add;
}
+
+ xordev->channels[i] = chan;
}
}
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 0b88dd3d05f4..e8fe9dc455f4 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -143,7 +143,7 @@ static int of_dma_match_channel(struct device_node *np, const char *name,
* @np: device node to get DMA request from
* @name: name of desired channel
*
- * Returns pointer to appropriate dma channel on success or NULL on error.
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
*/
struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
const char *name)
@@ -152,17 +152,18 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
struct of_dma *ofdma;
struct dma_chan *chan;
int count, i;
+ int ret_no_channel = -ENODEV;
if (!np || !name) {
pr_err("%s: not enough information provided\n", __func__);
- return NULL;
+ return ERR_PTR(-ENODEV);
}
count = of_property_count_strings(np, "dma-names");
if (count < 0) {
pr_err("%s: dma-names property of node '%s' missing or empty\n",
__func__, np->full_name);
- return NULL;
+ return ERR_PTR(-ENODEV);
}
for (i = 0; i < count; i++) {
@@ -172,10 +173,12 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
mutex_lock(&of_dma_lock);
ofdma = of_dma_find_controller(&dma_spec);
- if (ofdma)
+ if (ofdma) {
chan = ofdma->of_dma_xlate(&dma_spec, ofdma);
- else
+ } else {
+ ret_no_channel = -EPROBE_DEFER;
chan = NULL;
+ }
mutex_unlock(&of_dma_lock);
@@ -185,7 +188,7 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
return chan;
}
- return NULL;
+ return ERR_PTR(ret_no_channel);
}
/**
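
With the of-dma change above, of_dma_request_slave_channel() returns ERR_PTR(-EPROBE_DEFER) when no controller has registered yet and ERR_PTR(-ENODEV) when the channel does not exist, instead of NULL, so callers can tell the two cases apart. Below is a standalone sketch of the error-pointer convention; the ERR_PTR/IS_ERR/PTR_ERR helpers are simplified stand-ins for the kernel macros and the request function is a mock.

/* Standalone sketch of the error-pointer return convention adopted above. */
#include <stdio.h>

#define ENODEV		19
#define EPROBE_DEFER	517

/* simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-4095;
}

/* mock of a channel request that can defer or fail outright */
static void *request_channel(int controller_registered, int channel_exists)
{
	if (!controller_registered)
		return ERR_PTR(-EPROBE_DEFER);	/* controller may still probe later */
	if (!channel_exists)
		return ERR_PTR(-ENODEV);	/* channel will never appear */
	return (void *)0x1000;			/* stand-in for a real dma_chan */
}

int main(void)
{
	void *chan = request_channel(0, 1);

	if (IS_ERR(chan))
		printf("no channel, error %ld%s\n", PTR_ERR(chan),
		       PTR_ERR(chan) == -EPROBE_DEFER ? " (defer probe)" : "");
	else
		printf("got channel %p\n", chan);
	return 0;
}
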
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 2f66cf4e54fe..362e7c49f2e1 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -190,7 +190,7 @@ static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct omap_chan *c = to_omap_dma_chan(chan);
- dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
+ dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
return omap_request_dma(c->dma_sig, "DMA engine",
omap_dma_callback, c, &c->dma_ch);
@@ -203,7 +203,7 @@ static void omap_dma_free_chan_resources(struct dma_chan *chan)
vchan_free_chan_resources(&c->vc);
omap_free_dma(c->dma_ch);
- dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
+ dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}
static size_t omap_dma_sg_size(struct omap_sg *sg)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cdf0483b8f2d..73fa9b7a10ab 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -543,7 +543,9 @@ struct dma_pl330_chan {
/* DMA-Engine Channel */
struct dma_chan chan;
- /* List of to be xfered descriptors */
+ /* List of submitted descriptors */
+ struct list_head submitted_list;
+ /* List of issued descriptors */
struct list_head work_list;
/* List of completed descriptors */
struct list_head completed_list;
@@ -578,12 +580,16 @@ struct dma_pl330_dmac {
/* DMA-Engine Device */
struct dma_device ddma;
+ /* Holds info about sg limitations */
+ struct device_dma_parameters dma_parms;
+
/* Pool of descriptors available for the DMAC's channels */
struct list_head desc_pool;
/* To protect desc_pool manipulation */
spinlock_t pool_lock;
/* Peripheral channels connected to this DMAC */
+ unsigned int num_peripherals;
struct dma_pl330_chan *peripherals; /* keep at end */
};
@@ -606,11 +612,6 @@ struct dma_pl330_desc {
struct dma_pl330_chan *pchan;
};
-struct dma_pl330_filter_args {
- struct dma_pl330_dmac *pdmac;
- unsigned int chan_id;
-};
-
static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
{
if (r && r->xfer_cb)
@@ -2298,16 +2299,6 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
tasklet_schedule(&pch->task);
}
-static bool pl330_dt_filter(struct dma_chan *chan, void *param)
-{
- struct dma_pl330_filter_args *fargs = param;
-
- if (chan->device != &fargs->pdmac->ddma)
- return false;
-
- return (chan->chan_id == fargs->chan_id);
-}
-
bool pl330_filter(struct dma_chan *chan, void *param)
{
u8 *peri_id;
@@ -2325,23 +2316,16 @@ static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
{
int count = dma_spec->args_count;
struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
- struct dma_pl330_filter_args fargs;
- dma_cap_mask_t cap;
-
- if (!pdmac)
- return NULL;
+ unsigned int chan_id;
if (count != 1)
return NULL;
- fargs.pdmac = pdmac;
- fargs.chan_id = dma_spec->args[0];
-
- dma_cap_zero(cap);
- dma_cap_set(DMA_SLAVE, cap);
- dma_cap_set(DMA_CYCLIC, cap);
+ chan_id = dma_spec->args[0];
+ if (chan_id >= pdmac->num_peripherals)
+ return NULL;
- return dma_request_channel(cap, pl330_dt_filter, &fargs);
+ return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan);
}
static int pl330_alloc_chan_resources(struct dma_chan *chan)
@@ -2385,6 +2369,11 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
/* Mark all desc done */
+ list_for_each_entry(desc, &pch->submitted_list, node) {
+ desc->status = FREE;
+ dma_cookie_complete(&desc->txd);
+ }
+
list_for_each_entry(desc, &pch->work_list , node) {
desc->status = FREE;
dma_cookie_complete(&desc->txd);
@@ -2395,6 +2384,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
dma_cookie_complete(&desc->txd);
}
+ list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool);
list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
@@ -2453,7 +2443,14 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
static void pl330_issue_pending(struct dma_chan *chan)
{
- pl330_tasklet((unsigned long) to_pchan(chan));
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pch->lock, flags);
+ list_splice_tail_init(&pch->submitted_list, &pch->work_list);
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ pl330_tasklet((unsigned long)pch);
}
/*
@@ -2480,11 +2477,11 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
dma_cookie_assign(&desc->txd);
- list_move_tail(&desc->node, &pch->work_list);
+ list_move_tail(&desc->node, &pch->submitted_list);
}
cookie = dma_cookie_assign(&last->txd);
- list_add_tail(&last->node, &pch->work_list);
+ list_add_tail(&last->node, &pch->submitted_list);
spin_unlock_irqrestore(&pch->lock, flags);
return cookie;
@@ -2492,12 +2489,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
static inline void _init_desc(struct dma_pl330_desc *desc)
{
- desc->pchan = NULL;
desc->req.x = &desc->px;
desc->req.token = desc;
desc->rqcfg.swap = SWAP_NO;
- desc->rqcfg.privileged = 0;
- desc->rqcfg.insnaccess = 0;
desc->rqcfg.scctl = SCCTRL0;
desc->rqcfg.dcctl = DCCTRL0;
desc->req.cfg = &desc->rqcfg;
@@ -2517,7 +2511,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
if (!pdmac)
return 0;
- desc = kmalloc(count * sizeof(*desc), flg);
+ desc = kcalloc(count, sizeof(*desc), flg);
if (!desc)
return 0;
@@ -2887,6 +2881,7 @@ static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
caps->cmd_pause = false;
caps->cmd_terminate = true;
+ caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
return 0;
}
@@ -2962,6 +2957,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
else
num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
+ pdmac->num_peripherals = num_chan;
+
pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
if (!pdmac->peripherals) {
ret = -ENOMEM;
@@ -2976,6 +2973,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
else
pch->chan.private = adev->dev.of_node;
+ INIT_LIST_HEAD(&pch->submitted_list);
INIT_LIST_HEAD(&pch->work_list);
INIT_LIST_HEAD(&pch->completed_list);
spin_lock_init(&pch->lock);
@@ -3023,6 +3021,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
"unable to register DMA to the generic DT DMA helpers\n");
}
}
+
+ adev->dev.dma_parms = &pdmac->dma_parms;
+
/*
* This is the limit for transfers with a buswidth of 1, larger
* buswidths will have larger limits.
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 8da48c6b2a38..ce7a8d7564ba 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -533,29 +533,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
}
/**
- * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
- */
-static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
- int value, unsigned long flags)
-{
- struct dma_cdb *hw_desc = desc->hw_desc;
-
- memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
- desc->hw_next = NULL;
- desc->src_cnt = 1;
- desc->dst_cnt = 1;
-
- if (flags & DMA_PREP_INTERRUPT)
- set_bit(PPC440SPE_DESC_INT, &desc->flags);
- else
- clear_bit(PPC440SPE_DESC_INT, &desc->flags);
-
- hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
- hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
- hw_desc->opc = DMA_CDB_OPC_DFILL128;
-}
-
-/**
* ppc440spe_desc_set_src_addr - set source address into the descriptor
*/
static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
@@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
struct ppc440spe_adma_chan *chan,
dma_cookie_t cookie)
{
- int i;
-
BUG_ON(desc->async_tx.cookie < 0);
if (desc->async_tx.cookie > 0) {
cookie = desc->async_tx.cookie;
@@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
ppc440spe_adma_prep_dma_interrupt;
}
pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
- "( %s%s%s%s%s%s%s)\n",
+ "( %s%s%s%s%s%s)\n",
dev_name(adev->dev),
dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
@@ -4139,6 +4114,7 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
regs = ioremap(res.start, resource_size(&res));
if (!regs) {
dev_err(&ofdev->dev, "failed to ioremap regs!\n");
+ ret = -ENOMEM;
goto err_regs_alloc;
}
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 6aec3ad814d3..d4d3a3109b16 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -640,6 +640,25 @@ bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
+#define SIRFSOC_DMA_BUSWIDTHS \
+ (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
+ caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = true;
+ caps->cmd_terminate = true;
+
+ return 0;
+}
+
static int sirfsoc_dma_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
@@ -712,6 +731,7 @@ static int sirfsoc_dma_probe(struct platform_device *op)
dma->device_tx_status = sirfsoc_dma_tx_status;
dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
+ dma->device_slave_caps = sirfsoc_dma_device_slave_caps;
INIT_LIST_HEAD(&dma->channels);
dma_cap_set(DMA_SLAVE, dma->cap_mask);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index b8c031b7de4e..00a2de957b23 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2409,6 +2409,7 @@ static void d40_set_prio_realtime(struct d40_chan *d40c)
#define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1)
#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
+#define D40_DT_FLAGS_HIGH_PRIO(flags) ((flags >> 4) & 0x1)
static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
@@ -2446,6 +2447,9 @@ static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
cfg.use_fixed_channel = true;
}
+ if (D40_DT_FLAGS_HIGH_PRIO(flags))
+ cfg.high_priority = true;
+
return dma_request_channel(cap, stedma40_filter, &cfg);
}
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 73654e33f13b..03ad64ecaaf0 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1,7 +1,7 @@
/*
* DMA driver for Nvidia's Tegra20 APB DMA controller.
*
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -29,11 +29,12 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
-#include <linux/clk/tegra.h>
#include "dmaengine.h"
@@ -99,6 +100,11 @@
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16)
+/* Tegra148 specific registers */
+#define TEGRA_APBDMA_CHAN_WCOUNT 0x20
+
+#define TEGRA_APBDMA_CHAN_WORD_TRANSFER 0x24
+
/*
 * If any burst is in flight and the DMA is paused then this is the time to complete
 * the in-flight burst and update the DMA status register.
@@ -108,21 +114,22 @@
/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000
-/* DMA channel register space size */
-#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE 0x20
-
struct tegra_dma;
/*
* tegra_dma_chip_data Tegra chip specific DMA data
* @nr_channels: Number of channels available in the controller.
+ * @channel_reg_size: Channel register size/stride.
* @max_dma_count: Maximum DMA transfer count supported by DMA controller.
* @support_channel_pause: Support channel wise pause of dma.
+ * @support_separate_wcount_reg: Support separate word count register.
*/
struct tegra_dma_chip_data {
int nr_channels;
+ int channel_reg_size;
int max_dma_count;
bool support_channel_pause;
+ bool support_separate_wcount_reg;
};
/* DMA channel registers */
@@ -132,6 +139,7 @@ struct tegra_dma_channel_regs {
unsigned long apb_ptr;
unsigned long ahb_seq;
unsigned long apb_seq;
+ unsigned long wcount;
};
/*
@@ -199,6 +207,7 @@ struct tegra_dma_channel {
void *callback_param;
/* Channel-slave specific configuration */
+ unsigned int slave_id;
struct dma_slave_config dma_sconfig;
struct tegra_dma_channel_regs channel_reg;
};
@@ -208,6 +217,7 @@ struct tegra_dma {
struct dma_device dma_dev;
struct device *dev;
struct clk *dma_clk;
+ struct reset_control *rst;
spinlock_t global_lock;
void __iomem *base_addr;
const struct tegra_dma_chip_data *chip_data;
@@ -339,6 +349,8 @@ static int tegra_dma_slave_config(struct dma_chan *dc,
}
memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
+ if (!tdc->slave_id)
+ tdc->slave_id = sconfig->slave_id;
tdc->config_init = true;
return 0;
}
@@ -421,6 +433,8 @@ static void tegra_dma_start(struct tegra_dma_channel *tdc,
tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
+ if (tdc->tdma->chip_data->support_separate_wcount_reg)
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);
/* Start DMA */
tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
@@ -460,6 +474,9 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
/* Safe to program new configuration */
tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
+ if (tdc->tdma->chip_data->support_separate_wcount_reg)
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
+ nsg_req->ch_regs.wcount);
tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
nsg_req->configured = true;
@@ -713,6 +730,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
struct tegra_dma_desc *dma_desc;
unsigned long flags;
unsigned long status;
+ unsigned long wcount;
bool was_busy;
spin_lock_irqsave(&tdc->lock, flags);
@@ -733,6 +751,10 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
tdc->isr_handler(tdc, true);
status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
}
+ if (tdc->tdma->chip_data->support_separate_wcount_reg)
+ wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
+ else
+ wcount = status;
was_busy = tdc->busy;
tegra_dma_stop(tdc);
@@ -741,7 +763,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
sgreq = list_first_entry(&tdc->pending_sg_req,
typeof(*sgreq), node);
sgreq->dma_desc->bytes_transferred +=
- get_current_xferred_count(tdc, sgreq, status);
+ get_current_xferred_count(tdc, sgreq, wcount);
}
tegra_dma_resume(tdc);
@@ -903,6 +925,17 @@ static int get_transfer_param(struct tegra_dma_channel *tdc,
return -EINVAL;
}
+static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
+ struct tegra_dma_channel_regs *ch_regs, u32 len)
+{
+ u32 len_field = (len - 4) & 0xFFFC;
+
+ if (tdc->tdma->chip_data->support_separate_wcount_reg)
+ ch_regs->wcount = len_field;
+ else
+ ch_regs->csr |= len_field;
+}
+
static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
enum dma_transfer_direction direction, unsigned long flags,
@@ -941,7 +974,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
- csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
if (flags & DMA_PREP_INTERRUPT)
csr |= TEGRA_APBDMA_CSR_IE_EOC;
@@ -986,7 +1019,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
sg_req->ch_regs.apb_ptr = apb_ptr;
sg_req->ch_regs.ahb_ptr = mem;
- sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
+ sg_req->ch_regs.csr = csr;
+ tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
sg_req->ch_regs.apb_seq = apb_seq;
sg_req->ch_regs.ahb_seq = ahb_seq;
sg_req->configured = false;
@@ -1085,7 +1119,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
csr |= TEGRA_APBDMA_CSR_FLOW;
if (flags & DMA_PREP_INTERRUPT)
csr |= TEGRA_APBDMA_CSR_IE_EOC;
- csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
@@ -1115,7 +1149,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
sg_req->ch_regs.apb_ptr = apb_ptr;
sg_req->ch_regs.ahb_ptr = mem;
- sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
+ sg_req->ch_regs.csr = csr;
+ tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
sg_req->ch_regs.apb_seq = apb_seq;
sg_req->ch_regs.ahb_seq = ahb_seq;
sg_req->configured = false;
@@ -1205,32 +1240,69 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
kfree(sg_req);
}
clk_disable_unprepare(tdma->dma_clk);
+
+ tdc->slave_id = 0;
+}
+
+static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct tegra_dma *tdma = ofdma->of_dma_data;
+ struct dma_chan *chan;
+ struct tegra_dma_channel *tdc;
+
+ chan = dma_get_any_slave_channel(&tdma->dma_dev);
+ if (!chan)
+ return NULL;
+
+ tdc = to_tegra_dma_chan(chan);
+ tdc->slave_id = dma_spec->args[0];
+
+ return chan;
}
/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
.nr_channels = 16,
+ .channel_reg_size = 0x20,
.max_dma_count = 1024UL * 64,
.support_channel_pause = false,
+ .support_separate_wcount_reg = false,
};
/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
.nr_channels = 32,
+ .channel_reg_size = 0x20,
.max_dma_count = 1024UL * 64,
.support_channel_pause = false,
+ .support_separate_wcount_reg = false,
};
/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
.nr_channels = 32,
+ .channel_reg_size = 0x20,
+ .max_dma_count = 1024UL * 64,
+ .support_channel_pause = true,
+ .support_separate_wcount_reg = false,
+};
+
+/* Tegra148 specific DMA controller information */
+static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
+ .nr_channels = 32,
+ .channel_reg_size = 0x40,
.max_dma_count = 1024UL * 64,
.support_channel_pause = true,
+ .support_separate_wcount_reg = true,
};
static const struct of_device_id tegra_dma_of_match[] = {
{
+ .compatible = "nvidia,tegra148-apbdma",
+ .data = &tegra148_dma_chip_data,
+ }, {
.compatible = "nvidia,tegra114-apbdma",
.data = &tegra114_dma_chip_data,
}, {
@@ -1282,6 +1354,12 @@ static int tegra_dma_probe(struct platform_device *pdev)
return PTR_ERR(tdma->dma_clk);
}
+ tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
+ if (IS_ERR(tdma->rst)) {
+ dev_err(&pdev->dev, "Error: Missing reset\n");
+ return PTR_ERR(tdma->rst);
+ }
+
spin_lock_init(&tdma->global_lock);
pm_runtime_enable(&pdev->dev);
@@ -1302,9 +1380,9 @@ static int tegra_dma_probe(struct platform_device *pdev)
}
/* Reset DMA controller */
- tegra_periph_reset_assert(tdma->dma_clk);
+ reset_control_assert(tdma->rst);
udelay(2);
- tegra_periph_reset_deassert(tdma->dma_clk);
+ reset_control_deassert(tdma->rst);
/* Enable global DMA registers */
tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
@@ -1318,7 +1396,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
struct tegra_dma_channel *tdc = &tdma->channels[i];
tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
- i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;
+ i * cdata->channel_reg_size;
res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
if (!res) {
@@ -1376,10 +1454,20 @@ static int tegra_dma_probe(struct platform_device *pdev)
goto err_irq;
}
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ tegra_dma_of_xlate, tdma);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Tegra20 APB DMA OF registration failed %d\n", ret);
+ goto err_unregister_dma_dev;
+ }
+
dev_info(&pdev->dev, "Tegra20 APB DMA driver register %d channels\n",
cdata->nr_channels);
return 0;
+err_unregister_dma_dev:
+ dma_async_device_unregister(&tdma->dma_dev);
err_irq:
while (--i >= 0) {
struct tegra_dma_channel *tdc = &tdma->channels[i];
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index bae6c29f5502..17686caf64d5 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
dma_async_tx_callback callback;
void *param;
struct dma_async_tx_descriptor *txd = &desc->txd;
- struct txx9dmac_slave *ds = dc->chan.private;
dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
txd->cookie, desc);
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 85c19d63f9fb..181b95267866 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -84,10 +84,12 @@ static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+ dma_cookie_t cookie;
+ cookie = vd->tx.cookie;
dma_cookie_complete(&vd->tx);
dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
- vd, vd->tx.cookie);
+ vd, cookie);
list_add_tail(&vd->node, &vc->desc_completed);
tasklet_schedule(&vc->task);
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index b53d0de17e15..98e14ee4833c 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,7 +1,7 @@
#include "amd64_edac.h"
#include <asm/amd_nb.h>
-static struct edac_pci_ctl_info *amd64_ctl_pci;
+static struct edac_pci_ctl_info *pci_ctl;
static int report_gart_errors;
module_param(report_gart_errors, int, 0644);
@@ -162,7 +162,7 @@ static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
* scan the scrub rate mapping table for a close or matching bandwidth value to
* issue. If requested is too big, then use last maximum value found.
*/
-static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
+static int __set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
{
u32 scrubval;
int i;
@@ -198,7 +198,7 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
return 0;
}
-static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
+static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 min_scrubrate = 0x5;
@@ -210,10 +210,10 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
if (pvt->fam == 0x15 && pvt->model < 0x10)
f15h_select_dct(pvt, 0);
- return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
+ return __set_scrub_rate(pvt->F3, bw, min_scrubrate);
}
-static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
+static int get_scrub_rate(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 scrubval = 0;
@@ -240,8 +240,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
* returns true if the SysAddr given by sys_addr matches the
* DRAM base/limit associated with node_id
*/
-static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
- u8 nid)
+static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
u64 addr;
@@ -285,7 +284,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
if (intlv_en == 0) {
for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
- if (amd64_base_limit_match(pvt, sys_addr, node_id))
+ if (base_limit_match(pvt, sys_addr, node_id))
goto found;
}
goto err_no_match;
@@ -309,7 +308,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
}
/* sanity test for sys_addr */
- if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
+ if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
"range for node %d with node interleaving enabled.\n",
__func__, sys_addr, node_id);
@@ -660,7 +659,7 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
* Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
* are ECC capable.
*/
-static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
+static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
u8 bit;
unsigned long edac_cap = EDAC_FLAG_NONE;
@@ -675,9 +674,9 @@ static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
return edac_cap;
}
-static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
+static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
-static void amd64_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
+static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
@@ -711,7 +710,7 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
(pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
(pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
- amd64_dump_dramcfg_low(pvt, pvt->dclr0, 0);
+ debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
@@ -722,19 +721,19 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
- amd64_debug_display_dimm_sizes(pvt, 0);
+ debug_display_dimm_sizes(pvt, 0);
/* everything below this point is Fam10h and above */
if (pvt->fam == 0xf)
return;
- amd64_debug_display_dimm_sizes(pvt, 1);
+ debug_display_dimm_sizes(pvt, 1);
amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
- amd64_dump_dramcfg_low(pvt, pvt->dclr1, 1);
+ debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}
/*
@@ -800,7 +799,7 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
}
}
-static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
+static enum mem_type determine_memory_type(struct amd64_pvt *pvt, int cs)
{
enum mem_type type;
@@ -1578,7 +1577,7 @@ static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
num_dcts_intlv, dct_sel);
/* Verify we stay within the MAX number of channels allowed */
- if (channel > 4 || channel < 0)
+ if (channel > 3)
return -EINVAL;
leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
@@ -1702,7 +1701,7 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
* debug routine to display the memory sizes of all logical DIMMs and its
* CSROWs
*/
-static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
+static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
int dimm, size0, size1;
u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
@@ -1744,7 +1743,7 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
}
}
-static struct amd64_family_type amd64_family_types[] = {
+static struct amd64_family_type family_types[] = {
[K8_CPUS] = {
.ctl_name = "K8",
.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
@@ -2005,9 +2004,9 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
string, "");
}
-static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
- struct mce *m)
+static inline void decode_bus_error(int node_id, struct mce *m)
{
+ struct mem_ctl_info *mci = mcis[node_id];
struct amd64_pvt *pvt = mci->pvt_info;
u8 ecc_type = (m->status >> 45) & 0x3;
u8 xec = XEC(m->status, 0x1f);
@@ -2035,11 +2034,6 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
__log_bus_error(mci, &err, ecc_type);
}
-void amd64_decode_bus_error(int node_id, struct mce *m)
-{
- __amd64_decode_bus_error(mcis[node_id], m);
-}
-
/*
* Use pvt->F2 which contains the F2 CPU PCI device to get the related
* F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
@@ -2196,7 +2190,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
* encompasses
*
*/
-static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
+static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
u32 cs_mode, nr_pages;
u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
@@ -2263,19 +2257,19 @@ static int init_csrows(struct mem_ctl_info *mci)
pvt->mc_node_id, i);
if (row_dct0) {
- nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
+ nr_pages = get_csrow_nr_pages(pvt, 0, i);
csrow->channels[0]->dimm->nr_pages = nr_pages;
}
/* K8 has only one DCT */
if (pvt->fam != 0xf && row_dct1) {
- int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i);
+ int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
nr_pages += row_dct1_pages;
}
- mtype = amd64_determine_memory_type(pvt, i);
+ mtype = determine_memory_type(pvt, i);
edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
@@ -2309,7 +2303,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
}
/* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(u16 nid)
+static bool nb_mce_bank_enabled_on_node(u16 nid)
{
cpumask_var_t mask;
int cpu, nbe;
@@ -2482,7 +2476,7 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
ecc_en = !!(value & NBCFG_ECC_ENABLE);
amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
- nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
+ nb_mce_en = nb_mce_bank_enabled_on_node(nid);
if (!nb_mce_en)
amd64_notice("NB MCE bank disabled, set MSR "
"0x%08x[4] on node %d to enable.\n",
@@ -2537,7 +2531,7 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
if (pvt->nbcap & NBCAP_CHIPKILL)
mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
- mci->edac_cap = amd64_determine_edac_cap(pvt);
+ mci->edac_cap = determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = EDAC_AMD64_VERSION;
mci->ctl_name = fam->ctl_name;
@@ -2545,14 +2539,14 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
mci->ctl_page_to_phys = NULL;
/* memory scrubber interface */
- mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
- mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
+ mci->set_sdram_scrub_rate = set_scrub_rate;
+ mci->get_sdram_scrub_rate = get_scrub_rate;
}
/*
* returns a pointer to the family descriptor on success, NULL otherwise.
*/
-static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
+static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
{
struct amd64_family_type *fam_type = NULL;
@@ -2563,29 +2557,29 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
switch (pvt->fam) {
case 0xf:
- fam_type = &amd64_family_types[K8_CPUS];
- pvt->ops = &amd64_family_types[K8_CPUS].ops;
+ fam_type = &family_types[K8_CPUS];
+ pvt->ops = &family_types[K8_CPUS].ops;
break;
case 0x10:
- fam_type = &amd64_family_types[F10_CPUS];
- pvt->ops = &amd64_family_types[F10_CPUS].ops;
+ fam_type = &family_types[F10_CPUS];
+ pvt->ops = &family_types[F10_CPUS].ops;
break;
case 0x15:
if (pvt->model == 0x30) {
- fam_type = &amd64_family_types[F15_M30H_CPUS];
- pvt->ops = &amd64_family_types[F15_M30H_CPUS].ops;
+ fam_type = &family_types[F15_M30H_CPUS];
+ pvt->ops = &family_types[F15_M30H_CPUS].ops;
break;
}
- fam_type = &amd64_family_types[F15_CPUS];
- pvt->ops = &amd64_family_types[F15_CPUS].ops;
+ fam_type = &family_types[F15_CPUS];
+ pvt->ops = &family_types[F15_CPUS].ops;
break;
case 0x16:
- fam_type = &amd64_family_types[F16_CPUS];
- pvt->ops = &amd64_family_types[F16_CPUS].ops;
+ fam_type = &family_types[F16_CPUS];
+ pvt->ops = &family_types[F16_CPUS].ops;
break;
default:
@@ -2601,7 +2595,7 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
return fam_type;
}
-static int amd64_init_one_instance(struct pci_dev *F2)
+static int init_one_instance(struct pci_dev *F2)
{
struct amd64_pvt *pvt = NULL;
struct amd64_family_type *fam_type = NULL;
@@ -2619,7 +2613,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
pvt->F2 = F2;
ret = -EINVAL;
- fam_type = amd64_per_family_init(pvt);
+ fam_type = per_family_init(pvt);
if (!fam_type)
goto err_free;
@@ -2680,7 +2674,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
if (report_gart_errors)
amd_report_gart_errors(true);
- amd_register_ecc_decoder(amd64_decode_bus_error);
+ amd_register_ecc_decoder(decode_bus_error);
mcis[nid] = mci;
@@ -2703,8 +2697,8 @@ err_ret:
return ret;
}
-static int amd64_probe_one_instance(struct pci_dev *pdev,
- const struct pci_device_id *mc_type)
+static int probe_one_instance(struct pci_dev *pdev,
+ const struct pci_device_id *mc_type)
{
u16 nid = amd_get_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
@@ -2736,7 +2730,7 @@ static int amd64_probe_one_instance(struct pci_dev *pdev,
goto err_enable;
}
- ret = amd64_init_one_instance(pdev);
+ ret = init_one_instance(pdev);
if (ret < 0) {
amd64_err("Error probing instance: %d\n", nid);
restore_ecc_error_reporting(s, nid, F3);
@@ -2752,7 +2746,7 @@ err_out:
return ret;
}
-static void amd64_remove_one_instance(struct pci_dev *pdev)
+static void remove_one_instance(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
@@ -2777,7 +2771,7 @@ static void amd64_remove_one_instance(struct pci_dev *pdev)
/* unregister from EDAC MCE */
amd_report_gart_errors(false);
- amd_unregister_ecc_decoder(amd64_decode_bus_error);
+ amd_unregister_ecc_decoder(decode_bus_error);
kfree(ecc_stngs[nid]);
ecc_stngs[nid] = NULL;
@@ -2795,7 +2789,7 @@ static void amd64_remove_one_instance(struct pci_dev *pdev)
* PCI core identifies what devices are on a system during boot, and then
* inquiry this table to see if this driver is for a given device found.
*/
-static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
+static const struct pci_device_id amd64_pci_table[] = {
{
.vendor = PCI_VENDOR_ID_AMD,
.device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
@@ -2843,8 +2837,8 @@ MODULE_DEVICE_TABLE(pci, amd64_pci_table);
static struct pci_driver amd64_pci_driver = {
.name = EDAC_MOD_STR,
- .probe = amd64_probe_one_instance,
- .remove = amd64_remove_one_instance,
+ .probe = probe_one_instance,
+ .remove = remove_one_instance,
.id_table = amd64_pci_table,
};
@@ -2853,23 +2847,18 @@ static void setup_pci_device(void)
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- if (amd64_ctl_pci)
+ if (pci_ctl)
return;
mci = mcis[0];
- if (mci) {
-
- pvt = mci->pvt_info;
- amd64_ctl_pci =
- edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
-
- if (!amd64_ctl_pci) {
- pr_warning("%s(): Unable to create PCI control\n",
- __func__);
+ if (!mci)
+ return;
- pr_warning("%s(): PCI error report via EDAC not set\n",
- __func__);
- }
+ pvt = mci->pvt_info;
+ pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
+ if (!pci_ctl) {
+ pr_warn("%s(): Unable to create PCI control\n", __func__);
+ pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
}
}
@@ -2925,8 +2914,8 @@ err_ret:
static void __exit amd64_edac_exit(void)
{
- if (amd64_ctl_pci)
- edac_pci_release_generic_ctl(amd64_ctl_pci);
+ if (pci_ctl)
+ edac_pci_release_generic_ctl(pci_ctl);
pci_unregister_driver(&amd64_pci_driver);
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 96e3ee3460a5..3a501b530e11 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -333,7 +333,7 @@ static void amd76x_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(amd76x_pci_tbl) = {
+static const struct pci_device_id amd76x_pci_tbl[] = {
{
PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
AMD762},
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 644fec54681f..92d54fa65f93 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -1182,9 +1182,11 @@ static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
pvt->dev_info->err_dev, pvt->bridge_ck);
- if (pvt->bridge_ck == NULL)
+ if (pvt->bridge_ck == NULL) {
pvt->bridge_ck = pci_scan_single_device(pdev->bus,
PCI_DEVFN(0, 1));
+ pci_dev_get(pvt->bridge_ck);
+ }
if (pvt->bridge_ck == NULL) {
e752x_printk(KERN_ERR, "error reporting device not found:"
@@ -1421,7 +1423,7 @@ static void e752x_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(e752x_pci_tbl) = {
+static const struct pci_device_id e752x_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
E7520},
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 1c4056a50383..3cda79bc8b00 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -555,7 +555,7 @@ static void e7xxx_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(e7xxx_pci_tbl) = {
+static const struct pci_device_id e7xxx_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
E7205},
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 102674346035..592af5f0cf39 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -437,6 +437,9 @@ void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
{
int status;
+ if (!edac_dev->edac_check)
+ return;
+
status = cancel_delayed_work(&edac_dev->work);
if (status == 0) {
/* workq instance might be running, wait for it */
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index e8c9ef03495b..33edd6766344 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -559,7 +559,8 @@ static void edac_mc_workq_function(struct work_struct *work_req)
*
* called with the mem_ctls_mutex held
*/
-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
+ bool init)
{
edac_dbg(0, "\n");
@@ -567,7 +568,9 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
if (mci->op_state != OP_RUNNING_POLL)
return;
- INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+ if (init)
+ INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+
mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}
@@ -601,7 +604,7 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
* user space has updated our poll period value, need to
* reset our workq delays
*/
-void edac_mc_reset_delay_period(int value)
+void edac_mc_reset_delay_period(unsigned long value)
{
struct mem_ctl_info *mci;
struct list_head *item;
@@ -611,7 +614,7 @@ void edac_mc_reset_delay_period(int value)
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
- edac_mc_workq_setup(mci, (unsigned long) value);
+ edac_mc_workq_setup(mci, value, false);
}
mutex_unlock(&mem_ctls_mutex);
@@ -782,7 +785,7 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
/* This instance is NOW RUNNING */
mci->op_state = OP_RUNNING_POLL;
- edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+ edac_mc_workq_setup(mci, edac_mc_get_poll_msec(), true);
} else {
mci->op_state = OP_RUNNING_INTERRUPT;
}
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 9f7e0e609516..b335c6ab5efe 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -52,18 +52,20 @@ int edac_mc_get_poll_msec(void)
static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
{
- long l;
+ unsigned long l;
int ret;
if (!val)
return -EINVAL;
- ret = kstrtol(val, 0, &l);
+ ret = kstrtoul(val, 0, &l);
if (ret)
return ret;
- if ((int)l != l)
+
+ if (l < 1000)
return -EINVAL;
- *((int *)kp->arg) = l;
+
+ *((unsigned long *)kp->arg) = l;
/* notify edac_mc engine to reset the poll period */
edac_mc_reset_delay_period(l);
@@ -914,7 +916,7 @@ void __exit edac_debugfs_exit(void)
debugfs_remove(edac_debugfs);
}
-int edac_create_debug_nodes(struct mem_ctl_info *mci)
+static int edac_create_debug_nodes(struct mem_ctl_info *mci)
{
struct dentry *d, *parent;
char name[80];
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 3d139c6e7fe3..f2118bfcf8df 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -52,7 +52,7 @@ extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
extern void edac_device_reset_delay_period(struct edac_device_ctl_info
*edac_dev, unsigned long value);
-extern void edac_mc_reset_delay_period(int value);
+extern void edac_mc_reset_delay_period(unsigned long value);
extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c
index 351945fa2ecd..9d9e18aefaaa 100644
--- a/drivers/edac/edac_stub.c
+++ b/drivers/edac/edac_stub.c
@@ -29,6 +29,25 @@ EXPORT_SYMBOL_GPL(edac_err_assert);
static atomic_t edac_subsys_valid = ATOMIC_INIT(0);
+int edac_report_status = EDAC_REPORTING_ENABLED;
+EXPORT_SYMBOL_GPL(edac_report_status);
+
+static int __init edac_report_setup(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strncmp(str, "on", 2))
+ set_edac_report_status(EDAC_REPORTING_ENABLED);
+ else if (!strncmp(str, "off", 3))
+ set_edac_report_status(EDAC_REPORTING_DISABLED);
+ else if (!strncmp(str, "force", 5))
+ set_edac_report_status(EDAC_REPORTING_FORCE);
+
+ return 0;
+}
+__setup("edac_report=", edac_report_setup);
+
/*
* called to determine if there is an EDAC driver interested in
* knowing an event (such as NMI) occurred
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 694efcbf19c0..cd28b968e5c7 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -487,7 +487,7 @@ static void i3000_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(i3000_pci_tbl) = {
+static const struct pci_device_id i3000_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I3000},
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index be10a74b16ea..fa1326e5a4b0 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -466,7 +466,7 @@ static void i3200_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(i3200_pci_tbl) = {
+static const struct pci_device_id i3200_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I3200},
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 63b2194e8c20..72e07e3cf718 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1530,7 +1530,7 @@ static void i5000_remove_one(struct pci_dev *pdev)
*
* The "E500P" device is the first device supported.
*/
-static DEFINE_PCI_DEVICE_TABLE(i5000_pci_tbl) = {
+static const struct pci_device_id i5000_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
.driver_data = I5000P},
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 157b934e8ce3..36a38ee94fa8 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -1213,7 +1213,7 @@ static void i5100_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(i5100_pci_tbl) = {
+static const struct pci_device_id i5100_pci_tbl[] = {
/* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
{ 0, }
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 0a05bbceb08f..e080cbfa8fc9 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -1416,7 +1416,7 @@ static void i5400_remove_one(struct pci_dev *pdev)
*
* The "E500P" device is the first device supported.
*/
-static DEFINE_PCI_DEVICE_TABLE(i5400_pci_tbl) = {
+static const struct pci_device_id i5400_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
{0,} /* 0 terminated list. */
};
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 9004c64b169e..d63f4798f7d0 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -1160,7 +1160,7 @@ static void i7300_remove_one(struct pci_dev *pdev)
*
* Has only 8086:360c PCI ID
*/
-static DEFINE_PCI_DEVICE_TABLE(i7300_pci_tbl) = {
+static const struct pci_device_id i7300_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
{0,} /* 0 terminated list. */
};
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 80a963d64e58..87533ca7752e 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -394,7 +394,7 @@ static const struct pci_id_table pci_dev_table[] = {
/*
* pci_device_id table for which devices we are looking for
*/
-static DEFINE_PCI_DEVICE_TABLE(i7core_pci_tbl) = {
+static const struct pci_device_id i7core_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
{0,} /* 0 terminated list. */
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 57fdb77903ba..d730e276d1a8 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -386,7 +386,7 @@ static void i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
-static DEFINE_PCI_DEVICE_TABLE(i82443bxgx_pci_tbl) = {
+static const struct pci_device_id i82443bxgx_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index 3e3e431c8301..3382f6344e42 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -288,7 +288,7 @@ static void i82860_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(i82860_pci_tbl) = {
+static const struct pci_device_id i82860_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I82860},
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 2f8535fc451e..80573df0a4d7 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -527,7 +527,7 @@ static void i82875p_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(i82875p_pci_tbl) = {
+static const struct pci_device_id i82875p_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I82875P},
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 0c8d4b0eaa32..10b10521f62e 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -628,7 +628,7 @@ static void i82975x_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(i82975x_pci_tbl) = {
+static const struct pci_device_id i82975x_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I82975X
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index fd46b0bd5f2a..8f9182179a7c 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -1,6 +1,8 @@
/*
 * Freescale MPC85xx Memory Controller kernel module
*
+ * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
+ *
* Author: Dave Jiang <djiang@mvista.com>
*
* 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
@@ -196,6 +198,42 @@ static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
edac_pci_handle_npe(pci, pci->ctl_name);
}
+static void mpc85xx_pcie_check(struct edac_pci_ctl_info *pci)
+{
+ struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
+
+ pr_err("PCIe error(s) detected\n");
+ pr_err("PCIe ERR_DR register: 0x%08x\n", err_detect);
+ pr_err("PCIe ERR_CAP_STAT register: 0x%08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_GAS_TIMR));
+ pr_err("PCIe ERR_CAP_R0 register: 0x%08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R0));
+ pr_err("PCIe ERR_CAP_R1 register: 0x%08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R1));
+ pr_err("PCIe ERR_CAP_R2 register: 0x%08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R2));
+ pr_err("PCIe ERR_CAP_R3 register: 0x%08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R3));
+
+ /* clear error bits */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
+}
+
+static int mpc85xx_pcie_find_capability(struct device_node *np)
+{
+ struct pci_controller *hose;
+
+ if (!np)
+ return -EINVAL;
+
+ hose = pci_find_hose_for_OF_device(np);
+
+ return early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
+}
+
static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
{
struct edac_pci_ctl_info *pci = dev_id;
@@ -207,7 +245,10 @@ static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
if (!err_detect)
return IRQ_NONE;
- mpc85xx_pci_check(pci);
+ if (pdata->is_pcie)
+ mpc85xx_pcie_check(pci);
+ else
+ mpc85xx_pci_check(pci);
return IRQ_HANDLED;
}
@@ -239,14 +280,22 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
pdata = pci->pvt_info;
pdata->name = "mpc85xx_pci_err";
pdata->irq = NO_IRQ;
+
+ if (mpc85xx_pcie_find_capability(op->dev.of_node) > 0)
+ pdata->is_pcie = true;
+
dev_set_drvdata(&op->dev, pci);
pci->dev = &op->dev;
pci->mod_name = EDAC_MOD_STR;
pci->ctl_name = pdata->name;
pci->dev_name = dev_name(&op->dev);
- if (edac_op_state == EDAC_OPSTATE_POLL)
- pci->edac_check = mpc85xx_pci_check;
+ if (edac_op_state == EDAC_OPSTATE_POLL) {
+ if (pdata->is_pcie)
+ pci->edac_check = mpc85xx_pcie_check;
+ else
+ pci->edac_check = mpc85xx_pci_check;
+ }
pdata->edac_idx = edac_pci_idx++;
@@ -275,16 +324,26 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
goto err;
}
- orig_pci_err_cap_dr =
- in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);
+ if (pdata->is_pcie) {
+ orig_pci_err_cap_dr =
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR);
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, ~0);
+ orig_pci_err_en =
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, 0);
+ } else {
+ orig_pci_err_cap_dr =
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);
- /* PCI master abort is expected during config cycles */
- out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);
+ /* PCI master abort is expected during config cycles */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);
- orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
+ orig_pci_err_en =
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
- /* disable master abort reporting */
- out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
+ /* disable master abort reporting */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
+ }
/* clear error bits */
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
@@ -297,7 +356,8 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
if (edac_op_state == EDAC_OPSTATE_INT) {
pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
res = devm_request_irq(&op->dev, pdata->irq,
- mpc85xx_pci_isr, IRQF_DISABLED,
+ mpc85xx_pci_isr,
+ IRQF_DISABLED | IRQF_SHARED,
"[EDAC] PCI err", pci);
if (res < 0) {
printk(KERN_ERR
@@ -312,6 +372,22 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
pdata->irq);
}
+ if (pdata->is_pcie) {
+ /*
+	 * Enable all PCIe error interrupts and error detection, except the
+	 * invalid PEX_CONFIG_ADDR/PEX_CONFIG_DATA access interrupt enable
+	 * bit and the corresponding detection enable bit. The PCIe bus code
+	 * issues some invalid PEX_CONFIG_ADDR/PEX_CONFIG_DATA accesses while
+	 * enumerating and configuring PCIe devices at boot, which would make
+	 * the EDAC driver print a flood of notices, so keep that detection
+	 * disabled.
+ */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0
+ & ~PEX_ERR_ICCAIE_EN_BIT);
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, 0
+ | PEX_ERR_ICCAD_DISR_BIT);
+ }
+
devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
edac_dbg(3, "success\n");
printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
index 932016f2cf06..8c6256436227 100644
--- a/drivers/edac/mpc85xx_edac.h
+++ b/drivers/edac/mpc85xx_edac.h
@@ -134,13 +134,19 @@
#define MPC85XX_PCI_ERR_DR 0x0000
#define MPC85XX_PCI_ERR_CAP_DR 0x0004
#define MPC85XX_PCI_ERR_EN 0x0008
+#define PEX_ERR_ICCAIE_EN_BIT 0x00020000
#define MPC85XX_PCI_ERR_ATTRIB 0x000c
#define MPC85XX_PCI_ERR_ADDR 0x0010
+#define PEX_ERR_ICCAD_DISR_BIT 0x00020000
#define MPC85XX_PCI_ERR_EXT_ADDR 0x0014
#define MPC85XX_PCI_ERR_DL 0x0018
#define MPC85XX_PCI_ERR_DH 0x001c
#define MPC85XX_PCI_GAS_TIMR 0x0020
#define MPC85XX_PCI_PCIX_TIMR 0x0024
+#define MPC85XX_PCIE_ERR_CAP_R0 0x0028
+#define MPC85XX_PCIE_ERR_CAP_R1 0x002c
+#define MPC85XX_PCIE_ERR_CAP_R2 0x0030
+#define MPC85XX_PCIE_ERR_CAP_R3 0x0034
struct mpc85xx_mc_pdata {
char *name;
@@ -158,6 +164,7 @@ struct mpc85xx_l2_pdata {
struct mpc85xx_pci_pdata {
char *name;
+ bool is_pcie;
int edac_idx;
void __iomem *pci_vbase;
int irq;
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 2fd6a5490905..8f936bc7a010 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -383,7 +383,7 @@ static void r82600_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(r82600_pci_tbl) = {
+static const struct pci_device_id r82600_pci_tbl[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
},
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index d7f1b57bd3be..54e2abe671f7 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -461,7 +461,7 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
/*
* pci_device_id table for which devices we are looking for
*/
-static DEFINE_PCI_DEVICE_TABLE(sbridge_pci_tbl) = {
+static const struct pci_device_id sbridge_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)},
{0,} /* 0 terminated list. */
@@ -915,7 +915,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
}
}
-struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
+static struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
{
struct sbridge_dev *sbridge_dev;
@@ -1829,6 +1829,9 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
struct mem_ctl_info *mci;
struct sbridge_pvt *pvt;
+ if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
+ return NOTIFY_DONE;
+
mci = get_mci_for_node_id(mce->socketid);
if (!mci)
return NOTIFY_BAD;
@@ -2142,9 +2145,10 @@ static int __init sbridge_init(void)
opstate_init();
pci_rc = pci_register_driver(&sbridge_driver);
-
if (pci_rc >= 0) {
mce_register_decode_chain(&sbridge_mce_dec);
+ if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
+ sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
return 0;
}
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 1a4df82376ba..4891b450830b 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -448,7 +448,7 @@ static void x38_remove_one(struct pci_dev *pdev)
edac_mc_free(mci);
}
-static DEFINE_PCI_DEVICE_TABLE(x38_pci_tbl) = {
+static const struct pci_device_id x38_pci_tbl[] = {
{
PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
X38},
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index 272a3ec35957..612afeaec3cb 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -232,8 +232,10 @@ static int __init eisa_init_device(struct eisa_root_device *root,
static int __init eisa_register_device(struct eisa_device *edev)
{
int rc = device_register(&edev->dev);
- if (rc)
+ if (rc) {
+ put_device(&edev->dev);
return rc;
+ }
rc = device_create_file(&edev->dev, &dev_attr_signature);
if (rc)
@@ -275,18 +277,19 @@ static int __init eisa_request_resources(struct eisa_root_device *root,
}
if (slot) {
+ edev->res[i].name = NULL;
edev->res[i].start = SLOT_ADDRESS(root, slot)
+ (i * 0x400);
edev->res[i].end = edev->res[i].start + 0xff;
edev->res[i].flags = IORESOURCE_IO;
} else {
+ edev->res[i].name = NULL;
edev->res[i].start = SLOT_ADDRESS(root, slot)
+ EISA_VENDOR_ID_OFFSET;
edev->res[i].end = edev->res[i].start + 3;
edev->res[i].flags = IORESOURCE_IO | IORESOURCE_BUSY;
}
- dev_printk(KERN_DEBUG, &edev->dev, "%pR\n", &edev->res[i]);
if (request_resource(root->res, &edev->res[i]))
goto failed;
}
@@ -326,19 +329,20 @@ static int __init eisa_probe(struct eisa_root_device *root)
return -ENOMEM;
}
- if (eisa_init_device(root, edev, 0)) {
+ if (eisa_request_resources(root, edev, 0)) {
+ dev_warn(root->dev,
+ "EISA: Cannot allocate resource for mainboard\n");
kfree(edev);
if (!root->force_probe)
- return -ENODEV;
+ return -EBUSY;
goto force_probe;
}
- if (eisa_request_resources(root, edev, 0)) {
- dev_warn(root->dev,
- "EISA: Cannot allocate resource for mainboard\n");
+ if (eisa_init_device(root, edev, 0)) {
+ eisa_release_resources(edev);
kfree(edev);
if (!root->force_probe)
- return -EBUSY;
+ return -ENODEV;
goto force_probe;
}
@@ -361,11 +365,6 @@ static int __init eisa_probe(struct eisa_root_device *root)
continue;
}
- if (eisa_init_device(root, edev, i)) {
- kfree(edev);
- continue;
- }
-
if (eisa_request_resources(root, edev, i)) {
dev_warn(root->dev,
"Cannot allocate resource for EISA slot %d\n",
@@ -374,6 +373,12 @@ static int __init eisa_probe(struct eisa_root_device *root)
continue;
}
+ if (eisa_init_device(root, edev, i)) {
+ eisa_release_resources(edev);
+ kfree(edev);
+ continue;
+ }
+
if (edev->state == (EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED))
enabled_str = " (forced enabled)";
else if (edev->state == EISA_CONFIG_FORCED)
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index f1d54a3985bd..bdb5a00f1dfa 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -31,6 +31,16 @@ config EXTCON_ADC_JACK
help
Say Y here to enable extcon device driver based on ADC values.
+config EXTCON_MAX14577
+ tristate "MAX14577 EXTCON Support"
+ depends on MFD_MAX14577
+ select IRQ_DOMAIN
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the MUIC device of
+ Maxim MAX14577 PMIC. The MAX14577 MUIC is a USB port accessory
+ detector and switch.
+
config EXTCON_MAX77693
tristate "MAX77693 EXTCON Support"
depends on MFD_MAX77693 && INPUT
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 759fdae46f95..43eccc0e3448 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_OF_EXTCON) += of_extcon.o
obj-$(CONFIG_EXTCON) += extcon-class.o
obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
+obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o
obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index a287cece0593..c20602f601ee 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -44,6 +44,15 @@
#define HPDET_DEBOUNCE 500
#define DEFAULT_MICD_TIMEOUT 2000
+#define MICD_LVL_1_TO_7 (ARIZONA_MICD_LVL_1 | ARIZONA_MICD_LVL_2 | \
+ ARIZONA_MICD_LVL_3 | ARIZONA_MICD_LVL_4 | \
+ ARIZONA_MICD_LVL_5 | ARIZONA_MICD_LVL_6 | \
+ ARIZONA_MICD_LVL_7)
+
+#define MICD_LVL_0_TO_7 (ARIZONA_MICD_LVL_0 | MICD_LVL_1_TO_7)
+
+#define MICD_LVL_0_TO_8 (MICD_LVL_0_TO_7 | ARIZONA_MICD_LVL_8)
+
struct arizona_extcon_info {
struct device *dev;
struct arizona *arizona;
@@ -426,26 +435,15 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
}
val &= ARIZONA_HP_LVL_B_MASK;
+	/* Convert to ohms; the value is in 0.5 ohm increments */
+ val /= 2;
regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
&range);
range = (range & ARIZONA_HP_IMPEDANCE_RANGE_MASK)
>> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT;
- /* Skip up or down a range? */
- if (range && (val < arizona_hpdet_c_ranges[range].min)) {
- range--;
- dev_dbg(arizona->dev, "Moving to HPDET range %d-%d\n",
- arizona_hpdet_c_ranges[range].min,
- arizona_hpdet_c_ranges[range].max);
- regmap_update_bits(arizona->regmap,
- ARIZONA_HEADPHONE_DETECT_1,
- ARIZONA_HP_IMPEDANCE_RANGE_MASK,
- range <<
- ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
- return -EAGAIN;
- }
-
+ /* Skip up a range, or report? */
if (range < ARRAY_SIZE(arizona_hpdet_c_ranges) - 1 &&
(val >= arizona_hpdet_c_ranges[range].max)) {
range++;
@@ -459,6 +457,12 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
return -EAGAIN;
}
+
+ if (range && (val < arizona_hpdet_c_ranges[range].min)) {
+ dev_dbg(arizona->dev, "Reporting range boundary %d\n",
+ arizona_hpdet_c_ranges[range].min);
+ val = arizona_hpdet_c_ranges[range].min;
+ }
}
dev_dbg(arizona->dev, "HP impedance %d ohms\n", val);
@@ -594,9 +598,15 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
dev_err(arizona->dev, "Failed to report HP/line: %d\n",
ret);
+done:
+ /* Reset back to starting range */
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_IMPEDANCE_RANGE_MASK | ARIZONA_HP_POLL,
+ 0);
+
arizona_extcon_do_magic(info, 0);
-done:
if (id_gpio)
gpio_set_value_cansleep(id_gpio, 0);
@@ -765,7 +775,20 @@ static void arizona_micd_detect(struct work_struct *work)
mutex_lock(&info->lock);
- for (i = 0; i < 10 && !(val & 0x7fc); i++) {
+ /* If the cable was removed while measuring ignore the result */
+ ret = extcon_get_cable_state_(&info->edev, ARIZONA_CABLE_MECHANICAL);
+ if (ret < 0) {
+ dev_err(arizona->dev, "Failed to check cable state: %d\n",
+ ret);
+ mutex_unlock(&info->lock);
+ return;
+ } else if (!ret) {
+ dev_dbg(arizona->dev, "Ignoring MICDET for removed cable\n");
+ mutex_unlock(&info->lock);
+ return;
+ }
+
+ for (i = 0; i < 10 && !(val & MICD_LVL_0_TO_8); i++) {
ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val);
if (ret != 0) {
dev_err(arizona->dev,
@@ -784,7 +807,7 @@ static void arizona_micd_detect(struct work_struct *work)
}
}
- if (i == 10 && !(val & 0x7fc)) {
+ if (i == 10 && !(val & MICD_LVL_0_TO_8)) {
dev_err(arizona->dev, "Failed to get valid MICDET value\n");
mutex_unlock(&info->lock);
return;
@@ -798,7 +821,7 @@ static void arizona_micd_detect(struct work_struct *work)
}
/* If we got a high impedance we should have a headset, report it. */
- if (info->detecting && (val & 0x400)) {
+ if (info->detecting && (val & ARIZONA_MICD_LVL_8)) {
arizona_identify_headphone(info);
ret = extcon_update_state(&info->edev,
@@ -827,7 +850,7 @@ static void arizona_micd_detect(struct work_struct *work)
* plain headphones. If both polarities report a low
* impedance then give up and report headphones.
*/
- if (info->detecting && (val & 0x3f8)) {
+ if (info->detecting && (val & MICD_LVL_1_TO_7)) {
if (info->jack_flips >= info->micd_num_modes * 10) {
dev_dbg(arizona->dev, "Detected HP/line\n");
arizona_identify_headphone(info);
@@ -851,7 +874,7 @@ static void arizona_micd_detect(struct work_struct *work)
* If we're still detecting and we detect a short then we've
* got a headphone. Otherwise it's a button press.
*/
- if (val & 0x3fc) {
+ if (val & MICD_LVL_0_TO_7) {
if (info->mic) {
dev_dbg(arizona->dev, "Mic button detected\n");
@@ -1126,6 +1149,16 @@ static int arizona_extcon_probe(struct platform_device *pdev)
break;
}
break;
+ case WM5110:
+ switch (arizona->rev) {
+ case 0 ... 2:
+ break;
+ default:
+ info->micd_clamp = true;
+ info->hpdet_ip = 2;
+ break;
+ }
+ break;
default:
break;
}
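
The arizona hunk above replaces the raw MICDET status masks (0x7fc, 0x400, 0x3f8, 0x3fc) with the named MICD_LVL_* macros. A minimal sketch, assuming only that ARIZONA_MICD_LVL_n sits at bit n + 2 (which is what the old magic numbers imply), showing that the composed macros reproduce the old values:

#include <assert.h>

/* Assumed bit layout, inferred from the old constants: level n is bit n + 2. */
#define ARIZONA_MICD_LVL(n)	(1u << ((n) + 2))

#define MICD_LVL_1_TO_7	(ARIZONA_MICD_LVL(1) | ARIZONA_MICD_LVL(2) | \
			 ARIZONA_MICD_LVL(3) | ARIZONA_MICD_LVL(4) | \
			 ARIZONA_MICD_LVL(5) | ARIZONA_MICD_LVL(6) | \
			 ARIZONA_MICD_LVL(7))
#define MICD_LVL_0_TO_7	(ARIZONA_MICD_LVL(0) | MICD_LVL_1_TO_7)
#define MICD_LVL_0_TO_8	(MICD_LVL_0_TO_7 | ARIZONA_MICD_LVL(8))

int main(void)
{
	assert(MICD_LVL_1_TO_7 == 0x3f8);	/* old "mic button" mask */
	assert(MICD_LVL_0_TO_7 == 0x3fc);	/* old "short detected" mask */
	assert(MICD_LVL_0_TO_8 == 0x7fc);	/* old "valid MICDET" mask */
	assert(ARIZONA_MICD_LVL(8) == 0x400);	/* old "high impedance" bit */
	return 0;
}

The named masks make the detection thresholds in arizona_micd_detect() self-documenting without changing any register semantics.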
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 7e0dff58e494..a63a6b21c9ad 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -40,6 +40,7 @@ struct gpio_extcon_data {
int irq;
struct delayed_work work;
unsigned long debounce_jiffies;
+ bool check_on_resume;
};
static void gpio_extcon_work(struct work_struct *work)
@@ -103,8 +104,15 @@ static int gpio_extcon_probe(struct platform_device *pdev)
extcon_data->gpio_active_low = pdata->gpio_active_low;
extcon_data->state_on = pdata->state_on;
extcon_data->state_off = pdata->state_off;
+ extcon_data->check_on_resume = pdata->check_on_resume;
if (pdata->state_on && pdata->state_off)
extcon_data->edev.print_state = extcon_gpio_print_state;
+
+ ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN,
+ pdev->name);
+ if (ret < 0)
+ return ret;
+
if (pdata->debounce) {
ret = gpio_set_debounce(extcon_data->gpio,
pdata->debounce * 1000);
@@ -117,11 +125,6 @@ static int gpio_extcon_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN,
- pdev->name);
- if (ret < 0)
- goto err;
-
INIT_DELAYED_WORK(&extcon_data->work, gpio_extcon_work);
extcon_data->irq = gpio_to_irq(extcon_data->gpio);
@@ -159,12 +162,31 @@ static int gpio_extcon_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int gpio_extcon_resume(struct device *dev)
+{
+ struct gpio_extcon_data *extcon_data;
+
+ extcon_data = dev_get_drvdata(dev);
+ if (extcon_data->check_on_resume)
+ queue_delayed_work(system_power_efficient_wq,
+ &extcon_data->work, extcon_data->debounce_jiffies);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops gpio_extcon_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, gpio_extcon_resume)
+};
+
static struct platform_driver gpio_extcon_driver = {
.probe = gpio_extcon_probe,
.remove = gpio_extcon_remove,
.driver = {
.name = "extcon-gpio",
.owner = THIS_MODULE,
+ .pm = &gpio_extcon_pm_ops,
},
};
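
The check_on_resume addition above covers the case where a cable is plugged or unplugged while the system is suspended and the edge interrupt is lost: on resume the debounced work is queued once so the GPIO is re-read. A hypothetical board-file sketch; the field names follow the pdata accesses in the hunk above, and the exact struct gpio_extcon_platform_data layout (from include/linux/extcon/extcon-gpio.h) is an assumption:

#include <linux/extcon/extcon-gpio.h>
#include <linux/platform_device.h>

static struct gpio_extcon_platform_data usb_cable_pdata = {
	.name		 = "usb-cable",
	.gpio		 = 42,		/* board-specific detect GPIO (assumption) */
	.debounce	 = 20,		/* ms */
	.check_on_resume = true,	/* re-read the GPIO after resume */
};

static struct platform_device usb_cable_device = {
	.name	= "extcon-gpio",
	.id	= -1,
	.dev	= {
		.platform_data = &usb_cable_pdata,
	},
};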
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
new file mode 100644
index 000000000000..3846941801b8
--- /dev/null
+++ b/drivers/extcon/extcon-max14577.c
@@ -0,0 +1,752 @@
+/*
+ * extcon-max14577.c - MAX14577 extcon driver to support MAX14577 MUIC
+ *
+ * Copyright (C) 2013 Samsung Electronics
+ * Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/max14577.h>
+#include <linux/mfd/max14577-private.h>
+#include <linux/extcon.h>
+
+#define DEV_NAME "max14577-muic"
+#define DELAY_MS_DEFAULT 17000 /* unit: millisecond */
+
+enum max14577_muic_adc_debounce_time {
+ ADC_DEBOUNCE_TIME_5MS = 0,
+ ADC_DEBOUNCE_TIME_10MS,
+ ADC_DEBOUNCE_TIME_25MS,
+ ADC_DEBOUNCE_TIME_38_62MS,
+};
+
+enum max14577_muic_status {
+ MAX14577_MUIC_STATUS1 = 0,
+ MAX14577_MUIC_STATUS2 = 1,
+ MAX14577_MUIC_STATUS_END,
+};
+
+struct max14577_muic_info {
+ struct device *dev;
+ struct max14577 *max14577;
+ struct extcon_dev *edev;
+ int prev_cable_type;
+ int prev_chg_type;
+ u8 status[MAX14577_MUIC_STATUS_END];
+
+ bool irq_adc;
+ bool irq_chg;
+ struct work_struct irq_work;
+ struct mutex mutex;
+
+ /*
+	 * Use a delayed workqueue to detect the cable state and then
+	 * notify it to the notifiee/platform through a uevent.
+	 * After the platform has finished booting, the extcon provider
+	 * driver should notify the cable state to the upper layer.
+ */
+ struct delayed_work wq_detcable;
+
+ /*
+	 * Default usb/uart path, either UART/USB or AUX_UART/AUX_USB,
+	 * for the h/w path of COMP2/COMN1 on the CONTROL1 register.
+ */
+ int path_usb;
+ int path_uart;
+};
+
+enum max14577_muic_cable_group {
+ MAX14577_CABLE_GROUP_ADC = 0,
+ MAX14577_CABLE_GROUP_CHG,
+};
+
+/**
+ * struct max14577_muic_irq
+ * @irq: the index of the irq in the MUIC device's irq list.
+ * @name: the name of the irq.
+ * @virq: the virtual irq obtained from the irq domain.
+ */
+struct max14577_muic_irq {
+ unsigned int irq;
+ const char *name;
+ unsigned int virq;
+};
+
+static struct max14577_muic_irq muic_irqs[] = {
+ { MAX14577_IRQ_INT1_ADC, "muic-ADC" },
+ { MAX14577_IRQ_INT1_ADCLOW, "muic-ADCLOW" },
+ { MAX14577_IRQ_INT1_ADCERR, "muic-ADCError" },
+ { MAX14577_IRQ_INT2_CHGTYP, "muic-CHGTYP" },
+ { MAX14577_IRQ_INT2_CHGDETRUN, "muic-CHGDETRUN" },
+ { MAX14577_IRQ_INT2_DCDTMR, "muic-DCDTMR" },
+ { MAX14577_IRQ_INT2_DBCHG, "muic-DBCHG" },
+ { MAX14577_IRQ_INT2_VBVOLT, "muic-VBVOLT" },
+};
+
+/* Define supported accessory type */
+enum max14577_muic_acc_type {
+ MAX14577_MUIC_ADC_GROUND = 0x0,
+ MAX14577_MUIC_ADC_SEND_END_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S1_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S2_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S3_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S4_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S5_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S6_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S7_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S8_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S9_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S10_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S11_BUTTON,
+ MAX14577_MUIC_ADC_REMOTE_S12_BUTTON,
+ MAX14577_MUIC_ADC_RESERVED_ACC_1,
+ MAX14577_MUIC_ADC_RESERVED_ACC_2,
+ MAX14577_MUIC_ADC_RESERVED_ACC_3,
+ MAX14577_MUIC_ADC_RESERVED_ACC_4,
+ MAX14577_MUIC_ADC_RESERVED_ACC_5,
+ MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE2,
+ MAX14577_MUIC_ADC_PHONE_POWERED_DEV,
+ MAX14577_MUIC_ADC_TTY_CONVERTER,
+ MAX14577_MUIC_ADC_UART_CABLE,
+ MAX14577_MUIC_ADC_CEA936A_TYPE1_CHG,
+ MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF,
+ MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON,
+ MAX14577_MUIC_ADC_AV_CABLE_NOLOAD,
+ MAX14577_MUIC_ADC_CEA936A_TYPE2_CHG,
+ MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF,
+ MAX14577_MUIC_ADC_FACTORY_MODE_UART_ON,
+ MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE1, /* with Remote and Simple Ctrl */
+ MAX14577_MUIC_ADC_OPEN,
+};
+
+/* The max14577 MUIC device supports the below list of accessories (external connectors) */
+enum {
+ EXTCON_CABLE_USB = 0,
+ EXTCON_CABLE_TA,
+ EXTCON_CABLE_FAST_CHARGER,
+ EXTCON_CABLE_SLOW_CHARGER,
+ EXTCON_CABLE_CHARGE_DOWNSTREAM,
+ EXTCON_CABLE_JIG_USB_ON,
+ EXTCON_CABLE_JIG_USB_OFF,
+ EXTCON_CABLE_JIG_UART_OFF,
+ EXTCON_CABLE_JIG_UART_ON,
+
+ _EXTCON_CABLE_NUM,
+};
+
+static const char *max14577_extcon_cable[] = {
+ [EXTCON_CABLE_USB] = "USB",
+ [EXTCON_CABLE_TA] = "TA",
+ [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
+ [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
+ [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
+ [EXTCON_CABLE_JIG_USB_ON] = "JIG-USB-ON",
+ [EXTCON_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
+ [EXTCON_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
+ [EXTCON_CABLE_JIG_UART_ON] = "JIG-UART-ON",
+
+ NULL,
+};
+
+/*
+ * max14577_muic_set_debounce_time - Set the debounce time of ADC
+ * @info: the instance including private data of max14577 MUIC
+ * @time: the debounce time of ADC
+ */
+static int max14577_muic_set_debounce_time(struct max14577_muic_info *info,
+ enum max14577_muic_adc_debounce_time time)
+{
+	int ret;
+
+ switch (time) {
+ case ADC_DEBOUNCE_TIME_5MS:
+ case ADC_DEBOUNCE_TIME_10MS:
+ case ADC_DEBOUNCE_TIME_25MS:
+ case ADC_DEBOUNCE_TIME_38_62MS:
+ ret = max14577_update_reg(info->max14577->regmap,
+ MAX14577_MUIC_REG_CONTROL3,
+ CTRL3_ADCDBSET_MASK,
+ time << CTRL3_ADCDBSET_SHIFT);
+ if (ret) {
+ dev_err(info->dev, "failed to set ADC debounce time\n");
+ return ret;
+ }
+ break;
+ default:
+ dev_err(info->dev, "invalid ADC debounce time\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * max14577_muic_set_path - Set hardware line according to attached cable
+ * @info: the instance including private data of max14577 MUIC
+ * @val: the path according to the attached cable
+ * @attached: the state of the cable (true:attached, false:detached)
+ *
+ * The max14577 MUIC device shares an outside H/W line among a variety of
+ * cables, so this function sets the internal path of the H/W line according
+ * to the type of attached cable.
+ */
+static int max14577_muic_set_path(struct max14577_muic_info *info,
+ u8 val, bool attached)
+{
+ int ret = 0;
+ u8 ctrl1, ctrl2 = 0;
+
+ /* Set open state to path before changing hw path */
+ ret = max14577_update_reg(info->max14577->regmap,
+ MAX14577_MUIC_REG_CONTROL1,
+ CLEAR_IDBEN_MICEN_MASK, CTRL1_SW_OPEN);
+ if (ret < 0) {
+ dev_err(info->dev, "failed to update MUIC register\n");
+ return ret;
+ }
+
+ if (attached)
+ ctrl1 = val;
+ else
+ ctrl1 = CTRL1_SW_OPEN;
+
+ ret = max14577_update_reg(info->max14577->regmap,
+ MAX14577_MUIC_REG_CONTROL1,
+ CLEAR_IDBEN_MICEN_MASK, ctrl1);
+ if (ret < 0) {
+ dev_err(info->dev, "failed to update MUIC register\n");
+ return ret;
+ }
+
+ if (attached)
+ ctrl2 |= CTRL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */
+ else
+ ctrl2 |= CTRL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */
+
+ ret = max14577_update_reg(info->max14577->regmap,
+ MAX14577_REG_CONTROL2,
+ CTRL2_LOWPWR_MASK | CTRL2_CPEN_MASK, ctrl2);
+ if (ret < 0) {
+ dev_err(info->dev, "failed to update MUIC register\n");
+ return ret;
+ }
+
+ dev_dbg(info->dev,
+ "CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
+ ctrl1, ctrl2, attached ? "attached" : "detached");
+
+ return 0;
+}
+
+/*
+ * max14577_muic_get_cable_type - Return cable type and check cable state
+ * @info: the instance including private data of max14577 MUIC
+ * @group: the cable group to check (ADC or charger)
+ * @attached: returns the cable state (attached or detached)
+ *
+ * This function checks whether the cable is attached or detached, and then
+ * determines the precise cable type according to the cable group:
+ * - MAX14577_CABLE_GROUP_ADC
+ * - MAX14577_CABLE_GROUP_CHG
+ */
+static int max14577_muic_get_cable_type(struct max14577_muic_info *info,
+ enum max14577_muic_cable_group group, bool *attached)
+{
+ int cable_type = 0;
+ int adc;
+ int chg_type;
+
+ switch (group) {
+ case MAX14577_CABLE_GROUP_ADC:
+ /*
+ * Read ADC value to check cable type and decide cable state
+ * according to cable type
+ */
+ adc = info->status[MAX14577_MUIC_STATUS1] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ /*
+ * Check current cable state/cable type and store cable type
+ * (info->prev_cable_type) for handling cable when cable is
+ * detached.
+ */
+ if (adc == MAX14577_MUIC_ADC_OPEN) {
+ *attached = false;
+
+ cable_type = info->prev_cable_type;
+ info->prev_cable_type = MAX14577_MUIC_ADC_OPEN;
+ } else {
+ *attached = true;
+
+ cable_type = info->prev_cable_type = adc;
+ }
+ break;
+ case MAX14577_CABLE_GROUP_CHG:
+ /*
+ * Read charger type to check cable type and decide cable state
+ * according to type of charger cable.
+ */
+ chg_type = info->status[MAX14577_MUIC_STATUS2] &
+ STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ if (chg_type == MAX14577_CHARGER_TYPE_NONE) {
+ *attached = false;
+
+ cable_type = info->prev_chg_type;
+ info->prev_chg_type = MAX14577_CHARGER_TYPE_NONE;
+ } else {
+ *attached = true;
+
+ /*
+ * Check current cable state/cable type and store cable
+ * type(info->prev_chg_type) for handling cable when
+ * charger cable is detached.
+ */
+ cable_type = info->prev_chg_type = chg_type;
+ }
+
+ break;
+ default:
+ dev_err(info->dev, "Unknown cable group (%d)\n", group);
+ cable_type = -EINVAL;
+ break;
+ }
+
+ return cable_type;
+}
+
+static int max14577_muic_jig_handler(struct max14577_muic_info *info,
+ int cable_type, bool attached)
+{
+ char cable_name[32];
+ int ret = 0;
+ u8 path = CTRL1_SW_OPEN;
+
+ dev_dbg(info->dev,
+ "external connector is %s (adc:0x%02x)\n",
+ attached ? "attached" : "detached", cable_type);
+
+ switch (cable_type) {
+ case MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */
+ /* PATH:AP_USB */
+ strcpy(cable_name, "JIG-USB-OFF");
+ path = CTRL1_SW_USB;
+ break;
+ case MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */
+ /* PATH:AP_USB */
+ strcpy(cable_name, "JIG-USB-ON");
+ path = CTRL1_SW_USB;
+ break;
+ case MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */
+ /* PATH:AP_UART */
+ strcpy(cable_name, "JIG-UART-OFF");
+ path = CTRL1_SW_UART;
+ break;
+ default:
+ dev_err(info->dev, "failed to detect %s jig cable\n",
+ attached ? "attached" : "detached");
+ return -EINVAL;
+ }
+
+ ret = max14577_muic_set_path(info, path, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, cable_name, attached);
+
+ return 0;
+}
+
+static int max14577_muic_adc_handler(struct max14577_muic_info *info)
+{
+ int cable_type;
+ bool attached;
+ int ret = 0;
+
+ /* Check accessory state which is either detached or attached */
+ cable_type = max14577_muic_get_cable_type(info,
+ MAX14577_CABLE_GROUP_ADC, &attached);
+
+ dev_dbg(info->dev,
+ "external connector is %s (adc:0x%02x, prev_adc:0x%x)\n",
+ attached ? "attached" : "detached", cable_type,
+ info->prev_cable_type);
+
+ switch (cable_type) {
+ case MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF:
+ case MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON:
+ case MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF:
+ /* JIG */
+ ret = max14577_muic_jig_handler(info, cable_type, attached);
+ if (ret < 0)
+ return ret;
+ break;
+ case MAX14577_MUIC_ADC_GROUND:
+ case MAX14577_MUIC_ADC_SEND_END_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S1_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S2_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S3_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S4_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S5_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S6_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S7_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S8_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S9_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S10_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S11_BUTTON:
+ case MAX14577_MUIC_ADC_REMOTE_S12_BUTTON:
+ case MAX14577_MUIC_ADC_RESERVED_ACC_1:
+ case MAX14577_MUIC_ADC_RESERVED_ACC_2:
+ case MAX14577_MUIC_ADC_RESERVED_ACC_3:
+ case MAX14577_MUIC_ADC_RESERVED_ACC_4:
+ case MAX14577_MUIC_ADC_RESERVED_ACC_5:
+ case MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE2:
+ case MAX14577_MUIC_ADC_PHONE_POWERED_DEV:
+ case MAX14577_MUIC_ADC_TTY_CONVERTER:
+ case MAX14577_MUIC_ADC_UART_CABLE:
+ case MAX14577_MUIC_ADC_CEA936A_TYPE1_CHG:
+ case MAX14577_MUIC_ADC_AV_CABLE_NOLOAD:
+ case MAX14577_MUIC_ADC_CEA936A_TYPE2_CHG:
+ case MAX14577_MUIC_ADC_FACTORY_MODE_UART_ON:
+ case MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE1:
+ /*
+		 * These accessories aren't used in the general case. If one of
+		 * them needs to be detected, proper handling should be
+		 * implemented for when it is attached/detached.
+ */
+ dev_info(info->dev,
+ "accessory is %s but it isn't used (adc:0x%x)\n",
+ attached ? "attached" : "detached", cable_type);
+ return -EAGAIN;
+ default:
+ dev_err(info->dev,
+ "failed to detect %s accessory (adc:0x%x)\n",
+ attached ? "attached" : "detached", cable_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int max14577_muic_chg_handler(struct max14577_muic_info *info)
+{
+ int chg_type;
+ bool attached;
+ int ret = 0;
+
+ chg_type = max14577_muic_get_cable_type(info,
+ MAX14577_CABLE_GROUP_CHG, &attached);
+
+ dev_dbg(info->dev,
+ "external connector is %s(chg_type:0x%x, prev_chg_type:0x%x)\n",
+ attached ? "attached" : "detached",
+ chg_type, info->prev_chg_type);
+
+ switch (chg_type) {
+ case MAX14577_CHARGER_TYPE_USB:
+ /* PATH:AP_USB */
+ ret = max14577_muic_set_path(info, info->path_usb, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "USB", attached);
+ break;
+ case MAX14577_CHARGER_TYPE_DEDICATED_CHG:
+ extcon_set_cable_state(info->edev, "TA", attached);
+ break;
+ case MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT:
+ extcon_set_cable_state(info->edev,
+ "Charge-downstream", attached);
+ break;
+ case MAX14577_CHARGER_TYPE_SPECIAL_500MA:
+ extcon_set_cable_state(info->edev, "Slow-charger", attached);
+ break;
+ case MAX14577_CHARGER_TYPE_SPECIAL_1A:
+ extcon_set_cable_state(info->edev, "Fast-charger", attached);
+ break;
+ case MAX14577_CHARGER_TYPE_NONE:
+ case MAX14577_CHARGER_TYPE_DEAD_BATTERY:
+ break;
+ default:
+ dev_err(info->dev,
+ "failed to detect %s accessory (chg_type:0x%x)\n",
+ attached ? "attached" : "detached", chg_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void max14577_muic_irq_work(struct work_struct *work)
+{
+ struct max14577_muic_info *info = container_of(work,
+ struct max14577_muic_info, irq_work);
+ int ret = 0;
+
+ if (!info->edev)
+ return;
+
+ mutex_lock(&info->mutex);
+
+ ret = max14577_bulk_read(info->max14577->regmap,
+ MAX14577_MUIC_REG_STATUS1, info->status, 2);
+ if (ret) {
+ dev_err(info->dev, "failed to read MUIC register\n");
+ mutex_unlock(&info->mutex);
+ return;
+ }
+
+ if (info->irq_adc) {
+ ret = max14577_muic_adc_handler(info);
+ info->irq_adc = false;
+ }
+ if (info->irq_chg) {
+ ret = max14577_muic_chg_handler(info);
+ info->irq_chg = false;
+ }
+
+ if (ret < 0)
+ dev_err(info->dev, "failed to handle MUIC interrupt\n");
+
+ mutex_unlock(&info->mutex);
+
+ return;
+}
+
+static irqreturn_t max14577_muic_irq_handler(int irq, void *data)
+{
+ struct max14577_muic_info *info = data;
+ int i, irq_type = -1;
+
+ /*
+	 * We may be called multiple times for different nested IRQs,
+	 * including changes in INT1_ADC and INT2_CHGTYP at once.
+	 * However, we only need to know whether it was an ADC interrupt,
+	 * a charger interrupt, or both, so decode the IRQ and set the proper flags.
+ */
+ for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
+ if (irq == muic_irqs[i].virq)
+ irq_type = muic_irqs[i].irq;
+
+ switch (irq_type) {
+ case MAX14577_IRQ_INT1_ADC:
+ case MAX14577_IRQ_INT1_ADCLOW:
+ case MAX14577_IRQ_INT1_ADCERR:
+		/* Handle all accessories except
+		   the charger-type accessories */
+ info->irq_adc = true;
+ break;
+ case MAX14577_IRQ_INT2_CHGTYP:
+ case MAX14577_IRQ_INT2_CHGDETRUN:
+ case MAX14577_IRQ_INT2_DCDTMR:
+ case MAX14577_IRQ_INT2_DBCHG:
+ case MAX14577_IRQ_INT2_VBVOLT:
+ /* Handle charger accessory */
+ info->irq_chg = true;
+ break;
+ default:
+ dev_err(info->dev, "muic interrupt: irq %d occurred, skipped\n",
+ irq_type);
+ return IRQ_HANDLED;
+ }
+ schedule_work(&info->irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static int max14577_muic_detect_accessory(struct max14577_muic_info *info)
+{
+ int ret = 0;
+ int adc;
+ int chg_type;
+ bool attached;
+
+ mutex_lock(&info->mutex);
+
+ /* Read STATUSx register to detect accessory */
+ ret = max14577_bulk_read(info->max14577->regmap,
+ MAX14577_MUIC_REG_STATUS1, info->status, 2);
+ if (ret) {
+ dev_err(info->dev, "failed to read MUIC register\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+
+ adc = max14577_muic_get_cable_type(info, MAX14577_CABLE_GROUP_ADC,
+ &attached);
+ if (attached && adc != MAX14577_MUIC_ADC_OPEN) {
+ ret = max14577_muic_adc_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect accessory\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+ }
+
+ chg_type = max14577_muic_get_cable_type(info, MAX14577_CABLE_GROUP_CHG,
+ &attached);
+ if (attached && chg_type != MAX14577_CHARGER_TYPE_NONE) {
+ ret = max14577_muic_chg_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect charger accessory\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&info->mutex);
+
+ return 0;
+}
+
+static void max14577_muic_detect_cable_wq(struct work_struct *work)
+{
+ struct max14577_muic_info *info = container_of(to_delayed_work(work),
+ struct max14577_muic_info, wq_detcable);
+
+ max14577_muic_detect_accessory(info);
+}
+
+static int max14577_muic_probe(struct platform_device *pdev)
+{
+ struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent);
+ struct max14577_muic_info *info;
+ int delay_jiffies;
+ int ret;
+ int i;
+ u8 id;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+ info->dev = &pdev->dev;
+ info->max14577 = max14577;
+
+ platform_set_drvdata(pdev, info);
+ mutex_init(&info->mutex);
+
+ INIT_WORK(&info->irq_work, max14577_muic_irq_work);
+
+ /* Support irq domain for max14577 MUIC device */
+ for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
+ struct max14577_muic_irq *muic_irq = &muic_irqs[i];
+ unsigned int virq = 0;
+
+ virq = regmap_irq_get_virq(max14577->irq_data, muic_irq->irq);
+ if (!virq)
+ return -EINVAL;
+ muic_irq->virq = virq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, virq, NULL,
+ max14577_muic_irq_handler,
+ IRQF_NO_SUSPEND,
+ muic_irq->name, info);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed: irq request (IRQ: %d,"
+				" error: %d)\n",
+ muic_irq->irq, ret);
+ return ret;
+ }
+ }
+
+ /* Initialize extcon device */
+ info->edev = devm_kzalloc(&pdev->dev, sizeof(*info->edev), GFP_KERNEL);
+ if (!info->edev) {
+ dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
+ return -ENOMEM;
+ }
+ info->edev->name = DEV_NAME;
+ info->edev->supported_cable = max14577_extcon_cable;
+ ret = extcon_dev_register(info->edev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register extcon device\n");
+ return ret;
+ }
+
+ /* Default h/w line path */
+ info->path_usb = CTRL1_SW_USB;
+ info->path_uart = CTRL1_SW_UART;
+ delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+
+ /* Set initial path for UART */
+ max14577_muic_set_path(info, info->path_uart, true);
+
+	/* Check revision number of MUIC device */
+ ret = max14577_read_reg(info->max14577->regmap,
+ MAX14577_REG_DEVICEID, &id);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to read revision number\n");
+ goto err_extcon;
+ }
+ dev_info(info->dev, "device ID : 0x%x\n", id);
+
+ /* Set ADC debounce time */
+ max14577_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
+
+ /*
+	 * Detect the accessory after the platform initialization has completed.
+	 *
+	 * - Use a delayed workqueue to detect the cable state and then
+	 * notify it to the notifiee/platform through a uevent.
+	 * After the platform has finished booting, the extcon provider
+	 * driver should notify the cable state to the upper layer.
+ */
+ INIT_DELAYED_WORK(&info->wq_detcable, max14577_muic_detect_cable_wq);
+ ret = queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
+ delay_jiffies);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to schedule delayed work for cable detect\n");
+ goto err_extcon;
+ }
+
+	return 0;
+
+err_extcon:
+ extcon_dev_unregister(info->edev);
+ return ret;
+}
+
+static int max14577_muic_remove(struct platform_device *pdev)
+{
+ struct max14577_muic_info *info = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&info->irq_work);
+ extcon_dev_unregister(info->edev);
+
+ return 0;
+}
+
+static struct platform_driver max14577_muic_driver = {
+ .driver = {
+ .name = DEV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = max14577_muic_probe,
+ .remove = max14577_muic_remove,
+};
+
+module_platform_driver(max14577_muic_driver);
+
+MODULE_DESCRIPTION("MAXIM 14577 Extcon driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:extcon-max14577");
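
A consumer of the cable states exported by this new driver would typically watch them through the extcon notifier API of this era. A minimal, hypothetical sketch; the extcon device name comes from DEV_NAME and the cable name from max14577_extcon_cable above:

#include <linux/extcon.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static struct extcon_specific_cable_nb usb_cable_nb;

static int usb_cable_notifier(struct notifier_block *nb,
			      unsigned long state, void *data)
{
	/* state is non-zero when the "USB" cable is reported as attached */
	pr_info("max14577 USB cable %s\n", state ? "attached" : "detached");
	return NOTIFY_OK;
}

static struct notifier_block usb_nb = {
	.notifier_call = usb_cable_notifier,
};

static int watch_max14577_usb(void)
{
	/* Watch the "USB" cable of the "max14577-muic" extcon device */
	return extcon_register_interest(&usb_cable_nb, "max14577-muic",
					"USB", &usb_nb);
}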
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 6c91976dd823..2aea4bcdd7f3 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -78,20 +78,24 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
{
- unsigned int set;
+ unsigned int set, id_src;
struct palmas_usb *palmas_usb = _palmas_usb;
palmas_read(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_ID_INT_LATCH_SET, &set);
+ palmas_read(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
+ PALMAS_USB_ID_INT_SRC, &id_src);
- if (set & PALMAS_USB_ID_INT_SRC_ID_GND) {
+ if ((set & PALMAS_USB_ID_INT_SRC_ID_GND) &&
+ (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_ID_INT_LATCH_CLR,
PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND);
palmas_usb->linkstat = PALMAS_USB_STATE_ID;
extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", true);
dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
- } else if (set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) {
+ } else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) &&
+ (id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) {
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_ID_INT_LATCH_CLR,
PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT);
@@ -103,6 +107,11 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", false);
dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
+ } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) &&
+ (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
+ palmas_usb->linkstat = PALMAS_USB_STATE_ID;
+ extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", true);
+		dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
}
return IRQ_HANDLED;
@@ -269,7 +278,9 @@ static const struct dev_pm_ops palmas_pm_ops = {
static struct of_device_id of_palmas_match_tbl[] = {
{ .compatible = "ti,palmas-usb", },
+ { .compatible = "ti,palmas-usb-vid", },
{ .compatible = "ti,twl6035-usb", },
+ { .compatible = "ti,twl6035-usb-vid", },
{ /* end */ }
};
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 0e799516a2ab..eb6935c8ad94 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -523,11 +523,11 @@ static DEFINE_SPINLOCK(address_handler_list_lock);
static LIST_HEAD(address_handler_list);
const struct fw_address_region fw_high_memory_region =
- { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, };
+ { .start = FW_MAX_PHYSICAL_RANGE, .end = 0xffffe0000000ULL, };
EXPORT_SYMBOL(fw_high_memory_region);
static const struct fw_address_region low_memory_region =
- { .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
+ { .start = 0x000000000000ULL, .end = FW_MAX_PHYSICAL_RANGE, };
#if 0
const struct fw_address_region fw_private_region =
@@ -1217,7 +1217,7 @@ static void handle_low_memory(struct fw_card *card, struct fw_request *request,
}
static struct fw_address_handler low_memory = {
- .length = 0x000100000000ULL,
+ .length = FW_MAX_PHYSICAL_RANGE,
.address_callback = handle_low_memory,
};
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 515a42c786d0..c98764aeeec6 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -237,6 +237,9 @@ static inline bool is_next_generation(int new_generation, int old_generation)
#define LOCAL_BUS 0xffc0
+/* arbitrarily chosen maximum range for physical DMA: 128 TB */
+#define FW_MAX_PHYSICAL_RANGE (128ULL << 40)
+
void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
int fw_get_response_length(struct fw_request *request);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 6aa8a86cb83b..6f74d8d3f700 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -370,6 +370,10 @@ MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
", or a combination, or all = -1)");
+static bool param_remote_dma;
+module_param_named(remote_dma, param_remote_dma, bool, 0444);
+MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
+
static void log_irqs(struct fw_ohci *ohci, u32 evt)
{
if (likely(!(param_debug &
@@ -2050,10 +2054,10 @@ static void bus_reset_work(struct work_struct *work)
be32_to_cpu(ohci->next_header));
}
-#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
- reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
- reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
-#endif
+ if (param_remote_dma) {
+ reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
+ reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
+ }
spin_unlock_irq(&ohci->lock);
@@ -2363,7 +2367,7 @@ static int ohci_enable(struct fw_card *card,
reg_write(ohci, OHCI1394_FairnessControl, 0);
card->priority_budget_implemented = ohci->pri_req_max != 0;
- reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
+ reg_write(ohci, OHCI1394_PhyUpperBound, FW_MAX_PHYSICAL_RANGE >> 16);
reg_write(ohci, OHCI1394_IntEventClear, ~0);
reg_write(ohci, OHCI1394_IntMaskClear, ~0);
@@ -2587,13 +2591,13 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
static int ohci_enable_phys_dma(struct fw_card *card,
int node_id, int generation)
{
-#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
- return 0;
-#else
struct fw_ohci *ohci = fw_ohci(card);
unsigned long flags;
int n, ret = 0;
+ if (param_remote_dma)
+ return 0;
+
/*
* FIXME: Make sure this bitmask is cleared when we clear the busReset
* interrupt bit. Clear physReqResourceAllBuses on bus reset.
@@ -2622,7 +2626,6 @@ static int ohci_enable_phys_dma(struct fw_card *card,
spin_unlock_irqrestore(&ohci->lock, flags);
return ret;
-#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
@@ -3720,9 +3723,11 @@ static int pci_probe(struct pci_dev *dev,
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
ohci_notice(ohci,
"added OHCI v%x.%x device as card %d, "
- "%d IR + %d IT contexts, quirks 0x%x\n",
+ "%d IR + %d IT contexts, quirks 0x%x%s\n",
version >> 16, version & 0xff, ohci->card.index,
- ohci->n_ir, ohci->n_it, ohci->quirks);
+ ohci->n_ir, ohci->n_it, ohci->quirks,
+ reg_read(ohci, OHCI1394_PhyUpperBound) ?
+ ", >4 GB phys DMA" : "");
return 0;
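
The PhyUpperBound change above is easy to sanity-check: the register previously held 0x00010000 for the old 4 GB bound, i.e. the bound shifted right by 16 bits (64 KiB granularity), and the same shift applied to the new 128 TB constant yields 0x80000000. A small arithmetic sketch assuming only that scaling relationship:

#include <assert.h>
#include <stdint.h>

#define FW_MAX_PHYSICAL_RANGE	(128ULL << 40)	/* 128 TB, as defined in core.h above */

int main(void)
{
	/* Old behaviour: 4 GB bound -> register value 0x00010000 */
	assert((0x000100000000ULL >> 16) == 0x00010000);

	/* New behaviour: 128 TB bound -> register value 0x80000000 */
	assert((uint32_t)(FW_MAX_PHYSICAL_RANGE >> 16) == 0x80000000u);
	return 0;
}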
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index b0bb056458a3..281029daf98c 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_driver_template = {
.cmd_per_lun = 1,
.can_queue = 1,
.sdev_attrs = sbp2_scsi_sysfs_attrs,
- .no_write_same = 1,
};
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 074787281c94..41983883cef4 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -108,9 +108,12 @@ config DMI_SYSFS
under /sys/firmware/dmi when this option is enabled and
loaded.
+config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
+ bool
+
config ISCSI_IBFT_FIND
bool "iSCSI Boot Firmware Table Attributes"
- depends on X86
+ depends on X86 && ACPI
default n
help
This option enables the kernel to find the region of memory
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 299fad6b5867..5373dc5b6011 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
+obj-$(CONFIG_UEFI_CPER) += efi/
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index eb26d62e5188..e0f1cb3d3598 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -553,7 +553,7 @@ static const struct bin_attribute dmi_entry_raw_attr = {
static void dmi_sysfs_entry_release(struct kobject *kobj)
{
struct dmi_sysfs_entry *entry = to_entry(kobj);
- sysfs_remove_bin_file(&entry->kobj, &dmi_entry_raw_attr);
+
spin_lock(&entry_list_lock);
list_del(&entry->list);
spin_unlock(&entry_list_lock);
@@ -685,6 +685,7 @@ static void __exit dmi_sysfs_exit(void)
pr_debug("dmi-sysfs: unloading.\n");
cleanup_entry_list();
kset_unregister(dmi_kset);
+ kobject_del(dmi_kobj);
kobject_put(dmi_kobj);
}
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index c7e81ff8f3ef..17afc51f3054 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -116,7 +116,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
{
u8 *buf;
- buf = dmi_ioremap(dmi_base, dmi_len);
+ buf = dmi_early_remap(dmi_base, dmi_len);
if (buf == NULL)
return -1;
@@ -124,7 +124,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
add_device_randomness(buf, dmi_len);
- dmi_iounmap(buf, dmi_len);
+ dmi_early_unmap(buf, dmi_len);
return 0;
}
@@ -527,18 +527,18 @@ void __init dmi_scan_machine(void)
* needed during early boot. This also means we can
* iounmap the space when we're done with it.
*/
- p = dmi_ioremap(efi.smbios, 32);
+ p = dmi_early_remap(efi.smbios, 32);
if (p == NULL)
goto error;
memcpy_fromio(buf, p, 32);
- dmi_iounmap(p, 32);
+ dmi_early_unmap(p, 32);
if (!dmi_present(buf)) {
dmi_available = 1;
goto out;
}
- } else {
- p = dmi_ioremap(0xF0000, 0x10000);
+ } else if (IS_ENABLED(CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK)) {
+ p = dmi_early_remap(0xF0000, 0x10000);
if (p == NULL)
goto error;
@@ -554,12 +554,12 @@ void __init dmi_scan_machine(void)
memcpy_fromio(buf + 16, q, 16);
if (!dmi_present(buf)) {
dmi_available = 1;
- dmi_iounmap(p, 0x10000);
+ dmi_early_unmap(p, 0x10000);
goto out;
}
memcpy(buf, buf + 16, 16);
}
- dmi_iounmap(p, 0x10000);
+ dmi_early_unmap(p, 0x10000);
}
error:
pr_info("DMI not present or invalid.\n");
@@ -831,13 +831,13 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
if (!dmi_available)
return -1;
- buf = ioremap(dmi_base, dmi_len);
+ buf = dmi_remap(dmi_base, dmi_len);
if (buf == NULL)
return -1;
dmi_table(buf, dmi_len, dmi_num, decode, private_data);
- iounmap(buf);
+ dmi_unmap(buf);
return 0;
}
EXPORT_SYMBOL_GPL(dmi_walk);
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 3150aa4874e8..1e75f48b61f8 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -36,7 +36,18 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
backend for pstore by default. This setting can be overridden
using the efivars module's pstore_disable parameter.
-config UEFI_CPER
- def_bool n
+config EFI_RUNTIME_MAP
+ bool "Export efi runtime maps to sysfs"
+ depends on X86 && EFI && KEXEC
+ default y
+ help
+ Export efi runtime memory maps to /sys/firmware/efi/runtime-map.
+	  That memory map is used, for example, by kexec to set up the EFI virtual
+	  mapping for the 2nd kernel, but it can also be used for debugging purposes.
+
+ See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map.
endmenu
+
+config UEFI_CPER
+ bool
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 9ba156d3c775..9553496b0f43 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -1,7 +1,8 @@
#
# Makefile for linux kernel
#
-obj-y += efi.o vars.o
+obj-$(CONFIG_EFI) += efi.o vars.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
obj-$(CONFIG_UEFI_CPER) += cper.o
+obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 743fd426f21b..4b9dc836dcf9 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -356,6 +356,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
static struct pstore_info efi_pstore_info = {
.owner = THIS_MODULE,
.name = "efi",
+ .flags = PSTORE_FLAGS_FRAGILE,
.open = efi_pstore_open,
.close = efi_pstore_close,
.read = efi_pstore_read,
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 2e2fbdec0845..4753bac65279 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -32,6 +32,9 @@ struct efi __read_mostly efi = {
.hcdp = EFI_INVALID_TABLE_ADDR,
.uga = EFI_INVALID_TABLE_ADDR,
.uv_systab = EFI_INVALID_TABLE_ADDR,
+ .fw_vendor = EFI_INVALID_TABLE_ADDR,
+ .runtime = EFI_INVALID_TABLE_ADDR,
+ .config_table = EFI_INVALID_TABLE_ADDR,
};
EXPORT_SYMBOL(efi);
@@ -71,13 +74,49 @@ static ssize_t systab_show(struct kobject *kobj,
static struct kobj_attribute efi_attr_systab =
__ATTR(systab, 0400, systab_show, NULL);
+#define EFI_FIELD(var) efi.var
+
+#define EFI_ATTR_SHOW(name) \
+static ssize_t name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "0x%lx\n", EFI_FIELD(name)); \
+}
+
+EFI_ATTR_SHOW(fw_vendor);
+EFI_ATTR_SHOW(runtime);
+EFI_ATTR_SHOW(config_table);
+
+static struct kobj_attribute efi_attr_fw_vendor = __ATTR_RO(fw_vendor);
+static struct kobj_attribute efi_attr_runtime = __ATTR_RO(runtime);
+static struct kobj_attribute efi_attr_config_table = __ATTR_RO(config_table);
+
static struct attribute *efi_subsys_attrs[] = {
&efi_attr_systab.attr,
- NULL, /* maybe more in the future? */
+ &efi_attr_fw_vendor.attr,
+ &efi_attr_runtime.attr,
+ &efi_attr_config_table.attr,
+ NULL,
};
+static umode_t efi_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ umode_t mode = attr->mode;
+
+ if (attr == &efi_attr_fw_vendor.attr)
+ return (efi.fw_vendor == EFI_INVALID_TABLE_ADDR) ? 0 : mode;
+ else if (attr == &efi_attr_runtime.attr)
+ return (efi.runtime == EFI_INVALID_TABLE_ADDR) ? 0 : mode;
+ else if (attr == &efi_attr_config_table.attr)
+ return (efi.config_table == EFI_INVALID_TABLE_ADDR) ? 0 : mode;
+
+ return mode;
+}
+
static struct attribute_group efi_subsys_attr_group = {
.attrs = efi_subsys_attrs,
+ .is_visible = efi_attr_is_visible,
};
static struct efivars generic_efivars;
@@ -128,6 +167,10 @@ static int __init efisubsys_init(void)
goto err_unregister;
}
+ error = efi_runtime_map_init(efi_kobj);
+ if (error)
+ goto err_remove_group;
+
/* and the standard mountpoint for efivarfs */
efivars_kobj = kobject_create_and_add("efivars", efi_kobj);
if (!efivars_kobj) {
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
new file mode 100644
index 000000000000..97cdd16a2169
--- /dev/null
+++ b/drivers/firmware/efi/runtime-map.c
@@ -0,0 +1,181 @@
+/*
+ * linux/drivers/firmware/efi/runtime-map.c
+ * Copyright (C) 2013 Red Hat, Inc., Dave Young <dyoung@redhat.com>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/efi.h>
+#include <linux/slab.h>
+
+#include <asm/setup.h>
+
+static void *efi_runtime_map;
+static int nr_efi_runtime_map;
+static u32 efi_memdesc_size;
+
+struct efi_runtime_map_entry {
+ efi_memory_desc_t md;
+ struct kobject kobj; /* kobject for each entry */
+};
+
+static struct efi_runtime_map_entry **map_entries;
+
+struct map_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct efi_runtime_map_entry *entry, char *buf);
+};
+
+static inline struct map_attribute *to_map_attr(struct attribute *attr)
+{
+ return container_of(attr, struct map_attribute, attr);
+}
+
+static ssize_t type_show(struct efi_runtime_map_entry *entry, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type);
+}
+
+#define EFI_RUNTIME_FIELD(var) entry->md.var
+
+#define EFI_RUNTIME_U64_ATTR_SHOW(name) \
+static ssize_t name##_show(struct efi_runtime_map_entry *entry, char *buf) \
+{ \
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n", EFI_RUNTIME_FIELD(name)); \
+}
+
+EFI_RUNTIME_U64_ATTR_SHOW(phys_addr);
+EFI_RUNTIME_U64_ATTR_SHOW(virt_addr);
+EFI_RUNTIME_U64_ATTR_SHOW(num_pages);
+EFI_RUNTIME_U64_ATTR_SHOW(attribute);
+
+static inline struct efi_runtime_map_entry *to_map_entry(struct kobject *kobj)
+{
+ return container_of(kobj, struct efi_runtime_map_entry, kobj);
+}
+
+static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct efi_runtime_map_entry *entry = to_map_entry(kobj);
+ struct map_attribute *map_attr = to_map_attr(attr);
+
+ return map_attr->show(entry, buf);
+}
+
+static struct map_attribute map_type_attr = __ATTR_RO(type);
+static struct map_attribute map_phys_addr_attr = __ATTR_RO(phys_addr);
+static struct map_attribute map_virt_addr_attr = __ATTR_RO(virt_addr);
+static struct map_attribute map_num_pages_attr = __ATTR_RO(num_pages);
+static struct map_attribute map_attribute_attr = __ATTR_RO(attribute);
+
+/*
+ * These are default attributes that are added for every memmap entry.
+ */
+static struct attribute *def_attrs[] = {
+ &map_type_attr.attr,
+ &map_phys_addr_attr.attr,
+ &map_virt_addr_attr.attr,
+ &map_num_pages_attr.attr,
+ &map_attribute_attr.attr,
+ NULL
+};
+
+static const struct sysfs_ops map_attr_ops = {
+ .show = map_attr_show,
+};
+
+static void map_release(struct kobject *kobj)
+{
+ struct efi_runtime_map_entry *entry;
+
+ entry = to_map_entry(kobj);
+ kfree(entry);
+}
+
+static struct kobj_type __refdata map_ktype = {
+ .sysfs_ops = &map_attr_ops,
+ .default_attrs = def_attrs,
+ .release = map_release,
+};
+
+static struct kset *map_kset;
+
+static struct efi_runtime_map_entry *
+add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
+{
+ int ret;
+ struct efi_runtime_map_entry *entry;
+
+ if (!map_kset) {
+ map_kset = kset_create_and_add("runtime-map", NULL, kobj);
+ if (!map_kset)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ kset_unregister(map_kset);
+ return entry;
+ }
+
+ memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size,
+ sizeof(efi_memory_desc_t));
+
+ kobject_init(&entry->kobj, &map_ktype);
+ entry->kobj.kset = map_kset;
+ ret = kobject_add(&entry->kobj, NULL, "%d", nr);
+ if (ret) {
+ kobject_put(&entry->kobj);
+ kset_unregister(map_kset);
+ return ERR_PTR(ret);
+ }
+
+ return entry;
+}
+
+void efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size)
+{
+ efi_runtime_map = map;
+ nr_efi_runtime_map = nr_entries;
+ efi_memdesc_size = desc_size;
+}
+
+int __init efi_runtime_map_init(struct kobject *efi_kobj)
+{
+ int i, j, ret = 0;
+ struct efi_runtime_map_entry *entry;
+
+ if (!efi_runtime_map)
+ return 0;
+
+ map_entries = kzalloc(nr_efi_runtime_map * sizeof(entry), GFP_KERNEL);
+ if (!map_entries) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < nr_efi_runtime_map; i++) {
+ entry = add_sysfs_runtime_map_entry(efi_kobj, i);
+ if (IS_ERR(entry)) {
+ ret = PTR_ERR(entry);
+ goto out_add_entry;
+ }
+ *(map_entries + i) = entry;
+ }
+
+ return 0;
+out_add_entry:
+	for (j = i - 1; j >= 0; j--) {
+ entry = *(map_entries + j);
+ kobject_put(&entry->kobj);
+ }
+ if (map_kset)
+ kset_unregister(map_kset);
+out:
+ return ret;
+}
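
Given the kset named "runtime-map" created under the efi kobject and the per-entry attributes in def_attrs[] above, each memory descriptor should appear as /sys/firmware/efi/runtime-map/<nr>/{type,phys_addr,virt_addr,num_pages,attribute}. A minimal user-space sketch that walks the entries under that assumed layout:

#include <stdio.h>

int main(void)
{
	char path[128], buf[32];
	int nr;

	for (nr = 0; ; nr++) {
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/firmware/efi/runtime-map/%d/phys_addr", nr);
		f = fopen(path, "r");
		if (!f)
			break;	/* no more entries */
		if (fgets(buf, sizeof(buf), f))
			printf("entry %d: phys_addr %s", nr, buf);
		fclose(f);
	}
	return 0;
}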
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index 2f21b0bfe653..29c8cdda82a1 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -12,8 +12,7 @@ menu "Google Firmware Drivers"
config GOOGLE_SMI
tristate "SMI interface for Google platforms"
- depends on ACPI && DMI
- select EFI
+ depends on ACPI && DMI && EFI
select EFI_VARS
help
Say Y here if you want to enable SMI callbacks for Google
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index e2e04b007e15..17cf96c45f2b 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -324,7 +324,7 @@ int __init firmware_map_add_early(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
- entry = alloc_bootmem(sizeof(struct firmware_map_entry));
+ entry = memblock_virt_alloc(sizeof(struct firmware_map_entry), 0);
if (WARN_ON(!entry))
return -ENOMEM;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 0f0444475bf0..903f24d28ba0 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -110,11 +110,18 @@ comment "Memory mapped GPIO drivers:"
config GPIO_CLPS711X
tristate "CLPS711X GPIO support"
- depends on ARCH_CLPS711X
+ depends on ARCH_CLPS711X || COMPILE_TEST
select GPIO_GENERIC
help
Say yes here to support GPIO on CLPS711X SoCs.
+config GPIO_DAVINCI
+ bool "TI Davinci/Keystone GPIO support"
+ default y if ARCH_DAVINCI
+ depends on ARM && (ARCH_DAVINCI || ARCH_KEYSTONE)
+ help
+ Say yes here to enable GPIO support for TI Davinci/Keystone SoCs.
+
config GPIO_GENERIC_PLATFORM
tristate "Generic memory-mapped GPIO controller support (MMIO platform device)"
select GPIO_GENERIC
@@ -156,6 +163,13 @@ config GPIO_F7188X
To compile this driver as a module, choose M here: the module will
be called f7188x-gpio.
+config GPIO_MOXART
+ bool "MOXART GPIO support"
+ depends on ARCH_MOXART
+ help
+ Select this option to enable GPIO driver for
+ MOXA ART SoC devices.
+
config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
@@ -237,6 +251,15 @@ config GPIO_SAMSUNG
Legacy GPIO support. Use only for platforms without support for
pinctrl.
+config GPIO_SCH311X
+ tristate "SMSC SCH311x SuperI/O GPIO"
+ help
+	  Driver to enable the GPIOs found on SMSC SCH3112, SCH3114 and
+ SCH3116 "Super I/O" chipsets.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gpio-sch311x.
+
config GPIO_SPEAR_SPICS
bool "ST SPEAr13xx SPI Chip Select as GPIO support"
depends on PLAT_SPEAR
@@ -281,6 +304,15 @@ config GPIO_XILINX
help
Say yes here to support the Xilinx FPGA GPIO device
+config GPIO_XTENSA
+ bool "Xtensa GPIO32 support"
+ depends on XTENSA
+ depends on HAVE_XTENSA_GPIO32
+ depends on !SMP
+ help
+ Say yes here to support the Xtensa internal GPIO32 IMPWIRE (input)
+ and EXPSTATE (output) ports
+
config GPIO_VR41XX
tristate "NEC VR4100 series General-purpose I/O Uint support"
depends on CPU_VR41XX
@@ -353,7 +385,7 @@ config GPIO_GE_FPGA
board computers.
config GPIO_LYNXPOINT
- bool "Intel Lynxpoint GPIO support"
+ tristate "Intel Lynxpoint GPIO support"
depends on ACPI && X86
select IRQ_DOMAIN
help
@@ -371,6 +403,7 @@ config GPIO_GRGPIO
config GPIO_TB10X
bool
+ select GENERIC_IRQ_CHIP
select OF_GPIO
comment "I2C GPIO expanders:"
@@ -381,6 +414,14 @@ config GPIO_ARIZONA
help
Support for GPIOs on Wolfson Arizona class devices.
+config GPIO_LP3943
+ tristate "TI/National Semiconductor LP3943 GPIO expander"
+ depends on MFD_LP3943
+ help
+ GPIO driver for LP3943 MFD.
+ LP3943 can be used as a GPIO expander which provides up to 16 GPIOs.
+ Open drain outputs are required for this usage.
+
config GPIO_MAX7300
tristate "Maxim MAX7300 GPIO expander"
depends on I2C
@@ -692,11 +733,13 @@ config GPIO_MAX7301
config GPIO_MCP23S08
tristate "Microchip MCP23xxx I/O expander"
+ depends on OF_GPIO
depends on (SPI_MASTER && !I2C) || I2C
help
SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
I/O expanders.
This provides a GPIO interface supporting inputs and outputs.
+	  The I2C versions of the chips can be used as interrupt controllers.
config GPIO_MC33880
tristate "Freescale MC33880 high-side/low-side switch"
@@ -707,10 +750,10 @@ config GPIO_MC33880
config GPIO_74X164
tristate "74x164 serial-in/parallel-out 8-bits shift register"
- depends on SPI_MASTER
+ depends on SPI_MASTER && OF
help
- Platform driver for 74x164 compatible serial-in/parallel-out
- 8-outputs shift registers. This driver can be used to provide access
+ Driver for 74x164 compatible serial-in/parallel-out 8-outputs
+ shift registers. This driver can be used to provide access
to more gpio outputs.
comment "AC97 GPIO expanders:"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 7971e36b8b12..5d50179ece16 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_GPIO_CLPS711X) += gpio-clps711x.o
obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
obj-$(CONFIG_GPIO_DA9055) += gpio-da9055.o
-obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
+obj-$(CONFIG_GPIO_DAVINCI) += gpio-davinci.o
obj-$(CONFIG_GPIO_EM) += gpio-em.o
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_F7188X) += gpio-f7188x.o
@@ -35,6 +35,7 @@ obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
obj-$(CONFIG_GPIO_INTEL_MID) += gpio-intel-mid.o
+obj-$(CONFIG_GPIO_LP3943) += gpio-lp3943.o
obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
+obj-$(CONFIG_GPIO_MOXART) += gpio-moxart.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
@@ -67,6 +69,7 @@ obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o
obj-$(CONFIG_GPIO_SAMSUNG) += gpio-samsung.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
+obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o
obj-$(CONFIG_GPIO_STA2X11) += gpio-sta2x11.o
@@ -95,3 +98,4 @@ obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o
obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o
obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o
+obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 1e04bf91328d..e4ae29824c32 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -12,7 +12,6 @@
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spi/spi.h>
-#include <linux/spi/74x164.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
@@ -21,7 +20,6 @@
#define GEN_74X164_NUMBER_GPIOS 8
struct gen_74x164_chip {
- struct spi_device *spi;
u8 *buffer;
struct gpio_chip gpio_chip;
struct mutex lock;
@@ -35,6 +33,7 @@ static struct gen_74x164_chip *gpio_to_74x164_chip(struct gpio_chip *gc)
static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
{
+ struct spi_device *spi = to_spi_device(chip->gpio_chip.dev);
struct spi_message message;
struct spi_transfer *msg_buf;
int i, ret = 0;
@@ -55,12 +54,12 @@ static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
* byte of the buffer will end up in the last register.
*/
for (i = chip->registers - 1; i >= 0; i--) {
- msg_buf[i].tx_buf = chip->buffer +i;
+ msg_buf[i].tx_buf = chip->buffer + i;
msg_buf[i].len = sizeof(u8);
spi_message_add_tail(msg_buf + i, &message);
}
- ret = spi_sync(chip->spi, &message);
+ ret = spi_sync(spi, &message);
kfree(msg_buf);
@@ -108,14 +107,8 @@ static int gen_74x164_direction_output(struct gpio_chip *gc,
static int gen_74x164_probe(struct spi_device *spi)
{
struct gen_74x164_chip *chip;
- struct gen_74x164_chip_platform_data *pdata;
int ret;
- if (!spi->dev.of_node) {
- dev_err(&spi->dev, "No device tree data available.\n");
- return -EINVAL;
- }
-
/*
* bits_per_word cannot be configured in platform data
*/
@@ -129,40 +122,32 @@ static int gen_74x164_probe(struct spi_device *spi)
if (!chip)
return -ENOMEM;
- pdata = dev_get_platdata(&spi->dev);
- if (pdata && pdata->base)
- chip->gpio_chip.base = pdata->base;
- else
- chip->gpio_chip.base = -1;
-
- mutex_init(&chip->lock);
-
spi_set_drvdata(spi, chip);
- chip->spi = spi;
-
chip->gpio_chip.label = spi->modalias;
chip->gpio_chip.direction_output = gen_74x164_direction_output;
chip->gpio_chip.get = gen_74x164_get_value;
chip->gpio_chip.set = gen_74x164_set_value;
+ chip->gpio_chip.base = -1;
- if (of_property_read_u32(spi->dev.of_node, "registers-number", &chip->registers)) {
- dev_err(&spi->dev, "Missing registers-number property in the DT.\n");
- ret = -EINVAL;
- goto exit_destroy;
+ if (of_property_read_u32(spi->dev.of_node, "registers-number",
+ &chip->registers)) {
+ dev_err(&spi->dev,
+ "Missing registers-number property in the DT.\n");
+ return -EINVAL;
}
chip->gpio_chip.ngpio = GEN_74X164_NUMBER_GPIOS * chip->registers;
chip->buffer = devm_kzalloc(&spi->dev, chip->registers, GFP_KERNEL);
- if (!chip->buffer) {
- ret = -ENOMEM;
- goto exit_destroy;
- }
+ if (!chip->buffer)
+ return -ENOMEM;
- chip->gpio_chip.can_sleep = 1;
+ chip->gpio_chip.can_sleep = true;
chip->gpio_chip.dev = &spi->dev;
chip->gpio_chip.owner = THIS_MODULE;
+ mutex_init(&chip->lock);
+
ret = __gen_74x164_write_config(chip);
if (ret) {
dev_err(&spi->dev, "Failed writing: %d\n", ret);
@@ -170,31 +155,23 @@ static int gen_74x164_probe(struct spi_device *spi)
}
ret = gpiochip_add(&chip->gpio_chip);
- if (ret)
- goto exit_destroy;
-
- return ret;
+ if (!ret)
+ return 0;
exit_destroy:
mutex_destroy(&chip->lock);
+
return ret;
}
static int gen_74x164_remove(struct spi_device *spi)
{
- struct gen_74x164_chip *chip;
+ struct gen_74x164_chip *chip = spi_get_drvdata(spi);
int ret;
- chip = spi_get_drvdata(spi);
- if (chip == NULL)
- return -ENODEV;
-
ret = gpiochip_remove(&chip->gpio_chip);
if (!ret)
mutex_destroy(&chip->lock);
- else
- dev_err(&spi->dev, "Failed to remove the GPIO controller: %d\n",
- ret);
return ret;
}
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index b204033acaeb..6fc6206b38bd 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -260,7 +260,7 @@ static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios)
chip->direction_output = adnp_gpio_direction_output;
chip->get = adnp_gpio_get;
chip->set = adnp_gpio_set;
- chip->can_sleep = 1;
+ chip->can_sleep = true;
if (IS_ENABLED(CONFIG_DEBUG_FS))
chip->dbg_show = adnp_gpio_dbg_show;
@@ -408,6 +408,27 @@ static void adnp_irq_bus_unlock(struct irq_data *data)
mutex_unlock(&adnp->irq_lock);
}
+static unsigned int adnp_irq_startup(struct irq_data *data)
+{
+ struct adnp *adnp = irq_data_get_irq_chip_data(data);
+
+ if (gpio_lock_as_irq(&adnp->gpio, data->hwirq))
+ dev_err(adnp->gpio.dev,
+ "unable to lock HW IRQ %lu for IRQ\n",
+ data->hwirq);
+ /* Satisfy the .enable semantics by unmasking the line */
+ adnp_irq_unmask(data);
+ return 0;
+}
+
+static void adnp_irq_shutdown(struct irq_data *data)
+{
+ struct adnp *adnp = irq_data_get_irq_chip_data(data);
+
+ adnp_irq_mask(data);
+ gpio_unlock_as_irq(&adnp->gpio, data->hwirq);
+}
+
static struct irq_chip adnp_irq_chip = {
.name = "gpio-adnp",
.irq_mask = adnp_irq_mask,
@@ -415,6 +436,8 @@ static struct irq_chip adnp_irq_chip = {
.irq_set_type = adnp_irq_set_type,
.irq_bus_lock = adnp_irq_bus_lock,
.irq_bus_sync_unlock = adnp_irq_bus_unlock,
+ .irq_startup = adnp_irq_startup,
+ .irq_shutdown = adnp_irq_shutdown,
};
static int adnp_irq_map(struct irq_domain *domain, unsigned int irq,
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index 084337d5514d..613265944e2e 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -127,7 +127,7 @@ static int adp5520_gpio_probe(struct platform_device *pdev)
gc->direction_output = adp5520_gpio_direction_output;
gc->get = adp5520_gpio_get_value;
gc->set = adp5520_gpio_set_value;
- gc->can_sleep = 1;
+ gc->can_sleep = true;
gc->base = pdata->gpio_start;
gc->ngpio = gpios;
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index 90fc4c99c024..3f190e68f973 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -380,7 +380,7 @@ static int adp5588_gpio_probe(struct i2c_client *client,
gc->direction_output = adp5588_gpio_direction_output;
gc->get = adp5588_gpio_get_value;
gc->set = adp5588_gpio_set_value;
- gc->can_sleep = 1;
+ gc->can_sleep = true;
gc->base = pdata->gpio_start;
gc->ngpio = ADP5588_MAXGPIO;
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index 710fafcdd1b1..94e9992f8904 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -60,7 +60,7 @@
* register a pci_driver, because someone else might one day
* want to register another driver on the same PCI id.
*/
-static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
+static const struct pci_device_id pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS), 0 },
{ 0, }, /* terminate list */
};
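
The gpio-amd8111 hunk above is the first of several identical conversions in this diff (bt8xx, intel-mid and ml-ioh follow): DEFINE_PCI_DEVICE_TABLE() is replaced by the open-coded const array the macro expanded to. Side by side, roughly:

    /* Before: type hidden behind a macro that is being phased out. */
    static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
            { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS), 0 },
            { 0, },
    };

    /* After: the equivalent plain declaration. */
    static const struct pci_device_id example_pci_tbl[] = {
            { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS), 0 },
            { 0, },
    };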
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index dceb5dcf9d16..29bdff558981 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -91,7 +91,7 @@ static struct gpio_chip template_chip = {
.get = arizona_gpio_get,
.direction_output = arizona_gpio_direction_out,
.set = arizona_gpio_set,
- .can_sleep = 1,
+ .can_sleep = true,
};
static int arizona_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 54c18c220a60..f32357e2d78d 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2013 Broadcom Corporation
+ * Copyright (C) 2012-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -449,12 +449,34 @@ static void bcm_kona_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static unsigned int bcm_kona_gpio_irq_startup(struct irq_data *d)
+{
+ struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
+
+ if (gpio_lock_as_irq(&kona_gpio->gpio_chip, d->hwirq))
+ dev_err(kona_gpio->gpio_chip.dev,
+ "unable to lock HW IRQ %lu for IRQ\n",
+ d->hwirq);
+ bcm_kona_gpio_irq_unmask(d);
+ return 0;
+}
+
+static void bcm_kona_gpio_irq_shutdown(struct irq_data *d)
+{
+ struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
+
+ bcm_kona_gpio_irq_mask(d);
+ gpio_unlock_as_irq(&kona_gpio->gpio_chip, d->hwirq);
+}
+
static struct irq_chip bcm_gpio_irq_chip = {
.name = "bcm-kona-gpio",
.irq_ack = bcm_kona_gpio_irq_ack,
.irq_mask = bcm_kona_gpio_irq_mask,
.irq_unmask = bcm_kona_gpio_irq_unmask,
.irq_set_type = bcm_kona_gpio_irq_set_type,
+ .irq_startup = bcm_kona_gpio_irq_startup,
+ .irq_shutdown = bcm_kona_gpio_irq_shutdown,
};
static struct __initconst of_device_id bcm_kona_gpio_of_match[] = {
@@ -635,6 +657,6 @@ static struct platform_driver bcm_kona_gpio_driver = {
module_platform_driver(bcm_kona_gpio_driver);
-MODULE_AUTHOR("Broadcom");
+MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>");
MODULE_DESCRIPTION("Broadcom Kona GPIO Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index 9dfe36fd8baf..ecb3ca2d1d10 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -169,7 +169,7 @@ static void bt8xxgpio_gpio_setup(struct bt8xxgpio *bg)
c->dbg_show = NULL;
c->base = modparam_gpiobase;
c->ngpio = BT8XXGPIO_NR_GPIOS;
- c->can_sleep = 0;
+ c->can_sleep = false;
}
static int bt8xxgpio_probe(struct pci_dev *dev,
@@ -308,7 +308,7 @@ static int bt8xxgpio_resume(struct pci_dev *pdev)
#define bt8xxgpio_resume NULL
#endif /* CONFIG_PM */
-static DEFINE_PCI_DEVICE_TABLE(bt8xxgpio_pci_tbl) = {
+static const struct pci_device_id bt8xxgpio_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT848) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT849) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT878) },
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index 0924f20fa47f..3c2ba2ad0ada 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -77,7 +77,7 @@ static int clps711x_gpio_remove(struct platform_device *pdev)
return bgpio_remove(bgc);
}
-static const struct of_device_id clps711x_gpio_ids[] = {
+static const struct of_device_id __maybe_unused clps711x_gpio_ids[] = {
{ .compatible = "cirrus,clps711x-gpio" },
{ }
};
@@ -87,7 +87,7 @@ static struct platform_driver clps711x_gpio_driver = {
.driver = {
.name = "clps711x-gpio",
.owner = THIS_MODULE,
- .of_match_table = clps711x_gpio_ids,
+ .of_match_table = of_match_ptr(clps711x_gpio_ids),
},
.probe = clps711x_gpio_probe,
.remove = clps711x_gpio_remove,
@@ -97,3 +97,4 @@ module_platform_driver(clps711x_gpio_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
MODULE_DESCRIPTION("CLPS711X GPIO driver");
+MODULE_ALIAS("platform:clps711x-gpio");
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 9b77dc05d4ad..416cdf786b05 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -200,7 +200,7 @@ static struct gpio_chip reference_gp = {
.direction_input = da9052_gpio_direction_input,
.direction_output = da9052_gpio_direction_output,
.to_irq = da9052_gpio_to_irq,
- .can_sleep = 1,
+ .can_sleep = true,
.ngpio = 16,
.base = -1,
};
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 7ef0820032bd..f992997bc301 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -134,7 +134,7 @@ static struct gpio_chip reference_gp = {
.direction_input = da9055_gpio_direction_input,
.direction_output = da9055_gpio_direction_output,
.to_irq = da9055_gpio_to_irq,
- .can_sleep = 1,
+ .can_sleep = true,
.ngpio = 3,
.base = -1,
};
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 84be70157ad6..7629b4f12b7f 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -16,8 +16,13 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/platform_data/gpio-davinci.h>
+#include <linux/irqchip/chained_irq.h>
struct davinci_gpio_regs {
u32 dir;
@@ -82,14 +87,14 @@ static inline int __davinci_direction(struct gpio_chip *chip,
u32 mask = 1 << offset;
spin_lock_irqsave(&d->lock, flags);
- temp = __raw_readl(&g->dir);
+ temp = readl_relaxed(&g->dir);
if (out) {
temp &= ~mask;
- __raw_writel(mask, value ? &g->set_data : &g->clr_data);
+ writel_relaxed(mask, value ? &g->set_data : &g->clr_data);
} else {
temp |= mask;
}
- __raw_writel(temp, &g->dir);
+ writel_relaxed(temp, &g->dir);
spin_unlock_irqrestore(&d->lock, flags);
return 0;
@@ -118,7 +123,7 @@ static int davinci_gpio_get(struct gpio_chip *chip, unsigned offset)
struct davinci_gpio_controller *d = chip2controller(chip);
struct davinci_gpio_regs __iomem *g = d->regs;
- return (1 << offset) & __raw_readl(&g->in_data);
+ return (1 << offset) & readl_relaxed(&g->in_data);
}
/*
@@ -130,7 +135,41 @@ davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
struct davinci_gpio_controller *d = chip2controller(chip);
struct davinci_gpio_regs __iomem *g = d->regs;
- __raw_writel((1 << offset), value ? &g->set_data : &g->clr_data);
+ writel_relaxed((1 << offset), value ? &g->set_data : &g->clr_data);
+}
+
+static struct davinci_gpio_platform_data *
+davinci_gpio_get_pdata(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct davinci_gpio_platform_data *pdata;
+ int ret;
+ u32 val;
+
+ if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
+ return pdev->dev.platform_data;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ ret = of_property_read_u32(dn, "ti,ngpio", &val);
+ if (ret)
+ goto of_err;
+
+ pdata->ngpio = val;
+
+ ret = of_property_read_u32(dn, "ti,davinci-gpio-unbanked", &val);
+ if (ret)
+ goto of_err;
+
+ pdata->gpio_unbanked = val;
+
+ return pdata;
+
+of_err:
+ dev_err(&pdev->dev, "Populating pdata from DT failed: err %d\n", ret);
+ return NULL;
}
static int davinci_gpio_probe(struct platform_device *pdev)
@@ -143,12 +182,14 @@ static int davinci_gpio_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
- pdata = dev->platform_data;
+ pdata = davinci_gpio_get_pdata(pdev);
if (!pdata) {
dev_err(dev, "No platform data found\n");
return -EINVAL;
}
+ dev->platform_data = pdata;
+
/*
* The gpio banks conceptually expose a segmented bitmap,
* and "ngpio" is one more than the largest zero-based
@@ -160,8 +201,8 @@ static int davinci_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (WARN_ON(DAVINCI_N_GPIO < ngpio))
- ngpio = DAVINCI_N_GPIO;
+ if (WARN_ON(ARCH_NR_GPIOS < ngpio))
+ ngpio = ARCH_NR_GPIOS;
chips = devm_kzalloc(dev,
ngpio * sizeof(struct davinci_gpio_controller),
@@ -194,6 +235,9 @@ static int davinci_gpio_probe(struct platform_device *pdev)
if (chips[i].chip.ngpio > 32)
chips[i].chip.ngpio = 32;
+#ifdef CONFIG_OF_GPIO
+ chips[i].chip.of_node = dev->of_node;
+#endif
spin_lock_init(&chips[i].lock);
regs = gpio2regs(base);
@@ -227,8 +271,8 @@ static void gpio_irq_disable(struct irq_data *d)
struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
u32 mask = (u32) irq_data_get_irq_handler_data(d);
- __raw_writel(mask, &g->clr_falling);
- __raw_writel(mask, &g->clr_rising);
+ writel_relaxed(mask, &g->clr_falling);
+ writel_relaxed(mask, &g->clr_rising);
}
static void gpio_irq_enable(struct irq_data *d)
@@ -242,9 +286,9 @@ static void gpio_irq_enable(struct irq_data *d)
status = IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
if (status & IRQ_TYPE_EDGE_FALLING)
- __raw_writel(mask, &g->set_falling);
+ writel_relaxed(mask, &g->set_falling);
if (status & IRQ_TYPE_EDGE_RISING)
- __raw_writel(mask, &g->set_rising);
+ writel_relaxed(mask, &g->set_rising);
}
static int gpio_irq_type(struct irq_data *d, unsigned trigger)
@@ -278,34 +322,28 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
mask <<= 16;
/* temporarily mask (level sensitive) parent IRQ */
- desc->irq_data.chip->irq_mask(&desc->irq_data);
- desc->irq_data.chip->irq_ack(&desc->irq_data);
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
while (1) {
u32 status;
- int n;
- int res;
+ int bit;
/* ack any irqs */
- status = __raw_readl(&g->intstat) & mask;
+ status = readl_relaxed(&g->intstat) & mask;
if (!status)
break;
- __raw_writel(status, &g->intstat);
+ writel_relaxed(status, &g->intstat);
/* now demux them to the right lowlevel handler */
- n = d->irq_base;
- if (irq & 1) {
- n += 16;
- status >>= 16;
- }
while (status) {
- res = ffs(status);
- n += res;
- generic_handle_irq(n - 1);
- status >>= res;
+ bit = __ffs(status);
+ status &= ~BIT(bit);
+ generic_handle_irq(
+ irq_find_mapping(d->irq_domain,
+ d->chip.base + bit));
}
}
- desc->irq_data.chip->irq_unmask(&desc->irq_data);
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
/* now it may re-trigger */
}
@@ -313,10 +351,10 @@ static int gpio_to_irq_banked(struct gpio_chip *chip, unsigned offset)
{
struct davinci_gpio_controller *d = chip2controller(chip);
- if (d->irq_base >= 0)
- return d->irq_base + offset;
+ if (d->irq_domain)
+ return irq_create_mapping(d->irq_domain, d->chip.base + offset);
else
- return -ENODEV;
+ return -ENXIO;
}
static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset)
@@ -346,14 +384,35 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
return -EINVAL;
- __raw_writel(mask, (trigger & IRQ_TYPE_EDGE_FALLING)
+ writel_relaxed(mask, (trigger & IRQ_TYPE_EDGE_FALLING)
? &g->set_falling : &g->clr_falling);
- __raw_writel(mask, (trigger & IRQ_TYPE_EDGE_RISING)
+ writel_relaxed(mask, (trigger & IRQ_TYPE_EDGE_RISING)
? &g->set_rising : &g->clr_rising);
return 0;
}
+static int
+davinci_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct davinci_gpio_regs __iomem *g = gpio2regs(hw);
+
+ irq_set_chip_and_handler_name(irq, &gpio_irqchip, handle_simple_irq,
+ "davinci_gpio");
+ irq_set_irq_type(irq, IRQ_TYPE_NONE);
+ irq_set_chip_data(irq, (__force void *)g);
+ irq_set_handler_data(irq, (void *)__gpio_mask(hw));
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops davinci_gpio_irq_ops = {
+ .map = davinci_gpio_irq_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
/*
* NOTE: for suspend/resume, probably best to make a platform_device with
* suspend_late/resume_resume calls hooking into results of the set_wake()
@@ -373,6 +432,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
struct davinci_gpio_controller *chips = platform_get_drvdata(pdev);
struct davinci_gpio_platform_data *pdata = dev->platform_data;
struct davinci_gpio_regs __iomem *g;
+ struct irq_domain *irq_domain = NULL;
ngpio = pdata->ngpio;
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -396,6 +456,22 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
}
clk_prepare_enable(clk);
+ if (!pdata->gpio_unbanked) {
+ irq = irq_alloc_descs(-1, 0, ngpio, 0);
+ if (irq < 0) {
+ dev_err(dev, "Couldn't allocate IRQ numbers\n");
+ return irq;
+ }
+
+ irq_domain = irq_domain_add_legacy(NULL, ngpio, irq, 0,
+ &davinci_gpio_irq_ops,
+ chips);
+ if (!irq_domain) {
+ dev_err(dev, "Couldn't register an IRQ domain\n");
+ return -ENODEV;
+ }
+ }
+
/*
* Arrange gpio_to_irq() support, handling either direct IRQs or
* banked IRQs. Having GPIOs in the first GPIO bank use direct
@@ -404,9 +480,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
*/
for (gpio = 0, bank = 0; gpio < ngpio; bank++, gpio += 32) {
chips[bank].chip.to_irq = gpio_to_irq_banked;
- chips[bank].irq_base = pdata->gpio_unbanked
- ? -EINVAL
- : (pdata->intc_irq_num + gpio);
+ chips[bank].irq_domain = irq_domain;
}
/*
@@ -432,8 +506,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
/* default trigger: both edges */
g = gpio2regs(0);
- __raw_writel(~0, &g->set_falling);
- __raw_writel(~0, &g->set_rising);
+ writel_relaxed(~0, &g->set_falling);
+ writel_relaxed(~0, &g->set_rising);
/* set the direct IRQs up to use that irqchip */
for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++, irq++) {
@@ -449,15 +523,11 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
* Or, AINTC can handle IRQs for banks of 16 GPIO IRQs, which we
* then chain through our own handler.
*/
- for (gpio = 0, irq = gpio_to_irq(0), bank = 0;
- gpio < ngpio;
- bank++, bank_irq++) {
- unsigned i;
-
+ for (gpio = 0, bank = 0; gpio < ngpio; bank++, bank_irq++, gpio += 16) {
/* disabled by default, enabled only as needed */
g = gpio2regs(gpio);
- __raw_writel(~0, &g->clr_falling);
- __raw_writel(~0, &g->clr_rising);
+ writel_relaxed(~0, &g->clr_falling);
+ writel_relaxed(~0, &g->clr_rising);
/* set up all irqs in this bank */
irq_set_chained_handler(bank_irq, gpio_irq_handler);
@@ -469,14 +539,6 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
*/
irq_set_handler_data(bank_irq, &chips[gpio / 32]);
- for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
- irq_set_chip(irq, &gpio_irqchip);
- irq_set_chip_data(irq, (__force void *)g);
- irq_set_handler_data(irq, (void *)__gpio_mask(gpio));
- irq_set_handler(irq, handle_simple_irq);
- set_irq_flags(irq, IRQF_VALID);
- }
-
binten |= BIT(bank);
}
@@ -485,18 +547,25 @@ done:
* BINTEN -- per-bank interrupt enable. genirq would also let these
* bits be set/cleared dynamically.
*/
- __raw_writel(binten, gpio_base + BINTEN);
-
- printk(KERN_INFO "DaVinci: %d gpio irqs\n", irq - gpio_to_irq(0));
+ writel_relaxed(binten, gpio_base + BINTEN);
return 0;
}
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id davinci_gpio_ids[] = {
+ { .compatible = "ti,dm6441-gpio", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, davinci_gpio_ids);
+#endif
+
static struct platform_driver davinci_gpio_driver = {
.probe = davinci_gpio_probe,
.driver = {
- .name = "davinci_gpio",
- .owner = THIS_MODULE,
+ .name = "davinci_gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(davinci_gpio_ids),
},
};
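
The bulk of the DaVinci rework above replaces manual IRQ-number arithmetic with an irq_domain lookup inside the chained handler. Pulled out of the hunks into one readable piece (with d as the per-bank controller and g its register block, as in the driver), the demux loop is:

    /* Sketch of the banked demux loop: ack the bank, then fan out each
     * pending bit through the irq_domain mapping. */
    chained_irq_enter(irq_desc_get_chip(desc), desc);

    while (1) {
            u32 status = readl_relaxed(&g->intstat) & mask;
            int bit;

            if (!status)
                    break;
            writel_relaxed(status, &g->intstat);    /* ack this bank */

            while (status) {
                    bit = __ffs(status);            /* lowest pending line */
                    status &= ~BIT(bit);
                    generic_handle_irq(irq_find_mapping(d->irq_domain,
                                                        d->chip.base + bit));
            }
    }

    chained_irq_exit(irq_desc_get_chip(desc), desc);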
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index ec190361bf2e..1e98a9873967 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -99,6 +99,27 @@ static void em_gio_irq_enable(struct irq_data *d)
em_gio_write(p, GIO_IEN, BIT(irqd_to_hwirq(d)));
}
+static unsigned int em_gio_irq_startup(struct irq_data *d)
+{
+ struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
+
+ if (gpio_lock_as_irq(&p->gpio_chip, irqd_to_hwirq(d)))
+ dev_err(p->gpio_chip.dev,
+ "unable to lock HW IRQ %lu for IRQ\n",
+ irqd_to_hwirq(d));
+ em_gio_irq_enable(d);
+ return 0;
+}
+
+static void em_gio_irq_shutdown(struct irq_data *d)
+{
+ struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
+
+ em_gio_irq_disable(d);
+ gpio_unlock_as_irq(&p->gpio_chip, irqd_to_hwirq(d));
+}
+
+
#define GIO_ASYNC(x) (x + 8)
static unsigned char em_gio_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
@@ -328,6 +349,7 @@ static int em_gio_probe(struct platform_device *pdev)
gpio_chip->request = em_gio_request;
gpio_chip->free = em_gio_free;
gpio_chip->label = name;
+ gpio_chip->dev = &pdev->dev;
gpio_chip->owner = THIS_MODULE;
gpio_chip->base = pdata->gpio_base;
gpio_chip->ngpio = pdata->number_of_pins;
@@ -336,10 +358,10 @@ static int em_gio_probe(struct platform_device *pdev)
irq_chip->name = name;
irq_chip->irq_mask = em_gio_irq_disable;
irq_chip->irq_unmask = em_gio_irq_enable;
- irq_chip->irq_enable = em_gio_irq_enable;
- irq_chip->irq_disable = em_gio_irq_disable;
irq_chip->irq_set_type = em_gio_irq_set_type;
- irq_chip->flags = IRQCHIP_SKIP_SET_WAKE;
+ irq_chip->irq_startup = em_gio_irq_startup;
+ irq_chip->irq_shutdown = em_gio_irq_shutdown;
+ irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
pdata->number_of_pins,
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index 9cb8320e1181..8f73ee093739 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -135,6 +135,7 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value);
.set = f7188x_gpio_set, \
.base = _base, \
.ngpio = _ngpio, \
+ .can_sleep = true, \
}, \
.regbase = _regbase, \
}
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 814addb62d2c..f5bf3c38bca6 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -252,7 +252,7 @@ static void ichx_gpiolib_setup(struct gpio_chip *chip)
chip->direction_output = ichx_gpio_direction_output;
chip->base = modparam_gpiobase;
chip->ngpio = ichx_priv.desc->ngpio;
- chip->can_sleep = 0;
+ chip->can_sleep = false;
chip->dbg_show = NULL;
}
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index be803af658ac..e585163f1ad5 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -235,11 +235,33 @@ static void intel_mid_irq_mask(struct irq_data *d)
{
}
+static unsigned int intel_mid_irq_startup(struct irq_data *d)
+{
+ struct intel_mid_gpio *priv = irq_data_get_irq_chip_data(d);
+
+ if (gpio_lock_as_irq(&priv->chip, irqd_to_hwirq(d)))
+ dev_err(priv->chip.dev,
+ "unable to lock HW IRQ %lu for IRQ\n",
+ irqd_to_hwirq(d));
+ intel_mid_irq_unmask(d);
+ return 0;
+}
+
+static void intel_mid_irq_shutdown(struct irq_data *d)
+{
+ struct intel_mid_gpio *priv = irq_data_get_irq_chip_data(d);
+
+ intel_mid_irq_mask(d);
+ gpio_unlock_as_irq(&priv->chip, irqd_to_hwirq(d));
+}
+
static struct irq_chip intel_mid_irqchip = {
.name = "INTEL_MID-GPIO",
.irq_mask = intel_mid_irq_mask,
.irq_unmask = intel_mid_irq_unmask,
.irq_set_type = intel_mid_irq_type,
+ .irq_startup = intel_mid_irq_startup,
+ .irq_shutdown = intel_mid_irq_shutdown,
};
static const struct intel_mid_gpio_ddata gpio_lincroft = {
@@ -275,7 +297,7 @@ static const struct intel_mid_gpio_ddata gpio_tangier = {
.chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
};
-static DEFINE_PCI_DEVICE_TABLE(intel_gpio_ids) = {
+static const struct pci_device_id intel_gpio_ids[] = {
{
/* Lincroft */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f),
@@ -358,8 +380,7 @@ static int intel_gpio_irq_map(struct irq_domain *d, unsigned int irq,
{
struct intel_mid_gpio *priv = d->host_data;
- irq_set_chip_and_handler_name(irq, &intel_mid_irqchip,
- handle_simple_irq, "demux");
+ irq_set_chip_and_handler(irq, &intel_mid_irqchip, handle_simple_irq);
irq_set_chip_data(irq, priv);
irq_set_irq_type(irq, IRQ_TYPE_NONE);
@@ -373,8 +394,8 @@ static const struct irq_domain_ops intel_gpio_irq_ops = {
static int intel_gpio_runtime_idle(struct device *dev)
{
- pm_schedule_suspend(dev, 500);
- return -EBUSY;
+ int err = pm_schedule_suspend(dev, 500);
+ return err ?: -EBUSY;
}
static const struct dev_pm_ops intel_gpio_pm_ops = {
@@ -418,6 +439,7 @@ static int intel_gpio_probe(struct pci_dev *pdev,
priv->reg_base = pcim_iomap_table(pdev)[0];
priv->chip.label = dev_name(&pdev->dev);
+ priv->chip.dev = &pdev->dev;
priv->chip.request = intel_gpio_request;
priv->chip.direction_input = intel_gpio_direction_input;
priv->chip.direction_output = intel_gpio_direction_output;
@@ -426,7 +448,7 @@ static int intel_gpio_probe(struct pci_dev *pdev,
priv->chip.to_irq = intel_gpio_to_irq;
priv->chip.base = gpio_base;
priv->chip.ngpio = ddata->ngpio;
- priv->chip.can_sleep = 0;
+ priv->chip.can_sleep = false;
priv->pdev = pdev;
spin_lock_init(&priv->lock);
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index efdc3924d7df..c6d88173f5a2 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -167,7 +167,7 @@ static int kempld_gpio_probe(struct platform_device *pdev)
chip->label = "gpio-kempld";
chip->owner = THIS_MODULE;
chip->dev = dev;
- chip->can_sleep = 1;
+ chip->can_sleep = true;
if (pdata && pdata->gpio_base)
chip->base = pdata->gpio_base;
else
diff --git a/drivers/gpio/gpio-ks8695.c b/drivers/gpio/gpio-ks8695.c
index a3ac66ea364b..464a83de0d6a 100644
--- a/drivers/gpio/gpio-ks8695.c
+++ b/drivers/gpio/gpio-ks8695.c
@@ -228,7 +228,7 @@ static struct gpio_chip ks8695_gpio_chip = {
.to_irq = ks8695_gpio_to_irq,
.base = 0,
.ngpio = 16,
- .can_sleep = 0,
+ .can_sleep = false,
};
/* Register the GPIOs */
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
new file mode 100644
index 000000000000..a0341c92bcb4
--- /dev/null
+++ b/drivers/gpio/gpio-lp3943.c
@@ -0,0 +1,242 @@
+/*
+ * TI/National Semiconductor LP3943 GPIO driver
+ *
+ * Copyright 2013 Texas Instruments
+ *
+ * Author: Milo Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/lp3943.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+enum lp3943_gpios {
+ LP3943_GPIO1,
+ LP3943_GPIO2,
+ LP3943_GPIO3,
+ LP3943_GPIO4,
+ LP3943_GPIO5,
+ LP3943_GPIO6,
+ LP3943_GPIO7,
+ LP3943_GPIO8,
+ LP3943_GPIO9,
+ LP3943_GPIO10,
+ LP3943_GPIO11,
+ LP3943_GPIO12,
+ LP3943_GPIO13,
+ LP3943_GPIO14,
+ LP3943_GPIO15,
+ LP3943_GPIO16,
+ LP3943_MAX_GPIO,
+};
+
+struct lp3943_gpio {
+ struct gpio_chip chip;
+ struct lp3943 *lp3943;
+ u16 input_mask; /* 1 = GPIO is input direction, 0 = output */
+};
+
+static inline struct lp3943_gpio *to_lp3943_gpio(struct gpio_chip *_chip)
+{
+ return container_of(_chip, struct lp3943_gpio, chip);
+}
+
+static int lp3943_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct lp3943_gpio *lp3943_gpio = to_lp3943_gpio(chip);
+ struct lp3943 *lp3943 = lp3943_gpio->lp3943;
+
+ /* Return an error if the pin is already assigned */
+ if (test_and_set_bit(offset, &lp3943->pin_used))
+ return -EBUSY;
+
+ return 0;
+}
+
+static void lp3943_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct lp3943_gpio *lp3943_gpio = to_lp3943_gpio(chip);
+ struct lp3943 *lp3943 = lp3943_gpio->lp3943;
+
+ clear_bit(offset, &lp3943->pin_used);
+}
+
+static int lp3943_gpio_set_mode(struct lp3943_gpio *lp3943_gpio, u8 offset,
+ u8 val)
+{
+ struct lp3943 *lp3943 = lp3943_gpio->lp3943;
+ const struct lp3943_reg_cfg *mux = lp3943->mux_cfg;
+
+ return lp3943_update_bits(lp3943, mux[offset].reg, mux[offset].mask,
+ val << mux[offset].shift);
+}
+
+static int lp3943_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct lp3943_gpio *lp3943_gpio = to_lp3943_gpio(chip);
+
+ lp3943_gpio->input_mask |= BIT(offset);
+
+ return lp3943_gpio_set_mode(lp3943_gpio, offset, LP3943_GPIO_IN);
+}
+
+static int lp3943_get_gpio_in_status(struct lp3943_gpio *lp3943_gpio,
+ struct gpio_chip *chip, unsigned offset)
+{
+ u8 addr, read;
+ int err;
+
+ switch (offset) {
+ case LP3943_GPIO1 ... LP3943_GPIO8:
+ addr = LP3943_REG_GPIO_A;
+ break;
+ case LP3943_GPIO9 ... LP3943_GPIO16:
+ addr = LP3943_REG_GPIO_B;
+ offset = offset - 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = lp3943_read_byte(lp3943_gpio->lp3943, addr, &read);
+ if (err)
+ return err;
+
+ return !!(read & BIT(offset));
+}
+
+static int lp3943_get_gpio_out_status(struct lp3943_gpio *lp3943_gpio,
+ struct gpio_chip *chip, unsigned offset)
+{
+ struct lp3943 *lp3943 = lp3943_gpio->lp3943;
+ const struct lp3943_reg_cfg *mux = lp3943->mux_cfg;
+ u8 read;
+ int err;
+
+ err = lp3943_read_byte(lp3943, mux[offset].reg, &read);
+ if (err)
+ return err;
+
+ read = (read & mux[offset].mask) >> mux[offset].shift;
+
+ if (read == LP3943_GPIO_OUT_HIGH)
+ return 1;
+ else if (read == LP3943_GPIO_OUT_LOW)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int lp3943_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct lp3943_gpio *lp3943_gpio = to_lp3943_gpio(chip);
+
+ /*
+ * Limitation:
+ * The LP3943 has no GPIO direction register; it provides only
+ * input and output status registers, so direction info is needed
+ * to handle the 'get' operation. The input_mask field is updated
+ * whenever the direction changes and is consulted here.
+ */
+
+ if (lp3943_gpio->input_mask & BIT(offset))
+ return lp3943_get_gpio_in_status(lp3943_gpio, chip, offset);
+ else
+ return lp3943_get_gpio_out_status(lp3943_gpio, chip, offset);
+}
+
+static void lp3943_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct lp3943_gpio *lp3943_gpio = to_lp3943_gpio(chip);
+ u8 data;
+
+ if (value)
+ data = LP3943_GPIO_OUT_HIGH;
+ else
+ data = LP3943_GPIO_OUT_LOW;
+
+ lp3943_gpio_set_mode(lp3943_gpio, offset, data);
+}
+
+static int lp3943_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct lp3943_gpio *lp3943_gpio = to_lp3943_gpio(chip);
+
+ lp3943_gpio_set(chip, offset, value);
+ lp3943_gpio->input_mask &= ~BIT(offset);
+
+ return 0;
+}
+
+static const struct gpio_chip lp3943_gpio_chip = {
+ .label = "lp3943",
+ .owner = THIS_MODULE,
+ .request = lp3943_gpio_request,
+ .free = lp3943_gpio_free,
+ .direction_input = lp3943_gpio_direction_input,
+ .get = lp3943_gpio_get,
+ .direction_output = lp3943_gpio_direction_output,
+ .set = lp3943_gpio_set,
+ .base = -1,
+ .ngpio = LP3943_MAX_GPIO,
+ .can_sleep = true,
+};
+
+static int lp3943_gpio_probe(struct platform_device *pdev)
+{
+ struct lp3943 *lp3943 = dev_get_drvdata(pdev->dev.parent);
+ struct lp3943_gpio *lp3943_gpio;
+
+ lp3943_gpio = devm_kzalloc(&pdev->dev, sizeof(*lp3943_gpio),
+ GFP_KERNEL);
+ if (!lp3943_gpio)
+ return -ENOMEM;
+
+ lp3943_gpio->lp3943 = lp3943;
+ lp3943_gpio->chip = lp3943_gpio_chip;
+ lp3943_gpio->chip.dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, lp3943_gpio);
+
+ return gpiochip_add(&lp3943_gpio->chip);
+}
+
+static int lp3943_gpio_remove(struct platform_device *pdev)
+{
+ struct lp3943_gpio *lp3943_gpio = platform_get_drvdata(pdev);
+
+ return gpiochip_remove(&lp3943_gpio->chip);
+}
+
+static const struct of_device_id lp3943_gpio_of_match[] = {
+ { .compatible = "ti,lp3943-gpio", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lp3943_gpio_of_match);
+
+static struct platform_driver lp3943_gpio_driver = {
+ .probe = lp3943_gpio_probe,
+ .remove = lp3943_gpio_remove,
+ .driver = {
+ .name = "lp3943-gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = lp3943_gpio_of_match,
+ },
+};
+module_platform_driver(lp3943_gpio_driver);
+
+MODULE_DESCRIPTION("LP3943 GPIO driver");
+MODULE_ALIAS("platform:lp3943-gpio");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 2d5555decf0c..225344d66404 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -25,10 +25,10 @@
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/module.h>
+#include <linux/platform_data/gpio-lpc32xx.h>
#include <mach/hardware.h>
#include <mach/platform.h>
-#include <mach/gpio-lpc32xx.h>
#include <mach/irqs.h>
#define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000)
@@ -448,7 +448,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.base = LPC32XX_GPIO_P0_GRP,
.ngpio = LPC32XX_GPIO_P0_MAX,
.names = gpio_p0_names,
- .can_sleep = 0,
+ .can_sleep = false,
},
.gpio_grp = &gpio_grp_regs_p0,
},
@@ -464,7 +464,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.base = LPC32XX_GPIO_P1_GRP,
.ngpio = LPC32XX_GPIO_P1_MAX,
.names = gpio_p1_names,
- .can_sleep = 0,
+ .can_sleep = false,
},
.gpio_grp = &gpio_grp_regs_p1,
},
@@ -479,7 +479,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.base = LPC32XX_GPIO_P2_GRP,
.ngpio = LPC32XX_GPIO_P2_MAX,
.names = gpio_p2_names,
- .can_sleep = 0,
+ .can_sleep = false,
},
.gpio_grp = &gpio_grp_regs_p2,
},
@@ -495,7 +495,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.base = LPC32XX_GPIO_P3_GRP,
.ngpio = LPC32XX_GPIO_P3_MAX,
.names = gpio_p3_names,
- .can_sleep = 0,
+ .can_sleep = false,
},
.gpio_grp = &gpio_grp_regs_p3,
},
@@ -509,7 +509,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.base = LPC32XX_GPI_P3_GRP,
.ngpio = LPC32XX_GPI_P3_MAX,
.names = gpi_p3_names,
- .can_sleep = 0,
+ .can_sleep = false,
},
.gpio_grp = &gpio_grp_regs_p3,
},
@@ -523,7 +523,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.base = LPC32XX_GPO_P3_GRP,
.ngpio = LPC32XX_GPO_P3_MAX,
.names = gpo_p3_names,
- .can_sleep = 0,
+ .can_sleep = false,
},
.gpio_grp = &gpio_grp_regs_p3,
},
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index a0804740a0b7..66b18535b5ae 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -301,6 +301,26 @@ static void lp_irq_disable(struct irq_data *d)
spin_unlock_irqrestore(&lg->lock, flags);
}
+static unsigned int lp_irq_startup(struct irq_data *d)
+{
+ struct lp_gpio *lg = irq_data_get_irq_chip_data(d);
+
+ if (gpio_lock_as_irq(&lg->chip, irqd_to_hwirq(d)))
+ dev_err(lg->chip.dev,
+ "unable to lock HW IRQ %lu for IRQ\n",
+ irqd_to_hwirq(d));
+ lp_irq_enable(d);
+ return 0;
+}
+
+static void lp_irq_shutdown(struct irq_data *d)
+{
+ struct lp_gpio *lg = irq_data_get_irq_chip_data(d);
+
+ lp_irq_disable(d);
+ gpio_unlock_as_irq(&lg->chip, irqd_to_hwirq(d));
+}
+
static struct irq_chip lp_irqchip = {
.name = "LP-GPIO",
.irq_mask = lp_irq_mask,
@@ -308,6 +328,8 @@ static struct irq_chip lp_irqchip = {
.irq_enable = lp_irq_enable,
.irq_disable = lp_irq_disable,
.irq_set_type = lp_irq_type,
+ .irq_startup = lp_irq_startup,
+ .irq_shutdown = lp_irq_shutdown,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -331,8 +353,7 @@ static int lp_gpio_irq_map(struct irq_domain *d, unsigned int irq,
{
struct lp_gpio *lg = d->host_data;
- irq_set_chip_and_handler_name(irq, &lp_irqchip, handle_simple_irq,
- "demux");
+ irq_set_chip_and_handler(irq, &lp_irqchip, handle_simple_irq);
irq_set_chip_data(irq, lg);
irq_set_irq_type(irq, IRQ_TYPE_NONE);
@@ -392,7 +413,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
gc->set = lp_gpio_set;
gc->base = -1;
gc->ngpio = LP_NUM_GPIO;
- gc->can_sleep = 0;
+ gc->can_sleep = false;
gc->dev = dev;
/* set up interrupts */
@@ -438,6 +459,7 @@ static const struct dev_pm_ops lp_gpio_pm_ops = {
static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
{ "INT33C7", 0 },
+ { "INT3437", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match);
@@ -469,4 +491,15 @@ static int __init lp_gpio_init(void)
return platform_driver_register(&lp_gpio_driver);
}
+static void __exit lp_gpio_exit(void)
+{
+ platform_driver_unregister(&lp_gpio_driver);
+}
+
subsys_initcall(lp_gpio_init);
+module_exit(lp_gpio_exit);
+
+MODULE_AUTHOR("Mathias Nyman (Intel)");
+MODULE_DESCRIPTION("GPIO interface for Intel Lynxpoint");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lp_gpio");
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index f4f4ed19bdc1..8672755f95c9 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -188,7 +188,7 @@ int __max730x_probe(struct max7301 *ts)
ts->chip.set = max7301_set;
ts->chip.ngpio = PIN_NUMBER;
- ts->chip.can_sleep = 1;
+ ts->chip.can_sleep = true;
ts->chip.dev = dev;
ts->chip.owner = THIS_MODULE;
@@ -220,7 +220,6 @@ int __max730x_probe(struct max7301 *ts)
return ret;
exit_destroy:
- dev_set_drvdata(dev, NULL);
mutex_destroy(&ts->lock);
return ret;
}
@@ -234,8 +233,6 @@ int __max730x_remove(struct device *dev)
if (ts == NULL)
return -ENODEV;
- dev_set_drvdata(dev, NULL);
-
/* Power down the chip and disable IRQ output */
ts->write(dev, 0x04, 0x00);
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 91ad74dea8ce..36cb290764b6 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -564,7 +564,7 @@ static int max732x_setup_gpio(struct max732x_chip *chip,
gc->set = max732x_gpio_set_value;
}
gc->get = max732x_gpio_get_value;
- gc->can_sleep = 1;
+ gc->can_sleep = true;
gc->base = gpio_start;
gc->ngpio = port;
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index c0b7835f5136..553a80a5eaf3 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -115,7 +115,7 @@ static int mc33880_probe(struct spi_device *spi)
mc->chip.set = mc33880_set;
mc->chip.base = pdata->base;
mc->chip.ngpio = PIN_NUMBER;
- mc->chip.can_sleep = 1;
+ mc->chip.can_sleep = true;
mc->chip.dev = &spi->dev;
mc->chip.owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-mc9s08dz60.c b/drivers/gpio/gpio-mc9s08dz60.c
index 0ab700046a23..dce35ff00db7 100644
--- a/drivers/gpio/gpio-mc9s08dz60.c
+++ b/drivers/gpio/gpio-mc9s08dz60.c
@@ -102,7 +102,7 @@ static int mc9s08dz60_probe(struct i2c_client *client,
mc9s->chip.dev = &client->dev;
mc9s->chip.owner = THIS_MODULE;
mc9s->chip.ngpio = GPIO_NUM;
- mc9s->chip.can_sleep = 1;
+ mc9s->chip.can_sleep = true;
mc9s->chip.get = mc9s08dz60_get_value;
mc9s->chip.set = mc9s08dz60_set_value;
mc9s->chip.direction_output = mc9s08dz60_direction_output;
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 2deb0c5e54a4..1ac288ea810d 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -1,5 +1,13 @@
/*
- * MCP23S08 SPI/GPIO gpio expander driver
+ * MCP23S08 SPI/I2C GPIO expander driver
+ *
+ * The inputs and outputs of the mcp23s08, mcp23s17, mcp23008 and mcp23017 are
+ * supported.
+ * For the I2C versions of the chips (mcp23008 and mcp23017) generation of
+ * interrupts is also supported.
+ * The hardware of the SPI versions of the chips (mcp23s08 and mcp23s17) is
+ * also capable of generating interrupts, but the Linux driver does not
+ * support that yet.
*/
#include <linux/kernel.h>
@@ -12,7 +20,8 @@
#include <linux/spi/mcp23s08.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
-#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
#include <linux/of_device.h>
/**
@@ -34,6 +43,7 @@
#define MCP_DEFVAL 0x03
#define MCP_INTCON 0x04
#define MCP_IOCON 0x05
+# define IOCON_MIRROR (1 << 6)
# define IOCON_SEQOP (1 << 5)
# define IOCON_HAEN (1 << 3)
# define IOCON_ODR (1 << 2)
@@ -57,8 +67,14 @@ struct mcp23s08 {
u8 addr;
u16 cache[11];
+ u16 irq_rise;
+ u16 irq_fall;
+ int irq;
+ bool irq_controller;
/* lock protects the cached values */
struct mutex lock;
+ struct mutex irq_lock;
+ struct irq_domain *irq_domain;
struct gpio_chip chip;
@@ -77,6 +93,11 @@ struct mcp23s08_driver_data {
struct mcp23s08 chip[];
};
+/* This lock class tells lockdep that GPIO irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key gpio_lock_class;
+
/*----------------------------------------------------------------------*/
#if IS_ENABLED(CONFIG_I2C)
@@ -316,6 +337,195 @@ mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
}
/*----------------------------------------------------------------------*/
+static irqreturn_t mcp23s08_irq(int irq, void *data)
+{
+ struct mcp23s08 *mcp = data;
+ int intcap, intf, i;
+ unsigned int child_irq;
+
+ mutex_lock(&mcp->lock);
+ intf = mcp->ops->read(mcp, MCP_INTF);
+ if (intf < 0) {
+ mutex_unlock(&mcp->lock);
+ return IRQ_HANDLED;
+ }
+
+ mcp->cache[MCP_INTF] = intf;
+
+ intcap = mcp->ops->read(mcp, MCP_INTCAP);
+ if (intcap < 0) {
+ mutex_unlock(&mcp->lock);
+ return IRQ_HANDLED;
+ }
+
+ mcp->cache[MCP_INTCAP] = intcap;
+ mutex_unlock(&mcp->lock);
+
+
+ for (i = 0; i < mcp->chip.ngpio; i++) {
+ if ((BIT(i) & mcp->cache[MCP_INTF]) &&
+ ((BIT(i) & intcap & mcp->irq_rise) ||
+ (mcp->irq_fall & ~intcap & BIT(i)))) {
+ child_irq = irq_find_mapping(mcp->irq_domain, i);
+ handle_nested_irq(child_irq);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mcp23s08_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip);
+
+ return irq_find_mapping(mcp->irq_domain, offset);
+}
+
+static void mcp23s08_irq_mask(struct irq_data *data)
+{
+ struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+ unsigned int pos = data->hwirq;
+
+ mcp->cache[MCP_GPINTEN] &= ~BIT(pos);
+}
+
+static void mcp23s08_irq_unmask(struct irq_data *data)
+{
+ struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+ unsigned int pos = data->hwirq;
+
+ mcp->cache[MCP_GPINTEN] |= BIT(pos);
+}
+
+static int mcp23s08_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+ unsigned int pos = data->hwirq;
+ int status = 0;
+
+ if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
+ mcp->cache[MCP_INTCON] &= ~BIT(pos);
+ mcp->irq_rise |= BIT(pos);
+ mcp->irq_fall |= BIT(pos);
+ } else if (type & IRQ_TYPE_EDGE_RISING) {
+ mcp->cache[MCP_INTCON] &= ~BIT(pos);
+ mcp->irq_rise |= BIT(pos);
+ mcp->irq_fall &= ~BIT(pos);
+ } else if (type & IRQ_TYPE_EDGE_FALLING) {
+ mcp->cache[MCP_INTCON] &= ~BIT(pos);
+ mcp->irq_rise &= ~BIT(pos);
+ mcp->irq_fall |= BIT(pos);
+ } else
+ return -EINVAL;
+
+ return status;
+}
+
+static void mcp23s08_irq_bus_lock(struct irq_data *data)
+{
+ struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&mcp->irq_lock);
+}
+
+static void mcp23s08_irq_bus_unlock(struct irq_data *data)
+{
+ struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&mcp->lock);
+ mcp->ops->write(mcp, MCP_GPINTEN, mcp->cache[MCP_GPINTEN]);
+ mcp->ops->write(mcp, MCP_DEFVAL, mcp->cache[MCP_DEFVAL]);
+ mcp->ops->write(mcp, MCP_INTCON, mcp->cache[MCP_INTCON]);
+ mutex_unlock(&mcp->lock);
+ mutex_unlock(&mcp->irq_lock);
+}
+
+static unsigned int mcp23s08_irq_startup(struct irq_data *data)
+{
+ struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+
+ if (gpio_lock_as_irq(&mcp->chip, data->hwirq))
+ dev_err(mcp->chip.dev,
+ "unable to lock HW IRQ %lu for IRQ usage\n",
+ data->hwirq);
+
+ mcp23s08_irq_unmask(data);
+ return 0;
+}
+
+static void mcp23s08_irq_shutdown(struct irq_data *data)
+{
+ struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+
+ mcp23s08_irq_mask(data);
+ gpio_unlock_as_irq(&mcp->chip, data->hwirq);
+}
+
+static struct irq_chip mcp23s08_irq_chip = {
+ .name = "gpio-mcp23xxx",
+ .irq_mask = mcp23s08_irq_mask,
+ .irq_unmask = mcp23s08_irq_unmask,
+ .irq_set_type = mcp23s08_irq_set_type,
+ .irq_bus_lock = mcp23s08_irq_bus_lock,
+ .irq_bus_sync_unlock = mcp23s08_irq_bus_unlock,
+ .irq_startup = mcp23s08_irq_startup,
+ .irq_shutdown = mcp23s08_irq_shutdown,
+};
+
+static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
+{
+ struct gpio_chip *chip = &mcp->chip;
+ int err, irq, j;
+
+ mutex_init(&mcp->irq_lock);
+
+ mcp->irq_domain = irq_domain_add_linear(chip->of_node, chip->ngpio,
+ &irq_domain_simple_ops, mcp);
+ if (!mcp->irq_domain)
+ return -ENODEV;
+
+ err = devm_request_threaded_irq(chip->dev, mcp->irq, NULL, mcp23s08_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ dev_name(chip->dev), mcp);
+ if (err != 0) {
+ dev_err(chip->dev, "unable to request IRQ#%d: %d\n",
+ mcp->irq, err);
+ return err;
+ }
+
+ chip->to_irq = mcp23s08_gpio_to_irq;
+
+ for (j = 0; j < mcp->chip.ngpio; j++) {
+ irq = irq_create_mapping(mcp->irq_domain, j);
+ irq_set_lockdep_class(irq, &gpio_lock_class);
+ irq_set_chip_data(irq, mcp);
+ irq_set_chip(irq, &mcp23s08_irq_chip);
+ irq_set_nested_thread(irq, true);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+ }
+ return 0;
+}
+
+static void mcp23s08_irq_teardown(struct mcp23s08 *mcp)
+{
+ unsigned int irq, i;
+
+ free_irq(mcp->irq, mcp);
+
+ for (i = 0; i < mcp->chip.ngpio; i++) {
+ irq = irq_find_mapping(mcp->irq_domain, i);
+ if (irq > 0)
+ irq_dispose_mapping(irq);
+ }
+
+ irq_domain_remove(mcp->irq_domain);
+}
+
+/*----------------------------------------------------------------------*/
#ifdef CONFIG_DEBUG_FS
@@ -370,10 +580,11 @@ done:
/*----------------------------------------------------------------------*/
static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
- void *data, unsigned addr,
- unsigned type, unsigned base, unsigned pullups)
+ void *data, unsigned addr, unsigned type,
+ unsigned base, unsigned pullups)
{
int status;
+ bool mirror = false;
mutex_init(&mcp->lock);
@@ -425,20 +636,32 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
}
mcp->chip.base = base;
- mcp->chip.can_sleep = 1;
+ mcp->chip.can_sleep = true;
mcp->chip.dev = dev;
mcp->chip.owner = THIS_MODULE;
/* verify MCP_IOCON.SEQOP = 0, so sequential reads work,
* and MCP_IOCON.HAEN = 1, so we work with all chips.
*/
+
status = mcp->ops->read(mcp, MCP_IOCON);
if (status < 0)
goto fail;
- if ((status & IOCON_SEQOP) || !(status & IOCON_HAEN)) {
+
+ mcp->irq_controller = of_property_read_bool(mcp->chip.of_node,
+ "interrupt-controller");
+ if (mcp->irq && mcp->irq_controller && (type == MCP_TYPE_017))
+ mirror = of_property_read_bool(mcp->chip.of_node,
+ "microchip,irq-mirror");
+
+ if ((status & IOCON_SEQOP) || !(status & IOCON_HAEN) || mirror) {
/* mcp23s17 has IOCON twice, make sure they are in sync */
status &= ~(IOCON_SEQOP | (IOCON_SEQOP << 8));
status |= IOCON_HAEN | (IOCON_HAEN << 8);
+ status &= ~(IOCON_INTPOL | (IOCON_INTPOL << 8));
+ if (mirror)
+ status |= IOCON_MIRROR | (IOCON_MIRROR << 8);
+
status = mcp->ops->write(mcp, MCP_IOCON, status);
if (status < 0)
goto fail;
@@ -470,6 +693,16 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
}
status = gpiochip_add(&mcp->chip);
+ if (status < 0)
+ goto fail;
+
+ if (mcp->irq && mcp->irq_controller) {
+ status = mcp23s08_irq_setup(mcp);
+ if (status) {
+ mcp23s08_irq_teardown(mcp);
+ goto fail;
+ }
+ }
fail:
if (status < 0)
dev_dbg(dev, "can't setup chip %d, --> %d\n",
@@ -546,6 +779,7 @@ static int mcp230xx_probe(struct i2c_client *client,
if (match || !pdata) {
base = -1;
pullups = 0;
+ client->irq = irq_of_parse_and_map(client->dev.of_node, 0);
} else {
if (!gpio_is_valid(pdata->base)) {
dev_dbg(&client->dev, "invalid platform data\n");
@@ -559,6 +793,7 @@ static int mcp230xx_probe(struct i2c_client *client,
if (!mcp)
return -ENOMEM;
+ mcp->irq = client->irq;
status = mcp23s08_probe_one(mcp, &client->dev, client, client->addr,
id->driver_data, base, pullups);
if (status)
@@ -579,6 +814,9 @@ static int mcp230xx_remove(struct i2c_client *client)
struct mcp23s08 *mcp = i2c_get_clientdata(client);
int status;
+ if (client->irq && mcp->irq_controller)
+ mcp23s08_irq_teardown(mcp);
+
status = gpiochip_remove(&mcp->chip);
if (status == 0)
kfree(mcp);
@@ -640,7 +878,7 @@ static int mcp23s08_probe(struct spi_device *spi)
match = of_match_device(of_match_ptr(mcp23s08_spi_of_match), &spi->dev);
if (match) {
- type = (int)match->data;
+ type = (int)(uintptr_t)match->data;
status = of_property_read_u32(spi->dev.of_node,
"microchip,spi-present-mask", &spi_present_mask);
if (status) {
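
The interrupt-controller support added to gpio-mcp23s08 above follows the usual pattern for a sleeping expander: one threaded parent IRQ reads the INTF/INTCAP registers over the bus and fans the result out as nested per-pin interrupts through an irq_domain. A stripped-down sketch with hypothetical names:

    /* Sketch: threaded parent IRQ demuxing to nested per-pin interrupts. */
    static irqreturn_t example_exp_irq(int irq, void *data)
    {
            struct example_expander *exp = data;
            unsigned int pending, i;

            pending = example_read_pending(exp); /* hypothetical bus read, may sleep */

            for (i = 0; i < exp->chip.ngpio; i++)
                    if (pending & BIT(i))
                            handle_nested_irq(irq_find_mapping(exp->irq_domain, i));

            return IRQ_HANDLED;
    }

    static int example_exp_irq_setup(struct example_expander *exp)
    {
            exp->irq_domain = irq_domain_add_linear(exp->chip.of_node,
                                                    exp->chip.ngpio,
                                                    &irq_domain_simple_ops, exp);
            if (!exp->irq_domain)
                    return -ENODEV;

            /* Threaded handler because every register access sleeps on the bus. */
            return devm_request_threaded_irq(exp->chip.dev, exp->irq, NULL,
                                             example_exp_irq,
                                             IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                             dev_name(exp->chip.dev), exp);
    }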
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 6da6d7667c6d..d51329d23d38 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -242,7 +242,7 @@ static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
gpio->dbg_show = NULL;
gpio->base = -1;
gpio->ngpio = num_port;
- gpio->can_sleep = 0;
+ gpio->can_sleep = false;
gpio->to_irq = ioh_gpio_to_irq;
}
@@ -596,7 +596,7 @@ static int ioh_gpio_resume(struct pci_dev *pdev)
#define ioh_gpio_resume NULL
#endif
-static DEFINE_PCI_DEVICE_TABLE(ioh_gpio_pcidev_id) = {
+static const struct pci_device_id ioh_gpio_pcidev_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) },
{ 0, }
};
diff --git a/drivers/gpio/gpio-moxart.c b/drivers/gpio/gpio-moxart.c
new file mode 100644
index 000000000000..2af990022cc9
--- /dev/null
+++ b/drivers/gpio/gpio-moxart.c
@@ -0,0 +1,156 @@
+/*
+ * MOXA ART SoCs GPIO driver.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+
+#define GPIO_DATA_OUT 0x00
+#define GPIO_DATA_IN 0x04
+#define GPIO_PIN_DIRECTION 0x08
+
+struct moxart_gpio_chip {
+ struct gpio_chip gpio;
+ void __iomem *base;
+};
+
+static inline struct moxart_gpio_chip *to_moxart_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct moxart_gpio_chip, gpio);
+}
+
+static int moxart_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_request_gpio(offset);
+}
+
+static void moxart_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ pinctrl_free_gpio(offset);
+}
+
+static int moxart_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct moxart_gpio_chip *gc = to_moxart_gpio(chip);
+ void __iomem *ioaddr = gc->base + GPIO_PIN_DIRECTION;
+
+ writel(readl(ioaddr) & ~BIT(offset), ioaddr);
+ return 0;
+}
+
+static int moxart_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct moxart_gpio_chip *gc = to_moxart_gpio(chip);
+ void __iomem *ioaddr = gc->base + GPIO_PIN_DIRECTION;
+
+ writel(readl(ioaddr) | BIT(offset), ioaddr);
+ return 0;
+}
+
+static void moxart_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct moxart_gpio_chip *gc = to_moxart_gpio(chip);
+ void __iomem *ioaddr = gc->base + GPIO_DATA_OUT;
+ u32 reg = readl(ioaddr);
+
+ if (value)
+ reg = reg | BIT(offset);
+ else
+ reg = reg & ~BIT(offset);
+
+
+ writel(reg, ioaddr);
+}
+
+static int moxart_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct moxart_gpio_chip *gc = to_moxart_gpio(chip);
+ u32 ret = readl(gc->base + GPIO_PIN_DIRECTION);
+
+ if (ret & BIT(offset))
+ return !!(readl(gc->base + GPIO_DATA_OUT) & BIT(offset));
+ else
+ return !!(readl(gc->base + GPIO_DATA_IN) & BIT(offset));
+}
+
+static struct gpio_chip moxart_template_chip = {
+ .label = "moxart-gpio",
+ .request = moxart_gpio_request,
+ .free = moxart_gpio_free,
+ .direction_input = moxart_gpio_direction_input,
+ .direction_output = moxart_gpio_direction_output,
+ .set = moxart_gpio_set,
+ .get = moxart_gpio_get,
+ .ngpio = 32,
+ .owner = THIS_MODULE,
+};
+
+static int moxart_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct moxart_gpio_chip *mgc;
+ int ret;
+
+ mgc = devm_kzalloc(dev, sizeof(*mgc), GFP_KERNEL);
+ if (!mgc) {
+ dev_err(dev, "can't allocate GPIO chip container\n");
+ return -ENOMEM;
+ }
+ mgc->gpio = moxart_template_chip;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mgc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mgc->base))
+ return PTR_ERR(mgc->base);
+
+ mgc->gpio.dev = dev;
+
+ ret = gpiochip_add(&mgc->gpio);
+ if (ret) {
+ dev_err(dev, "%s: gpiochip_add failed\n",
+ dev->of_node->full_name);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id moxart_gpio_match[] = {
+ { .compatible = "moxa,moxart-gpio" },
+ { }
+};
+
+static struct platform_driver moxart_gpio_driver = {
+ .driver = {
+ .name = "moxart-gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_gpio_match,
+ },
+ .probe = moxart_gpio_probe,
+};
+module_platform_driver(moxart_gpio_driver);
+
+MODULE_DESCRIPTION("MOXART GPIO chip driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index d75eaa3a1dcc..8f70ded82a2b 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -292,7 +292,7 @@ static int platform_msic_gpio_probe(struct platform_device *pdev)
mg->chip.to_irq = msic_gpio_to_irq;
mg->chip.base = pdata->gpio_base;
mg->chip.ngpio = MSIC_NUM_GPIO;
- mg->chip.can_sleep = 1;
+ mg->chip.can_sleep = true;
mg->chip.dev = dev;
mutex_init(&mg->buslock);
@@ -305,10 +305,9 @@ static int platform_msic_gpio_probe(struct platform_device *pdev)
for (i = 0; i < mg->chip.ngpio; i++) {
irq_set_chip_data(i + mg->irq_base, mg);
- irq_set_chip_and_handler_name(i + mg->irq_base,
- &msic_irqchip,
- handle_simple_irq,
- "demux");
+ irq_set_chip_and_handler(i + mg->irq_base,
+ &msic_irqchip,
+ handle_simple_irq);
}
irq_set_chained_handler(mg->irq, msic_gpio_irq_handler);
irq_set_handler_data(mg->irq, mg);
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index 7b37300973db..a3351acd4963 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
spin_lock_irqsave(&tlmm_lock, irq_flags);
writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
- clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
+ clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
__clear_bit(gpio, msm_gpio.enabled_irqs);
spin_unlock_irqrestore(&tlmm_lock, irq_flags);
}
@@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
spin_lock_irqsave(&tlmm_lock, irq_flags);
__set_bit(gpio, msm_gpio.enabled_irqs);
- set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
+ set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
spin_unlock_irqrestore(&tlmm_lock, irq_flags);
}
@@ -430,10 +430,11 @@ static int msm_gpio_probe(struct platform_device *pdev)
return 0;
}
-static struct of_device_id msm_gpio_of_match[] = {
+static const struct of_device_id msm_gpio_of_match[] = {
{ .compatible = "qcom,msm-gpio", },
{ },
};
+MODULE_DEVICE_TABLE(of, msm_gpio_of_match);
static int msm_gpio_remove(struct platform_device *dev)
{
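
The gpio-msm-v2 change above is a real fix rather than a cleanup: the corrected code wraps INTR_RAW_STATUS_EN and INTR_ENABLE in BIT(), which shows they are bit positions, so OR-ing them directly built the wrong mask. With made-up positions purely for illustration:

    /* Hypothetical bit positions, for illustration only. */
    #define EXAMPLE_INTR_ENABLE        0
    #define EXAMPLE_INTR_RAW_STATUS_EN 3

    /* Wrong: ORs the positions themselves -> 0 | 3 == 0x3 (bits 0 and 1). */
    u32 bad  = EXAMPLE_INTR_ENABLE | EXAMPLE_INTR_RAW_STATUS_EN;

    /* Right: converts each position to a mask -> BIT(0) | BIT(3) == 0x9. */
    u32 good = BIT(EXAMPLE_INTR_ENABLE) | BIT(EXAMPLE_INTR_RAW_STATUS_EN);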
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index db3129043e63..3b1fd1ce460f 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -600,7 +600,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.to_irq = mvebu_gpio_to_irq;
mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
mvchip->chip.ngpio = ngpios;
- mvchip->chip.can_sleep = 0;
+ mvchip->chip.can_sleep = false;
mvchip->chip.of_node = np;
mvchip->chip.dbg_show = mvebu_gpio_dbg_show;
@@ -676,7 +676,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
if (mvchip->irqbase < 0) {
dev_err(&pdev->dev, "no irqs\n");
- return -ENOMEM;
+ return mvchip->irqbase;
}
gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase,
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 3307f6db3a92..db83b3c0a449 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -422,7 +422,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
port->irq_high = platform_get_irq(pdev, 1);
port->irq = platform_get_irq(pdev, 0);
if (port->irq < 0)
- return -EINVAL;
+ return port->irq;
/* disable the interrupt and clear the status */
writel(0, port->base + GPIO_IMR);
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index 71a4a318315d..dbb08546b9ec 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -111,7 +111,7 @@ static int octeon_gpio_probe(struct platform_device *pdev)
chip->dev = &pdev->dev;
chip->owner = THIS_MODULE;
chip->base = 0;
- chip->can_sleep = 0;
+ chip->can_sleep = false;
chip->ngpio = 20;
chip->direction_input = octeon_gpio_dir_in;
chip->get = octeon_gpio_get;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index f319c9ffd4a8..424319061e09 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -108,12 +108,12 @@ static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
u32 l;
reg += bank->regs->direction;
- l = __raw_readl(reg);
+ l = readl_relaxed(reg);
if (is_input)
l |= 1 << gpio;
else
l &= ~(1 << gpio);
- __raw_writel(l, reg);
+ writel_relaxed(l, reg);
bank->context.oe = l;
}
@@ -132,7 +132,7 @@ static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
bank->context.dataout &= ~l;
}
- __raw_writel(l, reg);
+ writel_relaxed(l, reg);
}
/* set data out value using mask register */
@@ -142,12 +142,12 @@ static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
u32 gpio_bit = GPIO_BIT(bank, gpio);
u32 l;
- l = __raw_readl(reg);
+ l = readl_relaxed(reg);
if (enable)
l |= gpio_bit;
else
l &= ~gpio_bit;
- __raw_writel(l, reg);
+ writel_relaxed(l, reg);
bank->context.dataout = l;
}
@@ -155,26 +155,26 @@ static int _get_gpio_datain(struct gpio_bank *bank, int offset)
{
void __iomem *reg = bank->base + bank->regs->datain;
- return (__raw_readl(reg) & (1 << offset)) != 0;
+ return (readl_relaxed(reg) & (1 << offset)) != 0;
}
static int _get_gpio_dataout(struct gpio_bank *bank, int offset)
{
void __iomem *reg = bank->base + bank->regs->dataout;
- return (__raw_readl(reg) & (1 << offset)) != 0;
+ return (readl_relaxed(reg) & (1 << offset)) != 0;
}
static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
{
- int l = __raw_readl(base + reg);
+ int l = readl_relaxed(base + reg);
if (set)
l |= mask;
else
l &= ~mask;
- __raw_writel(l, base + reg);
+ writel_relaxed(l, base + reg);
}
static inline void _gpio_dbck_enable(struct gpio_bank *bank)
@@ -183,7 +183,7 @@ static inline void _gpio_dbck_enable(struct gpio_bank *bank)
clk_enable(bank->dbck);
bank->dbck_enabled = true;
- __raw_writel(bank->dbck_enable_mask,
+ writel_relaxed(bank->dbck_enable_mask,
bank->base + bank->regs->debounce_en);
}
}
@@ -196,7 +196,7 @@ static inline void _gpio_dbck_disable(struct gpio_bank *bank)
* enabled but the clock is not, GPIO module seems to be unable
* to detect events and generate interrupts at least on OMAP3.
*/
- __raw_writel(0, bank->base + bank->regs->debounce_en);
+ writel_relaxed(0, bank->base + bank->regs->debounce_en);
clk_disable(bank->dbck);
bank->dbck_enabled = false;
@@ -233,10 +233,10 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
clk_enable(bank->dbck);
reg = bank->base + bank->regs->debounce;
- __raw_writel(debounce, reg);
+ writel_relaxed(debounce, reg);
reg = bank->base + bank->regs->debounce_en;
- val = __raw_readl(reg);
+ val = readl_relaxed(reg);
if (debounce)
val |= l;
@@ -244,7 +244,7 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
val &= ~l;
bank->dbck_enable_mask = val;
- __raw_writel(val, reg);
+ writel_relaxed(val, reg);
clk_disable(bank->dbck);
/*
* Enable debounce clock per module.
@@ -283,12 +283,12 @@ static void _clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio)
bank->dbck_enable_mask &= ~gpio_bit;
bank->context.debounce_en &= ~gpio_bit;
- __raw_writel(bank->context.debounce_en,
+ writel_relaxed(bank->context.debounce_en,
bank->base + bank->regs->debounce_en);
if (!bank->dbck_enable_mask) {
bank->context.debounce = 0;
- __raw_writel(bank->context.debounce, bank->base +
+ writel_relaxed(bank->context.debounce, bank->base +
bank->regs->debounce);
clk_disable(bank->dbck);
bank->dbck_enabled = false;
@@ -311,18 +311,18 @@ static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
trigger & IRQ_TYPE_EDGE_FALLING);
bank->context.leveldetect0 =
- __raw_readl(bank->base + bank->regs->leveldetect0);
+ readl_relaxed(bank->base + bank->regs->leveldetect0);
bank->context.leveldetect1 =
- __raw_readl(bank->base + bank->regs->leveldetect1);
+ readl_relaxed(bank->base + bank->regs->leveldetect1);
bank->context.risingdetect =
- __raw_readl(bank->base + bank->regs->risingdetect);
+ readl_relaxed(bank->base + bank->regs->risingdetect);
bank->context.fallingdetect =
- __raw_readl(bank->base + bank->regs->fallingdetect);
+ readl_relaxed(bank->base + bank->regs->fallingdetect);
if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
bank->context.wake_en =
- __raw_readl(bank->base + bank->regs->wkup_en);
+ readl_relaxed(bank->base + bank->regs->wkup_en);
}
/* This part needs to be executed always for OMAP{34xx, 44xx} */
@@ -347,8 +347,8 @@ static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
exit:
bank->level_mask =
- __raw_readl(bank->base + bank->regs->leveldetect0) |
- __raw_readl(bank->base + bank->regs->leveldetect1);
+ readl_relaxed(bank->base + bank->regs->leveldetect0) |
+ readl_relaxed(bank->base + bank->regs->leveldetect1);
}
#ifdef CONFIG_ARCH_OMAP1
@@ -366,13 +366,13 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
reg += bank->regs->irqctrl;
- l = __raw_readl(reg);
+ l = readl_relaxed(reg);
if ((l >> gpio) & 1)
l &= ~(1 << gpio);
else
l |= 1 << gpio;
- __raw_writel(l, reg);
+ writel_relaxed(l, reg);
}
#else
static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
@@ -390,7 +390,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
} else if (bank->regs->irqctrl) {
reg += bank->regs->irqctrl;
- l = __raw_readl(reg);
+ l = readl_relaxed(reg);
if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
bank->toggle_mask |= 1 << gpio;
if (trigger & IRQ_TYPE_EDGE_RISING)
@@ -400,7 +400,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
else
return -EINVAL;
- __raw_writel(l, reg);
+ writel_relaxed(l, reg);
} else if (bank->regs->edgectrl1) {
if (gpio & 0x08)
reg += bank->regs->edgectrl2;
@@ -408,7 +408,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
reg += bank->regs->edgectrl1;
gpio &= 0x07;
- l = __raw_readl(reg);
+ l = readl_relaxed(reg);
l &= ~(3 << (gpio << 1));
if (trigger & IRQ_TYPE_EDGE_RISING)
l |= 2 << (gpio << 1);
@@ -418,8 +418,8 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
/* Enable wake-up during idle for dynamic tick */
_gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger);
bank->context.wake_en =
- __raw_readl(bank->base + bank->regs->wkup_en);
- __raw_writel(l, reg);
+ readl_relaxed(bank->base + bank->regs->wkup_en);
+ writel_relaxed(l, reg);
}
return 0;
}
@@ -430,17 +430,17 @@ static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset)
void __iomem *reg = bank->base + bank->regs->pinctrl;
/* Claim the pin for MPU */
- __raw_writel(__raw_readl(reg) | (1 << offset), reg);
+ writel_relaxed(readl_relaxed(reg) | (1 << offset), reg);
}
if (bank->regs->ctrl && !BANK_USED(bank)) {
void __iomem *reg = bank->base + bank->regs->ctrl;
u32 ctrl;
- ctrl = __raw_readl(reg);
+ ctrl = readl_relaxed(reg);
/* Module is enabled, clocks are not gated */
ctrl &= ~GPIO_MOD_CTRL_BIT;
- __raw_writel(ctrl, reg);
+ writel_relaxed(ctrl, reg);
bank->context.ctrl = ctrl;
}
}
@@ -455,17 +455,17 @@ static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset)
/* Disable wake-up during idle for dynamic tick */
_gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
bank->context.wake_en =
- __raw_readl(bank->base + bank->regs->wkup_en);
+ readl_relaxed(bank->base + bank->regs->wkup_en);
}
if (bank->regs->ctrl && !BANK_USED(bank)) {
void __iomem *reg = bank->base + bank->regs->ctrl;
u32 ctrl;
- ctrl = __raw_readl(reg);
+ ctrl = readl_relaxed(reg);
/* Module is disabled, clocks are gated */
ctrl |= GPIO_MOD_CTRL_BIT;
- __raw_writel(ctrl, reg);
+ writel_relaxed(ctrl, reg);
bank->context.ctrl = ctrl;
}
}
@@ -474,7 +474,7 @@ static int gpio_is_input(struct gpio_bank *bank, int mask)
{
void __iomem *reg = bank->base + bank->regs->direction;
- return __raw_readl(reg) & mask;
+ return readl_relaxed(reg) & mask;
}
static int gpio_irq_type(struct irq_data *d, unsigned type)
@@ -538,16 +538,16 @@ static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
void __iomem *reg = bank->base;
reg += bank->regs->irqstatus;
- __raw_writel(gpio_mask, reg);
+ writel_relaxed(gpio_mask, reg);
/* Workaround for clearing DSP GPIO interrupts to allow retention */
if (bank->regs->irqstatus2) {
reg = bank->base + bank->regs->irqstatus2;
- __raw_writel(gpio_mask, reg);
+ writel_relaxed(gpio_mask, reg);
}
/* Flush posted write for the irq status to avoid spurious interrupts */
- __raw_readl(reg);
+ readl_relaxed(reg);
}
static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
@@ -562,7 +562,7 @@ static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
u32 mask = (1 << bank->width) - 1;
reg += bank->regs->irqenable;
- l = __raw_readl(reg);
+ l = readl_relaxed(reg);
if (bank->regs->irqenable_inv)
l = ~l;
l &= mask;
@@ -580,7 +580,7 @@ static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
bank->context.irqenable1 |= gpio_mask;
} else {
reg += bank->regs->irqenable;
- l = __raw_readl(reg);
+ l = readl_relaxed(reg);
if (bank->regs->irqenable_inv)
l &= ~gpio_mask;
else
@@ -588,7 +588,7 @@ static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
bank->context.irqenable1 = l;
}
- __raw_writel(l, reg);
+ writel_relaxed(l, reg);
}
static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
@@ -602,7 +602,7 @@ static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
bank->context.irqenable1 &= ~gpio_mask;
} else {
reg += bank->regs->irqenable;
- l = __raw_readl(reg);
+ l = readl_relaxed(reg);
if (bank->regs->irqenable_inv)
l |= gpio_mask;
else
@@ -610,7 +610,7 @@ static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
bank->context.irqenable1 = l;
}
- __raw_writel(l, reg);
+ writel_relaxed(l, reg);
}
static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
@@ -646,7 +646,7 @@ static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
else
bank->context.wake_en &= ~gpio_bit;
- __raw_writel(bank->context.wake_en, bank->base + bank->regs->wkup_en);
+ writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
@@ -748,7 +748,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
u32 enabled;
enabled = _get_gpio_irqbank_mask(bank);
- isr_saved = isr = __raw_readl(isr_reg) & enabled;
+ isr_saved = isr = readl_relaxed(isr_reg) & enabled;
if (bank->level_mask)
level_mask = bank->level_mask & enabled;
@@ -883,7 +883,7 @@ static int omap_mpuio_suspend_noirq(struct device *dev)
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
- __raw_writel(0xffff & ~bank->context.wake_en, mask_reg);
+ writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
@@ -898,7 +898,7 @@ static int omap_mpuio_resume_noirq(struct device *dev)
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
- __raw_writel(bank->context.wake_en, mask_reg);
+ writel_relaxed(bank->context.wake_en, mask_reg);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
@@ -1011,7 +1011,7 @@ static void __init omap_gpio_show_rev(struct gpio_bank *bank)
if (called || bank->regs->revision == USHRT_MAX)
return;
- rev = __raw_readw(bank->base + bank->regs->revision);
+ rev = readw_relaxed(bank->base + bank->regs->revision);
pr_info("OMAP GPIO hardware version %d.%d\n",
(rev >> 4) & 0x0f, rev & 0x0f);
@@ -1032,20 +1032,20 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
l = 0xffff;
if (bank->is_mpuio) {
- __raw_writel(l, bank->base + bank->regs->irqenable);
+ writel_relaxed(l, bank->base + bank->regs->irqenable);
return;
}
_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
_gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv);
if (bank->regs->debounce_en)
- __raw_writel(0, base + bank->regs->debounce_en);
+ writel_relaxed(0, base + bank->regs->debounce_en);
/* Save OE default value (0xffffffff) in the context */
- bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
+ bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
/* Initialize interface clk ungated, module enabled */
if (bank->regs->ctrl)
- __raw_writel(0, base + bank->regs->ctrl);
+ writel_relaxed(0, base + bank->regs->ctrl);
bank->dbck = clk_get(bank->dev, "dbclk");
if (IS_ERR(bank->dbck))
@@ -1282,11 +1282,11 @@ static int omap_gpio_runtime_suspend(struct device *dev)
*/
wake_low = bank->context.leveldetect0 & bank->context.wake_en;
if (wake_low)
- __raw_writel(wake_low | bank->context.fallingdetect,
+ writel_relaxed(wake_low | bank->context.fallingdetect,
bank->base + bank->regs->fallingdetect);
wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
if (wake_hi)
- __raw_writel(wake_hi | bank->context.risingdetect,
+ writel_relaxed(wake_hi | bank->context.risingdetect,
bank->base + bank->regs->risingdetect);
if (!bank->enabled_non_wakeup_gpios)
@@ -1301,7 +1301,7 @@ static int omap_gpio_runtime_suspend(struct device *dev)
* non-wakeup GPIOs. Otherwise spurious IRQs will be
* generated. See OMAP2420 Errata item 1.101.
*/
- bank->saved_datain = __raw_readl(bank->base +
+ bank->saved_datain = readl_relaxed(bank->base +
bank->regs->datain);
l1 = bank->context.fallingdetect;
l2 = bank->context.risingdetect;
@@ -1309,8 +1309,8 @@ static int omap_gpio_runtime_suspend(struct device *dev)
l1 &= ~bank->enabled_non_wakeup_gpios;
l2 &= ~bank->enabled_non_wakeup_gpios;
- __raw_writel(l1, bank->base + bank->regs->fallingdetect);
- __raw_writel(l2, bank->base + bank->regs->risingdetect);
+ writel_relaxed(l1, bank->base + bank->regs->fallingdetect);
+ writel_relaxed(l2, bank->base + bank->regs->risingdetect);
bank->workaround_enabled = true;
@@ -1358,9 +1358,9 @@ static int omap_gpio_runtime_resume(struct device *dev)
* generate a PRCM wakeup. Here we restore the
* pre-runtime_suspend() values for edge triggering.
*/
- __raw_writel(bank->context.fallingdetect,
+ writel_relaxed(bank->context.fallingdetect,
bank->base + bank->regs->fallingdetect);
- __raw_writel(bank->context.risingdetect,
+ writel_relaxed(bank->context.risingdetect,
bank->base + bank->regs->risingdetect);
if (bank->loses_context) {
@@ -1382,7 +1382,7 @@ static int omap_gpio_runtime_resume(struct device *dev)
return 0;
}
- l = __raw_readl(bank->base + bank->regs->datain);
+ l = readl_relaxed(bank->base + bank->regs->datain);
/*
* Check if any of the non-wakeup interrupt GPIOs have changed
@@ -1412,24 +1412,24 @@ static int omap_gpio_runtime_resume(struct device *dev)
if (gen) {
u32 old0, old1;
- old0 = __raw_readl(bank->base + bank->regs->leveldetect0);
- old1 = __raw_readl(bank->base + bank->regs->leveldetect1);
+ old0 = readl_relaxed(bank->base + bank->regs->leveldetect0);
+ old1 = readl_relaxed(bank->base + bank->regs->leveldetect1);
if (!bank->regs->irqstatus_raw0) {
- __raw_writel(old0 | gen, bank->base +
+ writel_relaxed(old0 | gen, bank->base +
bank->regs->leveldetect0);
- __raw_writel(old1 | gen, bank->base +
+ writel_relaxed(old1 | gen, bank->base +
bank->regs->leveldetect1);
}
if (bank->regs->irqstatus_raw0) {
- __raw_writel(old0 | l, bank->base +
+ writel_relaxed(old0 | l, bank->base +
bank->regs->leveldetect0);
- __raw_writel(old1 | l, bank->base +
+ writel_relaxed(old1 | l, bank->base +
bank->regs->leveldetect1);
}
- __raw_writel(old0, bank->base + bank->regs->leveldetect0);
- __raw_writel(old1, bank->base + bank->regs->leveldetect1);
+ writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
+ writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
}
bank->workaround_enabled = false;
@@ -1471,55 +1471,55 @@ static void omap_gpio_init_context(struct gpio_bank *p)
struct omap_gpio_reg_offs *regs = p->regs;
void __iomem *base = p->base;
- p->context.ctrl = __raw_readl(base + regs->ctrl);
- p->context.oe = __raw_readl(base + regs->direction);
- p->context.wake_en = __raw_readl(base + regs->wkup_en);
- p->context.leveldetect0 = __raw_readl(base + regs->leveldetect0);
- p->context.leveldetect1 = __raw_readl(base + regs->leveldetect1);
- p->context.risingdetect = __raw_readl(base + regs->risingdetect);
- p->context.fallingdetect = __raw_readl(base + regs->fallingdetect);
- p->context.irqenable1 = __raw_readl(base + regs->irqenable);
- p->context.irqenable2 = __raw_readl(base + regs->irqenable2);
+ p->context.ctrl = readl_relaxed(base + regs->ctrl);
+ p->context.oe = readl_relaxed(base + regs->direction);
+ p->context.wake_en = readl_relaxed(base + regs->wkup_en);
+ p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
+ p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
+ p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
+ p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
+ p->context.irqenable1 = readl_relaxed(base + regs->irqenable);
+ p->context.irqenable2 = readl_relaxed(base + regs->irqenable2);
if (regs->set_dataout && p->regs->clr_dataout)
- p->context.dataout = __raw_readl(base + regs->set_dataout);
+ p->context.dataout = readl_relaxed(base + regs->set_dataout);
else
- p->context.dataout = __raw_readl(base + regs->dataout);
+ p->context.dataout = readl_relaxed(base + regs->dataout);
p->context_valid = true;
}
static void omap_gpio_restore_context(struct gpio_bank *bank)
{
- __raw_writel(bank->context.wake_en,
+ writel_relaxed(bank->context.wake_en,
bank->base + bank->regs->wkup_en);
- __raw_writel(bank->context.ctrl, bank->base + bank->regs->ctrl);
- __raw_writel(bank->context.leveldetect0,
+ writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl);
+ writel_relaxed(bank->context.leveldetect0,
bank->base + bank->regs->leveldetect0);
- __raw_writel(bank->context.leveldetect1,
+ writel_relaxed(bank->context.leveldetect1,
bank->base + bank->regs->leveldetect1);
- __raw_writel(bank->context.risingdetect,
+ writel_relaxed(bank->context.risingdetect,
bank->base + bank->regs->risingdetect);
- __raw_writel(bank->context.fallingdetect,
+ writel_relaxed(bank->context.fallingdetect,
bank->base + bank->regs->fallingdetect);
if (bank->regs->set_dataout && bank->regs->clr_dataout)
- __raw_writel(bank->context.dataout,
+ writel_relaxed(bank->context.dataout,
bank->base + bank->regs->set_dataout);
else
- __raw_writel(bank->context.dataout,
+ writel_relaxed(bank->context.dataout,
bank->base + bank->regs->dataout);
- __raw_writel(bank->context.oe, bank->base + bank->regs->direction);
+ writel_relaxed(bank->context.oe, bank->base + bank->regs->direction);
if (bank->dbck_enable_mask) {
- __raw_writel(bank->context.debounce, bank->base +
+ writel_relaxed(bank->context.debounce, bank->base +
bank->regs->debounce);
- __raw_writel(bank->context.debounce_en,
+ writel_relaxed(bank->context.debounce_en,
bank->base + bank->regs->debounce_en);
}
- __raw_writel(bank->context.irqenable1,
+ writel_relaxed(bank->context.irqenable1,
bank->base + bank->regs->irqenable);
- __raw_writel(bank->context.irqenable2,
+ writel_relaxed(bank->context.irqenable2,
bank->base + bank->regs->irqenable2);
}
#endif /* CONFIG_PM_RUNTIME */
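
gpio-omap.c above moves every MMIO access from the __raw_readl()/__raw_writel() pair to readl_relaxed()/writel_relaxed(). On ARM the relaxed accessors keep the endianness handling of readl()/writel() but omit the memory barriers, which is sufficient for register accesses that only need to be ordered against each other. A minimal sketch of the resulting read-modify-write idiom (function and register names are illustrative, not from this diff):

#include <linux/io.h>

static void example_set_bits(void __iomem *reg, u32 mask)
{
	u32 val;

	val = readl_relaxed(reg);	/* byte-order aware, no barrier */
	val |= mask;
	writel_relaxed(val, reg);	/* posted write, no barrier */
}
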
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index 11801e986dd9..da9d33252e56 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -182,7 +182,7 @@ static int palmas_gpio_probe(struct platform_device *pdev)
palmas_gpio->gpio_chip.owner = THIS_MODULE;
palmas_gpio->gpio_chip.label = dev_name(&pdev->dev);
palmas_gpio->gpio_chip.ngpio = dev_data->ngpio;
- palmas_gpio->gpio_chip.can_sleep = 1;
+ palmas_gpio->gpio_chip.can_sleep = true;
palmas_gpio->gpio_chip.direction_input = palmas_gpio_input;
palmas_gpio->gpio_chip.direction_output = palmas_gpio_output;
palmas_gpio->gpio_chip.to_irq = palmas_gpio_to_irq;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 6e48c07e3d8c..019b23b955a2 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -354,7 +354,7 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
gc->direction_output = pca953x_gpio_direction_output;
gc->get = pca953x_gpio_get_value;
gc->set = pca953x_gpio_set_value;
- gc->can_sleep = 1;
+ gc->can_sleep = true;
gc->base = chip->gpio_start;
gc->ngpio = gpios;
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 1535686e74ea..82735822bc9d 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -305,7 +305,7 @@ static int pcf857x_probe(struct i2c_client *client,
spin_lock_init(&gpio->slock);
gpio->chip.base = pdata ? pdata->gpio_base : -1;
- gpio->chip.can_sleep = 1;
+ gpio->chip.can_sleep = true;
gpio->chip.dev = &client->dev;
gpio->chip.owner = THIS_MODULE;
gpio->chip.get = pcf857x_get;
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 0fec097e838d..9656c196772e 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -224,7 +224,7 @@ static void pch_gpio_setup(struct pch_gpio *chip)
gpio->dbg_show = NULL;
gpio->base = -1;
gpio->ngpio = gpio_pins[chip->ioh];
- gpio->can_sleep = 0;
+ gpio->can_sleep = false;
gpio->to_irq = pch_gpio_to_irq;
}
@@ -518,7 +518,7 @@ static int pch_gpio_resume(struct pci_dev *pdev)
#endif
#define PCI_VENDOR_ID_ROHM 0x10DB
-static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
+static const struct pci_device_id pch_gpio_pcidev_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8043) },
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index cc13d1b74fad..42e6e64f2120 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -263,7 +263,8 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- return readl_relaxed(gpio_chip_base(chip) + GPLR_OFFSET) & (1 << offset);
+ u32 gplr = readl_relaxed(gpio_chip_base(chip) + GPLR_OFFSET);
+ return !!(gplr & (1 << offset));
}
static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index e63d6a397e17..122b776fdc0b 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -133,7 +133,7 @@ static int rc5t583_gpio_probe(struct platform_device *pdev)
rc5t583_gpio->gpio_chip.get = rc5t583_gpio_get,
rc5t583_gpio->gpio_chip.to_irq = rc5t583_gpio_to_irq,
rc5t583_gpio->gpio_chip.ngpio = RC5T583_MAX_GPIO,
- rc5t583_gpio->gpio_chip.can_sleep = 1,
+ rc5t583_gpio->gpio_chip.can_sleep = true,
rc5t583_gpio->gpio_chip.dev = &pdev->dev;
rc5t583_gpio->gpio_chip.base = -1;
rc5t583_gpio->rc5t583 = rc5t583;
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index fe088a30567a..ca76ce751540 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -169,7 +169,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
u32 pending;
unsigned int offset, irqs_handled = 0;
- while ((pending = gpio_rcar_read(p, INTDT))) {
+ while ((pending = gpio_rcar_read(p, INTDT) &
+ gpio_rcar_read(p, INTMSK))) {
offset = __ffs(pending);
gpio_rcar_write(p, INTCLR, BIT(offset));
generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
@@ -284,7 +285,34 @@ static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
.map = gpio_rcar_irq_domain_map,
};
-static void gpio_rcar_parse_pdata(struct gpio_rcar_priv *p)
+struct gpio_rcar_info {
+ bool has_both_edge_trigger;
+};
+
+static const struct of_device_id gpio_rcar_of_table[] = {
+ {
+ .compatible = "renesas,gpio-r8a7790",
+ .data = (void *)&(const struct gpio_rcar_info) {
+ .has_both_edge_trigger = true,
+ },
+ }, {
+ .compatible = "renesas,gpio-r8a7791",
+ .data = (void *)&(const struct gpio_rcar_info) {
+ .has_both_edge_trigger = true,
+ },
+ }, {
+ .compatible = "renesas,gpio-rcar",
+ .data = (void *)&(const struct gpio_rcar_info) {
+ .has_both_edge_trigger = false,
+ },
+ }, {
+ /* Terminator */
+ },
+};
+
+MODULE_DEVICE_TABLE(of, gpio_rcar_of_table);
+
+static int gpio_rcar_parse_pdata(struct gpio_rcar_priv *p)
{
struct gpio_rcar_config *pdata = dev_get_platdata(&p->pdev->dev);
struct device_node *np = p->pdev->dev.of_node;
@@ -294,11 +322,21 @@ static void gpio_rcar_parse_pdata(struct gpio_rcar_priv *p)
if (pdata) {
p->config = *pdata;
} else if (IS_ENABLED(CONFIG_OF) && np) {
+ const struct of_device_id *match;
+ const struct gpio_rcar_info *info;
+
+ match = of_match_node(gpio_rcar_of_table, np);
+ if (!match)
+ return -EINVAL;
+
+ info = match->data;
+
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0,
&args);
p->config.number_of_pins = ret == 0 ? args.args[2]
: RCAR_MAX_GPIO_PER_BANK;
p->config.gpio_base = -1;
+ p->config.has_both_edge_trigger = info->has_both_edge_trigger;
}
if (p->config.number_of_pins == 0 ||
@@ -308,6 +346,8 @@ static void gpio_rcar_parse_pdata(struct gpio_rcar_priv *p)
p->config.number_of_pins, RCAR_MAX_GPIO_PER_BANK);
p->config.number_of_pins = RCAR_MAX_GPIO_PER_BANK;
}
+
+ return 0;
}
static int gpio_rcar_probe(struct platform_device *pdev)
@@ -330,7 +370,9 @@ static int gpio_rcar_probe(struct platform_device *pdev)
spin_lock_init(&p->lock);
/* Get device configuration from DT node or platform data. */
- gpio_rcar_parse_pdata(p);
+ ret = gpio_rcar_parse_pdata(p);
+ if (ret < 0)
+ return ret;
platform_set_drvdata(pdev, p);
@@ -369,10 +411,9 @@ static int gpio_rcar_probe(struct platform_device *pdev)
irq_chip->name = name;
irq_chip->irq_mask = gpio_rcar_irq_disable;
irq_chip->irq_unmask = gpio_rcar_irq_enable;
- irq_chip->irq_enable = gpio_rcar_irq_enable;
- irq_chip->irq_disable = gpio_rcar_irq_disable;
irq_chip->irq_set_type = gpio_rcar_irq_set_type;
- irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_SET_TYPE_MASKED;
+ irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_SET_TYPE_MASKED
+ | IRQCHIP_MASK_ON_SUSPEND;
p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
p->config.number_of_pins,
@@ -435,17 +476,6 @@ static int gpio_rcar_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id gpio_rcar_of_table[] = {
- {
- .compatible = "renesas,gpio-rcar",
- },
- { },
-};
-
-MODULE_DEVICE_TABLE(of, gpio_rcar_of_table);
-#endif
-
static struct platform_driver gpio_rcar_device_driver = {
.probe = gpio_rcar_probe,
.remove = gpio_rcar_remove,
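
The gpio-rcar.c hunks above attach a per-compatible gpio_rcar_info to each of_device_id entry and read it back with of_match_node() during probe. A self-contained sketch of that lookup pattern, with every name (example_info, example_a_info, example_of_table, example_get_info) hypothetical:

#include <linux/of.h>

struct example_info {
	bool has_both_edge_trigger;
};

static const struct example_info example_a_info = {
	.has_both_edge_trigger = true,
};

static const struct of_device_id example_of_table[] = {
	{ .compatible = "vendor,example-a", .data = &example_a_info },
	{ /* sentinel */ },
};

static const struct example_info *example_get_info(struct device_node *np)
{
	const struct of_device_id *match;

	match = of_match_node(example_of_table, np);
	if (!match)
		return NULL;		/* unknown compatible string */

	return match->data;		/* per-compatible configuration */
}
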
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 76e02b9460e6..a85e00bf9834 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -30,10 +30,13 @@
#include <asm/irq.h>
-#include <mach/hardware.h>
#include <mach/map.h>
#include <mach/regs-gpio.h>
+#if defined(CONFIG_ARCH_S3C24XX) || defined(CONFIG_ARCH_S3C64XX)
+#include <mach/gpio-samsung.h>
+#endif
+
#include <plat/cpu.h>
#include <plat/gpio-core.h>
#include <plat/gpio-cfg.h>
@@ -1053,7 +1056,7 @@ struct samsung_gpio_chip s3c24xx_gpios[] = {
.base = S3C2410_GPA(0),
.owner = THIS_MODULE,
.label = "GPIOA",
- .ngpio = 24,
+ .ngpio = 27,
.direction_input = s3c24xx_gpiolib_banka_input,
.direction_output = s3c24xx_gpiolib_banka_output,
},
@@ -1062,7 +1065,7 @@ struct samsung_gpio_chip s3c24xx_gpios[] = {
.base = S3C2410_GPB(0),
.owner = THIS_MODULE,
.label = "GPIOB",
- .ngpio = 16,
+ .ngpio = 11,
},
}, {
.chip = {
@@ -1107,7 +1110,7 @@ struct samsung_gpio_chip s3c24xx_gpios[] = {
.base = S3C2410_GPH(0),
.owner = THIS_MODULE,
.label = "GPIOH",
- .ngpio = 11,
+ .ngpio = 15,
},
},
/* GPIOS for the S3C2443 and later devices. */
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
new file mode 100644
index 000000000000..0357387b3645
--- /dev/null
+++ b/drivers/gpio/gpio-sch311x.c
@@ -0,0 +1,432 @@
+/*
+ * GPIO driver for the SMSC SCH311x Super-I/O chips
+ *
+ * Copyright (C) 2013 Bruno Randolf <br1@einfach.org>
+ *
+ * SuperIO functions and chip detection:
+ * (c) Copyright 2008 Wim Van Sebroeck <wim@iguana.be>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+
+#define DRV_NAME "gpio-sch311x"
+
+#define SCH311X_GPIO_CONF_OUT 0x00
+#define SCH311X_GPIO_CONF_IN 0x01
+#define SCH311X_GPIO_CONF_INVERT 0x02
+#define SCH311X_GPIO_CONF_OPEN_DRAIN 0x80
+
+#define SIO_CONFIG_KEY_ENTER 0x55
+#define SIO_CONFIG_KEY_EXIT 0xaa
+
+#define GP1 0x4b
+
+static int sch311x_ioports[] = { 0x2e, 0x4e, 0x162e, 0x164e };
+
+static struct platform_device *sch311x_gpio_pdev;
+
+struct sch311x_pdev_data { /* platform device data */
+ unsigned short runtime_reg; /* runtime register base address */
+};
+
+struct sch311x_gpio_block { /* one GPIO block runtime data */
+ struct gpio_chip chip;
+ unsigned short data_reg; /* from definition below */
+ unsigned short *config_regs; /* pointer to definition below */
+ unsigned short runtime_reg; /* runtime register */
+ spinlock_t lock; /* lock for this GPIO block */
+};
+
+struct sch311x_gpio_priv { /* driver private data */
+ struct sch311x_gpio_block blocks[6];
+};
+
+struct sch311x_gpio_block_def { /* register address definitions */
+ unsigned short data_reg;
+ unsigned short config_regs[8];
+ unsigned short base;
+};
+
+/* Note: some GPIOs are not available, these are marked with 0x00 */
+
+static struct sch311x_gpio_block_def sch311x_gpio_blocks[] = {
+ {
+ .data_reg = 0x4b, /* GP1 */
+ .config_regs = {0x23, 0x24, 0x25, 0x26, 0x27, 0x29, 0x2a, 0x2b},
+ .base = 10,
+ },
+ {
+ .data_reg = 0x4c, /* GP2 */
+ .config_regs = {0x00, 0x2c, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x32},
+ .base = 20,
+ },
+ {
+ .data_reg = 0x4d, /* GP3 */
+ .config_regs = {0x33, 0x34, 0x35, 0x36, 0x37, 0x00, 0x39, 0x3a},
+ .base = 30,
+ },
+ {
+ .data_reg = 0x4e, /* GP4 */
+ .config_regs = {0x3b, 0x00, 0x3d, 0x00, 0x6e, 0x6f, 0x72, 0x73},
+ .base = 40,
+ },
+ {
+ .data_reg = 0x4f, /* GP5 */
+ .config_regs = {0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46},
+ .base = 50,
+ },
+ {
+ .data_reg = 0x50, /* GP6 */
+ .config_regs = {0x47, 0x48, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59},
+ .base = 60,
+ },
+};
+
+static inline struct sch311x_gpio_block *
+to_sch311x_gpio_block(struct gpio_chip *chip)
+{
+ return container_of(chip, struct sch311x_gpio_block, chip);
+}
+
+
+/*
+ * Super-IO functions
+ */
+
+static inline int sch311x_sio_enter(int sio_config_port)
+{
+ /* Don't step on other drivers' I/O space by accident. */
+ if (!request_muxed_region(sio_config_port, 2, DRV_NAME)) {
+ pr_err(DRV_NAME "I/O address 0x%04x already in use\n",
+ sio_config_port);
+ return -EBUSY;
+ }
+
+ outb(SIO_CONFIG_KEY_ENTER, sio_config_port);
+ return 0;
+}
+
+static inline void sch311x_sio_exit(int sio_config_port)
+{
+ outb(SIO_CONFIG_KEY_EXIT, sio_config_port);
+ release_region(sio_config_port, 2);
+}
+
+static inline int sch311x_sio_inb(int sio_config_port, int reg)
+{
+ outb(reg, sio_config_port);
+ return inb(sio_config_port + 1);
+}
+
+static inline void sch311x_sio_outb(int sio_config_port, int reg, int val)
+{
+ outb(reg, sio_config_port);
+ outb(val, sio_config_port + 1);
+}
+
+
+/*
+ * GPIO functions
+ */
+
+static int sch311x_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct sch311x_gpio_block *block = to_sch311x_gpio_block(chip);
+
+ if (block->config_regs[offset] == 0) /* GPIO is not available */
+ return -ENODEV;
+
+ if (!request_region(block->runtime_reg + block->config_regs[offset],
+ 1, DRV_NAME)) {
+ dev_err(chip->dev, "Failed to request region 0x%04x.\n",
+ block->runtime_reg + block->config_regs[offset]);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static void sch311x_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct sch311x_gpio_block *block = to_sch311x_gpio_block(chip);
+
+ if (block->config_regs[offset] == 0) /* GPIO is not available */
+ return;
+
+ release_region(block->runtime_reg + block->config_regs[offset], 1);
+}
+
+static int sch311x_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct sch311x_gpio_block *block = to_sch311x_gpio_block(chip);
+ unsigned char data;
+
+ spin_lock(&block->lock);
+ data = inb(block->runtime_reg + block->data_reg);
+ spin_unlock(&block->lock);
+
+ return !!(data & BIT(offset));
+}
+
+static void __sch311x_gpio_set(struct sch311x_gpio_block *block,
+ unsigned offset, int value)
+{
+ unsigned char data = inb(block->runtime_reg + block->data_reg);
+ if (value)
+ data |= BIT(offset);
+ else
+ data &= ~BIT(offset);
+ outb(data, block->runtime_reg + block->data_reg);
+}
+
+static void sch311x_gpio_set(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct sch311x_gpio_block *block = to_sch311x_gpio_block(chip);
+
+ spin_lock(&block->lock);
+ __sch311x_gpio_set(block, offset, value);
+ spin_unlock(&block->lock);
+}
+
+static int sch311x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
+{
+ struct sch311x_gpio_block *block = to_sch311x_gpio_block(chip);
+
+ spin_lock(&block->lock);
+ outb(SCH311X_GPIO_CONF_IN, block->runtime_reg +
+ block->config_regs[offset]);
+ spin_unlock(&block->lock);
+
+ return 0;
+}
+
+static int sch311x_gpio_direction_out(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct sch311x_gpio_block *block = to_sch311x_gpio_block(chip);
+
+ spin_lock(&block->lock);
+
+ outb(SCH311X_GPIO_CONF_OUT, block->runtime_reg +
+ block->config_regs[offset]);
+
+ __sch311x_gpio_set(block, offset, value);
+
+ spin_unlock(&block->lock);
+ return 0;
+}
+
+static int sch311x_gpio_probe(struct platform_device *pdev)
+{
+ struct sch311x_pdev_data *pdata = pdev->dev.platform_data;
+ struct sch311x_gpio_priv *priv;
+ struct sch311x_gpio_block *block;
+ int err, i;
+
+ /* we can register all GPIO data registers at once */
+ if (!request_region(pdata->runtime_reg + GP1, 6, DRV_NAME)) {
+ dev_err(&pdev->dev, "Failed to request region 0x%04x-0x%04x.\n",
+ pdata->runtime_reg + GP1, pdata->runtime_reg + GP1 + 5);
+ return -EBUSY;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ for (i = 0; i < ARRAY_SIZE(priv->blocks); i++) {
+ block = &priv->blocks[i];
+
+ spin_lock_init(&block->lock);
+
+ block->chip.label = DRV_NAME;
+ block->chip.owner = THIS_MODULE;
+ block->chip.request = sch311x_gpio_request;
+ block->chip.free = sch311x_gpio_free;
+ block->chip.direction_input = sch311x_gpio_direction_in;
+ block->chip.direction_output = sch311x_gpio_direction_out;
+ block->chip.get = sch311x_gpio_get;
+ block->chip.set = sch311x_gpio_set;
+ block->chip.ngpio = 8;
+ block->chip.dev = &pdev->dev;
+ block->chip.base = sch311x_gpio_blocks[i].base;
+ block->config_regs = sch311x_gpio_blocks[i].config_regs;
+ block->data_reg = sch311x_gpio_blocks[i].data_reg;
+ block->runtime_reg = pdata->runtime_reg;
+
+ err = gpiochip_add(&block->chip);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Could not register gpiochip, %d\n", err);
+ goto exit_err;
+ }
+ dev_info(&pdev->dev,
+ "SMSC SCH311x GPIO block %d registered.\n", i);
+ }
+
+ return 0;
+
+exit_err:
+ release_region(pdata->runtime_reg + GP1, 6);
+ /* release already registered chips */
+ for (--i; i >= 0; i--)
+ gpiochip_remove(&priv->blocks[i].chip);
+ return err;
+}
+
+static int sch311x_gpio_remove(struct platform_device *pdev)
+{
+ struct sch311x_pdev_data *pdata = pdev->dev.platform_data;
+ struct sch311x_gpio_priv *priv = platform_get_drvdata(pdev);
+ int err, i;
+
+ release_region(pdata->runtime_reg + GP1, 6);
+
+ for (i = 0; i < ARRAY_SIZE(priv->blocks); i++) {
+ err = gpiochip_remove(&priv->blocks[i].chip);
+ if (err)
+ return err;
+ dev_info(&pdev->dev,
+ "SMSC SCH311x GPIO block %d unregistered.\n", i);
+ }
+ return 0;
+}
+
+static struct platform_driver sch311x_gpio_driver = {
+ .driver.name = DRV_NAME,
+ .driver.owner = THIS_MODULE,
+ .probe = sch311x_gpio_probe,
+ .remove = sch311x_gpio_remove,
+};
+
+
+/*
+ * Init & exit routines
+ */
+
+static int __init sch311x_detect(int sio_config_port, unsigned short *addr)
+{
+ int err = 0, reg;
+ unsigned short base_addr;
+ unsigned char dev_id;
+
+ err = sch311x_sio_enter(sio_config_port);
+ if (err)
+ return err;
+
+ /* Check device ID. We currently know about:
+ * SCH3112 (0x7c), SCH3114 (0x7d), and SCH3116 (0x7f). */
+ reg = sch311x_sio_inb(sio_config_port, 0x20);
+ if (!(reg == 0x7c || reg == 0x7d || reg == 0x7f)) {
+ err = -ENODEV;
+ goto exit;
+ }
+ dev_id = reg == 0x7c ? 2 : reg == 0x7d ? 4 : 6;
+
+ /* Select logical device A (runtime registers) */
+ sch311x_sio_outb(sio_config_port, 0x07, 0x0a);
+
+ /* Check if Logical Device Register is currently active */
+ if ((sch311x_sio_inb(sio_config_port, 0x30) & 0x01) == 0)
+ pr_info("Seems that LDN 0x0a is not active...\n");
+
+ /* Get the base address of the runtime registers */
+ base_addr = (sch311x_sio_inb(sio_config_port, 0x60) << 8) |
+ sch311x_sio_inb(sio_config_port, 0x61);
+ if (!base_addr) {
+ pr_err("Base address not set\n");
+ err = -ENODEV;
+ goto exit;
+ }
+ *addr = base_addr;
+
+ pr_info("Found an SMSC SCH311%d chip at 0x%04x\n", dev_id, base_addr);
+
+exit:
+ sch311x_sio_exit(sio_config_port);
+ return err;
+}
+
+static int __init sch311x_gpio_pdev_add(const unsigned short addr)
+{
+ struct sch311x_pdev_data pdata;
+ int err;
+
+ pdata.runtime_reg = addr;
+
+ sch311x_gpio_pdev = platform_device_alloc(DRV_NAME, -1);
+ if (!sch311x_gpio_pdev)
+ return -ENOMEM;
+
+ err = platform_device_add_data(sch311x_gpio_pdev,
+ &pdata, sizeof(pdata));
+ if (err) {
+ pr_err(DRV_NAME "Platform data allocation failed\n");
+ goto err;
+ }
+
+ err = platform_device_add(sch311x_gpio_pdev);
+ if (err) {
+ pr_err(DRV_NAME "Device addition failed\n");
+ goto err;
+ }
+ return 0;
+
+err:
+ platform_device_put(sch311x_gpio_pdev);
+ return err;
+}
+
+static int __init sch311x_gpio_init(void)
+{
+ int err, i;
+ unsigned short addr = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sch311x_ioports); i++)
+ if (sch311x_detect(sch311x_ioports[i], &addr) == 0)
+ break;
+
+ if (!addr)
+ return -ENODEV;
+
+ err = platform_driver_register(&sch311x_gpio_driver);
+ if (err)
+ return err;
+
+ err = sch311x_gpio_pdev_add(addr);
+ if (err)
+ goto unreg_platform_driver;
+
+ return 0;
+
+unreg_platform_driver:
+ platform_driver_unregister(&sch311x_gpio_driver);
+ return err;
+}
+
+static void __exit sch311x_gpio_exit(void)
+{
+ platform_device_unregister(sch311x_gpio_pdev);
+ platform_driver_unregister(&sch311x_gpio_driver);
+}
+
+module_init(sch311x_gpio_init);
+module_exit(sch311x_gpio_exit);
+
+MODULE_AUTHOR("Bruno Randolf <br1@einfach.org>");
+MODULE_DESCRIPTION("SMSC SCH311x GPIO Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gpio-sch311x");
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index 88f374ac7753..7c6c518929bc 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -176,8 +176,10 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
sd->id = irq_domain_add_legacy(pdev->dev.of_node, SDV_NUM_PUB_GPIOS,
sd->irq_base, 0, &irq_domain_sdv_ops, sd);
- if (!sd->id)
+ if (!sd->id) {
+ ret = -ENODEV;
goto out_free_irq;
+ }
return 0;
out_free_irq:
free_irq(pdev->irq, sd);
@@ -212,8 +214,10 @@ static int sdv_gpio_probe(struct pci_dev *pdev,
}
addr = pci_resource_start(pdev, GPIO_BAR);
- if (!addr)
+ if (!addr) {
+ ret = -ENODEV;
goto release_reg;
+ }
sd->gpio_pub_base = ioremap(addr, pci_resource_len(pdev, GPIO_BAR));
prop = of_get_property(pdev->dev.of_node, "intel,muxctl", &len);
@@ -270,7 +274,7 @@ static void sdv_gpio_remove(struct pci_dev *pdev)
kfree(sd);
}
-static DEFINE_PCI_DEVICE_TABLE(sdv_gpio_pci_ids) = {
+static const struct pci_device_id sdv_gpio_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SDV_GPIO) },
{ 0, },
};
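
gpio-sodaville.c here (and gpio-pch.c earlier in this series) drops DEFINE_PCI_DEVICE_TABLE() in favour of spelling out the const table, since the macro expanded to an equivalent definition and was being phased out. A minimal sketch of the open-coded form with placeholder device IDs:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0001) },	/* placeholder ID */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);
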
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index f2fb12c18da9..68e3fcb1acea 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -146,7 +146,7 @@ static void gsta_gpio_setup(struct gsta_gpio *chip) /* called from probe */
gpio->dbg_show = NULL;
gpio->base = gpio_base;
gpio->ngpio = GSTA_NR_GPIO;
- gpio->can_sleep = 0;
+ gpio->can_sleep = false;
gpio->to_irq = gsta_gpio_to_irq;
/*
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 2647e243d471..2776a09bee58 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -129,7 +129,7 @@ static struct gpio_chip template_chip = {
.set = stmpe_gpio_set,
.to_irq = stmpe_gpio_to_irq,
.request = stmpe_gpio_request,
- .can_sleep = 1,
+ .can_sleep = true,
};
static int stmpe_gpio_irq_set_type(struct irq_data *d, unsigned int type)
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c
index d2983e9ad6af..13d73fb2b5e1 100644
--- a/drivers/gpio/gpio-sx150x.c
+++ b/drivers/gpio/gpio-sx150x.c
@@ -436,7 +436,7 @@ static void sx150x_init_chip(struct sx150x_chip *chip,
chip->gpio_chip.set = sx150x_gpio_set;
chip->gpio_chip.to_irq = sx150x_gpio_to_irq;
chip->gpio_chip.base = pdata->gpio_base;
- chip->gpio_chip.can_sleep = 1;
+ chip->gpio_chip.can_sleep = true;
chip->gpio_chip.ngpio = chip->dev_cfg->ngpios;
if (pdata->oscio_is_gpo)
++chip->gpio_chip.ngpio;
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index da071ddbad99..07bce97647a6 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -222,7 +222,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
tb10x_gpio->gc.free = tb10x_gpio_free;
tb10x_gpio->gc.base = -1;
tb10x_gpio->gc.ngpio = ngpio;
- tb10x_gpio->gc.can_sleep = 0;
+ tb10x_gpio->gc.can_sleep = false;
ret = gpiochip_add(&tb10x_gpio->gc);
@@ -318,7 +318,7 @@ static struct platform_driver tb10x_gpio_driver = {
.remove = tb10x_gpio_remove,
.driver = {
.name = "tb10x-gpio",
- .of_match_table = of_match_ptr(tb10x_gpio_dt_ids),
+ .of_match_table = tb10x_gpio_dt_ids,
.owner = THIS_MODULE,
}
};
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index ddb5fefaa715..1019320984d7 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -127,7 +127,7 @@ static struct gpio_chip template_chip = {
.direction_output = tc3589x_gpio_direction_output,
.set = tc3589x_gpio_set,
.to_irq = tc3589x_gpio_to_irq,
- .can_sleep = 1,
+ .can_sleep = true,
};
static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type)
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index cfd3b9037bc7..2b49f878b56c 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -425,6 +425,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
struct tegra_gpio_soc_config *config;
struct resource *res;
struct tegra_gpio_bank *bank;
+ int ret;
int gpio;
int i;
int j;
@@ -494,7 +495,11 @@ static int tegra_gpio_probe(struct platform_device *pdev)
tegra_gpio_chip.of_node = pdev->dev.of_node;
- gpiochip_add(&tegra_gpio_chip);
+ ret = gpiochip_add(&tegra_gpio_chip);
+ if (ret < 0) {
+ irq_domain_remove(irq_domain);
+ return ret;
+ }
for (gpio = 0; gpio < tegra_gpio_chip.ngpio; gpio++) {
int irq = irq_create_mapping(irq_domain, gpio);
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index 7a0e956ef1ed..f9a8fbde108e 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -275,7 +275,7 @@ static int timbgpio_probe(struct platform_device *pdev)
gc->dbg_show = NULL;
gc->base = pdata->gpio_base;
gc->ngpio = pdata->nr_pins;
- gc->can_sleep = 0;
+ gc->can_sleep = false;
err = gpiochip_add(gc);
if (err)
@@ -290,8 +290,8 @@ static int timbgpio_probe(struct platform_device *pdev)
return 0;
for (i = 0; i < pdata->nr_pins; i++) {
- irq_set_chip_and_handler_name(tgpio->irq_base + i,
- &timbgpio_irqchip, handle_simple_irq, "mux");
+ irq_set_chip_and_handler(tgpio->irq_base + i,
+ &timbgpio_irqchip, handle_simple_irq);
irq_set_chip_data(tgpio->irq_base + i, tgpio);
#ifdef CONFIG_ARM
set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE);
diff --git a/drivers/gpio/gpio-tnetv107x.c b/drivers/gpio/gpio-tnetv107x.c
index 58445bb69106..4aa481579a05 100644
--- a/drivers/gpio/gpio-tnetv107x.c
+++ b/drivers/gpio/gpio-tnetv107x.c
@@ -176,7 +176,7 @@ static int __init tnetv107x_gpio_setup(void)
ctlr = &chips[i];
ctlr->chip.label = "tnetv107x";
- ctlr->chip.can_sleep = 0;
+ ctlr->chip.can_sleep = false;
ctlr->chip.base = base;
ctlr->chip.ngpio = ngpio - base;
if (ctlr->chip.ngpio > 32)
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index 29e8e750bd49..8994dfa13491 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -108,7 +108,7 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
tps6586x_gpio->gpio_chip.label = pdev->name;
tps6586x_gpio->gpio_chip.dev = &pdev->dev;
tps6586x_gpio->gpio_chip.ngpio = 4;
- tps6586x_gpio->gpio_chip.can_sleep = 1;
+ tps6586x_gpio->gpio_chip.can_sleep = true;
/* FIXME: add handling of GPIOs as dedicated inputs */
tps6586x_gpio->gpio_chip.direction_output = tps6586x_gpio_output;
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 06146219d9d2..b6e818e68007 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -143,7 +143,7 @@ static int tps65910_gpio_probe(struct platform_device *pdev)
default:
return -EINVAL;
}
- tps65910_gpio->gpio_chip.can_sleep = 1;
+ tps65910_gpio->gpio_chip.can_sleep = true;
tps65910_gpio->gpio_chip.direction_input = tps65910_gpio_input;
tps65910_gpio->gpio_chip.direction_output = tps65910_gpio_output;
tps65910_gpio->gpio_chip.set = tps65910_gpio_set;
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index 276a4229b032..59ee486cb8b9 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -79,7 +79,7 @@ static struct gpio_chip template_chip = {
.direction_output = tps65912_gpio_output,
.get = tps65912_gpio_get,
.set = tps65912_gpio_set,
- .can_sleep = 1,
+ .can_sleep = true,
.ngpio = 5,
.base = -1,
};
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index b97d6a6577b9..8b88ca2eda9c 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
if (offset < TWL4030_GPIO_MAX)
ret = twl4030_set_gpio_direction(offset, 1);
else
- ret = -EINVAL;
+ ret = -EINVAL; /* LED outputs can't be set as input */
if (!ret)
priv->direction &= ~BIT(offset);
@@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
{
struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
- int ret = -EINVAL;
+ int ret = 0;
mutex_lock(&priv->mutex);
- if (offset < TWL4030_GPIO_MAX)
+ if (offset < TWL4030_GPIO_MAX) {
ret = twl4030_set_gpio_direction(offset, 0);
+ if (ret) {
+ mutex_unlock(&priv->mutex);
+ return ret;
+ }
+ }
+
+ /*
+ * LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output
+ */
priv->direction |= BIT(offset);
mutex_unlock(&priv->mutex);
@@ -387,7 +396,7 @@ static struct gpio_chip template_chip = {
.direction_output = twl_direction_out,
.set = twl_set,
.to_irq = twl_to_irq,
- .can_sleep = 1,
+ .can_sleep = true,
};
/*----------------------------------------------------------------------*/
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index d420d30b86e7..0caf5cd1b47d 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -77,7 +77,7 @@ static struct gpio_chip twl6040gpo_chip = {
.get = twl6040gpo_get,
.direction_output = twl6040gpo_direction_out,
.set = twl6040gpo_set,
- .can_sleep = 1,
+ .can_sleep = true,
};
/*----------------------------------------------------------------------*/
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index 06fb5cf99ded..2445fe771179 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -64,7 +64,7 @@ static int ucb1400_gpio_probe(struct platform_device *dev)
ucb->gc.direction_output = ucb1400_gpio_dir_out;
ucb->gc.get = ucb1400_gpio_get;
ucb->gc.set = ucb1400_gpio_set;
- ucb->gc.can_sleep = 1;
+ ucb->gc.can_sleep = true;
err = gpiochip_add(&ucb->gc);
if (err)
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
index 5ac2919197fe..79e3b5836712 100644
--- a/drivers/gpio/gpio-viperboard.c
+++ b/drivers/gpio/gpio-viperboard.c
@@ -413,7 +413,7 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
vb_gpio->gpioa.owner = THIS_MODULE;
vb_gpio->gpioa.base = -1;
vb_gpio->gpioa.ngpio = 16;
- vb_gpio->gpioa.can_sleep = 1;
+ vb_gpio->gpioa.can_sleep = true;
vb_gpio->gpioa.set = vprbrd_gpioa_set;
vb_gpio->gpioa.get = vprbrd_gpioa_get;
vb_gpio->gpioa.direction_input = vprbrd_gpioa_direction_input;
@@ -430,7 +430,7 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
vb_gpio->gpiob.owner = THIS_MODULE;
vb_gpio->gpiob.base = -1;
vb_gpio->gpiob.ngpio = 16;
- vb_gpio->gpiob.can_sleep = 1;
+ vb_gpio->gpiob.can_sleep = true;
vb_gpio->gpiob.set = vprbrd_gpiob_set;
vb_gpio->gpiob.get = vprbrd_gpiob_get;
vb_gpio->gpiob.direction_input = vprbrd_gpiob_direction_input;
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index cddfa22edb41..0fd23b6a753d 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -214,7 +214,7 @@ static void vx855gpio_gpio_setup(struct vx855_gpio *vg)
c->dbg_show = NULL;
c->base = 0;
c->ngpio = NR_VX855_GP;
- c->can_sleep = 0;
+ c->can_sleep = false;
c->names = vx855gpio_names;
}
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index 456000c5c457..b18a1a26425e 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -240,7 +240,7 @@ static struct gpio_chip template_chip = {
.to_irq = wm831x_gpio_to_irq,
.set_debounce = wm831x_gpio_set_debounce,
.dbg_show = wm831x_gpio_dbg_show,
- .can_sleep = 1,
+ .can_sleep = true,
};
static int wm831x_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c
index fc49154be7b1..2487f9d575d3 100644
--- a/drivers/gpio/gpio-wm8350.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -106,7 +106,7 @@ static struct gpio_chip template_chip = {
.direction_output = wm8350_gpio_direction_out,
.set = wm8350_gpio_set,
.to_irq = wm8350_gpio_to_irq,
- .can_sleep = 1,
+ .can_sleep = true,
};
static int wm8350_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index a53dbdefc7ee..d93b6b581677 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -242,7 +242,7 @@ static struct gpio_chip template_chip = {
.set = wm8994_gpio_set,
.to_irq = wm8994_gpio_to_irq,
.dbg_show = wm8994_gpio_dbg_show,
- .can_sleep = 1,
+ .can_sleep = true,
};
static int wm8994_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
new file mode 100644
index 000000000000..7081304d6797
--- /dev/null
+++ b/drivers/gpio/gpio-xtensa.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2013 TangoTec Ltd.
+ * Author: Baruch Siach <baruch@tkos.co.il>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Driver for the Xtensa LX4 GPIO32 Option
+ *
+ * Documentation: Xtensa LX4 Microprocessor Data Book, Section 2.22
+ *
+ * GPIO32 is a standard optional extension to the Xtensa architecture core that
+ * provides preconfigured output and input ports for intra SoC signaling. The
+ * GPIO32 option is implemented as 32bit Tensilica Instruction Extension (TIE)
+ * output state called EXPSTATE, and 32bit input wire called IMPWIRE. This
+ * driver treats input and output states as two distinct devices.
+ *
+ * Access to GPIO32 specific instructions is controlled by the CPENABLE
+ * (Coprocessor Enable Bits) register. By default Xtensa Linux startup code
+ * disables access to all coprocessors. This driver sets the CPENABLE bit
+ * corresponding to GPIO32 before any GPIO32 specific instruction, and restores
+ * CPENABLE state after that.
+ *
+ * This driver is currently incompatible with SMP. The GPIO32 extension is not
+ * guaranteed to be available in all cores. Moreover, each core controls a
+ * different set of IO wires. A theoretical SMP aware version of this driver
+ * would need to have a per core workqueue to do the actual GPIO manipulation.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+
+#include <asm/coprocessor.h> /* CPENABLE read/write macros */
+
+#ifndef XCHAL_CP_ID_XTIOP
+#error GPIO32 option is not enabled for your xtensa core variant
+#endif
+
+#if XCHAL_HAVE_CP
+
+static inline unsigned long enable_cp(unsigned long *cpenable)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ RSR_CPENABLE(*cpenable);
+ WSR_CPENABLE(*cpenable | BIT(XCHAL_CP_ID_XTIOP));
+
+ return flags;
+}
+
+static inline void disable_cp(unsigned long flags, unsigned long cpenable)
+{
+ WSR_CPENABLE(cpenable);
+ local_irq_restore(flags);
+}
+
+#else
+
+static inline unsigned long enable_cp(unsigned long *cpenable)
+{
+ *cpenable = 0; /* avoid uninitialized value warning */
+ return 0;
+}
+
+static inline void disable_cp(unsigned long flags, unsigned long cpenable)
+{
+}
+
+#endif /* XCHAL_HAVE_CP */
+
+static int xtensa_impwire_get_direction(struct gpio_chip *gc, unsigned offset)
+{
+ return 1; /* input only */
+}
+
+static int xtensa_impwire_get_value(struct gpio_chip *gc, unsigned offset)
+{
+ unsigned long flags, saved_cpenable;
+ u32 impwire;
+
+ flags = enable_cp(&saved_cpenable);
+ __asm__ __volatile__("read_impwire %0" : "=a" (impwire));
+ disable_cp(flags, saved_cpenable);
+
+ return !!(impwire & BIT(offset));
+}
+
+static void xtensa_impwire_set_value(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ BUG(); /* IMPWIRE is input only; set should never be called */
+}
+
+static int xtensa_expstate_get_direction(struct gpio_chip *gc, unsigned offset)
+{
+ return 0; /* output only */
+}
+
+static int xtensa_expstate_get_value(struct gpio_chip *gc, unsigned offset)
+{
+ unsigned long flags, saved_cpenable;
+ u32 expstate;
+
+ flags = enable_cp(&saved_cpenable);
+ __asm__ __volatile__("rur.expstate %0" : "=a" (expstate));
+ disable_cp(flags, saved_cpenable);
+
+ return !!(expstate & BIT(offset));
+}
+
+static void xtensa_expstate_set_value(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ unsigned long flags, saved_cpenable;
+ u32 mask = BIT(offset);
+ u32 val = value ? BIT(offset) : 0;
+
+ flags = enable_cp(&saved_cpenable);
+ __asm__ __volatile__("wrmsk_expstate %0, %1"
+ :: "a" (val), "a" (mask));
+ disable_cp(flags, saved_cpenable);
+}
+
+static struct gpio_chip impwire_chip = {
+ .label = "impwire",
+ .base = -1,
+ .ngpio = 32,
+ .get_direction = xtensa_impwire_get_direction,
+ .get = xtensa_impwire_get_value,
+ .set = xtensa_impwire_set_value,
+};
+
+static struct gpio_chip expstate_chip = {
+ .label = "expstate",
+ .base = -1,
+ .ngpio = 32,
+ .get_direction = xtensa_expstate_get_direction,
+ .get = xtensa_expstate_get_value,
+ .set = xtensa_expstate_set_value,
+};
+
+static int xtensa_gpio_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = gpiochip_add(&impwire_chip);
+ if (ret)
+ return ret;
+ return gpiochip_add(&expstate_chip);
+}
+
+static struct platform_driver xtensa_gpio_driver = {
+ .driver = {
+ .name = "xtensa-gpio",
+ .owner = THIS_MODULE,
+ },
+ .probe = xtensa_gpio_probe,
+};
+
+static int __init xtensa_gpio_init(void)
+{
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_simple("xtensa-gpio", 0, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ return platform_driver_register(&xtensa_gpio_driver);
+}
+device_initcall(xtensa_gpio_init);
+
+MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
+MODULE_DESCRIPTION("Xtensa LX4 GPIO32 driver");
+MODULE_LICENSE("GPL");
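
Both xtensa chips register with base = -1, so their GPIO numbers are assigned dynamically and a consumer has to locate the chip by label before using the integer API. An illustrative sketch, with all function names hypothetical:

#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/string.h>

static int example_match_label(struct gpio_chip *chip, void *data)
{
	return !strcmp(chip->label, data);
}

static int example_read_impwire(unsigned int wire)
{
	struct gpio_chip *chip;
	int gpio, ret;

	chip = gpiochip_find("impwire", example_match_label);
	if (!chip)
		return -ENODEV;

	gpio = chip->base + wire;	/* base was assigned dynamically */
	ret = gpio_request(gpio, "example");
	if (ret)
		return ret;

	ret = gpio_get_value(gpio);	/* 0 or 1 from IMPWIRE */
	gpio_free(gpio);
	return ret;
}
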
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index ae0ffdce8bd5..716ee9843110 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -12,11 +12,13 @@
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
#include <linux/export.h>
-#include <linux/acpi_gpio.h>
#include <linux/acpi.h>
#include <linux/interrupt.h>
+#include "gpiolib.h"
+
struct acpi_gpio_evt_pin {
struct list_head node;
acpi_handle *evt_handle;
@@ -94,7 +96,7 @@ static void acpi_gpio_evt_dh(acpi_handle handle, void *data)
* gpio pins have acpi event methods and assigns interrupt handlers that calls
* the acpi event methods for those pins.
*/
-void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
+static void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
{
struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
struct acpi_resource *res;
@@ -192,7 +194,6 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
irq);
}
}
-EXPORT_SYMBOL(acpi_gpiochip_request_interrupts);
/**
* acpi_gpiochip_free_interrupts() - Free GPIO _EVT ACPI event interrupts.
@@ -203,7 +204,7 @@ EXPORT_SYMBOL(acpi_gpiochip_request_interrupts);
* The remaining ACPI event interrupts associated with the chip are freed
* automatically.
*/
-void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
+static void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
{
acpi_handle handle;
acpi_status status;
@@ -230,7 +231,6 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
acpi_detach_data(handle, acpi_gpio_evt_dh);
kfree(evt_pins);
}
-EXPORT_SYMBOL(acpi_gpiochip_free_interrupts);
struct acpi_gpio_lookup {
struct acpi_gpio_info info;
@@ -307,6 +307,15 @@ struct gpio_desc *acpi_get_gpiod_by_index(struct device *dev, int index,
if (lookup.desc && info)
*info = lookup.info;
- return lookup.desc ? lookup.desc : ERR_PTR(-ENODEV);
+ return lookup.desc ? lookup.desc : ERR_PTR(-ENOENT);
+}
+
+void acpi_gpiochip_add(struct gpio_chip *chip)
+{
+ acpi_gpiochip_request_interrupts(chip);
+}
+
+void acpi_gpiochip_remove(struct gpio_chip *chip)
+{
+ acpi_gpiochip_free_interrupts(chip);
}
-EXPORT_SYMBOL_GPL(acpi_get_gpiod_by_index);
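
With the hunks above, acpi_gpiochip_request_interrupts() and acpi_gpiochip_free_interrupts() become static and are reached only through the new acpi_gpiochip_add()/acpi_gpiochip_remove() wrappers, which the gpiolib core is expected to call around chip registration (the call sites live in gpiolib.c and are not shown in this hunk). A hedged sketch of what that means for a driver probe path; the wrapper name is hypothetical:

#include <linux/gpio/driver.h>

static int example_probe_chip(struct gpio_chip *chip)
{
	/*
	 * Previously a driver would follow gpiochip_add() with an
	 * explicit acpi_gpiochip_request_interrupts(chip); that call
	 * is now expected to happen inside the core via
	 * acpi_gpiochip_add(), so registering the chip is enough.
	 */
	return gpiochip_add(chip);
}
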
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 85f772c0b26a..50c4922fe53a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -10,12 +10,13 @@
#include <linux/seq_file.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
-#include <linux/acpi_gpio.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/gpio/driver.h>
+#include "gpiolib.h"
+
#define CREATE_TRACE_POINTS
#include <trace/events/gpio.h>
@@ -84,40 +85,57 @@ static DEFINE_IDR(dirent_idr);
static int gpiod_request(struct gpio_desc *desc, const char *label);
static void gpiod_free(struct gpio_desc *desc);
+/* With descriptor prefix */
+
#ifdef CONFIG_DEBUG_FS
-#define gpiod_emerg(desc, fmt, ...) \
- pr_emerg("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label, \
+#define gpiod_emerg(desc, fmt, ...) \
+ pr_emerg("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\
##__VA_ARGS__)
-#define gpiod_crit(desc, fmt, ...) \
- pr_crit("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label, \
+#define gpiod_crit(desc, fmt, ...) \
+ pr_crit("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
##__VA_ARGS__)
-#define gpiod_err(desc, fmt, ...) \
- pr_err("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label, \
+#define gpiod_err(desc, fmt, ...) \
+ pr_err("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
##__VA_ARGS__)
-#define gpiod_warn(desc, fmt, ...) \
- pr_warn("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label, \
+#define gpiod_warn(desc, fmt, ...) \
+ pr_warn("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
##__VA_ARGS__)
-#define gpiod_info(desc, fmt, ...) \
- pr_info("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label, \
+#define gpiod_info(desc, fmt, ...) \
+ pr_info("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
##__VA_ARGS__)
-#define gpiod_dbg(desc, fmt, ...) \
- pr_debug("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label, \
+#define gpiod_dbg(desc, fmt, ...) \
+ pr_debug("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\
##__VA_ARGS__)
#else
-#define gpiod_emerg(desc, fmt, ...) \
+#define gpiod_emerg(desc, fmt, ...) \
pr_emerg("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
-#define gpiod_crit(desc, fmt, ...) \
+#define gpiod_crit(desc, fmt, ...) \
pr_crit("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
-#define gpiod_err(desc, fmt, ...) \
+#define gpiod_err(desc, fmt, ...) \
pr_err("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
-#define gpiod_warn(desc, fmt, ...) \
+#define gpiod_warn(desc, fmt, ...) \
pr_warn("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
-#define gpiod_info(desc, fmt, ...) \
+#define gpiod_info(desc, fmt, ...) \
pr_info("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
-#define gpiod_dbg(desc, fmt, ...) \
+#define gpiod_dbg(desc, fmt, ...) \
pr_debug("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
#endif
+/* With chip prefix */
+
+#define chip_emerg(chip, fmt, ...) \
+ pr_emerg("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+#define chip_crit(chip, fmt, ...) \
+ pr_crit("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+#define chip_err(chip, fmt, ...) \
+ pr_err("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+#define chip_warn(chip, fmt, ...) \
+ pr_warn("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+#define chip_info(chip, fmt, ...) \
+ pr_info("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+#define chip_dbg(chip, fmt, ...) \
+ pr_debug("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+
static inline void desc_set_label(struct gpio_desc *d, const char *label)
{
#ifdef CONFIG_DEBUG_FS
@@ -151,9 +169,10 @@ EXPORT_SYMBOL_GPL(gpio_to_desc);
static struct gpio_desc *gpiochip_offset_to_desc(struct gpio_chip *chip,
unsigned int offset)
{
- unsigned int gpio = chip->base + offset;
+ if (offset >= chip->ngpio)
+ return ERR_PTR(-EINVAL);
- return gpio_to_desc(gpio);
+ return &chip->desc[offset];
}
/**
@@ -187,7 +206,8 @@ static int gpio_ensure_requested(struct gpio_desc *desc)
if (WARN(test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0,
"autorequest GPIO-%d\n", gpio)) {
if (!try_module_get(chip->owner)) {
- pr_err("GPIO-%d: module can't be gotten \n", gpio);
+ gpiod_err(desc, "%s: module can't be gotten\n",
+ __func__);
clear_bit(FLAG_REQUESTED, &desc->flags);
/* lose */
return -EIO;
@@ -393,7 +413,7 @@ static const DEVICE_ATTR(value, 0644,
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
{
- struct sysfs_dirent *value_sd = priv;
+ struct kernfs_node *value_sd = priv;
sysfs_notify_dirent(value_sd);
return IRQ_HANDLED;
@@ -402,7 +422,7 @@ static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
unsigned long gpio_flags)
{
- struct sysfs_dirent *value_sd;
+ struct kernfs_node *value_sd;
unsigned long irq_flags;
int ret, irq, id;
@@ -808,8 +828,8 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
test_bit(FLAG_EXPORT, &desc->flags)) {
spin_unlock_irqrestore(&gpio_lock, flags);
- pr_debug("%s: gpio %d unavailable (requested=%d, exported=%d)\n",
- __func__, desc_to_gpio(desc),
+ gpiod_dbg(desc, "%s: unavailable (requested=%d, exported=%d)\n",
+ __func__,
test_bit(FLAG_REQUESTED, &desc->flags),
test_bit(FLAG_EXPORT, &desc->flags));
status = -EPERM;
@@ -857,8 +877,7 @@ fail_unregister_device:
device_unregister(dev);
fail_unlock:
mutex_unlock(&sysfs_lock);
- pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
- status);
+ gpiod_dbg(desc, "%s: status %d\n", __func__, status);
return status;
}
EXPORT_SYMBOL_GPL(gpiod_export);
@@ -906,8 +925,7 @@ int gpiod_export_link(struct device *dev, const char *name,
mutex_unlock(&sysfs_lock);
if (status)
- pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
- status);
+ gpiod_dbg(desc, "%s: status %d\n", __func__, status);
return status;
}
@@ -951,8 +969,7 @@ unlock:
mutex_unlock(&sysfs_lock);
if (status)
- pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
- status);
+ gpiod_dbg(desc, "%s: status %d\n", __func__, status);
return status;
}
@@ -994,8 +1011,7 @@ void gpiod_unexport(struct gpio_desc *desc)
}
if (status)
- pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
- status);
+ gpiod_dbg(desc, "%s: status %d\n", __func__, status);
}
EXPORT_SYMBOL_GPL(gpiod_unexport);
@@ -1034,8 +1050,7 @@ static int gpiochip_export(struct gpio_chip *chip)
chip->desc[gpio++].chip = NULL;
spin_unlock_irqrestore(&gpio_lock, flags);
- pr_debug("%s: chip %s status %d\n", __func__,
- chip->label, status);
+ chip_dbg(chip, "%s: status %d\n", __func__, status);
}
return status;
@@ -1051,15 +1066,14 @@ static void gpiochip_unexport(struct gpio_chip *chip)
if (dev) {
put_device(dev);
device_unregister(dev);
- chip->exported = 0;
+ chip->exported = false;
status = 0;
} else
status = -ENODEV;
mutex_unlock(&sysfs_lock);
if (status)
- pr_debug("%s: chip %s status %d\n", __func__,
- chip->label, status);
+ chip_dbg(chip, "%s: status %d\n", __func__, status);
}
static int __init gpiolib_sysfs_init(void)
@@ -1213,6 +1227,7 @@ int gpiochip_add(struct gpio_chip *chip)
#endif
of_gpiochip_add(chip);
+ acpi_gpiochip_add(chip);
if (status)
goto fail;
@@ -1221,7 +1236,7 @@ int gpiochip_add(struct gpio_chip *chip)
if (status)
goto fail;
- pr_debug("gpiochip_add: registered GPIOs %d to %d on device: %s\n",
+ pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__,
chip->base, chip->base + chip->ngpio - 1,
chip->label ? : "generic");
@@ -1231,7 +1246,7 @@ unlock:
spin_unlock_irqrestore(&gpio_lock, flags);
fail:
/* failures here can mean systems won't boot... */
- pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
+ pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__,
chip->base, chip->base + chip->ngpio - 1,
chip->label ? : "generic");
return status;
@@ -1254,6 +1269,7 @@ int gpiochip_remove(struct gpio_chip *chip)
gpiochip_remove_pin_ranges(chip);
of_gpiochip_remove(chip);
+ acpi_gpiochip_remove(chip);
for (id = 0; id < chip->ngpio; id++) {
if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags)) {
@@ -1339,8 +1355,7 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip,
pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
if (!pin_range) {
- pr_err("%s: GPIO chip: failed to allocate pin ranges\n",
- chip->label);
+ chip_err(chip, "failed to allocate pin ranges\n");
return -ENOMEM;
}
@@ -1361,9 +1376,8 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip,
pinctrl_add_gpio_range(pctldev, &pin_range->range);
- pr_debug("GPIO chip %s: created GPIO range %d->%d ==> %s PINGRP %s\n",
- chip->label, gpio_offset,
- gpio_offset + pin_range->range.npins - 1,
+ chip_dbg(chip, "created GPIO range %d->%d ==> %s PINGRP %s\n",
+ gpio_offset, gpio_offset + pin_range->range.npins - 1,
pinctrl_dev_get_devname(pctldev), pin_group);
list_add_tail(&pin_range->node, &chip->pin_ranges);
@@ -1390,8 +1404,7 @@ int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
if (!pin_range) {
- pr_err("%s: GPIO chip: failed to allocate pin ranges\n",
- chip->label);
+ chip_err(chip, "failed to allocate pin ranges\n");
return -ENOMEM;
}
@@ -1406,13 +1419,12 @@ int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
&pin_range->range);
if (IS_ERR(pin_range->pctldev)) {
ret = PTR_ERR(pin_range->pctldev);
- pr_err("%s: GPIO chip: could not create pin range\n",
- chip->label);
+ chip_err(chip, "could not create pin range\n");
kfree(pin_range);
return ret;
}
- pr_debug("GPIO chip %s: created GPIO range %d->%d ==> %s PIN %d->%d\n",
- chip->label, gpio_offset, gpio_offset + npins - 1,
+ chip_dbg(chip, "created GPIO range %d->%d ==> %s PIN %d->%d\n",
+ gpio_offset, gpio_offset + npins - 1,
pinctl_name,
pin_offset, pin_offset + npins - 1);
@@ -1499,8 +1511,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
}
done:
if (status)
- pr_debug("_gpio_request: gpio-%d (%s) status %d\n",
- desc_to_gpio(desc), label ? : "?", status);
+ gpiod_dbg(desc, "%s: status %d\n", __func__, status);
spin_unlock_irqrestore(&gpio_lock, flags);
return status;
}
@@ -1701,7 +1712,7 @@ int gpiod_direction_input(struct gpio_desc *desc)
if (!chip->get || !chip->direction_input) {
gpiod_warn(desc,
"%s: missing get() or direction_input() operations\n",
- __func__);
+ __func__);
return -EIO;
}
@@ -1721,7 +1732,8 @@ int gpiod_direction_input(struct gpio_desc *desc)
if (status) {
status = chip->request(chip, offset);
if (status < 0) {
- gpiod_dbg(desc, "chip request fail, %d\n", status);
+ gpiod_dbg(desc, "%s: chip request fail, %d\n",
+ __func__, status);
/* and it's not available to anyone else ...
* gpio_request() is the fully clean solution.
*/
@@ -1739,7 +1751,7 @@ lose:
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
if (status)
- gpiod_dbg(desc, "%s status %d\n", __func__, status);
+ gpiod_dbg(desc, "%s: status %d\n", __func__, status);
return status;
}
EXPORT_SYMBOL_GPL(gpiod_direction_input);
@@ -1806,7 +1818,8 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
if (status) {
status = chip->request(chip, offset);
if (status < 0) {
- gpiod_dbg(desc, "chip request fail, %d\n", status);
+ gpiod_dbg(desc, "%s: chip request fail, %d\n",
+ __func__, status);
/* and it's not available to anyone else ...
* gpio_request() is the fully clean solution.
*/
@@ -2259,18 +2272,14 @@ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
/**
- * gpiod_add_table() - register GPIO device consumers
- * @table: array of consumers to register
- * @num: number of consumers in table
+ * gpiod_add_lookup_table() - register GPIO device consumers
+ * @table: table of consumers to register
*/
-void gpiod_add_table(struct gpiod_lookup *table, size_t size)
+void gpiod_add_lookup_table(struct gpiod_lookup_table *table)
{
mutex_lock(&gpio_lookup_lock);
- while (size--) {
- list_add_tail(&table->list, &gpio_lookup_list);
- table++;
- }
+ list_add_tail(&table->list, &gpio_lookup_list);
mutex_unlock(&gpio_lookup_lock);
}
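For context, registering platform GPIO mappings now takes a single gpiod_lookup_table rather than an array plus a count. The sketch below is illustrative and not part of this diff; the field names (dev_id, table, chip_label, chip_hwnum, con_id, idx, flags) are taken from the lookup code in this file, while the device name, chip label, pin number and the GPIO_ACTIVE_LOW flag value are assumptions, as is the in-place initialization of the table member (the usual board-code pattern).

	static struct gpiod_lookup_table board_gpios = {
		.dev_id = "foo-device.0",              /* hypothetical consumer device name */
		.table = {
			{
				.chip_label = "gpiochip0", /* hypothetical GPIO controller */
				.chip_hwnum = 12,          /* hardware offset within that chip */
				.con_id     = "reset",     /* function name the consumer will ask for */
				.idx        = 0,
				.flags      = GPIO_ACTIVE_LOW, /* assumed enum gpio_lookup_flags value */
			},
			{ },                               /* chip_label == NULL ends gpiod_find()'s walk */
		},
	};

	static void __init board_init_gpios(void)
	{
		gpiod_add_lookup_table(&board_gpios);      /* links the table onto gpio_lookup_list */
	}

The empty sentinel entry matches the rewritten gpiod_find() loop below, which stops at the first entry whose chip_label is NULL.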
@@ -2326,76 +2335,92 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
return desc;
}
-static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
- unsigned int idx,
- enum gpio_lookup_flags *flags)
+static struct gpiod_lookup_table *gpiod_find_lookup_table(struct device *dev)
{
const char *dev_id = dev ? dev_name(dev) : NULL;
- struct gpio_desc *desc = ERR_PTR(-ENODEV);
- unsigned int match, best = 0;
- struct gpiod_lookup *p;
+ struct gpiod_lookup_table *table;
mutex_lock(&gpio_lookup_lock);
- list_for_each_entry(p, &gpio_lookup_list, list) {
- match = 0;
+ list_for_each_entry(table, &gpio_lookup_list, list) {
+ if (table->dev_id && dev_id) {
+ /*
+ * Valid strings on both ends, must be identical to have
+ * a match
+ */
+ if (!strcmp(table->dev_id, dev_id))
+ goto found;
+ } else {
+ /*
+ * One of the pointers is NULL, so both must be NULL to have
+ * a match
+ */
+ if (dev_id == table->dev_id)
+ goto found;
+ }
+ }
+ table = NULL;
- if (p->dev_id) {
- if (!dev_id || strcmp(p->dev_id, dev_id))
- continue;
+found:
+ mutex_unlock(&gpio_lookup_lock);
+ return table;
+}
- match += 2;
- }
+static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
+ unsigned int idx,
+ enum gpio_lookup_flags *flags)
+{
+ struct gpio_desc *desc = ERR_PTR(-ENOENT);
+ struct gpiod_lookup_table *table;
+ struct gpiod_lookup *p;
- if (p->con_id) {
- if (!con_id || strcmp(p->con_id, con_id))
- continue;
+ table = gpiod_find_lookup_table(dev);
+ if (!table)
+ return desc;
- match += 1;
- }
+ for (p = &table->table[0]; p->chip_label; p++) {
+ struct gpio_chip *chip;
+ /* idx must always match exactly */
if (p->idx != idx)
continue;
- if (match > best) {
- struct gpio_chip *chip;
+ /* If the lookup entry has a con_id, require exact match */
+ if (p->con_id && (!con_id || strcmp(p->con_id, con_id)))
+ continue;
- chip = find_chip_by_name(p->chip_label);
+ chip = find_chip_by_name(p->chip_label);
- if (!chip) {
- dev_warn(dev, "cannot find GPIO chip %s\n",
- p->chip_label);
- continue;
- }
+ if (!chip) {
+ dev_err(dev, "cannot find GPIO chip %s\n",
+ p->chip_label);
+ return ERR_PTR(-ENODEV);
+ }
- if (chip->ngpio <= p->chip_hwnum) {
- dev_warn(dev, "GPIO chip %s has %d GPIOs\n",
- chip->label, chip->ngpio);
- continue;
- }
+ if (chip->ngpio <= p->chip_hwnum) {
+ dev_err(dev,
+ "requested GPIO %d is out of range [0..%d] for chip %s\n",
+ idx, chip->ngpio, chip->label);
+ return ERR_PTR(-EINVAL);
+ }
- desc = gpio_to_desc(chip->base + p->chip_hwnum);
- *flags = p->flags;
+ desc = gpiochip_offset_to_desc(chip, p->chip_hwnum);
+ *flags = p->flags;
- if (match != 3)
- best = match;
- else
- break;
- }
+ return desc;
}
- mutex_unlock(&gpio_lookup_lock);
-
return desc;
}
/**
* gpio_get - obtain a GPIO for a given GPIO function
- * @dev: GPIO consumer
+ * @dev: GPIO consumer, can be NULL for system-global GPIOs
* @con_id: function within the GPIO consumer
*
* Return the GPIO descriptor corresponding to the function con_id of device
- * dev, or an IS_ERR() condition if an error occured.
+ * dev, -ENOENT if no GPIO has been assigned to the requested function, or
+ * another IS_ERR() code if an error occurred while trying to acquire the GPIO.
*/
struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id)
{
@@ -2405,14 +2430,16 @@ EXPORT_SYMBOL_GPL(gpiod_get);
/**
* gpiod_get_index - obtain a GPIO from a multi-index GPIO function
- * @dev: GPIO consumer
+ * @dev: GPIO consumer, can be NULL for system-global GPIOs
* @con_id: function within the GPIO consumer
* @idx: index of the GPIO to obtain in the consumer
*
 * This variant of gpiod_get() allows access to GPIOs other than the first
* defined one for functions that define several GPIOs.
*
- * Return a valid GPIO descriptor, or an IS_ERR() condition in case of error.
+ * Return a valid GPIO descriptor, -ENOENT if no GPIO has been assigned to the
+ * requested function and/or index, or another IS_ERR() code if an error
+ * occurred while trying to acquire the GPIO.
*/
struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
const char *con_id,
@@ -2437,13 +2464,9 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
* Either we are not using DT or ACPI, or their lookup did not return
* a result. In that case, use platform lookup as a fallback.
*/
- if (!desc || IS_ERR(desc)) {
- struct gpio_desc *pdesc;
+ if (!desc || desc == ERR_PTR(-ENOENT)) {
dev_dbg(dev, "using lookup tables for GPIO lookup");
- pdesc = gpiod_find(dev, con_id, idx, &flags);
- /* If used as fallback, do not replace the previous error */
- if (!IS_ERR(pdesc) || !desc)
- desc = pdesc;
+ desc = gpiod_find(dev, con_id, idx, &flags);
}
if (IS_ERR(desc)) {
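On the consumer side, the return convention documented above (ERR_PTR(-ENOENT) when no GPIO is assigned to the requested function, any other IS_ERR() code for a genuine failure) leads to a handling pattern like the following. This is a hedged sketch rather than code from this series; the device pointer, the "reset" con_id and the decision to treat the GPIO as optional are assumptions.

	struct gpio_desc *reset;

	reset = gpiod_get_index(&pdev->dev, "reset", 0);
	if (IS_ERR(reset)) {
		if (PTR_ERR(reset) == -ENOENT)
			reset = NULL;          /* no GPIO described for this board: optional */
		else
			return PTR_ERR(reset); /* lookup or chip error: propagate */
	}

Because -ENOENT is now reserved for "nothing assigned", the fallback chain in gpiod_get_index() above only consults the platform lookup tables when DT/ACPI returned exactly that code, instead of masking real errors as it could before.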
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
new file mode 100644
index 000000000000..82be586c1f90
--- /dev/null
+++ b/drivers/gpio/gpiolib.h
@@ -0,0 +1,46 @@
+/*
+ * Internal GPIO functions.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef GPIOLIB_H
+#define GPIOLIB_H
+
+#include <linux/err.h>
+#include <linux/device.h>
+
+/**
+ * struct acpi_gpio_info - ACPI GPIO specific information
+ * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
+ * @active_low: in case of @gpioint, the pin is active low
+ */
+struct acpi_gpio_info {
+ bool gpioint;
+ bool active_low;
+};
+
+#ifdef CONFIG_ACPI
+void acpi_gpiochip_add(struct gpio_chip *chip);
+void acpi_gpiochip_remove(struct gpio_chip *chip);
+
+struct gpio_desc *acpi_get_gpiod_by_index(struct device *dev, int index,
+ struct acpi_gpio_info *info);
+#else
+static inline void acpi_gpiochip_add(struct gpio_chip *chip) { }
+static inline void acpi_gpiochip_remove(struct gpio_chip *chip) { }
+
+static inline struct gpio_desc *
+acpi_get_gpiod_by_index(struct device *dev, int index,
+ struct acpi_gpio_info *info)
+{
+ return ERR_PTR(-ENOSYS);
+}
+#endif
+
+#endif /* GPIOLIB_H */
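The #ifdef CONFIG_ACPI block above lets gpiolib.c call these helpers unconditionally: on non-ACPI builds the add/remove hooks compile to empty inlines and the lookup stub returns ERR_PTR(-ENOSYS). A caller such as acpi_find_gpio() (touched in an earlier hunk of this diff) would plausibly consume the acpi_gpio_info structure along these lines; this is a guess at its shape for illustration, not the actual function body:

	struct acpi_gpio_info info;
	struct gpio_desc *desc;

	desc = acpi_get_gpiod_by_index(dev, idx, &info);
	if (IS_ERR(desc))
		return desc;

	/* GpioInt resources carry their own polarity; reflect it in the lookup flags */
	if (info.gpioint && info.active_low)
		*flags |= GPIO_ACTIVE_LOW;

	return desc;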
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f86427591167..8e7fa4dbaed8 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -20,6 +20,10 @@ menuconfig DRM
details. You should also select and configure AGP
(/dev/agpgart) support if it is available for your platform.
+config DRM_MIPI_DSI
+ bool
+ depends on DRM
+
config DRM_USB
tristate
depends on DRM
@@ -188,6 +192,10 @@ source "drivers/gpu/drm/tilcdc/Kconfig"
source "drivers/gpu/drm/qxl/Kconfig"
+source "drivers/gpu/drm/bochs/Kconfig"
+
source "drivers/gpu/drm/msm/Kconfig"
source "drivers/gpu/drm/tegra/Kconfig"
+
+source "drivers/gpu/drm/panel/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index cc08b845f965..292a79d64146 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -18,6 +18,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-$(CONFIG_PCI) += ati_pcigart.o
+drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-usb-y := drm_usb.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
CFLAGS_drm_trace_points.o := -I$(src)
obj-$(CONFIG_DRM) += drm.o
+obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-$(CONFIG_DRM_USB) += drm_usb.o
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
@@ -56,6 +58,8 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_DRM_TILCDC) += tilcdc/
obj-$(CONFIG_DRM_QXL) += qxl/
+obj-$(CONFIG_DRM_BOCHS) += bochs/
obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-y += i2c/
+obj-y += panel/
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
index 40d371521fe1..50ae88ad4d76 100644
--- a/drivers/gpu/drm/armada/Kconfig
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -5,6 +5,7 @@ config DRM_ARMADA
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
help
Support the "LCD" controllers found on the Marvell Armada 510
devices. There are two controllers on the device, each controller
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index eef09ec9a5ff..a72cae03b99b 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *,
extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_lastclose(struct drm_device *);
void armada_fbdev_fini(struct drm_device *);
int armada_overlay_plane_create(struct drm_device *, unsigned long);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 4f2b28354915..acf3a36c9ebc 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -128,6 +128,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
return -ENOMEM;
}
+ platform_set_drvdata(dev->platformdev, dev);
dev->dev_private = priv;
/* Get the implementation specific driver data. */
@@ -321,6 +322,11 @@ static struct drm_ioctl_desc armada_ioctls[] = {
DRM_UNLOCKED),
};
+static void armada_drm_lastclose(struct drm_device *dev)
+{
+ armada_fbdev_lastclose(dev);
+}
+
static const struct file_operations armada_drm_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -337,7 +343,7 @@ static struct drm_driver armada_drm_driver = {
.open = NULL,
.preclose = NULL,
.postclose = NULL,
- .lastclose = NULL,
+ .lastclose = armada_drm_lastclose,
.unload = armada_drm_unload,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = armada_drm_enable_vblank,
@@ -376,7 +382,7 @@ static int armada_drm_probe(struct platform_device *pdev)
static int armada_drm_remove(struct platform_device *pdev)
{
- drm_platform_exit(&armada_drm_driver, pdev);
+ drm_put_dev(platform_get_drvdata(pdev));
return 0;
}
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index dd5ea77dac96..948cb14c561e 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
- DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
- dfb->fb.width, dfb->fb.height,
- dfb->fb.bits_per_pixel, obj->phys_addr);
+ DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
+ dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
+ (unsigned long long)obj->phys_addr);
return 0;
@@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev)
return ret;
}
+void armada_fbdev_lastclose(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ drm_modeset_lock_all(dev);
+ if (priv->fbdev)
+ drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+ drm_modeset_unlock_all(dev);
+}
+
void armada_fbdev_fini(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;
@@ -192,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev)
framebuffer_release(info);
}
+ drm_fb_helper_fini(fbh);
+
if (fbh->fb)
fbh->fb->funcs->destroy(fbh->fb);
- drm_fb_helper_fini(fbh);
-
priv->fbdev = NULL;
}
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 9f2356bae7fd..887816f43476 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
obj->dev_addr = obj->linear->start;
}
- DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
- obj, obj->phys_addr, obj->dev_addr);
+ DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
+ (unsigned long long)obj->phys_addr,
+ (unsigned long long)obj->dev_addr);
return 0;
}
@@ -557,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
* refcount on the gem object itself.
*/
drm_gem_object_reference(obj);
- dma_buf_put(buf);
return obj;
}
}
@@ -573,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
}
dobj->obj.import_attach = attach;
+ get_dma_buf(buf);
/*
* Don't call dma_buf_map_attachment() here - it maps the
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 7b33e14e44aa..a28640f47c27 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- if (!in_interrupt())
+ if (drm_can_sleep())
ret = ast_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index af0b868a9dfd..50535fd5a88d 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -189,53 +189,6 @@ static int ast_get_dram_info(struct drm_device *dev)
return 0;
}
-uint32_t ast_get_max_dclk(struct drm_device *dev, int bpp)
-{
- struct ast_private *ast = dev->dev_private;
- uint32_t dclk, jreg;
- uint32_t dram_bus_width, mclk, dram_bandwidth, actual_dram_bandwidth, dram_efficency = 500;
-
- dram_bus_width = ast->dram_bus_width;
- mclk = ast->mclk;
-
- if (ast->chip == AST2100 ||
- ast->chip == AST1100 ||
- ast->chip == AST2200 ||
- ast->chip == AST2150 ||
- ast->dram_bus_width == 16)
- dram_efficency = 600;
- else if (ast->chip == AST2300)
- dram_efficency = 400;
-
- dram_bandwidth = mclk * dram_bus_width * 2 / 8;
- actual_dram_bandwidth = dram_bandwidth * dram_efficency / 1000;
-
- if (ast->chip == AST1180)
- dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
- else {
- jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
- if ((jreg & 0x08) && (ast->chip == AST2000))
- dclk = actual_dram_bandwidth / ((bpp + 1 + 16) / 8);
- else if ((jreg & 0x08) && (bpp == 8))
- dclk = actual_dram_bandwidth / ((bpp + 1 + 24) / 8);
- else
- dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
- }
-
- if (ast->chip == AST2100 ||
- ast->chip == AST2200 ||
- ast->chip == AST2300 ||
- ast->chip == AST1180) {
- if (dclk > 200)
- dclk = 200;
- } else {
- if (dclk > 165)
- dclk = 165;
- }
-
- return dclk;
-}
-
static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
@@ -449,7 +402,7 @@ int ast_dumb_create(struct drm_file *file,
return 0;
}
-void ast_bo_unref(struct ast_bo **bo)
+static void ast_bo_unref(struct ast_bo **bo)
{
struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 7fc9f7272b56..cca063b11083 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -404,7 +404,7 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
}
}
-void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
+static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
struct ast_private *ast = dev->dev_private;
@@ -415,7 +415,7 @@ void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
}
-bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+static bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
switch (crtc->fb->bits_per_pixel) {
@@ -427,7 +427,7 @@ bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
return true;
}
-void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
+static void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
{
struct ast_private *ast = crtc->dev->dev_private;
u32 addr;
@@ -623,7 +623,7 @@ static const struct drm_crtc_funcs ast_crtc_funcs = {
.destroy = ast_crtc_destroy,
};
-int ast_crtc_init(struct drm_device *dev)
+static int ast_crtc_init(struct drm_device *dev)
{
struct ast_crtc *crtc;
int i;
@@ -710,7 +710,7 @@ static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = {
.mode_set = ast_encoder_mode_set,
};
-int ast_encoder_init(struct drm_device *dev)
+static int ast_encoder_init(struct drm_device *dev)
{
struct ast_encoder *ast_encoder;
@@ -777,7 +777,7 @@ static const struct drm_connector_funcs ast_connector_funcs = {
.destroy = ast_connector_destroy,
};
-int ast_connector_init(struct drm_device *dev)
+static int ast_connector_init(struct drm_device *dev)
{
struct ast_connector *ast_connector;
struct drm_connector *connector;
@@ -810,7 +810,7 @@ int ast_connector_init(struct drm_device *dev)
}
/* allocate cursor cache and pin at start of VRAM */
-int ast_cursor_init(struct drm_device *dev)
+static int ast_cursor_init(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
int size;
@@ -847,7 +847,7 @@ fail:
return ret;
}
-void ast_cursor_fini(struct drm_device *dev)
+static void ast_cursor_fini(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
ttm_bo_kunmap(&ast->cache_kmap);
@@ -965,7 +965,7 @@ static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
kfree(i2c);
}
-void ast_show_cursor(struct drm_crtc *crtc)
+static void ast_show_cursor(struct drm_crtc *crtc)
{
struct ast_private *ast = crtc->dev->dev_private;
u8 jreg;
@@ -976,7 +976,7 @@ void ast_show_cursor(struct drm_crtc *crtc)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
}
-void ast_hide_cursor(struct drm_crtc *crtc)
+static void ast_hide_cursor(struct drm_crtc *crtc)
{
struct ast_private *ast = crtc->dev->dev_private;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 32aecb34dbce..4ea9b17ac17a 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -80,7 +80,7 @@ static int ast_ttm_global_init(struct ast_private *ast)
return 0;
}
-void
+static void
ast_ttm_global_release(struct ast_private *ast)
{
if (ast->ttm.mem_global_ref.release == NULL)
@@ -102,7 +102,7 @@ static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
kfree(bo);
}
-bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
+static bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
{
if (bo->destroy == &ast_bo_ttm_destroy)
return true;
@@ -208,7 +208,7 @@ static struct ttm_backend_func ast_tt_backend_func = {
};
-struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
new file mode 100644
index 000000000000..c8fcf12019f0
--- /dev/null
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -0,0 +1,11 @@
+config DRM_BOCHS
+ tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
+ depends on DRM && PCI
+ select DRM_KMS_HELPER
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select DRM_TTM
+ help
+ Choose this option for qemu.
+ If M is selected the module will be called bochs-drm.
diff --git a/drivers/gpu/drm/bochs/Makefile b/drivers/gpu/drm/bochs/Makefile
new file mode 100644
index 000000000000..844a55614920
--- /dev/null
+++ b/drivers/gpu/drm/bochs/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+bochs-drm-y := bochs_drv.o bochs_mm.o bochs_kms.o bochs_fbdev.o bochs_hw.o
+
+obj-$(CONFIG_DRM_BOCHS) += bochs-drm.o
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
new file mode 100644
index 000000000000..741965c001a6
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -0,0 +1,164 @@
+#include <linux/io.h>
+#include <linux/fb.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_page_alloc.h>
+
+/* ---------------------------------------------------------------------- */
+
+#define VBE_DISPI_IOPORT_INDEX 0x01CE
+#define VBE_DISPI_IOPORT_DATA 0x01CF
+
+#define VBE_DISPI_INDEX_ID 0x0
+#define VBE_DISPI_INDEX_XRES 0x1
+#define VBE_DISPI_INDEX_YRES 0x2
+#define VBE_DISPI_INDEX_BPP 0x3
+#define VBE_DISPI_INDEX_ENABLE 0x4
+#define VBE_DISPI_INDEX_BANK 0x5
+#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
+#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
+#define VBE_DISPI_INDEX_X_OFFSET 0x8
+#define VBE_DISPI_INDEX_Y_OFFSET 0x9
+#define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa
+
+#define VBE_DISPI_ID0 0xB0C0
+#define VBE_DISPI_ID1 0xB0C1
+#define VBE_DISPI_ID2 0xB0C2
+#define VBE_DISPI_ID3 0xB0C3
+#define VBE_DISPI_ID4 0xB0C4
+#define VBE_DISPI_ID5 0xB0C5
+
+#define VBE_DISPI_DISABLED 0x00
+#define VBE_DISPI_ENABLED 0x01
+#define VBE_DISPI_GETCAPS 0x02
+#define VBE_DISPI_8BIT_DAC 0x20
+#define VBE_DISPI_LFB_ENABLED 0x40
+#define VBE_DISPI_NOCLEARMEM 0x80
+
+/* ---------------------------------------------------------------------- */
+
+enum bochs_types {
+ BOCHS_QEMU_STDVGA,
+ BOCHS_UNKNOWN,
+};
+
+struct bochs_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct bochs_device {
+ /* hw */
+ void __iomem *mmio;
+ int ioports;
+ void __iomem *fb_map;
+ unsigned long fb_base;
+ unsigned long fb_size;
+
+ /* mode */
+ u16 xres;
+ u16 yres;
+ u16 yres_virtual;
+ u32 stride;
+ u32 bpp;
+
+ /* drm */
+ struct drm_device *dev;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ bool mode_config_initialized;
+
+ /* ttm */
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ bool initialized;
+ } ttm;
+
+ /* fbdev */
+ struct {
+ struct bochs_framebuffer gfb;
+ struct drm_fb_helper helper;
+ int size;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
+ bool initialized;
+ } fb;
+};
+
+#define to_bochs_framebuffer(x) container_of(x, struct bochs_framebuffer, base)
+
+struct bochs_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+
+static inline struct bochs_bo *bochs_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct bochs_bo, bo);
+}
+
+static inline struct bochs_bo *gem_to_bochs_bo(struct drm_gem_object *gem)
+{
+ return container_of(gem, struct bochs_bo, gem);
+}
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static inline u64 bochs_bo_mmap_offset(struct bochs_bo *bo)
+{
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* bochs_hw.c */
+int bochs_hw_init(struct drm_device *dev, uint32_t flags);
+void bochs_hw_fini(struct drm_device *dev);
+
+void bochs_hw_setmode(struct bochs_device *bochs,
+ struct drm_display_mode *mode);
+void bochs_hw_setbase(struct bochs_device *bochs,
+ int x, int y, u64 addr);
+
+/* bochs_mm.c */
+int bochs_mm_init(struct bochs_device *bochs);
+void bochs_mm_fini(struct bochs_device *bochs);
+int bochs_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+int bochs_gem_init_object(struct drm_gem_object *obj);
+void bochs_gem_free_object(struct drm_gem_object *obj);
+int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+
+int bochs_framebuffer_init(struct drm_device *dev,
+ struct bochs_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int bochs_bo_unpin(struct bochs_bo *bo);
+
+extern const struct drm_mode_config_funcs bochs_mode_funcs;
+
+/* bochs_kms.c */
+int bochs_kms_init(struct bochs_device *bochs);
+void bochs_kms_fini(struct bochs_device *bochs);
+
+/* bochs_fbdev.c */
+int bochs_fbdev_init(struct bochs_device *bochs);
+void bochs_fbdev_fini(struct bochs_device *bochs);
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
new file mode 100644
index 000000000000..395bba261c9a
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -0,0 +1,178 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "bochs.h"
+
+static bool enable_fbdev = true;
+module_param_named(fbdev, enable_fbdev, bool, 0444);
+MODULE_PARM_DESC(fbdev, "register fbdev device");
+
+/* ---------------------------------------------------------------------- */
+/* drm interface */
+
+static int bochs_unload(struct drm_device *dev)
+{
+ struct bochs_device *bochs = dev->dev_private;
+
+ bochs_fbdev_fini(bochs);
+ bochs_kms_fini(bochs);
+ bochs_mm_fini(bochs);
+ bochs_hw_fini(dev);
+ kfree(bochs);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+static int bochs_load(struct drm_device *dev, unsigned long flags)
+{
+ struct bochs_device *bochs;
+ int ret;
+
+ bochs = kzalloc(sizeof(*bochs), GFP_KERNEL);
+ if (bochs == NULL)
+ return -ENOMEM;
+ dev->dev_private = bochs;
+ bochs->dev = dev;
+
+ ret = bochs_hw_init(dev, flags);
+ if (ret)
+ goto err;
+
+ ret = bochs_mm_init(bochs);
+ if (ret)
+ goto err;
+
+ ret = bochs_kms_init(bochs);
+ if (ret)
+ goto err;
+
+ if (enable_fbdev)
+ bochs_fbdev_init(bochs);
+
+ return 0;
+
+err:
+ bochs_unload(dev);
+ return ret;
+}
+
+static const struct file_operations bochs_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = bochs_mmap,
+};
+
+static struct drm_driver bochs_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET,
+ .load = bochs_load,
+ .unload = bochs_unload,
+ .fops = &bochs_fops,
+ .name = "bochs-drm",
+ .desc = "bochs dispi vga interface (qemu stdvga)",
+ .date = "20130925",
+ .major = 1,
+ .minor = 0,
+ .gem_free_object = bochs_gem_free_object,
+ .dumb_create = bochs_dumb_create,
+ .dumb_map_offset = bochs_dumb_mmap_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+};
+
+/* ---------------------------------------------------------------------- */
+/* pci interface */
+
+static int bochs_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+ remove_conflicting_framebuffers(ap, "bochsdrmfb", false);
+ kfree(ap);
+
+ return 0;
+}
+
+static int bochs_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret;
+
+ ret = bochs_kick_out_firmware_fb(pdev);
+ if (ret)
+ return ret;
+
+ return drm_get_pci_dev(pdev, ent, &bochs_driver);
+}
+
+static void bochs_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(bochs_pci_tbl) = {
+ {
+ .vendor = 0x1234,
+ .device = 0x1111,
+ .subvendor = 0x1af4,
+ .subdevice = 0x1100,
+ .driver_data = BOCHS_QEMU_STDVGA,
+ },
+ {
+ .vendor = 0x1234,
+ .device = 0x1111,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = BOCHS_UNKNOWN,
+ },
+ { /* end of list */ }
+};
+
+static struct pci_driver bochs_pci_driver = {
+ .name = "bochs-drm",
+ .id_table = bochs_pci_tbl,
+ .probe = bochs_pci_probe,
+ .remove = bochs_pci_remove,
+};
+
+/* ---------------------------------------------------------------------- */
+/* module init/exit */
+
+static int __init bochs_init(void)
+{
+ return drm_pci_init(&bochs_driver, &bochs_pci_driver);
+}
+
+static void __exit bochs_exit(void)
+{
+ drm_pci_exit(&bochs_driver, &bochs_pci_driver);
+}
+
+module_init(bochs_init);
+module_exit(bochs_exit);
+
+MODULE_DEVICE_TABLE(pci, bochs_pci_tbl);
+MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
new file mode 100644
index 000000000000..4da5206b7cc9
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -0,0 +1,215 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+/* ---------------------------------------------------------------------- */
+
+static struct fb_ops bochsfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int bochsfb_create_object(struct bochs_device *bochs,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = bochs->dev;
+ struct drm_gem_object *gobj;
+ u32 size;
+ int ret = 0;
+
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = bochs_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int bochsfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct bochs_device *bochs =
+ container_of(helper, struct bochs_device, fb.helper);
+ struct drm_device *dev = bochs->dev;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct device *device = &dev->pdev->dev;
+ struct drm_gem_object *gobj = NULL;
+ struct bochs_bo *bo = NULL;
+ int size, ret;
+
+ if (sizes->surface_bpp != 32)
+ return -EINVAL;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ /* alloc, pin & map bo */
+ ret = bochsfb_create_object(bochs, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+
+ bo = gem_to_bochs_bo(gobj);
+
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ if (ret)
+ return ret;
+
+ ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
+ if (ret) {
+ DRM_ERROR("failed to pin fbcon\n");
+ ttm_bo_unreserve(&bo->bo);
+ return ret;
+ }
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages,
+ &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fbcon\n");
+ ttm_bo_unreserve(&bo->bo);
+ return ret;
+ }
+
+ ttm_bo_unreserve(&bo->bo);
+
+ /* init fb device */
+ info = framebuffer_alloc(0, device);
+ if (info == NULL)
+ return -ENOMEM;
+
+ info->par = &bochs->fb.helper;
+
+ ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj);
+ if (ret)
+ return ret;
+
+ bochs->fb.size = size;
+
+ /* setup helper */
+ fb = &bochs->fb.gfb.base;
+ bochs->fb.helper.fb = fb;
+ bochs->fb.helper.fbdev = info;
+
+ strcpy(info->fix.id, "bochsdrmfb");
+
+ info->flags = FBINFO_DEFAULT;
+ info->fbops = &bochsfb_ops;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width,
+ sizes->fb_height);
+
+ info->screen_base = bo->kmap.virtual;
+ info->screen_size = size;
+
+#if 0
+ /* FIXME: get this right for mmap(/dev/fb0) */
+ info->fix.smem_start = bochs_bo_mmap_offset(bo);
+ info->fix.smem_len = size;
+#endif
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int bochs_fbdev_destroy(struct bochs_device *bochs)
+{
+ struct bochs_framebuffer *gfb = &bochs->fb.gfb;
+ struct fb_info *info;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (bochs->fb.helper.fbdev) {
+ info = bochs->fb.helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (gfb->obj) {
+ drm_gem_object_unreference_unlocked(gfb->obj);
+ gfb->obj = NULL;
+ }
+
+ drm_fb_helper_fini(&bochs->fb.helper);
+ drm_framebuffer_unregister_private(&gfb->base);
+ drm_framebuffer_cleanup(&gfb->base);
+
+ return 0;
+}
+
+void bochs_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+}
+
+void bochs_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ *red = regno;
+ *green = regno;
+ *blue = regno;
+}
+
+static struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
+ .gamma_set = bochs_fb_gamma_set,
+ .gamma_get = bochs_fb_gamma_get,
+ .fb_probe = bochsfb_create,
+};
+
+int bochs_fbdev_init(struct bochs_device *bochs)
+{
+ int ret;
+
+ bochs->fb.helper.funcs = &bochs_fb_helper_funcs;
+ spin_lock_init(&bochs->fb.dirty_lock);
+
+ ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper,
+ 1, 1);
+ if (ret)
+ return ret;
+
+ drm_fb_helper_single_add_all_connectors(&bochs->fb.helper);
+ drm_helper_disable_unused_functions(bochs->dev);
+ drm_fb_helper_initial_config(&bochs->fb.helper, 32);
+
+ bochs->fb.initialized = true;
+ return 0;
+}
+
+void bochs_fbdev_fini(struct bochs_device *bochs)
+{
+ if (!bochs->fb.initialized)
+ return;
+
+ bochs_fbdev_destroy(bochs);
+ bochs->fb.initialized = false;
+}
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
new file mode 100644
index 000000000000..dbe619e6aab4
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -0,0 +1,177 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+/* ---------------------------------------------------------------------- */
+
+static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val)
+{
+ if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df))
+ return;
+
+ if (bochs->mmio) {
+ int offset = ioport - 0x3c0 + 0x400;
+ writeb(val, bochs->mmio + offset);
+ } else {
+ outb(val, ioport);
+ }
+}
+
+static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg)
+{
+ u16 ret = 0;
+
+ if (bochs->mmio) {
+ int offset = 0x500 + (reg << 1);
+ ret = readw(bochs->mmio + offset);
+ } else {
+ outw(reg, VBE_DISPI_IOPORT_INDEX);
+ ret = inw(VBE_DISPI_IOPORT_DATA);
+ }
+ return ret;
+}
+
+static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val)
+{
+ if (bochs->mmio) {
+ int offset = 0x500 + (reg << 1);
+ writew(val, bochs->mmio + offset);
+ } else {
+ outw(reg, VBE_DISPI_IOPORT_INDEX);
+ outw(val, VBE_DISPI_IOPORT_DATA);
+ }
+}
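The two helpers above implement the usual index/data register pair for the legacy ioport path: write the register number to VBE_DISPI_IOPORT_INDEX (0x01CE), then read or write the 16-bit value through VBE_DISPI_IOPORT_DATA (0x01CF). A minimal sketch of the same sequence outside the helpers, illustrative only:

	outw(VBE_DISPI_INDEX_XRES, VBE_DISPI_IOPORT_INDEX);	/* select the XRES register */
	outw(1024, VBE_DISPI_IOPORT_DATA);			/* program a 1024-pixel width */

The MMIO variant maps the same registers at offset 0x500 of BAR 2, two bytes per register, as the "0x500 + (reg << 1)" offset calculation shows.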
+
+int bochs_hw_init(struct drm_device *dev, uint32_t flags)
+{
+ struct bochs_device *bochs = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ unsigned long addr, size, mem, ioaddr, iosize;
+ u16 id;
+
+ if (/* (ent->driver_data == BOCHS_QEMU_STDVGA) && */
+ (pdev->resource[2].flags & IORESOURCE_MEM)) {
+ /* mmio bar with vga and bochs registers present */
+ if (pci_request_region(pdev, 2, "bochs-drm") != 0) {
+ DRM_ERROR("Cannot request mmio region\n");
+ return -EBUSY;
+ }
+ ioaddr = pci_resource_start(pdev, 2);
+ iosize = pci_resource_len(pdev, 2);
+ bochs->mmio = ioremap(ioaddr, iosize);
+ if (bochs->mmio == NULL) {
+ DRM_ERROR("Cannot map mmio region\n");
+ return -ENOMEM;
+ }
+ } else {
+ ioaddr = VBE_DISPI_IOPORT_INDEX;
+ iosize = 2;
+ if (!request_region(ioaddr, iosize, "bochs-drm")) {
+ DRM_ERROR("Cannot request ioports\n");
+ return -EBUSY;
+ }
+ bochs->ioports = 1;
+ }
+
+ id = bochs_dispi_read(bochs, VBE_DISPI_INDEX_ID);
+ mem = bochs_dispi_read(bochs, VBE_DISPI_INDEX_VIDEO_MEMORY_64K)
+ * 64 * 1024;
+ if ((id & 0xfff0) != VBE_DISPI_ID0) {
+ DRM_ERROR("ID mismatch\n");
+ return -ENODEV;
+ }
+
+ if ((pdev->resource[0].flags & IORESOURCE_MEM) == 0)
+ return -ENODEV;
+ addr = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+ if (addr == 0)
+ return -ENODEV;
+ if (size != mem) {
+ DRM_ERROR("Size mismatch: pci=%ld, bochs=%ld\n",
+ size, mem);
+ size = min(size, mem);
+ }
+
+ if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
+ DRM_ERROR("Cannot request framebuffer\n");
+ return -EBUSY;
+ }
+
+ bochs->fb_map = ioremap(addr, size);
+ if (bochs->fb_map == NULL) {
+ DRM_ERROR("Cannot map framebuffer\n");
+ return -ENOMEM;
+ }
+ bochs->fb_base = addr;
+ bochs->fb_size = size;
+
+ DRM_INFO("Found bochs VGA, ID 0x%x.\n", id);
+ DRM_INFO("Framebuffer size %ld kB @ 0x%lx, %s @ 0x%lx.\n",
+ size / 1024, addr,
+ bochs->ioports ? "ioports" : "mmio",
+ ioaddr);
+ return 0;
+}
+
+void bochs_hw_fini(struct drm_device *dev)
+{
+ struct bochs_device *bochs = dev->dev_private;
+
+ if (bochs->mmio)
+ iounmap(bochs->mmio);
+ if (bochs->ioports)
+ release_region(VBE_DISPI_IOPORT_INDEX, 2);
+ if (bochs->fb_map)
+ iounmap(bochs->fb_map);
+ pci_release_regions(dev->pdev);
+}
+
+void bochs_hw_setmode(struct bochs_device *bochs,
+ struct drm_display_mode *mode)
+{
+ bochs->xres = mode->hdisplay;
+ bochs->yres = mode->vdisplay;
+ bochs->bpp = 32;
+ bochs->stride = mode->hdisplay * (bochs->bpp / 8);
+ bochs->yres_virtual = bochs->fb_size / bochs->stride;
+
+ DRM_DEBUG_DRIVER("%dx%d @ %d bpp, vy %d\n",
+ bochs->xres, bochs->yres, bochs->bpp,
+ bochs->yres_virtual);
+
+ bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */
+
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES, bochs->yres);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_BANK, 0);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, bochs->xres);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_HEIGHT,
+ bochs->yres_virtual);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, 0);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, 0);
+
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,
+ VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED);
+}
+
+void bochs_hw_setbase(struct bochs_device *bochs,
+ int x, int y, u64 addr)
+{
+ unsigned long offset = (unsigned long)addr +
+ y * bochs->stride +
+ x * (bochs->bpp / 8);
+ int vy = offset / bochs->stride;
+ int vx = (offset % bochs->stride) * 8 / bochs->bpp;
+
+ DRM_DEBUG_DRIVER("x %d, y %d, addr %llx -> offset %lx, vx %d, vy %d\n",
+ x, y, addr, offset, vx, vy);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy);
+}
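As a worked example of the arithmetic above (not part of the diff): a 1024-pixel-wide 32 bpp mode gives stride = 1024 * 4 = 4096 bytes. Panning to x = 0, y = 16 with addr = 0 yields offset = 16 * 4096 = 65536, so vy = 65536 / 4096 = 16 and vx = (65536 % 4096) * 8 / 32 = 0. In other words, the DISPI X/Y offset registers are programmed in pixels and scanlines, while addr (the pinned BO's position inside the VRAM aperture) is folded into the same byte offset before the conversion.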
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
new file mode 100644
index 000000000000..62ec7d4b3816
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -0,0 +1,294 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+static int defx = 1024;
+static int defy = 768;
+
+module_param(defx, int, 0444);
+module_param(defy, int, 0444);
+MODULE_PARM_DESC(defx, "default x resolution");
+MODULE_PARM_DESC(defy, "default y resolution");
+
+/* ---------------------------------------------------------------------- */
+
+static void bochs_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void bochs_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ default:
+ return;
+ }
+}
+
+static bool bochs_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct bochs_device *bochs =
+ container_of(crtc, struct bochs_device, crtc);
+ struct bochs_framebuffer *bochs_fb;
+ struct bochs_bo *bo;
+ u64 gpu_addr = 0;
+ int ret;
+
+ if (old_fb) {
+ bochs_fb = to_bochs_framebuffer(old_fb);
+ bo = gem_to_bochs_bo(bochs_fb->obj);
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ if (ret) {
+ DRM_ERROR("failed to reserve old_fb bo\n");
+ } else {
+ bochs_bo_unpin(bo);
+ ttm_bo_unreserve(&bo->bo);
+ }
+ }
+
+ if (WARN_ON(crtc->fb == NULL))
+ return -EINVAL;
+
+ bochs_fb = to_bochs_framebuffer(crtc->fb);
+ bo = gem_to_bochs_bo(bochs_fb->obj);
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ if (ret)
+ return ret;
+
+ ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ ttm_bo_unreserve(&bo->bo);
+ return ret;
+ }
+
+ ttm_bo_unreserve(&bo->bo);
+ bochs_hw_setbase(bochs, x, y, gpu_addr);
+ return 0;
+}
+
+static int bochs_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct bochs_device *bochs =
+ container_of(crtc, struct bochs_device, crtc);
+
+ bochs_hw_setmode(bochs, mode);
+ bochs_crtc_mode_set_base(crtc, x, y, old_fb);
+ return 0;
+}
+
+static void bochs_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+static void bochs_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+static void bochs_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs bochs_crtc_funcs = {
+ .gamma_set = bochs_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+};
+
+static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
+ .dpms = bochs_crtc_dpms,
+ .mode_fixup = bochs_crtc_mode_fixup,
+ .mode_set = bochs_crtc_mode_set,
+ .mode_set_base = bochs_crtc_mode_set_base,
+ .prepare = bochs_crtc_prepare,
+ .commit = bochs_crtc_commit,
+ .load_lut = bochs_crtc_load_lut,
+};
+
+static void bochs_crtc_init(struct drm_device *dev)
+{
+ struct bochs_device *bochs = dev->dev_private;
+ struct drm_crtc *crtc = &bochs->crtc;
+
+ drm_crtc_init(dev, crtc, &bochs_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+ drm_crtc_helper_add(crtc, &bochs_helper_funcs);
+}
+
+static bool bochs_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void bochs_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void bochs_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+}
+
+static void bochs_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void bochs_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static const struct drm_encoder_helper_funcs bochs_encoder_helper_funcs = {
+ .dpms = bochs_encoder_dpms,
+ .mode_fixup = bochs_encoder_mode_fixup,
+ .mode_set = bochs_encoder_mode_set,
+ .prepare = bochs_encoder_prepare,
+ .commit = bochs_encoder_commit,
+};
+
+static const struct drm_encoder_funcs bochs_encoder_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static void bochs_encoder_init(struct drm_device *dev)
+{
+ struct bochs_device *bochs = dev->dev_private;
+ struct drm_encoder *encoder = &bochs->encoder;
+
+ encoder->possible_crtcs = 0x1;
+ drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs);
+}
+
+
+int bochs_connector_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ count = drm_add_modes_noedid(connector, 8192, 8192);
+ drm_set_preferred_mode(connector, defx, defy);
+ return count;
+}
+
+static int bochs_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct bochs_device *bochs =
+ container_of(connector, struct bochs_device, connector);
+ unsigned long size = mode->hdisplay * mode->vdisplay * 4;
+
+ /*
+ * Make sure we can fit two framebuffers into video memory.
+ * This allows up to 1600x1200 with 16 MB (default size).
+ * If you want more try this:
+ * 'qemu -vga std -global VGA.vgamem_mb=32 $otherargs'
+ */
+ if (size * 2 > bochs->fb_size)
+ return MODE_BAD;
+
+ return MODE_OK;
+}
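To make the comment above concrete: at 4 bytes per pixel a 1600x1200 framebuffer needs 1600 * 1200 * 4 = 7,680,000 bytes, so double-buffering takes 15,360,000 bytes and fits in the default 16 MB (16,777,216 bytes) of stdvga memory. A 1920x1200 mode would need 2 * 9,216,000 = 18,432,000 bytes and is rejected as MODE_BAD unless the guest is started with a larger vgamem_mb.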
+
+static struct drm_encoder *
+bochs_connector_best_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ /* pick the encoder ids */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static enum drm_connector_status bochs_connector_detect(struct drm_connector
+ *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
+ .get_modes = bochs_connector_get_modes,
+ .mode_valid = bochs_connector_mode_valid,
+ .best_encoder = bochs_connector_best_encoder,
+};
+
+struct drm_connector_funcs bochs_connector_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = bochs_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+};
+
+static void bochs_connector_init(struct drm_device *dev)
+{
+ struct bochs_device *bochs = dev->dev_private;
+ struct drm_connector *connector = &bochs->connector;
+
+ drm_connector_init(dev, connector, &bochs_connector_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ drm_connector_helper_add(connector,
+ &bochs_connector_connector_helper_funcs);
+}
+
+
+int bochs_kms_init(struct bochs_device *bochs)
+{
+ drm_mode_config_init(bochs->dev);
+ bochs->mode_config_initialized = true;
+
+ bochs->dev->mode_config.max_width = 8192;
+ bochs->dev->mode_config.max_height = 8192;
+
+ bochs->dev->mode_config.fb_base = bochs->fb_base;
+ bochs->dev->mode_config.preferred_depth = 24;
+ bochs->dev->mode_config.prefer_shadow = 0;
+
+ bochs->dev->mode_config.funcs = (void *)&bochs_mode_funcs;
+
+ bochs_crtc_init(bochs->dev);
+ bochs_encoder_init(bochs->dev);
+ bochs_connector_init(bochs->dev);
+ drm_mode_connector_attach_encoder(&bochs->connector,
+ &bochs->encoder);
+
+ return 0;
+}
+
+void bochs_kms_fini(struct bochs_device *bochs)
+{
+ if (bochs->mode_config_initialized) {
+ drm_mode_config_cleanup(bochs->dev);
+ bochs->mode_config_initialized = false;
+ }
+}
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
new file mode 100644
index 000000000000..ce6858765b37
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -0,0 +1,546 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+static void bochs_ttm_placement(struct bochs_bo *bo, int domain);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct bochs_device, ttm.bdev);
+}
+
+static int bochs_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void bochs_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int bochs_ttm_global_init(struct bochs_device *bochs)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &bochs->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &bochs_ttm_mem_global_init;
+ global_ref->release = &bochs_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ bochs->ttm.bo_global_ref.mem_glob =
+ bochs->ttm.mem_global_ref.object;
+ global_ref = &bochs->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&bochs->ttm.mem_global_ref);
+ return r;
+ }
+
+ return 0;
+}
+
+static void bochs_ttm_global_release(struct bochs_device *bochs)
+{
+ if (bochs->ttm.mem_global_ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&bochs->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&bochs->ttm.mem_global_ref);
+ bochs->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct bochs_bo *bo;
+
+ bo = container_of(tbo, struct bochs_bo, bo);
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+static bool bochs_ttm_bo_is_bochs_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &bochs_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int bochs_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+bochs_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct bochs_bo *bochsbo = bochs_bo(bo);
+
+ if (!bochs_ttm_bo_is_bochs_bo(bo))
+ return;
+
+ bochs_ttm_placement(bochsbo, TTM_PL_FLAG_SYSTEM);
+ *pl = bochsbo->placement;
+}
+
+static int bochs_bo_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp)
+{
+ struct bochs_bo *bochsbo = bochs_bo(bo);
+
+ return drm_vma_node_verify_access(&bochsbo->gem.vma_node, filp);
+}
+
+static int bochs_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct bochs_device *bochs = bochs_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = bochs->fb_base;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ return 0;
+}
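+
+/*
+ * Note: for TTM_PL_VRAM the CPU-visible address of a buffer ends up being
+ * bus.base + bus.offset, i.e. the framebuffer aperture in bochs->fb_base
+ * plus the buffer's page offset within VRAM; the virtual hardware has no
+ * GART, so no further translation is involved.
+ */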
+
+static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+}
+
+static int bochs_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+}
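+
+/*
+ * Note: the emulated adapter has no copy engine, so every move goes
+ * through ttm_bo_move_memcpy(), a synchronous CPU copy; there is nothing
+ * to fence or wait on here.
+ */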
+
+
+static void bochs_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func bochs_tt_backend_func = {
+ .destroy = &bochs_ttm_backend_destroy,
+};
+
+static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &bochs_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+struct ttm_bo_driver bochs_bo_driver = {
+ .ttm_tt_create = bochs_ttm_tt_create,
+ .ttm_tt_populate = ttm_pool_populate,
+ .ttm_tt_unpopulate = ttm_pool_unpopulate,
+ .init_mem_type = bochs_bo_init_mem_type,
+ .evict_flags = bochs_bo_evict_flags,
+ .move = bochs_bo_move,
+ .verify_access = bochs_bo_verify_access,
+ .io_mem_reserve = &bochs_ttm_io_mem_reserve,
+ .io_mem_free = &bochs_ttm_io_mem_free,
+};
+
+int bochs_mm_init(struct bochs_device *bochs)
+{
+ struct ttm_bo_device *bdev = &bochs->ttm.bdev;
+ int ret;
+
+ ret = bochs_ttm_global_init(bochs);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&bochs->ttm.bdev,
+ bochs->ttm.bo_global_ref.ref.object,
+ &bochs_bo_driver, DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ bochs->fb_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ bochs->ttm.initialized = true;
+ return 0;
+}
+
+void bochs_mm_fini(struct bochs_device *bochs)
+{
+ if (!bochs->ttm.initialized)
+ return;
+
+ ttm_bo_device_release(&bochs->ttm.bdev);
+ bochs_ttm_global_release(bochs);
+ bochs->ttm.initialized = false;
+}
+
+static void bochs_ttm_placement(struct bochs_bo *bo, int domain)
+{
+ u32 c = 0;
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM) {
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED
+ | TTM_PL_FLAG_VRAM;
+ }
+ if (domain & TTM_PL_FLAG_SYSTEM) {
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ }
+ if (!c) {
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ }
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = bochs_bo_gpu_offset(bo);
+ return 0;
+ }
+
+ bochs_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = bochs_bo_gpu_offset(bo);
+ return 0;
+}
+
+int bochs_bo_unpin(struct bochs_bo *bo)
+{
+ int i, ret;
+
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
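+
+/*
+ * Pinning protocol used above: the first bochs_bo_pin() validates the BO
+ * into the requested placement with TTM_PL_FLAG_NO_EVICT set, nested pins
+ * only bump pin_count, and only the final bochs_bo_unpin() clears
+ * NO_EVICT and re-validates so TTM may evict the buffer again.
+ */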
+
+int bochs_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct bochs_device *bochs;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ bochs = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &bochs->ttm.bdev);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int bochs_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct bochs_bo **pbochsbo)
+{
+ struct bochs_device *bochs = dev->dev_private;
+ struct bochs_bo *bochsbo;
+ size_t acc_size;
+ int ret;
+
+ bochsbo = kzalloc(sizeof(struct bochs_bo), GFP_KERNEL);
+ if (!bochsbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &bochsbo->gem, size);
+ if (ret) {
+ kfree(bochsbo);
+ return ret;
+ }
+
+ bochsbo->bo.bdev = &bochs->ttm.bdev;
+ bochsbo->bo.bdev->dev_mapping = dev->dev_mapping;
+
+ bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&bochs->ttm.bdev, size,
+ sizeof(struct bochs_bo));
+
+ ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size,
+ ttm_bo_type_device, &bochsbo->placement,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
+ NULL, bochs_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pbochsbo = bochsbo;
+ return 0;
+}
+
+int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct bochs_bo *bochsbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = ALIGN(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = bochs_bo_create(dev, size, 0, 0, &bochsbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &bochsbo->gem;
+ return 0;
+}
+
+int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_object *gobj;
+ u32 handle;
+ int ret;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = bochs_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
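+
+/*
+ * Worked example (hypothetical request): a 1024x768 dumb buffer with
+ * bpp = 32 gets pitch = 1024 * ((32 + 7) / 8) = 4096 bytes and
+ * size = 4096 * 768 = 3,145,728 bytes; bochs_gem_create() then rounds
+ * the size up to PAGE_SIZE, which in this case is already satisfied.
+ */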
+
+static void bochs_bo_unref(struct bochs_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
+
+}
+
+void bochs_gem_free_object(struct drm_gem_object *obj)
+{
+ struct bochs_bo *bochs_bo = gem_to_bochs_bo(obj);
+
+ if (!bochs_bo)
+ return;
+ bochs_bo_unref(&bochs_bo);
+}
+
+int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct bochs_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_bochs_bo(obj);
+ *offset = bochs_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb);
+ if (bochs_fb->obj)
+ drm_gem_object_unreference_unlocked(bochs_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static const struct drm_framebuffer_funcs bochs_fb_funcs = {
+ .destroy = bochs_user_framebuffer_destroy,
+};
+
+int bochs_framebuffer_init(struct drm_device *dev,
+ struct bochs_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
+ ret = drm_framebuffer_init(dev, &gfb->base, &bochs_fb_funcs);
+ if (ret) {
+ DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static struct drm_framebuffer *
+bochs_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct bochs_framebuffer *bochs_fb;
+ int ret;
+
+ DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
+ mode_cmd->width, mode_cmd->height,
+ (mode_cmd->pixel_format) & 0xff,
+ (mode_cmd->pixel_format >> 8) & 0xff,
+ (mode_cmd->pixel_format >> 16) & 0xff,
+ (mode_cmd->pixel_format >> 24) & 0xff);
+
+ if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
+ return ERR_PTR(-ENOENT);
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ bochs_fb = kzalloc(sizeof(*bochs_fb), GFP_KERNEL);
+ if (!bochs_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = bochs_framebuffer_init(dev, bochs_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(bochs_fb);
+ return ERR_PTR(ret);
+ }
+ return &bochs_fb->base;
+}
+
+const struct drm_mode_config_funcs bochs_mode_funcs = {
+ .fb_create = bochs_user_framebuffer_create,
+};
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index b6aded73838b..117d3eca5e37 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -222,7 +222,7 @@ void cirrus_fbdev_fini(struct cirrus_device *cdev);
void cirrus_driver_irq_preinstall(struct drm_device *dev);
int cirrus_driver_irq_postinstall(struct drm_device *dev);
void cirrus_driver_irq_uninstall(struct drm_device *dev);
-irqreturn_t cirrus_driver_irq_handler(DRM_IRQ_ARGS);
+irqreturn_t cirrus_driver_irq_handler(int irq, void *arg);
/* cirrus_kms.c */
int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index b27e95666fab..32bbba0a787b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- if (!in_interrupt())
+ if (drm_can_sleep())
ret = cirrus_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
@@ -233,6 +233,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
info->apertures->ranges[0].size = cdev->mc.vram_size;
+ info->fix.smem_start = cdev->dev->mode_config.fb_base;
+ info->fix.smem_len = cdev->mc.vram_size;
+
info->screen_base = sysram;
info->screen_size = size;
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 78e76f24343d..4b0170cf53fd 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,7 +255,7 @@ int cirrus_dumb_create(struct drm_file *file,
return 0;
}
-void cirrus_bo_unref(struct cirrus_bo **bo)
+static void cirrus_bo_unref(struct cirrus_bo **bo)
{
struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index adabc3daaa5b..530f78f84dee 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -102,7 +102,7 @@ static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
+static void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
{
struct cirrus_device *cdev = crtc->dev->dev_private;
u32 addr;
@@ -273,8 +273,8 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
sr07 |= 0x11;
break;
case 16:
- sr07 |= 0xc1;
- hdr = 0xc0;
+ sr07 |= 0x17;
+ hdr = 0xc1;
break;
case 24:
sr07 |= 0x15;
@@ -453,7 +453,7 @@ static void cirrus_encoder_commit(struct drm_encoder *encoder)
{
}
-void cirrus_encoder_destroy(struct drm_encoder *encoder)
+static void cirrus_encoder_destroy(struct drm_encoder *encoder)
{
struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder);
drm_encoder_cleanup(encoder);
@@ -492,7 +492,7 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
}
-int cirrus_vga_get_modes(struct drm_connector *connector)
+static int cirrus_vga_get_modes(struct drm_connector *connector)
{
int count;
@@ -509,7 +509,7 @@ static int cirrus_vga_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
+static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
*connector)
{
int enc_id = connector->encoder_ids[0];
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 75becdeac07d..8b37c25ff9bd 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -80,7 +80,7 @@ static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
return 0;
}
-void
+static void
cirrus_ttm_global_release(struct cirrus_device *cirrus)
{
if (cirrus->ttm.mem_global_ref.release == NULL)
@@ -102,7 +102,7 @@ static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
kfree(bo);
}
-bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
+static bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
{
if (bo->destroy == &cirrus_bo_ttm_destroy)
return true;
@@ -208,7 +208,7 @@ static struct ttm_backend_func cirrus_tt_backend_func = {
};
-struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
@@ -375,26 +375,6 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
return 0;
}
-int cirrus_bo_unpin(struct cirrus_bo *bo)
-{
- int i, ret;
- if (!bo->pin_count) {
- DRM_ERROR("unpin bad %p\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
-
- for (i = 0; i < bo->placement.num_placement ; i++)
- bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
- if (ret)
- return ret;
-
- return 0;
-}
-
int cirrus_bo_push_sysram(struct cirrus_bo *bo)
{
int i, ret;
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index e301d653d97e..dde205cef384 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -53,7 +53,7 @@
*/
int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
{
- DRM_AGP_KERN *kern;
+ struct agp_kern_info *kern;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
@@ -198,17 +198,15 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
struct drm_agp_mem *entry;
- DRM_AGP_MEM *memory;
+ struct agp_memory *memory;
unsigned long pages;
u32 type;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL)))
+ if (!(entry = kzalloc(sizeof(*entry), GFP_KERNEL)))
return -ENOMEM;
- memset(entry, 0, sizeof(*entry));
-
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
type = (u32) request->type;
if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) {
@@ -393,14 +391,16 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data,
* Gets the drm_agp_t structure which is made available by the agpgart module
* via the inter_module_* functions. Creates and initializes a drm_agp_head
* structure.
+ *
+ * Note that final cleanup of the kmalloced structure is directly done in
+ * drm_pci_agp_destroy.
*/
struct drm_agp_head *drm_agp_init(struct drm_device *dev)
{
struct drm_agp_head *head = NULL;
- if (!(head = kmalloc(sizeof(*head), GFP_KERNEL)))
+ if (!(head = kzalloc(sizeof(*head), GFP_KERNEL)))
return NULL;
- memset((void *)head, 0, sizeof(*head));
head->bridge = agp_find_bridge(dev->pdev);
if (!head->bridge) {
if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
@@ -439,7 +439,7 @@ void drm_agp_clear(struct drm_device *dev)
{
struct drm_agp_mem *entry, *tempe;
- if (!drm_core_has_AGP(dev) || !dev->agp)
+ if (!dev->agp)
return;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
@@ -460,35 +460,20 @@ void drm_agp_clear(struct drm_device *dev)
}
/**
- * drm_agp_destroy - Destroy AGP head
- * @dev: DRM device
- *
- * Destroy resources that were previously allocated via drm_agp_initp. Caller
- * must ensure to clean up all AGP resources before calling this. See
- * drm_agp_clear().
- *
- * Call this to destroy AGP heads allocated via drm_agp_init().
- */
-void drm_agp_destroy(struct drm_agp_head *agp)
-{
- kfree(agp);
-}
-
-/**
* Binds a collection of pages into AGP memory at the given offset, returning
* the AGP memory structure containing them.
*
* No reference is held on the pages during this time -- it is up to the
* caller to handle that.
*/
-DRM_AGP_MEM *
+struct agp_memory *
drm_agp_bind_pages(struct drm_device *dev,
struct page **pages,
unsigned long num_pages,
uint32_t gtt_offset,
u32 type)
{
- DRM_AGP_MEM *mem;
+ struct agp_memory *mem;
int ret, i;
DRM_DEBUG("\n");
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 39a718340319..0406110f83ed 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -114,7 +114,7 @@ int drm_buffer_copy_from_user(struct drm_buffer *buf,
for (idx = 0; idx < nr_pages; ++idx) {
- if (DRM_COPY_FROM_USER(buf->data[idx],
+ if (copy_from_user(buf->data[idx],
user_data + idx * PAGE_SIZE,
min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
DRM_ERROR("Failed to copy user data (%p) to drm buffer"
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 471e051d295e..edec31fe3fed 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -261,7 +261,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
struct drm_agp_mem *entry;
int valid = 0;
- if (!drm_core_has_AGP(dev)) {
+ if (!dev->agp) {
kfree(map);
return -EINVAL;
}
@@ -303,9 +303,6 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
break;
}
- case _DRM_GEM:
- DRM_ERROR("tried to addmap GEM object\n");
- break;
case _DRM_SCATTER_GATHER:
if (!dev->sg) {
kfree(map);
@@ -483,9 +480,6 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
- case _DRM_GEM:
- DRM_ERROR("tried to rmmap GEM object\n");
- break;
}
kfree(map);
@@ -1396,7 +1390,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
spin_unlock(&dev->count_lock);
if (request->count >= dma->buf_count) {
- if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+ if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
|| (drm_core_check_feature(dev, DRIVER_SG)
&& (dma->flags & _DRM_DMA_USE_SG))) {
struct drm_local_map *map = dev->agp_buffer_map;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d6cf77c472e7..3b7d32da1604 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -675,6 +675,29 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_cleanup);
/**
+ * drm_crtc_index - find the index of a registered CRTC
+ * @crtc: CRTC to find index for
+ *
+ * Given a registered CRTC, return the index of that CRTC within a DRM
+ * device's list of CRTCs.
+ */
+unsigned int drm_crtc_index(struct drm_crtc *crtc)
+{
+ unsigned int index = 0;
+ struct drm_crtc *tmp;
+
+ list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+ if (tmp == crtc)
+ return index;
+
+ index++;
+ }
+
+ BUG();
+}
+EXPORT_SYMBOL(drm_crtc_index);
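+
+/*
+ * Note: the index returned by drm_crtc_index() follows the usual DRM
+ * convention that per-CRTC bitmasks such as encoder->possible_crtcs are
+ * interpreted as (1 << index), so e.g.
+ * encoder->possible_crtcs & (1 << drm_crtc_index(crtc))
+ * tells whether an encoder can drive a given CRTC.
+ */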
+
+/**
* drm_mode_probed_add - add a mode to a connector's probed mode list
* @connector: connector the new mode
* @mode: mode data
@@ -2767,10 +2790,8 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
}
if (fb->funcs->dirty) {
- drm_modeset_lock_all(dev);
ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
clips, num_clips);
- drm_modeset_unlock_all(dev);
} else {
ret = -ENOSYS;
}
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 0058fd74063e..f7a81209beb3 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -324,35 +324,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
-/**
- * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
- * @encoder: encoder to test
- * @crtc: crtc to test
- *
- * Return false if @encoder can't be driven by @crtc, true otherwise.
- */
-static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
- struct drm_crtc *crtc)
-{
- struct drm_device *dev;
- struct drm_crtc *tmp;
- int crtc_mask = 1;
-
- WARN(!crtc, "checking null crtc?\n");
-
- dev = crtc->dev;
-
- list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
- if (tmp == crtc)
- break;
- crtc_mask <<= 1;
- }
-
- if (encoder->possible_crtcs & crtc_mask)
- return true;
- return false;
-}
-
/*
* Check the CRTC we're going to map each output to vs. its current
* CRTC. If they don't match, we have to disable the output and the CRTC
@@ -536,7 +507,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
* are later needed by vblank and swap-completion
* timestamping. They are derived from true hwmode.
*/
- drm_calc_timestamping_constants(crtc);
+ drm_calc_timestamping_constants(crtc, &crtc->hwmode);
/* FIXME: add subpixel order */
done:
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index d9137e49c4e8..345be03c23db 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -315,9 +315,6 @@ long drm_ioctl(struct file *filp,
if (drm_device_is_unplugged(dev))
return -ENODEV;
- atomic_inc(&dev->ioctl_count);
- ++file_priv->ioctl_count;
-
if ((nr >= DRM_CORE_IOCTL_COUNT) &&
((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
goto err_i1;
@@ -410,7 +407,6 @@ long drm_ioctl(struct file *filp,
if (kdata != stack_kdata)
kfree(kdata);
- atomic_dec(&dev->ioctl_count);
if (retcode)
DRM_DEBUG("ret = %d\n", retcode);
return retcode;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0a1e4a5f4234..b924306b8477 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -68,6 +68,8 @@
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
/* Force reduced-blanking timings for detailed modes */
#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
+/* Force 8bpc */
+#define EDID_QUIRK_FORCE_8BPC (1 << 8)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -128,6 +130,9 @@ static struct edid_quirk {
/* Medion MD 30217 PG */
{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+
+ /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+ { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
};
/*
@@ -600,347 +605,347 @@ static const struct drm_display_mode edid_cea_modes[] = {
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 2 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 3 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 4 - 1280x720@60Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 5 - 1920x1080i@60Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 6 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 7 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 8 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 9 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 10 - 2880x480i@60Hz */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 11 - 2880x480i@60Hz */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 12 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 13 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 14 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 15 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 16 - 1920x1080@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 17 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 18 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 19 - 1280x720@50Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 20 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 21 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 22 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 23 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 24 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 25 - 2880x576i@50Hz */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 26 - 2880x576i@50Hz */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 27 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 28 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 29 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 30 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 31 - 1920x1080@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 32 - 1920x1080@24Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, },
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 33 - 1920x1080@25Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, },
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 34 - 1920x1080@30Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, },
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 35 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 36 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 37 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 38 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 39 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 40 - 1920x1080i@100Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 100, },
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 41 - 1280x720@100Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, },
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 42 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 100, },
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 43 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 100, },
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 44 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 100, },
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 45 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 100, },
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 46 - 1920x1080i@120Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 120, },
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 47 - 1280x720@120Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, },
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 48 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 120, },
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 49 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 120, },
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 50 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 120, },
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 51 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 120, },
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 52 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 200, },
+ .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 53 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 200, },
+ .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 54 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 200, },
+ .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 55 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 200, },
+ .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 56 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 240, },
+ .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 57 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 240, },
+ .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 58 - 1440x480i@240 */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 240, },
+ .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 59 - 1440x480i@240 */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 240, },
+ .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 60 - 1280x720@24Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, },
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 61 - 1280x720@25Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, },
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 62 - 1280x720@30Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, },
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 63 - 1920x1080@120Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, },
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 64 - 1920x1080@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, },
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
};
/*
@@ -2557,25 +2562,40 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
return modes;
}
-static int
-do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
+static struct drm_display_mode *
+drm_display_mode_from_vic_index(struct drm_connector *connector,
+ const u8 *video_db, u8 video_len,
+ u8 video_index)
{
struct drm_device *dev = connector->dev;
- const u8 *mode;
+ struct drm_display_mode *newmode;
u8 cea_mode;
- int modes = 0;
- for (mode = db; mode < db + len; mode++) {
- cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
- if (cea_mode < ARRAY_SIZE(edid_cea_modes)) {
- struct drm_display_mode *newmode;
- newmode = drm_mode_duplicate(dev,
- &edid_cea_modes[cea_mode]);
- if (newmode) {
- newmode->vrefresh = 0;
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
+ if (video_db == NULL || video_index >= video_len)
+ return NULL;
+
+ /* CEA modes are numbered 1..127 */
+ cea_mode = (video_db[video_index] & 127) - 1;
+ if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
+ return NULL;
+
+ newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+ if (!newmode)
+ return NULL;
+
+ newmode->vrefresh = 0;
+
+ return newmode;
+}
+
+static int
+do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
+{
+ int i, modes = 0;
+
+ for (i = 0; i < len; i++) {
+ struct drm_display_mode *mode;
+ mode = drm_display_mode_from_vic_index(connector, db, len, i);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ modes++;
}
}
@@ -2669,21 +2689,13 @@ static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
const u8 *video_db, u8 video_len, u8 video_index)
{
- struct drm_device *dev = connector->dev;
struct drm_display_mode *newmode;
int modes = 0;
- u8 cea_mode;
-
- if (video_db == NULL || video_index >= video_len)
- return 0;
-
- /* CEA modes are numbered 1..127 */
- cea_mode = (video_db[video_index] & 127) - 1;
- if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
- return 0;
if (structure & (1 << 0)) {
- newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+ newmode = drm_display_mode_from_vic_index(connector, video_db,
+ video_len,
+ video_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
drm_mode_probed_add(connector, newmode);
@@ -2691,7 +2703,9 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
}
}
if (structure & (1 << 6)) {
- newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+ newmode = drm_display_mode_from_vic_index(connector, video_db,
+ video_len,
+ video_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
drm_mode_probed_add(connector, newmode);
@@ -2699,7 +2713,9 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
}
}
if (structure & (1 << 8)) {
- newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+ newmode = drm_display_mode_from_vic_index(connector, video_db,
+ video_len,
+ video_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
drm_mode_probed_add(connector, newmode);
@@ -2723,7 +2739,7 @@ static int
do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
const u8 *video_db, u8 video_len)
{
- int modes = 0, offset = 0, i, multi_present = 0;
+ int modes = 0, offset = 0, i, multi_present = 0, multi_len;
u8 vic_len, hdmi_3d_len = 0;
u16 mask;
u16 structure_all;
@@ -2769,32 +2785,84 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
}
offset += 1 + vic_len;
- if (!(multi_present == 1 || multi_present == 2))
- goto out;
+ if (multi_present == 1)
+ multi_len = 2;
+ else if (multi_present == 2)
+ multi_len = 4;
+ else
+ multi_len = 0;
- if ((multi_present == 1 && len < (9 + offset)) ||
- (multi_present == 2 && len < (11 + offset)))
+ if (len < (8 + offset + hdmi_3d_len - 1))
goto out;
- if ((multi_present == 1 && hdmi_3d_len < 2) ||
- (multi_present == 2 && hdmi_3d_len < 4))
+ if (hdmi_3d_len < multi_len)
goto out;
- /* 3D_Structure_ALL */
- structure_all = (db[8 + offset] << 8) | db[9 + offset];
+ if (multi_present == 1 || multi_present == 2) {
+ /* 3D_Structure_ALL */
+ structure_all = (db[8 + offset] << 8) | db[9 + offset];
- /* check if 3D_MASK is present */
- if (multi_present == 2)
- mask = (db[10 + offset] << 8) | db[11 + offset];
- else
- mask = 0xffff;
+ /* check if 3D_MASK is present */
+ if (multi_present == 2)
+ mask = (db[10 + offset] << 8) | db[11 + offset];
+ else
+ mask = 0xffff;
+
+ for (i = 0; i < 16; i++) {
+ if (mask & (1 << i))
+ modes += add_3d_struct_modes(connector,
+ structure_all,
+ video_db,
+ video_len, i);
+ }
+ }
+
+ offset += multi_len;
+
+ for (i = 0; i < (hdmi_3d_len - multi_len); i++) {
+ int vic_index;
+ struct drm_display_mode *newmode = NULL;
+ unsigned int newflag = 0;
+ bool detail_present;
- for (i = 0; i < 16; i++) {
- if (mask & (1 << i))
- modes += add_3d_struct_modes(connector,
- structure_all,
- video_db,
- video_len, i);
+ detail_present = ((db[8 + offset + i] & 0x0f) > 7);
+
+ if (detail_present && (i + 1 == hdmi_3d_len - multi_len))
+ break;
+
+ /* 2D_VIC_order_X */
+ vic_index = db[8 + offset + i] >> 4;
+
+ /* 3D_Structure_X */
+ switch (db[8 + offset + i] & 0x0f) {
+ case 0:
+ newflag = DRM_MODE_FLAG_3D_FRAME_PACKING;
+ break;
+ case 6:
+ newflag = DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
+ break;
+ case 8:
+ /* 3D_Detail_X */
+ if ((db[9 + offset + i] >> 4) == 1)
+ newflag = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+ break;
+ }
+
+ if (newflag != 0) {
+ newmode = drm_display_mode_from_vic_index(connector,
+ video_db,
+ video_len,
+ vic_index);
+
+ if (newmode) {
+ newmode->flags |= newflag;
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+
+ if (detail_present)
+ i++;
}
out:
@@ -3435,6 +3503,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
drm_add_display_info(edid, &connector->display_info);
+ if (quirks & EDID_QUIRK_FORCE_8BPC)
+ connector->display_info.bpc = 8;
+
return num_modes;
}
EXPORT_SYMBOL(drm_add_edid_modes);
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 9081172ef057..1b4c7a5442c5 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -141,7 +141,7 @@ static int edid_size(const u8 *edid, int data_size)
return (edid[0x7e] + 1) * EDID_LENGTH;
}
-static u8 *edid_load(struct drm_connector *connector, const char *name,
+static void *edid_load(struct drm_connector *connector, const char *name,
const char *connector_name)
{
const struct firmware *fw = NULL;
@@ -263,7 +263,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
if (*last == '\n')
*last = '\0';
- edid = (struct edid *) edid_load(connector, edidname, connector_name);
+ edid = edid_load(connector, edidname, connector_name);
if (IS_ERR_OR_NULL(edid))
return 0;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0a19401aff80..98a03639b413 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -359,6 +359,11 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
struct drm_crtc *crtc;
int bound = 0, crtcs_bound = 0;
+ /* Sometimes user space wants everything disabled, so don't steal the
+ * display if there's a master. */
+ if (dev->primary->master)
+ return false;
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->fb)
crtcs_bound++;
@@ -368,6 +373,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
if (bound < crtcs_bound)
return false;
+
return true;
}
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index c5b929c3f77a..7f2af9aca038 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -232,7 +232,6 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_put_pid;
}
- priv->ioctl_count = 0;
/* for compatibility root is always authenticated */
priv->always_authenticated = capable(CAP_SYS_ADMIN);
priv->authenticated = priv->always_authenticated;
@@ -392,9 +391,6 @@ static void drm_legacy_dev_reinit(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- atomic_set(&dev->ioctl_count, 0);
- atomic_set(&dev->vma_count, 0);
-
dev->sigdata.lock = NULL;
dev->context_flag = 0;
@@ -578,12 +574,7 @@ int drm_release(struct inode *inode, struct file *filp)
*/
if (!--dev->open_count) {
- if (atomic_read(&dev->ioctl_count)) {
- DRM_ERROR("Device busy: %d\n",
- atomic_read(&dev->ioctl_count));
- retcode = -EBUSY;
- } else
- retcode = drm_lastclose(dev);
+ retcode = drm_lastclose(dev);
if (drm_device_is_unplugged(dev))
drm_put_dev(dev);
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4761adedad2a..5bbad873c798 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -91,19 +91,19 @@
int
drm_gem_init(struct drm_device *dev)
{
- struct drm_gem_mm *mm;
+ struct drm_vma_offset_manager *vma_offset_manager;
mutex_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
- mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
- if (!mm) {
+ vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
+ if (!vma_offset_manager) {
DRM_ERROR("out of memory\n");
return -ENOMEM;
}
- dev->mm_private = mm;
- drm_vma_offset_manager_init(&mm->vma_manager,
+ dev->vma_offset_manager = vma_offset_manager;
+ drm_vma_offset_manager_init(vma_offset_manager,
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
@@ -113,11 +113,10 @@ drm_gem_init(struct drm_device *dev)
void
drm_gem_destroy(struct drm_device *dev)
{
- struct drm_gem_mm *mm = dev->mm_private;
- drm_vma_offset_manager_destroy(&mm->vma_manager);
- kfree(mm);
- dev->mm_private = NULL;
+ drm_vma_offset_manager_destroy(dev->vma_offset_manager);
+ kfree(dev->vma_offset_manager);
+ dev->vma_offset_manager = NULL;
}
/**
@@ -129,11 +128,12 @@ int drm_gem_object_init(struct drm_device *dev,
{
struct file *filp;
+ drm_gem_private_object_init(dev, obj, size);
+
filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
if (IS_ERR(filp))
return PTR_ERR(filp);
- drm_gem_private_object_init(dev, obj, size);
obj->filp = filp;
return 0;
@@ -175,11 +175,6 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
mutex_unlock(&filp->prime.lock);
}
-static void drm_gem_object_ref_bug(struct kref *list_kref)
-{
- BUG();
-}
-
/**
* Called after the last handle to the object has been closed
*
@@ -195,13 +190,6 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj)
if (obj->name) {
idr_remove(&dev->object_name_idr, obj->name);
obj->name = 0;
- /*
- * The object name held a reference to this object, drop
- * that now.
- *
- * This cannot be the last reference, since the handle holds one too.
- */
- kref_put(&obj->refcount, drm_gem_object_ref_bug);
}
}
@@ -374,9 +362,8 @@ void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_gem_mm *mm = dev->mm_private;
- drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
+ drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
@@ -398,9 +385,8 @@ int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
struct drm_device *dev = obj->dev;
- struct drm_gem_mm *mm = dev->mm_private;
- return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
+ return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
@@ -602,9 +588,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
goto err;
obj->name = ret;
-
- /* Allocate a reference for the name table. */
- drm_gem_object_reference(obj);
}
args->name = (uint64_t) obj->name;
@@ -833,7 +816,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
- struct drm_gem_mm *mm = dev->mm_private;
struct drm_gem_object *obj;
struct drm_vma_offset_node *node;
int ret = 0;
@@ -843,7 +825,8 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
mutex_lock(&dev->struct_mutex);
- node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
+ node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+ vma->vm_pgoff,
vma_pages(vma));
if (!node) {
mutex_unlock(&dev->struct_mutex);
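A minimal sketch of the driver-side counterpart to the change above: with the per-device drm_gem_mm gone, mmap offsets now live in dev->vma_offset_manager, so a (hypothetical) helper only needs the existing drm_gem_create_mmap_offset() and drm_vma_node_offset_addr() calls; the helper name and any ioctl plumbing around it are assumptions, not part of the patch.

static int example_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Allocates a node in dev->vma_offset_manager for this object. */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		return ret;

	/* The fake file offset userspace passes to mmap() on the DRM fd;
	 * drm_gem_mmap() above resolves it back to the object through
	 * drm_vma_offset_exact_lookup(). */
	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	return 0;
}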
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 7d5a152eeb02..7473035dd28b 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -186,14 +186,14 @@ int drm_clients_info(struct seq_file *m, void *data)
struct drm_file *priv;
mutex_lock(&dev->struct_mutex);
- seq_printf(m, "a dev pid uid magic ioctls\n\n");
+ seq_printf(m, "a dev pid uid magic\n\n");
list_for_each_entry(priv, &dev->filelist, lhead) {
- seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
+ seq_printf(m, "%c %3d %5d %5d %10u\n",
priv->authenticated ? 'y' : 'n',
priv->minor->index,
pid_vnr(priv->pid),
from_kuid_munged(seq_user_ns(m), priv->uid),
- priv->magic, priv->ioctl_count);
+ priv->magic);
}
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -234,14 +234,18 @@ int drm_vma_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_vma_entry *pt;
struct vm_area_struct *vma;
+ unsigned long vma_count = 0;
#if defined(__i386__)
unsigned int pgprot;
#endif
mutex_lock(&dev->struct_mutex);
- seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
- atomic_read(&dev->vma_count),
- high_memory, (void *)(unsigned long)virt_to_phys(high_memory));
+ list_for_each_entry(pt, &dev->vmalist, head)
+ vma_count++;
+
+ seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
+ vma_count, high_memory,
+ (void *)(unsigned long)virt_to_phys(high_memory));
list_for_each_entry(pt, &dev->vmalist, head) {
vma = pt->vma;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 64c34d5876ff..c2676b5908d9 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -368,7 +368,7 @@ int drm_irq_uninstall(struct drm_device *dev)
if (dev->num_crtcs) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
- DRM_WAKEUP(&dev->vblank[i].queue);
+ wake_up(&dev->vblank[i].queue);
dev->vblank[i].enabled = false;
dev->vblank[i].last =
dev->driver->get_vblank_counter(dev, i);
@@ -436,45 +436,41 @@ int drm_control(struct drm_device *dev, void *data,
}
/**
- * drm_calc_timestamping_constants - Calculate and
- * store various constants which are later needed by
- * vblank and swap-completion timestamping, e.g, by
- * drm_calc_vbltimestamp_from_scanoutpos().
- * They are derived from crtc's true scanout timing,
- * so they take things like panel scaling or other
- * adjustments into account.
+ * drm_calc_timestamping_constants - Calculate vblank timestamp constants
*
* @crtc drm_crtc whose timestamp constants should be updated.
+ * @mode display mode containing the scanout timings
*
+ * Calculate and store various constants which are later
+ * needed by vblank and swap-completion timestamping, e.g.,
+ * by drm_calc_vbltimestamp_from_scanoutpos(). They are
+ * derived from crtc's true scanout timing, so they take
+ * things like panel scaling or other adjustments into account.
*/
-void drm_calc_timestamping_constants(struct drm_crtc *crtc)
+void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
- s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
- u64 dotclock;
-
- /* Dot clock in Hz: */
- dotclock = (u64) crtc->hwmode.clock * 1000;
-
- /* Fields of interlaced scanout modes are only half a frame duration.
- * Double the dotclock to get half the frame-/line-/pixelduration.
- */
- if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
- dotclock *= 2;
+ int linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+ int dotclock = mode->crtc_clock;
/* Valid dotclock? */
if (dotclock > 0) {
- int frame_size;
- /* Convert scanline length in pixels and video dot clock to
- * line duration, frame duration and pixel duration in
- * nanoseconds:
+ int frame_size = mode->crtc_htotal * mode->crtc_vtotal;
+
+ /*
+ * Convert scanline length in pixels and video
+ * dot clock to line duration, frame duration
+ * and pixel duration in nanoseconds:
*/
- pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
- linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
- 1000000000), dotclock);
- frame_size = crtc->hwmode.crtc_htotal *
- crtc->hwmode.crtc_vtotal;
- framedur_ns = (s64) div64_u64((u64) frame_size * 1000000000,
- dotclock);
+ pixeldur_ns = 1000000 / dotclock;
+ linedur_ns = div_u64((u64) mode->crtc_htotal * 1000000, dotclock);
+ framedur_ns = div_u64((u64) frame_size * 1000000, dotclock);
+
+ /*
+ * Fields of interlaced scanout modes are only half a frame duration.
+ */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ framedur_ns /= 2;
} else
DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
crtc->base.id);
@@ -484,11 +480,11 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
crtc->framedur_ns = framedur_ns;
DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
- crtc->base.id, crtc->hwmode.crtc_htotal,
- crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
+ crtc->base.id, mode->crtc_htotal,
+ mode->crtc_vtotal, mode->crtc_vdisplay);
DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
- crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
- (int) linedur_ns, (int) pixeldur_ns);
+ crtc->base.id, dotclock, framedur_ns,
+ linedur_ns, pixeldur_ns);
}
EXPORT_SYMBOL(drm_calc_timestamping_constants);
@@ -521,6 +517,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* 0 = Default.
* DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
* @refcrtc: drm_crtc* of crtc which defines scanout timing.
+ * @mode: mode which defines the scanout timings
*
* Returns negative value on error, failure or if not supported in current
* video mode:
@@ -540,14 +537,14 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
int *max_error,
struct timeval *vblank_time,
unsigned flags,
- struct drm_crtc *refcrtc)
+ const struct drm_crtc *refcrtc,
+ const struct drm_display_mode *mode)
{
ktime_t stime, etime, mono_time_offset;
struct timeval tv_etime;
- struct drm_display_mode *mode;
- int vbl_status, vtotal, vdisplay;
+ int vbl_status;
int vpos, hpos, i;
- s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+ int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
bool invbl;
if (crtc < 0 || crtc >= dev->num_crtcs) {
@@ -561,10 +558,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
return -EIO;
}
- mode = &refcrtc->hwmode;
- vtotal = mode->crtc_vtotal;
- vdisplay = mode->crtc_vdisplay;
-
/* Durations of frames, lines, pixels in nanoseconds. */
framedur_ns = refcrtc->framedur_ns;
linedur_ns = refcrtc->linedur_ns;
@@ -573,7 +566,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
/* If mode timing undefined, just return as no-op:
* Happens during initial modesetting of a crtc.
*/
- if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
+ if (framedur_ns == 0) {
DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
return -EAGAIN;
}
@@ -590,7 +583,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
* Get vertical and horizontal scanout position vpos, hpos,
* and bounding timestamps stime, etime, pre/post query.
*/
- vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos,
+ vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos,
&hpos, &stime, &etime);
/*
@@ -611,18 +604,18 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
/* Accept result with < max_error nsecs timing uncertainty. */
- if (duration_ns <= (s64) *max_error)
+ if (duration_ns <= *max_error)
break;
}
/* Noisy system timing? */
if (i == DRM_TIMESTAMP_MAXRETRIES) {
DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
- crtc, (int) duration_ns/1000, *max_error/1000, i);
+ crtc, duration_ns/1000, *max_error/1000, i);
}
/* Return upper bound of timestamp precision error. */
- *max_error = (int) duration_ns;
+ *max_error = duration_ns;
/* Check if in vblank area:
* vpos is >=0 in video scanout area, but negative
@@ -635,25 +628,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
* since start of scanout at first display scanline. delta_ns
* can be negative if start of scanout hasn't happened yet.
*/
- delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
-
- /* Is vpos outside nominal vblank area, but less than
- * 1/100 of a frame height away from start of vblank?
- * If so, assume this isn't a massively delayed vblank
- * interrupt, but a vblank interrupt that fired a few
- * microseconds before true start of vblank. Compensate
- * by adding a full frame duration to the final timestamp.
- * Happens, e.g., on ATI R500, R600.
- *
- * We only do this if DRM_CALLED_FROM_VBLIRQ.
- */
- if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
- ((vdisplay - vpos) < vtotal / 100)) {
- delta_ns = delta_ns - framedur_ns;
-
- /* Signal this correction as "applied". */
- vbl_status |= 0x8;
- }
+ delta_ns = vpos * linedur_ns + hpos * pixeldur_ns;
if (!drm_timestamp_monotonic)
etime = ktime_sub(etime, mono_time_offset);
@@ -673,7 +648,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
crtc, (int)vbl_status, hpos, vpos,
(long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
(long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
- (int)duration_ns/1000, i);
+ duration_ns/1000, i);
vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
if (invbl)
@@ -960,7 +935,7 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
(drm_vblank_offdelay > 0))
mod_timer(&dev->vblank_disable_timer,
- jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
+ jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
EXPORT_SYMBOL(drm_vblank_put);
@@ -980,7 +955,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
vblank_disable_and_save(dev, crtc);
- DRM_WAKEUP(&dev->vblank[crtc].queue);
+ wake_up(&dev->vblank[crtc].queue);
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
@@ -1244,7 +1219,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
vblwait->request.sequence, crtc);
dev->vblank[crtc].last_wait = vblwait->request.sequence;
- DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ,
(((drm_vblank_count(dev, crtc) -
vblwait->request.sequence) <= (1 << 23)) ||
!dev->irq_enabled));
@@ -1363,7 +1338,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
crtc, (int) diff_ns);
}
- DRM_WAKEUP(&dev->vblank[crtc].queue);
+ wake_up(&dev->vblank[crtc].queue);
drm_handle_vblank_events(dev, crtc);
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
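Worked example (illustrative only) of the new integer-math constants, using the formulas from the hunk above for a 1920x1080@60 mode with crtc_clock = 148500 kHz, crtc_htotal = 2200 and crtc_vtotal = 1125:

	pixeldur_ns = 1000000 / 148500                  = 6 ns
	linedur_ns  = 2200 * 1000000 / 148500           = 14814 ns
	framedur_ns = 2200 * 1125 * 1000000 / 148500    = 16666666 ns  (~60 Hz)

For an interlaced mode, the DRM_MODE_FLAG_INTERLACE branch then halves framedur_ns, since each field scans out in half a frame time.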
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 64e44fad8ae8..00c67c0f2381 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -82,19 +82,19 @@ static void *agp_remap(unsigned long offset, unsigned long size,
}
/** Wrapper around agp_free_memory() */
-void drm_free_agp(DRM_AGP_MEM * handle, int pages)
+void drm_free_agp(struct agp_memory * handle, int pages)
{
agp_free_memory(handle);
}
/** Wrapper around agp_bind_memory() */
-int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+int drm_bind_agp(struct agp_memory * handle, unsigned int start)
{
return agp_bind_memory(handle, start);
}
/** Wrapper around agp_unbind_memory() */
-int drm_unbind_agp(DRM_AGP_MEM * handle)
+int drm_unbind_agp(struct agp_memory * handle)
{
return agp_unbind_memory(handle);
}
@@ -110,8 +110,7 @@ static inline void *agp_remap(unsigned long offset, unsigned long size,
void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+ if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap(map->offset, map->size);
@@ -120,8 +119,7 @@ EXPORT_SYMBOL(drm_core_ioremap);
void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
{
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+ if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap_wc(map->offset, map->size);
@@ -133,8 +131,7 @@ void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
if (!map->handle || !map->size)
return;
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+ if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
vunmap(map->handle);
else
iounmap(map->handle);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
new file mode 100644
index 000000000000..b155ee2ffa17
--- /dev/null
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -0,0 +1,315 @@
+/*
+ * MIPI DSI Bus
+ *
+ * Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_mipi_dsi.h>
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <video/mipi_display.h>
+
+static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
+{
+ return of_driver_match_device(dev, drv);
+}
+
+static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
+ .runtime_suspend = pm_generic_runtime_suspend,
+ .runtime_resume = pm_generic_runtime_resume,
+ .suspend = pm_generic_suspend,
+ .resume = pm_generic_resume,
+ .freeze = pm_generic_freeze,
+ .thaw = pm_generic_thaw,
+ .poweroff = pm_generic_poweroff,
+ .restore = pm_generic_restore,
+};
+
+static struct bus_type mipi_dsi_bus_type = {
+ .name = "mipi-dsi",
+ .match = mipi_dsi_device_match,
+ .pm = &mipi_dsi_device_pm_ops,
+};
+
+static void mipi_dsi_dev_release(struct device *dev)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+ of_node_put(dev->of_node);
+ kfree(dsi);
+}
+
+static const struct device_type mipi_dsi_device_type = {
+ .release = mipi_dsi_dev_release,
+};
+
+static struct mipi_dsi_device *mipi_dsi_device_alloc(struct mipi_dsi_host *host)
+{
+ struct mipi_dsi_device *dsi;
+
+ dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return ERR_PTR(-ENOMEM);
+
+ dsi->host = host;
+ dsi->dev.bus = &mipi_dsi_bus_type;
+ dsi->dev.parent = host->dev;
+ dsi->dev.type = &mipi_dsi_device_type;
+
+ device_initialize(&dsi->dev);
+
+ return dsi;
+}
+
+static int mipi_dsi_device_add(struct mipi_dsi_device *dsi)
+{
+ struct mipi_dsi_host *host = dsi->host;
+
+ dev_set_name(&dsi->dev, "%s.%d", dev_name(host->dev), dsi->channel);
+
+ return device_add(&dsi->dev);
+}
+
+static struct mipi_dsi_device *
+of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
+{
+ struct mipi_dsi_device *dsi;
+ struct device *dev = host->dev;
+ int ret;
+ u32 reg;
+
+ ret = of_property_read_u32(node, "reg", &reg);
+ if (ret) {
+ dev_err(dev, "device node %s has no valid reg property: %d\n",
+ node->full_name, ret);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (reg > 3) {
+ dev_err(dev, "device node %s has invalid reg property: %u\n",
+ node->full_name, reg);
+ return ERR_PTR(-EINVAL);
+ }
+
+ dsi = mipi_dsi_device_alloc(host);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to allocate DSI device %s: %ld\n",
+ node->full_name, PTR_ERR(dsi));
+ return dsi;
+ }
+
+ dsi->dev.of_node = of_node_get(node);
+ dsi->channel = reg;
+
+ ret = mipi_dsi_device_add(dsi);
+ if (ret) {
+ dev_err(dev, "failed to add DSI device %s: %d\n",
+ node->full_name, ret);
+ kfree(dsi);
+ return ERR_PTR(ret);
+ }
+
+ return dsi;
+}
+
+int mipi_dsi_host_register(struct mipi_dsi_host *host)
+{
+ struct device_node *node;
+
+ for_each_available_child_of_node(host->dev->of_node, node)
+ of_mipi_dsi_device_add(host, node);
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_host_register);
+
+static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+ device_unregister(&dsi->dev);
+
+ return 0;
+}
+
+void mipi_dsi_host_unregister(struct mipi_dsi_host *host)
+{
+ device_for_each_child(host->dev, NULL, mipi_dsi_remove_device_fn);
+}
+EXPORT_SYMBOL(mipi_dsi_host_unregister);
+
+/**
+ * mipi_dsi_attach - attach a DSI device to its DSI host
+ * @dsi: DSI peripheral
+ */
+int mipi_dsi_attach(struct mipi_dsi_device *dsi)
+{
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+
+ if (!ops || !ops->attach)
+ return -ENOSYS;
+
+ return ops->attach(dsi->host, dsi);
+}
+EXPORT_SYMBOL(mipi_dsi_attach);
+
+/**
+ * mipi_dsi_detach - detach a DSI device from its DSI host
+ * @dsi: DSI peripheral
+ */
+int mipi_dsi_detach(struct mipi_dsi_device *dsi)
+{
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+
+ if (!ops || !ops->detach)
+ return -ENOSYS;
+
+ return ops->detach(dsi->host, dsi);
+}
+EXPORT_SYMBOL(mipi_dsi_detach);
+
+/**
+ * mipi_dsi_dcs_write - send DCS write command
+ * @dsi: DSI device
+ * @channel: virtual channel
+ * @data: pointer to the command followed by parameters
+ * @len: length of @data
+ */
+int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel,
+ const void *data, size_t len)
+{
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ struct mipi_dsi_msg msg = {
+ .channel = channel,
+ .tx_buf = data,
+ .tx_len = len
+ };
+
+ if (!ops || !ops->transfer)
+ return -ENOSYS;
+
+ switch (len) {
+ case 0:
+ return -EINVAL;
+ case 1:
+ msg.type = MIPI_DSI_DCS_SHORT_WRITE;
+ break;
+ case 2:
+ msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
+ break;
+ default:
+ msg.type = MIPI_DSI_DCS_LONG_WRITE;
+ break;
+ }
+
+ return ops->transfer(dsi->host, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_write);
+
+/**
+ * mipi_dsi_dcs_read - send DCS read request command
+ * @dsi: DSI device
+ * @channel: virtual channel
+ * @cmd: DCS read command
+ * @data: pointer to read buffer
+ * @len: length of @data
+ *
+ * Returns the number of bytes read on success or a negative error code on failure.
+ */
+ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel,
+ u8 cmd, void *data, size_t len)
+{
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ struct mipi_dsi_msg msg = {
+ .channel = channel,
+ .type = MIPI_DSI_DCS_READ,
+ .tx_buf = &cmd,
+ .tx_len = 1,
+ .rx_buf = data,
+ .rx_len = len
+ };
+
+ if (!ops || !ops->transfer)
+ return -ENOSYS;
+
+ return ops->transfer(dsi->host, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_read);
+
+static int mipi_dsi_drv_probe(struct device *dev)
+{
+ struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+ return drv->probe(dsi);
+}
+
+static int mipi_dsi_drv_remove(struct device *dev)
+{
+ struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+ return drv->remove(dsi);
+}
+
+/**
+ * mipi_dsi_driver_register - register a driver for DSI devices
+ * @drv: DSI driver structure
+ */
+int mipi_dsi_driver_register(struct mipi_dsi_driver *drv)
+{
+ drv->driver.bus = &mipi_dsi_bus_type;
+ if (drv->probe)
+ drv->driver.probe = mipi_dsi_drv_probe;
+ if (drv->remove)
+ drv->driver.remove = mipi_dsi_drv_remove;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(mipi_dsi_driver_register);
+
+/**
+ * mipi_dsi_driver_unregister - unregister a driver for DSI devices
+ * @drv: DSI driver structure
+ */
+void mipi_dsi_driver_unregister(struct mipi_dsi_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(mipi_dsi_driver_unregister);
+
+static int __init mipi_dsi_bus_init(void)
+{
+ return bus_register(&mipi_dsi_bus_type);
+}
+postcore_initcall(mipi_dsi_bus_init);
+
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_DESCRIPTION("MIPI DSI Bus");
+MODULE_LICENSE("GPL and additional rights");
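A minimal sketch of a peripheral driver binding against the new mipi-dsi bus. The driver name, compatible string, callbacks and the DCS byte are hypothetical; only the mipi_dsi_* calls and struct fields come from the file above.

#include <drm/drm_mipi_dsi.h>
#include <linux/module.h>
#include <linux/of.h>

static int example_dsi_probe(struct mipi_dsi_device *dsi)
{
	/* Hypothetical "exit sleep" DCS command, sent on the device's
	 * virtual channel with the helper added above (len == 1, so it
	 * goes out as MIPI_DSI_DCS_SHORT_WRITE). */
	u8 exit_sleep = 0x11;
	int ret;

	ret = mipi_dsi_attach(dsi);
	if (ret < 0)
		return ret;

	return mipi_dsi_dcs_write(dsi, dsi->channel, &exit_sleep,
				  sizeof(exit_sleep));
}

static int example_dsi_remove(struct mipi_dsi_device *dsi)
{
	return mipi_dsi_detach(dsi);
}

static const struct of_device_id example_dsi_of_match[] = {
	{ .compatible = "example,dsi-panel" },
	{ }
};
MODULE_DEVICE_TABLE(of, example_dsi_of_match);

static struct mipi_dsi_driver example_dsi_driver = {
	.probe = example_dsi_probe,
	.remove = example_dsi_remove,
	.driver = {
		.name = "example-dsi-panel",
		.of_match_table = example_dsi_of_match,
	},
};

static int __init example_dsi_init(void)
{
	return mipi_dsi_driver_register(&example_dsi_driver);
}
module_init(example_dsi_init);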
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 85071a1c4547..b0733153dfd2 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1041,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
/* if equal delete the probed mode */
mode->status = pmode->status;
/* Merge type bits together */
- mode->type = pmode->type;
+ mode->type |= pmode->type;
list_del(&pmode->head);
drm_mode_destroy(connector->dev, pmode);
break;
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
new file mode 100644
index 000000000000..2ef988e037b7
--- /dev/null
+++ b/drivers/gpu/drm/drm_panel.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_panel.h>
+
+static DEFINE_MUTEX(panel_lock);
+static LIST_HEAD(panel_list);
+
+void drm_panel_init(struct drm_panel *panel)
+{
+ INIT_LIST_HEAD(&panel->list);
+}
+EXPORT_SYMBOL(drm_panel_init);
+
+int drm_panel_add(struct drm_panel *panel)
+{
+ mutex_lock(&panel_lock);
+ list_add_tail(&panel->list, &panel_list);
+ mutex_unlock(&panel_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_panel_add);
+
+void drm_panel_remove(struct drm_panel *panel)
+{
+ mutex_lock(&panel_lock);
+ list_del_init(&panel->list);
+ mutex_unlock(&panel_lock);
+}
+EXPORT_SYMBOL(drm_panel_remove);
+
+int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
+{
+ if (panel->connector)
+ return -EBUSY;
+
+ panel->connector = connector;
+ panel->drm = connector->dev;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_panel_attach);
+
+int drm_panel_detach(struct drm_panel *panel)
+{
+ panel->connector = NULL;
+ panel->drm = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_panel_detach);
+
+#ifdef CONFIG_OF
+struct drm_panel *of_drm_find_panel(struct device_node *np)
+{
+ struct drm_panel *panel;
+
+ mutex_lock(&panel_lock);
+
+ list_for_each_entry(panel, &panel_list, list) {
+ if (panel->dev->of_node == np) {
+ mutex_unlock(&panel_lock);
+ return panel;
+ }
+ }
+
+ mutex_unlock(&panel_lock);
+ return NULL;
+}
+EXPORT_SYMBOL(of_drm_find_panel);
+#endif
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("DRM panel infrastructure");
+MODULE_LICENSE("GPL and additional rights");
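A minimal usage sketch (hypothetical driver names; only the drm_panel_* and of_drm_find_panel() calls come from the file above): a panel driver registers itself, and a display controller later resolves the panel from a device-tree node and attaches it to a connector.

#include <linux/platform_device.h>
#include <drm/drm_panel.h>

static struct drm_panel example_panel;

static int example_panel_probe(struct platform_device *pdev)
{
	drm_panel_init(&example_panel);
	example_panel.dev = &pdev->dev;	/* matched by of_drm_find_panel() */
	return drm_panel_add(&example_panel);
}

static int example_connector_bind(struct drm_connector *connector,
				  struct device_node *panel_node)
{
	struct drm_panel *panel = of_drm_find_panel(panel_node);

	if (!panel)
		return -EPROBE_DEFER;	/* panel driver not probed yet */

	return drm_panel_attach(panel, connector);
}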
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 02679793c9e2..5736aaa7e86c 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -262,16 +262,11 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
return 0;
}
-static int drm_pci_agp_init(struct drm_device *dev)
+static void drm_pci_agp_init(struct drm_device *dev)
{
- if (drm_core_has_AGP(dev)) {
+ if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
if (drm_pci_device_is_agp(dev))
dev->agp = drm_agp_init(dev);
- if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
- && (dev->agp == NULL)) {
- DRM_ERROR("Cannot initialize the agpgart module.\n");
- return -EINVAL;
- }
if (dev->agp) {
dev->agp->agp_mtrr = arch_phys_wc_add(
dev->agp->agp_info.aper_base,
@@ -279,15 +274,14 @@ static int drm_pci_agp_init(struct drm_device *dev)
1024 * 1024);
}
}
- return 0;
}
-static void drm_pci_agp_destroy(struct drm_device *dev)
+void drm_pci_agp_destroy(struct drm_device *dev)
{
- if (drm_core_has_AGP(dev) && dev->agp) {
+ if (dev->agp) {
arch_phys_wc_del(dev->agp->agp_mtrr);
drm_agp_clear(dev);
- drm_agp_destroy(dev->agp);
+ kfree(dev->agp);
dev->agp = NULL;
}
}
@@ -299,8 +293,6 @@ static struct drm_bus drm_pci_bus = {
.set_busid = drm_pci_set_busid,
.set_unique = drm_pci_set_unique,
.irq_by_busid = drm_pci_irq_by_busid,
- .agp_init = drm_pci_agp_init,
- .agp_destroy = drm_pci_agp_destroy,
};
/**
@@ -338,17 +330,25 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
if (drm_core_check_feature(dev, DRIVER_MODESET))
pci_set_drvdata(pdev, dev);
+ drm_pci_agp_init(dev);
+
ret = drm_dev_register(dev, ent->driver_data);
if (ret)
- goto err_pci;
+ goto err_agp;
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, pci_name(pdev), dev->primary->index);
+ /* No locking needed: shadow-attach is single-threaded, since it may
+ * only be called from the per-driver module init hook. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);
+
return 0;
-err_pci:
+err_agp:
+ drm_pci_agp_destroy(dev);
pci_disable_device(pdev);
err_free:
drm_dev_free(dev);
@@ -375,7 +375,6 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
DRM_DEBUG("\n");
- INIT_LIST_HEAD(&driver->device_list);
driver->kdriver.pci = pdriver;
driver->bus = &drm_pci_bus;
@@ -383,6 +382,7 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
return pci_register_driver(pdriver);
/* If not using KMS, fall back to stealth mode manual scanning. */
+ INIT_LIST_HEAD(&driver->legacy_dev_list);
for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
pid = &pdriver->id_table[i];
@@ -452,6 +452,7 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
return -1;
}
+void drm_pci_agp_destroy(struct drm_device *dev) {}
#endif
EXPORT_SYMBOL(drm_pci_init);
@@ -465,8 +466,11 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
if (driver->driver_features & DRIVER_MODESET) {
pci_unregister_driver(pdriver);
} else {
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+ list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
+ legacy_dev_list) {
drm_put_dev(dev);
+ list_del(&dev->legacy_dev_list);
+ }
}
DRM_INFO("Module unloaded\n");
}
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index fc24fee8ec83..21fc82006b78 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -147,18 +147,6 @@ int drm_platform_init(struct drm_driver *driver, struct platform_device *platfor
driver->kdriver.platform_device = platform_device;
driver->bus = &drm_platform_bus;
- INIT_LIST_HEAD(&driver->device_list);
return drm_get_platform_dev(platform_device, driver);
}
EXPORT_SYMBOL(drm_platform_init);
-
-void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device)
-{
- struct drm_device *dev, *tmp;
- DRM_DEBUG("\n");
-
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
- drm_put_dev(dev);
- DRM_INFO("Module unloaded\n");
-}
-EXPORT_SYMBOL(drm_platform_exit);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index f53d5246979c..98a33c580ca1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -99,13 +99,19 @@ void drm_ut_debug_printk(unsigned int request_level,
const char *function_name,
const char *format, ...)
{
+ struct va_format vaf;
va_list args;
if (drm_debug & request_level) {
- if (function_name)
- printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
va_start(args, format);
- vprintk(format, args);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ if (function_name)
+ printk(KERN_DEBUG "[%s:%s], %pV", prefix,
+ function_name, &vaf);
+ else
+ printk(KERN_DEBUG "%pV", &vaf);
va_end(args);
}
}
@@ -521,16 +527,10 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
mutex_lock(&drm_global_mutex);
- if (dev->driver->bus->agp_init) {
- ret = dev->driver->bus->agp_init(dev);
- if (ret)
- goto out_unlock;
- }
-
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
if (ret)
- goto err_agp;
+ goto out_unlock;
}
if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
@@ -557,8 +557,6 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto err_unload;
}
- list_add_tail(&dev->driver_item, &dev->driver->device_list);
-
ret = 0;
goto out_unlock;
@@ -566,14 +564,11 @@ err_unload:
if (dev->driver->unload)
dev->driver->unload(dev);
err_primary_node:
- drm_put_minor(dev->primary);
+ drm_unplug_minor(dev->primary);
err_render_node:
- drm_put_minor(dev->render);
+ drm_unplug_minor(dev->render);
err_control_node:
- drm_put_minor(dev->control);
-err_agp:
- if (dev->driver->bus->agp_destroy)
- dev->driver->bus->agp_destroy(dev);
+ drm_unplug_minor(dev->control);
out_unlock:
mutex_unlock(&drm_global_mutex);
return ret;
@@ -597,8 +592,8 @@ void drm_dev_unregister(struct drm_device *dev)
if (dev->driver->unload)
dev->driver->unload(dev);
- if (dev->driver->bus->agp_destroy)
- dev->driver->bus->agp_destroy(dev);
+ if (dev->agp)
+ drm_pci_agp_destroy(dev);
drm_vblank_cleanup(dev);
@@ -608,7 +603,5 @@ void drm_dev_unregister(struct drm_device *dev)
drm_unplug_minor(dev->control);
drm_unplug_minor(dev->render);
drm_unplug_minor(dev->primary);
-
- list_del(&dev->driver_item);
}
EXPORT_SYMBOL(drm_dev_unregister);
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index b179b70e7853..0f8cb1ae7607 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -1,4 +1,5 @@
#include <drm/drmP.h>
+#include <drm/drm_usb.h>
#include <linux/usb.h>
#include <linux/module.h>
@@ -63,7 +64,6 @@ int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
int res;
DRM_DEBUG("\n");
- INIT_LIST_HEAD(&driver->device_list);
driver->kdriver.usb = udriver;
driver->bus = &drm_usb_bus;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 93e95d7efd57..24e045c4f531 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -101,7 +101,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/*
* Find the right map
*/
- if (!drm_core_has_AGP(dev))
+ if (!dev->agp)
goto vm_fault_error;
if (!dev->agp || !dev->agp->cant_use_aperture)
@@ -220,7 +220,6 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_dec(&dev->vma_count);
map = vma->vm_private_data;
@@ -266,9 +265,6 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
- case _DRM_GEM:
- DRM_ERROR("tried to rmmap GEM object\n");
- break;
}
kfree(map);
}
@@ -408,7 +404,6 @@ void drm_vm_open_locked(struct drm_device *dev,
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_inc(&dev->vma_count);
vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
if (vma_entry) {
@@ -436,7 +431,6 @@ void drm_vm_close_locked(struct drm_device *dev,
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_dec(&dev->vma_count);
list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma == vma) {
@@ -595,7 +589,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
switch (map->type) {
#if !defined(__arm__)
case _DRM_AGP:
- if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
+ if (dev->agp && dev->agp->cant_use_aperture) {
/*
* On some platforms we can't talk to bus dma address from the CPU, so for
* memory of type DRM_AGP, we'll deal with sorting out the real physical
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f227f544aa36..6e1a1a20cf6b 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -51,7 +51,7 @@ config DRM_EXYNOS_G2D
config DRM_EXYNOS_IPP
bool "Exynos DRM IPP"
- depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
+ depends on DRM_EXYNOS
help
Choose this option if you want to use IPP feature for DRM.
@@ -69,6 +69,6 @@ config DRM_EXYNOS_ROTATOR
config DRM_EXYNOS_GSC
bool "Exynos DRM GSC"
- depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM
help
Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 22b8f5eced80..215131ab1dd2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -14,6 +14,8 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <linux/anon_inodes.h>
+
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
@@ -119,6 +121,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
drm_vblank_offdelay = VBLANK_OFF_DELAY;
+ platform_set_drvdata(dev->platformdev, dev);
+
return 0;
err_drm_device:
@@ -150,9 +154,14 @@ static int exynos_drm_unload(struct drm_device *dev)
return 0;
}
+static const struct file_operations exynos_drm_gem_fops = {
+ .mmap = exynos_drm_gem_mmap_buffer,
+};
+
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv;
+ struct file *anon_filp;
int ret;
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
@@ -162,11 +171,23 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
file->driver_priv = file_priv;
ret = exynos_drm_subdrv_open(dev, file);
- if (ret) {
- kfree(file_priv);
- file->driver_priv = NULL;
+ if (ret)
+ goto out;
+
+ anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
+ NULL, 0);
+ if (IS_ERR(anon_filp)) {
+ ret = PTR_ERR(anon_filp);
+ goto out;
}
+ anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
+ file_priv->anon_filp = anon_filp;
+
+ return ret;
+out:
+ kfree(file_priv);
+ file->driver_priv = NULL;
return ret;
}
@@ -179,6 +200,7 @@ static void exynos_drm_preclose(struct drm_device *dev,
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct exynos_drm_private *private = dev->dev_private;
+ struct drm_exynos_file_private *file_priv;
struct drm_pending_vblank_event *v, *vt;
struct drm_pending_event *e, *et;
unsigned long flags;
@@ -204,6 +226,9 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
}
spin_unlock_irqrestore(&dev->event_lock, flags);
+ file_priv = file->driver_priv;
+ if (file_priv->anon_filp)
+ fput(file_priv->anon_filp);
kfree(file->driver_priv);
file->driver_priv = NULL;
@@ -305,7 +330,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
static int exynos_drm_platform_remove(struct platform_device *pdev)
{
- drm_platform_exit(&exynos_drm_driver, pdev);
+ drm_put_dev(platform_get_drvdata(pdev));
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index eaa19668bf00..0eaf5a27e120 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -226,6 +226,7 @@ struct exynos_drm_ipp_private {
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
struct exynos_drm_ipp_private *ipp_priv;
+ struct file *anon_filp;
};
/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index a61878bf5dcd..a20440ce32e6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -347,7 +347,7 @@ static void fimd_wait_for_vblank(struct device *dev)
*/
if (!wait_event_timeout(ctx->wait_vsync_queue,
!atomic_read(&ctx->wait_vsync_event),
- DRM_HZ/20))
+ HZ/20))
DRM_DEBUG_KMS("vblank wait timed out.\n");
}
@@ -706,7 +706,7 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
/* set wait vsync event to zero and wake up queue. */
if (atomic_read(&ctx->wait_vsync_event)) {
atomic_set(&ctx->wait_vsync_event, 0);
- DRM_WAKEUP(&ctx->wait_vsync_queue);
+ wake_up(&ctx->wait_vsync_queue);
}
out:
return IRQ_HANDLED;
@@ -954,7 +954,7 @@ static int fimd_probe(struct platform_device *pdev)
}
ctx->driver_data = drm_fimd_get_driver_data(pdev);
- DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+ init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
subdrv = &ctx->subdrv;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 380aec28840b..6c1885eedfdf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -607,7 +607,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
reg_type = REG_TYPE_NONE;
DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
break;
- };
+ }
return reg_type;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index be59d50d8b16..42d2904d88c7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -338,46 +338,22 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
&args->offset);
}
-static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
- struct file *filp)
-{
- struct drm_file *file_priv;
-
- /* find current process's drm_file from filelist. */
- list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
- if (file_priv->filp == filp)
- return file_priv;
-
- WARN_ON(1);
-
- return ERR_PTR(-EFAULT);
-}
-
-static int exynos_drm_gem_mmap_buffer(struct file *filp,
+int exynos_drm_gem_mmap_buffer(struct file *filp,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct drm_device *drm_dev = obj->dev;
struct exynos_drm_gem_buf *buffer;
- struct drm_file *file_priv;
unsigned long vm_size;
int ret;
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = obj;
vma->vm_ops = drm_dev->driver->gem_vm_ops;
- /* restore it to driver's fops. */
- filp->f_op = fops_get(drm_dev->driver->fops);
-
- file_priv = exynos_drm_find_drm_file(drm_dev, filp);
- if (IS_ERR(file_priv))
- return PTR_ERR(file_priv);
-
- /* restore it to drm_file. */
- filp->private_data = file_priv;
-
update_vm_cache_attr(exynos_gem_obj, vma);
vm_size = vma->vm_end - vma->vm_start;
@@ -411,15 +387,13 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
return 0;
}
-static const struct file_operations exynos_drm_gem_fops = {
- .mmap = exynos_drm_gem_mmap_buffer,
-};
-
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_exynos_file_private *exynos_file_priv;
struct drm_exynos_gem_mmap *args = data;
struct drm_gem_object *obj;
+ struct file *anon_filp;
unsigned long addr;
if (!(dev->driver->driver_features & DRIVER_GEM)) {
@@ -427,47 +401,25 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
}
+ mutex_lock(&dev->struct_mutex);
+
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
+ mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
- /*
- * We have to use gem object and its fops for specific mmaper,
- * but vm_mmap() can deliver only filp. So we have to change
- * filp->f_op and filp->private_data temporarily, then restore
- * again. So it is important to keep lock until restoration the
- * settings to prevent others from misuse of filp->f_op or
- * filp->private_data.
- */
- mutex_lock(&dev->struct_mutex);
-
- /*
- * Set specific mmper's fops. And it will be restored by
- * exynos_drm_gem_mmap_buffer to dev->driver->fops.
- * This is used to call specific mapper temporarily.
- */
- file_priv->filp->f_op = &exynos_drm_gem_fops;
-
- /*
- * Set gem object to private_data so that specific mmaper
- * can get the gem object. And it will be restored by
- * exynos_drm_gem_mmap_buffer to drm_file.
- */
- file_priv->filp->private_data = obj;
+ exynos_file_priv = file_priv->driver_priv;
+ anon_filp = exynos_file_priv->anon_filp;
+ anon_filp->private_data = obj;
- addr = vm_mmap(file_priv->filp, 0, args->size,
- PROT_READ | PROT_WRITE, MAP_SHARED, 0);
+ addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, 0);
drm_gem_object_unreference(obj);
if (IS_ERR_VALUE(addr)) {
- /* check filp->f_op, filp->private_data are restored */
- if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
- file_priv->filp->f_op = fops_get(dev->driver->fops);
- file_priv->filp->private_data = file_priv;
- }
mutex_unlock(&dev->struct_mutex);
return (int)addr;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index b8c818ba2ff4..1592c0ba7de8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -122,6 +122,9 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int exynos_drm_gem_mmap_buffer(struct file *filp,
+ struct vm_area_struct *vma);
+
/* map user space allocated by malloc to pages. */
int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d519a4e5fe40..09312b877470 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -16,7 +16,6 @@
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
-#include <plat/map-base.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
@@ -826,7 +825,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
/*
- * quf == NULL condition means all event deletion.
+ * qbuf == NULL condition means all event deletion.
* stop operations want to delete all event list.
* another case delete only same buf id.
*/
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a0e10aeb0e67..c021ddc1ffb4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,6 +34,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/hdmi.h>
#include <drm/exynos_drm.h>
@@ -59,19 +60,6 @@
#define HDMI_AUI_VERSION 0x01
#define HDMI_AUI_LENGTH 0x0A
-/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
-enum HDMI_PACKET_TYPE {
- /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
- /* InfoFrame packet type */
- HDMI_PACKET_TYPE_INFOFRAME = 0x80,
- /* Vendor-Specific InfoFrame */
- HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
- /* Auxiliary Video information InfoFrame */
- HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
- /* Audio information InfoFrame */
- HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
-};
-
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
@@ -379,12 +367,6 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
},
};
-struct hdmi_infoframe {
- enum HDMI_PACKET_TYPE type;
- u8 ver;
- u8 len;
-};
-
static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
{
return readl(hdata->regs + reg_id);
@@ -682,7 +664,7 @@ static u8 hdmi_chksum(struct hdmi_context *hdata,
}
static void hdmi_reg_infoframe(struct hdmi_context *hdata,
- struct hdmi_infoframe *infoframe)
+ union hdmi_infoframe *infoframe)
{
u32 hdr_sum;
u8 chksum;
@@ -700,13 +682,15 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
return;
}
- switch (infoframe->type) {
- case HDMI_PACKET_TYPE_AVI:
+ switch (infoframe->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
- hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1,
+ infoframe->any.version);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length);
+ hdr_sum = infoframe->any.type + infoframe->any.version +
+ infoframe->any.length;
/* Output format zero hardcoded ,RGB YBCR selection */
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
@@ -722,18 +706,20 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
- infoframe->len, hdr_sum);
+ infoframe->any.length, hdr_sum);
DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
break;
- case HDMI_PACKET_TYPE_AUI:
+ case HDMI_INFOFRAME_TYPE_AUDIO:
hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
- hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1,
+ infoframe->any.version);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length);
+ hdr_sum = infoframe->any.type + infoframe->any.version +
+ infoframe->any.length;
chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
- infoframe->len, hdr_sum);
+ infoframe->any.length, hdr_sum);
DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
break;
@@ -985,7 +971,7 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
static void hdmi_conf_init(struct hdmi_context *hdata)
{
- struct hdmi_infoframe infoframe;
+ union hdmi_infoframe infoframe;
/* disable HPD interrupts from HDMI IP block, use GPIO instead */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
@@ -1021,14 +1007,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
} else {
- infoframe.type = HDMI_PACKET_TYPE_AVI;
- infoframe.ver = HDMI_AVI_VERSION;
- infoframe.len = HDMI_AVI_LENGTH;
+ infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI;
+ infoframe.any.version = HDMI_AVI_VERSION;
+ infoframe.any.length = HDMI_AVI_LENGTH;
hdmi_reg_infoframe(hdata, &infoframe);
- infoframe.type = HDMI_PACKET_TYPE_AUI;
- infoframe.ver = HDMI_AUI_VERSION;
- infoframe.len = HDMI_AUI_LENGTH;
+ infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO;
+ infoframe.any.version = HDMI_AUI_VERSION;
+ infoframe.any.length = HDMI_AUI_LENGTH;
hdmi_reg_infoframe(hdata, &infoframe);
/* enable AVI packet every vsync, fixes purple line problem */
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 63bc5f92fbb3..2dfa48c76f54 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -868,7 +868,7 @@ static void mixer_wait_for_vblank(void *ctx)
*/
if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
!atomic_read(&mixer_ctx->wait_vsync_event),
- DRM_HZ/20))
+ HZ/20))
DRM_DEBUG_KMS("vblank wait timed out.\n");
}
@@ -1019,7 +1019,7 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
/* set wait vsync event to zero and wake up queue. */
if (atomic_read(&ctx->wait_vsync_event)) {
atomic_set(&ctx->wait_vsync_event, 0);
- DRM_WAKEUP(&ctx->wait_vsync_queue);
+ wake_up(&ctx->wait_vsync_queue);
}
}
@@ -1209,7 +1209,7 @@ static int mixer_probe(struct platform_device *pdev)
drm_hdmi_ctx->ctx = (void *)ctx;
ctx->vp_enabled = drv->is_vp_enabled;
ctx->mxr_ver = drv->version;
- DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+ init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
platform_set_drvdata(pdev, drm_hdmi_ctx);
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index d5ef1a5793c8..de6f62a6ceb7 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -326,7 +326,7 @@ int psbfb_sync(struct fb_info *info)
struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_device *dev = psbfb->base.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- unsigned long _end = jiffies + DRM_HZ;
+ unsigned long _end = jiffies + HZ;
int busy = 0;
unsigned long flags;
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 143eba3309c5..ea7dfc59d796 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -26,13 +26,13 @@
#include "intel_bios.h"
#include "power.h"
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
static void do_gma_backlight_set(struct drm_device *dev)
{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
struct drm_psb_private *dev_priv = dev->dev_private;
backlight_update_status(dev_priv->backlight_device);
-#endif
}
+#endif
void gma_backlight_enable(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index f88a1815d87c..0490ce36b53f 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -483,7 +483,7 @@ cdv_intel_dp_aux_native_write(struct gma_encoder *encoder,
if (send_bytes > 16)
return -1;
- msg[0] = AUX_NATIVE_WRITE << 4;
+ msg[0] = DP_AUX_NATIVE_WRITE << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = send_bytes - 1;
@@ -493,9 +493,10 @@ cdv_intel_dp_aux_native_write(struct gma_encoder *encoder,
ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
- if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ ack >>= 4;
+ if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
break;
- else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
udelay(100);
else
return -EIO;
@@ -523,7 +524,7 @@ cdv_intel_dp_aux_native_read(struct gma_encoder *encoder,
uint8_t ack;
int ret;
- msg[0] = AUX_NATIVE_READ << 4;
+ msg[0] = DP_AUX_NATIVE_READ << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = recv_bytes - 1;
@@ -538,12 +539,12 @@ cdv_intel_dp_aux_native_read(struct gma_encoder *encoder,
return -EPROTO;
if (ret < 0)
return ret;
- ack = reply[0];
- if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+ ack = reply[0] >> 4;
+ if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
memcpy(recv, reply + 1, ret - 1);
return ret - 1;
}
- else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
udelay(100);
else
return -EIO;
@@ -569,12 +570,12 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
/* Set up the command byte */
if (mode & MODE_I2C_READ)
- msg[0] = AUX_I2C_READ << 4;
+ msg[0] = DP_AUX_I2C_READ << 4;
else
- msg[0] = AUX_I2C_WRITE << 4;
+ msg[0] = DP_AUX_I2C_WRITE << 4;
if (!(mode & MODE_I2C_STOP))
- msg[0] |= AUX_I2C_MOT << 4;
+ msg[0] |= DP_AUX_I2C_MOT << 4;
msg[1] = address >> 8;
msg[2] = address;
@@ -606,16 +607,16 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
return ret;
}
- switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
- case AUX_NATIVE_REPLY_ACK:
+ switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
+ case DP_AUX_NATIVE_REPLY_ACK:
/* I2C-over-AUX Reply field is only valid
* when paired with AUX ACK.
*/
break;
- case AUX_NATIVE_REPLY_NACK:
+ case DP_AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
return -EREMOTEIO;
- case AUX_NATIVE_REPLY_DEFER:
+ case DP_AUX_NATIVE_REPLY_DEFER:
udelay(100);
continue;
default:
@@ -624,16 +625,16 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
return -EREMOTEIO;
}
- switch (reply[0] & AUX_I2C_REPLY_MASK) {
- case AUX_I2C_REPLY_ACK:
+ switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
+ case DP_AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ) {
*read_byte = reply[1];
}
return reply_bytes - 1;
- case AUX_I2C_REPLY_NACK:
+ case DP_AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
return -EREMOTEIO;
- case AUX_I2C_REPLY_DEFER:
+ case DP_AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(100);
break;
@@ -677,7 +678,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
return ret;
}
-void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+static void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
{
adjusted_mode->hdisplay = fixed_mode->hdisplay;
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 24e8af3d22bf..386de2c9dc86 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -349,6 +349,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
/* If we didn't get a handle then turn the cursor off */
if (!handle) {
temp = CURSOR_MODE_DISABLE;
+ mutex_lock(&dev->struct_mutex);
if (gma_power_begin(dev, false)) {
REG_WRITE(control, temp);
@@ -365,6 +366,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
gma_crtc->cursor_obj = NULL;
}
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -374,9 +376,12 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
}
+ mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file_priv, handle);
- if (!obj)
- return -ENOENT;
+ if (!obj) {
+ ret = -ENOENT;
+ goto unlock;
+ }
if (obj->size < width * height * 4) {
dev_dbg(dev->dev, "Buffer is too small\n");
@@ -440,10 +445,13 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
}
gma_crtc->cursor_obj = obj;
+unlock:
+ mutex_unlock(&dev->struct_mutex);
return ret;
unref_cursor:
drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index ad0d6de938f3..13ec6283bf59 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -22,7 +22,6 @@
*
*/
#include <linux/acpi.h>
-#include <linux/acpi_io.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index b59e6588c343..5ad6a03e477e 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -212,8 +212,8 @@ enum {
#define PSB_HIGH_REG_OFFS 0x0600
#define PSB_NUM_VBLANKS 2
-#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
-#define PSB_LID_DELAY (DRM_HZ / 10)
+#define PSB_WATCHDOG_DELAY (HZ * 2)
+#define PSB_LID_DELAY (HZ / 10)
#define MDFLD_PNW_B0 0x04
#define MDFLD_PNW_C0 0x08
@@ -232,7 +232,7 @@ enum {
#define MDFLD_DSR_RR 45
#define MDFLD_DPU_ENABLE (1 << 31)
#define MDFLD_DSR_FULLSCREEN (1 << 30)
-#define MDFLD_DSR_DELAY (DRM_HZ / MDFLD_DSR_RR)
+#define MDFLD_DSR_DELAY (HZ / MDFLD_DSR_RR)
#define PSB_PWR_STATE_ON 1
#define PSB_PWR_STATE_OFF 2
@@ -769,7 +769,7 @@ extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
*psb_irq.c
*/
-extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t psb_irq_handler(int irq, void *arg);
extern int psb_irq_enable_dpst(struct drm_device *dev);
extern int psb_irq_disable_dpst(struct drm_device *dev);
extern void psb_irq_preinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index bde27fdb41bf..dc2c8eb030fa 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -250,11 +250,6 @@ extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
extern int intelfb_probe(struct drm_device *dev);
extern int intelfb_remove(struct drm_device *dev,
struct drm_framebuffer *fb);
-extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
- *dev, struct
- drm_mode_fb_cmd
- *mode_cmd,
- void *mm_private);
extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index ba4830342d34..f883f9e4c524 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -200,7 +200,7 @@ static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
mid_pipe_event_handler(dev, 1);
}
-irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t psb_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -253,7 +253,7 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
- DRM_READMEMORYBARRIER();
+ rmb();
if (!handled)
return IRQ_NONE;
@@ -450,21 +450,6 @@ int psb_irq_disable_dpst(struct drm_device *dev)
return 0;
}
-#ifdef PSB_FIXME
-static int psb_vblank_do_wait(struct drm_device *dev,
- unsigned int *sequence, atomic_t *counter)
-{
- unsigned int cur_vblank;
- int ret = 0;
- DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ,
- (((cur_vblank = atomic_read(counter))
- - *sequence) <= (1 << 23)));
- *sequence = cur_vblank;
-
- return ret;
-}
-#endif
-
/*
* It is used to enable VBLANK interrupt
*/
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index debb7f190c06..d0b45ffa1126 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -32,7 +32,7 @@ void sysirq_uninit(struct drm_device *dev);
void psb_irq_preinstall(struct drm_device *dev);
int psb_irq_postinstall(struct drm_device *dev);
void psb_irq_uninstall(struct drm_device *dev);
-irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+irqreturn_t psb_irq_handler(int irq, void *arg);
int psb_irq_enable_dpst(struct drm_device *dev);
int psb_irq_disable_dpst(struct drm_device *dev);
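
DRM_IRQ_ARGS was only a compatibility macro for the native handler arguments, so dropping it leaves the standard Linux prototype registered via request_irq(). A skeletal example, not taken from the driver (handler body and name are illustrative):

    #include <linux/interrupt.h>

    static irqreturn_t example_irq_handler(int irq, void *arg)
    {
            struct drm_device *dev = arg;   /* cookie passed at registration time */

            if (!dev)
                    return IRQ_NONE;        /* not ours; let other sharers run */

            /* read and acknowledge the device's interrupt sources here */
            return IRQ_HANDLED;
    }

    static int example_irq_init(struct drm_device *dev, int irq)
    {
            /* dev comes back as the handler's second argument */
            return request_irq(irq, example_irq_handler, IRQF_SHARED,
                               "example", dev);
    }
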
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 400b0c4a10fb..fa18cf374470 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -208,7 +208,7 @@ struct tda998x_priv {
# define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1)
# define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6)
#define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */
-# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0)
+# define PLL_SERIAL_2_SRL_NOSC(x) ((x) << 0)
# define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4)
#define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */
# define PLL_SERIAL_3_SRL_CCIR (1 << 0)
@@ -528,10 +528,10 @@ tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
{
uint8_t buf[PB(5) + 1];
+ memset(buf, 0, sizeof(buf));
buf[HB(0)] = 0x84;
buf[HB(1)] = 0x01;
buf[HB(2)] = 10;
- buf[PB(0)] = 0;
buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
buf[PB(4)] = p->audio_frame[4];
@@ -824,6 +824,11 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
}
div = 148500 / mode->clock;
+ if (div != 0) {
+ div--;
+ if (div > 3)
+ div = 3;
+ }
/* mute the audio FIFO: */
reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -913,7 +918,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
if (priv->rev == TDA19988) {
/* let incoming pixels fill the active space (if any) */
- reg_write(encoder, REG_ENABLE_SPACE, 0x01);
+ reg_write(encoder, REG_ENABLE_SPACE, 0x00);
}
/* must be last register set: */
@@ -1094,6 +1099,8 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
{
struct tda998x_priv *priv = to_tda998x_priv(encoder);
drm_i2c_encoder_destroy(encoder);
+ if (priv->cec)
+ i2c_unregister_device(priv->cec);
kfree(priv);
}
@@ -1142,8 +1149,10 @@ tda998x_encoder_init(struct i2c_client *client,
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
- priv->current_page = 0;
+ priv->current_page = 0xff;
priv->cec = i2c_new_dummy(client->adapter, 0x34);
+ if (!priv->cec)
+ return -ENODEV;
priv->dpms = DRM_MODE_DPMS_OFF;
encoder_slave->slave_priv = priv;
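
The new divider computation above derives the serial clock divider from the pixel clock (in kHz) against a 148.5 MHz reference and clamps the result to what the two-bit field can hold, which matters now that PLL_SERIAL_2_SRL_NOSC() no longer masks its argument. The same arithmetic in isolation (helper name illustrative, semantics assumed from the patch):

    /* Sketch of the clamp added in tda998x_encoder_mode_set(). */
    static u32 tda998x_pick_serial_div(u32 clock_khz)
    {
            u32 div = 148500 / clock_khz;

            if (div != 0) {
                    div--;
                    if (div > 3)
                            div = 3;        /* field is only two bits wide */
            }
            return div;
    }
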
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 249fdff305c6..aeace37415aa 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -1193,6 +1193,10 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
int i810_driver_load(struct drm_device *dev, unsigned long flags)
{
+ /* Our userspace depends upon the agp mapping support. */
+ if (!dev->agp)
+ return -EINVAL;
+
pci_set_master(dev->pdev);
return 0;
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index d8180d22cedd..441ccf8f5bdc 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -57,7 +57,7 @@ static const struct file_operations i810_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+ DRIVER_USE_AGP |
DRIVER_HAVE_DMA,
.dev_priv_size = sizeof(drm_i810_buf_priv_t),
.load = i810_driver_load,
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 6199d0b5b958..73ed59eff139 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -1,8 +1,10 @@
config DRM_I915
tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
depends on DRM
- depends on AGP
- depends on AGP_INTEL
+ depends on X86 && PCI
+ depends on (AGP || AGP=n)
+ select INTEL_GTT
+ select AGP_INTEL if AGP
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
select SHMEM
@@ -35,15 +37,14 @@ config DRM_I915
config DRM_I915_KMS
bool "Enable modesetting on intel by default"
depends on DRM_I915
+ default y
help
- Choose this option if you want kernel modesetting enabled by default,
- and you have a new enough userspace to support this. Running old
- userspaces with this enabled will cause pain. Note that this causes
- the driver to bind to PCI devices, which precludes loading things
- like intelfb.
+ Choose this option if you want kernel modesetting enabled by default.
+
+ If in doubt, say "Y".
config DRM_I915_FBDEV
- bool "Enable legacy fbdev support for the modesettting intel driver"
+ bool "Enable legacy fbdev support for the modesetting intel driver"
depends on DRM_I915
select DRM_KMS_FB_HELPER
select FB_CFB_FILLRECT
@@ -55,9 +56,12 @@ config DRM_I915_FBDEV
 	  support. Note that this support also provides the linux console
support on top of the intel modesetting driver.
+ If in doubt, say "Y".
+
config DRM_I915_PRELIMINARY_HW_SUPPORT
bool "Enable preliminary support for prerelease Intel hardware by default"
depends on DRM_I915
+ default n
help
Choose this option if you have prerelease Intel hardware and want the
i915 driver to support it by default. You can enable such support at
@@ -65,3 +69,15 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
option changes the default for that module option.
If in doubt, say "N".
+
+config DRM_I915_UMS
+ bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option if you still need userspace modesetting.
+
+	  Userspace modesetting has been deprecated for quite some time now, so
+ enable this only if you have ancient versions of the DDX drivers.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 41838eaa799c..9fd44f5f3b3b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -4,7 +4,6 @@
ccflags-y := -Iinclude/drm
i915-y := i915_drv.o i915_dma.o i915_irq.o \
- i915_debugfs.o \
i915_gpu_error.o \
i915_suspend.o \
i915_gem.o \
@@ -38,7 +37,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
intel_ringbuffer.o \
intel_overlay.o \
intel_sprite.o \
- intel_opregion.o \
intel_sideband.o \
intel_uncore.o \
dvo_ch7xxx.o \
@@ -51,10 +49,12 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915-$(CONFIG_COMPAT) += i915_ioc32.o
-i915-$(CONFIG_ACPI) += intel_acpi.o
+i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+
obj-$(CONFIG_DRM_I915) += i915.o
CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index c4a255be6979..954acb2c7021 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -87,49 +87,6 @@ struct ns2501_priv {
* when switching the resolution.
*/
-static void enable_dvo(struct intel_dvo_device *dvo)
-{
- struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
- struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_gmbus *bus = container_of(adapter,
- struct intel_gmbus,
- adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
-
- DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
-
- ns->dvoc = I915_READ(DVO_C);
- ns->pll_a = I915_READ(_DPLL_A);
- ns->srcdim = I915_READ(DVOC_SRCDIM);
- ns->fw_blc = I915_READ(FW_BLC);
-
- I915_WRITE(DVOC, 0x10004084);
- I915_WRITE(_DPLL_A, 0xd0820000);
- I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768
- I915_WRITE(FW_BLC, 0x1080304);
-
- I915_WRITE(DVOC, 0x90004084);
-}
-
-/*
- * Restore the I915 registers modified by the above
- * trigger function.
- */
-static void restore_dvo(struct intel_dvo_device *dvo)
-{
- struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_gmbus *bus = container_of(adapter,
- struct intel_gmbus,
- adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
- struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
-
- I915_WRITE(DVOC, ns->dvoc);
- I915_WRITE(_DPLL_A, ns->pll_a);
- I915_WRITE(DVOC_SRCDIM, ns->srcdim);
- I915_WRITE(FW_BLC, ns->fw_blc);
-}
-
/*
** Read a register from the ns2501.
** Returns true if successful, false otherwise.
@@ -300,7 +257,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *adjusted_mode)
{
bool ok;
- bool restore = false;
+ int retries = 10;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
DRM_DEBUG_KMS
@@ -476,20 +433,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
ns->reg_8_shadow |= NS2501_8_BPAS;
}
ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
-
- if (!ok) {
- if (restore)
- restore_dvo(dvo);
- enable_dvo(dvo);
- restore = true;
- }
- } while (!ok);
- /*
- * Restore the old i915 registers before
- * forcing the ns2501 on.
- */
- if (restore)
- restore_dvo(dvo);
+ } while (!ok && retries--);
}
/* set the NS2501 power state */
@@ -510,7 +454,7 @@ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
{
bool ok;
- bool restore = false;
+ int retries = 10;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
unsigned char ch;
@@ -537,16 +481,7 @@ static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
ok &=
ns2501_writeb(dvo, 0x35,
enable ? 0xff : 0x00);
- if (!ok) {
- if (restore)
- restore_dvo(dvo);
- enable_dvo(dvo);
- restore = true;
- }
- } while (!ok);
-
- if (restore)
- restore_dvo(dvo);
+ } while (!ok && retries--);
}
}
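
Both conversions above replace the enable/restore register dance with a plain bounded retry: keep re-issuing the writes until they all succeed or the retry budget runs out. Reduced to a skeleton (register and value are illustrative only):

    /* Sketch of the bounded-retry shape now used in ns2501_mode_set()/ns2501_dpms(). */
    static void ns2501_retry_writes_sketch(struct intel_dvo_device *dvo)
    {
            int retries = 10;
            bool ok;

            do {
                    ok = true;
                    ok &= ns2501_writeb(dvo, NS2501_REG8, 0x00);
                    /* ... further writes, each ANDed into 'ok' ... */
            } while (!ok && retries--);     /* give up quietly after 10 attempts */
    }
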
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6ed45a984230..b2b46c52294c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -40,8 +40,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#if defined(CONFIG_DEBUG_FS)
-
enum {
ACTIVE_LIST,
INACTIVE_LIST,
@@ -406,16 +404,26 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
seq_putc(m, '\n');
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
+ struct task_struct *task;
memset(&stats, 0, sizeof(stats));
idr_for_each(&file->object_idr, per_file_stats, &stats);
+ /*
+ * Although we have a valid reference on file->pid, that does
+ * not guarantee that the task_struct who called get_pid() is
+ * still alive (e.g. get_pid(current) => fork() => exit()).
+ * Therefore, we need to protect this ->comm access using RCU.
+ */
+ rcu_read_lock();
+ task = pid_task(file->pid, PIDTYPE_PID);
seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
- get_pid_task(file->pid, PIDTYPE_PID)->comm,
+ task ? task->comm : "<unknown>",
stats.count,
stats.total,
stats.active,
stats.inactive,
stats.unbound);
+ rcu_read_unlock();
}
mutex_unlock(&dev->struct_mutex);
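
The hunk above is the standard way to dereference a task through a struct pid that may outlive its task: hold rcu_read_lock() across pid_task() and the ->comm read, and tolerate a NULL result. A minimal sketch under those assumptions (helper name is mine):

    #include <linux/pid.h>
    #include <linux/rcupdate.h>
    #include <linux/sched.h>

    /* Copy the owning task's comm, or a placeholder if the task already exited. */
    static void copy_task_comm(struct pid *pid, char *buf, size_t len)
    {
            struct task_struct *task;

            rcu_read_lock();
            task = pid_task(pid, PIDTYPE_PID);      /* may be NULL after exit */
            snprintf(buf, len, "%s", task ? task->comm : "<unknown>");
            rcu_read_unlock();
    }
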
@@ -564,10 +572,12 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
for_each_ring(ring, dev_priv, i)
i915_ring_seqno_info(m, ring);
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -585,6 +595,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
if (INTEL_INFO(dev)->gen >= 8) {
int i;
@@ -711,6 +722,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
i915_ring_seqno_info(m, ring);
}
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
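
All of the debugfs readers touched in this file gain the same bracketing: take a runtime-PM reference before the first register read so the device cannot be runtime-suspended underneath the MMIO access, and drop it after the last read. In outline, assuming the intel_runtime_pm_get()/intel_runtime_pm_put() helpers introduced by this series (the register chosen is arbitrary):

    /* Sketch of the runtime-PM bracketing added to the debugfs readers. */
    static int debugfs_read_sketch(struct seq_file *m,
                                   struct drm_i915_private *dev_priv)
    {
            u32 val;

            intel_runtime_pm_get(dev_priv);         /* keep the device awake for MMIO */
            val = I915_READ(GEN6_RP_CONTROL);
            intel_runtime_pm_put(dev_priv);

            seq_printf(m, "GEN6_RP_CONTROL: 0x%08x\n", val);
            return 0;
    }
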
@@ -904,9 +916,11 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
crstanddelay = I915_READ16(CRSTANDVID);
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
@@ -919,7 +933,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret = 0;
+
+ intel_runtime_pm_get(dev_priv);
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -945,9 +961,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
/* RPSTAT1 is in the GT power well */
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
- return ret;
+ goto out;
- gen6_gt_force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
reqf &= ~GEN6_TURBO_DISABLE;
@@ -970,7 +986,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
cagf *= GT_FREQUENCY_MULTIPLIER;
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
@@ -1018,23 +1034,24 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
- val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
+ val = valleyview_rps_max_freq(dev_priv);
seq_printf(m, "max GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv->mem_freq, val));
+ vlv_gpu_freq(dev_priv, val));
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
+ val = valleyview_rps_min_freq(dev_priv);
seq_printf(m, "min GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv->mem_freq, val));
+ vlv_gpu_freq(dev_priv, val));
seq_printf(m, "current GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- (freq_sts >> 8) & 0xff));
+ vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
seq_puts(m, "no P-state info available\n");
}
- return 0;
+out:
+ intel_runtime_pm_put(dev_priv);
+ return ret;
}
static int i915_delayfreq_table(struct seq_file *m, void *unused)
@@ -1048,6 +1065,7 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
for (i = 0; i < 16; i++) {
delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
@@ -1055,6 +1073,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
(delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
}
+ intel_runtime_pm_put(dev_priv);
+
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -1076,12 +1096,14 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
for (i = 1; i <= 32; i++) {
inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
}
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -1099,11 +1121,13 @@ static int ironlake_drpc_info(struct seq_file *m)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
rgvmodectl = I915_READ(MEMMODECTL);
rstdbyctl = I915_READ(RSTDBYCTL);
crstandvid = I915_READ16(CRSTANDVID);
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
@@ -1154,6 +1178,50 @@ static int ironlake_drpc_info(struct seq_file *m)
return 0;
}
+static int vlv_drpc_info(struct seq_file *m)
+{
+
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rpmodectl1, rcctl1;
+ unsigned fw_rendercount = 0, fw_mediacount = 0;
+
+ rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
+ rcctl1 = I915_READ(GEN6_RC_CONTROL);
+
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "Turbo enabled: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_ENABLE));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
+ seq_printf(m, "RC6 Enabled: %s\n",
+ yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
+ GEN6_RC_CTL_EI_MODE(1))));
+ seq_printf(m, "Render Power Well: %s\n",
+ (I915_READ(VLV_GTLC_PW_STATUS) &
+ VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+ seq_printf(m, "Media Power Well: %s\n",
+ (I915_READ(VLV_GTLC_PW_STATUS) &
+ VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ fw_rendercount = dev_priv->uncore.fw_rendercount;
+ fw_mediacount = dev_priv->uncore.fw_mediacount;
+ spin_unlock_irq(&dev_priv->uncore.lock);
+
+ seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
+ seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);
+
+
+ return 0;
+}
+
+
static int gen6_drpc_info(struct seq_file *m)
{
@@ -1167,6 +1235,7 @@ static int gen6_drpc_info(struct seq_file *m)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->uncore.lock);
forcewake_count = dev_priv->uncore.forcewake_count;
@@ -1192,6 +1261,8 @@ static int gen6_drpc_info(struct seq_file *m)
sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
mutex_unlock(&dev_priv->rps.hw_lock);
+ intel_runtime_pm_put(dev_priv);
+
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
seq_printf(m, "HW control enabled: %s\n",
@@ -1256,7 +1327,9 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if (IS_VALLEYVIEW(dev))
+ return vlv_drpc_info(m);
+ else if (IS_GEN6(dev) || IS_GEN7(dev))
return gen6_drpc_info(m);
else
return ironlake_drpc_info(m);
@@ -1268,7 +1341,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- if (!I915_HAS_FBC(dev)) {
+ if (!HAS_FBC(dev)) {
seq_puts(m, "FBC unsupported on this chipset\n");
return 0;
}
@@ -1330,7 +1403,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
return 0;
}
- if (I915_READ(IPS_CTL) & IPS_ENABLE)
+ if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
seq_puts(m, "enabled\n");
else
seq_puts(m, "disabled\n");
@@ -1406,6 +1479,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
@@ -1422,6 +1496,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
((ia_freq >> 8) & 0xff) * 100);
}
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@@ -1437,8 +1512,10 @@ static int i915_gfxec(struct seq_file *m, void *unused)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -1565,13 +1642,21 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned forcewake_count;
+ unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
spin_lock_irq(&dev_priv->uncore.lock);
- forcewake_count = dev_priv->uncore.forcewake_count;
+ if (IS_VALLEYVIEW(dev)) {
+ fw_rendercount = dev_priv->uncore.fw_rendercount;
+ fw_mediacount = dev_priv->uncore.fw_mediacount;
+ } else
+ forcewake_count = dev_priv->uncore.forcewake_count;
spin_unlock_irq(&dev_priv->uncore.lock);
- seq_printf(m, "forcewake count = %u\n", forcewake_count);
+ if (IS_VALLEYVIEW(dev)) {
+ seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
+ seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
+ } else
+ seq_printf(m, "forcewake count = %u\n", forcewake_count);
return 0;
}
@@ -1610,6 +1695,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_x));
@@ -1641,6 +1727,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
I915_READ(DISP_ARB_CTL));
}
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -1701,16 +1788,19 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
if (INTEL_INFO(dev)->gen >= 8)
gen8_ppgtt_info(m, dev);
else if (INTEL_INFO(dev)->gen >= 6)
gen6_ppgtt_info(m, dev);
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -1735,28 +1825,28 @@ static int i915_dpio_info(struct seq_file *m, void *data)
seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
- seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
- seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
+ seq_printf(m, "DPIO PLL DW3 CH0 : 0x%08x\n",
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(0)));
+ seq_printf(m, "DPIO PLL DW3 CH1: 0x%08x\n",
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(1)));
- seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
- seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
+ seq_printf(m, "DPIO PLL DW5 CH0: 0x%08x\n",
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(0)));
+ seq_printf(m, "DPIO PLL DW5 CH1: 0x%08x\n",
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(1)));
- seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
- seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
+ seq_printf(m, "DPIO PLL DW7 CH0: 0x%08x\n",
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(0)));
+ seq_printf(m, "DPIO PLL DW7 CH1: 0x%08x\n",
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(1)));
- seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
- seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
+ seq_printf(m, "DPIO PLL DW10 CH0: 0x%08x\n",
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(0)));
+ seq_printf(m, "DPIO PLL DW10 CH1: 0x%08x\n",
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(1)));
seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_CMN_DW0));
mutex_unlock(&dev_priv->dpio_lock);
@@ -1784,6 +1874,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
u32 psrperf = 0;
bool enabled = false;
+ intel_runtime_pm_get(dev_priv);
+
seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
@@ -1796,6 +1888,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance_Counter: %u\n", psrperf);
+ intel_runtime_pm_put(dev_priv);
return 0;
}
@@ -1845,6 +1938,76 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
return 0;
}
+static const char *power_domain_str(enum intel_display_power_domain domain)
+{
+ switch (domain) {
+ case POWER_DOMAIN_PIPE_A:
+ return "PIPE_A";
+ case POWER_DOMAIN_PIPE_B:
+ return "PIPE_B";
+ case POWER_DOMAIN_PIPE_C:
+ return "PIPE_C";
+ case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+ return "PIPE_A_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+ return "PIPE_B_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+ return "PIPE_C_PANEL_FITTER";
+ case POWER_DOMAIN_TRANSCODER_A:
+ return "TRANSCODER_A";
+ case POWER_DOMAIN_TRANSCODER_B:
+ return "TRANSCODER_B";
+ case POWER_DOMAIN_TRANSCODER_C:
+ return "TRANSCODER_C";
+ case POWER_DOMAIN_TRANSCODER_EDP:
+ return "TRANSCODER_EDP";
+ case POWER_DOMAIN_VGA:
+ return "VGA";
+ case POWER_DOMAIN_AUDIO:
+ return "AUDIO";
+ case POWER_DOMAIN_INIT:
+ return "INIT";
+ default:
+ WARN_ON(1);
+ return "?";
+ }
+}
+
+static int i915_power_domain_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ int i;
+
+ mutex_lock(&power_domains->lock);
+
+ seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
+ for (i = 0; i < power_domains->power_well_count; i++) {
+ struct i915_power_well *power_well;
+ enum intel_display_power_domain power_domain;
+
+ power_well = &power_domains->power_wells[i];
+ seq_printf(m, "%-25s %d\n", power_well->name,
+ power_well->count);
+
+ for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
+ power_domain++) {
+ if (!(BIT(power_domain) & power_well->domains))
+ continue;
+
+ seq_printf(m, " %-23s %d\n",
+ power_domain_str(power_domain),
+ power_domains->domain_use_count[power_domain]);
+ }
+ }
+
+ mutex_unlock(&power_domains->lock);
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
@@ -1857,6 +2020,9 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
struct drm_i915_private *dev_priv = info->dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+ if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
+ return -ENODEV;
+
spin_lock_irq(&pipe_crc->lock);
if (pipe_crc->opened) {
@@ -2005,8 +2171,8 @@ static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
info->dev = dev;
ent = debugfs_create_file(info->name, S_IRUGO, root, info,
&i915_pipe_crc_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
+ if (!ent)
+ return -ENOMEM;
return drm_add_fake_info_node(minor, ent, info);
}
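
The switched error check above reflects how debugfs_create_file() reported failure in this kernel: with debugfs enabled it returns NULL on failure rather than an ERR_PTR, so the IS_ERR() test let real failures through. A sketch of the corrected pattern, with an illustrative node name:

    /* Sketch: treat NULL, not ERR_PTR, as the failure case for debugfs nodes. */
    static int create_debugfs_node_sketch(struct dentry *root, struct drm_minor *minor)
    {
            struct dentry *ent;

            ent = debugfs_create_file("example_node", S_IRUGO, root,
                                      minor->dev, &i915_forcewake_fops);
            if (!ent)
                    return -ENOMEM;

            return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
    }
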
@@ -2347,7 +2513,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- u32 val;
+ u32 val = 0; /* shut up gcc */
int ret;
if (pipe_crc->source == source)
@@ -2742,7 +2908,7 @@ i915_drop_caches_set(void *data, u64 val)
struct i915_vma *vma, *x;
int ret;
- DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
+ DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
@@ -2810,8 +2976,7 @@ i915_max_freq_get(void *data, u64 *val)
return ret;
if (IS_VALLEYVIEW(dev))
- *val = vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.max_delay);
+ *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
else
*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -2841,9 +3006,9 @@ i915_max_freq_set(void *data, u64 val)
* Turbo will still be enabled, but won't go above the set value.
*/
if (IS_VALLEYVIEW(dev)) {
- val = vlv_freq_opcode(dev_priv->mem_freq, val);
+ val = vlv_freq_opcode(dev_priv, val);
dev_priv->rps.max_delay = val;
- gen6_set_rps(dev, val);
+ valleyview_set_rps(dev, val);
} else {
do_div(val, GT_FREQUENCY_MULTIPLIER);
dev_priv->rps.max_delay = val;
@@ -2876,8 +3041,7 @@ i915_min_freq_get(void *data, u64 *val)
return ret;
if (IS_VALLEYVIEW(dev))
- *val = vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.min_delay);
+ *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
else
*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -2907,7 +3071,7 @@ i915_min_freq_set(void *data, u64 val)
* Turbo will still be enabled, but won't go below the set value.
*/
if (IS_VALLEYVIEW(dev)) {
- val = vlv_freq_opcode(dev_priv->mem_freq, val);
+ val = vlv_freq_opcode(dev_priv, val);
dev_priv->rps.min_delay = val;
valleyview_set_rps(dev, val);
} else {
@@ -2938,8 +3102,11 @@ i915_cache_sharing_get(void *data, u64 *val)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->dev->struct_mutex);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -2960,6 +3127,7 @@ i915_cache_sharing_set(void *data, u64 val)
if (val > 3)
return -EINVAL;
+ intel_runtime_pm_get(dev_priv);
DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
/* Update the cache sharing policy here as well */
@@ -2968,6 +3136,7 @@ i915_cache_sharing_set(void *data, u64 val)
snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+ intel_runtime_pm_put(dev_priv);
return 0;
}
@@ -2983,7 +3152,8 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
if (INTEL_INFO(dev)->gen < 6)
return 0;
- gen6_gt_force_wake_get(dev_priv);
+ intel_runtime_pm_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
return 0;
}
@@ -2996,7 +3166,8 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
if (INTEL_INFO(dev)->gen < 6)
return 0;
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_put(dev_priv);
return 0;
}
@@ -3016,8 +3187,8 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
S_IRUSR,
root, dev,
&i915_forcewake_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
+ if (!ent)
+ return -ENOMEM;
return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
@@ -3034,8 +3205,8 @@ static int i915_debugfs_create(struct dentry *root,
S_IRUGO | S_IWUSR,
root, dev,
fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
+ if (!ent)
+ return -ENOMEM;
return drm_add_fake_info_node(minor, ent, fops);
}
@@ -3079,6 +3250,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_edp_psr_status", i915_edp_psr_status, 0},
{"i915_energy_uJ", i915_energy_uJ, 0},
{"i915_pc8_status", i915_pc8_status, 0},
+ {"i915_power_domain_info", i915_power_domain_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@@ -3102,10 +3274,10 @@ static const struct i915_debugfs_files {
void intel_display_crc_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
+ enum pipe pipe;
- for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];
+ for_each_pipe(pipe) {
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
pipe_crc->opened = false;
spin_lock_init(&pipe_crc->lock);
@@ -3164,5 +3336,3 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
drm_debugfs_remove_files(info_list, 1, minor);
}
}
-
-#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0cab2d045135..15a74f979b4b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -42,6 +42,8 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
@@ -83,6 +85,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
+ /*
+ * The dri breadcrumb update races against the drm master disappearing.
+ * Instead of trying to fix this (this is by far not the only ums issue)
+ * just don't do the update in kms mode.
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
if (dev->primary->master) {
master_priv = dev->primary->master->driver_priv;
if (master_priv->sarea_priv)
@@ -783,7 +793,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
if (ring->irq_get(ring)) {
- DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
READ_BREADCRUMB(dev_priv) >= irq_nr);
ring->irq_put(ring);
} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
@@ -820,7 +830,7 @@ static int i915_irq_emit(struct drm_device *dev, void *data,
result = i915_emit_irq(dev);
mutex_unlock(&dev->struct_mutex);
- if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+ if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
@@ -1008,8 +1018,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
- DRM_ERROR("DRM_COPY_TO_USER failed\n");
+ if (copy_to_user(param->value, &value, sizeof(int))) {
+ DRM_ERROR("copy_to_user failed\n");
return -EFAULT;
}
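
DRM_COPY_TO_USER was a thin wrapper around copy_to_user(), which returns the number of bytes it could not copy; any non-zero result is therefore mapped to -EFAULT. The pattern in isolation (helper name illustrative):

    #include <linux/uaccess.h>

    /* Return a single int result to user space, as the ioctl paths above do. */
    static int put_user_result(int __user *dst, int result)
    {
            if (copy_to_user(dst, &result, sizeof(result)))
                    return -EFAULT;         /* partial or failed copy */
            return 0;
    }
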
@@ -1403,7 +1413,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
master->driver_priv = NULL;
}
-#ifdef CONFIG_DRM_I915_FBDEV
+#if IS_ENABLED(CONFIG_FB)
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
@@ -1476,6 +1486,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return -ENODEV;
}
+ /* UMS needs agp support. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
+ return -EINVAL;
+
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
@@ -1486,20 +1500,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
- spin_lock_init(&dev_priv->backlight.lock);
+ spin_lock_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->dpio_lock);
- mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);
- mutex_init(&dev_priv->pc8.lock);
- dev_priv->pc8.requirements_met = false;
- dev_priv->pc8.gpu_idle = false;
- dev_priv->pc8.irqs_disabled = false;
- dev_priv->pc8.enabled = false;
- dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
- INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+ intel_pm_setup(dev);
intel_display_crc_init(dev);
@@ -1603,7 +1610,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
intel_irq_init(dev);
- intel_pm_init(dev);
intel_uncore_sanitize(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1639,8 +1645,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_gem_unload;
}
- if (HAS_POWER_WELL(dev))
- intel_power_domains_init(dev);
+ intel_power_domains_init(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
@@ -1664,11 +1669,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv);
+ intel_init_runtime_pm(dev_priv);
+
return 0;
out_power_well:
- if (HAS_POWER_WELL(dev))
- intel_power_domains_remove(dev);
+ intel_power_domains_remove(dev);
drm_vblank_cleanup(dev);
out_gem_unload:
if (dev_priv->mm.inactive_shrinker.scan_objects)
@@ -1679,6 +1685,7 @@ out_gem_unload:
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
+ pm_qos_remove_request(&dev_priv->pm_qos);
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1704,25 +1711,27 @@ int i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ ret = i915_gem_suspend(dev);
+ if (ret) {
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+ return ret;
+ }
+
+ intel_fini_runtime_pm(dev_priv);
+
intel_gpu_ips_teardown();
- if (HAS_POWER_WELL(dev)) {
- /* The i915.ko module is still not prepared to be loaded when
- * the power well is not enabled, so just enable it in case
- * we're going to unload/reload. */
- intel_display_set_init_power(dev, true);
- intel_power_domains_remove(dev);
- }
+ /* The i915.ko module is still not prepared to be loaded when
+ * the power well is not enabled, so just enable it in case
+ * we're going to unload/reload. */
+ intel_display_set_init_power(dev, true);
+ intel_power_domains_remove(dev);
i915_teardown_sysfs(dev);
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
- ret = i915_gem_suspend(dev);
- if (ret)
- DRM_ERROR("failed to idle hardware: %d\n", ret);
-
io_mapping_free(dev_priv->gtt.mappable);
arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1777,7 +1786,6 @@ int i915_driver_unload(struct drm_device *dev)
list_del(&dev_priv->gtt.base.global_link);
WARN_ON(!list_empty(&dev_priv->vm_list));
- drm_mm_takedown(&dev_priv->gtt.base.mm);
drm_vblank_cleanup(dev);
@@ -1848,8 +1856,10 @@ void i915_driver_lastclose(struct drm_device * dev)
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
+ mutex_lock(&dev->struct_mutex);
i915_gem_context_close(dev, file_priv);
i915_gem_release(dev, file_priv);
+ mutex_unlock(&dev->struct_mutex);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@ -1908,6 +1918,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2e367a1c6a64..04f1f02c4019 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -59,7 +59,7 @@ MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
int i915_semaphores __read_mostly = -1;
-module_param_named(semaphores, i915_semaphores, int, 0600);
+module_param_named(semaphores, i915_semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
@@ -114,7 +114,7 @@ MODULE_PARM_DESC(enable_hangcheck,
"(default: true)");
int i915_enable_ppgtt __read_mostly = -1;
-module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
+module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");
@@ -155,7 +155,6 @@ MODULE_PARM_DESC(prefault_disable,
"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
static struct drm_driver driver;
-extern int intel_agp_enabled;
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
@@ -173,6 +172,7 @@ static const struct intel_device_info intel_i85x_info = {
.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
+ .has_fbc = 1,
.ring_mask = RENDER_RING,
};
@@ -192,6 +192,7 @@ static const struct intel_device_info intel_i915gm_info = {
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
+ .has_fbc = 1,
.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i945g_info = {
@@ -204,6 +205,7 @@ static const struct intel_device_info intel_i945gm_info = {
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
+ .has_fbc = 1,
.ring_mask = RENDER_RING,
};
@@ -265,6 +267,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
static const struct intel_device_info intel_sandybridge_d_info = {
.gen = 6, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
};
@@ -280,6 +283,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
#define GEN7_FEATURES \
.gen = 7, .num_pipes = 3, \
.need_gfx_hws = 1, .has_hotplug = 1, \
+ .has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_llc = 1
@@ -292,7 +296,6 @@ static const struct intel_device_info intel_ivybridge_m_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
.is_mobile = 1,
- .has_fbc = 1,
};
static const struct intel_device_info intel_ivybridge_q_info = {
@@ -307,6 +310,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
.num_pipes = 2,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
+ .has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
};
@@ -315,6 +319,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
.num_pipes = 2,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
+ .has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
};
@@ -332,12 +337,10 @@ static const struct intel_device_info intel_haswell_m_info = {
.is_mobile = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
- .has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
};
static const struct intel_device_info intel_broadwell_d_info = {
- .is_preliminary = 1,
.gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@@ -346,7 +349,6 @@ static const struct intel_device_info intel_broadwell_d_info = {
};
static const struct intel_device_info intel_broadwell_m_info = {
- .is_preliminary = 1,
.gen = 8, .is_mobile = 1, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@@ -476,12 +478,12 @@ check_next:
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
- return 0;
+ return false;
/* Until we get further testing... */
if (IS_GEN8(dev)) {
WARN_ON(!i915_preliminary_hw_support);
- return 0;
+ return false;
}
if (i915_semaphores >= 0)
@@ -493,7 +495,7 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
return false;
#endif
- return 1;
+ return true;
}
static int i915_drm_freeze(struct drm_device *dev)
@@ -501,6 +503,8 @@ static int i915_drm_freeze(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
+ intel_runtime_pm_get(dev_priv);
+
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_SUSPENDED;
@@ -651,6 +655,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
intel_modeset_init_hw(dev);
drm_modeset_lock_all(dev);
+ drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
@@ -687,6 +692,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
+
+ intel_runtime_pm_put(dev_priv);
return error;
}
@@ -761,14 +768,14 @@ int i915_reset(struct drm_device *dev)
DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
dev_priv->gpu_error.stop_rings = 0;
if (ret == -ENODEV) {
- DRM_ERROR("Reset not implemented, but ignoring "
- "error for simulated gpu hangs\n");
+ DRM_INFO("Reset not implemented, but ignoring "
+ "error for simulated gpu hangs\n");
ret = 0;
}
}
if (ret) {
- DRM_ERROR("Failed to reset chip.\n");
+ DRM_ERROR("Failed to reset chip: %i\n", ret);
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -789,12 +796,9 @@ int i915_reset(struct drm_device *dev)
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->ums.mm_suspended) {
- bool hw_contexts_disabled = dev_priv->hw_contexts_disabled;
dev_priv->ums.mm_suspended = 0;
ret = i915_gem_init_hw(dev);
- if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled)
- DRM_ERROR("HW contexts didn't survive reset\n");
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("Failed hw init on reset %d\n", ret);
@@ -830,17 +834,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
- /* We've managed to ship a kms-enabled ddx that shipped with an XvMC
- * implementation for gen3 (and only gen3) that used legacy drm maps
- * (gasp!) to share buffers between X and the client. Hence we need to
- * keep around the fake agp stuff for gen3, even when kms is enabled. */
- if (intel_info->gen != 3) {
- driver.driver_features &=
- ~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
- } else if (!intel_agp_enabled) {
- DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
- return -ENODEV;
- }
+ driver.driver_features &= ~(DRIVER_USE_AGP);
return drm_get_pci_dev(pdev, ent, &driver);
}
@@ -914,6 +908,49 @@ static int i915_pm_poweroff(struct device *dev)
return i915_drm_freeze(drm_dev);
}
+static int i915_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!HAS_RUNTIME_PM(dev));
+
+ DRM_DEBUG_KMS("Suspending device\n");
+
+ i915_gem_release_all_mmaps(dev_priv);
+
+ del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+ dev_priv->pm.suspended = true;
+
+ /*
+ * current versions of firmware which depend on this opregion
+ * notification have repurposed the D1 definition to mean
+ * "runtime suspended" vs. what you would normally expect (D3)
+ * to distinguish it from notifications that might be sent
+ * via the suspend path.
+ */
+ intel_opregion_notify_adapter(dev, PCI_D1);
+
+ return 0;
+}
+
+static int i915_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!HAS_RUNTIME_PM(dev));
+
+ DRM_DEBUG_KMS("Resuming device\n");
+
+ intel_opregion_notify_adapter(dev, PCI_D0);
+ dev_priv->pm.suspended = false;
+
+ return 0;
+}
+
static const struct dev_pm_ops i915_pm_ops = {
.suspend = i915_pm_suspend,
.resume = i915_pm_resume,
@@ -921,6 +958,8 @@ static const struct dev_pm_ops i915_pm_ops = {
.thaw = i915_pm_thaw,
.poweroff = i915_pm_poweroff,
.restore = i915_pm_resume,
+ .runtime_suspend = i915_runtime_suspend,
+ .runtime_resume = i915_runtime_resume,
};
static const struct vm_operations_struct i915_gem_vm_ops = {
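
Wiring the new callbacks into dev_pm_ops is all the PCI core needs to invoke them when runtime suspend or resume is triggered; the driver opts in elsewhere with the usual pm_runtime_* calls. A skeletal registration, not the i915 table itself:

    #include <linux/pm.h>

    static int example_runtime_suspend(struct device *device)
    {
            /* quiesce the hardware; record that register access is now forbidden */
            return 0;
    }

    static int example_runtime_resume(struct device *device)
    {
            /* wake the hardware before anyone touches registers again */
            return 0;
    }

    static const struct dev_pm_ops example_pm_ops = {
            .runtime_suspend = example_runtime_suspend,
            .runtime_resume = example_runtime_resume,
    };
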
@@ -948,7 +987,7 @@ static struct drm_driver driver = {
* deal with them for Intel hardware.
*/
.driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+ DRIVER_USE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
DRIVER_RENDER,
.load = i915_driver_load,
@@ -1023,14 +1062,24 @@ static int __init i915_init(void)
driver.driver_features &= ~DRIVER_MODESET;
#endif
- if (!(driver.driver_features & DRIVER_MODESET))
+ if (!(driver.driver_features & DRIVER_MODESET)) {
driver.get_vblank_timestamp = NULL;
+#ifndef CONFIG_DRM_I915_UMS
+ /* Silently fail loading to not upset userspace. */
+ return 0;
+#endif
+ }
return drm_pci_init(&driver, &i915_pci_driver);
}
static void __exit i915_exit(void)
{
+#ifndef CONFIG_DRM_I915_UMS
+ if (!(driver.driver_features & DRIVER_MODESET))
+ return; /* Never loaded a driver. */
+#endif
+
drm_pci_exit(&driver, &i915_pci_driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ccdbecca070d..df77e20e3c3d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -89,6 +89,18 @@ enum port {
};
#define port_name(p) ((p) + 'A')
+#define I915_NUM_PHYS_VLV 1
+
+enum dpio_channel {
+ DPIO_CH0,
+ DPIO_CH1
+};
+
+enum dpio_phy {
+ DPIO_PHY0,
+ DPIO_PHY1
+};
+
enum intel_display_power_domain {
POWER_DOMAIN_PIPE_A,
POWER_DOMAIN_PIPE_B,
@@ -101,6 +113,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP,
POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
@@ -310,13 +323,14 @@ struct drm_i915_error_state {
u32 instps[I915_NUM_RINGS];
u32 extra_instdone[I915_NUM_INSTDONE_REG];
u32 seqno[I915_NUM_RINGS];
- u64 bbaddr;
+ u64 bbaddr[I915_NUM_RINGS];
u32 fault_reg[I915_NUM_RINGS];
u32 done_reg;
u32 faddr[I915_NUM_RINGS];
u64 fence[I915_MAX_NUM_FENCES];
struct timeval time;
struct drm_i915_error_ring {
+ bool valid;
struct drm_i915_error_object {
int page_count;
u32 gtt_offset;
@@ -351,6 +365,7 @@ struct drm_i915_error_state {
enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};
+struct intel_connector;
struct intel_crtc_config;
struct intel_crtc;
struct intel_limit;
@@ -358,7 +373,7 @@ struct dpll;
struct drm_i915_display_funcs {
bool (*fbc_enabled)(struct drm_device *dev);
- void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+ void (*enable_fbc)(struct drm_crtc *crtc);
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
@@ -413,11 +428,20 @@ struct drm_i915_display_funcs {
/* render clock increase/decrease */
/* display clock increase/decrease */
/* pll clock increase/decrease */
+
+ int (*setup_backlight)(struct intel_connector *connector);
+ uint32_t (*get_backlight)(struct intel_connector *connector);
+ void (*set_backlight)(struct intel_connector *connector,
+ uint32_t level);
+ void (*disable_backlight)(struct intel_connector *connector);
+ void (*enable_backlight)(struct intel_connector *connector);
};
struct intel_uncore_funcs {
- void (*force_wake_get)(struct drm_i915_private *dev_priv);
- void (*force_wake_put)(struct drm_i915_private *dev_priv);
+ void (*force_wake_get)(struct drm_i915_private *dev_priv,
+ int fw_engine);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv,
+ int fw_engine);
uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
@@ -442,6 +466,9 @@ struct intel_uncore {
unsigned fifo_count;
unsigned forcewake_count;
+ unsigned fw_rendercount;
+ unsigned fw_mediacount;
+
struct delayed_work force_wake_work;
};
@@ -669,7 +696,6 @@ struct i915_fbc {
struct delayed_work work;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
- int interval;
} *fbc_work;
enum no_fbc_reason {
@@ -708,7 +734,6 @@ enum intel_sbi_destination {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
-#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
struct intel_fbdev;
struct intel_fbc_work;
@@ -761,8 +786,6 @@ struct i915_suspend_saved_registers {
u32 saveBLC_PWM_CTL;
u32 saveBLC_PWM_CTL2;
u32 saveBLC_HIST_CTL_B;
- u32 saveBLC_PWM_CTL_B;
- u32 saveBLC_PWM_CTL2_B;
u32 saveBLC_CPU_PWM_CTL;
u32 saveBLC_CPU_PWM_CTL2;
u32 saveFPB0;
@@ -932,21 +955,29 @@ struct intel_ilk_power_mgmt {
/* Power well structure for haswell */
struct i915_power_well {
+ const char *name;
+ bool always_on;
/* power well enable/disable usage count */
int count;
+ unsigned long domains;
+ void *data;
+ void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
+ bool enable);
+ bool (*is_enabled)(struct drm_device *dev,
+ struct i915_power_well *power_well);
};
-#define I915_MAX_POWER_WELLS 1
-
struct i915_power_domains {
/*
* Power wells needed for initialization at driver init and suspend
* time are on. They are kept on until after the first modeset.
*/
bool init_power_on;
+ int power_well_count;
struct mutex lock;
- struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
+ int domain_use_count[POWER_DOMAIN_NUM];
+ struct i915_power_well *power_wells;
};
struct i915_dri1_state {
@@ -1077,34 +1108,30 @@ struct i915_gpu_error {
unsigned long missed_irq_rings;
/**
- * State variable and reset counter controlling the reset flow
+ * State variable controlling the reset flow and count
*
- * Upper bits are for the reset counter. This counter is used by the
- * wait_seqno code to race-free noticed that a reset event happened and
- * that it needs to restart the entire ioctl (since most likely the
- * seqno it waited for won't ever signal anytime soon).
+ * This is a counter which gets incremented when reset is triggered,
+ * and again when reset has been handled. So odd values (lowest bit set)
+	 * mean that a reset is in progress and even values mean that the
+	 * (reset_counter >> 1):th reset was successfully completed.
+	 *
+	 * If a reset is not completed successfully, the I915_WEDGED bit is
+ * set meaning that hardware is terminally sour and there is no
+ * recovery. All waiters on the reset_queue will be woken when
+ * that happens.
+ *
+ * This counter is used by the wait_seqno code to notice that reset
+ * event happened and it needs to restart the entire ioctl (since most
+ * likely the seqno it waited for won't ever signal anytime soon).
*
* This is important for lock-free wait paths, where no contended lock
* naturally enforces the correct ordering between the bail-out of the
* waiter and the gpu reset work code.
- *
- * Lowest bit controls the reset state machine: Set means a reset is in
- * progress. This state will (presuming we don't have any bugs) decay
- * into either unset (successful reset) or the special WEDGED value (hw
- * terminally sour). All waiters on the reset_queue will be woken when
- * that happens.
*/
atomic_t reset_counter;
- /**
- * Special values/flags for reset_counter
- *
- * Note that the code relies on
- * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
- * being true.
- */
#define I915_RESET_IN_PROGRESS_FLAG 1
-#define I915_WEDGED 0xffffffff
+#define I915_WEDGED (1 << 31)
/**
* Waitqueue to signal when the reset has completed. Used by clients
@@ -1158,6 +1185,11 @@ struct intel_vbt_data {
int edp_bpp;
struct edp_power_seq edp_pps;
+ struct {
+ u16 pwm_freq_hz;
+ bool active_low_pwm;
+ } backlight;
+
/* MIPI DSI */
struct {
u16 panel_id;
@@ -1184,7 +1216,7 @@ struct intel_wm_level {
uint32_t fbc_val;
};
-struct hsw_wm_values {
+struct ilk_wm_values {
uint32_t wm_pipe[3];
uint32_t wm_lp[3];
uint32_t wm_lp_spr[3];
@@ -1262,6 +1294,10 @@ struct i915_package_c8 {
} regsave;
};
+struct i915_runtime_pm {
+ bool suspended;
+};
+
enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_NONE,
INTEL_PIPE_CRC_SOURCE_PLANE1,
@@ -1366,15 +1402,9 @@ typedef struct drm_i915_private {
/* overlay */
struct intel_overlay *overlay;
- unsigned int sprite_scaling_enabled;
- /* backlight */
- struct {
- int level;
- bool enabled;
- spinlock_t lock; /* bl registers and the above bl fields */
- struct backlight_device *device;
- } backlight;
+ /* backlight registers and fields in struct intel_panel */
+ spinlock_t backlight_lock;
/* LVDS info */
bool no_aux_handshake;
@@ -1426,6 +1456,7 @@ typedef struct drm_i915_private {
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
struct intel_ddi_plls ddi_plls;
+ int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
/* Reclocking support */
bool render_reclock_avail;
@@ -1470,7 +1501,6 @@ typedef struct drm_i915_private {
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
- bool hw_contexts_disabled;
uint32_t hw_context_size;
struct list_head context_list;
@@ -1492,11 +1522,13 @@ typedef struct drm_i915_private {
uint16_t cur_latency[5];
/* current hardware state */
- struct hsw_wm_values hw;
+ struct ilk_wm_values hw;
} wm;
struct i915_package_c8 pc8;
+ struct i915_runtime_pm pm;
+
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
@@ -1755,8 +1787,13 @@ struct drm_i915_file_private {
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0xFF00) == 0x0C00)
-#define IS_ULT(dev) (IS_HASWELL(dev) && \
+#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
+ (((dev)->pdev->device & 0xf) == 0x2 || \
+ ((dev)->pdev->device & 0xf) == 0x6 || \
+ ((dev)->pdev->device & 0xf) == 0xe))
+#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0x00F0) == 0x0020)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
@@ -1794,6 +1831,14 @@ struct drm_i915_file_private {
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+/*
+ * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
+ * even when in MSI mode. This results in spurious interrupt warnings if the
+ * legacy irq no. is shared with another device. The kernel then disables that
+ * interrupt source and so prevents the other device from working properly.
+ */
+#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
@@ -1808,15 +1853,15 @@ struct drm_i915_file_private {
#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
-#define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
+#define HAS_RUNTIME_PM(dev) (IS_HASWELL(dev))
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -1901,14 +1946,11 @@ void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
-extern void intel_uncore_clear_errors(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
@@ -1984,6 +2026,7 @@ void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
@@ -2060,12 +2103,17 @@ int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
- & I915_RESET_IN_PROGRESS_FLAG);
+ & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
- return atomic_read(&error->reset_counter) == I915_WEDGED;
+ return atomic_read(&error->reset_counter) & I915_WEDGED;
+}
+
+static inline u32 i915_reset_count(struct i915_gpu_error *error)
+{
+ return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
}
void i915_gem_reset(struct drm_device *dev);
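As an aside, the odd/even reset_counter encoding documented above, the I915_WEDGED flag and the new i915_reset_count() helper can be modelled in isolation. The stand-alone C sketch below only mirrors those documented semantics and is not part of the patch; the SKETCH_* names and the user-space harness are hypothetical.

#include <assert.h>
#include <stdint.h>

#define SKETCH_RESET_IN_PROGRESS (1u << 0)	/* mirrors I915_RESET_IN_PROGRESS_FLAG */
#define SKETCH_WEDGED		 (1u << 31)	/* mirrors I915_WEDGED */

/* Same arithmetic as the i915_reset_count() helper added above. */
static uint32_t sketch_reset_count(uint32_t reset_counter)
{
	return ((reset_counter & ~SKETCH_WEDGED) + 1) / 2;
}

int main(void)
{
	uint32_t c = 0;				/* no reset has ever happened */

	c += 1;					/* reset triggered: counter goes odd */
	assert(c & SKETCH_RESET_IN_PROGRESS);

	c += 1;					/* reset handled: counter goes even */
	assert(!(c & SKETCH_RESET_IN_PROGRESS));
	assert(sketch_reset_count(c) == 1);	/* first completed reset */

	/* A terminally wedged GPU keeps its completed-reset count. */
	assert(sketch_reset_count(c | SKETCH_WEDGED) == 1);
	return 0;
}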
@@ -2177,7 +2225,7 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
}
/* i915_gem_context.c */
-void i915_gem_context_init(struct drm_device *dev);
+int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
@@ -2336,8 +2384,8 @@ extern void intel_i2c_reset(struct drm_device *dev);
/* intel_opregion.c */
struct intel_encoder;
-extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
+extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
@@ -2346,6 +2394,7 @@ extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
extern int intel_opregion_notify_adapter(struct drm_device *dev,
pci_power_t state);
#else
+static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
@@ -2395,6 +2444,8 @@ extern int intel_enable_rc6(const struct drm_device *dev);
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -2410,8 +2461,8 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
* must be set to prevent GT core from power down and stale values being
* returned.
*/
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@@ -2426,6 +2477,8 @@ u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
@@ -2434,9 +2487,30 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
enum intel_sbi_destination destination);
+u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
+
+void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
+void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
+
+#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+ (((reg) >= 0x2000 && (reg) < 0x4000) ||\
+ ((reg) >= 0x5000 && (reg) < 0x8000) ||\
+ ((reg) >= 0xB000 && (reg) < 0x12000) ||\
+ ((reg) >= 0x2E000 && (reg) < 0x30000))
+
+#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
+ (((reg) >= 0x12000 && (reg) < 0x14000) ||\
+ ((reg) >= 0x22000 && (reg) < 0x24000) ||\
+ ((reg) >= 0x30000 && (reg) < 0x40000))
+
+#define FORCEWAKE_RENDER (1 << 0)
+#define FORCEWAKE_MEDIA (1 << 1)
+#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
-int vlv_gpu_freq(int ddr_freq, int val);
-int vlv_freq_opcode(int ddr_freq, int val);
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 621c7c67a643..00c836154725 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1015,9 +1015,11 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
struct drm_i915_file_private *file_priv)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ const bool irq_test_in_progress =
+ ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
struct timespec before, now;
DEFINE_WAIT(wait);
- long timeout_jiffies;
+ unsigned long timeout_expire;
int ret;
WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1025,7 +1027,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
- timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
+ timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
gen6_rps_boost(dev_priv);
@@ -1035,8 +1037,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
msecs_to_jiffies(100));
}
- if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
- WARN_ON(!ring->irq_get(ring)))
+ if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
return -ENODEV;
/* Record current time in case interrupted by signal, or wedged */
@@ -1044,7 +1045,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
getrawmonotonic(&before);
for (;;) {
struct timer_list timer;
- unsigned long expire;
prepare_to_wait(&ring->irq_queue, &wait,
interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
@@ -1070,23 +1070,22 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
break;
}
- if (timeout_jiffies <= 0) {
+ if (timeout && time_after_eq(jiffies, timeout_expire)) {
ret = -ETIME;
break;
}
timer.function = NULL;
if (timeout || missed_irq(dev_priv, ring)) {
+ unsigned long expire;
+
setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
- expire = jiffies + (missed_irq(dev_priv, ring) ? 1: timeout_jiffies);
+ expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
mod_timer(&timer, expire);
}
io_schedule();
- if (timeout)
- timeout_jiffies = expire - jiffies;
-
if (timer.function) {
del_singleshot_timer_sync(&timer);
destroy_timer_on_stack(&timer);
@@ -1095,7 +1094,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
getrawmonotonic(&now);
trace_i915_gem_request_wait_end(ring, seqno);
- ring->irq_put(ring);
+ if (!irq_test_in_progress)
+ ring->irq_put(ring);
finish_wait(&ring->irq_queue, &wait);
@@ -1380,6 +1380,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int ret = 0;
bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
+ intel_runtime_pm_get(dev_priv);
+
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
PAGE_SHIFT;
@@ -1427,8 +1429,10 @@ out:
/* If this -EIO is due to a gpu hang, give the reset code a
* chance to clean up the mess. Otherwise return the proper
* SIGBUS. */
- if (i915_terminally_wedged(&dev_priv->gpu_error))
- return VM_FAULT_SIGBUS;
+ if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+ ret = VM_FAULT_SIGBUS;
+ break;
+ }
case -EAGAIN:
/*
* EAGAIN means the gpu is hung and we'll wait for the error
@@ -1443,15 +1447,38 @@ out:
* EBUSY is ok: this just means that another thread
* already did the job.
*/
- return VM_FAULT_NOPAGE;
+ ret = VM_FAULT_NOPAGE;
+ break;
case -ENOMEM:
- return VM_FAULT_OOM;
+ ret = VM_FAULT_OOM;
+ break;
case -ENOSPC:
- return VM_FAULT_SIGBUS;
+ ret = VM_FAULT_SIGBUS;
+ break;
default:
WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
- return VM_FAULT_SIGBUS;
+ ret = VM_FAULT_SIGBUS;
+ break;
}
+
+ intel_runtime_pm_put(dev_priv);
+ return ret;
+}
+
+void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+ struct i915_vma *vma;
+
+ /*
+ * Only the global gtt is relevant for gtt memory mappings, so restrict
+ * list traversal to objects bound into the global address space. Note
+ * that the active list should be empty, but better safe than sorry.
+ */
+ WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
+ list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
+ i915_gem_release_mmap(vma->obj);
+ list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
+ i915_gem_release_mmap(vma->obj);
}
/**
@@ -2303,7 +2330,7 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
if (ring->hangcheck.action != HANGCHECK_WAIT &&
i915_request_guilty(request, acthd, &inside)) {
- DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
+ DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
ring->name,
inside ? "inside" : "flushing",
offset,
@@ -2343,28 +2370,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
kfree(request);
}
-static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
{
- u32 completed_seqno;
- u32 acthd;
-
- acthd = intel_ring_get_active_head(ring);
- completed_seqno = ring->get_seqno(ring, false);
-
- while (!list_empty(&ring->request_list)) {
- struct drm_i915_gem_request *request;
-
- request = list_first_entry(&ring->request_list,
- struct drm_i915_gem_request,
- list);
+ u32 completed_seqno = ring->get_seqno(ring, false);
+ u32 acthd = intel_ring_get_active_head(ring);
+ struct drm_i915_gem_request *request;
- if (request->seqno > completed_seqno)
- i915_set_reset_status(ring, request, acthd);
+ list_for_each_entry(request, &ring->request_list, list) {
+ if (i915_seqno_passed(completed_seqno, request->seqno))
+ continue;
- i915_gem_free_request(request);
+ i915_set_reset_status(ring, request, acthd);
}
+}
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
@@ -2374,6 +2397,23 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
i915_gem_object_move_to_inactive(obj);
}
+
+ /*
+ * We must free the requests after all the corresponding objects have
+ * been moved off the active lists, which is the same order in which the
+ * normal retire_requests path frees them. This is important if objects hold
+ * implicit references to things such as ppgtt address spaces through
+ * the request.
+ */
+ while (!list_empty(&ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ i915_gem_free_request(request);
+ }
}
void i915_gem_restore_fences(struct drm_device *dev)
@@ -2403,8 +2443,16 @@ void i915_gem_reset(struct drm_device *dev)
struct intel_ring_buffer *ring;
int i;
+ /*
+ * Before we free the objects from the requests, we need to inspect
+ * them for finding the guilty party. As the requests only borrow
+ * their reference to the objects, the inspection must be done first.
+ */
for_each_ring(ring, dev_priv, i)
- i915_gem_reset_ring_lists(dev_priv, ring);
+ i915_gem_reset_ring_status(dev_priv, ring);
+
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_reset_ring_cleanup(dev_priv, ring);
i915_gem_cleanup_ringbuffer(dev);
@@ -2746,7 +2794,6 @@ int i915_vma_unbind(struct i915_vma *vma)
obj->has_aliasing_ppgtt_mapping = 0;
}
i915_gem_gtt_finish_object(obj);
- i915_gem_object_unpin_pages(obj);
list_del(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
@@ -2754,7 +2801,6 @@ int i915_vma_unbind(struct i915_vma *vma)
obj->map_and_fenceable = true;
drm_mm_remove_node(&vma->node);
-
i915_gem_vma_destroy(vma);
/* Since the unbound list is global, only move to that list if
@@ -2762,6 +2808,12 @@ int i915_vma_unbind(struct i915_vma *vma)
if (list_empty(&obj->vma_list))
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+ /* And finally, now that the object is completely decoupled from this vma,
+ * we can drop its hold on the backing storage and allow it to be
+ * reaped by the shrinker.
+ */
+ i915_gem_object_unpin_pages(obj);
+
return 0;
}
@@ -3054,7 +3106,7 @@ i915_find_fence_reg(struct drm_device *dev)
}
if (avail == NULL)
- return NULL;
+ goto deadlock;
/* None available, try to steal one or wait for a user to finish */
list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
@@ -3064,7 +3116,12 @@ i915_find_fence_reg(struct drm_device *dev)
return reg;
}
- return NULL;
+deadlock:
+ /* Wait for completion of pending flips which consume fences */
+ if (intel_has_pending_fb_unpin(dev))
+ return ERR_PTR(-EAGAIN);
+
+ return ERR_PTR(-EDEADLK);
}
/**
@@ -3109,8 +3166,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
}
} else if (enable) {
reg = i915_find_fence_reg(dev);
- if (reg == NULL)
- return -EDEADLK;
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
if (reg->obj) {
struct drm_i915_gem_object *old = reg->obj;
@@ -4165,6 +4222,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_vma *vma, *next;
+ intel_runtime_pm_get(dev_priv);
+
trace_i915_gem_object_destroy(obj);
if (obj->phys_obj)
@@ -4209,6 +4268,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
kfree(obj->bit_17);
i915_gem_object_free(obj);
+
+ intel_runtime_pm_put(dev_priv);
}
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
@@ -4465,7 +4526,13 @@ i915_gem_init_hw(struct drm_device *dev)
* XXX: There was some w/a described somewhere suggesting loading
* contexts before PPGTT.
*/
- i915_gem_context_init(dev);
+ ret = i915_gem_context_init(dev);
+ if (ret) {
+ i915_gem_cleanup_ringbuffer(dev);
+ DRM_ERROR("Context initialization failed %d\n", ret);
+ return ret;
+ }
+
if (dev_priv->mm.aliasing_ppgtt) {
ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 72a3df32292f..e08acaba5402 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -247,36 +247,34 @@ err_destroy:
return ret;
}
-void i915_gem_context_init(struct drm_device *dev)
+int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
- if (!HAS_HW_CONTEXTS(dev)) {
- dev_priv->hw_contexts_disabled = true;
- DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n");
- return;
- }
+ if (!HAS_HW_CONTEXTS(dev))
+ return 0;
/* If called from reset, or thaw... we've been here already */
- if (dev_priv->hw_contexts_disabled ||
- dev_priv->ring[RCS].default_context)
- return;
+ if (dev_priv->ring[RCS].default_context)
+ return 0;
dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
if (dev_priv->hw_context_size > (1<<20)) {
- dev_priv->hw_contexts_disabled = true;
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
- return;
+ return -E2BIG;
}
- if (create_default_context(dev_priv)) {
- dev_priv->hw_contexts_disabled = true;
- DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
- return;
+ ret = create_default_context(dev_priv);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n",
+ ret);
+ return ret;
}
DRM_DEBUG_DRIVER("HW context support initialized\n");
+ return 0;
}
void i915_gem_context_fini(struct drm_device *dev)
@@ -284,7 +282,7 @@ void i915_gem_context_fini(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
- if (dev_priv->hw_contexts_disabled)
+ if (!HAS_HW_CONTEXTS(dev))
return;
/* The only known way to stop the gpu from accessing the hw context is
@@ -327,16 +325,16 @@ i915_gem_context_get_hang_stats(struct drm_device *dev,
struct drm_file *file,
u32 id)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_hw_context *ctx;
if (id == DEFAULT_CONTEXT_ID)
return &file_priv->hang_stats;
- ctx = NULL;
- if (!dev_priv->hw_contexts_disabled)
- ctx = i915_gem_context_get(file->driver_priv, id);
+ if (!HAS_HW_CONTEXTS(dev))
+ return ERR_PTR(-ENOENT);
+
+ ctx = i915_gem_context_get(file->driver_priv, id);
if (ctx == NULL)
return ERR_PTR(-ENOENT);
@@ -347,10 +345,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- mutex_lock(&dev->struct_mutex);
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
- mutex_unlock(&dev->struct_mutex);
}
static struct i915_hw_context *
@@ -423,11 +419,21 @@ static int do_switch(struct i915_hw_context *to)
if (ret)
return ret;
- /* Clear this page out of any CPU caches for coherent swap-in/out. Note
+ /*
+ * Pin can switch back to the default context if we end up calling into
+ * evict_everything - as a last ditch gtt defrag effort that also
+ * switches to the default context. Hence we need to reload from here.
+ */
+ from = ring->last_context;
+
+ /*
+ * Clear this page out of any CPU caches for coherent swap-in/out. Note
* that thanks to write = false in this call and us not setting any gpu
* write domains when putting a context object onto the active list
* (when switching away from it), this won't block.
- * XXX: We need a real interface to do this instead of trickery. */
+ *
+ * XXX: We need a real interface to do this instead of trickery.
+ */
ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
if (ret) {
i915_gem_object_unpin(to->obj);
@@ -494,8 +500,6 @@ static int do_switch(struct i915_hw_context *to)
* @ring: ring for which we'll execute the context switch
* @file_priv: file_priv associated with the context, may be NULL
* @id: context id number
- * @seqno: sequence number by which the new context will be switched to
- * @flags:
*
* The context life cycle is simple. The context refcount is incremented and
* decremented by 1 and create and destroy. If the context is in use by the GPU,
@@ -509,7 +513,7 @@ int i915_switch_context(struct intel_ring_buffer *ring,
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct i915_hw_context *to;
- if (dev_priv->hw_contexts_disabled)
+ if (!HAS_HW_CONTEXTS(ring->dev))
return 0;
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
@@ -534,7 +538,6 @@ int i915_switch_context(struct intel_ring_buffer *ring,
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_context_create *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_hw_context *ctx;
@@ -543,7 +546,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- if (dev_priv->hw_contexts_disabled)
+ if (!HAS_HW_CONTEXTS(dev))
return -ENODEV;
ret = i915_mutex_lock_interruptible(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b7376533633d..2ca280f9ee53 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -27,8 +27,10 @@
*/
#include <drm/drmP.h>
-#include "i915_drv.h"
#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
#include "i915_trace.h"
static bool
@@ -53,6 +55,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
struct list_head eviction_list, unwind_list;
struct i915_vma *vma;
int ret = 0;
+ int pass = 0;
trace_i915_gem_evict(dev, min_size, alignment, mappable);
@@ -88,6 +91,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
} else
drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
+search_again:
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(vma, &vm->inactive_list, mm_list) {
if (mark_free(vma, &unwind_list))
@@ -115,10 +119,27 @@ none:
list_del_init(&vma->exec_list);
}
- /* We expect the caller to unpin, evict all and try again, or give up.
- * So calling i915_gem_evict_vm() is unnecessary.
+ /* Can we unpin some objects such as idle hw contexts,
+ * or pending flips?
+ */
+ if (nonblocking)
+ return -ENOSPC;
+
+ /* Only idle the GPU and repeat the search once */
+ if (pass++ == 0) {
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(dev);
+ goto search_again;
+ }
+
+ /* If we still have pending pageflip completions, drop
+ * back to userspace to give our workqueues time to
+ * acquire our locks and unpin the old scanouts.
*/
- return -ENOSPC;
+ return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
found:
/* drm_mm doesn't allow any other operations while
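The eviction rework above amounts to a small retry loop: if the first scan finds nothing and the caller may block, idle the GPU and scan once more; only then give up, returning -EAGAIN when pending page-flip unpins might still free space and -ENOSPC otherwise. A stand-alone sketch of just that control flow (not part of the patch; every name in it is hypothetical):

#include <assert.h>
#include <stdbool.h>

#define SK_ENOSPC 28
#define SK_EAGAIN 11

static int sk_evict_something(bool (*scan)(void), void (*idle_gpu)(void),
			      bool pending_fb_unpin, bool nonblocking)
{
	int pass = 0;

search_again:
	if (scan())
		return 0;			/* found something to evict */

	if (nonblocking)
		return -SK_ENOSPC;		/* caller cannot wait */

	/* Only idle the GPU and repeat the search once. */
	if (pass++ == 0) {
		idle_gpu();
		goto search_again;
	}

	/* Pending page-flip unpins may still free space: ask for a retry. */
	return pending_fb_unpin ? -SK_EAGAIN : -SK_ENOSPC;
}

static bool sk_scan_fails(void) { return false; }
static void sk_idle_noop(void) { }

int main(void)
{
	assert(sk_evict_something(sk_scan_fails, sk_idle_noop, false, false) == -SK_ENOSPC);
	assert(sk_evict_something(sk_scan_fails, sk_idle_noop, true, false) == -SK_EAGAIN);
	assert(sk_evict_something(sk_scan_fails, sk_idle_noop, true, true) == -SK_ENOSPC);
	return 0;
}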
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b7e787fb4649..d269ecf46e26 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -46,7 +46,7 @@ struct eb_vmas {
};
static struct eb_vmas *
-eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
+eb_create(struct drm_i915_gem_execbuffer2 *args)
{
struct eb_vmas *eb = NULL;
@@ -93,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
{
struct drm_i915_gem_object *obj;
struct list_head objects;
- int i, ret = 0;
+ int i, ret;
INIT_LIST_HEAD(&objects);
spin_lock(&file->table_lock);
@@ -106,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
- goto out;
+ goto err;
}
if (!list_empty(&obj->obj_exec_link)) {
@@ -114,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
- goto out;
+ goto err;
}
drm_gem_object_reference(&obj->base);
@@ -123,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
spin_unlock(&file->table_lock);
i = 0;
- list_for_each_entry(obj, &objects, obj_exec_link) {
+ while (!list_empty(&objects)) {
struct i915_vma *vma;
+ obj = list_first_entry(&objects,
+ struct drm_i915_gem_object,
+ obj_exec_link);
+
/*
* NOTE: We can leak any vmas created here when something fails
* later on. But that's no issue since vma_unbind can deal with
@@ -138,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
if (IS_ERR(vma)) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
- goto out;
+ goto err;
}
+ /* Transfer ownership from the objects list to the vmas list. */
list_add_tail(&vma->exec_list, &eb->vmas);
+ list_del_init(&obj->obj_exec_link);
vma->exec_entry = &exec[i];
if (eb->and < 0) {
@@ -155,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
++i;
}
+ return 0;
-out:
+
+err:
while (!list_empty(&objects)) {
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
obj_exec_link);
list_del_init(&obj->obj_exec_link);
- if (ret)
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference(&obj->base);
}
+ /*
+ * Objects already transferred to the vmas list will be unreferenced by
+ * eb_destroy.
+ */
+
return ret;
}
@@ -240,7 +252,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
uint32_t page_offset = offset_in_page(reloc->offset);
char *vaddr;
- int ret = -EINVAL;
+ int ret;
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret)
@@ -275,7 +287,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
- int ret = -EINVAL;
+ int ret;
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
@@ -323,7 +335,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct drm_i915_gem_object *target_i915_obj;
struct i915_vma *target_vma;
uint32_t target_offset;
- int ret = -EINVAL;
+ int ret;
/* we've already hold a reference to all valid objects */
target_vma = eb_get_vma(eb, reloc->target_handle);
@@ -332,7 +344,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_i915_obj = target_vma->obj;
target_obj = &target_vma->obj->base;
- target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
+ target_offset = target_vma->node.start;
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
@@ -353,7 +365,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
- return ret;
+ return -EINVAL;
}
if (unlikely((reloc->write_domain | reloc->read_domains)
& ~I915_GEM_GPU_DOMAINS)) {
@@ -364,7 +376,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
- return ret;
+ return -EINVAL;
}
target_obj->pending_read_domains |= reloc->read_domains;
@@ -384,14 +396,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
obj, reloc->target_handle,
(int) reloc->offset,
(int) obj->base.size);
- return ret;
+ return -EINVAL;
}
if (unlikely(reloc->offset & 3)) {
DRM_DEBUG("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
obj, reloc->target_handle,
(int) reloc->offset);
- return ret;
+ return -EINVAL;
}
/* We can't wait for rendering with pagefaults disabled */
@@ -479,8 +491,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
}
static int
-i915_gem_execbuffer_relocate(struct eb_vmas *eb,
- struct i915_address_space *vm)
+i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
struct i915_vma *vma;
int ret = 0;
@@ -889,6 +900,24 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
return 0;
}
+static int
+i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
+ const u32 ctx_id)
+{
+ struct i915_ctx_hang_stats *hs;
+
+ hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
+ if (IS_ERR(hs))
+ return PTR_ERR(hs);
+
+ if (hs->banned) {
+ DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_ring_buffer *ring)
@@ -968,8 +997,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
- struct i915_ctx_hang_stats *hs;
- u32 ctx_id = i915_execbuffer2_get_context_id(*args);
+ const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 mask, flags;
int ret, mode, i;
@@ -1096,6 +1124,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
}
+ intel_runtime_pm_get(dev_priv);
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto pre_mutex_err;
@@ -1106,7 +1136,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
- eb = eb_create(args, vm);
+ ret = i915_gem_validate_context(dev, file, ctx_id);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ goto pre_mutex_err;
+ }
+
+ eb = eb_create(args);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
@@ -1129,7 +1165,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* The objects are in their final locations, apply the relocations. */
if (need_relocs)
- ret = i915_gem_execbuffer_relocate(eb, vm);
+ ret = i915_gem_execbuffer_relocate(eb);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
@@ -1158,17 +1194,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto err;
- hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
- if (IS_ERR(hs)) {
- ret = PTR_ERR(hs);
- goto err;
- }
-
- if (hs->banned) {
- ret = -EIO;
- goto err;
- }
-
ret = i915_switch_context(ring, file, ctx_id);
if (ret)
goto err;
@@ -1230,6 +1255,10 @@ err:
pre_mutex_err:
kfree(cliprects);
+
+ /* intel_gpu_busy should also get a ref, so it will free when the device
+ * is really idle. */
+ intel_runtime_pm_put(dev_priv);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 38cb8d44a013..40a2b36b276b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -240,10 +240,16 @@ static int gen8_ppgtt_enable(struct drm_device *dev)
for_each_ring(ring, dev_priv, j) {
ret = gen8_write_pdp(ring, i, addr);
if (ret)
- return ret;
+ goto err_out;
}
}
return 0;
+
+err_out:
+ for_each_ring(ring, dev_priv, j)
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
+ return ret;
}
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
@@ -293,23 +299,23 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
struct sg_page_iter sg_iter;
- pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
+ pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
- dma_addr_t page_addr;
+ if (pt_vaddr == NULL)
+ pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
- page_addr = sg_dma_address(sg_iter.sg) +
- (sg_iter.sg_pgoffset << PAGE_SHIFT);
- pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level,
- true);
+ pt_vaddr[act_pte] =
+ gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
+ cache_level, true);
if (++act_pte == GEN8_PTES_PER_PAGE) {
kunmap_atomic(pt_vaddr);
+ pt_vaddr = NULL;
act_pt++;
- pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
act_pte = 0;
-
}
}
- kunmap_atomic(pt_vaddr);
+ if (pt_vaddr)
+ kunmap_atomic(pt_vaddr);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -318,6 +324,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
container_of(vm, struct i915_hw_ppgtt, base);
int i, j;
+ drm_mm_takedown(&vm->mm);
+
for (i = 0; i < ppgtt->num_pd_pages ; i++) {
if (ppgtt->pd_dma_addr[i]) {
pci_unmap_page(ppgtt->base.dev->pdev,
@@ -337,8 +345,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
kfree(ppgtt->gen8_pt_dma_addr[i]);
}
- __free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
- __free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
+ __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
+ __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
}
/**
@@ -381,6 +389,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
ppgtt->base.clear_range = gen8_ppgtt_clear_range;
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+ ppgtt->base.start = 0;
+ ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;
BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
@@ -573,21 +583,23 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
struct sg_page_iter sg_iter;
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+ pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
- dma_addr_t page_addr;
+ if (pt_vaddr == NULL)
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
- page_addr = sg_page_iter_dma_address(&sg_iter);
- pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
+ pt_vaddr[act_pte] =
+ vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
+ cache_level, true);
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr);
+ pt_vaddr = NULL;
act_pt++;
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
act_pte = 0;
-
}
}
- kunmap_atomic(pt_vaddr);
+ if (pt_vaddr)
+ kunmap_atomic(pt_vaddr);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
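Both insert_entries hunks above replace the eager kmap_atomic() of the first page-table page with a lazy one: a page is mapped only when the first PTE in it is actually written, and unmapped as soon as the walk crosses a page boundary, so no page is ever mapped speculatively past the end of the range. A stand-alone model of that walk (not part of the patch; the sk_* names and the tiny page size are hypothetical):

#include <assert.h>
#include <stddef.h>

#define SK_PTES_PER_PAGE 4	/* tiny for the sketch; hardware uses 512/1024 */

static int sk_map_calls;

static int *sk_map_pt_page(int pages[][SK_PTES_PER_PAGE], size_t idx)
{
	sk_map_calls++;		/* stands in for kmap_atomic() */
	return pages[idx];
}

static void sk_insert_entries(int pages[][SK_PTES_PER_PAGE],
			      size_t first, size_t count)
{
	int *pt = NULL;
	size_t pt_idx = first / SK_PTES_PER_PAGE;
	size_t pte = first % SK_PTES_PER_PAGE;

	while (count--) {
		if (pt == NULL)
			pt = sk_map_pt_page(pages, pt_idx);

		pt[pte] = 1;	/* stands in for gen8_pte_encode()/pte_encode() */

		if (++pte == SK_PTES_PER_PAGE) {
			pt = NULL;	/* kunmap_atomic() in the real code */
			pt_idx++;
			pte = 0;
		}
	}
}

int main(void)
{
	int pages[3][SK_PTES_PER_PAGE] = { { 0 } };

	/* 7 entries: tail of page 0, all of page 1, first slot of page 2. */
	sk_insert_entries(pages, 2, 7);
	assert(sk_map_calls == 3);	/* only pages actually written got mapped */
	assert(pages[0][2] && pages[1][3] && pages[2][0]);
	return 0;
}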
@@ -632,6 +644,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
+ ppgtt->base.start = 0;
+ ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
GFP_KERNEL);
if (!ppgtt->pt_pages)
@@ -906,14 +920,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
WARN_ON(readq(&gtt_entries[i-1])
!= gen8_pte_encode(addr, level, true));
-#if 0 /* TODO: Still needed on GEN8? */
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-#endif
}
/*
@@ -1126,7 +1138,6 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
if (ret)
DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
- list_add(&vma->vma_link, &obj->vma_list);
}
dev_priv->gtt.base.start = start;
@@ -1241,6 +1252,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
if (bdw_gmch_ctl)
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+ if (bdw_gmch_ctl > 4) {
+ WARN_ON(!i915_preliminary_hw_support);
+ return 4<<20;
+ }
+
return bdw_gmch_ctl << 20;
}
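For reference, the decode being clamped here is easy to work through by hand: a non-zero GGMS field f yields (1 << f) MiB of GGTT (1 -> 2 MiB, 2 -> 4 MiB), and the new check caps anything larger at 4 MiB, warning unless preliminary hardware support was requested. A stand-alone sketch of the arithmetic (not part of the patch; names are hypothetical):

#include <assert.h>
#include <stdint.h>

/* Mirrors the masked GGMS field handling in the hunk above. */
static uint32_t sk_gen8_total_gtt_size(uint32_t ggms)
{
	uint32_t size_mb = ggms ? (1u << ggms) : 0;

	if (size_mb > 4)
		return 4u << 20;	/* clamp, as the hunk does */
	return size_mb << 20;		/* MiB -> bytes */
}

int main(void)
{
	assert(sk_gen8_total_gtt_size(0) == 0);
	assert(sk_gen8_total_gtt_size(1) == 2u << 20);
	assert(sk_gen8_total_gtt_size(2) == 4u << 20);
	assert(sk_gen8_total_gtt_size(3) == 4u << 20);	/* would be 8 MiB, clamped */
	return 0;
}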
@@ -1262,14 +1278,14 @@ static int ggtt_probe_common(struct drm_device *dev,
size_t gtt_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- phys_addr_t gtt_bus_addr;
+ phys_addr_t gtt_phys_addr;
int ret;
/* For Modern GENs the PTEs and register space are split in the BAR */
- gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
+ gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
(pci_resource_len(dev->pdev, 0) / 2);
- dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
+ dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
if (!dev_priv->gtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
return -ENOMEM;
@@ -1397,6 +1413,8 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
{
struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
+
+ drm_mm_takedown(&vm->mm);
iounmap(gtt->gsm);
teardown_scratch_page(vm->dev);
}
@@ -1422,6 +1440,9 @@ static int i915_gmch_probe(struct drm_device *dev,
dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
+ if (unlikely(dev_priv->gtt.do_idle_maps))
+ DRM_INFO("applying Ironlake quirks for intel_iommu\n");
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index d284d892ed94..1a24e84f2315 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -250,7 +250,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
}
sg = st->sgl;
- sg->offset = offset;
+ sg->offset = 0;
sg->length = size;
sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
@@ -420,6 +420,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+ i915_gem_object_pin_pages(obj);
return obj;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 79dcb8f896c6..990cf8f43efd 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -146,7 +146,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
va_list tmp;
va_copy(tmp, args);
- if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
+ len = vsnprintf(NULL, 0, f, tmp);
+ va_end(tmp);
+
+ if (!__i915_error_seek(e, len))
return;
}
@@ -239,6 +242,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
unsigned ring)
{
BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
+ if (!error->ring[ring].valid)
+ return;
+
err_printf(m, "%s command stream:\n", ring_str(ring));
err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
@@ -247,12 +253,11 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
- if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
- err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_INFO(dev)->gen >= 4) {
+ err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr[ring]);
err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]);
- if (INTEL_INFO(dev)->gen >= 4)
err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
+ }
err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
if (INTEL_INFO(dev)->gen >= 6) {
@@ -294,7 +299,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
struct drm_device *dev = error_priv->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_state *error = error_priv->error;
- struct intel_ring_buffer *ring;
int i, j, page, offset, elt;
if (!error) {
@@ -329,7 +333,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (INTEL_INFO(dev)->gen == 7)
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
- for_each_ring(ring, dev_priv, i)
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++)
i915_ring_error_state(m, dev, error, i);
if (error->active_bo)
@@ -386,8 +390,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
- obj = error->ring[i].ctx;
- if (obj) {
+ if ((obj = error->ring[i].ctx)) {
err_printf(m, "%s --- HW Context = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
@@ -668,7 +671,8 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
return NULL;
obj = ring->scratch.obj;
- if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+ if (obj != NULL &&
+ acthd >= i915_gem_obj_ggtt_offset(obj) &&
acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
return i915_error_object_create(dev_priv, obj);
}
@@ -725,8 +729,9 @@ static void i915_record_ring_state(struct drm_device *dev,
error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
- if (ring->id == RCS)
- error->bbaddr = I915_READ64(BB_ADDR);
+ error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base));
+ if (INTEL_INFO(dev)->gen >= 8)
+ error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
} else {
error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
@@ -775,11 +780,17 @@ static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
struct drm_i915_gem_request *request;
int i, count;
- for_each_ring(ring, dev_priv, i) {
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct intel_ring_buffer *ring = &dev_priv->ring[i];
+
+ if (ring->dev == NULL)
+ continue;
+
+ error->ring[i].valid = true;
+
i915_record_ring_state(dev, error, ring);
error->ring[i].batchbuffer =
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5d1dedc02f15..9fec71175571 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -62,7 +62,7 @@ static const u32 hpd_mask_i915[] = {
[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};
-static const u32 hpd_status_gen4[] = {
+static const u32 hpd_status_g4x[] = {
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
@@ -567,8 +567,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
} else {
- enum transcoder cpu_transcoder =
- intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ enum transcoder cpu_transcoder = (enum transcoder) pipe;
u32 htotal;
htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
@@ -600,7 +599,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
* Cook up a vblank counter by also checking the pixel
* counter against vblank start.
*/
- return ((high1 << 8) | low) + (pixel >= vbl_start);
+ return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
@@ -621,36 +620,15 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
-static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
+static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t status;
- int reg;
- if (IS_VALLEYVIEW(dev)) {
- status = pipe == PIPE_A ?
- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
- reg = VLV_ISR;
- } else if (IS_GEN2(dev)) {
- status = pipe == PIPE_A ?
- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
- reg = ISR;
- } else if (INTEL_INFO(dev)->gen < 5) {
- status = pipe == PIPE_A ?
- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
- reg = ISR;
- } else if (INTEL_INFO(dev)->gen < 7) {
+ if (INTEL_INFO(dev)->gen < 7) {
status = pipe == PIPE_A ?
DE_PIPEA_VBLANK :
DE_PIPEB_VBLANK;
-
- reg = DEISR;
} else {
switch (pipe) {
default:
@@ -664,18 +642,14 @@ static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
status = DE_PIPEC_VBLANK_IVB;
break;
}
-
- reg = DEISR;
}
- if (IS_GEN2(dev))
- return __raw_i915_read16(dev_priv, reg) & status;
- else
- return __raw_i915_read32(dev_priv, reg) & status;
+ return __raw_i915_read32(dev_priv, DEISR) & status;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
- int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
+ unsigned int flags, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
@@ -698,6 +672,12 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
vbl_start = mode->crtc_vblank_start;
vbl_end = mode->crtc_vblank_end;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
+ vbl_end /= 2;
+ vtotal /= 2;
+ }
+
ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
/*
@@ -722,17 +702,42 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
else
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
- /*
- * The scanline counter increments at the leading edge
- * of hsync, ie. it completely misses the active portion
- * of the line. Fix up the counter at both edges of vblank
- * to get a more accurate picture whether we're in vblank
- * or not.
- */
- in_vbl = intel_pipe_in_vblank_locked(dev, pipe);
- if ((in_vbl && position == vbl_start - 1) ||
- (!in_vbl && position == vbl_end - 1))
- position = (position + 1) % vtotal;
+ if (HAS_PCH_SPLIT(dev)) {
+ /*
+ * The scanline counter increments at the leading edge
+ * of hsync, ie. it completely misses the active portion
+ * of the line. Fix up the counter at both edges of vblank
+ * to get a more accurate picture whether we're in vblank
+ * or not.
+ */
+ in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
+ if ((in_vbl && position == vbl_start - 1) ||
+ (!in_vbl && position == vbl_end - 1))
+ position = (position + 1) % vtotal;
+ } else {
+ /*
+ * ISR vblank status bits don't work the way we'd want
+ * them to work on non-PCH platforms (for
+ * ilk_pipe_in_vblank_locked()), and there doesn't
+ * appear to be any other way to determine if we're currently
+ * in vblank.
+ *
+ * Instead let's assume that we're already in vblank if
+ * we got called from the vblank interrupt and the
+ * scanline counter value indicates that we're on the
+ * line just prior to vblank start. This should result
+ * in the correct answer, unless the vblank interrupt
+ * delivery really got delayed for almost exactly one
+ * full frame/field.
+ */
+ if (flags & DRM_CALLED_FROM_VBLIRQ &&
+ position == vbl_start - 1) {
+ position = (position + 1) % vtotal;
+
+ /* Signal this correction as "applied". */
+ ret |= 0x8;
+ }
+ }
} else {
/* Have access to pixelcount since start of frame.
* We can split this into vertical and horizontal
@@ -809,7 +814,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
- crtc);
+ crtc,
+ &to_intel_crtc(crtc)->config.adjusted_mode);
}
static bool intel_hpd_irq_event(struct drm_device *dev,
@@ -1015,10 +1021,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
*/
- if (new_delay < (int)dev_priv->rps.min_delay)
- new_delay = dev_priv->rps.min_delay;
- if (new_delay > (int)dev_priv->rps.max_delay)
- new_delay = dev_priv->rps.max_delay;
+ new_delay = clamp_t(int, new_delay,
+ dev_priv->rps.min_delay, dev_priv->rps.max_delay);
dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
if (IS_VALLEYVIEW(dev_priv->dev))
@@ -1235,9 +1239,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
- WARN(((hpd[i] & hotplug_trigger) &&
- dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
- "Received HPD interrupt although disabled\n");
+ WARN_ONCE(hpd[i] & hotplug_trigger &&
+ dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
+ "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
+ hotplug_trigger, i, hpd[i]);
if (!(hpd[i] & hotplug_trigger) ||
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
@@ -1474,6 +1479,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
+ if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+ dp_aux_irq_handler(dev);
+
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
}
@@ -1993,7 +2001,7 @@ static void i915_error_work_func(struct work_struct *work)
kobject_uevent_env(&dev->primary->kdev->kobj,
KOBJ_CHANGE, reset_done_event);
} else {
- atomic_set(&error->reset_counter, I915_WEDGED);
+ atomic_set_mask(I915_WEDGED, &error->reset_counter);
}
/*
@@ -2713,6 +2721,8 @@ static void gen8_irq_preinstall(struct drm_device *dev)
#undef GEN8_IRQ_INIT_NDX
POSTING_READ(GEN8_PCU_IIR);
+
+ ibx_irq_preinstall(dev);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
@@ -3138,10 +3148,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
* Returns true when a page flip has completed.
*/
static bool i8xx_handle_vblank(struct drm_device *dev,
- int pipe, u16 iir)
+ int plane, int pipe, u32 iir)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
+ u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
if (!drm_handle_vblank(dev, pipe))
return false;
@@ -3149,7 +3159,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if ((iir & flip_pending) == 0)
return false;
- intel_prepare_page_flip(dev, pipe);
+ intel_prepare_page_flip(dev, plane);
/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
@@ -3218,9 +3228,13 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
notify_ring(dev, &dev_priv->ring[RCS]);
for_each_pipe(pipe) {
+ int plane = pipe;
+ if (HAS_FBC(dev))
+ plane = !plane;
+
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
- i8xx_handle_vblank(dev, pipe, iir))
- flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
+ i8xx_handle_vblank(dev, plane, pipe, iir))
+ flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
@@ -3416,7 +3430,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
for_each_pipe(pipe) {
int plane = pipe;
- if (IS_MOBILE(dev))
+ if (HAS_FBC(dev))
plane = !plane;
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
@@ -3653,7 +3667,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
hotplug_status);
intel_hpd_irq_handler(dev, hotplug_trigger,
- IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
+ IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
+
+ if (IS_G4X(dev) &&
+ (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
+ dp_aux_irq_handler(dev);
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
@@ -3891,8 +3909,8 @@ void hsw_pc8_disable_interrupts(struct drm_device *dev)
dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
- ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
- ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
+ ironlake_disable_display_irq(dev_priv, 0xffffffff);
+ ibx_disable_display_interrupt(dev_priv, 0xffffffff);
ilk_disable_gt_irq(dev_priv, 0xffffffff);
snb_disable_pm_irq(dev_priv, 0xffffffff);
@@ -3906,34 +3924,26 @@ void hsw_pc8_restore_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- uint32_t val, expected;
+ uint32_t val;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
val = I915_READ(DEIMR);
- expected = ~DE_PCH_EVENT_IVB;
- WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
+ WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);
- val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
- expected = ~SDE_HOTPLUG_MASK_CPT;
- WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
- val, expected);
+ val = I915_READ(SDEIMR);
+ WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);
val = I915_READ(GTIMR);
- expected = 0xffffffff;
- WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
+ WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);
val = I915_READ(GEN6_PMIMR);
- expected = 0xffffffff;
- WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
- expected);
+ WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);
dev_priv->pc8.irqs_disabled = false;
ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
- ibx_enable_display_interrupt(dev_priv,
- ~dev_priv->pc8.regsave.sdeimr &
- ~SDE_HOTPLUG_MASK_CPT);
+ ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ee2742122a02..a48b7cad6f11 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -193,10 +193,13 @@
#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
+#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
+#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
+#define MI_ARB_ENABLE (1<<0)
+#define MI_ARB_DISABLE (0<<0)
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
#define MI_SUSPEND_FLUSH_EN (1<<0)
-#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
#define MI_OVERLAY_CONTINUE (0x0<<21)
#define MI_OVERLAY_ON (0x1<<21)
@@ -212,10 +215,24 @@
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
-#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
-#define MI_ARB_ENABLE (1<<0)
-#define MI_ARB_DISABLE (0<<0)
-
+#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
+#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
+#define MI_SEMAPHORE_UPDATE (1<<21)
+#define MI_SEMAPHORE_COMPARE (1<<20)
+#define MI_SEMAPHORE_REGISTER (1<<18)
+#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
+#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
+#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
+#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
+#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
+#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
+#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
+#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
+#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
+#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
+#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
+#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
+#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
#define MI_MM_SPACE_GTT (1<<8)
#define MI_MM_SPACE_PHYSICAL (0<<8)
@@ -235,7 +252,7 @@
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
-#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
+#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
@@ -246,30 +263,13 @@
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
-#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_PPGTT_HSW (1<<8)
-#define MI_BATCH_NON_SECURE_HSW (1<<13)
+#define MI_BATCH_NON_SECURE_HSW (1<<13)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
-#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
-#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
-#define MI_SEMAPHORE_UPDATE (1<<21)
-#define MI_SEMAPHORE_COMPARE (1<<20)
-#define MI_SEMAPHORE_REGISTER (1<<18)
-#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
-#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
-#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
-#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
-#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
-#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
-#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
-#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
-#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
-#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
-#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
-#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
-#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
+
#define MI_PREDICATE_RESULT_2 (0x2214)
#define LOWER_SLICE_ENABLED (1<<0)
@@ -354,6 +354,7 @@
#define IOSF_BYTE_ENABLES_SHIFT 4
#define IOSF_BAR_SHIFT 1
#define IOSF_SB_BUSY (1<<0)
+#define IOSF_PORT_BUNIT 0x3
#define IOSF_PORT_PUNIT 0x4
#define IOSF_PORT_NC 0x11
#define IOSF_PORT_DPIO 0x12
@@ -361,12 +362,21 @@
#define IOSF_PORT_CCK 0x14
#define IOSF_PORT_CCU 0xA9
#define IOSF_PORT_GPS_CORE 0x48
+#define IOSF_PORT_FLISDSI 0x1B
#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104)
#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108)
+/* See configdb bunit SB addr map */
+#define BUNIT_REG_BISOC 0x11
+
#define PUNIT_OPCODE_REG_READ 6
#define PUNIT_OPCODE_REG_WRITE 7
+#define PUNIT_REG_DSPFREQ 0x36
+#define DSPFREQSTAT_SHIFT 30
+#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
+#define DSPFREQGUAR_SHIFT 14
+#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
#define PUNIT_CLK_GATE 1
@@ -429,6 +439,7 @@
#define DSI_PLL_N1_DIV_MASK (3 << 16)
#define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
+#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
/*
* DPIO - a special bus for various display related registers to hide behind
@@ -447,15 +458,13 @@
#define DPIO_SFR_BYPASS (1<<1)
#define DPIO_CMNRST (1<<0)
-#define _DPIO_TX3_SWING_CTL4_A 0x690
-#define _DPIO_TX3_SWING_CTL4_B 0x2a90
-#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \
- _DPIO_TX3_SWING_CTL4_B)
+#define DPIO_PHY(pipe) ((pipe) >> 1)
+#define DPIO_PHY_IOSF_PORT(phy) (dev_priv->dpio_phy_iosf_port[phy])
/*
* Per pipe/PLL DPIO regs
*/
-#define _DPIO_DIV_A 0x800c
+#define _VLV_PLL_DW3_CH0 0x800c
#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
#define DPIO_POST_DIV_DAC 0
#define DPIO_POST_DIV_HDMIDP 1 /* DAC 225-400M rate */
@@ -468,10 +477,10 @@
#define DPIO_ENABLE_CALIBRATION (1<<11)
#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
#define DPIO_M2DIV_MASK 0xff
-#define _DPIO_DIV_B 0x802c
-#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
+#define _VLV_PLL_DW3_CH1 0x802c
+#define VLV_PLL_DW3(ch) _PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1)
-#define _DPIO_REFSFR_A 0x8014
+#define _VLV_PLL_DW5_CH0 0x8014
#define DPIO_REFSEL_OVERRIDE 27
#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
@@ -479,118 +488,112 @@
#define DPIO_PLL_REFCLK_SEL_MASK 3
#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
-#define _DPIO_REFSFR_B 0x8034
-#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
+#define _VLV_PLL_DW5_CH1 0x8034
+#define VLV_PLL_DW5(ch) _PIPE(ch, _VLV_PLL_DW5_CH0, _VLV_PLL_DW5_CH1)
-#define _DPIO_CORE_CLK_A 0x801c
-#define _DPIO_CORE_CLK_B 0x803c
-#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
+#define _VLV_PLL_DW7_CH0 0x801c
+#define _VLV_PLL_DW7_CH1 0x803c
+#define VLV_PLL_DW7(ch) _PIPE(ch, _VLV_PLL_DW7_CH0, _VLV_PLL_DW7_CH1)
-#define _DPIO_IREF_CTL_A 0x8040
-#define _DPIO_IREF_CTL_B 0x8060
-#define DPIO_IREF_CTL(pipe) _PIPE(pipe, _DPIO_IREF_CTL_A, _DPIO_IREF_CTL_B)
+#define _VLV_PLL_DW8_CH0 0x8040
+#define _VLV_PLL_DW8_CH1 0x8060
+#define VLV_PLL_DW8(ch) _PIPE(ch, _VLV_PLL_DW8_CH0, _VLV_PLL_DW8_CH1)
-#define DPIO_IREF_BCAST 0xc044
-#define _DPIO_IREF_A 0x8044
-#define _DPIO_IREF_B 0x8064
-#define DPIO_IREF(pipe) _PIPE(pipe, _DPIO_IREF_A, _DPIO_IREF_B)
+#define VLV_PLL_DW9_BCAST 0xc044
+#define _VLV_PLL_DW9_CH0 0x8044
+#define _VLV_PLL_DW9_CH1 0x8064
+#define VLV_PLL_DW9(ch) _PIPE(ch, _VLV_PLL_DW9_CH0, _VLV_PLL_DW9_CH1)
-#define _DPIO_PLL_CML_A 0x804c
-#define _DPIO_PLL_CML_B 0x806c
-#define DPIO_PLL_CML(pipe) _PIPE(pipe, _DPIO_PLL_CML_A, _DPIO_PLL_CML_B)
+#define _VLV_PLL_DW10_CH0 0x8048
+#define _VLV_PLL_DW10_CH1 0x8068
+#define VLV_PLL_DW10(ch) _PIPE(ch, _VLV_PLL_DW10_CH0, _VLV_PLL_DW10_CH1)
-#define _DPIO_LPF_COEFF_A 0x8048
-#define _DPIO_LPF_COEFF_B 0x8068
-#define DPIO_LPF_COEFF(pipe) _PIPE(pipe, _DPIO_LPF_COEFF_A, _DPIO_LPF_COEFF_B)
+#define _VLV_PLL_DW11_CH0 0x804c
+#define _VLV_PLL_DW11_CH1 0x806c
+#define VLV_PLL_DW11(ch) _PIPE(ch, _VLV_PLL_DW11_CH0, _VLV_PLL_DW11_CH1)
-#define DPIO_CALIBRATION 0x80ac
+/* Spec for ref block start counts at DW10 */
+#define VLV_REF_DW13 0x80ac
-#define DPIO_FASTCLK_DISABLE 0x8100
+#define VLV_CMN_DW0 0x8100
/*
* Per DDI channel DPIO regs
*/
-#define _DPIO_PCS_TX_0 0x8200
-#define _DPIO_PCS_TX_1 0x8400
+#define _VLV_PCS_DW0_CH0 0x8200
+#define _VLV_PCS_DW0_CH1 0x8400
#define DPIO_PCS_TX_LANE2_RESET (1<<16)
#define DPIO_PCS_TX_LANE1_RESET (1<<7)
-#define DPIO_PCS_TX(port) _PORT(port, _DPIO_PCS_TX_0, _DPIO_PCS_TX_1)
+#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
-#define _DPIO_PCS_CLK_0 0x8204
-#define _DPIO_PCS_CLK_1 0x8404
+#define _VLV_PCS_DW1_CH0 0x8204
+#define _VLV_PCS_DW1_CH1 0x8404
#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22)
#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
#define DPIO_PCS_CLK_SOFT_RESET (1<<5)
-#define DPIO_PCS_CLK(port) _PORT(port, _DPIO_PCS_CLK_0, _DPIO_PCS_CLK_1)
-
-#define _DPIO_PCS_CTL_OVR1_A 0x8224
-#define _DPIO_PCS_CTL_OVR1_B 0x8424
-#define DPIO_PCS_CTL_OVER1(port) _PORT(port, _DPIO_PCS_CTL_OVR1_A, \
- _DPIO_PCS_CTL_OVR1_B)
-
-#define _DPIO_PCS_STAGGER0_A 0x822c
-#define _DPIO_PCS_STAGGER0_B 0x842c
-#define DPIO_PCS_STAGGER0(port) _PORT(port, _DPIO_PCS_STAGGER0_A, \
- _DPIO_PCS_STAGGER0_B)
-
-#define _DPIO_PCS_STAGGER1_A 0x8230
-#define _DPIO_PCS_STAGGER1_B 0x8430
-#define DPIO_PCS_STAGGER1(port) _PORT(port, _DPIO_PCS_STAGGER1_A, \
- _DPIO_PCS_STAGGER1_B)
-
-#define _DPIO_PCS_CLOCKBUF0_A 0x8238
-#define _DPIO_PCS_CLOCKBUF0_B 0x8438
-#define DPIO_PCS_CLOCKBUF0(port) _PORT(port, _DPIO_PCS_CLOCKBUF0_A, \
- _DPIO_PCS_CLOCKBUF0_B)
-
-#define _DPIO_PCS_CLOCKBUF8_A 0x825c
-#define _DPIO_PCS_CLOCKBUF8_B 0x845c
-#define DPIO_PCS_CLOCKBUF8(port) _PORT(port, _DPIO_PCS_CLOCKBUF8_A, \
- _DPIO_PCS_CLOCKBUF8_B)
-
-#define _DPIO_TX_SWING_CTL2_A 0x8288
-#define _DPIO_TX_SWING_CTL2_B 0x8488
-#define DPIO_TX_SWING_CTL2(port) _PORT(port, _DPIO_TX_SWING_CTL2_A, \
- _DPIO_TX_SWING_CTL2_B)
-
-#define _DPIO_TX_SWING_CTL3_A 0x828c
-#define _DPIO_TX_SWING_CTL3_B 0x848c
-#define DPIO_TX_SWING_CTL3(port) _PORT(port, _DPIO_TX_SWING_CTL3_A, \
- _DPIO_TX_SWING_CTL3_B)
-
-#define _DPIO_TX_SWING_CTL4_A 0x8290
-#define _DPIO_TX_SWING_CTL4_B 0x8490
-#define DPIO_TX_SWING_CTL4(port) _PORT(port, _DPIO_TX_SWING_CTL4_A, \
- _DPIO_TX_SWING_CTL4_B)
-
-#define _DPIO_TX_OCALINIT_0 0x8294
-#define _DPIO_TX_OCALINIT_1 0x8494
+#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
+
+#define _VLV_PCS_DW8_CH0 0x8220
+#define _VLV_PCS_DW8_CH1 0x8420
+#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
+
+#define _VLV_PCS01_DW8_CH0 0x0220
+#define _VLV_PCS23_DW8_CH0 0x0420
+#define _VLV_PCS01_DW8_CH1 0x2620
+#define _VLV_PCS23_DW8_CH1 0x2820
+#define VLV_PCS01_DW8(port) _PORT(port, _VLV_PCS01_DW8_CH0, _VLV_PCS01_DW8_CH1)
+#define VLV_PCS23_DW8(port) _PORT(port, _VLV_PCS23_DW8_CH0, _VLV_PCS23_DW8_CH1)
+
+#define _VLV_PCS_DW9_CH0 0x8224
+#define _VLV_PCS_DW9_CH1 0x8424
+#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
+
+#define _VLV_PCS_DW11_CH0 0x822c
+#define _VLV_PCS_DW11_CH1 0x842c
+#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
+
+#define _VLV_PCS_DW12_CH0 0x8230
+#define _VLV_PCS_DW12_CH1 0x8430
+#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
+
+#define _VLV_PCS_DW14_CH0 0x8238
+#define _VLV_PCS_DW14_CH1 0x8438
+#define VLV_PCS_DW14(ch) _PORT(ch, _VLV_PCS_DW14_CH0, _VLV_PCS_DW14_CH1)
+
+#define _VLV_PCS_DW23_CH0 0x825c
+#define _VLV_PCS_DW23_CH1 0x845c
+#define VLV_PCS_DW23(ch) _PORT(ch, _VLV_PCS_DW23_CH0, _VLV_PCS_DW23_CH1)
+
+#define _VLV_TX_DW2_CH0 0x8288
+#define _VLV_TX_DW2_CH1 0x8488
+#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
+
+#define _VLV_TX_DW3_CH0 0x828c
+#define _VLV_TX_DW3_CH1 0x848c
+#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
+
+#define _VLV_TX_DW4_CH0 0x8290
+#define _VLV_TX_DW4_CH1 0x8490
+#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
+
+#define _VLV_TX3_DW4_CH0 0x690
+#define _VLV_TX3_DW4_CH1 0x2a90
+#define VLV_TX3_DW4(ch) _PORT(ch, _VLV_TX3_DW4_CH0, _VLV_TX3_DW4_CH1)
+
+#define _VLV_TX_DW5_CH0 0x8294
+#define _VLV_TX_DW5_CH1 0x8494
#define DPIO_TX_OCALINIT_EN (1<<31)
-#define DPIO_TX_OCALINIT(port) _PORT(port, _DPIO_TX_OCALINIT_0, \
- _DPIO_TX_OCALINIT_1)
-
-#define _DPIO_TX_CTL_0 0x82ac
-#define _DPIO_TX_CTL_1 0x84ac
-#define DPIO_TX_CTL(port) _PORT(port, _DPIO_TX_CTL_0, _DPIO_TX_CTL_1)
-
-#define _DPIO_TX_LANE_0 0x82b8
-#define _DPIO_TX_LANE_1 0x84b8
-#define DPIO_TX_LANE(port) _PORT(port, _DPIO_TX_LANE_0, _DPIO_TX_LANE_1)
-
-#define _DPIO_DATA_CHANNEL1 0x8220
-#define _DPIO_DATA_CHANNEL2 0x8420
-#define DPIO_DATA_CHANNEL(port) _PORT(port, _DPIO_DATA_CHANNEL1, _DPIO_DATA_CHANNEL2)
-
-#define _DPIO_PORT0_PCS0 0x0220
-#define _DPIO_PORT0_PCS1 0x0420
-#define _DPIO_PORT1_PCS2 0x2620
-#define _DPIO_PORT1_PCS3 0x2820
-#define DPIO_DATA_LANE_A(port) _PORT(port, _DPIO_PORT0_PCS0, _DPIO_PORT1_PCS2)
-#define DPIO_DATA_LANE_B(port) _PORT(port, _DPIO_PORT0_PCS1, _DPIO_PORT1_PCS3)
-#define DPIO_DATA_CHANNEL1 0x8220
-#define DPIO_DATA_CHANNEL2 0x8420
+#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
+
+#define _VLV_TX_DW11_CH0 0x82ac
+#define _VLV_TX_DW11_CH1 0x84ac
+#define VLV_TX_DW11(ch) _PORT(ch, _VLV_TX_DW11_CH0, _VLV_TX_DW11_CH1)
+
+#define _VLV_TX_DW14_CH0 0x82b8
+#define _VLV_TX_DW14_CH1 0x84b8
+#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1)
/*
* Fence registers
@@ -732,6 +735,8 @@
#define HWSTAM 0x02098
#define DMA_FADD_I8XX 0x020d0
#define RING_BBSTATE(base) ((base)+0x110)
+#define RING_BBADDR(base) ((base)+0x140)
+#define RING_BBADDR_UDW(base) ((base)+0x168) /* gen8+ */
#define ERROR_GEN6 0x040a0
#define GEN7_ERR_INT 0x44040
@@ -922,7 +927,6 @@
#define CM0_COLOR_EVICT_DISABLE (1<<3)
#define CM0_DEPTH_WRITE_DISABLE (1<<1)
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
-#define BB_ADDR 0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
#define GFX_FLSH_CNTL_GEN6 0x101008
#define GFX_FLSH_CNTL_EN (1<<0)
@@ -999,6 +1003,7 @@
#define GEN7_FF_THREAD_MODE 0x20a0
#define GEN7_FF_SCHED_MASK 0x0077070
+#define GEN8_FF_DS_REF_CNT_FFME (1 << 19)
#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
@@ -1026,14 +1031,14 @@
#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
#define FBC_CTL_C3_IDLE (1<<13)
#define FBC_CTL_STRIDE_SHIFT (5)
-#define FBC_CTL_FENCENO (1<<0)
+#define FBC_CTL_FENCENO_SHIFT (0)
#define FBC_COMMAND 0x0320c
#define FBC_CMD_COMPRESS (1<<0)
#define FBC_STATUS 0x03210
#define FBC_STAT_COMPRESSING (1<<31)
#define FBC_STAT_COMPRESSED (1<<30)
#define FBC_STAT_MODIFIED (1<<29)
-#define FBC_STAT_CURRENT_LINE (1<<0)
+#define FBC_STAT_CURRENT_LINE_SHIFT (0)
#define FBC_CONTROL2 0x03214
#define FBC_CTL_FENCE_DBL (0<<4)
#define FBC_CTL_IDLE_IMM (0<<2)
@@ -2117,9 +2122,13 @@
 * Please check the detailed lore in the commit message for experimental
* evidence.
*/
-#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
-#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
+#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
+#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
+/* VLV DP/HDMI bits again match Bspec */
+#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
+#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
@@ -2130,6 +2139,11 @@
#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+#define DP_AUX_CHANNEL_D_INT_STATUS_G4X (1 << 6)
+#define DP_AUX_CHANNEL_C_INT_STATUS_G4X (1 << 5)
+#define DP_AUX_CHANNEL_B_INT_STATUS_G4X (1 << 4)
+#define DP_AUX_CHANNEL_MASK_INT_STATUS_G4X (7 << 4)
+
/* SDVO is different across gen3/4 */
#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
@@ -3421,42 +3435,6 @@
/* the unit of memory self-refresh latency time is 0.5us */
#define ILK_SRLT_MASK 0x3f
-/* define the fifo size on Ironlake */
-#define ILK_DISPLAY_FIFO 128
-#define ILK_DISPLAY_MAXWM 64
-#define ILK_DISPLAY_DFTWM 8
-#define ILK_CURSOR_FIFO 32
-#define ILK_CURSOR_MAXWM 16
-#define ILK_CURSOR_DFTWM 8
-
-#define ILK_DISPLAY_SR_FIFO 512
-#define ILK_DISPLAY_MAX_SRWM 0x1ff
-#define ILK_DISPLAY_DFT_SRWM 0x3f
-#define ILK_CURSOR_SR_FIFO 64
-#define ILK_CURSOR_MAX_SRWM 0x3f
-#define ILK_CURSOR_DFT_SRWM 8
-
-#define ILK_FIFO_LINE_SIZE 64
-
-/* define the WM info on Sandybridge */
-#define SNB_DISPLAY_FIFO 128
-#define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */
-#define SNB_DISPLAY_DFTWM 8
-#define SNB_CURSOR_FIFO 32
-#define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */
-#define SNB_CURSOR_DFTWM 8
-
-#define SNB_DISPLAY_SR_FIFO 512
-#define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */
-#define SNB_DISPLAY_DFT_SRWM 0x3f
-#define SNB_CURSOR_SR_FIFO 64
-#define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */
-#define SNB_CURSOR_DFT_SRWM 8
-
-#define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */
-
-#define SNB_FIFO_LINE_SIZE 64
-
/* the address where we get all kinds of latency value */
#define SSKPD 0x5d10
@@ -3600,8 +3578,6 @@
#define DISP_BASEADDR_MASK (0xfffff000)
#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
-#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
- (I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
/* VBIOS flags */
#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410)
@@ -3787,7 +3763,7 @@
#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
#define SP_ENABLE (1<<31)
-#define SP_GEAMMA_ENABLE (1<<30)
+#define SP_GAMMA_ENABLE (1<<30)
#define SP_PIXFORMAT_MASK (0xf<<26)
#define SP_FORMAT_YUV422 (0<<26)
#define SP_FORMAT_BGR565 (5<<26)
@@ -4139,6 +4115,8 @@
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
+#define DISP_ARB_CTL2 0x45004
+#define DISP_DATA_PARTITION_5_6 (1<<6)
#define GEN7_MSG_CTL 0x45010
#define WAIT_FOR_PCH_RESET_ACK (1<<1)
#define WAIT_FOR_PCH_FLR_ACK (1<<0)
@@ -4159,6 +4137,10 @@
#define GEN7_L3SQCREG4 0xb034
#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+/* GEN8 chicken */
+#define HDC_CHICKEN0 0x7300
+#define HDC_FORCE_NON_COHERENT (1<<4)
+
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
@@ -4843,6 +4825,8 @@
#define FORCEWAKE_ACK 0x130090
#define VLV_GTLC_WAKE_CTRL 0x130090
#define VLV_GTLC_PW_STATUS 0x130094
+#define VLV_GTLC_PW_RENDER_STATUS_MASK 0x80
+#define VLV_GTLC_PW_MEDIA_STATUS_MASK 0x20
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
@@ -4851,12 +4835,16 @@
#define FORCEWAKE_MT_ENABLE (1<<5)
#define GTFIFODBG 0x120000
-#define GT_FIFO_CPU_ERROR_MASK 7
+#define GT_FIFO_SBDROPERR (1<<6)
+#define GT_FIFO_BLOBDROPERR (1<<5)
+#define GT_FIFO_SB_READ_ABORTERR (1<<4)
+#define GT_FIFO_DROPERR (1<<3)
#define GT_FIFO_OVFERR (1<<2)
#define GT_FIFO_IAWRERR (1<<1)
#define GT_FIFO_IARDERR (1<<0)
-#define GT_FIFO_FREE_ENTRIES 0x120008
+#define GTFIFOCTL 0x120008
+#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
#define HSW_IDICR 0x9008
@@ -4890,6 +4878,7 @@
#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
+#define VLV_RC_CTL_CTX_RST_PARALLEL (1<<24)
#define GEN7_RC_CTL_TO_MODE (1<<28)
#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
#define GEN6_RC_CTL_HW_ENABLE (1<<31)
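Most of the DPIO renames above keep the existing two-way selector pattern: a macro picks between a channel-0 and a channel-1 offset. A small standalone sketch of how such a selector works (_PICK2 is a stand-in for the driver's _PIPE()/_PORT() helpers; the offsets are the ones quoted in the VLV_PLL_DW3 hunk):

#include <stdio.h>

/* Stand-in for the driver's _PIPE()/_PORT() helpers: index 0 yields the
 * first offset, index 1 the second. */
#define _PICK2(i, a, b)		((a) + (i) * ((b) - (a)))

/* Channel offsets as quoted in the VLV_PLL_DW3 definitions above. */
#define PLL_DW3_CH0		0x800c
#define PLL_DW3_CH1		0x802c
#define PLL_DW3(ch)		_PICK2(ch, PLL_DW3_CH0, PLL_DW3_CH1)

int main(void)
{
	printf("PLL_DW3(0) = 0x%x\n", PLL_DW3(0));	/* 0x800c */
	printf("PLL_DW3(1) = 0x%x\n", PLL_DW3(1));	/* 0x802c */
	return 0;
}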
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 98790c7cccb1..8150fdc08d49 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -192,7 +192,6 @@ static void i915_restore_vga(struct drm_device *dev)
static void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
/* Display arbitration control */
if (INTEL_INFO(dev)->gen <= 4)
@@ -203,46 +202,27 @@ static void i915_save_display(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_save_display_reg(dev);
- spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-
/* LVDS state */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
- dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
- dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
- dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
- dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- dev_priv->regfile.saveBLC_PWM_CTL =
- I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
dev_priv->regfile.saveBLC_HIST_CTL =
I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
- dev_priv->regfile.saveBLC_PWM_CTL2 =
- I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
- dev_priv->regfile.saveBLC_PWM_CTL_B =
- I915_READ(VLV_BLC_PWM_CTL(PIPE_B));
dev_priv->regfile.saveBLC_HIST_CTL_B =
I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
- dev_priv->regfile.saveBLC_PWM_CTL2_B =
- I915_READ(VLV_BLC_PWM_CTL2(PIPE_B));
} else {
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
- if (INTEL_INFO(dev)->gen >= 4)
- dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
dev_priv->regfile.saveLVDS = I915_READ(LVDS);
}
- spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
-
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
@@ -257,7 +237,7 @@ static void i915_save_display(struct drm_device *dev)
}
/* Only regfile.save FBC state on the platform that supports FBC */
- if (I915_HAS_FBC(dev)) {
+ if (HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
} else if (IS_GM45(dev)) {
@@ -278,7 +258,6 @@ static void i915_restore_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 mask = 0xffffffff;
- unsigned long flags;
/* Display arbitration */
if (INTEL_INFO(dev)->gen <= 4)
@@ -287,12 +266,6 @@ static void i915_restore_display(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_restore_display_reg(dev);
- spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-
- /* LVDS state */
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
-
if (drm_core_check_feature(dev, DRIVER_MODESET))
mask = ~LVDS_PORT_EN;
@@ -305,13 +278,6 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
- I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
- /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
- * otherwise we get blank eDP screen after S3 on some machines
- */
- I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
@@ -319,21 +285,12 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(RSTDBYCTL,
dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
} else if (IS_VALLEYVIEW(dev)) {
- I915_WRITE(VLV_BLC_PWM_CTL(PIPE_A),
- dev_priv->regfile.saveBLC_PWM_CTL);
I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
dev_priv->regfile.saveBLC_HIST_CTL);
- I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_A),
- dev_priv->regfile.saveBLC_PWM_CTL2);
- I915_WRITE(VLV_BLC_PWM_CTL(PIPE_B),
- dev_priv->regfile.saveBLC_PWM_CTL);
I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
dev_priv->regfile.saveBLC_HIST_CTL);
- I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_B),
- dev_priv->regfile.saveBLC_PWM_CTL2);
} else {
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
- I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
@@ -341,11 +298,9 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
}
- spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
-
/* only restore FBC info on the platform that supports FBC*/
intel_disable_fbc(dev);
- if (I915_HAS_FBC(dev)) {
+ if (HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
} else if (IS_GM45(dev)) {
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index cef38fd320a7..33bcae314bf8 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -40,10 +40,13 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
struct drm_i915_private *dev_priv = dev->dev_private;
u64 raw_time; /* 32b value may overflow during fixed point math */
u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
+ u32 ret;
if (!intel_enable_rc6(dev))
return 0;
+ intel_runtime_pm_get(dev_priv);
+
/* On VLV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev)) {
u32 clkctl2;
@@ -52,7 +55,8 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
CLK_CTL2_CZCOUNT_30NS_SHIFT;
if (!clkctl2) {
WARN(!clkctl2, "bogus CZ count value");
- return 0;
+ ret = 0;
+ goto out;
}
units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
@@ -62,7 +66,11 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
}
raw_time = I915_READ(reg) * units;
- return DIV_ROUND_UP_ULL(raw_time, div);
+ ret = DIV_ROUND_UP_ULL(raw_time, div);
+
+out:
+ intel_runtime_pm_put(dev_priv);
+ return ret;
}
static ssize_t
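The calc_residency() hunk above wraps the register read in a runtime-PM reference and routes every exit through a single out label; the conversion itself is plain fixed-point arithmetic. A standalone sketch of the default (non-VLV) scaling with an example counter value (the 1.28 us tick and the 128/100000 factors come from the surrounding code, everything else is illustrative):

#include <stdio.h>
#include <stdint.h>

/* Round-up 64-bit division, like the kernel's DIV_ROUND_UP_ULL(). */
static uint64_t div_round_up_ull(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	/* Counter ticks are 1.28 us = 128 * 10 ns; 100000 * 10 ns = 1 ms,
	 * so residency_ms = ticks * 128 / 100000, rounded up. */
	const uint64_t units = 128;
	const uint64_t div = 100000;
	uint32_t raw = 781250;	/* example register reading: one second */

	uint64_t raw_time = (uint64_t)raw * units;
	printf("rc6 residency: %llu ms\n",
	       (unsigned long long)div_round_up_ull(raw_time, div));
	return 0;
}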
@@ -183,13 +191,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
int slice = (int)(uintptr_t)attr->private;
int ret;
+ if (!HAS_HW_CONTEXTS(drm_dev))
+ return -ENXIO;
+
ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;
- if (dev_priv->hw_contexts_disabled)
- return -ENXIO;
-
ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
return ret;
@@ -259,7 +267,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
if (IS_VALLEYVIEW(dev_priv->dev)) {
u32 freq;
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
+ ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else {
ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
}
@@ -276,8 +284,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct drm_i915_private *dev_priv = dev->dev_private;
return snprintf(buf, PAGE_SIZE, "%d\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.rpe_delay));
+ vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -291,7 +298,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
- ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
+ ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
else
ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -318,7 +325,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) {
- val = vlv_freq_opcode(dev_priv->mem_freq, val);
+ val = vlv_freq_opcode(dev_priv, val);
hw_max = valleyview_rps_max_freq(dev_priv);
hw_min = valleyview_rps_min_freq(dev_priv);
@@ -342,15 +349,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
DRM_DEBUG("User requested overclocking to %d\n",
val * GT_FREQUENCY_MULTIPLIER);
+ dev_priv->rps.max_delay = val;
+
if (dev_priv->rps.cur_delay > val) {
- if (IS_VALLEYVIEW(dev_priv->dev))
- valleyview_set_rps(dev_priv->dev, val);
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
else
- gen6_set_rps(dev_priv->dev, val);
+ gen6_set_rps(dev, val);
}
- dev_priv->rps.max_delay = val;
-
mutex_unlock(&dev_priv->rps.hw_lock);
return count;
@@ -367,7 +374,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
- ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
+ ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
else
ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -394,7 +401,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev)) {
- val = vlv_freq_opcode(dev_priv->mem_freq, val);
+ val = vlv_freq_opcode(dev_priv, val);
hw_max = valleyview_rps_max_freq(dev_priv);
hw_min = valleyview_rps_min_freq(dev_priv);
@@ -411,15 +418,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
return -EINVAL;
}
+ dev_priv->rps.min_delay = val;
+
if (dev_priv->rps.cur_delay < val) {
if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val);
else
- gen6_set_rps(dev_priv->dev, val);
+ gen6_set_rps(dev, val);
}
- dev_priv->rps.min_delay = val;
-
mutex_unlock(&dev_priv->rps.hw_lock);
return count;
@@ -449,7 +456,9 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
if (attr == &dev_attr_gt_RP0_freq_mhz) {
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
index 967da4772c44..caa18e855815 100644
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -270,6 +270,18 @@ void i915_save_display_reg(struct drm_device *dev)
}
/* FIXME: regfile.save TV & SDVO state */
+ /* Backlight */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+ } else {
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ if (INTEL_INFO(dev)->gen >= 4)
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ }
+
return;
}
@@ -280,6 +292,21 @@ void i915_restore_display_reg(struct drm_device *dev)
int dpll_b_reg, fpb0_reg, fpb1_reg;
int i;
+ /* Backlight */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+ /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+ * otherwise we get blank eDP screen after S3 on some machines
+ */
+ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
+ } else {
+ if (INTEL_INFO(dev)->gen >= 4)
+ I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+ }
+
/* Display port ratios (must be done before clock is set) */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
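The restore path moved here deliberately preserves the documented ordering: BLC_PWM_CPU_CTL2 is written before BLC_PWM_CPU_CTL. A tiny standalone sketch of that ordering constraint (the fake register file and names are illustrative only):

#include <stdio.h>
#include <stdint.h>

enum { REG_BLC_PWM_CPU_CTL2, REG_BLC_PWM_CPU_CTL, NUM_REGS };

static uint32_t fake_mmio[NUM_REGS];	/* stand-in for the real registers */

static void write_reg(int reg, uint32_t val)
{
	fake_mmio[reg] = val;
	printf("write reg %d = 0x%08x\n", reg, (unsigned)val);
}

/* Restore in the order the comment requires: CTL2 first, then CTL. */
static void restore_backlight(uint32_t saved_ctl, uint32_t saved_ctl2)
{
	write_reg(REG_BLC_PWM_CPU_CTL2, saved_ctl2);
	write_reg(REG_BLC_PWM_CPU_CTL, saved_ctl);
}

int main(void)
{
	restore_backlight(0x5a5a0000, 0x80000000);
	return 0;
}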
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index dfff0907f70e..d96eee1ae9c5 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -6,14 +6,10 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/vga_switcheroo.h>
-#include <acpi/acpi_drivers.h>
-
#include <drm/drmP.h>
#include "i915_drv.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
-
-#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */
#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
static struct intel_dsm_priv {
@@ -28,61 +24,6 @@ static const u8 intel_dsm_guid[] = {
0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
};
-static int intel_dsm(acpi_handle handle, int func)
-{
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_object_list input;
- union acpi_object params[4];
- union acpi_object *obj;
- u32 result;
- int ret = 0;
-
- input.count = 4;
- input.pointer = params;
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(intel_dsm_guid);
- params[0].buffer.pointer = (char *)intel_dsm_guid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = INTEL_DSM_REVISION_ID;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = func;
- params[3].type = ACPI_TYPE_PACKAGE;
- params[3].package.count = 0;
- params[3].package.elements = NULL;
-
- ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
- if (ret) {
- DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
- return ret;
- }
-
- obj = (union acpi_object *)output.pointer;
-
- result = 0;
- switch (obj->type) {
- case ACPI_TYPE_INTEGER:
- result = obj->integer.value;
- break;
-
- case ACPI_TYPE_BUFFER:
- if (obj->buffer.length == 4) {
- result = (obj->buffer.pointer[0] |
- (obj->buffer.pointer[1] << 8) |
- (obj->buffer.pointer[2] << 16) |
- (obj->buffer.pointer[3] << 24));
- break;
- }
- default:
- ret = -EINVAL;
- break;
- }
- if (result == 0x80000002)
- ret = -ENODEV;
-
- kfree(output.pointer);
- return ret;
-}
-
static char *intel_dsm_port_name(u8 id)
{
switch (id) {
@@ -137,83 +78,56 @@ static char *intel_dsm_mux_type(u8 type)
static void intel_dsm_platform_mux_info(void)
{
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_object_list input;
- union acpi_object params[4];
- union acpi_object *pkg;
- int i, ret;
-
- input.count = 4;
- input.pointer = params;
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(intel_dsm_guid);
- params[0].buffer.pointer = (char *)intel_dsm_guid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = INTEL_DSM_REVISION_ID;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
- params[3].type = ACPI_TYPE_PACKAGE;
- params[3].package.count = 0;
- params[3].package.elements = NULL;
-
- ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
- &output);
- if (ret) {
- DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
- goto out;
+ int i;
+ union acpi_object *pkg, *connector_count;
+
+ pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, intel_dsm_guid,
+ INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
+ NULL, ACPI_TYPE_PACKAGE);
+ if (!pkg) {
+ DRM_DEBUG_DRIVER("failed to evaluate _DSM\n");
+ return;
}
- pkg = (union acpi_object *)output.pointer;
-
- if (pkg->type == ACPI_TYPE_PACKAGE) {
- union acpi_object *connector_count = &pkg->package.elements[0];
- DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
- (unsigned long long)connector_count->integer.value);
- for (i = 1; i < pkg->package.count; i++) {
- union acpi_object *obj = &pkg->package.elements[i];
- union acpi_object *connector_id =
- &obj->package.elements[0];
- union acpi_object *info = &obj->package.elements[1];
- DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
- (unsigned long long)connector_id->integer.value);
- DRM_DEBUG_DRIVER(" port id: %s\n",
- intel_dsm_port_name(info->buffer.pointer[0]));
- DRM_DEBUG_DRIVER(" display mux info: %s\n",
- intel_dsm_mux_type(info->buffer.pointer[1]));
- DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n",
- intel_dsm_mux_type(info->buffer.pointer[2]));
- DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
- intel_dsm_mux_type(info->buffer.pointer[3]));
- }
+ connector_count = &pkg->package.elements[0];
+ DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
+ (unsigned long long)connector_count->integer.value);
+ for (i = 1; i < pkg->package.count; i++) {
+ union acpi_object *obj = &pkg->package.elements[i];
+ union acpi_object *connector_id = &obj->package.elements[0];
+ union acpi_object *info = &obj->package.elements[1];
+ DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
+ (unsigned long long)connector_id->integer.value);
+ DRM_DEBUG_DRIVER(" port id: %s\n",
+ intel_dsm_port_name(info->buffer.pointer[0]));
+ DRM_DEBUG_DRIVER(" display mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[1]));
+ DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[2]));
+ DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[3]));
}
-out:
- kfree(output.pointer);
+ ACPI_FREE(pkg);
}
static bool intel_dsm_pci_probe(struct pci_dev *pdev)
{
acpi_handle dhandle;
- int ret;
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
- if (!acpi_has_method(dhandle, "_DSM")) {
+ if (!acpi_check_dsm(dhandle, intel_dsm_guid, INTEL_DSM_REVISION_ID,
+ 1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
DRM_DEBUG_KMS("no _DSM method for intel device\n");
return false;
}
- ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS);
- if (ret < 0) {
- DRM_DEBUG_KMS("failed to get supported _DSM functions\n");
- return false;
- }
-
intel_dsm_priv.dhandle = dhandle;
-
intel_dsm_platform_mux_info();
+
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index e4fba39631a5..f22041973f3a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -281,6 +281,34 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
}
}
+static void
+parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+ const struct bdb_lfp_backlight_data *backlight_data;
+ const struct bdb_lfp_backlight_data_entry *entry;
+
+ backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
+ if (!backlight_data)
+ return;
+
+ if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
+ DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n",
+ backlight_data->entry_size);
+ return;
+ }
+
+ entry = &backlight_data->data[panel_type];
+
+ dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
+ dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+ DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
+ "active %s, min brightness %u, level %u\n",
+ dev_priv->vbt.backlight.pwm_freq_hz,
+ dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
+ entry->min_brightness,
+ backlight_data->level[panel_type]);
+}
+
/* Try to find sdvo panel data */
static void
parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
@@ -327,12 +355,12 @@ static int intel_bios_ssc_frequency(struct drm_device *dev,
{
switch (INTEL_INFO(dev)->gen) {
case 2:
- return alternate ? 66 : 48;
+ return alternate ? 66667 : 48000;
case 3:
case 4:
- return alternate ? 100 : 96;
+ return alternate ? 100000 : 96000;
default:
- return alternate ? 100 : 120;
+ return alternate ? 100000 : 120000;
}
}
@@ -796,7 +824,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
*/
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev,
!HAS_PCH_SPLIT(dev));
- DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
+ DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
for (port = PORT_A; port < I915_MAX_PORTS; port++) {
struct ddi_vbt_port_info *info =
@@ -894,6 +922,7 @@ intel_parse_bios(struct drm_device *dev)
parse_general_features(dev_priv, bdb);
parse_general_definitions(dev_priv, bdb);
parse_lfp_panel_data(dev_priv, bdb);
+ parse_lfp_backlight(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
parse_sdvo_device_mapping(dev_priv, bdb);
parse_device_mapping(dev_priv, bdb);
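parse_lfp_backlight() above guards itself by comparing the block's per-entry size against the struct it was compiled with before indexing by panel_type. A standalone sketch of that pattern (the struct layout mirrors the header hunk further down; the table contents are made up):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Mirrors bdb_lfp_backlight_data_entry; packing matters because the VBT
 * stores the entry byte-for-byte. */
struct backlight_entry {
	uint8_t type:2;
	uint8_t active_low_pwm:1;
	uint8_t obsolete1:5;
	uint16_t pwm_freq_hz;
	uint8_t min_brightness;
	uint8_t obsolete2;
	uint8_t obsolete3;
} __attribute__((packed));

static int parse_backlight(const struct backlight_entry *entries,
			   uint8_t entry_size, int panel_type)
{
	/* Reject blocks whose per-entry size does not match what we were
	 * compiled against, as parse_lfp_backlight() does. */
	if (entry_size != sizeof(struct backlight_entry)) {
		fprintf(stderr, "unsupported entry size %u\n", entry_size);
		return -1;
	}

	const struct backlight_entry *e = &entries[panel_type];
	printf("PWM %u Hz, active %s, min brightness %u\n",
	       (unsigned)e->pwm_freq_hz,
	       e->active_low_pwm ? "low" : "high",
	       (unsigned)e->min_brightness);
	return 0;
}

int main(void)
{
	struct backlight_entry table[16];

	memset(table, 0, sizeof(table));
	table[2] = (struct backlight_entry){ .type = 2, .active_low_pwm = 1,
					     .pwm_freq_hz = 200,
					     .min_brightness = 5 };
	return parse_backlight(table, sizeof(table[0]), 2) ? 1 : 0;
}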
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index f580a2b0ddd3..282de5e9f39d 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -39,7 +39,7 @@ struct vbt_header {
u8 reserved0;
u32 bdb_offset; /**< from beginning of VBT */
u32 aim_offset[4]; /**< from beginning of VBT */
-} __attribute__((packed));
+} __packed;
struct bdb_header {
u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
@@ -65,7 +65,7 @@ struct vbios_data {
u8 rsvd4; /* popup memory size */
u8 resize_pci_bios;
u8 rsvd5; /* is crt already on ddc2 */
-} __attribute__((packed));
+} __packed;
/*
* There are several types of BIOS data blocks (BDBs), each block has
@@ -142,7 +142,7 @@ struct bdb_general_features {
u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
u8 rsvd11:3; /* finish byte */
-} __attribute__((packed));
+} __packed;
/* pre-915 */
#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
@@ -225,7 +225,7 @@ struct old_child_dev_config {
u8 dvo2_wiring;
u16 extended_type;
u8 dvo_function;
-} __attribute__((packed));
+} __packed;
/* This one contains field offsets that are known to be common for all BDB
 * versions. Notice that the meaning of the contents may still change,
@@ -238,7 +238,7 @@ struct common_child_dev_config {
u8 not_common2[2];
u8 ddc_pin;
u16 edid_ptr;
-} __attribute__((packed));
+} __packed;
/* This field changes depending on the BDB version, so the most reliable way to
* read it is by checking the BDB version and reading the raw pointer. */
@@ -279,7 +279,7 @@ struct bdb_general_definitions {
* sizeof(child_device_config);
*/
union child_device_config devices[0];
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_options {
u8 panel_type;
@@ -293,7 +293,7 @@ struct bdb_lvds_options {
u8 lvds_edid:1;
u8 rsvd2:1;
u8 rsvd4;
-} __attribute__((packed));
+} __packed;
/* LFP pointer table contains entries to the struct below */
struct bdb_lvds_lfp_data_ptr {
@@ -303,12 +303,12 @@ struct bdb_lvds_lfp_data_ptr {
u8 dvo_table_size;
u16 panel_pnp_id_offset;
u8 pnp_table_size;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data_ptrs {
u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
struct bdb_lvds_lfp_data_ptr ptr[16];
-} __attribute__((packed));
+} __packed;
/* LFP data has 3 blocks per entry */
struct lvds_fp_timing {
@@ -325,7 +325,7 @@ struct lvds_fp_timing {
u32 pfit_reg;
u32 pfit_reg_val;
u16 terminator;
-} __attribute__((packed));
+} __packed;
struct lvds_dvo_timing {
u16 clock; /**< In 10khz */
@@ -353,7 +353,7 @@ struct lvds_dvo_timing {
u8 vsync_positive:1;
u8 hsync_positive:1;
u8 rsvd2:1;
-} __attribute__((packed));
+} __packed;
struct lvds_pnp_id {
u16 mfg_name;
@@ -361,17 +361,33 @@ struct lvds_pnp_id {
u32 serial;
u8 mfg_week;
u8 mfg_year;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data_entry {
struct lvds_fp_timing fp_timing;
struct lvds_dvo_timing dvo_timing;
struct lvds_pnp_id pnp_id;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data {
struct bdb_lvds_lfp_data_entry data[16];
-} __attribute__((packed));
+} __packed;
+
+struct bdb_lfp_backlight_data_entry {
+ u8 type:2;
+ u8 active_low_pwm:1;
+ u8 obsolete1:5;
+ u16 pwm_freq_hz;
+ u8 min_brightness;
+ u8 obsolete2;
+ u8 obsolete3;
+} __packed;
+
+struct bdb_lfp_backlight_data {
+ u8 entry_size;
+ struct bdb_lfp_backlight_data_entry data[16];
+ u8 level[16];
+} __packed;
struct aimdb_header {
char signature[16];
@@ -379,12 +395,12 @@ struct aimdb_header {
u16 aimdb_version;
u16 aimdb_header_size;
u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
struct aimdb_block {
u8 aimdb_id;
u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
struct vch_panel_data {
u16 fp_timing_offset;
@@ -395,12 +411,12 @@ struct vch_panel_data {
u8 text_fitting_size;
u16 graphics_fitting_offset;
u8 graphics_fitting_size;
-} __attribute__((packed));
+} __packed;
struct vch_bdb_22 {
struct aimdb_block aimdb_block;
struct vch_panel_data panels[16];
-} __attribute__((packed));
+} __packed;
struct bdb_sdvo_lvds_options {
u8 panel_backlight;
@@ -416,7 +432,7 @@ struct bdb_sdvo_lvds_options {
u8 panel_misc_bits_2;
u8 panel_misc_bits_3;
u8 panel_misc_bits_4;
-} __attribute__((packed));
+} __packed;
#define BDB_DRIVER_FEATURE_NO_LVDS 0
@@ -462,7 +478,7 @@ struct bdb_driver_features {
u8 hdmi_termination;
u8 custom_vbt_version;
-} __attribute__((packed));
+} __packed;
#define EDP_18BPP 0
#define EDP_24BPP 1
@@ -487,14 +503,14 @@ struct edp_power_seq {
u16 t9;
u16 t10;
u16 t11_t12;
-} __attribute__ ((packed));
+} __packed;
struct edp_link_params {
u8 rate:4;
u8 lanes:4;
u8 preemphasis:4;
u8 vswing:4;
-} __attribute__ ((packed));
+} __packed;
struct bdb_edp {
struct edp_power_seq power_seqs[16];
@@ -505,7 +521,7 @@ struct bdb_edp {
/* ith bit indicates enabled/disabled for (i+1)th panel */
u16 edp_s3d_feature;
u16 edp_t3_optimization;
-} __attribute__ ((packed));
+} __packed;
void intel_setup_bios(struct drm_device *dev);
int intel_parse_bios(struct drm_device *dev);
@@ -733,6 +749,6 @@ struct bdb_mipi {
u32 hl_switch_cnt;
u32 lp_byte_clk;
u32 clk_lane_switch_cnt;
-} __attribute__((packed));
+} __packed;
#endif /* _I830_BIOS_H_ */
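The only change in the hunks above is the spelling: in the kernel, __packed is shorthand for __attribute__((packed)), so the on-disk VBT layout is untouched. A short demonstration of what packing buys (struct names here are arbitrary):

#include <stdio.h>
#include <stdint.h>

/* Same expansion the kernel's compiler headers use. */
#define __packed __attribute__((packed))

struct padded   { uint8_t a; uint16_t b; };		/* 4 bytes with padding */
struct unpadded { uint8_t a; uint16_t b; } __packed;	/* 3 bytes, no padding */

int main(void)
{
	printf("padded: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct padded), sizeof(struct unpadded));
	return 0;
}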
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b5b1b9b23adf..e2e39e65f109 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -222,8 +222,9 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode)
intel_modeset_check_state(connector->dev);
}
-static int intel_crt_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_crt_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 526c8ded16b0..e06b9e017d6b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -73,7 +73,7 @@ static const u32 hsw_ddi_translations_hdmi[] = {
};
static const u32 bdw_ddi_translations_edp[] = {
- 0x00FFFFFF, 0x00000012, /* DP parameters */
+ 0x00FFFFFF, 0x00000012, /* eDP parameters */
0x00EBAFFF, 0x00020011,
0x00C71FFF, 0x0006000F,
0x00FFFFFF, 0x00020011,
@@ -696,25 +696,25 @@ intel_ddi_calculate_wrpll(int clock /* in Hz */,
*n2_out = best.n2;
*p_out = best.p;
*r2_out = best.r2;
-
- DRM_DEBUG_KMS("WRPLL: %dHz refresh rate with p=%d, n2=%d r2=%d\n",
- clock, *p_out, *n2_out, *r2_out);
}
-bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
+/*
+ * Tries to find a PLL for the CRTC. If it finds one, it increases the refcount and
+ * stores it in intel_crtc->ddi_pll_sel, so other mode sets won't be able to
+ * steal the selected PLL. You need to call intel_ddi_pll_enable to actually
+ * enable the PLL.
+ */
+bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
int type = intel_encoder->type;
enum pipe pipe = intel_crtc->pipe;
- uint32_t reg, val;
int clock = intel_crtc->config.port_clock;
- /* TODO: reuse PLLs when possible (compare values) */
-
intel_ddi_put_crtc_pll(crtc);
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
@@ -736,66 +736,145 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
return false;
}
- /* We don't need to turn any PLL on because we'll use LCPLL. */
- return true;
-
} else if (type == INTEL_OUTPUT_HDMI) {
+ uint32_t reg, val;
unsigned p, n2, r2;
- if (plls->wrpll1_refcount == 0) {
+ intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+ val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
+
+ if (val == I915_READ(WRPLL_CTL1)) {
+ DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
+ pipe_name(pipe));
+ reg = WRPLL_CTL1;
+ } else if (val == I915_READ(WRPLL_CTL2)) {
+ DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
+ pipe_name(pipe));
+ reg = WRPLL_CTL2;
+ } else if (plls->wrpll1_refcount == 0) {
DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
pipe_name(pipe));
- plls->wrpll1_refcount++;
reg = WRPLL_CTL1;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
} else if (plls->wrpll2_refcount == 0) {
DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
pipe_name(pipe));
- plls->wrpll2_refcount++;
reg = WRPLL_CTL2;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
} else {
DRM_ERROR("No WRPLLs available!\n");
return false;
}
- WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
- "WRPLL already enabled\n");
+ DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+ clock, p, n2, r2);
- intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
- val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
- WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
- WRPLL_DIVIDER_POST(p);
+ if (reg == WRPLL_CTL1) {
+ plls->wrpll1_refcount++;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
+ } else {
+ plls->wrpll2_refcount++;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
+ }
} else if (type == INTEL_OUTPUT_ANALOG) {
if (plls->spll_refcount == 0) {
DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
pipe_name(pipe));
plls->spll_refcount++;
- reg = SPLL_CTL;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
} else {
DRM_ERROR("SPLL already in use\n");
return false;
}
- WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
- "SPLL already enabled\n");
-
- val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
-
} else {
WARN(1, "Invalid DDI encoder type %d\n", type);
return false;
}
- I915_WRITE(reg, val);
- udelay(20);
-
return true;
}
+/*
+ * To be called after intel_ddi_pll_select(). That one selects the PLL to be
+ * used, this one actually enables the PLL.
+ */
+void intel_ddi_pll_enable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ int clock = crtc->config.port_clock;
+ uint32_t reg, cur_val, new_val;
+ int refcount;
+ const char *pll_name;
+ uint32_t enable_bit = (1 << 31);
+ unsigned int p, n2, r2;
+
+ BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
+ BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);
+
+ switch (crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_LCPLL_2700:
+ case PORT_CLK_SEL_LCPLL_1350:
+ case PORT_CLK_SEL_LCPLL_810:
+ /*
+ * LCPLL should always be enabled at this point of the mode set
+ * sequence, so nothing to do.
+ */
+ return;
+
+ case PORT_CLK_SEL_SPLL:
+ pll_name = "SPLL";
+ reg = SPLL_CTL;
+ refcount = plls->spll_refcount;
+ new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
+ SPLL_PLL_SSC;
+ break;
+
+ case PORT_CLK_SEL_WRPLL1:
+ case PORT_CLK_SEL_WRPLL2:
+ if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
+ pll_name = "WRPLL1";
+ reg = WRPLL_CTL1;
+ refcount = plls->wrpll1_refcount;
+ } else {
+ pll_name = "WRPLL2";
+ reg = WRPLL_CTL2;
+ refcount = plls->wrpll2_refcount;
+ }
+
+ intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+ new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) |
+ WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);
+
+ break;
+
+ case PORT_CLK_SEL_NONE:
+ WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
+ return;
+ default:
+ WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
+ return;
+ }
+
+ cur_val = I915_READ(reg);
+
+ WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
+ if (refcount == 1) {
+ WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
+ I915_WRITE(reg, new_val);
+ POSTING_READ(reg);
+ udelay(20);
+ } else {
+ WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
+ }
+}
+
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -1057,12 +1136,18 @@ void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *intel_crtc;
+ dev_priv->ddi_plls.spll_refcount = 0;
+ dev_priv->ddi_plls.wrpll1_refcount = 0;
+ dev_priv->ddi_plls.wrpll2_refcount = 0;
+
for_each_pipe(pipe) {
intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- if (!intel_crtc->active)
+ if (!intel_crtc->active) {
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
continue;
+ }
intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
pipe);
@@ -1115,9 +1200,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_panel_on(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, true);
}
WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
@@ -1160,7 +1243,6 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
}
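The select/enable split above boils down to a refcount discipline: selection picks a PLL (preferring one already programmed with the matching value) and bumps its refcount, while enabling programs the hardware only for the first user and merely sanity-checks it for later ones. A toy standalone model of that discipline (the struct, names, and fake register are illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct pll {
	const char *name;
	int refcount;
	uint32_t reg;		/* stands in for WRPLL_CTL1/2 */
	bool enabled;
};

static struct pll plls[2] = { { "WRPLL1" }, { "WRPLL2" } };

static struct pll *pll_select(uint32_t wanted)
{
	/* First preference: a PLL already programmed with the wanted value. */
	for (int i = 0; i < 2; i++)
		if (plls[i].enabled && plls[i].reg == wanted) {
			plls[i].refcount++;
			return &plls[i];
		}
	/* Otherwise: any free PLL. */
	for (int i = 0; i < 2; i++)
		if (plls[i].refcount == 0) {
			plls[i].refcount++;
			return &plls[i];
		}
	return NULL;		/* no PLL available */
}

static void pll_enable(struct pll *pll, uint32_t val)
{
	if (pll->refcount == 1) {
		/* First user programs the hardware. */
		pll->reg = val;
		pll->enabled = true;
	}
	/* Later users only expect it to be up already. */
	printf("%s: refcount %d, reg 0x%08x\n",
	       pll->name, pll->refcount, (unsigned)pll->reg);
}

int main(void)
{
	struct pll *a = pll_select(0x1234);
	pll_enable(a, 0x1234);

	struct pll *b = pll_select(0x1234);	/* reuses WRPLL1 */
	if (b)
		pll_enable(b, 0x1234);
	return 0;
}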
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 080f6fd4e839..9fa24347963a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -90,8 +90,8 @@ intel_fdi_link_freq(struct drm_device *dev)
static const intel_limit_t intel_limits_i8xx_dac = {
.dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 930000, .max = 1400000 },
- .n = { .min = 3, .max = 16 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
.m = { .min = 96, .max = 140 },
.m1 = { .min = 18, .max = 26 },
.m2 = { .min = 6, .max = 16 },
@@ -103,8 +103,8 @@ static const intel_limit_t intel_limits_i8xx_dac = {
static const intel_limit_t intel_limits_i8xx_dvo = {
.dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 930000, .max = 1400000 },
- .n = { .min = 3, .max = 16 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
.m = { .min = 96, .max = 140 },
.m1 = { .min = 18, .max = 26 },
.m2 = { .min = 6, .max = 16 },
@@ -116,8 +116,8 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
static const intel_limit_t intel_limits_i8xx_lvds = {
.dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 930000, .max = 1400000 },
- .n = { .min = 3, .max = 16 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
.m = { .min = 96, .max = 140 },
.m1 = { .min = 18, .max = 26 },
.m2 = { .min = 6, .max = 16 },
@@ -329,6 +329,8 @@ static void vlv_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
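The WARN_ON guards added above protect the divisions right below them; the helpers are otherwise straight divider arithmetic. A standalone sketch of the vlv_clock() math with example divider values (values and the kHz units are illustrative):

#include <stdio.h>
#include <stdint.h>

struct clock {
	int n, m1, m2, p1, p2;
	int m, p, vco, dot;
};

/* Like the kernel's DIV_ROUND_CLOSEST() for positive operands. */
static int div_round_closest(int64_t a, int64_t b)
{
	return (int)((a + b / 2) / b);
}

static void vlv_clock(int refclk, struct clock *c)
{
	c->m = c->m1 * c->m2;
	c->p = c->p1 * c->p2;
	if (c->n == 0 || c->p == 0)
		return;		/* the kernel WARNs and bails out here */
	c->vco = div_round_closest((int64_t)refclk * c->m, c->n);
	c->dot = div_round_closest(c->vco, c->p);
}

int main(void)
{
	struct clock c = { .n = 2, .m1 = 3, .m2 = 80, .p1 = 3, .p2 = 20 };

	vlv_clock(27000, &c);	/* example reference clock in kHz */
	printf("vco = %d kHz, dot = %d kHz\n", c.vco, c.dot);
	return 0;
}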
@@ -430,6 +432,8 @@ static void pineview_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
@@ -443,6 +447,8 @@ static void i9xx_clock(int refclk, intel_clock_t *clock)
{
clock->m = i9xx_dpll_compute_m(clock);
clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
+ return;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
@@ -748,10 +754,10 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
return intel_crtc->config.cpu_transcoder;
}
-static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
+static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 frame, frame_reg = PIPEFRAME(pipe);
+ u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
frame = I915_READ(frame_reg);
@@ -772,8 +778,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipestat_reg = PIPESTAT(pipe);
- if (INTEL_INFO(dev)->gen >= 5) {
- ironlake_wait_for_vblank(dev, pipe);
+ if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ g4x_wait_for_vblank(dev, pipe);
return;
}
@@ -1205,15 +1211,12 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
}
}
-static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
u32 val;
bool enabled;
- if (HAS_PCH_LPT(dev_priv->dev)) {
- DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
- return;
- }
+ WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
val = I915_READ(PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
@@ -1361,6 +1364,24 @@ static void intel_init_dpio(struct drm_device *dev)
if (!IS_VALLEYVIEW(dev))
return;
+ DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+}
+
+static void intel_reset_dpio(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_VALLEYVIEW(dev))
+ return;
+
+ /*
+ * Enable the CRI clock source so we can get at the display and the
+ * reference clock for VGA hotplug / manual detection.
+ */
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV |
+ DPLL_INTEGRATED_CRI_CLK_VLV);
+
/*
* From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
* 6. De-assert cmn_reset/side_reset. Same as VLV X0.
@@ -1487,25 +1508,35 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
- /* Leave integrated clock source enabled */
+ /*
+ * Leave integrated clock source and reference clock enabled for pipe B.
+ * The latter is needed for VGA hotplug / manual detection.
+ */
if (pipe == PIPE_B)
- val = DPLL_INTEGRATED_CRI_CLK_VLV;
+ val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
}
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dport)
{
u32 port_mask;
- if (!port)
+ switch (dport->port) {
+ case PORT_B:
port_mask = DPLL_PORTB_READY_MASK;
- else
+ break;
+ case PORT_C:
port_mask = DPLL_PORTC_READY_MASK;
+ break;
+ default:
+ BUG();
+ }
if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
- 'B' + port, I915_READ(DPLL(0)));
+ port_name(dport->port), I915_READ(DPLL(0)));
}
/**
@@ -2083,8 +2114,8 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_MODIFY_DISPBASE(DSPSURF(plane),
- i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+ I915_WRITE(DSPSURF(plane),
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
@@ -2174,8 +2205,8 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
- I915_MODIFY_DISPBASE(DSPSURF(plane),
- i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+ I915_WRITE(DSPSURF(plane),
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
@@ -2233,7 +2264,12 @@ void intel_display_handle_reset(struct drm_device *dev)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
mutex_lock(&crtc->mutex);
- if (intel_crtc->active)
+ /*
+ * FIXME: Once we have proper support for primary planes (and
+ * disabling them without disabling the entire crtc) allow again
+ * a NULL crtc->fb.
+ */
+ if (intel_crtc->active && crtc->fb)
dev_priv->display.update_plane(crtc, crtc->fb,
crtc->x, crtc->y);
mutex_unlock(&crtc->mutex);
@@ -2350,6 +2386,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
}
+ intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
+ intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
}
ret = dev_priv->display.update_plane(crtc, fb, x, y);
@@ -2944,6 +2982,30 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
return pending;
}
+bool intel_has_pending_fb_unpin(struct drm_device *dev)
+{
+ struct intel_crtc *crtc;
+
+ /* Note that we don't need to be called with mode_config.lock here
+ * as our list of CRTC objects is static for the lifetime of the
+ * device and so cannot disappear as we iterate. Similarly, we can
+ * happily treat the predicates as racy, atomic checks as userspace
+ * cannot claim and pin a new fb without at least acquiring the
+ * struct_mutex and so serialising with us.
+ */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ if (atomic_read(&crtc->unpin_work_count) == 0)
+ continue;
+
+ if (crtc->unpin_work)
+ intel_wait_for_vblank(dev, crtc->pipe);
+
+ return true;
+ }
+
+ return false;
+}
+
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3399,9 +3461,8 @@ void hsw_enable_ips(struct intel_crtc *crtc)
mutex_unlock(&dev_priv->rps.hw_lock);
/* Quoting Art Runyan: "its not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
- * mailbox." Therefore we need to defer waiting on the state
- * change.
- * TODO: need to fix this for state checker
+ * mailbox." Moreover, the mailbox may return a bogus state,
+ * so we need to just enable it and continue on.
*/
} else {
I915_WRITE(IPS_CTL, IPS_ENABLE);
@@ -3428,9 +3489,10 @@ void hsw_disable_ips(struct intel_crtc *crtc)
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
mutex_unlock(&dev_priv->rps.hw_lock);
- } else
+ } else {
I915_WRITE(IPS_CTL, 0);
- POSTING_READ(IPS_CTL);
+ POSTING_READ(IPS_CTL);
+ }
/* We need to wait for a vblank before we can disable the plane. */
intel_wait_for_vblank(dev, crtc->pipe);
@@ -3465,7 +3527,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
/* Workaround : Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
- if (intel_crtc->config.ips_enabled &&
+ if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
@@ -3910,6 +3972,174 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
+int valleyview_get_vco(struct drm_i915_private *dev_priv)
+{
+ int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
+
+ /* Obtain SKU information */
+ mutex_lock(&dev_priv->dpio_lock);
+ hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
+ CCK_FUSE_HPLL_FREQ_MASK;
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ return vco_freq[hpll_freq];
+}
+
+/* Adjust CDclk dividers to allow high res or save power if possible */
+static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, cmd;
+
+ if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
+ cmd = 2;
+ else if (cdclk == 266)
+ cmd = 1;
+ else
+ cmd = 0;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val &= ~DSPFREQGUAR_MASK;
+ val |= (cmd << DSPFREQGUAR_SHIFT);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+ DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
+ 50)) {
+ DRM_ERROR("timed out waiting for CDclk change\n");
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ if (cdclk == 400) {
+ u32 divider, vco;
+
+ vco = valleyview_get_vco(dev_priv);
+ divider = ((vco << 1) / cdclk) - 1;
+
+ mutex_lock(&dev_priv->dpio_lock);
+ /* adjust cdclk divider */
+ val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+ val &= ~0xf;
+ val |= divider;
+ vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+ mutex_unlock(&dev_priv->dpio_lock);
+ }
+
+ mutex_lock(&dev_priv->dpio_lock);
+ /* adjust self-refresh exit latency value */
+ val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
+ val &= ~0x7f;
+
+ /*
+ * For high bandwidth configs, we set a higher latency in the bunit
+ * so that the core display fetch happens in time to avoid underruns.
+ */
+ if (cdclk == 400)
+ val |= 4500 / 250; /* 4.5 usec */
+ else
+ val |= 3000 / 250; /* 3.0 usec */
+ vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ /* Since we changed the CDclk, we need to update the GMBUSFREQ too */
+ intel_i2c_reset(dev);
+}
+
+static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
+{
+ int cur_cdclk, vco;
+ int divider;
+
+ vco = valleyview_get_vco(dev_priv);
+
+ mutex_lock(&dev_priv->dpio_lock);
+ divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ divider &= 0xf;
+
+ cur_cdclk = (vco << 1) / (divider + 1);
+
+ return cur_cdclk;
+}
+
+static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
+ int max_pixclk)
+{
+ int cur_cdclk;
+
+ cur_cdclk = valleyview_cur_cdclk(dev_priv);
+
+ /*
+ * Really only a few cases to deal with, as only 4 CDclks are supported:
+ * 200MHz
+ * 267MHz
+ * 320MHz
+ * 400MHz
+ * So we check to see whether we're above 90% of the lower bin and
+ * adjust if needed.
+ */
+ if (max_pixclk > 288000) {
+ return 400;
+ } else if (max_pixclk > 240000) {
+ return 320;
+ } else
+ return 266;
+ /* Looks like the 200MHz CDclk freq doesn't work on some configs */
+}
+
+static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
+ unsigned modeset_pipes,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_crtc *intel_crtc;
+ int max_pixclk = 0;
+
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ if (modeset_pipes & (1 << intel_crtc->pipe))
+ max_pixclk = max(max_pixclk,
+ pipe_config->adjusted_mode.crtc_clock);
+ else if (intel_crtc->base.enabled)
+ max_pixclk = max(max_pixclk,
+ intel_crtc->config.adjusted_mode.crtc_clock);
+ }
+
+ return max_pixclk;
+}
+
+static void valleyview_modeset_global_pipes(struct drm_device *dev,
+ unsigned *prepare_pipes,
+ unsigned modeset_pipes,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc;
+ int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
+ pipe_config);
+ int cur_cdclk = valleyview_cur_cdclk(dev_priv);
+
+ if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
+ return;
+
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head)
+ if (intel_crtc->base.enabled)
+ *prepare_pipes |= (1 << intel_crtc->pipe);
+}
+
+static void valleyview_modeset_global_resources(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
+ int cur_cdclk = valleyview_cur_cdclk(dev_priv);
+ int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
+
+ if (req_cdclk != cur_cdclk)
+ valleyview_set_cdclk(dev, req_cdclk);
+}
+
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -4570,9 +4800,8 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
refclk = 100000;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
- refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
- DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
- refclk / 1000);
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
} else if (!IS_GEN2(dev)) {
refclk = 96000;
} else {
@@ -4634,24 +4863,24 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
* PLLB opamp always calibrates to max value of 0x3f, force enable it
* and set it to a reasonable value instead.
*/
- reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
reg_val &= 0xffffff00;
reg_val |= 0x00000030;
- vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
reg_val &= 0x8cffffff;
reg_val = 0x8c000000;
- vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
+ vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
reg_val &= 0xffffff00;
- vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
reg_val &= 0x00ffffff;
reg_val |= 0xb0000000;
- vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
+ vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
@@ -4720,15 +4949,15 @@ static void vlv_update_pll(struct intel_crtc *crtc)
vlv_pllb_recal_opamp(dev_priv, pipe);
/* Set up Tx target for periodic Rcomp update */
- vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
/* Disable target IRef on PLL */
- reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe));
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
reg_val &= 0x00ffffff;
- vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
/* Disable fast lock */
- vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610);
+ vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
/* Set idtafcrecal before PLL is enabled */
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -4742,50 +4971,54 @@ static void vlv_update_pll(struct intel_crtc *crtc)
* Note: don't use the DAC post divider as it seems unstable.
*/
mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
- vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
mdiv |= DPIO_ENABLE_CALIBRATION;
- vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
/* Set HBR and RBR LPF coefficients */
if (crtc->config.port_clock == 162000 ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
- vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x009f0003);
else
- vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x00d0000f);
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
/* Use SSC source */
if (!pipe)
- vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df40000);
else
- vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (!pipe)
- vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df70000);
else
- vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df40000);
}
- coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe));
+ coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
coreclk |= 0x01000000;
- vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
- vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
- /* Enable DPIO clock input */
+ /*
+ * Enable DPIO clock input. We should never disable the reference
+ * clock for pipe B, since VGA hotplug / manual detection depends
+ * on it.
+ */
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
/* We should never disable this, set it here for state tracking */
@@ -5230,6 +5463,9 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
+ if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+ return;
+
tmp = I915_READ(PFIT_CONTROL);
if (!(tmp & PFIT_ENABLE))
return;
@@ -5261,7 +5497,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
int refclk = 100000;
mutex_lock(&dev_priv->dpio_lock);
- mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
+ mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
mutex_unlock(&dev_priv->dpio_lock);
clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
@@ -5718,9 +5954,9 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
}
if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
- DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+ DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
dev_priv->vbt.lvds_ssc_freq);
- return dev_priv->vbt.lvds_ssc_freq * 1000;
+ return dev_priv->vbt.lvds_ssc_freq;
}
return 120000;
@@ -5982,7 +6218,7 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
factor = 21;
if (is_lvds) {
if ((intel_panel_use_ssc(dev_priv) &&
- dev_priv->vbt.lvds_ssc_freq == 100) ||
+ dev_priv->vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
factor = 25;
} else if (intel_crtc->config.sdvo_tv_clock)
@@ -6303,7 +6539,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
uint32_t val;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
- WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
+ WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
@@ -6323,7 +6559,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
val = I915_READ(DEIMR);
- WARN((val & ~DE_PCH_EVENT_IVB) != val,
+ WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff,
"Unexpected DEIMR bits enabled: 0x%x\n", val);
val = I915_READ(SDEIMR);
WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
@@ -6402,7 +6638,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
/* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine! */
- gen6_gt_force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6436,7 +6672,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
DRM_ERROR("Switching back to LCPLL failed\n");
}
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
void hsw_enable_pc8_work(struct work_struct *__work)
@@ -6447,6 +6683,8 @@ void hsw_enable_pc8_work(struct work_struct *__work)
struct drm_device *dev = dev_priv->dev;
uint32_t val;
+ WARN_ON(!HAS_PC8(dev));
+
if (dev_priv->pc8.enabled)
return;
@@ -6463,6 +6701,8 @@ void hsw_enable_pc8_work(struct work_struct *__work)
lpt_disable_clkout_dp(dev);
hsw_pc8_disable_interrupts(dev);
hsw_disable_lcpll(dev_priv, true, true);
+
+ intel_runtime_pm_put(dev_priv);
}
static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
@@ -6492,12 +6732,16 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
if (dev_priv->pc8.disable_count != 1)
return;
+ WARN_ON(!HAS_PC8(dev));
+
cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
if (!dev_priv->pc8.enabled)
return;
DRM_DEBUG_KMS("Disabling package C8+\n");
+ intel_runtime_pm_get(dev_priv);
+
hsw_restore_lcpll(dev_priv);
hsw_pc8_restore_interrupts(dev);
lpt_init_pch_refclk(dev);
@@ -6704,8 +6948,9 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
int ret;
- if (!intel_ddi_pll_mode_set(crtc))
+ if (!intel_ddi_pll_select(intel_crtc))
return -EINVAL;
+ intel_ddi_pll_enable(intel_crtc);
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
@@ -6796,8 +7041,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (intel_display_power_enabled(dev, pfit_domain))
ironlake_get_pfit_config(crtc, pipe_config);
- pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
- (I915_READ(IPS_CTL) & IPS_ENABLE);
+ if (IS_HASWELL(dev))
+ pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
+ (I915_READ(IPS_CTL) & IPS_ENABLE);
pipe_config->pixel_multiplier = 1;
@@ -7689,7 +7935,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
u32 dpll = pipe_config->dpll_hw_state.dpll;
if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
- return dev_priv->vbt.lvds_ssc_freq * 1000;
+ return dev_priv->vbt.lvds_ssc_freq;
else if (HAS_PCH_SPLIT(dev))
return 120000;
else if (!IS_GEN2(dev))
@@ -7752,12 +7998,17 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
else
i9xx_clock(refclk, &clock);
} else {
- bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
+ u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
+ bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
if (is_lvds) {
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
- clock.p2 = 14;
+
+ if (lvds & LVDS_CLKB_POWER_UP)
+ clock.p2 = 7;
+ else
+ clock.p2 = 14;
} else {
if (dpll & PLL_P1_DIVIDE_BY_TWO)
clock.p1 = 2;
@@ -8493,28 +8744,6 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
.load_lut = intel_crtc_load_lut,
};
-static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
- struct drm_crtc *crtc)
-{
- struct drm_device *dev;
- struct drm_crtc *tmp;
- int crtc_mask = 1;
-
- WARN(!crtc, "checking null crtc?\n");
-
- dev = crtc->dev;
-
- list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
- if (tmp == crtc)
- break;
- crtc_mask <<= 1;
- }
-
- if (encoder->possible_crtcs & crtc_mask)
- return true;
- return false;
-}
-
/**
* intel_modeset_update_staged_output_state
*
@@ -9122,7 +9351,9 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(pch_pfit.size);
}
- PIPE_CONF_CHECK_I(ips_enabled);
+ /* BDW+ don't expose a synchronous way to read the state */
+ if (IS_HASWELL(dev))
+ PIPE_CONF_CHECK_I(ips_enabled);
PIPE_CONF_CHECK_I(double_wide);
@@ -9135,7 +9366,7 @@ intel_pipe_config_compare(struct drm_device *dev,
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
- if (!IS_HASWELL(dev)) {
+ if (!HAS_DDI(dev)) {
PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
}
@@ -9368,21 +9599,19 @@ static int __intel_set_mode(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_display_mode *saved_mode, *saved_hwmode;
+ struct drm_display_mode *saved_mode;
struct intel_crtc_config *pipe_config = NULL;
struct intel_crtc *intel_crtc;
unsigned disable_pipes, prepare_pipes, modeset_pipes;
int ret = 0;
- saved_mode = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL);
+ saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
if (!saved_mode)
return -ENOMEM;
- saved_hwmode = saved_mode + 1;
intel_modeset_affected_pipes(crtc, &modeset_pipes,
&prepare_pipes, &disable_pipes);
- *saved_hwmode = crtc->hwmode;
*saved_mode = crtc->mode;
/* Hack: Because we don't (yet) support global modeset on multiple
@@ -9402,6 +9631,21 @@ static int __intel_set_mode(struct drm_crtc *crtc,
"[modeset]");
}
+ /*
+ * See if the config requires any additional preparation, e.g.
+ * to adjust global state with pipes off. We need to do this
+ * here so we can get the modeset_pipe updated config for the new
+ * mode set on this crtc. For other crtcs we need to use the
+ * adjusted_mode bits in the crtc directly.
+ */
+ if (IS_VALLEYVIEW(dev)) {
+ valleyview_modeset_global_pipes(dev, &prepare_pipes,
+ modeset_pipes, pipe_config);
+
+ /* may have added more to prepare_pipes than we should */
+ prepare_pipes &= ~disable_pipes;
+ }
+
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
@@ -9418,6 +9662,14 @@ static int __intel_set_mode(struct drm_crtc *crtc,
/* mode_set/enable/disable functions rely on a correct pipe
* config. */
to_intel_crtc(crtc)->config = *pipe_config;
+
+ /*
+ * Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc,
+ &pipe_config->adjusted_mode);
}
/* Only after disabling all output pipelines that will be changed can we
@@ -9441,23 +9693,10 @@ static int __intel_set_mode(struct drm_crtc *crtc,
for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
dev_priv->display.crtc_enable(&intel_crtc->base);
- if (modeset_pipes) {
- /* Store real post-adjustment hardware mode. */
- crtc->hwmode = pipe_config->adjusted_mode;
-
- /* Calculate and store various constants which
- * are later needed by vblank and swap-completion
- * timestamping. They are derived from true hwmode.
- */
- drm_calc_timestamping_constants(crtc);
- }
-
/* FIXME: add subpixel order */
done:
- if (ret && crtc->enabled) {
- crtc->hwmode = *saved_hwmode;
+ if (ret && crtc->enabled)
crtc->mode = *saved_mode;
- }
out:
kfree(pipe_config);
@@ -9679,8 +9918,8 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
/* Make sure the new CRTC will work with the encoder */
- if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
- new_crtc)) {
+ if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
+ new_crtc)) {
return -EINVAL;
}
connector->encoder->new_crtc = to_intel_crtc(new_crtc);
@@ -9694,17 +9933,21 @@ intel_modeset_stage_output_state(struct drm_device *dev,
/* Check for any encoders that needs to be disabled. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
+ int num_connectors = 0;
list_for_each_entry(connector,
&dev->mode_config.connector_list,
base.head) {
if (connector->new_encoder == encoder) {
WARN_ON(!connector->new_encoder->new_crtc);
-
- goto next_encoder;
+ num_connectors++;
}
}
- encoder->new_crtc = NULL;
-next_encoder:
+
+ if (num_connectors == 0)
+ encoder->new_crtc = NULL;
+ else if (num_connectors > 1)
+ return -EINVAL;
+
/* Only now check for crtc changes so we don't miss encoders
* that will be disabled. */
if (&encoder->new_crtc->base != encoder->base.crtc) {
@@ -9775,6 +10018,16 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
ret = intel_pipe_set_base(set->crtc,
set->x, set->y, set->fb);
+ /*
+ * In the fastboot case this may be our only check of the
+ * state after boot. It would be better to only do it on
+ * the first update, but we don't have a nice way of doing that
+ * (and really, set_config isn't used much for high freq page
+ * flipping, so increasing its cost here shouldn't be a big
+ * deal).
+ */
+ if (i915_fastboot && ret == 0)
+ intel_modeset_check_state(set->crtc->dev);
}
if (ret) {
@@ -9835,7 +10088,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
/* PCH refclock must be enabled first */
- assert_pch_refclk_enabled(dev_priv);
+ ibx_assert_pch_refclk_enabled(dev_priv);
I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
@@ -9903,8 +10156,6 @@ static void intel_shared_dpll_init(struct drm_device *dev)
dev_priv->num_shared_dpll = 0;
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
- DRM_DEBUG_KMS("%i shared PLLs initialized\n",
- dev_priv->num_shared_dpll);
}
static void intel_crtc_init(struct drm_device *dev, int pipe)
@@ -9926,10 +10177,13 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->lut_b[i] = i;
}
- /* Swap pipes & planes for FBC on pre-965 */
+ /*
+ * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
+ * is hooked to pipe B. Hence we want plane A feeding pipe B.
+ */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
- if (IS_MOBILE(dev) && IS_GEN3(dev)) {
+ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = !pipe;
}
@@ -10018,6 +10272,28 @@ static bool has_edp_a(struct drm_device *dev)
return true;
}
+const char *intel_output_name(int output)
+{
+ static const char *names[] = {
+ [INTEL_OUTPUT_UNUSED] = "Unused",
+ [INTEL_OUTPUT_ANALOG] = "Analog",
+ [INTEL_OUTPUT_DVO] = "DVO",
+ [INTEL_OUTPUT_SDVO] = "SDVO",
+ [INTEL_OUTPUT_LVDS] = "LVDS",
+ [INTEL_OUTPUT_TVOUT] = "TV",
+ [INTEL_OUTPUT_HDMI] = "HDMI",
+ [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
+ [INTEL_OUTPUT_EDP] = "eDP",
+ [INTEL_OUTPUT_DSI] = "DSI",
+ [INTEL_OUTPUT_UNKNOWN] = "Unknown",
+ };
+
+ if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
+ return "Invalid";
+
+ return names[output];
+}
+
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10412,8 +10688,11 @@ static void intel_init_display(struct drm_device *dev)
}
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
- } else if (IS_VALLEYVIEW(dev))
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.modeset_global_resources =
+ valleyview_modeset_global_resources;
dev_priv->display.write_eld = ironlake_write_eld;
+ }
/* Default just returns -ENODEV to indicate unsupported */
dev_priv->display.queue_flip = intel_default_queue_flip;
@@ -10440,6 +10719,8 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.queue_flip = intel_gen7_queue_flip;
break;
}
+
+ intel_panel_init_backlight_funcs(dev);
}
/*
@@ -10476,17 +10757,6 @@ static void quirk_invert_brightness(struct drm_device *dev)
DRM_INFO("applying inverted panel brightness quirk\n");
}
-/*
- * Some machines (Dell XPS13) suffer broken backlight controls if
- * BLM_PCH_PWM_ENABLE is set.
- */
-static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
- DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
-}
-
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -10541,16 +10811,20 @@ static struct intel_quirk intel_quirks[] = {
/* Sony Vaio Y cannot use SSC on LVDS */
{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
- /*
- * All GM45 Acer (and its brands eMachines and Packard Bell) laptops
- * seem to use inverted backlight PWM.
- */
- { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
+ /* Acer Aspire 5734Z must invert backlight brightness */
+ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+ /* Acer/eMachines G725 */
+ { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+ /* Acer/eMachines e725 */
+ { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
- /* Dell XPS13 HD Sandy Bridge */
- { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
- /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
- { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+ /* Acer Aspire 4736Z */
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -10594,18 +10868,11 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
intel_prepare_ddi(dev);
intel_init_clock_gating(dev);
- /* Enable the CRI clock source so we can get at the display */
- if (IS_VALLEYVIEW(dev))
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_INTEGRATED_CRI_CLK_VLV);
-
- intel_init_dpio(dev);
+ intel_reset_dpio(dev);
mutex_lock(&dev->struct_mutex);
intel_enable_gt_powersave(dev);
@@ -10667,6 +10934,9 @@ void intel_modeset_init(struct drm_device *dev)
}
}
+ intel_init_dpio(dev);
+ intel_reset_dpio(dev);
+
intel_cpu_pll_init(dev);
intel_shared_dpll_init(dev);
@@ -10870,7 +11140,7 @@ void i915_redisable_vga(struct drm_device *dev)
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
- if (HAS_POWER_WELL(dev) &&
+ if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
return;
@@ -11014,7 +11284,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
pll->on = false;
}
- if (IS_HASWELL(dev))
+ if (HAS_PCH_SPLIT(dev))
ilk_wm_get_hw_state(dev);
if (force_restore) {
@@ -11036,8 +11306,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
}
intel_modeset_check_state(dev);
-
- drm_mode_config_reset(dev);
}
void intel_modeset_gem_init(struct drm_device *dev)
@@ -11046,7 +11314,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_setup_overlay(dev);
+ mutex_lock(&dev->mode_config.mutex);
+ drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, false);
+ mutex_unlock(&dev->mode_config.mutex);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -11091,12 +11362,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
/* flush any delayed tasks or pending work */
flush_scheduled_work();
- /* destroy backlight, if any, before the connectors */
- intel_panel_destroy_backlight(dev);
-
- /* destroy the sysfs files before encoders/connectors */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ /* destroy the backlight and sysfs files before encoders/connectors */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ intel_panel_destroy_backlight(connector);
drm_sysfs_connector_remove(connector);
+ }
drm_mode_config_cleanup(dev);
@@ -11125,14 +11395,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
- pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
+ pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
if (state)
gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
else
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
- pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
+ pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
return 0;
}
@@ -11150,6 +11421,7 @@ struct intel_display_error_state {
} cursor[I915_MAX_PIPES];
struct intel_pipe_error_state {
+ bool power_domain_on;
u32 source;
} pipe[I915_MAX_PIPES];
@@ -11164,6 +11436,7 @@ struct intel_display_error_state {
} plane[I915_MAX_PIPES];
struct intel_transcoder_error_state {
+ bool power_domain_on;
enum transcoder cpu_transcoder;
u32 conf;
@@ -11197,11 +11470,13 @@ intel_display_capture_error_state(struct drm_device *dev)
if (error == NULL)
return NULL;
- if (HAS_POWER_WELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
for_each_pipe(i) {
- if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i)))
+ error->pipe[i].power_domain_on =
+ intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
+ if (!error->pipe[i].power_domain_on)
continue;
if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
@@ -11237,8 +11512,10 @@ intel_display_capture_error_state(struct drm_device *dev)
for (i = 0; i < error->num_transcoders; i++) {
enum transcoder cpu_transcoder = transcoders[i];
- if (!intel_display_power_enabled(dev,
- POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
+ error->transcoder[i].power_domain_on =
+ intel_display_power_enabled_sw(dev,
+ POWER_DOMAIN_TRANSCODER(cpu_transcoder));
+ if (!error->transcoder[i].power_domain_on)
continue;
error->transcoder[i].cpu_transcoder = cpu_transcoder;
@@ -11268,11 +11545,13 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
return;
err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
- if (HAS_POWER_WELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
for_each_pipe(i) {
err_printf(m, "Pipe [%d]:\n", i);
+ err_printf(m, " Power: %s\n",
+ error->pipe[i].power_domain_on ? "on" : "off");
err_printf(m, " SRC: %08x\n", error->pipe[i].source);
err_printf(m, "Plane [%d]:\n", i);
@@ -11298,6 +11577,8 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
for (i = 0; i < error->num_transcoders; i++) {
err_printf(m, "CPU transcoder: %c\n",
transcoder_name(error->transcoder[i].cpu_transcoder));
+ err_printf(m, " Power: %s\n",
+ error->transcoder[i].power_domain_on ? "on" : "off");
err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 30c627c7b7ba..2f517b85b3f4 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -142,7 +142,7 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
return (max_link_clock * max_lanes * 8) / 10;
}
-static int
+static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -404,7 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
int i, ret, recv_bytes;
uint32_t status;
int try, precharge, clock = 0;
- bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+ bool has_aux_irq = HAS_AUX_IRQ(dev);
uint32_t timeout;
/* dp aux is extremely sensitive to irq latency, hence request the
@@ -542,7 +542,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
return -E2BIG;
intel_dp_check_edp(intel_dp);
- msg[0] = AUX_NATIVE_WRITE << 4;
+ msg[0] = DP_AUX_NATIVE_WRITE << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = send_bytes - 1;
@@ -552,9 +552,10 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
- if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ ack >>= 4;
+ if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
break;
- else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
udelay(100);
else
return -EIO;
@@ -586,7 +587,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
return -E2BIG;
intel_dp_check_edp(intel_dp);
- msg[0] = AUX_NATIVE_READ << 4;
+ msg[0] = DP_AUX_NATIVE_READ << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = recv_bytes - 1;
@@ -601,12 +602,12 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
return -EPROTO;
if (ret < 0)
return ret;
- ack = reply[0];
- if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+ ack = reply[0] >> 4;
+ if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
memcpy(recv, reply + 1, ret - 1);
return ret - 1;
}
- else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
udelay(100);
else
return -EIO;
@@ -633,12 +634,12 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
intel_dp_check_edp(intel_dp);
/* Set up the command byte */
if (mode & MODE_I2C_READ)
- msg[0] = AUX_I2C_READ << 4;
+ msg[0] = DP_AUX_I2C_READ << 4;
else
- msg[0] = AUX_I2C_WRITE << 4;
+ msg[0] = DP_AUX_I2C_WRITE << 4;
if (!(mode & MODE_I2C_STOP))
- msg[0] |= AUX_I2C_MOT << 4;
+ msg[0] |= DP_AUX_I2C_MOT << 4;
msg[1] = address >> 8;
msg[2] = address;
@@ -675,17 +676,17 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
goto out;
}
- switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
- case AUX_NATIVE_REPLY_ACK:
+ switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
+ case DP_AUX_NATIVE_REPLY_ACK:
/* I2C-over-AUX Reply field is only valid
* when paired with AUX ACK.
*/
break;
- case AUX_NATIVE_REPLY_NACK:
+ case DP_AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
ret = -EREMOTEIO;
goto out;
- case AUX_NATIVE_REPLY_DEFER:
+ case DP_AUX_NATIVE_REPLY_DEFER:
/*
* For now, just give more slack to branch devices. We
* could check the DPCD for I2C bit rate capabilities,
@@ -706,18 +707,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
goto out;
}
- switch (reply[0] & AUX_I2C_REPLY_MASK) {
- case AUX_I2C_REPLY_ACK:
+ switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
+ case DP_AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ) {
*read_byte = reply[1];
}
ret = reply_bytes - 1;
goto out;
- case AUX_I2C_REPLY_NACK:
+ case DP_AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
ret = -EREMOTEIO;
goto out;
- case AUX_I2C_REPLY_DEFER:
+ case DP_AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(100);
break;
@@ -1037,6 +1038,8 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
}
+
+ DRM_DEBUG_KMS("Wait complete\n");
}
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
@@ -1092,6 +1095,8 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
if (ironlake_edp_have_panel_vdd(intel_dp))
return;
+ intel_runtime_pm_get(dev_priv);
+
DRM_DEBUG_KMS("Turning eDP VDD on\n");
if (!ironlake_edp_have_panel_power(intel_dp))
@@ -1140,7 +1145,11 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
/* Make sure sequencer is idle before allowing subsequent activity */
DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
- msleep(intel_dp->panel_power_down_delay);
+
+ if ((pp & POWER_TARGET_ON) == 0)
+ msleep(intel_dp->panel_power_cycle_delay);
+
+ intel_runtime_pm_put(dev_priv);
}
}
@@ -1233,20 +1242,16 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power off\n");
- WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
-
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
- pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
- intel_dp->want_panel_vdd = false;
-
ironlake_wait_panel_off(intel_dp);
}
@@ -1772,7 +1777,6 @@ static void intel_disable_dp(struct intel_encoder *encoder)
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
- ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
@@ -1845,34 +1849,36 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- int port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
struct edp_power_seq power_seq;
u32 val;
mutex_lock(&dev_priv->dpio_lock);
- val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
- vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
mutex_unlock(&dev_priv->dpio_lock);
- /* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ if (is_edp(intel_dp)) {
+ /* init power sequencer on this pipe and port */
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+ }
intel_enable_dp(encoder);
- vlv_wait_port_ready(dev_priv, port);
+ vlv_wait_port_ready(dev_priv, dport);
}
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -1882,24 +1888,24 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
- int port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -1941,18 +1947,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE);
}
-#if 0
-static char *voltage_names[] = {
- "0.4V", "0.6V", "0.8V", "1.2V"
-};
-static char *pre_emph_names[] = {
- "0dB", "3.5dB", "6dB", "9.5dB"
-};
-static char *link_train_names[] = {
- "pattern 1", "pattern 2", "idle", "off"
-};
-#endif
-
/*
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
@@ -2050,7 +2044,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value;
uint8_t train_set = intel_dp->train_set[0];
- int port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
@@ -2127,14 +2121,14 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
}
mutex_lock(&dev_priv->dpio_lock);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
uniqtranscale_reg_value);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
mutex_unlock(&dev_priv->dpio_lock);
return 0;
@@ -2646,7 +2640,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
- intel_dp_link_down(intel_dp);
break;
}
@@ -2899,13 +2892,11 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
/* Try to read receiver status if the link appears to be up */
if (!intel_dp_get_link_status(intel_dp, link_status)) {
- intel_dp_link_down(intel_dp);
return;
}
/* Now read the DPCD to see if it's actually running */
if (!intel_dp_get_dpcd(intel_dp)) {
- intel_dp_link_down(intel_dp);
return;
}
@@ -3020,18 +3011,34 @@ g4x_dp_detect(struct intel_dp *intel_dp)
return status;
}
- switch (intel_dig_port->port) {
- case PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS;
- break;
- case PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS;
- break;
- case PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS;
- break;
- default:
- return connector_status_unknown;
+ if (IS_VALLEYVIEW(dev)) {
+ switch (intel_dig_port->port) {
+ case PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ case PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ case PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ default:
+ return connector_status_unknown;
+ }
+ } else {
+ switch (intel_dig_port->port) {
+ case PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ case PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ case PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ default:
+ return connector_status_unknown;
+ }
}
if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
@@ -3082,9 +3089,12 @@ intel_dp_detect(struct drm_connector *connector, bool force)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
enum drm_connector_status status;
struct edid *edid = NULL;
+ intel_runtime_pm_get(dev_priv);
+
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
@@ -3096,7 +3106,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
status = g4x_dp_detect(intel_dp);
if (status != connector_status_connected)
- return status;
+ goto out;
intel_dp_probe_oui(intel_dp);
@@ -3112,7 +3122,11 @@ intel_dp_detect(struct drm_connector *connector, bool force)
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
- return connector_status_connected;
+ status = connector_status_connected;
+
+out:
+ intel_runtime_pm_put(dev_priv);
+ return status;
}
static int intel_dp_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a18e88b3e425..fbfaaba5cc3b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -65,8 +65,8 @@
#define wait_for_atomic_us(COND, US) _wait_for((COND), \
DIV_ROUND_UP((US), 1000), 0)
-#define KHz(x) (1000*x)
-#define MHz(x) KHz(1000*x)
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
/*
* Display related stuff
@@ -155,7 +155,19 @@ struct intel_encoder {
struct intel_panel {
struct drm_display_mode *fixed_mode;
+ struct drm_display_mode *downclock_mode;
int fitting_mode;
+
+ /* backlight */
+ struct {
+ bool present;
+ u32 level;
+ u32 max;
+ bool enabled;
+ bool combination_mode; /* gen 2/4 only */
+ bool active_low_pwm;
+ struct backlight_device *device;
+ } backlight;
};
struct intel_connector {
@@ -443,7 +455,7 @@ struct intel_hdmi {
bool rgb_quant_range_selectable;
void (*write_infoframe)(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
- const uint8_t *frame, ssize_t len);
+ const void *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
};
@@ -490,9 +502,9 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
{
switch (dport->port) {
case PORT_B:
- return 0;
+ return DPIO_CH0;
case PORT_C:
- return 1;
+ return DPIO_CH1;
default:
BUG();
}
@@ -601,7 +613,8 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
-bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
+bool intel_ddi_pll_select(struct intel_crtc *crtc);
+void intel_ddi_pll_enable(struct intel_crtc *crtc);
void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
@@ -612,6 +625,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
/* intel_display.c */
+const char *intel_output_name(int output);
+bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -638,7 +653,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
void intel_wait_for_vblank(struct drm_device *dev, int pipe);
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dport);
bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old);
@@ -690,11 +706,10 @@ void
ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
int dotclock);
bool intel_crtc_active(struct drm_crtc *crtc);
-void i915_disable_vga_mem(struct drm_device *dev);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
void intel_display_set_init_power(struct drm_device *dev, bool enable);
-
+int valleyview_get_vco(struct drm_i915_private *dev_priv);
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -808,9 +823,13 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
int intel_panel_setup_backlight(struct drm_connector *connector);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
-void intel_panel_destroy_backlight(struct drm_device *dev);
+void intel_panel_destroy_backlight(struct drm_connector *connector);
+void intel_panel_init_backlight_funcs(struct drm_device *dev);
enum drm_connector_status intel_panel_detect(struct drm_device *dev);
-
+extern struct drm_display_mode *intel_find_panel_downclock(
+ struct drm_device *dev,
+ struct drm_display_mode *fixed_mode,
+ struct drm_connector *connector);
/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
@@ -821,6 +840,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
+void intel_pm_setup(struct drm_device *dev);
bool intel_fbc_enabled(struct drm_device *dev);
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -829,6 +849,8 @@ int intel_power_domains_init(struct drm_device *dev);
void intel_power_domains_remove(struct drm_device *dev);
bool intel_display_power_enabled(struct drm_device *dev,
enum intel_display_power_domain domain);
+bool intel_display_power_enabled_sw(struct drm_device *dev,
+ enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_device *dev,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_device *dev,
@@ -843,6 +865,10 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
+void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
+void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index d257b093ca68..fabbf0d895cf 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -37,49 +37,18 @@
static const struct intel_dsi_device intel_dsi_devices[] = {
};
-
-static void vlv_cck_modify(struct drm_i915_private *dev_priv, u32 reg, u32 val,
- u32 mask)
-{
- u32 tmp = vlv_cck_read(dev_priv, reg);
- tmp &= ~mask;
- tmp |= val;
- vlv_cck_write(dev_priv, reg, tmp);
-}
-
-static void band_gap_wa(struct drm_i915_private *dev_priv)
+static void band_gap_reset(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->dpio_lock);
- /* Enable bandgap fix in GOP driver */
- vlv_cck_modify(dev_priv, 0x6D, 0x00010000, 0x00030000);
- msleep(20);
- vlv_cck_modify(dev_priv, 0x6E, 0x00010000, 0x00030000);
- msleep(20);
- vlv_cck_modify(dev_priv, 0x6F, 0x00010000, 0x00030000);
- msleep(20);
- vlv_cck_modify(dev_priv, 0x00, 0x00008000, 0x00008000);
- msleep(20);
- vlv_cck_modify(dev_priv, 0x00, 0x00000000, 0x00008000);
- msleep(20);
-
- /* Turn Display Trunk on */
- vlv_cck_modify(dev_priv, 0x6B, 0x00020000, 0x00030000);
- msleep(20);
-
- vlv_cck_modify(dev_priv, 0x6C, 0x00020000, 0x00030000);
- msleep(20);
-
- vlv_cck_modify(dev_priv, 0x6D, 0x00020000, 0x00030000);
- msleep(20);
- vlv_cck_modify(dev_priv, 0x6E, 0x00020000, 0x00030000);
- msleep(20);
- vlv_cck_modify(dev_priv, 0x6F, 0x00020000, 0x00030000);
+ vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
+ vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
+ vlv_flisdsi_write(dev_priv, 0x0F, 0x0025);
+ udelay(150);
+ vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
+ vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
mutex_unlock(&dev_priv->dpio_lock);
-
- /* Need huge delay, otherwise clock is not stable */
- msleep(100);
}
static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
@@ -132,14 +101,47 @@ static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
vlv_enable_dsi_pll(encoder);
}
+static void intel_dsi_device_ready(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ int pipe = intel_crtc->pipe;
+ u32 val;
+
+ DRM_DEBUG_KMS("\n");
+
+ val = I915_READ(MIPI_PORT_CTRL(pipe));
+ I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
+ usleep_range(2000, 2500);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
+ usleep_range(2000, 2500);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
+ usleep_range(2000, 2500);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
+ usleep_range(2000, 2500);
+}
static void intel_dsi_pre_enable(struct intel_encoder *encoder)
{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
DRM_DEBUG_KMS("\n");
+
+ if (intel_dsi->dev.dev_ops->panel_reset)
+ intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
+
+ /* put device in ready state */
+ intel_dsi_device_ready(encoder);
+
+ if (intel_dsi->dev.dev_ops->send_otp_cmds)
+ intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
}
static void intel_dsi_enable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int pipe = intel_crtc->pipe;
@@ -147,41 +149,28 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- temp = I915_READ(MIPI_DEVICE_READY(pipe));
- if ((temp & DEVICE_READY) == 0) {
- temp &= ~ULPS_STATE_MASK;
- I915_WRITE(MIPI_DEVICE_READY(pipe), temp | DEVICE_READY);
- } else if (temp & ULPS_STATE_MASK) {
- temp &= ~ULPS_STATE_MASK;
- I915_WRITE(MIPI_DEVICE_READY(pipe), temp | ULPS_STATE_EXIT);
- /*
- * We need to ensure that there is a minimum of 1 ms time
- * available before clearing the UPLS exit state.
- */
- msleep(2);
- I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
- }
-
if (is_cmd_mode(intel_dsi))
I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
-
- if (is_vid_mode(intel_dsi)) {
+ else {
msleep(20); /* XXX */
dpi_send_cmd(intel_dsi, TURN_ON);
msleep(100);
/* assert ip_tg_enable signal */
- temp = I915_READ(MIPI_PORT_CTRL(pipe));
+ temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
+ temp = temp | intel_dsi->port_bits;
I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(pipe));
}
- intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+ if (intel_dsi->dev.dev_ops->enable)
+ intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
}
static void intel_dsi_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int pipe = intel_crtc->pipe;
@@ -189,8 +178,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
-
if (is_vid_mode(intel_dsi)) {
dpi_send_cmd(intel_dsi, SHUTDOWN);
msleep(10);
@@ -203,20 +190,54 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
msleep(2);
}
- temp = I915_READ(MIPI_DEVICE_READY(pipe));
- if (temp & DEVICE_READY) {
- temp &= ~DEVICE_READY;
- temp &= ~ULPS_STATE_MASK;
- I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
- }
+ /* If the disable packets are sent before the shutdown packet, a
+ * "send turn on packet" error can be observed during a subsequent
+ * enable sequence. */
+ if (intel_dsi->dev.dev_ops->disable)
+ intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
}
-static void intel_dsi_post_disable(struct intel_encoder *encoder)
+static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ int pipe = intel_crtc->pipe;
+ u32 val;
+
DRM_DEBUG_KMS("\n");
+ I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ usleep_range(2000, 2500);
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+ usleep_range(2000, 2500);
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ usleep_range(2000, 2500);
+
+ val = I915_READ(MIPI_PORT_CTRL(pipe));
+ I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
+
+ if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
+ == 0x00000), 30))
+ DRM_ERROR("DSI LP not going Low\n");
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
+ usleep_range(2000, 2500);
+
vlv_disable_dsi_pll(encoder);
}
+static void intel_dsi_post_disable(struct intel_encoder *encoder)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+ DRM_DEBUG_KMS("\n");
+
+ intel_dsi_clear_device_ready(encoder);
+
+ if (intel_dsi->dev.dev_ops->disable_panel_power)
+ intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
+}
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
@@ -251,8 +272,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
/* XXX: read flags, set to adjusted_mode */
}
-static int intel_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
@@ -352,11 +374,8 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
- /* Update the DSI PLL */
- vlv_enable_dsi_pll(intel_encoder);
-
/* XXX: Location of the call */
- band_gap_wa(dev_priv);
+ band_gap_reset(dev_priv);
/* escape clock divider, 20MHz, shared for A and C. device ready must be
* off when doing this! txclkesc? */
@@ -373,11 +392,7 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
- I915_WRITE(MIPI_DPHY_PARAM(pipe),
- 0x3c << EXIT_ZERO_COUNT_SHIFT |
- 0x1f << TRAIL_COUNT_SHIFT |
- 0xc5 << CLK_ZERO_COUNT_SHIFT |
- 0x1f << PREPARE_COUNT_SHIFT);
+ I915_WRITE(MIPI_DPHY_PARAM(pipe), intel_dsi->dphy_reg);
I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
@@ -425,9 +440,9 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
adjusted_mode->htotal,
bpp, intel_dsi->lane_count) + 1);
}
- I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), 8309); /* max */
- I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), 0x14); /* max */
- I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), 0xffff); /* max */
+ I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
+ I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
+ I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), intel_dsi->rst_timer_val);
/* dphy stuff */
@@ -442,29 +457,31 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
*
* XXX: write MIPI_STOP_STATE_STALL?
*/
- I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 0x46);
+ I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe),
+ intel_dsi->hs_to_lp_count);
/* XXX: low power clock equivalence in terms of byte clock. the number
* of byte clocks occupied in one low power clock. based on txbyteclkhs
* and txclkesc. txclkesc time / txbyteclk time * (105 +
* MIPI_STOP_STATE_STALL) / 105.???
*/
- I915_WRITE(MIPI_LP_BYTECLK(pipe), 4);
+ I915_WRITE(MIPI_LP_BYTECLK(pipe), intel_dsi->lp_byte_clk);
/* the bw essential for transmitting 16 long packets containing 252
* bytes meant for dcs write memory command is programmed in this
* register in terms of byte clocks. based on dsi transfer rate and the
* number of lanes configured the time taken to transmit 16 long packets
* in a dsi stream varies. */
- I915_WRITE(MIPI_DBI_BW_CTRL(pipe), 0x820);
+ I915_WRITE(MIPI_DBI_BW_CTRL(pipe), intel_dsi->bw_timer);
I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
- 0xa << LP_HS_SSW_CNT_SHIFT |
- 0x14 << HS_LP_PWR_SW_CNT_SHIFT);
+ intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
+ intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
if (is_vid_mode(intel_dsi))
I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
- intel_dsi->video_mode_format);
+ intel_dsi->video_frmt_cfg_bits |
+ intel_dsi->video_mode_format);
}
static enum drm_connector_status
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index c7765f33d524..b4a27cec882f 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -39,6 +39,13 @@ struct intel_dsi_device {
struct intel_dsi_dev_ops {
bool (*init)(struct intel_dsi_device *dsi);
+ void (*panel_reset)(struct intel_dsi_device *dsi);
+
+ void (*disable_panel_power)(struct intel_dsi_device *dsi);
+
+ /* one-time programmable (OTP) commands, if needed */
+ void (*send_otp_cmds)(struct intel_dsi_device *dsi);
+
/* This callback must be able to assume DSI commands can be sent */
void (*enable)(struct intel_dsi_device *dsi);
@@ -89,6 +96,20 @@ struct intel_dsi {
/* eot for MIPI_EOT_DISABLE register */
u32 eot_disable;
+
+ u32 port_bits;
+ u32 bw_timer;
+ u32 dphy_reg;
+ u32 video_frmt_cfg_bits;
+ u16 lp_byte_clk;
+
+ /* timeouts in byte clocks */
+ u16 lp_rx_timeout;
+ u16 turn_arnd_val;
+ u16 rst_timer_val;
+ u16 hs_to_lp_count;
+ u16 clk_lp_to_hs_count;
+ u16 clk_hs_to_lp_count;
};
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
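
The new intel_dsi_dev_ops hooks added above (panel_reset, disable_panel_power, send_otp_cmds) are optional, and the callers in intel_dsi.c only invoke them after a NULL check. The standalone sketch below only illustrates that optional-callback pattern with invented names (demo_*) and simplified stand-in types; it is not the i915 API itself.

/* Illustrative sketch only: invented names and simplified stand-in types. */
#include <stdio.h>

struct demo_dsi_device {
	const char *name;
};

struct demo_dsi_dev_ops {
	void (*panel_reset)(struct demo_dsi_device *dsi);	/* optional */
	void (*send_otp_cmds)(struct demo_dsi_device *dsi);	/* optional */
};

static void demo_panel_reset(struct demo_dsi_device *dsi)
{
	printf("%s: panel reset\n", dsi->name);
}

/* This hypothetical panel leaves send_otp_cmds unset; callers must cope. */
static const struct demo_dsi_dev_ops demo_ops = {
	.panel_reset = demo_panel_reset,
};

static void demo_pre_enable(struct demo_dsi_device *dsi,
			    const struct demo_dsi_dev_ops *ops)
{
	/* Mirror of the NULL-checked call pattern used in the patch. */
	if (ops->panel_reset)
		ops->panel_reset(dsi);
	if (ops->send_otp_cmds)
		ops->send_otp_cmds(dsi);
}

int main(void)
{
	struct demo_dsi_device dsi = { .name = "demo-panel" };

	demo_pre_enable(&dsi, &demo_ops);
	return 0;
}
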
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 44279b2ade88..ba79ec19da3b 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -50,6 +50,8 @@ static const u32 lfsr_converts[] = {
71, 35 /* 91 - 92 */
};
+#ifdef DSI_CLK_FROM_RR
+
static u32 dsi_rr_formula(const struct drm_display_mode *mode,
int pixel_format, int video_mode_format,
int lane_count, bool eotp)
@@ -121,7 +123,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
/* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
- dsi_clk = dsi_bit_clock_hz / (1000 * 1000);
+ dsi_clk = dsi_bit_clock_hz / 1000;
if (eotp && video_mode_format == VIDEO_MODE_BURST)
dsi_clk *= 2;
@@ -129,64 +131,37 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
return dsi_clk;
}
-#ifdef MNP_FROM_TABLE
-
-struct dsi_clock_table {
- u32 freq;
- u8 m;
- u8 p;
-};
-
-static const struct dsi_clock_table dsi_clk_tbl[] = {
- {300, 72, 6}, {313, 75, 6}, {323, 78, 6}, {333, 80, 6},
- {343, 82, 6}, {353, 85, 6}, {363, 87, 6}, {373, 90, 6},
- {383, 92, 6}, {390, 78, 5}, {393, 79, 5}, {400, 80, 5},
- {401, 80, 5}, {402, 80, 5}, {403, 81, 5}, {404, 81, 5},
- {405, 81, 5}, {406, 81, 5}, {407, 81, 5}, {408, 82, 5},
- {409, 82, 5}, {410, 82, 5}, {411, 82, 5}, {412, 82, 5},
- {413, 83, 5}, {414, 83, 5}, {415, 83, 5}, {416, 83, 5},
- {417, 83, 5}, {418, 84, 5}, {419, 84, 5}, {420, 84, 5},
- {430, 86, 5}, {440, 88, 5}, {450, 90, 5}, {460, 92, 5},
- {470, 75, 4}, {480, 77, 4}, {490, 78, 4}, {500, 80, 4},
- {510, 82, 4}, {520, 83, 4}, {530, 85, 4}, {540, 86, 4},
- {550, 88, 4}, {560, 90, 4}, {570, 91, 4}, {580, 70, 3},
- {590, 71, 3}, {600, 72, 3}, {610, 73, 3}, {620, 74, 3},
- {630, 76, 3}, {640, 77, 3}, {650, 78, 3}, {660, 79, 3},
- {670, 80, 3}, {680, 82, 3}, {690, 83, 3}, {700, 84, 3},
- {710, 85, 3}, {720, 86, 3}, {730, 88, 3}, {740, 89, 3},
- {750, 90, 3}, {760, 91, 3}, {770, 92, 3}, {780, 62, 2},
- {790, 63, 2}, {800, 64, 2}, {880, 70, 2}, {900, 72, 2},
- {1000, 80, 2}, /* dsi clock frequency in Mhz*/
-};
+#else
-static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
+/* Get DSI clock from pixel clock */
+static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode,
+ int pixel_format, int lane_count)
{
- unsigned int i;
- u8 m;
- u8 n;
- u8 p;
- u32 m_seed;
-
- if (dsi_clk < 300 || dsi_clk > 1000)
- return -ECHRNG;
+ u32 dsi_clk_khz;
+ u32 bpp;
- for (i = 0; i <= ARRAY_SIZE(dsi_clk_tbl); i++) {
- if (dsi_clk_tbl[i].freq > dsi_clk)
- break;
+ switch (pixel_format) {
+ default:
+ case VID_MODE_FORMAT_RGB888:
+ case VID_MODE_FORMAT_RGB666_LOOSE:
+ bpp = 24;
+ break;
+ case VID_MODE_FORMAT_RGB666:
+ bpp = 18;
+ break;
+ case VID_MODE_FORMAT_RGB565:
+ bpp = 16;
+ break;
}
- m = dsi_clk_tbl[i].m;
- p = dsi_clk_tbl[i].p;
- m_seed = lfsr_converts[m - 62];
- n = 1;
- dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + p - 2);
- dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
- m_seed << DSI_PLL_M1_DIV_SHIFT;
+ /* DSI data rate in kHz = pixel clock (kHz) * bits per pixel / lane count */
+ dsi_clk_khz = DIV_ROUND_CLOSEST(mode->clock * bpp, lane_count);
- return 0;
+ return dsi_clk_khz;
}
-#else
+#endif
static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
{
@@ -194,36 +169,47 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
u32 ref_clk;
u32 error;
u32 tmp_error;
- u32 target_dsi_clk;
- u32 calc_dsi_clk;
+ int target_dsi_clk;
+ int calc_dsi_clk;
u32 calc_m;
u32 calc_p;
u32 m_seed;
- if (dsi_clk < 300 || dsi_clk > 1150) {
+ /* dsi_clk is expected in KHZ */
+ if (dsi_clk < 300000 || dsi_clk > 1150000) {
DRM_ERROR("DSI CLK Out of Range\n");
return -ECHRNG;
}
ref_clk = 25000;
- target_dsi_clk = dsi_clk * 1000;
+ target_dsi_clk = dsi_clk;
error = 0xFFFFFFFF;
+ tmp_error = 0xFFFFFFFF;
calc_m = 0;
calc_p = 0;
for (m = 62; m <= 92; m++) {
for (p = 2; p <= 6; p++) {
-
+ /* Find the m and p divisors that minimize the
+ absolute error against the required clock */
calc_dsi_clk = (m * ref_clk) / p;
- if (calc_dsi_clk >= target_dsi_clk) {
- tmp_error = calc_dsi_clk - target_dsi_clk;
- if (tmp_error < error) {
- error = tmp_error;
- calc_m = m;
- calc_p = p;
- }
+ if (calc_dsi_clk == target_dsi_clk) {
+ calc_m = m;
+ calc_p = p;
+ error = 0;
+ break;
+ } else
+ tmp_error = abs(target_dsi_clk - calc_dsi_clk);
+
+ if (tmp_error < error) {
+ error = tmp_error;
+ calc_m = m;
+ calc_p = p;
}
}
+
+ if (error == 0)
+ break;
}
m_seed = lfsr_converts[calc_m - 62];
@@ -235,8 +221,6 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
return 0;
}
-#endif
-
/*
* XXX: The muxing and gating is hard coded for now. Need to add support for
* sharing PLLs with two DSI outputs.
@@ -251,9 +235,8 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
struct dsi_mnp dsi_mnp;
u32 dsi_clk;
- dsi_clk = dsi_rr_formula(mode, intel_dsi->pixel_format,
- intel_dsi->video_mode_format,
- intel_dsi->lane_count, !intel_dsi->eot_disable);
+ dsi_clk = dsi_clk_from_pclk(mode, intel_dsi->pixel_format,
+ intel_dsi->lane_count);
ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
if (ret) {
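
To make the arithmetic in dsi_clk_from_pclk() and dsi_calc_mnp() above concrete, here is a small userspace sketch (not kernel code) that applies the same formula, DSI clock in kHz = pixel clock (kHz) * bpp / lane count, and the same brute-force m/p search over m in [62, 92] and p in [2, 6]. The example numbers (148500 kHz pixel clock, 24 bpp, 4 lanes) are hypothetical, and the lfsr_converts lookup and register packing are omitted.

/* Illustrative sketch only; all numbers below are hypothetical. */
#include <stdio.h>

/* DSI clock in kHz = pixel clock (kHz) * bits per pixel / lane count,
 * rounded to nearest, as in dsi_clk_from_pclk(). */
static unsigned int dsi_clk_from_pclk_khz(unsigned int pclk_khz,
					  unsigned int bpp,
					  unsigned int lane_count)
{
	return (pclk_khz * bpp + lane_count / 2) / lane_count;
}

int main(void)
{
	const unsigned int ref_clk = 25000;	/* kHz, as in dsi_calc_mnp() */
	unsigned int target, best_err = ~0u, best_m = 0, best_p = 0, m, p;

	/* e.g. a 148500 kHz pixel clock, RGB888 (24 bpp), 4 lanes */
	target = dsi_clk_from_pclk_khz(148500, 24, 4);	/* 891000 kHz */

	/* Same search space as dsi_calc_mnp(): m in [62, 92], p in [2, 6],
	 * minimizing the absolute error against the target clock. */
	for (m = 62; m <= 92; m++) {
		for (p = 2; p <= 6; p++) {
			unsigned int calc = m * ref_clk / p;
			unsigned int err = calc > target ? calc - target
							 : target - calc;

			if (err < best_err) {
				best_err = err;
				best_m = m;
				best_p = p;
			}
		}
	}

	printf("target %u kHz -> m=%u p=%u (%u kHz, error %u kHz)\n",
	       target, best_m, best_p, best_m * ref_clk / best_p, best_err);
	return 0;
}
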
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 3c7736546856..eeff998e52ef 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -234,8 +234,9 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
intel_modeset_check_state(connector->dev);
}
-static int intel_dvo_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_dvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 895fcb4fbd94..39eac9937a4a 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -57,18 +57,14 @@ static struct fb_ops intelfb_ops = {
.fb_debug_leave = drm_fb_helper_debug_leave,
};
-static int intelfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
+static int intelfb_alloc(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
struct drm_device *dev = helper->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct fb_info *info;
- struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
- struct device *device = &dev->pdev->dev;
int size, ret;
/* we don't do packed 24bpp */
@@ -94,8 +90,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out;
}
- mutex_lock(&dev->struct_mutex);
-
/* Flush everything out, we'll be doing GTT only from now on */
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret) {
@@ -103,7 +97,50 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unref;
}
- info = framebuffer_alloc(0, device);
+ ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
+ if (ret)
+ goto out_unpin;
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin(obj);
+out_unref:
+ drm_gem_object_unreference(&obj->base);
+out:
+ return ret;
+}
+
+static int intelfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct intel_fbdev *ifbdev =
+ container_of(helper, struct intel_fbdev, helper);
+ struct intel_framebuffer *intel_fb = &ifbdev->ifb;
+ struct drm_device *dev = helper->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
+ int size, ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (!intel_fb->obj) {
+ DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
+ ret = intelfb_alloc(helper, sizes);
+ if (ret)
+ goto out_unlock;
+ } else {
+ DRM_DEBUG_KMS("re-using BIOS fb\n");
+ sizes->fb_width = intel_fb->base.width;
+ sizes->fb_height = intel_fb->base.height;
+ }
+
+ obj = intel_fb->obj;
+ size = obj->base.size;
+
+ info = framebuffer_alloc(0, &dev->pdev->dev);
if (!info) {
ret = -ENOMEM;
goto out_unpin;
@@ -111,10 +148,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->par = helper;
- ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
- if (ret)
- goto out_unpin;
-
fb = &ifbdev->ifb.base;
ifbdev->helper.fb = fb;
@@ -170,17 +203,15 @@ static int intelfb_create(struct drm_fb_helper *helper,
fb->width, fb->height,
i915_gem_obj_ggtt_offset(obj), obj);
-
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unpin:
i915_gem_object_unpin(obj);
-out_unref:
drm_gem_object_unreference(&obj->base);
+out_unlock:
mutex_unlock(&dev->struct_mutex);
-out:
return ret;
}
@@ -297,8 +328,6 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
fb_set_suspend(info, state);
}
-MODULE_LICENSE("GPL and additional rights");
-
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 03f9ca70530c..6db0d9d17f47 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -130,9 +130,9 @@ static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
static void g4x_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
- const uint8_t *frame, ssize_t len)
+ const void *frame, ssize_t len)
{
- uint32_t *data = (uint32_t *)frame;
+ const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val = I915_READ(VIDEO_DIP_CTL);
@@ -167,9 +167,9 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
static void ibx_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
- const uint8_t *frame, ssize_t len)
+ const void *frame, ssize_t len)
{
- uint32_t *data = (uint32_t *)frame;
+ const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -205,9 +205,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
static void cpt_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
- const uint8_t *frame, ssize_t len)
+ const void *frame, ssize_t len)
{
- uint32_t *data = (uint32_t *)frame;
+ const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -246,9 +246,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
static void vlv_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
- const uint8_t *frame, ssize_t len)
+ const void *frame, ssize_t len)
{
- uint32_t *data = (uint32_t *)frame;
+ const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -284,9 +284,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
static void hsw_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
- const uint8_t *frame, ssize_t len)
+ const void *frame, ssize_t len)
{
- uint32_t *data = (uint32_t *)frame;
+ const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -853,8 +853,9 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
return 225000;
}
-static int intel_hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
return MODE_CLOCK_HIGH;
@@ -1081,7 +1082,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
- int port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
@@ -1090,41 +1091,33 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
/* Enable clock channels for this port */
mutex_lock(&dev_priv->dpio_lock);
- val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
- vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
/* HDMI 1.0V-2dB */
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port),
- 0x2b245f5f);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
- 0x5578b83a);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port),
- 0x0c782040);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX3_SWING_CTL4(port),
- 0x2b247878);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
- 0x00002000);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
- DPIO_TX_OCALINIT_EN);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
/* Program lane clock */
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port),
- 0x00760018);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port),
- 0x00400888);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
mutex_unlock(&dev_priv->dpio_lock);
intel_enable_hdmi(encoder);
- vlv_wait_port_ready(dev_priv, port);
+ vlv_wait_port_ready(dev_priv, dport);
}
static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
@@ -1134,7 +1127,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
- int port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
if (!IS_VALLEYVIEW(dev))
@@ -1142,24 +1135,22 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
-
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
- 0x00002000);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
- DPIO_TX_OCALINIT_EN);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -1169,13 +1160,13 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
- int port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
mutex_lock(&dev_priv->dpio_lock);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 0x00000000);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 0x00e00060);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
mutex_unlock(&dev_priv->dpio_lock);
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 2ca17b14b6c1..d33b61d0dd33 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -82,20 +82,11 @@ static int get_disp_clk_div(struct drm_i915_private *dev_priv,
static void gmbus_set_freq(struct drm_i915_private *dev_priv)
{
- int vco_freq[] = { 800, 1600, 2000, 2400 };
- int gmbus_freq = 0, cdclk_div, hpll_freq;
+ int vco, gmbus_freq = 0, cdclk_div;
BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
- /* Skip setting the gmbus freq if BIOS has already programmed it */
- if (I915_READ(GMBUSFREQ_VLV) != 0xA0)
- return;
-
- /* Obtain SKU information */
- mutex_lock(&dev_priv->dpio_lock);
- hpll_freq =
- vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK;
- mutex_unlock(&dev_priv->dpio_lock);
+ vco = valleyview_get_vco(dev_priv);
/* Get the CDCLK divide ratio */
cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
@@ -106,7 +97,7 @@ static void gmbus_set_freq(struct drm_i915_private *dev_priv)
* in fact 1MHz is the correct frequency.
*/
if (cdclk_div)
- gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;
+ gmbus_freq = (vco << 1) / cdclk_div;
if (WARN_ON(gmbus_freq == 0))
return;
@@ -267,13 +258,6 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
algo->data = bus;
}
-/*
- * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
- * mode. This results in spurious interrupt warnings if the legacy irq no. is
- * shared with another device. The kernel then disables that interrupt source
- * and so prevents the other device from working properly.
- */
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
static int
gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
u32 gmbus2_status,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c3b4da7895ed..8bcb93a2a9f6 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -256,8 +256,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
POSTING_READ(lvds_encoder->reg);
}
-static int intel_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
@@ -446,9 +447,19 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
if (dev_priv->modeset_restore == MODESET_DONE)
goto exit;
- drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(dev, true);
- drm_modeset_unlock_all(dev);
+ /*
+ * Some old platforms' BIOS loves to wreak havoc while the lid is closed.
+ * We try to detect this here and undo any damage. The split for PCH
+ * platforms is rather conservative and a bit arbitrary, except that on
+ * those platforms VGA disabling requires actual legacy VGA I/O access,
+ * and as part of the cleanup in the hw state restore we also redisable
+ * the vga plane.
+ */
+ if (!HAS_PCH_SPLIT(dev)) {
+ drm_modeset_lock_all(dev);
+ intel_modeset_setup_hw_state(dev, true);
+ drm_modeset_unlock_all(dev);
+ }
dev_priv->modeset_restore = MODESET_DONE;
@@ -744,57 +755,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
{ } /* terminating entry */
};
-/**
- * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
- * @dev: drm device
- * @connector: LVDS connector
- *
- * Find the reduced downclock for LVDS in EDID.
- */
-static void intel_find_lvds_downclock(struct drm_device *dev,
- struct drm_display_mode *fixed_mode,
- struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *scan;
- int temp_downclock;
-
- temp_downclock = fixed_mode->clock;
- list_for_each_entry(scan, &connector->probed_modes, head) {
- /*
- * If one mode has the same resolution with the fixed_panel
- * mode while they have the different refresh rate, it means
- * that the reduced downclock is found for the LVDS. In such
- * case we can set the different FPx0/1 to dynamically select
- * between low and high frequency.
- */
- if (scan->hdisplay == fixed_mode->hdisplay &&
- scan->hsync_start == fixed_mode->hsync_start &&
- scan->hsync_end == fixed_mode->hsync_end &&
- scan->htotal == fixed_mode->htotal &&
- scan->vdisplay == fixed_mode->vdisplay &&
- scan->vsync_start == fixed_mode->vsync_start &&
- scan->vsync_end == fixed_mode->vsync_end &&
- scan->vtotal == fixed_mode->vtotal) {
- if (scan->clock < temp_downclock) {
- /*
- * The downclock is already found. But we
- * expect to find the lower downclock.
- */
- temp_downclock = scan->clock;
- }
- }
- }
- if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
- /* We found the downclock for LVDS. */
- dev_priv->lvds_downclock_avail = 1;
- dev_priv->lvds_downclock = temp_downclock;
- DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
- "Normal clock %dKhz, downclock %dKhz\n",
- fixed_mode->clock, temp_downclock);
- }
-}
-
/*
* Enumerate the child dev array parsed from VBT to check whether
* the LVDS is present.
@@ -1072,8 +1032,22 @@ void intel_lvds_init(struct drm_device *dev)
fixed_mode = drm_mode_duplicate(dev, scan);
if (fixed_mode) {
- intel_find_lvds_downclock(dev, fixed_mode,
- connector);
+ intel_connector->panel.downclock_mode =
+ intel_find_panel_downclock(dev,
+ fixed_mode, connector);
+ if (intel_connector->panel.downclock_mode !=
+ NULL && i915_lvds_downclock) {
+ /* We found the downclock for LVDS. */
+ dev_priv->lvds_downclock_avail = true;
+ dev_priv->lvds_downclock =
+ intel_connector->panel.
+ downclock_mode->clock;
+ DRM_DEBUG_KMS("LVDS downclock is found"
+ " in EDID. Normal clock %dKhz, "
+ "downclock %dKhz\n",
+ fixed_mode->clock,
+ dev_priv->lvds_downclock);
+ }
goto out;
}
}
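
The downclock detection that used to live here moves into the shared intel_find_panel_downclock() helper declared earlier in this diff; the criterion itself is unchanged: a probed mode is a downclock candidate only if every horizontal and vertical timing field matches the fixed panel mode and its clock is strictly lower, and the lowest such clock wins. The standalone sketch below restates that rule with simplified stand-in types and hypothetical mode numbers; it is not the helper's actual implementation.

/* Illustrative sketch only: simplified stand-in types, hypothetical modes. */
#include <stdio.h>
#include <stdbool.h>

struct demo_mode {
	int clock;		/* kHz */
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
};

/* Same criterion as the removed intel_find_lvds_downclock(): identical
 * timings, strictly lower clock; the lowest such clock wins. */
static bool timings_match(const struct demo_mode *a, const struct demo_mode *b)
{
	return a->hdisplay == b->hdisplay &&
	       a->hsync_start == b->hsync_start &&
	       a->hsync_end == b->hsync_end &&
	       a->htotal == b->htotal &&
	       a->vdisplay == b->vdisplay &&
	       a->vsync_start == b->vsync_start &&
	       a->vsync_end == b->vsync_end &&
	       a->vtotal == b->vtotal;
}

int main(void)
{
	const struct demo_mode fixed = { 68900, 1366, 1414, 1446, 1576,
					 768, 771, 777, 790 };
	const struct demo_mode probed[] = {
		{ 68900, 1366, 1414, 1446, 1576, 768, 771, 777, 790 },
		{ 56000, 1366, 1414, 1446, 1576, 768, 771, 777, 790 }, /* downclock */
		{ 74250, 1280, 1390, 1430, 1650, 720, 725, 730, 750 }, /* other mode */
	};
	int downclock = fixed.clock;
	unsigned int i;

	for (i = 0; i < sizeof(probed) / sizeof(probed[0]); i++)
		if (timings_match(&probed[i], &fixed) &&
		    probed[i].clock < downclock)
			downclock = probed[i].clock;

	if (downclock < fixed.clock)
		printf("downclock found: %d kHz (normal %d kHz)\n",
		       downclock, fixed.clock);
	return 0;
}
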
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 6d69a9bad865..acde2945eb8a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -28,7 +28,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
-#include <linux/acpi_io.h>
#include <acpi/video.h>
#include <drm/drmP.h>
@@ -64,7 +63,7 @@ struct opregion_header {
u8 driver_ver[16];
u32 mboxes;
u8 reserved[164];
-} __attribute__((packed));
+} __packed;
/* OpRegion mailbox #1: public ACPI methods */
struct opregion_acpi {
@@ -86,7 +85,7 @@ struct opregion_acpi {
u32 cnot; /* current OS notification */
u32 nrdy; /* driver status */
u8 rsvd2[60];
-} __attribute__((packed));
+} __packed;
/* OpRegion mailbox #2: SWSCI */
struct opregion_swsci {
@@ -94,7 +93,7 @@ struct opregion_swsci {
u32 parm; /* command parameters */
u32 dslp; /* driver sleep time-out */
u8 rsvd[244];
-} __attribute__((packed));
+} __packed;
/* OpRegion mailbox #3: ASLE */
struct opregion_asle {
@@ -115,7 +114,7 @@ struct opregion_asle {
u32 srot; /* supported rotation angles */
u32 iuer; /* IUER events */
u8 rsvd[86];
-} __attribute__((packed));
+} __packed;
/* Driver readiness indicator */
#define ASLE_ARDY_READY (1 << 0)
@@ -227,6 +226,8 @@ struct opregion_asle {
#define ACPI_DIGITAL_OUTPUT (3<<8)
#define ACPI_LVDS_OUTPUT (4<<8)
+#define MAX_DSLP 1500
+
#ifdef CONFIG_ACPI
static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
{
@@ -261,10 +262,11 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
/* The spec says 2ms should be the default, but it's too small
* for some machines. */
dslp = 50;
- } else if (dslp > 500) {
+ } else if (dslp > MAX_DSLP) {
/* Hey bios, trust must be earned. */
- WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp);
- dslp = 500;
+ DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
+ "using %u ms instead\n", dslp, MAX_DSLP);
+ dslp = MAX_DSLP;
}
/* The spec tells us to do this, but we are the only user... */
@@ -396,13 +398,8 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- struct intel_connector *intel_connector = NULL;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
+ struct intel_connector *intel_connector;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
- u32 ret = 0;
- bool found = false;
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
@@ -414,38 +411,20 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
return ASLC_BACKLIGHT_FAILED;
mutex_lock(&dev->mode_config.mutex);
+
/*
- * Could match the OpRegion connector here instead, but we'd also need
- * to verify the connector could handle a backlight call.
+ * Update backlight on all connectors that support backlight (usually
+ * only one).
*/
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- if (encoder->crtc == crtc) {
- found = true;
- break;
- }
-
- if (!found) {
- ret = ASLC_BACKLIGHT_FAILED;
- goto out;
- }
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->encoder == encoder)
- intel_connector = to_intel_connector(connector);
-
- if (!intel_connector) {
- ret = ASLC_BACKLIGHT_FAILED;
- goto out;
- }
-
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
- intel_panel_set_backlight(intel_connector, bclp, 255);
+ list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
+ intel_panel_set_backlight(intel_connector, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
-out:
mutex_unlock(&dev->mode_config.mutex);
- return ret;
+
+ return 0;
}
static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
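
For reference, asle_set_backlight() above treats bclp as a 0..255 request and reports the resulting brightness back to the firmware as a percentage, DIV_ROUND_UP(bclp * 100, 255), with a valid bit OR'ed in. The tiny userspace sketch below mirrors that mapping; the valid-bit position used here is an assumption for the sketch, not a statement about the driver's register layout.

/* Illustrative sketch only; the valid-bit position is an assumption here. */
#include <stdio.h>

#define DEMO_CBLV_VALID (1u << 31)

static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	unsigned int bclp;

	/* bclp is the firmware's 0..255 request; cblv reports it as 0..100% */
	for (bclp = 0; bclp <= 255; bclp += 51) {
		unsigned int cblv = div_round_up(bclp * 100, 255) | DEMO_CBLV_VALID;

		printf("bclp %3u -> cblv %3u%%\n", bclp, cblv & ~DEMO_CBLV_VALID);
	}
	return 0;
}
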
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a98a990fbab3..a759ecdb7a6e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1005,7 +1005,7 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
u32 pfit_control;
/* i830 doesn't have a panel fitter */
- if (IS_I830(dev))
+ if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
return -1;
pfit_control = I915_READ(PFIT_CONTROL);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index f161ac02c4f6..350de359123a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -325,203 +325,170 @@ out:
pipe_config->gmch_pfit.lvds_border_bits = border;
}
-static int is_backlight_combination_mode(struct drm_device *dev)
+static int i915_panel_invert_brightness;
+MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
+ "(-1 force normal, 0 machine defaults, 1 force inversion), please "
+ "report PCI device ID, subsystem vendor and subsystem device ID "
+ "to dri-devel@lists.freedesktop.org, if your machine needs it. "
+ "It will then be included in an upcoming module version.");
+module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
+static u32 intel_panel_compute_brightness(struct intel_connector *connector,
+ u32 val)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
- if (IS_GEN4(dev))
- return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
+ WARN_ON(panel->backlight.max == 0);
- if (IS_GEN2(dev))
- return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
+ if (i915_panel_invert_brightness < 0)
+ return val;
- return 0;
+ if (i915_panel_invert_brightness > 0 ||
+ dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
+ return panel->backlight.max - val;
+ }
+
+ return val;
}
-/* XXX: query mode clock or hardware clock and program max PWM appropriately
- * when it's 0.
- */
-static u32 i915_read_blc_pwm_ctl(struct drm_device *dev, enum pipe pipe)
+static u32 bdw_get_backlight(struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val;
- WARN_ON_SMP(!spin_is_locked(&dev_priv->backlight.lock));
-
- /* Restore the CTL value if it lost, e.g. GPU reset */
-
- if (HAS_PCH_SPLIT(dev_priv->dev)) {
- val = I915_READ(BLC_PWM_PCH_CTL2);
- if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
- dev_priv->regfile.saveBLC_PWM_CTL2 = val;
- } else if (val == 0) {
- val = dev_priv->regfile.saveBLC_PWM_CTL2;
- I915_WRITE(BLC_PWM_PCH_CTL2, val);
- }
- } else if (IS_VALLEYVIEW(dev)) {
- val = I915_READ(VLV_BLC_PWM_CTL(pipe));
- if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
- dev_priv->regfile.saveBLC_PWM_CTL = val;
- dev_priv->regfile.saveBLC_PWM_CTL2 =
- I915_READ(VLV_BLC_PWM_CTL2(pipe));
- } else if (val == 0) {
- val = dev_priv->regfile.saveBLC_PWM_CTL;
- I915_WRITE(VLV_BLC_PWM_CTL(pipe), val);
- I915_WRITE(VLV_BLC_PWM_CTL2(pipe),
- dev_priv->regfile.saveBLC_PWM_CTL2);
- }
+ return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
- if (!val)
- val = 0x0f42ffff;
- } else {
- val = I915_READ(BLC_PWM_CTL);
- if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
- dev_priv->regfile.saveBLC_PWM_CTL = val;
- if (INTEL_INFO(dev)->gen >= 4)
- dev_priv->regfile.saveBLC_PWM_CTL2 =
- I915_READ(BLC_PWM_CTL2);
- } else if (val == 0) {
- val = dev_priv->regfile.saveBLC_PWM_CTL;
- I915_WRITE(BLC_PWM_CTL, val);
- if (INTEL_INFO(dev)->gen >= 4)
- I915_WRITE(BLC_PWM_CTL2,
- dev_priv->regfile.saveBLC_PWM_CTL2);
- }
- }
+static u32 pch_get_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- return val;
+ return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
}
-static u32 intel_panel_get_max_backlight(struct drm_device *dev,
- enum pipe pipe)
+static u32 i9xx_get_backlight(struct intel_connector *connector)
{
- u32 max;
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 val;
- max = i915_read_blc_pwm_ctl(dev, pipe);
+ val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (INTEL_INFO(dev)->gen < 4)
+ val >>= 1;
- if (HAS_PCH_SPLIT(dev)) {
- max >>= 16;
- } else {
- if (INTEL_INFO(dev)->gen < 4)
- max >>= 17;
- else
- max >>= 16;
+ if (panel->backlight.combination_mode) {
+ u8 lbpc;
- if (is_backlight_combination_mode(dev))
- max *= 0xff;
+ pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
+ val *= lbpc;
}
- DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
-
- return max;
+ return val;
}
-static int i915_panel_invert_brightness;
-MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
- "(-1 force normal, 0 machine defaults, 1 force inversion), please "
- "report PCI device ID, subsystem vendor and subsystem device ID "
- "to dri-devel@lists.freedesktop.org, if your machine needs it. "
- "It will then be included in an upcoming module version.");
-module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
-static u32 intel_panel_compute_brightness(struct drm_device *dev,
- enum pipe pipe, u32 val)
+static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (i915_panel_invert_brightness < 0)
- return val;
+ return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
- if (i915_panel_invert_brightness > 0 ||
- dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
- u32 max = intel_panel_get_max_backlight(dev, pipe);
- if (max)
- return max - val;
- }
+static u32 vlv_get_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
- return val;
+ return _vlv_get_backlight(dev, pipe);
}
-static u32 intel_panel_get_backlight(struct drm_device *dev,
- enum pipe pipe)
+static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
unsigned long flags;
- int reg;
-
- spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-
- if (HAS_PCH_SPLIT(dev)) {
- val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- } else {
- if (IS_VALLEYVIEW(dev))
- reg = VLV_BLC_PWM_CTL(pipe);
- else
- reg = BLC_PWM_CTL;
-
- val = I915_READ(reg) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (INTEL_INFO(dev)->gen < 4)
- val >>= 1;
-
- if (is_backlight_combination_mode(dev)) {
- u8 lbpc;
- pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
- val *= lbpc;
- }
- }
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
- val = intel_panel_compute_brightness(dev, pipe, val);
+ val = dev_priv->display.get_backlight(connector);
+ val = intel_panel_compute_brightness(connector, val);
- spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
return val;
}
-static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
+static void bdw_set_backlight(struct intel_connector *connector, u32 level)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(BLC_PWM_CPU_CTL, val | level);
+ u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
}
-static void intel_panel_actually_set_backlight(struct drm_device *dev,
- enum pipe pipe, u32 level)
+static void pch_set_backlight(struct intel_connector *connector, u32 level)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
- int reg;
- DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
- level = intel_panel_compute_brightness(dev, pipe, level);
+ tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CPU_CTL, tmp | level);
+}
- if (HAS_PCH_SPLIT(dev))
- return intel_pch_panel_set_backlight(dev, level);
+static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 tmp, mask;
- if (is_backlight_combination_mode(dev)) {
- u32 max = intel_panel_get_max_backlight(dev, pipe);
- u8 lbpc;
+ WARN_ON(panel->backlight.max == 0);
- /* we're screwed, but keep behaviour backwards compatible */
- if (!max)
- max = 1;
+ if (panel->backlight.combination_mode) {
+ u8 lbpc;
- lbpc = level * 0xfe / max + 1;
+ lbpc = level * 0xfe / panel->backlight.max + 1;
level /= lbpc;
pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
}
- if (IS_VALLEYVIEW(dev))
- reg = VLV_BLC_PWM_CTL(pipe);
- else
- reg = BLC_PWM_CTL;
-
- tmp = I915_READ(reg);
- if (INTEL_INFO(dev)->gen < 4)
+ if (IS_GEN4(dev)) {
+ mask = BACKLIGHT_DUTY_CYCLE_MASK;
+ } else {
level <<= 1;
- tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(reg, tmp | level);
+ mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
+ }
+
+ tmp = I915_READ(BLC_PWM_CTL) & ~mask;
+ I915_WRITE(BLC_PWM_CTL, tmp | level);
+}
+
+static void vlv_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 tmp;
+
+ tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
+}
+
+static void
+intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+
+ level = intel_panel_compute_brightness(connector, level);
+ dev_priv->display.set_backlight(connector, level);
}
/* set backlight brightness to level in range [0..max] */
@@ -530,45 +497,89 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 freq;
unsigned long flags;
- if (pipe == INVALID_PIPE)
+ if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
- spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
- freq = intel_panel_get_max_backlight(dev, pipe);
- if (!freq) {
- /* we are screwed, bail out */
- goto out;
- }
+ WARN_ON(panel->backlight.max == 0);
- /* scale to hardware, but be careful to not overflow */
+ /* scale to hardware max, but be careful to not overflow */
+ freq = panel->backlight.max;
if (freq < max)
level = level * freq / max;
else
level = freq / max * level;
- dev_priv->backlight.level = level;
- if (dev_priv->backlight.device)
- dev_priv->backlight.device->props.brightness = level;
+ panel->backlight.level = level;
+ if (panel->backlight.device)
+ panel->backlight.device->props.brightness = level;
- if (dev_priv->backlight.enabled)
- intel_panel_actually_set_backlight(dev, pipe, level);
-out:
- spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+ if (panel->backlight.enabled)
+ intel_panel_actually_set_backlight(connector, level);
+
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
+
+static void pch_disable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ intel_panel_actually_set_backlight(connector, 0);
+
+ tmp = I915_READ(BLC_PWM_CPU_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+
+ tmp = I915_READ(BLC_PWM_PCH_CTL1);
+ I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+}
+
+static void i9xx_disable_backlight(struct intel_connector *connector)
+{
+ intel_panel_actually_set_backlight(connector, 0);
+}
+
+static void i965_disable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ intel_panel_actually_set_backlight(connector, 0);
+
+ tmp = I915_READ(BLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+}
+
+static void vlv_disable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 tmp;
+
+ intel_panel_actually_set_backlight(connector, 0);
+
+ tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+ I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
}
void intel_panel_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
unsigned long flags;
- if (pipe == INVALID_PIPE)
+ if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
/*
@@ -582,141 +593,215 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
return;
}
- spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
- dev_priv->backlight.enabled = false;
- intel_panel_actually_set_backlight(dev, pipe, 0);
+ panel->backlight.enabled = false;
+ dev_priv->display.disable_backlight(connector);
- if (INTEL_INFO(dev)->gen >= 4) {
- uint32_t reg, tmp;
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
- if (HAS_PCH_SPLIT(dev))
- reg = BLC_PWM_CPU_CTL2;
- else if (IS_VALLEYVIEW(dev))
- reg = VLV_BLC_PWM_CTL2(pipe);
- else
- reg = BLC_PWM_CTL2;
+static void bdw_enable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 pch_ctl1, pch_ctl2;
+
+ pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+ DRM_DEBUG_KMS("pch backlight already enabled\n");
+ pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ }
- I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
+ pch_ctl2 = panel->backlight.max << 16;
+ I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
- if (HAS_PCH_SPLIT(dev)) {
- tmp = I915_READ(BLC_PWM_PCH_CTL1);
- tmp &= ~BLM_PCH_PWM_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
- }
- }
+ pch_ctl1 = 0;
+ if (panel->backlight.active_low_pwm)
+ pch_ctl1 |= BLM_PCH_POLARITY;
+
+ /* BDW always uses the pch pwm controls. */
+ pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
- spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ POSTING_READ(BLC_PWM_PCH_CTL1);
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+
+ /* This won't stick until the above enable. */
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
}
-void intel_panel_enable_backlight(struct intel_connector *connector)
+static void pch_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
enum transcoder cpu_transcoder =
intel_pipe_to_cpu_transcoder(dev_priv, pipe);
- unsigned long flags;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2;
- if (pipe == INVALID_PIPE)
- return;
+ cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+ if (cpu_ctl2 & BLM_PWM_ENABLE) {
+ WARN(1, "cpu backlight already enabled\n");
+ cpu_ctl2 &= ~BLM_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+ }
- DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+ pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+ DRM_DEBUG_KMS("pch backlight already enabled\n");
+ pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ }
+
+ if (cpu_transcoder == TRANSCODER_EDP)
+ cpu_ctl2 = BLM_TRANSCODER_EDP;
+ else
+ cpu_ctl2 = BLM_PIPE(cpu_transcoder);
+ I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+ POSTING_READ(BLC_PWM_CPU_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+
+ /* This won't stick until the above enable. */
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+ pch_ctl2 = panel->backlight.max << 16;
+ I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+
+ pch_ctl1 = 0;
+ if (panel->backlight.active_low_pwm)
+ pch_ctl1 |= BLM_PCH_POLARITY;
+
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ POSTING_READ(BLC_PWM_PCH_CTL1);
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+}
- spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+static void i9xx_enable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, freq;
- if (dev_priv->backlight.level == 0) {
- dev_priv->backlight.level = intel_panel_get_max_backlight(dev,
- pipe);
- if (dev_priv->backlight.device)
- dev_priv->backlight.device->props.brightness =
- dev_priv->backlight.level;
+ ctl = I915_READ(BLC_PWM_CTL);
+ if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
+ WARN(1, "backlight already enabled\n");
+ I915_WRITE(BLC_PWM_CTL, 0);
}
- if (INTEL_INFO(dev)->gen >= 4) {
- uint32_t reg, tmp;
+ freq = panel->backlight.max;
+ if (panel->backlight.combination_mode)
+ freq /= 0xff;
- if (HAS_PCH_SPLIT(dev))
- reg = BLC_PWM_CPU_CTL2;
- else if (IS_VALLEYVIEW(dev))
- reg = VLV_BLC_PWM_CTL2(pipe);
- else
- reg = BLC_PWM_CTL2;
+ ctl = freq << 17;
+ if (IS_GEN2(dev) && panel->backlight.combination_mode)
+ ctl |= BLM_LEGACY_MODE;
+ if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
+ ctl |= BLM_POLARITY_PNV;
- tmp = I915_READ(reg);
+ I915_WRITE(BLC_PWM_CTL, ctl);
+ POSTING_READ(BLC_PWM_CTL);
- /* Note that this can also get called through dpms changes. And
- * we don't track the backlight dpms state, hence check whether
- * we have to do anything first. */
- if (tmp & BLM_PWM_ENABLE)
- goto set_level;
+ /* XXX: combine this into above write? */
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
- if (INTEL_INFO(dev)->num_pipes == 3)
- tmp &= ~BLM_PIPE_SELECT_IVB;
- else
- tmp &= ~BLM_PIPE_SELECT;
+static void i965_enable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 ctl, ctl2, freq;
- if (cpu_transcoder == TRANSCODER_EDP)
- tmp |= BLM_TRANSCODER_EDP;
- else
- tmp |= BLM_PIPE(cpu_transcoder);
- tmp &= ~BLM_PWM_ENABLE;
-
- I915_WRITE(reg, tmp);
- POSTING_READ(reg);
- I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
-
- if (HAS_PCH_SPLIT(dev) &&
- !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
- tmp = I915_READ(BLC_PWM_PCH_CTL1);
- tmp |= BLM_PCH_PWM_ENABLE;
- tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
- }
+ ctl2 = I915_READ(BLC_PWM_CTL2);
+ if (ctl2 & BLM_PWM_ENABLE) {
+ WARN(1, "backlight already enabled\n");
+ ctl2 &= ~BLM_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_CTL2, ctl2);
}
-set_level:
- /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
- * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
- * registers are set.
- */
- dev_priv->backlight.enabled = true;
- intel_panel_actually_set_backlight(dev, pipe,
- dev_priv->backlight.level);
+ freq = panel->backlight.max;
+ if (panel->backlight.combination_mode)
+ freq /= 0xff;
+
+ ctl = freq << 16;
+ I915_WRITE(BLC_PWM_CTL, ctl);
+
+ /* XXX: combine this into above write? */
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
- spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+ ctl2 = BLM_PIPE(pipe);
+ if (panel->backlight.combination_mode)
+ ctl2 |= BLM_COMBINATION_MODE;
+ if (panel->backlight.active_low_pwm)
+ ctl2 |= BLM_POLARITY_I965;
+ I915_WRITE(BLC_PWM_CTL2, ctl2);
+ POSTING_READ(BLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
}
-/* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */
-static void intel_panel_init_backlight_regs(struct drm_device *dev)
+static void vlv_enable_backlight(struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 ctl, ctl2;
- if (IS_VALLEYVIEW(dev)) {
- enum pipe pipe;
+ ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+ if (ctl2 & BLM_PWM_ENABLE) {
+ WARN(1, "backlight already enabled\n");
+ ctl2 &= ~BLM_PWM_ENABLE;
+ I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+ }
- for_each_pipe(pipe) {
- u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+ ctl = panel->backlight.max << 16;
+ I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl);
- /* Skip if the modulation freq is already set */
- if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
- continue;
+ /* XXX: combine this into above write? */
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
- cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
- cur_val);
- }
- }
+ ctl2 = 0;
+ if (panel->backlight.active_low_pwm)
+ ctl2 |= BLM_POLARITY_I965;
+ I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+ POSTING_READ(VLV_BLC_PWM_CTL2(pipe));
+ I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
}
-static void intel_panel_init_backlight(struct drm_device *dev)
+void intel_panel_enable_backlight(struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ unsigned long flags;
+
+ if (!panel->backlight.present || pipe == INVALID_PIPE)
+ return;
+
+ DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+
+ WARN_ON(panel->backlight.max == 0);
- intel_panel_init_backlight_regs(dev);
+ if (panel->backlight.level == 0) {
+ panel->backlight.level = panel->backlight.max;
+ if (panel->backlight.device)
+ panel->backlight.device->props.brightness =
+ panel->backlight.level;
+ }
+
+ dev_priv->display.enable_backlight(connector);
+ panel->backlight.enabled = true;
- dev_priv->backlight.level = intel_panel_get_backlight(dev, 0);
- dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
}
enum drm_connector_status
@@ -742,7 +827,7 @@ intel_panel_detect(struct drm_device *dev)
}
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
-static int intel_panel_update_status(struct backlight_device *bd)
+static int intel_backlight_device_update_status(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
@@ -756,85 +841,362 @@ static int intel_panel_update_status(struct backlight_device *bd)
return 0;
}
-static int intel_panel_get_brightness(struct backlight_device *bd)
+static int intel_backlight_device_get_brightness(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
- enum pipe pipe;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+ intel_runtime_pm_get(dev_priv);
mutex_lock(&dev->mode_config.mutex);
- pipe = intel_get_pipe_from_connector(connector);
+ ret = intel_panel_get_backlight(connector);
mutex_unlock(&dev->mode_config.mutex);
- if (pipe == INVALID_PIPE)
- return 0;
+ intel_runtime_pm_put(dev_priv);
- return intel_panel_get_backlight(connector->base.dev, pipe);
+ return ret;
}
-static const struct backlight_ops intel_panel_bl_ops = {
- .update_status = intel_panel_update_status,
- .get_brightness = intel_panel_get_brightness,
+static const struct backlight_ops intel_backlight_device_ops = {
+ .update_status = intel_backlight_device_update_status,
+ .get_brightness = intel_backlight_device_get_brightness,
};
-int intel_panel_setup_backlight(struct drm_connector *connector)
+static int intel_backlight_device_register(struct intel_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
struct backlight_properties props;
- unsigned long flags;
- intel_panel_init_backlight(dev);
-
- if (WARN_ON(dev_priv->backlight.device))
+ if (WARN_ON(panel->backlight.device))
return -ENODEV;
+ BUG_ON(panel->backlight.max == 0);
+
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
- props.brightness = dev_priv->backlight.level;
+ props.brightness = panel->backlight.level;
+ props.max_brightness = panel->backlight.max;
- spin_lock_irqsave(&dev_priv->backlight.lock, flags);
- props.max_brightness = intel_panel_get_max_backlight(dev, 0);
- spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
-
- if (props.max_brightness == 0) {
- DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
- return -ENODEV;
- }
- dev_priv->backlight.device =
+ /*
+ * Note: using the same name independent of the connector prevents
+ * registration of multiple backlight devices in the driver.
+ */
+ panel->backlight.device =
backlight_device_register("intel_backlight",
- connector->kdev,
- to_intel_connector(connector),
- &intel_panel_bl_ops, &props);
+ connector->base.kdev,
+ connector,
+ &intel_backlight_device_ops, &props);
- if (IS_ERR(dev_priv->backlight.device)) {
+ if (IS_ERR(panel->backlight.device)) {
DRM_ERROR("Failed to register backlight: %ld\n",
- PTR_ERR(dev_priv->backlight.device));
- dev_priv->backlight.device = NULL;
+ PTR_ERR(panel->backlight.device));
+ panel->backlight.device = NULL;
return -ENODEV;
}
return 0;
}
-void intel_panel_destroy_backlight(struct drm_device *dev)
+static void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ if (panel->backlight.device) {
+ backlight_device_unregister(panel->backlight.device);
+ panel->backlight.device = NULL;
+ }
+}
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static int intel_backlight_device_register(struct intel_connector *connector)
+{
+ return 0;
+}
+static void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+/*
+ * Note: The setup hooks can't assume pipe is set!
+ *
+ * XXX: Query mode clock or hardware clock and program PWM modulation frequency
+ * appropriately when it's 0. Use VBT and/or sane defaults.
+ */
+static int bdw_setup_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 pch_ctl1, pch_ctl2, val;
+
+ pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+ pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+ panel->backlight.max = pch_ctl2 >> 16;
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = bdw_get_backlight(connector);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
+ panel->backlight.level != 0;
+
+ return 0;
+}
+
+static int pch_setup_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+
+ pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+ pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+ panel->backlight.max = pch_ctl2 >> 16;
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = pch_get_backlight(connector);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+ panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
+ (pch_ctl1 & BLM_PCH_PWM_ENABLE) && panel->backlight.level != 0;
+
+ return 0;
+}
+
+static int i9xx_setup_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, val;
+
+ ctl = I915_READ(BLC_PWM_CTL);
+
+ if (IS_GEN2(dev))
+ panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
+
+ if (IS_PINEVIEW(dev))
+ panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
+
+ panel->backlight.max = ctl >> 17;
+ if (panel->backlight.combination_mode)
+ panel->backlight.max *= 0xff;
+
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = i9xx_get_backlight(connector);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ panel->backlight.enabled = panel->backlight.level != 0;
+
+ return 0;
+}
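
A small sketch of the combination-mode bookkeeping used by the i9xx setup/enable hooks above, under the assumption (taken from the patch) that bits 31:17 of BLC_PWM_CTL hold the modulation frequency and that legacy combination mode scales the effective brightness range by 0xff. The register layout is simplified and the helpers are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Max brightness as derived in i9xx_setup_backlight: frequency field
 * times 0xff when legacy combination mode is in use. */
static uint32_t i9xx_max_from_ctl(uint32_t ctl, int combination_mode)
{
	uint32_t max = ctl >> 17;

	if (combination_mode)
		max *= 0xff;
	return max;
}

/* Inverse packing as done in i9xx_enable_backlight. */
static uint32_t i9xx_ctl_from_max(uint32_t max, int combination_mode)
{
	uint32_t freq = max;

	if (combination_mode)
		freq /= 0xff;
	return freq << 17;
}

int main(void)
{
	uint32_t ctl = 0x1234u << 17;
	uint32_t max = i9xx_max_from_ctl(ctl, 1);

	printf("max=%u, repacked ctl=%08x\n", (unsigned)max,
	       (unsigned)i9xx_ctl_from_max(max, 1));
	return 0;
}
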
+
+static int i965_setup_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, ctl2, val;
+
+ ctl2 = I915_READ(BLC_PWM_CTL2);
+ panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
+ panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+ ctl = I915_READ(BLC_PWM_CTL);
+ panel->backlight.max = ctl >> 16;
+ if (panel->backlight.combination_mode)
+ panel->backlight.max *= 0xff;
+
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = i9xx_get_backlight(connector);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
+ panel->backlight.level != 0;
+
+ return 0;
+}
+
+static int vlv_setup_backlight(struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->backlight.device) {
- backlight_device_unregister(dev_priv->backlight.device);
- dev_priv->backlight.device = NULL;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe;
+ u32 ctl, ctl2, val;
+
+ for_each_pipe(pipe) {
+ u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+
+ /* Skip if the modulation freq is already set */
+ if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
+ continue;
+
+ cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
+ cur_val);
}
+
+ ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
+ panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+ ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
+ panel->backlight.max = ctl >> 16;
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = _vlv_get_backlight(dev, PIPE_A);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
+ panel->backlight.level != 0;
+
+ return 0;
}
-#else
+
int intel_panel_setup_backlight(struct drm_connector *connector)
{
- intel_panel_init_backlight(connector->dev);
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_panel *panel = &intel_connector->panel;
+ unsigned long flags;
+ int ret;
+
+ /* set level and max in panel struct */
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ ret = dev_priv->display.setup_backlight(intel_connector);
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+
+ if (ret) {
+ DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
+ drm_get_connector_name(connector));
+ return ret;
+ }
+
+ intel_backlight_device_register(intel_connector);
+
+ panel->backlight.present = true;
+
+ DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, "
+ "sysfs interface %sregistered\n",
+ panel->backlight.enabled ? "enabled" : "disabled",
+ panel->backlight.level, panel->backlight.max,
+ panel->backlight.device ? "" : "not ");
+
return 0;
}
-void intel_panel_destroy_backlight(struct drm_device *dev)
+void intel_panel_destroy_backlight(struct drm_connector *connector)
{
- return;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_panel *panel = &intel_connector->panel;
+
+ panel->backlight.present = false;
+ intel_backlight_device_unregister(intel_connector);
+}
+
+/**
+ * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @fixed_mode: panel native mode
+ * @connector: LVDS/eDP connector
+ *
+ * Finds the reduced downclock mode for LVDS/eDP among the EDID probed modes.
+ *
+ * Return: the reduced downclock mode, or NULL if none is found.
+ */
+struct drm_display_mode *
+intel_find_panel_downclock(struct drm_device *dev,
+ struct drm_display_mode *fixed_mode,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *scan, *tmp_mode;
+ int temp_downclock;
+
+ temp_downclock = fixed_mode->clock;
+ tmp_mode = NULL;
+
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ /*
+ * If a probed mode has the same resolution and timings as the
+ * fixed panel mode but a different refresh rate, a reduced
+ * downclock has been found. In that case we can program
+ * different FPx0/1 dividers to dynamically select between the
+ * low and high frequency.
+ */
+ if (scan->hdisplay == fixed_mode->hdisplay &&
+ scan->hsync_start == fixed_mode->hsync_start &&
+ scan->hsync_end == fixed_mode->hsync_end &&
+ scan->htotal == fixed_mode->htotal &&
+ scan->vdisplay == fixed_mode->vdisplay &&
+ scan->vsync_start == fixed_mode->vsync_start &&
+ scan->vsync_end == fixed_mode->vsync_end &&
+ scan->vtotal == fixed_mode->vtotal) {
+ if (scan->clock < temp_downclock) {
+ /*
+ * A downclock mode has already been found;
+ * keep looking for an even lower one.
+ */
+ temp_downclock = scan->clock;
+ tmp_mode = scan;
+ }
+ }
+ }
+
+ if (temp_downclock < fixed_mode->clock)
+ return drm_mode_duplicate(dev, tmp_mode);
+ else
+ return NULL;
+}
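
Standalone sketch of the search implemented by intel_find_panel_downclock above: among probed modes whose timings match the fixed panel mode, keep the one with the lowest clock below the fixed mode's clock. The struct, sample data and the reduced set of timing fields compared here are simplifications for illustration.

#include <stdio.h>

struct mode {
	int clock;                     /* kHz */
	int hdisplay, htotal;
	int vdisplay, vtotal;
};

static const struct mode *find_downclock(const struct mode *fixed,
					 const struct mode *probed, int n)
{
	const struct mode *best = NULL;
	int lowest = fixed->clock;

	for (int i = 0; i < n; i++) {
		const struct mode *m = &probed[i];

		/* Only consider modes with identical timings. */
		if (m->hdisplay != fixed->hdisplay || m->htotal != fixed->htotal ||
		    m->vdisplay != fixed->vdisplay || m->vtotal != fixed->vtotal)
			continue;
		/* Keep the lowest clock found so far. */
		if (m->clock < lowest) {
			lowest = m->clock;
			best = m;
		}
	}
	return best;
}

int main(void)
{
	struct mode fixed = { 141000, 1920, 2080, 1080, 1130 };
	struct mode probed[] = {
		{ 141000, 1920, 2080, 1080, 1130 },
		{  95000, 1920, 2080, 1080, 1130 },   /* reduced refresh rate */
		{ 148500, 1920, 2200, 1080, 1125 },   /* different timings    */
	};
	const struct mode *dc = find_downclock(&fixed, probed, 3);

	printf("downclock: %d kHz\n", dc ? dc->clock : 0);
	return 0;
}
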
+
+/* Set up chip specific backlight functions */
+void intel_panel_init_backlight_funcs(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_BROADWELL(dev)) {
+ dev_priv->display.setup_backlight = bdw_setup_backlight;
+ dev_priv->display.enable_backlight = bdw_enable_backlight;
+ dev_priv->display.disable_backlight = pch_disable_backlight;
+ dev_priv->display.set_backlight = bdw_set_backlight;
+ dev_priv->display.get_backlight = bdw_get_backlight;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.setup_backlight = pch_setup_backlight;
+ dev_priv->display.enable_backlight = pch_enable_backlight;
+ dev_priv->display.disable_backlight = pch_disable_backlight;
+ dev_priv->display.set_backlight = pch_set_backlight;
+ dev_priv->display.get_backlight = pch_get_backlight;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.setup_backlight = vlv_setup_backlight;
+ dev_priv->display.enable_backlight = vlv_enable_backlight;
+ dev_priv->display.disable_backlight = vlv_disable_backlight;
+ dev_priv->display.set_backlight = vlv_set_backlight;
+ dev_priv->display.get_backlight = vlv_get_backlight;
+ } else if (IS_GEN4(dev)) {
+ dev_priv->display.setup_backlight = i965_setup_backlight;
+ dev_priv->display.enable_backlight = i965_enable_backlight;
+ dev_priv->display.disable_backlight = i965_disable_backlight;
+ dev_priv->display.set_backlight = i9xx_set_backlight;
+ dev_priv->display.get_backlight = i9xx_get_backlight;
+ } else {
+ dev_priv->display.setup_backlight = i9xx_setup_backlight;
+ dev_priv->display.enable_backlight = i9xx_enable_backlight;
+ dev_priv->display.disable_backlight = i9xx_disable_backlight;
+ dev_priv->display.set_backlight = i9xx_set_backlight;
+ dev_priv->display.get_backlight = i9xx_get_backlight;
+ }
}
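
The table above replaces per-call platform checks with function pointers chosen once at init. A toy version of that dispatch pattern, with invented hook names and a simplified platform test:

#include <stdio.h>

struct backlight_funcs {
	int  (*setup)(void);
	void (*enable)(int level);
};

static int  pch_setup(void)        { return 0; }
static void pch_enable(int level)  { printf("pch enable, level %d\n", level); }
static int  i9xx_setup(void)       { return 0; }
static void i9xx_enable(int level) { printf("i9xx enable, level %d\n", level); }

static struct backlight_funcs funcs;

/* Pick the hooks once, based on the platform; callers never check again. */
static void init_backlight_funcs(int has_pch_split)
{
	if (has_pch_split) {
		funcs.setup  = pch_setup;
		funcs.enable = pch_enable;
	} else {
		funcs.setup  = i9xx_setup;
		funcs.enable = i9xx_enable;
	}
}

int main(void)
{
	init_backlight_funcs(1);
	if (funcs.setup() == 0)
		funcs.enable(100);
	return 0;
}
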
-#endif
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode)
@@ -851,4 +1213,8 @@ void intel_panel_fini(struct intel_panel *panel)
if (panel->fixed_mode)
drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+
+ if (panel->downclock_mode)
+ drm_mode_destroy(intel_connector->base.dev,
+ panel->downclock_mode);
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 6e0d5e075b15..d77cc81900f9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,7 +30,9 @@
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
+#include <linux/vgaarb.h>
#include <drm/i915_powerwell.h>
+#include <linux/pm_runtime.h>
/**
* RC6 is a special power stage which allows the GPU to enter a very
@@ -86,7 +88,7 @@ static void i8xx_disable_fbc(struct drm_device *dev)
DRM_DEBUG_KMS("disabled FBC\n");
}
-static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -96,32 +98,40 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
int plane, i;
- u32 fbc_ctl, fbc_ctl2;
+ u32 fbc_ctl;
cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
- /* FBC_CTL wants 64B units */
- cfb_pitch = (cfb_pitch / 64) - 1;
+ /* FBC_CTL wants 32B or 64B units */
+ if (IS_GEN2(dev))
+ cfb_pitch = (cfb_pitch / 32) - 1;
+ else
+ cfb_pitch = (cfb_pitch / 64) - 1;
plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG + (i * 4), 0);
- /* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= plane;
- I915_WRITE(FBC_CONTROL2, fbc_ctl2);
- I915_WRITE(FBC_FENCE_OFF, crtc->y);
+ if (IS_GEN4(dev)) {
+ u32 fbc_ctl2;
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 |= plane;
+ I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+ I915_WRITE(FBC_FENCE_OFF, crtc->y);
+ }
/* enable it... */
- fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
+ fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
- fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
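
The hunk above changes the compressed framebuffer pitch to be programmed in 32-byte units on gen2 and 64-byte units otherwise, minus one in both cases. A quick sketch of that calculation (example values only):

#include <stdio.h>

/* FBC_CTL pitch field: hardware units minus one; unit size depends on gen. */
static int fbc_ctl_pitch(int cfb_pitch_bytes, int is_gen2)
{
	int unit = is_gen2 ? 32 : 64;

	return cfb_pitch_bytes / unit - 1;
}

int main(void)
{
	printf("gen2: %d, gen3+: %d\n",
	       fbc_ctl_pitch(2048, 1), fbc_ctl_pitch(2048, 0));
	return 0;
}
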
@@ -136,7 +146,7 @@ static bool i8xx_fbc_enabled(struct drm_device *dev)
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
-static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void g4x_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -145,16 +155,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
- unsigned long stall_watermark = 200;
u32 dpfc_ctl;
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
- I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
- (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
- (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
/* enable it... */
@@ -191,7 +197,11 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
u32 blt_ecoskpd;
/* Make sure blitter notifies FBC of writes */
- gen6_gt_force_wake_get(dev_priv);
+
+ /* The blitter is part of the Media power well on VLV. This
+ * parameter has no impact on other platforms for now. */
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
+
blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT;
@@ -202,10 +212,11 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
GEN6_BLITTER_LOCK_SHIFT);
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
POSTING_READ(GEN6_BLITTER_ECOSKPD);
- gen6_gt_force_wake_put(dev_priv);
+
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
-static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -214,7 +225,6 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
- unsigned long stall_watermark = 200;
u32 dpfc_ctl;
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
@@ -222,12 +232,11 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
/* Set persistent mode for front-buffer rendering, ala X. */
dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
- dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+ dpfc_ctl |= DPFC_CTL_FENCE_EN;
+ if (IS_GEN5(dev))
+ dpfc_ctl |= obj->fence_reg;
I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
- I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
- (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
- (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
@@ -265,7 +274,7 @@ static bool ironlake_fbc_enabled(struct drm_device *dev)
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
-static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void gen7_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -295,7 +304,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
sandybridge_blit_fbc_update(dev);
- DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
bool intel_fbc_enabled(struct drm_device *dev)
@@ -322,8 +331,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
* the prior work.
*/
if (work->crtc->fb == work->fb) {
- dev_priv->display.enable_fbc(work->crtc,
- work->interval);
+ dev_priv->display.enable_fbc(work->crtc);
dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
dev_priv->fbc.fb_id = work->crtc->fb->base.id;
@@ -360,7 +368,7 @@ static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
dev_priv->fbc.fbc_work = NULL;
}
-static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void intel_enable_fbc(struct drm_crtc *crtc)
{
struct intel_fbc_work *work;
struct drm_device *dev = crtc->dev;
@@ -374,13 +382,12 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL) {
DRM_ERROR("Failed to allocate FBC work structure\n");
- dev_priv->display.enable_fbc(crtc, interval);
+ dev_priv->display.enable_fbc(crtc);
return;
}
work->crtc = crtc;
work->fb = crtc->fb;
- work->interval = interval;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
dev_priv->fbc.fbc_work = work;
@@ -454,7 +461,7 @@ void intel_update_fbc(struct drm_device *dev)
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;
- if (!I915_HAS_FBC(dev)) {
+ if (!HAS_FBC(dev)) {
set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
return;
}
@@ -530,10 +537,10 @@ void intel_update_fbc(struct drm_device *dev)
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
}
- if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
- intel_crtc->plane != 0) {
+ if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) &&
+ intel_crtc->plane != PLANE_A) {
if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
- DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+ DRM_DEBUG_KMS("plane not A, disabling compression\n");
goto out_disable;
}
@@ -595,7 +602,7 @@ void intel_update_fbc(struct drm_device *dev)
intel_disable_fbc(dev);
}
- intel_enable_fbc(crtc, 500);
+ intel_enable_fbc(crtc);
dev_priv->fbc.no_fbc_reason = FBC_OK;
return;
@@ -817,7 +824,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
@@ -850,21 +857,6 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static int i830_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- size >>= 1; /* Convert to cachelines */
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
-
- return size;
-}
-
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
PINEVIEW_DISPLAY_FIFO,
@@ -943,14 +935,14 @@ static const struct intel_watermark_params i915_wm_info = {
2,
I915_FIFO_LINE_SIZE
};
-static const struct intel_watermark_params i855_wm_info = {
+static const struct intel_watermark_params i830_wm_info = {
I855GM_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I830_FIFO_LINE_SIZE
};
-static const struct intel_watermark_params i830_wm_info = {
+static const struct intel_watermark_params i845_wm_info = {
I830_FIFO_SIZE,
I915_MAX_WM,
1,
@@ -958,65 +950,6 @@ static const struct intel_watermark_params i830_wm_info = {
I830_FIFO_LINE_SIZE
};
-static const struct intel_watermark_params ironlake_display_wm_info = {
- ILK_DISPLAY_FIFO,
- ILK_DISPLAY_MAXWM,
- ILK_DISPLAY_DFTWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_wm_info = {
- ILK_CURSOR_FIFO,
- ILK_CURSOR_MAXWM,
- ILK_CURSOR_DFTWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_display_srwm_info = {
- ILK_DISPLAY_SR_FIFO,
- ILK_DISPLAY_MAX_SRWM,
- ILK_DISPLAY_DFT_SRWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_srwm_info = {
- ILK_CURSOR_SR_FIFO,
- ILK_CURSOR_MAX_SRWM,
- ILK_CURSOR_DFT_SRWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-
-static const struct intel_watermark_params sandybridge_display_wm_info = {
- SNB_DISPLAY_FIFO,
- SNB_DISPLAY_MAXWM,
- SNB_DISPLAY_DFTWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_wm_info = {
- SNB_CURSOR_FIFO,
- SNB_CURSOR_MAXWM,
- SNB_CURSOR_DFTWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_display_srwm_info = {
- SNB_DISPLAY_SR_FIFO,
- SNB_DISPLAY_MAX_SRWM,
- SNB_DISPLAY_DFT_SRWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
- SNB_CURSOR_SR_FIFO,
- SNB_CURSOR_MAX_SRWM,
- SNB_CURSOR_DFT_SRWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-
-
/**
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
@@ -1567,7 +1500,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
else if (!IS_GEN2(dev))
wm_info = &i915_wm_info;
else
- wm_info = &i855_wm_info;
+ wm_info = &i830_wm_info;
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
@@ -1615,7 +1548,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (IS_I945G(dev) || IS_I945GM(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+ I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
/* Calc sr entries for one plane configs */
if (HAS_FW_BLC(dev) && enabled) {
@@ -1667,14 +1600,14 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(FW_BLC_SELF,
FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
DRM_DEBUG_KMS("memory self refresh enabled\n");
} else
DRM_DEBUG_KMS("memory self refresh disabled\n");
}
}
-static void i830_update_wm(struct drm_crtc *unused_crtc)
+static void i845_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1689,7 +1622,7 @@ static void i830_update_wm(struct drm_crtc *unused_crtc)
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
- &i830_wm_info,
+ &i845_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
4, latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1700,423 +1633,6 @@ static void i830_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(FW_BLC, fwater_lo);
}
-/*
- * Check the wm result.
- *
- * If any calculated watermark values is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- */
-static bool ironlake_check_srwm(struct drm_device *dev, int level,
- int fbc_wm, int display_wm, int cursor_wm,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
- " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
-
- if (fbc_wm > SNB_FBC_MAX_SRWM) {
- DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
- fbc_wm, SNB_FBC_MAX_SRWM, level);
-
- /* fbc has it's own way to disable FBC WM */
- I915_WRITE(DISP_ARB_CTL,
- I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
- return false;
- } else if (INTEL_INFO(dev)->gen >= 6) {
- /* enable FBC WM (except on ILK, where it must remain off) */
- I915_WRITE(DISP_ARB_CTL,
- I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
- }
-
- if (display_wm > display->max_wm) {
- DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
- display_wm, SNB_DISPLAY_MAX_SRWM, level);
- return false;
- }
-
- if (cursor_wm > cursor->max_wm) {
- DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
- cursor_wm, SNB_CURSOR_MAX_SRWM, level);
- return false;
- }
-
- if (!(fbc_wm || display_wm || cursor_wm)) {
- DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
- return false;
- }
-
- return true;
-}
-
-/*
- * Compute watermark values of WM[1-3],
- */
-static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
- int latency_ns,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor,
- int *fbc_wm, int *display_wm, int *cursor_wm)
-{
- struct drm_crtc *crtc;
- const struct drm_display_mode *adjusted_mode;
- unsigned long line_time_us;
- int hdisplay, htotal, pixel_size, clock;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *fbc_wm = *display_wm = *cursor_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
- clock = adjusted_mode->crtc_clock;
- htotal = adjusted_mode->crtc_htotal;
- hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
- pixel_size = crtc->fb->bits_per_pixel / 8;
-
- line_time_us = (htotal * 1000) / clock;
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = hdisplay * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *display_wm = entries + display->guard_size;
-
- /*
- * Spec says:
- * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
- */
- *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
-
- /* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * 64;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
-
- return ironlake_check_srwm(dev, level,
- *fbc_wm, *display_wm, *cursor_wm,
- display, cursor);
-}
-
-static void ironlake_update_wm(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int fbc_wm, plane_wm, cursor_wm;
- unsigned int enabled;
-
- enabled = 0;
- if (g4x_compute_wm0(dev, PIPE_A,
- &ironlake_display_wm_info,
- dev_priv->wm.pri_latency[0] * 100,
- &ironlake_cursor_wm_info,
- dev_priv->wm.cur_latency[0] * 100,
- &plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEA_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
- DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
- " plane %d, " "cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1 << PIPE_A;
- }
-
- if (g4x_compute_wm0(dev, PIPE_B,
- &ironlake_display_wm_info,
- dev_priv->wm.pri_latency[0] * 100,
- &ironlake_cursor_wm_info,
- dev_priv->wm.cur_latency[0] * 100,
- &plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEB_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
- DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1 << PIPE_B;
- }
-
- /*
- * Calculate and update the self-refresh watermark only when one
- * display plane is used.
- */
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- if (!single_plane_enabled(enabled))
- return;
- enabled = ffs(enabled) - 1;
-
- /* WM1 */
- if (!ironlake_compute_srwm(dev, 1, enabled,
- dev_priv->wm.pri_latency[1] * 500,
- &ironlake_display_srwm_info,
- &ironlake_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM1_LP_ILK,
- WM1_LP_SR_EN |
- (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM2 */
- if (!ironlake_compute_srwm(dev, 2, enabled,
- dev_priv->wm.pri_latency[2] * 500,
- &ironlake_display_srwm_info,
- &ironlake_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM2_LP_ILK,
- WM2_LP_EN |
- (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /*
- * WM3 is unsupported on ILK, probably because we don't have latency
- * data for that power state
- */
-}
-
-static void sandybridge_update_wm(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
- u32 val;
- int fbc_wm, plane_wm, cursor_wm;
- unsigned int enabled;
-
- enabled = 0;
- if (g4x_compute_wm0(dev, PIPE_A,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEA_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEA_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
- " plane %d, " "cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1 << PIPE_A;
- }
-
- if (g4x_compute_wm0(dev, PIPE_B,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEB_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEB_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1 << PIPE_B;
- }
-
- /*
- * Calculate and update the self-refresh watermark only when one
- * display plane is used.
- *
- * SNB support 3 levels of watermark.
- *
- * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
- * and disabled in the descending order
- *
- */
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- if (!single_plane_enabled(enabled) ||
- dev_priv->sprite_scaling_enabled)
- return;
- enabled = ffs(enabled) - 1;
-
- /* WM1 */
- if (!ironlake_compute_srwm(dev, 1, enabled,
- dev_priv->wm.pri_latency[1] * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM1_LP_ILK,
- WM1_LP_SR_EN |
- (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM2 */
- if (!ironlake_compute_srwm(dev, 2, enabled,
- dev_priv->wm.pri_latency[2] * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM2_LP_ILK,
- WM2_LP_EN |
- (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM3 */
- if (!ironlake_compute_srwm(dev, 3, enabled,
- dev_priv->wm.pri_latency[3] * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM3_LP_ILK,
- WM3_LP_EN |
- (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-}
-
-static void ivybridge_update_wm(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
- u32 val;
- int fbc_wm, plane_wm, cursor_wm;
- int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
- unsigned int enabled;
-
- enabled = 0;
- if (g4x_compute_wm0(dev, PIPE_A,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEA_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEA_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
- " plane %d, " "cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1 << PIPE_A;
- }
-
- if (g4x_compute_wm0(dev, PIPE_B,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEB_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEB_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1 << PIPE_B;
- }
-
- if (g4x_compute_wm0(dev, PIPE_C,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEC_IVB);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEC_IVB, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1 << PIPE_C;
- }
-
- /*
- * Calculate and update the self-refresh watermark only when one
- * display plane is used.
- *
- * SNB support 3 levels of watermark.
- *
- * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
- * and disabled in the descending order
- *
- */
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- if (!single_plane_enabled(enabled) ||
- dev_priv->sprite_scaling_enabled)
- return;
- enabled = ffs(enabled) - 1;
-
- /* WM1 */
- if (!ironlake_compute_srwm(dev, 1, enabled,
- dev_priv->wm.pri_latency[1] * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM1_LP_ILK,
- WM1_LP_SR_EN |
- (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM2 */
- if (!ironlake_compute_srwm(dev, 2, enabled,
- dev_priv->wm.pri_latency[2] * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM2_LP_ILK,
- WM2_LP_EN |
- (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM3, note we have to correct the cursor latency */
- if (!ironlake_compute_srwm(dev, 3, enabled,
- dev_priv->wm.pri_latency[3] * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
- !ironlake_compute_srwm(dev, 3, enabled,
- dev_priv->wm.cur_latency[3] * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM3_LP_ILK,
- WM3_LP_EN |
- (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-}
-
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
struct drm_crtc *crtc)
{
@@ -2185,7 +1701,7 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
-struct hsw_pipe_wm_parameters {
+struct ilk_pipe_wm_parameters {
bool active;
uint32_t pipe_htotal;
uint32_t pixel_rate;
@@ -2194,7 +1710,7 @@ struct hsw_pipe_wm_parameters {
struct intel_plane_wm_parameters cur;
};
-struct hsw_wm_maximums {
+struct ilk_wm_maximums {
uint16_t pri;
uint16_t spr;
uint16_t cur;
@@ -2212,7 +1728,7 @@ struct intel_wm_config {
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
uint32_t mem_value,
bool is_lp)
{
@@ -2241,7 +1757,7 @@ static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
uint32_t mem_value)
{
uint32_t method1, method2;
@@ -2264,7 +1780,7 @@ static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
uint32_t mem_value)
{
if (!params->active || !params->cur.enabled)
@@ -2278,7 +1794,7 @@ static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
}
/* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
uint32_t pri_val)
{
if (!params->active || !params->pri.enabled)
@@ -2383,7 +1899,7 @@ static void ilk_compute_wm_maximums(struct drm_device *dev,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
- struct hsw_wm_maximums *max)
+ struct ilk_wm_maximums *max)
{
max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
@@ -2392,7 +1908,7 @@ static void ilk_compute_wm_maximums(struct drm_device *dev,
}
static bool ilk_validate_wm_level(int level,
- const struct hsw_wm_maximums *max,
+ const struct ilk_wm_maximums *max,
struct intel_wm_level *result)
{
bool ret;
@@ -2434,7 +1950,7 @@ static bool ilk_validate_wm_level(int level,
static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
int level,
- const struct hsw_pipe_wm_parameters *p,
+ const struct ilk_pipe_wm_parameters *p,
struct intel_wm_level *result)
{
uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2482,7 +1998,7 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
wm[0] = (sskpd >> 56) & 0xFF;
@@ -2530,7 +2046,7 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
static int ilk_wm_max_level(const struct drm_device *dev)
{
/* how many WM levels are we expecting */
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
return 4;
else if (INTEL_INFO(dev)->gen >= 6)
return 3;
@@ -2582,8 +2098,8 @@ static void intel_setup_wm_latency(struct drm_device *dev)
intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}
-static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
- struct hsw_pipe_wm_parameters *p,
+static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
+ struct ilk_pipe_wm_parameters *p,
struct intel_wm_config *config)
{
struct drm_device *dev = crtc->dev;
@@ -2593,7 +2109,7 @@ static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
p->active = intel_crtc_active(crtc);
if (p->active) {
- p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
+ p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
p->cur.bytes_per_pixel = 4;
@@ -2620,7 +2136,7 @@ static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
/* Compute new watermarks for the pipe */
static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
- const struct hsw_pipe_wm_parameters *params,
+ const struct ilk_pipe_wm_parameters *params,
struct intel_pipe_wm *pipe_wm)
{
struct drm_device *dev = crtc->dev;
@@ -2632,16 +2148,25 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
.sprites_enabled = params->spr.enabled,
.sprites_scaled = params->spr.scaled,
};
- struct hsw_wm_maximums max;
+ struct ilk_wm_maximums max;
/* LP0 watermarks always use 1/2 DDB partitioning */
ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+ /* ILK/SNB: LP2+ watermarks only w/o sprites */
+ if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
+ max_level = 1;
+
+ /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
+ if (params->spr.scaled)
+ max_level = 0;
+
for (level = 0; level <= max_level; level++)
ilk_compute_wm_level(dev_priv, level, params,
&pipe_wm->wm[level]);
- pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
/* At least LP0 must be valid */
return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
@@ -2676,12 +2201,19 @@ static void ilk_merge_wm_level(struct drm_device *dev,
* Merge all low power watermarks for all active pipes.
*/
static void ilk_wm_merge(struct drm_device *dev,
- const struct hsw_wm_maximums *max,
+ const struct intel_wm_config *config,
+ const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
int level, max_level = ilk_wm_max_level(dev);
- merged->fbc_wm_enabled = true;
+ /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
+ if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
+ config->num_pipes_active > 1)
+ return;
+
+ /* ILK: FBC WM must be disabled always */
+ merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
/* merge each WM1+ level */
for (level = 1; level <= max_level; level++) {
@@ -2701,6 +2233,20 @@ static void ilk_wm_merge(struct drm_device *dev,
wm->fbc_val = 0;
}
}
+
+ /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
+ /*
+ * FIXME this is racy. FBC might get enabled later.
+ * What we should check here is whether FBC can be
+ * enabled sometime later.
+ */
+ if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
+ for (level = 2; level <= max_level; level++) {
+ struct intel_wm_level *wm = &merged->wm[level];
+
+ wm->enable = false;
+ }
+ }
}
static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
@@ -2709,10 +2255,21 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}
-static void hsw_compute_wm_results(struct drm_device *dev,
+/* The value we need to program into the WM_LPx latency field */
+static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ return 2 * level;
+ else
+ return dev_priv->wm.pri_latency[level];
+}
+
+static void ilk_compute_wm_results(struct drm_device *dev,
const struct intel_pipe_wm *merged,
enum intel_ddb_partitioning partitioning,
- struct hsw_wm_values *results)
+ struct ilk_wm_values *results)
{
struct intel_crtc *intel_crtc;
int level, wm_lp;
@@ -2731,7 +2288,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
break;
results->wm_lp[wm_lp - 1] = WM3_LP_EN |
- ((level * 2) << WM1_LP_LATENCY_SHIFT) |
+ (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
(r->pri_val << WM1_LP_SR_SHIFT) |
r->cur_val;
@@ -2742,7 +2299,11 @@ static void hsw_compute_wm_results(struct drm_device *dev,
results->wm_lp[wm_lp - 1] |=
r->fbc_val << WM1_LP_FBC_SHIFT;
- results->wm_lp_spr[wm_lp - 1] = r->spr_val;
+ if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
+ WARN_ON(wm_lp != 1);
+ results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
+ } else
+ results->wm_lp_spr[wm_lp - 1] = r->spr_val;
}
/* LP0 register values */
@@ -2765,7 +2326,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
* case both are at the same level. Prefer r1 in case they're the same. */
-static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
+static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
struct intel_pipe_wm *r1,
struct intel_pipe_wm *r2)
{
@@ -2800,8 +2361,8 @@ static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
#define WM_DIRTY_DDB (1 << 25)
static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
- const struct hsw_wm_values *old,
- const struct hsw_wm_values *new)
+ const struct ilk_wm_values *old,
+ const struct ilk_wm_values *new)
{
unsigned int dirty = 0;
enum pipe pipe;
@@ -2851,27 +2412,53 @@ static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
return dirty;
}
+static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+ unsigned int dirty)
+{
+ struct ilk_wm_values *previous = &dev_priv->wm.hw;
+ bool changed = false;
+
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
+ previous->wm_lp[2] &= ~WM1_LP_SR_EN;
+ I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
+ changed = true;
+ }
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
+ previous->wm_lp[1] &= ~WM1_LP_SR_EN;
+ I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
+ changed = true;
+ }
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
+ previous->wm_lp[0] &= ~WM1_LP_SR_EN;
+ I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
+ changed = true;
+ }
+
+ /*
+ * Don't touch WM1S_LP_EN here.
+ * Doing so could cause underruns.
+ */
+
+ return changed;
+}
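
A tiny model of the helper above: clear the self-refresh enable bit in the cached LP watermark values only for levels marked dirty, mirror that to the "hardware", and report whether anything was actually touched. The mask, register stand-in and mapping of dirty bits are placeholders rather than the i915 definitions.

#include <stdint.h>
#include <stdio.h>

#define LP_SR_EN (1u << 31)          /* stands in for WM1_LP_SR_EN */

static uint32_t hw_wm_lp[3];          /* stands in for WM1..3_LP_ILK */

static int disable_lp_wm(uint32_t *cached, unsigned int dirty_mask)
{
	int changed = 0;

	for (int i = 0; i < 3; i++) {
		/* Skip levels that are not dirty or already disabled. */
		if (!(dirty_mask & (1u << i)) || !(cached[i] & LP_SR_EN))
			continue;
		cached[i] &= ~LP_SR_EN;
		hw_wm_lp[i] = cached[i];   /* stands in for the register write */
		changed = 1;
	}
	return changed;
}

int main(void)
{
	uint32_t cached[3] = { LP_SR_EN | 0x10, 0x20, LP_SR_EN | 0x30 };

	printf("changed=%d\n", disable_lp_wm(cached, 0x7));
	return 0;
}
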
+
/*
* The spec says we shouldn't write when we don't need, because every write
* causes WMs to be re-evaluated, expending some power.
*/
-static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
- struct hsw_wm_values *results)
+static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+ struct ilk_wm_values *results)
{
- struct hsw_wm_values *previous = &dev_priv->wm.hw;
+ struct drm_device *dev = dev_priv->dev;
+ struct ilk_wm_values *previous = &dev_priv->wm.hw;
unsigned int dirty;
uint32_t val;
- dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results);
+ dirty = ilk_compute_wm_dirty(dev, previous, results);
if (!dirty)
return;
- if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0)
- I915_WRITE(WM3_LP_ILK, 0);
- if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0)
- I915_WRITE(WM2_LP_ILK, 0);
- if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0)
- I915_WRITE(WM1_LP_ILK, 0);
+ _ilk_disable_lp_wm(dev_priv, dirty);
if (dirty & WM_DIRTY_PIPE(PIPE_A))
I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
@@ -2888,12 +2475,21 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
if (dirty & WM_DIRTY_DDB) {
- val = I915_READ(WM_MISC);
- if (results->partitioning == INTEL_DDB_PART_1_2)
- val &= ~WM_MISC_DATA_PARTITION_5_6;
- else
- val |= WM_MISC_DATA_PARTITION_5_6;
- I915_WRITE(WM_MISC, val);
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ val = I915_READ(WM_MISC);
+ if (results->partitioning == INTEL_DDB_PART_1_2)
+ val &= ~WM_MISC_DATA_PARTITION_5_6;
+ else
+ val |= WM_MISC_DATA_PARTITION_5_6;
+ I915_WRITE(WM_MISC, val);
+ } else {
+ val = I915_READ(DISP_ARB_CTL2);
+ if (results->partitioning == INTEL_DDB_PART_1_2)
+ val &= ~DISP_DATA_PARTITION_5_6;
+ else
+ val |= DISP_DATA_PARTITION_5_6;
+ I915_WRITE(DISP_ARB_CTL2, val);
+ }
}
if (dirty & WM_DIRTY_FBC) {
@@ -2905,37 +2501,48 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
I915_WRITE(DISP_ARB_CTL, val);
}
- if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0])
+ if (dirty & WM_DIRTY_LP(1) &&
+ previous->wm_lp_spr[0] != results->wm_lp_spr[0])
I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
- if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
- I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
- if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
- I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
- if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0)
+ if (INTEL_INFO(dev)->gen >= 7) {
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
+ I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
+ I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
+ }
+
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
- if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0)
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
- if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0)
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
dev_priv->wm.hw = *results;
}
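
ilk_write_wm_values only touches registers whose cached value differs from the newly computed one. A self-contained sketch of that dirty-mask idea, with an invented layout and bit assignment:

#include <stdint.h>
#include <stdio.h>

struct wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
};

/* Diff cached vs. new state and build a per-register dirty mask;
 * callers skip the MMIO writes when the mask is zero. */
static unsigned int compute_dirty(const struct wm_values *old,
				  const struct wm_values *new)
{
	unsigned int dirty = 0;

	for (int i = 0; i < 3; i++) {
		if (old->wm_pipe[i] != new->wm_pipe[i])
			dirty |= 1u << i;          /* pipe i dirty      */
		if (old->wm_lp[i] != new->wm_lp[i])
			dirty |= 1u << (8 + i);    /* LP level i+1 dirty */
	}
	return dirty;
}

int main(void)
{
	struct wm_values old = { { 1, 2, 3 }, { 4, 5, 6 } };
	struct wm_values new = { { 1, 9, 3 }, { 4, 5, 7 } };

	printf("dirty mask: 0x%x\n", compute_dirty(&old, &new));
	return 0;
}
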
-static void haswell_update_wm(struct drm_crtc *crtc)
+static bool ilk_disable_lp_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+}
+
+static void ilk_update_wm(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct hsw_wm_maximums max;
- struct hsw_pipe_wm_parameters params = {};
- struct hsw_wm_values results = {};
+ struct ilk_wm_maximums max;
+ struct ilk_pipe_wm_parameters params = {};
+ struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
struct intel_pipe_wm pipe_wm = {};
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct intel_wm_config config = {};
- hsw_compute_wm_parameters(crtc, &params, &config);
+ ilk_compute_wm_parameters(crtc, &params, &config);
intel_compute_pipe_wm(crtc, &params, &pipe_wm);
@@ -2945,15 +2552,15 @@ static void haswell_update_wm(struct drm_crtc *crtc)
intel_crtc->wm.active = pipe_wm;
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
- ilk_wm_merge(dev, &max, &lp_wm_1_2);
+ ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
if (INTEL_INFO(dev)->gen >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
- ilk_wm_merge(dev, &max, &lp_wm_5_6);
+ ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
- best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
+ best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
} else {
best_lp_wm = &lp_wm_1_2;
}
@@ -2961,16 +2568,17 @@ static void haswell_update_wm(struct drm_crtc *crtc)
partitioning = (best_lp_wm == &lp_wm_1_2) ?
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
- hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+ ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
- hsw_write_wm_values(dev_priv, &results);
+ ilk_write_wm_values(dev_priv, &results);
}
-static void haswell_update_sprite_wm(struct drm_plane *plane,
+static void ilk_update_sprite_wm(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled)
{
+ struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
intel_plane->wm.enabled = enabled;
@@ -2978,176 +2586,24 @@ static void haswell_update_sprite_wm(struct drm_plane *plane,
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.bytes_per_pixel = pixel_size;
- haswell_update_wm(crtc);
-}
-
-static bool
-sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
- uint32_t sprite_width, int pixel_size,
- const struct intel_watermark_params *display,
- int display_latency_ns, int *sprite_wm)
-{
- struct drm_crtc *crtc;
- int clock;
- int entries, tlb_miss;
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- if (!intel_crtc_active(crtc)) {
- *sprite_wm = display->guard_size;
- return false;
- }
-
- clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
-
- /* Use the small buffer method to calculate the sprite watermark */
- entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
- tlb_miss = display->fifo_size*display->cacheline_size -
- sprite_width * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, display->cacheline_size);
- *sprite_wm = entries + display->guard_size;
- if (*sprite_wm > (int)display->max_wm)
- *sprite_wm = display->max_wm;
-
- return true;
-}
-
-static bool
-sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
- uint32_t sprite_width, int pixel_size,
- const struct intel_watermark_params *display,
- int latency_ns, int *sprite_wm)
-{
- struct drm_crtc *crtc;
- unsigned long line_time_us;
- int clock;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *sprite_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
- if (!clock) {
- *sprite_wm = 0;
- return false;
- }
-
- line_time_us = (sprite_width * 1000) / clock;
- if (!line_time_us) {
- *sprite_wm = 0;
- return false;
- }
-
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = sprite_width * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *sprite_wm = entries + display->guard_size;
-
- return *sprite_wm > 0x3ff ? false : true;
-}
-
-static void sandybridge_update_sprite_wm(struct drm_plane *plane,
- struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
- bool enabled, bool scaled)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = to_intel_plane(plane)->pipe;
- int latency = dev_priv->wm.spr_latency[0] * 100; /* In unit 0.1us */
- u32 val;
- int sprite_wm, reg;
- int ret;
-
- if (!enabled)
- return;
-
- switch (pipe) {
- case 0:
- reg = WM0_PIPEA_ILK;
- break;
- case 1:
- reg = WM0_PIPEB_ILK;
- break;
- case 2:
- reg = WM0_PIPEC_IVB;
- break;
- default:
- return; /* bad pipe */
- }
-
- ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
- &sandybridge_display_wm_info,
- latency, &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
- pipe_name(pipe));
- return;
- }
-
- val = I915_READ(reg);
- val &= ~WM0_PIPE_SPRITE_MASK;
- I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
- DRM_DEBUG_KMS("sprite watermarks For pipe %c - %d\n", pipe_name(pipe), sprite_wm);
-
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- dev_priv->wm.spr_latency[1] * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
- pipe_name(pipe));
- return;
- }
- I915_WRITE(WM1S_LP_ILK, sprite_wm);
-
- /* Only IVB has two more LP watermarks for sprite */
- if (!IS_IVYBRIDGE(dev))
- return;
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- dev_priv->wm.spr_latency[2] * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
- pipe_name(pipe));
- return;
- }
- I915_WRITE(WM2S_LP_IVB, sprite_wm);
+ /*
+ * IVB workaround: must disable low power watermarks for at least
+ * one frame before enabling scaling. LP watermarks can be re-enabled
+ * when scaling is disabled.
+ *
+ * WaCxSRDisabledForSpriteScaling:ivb
+ */
+ if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
+ intel_wait_for_vblank(dev, intel_plane->pipe);
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- dev_priv->wm.spr_latency[3] * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
- pipe_name(pipe));
- return;
- }
- I915_WRITE(WM3S_LP_IVB, sprite_wm);
+ ilk_update_wm(crtc);
}
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct hsw_wm_values *hw = &dev_priv->wm.hw;
+ struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_pipe_wm *active = &intel_crtc->wm.active;
enum pipe pipe = intel_crtc->pipe;
@@ -3158,7 +2614,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
};
hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
- hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
if (intel_crtc_active(crtc)) {
u32 tmp = hw->wm_pipe[pipe];
@@ -3190,7 +2647,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
void ilk_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct hsw_wm_values *hw = &dev_priv->wm.hw;
+ struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct drm_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@ -3204,8 +2661,12 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
- hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
- INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+ else if (IS_IVYBRIDGE(dev))
+ hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
hw->enable_fbc_wm =
!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
@@ -3430,26 +2891,19 @@ static void ironlake_disable_drps(struct drm_device *dev)
* ourselves, instead of doing a rmw cycle (which might result in us clearing
* all limits and the gpu stuck at whatever frequency it is at atm).
*/
-static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
u32 limits;
- limits = 0;
-
- if (*val >= dev_priv->rps.max_delay)
- *val = dev_priv->rps.max_delay;
- limits |= dev_priv->rps.max_delay << 24;
-
/* Only set the down limit when we've reached the lowest level to avoid
* getting more interrupts, otherwise leave this clear. This prevents a
* race in the hw when coming out of rc6: There's a tiny window where
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
- if (*val <= dev_priv->rps.min_delay) {
- *val = dev_priv->rps.min_delay;
+ limits = dev_priv->rps.max_delay << 24;
+ if (val <= dev_priv->rps.min_delay)
limits |= dev_priv->rps.min_delay << 16;
- }
return limits;
}
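
For reference, the limits word this helper builds packs the maximum allowed frequency opcode into bits 31:24 and the minimum into bits 23:16, with the down limit only programmed once the requested value has reached the floor. A standalone sketch of the packing (the opcode values are invented for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define RPS_MAX_DELAY 0x2a	/* illustrative max frequency opcode */
	#define RPS_MIN_DELAY 0x0d	/* illustrative min frequency opcode */

	/* Same packing as the reworked gen6_rps_limits(): max limit in bits
	 * 31:24, min limit in bits 23:16 only once 'val' hits the floor. */
	static uint32_t rps_limits(uint8_t val)
	{
		uint32_t limits = (uint32_t)RPS_MAX_DELAY << 24;

		if (val <= RPS_MIN_DELAY)
			limits |= (uint32_t)RPS_MIN_DELAY << 16;
		return limits;
	}

	int main(void)
	{
		printf("mid freq:   0x%08x\n", rps_limits(0x18));          /* 0x2a000000 */
		printf("floor freq: 0x%08x\n", rps_limits(RPS_MIN_DELAY)); /* 0x2a0d0000 */
		return 0;
	}
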
@@ -3549,7 +3003,6 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 limits = gen6_rps_limits(dev_priv, &val);
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_delay);
@@ -3572,7 +3025,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
/* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
*/
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ gen6_rps_limits(dev_priv, val));
POSTING_READ(GEN6_RPNSWREQ);
@@ -3583,9 +3037,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
+
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (dev_priv->info->is_valleyview)
+ if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
@@ -3596,9 +3052,11 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
void gen6_rps_boost(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
+
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (dev_priv->info->is_valleyview)
+ if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
@@ -3607,48 +3065,18 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->rps.hw_lock);
}
-/*
- * Wait until the previous freq change has completed,
- * or the timeout elapsed, and then update our notion
- * of the current GPU frequency.
- */
-static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
-{
- u32 pval;
-
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
- if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
- DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
-
- pval >>= 8;
-
- if (pval != dev_priv->rps.cur_delay)
- DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n",
- vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
- dev_priv->rps.cur_delay,
- vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
-
- dev_priv->rps.cur_delay = pval;
-}
-
void valleyview_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- gen6_rps_limits(dev_priv, &val);
-
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_delay);
WARN_ON(val < dev_priv->rps.min_delay);
- vlv_update_rps_cur_delay(dev_priv);
-
DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.cur_delay),
+ vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
dev_priv->rps.cur_delay,
- vlv_gpu_freq(dev_priv->mem_freq, val), val);
+ vlv_gpu_freq(dev_priv, val), val);
if (val == dev_priv->rps.cur_delay)
return;
@@ -3657,7 +3085,7 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
dev_priv->rps.cur_delay = val;
- trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
+ trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}
static void gen6_disable_rps_interrupts(struct drm_device *dev)
@@ -3775,7 +3203,7 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 1c & 1d: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- gen6_gt_force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3832,7 +3260,7 @@ static void gen8_enable_rps(struct drm_device *dev)
gen6_enable_rps_interrupts(dev);
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen6_enable_rps(struct drm_device *dev)
@@ -3862,7 +3290,7 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- gen6_gt_force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
@@ -3954,7 +3382,7 @@ static void gen6_enable_rps(struct drm_device *dev)
DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
}
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
void gen6_update_ring_freq(struct drm_device *dev)
@@ -4116,7 +3544,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
valleyview_setup_pctx(dev);
- gen6_gt_force_wake_get(dev_priv);
+ /* If VLV, Forcewake all wells, else re-direct to regular path */
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
@@ -4140,7 +3569,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
- I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
@@ -4148,65 +3577,47 @@ static void valleyview_enable_rps(struct drm_device *dev)
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
- rc6_mode = GEN7_RC_CTL_TO_MODE;
+ rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
intel_print_rc6_info(dev, rc6_mode);
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- switch ((val >> 6) & 3) {
- case 0:
- case 1:
- dev_priv->mem_freq = 800;
- break;
- case 2:
- dev_priv->mem_freq = 1066;
- break;
- case 3:
- dev_priv->mem_freq = 1333;
- break;
- }
- DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
dev_priv->rps.cur_delay = (val >> 8) & 0xff;
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.cur_delay),
+ vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
dev_priv->rps.cur_delay);
dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.hw_max = dev_priv->rps.max_delay;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.max_delay),
+ vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay),
dev_priv->rps.max_delay);
dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.rpe_delay),
+ vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
dev_priv->rps.rpe_delay);
dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.min_delay),
+ vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay),
dev_priv->rps.min_delay);
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.rpe_delay),
+ vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
dev_priv->rps.rpe_delay);
valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
gen6_enable_rps_interrupts(dev);
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
void ironlake_teardown_rc6(struct drm_device *dev)
@@ -5019,6 +4430,20 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
}
}
+static void ilk_init_lp_watermarks(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
+ I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
+ I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+
+ /*
+ * Don't touch WM1S_LP_EN here.
+ * Doing so could cause underruns.
+ */
+}
+
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5052,9 +4477,8 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
+
+ ilk_init_lp_watermarks(dev);
/*
* Based on the document from hardware guys the following bits
@@ -5161,9 +4585,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_GT_MODE,
_MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
+ ilk_init_lp_watermarks(dev);
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
@@ -5304,28 +4726,40 @@ static void gen8_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
- /* WaSwitchSolVfFArbitrationPriority */
+ /* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
- /* WaPsrDPAMaskVBlankInSRD */
+ /* WaPsrDPAMaskVBlankInSRD:bdw */
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
- /* WaPsrDPRSUnmaskVBlankInSRD */
+ /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
for_each_pipe(i) {
I915_WRITE(CHICKEN_PIPESL_1(i),
I915_READ(CHICKEN_PIPESL_1(i) |
DPRS_MASK_VBLANK_SRD));
}
+
+ /* Use Force Non-Coherent whenever executing a 3D context. This is a
+ * workaround for a possible hang in the unlikely event a TLB
+ * invalidation occurs during a PSD flush.
+ */
+ I915_WRITE(HDC_CHICKEN0,
+ I915_READ(HDC_CHICKEN0) |
+ _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
+
+ /* WaVSRefCountFullforceMissDisable:bdw */
+ /* WaDSRefCountFullforceMissDisable:bdw */
+ I915_WRITE(GEN7_FF_THREAD_MODE,
+ I915_READ(GEN7_FF_THREAD_MODE) &
+ ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
}
static void haswell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
+ ilk_init_lp_watermarks(dev);
/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:hsw workaround.
@@ -5374,9 +4808,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t snpcr;
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
+ ilk_init_lp_watermarks(dev);
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
@@ -5463,6 +4895,26 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ switch ((val >> 6) & 3) {
+ case 0:
+ dev_priv->mem_freq = 800;
+ break;
+ case 1:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 2:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 3:
+ dev_priv->mem_freq = 1333;
+ break;
+ }
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
@@ -5642,51 +5094,137 @@ void intel_suspend_hw(struct drm_device *dev)
lpt_suspend_hw(dev);
}
-static bool is_always_on_power_domain(struct drm_device *dev,
- enum intel_display_power_domain domain)
-{
- unsigned long always_on_domains;
+#define for_each_power_well(i, power_well, domain_mask, power_domains) \
+ for (i = 0; \
+ i < (power_domains)->power_well_count && \
+ ((power_well) = &(power_domains)->power_wells[i]); \
+ i++) \
+ if ((power_well)->domains & (domain_mask))
- BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
-
- if (IS_BROADWELL(dev)) {
- always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS;
- } else if (IS_HASWELL(dev)) {
- always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
- } else {
- WARN_ON(1);
- return true;
- }
-
- return BIT(domain) & always_on_domains;
-}
+#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
+ for (i = (power_domains)->power_well_count - 1; \
+ i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
+ i--) \
+ if ((power_well)->domains & (domain_mask))
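
These two iterators walk power_wells[] and hand the loop body only the wells whose ->domains mask overlaps the requested domain bit; the _rev variant walks the array back to front so wells are disabled in the opposite order to how they were enabled. A cut-down, standalone mock of the same pattern (the struct and names below are invented for illustration):

	#include <stdio.h>

	struct mock_power_well {
		const char *name;
		unsigned long domains;	/* bitmask of domains this well feeds */
	};

	struct mock_power_domains {
		struct mock_power_well *power_wells;
		int power_well_count;
	};

	#define for_each_mock_power_well(i, well, mask, pd)		\
		for ((i) = 0;						\
		     (i) < (pd)->power_well_count &&			\
		     ((well) = &(pd)->power_wells[(i)]);		\
		     (i)++)						\
			if ((well)->domains & (mask))

	int main(void)
	{
		struct mock_power_well wells[] = {
			{ "always-on", ~0UL },
			{ "display",   1UL << 3 },	/* feeds domain 3 only */
		};
		struct mock_power_domains pd = { wells, 2 };
		struct mock_power_well *well;
		int i;

		/* Domain 0 only matches the always-on well. */
		for_each_mock_power_well(i, well, 1UL << 0, &pd)
			printf("domain 0 -> %s\n", well->name);

		/* Domain 3 matches both wells. */
		for_each_mock_power_well(i, well, 1UL << 3, &pd)
			printf("domain 3 -> %s\n", well->name);
		return 0;
	}
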
/**
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
* be enabled.
*/
+static bool hsw_power_well_enabled(struct drm_device *dev,
+ struct i915_power_well *power_well)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(HSW_PWR_WELL_DRIVER) ==
+ (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+}
+
+bool intel_display_power_enabled_sw(struct drm_device *dev,
+ enum intel_display_power_domain domain)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_power_domains *power_domains;
+
+ power_domains = &dev_priv->power_domains;
+
+ return power_domains->domain_use_count[domain];
+}
+
bool intel_display_power_enabled(struct drm_device *dev,
enum intel_display_power_domain domain)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ bool is_enabled;
+ int i;
- if (!HAS_POWER_WELL(dev))
- return true;
+ power_domains = &dev_priv->power_domains;
- if (is_always_on_power_domain(dev, domain))
- return true;
+ is_enabled = true;
- return I915_READ(HSW_PWR_WELL_DRIVER) ==
- (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+ mutex_lock(&power_domains->lock);
+ for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+ if (power_well->always_on)
+ continue;
+
+ if (!power_well->is_enabled(dev, power_well)) {
+ is_enabled = false;
+ break;
+ }
+ }
+ mutex_unlock(&power_domains->lock);
+
+ return is_enabled;
}
-static void __intel_set_power_well(struct drm_device *dev, bool enable)
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ unsigned long irqflags;
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+ * console_unlock(). So here we touch the VGA MSR register, making
+ * sure vgacon can keep working normally without triggering interrupts
+ * and error messages.
+ */
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+ if (IS_BROADWELL(dev)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+ dev_priv->de_irq_mask[PIPE_B]);
+ I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+ ~dev_priv->de_irq_mask[PIPE_B] |
+ GEN8_PIPE_VBLANK);
+ I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+ dev_priv->de_irq_mask[PIPE_C]);
+ I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+ ~dev_priv->de_irq_mask[PIPE_C] |
+ GEN8_PIPE_VBLANK);
+ POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+}
+
+static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ enum pipe p;
+ unsigned long irqflags;
+
+ /*
+ * After this, the registers on the pipes that are part of the power
+ * well will become zero, so we have to adjust our counters according to
+ * that.
+ *
+ * FIXME: Should we do this in general in drm_vblank_post_modeset?
+ */
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ for_each_pipe(p)
+ if (p != PIPE_A)
+ dev->vblank[p].last = 0;
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+
+static void hsw_set_power_well(struct drm_device *dev,
+ struct i915_power_well *power_well, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool is_enabled, enable_requested;
uint32_t tmp;
+ WARN_ON(dev_priv->pc8.enabled);
+
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
@@ -5702,28 +5240,15 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
HSW_PWR_WELL_STATE_ENABLED), 20))
DRM_ERROR("Timeout enabling power well\n");
}
+
+ hsw_power_well_post_enable(dev_priv);
} else {
if (enable_requested) {
- unsigned long irqflags;
- enum pipe p;
-
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
- /*
- * After this, the registers on the pipes that are part
- * of the power well will become zero, so we have to
- * adjust our counters according to that.
- *
- * FIXME: Should we do this in general in
- * drm_vblank_post_modeset?
- */
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- for_each_pipe(p)
- if (p != PIPE_A)
- dev->vblank[p].last = 0;
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ hsw_power_well_post_disable(dev_priv);
}
}
}
@@ -5731,16 +5256,26 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
static void __intel_power_well_get(struct drm_device *dev,
struct i915_power_well *power_well)
{
- if (!power_well->count++)
- __intel_set_power_well(dev, true);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!power_well->count++ && power_well->set) {
+ hsw_disable_package_c8(dev_priv);
+ power_well->set(dev, power_well, true);
+ }
}
static void __intel_power_well_put(struct drm_device *dev,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
WARN_ON(!power_well->count);
- if (!--power_well->count && i915_disable_power_well)
- __intel_set_power_well(dev, false);
+
+ if (!--power_well->count && power_well->set &&
+ i915_disable_power_well) {
+ power_well->set(dev, power_well, false);
+ hsw_enable_package_c8(dev_priv);
+ }
}
void intel_display_power_get(struct drm_device *dev,
@@ -5748,17 +5283,18 @@ void intel_display_power_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
-
- if (!HAS_POWER_WELL(dev))
- return;
-
- if (is_always_on_power_domain(dev, domain))
- return;
+ struct i915_power_well *power_well;
+ int i;
power_domains = &dev_priv->power_domains;
mutex_lock(&power_domains->lock);
- __intel_power_well_get(dev, &power_domains->power_wells[0]);
+
+ for_each_power_well(i, power_well, BIT(domain), power_domains)
+ __intel_power_well_get(dev, power_well);
+
+ power_domains->domain_use_count[domain]++;
+
mutex_unlock(&power_domains->lock);
}
@@ -5767,17 +5303,19 @@ void intel_display_power_put(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
-
- if (!HAS_POWER_WELL(dev))
- return;
-
- if (is_always_on_power_domain(dev, domain))
- return;
+ struct i915_power_well *power_well;
+ int i;
power_domains = &dev_priv->power_domains;
mutex_lock(&power_domains->lock);
- __intel_power_well_put(dev, &power_domains->power_wells[0]);
+
+ WARN_ON(!power_domains->domain_use_count[domain]);
+ power_domains->domain_use_count[domain]--;
+
+ for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
+ __intel_power_well_put(dev, power_well);
+
mutex_unlock(&power_domains->lock);
}
@@ -5793,10 +5331,7 @@ void i915_request_power_well(void)
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
-
- mutex_lock(&hsw_pwr->lock);
- __intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
- mutex_unlock(&hsw_pwr->lock);
+ intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
@@ -5810,24 +5345,71 @@ void i915_release_power_well(void)
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
-
- mutex_lock(&hsw_pwr->lock);
- __intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
- mutex_unlock(&hsw_pwr->lock);
+ intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
+static struct i915_power_well i9xx_always_on_power_well[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = POWER_DOMAIN_MASK,
+ },
+};
+
+static struct i915_power_well hsw_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+ },
+ {
+ .name = "display",
+ .domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS,
+ .is_enabled = hsw_power_well_enabled,
+ .set = hsw_set_power_well,
+ },
+};
+
+static struct i915_power_well bdw_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+ },
+ {
+ .name = "display",
+ .domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS,
+ .is_enabled = hsw_power_well_enabled,
+ .set = hsw_set_power_well,
+ },
+};
+
+#define set_power_wells(power_domains, __power_wells) ({ \
+ (power_domains)->power_wells = (__power_wells); \
+ (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
+})
+
int intel_power_domains_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *power_well;
mutex_init(&power_domains->lock);
- hsw_pwr = power_domains;
- power_well = &power_domains->power_wells[0];
- power_well->count = 0;
+ /*
+ * The enabling order will be from lower to higher indexed wells;
+ * the disabling order is reversed.
+ */
+ if (IS_HASWELL(dev)) {
+ set_power_wells(power_domains, hsw_power_wells);
+ hsw_pwr = power_domains;
+ } else if (IS_BROADWELL(dev)) {
+ set_power_wells(power_domains, bdw_power_wells);
+ hsw_pwr = power_domains;
+ } else {
+ set_power_wells(power_domains, i9xx_always_on_power_well);
+ }
return 0;
}
@@ -5842,15 +5424,13 @@ static void intel_power_domains_resume(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
-
- if (!HAS_POWER_WELL(dev))
- return;
+ int i;
mutex_lock(&power_domains->lock);
-
- power_well = &power_domains->power_wells[0];
- __intel_set_power_well(dev, power_well->count > 0);
-
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+ if (power_well->set)
+ power_well->set(dev, power_well, power_well->count > 0);
+ }
mutex_unlock(&power_domains->lock);
}
@@ -5864,13 +5444,13 @@ void intel_power_domains_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!HAS_POWER_WELL(dev))
- return;
-
/* For now, we need the power well to be always enabled. */
intel_display_set_init_power(dev, true);
intel_power_domains_resume(dev);
+ if (!(IS_HASWELL(dev) || IS_BROADWELL(dev)))
+ return;
+
/* We're taking over the BIOS, so clear any requests made by it since
* the driver is in charge now. */
if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
@@ -5888,31 +5468,86 @@ void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
hsw_enable_package_c8(dev_priv);
}
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_get_sync(device);
+ WARN(dev_priv->pm.suspended, "Device still suspended.\n");
+}
+
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_mark_last_busy(device);
+ pm_runtime_put_autosuspend(device);
+}
+
+void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ dev_priv->pm.suspended = false;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_set_active(device);
+
+ pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
+ pm_runtime_mark_last_busy(device);
+ pm_runtime_use_autosuspend(device);
+}
+
+void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ /* Make sure we're not suspended first. */
+ pm_runtime_get_sync(device);
+ pm_runtime_disable(device);
+}
+
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (I915_HAS_FBC(dev)) {
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_FBC(dev)) {
+ if (INTEL_INFO(dev)->gen >= 7) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- dev_priv->display.enable_fbc =
- gen7_enable_fbc;
- else
- dev_priv->display.enable_fbc =
- ironlake_enable_fbc;
+ dev_priv->display.enable_fbc = gen7_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (INTEL_INFO(dev)->gen >= 5) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = ironlake_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
} else if (IS_GM45(dev)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else if (IS_CRESTLINE(dev)) {
+ } else {
dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
dev_priv->display.enable_fbc = i8xx_enable_fbc;
dev_priv->display.disable_fbc = i8xx_disable_fbc;
+
+ /* This value was pulled out of someone's hat */
+ I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}
- /* 855GM needs testing */
}
/* For cxsr */
@@ -5925,58 +5560,27 @@ void intel_init_pm(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
intel_setup_wm_latency(dev);
- if (IS_GEN5(dev)) {
- if (dev_priv->wm.pri_latency[1] &&
- dev_priv->wm.spr_latency[1] &&
- dev_priv->wm.cur_latency[1])
- dev_priv->display.update_wm = ironlake_update_wm;
- else {
- DRM_DEBUG_KMS("Failed to get proper latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
+ if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
+ dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
+ (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
+ dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
+ dev_priv->display.update_wm = ilk_update_wm;
+ dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ }
+
+ if (IS_GEN5(dev))
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
- } else if (IS_GEN6(dev)) {
- if (dev_priv->wm.pri_latency[0] &&
- dev_priv->wm.spr_latency[0] &&
- dev_priv->wm.cur_latency[0]) {
- dev_priv->display.update_wm = sandybridge_update_wm;
- dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
- } else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
+ else if (IS_GEN6(dev))
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
- } else if (IS_IVYBRIDGE(dev)) {
- if (dev_priv->wm.pri_latency[0] &&
- dev_priv->wm.spr_latency[0] &&
- dev_priv->wm.cur_latency[0]) {
- dev_priv->display.update_wm = ivybridge_update_wm;
- dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
- } else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
+ else if (IS_IVYBRIDGE(dev))
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
- } else if (IS_HASWELL(dev)) {
- if (dev_priv->wm.pri_latency[0] &&
- dev_priv->wm.spr_latency[0] &&
- dev_priv->wm.cur_latency[0]) {
- dev_priv->display.update_wm = haswell_update_wm;
- dev_priv->display.update_sprite_wm =
- haswell_update_sprite_wm;
- } else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
+ else if (IS_HASWELL(dev))
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
- } else if (INTEL_INFO(dev)->gen == 8) {
+ else if (INTEL_INFO(dev)->gen == 8)
dev_priv->display.init_clock_gating = gen8_init_clock_gating;
- } else
- dev_priv->display.update_wm = NULL;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.init_clock_gating =
@@ -6010,21 +5614,21 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
- } else if (IS_I865G(dev)) {
- dev_priv->display.update_wm = i830_update_wm;
- dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- dev_priv->display.get_fifo_size = i830_get_fifo_size;
- } else if (IS_I85X(dev)) {
- dev_priv->display.update_wm = i9xx_update_wm;
- dev_priv->display.get_fifo_size = i85x_get_fifo_size;
- dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- } else {
- dev_priv->display.update_wm = i830_update_wm;
- dev_priv->display.init_clock_gating = i830_init_clock_gating;
- if (IS_845G(dev))
+ } else if (IS_GEN2(dev)) {
+ if (INTEL_INFO(dev)->num_pipes == 1) {
+ dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
- else
+ } else {
+ dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ }
+
+ if (IS_I85X(dev) || IS_I865G(dev))
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ else
+ dev_priv->display.init_clock_gating = i830_init_clock_gating;
+ } else {
+ DRM_ERROR("unexpected fall-through in intel_init_pm\n");
}
}
@@ -6075,65 +5679,63 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
return 0;
}
-int vlv_gpu_freq(int ddr_freq, int val)
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- int mult, base;
+ int div;
- switch (ddr_freq) {
+ /* 4 x czclk */
+ switch (dev_priv->mem_freq) {
case 800:
- mult = 20;
- base = 120;
+ div = 10;
break;
case 1066:
- mult = 22;
- base = 133;
+ div = 12;
break;
case 1333:
- mult = 21;
- base = 125;
+ div = 16;
break;
default:
return -1;
}
- return ((val - 0xbd) * mult) + base;
+ return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}
-int vlv_freq_opcode(int ddr_freq, int val)
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- int mult, base;
+ int mul;
- switch (ddr_freq) {
+ /* 4 x czclk */
+ switch (dev_priv->mem_freq) {
case 800:
- mult = 20;
- base = 120;
+ mul = 10;
break;
case 1066:
- mult = 22;
- base = 133;
+ mul = 12;
break;
case 1333:
- mult = 21;
- base = 125;
+ mul = 16;
break;
default:
return -1;
}
- val /= mult;
- val -= base / mult;
- val += 0xbd;
-
- if (val > 0xea)
- val = 0xea;
-
- return val;
+ return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}
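
The two conversions above replace the old per-DDR-speed multiplier/base table with one closed-form expression around a divider derived from mem_freq. A standalone check of the arithmetic (DIV_ROUND_CLOSEST simplified for non-negative values): with 1066 MHz memory, opcode 0xbd maps to 133 MHz and back, matching the old table's base value for that speed.

	#include <stdio.h>

	#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

	/* Same arithmetic as the new vlv_gpu_freq()/vlv_freq_opcode(), with the
	 * divider table inlined; mem_freq is the DDR speed in MHz. */
	static int czclk_div(int mem_freq)
	{
		switch (mem_freq) {
		case 800:  return 10;
		case 1066: return 12;
		case 1333: return 16;
		default:   return -1;
		}
	}

	static int gpu_freq_mhz(int mem_freq, int opcode)
	{
		return DIV_ROUND_CLOSEST(mem_freq * (opcode + 6 - 0xbd),
					 4 * czclk_div(mem_freq));
	}

	static int freq_opcode(int mem_freq, int mhz)
	{
		return DIV_ROUND_CLOSEST(4 * czclk_div(mem_freq) * mhz, mem_freq)
			+ 0xbd - 6;
	}

	int main(void)
	{
		printf("0xbd    -> %d MHz\n", gpu_freq_mhz(1066, 0xbd)); /* 133 */
		printf("133 MHz -> 0x%x\n", freq_opcode(1066, 133));    /* 0xbd */
		return 0;
	}
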
-void intel_pm_init(struct drm_device *dev)
+void intel_pm_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ mutex_init(&dev_priv->rps.hw_lock);
+
+ mutex_init(&dev_priv->pc8.lock);
+ dev_priv->pc8.requirements_met = false;
+ dev_priv->pc8.gpu_idle = false;
+ dev_priv->pc8.irqs_disabled = false;
+ dev_priv->pc8.enabled = false;
+ dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+ INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b620337e6d67..b7f1742caf87 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -285,14 +285,16 @@ static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
if (!ring->fbc_dirty)
return 0;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
- intel_ring_emit(ring, MI_NOOP);
/* WaFbcNukeOn3DBlt:ivb/hsw */
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, MSG_FBC_REND_STATE);
intel_ring_emit(ring, value);
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
+ intel_ring_emit(ring, MSG_FBC_REND_STATE);
+ intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
intel_ring_advance(ring);
ring->fbc_dirty = false;
@@ -354,7 +356,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
- if (flush_domains)
+ if (!invalidate_domains && flush_domains)
return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
return 0;
@@ -436,7 +438,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
int ret = 0;
u32 head;
- gen6_gt_force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
if (I915_NEED_GFX_HWS(dev))
intel_ring_setup_status_page(ring);
@@ -509,7 +511,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
out:
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
@@ -661,19 +663,22 @@ gen6_add_request(struct intel_ring_buffer *ring)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *useless;
- int i, ret;
+ int i, ret, num_dwords = 4;
- ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
- MBOX_UPDATE_DWORDS) +
- 4);
+ if (i915_semaphore_is_enabled(dev))
+ num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
+#undef MBOX_UPDATE_DWORDS
+
+ ret = intel_ring_begin(ring, num_dwords);
if (ret)
return ret;
-#undef MBOX_UPDATE_DWORDS
- for_each_ring(useless, dev_priv, i) {
- u32 mbox_reg = ring->signal_mbox[i];
- if (mbox_reg != GEN6_NOSYNC)
- update_mboxes(ring, mbox_reg);
+ if (i915_semaphore_is_enabled(dev)) {
+ for_each_ring(useless, dev_priv, i) {
+ u32 mbox_reg = ring->signal_mbox[i];
+ if (mbox_reg != GEN6_NOSYNC)
+ update_mboxes(ring, mbox_reg);
+ }
}
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
@@ -965,6 +970,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
} else if (IS_GEN6(ring->dev)) {
mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
} else {
+ /* XXX: gen8 returns to sanity */
mmio = RING_HWS_PGA(ring->mmio_base);
}
@@ -1029,11 +1035,6 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
if (!dev->irq_enabled)
return false;
- /* It looks like we need to prevent the gt from suspending while waiting
- * for an notifiy irq, otherwise irqs seem to get lost on at least the
- * blt/bsd rings on ivb. */
- gen6_gt_force_wake_get(dev_priv);
-
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
if (HAS_L3_DPF(dev) && ring->id == RCS)
@@ -1065,8 +1066,6 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- gen6_gt_force_wake_put(dev_priv);
}
static bool
@@ -1610,8 +1609,8 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}
-static int __intel_ring_begin(struct intel_ring_buffer *ring,
- int bytes)
+static int __intel_ring_prepare(struct intel_ring_buffer *ring,
+ int bytes)
{
int ret;
@@ -1627,7 +1626,6 @@ static int __intel_ring_begin(struct intel_ring_buffer *ring,
return ret;
}
- ring->space -= bytes;
return 0;
}
@@ -1642,12 +1640,17 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
if (ret)
return ret;
+ ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
+ if (ret)
+ return ret;
+
/* Preallocate the olr before touching the ring */
ret = intel_ring_alloc_seqno(ring);
if (ret)
return ret;
- return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+ ring->space -= num_dwords * sizeof(uint32_t);
+ return 0;
}
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
@@ -1837,7 +1840,7 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
}
intel_ring_advance(ring);
- if (IS_GEN7(dev) && flush)
+ if (IS_GEN7(dev) && !invalidate && flush)
return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
return 0;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index a583e8f718a7..95bdfb3c431c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -413,23 +413,34 @@ static const struct _sdvo_cmd_name {
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
- int i;
+ int i, pos = 0;
+#define BUF_LEN 256
+ char buffer[BUF_LEN];
+
+#define BUF_PRINT(args...) \
+ pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+
- DRM_DEBUG_KMS("%s: W: %02X ",
- SDVO_NAME(intel_sdvo), cmd);
- for (i = 0; i < args_len; i++)
- DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
- for (; i < 8; i++)
- DRM_LOG_KMS(" ");
+ for (i = 0; i < args_len; i++) {
+ BUF_PRINT("%02X ", ((u8 *)args)[i]);
+ }
+ for (; i < 8; i++) {
+ BUF_PRINT(" ");
+ }
for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
if (cmd == sdvo_cmd_names[i].cmd) {
- DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
+ BUF_PRINT("(%s)", sdvo_cmd_names[i].name);
break;
}
}
- if (i == ARRAY_SIZE(sdvo_cmd_names))
- DRM_LOG_KMS("(%02X)", cmd);
- DRM_LOG_KMS("\n");
+ if (i == ARRAY_SIZE(sdvo_cmd_names)) {
+ BUF_PRINT("(%02X)", cmd);
+ }
+ BUG_ON(pos >= BUF_LEN - 1);
+#undef BUF_PRINT
+#undef BUF_LEN
+
+ DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
}
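
The rewritten debug path collects the whole command dump into one local buffer and emits a single DRM_DEBUG_KMS() line instead of the old DRM_LOG_KMS() fragments; BUF_PRINT appends with snprintf while clamping the remaining length at zero so an oversized dump cannot run past the buffer, and the BUG_ON afterwards flags truncation. A standalone sketch of that bounded-append pattern (the command name and bytes are invented for illustration):

	#include <stdio.h>

	#define BUF_LEN 256

	/* Append to buf[] at offset pos, never passing snprintf a negative
	 * size; pos keeps growing past the length on overflow so the caller
	 * can detect truncation afterwards, like the BUG_ON() in the patch. */
	#define BUF_PRINT(buf, pos, ...)				\
		((pos) += snprintf((buf) + (pos),			\
				   (pos) < BUF_LEN ? BUF_LEN - (pos) : 0,	\
				   __VA_ARGS__))

	int main(void)
	{
		char buffer[BUF_LEN];
		int pos = 0;
		unsigned char args[3] = { 0x10, 0x20, 0x30 };
		int i;

		for (i = 0; i < 3; i++)
			BUF_PRINT(buffer, pos, "%02X ", args[i]);
		BUF_PRINT(buffer, pos, "(%s)", "SDVO_CMD_EXAMPLE");

		if (pos >= BUF_LEN - 1)
			fprintf(stderr, "debug dump truncated\n");
		printf("W: %s\n", buffer);
		return 0;
	}
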
static const char *cmd_status_names[] = {
@@ -512,9 +523,10 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
{
u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
- int i;
+ int i, pos = 0;
+#define BUF_LEN 256
+ char buffer[BUF_LEN];
- DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
/*
* The documentation states that all commands will be
@@ -551,10 +563,13 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
goto log_fail;
}
+#define BUF_PRINT(args...) \
+ pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+
if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
- DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+ BUF_PRINT("(%s)", cmd_status_names[status]);
else
- DRM_LOG_KMS("(??? %d)", status);
+ BUF_PRINT("(??? %d)", status);
if (status != SDVO_CMD_STATUS_SUCCESS)
goto log_fail;
@@ -565,13 +580,17 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
SDVO_I2C_RETURN_0 + i,
&((u8 *)response)[i]))
goto log_fail;
- DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
+ BUF_PRINT(" %02X", ((u8 *)response)[i]);
}
- DRM_LOG_KMS("\n");
+ BUG_ON(pos >= BUF_LEN - 1);
+#undef BUF_PRINT
+#undef BUF_LEN
+
+ DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
return true;
log_fail:
- DRM_LOG_KMS("... failed\n");
+ DRM_DEBUG_KMS("%s: R: ... failed\n", SDVO_NAME(intel_sdvo));
return false;
}
@@ -933,7 +952,7 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
unsigned if_index, uint8_t tx_rate,
- uint8_t *data, unsigned length)
+ const uint8_t *data, unsigned length)
{
uint8_t set_buf_index[2] = { if_index, 0 };
uint8_t hbuf_size, tmp[8];
@@ -1517,8 +1536,9 @@ static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
intel_modeset_check_state(connector->dev);
}
-static int intel_sdvo_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_sdvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 770bdd6ecd9f..2e2d4eb4a00d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -59,7 +59,7 @@ struct intel_sdvo_caps {
unsigned int stall_support:1;
unsigned int pad:1;
u16 output_flags;
-} __attribute__((packed));
+} __packed;
/* Note: SDVO detailed timing flags match EDID misc flags. */
#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
@@ -94,12 +94,12 @@ struct intel_sdvo_dtd {
u8 v_sync_off_high;
u8 reserved;
} part2;
-} __attribute__((packed));
+} __packed;
struct intel_sdvo_pixel_clock_range {
u16 min; /**< pixel clock, in 10kHz units */
u16 max; /**< pixel clock, in 10kHz units */
-} __attribute__((packed));
+} __packed;
struct intel_sdvo_preferred_input_timing_args {
u16 clock;
@@ -108,7 +108,7 @@ struct intel_sdvo_preferred_input_timing_args {
u8 interlace:1;
u8 scaled:1;
u8 pad:6;
-} __attribute__((packed));
+} __packed;
/* I2C registers for SDVO */
#define SDVO_I2C_ARG_0 0x07
@@ -162,7 +162,7 @@ struct intel_sdvo_get_trained_inputs_response {
unsigned int input0_trained:1;
unsigned int input1_trained:1;
unsigned int pad:6;
-} __attribute__((packed));
+} __packed;
/** Returns a struct intel_sdvo_output_flags of active outputs. */
#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
@@ -219,7 +219,7 @@ struct intel_sdvo_get_interrupt_event_source_response {
unsigned int ambient_light_interrupt:1;
unsigned int hdmi_audio_encrypt_change:1;
unsigned int pad:6;
-} __attribute__((packed));
+} __packed;
/**
* Selects which input is affected by future input commands.
@@ -232,7 +232,7 @@ struct intel_sdvo_get_interrupt_event_source_response {
struct intel_sdvo_set_target_input_args {
unsigned int target_1:1;
unsigned int pad:7;
-} __attribute__((packed));
+} __packed;
/**
* Takes a struct intel_sdvo_output_flags of which outputs are targeted by
@@ -370,7 +370,7 @@ struct intel_sdvo_tv_format {
unsigned int hdtv_std_eia_7702a_480i_60:1;
unsigned int hdtv_std_eia_7702a_480p_60:1;
unsigned int pad:3;
-} __attribute__((packed));
+} __packed;
#define SDVO_CMD_GET_TV_FORMAT 0x28
@@ -401,7 +401,7 @@ struct intel_sdvo_sdtv_resolution_request {
unsigned int secam_l:1;
unsigned int secam_60:1;
unsigned int pad:5;
-} __attribute__((packed));
+} __packed;
struct intel_sdvo_sdtv_resolution_reply {
unsigned int res_320x200:1;
@@ -426,7 +426,7 @@ struct intel_sdvo_sdtv_resolution_reply {
unsigned int res_1024x768:1;
unsigned int res_1280x1024:1;
unsigned int pad:5;
-} __attribute__((packed));
+} __packed;
/* Get supported resolution with square pixel aspect ratio that can be
scaled for the requested HDTV format */
@@ -463,7 +463,7 @@ struct intel_sdvo_hdtv_resolution_request {
unsigned int hdtv_std_eia_7702a_480i_60:1;
unsigned int hdtv_std_eia_7702a_480p_60:1;
unsigned int pad:6;
-} __attribute__((packed));
+} __packed;
struct intel_sdvo_hdtv_resolution_reply {
unsigned int res_640x480:1;
@@ -517,7 +517,7 @@ struct intel_sdvo_hdtv_resolution_reply {
unsigned int res_1280x768:1;
unsigned int pad5:7;
-} __attribute__((packed));
+} __packed;
/* Get supported power state returns info for encoder and monitor, rely on
last SetTargetInput and SetTargetOutput calls */
@@ -557,13 +557,13 @@ struct sdvo_panel_power_sequencing {
unsigned int t4_high:2;
unsigned int pad:6;
-} __attribute__((packed));
+} __packed;
#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
struct sdvo_max_backlight_reply {
u8 max_value;
u8 default_value;
-} __attribute__((packed));
+} __packed;
#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
@@ -573,14 +573,14 @@ struct sdvo_get_ambient_light_reply {
u16 trip_low;
u16 trip_high;
u16 value;
-} __attribute__((packed));
+} __packed;
#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
struct sdvo_set_ambient_light_reply {
u16 trip_low;
u16 trip_high;
unsigned int enable:1;
unsigned int pad:7;
-} __attribute__((packed));
+} __packed;
/* Set display power state */
#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
@@ -608,7 +608,7 @@ struct intel_sdvo_enhancements_reply {
unsigned int dither:1;
unsigned int tv_chroma_filter:1;
unsigned int tv_luma_filter:1;
-} __attribute__((packed));
+} __packed;
/* Picture enhancement limits below are dependent on the current TV format,
* and thus need to be queried and set after it.
@@ -630,7 +630,7 @@ struct intel_sdvo_enhancements_reply {
struct intel_sdvo_enhancement_limits_reply {
u16 max_value;
u16 default_value;
-} __attribute__((packed));
+} __packed;
#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
@@ -671,7 +671,7 @@ struct intel_sdvo_enhancement_limits_reply {
#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
struct intel_sdvo_enhancements_arg {
u16 value;
-} __attribute__((packed));
+} __packed;
#define SDVO_CMD_GET_DOT_CRAWL 0x70
#define SDVO_CMD_SET_DOT_CRAWL 0x71
@@ -727,4 +727,4 @@ struct intel_sdvo_enhancements_arg {
struct intel_sdvo_encode {
u8 dvi_rev;
u8 hdmi_rev;
-} __attribute__ ((packed));
+} __packed;
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 9944d8135e87..0954f132726e 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -90,6 +90,22 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
mutex_unlock(&dev_priv->dpio_lock);
}
+u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+ PUNIT_OPCODE_REG_READ, reg, &val);
+
+ return val;
+}
+
+void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+ PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
{
u32 val = 0;
@@ -160,27 +176,18 @@ void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
PUNIT_OPCODE_REG_WRITE, reg, &val);
}
-static u32 vlv_get_phy_port(enum pipe pipe)
-{
- u32 port = IOSF_PORT_DPIO;
-
- WARN_ON ((pipe != PIPE_A) && (pipe != PIPE_B));
-
- return port;
-}
-
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
DPIO_OPCODE_REG_READ, reg, &val);
return val;
}
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
{
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
DPIO_OPCODE_REG_WRITE, reg, &val);
}
@@ -242,3 +249,17 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
return;
}
}
+
+u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val = 0;
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
+ DPIO_OPCODE_REG_READ, reg, &val);
+ return val;
+}
+
+void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
+ DPIO_OPCODE_REG_WRITE, reg, &val);
+}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index b9fabf826f7d..716a3c9c0751 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -104,6 +104,12 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
break;
}
+ /*
+ * Enable gamma to match primary/cursor plane behaviour.
+ * FIXME should be user controllable via properties.
+ */
+ sprctl |= SP_GAMMA_ENABLE;
+
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SP_TILED;
@@ -135,8 +141,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
- I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
- sprsurf_offset);
+ I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
+ sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
}
@@ -152,7 +158,7 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
~SP_ENABLE);
/* Activate double buffered register update */
- I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
+ I915_WRITE(SPSURF(pipe, plane), 0);
POSTING_READ(SPSURF(pipe, plane));
intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
@@ -224,7 +230,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
sprctl = I915_READ(SPRCTL(pipe));
@@ -257,6 +262,12 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
BUG();
}
+ /*
+ * Enable gamma to match primary/cursor plane behaviour.
+ * FIXME should be user controllable via properties.
+ */
+ sprctl |= SPRITE_GAMMA_ENABLE;
+
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SPRITE_TILED;
@@ -279,21 +290,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
crtc_w--;
crtc_h--;
- /*
- * IVB workaround: must disable low power watermarks for at least
- * one frame before enabling scaling. LP watermarks can be re-enabled
- * when scaling is disabled.
- */
- if (crtc_w != src_w || crtc_h != src_h) {
- dev_priv->sprite_scaling_enabled |= 1 << pipe;
-
- if (!scaling_was_enabled) {
- intel_update_watermarks(crtc);
- intel_wait_for_vblank(dev, pipe);
- }
+ if (crtc_w != src_w || crtc_h != src_h)
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
- } else
- dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -317,13 +315,9 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
- I915_MODIFY_DISPBASE(SPRSURF(pipe),
- i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
+ I915_WRITE(SPRSURF(pipe),
+ i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
-
- /* potentially re-enable LP watermarks */
- if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
- intel_update_watermarks(crtc);
}
static void
@@ -333,23 +327,22 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
- bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
- I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
+ I915_WRITE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
- dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
+ /*
+ * Avoid underruns when disabling the sprite.
+ * FIXME remove once watermark updates are done properly.
+ */
+ intel_wait_for_vblank(dev, pipe);
intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
-
- /* potentially re-enable LP watermarks */
- if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
- intel_update_watermarks(crtc);
}
static int
@@ -453,6 +446,12 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
BUG();
}
+ /*
+ * Enable gamma to match primary/cursor plane behaviour.
+ * FIXME should be user controllable via properties.
+ */
+ dvscntr |= DVS_GAMMA_ENABLE;
+
if (obj->tiling_mode != I915_TILING_NONE)
dvscntr |= DVS_TILED;
@@ -470,7 +469,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
crtc_h--;
dvsscale = 0;
- if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
+ if (crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
@@ -490,8 +489,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_MODIFY_DISPBASE(DVSSURF(pipe),
- i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
+ I915_WRITE(DVSSURF(pipe),
+ i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
@@ -507,9 +506,15 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
/* Flush double buffered register updates */
- I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
+ I915_WRITE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
+ /*
+ * Avoid underruns when disabling the sprite.
+ * FIXME remove once watermark updates are done properly.
+ */
+ intel_wait_for_vblank(dev, pipe);
+
intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
}
@@ -643,6 +648,15 @@ format_is_yuv(uint32_t format)
}
}
+static bool colorkey_enabled(struct intel_plane *intel_plane)
+{
+ struct drm_intel_sprite_colorkey key;
+
+ intel_plane->get_colorkey(&intel_plane->base, &key);
+
+ return key.flags != I915_SET_COLORKEY_NONE;
+}
+
static int
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
@@ -828,7 +842,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
*/
- disable_primary = drm_rect_equals(&dst, &clip);
+ disable_primary = drm_rect_equals(&dst, &clip) && !colorkey_enabled(intel_plane);
WARN_ON(disable_primary && !visible && intel_crtc->active);
mutex_lock(&dev->struct_mutex);
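The hunk above tightens the power-saving rule from the comment: the primary plane may only be switched off when the sprite's destination exactly matches the clipped CRTC area and no color key can punch holes through the sprite. A self-contained sketch of that test, assuming illustrative struct and helper names rather than the real drm_rect API:

#include <stdbool.h>

struct rect { int x1, y1, x2, y2; };

/* True when both rectangles describe exactly the same area. */
bool rect_equals(const struct rect *a, const struct rect *b)
{
	return a->x1 == b->x1 && a->y1 == b->y1 &&
	       a->x2 == b->x2 && a->y2 == b->y2;
}

/* The primary plane may be disabled only for full, opaque coverage. */
bool can_disable_primary(const struct rect *dst,
			 const struct rect *clip,
			 bool colorkey_in_use)
{
	return rect_equals(dst, clip) && !colorkey_in_use;
}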
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 0b02078a0b84..87df68f5f504 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -64,7 +64,8 @@ static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
__raw_posting_read(dev_priv, ECOBUS);
}
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
+ int fw_engine)
{
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
@@ -89,7 +90,8 @@ static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
__raw_posting_read(dev_priv, ECOBUS);
}
-static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
+ int fw_engine)
{
u32 forcewake_ack;
@@ -121,12 +123,12 @@ static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
u32 gtfifodbg;
gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
- if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
- "MMIO read or write has been dropped %x\n", gtfifodbg))
- __raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+ if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
+ __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
+ int fw_engine)
{
__raw_i915_write32(dev_priv, FORCEWAKE, 0);
/* something from same cacheline, but !FORCEWAKE */
@@ -134,7 +136,8 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
gen6_gt_check_fifodbg(dev_priv);
}
-static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
+ int fw_engine)
{
__raw_i915_write32(dev_priv, FORCEWAKE_MT,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
@@ -147,12 +150,19 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
int ret = 0;
+ /* On VLV, the FIFO is shared by both SW and HW.
+ * So we need to read the FREE_ENTRIES every time */
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ dev_priv->uncore.fifo_count =
+ __raw_i915_read32(dev_priv, GTFIFOCTL) &
+ GT_FIFO_FREE_ENTRIES_MASK;
+
if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
- u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+ u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
udelay(10);
- fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+ fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
}
if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
++ret;
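The comment above notes that on VLV the GT free-entry FIFO is shared between software and hardware, so the free count must be re-read from GTFIFOCTL on every poll instead of being cached. A minimal, self-contained sketch of that masked poll loop; the register stub, mask width and reserved-entry count are illustrative assumptions, not the i915 definitions:

#include <stdint.h>

#define FIFO_FREE_MASK  0x7f	/* assumed width of the free-entry field */
#define FIFO_RESERVED   20	/* assumed number of reserved entries */

/* Stand-in for a raw MMIO read; pretend entries slowly drain back. */
static uint32_t fifo_read32(void)
{
	static uint32_t fake = 16;
	return fake++;
}

/* Returns 0 once enough entries are free, -1 if the retry budget runs out. */
static int wait_for_fifo(void)
{
	int retries = 500;
	uint32_t free_entries = fifo_read32() & FIFO_FREE_MASK;

	/* Hardware also consumes entries, so re-read on every iteration. */
	while (free_entries <= FIFO_RESERVED && retries--)
		free_entries = fifo_read32() & FIFO_FREE_MASK;

	return (free_entries <= FIFO_RESERVED) ? -1 : 0;
}

int main(void)
{
	return wait_for_fifo();
}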
@@ -171,38 +181,112 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}
-static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
+ int fw_engine)
{
- if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+ /* Check for Render Engine */
+ if (FORCEWAKE_RENDER & fw_engine) {
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_VLV) &
+ FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
- __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
- if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_VLV) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: waiting for Render to ack.\n");
+ }
- if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
+ /* Check for Media Engine */
+ if (FORCEWAKE_MEDIA & fw_engine) {
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_MEDIA_VLV) &
+ FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_MEDIA_VLV) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: waiting for media to ack.\n");
+ }
/* WaRsForcewakeWaitTC0:vlv */
__gen6_gt_wait_for_thread_c0(dev_priv);
+
}
-static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
+ int fw_engine)
{
- __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+ /* Check for Render Engine */
+ if (FORCEWAKE_RENDER & fw_engine)
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+
+ /* Check for Media Engine */
+ if (FORCEWAKE_MEDIA & fw_engine)
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
+
+}
+
+void vlv_force_wake_get(struct drm_i915_private *dev_priv,
+ int fw_engine)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (FORCEWAKE_RENDER & fw_engine) {
+ if (dev_priv->uncore.fw_rendercount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_RENDER);
+ }
+ if (FORCEWAKE_MEDIA & fw_engine) {
+ if (dev_priv->uncore.fw_mediacount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_MEDIA);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+void vlv_force_wake_put(struct drm_i915_private *dev_priv,
+ int fw_engine)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (FORCEWAKE_RENDER & fw_engine) {
+ WARN_ON(dev_priv->uncore.fw_rendercount == 0);
+ if (--dev_priv->uncore.fw_rendercount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_RENDER);
+ }
+
+ if (FORCEWAKE_MEDIA & fw_engine) {
+ WARN_ON(dev_priv->uncore.fw_mediacount == 0);
+ if (--dev_priv->uncore.fw_mediacount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_MEDIA);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void gen6_force_wake_work(struct work_struct *work)
@@ -213,7 +297,7 @@ static void gen6_force_wake_work(struct work_struct *work)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (--dev_priv->uncore.forcewake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv);
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -248,6 +332,11 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
}
+ /* clear out old GT FIFO errors */
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ __raw_i915_write32(dev_priv, GTFIFODBG,
+ __raw_i915_read32(dev_priv, GTFIFODBG));
+
intel_uncore_forcewake_reset(dev);
}
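The new early-sanitize step above clears any stale GT FIFO errors by writing the debug register's current contents back to it, so later WARNs only trigger on errors that happen afterwards. A hedged, self-contained sketch of that read-then-write-back clearing pattern on a simulated sticky register; the write-one-to-clear semantics are an assumption for illustration, not a statement about GTFIFODBG:

#include <stdint.h>
#include <stdio.h>

/* Simulated sticky error register: writing 1 to a bit clears that bit. */
static uint32_t fifo_dbg = 0x5;	/* pretend two error bits are latched */

static uint32_t reg_read(void)        { return fifo_dbg; }
static void     reg_write(uint32_t v) { fifo_dbg &= ~v; }

int main(void)
{
	/* Clear out old errors so later checks only see fresh ones. */
	reg_write(reg_read());
	printf("errors after clear: 0x%x\n", fifo_dbg);
	return 0;
}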
@@ -256,8 +345,6 @@ void intel_uncore_sanitize(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg_val;
- intel_uncore_forcewake_reset(dev);
-
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev);
@@ -281,29 +368,40 @@ void intel_uncore_sanitize(struct drm_device *dev)
* be called at the beginning of the sequence followed by a call to
* gen6_gt_force_wake_put() at the end of the sequence.
*/
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
unsigned long irqflags;
if (!dev_priv->uncore.funcs.force_wake_get)
return;
+ intel_runtime_pm_get(dev_priv);
+
+ /* Redirect to VLV specific routine */
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ return vlv_force_wake_get(dev_priv, fw_engine);
+
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (dev_priv->uncore.forcewake_count++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv);
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/*
* see gen6_gt_force_wake_get()
*/
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
unsigned long irqflags;
if (!dev_priv->uncore.funcs.force_wake_put)
return;
+ /* Redirect to VLV specific routine */
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ return vlv_force_wake_put(dev_priv, fw_engine);
+
+
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (--dev_priv->uncore.forcewake_count == 0) {
dev_priv->uncore.forcewake_count++;
@@ -312,6 +410,8 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
1);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ intel_runtime_pm_put(dev_priv);
}
/* We give fast paths for the really cool registers */
@@ -346,6 +446,13 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
}
}
+static void
+assert_device_not_suspended(struct drm_i915_private *dev_priv)
+{
+ WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+ "Device suspended\n");
+}
+
#define REG_READ_HEADER(x) \
unsigned long irqflags; \
u##x val = 0; \
@@ -379,16 +486,51 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
if (dev_priv->uncore.forcewake_count == 0) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+ FORCEWAKE_ALL); \
val = __raw_i915_read##x(dev_priv, reg); \
if (dev_priv->uncore.forcewake_count == 0) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+ FORCEWAKE_ALL); \
} else { \
val = __raw_i915_read##x(dev_priv, reg); \
} \
REG_READ_FOOTER; \
}
+#define __vlv_read(x) \
+static u##x \
+vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ unsigned fwengine = 0; \
+ unsigned *fwcount; \
+ REG_READ_HEADER(x); \
+ if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
+ fwengine = FORCEWAKE_RENDER; \
+ fwcount = &dev_priv->uncore.fw_rendercount; \
+ } \
+ else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
+ fwengine = FORCEWAKE_MEDIA; \
+ fwcount = &dev_priv->uncore.fw_mediacount; \
+ } \
+ if (fwengine != 0) { \
+ if ((*fwcount)++ == 0) \
+ (dev_priv)->uncore.funcs.force_wake_get(dev_priv, \
+ fwengine); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ if (--(*fwcount) == 0) \
+ (dev_priv)->uncore.funcs.force_wake_put(dev_priv, \
+ fwengine); \
+ } else { \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ } \
+ REG_READ_FOOTER; \
+}
+
+
+__vlv_read(8)
+__vlv_read(16)
+__vlv_read(32)
+__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
@@ -402,6 +544,7 @@ __gen4_read(16)
__gen4_read(32)
__gen4_read(64)
+#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
@@ -413,12 +556,15 @@ __gen4_read(64)
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+#define REG_WRITE_FOOTER \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+
#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
REG_WRITE_HEADER; \
__raw_i915_write##x(dev_priv, reg, val); \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ REG_WRITE_FOOTER; \
}
#define __gen5_write(x) \
@@ -427,7 +573,7 @@ gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
REG_WRITE_HEADER; \
ilk_dummy_write(dev_priv); \
__raw_i915_write##x(dev_priv, reg, val); \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ REG_WRITE_FOOTER; \
}
#define __gen6_write(x) \
@@ -438,11 +584,12 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
+ assert_device_not_suspended(dev_priv); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ REG_WRITE_FOOTER; \
}
#define __hsw_write(x) \
@@ -453,13 +600,14 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
+ assert_device_not_suspended(dev_priv); \
hsw_unclaimed_reg_clear(dev_priv, reg); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_check(dev_priv, reg); \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ REG_WRITE_FOOTER; \
}
static const u32 gen8_shadowed_regs[] = {
@@ -486,16 +634,18 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
- bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \
+ bool __needs_put = reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg); \
REG_WRITE_HEADER; \
if (__needs_put) { \
- dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+ FORCEWAKE_ALL); \
} \
__raw_i915_write##x(dev_priv, reg, val); \
if (__needs_put) { \
- dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+ FORCEWAKE_ALL); \
} \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ REG_WRITE_FOOTER; \
}
__gen8_write(8)
@@ -524,6 +674,7 @@ __gen4_write(64)
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
+#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER
void intel_uncore_init(struct drm_device *dev)
@@ -534,8 +685,8 @@ void intel_uncore_init(struct drm_device *dev)
gen6_force_wake_work);
if (IS_VALLEYVIEW(dev)) {
- dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
+ dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
@@ -552,9 +703,9 @@ void intel_uncore_init(struct drm_device *dev)
* forcewake being disabled.
*/
mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
+ __gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
+ __gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
if (ecobus & FORCEWAKE_MT_ENABLE) {
@@ -601,10 +752,18 @@ void intel_uncore_init(struct drm_device *dev)
dev_priv->uncore.funcs.mmio_writel = gen6_write32;
dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
}
- dev_priv->uncore.funcs.mmio_readb = gen6_read8;
- dev_priv->uncore.funcs.mmio_readw = gen6_read16;
- dev_priv->uncore.funcs.mmio_readl = gen6_read32;
- dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->uncore.funcs.mmio_readb = vlv_read8;
+ dev_priv->uncore.funcs.mmio_readw = vlv_read16;
+ dev_priv->uncore.funcs.mmio_readl = vlv_read32;
+ dev_priv->uncore.funcs.mmio_readq = vlv_read64;
+ } else {
+ dev_priv->uncore.funcs.mmio_readb = gen6_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen6_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen6_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ }
break;
case 5:
dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
@@ -646,7 +805,7 @@ static const struct register_whitelist {
uint32_t size;
uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
- { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+ { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0x1F0 },
};
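The gen_bitmask field documented above uses one bit per supported generation (bit n set means gen n is allowed), so widening the RING_TIMESTAMP entry from 0xF0 to 0x1F0 extends the whitelist from gens 4-7 to gens 4-8. A short, self-contained example of decoding such a mask:

#include <stdio.h>

int main(void)
{
	unsigned int gen_bitmask = 0x1F0;	/* bits 4..8 set */
	int gen;

	for (gen = 1; gen <= 8; gen++)
		if (gen_bitmask & (1u << gen))
			printf("gen %d supported\n", gen);
	return 0;
}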
int i915_reg_read_ioctl(struct drm_device *dev,
@@ -687,6 +846,43 @@ int i915_reg_read_ioctl(struct drm_device *dev,
return 0;
}
+int i915_get_reset_stats_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_reset_stats *args = data;
+ struct i915_ctx_hang_stats *hs;
+ int ret;
+
+ if (args->flags || args->pad)
+ return -EINVAL;
+
+ if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id);
+ if (IS_ERR(hs)) {
+ mutex_unlock(&dev->struct_mutex);
+ return PTR_ERR(hs);
+ }
+
+ if (capable(CAP_SYS_ADMIN))
+ args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ else
+ args->reset_count = 0;
+
+ args->batch_active = hs->batch_active;
+ args->batch_pending = hs->batch_pending;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
static int i965_reset_complete(struct drm_device *dev)
{
u8 gdrst;
@@ -770,12 +966,12 @@ static int gen6_do_reset(struct drm_device *dev)
/* If reset with a user forcewake, try to restore, otherwise turn it off */
if (dev_priv->uncore.forcewake_count)
- dev_priv->uncore.funcs.force_wake_get(dev_priv);
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
else
- dev_priv->uncore.funcs.force_wake_put(dev_priv);
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
/* Restore fifo count */
- dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+ dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
return ret;
@@ -784,6 +980,7 @@ static int gen6_do_reset(struct drm_device *dev)
int intel_gpu_reset(struct drm_device *dev)
{
switch (INTEL_INFO(dev)->gen) {
+ case 8:
case 7:
case 6: return gen6_do_reset(dev);
case 5: return ironlake_do_reset(dev);
@@ -792,15 +989,6 @@ int intel_gpu_reset(struct drm_device *dev)
}
}
-void intel_uncore_clear_errors(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* XXX needs spinlock around caller's grouping */
- if (HAS_FPGA_DBG_UNCLAIMED(dev))
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-}
-
void intel_uncore_check_errors(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 087db33f6cff..c3bf059ba720 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -1075,10 +1075,10 @@ static int mga_dma_get_buffers(struct drm_device *dev,
buf->file_priv = file_priv;
- if (DRM_COPY_TO_USER(&d->request_indices[i],
+ if (copy_to_user(&d->request_indices[i],
&buf->idx, sizeof(buf->idx)))
return -EFAULT;
- if (DRM_COPY_TO_USER(&d->request_sizes[i],
+ if (copy_to_user(&d->request_sizes[i],
&buf->total, sizeof(buf->total)))
return -EFAULT;
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index ca4bc54ea214..fe453213600a 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -186,14 +186,14 @@ extern void mga_disable_vblank(struct drm_device *dev, int crtc);
extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence);
extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
-extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t mga_driver_irq_handler(int irq, void *arg);
extern void mga_driver_irq_preinstall(struct drm_device *dev);
extern int mga_driver_irq_postinstall(struct drm_device *dev);
extern void mga_driver_irq_uninstall(struct drm_device *dev);
extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
-#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER()
+#define mga_flush_write_combine() wmb()
#define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg))
#define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg))
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 709e90db8c40..86b4bb804852 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -34,6 +34,7 @@
#include <drm/drmP.h>
#include <drm/mga_drm.h>
+#include "mga_drv.h"
typedef struct drm32_mga_init {
int func;
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 2b0ceb8dc11b..1b071b8ff9dc 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -47,7 +47,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
}
-irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t mga_driver_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
MGA_WRITE(MGA_PRIMEND, prim_end);
atomic_inc(&dev_priv->last_fence_retired);
- DRM_WAKEUP(&dev_priv->fence_queue);
+ wake_up(&dev_priv->fence_queue);
handled = 1;
}
@@ -128,7 +128,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
* by about a day rather than she wants to wait for years
* using fences.
*/
- DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
(((cur_fence = atomic_read(&dev_priv->last_fence_retired))
- *sequence) <= (1 << 23)));
@@ -151,7 +151,7 @@ int mga_driver_irq_postinstall(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
- DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
+ init_waitqueue_head(&dev_priv->fence_queue);
/* Turn on soft trap interrupt. Vertical blank interrupts are enabled
* in mga_enable_vblank.
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index 37cc2fb4eadd..314685b7f41f 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1029,7 +1029,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil
return -EINVAL;
}
- if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+ if (copy_to_user(param->value, &value, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 801731aeab61..9f9780b7ddf0 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -22,8 +22,10 @@ static void mga_hide_cursor(struct mga_device *mdev)
{
WREG8(MGA_CURPOSXL, 0);
WREG8(MGA_CURPOSXH, 0);
- mgag200_bo_unpin(mdev->cursor.pixels_1);
- mgag200_bo_unpin(mdev->cursor.pixels_2);
+ if (mdev->cursor.pixels_1->pin_count)
+ mgag200_bo_unpin(mdev->cursor.pixels_1);
+ if (mdev->cursor.pixels_2->pin_count)
+ mgag200_bo_unpin(mdev->cursor.pixels_2);
}
int mga_crtc_cursor_set(struct drm_crtc *crtc,
@@ -32,7 +34,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t width,
uint32_t height)
{
- struct drm_device *dev = (struct drm_device *)file_priv->minor->dev;
+ struct drm_device *dev = crtc->dev;
struct mga_device *mdev = (struct mga_device *)dev->dev_private;
struct mgag200_bo *pixels_1 = mdev->cursor.pixels_1;
struct mgag200_bo *pixels_2 = mdev->cursor.pixels_2;
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 964f58cee5ea..13b7dd83faa9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- if (!in_interrupt())
+ if (drm_can_sleep())
ret = mgag200_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
@@ -282,6 +282,11 @@ int mgag200_fbdev_init(struct mga_device *mdev)
{
struct mga_fbdev *mfbdev;
int ret;
+ int bpp_sel = 32;
+
+ /* prefer 16bpp on low end gpus with limited VRAM */
+ if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
+ bpp_sel = 16;
mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
if (!mfbdev)
@@ -301,7 +306,7 @@ int mgag200_fbdev_init(struct mga_device *mdev)
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(mdev->dev);
- drm_fb_helper_initial_config(&mfbdev->helper, 32);
+ drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index b1120cb1db6d..26868e5c55b0 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -217,7 +217,10 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_init(dev);
dev->mode_config.funcs = (void *)&mga_mode_funcs;
- dev->mode_config.preferred_depth = 24;
+ if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
+ dev->mode_config.preferred_depth = 16;
+ else
+ dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
r = mgag200_modeset_init(mdev);
@@ -310,7 +313,7 @@ int mgag200_dumb_create(struct drm_file *file,
return 0;
}
-void mgag200_bo_unref(struct mgag200_bo **bo)
+static void mgag200_bo_unref(struct mgag200_bo **bo)
{
struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index ee6ed633b7b1..968374776db9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -691,7 +691,7 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
CRTCEXT0 has to be programmed last to trigger an update and make the
new addr variable take effect.
*/
-void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
+static void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
{
struct mga_device *mdev = crtc->dev->dev_private;
u32 addr;
@@ -1398,7 +1398,7 @@ static void mga_encoder_commit(struct drm_encoder *encoder)
{
}
-void mga_encoder_destroy(struct drm_encoder *encoder)
+static void mga_encoder_destroy(struct drm_encoder *encoder)
{
struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
drm_encoder_cleanup(encoder);
@@ -1519,11 +1519,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
(mga_vga_calculate_mode_bandwidth(mode, bpp)
> (32700 * 1024))) {
return MODE_BANDWIDTH;
- } else if (mode->type == G200_EH &&
+ } else if (mdev->type == G200_EH &&
(mga_vga_calculate_mode_bandwidth(mode, bpp)
> (37500 * 1024))) {
return MODE_BANDWIDTH;
- } else if (mode->type == G200_ER &&
+ } else if (mdev->type == G200_ER &&
(mga_vga_calculate_mode_bandwidth(mode,
bpp) > (55000 * 1024))) {
return MODE_BANDWIDTH;
@@ -1558,7 +1558,7 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-struct drm_encoder *mga_connector_best_encoder(struct drm_connector
+static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
*connector)
{
int enc_id = connector->encoder_ids[0];
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 07b192fe15c6..adb5166a5dfd 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -80,7 +80,7 @@ static int mgag200_ttm_global_init(struct mga_device *ast)
return 0;
}
-void
+static void
mgag200_ttm_global_release(struct mga_device *ast)
{
if (ast->ttm.mem_global_ref.release == NULL)
@@ -102,7 +102,7 @@ static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
kfree(bo);
}
-bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
+static bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
{
if (bo->destroy == &mgag200_bo_ttm_destroy)
return true;
@@ -208,7 +208,7 @@ static struct ttm_backend_func mgag200_tt_backend_func = {
};
-struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index f39ab7554fc9..c69d1e07a3a6 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -2,8 +2,8 @@
config DRM_MSM
tristate "MSM DRM"
depends on DRM
- depends on ARCH_MSM
- depends on ARCH_MSM8960
+ depends on MSM_IOMMU
+ depends on (ARCH_MSM && ARCH_MSM8960) || (ARM && COMPILE_TEST)
select DRM_KMS_HELPER
select SHMEM
select TMPFS
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index e5fa12b0d21e..4f977a593bea 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -12,18 +12,27 @@ msm-y := \
hdmi/hdmi_i2c.o \
hdmi/hdmi_phy_8960.o \
hdmi/hdmi_phy_8x60.o \
- mdp4/mdp4_crtc.o \
- mdp4/mdp4_dtv_encoder.o \
- mdp4/mdp4_format.o \
- mdp4/mdp4_irq.o \
- mdp4/mdp4_kms.o \
- mdp4/mdp4_plane.o \
+ hdmi/hdmi_phy_8x74.o \
+ mdp/mdp_format.o \
+ mdp/mdp_kms.o \
+ mdp/mdp4/mdp4_crtc.o \
+ mdp/mdp4/mdp4_dtv_encoder.o \
+ mdp/mdp4/mdp4_irq.o \
+ mdp/mdp4/mdp4_kms.o \
+ mdp/mdp4/mdp4_plane.o \
+ mdp/mdp5/mdp5_crtc.o \
+ mdp/mdp5/mdp5_encoder.o \
+ mdp/mdp5/mdp5_irq.o \
+ mdp/mdp5/mdp5_kms.o \
+ mdp/mdp5/mdp5_plane.o \
+ mdp/mdp5/mdp5_smp.o \
msm_drv.o \
msm_fb.o \
msm_gem.o \
msm_gem_prime.o \
msm_gem_submit.o \
msm_gpu.o \
+ msm_iommu.o \
msm_ringbuffer.o
msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES
index e036f6c1db94..9c4255b98021 100644
--- a/drivers/gpu/drm/msm/NOTES
+++ b/drivers/gpu/drm/msm/NOTES
@@ -4,7 +4,7 @@ In the current snapdragon SoC's, we have (at least) 3 different
display controller blocks at play:
+ MDP3 - ?? seems to be what is on geeksphone peak device
+ MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
- + MDSS - snapdragon 800
+ + MDP5 - snapdragon 800
(I don't have a completely clear picture on which display controller
maps to which part #)
@@ -46,6 +46,24 @@ and treat the MDP4 block's irq as "the" irq. Even though the connectors
may have their own irqs which they install themselves. For this reason
the display controller is the "master" device.
+For MDP5, the mapping is:
+
+ plane -> PIPE{RGBn,VIGn} \
+ crtc -> LM (layer mixer) |-> MDP "device"
+ encoder -> INTF /
+ connector -> HDMI/DSI/eDP/etc --> other device(s)
+
+Unlike MDP4, it appears we can get by with a single encoder, rather
+than needing a different implementation for DTV, DSI, etc. (i.e. the
+register interface is the same, just different bases.)
+
+Also unlike MDP4, with MDP5 all the IRQs for other blocks (HDMI, DSI,
+etc) are routed through MDP.
+
+And finally, MDP5 has this "Shared Memory Pool" (called "SMP"), from
+which blocks need to be allocated to the active pipes based on fetch
+stride.
+
Each connector probably ends up being a separate device, just for the
 logistics of finding/mapping io region, irq, etc. Ideally we would
have a better way than just stashing the platform device in a global
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 9588098741b5..85d615e7d62f 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -202,6 +203,12 @@ enum a2xx_rb_copy_sample_select {
SAMPLE_0123 = 6,
};
+enum adreno_mmu_clnt_beh {
+ BEH_NEVR = 0,
+ BEH_TRAN_RNG = 1,
+ BEH_TRAN_FLT = 2,
+};
+
enum sq_tex_clamp {
SQ_TEX_WRAP = 0,
SQ_TEX_MIRROR = 1,
@@ -238,6 +245,92 @@ enum sq_tex_filter {
#define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1
+#define REG_A2XX_MH_MMU_CONFIG 0x00000040
+#define A2XX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001
+#define A2XX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002
+#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030
+#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4
+static inline uint32_t A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0
+#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300
+#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00
+#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000
+#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000
+#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000
+#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000
+#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18
+static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000
+#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20
+static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000
+#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22
+static inline uint32_t A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000
+#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24
+static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
+}
+
+#define REG_A2XX_MH_MMU_VA_RANGE 0x00000041
+
+#define REG_A2XX_MH_MMU_PT_BASE 0x00000042
+
+#define REG_A2XX_MH_MMU_PAGE_FAULT 0x00000043
+
+#define REG_A2XX_MH_MMU_TRAN_ERROR 0x00000044
+
+#define REG_A2XX_MH_MMU_INVALIDATE 0x00000045
+
+#define REG_A2XX_MH_MMU_MPU_BASE 0x00000046
+
+#define REG_A2XX_MH_MMU_MPU_END 0x00000047
+
+#define REG_A2XX_NQWAIT_UNTIL 0x00000394
+
#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395
#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397
@@ -276,20 +369,6 @@ enum sq_tex_filter {
#define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447
-#define REG_A2XX_CP_ST_BASE 0x0000044d
-
-#define REG_A2XX_CP_ST_BUFSZ 0x0000044e
-
-#define REG_A2XX_CP_IB1_BASE 0x00000458
-
-#define REG_A2XX_CP_IB1_BUFSZ 0x00000459
-
-#define REG_A2XX_CP_IB2_BASE 0x0000045a
-
-#define REG_A2XX_CP_IB2_BUFSZ 0x0000045b
-
-#define REG_A2XX_CP_STAT 0x0000047f
-
#define REG_A2XX_RBBM_STATUS 0x000005d0
#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f
#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0
@@ -808,6 +887,12 @@ static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7
+#define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9
+
+#define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc
+
+#define REG_A2XX_VGT_IMMED_DATA 0x000021fd
+
#define REG_A2XX_RB_DEPTHCONTROL 0x00002200
#define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001
#define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index d4afdf657559..a7be56163d23 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -292,6 +293,8 @@ enum a3xx_tex_type {
#define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
#define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000
+#define REG_A3XX_RBBM_NQWAIT_UNTIL 0x00000040
+
#define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033
#define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050
@@ -304,6 +307,8 @@ enum a3xx_tex_type {
#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a
+#define REG_A3XX_RBBM_INT_SET_CMD 0x00000060
+
#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061
#define REG_A3XX_RBBM_INT_0_MASK 0x00000063
@@ -937,13 +942,13 @@ static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val)
return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
}
-#define REG_A3XX_UNKNOWN_20E8 0x000020e8
+#define REG_A3XX_RB_CLEAR_COLOR_DW0 0x000020e8
-#define REG_A3XX_UNKNOWN_20E9 0x000020e9
+#define REG_A3XX_RB_CLEAR_COLOR_DW1 0x000020e9
-#define REG_A3XX_UNKNOWN_20EA 0x000020ea
+#define REG_A3XX_RB_CLEAR_COLOR_DW2 0x000020ea
-#define REG_A3XX_UNKNOWN_20EB 0x000020eb
+#define REG_A3XX_RB_CLEAR_COLOR_DW3 0x000020eb
#define REG_A3XX_RB_COPY_CONTROL 0x000020ec
#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
@@ -1026,7 +1031,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
-#define REG_A3XX_UNKNOWN_2101 0x00002101
+#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
#define REG_A3XX_RB_DEPTH_INFO 0x00002102
#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
@@ -1103,11 +1108,11 @@ static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v
return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
}
-#define REG_A3XX_UNKNOWN_2105 0x00002105
+#define REG_A3XX_RB_STENCIL_CLEAR 0x00002105
-#define REG_A3XX_UNKNOWN_2106 0x00002106
+#define REG_A3XX_RB_STENCIL_BUF_INFO 0x00002106
-#define REG_A3XX_UNKNOWN_2107 0x00002107
+#define REG_A3XX_RB_STENCIL_BUF_PITCH 0x00002107
#define REG_A3XX_RB_STENCILREFMASK 0x00002108
#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
@@ -1149,20 +1154,31 @@ static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
}
-#define REG_A3XX_PA_SC_WINDOW_OFFSET 0x0000210e
-#define A3XX_PA_SC_WINDOW_OFFSET_X__MASK 0x0000ffff
-#define A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_X(uint32_t val)
+#define REG_A3XX_RB_LRZ_VSC_CONTROL 0x0000210c
+#define A3XX_RB_LRZ_VSC_CONTROL_BINNING_ENABLE 0x00000002
+
+#define REG_A3XX_RB_WINDOW_OFFSET 0x0000210e
+#define A3XX_RB_WINDOW_OFFSET_X__MASK 0x0000ffff
+#define A3XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A3XX_RB_WINDOW_OFFSET_X(uint32_t val)
{
- return ((val) << A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_X__MASK;
+ return ((val) << A3XX_RB_WINDOW_OFFSET_X__SHIFT) & A3XX_RB_WINDOW_OFFSET_X__MASK;
}
-#define A3XX_PA_SC_WINDOW_OFFSET_Y__MASK 0xffff0000
-#define A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_Y(uint32_t val)
+#define A3XX_RB_WINDOW_OFFSET_Y__MASK 0xffff0000
+#define A3XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
{
- return ((val) << A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_Y__MASK;
+ return ((val) << A3XX_RB_WINDOW_OFFSET_Y__SHIFT) & A3XX_RB_WINDOW_OFFSET_Y__MASK;
}
+#define REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110
+
+#define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111
+
+#define REG_A3XX_RB_Z_CLAMP_MIN 0x00002114
+
+#define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115
+
#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4
#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
@@ -1309,6 +1325,8 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216
+
#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217
#define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a
@@ -1491,12 +1509,13 @@ static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0
#define REG_A3XX_SP_SP_CTRL_REG 0x000022c0
#define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000
-#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x000c0000
+#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x00040000
#define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18
static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val)
{
return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK;
}
+#define A3XX_SP_SP_CTRL_REG_BINNING 0x00080000
#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000
#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20
static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
@@ -1669,7 +1688,7 @@ static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
-#define REG_A3XX_SP_VS_PVT_MEM_CTRL_REG 0x000022d6
+#define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG 0x000022d6
#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
@@ -1772,7 +1791,7 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
-#define REG_A3XX_SP_FS_PVT_MEM_CTRL_REG 0x000022e4
+#define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG 0x000022e4
#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
@@ -1943,6 +1962,9 @@ static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00
static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
+#define REG_A3XX_VSC_BIN_CONTROL 0x00000c3c
+#define A3XX_VSC_BIN_CONTROL_BINNING_ENABLE 0x00000001
+
#define REG_A3XX_UNKNOWN_0C3D 0x00000c3d
#define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48
@@ -1953,7 +1975,7 @@ static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x000
#define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b
-#define REG_A3XX_UNKNOWN_0C81 0x00000c81
+#define REG_A3XX_GRAS_TSE_DEBUG_ECO 0x00000c81
#define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88
@@ -1975,22 +1997,24 @@ static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x000
#define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0
+#define REG_A3XX_RB_DEBUG_ECO_CONTROLS_ADDR 0x00000cc1
+
#define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6
#define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7
-#define REG_A3XX_RB_WINDOW_SIZE 0x00000ce0
-#define A3XX_RB_WINDOW_SIZE_WIDTH__MASK 0x00003fff
-#define A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A3XX_RB_WINDOW_SIZE_WIDTH(uint32_t val)
+#define REG_A3XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0
+static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
{
- return ((val) << A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT) & A3XX_RB_WINDOW_SIZE_WIDTH__MASK;
+ return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
}
-#define A3XX_RB_WINDOW_SIZE_HEIGHT__MASK 0x0fffc000
-#define A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT 14
-static inline uint32_t A3XX_RB_WINDOW_SIZE_HEIGHT(uint32_t val)
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x0fffc000
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 14
+static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
{
- return ((val) << A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT) & A3XX_RB_WINDOW_SIZE_HEIGHT__MASK;
+ return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
}
#define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00
@@ -2088,6 +2112,14 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op
#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09
+#define REG_A3XX_VGT_CL_INITIATOR 0x000021f0
+
+#define REG_A3XX_VGT_EVENT_INITIATOR 0x000021f9
+
+#define REG_A3XX_VGT_DRAW_INITIATOR 0x000021fc
+
+#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
+
#define REG_A3XX_TEX_SAMP_0 0x00000000
#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002
#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
@@ -2123,6 +2155,18 @@ static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
#define REG_A3XX_TEX_SAMP_1 0x00000001
+#define A3XX_TEX_SAMP_1_MAX_LOD__MASK 0x003ff000
+#define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT 12
+static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A3XX_TEX_SAMP_1_MIN_LOD__MASK 0xffc00000
+#define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT 22
+static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
#define REG_A3XX_TEX_CONST_0 0x00000000
#define A3XX_TEX_CONST_0_TILED 0x00000001
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 035bd13dc8bd..461df93e825e 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -15,6 +15,10 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#ifdef CONFIG_MSM_OCMEM
+# include <mach/ocmem.h>
+#endif
+
#include "a3xx_gpu.h"
#define A3XX_INT0_MASK \
@@ -63,6 +67,7 @@ static void a3xx_me_init(struct msm_gpu *gpu)
static int a3xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
uint32_t *ptr, len;
int i, ret;
@@ -105,6 +110,21 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+ } else if (adreno_is_a330v2(adreno_gpu)) {
+ /*
+ * Most of the VBIF registers on 8974v2 have the correct
+ * values at power on, so we won't modify those if we don't
+ * need to
+ */
+ /* Enable 1k sort: */
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
+ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
+
} else if (adreno_is_a330(adreno_gpu)) {
/* Set up 16 deep read/write request queues: */
gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
@@ -121,10 +141,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
/* Set up AOOO: */
- gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000ffff);
- gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0xffffffff);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f);
/* Enable 1K sort: */
- gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001ffff);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
/* Disable VBIF clock gating. This is to enable AXI running
* higher frequency than GPU:
@@ -162,14 +182,23 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
/* Enable Clock gating: */
- gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
-
- /* Set the OCMEM base address for A330 */
-//TODO:
-// if (adreno_is_a330(adreno_gpu)) {
-// gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
-// (unsigned int)(a3xx_gpu->ocmem_base >> 14));
-// }
+ if (adreno_is_a320(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
+ else if (adreno_is_a330v2(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
+ else if (adreno_is_a330(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff);
+
+ if (adreno_is_a330v2(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455);
+ else if (adreno_is_a330(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);
+
+ /* Set the OCMEM base address for A330, etc */
+ if (a3xx_gpu->ocmem_hdl) {
+ gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
+ (unsigned int)(a3xx_gpu->ocmem_base >> 14));
+ }
/* Turn on performance counters: */
gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
@@ -219,7 +248,7 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* Load PM4: */
ptr = (uint32_t *)(adreno_gpu->pm4->data);
len = adreno_gpu->pm4->size / 4;
- DBG("loading PM4 ucode version: %u", ptr[0]);
+ DBG("loading PM4 ucode version: %x", ptr[1]);
gpu_write(gpu, REG_AXXX_CP_DEBUG,
AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
@@ -231,19 +260,26 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* Load PFP: */
ptr = (uint32_t *)(adreno_gpu->pfp->data);
len = adreno_gpu->pfp->size / 4;
- DBG("loading PFP ucode version: %u", ptr[0]);
+ DBG("loading PFP ucode version: %x", ptr[5]);
gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
for (i = 1; i < len; i++)
gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
- if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu))
+ if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu)) {
gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
-
+ } else if (adreno_is_a330(adreno_gpu)) {
+ /* NOTE: this (value taken from downstream android driver)
+ * includes some bits outside of the known bitfields. But
+ * A330 has this "MERCIU queue" thing too, which might
+ * explain a new bitfield or reshuffling:
+ */
+ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008);
+ }
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
@@ -253,6 +289,14 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
return 0;
}
+static void a3xx_recover(struct msm_gpu *gpu)
+{
+ gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1);
+ gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0);
+ adreno_recover(gpu);
+}
+
static void a3xx_destroy(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -261,6 +305,12 @@ static void a3xx_destroy(struct msm_gpu *gpu)
DBG("%s", gpu->name);
adreno_gpu_cleanup(adreno_gpu);
+
+#ifdef CONFIG_MSM_OCMEM
+ if (a3xx_gpu->ocmem_base)
+ ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
+#endif
+
put_device(&a3xx_gpu->pdev->dev);
kfree(a3xx_gpu);
}
@@ -371,7 +421,7 @@ static const struct adreno_gpu_funcs funcs = {
.hw_init = a3xx_hw_init,
.pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume,
- .recover = adreno_recover,
+ .recover = a3xx_recover,
.last_fence = adreno_last_fence,
.submit = adreno_submit,
.flush = adreno_flush,
@@ -387,6 +437,7 @@ static const struct adreno_gpu_funcs funcs = {
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
{
struct a3xx_gpu *a3xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct platform_device *pdev = a3xx_pdev;
struct adreno_platform_config *config;
@@ -406,7 +457,8 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
goto fail;
}
- gpu = &a3xx_gpu->base.base;
+ adreno_gpu = &a3xx_gpu->base;
+ gpu = &adreno_gpu->base;
get_device(&pdev->dev);
a3xx_gpu->pdev = pdev;
@@ -414,16 +466,46 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
gpu->fast_rate = config->fast_rate;
gpu->slow_rate = config->slow_rate;
gpu->bus_freq = config->bus_freq;
+#ifdef CONFIG_MSM_BUS_SCALING
+ gpu->bus_scale_table = config->bus_scale_table;
+#endif
DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
- ret = adreno_gpu_init(dev, pdev, &a3xx_gpu->base,
- &funcs, config->rev);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, config->rev);
if (ret)
goto fail;
- return &a3xx_gpu->base.base;
+ /* if needed, allocate gmem: */
+ if (adreno_is_a330(adreno_gpu)) {
+#ifdef CONFIG_MSM_OCMEM
+ /* TODO this is different/missing upstream: */
+ struct ocmem_buf *ocmem_hdl =
+ ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
+
+ a3xx_gpu->ocmem_hdl = ocmem_hdl;
+ a3xx_gpu->ocmem_base = ocmem_hdl->addr;
+ adreno_gpu->gmem = ocmem_hdl->len;
+ DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
+ a3xx_gpu->ocmem_base);
+#endif
+ }
+
+ if (!gpu->mmu) {
+ /* TODO we think it is possible to configure the GPU to
+ * restrict access to VRAM carveout. But the required
+ * registers are unknown. For now just bail out and
+ * limp along with just modesetting. If it turns out
+ * to not be possible to restrict access, then we must
+ * implement a cmdstream validator.
+ */
+ dev_err(dev->dev, "No memory protection without IOMMU\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ return gpu;
fail:
if (a3xx_gpu)
@@ -436,19 +518,59 @@ fail:
* The a3xx device:
*/
+#if defined(CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
+# include <mach/kgsl.h>
+#endif
+
static int a3xx_probe(struct platform_device *pdev)
{
static struct adreno_platform_config config = {};
#ifdef CONFIG_OF
- /* TODO */
+ struct device_node *child, *node = pdev->dev.of_node;
+ u32 val;
+ int ret;
+
+ ret = of_property_read_u32(node, "qcom,chipid", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "could not find chipid: %d\n", ret);
+ return ret;
+ }
+
+ config.rev = ADRENO_REV((val >> 24) & 0xff,
+ (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
+
+ /* find clock rates: */
+ config.fast_rate = 0;
+ config.slow_rate = ~0;
+ for_each_child_of_node(node, child) {
+ if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
+ struct device_node *pwrlvl;
+ for_each_child_of_node(child, pwrlvl) {
+ ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "could not find gpu-freq: %d\n", ret);
+ return ret;
+ }
+ config.fast_rate = max(config.fast_rate, val);
+ config.slow_rate = min(config.slow_rate, val);
+ }
+ }
+ }
+
+ if (!config.fast_rate) {
+ dev_err(&pdev->dev, "could not find clk rates\n");
+ return -ENXIO;
+ }
+
#else
+ struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
uint32_t version = socinfo_get_version();
if (cpu_is_apq8064ab()) {
config.fast_rate = 450000000;
config.slow_rate = 27000000;
config.bus_freq = 4;
config.rev = ADRENO_REV(3, 2, 1, 0);
- } else if (cpu_is_apq8064() || cpu_is_msm8960ab()) {
+ } else if (cpu_is_apq8064()) {
config.fast_rate = 400000000;
config.slow_rate = 27000000;
config.bus_freq = 4;
@@ -461,6 +583,16 @@ static int a3xx_probe(struct platform_device *pdev)
else
config.rev = ADRENO_REV(3, 2, 0, 0);
+ } else if (cpu_is_msm8960ab()) {
+ config.fast_rate = 400000000;
+ config.slow_rate = 320000000;
+ config.bus_freq = 4;
+
+ if (SOCINFO_VERSION_MINOR(version) == 0)
+ config.rev = ADRENO_REV(3, 2, 1, 0);
+ else
+ config.rev = ADRENO_REV(3, 2, 1, 1);
+
} else if (cpu_is_msm8930()) {
config.fast_rate = 400000000;
config.slow_rate = 27000000;
@@ -473,6 +605,9 @@ static int a3xx_probe(struct platform_device *pdev)
config.rev = ADRENO_REV(3, 0, 5, 0);
}
+# ifdef CONFIG_MSM_BUS_SCALING
+ config.bus_scale_table = pdata->bus_scale_table;
+# endif
#endif
pdev->dev.platform_data = &config;
a3xx_pdev = pdev;
@@ -485,10 +620,19 @@ static int a3xx_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,kgsl-3d0" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
static struct platform_driver a3xx_driver = {
.probe = a3xx_probe,
.remove = a3xx_remove,
- .driver.name = "kgsl-3d0",
+ .driver = {
+ .name = "kgsl-3d0",
+ .of_match_table = dt_match,
+ },
};
void __init a3xx_register(void)
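For reference, the new "qcom,chipid" parsing above can be sanity-checked with a sample value; 0x03030002 is illustrative only and not taken from the patch:

    uint32_t val = 0x03030002;        /* example chipid, not from the patch */
    struct adreno_rev rev = ADRENO_REV(
    		(val >> 24) & 0xff,   /* core    = 3 */
    		(val >> 16) & 0xff,   /* major   = 3 */
    		(val >> 8) & 0xff,    /* minor   = 0 */
    		val & 0xff);          /* patchid = 2 */
    /* such a rev matches the ADRENO_REV(3, 3, 0, ANY_ID) gpulist entry below */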
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
index 32c398c2d00a..bb9a8ca0507b 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -24,6 +24,10 @@
struct a3xx_gpu {
struct adreno_gpu base;
struct platform_device *pdev;
+
+ /* if OCMEM is used for GMEM: */
+ uint32_t ocmem_base;
+ void *ocmem_hdl;
};
#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 33dcc606c7c5..d6e6ce2d1abd 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -115,96 +116,6 @@ enum adreno_rb_depth_format {
DEPTHX_24_8 = 1,
};
-enum adreno_mmu_clnt_beh {
- BEH_NEVR = 0,
- BEH_TRAN_RNG = 1,
- BEH_TRAN_FLT = 2,
-};
-
-#define REG_AXXX_MH_MMU_CONFIG 0x00000040
-#define AXXX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001
-#define AXXX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002
-#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030
-#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4
-static inline uint32_t AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0
-#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300
-#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00
-#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000
-#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000
-#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000
-#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000
-#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18
-static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000
-#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20
-static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000
-#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22
-static inline uint32_t AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000
-#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24
-static inline uint32_t AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
-}
-
-#define REG_AXXX_MH_MMU_VA_RANGE 0x00000041
-
-#define REG_AXXX_MH_MMU_PT_BASE 0x00000042
-
-#define REG_AXXX_MH_MMU_PAGE_FAULT 0x00000043
-
-#define REG_AXXX_MH_MMU_TRAN_ERROR 0x00000044
-
-#define REG_AXXX_MH_MMU_INVALIDATE 0x00000045
-
-#define REG_AXXX_MH_MMU_MPU_BASE 0x00000046
-
-#define REG_AXXX_MH_MMU_MPU_END 0x00000047
-
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
@@ -275,6 +186,18 @@ static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val)
}
#define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6
+#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK 0x001f0000
+#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT 16
+static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_MEQ_END(uint32_t val)
+{
+ return ((val) << AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK;
+}
+#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK 0x1f000000
+#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT 24
+static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_ROQ_END(uint32_t val)
+{
+ return ((val) << AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK;
+}
#define REG_AXXX_CP_CSQ_AVAIL 0x000001d7
#define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f
@@ -402,6 +325,36 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK;
}
+#define REG_AXXX_CP_NON_PREFETCH_CNTRS 0x00000440
+
+#define REG_AXXX_CP_STQ_ST_STAT 0x00000443
+
+#define REG_AXXX_CP_ST_BASE 0x0000044d
+
+#define REG_AXXX_CP_ST_BUFSZ 0x0000044e
+
+#define REG_AXXX_CP_MEQ_STAT 0x0000044f
+
+#define REG_AXXX_CP_MIU_TAG_STAT 0x00000452
+
+#define REG_AXXX_CP_BIN_MASK_LO 0x00000454
+
+#define REG_AXXX_CP_BIN_MASK_HI 0x00000455
+
+#define REG_AXXX_CP_BIN_SELECT_LO 0x00000456
+
+#define REG_AXXX_CP_BIN_SELECT_HI 0x00000457
+
+#define REG_AXXX_CP_IB1_BASE 0x00000458
+
+#define REG_AXXX_CP_IB1_BUFSZ 0x00000459
+
+#define REG_AXXX_CP_IB2_BASE 0x0000045a
+
+#define REG_AXXX_CP_IB2_BUFSZ 0x0000045b
+
+#define REG_AXXX_CP_STAT 0x0000047f
+
#define REG_AXXX_CP_SCRATCH_REG0 0x00000578
#define REG_AXXX_CP_SCRATCH_REG1 0x00000579
@@ -418,6 +371,26 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
#define REG_AXXX_CP_SCRATCH_REG7 0x0000057f
+#define REG_AXXX_CP_ME_VS_EVENT_SRC 0x00000600
+
+#define REG_AXXX_CP_ME_VS_EVENT_ADDR 0x00000601
+
+#define REG_AXXX_CP_ME_VS_EVENT_DATA 0x00000602
+
+#define REG_AXXX_CP_ME_VS_EVENT_ADDR_SWM 0x00000603
+
+#define REG_AXXX_CP_ME_VS_EVENT_DATA_SWM 0x00000604
+
+#define REG_AXXX_CP_ME_PS_EVENT_SRC 0x00000605
+
+#define REG_AXXX_CP_ME_PS_EVENT_ADDR 0x00000606
+
+#define REG_AXXX_CP_ME_PS_EVENT_DATA 0x00000607
+
+#define REG_AXXX_CP_ME_PS_EVENT_ADDR_SWM 0x00000608
+
+#define REG_AXXX_CP_ME_PS_EVENT_DATA_SWM 0x00000609
+
#define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a
#define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b
@@ -428,5 +401,11 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
#define REG_AXXX_CP_ME_NRT_DATA 0x0000060e
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_SRC 0x00000612
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_ADDR 0x00000613
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_DATA 0x00000614
+
#endif /* ADRENO_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a0b9d8a95b16..d321099abdd4 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -17,6 +17,7 @@
#include "adreno_gpu.h"
#include "msm_gem.h"
+#include "msm_mmu.h"
struct adreno_info {
struct adreno_rev rev;
@@ -44,7 +45,7 @@ static const struct adreno_info gpulist[] = {
.pfpfw = "a300_pfp.fw",
.gmem = SZ_512K,
}, {
- .rev = ADRENO_REV(3, 3, 0, 0),
+ .rev = ADRENO_REV(3, 3, 0, ANY_ID),
.revn = 330,
.name = "A330",
.pm4fw = "a330_pm4.fw",
@@ -53,6 +54,11 @@ static const struct adreno_info gpulist[] = {
},
};
+MODULE_FIRMWARE("a300_pm4.fw");
+MODULE_FIRMWARE("a300_pfp.fw");
+MODULE_FIRMWARE("a330_pm4.fw");
+MODULE_FIRMWARE("a330_pfp.fw");
+
#define RB_SIZE SZ_32K
#define RB_BLKSIZE 16
@@ -65,7 +71,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
*value = adreno_gpu->info->revn;
return 0;
case MSM_PARAM_GMEM_SIZE:
- *value = adreno_gpu->info->gmem;
+ *value = adreno_gpu->gmem;
return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
@@ -86,7 +92,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
/* size is log2(quad-words): */
AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
- AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE));
+ AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
/* Setup ringbuffer address: */
gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
@@ -286,6 +292,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
struct adreno_rev rev)
{
+ struct msm_mmu *mmu;
int i, ret;
/* identify gpu: */
@@ -311,6 +318,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
rev.core, rev.major, rev.minor, rev.patchid);
gpu->funcs = funcs;
+ gpu->gmem = gpu->info->gmem;
gpu->rev = rev;
ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
@@ -333,10 +341,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (ret)
return ret;
- ret = msm_iommu_attach(drm, gpu->base.iommu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
- if (ret)
- return ret;
+ mmu = gpu->base.mmu;
+ if (mmu) {
+ ret = mmu->funcs->attach(mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret)
+ return ret;
+ }
gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
MSM_BO_UNCACHED);
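The CP_RB_CNTL change above is easier to follow with the constants from this file (RB_SIZE = SZ_32K, RB_BLKSIZE = 16, both in bytes) plugged in; both fields are programmed as log2 of quad-words:

    /* BUFSZ: ilog2(32768 / 8) = ilog2(4096) = 12                        */
    /* BLKSZ: ilog2(16 / 8)    = ilog2(2)    = 1                         */
    /* the old code passed the raw byte count (16) into the BLKSZ field. */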
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index f73abfba7c22..ca11ea4da165 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -51,6 +51,7 @@ struct adreno_gpu {
struct msm_gpu base;
struct adreno_rev rev;
const struct adreno_info *info;
+ uint32_t gmem; /* actual gmem size */
uint32_t revn; /* numeric revision name */
const struct adreno_gpu_funcs *funcs;
@@ -70,6 +71,9 @@ struct adreno_gpu {
struct adreno_platform_config {
struct adreno_rev rev;
uint32_t fast_rate, slow_rate, bus_freq;
+#ifdef CONFIG_MSM_BUS_SCALING
+ struct msm_bus_scale_pdata *bus_scale_table;
+#endif
};
#define ADRENO_IDLE_TIMEOUT (20 * 1000)
@@ -94,6 +98,11 @@ static inline bool adreno_is_a330(struct adreno_gpu *gpu)
return gpu->revn == 330;
}
+static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
+{
+ return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
uint32_t adreno_last_fence(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 259ad709b0cc..ae992c71703f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -66,13 +67,15 @@ enum vgt_event_type {
enum pc_di_primtype {
DI_PT_NONE = 0,
- DI_PT_POINTLIST = 1,
+ DI_PT_POINTLIST_A2XX = 1,
DI_PT_LINELIST = 2,
DI_PT_LINESTRIP = 3,
DI_PT_TRILIST = 4,
DI_PT_TRIFAN = 5,
DI_PT_TRISTRIP = 6,
+ DI_PT_LINELOOP = 7,
DI_PT_RECTLIST = 8,
+ DI_PT_POINTLIST_A3XX = 9,
DI_PT_QUADLIST = 13,
DI_PT_QUADSTRIP = 14,
DI_PT_POLYGON = 15,
@@ -119,7 +122,7 @@ enum adreno_pm4_type3_packets {
CP_WAIT_FOR_IDLE = 38,
CP_WAIT_REG_MEM = 60,
CP_WAIT_REG_EQ = 82,
- CP_WAT_REG_GTE = 83,
+ CP_WAIT_REG_GTE = 83,
CP_WAIT_UNTIL_READ = 92,
CP_WAIT_IB_PFD_COMPLETE = 93,
CP_REG_RMW = 33,
@@ -151,7 +154,6 @@ enum adreno_pm4_type3_packets {
CP_CONTEXT_UPDATE = 94,
CP_INTERRUPT = 64,
CP_IM_STORE = 44,
- CP_SET_BIN_BASE_OFFSET = 75,
CP_SET_DRAW_INIT_FLAGS = 75,
CP_SET_PROTECTED_MODE = 95,
CP_LOAD_STATE = 48,
@@ -159,6 +161,16 @@ enum adreno_pm4_type3_packets {
CP_COND_INDIRECT_BUFFER_PFD = 50,
CP_INDIRECT_BUFFER_PFE = 63,
CP_SET_BIN = 76,
+ CP_TEST_TWO_MEMS = 113,
+ CP_WAIT_FOR_ME = 19,
+ IN_IB_PREFETCH_END = 23,
+ IN_SUBBLK_PREFETCH = 31,
+ IN_INSTR_PREFETCH = 32,
+ IN_INSTR_MATCH = 71,
+ IN_CONST_PREFETCH = 73,
+ IN_INCR_UPDT_STATE = 85,
+ IN_INCR_UPDT_CONST = 86,
+ IN_INCR_UPDT_INSTR = 87,
};
enum adreno_state_block {
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 6d4c62bf70dc..87be647e3825 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index d1df38bf5747..747a6ef4211f 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 0030a111302d..48e03acf19bf 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 50d11df35b21..6f1588aa9071 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -41,7 +41,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
power_on ? "Enable" : "Disable", ctrl);
}
-static irqreturn_t hdmi_irq(int irq, void *dev_id)
+irqreturn_t hdmi_irq(int irq, void *dev_id)
{
struct hdmi *hdmi = dev_id;
@@ -71,13 +71,13 @@ void hdmi_destroy(struct kref *kref)
}
/* initialize connector */
-int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
+struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
{
struct hdmi *hdmi = NULL;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = hdmi_pdev;
struct hdmi_platform_config *config;
- int ret;
+ int i, ret;
if (!pdev) {
dev_err(dev->dev, "no hdmi device\n");
@@ -99,6 +99,7 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
hdmi->dev = dev;
hdmi->pdev = pdev;
+ hdmi->config = config;
hdmi->encoder = encoder;
/* not sure which phy maps to which msm.. probably I'm missing some */
@@ -114,44 +115,70 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
goto fail;
}
- hdmi->mmio = msm_ioremap(pdev, "hdmi_msm_hdmi_addr", "HDMI");
+ hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
if (IS_ERR(hdmi->mmio)) {
ret = PTR_ERR(hdmi->mmio);
goto fail;
}
- hdmi->mvs = devm_regulator_get(&pdev->dev, "8901_hdmi_mvs");
- if (IS_ERR(hdmi->mvs))
- hdmi->mvs = devm_regulator_get(&pdev->dev, "hdmi_mvs");
- if (IS_ERR(hdmi->mvs)) {
- ret = PTR_ERR(hdmi->mvs);
- dev_err(dev->dev, "failed to get mvs regulator: %d\n", ret);
- goto fail;
+ BUG_ON(config->hpd_reg_cnt > ARRAY_SIZE(hdmi->hpd_regs));
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ struct regulator *reg;
+
+ reg = devm_regulator_get(&pdev->dev, config->hpd_reg_names[i]);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ goto fail;
+ }
+
+ hdmi->hpd_regs[i] = reg;
}
- hdmi->mpp0 = devm_regulator_get(&pdev->dev, "8901_mpp0");
- if (IS_ERR(hdmi->mpp0))
- hdmi->mpp0 = NULL;
+ BUG_ON(config->pwr_reg_cnt > ARRAY_SIZE(hdmi->pwr_regs));
+ for (i = 0; i < config->pwr_reg_cnt; i++) {
+ struct regulator *reg;
- hdmi->clk = devm_clk_get(&pdev->dev, "core_clk");
- if (IS_ERR(hdmi->clk)) {
- ret = PTR_ERR(hdmi->clk);
- dev_err(dev->dev, "failed to get 'clk': %d\n", ret);
- goto fail;
+ reg = devm_regulator_get(&pdev->dev, config->pwr_reg_names[i]);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n",
+ config->pwr_reg_names[i], ret);
+ goto fail;
+ }
+
+ hdmi->pwr_regs[i] = reg;
}
- hdmi->m_pclk = devm_clk_get(&pdev->dev, "master_iface_clk");
- if (IS_ERR(hdmi->m_pclk)) {
- ret = PTR_ERR(hdmi->m_pclk);
- dev_err(dev->dev, "failed to get 'm_pclk': %d\n", ret);
- goto fail;
+ BUG_ON(config->hpd_clk_cnt > ARRAY_SIZE(hdmi->hpd_clks));
+ for (i = 0; i < config->hpd_clk_cnt; i++) {
+ struct clk *clk;
+
+ clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev->dev, "failed to get hpd clk: %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ goto fail;
+ }
+
+ hdmi->hpd_clks[i] = clk;
}
- hdmi->s_pclk = devm_clk_get(&pdev->dev, "slave_iface_clk");
- if (IS_ERR(hdmi->s_pclk)) {
- ret = PTR_ERR(hdmi->s_pclk);
- dev_err(dev->dev, "failed to get 's_pclk': %d\n", ret);
- goto fail;
+ BUG_ON(config->pwr_clk_cnt > ARRAY_SIZE(hdmi->pwr_clks));
+ for (i = 0; i < config->pwr_clk_cnt; i++) {
+ struct clk *clk;
+
+ clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev->dev, "failed to get pwr clk: %s (%d)\n",
+ config->pwr_clk_names[i], ret);
+ goto fail;
+ }
+
+ hdmi->pwr_clks[i] = clk;
}
hdmi->i2c = hdmi_i2c_init(hdmi);
@@ -178,20 +205,22 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
goto fail;
}
- hdmi->irq = platform_get_irq(pdev, 0);
- if (hdmi->irq < 0) {
- ret = hdmi->irq;
- dev_err(dev->dev, "failed to get irq: %d\n", ret);
- goto fail;
- }
+ if (!config->shared_irq) {
+ hdmi->irq = platform_get_irq(pdev, 0);
+ if (hdmi->irq < 0) {
+ ret = hdmi->irq;
+ dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ goto fail;
+ }
- ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
- NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "hdmi_isr", hdmi);
- if (ret < 0) {
- dev_err(dev->dev, "failed to request IRQ%u: %d\n",
- hdmi->irq, ret);
- goto fail;
+ ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
+ NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "hdmi_isr", hdmi);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+ hdmi->irq, ret);
+ goto fail;
+ }
}
encoder->bridge = hdmi->bridge;
@@ -199,7 +228,7 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
priv->bridges[priv->num_bridges++] = hdmi->bridge;
priv->connectors[priv->num_connectors++] = hdmi->connector;
- return 0;
+ return hdmi;
fail:
if (hdmi) {
@@ -211,37 +240,100 @@ fail:
hdmi_destroy(&hdmi->refcount);
}
- return ret;
+ return ERR_PTR(ret);
}
/*
* The hdmi device:
*/
+#include <linux/of_gpio.h>
+
static int hdmi_dev_probe(struct platform_device *pdev)
{
static struct hdmi_platform_config config = {};
#ifdef CONFIG_OF
- /* TODO */
+ struct device_node *of_node = pdev->dev.of_node;
+
+ int get_gpio(const char *name)
+ {
+ int gpio = of_get_named_gpio(of_node, name, 0);
+ if (gpio < 0) {
+ dev_err(&pdev->dev, "failed to get gpio: %s (%d)\n",
+ name, gpio);
+ gpio = -1;
+ }
+ return gpio;
+ }
+
+ /* TODO actually use DT.. */
+ static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
+ static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
+ static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+ static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
+
+ config.phy_init = hdmi_phy_8x74_init;
+ config.mmio_name = "core_physical";
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.pwr_reg_names = pwr_reg_names;
+ config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
+ config.pwr_clk_names = pwr_clk_names;
+ config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
+ config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk");
+ config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
+ config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd");
+ config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en");
+ config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel");
+ config.shared_irq = true;
+
#else
+ static const char *hpd_clk_names[] = {
+ "core_clk", "master_iface_clk", "slave_iface_clk",
+ };
if (cpu_is_apq8064()) {
+ static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init;
+ config.mmio_name = "hdmi_msm_hdmi_addr";
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.ddc_clk_gpio = 70;
config.ddc_data_gpio = 71;
config.hpd_gpio = 72;
- config.pmic_gpio = 13 + NR_GPIO_IRQS;
- } else if (cpu_is_msm8960()) {
+ config.mux_en_gpio = -1;
+ config.mux_sel_gpio = 13 + NR_GPIO_IRQS;
+ } else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
+ static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init;
+ config.mmio_name = "hdmi_msm_hdmi_addr";
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.ddc_clk_gpio = 100;
config.ddc_data_gpio = 101;
config.hpd_gpio = 102;
- config.pmic_gpio = -1;
+ config.mux_en_gpio = -1;
+ config.mux_sel_gpio = -1;
} else if (cpu_is_msm8x60()) {
+ static const char *hpd_reg_names[] = {
+ "8901_hdmi_mvs", "8901_mpp0"
+ };
config.phy_init = hdmi_phy_8x60_init;
+ config.mmio_name = "hdmi_msm_hdmi_addr";
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.ddc_clk_gpio = 170;
config.ddc_data_gpio = 171;
config.hpd_gpio = 172;
- config.pmic_gpio = -1;
+ config.mux_en_gpio = -1;
+ config.mux_sel_gpio = -1;
}
#endif
pdev->dev.platform_data = &config;
@@ -255,10 +347,19 @@ static int hdmi_dev_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,hdmi-tx" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
static struct platform_driver hdmi_driver = {
.probe = hdmi_dev_probe,
.remove = hdmi_dev_remove,
- .driver.name = "hdmi_msm",
+ .driver = {
+ .name = "hdmi_msm",
+ .of_match_table = dt_match,
+ },
};
void __init hdmi_register(void)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 2c2ec566394c..41b29add70b1 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -28,6 +28,7 @@
struct hdmi_phy;
+struct hdmi_platform_config;
struct hdmi {
struct kref refcount;
@@ -35,14 +36,14 @@ struct hdmi {
struct drm_device *dev;
struct platform_device *pdev;
- void __iomem *mmio;
+ const struct hdmi_platform_config *config;
- struct regulator *mvs; /* HDMI_5V */
- struct regulator *mpp0; /* External 5V */
+ void __iomem *mmio;
- struct clk *clk;
- struct clk *m_pclk;
- struct clk *s_pclk;
+ struct regulator *hpd_regs[2];
+ struct regulator *pwr_regs[2];
+ struct clk *hpd_clks[3];
+ struct clk *pwr_clks[2];
struct hdmi_phy *phy;
struct i2c_adapter *i2c;
@@ -60,7 +61,29 @@ struct hdmi {
/* platform config data (ie. from DT, or pdata) */
struct hdmi_platform_config {
struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
- int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, pmic_gpio;
+ const char *mmio_name;
+
+ /* regulators that need to be on for hpd: */
+ const char **hpd_reg_names;
+ int hpd_reg_cnt;
+
+ /* regulators that need to be on for screen pwr: */
+ const char **pwr_reg_names;
+ int pwr_reg_cnt;
+
+ /* clks that need to be on for hpd: */
+ const char **hpd_clk_names;
+ int hpd_clk_cnt;
+
+ /* clks that need to be on for screen pwr (ie pixel clk): */
+ const char **pwr_clk_names;
+ int pwr_clk_cnt;
+
+ /* gpio's: */
+ int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
+
+ /* older devices had their own irq, mdp5+ it is shared w/ mdp: */
+ bool shared_irq;
};
void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
@@ -106,6 +129,7 @@ struct hdmi_phy {
struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi);
struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
+struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi);
/*
* hdmi bridge:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 4e939f82918c..e2636582cfd7 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -212,6 +214,20 @@ static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state
#define REG_HDMI_HDCP_RESET 0x00000130
#define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001
+#define REG_HDMI_VENSPEC_INFO0 0x0000016c
+
+#define REG_HDMI_VENSPEC_INFO1 0x00000170
+
+#define REG_HDMI_VENSPEC_INFO2 0x00000174
+
+#define REG_HDMI_VENSPEC_INFO3 0x00000178
+
+#define REG_HDMI_VENSPEC_INFO4 0x0000017c
+
+#define REG_HDMI_VENSPEC_INFO5 0x00000180
+
+#define REG_HDMI_VENSPEC_INFO6 0x00000184
+
#define REG_HDMI_AUDIO_CFG 0x000001d0
#define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001
#define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0
@@ -235,6 +251,9 @@ static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val)
return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK;
}
+#define REG_HDMI_DDC_ARBITRATION 0x00000210
+#define HDMI_DDC_ARBITRATION_HW_ARBITRATION 0x00000010
+
#define REG_HDMI_DDC_INT_CTRL 0x00000214
#define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001
#define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002
@@ -340,6 +359,20 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK;
}
+#define REG_HDMI_CEC_STATUS 0x00000298
+
+#define REG_HDMI_CEC_INT 0x0000029c
+
+#define REG_HDMI_CEC_ADDR 0x000002a0
+
+#define REG_HDMI_CEC_TIME 0x000002a4
+
+#define REG_HDMI_CEC_REFTIMER 0x000002a8
+
+#define REG_HDMI_CEC_RD_DATA 0x000002ac
+
+#define REG_HDMI_CEC_RD_FILTER 0x000002b0
+
#define REG_HDMI_ACTIVE_HSYNC 0x000002b4
#define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff
#define HDMI_ACTIVE_HSYNC_START__SHIFT 0
@@ -410,17 +443,33 @@ static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
#define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000
#define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000
+#define REG_HDMI_AUD_INT 0x000002cc
+#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001
+#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002
+#define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004
+#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008
+
#define REG_HDMI_PHY_CTRL 0x000002d4
#define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001
#define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002
#define HDMI_PHY_CTRL_SW_RESET 0x00000004
#define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008
-#define REG_HDMI_AUD_INT 0x000002cc
-#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001
-#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002
-#define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004
-#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008
+#define REG_HDMI_CEC_WR_RANGE 0x000002dc
+
+#define REG_HDMI_CEC_RD_RANGE 0x000002e0
+
+#define REG_HDMI_VERSION 0x000002e4
+
+#define REG_HDMI_CEC_COMPL_CTL 0x00000360
+
+#define REG_HDMI_CEC_RD_START_RANGE 0x00000364
+
+#define REG_HDMI_CEC_RD_TOTAL_RANGE 0x00000368
+
+#define REG_HDMI_CEC_RD_ERR_RESP_LO 0x0000036c
+
+#define REG_HDMI_CEC_WR_CHECK_CONFIG 0x00000370
#define REG_HDMI_8x60_PHY_REG0 0x00000300
#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c
@@ -504,5 +553,23 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
#define REG_HDMI_8960_PHY_REG12 0x00000430
+#define REG_HDMI_8x74_ANA_CFG0 0x00000000
+
+#define REG_HDMI_8x74_ANA_CFG1 0x00000004
+
+#define REG_HDMI_8x74_PD_CTRL0 0x00000010
+
+#define REG_HDMI_8x74_PD_CTRL1 0x00000014
+
+#define REG_HDMI_8x74_BIST_CFG0 0x00000034
+
+#define REG_HDMI_8x74_BIST_PATN0 0x0000003c
+
+#define REG_HDMI_8x74_BIST_PATN1 0x00000040
+
+#define REG_HDMI_8x74_BIST_PATN2 0x00000044
+
+#define REG_HDMI_8x74_BIST_PATN3 0x00000048
+
#endif /* HDMI_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 5a8ee3473cf5..7d10e55403c6 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -21,6 +21,7 @@ struct hdmi_bridge {
struct drm_bridge base;
struct hdmi *hdmi;
+ bool power_on;
unsigned long int pixclock;
};
@@ -34,6 +35,65 @@ static void hdmi_bridge_destroy(struct drm_bridge *bridge)
kfree(hdmi_bridge);
}
+static void power_on(struct drm_bridge *bridge)
+{
+ struct drm_device *dev = bridge->dev;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ int i, ret;
+
+ for (i = 0; i < config->pwr_reg_cnt; i++) {
+ ret = regulator_enable(hdmi->pwr_regs[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
+ config->pwr_reg_names[i], ret);
+ }
+ }
+
+ if (config->pwr_clk_cnt > 0) {
+ DBG("pixclock: %lu", hdmi_bridge->pixclock);
+ ret = clk_set_rate(hdmi->pwr_clks[0], hdmi_bridge->pixclock);
+ if (ret) {
+ dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n",
+ config->pwr_clk_names[0], ret);
+ }
+ }
+
+ for (i = 0; i < config->pwr_clk_cnt; i++) {
+ ret = clk_prepare_enable(hdmi->pwr_clks[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable pwr clk: %s (%d)\n",
+ config->pwr_clk_names[i], ret);
+ }
+ }
+}
+
+static void power_off(struct drm_bridge *bridge)
+{
+ struct drm_device *dev = bridge->dev;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ int i, ret;
+
+ /* TODO do we need to wait for final vblank somewhere before
+ * cutting the clocks?
+ */
+ mdelay(16 + 4);
+
+ for (i = 0; i < config->pwr_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->pwr_clks[i]);
+
+ for (i = 0; i < config->pwr_reg_cnt; i++) {
+ ret = regulator_disable(hdmi->pwr_regs[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
+ config->pwr_reg_names[i], ret);
+ }
+ }
+}
+
static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
@@ -41,6 +101,12 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
struct hdmi_phy *phy = hdmi->phy;
DBG("power up");
+
+ if (!hdmi_bridge->power_on) {
+ power_on(bridge);
+ hdmi_bridge->power_on = true;
+ }
+
phy->funcs->powerup(phy, hdmi_bridge->pixclock);
hdmi_set_mode(hdmi, true);
}
@@ -62,6 +128,11 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
DBG("power down");
hdmi_set_mode(hdmi, false);
phy->funcs->powerdown(phy);
+
+ if (hdmi_bridge->power_on) {
+ power_off(bridge);
+ hdmi_bridge->power_on = false;
+ }
}
static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 823eee521a31..7dedfdd12075 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -17,19 +17,20 @@
#include <linux/gpio.h>
+#include "msm_kms.h"
#include "hdmi.h"
struct hdmi_connector {
struct drm_connector base;
struct hdmi *hdmi;
+ struct work_struct hpd_work;
};
#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
static int gpio_config(struct hdmi *hdmi, bool on)
{
struct drm_device *dev = hdmi->dev;
- struct hdmi_platform_config *config =
- hdmi->pdev->dev.platform_data;
+ const struct hdmi_platform_config *config = hdmi->config;
int ret;
if (on) {
@@ -39,26 +40,43 @@ static int gpio_config(struct hdmi *hdmi, bool on)
"HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
goto error1;
}
+ gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
+
ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
if (ret) {
dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
"HDMI_DDC_DATA", config->ddc_data_gpio, ret);
goto error2;
}
+ gpio_set_value_cansleep(config->ddc_data_gpio, 1);
+
ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
if (ret) {
dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
"HDMI_HPD", config->hpd_gpio, ret);
goto error3;
}
- if (config->pmic_gpio != -1) {
- ret = gpio_request(config->pmic_gpio, "PMIC_HDMI_MUX_SEL");
+ gpio_direction_input(config->hpd_gpio);
+ gpio_set_value_cansleep(config->hpd_gpio, 1);
+
+ if (config->mux_en_gpio != -1) {
+ ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
if (ret) {
dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
- "PMIC_HDMI_MUX_SEL", config->pmic_gpio, ret);
+ "HDMI_MUX_SEL", config->mux_en_gpio, ret);
goto error4;
}
- gpio_set_value_cansleep(config->pmic_gpio, 0);
+ gpio_set_value_cansleep(config->mux_en_gpio, 1);
+ }
+
+ if (config->mux_sel_gpio != -1) {
+ ret = gpio_request(config->mux_sel_gpio, "HDMI_MUX_SEL");
+ if (ret) {
+ dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_MUX_SEL", config->mux_sel_gpio, ret);
+ goto error5;
+ }
+ gpio_set_value_cansleep(config->mux_sel_gpio, 0);
}
DBG("gpio on");
} else {
@@ -66,15 +84,23 @@ static int gpio_config(struct hdmi *hdmi, bool on)
gpio_free(config->ddc_data_gpio);
gpio_free(config->hpd_gpio);
- if (config->pmic_gpio != -1) {
- gpio_set_value_cansleep(config->pmic_gpio, 1);
- gpio_free(config->pmic_gpio);
+ if (config->mux_en_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_en_gpio, 0);
+ gpio_free(config->mux_en_gpio);
+ }
+
+ if (config->mux_sel_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_sel_gpio, 1);
+ gpio_free(config->mux_sel_gpio);
}
DBG("gpio off");
}
return 0;
+error5:
+ if (config->mux_en_gpio != -1)
+ gpio_free(config->mux_en_gpio);
error4:
gpio_free(config->hpd_gpio);
error3:
@@ -88,10 +114,11 @@ error1:
static int hpd_enable(struct hdmi_connector *hdmi_connector)
{
struct hdmi *hdmi = hdmi_connector->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
struct drm_device *dev = hdmi_connector->base.dev;
struct hdmi_phy *phy = hdmi->phy;
uint32_t hpd_ctrl;
- int ret;
+ int i, ret;
ret = gpio_config(hdmi, true);
if (ret) {
@@ -99,31 +126,22 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
goto fail;
}
- ret = clk_prepare_enable(hdmi->clk);
- if (ret) {
- dev_err(dev->dev, "failed to enable 'clk': %d\n", ret);
- goto fail;
- }
-
- ret = clk_prepare_enable(hdmi->m_pclk);
- if (ret) {
- dev_err(dev->dev, "failed to enable 'm_pclk': %d\n", ret);
- goto fail;
- }
-
- ret = clk_prepare_enable(hdmi->s_pclk);
- if (ret) {
- dev_err(dev->dev, "failed to enable 's_pclk': %d\n", ret);
- goto fail;
+ for (i = 0; i < config->hpd_clk_cnt; i++) {
+ ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ goto fail;
+ }
}
- if (hdmi->mpp0)
- ret = regulator_enable(hdmi->mpp0);
- if (!ret)
- ret = regulator_enable(hdmi->mvs);
- if (ret) {
- dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
- goto fail;
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_enable(hdmi->hpd_regs[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ goto fail;
+ }
}
hdmi_set_mode(hdmi, false);
@@ -156,26 +174,26 @@ fail:
static int hdp_disable(struct hdmi_connector *hdmi_connector)
{
struct hdmi *hdmi = hdmi_connector->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
struct drm_device *dev = hdmi_connector->base.dev;
- int ret = 0;
+ int i, ret = 0;
/* Disable HPD interrupt */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
hdmi_set_mode(hdmi, false);
- if (hdmi->mpp0)
- ret = regulator_disable(hdmi->mpp0);
- if (!ret)
- ret = regulator_disable(hdmi->mvs);
- if (ret) {
- dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
- goto fail;
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_disable(hdmi->hpd_regs[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ goto fail;
+ }
}
- clk_disable_unprepare(hdmi->clk);
- clk_disable_unprepare(hdmi->m_pclk);
- clk_disable_unprepare(hdmi->s_pclk);
+ for (i = 0; i < config->hpd_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->hpd_clks[i]);
ret = gpio_config(hdmi, false);
if (ret) {
@@ -189,9 +207,19 @@ fail:
return ret;
}
+static void
+hotplug_work(struct work_struct *work)
+{
+ struct hdmi_connector *hdmi_connector =
+ container_of(work, struct hdmi_connector, hpd_work);
+ struct drm_connector *connector = &hdmi_connector->base;
+ drm_helper_hpd_irq_event(connector->dev);
+}
+
void hdmi_connector_irq(struct drm_connector *connector)
{
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ struct msm_drm_private *priv = connector->dev->dev_private;
struct hdmi *hdmi = hdmi_connector->hdmi;
uint32_t hpd_int_status, hpd_int_ctrl;
@@ -209,13 +237,13 @@ void hdmi_connector_irq(struct drm_connector *connector)
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
- drm_helper_hpd_irq_event(connector->dev);
-
 /* detect disconnect if we are connected or vice versa: */
hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
if (!detected)
hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
+
+ queue_work(priv->wq, &hdmi_connector->hpd_work);
}
}
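
hdmi_connector_irq() above now only acks the interrupt and queues hpd_work; the expensive drm_helper_hpd_irq_event() call runs later from the workqueue worker (hotplug_work). A stripped-down sketch of that ack-now, work-later split, with plain C stand-ins instead of kernel workqueues:

/* Minimal sketch of the ack-in-irq / work-later split shown above; plain C,
 * no kernel APIs. 'pending_hotplug' stands in for the queued hpd_work item.
 */
#include <stdio.h>
#include <stdbool.h>

static volatile bool pending_hotplug;

static void fake_irq_handler(unsigned status)
{
	/* keep the hard-irq path short: ack and remember there is work to do */
	if (status & 0x1)
		pending_hotplug = true;
}

static void hotplug_worker(void)
{
	if (!pending_hotplug)
		return;
	pending_hotplug = false;
	/* slow path: probing EDID, notifying userspace, etc. happens here */
	printf("handling hotplug outside interrupt context\n");
}

int main(void)
{
	fake_irq_handler(0x1);	/* pretend the HPD interrupt fired */
	hotplug_worker();	/* later, in process context */
	return 0;
}
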
@@ -224,6 +252,7 @@ static enum drm_connector_status hdmi_connector_detect(
{
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
struct hdmi *hdmi = hdmi_connector->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
uint32_t hpd_int_status;
int retry = 20;
@@ -233,6 +262,14 @@ static enum drm_connector_status hdmi_connector_detect(
* let that trick us into thinking the monitor is gone:
*/
while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) {
+ /* hdmi debounce logic seems to get stuck sometimes,
+ * read directly the gpio to get a second opinion:
+ */
+ if (gpio_get_value(config->hpd_gpio)) {
+ DBG("gpio tells us we are connected!");
+ hpd_int_status |= HDMI_HPD_INT_STATUS_CABLE_DETECTED;
+ break;
+ }
mdelay(10);
hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
DBG("status=%08x", hpd_int_status);
@@ -285,6 +322,8 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
struct msm_drm_private *priv = connector->dev->dev_private;
struct msm_kms *kms = priv->kms;
long actual, requested;
@@ -293,6 +332,13 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector,
actual = kms->funcs->round_pixclk(kms,
requested, hdmi_connector->hdmi->encoder);
+ /* for mdp5/apq8074, we manage our own pixel clk (as opposed to
+ * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
+ * instead):
+ */
+ if (config->pwr_clk_cnt > 0)
+ actual = clk_round_rate(hdmi->pwr_clks[0], actual);
+
DBG("requested=%ld, actual=%ld", requested, actual);
if (actual != requested)
@@ -335,6 +381,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
}
hdmi_connector->hdmi = hdmi_reference(hdmi);
+ INIT_WORK(&hdmi_connector->hpd_work, hotplug_work);
connector = &hdmi_connector->base;
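
hdmi_connector_mode_valid() above additionally rounds the requested pixel clock through pwr_clks[0] on mdp5/apq8074 and rejects modes the clock tree cannot hit exactly. A rough userspace sketch of that check; round_rate() below stands in for clk_round_rate(), and the 25 kHz granularity is invented purely for illustration:

/* Hedged sketch of the mode_valid idea above: reject a mode when the clock
 * tree cannot produce the requested pixel clock exactly.
 */
#include <stdio.h>

static long round_rate(long requested)
{
	/* pretend the PLL only does multiples of 25 kHz */
	return (requested / 25000) * 25000;
}

static int mode_valid(long requested_khz)
{
	long requested = requested_khz * 1000;
	long actual = round_rate(requested);

	printf("requested=%ld, actual=%ld\n", requested, actual);
	return (actual == requested) ? 0 : -1;	/* -1 ~ MODE_CLOCK_RANGE */
}

int main(void)
{
	return mode_valid(148500) ? 1 : 0;	/* 148.5 MHz, e.g. 1080p60 */
}
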
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
new file mode 100644
index 000000000000..59fa6cdacb2a
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_phy_8x74 {
+ struct hdmi_phy base;
+ struct hdmi *hdmi;
+ void __iomem *mmio;
+};
+#define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base)
+
+
+static void phy_write(struct hdmi_phy_8x74 *phy, u32 reg, u32 data)
+{
+ msm_writel(data, phy->mmio + reg);
+}
+
+//static u32 phy_read(struct hdmi_phy_8x74 *phy, u32 reg)
+//{
+// return msm_readl(phy->mmio + reg);
+//}
+
+static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+ kfree(phy_8x74);
+}
+
+static void hdmi_phy_8x74_reset(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+ struct hdmi *hdmi = phy_8x74->hdmi;
+ unsigned int val;
+
+ /* NOTE that HDMI_PHY_CTL is in core mmio, not phy mmio: */
+
+ val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+
+ msleep(100);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+}
+
+static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy,
+ unsigned long int pixclock)
+{
+ struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+
+ phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG0, 0x1b);
+ phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG1, 0xf2);
+ phy_write(phy_8x74, REG_HDMI_8x74_BIST_CFG0, 0x0);
+ phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN0, 0x0);
+ phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN1, 0x0);
+ phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN2, 0x0);
+ phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN3, 0x0);
+ phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL1, 0x20);
+}
+
+static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+ phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL0, 0x7f);
+}
+
+static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = {
+ .destroy = hdmi_phy_8x74_destroy,
+ .reset = hdmi_phy_8x74_reset,
+ .powerup = hdmi_phy_8x74_powerup,
+ .powerdown = hdmi_phy_8x74_powerdown,
+};
+
+struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi)
+{
+ struct hdmi_phy_8x74 *phy_8x74;
+ struct hdmi_phy *phy = NULL;
+ int ret;
+
+ phy_8x74 = kzalloc(sizeof(*phy_8x74), GFP_KERNEL);
+ if (!phy_8x74) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ phy = &phy_8x74->base;
+
+ phy->funcs = &hdmi_phy_8x74_funcs;
+
+ phy_8x74->hdmi = hdmi;
+
+ /* for 8x74, the phy mmio is mapped separately: */
+ phy_8x74->mmio = msm_ioremap(hdmi->pdev,
+ "phy_physical", "HDMI_8x74");
+ if (IS_ERR(phy_8x74->mmio)) {
+ ret = PTR_ERR(phy_8x74->mmio);
+ goto fail;
+ }
+
+ return phy;
+
+fail:
+ if (phy)
+ hdmi_phy_8x74_destroy(phy);
+ return ERR_PTR(ret);
+}
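
The new PHY driver recovers struct hdmi_phy_8x74 from the embedded struct hdmi_phy via container_of() (the to_hdmi_phy_8x74() macro). A self-contained sketch of that embedded-base pattern; the struct names here are illustrative, not the driver's:

/* Sketch of the container_of() pattern behind to_hdmi_phy_8x74() above,
 * rewritten for userspace.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct phy_base { int id; };

struct phy_8x74 {
	struct phy_base base;	/* embedded base object, like struct hdmi_phy */
	int extra;
};

int main(void)
{
	struct phy_8x74 phy = { .base = { .id = 1 }, .extra = 42 };
	struct phy_base *base = &phy.base;

	/* recover the derived object from a pointer to the embedded base: */
	struct phy_8x74 *back = container_of(base, struct phy_8x74, base);

	printf("extra=%d\n", back->extra);
	return back == &phy ? 0 : 1;
}
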
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index dbde4f6339b9..d591567173c4 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 9908ffe1c3ad..416a26e1e58d 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -42,27 +44,6 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-enum mdp4_bpc {
- BPC1 = 0,
- BPC5 = 1,
- BPC6 = 2,
- BPC8 = 3,
-};
-
-enum mdp4_bpc_alpha {
- BPC1A = 0,
- BPC4A = 1,
- BPC6A = 2,
- BPC8A = 3,
-};
-
-enum mdp4_alpha_type {
- FG_CONST = 0,
- BG_CONST = 1,
- FG_PIXEL = 2,
- BG_PIXEL = 3,
-};
-
enum mdp4_pipe {
VG1 = 0,
VG2 = 1,
@@ -79,15 +60,6 @@ enum mdp4_mixer {
MIXER2 = 2,
};
-enum mdp4_mixer_stage_id {
- STAGE_UNUSED = 0,
- STAGE_BASE = 1,
- STAGE0 = 2,
- STAGE1 = 3,
- STAGE2 = 4,
- STAGE3 = 5,
-};
-
enum mdp4_intf {
INTF_LCDC_DTV = 0,
INTF_DSI_VIDEO = 1,
@@ -194,56 +166,56 @@ static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
}
@@ -254,56 +226,56 @@ static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id va
#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
}
@@ -369,7 +341,7 @@ static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x
static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
-static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val)
+static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp_alpha_type val)
{
return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
}
@@ -377,7 +349,7 @@ static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val)
#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
-static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp4_alpha_type val)
+static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp_alpha_type val)
{
return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
}
@@ -472,19 +444,19 @@ static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __of
static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
-static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp_bpc val)
{
return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
}
#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
-static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp_bpc val)
{
return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
}
#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
-static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp_bpc val)
{
return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
}
@@ -710,25 +682,25 @@ static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; }
#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
{
return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
}
#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
{
return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
}
#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
{
return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
}
#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp4_bpc_alpha val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
{
return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
}
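
The generated helpers above all follow the same shift-and-mask pattern: each field helper shifts a value into position and masks it, and callers OR several helpers together into one register word. A tiny standalone sketch with a made-up field layout:

/* Small sketch of the shift/mask packing the generated helpers above use;
 * the field layout chosen here is invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD_G_BPC__MASK  0x00000003u
#define FIELD_G_BPC__SHIFT 0
#define FIELD_B_BPC__MASK  0x0000000cu
#define FIELD_B_BPC__SHIFT 2

static inline uint32_t FIELD_G_BPC(uint32_t val)
{
	return (val << FIELD_G_BPC__SHIFT) & FIELD_G_BPC__MASK;
}

static inline uint32_t FIELD_B_BPC(uint32_t val)
{
	return (val << FIELD_B_BPC__SHIFT) & FIELD_B_BPC__MASK;
}

int main(void)
{
	/* OR the per-field helpers together to build one register value: */
	uint32_t reg = FIELD_G_BPC(3) | FIELD_B_BPC(2);

	printf("reg=0x%08x\n", reg);	/* 0x0000000b */
	return 0;
}
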
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 019d530187ff..84c5b13b33c9 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -39,6 +39,7 @@ struct mdp4_crtc {
spinlock_t lock;
bool stale;
uint32_t width, height;
+ uint32_t x, y;
/* next cursor to scan-out: */
uint32_t next_iova;
@@ -57,44 +58,100 @@ struct mdp4_crtc {
#define PENDING_FLIP 0x2
atomic_t pending;
- /* the fb that we currently hold a scanout ref to: */
+ /* the fb that we logically (from PoV of KMS API) hold a ref
+ * to. Which we may not yet be scanning out (we may still
+ * be scanning out previous in case of page_flip while waiting
+ * for gpu rendering to complete:
+ */
struct drm_framebuffer *fb;
+ /* the fb that we currently hold a scanout ref to: */
+ struct drm_framebuffer *scanout_fb;
+
/* for unref'ing framebuffers after scanout completes: */
struct drm_flip_work unref_fb_work;
/* for unref'ing cursor bo's after scanout completes: */
struct drm_flip_work unref_cursor_work;
- struct mdp4_irq vblank;
- struct mdp4_irq err;
+ struct mdp_irq vblank;
+ struct mdp_irq err;
};
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
- return to_mdp4_kms(priv->kms);
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-static void update_fb(struct drm_crtc *crtc, bool async,
- struct drm_framebuffer *new_fb)
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct drm_framebuffer *old_fb = mdp4_crtc->fb;
- if (old_fb)
- drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+ atomic_or(pending, &mdp4_crtc->pending);
+ mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+}
+
+static void crtc_flush(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ uint32_t i, flush = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+ struct drm_plane *plane = mdp4_crtc->planes[i];
+ if (plane) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ flush |= pipe2flush(pipe_id);
+ }
+ }
+ flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+ DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
+static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_framebuffer *old_fb = mdp4_crtc->fb;
/* grab reference to incoming scanout fb: */
drm_framebuffer_reference(new_fb);
mdp4_crtc->base.fb = new_fb;
mdp4_crtc->fb = new_fb;
- if (!async) {
- /* enable vblank to pick up the old_fb */
- mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
- }
+ if (old_fb)
+ drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+}
+
+/* unlike update_fb(), take a ref to the new scanout fb *before* updating
+ * plane, then call this. Needed to ensure we don't unref the buffer that
+ * is actually still being scanned out.
+ *
+ * Note that this whole thing goes away with atomic.. since we can defer
+ * calling into driver until rendering is done.
+ */
+static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ /* flush updates, to make sure hw is updated to new scanout fb,
+ * so that we can safely queue unref to current fb (ie. next
+ * vblank we know hw is done w/ previous scanout_fb).
+ */
+ crtc_flush(crtc);
+
+ if (mdp4_crtc->scanout_fb)
+ drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
+ mdp4_crtc->scanout_fb);
+
+ mdp4_crtc->scanout_fb = fb;
+
+ /* enable vblank to complete flip: */
+ request_pending(crtc, PENDING_FLIP);
}
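
update_scanout() above grabs a reference to the new framebuffer before the old one is queued for unref, so the buffer still being scanned out is never dropped early (the actual unref is deferred via unref_fb_work until after the flush completes). A simplified userspace sketch of that take-new-ref-before-dropping-old ordering; 'buf' is a stand-in, not a drm object:

/* Userspace sketch of the reference ordering update_scanout() relies on. */
#include <assert.h>
#include <stdlib.h>

struct buf { int refs; };

static struct buf *buf_get(struct buf *b) { if (b) b->refs++; return b; }
static void buf_put(struct buf *b)
{
	if (b && --b->refs == 0)
		free(b);
}

static struct buf *scanout;	/* currently scanned-out buffer */

static void set_scanout(struct buf *new_buf)
{
	struct buf *old = scanout;

	scanout = buf_get(new_buf);	/* grab the new ref first... */
	buf_put(old);			/* ...then release the old one */
}

int main(void)
{
	struct buf *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));

	a->refs = b->refs = 1;
	set_scanout(a);
	set_scanout(b);
	assert(scanout == b && b->refs == 2);
	buf_put(a);	/* drop the caller's original references */
	buf_put(b);
	buf_put(scanout);
	return 0;
}
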
/* if file!=NULL, this is preclose potential cancel-flip path */
@@ -120,34 +177,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static void crtc_flush(struct drm_crtc *crtc)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
- uint32_t i, flush = 0;
-
- for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
- struct drm_plane *plane = mdp4_crtc->planes[i];
- if (plane) {
- enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
- flush |= pipe2flush(pipe_id);
- }
- }
- flush |= ovlp2flush(mdp4_crtc->ovlp);
-
- DBG("%s: flush=%08x", mdp4_crtc->name, flush);
-
- mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
-}
-
-static void request_pending(struct drm_crtc *crtc, uint32_t pending)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
- atomic_or(pending, &mdp4_crtc->pending);
- mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
-}
-
static void pageflip_cb(struct msm_fence_cb *cb)
{
struct mdp4_crtc *mdp4_crtc =
@@ -158,11 +187,9 @@ static void pageflip_cb(struct msm_fence_cb *cb)
if (!fb)
return;
+ drm_framebuffer_reference(fb);
mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
- crtc_flush(crtc);
-
- /* enable vblank to complete flip: */
- request_pending(crtc, PENDING_FLIP);
+ update_scanout(crtc, fb);
}
static void unref_fb_worker(struct drm_flip_work *work, void *val)
@@ -210,9 +237,9 @@ static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
if (enabled != mdp4_crtc->enabled) {
if (enabled) {
mdp4_enable(mdp4_kms);
- mdp4_irq_register(mdp4_kms, &mdp4_crtc->err);
+ mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
} else {
- mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err);
+ mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
mdp4_disable(mdp4_kms);
}
mdp4_crtc->enabled = enabled;
@@ -232,7 +259,7 @@ static void blend_setup(struct drm_crtc *crtc)
struct mdp4_kms *mdp4_kms = get_kms(crtc);
int i, ovlp = mdp4_crtc->ovlp;
uint32_t mixer_cfg = 0;
- static const enum mdp4_mixer_stage_id stages[] = {
+ static const enum mdp_mixer_stage_id stages[] = {
STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
};
/* statically (for now) map planes to mixer stage (z-order): */
@@ -262,8 +289,8 @@ static void blend_setup(struct drm_crtc *crtc)
enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
int idx = idxs[pipe_id];
if (idx > 0) {
- const struct mdp4_format *format =
- to_mdp4_format(msm_framebuffer_format(plane->fb));
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(plane->fb));
alpha[idx-1] = format->alpha_enable;
}
mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]);
@@ -320,6 +347,20 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->fb);
+
+ ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+ mdp4_crtc->name, ret);
+ return ret;
+ }
+
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
@@ -341,24 +382,15 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
- update_fb(crtc, false, crtc->fb);
-
- ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
- if (ret) {
- dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
- mdp4_crtc->name, ret);
- return ret;
- }
-
if (dma == DMA_E) {
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
}
+ update_fb(crtc, crtc->fb);
+ update_scanout(crtc, crtc->fb);
+
return 0;
}
@@ -385,13 +417,24 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_plane *plane = mdp4_crtc->plane;
struct drm_display_mode *mode = &crtc->mode;
+ int ret;
- update_fb(crtc, false, crtc->fb);
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->fb);
- return mdp4_plane_mode_set(plane, crtc, crtc->fb,
+ ret = mdp4_plane_mode_set(plane, crtc, crtc->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ return ret;
+ }
+
+ update_fb(crtc, crtc->fb);
+ update_scanout(crtc, crtc->fb);
+
+ return 0;
}
static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
@@ -419,7 +462,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
mdp4_crtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
- update_fb(crtc, true, new_fb);
+ update_fb(crtc, new_fb);
return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
}
@@ -442,12 +485,12 @@ static int mdp4_crtc_set_property(struct drm_crtc *crtc,
static void update_cursor(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
enum mdp4_dma dma = mdp4_crtc->dma;
unsigned long flags;
spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
if (mdp4_crtc->cursor.stale) {
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
uint32_t iova = mdp4_crtc->cursor.next_iova;
@@ -479,6 +522,11 @@ static void update_cursor(struct drm_crtc *crtc)
mdp4_crtc->cursor.scanout_bo = next_bo;
mdp4_crtc->cursor.stale = false;
}
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
+ MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
+ MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));
+
spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}
@@ -530,6 +578,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
drm_gem_object_unreference_unlocked(old_bo);
}
+ crtc_flush(crtc);
request_pending(crtc, PENDING_CURSOR);
return 0;
@@ -542,12 +591,15 @@ fail:
static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
- enum mdp4_dma dma = mdp4_crtc->dma;
+ unsigned long flags;
- mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
- MDP4_DMA_CURSOR_POS_X(x) |
- MDP4_DMA_CURSOR_POS_Y(y));
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ mdp4_crtc->cursor.x = x;
+ mdp4_crtc->cursor.y = y;
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+ crtc_flush(crtc);
+ request_pending(crtc, PENDING_CURSOR);
return 0;
}
@@ -571,14 +623,14 @@ static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
.load_lut = mdp4_crtc_load_lut,
};
-static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
struct drm_crtc *crtc = &mdp4_crtc->base;
struct msm_drm_private *priv = crtc->dev->dev_private;
unsigned pending;
- mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);
+ mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);
pending = atomic_xchg(&mdp4_crtc->pending, 0);
@@ -593,7 +645,7 @@ static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
}
}
-static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
struct drm_crtc *crtc = &mdp4_crtc->base;
@@ -713,6 +765,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
crtc = &mdp4_crtc->base;
mdp4_crtc->plane = plane;
+ mdp4_crtc->id = id;
mdp4_crtc->ovlp = ovlp_id;
mdp4_crtc->dma = dma_id;
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index 5e0dcae70ab5..067ed03b35fe 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -15,8 +15,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <mach/clk.h>
-
#include "mdp4_kms.h"
#include "drm_crtc.h"
@@ -37,7 +35,7 @@ struct mdp4_dtv_encoder {
static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
{
struct msm_drm_private *priv = encoder->dev->dev_private;
- return to_mdp4_kms(priv->kms);
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
}
#ifdef CONFIG_MSM_BUS_SCALING
@@ -139,7 +137,7 @@ static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode)
* the settings changes for the new modeset (like new
* scanout buffer) don't latch properly..
*/
- mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC);
+ mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
new file mode 100644
index 000000000000..c740ccd1cc67
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp4_kms.h"
+
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+ mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
+}
+
+static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+void mdp4_irq_preinstall(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
+}
+
+int mdp4_irq_postinstall(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
+ struct mdp_irq *error_handler = &mdp4_kms->error_handler;
+
+ error_handler->irq = mdp4_irq_error_handler;
+ error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
+ MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
+
+ mdp_irq_register(mdp_kms, error_handler);
+
+ return 0;
+}
+
+void mdp4_irq_uninstall(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+}
+
+irqreturn_t mdp4_irq(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
+ struct drm_device *dev = mdp4_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ unsigned int id;
+ uint32_t status;
+
+ status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
+
+ VERB("status=%08x", status);
+
+ for (id = 0; id < priv->num_crtcs; id++)
+ if (status & mdp4_crtc_vblank(priv->crtcs[id]))
+ drm_handle_vblank(dev, id);
+
+ mdp_dispatch_irqs(mdp_kms, status);
+
+ return IRQ_HANDLED;
+}
+
+int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp4_crtc_vblank(crtc), true);
+ return 0;
+}
+
+void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp4_crtc_vblank(crtc), false);
+}
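
mdp4_irq() above reads the interrupt status, writes it back to the clear register, hands vblank bits to drm, and dispatches the rest through mdp_dispatch_irqs(). A minimal sketch of that read/ack/dispatch flow with fake registers in place of mmio:

/* Illustrative sketch of the read-status / write-to-clear / dispatch flow
 * in mdp4_irq() above; the registers and bit names are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define IRQ_VBLANK0   0x1u
#define IRQ_UNDERRUN  0x2u

static uint32_t fake_intr_status = IRQ_VBLANK0 | IRQ_UNDERRUN;

static uint32_t read_status(void)       { return fake_intr_status; }
static void clear_status(uint32_t bits) { fake_intr_status &= ~bits; }

static void handle_irq(void)
{
	uint32_t status = read_status();

	clear_status(status);		/* ack everything we are about to handle */

	if (status & IRQ_VBLANK0)
		printf("vblank on crtc 0\n");
	if (status & IRQ_UNDERRUN)
		printf("underrun error\n");
}

int main(void)
{
	handle_irq();
	return fake_intr_status ? 1 : 0;	/* everything acked */
}
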
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 8972ac35a43d..272e707c9487 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -17,13 +17,14 @@
#include "msm_drv.h"
+#include "msm_mmu.h"
#include "mdp4_kms.h"
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
static int mdp4_hw_init(struct msm_kms *kms)
{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp4_kms->dev;
uint32_t version, major, minor, dmap_cfg, vg_cfg;
unsigned long clk;
@@ -31,12 +32,14 @@ static int mdp4_hw_init(struct msm_kms *kms)
pm_runtime_get_sync(dev->dev);
+ mdp4_enable(mdp4_kms);
version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
+ mdp4_disable(mdp4_kms);
major = FIELD(version, MDP4_VERSION_MAJOR);
minor = FIELD(version, MDP4_VERSION_MINOR);
- DBG("found MDP version v%d.%d", major, minor);
+ DBG("found MDP4 version v%d.%d", major, minor);
if (major != 4) {
dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
@@ -130,7 +133,7 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
unsigned i;
@@ -140,11 +143,12 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
static void mdp4_destroy(struct msm_kms *kms)
{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
kfree(mdp4_kms);
}
-static const struct msm_kms_funcs kms_funcs = {
+static const struct mdp_kms_funcs kms_funcs = {
+ .base = {
.hw_init = mdp4_hw_init,
.irq_preinstall = mdp4_irq_preinstall,
.irq_postinstall = mdp4_irq_postinstall,
@@ -152,10 +156,12 @@ static const struct msm_kms_funcs kms_funcs = {
.irq = mdp4_irq,
.enable_vblank = mdp4_enable_vblank,
.disable_vblank = mdp4_disable_vblank,
- .get_format = mdp4_get_format,
+ .get_format = mdp_get_format,
.round_pixclk = mdp4_round_pixclk,
.preclose = mdp4_preclose,
.destroy = mdp4_destroy,
+ },
+ .set_irqmask = mdp4_set_irqmask,
};
int mdp4_disable(struct mdp4_kms *mdp4_kms)
@@ -189,6 +195,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
+ struct hdmi *hdmi;
int ret;
/*
@@ -238,9 +245,10 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */
priv->encoders[priv->num_encoders++] = encoder;
- ret = hdmi_init(dev, encoder);
- if (ret) {
- dev_err(dev->dev, "failed to initialize HDMI\n");
+ hdmi = hdmi_init(dev, encoder);
+ if (IS_ERR(hdmi)) {
+ ret = PTR_ERR(hdmi);
+ dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
goto fail;
}
@@ -260,6 +268,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
struct mdp4_platform_config *config = mdp4_get_config(pdev);
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
+ struct msm_mmu *mmu;
int ret;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
@@ -269,8 +278,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- kms = &mdp4_kms->base;
- kms->funcs = &kms_funcs;
+ mdp_kms_init(&mdp4_kms->base, &kms_funcs);
+
+ kms = &mdp4_kms->base.base;
mdp4_kms->dev = dev;
@@ -322,27 +332,34 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
clk_set_rate(mdp4_kms->clk, config->max_clk);
clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
- if (!config->iommu) {
- dev_err(dev->dev, "no iommu\n");
- ret = -ENXIO;
- goto fail;
- }
-
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
* we don't disable):
*/
+ mdp4_enable(mdp4_kms);
mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
+ mdp4_disable(mdp4_kms);
mdelay(16);
- ret = msm_iommu_attach(dev, config->iommu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
- if (ret)
- goto fail;
+ if (config->iommu) {
+ mmu = msm_iommu_new(dev, config->iommu);
+ if (IS_ERR(mmu)) {
+ ret = PTR_ERR(mmu);
+ goto fail;
+ }
+ ret = mmu->funcs->attach(mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret)
+ goto fail;
+ } else {
+ dev_info(dev->dev, "no iommu, fallback to phys "
+ "contig buffers for scanout\n");
+ mmu = NULL;
+ }
- mdp4_kms->id = msm_register_iommu(dev, config->iommu);
+ mdp4_kms->id = msm_register_mmu(dev, mmu);
if (mdp4_kms->id < 0) {
ret = mdp4_kms->id;
dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index eb015c834087..66a4d31aec80 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -18,29 +18,13 @@
#ifndef __MDP4_KMS_H__
#define __MDP4_KMS_H__
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-
#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp/mdp_kms.h"
#include "mdp4.xml.h"
-
-/* For transiently registering for different MDP4 irqs that various parts
- * of the KMS code need during setup/configuration. We these are not
- * necessarily the same as what drm_vblank_get/put() are requesting, and
- * the hysteresis in drm_vblank_put() is not necessarily desirable for
- * internal housekeeping related irq usage.
- */
-struct mdp4_irq {
- struct list_head node;
- uint32_t irqmask;
- bool registered;
- void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus);
-};
-
struct mdp4_kms {
- struct msm_kms base;
+ struct mdp_kms base;
struct drm_device *dev;
@@ -59,11 +43,7 @@ struct mdp4_kms {
struct clk *pclk;
struct clk *lut_clk;
- /* irq handling: */
- bool in_irq;
- struct list_head irq_list; /* list of mdp4_irq */
- uint32_t vblank_mask; /* irq bits set for userspace vblank */
- struct mdp4_irq error_handler;
+ struct mdp_irq error_handler;
};
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
@@ -73,16 +53,6 @@ struct mdp4_platform_config {
uint32_t max_clk;
};
-struct mdp4_format {
- struct msm_format base;
- enum mdp4_bpc bpc_r, bpc_g, bpc_b;
- enum mdp4_bpc_alpha bpc_a;
- uint8_t unpack[4];
- bool alpha_enable, unpack_tight;
- uint8_t cpp, unpack_count;
-};
-#define to_mdp4_format(x) container_of(x, struct mdp4_format, base)
-
static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
{
msm_writel(data, mdp4_kms->mmio + reg);
@@ -134,7 +104,7 @@ static inline uint32_t dma2err(enum mdp4_dma dma)
}
static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
- enum mdp4_mixer_stage_id stage)
+ enum mdp_mixer_stage_id stage)
{
uint32_t mixer_cfg = 0;
@@ -178,19 +148,23 @@ static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
int mdp4_disable(struct mdp4_kms *mdp4_kms);
int mdp4_enable(struct mdp4_kms *mdp4_kms);
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
void mdp4_irq_preinstall(struct msm_kms *kms);
int mdp4_irq_postinstall(struct msm_kms *kms);
void mdp4_irq_uninstall(struct msm_kms *kms);
irqreturn_t mdp4_irq(struct msm_kms *kms);
-void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask);
-void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
-void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
-uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *formats,
- uint32_t max_formats);
-const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format);
+static inline
+uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
+ uint32_t max_formats)
+{
+ /* TODO when we have YUV, we need to filter supported formats
+ * based on pipe_id..
+ */
+ return mdp_get_formats(pixel_formats, max_formats);
+}
void mdp4_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 0f0af243f6fc..1e893dd13859 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -34,7 +34,7 @@ struct mdp4_plane {
static struct mdp4_kms *get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv = plane->dev->dev_private;
- return to_mdp4_kms(priv->kms);
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
}
static int mdp4_plane_update(struct drm_plane *plane,
@@ -132,7 +132,7 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
enum mdp4_pipe pipe = mdp4_plane->pipe;
- const struct mdp4_format *format;
+ const struct mdp_format *format;
uint32_t op_mode = 0;
uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
@@ -170,12 +170,12 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
- MDP4_PIPE_SRC_XY_X(crtc_x) |
- MDP4_PIPE_SRC_XY_Y(crtc_y));
+ MDP4_PIPE_DST_XY_X(crtc_x) |
+ MDP4_PIPE_DST_XY_Y(crtc_y));
mdp4_plane_set_scanout(plane, fb);
- format = to_mdp4_format(msm_framebuffer_format(fb));
+ format = to_mdp_format(msm_framebuffer_format(fb));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
new file mode 100644
index 000000000000..0aa51517f826
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -0,0 +1,1036 @@
+#ifndef MDP5_XML
+#define MDP5_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp5_intf {
+ INTF_DSI = 1,
+ INTF_HDMI = 3,
+ INTF_LCDC = 5,
+ INTF_eDP = 9,
+};
+
+enum mdp5_intfnum {
+ NO_INTF = 0,
+ INTF0 = 1,
+ INTF1 = 2,
+ INTF2 = 3,
+ INTF3 = 4,
+};
+
+enum mdp5_pipe {
+ SSPP_VIG0 = 0,
+ SSPP_VIG1 = 1,
+ SSPP_VIG2 = 2,
+ SSPP_RGB0 = 3,
+ SSPP_RGB1 = 4,
+ SSPP_RGB2 = 5,
+ SSPP_DMA0 = 6,
+ SSPP_DMA1 = 7,
+};
+
+enum mdp5_ctl_mode {
+ MODE_NONE = 0,
+ MODE_ROT0 = 1,
+ MODE_ROT1 = 2,
+ MODE_WB0 = 3,
+ MODE_WB1 = 4,
+ MODE_WFD = 5,
+};
+
+enum mdp5_pack_3d {
+ PACK_3D_FRAME_INT = 0,
+ PACK_3D_H_ROW_INT = 1,
+ PACK_3D_V_ROW_INT = 2,
+ PACK_3D_COL_INT = 3,
+};
+
+enum mdp5_chroma_samp_type {
+ CHROMA_RGB = 0,
+ CHROMA_H2V1 = 1,
+ CHROMA_H1V2 = 2,
+ CHROMA_420 = 3,
+};
+
+enum mdp5_scale_filter {
+ SCALE_FILTER_NEAREST = 0,
+ SCALE_FILTER_BIL = 1,
+ SCALE_FILTER_PCMN = 2,
+ SCALE_FILTER_CA = 3,
+};
+
+enum mdp5_pipe_bwc {
+ BWC_LOSSLESS = 0,
+ BWC_Q_HIGH = 1,
+ BWC_Q_MED = 2,
+};
+
+enum mdp5_client_id {
+ CID_UNUSED = 0,
+ CID_VIG0_Y = 1,
+ CID_VIG0_CR = 2,
+ CID_VIG0_CB = 3,
+ CID_VIG1_Y = 4,
+ CID_VIG1_CR = 5,
+ CID_VIG1_CB = 6,
+ CID_VIG2_Y = 7,
+ CID_VIG2_CR = 8,
+ CID_VIG2_CB = 9,
+ CID_DMA0_Y = 10,
+ CID_DMA0_CR = 11,
+ CID_DMA0_CB = 12,
+ CID_DMA1_Y = 13,
+ CID_DMA1_CR = 14,
+ CID_DMA1_CB = 15,
+ CID_RGB0 = 16,
+ CID_RGB1 = 17,
+ CID_RGB2 = 18,
+ CID_MAX = 19,
+};
+
+enum mdp5_igc_type {
+ IGC_VIG = 0,
+ IGC_RGB = 1,
+ IGC_DMA = 2,
+ IGC_DSPP = 3,
+};
+
+#define MDP5_IRQ_INTF0_WB_ROT_COMP 0x00000001
+#define MDP5_IRQ_INTF1_WB_ROT_COMP 0x00000002
+#define MDP5_IRQ_INTF2_WB_ROT_COMP 0x00000004
+#define MDP5_IRQ_INTF3_WB_ROT_COMP 0x00000008
+#define MDP5_IRQ_INTF0_WB_WFD 0x00000010
+#define MDP5_IRQ_INTF1_WB_WFD 0x00000020
+#define MDP5_IRQ_INTF2_WB_WFD 0x00000040
+#define MDP5_IRQ_INTF3_WB_WFD 0x00000080
+#define MDP5_IRQ_INTF0_PING_PONG_COMP 0x00000100
+#define MDP5_IRQ_INTF1_PING_PONG_COMP 0x00000200
+#define MDP5_IRQ_INTF2_PING_PONG_COMP 0x00000400
+#define MDP5_IRQ_INTF3_PING_PONG_COMP 0x00000800
+#define MDP5_IRQ_INTF0_PING_PONG_RD_PTR 0x00001000
+#define MDP5_IRQ_INTF1_PING_PONG_RD_PTR 0x00002000
+#define MDP5_IRQ_INTF2_PING_PONG_RD_PTR 0x00004000
+#define MDP5_IRQ_INTF3_PING_PONG_RD_PTR 0x00008000
+#define MDP5_IRQ_INTF0_PING_PONG_WR_PTR 0x00010000
+#define MDP5_IRQ_INTF1_PING_PONG_WR_PTR 0x00020000
+#define MDP5_IRQ_INTF2_PING_PONG_WR_PTR 0x00040000
+#define MDP5_IRQ_INTF3_PING_PONG_WR_PTR 0x00080000
+#define MDP5_IRQ_INTF0_PING_PONG_AUTO_REF 0x00100000
+#define MDP5_IRQ_INTF1_PING_PONG_AUTO_REF 0x00200000
+#define MDP5_IRQ_INTF2_PING_PONG_AUTO_REF 0x00400000
+#define MDP5_IRQ_INTF3_PING_PONG_AUTO_REF 0x00800000
+#define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000
+#define MDP5_IRQ_INTF0_VSYNC 0x02000000
+#define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000
+#define MDP5_IRQ_INTF1_VSYNC 0x08000000
+#define MDP5_IRQ_INTF2_UNDER_RUN 0x10000000
+#define MDP5_IRQ_INTF2_VSYNC 0x20000000
+#define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000
+#define MDP5_IRQ_INTF3_VSYNC 0x80000000
+#define REG_MDP5_HW_VERSION 0x00000000
+
+#define REG_MDP5_HW_INTR_STATUS 0x00000010
+#define MDP5_HW_INTR_STATUS_INTR_MDP 0x00000001
+#define MDP5_HW_INTR_STATUS_INTR_DSI0 0x00000010
+#define MDP5_HW_INTR_STATUS_INTR_DSI1 0x00000020
+#define MDP5_HW_INTR_STATUS_INTR_HDMI 0x00000100
+#define MDP5_HW_INTR_STATUS_INTR_EDP 0x00001000
+
+#define REG_MDP5_MDP_VERSION 0x00000100
+#define MDP5_MDP_VERSION_MINOR__MASK 0x00ff0000
+#define MDP5_MDP_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP5_MDP_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << MDP5_MDP_VERSION_MINOR__SHIFT) & MDP5_MDP_VERSION_MINOR__MASK;
+}
+#define MDP5_MDP_VERSION_MAJOR__MASK 0xf0000000
+#define MDP5_MDP_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDP5_MDP_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDP5_MDP_VERSION_MAJOR__SHIFT) & MDP5_MDP_VERSION_MAJOR__MASK;
+}
+
+#define REG_MDP5_DISP_INTF_SEL 0x00000104
+#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff
+#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
+#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
+#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000
+#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
+}
+
+#define REG_MDP5_INTR_EN 0x00000110
+
+#define REG_MDP5_INTR_STATUS 0x00000114
+
+#define REG_MDP5_INTR_CLEAR 0x00000118
+
+#define REG_MDP5_HIST_INTR_EN 0x0000011c
+
+#define REG_MDP5_HIST_INTR_STATUS 0x00000120
+
+#define REG_MDP5_HIST_INTR_CLEAR 0x00000124
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000180 + 0x4*i0; }
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000180 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+}
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+}
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+}
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000230 + 0x4*i0; }
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000230 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
+}
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
+}
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
+}
+
+static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
+{
+ switch (idx) {
+ case IGC_VIG: return 0x00000300;
+ case IGC_RGB: return 0x00000310;
+ case IGC_DMA: return 0x00000320;
+ case IGC_DSPP: return 0x00000400;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
+
+static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff
+#define MDP5_IGC_LUT_REG_VAL__SHIFT 0
+static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
+{
+ return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
+}
+#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
+
+static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000600 + 0x100*i0; }
+
+static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
+#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007
+#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038
+#define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0
+#define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00
+#define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000
+#define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000
+#define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK;
+}
+#define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000
+#define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000
+#define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000
+#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000
+
+static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000614 + 0x100*i0; }
+#define MDP5_CTL_OP_MODE__MASK 0x0000000f
+#define MDP5_CTL_OP_MODE__SHIFT 0
+static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val)
+{
+ return ((val) << MDP5_CTL_OP_MODE__SHIFT) & MDP5_CTL_OP_MODE__MASK;
+}
+#define MDP5_CTL_OP_INTF_NUM__MASK 0x00000070
+#define MDP5_CTL_OP_INTF_NUM__SHIFT 4
+static inline uint32_t MDP5_CTL_OP_INTF_NUM(enum mdp5_intfnum val)
+{
+ return ((val) << MDP5_CTL_OP_INTF_NUM__SHIFT) & MDP5_CTL_OP_INTF_NUM__MASK;
+}
+#define MDP5_CTL_OP_CMD_MODE 0x00020000
+#define MDP5_CTL_OP_PACK_3D_ENABLE 0x00080000
+#define MDP5_CTL_OP_PACK_3D__MASK 0x00300000
+#define MDP5_CTL_OP_PACK_3D__SHIFT 20
+static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val)
+{
+ return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK;
+}
+
+static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x100*i0; }
+#define MDP5_CTL_FLUSH_VIG0 0x00000001
+#define MDP5_CTL_FLUSH_VIG1 0x00000002
+#define MDP5_CTL_FLUSH_VIG2 0x00000004
+#define MDP5_CTL_FLUSH_RGB0 0x00000008
+#define MDP5_CTL_FLUSH_RGB1 0x00000010
+#define MDP5_CTL_FLUSH_RGB2 0x00000020
+#define MDP5_CTL_FLUSH_LM0 0x00000040
+#define MDP5_CTL_FLUSH_LM1 0x00000080
+#define MDP5_CTL_FLUSH_LM2 0x00000100
+#define MDP5_CTL_FLUSH_DMA0 0x00000800
+#define MDP5_CTL_FLUSH_DMA1 0x00001000
+#define MDP5_CTL_FLUSH_DSPP0 0x00002000
+#define MDP5_CTL_FLUSH_DSPP1 0x00004000
+#define MDP5_CTL_FLUSH_DSPP2 0x00008000
+#define MDP5_CTL_FLUSH_CTL 0x00020000
+
+static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000061c + 0x100*i0; }
+
+static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000620 + 0x100*i0; }
+
+static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000014c4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000014f0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00001500 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
+#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00001204 + 0x400*i0; }
+#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00001208 + 0x400*i0; }
+#define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000
+#define MDP5_PIPE_SRC_XY_Y__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_XY_Y__SHIFT) & MDP5_PIPE_SRC_XY_Y__MASK;
+}
+#define MDP5_PIPE_SRC_XY_X__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_XY_X__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000120c + 0x400*i0; }
+#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_OUT_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_OUT_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00001210 + 0x400*i0; }
+#define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000
+#define MDP5_PIPE_OUT_XY_Y__SHIFT 16
+static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_XY_Y__SHIFT) & MDP5_PIPE_OUT_XY_Y__MASK;
+}
+#define MDP5_PIPE_OUT_XY_X__MASK 0x0000ffff
+#define MDP5_PIPE_OUT_XY_X__SHIFT 0
+static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00001214 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00001218 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000121c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00001220 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00001224 + 0x400*i0; }
+#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P0__MASK;
+}
+#define MDP5_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
+#define MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00001228 + 0x400*i0; }
+#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P2__MASK;
+}
+#define MDP5_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
+#define MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000122c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00001230 + 0x400*i0; }
+#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
+#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_G_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
+#define MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_B_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
+#define MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_R_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
+#define MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_A_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
+#define MDP5_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
+#define MDP5_PIPE_SRC_FORMAT_CPP__SHIFT 9
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CPP(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CPP__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_ROT90 0x00000800
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00003000
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 12
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
+#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00780000
+#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT 19
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000
+#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp5_chroma_samp_type val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00001234 + 0x400*i0; }
+#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
+#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM0__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
+#define MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM1__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
+#define MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM2__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
+#define MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00001238 + 0x400*i0; }
+#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001
+#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006
+#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1
+static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val)
+{
+ return ((val) << MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT) & MDP5_PIPE_SRC_OP_MODE_BWC__MASK;
+}
+#define MDP5_PIPE_SRC_OP_MODE_FLIP_LR 0x00002000
+#define MDP5_PIPE_SRC_OP_MODE_FLIP_UD 0x00004000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_EN 0x00010000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_0 0x00020000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1 0x00040000
+#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000
+#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000
+
+static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000123c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00001248 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000124c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00001250 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00001254 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00001258 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00001270 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000012a4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000012a8 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000012ac + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000012b0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000012b4 + 0x400*i0; }
+#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff
+#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0
+static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_DECIMATION_VERT__SHIFT) & MDP5_PIPE_DECIMATION_VERT__MASK;
+}
+#define MDP5_PIPE_DECIMATION_HORZ__MASK 0x0000ff00
+#define MDP5_PIPE_DECIMATION_HORZ__SHIFT 8
+static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00001404 + 0x400*i0; }
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT 8
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK 0x00000c00
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT 10
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK 0x00003000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT 12
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK 0x0000c000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT 14
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK 0x00030000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT 16
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK 0x000c0000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT 18
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00001410 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00001414 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00001420 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00001424 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00003200 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00003200 + 0x400*i0; }
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010
+
+static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00003204 + 0x400*i0; }
+#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_LM_OUT_SIZE_HEIGHT__SHIFT) & MDP5_LM_OUT_SIZE_HEIGHT__MASK;
+}
+#define MDP5_LM_OUT_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_LM_OUT_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00003208 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00003210 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
+#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
+#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
+static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
+{
+ return ((val) << MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK;
+}
+#define MDP5_LM_BLEND_OP_MODE_FG_INV_ALPHA 0x00000004
+#define MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA 0x00000008
+#define MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA 0x00000010
+#define MDP5_LM_BLEND_OP_MODE_FG_TRANSP_EN 0x00000020
+#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK 0x00000300
+#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT 8
+static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
+{
+ return ((val) << MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK;
+}
+#define MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA 0x00000400
+#define MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA 0x00000800
+#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
+#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003224 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003228 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000322c + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003230 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003234 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003238 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000323c + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003240 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003244 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003248 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000032e0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000032e4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000032e8 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000032dc + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000032ec + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000032f0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000032f4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000032f8 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000032fc + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00003300 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00003304 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00003308 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000330c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00003310 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00004600 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00004600 + 0x400*i0; }
+#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001
+#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e
+#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1
+static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val)
+{
+ return ((val) << MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT) & MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK;
+}
+#define MDP5_DSPP_OP_MODE_PCC_EN 0x00000010
+#define MDP5_DSPP_OP_MODE_DITHER_EN 0x00000100
+#define MDP5_DSPP_OP_MODE_HIST_EN 0x00010000
+#define MDP5_DSPP_OP_MODE_AUTO_CLEAR 0x00020000
+#define MDP5_DSPP_OP_MODE_HIST_LUT_EN 0x00080000
+#define MDP5_DSPP_OP_MODE_PA_EN 0x00100000
+#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000
+#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000
+
+static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00004630 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00004750 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00004810 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00004830 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00004834 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00004838 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000048dc + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000048b0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00012500 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00012500 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00012504 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00012508 + 0x200*i0; }
+#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff
+#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0
+static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT) & MDP5_INTF_HSYNC_CTL_PULSEW__MASK;
+}
+#define MDP5_INTF_HSYNC_CTL_PERIOD__MASK 0xffff0000
+#define MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT 16
+static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0001250c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00012510 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00012514 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00012518 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0001251c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00012520 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00012524 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00012528 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0001252c + 0x200*i0; }
+#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff
+#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0
+static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK;
+}
+#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00012530 + 0x200*i0; }
+#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff
+#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0
+static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00012534 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00012538 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0001253c + 0x200*i0; }
+#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff
+#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0
+static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP5_INTF_DISPLAY_HCTL_START__SHIFT) & MDP5_INTF_DISPLAY_HCTL_START__MASK;
+}
+#define MDP5_INTF_DISPLAY_HCTL_END__MASK 0xffff0000
+#define MDP5_INTF_DISPLAY_HCTL_END__SHIFT 16
+static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00012540 + 0x200*i0; }
+#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_HCTL_START__SHIFT) & MDP5_INTF_ACTIVE_HCTL_START__MASK;
+}
+#define MDP5_INTF_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP5_INTF_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_HCTL_END__SHIFT) & MDP5_INTF_ACTIVE_HCTL_END__MASK;
+}
+#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000
+
+static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00012544 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00012548 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0001254c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00012550 + 0x200*i0; }
+#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001
+#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002
+#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004
+
+static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00012554 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00012558 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0001255c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00012584 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00012590 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000125a8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000125ac + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000125b0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000125f0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000125f4 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000125f8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00012600 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00012604 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00012608 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0001260c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00012610 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00012614 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00012618 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0001261c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00013100 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00013100 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00013104 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00013108 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0001310c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00013110 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00013114 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00013118 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0001311c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00013120 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00013124 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00013128 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0001312c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00013130 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00013134 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00013138 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0001317c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000131c8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000131cc + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000131d0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000131d4 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000131d8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000131dc + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000131e0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000131e8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000131ec + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000131f0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000131f4 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000131f8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00013200 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00013244 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00013248 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0001324c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00013254 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00013258 + 0x200*i0; }
+
+
+#endif /* MDP5_XML */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
new file mode 100644
index 000000000000..f2794021f086
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -0,0 +1,574 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+#include <drm/drm_mode.h>
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_flip_work.h"
+
+struct mdp5_crtc {
+ struct drm_crtc base;
+ char name[8];
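+ /* 'plane' is the crtc's own base plane; 'planes[]' tracks the
+ * planes currently attached to this crtc, indexed by pipe
+ * (see set_attach()):
+ */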
+ struct drm_plane *plane;
+ struct drm_plane *planes[8];
+ int id;
+ bool enabled;
+
+ /* which mixer/encoder we route output to: */
+ int mixer;
+
+ /* if there is a pending flip, these will be non-null: */
+ struct drm_pending_vblank_event *event;
+ struct msm_fence_cb pageflip_cb;
+
+#define PENDING_CURSOR 0x1
+#define PENDING_FLIP 0x2
+ atomic_t pending;
+
+ /* the fb that we logically (from the PoV of the KMS API) hold a
+ * ref to, which we may not yet be scanning out (we may still be
+ * scanning out the previous fb in the case of a page_flip, while
+ * waiting for gpu rendering to complete):
+ */
+ struct drm_framebuffer *fb;
+
+ /* the fb that we currently hold a scanout ref to: */
+ struct drm_framebuffer *scanout_fb;
+
+ /* for unref'ing framebuffers after scanout completes: */
+ struct drm_flip_work unref_fb_work;
+
+ struct mdp_irq vblank;
+ struct mdp_irq err;
+};
+#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
+
+static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ atomic_or(pending, &mdp5_crtc->pending);
+ mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
+}
+
+static void crtc_flush(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ int id = mdp5_crtc->id;
+ uint32_t i, flush = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
+ struct drm_plane *plane = mdp5_crtc->planes[i];
+ if (plane) {
+ enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
+ flush |= pipe2flush(pipe);
+ }
+ }
+ flush |= mixer2flush(mdp5_crtc->id);
+ flush |= MDP5_CTL_FLUSH_CTL;
+
+ DBG("%s: flush=%08x", mdp5_crtc->name, flush);
+
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
+}
+
+static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct drm_framebuffer *old_fb = mdp5_crtc->fb;
+
+ /* grab reference to incoming scanout fb: */
+ drm_framebuffer_reference(new_fb);
+ mdp5_crtc->base.fb = new_fb;
+ mdp5_crtc->fb = new_fb;
+
+ if (old_fb)
+ drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
+}
+
+/* unlike update_fb(), take a ref to the new scanout fb *before* updating
+ * the plane, then call this. This is needed to ensure we don't unref the
+ * buffer that is actually still being scanned out.
+ *
+ * Note that this whole thing goes away with atomic.. since we can defer
+ * calling into the driver until rendering is done.
+ */
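+/* (see pageflip_cb() below for the expected calling sequence) */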
+static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ /* flush updates, to make sure the hw is updated to the new scanout fb,
+ * so that we can safely queue an unref of the current fb (ie. at the
+ * next vblank we know the hw is done w/ the previous scanout_fb).
+ */
+ crtc_flush(crtc);
+
+ if (mdp5_crtc->scanout_fb)
+ drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
+ mdp5_crtc->scanout_fb);
+
+ mdp5_crtc->scanout_fb = fb;
+
+ /* enable vblank to complete flip: */
+ request_pending(crtc, PENDING_FLIP);
+}
+
+/* if file!=NULL, this is preclose potential cancel-flip path */
+static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags, i;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = mdp5_crtc->event;
+ if (event) {
+ /* if this is the regular vblank case (!file), or a cancel-flip
+ * from preclose on the file that requested the flip, then send
+ * the event:
+ */
+ if (!file || (event->base.file_priv == file)) {
+ mdp5_crtc->event = NULL;
+ drm_send_vblank_event(dev, mdp5_crtc->id, event);
+ }
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
+ struct drm_plane *plane = mdp5_crtc->planes[i];
+ if (plane)
+ mdp5_plane_complete_flip(plane);
+ }
+}
+
+static void pageflip_cb(struct msm_fence_cb *cb)
+{
+ struct mdp5_crtc *mdp5_crtc =
+ container_of(cb, struct mdp5_crtc, pageflip_cb);
+ struct drm_crtc *crtc = &mdp5_crtc->base;
+ struct drm_framebuffer *fb = mdp5_crtc->fb;
+
+ if (!fb)
+ return;
+
+ drm_framebuffer_reference(fb);
+ mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
+ update_scanout(crtc, fb);
+}
+
+static void unref_fb_worker(struct drm_flip_work *work, void *val)
+{
+ struct mdp5_crtc *mdp5_crtc =
+ container_of(work, struct mdp5_crtc, unref_fb_work);
+ struct drm_device *dev = mdp5_crtc->base.dev;
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_framebuffer_unreference(val);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void mdp5_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ mdp5_crtc->plane->funcs->destroy(mdp5_crtc->plane);
+
+ drm_crtc_cleanup(crtc);
+ drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
+
+ kfree(mdp5_crtc);
+}
+
+static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+ DBG("%s: mode=%d", mdp5_crtc->name, mode);
+
+ if (enabled != mdp5_crtc->enabled) {
+ if (enabled) {
+ mdp5_enable(mdp5_kms);
+ mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
+ } else {
+ mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
+ mdp5_disable(mdp5_kms);
+ }
+ mdp5_crtc->enabled = enabled;
+ }
+}
+
+static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ int id = mdp5_crtc->id;
+
+ /*
+ * Hard-coded setup for now until I figure out how the
+ * layer-mixer works
+ */
+
+ /* LM[id]: */
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
+ MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
+ MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
+ MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
+
+ /* NOTE: it seems that for LM[n] and CTL[m] we do not need n==m..
+ * but we do want to be setting CTL[m].LAYER[n]. Not sure what
+ * the point of having CTL[m].LAYER[o] (for o!=n) is.. maybe that
+ * is used when chaining up mixers for high resolution displays?
+ */
+
+ /* CTL[id]: */
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
+ MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
+ MDP5_CTL_LAYER_REG_BORDER_COLOR);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
+}
+
+static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ int ret;
+
+ mode = adjusted_mode;
+
+ DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mdp5_crtc->name, mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->fb);
+
+ ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+ mdp5_crtc->name, ret);
+ return ret;
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
+ MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
+ MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+ update_fb(crtc, crtc->fb);
+ update_scanout(crtc, crtc->fb);
+
+ return 0;
+}
+
+static void mdp5_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ DBG("%s", mdp5_crtc->name);
+ /* make sure we hold a ref to mdp clks while setting up mode: */
+ mdp5_enable(get_kms(crtc));
+ mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp5_crtc_commit(struct drm_crtc *crtc)
+{
+ mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ crtc_flush(crtc);
+ /* drop the ref to mdp clk's that we got in prepare: */
+ mdp5_disable(get_kms(crtc));
+}
+
+static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct drm_plane *plane = mdp5_crtc->plane;
+ struct drm_display_mode *mode = &crtc->mode;
+ int ret;
+
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->fb);
+
+ ret = mdp5_plane_mode_set(plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ return ret;
+ }
+
+ update_fb(crtc, crtc->fb);
+ update_scanout(crtc, crtc->fb);
+
+ return 0;
+}
+
+static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *new_fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_gem_object *obj;
+ unsigned long flags;
+
+ if (mdp5_crtc->event) {
+ dev_err(dev->dev, "already pending flip!\n");
+ return -EBUSY;
+ }
+
+ obj = msm_framebuffer_bo(new_fb, 0);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ mdp5_crtc->event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ update_fb(crtc, new_fb);
+
+ return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
+}
+
+static int mdp5_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val)
+{
+ // XXX
+ return -EINVAL;
+}
+
+static const struct drm_crtc_funcs mdp5_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = mdp5_crtc_destroy,
+ .page_flip = mdp5_crtc_page_flip,
+ .set_property = mdp5_crtc_set_property,
+};
+
+static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
+ .dpms = mdp5_crtc_dpms,
+ .mode_fixup = mdp5_crtc_mode_fixup,
+ .mode_set = mdp5_crtc_mode_set,
+ .prepare = mdp5_crtc_prepare,
+ .commit = mdp5_crtc_commit,
+ .mode_set_base = mdp5_crtc_mode_set_base,
+ .load_lut = mdp5_crtc_load_lut,
+};
+
+static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
+ struct drm_crtc *crtc = &mdp5_crtc->base;
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ unsigned pending;
+
+ mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
+
+ pending = atomic_xchg(&mdp5_crtc->pending, 0);
+
+ if (pending & PENDING_FLIP) {
+ complete_flip(crtc, NULL);
+ drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
+ }
+}
+
+static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
+ struct drm_crtc *crtc = &mdp5_crtc->base;
+ DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
+ crtc_flush(crtc);
+}
+
+uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ return mdp5_crtc->vblank.irqmask;
+}
+
+void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ DBG("cancel: %p", file);
+ complete_flip(crtc, file);
+}
+
+/* set interface for routing crtc->encoder: */
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
+ enum mdp5_intf intf_id)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ static const enum mdp5_intfnum intfnum[] = {
+ INTF0, INTF1, INTF2, INTF3,
+ };
+ uint32_t intf_sel;
+
+ /* now that we know what irq's we want: */
+ mdp5_crtc->err.irqmask = intf2err(intf);
+ mdp5_crtc->vblank.irqmask = intf2vblank(intf);
+
+ /* when called from modeset_init(), skip the rest until later: */
+ if (!mdp5_kms)
+ return;
+
+ intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
+
+ switch (intf) {
+ case 0:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
+ break;
+ case 1:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
+ break;
+ case 2:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
+ break;
+ case 3:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ blend_setup(crtc);
+
+ DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
+
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
+ MDP5_CTL_OP_MODE(MODE_NONE) |
+ MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
+
+ crtc_flush(crtc);
+}
+
+static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
+ struct drm_plane *plane)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes));
+
+ if (mdp5_crtc->planes[pipe_id] == plane)
+ return;
+
+ mdp5_crtc->planes[pipe_id] = plane;
+ blend_setup(crtc);
+ if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
+ crtc_flush(crtc);
+}
+
+void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+ set_attach(crtc, mdp5_plane_pipe(plane), plane);
+}
+
+void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+ set_attach(crtc, mdp5_plane_pipe(plane), NULL);
+}
+
+/* initialize crtc */
+struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, int id)
+{
+ struct drm_crtc *crtc = NULL;
+ struct mdp5_crtc *mdp5_crtc;
+ int ret;
+
+ mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
+ if (!mdp5_crtc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ crtc = &mdp5_crtc->base;
+
+ mdp5_crtc->plane = plane;
+ mdp5_crtc->id = id;
+
+ mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
+ mdp5_crtc->err.irq = mdp5_crtc_err_irq;
+
+ snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
+ pipe2name(mdp5_plane_pipe(plane)), id);
+
+ ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
+ "unref fb", unref_fb_worker);
+ if (ret)
+ goto fail;
+
+ INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
+
+ drm_crtc_init(dev, crtc, &mdp5_crtc_funcs);
+ drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
+
+ mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);
+
+ return crtc;
+
+fail:
+ if (crtc)
+ mdp5_crtc_destroy(crtc);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
new file mode 100644
index 000000000000..edec7bfaa952
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+struct mdp5_encoder {
+ struct drm_encoder base;
+ int intf;
+ enum mdp5_intf intf_id;
+ bool enabled;
+ uint32_t bsc;
+};
+#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
+
+static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
+ { \
+ .src = MSM_BUS_MASTER_MDP_PORT0, \
+ .dst = MSM_BUS_SLAVE_EBI_CH0, \
+ .ab = (ab_val), \
+ .ib = (ib_val), \
+ }
+
+static struct msm_bus_vectors mdp_bus_vectors[] = {
+ MDP_BUS_VECTOR_ENTRY(0, 0),
+ MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
+};
+static struct msm_bus_paths mdp_bus_usecases[] = { {
+ .num_paths = 1,
+ .vectors = &mdp_bus_vectors[0],
+}, {
+ .num_paths = 1,
+ .vectors = &mdp_bus_vectors[1],
+} };
+static struct msm_bus_scale_pdata mdp_bus_scale_table = {
+ .usecase = mdp_bus_usecases,
+ .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
+ .name = "mdss_mdp",
+};
+
+static void bs_init(struct mdp5_encoder *mdp5_encoder)
+{
+ mdp5_encoder->bsc = msm_bus_scale_register_client(
+ &mdp_bus_scale_table);
+ DBG("bus scale client: %08x", mdp5_encoder->bsc);
+}
+
+static void bs_fini(struct mdp5_encoder *mdp5_encoder)
+{
+ if (mdp5_encoder->bsc) {
+ msm_bus_scale_unregister_client(mdp5_encoder->bsc);
+ mdp5_encoder->bsc = 0;
+ }
+}
+
+static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx)
+{
+ if (mdp5_encoder->bsc) {
+ DBG("set bus scaling: %d", idx);
+ /* HACK: scaling down, and then immediately back up
+ * seems to leave things broken (underflow).. so
+ * never disable:
+ */
+ idx = 1;
+ msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx);
+ }
+}
+#else
+static void bs_init(struct mdp5_encoder *mdp5_encoder) {}
+static void bs_fini(struct mdp5_encoder *mdp5_encoder) {}
+static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {}
+#endif
+
+static void mdp5_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ bs_fini(mdp5_encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mdp5_encoder);
+}
+
+static const struct drm_encoder_funcs mdp5_encoder_funcs = {
+ .destroy = mdp5_encoder_destroy,
+};
+
+static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int intf = mdp5_encoder->intf;
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+ DBG("mode=%d", mode);
+
+ if (enabled == mdp5_encoder->enabled)
+ return;
+
+ if (enabled) {
+ bs_set(mdp5_encoder, 1);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
+ } else {
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
+ bs_set(mdp5_encoder, 0);
+ }
+
+ mdp5_encoder->enabled = enabled;
+}
+
+static bool mdp5_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int intf = mdp5_encoder->intf;
+ uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+ uint32_t display_v_start, display_v_end;
+ uint32_t hsync_start_x, hsync_end_x;
+ uint32_t format;
+
+ mode = adjusted_mode;
+
+ DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ ctrl_pol = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW;
+ /* probably need to get DATA_EN polarity from panel.. */
+
+ dtv_hsync_skew = 0; /* get this from panel? */
+ format = 0x213f; /* get this from panel? */
+
+ hsync_start_x = (mode->htotal - mode->hsync_start);
+ hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+ vsync_period = mode->vtotal * mode->htotal;
+ vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+ display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
+ display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
+
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
+ MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
+ MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_PERIOD_F0(intf), vsync_period);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_LEN_F0(intf), vsync_len);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_HCTL(intf),
+ MDP5_INTF_DISPLAY_HCTL_START(hsync_start_x) |
+ MDP5_INTF_DISPLAY_HCTL_END(hsync_end_x));
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VSTART_F0(intf), display_v_start);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VEND_F0(intf), display_v_end);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_BORDER_COLOR(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_UNDERFLOW_COLOR(intf), 0xff);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_SKEW(intf), dtv_hsync_skew);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_POLARITY_CTL(intf), ctrl_pol);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_HCTL(intf),
+ MDP5_INTF_ACTIVE_HCTL_START(0) |
+ MDP5_INTF_ACTIVE_HCTL_END(0));
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VSTART_F0(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
+}
+
+static void mdp5_encoder_prepare(struct drm_encoder *encoder)
+{
+ mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp5_encoder_commit(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf,
+ mdp5_encoder->intf_id);
+ mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
+ .dpms = mdp5_encoder_dpms,
+ .mode_fixup = mdp5_encoder_mode_fixup,
+ .mode_set = mdp5_encoder_mode_set,
+ .prepare = mdp5_encoder_prepare,
+ .commit = mdp5_encoder_commit,
+};
+
+/* initialize encoder */
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
+ enum mdp5_intf intf_id)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp5_encoder *mdp5_encoder;
+ int ret;
+
+ mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
+ if (!mdp5_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mdp5_encoder->intf = intf;
+ mdp5_encoder->intf_id = intf_id;
+ encoder = &mdp5_encoder->base;
+
+ drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
+
+ bs_init(mdp5_encoder);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp5_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
new file mode 100644
index 000000000000..353d494a497f
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp5_kms.h"
+
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
+}
+
+static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+void mdp5_irq_preinstall(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
+}
+
+int mdp5_irq_postinstall(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+ struct mdp_irq *error_handler = &mdp5_kms->error_handler;
+
+ error_handler->irq = mdp5_irq_error_handler;
+ error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN |
+ MDP5_IRQ_INTF1_UNDER_RUN |
+ MDP5_IRQ_INTF2_UNDER_RUN |
+ MDP5_IRQ_INTF3_UNDER_RUN;
+
+ mdp_irq_register(mdp_kms, error_handler);
+
+ return 0;
+}
+
+void mdp5_irq_uninstall(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+}
+
+static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ unsigned int id;
+ uint32_t status;
+
+ status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
+
+ VERB("status=%08x", status);
+
+ for (id = 0; id < priv->num_crtcs; id++)
+ if (status & mdp5_crtc_vblank(priv->crtcs[id]))
+ drm_handle_vblank(dev, id);
+
+ mdp_dispatch_irqs(mdp_kms, status);
+}
+
+irqreturn_t mdp5_irq(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+ uint32_t intr;
+
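+	/* top-level status indicates which sub-block (MDP, HDMI, ..) asserted the irq: */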
+ intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS);
+
+ VERB("intr=%08x", intr);
+
+ if (intr & MDP5_HW_INTR_STATUS_INTR_MDP)
+ mdp5_irq_mdp(mdp_kms);
+
+ if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI)
+ hdmi_irq(0, mdp5_kms->hdmi);
+
+ return IRQ_HANDLED;
+}
+
+int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp5_crtc_vblank(crtc), true);
+ return 0;
+}
+
+void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp5_crtc_vblank(crtc), false);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
new file mode 100644
index 000000000000..ee8446c1b5f6
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "mdp5_kms.h"
+
+static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
+
+static int mdp5_hw_init(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct drm_device *dev = mdp5_kms->dev;
+ uint32_t version, major, minor;
+ int ret = 0;
+
+ pm_runtime_get_sync(dev->dev);
+
+ mdp5_enable(mdp5_kms);
+ version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
+ mdp5_disable(mdp5_kms);
+
+ major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
+ minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
+
+ DBG("found MDP5 version v%d.%d", major, minor);
+
+ if ((major != 1) || ((minor != 0) && (minor != 2))) {
+ dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto out;
+ }
+
+ mdp5_kms->rev = minor;
+
+ /* Magic unknown register writes:
+ *
+ * W VBIF:0x004 00000001 (mdss_mdp.c:839)
+ * W MDP5:0x2e0 0xe9 (mdss_mdp.c:839)
+ * W MDP5:0x2e4 0x55 (mdss_mdp.c:839)
+ * W MDP5:0x3ac 0xc0000ccc (mdss_mdp.c:839)
+ * W MDP5:0x3b4 0xc0000ccc (mdss_mdp.c:839)
+ * W MDP5:0x3bc 0xcccccc (mdss_mdp.c:839)
+ * W MDP5:0x4a8 0xcccc0c0 (mdss_mdp.c:839)
+ * W MDP5:0x4b0 0xccccc0c0 (mdss_mdp.c:839)
+ * W MDP5:0x4b8 0xccccc000 (mdss_mdp.c:839)
+ *
+ * Downstream fbdev driver gets these register offsets/values
+	 * from DT.. not really sure what these registers are, or whether
+	 * the values differ between boards/SoCs, etc.  I guess they are
+	 * the golden registers.
+ *
+ * Not setting these does not seem to cause any problem. But
+ * we may be getting lucky with the bootloader initializing
+ * them for us. OTOH, if we can always count on the bootloader
+ * setting the golden registers, then perhaps we don't need to
+ * care.
+ */
+
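+	/* start from a clean slate (no interface selected, CTL paths cleared): */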
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(0), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(1), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(2), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(3), 0);
+
+out:
+ pm_runtime_put_sync(dev->dev);
+
+ return ret;
+}
+
+static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder)
+{
+ return rate;
+}
+
+static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
+ unsigned i;
+
+ for (i = 0; i < priv->num_crtcs; i++)
+ mdp5_crtc_cancel_pending_flip(priv->crtcs[i], file);
+}
+
+static void mdp5_destroy(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ kfree(mdp5_kms);
+}
+
+static const struct mdp_kms_funcs kms_funcs = {
+ .base = {
+ .hw_init = mdp5_hw_init,
+ .irq_preinstall = mdp5_irq_preinstall,
+ .irq_postinstall = mdp5_irq_postinstall,
+ .irq_uninstall = mdp5_irq_uninstall,
+ .irq = mdp5_irq,
+ .enable_vblank = mdp5_enable_vblank,
+ .disable_vblank = mdp5_disable_vblank,
+ .get_format = mdp_get_format,
+ .round_pixclk = mdp5_round_pixclk,
+ .preclose = mdp5_preclose,
+ .destroy = mdp5_destroy,
+ },
+ .set_irqmask = mdp5_set_irqmask,
+};
+
+int mdp5_disable(struct mdp5_kms *mdp5_kms)
+{
+ DBG("");
+
+ clk_disable_unprepare(mdp5_kms->ahb_clk);
+ clk_disable_unprepare(mdp5_kms->axi_clk);
+ clk_disable_unprepare(mdp5_kms->core_clk);
+ clk_disable_unprepare(mdp5_kms->lut_clk);
+
+ return 0;
+}
+
+int mdp5_enable(struct mdp5_kms *mdp5_kms)
+{
+ DBG("");
+
+ clk_prepare_enable(mdp5_kms->ahb_clk);
+ clk_prepare_enable(mdp5_kms->axi_clk);
+ clk_prepare_enable(mdp5_kms->core_clk);
+ clk_prepare_enable(mdp5_kms->lut_clk);
+
+ return 0;
+}
+
+static int modeset_init(struct mdp5_kms *mdp5_kms)
+{
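+	/* the RGB pipes are used as dedicated (private) planes for the CRTCs: */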
+ static const enum mdp5_pipe crtcs[] = {
+ SSPP_RGB0, SSPP_RGB1, SSPP_RGB2,
+ };
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ int i, ret;
+
+ /* construct CRTCs: */
+ for (i = 0; i < ARRAY_SIZE(crtcs); i++) {
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+
+ plane = mdp5_plane_init(dev, crtcs[i], true);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
+ pipe2name(crtcs[i]), ret);
+ goto fail;
+ }
+
+ crtc = mdp5_crtc_init(dev, plane, i);
+ if (IS_ERR(crtc)) {
+ ret = PTR_ERR(crtc);
+ dev_err(dev->dev, "failed to construct crtc for %s (%d)\n",
+ pipe2name(crtcs[i]), ret);
+ goto fail;
+ }
+ priv->crtcs[priv->num_crtcs++] = crtc;
+ }
+
+ /* Construct encoder for HDMI: */
+ encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
+ if (IS_ERR(encoder)) {
+ dev_err(dev->dev, "failed to construct encoder\n");
+ ret = PTR_ERR(encoder);
+ goto fail;
+ }
+
+ /* NOTE: the vsync and error irq's are actually associated with
+	 * the INTF/encoder.. the easiest way to deal with this (ie. what
+	 * we do now) is to assume a fixed relationship between crtcs and
+	 * encoders.  I'm not sure if there is ever a need to more freely
+ * assign crtcs to encoders, but if there is then we need to take
+ * care of error and vblank irq's that the crtc has registered,
+ * and also update user-requested vblank_mask.
+ */
+ encoder->possible_crtcs = BIT(0);
+ mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
+
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ /* Construct bridge/connector for HDMI: */
+ mdp5_kms->hdmi = hdmi_init(dev, encoder);
+ if (IS_ERR(mdp5_kms->hdmi)) {
+ ret = PTR_ERR(mdp5_kms->hdmi);
+ dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static const char *iommu_ports[] = {
+ "mdp_0",
+};
+
+static int get_clk(struct platform_device *pdev, struct clk **clkp,
+ const char *name)
+{
+ struct device *dev = &pdev->dev;
+ struct clk *clk = devm_clk_get(dev, name);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+ *clkp = clk;
+ return 0;
+}
+
+struct msm_kms *mdp5_kms_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct mdp5_platform_config *config = mdp5_get_config(pdev);
+ struct mdp5_kms *mdp5_kms;
+ struct msm_kms *kms = NULL;
+ struct msm_mmu *mmu;
+ int ret;
+
+ mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
+ if (!mdp5_kms) {
+ dev_err(dev->dev, "failed to allocate kms\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mdp_kms_init(&mdp5_kms->base, &kms_funcs);
+
+ kms = &mdp5_kms->base.base;
+
+ mdp5_kms->dev = dev;
+ mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
+
+ mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
+ if (IS_ERR(mdp5_kms->mmio)) {
+ ret = PTR_ERR(mdp5_kms->mmio);
+ goto fail;
+ }
+
+ mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+ if (IS_ERR(mdp5_kms->vbif)) {
+ ret = PTR_ERR(mdp5_kms->vbif);
+ goto fail;
+ }
+
+ mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(mdp5_kms->vdd)) {
+ ret = PTR_ERR(mdp5_kms->vdd);
+ goto fail;
+ }
+
+ ret = regulator_enable(mdp5_kms->vdd);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+ goto fail;
+ }
+
+ ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk") ||
+ get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk") ||
+ get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src") ||
+ get_clk(pdev, &mdp5_kms->core_clk, "core_clk") ||
+ get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk") ||
+ get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
+ if (ret)
+ goto fail;
+
+ ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
+
+ /* make sure things are off before attaching iommu (bootloader could
+ * have left things on, in which case we'll start getting faults if
+ * we don't disable):
+ */
+ mdp5_enable(mdp5_kms);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(0), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(1), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(2), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(3), 0);
+ mdp5_disable(mdp5_kms);
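+	/* delay roughly one frame (at 60Hz) for the disable to take effect: */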
+ mdelay(16);
+
+ if (config->iommu) {
+ mmu = msm_iommu_new(dev, config->iommu);
+ if (IS_ERR(mmu)) {
+ ret = PTR_ERR(mmu);
+ goto fail;
+ }
+ ret = mmu->funcs->attach(mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret)
+ goto fail;
+ } else {
+ dev_info(dev->dev, "no iommu, fallback to phys "
+ "contig buffers for scanout\n");
+ mmu = NULL;
+ }
+
+ mdp5_kms->id = msm_register_mmu(dev, mmu);
+ if (mdp5_kms->id < 0) {
+ ret = mdp5_kms->id;
+ dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
+ goto fail;
+ }
+
+ ret = modeset_init(mdp5_kms);
+ if (ret) {
+ dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+ goto fail;
+ }
+
+ return kms;
+
+fail:
+ if (kms)
+ mdp5_destroy(kms);
+ return ERR_PTR(ret);
+}
+
+static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
+{
+ static struct mdp5_platform_config config = {};
+#ifdef CONFIG_OF
+ /* TODO */
+#endif
+ return &config;
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
new file mode 100644
index 000000000000..c8b1a2522c25
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP5_KMS_H__
+#define __MDP5_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp/mdp_kms.h"
+#include "mdp5.xml.h"
+#include "mdp5_smp.h"
+
+struct mdp5_kms {
+ struct mdp_kms base;
+
+ struct drm_device *dev;
+
+ int rev;
+
+ /* mapper-id used to request GEM buffer mapped for scanout: */
+ int id;
+
+ /* for tracking smp allocation amongst pipes: */
+ mdp5_smp_state_t smp_state;
+ struct mdp5_client_smp_state smp_client_state[CID_MAX];
+ int smp_blk_cnt;
+
+ /* io/register spaces: */
+ void __iomem *mmio, *vbif;
+
+ struct regulator *vdd;
+
+ struct clk *axi_clk;
+ struct clk *ahb_clk;
+ struct clk *src_clk;
+ struct clk *core_clk;
+ struct clk *lut_clk;
+ struct clk *vsync_clk;
+
+ struct hdmi *hdmi;
+
+ struct mdp_irq error_handler;
+};
+#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
+
+/* platform config data (ie. from DT, or pdata) */
+struct mdp5_platform_config {
+ struct iommu_domain *iommu;
+ uint32_t max_clk;
+ int smp_blk_cnt;
+};
+
+static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
+{
+ msm_writel(data, mdp5_kms->mmio + reg);
+}
+
+static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
+{
+ return msm_readl(mdp5_kms->mmio + reg);
+}
+
+static inline const char *pipe2name(enum mdp5_pipe pipe)
+{
+ static const char *names[] = {
+#define NAME(n) [SSPP_ ## n] = #n
+ NAME(VIG0), NAME(VIG1), NAME(VIG2),
+ NAME(RGB0), NAME(RGB1), NAME(RGB2),
+ NAME(DMA0), NAME(DMA1),
+#undef NAME
+ };
+ return names[pipe];
+}
+
+static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
+{
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
+ case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
+ case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
+ case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
+ case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
+ case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
+ case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
+ case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+ default: return 0;
+ }
+}
+
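+/* RGB pipes have a single SMP client; VIG/DMA pipes have separate Y/CB/CR clients: */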
+static inline int pipe2nclients(enum mdp5_pipe pipe)
+{
+ switch (pipe) {
+ case SSPP_RGB0:
+ case SSPP_RGB1:
+ case SSPP_RGB2:
+ return 1;
+ default:
+ return 3;
+ }
+}
+
+static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
+{
+ WARN_ON(plane >= pipe2nclients(pipe));
+ switch (pipe) {
+ case SSPP_VIG0: return CID_VIG0_Y + plane;
+ case SSPP_VIG1: return CID_VIG1_Y + plane;
+ case SSPP_VIG2: return CID_VIG2_Y + plane;
+ case SSPP_RGB0: return CID_RGB0;
+ case SSPP_RGB1: return CID_RGB1;
+ case SSPP_RGB2: return CID_RGB2;
+ case SSPP_DMA0: return CID_DMA0_Y + plane;
+ case SSPP_DMA1: return CID_DMA1_Y + plane;
+ default: return CID_UNUSED;
+ }
+}
+
+static inline uint32_t mixer2flush(int lm)
+{
+ switch (lm) {
+ case 0: return MDP5_CTL_FLUSH_LM0;
+ case 1: return MDP5_CTL_FLUSH_LM1;
+ case 2: return MDP5_CTL_FLUSH_LM2;
+ default: return 0;
+ }
+}
+
+static inline uint32_t intf2err(int intf)
+{
+ switch (intf) {
+ case 0: return MDP5_IRQ_INTF0_UNDER_RUN;
+ case 1: return MDP5_IRQ_INTF1_UNDER_RUN;
+ case 2: return MDP5_IRQ_INTF2_UNDER_RUN;
+ case 3: return MDP5_IRQ_INTF3_UNDER_RUN;
+ default: return 0;
+ }
+}
+
+static inline uint32_t intf2vblank(int intf)
+{
+ switch (intf) {
+ case 0: return MDP5_IRQ_INTF0_VSYNC;
+ case 1: return MDP5_IRQ_INTF1_VSYNC;
+ case 2: return MDP5_IRQ_INTF2_VSYNC;
+ case 3: return MDP5_IRQ_INTF3_VSYNC;
+ default: return 0;
+ }
+}
+
+int mdp5_disable(struct mdp5_kms *mdp5_kms);
+int mdp5_enable(struct mdp5_kms *mdp5_kms);
+
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp5_irq_preinstall(struct msm_kms *kms);
+int mdp5_irq_postinstall(struct msm_kms *kms);
+void mdp5_irq_uninstall(struct msm_kms *kms);
+irqreturn_t mdp5_irq(struct msm_kms *kms);
+int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+static inline
+uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
+ uint32_t max_formats)
+{
+ /* TODO when we have YUV, we need to filter supported formats
+ * based on pipe id..
+ */
+ return mdp_get_formats(pixel_formats, max_formats);
+}
+
+void mdp5_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj);
+void mdp5_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+int mdp5_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+void mdp5_plane_complete_flip(struct drm_plane *plane);
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+ enum mdp5_pipe pipe, bool private_plane);
+
+uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
+
+void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
+ enum mdp5_intf intf_id);
+void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
+void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
+struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, int id);
+
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
+ enum mdp5_intf intf_id);
+
+#endif /* __MDP5_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
new file mode 100644
index 000000000000..0ac8bb5e7e85
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+
+struct mdp5_plane {
+ struct drm_plane base;
+ const char *name;
+
+ enum mdp5_pipe pipe;
+
+ uint32_t nformats;
+ uint32_t formats[32];
+
+ bool enabled;
+};
+#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
+
+static struct mdp5_kms *get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static int mdp5_plane_update(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+ mdp5_plane->enabled = true;
+
+ if (plane->fb)
+ drm_framebuffer_unreference(plane->fb);
+
+ drm_framebuffer_reference(fb);
+
+ return mdp5_plane_mode_set(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+}
+
+static int mdp5_plane_disable(struct drm_plane *plane)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+ int i;
+
+ DBG("%s: disable", mdp5_plane->name);
+
+ /* update our SMP request to zero (release all our blks): */
+ for (i = 0; i < pipe2nclients(pipe); i++)
+ mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);
+
+ /* TODO detaching now will cause us not to get the last
+ * vblank and mdp5_smp_commit().. so other planes will
+ * still see smp blocks previously allocated to us as
+ * in-use..
+ */
+ if (plane->crtc)
+ mdp5_crtc_detach(plane->crtc, plane);
+
+ return 0;
+}
+
+static void mdp5_plane_destroy(struct drm_plane *plane)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+ mdp5_plane_disable(plane);
+ drm_plane_cleanup(plane);
+
+ kfree(mdp5_plane);
+}
+
+/* helper to install properties which are common to planes and crtcs */
+void mdp5_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj)
+{
+ // XXX
+}
+
+int mdp5_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val)
+{
+ // XXX
+ return -EINVAL;
+}
+
+static const struct drm_plane_funcs mdp5_plane_funcs = {
+ .update_plane = mdp5_plane_update,
+ .disable_plane = mdp5_plane_disable,
+ .destroy = mdp5_plane_destroy,
+ .set_property = mdp5_plane_set_property,
+};
+
+void mdp5_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+ uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
+ uint32_t iova[4];
+ int i;
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
+ msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
+ }
+ for (; i < 4; i++)
+ iova[i] = 0;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
+ MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+ MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
+ MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+ MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
+
+ plane->fb = fb;
+}
+
+/* NOTE: looks like if horizontal decimation is used (if we supported that)
+ * then the width used to calculate SMP block requirements is the post-
+ * decimated width. Ie. SMP buffering sits downstream of decimation (which
+ * presumably happens during the dma from scanout buffer).
+ */
+static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
+ uint32_t nplanes, uint32_t width)
+{
+ struct drm_device *dev = plane->dev;
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+ int i, hsub, nlines, nblks, ret;
+
+ hsub = drm_format_horz_chroma_subsampling(format);
+
+ /* different if BWC (compressed framebuffer?) enabled: */
+ nlines = 2;
+
+ for (i = 0, nblks = 0; i < nplanes; i++) {
+ int n, fetch_stride, cpp;
+
+ cpp = drm_format_plane_cpp(format, i);
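+		/* only the chroma planes (i > 0) are horizontally subsampled: */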
+ fetch_stride = width * cpp / (i ? hsub : 1);
+
+ n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);
+
+ /* for hw rev v1.00 */
+ if (mdp5_kms->rev == 0)
+ n = roundup_pow_of_two(n);
+
+ DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
+ ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
+ if (ret) {
+ dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
+ n, ret);
+ return ret;
+ }
+
+ nblks += n;
+ }
+
+ /* in success case, return total # of blocks allocated: */
+ return nblks;
+}
+
+static void set_fifo_thresholds(struct drm_plane *plane, int nblks)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+ uint32_t val;
+
+ /* 1/4 of SMP pool that is being fetched */
+ val = (nblks * SMP_ENTRIES_PER_BLK) / 4;
+
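+	/* program ascending watermarks at 1/4, 2/4 and 3/4 of the fetched entries: */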
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+
+}
+
+int mdp5_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
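+	/* horizontal timings are in pixels from start-of-line; vertical timing
+	 * values are in pixel-clock ticks (ie. lines * htotal):
+	 */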
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+ const struct mdp_format *format;
+ uint32_t nplanes, config = 0;
+ uint32_t phasex_step = 0, phasey_step = 0;
+ uint32_t hdecm = 0, vdecm = 0;
+ int i, nblks;
+
+ nplanes = drm_format_num_planes(fb->pixel_format);
+
+ /* bad formats should already be rejected: */
+ if (WARN_ON(nplanes > pipe2nclients(pipe)))
+ return -EINVAL;
+
+ /* src values are in Q16 fixed point, convert to integer: */
+ src_x = src_x >> 16;
+ src_y = src_y >> 16;
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
+
+ DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp5_plane->name,
+ fb->base.id, src_x, src_y, src_w, src_h,
+ crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
+
+ /*
+ * Calculate and request required # of smp blocks:
+ */
+ nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w);
+ if (nblks < 0)
+ return nblks;
+
+ /*
+ * Currently we update the hw for allocations/requests immediately,
+ * but once atomic modeset/pageflip is in place, the allocation
+ * would move into atomic->check_plane_state(), while updating the
+ * hw would remain here:
+ */
+ for (i = 0; i < pipe2nclients(pipe); i++)
+ mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
+
+ if (src_w != crtc_w) {
+ config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
+ /* TODO calc phasex_step, hdecm */
+ }
+
+ if (src_h != crtc_h) {
+ config |= MDP5_PIPE_SCALE_CONFIG_SCALEY_EN;
+ /* TODO calc phasey_step, vdecm */
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
+ MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
+ MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
+ MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
+ MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
+ MDP5_PIPE_SRC_XY_X(src_x) |
+ MDP5_PIPE_SRC_XY_Y(src_y));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
+ MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
+ MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
+ MDP5_PIPE_OUT_XY_X(crtc_x) |
+ MDP5_PIPE_OUT_XY_Y(crtc_y));
+
+ mdp5_plane_set_scanout(plane, fb);
+
+ format = to_mdp_format(msm_framebuffer_format(fb));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
+ MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
+ MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
+ MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
+ MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+ COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
+ MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+ MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
+ COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
+ MDP5_PIPE_SRC_FORMAT_NUM_PLANES(nplanes - 1) |
+ MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(CHROMA_RGB));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
+ MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
+ MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
+
+ /* not using secure mode: */
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), phasex_step);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), phasey_step);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
+ MDP5_PIPE_DECIMATION_VERT(vdecm) |
+ MDP5_PIPE_DECIMATION_HORZ(hdecm));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(SCALE_FILTER_NEAREST) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(SCALE_FILTER_NEAREST) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(SCALE_FILTER_NEAREST) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(SCALE_FILTER_NEAREST) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
+
+ set_fifo_thresholds(plane, nblks);
+
+ /* TODO detach from old crtc (if we had more than one) */
+ mdp5_crtc_attach(crtc, plane);
+
+ return 0;
+}
+
+void mdp5_plane_complete_flip(struct drm_plane *plane)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
+ int i;
+
+ for (i = 0; i < pipe2nclients(pipe); i++)
+ mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i));
+}
+
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ return mdp5_plane->pipe;
+}
+
+/* initialize plane */
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+ enum mdp5_pipe pipe, bool private_plane)
+{
+ struct drm_plane *plane = NULL;
+ struct mdp5_plane *mdp5_plane;
+ int ret;
+
+ mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
+ if (!mdp5_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ plane = &mdp5_plane->base;
+
+ mdp5_plane->pipe = pipe;
+ mdp5_plane->name = pipe2name(pipe);
+
+ mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
+ ARRAY_SIZE(mdp5_plane->formats));
+
+ drm_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+ mdp5_plane->formats, mdp5_plane->nformats,
+ private_plane);
+
+ mdp5_plane_install_properties(plane, &plane->base);
+
+ return plane;
+
+fail:
+ if (plane)
+ mdp5_plane_destroy(plane);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
new file mode 100644
index 000000000000..2d0236b963a6
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "mdp5_kms.h"
+#include "mdp5_smp.h"
+
+
+/* SMP - Shared Memory Pool
+ *
+ * These are shared between all the clients, where each plane in a
+ * scanout buffer is an SMP client.  Ie. scanout of 3 plane I420 on
+ * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
+ *
+ * Based on the size of the attached scanout buffer, a certain # of
+ * blocks must be allocated to that client out of the shared pool.
+ *
+ * For each block, it can be either free, or pending/in-use by a
+ * client. The updates happen in three steps:
+ *
+ * 1) mdp5_smp_request():
+ *    When plane scanout is set up, calculate the number of blocks
+ *    needed per client, and request them.  Blocks not in use or
+ *    pending by any other client are added to the client's pending
+ *    set.
+ *
+ * 2) mdp5_smp_configure():
+ * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
+ * are configured for the union(pending, inuse)
+ *
+ * 3) mdp5_smp_commit():
+ * After next vblank, copy pending -> inuse. Optionally update
+ * MDP5_SMP_ALLOC registers if there are newly unused blocks
+ *
+ * On the next vblank after changes have been committed to hw, the
+ * client's pending blocks become its in-use blocks (and no-longer
+ * in-use blocks become available to other clients).
+ *
+ * btw, hurray for confusing overloaded acronyms! :-/
+ *
+ * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
+ * should happen at (or before)? atomic->check(). And we'd need
+ * an API to discard previous requests if the update is aborted
+ * or is test-only.
+ *
+ * TODO would perhaps be nice to have debugfs to dump out kernel
+ * inuse and pending state of all clients..
+ */
+
+static DEFINE_SPINLOCK(smp_lock);
+
+
+/* step #1: update # of blocks pending for the client: */
+int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
+ enum mdp5_client_id cid, int nblks)
+{
+ struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
+	int i, ret = 0, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
+ unsigned long flags;
+
+ spin_lock_irqsave(&smp_lock, flags);
+
+ avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
+ if (nblks > avail) {
+ ret = -ENOSPC;
+ goto fail;
+ }
+
+ cur_nblks = bitmap_weight(ps->pending, cnt);
+ if (nblks > cur_nblks) {
+ /* grow the existing pending reservation: */
+ for (i = cur_nblks; i < nblks; i++) {
+ int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
+ set_bit(blk, ps->pending);
+ set_bit(blk, mdp5_kms->smp_state);
+ }
+ } else {
+ /* shrink the existing pending reservation: */
+ for (i = cur_nblks; i > nblks; i--) {
+ int blk = find_first_bit(ps->pending, cnt);
+ clear_bit(blk, ps->pending);
+ /* don't clear in global smp_state until _commit() */
+ }
+ }
+
+fail:
+ spin_unlock_irqrestore(&smp_lock, flags);
+	return ret;
+}
+
+static void update_smp_state(struct mdp5_kms *mdp5_kms,
+ enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
+{
+ int cnt = mdp5_kms->smp_blk_cnt;
+ uint32_t blk, val;
+
+ for_each_set_bit(blk, *assigned, cnt) {
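+		/* each SMP_ALLOC register packs three client-id fields: */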
+ int idx = blk / 3;
+ int fld = blk % 3;
+
+ val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
+
+ switch (fld) {
+ case 0:
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
+ break;
+ case 1:
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
+ break;
+ case 2:
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
+ break;
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
+ }
+}
+
+/* step #2: configure hw for union(pending, inuse): */
+void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+{
+ struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
+ int cnt = mdp5_kms->smp_blk_cnt;
+ mdp5_smp_state_t assigned;
+
+ bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+ update_smp_state(mdp5_kms, cid, &assigned);
+}
+
+/* step #3: after vblank, copy pending -> inuse: */
+void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+{
+ struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
+ int cnt = mdp5_kms->smp_blk_cnt;
+ mdp5_smp_state_t released;
+
+ /*
+	 * Figure out if there are any blocks we were previously
+ * using, which can be released and made available to other
+ * clients:
+ */
+ if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&smp_lock, flags);
+ /* clear released blocks: */
+ bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
+ released, cnt);
+ spin_unlock_irqrestore(&smp_lock, flags);
+
+ update_smp_state(mdp5_kms, CID_UNUSED, &released);
+ }
+
+ bitmap_copy(ps->inuse, ps->pending, cnt);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
new file mode 100644
index 000000000000..0ab739e1a1dd
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP5_SMP_H__
+#define __MDP5_SMP_H__
+
+#include "msm_drv.h"
+
+#define MAX_SMP_BLOCKS 22
+#define SMP_BLK_SIZE 4096
+#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
+
+typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
+
+struct mdp5_client_smp_state {
+ mdp5_smp_state_t inuse;
+ mdp5_smp_state_t pending;
+};
+
+struct mdp5_kms;
+
+int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks);
+void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
+void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
+
+
+#endif /* __MDP5_SMP_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
new file mode 100644
index 000000000000..a9629b85b983
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -0,0 +1,78 @@
+#ifndef MDP_COMMON_XML
+#define MDP_COMMON_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp_mixer_stage_id {
+ STAGE_UNUSED = 0,
+ STAGE_BASE = 1,
+ STAGE0 = 2,
+ STAGE1 = 3,
+ STAGE2 = 4,
+ STAGE3 = 5,
+};
+
+enum mdp_alpha_type {
+ FG_CONST = 0,
+ BG_CONST = 1,
+ FG_PIXEL = 2,
+ BG_PIXEL = 3,
+};
+
+enum mdp_bpc {
+ BPC1 = 0,
+ BPC5 = 1,
+ BPC6 = 2,
+ BPC8 = 3,
+};
+
+enum mdp_bpc_alpha {
+ BPC1A = 0,
+ BPC4A = 1,
+ BPC6A = 2,
+ BPC8A = 3,
+};
+
+
+#endif /* MDP_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
index 17330b0927b2..e0a6ffbe6ab4 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_format.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_format.c
@@ -17,7 +17,7 @@
#include "msm_drv.h"
-#include "mdp4_kms.h"
+#include "mdp_kms.h"
#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \
.base = { .pixel_format = DRM_FORMAT_ ## name }, \
@@ -34,7 +34,7 @@
#define BPC0A 0
-static const struct mdp4_format formats[] = {
+static const struct mdp_format formats[] = {
/* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */
FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4),
FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4),
@@ -44,12 +44,11 @@ static const struct mdp4_format formats[] = {
FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3),
};
-uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
- uint32_t max_formats)
+uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats)
{
uint32_t i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
- const struct mdp4_format *f = &formats[i];
+ const struct mdp_format *f = &formats[i];
if (i == max_formats)
break;
@@ -60,11 +59,11 @@ uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
return i;
}
-const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format)
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format)
{
int i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
- const struct mdp4_format *f = &formats[i];
+ const struct mdp_format *f = &formats[i];
if (f->base.pixel_format == format)
return &f->base;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
new file mode 100644
index 000000000000..3be48f7c36be
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp_kms.h"
+
+
+struct mdp_irq_wait {
+ struct mdp_irq irq;
+ int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static DEFINE_SPINLOCK(list_lock);
+
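+/* recompute the hw irq mask as the union of the userspace vblank mask and
+ * all registered handlers (caller must hold list_lock):
+ */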
+static void update_irq(struct mdp_kms *mdp_kms)
+{
+ struct mdp_irq *irq;
+ uint32_t irqmask = mdp_kms->vblank_mask;
+
+ BUG_ON(!spin_is_locked(&list_lock));
+
+ list_for_each_entry(irq, &mdp_kms->irq_list, node)
+ irqmask |= irq->irqmask;
+
+ mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
+}
+
+static void update_irq_unlocked(struct mdp_kms *mdp_kms)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&list_lock, flags);
+ update_irq(mdp_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
+{
+ struct mdp_irq *handler, *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ mdp_kms->in_irq = true;
+ list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
+ if (handler->irqmask & status) {
+ spin_unlock_irqrestore(&list_lock, flags);
+ handler->irq(handler, handler->irqmask & status);
+ spin_lock_irqsave(&list_lock, flags);
+ }
+ }
+ mdp_kms->in_irq = false;
+ update_irq(mdp_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+}
+
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ if (enable)
+ mdp_kms->vblank_mask |= mask;
+ else
+ mdp_kms->vblank_mask &= ~mask;
+ update_irq(mdp_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp_irq_wait *wait =
+ container_of(irq, struct mdp_irq_wait, irq);
+ wait->count--;
+ wake_up_all(&wait_event);
+}
+
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+ struct mdp_irq_wait wait = {
+ .irq = {
+ .irq = wait_irq,
+ .irqmask = irqmask,
+ },
+ .count = 1,
+ };
+ mdp_irq_register(mdp_kms, &wait.irq);
+ wait_event(wait_event, (wait.count <= 0));
+ mdp_irq_unregister(mdp_kms, &wait.irq);
+}
+
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+ unsigned long flags;
+ bool needs_update = false;
+
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (!irq->registered) {
+ irq->registered = true;
+ list_add(&irq->node, &mdp_kms->irq_list);
+ needs_update = !mdp_kms->in_irq;
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ if (needs_update)
+ update_irq_unlocked(mdp_kms);
+}
+
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+ unsigned long flags;
+ bool needs_update = false;
+
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (irq->registered) {
+ irq->registered = false;
+ list_del(&irq->node);
+ needs_update = !mdp_kms->in_irq;
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ if (needs_update)
+ update_irq_unlocked(mdp_kms);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
new file mode 100644
index 000000000000..99557b5ad4fd
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP_KMS_H__
+#define __MDP_KMS_H__
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp_common.xml.h"
+
+struct mdp_kms;
+
+struct mdp_kms_funcs {
+ struct msm_kms_funcs base;
+ void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask);
+};
+
+struct mdp_kms {
+ struct msm_kms base;
+
+ const struct mdp_kms_funcs *funcs;
+
+ /* irq handling: */
+ bool in_irq;
+ struct list_head irq_list; /* list of mdp4_irq */
+ uint32_t vblank_mask; /* irq bits set for userspace vblank */
+};
+#define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
+
+static inline void mdp_kms_init(struct mdp_kms *mdp_kms,
+ const struct mdp_kms_funcs *funcs)
+{
+ mdp_kms->funcs = funcs;
+ INIT_LIST_HEAD(&mdp_kms->irq_list);
+ msm_kms_init(&mdp_kms->base, &funcs->base);
+}
+
+/*
+ * irq helpers:
+ */
+
+/* For transiently registering for different MDP irqs that various parts
+ * of the KMS code need during setup/configuration. These are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping related irq usage.
+ */
+struct mdp_irq {
+ struct list_head node;
+ uint32_t irqmask;
+ bool registered;
+ void (*irq)(struct mdp_irq *irq, uint32_t irqstatus);
+};
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status);
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable);
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+
+
+/*
+ * pixel format helpers:
+ */
+
+struct mdp_format {
+ struct msm_format base;
+ enum mdp_bpc bpc_r, bpc_g, bpc_b;
+ enum mdp_bpc_alpha bpc_a;
+ uint8_t unpack[4];
+ bool alpha_enable, unpack_tight;
+ uint8_t cpp, unpack_count;
+};
+#define to_mdp_format(x) container_of(x, struct mdp_format, base)
+
+uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats);
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
+
+#endif /* __MDP_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
deleted file mode 100644
index 5c6b7fca4edd..000000000000
--- a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-
-#include "msm_drv.h"
-#include "mdp4_kms.h"
-
-
-struct mdp4_irq_wait {
- struct mdp4_irq irq;
- int count;
-};
-
-static DECLARE_WAIT_QUEUE_HEAD(wait_event);
-
-static DEFINE_SPINLOCK(list_lock);
-
-static void update_irq(struct mdp4_kms *mdp4_kms)
-{
- struct mdp4_irq *irq;
- uint32_t irqmask = mdp4_kms->vblank_mask;
-
- BUG_ON(!spin_is_locked(&list_lock));
-
- list_for_each_entry(irq, &mdp4_kms->irq_list, node)
- irqmask |= irq->irqmask;
-
- mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask);
-}
-
-static void update_irq_unlocked(struct mdp4_kms *mdp4_kms)
-{
- unsigned long flags;
- spin_lock_irqsave(&list_lock, flags);
- update_irq(mdp4_kms);
- spin_unlock_irqrestore(&list_lock, flags);
-}
-
-static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus)
-{
- DRM_ERROR("errors: %08x\n", irqstatus);
-}
-
-void mdp4_irq_preinstall(struct msm_kms *kms)
-{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
- mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
-}
-
-int mdp4_irq_postinstall(struct msm_kms *kms)
-{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
- struct mdp4_irq *error_handler = &mdp4_kms->error_handler;
-
- INIT_LIST_HEAD(&mdp4_kms->irq_list);
-
- error_handler->irq = mdp4_irq_error_handler;
- error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
- MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
-
- mdp4_irq_register(mdp4_kms, error_handler);
-
- return 0;
-}
-
-void mdp4_irq_uninstall(struct msm_kms *kms)
-{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
- mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
-}
-
-irqreturn_t mdp4_irq(struct msm_kms *kms)
-{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
- struct drm_device *dev = mdp4_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
- struct mdp4_irq *handler, *n;
- unsigned long flags;
- unsigned int id;
- uint32_t status;
-
- status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
- mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
-
- VERB("status=%08x", status);
-
- for (id = 0; id < priv->num_crtcs; id++)
- if (status & mdp4_crtc_vblank(priv->crtcs[id]))
- drm_handle_vblank(dev, id);
-
- spin_lock_irqsave(&list_lock, flags);
- mdp4_kms->in_irq = true;
- list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
- if (handler->irqmask & status) {
- spin_unlock_irqrestore(&list_lock, flags);
- handler->irq(handler, handler->irqmask & status);
- spin_lock_irqsave(&list_lock, flags);
- }
- }
- mdp4_kms->in_irq = false;
- update_irq(mdp4_kms);
- spin_unlock_irqrestore(&list_lock, flags);
-
- return IRQ_HANDLED;
-}
-
-int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
-{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
- unsigned long flags;
-
- spin_lock_irqsave(&list_lock, flags);
- mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc);
- update_irq(mdp4_kms);
- spin_unlock_irqrestore(&list_lock, flags);
-
- return 0;
-}
-
-void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
-{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
- unsigned long flags;
-
- spin_lock_irqsave(&list_lock, flags);
- mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc);
- update_irq(mdp4_kms);
- spin_unlock_irqrestore(&list_lock, flags);
-}
-
-static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus)
-{
- struct mdp4_irq_wait *wait =
- container_of(irq, struct mdp4_irq_wait, irq);
- wait->count--;
- wake_up_all(&wait_event);
-}
-
-void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
-{
- struct mdp4_irq_wait wait = {
- .irq = {
- .irq = wait_irq,
- .irqmask = irqmask,
- },
- .count = 1,
- };
- mdp4_irq_register(mdp4_kms, &wait.irq);
- wait_event(wait_event, (wait.count <= 0));
- mdp4_irq_unregister(mdp4_kms, &wait.irq);
-}
-
-void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
-{
- unsigned long flags;
- bool needs_update = false;
-
- spin_lock_irqsave(&list_lock, flags);
-
- if (!irq->registered) {
- irq->registered = true;
- list_add(&irq->node, &mdp4_kms->irq_list);
- needs_update = !mdp4_kms->in_irq;
- }
-
- spin_unlock_irqrestore(&list_lock, flags);
-
- if (needs_update)
- update_irq_unlocked(mdp4_kms);
-}
-
-void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
-{
- unsigned long flags;
- bool needs_update = false;
-
- spin_lock_irqsave(&list_lock, flags);
-
- if (irq->registered) {
- irq->registered = false;
- list_del(&irq->node);
- needs_update = !mdp4_kms->in_irq;
- }
-
- spin_unlock_irqrestore(&list_lock, flags);
-
- if (needs_update)
- update_irq_unlocked(mdp4_kms);
-}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 86537692e45c..e6adafc7eff3 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -17,6 +17,7 @@
#include "msm_drv.h"
#include "msm_gpu.h"
+#include "msm_kms.h"
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
@@ -30,50 +31,19 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
.output_poll_changed = msm_fb_output_poll_changed,
};
-static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
- unsigned long iova, int flags, void *arg)
-{
- DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
- return 0;
-}
-
-int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
{
struct msm_drm_private *priv = dev->dev_private;
- int idx = priv->num_iommus++;
+ int idx = priv->num_mmus++;
- if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
+ if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
return -EINVAL;
- priv->iommus[idx] = iommu;
-
- iommu_set_fault_handler(iommu, msm_fault_handler, dev);
-
- /* need to iommu_attach_device() somewhere?? on resume?? */
+ priv->mmus[idx] = mmu;
return idx;
}
-int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
- const char **names, int cnt)
-{
- int i, ret;
-
- for (i = 0; i < cnt; i++) {
- /* TODO maybe some day msm iommu won't require this hack: */
- struct device *msm_iommu_get_ctx(const char *ctx_name);
- struct device *ctx = msm_iommu_get_ctx(names[i]);
- if (!ctx)
- continue;
- ret = iommu_attach_device(iommu, ctx);
- if (ret) {
- dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
- return ret;
- }
- }
- return 0;
-}
-
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -82,6 +52,10 @@ module_param(reglog, bool, 0600);
#define reglog 0
#endif
+static char *vram;
+MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
+module_param(vram, charp, 0);
+
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname)
{
@@ -161,6 +135,14 @@ static int msm_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
}
+ if (priv->vram.paddr) {
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ drm_mm_takedown(&priv->vram.mm);
+ dma_free_attrs(dev->dev, priv->vram.size, NULL,
+ priv->vram.paddr, &attrs);
+ }
+
dev->dev_private = NULL;
kfree(priv);
@@ -168,6 +150,24 @@ static int msm_unload(struct drm_device *dev)
return 0;
}
+static int get_mdp_ver(struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ static const struct of_device_id match_types[] = { {
+ .compatible = "qcom,mdss_mdp",
+ .data = (void *)5,
+ }, {
+ /* end node */
+ } };
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *match;
+ match = of_match_node(match_types, dev->of_node);
+ if (match)
+ return (int)match->data;
+#endif
+ return 4;
+}
+
static int msm_load(struct drm_device *dev, unsigned long flags)
{
struct platform_device *pdev = dev->platformdev;
@@ -191,7 +191,53 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_init(dev);
- kms = mdp4_kms_init(dev);
+ /* if we have no IOMMU, then we need to use carveout allocator.
+ * Grab the entire CMA chunk carved out in early startup in
+ * mach-msm:
+ */
+ if (!iommu_present(&platform_bus_type)) {
+ DEFINE_DMA_ATTRS(attrs);
+ unsigned long size;
+ void *p;
+
+ DBG("using %s VRAM carveout", vram);
+ size = memparse(vram, NULL);
+ priv->vram.size = size;
+
+ drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
+
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+
+ /* note that for no-kernel-mapping, the vaddr returned
+ * is bogus, but non-null if allocation succeeded:
+ */
+ p = dma_alloc_attrs(dev->dev, size,
+ &priv->vram.paddr, 0, &attrs);
+ if (!p) {
+ dev_err(dev->dev, "failed to allocate VRAM\n");
+ priv->vram.paddr = 0;
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ dev_info(dev->dev, "VRAM: %08x->%08x\n",
+ (uint32_t)priv->vram.paddr,
+ (uint32_t)(priv->vram.paddr + size));
+ }
+
+ switch (get_mdp_ver(pdev)) {
+ case 4:
+ kms = mdp4_kms_init(dev);
+ break;
+ case 5:
+ kms = mdp5_kms_init(dev);
+ break;
+ default:
+ kms = ERR_PTR(-ENODEV);
+ break;
+ }
+
if (IS_ERR(kms)) {
/*
* NOTE: once we have GPU support, having no kms should not
@@ -326,7 +372,7 @@ static void msm_lastclose(struct drm_device *dev)
}
}
-static irqreturn_t msm_irq(DRM_IRQ_ARGS)
+static irqreturn_t msm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
struct msm_drm_private *priv = dev->dev_private;
@@ -415,7 +461,7 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
{
- return drm_mm_dump_table(m, dev->mm_private);
+ return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
}
static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
@@ -778,12 +824,13 @@ static const struct dev_pm_ops msm_pm_ops = {
static int msm_pdev_probe(struct platform_device *pdev)
{
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
return drm_platform_init(&msm_driver, pdev);
}
static int msm_pdev_remove(struct platform_device *pdev)
{
- drm_platform_exit(&msm_driver, pdev);
+ drm_put_dev(platform_get_drvdata(pdev));
return 0;
}
@@ -793,12 +840,19 @@ static const struct platform_device_id msm_id[] = {
{ }
};
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,mdss_mdp" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
static struct platform_driver msm_platform_driver = {
.probe = msm_pdev_probe,
.remove = msm_pdev_remove,
.driver = {
.owner = THIS_MODULE,
.name = "msm",
+ .of_match_table = dt_match,
.pm = &msm_pm_ops,
},
.id_table = msm_id,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d39f0862b19e..3d63269c5b29 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -31,6 +31,15 @@
#include <linux/types.h>
#include <asm/sizes.h>
+
+#if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_MSM)
+/* stubs we need for compile-test: */
+static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+ return NULL;
+}
+#endif
+
#ifndef CONFIG_OF
#include <mach/board.h>
#include <mach/socinfo.h>
@@ -44,6 +53,7 @@
struct msm_kms;
struct msm_gpu;
+struct msm_mmu;
#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
@@ -76,9 +86,9 @@ struct msm_drm_private {
/* callbacks deferred until bo is inactive: */
struct list_head fence_cbs;
- /* registered IOMMU domains: */
- unsigned int num_iommus;
- struct iommu_domain *iommus[NUM_DOMAINS];
+ /* registered MMUs: */
+ unsigned int num_mmus;
+ struct msm_mmu *mmus[NUM_DOMAINS];
unsigned int num_planes;
struct drm_plane *planes[8];
@@ -94,6 +104,16 @@ struct msm_drm_private {
unsigned int num_connectors;
struct drm_connector *connectors[8];
+
+ /* VRAM carveout, used when no IOMMU: */
+ struct {
+ unsigned long size;
+ dma_addr_t paddr;
+ /* NOTE: mm managed at the page level, size is in # of pages
+ * and position mm_node->start is in # of pages:
+ */
+ struct drm_mm mm;
+ } vram;
};
struct msm_format {
@@ -114,39 +134,7 @@ void __msm_fence_worker(struct work_struct *work);
(_cb)->func = _func; \
} while (0)
-/* As there are different display controller blocks depending on the
- * snapdragon version, the kms support is split out and the appropriate
- * implementation is loaded at runtime. The kms module is responsible
- * for constructing the appropriate planes/crtcs/encoders/connectors.
- */
-struct msm_kms_funcs {
- /* hw initialization: */
- int (*hw_init)(struct msm_kms *kms);
- /* irq handling: */
- void (*irq_preinstall)(struct msm_kms *kms);
- int (*irq_postinstall)(struct msm_kms *kms);
- void (*irq_uninstall)(struct msm_kms *kms);
- irqreturn_t (*irq)(struct msm_kms *kms);
- int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
- void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
- /* misc: */
- const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
- long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
- struct drm_encoder *encoder);
- /* cleanup: */
- void (*preclose)(struct msm_kms *kms, struct drm_file *file);
- void (*destroy)(struct msm_kms *kms);
-};
-
-struct msm_kms {
- const struct msm_kms_funcs *funcs;
-};
-
-struct msm_kms *mdp4_kms_init(struct drm_device *dev);
-
-int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu);
-int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
- const char **names, int cnt);
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
struct timespec *timeout);
@@ -202,7 +190,9 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
-int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
+struct hdmi;
+struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
+irqreturn_t hdmi_irq(int irq, void *dev_id);
void __init hdmi_register(void);
void __exit hdmi_unregister(void);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 0286c0eeb10c..81bafdf19ab3 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -16,6 +16,7 @@
*/
#include "msm_drv.h"
+#include "msm_kms.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index e587d251c590..3da8264d3039 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -22,7 +22,45 @@
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
+#include "msm_mmu.h"
+static dma_addr_t physaddr(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
+ priv->vram.paddr;
+}
+
+/* allocate pages from VRAM carveout, used when no IOMMU: */
+static struct page **get_pages_vram(struct drm_gem_object *obj,
+ int npages)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ dma_addr_t paddr;
+ struct page **p;
+ int ret, i;
+
+ p = drm_malloc_ab(npages, sizeof(struct page *));
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
+ npages, 0, DRM_MM_SEARCH_DEFAULT);
+ if (ret) {
+ drm_free_large(p);
+ return ERR_PTR(ret);
+ }
+
+ paddr = physaddr(obj);
+ for (i = 0; i < npages; i++) {
+ p[i] = phys_to_page(paddr);
+ paddr += PAGE_SIZE;
+ }
+
+ return p;
+}
/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
@@ -31,9 +69,14 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
- struct page **p = drm_gem_get_pages(obj, 0);
+ struct page **p;
int npages = obj->size >> PAGE_SHIFT;
+ if (iommu_present(&platform_bus_type))
+ p = drm_gem_get_pages(obj, 0);
+ else
+ p = get_pages_vram(obj, npages);
+
if (IS_ERR(p)) {
dev_err(dev->dev, "could not get pages: %ld\n",
PTR_ERR(p));
@@ -73,7 +116,11 @@ static void put_pages(struct drm_gem_object *obj)
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
- drm_gem_put_pages(obj, msm_obj->pages, true, false);
+ if (iommu_present(&platform_bus_type))
+ drm_gem_put_pages(obj, msm_obj->pages, true, false);
+ else
+ drm_mm_remove_node(msm_obj->vram_node);
+
msm_obj->pages = NULL;
}
}
@@ -138,7 +185,6 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
struct page **pages;
unsigned long pfn;
@@ -163,7 +209,7 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
pgoff = ((unsigned long)vmf->virtual_address -
vma->vm_start) >> PAGE_SHIFT;
- pfn = page_to_pfn(msm_obj->pages[pgoff]);
+ pfn = page_to_pfn(pages[pgoff]);
VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
pfn, pfn << PAGE_SHIFT);
@@ -219,67 +265,6 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
return offset;
}
-/* helpers for dealing w/ iommu: */
-static int map_range(struct iommu_domain *domain, unsigned int iova,
- struct sg_table *sgt, unsigned int len, int prot)
-{
- struct scatterlist *sg;
- unsigned int da = iova;
- unsigned int i, j;
- int ret;
-
- if (!domain || !sgt)
- return -EINVAL;
-
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- u32 pa = sg_phys(sg) - sg->offset;
- size_t bytes = sg->length + sg->offset;
-
- VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
-
- ret = iommu_map(domain, da, pa, bytes, prot);
- if (ret)
- goto fail;
-
- da += bytes;
- }
-
- return 0;
-
-fail:
- da = iova;
-
- for_each_sg(sgt->sgl, sg, i, j) {
- size_t bytes = sg->length + sg->offset;
- iommu_unmap(domain, da, bytes);
- da += bytes;
- }
- return ret;
-}
-
-static void unmap_range(struct iommu_domain *domain, unsigned int iova,
- struct sg_table *sgt, unsigned int len)
-{
- struct scatterlist *sg;
- unsigned int da = iova;
- int i;
-
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- size_t bytes = sg->length + sg->offset;
- size_t unmapped;
-
- unmapped = iommu_unmap(domain, da, bytes);
- if (unmapped < bytes)
- break;
-
- VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
-
- BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
-
- da += bytes;
- }
-}
-
/* should be called under struct_mutex.. although it can be called
* from atomic context without struct_mutex to acquire an extra
* iova ref if you know one is already held.
@@ -295,15 +280,20 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
if (!msm_obj->domain[id].iova) {
struct msm_drm_private *priv = obj->dev->dev_private;
- uint32_t offset = (uint32_t)mmap_offset(obj);
- struct page **pages;
- pages = get_pages(obj);
+ struct msm_mmu *mmu = priv->mmus[id];
+ struct page **pages = get_pages(obj);
+
if (IS_ERR(pages))
return PTR_ERR(pages);
- // XXX ideally we would not map buffers writable when not needed...
- ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
- obj->size, IOMMU_READ | IOMMU_WRITE);
- msm_obj->domain[id].iova = offset;
+
+ if (iommu_present(&platform_bus_type)) {
+ uint32_t offset = (uint32_t)mmap_offset(obj);
+ ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
+ obj->size, IOMMU_READ | IOMMU_WRITE);
+ msm_obj->domain[id].iova = offset;
+ } else {
+ msm_obj->domain[id].iova = physaddr(obj);
+ }
}
if (!ret)
@@ -514,6 +504,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
void msm_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int id;
@@ -525,11 +516,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
list_del(&msm_obj->mm_list);
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- if (msm_obj->domain[id].iova) {
- struct msm_drm_private *priv = obj->dev->dev_private;
+ struct msm_mmu *mmu = priv->mmus[id];
+ if (mmu && msm_obj->domain[id].iova) {
uint32_t offset = (uint32_t)mmap_offset(obj);
- unmap_range(priv->iommus[id], offset,
- msm_obj->sgt, obj->size);
+ mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
}
}
@@ -591,6 +581,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
+ unsigned sz;
switch (flags & MSM_BO_CACHE_MASK) {
case MSM_BO_UNCACHED:
@@ -603,10 +594,17 @@ static int msm_gem_new_impl(struct drm_device *dev,
return -EINVAL;
}
- msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
+ sz = sizeof(*msm_obj);
+ if (!iommu_present(&platform_bus_type))
+ sz += sizeof(struct drm_mm_node);
+
+ msm_obj = kzalloc(sz, GFP_KERNEL);
if (!msm_obj)
return -ENOMEM;
+ if (!iommu_present(&platform_bus_type))
+ msm_obj->vram_node = (void *)&msm_obj[1];
+
msm_obj->flags = flags;
msm_obj->resv = &msm_obj->_resv;
@@ -623,7 +621,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags)
{
- struct drm_gem_object *obj;
+ struct drm_gem_object *obj = NULL;
int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -634,15 +632,19 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
if (ret)
goto fail;
- ret = drm_gem_object_init(dev, obj, size);
- if (ret)
- goto fail;
+ if (iommu_present(&platform_bus_type)) {
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto fail;
+ } else {
+ drm_gem_private_object_init(dev, obj, size);
+ }
return obj;
fail:
if (obj)
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference(obj);
return ERR_PTR(ret);
}
@@ -654,6 +656,12 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct drm_gem_object *obj;
int ret, npages;
+ /* if we don't have IOMMU, don't bother pretending we can import: */
+ if (!iommu_present(&platform_bus_type)) {
+ dev_err(dev->dev, "cannot import without IOMMU\n");
+ return ERR_PTR(-EINVAL);
+ }
+
size = PAGE_ALIGN(size);
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index f4f23a578d9d..3246bb46c4f2 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -57,6 +57,11 @@ struct msm_gem_object {
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
struct reservation_object _resv;
+
+ /* For physically contiguous buffers. Used when we don't have
+ * an IOMMU.
+ */
+ struct drm_mm_node *vram_node;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5281d4bc37f7..5423e914e491 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -163,7 +163,7 @@ retry:
/* if locking succeeded, pin bo: */
- ret = msm_gem_get_iova(&msm_obj->base,
+ ret = msm_gem_get_iova_locked(&msm_obj->base,
submit->gpu->id, &iova);
/* this would break the logic in the fail path.. there is no
@@ -247,7 +247,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
/* For now, just map the entire thing. Eventually we probably
* want to do it page-by-page, w/ kmap() if not vmap()d..
*/
- ptr = msm_gem_vaddr(&obj->base);
+ ptr = msm_gem_vaddr_locked(&obj->base);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
@@ -307,14 +307,12 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
{
unsigned i;
- mutex_lock(&submit->dev->struct_mutex);
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit_unlock_unpin_bo(submit, i);
list_del_init(&msm_obj->submit_entry);
drm_gem_object_unreference(&msm_obj->base);
}
- mutex_unlock(&submit->dev->struct_mutex);
ww_acquire_fini(&submit->ticket);
kfree(submit);
@@ -342,6 +340,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (args->nr_cmds > MAX_CMDS)
return -EINVAL;
+ mutex_lock(&dev->struct_mutex);
+
submit = submit_create(dev, gpu, args->nr_bos);
if (!submit) {
ret = -ENOMEM;
@@ -410,5 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
if (submit)
submit_cleanup(submit, !!ret);
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 4583d61556f5..0cfe3f426ee4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -17,6 +17,7 @@
#include "msm_gpu.h"
#include "msm_gem.h"
+#include "msm_mmu.h"
/*
@@ -25,20 +26,10 @@
#ifdef CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
-#include <mach/kgsl.h>
-static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
+static void bs_init(struct msm_gpu *gpu)
{
- struct drm_device *dev = gpu->dev;
- struct kgsl_device_platform_data *pdata;
-
- if (!pdev) {
- dev_err(dev->dev, "could not find dtv pdata\n");
- return;
- }
-
- pdata = pdev->dev.platform_data;
- if (pdata->bus_scale_table) {
- gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
+ if (gpu->bus_scale_table) {
+ gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
DBG("bus scale client: %08x", gpu->bsc);
}
}
@@ -59,7 +50,7 @@ static void bs_set(struct msm_gpu *gpu, int idx)
}
}
#else
-static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) {}
+static void bs_init(struct msm_gpu *gpu) {}
static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif
@@ -307,8 +298,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_drm_private *priv = dev->dev_private;
int i, ret;
- mutex_lock(&dev->struct_mutex);
-
submit->fence = ++priv->next_fence;
gpu->submitted_fence = submit->fence;
@@ -340,7 +329,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
}
hangcheck_timer_reset(gpu);
- mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -363,6 +351,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, const char *ioname, const char *irqname, int ringsz)
{
+ struct iommu_domain *iommu;
int i, ret;
gpu->dev = drm;
@@ -428,13 +417,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
* and have separate page tables per context. For now, to keep things
* simple and to get something working, just use a single address space:
*/
- gpu->iommu = iommu_domain_alloc(&platform_bus_type);
- if (!gpu->iommu) {
- dev_err(drm->dev, "failed to allocate IOMMU\n");
- ret = -ENOMEM;
- goto fail;
+ iommu = iommu_domain_alloc(&platform_bus_type);
+ if (iommu) {
+ dev_info(drm->dev, "%s: using IOMMU\n", name);
+ gpu->mmu = msm_iommu_new(drm, iommu);
+ } else {
+ dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
- gpu->id = msm_register_iommu(drm, gpu->iommu);
+ gpu->id = msm_register_mmu(drm, gpu->mmu);
/* Create ringbuffer: */
gpu->rb = msm_ringbuffer_new(gpu, ringsz);
@@ -452,7 +442,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
- bs_init(gpu, pdev);
+ bs_init(gpu);
return 0;
@@ -474,6 +464,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_ringbuffer_destroy(gpu->rb);
}
- if (gpu->iommu)
- iommu_domain_free(gpu->iommu);
+ if (gpu->mmu)
+ gpu->mmu->funcs->destroy(gpu->mmu);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 8cd829e520bb..458db8c64c28 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -78,14 +78,18 @@ struct msm_gpu {
void __iomem *mmio;
int irq;
- struct iommu_domain *iommu;
+ struct msm_mmu *mmu;
int id;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
struct clk *ebi1_clk, *grp_clks[5];
uint32_t fast_rate, slow_rate, bus_freq;
+
+#ifdef CONFIG_MSM_BUS_SCALING
+ struct msm_bus_scale_pdata *bus_scale_table;
uint32_t bsc;
+#endif
/* Hang Detection: */
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
new file mode 100644
index 000000000000..92b745986231
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+
+struct msm_iommu {
+ struct msm_mmu base;
+ struct iommu_domain *domain;
+};
+#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+
+static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
+ unsigned long iova, int flags, void *arg)
+{
+ DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
+ return 0;
+}
+
+static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+ struct drm_device *dev = mmu->dev;
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int i, ret;
+
+ for (i = 0; i < cnt; i++) {
+ struct device *msm_iommu_get_ctx(const char *ctx_name);
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+ if (IS_ERR_OR_NULL(ctx))
+ continue;
+ ret = iommu_attach_device(iommu->domain, ctx);
+ if (ret) {
+ dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
+ struct sg_table *sgt, unsigned len, int prot)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ struct iommu_domain *domain = iommu->domain;
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ unsigned int i, j;
+ int ret;
+
+ if (!domain || !sgt)
+ return -EINVAL;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ u32 pa = sg_phys(sg) - sg->offset;
+ size_t bytes = sg->length + sg->offset;
+
+ VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
+
+ ret = iommu_map(domain, da, pa, bytes, prot);
+ if (ret)
+ goto fail;
+
+ da += bytes;
+ }
+
+ return 0;
+
+fail:
+ da = iova;
+
+ for_each_sg(sgt->sgl, sg, i, j) {
+ size_t bytes = sg->length + sg->offset;
+ iommu_unmap(domain, da, bytes);
+ da += bytes;
+ }
+ return ret;
+}
+
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
+ struct sg_table *sgt, unsigned len)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ struct iommu_domain *domain = iommu->domain;
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ int i;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes = sg->length + sg->offset;
+ size_t unmapped;
+
+ unmapped = iommu_unmap(domain, da, bytes);
+ if (unmapped < bytes)
+ return unmapped;
+
+ VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
+
+ BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+
+ da += bytes;
+ }
+
+ return 0;
+}
+
+static void msm_iommu_destroy(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ iommu_domain_free(iommu->domain);
+ kfree(iommu);
+}
+
+static const struct msm_mmu_funcs funcs = {
+ .attach = msm_iommu_attach,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .destroy = msm_iommu_destroy,
+};
+
+struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain)
+{
+ struct msm_iommu *iommu;
+
+ iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return ERR_PTR(-ENOMEM);
+
+ iommu->domain = domain;
+ msm_mmu_init(&iommu->base, dev, &funcs);
+ iommu_set_fault_handler(domain, msm_fault_handler, dev);
+
+ return &iommu->base;
+}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
new file mode 100644
index 000000000000..06437745bc2c
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_KMS_H__
+#define __MSM_KMS_H__
+
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+
+/* As there are different display controller blocks depending on the
+ * snapdragon version, the kms support is split out and the appropriate
+ * implementation is loaded at runtime. The kms module is responsible
+ * for constructing the appropriate planes/crtcs/encoders/connectors.
+ */
+struct msm_kms_funcs {
+ /* hw initialization: */
+ int (*hw_init)(struct msm_kms *kms);
+ /* irq handling: */
+ void (*irq_preinstall)(struct msm_kms *kms);
+ int (*irq_postinstall)(struct msm_kms *kms);
+ void (*irq_uninstall)(struct msm_kms *kms);
+ irqreturn_t (*irq)(struct msm_kms *kms);
+ int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+ void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+ /* misc: */
+ const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
+ long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder);
+ /* cleanup: */
+ void (*preclose)(struct msm_kms *kms, struct drm_file *file);
+ void (*destroy)(struct msm_kms *kms);
+};
+
+struct msm_kms {
+ const struct msm_kms_funcs *funcs;
+
+ /* irq handling: */
+ bool in_irq;
+ struct list_head irq_list; /* list of mdp4_irq */
+ uint32_t vblank_mask; /* irq bits set for userspace vblank */
+};
+
+static inline void msm_kms_init(struct msm_kms *kms,
+ const struct msm_kms_funcs *funcs)
+{
+ kms->funcs = funcs;
+}
+
+struct msm_kms *mdp4_kms_init(struct drm_device *dev);
+struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+
+#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
new file mode 100644
index 000000000000..030324482b4a
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_MMU_H__
+#define __MSM_MMU_H__
+
+#include <linux/iommu.h>
+
+struct msm_mmu_funcs {
+ int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
+ int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+ unsigned len, int prot);
+ int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+ unsigned len);
+ void (*destroy)(struct msm_mmu *mmu);
+};
+
+struct msm_mmu {
+ const struct msm_mmu_funcs *funcs;
+ struct drm_device *dev;
+};
+
+static inline void msm_mmu_init(struct msm_mmu *mmu, struct drm_device *dev,
+ const struct msm_mmu_funcs *funcs)
+{
+ mmu->dev = dev;
+ mmu->funcs = funcs;
+}
+
+struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain);
+struct msm_mmu *msm_gpummu_new(struct drm_device *dev, struct msm_gpu *gpu);
+
+#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index b3fa1ba191b7..e88145ba1bf5 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -41,6 +41,7 @@ nouveau-y += core/subdev/bios/init.o
nouveau-y += core/subdev/bios/mxm.o
nouveau-y += core/subdev/bios/perf.o
nouveau-y += core/subdev/bios/pll.o
+nouveau-y += core/subdev/bios/ramcfg.o
nouveau-y += core/subdev/bios/rammap.o
nouveau-y += core/subdev/bios/timing.o
nouveau-y += core/subdev/bios/therm.o
@@ -71,7 +72,10 @@ nouveau-y += core/subdev/devinit/nv10.o
nouveau-y += core/subdev/devinit/nv1a.o
nouveau-y += core/subdev/devinit/nv20.o
nouveau-y += core/subdev/devinit/nv50.o
+nouveau-y += core/subdev/devinit/nv84.o
+nouveau-y += core/subdev/devinit/nv98.o
nouveau-y += core/subdev/devinit/nva3.o
+nouveau-y += core/subdev/devinit/nvaf.o
nouveau-y += core/subdev/devinit/nvc0.o
nouveau-y += core/subdev/fb/base.o
nouveau-y += core/subdev/fb/nv04.o
@@ -232,6 +236,7 @@ nouveau-y += core/engine/fifo/nv50.o
nouveau-y += core/engine/fifo/nv84.o
nouveau-y += core/engine/fifo/nvc0.o
nouveau-y += core/engine/fifo/nve0.o
+nouveau-y += core/engine/fifo/nv108.o
nouveau-y += core/engine/graph/ctxnv40.o
nouveau-y += core/engine/graph/ctxnv50.o
nouveau-y += core/engine/graph/ctxnvc0.o
@@ -242,6 +247,7 @@ nouveau-y += core/engine/graph/ctxnvd7.o
nouveau-y += core/engine/graph/ctxnvd9.o
nouveau-y += core/engine/graph/ctxnve4.o
nouveau-y += core/engine/graph/ctxnvf0.o
+nouveau-y += core/engine/graph/ctxnv108.o
nouveau-y += core/engine/graph/nv04.o
nouveau-y += core/engine/graph/nv10.o
nouveau-y += core/engine/graph/nv20.o
@@ -260,6 +266,7 @@ nouveau-y += core/engine/graph/nvd7.o
nouveau-y += core/engine/graph/nvd9.o
nouveau-y += core/engine/graph/nve4.o
nouveau-y += core/engine/graph/nvf0.o
+nouveau-y += core/engine/graph/nv108.o
nouveau-y += core/engine/mpeg/nv31.o
nouveau-y += core/engine/mpeg/nv40.o
nouveau-y += core/engine/mpeg/nv44.o
diff --git a/drivers/gpu/drm/nouveau/core/core/engine.c b/drivers/gpu/drm/nouveau/core/core/engine.c
index c8bed4a26833..1f6954ae9dd3 100644
--- a/drivers/gpu/drm/nouveau/core/core/engine.c
+++ b/drivers/gpu/drm/nouveau/core/core/engine.c
@@ -42,11 +42,24 @@ nouveau_engine_create_(struct nouveau_object *parent,
if (ret)
return ret;
- if ( parent &&
- !nouveau_boolopt(nv_device(parent)->cfgopt, iname, enable)) {
- if (!enable)
- nv_warn(engine, "disabled, %s=1 to enable\n", iname);
- return -ENODEV;
+ if (parent) {
+ struct nouveau_device *device = nv_device(parent);
+ int engidx = nv_engidx(nv_object(engine));
+
+ if (device->disable_mask & (1ULL << engidx)) {
+ if (!nouveau_boolopt(device->cfgopt, iname, false)) {
+ nv_debug(engine, "engine disabled by hw/fw\n");
+ return -ENODEV;
+ }
+
+ nv_warn(engine, "ignoring hw/fw engine disable\n");
+ }
+
+ if (!nouveau_boolopt(device->cfgopt, iname, enable)) {
+ if (!enable)
+ nv_warn(engine, "disabled, %s=1 to enable\n", iname);
+ return -ENODEV;
+ }
}
INIT_LIST_HEAD(&engine->contexts);
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
index 48f06378d3f9..2ea5568b6cf5 100644
--- a/drivers/gpu/drm/nouveau/core/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -104,11 +104,8 @@ nouveau_subdev_create_(struct nouveau_object *parent,
if (parent) {
struct nouveau_device *device = nv_device(parent);
- int subidx = nv_hclass(subdev) & 0xff;
-
subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
subdev->mmio = nv_subdev(device)->mmio;
- device->subdev[subidx] = *pobject;
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index 993df09ad643..ac3291f781f6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -105,9 +105,6 @@ nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_copy_priv *priv;
int ret;
- if (nv_rd32(parent, 0x022500) & 0x00000100)
- return -ENODEV;
-
ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
"PCE0", "copy0", &priv);
*pobject = nv_object(priv);
@@ -133,9 +130,6 @@ nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_copy_priv *priv;
int ret;
- if (nv_rd32(parent, 0x022500) & 0x00000200)
- return -ENODEV;
-
ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
"PCE1", "copy1", &priv);
*pobject = nv_object(priv);
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
index 30f1ef1edcc5..748a61eb3c6f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -88,9 +88,6 @@ nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nve0_copy_priv *priv;
int ret;
- if (nv_rd32(parent, 0x022500) & 0x00000100)
- return -ENODEV;
-
ret = nouveau_engine_create(parent, engine, oclass, true,
"PCE0", "copy0", &priv);
*pobject = nv_object(priv);
@@ -112,9 +109,6 @@ nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nve0_copy_priv *priv;
int ret;
- if (nv_rd32(parent, 0x022500) & 0x00000200)
- return -ENODEV;
-
ret = nouveau_engine_create(parent, engine, oclass, true,
"PCE1", "copy1", &priv);
*pobject = nv_object(priv);
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 9135b25a29d0..dd01c6c435d6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -268,6 +268,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ device->subdev[i] = devobj->subdev[i];
+
/* note: can't init *any* subdevs until devinit has been run
* due to not knowing exactly what the vbios init tables will
* mess with. devinit also can't be run until all of its
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index dbd2dde7b7e7..32113b08c4d5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -49,12 +49,12 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv04_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
@@ -67,12 +67,12 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv05_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index 6e03dd6abeea..744f15d7e131 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -51,12 +51,12 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -68,12 +68,12 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
@@ -87,12 +87,12 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
@@ -106,12 +106,12 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
@@ -125,12 +125,12 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
@@ -144,12 +144,12 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -163,12 +163,12 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -182,12 +182,12 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index dcde53b9f07f..27ba61fb2710 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -52,12 +52,12 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -71,12 +71,12 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -90,12 +90,12 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -109,12 +109,12 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index 7b8662ef4f59..fd47ace67543 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -52,12 +52,12 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -71,12 +71,12 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -90,12 +90,12 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -110,12 +110,12 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -130,12 +130,12 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index c8c41e93695e..1b653dd74a70 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -57,12 +57,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -80,12 +80,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -103,12 +103,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -126,12 +126,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -149,12 +149,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -172,12 +172,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv47_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -195,12 +195,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -218,12 +218,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -241,12 +241,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -264,12 +264,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -287,12 +287,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -310,12 +310,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -333,12 +333,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv4e_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -356,12 +356,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -379,12 +379,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -402,12 +402,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index db3fc7be856a..81d5c26643d5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -65,12 +65,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv50_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -90,12 +90,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -118,12 +118,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -146,12 +146,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -174,12 +174,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -202,12 +202,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -230,12 +230,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -258,12 +258,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -286,12 +286,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -314,12 +314,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -342,12 +342,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
@@ -372,12 +372,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
@@ -401,12 +401,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
@@ -430,12 +430,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvaf_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvaf_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 8d06eef2b9ee..b7d66b59f43d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -65,14 +65,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
@@ -97,14 +97,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
@@ -129,14 +129,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
@@ -160,14 +160,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
@@ -192,14 +192,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
@@ -224,14 +224,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
@@ -255,14 +255,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
@@ -287,14 +287,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
@@ -318,14 +318,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 3900104976fc..987edbc30a09 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -65,14 +65,14 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
@@ -98,14 +98,14 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
@@ -131,14 +131,14 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
@@ -164,14 +164,14 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
@@ -199,29 +199,27 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-#if 0
- device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
-#endif
+ device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass;
-#if 0
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+#if 0
device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index a0bc8a89b699..7cf8b1348632 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -31,9 +31,45 @@ struct nv04_disp_priv {
struct nouveau_disp base;
};
+static int
+nv04_disp_scanoutpos(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nv04_disp_priv *priv = (void *)object->engine;
+ struct nv04_display_scanoutpos *args = data;
+ const int head = (mthd & NV04_DISP_MTHD_HEAD);
+ u32 line;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ args->vblanks = nv_rd32(priv, 0x680800 + (head * 0x2000)) & 0xffff;
+ args->vtotal = nv_rd32(priv, 0x680804 + (head * 0x2000)) & 0xffff;
+ args->vblanke = args->vtotal - 1;
+
+ args->hblanks = nv_rd32(priv, 0x680820 + (head * 0x2000)) & 0xffff;
+ args->htotal = nv_rd32(priv, 0x680824 + (head * 0x2000)) & 0xffff;
+ args->hblanke = args->htotal - 1;
+
+ args->time[0] = ktime_to_ns(ktime_get());
+ line = nv_rd32(priv, 0x600868 + (head * 0x2000));
+ args->time[1] = ktime_to_ns(ktime_get());
+ args->hline = (line & 0xffff0000) >> 16;
+ args->vline = (line & 0x0000ffff);
+ return 0;
+}
+
+#define HEAD_MTHD(n) (n), (n) + 0x01
+
+static struct nouveau_omthds
+nv04_disp_omthds[] = {
+ { HEAD_MTHD(NV04_DISP_SCANOUTPOS), nv04_disp_scanoutpos },
+ {}
+};
+
static struct nouveau_oclass
nv04_disp_sclass[] = {
- { NV04_DISP_CLASS, &nouveau_object_ofuncs },
+ { NV04_DISP_CLASS, &nouveau_object_ofuncs, nv04_disp_omthds },
{},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index c168ae3eaa97..940eaa5d8b9a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -541,6 +541,35 @@ nv50_disp_curs_ofuncs = {
* Base display object
******************************************************************************/
+int
+nv50_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv04_display_scanoutpos *args = data;
+ const int head = (mthd & NV50_DISP_MTHD_HEAD);
+ u32 blanke, blanks, total;
+
+ if (size < sizeof(*args) || head >= priv->head.nr)
+ return -EINVAL;
+ blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
+ blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
+ total = nv_rd32(priv, 0x610afc + (head * 0x540));
+
+ args->vblanke = (blanke & 0xffff0000) >> 16;
+ args->hblanke = (blanke & 0x0000ffff);
+ args->vblanks = (blanks & 0xffff0000) >> 16;
+ args->hblanks = (blanks & 0x0000ffff);
+ args->vtotal = ( total & 0xffff0000) >> 16;
+ args->htotal = ( total & 0x0000ffff);
+
+ args->time[0] = ktime_to_ns(ktime_get());
+ args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
+ args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
+ args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
+ return 0;
+}
+
static void
nv50_disp_base_vblank_enable(struct nouveau_event *event, int head)
{
@@ -675,6 +704,7 @@ nv50_disp_base_ofuncs = {
static struct nouveau_omthds
nv50_disp_base_omthds[] = {
+ { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
{ SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index 1ae6ceb56704..d31d426ea1f6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -43,6 +43,10 @@ struct nv50_disp_priv {
} pior;
};
+#define HEAD_MTHD(n) (n), (n) + 0x03
+
+int nv50_disp_base_scanoutpos(struct nouveau_object *, u32, void *, u32);
+
#define DAC_MTHD(n) (n), (n) + 0x03
int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
@@ -132,13 +136,12 @@ void nv50_disp_intr(struct nouveau_subdev *);
extern struct nouveau_omthds nv84_disp_base_omthds[];
-extern struct nouveau_omthds nva3_disp_base_omthds[];
-
extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
+extern struct nouveau_omthds nvd0_disp_base_omthds[];
extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
extern struct nouveau_oclass nvd0_disp_cclass;
void nvd0_disp_intr_supervisor(struct work_struct *);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index d8c74c0883a1..ef9ce300a496 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -41,6 +41,7 @@ nv84_disp_sclass[] = {
struct nouveau_omthds
nv84_disp_base_omthds[] = {
+ { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
{ SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index a66f949c1f84..a518543c00ab 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -41,6 +41,7 @@ nv94_disp_sclass[] = {
static struct nouveau_omthds
nv94_disp_base_omthds[] = {
+ { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
{ SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index b75413169eae..6ad6dcece43b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -39,8 +39,9 @@ nva3_disp_sclass[] = {
{}
};
-struct nouveau_omthds
+static struct nouveau_omthds
nva3_disp_base_omthds[] = {
+ { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
{ SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 378a015091d2..1c5e4e8b2c82 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -440,6 +440,36 @@ nvd0_disp_curs_ofuncs = {
* Base display object
******************************************************************************/
+static int
+nvd0_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv04_display_scanoutpos *args = data;
+ const int head = (mthd & NV50_DISP_MTHD_HEAD);
+ u32 blanke, blanks, total;
+
+ if (size < sizeof(*args) || head >= priv->head.nr)
+ return -EINVAL;
+
+ total = nv_rd32(priv, 0x640414 + (head * 0x300));
+ blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
+ blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
+
+ args->vblanke = (blanke & 0xffff0000) >> 16;
+ args->hblanke = (blanke & 0x0000ffff);
+ args->vblanks = (blanks & 0xffff0000) >> 16;
+ args->hblanks = (blanks & 0x0000ffff);
+ args->vtotal = ( total & 0xffff0000) >> 16;
+ args->htotal = ( total & 0x0000ffff);
+
+ args->time[0] = ktime_to_ns(ktime_get());
+ args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
+ args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
+ args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
+ return 0;
+}
+
static void
nvd0_disp_base_vblank_enable(struct nouveau_event *event, int head)
{
@@ -573,9 +603,24 @@ nvd0_disp_base_ofuncs = {
.fini = nvd0_disp_base_fini,
};
+struct nouveau_omthds
+nvd0_disp_base_omthds[] = {
+ { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nvd0_disp_base_scanoutpos },
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
+ {},
+};
+
static struct nouveau_oclass
nvd0_disp_base_oclass[] = {
- { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+ { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
{}
};
@@ -967,9 +1012,6 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int heads = nv_rd32(parent, 0x022448);
int ret;
- if (nv_rd32(parent, 0x022500) & 0x00000001)
- return -ENODEV;
-
ret = nouveau_disp_create(parent, engine, oclass, heads,
"PDISP", "display", &priv);
*pobject = nv_object(priv);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index fb1fe6ae5e74..ab63f32c00b2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -41,7 +41,7 @@ nve0_disp_sclass[] = {
static struct nouveau_oclass
nve0_disp_base_oclass[] = {
- { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+ { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
{}
};
@@ -54,9 +54,6 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int heads = nv_rd32(parent, 0x022448);
int ret;
- if (nv_rd32(parent, 0x022500) & 0x00000001)
- return -ENODEV;
-
ret = nouveau_disp_create(parent, engine, oclass, heads,
"PDISP", "display", &priv);
*pobject = nv_object(priv);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
index 42aa6b97dbea..05fee10e0c97 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -41,7 +41,7 @@ nvf0_disp_sclass[] = {
static struct nouveau_oclass
nvf0_disp_base_oclass[] = {
- { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+ { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
{}
};
@@ -54,9 +54,6 @@ nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int heads = nv_rd32(parent, 0x022448);
int ret;
- if (nv_rd32(parent, 0x022500) & 0x00000001)
- return -ENODEV;
-
ret = nouveau_disp_create(parent, engine, oclass, heads,
"PDISP", "display", &priv);
*pobject = nv_object(priv);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/vga.c b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
index 5a1c68474597..8836c3cb99c3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
@@ -138,10 +138,15 @@ nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value)
bool
nv_lockvgac(void *obj, bool lock)
{
+ struct nouveau_device *dev = nv_device(obj);
+
bool locked = !nv_rdvgac(obj, 0, 0x1f);
u8 data = lock ? 0x99 : 0x57;
- nv_wrvgac(obj, 0, 0x1f, data);
- if (nv_device(obj)->chipset == 0x11) {
+ if (dev->card_type < NV_50)
+ nv_wrvgac(obj, 0, 0x1f, data);
+ else
+ nv_wrvgac(obj, 0, 0x3f, data);
+ if (dev->chipset == 0x11) {
if (!(nv_rd32(obj, 0x001084) & 0x10000000))
nv_wrvgac(obj, 1, 0x1f, data);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c
index e03fc8e4dc1d..5e077e4ed7f6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c
@@ -56,6 +56,16 @@ _nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
nv_wr32(falcon, falcon->addr + addr, data);
}
+static void *
+vmemdup(const void *src, size_t len)
+{
+ void *p = vmalloc(len);
+
+ if (p)
+ memcpy(p, src, len);
+ return p;
+}
+
int
_nouveau_falcon_init(struct nouveau_object *object)
{
@@ -111,7 +121,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
ret = request_firmware(&fw, name, &device->pdev->dev);
if (ret == 0) {
- falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->code.data = vmemdup(fw->data, fw->size);
falcon->code.size = fw->size;
falcon->data.data = NULL;
falcon->data.size = 0;
@@ -134,7 +144,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
return ret;
}
- falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->data.data = vmemdup(fw->data, fw->size);
falcon->data.size = fw->size;
release_firmware(fw);
if (!falcon->data.data)
@@ -149,7 +159,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
return ret;
}
- falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->code.data = vmemdup(fw->data, fw->size);
falcon->code.size = fw->size;
release_firmware(fw);
if (!falcon->code.data)
@@ -235,8 +245,8 @@ _nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
if (!suspend) {
nouveau_gpuobj_ref(NULL, &falcon->core);
if (falcon->external) {
- kfree(falcon->data.data);
- kfree(falcon->code.data);
+ vfree(falcon->data.data);
+ vfree(falcon->code.data);
falcon->code.data = NULL;
}
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c
new file mode 100644
index 000000000000..09362a51ba57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nve0.h"
+
+struct nouveau_oclass *
+nv108_fifo_oclass = &(struct nve0_fifo_impl) {
+ .base.handle = NV_ENGINE(FIFO, 0x08),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_fifo_ctor,
+ .dtor = nve0_fifo_dtor,
+ .init = nve0_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+ .channels = 1024,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 9ac94d4e5646..b22a33f0702d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -33,6 +33,7 @@
#include <subdev/timer.h>
#include <subdev/bar.h>
+#include <subdev/fb.h>
#include <subdev/vm.h>
#include <engine/dmaobj.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 04f412922d2d..9a850fe19515 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -33,10 +33,12 @@
#include <subdev/timer.h>
#include <subdev/bar.h>
+#include <subdev/fb.h>
#include <subdev/vm.h>
#include <engine/dmaobj.h>
-#include <engine/fifo.h>
+
+#include "nve0.h"
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
@@ -56,8 +58,8 @@ static const struct {
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
struct nve0_fifo_engn {
- struct nouveau_gpuobj *playlist[2];
- int cur_playlist;
+ struct nouveau_gpuobj *runlist[2];
+ int cur_runlist;
};
struct nve0_fifo_priv {
@@ -86,7 +88,7 @@ struct nve0_fifo_chan {
******************************************************************************/
static void
-nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
+nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
{
struct nouveau_bar *bar = nouveau_bar(priv);
struct nve0_fifo_engn *engn = &priv->engine[engine];
@@ -95,8 +97,8 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
int i, p;
mutex_lock(&nv_subdev(priv)->mutex);
- cur = engn->playlist[engn->cur_playlist];
- engn->cur_playlist = !engn->cur_playlist;
+ cur = engn->runlist[engn->cur_runlist];
+ engn->cur_runlist = !engn->cur_runlist;
for (i = 0, p = 0; i < priv->base.max; i++) {
u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001;
@@ -111,7 +113,7 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
nv_wr32(priv, 0x002270, cur->addr >> 12);
nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
- nv_error(priv, "playlist %d update timeout\n", engine);
+ nv_error(priv, "runlist %d update timeout\n", engine);
mutex_unlock(&nv_subdev(priv)->mutex);
}
@@ -278,7 +280,7 @@ nve0_fifo_chan_init(struct nouveau_object *object)
nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
- nve0_fifo_playlist_update(priv, chan->engine);
+ nve0_fifo_runlist_update(priv, chan->engine);
nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
return 0;
}
@@ -291,7 +293,7 @@ nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
u32 chid = chan->base.chid;
nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
- nve0_fifo_playlist_update(priv, chan->engine);
+ nve0_fifo_runlist_update(priv, chan->engine);
nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
return nouveau_fifo_channel_fini(&chan->base, suspend);
@@ -375,54 +377,189 @@ nve0_fifo_cclass = {
* PFIFO engine
******************************************************************************/
-static const struct nouveau_enum nve0_fifo_fault_unit[] = {
+static const struct nouveau_enum nve0_fifo_sched_reason[] = {
+ { 0x0a, "CTXSW_TIMEOUT" },
+ {}
+};
+
+static const struct nouveau_enum nve0_fifo_fault_engine[] = {
+ { 0x00, "GR", NULL, NVDEV_ENGINE_GR },
+ { 0x03, "IFB" },
+ { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
+ { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
+ { 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
+ { 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
+ { 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
+ { 0x10, "MSVLD", NULL, NVDEV_ENGINE_BSP },
+ { 0x11, "MSPPP", NULL, NVDEV_ENGINE_PPP },
+ { 0x13, "PERF" },
+ { 0x14, "MSPDEC", NULL, NVDEV_ENGINE_VP },
+ { 0x15, "CE0", NULL, NVDEV_ENGINE_COPY0 },
+ { 0x16, "CE1", NULL, NVDEV_ENGINE_COPY1 },
+ { 0x17, "PMU" },
+ { 0x19, "MSENC", NULL, NVDEV_ENGINE_VENC },
+ { 0x1b, "CE2", NULL, NVDEV_ENGINE_COPY2 },
{}
};
static const struct nouveau_enum nve0_fifo_fault_reason[] = {
- { 0x00, "PT_NOT_PRESENT" },
- { 0x01, "PT_TOO_SHORT" },
- { 0x02, "PAGE_NOT_PRESENT" },
- { 0x03, "VM_LIMIT_EXCEEDED" },
- { 0x04, "NO_CHANNEL" },
- { 0x05, "PAGE_SYSTEM_ONLY" },
- { 0x06, "PAGE_READ_ONLY" },
- { 0x0a, "COMPRESSED_SYSRAM" },
- { 0x0c, "INVALID_STORAGE_TYPE" },
+ { 0x00, "PDE" },
+ { 0x01, "PDE_SIZE" },
+ { 0x02, "PTE" },
+ { 0x03, "VA_LIMIT_VIOLATION" },
+ { 0x04, "UNBOUND_INST_BLOCK" },
+ { 0x05, "PRIV_VIOLATION" },
+ { 0x06, "RO_VIOLATION" },
+ { 0x07, "WO_VIOLATION" },
+ { 0x08, "PITCH_MASK_VIOLATION" },
+ { 0x09, "WORK_CREATION" },
+ { 0x0a, "UNSUPPORTED_APERTURE" },
+ { 0x0b, "COMPRESSION_FAILURE" },
+ { 0x0c, "UNSUPPORTED_KIND" },
+ { 0x0d, "REGION_VIOLATION" },
+ { 0x0e, "BOTH_PTES_VALID" },
+ { 0x0f, "INFO_TYPE_POISONED" },
{}
};
static const struct nouveau_enum nve0_fifo_fault_hubclient[] = {
+ { 0x00, "VIP" },
+ { 0x01, "CE0" },
+ { 0x02, "CE1" },
+ { 0x03, "DNISO" },
+ { 0x04, "FE" },
+ { 0x05, "FECS" },
+ { 0x06, "HOST" },
+ { 0x07, "HOST_CPU" },
+ { 0x08, "HOST_CPU_NB" },
+ { 0x09, "ISO" },
+ { 0x0a, "MMU" },
+ { 0x0b, "MSPDEC" },
+ { 0x0c, "MSPPP" },
+ { 0x0d, "MSVLD" },
+ { 0x0e, "NISO" },
+ { 0x0f, "P2P" },
+ { 0x10, "PD" },
+ { 0x11, "PERF" },
+ { 0x12, "PMU" },
+ { 0x13, "RASTERTWOD" },
+ { 0x14, "SCC" },
+ { 0x15, "SCC_NB" },
+ { 0x16, "SEC" },
+ { 0x17, "SSYNC" },
+ { 0x18, "GR_COPY" },
+ { 0x19, "CE2" },
+ { 0x1a, "XV" },
+ { 0x1b, "MMU_NB" },
+ { 0x1c, "MSENC" },
+ { 0x1d, "DFALCON" },
+ { 0x1e, "SKED" },
+ { 0x1f, "AFALCON" },
{}
};
static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
+ { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
+ { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
+ { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
+ { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
+ { 0x0c, "RAST" },
+ { 0x0d, "GCC" },
+ { 0x0e, "GPCCS" },
+ { 0x0f, "PROP_0" },
+ { 0x10, "PROP_1" },
+ { 0x11, "PROP_2" },
+ { 0x12, "PROP_3" },
+ { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
+ { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
+ { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
+ { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
+ { 0x1f, "GPM" },
+ { 0x20, "LTP_UTLB_0" },
+ { 0x21, "LTP_UTLB_1" },
+ { 0x22, "LTP_UTLB_2" },
+ { 0x23, "LTP_UTLB_3" },
+ { 0x24, "GPC_RGG_UTLB" },
{}
};
-static const struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
- { 0x00200000, "ILLEGAL_MTHD" },
- { 0x00800000, "EMPTY_SUBC" },
+static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
+ { 0x00000001, "MEMREQ" },
+ { 0x00000002, "MEMACK_TIMEOUT" },
+ { 0x00000004, "MEMACK_EXTRA" },
+ { 0x00000008, "MEMDAT_TIMEOUT" },
+ { 0x00000010, "MEMDAT_EXTRA" },
+ { 0x00000020, "MEMFLUSH" },
+ { 0x00000040, "MEMOP" },
+ { 0x00000080, "LBCONNECT" },
+ { 0x00000100, "LBREQ" },
+ { 0x00000200, "LBACK_TIMEOUT" },
+ { 0x00000400, "LBACK_EXTRA" },
+ { 0x00000800, "LBDAT_TIMEOUT" },
+ { 0x00001000, "LBDAT_EXTRA" },
+ { 0x00002000, "GPFIFO" },
+ { 0x00004000, "GPPTR" },
+ { 0x00008000, "GPENTRY" },
+ { 0x00010000, "GPCRC" },
+ { 0x00020000, "PBPTR" },
+ { 0x00040000, "PBENTRY" },
+ { 0x00080000, "PBCRC" },
+ { 0x00100000, "XBARCONNECT" },
+ { 0x00200000, "METHOD" },
+ { 0x00400000, "METHODCRC" },
+ { 0x00800000, "DEVICE" },
+ { 0x02000000, "SEMAPHORE" },
+ { 0x04000000, "ACQUIRE" },
+ { 0x08000000, "PRI" },
+ { 0x20000000, "NO_CTXSW_SEG" },
+ { 0x40000000, "PBSEG" },
+ { 0x80000000, "SIGNATURE" },
{}
};
static void
-nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
+nve0_fifo_intr_sched(struct nve0_fifo_priv *priv)
+{
+ u32 intr = nv_rd32(priv, 0x00254c);
+ u32 code = intr & 0x000000ff;
+ nv_error(priv, "SCHED_ERROR [");
+ nouveau_enum_print(nve0_fifo_sched_reason, code);
+ pr_cont("]\n");
+}
+
+static void
+nve0_fifo_intr_chsw(struct nve0_fifo_priv *priv)
+{
+ u32 stat = nv_rd32(priv, 0x00256c);
+ nv_error(priv, "CHSW_ERROR 0x%08x\n", stat);
+ nv_wr32(priv, 0x00256c, stat);
+}
+
+static void
+nve0_fifo_intr_dropped_fault(struct nve0_fifo_priv *priv)
+{
+ u32 stat = nv_rd32(priv, 0x00259c);
+ nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat);
+}
+
+static void
+nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
{
u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
u32 client = (stat & 0x00001f00) >> 8;
- const struct nouveau_enum *en;
- struct nouveau_engine *engine;
+ struct nouveau_engine *engine = NULL;
struct nouveau_object *engctx = NULL;
+ const struct nouveau_enum *en;
+ const char *name = "unknown";
nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
"write" : "read", (u64)vahi << 32 | valo);
nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
pr_cont("] from ");
- en = nouveau_enum_print(nve0_fifo_fault_unit, unit);
+ en = nouveau_enum_print(nve0_fifo_fault_engine, unit);
if (stat & 0x00000040) {
pr_cont("/");
nouveau_enum_print(nve0_fifo_fault_hubclient, client);
@@ -432,14 +569,22 @@ nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
}
if (en && en->data2) {
- engine = nouveau_engine(priv, en->data2);
- if (engine)
- engctx = nouveau_engctx_get(engine, inst);
-
+ if (en->data2 == NVDEV_SUBDEV_BAR) {
+ nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
+ name = "BAR1";
+ } else
+ if (en->data2 == NVDEV_SUBDEV_INSTMEM) {
+ nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
+ name = "BAR3";
+ } else {
+ engine = nouveau_engine(priv, en->data2);
+ if (engine) {
+ engctx = nouveau_engctx_get(engine, inst);
+ name = nouveau_client_name(engctx);
+ }
+ }
}
-
- pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
- nouveau_client_name(engctx));
+ pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12, name);
nouveau_engctx_put(engctx);
}
@@ -471,7 +616,7 @@ out:
}
static void
-nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
+nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
{
u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
@@ -487,11 +632,11 @@ nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
}
if (show) {
- nv_error(priv, "SUBFIFO%d:", unit);
- nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
+ nv_error(priv, "PBDMA%d:", unit);
+ nouveau_bitfield_print(nve0_fifo_pbdma_intr, show);
pr_cont("\n");
nv_error(priv,
- "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+ "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
unit, chid,
nouveau_client_name_for_fifo_chid(&priv->base, chid),
subc, mthd, data);
@@ -508,19 +653,56 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
u32 mask = nv_rd32(priv, 0x002140);
u32 stat = nv_rd32(priv, 0x002100) & mask;
+ if (stat & 0x00000001) {
+ u32 stat = nv_rd32(priv, 0x00252c);
+ nv_error(priv, "BIND_ERROR 0x%08x\n", stat);
+ nv_wr32(priv, 0x002100, 0x00000001);
+ stat &= ~0x00000001;
+ }
+
+ if (stat & 0x00000010) {
+ nv_error(priv, "PIO_ERROR\n");
+ nv_wr32(priv, 0x002100, 0x00000010);
+ stat &= ~0x00000010;
+ }
+
if (stat & 0x00000100) {
- nv_warn(priv, "unknown status 0x00000100\n");
+ nve0_fifo_intr_sched(priv);
nv_wr32(priv, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
+ if (stat & 0x00010000) {
+ nve0_fifo_intr_chsw(priv);
+ nv_wr32(priv, 0x002100, 0x00010000);
+ stat &= ~0x00010000;
+ }
+
+ if (stat & 0x00800000) {
+ nv_error(priv, "FB_FLUSH_TIMEOUT\n");
+ nv_wr32(priv, 0x002100, 0x00800000);
+ stat &= ~0x00800000;
+ }
+
+ if (stat & 0x01000000) {
+ nv_error(priv, "LB_ERROR\n");
+ nv_wr32(priv, 0x002100, 0x01000000);
+ stat &= ~0x01000000;
+ }
+
+ if (stat & 0x08000000) {
+ nve0_fifo_intr_dropped_fault(priv);
+ nv_wr32(priv, 0x002100, 0x08000000);
+ stat &= ~0x08000000;
+ }
+
if (stat & 0x10000000) {
u32 units = nv_rd32(priv, 0x00259c);
u32 u = units;
while (u) {
int i = ffs(u) - 1;
- nve0_fifo_isr_vm_fault(priv, i);
+ nve0_fifo_intr_fault(priv, i);
u &= ~(1 << i);
}
@@ -529,22 +711,28 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x20000000) {
- u32 units = nv_rd32(priv, 0x0025a0);
- u32 u = units;
+ u32 mask = nv_rd32(priv, 0x0025a0);
+ u32 temp = mask;
- while (u) {
- int i = ffs(u) - 1;
- nve0_fifo_isr_subfifo_intr(priv, i);
- u &= ~(1 << i);
+ while (temp) {
+ u32 unit = ffs(temp) - 1;
+ nve0_fifo_intr_pbdma(priv, unit);
+ temp &= ~(1 << unit);
}
- nv_wr32(priv, 0x0025a0, units);
+ nv_wr32(priv, 0x0025a0, mask);
stat &= ~0x20000000;
}
if (stat & 0x40000000) {
- nv_warn(priv, "unknown status 0x40000000\n");
- nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+ u32 mask = nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+
+ while (mask) {
+ u32 engn = ffs(mask) - 1;
+ /* runlist event, not currently used */
+ mask &= ~(1 << engn);
+ }
+
stat &= ~0x40000000;
}
@@ -575,53 +763,52 @@ nve0_fifo_uevent_disable(struct nouveau_event *event, int index)
nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
}
-static int
-nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+int
+nve0_fifo_fini(struct nouveau_object *object, bool suspend)
{
- struct nve0_fifo_priv *priv;
- int ret, i;
+ struct nve0_fifo_priv *priv = (void *)object;
+ int ret;
- ret = nouveau_fifo_create(parent, engine, oclass, 0, 4095, &priv);
- *pobject = nv_object(priv);
+ ret = nouveau_fifo_fini(&priv->base, suspend);
if (ret)
return ret;
- for (i = 0; i < FIFO_ENGINE_NR; i++) {
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
- 0, &priv->engine[i].playlist[0]);
- if (ret)
- return ret;
+ /* allow mmu fault interrupts, even when we're not using fifo */
+ nv_mask(priv, 0x002140, 0x10000000, 0x10000000);
+ return 0;
+}
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
- 0, &priv->engine[i].playlist[1]);
- if (ret)
- return ret;
- }
+int
+nve0_fifo_init(struct nouveau_object *object)
+{
+ struct nve0_fifo_priv *priv = (void *)object;
+ int ret, i;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+ ret = nouveau_fifo_init(&priv->base);
if (ret)
return ret;
- ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
- &priv->user.bar);
- if (ret)
- return ret;
+ /* enable all available PBDMA units */
+ nv_wr32(priv, 0x000204, 0xffffffff);
+ priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
+ nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);
- priv->base.uevent->enable = nve0_fifo_uevent_enable;
- priv->base.uevent->disable = nve0_fifo_uevent_disable;
- priv->base.uevent->priv = priv;
+ /* PBDMA[n] */
+ for (i = 0; i < priv->spoon_nr; i++) {
+ nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+ }
- nv_subdev(priv)->unit = 0x00000100;
- nv_subdev(priv)->intr = nve0_fifo_intr;
- nv_engine(priv)->cclass = &nve0_fifo_cclass;
- nv_engine(priv)->sclass = nve0_fifo_sclass;
+ nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+
+ nv_wr32(priv, 0x002a00, 0xffffffff);
+ nv_wr32(priv, 0x002100, 0xffffffff);
+ nv_wr32(priv, 0x002140, 0x3fffffff);
return 0;
}
-static void
+void
nve0_fifo_dtor(struct nouveau_object *object)
{
struct nve0_fifo_priv *priv = (void *)object;
@@ -631,50 +818,69 @@ nve0_fifo_dtor(struct nouveau_object *object)
nouveau_gpuobj_ref(NULL, &priv->user.mem);
for (i = 0; i < FIFO_ENGINE_NR; i++) {
- nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
- nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
+ nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[1]);
+ nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[0]);
}
nouveau_fifo_destroy(&priv->base);
}
-static int
-nve0_fifo_init(struct nouveau_object *object)
+int
+nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nve0_fifo_priv *priv = (void *)object;
+ struct nve0_fifo_impl *impl = (void *)oclass;
+ struct nve0_fifo_priv *priv;
int ret, i;
- ret = nouveau_fifo_init(&priv->base);
+ ret = nouveau_fifo_create(parent, engine, oclass, 0,
+ impl->channels - 1, &priv);
+ *pobject = nv_object(priv);
if (ret)
return ret;
- /* enable all available PSUBFIFOs */
- nv_wr32(priv, 0x000204, 0xffffffff);
- priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
- nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
+ for (i = 0; i < FIFO_ENGINE_NR; i++) {
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
+ 0, &priv->engine[i].runlist[0]);
+ if (ret)
+ return ret;
- /* PSUBFIFO[n] */
- for (i = 0; i < priv->spoon_nr; i++) {
- nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
- nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
- nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
+ 0, &priv->engine[i].runlist[1]);
+ if (ret)
+ return ret;
}
- nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+ if (ret)
+ return ret;
- nv_wr32(priv, 0x002a00, 0xffffffff);
- nv_wr32(priv, 0x002100, 0xffffffff);
- nv_wr32(priv, 0x002140, 0x3fffffff);
+ ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
+ &priv->user.bar);
+ if (ret)
+ return ret;
+
+ priv->base.uevent->enable = nve0_fifo_uevent_enable;
+ priv->base.uevent->disable = nve0_fifo_uevent_disable;
+ priv->base.uevent->priv = priv;
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nve0_fifo_intr;
+ nv_engine(priv)->cclass = &nve0_fifo_cclass;
+ nv_engine(priv)->sclass = nve0_fifo_sclass;
return 0;
}
struct nouveau_oclass *
-nve0_fifo_oclass = &(struct nouveau_oclass) {
- .handle = NV_ENGINE(FIFO, 0xe0),
- .ofuncs = &(struct nouveau_ofuncs) {
+nve0_fifo_oclass = &(struct nve0_fifo_impl) {
+ .base.handle = NV_ENGINE(FIFO, 0xe0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_fifo_ctor,
.dtor = nve0_fifo_dtor,
.init = nve0_fifo_init,
- .fini = _nouveau_fifo_fini,
+ .fini = nve0_fifo_fini,
},
-};
+ .channels = 4096,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h
new file mode 100644
index 000000000000..014344ebee66
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_FIFO_NVE0_H__
+#define __NVKM_FIFO_NVE0_H__
+
+#include <engine/fifo.h>
+
+int nve0_fifo_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void nve0_fifo_dtor(struct nouveau_object *);
+int nve0_fifo_init(struct nouveau_object *);
+
+struct nve0_fifo_impl {
+ struct nouveau_oclass base;
+ u32 channels;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
new file mode 100644
index 000000000000..a86bd3352bf8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
@@ -0,0 +1,1408 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nvc0.h"
+
+static struct nvc0_graph_init
+nv108_grctx_init_icmd[] = {
+ { 0x001000, 1, 0x01, 0x00000004 },
+ { 0x000039, 3, 0x01, 0x00000000 },
+ { 0x0000a9, 1, 0x01, 0x0000ffff },
+ { 0x000038, 1, 0x01, 0x0fac6881 },
+ { 0x00003d, 1, 0x01, 0x00000001 },
+ { 0x0000e8, 8, 0x01, 0x00000400 },
+ { 0x000078, 8, 0x01, 0x00000300 },
+ { 0x000050, 1, 0x01, 0x00000011 },
+ { 0x000058, 8, 0x01, 0x00000008 },
+ { 0x000208, 8, 0x01, 0x00000001 },
+ { 0x000081, 1, 0x01, 0x00000001 },
+ { 0x000085, 1, 0x01, 0x00000004 },
+ { 0x000088, 1, 0x01, 0x00000400 },
+ { 0x000090, 1, 0x01, 0x00000300 },
+ { 0x000098, 1, 0x01, 0x00001001 },
+ { 0x0000e3, 1, 0x01, 0x00000001 },
+ { 0x0000da, 1, 0x01, 0x00000001 },
+ { 0x0000f8, 1, 0x01, 0x00000003 },
+ { 0x0000fa, 1, 0x01, 0x00000001 },
+ { 0x00009f, 4, 0x01, 0x0000ffff },
+ { 0x0000b1, 1, 0x01, 0x00000001 },
+ { 0x0000ad, 1, 0x01, 0x0000013e },
+ { 0x0000e1, 1, 0x01, 0x00000010 },
+ { 0x000290, 16, 0x01, 0x00000000 },
+ { 0x0003b0, 16, 0x01, 0x00000000 },
+ { 0x0002a0, 16, 0x01, 0x00000000 },
+ { 0x000420, 16, 0x01, 0x00000000 },
+ { 0x0002b0, 16, 0x01, 0x00000000 },
+ { 0x000430, 16, 0x01, 0x00000000 },
+ { 0x0002c0, 16, 0x01, 0x00000000 },
+ { 0x0004d0, 16, 0x01, 0x00000000 },
+ { 0x000720, 16, 0x01, 0x00000000 },
+ { 0x0008c0, 16, 0x01, 0x00000000 },
+ { 0x000890, 16, 0x01, 0x00000000 },
+ { 0x0008e0, 16, 0x01, 0x00000000 },
+ { 0x0008a0, 16, 0x01, 0x00000000 },
+ { 0x0008f0, 16, 0x01, 0x00000000 },
+ { 0x00094c, 1, 0x01, 0x000000ff },
+ { 0x00094d, 1, 0x01, 0xffffffff },
+ { 0x00094e, 1, 0x01, 0x00000002 },
+ { 0x0002ec, 1, 0x01, 0x00000001 },
+ { 0x0002f2, 2, 0x01, 0x00000001 },
+ { 0x0002f5, 1, 0x01, 0x00000001 },
+ { 0x0002f7, 1, 0x01, 0x00000001 },
+ { 0x000303, 1, 0x01, 0x00000001 },
+ { 0x0002e6, 1, 0x01, 0x00000001 },
+ { 0x000466, 1, 0x01, 0x00000052 },
+ { 0x000301, 1, 0x01, 0x3f800000 },
+ { 0x000304, 1, 0x01, 0x30201000 },
+ { 0x000305, 1, 0x01, 0x70605040 },
+ { 0x000306, 1, 0x01, 0xb8a89888 },
+ { 0x000307, 1, 0x01, 0xf8e8d8c8 },
+ { 0x00030a, 1, 0x01, 0x00ffff00 },
+ { 0x00030b, 1, 0x01, 0x0000001a },
+ { 0x00030c, 1, 0x01, 0x00000001 },
+ { 0x000318, 1, 0x01, 0x00000001 },
+ { 0x000340, 1, 0x01, 0x00000000 },
+ { 0x000375, 1, 0x01, 0x00000001 },
+ { 0x00037d, 1, 0x01, 0x00000006 },
+ { 0x0003a0, 1, 0x01, 0x00000002 },
+ { 0x0003aa, 1, 0x01, 0x00000001 },
+ { 0x0003a9, 1, 0x01, 0x00000001 },
+ { 0x000380, 1, 0x01, 0x00000001 },
+ { 0x000383, 1, 0x01, 0x00000011 },
+ { 0x000360, 1, 0x01, 0x00000040 },
+ { 0x000366, 2, 0x01, 0x00000000 },
+ { 0x000368, 1, 0x01, 0x00000fff },
+ { 0x000370, 2, 0x01, 0x00000000 },
+ { 0x000372, 1, 0x01, 0x000fffff },
+ { 0x00037a, 1, 0x01, 0x00000012 },
+ { 0x000619, 1, 0x01, 0x00000003 },
+ { 0x000811, 1, 0x01, 0x00000003 },
+ { 0x000812, 1, 0x01, 0x00000004 },
+ { 0x000813, 1, 0x01, 0x00000006 },
+ { 0x000814, 1, 0x01, 0x00000008 },
+ { 0x000815, 1, 0x01, 0x0000000b },
+ { 0x000800, 6, 0x01, 0x00000001 },
+ { 0x000632, 1, 0x01, 0x00000001 },
+ { 0x000633, 1, 0x01, 0x00000002 },
+ { 0x000634, 1, 0x01, 0x00000003 },
+ { 0x000635, 1, 0x01, 0x00000004 },
+ { 0x000654, 1, 0x01, 0x3f800000 },
+ { 0x000657, 1, 0x01, 0x3f800000 },
+ { 0x000655, 2, 0x01, 0x3f800000 },
+ { 0x0006cd, 1, 0x01, 0x3f800000 },
+ { 0x0007f5, 1, 0x01, 0x3f800000 },
+ { 0x0007dc, 1, 0x01, 0x39291909 },
+ { 0x0007dd, 1, 0x01, 0x79695949 },
+ { 0x0007de, 1, 0x01, 0xb9a99989 },
+ { 0x0007df, 1, 0x01, 0xf9e9d9c9 },
+ { 0x0007e8, 1, 0x01, 0x00003210 },
+ { 0x0007e9, 1, 0x01, 0x00007654 },
+ { 0x0007ea, 1, 0x01, 0x00000098 },
+ { 0x0007ec, 1, 0x01, 0x39291909 },
+ { 0x0007ed, 1, 0x01, 0x79695949 },
+ { 0x0007ee, 1, 0x01, 0xb9a99989 },
+ { 0x0007ef, 1, 0x01, 0xf9e9d9c9 },
+ { 0x0007f0, 1, 0x01, 0x00003210 },
+ { 0x0007f1, 1, 0x01, 0x00007654 },
+ { 0x0007f2, 1, 0x01, 0x00000098 },
+ { 0x0005a5, 1, 0x01, 0x00000001 },
+ { 0x000980, 128, 0x01, 0x00000000 },
+ { 0x000468, 1, 0x01, 0x00000004 },
+ { 0x00046c, 1, 0x01, 0x00000001 },
+ { 0x000470, 96, 0x01, 0x00000000 },
+ { 0x000510, 16, 0x01, 0x3f800000 },
+ { 0x000520, 1, 0x01, 0x000002b6 },
+ { 0x000529, 1, 0x01, 0x00000001 },
+ { 0x000530, 16, 0x01, 0xffff0000 },
+ { 0x000585, 1, 0x01, 0x0000003f },
+ { 0x000576, 1, 0x01, 0x00000003 },
+ { 0x00057b, 1, 0x01, 0x00000059 },
+ { 0x000586, 1, 0x01, 0x00000040 },
+ { 0x000582, 2, 0x01, 0x00000080 },
+ { 0x0005c2, 1, 0x01, 0x00000001 },
+ { 0x000638, 2, 0x01, 0x00000001 },
+ { 0x00063a, 1, 0x01, 0x00000002 },
+ { 0x00063b, 2, 0x01, 0x00000001 },
+ { 0x00063d, 1, 0x01, 0x00000002 },
+ { 0x00063e, 1, 0x01, 0x00000001 },
+ { 0x0008b8, 8, 0x01, 0x00000001 },
+ { 0x000900, 8, 0x01, 0x00000001 },
+ { 0x000908, 8, 0x01, 0x00000002 },
+ { 0x000910, 16, 0x01, 0x00000001 },
+ { 0x000920, 8, 0x01, 0x00000002 },
+ { 0x000928, 8, 0x01, 0x00000001 },
+ { 0x000662, 1, 0x01, 0x00000001 },
+ { 0x000648, 9, 0x01, 0x00000001 },
+ { 0x000658, 1, 0x01, 0x0000000f },
+ { 0x0007ff, 1, 0x01, 0x0000000a },
+ { 0x00066a, 1, 0x01, 0x40000000 },
+ { 0x00066b, 1, 0x01, 0x10000000 },
+ { 0x00066c, 2, 0x01, 0xffff0000 },
+ { 0x0007af, 2, 0x01, 0x00000008 },
+ { 0x0007f6, 1, 0x01, 0x00000001 },
+ { 0x00080b, 1, 0x01, 0x00000002 },
+ { 0x0006b2, 1, 0x01, 0x00000055 },
+ { 0x0007ad, 1, 0x01, 0x00000003 },
+ { 0x000937, 1, 0x01, 0x00000001 },
+ { 0x000971, 1, 0x01, 0x00000008 },
+ { 0x000972, 1, 0x01, 0x00000040 },
+ { 0x000973, 1, 0x01, 0x0000012c },
+ { 0x00097c, 1, 0x01, 0x00000040 },
+ { 0x000979, 1, 0x01, 0x00000003 },
+ { 0x000975, 1, 0x01, 0x00000020 },
+ { 0x000976, 1, 0x01, 0x00000001 },
+ { 0x000977, 1, 0x01, 0x00000020 },
+ { 0x000978, 1, 0x01, 0x00000001 },
+ { 0x000957, 1, 0x01, 0x00000003 },
+ { 0x00095e, 1, 0x01, 0x20164010 },
+ { 0x00095f, 1, 0x01, 0x00000020 },
+ { 0x000a0d, 1, 0x01, 0x00000006 },
+ { 0x00097d, 1, 0x01, 0x00000020 },
+ { 0x000683, 1, 0x01, 0x00000006 },
+ { 0x000685, 1, 0x01, 0x003fffff },
+ { 0x000687, 1, 0x01, 0x003fffff },
+ { 0x0006a0, 1, 0x01, 0x00000005 },
+ { 0x000840, 1, 0x01, 0x00400008 },
+ { 0x000841, 1, 0x01, 0x08000080 },
+ { 0x000842, 1, 0x01, 0x00400008 },
+ { 0x000843, 1, 0x01, 0x08000080 },
+ { 0x0006aa, 1, 0x01, 0x00000001 },
+ { 0x0006ab, 1, 0x01, 0x00000002 },
+ { 0x0006ac, 1, 0x01, 0x00000080 },
+ { 0x0006ad, 2, 0x01, 0x00000100 },
+ { 0x0006b1, 1, 0x01, 0x00000011 },
+ { 0x0006bb, 1, 0x01, 0x000000cf },
+ { 0x0006ce, 1, 0x01, 0x2a712488 },
+ { 0x000739, 1, 0x01, 0x4085c000 },
+ { 0x00073a, 1, 0x01, 0x00000080 },
+ { 0x000786, 1, 0x01, 0x80000100 },
+ { 0x00073c, 1, 0x01, 0x00010100 },
+ { 0x00073d, 1, 0x01, 0x02800000 },
+ { 0x000787, 1, 0x01, 0x000000cf },
+ { 0x00078c, 1, 0x01, 0x00000008 },
+ { 0x000792, 1, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
+ { 0x000797, 1, 0x01, 0x000000cf },
+ { 0x000836, 1, 0x01, 0x00000001 },
+ { 0x00079a, 1, 0x01, 0x00000002 },
+ { 0x000833, 1, 0x01, 0x04444480 },
+ { 0x0007a1, 1, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
+ { 0x000831, 1, 0x01, 0x00000004 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x000a04, 1, 0x01, 0x000000ff },
+ { 0x000a0b, 1, 0x01, 0x00000040 },
+ { 0x00097f, 1, 0x01, 0x00000100 },
+ { 0x000a02, 1, 0x01, 0x00000001 },
+ { 0x000809, 1, 0x01, 0x00000007 },
+ { 0x00c221, 1, 0x01, 0x00000040 },
+ { 0x00c1b0, 8, 0x01, 0x0000000f },
+ { 0x00c1b8, 1, 0x01, 0x0fac6881 },
+ { 0x00c1b9, 1, 0x01, 0x00fac688 },
+ { 0x00c401, 1, 0x01, 0x00000001 },
+ { 0x00c402, 1, 0x01, 0x00010001 },
+ { 0x00c403, 2, 0x01, 0x00000001 },
+ { 0x00c40e, 1, 0x01, 0x00000020 },
+ { 0x00c500, 1, 0x01, 0x00000003 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000002 },
+ { 0x0006aa, 1, 0x01, 0x00000001 },
+ { 0x0006ad, 2, 0x01, 0x00000100 },
+ { 0x0006b1, 1, 0x01, 0x00000011 },
+ { 0x00078c, 1, 0x01, 0x00000008 },
+ { 0x000792, 1, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
+ { 0x000797, 1, 0x01, 0x000000cf },
+ { 0x00079a, 1, 0x01, 0x00000002 },
+ { 0x0007a1, 1, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
+ { 0x000831, 1, 0x01, 0x00000004 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000008 },
+ { 0x000039, 3, 0x01, 0x00000000 },
+ { 0x000380, 1, 0x01, 0x00000001 },
+ { 0x000366, 2, 0x01, 0x00000000 },
+ { 0x000368, 1, 0x01, 0x00000fff },
+ { 0x000370, 2, 0x01, 0x00000000 },
+ { 0x000372, 1, 0x01, 0x000fffff },
+ { 0x000813, 1, 0x01, 0x00000006 },
+ { 0x000814, 1, 0x01, 0x00000008 },
+ { 0x000957, 1, 0x01, 0x00000003 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x000a04, 1, 0x01, 0x000000ff },
+ { 0x000a0b, 1, 0x01, 0x00000040 },
+ { 0x00097f, 1, 0x01, 0x00000100 },
+ { 0x000a02, 1, 0x01, 0x00000001 },
+ { 0x000809, 1, 0x01, 0x00000007 },
+ { 0x00c221, 1, 0x01, 0x00000040 },
+ { 0x00c401, 1, 0x01, 0x00000001 },
+ { 0x00c402, 1, 0x01, 0x00010001 },
+ { 0x00c403, 2, 0x01, 0x00000001 },
+ { 0x00c40e, 1, 0x01, 0x00000020 },
+ { 0x00c500, 1, 0x01, 0x00000003 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000001 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_a197[] = {
+ { 0x000800, 1, 0x04, 0x00000000 },
+ { 0x000840, 1, 0x04, 0x00000000 },
+ { 0x000880, 1, 0x04, 0x00000000 },
+ { 0x0008c0, 1, 0x04, 0x00000000 },
+ { 0x000900, 1, 0x04, 0x00000000 },
+ { 0x000940, 1, 0x04, 0x00000000 },
+ { 0x000980, 1, 0x04, 0x00000000 },
+ { 0x0009c0, 1, 0x04, 0x00000000 },
+ { 0x000804, 1, 0x04, 0x00000000 },
+ { 0x000844, 1, 0x04, 0x00000000 },
+ { 0x000884, 1, 0x04, 0x00000000 },
+ { 0x0008c4, 1, 0x04, 0x00000000 },
+ { 0x000904, 1, 0x04, 0x00000000 },
+ { 0x000944, 1, 0x04, 0x00000000 },
+ { 0x000984, 1, 0x04, 0x00000000 },
+ { 0x0009c4, 1, 0x04, 0x00000000 },
+ { 0x000808, 1, 0x04, 0x00000400 },
+ { 0x000848, 1, 0x04, 0x00000400 },
+ { 0x000888, 1, 0x04, 0x00000400 },
+ { 0x0008c8, 1, 0x04, 0x00000400 },
+ { 0x000908, 1, 0x04, 0x00000400 },
+ { 0x000948, 1, 0x04, 0x00000400 },
+ { 0x000988, 1, 0x04, 0x00000400 },
+ { 0x0009c8, 1, 0x04, 0x00000400 },
+ { 0x00080c, 1, 0x04, 0x00000300 },
+ { 0x00084c, 1, 0x04, 0x00000300 },
+ { 0x00088c, 1, 0x04, 0x00000300 },
+ { 0x0008cc, 1, 0x04, 0x00000300 },
+ { 0x00090c, 1, 0x04, 0x00000300 },
+ { 0x00094c, 1, 0x04, 0x00000300 },
+ { 0x00098c, 1, 0x04, 0x00000300 },
+ { 0x0009cc, 1, 0x04, 0x00000300 },
+ { 0x000810, 1, 0x04, 0x000000cf },
+ { 0x000850, 1, 0x04, 0x00000000 },
+ { 0x000890, 1, 0x04, 0x00000000 },
+ { 0x0008d0, 1, 0x04, 0x00000000 },
+ { 0x000910, 1, 0x04, 0x00000000 },
+ { 0x000950, 1, 0x04, 0x00000000 },
+ { 0x000990, 1, 0x04, 0x00000000 },
+ { 0x0009d0, 1, 0x04, 0x00000000 },
+ { 0x000814, 1, 0x04, 0x00000040 },
+ { 0x000854, 1, 0x04, 0x00000040 },
+ { 0x000894, 1, 0x04, 0x00000040 },
+ { 0x0008d4, 1, 0x04, 0x00000040 },
+ { 0x000914, 1, 0x04, 0x00000040 },
+ { 0x000954, 1, 0x04, 0x00000040 },
+ { 0x000994, 1, 0x04, 0x00000040 },
+ { 0x0009d4, 1, 0x04, 0x00000040 },
+ { 0x000818, 1, 0x04, 0x00000001 },
+ { 0x000858, 1, 0x04, 0x00000001 },
+ { 0x000898, 1, 0x04, 0x00000001 },
+ { 0x0008d8, 1, 0x04, 0x00000001 },
+ { 0x000918, 1, 0x04, 0x00000001 },
+ { 0x000958, 1, 0x04, 0x00000001 },
+ { 0x000998, 1, 0x04, 0x00000001 },
+ { 0x0009d8, 1, 0x04, 0x00000001 },
+ { 0x00081c, 1, 0x04, 0x00000000 },
+ { 0x00085c, 1, 0x04, 0x00000000 },
+ { 0x00089c, 1, 0x04, 0x00000000 },
+ { 0x0008dc, 1, 0x04, 0x00000000 },
+ { 0x00091c, 1, 0x04, 0x00000000 },
+ { 0x00095c, 1, 0x04, 0x00000000 },
+ { 0x00099c, 1, 0x04, 0x00000000 },
+ { 0x0009dc, 1, 0x04, 0x00000000 },
+ { 0x000820, 1, 0x04, 0x00000000 },
+ { 0x000860, 1, 0x04, 0x00000000 },
+ { 0x0008a0, 1, 0x04, 0x00000000 },
+ { 0x0008e0, 1, 0x04, 0x00000000 },
+ { 0x000920, 1, 0x04, 0x00000000 },
+ { 0x000960, 1, 0x04, 0x00000000 },
+ { 0x0009a0, 1, 0x04, 0x00000000 },
+ { 0x0009e0, 1, 0x04, 0x00000000 },
+ { 0x001c00, 1, 0x04, 0x00000000 },
+ { 0x001c10, 1, 0x04, 0x00000000 },
+ { 0x001c20, 1, 0x04, 0x00000000 },
+ { 0x001c30, 1, 0x04, 0x00000000 },
+ { 0x001c40, 1, 0x04, 0x00000000 },
+ { 0x001c50, 1, 0x04, 0x00000000 },
+ { 0x001c60, 1, 0x04, 0x00000000 },
+ { 0x001c70, 1, 0x04, 0x00000000 },
+ { 0x001c80, 1, 0x04, 0x00000000 },
+ { 0x001c90, 1, 0x04, 0x00000000 },
+ { 0x001ca0, 1, 0x04, 0x00000000 },
+ { 0x001cb0, 1, 0x04, 0x00000000 },
+ { 0x001cc0, 1, 0x04, 0x00000000 },
+ { 0x001cd0, 1, 0x04, 0x00000000 },
+ { 0x001ce0, 1, 0x04, 0x00000000 },
+ { 0x001cf0, 1, 0x04, 0x00000000 },
+ { 0x001c04, 1, 0x04, 0x00000000 },
+ { 0x001c14, 1, 0x04, 0x00000000 },
+ { 0x001c24, 1, 0x04, 0x00000000 },
+ { 0x001c34, 1, 0x04, 0x00000000 },
+ { 0x001c44, 1, 0x04, 0x00000000 },
+ { 0x001c54, 1, 0x04, 0x00000000 },
+ { 0x001c64, 1, 0x04, 0x00000000 },
+ { 0x001c74, 1, 0x04, 0x00000000 },
+ { 0x001c84, 1, 0x04, 0x00000000 },
+ { 0x001c94, 1, 0x04, 0x00000000 },
+ { 0x001ca4, 1, 0x04, 0x00000000 },
+ { 0x001cb4, 1, 0x04, 0x00000000 },
+ { 0x001cc4, 1, 0x04, 0x00000000 },
+ { 0x001cd4, 1, 0x04, 0x00000000 },
+ { 0x001ce4, 1, 0x04, 0x00000000 },
+ { 0x001cf4, 1, 0x04, 0x00000000 },
+ { 0x001c08, 1, 0x04, 0x00000000 },
+ { 0x001c18, 1, 0x04, 0x00000000 },
+ { 0x001c28, 1, 0x04, 0x00000000 },
+ { 0x001c38, 1, 0x04, 0x00000000 },
+ { 0x001c48, 1, 0x04, 0x00000000 },
+ { 0x001c58, 1, 0x04, 0x00000000 },
+ { 0x001c68, 1, 0x04, 0x00000000 },
+ { 0x001c78, 1, 0x04, 0x00000000 },
+ { 0x001c88, 1, 0x04, 0x00000000 },
+ { 0x001c98, 1, 0x04, 0x00000000 },
+ { 0x001ca8, 1, 0x04, 0x00000000 },
+ { 0x001cb8, 1, 0x04, 0x00000000 },
+ { 0x001cc8, 1, 0x04, 0x00000000 },
+ { 0x001cd8, 1, 0x04, 0x00000000 },
+ { 0x001ce8, 1, 0x04, 0x00000000 },
+ { 0x001cf8, 1, 0x04, 0x00000000 },
+ { 0x001c0c, 1, 0x04, 0x00000000 },
+ { 0x001c1c, 1, 0x04, 0x00000000 },
+ { 0x001c2c, 1, 0x04, 0x00000000 },
+ { 0x001c3c, 1, 0x04, 0x00000000 },
+ { 0x001c4c, 1, 0x04, 0x00000000 },
+ { 0x001c5c, 1, 0x04, 0x00000000 },
+ { 0x001c6c, 1, 0x04, 0x00000000 },
+ { 0x001c7c, 1, 0x04, 0x00000000 },
+ { 0x001c8c, 1, 0x04, 0x00000000 },
+ { 0x001c9c, 1, 0x04, 0x00000000 },
+ { 0x001cac, 1, 0x04, 0x00000000 },
+ { 0x001cbc, 1, 0x04, 0x00000000 },
+ { 0x001ccc, 1, 0x04, 0x00000000 },
+ { 0x001cdc, 1, 0x04, 0x00000000 },
+ { 0x001cec, 1, 0x04, 0x00000000 },
+ { 0x001cfc, 2, 0x04, 0x00000000 },
+ { 0x001d10, 1, 0x04, 0x00000000 },
+ { 0x001d20, 1, 0x04, 0x00000000 },
+ { 0x001d30, 1, 0x04, 0x00000000 },
+ { 0x001d40, 1, 0x04, 0x00000000 },
+ { 0x001d50, 1, 0x04, 0x00000000 },
+ { 0x001d60, 1, 0x04, 0x00000000 },
+ { 0x001d70, 1, 0x04, 0x00000000 },
+ { 0x001d80, 1, 0x04, 0x00000000 },
+ { 0x001d90, 1, 0x04, 0x00000000 },
+ { 0x001da0, 1, 0x04, 0x00000000 },
+ { 0x001db0, 1, 0x04, 0x00000000 },
+ { 0x001dc0, 1, 0x04, 0x00000000 },
+ { 0x001dd0, 1, 0x04, 0x00000000 },
+ { 0x001de0, 1, 0x04, 0x00000000 },
+ { 0x001df0, 1, 0x04, 0x00000000 },
+ { 0x001d04, 1, 0x04, 0x00000000 },
+ { 0x001d14, 1, 0x04, 0x00000000 },
+ { 0x001d24, 1, 0x04, 0x00000000 },
+ { 0x001d34, 1, 0x04, 0x00000000 },
+ { 0x001d44, 1, 0x04, 0x00000000 },
+ { 0x001d54, 1, 0x04, 0x00000000 },
+ { 0x001d64, 1, 0x04, 0x00000000 },
+ { 0x001d74, 1, 0x04, 0x00000000 },
+ { 0x001d84, 1, 0x04, 0x00000000 },
+ { 0x001d94, 1, 0x04, 0x00000000 },
+ { 0x001da4, 1, 0x04, 0x00000000 },
+ { 0x001db4, 1, 0x04, 0x00000000 },
+ { 0x001dc4, 1, 0x04, 0x00000000 },
+ { 0x001dd4, 1, 0x04, 0x00000000 },
+ { 0x001de4, 1, 0x04, 0x00000000 },
+ { 0x001df4, 1, 0x04, 0x00000000 },
+ { 0x001d08, 1, 0x04, 0x00000000 },
+ { 0x001d18, 1, 0x04, 0x00000000 },
+ { 0x001d28, 1, 0x04, 0x00000000 },
+ { 0x001d38, 1, 0x04, 0x00000000 },
+ { 0x001d48, 1, 0x04, 0x00000000 },
+ { 0x001d58, 1, 0x04, 0x00000000 },
+ { 0x001d68, 1, 0x04, 0x00000000 },
+ { 0x001d78, 1, 0x04, 0x00000000 },
+ { 0x001d88, 1, 0x04, 0x00000000 },
+ { 0x001d98, 1, 0x04, 0x00000000 },
+ { 0x001da8, 1, 0x04, 0x00000000 },
+ { 0x001db8, 1, 0x04, 0x00000000 },
+ { 0x001dc8, 1, 0x04, 0x00000000 },
+ { 0x001dd8, 1, 0x04, 0x00000000 },
+ { 0x001de8, 1, 0x04, 0x00000000 },
+ { 0x001df8, 1, 0x04, 0x00000000 },
+ { 0x001d0c, 1, 0x04, 0x00000000 },
+ { 0x001d1c, 1, 0x04, 0x00000000 },
+ { 0x001d2c, 1, 0x04, 0x00000000 },
+ { 0x001d3c, 1, 0x04, 0x00000000 },
+ { 0x001d4c, 1, 0x04, 0x00000000 },
+ { 0x001d5c, 1, 0x04, 0x00000000 },
+ { 0x001d6c, 1, 0x04, 0x00000000 },
+ { 0x001d7c, 1, 0x04, 0x00000000 },
+ { 0x001d8c, 1, 0x04, 0x00000000 },
+ { 0x001d9c, 1, 0x04, 0x00000000 },
+ { 0x001dac, 1, 0x04, 0x00000000 },
+ { 0x001dbc, 1, 0x04, 0x00000000 },
+ { 0x001dcc, 1, 0x04, 0x00000000 },
+ { 0x001ddc, 1, 0x04, 0x00000000 },
+ { 0x001dec, 1, 0x04, 0x00000000 },
+ { 0x001dfc, 1, 0x04, 0x00000000 },
+ { 0x001f00, 1, 0x04, 0x00000000 },
+ { 0x001f08, 1, 0x04, 0x00000000 },
+ { 0x001f10, 1, 0x04, 0x00000000 },
+ { 0x001f18, 1, 0x04, 0x00000000 },
+ { 0x001f20, 1, 0x04, 0x00000000 },
+ { 0x001f28, 1, 0x04, 0x00000000 },
+ { 0x001f30, 1, 0x04, 0x00000000 },
+ { 0x001f38, 1, 0x04, 0x00000000 },
+ { 0x001f40, 1, 0x04, 0x00000000 },
+ { 0x001f48, 1, 0x04, 0x00000000 },
+ { 0x001f50, 1, 0x04, 0x00000000 },
+ { 0x001f58, 1, 0x04, 0x00000000 },
+ { 0x001f60, 1, 0x04, 0x00000000 },
+ { 0x001f68, 1, 0x04, 0x00000000 },
+ { 0x001f70, 1, 0x04, 0x00000000 },
+ { 0x001f78, 1, 0x04, 0x00000000 },
+ { 0x001f04, 1, 0x04, 0x00000000 },
+ { 0x001f0c, 1, 0x04, 0x00000000 },
+ { 0x001f14, 1, 0x04, 0x00000000 },
+ { 0x001f1c, 1, 0x04, 0x00000000 },
+ { 0x001f24, 1, 0x04, 0x00000000 },
+ { 0x001f2c, 1, 0x04, 0x00000000 },
+ { 0x001f34, 1, 0x04, 0x00000000 },
+ { 0x001f3c, 1, 0x04, 0x00000000 },
+ { 0x001f44, 1, 0x04, 0x00000000 },
+ { 0x001f4c, 1, 0x04, 0x00000000 },
+ { 0x001f54, 1, 0x04, 0x00000000 },
+ { 0x001f5c, 1, 0x04, 0x00000000 },
+ { 0x001f64, 1, 0x04, 0x00000000 },
+ { 0x001f6c, 1, 0x04, 0x00000000 },
+ { 0x001f74, 1, 0x04, 0x00000000 },
+ { 0x001f7c, 2, 0x04, 0x00000000 },
+ { 0x001f88, 1, 0x04, 0x00000000 },
+ { 0x001f90, 1, 0x04, 0x00000000 },
+ { 0x001f98, 1, 0x04, 0x00000000 },
+ { 0x001fa0, 1, 0x04, 0x00000000 },
+ { 0x001fa8, 1, 0x04, 0x00000000 },
+ { 0x001fb0, 1, 0x04, 0x00000000 },
+ { 0x001fb8, 1, 0x04, 0x00000000 },
+ { 0x001fc0, 1, 0x04, 0x00000000 },
+ { 0x001fc8, 1, 0x04, 0x00000000 },
+ { 0x001fd0, 1, 0x04, 0x00000000 },
+ { 0x001fd8, 1, 0x04, 0x00000000 },
+ { 0x001fe0, 1, 0x04, 0x00000000 },
+ { 0x001fe8, 1, 0x04, 0x00000000 },
+ { 0x001ff0, 1, 0x04, 0x00000000 },
+ { 0x001ff8, 1, 0x04, 0x00000000 },
+ { 0x001f84, 1, 0x04, 0x00000000 },
+ { 0x001f8c, 1, 0x04, 0x00000000 },
+ { 0x001f94, 1, 0x04, 0x00000000 },
+ { 0x001f9c, 1, 0x04, 0x00000000 },
+ { 0x001fa4, 1, 0x04, 0x00000000 },
+ { 0x001fac, 1, 0x04, 0x00000000 },
+ { 0x001fb4, 1, 0x04, 0x00000000 },
+ { 0x001fbc, 1, 0x04, 0x00000000 },
+ { 0x001fc4, 1, 0x04, 0x00000000 },
+ { 0x001fcc, 1, 0x04, 0x00000000 },
+ { 0x001fd4, 1, 0x04, 0x00000000 },
+ { 0x001fdc, 1, 0x04, 0x00000000 },
+ { 0x001fe4, 1, 0x04, 0x00000000 },
+ { 0x001fec, 1, 0x04, 0x00000000 },
+ { 0x001ff4, 1, 0x04, 0x00000000 },
+ { 0x001ffc, 2, 0x04, 0x00000000 },
+ { 0x002040, 1, 0x04, 0x00000011 },
+ { 0x002080, 1, 0x04, 0x00000020 },
+ { 0x0020c0, 1, 0x04, 0x00000030 },
+ { 0x002100, 1, 0x04, 0x00000040 },
+ { 0x002140, 1, 0x04, 0x00000051 },
+ { 0x00200c, 1, 0x04, 0x00000001 },
+ { 0x00204c, 1, 0x04, 0x00000001 },
+ { 0x00208c, 1, 0x04, 0x00000001 },
+ { 0x0020cc, 1, 0x04, 0x00000001 },
+ { 0x00210c, 1, 0x04, 0x00000001 },
+ { 0x00214c, 1, 0x04, 0x00000001 },
+ { 0x002010, 1, 0x04, 0x00000000 },
+ { 0x002050, 1, 0x04, 0x00000000 },
+ { 0x002090, 1, 0x04, 0x00000001 },
+ { 0x0020d0, 1, 0x04, 0x00000002 },
+ { 0x002110, 1, 0x04, 0x00000003 },
+ { 0x002150, 1, 0x04, 0x00000004 },
+ { 0x000380, 1, 0x04, 0x00000000 },
+ { 0x0003a0, 1, 0x04, 0x00000000 },
+ { 0x0003c0, 1, 0x04, 0x00000000 },
+ { 0x0003e0, 1, 0x04, 0x00000000 },
+ { 0x000384, 1, 0x04, 0x00000000 },
+ { 0x0003a4, 1, 0x04, 0x00000000 },
+ { 0x0003c4, 1, 0x04, 0x00000000 },
+ { 0x0003e4, 1, 0x04, 0x00000000 },
+ { 0x000388, 1, 0x04, 0x00000000 },
+ { 0x0003a8, 1, 0x04, 0x00000000 },
+ { 0x0003c8, 1, 0x04, 0x00000000 },
+ { 0x0003e8, 1, 0x04, 0x00000000 },
+ { 0x00038c, 1, 0x04, 0x00000000 },
+ { 0x0003ac, 1, 0x04, 0x00000000 },
+ { 0x0003cc, 1, 0x04, 0x00000000 },
+ { 0x0003ec, 1, 0x04, 0x00000000 },
+ { 0x000700, 1, 0x04, 0x00000000 },
+ { 0x000710, 1, 0x04, 0x00000000 },
+ { 0x000720, 1, 0x04, 0x00000000 },
+ { 0x000730, 1, 0x04, 0x00000000 },
+ { 0x000704, 1, 0x04, 0x00000000 },
+ { 0x000714, 1, 0x04, 0x00000000 },
+ { 0x000724, 1, 0x04, 0x00000000 },
+ { 0x000734, 1, 0x04, 0x00000000 },
+ { 0x000708, 1, 0x04, 0x00000000 },
+ { 0x000718, 1, 0x04, 0x00000000 },
+ { 0x000728, 1, 0x04, 0x00000000 },
+ { 0x000738, 1, 0x04, 0x00000000 },
+ { 0x002800, 128, 0x04, 0x00000000 },
+ { 0x000a00, 1, 0x04, 0x00000000 },
+ { 0x000a20, 1, 0x04, 0x00000000 },
+ { 0x000a40, 1, 0x04, 0x00000000 },
+ { 0x000a60, 1, 0x04, 0x00000000 },
+ { 0x000a80, 1, 0x04, 0x00000000 },
+ { 0x000aa0, 1, 0x04, 0x00000000 },
+ { 0x000ac0, 1, 0x04, 0x00000000 },
+ { 0x000ae0, 1, 0x04, 0x00000000 },
+ { 0x000b00, 1, 0x04, 0x00000000 },
+ { 0x000b20, 1, 0x04, 0x00000000 },
+ { 0x000b40, 1, 0x04, 0x00000000 },
+ { 0x000b60, 1, 0x04, 0x00000000 },
+ { 0x000b80, 1, 0x04, 0x00000000 },
+ { 0x000ba0, 1, 0x04, 0x00000000 },
+ { 0x000bc0, 1, 0x04, 0x00000000 },
+ { 0x000be0, 1, 0x04, 0x00000000 },
+ { 0x000a04, 1, 0x04, 0x00000000 },
+ { 0x000a24, 1, 0x04, 0x00000000 },
+ { 0x000a44, 1, 0x04, 0x00000000 },
+ { 0x000a64, 1, 0x04, 0x00000000 },
+ { 0x000a84, 1, 0x04, 0x00000000 },
+ { 0x000aa4, 1, 0x04, 0x00000000 },
+ { 0x000ac4, 1, 0x04, 0x00000000 },
+ { 0x000ae4, 1, 0x04, 0x00000000 },
+ { 0x000b04, 1, 0x04, 0x00000000 },
+ { 0x000b24, 1, 0x04, 0x00000000 },
+ { 0x000b44, 1, 0x04, 0x00000000 },
+ { 0x000b64, 1, 0x04, 0x00000000 },
+ { 0x000b84, 1, 0x04, 0x00000000 },
+ { 0x000ba4, 1, 0x04, 0x00000000 },
+ { 0x000bc4, 1, 0x04, 0x00000000 },
+ { 0x000be4, 1, 0x04, 0x00000000 },
+ { 0x000a08, 1, 0x04, 0x00000000 },
+ { 0x000a28, 1, 0x04, 0x00000000 },
+ { 0x000a48, 1, 0x04, 0x00000000 },
+ { 0x000a68, 1, 0x04, 0x00000000 },
+ { 0x000a88, 1, 0x04, 0x00000000 },
+ { 0x000aa8, 1, 0x04, 0x00000000 },
+ { 0x000ac8, 1, 0x04, 0x00000000 },
+ { 0x000ae8, 1, 0x04, 0x00000000 },
+ { 0x000b08, 1, 0x04, 0x00000000 },
+ { 0x000b28, 1, 0x04, 0x00000000 },
+ { 0x000b48, 1, 0x04, 0x00000000 },
+ { 0x000b68, 1, 0x04, 0x00000000 },
+ { 0x000b88, 1, 0x04, 0x00000000 },
+ { 0x000ba8, 1, 0x04, 0x00000000 },
+ { 0x000bc8, 1, 0x04, 0x00000000 },
+ { 0x000be8, 1, 0x04, 0x00000000 },
+ { 0x000a0c, 1, 0x04, 0x00000000 },
+ { 0x000a2c, 1, 0x04, 0x00000000 },
+ { 0x000a4c, 1, 0x04, 0x00000000 },
+ { 0x000a6c, 1, 0x04, 0x00000000 },
+ { 0x000a8c, 1, 0x04, 0x00000000 },
+ { 0x000aac, 1, 0x04, 0x00000000 },
+ { 0x000acc, 1, 0x04, 0x00000000 },
+ { 0x000aec, 1, 0x04, 0x00000000 },
+ { 0x000b0c, 1, 0x04, 0x00000000 },
+ { 0x000b2c, 1, 0x04, 0x00000000 },
+ { 0x000b4c, 1, 0x04, 0x00000000 },
+ { 0x000b6c, 1, 0x04, 0x00000000 },
+ { 0x000b8c, 1, 0x04, 0x00000000 },
+ { 0x000bac, 1, 0x04, 0x00000000 },
+ { 0x000bcc, 1, 0x04, 0x00000000 },
+ { 0x000bec, 1, 0x04, 0x00000000 },
+ { 0x000a10, 1, 0x04, 0x00000000 },
+ { 0x000a30, 1, 0x04, 0x00000000 },
+ { 0x000a50, 1, 0x04, 0x00000000 },
+ { 0x000a70, 1, 0x04, 0x00000000 },
+ { 0x000a90, 1, 0x04, 0x00000000 },
+ { 0x000ab0, 1, 0x04, 0x00000000 },
+ { 0x000ad0, 1, 0x04, 0x00000000 },
+ { 0x000af0, 1, 0x04, 0x00000000 },
+ { 0x000b10, 1, 0x04, 0x00000000 },
+ { 0x000b30, 1, 0x04, 0x00000000 },
+ { 0x000b50, 1, 0x04, 0x00000000 },
+ { 0x000b70, 1, 0x04, 0x00000000 },
+ { 0x000b90, 1, 0x04, 0x00000000 },
+ { 0x000bb0, 1, 0x04, 0x00000000 },
+ { 0x000bd0, 1, 0x04, 0x00000000 },
+ { 0x000bf0, 1, 0x04, 0x00000000 },
+ { 0x000a14, 1, 0x04, 0x00000000 },
+ { 0x000a34, 1, 0x04, 0x00000000 },
+ { 0x000a54, 1, 0x04, 0x00000000 },
+ { 0x000a74, 1, 0x04, 0x00000000 },
+ { 0x000a94, 1, 0x04, 0x00000000 },
+ { 0x000ab4, 1, 0x04, 0x00000000 },
+ { 0x000ad4, 1, 0x04, 0x00000000 },
+ { 0x000af4, 1, 0x04, 0x00000000 },
+ { 0x000b14, 1, 0x04, 0x00000000 },
+ { 0x000b34, 1, 0x04, 0x00000000 },
+ { 0x000b54, 1, 0x04, 0x00000000 },
+ { 0x000b74, 1, 0x04, 0x00000000 },
+ { 0x000b94, 1, 0x04, 0x00000000 },
+ { 0x000bb4, 1, 0x04, 0x00000000 },
+ { 0x000bd4, 1, 0x04, 0x00000000 },
+ { 0x000bf4, 1, 0x04, 0x00000000 },
+ { 0x000c00, 1, 0x04, 0x00000000 },
+ { 0x000c10, 1, 0x04, 0x00000000 },
+ { 0x000c20, 1, 0x04, 0x00000000 },
+ { 0x000c30, 1, 0x04, 0x00000000 },
+ { 0x000c40, 1, 0x04, 0x00000000 },
+ { 0x000c50, 1, 0x04, 0x00000000 },
+ { 0x000c60, 1, 0x04, 0x00000000 },
+ { 0x000c70, 1, 0x04, 0x00000000 },
+ { 0x000c80, 1, 0x04, 0x00000000 },
+ { 0x000c90, 1, 0x04, 0x00000000 },
+ { 0x000ca0, 1, 0x04, 0x00000000 },
+ { 0x000cb0, 1, 0x04, 0x00000000 },
+ { 0x000cc0, 1, 0x04, 0x00000000 },
+ { 0x000cd0, 1, 0x04, 0x00000000 },
+ { 0x000ce0, 1, 0x04, 0x00000000 },
+ { 0x000cf0, 1, 0x04, 0x00000000 },
+ { 0x000c04, 1, 0x04, 0x00000000 },
+ { 0x000c14, 1, 0x04, 0x00000000 },
+ { 0x000c24, 1, 0x04, 0x00000000 },
+ { 0x000c34, 1, 0x04, 0x00000000 },
+ { 0x000c44, 1, 0x04, 0x00000000 },
+ { 0x000c54, 1, 0x04, 0x00000000 },
+ { 0x000c64, 1, 0x04, 0x00000000 },
+ { 0x000c74, 1, 0x04, 0x00000000 },
+ { 0x000c84, 1, 0x04, 0x00000000 },
+ { 0x000c94, 1, 0x04, 0x00000000 },
+ { 0x000ca4, 1, 0x04, 0x00000000 },
+ { 0x000cb4, 1, 0x04, 0x00000000 },
+ { 0x000cc4, 1, 0x04, 0x00000000 },
+ { 0x000cd4, 1, 0x04, 0x00000000 },
+ { 0x000ce4, 1, 0x04, 0x00000000 },
+ { 0x000cf4, 1, 0x04, 0x00000000 },
+ { 0x000c08, 1, 0x04, 0x00000000 },
+ { 0x000c18, 1, 0x04, 0x00000000 },
+ { 0x000c28, 1, 0x04, 0x00000000 },
+ { 0x000c38, 1, 0x04, 0x00000000 },
+ { 0x000c48, 1, 0x04, 0x00000000 },
+ { 0x000c58, 1, 0x04, 0x00000000 },
+ { 0x000c68, 1, 0x04, 0x00000000 },
+ { 0x000c78, 1, 0x04, 0x00000000 },
+ { 0x000c88, 1, 0x04, 0x00000000 },
+ { 0x000c98, 1, 0x04, 0x00000000 },
+ { 0x000ca8, 1, 0x04, 0x00000000 },
+ { 0x000cb8, 1, 0x04, 0x00000000 },
+ { 0x000cc8, 1, 0x04, 0x00000000 },
+ { 0x000cd8, 1, 0x04, 0x00000000 },
+ { 0x000ce8, 1, 0x04, 0x00000000 },
+ { 0x000cf8, 1, 0x04, 0x00000000 },
+ { 0x000c0c, 1, 0x04, 0x3f800000 },
+ { 0x000c1c, 1, 0x04, 0x3f800000 },
+ { 0x000c2c, 1, 0x04, 0x3f800000 },
+ { 0x000c3c, 1, 0x04, 0x3f800000 },
+ { 0x000c4c, 1, 0x04, 0x3f800000 },
+ { 0x000c5c, 1, 0x04, 0x3f800000 },
+ { 0x000c6c, 1, 0x04, 0x3f800000 },
+ { 0x000c7c, 1, 0x04, 0x3f800000 },
+ { 0x000c8c, 1, 0x04, 0x3f800000 },
+ { 0x000c9c, 1, 0x04, 0x3f800000 },
+ { 0x000cac, 1, 0x04, 0x3f800000 },
+ { 0x000cbc, 1, 0x04, 0x3f800000 },
+ { 0x000ccc, 1, 0x04, 0x3f800000 },
+ { 0x000cdc, 1, 0x04, 0x3f800000 },
+ { 0x000cec, 1, 0x04, 0x3f800000 },
+ { 0x000cfc, 1, 0x04, 0x3f800000 },
+ { 0x000d00, 1, 0x04, 0xffff0000 },
+ { 0x000d08, 1, 0x04, 0xffff0000 },
+ { 0x000d10, 1, 0x04, 0xffff0000 },
+ { 0x000d18, 1, 0x04, 0xffff0000 },
+ { 0x000d20, 1, 0x04, 0xffff0000 },
+ { 0x000d28, 1, 0x04, 0xffff0000 },
+ { 0x000d30, 1, 0x04, 0xffff0000 },
+ { 0x000d38, 1, 0x04, 0xffff0000 },
+ { 0x000d04, 1, 0x04, 0xffff0000 },
+ { 0x000d0c, 1, 0x04, 0xffff0000 },
+ { 0x000d14, 1, 0x04, 0xffff0000 },
+ { 0x000d1c, 1, 0x04, 0xffff0000 },
+ { 0x000d24, 1, 0x04, 0xffff0000 },
+ { 0x000d2c, 1, 0x04, 0xffff0000 },
+ { 0x000d34, 1, 0x04, 0xffff0000 },
+ { 0x000d3c, 1, 0x04, 0xffff0000 },
+ { 0x000e00, 1, 0x04, 0x00000000 },
+ { 0x000e10, 1, 0x04, 0x00000000 },
+ { 0x000e20, 1, 0x04, 0x00000000 },
+ { 0x000e30, 1, 0x04, 0x00000000 },
+ { 0x000e40, 1, 0x04, 0x00000000 },
+ { 0x000e50, 1, 0x04, 0x00000000 },
+ { 0x000e60, 1, 0x04, 0x00000000 },
+ { 0x000e70, 1, 0x04, 0x00000000 },
+ { 0x000e80, 1, 0x04, 0x00000000 },
+ { 0x000e90, 1, 0x04, 0x00000000 },
+ { 0x000ea0, 1, 0x04, 0x00000000 },
+ { 0x000eb0, 1, 0x04, 0x00000000 },
+ { 0x000ec0, 1, 0x04, 0x00000000 },
+ { 0x000ed0, 1, 0x04, 0x00000000 },
+ { 0x000ee0, 1, 0x04, 0x00000000 },
+ { 0x000ef0, 1, 0x04, 0x00000000 },
+ { 0x000e04, 1, 0x04, 0xffff0000 },
+ { 0x000e14, 1, 0x04, 0xffff0000 },
+ { 0x000e24, 1, 0x04, 0xffff0000 },
+ { 0x000e34, 1, 0x04, 0xffff0000 },
+ { 0x000e44, 1, 0x04, 0xffff0000 },
+ { 0x000e54, 1, 0x04, 0xffff0000 },
+ { 0x000e64, 1, 0x04, 0xffff0000 },
+ { 0x000e74, 1, 0x04, 0xffff0000 },
+ { 0x000e84, 1, 0x04, 0xffff0000 },
+ { 0x000e94, 1, 0x04, 0xffff0000 },
+ { 0x000ea4, 1, 0x04, 0xffff0000 },
+ { 0x000eb4, 1, 0x04, 0xffff0000 },
+ { 0x000ec4, 1, 0x04, 0xffff0000 },
+ { 0x000ed4, 1, 0x04, 0xffff0000 },
+ { 0x000ee4, 1, 0x04, 0xffff0000 },
+ { 0x000ef4, 1, 0x04, 0xffff0000 },
+ { 0x000e08, 1, 0x04, 0xffff0000 },
+ { 0x000e18, 1, 0x04, 0xffff0000 },
+ { 0x000e28, 1, 0x04, 0xffff0000 },
+ { 0x000e38, 1, 0x04, 0xffff0000 },
+ { 0x000e48, 1, 0x04, 0xffff0000 },
+ { 0x000e58, 1, 0x04, 0xffff0000 },
+ { 0x000e68, 1, 0x04, 0xffff0000 },
+ { 0x000e78, 1, 0x04, 0xffff0000 },
+ { 0x000e88, 1, 0x04, 0xffff0000 },
+ { 0x000e98, 1, 0x04, 0xffff0000 },
+ { 0x000ea8, 1, 0x04, 0xffff0000 },
+ { 0x000eb8, 1, 0x04, 0xffff0000 },
+ { 0x000ec8, 1, 0x04, 0xffff0000 },
+ { 0x000ed8, 1, 0x04, 0xffff0000 },
+ { 0x000ee8, 1, 0x04, 0xffff0000 },
+ { 0x000ef8, 1, 0x04, 0xffff0000 },
+ { 0x000d40, 1, 0x04, 0x00000000 },
+ { 0x000d48, 1, 0x04, 0x00000000 },
+ { 0x000d50, 1, 0x04, 0x00000000 },
+ { 0x000d58, 1, 0x04, 0x00000000 },
+ { 0x000d44, 1, 0x04, 0x00000000 },
+ { 0x000d4c, 1, 0x04, 0x00000000 },
+ { 0x000d54, 1, 0x04, 0x00000000 },
+ { 0x000d5c, 1, 0x04, 0x00000000 },
+ { 0x001e00, 1, 0x04, 0x00000001 },
+ { 0x001e20, 1, 0x04, 0x00000001 },
+ { 0x001e40, 1, 0x04, 0x00000001 },
+ { 0x001e60, 1, 0x04, 0x00000001 },
+ { 0x001e80, 1, 0x04, 0x00000001 },
+ { 0x001ea0, 1, 0x04, 0x00000001 },
+ { 0x001ec0, 1, 0x04, 0x00000001 },
+ { 0x001ee0, 1, 0x04, 0x00000001 },
+ { 0x001e04, 1, 0x04, 0x00000001 },
+ { 0x001e24, 1, 0x04, 0x00000001 },
+ { 0x001e44, 1, 0x04, 0x00000001 },
+ { 0x001e64, 1, 0x04, 0x00000001 },
+ { 0x001e84, 1, 0x04, 0x00000001 },
+ { 0x001ea4, 1, 0x04, 0x00000001 },
+ { 0x001ec4, 1, 0x04, 0x00000001 },
+ { 0x001ee4, 1, 0x04, 0x00000001 },
+ { 0x001e08, 1, 0x04, 0x00000002 },
+ { 0x001e28, 1, 0x04, 0x00000002 },
+ { 0x001e48, 1, 0x04, 0x00000002 },
+ { 0x001e68, 1, 0x04, 0x00000002 },
+ { 0x001e88, 1, 0x04, 0x00000002 },
+ { 0x001ea8, 1, 0x04, 0x00000002 },
+ { 0x001ec8, 1, 0x04, 0x00000002 },
+ { 0x001ee8, 1, 0x04, 0x00000002 },
+ { 0x001e0c, 1, 0x04, 0x00000001 },
+ { 0x001e2c, 1, 0x04, 0x00000001 },
+ { 0x001e4c, 1, 0x04, 0x00000001 },
+ { 0x001e6c, 1, 0x04, 0x00000001 },
+ { 0x001e8c, 1, 0x04, 0x00000001 },
+ { 0x001eac, 1, 0x04, 0x00000001 },
+ { 0x001ecc, 1, 0x04, 0x00000001 },
+ { 0x001eec, 1, 0x04, 0x00000001 },
+ { 0x001e10, 1, 0x04, 0x00000001 },
+ { 0x001e30, 1, 0x04, 0x00000001 },
+ { 0x001e50, 1, 0x04, 0x00000001 },
+ { 0x001e70, 1, 0x04, 0x00000001 },
+ { 0x001e90, 1, 0x04, 0x00000001 },
+ { 0x001eb0, 1, 0x04, 0x00000001 },
+ { 0x001ed0, 1, 0x04, 0x00000001 },
+ { 0x001ef0, 1, 0x04, 0x00000001 },
+ { 0x001e14, 1, 0x04, 0x00000002 },
+ { 0x001e34, 1, 0x04, 0x00000002 },
+ { 0x001e54, 1, 0x04, 0x00000002 },
+ { 0x001e74, 1, 0x04, 0x00000002 },
+ { 0x001e94, 1, 0x04, 0x00000002 },
+ { 0x001eb4, 1, 0x04, 0x00000002 },
+ { 0x001ed4, 1, 0x04, 0x00000002 },
+ { 0x001ef4, 1, 0x04, 0x00000002 },
+ { 0x001e18, 1, 0x04, 0x00000001 },
+ { 0x001e38, 1, 0x04, 0x00000001 },
+ { 0x001e58, 1, 0x04, 0x00000001 },
+ { 0x001e78, 1, 0x04, 0x00000001 },
+ { 0x001e98, 1, 0x04, 0x00000001 },
+ { 0x001eb8, 1, 0x04, 0x00000001 },
+ { 0x001ed8, 1, 0x04, 0x00000001 },
+ { 0x001ef8, 1, 0x04, 0x00000001 },
+ { 0x003400, 128, 0x04, 0x00000000 },
+ { 0x00030c, 1, 0x04, 0x00000001 },
+ { 0x001944, 1, 0x04, 0x00000000 },
+ { 0x001514, 1, 0x04, 0x00000000 },
+ { 0x000d68, 1, 0x04, 0x0000ffff },
+ { 0x00121c, 1, 0x04, 0x0fac6881 },
+ { 0x000fac, 1, 0x04, 0x00000001 },
+ { 0x001538, 1, 0x04, 0x00000001 },
+ { 0x000fe0, 2, 0x04, 0x00000000 },
+ { 0x000fe8, 1, 0x04, 0x00000014 },
+ { 0x000fec, 1, 0x04, 0x00000040 },
+ { 0x000ff0, 1, 0x04, 0x00000000 },
+ { 0x00179c, 1, 0x04, 0x00000000 },
+ { 0x001228, 1, 0x04, 0x00000400 },
+ { 0x00122c, 1, 0x04, 0x00000300 },
+ { 0x001230, 1, 0x04, 0x00010001 },
+ { 0x0007f8, 1, 0x04, 0x00000000 },
+ { 0x0015b4, 1, 0x04, 0x00000001 },
+ { 0x0015cc, 1, 0x04, 0x00000000 },
+ { 0x001534, 1, 0x04, 0x00000000 },
+ { 0x000fb0, 1, 0x04, 0x00000000 },
+ { 0x0015d0, 1, 0x04, 0x00000000 },
+ { 0x00153c, 1, 0x04, 0x00000000 },
+ { 0x0016b4, 1, 0x04, 0x00000003 },
+ { 0x000fbc, 4, 0x04, 0x0000ffff },
+ { 0x000df8, 2, 0x04, 0x00000000 },
+ { 0x001948, 1, 0x04, 0x00000000 },
+ { 0x001970, 1, 0x04, 0x00000001 },
+ { 0x00161c, 1, 0x04, 0x000009f0 },
+ { 0x000dcc, 1, 0x04, 0x00000010 },
+ { 0x00163c, 1, 0x04, 0x00000000 },
+ { 0x0015e4, 1, 0x04, 0x00000000 },
+ { 0x001160, 32, 0x04, 0x25e00040 },
+ { 0x001880, 32, 0x04, 0x00000000 },
+ { 0x000f84, 2, 0x04, 0x00000000 },
+ { 0x0017c8, 2, 0x04, 0x00000000 },
+ { 0x0017d0, 1, 0x04, 0x000000ff },
+ { 0x0017d4, 1, 0x04, 0xffffffff },
+ { 0x0017d8, 1, 0x04, 0x00000002 },
+ { 0x0017dc, 1, 0x04, 0x00000000 },
+ { 0x0015f4, 2, 0x04, 0x00000000 },
+ { 0x001434, 2, 0x04, 0x00000000 },
+ { 0x000d74, 1, 0x04, 0x00000000 },
+ { 0x000dec, 1, 0x04, 0x00000001 },
+ { 0x0013a4, 1, 0x04, 0x00000000 },
+ { 0x001318, 1, 0x04, 0x00000001 },
+ { 0x001644, 1, 0x04, 0x00000000 },
+ { 0x000748, 1, 0x04, 0x00000000 },
+ { 0x000de8, 1, 0x04, 0x00000000 },
+ { 0x001648, 1, 0x04, 0x00000000 },
+ { 0x0012a4, 1, 0x04, 0x00000000 },
+ { 0x001120, 4, 0x04, 0x00000000 },
+ { 0x001118, 1, 0x04, 0x00000000 },
+ { 0x00164c, 1, 0x04, 0x00000000 },
+ { 0x001658, 1, 0x04, 0x00000000 },
+ { 0x001910, 1, 0x04, 0x00000290 },
+ { 0x001518, 1, 0x04, 0x00000000 },
+ { 0x00165c, 1, 0x04, 0x00000001 },
+ { 0x001520, 1, 0x04, 0x00000000 },
+ { 0x001604, 1, 0x04, 0x00000000 },
+ { 0x001570, 1, 0x04, 0x00000000 },
+ { 0x0013b0, 2, 0x04, 0x3f800000 },
+ { 0x00020c, 1, 0x04, 0x00000000 },
+ { 0x001670, 1, 0x04, 0x30201000 },
+ { 0x001674, 1, 0x04, 0x70605040 },
+ { 0x001678, 1, 0x04, 0xb8a89888 },
+ { 0x00167c, 1, 0x04, 0xf8e8d8c8 },
+ { 0x00166c, 1, 0x04, 0x00000000 },
+ { 0x001680, 1, 0x04, 0x00ffff00 },
+ { 0x0012d0, 1, 0x04, 0x00000003 },
+ { 0x0012d4, 1, 0x04, 0x00000002 },
+ { 0x001684, 2, 0x04, 0x00000000 },
+ { 0x000dac, 2, 0x04, 0x00001b02 },
+ { 0x000db4, 1, 0x04, 0x00000000 },
+ { 0x00168c, 1, 0x04, 0x00000000 },
+ { 0x0015bc, 1, 0x04, 0x00000000 },
+ { 0x00156c, 1, 0x04, 0x00000000 },
+ { 0x00187c, 1, 0x04, 0x00000000 },
+ { 0x001110, 1, 0x04, 0x00000001 },
+ { 0x000dc0, 3, 0x04, 0x00000000 },
+ { 0x001234, 1, 0x04, 0x00000000 },
+ { 0x001690, 1, 0x04, 0x00000000 },
+ { 0x0012ac, 1, 0x04, 0x00000001 },
+ { 0x0002c4, 1, 0x04, 0x00000000 },
+ { 0x000790, 5, 0x04, 0x00000000 },
+ { 0x00077c, 1, 0x04, 0x00000000 },
+ { 0x001000, 1, 0x04, 0x00000010 },
+ { 0x0010fc, 1, 0x04, 0x00000000 },
+ { 0x001290, 1, 0x04, 0x00000000 },
+ { 0x000218, 1, 0x04, 0x00000010 },
+ { 0x0012d8, 1, 0x04, 0x00000000 },
+ { 0x0012dc, 1, 0x04, 0x00000010 },
+ { 0x000d94, 1, 0x04, 0x00000001 },
+ { 0x00155c, 2, 0x04, 0x00000000 },
+ { 0x001564, 1, 0x04, 0x00000fff },
+ { 0x001574, 2, 0x04, 0x00000000 },
+ { 0x00157c, 1, 0x04, 0x000fffff },
+ { 0x001354, 1, 0x04, 0x00000000 },
+ { 0x001610, 1, 0x04, 0x00000012 },
+ { 0x001608, 2, 0x04, 0x00000000 },
+ { 0x00260c, 1, 0x04, 0x00000000 },
+ { 0x0007ac, 1, 0x04, 0x00000000 },
+ { 0x00162c, 1, 0x04, 0x00000003 },
+ { 0x000210, 1, 0x04, 0x00000000 },
+ { 0x000320, 1, 0x04, 0x00000000 },
+ { 0x000324, 6, 0x04, 0x3f800000 },
+ { 0x000750, 1, 0x04, 0x00000000 },
+ { 0x000760, 1, 0x04, 0x39291909 },
+ { 0x000764, 1, 0x04, 0x79695949 },
+ { 0x000768, 1, 0x04, 0xb9a99989 },
+ { 0x00076c, 1, 0x04, 0xf9e9d9c9 },
+ { 0x000770, 1, 0x04, 0x30201000 },
+ { 0x000774, 1, 0x04, 0x70605040 },
+ { 0x000778, 1, 0x04, 0x00009080 },
+ { 0x000780, 1, 0x04, 0x39291909 },
+ { 0x000784, 1, 0x04, 0x79695949 },
+ { 0x000788, 1, 0x04, 0xb9a99989 },
+ { 0x00078c, 1, 0x04, 0xf9e9d9c9 },
+ { 0x0007d0, 1, 0x04, 0x30201000 },
+ { 0x0007d4, 1, 0x04, 0x70605040 },
+ { 0x0007d8, 1, 0x04, 0x00009080 },
+ { 0x00037c, 1, 0x04, 0x00000001 },
+ { 0x000740, 2, 0x04, 0x00000000 },
+ { 0x002600, 1, 0x04, 0x00000000 },
+ { 0x001918, 1, 0x04, 0x00000000 },
+ { 0x00191c, 1, 0x04, 0x00000900 },
+ { 0x001920, 1, 0x04, 0x00000405 },
+ { 0x001308, 1, 0x04, 0x00000001 },
+ { 0x001924, 1, 0x04, 0x00000000 },
+ { 0x0013ac, 1, 0x04, 0x00000000 },
+ { 0x00192c, 1, 0x04, 0x00000001 },
+ { 0x00193c, 1, 0x04, 0x00002c1c },
+ { 0x000d7c, 1, 0x04, 0x00000000 },
+ { 0x000f8c, 1, 0x04, 0x00000000 },
+ { 0x0002c0, 1, 0x04, 0x00000001 },
+ { 0x001510, 1, 0x04, 0x00000000 },
+ { 0x001940, 1, 0x04, 0x00000000 },
+ { 0x000ff4, 2, 0x04, 0x00000000 },
+ { 0x00194c, 2, 0x04, 0x00000000 },
+ { 0x001968, 1, 0x04, 0x00000000 },
+ { 0x001590, 1, 0x04, 0x0000003f },
+ { 0x0007e8, 4, 0x04, 0x00000000 },
+ { 0x00196c, 1, 0x04, 0x00000011 },
+ { 0x0002e4, 1, 0x04, 0x0000b001 },
+ { 0x00036c, 2, 0x04, 0x00000000 },
+ { 0x00197c, 1, 0x04, 0x00000000 },
+ { 0x000fcc, 2, 0x04, 0x00000000 },
+ { 0x0002d8, 1, 0x04, 0x00000040 },
+ { 0x001980, 1, 0x04, 0x00000080 },
+ { 0x001504, 1, 0x04, 0x00000080 },
+ { 0x001984, 1, 0x04, 0x00000000 },
+ { 0x000300, 1, 0x04, 0x00000001 },
+ { 0x0013a8, 1, 0x04, 0x00000000 },
+ { 0x0012ec, 1, 0x04, 0x00000000 },
+ { 0x001310, 1, 0x04, 0x00000000 },
+ { 0x001314, 1, 0x04, 0x00000001 },
+ { 0x001380, 1, 0x04, 0x00000000 },
+ { 0x001384, 4, 0x04, 0x00000001 },
+ { 0x001394, 1, 0x04, 0x00000000 },
+ { 0x00139c, 1, 0x04, 0x00000000 },
+ { 0x001398, 1, 0x04, 0x00000000 },
+ { 0x001594, 1, 0x04, 0x00000000 },
+ { 0x001598, 4, 0x04, 0x00000001 },
+ { 0x000f54, 3, 0x04, 0x00000000 },
+ { 0x0019bc, 1, 0x04, 0x00000000 },
+ { 0x000f9c, 2, 0x04, 0x00000000 },
+ { 0x0012cc, 1, 0x04, 0x00000000 },
+ { 0x0012e8, 1, 0x04, 0x00000000 },
+ { 0x00130c, 1, 0x04, 0x00000001 },
+ { 0x001360, 8, 0x04, 0x00000000 },
+ { 0x00133c, 2, 0x04, 0x00000001 },
+ { 0x001344, 1, 0x04, 0x00000002 },
+ { 0x001348, 2, 0x04, 0x00000001 },
+ { 0x001350, 1, 0x04, 0x00000002 },
+ { 0x001358, 1, 0x04, 0x00000001 },
+ { 0x0012e4, 1, 0x04, 0x00000000 },
+ { 0x00131c, 4, 0x04, 0x00000000 },
+ { 0x0019c0, 1, 0x04, 0x00000000 },
+ { 0x001140, 1, 0x04, 0x00000000 },
+ { 0x0019c4, 1, 0x04, 0x00000000 },
+ { 0x0019c8, 1, 0x04, 0x00001500 },
+ { 0x00135c, 1, 0x04, 0x00000000 },
+ { 0x000f90, 1, 0x04, 0x00000000 },
+ { 0x0019e0, 8, 0x04, 0x00000001 },
+ { 0x0019cc, 1, 0x04, 0x00000001 },
+ { 0x0015b8, 1, 0x04, 0x00000000 },
+ { 0x001a00, 1, 0x04, 0x00001111 },
+ { 0x001a04, 7, 0x04, 0x00000000 },
+ { 0x000d6c, 2, 0x04, 0xffff0000 },
+ { 0x0010f8, 1, 0x04, 0x00001010 },
+ { 0x000d80, 5, 0x04, 0x00000000 },
+ { 0x000da0, 1, 0x04, 0x00000000 },
+ { 0x0007a4, 2, 0x04, 0x00000000 },
+ { 0x001508, 1, 0x04, 0x80000000 },
+ { 0x00150c, 1, 0x04, 0x40000000 },
+ { 0x001668, 1, 0x04, 0x00000000 },
+ { 0x000318, 2, 0x04, 0x00000008 },
+ { 0x000d9c, 1, 0x04, 0x00000001 },
+ { 0x000ddc, 1, 0x04, 0x00000002 },
+ { 0x000374, 1, 0x04, 0x00000000 },
+ { 0x000378, 1, 0x04, 0x00000020 },
+ { 0x0007dc, 1, 0x04, 0x00000000 },
+ { 0x00074c, 1, 0x04, 0x00000055 },
+ { 0x001420, 1, 0x04, 0x00000003 },
+ { 0x0017bc, 2, 0x04, 0x00000000 },
+ { 0x0017c4, 1, 0x04, 0x00000001 },
+ { 0x001008, 1, 0x04, 0x00000008 },
+ { 0x00100c, 1, 0x04, 0x00000040 },
+ { 0x001010, 1, 0x04, 0x0000012c },
+ { 0x000d60, 1, 0x04, 0x00000040 },
+ { 0x00075c, 1, 0x04, 0x00000003 },
+ { 0x001018, 1, 0x04, 0x00000020 },
+ { 0x00101c, 1, 0x04, 0x00000001 },
+ { 0x001020, 1, 0x04, 0x00000020 },
+ { 0x001024, 1, 0x04, 0x00000001 },
+ { 0x001444, 3, 0x04, 0x00000000 },
+ { 0x000360, 1, 0x04, 0x20164010 },
+ { 0x000364, 1, 0x04, 0x00000020 },
+ { 0x000368, 1, 0x04, 0x00000000 },
+ { 0x000de4, 1, 0x04, 0x00000000 },
+ { 0x000204, 1, 0x04, 0x00000006 },
+ { 0x000208, 1, 0x04, 0x00000000 },
+ { 0x0002cc, 2, 0x04, 0x003fffff },
+ { 0x001220, 1, 0x04, 0x00000005 },
+ { 0x000fdc, 1, 0x04, 0x00000000 },
+ { 0x000f98, 1, 0x04, 0x00400008 },
+ { 0x001284, 1, 0x04, 0x08000080 },
+ { 0x001450, 1, 0x04, 0x00400008 },
+ { 0x001454, 1, 0x04, 0x08000080 },
+ { 0x000214, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk40xx[] = {
+ { 0x404004, 8, 0x04, 0x00000000 },
+ { 0x404024, 1, 0x04, 0x0000e000 },
+ { 0x404028, 8, 0x04, 0x00000000 },
+ { 0x4040a8, 8, 0x04, 0x00000000 },
+ { 0x4040c8, 1, 0x04, 0xf800008f },
+ { 0x4040d0, 6, 0x04, 0x00000000 },
+ { 0x4040e8, 1, 0x04, 0x00001000 },
+ { 0x4040f8, 1, 0x04, 0x00000000 },
+ { 0x404100, 10, 0x04, 0x00000000 },
+ { 0x404130, 2, 0x04, 0x00000000 },
+ { 0x404138, 1, 0x04, 0x20000040 },
+ { 0x404150, 1, 0x04, 0x0000002e },
+ { 0x404154, 1, 0x04, 0x00000400 },
+ { 0x404158, 1, 0x04, 0x00000200 },
+ { 0x404164, 1, 0x04, 0x00000055 },
+ { 0x40417c, 2, 0x04, 0x00000000 },
+ { 0x404194, 1, 0x04, 0x01000700 },
+ { 0x4041a0, 4, 0x04, 0x00000000 },
+ { 0x404200, 1, 0x04, 0x0000a197 },
+ { 0x404204, 1, 0x04, 0x0000a1c0 },
+ { 0x404208, 1, 0x04, 0x0000a140 },
+ { 0x40420c, 1, 0x04, 0x0000902d },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk58xx[] = {
+ { 0x405800, 1, 0x04, 0x0f8000bf },
+ { 0x405830, 1, 0x04, 0x02180648 },
+ { 0x405834, 1, 0x04, 0x08000000 },
+ { 0x405838, 1, 0x04, 0x00000000 },
+ { 0x405854, 1, 0x04, 0x00000000 },
+ { 0x405870, 4, 0x04, 0x00000001 },
+ { 0x405a00, 2, 0x04, 0x00000000 },
+ { 0x405a18, 1, 0x04, 0x00000000 },
+ { 0x405a1c, 1, 0x04, 0x000000ff },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk64xx[] = {
+ { 0x4064a8, 1, 0x04, 0x00000000 },
+ { 0x4064ac, 1, 0x04, 0x00003fff },
+ { 0x4064b0, 3, 0x04, 0x00000000 },
+ { 0x4064c0, 1, 0x04, 0x802000f0 },
+ { 0x4064c4, 1, 0x04, 0x0192ffff },
+ { 0x4064c8, 1, 0x04, 0x00c20200 },
+ { 0x4064cc, 9, 0x04, 0x00000000 },
+ { 0x4064fc, 1, 0x04, 0x0000022a },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk78xx[] = {
+ { 0x407804, 1, 0x04, 0x00000063 },
+ { 0x40780c, 1, 0x04, 0x0a418820 },
+ { 0x407810, 1, 0x04, 0x062080e6 },
+ { 0x407814, 1, 0x04, 0x020398a4 },
+ { 0x407818, 1, 0x04, 0x0e629062 },
+ { 0x40781c, 1, 0x04, 0x0a418820 },
+ { 0x407820, 1, 0x04, 0x000000e6 },
+ { 0x4078bc, 1, 0x04, 0x00000103 },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk88xx[] = {
+ { 0x408800, 1, 0x04, 0x32802a3c },
+ { 0x408804, 1, 0x04, 0x00000040 },
+ { 0x408808, 1, 0x04, 0x1003e005 },
+ { 0x408840, 1, 0x04, 0x0000000b },
+ { 0x408900, 1, 0x04, 0xb080b801 },
+ { 0x408904, 1, 0x04, 0x62000001 },
+ { 0x408908, 1, 0x04, 0x02c8102f },
+ { 0x408980, 1, 0x04, 0x0000011d },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_gpc_0[] = {
+ { 0x418380, 1, 0x04, 0x00000016 },
+ { 0x418400, 1, 0x04, 0x38005e00 },
+ { 0x418404, 1, 0x04, 0x71e0ffff },
+ { 0x41840c, 1, 0x04, 0x00001008 },
+ { 0x418410, 1, 0x04, 0x0fff0fff },
+ { 0x418414, 1, 0x04, 0x02200fff },
+ { 0x418450, 6, 0x04, 0x00000000 },
+ { 0x418468, 1, 0x04, 0x00000001 },
+ { 0x41846c, 2, 0x04, 0x00000000 },
+ { 0x418600, 1, 0x04, 0x0000007f },
+ { 0x418684, 1, 0x04, 0x0000001f },
+ { 0x418700, 1, 0x04, 0x00000002 },
+ { 0x418704, 2, 0x04, 0x00000080 },
+ { 0x41870c, 2, 0x04, 0x00000000 },
+ { 0x418800, 1, 0x04, 0x7006863a },
+ { 0x418808, 1, 0x04, 0x00000000 },
+ { 0x41880c, 1, 0x04, 0x00000030 },
+ { 0x418810, 1, 0x04, 0x00000000 },
+ { 0x418828, 1, 0x04, 0x00000044 },
+ { 0x418830, 1, 0x04, 0x10000001 },
+ { 0x4188d8, 1, 0x04, 0x00000008 },
+ { 0x4188e0, 1, 0x04, 0x01000000 },
+ { 0x4188e8, 5, 0x04, 0x00000000 },
+ { 0x4188fc, 1, 0x04, 0x20100058 },
+ { 0x41891c, 1, 0x04, 0x00ff00ff },
+ { 0x418924, 1, 0x04, 0x00000000 },
+ { 0x418928, 1, 0x04, 0x00ffff00 },
+ { 0x41892c, 1, 0x04, 0x0000ff00 },
+ { 0x418b00, 1, 0x04, 0x0000001e },
+ { 0x418b08, 1, 0x04, 0x0a418820 },
+ { 0x418b0c, 1, 0x04, 0x062080e6 },
+ { 0x418b10, 1, 0x04, 0x020398a4 },
+ { 0x418b14, 1, 0x04, 0x0e629062 },
+ { 0x418b18, 1, 0x04, 0x0a418820 },
+ { 0x418b1c, 1, 0x04, 0x000000e6 },
+ { 0x418bb8, 1, 0x04, 0x00000103 },
+ { 0x418c08, 1, 0x04, 0x00000001 },
+ { 0x418c10, 8, 0x04, 0x00000000 },
+ { 0x418c40, 1, 0x04, 0xffffffff },
+ { 0x418c6c, 1, 0x04, 0x00000001 },
+ { 0x418c80, 1, 0x04, 0x2020000c },
+ { 0x418c8c, 1, 0x04, 0x00000001 },
+ { 0x418d24, 1, 0x04, 0x00000000 },
+ { 0x419000, 1, 0x04, 0x00000780 },
+ { 0x419004, 2, 0x04, 0x00000000 },
+ { 0x419014, 1, 0x04, 0x00000004 },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_tpc[] = {
+ { 0x419848, 1, 0x04, 0x00000000 },
+ { 0x419864, 1, 0x04, 0x00000129 },
+ { 0x419888, 1, 0x04, 0x00000000 },
+ { 0x419a00, 1, 0x04, 0x000100f0 },
+ { 0x419a04, 1, 0x04, 0x00000001 },
+ { 0x419a08, 1, 0x04, 0x00000421 },
+ { 0x419a0c, 1, 0x04, 0x00120000 },
+ { 0x419a10, 1, 0x04, 0x00000000 },
+ { 0x419a14, 1, 0x04, 0x00000200 },
+ { 0x419a1c, 1, 0x04, 0x0000c000 },
+ { 0x419a20, 1, 0x04, 0x00000800 },
+ { 0x419a30, 1, 0x04, 0x00000001 },
+ { 0x419ac4, 1, 0x04, 0x0037f440 },
+ { 0x419c00, 1, 0x04, 0x0000001a },
+ { 0x419c04, 1, 0x04, 0x80000006 },
+ { 0x419c08, 1, 0x04, 0x00000002 },
+ { 0x419c20, 1, 0x04, 0x00000000 },
+ { 0x419c24, 1, 0x04, 0x00084210 },
+ { 0x419c28, 1, 0x04, 0x3efbefbe },
+ { 0x419ce8, 1, 0x04, 0x00000000 },
+ { 0x419cf4, 1, 0x04, 0x00000203 },
+ { 0x419e04, 1, 0x04, 0x00000000 },
+ { 0x419e08, 1, 0x04, 0x0000001d },
+ { 0x419e0c, 1, 0x04, 0x00000000 },
+ { 0x419e10, 1, 0x04, 0x00001c02 },
+ { 0x419e44, 1, 0x04, 0x0013eff2 },
+ { 0x419e48, 1, 0x04, 0x00000000 },
+ { 0x419e4c, 1, 0x04, 0x0000007f },
+ { 0x419e50, 2, 0x04, 0x00000000 },
+ { 0x419e58, 1, 0x04, 0x00000001 },
+ { 0x419e5c, 3, 0x04, 0x00000000 },
+ { 0x419e68, 1, 0x04, 0x00000002 },
+ { 0x419e6c, 12, 0x04, 0x00000000 },
+ { 0x419eac, 1, 0x04, 0x00001f8f },
+ { 0x419eb0, 1, 0x04, 0x0db00da0 },
+ { 0x419eb8, 1, 0x04, 0x00000000 },
+ { 0x419ec8, 1, 0x04, 0x0001304f },
+ { 0x419f30, 4, 0x04, 0x00000000 },
+ { 0x419f40, 1, 0x04, 0x00000018 },
+ { 0x419f44, 3, 0x04, 0x00000000 },
+ { 0x419f58, 1, 0x04, 0x00000020 },
+ { 0x419f70, 1, 0x04, 0x00000000 },
+ { 0x419f78, 1, 0x04, 0x000001eb },
+ { 0x419f7c, 1, 0x04, 0x00000404 },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk[] = {
+ { 0x41be24, 1, 0x04, 0x00000006 },
+ { 0x41bec0, 1, 0x04, 0x10000000 },
+ { 0x41bec4, 1, 0x04, 0x00037f7f },
+ { 0x41bee4, 1, 0x04, 0x00000000 },
+ { 0x41bef0, 1, 0x04, 0x000003ff },
+ { 0x41bf00, 1, 0x04, 0x0a418820 },
+ { 0x41bf04, 1, 0x04, 0x062080e6 },
+ { 0x41bf08, 1, 0x04, 0x020398a4 },
+ { 0x41bf0c, 1, 0x04, 0x0e629062 },
+ { 0x41bf10, 1, 0x04, 0x0a418820 },
+ { 0x41bf14, 1, 0x04, 0x000000e6 },
+ { 0x41bfd0, 1, 0x04, 0x00900103 },
+ { 0x41bfe0, 1, 0x04, 0x00400001 },
+ { 0x41bfe4, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static void
+nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+{
+ u32 magic[GPC_MAX][2];
+ u32 offset;
+ int gpc;
+
+ mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+ mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+ mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
+ mmio_list(0x40800c, 0x00000000, 8, 1);
+ mmio_list(0x408010, 0x80000000, 0, 0);
+ mmio_list(0x419004, 0x00000000, 8, 1);
+ mmio_list(0x419008, 0x00000000, 0, 0);
+ mmio_list(0x408004, 0x00000000, 8, 0);
+ mmio_list(0x408008, 0x80000030, 0, 0);
+ mmio_list(0x418808, 0x00000000, 8, 0);
+ mmio_list(0x41880c, 0x80000030, 0, 0);
+ mmio_list(0x418810, 0x80000000, 12, 2);
+ mmio_list(0x419848, 0x10000000, 12, 2);
+
+ mmio_list(0x405830, 0x02180648, 0, 0);
+ mmio_list(0x4064c4, 0x0192ffff, 0, 0);
+
+ for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
+ u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
+ u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
+ magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
+ magic[gpc][1] = 0x00000000 | (magic1 << 16);
+ offset += 0x0324 * priv->tpc_nr[gpc];
+ }
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
+ mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
+ offset += 0x07ff * priv->tpc_nr[gpc];
+ }
+
+ mmio_list(0x17e91c, 0x0b040a0b, 0, 0);
+ mmio_list(0x17e920, 0x00090d08, 0, 0);
+}
+
+static struct nvc0_graph_init *
+nv108_grctx_init_hub[] = {
+ nvc0_grctx_init_base,
+ nv108_grctx_init_unk40xx,
+ nvf0_grctx_init_unk44xx,
+ nve4_grctx_init_unk46xx,
+ nve4_grctx_init_unk47xx,
+ nv108_grctx_init_unk58xx,
+ nvf0_grctx_init_unk5bxx,
+ nvf0_grctx_init_unk60xx,
+ nv108_grctx_init_unk64xx,
+ nv108_grctx_init_unk78xx,
+ nve4_grctx_init_unk80xx,
+ nv108_grctx_init_unk88xx,
+ NULL
+};
+
+struct nvc0_graph_init *
+nv108_grctx_init_gpc[] = {
+ nv108_grctx_init_gpc_0,
+ nvc0_grctx_init_gpc_1,
+ nv108_grctx_init_tpc,
+ nv108_grctx_init_unk,
+ NULL
+};
+
+struct nvc0_graph_init
+nv108_grctx_init_mthd_magic[] = {
+ { 0x3410, 1, 0x04, 0x8e0e2006 },
+ { 0x3414, 1, 0x04, 0x00000038 },
+ {}
+};
+
+static struct nvc0_graph_mthd
+nv108_grctx_init_mthd[] = {
+ { 0xa197, nv108_grctx_init_a197, },
+ { 0x902d, nvc0_grctx_init_902d, },
+ { 0x902d, nv108_grctx_init_mthd_magic, },
+ {}
+};
+
+struct nouveau_oclass *
+nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
+ .base.handle = NV_ENGCTX(GR, 0x08),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_graph_context_ctor,
+ .dtor = nvc0_graph_context_dtor,
+ .init = _nouveau_graph_context_init,
+ .fini = _nouveau_graph_context_fini,
+ .rd32 = _nouveau_graph_context_rd32,
+ .wr32 = _nouveau_graph_context_wr32,
+ },
+ .main = nve4_grctx_generate_main,
+ .mods = nv108_grctx_generate_mods,
+ .unkn = nve4_grctx_generate_unkn,
+ .hub = nv108_grctx_init_hub,
+ .gpc = nv108_grctx_init_gpc,
+ .icmd = nv108_grctx_init_icmd,
+ .mthd = nv108_grctx_init_mthd,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
index dcb2ebb8c29d..44012c3da538 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
@@ -50,7 +50,7 @@ nvf0_grctx_init_unk40xx[] = {
{}
};
-static struct nvc0_graph_init
+struct nvc0_graph_init
nvf0_grctx_init_unk44xx[] = {
{ 0x404404, 12, 0x04, 0x00000000 },
{ 0x404438, 1, 0x04, 0x00000000 },
@@ -62,7 +62,7 @@ nvf0_grctx_init_unk44xx[] = {
{}
};
-static struct nvc0_graph_init
+struct nvc0_graph_init
nvf0_grctx_init_unk5bxx[] = {
{ 0x405b00, 1, 0x04, 0x00000000 },
{ 0x405b10, 1, 0x04, 0x00001000 },
@@ -70,7 +70,7 @@ nvf0_grctx_init_unk5bxx[] = {
{}
};
-static struct nvc0_graph_init
+struct nvc0_graph_init
nvf0_grctx_init_unk60xx[] = {
{ 0x406020, 1, 0x04, 0x034103c1 },
{ 0x406028, 4, 0x04, 0x00000001 },
@@ -286,7 +286,6 @@ nvf0_grctx_init_hub[] = {
nvf0_grctx_init_unk64xx,
nve4_grctx_init_unk80xx,
nvf0_grctx_init_unk88xx,
- nvd9_grctx_init_rop,
NULL
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
index 5d24b6de16cc..e148961b8075 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
@@ -38,7 +38,7 @@ queue_put:
cmpu b32 $r8 $r9
bra ne #queue_put_next
mov $r15 E_CMD_OVERFLOW
- call #error
+ call(error)
ret
// store cmd/data on queue
@@ -92,18 +92,16 @@ queue_get_done:
// Out: $r15 value
//
nv_rd32:
- mov $r11 0x728
- shl b32 $r11 6
mov b32 $r12 $r14
bset $r12 31 // MMIO_CTRL_PENDING
- iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
+ nv_iowr(NV_PGRAPH_FECS_MMIO_CTRL, 0, $r12)
nv_rd32_wait:
- iord $r12 I[$r11 + 0x000]
+ nv_iord($r12, NV_PGRAPH_FECS_MMIO_CTRL, 0)
xbit $r12 $r12 31
bra ne #nv_rd32_wait
mov $r10 6 // DONE_MMIO_RD
- call #wait_doneo
- iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
+ call(wait_doneo)
+ nv_iord($r15, NV_PGRAPH_FECS_MMIO_RDVAL, 0)
ret
// nv_wr32 - write 32-bit value to nv register
@@ -112,37 +110,17 @@ nv_rd32:
// $r15 value
//
nv_wr32:
- mov $r11 0x728
- shl b32 $r11 6
- iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL
+ nv_iowr(NV_PGRAPH_FECS_MMIO_WRVAL, 0, $r15)
mov b32 $r12 $r14
bset $r12 31 // MMIO_CTRL_PENDING
bset $r12 30 // MMIO_CTRL_WRITE
- iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
+ nv_iowr(NV_PGRAPH_FECS_MMIO_CTRL, 0, $r12)
nv_wr32_wait:
- iord $r12 I[$r11 + 0x000]
+ nv_iord($r12, NV_PGRAPH_FECS_MMIO_CTRL, 0)
xbit $r12 $r12 31
bra ne #nv_wr32_wait
ret
-// (re)set watchdog timer
-//
-// In : $r15 timeout
-//
-watchdog_reset:
- mov $r8 0x430
- shl b32 $r8 6
- bset $r15 31
- iowr I[$r8 + 0x000] $r15
- ret
-
-// clear watchdog timer
-watchdog_clear:
- mov $r8 0x430
- shl b32 $r8 6
- iowr I[$r8 + 0x000] $r0
- ret
-
// wait_donez - wait on FUC_DONE bit to become clear
//
// In : $r10 bit to wait on
@@ -163,13 +141,9 @@ wait_donez:
//
wait_doneo:
trace_set(T_WAIT);
- mov $r8 0x818
- shl b32 $r8 6
- iowr I[$r8 + 0x000] $r10
+ nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(6), 0, $r10)
wait_doneo_e:
- mov $r8 0x400
- shl b32 $r8 6
- iord $r8 I[$r8 + 0x000]
+ nv_iord($r8, NV_PGRAPH_FECS_SIGNAL, 0)
xbit $r8 $r8 $r10
bra e #wait_doneo_e
trace_clr(T_WAIT)
@@ -209,21 +183,18 @@ mmctx_size:
//
mmctx_xfer:
trace_set(T_MMCTX)
- mov $r8 0x710
- shl b32 $r8 6
clear b32 $r9
or $r11 $r11
bra e #mmctx_base_disabled
- iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_BASE, 0, $r11)
bset $r9 0 // BASE_EN
mmctx_base_disabled:
or $r14 $r14
bra e #mmctx_multi_disabled
- iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
- iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_MULTI_STRIDE, 0, $r14)
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_MULTI_MASK, 0, $r15)
bset $r9 1 // MULTI_EN
mmctx_multi_disabled:
- add b32 $r8 0x100
xbit $r11 $r10 0
shl b32 $r11 16 // DIR
@@ -231,20 +202,20 @@ mmctx_xfer:
xbit $r14 $r10 1
shl b32 $r14 17
or $r11 $r14 // START_TRIGGER
- iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_CTRL, 0, $r11)
// loop over the mmio list, and send requests to the hw
mmctx_exec_loop:
// wait for space in mmctx queue
mmctx_wait_free:
- iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
+ nv_iord($r14, NV_PGRAPH_FECS_MMCTX_CTRL, 0)
and $r14 0x1f
bra e #mmctx_wait_free
// queue up an entry
ld b32 $r14 D[$r12]
or $r14 $r9
- iowr I[$r8 + 0x300] $r14
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_QUEUE, 0, $r14)
add b32 $r12 4
cmpu b32 $r12 $r13
bra ne #mmctx_exec_loop
@@ -253,22 +224,22 @@ mmctx_xfer:
bra ne #mmctx_stop
// wait for queue to empty
mmctx_fini_wait:
- iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+ nv_iord($r11, NV_PGRAPH_FECS_MMCTX_CTRL, 0)
and $r11 0x1f
cmpu b32 $r11 0x10
bra ne #mmctx_fini_wait
mov $r10 2 // DONE_MMCTX
- call #wait_donez
+ call(wait_donez)
bra #mmctx_done
mmctx_stop:
xbit $r11 $r10 0
shl b32 $r11 16 // DIR
bset $r11 12 // QLIMIT = 0x10
bset $r11 18 // STOP_TRIGGER
- iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_CTRL, 0, $r11)
mmctx_stop_wait:
// wait for STOP_TRIGGER to clear
- iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+ nv_iord($r11, NV_PGRAPH_FECS_MMCTX_CTRL, 0)
xbit $r11 $r11 18
bra ne #mmctx_stop_wait
mmctx_done:
@@ -280,28 +251,24 @@ mmctx_xfer:
strand_wait:
push $r10
mov $r10 2
- call #wait_donez
+ call(wait_donez)
pop $r10
ret
// unknown - call before issuing strand commands
//
strand_pre:
- mov $r8 0x4afc
- sethi $r8 0x20000
- mov $r9 0xc
- iowr I[$r8] $r9
- call #strand_wait
+ mov $r9 NV_PGRAPH_FECS_STRAND_CMD_ENABLE
+ nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r9)
+ call(strand_wait)
ret
// unknown - call after issuing strand commands
//
strand_post:
- mov $r8 0x4afc
- sethi $r8 0x20000
- mov $r9 0xd
- iowr I[$r8] $r9
- call #strand_wait
+ mov $r9 NV_PGRAPH_FECS_STRAND_CMD_DISABLE
+ nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r9)
+ call(strand_wait)
ret
// Selects strand set?!
@@ -309,18 +276,14 @@ strand_post:
// In: $r14 id
//
strand_set:
- mov $r10 0x4ffc
- sethi $r10 0x20000
- sub b32 $r11 $r10 0x500
mov $r12 0xf
- iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
- mov $r12 0xb
- iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
- call #strand_wait
- iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
- mov $r12 0xa
- iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
- call #strand_wait
+ nv_iowr(NV_PGRAPH_FECS_STRAND_FILTER, 0x3f, $r12)
+ mov $r12 NV_PGRAPH_FECS_STRAND_CMD_DEACTIVATE_FILTER
+ nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12)
+ nv_iowr(NV_PGRAPH_FECS_STRAND_FILTER, 0x3f, $r14)
+ mov $r12 NV_PGRAPH_FECS_STRAND_CMD_ACTIVATE_FILTER
+ nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12)
+ call(strand_wait)
ret
// Initialise strand context data
@@ -332,30 +295,27 @@ strand_set:
//
strand_ctx_init:
trace_set(T_STRINIT)
- call #strand_pre
+ call(strand_pre)
mov $r14 3
- call #strand_set
- mov $r10 0x46fc
- sethi $r10 0x20000
- add b32 $r11 $r10 0x400
- iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
- mov $r12 1
- iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
- call #strand_wait
+ call(strand_set)
+
+ clear b32 $r12
+ nv_iowr(NV_PGRAPH_FECS_STRAND_SELECT, 0x3f, $r12)
+ mov $r12 NV_PGRAPH_FECS_STRAND_CMD_SEEK
+ nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12)
+ call(strand_wait)
sub b32 $r12 $r0 1
- iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
- mov $r12 2
- iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
- call #strand_wait
- call #strand_post
+ nv_iowr(NV_PGRAPH_FECS_STRAND_DATA, 0x3f, $r12)
+ mov $r12 NV_PGRAPH_FECS_STRAND_CMD_GET_INFO
+ nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12)
+ call(strand_wait)
+ call(strand_post)
// read the size of each strand, poke the context offset of
// each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
// about it later then.
- mov $r8 0x880
- shl b32 $r8 6
- iord $r9 I[$r8 + 0x000] // STRANDS
- add b32 $r8 0x2200
+ nv_mkio($r8, NV_PGRAPH_FECS_STRAND_SAVE_SWBASE, 0x00)
+ nv_iord($r9, NV_PGRAPH_FECS_STRANDS_CNT, 0x00)
shr b32 $r14 $r15 8
ctx_init_strand_loop:
iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
index 5547c1b3f4f2..96cbcea3b2c9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
@@ -58,12 +58,9 @@ mmio_list_base:
//
error:
push $r14
- mov $r14 -0x67ec // 0x9814
- sethi $r14 0x400000
- call #nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
- add b32 $r14 0x41c
+ nv_wr32(NV_PGRAPH_FECS_CC_SCRATCH_VAL(5), $r15)
mov $r15 1
- call #nv_wr32 // HUB_CTXCTL_INTR_UP_SET
+ nv_wr32(NV_PGRAPH_FECS_INTR_UP_SET, $r15)
pop $r14
ret
@@ -84,46 +81,40 @@ init:
mov $sp $r0
// enable fifo access
- mov $r1 0x1200
- mov $r2 2
- iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
+ mov $r2 NV_PGRAPH_GPCX_GPCCS_ACCESS_FIFO
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_ACCESS, 0, $r2)
// setup i0 handler, and route all interrupts to it
mov $r1 #ih
mov $iv0 $r1
- mov $r1 0x400
- iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_INTR_ROUTE, 0, $r0)
// enable fifo interrupt
- mov $r2 4
- iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
+ mov $r2 NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET_FIFO
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET, 0, $r2)
// enable interrupts
bset $flags ie0
// figure out which GPC we are, and how many TPCs we have
- mov $r1 0x608
- shl b32 $r1 6
- iord $r2 I[$r1 + 0x000] // UNITS
+ nv_iord($r2, NV_PGRAPH_GPCX_GPCCS_UNITS, 0)
mov $r3 1
and $r2 0x1f
shl b32 $r3 $r2
sub b32 $r3 1
st b32 D[$r0 + #tpc_count] $r2
st b32 D[$r0 + #tpc_mask] $r3
- add b32 $r1 0x400
- iord $r2 I[$r1 + 0x000] // MYINDEX
+ nv_iord($r2, NV_PGRAPH_GPCX_GPCCS_MYINDEX, 0)
st b32 D[$r0 + #gpc_id] $r2
#if NV_PGRAPH_GPCX_UNK__SIZE > 0
// figure out which, and how many, UNKs are actually present
- mov $r14 0x0c30
- sethi $r14 0x500000
+ imm32($r14, 0x500c30)
clear b32 $r2
clear b32 $r3
clear b32 $r4
init_unk_loop:
- call #nv_rd32
+ call(nv_rd32)
cmp b32 $r15 0
bra z #init_unk_next
mov $r15 1
@@ -146,23 +137,21 @@ init:
// set mmctx base addresses now so we don't have to do it later,
// they don't currently ever change
- mov $r4 0x700
- shl b32 $r4 6
shr b32 $r5 $r2 8
- iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE
- iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMCTX_SAVE_SWBASE, 0, $r5)
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_SWBASE, 0, $r5)
// calculate GPC mmio context size
ld b32 $r14 D[$r0 + #gpc_mmio_list_head]
ld b32 $r15 D[$r0 + #gpc_mmio_list_tail]
- call #mmctx_size
+ call(mmctx_size)
add b32 $r2 $r15
add b32 $r3 $r15
// calculate per-TPC mmio context size
ld b32 $r14 D[$r0 + #tpc_mmio_list_head]
ld b32 $r15 D[$r0 + #tpc_mmio_list_tail]
- call #mmctx_size
+ call(mmctx_size)
ld b32 $r14 D[$r0 + #tpc_count]
mulu $r14 $r15
add b32 $r2 $r14
@@ -172,7 +161,7 @@ init:
// calculate per-UNK mmio context size
ld b32 $r14 D[$r0 + #unk_mmio_list_head]
ld b32 $r15 D[$r0 + #unk_mmio_list_tail]
- call #mmctx_size
+ call(mmctx_size)
ld b32 $r14 D[$r0 + #unk_count]
mulu $r14 $r15
add b32 $r2 $r14
@@ -180,9 +169,8 @@ init:
#endif
// round up base/size to 256 byte boundary (for strand SWBASE)
- add b32 $r4 0x1300
shr b32 $r3 2
- iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!?
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_COUNT, 0, $r3) // wtf for?!
shr b32 $r2 8
shr b32 $r3 6
add b32 $r2 1
@@ -192,7 +180,7 @@ init:
// calculate size of strand context data
mov b32 $r15 $r2
- call #strand_ctx_init
+ call(strand_ctx_init)
add b32 $r3 $r15
// save context size, and tell HUB we're done
@@ -208,7 +196,7 @@ main:
bset $flags $p0
sleep $p0
mov $r13 #cmd_queue
- call #queue_get
+ call(queue_get)
bra $p1 #main
// 0x0000-0x0003 are all context transfers
@@ -224,13 +212,13 @@ main:
or $r1 $r14
mov $flags $r1
// transfer context data
- call #ctx_xfer
+ call(ctx_xfer)
bra #main
main_not_ctx_xfer:
shl b32 $r15 $r14 16
or $r15 E_BAD_COMMAND
- call #error
+ call(error)
bra #main
// interrupt handler
@@ -247,22 +235,20 @@ ih:
clear b32 $r0
// incoming fifo command?
- iord $r10 I[$r0 + 0x200] // INTR
- and $r11 $r10 0x00000004
+ nv_iord($r10, NV_PGRAPH_GPCX_GPCCS_INTR, 0)
+ and $r11 $r10 NV_PGRAPH_GPCX_GPCCS_INTR_FIFO
bra e #ih_no_fifo
// queue incoming fifo command for later processing
- mov $r11 0x1900
mov $r13 #cmd_queue
- iord $r14 I[$r11 + 0x100] // FIFO_CMD
- iord $r15 I[$r11 + 0x000] // FIFO_DATA
- call #queue_put
- add b32 $r11 0x400
+ nv_iord($r14, NV_PGRAPH_GPCX_GPCCS_FIFO_CMD, 0)
+ nv_iord($r15, NV_PGRAPH_GPCX_GPCCS_FIFO_DATA, 0)
+ call(queue_put)
mov $r14 1
- iowr I[$r11 + 0x000] $r14 // FIFO_ACK
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_FIFO_ACK, 0, $r14)
// ack, and wake up main()
ih_no_fifo:
- iowr I[$r0 + 0x100] $r10 // INTR_ACK
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_INTR_ACK, 0, $r10)
pop $r15
pop $r14
@@ -283,9 +269,7 @@ hub_barrier_done:
mov $r15 1
ld b32 $r14 D[$r0 + #gpc_id]
shl b32 $r15 $r14
- mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
- sethi $r14 0x400000
- call #nv_wr32
+ nv_wr32(0x409418, $r15) // 0x409418 - HUB_BAR_SET
ret
// Disables various things, waits a bit, and re-enables them..
@@ -295,16 +279,15 @@ hub_barrier_done:
// funny things happen.
//
ctx_redswitch:
- mov $r14 0x614
- shl b32 $r14 6
- mov $r15 0x020
- iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER
- mov $r15 8
+ mov $r15 NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_POWER
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_RED_SWITCH, 0, $r15)
+ mov $r14 8
ctx_redswitch_delay:
- sub b32 $r15 1
+ sub b32 $r14 1
bra ne #ctx_redswitch_delay
- mov $r15 0xa20
- iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
+ or $r15 NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_UNK11
+ or $r15 NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_ENABLE
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_RED_SWITCH, 0, $r15)
ret
// Transfer GPC context data between GPU and storage area
@@ -317,46 +300,37 @@ ctx_redswitch:
//
ctx_xfer:
// set context base address
- mov $r1 0xa04
- shl b32 $r1 6
- iowr I[$r1 + 0x000] $r15// MEM_BASE
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_MEM_BASE, 0, $r15)
bra not $p1 #ctx_xfer_not_load
- call #ctx_redswitch
+ call(ctx_redswitch)
ctx_xfer_not_load:
// strands
- mov $r1 0x4afc
- sethi $r1 0x20000
- mov $r2 0xc
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
- call #strand_wait
- mov $r2 0x47fc
- sethi $r2 0x20000
- iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
- xbit $r2 $flags $p1
- add b32 $r2 3
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+ call(strand_pre)
+ clear b32 $r2
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_STRAND_SELECT, 0x3f, $r2)
+ xbit $r2 $flags $p1 // SAVE/LOAD
+ add b32 $r2 NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_SAVE
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_STRAND_CMD, 0x3f, $r2)
// mmio context
xbit $r10 $flags $p1 // direction
or $r10 2 // first
- mov $r11 0x0000
- sethi $r11 0x500000
+ imm32($r11,0x500000)
ld b32 $r12 D[$r0 + #gpc_id]
shl b32 $r12 15
add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
mov $r14 0 // not multi
- call #mmctx_xfer
+ call(mmctx_xfer)
// per-TPC mmio context
xbit $r10 $flags $p1 // direction
#if !NV_PGRAPH_GPCX_UNK__SIZE
or $r10 4 // last
#endif
- mov $r11 0x4000
- sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
+ imm32($r11, 0x504000)
ld b32 $r12 D[$r0 + #gpc_id]
shl b32 $r12 15
add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
@@ -364,14 +338,13 @@ ctx_xfer:
ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
ld b32 $r15 D[$r0 + #tpc_mask]
mov $r14 0x800 // stride = 0x800
- call #mmctx_xfer
+ call(mmctx_xfer)
#if NV_PGRAPH_GPCX_UNK__SIZE > 0
// per-UNK mmio context
xbit $r10 $flags $p1 // direction
or $r10 4 // last
- mov $r11 0x3000
- sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_UNK0
+ imm32($r11, 0x503000)
ld b32 $r12 D[$r0 + #gpc_id]
shl b32 $r12 15
add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_UNK0
@@ -379,11 +352,11 @@ ctx_xfer:
ld b32 $r13 D[$r0 + #unk_mmio_list_tail]
ld b32 $r15 D[$r0 + #unk_mask]
mov $r14 0x200 // stride = 0x200
- call #mmctx_xfer
+ call(mmctx_xfer)
#endif
// wait for strands to finish
- call #strand_wait
+ call(strand_wait)
// if load, or a save without a load following, do some
// unknown stuff that's done after finishing a block of
@@ -391,14 +364,10 @@ ctx_xfer:
bra $p1 #ctx_xfer_post
bra not $p2 #ctx_xfer_done
ctx_xfer_post:
- mov $r1 0x4afc
- sethi $r1 0x20000
- mov $r2 0xd
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
- call #strand_wait
+ call(strand_post)
// mark completion in HUB's barrier
ctx_xfer_done:
- call #hub_barrier_done
+ call(hub_barrier_done)
ret
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5 b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5
new file mode 100644
index 000000000000..bd30262d635b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#define NV_PGRAPH_GPCX_UNK__SIZE 0x00000001
+
+#define CHIPSET GK208
+#include "macros.fuc"
+
+.section #nv108_grgpc_data
+#define INCLUDE_DATA
+#include "com.fuc"
+#include "gpc.fuc"
+#undef INCLUDE_DATA
+
+.section #nv108_grgpc_code
+#define INCLUDE_CODE
+bra #init
+#include "com.fuc"
+#include "gpc.fuc"
+.align 256
+#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h
new file mode 100644
index 000000000000..27dc1280dc10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h
@@ -0,0 +1,473 @@
+uint32_t nv108_grgpc_data[] = {
+/* 0x0000: gpc_mmio_list_head */
+ 0x0000006c,
+/* 0x0004: gpc_mmio_list_tail */
+/* 0x0004: tpc_mmio_list_head */
+ 0x0000006c,
+/* 0x0008: tpc_mmio_list_tail */
+/* 0x0008: unk_mmio_list_head */
+ 0x0000006c,
+/* 0x000c: unk_mmio_list_tail */
+ 0x0000006c,
+/* 0x0010: gpc_id */
+ 0x00000000,
+/* 0x0014: tpc_count */
+ 0x00000000,
+/* 0x0018: tpc_mask */
+ 0x00000000,
+/* 0x001c: unk_count */
+ 0x00000000,
+/* 0x0020: unk_mask */
+ 0x00000000,
+/* 0x0024: cmd_queue */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+uint32_t nv108_grgpc_code[] = {
+ 0x03140ef5,
+/* 0x0004: queue_put */
+ 0x9800d898,
+ 0x86f001d9,
+ 0xf489a408,
+ 0x020f0b1b,
+ 0x0002f87e,
+/* 0x001a: queue_put_next */
+ 0x98c400f8,
+ 0x0384b607,
+ 0xb6008dbb,
+ 0x8eb50880,
+ 0x018fb500,
+ 0xf00190b6,
+ 0xd9b50f94,
+/* 0x0037: queue_get */
+ 0xf400f801,
+ 0xd8980131,
+ 0x01d99800,
+ 0x0bf489a4,
+ 0x0789c421,
+ 0xbb0394b6,
+ 0x90b6009d,
+ 0x009e9808,
+ 0xb6019f98,
+ 0x84f00180,
+ 0x00d8b50f,
+/* 0x0063: queue_get_done */
+ 0xf80132f4,
+/* 0x0065: nv_rd32 */
+ 0xf0ecb200,
+ 0x00801fc9,
+ 0x0cf601ca,
+/* 0x0073: nv_rd32_wait */
+ 0x8c04bd00,
+ 0xcf01ca00,
+ 0xccc800cc,
+ 0xf61bf41f,
+ 0xec7e060a,
+ 0x008f0000,
+ 0xffcf01cb,
+/* 0x008f: nv_wr32 */
+ 0x8000f800,
+ 0xf601cc00,
+ 0x04bd000f,
+ 0xc9f0ecb2,
+ 0x1ec9f01f,
+ 0x01ca0080,
+ 0xbd000cf6,
+/* 0x00a9: nv_wr32_wait */
+ 0xca008c04,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f61b,
+/* 0x00b8: wait_donez */
+ 0x99f094bd,
+ 0x37008000,
+ 0x0009f602,
+ 0x008004bd,
+ 0x0af60206,
+/* 0x00cf: wait_donez_ne */
+ 0x8804bd00,
+ 0xcf010000,
+ 0x8aff0088,
+ 0xf61bf488,
+ 0x99f094bd,
+ 0x17008000,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x00ec: wait_doneo */
+ 0x99f094bd,
+ 0x37008000,
+ 0x0009f602,
+ 0x008004bd,
+ 0x0af60206,
+/* 0x0103: wait_doneo_e */
+ 0x8804bd00,
+ 0xcf010000,
+ 0x8aff0088,
+ 0xf60bf488,
+ 0x99f094bd,
+ 0x17008000,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x0120: mmctx_size */
+/* 0x0122: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0x1bf4efa4,
+ 0xf89fb2ec,
+/* 0x013d: mmctx_xfer */
+ 0xf094bd00,
+ 0x00800199,
+ 0x09f60237,
+ 0xbd04bd00,
+ 0x05bbfd94,
+ 0x800f0bf4,
+ 0xf601c400,
+ 0x04bd000b,
+/* 0x015f: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0xc6008018,
+ 0x000ef601,
+ 0x008004bd,
+ 0x0ff601c7,
+ 0xf004bd00,
+/* 0x017a: mmctx_multi_disabled */
+ 0xabc80199,
+ 0x10b4b600,
+ 0xc80cb9f0,
+ 0xe4b601ae,
+ 0x05befd11,
+ 0x01c50080,
+ 0xbd000bf6,
+/* 0x0195: mmctx_exec_loop */
+/* 0x0195: mmctx_wait_free */
+ 0xc5008e04,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f60b,
+ 0x05e9fd00,
+ 0x01c80080,
+ 0xbd000ef6,
+ 0x04c0b604,
+ 0x1bf4cda4,
+ 0x02abc8df,
+/* 0x01bf: mmctx_fini_wait */
+ 0x8b1c1bf4,
+ 0xcf01c500,
+ 0xb4f000bb,
+ 0x10b4b01f,
+ 0x0af31bf4,
+ 0x00b87e02,
+ 0x250ef400,
+/* 0x01d8: mmctx_stop */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x12b9f00c,
+ 0x01c50080,
+ 0xbd000bf6,
+/* 0x01ed: mmctx_stop_wait */
+ 0xc5008b04,
+ 0x00bbcf01,
+ 0xf412bbc8,
+/* 0x01fa: mmctx_done */
+ 0x94bdf61b,
+ 0x800199f0,
+ 0xf6021700,
+ 0x04bd0009,
+/* 0x020a: strand_wait */
+ 0xa0f900f8,
+ 0xb87e020a,
+ 0xa0fc0000,
+/* 0x0216: strand_pre */
+ 0x0c0900f8,
+ 0x024afc80,
+ 0xbd0009f6,
+ 0x020a7e04,
+/* 0x0227: strand_post */
+ 0x0900f800,
+ 0x4afc800d,
+ 0x0009f602,
+ 0x0a7e04bd,
+ 0x00f80002,
+/* 0x0238: strand_set */
+ 0xfc800f0c,
+ 0x0cf6024f,
+ 0x0c04bd00,
+ 0x4afc800b,
+ 0x000cf602,
+ 0xfc8004bd,
+ 0x0ef6024f,
+ 0x0c04bd00,
+ 0x4afc800a,
+ 0x000cf602,
+ 0x0a7e04bd,
+ 0x00f80002,
+/* 0x0268: strand_ctx_init */
+ 0x99f094bd,
+ 0x37008003,
+ 0x0009f602,
+ 0x167e04bd,
+ 0x030e0002,
+ 0x0002387e,
+ 0xfc80c4bd,
+ 0x0cf60247,
+ 0x0c04bd00,
+ 0x4afc8001,
+ 0x000cf602,
+ 0x0a7e04bd,
+ 0x0c920002,
+ 0x46fc8001,
+ 0x000cf602,
+ 0x020c04bd,
+ 0x024afc80,
+ 0xbd000cf6,
+ 0x020a7e04,
+ 0x02277e00,
+ 0x42008800,
+ 0x20008902,
+ 0x0099cf02,
+/* 0x02c7: ctx_init_strand_loop */
+ 0xf608fe95,
+ 0x8ef6008e,
+ 0x808acf40,
+ 0xb606a5b6,
+ 0xeabb01a0,
+ 0x0480b600,
+ 0xf40192b6,
+ 0xe4b6e81b,
+ 0xf2efbc08,
+ 0x99f094bd,
+ 0x17008003,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x02f8: error */
+ 0xffb2e0f9,
+ 0x4098148e,
+ 0x00008f7e,
+ 0xffb2010f,
+ 0x409c1c8e,
+ 0x00008f7e,
+ 0x00f8e0fc,
+/* 0x0314: init */
+ 0x04fe04bd,
+ 0x40020200,
+ 0x02f61200,
+ 0x4104bd00,
+ 0x10fe0465,
+ 0x07004000,
+ 0xbd0000f6,
+ 0x40040204,
+ 0x02f60400,
+ 0xf404bd00,
+ 0x00821031,
+ 0x22cf0182,
+ 0xf0010300,
+ 0x32bb1f24,
+ 0x0132b604,
+ 0xb50502b5,
+ 0x00820603,
+ 0x22cf0186,
+ 0x0402b500,
+ 0x500c308e,
+ 0x34bd24bd,
+/* 0x036a: init_unk_loop */
+ 0x657e44bd,
+ 0xf6b00000,
+ 0x0e0bf400,
+ 0xf2bb010f,
+ 0x054ffd04,
+/* 0x037f: init_unk_next */
+ 0xb60130b6,
+ 0xe0b60120,
+ 0x0126b004,
+/* 0x038b: init_unk_done */
+ 0xb5e21bf4,
+ 0x04b50703,
+ 0x01008208,
+ 0x0022cf02,
+ 0x259534bd,
+ 0xc0008008,
+ 0x0005f601,
+ 0x008004bd,
+ 0x05f601c1,
+ 0x9804bd00,
+ 0x0f98000e,
+ 0x01207e01,
+ 0x002fbb00,
+ 0x98003fbb,
+ 0x0f98010e,
+ 0x01207e02,
+ 0x050e9800,
+ 0xbb00effd,
+ 0x3ebb002e,
+ 0x020e9800,
+ 0x7e030f98,
+ 0x98000120,
+ 0xeffd070e,
+ 0x002ebb00,
+ 0xb6003ebb,
+ 0x00800235,
+ 0x03f601d3,
+ 0xb604bd00,
+ 0x35b60825,
+ 0x0120b606,
+ 0xb60130b6,
+ 0x34b60824,
+ 0x7e2fb208,
+ 0xbb000268,
+ 0x0080003f,
+ 0x03f60201,
+ 0xbd04bd00,
+ 0x1f29f024,
+ 0x02300080,
+ 0xbd0002f6,
+/* 0x0429: main */
+ 0x0031f404,
+ 0x0d0028f4,
+ 0x00377e24,
+ 0xf401f400,
+ 0xf404e4b0,
+ 0x81fe1d18,
+ 0xbd060201,
+ 0x0412fd20,
+ 0xfd01e4b6,
+ 0x18fe051e,
+ 0x04fc7e00,
+ 0xd40ef400,
+/* 0x0458: main_not_ctx_xfer */
+ 0xf010ef94,
+ 0xf87e01f5,
+ 0x0ef40002,
+/* 0x0465: ih */
+ 0xfe80f9c7,
+ 0x80f90188,
+ 0xa0f990f9,
+ 0xd0f9b0f9,
+ 0xf0f9e0f9,
+ 0x004a04bd,
+ 0x00aacf02,
+ 0xf404abc4,
+ 0x240d1f0b,
+ 0xcf1a004e,
+ 0x004f00ee,
+ 0x00ffcf19,
+ 0x0000047e,
+ 0x0040010e,
+ 0x000ef61d,
+/* 0x04a2: ih_no_fifo */
+ 0x004004bd,
+ 0x000af601,
+ 0xf0fc04bd,
+ 0xd0fce0fc,
+ 0xa0fcb0fc,
+ 0x80fc90fc,
+ 0xfc0088fe,
+ 0x0032f480,
+/* 0x04c2: hub_barrier_done */
+ 0x010f01f8,
+ 0xbb040e98,
+ 0xffb204fe,
+ 0x4094188e,
+ 0x00008f7e,
+/* 0x04d6: ctx_redswitch */
+ 0x200f00f8,
+ 0x01850080,
+ 0xbd000ff6,
+/* 0x04e3: ctx_redswitch_delay */
+ 0xb6080e04,
+ 0x1bf401e2,
+ 0x00f5f1fd,
+ 0x00f5f108,
+ 0x85008002,
+ 0x000ff601,
+ 0x00f804bd,
+/* 0x04fc: ctx_xfer */
+ 0x02810080,
+ 0xbd000ff6,
+ 0x0711f404,
+ 0x0004d67e,
+/* 0x050c: ctx_xfer_not_load */
+ 0x0002167e,
+ 0xfc8024bd,
+ 0x02f60247,
+ 0xf004bd00,
+ 0x20b6012c,
+ 0x4afc8003,
+ 0x0002f602,
+ 0xacf004bd,
+ 0x02a5f001,
+ 0x5000008b,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x000c9800,
+ 0x0e010d98,
+ 0x013d7e00,
+ 0x01acf000,
+ 0x5040008b,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x010c9800,
+ 0x98020d98,
+ 0x004e060f,
+ 0x013d7e08,
+ 0x01acf000,
+ 0x8b04a5f0,
+ 0x98503000,
+ 0xc4b6040c,
+ 0x00bcbb0f,
+ 0x98020c98,
+ 0x0f98030d,
+ 0x02004e08,
+ 0x00013d7e,
+ 0x00020a7e,
+ 0xf40601f4,
+/* 0x0596: ctx_xfer_post */
+ 0x277e0712,
+/* 0x059a: ctx_xfer_done */
+ 0xc27e0002,
+ 0x00f80004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
index f2b0dea80116..0e7b01efae8d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
@@ -37,14 +37,14 @@ uint32_t nvc0_grgpc_data[] = {
};
uint32_t nvc0_grgpc_code[] = {
- 0x03180ef5,
+ 0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
- 0x00f802fe,
+ 0x00f8037e,
/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
@@ -68,184 +68,214 @@ uint32_t nvc0_grgpc_code[] = {
/* 0x0066: queue_get_done */
0x00f80132,
/* 0x0068: nv_rd32 */
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010921f5,
- 0xf840bfcf,
-/* 0x008d: nv_wr32 */
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
-/* 0x00bd: watchdog_clear */
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
-/* 0x00c9: wait_donez */
- 0xf094bd00,
- 0x07f10099,
- 0x03f00f00,
- 0x0009d002,
- 0x07f104bd,
- 0x03f00600,
- 0x000ad002,
-/* 0x00e6: wait_donez_ne */
- 0x87f104bd,
- 0x83f00000,
- 0x0088cf01,
- 0xf4888aff,
- 0x94bdf31b,
- 0xf10099f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0109: wait_doneo */
- 0xf094bd00,
+ 0xf002ecb9,
+ 0x07f11fc9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x007a: nv_rd32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0xa7f0f31b,
+ 0x1021f506,
+ 0x00f7f101,
+ 0x01f3f0cb,
+ 0xf800ffcf,
+/* 0x009d: nv_wr32 */
+ 0x0007f100,
+ 0x0103f0cc,
+ 0xbd000fd0,
+ 0x02ecb904,
+ 0xf01fc9f0,
+ 0x07f11ec9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x00be: nv_wr32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f31b,
+/* 0x00d0: wait_donez */
+ 0x99f094bd,
+ 0x0007f100,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x1bf4888a,
+ 0xf094bdf3,
0x07f10099,
- 0x03f00f00,
+ 0x03f01700,
0x0009d002,
- 0x87f104bd,
- 0x84b60818,
- 0x008ad006,
-/* 0x0124: wait_doneo_e */
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
+ 0x00f804bd,
+/* 0x0110: wait_doneo */
0x99f094bd,
0x0007f100,
- 0x0203f017,
+ 0x0203f00f,
0xbd0009d0,
-/* 0x0147: mmctx_size */
- 0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
- 0x00e89894,
- 0xb61a85b6,
- 0x84b60180,
- 0x0098bb02,
- 0xb804e0b6,
- 0x1bf404ef,
- 0x029fb9eb,
-/* 0x0166: mmctx_xfer */
- 0x94bd00f8,
- 0xf10199f0,
- 0xf00f0007,
- 0x09d00203,
- 0xf104bd00,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
-/* 0x018c: mmctx_base_disabled */
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
- 0xb70199f0,
- 0xc8010080,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x0bf4888a,
+ 0xf094bdf3,
+ 0x07f10099,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0xf404efb8,
+ 0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+ 0xbd00f802,
+ 0x0199f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0xbbfd94bd,
+ 0x120bf405,
+ 0xc40007f1,
+ 0xd00103f0,
+ 0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0x0007f11e,
+ 0x0103f0c6,
+ 0xbd000ed0,
+ 0x0007f104,
+ 0x0103f0c7,
+ 0xbd000fd0,
+ 0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x01aec80c,
+ 0xfd11e4b6,
+ 0x07f105be,
+ 0x03f0c500,
+ 0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+ 0xe7f104bd,
+ 0xe3f0c500,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f30b,
+ 0x05e9fd00,
+ 0xc80007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+ 0xb804c0b6,
+ 0x1bf404cd,
+ 0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+ 0xf11f1bf4,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x1fb4f000,
+ 0xf410b4b0,
+ 0xa7f0f01b,
+ 0xd021f402,
+/* 0x0223: mmctx_stop */
+ 0xc82b0ef4,
0xb4b600ab,
0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
-/* 0x01ea: mmctx_stop */
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
- 0x008bcf00,
- 0xf412bbc8,
-/* 0x0202: mmctx_done */
- 0x94bdfa1b,
- 0xf10199f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0215: strand_wait */
- 0xf0a0f900,
- 0x21f402a7,
- 0xf8a0fcc9,
-/* 0x0221: strand_pre */
- 0xfc87f100,
- 0x0283f04a,
- 0xd00c97f0,
- 0x21f50089,
- 0x00f80215,
-/* 0x0234: strand_post */
- 0x4afc87f1,
- 0xf00283f0,
- 0x89d00d97,
- 0x1521f500,
-/* 0x0247: strand_set */
- 0xf100f802,
- 0xf04ffca7,
- 0xaba202a3,
- 0xc7f00500,
- 0x00acd00f,
- 0xd00bc7f0,
- 0x21f500bc,
- 0xaed00215,
- 0x0ac7f000,
- 0xf500bcd0,
- 0xf8021521,
-/* 0x0271: strand_ctx_init */
- 0xf094bd00,
- 0x07f10399,
- 0x03f00f00,
+ 0xf112b9f0,
+ 0xf0c50007,
+ 0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+ 0xf104bd00,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x12bbc800,
+/* 0x024b: mmctx_done */
+ 0xbdf31bf4,
+ 0x0199f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x025e: strand_wait */
+ 0xa0f900f8,
+ 0xf402a7f0,
+ 0xa0fcd021,
+/* 0x026a: strand_pre */
+ 0x97f000f8,
+ 0xfc07f10c,
+ 0x0203f04a,
+ 0xbd0009d0,
+ 0x5e21f504,
+/* 0x027f: strand_post */
+ 0xf000f802,
+ 0x07f10d97,
+ 0x03f04afc,
0x0009d002,
0x21f504bd,
- 0xe7f00221,
- 0x4721f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x1521f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x1521f500,
- 0x3421f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+ 0x00f8025e,
+/* 0x0294: strand_set */
+ 0xf10fc7f0,
+ 0xf04ffc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f10bc7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x07f104bd,
+ 0x03f04ffc,
+ 0x000ed002,
+ 0xc7f004bd,
+ 0xfc07f10a,
+ 0x0203f04a,
+ 0xbd000cd0,
+ 0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+ 0xbd00f802,
+ 0x0399f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0x026a21f5,
+ 0xf503e7f0,
+ 0xbd029421,
+ 0xfc07f1c4,
+ 0x0203f047,
+ 0xbd000cd0,
+ 0x01c7f004,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd000c,
+ 0x025e21f5,
+ 0xf1010c92,
+ 0xf046fc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f102c7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x21f504bd,
+ 0x21f5025e,
+ 0x87f1027f,
+ 0x83f04200,
+ 0x0097f102,
+ 0x0293f020,
+ 0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -259,167 +289,199 @@ uint32_t nvc0_grgpc_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
0xe0f900f8,
- 0x9814e7f1,
- 0xf440e3f0,
- 0xe0b78d21,
- 0xf7f0041c,
- 0x8d21f401,
- 0x00f8e0fc,
-/* 0x0318: init */
- 0x04fe04bd,
- 0x0017f100,
- 0x0227f012,
- 0xf10012d0,
- 0xfe042617,
- 0x17f10010,
- 0x10d00400,
- 0x0427f0c0,
- 0xf40012d0,
- 0x17f11031,
- 0x14b60608,
- 0x0012cf06,
+ 0xf102ffb9,
+ 0xf09814e7,
+ 0x21f440e3,
+ 0x01f7f09d,
+ 0xf102ffb9,
+ 0xf09c1ce7,
+ 0x21f440e3,
+ 0xf8e0fc9d,
+/* 0x03a1: init */
+ 0xfe04bd00,
+ 0x27f00004,
+ 0x0007f102,
+ 0x0003f012,
+ 0xbd0002d0,
+ 0xd517f104,
+ 0x0010fe04,
+ 0x070007f1,
+ 0xd00003f0,
+ 0x04bd0000,
+ 0xf10427f0,
+ 0xf0040007,
+ 0x02d00003,
+ 0xf404bd00,
+ 0x27f11031,
+ 0x23f08200,
+ 0x0022cf01,
0xf00137f0,
0x32bb1f24,
0x0132b604,
0x80050280,
- 0x10b70603,
- 0x12cf0400,
- 0x04028000,
- 0x010027f1,
- 0xcf0223f0,
- 0x34bd0022,
- 0x070047f1,
- 0x950644b6,
- 0x45d00825,
- 0x4045d000,
- 0x98000e98,
- 0x21f5010f,
- 0x2fbb0147,
- 0x003fbb00,
- 0x98010e98,
- 0x21f5020f,
- 0x0e980147,
- 0x00effd05,
- 0xbb002ebb,
- 0x40b7003e,
- 0x35b61300,
- 0x0043d002,
- 0xb60825b6,
- 0x20b60635,
- 0x0130b601,
- 0xb60824b6,
- 0x2fb90834,
- 0x7121f502,
- 0x003fbb02,
- 0x010007f1,
+ 0x27f10603,
+ 0x23f08600,
+ 0x0022cf01,
+ 0xf1040280,
+ 0xf0010027,
+ 0x22cf0223,
+ 0x9534bd00,
+ 0x07f10825,
+ 0x03f0c000,
+ 0x0005d001,
+ 0x07f104bd,
+ 0x03f0c100,
+ 0x0005d001,
+ 0x0e9804bd,
+ 0x010f9800,
+ 0x015021f5,
+ 0xbb002fbb,
+ 0x0e98003f,
+ 0x020f9801,
+ 0x015021f5,
+ 0xfd050e98,
+ 0x2ebb00ef,
+ 0x003ebb00,
+ 0xf10235b6,
+ 0xf0d30007,
+ 0x03d00103,
+ 0xb604bd00,
+ 0x35b60825,
+ 0x0120b606,
+ 0xb60130b6,
+ 0x34b60824,
+ 0x022fb908,
+ 0x02d321f5,
+ 0xf1003fbb,
+ 0xf0010007,
+ 0x03d00203,
+ 0xbd04bd00,
+ 0x1f29f024,
+ 0x080007f1,
0xd00203f0,
- 0x04bd0003,
- 0x29f024bd,
- 0x0007f11f,
- 0x0203f008,
- 0xbd0002d0,
-/* 0x03e9: main */
- 0x0031f404,
- 0xf00028f4,
- 0x21f41cd7,
- 0xf401f439,
- 0xf404e4b0,
- 0x81fe1e18,
- 0x0627f001,
- 0x12fd20bd,
- 0x01e4b604,
- 0xfe051efd,
- 0x21f50018,
- 0x0ef404ad,
-/* 0x0419: main_not_ctx_xfer */
- 0x10ef94d3,
- 0xf501f5f0,
- 0xf402fe21,
-/* 0x0426: ih */
- 0x80f9c60e,
- 0xf90188fe,
- 0xf990f980,
- 0xf9b0f9a0,
- 0xf9e0f9d0,
- 0xcf04bdf0,
- 0xabc4800a,
- 0x1d0bf404,
- 0x1900b7f1,
- 0xcf1cd7f0,
- 0xbfcf40be,
+ 0x04bd0002,
+/* 0x0498: main */
+ 0xf40031f4,
+ 0xd7f00028,
+ 0x3921f41c,
+ 0xb0f401f4,
+ 0x18f404e4,
+ 0x0181fe1e,
+ 0xbd0627f0,
+ 0x0412fd20,
+ 0xfd01e4b6,
+ 0x18fe051e,
+ 0x8d21f500,
+ 0xd30ef405,
+/* 0x04c8: main_not_ctx_xfer */
+ 0xf010ef94,
+ 0x21f501f5,
+ 0x0ef4037e,
+/* 0x04d5: ih */
+ 0xfe80f9c6,
+ 0x80f90188,
+ 0xa0f990f9,
+ 0xd0f9b0f9,
+ 0xf0f9e0f9,
+ 0xa7f104bd,
+ 0xa3f00200,
+ 0x00aacf00,
+ 0xf404abc4,
+ 0xd7f02c0b,
+ 0x00e7f11c,
+ 0x00e3f01a,
+ 0xf100eecf,
+ 0xf01900f7,
+ 0xffcf00f3,
0x0421f400,
- 0x0400b0b7,
- 0xd001e7f0,
-/* 0x045e: ih_no_fifo */
- 0x0ad000be,
- 0xfcf0fc40,
- 0xfcd0fce0,
- 0xfca0fcb0,
- 0xfe80fc90,
- 0x80fc0088,
- 0xf80032f4,
-/* 0x0479: hub_barrier_done */
- 0x01f7f001,
- 0xbb040e98,
- 0xe7f104fe,
- 0xe3f09418,
- 0x8d21f440,
-/* 0x048e: ctx_redswitch */
- 0xe7f100f8,
- 0xe4b60614,
- 0x20f7f006,
- 0xf000efd0,
-/* 0x049e: ctx_redswitch_delay */
- 0xf2b608f7,
- 0xfd1bf401,
- 0x0a20f7f1,
- 0xf800efd0,
-/* 0x04ad: ctx_xfer */
- 0x0417f100,
- 0x0614b60a,
- 0xf4001fd0,
- 0x21f50711,
-/* 0x04be: ctx_xfer_not_load */
- 0x17f1048e,
- 0x13f04afc,
- 0x0c27f002,
- 0xf50012d0,
- 0xf1021521,
- 0xf047fc27,
- 0x20d00223,
- 0x012cf000,
- 0xd00320b6,
- 0xacf00012,
- 0x02a5f001,
- 0xf000b7f0,
- 0x0c9850b3,
- 0x0fc4b604,
- 0x9800bcbb,
- 0x0d98000c,
- 0x00e7f001,
- 0x016621f5,
+ 0xf101e7f0,
+ 0xf01d0007,
+ 0x0ed00003,
+/* 0x0523: ih_no_fifo */
+ 0xf104bd00,
+ 0xf0010007,
+ 0x0ad00003,
+ 0xfc04bd00,
+ 0xfce0fcf0,
+ 0xfcb0fcd0,
+ 0xfc90fca0,
+ 0x0088fe80,
+ 0x32f480fc,
+/* 0x0547: hub_barrier_done */
+ 0xf001f800,
+ 0x0e9801f7,
+ 0x04febb04,
+ 0xf102ffb9,
+ 0xf09418e7,
+ 0x21f440e3,
+/* 0x055f: ctx_redswitch */
+ 0xf000f89d,
+ 0x07f120f7,
+ 0x03f08500,
+ 0x000fd001,
+ 0xe7f004bd,
+/* 0x0571: ctx_redswitch_delay */
+ 0x01e2b608,
+ 0xf1fd1bf4,
+ 0xf10800f5,
+ 0xf10200f5,
+ 0xf0850007,
+ 0x0fd00103,
+ 0xf804bd00,
+/* 0x058d: ctx_xfer */
+ 0x0007f100,
+ 0x0203f081,
+ 0xbd000fd0,
+ 0x0711f404,
+ 0x055f21f5,
+/* 0x05a0: ctx_xfer_not_load */
+ 0x026a21f5,
+ 0x07f124bd,
+ 0x03f047fc,
+ 0x0002d002,
+ 0x2cf004bd,
+ 0x0320b601,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd0002,
0xf001acf0,
- 0xb7f104a5,
- 0xb3f04000,
+ 0xb7f102a5,
+ 0xb3f00000,
0x040c9850,
0xbb0fc4b6,
0x0c9800bc,
- 0x020d9801,
- 0xf1060f98,
- 0xf50800e7,
- 0xf5016621,
- 0xf4021521,
- 0x12f40601,
-/* 0x0535: ctx_xfer_post */
- 0xfc17f114,
- 0x0213f04a,
- 0xd00d27f0,
- 0x21f50012,
-/* 0x0546: ctx_xfer_done */
- 0x21f50215,
- 0x00f80479,
+ 0x010d9800,
+ 0xf500e7f0,
+ 0xf0016f21,
+ 0xa5f001ac,
+ 0x00b7f104,
+ 0x50b3f040,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x010c9800,
+ 0x98020d98,
+ 0xe7f1060f,
+ 0x21f50800,
+ 0x21f5016f,
+ 0x01f4025e,
+ 0x0712f406,
+/* 0x0618: ctx_xfer_post */
+ 0x027f21f5,
+/* 0x061c: ctx_xfer_done */
+ 0x054721f5,
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
index dd346c2a1624..84dd32db28a0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
@@ -41,14 +41,14 @@ uint32_t nvd7_grgpc_data[] = {
};
uint32_t nvd7_grgpc_code[] = {
- 0x03180ef5,
+ 0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
- 0x00f802fe,
+ 0x00f8037e,
/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
@@ -72,184 +72,214 @@ uint32_t nvd7_grgpc_code[] = {
/* 0x0066: queue_get_done */
0x00f80132,
/* 0x0068: nv_rd32 */
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010921f5,
- 0xf840bfcf,
-/* 0x008d: nv_wr32 */
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
-/* 0x00bd: watchdog_clear */
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
-/* 0x00c9: wait_donez */
- 0xf094bd00,
- 0x07f10099,
- 0x03f00f00,
- 0x0009d002,
- 0x07f104bd,
- 0x03f00600,
- 0x000ad002,
-/* 0x00e6: wait_donez_ne */
- 0x87f104bd,
- 0x83f00000,
- 0x0088cf01,
- 0xf4888aff,
- 0x94bdf31b,
- 0xf10099f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0109: wait_doneo */
- 0xf094bd00,
+ 0xf002ecb9,
+ 0x07f11fc9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x007a: nv_rd32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0xa7f0f31b,
+ 0x1021f506,
+ 0x00f7f101,
+ 0x01f3f0cb,
+ 0xf800ffcf,
+/* 0x009d: nv_wr32 */
+ 0x0007f100,
+ 0x0103f0cc,
+ 0xbd000fd0,
+ 0x02ecb904,
+ 0xf01fc9f0,
+ 0x07f11ec9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x00be: nv_wr32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f31b,
+/* 0x00d0: wait_donez */
+ 0x99f094bd,
+ 0x0007f100,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x1bf4888a,
+ 0xf094bdf3,
0x07f10099,
- 0x03f00f00,
+ 0x03f01700,
0x0009d002,
- 0x87f104bd,
- 0x84b60818,
- 0x008ad006,
-/* 0x0124: wait_doneo_e */
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
+ 0x00f804bd,
+/* 0x0110: wait_doneo */
0x99f094bd,
0x0007f100,
- 0x0203f017,
+ 0x0203f00f,
0xbd0009d0,
-/* 0x0147: mmctx_size */
- 0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
- 0x00e89894,
- 0xb61a85b6,
- 0x84b60180,
- 0x0098bb02,
- 0xb804e0b6,
- 0x1bf404ef,
- 0x029fb9eb,
-/* 0x0166: mmctx_xfer */
- 0x94bd00f8,
- 0xf10199f0,
- 0xf00f0007,
- 0x09d00203,
- 0xf104bd00,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
-/* 0x018c: mmctx_base_disabled */
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
- 0xb70199f0,
- 0xc8010080,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x0bf4888a,
+ 0xf094bdf3,
+ 0x07f10099,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0xf404efb8,
+ 0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+ 0xbd00f802,
+ 0x0199f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0xbbfd94bd,
+ 0x120bf405,
+ 0xc40007f1,
+ 0xd00103f0,
+ 0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0x0007f11e,
+ 0x0103f0c6,
+ 0xbd000ed0,
+ 0x0007f104,
+ 0x0103f0c7,
+ 0xbd000fd0,
+ 0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x01aec80c,
+ 0xfd11e4b6,
+ 0x07f105be,
+ 0x03f0c500,
+ 0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+ 0xe7f104bd,
+ 0xe3f0c500,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f30b,
+ 0x05e9fd00,
+ 0xc80007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+ 0xb804c0b6,
+ 0x1bf404cd,
+ 0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+ 0xf11f1bf4,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x1fb4f000,
+ 0xf410b4b0,
+ 0xa7f0f01b,
+ 0xd021f402,
+/* 0x0223: mmctx_stop */
+ 0xc82b0ef4,
0xb4b600ab,
0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
-/* 0x01ea: mmctx_stop */
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
- 0x008bcf00,
- 0xf412bbc8,
-/* 0x0202: mmctx_done */
- 0x94bdfa1b,
- 0xf10199f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0215: strand_wait */
- 0xf0a0f900,
- 0x21f402a7,
- 0xf8a0fcc9,
-/* 0x0221: strand_pre */
- 0xfc87f100,
- 0x0283f04a,
- 0xd00c97f0,
- 0x21f50089,
- 0x00f80215,
-/* 0x0234: strand_post */
- 0x4afc87f1,
- 0xf00283f0,
- 0x89d00d97,
- 0x1521f500,
-/* 0x0247: strand_set */
- 0xf100f802,
- 0xf04ffca7,
- 0xaba202a3,
- 0xc7f00500,
- 0x00acd00f,
- 0xd00bc7f0,
- 0x21f500bc,
- 0xaed00215,
- 0x0ac7f000,
- 0xf500bcd0,
- 0xf8021521,
-/* 0x0271: strand_ctx_init */
- 0xf094bd00,
- 0x07f10399,
- 0x03f00f00,
+ 0xf112b9f0,
+ 0xf0c50007,
+ 0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+ 0xf104bd00,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x12bbc800,
+/* 0x024b: mmctx_done */
+ 0xbdf31bf4,
+ 0x0199f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x025e: strand_wait */
+ 0xa0f900f8,
+ 0xf402a7f0,
+ 0xa0fcd021,
+/* 0x026a: strand_pre */
+ 0x97f000f8,
+ 0xfc07f10c,
+ 0x0203f04a,
+ 0xbd0009d0,
+ 0x5e21f504,
+/* 0x027f: strand_post */
+ 0xf000f802,
+ 0x07f10d97,
+ 0x03f04afc,
0x0009d002,
0x21f504bd,
- 0xe7f00221,
- 0x4721f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x1521f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x1521f500,
- 0x3421f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+ 0x00f8025e,
+/* 0x0294: strand_set */
+ 0xf10fc7f0,
+ 0xf04ffc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f10bc7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x07f104bd,
+ 0x03f04ffc,
+ 0x000ed002,
+ 0xc7f004bd,
+ 0xfc07f10a,
+ 0x0203f04a,
+ 0xbd000cd0,
+ 0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+ 0xbd00f802,
+ 0x0399f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0x026a21f5,
+ 0xf503e7f0,
+ 0xbd029421,
+ 0xfc07f1c4,
+ 0x0203f047,
+ 0xbd000cd0,
+ 0x01c7f004,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd000c,
+ 0x025e21f5,
+ 0xf1010c92,
+ 0xf046fc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f102c7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x21f504bd,
+ 0x21f5025e,
+ 0x87f1027f,
+ 0x83f04200,
+ 0x0097f102,
+ 0x0293f020,
+ 0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -263,198 +293,230 @@ uint32_t nvd7_grgpc_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
0xe0f900f8,
- 0x9814e7f1,
- 0xf440e3f0,
- 0xe0b78d21,
- 0xf7f0041c,
- 0x8d21f401,
- 0x00f8e0fc,
-/* 0x0318: init */
- 0x04fe04bd,
- 0x0017f100,
- 0x0227f012,
- 0xf10012d0,
- 0xfe047017,
- 0x17f10010,
- 0x10d00400,
- 0x0427f0c0,
- 0xf40012d0,
- 0x17f11031,
- 0x14b60608,
- 0x0012cf06,
+ 0xf102ffb9,
+ 0xf09814e7,
+ 0x21f440e3,
+ 0x01f7f09d,
+ 0xf102ffb9,
+ 0xf09c1ce7,
+ 0x21f440e3,
+ 0xf8e0fc9d,
+/* 0x03a1: init */
+ 0xfe04bd00,
+ 0x27f00004,
+ 0x0007f102,
+ 0x0003f012,
+ 0xbd0002d0,
+ 0x1f17f104,
+ 0x0010fe05,
+ 0x070007f1,
+ 0xd00003f0,
+ 0x04bd0000,
+ 0xf10427f0,
+ 0xf0040007,
+ 0x02d00003,
+ 0xf404bd00,
+ 0x27f11031,
+ 0x23f08200,
+ 0x0022cf01,
0xf00137f0,
0x32bb1f24,
0x0132b604,
0x80050280,
- 0x10b70603,
- 0x12cf0400,
- 0x04028000,
- 0x0c30e7f1,
- 0xbd50e3f0,
- 0xbd34bd24,
-/* 0x0371: init_unk_loop */
- 0x6821f444,
- 0xf400f6b0,
- 0xf7f00f0b,
- 0x04f2bb01,
- 0xb6054ffd,
-/* 0x0386: init_unk_next */
- 0x20b60130,
- 0x04e0b601,
- 0xf40126b0,
-/* 0x0392: init_unk_done */
- 0x0380e21b,
- 0x08048007,
- 0x010027f1,
- 0xcf0223f0,
- 0x34bd0022,
- 0x070047f1,
- 0x950644b6,
- 0x45d00825,
- 0x4045d000,
- 0x98000e98,
- 0x21f5010f,
- 0x2fbb0147,
- 0x003fbb00,
- 0x98010e98,
- 0x21f5020f,
- 0x0e980147,
- 0x00effd05,
- 0xbb002ebb,
- 0x0e98003e,
- 0x030f9802,
- 0x014721f5,
- 0xfd070e98,
+ 0x27f10603,
+ 0x23f08600,
+ 0x0022cf01,
+ 0xf1040280,
+ 0xf00c30e7,
+ 0x24bd50e3,
+ 0x44bd34bd,
+/* 0x0410: init_unk_loop */
+ 0xb06821f4,
+ 0x0bf400f6,
+ 0x01f7f00f,
+ 0xfd04f2bb,
+ 0x30b6054f,
+/* 0x0425: init_unk_next */
+ 0x0120b601,
+ 0xb004e0b6,
+ 0x1bf40126,
+/* 0x0431: init_unk_done */
+ 0x070380e2,
+ 0xf1080480,
+ 0xf0010027,
+ 0x22cf0223,
+ 0x9534bd00,
+ 0x07f10825,
+ 0x03f0c000,
+ 0x0005d001,
+ 0x07f104bd,
+ 0x03f0c100,
+ 0x0005d001,
+ 0x0e9804bd,
+ 0x010f9800,
+ 0x015021f5,
+ 0xbb002fbb,
+ 0x0e98003f,
+ 0x020f9801,
+ 0x015021f5,
+ 0xfd050e98,
0x2ebb00ef,
0x003ebb00,
- 0x130040b7,
- 0xd00235b6,
- 0x25b60043,
- 0x0635b608,
- 0xb60120b6,
- 0x24b60130,
- 0x0834b608,
- 0xf5022fb9,
- 0xbb027121,
- 0x07f1003f,
- 0x03f00100,
- 0x0003d002,
- 0x24bd04bd,
- 0xf11f29f0,
- 0xf0080007,
- 0x02d00203,
-/* 0x0433: main */
+ 0x98020e98,
+ 0x21f5030f,
+ 0x0e980150,
+ 0x00effd07,
+ 0xbb002ebb,
+ 0x35b6003e,
+ 0x0007f102,
+ 0x0103f0d3,
+ 0xbd0003d0,
+ 0x0825b604,
+ 0xb60635b6,
+ 0x30b60120,
+ 0x0824b601,
+ 0xb90834b6,
+ 0x21f5022f,
+ 0x3fbb02d3,
+ 0x0007f100,
+ 0x0203f001,
+ 0xbd0003d0,
+ 0xf024bd04,
+ 0x07f11f29,
+ 0x03f00800,
+ 0x0002d002,
+/* 0x04e2: main */
+ 0x31f404bd,
+ 0x0028f400,
+ 0xf424d7f0,
+ 0x01f43921,
+ 0x04e4b0f4,
+ 0xfe1e18f4,
+ 0x27f00181,
+ 0xfd20bd06,
+ 0xe4b60412,
+ 0x051efd01,
+ 0xf50018fe,
+ 0xf405d721,
+/* 0x0512: main_not_ctx_xfer */
+ 0xef94d30e,
+ 0x01f5f010,
+ 0x037e21f5,
+/* 0x051f: ih */
+ 0xf9c60ef4,
+ 0x0188fe80,
+ 0x90f980f9,
+ 0xb0f9a0f9,
+ 0xe0f9d0f9,
+ 0x04bdf0f9,
+ 0x0200a7f1,
+ 0xcf00a3f0,
+ 0xabc400aa,
+ 0x2c0bf404,
+ 0xf124d7f0,
+ 0xf01a00e7,
+ 0xeecf00e3,
+ 0x00f7f100,
+ 0x00f3f019,
+ 0xf400ffcf,
+ 0xe7f00421,
+ 0x0007f101,
+ 0x0003f01d,
+ 0xbd000ed0,
+/* 0x056d: ih_no_fifo */
+ 0x0007f104,
+ 0x0003f001,
+ 0xbd000ad0,
+ 0xfcf0fc04,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0xf80032f4,
+/* 0x0591: hub_barrier_done */
+ 0x01f7f001,
+ 0xbb040e98,
+ 0xffb904fe,
+ 0x18e7f102,
+ 0x40e3f094,
+ 0xf89d21f4,
+/* 0x05a9: ctx_redswitch */
+ 0x20f7f000,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000f,
+/* 0x05bb: ctx_redswitch_delay */
+ 0xb608e7f0,
+ 0x1bf401e2,
+ 0x00f5f1fd,
+ 0x00f5f108,
+ 0x0007f102,
+ 0x0103f085,
+ 0xbd000fd0,
+/* 0x05d7: ctx_xfer */
+ 0xf100f804,
+ 0xf0810007,
+ 0x0fd00203,
0xf404bd00,
- 0x28f40031,
- 0x24d7f000,
- 0xf43921f4,
- 0xe4b0f401,
- 0x1e18f404,
- 0xf00181fe,
- 0x20bd0627,
- 0xb60412fd,
- 0x1efd01e4,
- 0x0018fe05,
- 0x04f721f5,
-/* 0x0463: main_not_ctx_xfer */
- 0x94d30ef4,
- 0xf5f010ef,
- 0xfe21f501,
- 0xc60ef402,
-/* 0x0470: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x800acf04,
- 0xf404abc4,
- 0xb7f11d0b,
- 0xd7f01900,
- 0x40becf24,
- 0xf400bfcf,
- 0xb0b70421,
- 0xe7f00400,
- 0x00bed001,
-/* 0x04a8: ih_no_fifo */
- 0xfc400ad0,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x04c3: hub_barrier_done */
- 0xf001f800,
- 0x0e9801f7,
- 0x04febb04,
- 0x9418e7f1,
- 0xf440e3f0,
- 0x00f88d21,
-/* 0x04d8: ctx_redswitch */
- 0x0614e7f1,
- 0xf006e4b6,
- 0xefd020f7,
- 0x08f7f000,
-/* 0x04e8: ctx_redswitch_delay */
- 0xf401f2b6,
- 0xf7f1fd1b,
- 0xefd00a20,
-/* 0x04f7: ctx_xfer */
- 0xf100f800,
- 0xb60a0417,
- 0x1fd00614,
- 0x0711f400,
- 0x04d821f5,
-/* 0x0508: ctx_xfer_not_load */
- 0x4afc17f1,
- 0xf00213f0,
- 0x12d00c27,
- 0x1521f500,
- 0xfc27f102,
- 0x0223f047,
- 0xf00020d0,
- 0x20b6012c,
- 0x0012d003,
- 0xf001acf0,
- 0xb7f002a5,
- 0x50b3f000,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x000c9800,
- 0xf0010d98,
- 0x21f500e7,
- 0xacf00166,
- 0x00b7f101,
- 0x50b3f040,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x010c9800,
- 0x98020d98,
- 0xe7f1060f,
- 0x21f50800,
- 0xacf00166,
- 0x04a5f001,
- 0x3000b7f1,
+ 0x21f50711,
+/* 0x05ea: ctx_xfer_not_load */
+ 0x21f505a9,
+ 0x24bd026a,
+ 0x47fc07f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xb6012cf0,
+ 0x07f10320,
+ 0x03f04afc,
+ 0x0002d002,
+ 0xacf004bd,
+ 0x02a5f001,
+ 0x0000b7f1,
0x9850b3f0,
0xc4b6040c,
0x00bcbb0f,
- 0x98020c98,
- 0x0f98030d,
- 0x00e7f108,
- 0x6621f502,
- 0x1521f501,
- 0x0601f402,
-/* 0x05a3: ctx_xfer_post */
- 0xf11412f4,
- 0xf04afc17,
- 0x27f00213,
- 0x0012d00d,
- 0x021521f5,
-/* 0x05b4: ctx_xfer_done */
- 0x04c321f5,
- 0x000000f8,
+ 0x98000c98,
+ 0xe7f0010d,
+ 0x6f21f500,
+ 0x01acf001,
+ 0x4000b7f1,
+ 0x9850b3f0,
+ 0xc4b6040c,
+ 0x00bcbb0f,
+ 0x98010c98,
+ 0x0f98020d,
+ 0x00e7f106,
+ 0x6f21f508,
+ 0x01acf001,
+ 0xf104a5f0,
+ 0xf03000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98020c,
+ 0x080f9803,
+ 0x0200e7f1,
+ 0x016f21f5,
+ 0x025e21f5,
+ 0xf40601f4,
+/* 0x0686: ctx_xfer_post */
+ 0x21f50712,
+/* 0x068a: ctx_xfer_done */
+ 0x21f5027f,
+ 0x00f80591,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
index 7ff5ef6b0804..b6da800ee9c2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
@@ -41,14 +41,14 @@ uint32_t nve0_grgpc_data[] = {
};
uint32_t nve0_grgpc_code[] = {
- 0x03180ef5,
+ 0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
- 0x00f802fe,
+ 0x00f8037e,
/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
@@ -72,184 +72,214 @@ uint32_t nve0_grgpc_code[] = {
/* 0x0066: queue_get_done */
0x00f80132,
/* 0x0068: nv_rd32 */
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010921f5,
- 0xf840bfcf,
-/* 0x008d: nv_wr32 */
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
-/* 0x00bd: watchdog_clear */
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
-/* 0x00c9: wait_donez */
- 0xf094bd00,
- 0x07f10099,
- 0x03f00f00,
- 0x0009d002,
- 0x07f104bd,
- 0x03f00600,
- 0x000ad002,
-/* 0x00e6: wait_donez_ne */
- 0x87f104bd,
- 0x83f00000,
- 0x0088cf01,
- 0xf4888aff,
- 0x94bdf31b,
- 0xf10099f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0109: wait_doneo */
- 0xf094bd00,
+ 0xf002ecb9,
+ 0x07f11fc9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x007a: nv_rd32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0xa7f0f31b,
+ 0x1021f506,
+ 0x00f7f101,
+ 0x01f3f0cb,
+ 0xf800ffcf,
+/* 0x009d: nv_wr32 */
+ 0x0007f100,
+ 0x0103f0cc,
+ 0xbd000fd0,
+ 0x02ecb904,
+ 0xf01fc9f0,
+ 0x07f11ec9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x00be: nv_wr32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f31b,
+/* 0x00d0: wait_donez */
+ 0x99f094bd,
+ 0x0007f100,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x1bf4888a,
+ 0xf094bdf3,
0x07f10099,
- 0x03f00f00,
+ 0x03f01700,
0x0009d002,
- 0x87f104bd,
- 0x84b60818,
- 0x008ad006,
-/* 0x0124: wait_doneo_e */
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
+ 0x00f804bd,
+/* 0x0110: wait_doneo */
0x99f094bd,
0x0007f100,
- 0x0203f017,
+ 0x0203f00f,
0xbd0009d0,
-/* 0x0147: mmctx_size */
- 0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
- 0x00e89894,
- 0xb61a85b6,
- 0x84b60180,
- 0x0098bb02,
- 0xb804e0b6,
- 0x1bf404ef,
- 0x029fb9eb,
-/* 0x0166: mmctx_xfer */
- 0x94bd00f8,
- 0xf10199f0,
- 0xf00f0007,
- 0x09d00203,
- 0xf104bd00,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
-/* 0x018c: mmctx_base_disabled */
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
- 0xb70199f0,
- 0xc8010080,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x0bf4888a,
+ 0xf094bdf3,
+ 0x07f10099,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0xf404efb8,
+ 0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+ 0xbd00f802,
+ 0x0199f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0xbbfd94bd,
+ 0x120bf405,
+ 0xc40007f1,
+ 0xd00103f0,
+ 0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0x0007f11e,
+ 0x0103f0c6,
+ 0xbd000ed0,
+ 0x0007f104,
+ 0x0103f0c7,
+ 0xbd000fd0,
+ 0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x01aec80c,
+ 0xfd11e4b6,
+ 0x07f105be,
+ 0x03f0c500,
+ 0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+ 0xe7f104bd,
+ 0xe3f0c500,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f30b,
+ 0x05e9fd00,
+ 0xc80007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+ 0xb804c0b6,
+ 0x1bf404cd,
+ 0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+ 0xf11f1bf4,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x1fb4f000,
+ 0xf410b4b0,
+ 0xa7f0f01b,
+ 0xd021f402,
+/* 0x0223: mmctx_stop */
+ 0xc82b0ef4,
0xb4b600ab,
0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
-/* 0x01ea: mmctx_stop */
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
- 0x008bcf00,
- 0xf412bbc8,
-/* 0x0202: mmctx_done */
- 0x94bdfa1b,
- 0xf10199f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0215: strand_wait */
- 0xf0a0f900,
- 0x21f402a7,
- 0xf8a0fcc9,
-/* 0x0221: strand_pre */
- 0xfc87f100,
- 0x0283f04a,
- 0xd00c97f0,
- 0x21f50089,
- 0x00f80215,
-/* 0x0234: strand_post */
- 0x4afc87f1,
- 0xf00283f0,
- 0x89d00d97,
- 0x1521f500,
-/* 0x0247: strand_set */
- 0xf100f802,
- 0xf04ffca7,
- 0xaba202a3,
- 0xc7f00500,
- 0x00acd00f,
- 0xd00bc7f0,
- 0x21f500bc,
- 0xaed00215,
- 0x0ac7f000,
- 0xf500bcd0,
- 0xf8021521,
-/* 0x0271: strand_ctx_init */
- 0xf094bd00,
- 0x07f10399,
- 0x03f00f00,
+ 0xf112b9f0,
+ 0xf0c50007,
+ 0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+ 0xf104bd00,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x12bbc800,
+/* 0x024b: mmctx_done */
+ 0xbdf31bf4,
+ 0x0199f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x025e: strand_wait */
+ 0xa0f900f8,
+ 0xf402a7f0,
+ 0xa0fcd021,
+/* 0x026a: strand_pre */
+ 0x97f000f8,
+ 0xfc07f10c,
+ 0x0203f04a,
+ 0xbd0009d0,
+ 0x5e21f504,
+/* 0x027f: strand_post */
+ 0xf000f802,
+ 0x07f10d97,
+ 0x03f04afc,
0x0009d002,
0x21f504bd,
- 0xe7f00221,
- 0x4721f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x1521f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x1521f500,
- 0x3421f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+ 0x00f8025e,
+/* 0x0294: strand_set */
+ 0xf10fc7f0,
+ 0xf04ffc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f10bc7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x07f104bd,
+ 0x03f04ffc,
+ 0x000ed002,
+ 0xc7f004bd,
+ 0xfc07f10a,
+ 0x0203f04a,
+ 0xbd000cd0,
+ 0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+ 0xbd00f802,
+ 0x0399f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0x026a21f5,
+ 0xf503e7f0,
+ 0xbd029421,
+ 0xfc07f1c4,
+ 0x0203f047,
+ 0xbd000cd0,
+ 0x01c7f004,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd000c,
+ 0x025e21f5,
+ 0xf1010c92,
+ 0xf046fc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f102c7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x21f504bd,
+ 0x21f5025e,
+ 0x87f1027f,
+ 0x83f04200,
+ 0x0097f102,
+ 0x0293f020,
+ 0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -263,198 +293,230 @@ uint32_t nve0_grgpc_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
0xe0f900f8,
- 0x9814e7f1,
- 0xf440e3f0,
- 0xe0b78d21,
- 0xf7f0041c,
- 0x8d21f401,
- 0x00f8e0fc,
-/* 0x0318: init */
- 0x04fe04bd,
- 0x0017f100,
- 0x0227f012,
- 0xf10012d0,
- 0xfe047017,
- 0x17f10010,
- 0x10d00400,
- 0x0427f0c0,
- 0xf40012d0,
- 0x17f11031,
- 0x14b60608,
- 0x0012cf06,
+ 0xf102ffb9,
+ 0xf09814e7,
+ 0x21f440e3,
+ 0x01f7f09d,
+ 0xf102ffb9,
+ 0xf09c1ce7,
+ 0x21f440e3,
+ 0xf8e0fc9d,
+/* 0x03a1: init */
+ 0xfe04bd00,
+ 0x27f00004,
+ 0x0007f102,
+ 0x0003f012,
+ 0xbd0002d0,
+ 0x1f17f104,
+ 0x0010fe05,
+ 0x070007f1,
+ 0xd00003f0,
+ 0x04bd0000,
+ 0xf10427f0,
+ 0xf0040007,
+ 0x02d00003,
+ 0xf404bd00,
+ 0x27f11031,
+ 0x23f08200,
+ 0x0022cf01,
0xf00137f0,
0x32bb1f24,
0x0132b604,
0x80050280,
- 0x10b70603,
- 0x12cf0400,
- 0x04028000,
- 0x0c30e7f1,
- 0xbd50e3f0,
- 0xbd34bd24,
-/* 0x0371: init_unk_loop */
- 0x6821f444,
- 0xf400f6b0,
- 0xf7f00f0b,
- 0x04f2bb01,
- 0xb6054ffd,
-/* 0x0386: init_unk_next */
- 0x20b60130,
- 0x04e0b601,
- 0xf40126b0,
-/* 0x0392: init_unk_done */
- 0x0380e21b,
- 0x08048007,
- 0x010027f1,
- 0xcf0223f0,
- 0x34bd0022,
- 0x070047f1,
- 0x950644b6,
- 0x45d00825,
- 0x4045d000,
- 0x98000e98,
- 0x21f5010f,
- 0x2fbb0147,
- 0x003fbb00,
- 0x98010e98,
- 0x21f5020f,
- 0x0e980147,
- 0x00effd05,
- 0xbb002ebb,
- 0x0e98003e,
- 0x030f9802,
- 0x014721f5,
- 0xfd070e98,
+ 0x27f10603,
+ 0x23f08600,
+ 0x0022cf01,
+ 0xf1040280,
+ 0xf00c30e7,
+ 0x24bd50e3,
+ 0x44bd34bd,
+/* 0x0410: init_unk_loop */
+ 0xb06821f4,
+ 0x0bf400f6,
+ 0x01f7f00f,
+ 0xfd04f2bb,
+ 0x30b6054f,
+/* 0x0425: init_unk_next */
+ 0x0120b601,
+ 0xb004e0b6,
+ 0x1bf40126,
+/* 0x0431: init_unk_done */
+ 0x070380e2,
+ 0xf1080480,
+ 0xf0010027,
+ 0x22cf0223,
+ 0x9534bd00,
+ 0x07f10825,
+ 0x03f0c000,
+ 0x0005d001,
+ 0x07f104bd,
+ 0x03f0c100,
+ 0x0005d001,
+ 0x0e9804bd,
+ 0x010f9800,
+ 0x015021f5,
+ 0xbb002fbb,
+ 0x0e98003f,
+ 0x020f9801,
+ 0x015021f5,
+ 0xfd050e98,
0x2ebb00ef,
0x003ebb00,
- 0x130040b7,
- 0xd00235b6,
- 0x25b60043,
- 0x0635b608,
- 0xb60120b6,
- 0x24b60130,
- 0x0834b608,
- 0xf5022fb9,
- 0xbb027121,
- 0x07f1003f,
- 0x03f00100,
- 0x0003d002,
- 0x24bd04bd,
- 0xf11f29f0,
- 0xf0080007,
- 0x02d00203,
-/* 0x0433: main */
+ 0x98020e98,
+ 0x21f5030f,
+ 0x0e980150,
+ 0x00effd07,
+ 0xbb002ebb,
+ 0x35b6003e,
+ 0x0007f102,
+ 0x0103f0d3,
+ 0xbd0003d0,
+ 0x0825b604,
+ 0xb60635b6,
+ 0x30b60120,
+ 0x0824b601,
+ 0xb90834b6,
+ 0x21f5022f,
+ 0x3fbb02d3,
+ 0x0007f100,
+ 0x0203f001,
+ 0xbd0003d0,
+ 0xf024bd04,
+ 0x07f11f29,
+ 0x03f00800,
+ 0x0002d002,
+/* 0x04e2: main */
+ 0x31f404bd,
+ 0x0028f400,
+ 0xf424d7f0,
+ 0x01f43921,
+ 0x04e4b0f4,
+ 0xfe1e18f4,
+ 0x27f00181,
+ 0xfd20bd06,
+ 0xe4b60412,
+ 0x051efd01,
+ 0xf50018fe,
+ 0xf405d721,
+/* 0x0512: main_not_ctx_xfer */
+ 0xef94d30e,
+ 0x01f5f010,
+ 0x037e21f5,
+/* 0x051f: ih */
+ 0xf9c60ef4,
+ 0x0188fe80,
+ 0x90f980f9,
+ 0xb0f9a0f9,
+ 0xe0f9d0f9,
+ 0x04bdf0f9,
+ 0x0200a7f1,
+ 0xcf00a3f0,
+ 0xabc400aa,
+ 0x2c0bf404,
+ 0xf124d7f0,
+ 0xf01a00e7,
+ 0xeecf00e3,
+ 0x00f7f100,
+ 0x00f3f019,
+ 0xf400ffcf,
+ 0xe7f00421,
+ 0x0007f101,
+ 0x0003f01d,
+ 0xbd000ed0,
+/* 0x056d: ih_no_fifo */
+ 0x0007f104,
+ 0x0003f001,
+ 0xbd000ad0,
+ 0xfcf0fc04,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0xf80032f4,
+/* 0x0591: hub_barrier_done */
+ 0x01f7f001,
+ 0xbb040e98,
+ 0xffb904fe,
+ 0x18e7f102,
+ 0x40e3f094,
+ 0xf89d21f4,
+/* 0x05a9: ctx_redswitch */
+ 0x20f7f000,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000f,
+/* 0x05bb: ctx_redswitch_delay */
+ 0xb608e7f0,
+ 0x1bf401e2,
+ 0x00f5f1fd,
+ 0x00f5f108,
+ 0x0007f102,
+ 0x0103f085,
+ 0xbd000fd0,
+/* 0x05d7: ctx_xfer */
+ 0xf100f804,
+ 0xf0810007,
+ 0x0fd00203,
0xf404bd00,
- 0x28f40031,
- 0x24d7f000,
- 0xf43921f4,
- 0xe4b0f401,
- 0x1e18f404,
- 0xf00181fe,
- 0x20bd0627,
- 0xb60412fd,
- 0x1efd01e4,
- 0x0018fe05,
- 0x04f721f5,
-/* 0x0463: main_not_ctx_xfer */
- 0x94d30ef4,
- 0xf5f010ef,
- 0xfe21f501,
- 0xc60ef402,
-/* 0x0470: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x800acf04,
- 0xf404abc4,
- 0xb7f11d0b,
- 0xd7f01900,
- 0x40becf24,
- 0xf400bfcf,
- 0xb0b70421,
- 0xe7f00400,
- 0x00bed001,
-/* 0x04a8: ih_no_fifo */
- 0xfc400ad0,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x04c3: hub_barrier_done */
- 0xf001f800,
- 0x0e9801f7,
- 0x04febb04,
- 0x9418e7f1,
- 0xf440e3f0,
- 0x00f88d21,
-/* 0x04d8: ctx_redswitch */
- 0x0614e7f1,
- 0xf006e4b6,
- 0xefd020f7,
- 0x08f7f000,
-/* 0x04e8: ctx_redswitch_delay */
- 0xf401f2b6,
- 0xf7f1fd1b,
- 0xefd00a20,
-/* 0x04f7: ctx_xfer */
- 0xf100f800,
- 0xb60a0417,
- 0x1fd00614,
- 0x0711f400,
- 0x04d821f5,
-/* 0x0508: ctx_xfer_not_load */
- 0x4afc17f1,
- 0xf00213f0,
- 0x12d00c27,
- 0x1521f500,
- 0xfc27f102,
- 0x0223f047,
- 0xf00020d0,
- 0x20b6012c,
- 0x0012d003,
- 0xf001acf0,
- 0xb7f002a5,
- 0x50b3f000,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x000c9800,
- 0xf0010d98,
- 0x21f500e7,
- 0xacf00166,
- 0x00b7f101,
- 0x50b3f040,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x010c9800,
- 0x98020d98,
- 0xe7f1060f,
- 0x21f50800,
- 0xacf00166,
- 0x04a5f001,
- 0x3000b7f1,
+ 0x21f50711,
+/* 0x05ea: ctx_xfer_not_load */
+ 0x21f505a9,
+ 0x24bd026a,
+ 0x47fc07f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xb6012cf0,
+ 0x07f10320,
+ 0x03f04afc,
+ 0x0002d002,
+ 0xacf004bd,
+ 0x02a5f001,
+ 0x0000b7f1,
0x9850b3f0,
0xc4b6040c,
0x00bcbb0f,
- 0x98020c98,
- 0x0f98030d,
- 0x00e7f108,
- 0x6621f502,
- 0x1521f501,
- 0x0601f402,
-/* 0x05a3: ctx_xfer_post */
- 0xf11412f4,
- 0xf04afc17,
- 0x27f00213,
- 0x0012d00d,
- 0x021521f5,
-/* 0x05b4: ctx_xfer_done */
- 0x04c321f5,
- 0x000000f8,
+ 0x98000c98,
+ 0xe7f0010d,
+ 0x6f21f500,
+ 0x01acf001,
+ 0x4000b7f1,
+ 0x9850b3f0,
+ 0xc4b6040c,
+ 0x00bcbb0f,
+ 0x98010c98,
+ 0x0f98020d,
+ 0x00e7f106,
+ 0x6f21f508,
+ 0x01acf001,
+ 0xf104a5f0,
+ 0xf03000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98020c,
+ 0x080f9803,
+ 0x0200e7f1,
+ 0x016f21f5,
+ 0x025e21f5,
+ 0xf40601f4,
+/* 0x0686: ctx_xfer_post */
+ 0x21f50712,
+/* 0x068a: ctx_xfer_done */
+ 0x21f5027f,
+ 0x00f80591,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
index f870507be880..6316ebaf5d9a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
@@ -41,14 +41,14 @@ uint32_t nvf0_grgpc_data[] = {
};
uint32_t nvf0_grgpc_code[] = {
- 0x03180ef5,
+ 0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
- 0x00f802fe,
+ 0x00f8037e,
/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
@@ -72,184 +72,214 @@ uint32_t nvf0_grgpc_code[] = {
/* 0x0066: queue_get_done */
0x00f80132,
/* 0x0068: nv_rd32 */
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010921f5,
- 0xf840bfcf,
-/* 0x008d: nv_wr32 */
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
-/* 0x00bd: watchdog_clear */
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
-/* 0x00c9: wait_donez */
- 0xf094bd00,
- 0x07f10099,
- 0x03f03700,
- 0x0009d002,
- 0x07f104bd,
- 0x03f00600,
- 0x000ad002,
-/* 0x00e6: wait_donez_ne */
- 0x87f104bd,
- 0x83f00000,
- 0x0088cf01,
- 0xf4888aff,
- 0x94bdf31b,
- 0xf10099f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0109: wait_doneo */
- 0xf094bd00,
+ 0xf002ecb9,
+ 0x07f11fc9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x007a: nv_rd32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0xa7f0f31b,
+ 0x1021f506,
+ 0x00f7f101,
+ 0x01f3f0cb,
+ 0xf800ffcf,
+/* 0x009d: nv_wr32 */
+ 0x0007f100,
+ 0x0103f0cc,
+ 0xbd000fd0,
+ 0x02ecb904,
+ 0xf01fc9f0,
+ 0x07f11ec9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x00be: nv_wr32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f31b,
+/* 0x00d0: wait_donez */
+ 0x99f094bd,
+ 0x0007f100,
+ 0x0203f037,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x1bf4888a,
+ 0xf094bdf3,
0x07f10099,
- 0x03f03700,
+ 0x03f01700,
0x0009d002,
- 0x87f104bd,
- 0x84b60818,
- 0x008ad006,
-/* 0x0124: wait_doneo_e */
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
+ 0x00f804bd,
+/* 0x0110: wait_doneo */
0x99f094bd,
0x0007f100,
- 0x0203f017,
+ 0x0203f037,
0xbd0009d0,
-/* 0x0147: mmctx_size */
- 0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
- 0x00e89894,
- 0xb61a85b6,
- 0x84b60180,
- 0x0098bb02,
- 0xb804e0b6,
- 0x1bf404ef,
- 0x029fb9eb,
-/* 0x0166: mmctx_xfer */
- 0x94bd00f8,
- 0xf10199f0,
- 0xf0370007,
- 0x09d00203,
- 0xf104bd00,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
-/* 0x018c: mmctx_base_disabled */
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
- 0xb70199f0,
- 0xc8010080,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x0bf4888a,
+ 0xf094bdf3,
+ 0x07f10099,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0xf404efb8,
+ 0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+ 0xbd00f802,
+ 0x0199f094,
+ 0x370007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0xbbfd94bd,
+ 0x120bf405,
+ 0xc40007f1,
+ 0xd00103f0,
+ 0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0x0007f11e,
+ 0x0103f0c6,
+ 0xbd000ed0,
+ 0x0007f104,
+ 0x0103f0c7,
+ 0xbd000fd0,
+ 0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x01aec80c,
+ 0xfd11e4b6,
+ 0x07f105be,
+ 0x03f0c500,
+ 0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+ 0xe7f104bd,
+ 0xe3f0c500,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f30b,
+ 0x05e9fd00,
+ 0xc80007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+ 0xb804c0b6,
+ 0x1bf404cd,
+ 0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+ 0xf11f1bf4,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x1fb4f000,
+ 0xf410b4b0,
+ 0xa7f0f01b,
+ 0xd021f402,
+/* 0x0223: mmctx_stop */
+ 0xc82b0ef4,
0xb4b600ab,
0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
-/* 0x01ea: mmctx_stop */
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
- 0x008bcf00,
- 0xf412bbc8,
-/* 0x0202: mmctx_done */
- 0x94bdfa1b,
- 0xf10199f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0215: strand_wait */
- 0xf0a0f900,
- 0x21f402a7,
- 0xf8a0fcc9,
-/* 0x0221: strand_pre */
- 0xfc87f100,
- 0x0283f04a,
- 0xd00c97f0,
- 0x21f50089,
- 0x00f80215,
-/* 0x0234: strand_post */
- 0x4afc87f1,
- 0xf00283f0,
- 0x89d00d97,
- 0x1521f500,
-/* 0x0247: strand_set */
- 0xf100f802,
- 0xf04ffca7,
- 0xaba202a3,
- 0xc7f00500,
- 0x00acd00f,
- 0xd00bc7f0,
- 0x21f500bc,
- 0xaed00215,
- 0x0ac7f000,
- 0xf500bcd0,
- 0xf8021521,
-/* 0x0271: strand_ctx_init */
- 0xf094bd00,
- 0x07f10399,
- 0x03f03700,
+ 0xf112b9f0,
+ 0xf0c50007,
+ 0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+ 0xf104bd00,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x12bbc800,
+/* 0x024b: mmctx_done */
+ 0xbdf31bf4,
+ 0x0199f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x025e: strand_wait */
+ 0xa0f900f8,
+ 0xf402a7f0,
+ 0xa0fcd021,
+/* 0x026a: strand_pre */
+ 0x97f000f8,
+ 0xfc07f10c,
+ 0x0203f04a,
+ 0xbd0009d0,
+ 0x5e21f504,
+/* 0x027f: strand_post */
+ 0xf000f802,
+ 0x07f10d97,
+ 0x03f04afc,
0x0009d002,
0x21f504bd,
- 0xe7f00221,
- 0x4721f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x1521f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x1521f500,
- 0x3421f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+ 0x00f8025e,
+/* 0x0294: strand_set */
+ 0xf10fc7f0,
+ 0xf04ffc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f10bc7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x07f104bd,
+ 0x03f04ffc,
+ 0x000ed002,
+ 0xc7f004bd,
+ 0xfc07f10a,
+ 0x0203f04a,
+ 0xbd000cd0,
+ 0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+ 0xbd00f802,
+ 0x0399f094,
+ 0x370007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0x026a21f5,
+ 0xf503e7f0,
+ 0xbd029421,
+ 0xfc07f1c4,
+ 0x0203f047,
+ 0xbd000cd0,
+ 0x01c7f004,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd000c,
+ 0x025e21f5,
+ 0xf1010c92,
+ 0xf046fc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f102c7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x21f504bd,
+ 0x21f5025e,
+ 0x87f1027f,
+ 0x83f04200,
+ 0x0097f102,
+ 0x0293f020,
+ 0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -263,198 +293,230 @@ uint32_t nvf0_grgpc_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
0xe0f900f8,
- 0x9814e7f1,
- 0xf440e3f0,
- 0xe0b78d21,
- 0xf7f0041c,
- 0x8d21f401,
- 0x00f8e0fc,
-/* 0x0318: init */
- 0x04fe04bd,
- 0x0017f100,
- 0x0227f012,
- 0xf10012d0,
- 0xfe047017,
- 0x17f10010,
- 0x10d00400,
- 0x0427f0c0,
- 0xf40012d0,
- 0x17f11031,
- 0x14b60608,
- 0x0012cf06,
+ 0xf102ffb9,
+ 0xf09814e7,
+ 0x21f440e3,
+ 0x01f7f09d,
+ 0xf102ffb9,
+ 0xf09c1ce7,
+ 0x21f440e3,
+ 0xf8e0fc9d,
+/* 0x03a1: init */
+ 0xfe04bd00,
+ 0x27f00004,
+ 0x0007f102,
+ 0x0003f012,
+ 0xbd0002d0,
+ 0x1f17f104,
+ 0x0010fe05,
+ 0x070007f1,
+ 0xd00003f0,
+ 0x04bd0000,
+ 0xf10427f0,
+ 0xf0040007,
+ 0x02d00003,
+ 0xf404bd00,
+ 0x27f11031,
+ 0x23f08200,
+ 0x0022cf01,
0xf00137f0,
0x32bb1f24,
0x0132b604,
0x80050280,
- 0x10b70603,
- 0x12cf0400,
- 0x04028000,
- 0x0c30e7f1,
- 0xbd50e3f0,
- 0xbd34bd24,
-/* 0x0371: init_unk_loop */
- 0x6821f444,
- 0xf400f6b0,
- 0xf7f00f0b,
- 0x04f2bb01,
- 0xb6054ffd,
-/* 0x0386: init_unk_next */
- 0x20b60130,
- 0x04e0b601,
- 0xf40226b0,
-/* 0x0392: init_unk_done */
- 0x0380e21b,
- 0x08048007,
- 0x010027f1,
- 0xcf0223f0,
- 0x34bd0022,
- 0x070047f1,
- 0x950644b6,
- 0x45d00825,
- 0x4045d000,
- 0x98000e98,
- 0x21f5010f,
- 0x2fbb0147,
- 0x003fbb00,
- 0x98010e98,
- 0x21f5020f,
- 0x0e980147,
- 0x00effd05,
- 0xbb002ebb,
- 0x0e98003e,
- 0x030f9802,
- 0x014721f5,
- 0xfd070e98,
+ 0x27f10603,
+ 0x23f08600,
+ 0x0022cf01,
+ 0xf1040280,
+ 0xf00c30e7,
+ 0x24bd50e3,
+ 0x44bd34bd,
+/* 0x0410: init_unk_loop */
+ 0xb06821f4,
+ 0x0bf400f6,
+ 0x01f7f00f,
+ 0xfd04f2bb,
+ 0x30b6054f,
+/* 0x0425: init_unk_next */
+ 0x0120b601,
+ 0xb004e0b6,
+ 0x1bf40226,
+/* 0x0431: init_unk_done */
+ 0x070380e2,
+ 0xf1080480,
+ 0xf0010027,
+ 0x22cf0223,
+ 0x9534bd00,
+ 0x07f10825,
+ 0x03f0c000,
+ 0x0005d001,
+ 0x07f104bd,
+ 0x03f0c100,
+ 0x0005d001,
+ 0x0e9804bd,
+ 0x010f9800,
+ 0x015021f5,
+ 0xbb002fbb,
+ 0x0e98003f,
+ 0x020f9801,
+ 0x015021f5,
+ 0xfd050e98,
0x2ebb00ef,
0x003ebb00,
- 0x130040b7,
- 0xd00235b6,
- 0x25b60043,
- 0x0635b608,
- 0xb60120b6,
- 0x24b60130,
- 0x0834b608,
- 0xf5022fb9,
- 0xbb027121,
- 0x07f1003f,
- 0x03f00100,
- 0x0003d002,
- 0x24bd04bd,
- 0xf11f29f0,
- 0xf0300007,
- 0x02d00203,
-/* 0x0433: main */
+ 0x98020e98,
+ 0x21f5030f,
+ 0x0e980150,
+ 0x00effd07,
+ 0xbb002ebb,
+ 0x35b6003e,
+ 0x0007f102,
+ 0x0103f0d3,
+ 0xbd0003d0,
+ 0x0825b604,
+ 0xb60635b6,
+ 0x30b60120,
+ 0x0824b601,
+ 0xb90834b6,
+ 0x21f5022f,
+ 0x3fbb02d3,
+ 0x0007f100,
+ 0x0203f001,
+ 0xbd0003d0,
+ 0xf024bd04,
+ 0x07f11f29,
+ 0x03f03000,
+ 0x0002d002,
+/* 0x04e2: main */
+ 0x31f404bd,
+ 0x0028f400,
+ 0xf424d7f0,
+ 0x01f43921,
+ 0x04e4b0f4,
+ 0xfe1e18f4,
+ 0x27f00181,
+ 0xfd20bd06,
+ 0xe4b60412,
+ 0x051efd01,
+ 0xf50018fe,
+ 0xf405d721,
+/* 0x0512: main_not_ctx_xfer */
+ 0xef94d30e,
+ 0x01f5f010,
+ 0x037e21f5,
+/* 0x051f: ih */
+ 0xf9c60ef4,
+ 0x0188fe80,
+ 0x90f980f9,
+ 0xb0f9a0f9,
+ 0xe0f9d0f9,
+ 0x04bdf0f9,
+ 0x0200a7f1,
+ 0xcf00a3f0,
+ 0xabc400aa,
+ 0x2c0bf404,
+ 0xf124d7f0,
+ 0xf01a00e7,
+ 0xeecf00e3,
+ 0x00f7f100,
+ 0x00f3f019,
+ 0xf400ffcf,
+ 0xe7f00421,
+ 0x0007f101,
+ 0x0003f01d,
+ 0xbd000ed0,
+/* 0x056d: ih_no_fifo */
+ 0x0007f104,
+ 0x0003f001,
+ 0xbd000ad0,
+ 0xfcf0fc04,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0xf80032f4,
+/* 0x0591: hub_barrier_done */
+ 0x01f7f001,
+ 0xbb040e98,
+ 0xffb904fe,
+ 0x18e7f102,
+ 0x40e3f094,
+ 0xf89d21f4,
+/* 0x05a9: ctx_redswitch */
+ 0x20f7f000,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000f,
+/* 0x05bb: ctx_redswitch_delay */
+ 0xb608e7f0,
+ 0x1bf401e2,
+ 0x00f5f1fd,
+ 0x00f5f108,
+ 0x0007f102,
+ 0x0103f085,
+ 0xbd000fd0,
+/* 0x05d7: ctx_xfer */
+ 0xf100f804,
+ 0xf0810007,
+ 0x0fd00203,
0xf404bd00,
- 0x28f40031,
- 0x24d7f000,
- 0xf43921f4,
- 0xe4b0f401,
- 0x1e18f404,
- 0xf00181fe,
- 0x20bd0627,
- 0xb60412fd,
- 0x1efd01e4,
- 0x0018fe05,
- 0x04f721f5,
-/* 0x0463: main_not_ctx_xfer */
- 0x94d30ef4,
- 0xf5f010ef,
- 0xfe21f501,
- 0xc60ef402,
-/* 0x0470: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x800acf04,
- 0xf404abc4,
- 0xb7f11d0b,
- 0xd7f01900,
- 0x40becf24,
- 0xf400bfcf,
- 0xb0b70421,
- 0xe7f00400,
- 0x00bed001,
-/* 0x04a8: ih_no_fifo */
- 0xfc400ad0,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x04c3: hub_barrier_done */
- 0xf001f800,
- 0x0e9801f7,
- 0x04febb04,
- 0x9418e7f1,
- 0xf440e3f0,
- 0x00f88d21,
-/* 0x04d8: ctx_redswitch */
- 0x0614e7f1,
- 0xf006e4b6,
- 0xefd020f7,
- 0x08f7f000,
-/* 0x04e8: ctx_redswitch_delay */
- 0xf401f2b6,
- 0xf7f1fd1b,
- 0xefd00a20,
-/* 0x04f7: ctx_xfer */
- 0xf100f800,
- 0xb60a0417,
- 0x1fd00614,
- 0x0711f400,
- 0x04d821f5,
-/* 0x0508: ctx_xfer_not_load */
- 0x4afc17f1,
- 0xf00213f0,
- 0x12d00c27,
- 0x1521f500,
- 0xfc27f102,
- 0x0223f047,
- 0xf00020d0,
- 0x20b6012c,
- 0x0012d003,
- 0xf001acf0,
- 0xb7f002a5,
- 0x50b3f000,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x000c9800,
- 0xf0010d98,
- 0x21f500e7,
- 0xacf00166,
- 0x00b7f101,
- 0x50b3f040,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x010c9800,
- 0x98020d98,
- 0xe7f1060f,
- 0x21f50800,
- 0xacf00166,
- 0x04a5f001,
- 0x3000b7f1,
+ 0x21f50711,
+/* 0x05ea: ctx_xfer_not_load */
+ 0x21f505a9,
+ 0x24bd026a,
+ 0x47fc07f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xb6012cf0,
+ 0x07f10320,
+ 0x03f04afc,
+ 0x0002d002,
+ 0xacf004bd,
+ 0x02a5f001,
+ 0x0000b7f1,
0x9850b3f0,
0xc4b6040c,
0x00bcbb0f,
- 0x98020c98,
- 0x0f98030d,
- 0x00e7f108,
- 0x6621f502,
- 0x1521f501,
- 0x0601f402,
-/* 0x05a3: ctx_xfer_post */
- 0xf11412f4,
- 0xf04afc17,
- 0x27f00213,
- 0x0012d00d,
- 0x021521f5,
-/* 0x05b4: ctx_xfer_done */
- 0x04c321f5,
- 0x000000f8,
+ 0x98000c98,
+ 0xe7f0010d,
+ 0x6f21f500,
+ 0x01acf001,
+ 0x4000b7f1,
+ 0x9850b3f0,
+ 0xc4b6040c,
+ 0x00bcbb0f,
+ 0x98010c98,
+ 0x0f98020d,
+ 0x00e7f106,
+ 0x6f21f508,
+ 0x01acf001,
+ 0xf104a5f0,
+ 0xf03000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98020c,
+ 0x080f9803,
+ 0x0200e7f1,
+ 0x016f21f5,
+ 0x025e21f5,
+ 0xf40601f4,
+/* 0x0686: ctx_xfer_post */
+ 0x21f50712,
+/* 0x068a: ctx_xfer_done */
+ 0x21f5027f,
+ 0x00f80591,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
index b82d2ae89917..c8ddb8d71b91 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
@@ -68,60 +68,57 @@ error:
//
init:
clear b32 $r0
- mov $sp $r0
mov $xdbase $r0
+ // setup stack
+ nv_iord($r1, NV_PGRAPH_FECS_CAPS, 0)
+ extr $r1 $r1 9:17
+ shl b32 $r1 8
+ mov $sp $r1
+
// enable fifo access
- mov $r1 0x1200
- mov $r2 2
- iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
+ mov $r2 NV_PGRAPH_FECS_ACCESS_FIFO
+ nv_iowr(NV_PGRAPH_FECS_ACCESS, 0, $r2)
// setup i0 handler, and route all interrupts to it
mov $r1 #ih
mov $iv0 $r1
- mov $r1 0x400
- iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
- // route HUB_CHANNEL_SWITCH to fuc interrupt 8
- mov $r3 0x404
- shl b32 $r3 6
- mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
- iowr I[$r3 + 0x000] $r2
+ clear b32 $r2
+ nv_iowr(NV_PGRAPH_FECS_INTR_ROUTE, 0, $r2)
+
+ // route HUB_CHSW_PULSE to fuc interrupt 8
+ mov $r2 0x2003 // { HUB_CHSW_PULSE, ZERO } -> intr 8
+ nv_iowr(NV_PGRAPH_FECS_IROUTE, 0, $r2)
// not sure what these are, route them because NVIDIA does, and
// the IRQ handler will signal the host if we ever get one.. we
// may find out if/why we need to handle these if so..
//
- mov $r2 0x2004
- iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
- mov $r2 0x200b
- iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
- mov $r2 0x200c
- iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
+ mov $r2 0x2004 // { 0x04, ZERO } -> intr 9
+ nv_iowr(NV_PGRAPH_FECS_IROUTE, 1, $r2)
+ mov $r2 0x200b // { HUB_FIRMWARE_MTHD, ZERO } -> intr 10
+ nv_iowr(NV_PGRAPH_FECS_IROUTE, 2, $r2)
+ mov $r2 0x200c // { 0x0c, ZERO } -> intr 15
+ nv_iowr(NV_PGRAPH_FECS_IROUTE, 7, $r2)
// enable all INTR_UP interrupts
- mov $r2 0xc24
- shl b32 $r2 6
- not b32 $r3 $r0
- iowr I[$r2] $r3
+ sub b32 $r3 $r0 1
+ nv_iowr(NV_PGRAPH_FECS_INTR_UP_EN, 0, $r3)
- // enable fifo, ctxsw, 9, 10, 15 interrupts
- mov $r2 -0x78fc // 0x8704
- sethi $r2 0
- iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
+ // enable fifo, ctxsw, 9, fwmthd, 15 interrupts
+ imm32($r2, 0x8704)
+ nv_iowr(NV_PGRAPH_FECS_INTR_EN_SET, 0, $r2)
// fifo level triggered, rest edge
- sub b32 $r1 0x100
- mov $r2 4
- iowr I[$r1] $r2
+ mov $r2 NV_PGRAPH_FECS_INTR_MODE_FIFO_LEVEL
+ nv_iowr(NV_PGRAPH_FECS_INTR_MODE, 0, $r2)
// enable interrupts
bset $flags ie0
// fetch enabled GPC/ROP counts
- mov $r14 -0x69fc // 0x409604
- sethi $r14 0x400000
- call #nv_rd32
+ nv_rd32($r14, 0x409604)
extr $r1 $r15 16:20
st b32 D[$r0 + #rop_count] $r1
and $r15 0x1f
@@ -131,37 +128,40 @@ init:
mov $r1 1
shl b32 $r1 $r15
sub b32 $r1 1
- mov $r2 0x40c
- shl b32 $r2 6
- iowr I[$r2 + 0x000] $r1
- iowr I[$r2 + 0x100] $r1
+ nv_iowr(NV_PGRAPH_FECS_BAR_MASK0, 0, $r1)
+ nv_iowr(NV_PGRAPH_FECS_BAR_MASK1, 0, $r1)
// context size calculation, reserve first 256 bytes for use by fuc
mov $r1 256
+ //
+ mov $r15 2
+ call(ctx_4170s)
+ call(ctx_4170w)
+ mov $r15 0x10
+ call(ctx_86c)
+
// calculate size of mmio context data
ld b32 $r14 D[$r0 + #hub_mmio_list_head]
ld b32 $r15 D[$r0 + #hub_mmio_list_tail]
- call #mmctx_size
+ call(mmctx_size)
// set mmctx base addresses now so we don't have to do it later,
// they don't (currently) ever change
- mov $r3 0x700
- shl b32 $r3 6
shr b32 $r4 $r1 8
- iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE
- iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE, 0, $r4)
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE, 0, $r4)
add b32 $r3 0x1300
add b32 $r1 $r15
shr b32 $r15 2
- iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!?
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_LOAD_COUNT, 0, $r15) // wtf??
// strands, base offset needs to be aligned to 256 bytes
shr b32 $r1 8
add b32 $r1 1
shl b32 $r1 8
mov b32 $r15 $r1
- call #strand_ctx_init
+ call(strand_ctx_init)
add b32 $r1 $r15
// initialise each GPC in sequence by passing in the offset of its
@@ -173,30 +173,29 @@ init:
// in GPCn_CC_SCRATCH[1]
//
ld b32 $r3 D[$r0 + #gpc_count]
- mov $r4 0x2000
- sethi $r4 0x500000
+ imm32($r4, 0x502000)
init_gpc:
// setup, and start GPC ucode running
add b32 $r14 $r4 0x804
mov b32 $r15 $r1
- call #nv_wr32 // CC_SCRATCH[1] = ctx offset
+ call(nv_wr32) // CC_SCRATCH[1] = ctx offset
add b32 $r14 $r4 0x10c
clear b32 $r15
- call #nv_wr32
+ call(nv_wr32)
add b32 $r14 $r4 0x104
- call #nv_wr32 // ENTRY
+ call(nv_wr32) // ENTRY
add b32 $r14 $r4 0x100
mov $r15 2 // CTRL_START_TRIGGER
- call #nv_wr32 // CTRL
+ call(nv_wr32) // CTRL
// wait for it to complete, and adjust context size
add b32 $r14 $r4 0x800
init_gpc_wait:
- call #nv_rd32
+ call(nv_rd32)
xbit $r15 $r15 31
bra e #init_gpc_wait
add b32 $r14 $r4 0x804
- call #nv_rd32
+ call(nv_rd32)
add b32 $r1 $r15
// next!
@@ -204,6 +203,12 @@ init:
sub b32 $r3 1
bra ne #init_gpc
+ //
+ mov $r15 0
+ call(ctx_86c)
+ mov $r15 0
+ call(ctx_4170s)
+
// save context size, and tell host we're ready
nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(1), 0, $r1)
clear b32 $r1
@@ -218,17 +223,15 @@ main:
bset $flags $p0
sleep $p0
mov $r13 #cmd_queue
- call #queue_get
+ call(queue_get)
bra $p1 #main
// context switch, requested by GPU?
cmpu b32 $r14 0x4001
bra ne #main_not_ctx_switch
trace_set(T_AUTO)
- mov $r1 0xb00
- shl b32 $r1 6
- iord $r2 I[$r1 + 0x100] // CHAN_NEXT
- iord $r1 I[$r1 + 0x000] // CHAN_CUR
+ nv_iord($r1, NV_PGRAPH_FECS_CHAN_ADDR, 0)
+ nv_iord($r2, NV_PGRAPH_FECS_CHAN_NEXT, 0)
xbit $r3 $r1 31
bra e #chsw_no_prev
@@ -239,12 +242,12 @@ main:
trace_set(T_SAVE)
bclr $flags $p1
bset $flags $p2
- call #ctx_xfer
+ call(ctx_xfer)
trace_clr(T_SAVE);
pop $r2
trace_set(T_LOAD);
bset $flags $p1
- call #ctx_xfer
+ call(ctx_xfer)
trace_clr(T_LOAD);
bra #chsw_done
chsw_prev_no_next:
@@ -252,25 +255,21 @@ main:
mov b32 $r2 $r1
bclr $flags $p1
bclr $flags $p2
- call #ctx_xfer
+ call(ctx_xfer)
pop $r2
- mov $r1 0xb00
- shl b32 $r1 6
- iowr I[$r1] $r2
+ nv_iowr(NV_PGRAPH_FECS_CHAN_ADDR, 0, $r2)
bra #chsw_done
chsw_no_prev:
xbit $r3 $r2 31
bra e #chsw_done
bset $flags $p1
bclr $flags $p2
- call #ctx_xfer
+ call(ctx_xfer)
// ack the context switch request
chsw_done:
- mov $r1 0xb0c
- shl b32 $r1 6
- mov $r2 1
- iowr I[$r1 + 0x000] $r2 // 0x409b0c
+ mov $r2 NV_PGRAPH_FECS_CHSW_ACK
+ nv_iowr(NV_PGRAPH_FECS_CHSW, 0, $r2)
trace_clr(T_AUTO)
bra #main
@@ -279,7 +278,7 @@ main:
cmpu b32 $r14 0x0001
bra ne #main_not_ctx_chan
mov b32 $r2 $r15
- call #ctx_chan
+ call(ctx_chan)
bra #main_done
// request to store current channel context?
@@ -289,14 +288,14 @@ main:
trace_set(T_SAVE)
bclr $flags $p1
bclr $flags $p2
- call #ctx_xfer
+ call(ctx_xfer)
trace_clr(T_SAVE)
bra #main_done
main_not_ctx_save:
shl b32 $r15 $r14 16
or $r15 E_BAD_COMMAND
- call #error
+ call(error)
bra #main
main_done:
@@ -319,41 +318,46 @@ ih:
clear b32 $r0
// incoming fifo command?
- iord $r10 I[$r0 + 0x200] // INTR
- and $r11 $r10 0x00000004
+ nv_iord($r10, NV_PGRAPH_FECS_INTR, 0)
+ and $r11 $r10 NV_PGRAPH_FECS_INTR_FIFO
bra e #ih_no_fifo
// queue incoming fifo command for later processing
- mov $r11 0x1900
mov $r13 #cmd_queue
- iord $r14 I[$r11 + 0x100] // FIFO_CMD
- iord $r15 I[$r11 + 0x000] // FIFO_DATA
- call #queue_put
+ nv_iord($r14, NV_PGRAPH_FECS_FIFO_CMD, 0)
+ nv_iord($r15, NV_PGRAPH_FECS_FIFO_DATA, 0)
+ call(queue_put)
add b32 $r11 0x400
mov $r14 1
- iowr I[$r11 + 0x000] $r14 // FIFO_ACK
+ nv_iowr(NV_PGRAPH_FECS_FIFO_ACK, 0, $r14)
// context switch request?
ih_no_fifo:
- and $r11 $r10 0x00000100
+ and $r11 $r10 NV_PGRAPH_FECS_INTR_CHSW
bra e #ih_no_ctxsw
// enqueue a context switch for later processing
mov $r13 #cmd_queue
mov $r14 0x4001
- call #queue_put
+ call(queue_put)
- // anything we didn't handle, bring it to the host's attention
+ // firmware method?
ih_no_ctxsw:
- mov $r11 0x104
+ and $r11 $r10 NV_PGRAPH_FECS_INTR_FWMTHD
+ bra e #ih_no_fwmthd
+ // none we handle, ack, and fall-through to unhandled
+ mov $r11 0x100
+ nv_wr32(0x400144, $r11)
+
+ // anything we didn't handle, bring it to the host's attention
+ ih_no_fwmthd:
+ mov $r11 0x104 // FIFO | CHSW
not b32 $r11
and $r11 $r10 $r11
bra e #ih_no_other
- mov $r10 0xc1c
- shl b32 $r10 6
- iowr I[$r10] $r11 // INTR_UP_SET
+ nv_iowr(NV_PGRAPH_FECS_INTR_UP_SET, 0, $r11)
// ack, and wake up main()
ih_no_other:
- iowr I[$r0 + 0x100] $r10 // INTR_ACK
+ nv_iowr(NV_PGRAPH_FECS_INTR_ACK, 0, $r10)
pop $r15
pop $r14
@@ -370,12 +374,10 @@ ih:
#if CHIPSET < GK100
// Not real sure, but, MEM_CMD 7 will hang forever if this isn't done
ctx_4160s:
- mov $r14 0x4160
- sethi $r14 0x400000
mov $r15 1
- call #nv_wr32
+ nv_wr32(0x404160, $r15)
ctx_4160s_wait:
- call #nv_rd32
+ nv_rd32($r15, 0x404160)
xbit $r15 $r15 4
bra e #ctx_4160s_wait
ret
@@ -384,10 +386,8 @@ ctx_4160s:
// to hang with STATUS=0x00000007 until it's cleared.. fbcon can
// still function with it set however...
ctx_4160c:
- mov $r14 0x4160
- sethi $r14 0x400000
clear b32 $r15
- call #nv_wr32
+ nv_wr32(0x404160, $r15)
ret
#endif
@@ -396,18 +396,14 @@ ctx_4160c:
// In: $r15 value to set 0x404170 to
//
ctx_4170s:
- mov $r14 0x4170
- sethi $r14 0x400000
or $r15 0x10
- call #nv_wr32
+ nv_wr32(0x404170, $r15)
ret
// Waits for a ctx_4170s() call to complete
//
ctx_4170w:
- mov $r14 0x4170
- sethi $r14 0x400000
- call #nv_rd32
+ nv_rd32($r15, 0x404170)
and $r15 0x10
bra ne #ctx_4170w
ret
@@ -419,16 +415,18 @@ ctx_4170w:
// funny things happen.
//
ctx_redswitch:
- mov $r14 0x614
- shl b32 $r14 6
- mov $r15 0x270
- iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
+ mov $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_GPC
+ or $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_ROP
+ or $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_GPC
+ or $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_MAIN
+ nv_iowr(NV_PGRAPH_FECS_RED_SWITCH, 0, $r14)
mov $r15 8
ctx_redswitch_delay:
sub b32 $r15 1
bra ne #ctx_redswitch_delay
- mov $r15 0x770
- iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
+ or $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_ROP
+ or $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_MAIN
+ nv_iowr(NV_PGRAPH_FECS_RED_SWITCH, 0, $r14)
ret
// Not a clue what this is for, except that unless the value is 0x10, the
@@ -437,15 +435,18 @@ ctx_redswitch:
// In: $r15 value to set to (0x00/0x10 are used)
//
ctx_86c:
- mov $r14 0x86c
- shl b32 $r14 6
- iowr I[$r14] $r15 // HUB(0x86c) = val
- mov $r14 -0x75ec
- sethi $r14 0x400000
- call #nv_wr32 // ROP(0xa14) = val
- mov $r14 -0x5794
- sethi $r14 0x410000
- call #nv_wr32 // GPC(0x86c) = val
+ nv_iowr(NV_PGRAPH_FECS_UNK86C, 0, $r15)
+ nv_wr32(0x408a14, $r15)
+ nv_wr32(NV_PGRAPH_GPCX_GPCCS_UNK86C, $r15)
+ ret
+
+// In: $r15 NV_PGRAPH_FECS_MEM_CMD_*
+ctx_mem:
+ nv_iowr(NV_PGRAPH_FECS_MEM_CMD, 0, $r15)
+ ctx_mem_wait:
+ nv_iord($r15, NV_PGRAPH_FECS_MEM_CMD, 0)
+ or $r15 $r15
+ bra ne #ctx_mem_wait
ret
// ctx_load - load's a channel's ctxctl data, and selects its vm
@@ -457,23 +458,14 @@ ctx_load:
// switch to channel, somewhat magic in parts..
mov $r10 12 // DONE_UNK12
- call #wait_donez
- mov $r1 0xa24
- shl b32 $r1 6
- iowr I[$r1 + 0x000] $r0 // 0x409a24
- mov $r3 0xb00
- shl b32 $r3 6
- iowr I[$r3 + 0x100] $r2 // CHAN_NEXT
- mov $r1 0xa0c
- shl b32 $r1 6
- mov $r4 7
- iowr I[$r1 + 0x000] $r2 // MEM_CHAN
- iowr I[$r1 + 0x100] $r4 // MEM_CMD
- ctx_chan_wait_0:
- iord $r4 I[$r1 + 0x100]
- and $r4 0x1f
- bra ne #ctx_chan_wait_0
- iowr I[$r3 + 0x000] $r2 // CHAN_CUR
+ call(wait_donez)
+ clear b32 $r15
+ nv_iowr(0x409a24, 0, $r15)
+ nv_iowr(NV_PGRAPH_FECS_CHAN_NEXT, 0, $r2)
+ nv_iowr(NV_PGRAPH_FECS_MEM_CHAN, 0, $r2)
+ mov $r15 NV_PGRAPH_FECS_MEM_CMD_LOAD_CHAN
+ call(ctx_mem)
+ nv_iowr(NV_PGRAPH_FECS_CHAN_ADDR, 0, $r2)
// load channel header, fetch PGRAPH context pointer
mov $xtargets $r0
@@ -482,14 +474,10 @@ ctx_load:
add b32 $r2 2
trace_set(T_LCHAN)
- mov $r1 0xa04
- shl b32 $r1 6
- iowr I[$r1 + 0x000] $r2 // MEM_BASE
- mov $r1 0xa20
- shl b32 $r1 6
- mov $r2 0x0002
- sethi $r2 0x80000000
- iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
+ nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r2)
+ imm32($r2, NV_PGRAPH_FECS_MEM_TARGET_UNK31)
+ or $r2 NV_PGRAPH_FECS_MEM_TARGET_AS_VRAM
+ nv_iowr(NV_PGRAPH_FECS_MEM_TARGET, 0, $r2)
mov $r1 0x10 // chan + 0x0210
mov $r2 #xfer_data
sethi $r2 0x00020000 // 16 bytes
@@ -507,13 +495,9 @@ ctx_load:
// set transfer base to start of context, and fetch context header
trace_set(T_LCTXH)
- mov $r2 0xa04
- shl b32 $r2 6
- iowr I[$r2 + 0x000] $r1 // MEM_BASE
- mov $r2 1
- mov $r1 0xa20
- shl b32 $r1 6
- iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
+ nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r1)
+ mov $r2 NV_PGRAPH_FECS_MEM_TARGET_AS_VM
+ nv_iowr(NV_PGRAPH_FECS_MEM_TARGET, 0, $r2)
mov $r1 #chan_data
sethi $r1 0x00060000 // 256 bytes
xdld $r0 $r1
@@ -532,21 +516,15 @@ ctx_load:
//
ctx_chan:
#if CHIPSET < GK100
- call #ctx_4160s
+ call(ctx_4160s)
#endif
- call #ctx_load
+ call(ctx_load)
mov $r10 12 // DONE_UNK12
- call #wait_donez
- mov $r1 0xa10
- shl b32 $r1 6
- mov $r2 5
- iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???)
- ctx_chan_wait:
- iord $r2 I[$r1 + 0x000]
- or $r2 $r2
- bra ne #ctx_chan_wait
+ call(wait_donez)
+ mov $r15 5 // MEM_CMD 5 ???
+ call(ctx_mem)
#if CHIPSET < GK100
- call #ctx_4160c
+ call(ctx_4160c)
#endif
ret
@@ -562,9 +540,7 @@ ctx_chan:
ctx_mmio_exec:
// set transfer base to be the mmio list
ld b32 $r3 D[$r0 + #chan_mmio_address]
- mov $r2 0xa04
- shl b32 $r2 6
- iowr I[$r2 + 0x000] $r3 // MEM_BASE
+ nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r3)
clear b32 $r3
ctx_mmio_loop:
@@ -580,7 +556,7 @@ ctx_mmio_exec:
ctx_mmio_pull:
ld b32 $r14 D[$r4 + #xfer_data + 0x00]
ld b32 $r15 D[$r4 + #xfer_data + 0x04]
- call #nv_wr32
+ call(nv_wr32)
// next!
add b32 $r3 8
@@ -590,7 +566,7 @@ ctx_mmio_exec:
// set transfer base back to the current context
ctx_mmio_done:
ld b32 $r3 D[$r0 + #ctx_current]
- iowr I[$r2 + 0x000] $r3 // MEM_BASE
+ nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r3)
// disable the mmio list now, we don't need/want to execute it again
st b32 D[$r0 + #chan_mmio_count] $r0
@@ -610,12 +586,10 @@ ctx_mmio_exec:
//
ctx_xfer:
// according to mwk, some kind of wait for idle
- mov $r15 0xc00
- shl b32 $r15 6
mov $r14 4
- iowr I[$r15 + 0x200] $r14
+ nv_iowr(0x409c08, 0, $r14)
ctx_xfer_idle:
- iord $r14 I[$r15 + 0x000]
+ nv_iord($r14, 0x409c00, 0)
and $r14 0x2000
bra ne #ctx_xfer_idle
@@ -623,50 +597,42 @@ ctx_xfer:
bra $p2 #ctx_xfer_pre_load
ctx_xfer_pre:
mov $r15 0x10
- call #ctx_86c
+ call(ctx_86c)
#if CHIPSET < GK100
- call #ctx_4160s
+ call(ctx_4160s)
#endif
bra not $p1 #ctx_xfer_exec
ctx_xfer_pre_load:
mov $r15 2
- call #ctx_4170s
- call #ctx_4170w
- call #ctx_redswitch
+ call(ctx_4170s)
+ call(ctx_4170w)
+ call(ctx_redswitch)
clear b32 $r15
- call #ctx_4170s
- call #ctx_load
+ call(ctx_4170s)
+ call(ctx_load)
// fetch context pointer, and initiate xfer on all GPCs
ctx_xfer_exec:
ld b32 $r1 D[$r0 + #ctx_current]
- mov $r2 0x414
- shl b32 $r2 6
- iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
- mov $r14 -0x5b00
- sethi $r14 0x410000
- mov b32 $r15 $r1
- call #nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
- add b32 $r14 4
+
+ clear b32 $r2
+ nv_iowr(NV_PGRAPH_FECS_BAR, 0, $r2)
+
+ nv_wr32(0x41a500, $r1) // GPC_BCAST_WRCMD_DATA = ctx pointer
xbit $r15 $flags $p1
xbit $r2 $flags $p2
shl b32 $r2 1
or $r15 $r2
- call #nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+ nv_wr32(0x41a504, $r15) // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
// strands
- mov $r1 0x4afc
- sethi $r1 0x20000
- mov $r2 0xc
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
- call #strand_wait
- mov $r2 0x47fc
- sethi $r2 0x20000
- iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
- xbit $r2 $flags $p1
- add b32 $r2 3
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+ call(strand_pre)
+ clear b32 $r2
+ nv_iowr(NV_PGRAPH_FECS_STRAND_SELECT, 0x3f, $r2)
+ xbit $r2 $flags $p1 // SAVE/LOAD
+ add b32 $r2 NV_PGRAPH_FECS_STRAND_CMD_SAVE
+ nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r2)
// mmio context
xbit $r10 $flags $p1 // direction
@@ -675,48 +641,42 @@ ctx_xfer:
ld b32 $r12 D[$r0 + #hub_mmio_list_head]
ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
mov $r14 0 // not multi
- call #mmctx_xfer
+ call(mmctx_xfer)
// wait for GPCs to all complete
mov $r10 8 // DONE_BAR
- call #wait_doneo
+ call(wait_doneo)
// wait for strand xfer to complete
- call #strand_wait
+ call(strand_wait)
// post-op
bra $p1 #ctx_xfer_post
mov $r10 12 // DONE_UNK12
- call #wait_donez
- mov $r1 0xa10
- shl b32 $r1 6
- mov $r2 5
- iowr I[$r1] $r2 // MEM_CMD
- ctx_xfer_post_save_wait:
- iord $r2 I[$r1]
- or $r2 $r2
- bra ne #ctx_xfer_post_save_wait
+ call(wait_donez)
+ mov $r15 5 // MEM_CMD 5 ???
+ call(ctx_mem)
bra $p2 #ctx_xfer_done
ctx_xfer_post:
mov $r15 2
- call #ctx_4170s
+ call(ctx_4170s)
clear b32 $r15
- call #ctx_86c
- call #strand_post
- call #ctx_4170w
+ call(ctx_86c)
+ call(strand_post)
+ call(ctx_4170w)
clear b32 $r15
- call #ctx_4170s
+ call(ctx_4170s)
bra not $p1 #ctx_xfer_no_post_mmio
ld b32 $r1 D[$r0 + #chan_mmio_count]
or $r1 $r1
bra e #ctx_xfer_no_post_mmio
- call #ctx_mmio_exec
+ call(ctx_mmio_exec)
ctx_xfer_no_post_mmio:
#if CHIPSET < GK100
- call #ctx_4160c
+ call(ctx_4160c)
#endif
ctx_xfer_done:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5 b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5
new file mode 100644
index 000000000000..7c5d25630fa8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#define CHIPSET GK208
+#include "macros.fuc"
+
+.section #nv108_grhub_data
+#define INCLUDE_DATA
+#include "com.fuc"
+#include "hub.fuc"
+#undef INCLUDE_DATA
+
+.section #nv108_grhub_code
+#define INCLUDE_CODE
+bra #init
+#include "com.fuc"
+#include "hub.fuc"
+.align 256
+#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
new file mode 100644
index 000000000000..4750984bf380
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
@@ -0,0 +1,916 @@
+uint32_t nv108_grhub_data[] = {
+/* 0x0000: hub_mmio_list_head */
+ 0x00000300,
+/* 0x0004: hub_mmio_list_tail */
+ 0x00000304,
+/* 0x0008: gpc_count */
+ 0x00000000,
+/* 0x000c: rop_count */
+ 0x00000000,
+/* 0x0010: cmd_queue */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0058: ctx_current */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0100: chan_data */
+/* 0x0100: chan_mmio_count */
+ 0x00000000,
+/* 0x0104: chan_mmio_address */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0200: xfer_data */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0300: hub_mmio_list_base */
+ 0x0417e91c,
+};
+
+uint32_t nv108_grhub_code[] = {
+ 0x030e0ef5,
+/* 0x0004: queue_put */
+ 0x9800d898,
+ 0x86f001d9,
+ 0xf489a408,
+ 0x020f0b1b,
+ 0x0002f87e,
+/* 0x001a: queue_put_next */
+ 0x98c400f8,
+ 0x0384b607,
+ 0xb6008dbb,
+ 0x8eb50880,
+ 0x018fb500,
+ 0xf00190b6,
+ 0xd9b50f94,
+/* 0x0037: queue_get */
+ 0xf400f801,
+ 0xd8980131,
+ 0x01d99800,
+ 0x0bf489a4,
+ 0x0789c421,
+ 0xbb0394b6,
+ 0x90b6009d,
+ 0x009e9808,
+ 0xb6019f98,
+ 0x84f00180,
+ 0x00d8b50f,
+/* 0x0063: queue_get_done */
+ 0xf80132f4,
+/* 0x0065: nv_rd32 */
+ 0xf0ecb200,
+ 0x00801fc9,
+ 0x0cf601ca,
+/* 0x0073: nv_rd32_wait */
+ 0x8c04bd00,
+ 0xcf01ca00,
+ 0xccc800cc,
+ 0xf61bf41f,
+ 0xec7e060a,
+ 0x008f0000,
+ 0xffcf01cb,
+/* 0x008f: nv_wr32 */
+ 0x8000f800,
+ 0xf601cc00,
+ 0x04bd000f,
+ 0xc9f0ecb2,
+ 0x1ec9f01f,
+ 0x01ca0080,
+ 0xbd000cf6,
+/* 0x00a9: nv_wr32_wait */
+ 0xca008c04,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f61b,
+/* 0x00b8: wait_donez */
+ 0x99f094bd,
+ 0x37008000,
+ 0x0009f602,
+ 0x008004bd,
+ 0x0af60206,
+/* 0x00cf: wait_donez_ne */
+ 0x8804bd00,
+ 0xcf010000,
+ 0x8aff0088,
+ 0xf61bf488,
+ 0x99f094bd,
+ 0x17008000,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x00ec: wait_doneo */
+ 0x99f094bd,
+ 0x37008000,
+ 0x0009f602,
+ 0x008004bd,
+ 0x0af60206,
+/* 0x0103: wait_doneo_e */
+ 0x8804bd00,
+ 0xcf010000,
+ 0x8aff0088,
+ 0xf60bf488,
+ 0x99f094bd,
+ 0x17008000,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x0120: mmctx_size */
+/* 0x0122: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0x1bf4efa4,
+ 0xf89fb2ec,
+/* 0x013d: mmctx_xfer */
+ 0xf094bd00,
+ 0x00800199,
+ 0x09f60237,
+ 0xbd04bd00,
+ 0x05bbfd94,
+ 0x800f0bf4,
+ 0xf601c400,
+ 0x04bd000b,
+/* 0x015f: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0xc6008018,
+ 0x000ef601,
+ 0x008004bd,
+ 0x0ff601c7,
+ 0xf004bd00,
+/* 0x017a: mmctx_multi_disabled */
+ 0xabc80199,
+ 0x10b4b600,
+ 0xc80cb9f0,
+ 0xe4b601ae,
+ 0x05befd11,
+ 0x01c50080,
+ 0xbd000bf6,
+/* 0x0195: mmctx_exec_loop */
+/* 0x0195: mmctx_wait_free */
+ 0xc5008e04,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f60b,
+ 0x05e9fd00,
+ 0x01c80080,
+ 0xbd000ef6,
+ 0x04c0b604,
+ 0x1bf4cda4,
+ 0x02abc8df,
+/* 0x01bf: mmctx_fini_wait */
+ 0x8b1c1bf4,
+ 0xcf01c500,
+ 0xb4f000bb,
+ 0x10b4b01f,
+ 0x0af31bf4,
+ 0x00b87e02,
+ 0x250ef400,
+/* 0x01d8: mmctx_stop */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x12b9f00c,
+ 0x01c50080,
+ 0xbd000bf6,
+/* 0x01ed: mmctx_stop_wait */
+ 0xc5008b04,
+ 0x00bbcf01,
+ 0xf412bbc8,
+/* 0x01fa: mmctx_done */
+ 0x94bdf61b,
+ 0x800199f0,
+ 0xf6021700,
+ 0x04bd0009,
+/* 0x020a: strand_wait */
+ 0xa0f900f8,
+ 0xb87e020a,
+ 0xa0fc0000,
+/* 0x0216: strand_pre */
+ 0x0c0900f8,
+ 0x024afc80,
+ 0xbd0009f6,
+ 0x020a7e04,
+/* 0x0227: strand_post */
+ 0x0900f800,
+ 0x4afc800d,
+ 0x0009f602,
+ 0x0a7e04bd,
+ 0x00f80002,
+/* 0x0238: strand_set */
+ 0xfc800f0c,
+ 0x0cf6024f,
+ 0x0c04bd00,
+ 0x4afc800b,
+ 0x000cf602,
+ 0xfc8004bd,
+ 0x0ef6024f,
+ 0x0c04bd00,
+ 0x4afc800a,
+ 0x000cf602,
+ 0x0a7e04bd,
+ 0x00f80002,
+/* 0x0268: strand_ctx_init */
+ 0x99f094bd,
+ 0x37008003,
+ 0x0009f602,
+ 0x167e04bd,
+ 0x030e0002,
+ 0x0002387e,
+ 0xfc80c4bd,
+ 0x0cf60247,
+ 0x0c04bd00,
+ 0x4afc8001,
+ 0x000cf602,
+ 0x0a7e04bd,
+ 0x0c920002,
+ 0x46fc8001,
+ 0x000cf602,
+ 0x020c04bd,
+ 0x024afc80,
+ 0xbd000cf6,
+ 0x020a7e04,
+ 0x02277e00,
+ 0x42008800,
+ 0x20008902,
+ 0x0099cf02,
+/* 0x02c7: ctx_init_strand_loop */
+ 0xf608fe95,
+ 0x8ef6008e,
+ 0x808acf40,
+ 0xb606a5b6,
+ 0xeabb01a0,
+ 0x0480b600,
+ 0xf40192b6,
+ 0xe4b6e81b,
+ 0xf2efbc08,
+ 0x99f094bd,
+ 0x17008003,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x02f8: error */
+ 0x02050080,
+ 0xbd000ff6,
+ 0x80010f04,
+ 0xf6030700,
+ 0x04bd000f,
+/* 0x030e: init */
+ 0x04bd00f8,
+ 0x410007fe,
+ 0x11cf4200,
+ 0x0911e700,
+ 0x0814b601,
+ 0x020014fe,
+ 0x12004002,
+ 0xbd0002f6,
+ 0x05c94104,
+ 0xbd0010fe,
+ 0x07004024,
+ 0xbd0002f6,
+ 0x20034204,
+ 0x01010080,
+ 0xbd0002f6,
+ 0x20044204,
+ 0x01010480,
+ 0xbd0002f6,
+ 0x200b4204,
+ 0x01010880,
+ 0xbd0002f6,
+ 0x200c4204,
+ 0x01011c80,
+ 0xbd0002f6,
+ 0x01039204,
+ 0x03090080,
+ 0xbd0003f6,
+ 0x87044204,
+ 0xf6040040,
+ 0x04bd0002,
+ 0x00400402,
+ 0x0002f603,
+ 0x31f404bd,
+ 0x96048e10,
+ 0x00657e40,
+ 0xc7feb200,
+ 0x01b590f1,
+ 0x1ff4f003,
+ 0x01020fb5,
+ 0x041fbb01,
+ 0x800112b6,
+ 0xf6010300,
+ 0x04bd0001,
+ 0x01040080,
+ 0xbd0001f6,
+ 0x01004104,
+ 0x627e020f,
+ 0x717e0006,
+ 0x100f0006,
+ 0x0006b37e,
+ 0x98000e98,
+ 0x207e010f,
+ 0x14950001,
+ 0xc0008008,
+ 0x0004f601,
+ 0x008004bd,
+ 0x04f601c1,
+ 0xb704bd00,
+ 0xbb130030,
+ 0xf5b6001f,
+ 0xd3008002,
+ 0x000ff601,
+ 0x15b604bd,
+ 0x0110b608,
+ 0xb20814b6,
+ 0x02687e1f,
+ 0x001fbb00,
+ 0x84020398,
+/* 0x041f: init_gpc */
+ 0xb8502000,
+ 0x0008044e,
+ 0x8f7e1fb2,
+ 0x4eb80000,
+ 0xbd00010c,
+ 0x008f7ef4,
+ 0x044eb800,
+ 0x8f7e0001,
+ 0x4eb80000,
+ 0x0f000100,
+ 0x008f7e02,
+ 0x004eb800,
+/* 0x044e: init_gpc_wait */
+ 0x657e0008,
+ 0xffc80000,
+ 0xf90bf41f,
+ 0x08044eb8,
+ 0x00657e00,
+ 0x001fbb00,
+ 0x800040b7,
+ 0xf40132b6,
+ 0x000fb41b,
+ 0x0006b37e,
+ 0x627e000f,
+ 0x00800006,
+ 0x01f60201,
+ 0xbd04bd00,
+ 0x1f19f014,
+ 0x02300080,
+ 0xbd0001f6,
+/* 0x0491: main */
+ 0x0031f404,
+ 0x0d0028f4,
+ 0x00377e10,
+ 0xf401f400,
+ 0x4001e4b1,
+ 0x00c71bf5,
+ 0x99f094bd,
+ 0x37008004,
+ 0x0009f602,
+ 0x008104bd,
+ 0x11cf02c0,
+ 0xc1008200,
+ 0x0022cf02,
+ 0xf41f13c8,
+ 0x23c8770b,
+ 0x550bf41f,
+ 0x12b220f9,
+ 0x99f094bd,
+ 0x37008007,
+ 0x0009f602,
+ 0x32f404bd,
+ 0x0231f401,
+ 0x0008367e,
+ 0x99f094bd,
+ 0x17008007,
+ 0x0009f602,
+ 0x20fc04bd,
+ 0x99f094bd,
+ 0x37008006,
+ 0x0009f602,
+ 0x31f404bd,
+ 0x08367e01,
+ 0xf094bd00,
+ 0x00800699,
+ 0x09f60217,
+ 0xf404bd00,
+/* 0x0522: chsw_prev_no_next */
+ 0x20f92f0e,
+ 0x32f412b2,
+ 0x0232f401,
+ 0x0008367e,
+ 0x008020fc,
+ 0x02f602c0,
+ 0xf404bd00,
+/* 0x053e: chsw_no_prev */
+ 0x23c8130e,
+ 0x0d0bf41f,
+ 0xf40131f4,
+ 0x367e0232,
+/* 0x054e: chsw_done */
+ 0x01020008,
+ 0x02c30080,
+ 0xbd0002f6,
+ 0xf094bd04,
+ 0x00800499,
+ 0x09f60217,
+ 0xf504bd00,
+/* 0x056b: main_not_ctx_switch */
+ 0xb0ff2a0e,
+ 0x1bf401e4,
+ 0x7ef2b20c,
+ 0xf40007d6,
+/* 0x057a: main_not_ctx_chan */
+ 0xe4b0400e,
+ 0x2c1bf402,
+ 0x99f094bd,
+ 0x37008007,
+ 0x0009f602,
+ 0x32f404bd,
+ 0x0232f401,
+ 0x0008367e,
+ 0x99f094bd,
+ 0x17008007,
+ 0x0009f602,
+ 0x0ef404bd,
+/* 0x05a9: main_not_ctx_save */
+ 0x10ef9411,
+ 0x7e01f5f0,
+ 0xf50002f8,
+/* 0x05b7: main_done */
+ 0xbdfede0e,
+ 0x1f29f024,
+ 0x02300080,
+ 0xbd0002f6,
+ 0xcc0ef504,
+/* 0x05c9: ih */
+ 0xfe80f9fe,
+ 0x80f90188,
+ 0xa0f990f9,
+ 0xd0f9b0f9,
+ 0xf0f9e0f9,
+ 0x004a04bd,
+ 0x00aacf02,
+ 0xf404abc4,
+ 0x100d230b,
+ 0xcf1a004e,
+ 0x004f00ee,
+ 0x00ffcf19,
+ 0x0000047e,
+ 0x0400b0b7,
+ 0x0040010e,
+ 0x000ef61d,
+/* 0x060a: ih_no_fifo */
+ 0xabe404bd,
+ 0x0bf40100,
+ 0x4e100d0c,
+ 0x047e4001,
+/* 0x061a: ih_no_ctxsw */
+ 0xabe40000,
+ 0x0bf40400,
+ 0x01004b10,
+ 0x448ebfb2,
+ 0x8f7e4001,
+/* 0x062e: ih_no_fwmthd */
+ 0x044b0000,
+ 0xffb0bd01,
+ 0x0bf4b4ab,
+ 0x0700800c,
+ 0x000bf603,
+/* 0x0642: ih_no_other */
+ 0x004004bd,
+ 0x000af601,
+ 0xf0fc04bd,
+ 0xd0fce0fc,
+ 0xa0fcb0fc,
+ 0x80fc90fc,
+ 0xfc0088fe,
+ 0x0032f480,
+/* 0x0662: ctx_4170s */
+ 0xf5f001f8,
+ 0x8effb210,
+ 0x7e404170,
+ 0xf800008f,
+/* 0x0671: ctx_4170w */
+ 0x41708e00,
+ 0x00657e40,
+ 0xf0ffb200,
+ 0x1bf410f4,
+/* 0x0683: ctx_redswitch */
+ 0x4e00f8f3,
+ 0xe5f00200,
+ 0x20e5f040,
+ 0x8010e5f0,
+ 0xf6018500,
+ 0x04bd000e,
+/* 0x069a: ctx_redswitch_delay */
+ 0xf2b6080f,
+ 0xfd1bf401,
+ 0x0400e5f1,
+ 0x0100e5f1,
+ 0x01850080,
+ 0xbd000ef6,
+/* 0x06b3: ctx_86c */
+ 0x8000f804,
+ 0xf6022300,
+ 0x04bd000f,
+ 0x148effb2,
+ 0x8f7e408a,
+ 0xffb20000,
+ 0x41a88c8e,
+ 0x00008f7e,
+/* 0x06d2: ctx_mem */
+ 0x008000f8,
+ 0x0ff60284,
+/* 0x06db: ctx_mem_wait */
+ 0x8f04bd00,
+ 0xcf028400,
+ 0xfffd00ff,
+ 0xf61bf405,
+/* 0x06ea: ctx_load */
+ 0x94bd00f8,
+ 0x800599f0,
+ 0xf6023700,
+ 0x04bd0009,
+ 0xb87e0c0a,
+ 0xf4bd0000,
+ 0x02890080,
+ 0xbd000ff6,
+ 0xc1008004,
+ 0x0002f602,
+ 0x008004bd,
+ 0x02f60283,
+ 0x0f04bd00,
+ 0x06d27e07,
+ 0xc0008000,
+ 0x0002f602,
+ 0x0bfe04bd,
+ 0x1f2af000,
+ 0xb60424b6,
+ 0x94bd0220,
+ 0x800899f0,
+ 0xf6023700,
+ 0x04bd0009,
+ 0x02810080,
+ 0xbd0002f6,
+ 0x0000d204,
+ 0x25f08000,
+ 0x88008002,
+ 0x0002f602,
+ 0x100104bd,
+ 0xf0020042,
+ 0x12fa0223,
+ 0xbd03f805,
+ 0x0899f094,
+ 0x02170080,
+ 0xbd0009f6,
+ 0x81019804,
+ 0x981814b6,
+ 0x25b68002,
+ 0x0512fd08,
+ 0xbd1601b5,
+ 0x0999f094,
+ 0x02370080,
+ 0xbd0009f6,
+ 0x81008004,
+ 0x0001f602,
+ 0x010204bd,
+ 0x02880080,
+ 0xbd0002f6,
+ 0x01004104,
+ 0xfa0613f0,
+ 0x03f80501,
+ 0x99f094bd,
+ 0x17008009,
+ 0x0009f602,
+ 0x94bd04bd,
+ 0x800599f0,
+ 0xf6021700,
+ 0x04bd0009,
+/* 0x07d6: ctx_chan */
+ 0xea7e00f8,
+ 0x0c0a0006,
+ 0x0000b87e,
+ 0xd27e050f,
+ 0x00f80006,
+/* 0x07e8: ctx_mmio_exec */
+ 0x80410398,
+ 0xf6028100,
+ 0x04bd0003,
+/* 0x07f6: ctx_mmio_loop */
+ 0x34c434bd,
+ 0x0e1bf4ff,
+ 0xf0020045,
+ 0x35fa0653,
+/* 0x0807: ctx_mmio_pull */
+ 0x9803f805,
+ 0x4f98804e,
+ 0x008f7e81,
+ 0x0830b600,
+ 0xf40112b6,
+/* 0x081a: ctx_mmio_done */
+ 0x0398df1b,
+ 0x81008016,
+ 0x0003f602,
+ 0x00b504bd,
+ 0x01004140,
+ 0xfa0613f0,
+ 0x03f80601,
+/* 0x0836: ctx_xfer */
+ 0x040e00f8,
+ 0x03020080,
+ 0xbd000ef6,
+/* 0x0841: ctx_xfer_idle */
+ 0x00008e04,
+ 0x00eecf03,
+ 0x2000e4f1,
+ 0xf4f51bf4,
+ 0x02f40611,
+/* 0x0855: ctx_xfer_pre */
+ 0x7e100f0c,
+ 0xf40006b3,
+/* 0x085e: ctx_xfer_pre_load */
+ 0x020f1b11,
+ 0x0006627e,
+ 0x0006717e,
+ 0x0006837e,
+ 0x627ef4bd,
+ 0xea7e0006,
+/* 0x0876: ctx_xfer_exec */
+ 0x01980006,
+ 0x8024bd16,
+ 0xf6010500,
+ 0x04bd0002,
+ 0x008e1fb2,
+ 0x8f7e41a5,
+ 0xfcf00000,
+ 0x022cf001,
+ 0xfd0124b6,
+ 0xffb205f2,
+ 0x41a5048e,
+ 0x00008f7e,
+ 0x0002167e,
+ 0xfc8024bd,
+ 0x02f60247,
+ 0xf004bd00,
+ 0x20b6012c,
+ 0x4afc8003,
+ 0x0002f602,
+ 0xacf004bd,
+ 0x06a5f001,
+ 0x0c98000b,
+ 0x010d9800,
+ 0x3d7e000e,
+ 0x080a0001,
+ 0x0000ec7e,
+ 0x00020a7e,
+ 0x0a1201f4,
+ 0x00b87e0c,
+ 0x7e050f00,
+ 0xf40006d2,
+/* 0x08f2: ctx_xfer_post */
+ 0x020f2d02,
+ 0x0006627e,
+ 0xb37ef4bd,
+ 0x277e0006,
+ 0x717e0002,
+ 0xf4bd0006,
+ 0x0006627e,
+ 0x981011f4,
+ 0x11fd4001,
+ 0x070bf405,
+ 0x0007e87e,
+/* 0x091c: ctx_xfer_no_post_mmio */
+/* 0x091c: ctx_xfer_done */
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index b59f694c0423..132f684b1946 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -206,14 +206,14 @@ uint32_t nvc0_grhub_data[] = {
};
uint32_t nvc0_grhub_code[] = {
- 0x031b0ef5,
+ 0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
- 0x00f802fe,
+ 0x00f8037e,
/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
@@ -237,184 +237,214 @@ uint32_t nvc0_grhub_code[] = {
/* 0x0066: queue_get_done */
0x00f80132,
/* 0x0068: nv_rd32 */
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010921f5,
- 0xf840bfcf,
-/* 0x008d: nv_wr32 */
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
-/* 0x00bd: watchdog_clear */
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
-/* 0x00c9: wait_donez */
- 0xf094bd00,
- 0x07f10099,
- 0x03f00f00,
- 0x0009d002,
- 0x07f104bd,
- 0x03f00600,
- 0x000ad002,
-/* 0x00e6: wait_donez_ne */
- 0x87f104bd,
- 0x83f00000,
- 0x0088cf01,
- 0xf4888aff,
- 0x94bdf31b,
- 0xf10099f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0109: wait_doneo */
- 0xf094bd00,
+ 0xf002ecb9,
+ 0x07f11fc9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x007a: nv_rd32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0xa7f0f31b,
+ 0x1021f506,
+ 0x00f7f101,
+ 0x01f3f0cb,
+ 0xf800ffcf,
+/* 0x009d: nv_wr32 */
+ 0x0007f100,
+ 0x0103f0cc,
+ 0xbd000fd0,
+ 0x02ecb904,
+ 0xf01fc9f0,
+ 0x07f11ec9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x00be: nv_wr32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f31b,
+/* 0x00d0: wait_donez */
+ 0x99f094bd,
+ 0x0007f100,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x1bf4888a,
+ 0xf094bdf3,
0x07f10099,
- 0x03f00f00,
+ 0x03f01700,
0x0009d002,
- 0x87f104bd,
- 0x84b60818,
- 0x008ad006,
-/* 0x0124: wait_doneo_e */
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
+ 0x00f804bd,
+/* 0x0110: wait_doneo */
0x99f094bd,
0x0007f100,
- 0x0203f017,
+ 0x0203f00f,
0xbd0009d0,
-/* 0x0147: mmctx_size */
- 0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
- 0x00e89894,
- 0xb61a85b6,
- 0x84b60180,
- 0x0098bb02,
- 0xb804e0b6,
- 0x1bf404ef,
- 0x029fb9eb,
-/* 0x0166: mmctx_xfer */
- 0x94bd00f8,
- 0xf10199f0,
- 0xf00f0007,
- 0x09d00203,
- 0xf104bd00,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
-/* 0x018c: mmctx_base_disabled */
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
- 0xb70199f0,
- 0xc8010080,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x0bf4888a,
+ 0xf094bdf3,
+ 0x07f10099,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0xf404efb8,
+ 0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+ 0xbd00f802,
+ 0x0199f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0xbbfd94bd,
+ 0x120bf405,
+ 0xc40007f1,
+ 0xd00103f0,
+ 0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0x0007f11e,
+ 0x0103f0c6,
+ 0xbd000ed0,
+ 0x0007f104,
+ 0x0103f0c7,
+ 0xbd000fd0,
+ 0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x01aec80c,
+ 0xfd11e4b6,
+ 0x07f105be,
+ 0x03f0c500,
+ 0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+ 0xe7f104bd,
+ 0xe3f0c500,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f30b,
+ 0x05e9fd00,
+ 0xc80007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+ 0xb804c0b6,
+ 0x1bf404cd,
+ 0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+ 0xf11f1bf4,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x1fb4f000,
+ 0xf410b4b0,
+ 0xa7f0f01b,
+ 0xd021f402,
+/* 0x0223: mmctx_stop */
+ 0xc82b0ef4,
0xb4b600ab,
0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
-/* 0x01ea: mmctx_stop */
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
- 0x008bcf00,
- 0xf412bbc8,
-/* 0x0202: mmctx_done */
- 0x94bdfa1b,
- 0xf10199f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0215: strand_wait */
- 0xf0a0f900,
- 0x21f402a7,
- 0xf8a0fcc9,
-/* 0x0221: strand_pre */
- 0xfc87f100,
- 0x0283f04a,
- 0xd00c97f0,
- 0x21f50089,
- 0x00f80215,
-/* 0x0234: strand_post */
- 0x4afc87f1,
- 0xf00283f0,
- 0x89d00d97,
- 0x1521f500,
-/* 0x0247: strand_set */
- 0xf100f802,
- 0xf04ffca7,
- 0xaba202a3,
- 0xc7f00500,
- 0x00acd00f,
- 0xd00bc7f0,
- 0x21f500bc,
- 0xaed00215,
- 0x0ac7f000,
- 0xf500bcd0,
- 0xf8021521,
-/* 0x0271: strand_ctx_init */
- 0xf094bd00,
- 0x07f10399,
- 0x03f00f00,
+ 0xf112b9f0,
+ 0xf0c50007,
+ 0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+ 0xf104bd00,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x12bbc800,
+/* 0x024b: mmctx_done */
+ 0xbdf31bf4,
+ 0x0199f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x025e: strand_wait */
+ 0xa0f900f8,
+ 0xf402a7f0,
+ 0xa0fcd021,
+/* 0x026a: strand_pre */
+ 0x97f000f8,
+ 0xfc07f10c,
+ 0x0203f04a,
+ 0xbd0009d0,
+ 0x5e21f504,
+/* 0x027f: strand_post */
+ 0xf000f802,
+ 0x07f10d97,
+ 0x03f04afc,
0x0009d002,
0x21f504bd,
- 0xe7f00221,
- 0x4721f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x1521f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x1521f500,
- 0x3421f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+ 0x00f8025e,
+/* 0x0294: strand_set */
+ 0xf10fc7f0,
+ 0xf04ffc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f10bc7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x07f104bd,
+ 0x03f04ffc,
+ 0x000ed002,
+ 0xc7f004bd,
+ 0xfc07f10a,
+ 0x0203f04a,
+ 0xbd000cd0,
+ 0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+ 0xbd00f802,
+ 0x0399f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0x026a21f5,
+ 0xf503e7f0,
+ 0xbd029421,
+ 0xfc07f1c4,
+ 0x0203f047,
+ 0xbd000cd0,
+ 0x01c7f004,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd000c,
+ 0x025e21f5,
+ 0xf1010c92,
+ 0xf046fc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f102c7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x21f504bd,
+ 0x21f5025e,
+ 0x87f1027f,
+ 0x83f04200,
+ 0x0097f102,
+ 0x0293f020,
+ 0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -428,7 +458,7 @@ uint32_t nvc0_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
0x07f100f8,
0x03f00500,
0x000fd002,
@@ -436,82 +466,117 @@ uint32_t nvc0_grhub_code[] = {
0x0007f101,
0x0303f007,
0xbd000fd0,
-/* 0x031b: init */
+/* 0x039b: init */
0xbd00f804,
- 0x0004fe04,
- 0xf10007fe,
- 0xf0120017,
- 0x12d00227,
- 0xb117f100,
- 0x0010fe05,
- 0x040017f1,
- 0xf1c010d0,
- 0xb6040437,
- 0x27f10634,
- 0x32d02003,
- 0x0427f100,
- 0x0132d020,
+ 0x0007fe04,
+ 0x420017f1,
+ 0xcf0013f0,
+ 0x11e70011,
+ 0x14b60109,
+ 0x0014fe08,
+ 0xf10227f0,
+ 0xf0120007,
+ 0x02d00003,
+ 0xf104bd00,
+ 0xfe06c817,
+ 0x24bd0010,
+ 0x070007f1,
+ 0xd00003f0,
+ 0x04bd0002,
+ 0x200327f1,
+ 0x010007f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0x200427f1,
+ 0x010407f1,
+ 0xd00103f0,
+ 0x04bd0002,
0x200b27f1,
- 0xf10232d0,
- 0xd0200c27,
- 0x27f10732,
- 0x24b60c24,
- 0x0003b906,
- 0xf10023d0,
+ 0x010807f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0x200c27f1,
+ 0x011c07f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0xf1010392,
+ 0xf0090007,
+ 0x03d00303,
+ 0xf104bd00,
0xf0870427,
- 0x12d00023,
- 0x0012b700,
- 0x0427f001,
- 0xf40012d0,
- 0xe7f11031,
- 0xe3f09604,
- 0x6821f440,
- 0x8090f1c7,
- 0xf4f00301,
- 0x020f801f,
- 0xbb0117f0,
- 0x12b6041f,
- 0x0c27f101,
- 0x0624b604,
- 0xd00021d0,
- 0x17f14021,
- 0x0e980100,
- 0x010f9800,
- 0x014721f5,
- 0x070037f1,
- 0x950634b6,
- 0x34d00814,
- 0x4034d000,
- 0x130030b7,
- 0xb6001fbb,
- 0x3fd002f5,
- 0x0815b600,
- 0xb60110b6,
- 0x1fb90814,
- 0x7121f502,
- 0x001fbb02,
- 0xf1020398,
- 0xf0200047,
-/* 0x03f6: init_gpc */
- 0x4ea05043,
- 0x1fb90804,
- 0x8d21f402,
- 0x010c4ea0,
- 0x21f4f4bd,
- 0x044ea08d,
- 0x8d21f401,
- 0x01004ea0,
- 0xf402f7f0,
- 0x4ea08d21,
-/* 0x041e: init_gpc_wait */
- 0x21f40800,
- 0x1fffc868,
- 0xa0fa0bf4,
- 0xf408044e,
- 0x1fbb6821,
- 0x0040b700,
- 0x0132b680,
- 0xf1be1bf4,
+ 0x07f10023,
+ 0x03f00400,
+ 0x0002d000,
+ 0x27f004bd,
+ 0x0007f104,
+ 0x0003f003,
+ 0xbd0002d0,
+ 0x1031f404,
+ 0x9604e7f1,
+ 0xf440e3f0,
+ 0xfeb96821,
+ 0x90f1c702,
+ 0xf0030180,
+ 0x0f801ff4,
+ 0x0117f002,
+ 0xb6041fbb,
+ 0x07f10112,
+ 0x03f00300,
+ 0x0001d001,
+ 0x07f104bd,
+ 0x03f00400,
+ 0x0001d001,
+ 0x17f104bd,
+ 0xf7f00100,
+ 0xb521f502,
+ 0xc721f507,
+ 0x10f7f007,
+ 0x081421f5,
+ 0x98000e98,
+ 0x21f5010f,
+ 0x14950150,
+ 0x0007f108,
+ 0x0103f0c0,
+ 0xbd0004d0,
+ 0x0007f104,
+ 0x0103f0c1,
+ 0xbd0004d0,
+ 0x0030b704,
+ 0x001fbb13,
+ 0xf102f5b6,
+ 0xf0d30007,
+ 0x0fd00103,
+ 0xb604bd00,
+ 0x10b60815,
+ 0x0814b601,
+ 0xf5021fb9,
+ 0xbb02d321,
+ 0x0398001f,
+ 0x0047f102,
+ 0x5043f020,
+/* 0x04f4: init_gpc */
+ 0x08044ea0,
+ 0xf4021fb9,
+ 0x4ea09d21,
+ 0xf4bd010c,
+ 0xa09d21f4,
+ 0xf401044e,
+ 0x4ea09d21,
+ 0xf7f00100,
+ 0x9d21f402,
+ 0x08004ea0,
+/* 0x051c: init_gpc_wait */
+ 0xc86821f4,
+ 0x0bf41fff,
+ 0x044ea0fa,
+ 0x6821f408,
+ 0xb7001fbb,
+ 0xb6800040,
+ 0x1bf40132,
+ 0x00f7f0be,
+ 0x081421f5,
+ 0xf500f7f0,
+ 0xf107b521,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -519,402 +584,399 @@ uint32_t nvc0_grhub_code[] = {
0x080007f1,
0xd00203f0,
0x04bd0001,
-/* 0x0458: main */
+/* 0x0564: main */
0xf40031f4,
0xd7f00028,
0x3921f410,
0xb1f401f4,
0xf54001e4,
- 0xbd00de1b,
+ 0xbd00e91b,
0x0499f094,
0x0f0007f1,
0xd00203f0,
0x04bd0009,
- 0x0b0017f1,
- 0xcf0614b6,
- 0x11cf4012,
- 0x1f13c800,
- 0x00870bf5,
- 0xf41f23c8,
- 0x20f9620b,
- 0xbd0212b9,
- 0x0799f094,
- 0x0f0007f1,
- 0xd00203f0,
- 0x04bd0009,
- 0xf40132f4,
- 0x21f50231,
- 0x94bd082f,
+ 0xc00017f1,
+ 0xcf0213f0,
+ 0x27f10011,
+ 0x23f0c100,
+ 0x0022cf02,
+ 0xf51f13c8,
+ 0xc800890b,
+ 0x0bf41f23,
+ 0xb920f962,
+ 0x94bd0212,
0xf10799f0,
- 0xf0170007,
+ 0xf00f0007,
0x09d00203,
- 0xfc04bd00,
- 0xf094bd20,
- 0x07f10699,
- 0x03f00f00,
- 0x0009d002,
- 0x31f404bd,
- 0x2f21f501,
- 0xf094bd08,
- 0x07f10699,
+ 0xf404bd00,
+ 0x31f40132,
+ 0xe821f502,
+ 0xf094bd09,
+ 0x07f10799,
0x03f01700,
0x0009d002,
- 0x0ef404bd,
-/* 0x04f9: chsw_prev_no_next */
- 0xb920f931,
- 0x32f40212,
- 0x0232f401,
- 0x082f21f5,
- 0x17f120fc,
- 0x14b60b00,
- 0x0012d006,
-/* 0x0517: chsw_no_prev */
- 0xc8130ef4,
- 0x0bf41f23,
- 0x0131f40d,
- 0xf50232f4,
-/* 0x0527: chsw_done */
- 0xf1082f21,
- 0xb60b0c17,
- 0x27f00614,
- 0x0012d001,
+ 0x20fc04bd,
0x99f094bd,
- 0x0007f104,
+ 0x0007f106,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0131f404,
+ 0x09e821f5,
+ 0x99f094bd,
+ 0x0007f106,
0x0203f017,
0xbd0009d0,
- 0x130ef504,
-/* 0x0549: main_not_ctx_switch */
- 0x01e4b0ff,
- 0xb90d1bf4,
- 0x21f502f2,
- 0x0ef407bb,
-/* 0x0559: main_not_ctx_chan */
- 0x02e4b046,
- 0xbd321bf4,
- 0x0799f094,
- 0x0f0007f1,
+ 0x330ef404,
+/* 0x060c: chsw_prev_no_next */
+ 0x12b920f9,
+ 0x0132f402,
+ 0xf50232f4,
+ 0xfc09e821,
+ 0x0007f120,
+ 0x0203f0c0,
+ 0xbd0002d0,
+ 0x130ef404,
+/* 0x062c: chsw_no_prev */
+ 0xf41f23c8,
+ 0x31f40d0b,
+ 0x0232f401,
+ 0x09e821f5,
+/* 0x063c: chsw_done */
+ 0xf10127f0,
+ 0xf0c30007,
+ 0x02d00203,
+ 0xbd04bd00,
+ 0x0499f094,
+ 0x170007f1,
0xd00203f0,
0x04bd0009,
- 0xf40132f4,
- 0x21f50232,
- 0x94bd082f,
+ 0xff080ef5,
+/* 0x0660: main_not_ctx_switch */
+ 0xf401e4b0,
+ 0xf2b90d1b,
+ 0x7821f502,
+ 0x460ef409,
+/* 0x0670: main_not_ctx_chan */
+ 0xf402e4b0,
+ 0x94bd321b,
0xf10799f0,
- 0xf0170007,
+ 0xf00f0007,
0x09d00203,
0xf404bd00,
-/* 0x058e: main_not_ctx_save */
- 0xef94110e,
- 0x01f5f010,
- 0x02fe21f5,
- 0xfec00ef5,
-/* 0x059c: main_done */
- 0x29f024bd,
- 0x0007f11f,
- 0x0203f008,
- 0xbd0002d0,
- 0xab0ef504,
-/* 0x05b1: ih */
- 0xfe80f9fe,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0x0acf04bd,
- 0x04abc480,
- 0xf11d0bf4,
- 0xf01900b7,
- 0xbecf10d7,
- 0x00bfcf40,
+ 0x32f40132,
+ 0xe821f502,
+ 0xf094bd09,
+ 0x07f10799,
+ 0x03f01700,
+ 0x0009d002,
+ 0x0ef404bd,
+/* 0x06a5: main_not_ctx_save */
+ 0x10ef9411,
+ 0xf501f5f0,
+ 0xf5037e21,
+/* 0x06b3: main_done */
+ 0xbdfeb50e,
+ 0x1f29f024,
+ 0x080007f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xfea00ef5,
+/* 0x06c8: ih */
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x00a7f104,
+ 0x00a3f002,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x10d7f030,
+ 0x1a00e7f1,
+ 0xcf00e3f0,
+ 0xf7f100ee,
+ 0xf3f01900,
+ 0x00ffcf00,
0xb70421f4,
0xf00400b0,
- 0xbed001e7,
-/* 0x05e9: ih_no_fifo */
- 0x00abe400,
- 0x0d0bf401,
- 0xf110d7f0,
- 0xf44001e7,
-/* 0x05fa: ih_no_ctxsw */
- 0xb7f10421,
- 0xb0bd0104,
- 0xf4b4abff,
- 0xa7f10d0b,
- 0xa4b60c1c,
- 0x00abd006,
-/* 0x0610: ih_no_other */
- 0xfc400ad0,
+ 0x07f101e7,
+ 0x03f01d00,
+ 0x000ed000,
+/* 0x071a: ih_no_fifo */
+ 0xabe404bd,
+ 0x0bf40100,
+ 0x10d7f00d,
+ 0x4001e7f1,
+/* 0x072b: ih_no_ctxsw */
+ 0xe40421f4,
+ 0xf40400ab,
+ 0xb7f1140b,
+ 0xbfb90100,
+ 0x44e7f102,
+ 0x40e3f001,
+/* 0x0743: ih_no_fwmthd */
+ 0xf19d21f4,
+ 0xbd0104b7,
+ 0xb4abffb0,
+ 0xf10f0bf4,
+ 0xf0070007,
+ 0x0bd00303,
+/* 0x075b: ih_no_other */
+ 0xf104bd00,
+ 0xf0010007,
+ 0x0ad00003,
+ 0xfc04bd00,
0xfce0fcf0,
0xfcb0fcd0,
0xfc90fca0,
0x0088fe80,
0x32f480fc,
-/* 0x062b: ctx_4160s */
- 0xf101f800,
- 0xf04160e7,
- 0xf7f040e3,
- 0x8d21f401,
-/* 0x0638: ctx_4160s_wait */
- 0xc86821f4,
- 0x0bf404ff,
-/* 0x0643: ctx_4160c */
- 0xf100f8fa,
+/* 0x077f: ctx_4160s */
+ 0xf001f800,
+ 0xffb901f7,
+ 0x60e7f102,
+ 0x40e3f041,
+/* 0x078f: ctx_4160s_wait */
+ 0xf19d21f4,
0xf04160e7,
- 0xf4bd40e3,
- 0xf88d21f4,
-/* 0x0651: ctx_4170s */
- 0x70e7f100,
+ 0x21f440e3,
+ 0x02ffb968,
+ 0xf404ffc8,
+ 0x00f8f00b,
+/* 0x07a4: ctx_4160c */
+ 0xffb9f4bd,
+ 0x60e7f102,
0x40e3f041,
- 0xf410f5f0,
- 0x00f88d21,
-/* 0x0660: ctx_4170w */
- 0x4170e7f1,
- 0xf440e3f0,
- 0xf4f06821,
- 0xf31bf410,
-/* 0x0672: ctx_redswitch */
- 0xe7f100f8,
- 0xe4b60614,
- 0x70f7f106,
- 0x00efd002,
-/* 0x0683: ctx_redswitch_delay */
- 0xb608f7f0,
- 0x1bf401f2,
- 0x70f7f1fd,
- 0x00efd007,
-/* 0x0692: ctx_86c */
- 0xe7f100f8,
- 0xe4b6086c,
- 0x00efd006,
- 0x8a14e7f1,
- 0xf440e3f0,
- 0xe7f18d21,
- 0xe3f0a86c,
- 0x8d21f441,
-/* 0x06b2: ctx_load */
+ 0xf89d21f4,
+/* 0x07b5: ctx_4170s */
+ 0x10f5f000,
+ 0xf102ffb9,
+ 0xf04170e7,
+ 0x21f440e3,
+/* 0x07c7: ctx_4170w */
+ 0xf100f89d,
+ 0xf04170e7,
+ 0x21f440e3,
+ 0x02ffb968,
+ 0xf410f4f0,
+ 0x00f8f01b,
+/* 0x07dc: ctx_redswitch */
+ 0x0200e7f1,
+ 0xf040e5f0,
+ 0xe5f020e5,
+ 0x0007f110,
+ 0x0103f085,
+ 0xbd000ed0,
+ 0x08f7f004,
+/* 0x07f8: ctx_redswitch_delay */
+ 0xf401f2b6,
+ 0xe5f1fd1b,
+ 0xe5f10400,
+ 0x07f10100,
+ 0x03f08500,
+ 0x000ed001,
+ 0x00f804bd,
+/* 0x0814: ctx_86c */
+ 0x1b0007f1,
+ 0xd00203f0,
+ 0x04bd000f,
+ 0xf102ffb9,
+ 0xf08a14e7,
+ 0x21f440e3,
+ 0x02ffb99d,
+ 0xa86ce7f1,
+ 0xf441e3f0,
+ 0x00f89d21,
+/* 0x083c: ctx_mem */
+ 0x840007f1,
+ 0xd00203f0,
+ 0x04bd000f,
+/* 0x0848: ctx_mem_wait */
+ 0x8400f7f1,
+ 0xcf02f3f0,
+ 0xfffd00ff,
+ 0xf31bf405,
+/* 0x085a: ctx_load */
0x94bd00f8,
0xf10599f0,
0xf00f0007,
0x09d00203,
0xf004bd00,
0x21f40ca7,
- 0x2417f1c9,
- 0x0614b60a,
- 0xf10010d0,
- 0xb60b0037,
- 0x32d00634,
- 0x0c17f140,
- 0x0614b60a,
- 0xd00747f0,
- 0x14d00012,
-/* 0x06ed: ctx_chan_wait_0 */
- 0x4014cf40,
- 0xf41f44f0,
- 0x32d0fa1b,
- 0x000bfe00,
- 0xb61f2af0,
- 0x20b60424,
- 0xf094bd02,
+ 0xf1f4bdd0,
+ 0xf0890007,
+ 0x0fd00203,
+ 0xf104bd00,
+ 0xf0c10007,
+ 0x02d00203,
+ 0xf104bd00,
+ 0xf0830007,
+ 0x02d00203,
+ 0xf004bd00,
+ 0x21f507f7,
+ 0x07f1083c,
+ 0x03f0c000,
+ 0x0002d002,
+ 0x0bfe04bd,
+ 0x1f2af000,
+ 0xb60424b6,
+ 0x94bd0220,
+ 0xf10899f0,
+ 0xf00f0007,
+ 0x09d00203,
+ 0xf104bd00,
+ 0xf0810007,
+ 0x02d00203,
+ 0xf104bd00,
+ 0xf1000027,
+ 0xf0800023,
+ 0x07f10225,
+ 0x03f08800,
+ 0x0002d002,
+ 0x17f004bd,
+ 0x0027f110,
+ 0x0223f002,
+ 0xf80512fa,
+ 0xf094bd03,
0x07f10899,
- 0x03f00f00,
+ 0x03f01700,
0x0009d002,
- 0x17f104bd,
- 0x14b60a04,
- 0x0012d006,
- 0x0a2017f1,
- 0xf00614b6,
- 0x23f10227,
- 0x12d08000,
- 0x1017f000,
- 0x020027f1,
- 0xfa0223f0,
- 0x03f80512,
+ 0x019804bd,
+ 0x1814b681,
+ 0xb6800298,
+ 0x12fd0825,
+ 0x16018005,
0x99f094bd,
- 0x0007f108,
- 0x0203f017,
+ 0x0007f109,
+ 0x0203f00f,
0xbd0009d0,
- 0x81019804,
- 0x981814b6,
- 0x25b68002,
- 0x0512fd08,
- 0xbd160180,
- 0x0999f094,
- 0x0f0007f1,
- 0xd00203f0,
- 0x04bd0009,
- 0x0a0427f1,
- 0xd00624b6,
- 0x27f00021,
- 0x2017f101,
- 0x0614b60a,
- 0xf10012d0,
- 0xf0010017,
- 0x01fa0613,
- 0xbd03f805,
- 0x0999f094,
- 0x170007f1,
+ 0x0007f104,
+ 0x0203f081,
+ 0xbd0001d0,
+ 0x0127f004,
+ 0x880007f1,
0xd00203f0,
- 0x04bd0009,
+ 0x04bd0002,
+ 0x010017f1,
+ 0xfa0613f0,
+ 0x03f80501,
0x99f094bd,
- 0x0007f105,
+ 0x0007f109,
0x0203f017,
0xbd0009d0,
-/* 0x07bb: ctx_chan */
- 0xf500f804,
- 0xf5062b21,
- 0xf006b221,
- 0x21f40ca7,
- 0x1017f1c9,
- 0x0614b60a,
- 0xd00527f0,
-/* 0x07d6: ctx_chan_wait */
- 0x12cf0012,
- 0x0522fd00,
- 0xf5fa1bf4,
- 0xf8064321,
-/* 0x07e5: ctx_mmio_exec */
- 0x41039800,
- 0x0a0427f1,
- 0xd00624b6,
- 0x34bd0023,
-/* 0x07f4: ctx_mmio_loop */
+ 0xf094bd04,
+ 0x07f10599,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0978: ctx_chan */
+ 0x077f21f5,
+ 0x085a21f5,
+ 0xf40ca7f0,
+ 0xf7f0d021,
+ 0x3c21f505,
+ 0xa421f508,
+/* 0x0993: ctx_mmio_exec */
+ 0x9800f807,
+ 0x07f14103,
+ 0x03f08100,
+ 0x0003d002,
+ 0x34bd04bd,
+/* 0x09a4: ctx_mmio_loop */
0xf4ff34c4,
0x57f10f1b,
0x53f00200,
0x0535fa06,
-/* 0x0806: ctx_mmio_pull */
+/* 0x09b6: ctx_mmio_pull */
0x4e9803f8,
0x814f9880,
- 0xb68d21f4,
+ 0xb69d21f4,
0x12b60830,
0xdf1bf401,
-/* 0x0818: ctx_mmio_done */
- 0xd0160398,
- 0x00800023,
- 0x0017f140,
- 0x0613f001,
- 0xf80601fa,
-/* 0x082f: ctx_xfer */
- 0xf100f803,
- 0xb60c00f7,
- 0xe7f006f4,
- 0x80fed004,
-/* 0x083c: ctx_xfer_idle */
- 0xf100fecf,
- 0xf42000e4,
- 0x11f4f91b,
- 0x1102f406,
-/* 0x084c: ctx_xfer_pre */
- 0xf510f7f0,
- 0xf5069221,
- 0xf4062b21,
-/* 0x085a: ctx_xfer_pre_load */
- 0xf7f01c11,
- 0x5121f502,
- 0x6021f506,
- 0x7221f506,
- 0xf5f4bd06,
- 0xf5065121,
-/* 0x0873: ctx_xfer_exec */
- 0x9806b221,
- 0x27f11601,
- 0x24b60414,
- 0x0020d006,
- 0xa500e7f1,
- 0xb941e3f0,
- 0x21f4021f,
- 0x04e0b68d,
- 0xf001fcf0,
- 0x24b6022c,
- 0x05f2fd01,
- 0xf18d21f4,
- 0xf04afc17,
- 0x27f00213,
- 0x0012d00c,
- 0x021521f5,
- 0x47fc27f1,
- 0xd00223f0,
- 0x2cf00020,
+/* 0x09c8: ctx_mmio_done */
+ 0xf1160398,
+ 0xf0810007,
+ 0x03d00203,
+ 0x8004bd00,
+ 0x17f14000,
+ 0x13f00100,
+ 0x0601fa06,
+ 0x00f803f8,
+/* 0x09e8: ctx_xfer */
+ 0xf104e7f0,
+ 0xf0020007,
+ 0x0ed00303,
+/* 0x09f7: ctx_xfer_idle */
+ 0xf104bd00,
+ 0xf00000e7,
+ 0xeecf03e3,
+ 0x00e4f100,
+ 0xf21bf420,
+ 0xf40611f4,
+/* 0x0a0e: ctx_xfer_pre */
+ 0xf7f01102,
+ 0x1421f510,
+ 0x7f21f508,
+ 0x1c11f407,
+/* 0x0a1c: ctx_xfer_pre_load */
+ 0xf502f7f0,
+ 0xf507b521,
+ 0xf507c721,
+ 0xbd07dc21,
+ 0xb521f5f4,
+ 0x5a21f507,
+/* 0x0a35: ctx_xfer_exec */
+ 0x16019808,
+ 0x07f124bd,
+ 0x03f00500,
+ 0x0002d001,
+ 0x1fb904bd,
+ 0x00e7f102,
+ 0x41e3f0a5,
+ 0xf09d21f4,
+ 0x2cf001fc,
+ 0x0124b602,
+ 0xb905f2fd,
+ 0xe7f102ff,
+ 0xe3f0a504,
+ 0x9d21f441,
+ 0x026a21f5,
+ 0x07f124bd,
+ 0x03f047fc,
+ 0x0002d002,
+ 0x2cf004bd,
0x0320b601,
- 0xf00012d0,
- 0xa5f001ac,
- 0x00b7f006,
- 0x98000c98,
- 0xe7f0010d,
- 0x6621f500,
- 0x08a7f001,
- 0x010921f5,
- 0x021521f5,
- 0xf02201f4,
- 0x21f40ca7,
- 0x1017f1c9,
- 0x0614b60a,
- 0xd00527f0,
-/* 0x08fa: ctx_xfer_post_save_wait */
- 0x12cf0012,
- 0x0522fd00,
- 0xf4fa1bf4,
-/* 0x0906: ctx_xfer_post */
- 0xf7f03202,
- 0x5121f502,
- 0xf5f4bd06,
- 0xf5069221,
- 0xf5023421,
- 0xbd066021,
- 0x5121f5f4,
- 0x1011f406,
- 0xfd400198,
- 0x0bf40511,
- 0xe521f507,
-/* 0x0931: ctx_xfer_no_post_mmio */
- 0x4321f507,
-/* 0x0935: ctx_xfer_done */
- 0x0000f806,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xf001acf0,
+ 0xb7f006a5,
+ 0x000c9800,
+ 0xf0010d98,
+ 0x21f500e7,
+ 0xa7f0016f,
+ 0x1021f508,
+ 0x5e21f501,
+ 0x1301f402,
+ 0xf40ca7f0,
+ 0xf7f0d021,
+ 0x3c21f505,
+ 0x3202f408,
+/* 0x0ac4: ctx_xfer_post */
+ 0xf502f7f0,
+ 0xbd07b521,
+ 0x1421f5f4,
+ 0x7f21f508,
+ 0xc721f502,
+ 0xf5f4bd07,
+ 0xf407b521,
+ 0x01981011,
+ 0x0511fd40,
+ 0xf5070bf4,
+/* 0x0aef: ctx_xfer_no_post_mmio */
+ 0xf5099321,
+/* 0x0af3: ctx_xfer_done */
+ 0xf807a421,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
index a1b9f763996a..84af82418987 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
@@ -206,14 +206,14 @@ uint32_t nvd7_grhub_data[] = {
};
uint32_t nvd7_grhub_code[] = {
- 0x031b0ef5,
+ 0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
- 0x00f802fe,
+ 0x00f8037e,
/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
@@ -237,184 +237,214 @@ uint32_t nvd7_grhub_code[] = {
/* 0x0066: queue_get_done */
0x00f80132,
/* 0x0068: nv_rd32 */
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010921f5,
- 0xf840bfcf,
-/* 0x008d: nv_wr32 */
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
-/* 0x00bd: watchdog_clear */
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
-/* 0x00c9: wait_donez */
- 0xf094bd00,
- 0x07f10099,
- 0x03f00f00,
- 0x0009d002,
- 0x07f104bd,
- 0x03f00600,
- 0x000ad002,
-/* 0x00e6: wait_donez_ne */
- 0x87f104bd,
- 0x83f00000,
- 0x0088cf01,
- 0xf4888aff,
- 0x94bdf31b,
- 0xf10099f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0109: wait_doneo */
- 0xf094bd00,
+ 0xf002ecb9,
+ 0x07f11fc9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x007a: nv_rd32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0xa7f0f31b,
+ 0x1021f506,
+ 0x00f7f101,
+ 0x01f3f0cb,
+ 0xf800ffcf,
+/* 0x009d: nv_wr32 */
+ 0x0007f100,
+ 0x0103f0cc,
+ 0xbd000fd0,
+ 0x02ecb904,
+ 0xf01fc9f0,
+ 0x07f11ec9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x00be: nv_wr32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f31b,
+/* 0x00d0: wait_donez */
+ 0x99f094bd,
+ 0x0007f100,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x1bf4888a,
+ 0xf094bdf3,
0x07f10099,
- 0x03f00f00,
+ 0x03f01700,
0x0009d002,
- 0x87f104bd,
- 0x84b60818,
- 0x008ad006,
-/* 0x0124: wait_doneo_e */
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
+ 0x00f804bd,
+/* 0x0110: wait_doneo */
0x99f094bd,
0x0007f100,
- 0x0203f017,
+ 0x0203f00f,
0xbd0009d0,
-/* 0x0147: mmctx_size */
- 0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
- 0x00e89894,
- 0xb61a85b6,
- 0x84b60180,
- 0x0098bb02,
- 0xb804e0b6,
- 0x1bf404ef,
- 0x029fb9eb,
-/* 0x0166: mmctx_xfer */
- 0x94bd00f8,
- 0xf10199f0,
- 0xf00f0007,
- 0x09d00203,
- 0xf104bd00,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
-/* 0x018c: mmctx_base_disabled */
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
- 0xb70199f0,
- 0xc8010080,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x0bf4888a,
+ 0xf094bdf3,
+ 0x07f10099,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0xf404efb8,
+ 0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+ 0xbd00f802,
+ 0x0199f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0xbbfd94bd,
+ 0x120bf405,
+ 0xc40007f1,
+ 0xd00103f0,
+ 0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0x0007f11e,
+ 0x0103f0c6,
+ 0xbd000ed0,
+ 0x0007f104,
+ 0x0103f0c7,
+ 0xbd000fd0,
+ 0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x01aec80c,
+ 0xfd11e4b6,
+ 0x07f105be,
+ 0x03f0c500,
+ 0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+ 0xe7f104bd,
+ 0xe3f0c500,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f30b,
+ 0x05e9fd00,
+ 0xc80007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+ 0xb804c0b6,
+ 0x1bf404cd,
+ 0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+ 0xf11f1bf4,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x1fb4f000,
+ 0xf410b4b0,
+ 0xa7f0f01b,
+ 0xd021f402,
+/* 0x0223: mmctx_stop */
+ 0xc82b0ef4,
0xb4b600ab,
0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
-/* 0x01ea: mmctx_stop */
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
- 0x008bcf00,
- 0xf412bbc8,
-/* 0x0202: mmctx_done */
- 0x94bdfa1b,
- 0xf10199f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0215: strand_wait */
- 0xf0a0f900,
- 0x21f402a7,
- 0xf8a0fcc9,
-/* 0x0221: strand_pre */
- 0xfc87f100,
- 0x0283f04a,
- 0xd00c97f0,
- 0x21f50089,
- 0x00f80215,
-/* 0x0234: strand_post */
- 0x4afc87f1,
- 0xf00283f0,
- 0x89d00d97,
- 0x1521f500,
-/* 0x0247: strand_set */
- 0xf100f802,
- 0xf04ffca7,
- 0xaba202a3,
- 0xc7f00500,
- 0x00acd00f,
- 0xd00bc7f0,
- 0x21f500bc,
- 0xaed00215,
- 0x0ac7f000,
- 0xf500bcd0,
- 0xf8021521,
-/* 0x0271: strand_ctx_init */
- 0xf094bd00,
- 0x07f10399,
- 0x03f00f00,
+ 0xf112b9f0,
+ 0xf0c50007,
+ 0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+ 0xf104bd00,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x12bbc800,
+/* 0x024b: mmctx_done */
+ 0xbdf31bf4,
+ 0x0199f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x025e: strand_wait */
+ 0xa0f900f8,
+ 0xf402a7f0,
+ 0xa0fcd021,
+/* 0x026a: strand_pre */
+ 0x97f000f8,
+ 0xfc07f10c,
+ 0x0203f04a,
+ 0xbd0009d0,
+ 0x5e21f504,
+/* 0x027f: strand_post */
+ 0xf000f802,
+ 0x07f10d97,
+ 0x03f04afc,
0x0009d002,
0x21f504bd,
- 0xe7f00221,
- 0x4721f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x1521f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x1521f500,
- 0x3421f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+ 0x00f8025e,
+/* 0x0294: strand_set */
+ 0xf10fc7f0,
+ 0xf04ffc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f10bc7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x07f104bd,
+ 0x03f04ffc,
+ 0x000ed002,
+ 0xc7f004bd,
+ 0xfc07f10a,
+ 0x0203f04a,
+ 0xbd000cd0,
+ 0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+ 0xbd00f802,
+ 0x0399f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0x026a21f5,
+ 0xf503e7f0,
+ 0xbd029421,
+ 0xfc07f1c4,
+ 0x0203f047,
+ 0xbd000cd0,
+ 0x01c7f004,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd000c,
+ 0x025e21f5,
+ 0xf1010c92,
+ 0xf046fc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f102c7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x21f504bd,
+ 0x21f5025e,
+ 0x87f1027f,
+ 0x83f04200,
+ 0x0097f102,
+ 0x0293f020,
+ 0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -428,7 +458,7 @@ uint32_t nvd7_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
0x07f100f8,
0x03f00500,
0x000fd002,
@@ -436,82 +466,117 @@ uint32_t nvd7_grhub_code[] = {
0x0007f101,
0x0303f007,
0xbd000fd0,
-/* 0x031b: init */
+/* 0x039b: init */
0xbd00f804,
- 0x0004fe04,
- 0xf10007fe,
- 0xf0120017,
- 0x12d00227,
- 0xb117f100,
- 0x0010fe05,
- 0x040017f1,
- 0xf1c010d0,
- 0xb6040437,
- 0x27f10634,
- 0x32d02003,
- 0x0427f100,
- 0x0132d020,
+ 0x0007fe04,
+ 0x420017f1,
+ 0xcf0013f0,
+ 0x11e70011,
+ 0x14b60109,
+ 0x0014fe08,
+ 0xf10227f0,
+ 0xf0120007,
+ 0x02d00003,
+ 0xf104bd00,
+ 0xfe06c817,
+ 0x24bd0010,
+ 0x070007f1,
+ 0xd00003f0,
+ 0x04bd0002,
+ 0x200327f1,
+ 0x010007f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0x200427f1,
+ 0x010407f1,
+ 0xd00103f0,
+ 0x04bd0002,
0x200b27f1,
- 0xf10232d0,
- 0xd0200c27,
- 0x27f10732,
- 0x24b60c24,
- 0x0003b906,
- 0xf10023d0,
+ 0x010807f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0x200c27f1,
+ 0x011c07f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0xf1010392,
+ 0xf0090007,
+ 0x03d00303,
+ 0xf104bd00,
0xf0870427,
- 0x12d00023,
- 0x0012b700,
- 0x0427f001,
- 0xf40012d0,
- 0xe7f11031,
- 0xe3f09604,
- 0x6821f440,
- 0x8090f1c7,
- 0xf4f00301,
- 0x020f801f,
- 0xbb0117f0,
- 0x12b6041f,
- 0x0c27f101,
- 0x0624b604,
- 0xd00021d0,
- 0x17f14021,
- 0x0e980100,
- 0x010f9800,
- 0x014721f5,
- 0x070037f1,
- 0x950634b6,
- 0x34d00814,
- 0x4034d000,
- 0x130030b7,
- 0xb6001fbb,
- 0x3fd002f5,
- 0x0815b600,
- 0xb60110b6,
- 0x1fb90814,
- 0x7121f502,
- 0x001fbb02,
- 0xf1020398,
- 0xf0200047,
-/* 0x03f6: init_gpc */
- 0x4ea05043,
- 0x1fb90804,
- 0x8d21f402,
- 0x010c4ea0,
- 0x21f4f4bd,
- 0x044ea08d,
- 0x8d21f401,
- 0x01004ea0,
- 0xf402f7f0,
- 0x4ea08d21,
-/* 0x041e: init_gpc_wait */
- 0x21f40800,
- 0x1fffc868,
- 0xa0fa0bf4,
- 0xf408044e,
- 0x1fbb6821,
- 0x0040b700,
- 0x0132b680,
- 0xf1be1bf4,
+ 0x07f10023,
+ 0x03f00400,
+ 0x0002d000,
+ 0x27f004bd,
+ 0x0007f104,
+ 0x0003f003,
+ 0xbd0002d0,
+ 0x1031f404,
+ 0x9604e7f1,
+ 0xf440e3f0,
+ 0xfeb96821,
+ 0x90f1c702,
+ 0xf0030180,
+ 0x0f801ff4,
+ 0x0117f002,
+ 0xb6041fbb,
+ 0x07f10112,
+ 0x03f00300,
+ 0x0001d001,
+ 0x07f104bd,
+ 0x03f00400,
+ 0x0001d001,
+ 0x17f104bd,
+ 0xf7f00100,
+ 0xb521f502,
+ 0xc721f507,
+ 0x10f7f007,
+ 0x081421f5,
+ 0x98000e98,
+ 0x21f5010f,
+ 0x14950150,
+ 0x0007f108,
+ 0x0103f0c0,
+ 0xbd0004d0,
+ 0x0007f104,
+ 0x0103f0c1,
+ 0xbd0004d0,
+ 0x0030b704,
+ 0x001fbb13,
+ 0xf102f5b6,
+ 0xf0d30007,
+ 0x0fd00103,
+ 0xb604bd00,
+ 0x10b60815,
+ 0x0814b601,
+ 0xf5021fb9,
+ 0xbb02d321,
+ 0x0398001f,
+ 0x0047f102,
+ 0x5043f020,
+/* 0x04f4: init_gpc */
+ 0x08044ea0,
+ 0xf4021fb9,
+ 0x4ea09d21,
+ 0xf4bd010c,
+ 0xa09d21f4,
+ 0xf401044e,
+ 0x4ea09d21,
+ 0xf7f00100,
+ 0x9d21f402,
+ 0x08004ea0,
+/* 0x051c: init_gpc_wait */
+ 0xc86821f4,
+ 0x0bf41fff,
+ 0x044ea0fa,
+ 0x6821f408,
+ 0xb7001fbb,
+ 0xb6800040,
+ 0x1bf40132,
+ 0x00f7f0be,
+ 0x081421f5,
+ 0xf500f7f0,
+ 0xf107b521,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -519,402 +584,399 @@ uint32_t nvd7_grhub_code[] = {
0x080007f1,
0xd00203f0,
0x04bd0001,
-/* 0x0458: main */
+/* 0x0564: main */
0xf40031f4,
0xd7f00028,
0x3921f410,
0xb1f401f4,
0xf54001e4,
- 0xbd00de1b,
+ 0xbd00e91b,
0x0499f094,
0x0f0007f1,
0xd00203f0,
0x04bd0009,
- 0x0b0017f1,
- 0xcf0614b6,
- 0x11cf4012,
- 0x1f13c800,
- 0x00870bf5,
- 0xf41f23c8,
- 0x20f9620b,
- 0xbd0212b9,
- 0x0799f094,
- 0x0f0007f1,
- 0xd00203f0,
- 0x04bd0009,
- 0xf40132f4,
- 0x21f50231,
- 0x94bd082f,
+ 0xc00017f1,
+ 0xcf0213f0,
+ 0x27f10011,
+ 0x23f0c100,
+ 0x0022cf02,
+ 0xf51f13c8,
+ 0xc800890b,
+ 0x0bf41f23,
+ 0xb920f962,
+ 0x94bd0212,
0xf10799f0,
- 0xf0170007,
+ 0xf00f0007,
0x09d00203,
- 0xfc04bd00,
- 0xf094bd20,
- 0x07f10699,
- 0x03f00f00,
- 0x0009d002,
- 0x31f404bd,
- 0x2f21f501,
- 0xf094bd08,
- 0x07f10699,
+ 0xf404bd00,
+ 0x31f40132,
+ 0xe821f502,
+ 0xf094bd09,
+ 0x07f10799,
0x03f01700,
0x0009d002,
- 0x0ef404bd,
-/* 0x04f9: chsw_prev_no_next */
- 0xb920f931,
- 0x32f40212,
- 0x0232f401,
- 0x082f21f5,
- 0x17f120fc,
- 0x14b60b00,
- 0x0012d006,
-/* 0x0517: chsw_no_prev */
- 0xc8130ef4,
- 0x0bf41f23,
- 0x0131f40d,
- 0xf50232f4,
-/* 0x0527: chsw_done */
- 0xf1082f21,
- 0xb60b0c17,
- 0x27f00614,
- 0x0012d001,
+ 0x20fc04bd,
0x99f094bd,
- 0x0007f104,
+ 0x0007f106,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0131f404,
+ 0x09e821f5,
+ 0x99f094bd,
+ 0x0007f106,
0x0203f017,
0xbd0009d0,
- 0x130ef504,
-/* 0x0549: main_not_ctx_switch */
- 0x01e4b0ff,
- 0xb90d1bf4,
- 0x21f502f2,
- 0x0ef407bb,
-/* 0x0559: main_not_ctx_chan */
- 0x02e4b046,
- 0xbd321bf4,
- 0x0799f094,
- 0x0f0007f1,
+ 0x330ef404,
+/* 0x060c: chsw_prev_no_next */
+ 0x12b920f9,
+ 0x0132f402,
+ 0xf50232f4,
+ 0xfc09e821,
+ 0x0007f120,
+ 0x0203f0c0,
+ 0xbd0002d0,
+ 0x130ef404,
+/* 0x062c: chsw_no_prev */
+ 0xf41f23c8,
+ 0x31f40d0b,
+ 0x0232f401,
+ 0x09e821f5,
+/* 0x063c: chsw_done */
+ 0xf10127f0,
+ 0xf0c30007,
+ 0x02d00203,
+ 0xbd04bd00,
+ 0x0499f094,
+ 0x170007f1,
0xd00203f0,
0x04bd0009,
- 0xf40132f4,
- 0x21f50232,
- 0x94bd082f,
+ 0xff080ef5,
+/* 0x0660: main_not_ctx_switch */
+ 0xf401e4b0,
+ 0xf2b90d1b,
+ 0x7821f502,
+ 0x460ef409,
+/* 0x0670: main_not_ctx_chan */
+ 0xf402e4b0,
+ 0x94bd321b,
0xf10799f0,
- 0xf0170007,
+ 0xf00f0007,
0x09d00203,
0xf404bd00,
-/* 0x058e: main_not_ctx_save */
- 0xef94110e,
- 0x01f5f010,
- 0x02fe21f5,
- 0xfec00ef5,
-/* 0x059c: main_done */
- 0x29f024bd,
- 0x0007f11f,
- 0x0203f008,
- 0xbd0002d0,
- 0xab0ef504,
-/* 0x05b1: ih */
- 0xfe80f9fe,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0x0acf04bd,
- 0x04abc480,
- 0xf11d0bf4,
- 0xf01900b7,
- 0xbecf10d7,
- 0x00bfcf40,
+ 0x32f40132,
+ 0xe821f502,
+ 0xf094bd09,
+ 0x07f10799,
+ 0x03f01700,
+ 0x0009d002,
+ 0x0ef404bd,
+/* 0x06a5: main_not_ctx_save */
+ 0x10ef9411,
+ 0xf501f5f0,
+ 0xf5037e21,
+/* 0x06b3: main_done */
+ 0xbdfeb50e,
+ 0x1f29f024,
+ 0x080007f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xfea00ef5,
+/* 0x06c8: ih */
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x00a7f104,
+ 0x00a3f002,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x10d7f030,
+ 0x1a00e7f1,
+ 0xcf00e3f0,
+ 0xf7f100ee,
+ 0xf3f01900,
+ 0x00ffcf00,
0xb70421f4,
0xf00400b0,
- 0xbed001e7,
-/* 0x05e9: ih_no_fifo */
- 0x00abe400,
- 0x0d0bf401,
- 0xf110d7f0,
- 0xf44001e7,
-/* 0x05fa: ih_no_ctxsw */
- 0xb7f10421,
- 0xb0bd0104,
- 0xf4b4abff,
- 0xa7f10d0b,
- 0xa4b60c1c,
- 0x00abd006,
-/* 0x0610: ih_no_other */
- 0xfc400ad0,
+ 0x07f101e7,
+ 0x03f01d00,
+ 0x000ed000,
+/* 0x071a: ih_no_fifo */
+ 0xabe404bd,
+ 0x0bf40100,
+ 0x10d7f00d,
+ 0x4001e7f1,
+/* 0x072b: ih_no_ctxsw */
+ 0xe40421f4,
+ 0xf40400ab,
+ 0xb7f1140b,
+ 0xbfb90100,
+ 0x44e7f102,
+ 0x40e3f001,
+/* 0x0743: ih_no_fwmthd */
+ 0xf19d21f4,
+ 0xbd0104b7,
+ 0xb4abffb0,
+ 0xf10f0bf4,
+ 0xf0070007,
+ 0x0bd00303,
+/* 0x075b: ih_no_other */
+ 0xf104bd00,
+ 0xf0010007,
+ 0x0ad00003,
+ 0xfc04bd00,
0xfce0fcf0,
0xfcb0fcd0,
0xfc90fca0,
0x0088fe80,
0x32f480fc,
-/* 0x062b: ctx_4160s */
- 0xf101f800,
- 0xf04160e7,
- 0xf7f040e3,
- 0x8d21f401,
-/* 0x0638: ctx_4160s_wait */
- 0xc86821f4,
- 0x0bf404ff,
-/* 0x0643: ctx_4160c */
- 0xf100f8fa,
+/* 0x077f: ctx_4160s */
+ 0xf001f800,
+ 0xffb901f7,
+ 0x60e7f102,
+ 0x40e3f041,
+/* 0x078f: ctx_4160s_wait */
+ 0xf19d21f4,
0xf04160e7,
- 0xf4bd40e3,
- 0xf88d21f4,
-/* 0x0651: ctx_4170s */
- 0x70e7f100,
+ 0x21f440e3,
+ 0x02ffb968,
+ 0xf404ffc8,
+ 0x00f8f00b,
+/* 0x07a4: ctx_4160c */
+ 0xffb9f4bd,
+ 0x60e7f102,
0x40e3f041,
- 0xf410f5f0,
- 0x00f88d21,
-/* 0x0660: ctx_4170w */
- 0x4170e7f1,
- 0xf440e3f0,
- 0xf4f06821,
- 0xf31bf410,
-/* 0x0672: ctx_redswitch */
- 0xe7f100f8,
- 0xe4b60614,
- 0x70f7f106,
- 0x00efd002,
-/* 0x0683: ctx_redswitch_delay */
- 0xb608f7f0,
- 0x1bf401f2,
- 0x70f7f1fd,
- 0x00efd007,
-/* 0x0692: ctx_86c */
- 0xe7f100f8,
- 0xe4b6086c,
- 0x00efd006,
- 0x8a14e7f1,
- 0xf440e3f0,
- 0xe7f18d21,
- 0xe3f0a86c,
- 0x8d21f441,
-/* 0x06b2: ctx_load */
+ 0xf89d21f4,
+/* 0x07b5: ctx_4170s */
+ 0x10f5f000,
+ 0xf102ffb9,
+ 0xf04170e7,
+ 0x21f440e3,
+/* 0x07c7: ctx_4170w */
+ 0xf100f89d,
+ 0xf04170e7,
+ 0x21f440e3,
+ 0x02ffb968,
+ 0xf410f4f0,
+ 0x00f8f01b,
+/* 0x07dc: ctx_redswitch */
+ 0x0200e7f1,
+ 0xf040e5f0,
+ 0xe5f020e5,
+ 0x0007f110,
+ 0x0103f085,
+ 0xbd000ed0,
+ 0x08f7f004,
+/* 0x07f8: ctx_redswitch_delay */
+ 0xf401f2b6,
+ 0xe5f1fd1b,
+ 0xe5f10400,
+ 0x07f10100,
+ 0x03f08500,
+ 0x000ed001,
+ 0x00f804bd,
+/* 0x0814: ctx_86c */
+ 0x1b0007f1,
+ 0xd00203f0,
+ 0x04bd000f,
+ 0xf102ffb9,
+ 0xf08a14e7,
+ 0x21f440e3,
+ 0x02ffb99d,
+ 0xa86ce7f1,
+ 0xf441e3f0,
+ 0x00f89d21,
+/* 0x083c: ctx_mem */
+ 0x840007f1,
+ 0xd00203f0,
+ 0x04bd000f,
+/* 0x0848: ctx_mem_wait */
+ 0x8400f7f1,
+ 0xcf02f3f0,
+ 0xfffd00ff,
+ 0xf31bf405,
+/* 0x085a: ctx_load */
0x94bd00f8,
0xf10599f0,
0xf00f0007,
0x09d00203,
0xf004bd00,
0x21f40ca7,
- 0x2417f1c9,
- 0x0614b60a,
- 0xf10010d0,
- 0xb60b0037,
- 0x32d00634,
- 0x0c17f140,
- 0x0614b60a,
- 0xd00747f0,
- 0x14d00012,
-/* 0x06ed: ctx_chan_wait_0 */
- 0x4014cf40,
- 0xf41f44f0,
- 0x32d0fa1b,
- 0x000bfe00,
- 0xb61f2af0,
- 0x20b60424,
- 0xf094bd02,
+ 0xf1f4bdd0,
+ 0xf0890007,
+ 0x0fd00203,
+ 0xf104bd00,
+ 0xf0c10007,
+ 0x02d00203,
+ 0xf104bd00,
+ 0xf0830007,
+ 0x02d00203,
+ 0xf004bd00,
+ 0x21f507f7,
+ 0x07f1083c,
+ 0x03f0c000,
+ 0x0002d002,
+ 0x0bfe04bd,
+ 0x1f2af000,
+ 0xb60424b6,
+ 0x94bd0220,
+ 0xf10899f0,
+ 0xf00f0007,
+ 0x09d00203,
+ 0xf104bd00,
+ 0xf0810007,
+ 0x02d00203,
+ 0xf104bd00,
+ 0xf1000027,
+ 0xf0800023,
+ 0x07f10225,
+ 0x03f08800,
+ 0x0002d002,
+ 0x17f004bd,
+ 0x0027f110,
+ 0x0223f002,
+ 0xf80512fa,
+ 0xf094bd03,
0x07f10899,
- 0x03f00f00,
+ 0x03f01700,
0x0009d002,
- 0x17f104bd,
- 0x14b60a04,
- 0x0012d006,
- 0x0a2017f1,
- 0xf00614b6,
- 0x23f10227,
- 0x12d08000,
- 0x1017f000,
- 0x020027f1,
- 0xfa0223f0,
- 0x03f80512,
+ 0x019804bd,
+ 0x1814b681,
+ 0xb6800298,
+ 0x12fd0825,
+ 0x16018005,
0x99f094bd,
- 0x0007f108,
- 0x0203f017,
+ 0x0007f109,
+ 0x0203f00f,
0xbd0009d0,
- 0x81019804,
- 0x981814b6,
- 0x25b68002,
- 0x0512fd08,
- 0xbd160180,
- 0x0999f094,
- 0x0f0007f1,
- 0xd00203f0,
- 0x04bd0009,
- 0x0a0427f1,
- 0xd00624b6,
- 0x27f00021,
- 0x2017f101,
- 0x0614b60a,
- 0xf10012d0,
- 0xf0010017,
- 0x01fa0613,
- 0xbd03f805,
- 0x0999f094,
- 0x170007f1,
+ 0x0007f104,
+ 0x0203f081,
+ 0xbd0001d0,
+ 0x0127f004,
+ 0x880007f1,
0xd00203f0,
- 0x04bd0009,
+ 0x04bd0002,
+ 0x010017f1,
+ 0xfa0613f0,
+ 0x03f80501,
0x99f094bd,
- 0x0007f105,
+ 0x0007f109,
0x0203f017,
0xbd0009d0,
-/* 0x07bb: ctx_chan */
- 0xf500f804,
- 0xf5062b21,
- 0xf006b221,
- 0x21f40ca7,
- 0x1017f1c9,
- 0x0614b60a,
- 0xd00527f0,
-/* 0x07d6: ctx_chan_wait */
- 0x12cf0012,
- 0x0522fd00,
- 0xf5fa1bf4,
- 0xf8064321,
-/* 0x07e5: ctx_mmio_exec */
- 0x41039800,
- 0x0a0427f1,
- 0xd00624b6,
- 0x34bd0023,
-/* 0x07f4: ctx_mmio_loop */
+ 0xf094bd04,
+ 0x07f10599,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0978: ctx_chan */
+ 0x077f21f5,
+ 0x085a21f5,
+ 0xf40ca7f0,
+ 0xf7f0d021,
+ 0x3c21f505,
+ 0xa421f508,
+/* 0x0993: ctx_mmio_exec */
+ 0x9800f807,
+ 0x07f14103,
+ 0x03f08100,
+ 0x0003d002,
+ 0x34bd04bd,
+/* 0x09a4: ctx_mmio_loop */
0xf4ff34c4,
0x57f10f1b,
0x53f00200,
0x0535fa06,
-/* 0x0806: ctx_mmio_pull */
+/* 0x09b6: ctx_mmio_pull */
0x4e9803f8,
0x814f9880,
- 0xb68d21f4,
+ 0xb69d21f4,
0x12b60830,
0xdf1bf401,
-/* 0x0818: ctx_mmio_done */
- 0xd0160398,
- 0x00800023,
- 0x0017f140,
- 0x0613f001,
- 0xf80601fa,
-/* 0x082f: ctx_xfer */
- 0xf100f803,
- 0xb60c00f7,
- 0xe7f006f4,
- 0x80fed004,
-/* 0x083c: ctx_xfer_idle */
- 0xf100fecf,
- 0xf42000e4,
- 0x11f4f91b,
- 0x1102f406,
-/* 0x084c: ctx_xfer_pre */
- 0xf510f7f0,
- 0xf5069221,
- 0xf4062b21,
-/* 0x085a: ctx_xfer_pre_load */
- 0xf7f01c11,
- 0x5121f502,
- 0x6021f506,
- 0x7221f506,
- 0xf5f4bd06,
- 0xf5065121,
-/* 0x0873: ctx_xfer_exec */
- 0x9806b221,
- 0x27f11601,
- 0x24b60414,
- 0x0020d006,
- 0xa500e7f1,
- 0xb941e3f0,
- 0x21f4021f,
- 0x04e0b68d,
- 0xf001fcf0,
- 0x24b6022c,
- 0x05f2fd01,
- 0xf18d21f4,
- 0xf04afc17,
- 0x27f00213,
- 0x0012d00c,
- 0x021521f5,
- 0x47fc27f1,
- 0xd00223f0,
- 0x2cf00020,
+/* 0x09c8: ctx_mmio_done */
+ 0xf1160398,
+ 0xf0810007,
+ 0x03d00203,
+ 0x8004bd00,
+ 0x17f14000,
+ 0x13f00100,
+ 0x0601fa06,
+ 0x00f803f8,
+/* 0x09e8: ctx_xfer */
+ 0xf104e7f0,
+ 0xf0020007,
+ 0x0ed00303,
+/* 0x09f7: ctx_xfer_idle */
+ 0xf104bd00,
+ 0xf00000e7,
+ 0xeecf03e3,
+ 0x00e4f100,
+ 0xf21bf420,
+ 0xf40611f4,
+/* 0x0a0e: ctx_xfer_pre */
+ 0xf7f01102,
+ 0x1421f510,
+ 0x7f21f508,
+ 0x1c11f407,
+/* 0x0a1c: ctx_xfer_pre_load */
+ 0xf502f7f0,
+ 0xf507b521,
+ 0xf507c721,
+ 0xbd07dc21,
+ 0xb521f5f4,
+ 0x5a21f507,
+/* 0x0a35: ctx_xfer_exec */
+ 0x16019808,
+ 0x07f124bd,
+ 0x03f00500,
+ 0x0002d001,
+ 0x1fb904bd,
+ 0x00e7f102,
+ 0x41e3f0a5,
+ 0xf09d21f4,
+ 0x2cf001fc,
+ 0x0124b602,
+ 0xb905f2fd,
+ 0xe7f102ff,
+ 0xe3f0a504,
+ 0x9d21f441,
+ 0x026a21f5,
+ 0x07f124bd,
+ 0x03f047fc,
+ 0x0002d002,
+ 0x2cf004bd,
0x0320b601,
- 0xf00012d0,
- 0xa5f001ac,
- 0x00b7f006,
- 0x98000c98,
- 0xe7f0010d,
- 0x6621f500,
- 0x08a7f001,
- 0x010921f5,
- 0x021521f5,
- 0xf02201f4,
- 0x21f40ca7,
- 0x1017f1c9,
- 0x0614b60a,
- 0xd00527f0,
-/* 0x08fa: ctx_xfer_post_save_wait */
- 0x12cf0012,
- 0x0522fd00,
- 0xf4fa1bf4,
-/* 0x0906: ctx_xfer_post */
- 0xf7f03202,
- 0x5121f502,
- 0xf5f4bd06,
- 0xf5069221,
- 0xf5023421,
- 0xbd066021,
- 0x5121f5f4,
- 0x1011f406,
- 0xfd400198,
- 0x0bf40511,
- 0xe521f507,
-/* 0x0931: ctx_xfer_no_post_mmio */
- 0x4321f507,
-/* 0x0935: ctx_xfer_done */
- 0x0000f806,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xf001acf0,
+ 0xb7f006a5,
+ 0x000c9800,
+ 0xf0010d98,
+ 0x21f500e7,
+ 0xa7f0016f,
+ 0x1021f508,
+ 0x5e21f501,
+ 0x1301f402,
+ 0xf40ca7f0,
+ 0xf7f0d021,
+ 0x3c21f505,
+ 0x3202f408,
+/* 0x0ac4: ctx_xfer_post */
+ 0xf502f7f0,
+ 0xbd07b521,
+ 0x1421f5f4,
+ 0x7f21f508,
+ 0xc721f502,
+ 0xf5f4bd07,
+ 0xf407b521,
+ 0x01981011,
+ 0x0511fd40,
+ 0xf5070bf4,
+/* 0x0aef: ctx_xfer_no_post_mmio */
+ 0xf5099321,
+/* 0x0af3: ctx_xfer_done */
+ 0xf807a421,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
index eb7bc0e9576e..1c179bdd48cc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -206,14 +206,14 @@ uint32_t nve0_grhub_data[] = {
};
uint32_t nve0_grhub_code[] = {
- 0x031b0ef5,
+ 0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
- 0x00f802fe,
+ 0x00f8037e,
/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
@@ -237,184 +237,214 @@ uint32_t nve0_grhub_code[] = {
/* 0x0066: queue_get_done */
0x00f80132,
/* 0x0068: nv_rd32 */
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010921f5,
- 0xf840bfcf,
-/* 0x008d: nv_wr32 */
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
-/* 0x00bd: watchdog_clear */
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
-/* 0x00c9: wait_donez */
- 0xf094bd00,
- 0x07f10099,
- 0x03f00f00,
- 0x0009d002,
- 0x07f104bd,
- 0x03f00600,
- 0x000ad002,
-/* 0x00e6: wait_donez_ne */
- 0x87f104bd,
- 0x83f00000,
- 0x0088cf01,
- 0xf4888aff,
- 0x94bdf31b,
- 0xf10099f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0109: wait_doneo */
- 0xf094bd00,
+ 0xf002ecb9,
+ 0x07f11fc9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x007a: nv_rd32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0xa7f0f31b,
+ 0x1021f506,
+ 0x00f7f101,
+ 0x01f3f0cb,
+ 0xf800ffcf,
+/* 0x009d: nv_wr32 */
+ 0x0007f100,
+ 0x0103f0cc,
+ 0xbd000fd0,
+ 0x02ecb904,
+ 0xf01fc9f0,
+ 0x07f11ec9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x00be: nv_wr32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f31b,
+/* 0x00d0: wait_donez */
+ 0x99f094bd,
+ 0x0007f100,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x1bf4888a,
+ 0xf094bdf3,
0x07f10099,
- 0x03f00f00,
+ 0x03f01700,
0x0009d002,
- 0x87f104bd,
- 0x84b60818,
- 0x008ad006,
-/* 0x0124: wait_doneo_e */
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
+ 0x00f804bd,
+/* 0x0110: wait_doneo */
0x99f094bd,
0x0007f100,
- 0x0203f017,
+ 0x0203f00f,
0xbd0009d0,
-/* 0x0147: mmctx_size */
- 0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
- 0x00e89894,
- 0xb61a85b6,
- 0x84b60180,
- 0x0098bb02,
- 0xb804e0b6,
- 0x1bf404ef,
- 0x029fb9eb,
-/* 0x0166: mmctx_xfer */
- 0x94bd00f8,
- 0xf10199f0,
- 0xf00f0007,
- 0x09d00203,
- 0xf104bd00,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
-/* 0x018c: mmctx_base_disabled */
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
- 0xb70199f0,
- 0xc8010080,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x0bf4888a,
+ 0xf094bdf3,
+ 0x07f10099,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0xf404efb8,
+ 0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+ 0xbd00f802,
+ 0x0199f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0xbbfd94bd,
+ 0x120bf405,
+ 0xc40007f1,
+ 0xd00103f0,
+ 0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0x0007f11e,
+ 0x0103f0c6,
+ 0xbd000ed0,
+ 0x0007f104,
+ 0x0103f0c7,
+ 0xbd000fd0,
+ 0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x01aec80c,
+ 0xfd11e4b6,
+ 0x07f105be,
+ 0x03f0c500,
+ 0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+ 0xe7f104bd,
+ 0xe3f0c500,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f30b,
+ 0x05e9fd00,
+ 0xc80007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+ 0xb804c0b6,
+ 0x1bf404cd,
+ 0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+ 0xf11f1bf4,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x1fb4f000,
+ 0xf410b4b0,
+ 0xa7f0f01b,
+ 0xd021f402,
+/* 0x0223: mmctx_stop */
+ 0xc82b0ef4,
0xb4b600ab,
0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
-/* 0x01ea: mmctx_stop */
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
- 0x008bcf00,
- 0xf412bbc8,
-/* 0x0202: mmctx_done */
- 0x94bdfa1b,
- 0xf10199f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0215: strand_wait */
- 0xf0a0f900,
- 0x21f402a7,
- 0xf8a0fcc9,
-/* 0x0221: strand_pre */
- 0xfc87f100,
- 0x0283f04a,
- 0xd00c97f0,
- 0x21f50089,
- 0x00f80215,
-/* 0x0234: strand_post */
- 0x4afc87f1,
- 0xf00283f0,
- 0x89d00d97,
- 0x1521f500,
-/* 0x0247: strand_set */
- 0xf100f802,
- 0xf04ffca7,
- 0xaba202a3,
- 0xc7f00500,
- 0x00acd00f,
- 0xd00bc7f0,
- 0x21f500bc,
- 0xaed00215,
- 0x0ac7f000,
- 0xf500bcd0,
- 0xf8021521,
-/* 0x0271: strand_ctx_init */
- 0xf094bd00,
- 0x07f10399,
- 0x03f00f00,
+ 0xf112b9f0,
+ 0xf0c50007,
+ 0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+ 0xf104bd00,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x12bbc800,
+/* 0x024b: mmctx_done */
+ 0xbdf31bf4,
+ 0x0199f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x025e: strand_wait */
+ 0xa0f900f8,
+ 0xf402a7f0,
+ 0xa0fcd021,
+/* 0x026a: strand_pre */
+ 0x97f000f8,
+ 0xfc07f10c,
+ 0x0203f04a,
+ 0xbd0009d0,
+ 0x5e21f504,
+/* 0x027f: strand_post */
+ 0xf000f802,
+ 0x07f10d97,
+ 0x03f04afc,
0x0009d002,
0x21f504bd,
- 0xe7f00221,
- 0x4721f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x1521f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x1521f500,
- 0x3421f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+ 0x00f8025e,
+/* 0x0294: strand_set */
+ 0xf10fc7f0,
+ 0xf04ffc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f10bc7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x07f104bd,
+ 0x03f04ffc,
+ 0x000ed002,
+ 0xc7f004bd,
+ 0xfc07f10a,
+ 0x0203f04a,
+ 0xbd000cd0,
+ 0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+ 0xbd00f802,
+ 0x0399f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0x026a21f5,
+ 0xf503e7f0,
+ 0xbd029421,
+ 0xfc07f1c4,
+ 0x0203f047,
+ 0xbd000cd0,
+ 0x01c7f004,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd000c,
+ 0x025e21f5,
+ 0xf1010c92,
+ 0xf046fc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f102c7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x21f504bd,
+ 0x21f5025e,
+ 0x87f1027f,
+ 0x83f04200,
+ 0x0097f102,
+ 0x0293f020,
+ 0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -428,7 +458,7 @@ uint32_t nve0_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
0x07f100f8,
0x03f00500,
0x000fd002,
@@ -436,82 +466,117 @@ uint32_t nve0_grhub_code[] = {
0x0007f101,
0x0303f007,
0xbd000fd0,
-/* 0x031b: init */
+/* 0x039b: init */
0xbd00f804,
- 0x0004fe04,
- 0xf10007fe,
- 0xf0120017,
- 0x12d00227,
- 0xb117f100,
- 0x0010fe05,
- 0x040017f1,
- 0xf1c010d0,
- 0xb6040437,
- 0x27f10634,
- 0x32d02003,
- 0x0427f100,
- 0x0132d020,
+ 0x0007fe04,
+ 0x420017f1,
+ 0xcf0013f0,
+ 0x11e70011,
+ 0x14b60109,
+ 0x0014fe08,
+ 0xf10227f0,
+ 0xf0120007,
+ 0x02d00003,
+ 0xf104bd00,
+ 0xfe06c817,
+ 0x24bd0010,
+ 0x070007f1,
+ 0xd00003f0,
+ 0x04bd0002,
+ 0x200327f1,
+ 0x010007f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0x200427f1,
+ 0x010407f1,
+ 0xd00103f0,
+ 0x04bd0002,
0x200b27f1,
- 0xf10232d0,
- 0xd0200c27,
- 0x27f10732,
- 0x24b60c24,
- 0x0003b906,
- 0xf10023d0,
+ 0x010807f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0x200c27f1,
+ 0x011c07f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0xf1010392,
+ 0xf0090007,
+ 0x03d00303,
+ 0xf104bd00,
0xf0870427,
- 0x12d00023,
- 0x0012b700,
- 0x0427f001,
- 0xf40012d0,
- 0xe7f11031,
- 0xe3f09604,
- 0x6821f440,
- 0x8090f1c7,
- 0xf4f00301,
- 0x020f801f,
- 0xbb0117f0,
- 0x12b6041f,
- 0x0c27f101,
- 0x0624b604,
- 0xd00021d0,
- 0x17f14021,
- 0x0e980100,
- 0x010f9800,
- 0x014721f5,
- 0x070037f1,
- 0x950634b6,
- 0x34d00814,
- 0x4034d000,
- 0x130030b7,
- 0xb6001fbb,
- 0x3fd002f5,
- 0x0815b600,
- 0xb60110b6,
- 0x1fb90814,
- 0x7121f502,
- 0x001fbb02,
- 0xf1020398,
- 0xf0200047,
-/* 0x03f6: init_gpc */
- 0x4ea05043,
- 0x1fb90804,
- 0x8d21f402,
- 0x010c4ea0,
- 0x21f4f4bd,
- 0x044ea08d,
- 0x8d21f401,
- 0x01004ea0,
- 0xf402f7f0,
- 0x4ea08d21,
-/* 0x041e: init_gpc_wait */
- 0x21f40800,
- 0x1fffc868,
- 0xa0fa0bf4,
- 0xf408044e,
- 0x1fbb6821,
- 0x0040b700,
- 0x0132b680,
- 0xf1be1bf4,
+ 0x07f10023,
+ 0x03f00400,
+ 0x0002d000,
+ 0x27f004bd,
+ 0x0007f104,
+ 0x0003f003,
+ 0xbd0002d0,
+ 0x1031f404,
+ 0x9604e7f1,
+ 0xf440e3f0,
+ 0xfeb96821,
+ 0x90f1c702,
+ 0xf0030180,
+ 0x0f801ff4,
+ 0x0117f002,
+ 0xb6041fbb,
+ 0x07f10112,
+ 0x03f00300,
+ 0x0001d001,
+ 0x07f104bd,
+ 0x03f00400,
+ 0x0001d001,
+ 0x17f104bd,
+ 0xf7f00100,
+ 0x7f21f502,
+ 0x9121f507,
+ 0x10f7f007,
+ 0x07de21f5,
+ 0x98000e98,
+ 0x21f5010f,
+ 0x14950150,
+ 0x0007f108,
+ 0x0103f0c0,
+ 0xbd0004d0,
+ 0x0007f104,
+ 0x0103f0c1,
+ 0xbd0004d0,
+ 0x0030b704,
+ 0x001fbb13,
+ 0xf102f5b6,
+ 0xf0d30007,
+ 0x0fd00103,
+ 0xb604bd00,
+ 0x10b60815,
+ 0x0814b601,
+ 0xf5021fb9,
+ 0xbb02d321,
+ 0x0398001f,
+ 0x0047f102,
+ 0x5043f020,
+/* 0x04f4: init_gpc */
+ 0x08044ea0,
+ 0xf4021fb9,
+ 0x4ea09d21,
+ 0xf4bd010c,
+ 0xa09d21f4,
+ 0xf401044e,
+ 0x4ea09d21,
+ 0xf7f00100,
+ 0x9d21f402,
+ 0x08004ea0,
+/* 0x051c: init_gpc_wait */
+ 0xc86821f4,
+ 0x0bf41fff,
+ 0x044ea0fa,
+ 0x6821f408,
+ 0xb7001fbb,
+ 0xb6800040,
+ 0x1bf40132,
+ 0x00f7f0be,
+ 0x07de21f5,
+ 0xf500f7f0,
+ 0xf1077f21,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -519,382 +584,379 @@ uint32_t nve0_grhub_code[] = {
0x080007f1,
0xd00203f0,
0x04bd0001,
-/* 0x0458: main */
+/* 0x0564: main */
0xf40031f4,
0xd7f00028,
0x3921f410,
0xb1f401f4,
0xf54001e4,
- 0xbd00de1b,
+ 0xbd00e91b,
0x0499f094,
0x0f0007f1,
0xd00203f0,
0x04bd0009,
- 0x0b0017f1,
- 0xcf0614b6,
- 0x11cf4012,
- 0x1f13c800,
- 0x00870bf5,
- 0xf41f23c8,
- 0x20f9620b,
- 0xbd0212b9,
- 0x0799f094,
- 0x0f0007f1,
- 0xd00203f0,
- 0x04bd0009,
- 0xf40132f4,
- 0x21f50231,
- 0x94bd0801,
+ 0xc00017f1,
+ 0xcf0213f0,
+ 0x27f10011,
+ 0x23f0c100,
+ 0x0022cf02,
+ 0xf51f13c8,
+ 0xc800890b,
+ 0x0bf41f23,
+ 0xb920f962,
+ 0x94bd0212,
0xf10799f0,
- 0xf0170007,
+ 0xf00f0007,
0x09d00203,
- 0xfc04bd00,
- 0xf094bd20,
- 0x07f10699,
- 0x03f00f00,
- 0x0009d002,
- 0x31f404bd,
- 0x0121f501,
- 0xf094bd08,
- 0x07f10699,
+ 0xf404bd00,
+ 0x31f40132,
+ 0xaa21f502,
+ 0xf094bd09,
+ 0x07f10799,
0x03f01700,
0x0009d002,
- 0x0ef404bd,
-/* 0x04f9: chsw_prev_no_next */
- 0xb920f931,
- 0x32f40212,
- 0x0232f401,
- 0x080121f5,
- 0x17f120fc,
- 0x14b60b00,
- 0x0012d006,
-/* 0x0517: chsw_no_prev */
- 0xc8130ef4,
- 0x0bf41f23,
- 0x0131f40d,
- 0xf50232f4,
-/* 0x0527: chsw_done */
- 0xf1080121,
- 0xb60b0c17,
- 0x27f00614,
- 0x0012d001,
+ 0x20fc04bd,
0x99f094bd,
- 0x0007f104,
+ 0x0007f106,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0131f404,
+ 0x09aa21f5,
+ 0x99f094bd,
+ 0x0007f106,
0x0203f017,
0xbd0009d0,
- 0x130ef504,
-/* 0x0549: main_not_ctx_switch */
- 0x01e4b0ff,
- 0xb90d1bf4,
- 0x21f502f2,
- 0x0ef40795,
-/* 0x0559: main_not_ctx_chan */
- 0x02e4b046,
- 0xbd321bf4,
- 0x0799f094,
- 0x0f0007f1,
+ 0x330ef404,
+/* 0x060c: chsw_prev_no_next */
+ 0x12b920f9,
+ 0x0132f402,
+ 0xf50232f4,
+ 0xfc09aa21,
+ 0x0007f120,
+ 0x0203f0c0,
+ 0xbd0002d0,
+ 0x130ef404,
+/* 0x062c: chsw_no_prev */
+ 0xf41f23c8,
+ 0x31f40d0b,
+ 0x0232f401,
+ 0x09aa21f5,
+/* 0x063c: chsw_done */
+ 0xf10127f0,
+ 0xf0c30007,
+ 0x02d00203,
+ 0xbd04bd00,
+ 0x0499f094,
+ 0x170007f1,
0xd00203f0,
0x04bd0009,
- 0xf40132f4,
- 0x21f50232,
- 0x94bd0801,
+ 0xff080ef5,
+/* 0x0660: main_not_ctx_switch */
+ 0xf401e4b0,
+ 0xf2b90d1b,
+ 0x4221f502,
+ 0x460ef409,
+/* 0x0670: main_not_ctx_chan */
+ 0xf402e4b0,
+ 0x94bd321b,
0xf10799f0,
- 0xf0170007,
+ 0xf00f0007,
0x09d00203,
0xf404bd00,
-/* 0x058e: main_not_ctx_save */
- 0xef94110e,
- 0x01f5f010,
- 0x02fe21f5,
- 0xfec00ef5,
-/* 0x059c: main_done */
- 0x29f024bd,
- 0x0007f11f,
- 0x0203f008,
- 0xbd0002d0,
- 0xab0ef504,
-/* 0x05b1: ih */
- 0xfe80f9fe,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0x0acf04bd,
- 0x04abc480,
- 0xf11d0bf4,
- 0xf01900b7,
- 0xbecf10d7,
- 0x00bfcf40,
+ 0x32f40132,
+ 0xaa21f502,
+ 0xf094bd09,
+ 0x07f10799,
+ 0x03f01700,
+ 0x0009d002,
+ 0x0ef404bd,
+/* 0x06a5: main_not_ctx_save */
+ 0x10ef9411,
+ 0xf501f5f0,
+ 0xf5037e21,
+/* 0x06b3: main_done */
+ 0xbdfeb50e,
+ 0x1f29f024,
+ 0x080007f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xfea00ef5,
+/* 0x06c8: ih */
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x00a7f104,
+ 0x00a3f002,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x10d7f030,
+ 0x1a00e7f1,
+ 0xcf00e3f0,
+ 0xf7f100ee,
+ 0xf3f01900,
+ 0x00ffcf00,
0xb70421f4,
0xf00400b0,
- 0xbed001e7,
-/* 0x05e9: ih_no_fifo */
- 0x00abe400,
- 0x0d0bf401,
- 0xf110d7f0,
- 0xf44001e7,
-/* 0x05fa: ih_no_ctxsw */
- 0xb7f10421,
- 0xb0bd0104,
- 0xf4b4abff,
- 0xa7f10d0b,
- 0xa4b60c1c,
- 0x00abd006,
-/* 0x0610: ih_no_other */
- 0xfc400ad0,
+ 0x07f101e7,
+ 0x03f01d00,
+ 0x000ed000,
+/* 0x071a: ih_no_fifo */
+ 0xabe404bd,
+ 0x0bf40100,
+ 0x10d7f00d,
+ 0x4001e7f1,
+/* 0x072b: ih_no_ctxsw */
+ 0xe40421f4,
+ 0xf40400ab,
+ 0xb7f1140b,
+ 0xbfb90100,
+ 0x44e7f102,
+ 0x40e3f001,
+/* 0x0743: ih_no_fwmthd */
+ 0xf19d21f4,
+ 0xbd0104b7,
+ 0xb4abffb0,
+ 0xf10f0bf4,
+ 0xf0070007,
+ 0x0bd00303,
+/* 0x075b: ih_no_other */
+ 0xf104bd00,
+ 0xf0010007,
+ 0x0ad00003,
+ 0xfc04bd00,
0xfce0fcf0,
0xfcb0fcd0,
0xfc90fca0,
0x0088fe80,
0x32f480fc,
-/* 0x062b: ctx_4170s */
- 0xf101f800,
- 0xf04170e7,
- 0xf5f040e3,
- 0x8d21f410,
-/* 0x063a: ctx_4170w */
+/* 0x077f: ctx_4170s */
+ 0xf001f800,
+ 0xffb910f5,
+ 0x70e7f102,
+ 0x40e3f041,
+ 0xf89d21f4,
+/* 0x0791: ctx_4170w */
+ 0x70e7f100,
+ 0x40e3f041,
+ 0xb96821f4,
+ 0xf4f002ff,
+ 0xf01bf410,
+/* 0x07a6: ctx_redswitch */
0xe7f100f8,
- 0xe3f04170,
- 0x6821f440,
- 0xf410f4f0,
+ 0xe5f00200,
+ 0x20e5f040,
+ 0xf110e5f0,
+ 0xf0850007,
+ 0x0ed00103,
+ 0xf004bd00,
+/* 0x07c2: ctx_redswitch_delay */
+ 0xf2b608f7,
+ 0xfd1bf401,
+ 0x0400e5f1,
+ 0x0100e5f1,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+/* 0x07de: ctx_86c */
+ 0x07f100f8,
+ 0x03f01b00,
+ 0x000fd002,
+ 0xffb904bd,
+ 0x14e7f102,
+ 0x40e3f08a,
+ 0xb99d21f4,
+ 0xe7f102ff,
+ 0xe3f0a86c,
+ 0x9d21f441,
+/* 0x0806: ctx_mem */
+ 0x07f100f8,
+ 0x03f08400,
+ 0x000fd002,
+/* 0x0812: ctx_mem_wait */
+ 0xf7f104bd,
+ 0xf3f08400,
+ 0x00ffcf02,
+ 0xf405fffd,
0x00f8f31b,
-/* 0x064c: ctx_redswitch */
- 0x0614e7f1,
- 0xf106e4b6,
- 0xd00270f7,
- 0xf7f000ef,
-/* 0x065d: ctx_redswitch_delay */
- 0x01f2b608,
- 0xf1fd1bf4,
- 0xd00770f7,
- 0x00f800ef,
-/* 0x066c: ctx_86c */
- 0x086ce7f1,
- 0xd006e4b6,
- 0xe7f100ef,
- 0xe3f08a14,
- 0x8d21f440,
- 0xa86ce7f1,
- 0xf441e3f0,
- 0x00f88d21,
-/* 0x068c: ctx_load */
+/* 0x0824: ctx_load */
0x99f094bd,
0x0007f105,
0x0203f00f,
0xbd0009d0,
0x0ca7f004,
- 0xf1c921f4,
- 0xb60a2417,
- 0x10d00614,
- 0x0037f100,
- 0x0634b60b,
- 0xf14032d0,
- 0xb60a0c17,
- 0x47f00614,
- 0x0012d007,
-/* 0x06c7: ctx_chan_wait_0 */
- 0xcf4014d0,
- 0x44f04014,
- 0xfa1bf41f,
- 0xfe0032d0,
- 0x2af0000b,
- 0x0424b61f,
- 0xbd0220b6,
+ 0xbdd021f4,
+ 0x0007f1f4,
+ 0x0203f089,
+ 0xbd000fd0,
+ 0x0007f104,
+ 0x0203f0c1,
+ 0xbd0002d0,
+ 0x0007f104,
+ 0x0203f083,
+ 0xbd0002d0,
+ 0x07f7f004,
+ 0x080621f5,
+ 0xc00007f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xf0000bfe,
+ 0x24b61f2a,
+ 0x0220b604,
+ 0x99f094bd,
+ 0x0007f108,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f081,
+ 0xbd0002d0,
+ 0x0027f104,
+ 0x0023f100,
+ 0x0225f080,
+ 0x880007f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xf11017f0,
+ 0xf0020027,
+ 0x12fa0223,
+ 0xbd03f805,
0x0899f094,
- 0x0f0007f1,
+ 0x170007f1,
0xd00203f0,
0x04bd0009,
- 0x0a0417f1,
- 0xd00614b6,
- 0x17f10012,
- 0x14b60a20,
- 0x0227f006,
- 0x800023f1,
- 0xf00012d0,
- 0x27f11017,
- 0x23f00200,
- 0x0512fa02,
- 0x94bd03f8,
- 0xf10899f0,
- 0xf0170007,
+ 0xb6810198,
+ 0x02981814,
+ 0x0825b680,
+ 0x800512fd,
+ 0x94bd1601,
+ 0xf10999f0,
+ 0xf00f0007,
0x09d00203,
- 0x9804bd00,
- 0x14b68101,
- 0x80029818,
- 0xfd0825b6,
- 0x01800512,
- 0xf094bd16,
- 0x07f10999,
- 0x03f00f00,
- 0x0009d002,
- 0x27f104bd,
- 0x24b60a04,
- 0x0021d006,
- 0xf10127f0,
- 0xb60a2017,
- 0x12d00614,
- 0x0017f100,
- 0x0613f001,
- 0xf80501fa,
- 0xf094bd03,
- 0x07f10999,
- 0x03f01700,
- 0x0009d002,
- 0x94bd04bd,
- 0xf10599f0,
+ 0xf104bd00,
+ 0xf0810007,
+ 0x01d00203,
+ 0xf004bd00,
+ 0x07f10127,
+ 0x03f08800,
+ 0x0002d002,
+ 0x17f104bd,
+ 0x13f00100,
+ 0x0501fa06,
+ 0x94bd03f8,
+ 0xf10999f0,
0xf0170007,
0x09d00203,
- 0xf804bd00,
-/* 0x0795: ctx_chan */
- 0x8c21f500,
- 0x0ca7f006,
- 0xf1c921f4,
- 0xb60a1017,
- 0x27f00614,
- 0x0012d005,
-/* 0x07ac: ctx_chan_wait */
- 0xfd0012cf,
- 0x1bf40522,
-/* 0x07b7: ctx_mmio_exec */
- 0x9800f8fa,
- 0x27f14103,
- 0x24b60a04,
- 0x0023d006,
-/* 0x07c6: ctx_mmio_loop */
+ 0xbd04bd00,
+ 0x0599f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x0942: ctx_chan */
+ 0x21f500f8,
+ 0xa7f00824,
+ 0xd021f40c,
+ 0xf505f7f0,
+ 0xf8080621,
+/* 0x0955: ctx_mmio_exec */
+ 0x41039800,
+ 0x810007f1,
+ 0xd00203f0,
+ 0x04bd0003,
+/* 0x0966: ctx_mmio_loop */
0x34c434bd,
0x0f1bf4ff,
0x020057f1,
0xfa0653f0,
0x03f80535,
-/* 0x07d8: ctx_mmio_pull */
+/* 0x0978: ctx_mmio_pull */
0x98804e98,
0x21f4814f,
- 0x0830b68d,
+ 0x0830b69d,
0xf40112b6,
-/* 0x07ea: ctx_mmio_done */
+/* 0x098a: ctx_mmio_done */
0x0398df1b,
- 0x0023d016,
- 0xf1400080,
- 0xf0010017,
- 0x01fa0613,
- 0xf803f806,
-/* 0x0801: ctx_xfer */
- 0x00f7f100,
- 0x06f4b60c,
- 0xd004e7f0,
-/* 0x080e: ctx_xfer_idle */
- 0xfecf80fe,
- 0x00e4f100,
- 0xf91bf420,
- 0xf40611f4,
-/* 0x081e: ctx_xfer_pre */
- 0xf7f00d02,
- 0x6c21f510,
- 0x1c11f406,
-/* 0x0828: ctx_xfer_pre_load */
- 0xf502f7f0,
- 0xf5062b21,
- 0xf5063a21,
- 0xbd064c21,
- 0x2b21f5f4,
- 0x8c21f506,
-/* 0x0841: ctx_xfer_exec */
- 0x16019806,
- 0x041427f1,
- 0xd00624b6,
- 0xe7f10020,
- 0xe3f0a500,
- 0x021fb941,
- 0xb68d21f4,
- 0xfcf004e0,
- 0x022cf001,
- 0xfd0124b6,
- 0x21f405f2,
- 0xfc17f18d,
- 0x0213f04a,
- 0xd00c27f0,
- 0x21f50012,
- 0x27f10215,
- 0x23f047fc,
- 0x0020d002,
+ 0x0007f116,
+ 0x0203f081,
+ 0xbd0003d0,
+ 0x40008004,
+ 0x010017f1,
+ 0xfa0613f0,
+ 0x03f80601,
+/* 0x09aa: ctx_xfer */
+ 0xe7f000f8,
+ 0x0007f104,
+ 0x0303f002,
+ 0xbd000ed0,
+/* 0x09b9: ctx_xfer_idle */
+ 0x00e7f104,
+ 0x03e3f000,
+ 0xf100eecf,
+ 0xf42000e4,
+ 0x11f4f21b,
+ 0x0d02f406,
+/* 0x09d0: ctx_xfer_pre */
+ 0xf510f7f0,
+ 0xf407de21,
+/* 0x09da: ctx_xfer_pre_load */
+ 0xf7f01c11,
+ 0x7f21f502,
+ 0x9121f507,
+ 0xa621f507,
+ 0xf5f4bd07,
+ 0xf5077f21,
+/* 0x09f3: ctx_xfer_exec */
+ 0x98082421,
+ 0x24bd1601,
+ 0x050007f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0xf1021fb9,
+ 0xf0a500e7,
+ 0x21f441e3,
+ 0x01fcf09d,
+ 0xb6022cf0,
+ 0xf2fd0124,
+ 0x02ffb905,
+ 0xa504e7f1,
+ 0xf441e3f0,
+ 0x21f59d21,
+ 0x24bd026a,
+ 0x47fc07f1,
+ 0xd00203f0,
+ 0x04bd0002,
0xb6012cf0,
- 0x12d00320,
- 0x01acf000,
- 0xf006a5f0,
- 0x0c9800b7,
- 0x010d9800,
- 0xf500e7f0,
- 0xf0016621,
- 0x21f508a7,
- 0x21f50109,
- 0x01f40215,
- 0x0ca7f022,
- 0xf1c921f4,
- 0xb60a1017,
- 0x27f00614,
- 0x0012d005,
-/* 0x08c8: ctx_xfer_post_save_wait */
- 0xfd0012cf,
- 0x1bf40522,
- 0x2e02f4fa,
-/* 0x08d4: ctx_xfer_post */
- 0xf502f7f0,
- 0xbd062b21,
- 0x6c21f5f4,
- 0x3421f506,
- 0x3a21f502,
- 0xf5f4bd06,
- 0xf4062b21,
- 0x01981011,
- 0x0511fd40,
- 0xf5070bf4,
-/* 0x08ff: ctx_xfer_no_post_mmio */
-/* 0x08ff: ctx_xfer_done */
- 0xf807b721,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x07f10320,
+ 0x03f04afc,
+ 0x0002d002,
+ 0xacf004bd,
+ 0x06a5f001,
+ 0x9800b7f0,
+ 0x0d98000c,
+ 0x00e7f001,
+ 0x016f21f5,
+ 0xf508a7f0,
+ 0xf5011021,
+ 0xf4025e21,
+ 0xa7f01301,
+ 0xd021f40c,
+ 0xf505f7f0,
+ 0xf4080621,
+/* 0x0a82: ctx_xfer_post */
+ 0xf7f02e02,
+ 0x7f21f502,
+ 0xf5f4bd07,
+ 0xf507de21,
+ 0xf5027f21,
+ 0xbd079121,
+ 0x7f21f5f4,
+ 0x1011f407,
+ 0xfd400198,
+ 0x0bf40511,
+ 0x5521f507,
+/* 0x0aad: ctx_xfer_no_post_mmio */
+/* 0x0aad: ctx_xfer_done */
+ 0x0000f809,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
index 438506d14749..229c0ae37228 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
@@ -206,14 +206,14 @@ uint32_t nvf0_grhub_data[] = {
};
uint32_t nvf0_grhub_code[] = {
- 0x031b0ef5,
+ 0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
- 0x00f802fe,
+ 0x00f8037e,
/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
@@ -237,184 +237,214 @@ uint32_t nvf0_grhub_code[] = {
/* 0x0066: queue_get_done */
0x00f80132,
/* 0x0068: nv_rd32 */
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010921f5,
- 0xf840bfcf,
-/* 0x008d: nv_wr32 */
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
-/* 0x00bd: watchdog_clear */
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
-/* 0x00c9: wait_donez */
- 0xf094bd00,
- 0x07f10099,
- 0x03f03700,
- 0x0009d002,
- 0x07f104bd,
- 0x03f00600,
- 0x000ad002,
-/* 0x00e6: wait_donez_ne */
- 0x87f104bd,
- 0x83f00000,
- 0x0088cf01,
- 0xf4888aff,
- 0x94bdf31b,
- 0xf10099f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0109: wait_doneo */
- 0xf094bd00,
+ 0xf002ecb9,
+ 0x07f11fc9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x007a: nv_rd32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0xa7f0f31b,
+ 0x1021f506,
+ 0x00f7f101,
+ 0x01f3f0cb,
+ 0xf800ffcf,
+/* 0x009d: nv_wr32 */
+ 0x0007f100,
+ 0x0103f0cc,
+ 0xbd000fd0,
+ 0x02ecb904,
+ 0xf01fc9f0,
+ 0x07f11ec9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x00be: nv_wr32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f31b,
+/* 0x00d0: wait_donez */
+ 0x99f094bd,
+ 0x0007f100,
+ 0x0203f037,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x1bf4888a,
+ 0xf094bdf3,
0x07f10099,
- 0x03f03700,
+ 0x03f01700,
0x0009d002,
- 0x87f104bd,
- 0x84b60818,
- 0x008ad006,
-/* 0x0124: wait_doneo_e */
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
+ 0x00f804bd,
+/* 0x0110: wait_doneo */
0x99f094bd,
0x0007f100,
- 0x0203f017,
+ 0x0203f037,
0xbd0009d0,
-/* 0x0147: mmctx_size */
- 0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
- 0x00e89894,
- 0xb61a85b6,
- 0x84b60180,
- 0x0098bb02,
- 0xb804e0b6,
- 0x1bf404ef,
- 0x029fb9eb,
-/* 0x0166: mmctx_xfer */
- 0x94bd00f8,
- 0xf10199f0,
- 0xf0370007,
- 0x09d00203,
- 0xf104bd00,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
-/* 0x018c: mmctx_base_disabled */
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
- 0xb70199f0,
- 0xc8010080,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x0bf4888a,
+ 0xf094bdf3,
+ 0x07f10099,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0xf404efb8,
+ 0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+ 0xbd00f802,
+ 0x0199f094,
+ 0x370007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0xbbfd94bd,
+ 0x120bf405,
+ 0xc40007f1,
+ 0xd00103f0,
+ 0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0x0007f11e,
+ 0x0103f0c6,
+ 0xbd000ed0,
+ 0x0007f104,
+ 0x0103f0c7,
+ 0xbd000fd0,
+ 0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x01aec80c,
+ 0xfd11e4b6,
+ 0x07f105be,
+ 0x03f0c500,
+ 0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+ 0xe7f104bd,
+ 0xe3f0c500,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f30b,
+ 0x05e9fd00,
+ 0xc80007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+ 0xb804c0b6,
+ 0x1bf404cd,
+ 0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+ 0xf11f1bf4,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x1fb4f000,
+ 0xf410b4b0,
+ 0xa7f0f01b,
+ 0xd021f402,
+/* 0x0223: mmctx_stop */
+ 0xc82b0ef4,
0xb4b600ab,
0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
-/* 0x01ea: mmctx_stop */
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
- 0x008bcf00,
- 0xf412bbc8,
-/* 0x0202: mmctx_done */
- 0x94bdfa1b,
- 0xf10199f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0215: strand_wait */
- 0xf0a0f900,
- 0x21f402a7,
- 0xf8a0fcc9,
-/* 0x0221: strand_pre */
- 0xfc87f100,
- 0x0283f04a,
- 0xd00c97f0,
- 0x21f50089,
- 0x00f80215,
-/* 0x0234: strand_post */
- 0x4afc87f1,
- 0xf00283f0,
- 0x89d00d97,
- 0x1521f500,
-/* 0x0247: strand_set */
- 0xf100f802,
- 0xf04ffca7,
- 0xaba202a3,
- 0xc7f00500,
- 0x00acd00f,
- 0xd00bc7f0,
- 0x21f500bc,
- 0xaed00215,
- 0x0ac7f000,
- 0xf500bcd0,
- 0xf8021521,
-/* 0x0271: strand_ctx_init */
- 0xf094bd00,
- 0x07f10399,
- 0x03f03700,
+ 0xf112b9f0,
+ 0xf0c50007,
+ 0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+ 0xf104bd00,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x12bbc800,
+/* 0x024b: mmctx_done */
+ 0xbdf31bf4,
+ 0x0199f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x025e: strand_wait */
+ 0xa0f900f8,
+ 0xf402a7f0,
+ 0xa0fcd021,
+/* 0x026a: strand_pre */
+ 0x97f000f8,
+ 0xfc07f10c,
+ 0x0203f04a,
+ 0xbd0009d0,
+ 0x5e21f504,
+/* 0x027f: strand_post */
+ 0xf000f802,
+ 0x07f10d97,
+ 0x03f04afc,
0x0009d002,
0x21f504bd,
- 0xe7f00221,
- 0x4721f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x1521f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x1521f500,
- 0x3421f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+ 0x00f8025e,
+/* 0x0294: strand_set */
+ 0xf10fc7f0,
+ 0xf04ffc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f10bc7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x07f104bd,
+ 0x03f04ffc,
+ 0x000ed002,
+ 0xc7f004bd,
+ 0xfc07f10a,
+ 0x0203f04a,
+ 0xbd000cd0,
+ 0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+ 0xbd00f802,
+ 0x0399f094,
+ 0x370007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0x026a21f5,
+ 0xf503e7f0,
+ 0xbd029421,
+ 0xfc07f1c4,
+ 0x0203f047,
+ 0xbd000cd0,
+ 0x01c7f004,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd000c,
+ 0x025e21f5,
+ 0xf1010c92,
+ 0xf046fc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f102c7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x21f504bd,
+ 0x21f5025e,
+ 0x87f1027f,
+ 0x83f04200,
+ 0x0097f102,
+ 0x0293f020,
+ 0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -428,7 +458,7 @@ uint32_t nvf0_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
0x07f100f8,
0x03f00500,
0x000fd002,
@@ -436,82 +466,117 @@ uint32_t nvf0_grhub_code[] = {
0x0007f101,
0x0303f007,
0xbd000fd0,
-/* 0x031b: init */
+/* 0x039b: init */
0xbd00f804,
- 0x0004fe04,
- 0xf10007fe,
- 0xf0120017,
- 0x12d00227,
- 0xb117f100,
- 0x0010fe05,
- 0x040017f1,
- 0xf1c010d0,
- 0xb6040437,
- 0x27f10634,
- 0x32d02003,
- 0x0427f100,
- 0x0132d020,
+ 0x0007fe04,
+ 0x420017f1,
+ 0xcf0013f0,
+ 0x11e70011,
+ 0x14b60109,
+ 0x0014fe08,
+ 0xf10227f0,
+ 0xf0120007,
+ 0x02d00003,
+ 0xf104bd00,
+ 0xfe06c817,
+ 0x24bd0010,
+ 0x070007f1,
+ 0xd00003f0,
+ 0x04bd0002,
+ 0x200327f1,
+ 0x010007f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0x200427f1,
+ 0x010407f1,
+ 0xd00103f0,
+ 0x04bd0002,
0x200b27f1,
- 0xf10232d0,
- 0xd0200c27,
- 0x27f10732,
- 0x24b60c24,
- 0x0003b906,
- 0xf10023d0,
+ 0x010807f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0x200c27f1,
+ 0x011c07f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0xf1010392,
+ 0xf0090007,
+ 0x03d00303,
+ 0xf104bd00,
0xf0870427,
- 0x12d00023,
- 0x0012b700,
- 0x0427f001,
- 0xf40012d0,
- 0xe7f11031,
- 0xe3f09604,
- 0x6821f440,
- 0x8090f1c7,
- 0xf4f00301,
- 0x020f801f,
- 0xbb0117f0,
- 0x12b6041f,
- 0x0c27f101,
- 0x0624b604,
- 0xd00021d0,
- 0x17f14021,
- 0x0e980100,
- 0x010f9800,
- 0x014721f5,
- 0x070037f1,
- 0x950634b6,
- 0x34d00814,
- 0x4034d000,
- 0x130030b7,
- 0xb6001fbb,
- 0x3fd002f5,
- 0x0815b600,
- 0xb60110b6,
- 0x1fb90814,
- 0x7121f502,
- 0x001fbb02,
- 0xf1020398,
- 0xf0200047,
-/* 0x03f6: init_gpc */
- 0x4ea05043,
- 0x1fb90804,
- 0x8d21f402,
- 0x010c4ea0,
- 0x21f4f4bd,
- 0x044ea08d,
- 0x8d21f401,
- 0x01004ea0,
- 0xf402f7f0,
- 0x4ea08d21,
-/* 0x041e: init_gpc_wait */
- 0x21f40800,
- 0x1fffc868,
- 0xa0fa0bf4,
- 0xf408044e,
- 0x1fbb6821,
- 0x0040b700,
- 0x0132b680,
- 0xf1be1bf4,
+ 0x07f10023,
+ 0x03f00400,
+ 0x0002d000,
+ 0x27f004bd,
+ 0x0007f104,
+ 0x0003f003,
+ 0xbd0002d0,
+ 0x1031f404,
+ 0x9604e7f1,
+ 0xf440e3f0,
+ 0xfeb96821,
+ 0x90f1c702,
+ 0xf0030180,
+ 0x0f801ff4,
+ 0x0117f002,
+ 0xb6041fbb,
+ 0x07f10112,
+ 0x03f00300,
+ 0x0001d001,
+ 0x07f104bd,
+ 0x03f00400,
+ 0x0001d001,
+ 0x17f104bd,
+ 0xf7f00100,
+ 0x7f21f502,
+ 0x9121f507,
+ 0x10f7f007,
+ 0x07de21f5,
+ 0x98000e98,
+ 0x21f5010f,
+ 0x14950150,
+ 0x0007f108,
+ 0x0103f0c0,
+ 0xbd0004d0,
+ 0x0007f104,
+ 0x0103f0c1,
+ 0xbd0004d0,
+ 0x0030b704,
+ 0x001fbb13,
+ 0xf102f5b6,
+ 0xf0d30007,
+ 0x0fd00103,
+ 0xb604bd00,
+ 0x10b60815,
+ 0x0814b601,
+ 0xf5021fb9,
+ 0xbb02d321,
+ 0x0398001f,
+ 0x0047f102,
+ 0x5043f020,
+/* 0x04f4: init_gpc */
+ 0x08044ea0,
+ 0xf4021fb9,
+ 0x4ea09d21,
+ 0xf4bd010c,
+ 0xa09d21f4,
+ 0xf401044e,
+ 0x4ea09d21,
+ 0xf7f00100,
+ 0x9d21f402,
+ 0x08004ea0,
+/* 0x051c: init_gpc_wait */
+ 0xc86821f4,
+ 0x0bf41fff,
+ 0x044ea0fa,
+ 0x6821f408,
+ 0xb7001fbb,
+ 0xb6800040,
+ 0x1bf40132,
+ 0x00f7f0be,
+ 0x07de21f5,
+ 0xf500f7f0,
+ 0xf1077f21,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -519,382 +584,379 @@ uint32_t nvf0_grhub_code[] = {
0x300007f1,
0xd00203f0,
0x04bd0001,
-/* 0x0458: main */
+/* 0x0564: main */
0xf40031f4,
0xd7f00028,
0x3921f410,
0xb1f401f4,
0xf54001e4,
- 0xbd00de1b,
+ 0xbd00e91b,
0x0499f094,
0x370007f1,
0xd00203f0,
0x04bd0009,
- 0x0b0017f1,
- 0xcf0614b6,
- 0x11cf4012,
- 0x1f13c800,
- 0x00870bf5,
- 0xf41f23c8,
- 0x20f9620b,
- 0xbd0212b9,
- 0x0799f094,
- 0x370007f1,
- 0xd00203f0,
- 0x04bd0009,
- 0xf40132f4,
- 0x21f50231,
- 0x94bd0801,
+ 0xc00017f1,
+ 0xcf0213f0,
+ 0x27f10011,
+ 0x23f0c100,
+ 0x0022cf02,
+ 0xf51f13c8,
+ 0xc800890b,
+ 0x0bf41f23,
+ 0xb920f962,
+ 0x94bd0212,
0xf10799f0,
- 0xf0170007,
+ 0xf0370007,
0x09d00203,
- 0xfc04bd00,
- 0xf094bd20,
- 0x07f10699,
- 0x03f03700,
- 0x0009d002,
- 0x31f404bd,
- 0x0121f501,
- 0xf094bd08,
- 0x07f10699,
+ 0xf404bd00,
+ 0x31f40132,
+ 0xaa21f502,
+ 0xf094bd09,
+ 0x07f10799,
0x03f01700,
0x0009d002,
- 0x0ef404bd,
-/* 0x04f9: chsw_prev_no_next */
- 0xb920f931,
- 0x32f40212,
- 0x0232f401,
- 0x080121f5,
- 0x17f120fc,
- 0x14b60b00,
- 0x0012d006,
-/* 0x0517: chsw_no_prev */
- 0xc8130ef4,
- 0x0bf41f23,
- 0x0131f40d,
- 0xf50232f4,
-/* 0x0527: chsw_done */
- 0xf1080121,
- 0xb60b0c17,
- 0x27f00614,
- 0x0012d001,
+ 0x20fc04bd,
0x99f094bd,
- 0x0007f104,
+ 0x0007f106,
+ 0x0203f037,
+ 0xbd0009d0,
+ 0x0131f404,
+ 0x09aa21f5,
+ 0x99f094bd,
+ 0x0007f106,
0x0203f017,
0xbd0009d0,
- 0x130ef504,
-/* 0x0549: main_not_ctx_switch */
- 0x01e4b0ff,
- 0xb90d1bf4,
- 0x21f502f2,
- 0x0ef40795,
-/* 0x0559: main_not_ctx_chan */
- 0x02e4b046,
- 0xbd321bf4,
- 0x0799f094,
- 0x370007f1,
+ 0x330ef404,
+/* 0x060c: chsw_prev_no_next */
+ 0x12b920f9,
+ 0x0132f402,
+ 0xf50232f4,
+ 0xfc09aa21,
+ 0x0007f120,
+ 0x0203f0c0,
+ 0xbd0002d0,
+ 0x130ef404,
+/* 0x062c: chsw_no_prev */
+ 0xf41f23c8,
+ 0x31f40d0b,
+ 0x0232f401,
+ 0x09aa21f5,
+/* 0x063c: chsw_done */
+ 0xf10127f0,
+ 0xf0c30007,
+ 0x02d00203,
+ 0xbd04bd00,
+ 0x0499f094,
+ 0x170007f1,
0xd00203f0,
0x04bd0009,
- 0xf40132f4,
- 0x21f50232,
- 0x94bd0801,
+ 0xff080ef5,
+/* 0x0660: main_not_ctx_switch */
+ 0xf401e4b0,
+ 0xf2b90d1b,
+ 0x4221f502,
+ 0x460ef409,
+/* 0x0670: main_not_ctx_chan */
+ 0xf402e4b0,
+ 0x94bd321b,
0xf10799f0,
- 0xf0170007,
+ 0xf0370007,
0x09d00203,
0xf404bd00,
-/* 0x058e: main_not_ctx_save */
- 0xef94110e,
- 0x01f5f010,
- 0x02fe21f5,
- 0xfec00ef5,
-/* 0x059c: main_done */
- 0x29f024bd,
- 0x0007f11f,
- 0x0203f030,
- 0xbd0002d0,
- 0xab0ef504,
-/* 0x05b1: ih */
- 0xfe80f9fe,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0x0acf04bd,
- 0x04abc480,
- 0xf11d0bf4,
- 0xf01900b7,
- 0xbecf10d7,
- 0x00bfcf40,
+ 0x32f40132,
+ 0xaa21f502,
+ 0xf094bd09,
+ 0x07f10799,
+ 0x03f01700,
+ 0x0009d002,
+ 0x0ef404bd,
+/* 0x06a5: main_not_ctx_save */
+ 0x10ef9411,
+ 0xf501f5f0,
+ 0xf5037e21,
+/* 0x06b3: main_done */
+ 0xbdfeb50e,
+ 0x1f29f024,
+ 0x300007f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xfea00ef5,
+/* 0x06c8: ih */
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x00a7f104,
+ 0x00a3f002,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x10d7f030,
+ 0x1a00e7f1,
+ 0xcf00e3f0,
+ 0xf7f100ee,
+ 0xf3f01900,
+ 0x00ffcf00,
0xb70421f4,
0xf00400b0,
- 0xbed001e7,
-/* 0x05e9: ih_no_fifo */
- 0x00abe400,
- 0x0d0bf401,
- 0xf110d7f0,
- 0xf44001e7,
-/* 0x05fa: ih_no_ctxsw */
- 0xb7f10421,
- 0xb0bd0104,
- 0xf4b4abff,
- 0xa7f10d0b,
- 0xa4b60c1c,
- 0x00abd006,
-/* 0x0610: ih_no_other */
- 0xfc400ad0,
+ 0x07f101e7,
+ 0x03f01d00,
+ 0x000ed000,
+/* 0x071a: ih_no_fifo */
+ 0xabe404bd,
+ 0x0bf40100,
+ 0x10d7f00d,
+ 0x4001e7f1,
+/* 0x072b: ih_no_ctxsw */
+ 0xe40421f4,
+ 0xf40400ab,
+ 0xb7f1140b,
+ 0xbfb90100,
+ 0x44e7f102,
+ 0x40e3f001,
+/* 0x0743: ih_no_fwmthd */
+ 0xf19d21f4,
+ 0xbd0104b7,
+ 0xb4abffb0,
+ 0xf10f0bf4,
+ 0xf0070007,
+ 0x0bd00303,
+/* 0x075b: ih_no_other */
+ 0xf104bd00,
+ 0xf0010007,
+ 0x0ad00003,
+ 0xfc04bd00,
0xfce0fcf0,
0xfcb0fcd0,
0xfc90fca0,
0x0088fe80,
0x32f480fc,
-/* 0x062b: ctx_4170s */
- 0xf101f800,
- 0xf04170e7,
- 0xf5f040e3,
- 0x8d21f410,
-/* 0x063a: ctx_4170w */
+/* 0x077f: ctx_4170s */
+ 0xf001f800,
+ 0xffb910f5,
+ 0x70e7f102,
+ 0x40e3f041,
+ 0xf89d21f4,
+/* 0x0791: ctx_4170w */
+ 0x70e7f100,
+ 0x40e3f041,
+ 0xb96821f4,
+ 0xf4f002ff,
+ 0xf01bf410,
+/* 0x07a6: ctx_redswitch */
0xe7f100f8,
- 0xe3f04170,
- 0x6821f440,
- 0xf410f4f0,
+ 0xe5f00200,
+ 0x20e5f040,
+ 0xf110e5f0,
+ 0xf0850007,
+ 0x0ed00103,
+ 0xf004bd00,
+/* 0x07c2: ctx_redswitch_delay */
+ 0xf2b608f7,
+ 0xfd1bf401,
+ 0x0400e5f1,
+ 0x0100e5f1,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+/* 0x07de: ctx_86c */
+ 0x07f100f8,
+ 0x03f02300,
+ 0x000fd002,
+ 0xffb904bd,
+ 0x14e7f102,
+ 0x40e3f08a,
+ 0xb99d21f4,
+ 0xe7f102ff,
+ 0xe3f0a88c,
+ 0x9d21f441,
+/* 0x0806: ctx_mem */
+ 0x07f100f8,
+ 0x03f08400,
+ 0x000fd002,
+/* 0x0812: ctx_mem_wait */
+ 0xf7f104bd,
+ 0xf3f08400,
+ 0x00ffcf02,
+ 0xf405fffd,
0x00f8f31b,
-/* 0x064c: ctx_redswitch */
- 0x0614e7f1,
- 0xf106e4b6,
- 0xd00270f7,
- 0xf7f000ef,
-/* 0x065d: ctx_redswitch_delay */
- 0x01f2b608,
- 0xf1fd1bf4,
- 0xd00770f7,
- 0x00f800ef,
-/* 0x066c: ctx_86c */
- 0x086ce7f1,
- 0xd006e4b6,
- 0xe7f100ef,
- 0xe3f08a14,
- 0x8d21f440,
- 0xa86ce7f1,
- 0xf441e3f0,
- 0x00f88d21,
-/* 0x068c: ctx_load */
+/* 0x0824: ctx_load */
0x99f094bd,
0x0007f105,
0x0203f037,
0xbd0009d0,
0x0ca7f004,
- 0xf1c921f4,
- 0xb60a2417,
- 0x10d00614,
- 0x0037f100,
- 0x0634b60b,
- 0xf14032d0,
- 0xb60a0c17,
- 0x47f00614,
- 0x0012d007,
-/* 0x06c7: ctx_chan_wait_0 */
- 0xcf4014d0,
- 0x44f04014,
- 0xfa1bf41f,
- 0xfe0032d0,
- 0x2af0000b,
- 0x0424b61f,
- 0xbd0220b6,
+ 0xbdd021f4,
+ 0x0007f1f4,
+ 0x0203f089,
+ 0xbd000fd0,
+ 0x0007f104,
+ 0x0203f0c1,
+ 0xbd0002d0,
+ 0x0007f104,
+ 0x0203f083,
+ 0xbd0002d0,
+ 0x07f7f004,
+ 0x080621f5,
+ 0xc00007f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xf0000bfe,
+ 0x24b61f2a,
+ 0x0220b604,
+ 0x99f094bd,
+ 0x0007f108,
+ 0x0203f037,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f081,
+ 0xbd0002d0,
+ 0x0027f104,
+ 0x0023f100,
+ 0x0225f080,
+ 0x880007f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xf11017f0,
+ 0xf0020027,
+ 0x12fa0223,
+ 0xbd03f805,
0x0899f094,
- 0x370007f1,
+ 0x170007f1,
0xd00203f0,
0x04bd0009,
- 0x0a0417f1,
- 0xd00614b6,
- 0x17f10012,
- 0x14b60a20,
- 0x0227f006,
- 0x800023f1,
- 0xf00012d0,
- 0x27f11017,
- 0x23f00200,
- 0x0512fa02,
- 0x94bd03f8,
- 0xf10899f0,
- 0xf0170007,
+ 0xb6810198,
+ 0x02981814,
+ 0x0825b680,
+ 0x800512fd,
+ 0x94bd1601,
+ 0xf10999f0,
+ 0xf0370007,
0x09d00203,
- 0x9804bd00,
- 0x14b68101,
- 0x80029818,
- 0xfd0825b6,
- 0x01800512,
- 0xf094bd16,
- 0x07f10999,
- 0x03f03700,
- 0x0009d002,
- 0x27f104bd,
- 0x24b60a04,
- 0x0021d006,
- 0xf10127f0,
- 0xb60a2017,
- 0x12d00614,
- 0x0017f100,
- 0x0613f001,
- 0xf80501fa,
- 0xf094bd03,
- 0x07f10999,
- 0x03f01700,
- 0x0009d002,
- 0x94bd04bd,
- 0xf10599f0,
+ 0xf104bd00,
+ 0xf0810007,
+ 0x01d00203,
+ 0xf004bd00,
+ 0x07f10127,
+ 0x03f08800,
+ 0x0002d002,
+ 0x17f104bd,
+ 0x13f00100,
+ 0x0501fa06,
+ 0x94bd03f8,
+ 0xf10999f0,
0xf0170007,
0x09d00203,
- 0xf804bd00,
-/* 0x0795: ctx_chan */
- 0x8c21f500,
- 0x0ca7f006,
- 0xf1c921f4,
- 0xb60a1017,
- 0x27f00614,
- 0x0012d005,
-/* 0x07ac: ctx_chan_wait */
- 0xfd0012cf,
- 0x1bf40522,
-/* 0x07b7: ctx_mmio_exec */
- 0x9800f8fa,
- 0x27f14103,
- 0x24b60a04,
- 0x0023d006,
-/* 0x07c6: ctx_mmio_loop */
+ 0xbd04bd00,
+ 0x0599f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x0942: ctx_chan */
+ 0x21f500f8,
+ 0xa7f00824,
+ 0xd021f40c,
+ 0xf505f7f0,
+ 0xf8080621,
+/* 0x0955: ctx_mmio_exec */
+ 0x41039800,
+ 0x810007f1,
+ 0xd00203f0,
+ 0x04bd0003,
+/* 0x0966: ctx_mmio_loop */
0x34c434bd,
0x0f1bf4ff,
0x020057f1,
0xfa0653f0,
0x03f80535,
-/* 0x07d8: ctx_mmio_pull */
+/* 0x0978: ctx_mmio_pull */
0x98804e98,
0x21f4814f,
- 0x0830b68d,
+ 0x0830b69d,
0xf40112b6,
-/* 0x07ea: ctx_mmio_done */
+/* 0x098a: ctx_mmio_done */
0x0398df1b,
- 0x0023d016,
- 0xf1400080,
- 0xf0010017,
- 0x01fa0613,
- 0xf803f806,
-/* 0x0801: ctx_xfer */
- 0x00f7f100,
- 0x06f4b60c,
- 0xd004e7f0,
-/* 0x080e: ctx_xfer_idle */
- 0xfecf80fe,
- 0x00e4f100,
- 0xf91bf420,
- 0xf40611f4,
-/* 0x081e: ctx_xfer_pre */
- 0xf7f00d02,
- 0x6c21f510,
- 0x1c11f406,
-/* 0x0828: ctx_xfer_pre_load */
- 0xf502f7f0,
- 0xf5062b21,
- 0xf5063a21,
- 0xbd064c21,
- 0x2b21f5f4,
- 0x8c21f506,
-/* 0x0841: ctx_xfer_exec */
- 0x16019806,
- 0x041427f1,
- 0xd00624b6,
- 0xe7f10020,
- 0xe3f0a500,
- 0x021fb941,
- 0xb68d21f4,
- 0xfcf004e0,
- 0x022cf001,
- 0xfd0124b6,
- 0x21f405f2,
- 0xfc17f18d,
- 0x0213f04a,
- 0xd00c27f0,
- 0x21f50012,
- 0x27f10215,
- 0x23f047fc,
- 0x0020d002,
+ 0x0007f116,
+ 0x0203f081,
+ 0xbd0003d0,
+ 0x40008004,
+ 0x010017f1,
+ 0xfa0613f0,
+ 0x03f80601,
+/* 0x09aa: ctx_xfer */
+ 0xe7f000f8,
+ 0x0007f104,
+ 0x0303f002,
+ 0xbd000ed0,
+/* 0x09b9: ctx_xfer_idle */
+ 0x00e7f104,
+ 0x03e3f000,
+ 0xf100eecf,
+ 0xf42000e4,
+ 0x11f4f21b,
+ 0x0d02f406,
+/* 0x09d0: ctx_xfer_pre */
+ 0xf510f7f0,
+ 0xf407de21,
+/* 0x09da: ctx_xfer_pre_load */
+ 0xf7f01c11,
+ 0x7f21f502,
+ 0x9121f507,
+ 0xa621f507,
+ 0xf5f4bd07,
+ 0xf5077f21,
+/* 0x09f3: ctx_xfer_exec */
+ 0x98082421,
+ 0x24bd1601,
+ 0x050007f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0xf1021fb9,
+ 0xf0a500e7,
+ 0x21f441e3,
+ 0x01fcf09d,
+ 0xb6022cf0,
+ 0xf2fd0124,
+ 0x02ffb905,
+ 0xa504e7f1,
+ 0xf441e3f0,
+ 0x21f59d21,
+ 0x24bd026a,
+ 0x47fc07f1,
+ 0xd00203f0,
+ 0x04bd0002,
0xb6012cf0,
- 0x12d00320,
- 0x01acf000,
- 0xf006a5f0,
- 0x0c9800b7,
- 0x010d9800,
- 0xf500e7f0,
- 0xf0016621,
- 0x21f508a7,
- 0x21f50109,
- 0x01f40215,
- 0x0ca7f022,
- 0xf1c921f4,
- 0xb60a1017,
- 0x27f00614,
- 0x0012d005,
-/* 0x08c8: ctx_xfer_post_save_wait */
- 0xfd0012cf,
- 0x1bf40522,
- 0x2e02f4fa,
-/* 0x08d4: ctx_xfer_post */
- 0xf502f7f0,
- 0xbd062b21,
- 0x6c21f5f4,
- 0x3421f506,
- 0x3a21f502,
- 0xf5f4bd06,
- 0xf4062b21,
- 0x01981011,
- 0x0511fd40,
- 0xf5070bf4,
-/* 0x08ff: ctx_xfer_no_post_mmio */
-/* 0x08ff: ctx_xfer_done */
- 0xf807b721,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x07f10320,
+ 0x03f04afc,
+ 0x0002d002,
+ 0xacf004bd,
+ 0x06a5f001,
+ 0x9800b7f0,
+ 0x0d98000c,
+ 0x00e7f001,
+ 0x016f21f5,
+ 0xf508a7f0,
+ 0xf5011021,
+ 0xf4025e21,
+ 0xa7f01301,
+ 0xd021f40c,
+ 0xf505f7f0,
+ 0xf4080621,
+/* 0x0a82: ctx_xfer_post */
+ 0xf7f02e02,
+ 0x7f21f502,
+ 0xf5f4bd07,
+ 0xf507de21,
+ 0xf5027f21,
+ 0xbd079121,
+ 0x7f21f5f4,
+ 0x1011f407,
+ 0xfd400198,
+ 0x0bf40511,
+ 0x5521f507,
+/* 0x0aad: ctx_xfer_no_post_mmio */
+/* 0x0aad: ctx_xfer_done */
+ 0x0000f809,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
index 33a5a82eccbd..6ffe28307dbd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
@@ -28,28 +28,135 @@
#define GF117 0xd7
#define GK100 0xe0
#define GK110 0xf0
+#define GK208 0x108
+#define NV_PGRAPH_FECS_INTR_ACK 0x409004
+#define NV_PGRAPH_FECS_INTR 0x409008
+#define NV_PGRAPH_FECS_INTR_FWMTHD 0x00000400
+#define NV_PGRAPH_FECS_INTR_CHSW 0x00000100
+#define NV_PGRAPH_FECS_INTR_FIFO 0x00000004
+#define NV_PGRAPH_FECS_INTR_MODE 0x40900c
+#define NV_PGRAPH_FECS_INTR_MODE_FIFO 0x00000004
+#define NV_PGRAPH_FECS_INTR_MODE_FIFO_LEVEL 0x00000004
+#define NV_PGRAPH_FECS_INTR_MODE_FIFO_EDGE 0x00000000
+#define NV_PGRAPH_FECS_INTR_EN_SET 0x409010
+#define NV_PGRAPH_FECS_INTR_EN_SET_FIFO 0x00000004
+#define NV_PGRAPH_FECS_INTR_ROUTE 0x40901c
+#define NV_PGRAPH_FECS_ACCESS 0x409048
+#define NV_PGRAPH_FECS_ACCESS_FIFO 0x00000002
+#define NV_PGRAPH_FECS_FIFO_DATA 0x409064
+#define NV_PGRAPH_FECS_FIFO_CMD 0x409068
+#define NV_PGRAPH_FECS_FIFO_ACK 0x409074
+#define NV_PGRAPH_FECS_CAPS 0x409108
#define NV_PGRAPH_FECS_SIGNAL 0x409400
+#define NV_PGRAPH_FECS_IROUTE 0x409404
+#define NV_PGRAPH_FECS_BAR_MASK0 0x40940c
+#define NV_PGRAPH_FECS_BAR_MASK1 0x409410
+#define NV_PGRAPH_FECS_BAR 0x409414
+#define NV_PGRAPH_FECS_BAR_SET 0x409418
+#define NV_PGRAPH_FECS_RED_SWITCH 0x409614
+#define NV_PGRAPH_FECS_RED_SWITCH_ENABLE_ROP 0x00000400
+#define NV_PGRAPH_FECS_RED_SWITCH_ENABLE_GPC 0x00000200
+#define NV_PGRAPH_FECS_RED_SWITCH_ENABLE_MAIN 0x00000100
+#define NV_PGRAPH_FECS_RED_SWITCH_POWER_ROP 0x00000040
+#define NV_PGRAPH_FECS_RED_SWITCH_POWER_GPC 0x00000020
+#define NV_PGRAPH_FECS_RED_SWITCH_POWER_MAIN 0x00000010
+#define NV_PGRAPH_FECS_RED_SWITCH_PAUSE_GPC 0x00000002
+#define NV_PGRAPH_FECS_RED_SWITCH_PAUSE_MAIN 0x00000001
+#define NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE 0x409700
+#define NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE 0x409704
+#define NV_PGRAPH_FECS_MMCTX_LOAD_COUNT 0x40974c
+#define NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE 0x409700
+#define NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE 0x409704
+#define NV_PGRAPH_FECS_MMCTX_BASE 0x409710
+#define NV_PGRAPH_FECS_MMCTX_CTRL 0x409714
+#define NV_PGRAPH_FECS_MMCTX_MULTI_STRIDE 0x409718
+#define NV_PGRAPH_FECS_MMCTX_MULTI_MASK 0x40971c
+#define NV_PGRAPH_FECS_MMCTX_QUEUE 0x409720
+#define NV_PGRAPH_FECS_MMIO_CTRL 0x409728
+#define NV_PGRAPH_FECS_MMIO_RDVAL 0x40972c
+#define NV_PGRAPH_FECS_MMIO_WRVAL 0x409730
+#define NV_PGRAPH_FECS_MMCTX_LOAD_COUNT 0x40974c
#if CHIPSET < GK110
#define NV_PGRAPH_FECS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x409800)
#define NV_PGRAPH_FECS_CC_SCRATCH_SET(n) ((n) * 4 + 0x409820)
#define NV_PGRAPH_FECS_CC_SCRATCH_CLR(n) ((n) * 4 + 0x409840)
+#define NV_PGRAPH_FECS_UNK86C 0x40986c
#else
#define NV_PGRAPH_FECS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x409800)
#define NV_PGRAPH_FECS_CC_SCRATCH_CLR(n) ((n) * 4 + 0x409840)
+#define NV_PGRAPH_FECS_UNK86C 0x40988c
#define NV_PGRAPH_FECS_CC_SCRATCH_SET(n) ((n) * 4 + 0x4098c0)
#endif
+#define NV_PGRAPH_FECS_STRANDS_CNT 0x409880
+#define NV_PGRAPH_FECS_STRAND_SAVE_SWBASE 0x409908
+#define NV_PGRAPH_FECS_STRAND_LOAD_SWBASE 0x40990c
+#define NV_PGRAPH_FECS_STRAND_WORDS 0x409910
+#define NV_PGRAPH_FECS_STRAND_DATA 0x409918
+#define NV_PGRAPH_FECS_STRAND_SELECT 0x40991c
+#define NV_PGRAPH_FECS_STRAND_CMD 0x409928
+#define NV_PGRAPH_FECS_STRAND_CMD_SEEK 0x00000001
+#define NV_PGRAPH_FECS_STRAND_CMD_GET_INFO 0x00000002
+#define NV_PGRAPH_FECS_STRAND_CMD_SAVE 0x00000003
+#define NV_PGRAPH_FECS_STRAND_CMD_LOAD 0x00000004
+#define NV_PGRAPH_FECS_STRAND_CMD_ACTIVATE_FILTER 0x0000000a
+#define NV_PGRAPH_FECS_STRAND_CMD_DEACTIVATE_FILTER 0x0000000b
+#define NV_PGRAPH_FECS_STRAND_CMD_ENABLE 0x0000000c
+#define NV_PGRAPH_FECS_STRAND_CMD_DISABLE 0x0000000d
+#define NV_PGRAPH_FECS_STRAND_FILTER 0x40993c
+#define NV_PGRAPH_FECS_MEM_BASE 0x409a04
+#define NV_PGRAPH_FECS_MEM_CHAN 0x409a0c
+#define NV_PGRAPH_FECS_MEM_CMD 0x409a10
+#define NV_PGRAPH_FECS_MEM_CMD_LOAD_CHAN 0x00000007
+#define NV_PGRAPH_FECS_MEM_TARGET 0x409a20
+#define NV_PGRAPH_FECS_MEM_TARGET_UNK31 0x80000000
+#define NV_PGRAPH_FECS_MEM_TARGET_AS 0x0000001f
+#define NV_PGRAPH_FECS_MEM_TARGET_AS_VM 0x00000001
+#define NV_PGRAPH_FECS_MEM_TARGET_AS_VRAM 0x00000002
+#define NV_PGRAPH_FECS_CHAN_ADDR 0x409b00
+#define NV_PGRAPH_FECS_CHAN_NEXT 0x409b04
+#define NV_PGRAPH_FECS_CHSW 0x409b0c
+#define NV_PGRAPH_FECS_CHSW_ACK 0x00000001
#define NV_PGRAPH_FECS_INTR_UP_SET 0x409c1c
+#define NV_PGRAPH_FECS_INTR_UP_EN 0x409c24
+#define NV_PGRAPH_GPCX_GPCCS_INTR_ACK 0x41a004
+#define NV_PGRAPH_GPCX_GPCCS_INTR 0x41a008
+#define NV_PGRAPH_GPCX_GPCCS_INTR_FIFO 0x00000004
+#define NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET 0x41a010
+#define NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET_FIFO 0x00000004
+#define NV_PGRAPH_GPCX_GPCCS_INTR_ROUTE 0x41a01c
+#define NV_PGRAPH_GPCX_GPCCS_ACCESS 0x41a048
+#define NV_PGRAPH_GPCX_GPCCS_ACCESS_FIFO 0x00000002
+#define NV_PGRAPH_GPCX_GPCCS_FIFO_DATA 0x41a064
+#define NV_PGRAPH_GPCX_GPCCS_FIFO_CMD 0x41a068
+#define NV_PGRAPH_GPCX_GPCCS_FIFO_ACK 0x41a074
+#define NV_PGRAPH_GPCX_GPCCS_UNITS 0x41a608
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH 0x41a614
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_UNK11 0x00000800
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_ENABLE 0x00000200
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_POWER 0x00000020
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_PAUSE 0x00000002
+#define NV_PGRAPH_GPCX_GPCCS_MYINDEX 0x41a618
+#define NV_PGRAPH_GPCX_GPCCS_MMCTX_SAVE_SWBASE 0x41a700
+#define NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_SWBASE 0x41a704
+#define NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_COUNT 0x41a74c
#if CHIPSET < GK110
#define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x41a800)
#define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_SET(n) ((n) * 4 + 0x41a820)
#define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_CLR(n) ((n) * 4 + 0x41a840)
+#define NV_PGRAPH_GPCX_GPCCS_UNK86C 0x41a86c
#else
#define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x41a800)
#define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_CLR(n) ((n) * 4 + 0x41a840)
+#define NV_PGRAPH_GPCX_GPCCS_UNK86C 0x41a88c
#define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_SET(n) ((n) * 4 + 0x41a8c0)
#endif
+#define NV_PGRAPH_GPCX_GPCCS_STRAND_SELECT 0x41a91c
+#define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD 0x41a928
+#define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_SAVE 0x00000003
+#define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_LOAD 0x00000004
+#define NV_PGRAPH_GPCX_GPCCS_MEM_BASE 0x41aa04
#define mmctx_data(r,c) .b32 (((c - 1) << 26) | r)
#define queue_init .skip 72 // (2 * 4) + ((8 * 4) * 2)
@@ -65,24 +172,50 @@
#define T_LCHAN 8
#define T_LCTXH 9
-#define nv_mkmm(rv,r) /*
-*/ movw rv ((r) & 0x0000fffc) /*
-*/ sethi rv ((r) & 0x00ff0000)
+#if CHIPSET < GK208
+#define imm32(reg,val) /*
+*/ movw reg ((val) & 0x0000ffff) /*
+*/ sethi reg ((val) & 0xffff0000)
+#else
+#define imm32(reg,val) /*
+*/ mov reg (val)
+#endif
+
#define nv_mkio(rv,r,i) /*
-*/ nv_mkmm(rv, (((r) & 0xffc) << 6) | ((i) << 2))
+*/ imm32(rv, (((r) & 0xffc) << 6) | ((i) << 2))
+
+#define hash #
+#define fn(a) a
+#if CHIPSET < GK208
+#define call(a) call fn(hash)a
+#else
+#define call(a) lcall fn(hash)a
+#endif
#define nv_iord(rv,r,i) /*
*/ nv_mkio(rv,r,i) /*
*/ iord rv I[rv]
+
#define nv_iowr(r,i,rv) /*
*/ nv_mkio($r0,r,i) /*
*/ iowr I[$r0] rv /*
*/ clear b32 $r0
+#define nv_rd32(reg,addr) /*
+*/ imm32($r14, addr) /*
+*/ call(nv_rd32) /*
+*/ mov b32 reg $r15
+
+#define nv_wr32(addr,reg) /*
+*/ mov b32 $r15 reg /*
+*/ imm32($r14, addr) /*
+*/ call(nv_wr32)
+
#define trace_set(bit) /*
*/ clear b32 $r9 /*
*/ bset $r9 bit /*
*/ nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_SET(7), 0, $r9)
+
#define trace_clr(bit) /*
*/ clear b32 $r9 /*
*/ bset $r9 bit /*
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
new file mode 100644
index 000000000000..e1af65ead379
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nvc0.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv108_graph_sclass[] = {
+ { 0x902d, &nouveau_object_ofuncs },
+ { 0xa140, &nouveau_object_ofuncs },
+ { 0xa197, &nouveau_object_ofuncs },
+ { 0xa1c0, &nouveau_object_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static struct nvc0_graph_init
+nv108_graph_init_regs[] = {
+ { 0x400080, 1, 0x04, 0x003083c2 },
+ { 0x400088, 1, 0x04, 0x0001bfe7 },
+ { 0x40008c, 1, 0x04, 0x00000000 },
+ { 0x400090, 1, 0x04, 0x00000030 },
+ { 0x40013c, 1, 0x04, 0x003901f7 },
+ { 0x400140, 1, 0x04, 0x00000100 },
+ { 0x400144, 1, 0x04, 0x00000000 },
+ { 0x400148, 1, 0x04, 0x00000110 },
+ { 0x400138, 1, 0x04, 0x00000000 },
+ { 0x400130, 2, 0x04, 0x00000000 },
+ { 0x400124, 1, 0x04, 0x00000002 },
+ {}
+};
+
+struct nvc0_graph_init
+nv108_graph_init_unk58xx[] = {
+ { 0x405844, 1, 0x04, 0x00ffffff },
+ { 0x405850, 1, 0x04, 0x00000000 },
+ { 0x405900, 1, 0x04, 0x00000000 },
+ { 0x405908, 1, 0x04, 0x00000000 },
+ { 0x405928, 1, 0x04, 0x00000000 },
+ { 0x40592c, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_graph_init_gpc[] = {
+ { 0x418408, 1, 0x04, 0x00000000 },
+ { 0x4184a0, 3, 0x04, 0x00000000 },
+ { 0x418604, 1, 0x04, 0x00000000 },
+ { 0x418680, 1, 0x04, 0x00000000 },
+ { 0x418714, 1, 0x04, 0x00000000 },
+ { 0x418384, 2, 0x04, 0x00000000 },
+ { 0x418814, 3, 0x04, 0x00000000 },
+ { 0x418b04, 1, 0x04, 0x00000000 },
+ { 0x4188c8, 2, 0x04, 0x00000000 },
+ { 0x4188d0, 1, 0x04, 0x00010000 },
+ { 0x4188d4, 1, 0x04, 0x00000201 },
+ { 0x418910, 1, 0x04, 0x00010001 },
+ { 0x418914, 1, 0x04, 0x00000301 },
+ { 0x418918, 1, 0x04, 0x00800000 },
+ { 0x418980, 1, 0x04, 0x77777770 },
+ { 0x418984, 3, 0x04, 0x77777777 },
+ { 0x418c04, 1, 0x04, 0x00000000 },
+ { 0x418c64, 2, 0x04, 0x00000000 },
+ { 0x418c88, 1, 0x04, 0x00000000 },
+ { 0x418cb4, 2, 0x04, 0x00000000 },
+ { 0x418d00, 1, 0x04, 0x00000000 },
+ { 0x418d28, 2, 0x04, 0x00000000 },
+ { 0x418f00, 1, 0x04, 0x00000400 },
+ { 0x418f08, 1, 0x04, 0x00000000 },
+ { 0x418f20, 2, 0x04, 0x00000000 },
+ { 0x418e00, 1, 0x04, 0x00000000 },
+ { 0x418e08, 1, 0x04, 0x00000000 },
+ { 0x418e1c, 2, 0x04, 0x00000000 },
+ { 0x41900c, 1, 0x04, 0x00000000 },
+ { 0x419018, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_graph_init_tpc[] = {
+ { 0x419d0c, 1, 0x04, 0x00000000 },
+ { 0x419d10, 1, 0x04, 0x00000014 },
+ { 0x419ab0, 1, 0x04, 0x00000000 },
+ { 0x419ac8, 1, 0x04, 0x00000000 },
+ { 0x419ab8, 1, 0x04, 0x000000e7 },
+ { 0x419abc, 2, 0x04, 0x00000000 },
+ { 0x419ab4, 1, 0x04, 0x00000000 },
+ { 0x419aa8, 2, 0x04, 0x00000000 },
+ { 0x41980c, 1, 0x04, 0x00000010 },
+ { 0x419844, 1, 0x04, 0x00000000 },
+ { 0x419850, 1, 0x04, 0x00000004 },
+ { 0x419854, 2, 0x04, 0x00000000 },
+ { 0x419c98, 1, 0x04, 0x00000000 },
+ { 0x419ca8, 1, 0x04, 0x00000000 },
+ { 0x419cb0, 1, 0x04, 0x01000000 },
+ { 0x419cb4, 1, 0x04, 0x00000000 },
+ { 0x419cb8, 1, 0x04, 0x00b08bea },
+ { 0x419c84, 1, 0x04, 0x00010384 },
+ { 0x419cbc, 1, 0x04, 0x281b3646 },
+ { 0x419cc0, 2, 0x04, 0x00000000 },
+ { 0x419c80, 1, 0x04, 0x00000230 },
+ { 0x419ccc, 2, 0x04, 0x00000000 },
+ { 0x419c0c, 1, 0x04, 0x00000000 },
+ { 0x419e00, 1, 0x04, 0x00000080 },
+ { 0x419ea0, 1, 0x04, 0x00000000 },
+ { 0x419ee4, 1, 0x04, 0x00000000 },
+ { 0x419ea4, 1, 0x04, 0x00000100 },
+ { 0x419ea8, 1, 0x04, 0x00000000 },
+ { 0x419eb4, 1, 0x04, 0x00000000 },
+ { 0x419ebc, 2, 0x04, 0x00000000 },
+ { 0x419edc, 1, 0x04, 0x00000000 },
+ { 0x419f00, 1, 0x04, 0x00000000 },
+ { 0x419ed0, 1, 0x04, 0x00003234 },
+ { 0x419f74, 1, 0x04, 0x00015555 },
+ { 0x419f80, 4, 0x04, 0x00000000 },
+ {}
+};
+
+static int
+nv108_graph_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nvc0_graph_priv *priv = (void *)object;
+ static const struct {
+ u32 addr;
+ u32 data;
+ } magic[] = {
+ { 0x020520, 0xfffffffc },
+ { 0x020524, 0xfffffffe },
+ { 0x020524, 0xfffffffc },
+ { 0x020524, 0xfffffff8 },
+ { 0x020524, 0xffffffe0 },
+ { 0x020530, 0xfffffffe },
+ { 0x02052c, 0xfffffffa },
+ { 0x02052c, 0xfffffff0 },
+ { 0x02052c, 0xffffffc0 },
+ { 0x02052c, 0xffffff00 },
+ { 0x02052c, 0xfffffc00 },
+ { 0x02052c, 0xfffcfc00 },
+ { 0x02052c, 0xfff0fc00 },
+ { 0x02052c, 0xff80fc00 },
+ { 0x020528, 0xfffffffe },
+ { 0x020528, 0xfffffffc },
+ };
+ int i;
+
+ nv_mask(priv, 0x000200, 0x08001000, 0x00000000);
+ nv_mask(priv, 0x0206b4, 0x00000000, 0x00000000);
+ for (i = 0; i < ARRAY_SIZE(magic); i++) {
+ nv_wr32(priv, magic[i].addr, magic[i].data);
+ nv_wait(priv, magic[i].addr, 0x80000000, 0x00000000);
+ }
+
+ return nouveau_graph_fini(&priv->base, suspend);
+}
+
+static struct nvc0_graph_init *
+nv108_graph_init_mmio[] = {
+ nv108_graph_init_regs,
+ nvf0_graph_init_unk40xx,
+ nvc0_graph_init_unk44xx,
+ nvc0_graph_init_unk78xx,
+ nvc0_graph_init_unk60xx,
+ nvd9_graph_init_unk64xx,
+ nv108_graph_init_unk58xx,
+ nvc0_graph_init_unk80xx,
+ nvf0_graph_init_unk70xx,
+ nvf0_graph_init_unk5bxx,
+ nv108_graph_init_gpc,
+ nv108_graph_init_tpc,
+ nve4_graph_init_unk,
+ nve4_graph_init_unk88xx,
+ NULL
+};
+
+#include "fuc/hubnv108.fuc5.h"
+
+static struct nvc0_graph_ucode
+nv108_graph_fecs_ucode = {
+ .code.data = nv108_grhub_code,
+ .code.size = sizeof(nv108_grhub_code),
+ .data.data = nv108_grhub_data,
+ .data.size = sizeof(nv108_grhub_data),
+};
+
+#include "fuc/gpcnv108.fuc5.h"
+
+static struct nvc0_graph_ucode
+nv108_graph_gpccs_ucode = {
+ .code.data = nv108_grgpc_code,
+ .code.size = sizeof(nv108_grgpc_code),
+ .data.data = nv108_grgpc_data,
+ .data.size = sizeof(nv108_grgpc_data),
+};
+
+struct nouveau_oclass *
+nv108_graph_oclass = &(struct nvc0_graph_oclass) {
+ .base.handle = NV_ENGINE(GR, 0x08),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_graph_ctor,
+ .dtor = nvc0_graph_dtor,
+ .init = nve4_graph_init,
+ .fini = nv108_graph_fini,
+ },
+ .cclass = &nv108_grctx_oclass,
+ .sclass = nv108_graph_sclass,
+ .mmio = nv108_graph_init_mmio,
+ .fecs.ucode = &nv108_graph_fecs_ucode,
+ .gpccs.ucode = &nv108_graph_gpccs_ucode,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 03de5175dd9f..30ed19c52e05 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -304,12 +304,28 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine)
return timeout ? -EBUSY : 0;
}
-static const struct nouveau_enum nv50_mp_exec_error_names[] = {
- { 3, "STACK_UNDERFLOW", NULL },
- { 4, "QUADON_ACTIVE", NULL },
- { 8, "TIMEOUT", NULL },
- { 0x10, "INVALID_OPCODE", NULL },
- { 0x40, "BREAKPOINT", NULL },
+static const struct nouveau_bitfield nv50_mp_exec_errors[] = {
+ { 0x01, "STACK_UNDERFLOW" },
+ { 0x02, "STACK_MISMATCH" },
+ { 0x04, "QUADON_ACTIVE" },
+ { 0x08, "TIMEOUT" },
+ { 0x10, "INVALID_OPCODE" },
+ { 0x20, "PM_OVERFLOW" },
+ { 0x40, "BREAKPOINT" },
+ {}
+};
+
+static const struct nouveau_bitfield nv50_mpc_traps[] = {
+ { 0x0000001, "LOCAL_LIMIT_READ" },
+ { 0x0000010, "LOCAL_LIMIT_WRITE" },
+ { 0x0000040, "STACK_LIMIT" },
+ { 0x0000100, "GLOBAL_LIMIT_READ" },
+ { 0x0001000, "GLOBAL_LIMIT_WRITE" },
+ { 0x0010000, "MP0" },
+ { 0x0020000, "MP1" },
+ { 0x0040000, "GLOBAL_LIMIT_RED" },
+ { 0x0400000, "GLOBAL_LIMIT_ATOM" },
+ { 0x4000000, "MP2" },
{}
};
@@ -396,6 +412,60 @@ static const struct nouveau_bitfield nv50_graph_intr_name[] = {
{}
};
+static const struct nouveau_bitfield nv50_graph_trap_prop[] = {
+ { 0x00000004, "SURF_WIDTH_OVERRUN" },
+ { 0x00000008, "SURF_HEIGHT_OVERRUN" },
+ { 0x00000010, "DST2D_FAULT" },
+ { 0x00000020, "ZETA_FAULT" },
+ { 0x00000040, "RT_FAULT" },
+ { 0x00000080, "CUDA_FAULT" },
+ { 0x00000100, "DST2D_STORAGE_TYPE_MISMATCH" },
+ { 0x00000200, "ZETA_STORAGE_TYPE_MISMATCH" },
+ { 0x00000400, "RT_STORAGE_TYPE_MISMATCH" },
+ { 0x00000800, "DST2D_LINEAR_MISMATCH" },
+ { 0x00001000, "RT_LINEAR_MISMATCH" },
+ {}
+};
+
+static void
+nv50_priv_prop_trap(struct nv50_graph_priv *priv,
+ u32 ustatus_addr, u32 ustatus, u32 tp)
+{
+ u32 e0c = nv_rd32(priv, ustatus_addr + 0x04);
+ u32 e10 = nv_rd32(priv, ustatus_addr + 0x08);
+ u32 e14 = nv_rd32(priv, ustatus_addr + 0x0c);
+ u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
+ u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
+ u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
+ u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
+
+ /* CUDA memory: l[], g[] or stack. */
+ if (ustatus & 0x00000080) {
+ if (e18 & 0x80000000) {
+ /* g[] read fault? */
+ nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
+ tp, e14, e10 | ((e18 >> 24) & 0x1f));
+ e18 &= ~0x1f000000;
+ } else if (e18 & 0xc) {
+ /* g[] write fault? */
+ nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
+ tp, e14, e10 | ((e18 >> 7) & 0x1f));
+ e18 &= ~0x00000f80;
+ } else {
+ nv_error(priv, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
+ tp, e14, e10);
+ }
+ ustatus &= ~0x00000080;
+ }
+ if (ustatus) {
+ nv_error(priv, "TRAP_PROP - TP %d -", tp);
+ nouveau_bitfield_print(nv50_graph_trap_prop, ustatus);
+ pr_cont(" - Address %02x%08x\n", e14, e10);
+ }
+ nv_error(priv, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ tp, e0c, e18, e1c, e20, e24);
+}
+
static void
nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
{
@@ -420,8 +490,8 @@ nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
oplow = nv_rd32(priv, addr + 0x70);
ophigh = nv_rd32(priv, addr + 0x74);
nv_error(priv, "TRAP_MP_EXEC - "
- "TP %d MP %d: ", tpid, i);
- nouveau_enum_print(nv50_mp_exec_error_names, status);
+ "TP %d MP %d:", tpid, i);
+ nouveau_bitfield_print(nv50_mp_exec_errors, status);
pr_cont(" at %06x warp %d, opcode %08x %08x\n",
pc&0xffffff, pc >> 24,
oplow, ophigh);
@@ -468,60 +538,19 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
nv50_priv_mp_trap(priv, i, display);
ustatus &= ~0x04030000;
}
- break;
- case 8: /* TPDMA error */
- {
- u32 e0c = nv_rd32(priv, ustatus_addr + 4);
- u32 e10 = nv_rd32(priv, ustatus_addr + 8);
- u32 e14 = nv_rd32(priv, ustatus_addr + 0xc);
- u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
- u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
- u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
- u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
- /* 2d engine destination */
- if (ustatus & 0x00000010) {
- if (display) {
- nv_error(priv, "TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
- i, e14, e10);
- nv_error(priv, "TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000010;
- }
- /* Render target */
- if (ustatus & 0x00000040) {
- if (display) {
- nv_error(priv, "TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
- i, e14, e10);
- nv_error(priv, "TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000040;
- }
- /* CUDA memory: l[], g[] or stack. */
- if (ustatus & 0x00000080) {
- if (display) {
- if (e18 & 0x80000000) {
- /* g[] read fault? */
- nv_error(priv, "TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
- i, e14, e10 | ((e18 >> 24) & 0x1f));
- e18 &= ~0x1f000000;
- } else if (e18 & 0xc) {
- /* g[] write fault? */
- nv_error(priv, "TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
- i, e14, e10 | ((e18 >> 7) & 0x1f));
- e18 &= ~0x00000f80;
- } else {
- nv_error(priv, "TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
- i, e14, e10);
- }
- nv_error(priv, "TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000080;
- }
+ if (ustatus && display) {
+ nv_error(priv, "%s - TP%d:", name, i);
+ nouveau_bitfield_print(nv50_mpc_traps, ustatus);
+ pr_cont("\n");
+ ustatus = 0;
}
break;
+ case 8: /* PROP error */
+ if (display)
+ nv50_priv_prop_trap(
+ priv, ustatus_addr, ustatus, i);
+ ustatus = 0;
+ break;
}
if (ustatus) {
if (display)
@@ -727,11 +756,11 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
status &= ~0x080;
}
- /* TPDMA: Handles TP-initiated uncached memory accesses:
+ /* PROP: Handles TP-initiated uncached memory accesses:
* l[], g[], stack, 2d surfaces, render targets. */
if (status & 0x100) {
nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display,
- "TRAP_TPDMA");
+ "TRAP_PROP");
nv_wr32(priv, 0x400108, 0x100);
status &= ~0x100;
}
@@ -760,7 +789,7 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
u32 mthd = (addr & 0x00001ffc);
u32 data = nv_rd32(priv, 0x400708);
u32 class = nv_rd32(priv, 0x400814);
- u32 show = stat;
+ u32 show = stat, show_bitfield = stat;
int chid;
engctx = nouveau_engctx_get(engine, inst);
@@ -778,21 +807,26 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
nv_error(priv, "DATA_ERROR ");
nouveau_enum_print(nv50_data_error_names, ecode);
pr_cont("\n");
+ show_bitfield &= ~0x00100000;
}
if (stat & 0x00200000) {
if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12,
engctx))
show &= ~0x00200000;
+ show_bitfield &= ~0x00200000;
}
nv_wr32(priv, 0x400100, stat);
nv_wr32(priv, 0x400500, 0x00010001);
if (show) {
- nv_error(priv, "%s", "");
- nouveau_bitfield_print(nv50_graph_intr_name, show);
- pr_cont("\n");
+ show &= show_bitfield;
+ if (show) {
+ nv_error(priv, "%s", "");
+ nouveau_bitfield_print(nv50_graph_intr_name, show);
+ pr_cont("\n");
+ }
nv_error(priv,
"ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
chid, (u64)inst << 12, nouveau_client_name(engctx),
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 434bb4b0fa2e..a73ab209ea88 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -334,7 +334,7 @@ nvc0_graph_mthd(struct nvc0_graph_priv *priv, struct nvc0_graph_mthd *mthds)
while ((mthd = &mthds[i++]) && (init = mthd->init)) {
u32 addr = 0x80000000 | mthd->oclass;
for (data = 0; init->count; init++) {
- if (data != init->data) {
+ if (init == mthd->init || data != init->data) {
nv_wr32(priv, 0x40448c, init->data);
data = init->data;
}
@@ -901,6 +901,9 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
}
return 0;
+ } else
+ if (!oclass->fecs.ucode) {
+ return -ENOSYS;
}
/* load HUB microcode */
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index ea17a80ad7fc..b0ab6de270b2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -205,6 +205,11 @@ extern struct nvc0_graph_init nve4_graph_init_regs[];
extern struct nvc0_graph_init nve4_graph_init_unk[];
extern struct nvc0_graph_init nve4_graph_init_unk88xx[];
+extern struct nvc0_graph_init nvf0_graph_init_unk40xx[];
+extern struct nvc0_graph_init nvf0_graph_init_unk70xx[];
+extern struct nvc0_graph_init nvf0_graph_init_unk5bxx[];
+extern struct nvc0_graph_init nvf0_graph_init_tpc[];
+
int nvc0_grctx_generate(struct nvc0_graph_priv *);
void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
void nvc0_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
@@ -266,6 +271,11 @@ extern struct nvc0_graph_init nve4_grctx_init_unk80xx[];
extern struct nvc0_graph_init nve4_grctx_init_unk90xx[];
extern struct nouveau_oclass *nvf0_grctx_oclass;
+extern struct nvc0_graph_init nvf0_grctx_init_unk44xx[];
+extern struct nvc0_graph_init nvf0_grctx_init_unk5bxx[];
+extern struct nvc0_graph_init nvf0_grctx_init_unk60xx[];
+
+extern struct nouveau_oclass *nv108_grctx_oclass;
#define mmio_data(s,a,p) do { \
info->buffer[info->buffer_nr] = round_up(info->addr, (a)); \
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
index 2f0ac7832234..b1acb9939d95 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
@@ -41,7 +41,7 @@ nvf0_graph_sclass[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-static struct nvc0_graph_init
+struct nvc0_graph_init
nvf0_graph_init_unk40xx[] = {
{ 0x40415c, 1, 0x04, 0x00000000 },
{ 0x404170, 1, 0x04, 0x00000000 },
@@ -60,7 +60,7 @@ nvf0_graph_init_unk58xx[] = {
{}
};
-static struct nvc0_graph_init
+struct nvc0_graph_init
nvf0_graph_init_unk70xx[] = {
{ 0x407010, 1, 0x04, 0x00000000 },
{ 0x407040, 1, 0x04, 0x80440424 },
@@ -68,7 +68,7 @@ nvf0_graph_init_unk70xx[] = {
{}
};
-static struct nvc0_graph_init
+struct nvc0_graph_init
nvf0_graph_init_unk5bxx[] = {
{ 0x405b44, 1, 0x04, 0x00000000 },
{ 0x405b50, 1, 0x04, 0x00000000 },
@@ -114,7 +114,7 @@ nvf0_graph_init_gpc[] = {
{}
};
-static struct nvc0_graph_init
+struct nvc0_graph_init
nvf0_graph_init_tpc[] = {
{ 0x419d0c, 1, 0x04, 0x00000000 },
{ 0x419d10, 1, 0x04, 0x00000014 },
@@ -243,6 +243,6 @@ nvf0_graph_oclass = &(struct nvc0_graph_oclass) {
.cclass = &nvf0_grctx_oclass,
.sclass = nvf0_graph_sclass,
.mmio = nvf0_graph_init_mmio,
- .fecs.ucode = 0 ? &nvf0_graph_fecs_ucode : NULL,
+ .fecs.ucode = &nvf0_graph_fecs_ucode,
.gpccs.ucode = &nvf0_graph_gpccs_ucode,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 560c3593dae7..e71a4325e670 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -230,9 +230,26 @@ struct nve0_channel_ind_class {
#define NV04_DISP_CLASS 0x00000046
+#define NV04_DISP_MTHD 0x00000000
+#define NV04_DISP_MTHD_HEAD 0x00000001
+
+#define NV04_DISP_SCANOUTPOS 0x00000000
+
struct nv04_display_class {
};
+struct nv04_display_scanoutpos {
+ s64 time[2];
+ u32 vblanks;
+ u32 vblanke;
+ u32 vtotal;
+ u32 vline;
+ u32 hblanks;
+ u32 hblanke;
+ u32 htotal;
+ u32 hline;
+};
+
/* 5070: NV50_DISP
* 8270: NV84_DISP
* 8370: NVA0_DISP
@@ -252,6 +269,11 @@ struct nv04_display_class {
#define NVE0_DISP_CLASS 0x00009170
#define NVF0_DISP_CLASS 0x00009270
+#define NV50_DISP_MTHD 0x00000000
+#define NV50_DISP_MTHD_HEAD 0x00000003
+
+#define NV50_DISP_SCANOUTPOS 0x00000000
+
#define NV50_DISP_SOR_MTHD 0x00010000
#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000
#define NV50_DISP_SOR_MTHD_HEAD 0x00000018
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index ac2881d1776a..7b8ea221b00d 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -38,7 +38,8 @@ enum nv_subdev_type {
NVDEV_SUBDEV_THERM,
NVDEV_SUBDEV_CLOCK,
- NVDEV_ENGINE_DMAOBJ,
+ NVDEV_ENGINE_FIRST,
+ NVDEV_ENGINE_DMAOBJ = NVDEV_ENGINE_FIRST,
NVDEV_ENGINE_FIFO,
NVDEV_ENGINE_SW,
NVDEV_ENGINE_GR,
@@ -70,6 +71,7 @@ struct nouveau_device {
const char *dbgopt;
const char *name;
const char *cname;
+ u64 disable_mask;
enum {
NV_04 = 0x04,
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index 8c32cf4d83c7..26b6b2bb1112 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -109,6 +109,7 @@ extern struct nouveau_oclass *nv50_fifo_oclass;
extern struct nouveau_oclass *nv84_fifo_oclass;
extern struct nouveau_oclass *nvc0_fifo_oclass;
extern struct nouveau_oclass *nve0_fifo_oclass;
+extern struct nouveau_oclass *nv108_fifo_oclass;
void nv04_fifo_intr(struct nouveau_subdev *);
int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
index 8e1b52312ddc..97705618de97 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -69,6 +69,7 @@ extern struct nouveau_oclass *nvd7_graph_oclass;
extern struct nouveau_oclass *nvd9_graph_oclass;
extern struct nouveau_oclass *nve4_graph_oclass;
extern struct nouveau_oclass *nvf0_graph_oclass;
+extern struct nouveau_oclass *nv108_graph_oclass;
extern const struct nouveau_bitfield nv04_graph_nsource[];
extern struct nouveau_ofuncs nv04_graph_ofuncs;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
index 4f4ff4502c3d..9faa98e67ad8 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
@@ -4,8 +4,7 @@
#include <core/subdev.h>
#include <core/device.h>
-#include <subdev/fb.h>
-
+struct nouveau_mem;
struct nouveau_vma;
struct nouveau_bar {
@@ -29,27 +28,7 @@ nouveau_bar(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BAR];
}
-#define nouveau_bar_create(p,e,o,d) \
- nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_bar_init(p) \
- nouveau_subdev_init(&(p)->base)
-#define nouveau_bar_fini(p,s) \
- nouveau_subdev_fini(&(p)->base, (s))
-
-int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-void nouveau_bar_destroy(struct nouveau_bar *);
-
-void _nouveau_bar_dtor(struct nouveau_object *);
-#define _nouveau_bar_init _nouveau_subdev_init
-#define _nouveau_bar_fini _nouveau_subdev_fini
-
extern struct nouveau_oclass nv50_bar_oclass;
extern struct nouveau_oclass nvc0_bar_oclass;
-int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
- struct nouveau_mem *, struct nouveau_object **);
-
-void nv84_bar_flush(struct nouveau_bar *);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
new file mode 100644
index 000000000000..c5e6d1e6ac1d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
@@ -0,0 +1,66 @@
+#ifndef __NVBIOS_RAMCFG_H__
+#define __NVBIOS_RAMCFG_H__
+
+struct nouveau_bios;
+
+struct nvbios_ramcfg {
+ unsigned rammap_11_08_01:1;
+ unsigned rammap_11_08_0c:2;
+ unsigned rammap_11_08_10:1;
+ unsigned rammap_11_11_0c:2;
+
+ unsigned ramcfg_11_01_01:1;
+ unsigned ramcfg_11_01_02:1;
+ unsigned ramcfg_11_01_04:1;
+ unsigned ramcfg_11_01_08:1;
+ unsigned ramcfg_11_01_10:1;
+ unsigned ramcfg_11_01_20:1;
+ unsigned ramcfg_11_01_40:1;
+ unsigned ramcfg_11_01_80:1;
+ unsigned ramcfg_11_02_03:2;
+ unsigned ramcfg_11_02_04:1;
+ unsigned ramcfg_11_02_08:1;
+ unsigned ramcfg_11_02_10:1;
+ unsigned ramcfg_11_02_40:1;
+ unsigned ramcfg_11_02_80:1;
+ unsigned ramcfg_11_03_0f:4;
+ unsigned ramcfg_11_03_30:2;
+ unsigned ramcfg_11_03_c0:2;
+ unsigned ramcfg_11_03_f0:4;
+ unsigned ramcfg_11_04:8;
+ unsigned ramcfg_11_06:8;
+ unsigned ramcfg_11_07_02:1;
+ unsigned ramcfg_11_07_04:1;
+ unsigned ramcfg_11_07_08:1;
+ unsigned ramcfg_11_07_10:1;
+ unsigned ramcfg_11_07_40:1;
+ unsigned ramcfg_11_07_80:1;
+ unsigned ramcfg_11_08_01:1;
+ unsigned ramcfg_11_08_02:1;
+ unsigned ramcfg_11_08_04:1;
+ unsigned ramcfg_11_08_08:1;
+ unsigned ramcfg_11_08_10:1;
+ unsigned ramcfg_11_08_20:1;
+ unsigned ramcfg_11_09:8;
+
+ unsigned timing[11];
+ unsigned timing_20_2e_03:2;
+ unsigned timing_20_2e_30:2;
+ unsigned timing_20_2e_c0:2;
+ unsigned timing_20_2f_03:2;
+ unsigned timing_20_2c_003f:6;
+ unsigned timing_20_2c_1fc0:7;
+ unsigned timing_20_30_f8:5;
+ unsigned timing_20_30_07:3;
+ unsigned timing_20_31_0007:3;
+ unsigned timing_20_31_0078:4;
+ unsigned timing_20_31_0780:4;
+ unsigned timing_20_31_0800:1;
+ unsigned timing_20_31_7000:3;
+ unsigned timing_20_31_8000:1;
+};
+
+u8 nvbios_ramcfg_count(struct nouveau_bios *);
+u8 nvbios_ramcfg_index(struct nouveau_bios *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
index bc15e0320877..5bdf8e4db40a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
@@ -1,11 +1,25 @@
#ifndef __NVBIOS_RAMMAP_H__
#define __NVBIOS_RAMMAP_H__
-u16 nvbios_rammap_table(struct nouveau_bios *, u8 *ver, u8 *hdr,
- u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
-u16 nvbios_rammap_entry(struct nouveau_bios *, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_rammap_match(struct nouveau_bios *, u16 khz,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+struct nvbios_ramcfg;
+
+u32 nvbios_rammapTe(struct nouveau_bios *, u8 *ver, u8 *hdr,
+ u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+
+u32 nvbios_rammapEe(struct nouveau_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_rammapEm(struct nouveau_bios *, u16 mhz,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_rammapEp(struct nouveau_bios *, u16 mhz,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ramcfg *);
+
+u32 nvbios_rammapSe(struct nouveau_bios *, u32 data,
+ u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+ u8 *ver, u8 *hdr);
+u32 nvbios_rammapSp(struct nouveau_bios *, u32 data,
+ u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+ u8 *ver, u8 *hdr,
+ struct nvbios_ramcfg *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
index 963694b54224..76d914b67ab5 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
@@ -1,8 +1,14 @@
#ifndef __NVBIOS_TIMING_H__
#define __NVBIOS_TIMING_H__
-u16 nvbios_timing_table(struct nouveau_bios *,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_timing_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+struct nvbios_ramcfg;
+
+u16 nvbios_timingTe(struct nouveau_bios *,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+u16 nvbios_timingEe(struct nouveau_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_timingEp(struct nouveau_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ramcfg *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
index 685c9b12ee4c..ed1ac68c38b3 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
@@ -9,7 +9,6 @@ struct nouveau_devinit {
bool post;
void (*meminit)(struct nouveau_devinit *);
int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
-
};
static inline struct nouveau_devinit *
@@ -18,32 +17,16 @@ nouveau_devinit(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_DEVINIT];
}
-#define nouveau_devinit_create(p,e,o,d) \
- nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_devinit_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-#define nouveau_devinit_init(p) ({ \
- struct nouveau_devinit *d = (p); \
- _nouveau_devinit_init(nv_object(d)); \
-})
-#define nouveau_devinit_fini(p,s) ({ \
- struct nouveau_devinit *d = (p); \
- _nouveau_devinit_fini(nv_object(d), (s)); \
-})
-
-int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-#define _nouveau_devinit_dtor _nouveau_subdev_dtor
-int _nouveau_devinit_init(struct nouveau_object *);
-int _nouveau_devinit_fini(struct nouveau_object *, bool suspend);
-
-extern struct nouveau_oclass nv04_devinit_oclass;
-extern struct nouveau_oclass nv05_devinit_oclass;
-extern struct nouveau_oclass nv10_devinit_oclass;
-extern struct nouveau_oclass nv1a_devinit_oclass;
-extern struct nouveau_oclass nv20_devinit_oclass;
-extern struct nouveau_oclass nv50_devinit_oclass;
-extern struct nouveau_oclass nva3_devinit_oclass;
-extern struct nouveau_oclass nvc0_devinit_oclass;
+extern struct nouveau_oclass *nv04_devinit_oclass;
+extern struct nouveau_oclass *nv05_devinit_oclass;
+extern struct nouveau_oclass *nv10_devinit_oclass;
+extern struct nouveau_oclass *nv1a_devinit_oclass;
+extern struct nouveau_oclass *nv20_devinit_oclass;
+extern struct nouveau_oclass *nv50_devinit_oclass;
+extern struct nouveau_oclass *nv84_devinit_oclass;
+extern struct nouveau_oclass *nv98_devinit_oclass;
+extern struct nouveau_oclass *nva3_devinit_oclass;
+extern struct nouveau_oclass *nvaf_devinit_oclass;
+extern struct nouveau_oclass *nvc0_devinit_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 8541aa382ff2..d7ecafbae1ca 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -75,6 +75,11 @@ struct nouveau_fb {
static inline struct nouveau_fb *
nouveau_fb(void *obj)
{
+ /* fbram uses this before device subdev pointer is valid */
+ if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
+ nv_subidx(obj) == NVDEV_SUBDEV_FB)
+ return obj;
+
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
}
@@ -101,6 +106,13 @@ extern struct nouveau_oclass *nvaf_fb_oclass;
extern struct nouveau_oclass *nvc0_fb_oclass;
extern struct nouveau_oclass *nve0_fb_oclass;
+#include <subdev/bios/ramcfg.h>
+
+struct nouveau_ram_data {
+ struct nvbios_ramcfg bios;
+ u32 freq;
+};
+
struct nouveau_ram {
struct nouveau_object base;
enum {
@@ -137,6 +149,12 @@ struct nouveau_ram {
} rammap, ramcfg, timing;
u32 freq;
u32 mr[16];
+ u32 mr1_nuts;
+
+ struct nouveau_ram_data *next;
+ struct nouveau_ram_data former;
+ struct nouveau_ram_data xition;
+ struct nouveau_ram_data target;
};
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 9fa5da723871..7f50a858b16f 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -73,7 +73,7 @@ struct nouveau_i2c {
int (*identify)(struct nouveau_i2c *, int index,
const char *what, struct nouveau_i2c_board_info *,
bool (*match)(struct nouveau_i2c_port *,
- struct i2c_board_info *));
+ struct i2c_board_info *, void *), void *);
struct list_head ports;
};
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
index ec7a54e91a08..c1df26f3230c 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -23,21 +23,6 @@ nv_memobj(void *obj)
return obj;
}
-#define nouveau_instobj_create(p,e,o,d) \
- nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_instobj_init(p) \
- nouveau_object_init(&(p)->base)
-#define nouveau_instobj_fini(p,s) \
- nouveau_object_fini(&(p)->base, (s))
-
-int nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-void nouveau_instobj_destroy(struct nouveau_instobj *);
-
-void _nouveau_instobj_dtor(struct nouveau_object *);
-#define _nouveau_instobj_init nouveau_object_init
-#define _nouveau_instobj_fini nouveau_object_fini
-
struct nouveau_instmem {
struct nouveau_subdev base;
struct list_head list;
@@ -50,24 +35,18 @@ struct nouveau_instmem {
static inline struct nouveau_instmem *
nouveau_instmem(void *obj)
{
+ /* nv04/nv40 impls need to create objects in their constructor,
+ * which is before the subdev pointer is valid
+ */
+ if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
+ nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM)
+ return obj;
+
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
}
-#define nouveau_instmem_create(p,e,o,d) \
- nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_instmem_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-int nouveau_instmem_init(struct nouveau_instmem *);
-int nouveau_instmem_fini(struct nouveau_instmem *, bool);
-
-#define _nouveau_instmem_dtor _nouveau_subdev_dtor
-int _nouveau_instmem_init(struct nouveau_object *);
-int _nouveau_instmem_fini(struct nouveau_object *, bool);
-
-extern struct nouveau_oclass nv04_instmem_oclass;
-extern struct nouveau_oclass nv40_instmem_oclass;
-extern struct nouveau_oclass nv50_instmem_oclass;
+extern struct nouveau_oclass *nv04_instmem_oclass;
+extern struct nouveau_oclass *nv40_instmem_oclass;
+extern struct nouveau_oclass *nv50_instmem_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
index fcf57fa309bf..c9509039f94b 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -131,9 +131,5 @@ void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
-void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
- struct nouveau_mem *);
-void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
- struct nouveau_mem *mem);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index d70ba342aa2e..7098ddd54678 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -23,7 +23,11 @@
*/
#include <core/object.h>
-#include <subdev/bar.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+
+#include "priv.h"
struct nouveau_barobj {
struct nouveau_object base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
index 160d27f3c7b4..090d594a21b3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
@@ -25,10 +25,11 @@
#include <core/gpuobj.h>
#include <subdev/timer.h>
-#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
+#include "priv.h"
+
struct nv50_bar_priv {
struct nouveau_bar base;
spinlock_t lock;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
index b2ec7411eb2e..bac5e754de35 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -25,10 +25,11 @@
#include <core/gpuobj.h>
#include <subdev/timer.h>
-#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
+#include "priv.h"
+
struct nvc0_bar_priv {
struct nouveau_bar base;
spinlock_t lock;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
new file mode 100644
index 000000000000..ffad8f337ead
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_BAR_PRIV_H__
+#define __NVKM_BAR_PRIV_H__
+
+#include <subdev/bar.h>
+
+#define nouveau_bar_create(p,e,o,d) \
+ nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_bar_init(p) \
+ nouveau_subdev_init(&(p)->base)
+#define nouveau_bar_fini(p,s) \
+ nouveau_subdev_fini(&(p)->base, (s))
+
+int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+void nouveau_bar_destroy(struct nouveau_bar *);
+
+void _nouveau_bar_dtor(struct nouveau_object *);
+#define _nouveau_bar_init _nouveau_subdev_init
+#define _nouveau_bar_fini _nouveau_subdev_fini
+
+int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
+ struct nouveau_mem *, struct nouveau_object **);
+
+void nv84_bar_flush(struct nouveau_bar *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 420908cb82b6..de201baeb053 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -9,6 +9,7 @@
#include <subdev/bios/dp.h>
#include <subdev/bios/gpio.h>
#include <subdev/bios/init.h>
+#include <subdev/bios/ramcfg.h>
#include <subdev/devinit.h>
#include <subdev/i2c.h>
#include <subdev/vga.h>
@@ -365,13 +366,13 @@ static u16
init_script(struct nouveau_bios *bios, int index)
{
struct nvbios_init init = { .bios = bios };
- u16 data;
+ u16 bmp_ver = bmp_version(bios), data;
- if (bmp_version(bios) && bmp_version(bios) < 0x0510) {
- if (index > 1)
+ if (bmp_ver && bmp_ver < 0x0510) {
+ if (index > 1 || bmp_ver < 0x0100)
return 0x0000;
- data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18);
+ data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
return nv_ro16(bios, data + (index * 2));
}
@@ -391,43 +392,14 @@ init_unknown_script(struct nouveau_bios *bios)
return 0x0000;
}
-static u16
-init_ram_restrict_table(struct nvbios_init *init)
-{
- struct nouveau_bios *bios = init->bios;
- struct bit_entry bit_M;
- u16 data = 0x0000;
-
- if (!bit_entry(bios, 'M', &bit_M)) {
- if (bit_M.version == 1 && bit_M.length >= 5)
- data = nv_ro16(bios, bit_M.offset + 3);
- if (bit_M.version == 2 && bit_M.length >= 3)
- data = nv_ro16(bios, bit_M.offset + 1);
- }
-
- if (data == 0x0000)
- warn("ram restrict table not found\n");
- return data;
-}
-
static u8
init_ram_restrict_group_count(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
- struct bit_entry bit_M;
-
- if (!bit_entry(bios, 'M', &bit_M)) {
- if (bit_M.version == 1 && bit_M.length >= 5)
- return nv_ro08(bios, bit_M.offset + 2);
- if (bit_M.version == 2 && bit_M.length >= 3)
- return nv_ro08(bios, bit_M.offset + 0);
- }
-
- return 0x00;
+ return nvbios_ramcfg_count(init->bios);
}
static u8
-init_ram_restrict_strap(struct nvbios_init *init)
+init_ram_restrict(struct nvbios_init *init)
{
/* This appears to be the behaviour of the VBIOS parser, and *is*
* important to cache the NV_PEXTDEV_BOOT0 on later chipsets to
@@ -438,18 +410,8 @@ init_ram_restrict_strap(struct nvbios_init *init)
* in case *not* re-reading the strap causes similar breakage.
*/
if (!init->ramcfg || init->bios->version.major < 0x70)
- init->ramcfg = init_rd32(init, 0x101000);
- return (init->ramcfg & 0x00000003c) >> 2;
-}
-
-static u8
-init_ram_restrict(struct nvbios_init *init)
-{
- u8 strap = init_ram_restrict_strap(init);
- u16 table = init_ram_restrict_table(init);
- if (table)
- return nv_ro08(init->bios, table + strap);
- return 0x00;
+ init->ramcfg = 0x80000000 | nvbios_ramcfg_index(init->bios);
+ return (init->ramcfg & 0x7fffffff);
}
static u8
@@ -1294,7 +1256,11 @@ init_jump(struct nvbios_init *init)
u16 offset = nv_ro16(bios, init->offset + 1);
trace("JUMP\t0x%04x\n", offset);
- init->offset = offset;
+
+ if (init_exec(init))
+ init->offset = offset;
+ else
+ init->offset += 3;
}
/**
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
new file mode 100644
index 000000000000..991aedda999b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/ramcfg.h>
+
+static u8
+nvbios_ramcfg_strap(struct nouveau_bios *bios)
+{
+ return (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
+}
+
+u8
+nvbios_ramcfg_count(struct nouveau_bios *bios)
+{
+ struct bit_entry bit_M;
+
+ if (!bit_entry(bios, 'M', &bit_M)) {
+ if (bit_M.version == 1 && bit_M.length >= 5)
+ return nv_ro08(bios, bit_M.offset + 2);
+ if (bit_M.version == 2 && bit_M.length >= 3)
+ return nv_ro08(bios, bit_M.offset + 0);
+ }
+
+ return 0x00;
+}
+
+u8
+nvbios_ramcfg_index(struct nouveau_bios *bios)
+{
+ u8 strap = nvbios_ramcfg_strap(bios);
+ u32 xlat = 0x00000000;
+ struct bit_entry bit_M;
+
+ if (!bit_entry(bios, 'M', &bit_M)) {
+ if (bit_M.version == 1 && bit_M.length >= 5)
+ xlat = nv_ro16(bios, bit_M.offset + 3);
+ if (bit_M.version == 2 && bit_M.length >= 3)
+ xlat = nv_ro16(bios, bit_M.offset + 1);
+ }
+
+ if (xlat)
+ strap = nv_ro08(bios, xlat + strap);
+ return strap;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
index 916fa9d302b7..1811b2cb0472 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
@@ -24,11 +24,12 @@
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/ramcfg.h>
#include <subdev/bios/rammap.h>
-u16
-nvbios_rammap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
- u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
+u32
+nvbios_rammapTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
+ u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
u16 rammap = 0x0000;
@@ -57,12 +58,12 @@ nvbios_rammap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
return 0x0000;
}
-u16
-nvbios_rammap_entry(struct nouveau_bios *bios, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+u32
+nvbios_rammapEe(struct nouveau_bios *bios, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
- u16 rammap = nvbios_rammap_table(bios, ver, hdr, cnt, len, &snr, &ssz);
+ u16 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
if (rammap && idx < *cnt) {
rammap = rammap + *hdr + (idx * (*len + (snr * ssz)));
*hdr = *len;
@@ -73,16 +74,100 @@ nvbios_rammap_entry(struct nouveau_bios *bios, int idx,
return 0x0000;
}
-u16
-nvbios_rammap_match(struct nouveau_bios *bios, u16 khz,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+u32
+nvbios_rammapEm(struct nouveau_bios *bios, u16 khz,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
int idx = 0;
u32 data;
- while ((data = nvbios_rammap_entry(bios, idx++, ver, hdr, cnt, len))) {
+ while ((data = nvbios_rammapEe(bios, idx++, ver, hdr, cnt, len))) {
if (khz >= nv_ro16(bios, data + 0x00) &&
khz <= nv_ro16(bios, data + 0x02))
break;
}
return data;
}
+
+u32
+nvbios_rammapEp(struct nouveau_bios *bios, u16 khz,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ramcfg *p)
+{
+ u32 data = nvbios_rammapEm(bios, khz, ver, hdr, cnt, len);
+ memset(p, 0x00, sizeof(*p));
+ switch (!!data * *ver) {
+ case 0x11:
+ p->rammap_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
+ p->rammap_11_08_0c = (nv_ro08(bios, data + 0x08) & 0x0c) >> 2;
+ p->rammap_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
+ p->rammap_11_11_0c = (nv_ro08(bios, data + 0x11) & 0x0c) >> 2;
+ break;
+ default:
+ data = 0;
+ break;
+ }
+ return data;
+}
+
+u32
+nvbios_rammapSe(struct nouveau_bios *bios, u32 data,
+ u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+ u8 *ver, u8 *hdr)
+{
+ if (idx < ecnt) {
+ data = data + ehdr + (idx * elen);
+ *ver = ever;
+ *hdr = elen;
+ return data;
+ }
+ return 0;
+}
+
+u32
+nvbios_rammapSp(struct nouveau_bios *bios, u32 data,
+ u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+ u8 *ver, u8 *hdr, struct nvbios_ramcfg *p)
+{
+ data = nvbios_rammapSe(bios, data, ever, ehdr, ecnt, elen, idx, ver, hdr);
+ switch (!!data * *ver) {
+ case 0x11:
+ p->ramcfg_11_01_01 = (nv_ro08(bios, data + 0x01) & 0x01) >> 0;
+ p->ramcfg_11_01_02 = (nv_ro08(bios, data + 0x01) & 0x02) >> 1;
+ p->ramcfg_11_01_04 = (nv_ro08(bios, data + 0x01) & 0x04) >> 2;
+ p->ramcfg_11_01_08 = (nv_ro08(bios, data + 0x01) & 0x08) >> 3;
+ p->ramcfg_11_01_10 = (nv_ro08(bios, data + 0x01) & 0x10) >> 4;
+ p->ramcfg_11_01_20 = (nv_ro08(bios, data + 0x01) & 0x20) >> 5;
+ p->ramcfg_11_01_40 = (nv_ro08(bios, data + 0x01) & 0x40) >> 6;
+ p->ramcfg_11_01_80 = (nv_ro08(bios, data + 0x01) & 0x80) >> 7;
+ p->ramcfg_11_02_03 = (nv_ro08(bios, data + 0x02) & 0x03) >> 0;
+ p->ramcfg_11_02_04 = (nv_ro08(bios, data + 0x02) & 0x04) >> 2;
+ p->ramcfg_11_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
+ p->ramcfg_11_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
+ p->ramcfg_11_02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
+ p->ramcfg_11_02_80 = (nv_ro08(bios, data + 0x02) & 0x80) >> 7;
+ p->ramcfg_11_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
+ p->ramcfg_11_03_30 = (nv_ro08(bios, data + 0x03) & 0x30) >> 4;
+ p->ramcfg_11_03_c0 = (nv_ro08(bios, data + 0x03) & 0xc0) >> 6;
+ p->ramcfg_11_03_f0 = (nv_ro08(bios, data + 0x03) & 0xf0) >> 4;
+ p->ramcfg_11_04 = (nv_ro08(bios, data + 0x04) & 0xff) >> 0;
+ p->ramcfg_11_06 = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
+ p->ramcfg_11_07_02 = (nv_ro08(bios, data + 0x07) & 0x02) >> 1;
+ p->ramcfg_11_07_04 = (nv_ro08(bios, data + 0x07) & 0x04) >> 2;
+ p->ramcfg_11_07_08 = (nv_ro08(bios, data + 0x07) & 0x08) >> 3;
+ p->ramcfg_11_07_10 = (nv_ro08(bios, data + 0x07) & 0x10) >> 4;
+ p->ramcfg_11_07_40 = (nv_ro08(bios, data + 0x07) & 0x40) >> 6;
+ p->ramcfg_11_07_80 = (nv_ro08(bios, data + 0x07) & 0x80) >> 7;
+ p->ramcfg_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
+ p->ramcfg_11_08_02 = (nv_ro08(bios, data + 0x08) & 0x02) >> 1;
+ p->ramcfg_11_08_04 = (nv_ro08(bios, data + 0x08) & 0x04) >> 2;
+ p->ramcfg_11_08_08 = (nv_ro08(bios, data + 0x08) & 0x08) >> 3;
+ p->ramcfg_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
+ p->ramcfg_11_08_20 = (nv_ro08(bios, data + 0x08) & 0x20) >> 5;
+ p->ramcfg_11_09 = (nv_ro08(bios, data + 0x09) & 0xff) >> 0;
+ break;
+ default:
+ data = 0;
+ break;
+ }
+ return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
index 151c2d6aaee8..350d44ab2ba2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
@@ -24,11 +24,12 @@
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/ramcfg.h>
#include <subdev/bios/timing.h>
u16
-nvbios_timing_table(struct nouveau_bios *bios,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+nvbios_timingTe(struct nouveau_bios *bios,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
u16 timing = 0x0000;
@@ -47,11 +48,15 @@ nvbios_timing_table(struct nouveau_bios *bios,
*hdr = nv_ro08(bios, timing + 1);
*cnt = nv_ro08(bios, timing + 2);
*len = nv_ro08(bios, timing + 3);
+ *snr = 0;
+ *ssz = 0;
return timing;
case 0x20:
*hdr = nv_ro08(bios, timing + 1);
- *cnt = nv_ro08(bios, timing + 3);
+ *cnt = nv_ro08(bios, timing + 5);
*len = nv_ro08(bios, timing + 2);
+ *snr = nv_ro08(bios, timing + 4);
+ *ssz = nv_ro08(bios, timing + 3);
return timing;
default:
break;
@@ -63,11 +68,60 @@ nvbios_timing_table(struct nouveau_bios *bios,
}
u16
-nvbios_timing_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+nvbios_timingEe(struct nouveau_bios *bios, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
- u8 hdr, cnt;
- u16 timing = nvbios_timing_table(bios, ver, &hdr, &cnt, len);
- if (timing && idx < cnt)
- return timing + hdr + (idx * *len);
+ u8 snr, ssz;
+ u16 timing = nvbios_timingTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+ if (timing && idx < *cnt) {
+ timing += *hdr + idx * (*len + (snr * ssz));
+ *hdr = *len;
+ *cnt = snr;
+ *len = ssz;
+ return timing;
+ }
return 0x0000;
}
+
+u16
+nvbios_timingEp(struct nouveau_bios *bios, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ramcfg *p)
+{
+ u16 data = nvbios_timingEe(bios, idx, ver, hdr, cnt, len), temp;
+ switch (!!data * *ver) {
+ case 0x20:
+ p->timing[0] = nv_ro32(bios, data + 0x00);
+ p->timing[1] = nv_ro32(bios, data + 0x04);
+ p->timing[2] = nv_ro32(bios, data + 0x08);
+ p->timing[3] = nv_ro32(bios, data + 0x0c);
+ p->timing[4] = nv_ro32(bios, data + 0x10);
+ p->timing[5] = nv_ro32(bios, data + 0x14);
+ p->timing[6] = nv_ro32(bios, data + 0x18);
+ p->timing[7] = nv_ro32(bios, data + 0x1c);
+ p->timing[8] = nv_ro32(bios, data + 0x20);
+ p->timing[9] = nv_ro32(bios, data + 0x24);
+ p->timing[10] = nv_ro32(bios, data + 0x28);
+ p->timing_20_2e_03 = (nv_ro08(bios, data + 0x2e) & 0x03) >> 0;
+ p->timing_20_2e_30 = (nv_ro08(bios, data + 0x2e) & 0x30) >> 4;
+ p->timing_20_2e_c0 = (nv_ro08(bios, data + 0x2e) & 0xc0) >> 6;
+ p->timing_20_2f_03 = (nv_ro08(bios, data + 0x2f) & 0x03) >> 0;
+ temp = nv_ro16(bios, data + 0x2c);
+ p->timing_20_2c_003f = (temp & 0x003f) >> 0;
+ p->timing_20_2c_1fc0 = (temp & 0x1fc0) >> 6;
+ p->timing_20_30_07 = (nv_ro08(bios, data + 0x30) & 0x07) >> 0;
+ p->timing_20_30_f8 = (nv_ro08(bios, data + 0x30) & 0xf8) >> 3;
+ temp = nv_ro16(bios, data + 0x31);
+ p->timing_20_31_0007 = (temp & 0x0007) >> 0;
+ p->timing_20_31_0078 = (temp & 0x0078) >> 3;
+ p->timing_20_31_0780 = (temp & 0x0780) >> 7;
+ p->timing_20_31_0800 = (temp & 0x0800) >> 11;
+ p->timing_20_31_7000 = (temp & 0x7000) >> 12;
+ p->timing_20_31_8000 = (temp & 0x8000) >> 15;
+ break;
+ default:
+ data = 0;
+ break;
+ }
+ return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
index e2938a21b06f..dd62baead39c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
@@ -182,9 +182,12 @@ nouveau_pstate_prog(struct nouveau_clock *clk, int pstatei)
clk->pstate = pstatei;
if (pfb->ram->calc) {
- ret = pfb->ram->calc(pfb, pstate->base.domain[nv_clk_src_mem]);
- if (ret == 0)
- ret = pfb->ram->prog(pfb);
+ int khz = pstate->base.domain[nv_clk_src_mem];
+ do {
+ ret = pfb->ram->calc(pfb, khz);
+ if (ret == 0)
+ ret = pfb->ram->prog(pfb);
+ } while (ret > 0);
pfb->ram->tidy(pfb);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index 30c1f3a4158e..b74db6cfc4e2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -25,7 +25,7 @@
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/clock.h>
-#include <subdev/devinit/priv.h>
+#include <subdev/devinit/nv04.h>
#include "pll.h"
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
index 4c62e84b96f5..d3c37c96f0e7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
@@ -457,7 +457,7 @@ nve0_domain[] = {
{ nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
{ nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE },
- { nv_clk_src_mem , 0x03, 0, "memory", 1000 },
+ { nv_clk_src_mem , 0x03, 0, "memory", 500 },
{ nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
{ nv_clk_src_hubk01 , 0x05 },
{ nv_clk_src_vdec , 0x06 },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
index 79c81d3d9bac..8fa34e8152c2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
@@ -24,9 +24,11 @@
#include <core/option.h>
-#include <subdev/devinit.h>
#include <subdev/bios.h>
#include <subdev/bios/init.h>
+#include <subdev/vga.h>
+
+#include "priv.h"
int
_nouveau_devinit_fini(struct nouveau_object *object, bool suspend)
@@ -37,18 +39,41 @@ _nouveau_devinit_fini(struct nouveau_object *object, bool suspend)
if (suspend)
devinit->post = true;
+ /* unlock the extended vga crtc regs */
+ nv_lockvgac(devinit, false);
+
return nouveau_subdev_fini(&devinit->base, suspend);
}
int
_nouveau_devinit_init(struct nouveau_object *object)
{
+ struct nouveau_devinit_impl *impl = (void *)object->oclass;
struct nouveau_devinit *devinit = (void *)object;
- int ret = nouveau_subdev_init(&devinit->base);
+ int ret;
+
+ ret = nouveau_subdev_init(&devinit->base);
+ if (ret)
+ return ret;
+
+ ret = nvbios_init(&devinit->base, devinit->post);
if (ret)
return ret;
- return nvbios_init(&devinit->base, devinit->post);
+ if (impl->disable)
+ nv_device(devinit)->disable_mask |= impl->disable(devinit);
+ return 0;
+}
+
+void
+_nouveau_devinit_dtor(struct nouveau_object *object)
+{
+ struct nouveau_devinit *devinit = (void *)object;
+
+ /* lock crtc regs */
+ nv_lockvgac(devinit, true);
+
+ nouveau_subdev_destroy(&devinit->base);
}
int
@@ -57,6 +82,7 @@ nouveau_devinit_create_(struct nouveau_object *parent,
struct nouveau_oclass *oclass,
int size, void **pobject)
{
+ struct nouveau_devinit_impl *impl = (void *)oclass;
struct nouveau_device *device = nv_device(parent);
struct nouveau_devinit *devinit;
int ret;
@@ -68,5 +94,7 @@ nouveau_devinit_create_(struct nouveau_object *parent,
return ret;
devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false);
+ devinit->meminit = impl->meminit;
+ devinit->pll_set = impl->pll_set;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
index 27c8235f1a85..7037eae46e44 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -27,12 +27,7 @@
#include <subdev/vga.h>
#include "fbmem.h"
-#include "priv.h"
-
-struct nv04_devinit_priv {
- struct nouveau_devinit base;
- int owner;
-};
+#include "nv04.h"
static void
nv04_devinit_meminit(struct nouveau_devinit *devinit)
@@ -393,17 +388,21 @@ int
nv04_devinit_fini(struct nouveau_object *object, bool suspend)
{
struct nv04_devinit_priv *priv = (void *)object;
+ int ret;
/* make i2c busses accessible */
nv_mask(priv, 0x000200, 0x00000001, 0x00000001);
- /* unlock extended vga crtc regs, and unslave crtcs */
- nv_lockvgac(priv, false);
+ ret = nouveau_devinit_fini(&priv->base, suspend);
+ if (ret)
+ return ret;
+
+ /* unslave crtcs */
if (priv->owner < 0)
priv->owner = nv_rdvgaowner(priv);
nv_wrvgaowner(priv, 0);
- return nouveau_devinit_fini(&priv->base, suspend);
+ return 0;
}
int
@@ -431,14 +430,13 @@ nv04_devinit_dtor(struct nouveau_object *object)
{
struct nv04_devinit_priv *priv = (void *)object;
- /* restore vga owner saved at first init, and lock crtc regs */
+ /* restore vga owner saved at first init */
nv_wrvgaowner(priv, priv->owner);
- nv_lockvgac(priv, true);
nouveau_devinit_destroy(&priv->base);
}
-static int
+int
nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -451,19 +449,19 @@ nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.meminit = nv04_devinit_meminit;
- priv->base.pll_set = nv04_devinit_pll_set;
priv->owner = -1;
return 0;
}
-struct nouveau_oclass
-nv04_devinit_oclass = {
- .handle = NV_SUBDEV(DEVINIT, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x04),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_devinit_ctor,
.dtor = nv04_devinit_dtor,
.init = nv04_devinit_init,
.fini = nv04_devinit_fini,
},
-};
+ .meminit = nv04_devinit_meminit,
+ .pll_set = nv04_devinit_pll_set,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h
new file mode 100644
index 000000000000..23470a57510c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h
@@ -0,0 +1,23 @@
+#ifndef __NVKM_DEVINIT_NV04_H__
+#define __NVKM_DEVINIT_NV04_H__
+
+#include "priv.h"
+
+struct nv04_devinit_priv {
+ struct nouveau_devinit base;
+ u8 owner;
+};
+
+int nv04_devinit_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void nv04_devinit_dtor(struct nouveau_object *);
+int nv04_devinit_init(struct nouveau_object *);
+int nv04_devinit_fini(struct nouveau_object *, bool);
+int nv04_devinit_pll_set(struct nouveau_devinit *, u32, u32);
+
+void setPLL_single(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
+void setPLL_double_highregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
+void setPLL_double_lowregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
index b1912a8a8942..98b7e6780dc7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
@@ -29,12 +29,7 @@
#include <subdev/vga.h>
#include "fbmem.h"
-#include "priv.h"
-
-struct nv05_devinit_priv {
- struct nouveau_devinit base;
- u8 owner;
-};
+#include "nv04.h"
static void
nv05_devinit_meminit(struct nouveau_devinit *devinit)
@@ -49,7 +44,7 @@ nv05_devinit_meminit(struct nouveau_devinit *devinit)
{ 0x06, 0x00 },
{ 0x00, 0x00 }
};
- struct nv05_devinit_priv *priv = (void *)devinit;
+ struct nv04_devinit_priv *priv = (void *)devinit;
struct nouveau_bios *bios = nouveau_bios(priv);
struct io_mapping *fb;
u32 patt = 0xdeadbeef;
@@ -130,31 +125,15 @@ out:
fbmem_fini(fb);
}
-static int
-nv05_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv05_devinit_priv *priv;
- int ret;
-
- ret = nouveau_devinit_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.meminit = nv05_devinit_meminit;
- priv->base.pll_set = nv04_devinit_pll_set;
- return 0;
-}
-
-struct nouveau_oclass
-nv05_devinit_oclass = {
- .handle = NV_SUBDEV(DEVINIT, 0x05),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv05_devinit_ctor,
+struct nouveau_oclass *
+nv05_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x05),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_devinit_ctor,
.dtor = nv04_devinit_dtor,
.init = nv04_devinit_init,
.fini = nv04_devinit_fini,
},
-};
+ .meminit = nv05_devinit_meminit,
+ .pll_set = nv04_devinit_pll_set,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
index 8d274dba1ef1..32b3d2131a7f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -27,17 +27,12 @@
#include <subdev/vga.h>
#include "fbmem.h"
-#include "priv.h"
-
-struct nv10_devinit_priv {
- struct nouveau_devinit base;
- u8 owner;
-};
+#include "nv04.h"
static void
nv10_devinit_meminit(struct nouveau_devinit *devinit)
{
- struct nv10_devinit_priv *priv = (void *)devinit;
+ struct nv04_devinit_priv *priv = (void *)devinit;
static const int mem_width[] = { 0x10, 0x00, 0x20 };
int mem_width_count;
uint32_t patt = 0xdeadbeef;
@@ -101,31 +96,15 @@ amount_found:
fbmem_fini(fb);
}
-static int
-nv10_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv10_devinit_priv *priv;
- int ret;
-
- ret = nouveau_devinit_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.meminit = nv10_devinit_meminit;
- priv->base.pll_set = nv04_devinit_pll_set;
- return 0;
-}
-
-struct nouveau_oclass
-nv10_devinit_oclass = {
- .handle = NV_SUBDEV(DEVINIT, 0x10),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv10_devinit_ctor,
+struct nouveau_oclass *
+nv10_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x10),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_devinit_ctor,
.dtor = nv04_devinit_dtor,
.init = nv04_devinit_init,
.fini = nv04_devinit_fini,
},
-};
+ .meminit = nv10_devinit_meminit,
+ .pll_set = nv04_devinit_pll_set,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
index e9743cdabe75..526d0c6faacd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
@@ -22,37 +22,16 @@
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "nv04.h"
-struct nv1a_devinit_priv {
- struct nouveau_devinit base;
- u8 owner;
-};
-
-static int
-nv1a_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv1a_devinit_priv *priv;
- int ret;
-
- ret = nouveau_devinit_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.pll_set = nv04_devinit_pll_set;
- return 0;
-}
-
-struct nouveau_oclass
-nv1a_devinit_oclass = {
- .handle = NV_SUBDEV(DEVINIT, 0x1a),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv1a_devinit_ctor,
+struct nouveau_oclass *
+nv1a_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x1a),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_devinit_ctor,
.dtor = nv04_devinit_dtor,
.init = nv04_devinit_init,
.fini = nv04_devinit_fini,
},
-};
+ .pll_set = nv04_devinit_pll_set,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
index 6cc6080d3bc0..4689ba303b0b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
@@ -24,18 +24,13 @@
*
*/
-#include "priv.h"
+#include "nv04.h"
#include "fbmem.h"
-struct nv20_devinit_priv {
- struct nouveau_devinit base;
- u8 owner;
-};
-
static void
nv20_devinit_meminit(struct nouveau_devinit *devinit)
{
- struct nv20_devinit_priv *priv = (void *)devinit;
+ struct nv04_devinit_priv *priv = (void *)devinit;
struct nouveau_device *device = nv_device(priv);
uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900);
uint32_t amount, off;
@@ -65,31 +60,15 @@ nv20_devinit_meminit(struct nouveau_devinit *devinit)
fbmem_fini(fb);
}
-static int
-nv20_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv20_devinit_priv *priv;
- int ret;
-
- ret = nouveau_devinit_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.meminit = nv20_devinit_meminit;
- priv->base.pll_set = nv04_devinit_pll_set;
- return 0;
-}
-
-struct nouveau_oclass
-nv20_devinit_oclass = {
- .handle = NV_SUBDEV(DEVINIT, 0x20),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv20_devinit_ctor,
+struct nouveau_oclass *
+nv20_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x20),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_devinit_ctor,
.dtor = nv04_devinit_dtor,
.init = nv04_devinit_init,
.fini = nv04_devinit_fini,
},
-};
+ .meminit = nv20_devinit_meminit,
+ .pll_set = nv04_devinit_pll_set,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index 6df72247c477..b46c62a1d5d8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -28,9 +28,9 @@
#include <subdev/bios/init.h>
#include <subdev/vga.h>
-#include "priv.h"
+#include "nv50.h"
-static int
+int
nv50_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
{
struct nv50_devinit_priv *priv = (void *)devinit;
@@ -74,6 +74,19 @@ nv50_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
return 0;
}
+static u64
+nv50_devinit_disable(struct nouveau_devinit *devinit)
+{
+ struct nv50_devinit_priv *priv = (void *)devinit;
+ u32 r001540 = nv_rd32(priv, 0x001540);
+ u64 disable = 0ULL;
+
+ if (!(r001540 & 0x40000000))
+ disable |= (1ULL << NVDEV_ENGINE_MPEG);
+
+ return disable;
+}
+
int
nv50_devinit_init(struct nouveau_object *object)
{
@@ -120,7 +133,7 @@ nv50_devinit_init(struct nouveau_object *object)
return 0;
}
-static int
+int
nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -133,17 +146,18 @@ nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.pll_set = nv50_devinit_pll_set;
return 0;
}
-struct nouveau_oclass
-nv50_devinit_oclass = {
- .handle = NV_SUBDEV(DEVINIT, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x50),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_devinit_ctor,
.dtor = _nouveau_devinit_dtor,
.init = nv50_devinit_init,
.fini = _nouveau_devinit_fini,
},
-};
+ .pll_set = nv50_devinit_pll_set,
+ .disable = nv50_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
new file mode 100644
index 000000000000..141c27e9f182
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_DEVINIT_NV50_H__
+#define __NVKM_DEVINIT_NV50_H__
+
+#include "priv.h"
+
+struct nv50_devinit_priv {
+ struct nouveau_devinit base;
+};
+
+int nv50_devinit_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+int nv50_devinit_init(struct nouveau_object *);
+int nv50_devinit_pll_set(struct nouveau_devinit *, u32, u32);
+
+int nva3_devinit_pll_set(struct nouveau_devinit *, u32, u32);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
new file mode 100644
index 000000000000..787422505d87
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static u64
+nv84_devinit_disable(struct nouveau_devinit *devinit)
+{
+ struct nv50_devinit_priv *priv = (void *)devinit;
+ u32 r001540 = nv_rd32(priv, 0x001540);
+ u32 r00154c = nv_rd32(priv, 0x00154c);
+ u64 disable = 0ULL;
+
+ if (!(r001540 & 0x40000000)) {
+ disable |= (1ULL << NVDEV_ENGINE_MPEG);
+ disable |= (1ULL << NVDEV_ENGINE_VP);
+ disable |= (1ULL << NVDEV_ENGINE_BSP);
+ disable |= (1ULL << NVDEV_ENGINE_CRYPT);
+ }
+
+ if (!(r00154c & 0x00000004))
+ disable |= (1ULL << NVDEV_ENGINE_DISP);
+ if (!(r00154c & 0x00000020))
+ disable |= (1ULL << NVDEV_ENGINE_BSP);
+ if (!(r00154c & 0x00000040))
+ disable |= (1ULL << NVDEV_ENGINE_CRYPT);
+
+ return disable;
+}
+
+struct nouveau_oclass *
+nv84_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x84),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_devinit_ctor,
+ .dtor = _nouveau_devinit_dtor,
+ .init = nv50_devinit_init,
+ .fini = _nouveau_devinit_fini,
+ },
+ .pll_set = nv50_devinit_pll_set,
+ .disable = nv84_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
new file mode 100644
index 000000000000..2b0e963fc6f0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static u64
+nv98_devinit_disable(struct nouveau_devinit *devinit)
+{
+ struct nv50_devinit_priv *priv = (void *)devinit;
+ u32 r001540 = nv_rd32(priv, 0x001540);
+ u32 r00154c = nv_rd32(priv, 0x00154c);
+ u64 disable = 0ULL;
+
+ if (!(r001540 & 0x40000000)) {
+ disable |= (1ULL << NVDEV_ENGINE_VP);
+ disable |= (1ULL << NVDEV_ENGINE_BSP);
+ disable |= (1ULL << NVDEV_ENGINE_PPP);
+ }
+
+ if (!(r00154c & 0x00000004))
+ disable |= (1ULL << NVDEV_ENGINE_DISP);
+ if (!(r00154c & 0x00000020))
+ disable |= (1ULL << NVDEV_ENGINE_BSP);
+ if (!(r00154c & 0x00000040))
+ disable |= (1ULL << NVDEV_ENGINE_CRYPT);
+
+ return disable;
+}
+
+struct nouveau_oclass *
+nv98_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x98),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_devinit_ctor,
+ .dtor = _nouveau_devinit_dtor,
+ .init = nv50_devinit_init,
+ .fini = _nouveau_devinit_fini,
+ },
+ .pll_set = nv50_devinit_pll_set,
+ .disable = nv98_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
index 76a68b290141..6dedf1dad7f7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
@@ -22,12 +22,12 @@
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "nv50.h"
-static int
+int
nva3_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
{
- struct nva3_devinit_priv *priv = (void *)devinit;
+ struct nv50_devinit_priv *priv = (void *)devinit;
struct nouveau_bios *bios = nouveau_bios(priv);
struct nvbios_pll info;
int N, fN, M, P;
@@ -58,30 +58,38 @@ nva3_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
return ret;
}
-static int
-nva3_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+static u64
+nva3_devinit_disable(struct nouveau_devinit *devinit)
{
- struct nv50_devinit_priv *priv;
- int ret;
+ struct nv50_devinit_priv *priv = (void *)devinit;
+ u32 r001540 = nv_rd32(priv, 0x001540);
+ u32 r00154c = nv_rd32(priv, 0x00154c);
+ u64 disable = 0ULL;
- ret = nouveau_devinit_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(r001540 & 0x40000000)) {
+ disable |= (1ULL << NVDEV_ENGINE_VP);
+ disable |= (1ULL << NVDEV_ENGINE_PPP);
+ }
+
+ if (!(r00154c & 0x00000004))
+ disable |= (1ULL << NVDEV_ENGINE_DISP);
+ if (!(r00154c & 0x00000020))
+ disable |= (1ULL << NVDEV_ENGINE_BSP);
+ if (!(r00154c & 0x00000200))
+ disable |= (1ULL << NVDEV_ENGINE_COPY0);
- priv->base.pll_set = nva3_devinit_pll_set;
- return 0;
+ return disable;
}
-struct nouveau_oclass
-nva3_devinit_oclass = {
- .handle = NV_SUBDEV(DEVINIT, 0xa3),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nva3_devinit_ctor,
+struct nouveau_oclass *
+nva3_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0xa3),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_devinit_ctor,
.dtor = _nouveau_devinit_dtor,
.init = nv50_devinit_init,
.fini = _nouveau_devinit_fini,
},
-};
+ .pll_set = nva3_devinit_pll_set,
+ .disable = nva3_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
new file mode 100644
index 000000000000..4fc68d27eff3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static u64
+nvaf_devinit_disable(struct nouveau_devinit *devinit)
+{
+ struct nv50_devinit_priv *priv = (void *)devinit;
+ u32 r001540 = nv_rd32(priv, 0x001540);
+ u32 r00154c = nv_rd32(priv, 0x00154c);
+ u64 disable = 0;
+
+ if (!(r001540 & 0x40000000)) {
+ disable |= (1ULL << NVDEV_ENGINE_VP);
+ disable |= (1ULL << NVDEV_ENGINE_PPP);
+ }
+
+ if (!(r00154c & 0x00000004))
+ disable |= (1ULL << NVDEV_ENGINE_DISP);
+ if (!(r00154c & 0x00000020))
+ disable |= (1ULL << NVDEV_ENGINE_BSP);
+ if (!(r00154c & 0x00000040))
+ disable |= (1ULL << NVDEV_ENGINE_VIC);
+ if (!(r00154c & 0x00000200))
+ disable |= (1ULL << NVDEV_ENGINE_COPY0);
+
+ return disable;
+}
+
+struct nouveau_oclass *
+nvaf_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0xaf),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_devinit_ctor,
+ .dtor = _nouveau_devinit_dtor,
+ .init = nv50_devinit_init,
+ .fini = _nouveau_devinit_fini,
+ },
+ .pll_set = nva3_devinit_pll_set,
+ .disable = nvaf_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
index 19e265bf4574..fa7e63766b1b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
@@ -22,12 +22,12 @@
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "nv50.h"
static int
nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
{
- struct nvc0_devinit_priv *priv = (void *)devinit;
+ struct nv50_devinit_priv *priv = (void *)devinit;
struct nouveau_bios *bios = nouveau_bios(priv);
struct nvbios_pll info;
int N, fN, M, P;
@@ -59,6 +59,33 @@ nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
return ret;
}
+static u64
+nvc0_devinit_disable(struct nouveau_devinit *devinit)
+{
+ struct nv50_devinit_priv *priv = (void *)devinit;
+ u32 r022500 = nv_rd32(priv, 0x022500);
+ u64 disable = 0ULL;
+
+ if (r022500 & 0x00000001)
+ disable |= (1ULL << NVDEV_ENGINE_DISP);
+
+ if (r022500 & 0x00000002) {
+ disable |= (1ULL << NVDEV_ENGINE_VP);
+ disable |= (1ULL << NVDEV_ENGINE_PPP);
+ }
+
+ if (r022500 & 0x00000004)
+ disable |= (1ULL << NVDEV_ENGINE_BSP);
+ if (r022500 & 0x00000008)
+ disable |= (1ULL << NVDEV_ENGINE_VENC);
+ if (r022500 & 0x00000100)
+ disable |= (1ULL << NVDEV_ENGINE_COPY0);
+ if (r022500 & 0x00000200)
+ disable |= (1ULL << NVDEV_ENGINE_COPY1);
+
+ return disable;
+}
+
static int
nvc0_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -72,19 +99,20 @@ nvc0_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.pll_set = nvc0_devinit_pll_set;
if (nv_rd32(priv, 0x022500) & 0x00000001)
priv->base.post = true;
return 0;
}
-struct nouveau_oclass
-nvc0_devinit_oclass = {
- .handle = NV_SUBDEV(DEVINIT, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nvc0_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0xc0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_devinit_ctor,
.dtor = _nouveau_devinit_dtor,
.init = nv50_devinit_init,
.fini = _nouveau_devinit_fini,
},
-};
+ .pll_set = nvc0_devinit_pll_set,
+ .disable = nvc0_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
index 7d622e2b0171..822a2fbf44a5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
@@ -6,20 +6,32 @@
#include <subdev/clock/pll.h>
#include <subdev/devinit.h>
-void nv04_devinit_dtor(struct nouveau_object *);
-int nv04_devinit_init(struct nouveau_object *);
-int nv04_devinit_fini(struct nouveau_object *, bool);
-int nv04_devinit_pll_set(struct nouveau_devinit *, u32, u32);
-
-void setPLL_single(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
-void setPLL_double_highregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
-void setPLL_double_lowregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
-
-
-struct nv50_devinit_priv {
- struct nouveau_devinit base;
+struct nouveau_devinit_impl {
+ struct nouveau_oclass base;
+ void (*meminit)(struct nouveau_devinit *);
+ int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
+ u64 (*disable)(struct nouveau_devinit *);
};
-int nv50_devinit_init(struct nouveau_object *);
+#define nouveau_devinit_create(p,e,o,d) \
+ nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_devinit_destroy(p) ({ \
+ struct nouveau_devinit *d = (p); \
+ _nouveau_devinit_dtor(nv_object(d)); \
+})
+#define nouveau_devinit_init(p) ({ \
+ struct nouveau_devinit *d = (p); \
+ _nouveau_devinit_init(nv_object(d)); \
+})
+#define nouveau_devinit_fini(p,s) ({ \
+ struct nouveau_devinit *d = (p); \
+ _nouveau_devinit_fini(nv_object(d), (s)); \
+})
+
+int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+void _nouveau_devinit_dtor(struct nouveau_object *);
+int _nouveau_devinit_init(struct nouveau_object *);
+int _nouveau_devinit_fini(struct nouveau_object *, bool suspend);
#endif
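
The nouveau_devinit_create() wrapper above relies on the usual nouveau trick of passing sizeof(**d): d is a pointer to the chip-specific priv pointer, so **d is the full priv structure and the generic _create_() helper can allocate it without ever knowing its type. A stand-alone sketch of that allocation idiom (obj_create and struct priv are illustrative names, not nouveau API):

#include <stdio.h>
#include <stdlib.h>

struct base { unsigned handle; };

/* chip-specific object: the generic code never sees this type */
struct priv {
        struct base base;
        int chip_specific[8];
};

/* generic allocator: only knows a size and a void ** to fill in */
static int obj_create_(size_t size, void **pobject)
{
        void *obj = calloc(1, size);
        if (!obj)
                return -1;
        *pobject = obj;
        return 0;
}

/* the wrapper forwards sizeof(**d), so callers stay fully typed */
#define obj_create(d) obj_create_(sizeof(**(d)), (void **)(d))

int main(void)
{
        struct priv *priv = NULL;

        if (obj_create(&priv))
                return 1;

        priv->base.handle = 0x50;
        printf("allocated %zu bytes, handle=0x%02x\n",
               sizeof(*priv), priv->base.handle);
        free(priv);
        return 0;
}
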
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
index 34f9605ffee6..66fe959b4f74 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
@@ -25,35 +25,44 @@
#include <subdev/bios.h>
#include "priv.h"
+/* binary driver only executes this path if the condition (a) is true
+ * for any configuration (combination of rammap+ramcfg+timing) that
+ * can be reached on a given card. for now, we will execute the branch
+ * unconditionally in the hope that a "false everywhere" in the bios
+ * tables doesn't actually mean "don't touch this".
+ */
+#define NOTE00(a) 1
+
int
-nouveau_gddr5_calc(struct nouveau_ram *ram)
+nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts)
{
- struct nouveau_bios *bios = nouveau_bios(ram);
- int pd, lf, xd, vh, vr, vo;
- int WL, CL, WR, at, dt, ds;
+ int pd, lf, xd, vh, vr, vo, l3;
+ int WL, CL, WR, at[2], dt, ds;
int rq = ram->freq < 1000000; /* XXX */
- switch (!!ram->ramcfg.data * ram->ramcfg.version) {
+ switch (ram->ramcfg.version) {
case 0x11:
- pd = (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x80) >> 7;
- lf = (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x40) >> 6;
- xd = !(nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x20);
- vh = (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x10) >> 4;
- vr = (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x04) >> 2;
- vo = nv_ro08(bios, ram->ramcfg.data + 0x06) & 0xff;
+ pd = ram->next->bios.ramcfg_11_01_80;
+ lf = ram->next->bios.ramcfg_11_01_40;
+ xd = !ram->next->bios.ramcfg_11_01_20;
+ vh = ram->next->bios.ramcfg_11_02_10;
+ vr = ram->next->bios.ramcfg_11_02_04;
+ vo = ram->next->bios.ramcfg_11_06;
+ l3 = !ram->next->bios.ramcfg_11_07_02;
break;
default:
return -ENOSYS;
}
- switch (!!ram->timing.data * ram->timing.version) {
+ switch (ram->timing.version) {
case 0x20:
- WL = (nv_ro16(bios, ram->timing.data + 0x04) & 0x0f80) >> 7;
- CL = nv_ro08(bios, ram->timing.data + 0x04) & 0x1f;
- WR = nv_ro08(bios, ram->timing.data + 0x0a) & 0x7f;
- at = (nv_ro08(bios, ram->timing.data + 0x2e) & 0xc0) >> 6;
- dt = nv_ro08(bios, ram->timing.data + 0x2e) & 0x03;
- ds = nv_ro08(bios, ram->timing.data + 0x2f) & 0x03;
+ WL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
+ CL = (ram->next->bios.timing[1] & 0x0000001f);
+ WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
+ at[0] = ram->next->bios.timing_20_2e_c0;
+ at[1] = ram->next->bios.timing_20_2e_30;
+ dt = ram->next->bios.timing_20_2e_03;
+ ds = ram->next->bios.timing_20_2f_03;
break;
default:
return -ENOSYS;
@@ -71,13 +80,25 @@ nouveau_gddr5_calc(struct nouveau_ram *ram)
ram->mr[1] &= ~0x0bf;
ram->mr[1] |= (xd & 0x01) << 7;
- ram->mr[1] |= (at & 0x03) << 4;
+ ram->mr[1] |= (at[0] & 0x03) << 4;
ram->mr[1] |= (dt & 0x03) << 2;
ram->mr[1] |= (ds & 0x03) << 0;
+ /* this seems wrong, alternate field used for the broadcast
+ * on nuts vs non-nuts configs.. meh, it matches for now.
+ */
+ ram->mr1_nuts = ram->mr[1];
+ if (nuts) {
+ ram->mr[1] &= ~0x030;
+ ram->mr[1] |= (at[1] & 0x03) << 4;
+ }
+
ram->mr[3] &= ~0x020;
ram->mr[3] |= (rq & 0x01) << 5;
+ ram->mr[5] &= ~0x004;
+ ram->mr[5] |= (l3 << 2);
+
if (!vo)
vo = (ram->mr[6] & 0xff0) >> 4;
if (ram->mr[6] & 0x001)
@@ -86,11 +107,16 @@ nouveau_gddr5_calc(struct nouveau_ram *ram)
ram->mr[6] |= (vo & 0xff) << 4;
ram->mr[6] |= (pd & 0x01) << 0;
- if (!(ram->mr[7] & 0x100))
- vr = 0; /* binary driver does this.. bug? */
- ram->mr[7] &= ~0x188;
- ram->mr[7] |= (vr & 0x01) << 8;
+ if (NOTE00(vr)) {
+ ram->mr[7] &= ~0x300;
+ ram->mr[7] |= (vr & 0x03) << 8;
+ }
+ ram->mr[7] &= ~0x088;
ram->mr[7] |= (vh & 0x01) << 7;
ram->mr[7] |= (lf & 0x01) << 3;
+
+ ram->mr[8] &= ~0x003;
+ ram->mr[8] |= (WR & 0x10) >> 3;
+ ram->mr[8] |= (CL & 0x10) >> 4;
return 0;
}
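
Two details in the reworked nouveau_gddr5_calc() are easy to misread. First, the NOTE00() macro deliberately evaluates to 1, so the conditional structure of the binary driver is kept visible in the source even though the branch is currently taken unconditionally. Second, the new MR8 handling packs the overflow bits of CL and WR (values of 16 and above) into the two low bits of MR8. A tiny stand-alone check of just that packing, with example timing values (the shift amounts are copied from the code above, everything else is simplified):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int CL = 18, WR = 17;   /* example timings with CL and WR >= 16 */
        uint32_t mr8 = 0;

        /* same expressions as above: WR bit 4 -> MR8 bit 1,
         * CL bit 4 -> MR8 bit 0 */
        mr8 &= ~0x003;
        mr8 |= (WR & 0x10) >> 3;
        mr8 |= (CL & 0x10) >> 4;

        printf("CL=%d WR=%d -> MR8 low bits = 0x%x\n", CL, WR, mr8);
        return 0;
}
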
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index e5fc37c4caac..45470e1f0385 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -33,6 +33,21 @@ nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
return likely((nvc0_pte_storage_type_map[memtype] != 0xff));
}
+static void
+nvc0_fb_intr(struct nouveau_subdev *subdev)
+{
+ struct nvc0_fb_priv *priv = (void *)subdev;
+ u32 intr = nv_rd32(priv, 0x000100);
+ if (intr & 0x08000000) {
+ nv_debug(priv, "PFFB intr\n");
+ intr &= ~0x08000000;
+ }
+ if (intr & 0x00002000) {
+ nv_debug(priv, "PBFB intr\n");
+ intr &= ~0x00002000;
+ }
+}
+
int
nvc0_fb_init(struct nouveau_object *object)
{
@@ -86,6 +101,7 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return -EFAULT;
}
+ nv_subdev(priv)->intr = nvc0_fb_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index 493125214e88..edaf95dee612 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -34,7 +34,7 @@ extern struct nouveau_oclass nvc0_ram_oclass;
extern struct nouveau_oclass nve0_ram_oclass;
int nouveau_sddr3_calc(struct nouveau_ram *ram);
-int nouveau_gddr5_calc(struct nouveau_ram *ram);
+int nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts);
#define nouveau_fb_create(p,e,c,d) \
nouveau_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index 76762a17d89c..c7fdb3a9e88b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -70,13 +70,11 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
struct nv50_ramseq *hwsq = &ram->hwsq;
struct nvbios_perfE perfE;
struct nvbios_pll mpll;
- struct bit_entry M;
struct {
u32 data;
u8 size;
} ramcfg, timing;
- u8 ver, hdr, cnt, strap;
- u32 data;
+ u8 ver, hdr, cnt, len, strap;
int N1, M1, N2, M2, P;
int ret, i;
@@ -93,16 +91,7 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
} while (perfE.memory < freq);
/* locate specific data set for the attached memory */
- if (bit_entry(bios, 'M', &M) || M.version != 1 || M.length < 5) {
- nv_error(pfb, "invalid/missing memory table\n");
- return -EINVAL;
- }
-
- strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
- data = nv_ro16(bios, M.offset + 3);
- if (data)
- strap = nv_ro08(bios, data + strap);
-
+ strap = nvbios_ramcfg_index(bios);
if (strap >= cnt) {
nv_error(pfb, "invalid ramcfg strap\n");
return -EINVAL;
@@ -113,7 +102,8 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
/* lookup memory timings, if bios says they're present */
strap = nv_ro08(bios, ramcfg.data + 0x01);
if (strap != 0xff) {
- timing.data = nvbios_timing_entry(bios, strap, &ver, &hdr);
+ timing.data = nvbios_timingEe(bios, strap, &ver, &hdr,
+ &cnt, &len);
if (!timing.data || ver != 0x10 || hdr < 0x12) {
nv_error(pfb, "invalid/missing timing entry "
"%02x %04x %02x %02x\n",
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
index f6292cd9207c..f4ae8aa46a25 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
@@ -79,8 +79,7 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
struct nva3_ram *ram = (void *)pfb->ram;
struct nva3_ramfuc *fuc = &ram->fuc;
struct nva3_clock_info mclk;
- struct bit_entry M;
- u8 ver, cnt, strap;
+ u8 ver, cnt, len, strap;
u32 data;
struct {
u32 data;
@@ -91,24 +90,15 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
int ret;
/* lookup memory config data relevant to the target frequency */
- rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
- &cnt, &ramcfg.size);
+ rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
+ &cnt, &ramcfg.size);
if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
nv_error(pfb, "invalid/missing rammap entry\n");
return -EINVAL;
}
/* locate specific data set for the attached memory */
- if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
- nv_error(pfb, "invalid/missing memory table\n");
- return -EINVAL;
- }
-
- strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
- data = nv_ro16(bios, M.offset + 1);
- if (data)
- strap = nv_ro08(bios, data + strap);
-
+ strap = nvbios_ramcfg_index(bios);
if (strap >= cnt) {
nv_error(pfb, "invalid ramcfg strap\n");
return -EINVAL;
@@ -123,8 +113,8 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
/* lookup memory timings, if bios says they're present */
strap = nv_ro08(bios, ramcfg.data + 0x01);
if (strap != 0xff) {
- timing.data = nvbios_timing_entry(bios, strap, &ver,
- &timing.size);
+ timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
+ &cnt, &len);
if (!timing.data || ver != 0x10 || timing.size < 0x19) {
nv_error(pfb, "invalid/missing timing entry\n");
return -EINVAL;
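
The block removed from ramnv50.c, ramnva3.c and ramnvc0.c (and from nve0_ram_calc further down) was the open-coded ramcfg strap lookup: read bits 5:2 of register 0x101000 and, if the BIOS 'M' table provides a translation table, remap the raw strap through it. That logic is now centralised in nvbios_ramcfg_index(). A plain C sketch of the old derivation, using a hard-coded register value and translation table instead of real hardware/VBIOS accesses:

#include <stdio.h>
#include <stdint.h>

/* pretend value of register 0x101000; on real hardware this is a read */
#define REG_101000 0x00000014u

int main(void)
{
        /* optional per-board translation table; NULL when the BIOS has none */
        static const uint8_t xlat[16] = {
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
        };
        const uint8_t *table = xlat;

        /* raw strap index = bits 5:2 of 0x101000, as in the removed code */
        uint8_t strap = (REG_101000 & 0x0000003c) >> 2;

        if (table)
                strap = table[strap];

        printf("ramcfg strap index = %u\n", strap);
        return 0;
}
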
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index f464547c6bab..0391b824ee76 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -23,7 +23,6 @@
*/
#include <subdev/bios.h>
-#include <subdev/bios/bit.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
@@ -134,9 +133,7 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
struct nouveau_bios *bios = nouveau_bios(pfb);
struct nvc0_ram *ram = (void *)pfb->ram;
struct nvc0_ramfuc *fuc = &ram->fuc;
- struct bit_entry M;
- u8 ver, cnt, strap;
- u32 data;
+ u8 ver, cnt, len, strap;
struct {
u32 data;
u8 size;
@@ -147,24 +144,15 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
int ret;
/* lookup memory config data relevant to the target frequency */
- rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
- &cnt, &ramcfg.size);
+ rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
+ &cnt, &ramcfg.size);
if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
nv_error(pfb, "invalid/missing rammap entry\n");
return -EINVAL;
}
/* locate specific data set for the attached memory */
- if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
- nv_error(pfb, "invalid/missing memory table\n");
- return -EINVAL;
- }
-
- strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
- data = nv_ro16(bios, M.offset + 1);
- if (data)
- strap = nv_ro08(bios, data + strap);
-
+ strap = nvbios_ramcfg_index(bios);
if (strap >= cnt) {
nv_error(pfb, "invalid ramcfg strap\n");
return -EINVAL;
@@ -179,8 +167,8 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
/* lookup memory timings, if bios says they're present */
strap = nv_ro08(bios, ramcfg.data + 0x01);
if (strap != 0xff) {
- timing.data = nvbios_timing_entry(bios, strap, &ver,
- &timing.size);
+ timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
+ &cnt, &len);
if (!timing.data || ver != 0x10 || timing.size < 0x19) {
nv_error(pfb, "invalid/missing timing entry\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
index bc86cfd084f6..3257c522a021 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -25,7 +25,6 @@
#include <subdev/gpio.h>
#include <subdev/bios.h>
-#include <subdev/bios/bit.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/init.h>
#include <subdev/bios/rammap.h>
@@ -42,6 +41,14 @@
#include "ramfuc.h"
+/* binary driver only executes this path if the condition (a) is true
+ * for any configuration (combination of rammap+ramcfg+timing) that
+ * can be reached on a given card. for now, we will execute the branch
+ * unconditionally in the hope that a "false everywhere" in the bios
+ * tables doesn't actually mean "don't touch this".
+ */
+#define NOTE00(a) 1
+
struct nve0_ramfuc {
struct ramfuc base;
@@ -104,7 +111,9 @@ struct nve0_ramfuc {
struct ramfuc_reg r_mr[16]; /* MR0 - MR8, MR15 */
struct ramfuc_reg r_0x62c000;
+
struct ramfuc_reg r_0x10f200;
+
struct ramfuc_reg r_0x10f210;
struct ramfuc_reg r_0x10f310;
struct ramfuc_reg r_0x10f314;
@@ -118,12 +127,17 @@ struct nve0_ramfuc {
struct ramfuc_reg r_0x10f65c;
struct ramfuc_reg r_0x10f6bc;
struct ramfuc_reg r_0x100710;
- struct ramfuc_reg r_0x10f750;
+ struct ramfuc_reg r_0x100750;
};
struct nve0_ram {
struct nouveau_ram base;
struct nve0_ramfuc fuc;
+
+ u32 parts;
+ u32 pmask;
+ u32 pnuts;
+
int from;
int mode;
int N1, fN1, M1, P1;
@@ -134,17 +148,17 @@ struct nve0_ram {
* GDDR5
******************************************************************************/
static void
-train(struct nve0_ramfuc *fuc, u32 magic)
+nve0_ram_train(struct nve0_ramfuc *fuc, u32 mask, u32 data)
{
struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
- struct nouveau_fb *pfb = nouveau_fb(ram);
- const int mc = nv_rd32(pfb, 0x02243c);
- int i;
-
- ram_mask(fuc, 0x10f910, 0xbc0e0000, magic);
- ram_mask(fuc, 0x10f914, 0xbc0e0000, magic);
- for (i = 0; i < mc; i++) {
- const u32 addr = 0x110974 + (i * 0x1000);
+ u32 addr = 0x110974, i;
+
+ ram_mask(fuc, 0x10f910, mask, data);
+ ram_mask(fuc, 0x10f914, mask, data);
+
+ for (i = 0; (data & 0x80000000) && i < ram->parts; addr += 0x1000, i++) {
+ if (ram->pmask & (1 << i))
+ continue;
ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
}
}
@@ -199,12 +213,12 @@ r1373f4_init(struct nve0_ramfuc *fuc)
}
static void
-r1373f4_fini(struct nve0_ramfuc *fuc, u32 ramcfg)
+r1373f4_fini(struct nve0_ramfuc *fuc)
{
struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
- struct nouveau_bios *bios = nouveau_bios(ram);
- u8 v0 = (nv_ro08(bios, ramcfg + 0x03) & 0xc0) >> 6;
- u8 v1 = (nv_ro08(bios, ramcfg + 0x03) & 0x30) >> 4;
+ struct nouveau_ram_data *next = ram->base.next;
+ u8 v0 = next->bios.ramcfg_11_03_c0;
+ u8 v1 = next->bios.ramcfg_11_03_30;
u32 tmp;
tmp = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
@@ -220,25 +234,46 @@ r1373f4_fini(struct nve0_ramfuc *fuc, u32 ramcfg)
ram_mask(fuc, 0x10f800, 0x00000030, (v0 ^ v1) << 4);
}
+static void
+nve0_ram_nuts(struct nve0_ram *ram, struct ramfuc_reg *reg,
+ u32 _mask, u32 _data, u32 _copy)
+{
+ struct nve0_fb_priv *priv = (void *)nouveau_fb(ram);
+ struct ramfuc *fuc = &ram->fuc.base;
+ u32 addr = 0x110000 + (reg->addr[0] & 0xfff);
+ u32 mask = _mask | _copy;
+ u32 data = (_data & _mask) | (reg->data & _copy);
+ u32 i;
+
+ for (i = 0; i < 16; i++, addr += 0x1000) {
+ if (ram->pnuts & (1 << i)) {
+ u32 prev = nv_rd32(priv, addr);
+ u32 next = (prev & ~mask) | data;
+ nouveau_memx_wr32(fuc->memx, addr, next);
+ }
+ }
+}
+#define ram_nuts(s,r,m,d,c) \
+ nve0_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c))
+
static int
nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
{
- struct nouveau_bios *bios = nouveau_bios(pfb);
struct nve0_ram *ram = (void *)pfb->ram;
struct nve0_ramfuc *fuc = &ram->fuc;
- const u32 rammap = ram->base.rammap.data;
- const u32 ramcfg = ram->base.ramcfg.data;
- const u32 timing = ram->base.timing.data;
- int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
- int mv = 1; /*XXX*/
+ struct nouveau_ram_data *next = ram->base.next;
+ int vc = !(next->bios.ramcfg_11_02_08);
+ int mv = !(next->bios.ramcfg_11_02_04);
u32 mask, data;
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_wr32(fuc, 0x62c000, 0x0f0f0000);
/* MR1: turn termination on early, for some reason.. */
- if ((ram->base.mr[1] & 0x03c) != 0x030)
+ if ((ram->base.mr[1] & 0x03c) != 0x030) {
ram_mask(fuc, mr[1], 0x03c, ram->base.mr[1] & 0x03c);
+ ram_nuts(ram, mr[1], 0x03c, ram->base.mr1_nuts & 0x03c, 0x000);
+ }
if (vc == 1 && ram_have(fuc, gpio2E)) {
u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
@@ -250,8 +285,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
- ram_mask(fuc, 0x10f914, 0x01020000, 0x000c0000);
- ram_mask(fuc, 0x10f910, 0x01020000, 0x000c0000);
+ nve0_ram_train(fuc, 0x01020000, 0x000c0000);
ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
ram_nsec(fuc, 1000);
@@ -280,28 +314,28 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
if (1) {
data |= 0x800807e0;
- switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
- case 0xc0: data &= ~0x00000040; break;
- case 0x80: data &= ~0x00000100; break;
- case 0x40: data &= ~0x80000000; break;
- case 0x00: data &= ~0x00000400; break;
+ switch (next->bios.ramcfg_11_03_c0) {
+ case 3: data &= ~0x00000040; break;
+ case 2: data &= ~0x00000100; break;
+ case 1: data &= ~0x80000000; break;
+ case 0: data &= ~0x00000400; break;
}
- switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
- case 0x30: data &= ~0x00000020; break;
- case 0x20: data &= ~0x00000080; break;
- case 0x10: data &= ~0x00080000; break;
- case 0x00: data &= ~0x00000200; break;
+ switch (next->bios.ramcfg_11_03_30) {
+ case 3: data &= ~0x00000020; break;
+ case 2: data &= ~0x00000080; break;
+ case 1: data &= ~0x00080000; break;
+ case 0: data &= ~0x00000200; break;
}
}
- if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
+ if (next->bios.ramcfg_11_02_80)
mask |= 0x03000000;
- if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
+ if (next->bios.ramcfg_11_02_40)
mask |= 0x00002000;
- if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
+ if (next->bios.ramcfg_11_07_10)
mask |= 0x00004000;
- if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
+ if (next->bios.ramcfg_11_07_08)
mask |= 0x00000003;
else {
mask |= 0x34000000;
@@ -314,18 +348,18 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
if (ram->from == 2 && ram->mode != 2) {
ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
- ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
+ ram_mask(fuc, 0x10f200, 0x18008000, 0x00008000);
ram_mask(fuc, 0x10f800, 0x00000000, 0x00000004);
ram_mask(fuc, 0x10f830, 0x00008000, 0x01040010);
ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
r1373f4_init(fuc);
ram_mask(fuc, 0x1373f0, 0x00000002, 0x00000001);
- r1373f4_fini(fuc, ramcfg);
+ r1373f4_fini(fuc);
ram_mask(fuc, 0x10f830, 0x00c00000, 0x00240001);
} else
if (ram->from != 2 && ram->mode != 2) {
r1373f4_init(fuc);
- r1373f4_fini(fuc, ramcfg);
+ r1373f4_fini(fuc);
}
if (ram_have(fuc, gpioMV)) {
@@ -336,49 +370,54 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
}
}
- if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
- (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
+ if ( (next->bios.ramcfg_11_02_40) ||
+ (next->bios.ramcfg_11_07_10)) {
ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
ram_nsec(fuc, 20000);
}
if (ram->from != 2 && ram->mode == 2) {
+ if (0 /*XXX: Titan */)
+ ram_mask(fuc, 0x10f200, 0x18000000, 0x18000000);
ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
ram_mask(fuc, 0x1373f0, 0x00000000, 0x00000002);
ram_mask(fuc, 0x10f830, 0x00800001, 0x00408010);
r1373f4_init(fuc);
- r1373f4_fini(fuc, ramcfg);
+ r1373f4_fini(fuc);
ram_mask(fuc, 0x10f808, 0x00000000, 0x00080000);
ram_mask(fuc, 0x10f200, 0x00808000, 0x00800000);
} else
if (ram->from == 2 && ram->mode == 2) {
ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
r1373f4_init(fuc);
- r1373f4_fini(fuc, ramcfg);
+ r1373f4_fini(fuc);
}
if (ram->mode != 2) /*XXX*/ {
- if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
+ if (next->bios.ramcfg_11_07_40)
ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
}
- data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
- ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
- ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
- ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+ ram_wr32(fuc, 0x10f65c, 0x00000011 * next->bios.rammap_11_11_0c);
+ ram_wr32(fuc, 0x10f6b8, 0x01010101 * next->bios.ramcfg_11_09);
+ ram_wr32(fuc, 0x10f6bc, 0x01010101 * next->bios.ramcfg_11_09);
- data = nv_ro08(bios, ramcfg + 0x04);
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
- ram_wr32(fuc, 0x10f698, 0x01010101 * data);
- ram_wr32(fuc, 0x10f69c, 0x01010101 * data);
+ if (!next->bios.ramcfg_11_07_08 && !next->bios.ramcfg_11_07_04) {
+ ram_wr32(fuc, 0x10f698, 0x01010101 * next->bios.ramcfg_11_04);
+ ram_wr32(fuc, 0x10f69c, 0x01010101 * next->bios.ramcfg_11_04);
+ } else
+ if (!next->bios.ramcfg_11_07_08) {
+ ram_wr32(fuc, 0x10f698, 0x00000000);
+ ram_wr32(fuc, 0x10f69c, 0x00000000);
}
if (ram->mode != 2) {
- u32 temp = ram_rd32(fuc, 0x10f694) & ~0xff00ff00;
- ram_wr32(fuc, 0x10f694, temp | (0x01000100 * data));
+ u32 data = 0x01000100 * next->bios.ramcfg_11_04;
+ ram_nuke(fuc, 0x10f694);
+ ram_mask(fuc, 0x10f694, 0xff00ff00, data);
}
- if (ram->mode == 2 && (nv_ro08(bios, ramcfg + 0x08) & 0x10))
+ if (ram->mode == 2 && (next->bios.ramcfg_11_08_10))
data = 0x00000080;
else
data = 0x00000000;
@@ -386,19 +425,19 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
mask = 0x00070000;
data = 0x00000000;
- if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
+ if (!(next->bios.ramcfg_11_02_80))
data |= 0x03000000;
- if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
+ if (!(next->bios.ramcfg_11_02_40))
data |= 0x00002000;
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
+ if (!(next->bios.ramcfg_11_07_10))
data |= 0x00004000;
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
+ if (!(next->bios.ramcfg_11_07_08))
data |= 0x00000003;
else
data |= 0x74000000;
ram_mask(fuc, 0x10f824, mask, data);
- if (nv_ro08(bios, ramcfg + 0x01) & 0x08)
+ if (next->bios.ramcfg_11_01_08)
data = 0x00000000;
else
data = 0x00001000;
@@ -409,61 +448,90 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x10f670, 0x80000000, 0x00000000);
}
- if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
+ if (next->bios.ramcfg_11_08_01)
data = 0x00100000;
else
data = 0x00000000;
ram_mask(fuc, 0x10f82c, 0x00100000, data);
data = 0x00000000;
- if (nv_ro08(bios, ramcfg + 0x08) & 0x08)
+ if (next->bios.ramcfg_11_08_08)
data |= 0x00002000;
- if (nv_ro08(bios, ramcfg + 0x08) & 0x04)
+ if (next->bios.ramcfg_11_08_04)
data |= 0x00001000;
- if (nv_ro08(bios, ramcfg + 0x08) & 0x02)
+ if (next->bios.ramcfg_11_08_02)
data |= 0x00004000;
ram_mask(fuc, 0x10f830, 0x00007000, data);
/* PFB timing */
- ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
- ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
- ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
- ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
- ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
- ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
- ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
- ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
- ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
- ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
- ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
-
- data = (nv_ro08(bios, ramcfg + 0x02) & 0x03) << 8;
- if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
- data |= 0x70000000;
- ram_mask(fuc, 0x10f604, 0x70000300, data);
-
- data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
- if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
- data |= 0x00000100;
- ram_mask(fuc, 0x10f614, 0x70000000, data);
-
- data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
- if (nv_ro08(bios, ramcfg + 0x01) & 0x02)
- data |= 0x00000100;
- ram_mask(fuc, 0x10f610, 0x70000000, data);
+ ram_mask(fuc, 0x10f248, 0xffffffff, next->bios.timing[10]);
+ ram_mask(fuc, 0x10f290, 0xffffffff, next->bios.timing[0]);
+ ram_mask(fuc, 0x10f294, 0xffffffff, next->bios.timing[1]);
+ ram_mask(fuc, 0x10f298, 0xffffffff, next->bios.timing[2]);
+ ram_mask(fuc, 0x10f29c, 0xffffffff, next->bios.timing[3]);
+ ram_mask(fuc, 0x10f2a0, 0xffffffff, next->bios.timing[4]);
+ ram_mask(fuc, 0x10f2a4, 0xffffffff, next->bios.timing[5]);
+ ram_mask(fuc, 0x10f2a8, 0xffffffff, next->bios.timing[6]);
+ ram_mask(fuc, 0x10f2ac, 0xffffffff, next->bios.timing[7]);
+ ram_mask(fuc, 0x10f2cc, 0xffffffff, next->bios.timing[8]);
+ ram_mask(fuc, 0x10f2e8, 0xffffffff, next->bios.timing[9]);
+
+ data = mask = 0x00000000;
+ if (NOTE00(ramcfg_08_20)) {
+ if (next->bios.ramcfg_11_08_20)
+ data |= 0x01000000;
+ mask |= 0x01000000;
+ }
+ ram_mask(fuc, 0x10f200, mask, data);
+
+ data = mask = 0x00000000;
+ if (NOTE00(ramcfg_02_03 != 0)) {
+ data |= (next->bios.ramcfg_11_02_03) << 8;
+ mask |= 0x00000300;
+ }
+ if (NOTE00(ramcfg_01_10)) {
+ if (next->bios.ramcfg_11_01_10)
+ data |= 0x70000000;
+ mask |= 0x70000000;
+ }
+ ram_mask(fuc, 0x10f604, mask, data);
+
+ data = mask = 0x00000000;
+ if (NOTE00(timing_30_07 != 0)) {
+ data |= (next->bios.timing_20_30_07) << 28;
+ mask |= 0x70000000;
+ }
+ if (NOTE00(ramcfg_01_01)) {
+ if (next->bios.ramcfg_11_01_01)
+ data |= 0x00000100;
+ mask |= 0x00000100;
+ }
+ ram_mask(fuc, 0x10f614, mask, data);
+
+ data = mask = 0x00000000;
+ if (NOTE00(timing_30_07 != 0)) {
+ data |= (next->bios.timing_20_30_07) << 28;
+ mask |= 0x70000000;
+ }
+ if (NOTE00(ramcfg_01_02)) {
+ if (next->bios.ramcfg_11_01_02)
+ data |= 0x00000100;
+ mask |= 0x00000100;
+ }
+ ram_mask(fuc, 0x10f610, mask, data);
mask = 0x33f00000;
data = 0x00000000;
- if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
+ if (!(next->bios.ramcfg_11_01_04))
data |= 0x20200000;
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+ if (!(next->bios.ramcfg_11_07_80))
data |= 0x12800000;
/*XXX: see note above about there probably being some condition
* for the 10f824 stuff that uses ramcfg 3...
*/
- if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
- if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+ if ( (next->bios.ramcfg_11_03_f0)) {
+ if (next->bios.rammap_11_08_0c) {
+ if (!(next->bios.ramcfg_11_07_80))
mask |= 0x00000020;
else
data |= 0x00000020;
@@ -476,49 +544,53 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x10f808, mask, data);
- data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
- ram_wr32(fuc, 0x10f870, 0x11111111 * data);
+ ram_wr32(fuc, 0x10f870, 0x11111111 * next->bios.ramcfg_11_03_0f);
- data = nv_ro08(bios, ramcfg + 0x02) & 0x03;
- if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
- data |= 0x00000004;
- if ((nv_rd32(bios, 0x100770) & 0x00000004) != (data & 0x00000004)) {
- ram_wr32(fuc, 0x10f750, 0x04000009);
+ data = mask = 0x00000000;
+ if (NOTE00(ramcfg_02_03 != 0)) {
+ data |= next->bios.ramcfg_11_02_03;
+ mask |= 0x00000003;
+ }
+ if (NOTE00(ramcfg_01_10)) {
+ if (next->bios.ramcfg_11_01_10)
+ data |= 0x00000004;
+ mask |= 0x00000004;
+ }
+
+ if ((ram_mask(fuc, 0x100770, mask, data) & mask & 4) != (data & 4)) {
+ ram_mask(fuc, 0x100750, 0x00000008, 0x00000008);
ram_wr32(fuc, 0x100710, 0x00000000);
ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000);
}
- ram_mask(fuc, 0x100770, 0x00000007, data);
- data = (nv_ro08(bios, timing + 0x30) & 0x07) << 8;
- if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
+ data = (next->bios.timing_20_30_07) << 8;
+ if (next->bios.ramcfg_11_01_01)
data |= 0x80000000;
ram_mask(fuc, 0x100778, 0x00000700, data);
- data = nv_ro16(bios, timing + 0x2c);
- ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) << 4);
- ram_mask(fuc, 0x10f24c, 0x7f000000, (data & 0x1fc0) << 18);
-
- data = nv_ro08(bios, timing + 0x30);
- ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
+ ram_mask(fuc, 0x10f250, 0x000003f0, next->bios.timing_20_2c_003f << 4);
+ data = (next->bios.timing[10] & 0x7f000000) >> 24;
+ if (data < next->bios.timing_20_2c_1fc0)
+ data = next->bios.timing_20_2c_1fc0;
+ ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
+ ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8 << 16);
- data = nv_ro16(bios, timing + 0x31);
- ram_mask(fuc, 0x10fec4, 0x041e0f07, (data & 0x0800) << 15 |
- (data & 0x0780) << 10 |
- (data & 0x0078) << 5 |
- (data & 0x0007));
- ram_mask(fuc, 0x10fec8, 0x00000027, (data & 0x8000) >> 10 |
- (data & 0x7000) >> 12);
+ ram_mask(fuc, 0x10fec4, 0x041e0f07, next->bios.timing_20_31_0800 << 26 |
+ next->bios.timing_20_31_0780 << 17 |
+ next->bios.timing_20_31_0078 << 8 |
+ next->bios.timing_20_31_0007);
+ ram_mask(fuc, 0x10fec8, 0x00000027, next->bios.timing_20_31_8000 << 5 |
+ next->bios.timing_20_31_7000);
ram_wr32(fuc, 0x10f090, 0x4000007e);
- ram_nsec(fuc, 1000);
+ ram_nsec(fuc, 2000);
ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
- ram_nsec(fuc, 2000);
ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
- if ((nv_ro08(bios, ramcfg + 0x08) & 0x10) && (ram->mode == 2) /*XXX*/) {
+ if ((next->bios.ramcfg_11_08_10) && (ram->mode == 2) /*XXX*/) {
u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
- train(fuc, 0xa4010000); /*XXX*/
+ nve0_ram_train(fuc, 0xbc0e0000, 0xa4010000); /*XXX*/
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f294, temp);
}
@@ -528,7 +600,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, mr[8], 0xfff, ram->base.mr[8]);
ram_nsec(fuc, 1000);
ram_mask(fuc, mr[1], 0xfff, ram->base.mr[1]);
- ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5]);
+ ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5] & ~0x004); /* LP3 later */
ram_mask(fuc, mr[6], 0xfff, ram->base.mr[6]);
ram_mask(fuc, mr[7], 0xfff, ram->base.mr[7]);
@@ -544,12 +616,13 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
ram_nsec(fuc, 1000);
+ ram_nuts(ram, 0x10f200, 0x18808800, 0x00000000, 0x18808800);
data = ram_rd32(fuc, 0x10f978);
data &= ~0x00046144;
data |= 0x0000000b;
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x04))
+ if (!(next->bios.ramcfg_11_07_08)) {
+ if (!(next->bios.ramcfg_11_07_04))
data |= 0x0000200c;
else
data |= 0x00000000;
@@ -563,44 +636,43 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_wr32(fuc, 0x10f830, data);
}
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
+ if (!(next->bios.ramcfg_11_07_08)) {
data = 0x88020000;
- if ( (nv_ro08(bios, ramcfg + 0x07) & 0x04))
+ if ( (next->bios.ramcfg_11_07_04))
data |= 0x10000000;
- if (!(nv_ro08(bios, rammap + 0x08) & 0x10))
+ if (!(next->bios.rammap_11_08_10))
data |= 0x00080000;
} else {
data = 0xa40e0000;
}
- train(fuc, data);
- ram_nsec(fuc, 1000);
+ nve0_ram_train(fuc, 0xbc0f0000, data);
+ if (1) /* XXX: not always? */
+ ram_nsec(fuc, 1000);
if (ram->mode == 2) { /*XXX*/
ram_mask(fuc, 0x10f800, 0x00000004, 0x00000004);
}
- /* MR5: (re)enable LP3 if necessary
- * XXX: need to find the switch, keeping off for now
- */
- ram_mask(fuc, mr[5], 0x00000004, 0x00000000);
+ /* LP3 */
+ if (ram_mask(fuc, mr[5], 0x004, ram->base.mr[5]) != ram->base.mr[5])
+ ram_nsec(fuc, 1000);
if (ram->mode != 2) {
ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
}
- if (nv_ro08(bios, ramcfg + 0x07) & 0x02) {
- ram_mask(fuc, 0x10f910, 0x80020000, 0x01000000);
- ram_mask(fuc, 0x10f914, 0x80020000, 0x01000000);
- }
+ if (next->bios.ramcfg_11_07_02)
+ nve0_ram_train(fuc, 0x80020000, 0x01000000);
ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
- if (nv_ro08(bios, rammap + 0x08) & 0x01)
+ if (next->bios.rammap_11_08_01)
data = 0x00000800;
else
data = 0x00000000;
ram_mask(fuc, 0x10f200, 0x00000800, data);
+ ram_nuts(ram, 0x10f200, 0x18808800, data, 0x18808800);
return 0;
}
@@ -611,17 +683,14 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
static int
nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
{
- struct nouveau_bios *bios = nouveau_bios(pfb);
struct nve0_ram *ram = (void *)pfb->ram;
struct nve0_ramfuc *fuc = &ram->fuc;
const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
const u32 runk0 = ram->fN1 << 16;
const u32 runk1 = ram->fN1;
- const u32 rammap = ram->base.rammap.data;
- const u32 ramcfg = ram->base.ramcfg.data;
- const u32 timing = ram->base.timing.data;
- int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
- int mv = 1; /*XXX*/
+ struct nouveau_ram_data *next = ram->base.next;
+ int vc = !(next->bios.ramcfg_11_02_08);
+ int mv = !(next->bios.ramcfg_11_02_04);
u32 mask, data;
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
@@ -636,7 +705,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
}
ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
- if ((nv_ro08(bios, ramcfg + 0x03) & 0xf0))
+ if ((next->bios.ramcfg_11_03_f0))
ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);
ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
@@ -661,28 +730,28 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
if (1) {
mask |= 0x800807e0;
data |= 0x800807e0;
- switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
- case 0xc0: data &= ~0x00000040; break;
- case 0x80: data &= ~0x00000100; break;
- case 0x40: data &= ~0x80000000; break;
- case 0x00: data &= ~0x00000400; break;
+ switch (next->bios.ramcfg_11_03_c0) {
+ case 3: data &= ~0x00000040; break;
+ case 2: data &= ~0x00000100; break;
+ case 1: data &= ~0x80000000; break;
+ case 0: data &= ~0x00000400; break;
}
- switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
- case 0x30: data &= ~0x00000020; break;
- case 0x20: data &= ~0x00000080; break;
- case 0x10: data &= ~0x00080000; break;
- case 0x00: data &= ~0x00000200; break;
+ switch (next->bios.ramcfg_11_03_30) {
+ case 3: data &= ~0x00000020; break;
+ case 2: data &= ~0x00000080; break;
+ case 1: data &= ~0x00080000; break;
+ case 0: data &= ~0x00000200; break;
}
}
- if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
+ if (next->bios.ramcfg_11_02_80)
mask |= 0x03000000;
- if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
+ if (next->bios.ramcfg_11_02_40)
mask |= 0x00002000;
- if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
+ if (next->bios.ramcfg_11_07_10)
mask |= 0x00004000;
- if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
+ if (next->bios.ramcfg_11_07_08)
mask |= 0x00000003;
else
mask |= 0x14000000;
@@ -692,7 +761,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
data = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
- data |= (nv_ro08(bios, ramcfg + 0x03) & 0x30) << 12;
+ data |= (next->bios.ramcfg_11_03_30) << 12;
ram_wr32(fuc, 0x1373ec, data);
ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
@@ -724,68 +793,67 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
}
}
- if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
- (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
+ if ( (next->bios.ramcfg_11_02_40) ||
+ (next->bios.ramcfg_11_07_10)) {
ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
ram_nsec(fuc, 20000);
}
if (ram->mode != 2) /*XXX*/ {
- if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
+ if (next->bios.ramcfg_11_07_40)
ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
}
- data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
- ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
- ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
- ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+ ram_wr32(fuc, 0x10f65c, 0x00000011 * next->bios.rammap_11_11_0c);
+ ram_wr32(fuc, 0x10f6b8, 0x01010101 * next->bios.ramcfg_11_09);
+ ram_wr32(fuc, 0x10f6bc, 0x01010101 * next->bios.ramcfg_11_09);
mask = 0x00010000;
data = 0x00000000;
- if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
+ if (!(next->bios.ramcfg_11_02_80))
data |= 0x03000000;
- if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
+ if (!(next->bios.ramcfg_11_02_40))
data |= 0x00002000;
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
+ if (!(next->bios.ramcfg_11_07_10))
data |= 0x00004000;
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
+ if (!(next->bios.ramcfg_11_07_08))
data |= 0x00000003;
else
data |= 0x14000000;
ram_mask(fuc, 0x10f824, mask, data);
ram_nsec(fuc, 1000);
- if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
+ if (next->bios.ramcfg_11_08_01)
data = 0x00100000;
else
data = 0x00000000;
ram_mask(fuc, 0x10f82c, 0x00100000, data);
/* PFB timing */
- ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
- ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
- ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
- ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
- ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
- ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
- ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
- ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
- ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
- ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
- ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
+ ram_mask(fuc, 0x10f248, 0xffffffff, next->bios.timing[10]);
+ ram_mask(fuc, 0x10f290, 0xffffffff, next->bios.timing[0]);
+ ram_mask(fuc, 0x10f294, 0xffffffff, next->bios.timing[1]);
+ ram_mask(fuc, 0x10f298, 0xffffffff, next->bios.timing[2]);
+ ram_mask(fuc, 0x10f29c, 0xffffffff, next->bios.timing[3]);
+ ram_mask(fuc, 0x10f2a0, 0xffffffff, next->bios.timing[4]);
+ ram_mask(fuc, 0x10f2a4, 0xffffffff, next->bios.timing[5]);
+ ram_mask(fuc, 0x10f2a8, 0xffffffff, next->bios.timing[6]);
+ ram_mask(fuc, 0x10f2ac, 0xffffffff, next->bios.timing[7]);
+ ram_mask(fuc, 0x10f2cc, 0xffffffff, next->bios.timing[8]);
+ ram_mask(fuc, 0x10f2e8, 0xffffffff, next->bios.timing[9]);
mask = 0x33f00000;
data = 0x00000000;
- if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
+ if (!(next->bios.ramcfg_11_01_04))
data |= 0x20200000;
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+ if (!(next->bios.ramcfg_11_07_80))
data |= 0x12800000;
/*XXX: see note above about there probably being some condition
* for the 10f824 stuff that uses ramcfg 3...
*/
- if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
- if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+ if ( (next->bios.ramcfg_11_03_f0)) {
+ if (next->bios.rammap_11_08_0c) {
+ if (!(next->bios.ramcfg_11_07_80))
mask |= 0x00000020;
else
data |= 0x00000020;
@@ -799,21 +867,16 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x10f808, mask, data);
- data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
- ram_wr32(fuc, 0x10f870, 0x11111111 * data);
+ ram_wr32(fuc, 0x10f870, 0x11111111 * next->bios.ramcfg_11_03_0f);
- data = nv_ro16(bios, timing + 0x2c);
- ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) << 4);
+ ram_mask(fuc, 0x10f250, 0x000003f0, next->bios.timing_20_2c_003f << 4);
- if (((nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >> 6) >
- ((nv_ro32(bios, timing + 0x28) & 0x7f000000) >> 24))
- data = (nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >> 6;
- else
- data = (nv_ro32(bios, timing + 0x28) & 0x1f000000) >> 24;
+ data = (next->bios.timing[10] & 0x7f000000) >> 24;
+ if (data < next->bios.timing_20_2c_1fc0)
+ data = next->bios.timing_20_2c_1fc0;
ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
- data = nv_ro08(bios, timing + 0x30);
- ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
+ ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8);
ram_wr32(fuc, 0x10f090, 0x4000007f);
ram_nsec(fuc, 1000);
@@ -855,7 +918,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
- if (nv_ro08(bios, rammap + 0x08) & 0x01)
+ if (next->bios.rammap_11_08_01)
data = 0x00000800;
else
data = 0x00000000;
@@ -868,21 +931,18 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
******************************************************************************/
static int
-nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
+nve0_ram_calc_data(struct nouveau_fb *pfb, u32 freq,
+ struct nouveau_ram_data *data)
{
struct nouveau_bios *bios = nouveau_bios(pfb);
struct nve0_ram *ram = (void *)pfb->ram;
- struct nve0_ramfuc *fuc = &ram->fuc;
- struct bit_entry M;
- int ret, refclk, strap, i;
- u32 data;
- u8 cnt;
+ u8 strap, cnt, len;
/* lookup memory config data relevant to the target frequency */
- ram->base.rammap.data = nvbios_rammap_match(bios, freq / 1000,
- &ram->base.rammap.version,
- &ram->base.rammap.size, &cnt,
- &ram->base.ramcfg.size);
+ ram->base.rammap.data = nvbios_rammapEp(bios, freq / 1000,
+ &ram->base.rammap.version,
+ &ram->base.rammap.size,
+ &cnt, &len, &data->bios);
if (!ram->base.rammap.data || ram->base.rammap.version != 0x11 ||
ram->base.rammap.size < 0x09) {
nv_error(pfb, "invalid/missing rammap entry\n");
@@ -890,24 +950,13 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
}
/* locate specific data set for the attached memory */
- if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
- nv_error(pfb, "invalid/missing memory table\n");
- return -EINVAL;
- }
-
- strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
- data = nv_ro16(bios, M.offset + 1);
- if (data)
- strap = nv_ro08(bios, data + strap);
-
- if (strap >= cnt) {
- nv_error(pfb, "invalid ramcfg strap\n");
- return -EINVAL;
- }
-
- ram->base.ramcfg.version = ram->base.rammap.version;
- ram->base.ramcfg.data = ram->base.rammap.data + ram->base.rammap.size +
- (ram->base.ramcfg.size * strap);
+ ram->base.ramcfg.data = nvbios_rammapSp(bios, ram->base.rammap.data,
+ ram->base.rammap.version,
+ ram->base.rammap.size, cnt, len,
+ nvbios_ramcfg_index(bios),
+ &ram->base.ramcfg.version,
+ &ram->base.ramcfg.size,
+ &data->bios);
if (!ram->base.ramcfg.data || ram->base.ramcfg.version != 0x11 ||
ram->base.ramcfg.size < 0x08) {
nv_error(pfb, "invalid/missing ramcfg entry\n");
@@ -918,9 +967,9 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
strap = nv_ro08(bios, ram->base.ramcfg.data + 0x00);
if (strap != 0xff) {
ram->base.timing.data =
- nvbios_timing_entry(bios, strap,
- &ram->base.timing.version,
- &ram->base.timing.size);
+ nvbios_timingEp(bios, strap, &ram->base.timing.version,
+ &ram->base.timing.size, &cnt, &len,
+ &data->bios);
if (!ram->base.timing.data ||
ram->base.timing.version != 0x20 ||
ram->base.timing.size < 0x33) {
@@ -931,11 +980,23 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
ram->base.timing.data = 0;
}
+ data->freq = freq;
+ return 0;
+}
+
+static int
+nve0_ram_calc_xits(struct nouveau_fb *pfb, struct nouveau_ram_data *next)
+{
+ struct nve0_ram *ram = (void *)pfb->ram;
+ struct nve0_ramfuc *fuc = &ram->fuc;
+ int refclk, i;
+ int ret;
+
ret = ram_init(fuc, pfb);
if (ret)
return ret;
- ram->mode = (freq > fuc->refpll.vco1.max_freq) ? 2 : 1;
+ ram->mode = (next->freq > fuc->refpll.vco1.max_freq) ? 2 : 1;
ram->from = ram_rd32(fuc, 0x1373f4) & 0x0000000f;
/* XXX: this is *not* what nvidia do. on fermi nvidia generally
@@ -946,7 +1007,7 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
* so far, i've seen very weird values being chosen by nvidia on
* kepler boards, no idea how/why they're chosen.
*/
- refclk = freq;
+ refclk = next->freq;
if (ram->mode == 2)
refclk = fuc->mempll.refclk;
@@ -968,7 +1029,7 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
fuc->mempll.min_p = 1;
fuc->mempll.max_p = 2;
- ret = nva3_pll_calc(nv_subdev(pfb), &fuc->mempll, freq,
+ ret = nva3_pll_calc(nv_subdev(pfb), &fuc->mempll, next->freq,
&ram->N2, NULL, &ram->M2, &ram->P2);
if (ret <= 0) {
nv_error(pfb, "unable to calc mempll\n");
@@ -980,17 +1041,18 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
if (ram_have(fuc, mr[i]))
ram->base.mr[i] = ram_rd32(fuc, mr[i]);
}
+ ram->base.freq = next->freq;
switch (ram->base.type) {
case NV_MEM_TYPE_DDR3:
ret = nouveau_sddr3_calc(&ram->base);
if (ret == 0)
- ret = nve0_ram_calc_sddr3(pfb, freq);
+ ret = nve0_ram_calc_sddr3(pfb, next->freq);
break;
case NV_MEM_TYPE_GDDR5:
- ret = nouveau_gddr5_calc(&ram->base);
+ ret = nouveau_gddr5_calc(&ram->base, ram->pnuts != 0);
if (ret == 0)
- ret = nve0_ram_calc_gddr5(pfb, freq);
+ ret = nve0_ram_calc_gddr5(pfb, next->freq);
break;
default:
ret = -ENOSYS;
@@ -1001,13 +1063,55 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
}
static int
+nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+ struct nouveau_clock *clk = nouveau_clock(pfb);
+ struct nve0_ram *ram = (void *)pfb->ram;
+ struct nouveau_ram_data *xits = &ram->base.xition;
+ struct nouveau_ram_data *copy;
+ int ret;
+
+ if (ram->base.next == NULL) {
+ ret = nve0_ram_calc_data(pfb, clk->read(clk, nv_clk_src_mem),
+ &ram->base.former);
+ if (ret)
+ return ret;
+
+ ret = nve0_ram_calc_data(pfb, freq, &ram->base.target);
+ if (ret)
+ return ret;
+
+ if (ram->base.target.freq < ram->base.former.freq) {
+ *xits = ram->base.target;
+ copy = &ram->base.former;
+ } else {
+ *xits = ram->base.former;
+ copy = &ram->base.target;
+ }
+
+ xits->bios.ramcfg_11_02_04 = copy->bios.ramcfg_11_02_04;
+ xits->bios.ramcfg_11_02_03 = copy->bios.ramcfg_11_02_03;
+ xits->bios.timing_20_30_07 = copy->bios.timing_20_30_07;
+
+ ram->base.next = &ram->base.target;
+ if (memcmp(xits, &ram->base.former, sizeof(xits->bios)))
+ ram->base.next = &ram->base.xition;
+ } else {
+ BUG_ON(ram->base.next != &ram->base.xition);
+ ram->base.next = &ram->base.target;
+ }
+
+ return nve0_ram_calc_xits(pfb, ram->base.next);
+}
+
+static int
nve0_ram_prog(struct nouveau_fb *pfb)
{
struct nouveau_device *device = nv_device(pfb);
struct nve0_ram *ram = (void *)pfb->ram;
struct nve0_ramfuc *fuc = &ram->fuc;
ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
- return 0;
+ return (ram->base.next == &ram->base.xition);
}
static void
@@ -1015,6 +1119,7 @@ nve0_ram_tidy(struct nouveau_fb *pfb)
{
struct nve0_ram *ram = (void *)pfb->ram;
struct nve0_ramfuc *fuc = &ram->fuc;
+ ram->base.next = NULL;
ram_exec(fuc, false);
}
@@ -1055,7 +1160,7 @@ nve0_ram_init(struct nouveau_object *object)
* binary driver skips the one that's already been setup by
* the init tables.
*/
- data = nvbios_rammap_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
+ data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
if (!data || hdr < 0x15)
return -EINVAL;
@@ -1073,6 +1178,7 @@ nve0_ram_init(struct nouveau_object *object)
data += 4;
}
nv_wr32(pfb, 0x10f65c, save);
+ nv_mask(pfb, 0x10f584, 0x11000000, 0x00000000);
switch (ram->base.type) {
case NV_MEM_TYPE_GDDR5:
@@ -1117,7 +1223,8 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_gpio *gpio = nouveau_gpio(pfb);
struct dcb_gpio_func func;
struct nve0_ram *ram;
- int ret;
+ int ret, i;
+ u32 tmp;
ret = nvc0_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
@@ -1136,6 +1243,25 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
break;
}
+ /* calculate a mask of differently configured memory partitions,
+ * because, of course reclocking wasn't complicated enough
+ * already without having to treat some of them differently to
+ * the others....
+ */
+ ram->parts = nv_rd32(pfb, 0x022438);
+ ram->pmask = nv_rd32(pfb, 0x022554);
+ ram->pnuts = 0;
+ for (i = 0, tmp = 0; i < ram->parts; i++) {
+ if (!(ram->pmask & (1 << i))) {
+ u32 cfg1 = nv_rd32(pfb, 0x110204 + (i * 0x1000));
+ if (tmp && tmp != cfg1) {
+ ram->pnuts |= (1 << i);
+ continue;
+ }
+ tmp = cfg1;
+ }
+ }
+
// parse bios data for both pll's
ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
if (ret) {
@@ -1248,7 +1374,7 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
ram->fuc.r_0x10f65c = ramfuc_reg(0x10f65c);
ram->fuc.r_0x10f6bc = ramfuc_reg(0x10f6bc);
ram->fuc.r_0x100710 = ramfuc_reg(0x100710);
- ram->fuc.r_0x10f750 = ramfuc_reg(0x10f750);
+ ram->fuc.r_0x100750 = ramfuc_reg(0x100750);
return 0;
}
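The reworked calc path above splits a reclock into at most two passes: whichever of the current ("former") and requested ("target") configurations runs slower becomes an intermediate "xition" state, a few of its ramcfg/timing fields are taken from the other entry, and nve0_ram_prog() now returns non-zero when that intermediate state was the one just programmed so the caller re-runs the sequence to reach the real target. A minimal user-space sketch of that selection logic follows; struct ram_data, select_next() and the single ramcfg/timing stand-in fields are illustrative simplifications, not the driver's real types.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ram_data {
        unsigned freq;                          /* kHz */
        struct {
                unsigned ramcfg;                /* stand-in for the copied ramcfg bits */
                unsigned timing;                /* stand-in for the copied timing bits */
        } bios;
};

struct ram_state {
        struct ram_data former;                 /* currently programmed config */
        struct ram_data target;                 /* requested config */
        struct ram_data xition;                 /* intermediate transition config */
        struct ram_data *next;                  /* what the next pass programs */
};

/* returns true while a further calc/prog pass is still required */
static bool select_next(struct ram_state *ram)
{
        struct ram_data *xits = &ram->xition;
        struct ram_data *copy;

        if (!ram->next) {
                /* the slower of the two clocks becomes the transition state */
                if (ram->target.freq < ram->former.freq) {
                        *xits = ram->target;
                        copy = &ram->former;
                } else {
                        *xits = ram->former;
                        copy = &ram->target;
                }
                /* a few ramcfg/timing bits always follow the other entry */
                xits->bios.ramcfg = copy->bios.ramcfg;

                ram->next = &ram->target;
                if (memcmp(&xits->bios, &ram->former.bios, sizeof(xits->bios)))
                        ram->next = &ram->xition;
        } else {
                /* second pass: move from the transition state to the target */
                ram->next = &ram->target;
        }

        return ram->next == &ram->xition;
}

int main(void)
{
        struct ram_state ram = {
                .former = { .freq =  324000, .bios = { .ramcfg = 1, .timing = 1 } },
                .target = { .freq = 3004000, .bios = { .ramcfg = 2, .timing = 2 } },
        };

        while (select_next(&ram))
                printf("program intermediate state at %u kHz\n", ram.next->freq);
        printf("program final state at %u kHz\n", ram.next->freq);
        return 0;
}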
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 041fd5edaebf..c33c03d2f4af 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -197,7 +197,7 @@ static int
nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
struct nouveau_i2c_board_info *info,
bool (*match)(struct nouveau_i2c_port *,
- struct i2c_board_info *))
+ struct i2c_board_info *, void *), void *data)
{
struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index);
int i;
@@ -221,7 +221,7 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
}
if (nv_probe_i2c(port, info[i].dev.addr) &&
- (!match || match(port, &info[i].dev))) {
+ (!match || match(port, &info[i].dev, data))) {
nv_info(i2c, "detected %s: %s\n", what,
info[i].dev.type);
return i;
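The hunks above thread an opaque void *data argument from nouveau_i2c_identify() through to the match callback, so a caller can hand its own state to the probe filter instead of rediscovering it from globals. A small stand-alone sketch of that callback contract; struct port, struct board_info, struct match_ctx and my_match() are stand-ins for illustration only.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct port { int index; };
struct board_info { const char *type; unsigned short addr; };
struct match_ctx { unsigned short want_addr; };         /* hypothetical caller state */

typedef bool (*match_fn)(struct port *, struct board_info *, void *);

static bool my_match(struct port *port, struct board_info *info, void *data)
{
        struct match_ctx *ctx = data;
        (void)port;
        return info->addr == ctx->want_addr;            /* decide using caller context */
}

/* loop shaped like nouveau_i2c_identify(): probe each candidate and let the
 * callback veto it, passing the opaque data pointer straight through */
static int identify(struct port *port, struct board_info *info, size_t n,
                    match_fn match, void *data)
{
        for (size_t i = 0; i < n; i++) {
                if (!match || match(port, &info[i], data))
                        return (int)i;
        }
        return -1;
}

int main(void)
{
        struct port port = { .index = 0 };
        struct board_info list[] = {
                { "adt7473", 0x2e },
                { "lm99",    0x4c },
        };
        struct match_ctx ctx = { .want_addr = 0x4c };

        printf("matched entry %d\n", identify(&port, list, 2, my_match, &ctx));
        return 0;
}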
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
index 6565f3dbbe04..14706d9842ca 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
@@ -22,7 +22,24 @@
* Authors: Ben Skeggs
*/
-#include <subdev/instmem.h>
+#include "priv.h"
+
+/******************************************************************************
+ * instmem object base implementation
+ *****************************************************************************/
+
+void
+_nouveau_instobj_dtor(struct nouveau_object *object)
+{
+ struct nouveau_instmem *imem = (void *)object->engine;
+ struct nouveau_instobj *iobj = (void *)object;
+
+ mutex_lock(&nv_subdev(imem)->mutex);
+ list_del(&iobj->head);
+ mutex_unlock(&nv_subdev(imem)->mutex);
+
+ return nouveau_object_destroy(&iobj->base);
+}
int
nouveau_instobj_create_(struct nouveau_object *parent,
@@ -46,73 +63,26 @@ nouveau_instobj_create_(struct nouveau_object *parent,
return 0;
}
-void
-nouveau_instobj_destroy(struct nouveau_instobj *iobj)
-{
- struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
+/******************************************************************************
+ * instmem subdev base implementation
+ *****************************************************************************/
- mutex_lock(&subdev->mutex);
- list_del(&iobj->head);
- mutex_unlock(&subdev->mutex);
-
- return nouveau_object_destroy(&iobj->base);
-}
-
-void
-_nouveau_instobj_dtor(struct nouveau_object *object)
+static int
+nouveau_instmem_alloc(struct nouveau_instmem *imem,
+ struct nouveau_object *parent, u32 size, u32 align,
+ struct nouveau_object **pobject)
{
- struct nouveau_instobj *iobj = (void *)object;
- return nouveau_instobj_destroy(iobj);
+ struct nouveau_object *engine = nv_object(imem);
+ struct nouveau_instmem_impl *impl = (void *)engine->oclass;
+ struct nouveau_instobj_args args = { .size = size, .align = align };
+ return nouveau_object_ctor(parent, engine, impl->instobj, &args,
+ sizeof(args), pobject);
}
int
-nouveau_instmem_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- int length, void **pobject)
-{
- struct nouveau_instmem *imem;
- int ret;
-
- ret = nouveau_subdev_create_(parent, engine, oclass, 0,
- "INSTMEM", "instmem", length, pobject);
- imem = *pobject;
- if (ret)
- return ret;
-
- INIT_LIST_HEAD(&imem->list);
- return 0;
-}
-
-int
-nouveau_instmem_init(struct nouveau_instmem *imem)
-{
- struct nouveau_instobj *iobj;
- int ret, i;
-
- ret = nouveau_subdev_init(&imem->base);
- if (ret)
- return ret;
-
- mutex_lock(&imem->base.mutex);
-
- list_for_each_entry(iobj, &imem->list, head) {
- if (iobj->suspend) {
- for (i = 0; i < iobj->size; i += 4)
- nv_wo32(iobj, i, iobj->suspend[i / 4]);
- vfree(iobj->suspend);
- iobj->suspend = NULL;
- }
- }
-
- mutex_unlock(&imem->base.mutex);
-
- return 0;
-}
-
-int
-nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
+_nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
{
+ struct nouveau_instmem *imem = (void *)object;
struct nouveau_instobj *iobj;
int i, ret = 0;
@@ -143,12 +113,45 @@ int
_nouveau_instmem_init(struct nouveau_object *object)
{
struct nouveau_instmem *imem = (void *)object;
- return nouveau_instmem_init(imem);
+ struct nouveau_instobj *iobj;
+ int ret, i;
+
+ ret = nouveau_subdev_init(&imem->base);
+ if (ret)
+ return ret;
+
+ mutex_lock(&imem->base.mutex);
+
+ list_for_each_entry(iobj, &imem->list, head) {
+ if (iobj->suspend) {
+ for (i = 0; i < iobj->size; i += 4)
+ nv_wo32(iobj, i, iobj->suspend[i / 4]);
+ vfree(iobj->suspend);
+ iobj->suspend = NULL;
+ }
+ }
+
+ mutex_unlock(&imem->base.mutex);
+
+ return 0;
}
int
-_nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
+nouveau_instmem_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ int length, void **pobject)
{
- struct nouveau_instmem *imem = (void *)object;
- return nouveau_instmem_fini(imem, suspend);
+ struct nouveau_instmem *imem;
+ int ret;
+
+ ret = nouveau_subdev_create_(parent, engine, oclass, 0,
+ "INSTMEM", "instmem", length, pobject);
+ imem = *pobject;
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&imem->list);
+ imem->alloc = nouveau_instmem_alloc;
+ return 0;
}
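_nouveau_instmem_init() above walks every tracked object and, if a suspend copy exists, writes it back one 32-bit word at a time before freeing the buffer; the matching save is done by the fini path when suspend is requested. A user-space model of that save/restore pattern, sketched from the visible restore loop (the save half and all type names here are simplified assumptions):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct instobj {
        uint32_t *backing;      /* stands in for the object's real storage */
        uint32_t size;          /* bytes */
        uint32_t *suspend;      /* saved copy while suspended, else NULL */
};

/* fini(suspend=true) side: stash the contents in a temporary buffer */
static int obj_suspend(struct instobj *obj)
{
        obj->suspend = malloc(obj->size);               /* vmalloc() in the kernel */
        if (!obj->suspend)
                return -1;
        for (uint32_t i = 0; i < obj->size; i += 4)     /* nv_ro32()-style reads */
                obj->suspend[i / 4] = obj->backing[i / 4];
        return 0;
}

/* init side, as in the restore loop above: write back and free the buffer */
static void obj_resume(struct instobj *obj)
{
        if (!obj->suspend)
                return;
        for (uint32_t i = 0; i < obj->size; i += 4)     /* nv_wo32()-style writes */
                obj->backing[i / 4] = obj->suspend[i / 4];
        free(obj->suspend);
        obj->suspend = NULL;
}

int main(void)
{
        uint32_t vram[4] = { 1, 2, 3, 4 };
        struct instobj obj = { .backing = vram, .size = sizeof(vram) };

        if (obj_suspend(&obj))
                return 1;
        memset(vram, 0, sizeof(vram));                  /* contents lost across suspend */
        obj_resume(&obj);
        printf("restored word 2 = %u\n", (unsigned)vram[2]);
        return 0;
}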
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index 795393d7b2f5..7b64befee48f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -22,10 +22,35 @@
* Authors: Ben Skeggs
*/
-#include <subdev/fb.h>
-
#include "nv04.h"
+/******************************************************************************
+ * instmem object implementation
+ *****************************************************************************/
+
+static u32
+nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
+{
+ struct nv04_instobj_priv *node = (void *)object;
+ return nv_ro32(object->engine, node->mem->offset + addr);
+}
+
+static void
+nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ struct nv04_instobj_priv *node = (void *)object;
+ nv_wo32(object->engine, node->mem->offset + addr, data);
+}
+
+static void
+nv04_instobj_dtor(struct nouveau_object *object)
+{
+ struct nv04_instmem_priv *priv = (void *)object->engine;
+ struct nv04_instobj_priv *node = (void *)object;
+ nouveau_mm_free(&priv->heap, &node->mem);
+ nouveau_instobj_destroy(&node->base);
+}
+
static int
nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -33,18 +58,19 @@ nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
{
struct nv04_instmem_priv *priv = (void *)engine;
struct nv04_instobj_priv *node;
- int ret, align;
+ struct nouveau_instobj_args *args = data;
+ int ret;
- align = (unsigned long)data;
- if (!align)
- align = 1;
+ if (!args->align)
+ args->align = 1;
ret = nouveau_instobj_create(parent, engine, oclass, &node);
*pobject = nv_object(node);
if (ret)
return ret;
- ret = nouveau_mm_head(&priv->heap, 1, size, size, align, &node->mem);
+ ret = nouveau_mm_head(&priv->heap, 1, args->size, args->size,
+ args->align, &node->mem);
if (ret)
return ret;
@@ -53,32 +79,9 @@ nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-static void
-nv04_instobj_dtor(struct nouveau_object *object)
-{
- struct nv04_instmem_priv *priv = (void *)object->engine;
- struct nv04_instobj_priv *node = (void *)object;
- nouveau_mm_free(&priv->heap, &node->mem);
- nouveau_instobj_destroy(&node->base);
-}
-
-static u32
-nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
-{
- struct nv04_instobj_priv *node = (void *)object;
- return nv_ro32(object->engine, node->mem->offset + addr);
-}
-
-static void
-nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
-{
- struct nv04_instobj_priv *node = (void *)object;
- nv_wo32(object->engine, node->mem->offset + addr, data);
-}
-
-static struct nouveau_oclass
+struct nouveau_instobj_impl
nv04_instobj_oclass = {
- .ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_instobj_ctor,
.dtor = nv04_instobj_dtor,
.init = _nouveau_instobj_init,
@@ -88,19 +91,34 @@ nv04_instobj_oclass = {
},
};
-int
-nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
- u32 size, u32 align, struct nouveau_object **pobject)
+/******************************************************************************
+ * instmem subdev implementation
+ *****************************************************************************/
+
+static u32
+nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
{
- struct nouveau_object *engine = nv_object(imem);
- int ret;
+ return nv_rd32(object, 0x700000 + addr);
+}
- ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass,
- (void *)(unsigned long)align, size, pobject);
- if (ret)
- return ret;
+static void
+nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ return nv_wr32(object, 0x700000 + addr, data);
+}
- return 0;
+void
+nv04_instmem_dtor(struct nouveau_object *object)
+{
+ struct nv04_instmem_priv *priv = (void *)object;
+ nouveau_gpuobj_ref(NULL, &priv->ramfc);
+ nouveau_gpuobj_ref(NULL, &priv->ramro);
+ nouveau_ramht_ref(NULL, &priv->ramht);
+ nouveau_gpuobj_ref(NULL, &priv->vbios);
+ nouveau_mm_fini(&priv->heap);
+ if (priv->iomem)
+ iounmap(priv->iomem);
+ nouveau_instmem_destroy(&priv->base);
}
static int
@@ -118,7 +136,6 @@ nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
/* PRAMIN aperture maps over the end of VRAM, reserve it */
priv->base.reserved = 512 * 1024;
- priv->base.alloc = nv04_instmem_alloc;
ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
if (ret)
@@ -150,36 +167,10 @@ nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-void
-nv04_instmem_dtor(struct nouveau_object *object)
-{
- struct nv04_instmem_priv *priv = (void *)object;
- nouveau_gpuobj_ref(NULL, &priv->ramfc);
- nouveau_gpuobj_ref(NULL, &priv->ramro);
- nouveau_ramht_ref(NULL, &priv->ramht);
- nouveau_gpuobj_ref(NULL, &priv->vbios);
- nouveau_mm_fini(&priv->heap);
- if (priv->iomem)
- iounmap(priv->iomem);
- nouveau_instmem_destroy(&priv->base);
-}
-
-static u32
-nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
-{
- return nv_rd32(object, 0x700000 + addr);
-}
-
-static void
-nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
-{
- return nv_wr32(object, 0x700000 + addr, data);
-}
-
-struct nouveau_oclass
-nv04_instmem_oclass = {
- .handle = NV_SUBDEV(INSTMEM, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_instmem_oclass = &(struct nouveau_instmem_impl) {
+ .base.handle = NV_SUBDEV(INSTMEM, 0x04),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_instmem_ctor,
.dtor = nv04_instmem_dtor,
.init = _nouveau_instmem_init,
@@ -187,4 +178,5 @@ nv04_instmem_oclass = {
.rd32 = nv04_instmem_rd32,
.wr32 = nv04_instmem_wr32,
},
-};
+ .instobj = &nv04_instobj_oclass.base,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
index b15b61310236..095fbc6fc099 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
@@ -5,7 +5,9 @@
#include <core/ramht.h>
#include <core/mm.h>
-#include <subdev/instmem.h>
+#include "priv.h"
+
+extern struct nouveau_instobj_impl nv04_instobj_oclass;
struct nv04_instmem_priv {
struct nouveau_instmem base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index b10a143787a7..ec0b9661d614 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -26,6 +26,24 @@
#include "nv04.h"
+/******************************************************************************
+ * instmem subdev implementation
+ *****************************************************************************/
+
+static u32
+nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
+{
+ struct nv04_instmem_priv *priv = (void *)object;
+ return ioread32_native(priv->iomem + addr);
+}
+
+static void
+nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ struct nv04_instmem_priv *priv = (void *)object;
+ iowrite32_native(data, priv->iomem + addr);
+}
+
static int
nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -69,7 +87,6 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.reserved += 512 * 1024; /* object storage */
priv->base.reserved = round_up(priv->base.reserved, 4096);
- priv->base.alloc = nv04_instmem_alloc;
ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
if (ret)
@@ -106,24 +123,10 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-static u32
-nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
-{
- struct nv04_instmem_priv *priv = (void *)object;
- return ioread32_native(priv->iomem + addr);
-}
-
-static void
-nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
-{
- struct nv04_instmem_priv *priv = (void *)object;
- iowrite32_native(data, priv->iomem + addr);
-}
-
-struct nouveau_oclass
-nv40_instmem_oclass = {
- .handle = NV_SUBDEV(INSTMEM, 0x40),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv40_instmem_oclass = &(struct nouveau_instmem_impl) {
+ .base.handle = NV_SUBDEV(INSTMEM, 0x40),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv40_instmem_ctor,
.dtor = nv04_instmem_dtor,
.init = _nouveau_instmem_init,
@@ -131,4 +134,5 @@ nv40_instmem_oclass = {
.rd32 = nv40_instmem_rd32,
.wr32 = nv40_instmem_wr32,
},
-};
+ .instobj = &nv04_instobj_oclass.base,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
index 97bc5dff93e7..7cb3b098a08d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
@@ -22,11 +22,11 @@
* Authors: Ben Skeggs
*/
-#include <subdev/instmem.h>
#include <subdev/fb.h>
-
#include <core/mm.h>
+#include "priv.h"
+
struct nv50_instmem_priv {
struct nouveau_instmem base;
spinlock_t lock;
@@ -38,42 +38,9 @@ struct nv50_instobj_priv {
struct nouveau_mem *mem;
};
-static int
-nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nv50_instobj_priv *node;
- u32 align = (unsigned long)data;
- int ret;
-
- size = max((size + 4095) & ~4095, (u32)4096);
- align = max((align + 4095) & ~4095, (u32)4096);
-
- ret = nouveau_instobj_create(parent, engine, oclass, &node);
- *pobject = nv_object(node);
- if (ret)
- return ret;
-
- ret = pfb->ram->get(pfb, size, align, 0, 0x800, &node->mem);
- if (ret)
- return ret;
-
- node->base.addr = node->mem->offset;
- node->base.size = node->mem->size << 12;
- node->mem->page_shift = 12;
- return 0;
-}
-
-static void
-nv50_instobj_dtor(struct nouveau_object *object)
-{
- struct nv50_instobj_priv *node = (void *)object;
- struct nouveau_fb *pfb = nouveau_fb(object);
- pfb->ram->put(pfb, &node->mem);
- nouveau_instobj_destroy(&node->base);
-}
+/******************************************************************************
+ * instmem object implementation
+ *****************************************************************************/
static u32
nv50_instobj_rd32(struct nouveau_object *object, u64 offset)
@@ -113,9 +80,46 @@ nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data)
spin_unlock_irqrestore(&priv->lock, flags);
}
-static struct nouveau_oclass
+static void
+nv50_instobj_dtor(struct nouveau_object *object)
+{
+ struct nv50_instobj_priv *node = (void *)object;
+ struct nouveau_fb *pfb = nouveau_fb(object);
+ pfb->ram->put(pfb, &node->mem);
+ nouveau_instobj_destroy(&node->base);
+}
+
+static int
+nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nouveau_instobj_args *args = data;
+ struct nv50_instobj_priv *node;
+ int ret;
+
+ args->size = max((args->size + 4095) & ~4095, (u32)4096);
+ args->align = max((args->align + 4095) & ~4095, (u32)4096);
+
+ ret = nouveau_instobj_create(parent, engine, oclass, &node);
+ *pobject = nv_object(node);
+ if (ret)
+ return ret;
+
+ ret = pfb->ram->get(pfb, args->size, args->align, 0, 0x800, &node->mem);
+ if (ret)
+ return ret;
+
+ node->base.addr = node->mem->offset;
+ node->base.size = node->mem->size << 12;
+ node->mem->page_shift = 12;
+ return 0;
+}
+
+static struct nouveau_instobj_impl
nv50_instobj_oclass = {
- .ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_instobj_ctor,
.dtor = nv50_instobj_dtor,
.init = _nouveau_instobj_init,
@@ -125,13 +129,16 @@ nv50_instobj_oclass = {
},
};
+/******************************************************************************
+ * instmem subdev implementation
+ *****************************************************************************/
+
static int
-nv50_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
- u32 size, u32 align, struct nouveau_object **pobject)
+nv50_instmem_fini(struct nouveau_object *object, bool suspend)
{
- struct nouveau_object *engine = nv_object(imem);
- return nouveau_object_ctor(parent, engine, &nv50_instobj_oclass,
- (void *)(unsigned long)align, size, pobject);
+ struct nv50_instmem_priv *priv = (void *)object;
+ priv->addr = ~0ULL;
+ return nouveau_instmem_fini(&priv->base, suspend);
}
static int
@@ -148,25 +155,17 @@ nv50_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
spin_lock_init(&priv->lock);
- priv->base.alloc = nv50_instmem_alloc;
return 0;
}
-static int
-nv50_instmem_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv50_instmem_priv *priv = (void *)object;
- priv->addr = ~0ULL;
- return nouveau_instmem_fini(&priv->base, suspend);
-}
-
-struct nouveau_oclass
-nv50_instmem_oclass = {
- .handle = NV_SUBDEV(INSTMEM, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_instmem_oclass = &(struct nouveau_instmem_impl) {
+ .base.handle = NV_SUBDEV(INSTMEM, 0x50),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_instmem_ctor,
.dtor = _nouveau_instmem_dtor,
.init = _nouveau_instmem_init,
.fini = nv50_instmem_fini,
},
-};
+ .instobj = &nv50_instobj_oclass.base,
+}.base;
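The nv50 constructor above now rounds both the requested size and alignment up to whole 4 KiB units, with a 4 KiB floor, before carving the backing out of VRAM. A tiny stand-alone check of that rounding expression (round_4k() is just an illustrative helper):

#include <stdio.h>

static unsigned round_4k(unsigned v)
{
        unsigned r = (v + 4095) & ~4095u;       /* (x + 4095) & ~4095 as in the patch */
        return r > 4096 ? r : 4096;             /* max(..., 4096) floor */
}

int main(void)
{
        printf("%u %u %u\n", round_4k(0), round_4k(100), round_4k(4097));
        /* prints: 4096 4096 8192 */
        return 0;
}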
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h
new file mode 100644
index 000000000000..8d67dedc5bb2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h
@@ -0,0 +1,56 @@
+#ifndef __NVKM_INSTMEM_PRIV_H__
+#define __NVKM_INSTMEM_PRIV_H__
+
+#include <subdev/instmem.h>
+
+struct nouveau_instobj_impl {
+ struct nouveau_oclass base;
+};
+
+struct nouveau_instobj_args {
+ u32 size;
+ u32 align;
+};
+
+#define nouveau_instobj_create(p,e,o,d) \
+ nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_instobj_destroy(p) ({ \
+ struct nouveau_instobj *iobj = (p); \
+ _nouveau_instobj_dtor(nv_object(iobj)); \
+})
+#define nouveau_instobj_init(p) \
+ nouveau_object_init(&(p)->base)
+#define nouveau_instobj_fini(p,s) \
+ nouveau_object_fini(&(p)->base, (s))
+
+int nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+void _nouveau_instobj_dtor(struct nouveau_object *);
+#define _nouveau_instobj_init nouveau_object_init
+#define _nouveau_instobj_fini nouveau_object_fini
+
+struct nouveau_instmem_impl {
+ struct nouveau_oclass base;
+ struct nouveau_oclass *instobj;
+};
+
+#define nouveau_instmem_create(p,e,o,d) \
+ nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_instmem_destroy(p) \
+ nouveau_subdev_destroy(&(p)->base)
+#define nouveau_instmem_init(p) ({ \
+ struct nouveau_instmem *imem = (p); \
+ _nouveau_instmem_init(nv_object(imem)); \
+})
+#define nouveau_instmem_fini(p,s) ({ \
+ struct nouveau_instmem *imem = (p); \
+ _nouveau_instmem_fini(nv_object(imem), (s)); \
+})
+
+int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+#define _nouveau_instmem_dtor _nouveau_subdev_dtor
+int _nouveau_instmem_init(struct nouveau_object *);
+int _nouveau_instmem_fini(struct nouveau_object *, bool);
+
+#endif
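The new priv.h above lets each chipset name its instobj class in nouveau_instmem_impl and carries allocation requests as an explicit nouveau_instobj_args {size, align}, replacing the old scheme of casting the alignment into the constructor's data pointer. A minimal stand-alone model of that flow; instmem_impl, instobj_ctor and nv04_instobj_new() are illustrative stand-ins, not the real object model.

#include <stdio.h>
#include <stdlib.h>

struct instobj_args { unsigned size, align; };

struct instobj {
        unsigned offset;                /* where the allocation would land */
        struct instobj_args args;
};

/* per-implementation constructor, selected through the impl table */
typedef struct instobj *(*instobj_ctor)(const struct instobj_args *);

struct instmem_impl {
        const char *name;
        instobj_ctor instobj;           /* parallels nouveau_instmem_impl::instobj */
};

static struct instobj *nv04_instobj_new(const struct instobj_args *args)
{
        struct instobj *obj = calloc(1, sizeof(*obj));
        if (obj)
                obj->args = *args;      /* the real code carves from priv->heap */
        return obj;
}

static const struct instmem_impl nv04_impl = {
        .name = "nv04",
        .instobj = nv04_instobj_new,
};

/* generic helper shaped like nouveau_instmem_alloc(): pack explicit args */
static struct instobj *instmem_alloc(const struct instmem_impl *impl,
                                     unsigned size, unsigned align)
{
        struct instobj_args args = { .size = size, .align = align ? align : 1 };
        return impl->instobj(&args);
}

int main(void)
{
        struct instobj *obj = instmem_alloc(&nv04_impl, 0x1000, 0);
        if (!obj)
                return 1;
        printf("%s instobj: %u bytes, align %u\n",
               nv04_impl.name, obj->args.size, obj->args.align);
        free(obj);
        return 0;
}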
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c02b4763a2d5..34472d317097 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -32,6 +32,7 @@ nvc0_mc_intr[] = {
{ 0x00000080, NVDEV_ENGINE_COPY2 },
{ 0x00000100, NVDEV_ENGINE_FIFO },
{ 0x00001000, NVDEV_ENGINE_GR },
+ { 0x00002000, NVDEV_SUBDEV_FB },
{ 0x00008000, NVDEV_ENGINE_BSP },
{ 0x00040000, NVDEV_SUBDEV_THERM },
{ 0x00020000, NVDEV_ENGINE_VP },
@@ -40,6 +41,7 @@ nvc0_mc_intr[] = {
{ 0x01000000, NVDEV_SUBDEV_PWR },
{ 0x02000000, NVDEV_SUBDEV_LTCG },
{ 0x04000000, NVDEV_ENGINE_DISP },
+ { 0x08000000, NVDEV_SUBDEV_FB },
{ 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x40000000, NVDEV_SUBDEV_IBUS },
{ 0x80000000, NVDEV_ENGINE_SW },
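The two new entries above route additional PMC status bits (0x00002000 and 0x08000000) to the FB subdev. The table itself only pairs a mask with a unit; below is a sketch of how such a table is typically walked against the pending-interrupt word, assuming the usual mask-and-dispatch loop (dispatch() and the unit strings are illustrative, not the driver's handler):

#include <stdint.h>
#include <stdio.h>

struct intr_map { uint32_t mask; const char *unit; };

static const struct intr_map nvc0_intr[] = {
        { 0x00000100, "FIFO" },
        { 0x00001000, "GR"   },
        { 0x00002000, "FB"   },         /* one of the newly added bits */
        { 0x08000000, "FB"   },         /* the other new FB source */
        { 0x40000000, "IBUS" },
        { 0, NULL },
};

static void dispatch(uint32_t stat)
{
        for (const struct intr_map *m = nvc0_intr; m->mask; m++) {
                if (stat & m->mask) {
                        printf("interrupt for %s\n", m->unit);
                        stat &= ~m->mask;
                }
        }
        if (stat)
                printf("unknown interrupt bits 0x%08x\n", stat);
}

int main(void)
{
        dispatch(0x00002100);           /* FB + FIFO pending */
        return 0;
}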
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
index 129120473f6c..13c5af88a601 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -87,55 +87,39 @@ mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version)
0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
};
u32 mxms_args[] = { 0x00000000 };
- union acpi_object args[4] = {
- /* _DSM MUID */
- { .buffer.type = 3,
- .buffer.length = sizeof(muid),
- .buffer.pointer = muid,
- },
- /* spec says this can be zero to mean "highest revision", but
- * of course there's at least one bios out there which fails
- * unless you pass in exactly the version it supports..
- */
- { .integer.type = ACPI_TYPE_INTEGER,
- .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
- },
- /* MXMS function */
- { .integer.type = ACPI_TYPE_INTEGER,
- .integer.value = 0x00000010,
- },
- /* Pointer to MXMS arguments */
- { .buffer.type = ACPI_TYPE_BUFFER,
- .buffer.length = sizeof(mxms_args),
- .buffer.pointer = (char *)mxms_args,
- },
+ union acpi_object argv4 = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = sizeof(mxms_args),
+ .buffer.pointer = (char *)mxms_args,
};
- struct acpi_object_list list = { ARRAY_SIZE(args), args };
- struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_handle handle;
- int ret;
+ int rev;
handle = ACPI_HANDLE(&device->pdev->dev);
if (!handle)
return false;
- ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
- if (ret) {
- nv_debug(mxm, "DSM MXMS failed: %d\n", ret);
+ /*
+ * spec says this can be zero to mean "highest revision", but
+ * of course there's at least one bios out there which fails
+ * unless you pass in exactly the version it supports..
+ */
+ rev = (version & 0xf0) << 4 | (version & 0x0f);
+ obj = acpi_evaluate_dsm(handle, muid, rev, 0x00000010, &argv4);
+ if (!obj) {
+ nv_debug(mxm, "DSM MXMS failed\n");
return false;
}
- obj = retn.pointer;
if (obj->type == ACPI_TYPE_BUFFER) {
mxm->mxms = kmemdup(obj->buffer.pointer,
obj->buffer.length, GFP_KERNEL);
- } else
- if (obj->type == ACPI_TYPE_INTEGER) {
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
nv_debug(mxm, "DSM MXMS returned 0x%llx\n", obj->integer.value);
}
- kfree(obj);
+ ACPI_FREE(obj);
return mxm->mxms != NULL;
}
#endif
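The conversion above replaces the open-coded _DSM evaluation with acpi_evaluate_dsm() and passes the exact revision the MXM firmware expects, packed as (version & 0xf0) << 4 | (version & 0x0f). A tiny stand-alone check of that packing (the sample version values are only examples):

#include <stdio.h>

static int mxm_dsm_rev(unsigned char version)
{
        return (version & 0xf0) << 4 | (version & 0x0f);
}

int main(void)
{
        printf("MXM 3.0 (0x30) -> rev 0x%03x\n", mxm_dsm_rev(0x30));    /* 0x300 */
        printf("MXM 2.1 (0x21) -> rev 0x%03x\n", mxm_dsm_rev(0x21));    /* 0x201 */
        return 0;
}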
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
index af129c2e8113..64f8b4702bf7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
@@ -100,7 +100,7 @@ mxm_match_dcb(struct nouveau_mxm *mxm, u8 *data, void *info)
static int
mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb)
{
- struct nouveau_mxm *mxm = nouveau_mxm(bios);
+ struct nouveau_mxm *mxm = data;
struct context ctx = { .outp = (u32 *)(bios->data + pdcb) };
u8 type, i2cidx, link, ver, len;
u8 *conn;
@@ -199,7 +199,7 @@ mxm_dcb_sanitise(struct nouveau_mxm *mxm)
return;
}
- dcb_outp_foreach(bios, NULL, mxm_dcb_sanitise_entry);
+ dcb_outp_foreach(bios, mxm, mxm_dcb_sanitise_entry);
mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc
new file mode 100644
index 000000000000..757dda700024
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc
@@ -0,0 +1,393 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define T_TIMEOUT 2200000
+#define T_RISEFALL 1000
+#define T_HOLD 5000
+
+#ifdef INCLUDE_PROC
+process(PROC_I2C_, #i2c_init, #i2c_recv)
+#endif
+
+/******************************************************************************
+ * I2C_ data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+i2c_scl_map:
+.b32 NV_PPWR_OUTPUT_I2C_0_SCL
+.b32 NV_PPWR_OUTPUT_I2C_1_SCL
+.b32 NV_PPWR_OUTPUT_I2C_2_SCL
+.b32 NV_PPWR_OUTPUT_I2C_3_SCL
+.b32 NV_PPWR_OUTPUT_I2C_4_SCL
+.b32 NV_PPWR_OUTPUT_I2C_5_SCL
+.b32 NV_PPWR_OUTPUT_I2C_6_SCL
+.b32 NV_PPWR_OUTPUT_I2C_7_SCL
+.b32 NV_PPWR_OUTPUT_I2C_8_SCL
+.b32 NV_PPWR_OUTPUT_I2C_9_SCL
+i2c_sda_map:
+.b32 NV_PPWR_OUTPUT_I2C_0_SDA
+.b32 NV_PPWR_OUTPUT_I2C_1_SDA
+.b32 NV_PPWR_OUTPUT_I2C_2_SDA
+.b32 NV_PPWR_OUTPUT_I2C_3_SDA
+.b32 NV_PPWR_OUTPUT_I2C_4_SDA
+.b32 NV_PPWR_OUTPUT_I2C_5_SDA
+.b32 NV_PPWR_OUTPUT_I2C_6_SDA
+.b32 NV_PPWR_OUTPUT_I2C_7_SDA
+.b32 NV_PPWR_OUTPUT_I2C_8_SDA
+.b32 NV_PPWR_OUTPUT_I2C_9_SDA
+#if NVKM_PPWR_CHIPSET < GF119
+i2c_ctrl:
+.b32 0x00e138
+.b32 0x00e150
+.b32 0x00e168
+.b32 0x00e180
+.b32 0x00e254
+.b32 0x00e274
+.b32 0x00e764
+.b32 0x00e780
+.b32 0x00e79c
+.b32 0x00e7b8
+#endif
+#endif
+
+/******************************************************************************
+ * I2C_ code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+
+// $r3 - value
+// $r2 - sda line
+// $r1 - scl line
+// $r0 - zero
+i2c_drive_scl:
+ cmp b32 $r3 0
+ bra e #i2c_drive_scl_lo
+ nv_iowr(NV_PPWR_OUTPUT_SET, $r1)
+ ret
+ i2c_drive_scl_lo:
+ nv_iowr(NV_PPWR_OUTPUT_CLR, $r1)
+ ret
+
+i2c_drive_sda:
+ cmp b32 $r3 0
+ bra e #i2c_drive_sda_lo
+ nv_iowr(NV_PPWR_OUTPUT_SET, $r2)
+ ret
+ i2c_drive_sda_lo:
+ nv_iowr(NV_PPWR_OUTPUT_CLR, $r2)
+ ret
+
+i2c_sense_scl:
+ bclr $flags $p1
+ nv_iord($r3, NV_PPWR_INPUT)
+ and $r3 $r1
+ bra z #i2c_sense_scl_done
+ bset $flags $p1
+ i2c_sense_scl_done:
+ ret
+
+i2c_sense_sda:
+ bclr $flags $p1
+ nv_iord($r3, NV_PPWR_INPUT)
+ and $r3 $r2
+ bra z #i2c_sense_sda_done
+ bset $flags $p1
+ i2c_sense_sda_done:
+ ret
+
+#define i2c_drive_scl(v) /*
+*/ mov $r3 (v) /*
+*/ call(i2c_drive_scl)
+#define i2c_drive_sda(v) /*
+*/ mov $r3 (v) /*
+*/ call(i2c_drive_sda)
+#define i2c_sense_scl() /*
+*/ call(i2c_sense_scl)
+#define i2c_sense_sda() /*
+*/ call(i2c_sense_sda)
+#define i2c_delay(v) /*
+*/ mov $r14 (v) /*
+*/ call(nsec)
+
+#define i2c_trace_init() /*
+*/ imm32($r6, 0x10000000) /*
+*/ sub b32 $r7 $r6 1 /*
+*/
+#define i2c_trace_down() /*
+*/ shr b32 $r6 4 /*
+*/ push $r5 /*
+*/ shl b32 $r5 $r6 4 /*
+*/ sub b32 $r5 $r6 /*
+*/ not b32 $r5 /*
+*/ and $r7 $r5 /*
+*/ pop $r5 /*
+*/
+#define i2c_trace_exit() /*
+*/ shl b32 $r6 4 /*
+*/
+#define i2c_trace_next() /*
+*/ add b32 $r7 $r6 /*
+*/
+#define i2c_trace_call(func) /*
+*/ i2c_trace_next() /*
+*/ i2c_trace_down() /*
+*/ call(func) /*
+*/ i2c_trace_exit() /*
+*/
+
+i2c_raise_scl:
+ push $r4
+ mov $r4 (T_TIMEOUT / T_RISEFALL)
+ i2c_drive_scl(1)
+ i2c_raise_scl_wait:
+ i2c_delay(T_RISEFALL)
+ i2c_sense_scl()
+ bra $p1 #i2c_raise_scl_done
+ sub b32 $r4 1
+ bra nz #i2c_raise_scl_wait
+ i2c_raise_scl_done:
+ pop $r4
+ ret
+
+i2c_start:
+ i2c_sense_scl()
+ bra not $p1 #i2c_start_rep
+ i2c_sense_sda()
+ bra not $p1 #i2c_start_rep
+ bra #i2c_start_send
+ i2c_start_rep:
+ i2c_drive_scl(0)
+ i2c_drive_sda(1)
+ i2c_trace_call(i2c_raise_scl)
+ bra not $p1 #i2c_start_out
+ i2c_start_send:
+ i2c_drive_sda(0)
+ i2c_delay(T_HOLD)
+ i2c_drive_scl(0)
+ i2c_delay(T_HOLD)
+ i2c_start_out:
+ ret
+
+i2c_stop:
+ i2c_drive_scl(0)
+ i2c_drive_sda(0)
+ i2c_delay(T_RISEFALL)
+ i2c_drive_scl(1)
+ i2c_delay(T_HOLD)
+ i2c_drive_sda(1)
+ i2c_delay(T_HOLD)
+ ret
+
+// $r3 - value
+// $r2 - sda line
+// $r1 - scl line
+// $r0 - zero
+i2c_bitw:
+ call(i2c_drive_sda)
+ i2c_delay(T_RISEFALL)
+ i2c_trace_call(i2c_raise_scl)
+ bra not $p1 #i2c_bitw_out
+ i2c_delay(T_HOLD)
+ i2c_drive_scl(0)
+ i2c_delay(T_HOLD)
+ i2c_bitw_out:
+ ret
+
+// $r3 - value (out)
+// $r2 - sda line
+// $r1 - scl line
+// $r0 - zero
+i2c_bitr:
+ i2c_drive_sda(1)
+ i2c_delay(T_RISEFALL)
+ i2c_trace_call(i2c_raise_scl)
+ bra not $p1 #i2c_bitr_done
+ i2c_sense_sda()
+ i2c_drive_scl(0)
+ i2c_delay(T_HOLD)
+ xbit $r3 $flags $p1
+ bset $flags $p1
+ i2c_bitr_done:
+ ret
+
+i2c_get_byte:
+ mov $r5 0
+ mov $r4 8
+ i2c_get_byte_next:
+ shl b32 $r5 1
+ i2c_trace_call(i2c_bitr)
+ bra not $p1 #i2c_get_byte_done
+ or $r5 $r3
+ sub b32 $r4 1
+ bra nz #i2c_get_byte_next
+ mov $r3 1
+ i2c_trace_call(i2c_bitw)
+ i2c_get_byte_done:
+ ret
+
+i2c_put_byte:
+ mov $r4 8
+ i2c_put_byte_next:
+ sub b32 $r4 1
+ xbit $r3 $r5 $r4
+ i2c_trace_call(i2c_bitw)
+ bra not $p1 #i2c_put_byte_done
+ cmp b32 $r4 0
+ bra ne #i2c_put_byte_next
+ i2c_trace_call(i2c_bitr)
+ bra not $p1 #i2c_put_byte_done
+ i2c_trace_next()
+ cmp b32 $r3 1
+ bra ne #i2c_put_byte_done
+ bclr $flags $p1 // nack
+ i2c_put_byte_done:
+ ret
+
+i2c_addr:
+ i2c_trace_call(i2c_start)
+ bra not $p1 #i2c_addr_done
+ extr $r3 $r12 I2C__MSG_DATA0_ADDR
+ shl b32 $r3 1
+ or $r5 $r3
+ i2c_trace_call(i2c_put_byte)
+ i2c_addr_done:
+ ret
+
+i2c_acquire_addr:
+ extr $r14 $r12 I2C__MSG_DATA0_PORT
+#if NVKM_PPWR_CHIPSET < GF119
+ shl b32 $r14 2
+ add b32 $r14 #i2c_ctrl
+ ld b32 $r14 D[$r14]
+#else
+ shl b32 $r14 5
+ add b32 $r14 0x00d014
+#endif
+ ret
+
+i2c_acquire:
+ call(i2c_acquire_addr)
+ call(rd32)
+ bset $r13 3
+ call(wr32)
+ ret
+
+i2c_release:
+ call(i2c_acquire_addr)
+ call(rd32)
+ bclr $r13 3
+ call(wr32)
+ ret
+
+// description
+//
+// $r15 - current (i2c)
+// $r14 - sender process name
+// $r13 - message
+// $r12 - data0
+// $r11 - data1
+// $r0 - zero
+i2c_recv:
+ bclr $flags $p1
+ extr $r1 $r12 I2C__MSG_DATA0_PORT
+ shl b32 $r1 2
+ cmp b32 $r1 (#i2c_sda_map - #i2c_scl_map)
+ bra ge #i2c_recv_done
+ add b32 $r3 $r1 #i2c_sda_map
+ ld b32 $r2 D[$r3]
+ add b32 $r3 $r1 #i2c_scl_map
+ ld b32 $r1 D[$r3]
+
+ bset $flags $p2
+ push $r13
+ push $r14
+
+ push $r13
+ i2c_trace_init()
+ i2c_trace_call(i2c_acquire)
+ pop $r13
+
+ cmp b32 $r13 I2C__MSG_RD08
+ bra ne #i2c_recv_not_rd08
+ mov $r5 0
+ i2c_trace_call(i2c_addr)
+ bra not $p1 #i2c_recv_done
+ extr $r5 $r12 I2C__MSG_DATA0_RD08_REG
+ i2c_trace_call(i2c_put_byte)
+ bra not $p1 #i2c_recv_done
+ mov $r5 1
+ i2c_trace_call(i2c_addr)
+ bra not $p1 #i2c_recv_done
+ i2c_trace_call(i2c_get_byte)
+ bra not $p1 #i2c_recv_done
+ ins $r11 $r5 I2C__MSG_DATA1_RD08_VAL
+ i2c_trace_call(i2c_stop)
+ mov b32 $r11 $r5
+ clear b32 $r7
+ bra #i2c_recv_done
+
+ i2c_recv_not_rd08:
+ cmp b32 $r13 I2C__MSG_WR08
+ bra ne #i2c_recv_not_wr08
+ mov $r5 0
+ call(i2c_addr)
+ bra not $p1 #i2c_recv_done
+ extr $r5 $r12 I2C__MSG_DATA0_WR08_REG
+ call(i2c_put_byte)
+ bra not $p1 #i2c_recv_done
+ mov $r5 0
+ call(i2c_addr)
+ bra not $p1 #i2c_recv_done
+ extr $r5 $r11 I2C__MSG_DATA1_WR08_VAL
+ call(i2c_put_byte)
+ bra not $p1 #i2c_recv_done
+ call(i2c_stop)
+ clear b32 $r7
+ extr $r5 $r12 I2C__MSG_DATA0_WR08_SYNC
+ bra nz #i2c_recv_done
+ bclr $flags $p2
+ bra #i2c_recv_done
+
+ i2c_recv_not_wr08:
+
+ i2c_recv_done:
+ extr $r14 $r12 I2C__MSG_DATA0_PORT
+ call(i2c_release)
+
+ pop $r14
+ pop $r13
+ bra not $p2 #i2c_recv_exit
+ mov b32 $r12 $r7
+ call(send)
+
+ i2c_recv_exit:
+ ret
+
+// description
+//
+// $r15 - current (i2c)
+// $r0 - zero
+i2c_init:
+ ret
+#endif
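The new i2c_.fuc above bit-bangs I2C over the PPWR GPIO lines; the core of it is i2c_raise_scl, which releases SCL and then polls it every T_RISEFALL ns until it reads high or T_TIMEOUT ns have passed, tolerating slaves that stretch the clock. A C rendering of that loop, with stub callbacks standing in for the NV_PPWR_OUTPUT/INPUT accesses (struct bitbang and the stubs are assumptions for illustration):

#include <stdbool.h>
#include <stdio.h>

#define T_TIMEOUT  2200000      /* ns, matches the .fuc constants */
#define T_RISEFALL    1000

struct bitbang {
        void (*drive_scl)(bool hi);
        bool (*sense_scl)(void);
        void (*delay_ns)(unsigned ns);
};

static bool i2c_raise_scl(struct bitbang *bus)
{
        unsigned tries = T_TIMEOUT / T_RISEFALL;

        bus->drive_scl(true);                   /* release the line */
        while (tries--) {
                bus->delay_ns(T_RISEFALL);
                if (bus->sense_scl())           /* line really went high */
                        return true;
        }
        return false;                           /* slave stretched past the timeout */
}

/* trivial stand-ins so the sketch runs */
static bool scl_line = false;
static void drv(bool hi) { scl_line = hi; }
static bool sns(void) { return scl_line; }
static void dly(unsigned ns) { (void)ns; }

int main(void)
{
        struct bitbang bus = { drv, sns, dly };
        printf("scl high: %d\n", i2c_raise_scl(&bus));
        return 0;
}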
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
index 0a7b05fa5c11..8f29badd785f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
@@ -51,12 +51,12 @@ time_next: .b32 0
// $r0 - zero
rd32:
nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
- mov $r14 NV_PPWR_MMIO_CTRL_OP_RD
- sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
- nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
+ mov $r13 NV_PPWR_MMIO_CTRL_OP_RD
+ sethi $r13 NV_PPWR_MMIO_CTRL_TRIGGER
+ nv_iowr(NV_PPWR_MMIO_CTRL, $r13)
rd32_wait:
- nv_iord($r14, NV_PPWR_MMIO_CTRL)
- and $r14 NV_PPWR_MMIO_CTRL_STATUS
+ nv_iord($r13, NV_PPWR_MMIO_CTRL)
+ and $r13 NV_PPWR_MMIO_CTRL_STATUS
bra nz #rd32_wait
nv_iord($r13, NV_PPWR_MMIO_DATA)
ret
@@ -70,23 +70,25 @@ rd32:
wr32:
nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
nv_iowr(NV_PPWR_MMIO_DATA, $r13)
- mov $r14 NV_PPWR_MMIO_CTRL_OP_WR
- or $r14 NV_PPWR_MMIO_CTRL_MASK_B32_0
- sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
+ mov $r13 NV_PPWR_MMIO_CTRL_OP_WR
+ or $r13 NV_PPWR_MMIO_CTRL_MASK_B32_0
+ sethi $r13 NV_PPWR_MMIO_CTRL_TRIGGER
#ifdef NVKM_FALCON_MMIO_TRAP
- mov $r8 NV_PPWR_INTR_TRIGGER_USER1
- nv_iowr(NV_PPWR_INTR_TRIGGER, $r8)
+ push $r13
+ mov $r13 NV_PPWR_INTR_TRIGGER_USER1
+ nv_iowr(NV_PPWR_INTR_TRIGGER, $r13)
wr32_host:
- nv_iord($r8, NV_PPWR_INTR)
- and $r8 NV_PPWR_INTR_USER1
+ nv_iord($r13, NV_PPWR_INTR)
+ and $r13 NV_PPWR_INTR_USER1
bra nz #wr32_host
+ pop $r13
#endif
- nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
+ nv_iowr(NV_PPWR_MMIO_CTRL, $r13)
wr32_wait:
- nv_iord($r14, NV_PPWR_MMIO_CTRL)
- and $r14 NV_PPWR_MMIO_CTRL_STATUS
+ nv_iord($r13, NV_PPWR_MMIO_CTRL)
+ and $r13 NV_PPWR_MMIO_CTRL_STATUS
bra nz #wr32_wait
ret
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
index 2a74ea907604..e2a63ac5422b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
@@ -83,6 +83,50 @@
#define NV_PPWR_MMIO_CTRL_OP_WR 0x00000002
#define NV_PPWR_OUTPUT 0x07c0
#define NV_PPWR_OUTPUT_FB_PAUSE 0x00000004
+#if NVKM_PPWR_CHIPSET < GF119
+#define NV_PPWR_OUTPUT_I2C_3_SCL 0x00000100
+#define NV_PPWR_OUTPUT_I2C_3_SDA 0x00000200
+#define NV_PPWR_OUTPUT_I2C_0_SCL 0x00001000
+#define NV_PPWR_OUTPUT_I2C_0_SDA 0x00002000
+#define NV_PPWR_OUTPUT_I2C_1_SCL 0x00004000
+#define NV_PPWR_OUTPUT_I2C_1_SDA 0x00008000
+#define NV_PPWR_OUTPUT_I2C_2_SCL 0x00010000
+#define NV_PPWR_OUTPUT_I2C_2_SDA 0x00020000
+#define NV_PPWR_OUTPUT_I2C_4_SCL 0x00040000
+#define NV_PPWR_OUTPUT_I2C_4_SDA 0x00080000
+#define NV_PPWR_OUTPUT_I2C_5_SCL 0x00100000
+#define NV_PPWR_OUTPUT_I2C_5_SDA 0x00200000
+#define NV_PPWR_OUTPUT_I2C_6_SCL 0x00400000
+#define NV_PPWR_OUTPUT_I2C_6_SDA 0x00800000
+#define NV_PPWR_OUTPUT_I2C_7_SCL 0x01000000
+#define NV_PPWR_OUTPUT_I2C_7_SDA 0x02000000
+#define NV_PPWR_OUTPUT_I2C_8_SCL 0x04000000
+#define NV_PPWR_OUTPUT_I2C_8_SDA 0x08000000
+#define NV_PPWR_OUTPUT_I2C_9_SCL 0x10000000
+#define NV_PPWR_OUTPUT_I2C_9_SDA 0x20000000
+#else
+#define NV_PPWR_OUTPUT_I2C_0_SCL 0x00000400
+#define NV_PPWR_OUTPUT_I2C_1_SCL 0x00000800
+#define NV_PPWR_OUTPUT_I2C_2_SCL 0x00001000
+#define NV_PPWR_OUTPUT_I2C_3_SCL 0x00002000
+#define NV_PPWR_OUTPUT_I2C_4_SCL 0x00004000
+#define NV_PPWR_OUTPUT_I2C_5_SCL 0x00008000
+#define NV_PPWR_OUTPUT_I2C_6_SCL 0x00010000
+#define NV_PPWR_OUTPUT_I2C_7_SCL 0x00020000
+#define NV_PPWR_OUTPUT_I2C_8_SCL 0x00040000
+#define NV_PPWR_OUTPUT_I2C_9_SCL 0x00080000
+#define NV_PPWR_OUTPUT_I2C_0_SDA 0x00100000
+#define NV_PPWR_OUTPUT_I2C_1_SDA 0x00200000
+#define NV_PPWR_OUTPUT_I2C_2_SDA 0x00400000
+#define NV_PPWR_OUTPUT_I2C_3_SDA 0x00800000
+#define NV_PPWR_OUTPUT_I2C_4_SDA 0x01000000
+#define NV_PPWR_OUTPUT_I2C_5_SDA 0x02000000
+#define NV_PPWR_OUTPUT_I2C_6_SDA 0x04000000
+#define NV_PPWR_OUTPUT_I2C_7_SDA 0x08000000
+#define NV_PPWR_OUTPUT_I2C_8_SDA 0x10000000
+#define NV_PPWR_OUTPUT_I2C_9_SDA 0x20000000
+#endif
+#define NV_PPWR_INPUT 0x07c4
#define NV_PPWR_OUTPUT_SET 0x07e0
#define NV_PPWR_OUTPUT_SET_FB_PAUSE 0x00000004
#define NV_PPWR_OUTPUT_CLR 0x07e4
@@ -125,6 +169,15 @@
*/ .b32 0 /*
*/ .skip 64
+#if NV_PPWR_CHIPSET < GK208
+#define imm32(reg,val) /*
+*/ movw reg ((val) & 0x0000ffff) /*
+*/ sethi reg ((val) & 0xffff0000)
+#else
+#define imm32(reg,val) /*
+*/ mov reg (val)
+#endif
+
#ifndef NVKM_FALCON_UNSHIFTED_IO
#define nv_iord(reg,ior) /*
*/ mov reg ior /*
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
index 947be536daef..17a8a383d91a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
@@ -37,6 +37,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_PROC
@@ -46,6 +47,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_DATA
@@ -57,6 +59,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
index 9342e2d7d3b7..4bd43a99fdcc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -89,16 +89,9 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x54534554,
- 0x00000494,
- 0x00000475,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x5f433249,
+ 0x00000877,
+ 0x0000071e,
0x00000000,
0x00000000,
0x00000000,
@@ -111,16 +104,6 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x454c4449,
- 0x0000049f,
- 0x0000049d,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
0x00000000,
0x00000000,
0x00000000,
@@ -128,17 +111,16 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x54534554,
+ 0x00000898,
+ 0x00000879,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
-/* 0x0210: proc_list_tail */
-/* 0x0210: time_prev */
0x00000000,
-/* 0x0214: time_next */
0x00000000,
-/* 0x0218: fifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -151,6 +133,9 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x454c4449,
+ 0x000008a3,
+ 0x000008a1,
0x00000000,
0x00000000,
0x00000000,
@@ -170,9 +155,12 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
0x00000000,
-/* 0x0298: rfifo_queue */
+/* 0x026c: time_next */
0x00000000,
+/* 0x0270: fifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -204,31 +192,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0318: memx_func_head */
- 0x00010000,
- 0x00000000,
- 0x000003a9,
-/* 0x0324: memx_func_next */
- 0x00000001,
- 0x00000000,
- 0x000003c7,
- 0x00000002,
- 0x00000002,
- 0x000003df,
- 0x00040003,
- 0x00000000,
- 0x00000407,
- 0x00010004,
- 0x00000000,
- 0x00000421,
-/* 0x0354: memx_func_tail */
-/* 0x0354: memx_data_head */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
0x00000000,
+/* 0x02f0: rfifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -261,10 +226,25 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0370: memx_func_head */
+ 0x00010000,
0x00000000,
+ 0x000003a9,
+/* 0x037c: memx_func_next */
+ 0x00000001,
0x00000000,
+ 0x000003c7,
+ 0x00000002,
+ 0x00000002,
+ 0x000003df,
+ 0x00040003,
0x00000000,
+ 0x00000407,
+ 0x00010004,
0x00000000,
+ 0x00000421,
+/* 0x03ac: memx_func_tail */
+/* 0x03ac: memx_data_head */
0x00000000,
0x00000000,
0x00000000,
@@ -735,7 +715,6 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0b54: memx_data_tail */
0x00000000,
0x00000000,
0x00000000,
@@ -778,6 +757,29 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0bac: memx_data_tail */
+/* 0x0bac: i2c_scl_map */
+ 0x00000400,
+ 0x00000800,
+ 0x00001000,
+ 0x00002000,
+ 0x00004000,
+ 0x00008000,
+ 0x00010000,
+ 0x00020000,
+ 0x00040000,
+ 0x00080000,
+/* 0x0bd4: i2c_sda_map */
+ 0x00100000,
+ 0x00200000,
+ 0x00400000,
+ 0x00800000,
+ 0x01000000,
+ 0x02000000,
+ 0x04000000,
+ 0x08000000,
+ 0x10000000,
+ 0x20000000,
0x00000000,
};
@@ -786,13 +788,13 @@ uint32_t nv108_pwr_code[] = {
/* 0x0004: rd32 */
0xf607a040,
0x04bd000e,
- 0xe3f0010e,
+ 0xd3f0010d,
0x07ac4001,
- 0xbd000ef6,
+ 0xbd000df6,
/* 0x0019: rd32_wait */
- 0x07ac4e04,
- 0xf100eecf,
- 0xf47000e4,
+ 0x07ac4d04,
+ 0xf100ddcf,
+ 0xf47000d4,
0xa44df61b,
0x00ddcf07,
/* 0x002e: wr32 */
@@ -800,14 +802,14 @@ uint32_t nv108_pwr_code[] = {
0x000ef607,
0xa44004bd,
0x000df607,
- 0x020e04bd,
- 0xf0f0e5f0,
- 0xac4001e3,
- 0x000ef607,
+ 0x020d04bd,
+ 0xf0f0d5f0,
+ 0xac4001d3,
+ 0x000df607,
/* 0x004e: wr32_wait */
- 0xac4e04bd,
- 0x00eecf07,
- 0x7000e4f1,
+ 0xac4d04bd,
+ 0x00ddcf07,
+ 0x7000d4f1,
0xf8f61bf4,
/* 0x005d: nsec */
0xcf2c0800,
@@ -832,20 +834,20 @@ uint32_t nv108_pwr_code[] = {
0x03e99800,
0xf40096b0,
0x0a98280b,
- 0x029abb84,
+ 0x029abb9a,
0x0d0e1cf4,
0x01de7e01,
0xf494bd00,
/* 0x00b2: intr_watchdog_next_time */
0x0a98140e,
- 0x00a6b085,
+ 0x00a6b09b,
0xa6080bf4,
0x061cf49a,
/* 0x00c0: intr_watchdog_next_time_set */
/* 0x00c3: intr_watchdog_next_proc */
- 0xb58509b5,
+ 0xb59b09b5,
0xe0b603e9,
- 0x10e6b158,
+ 0x68e6b158,
0xc81bf402,
/* 0x00d2: intr */
0x00f900f8,
@@ -862,15 +864,15 @@ uint32_t nv108_pwr_code[] = {
0x080804bd,
0xc40088cf,
0x0bf40289,
- 0x8500b51f,
+ 0x9b00b51f,
0x957e580e,
0x09980000,
- 0x0096b085,
+ 0x0096b09b,
0x000d0bf4,
0x0009f634,
0x09b504bd,
/* 0x0125: intr_skip_watchdog */
- 0x0089e484,
+ 0x0089e49a,
0x360bf408,
0xcf068849,
0x9ac40099,
@@ -918,7 +920,7 @@ uint32_t nv108_pwr_code[] = {
/* 0x01c6: timer_reset */
0x3400161e,
0xbd000ef6,
- 0x840eb504,
+ 0x9a0eb504,
/* 0x01d0: timer_enable */
0x38000108,
0xbd0008f6,
@@ -949,7 +951,7 @@ uint32_t nv108_pwr_code[] = {
0xa6008a98,
0x100bf4ae,
0xb15880b6,
- 0xf4021086,
+ 0xf4026886,
0x32f4f11b,
/* 0x0239: find_done */
0xfc8eb201,
@@ -1009,7 +1011,7 @@ uint32_t nv108_pwr_code[] = {
0x0bf412a6,
0x071ec42e,
0xb704ee94,
- 0x980218e0,
+ 0x980270e0,
0xec9803eb,
0x01ed9802,
0x7e00ee98,
@@ -1031,7 +1033,7 @@ uint32_t nv108_pwr_code[] = {
0xf412a608,
0x23c4ef0b,
0x0434b607,
- 0x029830b7,
+ 0x02f030b7,
0xb5033bb5,
0x3db5023c,
0x003eb501,
@@ -1044,11 +1046,11 @@ uint32_t nv108_pwr_code[] = {
/* 0x0379: host_init */
0x00804100,
0xf11014b6,
- 0x40021815,
+ 0x40027015,
0x01f604d0,
0x4104bd00,
0x14b60080,
- 0x9815f110,
+ 0xf015f110,
0x04dc4002,
0xbd0001f6,
0x40010104,
@@ -1101,13 +1103,13 @@ uint32_t nv108_pwr_code[] = {
0x001398b2,
0x950410b6,
0x30f01034,
- 0xc835980c,
+ 0xde35980c,
0x12a655f9,
0xfced1ef4,
0x7ee0fcd0,
0xf800023f,
/* 0x0455: memx_info */
- 0x03544c00,
+ 0x03ac4c00,
0x7e08004b,
0xf800023f,
/* 0x0461: memx_recv */
@@ -1119,7 +1121,301 @@ uint32_t nv108_pwr_code[] = {
/* 0x0471: perf_recv */
/* 0x0473: perf_init */
0xf800f800,
-/* 0x0475: test_recv */
+/* 0x0475: i2c_drive_scl */
+ 0x0036b000,
+ 0x400d0bf4,
+ 0x01f607e0,
+ 0xf804bd00,
+/* 0x0485: i2c_drive_scl_lo */
+ 0x07e44000,
+ 0xbd0001f6,
+/* 0x048f: i2c_drive_sda */
+ 0xb000f804,
+ 0x0bf40036,
+ 0x07e0400d,
+ 0xbd0002f6,
+/* 0x049f: i2c_drive_sda_lo */
+ 0x4000f804,
+ 0x02f607e4,
+ 0xf804bd00,
+/* 0x04a9: i2c_sense_scl */
+ 0x0132f400,
+ 0xcf07c443,
+ 0x31fd0033,
+ 0x060bf404,
+/* 0x04bb: i2c_sense_scl_done */
+ 0xf80131f4,
+/* 0x04bd: i2c_sense_sda */
+ 0x0132f400,
+ 0xcf07c443,
+ 0x32fd0033,
+ 0x060bf404,
+/* 0x04cf: i2c_sense_sda_done */
+ 0xf80131f4,
+/* 0x04d1: i2c_raise_scl */
+ 0x4440f900,
+ 0x01030898,
+ 0x0004757e,
+/* 0x04dc: i2c_raise_scl_wait */
+ 0x7e03e84e,
+ 0x7e00005d,
+ 0xf40004a9,
+ 0x42b60901,
+ 0xef1bf401,
+/* 0x04f0: i2c_raise_scl_done */
+ 0x00f840fc,
+/* 0x04f4: i2c_start */
+ 0x0004a97e,
+ 0x7e0d11f4,
+ 0xf40004bd,
+ 0x0ef40611,
+/* 0x0505: i2c_start_rep */
+ 0x7e00032e,
+ 0x03000475,
+ 0x048f7e01,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0xd17e50fc,
+ 0x64b60004,
+ 0x1d11f404,
+/* 0x0530: i2c_start_send */
+ 0x8f7e0003,
+ 0x884e0004,
+ 0x005d7e13,
+ 0x7e000300,
+ 0x4e000475,
+ 0x5d7e1388,
+/* 0x054a: i2c_start_out */
+ 0x00f80000,
+/* 0x054c: i2c_stop */
+ 0x757e0003,
+ 0x00030004,
+ 0x00048f7e,
+ 0x7e03e84e,
+ 0x0300005d,
+ 0x04757e01,
+ 0x13884e00,
+ 0x00005d7e,
+ 0x8f7e0103,
+ 0x884e0004,
+ 0x005d7e13,
+/* 0x057b: i2c_bitw */
+ 0x7e00f800,
+ 0x4e00048f,
+ 0x5d7e03e8,
+ 0x76bb0000,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb60004d1,
+ 0x11f40464,
+ 0x13884e17,
+ 0x00005d7e,
+ 0x757e0003,
+ 0x884e0004,
+ 0x005d7e13,
+/* 0x05b9: i2c_bitw_out */
+/* 0x05bb: i2c_bitr */
+ 0x0300f800,
+ 0x048f7e01,
+ 0x03e84e00,
+ 0x00005d7e,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x04d17e50,
+ 0x0464b600,
+ 0x7e1a11f4,
+ 0x030004bd,
+ 0x04757e00,
+ 0x13884e00,
+ 0x00005d7e,
+ 0xf4013cf0,
+/* 0x05fe: i2c_bitr_done */
+ 0x00f80131,
+/* 0x0600: i2c_get_byte */
+ 0x08040005,
+/* 0x0604: i2c_get_byte_next */
+ 0xbb0154b6,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x0005bb7e,
+ 0xf40464b6,
+ 0x53fd2a11,
+ 0x0142b605,
+ 0x03d81bf4,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x7b7e50fc,
+ 0x64b60005,
+/* 0x064d: i2c_get_byte_done */
+/* 0x064f: i2c_put_byte */
+ 0x0400f804,
+/* 0x0651: i2c_put_byte_next */
+ 0x0142b608,
+ 0xbb3854ff,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x00057b7e,
+ 0xf40464b6,
+ 0x46b03411,
+ 0xd81bf400,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x05bb7e50,
+ 0x0464b600,
+ 0xbb0f11f4,
+ 0x36b00076,
+ 0x061bf401,
+/* 0x06a7: i2c_put_byte_done */
+ 0xf80132f4,
+/* 0x06a9: i2c_addr */
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0xf47e50fc,
+ 0x64b60004,
+ 0x2911f404,
+ 0x012ec3e7,
+ 0xfd0134b6,
+ 0x76bb0553,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb600064f,
+/* 0x06ee: i2c_addr_done */
+ 0x00f80464,
+/* 0x06f0: i2c_acquire_addr */
+ 0xb6f8cec7,
+ 0xe0b705e4,
+ 0x00f8d014,
+/* 0x06fc: i2c_acquire */
+ 0x0006f07e,
+ 0x0000047e,
+ 0x7e03d9f0,
+ 0xf800002e,
+/* 0x070d: i2c_release */
+ 0x06f07e00,
+ 0x00047e00,
+ 0x03daf000,
+ 0x00002e7e,
+/* 0x071e: i2c_recv */
+ 0x32f400f8,
+ 0xf8c1c701,
+ 0xb00214b6,
+ 0x1ff52816,
+ 0x13b80137,
+ 0x98000bd4,
+ 0x13b80032,
+ 0x98000bac,
+ 0x31f40031,
+ 0xf9d0f902,
+ 0xf1d0f9e0,
+ 0xf1000067,
+ 0x92100063,
+ 0x76bb0167,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb60006fc,
+ 0xd0fc0464,
+ 0xf500d6b0,
+ 0x0500b01b,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0xa97e50fc,
+ 0x64b60006,
+ 0xcc11f504,
+ 0xe0c5c700,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x064f7e50,
+ 0x0464b600,
+ 0x00a911f5,
+ 0x76bb0105,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb60006a9,
+ 0x11f50464,
+ 0x76bb0087,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb6000600,
+ 0x11f40464,
+ 0xe05bcb67,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x054c7e50,
+ 0x0464b600,
+ 0x74bd5bb2,
+/* 0x0823: i2c_recv_not_rd08 */
+ 0xb0410ef4,
+ 0x1bf401d6,
+ 0x7e00053b,
+ 0xf40006a9,
+ 0xc5c73211,
+ 0x064f7ee0,
+ 0x2811f400,
+ 0xa97e0005,
+ 0x11f40006,
+ 0xe0b5c71f,
+ 0x00064f7e,
+ 0x7e1511f4,
+ 0xbd00054c,
+ 0x08c5c774,
+ 0xf4091bf4,
+ 0x0ef40232,
+/* 0x0861: i2c_recv_not_wr08 */
+/* 0x0861: i2c_recv_done */
+ 0xf8cec703,
+ 0x00070d7e,
+ 0xd0fce0fc,
+ 0xb20912f4,
+ 0x023f7e7c,
+/* 0x0875: i2c_recv_exit */
+/* 0x0877: i2c_init */
+ 0xf800f800,
+/* 0x0879: test_recv */
0x04584100,
0xb60011cf,
0x58400110,
@@ -1128,26 +1424,26 @@ uint32_t nv108_pwr_code[] = {
0xe3f1d900,
0x967e134f,
0x00f80001,
-/* 0x0494: test_init */
+/* 0x0898: test_init */
0x7e08004e,
0xf8000196,
-/* 0x049d: idle_recv */
-/* 0x049f: idle */
+/* 0x08a1: idle_recv */
+/* 0x08a3: idle */
0xf400f800,
0x54410031,
0x0011cf04,
0x400110b6,
0x01f60454,
-/* 0x04b3: idle_loop */
+/* 0x08b7: idle_loop */
0x0104bd00,
0x0232f458,
-/* 0x04b8: idle_proc */
-/* 0x04b8: idle_proc_exec */
+/* 0x08bc: idle_proc */
+/* 0x08bc: idle_proc_exec */
0x1eb210f9,
0x0002487e,
0x11f410fc,
0x0231f409,
-/* 0x04cb: idle_proc_next */
+/* 0x08cf: idle_proc_next */
0xb6f00ef4,
0x1fa65810,
0xf4e81bf4,
@@ -1161,5 +1457,4 @@ uint32_t nv108_pwr_code[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
index 6fde0b89e5aa..6744fcc06151 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
@@ -37,6 +37,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_PROC
@@ -46,6 +47,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_DATA
@@ -57,6 +59,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
index 0fa4d7dcd407..5a73fa620978 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -89,9 +89,31 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x5f433249,
+ 0x00000982,
+ 0x00000825,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x54534554,
- 0x0000057b,
- 0x00000554,
+ 0x000009ab,
+ 0x00000984,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +134,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000587,
- 0x00000585,
+ 0x000009b7,
+ 0x000009b5,
0x00000000,
0x00000000,
0x00000000,
@@ -133,12 +155,12 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0210: proc_list_tail */
-/* 0x0210: time_prev */
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
0x00000000,
-/* 0x0214: time_next */
+/* 0x026c: time_next */
0x00000000,
-/* 0x0218: fifo_queue */
+/* 0x0270: fifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -171,7 +193,7 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0298: rfifo_queue */
+/* 0x02f0: rfifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -204,11 +226,11 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0318: memx_func_head */
+/* 0x0370: memx_func_head */
0x00010000,
0x00000000,
0x0000046f,
-/* 0x0324: memx_func_next */
+/* 0x037c: memx_func_next */
0x00000001,
0x00000000,
0x00000496,
@@ -221,8 +243,18 @@ uint32_t nva3_pwr_data[] = {
0x00010004,
0x00000000,
0x000004fc,
-/* 0x0354: memx_func_tail */
-/* 0x0354: memx_data_head */
+/* 0x03ac: memx_func_tail */
+/* 0x03ac: memx_data_head */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
@@ -725,6 +757,42 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0bac: memx_data_tail */
+/* 0x0bac: i2c_scl_map */
+ 0x00001000,
+ 0x00004000,
+ 0x00010000,
+ 0x00000100,
+ 0x00040000,
+ 0x00100000,
+ 0x00400000,
+ 0x01000000,
+ 0x04000000,
+ 0x10000000,
+/* 0x0bd4: i2c_sda_map */
+ 0x00002000,
+ 0x00008000,
+ 0x00020000,
+ 0x00000200,
+ 0x00080000,
+ 0x00200000,
+ 0x00800000,
+ 0x02000000,
+ 0x08000000,
+ 0x20000000,
+/* 0x0bfc: i2c_ctrl */
+ 0x0000e138,
+ 0x0000e150,
+ 0x0000e168,
+ 0x0000e180,
+ 0x0000e254,
+ 0x0000e274,
+ 0x0000e764,
+ 0x0000e780,
+ 0x0000e79c,
+ 0x0000e7b8,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
@@ -735,7 +803,6 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0b54: memx_data_tail */
0x00000000,
0x00000000,
0x00000000,
@@ -787,15 +854,15 @@ uint32_t nva3_pwr_code[] = {
0x07a007f1,
0xd00604b6,
0x04bd000e,
- 0xf001e7f0,
- 0x07f101e3,
+ 0xf001d7f0,
+ 0x07f101d3,
0x04b607ac,
- 0x000ed006,
+ 0x000dd006,
/* 0x0022: rd32_wait */
- 0xe7f104bd,
- 0xe4b607ac,
- 0x00eecf06,
- 0x7000e4f1,
+ 0xd7f104bd,
+ 0xd4b607ac,
+ 0x00ddcf06,
+ 0x7000d4f1,
0xf1f21bf4,
0xb607a4d7,
0xddcf06d4,
@@ -807,15 +874,15 @@ uint32_t nva3_pwr_code[] = {
0xb607a407,
0x0dd00604,
0xf004bd00,
- 0xe5f002e7,
- 0x01e3f0f0,
+ 0xd5f002d7,
+ 0x01d3f0f0,
0x07ac07f1,
0xd00604b6,
- 0x04bd000e,
+ 0x04bd000d,
/* 0x006c: wr32_wait */
- 0x07ace7f1,
- 0xcf06e4b6,
- 0xe4f100ee,
+ 0x07acd7f1,
+ 0xcf06d4b6,
+ 0xd4f100dd,
0x1bf47000,
/* 0x007f: nsec */
0xf000f8f2,
@@ -845,21 +912,21 @@ uint32_t nva3_pwr_code[] = {
0x9800f8df,
0x96b003e9,
0x2a0bf400,
- 0xbb840a98,
+ 0xbb9a0a98,
0x1cf4029a,
0x01d7f00f,
0x025421f5,
0x0ef494bd,
/* 0x00e9: intr_watchdog_next_time */
- 0x850a9815,
+ 0x9b0a9815,
0xf400a6b0,
0x9ab8090b,
0x061cf406,
/* 0x00f8: intr_watchdog_next_time_set */
/* 0x00fb: intr_watchdog_next_proc */
- 0x80850980,
+ 0x809b0980,
0xe0b603e9,
- 0x10e6b158,
+ 0x68e6b158,
0xc61bf402,
/* 0x010a: intr */
0x00f900f8,
@@ -880,15 +947,15 @@ uint32_t nva3_pwr_code[] = {
0x0088cf06,
0xf40289c4,
0x0080230b,
- 0x58e7f085,
+ 0x58e7f09b,
0x98cb21f4,
- 0x96b08509,
+ 0x96b09b09,
0x110bf400,
0xb63407f0,
0x09d00604,
0x8004bd00,
/* 0x016e: intr_skip_watchdog */
- 0x89e48409,
+ 0x89e49a09,
0x0bf40800,
0x8897f148,
0x0694b606,
@@ -948,7 +1015,7 @@ uint32_t nva3_pwr_code[] = {
0x000ed006,
0x0e8004bd,
/* 0x0241: timer_enable */
- 0x0187f084,
+ 0x0187f09a,
0xb63807f0,
0x08d00604,
/* 0x024f: timer_done */
@@ -979,7 +1046,7 @@ uint32_t nva3_pwr_code[] = {
0xb8008a98,
0x0bf406ae,
0x5880b610,
- 0x021086b1,
+ 0x026886b1,
0xf4f01bf4,
/* 0x02b2: find_done */
0x8eb90132,
@@ -1049,7 +1116,7 @@ uint32_t nva3_pwr_code[] = {
0x320bf406,
0x94071ec4,
0xe0b704ee,
- 0xeb980218,
+ 0xeb980270,
0x02ec9803,
0x9801ed98,
0x21f500ee,
@@ -1075,7 +1142,7 @@ uint32_t nva3_pwr_code[] = {
0xe60bf406,
0xb60723c4,
0x30b70434,
- 0x3b800298,
+ 0x3b8002f0,
0x023c8003,
0x80013d80,
0x20b6003e,
@@ -1090,13 +1157,13 @@ uint32_t nva3_pwr_code[] = {
/* 0x0430: host_init */
0x008017f1,
0xf11014b6,
- 0xf1021815,
+ 0xf1027015,
0xb604d007,
0x01d00604,
0xf104bd00,
0xb6008017,
0x15f11014,
- 0x07f10298,
+ 0x07f102f0,
0x04b604dc,
0x0001d006,
0x17f004bd,
@@ -1156,14 +1223,14 @@ uint32_t nva3_pwr_code[] = {
0x00139802,
0x950410b6,
0x30f01034,
- 0xc835980c,
+ 0xde35980c,
0x12b855f9,
0xec1ef406,
0xe0fcd0fc,
0x02b921f5,
/* 0x0532: memx_info */
0xc7f100f8,
- 0xb7f10354,
+ 0xb7f103ac,
0x21f50800,
0x00f802b9,
/* 0x0540: memx_recv */
@@ -1175,7 +1242,312 @@ uint32_t nva3_pwr_code[] = {
/* 0x0550: perf_recv */
/* 0x0552: perf_init */
0x00f800f8,
-/* 0x0554: test_recv */
+/* 0x0554: i2c_drive_scl */
+ 0xf40036b0,
+ 0x07f1110b,
+ 0x04b607e0,
+ 0x0001d006,
+ 0x00f804bd,
+/* 0x0568: i2c_drive_scl_lo */
+ 0x07e407f1,
+ 0xd00604b6,
+ 0x04bd0001,
+/* 0x0576: i2c_drive_sda */
+ 0x36b000f8,
+ 0x110bf400,
+ 0x07e007f1,
+ 0xd00604b6,
+ 0x04bd0002,
+/* 0x058a: i2c_drive_sda_lo */
+ 0x07f100f8,
+ 0x04b607e4,
+ 0x0002d006,
+ 0x00f804bd,
+/* 0x0598: i2c_sense_scl */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0431fd00,
+ 0xf4060bf4,
+/* 0x05ae: i2c_sense_scl_done */
+ 0x00f80131,
+/* 0x05b0: i2c_sense_sda */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0432fd00,
+ 0xf4060bf4,
+/* 0x05c6: i2c_sense_sda_done */
+ 0x00f80131,
+/* 0x05c8: i2c_raise_scl */
+ 0x47f140f9,
+ 0x37f00898,
+ 0x5421f501,
+/* 0x05d5: i2c_raise_scl_wait */
+ 0xe8e7f105,
+ 0x7f21f403,
+ 0x059821f5,
+ 0xb60901f4,
+ 0x1bf40142,
+/* 0x05e9: i2c_raise_scl_done */
+ 0xf840fcef,
+/* 0x05ed: i2c_start */
+ 0x9821f500,
+ 0x0d11f405,
+ 0x05b021f5,
+ 0xf40611f4,
+/* 0x05fe: i2c_start_rep */
+ 0x37f0300e,
+ 0x5421f500,
+ 0x0137f005,
+ 0x057621f5,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xc821f550,
+ 0x0464b605,
+/* 0x062b: i2c_start_send */
+ 0xf01f11f4,
+ 0x21f50037,
+ 0xe7f10576,
+ 0x21f41388,
+ 0x0037f07f,
+ 0x055421f5,
+ 0x1388e7f1,
+/* 0x0647: i2c_start_out */
+ 0xf87f21f4,
+/* 0x0649: i2c_stop */
+ 0x0037f000,
+ 0x055421f5,
+ 0xf50037f0,
+ 0xf1057621,
+ 0xf403e8e7,
+ 0x37f07f21,
+ 0x5421f501,
+ 0x88e7f105,
+ 0x7f21f413,
+ 0xf50137f0,
+ 0xf1057621,
+ 0xf41388e7,
+ 0x00f87f21,
+/* 0x067c: i2c_bitw */
+ 0x057621f5,
+ 0x03e8e7f1,
+ 0xbb7f21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x05c821f5,
+ 0xf40464b6,
+ 0xe7f11811,
+ 0x21f41388,
+ 0x0037f07f,
+ 0x055421f5,
+ 0x1388e7f1,
+/* 0x06bb: i2c_bitw_out */
+ 0xf87f21f4,
+/* 0x06bd: i2c_bitr */
+ 0x0137f000,
+ 0x057621f5,
+ 0x03e8e7f1,
+ 0xbb7f21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x05c821f5,
+ 0xf40464b6,
+ 0x21f51b11,
+ 0x37f005b0,
+ 0x5421f500,
+ 0x88e7f105,
+ 0x7f21f413,
+ 0xf4013cf0,
+/* 0x0702: i2c_bitr_done */
+ 0x00f80131,
+/* 0x0704: i2c_get_byte */
+ 0xf00057f0,
+/* 0x070a: i2c_get_byte_next */
+ 0x54b60847,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b606bd,
+ 0x2b11f404,
+ 0xb60553fd,
+ 0x1bf40142,
+ 0x0137f0d8,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x7c21f550,
+ 0x0464b606,
+/* 0x0754: i2c_get_byte_done */
+/* 0x0756: i2c_put_byte */
+ 0x47f000f8,
+/* 0x0759: i2c_put_byte_next */
+ 0x0142b608,
+ 0xbb3854ff,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x067c21f5,
+ 0xf40464b6,
+ 0x46b03411,
+ 0xd81bf400,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xbd21f550,
+ 0x0464b606,
+ 0xbb0f11f4,
+ 0x36b00076,
+ 0x061bf401,
+/* 0x07af: i2c_put_byte_done */
+ 0xf80132f4,
+/* 0x07b1: i2c_addr */
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b605ed,
+ 0x2911f404,
+ 0x012ec3e7,
+ 0xfd0134b6,
+ 0x76bb0553,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb6075621,
+/* 0x07f6: i2c_addr_done */
+ 0x00f80464,
+/* 0x07f8: i2c_acquire_addr */
+ 0xb6f8cec7,
+ 0xe0b702e4,
+ 0xee980bfc,
+/* 0x0807: i2c_acquire */
+ 0xf500f800,
+ 0xf407f821,
+ 0xd9f00421,
+ 0x3f21f403,
+/* 0x0816: i2c_release */
+ 0x21f500f8,
+ 0x21f407f8,
+ 0x03daf004,
+ 0xf83f21f4,
+/* 0x0825: i2c_recv */
+ 0x0132f400,
+ 0xb6f8c1c7,
+ 0x16b00214,
+ 0x3a1ff528,
+ 0xd413a001,
+ 0x0032980b,
+ 0x0bac13a0,
+ 0xf4003198,
+ 0xd0f90231,
+ 0xd0f9e0f9,
+ 0x000067f1,
+ 0x100063f1,
+ 0xbb016792,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x080721f5,
+ 0xfc0464b6,
+ 0x00d6b0d0,
+ 0x00b31bf5,
+ 0xbb0057f0,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x07b121f5,
+ 0xf50464b6,
+ 0xc700d011,
+ 0x76bbe0c5,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb6075621,
+ 0x11f50464,
+ 0x57f000ad,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b607b1,
+ 0x8a11f504,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b60704,
+ 0x6a11f404,
+ 0xbbe05bcb,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x064921f5,
+ 0xb90464b6,
+ 0x74bd025b,
+/* 0x092b: i2c_recv_not_rd08 */
+ 0xb0430ef4,
+ 0x1bf401d6,
+ 0x0057f03d,
+ 0x07b121f5,
+ 0xc73311f4,
+ 0x21f5e0c5,
+ 0x11f40756,
+ 0x0057f029,
+ 0x07b121f5,
+ 0xc71f11f4,
+ 0x21f5e0b5,
+ 0x11f40756,
+ 0x4921f515,
+ 0xc774bd06,
+ 0x1bf408c5,
+ 0x0232f409,
+/* 0x096b: i2c_recv_not_wr08 */
+/* 0x096b: i2c_recv_done */
+ 0xc7030ef4,
+ 0x21f5f8ce,
+ 0xe0fc0816,
+ 0x12f4d0fc,
+ 0x027cb90a,
+ 0x02b921f5,
+/* 0x0980: i2c_recv_exit */
+/* 0x0982: i2c_init */
+ 0x00f800f8,
+/* 0x0984: test_recv */
0x05d817f1,
0xcf0614b6,
0x10b60011,
@@ -1185,12 +1557,12 @@ uint32_t nva3_pwr_code[] = {
0x00e7f104,
0x4fe3f1d9,
0xf521f513,
-/* 0x057b: test_init */
+/* 0x09ab: test_init */
0xf100f801,
0xf50800e7,
0xf801f521,
-/* 0x0585: idle_recv */
-/* 0x0587: idle */
+/* 0x09b5: idle_recv */
+/* 0x09b7: idle */
0xf400f800,
0x17f10031,
0x14b605d4,
@@ -1198,32 +1570,20 @@ uint32_t nva3_pwr_code[] = {
0xf10110b6,
0xb605d407,
0x01d00604,
-/* 0x05a3: idle_loop */
+/* 0x09d3: idle_loop */
0xf004bd00,
0x32f45817,
-/* 0x05a9: idle_proc */
-/* 0x05a9: idle_proc_exec */
+/* 0x09d9: idle_proc */
+/* 0x09d9: idle_proc_exec */
0xb910f902,
0x21f5021e,
0x10fc02c2,
0xf40911f4,
0x0ef40231,
-/* 0x05bd: idle_proc_next */
+/* 0x09ed: idle_proc_next */
0x5810b6ef,
0xf4061fb8,
0x02f4e61b,
0x0028f4dd,
0x00bb0ef4,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
index eaa64da68e36..48f79434a449 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
@@ -37,6 +37,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_PROC
@@ -46,6 +47,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_DATA
@@ -57,6 +59,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
index 82c8e8b88917..4dba00d2dd1a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -89,9 +89,31 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x5f433249,
+ 0x00000982,
+ 0x00000825,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x54534554,
- 0x0000057b,
- 0x00000554,
+ 0x000009ab,
+ 0x00000984,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +134,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000587,
- 0x00000585,
+ 0x000009b7,
+ 0x000009b5,
0x00000000,
0x00000000,
0x00000000,
@@ -133,12 +155,12 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0210: proc_list_tail */
-/* 0x0210: time_prev */
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
0x00000000,
-/* 0x0214: time_next */
+/* 0x026c: time_next */
0x00000000,
-/* 0x0218: fifo_queue */
+/* 0x0270: fifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -171,7 +193,7 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0298: rfifo_queue */
+/* 0x02f0: rfifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -204,11 +226,11 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0318: memx_func_head */
+/* 0x0370: memx_func_head */
0x00010000,
0x00000000,
0x0000046f,
-/* 0x0324: memx_func_next */
+/* 0x037c: memx_func_next */
0x00000001,
0x00000000,
0x00000496,
@@ -221,8 +243,18 @@ uint32_t nvc0_pwr_data[] = {
0x00010004,
0x00000000,
0x000004fc,
-/* 0x0354: memx_func_tail */
-/* 0x0354: memx_data_head */
+/* 0x03ac: memx_func_tail */
+/* 0x03ac: memx_data_head */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
@@ -725,6 +757,42 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0bac: memx_data_tail */
+/* 0x0bac: i2c_scl_map */
+ 0x00001000,
+ 0x00004000,
+ 0x00010000,
+ 0x00000100,
+ 0x00040000,
+ 0x00100000,
+ 0x00400000,
+ 0x01000000,
+ 0x04000000,
+ 0x10000000,
+/* 0x0bd4: i2c_sda_map */
+ 0x00002000,
+ 0x00008000,
+ 0x00020000,
+ 0x00000200,
+ 0x00080000,
+ 0x00200000,
+ 0x00800000,
+ 0x02000000,
+ 0x08000000,
+ 0x20000000,
+/* 0x0bfc: i2c_ctrl */
+ 0x0000e138,
+ 0x0000e150,
+ 0x0000e168,
+ 0x0000e180,
+ 0x0000e254,
+ 0x0000e274,
+ 0x0000e764,
+ 0x0000e780,
+ 0x0000e79c,
+ 0x0000e7b8,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
@@ -735,7 +803,6 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0b54: memx_data_tail */
0x00000000,
0x00000000,
0x00000000,
@@ -787,15 +854,15 @@ uint32_t nvc0_pwr_code[] = {
0x07a007f1,
0xd00604b6,
0x04bd000e,
- 0xf001e7f0,
- 0x07f101e3,
+ 0xf001d7f0,
+ 0x07f101d3,
0x04b607ac,
- 0x000ed006,
+ 0x000dd006,
/* 0x0022: rd32_wait */
- 0xe7f104bd,
- 0xe4b607ac,
- 0x00eecf06,
- 0x7000e4f1,
+ 0xd7f104bd,
+ 0xd4b607ac,
+ 0x00ddcf06,
+ 0x7000d4f1,
0xf1f21bf4,
0xb607a4d7,
0xddcf06d4,
@@ -807,15 +874,15 @@ uint32_t nvc0_pwr_code[] = {
0xb607a407,
0x0dd00604,
0xf004bd00,
- 0xe5f002e7,
- 0x01e3f0f0,
+ 0xd5f002d7,
+ 0x01d3f0f0,
0x07ac07f1,
0xd00604b6,
- 0x04bd000e,
+ 0x04bd000d,
/* 0x006c: wr32_wait */
- 0x07ace7f1,
- 0xcf06e4b6,
- 0xe4f100ee,
+ 0x07acd7f1,
+ 0xcf06d4b6,
+ 0xd4f100dd,
0x1bf47000,
/* 0x007f: nsec */
0xf000f8f2,
@@ -845,21 +912,21 @@ uint32_t nvc0_pwr_code[] = {
0x9800f8df,
0x96b003e9,
0x2a0bf400,
- 0xbb840a98,
+ 0xbb9a0a98,
0x1cf4029a,
0x01d7f00f,
0x025421f5,
0x0ef494bd,
/* 0x00e9: intr_watchdog_next_time */
- 0x850a9815,
+ 0x9b0a9815,
0xf400a6b0,
0x9ab8090b,
0x061cf406,
/* 0x00f8: intr_watchdog_next_time_set */
/* 0x00fb: intr_watchdog_next_proc */
- 0x80850980,
+ 0x809b0980,
0xe0b603e9,
- 0x10e6b158,
+ 0x68e6b158,
0xc61bf402,
/* 0x010a: intr */
0x00f900f8,
@@ -880,15 +947,15 @@ uint32_t nvc0_pwr_code[] = {
0x0088cf06,
0xf40289c4,
0x0080230b,
- 0x58e7f085,
+ 0x58e7f09b,
0x98cb21f4,
- 0x96b08509,
+ 0x96b09b09,
0x110bf400,
0xb63407f0,
0x09d00604,
0x8004bd00,
/* 0x016e: intr_skip_watchdog */
- 0x89e48409,
+ 0x89e49a09,
0x0bf40800,
0x8897f148,
0x0694b606,
@@ -948,7 +1015,7 @@ uint32_t nvc0_pwr_code[] = {
0x000ed006,
0x0e8004bd,
/* 0x0241: timer_enable */
- 0x0187f084,
+ 0x0187f09a,
0xb63807f0,
0x08d00604,
/* 0x024f: timer_done */
@@ -979,7 +1046,7 @@ uint32_t nvc0_pwr_code[] = {
0xb8008a98,
0x0bf406ae,
0x5880b610,
- 0x021086b1,
+ 0x026886b1,
0xf4f01bf4,
/* 0x02b2: find_done */
0x8eb90132,
@@ -1049,7 +1116,7 @@ uint32_t nvc0_pwr_code[] = {
0x320bf406,
0x94071ec4,
0xe0b704ee,
- 0xeb980218,
+ 0xeb980270,
0x02ec9803,
0x9801ed98,
0x21f500ee,
@@ -1075,7 +1142,7 @@ uint32_t nvc0_pwr_code[] = {
0xe60bf406,
0xb60723c4,
0x30b70434,
- 0x3b800298,
+ 0x3b8002f0,
0x023c8003,
0x80013d80,
0x20b6003e,
@@ -1090,13 +1157,13 @@ uint32_t nvc0_pwr_code[] = {
/* 0x0430: host_init */
0x008017f1,
0xf11014b6,
- 0xf1021815,
+ 0xf1027015,
0xb604d007,
0x01d00604,
0xf104bd00,
0xb6008017,
0x15f11014,
- 0x07f10298,
+ 0x07f102f0,
0x04b604dc,
0x0001d006,
0x17f004bd,
@@ -1156,14 +1223,14 @@ uint32_t nvc0_pwr_code[] = {
0x00139802,
0x950410b6,
0x30f01034,
- 0xc835980c,
+ 0xde35980c,
0x12b855f9,
0xec1ef406,
0xe0fcd0fc,
0x02b921f5,
/* 0x0532: memx_info */
0xc7f100f8,
- 0xb7f10354,
+ 0xb7f103ac,
0x21f50800,
0x00f802b9,
/* 0x0540: memx_recv */
@@ -1175,7 +1242,312 @@ uint32_t nvc0_pwr_code[] = {
/* 0x0550: perf_recv */
/* 0x0552: perf_init */
0x00f800f8,
-/* 0x0554: test_recv */
+/* 0x0554: i2c_drive_scl */
+ 0xf40036b0,
+ 0x07f1110b,
+ 0x04b607e0,
+ 0x0001d006,
+ 0x00f804bd,
+/* 0x0568: i2c_drive_scl_lo */
+ 0x07e407f1,
+ 0xd00604b6,
+ 0x04bd0001,
+/* 0x0576: i2c_drive_sda */
+ 0x36b000f8,
+ 0x110bf400,
+ 0x07e007f1,
+ 0xd00604b6,
+ 0x04bd0002,
+/* 0x058a: i2c_drive_sda_lo */
+ 0x07f100f8,
+ 0x04b607e4,
+ 0x0002d006,
+ 0x00f804bd,
+/* 0x0598: i2c_sense_scl */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0431fd00,
+ 0xf4060bf4,
+/* 0x05ae: i2c_sense_scl_done */
+ 0x00f80131,
+/* 0x05b0: i2c_sense_sda */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0432fd00,
+ 0xf4060bf4,
+/* 0x05c6: i2c_sense_sda_done */
+ 0x00f80131,
+/* 0x05c8: i2c_raise_scl */
+ 0x47f140f9,
+ 0x37f00898,
+ 0x5421f501,
+/* 0x05d5: i2c_raise_scl_wait */
+ 0xe8e7f105,
+ 0x7f21f403,
+ 0x059821f5,
+ 0xb60901f4,
+ 0x1bf40142,
+/* 0x05e9: i2c_raise_scl_done */
+ 0xf840fcef,
+/* 0x05ed: i2c_start */
+ 0x9821f500,
+ 0x0d11f405,
+ 0x05b021f5,
+ 0xf40611f4,
+/* 0x05fe: i2c_start_rep */
+ 0x37f0300e,
+ 0x5421f500,
+ 0x0137f005,
+ 0x057621f5,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xc821f550,
+ 0x0464b605,
+/* 0x062b: i2c_start_send */
+ 0xf01f11f4,
+ 0x21f50037,
+ 0xe7f10576,
+ 0x21f41388,
+ 0x0037f07f,
+ 0x055421f5,
+ 0x1388e7f1,
+/* 0x0647: i2c_start_out */
+ 0xf87f21f4,
+/* 0x0649: i2c_stop */
+ 0x0037f000,
+ 0x055421f5,
+ 0xf50037f0,
+ 0xf1057621,
+ 0xf403e8e7,
+ 0x37f07f21,
+ 0x5421f501,
+ 0x88e7f105,
+ 0x7f21f413,
+ 0xf50137f0,
+ 0xf1057621,
+ 0xf41388e7,
+ 0x00f87f21,
+/* 0x067c: i2c_bitw */
+ 0x057621f5,
+ 0x03e8e7f1,
+ 0xbb7f21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x05c821f5,
+ 0xf40464b6,
+ 0xe7f11811,
+ 0x21f41388,
+ 0x0037f07f,
+ 0x055421f5,
+ 0x1388e7f1,
+/* 0x06bb: i2c_bitw_out */
+ 0xf87f21f4,
+/* 0x06bd: i2c_bitr */
+ 0x0137f000,
+ 0x057621f5,
+ 0x03e8e7f1,
+ 0xbb7f21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x05c821f5,
+ 0xf40464b6,
+ 0x21f51b11,
+ 0x37f005b0,
+ 0x5421f500,
+ 0x88e7f105,
+ 0x7f21f413,
+ 0xf4013cf0,
+/* 0x0702: i2c_bitr_done */
+ 0x00f80131,
+/* 0x0704: i2c_get_byte */
+ 0xf00057f0,
+/* 0x070a: i2c_get_byte_next */
+ 0x54b60847,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b606bd,
+ 0x2b11f404,
+ 0xb60553fd,
+ 0x1bf40142,
+ 0x0137f0d8,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x7c21f550,
+ 0x0464b606,
+/* 0x0754: i2c_get_byte_done */
+/* 0x0756: i2c_put_byte */
+ 0x47f000f8,
+/* 0x0759: i2c_put_byte_next */
+ 0x0142b608,
+ 0xbb3854ff,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x067c21f5,
+ 0xf40464b6,
+ 0x46b03411,
+ 0xd81bf400,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xbd21f550,
+ 0x0464b606,
+ 0xbb0f11f4,
+ 0x36b00076,
+ 0x061bf401,
+/* 0x07af: i2c_put_byte_done */
+ 0xf80132f4,
+/* 0x07b1: i2c_addr */
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b605ed,
+ 0x2911f404,
+ 0x012ec3e7,
+ 0xfd0134b6,
+ 0x76bb0553,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb6075621,
+/* 0x07f6: i2c_addr_done */
+ 0x00f80464,
+/* 0x07f8: i2c_acquire_addr */
+ 0xb6f8cec7,
+ 0xe0b702e4,
+ 0xee980bfc,
+/* 0x0807: i2c_acquire */
+ 0xf500f800,
+ 0xf407f821,
+ 0xd9f00421,
+ 0x3f21f403,
+/* 0x0816: i2c_release */
+ 0x21f500f8,
+ 0x21f407f8,
+ 0x03daf004,
+ 0xf83f21f4,
+/* 0x0825: i2c_recv */
+ 0x0132f400,
+ 0xb6f8c1c7,
+ 0x16b00214,
+ 0x3a1ff528,
+ 0xd413a001,
+ 0x0032980b,
+ 0x0bac13a0,
+ 0xf4003198,
+ 0xd0f90231,
+ 0xd0f9e0f9,
+ 0x000067f1,
+ 0x100063f1,
+ 0xbb016792,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x080721f5,
+ 0xfc0464b6,
+ 0x00d6b0d0,
+ 0x00b31bf5,
+ 0xbb0057f0,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x07b121f5,
+ 0xf50464b6,
+ 0xc700d011,
+ 0x76bbe0c5,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb6075621,
+ 0x11f50464,
+ 0x57f000ad,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b607b1,
+ 0x8a11f504,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b60704,
+ 0x6a11f404,
+ 0xbbe05bcb,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x064921f5,
+ 0xb90464b6,
+ 0x74bd025b,
+/* 0x092b: i2c_recv_not_rd08 */
+ 0xb0430ef4,
+ 0x1bf401d6,
+ 0x0057f03d,
+ 0x07b121f5,
+ 0xc73311f4,
+ 0x21f5e0c5,
+ 0x11f40756,
+ 0x0057f029,
+ 0x07b121f5,
+ 0xc71f11f4,
+ 0x21f5e0b5,
+ 0x11f40756,
+ 0x4921f515,
+ 0xc774bd06,
+ 0x1bf408c5,
+ 0x0232f409,
+/* 0x096b: i2c_recv_not_wr08 */
+/* 0x096b: i2c_recv_done */
+ 0xc7030ef4,
+ 0x21f5f8ce,
+ 0xe0fc0816,
+ 0x12f4d0fc,
+ 0x027cb90a,
+ 0x02b921f5,
+/* 0x0980: i2c_recv_exit */
+/* 0x0982: i2c_init */
+ 0x00f800f8,
+/* 0x0984: test_recv */
0x05d817f1,
0xcf0614b6,
0x10b60011,
@@ -1185,12 +1557,12 @@ uint32_t nvc0_pwr_code[] = {
0x00e7f104,
0x4fe3f1d9,
0xf521f513,
-/* 0x057b: test_init */
+/* 0x09ab: test_init */
0xf100f801,
0xf50800e7,
0xf801f521,
-/* 0x0585: idle_recv */
-/* 0x0587: idle */
+/* 0x09b5: idle_recv */
+/* 0x09b7: idle */
0xf400f800,
0x17f10031,
0x14b605d4,
@@ -1198,32 +1570,20 @@ uint32_t nvc0_pwr_code[] = {
0xf10110b6,
0xb605d407,
0x01d00604,
-/* 0x05a3: idle_loop */
+/* 0x09d3: idle_loop */
0xf004bd00,
0x32f45817,
-/* 0x05a9: idle_proc */
-/* 0x05a9: idle_proc_exec */
+/* 0x09d9: idle_proc */
+/* 0x09d9: idle_proc_exec */
0xb910f902,
0x21f5021e,
0x10fc02c2,
0xf40911f4,
0x0ef40231,
-/* 0x05bd: idle_proc_next */
+/* 0x09ed: idle_proc_next */
0x5810b6ef,
0xf4061fb8,
0x02f4e61b,
0x0028f4dd,
0x00bb0ef4,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
index 32d65ea254dd..8a89dfe41ce1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
@@ -37,6 +37,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_PROC
@@ -46,6 +47,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_DATA
@@ -57,6 +59,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
index ce65e2a4b789..5e24c6bc041d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -89,33 +89,13 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x54534554,
- 0x000004eb,
- 0x000004ca,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x5f433249,
+ 0x000008e3,
+ 0x00000786,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
- 0x454c4449,
- 0x000004f7,
- 0x000004f5,
- 0x00000000,
- 0x00000000,
0x00000000,
0x00000000,
0x00000000,
@@ -131,14 +111,13 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x54534554,
+ 0x00000906,
+ 0x000008e5,
0x00000000,
0x00000000,
-/* 0x0210: proc_list_tail */
-/* 0x0210: time_prev */
0x00000000,
-/* 0x0214: time_next */
0x00000000,
-/* 0x0218: fifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -154,6 +133,9 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x454c4449,
+ 0x00000912,
+ 0x00000910,
0x00000000,
0x00000000,
0x00000000,
@@ -171,11 +153,14 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0298: rfifo_queue */
0x00000000,
0x00000000,
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
0x00000000,
+/* 0x026c: time_next */
0x00000000,
+/* 0x0270: fifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -204,31 +189,11 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0318: memx_func_head */
- 0x00010000,
- 0x00000000,
- 0x000003f4,
-/* 0x0324: memx_func_next */
- 0x00000001,
- 0x00000000,
- 0x00000415,
- 0x00000002,
- 0x00000002,
- 0x00000430,
- 0x00040003,
- 0x00000000,
- 0x00000458,
- 0x00010004,
- 0x00000000,
- 0x00000472,
-/* 0x0354: memx_func_tail */
-/* 0x0354: memx_data_head */
- 0x00000000,
- 0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
+/* 0x02f0: rfifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -261,10 +226,25 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0370: memx_func_head */
+ 0x00010000,
0x00000000,
+ 0x000003f4,
+/* 0x037c: memx_func_next */
+ 0x00000001,
0x00000000,
+ 0x00000415,
+ 0x00000002,
+ 0x00000002,
+ 0x00000430,
+ 0x00040003,
0x00000000,
+ 0x00000458,
+ 0x00010004,
0x00000000,
+ 0x00000472,
+/* 0x03ac: memx_func_tail */
+/* 0x03ac: memx_data_head */
0x00000000,
0x00000000,
0x00000000,
@@ -735,7 +715,6 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0b54: memx_data_tail */
0x00000000,
0x00000000,
0x00000000,
@@ -778,6 +757,29 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0bac: memx_data_tail */
+/* 0x0bac: i2c_scl_map */
+ 0x00000400,
+ 0x00000800,
+ 0x00001000,
+ 0x00002000,
+ 0x00004000,
+ 0x00008000,
+ 0x00010000,
+ 0x00020000,
+ 0x00040000,
+ 0x00080000,
+/* 0x0bd4: i2c_sda_map */
+ 0x00100000,
+ 0x00200000,
+ 0x00400000,
+ 0x00800000,
+ 0x01000000,
+ 0x02000000,
+ 0x04000000,
+ 0x08000000,
+ 0x10000000,
+ 0x20000000,
0x00000000,
};
@@ -786,14 +788,14 @@ uint32_t nvd0_pwr_code[] = {
/* 0x0004: rd32 */
0x07a007f1,
0xbd000ed0,
- 0x01e7f004,
- 0xf101e3f0,
+ 0x01d7f004,
+ 0xf101d3f0,
0xd007ac07,
- 0x04bd000e,
+ 0x04bd000d,
/* 0x001c: rd32_wait */
- 0x07ace7f1,
- 0xf100eecf,
- 0xf47000e4,
+ 0x07acd7f1,
+ 0xf100ddcf,
+ 0xf47000d4,
0xd7f1f51b,
0xddcf07a4,
/* 0x0033: wr32 */
@@ -802,14 +804,14 @@ uint32_t nvd0_pwr_code[] = {
0x04bd000e,
0x07a407f1,
0xbd000dd0,
- 0x02e7f004,
- 0xf0f0e5f0,
- 0x07f101e3,
- 0x0ed007ac,
+ 0x02d7f004,
+ 0xf0f0d5f0,
+ 0x07f101d3,
+ 0x0dd007ac,
/* 0x0057: wr32_wait */
0xf104bd00,
- 0xcf07ace7,
- 0xe4f100ee,
+ 0xcf07acd7,
+ 0xd4f100dd,
0x1bf47000,
/* 0x0067: nsec */
0xf000f8f5,
@@ -836,21 +838,21 @@ uint32_t nvd0_pwr_code[] = {
0x9800f8e2,
0x96b003e9,
0x2a0bf400,
- 0xbb840a98,
+ 0xbb9a0a98,
0x1cf4029a,
0x01d7f00f,
0x020621f5,
0x0ef494bd,
/* 0x00c5: intr_watchdog_next_time */
- 0x850a9815,
+ 0x9b0a9815,
0xf400a6b0,
0x9ab8090b,
0x061cf406,
/* 0x00d4: intr_watchdog_next_time_set */
/* 0x00d7: intr_watchdog_next_proc */
- 0x80850980,
+ 0x809b0980,
0xe0b603e9,
- 0x10e6b158,
+ 0x68e6b158,
0xc61bf402,
/* 0x00e6: intr */
0x00f900f8,
@@ -868,15 +870,15 @@ uint32_t nvd0_pwr_code[] = {
0x0887f004,
0xc40088cf,
0x0bf40289,
- 0x85008020,
+ 0x9b008020,
0xf458e7f0,
0x0998a721,
- 0x0096b085,
+ 0x0096b09b,
0xf00e0bf4,
0x09d03407,
0x8004bd00,
/* 0x013e: intr_skip_watchdog */
- 0x89e48409,
+ 0x89e49a09,
0x0bf40800,
0x8897f13c,
0x0099cf06,
@@ -929,7 +931,7 @@ uint32_t nvd0_pwr_code[] = {
0x0ed03407,
0x8004bd00,
/* 0x01f6: timer_enable */
- 0x87f0840e,
+ 0x87f09a0e,
0x3807f001,
0xbd0008d0,
/* 0x0201: timer_done */
@@ -960,7 +962,7 @@ uint32_t nvd0_pwr_code[] = {
0x06aeb800,
0xb6100bf4,
0x86b15880,
- 0x1bf40210,
+ 0x1bf40268,
0x0132f4f0,
/* 0x0264: find_done */
0xfc028eb9,
@@ -1024,7 +1026,7 @@ uint32_t nvd0_pwr_code[] = {
0x0bf40612,
0x071ec42f,
0xb704ee94,
- 0x980218e0,
+ 0x980270e0,
0xec9803eb,
0x01ed9802,
0xf500ee98,
@@ -1048,7 +1050,7 @@ uint32_t nvd0_pwr_code[] = {
0xec0bf406,
0xb60723c4,
0x30b70434,
- 0x3b800298,
+ 0x3b8002f0,
0x023c8003,
0x80013d80,
0x20b6003e,
@@ -1061,12 +1063,12 @@ uint32_t nvd0_pwr_code[] = {
/* 0x03be: host_init */
0x17f100f8,
0x14b60080,
- 0x1815f110,
+ 0x7015f110,
0xd007f102,
0x0001d004,
0x17f104bd,
0x14b60080,
- 0x9815f110,
+ 0xf015f110,
0xdc07f102,
0x0001d004,
0x17f004bd,
@@ -1122,13 +1124,13 @@ uint32_t nvd0_pwr_code[] = {
0x10b60013,
0x10349504,
0x980c30f0,
- 0x55f9c835,
+ 0x55f9de35,
0xf40612b8,
0xd0fcec1e,
0x21f5e0fc,
0x00f8026b,
/* 0x04a8: memx_info */
- 0x0354c7f1,
+ 0x03acc7f1,
0x0800b7f1,
0x026b21f5,
/* 0x04b6: memx_recv */
@@ -1140,49 +1142,342 @@ uint32_t nvd0_pwr_code[] = {
/* 0x04c6: perf_recv */
0x00f800f8,
/* 0x04c8: perf_init */
-/* 0x04ca: test_recv */
- 0x17f100f8,
- 0x11cf05d8,
- 0x0110b600,
- 0x05d807f1,
+/* 0x04ca: i2c_drive_scl */
+ 0x36b000f8,
+ 0x0e0bf400,
+ 0x07e007f1,
0xbd0001d0,
- 0x00e7f104,
- 0x4fe3f1d9,
- 0xb621f513,
-/* 0x04eb: test_init */
- 0xf100f801,
- 0xf50800e7,
- 0xf801b621,
-/* 0x04f5: idle_recv */
-/* 0x04f7: idle */
- 0xf400f800,
- 0x17f10031,
- 0x11cf05d4,
- 0x0110b600,
- 0x05d407f1,
- 0xbd0001d0,
-/* 0x050d: idle_loop */
- 0x5817f004,
-/* 0x0513: idle_proc */
-/* 0x0513: idle_proc_exec */
- 0xf90232f4,
- 0x021eb910,
- 0x027421f5,
- 0x11f410fc,
- 0x0231f409,
-/* 0x0527: idle_proc_next */
- 0xb6ef0ef4,
- 0x1fb85810,
- 0xe61bf406,
- 0xf4dd02f4,
- 0x0ef40028,
- 0x000000c1,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+/* 0x04db: i2c_drive_scl_lo */
+ 0xf100f804,
+ 0xd007e407,
+ 0x04bd0001,
+/* 0x04e6: i2c_drive_sda */
+ 0x36b000f8,
+ 0x0e0bf400,
+ 0x07e007f1,
+ 0xbd0002d0,
+/* 0x04f7: i2c_drive_sda_lo */
+ 0xf100f804,
+ 0xd007e407,
+ 0x04bd0002,
+/* 0x0502: i2c_sense_scl */
+ 0x32f400f8,
+ 0xc437f101,
+ 0x0033cf07,
+ 0xf40431fd,
+ 0x31f4060b,
+/* 0x0515: i2c_sense_scl_done */
+/* 0x0517: i2c_sense_sda */
+ 0xf400f801,
+ 0x37f10132,
+ 0x33cf07c4,
+ 0x0432fd00,
+ 0xf4060bf4,
+/* 0x052a: i2c_sense_sda_done */
+ 0x00f80131,
+/* 0x052c: i2c_raise_scl */
+ 0x47f140f9,
+ 0x37f00898,
+ 0xca21f501,
+/* 0x0539: i2c_raise_scl_wait */
+ 0xe8e7f104,
+ 0x6721f403,
+ 0x050221f5,
+ 0xb60901f4,
+ 0x1bf40142,
+/* 0x054d: i2c_raise_scl_done */
+ 0xf840fcef,
+/* 0x0551: i2c_start */
+ 0x0221f500,
+ 0x0d11f405,
+ 0x051721f5,
+ 0xf40611f4,
+/* 0x0562: i2c_start_rep */
+ 0x37f0300e,
+ 0xca21f500,
+ 0x0137f004,
+ 0x04e621f5,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x2c21f550,
+ 0x0464b605,
+/* 0x058f: i2c_start_send */
+ 0xf01f11f4,
+ 0x21f50037,
+ 0xe7f104e6,
+ 0x21f41388,
+ 0x0037f067,
+ 0x04ca21f5,
+ 0x1388e7f1,
+/* 0x05ab: i2c_start_out */
+ 0xf86721f4,
+/* 0x05ad: i2c_stop */
+ 0x0037f000,
+ 0x04ca21f5,
+ 0xf50037f0,
+ 0xf104e621,
+ 0xf403e8e7,
+ 0x37f06721,
+ 0xca21f501,
+ 0x88e7f104,
+ 0x6721f413,
+ 0xf50137f0,
+ 0xf104e621,
+ 0xf41388e7,
+ 0x00f86721,
+/* 0x05e0: i2c_bitw */
+ 0x04e621f5,
+ 0x03e8e7f1,
+ 0xbb6721f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x052c21f5,
+ 0xf40464b6,
+ 0xe7f11811,
+ 0x21f41388,
+ 0x0037f067,
+ 0x04ca21f5,
+ 0x1388e7f1,
+/* 0x061f: i2c_bitw_out */
+ 0xf86721f4,
+/* 0x0621: i2c_bitr */
+ 0x0137f000,
+ 0x04e621f5,
+ 0x03e8e7f1,
+ 0xbb6721f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x052c21f5,
+ 0xf40464b6,
+ 0x21f51b11,
+ 0x37f00517,
+ 0xca21f500,
+ 0x88e7f104,
+ 0x6721f413,
+ 0xf4013cf0,
+/* 0x0666: i2c_bitr_done */
+ 0x00f80131,
+/* 0x0668: i2c_get_byte */
+ 0xf00057f0,
+/* 0x066e: i2c_get_byte_next */
+ 0x54b60847,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b60621,
+ 0x2b11f404,
+ 0xb60553fd,
+ 0x1bf40142,
+ 0x0137f0d8,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xe021f550,
+ 0x0464b605,
+/* 0x06b8: i2c_get_byte_done */
+/* 0x06ba: i2c_put_byte */
+ 0x47f000f8,
+/* 0x06bd: i2c_put_byte_next */
+ 0x0142b608,
+ 0xbb3854ff,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x05e021f5,
+ 0xf40464b6,
+ 0x46b03411,
+ 0xd81bf400,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x2121f550,
+ 0x0464b606,
+ 0xbb0f11f4,
+ 0x36b00076,
+ 0x061bf401,
+/* 0x0713: i2c_put_byte_done */
+ 0xf80132f4,
+/* 0x0715: i2c_addr */
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b60551,
+ 0x2911f404,
+ 0x012ec3e7,
+ 0xfd0134b6,
+ 0x76bb0553,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb606ba21,
+/* 0x075a: i2c_addr_done */
+ 0x00f80464,
+/* 0x075c: i2c_acquire_addr */
+ 0xb6f8cec7,
+ 0xe0b705e4,
+ 0x00f8d014,
+/* 0x0768: i2c_acquire */
+ 0x075c21f5,
+ 0xf00421f4,
+ 0x21f403d9,
+/* 0x0777: i2c_release */
+ 0xf500f833,
+ 0xf4075c21,
+ 0xdaf00421,
+ 0x3321f403,
+/* 0x0786: i2c_recv */
+ 0x32f400f8,
+ 0xf8c1c701,
+ 0xb00214b6,
+ 0x1ff52816,
+ 0x13a0013a,
+ 0x32980bd4,
+ 0xac13a000,
+ 0x0031980b,
+ 0xf90231f4,
+ 0xf9e0f9d0,
+ 0x0067f1d0,
+ 0x0063f100,
+ 0x01679210,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x6821f550,
+ 0x0464b607,
+ 0xd6b0d0fc,
+ 0xb31bf500,
+ 0x0057f000,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x1521f550,
+ 0x0464b607,
+ 0x00d011f5,
+ 0xbbe0c5c7,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x06ba21f5,
+ 0xf50464b6,
+ 0xf000ad11,
+ 0x76bb0157,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb6071521,
+ 0x11f50464,
+ 0x76bb008a,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb6066821,
+ 0x11f40464,
+ 0xe05bcb6a,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xad21f550,
+ 0x0464b605,
+ 0xbd025bb9,
+ 0x430ef474,
+/* 0x088c: i2c_recv_not_rd08 */
+ 0xf401d6b0,
+ 0x57f03d1b,
+ 0x1521f500,
+ 0x3311f407,
+ 0xf5e0c5c7,
+ 0xf406ba21,
+ 0x57f02911,
+ 0x1521f500,
+ 0x1f11f407,
+ 0xf5e0b5c7,
+ 0xf406ba21,
+ 0x21f51511,
+ 0x74bd05ad,
+ 0xf408c5c7,
+ 0x32f4091b,
+ 0x030ef402,
+/* 0x08cc: i2c_recv_not_wr08 */
+/* 0x08cc: i2c_recv_done */
+ 0xf5f8cec7,
+ 0xfc077721,
+ 0xf4d0fce0,
+ 0x7cb90a12,
+ 0x6b21f502,
+/* 0x08e1: i2c_recv_exit */
+/* 0x08e3: i2c_init */
+ 0xf800f802,
+/* 0x08e5: test_recv */
+ 0xd817f100,
+ 0x0011cf05,
+ 0xf10110b6,
+ 0xd005d807,
+ 0x04bd0001,
+ 0xd900e7f1,
+ 0x134fe3f1,
+ 0x01b621f5,
+/* 0x0906: test_init */
+ 0xe7f100f8,
+ 0x21f50800,
+ 0x00f801b6,
+/* 0x0910: idle_recv */
+/* 0x0912: idle */
+ 0x31f400f8,
+ 0xd417f100,
+ 0x0011cf05,
+ 0xf10110b6,
+ 0xd005d407,
+ 0x04bd0001,
+/* 0x0928: idle_loop */
+ 0xf45817f0,
+/* 0x092e: idle_proc */
+/* 0x092e: idle_proc_exec */
+ 0x10f90232,
+ 0xf5021eb9,
+ 0xfc027421,
+ 0x0911f410,
+ 0xf40231f4,
+/* 0x0942: idle_proc_next */
+ 0x10b6ef0e,
+ 0x061fb858,
+ 0xf4e61bf4,
+ 0x28f4dd02,
+ 0xc10ef400,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
index 5fb0cccc6c64..574acfa44c8c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
@@ -7,6 +7,7 @@
#define PROC_HOST 0x54534f48
#define PROC_MEMX 0x584d454d
#define PROC_PERF 0x46524550
+#define PROC_I2C_ 0x5f433249
#define PROC_TEST 0x54534554
/* KERN: message identifiers */
@@ -24,4 +25,22 @@
#define MEMX_WAIT 3
#define MEMX_DELAY 4
+/* I2C_: message identifiers */
+#define I2C__MSG_RD08 0
+#define I2C__MSG_WR08 1
+
+#define I2C__MSG_DATA0_PORT 24:31
+#define I2C__MSG_DATA0_ADDR 14:23
+
+#define I2C__MSG_DATA0_RD08_PORT I2C__MSG_DATA0_PORT
+#define I2C__MSG_DATA0_RD08_ADDR I2C__MSG_DATA0_ADDR
+#define I2C__MSG_DATA0_RD08_REG 0:7
+#define I2C__MSG_DATA1_RD08_VAL 0:7
+
+#define I2C__MSG_DATA0_WR08_PORT I2C__MSG_DATA0_PORT
+#define I2C__MSG_DATA0_WR08_ADDR I2C__MSG_DATA0_ADDR
+#define I2C__MSG_DATA0_WR08_SYNC 8:8
+#define I2C__MSG_DATA0_WR08_REG 0:7
+#define I2C__MSG_DATA1_WR08_VAL 0:7
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index e44ed7b93c6d..7610fc5f8fa2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -29,9 +29,9 @@
static bool
probe_monitoring_device(struct nouveau_i2c_port *i2c,
- struct i2c_board_info *info)
+ struct i2c_board_info *info, void *data)
{
- struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c);
+ struct nouveau_therm_priv *priv = data;
struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
struct i2c_client *client;
@@ -96,7 +96,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
};
i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
- board, probe_monitoring_device);
+ board, probe_monitoring_device, therm);
if (priv->ic)
return;
}
@@ -108,7 +108,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
};
i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
- board, probe_monitoring_device);
+ board, probe_monitoring_device, therm);
if (priv->ic)
return;
}
@@ -117,5 +117,5 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
device. Let's try our static list.
*/
i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
- nv_board_infos, probe_monitoring_device);
+ nv_board_infos, probe_monitoring_device, therm);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index ef3133e7575c..7dd680ff2f6f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -72,13 +72,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
vmm->flush(vm);
}
-void
-nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
-{
- nouveau_vm_map_at(vma, 0, node);
-}
-
-void
+static void
nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
struct nouveau_mem *mem)
{
@@ -136,7 +130,7 @@ finish:
vmm->flush(vm);
}
-void
+static void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
struct nouveau_mem *mem)
{
@@ -175,6 +169,18 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
}
void
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
+{
+ if (node->sg)
+ nouveau_vm_map_sg_table(vma, 0, node->size << 12, node);
+ else
+ if (node->pages)
+ nouveau_vm_map_sg(vma, 0, node->size << 12, node);
+ else
+ nouveau_vm_map_at(vma, 0, node);
+}
+
+void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
struct nouveau_vm *vm = vma->vm;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 936a71c59080..7fdc51e2a571 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -643,7 +643,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
get_tmds_slave(encoder))
return;
- type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL);
+ type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL, NULL);
if (type < 0)
return;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index b13ff0fc42de..2f1ed61f7c8c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -77,11 +77,6 @@ nv04_display_create(struct drm_device *dev)
nouveau_hw_save_vga_fonts(dev, 1);
- ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, 0xd1500000,
- NV04_DISP_CLASS, NULL, 0, &disp->core);
- if (ret)
- return ret;
-
nv04_crtc_create(dev, 0);
if (nv_two_heads(dev))
nv04_crtc_create(dev, 1);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 56a28db04000..4245fc3dab70 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -80,7 +80,6 @@ struct nv04_display {
struct nv04_mode_state saved_reg;
uint32_t saved_vga_font[4][16384];
uint32_t dac_users[4];
- struct nouveau_object *core;
struct nouveau_bo *image[2];
};
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 32e7064b819b..ab03f7719d2d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -55,9 +55,12 @@ struct nouveau_plane {
int hue;
int saturation;
int iturbt_709;
+
+ void (*set_params)(struct nouveau_plane *);
};
static uint32_t formats[] = {
+ DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_NV12,
};
@@ -140,10 +143,10 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
nv_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
nv_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
- if (fb->pixel_format == DRM_FORMAT_NV12) {
+ if (fb->pixel_format != DRM_FORMAT_UYVY)
format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8;
+ if (fb->pixel_format == DRM_FORMAT_NV12)
format |= NV_PVIDEO_FORMAT_PLANAR;
- }
if (nv_plane->iturbt_709)
format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
if (nv_plane->colorkey & (1 << 24))
@@ -182,9 +185,9 @@ nv10_disable_plane(struct drm_plane *plane)
}
static void
-nv10_destroy_plane(struct drm_plane *plane)
+nv_destroy_plane(struct drm_plane *plane)
{
- nv10_disable_plane(plane);
+ plane->funcs->disable_plane(plane);
drm_plane_cleanup(plane);
kfree(plane);
}
@@ -217,9 +220,9 @@ nv10_set_params(struct nouveau_plane *plane)
}
static int
-nv10_set_property(struct drm_plane *plane,
- struct drm_property *property,
- uint64_t value)
+nv_set_property(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t value)
{
struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
@@ -238,15 +241,16 @@ nv10_set_property(struct drm_plane *plane,
else
return -EINVAL;
- nv10_set_params(nv_plane);
+ if (nv_plane->set_params)
+ nv_plane->set_params(nv_plane);
return 0;
}
static const struct drm_plane_funcs nv10_plane_funcs = {
.update_plane = nv10_update_plane,
.disable_plane = nv10_disable_plane,
- .set_property = nv10_set_property,
- .destroy = nv10_destroy_plane,
+ .set_property = nv_set_property,
+ .destroy = nv_destroy_plane,
};
static void
@@ -266,7 +270,7 @@ nv10_overlay_init(struct drm_device *device)
case 0x15:
case 0x1a:
case 0x20:
- num_formats = 1;
+ num_formats = 2;
break;
}
@@ -321,8 +325,159 @@ nv10_overlay_init(struct drm_device *device)
drm_object_attach_property(&plane->base.base,
plane->props.iturbt_709, plane->iturbt_709);
+ plane->set_params = nv10_set_params;
nv10_set_params(plane);
- nv_wr32(dev, NV_PVIDEO_STOP, 1);
+ nv10_disable_plane(&plane->base);
+ return;
+cleanup:
+ drm_plane_cleanup(&plane->base);
+err:
+ kfree(plane);
+ nv_error(dev, "Failed to create plane\n");
+}
+
+static int
+nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct nouveau_device *dev = nouveau_dev(plane->dev);
+ struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+ struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+ struct nouveau_bo *cur = nv_plane->cur;
+ uint32_t overlay = 1;
+ int brightness = (nv_plane->brightness - 512) * 62 / 512;
+ int pitch, ret, i;
+
+ /* Source parameters given in 16.16 fixed point, ignore fractional. */
+ src_x >>= 16;
+ src_y >>= 16;
+ src_w >>= 16;
+ src_h >>= 16;
+
+ pitch = ALIGN(src_w * 4, 0x100);
+
+ if (pitch > 0xffff)
+ return -ERANGE;
+
+ /* TODO: Compute an offset? Not sure how to do this for YUYV. */
+ if (src_x != 0 || src_y != 0)
+ return -ERANGE;
+
+ if (crtc_w < src_w || crtc_h < src_h)
+ return -ERANGE;
+
+ ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ nv_plane->cur = nv_fb->nvbo;
+
+ nv_wr32(dev, NV_PVIDEO_OE_STATE, 0);
+ nv_wr32(dev, NV_PVIDEO_SU_STATE, 0);
+ nv_wr32(dev, NV_PVIDEO_RM_STATE, 0);
+
+ for (i = 0; i < 2; i++) {
+ nv_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
+ nv_fb->nvbo->bo.offset);
+ nv_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch);
+ nv_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
+ }
+ nv_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x);
+ nv_wr32(dev, NV_PVIDEO_WINDOW_SIZE, crtc_h << 16 | crtc_w);
+ nv_wr32(dev, NV_PVIDEO_STEP_SIZE,
+ (uint32_t)(((src_h - 1) << 11) / (crtc_h - 1)) << 16 | (uint32_t)(((src_w - 1) << 11) / (crtc_w - 1)));
+
+ /* It should be possible to convert hue/contrast to this */
+ nv_wr32(dev, NV_PVIDEO_RED_CSC_OFFSET, 0x69 - brightness);
+ nv_wr32(dev, NV_PVIDEO_GREEN_CSC_OFFSET, 0x3e + brightness);
+ nv_wr32(dev, NV_PVIDEO_BLUE_CSC_OFFSET, 0x89 - brightness);
+ nv_wr32(dev, NV_PVIDEO_CSC_ADJUST, 0);
+
+ nv_wr32(dev, NV_PVIDEO_CONTROL_Y, 0x001); /* (BLUR_ON, LINE_HALF) */
+ nv_wr32(dev, NV_PVIDEO_CONTROL_X, 0x111); /* (WEIGHT_HEAVY, SHARPENING_ON, SMOOTHING_ON) */
+
+ nv_wr32(dev, NV_PVIDEO_FIFO_BURST_LENGTH, 0x03);
+ nv_wr32(dev, NV_PVIDEO_FIFO_THRES_SIZE, 0x38);
+
+ nv_wr32(dev, NV_PVIDEO_KEY, nv_plane->colorkey);
+
+ if (nv_plane->colorkey & (1 << 24))
+ overlay |= 0x10;
+ if (fb->pixel_format == DRM_FORMAT_YUYV)
+ overlay |= 0x100;
+
+ nv_wr32(dev, NV_PVIDEO_OVERLAY, overlay);
+
+ nv_wr32(dev, NV_PVIDEO_SU_STATE, nv_rd32(dev, NV_PVIDEO_SU_STATE) ^ (1 << 16));
+
+ if (cur)
+ nouveau_bo_unpin(cur);
+
+ return 0;
+}
+
+static int
+nv04_disable_plane(struct drm_plane *plane)
+{
+ struct nouveau_device *dev = nouveau_dev(plane->dev);
+ struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+
+ nv_mask(dev, NV_PVIDEO_OVERLAY, 1, 0);
+ nv_wr32(dev, NV_PVIDEO_OE_STATE, 0);
+ nv_wr32(dev, NV_PVIDEO_SU_STATE, 0);
+ nv_wr32(dev, NV_PVIDEO_RM_STATE, 0);
+ if (nv_plane->cur) {
+ nouveau_bo_unpin(nv_plane->cur);
+ nv_plane->cur = NULL;
+ }
+
+ return 0;
+}
+
+static const struct drm_plane_funcs nv04_plane_funcs = {
+ .update_plane = nv04_update_plane,
+ .disable_plane = nv04_disable_plane,
+ .set_property = nv_set_property,
+ .destroy = nv_destroy_plane,
+};
+
+static void
+nv04_overlay_init(struct drm_device *device)
+{
+ struct nouveau_device *dev = nouveau_dev(device);
+ struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
+ int ret;
+
+ if (!plane)
+ return;
+
+ ret = drm_plane_init(device, &plane->base, 1 /* single crtc */,
+ &nv04_plane_funcs,
+ formats, 2, false);
+ if (ret)
+ goto err;
+
+ /* Set up the plane properties */
+ plane->props.colorkey = drm_property_create_range(
+ device, 0, "colorkey", 0, 0x01ffffff);
+ plane->props.brightness = drm_property_create_range(
+ device, 0, "brightness", 0, 1024);
+ if (!plane->props.colorkey ||
+ !plane->props.brightness)
+ goto cleanup;
+
+ plane->colorkey = 0;
+ drm_object_attach_property(&plane->base.base,
+ plane->props.colorkey, plane->colorkey);
+
+ plane->brightness = 512;
+ drm_object_attach_property(&plane->base.base,
+ plane->props.brightness, plane->brightness);
+
+ nv04_disable_plane(&plane->base);
return;
cleanup:
drm_plane_cleanup(&plane->base);
@@ -335,6 +490,8 @@ void
nouveau_overlay_init(struct drm_device *device)
{
struct nouveau_device *dev = nouveau_dev(device);
- if (dev->chipset >= 0x10 && dev->chipset <= 0x40)
+ if (dev->chipset < 0x10)
+ nv04_overlay_init(device);
+ else if (dev->chipset <= 0x40)
nv10_overlay_init(device);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index cc4b208ce546..244822df8ffc 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -59,7 +59,7 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index)
struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
return i2c->identify(i2c, i2c_index, "TV encoder",
- nv04_tv_encoder_info, NULL);
+ nv04_tv_encoder_info, NULL, NULL);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 6828d81ed7b9..900fae01793e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -447,6 +447,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
+ info->offset = ntfy->node->offset;
+
done:
if (ret)
nouveau_abi16_ntfy_fini(chan, ntfy);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 95c740454049..4ef83df2b246 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -1,15 +1,10 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/video.h>
-#include <acpi/acpi.h>
#include <linux/mxm-wmi.h>
-
#include <linux/vga_switcheroo.h>
-
#include <drm/drm_edid.h>
+#include <acpi/video.h>
#include "nouveau_drm.h"
#include "nouveau_acpi.h"
@@ -51,6 +46,7 @@ static struct nouveau_dsm_priv {
bool dsm_detected;
bool optimus_detected;
acpi_handle dhandle;
+ acpi_handle other_handle;
acpi_handle rom_handle;
} nouveau_dsm_priv;
@@ -65,6 +61,7 @@ bool nouveau_is_v1_dsm(void) {
#define NOUVEAU_DSM_HAS_MUX 0x1
#define NOUVEAU_DSM_HAS_OPT 0x2
+#ifdef CONFIG_VGA_SWITCHEROO
static const char nouveau_dsm_muid[] = {
0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
@@ -77,124 +74,66 @@ static const char nouveau_op_dsm_muid[] = {
static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
{
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_object_list input;
- union acpi_object params[4];
+ int i;
union acpi_object *obj;
- int i, err;
char args_buff[4];
+ union acpi_object argv4 = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = 4,
+ .buffer.pointer = args_buff
+ };
- input.count = 4;
- input.pointer = params;
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(nouveau_op_dsm_muid);
- params[0].buffer.pointer = (char *)nouveau_op_dsm_muid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = 0x00000100;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = func;
- params[3].type = ACPI_TYPE_BUFFER;
- params[3].buffer.length = 4;
/* ACPI is little endian, AABBCCDD becomes {DD,CC,BB,AA} */
for (i = 0; i < 4; i++)
args_buff[i] = (arg >> i * 8) & 0xFF;
- params[3].buffer.pointer = args_buff;
- err = acpi_evaluate_object(handle, "_DSM", &input, &output);
- if (err) {
- printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
- return err;
- }
-
- obj = (union acpi_object *)output.pointer;
-
- if (obj->type == ACPI_TYPE_INTEGER)
- if (obj->integer.value == 0x80000002) {
- return -ENODEV;
- }
-
- if (obj->type == ACPI_TYPE_BUFFER) {
- if (obj->buffer.length == 4 && result) {
- *result = 0;
+ *result = 0;
+ obj = acpi_evaluate_dsm_typed(handle, nouveau_op_dsm_muid, 0x00000100,
+ func, &argv4, ACPI_TYPE_BUFFER);
+ if (!obj) {
+ acpi_handle_info(handle, "failed to evaluate _DSM\n");
+ return AE_ERROR;
+ } else {
+ if (obj->buffer.length == 4) {
*result |= obj->buffer.pointer[0];
*result |= (obj->buffer.pointer[1] << 8);
*result |= (obj->buffer.pointer[2] << 16);
*result |= (obj->buffer.pointer[3] << 24);
}
+ ACPI_FREE(obj);
}
- kfree(output.pointer);
return 0;
}
-static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
+static int nouveau_dsm(acpi_handle handle, int func, int arg)
{
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_object_list input;
- union acpi_object params[4];
+ int ret = 0;
union acpi_object *obj;
- int err;
-
- input.count = 4;
- input.pointer = params;
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(nouveau_dsm_muid);
- params[0].buffer.pointer = (char *)nouveau_dsm_muid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = 0x00000102;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = func;
- params[3].type = ACPI_TYPE_INTEGER;
- params[3].integer.value = arg;
-
- err = acpi_evaluate_object(handle, "_DSM", &input, &output);
- if (err) {
- printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
- return err;
- }
-
- obj = (union acpi_object *)output.pointer;
-
- if (obj->type == ACPI_TYPE_INTEGER)
+ union acpi_object argv4 = {
+ .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = arg,
+ };
+
+ obj = acpi_evaluate_dsm_typed(handle, nouveau_dsm_muid, 0x00000102,
+ func, &argv4, ACPI_TYPE_INTEGER);
+ if (!obj) {
+ acpi_handle_info(handle, "failed to evaluate _DSM\n");
+ return AE_ERROR;
+ } else {
if (obj->integer.value == 0x80000002)
- return -ENODEV;
-
- if (obj->type == ACPI_TYPE_BUFFER) {
- if (obj->buffer.length == 4 && result) {
- *result = 0;
- *result |= obj->buffer.pointer[0];
- *result |= (obj->buffer.pointer[1] << 8);
- *result |= (obj->buffer.pointer[2] << 16);
- *result |= (obj->buffer.pointer[3] << 24);
- }
+ ret = -ENODEV;
+ ACPI_FREE(obj);
}
- kfree(output.pointer);
- return 0;
-}
-
-/* Returns 1 if a DSM function is usable and 0 otherwise */
-static int nouveau_test_dsm(acpi_handle test_handle,
- int (*dsm_func)(acpi_handle, int, int, uint32_t *),
- int sfnc)
-{
- u32 result = 0;
-
- /* Function 0 returns a Buffer containing available functions. The args
- * parameter is ignored for function 0, so just put 0 in it */
- if (dsm_func(test_handle, 0, 0, &result))
- return 0;
-
- /* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. If
- * the n-th bit is enabled, function n is supported */
- return result & 1 && result & (1 << sfnc);
+ return ret;
}
static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
{
mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
mxm_wmi_call_mxds(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
- return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL);
+ return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id);
}
static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state)
@@ -204,7 +143,7 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
arg = NOUVEAU_DSM_POWER_SPEED;
else
arg = NOUVEAU_DSM_POWER_STAMINA;
- nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL);
+ nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg);
return 0;
}
@@ -260,14 +199,16 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
if (!dhandle)
return false;
- if (!acpi_has_method(dhandle, "_DSM"))
+ if (!acpi_has_method(dhandle, "_DSM")) {
+ nouveau_dsm_priv.other_handle = dhandle;
return false;
-
- if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
+ }
+ if (acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
+ 1 << NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
- if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm,
- NOUVEAU_DSM_OPTIMUS_CAPS))
+ if (acpi_check_dsm(dhandle, nouveau_op_dsm_muid, 0x00000100,
+ 1 << NOUVEAU_DSM_OPTIMUS_CAPS))
retval |= NOUVEAU_DSM_HAS_OPT;
if (retval & NOUVEAU_DSM_HAS_OPT) {
@@ -338,6 +279,16 @@ static bool nouveau_dsm_detect(void)
printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
nouveau_dsm_priv.dsm_detected = true;
+ /*
+ * On some systems hotplug events are generated for the device
+ * being switched off when _DSM is executed. They cause ACPI
+ * hotplug to trigger and attempt to remove the device from
+ * the system, which causes it to break down. Prevent that from
+ * happening by setting the no_hotplug flag for the involved
+ * ACPI device objects.
+ */
+ acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
+ acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
ret = true;
}
@@ -376,6 +327,11 @@ void nouveau_unregister_dsm_handler(void)
if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected)
vga_switcheroo_unregister_handler();
}
+#else
+void nouveau_register_dsm_handler(void) {}
+void nouveau_unregister_dsm_handler(void) {}
+void nouveau_switcheroo_optimus_dsm(void) {}
+#endif
/* retrieve the ROM in 4k blocks */
static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c0fde6b9393c..488686d490c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -560,28 +560,6 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
}
-/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
- * TTM_PL_{VRAM,TT} directly.
- */
-
-static int
-nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
- struct nouveau_bo *nvbo, bool evict,
- bool no_wait_gpu, struct ttm_mem_reg *new_mem)
-{
- struct nouveau_fence *fence = NULL;
- int ret;
-
- ret = nouveau_fence_new(chan, false, &fence);
- if (ret)
- return ret;
-
- ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
- no_wait_gpu, new_mem);
- nouveau_fence_unref(&fence);
- return ret;
-}
-
static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
@@ -798,25 +776,25 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
struct nouveau_mem *node = old_mem->mm_node;
- struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 length = (new_mem->num_pages << PAGE_SHIFT);
u64 src_offset = node->vma[0].offset;
u64 dst_offset = node->vma[1].offset;
+ int src_tiled = !!node->memtype;
+ int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
int ret;
while (length) {
u32 amount, stride, height;
+ ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
+ if (ret)
+ return ret;
+
amount = min(length, (u64)(4 * 1024 * 1024));
stride = 16 * 4;
height = amount / stride;
- if (old_mem->mem_type == TTM_PL_VRAM &&
- nouveau_bo_tile_layout(nvbo)) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- return ret;
-
+ if (src_tiled) {
BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
@@ -826,19 +804,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
OUT_RING (chan, 0);
OUT_RING (chan, 0);
} else {
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
-
BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
OUT_RING (chan, 1);
}
- if (new_mem->mem_type == TTM_PL_VRAM &&
- nouveau_bo_tile_layout(nvbo)) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- return ret;
-
+ if (dst_tiled) {
BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
@@ -848,18 +817,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
OUT_RING (chan, 0);
OUT_RING (chan, 0);
} else {
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
-
BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
OUT_RING (chan, 1);
}
- ret = RING_SPACE(chan, 14);
- if (ret)
- return ret;
-
BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
OUT_RING (chan, upper_32_bits(src_offset));
OUT_RING (chan, upper_32_bits(dst_offset));
@@ -953,23 +914,28 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
}
static int
-nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
- struct ttm_mem_reg *mem, struct nouveau_vma *vma)
+nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
{
- struct nouveau_mem *node = mem->mm_node;
+ struct nouveau_mem *old_node = bo->mem.mm_node;
+ struct nouveau_mem *new_node = mem->mm_node;
+ u64 size = (u64)mem->num_pages << PAGE_SHIFT;
int ret;
- ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
- PAGE_SHIFT, node->page_shift,
- NV_MEM_ACCESS_RW, vma);
+ ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift,
+ NV_MEM_ACCESS_RW, &old_node->vma[0]);
if (ret)
return ret;
- if (mem->mem_type == TTM_PL_VRAM)
- nouveau_vm_map(vma, node);
- else
- nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
+ ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift,
+ NV_MEM_ACCESS_RW, &old_node->vma[1]);
+ if (ret) {
+ nouveau_vm_put(&old_node->vma[0]);
+ return ret;
+ }
+ nouveau_vm_map(&old_node->vma[0], old_node);
+ nouveau_vm_map(&old_node->vma[1], new_node);
return 0;
}
@@ -979,35 +945,34 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct nouveau_fence *fence;
int ret;
- mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
-
/* create temporary vmas for the transfer and attach them to the
* old nouveau_mem node, these will get cleaned up after ttm has
* destroyed the ttm_mem_reg
*/
if (nv_device(drm->device)->card_type >= NV_50) {
- struct nouveau_mem *node = old_mem->mm_node;
-
- ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
- if (ret)
- goto out;
-
- ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
+ ret = nouveau_bo_move_prep(drm, bo, new_mem);
if (ret)
- goto out;
+ return ret;
}
- ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
+ mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
+ ret = nouveau_fence_sync(bo->sync_obj, chan);
if (ret == 0) {
- ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
- no_wait_gpu, new_mem);
+ ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
+ if (ret == 0) {
+ ret = nouveau_fence_new(chan, false, &fence);
+ if (ret == 0) {
+ ret = ttm_bo_move_accel_cleanup(bo, fence,
+ evict,
+ no_wait_gpu,
+ new_mem);
+ nouveau_fence_unref(&fence);
+ }
+ }
}
-
-out:
mutex_unlock(&chan->cli->mutex);
return ret;
}
@@ -1147,19 +1112,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
return;
list_for_each_entry(vma, &nvbo->vma_list, head) {
- if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
+ if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
+ (new_mem->mem_type == TTM_PL_VRAM ||
+ nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
nouveau_vm_map(vma, new_mem->mm_node);
- } else
- if (new_mem && new_mem->mem_type == TTM_PL_TT &&
- nvbo->page_shift == vma->vm->vmm->spg_shift) {
- if (((struct nouveau_mem *)new_mem->mm_node)->sg)
- nouveau_vm_map_sg_table(vma, 0, new_mem->
- num_pages << PAGE_SHIFT,
- new_mem->mm_node);
- else
- nouveau_vm_map_sg(vma, 0, new_mem->
- num_pages << PAGE_SHIFT,
- new_mem->mm_node);
} else {
nouveau_vm_unmap(vma);
}
@@ -1224,28 +1180,27 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
goto out;
}
- /* CPU copy if we have no accelerated method available */
- if (!drm->ttm.move) {
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
- goto out;
- }
-
/* Hardware assisted copy. */
- if (new_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict, intr,
- no_wait_gpu, new_mem);
- else if (old_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict, intr,
- no_wait_gpu, new_mem);
- else
- ret = nouveau_bo_move_m2mf(bo, evict, intr,
- no_wait_gpu, new_mem);
-
- if (!ret)
- goto out;
+ if (drm->ttm.move) {
+ if (new_mem->mem_type == TTM_PL_SYSTEM)
+ ret = nouveau_bo_move_flipd(bo, evict, intr,
+ no_wait_gpu, new_mem);
+ else if (old_mem->mem_type == TTM_PL_SYSTEM)
+ ret = nouveau_bo_move_flips(bo, evict, intr,
+ no_wait_gpu, new_mem);
+ else
+ ret = nouveau_bo_move_m2mf(bo, evict, intr,
+ no_wait_gpu, new_mem);
+ if (!ret)
+ goto out;
+ }
/* Fallback to software copy. */
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ spin_lock(&bo->bdev->fence_lock);
+ ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
+ spin_unlock(&bo->bdev->fence_lock);
+ if (ret == 0)
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
out:
if (nv_device(drm->device)->card_type < NV_50) {
@@ -1271,6 +1226,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct nouveau_mem *node = mem->mm_node;
struct drm_device *dev = drm->dev;
int ret;
@@ -1293,14 +1249,16 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
mem->bus.is_iomem = !dev->agp->cant_use_aperture;
}
#endif
- break;
+ if (!node->memtype)
+ /* untiled */
+ break;
+ /* fallthrough, tiled memory */
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = pci_resource_start(dev->pdev, 1);
mem->bus.is_iomem = true;
if (nv_device(drm->device)->card_type >= NV_50) {
struct nouveau_bar *bar = nouveau_bar(drm->device);
- struct nouveau_mem *node = mem->mm_node;
ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
&node->bar_vma);
@@ -1336,6 +1294,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_device *device = nv_device(drm->device);
u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;
+ int ret;
/* as long as the bo isn't in vram, and isn't tiled, we've got
* nothing to do here.
@@ -1344,10 +1303,20 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
if (nv_device(drm->device)->card_type < NV_50 ||
!nouveau_bo_tile_layout(nvbo))
return 0;
+
+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+ nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
+
+ ret = nouveau_bo_validate(nvbo, false, false);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
/* make sure bo is in mappable vram */
- if (bo->mem.start + bo->mem.num_pages < mappable)
+ if (nv_device(drm->device)->card_type >= NV_50 ||
+ bo->mem.start + bo->mem.num_pages < mappable)
return 0;
@@ -1535,7 +1504,6 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
struct nouveau_vma *vma)
{
const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
- struct nouveau_mem *node = nvbo->bo.mem.mm_node;
int ret;
ret = nouveau_vm_get(vm, size, nvbo->page_shift,
@@ -1543,15 +1511,10 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
if (ret)
return ret;
- if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+ (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
+ nvbo->page_shift != vma->vm->vmm->lpg_shift))
nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
- else if (nvbo->bo.mem.mem_type == TTM_PL_TT &&
- nvbo->page_shift == vma->vm->vmm->spg_shift) {
- if (node->sg)
- nouveau_vm_map_sg_table(vma, 0, size, node);
- else
- nouveau_vm_map_sg(vma, 0, size, node);
- }
list_add_tail(&vma->head, &nvbo->vma_list);
vma->refcount = 1;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 29c3efdfc7dd..24011596af43 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -68,20 +68,100 @@ nouveau_display_vblank_disable(struct drm_device *dev, int head)
nouveau_event_put(disp->vblank[head]);
}
+static inline int
+calc(int blanks, int blanke, int total, int line)
+{
+ if (blanke >= blanks) {
+ if (line >= blanks)
+ line -= total;
+ } else {
+ if (line >= blanks)
+ line -= total;
+ line -= blanke + 1;
+ }
+ return line;
+}
+
+int
+nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime)
+{
+ const u32 mthd = NV04_DISP_SCANOUTPOS + nouveau_crtc(crtc)->index;
+ struct nouveau_display *disp = nouveau_display(crtc->dev);
+ struct nv04_display_scanoutpos args;
+ int ret, retry = 1;
+
+ do {
+ ret = nv_exec(disp->core, mthd, &args, sizeof(args));
+ if (ret != 0)
+ return 0;
+
+ if (args.vline) {
+ ret |= DRM_SCANOUTPOS_ACCURATE;
+ ret |= DRM_SCANOUTPOS_VALID;
+ break;
+ }
+
+ if (retry) ndelay(crtc->linedur_ns);
+ } while (retry--);
+
+ *hpos = calc(args.hblanks, args.hblanke, args.htotal, args.hline);
+ *vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline);
+ if (stime) *stime = ns_to_ktime(args.time[0]);
+ if (etime) *etime = ns_to_ktime(args.time[1]);
+
+ if (*vpos < 0)
+ ret |= DRM_SCANOUTPOS_INVBL;
+ return ret;
+}
+
+int
+nouveau_display_scanoutpos(struct drm_device *dev, int head, unsigned int flags,
+ int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (nouveau_crtc(crtc)->index == head) {
+ return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
+ stime, etime);
+ }
+ }
+
+ return 0;
+}
+
+int
+nouveau_display_vblstamp(struct drm_device *dev, int head, int *max_error,
+ struct timeval *time, unsigned flags)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (nouveau_crtc(crtc)->index == head) {
+ return drm_calc_vbltimestamp_from_scanoutpos(dev,
+ head, max_error, time, flags, crtc,
+ &crtc->hwmode);
+ }
+ }
+
+ return -EINVAL;
+}
+
static void
nouveau_display_vblank_fini(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
int i;
+ drm_vblank_cleanup(dev);
+
if (disp->vblank) {
for (i = 0; i < dev->mode_config.num_crtc; i++)
nouveau_event_ref(NULL, &disp->vblank[i]);
kfree(disp->vblank);
disp->vblank = NULL;
}
-
- drm_vblank_cleanup(dev);
}
static int
@@ -407,10 +487,31 @@ nouveau_display_create(struct drm_device *dev)
drm_kms_helper_poll_disable(dev);
if (drm->vbios.dcb.entries) {
- if (nv_device(drm->device)->card_type < NV_50)
- ret = nv04_display_create(dev);
- else
- ret = nv50_display_create(dev);
+ static const u16 oclass[] = {
+ NVF0_DISP_CLASS,
+ NVE0_DISP_CLASS,
+ NVD0_DISP_CLASS,
+ NVA3_DISP_CLASS,
+ NV94_DISP_CLASS,
+ NVA0_DISP_CLASS,
+ NV84_DISP_CLASS,
+ NV50_DISP_CLASS,
+ NV04_DISP_CLASS,
+ };
+ int i;
+
+ for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
+ ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
+ NVDRM_DISPLAY, oclass[i],
+ NULL, 0, &disp->core);
+ }
+
+ if (ret == 0) {
+ if (nv_mclass(disp->core) < NV50_DISP_CLASS)
+ ret = nv04_display_create(dev);
+ else
+ ret = nv50_display_create(dev);
+ }
} else {
ret = 0;
}
@@ -439,6 +540,7 @@ void
nouveau_display_destroy(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
nouveau_backlight_exit(dev);
nouveau_display_vblank_fini(dev);
@@ -449,6 +551,8 @@ nouveau_display_destroy(struct drm_device *dev)
if (disp->dtor)
disp->dtor(dev);
+ nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_DISPLAY);
+
nouveau_drm(dev)->display = NULL;
kfree(disp);
}
@@ -603,6 +707,14 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (!s)
return -ENOMEM;
+ if (new_bo != old_bo) {
+ ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ goto fail_free;
+ }
+
+ mutex_lock(&chan->cli->mutex);
+
/* synchronise rendering channel with the kernel's channel */
spin_lock(&new_bo->bo.bdev->fence_lock);
fence = nouveau_fence_ref(new_bo->bo.sync_obj);
@@ -610,15 +722,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
ret = nouveau_fence_sync(fence, chan);
nouveau_fence_unref(&fence);
if (ret)
- return ret;
-
- if (new_bo != old_bo) {
- ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
- if (ret)
- goto fail_free;
- }
+ goto fail_unpin;
- mutex_lock(&chan->cli->mutex);
ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
if (ret)
goto fail_unpin;
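[Editor's note, illustrative only -- not part of the patch. The calc() helper introduced above normalises a raw scanline counter so that lines inside the blanking interval come out negative, which is why the caller flags DRM_SCANOUTPOS_INVBL when *vpos < 0. A minimal standalone sketch with hypothetical timings (600 active lines, vblank spanning lines 600..619, 660 lines total):

	#include <stdio.h>

	/* same normalisation as the calc() helper added in nouveau_display.c */
	static int calc(int blanks, int blanke, int total, int line)
	{
		if (blanke >= blanks) {
			if (line >= blanks)
				line -= total;
		} else {
			if (line >= blanks)
				line -= total;
			line -= blanke + 1;
		}
		return line;
	}

	int main(void)
	{
		printf("%d\n", calc(600, 619, 660, 100)); /* 100: active region   */
		printf("%d\n", calc(600, 619, 660, 610)); /* -50: in vblank, INVBL */
		return 0;
	}
]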
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 8bc8bab90e8d..a71cf77e55b2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -36,6 +36,7 @@ struct nouveau_display {
int (*init)(struct drm_device *);
void (*fini)(struct drm_device *);
+ struct nouveau_object *core;
struct nouveau_eventh **vblank;
struct drm_property *dithering_mode;
@@ -63,6 +64,10 @@ void nouveau_display_repin(struct drm_device *dev);
void nouveau_display_resume(struct drm_device *dev);
int nouveau_display_vblank_enable(struct drm_device *, int);
void nouveau_display_vblank_disable(struct drm_device *, int);
+int nouveau_display_scanoutpos(struct drm_device *, int, unsigned int,
+ int *, int *, ktime_t *, ktime_t *);
+int nouveau_display_vblstamp(struct drm_device *, int, int *,
+ struct timeval *, unsigned);
int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 40f91e1e5842..c177272152e2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -100,7 +100,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
- DRM_MEMORYBARRIER();
+ mb();
/* Flush writes. */
nouveau_bo_rd32(pb, 0);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 984004d66a6d..dc0e0c5cadb4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -155,7 +155,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
}
#define WRITE_PUT(val) do { \
- DRM_MEMORYBARRIER(); \
+ mb(); \
nouveau_bo_rd32(chan->push.buffer, 0); \
nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
} while (0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 7a3759f1c41a..78c8e7146d56 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -503,19 +503,21 @@ nouveau_do_suspend(struct drm_device *dev)
if (drm->cechan) {
ret = nouveau_channel_idle(drm->cechan);
if (ret)
- return ret;
+ goto fail_display;
}
if (drm->channel) {
ret = nouveau_channel_idle(drm->channel);
if (ret)
- return ret;
+ goto fail_display;
}
NV_INFO(drm, "suspending client object trees...\n");
if (drm->fence && nouveau_fence(drm)->suspend) {
- if (!nouveau_fence(drm)->suspend(drm))
- return -ENOMEM;
+ if (!nouveau_fence(drm)->suspend(drm)) {
+ ret = -ENOMEM;
+ goto fail_display;
+ }
}
list_for_each_entry(cli, &drm->clients, head) {
@@ -537,6 +539,10 @@ fail_client:
nouveau_client_init(&cli->base);
}
+ if (drm->fence && nouveau_fence(drm)->resume)
+ nouveau_fence(drm)->resume(drm);
+
+fail_display:
if (dev->mode_config.num_crtc) {
NV_INFO(drm, "resuming display...\n");
nouveau_display_resume(dev);
@@ -798,6 +804,8 @@ driver = {
.get_vblank_counter = drm_vblank_count,
.enable_vblank = nouveau_display_vblank_enable,
.disable_vblank = nouveau_display_vblank_disable,
+ .get_scanout_position = nouveau_display_scanoutpos,
+ .get_vblank_timestamp = nouveau_display_vblstamp,
.ioctls = nouveau_ioctls,
.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
@@ -858,6 +866,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
if (nouveau_runtime_pm == 0)
return -EINVAL;
+ /* are we optimus enabled? */
+ if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
+ DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ return -EINVAL;
+ }
+
nv_debug_level(SILENT);
drm_kms_helper_poll_disable(drm_dev);
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 4b0fb6c66be9..23ca7a517246 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -54,6 +54,7 @@ enum nouveau_drm_handle {
NVDRM_CLIENT = 0xffffffff,
NVDRM_DEVICE = 0xdddddddd,
NVDRM_CONTROL = 0xdddddddc,
+ NVDRM_DISPLAY = 0xd1500000,
NVDRM_PUSH = 0xbbbb0000, /* |= client chid */
NVDRM_CHAN = 0xcccc0000, /* |= client chid */
NVDRM_NVSW = 0x55550000,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 40cf52e6d6d2..90074d620e31 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -143,7 +143,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
int ret;
fence->channel = chan;
- fence->timeout = jiffies + (15 * DRM_HZ);
+ fence->timeout = jiffies + (15 * HZ);
fence->sequence = ++fctx->sequence;
ret = fctx->emit(fence);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 78a27f8ad7d9..27c3fd89e8ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -463,12 +463,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
- ret = validate_sync(chan, nvbo);
- if (unlikely(ret)) {
- NV_ERROR(cli, "fail pre-validate sync\n");
- return ret;
- }
-
ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);
@@ -506,7 +500,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
b->presumed.valid = 0;
relocs++;
- if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+ if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
&b->presumed, sizeof(b->presumed)))
return -EFAULT;
}
@@ -593,7 +587,7 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
if (!mem)
return ERR_PTR(-ENOMEM);
- if (DRM_COPY_FROM_USER(mem, userptr, size)) {
+ if (copy_from_user(mem, userptr, size)) {
u_free(mem);
return ERR_PTR(-EFAULT);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 0843ebc910d4..a4d22e5eb176 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -31,16 +31,17 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nouveau_mem *node = mem->mm_node;
- u64 size = mem->num_pages << 12;
if (ttm->sg) {
- node->sg = ttm->sg;
- nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
+ node->sg = ttm->sg;
+ node->pages = NULL;
} else {
+ node->sg = NULL;
node->pages = nvbe->ttm.dma_address;
- nouveau_vm_map_sg(&node->vma[0], 0, size, node);
}
+ node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
+ nouveau_vm_map(&node->vma[0], node);
nvbe->node = node;
return 0;
}
@@ -67,9 +68,13 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
/* noop: bound in move_notify() */
if (ttm->sg) {
- node->sg = ttm->sg;
- } else
+ node->sg = ttm->sg;
+ node->pages = NULL;
+ } else {
+ node->sg = NULL;
node->pages = nvbe->ttm.dma_address;
+ }
+ node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 19e3757291fb..d45d50da978f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -171,6 +171,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
+
node->page_shift = 12;
switch (nv_device(drm->device)->card_type) {
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 4e384a2f99c3..2dccafc6e9db 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1035,6 +1035,7 @@ static bool
nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
return true;
}
@@ -2199,16 +2200,6 @@ nv50_display_destroy(struct drm_device *dev)
int
nv50_display_create(struct drm_device *dev)
{
- static const u16 oclass[] = {
- NVF0_DISP_CLASS,
- NVE0_DISP_CLASS,
- NVD0_DISP_CLASS,
- NVA3_DISP_CLASS,
- NV94_DISP_CLASS,
- NVA0_DISP_CLASS,
- NV84_DISP_CLASS,
- NV50_DISP_CLASS,
- };
struct nouveau_device *device = nouveau_dev(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct dcb_table *dcb = &drm->vbios.dcb;
@@ -2225,6 +2216,7 @@ nv50_display_create(struct drm_device *dev)
nouveau_display(dev)->dtor = nv50_display_destroy;
nouveau_display(dev)->init = nv50_display_init;
nouveau_display(dev)->fini = nv50_display_fini;
+ disp->core = nouveau_display(dev)->core;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2243,17 +2235,6 @@ nv50_display_create(struct drm_device *dev)
if (ret)
goto out;
- /* attempt to allocate a supported evo display class */
- ret = -ENODEV;
- for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) {
- ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
- 0xd1500000, oclass[i], NULL, 0,
- &disp->core);
- }
-
- if (ret)
- goto out;
-
/* allocate master evo channel */
ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0,
&(struct nv50_display_mast_class) {
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 0fd2eb139f6e..4313bb0a49a6 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -411,7 +411,7 @@ static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
struct drm_crtc *crtc = &omap_crtc->base;
DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus);
/* avoid getting in a flood, unregister the irq until next vblank */
- omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+ __omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
}
static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
@@ -421,13 +421,13 @@ static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
struct drm_crtc *crtc = &omap_crtc->base;
if (!omap_crtc->error_irq.registered)
- omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+ __omap_irq_register(crtc->dev, &omap_crtc->error_irq);
if (!dispc_mgr_go_busy(omap_crtc->channel)) {
struct omap_drm_private *priv =
crtc->dev->dev_private;
DBG("%s: apply done", omap_crtc->name);
- omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
+ __omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
queue_work(priv->wq, &omap_crtc->apply_work);
}
}
@@ -623,6 +623,11 @@ void omap_crtc_pre_init(void)
dss_install_mgr_ops(&mgr_ops);
}
+void omap_crtc_pre_uninit(void)
+{
+ dss_uninstall_mgr_ops();
+}
+
/* initialize crtc */
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct drm_plane *plane, enum omap_channel channel, int id)
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index c27f59da7f29..d4c04d69fc4d 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -48,7 +48,7 @@ static int mm_show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- return drm_mm_dump_table(m, dev->mm_private);
+ return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
}
static int fb_show(struct seq_file *m, void *arg)
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 701c4c10e08b..f926b4caf449 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -969,12 +969,21 @@ static const struct dev_pm_ops omap_dmm_pm_ops = {
};
#endif
+#if defined(CONFIG_OF)
+static const struct of_device_id dmm_of_match[] = {
+ { .compatible = "ti,omap4-dmm", },
+ { .compatible = "ti,omap5-dmm", },
+ {},
+};
+#endif
+
struct platform_driver omap_dmm_driver = {
.probe = omap_dmm_probe,
.remove = omap_dmm_remove,
.driver = {
.owner = THIS_MODULE,
.name = DMM_DRIVER_NAME,
+ .of_match_table = of_match_ptr(dmm_of_match),
#ifdef CONFIG_PM
.pm = &omap_dmm_pm_ops,
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index e7fa3cd96743..bf39fcc49e0f 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -86,6 +86,47 @@ static bool channel_used(struct drm_device *dev, enum omap_channel channel)
return false;
}
+static void omap_disconnect_dssdevs(void)
+{
+ struct omap_dss_device *dssdev = NULL;
+
+ for_each_dss_dev(dssdev)
+ dssdev->driver->disconnect(dssdev);
+}
+
+static int omap_connect_dssdevs(void)
+{
+ int r;
+ struct omap_dss_device *dssdev = NULL;
+ bool no_displays = true;
+
+ for_each_dss_dev(dssdev) {
+ r = dssdev->driver->connect(dssdev);
+ if (r == -EPROBE_DEFER) {
+ omap_dss_put_device(dssdev);
+ goto cleanup;
+ } else if (r) {
+ dev_warn(dssdev->dev, "could not connect display: %s\n",
+ dssdev->name);
+ } else {
+ no_displays = false;
+ }
+ }
+
+ if (no_displays)
+ return -EPROBE_DEFER;
+
+ return 0;
+
+cleanup:
+ /*
+ * if we are deferring probe, we disconnect the devices we previously
+ * connected
+ */
+ omap_disconnect_dssdevs();
+
+ return r;
+}
static int omap_modeset_init(struct drm_device *dev)
{
@@ -95,9 +136,6 @@ static int omap_modeset_init(struct drm_device *dev)
int num_mgrs = dss_feat_get_num_mgrs();
int num_crtcs;
int i, id = 0;
- int r;
-
- omap_crtc_pre_init();
drm_mode_config_init(dev);
@@ -119,26 +157,8 @@ static int omap_modeset_init(struct drm_device *dev)
enum omap_channel channel;
struct omap_overlay_manager *mgr;
- if (!dssdev->driver) {
- dev_warn(dev->dev, "%s has no driver.. skipping it\n",
- dssdev->name);
- continue;
- }
-
- if (!(dssdev->driver->get_timings ||
- dssdev->driver->read_edid)) {
- dev_warn(dev->dev, "%s driver does not support "
- "get_timings or read_edid.. skipping it!\n",
- dssdev->name);
- continue;
- }
-
- r = dssdev->driver->connect(dssdev);
- if (r) {
- dev_err(dev->dev, "could not connect display: %s\n",
- dssdev->name);
+ if (!omapdss_device_is_connected(dssdev))
continue;
- }
encoder = omap_encoder_init(dev, dssdev);
@@ -497,16 +517,16 @@ static int dev_unload(struct drm_device *dev)
DBG("unload: dev=%p", dev);
drm_kms_helper_poll_fini(dev);
- drm_vblank_cleanup(dev);
- omap_drm_irq_uninstall(dev);
omap_fbdev_free(dev);
omap_modeset_free(dev);
omap_gem_deinit(dev);
- flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
+ drm_vblank_cleanup(dev);
+ omap_drm_irq_uninstall(dev);
+
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -655,9 +675,19 @@ static void pdev_shutdown(struct platform_device *device)
static int pdev_probe(struct platform_device *device)
{
+ int r;
+
if (omapdss_is_initialized() == false)
return -EPROBE_DEFER;
+ omap_crtc_pre_init();
+
+ r = omap_connect_dssdevs();
+ if (r) {
+ omap_crtc_pre_uninit();
+ return r;
+ }
+
DBG("%s", device->name);
return drm_platform_init(&omap_drm_driver, device);
}
@@ -665,9 +695,11 @@ static int pdev_probe(struct platform_device *device)
static int pdev_remove(struct platform_device *device)
{
DBG("");
- drm_platform_exit(&omap_drm_driver, device);
- platform_driver_unregister(&omap_dmm_driver);
+ omap_disconnect_dssdevs();
+ omap_crtc_pre_uninit();
+
+ drm_put_dev(platform_get_drvdata(device));
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 07847693cf49..428b2981fd68 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -141,10 +141,12 @@ int omap_gem_resume(struct device *dev);
int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id);
void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id);
-irqreturn_t omap_irq_handler(DRM_IRQ_ARGS);
+irqreturn_t omap_irq_handler(int irq, void *arg);
void omap_irq_preinstall(struct drm_device *dev);
int omap_irq_postinstall(struct drm_device *dev);
void omap_irq_uninstall(struct drm_device *dev);
+void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
+void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
int omap_drm_irq_uninstall(struct drm_device *dev);
@@ -158,6 +160,7 @@ enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
int omap_crtc_apply(struct drm_crtc *crtc,
struct omap_drm_apply *apply);
void omap_crtc_pre_init(void);
+void omap_crtc_pre_uninit(void);
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct drm_plane *plane, enum omap_channel channel, int id);
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 6a12e899235b..5290a88c681d 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -51,6 +51,9 @@ struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder)
static void omap_encoder_destroy(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+
+ omap_encoder_set_enabled(encoder, false);
+
drm_encoder_cleanup(encoder);
kfree(omap_encoder);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index f2b8f0668c0c..f466c4aaee94 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -123,12 +123,16 @@ static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
{
int i;
+ drm_modeset_lock_all(fb->dev);
+
for (i = 0; i < num_clips; i++) {
omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
clips[i].x2 - clips[i].x1,
clips[i].y2 - clips[i].y1);
}
+ drm_modeset_unlock_all(fb->dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index cb858600185f..f035d2bceae7 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -45,12 +45,11 @@ static void omap_irq_update(struct drm_device *dev)
dispc_read_irqenable(); /* flush posted write */
}
-void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
+void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
{
struct omap_drm_private *priv = dev->dev_private;
unsigned long flags;
- dispc_runtime_get();
spin_lock_irqsave(&list_lock, flags);
if (!WARN_ON(irq->registered)) {
@@ -60,14 +59,21 @@ void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
}
spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+ dispc_runtime_get();
+
+ __omap_irq_register(dev, irq);
+
dispc_runtime_put();
}
-void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
+void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
{
unsigned long flags;
- dispc_runtime_get();
spin_lock_irqsave(&list_lock, flags);
if (!WARN_ON(!irq->registered)) {
@@ -77,6 +83,14 @@ void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
}
spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+ dispc_runtime_get();
+
+ __omap_irq_unregister(dev, irq);
+
dispc_runtime_put();
}
@@ -173,7 +187,7 @@ void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id)
dispc_runtime_put();
}
-irqreturn_t omap_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t omap_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
struct omap_drm_private *priv = dev->dev_private;
@@ -308,7 +322,7 @@ int omap_drm_irq_uninstall(struct drm_device *dev)
if (dev->num_crtcs) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
- DRM_WAKEUP(&dev->vblank[i].queue);
+ wake_up(&dev->vblank[i].queue);
dev->vblank[i].enabled = false;
dev->vblank[i].last =
dev->driver->get_vblank_counter(dev, i);
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
new file mode 100644
index 000000000000..3e0f13d1bc84
--- /dev/null
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -0,0 +1,19 @@
+config DRM_PANEL
+ bool
+ depends on DRM
+ help
+ Panel registration and lookup framework.
+
+menu "Display Panels"
+ depends on DRM_PANEL
+
+config DRM_PANEL_SIMPLE
+ tristate "support for simple panels"
+ depends on OF
+ help
+ DRM panel driver for dumb panels that need at most a regulator and
+ a GPIO to be powered up. Optionally a backlight can be attached so
+ that it can be automatically turned off when the panel goes into a
+ low power state.
+
+endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
new file mode 100644
index 000000000000..af9dfa235b94
--- /dev/null
+++ b/drivers/gpu/drm/panel/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
new file mode 100644
index 000000000000..59d52ca2c67f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+struct panel_desc {
+ const struct drm_display_mode *modes;
+ unsigned int num_modes;
+
+ struct {
+ unsigned int width;
+ unsigned int height;
+ } size;
+};
+
+/* TODO: convert to gpiod_*() API once it's been merged */
+#define GPIO_ACTIVE_LOW (1 << 0)
+
+struct panel_simple {
+ struct drm_panel base;
+ bool enabled;
+
+ const struct panel_desc *desc;
+
+ struct backlight_device *backlight;
+ struct regulator *supply;
+ struct i2c_adapter *ddc;
+
+ unsigned long enable_gpio_flags;
+ int enable_gpio;
+};
+
+static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
+{
+ return container_of(panel, struct panel_simple, base);
+}
+
+static int panel_simple_get_fixed_modes(struct panel_simple *panel)
+{
+ struct drm_connector *connector = panel->base.connector;
+ struct drm_device *drm = panel->base.drm;
+ struct drm_display_mode *mode;
+ unsigned int i, num = 0;
+
+ if (!panel->desc)
+ return 0;
+
+ for (i = 0; i < panel->desc->num_modes; i++) {
+ const struct drm_display_mode *m = &panel->desc->modes[i];
+
+ mode = drm_mode_duplicate(drm, m);
+ if (!mode) {
+ dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+ continue;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
+ connector->display_info.width_mm = panel->desc->size.width;
+ connector->display_info.height_mm = panel->desc->size.height;
+
+ return num;
+}
+
+static int panel_simple_disable(struct drm_panel *panel)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+
+ if (!p->enabled)
+ return 0;
+
+ if (p->backlight) {
+ p->backlight->props.power = FB_BLANK_POWERDOWN;
+ backlight_update_status(p->backlight);
+ }
+
+ if (gpio_is_valid(p->enable_gpio)) {
+ if (p->enable_gpio_flags & GPIO_ACTIVE_LOW)
+ gpio_set_value(p->enable_gpio, 1);
+ else
+ gpio_set_value(p->enable_gpio, 0);
+ }
+
+ regulator_disable(p->supply);
+ p->enabled = false;
+
+ return 0;
+}
+
+static int panel_simple_enable(struct drm_panel *panel)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+ int err;
+
+ if (p->enabled)
+ return 0;
+
+ err = regulator_enable(p->supply);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to enable supply: %d\n", err);
+ return err;
+ }
+
+ if (gpio_is_valid(p->enable_gpio)) {
+ if (p->enable_gpio_flags & GPIO_ACTIVE_LOW)
+ gpio_set_value(p->enable_gpio, 0);
+ else
+ gpio_set_value(p->enable_gpio, 1);
+ }
+
+ if (p->backlight) {
+ p->backlight->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(p->backlight);
+ }
+
+ p->enabled = true;
+
+ return 0;
+}
+
+static int panel_simple_get_modes(struct drm_panel *panel)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+ int num = 0;
+
+ /* probe EDID if a DDC bus is available */
+ if (p->ddc) {
+ struct edid *edid = drm_get_edid(panel->connector, p->ddc);
+ drm_mode_connector_update_edid_property(panel->connector, edid);
+ if (edid) {
+ num += drm_add_edid_modes(panel->connector, edid);
+ kfree(edid);
+ }
+ }
+
+ /* add hard-coded panel modes */
+ num += panel_simple_get_fixed_modes(p);
+
+ return num;
+}
+
+static const struct drm_panel_funcs panel_simple_funcs = {
+ .disable = panel_simple_disable,
+ .enable = panel_simple_enable,
+ .get_modes = panel_simple_get_modes,
+};
+
+static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
+{
+ struct device_node *backlight, *ddc;
+ struct panel_simple *panel;
+ enum of_gpio_flags flags;
+ int err;
+
+ panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ panel->enabled = false;
+ panel->desc = desc;
+
+ panel->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(panel->supply))
+ return PTR_ERR(panel->supply);
+
+ panel->enable_gpio = of_get_named_gpio_flags(dev->of_node,
+ "enable-gpios", 0,
+ &flags);
+ if (gpio_is_valid(panel->enable_gpio)) {
+ unsigned int value;
+
+ if (flags & OF_GPIO_ACTIVE_LOW)
+ panel->enable_gpio_flags |= GPIO_ACTIVE_LOW;
+
+ err = gpio_request(panel->enable_gpio, "enable");
+ if (err < 0) {
+ dev_err(dev, "failed to request GPIO#%u: %d\n",
+ panel->enable_gpio, err);
+ return err;
+ }
+
+ value = (panel->enable_gpio_flags & GPIO_ACTIVE_LOW) != 0;
+
+ err = gpio_direction_output(panel->enable_gpio, value);
+ if (err < 0) {
+ dev_err(dev, "failed to setup GPIO%u: %d\n",
+ panel->enable_gpio, err);
+ goto free_gpio;
+ }
+ }
+
+ backlight = of_parse_phandle(dev->of_node, "backlight", 0);
+ if (backlight) {
+ panel->backlight = of_find_backlight_by_node(backlight);
+ of_node_put(backlight);
+
+ if (!panel->backlight) {
+ err = -EPROBE_DEFER;
+ goto free_gpio;
+ }
+ }
+
+ ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
+ if (ddc) {
+ panel->ddc = of_find_i2c_adapter_by_node(ddc);
+ of_node_put(ddc);
+
+ if (!panel->ddc) {
+ err = -EPROBE_DEFER;
+ goto free_backlight;
+ }
+ }
+
+ drm_panel_init(&panel->base);
+ panel->base.dev = dev;
+ panel->base.funcs = &panel_simple_funcs;
+
+ err = drm_panel_add(&panel->base);
+ if (err < 0)
+ goto free_ddc;
+
+ dev_set_drvdata(dev, panel);
+
+ return 0;
+
+free_ddc:
+ if (panel->ddc)
+ put_device(&panel->ddc->dev);
+free_backlight:
+ if (panel->backlight)
+ put_device(&panel->backlight->dev);
+free_gpio:
+ if (gpio_is_valid(panel->enable_gpio))
+ gpio_free(panel->enable_gpio);
+
+ return err;
+}
+
+static int panel_simple_remove(struct device *dev)
+{
+ struct panel_simple *panel = dev_get_drvdata(dev);
+
+ drm_panel_detach(&panel->base);
+ drm_panel_remove(&panel->base);
+
+ panel_simple_disable(&panel->base);
+
+ if (panel->ddc)
+ put_device(&panel->ddc->dev);
+
+ if (panel->backlight)
+ put_device(&panel->backlight->dev);
+
+ if (gpio_is_valid(panel->enable_gpio))
+ gpio_free(panel->enable_gpio);
+
+ regulator_disable(panel->supply);
+
+ return 0;
+}
+
+static const struct drm_display_mode auo_b101aw03_mode = {
+ .clock = 51450,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 156,
+ .hsync_end = 1024 + 156 + 8,
+ .htotal = 1024 + 156 + 8 + 156,
+ .vdisplay = 600,
+ .vsync_start = 600 + 16,
+ .vsync_end = 600 + 16 + 6,
+ .vtotal = 600 + 16 + 6 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_b101aw03 = {
+ .modes = &auo_b101aw03_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 223,
+ .height = 125,
+ },
+};
+
+static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
+ .clock = 72070,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 58,
+ .hsync_end = 1366 + 58 + 58,
+ .htotal = 1366 + 58 + 58 + 58,
+ .vdisplay = 768,
+ .vsync_start = 768 + 4,
+ .vsync_end = 768 + 4 + 4,
+ .vtotal = 768 + 4 + 4 + 4,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc chunghwa_claa101wa01a = {
+ .modes = &chunghwa_claa101wa01a_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 220,
+ .height = 120,
+ },
+};
+
+static const struct drm_display_mode chunghwa_claa101wb01_mode = {
+ .clock = 69300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 48,
+ .hsync_end = 1366 + 48 + 32,
+ .htotal = 1366 + 48 + 32 + 20,
+ .vdisplay = 768,
+ .vsync_start = 768 + 16,
+ .vsync_end = 768 + 16 + 8,
+ .vtotal = 768 + 16 + 8 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc chunghwa_claa101wb01 = {
+ .modes = &chunghwa_claa101wb01_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 223,
+ .height = 125,
+ },
+};
+
+static const struct drm_display_mode samsung_ltn101nt05_mode = {
+ .clock = 54030,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 24,
+ .hsync_end = 1024 + 24 + 136,
+ .htotal = 1024 + 24 + 136 + 160,
+ .vdisplay = 600,
+ .vsync_start = 600 + 3,
+ .vsync_end = 600 + 3 + 6,
+ .vtotal = 600 + 3 + 6 + 61,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc samsung_ltn101nt05 = {
+ .modes = &samsung_ltn101nt05_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 1024,
+ .height = 600,
+ },
+};
+
+static const struct of_device_id platform_of_match[] = {
+ {
+ .compatible = "auo,b101aw03",
+ .data = &auo_b101aw03,
+ }, {
+ .compatible = "chunghwa,claa101wa01a",
+ .data = &chunghwa_claa101wa01a
+ }, {
+ .compatible = "chunghwa,claa101wb01",
+ .data = &chunghwa_claa101wb01
+ }, {
+ .compatible = "samsung,ltn101nt05",
+ .data = &samsung_ltn101nt05,
+ }, {
+ .compatible = "simple-panel",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, platform_of_match);
+
+static int panel_simple_platform_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+
+ id = of_match_node(platform_of_match, pdev->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ return panel_simple_probe(&pdev->dev, id->data);
+}
+
+static int panel_simple_platform_remove(struct platform_device *pdev)
+{
+ return panel_simple_remove(&pdev->dev);
+}
+
+static struct platform_driver panel_simple_platform_driver = {
+ .driver = {
+ .name = "panel-simple",
+ .owner = THIS_MODULE,
+ .of_match_table = platform_of_match,
+ },
+ .probe = panel_simple_platform_probe,
+ .remove = panel_simple_platform_remove,
+};
+
+struct panel_desc_dsi {
+ struct panel_desc desc;
+
+ enum mipi_dsi_pixel_format format;
+ unsigned int lanes;
+};
+
+static const struct drm_display_mode panasonic_vvx10f004b00_mode = {
+ .clock = 157200,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 154,
+ .hsync_end = 1920 + 154 + 16,
+ .htotal = 1920 + 154 + 16 + 32,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 17,
+ .vsync_end = 1200 + 17 + 2,
+ .vtotal = 1200 + 17 + 2 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
+ .desc = {
+ .modes = &panasonic_vvx10f004b00_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ },
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+};
+
+static const struct of_device_id dsi_of_match[] = {
+ {
+ .compatible = "panasonic,vvx10f004b00",
+ .data = &panasonic_vvx10f004b00
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, dsi_of_match);
+
+static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ const struct panel_desc_dsi *desc;
+ const struct of_device_id *id;
+ int err;
+
+ id = of_match_node(dsi_of_match, dsi->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ desc = id->data;
+
+ err = panel_simple_probe(&dsi->dev, &desc->desc);
+ if (err < 0)
+ return err;
+
+ dsi->format = desc->format;
+ dsi->lanes = desc->lanes;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ int err;
+
+ err = mipi_dsi_detach(dsi);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+ return panel_simple_remove(&dsi->dev);
+}
+
+static struct mipi_dsi_driver panel_simple_dsi_driver = {
+ .driver = {
+ .name = "panel-simple-dsi",
+ .owner = THIS_MODULE,
+ .of_match_table = dsi_of_match,
+ },
+ .probe = panel_simple_dsi_probe,
+ .remove = panel_simple_dsi_remove,
+};
+
+static int __init panel_simple_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&panel_simple_platform_driver);
+ if (err < 0)
+ return err;
+
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) {
+ err = mipi_dsi_driver_register(&panel_simple_dsi_driver);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+module_init(panel_simple_init);
+
+static void __exit panel_simple_exit(void)
+{
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+ mipi_dsi_driver_unregister(&panel_simple_dsi_driver);
+
+ platform_driver_unregister(&panel_simple_platform_driver);
+}
+module_exit(panel_simple_exit);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("DRM Driver for Simple Panels");
+MODULE_LICENSE("GPL and additional rights");
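[Editor's note, illustrative only -- not part of the patch. A display controller driver would typically consume a panel registered by panel-simple through the drm_panel framework this driver hooks into. The sketch below assumes the consumer-side helpers of_drm_find_panel() and drm_panel_enable() from <drm/drm_panel.h>, and a hypothetical "panel" phandle property in the controller's device-tree node:

	#include <linux/of.h>
	#include <drm/drm_panel.h>

	static struct drm_panel *example_lookup_panel(struct device_node *np)
	{
		/* "panel" is a hypothetical phandle pointing at the panel node */
		struct device_node *panel_np = of_parse_phandle(np, "panel", 0);
		struct drm_panel *panel;

		if (!panel_np)
			return NULL;

		panel = of_drm_find_panel(panel_np);
		of_node_put(panel_np);
		if (!panel)
			return ERR_PTR(-EPROBE_DEFER); /* panel-simple not bound yet */

		/* powers the supply/GPIO and unblanks the backlight via the
		 * panel_simple_enable() callback registered above */
		drm_panel_enable(panel);
		return panel;
	}
]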
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index b5be757062b2..38c2bb72e456 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -8,6 +8,7 @@ config DRM_QXL
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_TTM
+ select CRC32
help
QXL virtual GPU for Spice virtualization desktop integration.
Do not enable this driver unless your distro ships a corresponding
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 5e827c29d194..798bde2e5881 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -24,7 +24,7 @@
*/
-#include "linux/crc32.h"
+#include <linux/crc32.h>
#include "qxl_drv.h"
#include "qxl_object.h"
@@ -399,10 +399,14 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
struct qxl_bo *qobj;
int inc = 1;
+ drm_modeset_lock_all(fb->dev);
+
qobj = gem_to_qxl_bo(qxl_fb->obj);
/* if we aren't primary surface ignore this */
- if (!qobj->is_primary)
+ if (!qobj->is_primary) {
+ drm_modeset_unlock_all(fb->dev);
return 0;
+ }
if (!num_clips) {
num_clips = 1;
@@ -417,6 +421,9 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color,
clips, num_clips, inc);
+
+ drm_modeset_unlock_all(fb->dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 7bda32f68d3b..36ed40ba773f 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -534,7 +534,7 @@ void qxl_debugfs_takedown(struct drm_minor *minor);
/* qxl_irq.c */
int qxl_irq_init(struct qxl_device *qdev);
-irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS);
+irqreturn_t qxl_irq_handler(int irq, void *arg);
/* qxl_fb.c */
int qxl_fb_init(struct qxl_device *qdev);
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 7b95c75e9626..0bb86e6d41b4 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -200,7 +200,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
for (i = 0; i < cmd->relocs_num; ++i) {
struct drm_qxl_reloc reloc;
- if (DRM_COPY_FROM_USER(&reloc,
+ if (copy_from_user(&reloc,
&((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
sizeof(reloc))) {
ret = -EFAULT;
@@ -297,7 +297,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
struct drm_qxl_command *commands =
(struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
- if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
+ if (copy_from_user(&user_cmd, &commands[cmd_num],
sizeof(user_cmd)))
return -EFAULT;
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 21393dc4700a..28f84b4fce32 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -25,7 +25,7 @@
#include "qxl_drv.h"
-irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t qxl_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index e5ca498be920..fd88eb4a3f79 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -115,7 +115,7 @@ static void qxl_gc_work(struct work_struct *work)
qxl_garbage_collect(qdev);
}
-int qxl_device_init(struct qxl_device *qdev,
+static int qxl_device_init(struct qxl_device *qdev,
struct drm_device *ddev,
struct pci_dev *pdev,
unsigned long flags)
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index c451257f08fb..59459fe4e8c5 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -892,10 +892,10 @@ static int r128_cce_get_buffers(struct drm_device *dev,
buf->file_priv = file_priv;
- if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
+ if (copy_to_user(&d->request_indices[i], &buf->idx,
sizeof(buf->idx)))
return -EFAULT;
- if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
+ if (copy_to_user(&d->request_sizes[i], &buf->total,
sizeof(buf->total)))
return -EFAULT;
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 56eb5e3f5439..5bf3f5ff805d 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -154,7 +154,7 @@ extern int r128_do_cleanup_cce(struct drm_device *dev);
extern int r128_enable_vblank(struct drm_device *dev, int crtc);
extern void r128_disable_vblank(struct drm_device *dev, int crtc);
extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
-extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t r128_driver_irq_handler(int irq, void *arg);
extern void r128_driver_irq_preinstall(struct drm_device *dev);
extern int r128_driver_irq_postinstall(struct drm_device *dev);
extern void r128_driver_irq_uninstall(struct drm_device *dev);
@@ -514,7 +514,7 @@ do { \
if (R128_VERBOSE) \
DRM_INFO("COMMIT_RING() tail=0x%06x\n", \
dev_priv->ring.tail); \
- DRM_MEMORYBARRIER(); \
+ mb(); \
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail); \
R128_READ(R128_PM4_BUFFER_DL_WPTR); \
} while (0)
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index a954c548201e..b0d0fd3e4376 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -33,6 +33,7 @@
#include <drm/drmP.h>
#include <drm/r128_drm.h>
+#include "r128_drv.h"
typedef struct drm_r128_init32 {
int func;
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index 2ea4f09d2691..c2ae496babb7 100644
--- a/drivers/gpu/drm/r128/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -44,7 +44,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
return atomic_read(&dev_priv->vbl_received);
}
-irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t r128_driver_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 01dd9aef9f0e..e806dacd452f 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -895,31 +895,22 @@ static int r128_cce_dispatch_write_span(struct drm_device *dev,
if (count > 4096 || count <= 0)
return -EMSGSIZE;
- if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
+ if (copy_from_user(&x, depth->x, sizeof(x)))
return -EFAULT;
- if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
+ if (copy_from_user(&y, depth->y, sizeof(y)))
return -EFAULT;
buffer_size = depth->n * sizeof(u32);
- buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (buffer == NULL)
- return -ENOMEM;
- if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
- kfree(buffer);
- return -EFAULT;
- }
+ buffer = memdup_user(depth->buffer, buffer_size);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
mask_size = depth->n * sizeof(u8);
if (depth->mask) {
- mask = kmalloc(mask_size, GFP_KERNEL);
- if (mask == NULL) {
+ mask = memdup_user(depth->mask, mask_size);
+ if (IS_ERR(mask)) {
kfree(buffer);
- return -ENOMEM;
- }
- if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
- kfree(buffer);
- kfree(mask);
- return -EFAULT;
+ return PTR_ERR(mask);
}
for (i = 0; i < count; i++, x++) {
@@ -999,46 +990,33 @@ static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
kfree(x);
return -ENOMEM;
}
- if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
+ if (copy_from_user(x, depth->x, xbuf_size)) {
kfree(x);
kfree(y);
return -EFAULT;
}
- if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
+ if (copy_from_user(y, depth->y, xbuf_size)) {
kfree(x);
kfree(y);
return -EFAULT;
}
buffer_size = depth->n * sizeof(u32);
- buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (buffer == NULL) {
- kfree(x);
- kfree(y);
- return -ENOMEM;
- }
- if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
+ buffer = memdup_user(depth->buffer, buffer_size);
+ if (IS_ERR(buffer)) {
kfree(x);
kfree(y);
- kfree(buffer);
- return -EFAULT;
+ return PTR_ERR(buffer);
}
if (depth->mask) {
mask_size = depth->n * sizeof(u8);
- mask = kmalloc(mask_size, GFP_KERNEL);
- if (mask == NULL) {
- kfree(x);
- kfree(y);
- kfree(buffer);
- return -ENOMEM;
- }
- if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
+ mask = memdup_user(depth->mask, mask_size);
+ if (IS_ERR(mask)) {
kfree(x);
kfree(y);
kfree(buffer);
- kfree(mask);
- return -EFAULT;
+ return PTR_ERR(mask);
}
for (i = 0; i < count; i++) {
@@ -1107,9 +1085,9 @@ static int r128_cce_dispatch_read_span(struct drm_device *dev,
if (count > 4096 || count <= 0)
return -EMSGSIZE;
- if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
+ if (copy_from_user(&x, depth->x, sizeof(x)))
return -EFAULT;
- if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
+ if (copy_from_user(&y, depth->y, sizeof(y)))
return -EFAULT;
BEGIN_RING(7);
@@ -1162,12 +1140,12 @@ static int r128_cce_dispatch_read_pixels(struct drm_device *dev,
kfree(x);
return -ENOMEM;
}
- if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
+ if (copy_from_user(x, depth->x, xbuf_size)) {
kfree(x);
kfree(y);
return -EFAULT;
}
- if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
+ if (copy_from_user(y, depth->y, ybuf_size)) {
kfree(x);
kfree(y);
return -EFAULT;
@@ -1524,7 +1502,7 @@ static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file
DEV_INIT_TEST_WITH_RETURN(dev_priv);
- if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
+ if (copy_from_user(&mask, stipple->mask, 32 * sizeof(u32)))
return -EFAULT;
RING_SPACE_TEST_WITH_RETURN(dev_priv);
@@ -1622,7 +1600,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
return -EINVAL;
}
- if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+ if (copy_to_user(param->value, &value, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 80a20120e625..a9338c85630f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -209,6 +209,16 @@ static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+static const u32 vga_control_regs[6] =
+{
+ AVIVO_D1VGA_CONTROL,
+ AVIVO_D2VGA_CONTROL,
+ EVERGREEN_D3VGA_CONTROL,
+ EVERGREEN_D4VGA_CONTROL,
+ EVERGREEN_D5VGA_CONTROL,
+ EVERGREEN_D6VGA_CONTROL,
+};
+
static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -216,13 +226,23 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
struct radeon_device *rdev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
BLANK_CRTC_PS_ALLOCATION args;
+ u32 vga_control = 0;
memset(&args, 0, sizeof(args));
+ if (ASIC_IS_DCE8(rdev)) {
+ vga_control = RREG32(vga_control_regs[radeon_crtc->crtc_id]);
+ WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control | 1);
+ }
+
args.ucCRTC = radeon_crtc->crtc_id;
args.ucBlanking = state;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ if (ASIC_IS_DCE8(rdev)) {
+ WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
+ }
}
static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
@@ -423,7 +443,17 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
union atom_enable_ss args;
- if (!enable) {
+ if (enable) {
+ /* Don't mess with SS if percentage is 0 or external ss.
+ * SS is already disabled previously, and disabling it
+ * again can cause display problems if the pll is already
+ * programmed.
+ */
+ if (ss->percentage == 0)
+ return;
+ if (ss->type & ATOM_EXTERNAL_SS_MASK)
+ return;
+ } else {
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i] &&
rdev->mode_info.crtcs[i]->enabled &&
@@ -459,8 +489,6 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v3.ucEnable = enable;
- if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev))
- args.v3.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE4(rdev)) {
args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
@@ -480,8 +508,6 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v2.ucEnable = enable;
- if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
- args.v2.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE3(rdev)) {
args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
@@ -503,8 +529,7 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
args.lvds_ss_2.ucEnable = enable;
} else {
- if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
- (ss->type & ATOM_EXTERNAL_SS_MASK)) {
+ if (enable == ATOM_DISABLE) {
atombios_disable_ss(rdev, pll_id);
return;
}
@@ -938,11 +963,14 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
ATOM_DP_SS_ID1);
- } else
+ } else {
radeon_crtc->ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
ATOM_DP_SS_ID1);
+ }
+ /* disable spread spectrum on DCE3 DP */
+ radeon_crtc->ss_enabled = false;
}
break;
case ATOM_ENCODER_MODE_LVDS:
@@ -1039,15 +1067,17 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
/* calculate ss amount and step size */
if (ASIC_IS_DCE4(rdev)) {
u32 step_size;
- u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000;
+ u32 amount = (((fb_div * 10) + frac_fb_div) *
+ (u32)radeon_crtc->ss.percentage) /
+ (100 * (u32)radeon_crtc->ss.percentage_divider);
radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
- step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
+ step_size = (4 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
(125 * 25 * pll->reference_freq / 100);
else
- step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
+ step_size = (2 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
(125 * 25 * pll->reference_freq / 100);
radeon_crtc->ss.step = step_size;
}
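
Editor's note: the DCE4 spread-spectrum hunk above rescales the percentage by the percentage_divider field carried with the SS info and widens rate to u32 so the intermediate products stay in 32-bit math. The arithmetic can be checked in isolation; the sketch below uses made-up divider/percentage/reference-clock values (the driver's reference_freq is assumed to be in 10 kHz units), so treat the numbers as illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	/* Standalone sketch of the DCE4 SS amount/step math from the hunk above. */
	int main(void)
	{
		uint32_t fb_div = 0x100, frac_fb_div = 5, ref_div = 4;
		uint32_t percentage = 250, percentage_divider = 100;	/* 2.5% in 0.01% units */
		uint32_t rate = 4;					/* spread frequency field */
		uint32_t reference_freq = 2700;				/* 27 MHz, 10 kHz units */

		uint32_t amount = (((fb_div * 10) + frac_fb_div) * percentage) /
				  (100 * percentage_divider);
		/* down spread; centre spread uses 4 * amount instead of 2 * amount */
		uint32_t step = (2 * amount * ref_div * (rate * 2048)) /
				(125 * 25 * reference_freq / 100);

		printf("amount=%u step=%u\n", amount, step);
		return 0;
	}
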
@@ -1143,31 +1173,53 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
}
if (tiling_flags & RADEON_TILING_MACRO) {
- if (rdev->family >= CHIP_BONAIRE)
- tmp = rdev->config.cik.tile_config;
- else if (rdev->family >= CHIP_TAHITI)
- tmp = rdev->config.si.tile_config;
- else if (rdev->family >= CHIP_CAYMAN)
- tmp = rdev->config.cayman.tile_config;
- else
- tmp = rdev->config.evergreen.tile_config;
+ evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
- switch ((tmp & 0xf0) >> 4) {
- case 0: /* 4 banks */
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
- break;
- case 1: /* 8 banks */
- default:
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
- break;
- case 2: /* 16 banks */
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
- break;
+ /* Set NUM_BANKS. */
+ if (rdev->family >= CHIP_BONAIRE) {
+ unsigned tileb, index, num_banks, tile_split_bytes;
+
+ /* Calculate the macrotile mode index. */
+ tile_split_bytes = 64 << tile_split;
+ tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
+ tileb = min(tile_split_bytes, tileb);
+
+ for (index = 0; tileb > 64; index++) {
+ tileb >>= 1;
+ }
+
+ if (index >= 16) {
+ DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
+ target_fb->bits_per_pixel, tile_split);
+ return -EINVAL;
+ }
+
+ num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
+ } else {
+ /* SI and older. */
+ if (rdev->family >= CHIP_TAHITI)
+ tmp = rdev->config.si.tile_config;
+ else if (rdev->family >= CHIP_CAYMAN)
+ tmp = rdev->config.cayman.tile_config;
+ else
+ tmp = rdev->config.evergreen.tile_config;
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0: /* 4 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+ break;
+ case 1: /* 8 banks */
+ default:
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+ break;
+ case 2: /* 16 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+ break;
+ }
}
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
-
- evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
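
Editor's note: on CIK (Bonaire and newer) the hunk above derives a macrotile mode index from the framebuffer bpp and tile split instead of decoding tile_config, then reads the bank count as (macrotile_mode_array[index] >> 6) & 0x3. The index calculation is plain integer arithmetic and can be checked standalone; the array contents are ASIC-specific and not reproduced here.

	#include <stdio.h>

	/* Standalone check of the macrotile index math used above.
	 * The loop computes log2(tileb / 64), with tileb clamped by the tile split. */
	static int macrotile_index(unsigned bits_per_pixel, unsigned tile_split)
	{
		unsigned tile_split_bytes = 64u << tile_split;
		unsigned tileb = 8 * 8 * bits_per_pixel / 8;	/* bytes in one 8x8 tile */
		unsigned index;

		if (tileb > tile_split_bytes)
			tileb = tile_split_bytes;

		for (index = 0; tileb > 64; index++)
			tileb >>= 1;

		return index < 16 ? (int)index : -1;		/* -1 mirrors the -EINVAL path */
	}

	int main(void)
	{
		/* 32bpp with a 1024-byte tile split: 256-byte tile -> index 2 */
		printf("index=%d\n", macrotile_index(32, 4));
		return 0;
	}
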
@@ -1180,23 +1232,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
if (rdev->family >= CHIP_BONAIRE) {
- u32 num_pipe_configs = rdev->config.cik.max_tile_pipes;
- u32 num_rb = rdev->config.cik.max_backends_per_se;
- if (num_pipe_configs > 8)
- num_pipe_configs = 8;
- if (num_pipe_configs == 8)
- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16);
- else if (num_pipe_configs == 4) {
- if (num_rb == 4)
- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
- else if (num_rb < 4)
- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
- } else if (num_pipe_configs == 2)
- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
+ /* Read the pipe config from the 2D TILED SCANOUT mode.
+ * It should be the same for the other modes too, but not all
+ * modes set the pipe config field. */
+ u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
+
+ fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
} else if ((rdev->family == CHIP_TAHITI) ||
(rdev->family == CHIP_PITCAIRN))
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
- else if (rdev->family == CHIP_VERDE)
+ else if ((rdev->family == CHIP_VERDE) ||
+ (rdev->family == CHIP_OLAND) ||
+ (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
switch (radeon_crtc->crtc_id) {
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index fb3ae07a1469..4ad7643fce5f 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -157,21 +157,22 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
msg[0] = address;
msg[1] = address >> 8;
- msg[2] = AUX_NATIVE_WRITE << 4;
+ msg[2] = DP_AUX_NATIVE_WRITE << 4;
msg[3] = (msg_bytes << 4) | (send_bytes - 1);
memcpy(&msg[4], send, send_bytes);
- for (retry = 0; retry < 4; retry++) {
+ for (retry = 0; retry < 7; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, NULL, 0, delay, &ack);
if (ret == -EBUSY)
continue;
else if (ret < 0)
return ret;
- if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ ack >>= 4;
+ if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
return send_bytes;
- else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
- udelay(400);
+ else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
+ usleep_range(400, 500);
else
return -EIO;
}
@@ -191,20 +192,21 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
msg[0] = address;
msg[1] = address >> 8;
- msg[2] = AUX_NATIVE_READ << 4;
+ msg[2] = DP_AUX_NATIVE_READ << 4;
msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
- for (retry = 0; retry < 4; retry++) {
+ for (retry = 0; retry < 7; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, recv, recv_bytes, delay, &ack);
if (ret == -EBUSY)
continue;
else if (ret < 0)
return ret;
- if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ ack >>= 4;
+ if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
return ret;
- else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
- udelay(400);
+ else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
+ usleep_range(400, 500);
else if (ret == 0)
return -EPROTO;
else
@@ -246,12 +248,12 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
/* Set up the command byte */
if (mode & MODE_I2C_READ)
- msg[2] = AUX_I2C_READ << 4;
+ msg[2] = DP_AUX_I2C_READ << 4;
else
- msg[2] = AUX_I2C_WRITE << 4;
+ msg[2] = DP_AUX_I2C_WRITE << 4;
if (!(mode & MODE_I2C_STOP))
- msg[2] |= AUX_I2C_MOT << 4;
+ msg[2] |= DP_AUX_I2C_MOT << 4;
msg[0] = address;
msg[1] = address >> 8;
@@ -272,7 +274,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
break;
}
- for (retry = 0; retry < 4; retry++) {
+ for (retry = 0; retry < 7; retry++) {
ret = radeon_process_aux_ch(auxch,
msg, msg_bytes, reply, reply_bytes, 0, &ack);
if (ret == -EBUSY)
@@ -282,35 +284,35 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
return ret;
}
- switch (ack & AUX_NATIVE_REPLY_MASK) {
- case AUX_NATIVE_REPLY_ACK:
+ switch ((ack >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
+ case DP_AUX_NATIVE_REPLY_ACK:
/* I2C-over-AUX Reply field is only valid
* when paired with AUX ACK.
*/
break;
- case AUX_NATIVE_REPLY_NACK:
+ case DP_AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
return -EREMOTEIO;
- case AUX_NATIVE_REPLY_DEFER:
+ case DP_AUX_NATIVE_REPLY_DEFER:
DRM_DEBUG_KMS("aux_ch native defer\n");
- udelay(400);
+ usleep_range(500, 600);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
return -EREMOTEIO;
}
- switch (ack & AUX_I2C_REPLY_MASK) {
- case AUX_I2C_REPLY_ACK:
+ switch ((ack >> 4) & DP_AUX_I2C_REPLY_MASK) {
+ case DP_AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ)
*read_byte = reply[0];
return ret;
- case AUX_I2C_REPLY_NACK:
+ case DP_AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
return -EREMOTEIO;
- case AUX_I2C_REPLY_DEFER:
+ case DP_AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
- udelay(400);
+ usleep_range(400, 500);
break;
default:
DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
@@ -671,9 +673,11 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
u8 tmp;
/* power up the sink */
- if (dp_info->dpcd[0] >= 0x11)
+ if (dp_info->dpcd[0] >= 0x11) {
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_SET_POWER, DP_SET_POWER_D0);
+ usleep_range(1000, 2000);
+ }
/* possibly enable downspread on the sink */
if (dp_info->dpcd[3] & 0x1)
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index f685035dbe39..b5162c3b6111 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -27,8 +27,6 @@
#include "radeon.h"
#include "atom.h"
-extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
-
#define TARGET_HW_I2C_CLOCK 50
/* these are a limitation of ProcessI2cChannelTransaction not the hw */
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 9b6950d9b3c0..ea103ccdf4bd 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -29,6 +29,7 @@
#include "cypress_dpm.h"
#include "btc_dpm.h"
#include "atom.h"
+#include <linux/seq_file.h>
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
@@ -49,6 +50,7 @@ struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps);
struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
//********* BARTS **************//
static const u32 barts_cgcg_cgls_default[] =
@@ -2510,21 +2512,6 @@ int btc_dpm_enable(struct radeon_device *rdev)
if (eg_pi->ls_clock_gating)
btc_ls_clock_gating_enable(rdev, true);
- if (rdev->irq.installed &&
- r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
- PPSMC_Result result;
-
- ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- rdev->irq.dpm_thermal = true;
- radeon_irq_set(rdev);
- result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
-
- if (result != PPSMC_Result_OK)
- DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
- }
-
rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
btc_init_stutter_mode(rdev);
@@ -2576,7 +2563,11 @@ void btc_dpm_disable(struct radeon_device *rdev)
void btc_dpm_setup_asic(struct radeon_device *rdev)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ int r;
+ r = ni_mc_load_microcode(rdev);
+ if (r)
+ DRM_ERROR("Failed to load MC firmware!\n");
rv770_get_memory_type(rdev);
rv740_read_clock_registers(rdev);
btc_read_arb_registers(rdev);
@@ -2766,6 +2757,37 @@ void btc_dpm_fini(struct radeon_device *rdev)
r600_free_extended_power_table(rdev);
}
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct rv7xx_ps *ps = rv770_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ seq_printf(m, "invalid dpm profile %d\n", current_index);
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ if (rdev->family >= CHIP_CEDAR) {
+ seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
+ current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+ } else {
+ seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n",
+ current_index, pl->sclk, pl->mclk, pl->vddc);
+ }
+ }
+}
+
u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h
index 29e32de7e025..9c65be2d55a9 100644
--- a/drivers/gpu/drm/radeon/btcd.h
+++ b/drivers/gpu/drm/radeon/btcd.h
@@ -44,6 +44,10 @@
# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
# define AC_DC_SW (1 << 24)
+#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
+# define CURRENT_PROFILE_INDEX_MASK (0xf << 4)
+# define CURRENT_PROFILE_INDEX_SHIFT 4
+
#define CG_BIF_REQ_AND_RSP 0x7f4
#define CG_CLIENT_REQ(x) ((x) << 0)
#define CG_CLIENT_REQ_MASK (0xff << 0)
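
Editor's note: the new btcd.h defines above are consumed by btc_dpm_debugfs_print_current_performance_level(): the current DPM level (0 = low, 1 = medium, 2 = high) is a 4-bit field starting at bit 4 of TARGET_AND_CURRENT_PROFILE_INDEX. A trivial standalone decode with a made-up register readback:

	#include <stdio.h>

	#define CURRENT_PROFILE_INDEX_MASK	(0xf << 4)
	#define CURRENT_PROFILE_INDEX_SHIFT	4

	int main(void)
	{
		unsigned reg = 0x25;	/* made-up register value */
		unsigned idx = (reg & CURRENT_PROFILE_INDEX_MASK) >>
			       CURRENT_PROFILE_INDEX_SHIFT;

		/* 0 = low, 1 = medium, 2 = high; anything else is reported invalid */
		printf("current dpm profile index = %u\n", idx);
		return 0;
	}
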
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 1ed479976358..8d49104ca6c2 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -171,8 +171,7 @@ extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
struct atom_voltage_table *voltage_table);
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
-extern void cik_update_cg(struct radeon_device *rdev,
- u32 block, bool enable);
+extern int ci_mc_load_microcode(struct radeon_device *rdev);
static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
struct atom_voltage_table_entry *voltage_table,
@@ -4503,8 +4502,8 @@ static void ci_get_memory_type(struct radeon_device *rdev)
}
-void ci_update_current_ps(struct radeon_device *rdev,
- struct radeon_ps *rps)
+static void ci_update_current_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
{
struct ci_ps *new_ps = ci_get_ps(rps);
struct ci_power_info *pi = ci_get_pi(rdev);
@@ -4514,8 +4513,8 @@ void ci_update_current_ps(struct radeon_device *rdev,
pi->current_rps.ps_priv = &pi->current_ps;
}
-void ci_update_requested_ps(struct radeon_device *rdev,
- struct radeon_ps *rps)
+static void ci_update_requested_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
{
struct ci_ps *new_ps = ci_get_ps(rps);
struct ci_power_info *pi = ci_get_pi(rdev);
@@ -4549,6 +4548,11 @@ void ci_dpm_post_set_power_state(struct radeon_device *rdev)
void ci_dpm_setup_asic(struct radeon_device *rdev)
{
+ int r;
+
+ r = ci_mc_load_microcode(rdev);
+ if (r)
+ DRM_ERROR("Failed to load MC firmware!\n");
ci_read_clock_registers(rdev);
ci_get_memory_type(rdev);
ci_enable_acpi_power_management(rdev);
@@ -4561,13 +4565,6 @@ int ci_dpm_enable(struct radeon_device *rdev)
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
int ret;
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), false);
-
if (ci_is_smc_running(rdev))
return -EINVAL;
if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
@@ -4665,6 +4662,18 @@ int ci_dpm_enable(struct radeon_device *rdev)
DRM_ERROR("ci_enable_power_containment failed\n");
return ret;
}
+
+ ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+ ci_update_current_ps(rdev, boot_ps);
+
+ return 0;
+}
+
+int ci_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret;
+
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
#if 0
@@ -4685,19 +4694,8 @@ int ci_dpm_enable(struct radeon_device *rdev)
#endif
}
- ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-
ci_dpm_powergate_uvd(rdev, true);
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), true);
-
- ci_update_current_ps(rdev, boot_ps);
-
return 0;
}
@@ -4706,12 +4704,6 @@ void ci_dpm_disable(struct radeon_device *rdev)
struct ci_power_info *pi = ci_get_pi(rdev);
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), false);
-
ci_dpm_powergate_uvd(rdev, false);
if (!ci_is_smc_running(rdev))
@@ -4742,13 +4734,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
struct radeon_ps *old_ps = &pi->current_rps;
int ret;
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), false);
-
ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
if (pi->pcie_performance_request)
ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
@@ -4804,13 +4789,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
if (pi->pcie_performance_request)
ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), true);
-
return 0;
}
@@ -5023,8 +5001,8 @@ static int ci_parse_power_table(struct radeon_device *rdev)
return 0;
}
-int ci_get_vbios_boot_values(struct radeon_device *rdev,
- struct ci_vbios_boot_state *boot_state)
+static int ci_get_vbios_boot_values(struct radeon_device *rdev,
+ struct ci_vbios_boot_state *boot_state)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 9c745dd22438..8debc9d47362 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -28,6 +28,7 @@
#include "cikd.h"
#include "ppsmc.h"
#include "radeon_ucode.h"
+#include "ci_dpm.h"
static int ci_set_smc_sram_address(struct radeon_device *rdev,
u32 smc_address, u32 limit)
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b43a3a3c9067..e6419ca7cd37 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1697,7 +1697,7 @@ static void cik_srbm_select(struct radeon_device *rdev,
* Load the GDDR MC ucode into the hw (CIK).
* Returns 0 on success, error on failure.
*/
-static int ci_mc_load_microcode(struct radeon_device *rdev)
+int ci_mc_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
u32 running, blackout = 0;
@@ -3057,7 +3057,7 @@ static u32 cik_create_bitmask(u32 bit_width)
* Returns the disabled RB bitmask.
*/
static u32 cik_get_rb_disabled(struct radeon_device *rdev,
- u32 max_rb_num, u32 se_num,
+ u32 max_rb_num_per_se,
u32 sh_per_se)
{
u32 data, mask;
@@ -3071,7 +3071,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
data >>= BACKEND_DISABLE_SHIFT;
- mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);
+ mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
return data & mask;
}
@@ -3088,7 +3088,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
*/
static void cik_setup_rb(struct radeon_device *rdev,
u32 se_num, u32 sh_per_se,
- u32 max_rb_num)
+ u32 max_rb_num_per_se)
{
int i, j;
u32 data, mask;
@@ -3098,7 +3098,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
- data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+ data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
if (rdev->family == CHIP_HAWAII)
disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
else
@@ -3108,12 +3108,14 @@ static void cik_setup_rb(struct radeon_device *rdev,
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
mask = 1;
- for (i = 0; i < max_rb_num; i++) {
+ for (i = 0; i < max_rb_num_per_se * se_num; i++) {
if (!(disabled_rbs & mask))
enabled_rbs |= mask;
mask <<= 1;
}
+ rdev->config.cik.backend_enable_mask = enabled_rbs;
+
for (i = 0; i < se_num; i++) {
cik_select_se_sh(rdev, i, 0xffffffff);
data = 0;
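
Editor's note: the cik_setup_rb() change above fixes the enabled-RB accounting: the disabled-RB query now takes the per-SE RB count directly, the enable mask is built over max_rb_num_per_se * se_num bits (the old loop only walked one SE's worth on multi-SE parts), and the result is stashed in backend_enable_mask for later consumers. A standalone sketch of the mask construction with illustrative numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned se_num = 4, max_rb_num_per_se = 4;	/* Hawaii-like layout */
		unsigned disabled_rbs = 0x0210;			/* say RBs 4 and 9 are fused off */
		unsigned enabled_rbs = 0, mask = 1, i;

		/* walk every SE's RBs, not just one SE's worth */
		for (i = 0; i < max_rb_num_per_se * se_num; i++) {
			if (!(disabled_rbs & mask))
				enabled_rbs |= mask;
			mask <<= 1;
		}

		printf("enabled_rbs = 0x%04x\n", enabled_rbs);	/* 0xfdef here */
		return 0;
	}
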
@@ -3485,6 +3487,51 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
}
/**
+ * cik_hdp_flush_cp_ring_emit - emit an hdp flush on the cp
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ *
+ * Emits an hdp flush on the cp.
+ */
+static void cik_hdp_flush_cp_ring_emit(struct radeon_device *rdev,
+ int ridx)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+ u32 ref_and_mask;
+
+ switch (ring->idx) {
+ case CAYMAN_RING_TYPE_CP1_INDEX:
+ case CAYMAN_RING_TYPE_CP2_INDEX:
+ default:
+ switch (ring->me) {
+ case 0:
+ ref_and_mask = CP2 << ring->pipe;
+ break;
+ case 1:
+ ref_and_mask = CP6 << ring->pipe;
+ break;
+ default:
+ return;
+ }
+ break;
+ case RADEON_RING_TYPE_GFX_INDEX:
+ ref_and_mask = CP0;
+ break;
+ }
+
+ radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
+ WAIT_REG_MEM_FUNCTION(3) | /* == */
+ WAIT_REG_MEM_ENGINE(1))); /* pfp */
+ radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
+ radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
+ radeon_ring_write(ring, ref_and_mask);
+ radeon_ring_write(ring, ref_and_mask);
+ radeon_ring_write(ring, 0x20); /* poll interval */
+}
+
+/**
* cik_fence_gfx_ring_emit - emit a fence on the gfx ring
*
* @rdev: radeon_device pointer
@@ -3510,15 +3557,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
/* HDP flush */
- /* We should be using the new WAIT_REG_MEM special op packet here
- * but it causes the CP to hang
- */
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(0)));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
+ cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
}
/**
@@ -3548,15 +3587,7 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
/* HDP flush */
- /* We should be using the new WAIT_REG_MEM special op packet here
- * but it causes the CP to hang
- */
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(0)));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
+ cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
}
bool cik_semaphore_ring_emit(struct radeon_device *rdev,
@@ -3564,8 +3595,6 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
-/* TODO: figure out why semaphore cause lockups */
-#if 0
uint64_t addr = semaphore->gpu_addr;
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
@@ -3574,9 +3603,6 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
return true;
-#else
- return false;
-#endif
}
/**
@@ -3814,6 +3840,8 @@ static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
if (enable)
WREG32(CP_ME_CNTL, 0);
else {
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
@@ -4012,18 +4040,50 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
return r;
}
+
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
return 0;
}
-u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
+u32 cik_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
{
u32 rptr;
+ if (rdev->wb.enabled)
+ rptr = rdev->wb.wb[ring->rptr_offs/4];
+ else
+ rptr = RREG32(CP_RB0_RPTR);
+ return rptr;
+}
+
+u32 cik_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 wptr;
+
+ wptr = RREG32(CP_RB0_WPTR);
+
+ return wptr;
+}
+
+void cik_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ WREG32(CP_RB0_WPTR, ring->wptr);
+ (void)RREG32(CP_RB0_WPTR);
+}
+
+u32 cik_compute_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 rptr;
if (rdev->wb.enabled) {
- rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+ rptr = rdev->wb.wb[ring->rptr_offs/4];
} else {
mutex_lock(&rdev->srbm_mutex);
cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
@@ -4035,13 +4095,14 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
return rptr;
}
-u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
+u32 cik_compute_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
{
u32 wptr;
if (rdev->wb.enabled) {
- wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
+ /* XXX check if swapping is necessary on BE */
+ wptr = rdev->wb.wb[ring->wptr_offs/4];
} else {
mutex_lock(&rdev->srbm_mutex);
cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
@@ -4053,10 +4114,11 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
return wptr;
}
-void cik_compute_ring_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
+void cik_compute_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
{
- rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
+ /* XXX check if swapping is necessary on BE */
+ rdev->wb.wb[ring->wptr_offs/4] = ring->wptr;
WDOORBELL32(ring->doorbell_index, ring->wptr);
}
@@ -4850,6 +4912,160 @@ static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
cik_print_gpu_status_regs(rdev);
}
+struct kv_reset_save_regs {
+ u32 gmcon_reng_execute;
+ u32 gmcon_misc;
+ u32 gmcon_misc3;
+};
+
+static void kv_save_regs_for_reset(struct radeon_device *rdev,
+ struct kv_reset_save_regs *save)
+{
+ save->gmcon_reng_execute = RREG32(GMCON_RENG_EXECUTE);
+ save->gmcon_misc = RREG32(GMCON_MISC);
+ save->gmcon_misc3 = RREG32(GMCON_MISC3);
+
+ WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute & ~RENG_EXECUTE_ON_PWR_UP);
+ WREG32(GMCON_MISC, save->gmcon_misc & ~(RENG_EXECUTE_ON_REG_UPDATE |
+ STCTRL_STUTTER_EN));
+}
+
+static void kv_restore_regs_for_reset(struct radeon_device *rdev,
+ struct kv_reset_save_regs *save)
+{
+ int i;
+
+ WREG32(GMCON_PGFSM_WRITE, 0);
+ WREG32(GMCON_PGFSM_CONFIG, 0x200010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0);
+ WREG32(GMCON_PGFSM_CONFIG, 0x300010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0x210000);
+ WREG32(GMCON_PGFSM_CONFIG, 0xa00010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0x21003);
+ WREG32(GMCON_PGFSM_CONFIG, 0xb00010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0x2b00);
+ WREG32(GMCON_PGFSM_CONFIG, 0xc00010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0);
+ WREG32(GMCON_PGFSM_CONFIG, 0xd00010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0x420000);
+ WREG32(GMCON_PGFSM_CONFIG, 0x100010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0x120202);
+ WREG32(GMCON_PGFSM_CONFIG, 0x500010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0x3e3e36);
+ WREG32(GMCON_PGFSM_CONFIG, 0x600010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0x373f3e);
+ WREG32(GMCON_PGFSM_CONFIG, 0x700010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0x3e1332);
+ WREG32(GMCON_PGFSM_CONFIG, 0xe00010ff);
+
+ WREG32(GMCON_MISC3, save->gmcon_misc3);
+ WREG32(GMCON_MISC, save->gmcon_misc);
+ WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute);
+}
+
+static void cik_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ struct kv_reset_save_regs kv_save = { 0 };
+ u32 tmp, i;
+
+ dev_info(rdev->dev, "GPU pci config reset\n");
+
+ /* disable dpm? */
+
+ /* disable cg/pg */
+ cik_fini_pg(rdev);
+ cik_fini_cg(rdev);
+
+ /* Disable GFX parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+
+ /* Disable MEC parsing/prefetching */
+ WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
+
+ /* sdma0 */
+ tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
+ tmp |= SDMA_HALT;
+ WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+ /* sdma1 */
+ tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
+ tmp |= SDMA_HALT;
+ WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+ /* XXX other engines? */
+
+ /* halt the rlc, disable cp internal ints */
+ cik_rlc_stop(rdev);
+
+ udelay(50);
+
+ /* disable mem access */
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
+ }
+
+ if (rdev->flags & RADEON_IS_IGP)
+ kv_save_regs_for_reset(rdev, &kv_save);
+
+ /* disable BM */
+ pci_clear_master(rdev->pdev);
+ /* reset */
+ radeon_pci_config_reset(rdev);
+
+ udelay(100);
+
+ /* wait for asic to come out of reset */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+ break;
+ udelay(1);
+ }
+
+ /* does asic init need to be run first??? */
+ if (rdev->flags & RADEON_IS_IGP)
+ kv_restore_regs_for_reset(rdev, &kv_save);
+}
+
/**
* cik_asic_reset - soft reset GPU
*
@@ -4868,10 +5084,17 @@ int cik_asic_reset(struct radeon_device *rdev)
if (reset_mask)
r600_set_bios_scratch_engine_hung(rdev, true);
+ /* try soft reset */
cik_gpu_soft_reset(rdev, reset_mask);
reset_mask = cik_gpu_check_soft_reset(rdev);
+ /* try pci config reset */
+ if (reset_mask && radeon_hard_reset)
+ cik_gpu_pci_config_reset(rdev);
+
+ reset_mask = cik_gpu_check_soft_reset(rdev);
+
if (!reset_mask)
r600_set_bios_scratch_engine_hung(rdev, false);
@@ -5136,20 +5359,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
- /* TC cache setup ??? */
- WREG32(TC_CFG_L1_LOAD_POLICY0, 0);
- WREG32(TC_CFG_L1_LOAD_POLICY1, 0);
- WREG32(TC_CFG_L1_STORE_POLICY, 0);
-
- WREG32(TC_CFG_L2_LOAD_POLICY0, 0);
- WREG32(TC_CFG_L2_LOAD_POLICY1, 0);
- WREG32(TC_CFG_L2_STORE_POLICY0, 0);
- WREG32(TC_CFG_L2_STORE_POLICY1, 0);
- WREG32(TC_CFG_L2_ATOMIC_POLICY, 0);
-
- WREG32(TC_CFG_L1_VOLATILE, 0);
- WREG32(TC_CFG_L2_VOLATILE, 0);
-
if (rdev->family == CHIP_KAVERI) {
u32 tmp = RREG32(CHUB_CONTROL);
tmp &= ~BYPASS_VM;
@@ -5365,16 +5574,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, VMID(0));
/* HDP flush */
- /* We should be using the WAIT_REG_MEM packet here like in
- * cik_fence_ring_emit(), but it causes the CP to hang in this
- * context...
- */
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(0)));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
+ cik_hdp_flush_cp_ring_emit(rdev, ridx);
/* bits 0-15 are the VM contexts0-15 */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -7501,26 +7701,7 @@ static int cik_startup(struct radeon_device *rdev)
cik_mc_program(rdev);
- if (rdev->flags & RADEON_IS_IGP) {
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
- !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
- r = cik_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
- } else {
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
- !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
- !rdev->mc_fw) {
- r = cik_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
-
+ if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
r = ci_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
@@ -7625,7 +7806,6 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- CP_RB0_RPTR, CP_RB0_WPTR,
PACKET3(PACKET3_NOP, 0x3FFF));
if (r)
return r;
@@ -7634,7 +7814,6 @@ static int cik_startup(struct radeon_device *rdev)
/* type-2 packets are deprecated on MEC, use type-3 instead */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
- CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
PACKET3(PACKET3_NOP, 0x3FFF));
if (r)
return r;
@@ -7646,7 +7825,6 @@ static int cik_startup(struct radeon_device *rdev)
/* type-2 packets are deprecated on MEC, use type-3 instead */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
- CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
PACKET3(PACKET3_NOP, 0x3FFF));
if (r)
return r;
@@ -7658,16 +7836,12 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
- SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET,
SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
- SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET,
- SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET,
SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
if (r)
return r;
@@ -7683,7 +7857,6 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
RADEON_CP_PACKET2);
if (!r)
r = uvd_v1_0_init(rdev);
@@ -7729,6 +7902,8 @@ int cik_resume(struct radeon_device *rdev)
/* init golden registers */
cik_init_golden_registers(rdev);
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = cik_startup(rdev);
if (r) {
@@ -7752,6 +7927,7 @@ int cik_resume(struct radeon_device *rdev)
*/
int cik_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
dce6_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
cik_cp_enable(rdev, false);
@@ -7833,6 +8009,30 @@ int cik_init(struct radeon_device *rdev)
if (r)
return r;
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+ !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
+ r = cik_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ } else {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+ !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
+ !rdev->mc_fw) {
+ r = cik_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ }
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
@@ -7913,6 +8113,7 @@ int cik_init(struct radeon_device *rdev)
*/
void cik_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
cik_fini_pg(rdev);
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 0300727a4f70..1ecb3f1070e3 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -52,6 +52,75 @@ u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);
*/
/**
+ * cik_sdma_get_rptr - get the current read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current rptr from the hardware (CIK+).
+ */
+uint32_t cik_sdma_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 rptr, reg;
+
+ if (rdev->wb.enabled) {
+ rptr = rdev->wb.wb[ring->rptr_offs/4];
+ } else {
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET;
+ else
+ reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET;
+
+ rptr = RREG32(reg);
+ }
+
+ return (rptr & 0x3fffc) >> 2;
+}
+
+/**
+ * cik_sdma_get_wptr - get the current write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current wptr from the hardware (CIK+).
+ */
+uint32_t cik_sdma_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 reg;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
+ else
+ reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
+
+ return (RREG32(reg) & 0x3fffc) >> 2;
+}
+
+/**
+ * cik_sdma_set_wptr - commit the write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Write the wptr back to the hardware (CIK+).
+ */
+void cik_sdma_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 reg;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
+ else
+ reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
+
+ WREG32(reg, (ring->wptr << 2) & 0x3fffc);
+}
+
+/**
* cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
*
* @rdev: radeon_device pointer
@@ -88,6 +157,35 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
}
/**
+ * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ *
+ * Emit an hdp flush packet on the requested DMA ring.
+ */
+static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev,
+ int ridx)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+ u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+ u32 ref_and_mask;
+
+ if (ridx == R600_RING_TYPE_DMA_INDEX)
+ ref_and_mask = SDMA0;
+ else
+ ref_and_mask = SDMA1;
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+ radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
+ radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
+ radeon_ring_write(ring, ref_and_mask); /* reference */
+ radeon_ring_write(ring, ref_and_mask); /* mask */
+ radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
+}
+
+/**
* cik_sdma_fence_ring_emit - emit a fence on the DMA ring
*
* @rdev: radeon_device pointer
@@ -111,12 +209,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
/* generate an interrupt */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
/* flush HDP */
- /* We should be using the new POLL_REG_MEM special op packet here
- * but it causes sDMA to hang sometimes
- */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
+ cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
}
/**
@@ -157,7 +250,9 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
u32 rb_cntl, reg_offset;
int i;
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+ (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
for (i = 0; i < 2; i++) {
if (i == 0)
@@ -288,7 +383,9 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev)
}
}
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+ if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+ (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
@@ -458,7 +555,7 @@ int cik_copy_dma(struct radeon_device *rdev,
radeon_ring_write(ring, 0); /* src/dst endian swap */
radeon_ring_write(ring, src_offset & 0xffffffff);
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, dst_offset & 0xffffffff);
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
src_offset += cur_size_in_bytes;
dst_offset += cur_size_in_bytes;
@@ -747,12 +844,7 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
radeon_ring_write(ring, VMID(0));
/* flush HDP */
- /* We should be using the new POLL_REG_MEM special op packet here
- * but it causes sDMA to hang sometimes
- */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
+ cik_sdma_hdp_flush_ring_emit(rdev, ridx);
/* flush TLB */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 5964af5e5b2d..98bae9d7b74d 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -724,6 +724,17 @@
#define ATC_MISC_CG 0x3350
+#define GMCON_RENG_EXECUTE 0x3508
+#define RENG_EXECUTE_ON_PWR_UP (1 << 0)
+#define GMCON_MISC 0x350c
+#define RENG_EXECUTE_ON_REG_UPDATE (1 << 11)
+#define STCTRL_STUTTER_EN (1 << 16)
+
+#define GMCON_PGFSM_CONFIG 0x3538
+#define GMCON_PGFSM_WRITE 0x353c
+#define GMCON_PGFSM_READ 0x3540
+#define GMCON_MISC3 0x3544
+
#define MC_SEQ_CNTL_3 0x3600
# define CAC_EN (1 << 31)
#define MC_SEQ_G5PDX_CTRL 0x3604
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 920e1e4a52c5..cf783fc0ef21 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -1905,21 +1905,6 @@ int cypress_dpm_enable(struct radeon_device *rdev)
if (pi->mg_clock_gating)
cypress_mg_clock_gating_enable(rdev, true);
- if (rdev->irq.installed &&
- r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
- PPSMC_Result result;
-
- ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- rdev->irq.dpm_thermal = true;
- radeon_irq_set(rdev);
- result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
-
- if (result != PPSMC_Result_OK)
- DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
- }
-
rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
return 0;
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index de86493cbc44..713a5d359901 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -174,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
}
@@ -235,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
}
@@ -308,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev)
rdev->audio.enabled = true;
if (ASIC_IS_DCE8(rdev))
- rdev->audio.num_pins = 7;
+ rdev->audio.num_pins = 6;
+ else if (ASIC_IS_DCE61(rdev))
+ rdev->audio.num_pins = 4;
else
rdev->audio.num_pins = 6;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 9702e55e924e..f2b9e21ce4da 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -146,6 +146,7 @@ extern u32 si_get_csb_size(struct radeon_device *rdev);
extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
extern u32 cik_get_csb_size(struct radeon_device *rdev);
extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
+extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
static const u32 evergreen_golden_registers[] =
{
@@ -3867,6 +3868,48 @@ static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
evergreen_print_gpu_status_regs(rdev);
}
+void evergreen_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 tmp, i;
+
+ dev_info(rdev->dev, "GPU pci config reset\n");
+
+ /* disable dpm? */
+
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+ udelay(50);
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+ /* XXX other engines? */
+
+ /* halt the rlc */
+ r600_rlc_stop(rdev);
+
+ udelay(50);
+
+ /* set mclk/sclk to bypass */
+ rv770_set_clk_bypass_mode(rdev);
+ /* disable BM */
+ pci_clear_master(rdev->pdev);
+ /* disable mem access */
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
+ }
+ /* reset */
+ radeon_pci_config_reset(rdev);
+ /* wait for asic to come out of reset */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+ break;
+ udelay(1);
+ }
+}
+
int evergreen_asic_reset(struct radeon_device *rdev)
{
u32 reset_mask;
@@ -3876,10 +3919,17 @@ int evergreen_asic_reset(struct radeon_device *rdev)
if (reset_mask)
r600_set_bios_scratch_engine_hung(rdev, true);
+ /* try soft reset */
evergreen_gpu_soft_reset(rdev, reset_mask);
reset_mask = evergreen_gpu_check_soft_reset(rdev);
+ /* try pci config reset */
+ if (reset_mask && radeon_hard_reset)
+ evergreen_gpu_pci_config_reset(rdev);
+
+ reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
if (!reset_mask)
r600_set_bios_scratch_engine_hung(rdev, false);
@@ -4298,8 +4348,8 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
- /* only one DAC on DCE6 */
- if (!ASIC_IS_DCE6(rdev))
+ /* only one DAC on DCE5 */
+ if (!ASIC_IS_DCE5(rdev))
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
@@ -5109,27 +5159,12 @@ static int evergreen_startup(struct radeon_device *rdev)
evergreen_mc_program(rdev);
- if (ASIC_IS_DCE5(rdev)) {
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
- r = ni_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
+ if (ASIC_IS_DCE5(rdev) && !rdev->pm.dpm_enabled) {
r = ni_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
return r;
}
- } else {
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
- r = r600_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
}
if (rdev->flags & RADEON_IS_AGP) {
@@ -5199,14 +5234,12 @@ static int evergreen_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- R600_CP_RB_RPTR, R600_CP_RB_WPTR,
RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- DMA_RB_RPTR, DMA_RB_WPTR,
DMA_PACKET(DMA_PACKET_NOP, 0, 0));
if (r)
return r;
@@ -5224,7 +5257,6 @@ static int evergreen_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
RADEON_CP_PACKET2);
if (!r)
r = uvd_v1_0_init(rdev);
@@ -5267,6 +5299,8 @@ int evergreen_resume(struct radeon_device *rdev)
/* init golden registers */
evergreen_init_golden_registers(rdev);
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = evergreen_startup(rdev);
if (r) {
@@ -5281,6 +5315,7 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r600_audio_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
@@ -5357,6 +5392,27 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
+ if (ASIC_IS_DCE5(rdev)) {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ } else {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ }
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
@@ -5409,6 +5465,7 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r600_audio_fini(rdev);
r700_cp_fini(rdev);
r600_dma_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index eb8ac315f92f..c7cac07f139b 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -967,7 +967,10 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
if (track->cb_dirty) {
tmp = track->cb_target_mask;
for (i = 0; i < 8; i++) {
- if ((tmp >> (i * 4)) & 0xF) {
+ u32 format = G_028C70_FORMAT(track->cb_color_info[i]);
+
+ if (format != V_028C70_COLOR_INVALID &&
+ (tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
if (track->cb_color_bo[i] == NULL) {
dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index aa695c4feb3d..0c6d5cef4cf1 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
}
@@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
}
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 8a4e641f0e3c..a0f63ff5a5e9 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -33,6 +33,7 @@
#define EVERGREEN_PIF_PHY0_DATA 0xc
#define EVERGREEN_PIF_PHY1_INDEX 0x10
#define EVERGREEN_PIF_PHY1_DATA 0x14
+#define EVERGREEN_MM_INDEX_HI 0x18
#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310
#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 17f990798992..f9c7963b3ee6 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -82,12 +82,16 @@
#define CG_SPLL_FUNC_CNTL_2 0x604
#define SCLK_MUX_SEL(x) ((x) << 0)
#define SCLK_MUX_SEL_MASK (0x1ff << 0)
+#define SCLK_MUX_UPDATE (1 << 26)
#define CG_SPLL_FUNC_CNTL_3 0x608
#define SPLL_FB_DIV(x) ((x) << 0)
#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
#define SPLL_DITHEN (1 << 28)
+#define CG_SPLL_STATUS 0x60c
+#define SPLL_CHG_STATUS (1 << 1)
#define MPLL_CNTL_MODE 0x61c
+# define MPLL_MCLK_SEL (1 << 11)
# define SS_SSEN (1 << 24)
# define SS_DSMODE_EN (1 << 25)
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index b41905573cd2..351db361239d 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1126,11 +1126,6 @@ int kv_dpm_enable(struct radeon_device *rdev)
struct kv_power_info *pi = kv_get_pi(rdev);
int ret;
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_HDP), false);
-
ret = kv_process_firmware_header(rdev);
if (ret) {
DRM_ERROR("kv_process_firmware_header failed\n");
@@ -1215,6 +1210,21 @@ int kv_dpm_enable(struct radeon_device *rdev)
kv_reset_acp_boot_level(rdev);
+ ret = kv_smc_bapm_enable(rdev, false);
+ if (ret) {
+ DRM_ERROR("kv_smc_bapm_enable failed\n");
+ return ret;
+ }
+
+ kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+ return ret;
+}
+
+int kv_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret = 0;
+
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1226,35 +1236,17 @@ int kv_dpm_enable(struct radeon_device *rdev)
radeon_irq_set(rdev);
}
- ret = kv_smc_bapm_enable(rdev, false);
- if (ret) {
- DRM_ERROR("kv_smc_bapm_enable failed\n");
- return ret;
- }
-
/* powerdown unused blocks for now */
kv_dpm_powergate_acp(rdev, true);
kv_dpm_powergate_samu(rdev, true);
kv_dpm_powergate_vce(rdev, true);
kv_dpm_powergate_uvd(rdev, true);
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_HDP), true);
-
- kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
-
return ret;
}
void kv_dpm_disable(struct radeon_device *rdev)
{
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_HDP), false);
-
kv_smc_bapm_enable(rdev, false);
/* powerup blocks */
@@ -1779,11 +1771,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
/*struct radeon_ps *old_ps = &pi->current_rps;*/
int ret;
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_HDP), false);
-
if (pi->bapm_enable) {
ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
if (ret) {
@@ -1849,11 +1836,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
}
}
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_HDP), true);
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 11aab2ab54ce..ea932ac66fc6 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -174,6 +174,7 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void evergreen_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
+extern void evergreen_gpu_pci_config_reset(struct radeon_device *rdev);
/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -895,6 +896,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x999C)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
+ rdev->config.cayman.max_hw_contexts = 8;
+ rdev->config.cayman.sx_max_export_size = 256;
+ rdev->config.cayman.sx_max_export_pos_size = 64;
+ rdev->config.cayman.sx_max_export_smx_size = 192;
} else if ((rdev->pdev->device == 0x9903) ||
(rdev->pdev->device == 0x9904) ||
(rdev->pdev->device == 0x990A) ||
@@ -905,6 +910,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x999D)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
+ rdev->config.cayman.max_hw_contexts = 8;
+ rdev->config.cayman.sx_max_export_size = 256;
+ rdev->config.cayman.sx_max_export_pos_size = 64;
+ rdev->config.cayman.sx_max_export_smx_size = 192;
} else if ((rdev->pdev->device == 0x9919) ||
(rdev->pdev->device == 0x9990) ||
(rdev->pdev->device == 0x9991) ||
@@ -915,9 +924,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x99A0)) {
rdev->config.cayman.max_simds_per_se = 3;
rdev->config.cayman.max_backends_per_se = 1;
+ rdev->config.cayman.max_hw_contexts = 4;
+ rdev->config.cayman.sx_max_export_size = 128;
+ rdev->config.cayman.sx_max_export_pos_size = 32;
+ rdev->config.cayman.sx_max_export_smx_size = 96;
} else {
rdev->config.cayman.max_simds_per_se = 2;
rdev->config.cayman.max_backends_per_se = 1;
+ rdev->config.cayman.max_hw_contexts = 4;
+ rdev->config.cayman.sx_max_export_size = 128;
+ rdev->config.cayman.sx_max_export_pos_size = 32;
+ rdev->config.cayman.sx_max_export_smx_size = 96;
}
rdev->config.cayman.max_texture_channel_caches = 2;
rdev->config.cayman.max_gprs = 256;
@@ -925,10 +942,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.max_gs_threads = 32;
rdev->config.cayman.max_stack_entries = 512;
rdev->config.cayman.sx_num_of_sets = 8;
- rdev->config.cayman.sx_max_export_size = 256;
- rdev->config.cayman.sx_max_export_pos_size = 64;
- rdev->config.cayman.sx_max_export_smx_size = 192;
- rdev->config.cayman.max_hw_contexts = 8;
rdev->config.cayman.sq_num_cf_insts = 2;
rdev->config.cayman.sc_prim_fifo_size = 0x40;
@@ -1318,13 +1331,12 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA;
/* flush read cache over gart for this vmid */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
@@ -1340,6 +1352,8 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
+ u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA;
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
@@ -1364,14 +1378,11 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
(ib->vm ? (ib->vm->id << 24) : 0));
/* flush read cache over gart for this vmid */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 10); /* poll interval */
+ radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
}
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
@@ -1379,13 +1390,63 @@ static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
if (enable)
WREG32(CP_ME_CNTL, 0);
else {
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
}
+u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 rptr;
+
+ if (rdev->wb.enabled)
+ rptr = rdev->wb.wb[ring->rptr_offs/4];
+ else {
+ if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
+ rptr = RREG32(CP_RB0_RPTR);
+ else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
+ rptr = RREG32(CP_RB1_RPTR);
+ else
+ rptr = RREG32(CP_RB2_RPTR);
+ }
+
+ return rptr;
+}
+
+u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 wptr;
+
+ if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
+ wptr = RREG32(CP_RB0_WPTR);
+ else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
+ wptr = RREG32(CP_RB1_WPTR);
+ else
+ wptr = RREG32(CP_RB2_WPTR);
+
+ return wptr;
+}
+
+void cayman_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
+ WREG32(CP_RB0_WPTR, ring->wptr);
+ (void)RREG32(CP_RB0_WPTR);
+ } else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) {
+ WREG32(CP_RB1_WPTR, ring->wptr);
+ (void)RREG32(CP_RB1_WPTR);
+ } else {
+ WREG32(CP_RB2_WPTR, ring->wptr);
+ (void)RREG32(CP_RB2_WPTR);
+ }
+}
+
static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -1514,6 +1575,16 @@ static int cayman_cp_resume(struct radeon_device *rdev)
CP_RB1_BASE,
CP_RB2_BASE
};
+ static const unsigned cp_rb_rptr[] = {
+ CP_RB0_RPTR,
+ CP_RB1_RPTR,
+ CP_RB2_RPTR
+ };
+ static const unsigned cp_rb_wptr[] = {
+ CP_RB0_WPTR,
+ CP_RB1_WPTR,
+ CP_RB2_WPTR
+ };
struct radeon_ring *ring;
int i, r;
@@ -1572,8 +1643,8 @@ static int cayman_cp_resume(struct radeon_device *rdev)
WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
ring->rptr = ring->wptr = 0;
- WREG32(ring->rptr_reg, ring->rptr);
- WREG32(ring->wptr_reg, ring->wptr);
+ WREG32(cp_rb_rptr[i], ring->rptr);
+ WREG32(cp_rb_wptr[i], ring->wptr);
mdelay(1);
WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
@@ -1593,6 +1664,9 @@ static int cayman_cp_resume(struct radeon_device *rdev)
return r;
}
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
return 0;
}
@@ -1819,8 +1893,10 @@ int cayman_asic_reset(struct radeon_device *rdev)
reset_mask = cayman_gpu_check_soft_reset(rdev);
- if (!reset_mask)
- r600_set_bios_scratch_engine_hung(rdev, false);
+ if (reset_mask)
+ evergreen_gpu_pci_config_reset(rdev);
+
+ r600_set_bios_scratch_engine_hung(rdev, false);
return 0;
}
@@ -1866,23 +1942,7 @@ static int cayman_startup(struct radeon_device *rdev)
evergreen_mc_program(rdev);
- if (rdev->flags & RADEON_IS_IGP) {
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
- r = ni_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
- } else {
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
- r = ni_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
-
+ if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
r = ni_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
@@ -1969,23 +2029,18 @@ static int cayman_startup(struct radeon_device *rdev)
evergreen_irq_set(rdev);
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- CP_RB0_RPTR, CP_RB0_WPTR,
RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
- DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
- DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
- DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -2004,7 +2059,6 @@ static int cayman_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
RADEON_CP_PACKET2);
if (!r)
r = uvd_v1_0_init(rdev);
@@ -2051,6 +2105,8 @@ int cayman_resume(struct radeon_device *rdev)
/* init golden registers */
ni_init_golden_registers(rdev);
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = cayman_startup(rdev);
if (r) {
@@ -2063,6 +2119,7 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
if (ASIC_IS_DCE6(rdev))
dce6_audio_fini(rdev);
else
@@ -2133,6 +2190,27 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ } else {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ }
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
@@ -2192,6 +2270,7 @@ int cayman_init(struct radeon_device *rdev)
void cayman_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
cayman_cp_fini(rdev);
cayman_dma_fini(rdev);
r600_irq_fini(rdev);
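
ni.c gains per-ring cayman_gfx_get_rptr()/get_wptr()/set_wptr() helpers because the generic accessors, and the rptr_reg/wptr_reg fields they relied on, are removed later in this series. The wiring below matches the radeon_asic.c hunks further down; the consumer function is a hypothetical sketch of the dispatch, shown only to make the indirection concrete.

/* wiring, as in the radeon_asic.c hunks below */
static struct radeon_asic_ring cayman_gfx_ring_sketch = {
        .get_rptr = &cayman_gfx_get_rptr,
        .get_wptr = &cayman_gfx_get_wptr,
        .set_wptr = &cayman_gfx_set_wptr,
        /* other callbacks elided */
};

/* hypothetical consumer: the ring code no longer touches rptr_reg/wptr_reg */
static u32 ring_read_rptr_sketch(struct radeon_device *rdev, int idx)
{
        struct radeon_ring *ring = &rdev->ring[idx];

        return rdev->asic->ring[idx]->get_rptr(rdev, ring);
}
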
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index bdeb65ed3658..7cf96b15377f 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -43,6 +43,75 @@ u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
*/
/**
+ * cayman_dma_get_rptr - get the current read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current rptr from the hardware (cayman+).
+ */
+uint32_t cayman_dma_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 rptr, reg;
+
+ if (rdev->wb.enabled) {
+ rptr = rdev->wb.wb[ring->rptr_offs/4];
+ } else {
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ reg = DMA_RB_RPTR + DMA0_REGISTER_OFFSET;
+ else
+ reg = DMA_RB_RPTR + DMA1_REGISTER_OFFSET;
+
+ rptr = RREG32(reg);
+ }
+
+ return (rptr & 0x3fffc) >> 2;
+}
+
+/**
+ * cayman_dma_get_wptr - get the current write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current wptr from the hardware (cayman+).
+ */
+uint32_t cayman_dma_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 reg;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET;
+ else
+ reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET;
+
+ return (RREG32(reg) & 0x3fffc) >> 2;
+}
+
+/**
+ * cayman_dma_set_wptr - commit the write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Write the wptr back to the hardware (cayman+).
+ */
+void cayman_dma_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 reg;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET;
+ else
+ reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET;
+
+ WREG32(reg, (ring->wptr << 2) & 0x3fffc);
+}
+
+/**
* cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
*
* @rdev: radeon_device pointer
@@ -88,7 +157,9 @@ void cayman_dma_stop(struct radeon_device *rdev)
{
u32 rb_cntl;
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+ (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
/* dma0 */
rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
@@ -190,7 +261,9 @@ int cayman_dma_resume(struct radeon_device *rdev)
}
}
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+ if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+ (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
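
The cayman_dma_stop()/cayman_dma_resume() hunks above gate radeon_ttm_set_active_vram_size() on copy_ring_index: only the engine that TTM actually uses for buffer moves should shrink or restore the accessible VRAM window, so halting an engine that is not the copy ring no longer touches it. A minimal restatement of that gating, with the actual ring halt elided:

/* hedged sketch of the gating added above; not the full stop path */
static void dma_stop_sketch(struct radeon_device *rdev)
{
        bool is_copy_ring =
                (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
                (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX);

        if (is_copy_ring)
                radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

        /* ... DMA_RB_CNTL writes to actually halt the rings elided ... */
}
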
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 49c4d48f54d6..1217fbcbdcca 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -720,6 +720,8 @@ static const u32 cayman_sysls_enable[] =
struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
+
struct ni_power_info *ni_get_pi(struct radeon_device *rdev)
{
struct ni_power_info *pi = rdev->pm.dpm.priv;
@@ -3565,7 +3567,11 @@ void ni_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
void ni_dpm_setup_asic(struct radeon_device *rdev)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ int r;
+ r = ni_mc_load_microcode(rdev);
+ if (r)
+ DRM_ERROR("Failed to load MC firmware!\n");
ni_read_clock_registers(rdev);
btc_read_arb_registers(rdev);
rv770_get_memory_type(rdev);
@@ -3710,21 +3716,6 @@ int ni_dpm_enable(struct radeon_device *rdev)
if (eg_pi->ls_clock_gating)
ni_ls_clockgating_enable(rdev, true);
- if (rdev->irq.installed &&
- r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
- PPSMC_Result result;
-
- ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, 0xff * 1000);
- if (ret)
- return ret;
- rdev->irq.dpm_thermal = true;
- radeon_irq_set(rdev);
- result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
-
- if (result != PPSMC_Result_OK)
- DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
- }
-
rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
ni_update_current_ps(rdev, boot_ps);
@@ -3954,7 +3945,6 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_ps *ps = ni_get_ps(rps);
- u16 vddc;
struct rv7xx_pl *pl = &ps->performance_levels[index];
ps->performance_level_count = index + 1;
@@ -3970,8 +3960,8 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
/* patch up vddc if necessary */
if (pl->vddc == 0xff01) {
- if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
- pl->vddc = vddc;
+ if (pi->max_vddc)
+ pl->vddc = pi->max_vddc;
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -4331,7 +4321,8 @@ void ni_dpm_print_power_state(struct radeon_device *rdev,
void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
struct ni_ps *ps = ni_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 22421bc80c0d..d996033c243e 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -1154,6 +1154,7 @@
# define PACKET3_DB_ACTION_ENA (1 << 26)
# define PACKET3_SH_ACTION_ENA (1 << 27)
# define PACKET3_SX_ACTION_ENA (1 << 28)
+# define PACKET3_ENGINE_ME (1 << 31)
#define PACKET3_ME_INITIALIZE 0x44
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
index da43ab328833..2d532996c697 100644
--- a/drivers/gpu/drm/radeon/pptable.h
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -23,7 +23,7 @@
#ifndef _PPTABLE_H
#define _PPTABLE_H
-#pragma pack(push, 1)
+#pragma pack(1)
typedef struct _ATOM_PPLIB_THERMALCONTROLLER
@@ -677,6 +677,6 @@ typedef struct _ATOM_PPLIB_PPM_Table
ULONG ulTjmax;
} ATOM_PPLIB_PPM_Table;
-#pragma pack(pop)
+#pragma pack()
#endif
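
pptable.h switches from #pragma pack(push, 1)/#pragma pack(pop) to plain #pragma pack(1)/#pragma pack(), which resets alignment to the compiler default instead of restoring a saved value; the layout of the ATOM tables themselves is unchanged. A standalone illustration of the bracketing form used here (the struct names are made up for the example):

#include <stdio.h>
#include <stdint.h>

#pragma pack(1)                 /* byte-pack everything until the reset below */
struct packed_example {
        uint8_t  flags;
        uint32_t value;
};
#pragma pack()                  /* back to the default alignment */

struct unpacked_example {
        uint8_t  flags;
        uint32_t value;
};

int main(void)
{
        /* typically prints 5 and 8 on common ABIs */
        printf("packed=%zu unpacked=%zu\n",
               sizeof(struct packed_example), sizeof(struct unpacked_example));
        return 0;
}
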
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 10abc4d5a6cc..ef024ce3f7cc 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1050,6 +1050,36 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
return err;
}
+u32 r100_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 rptr;
+
+ if (rdev->wb.enabled)
+ rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+ else
+ rptr = RREG32(RADEON_CP_RB_RPTR);
+
+ return rptr;
+}
+
+u32 r100_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 wptr;
+
+ wptr = RREG32(RADEON_CP_RB_WPTR);
+
+ return wptr;
+}
+
+void r100_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ WREG32(RADEON_CP_RB_WPTR, ring->wptr);
+ (void)RREG32(RADEON_CP_RB_WPTR);
+}
+
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -1102,7 +1132,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
ring_size = (1 << (rb_bufsz + 1)) * 4;
r100_cp_load_microcode(rdev);
r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
- RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
RADEON_CP_PACKET2);
if (r) {
return r;
@@ -3913,6 +3942,8 @@ int r100_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = r100_startup(rdev);
if (r) {
@@ -3923,6 +3954,7 @@ int r100_resume(struct radeon_device *rdev)
int r100_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -3933,6 +3965,7 @@ int r100_suspend(struct radeon_device *rdev)
void r100_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -4039,6 +4072,9 @@ int r100_init(struct radeon_device *rdev)
}
r100_set_safe_registers(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = r100_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index d8dd269b9159..7c63ef840e86 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1430,6 +1430,8 @@ int r300_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = r300_startup(rdev);
if (r) {
@@ -1440,6 +1442,7 @@ int r300_resume(struct radeon_device *rdev)
int r300_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -1452,6 +1455,7 @@ int r300_suspend(struct radeon_device *rdev)
void r300_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -1538,6 +1542,9 @@ int r300_init(struct radeon_device *rdev)
}
r300_set_reg_safe(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = r300_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 60170ea5e3a2..84b1d5367a11 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -75,7 +75,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
for (i = 0; i < nr; ++i) {
- if (DRM_COPY_FROM_USER
+ if (copy_from_user
(&box, &cmdbuf->boxes[n + i], sizeof(box))) {
DRM_ERROR("copy cliprect faulted\n");
return -EFAULT;
@@ -928,12 +928,12 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
*buf_idx *= 2; /* 8 bytes per buf */
- if (DRM_COPY_TO_USER(ref_age_base + *buf_idx,
+ if (copy_to_user(ref_age_base + *buf_idx,
&dev_priv->scratch_ages[header.scratch.reg],
sizeof(u32)))
return -EINVAL;
- if (DRM_COPY_FROM_USER(&h_pending,
+ if (copy_from_user(&h_pending,
ref_age_base + *buf_idx + 1,
sizeof(u32)))
return -EINVAL;
@@ -943,7 +943,7 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
h_pending--;
- if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1,
+ if (copy_to_user(ref_age_base + *buf_idx + 1,
&h_pending,
sizeof(u32)))
return -EINVAL;
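
The DRM_COPY_TO_USER()/DRM_COPY_FROM_USER() wrappers in the legacy command-buffer paths are replaced with direct copy_to_user()/copy_from_user() calls. Both kernel helpers return the number of bytes that could not be copied, so the existing "if (copy_...(...)) return -EFAULT/-EINVAL;" pattern carries over unchanged. A small hedged sketch of that convention; the helper itself is invented for illustration:

#include <linux/uaccess.h>
#include <linux/types.h>

/* illustrative helper, not part of the driver */
static int fetch_user_u32_sketch(u32 *dst, const u32 __user *src)
{
        /* copy_from_user() returns the number of bytes it could NOT copy,
         * i.e. 0 on success; hence the boolean-style check. */
        if (copy_from_user(dst, src, sizeof(*dst)))
                return -EFAULT;
        return 0;
}
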
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 6edf2b3a52b4..3768aab2710b 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -325,6 +325,8 @@ int r420_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = r420_startup(rdev);
if (r) {
@@ -335,6 +337,7 @@ int r420_resume(struct radeon_device *rdev)
int r420_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r420_cp_errata_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -348,6 +351,7 @@ int r420_suspend(struct radeon_device *rdev)
void r420_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -444,6 +448,9 @@ int r420_init(struct radeon_device *rdev)
}
r420_set_reg_safe(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = r420_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index e1aece73b370..e209eb75024f 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -240,6 +240,8 @@ int r520_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = r520_startup(rdev);
if (r) {
@@ -312,6 +314,9 @@ int r520_init(struct radeon_device *rdev)
return r;
rv515_set_safe_registers(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = r520_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9ad06732a78b..cdbc4171fe73 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -105,6 +105,7 @@ void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
extern int evergreen_rlc_resume(struct radeon_device *rdev);
+extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
/**
* r600_get_xclk - get the xclk
@@ -1644,6 +1645,67 @@ static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
r600_print_gpu_status_regs(rdev);
}
+static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+ struct rv515_mc_save save;
+ u32 tmp, i;
+
+ dev_info(rdev->dev, "GPU pci config reset\n");
+
+ /* disable dpm? */
+
+ /* Disable CP parsing/prefetching */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
+ else
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
+ /* disable the RLC */
+ WREG32(RLC_CNTL, 0);
+
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+
+ mdelay(50);
+
+ /* set mclk/sclk to bypass */
+ if (rdev->family >= CHIP_RV770)
+ rv770_set_clk_bypass_mode(rdev);
+ /* disable BM */
+ pci_clear_master(rdev->pdev);
+ /* disable mem access */
+ rv515_mc_stop(rdev, &save);
+ if (r600_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+
+ /* BIF reset workaround. Not sure if this is needed on 6xx */
+ tmp = RREG32(BUS_CNTL);
+ tmp |= VGA_COHE_SPEC_TIMER_DIS;
+ WREG32(BUS_CNTL, tmp);
+
+ tmp = RREG32(BIF_SCRATCH0);
+
+ /* reset */
+ radeon_pci_config_reset(rdev);
+ mdelay(1);
+
+ /* BIF reset workaround. Not sure if this is needed on 6xx */
+ tmp = SOFT_RESET_BIF;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ mdelay(1);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ /* wait for asic to come out of reset */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+ break;
+ udelay(1);
+ }
+}
+
int r600_asic_reset(struct radeon_device *rdev)
{
u32 reset_mask;
@@ -1653,10 +1715,17 @@ int r600_asic_reset(struct radeon_device *rdev)
if (reset_mask)
r600_set_bios_scratch_engine_hung(rdev, true);
+ /* try soft reset */
r600_gpu_soft_reset(rdev, reset_mask);
reset_mask = r600_gpu_check_soft_reset(rdev);
+ /* try pci config reset */
+ if (reset_mask && radeon_hard_reset)
+ r600_gpu_pci_config_reset(rdev);
+
+ reset_mask = r600_gpu_check_soft_reset(rdev);
+
if (!reset_mask)
r600_set_bios_scratch_engine_hung(rdev, false);
@@ -2185,7 +2254,8 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
*/
void r600_cp_stop(struct radeon_device *rdev)
{
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
WREG32(SCRATCH_UMSK, 0);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
@@ -2382,6 +2452,36 @@ out:
return err;
}
+u32 r600_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 rptr;
+
+ if (rdev->wb.enabled)
+ rptr = rdev->wb.wb[ring->rptr_offs/4];
+ else
+ rptr = RREG32(R600_CP_RB_RPTR);
+
+ return rptr;
+}
+
+u32 r600_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 wptr;
+
+ wptr = RREG32(R600_CP_RB_WPTR);
+
+ return wptr;
+}
+
+void r600_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ WREG32(R600_CP_RB_WPTR, ring->wptr);
+ (void)RREG32(R600_CP_RB_WPTR);
+}
+
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -2513,6 +2613,10 @@ int r600_cp_resume(struct radeon_device *rdev)
ring->ready = false;
return r;
}
+
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
return 0;
}
@@ -2607,14 +2711,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA;
+
+ if (rdev->family >= CHIP_RV770)
+ cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
if (rdev->wb.use_event) {
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
- PACKET3_VC_ACTION_ENA |
- PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
@@ -2628,9 +2735,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
} else {
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
- PACKET3_VC_ACTION_ENA |
- PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
@@ -2775,14 +2880,6 @@ static int r600_startup(struct radeon_device *rdev)
r600_mc_program(rdev);
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
- r = r600_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
-
if (rdev->flags & RADEON_IS_AGP) {
r600_agp_enable(rdev);
} else {
@@ -2803,12 +2900,6 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
- return r;
- }
-
/* Enable IRQ */
if (!rdev->irq.installed) {
r = radeon_irq_kms_init(rdev);
@@ -2826,18 +2917,10 @@ static int r600_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- R600_CP_RB_RPTR, R600_CP_RB_WPTR,
RADEON_CP_PACKET2);
if (r)
return r;
- ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
- r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- DMA_RB_RPTR, DMA_RB_WPTR,
- DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
- if (r)
- return r;
-
r = r600_cp_load_microcode(rdev);
if (r)
return r;
@@ -2845,10 +2928,6 @@ static int r600_startup(struct radeon_device *rdev)
if (r)
return r;
- r = r600_dma_resume(rdev);
- if (r)
- return r;
-
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2889,6 +2968,8 @@ int r600_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
@@ -2902,9 +2983,9 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r600_audio_fini(rdev);
r600_cp_stop(rdev);
- r600_dma_stop(rdev);
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
@@ -2970,12 +3051,20 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
- rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
- r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
-
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -2988,7 +3077,6 @@ int r600_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
- r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -3002,9 +3090,9 @@ int r600_init(struct radeon_device *rdev)
void r600_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r600_audio_fini(rdev);
r600_cp_fini(rdev);
- r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -3903,6 +3991,10 @@ restart_ih:
break;
}
break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
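
r600_asic_reset() above now escalates: it attempts the engine soft reset first, re-checks the hung-block mask, and only then falls back to the new r600_gpu_pci_config_reset() path, gated by the radeon_hard_reset module parameter declared later in this diff. A condensed restatement of that order; the helpers are the ones added above, the wrapper itself is illustrative:

/* condensed sketch of the flow in r600_asic_reset(); illustrative only */
static int asic_reset_flow_sketch(struct radeon_device *rdev)
{
        u32 reset_mask = r600_gpu_check_soft_reset(rdev);

        if (reset_mask)
                r600_set_bios_scratch_engine_hung(rdev, true);

        r600_gpu_soft_reset(rdev, reset_mask);          /* step 1: soft reset */
        reset_mask = r600_gpu_check_soft_reset(rdev);

        if (reset_mask && radeon_hard_reset)            /* step 2: PCI config reset */
                r600_gpu_pci_config_reset(rdev);

        reset_mask = r600_gpu_check_soft_reset(rdev);
        if (!reset_mask)
                r600_set_bios_scratch_engine_hung(rdev, false);

        return 0;
}
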
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index d8eb48bff0ed..8c9b7e26533c 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2515,7 +2515,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev,
buf = radeon_freelist_get(dev);
if (!buf) {
DRM_DEBUG("EAGAIN\n");
- if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
+ if (copy_to_user(tex->image, image, sizeof(*image)))
return -EFAULT;
return -EAGAIN;
}
@@ -2528,7 +2528,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev,
buffer =
(u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
- if (DRM_COPY_FROM_USER(buffer, data, pass_size)) {
+ if (copy_from_user(buffer, data, pass_size)) {
DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 5dceea6f71ae..2812c7d1ae6f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -749,7 +749,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}
for (i = 0; i < 8; i++) {
- if ((tmp >> (i * 4)) & 0xF) {
+ u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);
+
+ if (format != V_0280A0_COLOR_INVALID &&
+ (tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
if (track->cb_color_bo[i] == NULL) {
dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
@@ -1004,8 +1007,22 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_008C64_SQ_VSTMP_RING_SIZE:
case R_0288C8_SQ_GS_VERT_ITEMSIZE:
/* get value to populate the IB don't remove */
- tmp =radeon_get_ib_value(p, idx);
- ib[idx] = 0;
+ /*tmp =radeon_get_ib_value(p, idx);
+ ib[idx] = 0;*/
+ break;
+ case SQ_ESGS_RING_BASE:
+ case SQ_GSVS_RING_BASE:
+ case SQ_ESTMP_RING_BASE:
+ case SQ_GSTMP_RING_BASE:
+ case SQ_PSTMP_RING_BASE:
+ case SQ_VSTMP_RING_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case SQ_CONFIG:
track->sq_config = radeon_get_ib_value(p, idx);
@@ -2386,7 +2403,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
ib_chunk = &parser.chunks[parser.chunk_ib_idx];
parser.ib.length_dw = ib_chunk->length_dw;
*l = parser.ib.length_dw;
- if (DRM_COPY_FROM_USER(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
+ if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
r = -EFAULT;
r600_cs_parser_fini(&parser, r);
return r;
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 7844d15c139f..b2d4c91e6272 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -51,7 +51,14 @@ u32 r600_gpu_check_soft_reset(struct radeon_device *rdev);
uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
- return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2;
+ u32 rptr;
+
+ if (rdev->wb.enabled)
+ rptr = rdev->wb.wb[ring->rptr_offs/4];
+ else
+ rptr = RREG32(DMA_RB_RPTR);
+
+ return (rptr & 0x3fffc) >> 2;
}
/**
@@ -65,7 +72,7 @@ uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
- return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2;
+ return (RREG32(DMA_RB_WPTR) & 0x3fffc) >> 2;
}
/**
@@ -79,7 +86,7 @@ uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
void r600_dma_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
- WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc);
+ WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);
}
/**
@@ -93,7 +100,8 @@ void r600_dma_stop(struct radeon_device *rdev)
{
u32 rb_cntl = RREG32(DMA_RB_CNTL);
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
rb_cntl &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL, rb_cntl);
@@ -180,7 +188,8 @@ int r600_dma_resume(struct radeon_device *rdev)
return r;
}
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+ if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
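
The DMA read/write pointers handled above are byte offsets into the ring buffer; the low two bits are ignored and the ring code works in dwords, hence the (ptr & 0x3fffc) >> 2 conversion in r600_dma_get_rptr()/get_wptr() and the inverse shift in r600_dma_set_wptr(). A standalone illustration of that arithmetic, independent of the driver:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t rptr_reg = 0x00000108;               /* byte offset as read from DMA_RB_RPTR */
        uint32_t rptr_dw  = (rptr_reg & 0x3fffc) >> 2;

        printf("byte offset 0x%x -> dword index %u\n", rptr_reg, rptr_dw);

        /* and the inverse done when committing the write pointer */
        uint32_t wptr_dw  = 66;
        uint32_t wptr_reg = (wptr_dw << 2) & 0x3fffc;

        printf("dword index %u -> register value 0x%x\n", wptr_dw, wptr_reg);
        return 0;
}
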
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 5513d8f06252..e4cc9b314ce9 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -729,8 +729,8 @@ bool r600_is_uvd_state(u32 class, u32 class2)
return false;
}
-int r600_set_thermal_temperature_range(struct radeon_device *rdev,
- int min_temp, int max_temp)
+static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
+ int min_temp, int max_temp)
{
int low_temp = 0 * 1000;
int high_temp = 255 * 1000;
@@ -777,6 +777,22 @@ bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
}
}
+int r600_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret;
+
+ if (rdev->irq.installed &&
+ r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+ ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ rdev->irq.dpm_thermal = true;
+ radeon_irq_set(rdev);
+ }
+
+ return 0;
+}
+
union power_info {
struct _ATOM_POWERPLAY_INFO info;
struct _ATOM_POWERPLAY_INFO_V2 info_2;
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
index 1000bf9719f2..07eab2b04e81 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.h
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -213,8 +213,6 @@ void r600_wait_for_power_level(struct radeon_device *rdev,
void r600_start_dpm(struct radeon_device *rdev);
void r600_stop_dpm(struct radeon_device *rdev);
-int r600_set_thermal_temperature_range(struct radeon_device *rdev,
- int min_temp, int max_temp);
bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor);
int r600_parse_extended_power_table(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index b7d3ecba43e3..3016fc14f502 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -250,7 +250,7 @@ static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
value, ~HDMI0_AUDIO_TEST_EN);
}
-void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+static void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index ebe38724a976..37455f65107f 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -701,11 +701,18 @@
#define RLC_UCODE_DATA 0x3f30
#define SRBM_SOFT_RESET 0xe60
+# define SOFT_RESET_BIF (1 << 1)
# define SOFT_RESET_DMA (1 << 12)
# define SOFT_RESET_RLC (1 << 13)
# define SOFT_RESET_UVD (1 << 18)
# define RV770_SOFT_RESET_DMA (1 << 20)
+#define BIF_SCRATCH0 0x5438
+
+#define BUS_CNTL 0x5420
+# define BIOS_ROM_DIS (1 << 1)
+# define VGA_COHE_SPEC_TIMER_DIS (1 << 9)
+
#define CP_INT_CNTL 0xc124
# define CNTX_BUSY_INT_ENABLE (1 << 19)
# define CNTX_EMPTY_INT_ENABLE (1 << 20)
@@ -1575,6 +1582,7 @@
# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */
# define PACKET3_TC_ACTION_ENA (1 << 23)
# define PACKET3_VC_ACTION_ENA (1 << 24)
# define PACKET3_CB_ACTION_ENA (1 << 25)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b1f990d0eaa1..4a8ac1cd6b4c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -99,6 +99,7 @@ extern int radeon_fastfb;
extern int radeon_dpm;
extern int radeon_aspm;
extern int radeon_runtime_pm;
+extern int radeon_hard_reset;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -139,6 +140,9 @@ extern int radeon_runtime_pm;
#define RADEON_VA_RESERVED_SIZE (8 << 20)
#define RADEON_IB_VM_MAX_SIZE (64 << 10)
+/* hard reset data */
+#define RADEON_ASIC_RESET_DATA 0x39d5e86b
+
/* reset flags */
#define RADEON_RESET_GFX (1 << 0)
#define RADEON_RESET_COMPUTE (1 << 1)
@@ -252,6 +256,7 @@ struct radeon_clock {
* Power management
*/
int radeon_pm_init(struct radeon_device *rdev);
+int radeon_pm_late_init(struct radeon_device *rdev);
void radeon_pm_fini(struct radeon_device *rdev);
void radeon_pm_compute_clocks(struct radeon_device *rdev);
void radeon_pm_suspend(struct radeon_device *rdev);
@@ -413,6 +418,11 @@ struct radeon_mman {
struct ttm_bo_device bdev;
bool mem_global_referenced;
bool initialized;
+
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *vram;
+ struct dentry *gtt;
+#endif
};
/* bo virtual address in a specific vm */
@@ -779,13 +789,11 @@ struct radeon_ring {
volatile uint32_t *ring;
unsigned rptr;
unsigned rptr_offs;
- unsigned rptr_reg;
unsigned rptr_save_reg;
u64 next_rptr_gpu_addr;
volatile u32 *next_rptr_cpu_addr;
unsigned wptr;
unsigned wptr_old;
- unsigned wptr_reg;
unsigned ring_size;
unsigned ring_free_dw;
int count_dw;
@@ -859,6 +867,8 @@ struct radeon_vm {
struct radeon_fence *fence;
/* last flush or NULL if we still need to flush */
struct radeon_fence *last_flush;
+ /* last use of vmid */
+ struct radeon_fence *last_id_use;
};
struct radeon_vm_manager {
@@ -949,7 +959,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned size, uint32_t *data);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
- unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop);
+ unsigned rptr_offs, u32 nop);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
@@ -1775,6 +1785,7 @@ struct radeon_asic {
int (*init)(struct radeon_device *rdev);
void (*setup_asic)(struct radeon_device *rdev);
int (*enable)(struct radeon_device *rdev);
+ int (*late_enable)(struct radeon_device *rdev);
void (*disable)(struct radeon_device *rdev);
int (*pre_set_power_state)(struct radeon_device *rdev);
int (*set_power_state)(struct radeon_device *rdev);
@@ -1940,7 +1951,7 @@ struct si_asic {
unsigned sc_earlyz_tile_fifo_size;
unsigned num_tile_pipes;
- unsigned num_backends_per_se;
+ unsigned backend_enable_mask;
unsigned backend_disable_mask_per_asic;
unsigned backend_map;
unsigned num_texture_channel_caches;
@@ -1970,7 +1981,7 @@ struct cik_asic {
unsigned sc_earlyz_tile_fifo_size;
unsigned num_tile_pipes;
- unsigned num_backends_per_se;
+ unsigned backend_enable_mask;
unsigned backend_disable_mask_per_asic;
unsigned backend_map;
unsigned num_texture_channel_caches;
@@ -2650,6 +2661,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev))
#define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev))
#define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev))
+#define radeon_dpm_late_enable(rdev) rdev->asic->dpm.late_enable((rdev))
#define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev))
#define radeon_dpm_pre_set_power_state(rdev) rdev->asic->dpm.pre_set_power_state((rdev))
#define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev))
@@ -2668,6 +2680,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
+extern void radeon_pci_config_reset(struct radeon_device *rdev);
extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
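
radeon.h drops rptr_reg/wptr_reg from struct radeon_ring and shrinks radeon_ring_init() to take only the rptr writeback offset and the nop packet, since per-ring register access now lives behind the get_rptr/get_wptr/set_wptr callbacks. The before/after call below mirrors the r600_startup() hunk earlier in this diff; it is shown as a fragment, not a complete function:

/* old: register offsets were passed per ring
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
                     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
                     RADEON_CP_PACKET2);
*/

/* new: only the writeback offset and the nop packet remain */
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
                     RADEON_CP_PACKET2);
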
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 98a9074b306b..77e9d07c55b6 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -25,18 +25,14 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/power_supply.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
+#include <linux/vga_switcheroo.h>
#include <acpi/video.h>
-
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "radeon.h"
#include "radeon_acpi.h"
#include "atom.h"
-#include <linux/vga_switcheroo.h>
-
#define ACPI_AC_CLASS "ac_adapter"
extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e354ce94cdd1..dda02bfc10a4 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -182,9 +182,9 @@ static struct radeon_asic_ring r100_gfx_ring = {
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
+ .get_rptr = &r100_gfx_get_rptr,
+ .get_wptr = &r100_gfx_get_wptr,
+ .set_wptr = &r100_gfx_set_wptr,
};
static struct radeon_asic r100_asic = {
@@ -330,9 +330,9 @@ static struct radeon_asic_ring r300_gfx_ring = {
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
+ .get_rptr = &r100_gfx_get_rptr,
+ .get_wptr = &r100_gfx_get_wptr,
+ .set_wptr = &r100_gfx_set_wptr,
};
static struct radeon_asic r300_asic = {
@@ -883,9 +883,9 @@ static struct radeon_asic_ring r600_gfx_ring = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
+ .get_rptr = &r600_gfx_get_rptr,
+ .get_wptr = &r600_gfx_get_wptr,
+ .set_wptr = &r600_gfx_set_wptr,
};
static struct radeon_asic_ring r600_dma_ring = {
@@ -1045,6 +1045,7 @@ static struct radeon_asic rv6xx_asic = {
.init = &rv6xx_dpm_init,
.setup_asic = &rv6xx_setup_asic,
.enable = &rv6xx_dpm_enable,
+ .late_enable = &r600_dpm_late_enable,
.disable = &rv6xx_dpm_disable,
.pre_set_power_state = &r600_dpm_pre_set_power_state,
.set_power_state = &rv6xx_dpm_set_power_state,
@@ -1135,6 +1136,7 @@ static struct radeon_asic rs780_asic = {
.init = &rs780_dpm_init,
.setup_asic = &rs780_dpm_setup_asic,
.enable = &rs780_dpm_enable,
+ .late_enable = &r600_dpm_late_enable,
.disable = &rs780_dpm_disable,
.pre_set_power_state = &r600_dpm_pre_set_power_state,
.set_power_state = &rs780_dpm_set_power_state,
@@ -1239,6 +1241,7 @@ static struct radeon_asic rv770_asic = {
.init = &rv770_dpm_init,
.setup_asic = &rv770_dpm_setup_asic,
.enable = &rv770_dpm_enable,
+ .late_enable = &rv770_dpm_late_enable,
.disable = &rv770_dpm_disable,
.pre_set_power_state = &r600_dpm_pre_set_power_state,
.set_power_state = &rv770_dpm_set_power_state,
@@ -1267,9 +1270,9 @@ static struct radeon_asic_ring evergreen_gfx_ring = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
+ .get_rptr = &r600_gfx_get_rptr,
+ .get_wptr = &r600_gfx_get_wptr,
+ .set_wptr = &r600_gfx_set_wptr,
};
static struct radeon_asic_ring evergreen_dma_ring = {
@@ -1357,6 +1360,7 @@ static struct radeon_asic evergreen_asic = {
.init = &cypress_dpm_init,
.setup_asic = &cypress_dpm_setup_asic,
.enable = &cypress_dpm_enable,
+ .late_enable = &rv770_dpm_late_enable,
.disable = &cypress_dpm_disable,
.pre_set_power_state = &r600_dpm_pre_set_power_state,
.set_power_state = &cypress_dpm_set_power_state,
@@ -1449,6 +1453,7 @@ static struct radeon_asic sumo_asic = {
.init = &sumo_dpm_init,
.setup_asic = &sumo_dpm_setup_asic,
.enable = &sumo_dpm_enable,
+ .late_enable = &sumo_dpm_late_enable,
.disable = &sumo_dpm_disable,
.pre_set_power_state = &sumo_dpm_pre_set_power_state,
.set_power_state = &sumo_dpm_set_power_state,
@@ -1540,6 +1545,7 @@ static struct radeon_asic btc_asic = {
.init = &btc_dpm_init,
.setup_asic = &btc_dpm_setup_asic,
.enable = &btc_dpm_enable,
+ .late_enable = &rv770_dpm_late_enable,
.disable = &btc_dpm_disable,
.pre_set_power_state = &btc_dpm_pre_set_power_state,
.set_power_state = &btc_dpm_set_power_state,
@@ -1549,7 +1555,7 @@ static struct radeon_asic btc_asic = {
.get_sclk = &btc_dpm_get_sclk,
.get_mclk = &btc_dpm_get_mclk,
.print_power_state = &rv770_dpm_print_power_state,
- .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
+ .debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &btc_dpm_vblank_too_short,
},
@@ -1570,9 +1576,9 @@ static struct radeon_asic_ring cayman_gfx_ring = {
.ib_test = &r600_ib_test,
.is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
+ .get_rptr = &cayman_gfx_get_rptr,
+ .get_wptr = &cayman_gfx_get_wptr,
+ .set_wptr = &cayman_gfx_set_wptr,
};
static struct radeon_asic_ring cayman_dma_ring = {
@@ -1585,9 +1591,9 @@ static struct radeon_asic_ring cayman_dma_ring = {
.ib_test = &r600_dma_ib_test,
.is_lockup = &cayman_dma_is_lockup,
.vm_flush = &cayman_dma_vm_flush,
- .get_rptr = &r600_dma_get_rptr,
- .get_wptr = &r600_dma_get_wptr,
- .set_wptr = &r600_dma_set_wptr
+ .get_rptr = &cayman_dma_get_rptr,
+ .get_wptr = &cayman_dma_get_wptr,
+ .set_wptr = &cayman_dma_set_wptr
};
static struct radeon_asic_ring cayman_uvd_ring = {
@@ -1683,6 +1689,7 @@ static struct radeon_asic cayman_asic = {
.init = &ni_dpm_init,
.setup_asic = &ni_dpm_setup_asic,
.enable = &ni_dpm_enable,
+ .late_enable = &rv770_dpm_late_enable,
.disable = &ni_dpm_disable,
.pre_set_power_state = &ni_dpm_pre_set_power_state,
.set_power_state = &ni_dpm_set_power_state,
@@ -1783,6 +1790,7 @@ static struct radeon_asic trinity_asic = {
.init = &trinity_dpm_init,
.setup_asic = &trinity_dpm_setup_asic,
.enable = &trinity_dpm_enable,
+ .late_enable = &trinity_dpm_late_enable,
.disable = &trinity_dpm_disable,
.pre_set_power_state = &trinity_dpm_pre_set_power_state,
.set_power_state = &trinity_dpm_set_power_state,
@@ -1813,9 +1821,9 @@ static struct radeon_asic_ring si_gfx_ring = {
.ib_test = &r600_ib_test,
.is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
+ .get_rptr = &cayman_gfx_get_rptr,
+ .get_wptr = &cayman_gfx_get_wptr,
+ .set_wptr = &cayman_gfx_set_wptr,
};
static struct radeon_asic_ring si_dma_ring = {
@@ -1828,9 +1836,9 @@ static struct radeon_asic_ring si_dma_ring = {
.ib_test = &r600_dma_ib_test,
.is_lockup = &si_dma_is_lockup,
.vm_flush = &si_dma_vm_flush,
- .get_rptr = &r600_dma_get_rptr,
- .get_wptr = &r600_dma_get_wptr,
- .set_wptr = &r600_dma_set_wptr,
+ .get_rptr = &cayman_dma_get_rptr,
+ .get_wptr = &cayman_dma_get_wptr,
+ .set_wptr = &cayman_dma_set_wptr,
};
static struct radeon_asic si_asic = {
@@ -1913,6 +1921,7 @@ static struct radeon_asic si_asic = {
.init = &si_dpm_init,
.setup_asic = &si_dpm_setup_asic,
.enable = &si_dpm_enable,
+ .late_enable = &si_dpm_late_enable,
.disable = &si_dpm_disable,
.pre_set_power_state = &si_dpm_pre_set_power_state,
.set_power_state = &si_dpm_set_power_state,
@@ -1943,9 +1952,9 @@ static struct radeon_asic_ring ci_gfx_ring = {
.ib_test = &cik_ib_test,
.is_lockup = &cik_gfx_is_lockup,
.vm_flush = &cik_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
+ .get_rptr = &cik_gfx_get_rptr,
+ .get_wptr = &cik_gfx_get_wptr,
+ .set_wptr = &cik_gfx_set_wptr,
};
static struct radeon_asic_ring ci_cp_ring = {
@@ -1958,9 +1967,9 @@ static struct radeon_asic_ring ci_cp_ring = {
.ib_test = &cik_ib_test,
.is_lockup = &cik_gfx_is_lockup,
.vm_flush = &cik_vm_flush,
- .get_rptr = &cik_compute_ring_get_rptr,
- .get_wptr = &cik_compute_ring_get_wptr,
- .set_wptr = &cik_compute_ring_set_wptr,
+ .get_rptr = &cik_compute_get_rptr,
+ .get_wptr = &cik_compute_get_wptr,
+ .set_wptr = &cik_compute_set_wptr,
};
static struct radeon_asic_ring ci_dma_ring = {
@@ -1973,9 +1982,9 @@ static struct radeon_asic_ring ci_dma_ring = {
.ib_test = &cik_sdma_ib_test,
.is_lockup = &cik_sdma_is_lockup,
.vm_flush = &cik_dma_vm_flush,
- .get_rptr = &r600_dma_get_rptr,
- .get_wptr = &r600_dma_get_wptr,
- .set_wptr = &r600_dma_set_wptr,
+ .get_rptr = &cik_sdma_get_rptr,
+ .get_wptr = &cik_sdma_get_wptr,
+ .set_wptr = &cik_sdma_set_wptr,
};
static struct radeon_asic ci_asic = {
@@ -2021,7 +2030,7 @@ static struct radeon_asic ci_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = NULL,
+ .blit = &cik_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &cik_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2058,6 +2067,7 @@ static struct radeon_asic ci_asic = {
.init = &ci_dpm_init,
.setup_asic = &ci_dpm_setup_asic,
.enable = &ci_dpm_enable,
+ .late_enable = &ci_dpm_late_enable,
.disable = &ci_dpm_disable,
.pre_set_power_state = &ci_dpm_pre_set_power_state,
.set_power_state = &ci_dpm_set_power_state,
@@ -2122,7 +2132,7 @@ static struct radeon_asic kv_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = NULL,
+ .blit = &cik_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &cik_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2159,6 +2169,7 @@ static struct radeon_asic kv_asic = {
.init = &kv_dpm_init,
.setup_asic = &kv_dpm_setup_asic,
.enable = &kv_dpm_enable,
+ .late_enable = &kv_dpm_late_enable,
.disable = &kv_dpm_disable,
.pre_set_power_state = &kv_dpm_pre_set_power_state,
.set_power_state = &kv_dpm_set_power_state,
@@ -2449,7 +2460,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGCG |
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
@@ -2468,7 +2479,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGCG |
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CP_LS |
@@ -2493,7 +2504,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGCG |
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
@@ -2521,7 +2532,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGCG |
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c9fd97b58076..ae637cfda783 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -47,13 +47,6 @@ u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder);
void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
-u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-
/*
* r100,rv100,rs100,rv200,rs200
*/
@@ -148,6 +141,13 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
+u32 r100_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+u32 r100_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void r100_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+
/*
* r200,rv250,rs300,rv280
*/
@@ -368,6 +368,12 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
int r600_pcie_gart_init(struct radeon_device *rdev);
void r600_scratch_init(struct radeon_device *rdev);
int r600_init_microcode(struct radeon_device *rdev);
+u32 r600_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+u32 r600_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void r600_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
/* r600 irq */
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_init(struct radeon_device *rdev);
@@ -392,6 +398,7 @@ int rv6xx_get_temp(struct radeon_device *rdev);
int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
void r600_dpm_post_set_power_state(struct radeon_device *rdev);
+int r600_dpm_late_enable(struct radeon_device *rdev);
/* r600 dma */
uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
@@ -454,6 +461,7 @@ int rv770_get_temp(struct radeon_device *rdev);
/* rv7xx pm */
int rv770_dpm_init(struct radeon_device *rdev);
int rv770_dpm_enable(struct radeon_device *rdev);
+int rv770_dpm_late_enable(struct radeon_device *rdev);
void rv770_dpm_disable(struct radeon_device *rdev);
int rv770_dpm_set_power_state(struct radeon_device *rdev);
void rv770_dpm_setup_asic(struct radeon_device *rdev);
@@ -543,8 +551,11 @@ void btc_dpm_fini(struct radeon_device *rdev);
u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m);
int sumo_dpm_init(struct radeon_device *rdev);
int sumo_dpm_enable(struct radeon_device *rdev);
+int sumo_dpm_late_enable(struct radeon_device *rdev);
void sumo_dpm_disable(struct radeon_device *rdev);
int sumo_dpm_pre_set_power_state(struct radeon_device *rdev);
int sumo_dpm_set_power_state(struct radeon_device *rdev);
@@ -591,6 +602,19 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void cayman_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+uint32_t cayman_dma_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+uint32_t cayman_dma_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void cayman_dma_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+
int ni_dpm_init(struct radeon_device *rdev);
void ni_dpm_setup_asic(struct radeon_device *rdev);
int ni_dpm_enable(struct radeon_device *rdev);
@@ -610,6 +634,7 @@ int ni_dpm_force_performance_level(struct radeon_device *rdev,
bool ni_dpm_vblank_too_short(struct radeon_device *rdev);
int trinity_dpm_init(struct radeon_device *rdev);
int trinity_dpm_enable(struct radeon_device *rdev);
+int trinity_dpm_late_enable(struct radeon_device *rdev);
void trinity_dpm_disable(struct radeon_device *rdev);
int trinity_dpm_pre_set_power_state(struct radeon_device *rdev);
int trinity_dpm_set_power_state(struct radeon_device *rdev);
@@ -669,6 +694,7 @@ int si_get_temp(struct radeon_device *rdev);
int si_dpm_init(struct radeon_device *rdev);
void si_dpm_setup_asic(struct radeon_device *rdev);
int si_dpm_enable(struct radeon_device *rdev);
+int si_dpm_late_enable(struct radeon_device *rdev);
void si_dpm_disable(struct radeon_device *rdev);
int si_dpm_pre_set_power_state(struct radeon_device *rdev);
int si_dpm_set_power_state(struct radeon_device *rdev);
@@ -739,17 +765,30 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
uint32_t incr, uint32_t flags);
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
-u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-void cik_compute_ring_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
+u32 cik_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+u32 cik_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void cik_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+u32 cik_compute_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+u32 cik_compute_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void cik_compute_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+u32 cik_sdma_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+u32 cik_sdma_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void cik_sdma_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
int ci_get_temp(struct radeon_device *rdev);
int kv_get_temp(struct radeon_device *rdev);
int ci_dpm_init(struct radeon_device *rdev);
int ci_dpm_enable(struct radeon_device *rdev);
+int ci_dpm_late_enable(struct radeon_device *rdev);
void ci_dpm_disable(struct radeon_device *rdev);
int ci_dpm_pre_set_power_state(struct radeon_device *rdev);
int ci_dpm_set_power_state(struct radeon_device *rdev);
@@ -770,6 +809,7 @@ void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
int kv_dpm_init(struct radeon_device *rdev);
int kv_dpm_enable(struct radeon_device *rdev);
+int kv_dpm_late_enable(struct radeon_device *rdev);
void kv_dpm_disable(struct radeon_device *rdev);
int kv_dpm_pre_set_power_state(struct radeon_device *rdev);
int kv_dpm_set_power_state(struct radeon_device *rdev);
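The header changes above replace the single set of "generic" ring pointer accessors with per-ring-type ones (gfx, compute, sdma/dma) for each ASIC family. Below is a minimal, hedged sketch of how such accessors end up wired into a per-ring ops table; the struct shown is simplified and hypothetical, not the driver's real radeon_asic layout.

/* Hypothetical, simplified ops table: each ring type now supplies its
 * own pointer accessors instead of sharing one register-based helper.
 */
struct example_ring_ops {
	u32  (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
	u32  (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
	void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
};

static const struct example_ring_ops cik_gfx_ops = {
	.get_rptr = &cik_gfx_get_rptr,
	.get_wptr = &cik_gfx_get_wptr,
	.set_wptr = &cik_gfx_set_wptr,
};

static const struct example_ring_ops cik_sdma_ops = {
	.get_rptr = &cik_sdma_get_rptr,
	.get_wptr = &cik_sdma_get_wptr,
	.set_wptr = &cik_sdma_set_wptr,
};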
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5c39bf7c3d88..30844814c25a 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -30,27 +30,10 @@
#include "atom.h"
#include "atom-bits.h"
-/* from radeon_encoder.c */
-extern uint32_t
-radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
- uint8_t dac);
-extern void radeon_link_encoder_connector(struct drm_device *dev);
extern void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device, u16 caps);
-/* from radeon_connector.c */
-extern void
-radeon_add_atom_connector(struct drm_device *dev,
- uint32_t connector_id,
- uint32_t supported_device,
- int connector_type,
- struct radeon_i2c_bus_rec *i2c_bus,
- uint32_t igp_lane_info,
- uint16_t connector_object_id,
- struct radeon_hpd *hpd,
- struct radeon_router *router);
-
/* from radeon_legacy_encoder.c */
extern void
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
@@ -1528,6 +1511,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
ss->type = ss_assign->v1.ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
+ ss->percentage_divider = 100;
return true;
}
ss_assign = (union asic_ss_assignment *)
@@ -1545,6 +1529,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
ss->type = ss_assign->v2.ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
+ ss->percentage_divider = 100;
if ((crev == 2) &&
((id == ASIC_INTERNAL_ENGINE_SS) ||
(id == ASIC_INTERNAL_MEMORY_SS)))
@@ -1566,6 +1551,11 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
ss->type = ss_assign->v3.ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
+ if (ss_assign->v3.ucSpreadSpectrumMode &
+ SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK)
+ ss->percentage_divider = 1000;
+ else
+ ss->percentage_divider = 100;
if ((id == ASIC_INTERNAL_ENGINE_SS) ||
(id == ASIC_INTERNAL_MEMORY_SS))
ss->rate /= 100;
@@ -1809,7 +1799,8 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
if (misc & ATOM_DOUBLE_CLOCK_MODE)
mode->flags |= DRM_MODE_FLAG_DBLSCAN;
- mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
+ mode->crtc_clock = mode->clock =
+ le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
if (index == 1) {
/* PAL timings appear to have wrong values for totals */
@@ -1852,7 +1843,8 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
if (misc & ATOM_DOUBLE_CLOCK_MODE)
mode->flags |= DRM_MODE_FLAG_DBLSCAN;
- mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10;
+ mode->crtc_clock = mode->clock =
+ le16_to_cpu(dtd_timings->usPixClk) * 10;
break;
}
return true;
@@ -3884,16 +3876,18 @@ int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
}
reg_table->last = i;
- while ((*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) &&
+ while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) &&
(num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
- t_mem_id = (u8)((*(u32 *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT);
+ t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK)
+ >> MEM_ID_SHIFT);
if (module_index == t_mem_id) {
reg_table->mc_reg_table_entry[num_ranges].mclk_max =
- (u32)((*(u32 *)reg_data & CLOCK_RANGE_MASK) >> CLOCK_RANGE_SHIFT);
+ (u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK)
+ >> CLOCK_RANGE_SHIFT);
for (i = 0, j = 1; i < reg_table->last; i++) {
if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
- (u32)*((u32 *)reg_data + j);
+ (u32)le32_to_cpu(*((u32 *)reg_data + j));
j++;
} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
@@ -3905,7 +3899,7 @@ int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
}
- if (*(u32 *)reg_data != END_OF_REG_DATA_BLOCK)
+ if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK)
return -EINVAL;
reg_table->num_entries = num_ranges;
} else
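The le32_to_cpu() conversions in this hunk are needed because the VBIOS register-data block is stored little-endian, while the host CPU may be big-endian (the PPC case handled elsewhere in this file). A hedged sketch of the distinction, using an illustrative helper name:

#include <asm/byteorder.h>

/* Reading a raw u32 out of a little-endian VBIOS blob: dereferencing the
 * pointer directly is only correct on little-endian CPUs; le32_to_cpu()
 * makes the comparison against END_OF_REG_DATA_BLOCK work on both.
 */
static bool example_is_end_marker(const void *reg_data, u32 end_marker)
{
	u32 raw = *(const u32 *)reg_data;	/* bytes exactly as stored by the VBIOS */

	return le32_to_cpu(raw) == end_marker;	/* value in CPU byte order */
}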
@@ -3944,6 +3938,10 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
/* tell the bios not to handle mode switching */
bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
+ /* clear the vbios dpms state */
+ if (ASIC_IS_DCE4(rdev))
+ bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
+
if (rdev->family >= CHIP_R600) {
WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
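The new percentage_divider field added in the spread-spectrum hunks above records the scale of the raw ATOM percentage: revisions 1 and 2 of the table always use 100, while revision 3 uses 100 or 1000 depending on the mode flag. A minimal sketch of how a consumer might normalize the value; the helper name and the reduced struct are illustrative only, not from the patch:

/* Sketch: convert the raw ATOM spread-spectrum percentage into a
 * fixed-point value that both dividers can represent without floats.
 */
struct example_atom_ss {
	unsigned percentage;		/* e.g. 250 */
	unsigned percentage_divider;	/* 100 or 1000 */
};

static unsigned example_ss_percent_milli(const struct example_atom_ss *ss)
{
	/* Result is in 1/1000ths of a percent:
	 *   250 / 1000 -> 0.25% -> 250
	 *   250 /  100 -> 2.50% -> 2500
	 */
	return ss->percentage * 1000 / ss->percentage_divider;
}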
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 9d302eaeea15..485848f889f5 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -33,6 +33,7 @@ static struct radeon_atpx_priv {
bool atpx_detected;
/* handle for device - and atpx */
acpi_handle dhandle;
+ acpi_handle other_handle;
struct radeon_atpx atpx;
} radeon_atpx_priv;
@@ -451,9 +452,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
return false;
status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
+ radeon_atpx_priv.other_handle = dhandle;
return false;
-
+ }
radeon_atpx_priv.dhandle = dhandle;
radeon_atpx_priv.atpx.handle = atpx_handle;
return true;
@@ -530,6 +532,16 @@ static bool radeon_atpx_detect(void)
printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
acpi_method_name);
radeon_atpx_priv.atpx_detected = true;
+ /*
+ * On some systems hotplug events are generated for the device
+ * being switched off when ATPX is executed. They cause ACPI
+ * hotplug to trigger and attempt to remove the device from
+ * the system, which causes it to break down. Prevent that from
+ * happening by setting the no_hotplug flag for the involved
+ * ACPI device objects.
+ */
+ acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
+ acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
return true;
}
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 68ce36056019..6651177110f0 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -37,22 +37,6 @@
#include <asm/pci-bridge.h>
#endif /* CONFIG_PPC_PMAC */
-/* from radeon_encoder.c */
-extern uint32_t
-radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
- uint8_t dac);
-extern void radeon_link_encoder_connector(struct drm_device *dev);
-
-/* from radeon_connector.c */
-extern void
-radeon_add_legacy_connector(struct drm_device *dev,
- uint32_t connector_id,
- uint32_t supported_device,
- int connector_type,
- struct radeon_i2c_bus_rec *i2c_bus,
- uint16_t connector_object_id,
- struct radeon_hpd *hpd);
-
/* from radeon_legacy_encoder.c */
extern void
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 20a768ac89a8..82d4f865546e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -33,15 +33,6 @@
#include <linux/pm_runtime.h>
-extern void
-radeon_combios_connected_scratch_regs(struct drm_connector *connector,
- struct drm_encoder *encoder,
- bool connected);
-extern void
-radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
- struct drm_encoder *encoder,
- bool connected);
-
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 3cae2bbc1854..bb0d5c3a8311 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2020,10 +2020,10 @@ static int radeon_cp_get_buffers(struct drm_device *dev,
buf->file_priv = file_priv;
- if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
+ if (copy_to_user(&d->request_indices[i], &buf->idx,
sizeof(buf->idx)))
return -EFAULT;
- if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
+ if (copy_to_user(&d->request_sizes[i], &buf->total,
sizeof(buf->total)))
return -EFAULT;
@@ -2228,7 +2228,7 @@ void radeon_commit_ring(drm_radeon_private_t *dev_priv)
dev_priv->ring.tail &= dev_priv->ring.tail_mask;
- DRM_MEMORYBARRIER();
+ mb();
GET_RING_HEAD( dev_priv );
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
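The DRM_COPY_TO_USER()/DRM_COPY_FROM_USER() wrappers dropped throughout this series were thin aliases for the kernel's copy_to_user()/copy_from_user(). A minimal sketch of the return-value convention every converted call site relies on (the function and parameter names here are illustrative only):

#include <linux/uaccess.h>

/* copy_{to,from}_user() return the number of bytes that could NOT be
 * copied, so any non-zero result means failure and is translated into
 * -EFAULT, exactly as the converted call sites do.
 */
static int example_copy_index(int __user *dst, const int *src)
{
	if (copy_to_user(dst, src, sizeof(*src)))
		return -EFAULT;
	return 0;
}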
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 0b366169d64d..dfb5a1db87d4 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -138,7 +138,7 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
p->ring = R600_RING_TYPE_DMA_INDEX;
else
p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
- } else if (p->rdev->family >= CHIP_R600) {
+ } else if (p->rdev->family >= CHIP_RV770) {
p->ring = R600_RING_TYPE_DMA_INDEX;
} else {
return -EINVAL;
@@ -192,7 +192,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -ENOMEM;
}
chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
- if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
+ if (copy_from_user(p->chunks_array, chunk_array_ptr,
sizeof(uint64_t)*cs->num_chunks)) {
return -EFAULT;
}
@@ -208,7 +208,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
uint32_t __user *cdata;
chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
- if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
+ if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_radeon_cs_chunk))) {
return -EFAULT;
}
@@ -252,7 +252,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
if (p->chunks[i].kdata == NULL) {
return -ENOMEM;
}
- if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
+ if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
return -EFAULT;
}
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
@@ -472,7 +472,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
}
parser->const_ib.is_const_ib = true;
parser->const_ib.length_dw = ib_chunk->length_dw;
- if (DRM_COPY_FROM_USER(parser->const_ib.ptr,
+ if (copy_from_user(parser->const_ib.ptr,
ib_chunk->user_ptr,
ib_chunk->length_dw * 4))
return -EFAULT;
@@ -495,7 +495,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
parser->ib.length_dw = ib_chunk->length_dw;
if (ib_chunk->kdata)
memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
- else if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
+ else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
return -EFAULT;
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 39b033b441d2..b012cbbc3ed5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -144,6 +144,11 @@ void radeon_program_register_sequence(struct radeon_device *rdev,
}
}
+void radeon_pci_config_reset(struct radeon_device *rdev)
+{
+ pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
+}
+
/**
* radeon_surface_init - Clear GPU surface registers.
*
@@ -249,7 +254,7 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
* Init doorbell driver information (CIK)
* Returns 0 on success, error on failure.
*/
-int radeon_doorbell_init(struct radeon_device *rdev)
+static int radeon_doorbell_init(struct radeon_device *rdev)
{
/* doorbell bar mapping */
rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
@@ -278,7 +283,7 @@ int radeon_doorbell_init(struct radeon_device *rdev)
*
* Tear down doorbell driver information (CIK)
*/
-void radeon_doorbell_fini(struct radeon_device *rdev)
+static void radeon_doorbell_fini(struct radeon_device *rdev)
{
iounmap(rdev->doorbell.ptr);
rdev->doorbell.ptr = NULL;
@@ -1330,6 +1335,7 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
return r;
}
+
if ((radeon_testing & 1)) {
if (rdev->accel_working)
radeon_test_moves(rdev);
@@ -1455,7 +1461,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
radeon_save_bios_scratch_regs(rdev);
- radeon_pm_suspend(rdev);
radeon_suspend(rdev);
radeon_hpd_fini(rdev);
/* evict remaining vram memory */
@@ -1516,14 +1521,22 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
- radeon_pm_resume(rdev);
+ if (rdev->pm.dpm_enabled) {
+ /* do dpm late init */
+ r = radeon_pm_late_init(rdev);
+ if (r) {
+ rdev->pm.dpm_enabled = false;
+ DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+ }
+ }
+
radeon_restore_bios_scratch_regs(rdev);
if (fbcon) {
radeon_fbdev_set_suspend(rdev, 0);
console_unlock();
}
-
+
/* init dig PHYs, disp eng pll */
if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
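radeon_pci_config_reset() added above simply writes the ASIC reset magic into PCI config space offset 0x7c. A hedged sketch of how a caller might gate it on the new hard_reset module parameter introduced later in this series; the caller name is hypothetical and the actual reset paths in the driver may wire this differently:

/* Hypothetical caller: only attempt a PCI config reset when the user
 * explicitly asked for it via the radeon.hard_reset=1 parameter.
 */
static void example_try_hard_reset(struct radeon_device *rdev)
{
	extern int radeon_hard_reset;	/* module parameter, 0 by default */

	if (radeon_hard_reset)
		radeon_pci_config_reset(rdev);	/* pci_write_config_dword(..., 0x7c, RADEON_ASIC_RESET_DATA) */
}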
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7b253815a323..d680608f6f5b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -306,7 +306,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
* to complete in this vblank?
*/
if (update_pending &&
- (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+ (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0,
&vpos, &hpos, NULL, NULL)) &&
((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
(vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
@@ -1464,12 +1464,22 @@ int radeon_modeset_init(struct radeon_device *rdev)
/* setup afmt */
radeon_afmt_init(rdev);
- /* Initialize power management */
- radeon_pm_init(rdev);
-
radeon_fbdev_init(rdev);
drm_kms_helper_poll_init(rdev->ddev);
+ if (rdev->pm.dpm_enabled) {
+ /* do dpm late init */
+ ret = radeon_pm_late_init(rdev);
+ if (ret) {
+ rdev->pm.dpm_enabled = false;
+ DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+ }
+ /* set the dpm state for PX since there won't be
+ * a modeset to call this.
+ */
+ radeon_pm_compute_clocks(rdev);
+ }
+
return 0;
}
@@ -1477,7 +1487,6 @@ void radeon_modeset_fini(struct radeon_device *rdev)
{
radeon_fbdev_fini(rdev);
kfree(rdev->mode_info.bios_hardcoded_edid);
- radeon_pm_fini(rdev);
if (rdev->mode_info.mode_config_initialized) {
radeon_afmt_fini(rdev);
@@ -1601,6 +1610,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
*
* \param dev Device to query.
* \param crtc Crtc to query.
+ * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
* \param *vpos Location where vertical scanout position should be stored.
* \param *hpos Location where horizontal scanout position should go.
* \param *stime Target location for timestamp taken immediately before
@@ -1622,8 +1632,8 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
* unknown small number of scanlines wrt. real scanout position.
*
*/
-int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime)
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags,
+ int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
{
u32 stat_crtc = 0, vbl = 0, position = 0;
int vbl_start, vbl_end, vtotal, ret = 0;
@@ -1765,5 +1775,27 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int
if (in_vbl)
ret |= DRM_SCANOUTPOS_INVBL;
+ /* Is vpos outside nominal vblank area, but less than
+ * 1/100 of a frame height away from start of vblank?
+ * If so, assume this isn't a massively delayed vblank
+ * interrupt, but a vblank interrupt that fired a few
+ * microseconds before true start of vblank. Compensate
+ * by adding a full frame duration to the final timestamp.
+ * Happens, e.g., on ATI R500, R600.
+ *
+ * We only do this if DRM_CALLED_FROM_VBLIRQ.
+ */
+ if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
+ vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
+ vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
+
+ if (vbl_start - *vpos < vtotal / 100) {
+ *vpos -= vtotal;
+
+ /* Signal this correction as "applied". */
+ ret |= 0x8;
+ }
+ }
+
return ret;
}
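The timestamp correction added to radeon_get_crtc_scanoutpos() is easier to follow with concrete numbers. A hedged sketch of the same arithmetic with example mode values (crtc_vdisplay = 1080, crtc_vtotal = 1125); the helper is illustrative only:

/* If the vblank IRQ fires a few lines *before* vblank actually starts,
 * the reported position sits just below vbl_start.  Subtracting vtotal
 * pushes it one full frame back, so the timestamp helper that later adds
 * a full frame duration lands on the true start of vblank.
 */
static int example_correct_vpos(int vpos)
{
	const int vbl_start = 1080;	/* crtc_vdisplay */
	const int vtotal = 1125;	/* crtc_vtotal   */

	/* e.g. vpos == 1075: within 1% of a frame (11 lines) of vblank */
	if (vbl_start - vpos < vtotal / 100)
		vpos -= vtotal;		/* 1075 -> -50 */

	return vpos;
}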
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 9f5ff28864f6..84a1bbb75f91 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -77,9 +77,11 @@
* 2.33.0 - Add SI tiling mode array query
* 2.34.0 - Add CIK tiling mode array query
* 2.35.0 - Add CIK macrotile mode array query
+ * 2.36.0 - Fix CIK DCE tiling setup
+ * 2.37.0 - allow GS ring setup on r6xx/r7xx
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 35
+#define KMS_DRIVER_MINOR 37
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -101,13 +103,14 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
-irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
+irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
void radeon_gem_object_free(struct drm_gem_object *obj);
int radeon_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file_priv);
void radeon_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+ unsigned int flags,
int *vpos, int *hpos, ktime_t *stime,
ktime_t *etime);
extern const struct drm_ioctl_desc radeon_ioctls_kms[];
@@ -167,6 +170,7 @@ int radeon_fastfb = 0;
int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
+int radeon_hard_reset = 0;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -231,6 +235,9 @@ module_param_named(aspm, radeon_aspm, int, 0444);
MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
module_param_named(runpm, radeon_runtime_pm, int, 0444);
+MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
+module_param_named(hard_reset, radeon_hard_reset, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
@@ -399,6 +406,9 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
if (radeon_runtime_pm == 0)
return -EINVAL;
+ if (radeon_runtime_pm == -1 && !radeon_is_px())
+ return -EINVAL;
+
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
@@ -421,6 +431,9 @@ static int radeon_pmops_runtime_resume(struct device *dev)
if (radeon_runtime_pm == 0)
return -EINVAL;
+ if (radeon_runtime_pm == -1 && !radeon_is_px())
+ return -EINVAL;
+
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
pci_set_power_state(pdev, PCI_D0);
@@ -508,15 +521,6 @@ static const struct file_operations radeon_driver_kms_fops = {
#endif
};
-
-static void
-radeon_pci_shutdown(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- radeon_driver_unload_kms(dev);
-}
-
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP |
@@ -586,7 +590,6 @@ static struct pci_driver radeon_kms_pci_driver = {
.probe = radeon_pci_probe,
.remove = radeon_pci_remove,
.driver.pm = &radeon_pm_ops,
- .shutdown = radeon_pci_shutdown,
};
static int __init radeon_init(void)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 00e0d449021c..dafd812e4571 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -405,7 +405,7 @@ extern void radeon_do_release(struct drm_device * dev);
extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
-extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t radeon_driver_irq_handler(int irq, void *arg);
extern void radeon_driver_irq_preinstall(struct drm_device * dev);
extern int radeon_driver_irq_postinstall(struct drm_device *dev);
extern void radeon_driver_irq_uninstall(struct drm_device * dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d3a86e43c012..c37cb79a9489 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -121,7 +121,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
(*fence)->ring = ring;
radeon_fence_ring_emit(rdev, ring, *fence);
- trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
+ trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
return 0;
}
@@ -313,7 +313,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
continue;
last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
- trace_radeon_fence_wait_begin(rdev->ddev, target_seq[i]);
+ trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
radeon_irq_kms_sw_irq_get(rdev, i);
}
@@ -332,7 +332,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
continue;
radeon_irq_kms_sw_irq_put(rdev, i);
- trace_radeon_fence_wait_end(rdev->ddev, target_seq[i]);
+ trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
}
if (unlikely(r < 0))
@@ -841,6 +841,8 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
if (!rdev->fence_drv[i].initialized)
continue;
+ radeon_fence_process(rdev, i);
+
seq_printf(m, "--- ring %d ---\n", i);
seq_printf(m, "Last signaled fence 0x%016llx\n",
(unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 96e440061bdb..a8f9b463bf2a 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -713,7 +713,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
unsigned i;
/* check if the id is still valid */
- if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
+ if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
return NULL;
/* we definitely need to flush */
@@ -726,6 +726,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
if (fence == NULL) {
/* found a free one */
vm->id = i;
+ trace_radeon_vm_grab_id(vm->id, ring);
return NULL;
}
@@ -769,6 +770,9 @@ void radeon_vm_fence(struct radeon_device *rdev,
radeon_fence_unref(&vm->fence);
vm->fence = radeon_fence_ref(fence);
+
+ radeon_fence_unref(&vm->last_id_use);
+ vm->last_id_use = radeon_fence_ref(fence);
}
/**
@@ -1303,6 +1307,8 @@ void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
vm->id = 0;
vm->fence = NULL;
+ vm->last_flush = NULL;
+ vm->last_id_use = NULL;
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->list);
INIT_LIST_HEAD(&vm->va);
@@ -1341,5 +1347,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
}
radeon_fence_unref(&vm->fence);
radeon_fence_unref(&vm->last_flush);
+ radeon_fence_unref(&vm->last_id_use);
mutex_unlock(&vm->mutex);
}
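The new last_id_use fence answers the question "is the VM id this client was given last time still the one bound in hardware?". A hedged sketch of the check, using only the fields touched in the hunks above:

/* If the fence that last used this VM's id is still the one recorded as
 * active for that id, no other VM has been scheduled on it in between,
 * so the id (and the page-table flush that went with it) can be reused
 * without emitting a new flush.
 */
static bool example_vm_id_still_valid(struct radeon_device *rdev,
				      struct radeon_vm *vm)
{
	return vm->last_id_use &&
	       vm->last_id_use == rdev->vm_manager.active[vm->id];
}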
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 805c5e566b9a..b96c819024b3 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -86,7 +86,7 @@ retry:
return 0;
}
-int radeon_gem_set_domain(struct drm_gem_object *gobj,
+static int radeon_gem_set_domain(struct drm_gem_object *gobj,
uint32_t rdomain, uint32_t wdomain)
{
struct radeon_bo *robj;
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index fc60b74ee304..e24ca6ab96de 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1020,6 +1020,9 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
/* Add the default buses */
void radeon_i2c_init(struct radeon_device *rdev)
{
+ if (radeon_hw_i2c)
+ DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
+
if (rdev->is_atom_bios)
radeon_atombios_i2c_init(rdev);
else
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 8d68e972789a..244b19bab2e7 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -181,7 +181,7 @@ static u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_dis
* tied to dma at all, this is just a hangover from dri prehistory.
*/
-irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t radeon_driver_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_radeon_private_t *dev_priv =
@@ -203,7 +203,7 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
/* SW interrupt */
if (stat & RADEON_SW_INT_TEST)
- DRM_WAKEUP(&dev_priv->swi_queue);
+ wake_up(&dev_priv->swi_queue);
/* VBLANK interrupt */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
@@ -249,7 +249,7 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
- DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * HZ,
RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
return ret;
@@ -302,7 +302,7 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr
result = radeon_emit_irq(dev);
- if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+ if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
@@ -354,7 +354,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
(drm_radeon_private_t *) dev->dev_private;
atomic_set(&dev_priv->swi_emitted, 0);
- DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
+ init_waitqueue_head(&dev_priv->swi_queue);
dev->max_vblank_count = 0x001fffff;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index ec6240b00469..089c9ffb0aa9 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -39,13 +39,13 @@
/**
* radeon_driver_irq_handler_kms - irq handler for KMS
*
- * @DRM_IRQ_ARGS: args
+ * @int irq, void *arg: args
*
* This is the irq handler for the radeon KMS driver (all asics).
* radeon_irq_process is a macro that points to the per-asic
* irq handler callback.
*/
-irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
+irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 55d0b474bd37..114d1672d616 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -191,7 +191,7 @@ static void radeon_set_filp_rights(struct drm_device *dev,
* etc. (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
-int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_info *info = data;
@@ -223,7 +223,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
*value = rdev->accel_working;
break;
case RADEON_INFO_CRTC_FROM_ID:
- if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
@@ -269,7 +269,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
*
* When returning, the value is 1 if filp owns hyper-z access,
* 0 otherwise. */
- if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
@@ -281,7 +281,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
break;
case RADEON_INFO_WANT_CMASK:
/* The same logic as Hyper-Z. */
- if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
@@ -417,7 +417,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
*value = rdev->fastfb_working;
break;
case RADEON_INFO_RING_WORKING:
- if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
@@ -461,11 +461,27 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case RADEON_INFO_SI_CP_DMA_COMPUTE:
*value = 1;
break;
+ case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
+ if (rdev->family >= CHIP_BONAIRE) {
+ *value = rdev->config.cik.backend_enable_mask;
+ } else if (rdev->family >= CHIP_TAHITI) {
+ *value = rdev->config.si.backend_enable_mask;
+ } else {
+ DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
+ }
+ break;
+ case RADEON_INFO_MAX_SCLK:
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
+ rdev->pm.dpm_enabled)
+ *value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
+ else
+ *value = rdev->pm.default_sclk * 10;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
}
- if (DRM_COPY_TO_USER(value_ptr, (char*)value, value_size)) {
+ if (copy_to_user(value_ptr, (char*)value, value_size)) {
DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
@@ -703,11 +719,12 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
vblank_time, flags,
- drmcrtc);
+ drmcrtc, &drmcrtc->hwmode);
}
#define KMS_INVALID_IOCTL(name) \
-int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
+static int name(struct drm_device *dev, void *data, struct drm_file \
+ *file_priv) \
{ \
DRM_ERROR("invalid ioctl with kms %s\n", __func__); \
return -EINVAL; \
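The new RADEON_INFO_MAX_SCLK query added above is driven from userspace through the usual DRM_RADEON_INFO ioctl. A hedged userspace sketch of the query pattern (include paths depend on the libdrm installation, the helper name is hypothetical, and the driver returns the clock scaled by 10 as shown in the hunk above):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>

/* Ask the kernel for the maximum engine clock; the driver writes the
 * result through the user pointer passed in info.value.
 */
static int example_query_max_sclk(int fd, uint32_t *max_sclk)
{
	struct drm_radeon_info info;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_MAX_SCLK;
	info.value = (uint64_t)(uintptr_t)max_sclk;

	return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
}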
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
index d54d2d7c9031..146d253f1131 100644
--- a/drivers/gpu/drm/radeon/radeon_mem.c
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
@@ -243,7 +243,7 @@ int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_p
if (!block)
return -ENOMEM;
- if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
+ if (copy_to_user(alloc->region_offset, &block->start,
sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 3f0dd664af90..402dbe32c234 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -291,6 +291,7 @@ struct radeon_tv_regs {
struct radeon_atom_ss {
uint16_t percentage;
+ uint16_t percentage_divider;
uint8_t type;
uint16_t step;
uint8_t delay;
@@ -624,6 +625,30 @@ struct atom_voltage_table
struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
};
+
+extern void
+radeon_add_atom_connector(struct drm_device *dev,
+ uint32_t connector_id,
+ uint32_t supported_device,
+ int connector_type,
+ struct radeon_i2c_bus_rec *i2c_bus,
+ uint32_t igp_lane_info,
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd,
+ struct radeon_router *router);
+extern void
+radeon_add_legacy_connector(struct drm_device *dev,
+ uint32_t connector_id,
+ uint32_t supported_device,
+ int connector_type,
+ struct radeon_i2c_bus_rec *i2c_bus,
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd);
+extern uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+ uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
+
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
extern enum radeon_tv_std
@@ -631,6 +656,15 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev);
extern void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
u16 *vddc, u16 *vddci, u16 *mvdd);
+extern void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+ struct drm_encoder *encoder,
+ bool connected);
+extern void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+ struct drm_encoder *encoder,
+ bool connected);
+
extern struct drm_connector *
radeon_get_connector_for_encoder(struct drm_encoder *encoder);
extern struct drm_connector *
@@ -666,6 +700,7 @@ extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
u8 write_byte, u8 *read_byte);
+void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
extern void radeon_i2c_init(struct radeon_device *rdev);
extern void radeon_i2c_fini(struct radeon_device *rdev);
@@ -766,6 +801,7 @@ extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+ unsigned int flags,
int *vpos, int *hpos, ktime_t *stime,
ktime_t *etime);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index c0fa4aa9ceea..08595cf90b01 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -46,7 +46,7 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
* function are calling it.
*/
-void radeon_bo_clear_va(struct radeon_bo *bo)
+static void radeon_bo_clear_va(struct radeon_bo *bo)
{
struct radeon_bo_va *bo_va, *tmp;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 984097b907ef..8e8153e471c2 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -924,6 +924,10 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
if (rdev->asic->dpm.powergate_uvd) {
mutex_lock(&rdev->pm.mutex);
+ /* don't powergate anything if we
+ have active but paused streams */
+ enable |= rdev->pm.dpm.sd > 0;
+ enable |= rdev->pm.dpm.hd > 0;
/* enable/disable UVD */
radeon_dpm_powergate_uvd(rdev, !enable);
mutex_unlock(&rdev->pm.mutex);
@@ -1010,8 +1014,10 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
rdev->pm.current_clock_mode_index = 0;
rdev->pm.current_sclk = rdev->pm.default_sclk;
rdev->pm.current_mclk = rdev->pm.default_mclk;
- rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
- rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+ if (rdev->pm.power_state) {
+ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+ }
if (rdev->pm.pm_method == PM_METHOD_DYNPM
&& rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
@@ -1032,25 +1038,27 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
radeon_dpm_setup_asic(rdev);
ret = radeon_dpm_enable(rdev);
mutex_unlock(&rdev->pm.mutex);
- if (ret) {
- DRM_ERROR("radeon: dpm resume failed\n");
- if ((rdev->family >= CHIP_BARTS) &&
- (rdev->family <= CHIP_CAYMAN) &&
- rdev->mc_fw) {
- if (rdev->pm.default_vddc)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
- SET_VOLTAGE_TYPE_ASIC_VDDC);
- if (rdev->pm.default_vddci)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
- SET_VOLTAGE_TYPE_ASIC_VDDCI);
- if (rdev->pm.default_sclk)
- radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
- if (rdev->pm.default_mclk)
- radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
- }
- } else {
- rdev->pm.dpm_enabled = true;
- radeon_pm_compute_clocks(rdev);
+ if (ret)
+ goto dpm_resume_fail;
+ rdev->pm.dpm_enabled = true;
+ radeon_pm_compute_clocks(rdev);
+ return;
+
+dpm_resume_fail:
+ DRM_ERROR("radeon: dpm resume failed\n");
+ if ((rdev->family >= CHIP_BARTS) &&
+ (rdev->family <= CHIP_CAYMAN) &&
+ rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
+ if (rdev->pm.default_vddci)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+ SET_VOLTAGE_TYPE_ASIC_VDDCI);
+ if (rdev->pm.default_sclk)
+ radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+ if (rdev->pm.default_mclk)
+ radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
}
}
@@ -1170,51 +1178,50 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
radeon_dpm_setup_asic(rdev);
ret = radeon_dpm_enable(rdev);
mutex_unlock(&rdev->pm.mutex);
- if (ret) {
- rdev->pm.dpm_enabled = false;
- if ((rdev->family >= CHIP_BARTS) &&
- (rdev->family <= CHIP_CAYMAN) &&
- rdev->mc_fw) {
- if (rdev->pm.default_vddc)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
- SET_VOLTAGE_TYPE_ASIC_VDDC);
- if (rdev->pm.default_vddci)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
- SET_VOLTAGE_TYPE_ASIC_VDDCI);
- if (rdev->pm.default_sclk)
- radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
- if (rdev->pm.default_mclk)
- radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
- }
- DRM_ERROR("radeon: dpm initialization failed\n");
- return ret;
- }
+ if (ret)
+ goto dpm_failed;
rdev->pm.dpm_enabled = true;
- radeon_pm_compute_clocks(rdev);
- if (rdev->pm.num_power_states > 1) {
- ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
- if (ret)
- DRM_ERROR("failed to create device file for dpm state\n");
- ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
- if (ret)
- DRM_ERROR("failed to create device file for dpm state\n");
- /* XXX: these are noops for dpm but are here for backwards compat */
- ret = device_create_file(rdev->dev, &dev_attr_power_profile);
- if (ret)
- DRM_ERROR("failed to create device file for power profile\n");
- ret = device_create_file(rdev->dev, &dev_attr_power_method);
- if (ret)
- DRM_ERROR("failed to create device file for power method\n");
-
- if (radeon_debugfs_pm_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for dpm!\n");
- }
+ ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
+ if (ret)
+ DRM_ERROR("failed to create device file for dpm state\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
+ if (ret)
+ DRM_ERROR("failed to create device file for dpm state\n");
+ /* XXX: these are noops for dpm but are here for backwards compat */
+ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+ if (ret)
+ DRM_ERROR("failed to create device file for power profile\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_method);
+ if (ret)
+ DRM_ERROR("failed to create device file for power method\n");
- DRM_INFO("radeon: dpm initialized\n");
+ if (radeon_debugfs_pm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for dpm!\n");
}
+ DRM_INFO("radeon: dpm initialized\n");
+
return 0;
+
+dpm_failed:
+ rdev->pm.dpm_enabled = false;
+ if ((rdev->family >= CHIP_BARTS) &&
+ (rdev->family <= CHIP_CAYMAN) &&
+ rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
+ if (rdev->pm.default_vddci)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+ SET_VOLTAGE_TYPE_ASIC_VDDCI);
+ if (rdev->pm.default_sclk)
+ radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+ if (rdev->pm.default_mclk)
+ radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+ }
+ DRM_ERROR("radeon: dpm initialization failed\n");
+ return ret;
}
int radeon_pm_init(struct radeon_device *rdev)
@@ -1228,11 +1235,10 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_RV670:
case CHIP_RS780:
case CHIP_RS880:
+ case CHIP_BARTS:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
case CHIP_CAYMAN:
- case CHIP_BONAIRE:
- case CHIP_KABINI:
- case CHIP_KAVERI:
- case CHIP_HAWAII:
/* DPM requires the RLC, RV770+ dGPU requires SMC */
if (!rdev->rlc_fw)
rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1257,15 +1263,16 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
- case CHIP_BARTS:
- case CHIP_TURKS:
- case CHIP_CAICOS:
case CHIP_ARUBA:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
+ case CHIP_BONAIRE:
+ case CHIP_KABINI:
+ case CHIP_KAVERI:
+ case CHIP_HAWAII:
/* DPM requires the RLC, RV770+ dGPU requires SMC */
if (!rdev->rlc_fw)
rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1290,6 +1297,18 @@ int radeon_pm_init(struct radeon_device *rdev)
return radeon_pm_init_old(rdev);
}
+int radeon_pm_late_init(struct radeon_device *rdev)
+{
+ int ret = 0;
+
+ if (rdev->pm.pm_method == PM_METHOD_DPM) {
+ mutex_lock(&rdev->pm.mutex);
+ ret = radeon_dpm_late_enable(rdev);
+ mutex_unlock(&rdev->pm.mutex);
+ }
+ return ret;
+}
+
static void radeon_pm_fini_old(struct radeon_device *rdev)
{
if (rdev->pm.num_power_states > 1) {
@@ -1420,6 +1439,9 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
+ if (!rdev->pm.dpm_enabled)
+ return;
+
mutex_lock(&rdev->pm.mutex);
/* update active crtc counts */
@@ -1464,7 +1486,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
*/
for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
if (rdev->pm.active_crtcs & (1 << crtc)) {
- vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos, NULL, NULL);
+ vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL);
if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
!(vbl_status & DRM_SCANOUTPOS_INVBL))
in_vbl = false;
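The new radeon_pm_late_init() is meant to run once the rings are up; both the resume path and modeset init call it in the hunks above. A condensed sketch of that calling convention, with the failure handling the callers use (the wrapper name is illustrative only):

/* dpm late-enable runs only after the rings work; a failure downgrades
 * the device to non-dpm power management instead of failing bring-up.
 */
static void example_finish_power_bringup(struct radeon_device *rdev)
{
	if (rdev->pm.dpm_enabled) {
		if (radeon_pm_late_init(rdev)) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	}
}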
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 9214403ae173..1b783f0e6d3a 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -332,36 +332,6 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
}
}
-u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- u32 rptr;
-
- if (rdev->wb.enabled)
- rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
- else
- rptr = RREG32(ring->rptr_reg);
-
- return rptr;
-}
-
-u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- u32 wptr;
-
- wptr = RREG32(ring->wptr_reg);
-
- return wptr;
-}
-
-void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- WREG32(ring->wptr_reg, ring->wptr);
- (void)RREG32(ring->wptr_reg);
-}
-
/**
* radeon_ring_free_size - update the free size
*
@@ -463,7 +433,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
while (ring->wptr & ring->align_mask) {
radeon_ring_write(ring, ring->nop);
}
- DRM_MEMORYBARRIER();
+ mb();
radeon_ring_set_wptr(rdev, ring);
}
@@ -689,22 +659,18 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
* @ring: radeon_ring structure holding ring information
* @ring_size: size of the ring
* @rptr_offs: offset of the rptr writeback location in the WB buffer
- * @rptr_reg: MMIO offset of the rptr register
- * @wptr_reg: MMIO offset of the wptr register
* @nop: nop packet for this ring
*
* Initialize the driver information for the selected ring (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
- unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop)
+ unsigned rptr_offs, u32 nop)
{
int r;
ring->ring_size = ring_size;
ring->rptr_offs = rptr_offs;
- ring->rptr_reg = rptr_reg;
- ring->wptr_reg = wptr_reg;
ring->nop = nop;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
@@ -790,34 +756,54 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
struct radeon_device *rdev = dev->dev_private;
int ridx = *(int*)node->info_ent->data;
struct radeon_ring *ring = &rdev->ring[ridx];
+
+ uint32_t rptr, wptr, rptr_next;
unsigned count, i, j;
- u32 tmp;
radeon_ring_free_size(rdev, ring);
count = (ring->ring_size / 4) - ring->ring_free_dw;
- tmp = radeon_ring_get_wptr(rdev, ring);
- seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
- tmp = radeon_ring_get_rptr(rdev, ring);
- seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
+
+ wptr = radeon_ring_get_wptr(rdev, ring);
+ seq_printf(m, "wptr: 0x%08x [%5d]\n",
+ wptr, wptr);
+
+ rptr = radeon_ring_get_rptr(rdev, ring);
+ seq_printf(m, "rptr: 0x%08x [%5d]\n",
+ rptr, rptr);
+
if (ring->rptr_save_reg) {
- seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
- RREG32(ring->rptr_save_reg));
- }
- seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
- seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
- seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
- seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr);
+ rptr_next = RREG32(ring->rptr_save_reg);
+ seq_printf(m, "rptr next(0x%04x): 0x%08x [%5d]\n",
+ ring->rptr_save_reg, rptr_next, rptr_next);
+ } else
+ rptr_next = ~0;
+
+ seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
+ ring->wptr, ring->wptr);
+ seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n",
+ ring->rptr, ring->rptr);
+ seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
+ ring->last_semaphore_signal_addr);
+ seq_printf(m, "last semaphore wait addr : 0x%016llx\n",
+ ring->last_semaphore_wait_addr);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
+
+ if (!ring->ready)
+ return 0;
+
/* print 8 dw before current rptr as often it's the last executed
* packet that is the root issue
*/
- i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
- if (ring->ready) {
- for (j = 0; j <= (count + 32); j++) {
- seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
- i = (i + 1) & ring->ptr_mask;
- }
+ i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
+ for (j = 0; j <= (count + 32); j++) {
+ seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
+ if (rptr == i)
+ seq_puts(m, " *");
+ if (rptr_next == i)
+ seq_puts(m, " #");
+ seq_puts(m, "\n");
+ i = (i + 1) & ring->ptr_mask;
}
return 0;
}
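The reworked debugfs dump above starts 32 dwords before the current read pointer using mask arithmetic. A hedged worked example with a 1024-dword ring (ptr_mask = 1023); the helper is illustrative only:

/* With ptr_mask = ring_size_in_dwords - 1 (a power of two), the start
 * index wraps correctly even when rptr is near the beginning:
 *   rptr = 5:   (5 + 1024 - 32) & 1023 = 997  (wraps to the tail)
 *   rptr = 500: (500 + 1024 - 32) & 1023 = 468
 */
static unsigned example_dump_start(unsigned rptr, unsigned ptr_mask)
{
	return (rptr + ptr_mask + 1 - 32) & ptr_mask;
}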
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index f0bac68254b7..c0625805cdd7 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -402,13 +402,15 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
spin_lock(&sa_manager->wq.lock);
list_for_each_entry(i, &sa_manager->olist, olist) {
+ uint64_t soffset = i->soffset + sa_manager->gpu_addr;
+ uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
if (&i->olist == sa_manager->hole) {
seq_printf(m, ">");
} else {
seq_printf(m, " ");
}
- seq_printf(m, "[0x%08x 0x%08x] size %8d",
- i->soffset, i->eoffset, i->eoffset - i->soffset);
+ seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
+ soffset, eoffset, eoffset - soffset);
if (i->fence) {
seq_printf(m, " protected by 0x%016llx on ring %d",
i->fence->seq, i->fence->ring);
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 4d20910899d4..956ab7f14e16 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -1810,7 +1810,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
}
if (!buf) {
DRM_DEBUG("EAGAIN\n");
- if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
+ if (copy_to_user(tex->image, image, sizeof(*image)))
return -EFAULT;
return -EAGAIN;
}
@@ -1823,7 +1823,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
#define RADEON_COPY_MT(_buf, _data, _width) \
do { \
- if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
+ if (copy_from_user(_buf, _data, (_width))) {\
DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
return -EFAULT; \
} \
@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
+ if (copy_from_user(&depth_boxes, clear->depth_boxes,
sarea_priv->nbox * sizeof(depth_boxes[0])))
return -EFAULT;
@@ -2436,7 +2436,7 @@ static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file
return -EINVAL;
}
- if (DRM_COPY_FROM_USER(&image,
+ if (copy_from_user(&image,
(drm_radeon_tex_image_t __user *) tex->image,
sizeof(image)))
return -EFAULT;
@@ -2460,7 +2460,7 @@ static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file
LOCK_TEST_WITH_RETURN(dev, file_priv);
- if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
+ if (copy_from_user(&mask, stipple->mask, 32 * sizeof(u32)))
return -EFAULT;
RING_SPACE_TEST_WITH_RETURN(dev_priv);
@@ -2585,13 +2585,13 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file
drm_radeon_prim_t prim;
drm_radeon_tcl_prim_t tclprim;
- if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim)))
+ if (copy_from_user(&prim, &vertex->prim[i], sizeof(prim)))
return -EFAULT;
if (prim.stateidx != laststate) {
drm_radeon_state_t state;
- if (DRM_COPY_FROM_USER(&state,
+ if (copy_from_user(&state,
&vertex->state[prim.stateidx],
sizeof(state)))
return -EFAULT;
@@ -2799,7 +2799,7 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev,
do {
if (i < cmdbuf->nbox) {
- if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
+ if (copy_from_user(&box, &boxes[i], sizeof(box)))
return -EFAULT;
/* FIXME The second and subsequent times round
* this loop, send a WAIT_UNTIL_3D_IDLE before
@@ -3116,7 +3116,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
return -EINVAL;
}
- if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+ if (copy_to_user(param->value, &value, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 0473257d4078..f749f2c3bbdb 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -106,42 +106,45 @@ TRACE_EVENT(radeon_vm_set_page,
DECLARE_EVENT_CLASS(radeon_fence_request,
- TP_PROTO(struct drm_device *dev, u32 seqno),
+ TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
- TP_ARGS(dev, seqno),
+ TP_ARGS(dev, ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(int, ring)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
+ __entry->ring = ring;
__entry->seqno = seqno;
),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, ring=%d, seqno=%u",
+ __entry->dev, __entry->ring, __entry->seqno)
);
DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
- TP_PROTO(struct drm_device *dev, u32 seqno),
+ TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
- TP_ARGS(dev, seqno)
+ TP_ARGS(dev, ring, seqno)
);
DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
- TP_PROTO(struct drm_device *dev, u32 seqno),
+ TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
- TP_ARGS(dev, seqno)
+ TP_ARGS(dev, ring, seqno)
);
DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
- TP_PROTO(struct drm_device *dev, u32 seqno),
+ TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
- TP_ARGS(dev, seqno)
+ TP_ARGS(dev, ring, seqno)
);
DECLARE_EVENT_CLASS(radeon_semaphore_request,
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 71245d6f34a2..77f5b0c3edb8 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -39,12 +39,14 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
+#include <linux/debugfs.h>
#include "radeon_reg.h"
#include "radeon.h"
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
+static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
@@ -142,7 +144,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
- if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
+ if (!rdev->ddev->agp) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
@@ -753,6 +755,7 @@ void radeon_ttm_fini(struct radeon_device *rdev)
if (!rdev->mman.initialized)
return;
+ radeon_ttm_debugfs_fini(rdev);
if (rdev->stollen_vga_memory) {
r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
if (r == 0) {
@@ -832,16 +835,15 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
return 0;
}
-
-#define RADEON_DEBUGFS_MEM_TYPES 2
-
#if defined(CONFIG_DEBUG_FS)
+
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
+ unsigned ttm_pl = *(int *)node->info_ent->data;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv;
int ret;
struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -850,46 +852,169 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
spin_unlock(&glob->lru_lock);
return ret;
}
+
+static int ttm_pl_vram = TTM_PL_VRAM;
+static int ttm_pl_tt = TTM_PL_TT;
+
+static struct drm_info_list radeon_ttm_debugfs_list[] = {
+ {"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
+ {"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
+ {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
+#ifdef CONFIG_SWIOTLB
+ {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
+};
-static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
+static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
-#if defined(CONFIG_DEBUG_FS)
- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
- unsigned i;
+ struct radeon_device *rdev = inode->i_private;
+ i_size_write(inode, rdev->mc.mc_vram_size);
+ filep->private_data = inode->i_private;
+ return 0;
+}
- for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
- if (i == 0)
- sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
- else
- sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
- radeon_mem_types_list[i].driver_features = 0;
- if (i == 0)
- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
- else
- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
+static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct radeon_device *rdev = f->private_data;
+ ssize_t result = 0;
+ int r;
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ unsigned long flags;
+ uint32_t value;
+
+ if (*pos >= rdev->mc.mc_vram_size)
+ return result;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+ WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
+ if (rdev->family >= CHIP_CEDAR)
+ WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
+ value = RREG32(RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
}
- /* Add ttm page pool to debugfs */
- sprintf(radeon_mem_types_names[i], "ttm_page_pool");
- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
- radeon_mem_types_list[i].driver_features = 0;
- radeon_mem_types_list[i++].data = NULL;
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
- sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
- radeon_mem_types_list[i].driver_features = 0;
- radeon_mem_types_list[i++].data = NULL;
+
+ return result;
+}
+
+static const struct file_operations radeon_ttm_vram_fops = {
+ .owner = THIS_MODULE,
+ .open = radeon_ttm_vram_open,
+ .read = radeon_ttm_vram_read,
+ .llseek = default_llseek
+};
+
+static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
+{
+ struct radeon_device *rdev = inode->i_private;
+ i_size_write(inode, rdev->mc.gtt_size);
+ filep->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct radeon_device *rdev = f->private_data;
+ ssize_t result = 0;
+ int r;
+
+ while (size) {
+ loff_t p = *pos / PAGE_SIZE;
+ unsigned off = *pos & ~PAGE_MASK;
+ ssize_t cur_size = min(size, PAGE_SIZE - off);
+ struct page *page;
+ void *ptr;
+
+ if (p >= rdev->gart.num_cpu_pages)
+ return result;
+
+ page = rdev->gart.pages[p];
+ if (page) {
+ ptr = kmap(page);
+ ptr += off;
+
+ r = copy_to_user(buf, ptr, cur_size);
+ kunmap(rdev->gart.pages[p]);
+ } else
+ r = clear_user(buf, cur_size);
+
+ if (r)
+ return -EFAULT;
+
+ result += cur_size;
+ buf += cur_size;
+ *pos += cur_size;
+ size -= cur_size;
}
+
+ return result;
+}
+
+static const struct file_operations radeon_ttm_gtt_fops = {
+ .owner = THIS_MODULE,
+ .open = radeon_ttm_gtt_open,
+ .read = radeon_ttm_gtt_read,
+ .llseek = default_llseek
+};
+
#endif
- return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
+static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ unsigned count;
+
+ struct drm_minor *minor = rdev->ddev->primary;
+ struct dentry *ent, *root = minor->debugfs_root;
+
+ ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root,
+ rdev, &radeon_ttm_vram_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+ rdev->mman.vram = ent;
+
+ ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root,
+ rdev, &radeon_ttm_gtt_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+ rdev->mman.gtt = ent;
+
+ count = ARRAY_SIZE(radeon_ttm_debugfs_list);
+
+#ifdef CONFIG_SWIOTLB
+ if (!swiotlb_nr_tbl())
+ --count;
#endif
+
+ return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
+#else
+
return 0;
+#endif
+}
+
+static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+
+ debugfs_remove(rdev->mman.vram);
+ rdev->mman.vram = NULL;
+
+ debugfs_remove(rdev->mman.gtt);
+ rdev->mman.gtt = NULL;
+#endif
}
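The radeon_ttm changes above replace part of the table-driven debugfs setup with explicit debugfs_create_file()/debugfs_remove() pairs backed by custom file_operations. A standalone sketch of that lifecycle, assuming a made-up "demo" entry that exposes a static buffer (not the radeon VRAM/GTT readers themselves):

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/module.h>

static char demo_msg[] = "hello from debugfs\n";
static struct dentry *demo_dent;

static ssize_t demo_read(struct file *f, char __user *buf,
			 size_t size, loff_t *pos)
{
	/* Bounds-checked copy from a kernel buffer, honouring size and *pos. */
	return simple_read_from_buffer(buf, size, pos, demo_msg,
				       sizeof(demo_msg) - 1);
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.read = demo_read,
	.llseek = default_llseek,
};

static int __init demo_init(void)
{
	demo_dent = debugfs_create_file("demo", 0444, NULL, NULL, &demo_fops);
	return IS_ERR(demo_dent) ? PTR_ERR(demo_dent) : 0;
}

static void __exit demo_exit(void)
{
	/* debugfs_remove() tolerates NULL and error pointers. */
	debugfs_remove(demo_dent);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");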
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 373d088bac66..6781fee1eaad 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -91,6 +91,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
case CHIP_VERDE:
case CHIP_PITCAIRN:
case CHIP_ARUBA:
+ case CHIP_OLAND:
fw_name = FIRMWARE_TAHITI;
break;
@@ -473,7 +474,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
return -EINVAL;
}
- if ((start >> 28) != (end >> 28)) {
+ if ((start >> 28) != ((end - 1) >> 28)) {
DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
start, end);
return -EINVAL;
@@ -778,6 +779,8 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+ radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd,
+ &rdev->pm.dpm.hd);
radeon_dpm_enable_uvd(rdev, false);
} else {
radeon_set_uvd_clocks(rdev, 0, 0);
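The reloc check above treats end as exclusive: a buffer that stops exactly on a 256MB boundary must be judged by its last valid byte (end - 1), not by end itself. A tiny illustration with hypothetical addresses:

#include <linux/types.h>

/* True if the byte range [start, end) lies within a single 256MB window. */
static bool range_in_one_window(u64 start, u64 end)
{
	return (start >> 28) == ((end - 1) >> 28);
}

/*
 * Example: start = 0x0FFF0000, end = 0x10000000 (the range ends exactly on
 * the boundary).  (end >> 28) is 1 and would wrongly reject the range;
 * ((end - 1) >> 28) is 0, matching start, so the range is accepted.
 */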
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 20bfbda7b3f1..ec0c6829c1dc 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -18,6 +18,7 @@ r600 0x9400
0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
0x00028A40 VGT_GS_MODE
0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028B38 VGT_GS_MAX_VERT_OUT
0x000088C8 VGT_GS_PER_ES
0x000088E8 VGT_GS_PER_VS
0x000088D4 VGT_GS_VERTEX_REUSE
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 9566b5940a5a..b5c2369cda2f 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -474,6 +474,8 @@ int rs400_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = rs400_startup(rdev);
if (r) {
@@ -484,6 +486,7 @@ int rs400_resume(struct radeon_device *rdev)
int rs400_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -493,6 +496,7 @@ int rs400_suspend(struct radeon_device *rdev)
void rs400_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -560,6 +564,9 @@ int rs400_init(struct radeon_device *rdev)
return r;
r300_set_reg_safe(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = rs400_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 76cc8d3aafec..fdcde7693032 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -1048,6 +1048,8 @@ int rs600_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = rs600_startup(rdev);
if (r) {
@@ -1058,6 +1060,7 @@ int rs600_resume(struct radeon_device *rdev)
int rs600_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -1068,6 +1071,7 @@ int rs600_suspend(struct radeon_device *rdev)
void rs600_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r600_audio_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
@@ -1136,6 +1140,9 @@ int rs600_init(struct radeon_device *rdev)
return r;
rs600_set_safe_registers(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = rs600_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1c560629575a..35950738bd5e 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
base = G_000100_MC_FB_START(base) << 16;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+ /* Some boards seem to be configured for 128MB of sideport memory,
+ * but really only have 64MB. Just skip the sideport and use
+ * UMA memory.
+ */
+ if (rdev->mc.igp_sideport_enabled &&
+ (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
+ base += 128 * 1024 * 1024;
+ rdev->mc.real_vram_size -= 128 * 1024 * 1024;
+ rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+ }
/* Use K8 direct mapping for fast fb access. */
rdev->fastfb_working = false;
@@ -746,6 +756,8 @@ int rs690_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = rs690_startup(rdev);
if (r) {
@@ -756,6 +768,7 @@ int rs690_resume(struct radeon_device *rdev)
int rs690_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -766,6 +779,7 @@ int rs690_suspend(struct radeon_device *rdev)
void rs690_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r600_audio_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
@@ -835,6 +849,9 @@ int rs690_init(struct radeon_device *rdev)
return r;
rs600_set_safe_registers(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = rs690_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 6af8505cf4d2..8512085b0aef 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -623,14 +623,6 @@ int rs780_dpm_enable(struct radeon_device *rdev)
if (pi->gfx_clock_gating)
r600_gfx_clockgating_enable(rdev, true);
- if (rdev->irq.installed && (rdev->pm.int_thermal_type == THERMAL_TYPE_RV6XX)) {
- ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- rdev->irq.dpm_thermal = true;
- radeon_irq_set(rdev);
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 5d1c316115ef..98e8138ff779 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -586,6 +586,8 @@ int rv515_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = rv515_startup(rdev);
if (r) {
@@ -596,6 +598,7 @@ int rv515_resume(struct radeon_device *rdev)
int rv515_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
rs600_irq_disable(rdev);
@@ -612,6 +615,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
void rv515_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -685,6 +689,9 @@ int rv515_init(struct radeon_device *rdev)
return r;
rv515_set_safe_registers(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = rv515_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 26633a025252..bebf31c4d841 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1546,7 +1546,6 @@ int rv6xx_dpm_enable(struct radeon_device *rdev)
{
struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
- int ret;
if (r600_dynamicpm_enabled(rdev))
return -EINVAL;
@@ -1594,15 +1593,6 @@ int rv6xx_dpm_enable(struct radeon_device *rdev)
r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, true);
- if (rdev->irq.installed &&
- r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
- ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- rdev->irq.dpm_thermal = true;
- radeon_irq_set(rdev);
- }
-
rv6xx_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
r600_start_dpm(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 9f5846743c9e..6c772e58c784 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1071,7 +1071,8 @@ static void rv770_mc_program(struct radeon_device *rdev)
*/
void r700_cp_stop(struct radeon_device *rdev)
{
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
@@ -1123,6 +1124,35 @@ void r700_cp_fini(struct radeon_device *rdev)
radeon_scratch_free(rdev, ring->rptr_save_reg);
}
+void rv770_set_clk_bypass_mode(struct radeon_device *rdev)
+{
+ u32 tmp, i;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+ tmp &= SCLK_MUX_SEL_MASK;
+ tmp |= SCLK_MUX_SEL(1) | SCLK_MUX_UPDATE;
+ WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(CG_SPLL_STATUS) & SPLL_CHG_STATUS)
+ break;
+ udelay(1);
+ }
+
+ tmp &= ~SCLK_MUX_UPDATE;
+ WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+ tmp = RREG32(MPLL_CNTL_MODE);
+ if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
+ tmp &= ~RV730_MPLL_MCLK_SEL;
+ else
+ tmp &= ~MPLL_MCLK_SEL;
+ WREG32(MPLL_CNTL_MODE, tmp);
+}
+
/*
* Core functions
*/
@@ -1665,14 +1695,6 @@ static int rv770_startup(struct radeon_device *rdev)
rv770_mc_program(rdev);
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
- r = r600_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
-
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
} else {
@@ -1728,14 +1750,12 @@ static int rv770_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- R600_CP_RB_RPTR, R600_CP_RB_WPTR,
RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- DMA_RB_RPTR, DMA_RB_WPTR,
DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -1754,7 +1774,6 @@ static int rv770_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
RADEON_CP_PACKET2);
if (!r)
r = uvd_v1_0_init(rdev);
@@ -1792,6 +1811,8 @@ int rv770_resume(struct radeon_device *rdev)
/* init golden registers */
rv770_init_golden_registers(rdev);
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = rv770_startup(rdev);
if (r) {
@@ -1806,6 +1827,7 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r600_audio_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
@@ -1876,6 +1898,17 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
@@ -1915,6 +1948,7 @@ int rv770_init(struct radeon_device *rdev)
void rv770_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r700_cp_fini(rdev);
r600_dma_fini(rdev);
r600_irq_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 913b025ae9b3..5b2ea8ac0731 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -1863,8 +1863,8 @@ void rv770_enable_auto_throttle_source(struct radeon_device *rdev,
}
}
-int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
- int min_temp, int max_temp)
+static int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
+ int min_temp, int max_temp)
{
int low_temp = 0 * 1000;
int high_temp = 255 * 1000;
@@ -1966,6 +1966,15 @@ int rv770_dpm_enable(struct radeon_device *rdev)
if (pi->mg_clock_gating)
rv770_mg_clock_gating_enable(rdev, true);
+ rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+ return 0;
+}
+
+int rv770_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret;
+
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
PPSMC_Result result;
@@ -1981,8 +1990,6 @@ int rv770_dpm_enable(struct radeon_device *rdev)
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
}
- rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-
return 0;
}
@@ -2167,7 +2174,6 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct rv7xx_ps *ps = rv770_get_ps(rps);
u32 sclk, mclk;
- u16 vddc;
struct rv7xx_pl *pl;
switch (index) {
@@ -2207,8 +2213,8 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
/* patch up vddc if necessary */
if (pl->vddc == 0xff01) {
- if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
- pl->vddc = vddc;
+ if (pi->max_vddc)
+ pl->vddc = pi->max_vddc;
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -2244,14 +2250,12 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
pl->vddci = vddci;
}
- if (rdev->family >= CHIP_BARTS) {
- if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
- ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
- rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
- rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
- rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
- rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
- }
+ if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+ ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
}
}
@@ -2328,6 +2332,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_MEMORY_SS, 0);
+ /* disable ss, causes hangs on some cayman boards */
+ if (rdev->family == CHIP_CAYMAN) {
+ pi->sclk_ss = false;
+ pi->mclk_ss = false;
+ }
+
if (pi->sclk_ss || pi->mclk_ss)
pi->dynamic_ss = true;
else
@@ -2525,6 +2535,12 @@ bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
(rdev->pdev->subsystem_device == 0x1c42))
switch_limit = 200;
+ /* RV770 */
+ /* mclk switching doesn't seem to work reliably on desktop RV770s */
+ if ((rdev->family == CHIP_RV770) &&
+ !(rdev->flags & RADEON_IS_MOBILITY))
+ switch_limit = 0xffffffff; /* disable mclk switching */
+
if (vblank_time < switch_limit)
return true;
else
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h
index 9244effc6b59..f776634840c9 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.h
+++ b/drivers/gpu/drm/radeon/rv770_dpm.h
@@ -283,8 +283,4 @@ int rv770_read_smc_soft_register(struct radeon_device *rdev,
int rv770_write_smc_soft_register(struct radeon_device *rdev,
u16 reg_offset, u32 value);
-/* thermal */
-int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
- int min_temp, int max_temp);
-
#endif
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 1ae277152cc7..3cf1e2921545 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -100,14 +100,21 @@
#define CG_SPLL_FUNC_CNTL_2 0x604
#define SCLK_MUX_SEL(x) ((x) << 0)
#define SCLK_MUX_SEL_MASK (0x1ff << 0)
+#define SCLK_MUX_UPDATE (1 << 26)
#define CG_SPLL_FUNC_CNTL_3 0x608
#define SPLL_FB_DIV(x) ((x) << 0)
#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
#define SPLL_DITHEN (1 << 28)
+#define CG_SPLL_STATUS 0x60c
+#define SPLL_CHG_STATUS (1 << 1)
#define SPLL_CNTL_MODE 0x610
#define SPLL_DIV_SYNC (1 << 5)
+#define MPLL_CNTL_MODE 0x61c
+# define MPLL_MCLK_SEL (1 << 11)
+# define RV730_MPLL_MCLK_SEL (1 << 25)
+
#define MPLL_AD_FUNC_CNTL 0x624
#define CLKF(x) ((x) << 0)
#define CLKF_MASK (0x7f << 0)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index a36736dab5e0..83578324e5d1 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -80,6 +80,8 @@ extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
bool enable);
+static void si_init_pg(struct radeon_device *rdev);
+static void si_init_cg(struct radeon_device *rdev);
static void si_fini_pg(struct radeon_device *rdev);
static void si_fini_cg(struct radeon_device *rdev);
static void si_rlc_stop(struct radeon_device *rdev);
@@ -1460,7 +1462,7 @@ static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
};
/* ucode loading */
-static int si_mc_load_microcode(struct radeon_device *rdev)
+int si_mc_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
u32 running, blackout = 0;
@@ -2811,7 +2813,7 @@ static void si_setup_spi(struct radeon_device *rdev,
}
static u32 si_get_rb_disabled(struct radeon_device *rdev,
- u32 max_rb_num, u32 se_num,
+ u32 max_rb_num_per_se,
u32 sh_per_se)
{
u32 data, mask;
@@ -2825,14 +2827,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev,
data >>= BACKEND_DISABLE_SHIFT;
- mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+ mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
return data & mask;
}
static void si_setup_rb(struct radeon_device *rdev,
u32 se_num, u32 sh_per_se,
- u32 max_rb_num)
+ u32 max_rb_num_per_se)
{
int i, j;
u32 data, mask;
@@ -2842,19 +2844,21 @@ static void si_setup_rb(struct radeon_device *rdev,
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
si_select_se_sh(rdev, i, j);
- data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+ data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
}
}
si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
mask = 1;
- for (i = 0; i < max_rb_num; i++) {
+ for (i = 0; i < max_rb_num_per_se * se_num; i++) {
if (!(disabled_rbs & mask))
enabled_rbs |= mask;
mask <<= 1;
}
+ rdev->config.si.backend_enable_mask = enabled_rbs;
+
for (i = 0; i < se_num; i++) {
si_select_se_sh(rdev, i, 0xffffffff);
data = 0;
@@ -3245,7 +3249,8 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
if (enable)
WREG32(CP_ME_CNTL, 0);
else {
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
WREG32(SCRATCH_UMSK, 0);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
@@ -3506,6 +3511,9 @@ static int si_cp_resume(struct radeon_device *rdev)
si_enable_gui_idle_interrupt(rdev, true);
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
return 0;
}
@@ -3722,6 +3730,106 @@ static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
evergreen_print_gpu_status_regs(rdev);
}
+static void si_set_clk_bypass_mode(struct radeon_device *rdev)
+{
+ u32 tmp, i;
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL);
+ tmp |= SPLL_BYPASS_EN;
+ WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+ tmp |= SPLL_CTLREQ_CHG;
+ WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
+ break;
+ udelay(1);
+ }
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+ tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
+ WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+ tmp = RREG32(MPLL_CNTL_MODE);
+ tmp &= ~MPLL_MCLK_SEL;
+ WREG32(MPLL_CNTL_MODE, tmp);
+}
+
+static void si_spll_powerdown(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = RREG32(SPLL_CNTL_MODE);
+ tmp |= SPLL_SW_DIR_CONTROL;
+ WREG32(SPLL_CNTL_MODE, tmp);
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL);
+ tmp |= SPLL_RESET;
+ WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL);
+ tmp |= SPLL_SLEEP;
+ WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+ tmp = RREG32(SPLL_CNTL_MODE);
+ tmp &= ~SPLL_SW_DIR_CONTROL;
+ WREG32(SPLL_CNTL_MODE, tmp);
+}
+
+static void si_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 tmp, i;
+
+ dev_info(rdev->dev, "GPU pci config reset\n");
+
+ /* disable dpm? */
+
+ /* disable cg/pg */
+ si_fini_pg(rdev);
+ si_fini_cg(rdev);
+
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ /* XXX other engines? */
+
+ /* halt the rlc, disable cp internal ints */
+ si_rlc_stop(rdev);
+
+ udelay(50);
+
+ /* disable mem access */
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
+ }
+
+ /* set mclk/sclk to bypass */
+ si_set_clk_bypass_mode(rdev);
+ /* powerdown spll */
+ si_spll_powerdown(rdev);
+ /* disable BM */
+ pci_clear_master(rdev->pdev);
+ /* reset */
+ radeon_pci_config_reset(rdev);
+ /* wait for asic to come out of reset */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+ break;
+ udelay(1);
+ }
+}
+
int si_asic_reset(struct radeon_device *rdev)
{
u32 reset_mask;
@@ -3731,10 +3839,17 @@ int si_asic_reset(struct radeon_device *rdev)
if (reset_mask)
r600_set_bios_scratch_engine_hung(rdev, true);
+ /* try soft reset */
si_gpu_soft_reset(rdev, reset_mask);
reset_mask = si_gpu_check_soft_reset(rdev);
+ /* try pci config reset */
+ if (reset_mask && radeon_hard_reset)
+ si_gpu_pci_config_reset(rdev);
+
+ reset_mask = si_gpu_check_soft_reset(rdev);
+
if (!reset_mask)
r600_set_bios_scratch_engine_hung(rdev, false);
@@ -5210,8 +5325,8 @@ static void si_enable_hdp_ls(struct radeon_device *rdev,
WREG32(HDP_MEM_POWER_LS, data);
}
-void si_update_cg(struct radeon_device *rdev,
- u32 block, bool enable)
+static void si_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable)
{
if (block & RADEON_CG_BLOCK_GFX) {
si_enable_gui_idle_interrupt(rdev, false);
@@ -5377,6 +5492,9 @@ static void si_init_pg(struct radeon_device *rdev)
si_init_ao_cu_mask(rdev);
if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
si_init_gfx_cgpg(rdev);
+ } else {
+ WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+ WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
}
si_enable_dma_pg(rdev, true);
si_enable_gfx_cgpg(rdev, true);
@@ -5564,7 +5682,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
}
if (!ASIC_IS_NODCE(rdev)) {
- WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD1_INT_CONTROL, tmp);
@@ -6220,6 +6338,10 @@ restart_ih:
break;
}
break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
case 146:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
@@ -6322,21 +6444,14 @@ static int si_startup(struct radeon_device *rdev)
si_mc_program(rdev);
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
- !rdev->rlc_fw || !rdev->mc_fw) {
- r = si_init_microcode(rdev);
+ if (!rdev->pm.dpm_enabled) {
+ r = si_mc_load_microcode(rdev);
if (r) {
- DRM_ERROR("Failed to load firmware!\n");
+ DRM_ERROR("Failed to load MC firmware!\n");
return r;
}
}
- r = si_mc_load_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load MC firmware!\n");
- return r;
- }
-
r = si_pcie_gart_enable(rdev);
if (r)
return r;
@@ -6419,37 +6534,30 @@ static int si_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- CP_RB0_RPTR, CP_RB0_WPTR,
RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
- CP_RB1_RPTR, CP_RB1_WPTR,
RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
- CP_RB2_RPTR, CP_RB2_WPTR,
RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
- DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
- DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
- DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
if (r)
return r;
@@ -6469,7 +6577,6 @@ static int si_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
RADEON_CP_PACKET2);
if (!r)
r = uvd_v1_0_init(rdev);
@@ -6511,6 +6618,8 @@ int si_resume(struct radeon_device *rdev)
/* init golden registers */
si_init_golden_registers(rdev);
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = si_startup(rdev);
if (r) {
@@ -6525,6 +6634,7 @@ int si_resume(struct radeon_device *rdev)
int si_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
dce6_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
si_cp_enable(rdev, false);
@@ -6598,6 +6708,18 @@ int si_init(struct radeon_device *rdev)
if (r)
return r;
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+ !rdev->rlc_fw || !rdev->mc_fw) {
+ r = si_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
@@ -6664,6 +6786,7 @@ int si_init(struct radeon_device *rdev)
void si_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
si_cp_fini(rdev);
cayman_dma_fini(rdev);
si_fini_pg(rdev);
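Several of the si.c additions above (the SPLL bypass switch, the PCI config reset) wait on hardware by polling a status register in a bounded busy-wait keyed off rdev->usec_timeout. A generic sketch of that idiom, with a hypothetical readback callback standing in for RREG32():

#include <linux/delay.h>
#include <linux/types.h>

/*
 * Poll until read_fn() reports any of the bits in mask, or the timeout (in
 * microseconds) expires.  Returns true on success, false on timeout.
 */
static bool poll_status(u32 (*read_fn)(void), u32 mask, unsigned int timeout_us)
{
	unsigned int i;

	for (i = 0; i < timeout_us; i++) {
		if (read_fn() & mask)
			return true;
		udelay(1);
	}
	return false;
}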
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 0b00c790fb77..eafb0e6bc67e 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -1738,6 +1738,8 @@ struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
struct ni_power_info *ni_get_pi(struct radeon_device *rdev);
struct ni_ps *ni_get_ps(struct radeon_ps *rps);
+extern int si_mc_load_microcode(struct radeon_device *rdev);
+
static int si_populate_voltage_value(struct radeon_device *rdev,
const struct atom_voltage_table *table,
u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
@@ -1753,9 +1755,6 @@ static int si_calculate_sclk_params(struct radeon_device *rdev,
u32 engine_clock,
SISLANDS_SMC_SCLK_VALUE *sclk);
-extern void si_update_cg(struct radeon_device *rdev,
- u32 block, bool enable);
-
static struct si_power_info *si_get_pi(struct radeon_device *rdev)
{
struct si_power_info *pi = rdev->pm.dpm.priv;
@@ -2396,7 +2395,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev,
if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
enable_sq_ramping = false;
- if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
enable_sq_ramping = false;
for (i = 0; i < state->performance_level_count; i++) {
@@ -3591,10 +3590,9 @@ static void si_program_display_gap(struct radeon_device *rdev)
/* Setting this to false forces the performance state to low if the crtcs are disabled.
* This can be a problem on PowerXpress systems or if you want to use the card
- * for offscreen rendering or compute if there are no crtcs enabled. Set it to
- * true for now so that performance scales even if the displays are off.
+ * for offscreen rendering or compute if there are no crtcs enabled.
*/
- si_notify_smc_display_change(rdev, true /*rdev->pm.dpm.new_active_crtc_count > 0*/);
+ si_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
}
static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
@@ -5414,7 +5412,7 @@ static void si_populate_mc_reg_addresses(struct radeon_device *rdev,
for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
- if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
+ if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
break;
mc_reg_table->address[i].s0 =
cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
@@ -5754,6 +5752,11 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
void si_dpm_setup_asic(struct radeon_device *rdev)
{
+ int r;
+
+ r = si_mc_load_microcode(rdev);
+ if (r)
+ DRM_ERROR("Failed to load MC firmware!\n");
rv770_get_memory_type(rdev);
si_read_clock_registers(rdev);
si_enable_acpi_power_management(rdev);
@@ -5791,13 +5794,6 @@ int si_dpm_enable(struct radeon_device *rdev)
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
int ret;
- si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), false);
-
if (si_is_smc_running(rdev))
return -EINVAL;
if (pi->voltage_control)
@@ -5900,6 +5896,17 @@ int si_dpm_enable(struct radeon_device *rdev)
si_enable_sclk_control(rdev, true);
si_start_dpm(rdev);
+ si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+ ni_update_current_ps(rdev, boot_ps);
+
+ return 0;
+}
+
+int si_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret;
+
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
PPSMC_Result result;
@@ -5915,17 +5922,6 @@ int si_dpm_enable(struct radeon_device *rdev)
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
}
- si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-
- si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), true);
-
- ni_update_current_ps(rdev, boot_ps);
-
return 0;
}
@@ -5934,13 +5930,6 @@ void si_dpm_disable(struct radeon_device *rdev)
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
- si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), false);
-
if (!si_is_smc_running(rdev))
return;
si_disable_ulv(rdev);
@@ -6005,13 +5994,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
struct radeon_ps *old_ps = &eg_pi->current_rps;
int ret;
- si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), false);
-
ret = si_disable_ulv(rdev);
if (ret) {
DRM_ERROR("si_disable_ulv failed\n");
@@ -6104,13 +6086,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
- si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), true);
-
return 0;
}
@@ -6497,7 +6472,8 @@ void si_dpm_fini(struct radeon_device *rdev)
void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
struct ni_ps *ps = ni_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index d422a1cbf727..e80efcf0c230 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -28,6 +28,7 @@
#include "sid.h"
#include "ppsmc.h"
#include "radeon_ucode.h"
+#include "sislands_smc.h"
static int si_set_smc_sram_address(struct radeon_device *rdev,
u32 smc_address, u32 limit)
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index b322acc48097..9239a6d29128 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -94,6 +94,8 @@
#define CG_SPLL_FUNC_CNTL_2 0x604
#define SCLK_MUX_SEL(x) ((x) << 0)
#define SCLK_MUX_SEL_MASK (0x1ff << 0)
+#define SPLL_CTLREQ_CHG (1 << 23)
+#define SCLK_MUX_UPDATE (1 << 26)
#define CG_SPLL_FUNC_CNTL_3 0x608
#define SPLL_FB_DIV(x) ((x) << 0)
#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
@@ -101,7 +103,10 @@
#define SPLL_DITHEN (1 << 28)
#define CG_SPLL_FUNC_CNTL_4 0x60c
+#define SPLL_STATUS 0x614
+#define SPLL_CHG_STATUS (1 << 1)
#define SPLL_CNTL_MODE 0x618
+#define SPLL_SW_DIR_CONTROL (1 << 0)
# define SPLL_REFCLK_SEL(x) ((x) << 8)
# define SPLL_REFCLK_SEL_MASK 0xFF00
@@ -559,6 +564,8 @@
# define MRDCK0_BYPASS (1 << 24)
# define MRDCK1_BYPASS (1 << 25)
+#define MPLL_CNTL_MODE 0x2bb0
+# define MPLL_MCLK_SEL (1 << 11)
#define MPLL_FUNC_CNTL 0x2bb4
#define BWCTRL(x) ((x) << 20)
#define BWCTRL_MASK (0xff << 20)
@@ -815,7 +822,7 @@
# define GRPH_PFLIP_INT_MASK (1 << 0)
# define GRPH_PFLIP_INT_TYPE (1 << 8)
-#define DACA_AUTODETECT_INT_CONTROL 0x66c8
+#define DAC_AUTODETECT_INT_CONTROL 0x67c8
#define DC_HPD1_INT_STATUS 0x601c
#define DC_HPD2_INT_STATUS 0x6028
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
index 5578e9837026..10e945a49479 100644
--- a/drivers/gpu/drm/radeon/sislands_smc.h
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -374,8 +374,6 @@ typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration;
#pragma pack(pop)
-int si_set_smc_sram_address(struct radeon_device *rdev,
- u32 smc_address, u32 limit);
int si_copy_bytes_to_smc(struct radeon_device *rdev,
u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit);
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 96ea6db8bf57..8b47b3cd0357 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -71,7 +71,7 @@ static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] =
SUMO_DTC_DFLT_14,
};
-struct sumo_ps *sumo_get_ps(struct radeon_ps *rps)
+static struct sumo_ps *sumo_get_ps(struct radeon_ps *rps)
{
struct sumo_ps *ps = rps->ps_priv;
@@ -1202,14 +1202,10 @@ static void sumo_update_requested_ps(struct radeon_device *rdev,
int sumo_dpm_enable(struct radeon_device *rdev)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
- int ret;
if (sumo_dpm_enabled(rdev))
return -EINVAL;
- ret = sumo_enable_clock_power_gating(rdev);
- if (ret)
- return ret;
sumo_program_bootup_state(rdev);
sumo_init_bsp(rdev);
sumo_reset_am(rdev);
@@ -1233,6 +1229,19 @@ int sumo_dpm_enable(struct radeon_device *rdev)
if (pi->enable_boost)
sumo_enable_boost_timer(rdev);
+ sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+ return 0;
+}
+
+int sumo_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret;
+
+ ret = sumo_enable_clock_power_gating(rdev);
+ if (ret)
+ return ret;
+
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
ret = sumo_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1242,8 +1251,6 @@ int sumo_dpm_enable(struct radeon_device *rdev)
radeon_irq_set(rdev);
}
- sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
-
return 0;
}
@@ -1800,7 +1807,7 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev
struct seq_file *m)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct radeon_ps *rps = &pi->current_rps;
struct sumo_ps *ps = sumo_get_ps(rps);
struct sumo_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/sumo_smc.c b/drivers/gpu/drm/radeon/sumo_smc.c
index 18abba5b5810..fb081d2ae374 100644
--- a/drivers/gpu/drm/radeon/sumo_smc.c
+++ b/drivers/gpu/drm/radeon/sumo_smc.c
@@ -31,7 +31,6 @@
#define SUMO_SMU_SERVICE_ROUTINE_ALTVDDNB_NOTIFY 27
#define SUMO_SMU_SERVICE_ROUTINE_GFX_SRV_ID_20 20
-struct sumo_ps *sumo_get_ps(struct radeon_ps *rps);
struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev);
static void sumo_send_msg_to_smu(struct radeon_device *rdev, u32 id)
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index d700698a1f22..2da0e17eb960 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -342,14 +342,14 @@ static void trinity_apply_state_adjust_rules(struct radeon_device *rdev,
struct radeon_ps *new_rps,
struct radeon_ps *old_rps);
-struct trinity_ps *trinity_get_ps(struct radeon_ps *rps)
+static struct trinity_ps *trinity_get_ps(struct radeon_ps *rps)
{
struct trinity_ps *ps = rps->ps_priv;
return ps;
}
-struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev)
+static struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev)
{
struct trinity_power_info *pi = rdev->pm.dpm.priv;
@@ -1082,7 +1082,6 @@ void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
int trinity_dpm_enable(struct radeon_device *rdev)
{
struct trinity_power_info *pi = trinity_get_pi(rdev);
- int ret;
trinity_acquire_mutex(rdev);
@@ -1091,7 +1090,6 @@ int trinity_dpm_enable(struct radeon_device *rdev)
return -EINVAL;
}
- trinity_enable_clock_power_gating(rdev);
trinity_program_bootup_state(rdev);
sumo_program_vc(rdev, 0x00C00033);
trinity_start_am(rdev);
@@ -1105,6 +1103,18 @@ int trinity_dpm_enable(struct radeon_device *rdev)
trinity_dpm_bapm_enable(rdev, false);
trinity_release_mutex(rdev);
+ trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+ return 0;
+}
+
+int trinity_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret;
+
+ trinity_acquire_mutex(rdev);
+ trinity_enable_clock_power_gating(rdev);
+
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
ret = trinity_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1115,8 +1125,7 @@ int trinity_dpm_enable(struct radeon_device *rdev)
rdev->irq.dpm_thermal = true;
radeon_irq_set(rdev);
}
-
- trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+ trinity_release_mutex(rdev);
return 0;
}
@@ -1917,7 +1926,8 @@ void trinity_dpm_print_power_state(struct radeon_device *rdev,
void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct trinity_power_info *pi = trinity_get_pi(rdev);
+ struct radeon_ps *rps = &pi->current_rps;
struct trinity_ps *ps = trinity_get_ps(rps);
struct trinity_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
index 9672bcbc7312..99dd0455334d 100644
--- a/drivers/gpu/drm/radeon/trinity_smc.c
+++ b/drivers/gpu/drm/radeon/trinity_smc.c
@@ -27,9 +27,6 @@
#include "trinity_dpm.h"
#include "ppsmc.h"
-struct trinity_ps *trinity_get_ps(struct radeon_ps *rps);
-struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev);
-
static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id)
{
int i;
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index b19ef4951085..d1771004cb52 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -57,7 +57,6 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
radeon_ring_write(ring, 2);
- return;
}
/**
@@ -153,6 +152,7 @@ int uvd_v2_2_resume(struct radeon_device *rdev)
chip_id = 0x01000015;
break;
case CHIP_PITCAIRN:
+ case CHIP_OLAND:
chip_id = 0x01000016;
break;
case CHIP_ARUBA:
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index a9d24e4bf792..fbf4be316d0b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -371,7 +371,6 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
goto error;
rcrtc->plane->format = format;
- rcrtc->plane->pitch = crtc->fb->pitches[0];
rcrtc->plane->src_x = x;
rcrtc->plane->src_y = y;
@@ -413,7 +412,7 @@ static int rcar_du_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
rcrtc->plane->src_x = x;
rcrtc->plane->src_y = y;
- rcar_du_crtc_update_base(to_rcar_crtc(crtc));
+ rcar_du_crtc_update_base(rcrtc);
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 0023f9719cf1..792fd1d20e86 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -224,7 +224,9 @@ static int rcar_du_probe(struct platform_device *pdev)
static int rcar_du_remove(struct platform_device *pdev)
{
- drm_platform_exit(&rcar_du_driver, pdev);
+ struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
+
+ drm_put_dev(rcdu->ddev);
return 0;
}
@@ -249,8 +251,8 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = {
};
static const struct rcar_du_device_info rcar_du_r8a7790_info = {
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B
- | RCAR_DU_FEATURE_DEFR8,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8,
+ .quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES,
.num_crtcs = 3,
.routes = {
/* R8A7790 has one RGB output, two LVDS outputs and one
@@ -272,9 +274,29 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
.num_lvds = 2,
};
+static const struct rcar_du_device_info rcar_du_r8a7791_info = {
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8,
+ .num_crtcs = 2,
+ .routes = {
+ /* R8A7791 has one RGB output, one LVDS output and one
+ * (currently unsupported) TCON output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(1),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_LVDS,
+ },
+ },
+ .num_lvds = 1,
+};
+
static const struct platform_device_id rcar_du_id_table[] = {
{ "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info },
{ "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info },
+ { "rcar-du-r8a7791", (kernel_ulong_t)&rcar_du_r8a7791_info },
{ }
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 65d2d636b002..e31b735d3f25 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -28,8 +28,10 @@ struct rcar_du_device;
struct rcar_du_lvdsenc;
#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */
-#define RCAR_DU_FEATURE_ALIGN_128B (1 << 1) /* Align pitches to 128 bytes */
-#define RCAR_DU_FEATURE_DEFR8 (1 << 2) /* Has DEFR8 register */
+#define RCAR_DU_FEATURE_DEFR8 (1 << 1) /* Has DEFR8 register */
+
+#define RCAR_DU_QUIRK_ALIGN_128B (1 << 0) /* Align pitches to 128 bytes */
+#define RCAR_DU_QUIRK_LVDS_LANES (1 << 1) /* LVDS lanes 1 and 3 inverted */
/*
* struct rcar_du_output_routing - Output routing specification
@@ -48,12 +50,14 @@ struct rcar_du_output_routing {
/*
* struct rcar_du_device_info - DU model-specific information
* @features: device features (RCAR_DU_FEATURE_*)
+ * @quirks: device quirks (RCAR_DU_QUIRK_*)
* @num_crtcs: total number of CRTCs
* @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*)
* @num_lvds: number of internal LVDS encoders
*/
struct rcar_du_device_info {
unsigned int features;
+ unsigned int quirks;
unsigned int num_crtcs;
struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX];
unsigned int num_lvds;
@@ -84,6 +88,12 @@ static inline bool rcar_du_has(struct rcar_du_device *rcdu,
return rcdu->info->features & feature;
}
+static inline bool rcar_du_needs(struct rcar_du_device *rcdu,
+ unsigned int quirk)
+{
+ return rcdu->info->quirks & quirk;
+}
+
static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg)
{
return ioread32(rcdu->mmio + reg);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index b31ac080c4a7..fbeabd9a281f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -119,7 +119,7 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
/* The R8A7779 DU requires a 16 pixels pitch alignment as documented,
* but the R8A7790 DU seems to require a 128 bytes pitch alignment.
*/
- if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
+ if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
align = 128;
else
align = 16 * args->bpp / 8;
@@ -144,7 +144,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-EINVAL);
}
- if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
+ if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
align = 128;
else
align = 16 * format->bpp / 8;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index a0f6a1781925..df30a075d793 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -44,6 +44,7 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
const struct drm_display_mode *mode = &rcrtc->crtc.mode;
unsigned int freq = mode->clock;
u32 lvdcr0;
+ u32 lvdhcr;
u32 pllcr;
int ret;
@@ -72,15 +73,19 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
* VSYNC -> CTRL1
* DISP -> CTRL2
* 0 -> CTRL3
- *
- * Channels 1 and 3 are switched on ES1.
*/
rcar_lvds_write(lvds, LVDCTRCR, LVDCTRCR_CTR3SEL_ZERO |
LVDCTRCR_CTR2SEL_DISP | LVDCTRCR_CTR1SEL_VSYNC |
LVDCTRCR_CTR0SEL_HSYNC);
- rcar_lvds_write(lvds, LVDCHCR,
- LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3) |
- LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1));
+
+ if (rcar_du_needs(lvds->dev, RCAR_DU_QUIRK_LVDS_LANES))
+ lvdhcr = LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3)
+ | LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1);
+ else
+ lvdhcr = LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 1)
+ | LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 3);
+
+ rcar_lvds_write(lvds, LVDCHCR, lvdhcr);
/* Select the input, hardcode mode 0, enable LVDS operation and turn
* bias circuitry on.
@@ -144,18 +149,9 @@ static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds,
sprintf(name, "lvds.%u", lvds->index);
mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
- if (mem == NULL) {
- dev_err(&pdev->dev, "failed to get memory resource for %s\n",
- name);
- return -EINVAL;
- }
-
lvds->mmio = devm_ioremap_resource(&pdev->dev, mem);
- if (lvds->mmio == NULL) {
- dev_err(&pdev->dev, "failed to remap memory resource for %s\n",
- name);
- return -ENOMEM;
- }
+ if (IS_ERR(lvds->mmio))
+ return PTR_ERR(lvds->mmio);
lvds->clock = devm_clk_get(&pdev->dev, name);
if (IS_ERR(lvds->clock)) {
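The lvdsenc cleanup above leans on devm_ioremap_resource() returning an ERR_PTR (never NULL) on failure and printing its own diagnostics, which is why the manual NULL checks and dev_err() calls can be dropped. The resulting probe-time idiom, sketched against a hypothetical platform device:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int demo_map_regs(struct platform_device *pdev, void __iomem **base)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	*base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(*base))
		return PTR_ERR(*base);	/* already reported by devm_ioremap_resource() */

	return 0;
}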
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 53000644733f..3fb69d9ae61b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -104,6 +104,15 @@ void rcar_du_plane_update_base(struct rcar_du_plane *plane)
{
struct rcar_du_group *rgrp = plane->group;
unsigned int index = plane->hwindex;
+ u32 mwr;
+
+ /* Memory pitch (expressed in pixels) */
+ if (plane->format->planes == 2)
+ mwr = plane->pitch;
+ else
+ mwr = plane->pitch * 8 / plane->format->bpp;
+
+ rcar_du_plane_write(rgrp, index, PnMWR, mwr);
/* The Y position is expressed in raster line units and must be doubled
* for 32bpp formats, according to the R8A7790 datasheet. No mention of
@@ -133,6 +142,8 @@ void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
{
struct drm_gem_cma_object *gem;
+ plane->pitch = fb->pitches[0];
+
gem = drm_fb_cma_get_gem_obj(fb, 0);
plane->dma[0] = gem->paddr + fb->offsets[0];
@@ -209,7 +220,6 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
struct rcar_du_group *rgrp = plane->group;
u32 ddcr2 = PnDDCR2_CODE;
u32 ddcr4;
- u32 mwr;
/* Data format
*
@@ -240,14 +250,6 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2);
rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
- /* Memory pitch (expressed in pixels) */
- if (plane->format->planes == 2)
- mwr = plane->pitch;
- else
- mwr = plane->pitch * 8 / plane->format->bpp;
-
- rcar_du_plane_write(rgrp, index, PnMWR, mwr);
-
/* Destination position and size */
rcar_du_plane_write(rgrp, index, PnDSXR, plane->width);
rcar_du_plane_write(rgrp, index, PnDSYR, plane->height);
@@ -309,7 +311,6 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
rplane->crtc = crtc;
rplane->format = format;
- rplane->pitch = fb->pitches[0];
rplane->src_x = src_x >> 16;
rplane->src_y = src_y >> 16;
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index b17d0710871a..d2b2df9e26f3 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -49,7 +49,7 @@ savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
#endif
for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
- DRM_MEMORYBARRIER();
+ mb();
status = dev_priv->status_ptr[0];
if ((status & mask) < threshold)
return 0;
@@ -123,7 +123,7 @@ savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
int i;
for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
- DRM_MEMORYBARRIER();
+ mb();
status = dev_priv->status_ptr[1];
if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
(status & 0xffff) == 0)
@@ -449,7 +449,7 @@ static void savage_dma_flush(drm_savage_private_t * dev_priv)
}
}
- DRM_MEMORYBARRIER();
+ mb();
/* do flush ... */
phys_addr = dev_priv->cmd_dma->offset +
@@ -990,10 +990,10 @@ static int savage_bci_get_buffers(struct drm_device *dev,
buf->file_priv = file_priv;
- if (DRM_COPY_TO_USER(&d->request_indices[i],
+ if (copy_to_user(&d->request_indices[i],
&buf->idx, sizeof(buf->idx)))
return -EFAULT;
- if (DRM_COPY_TO_USER(&d->request_sizes[i],
+ if (copy_to_user(&d->request_sizes[i],
&buf->total, sizeof(buf->total)))
return -EFAULT;
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index b35e75ed890c..c01ad0aeaa58 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -992,7 +992,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
if (kcmd_addr == NULL)
return -ENOMEM;
- if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
+ if (copy_from_user(kcmd_addr, cmdbuf->cmd_addr,
cmdbuf->size * 8))
{
kfree(kcmd_addr);
@@ -1007,7 +1007,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
goto done;
}
- if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr,
+ if (copy_from_user(kvb_addr, cmdbuf->vb_addr,
cmdbuf->vb_size)) {
ret = -EFAULT;
goto done;
@@ -1022,7 +1022,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
goto done;
}
- if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr,
+ if (copy_from_user(kbox_addr, cmdbuf->box_addr,
cmdbuf->nbox * sizeof(struct drm_clip_rect))) {
ret = -EFAULT;
goto done;
@@ -1032,7 +1032,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
/* Make sure writes to DMA buffers are finished before sending
* DMA commands to the graphics hardware. */
- DRM_MEMORYBARRIER();
+ mb();
/* Coming from user space. Don't know if the Xserver has
* emitted wait commands. Assuming the worst. */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 562f9a401cf6..0428076f1ce8 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -37,14 +37,21 @@
* Clock management
*/
-static void shmob_drm_clk_on(struct shmob_drm_device *sdev)
+static int shmob_drm_clk_on(struct shmob_drm_device *sdev)
{
- if (sdev->clock)
- clk_prepare_enable(sdev->clock);
+ int ret;
+
+ if (sdev->clock) {
+ ret = clk_prepare_enable(sdev->clock);
+ if (ret < 0)
+ return ret;
+ }
#if 0
if (sdev->meram_dev && sdev->meram_dev->pdev)
pm_runtime_get_sync(&sdev->meram_dev->pdev->dev);
#endif
+
+ return 0;
}
static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
@@ -161,6 +168,7 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
struct drm_device *dev = sdev->ddev;
struct drm_plane *plane;
u32 value;
+ int ret;
if (scrtc->started)
return;
@@ -170,7 +178,9 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
return;
/* Enable clocks before accessing the hardware. */
- shmob_drm_clk_on(sdev);
+ ret = shmob_drm_clk_on(sdev);
+ if (ret < 0)
+ return;
/* Reset and enable the LCDC. */
lcdc_write(sdev, LDCNT2R, lcdc_read(sdev, LDCNT2R) | LDCNT2R_BR);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 015551866b4a..c839c9c89efb 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -336,7 +336,9 @@ static int shmob_drm_probe(struct platform_device *pdev)
static int shmob_drm_remove(struct platform_device *pdev)
{
- drm_platform_exit(&shmob_drm_driver, pdev);
+ struct shmob_drm_device *sdev = platform_get_drvdata(pdev);
+
+ drm_put_dev(sdev->ddev);
return 0;
}
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 4383b74a3aa4..756f787b7143 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -94,7 +94,7 @@ static int sis_driver_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
+static void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct sis_file_private *file_priv = file->driver_priv;
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 01857d836350..0573be0d2933 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -266,7 +266,7 @@ int sis_idle(struct drm_device *dev)
* because its polling frequency is too low.
*/
- end = jiffies + (DRM_HZ * 3);
+ end = jiffies + (HZ * 3);
for (i = 0; i < 4; ++i) {
do {
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 8961ba6a34b8..354ddb29231f 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -1,13 +1,12 @@
config DRM_TEGRA
- bool "NVIDIA Tegra DRM"
- depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
+ tristate "NVIDIA Tegra DRM"
+ depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
depends on DRM
- select TEGRA_HOST1X
+ depends on RESET_CONTROLLER
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
+ select DRM_MIPI_DSI
+ select DRM_PANEL
+ select TEGRA_HOST1X
help
Choose this option if you have an NVIDIA Tegra SoC.
@@ -16,6 +15,18 @@ config DRM_TEGRA
if DRM_TEGRA
+config DRM_TEGRA_FBDEV
+ bool "Enable legacy fbdev support"
+ select DRM_KMS_FB_HELPER
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ default y
+ help
+ Choose this option if you have a need for the legacy fbdev support.
+ Note that this support also provides the Linux console on top of
+ the Tegra modesetting driver.
+
config DRM_TEGRA_DEBUG
bool "NVIDIA Tegra DRM debug support"
help
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index edc76abd58bb..8d220afbd85f 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -9,6 +9,8 @@ tegra-drm-y := \
output.o \
rgb.o \
hdmi.o \
+ mipi-phy.o \
+ dsi.o \
gr2d.o \
gr3d.o
diff --git a/drivers/gpu/drm/tegra/bus.c b/drivers/gpu/drm/tegra/bus.c
index 565f8f7b9a47..e38e5967d77b 100644
--- a/drivers/gpu/drm/tegra/bus.c
+++ b/drivers/gpu/drm/tegra/bus.c
@@ -46,7 +46,6 @@ int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device)
struct drm_device *drm;
int ret;
- INIT_LIST_HEAD(&driver->device_list);
driver->bus = &drm_host1x_bus;
drm = drm_dev_alloc(driver, &device->dev);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index ae1cb31ead7e..9336006b475d 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -8,13 +8,17 @@
*/
#include <linux/clk.h>
-#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
+#include <linux/reset.h>
#include "dc.h"
#include "drm.h"
#include "gem.h"
+struct tegra_dc_soc_info {
+ bool supports_interlacing;
+};
+
struct tegra_plane {
struct drm_plane base;
unsigned int index;
@@ -658,19 +662,12 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
/* program display mode */
tegra_dc_set_timings(dc, mode);
- value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
- tegra_dc_writel(dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
-
- value = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_POLARITY(1));
- value &= ~LVS_OUTPUT_POLARITY_LOW;
- value &= ~LHS_OUTPUT_POLARITY_LOW;
- tegra_dc_writel(dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
-
- value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
- DISP_ORDER_RED_BLUE;
- tegra_dc_writel(dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
-
- tegra_dc_writel(dc, 0x00010001, DC_DISP_SHIFT_CLOCK_OPTIONS);
+ /* interlacing isn't supported yet, so disable it */
+ if (dc->soc->supports_interlacing) {
+ value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL);
+ value &= ~INTERLACE_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_INTERLACE_CONTROL);
+ }
value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
@@ -712,7 +709,7 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
unsigned long value;
/* hardware initialization */
- tegra_periph_reset_deassert(dc->clk);
+ reset_control_deassert(dc->rst);
usleep_range(10000, 20000);
if (dc->pipe)
@@ -735,10 +732,6 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
- value |= DISP_CTRL_MODE_C_DISPLAY;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
-
/* initialize timer */
value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
@@ -1107,8 +1100,6 @@ static int tegra_dc_init(struct host1x_client *client)
struct tegra_dc *dc = host1x_client_to_dc(client);
int err;
- dc->pipe = tegra->drm->mode_config.num_crtc;
-
drm_crtc_init(tegra->drm, &dc->base, &tegra_crtc_funcs);
drm_mode_crtc_set_gamma_size(&dc->base, 256);
drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
@@ -1167,8 +1158,71 @@ static const struct host1x_client_ops dc_client_ops = {
.exit = tegra_dc_exit,
};
+static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
+ .supports_interlacing = false,
+};
+
+static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
+ .supports_interlacing = false,
+};
+
+static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
+ .supports_interlacing = true,
+};
+
+static const struct of_device_id tegra_dc_of_match[] = {
+ {
+ .compatible = "nvidia,tegra124-dc",
+ .data = &tegra124_dc_soc_info,
+ }, {
+ .compatible = "nvidia,tegra30-dc",
+ .data = &tegra30_dc_soc_info,
+ }, {
+ .compatible = "nvidia,tegra20-dc",
+ .data = &tegra20_dc_soc_info,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int tegra_dc_parse_dt(struct tegra_dc *dc)
+{
+ struct device_node *np;
+ u32 value = 0;
+ int err;
+
+ err = of_property_read_u32(dc->dev->of_node, "nvidia,head", &value);
+ if (err < 0) {
+ dev_err(dc->dev, "missing \"nvidia,head\" property\n");
+
+ /*
+ * If the nvidia,head property isn't present, try to find the
+ * correct head number by looking up the position of this
+ * display controller's node within the device tree. Assuming
+ * that the nodes are ordered properly in the DTS file and
+ * that the translation into a flattened device tree blob
+ * preserves that ordering this will actually yield the right
+ * head number.
+ *
+ * If those assumptions don't hold, this will still work for
+ * cases where only a single display controller is used.
+ */
+ for_each_matching_node(np, tegra_dc_of_match) {
+ if (np == dc->dev->of_node)
+ break;
+
+ value++;
+ }
+ }
+
+ dc->pipe = value;
+
+ return 0;
+}
+
static int tegra_dc_probe(struct platform_device *pdev)
{
+ const struct of_device_id *id;
struct resource *regs;
struct tegra_dc *dc;
int err;
@@ -1177,9 +1231,18 @@ static int tegra_dc_probe(struct platform_device *pdev)
if (!dc)
return -ENOMEM;
+ id = of_match_node(tegra_dc_of_match, pdev->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
spin_lock_init(&dc->lock);
INIT_LIST_HEAD(&dc->list);
dc->dev = &pdev->dev;
+ dc->soc = id->data;
+
+ err = tegra_dc_parse_dt(dc);
+ if (err < 0)
+ return err;
dc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dc->clk)) {
@@ -1187,6 +1250,12 @@ static int tegra_dc_probe(struct platform_device *pdev)
return PTR_ERR(dc->clk);
}
+ dc->rst = devm_reset_control_get(&pdev->dev, "dc");
+ if (IS_ERR(dc->rst)) {
+ dev_err(&pdev->dev, "failed to get reset\n");
+ return PTR_ERR(dc->rst);
+ }
+
err = clk_prepare_enable(dc->clk);
if (err < 0)
return err;
@@ -1247,12 +1316,6 @@ static int tegra_dc_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id tegra_dc_of_match[] = {
- { .compatible = "nvidia,tegra30-dc", },
- { .compatible = "nvidia,tegra20-dc", },
- { },
-};
-
struct platform_driver tegra_dc_driver = {
.driver = {
.name = "tegra-dc",
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 91bbda291470..3c2c0ea1cd87 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -28,6 +28,7 @@
#define DISP_CTRL_MODE_STOP (0 << 5)
#define DISP_CTRL_MODE_C_DISPLAY (1 << 5)
#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5)
+#define DISP_CTRL_MODE_MASK (3 << 5)
#define DC_CMD_SIGNAL_RAISE 0x033
#define DC_CMD_DISPLAY_POWER_CONTROL 0x036
#define PW0_ENABLE (1 << 0)
@@ -116,6 +117,7 @@
#define DC_DISP_DISP_WIN_OPTIONS 0x402
#define HDMI_ENABLE (1 << 30)
+#define DSI_ENABLE (1 << 29)
#define DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403
#define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24)
@@ -238,6 +240,8 @@
#define DITHER_CONTROL_ERRDIFF (3 << 8)
#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431
+#define SC1_H_QUALIFIER_NONE (1 << 16)
+#define SC0_H_QUALIFIER_NONE (1 << 0)
#define DC_DISP_DATA_ENABLE_OPTIONS 0x432
#define DE_SELECT_ACTIVE_BLANK (0 << 0)
@@ -292,6 +296,11 @@
#define DC_DISP_SD_HW_K_VALUES 0x4dd
#define DC_DISP_SD_MAN_K_VALUES 0x4de
+#define DC_DISP_INTERLACE_CONTROL 0x4e5
+#define INTERLACE_STATUS (1 << 2)
+#define INTERLACE_START (1 << 1)
+#define INTERLACE_ENABLE (1 << 0)
+
#define DC_WIN_CSC_YOF 0x611
#define DC_WIN_CSC_KYRGB 0x612
#define DC_WIN_CSC_KUR 0x613
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 07eba596d458..88a529008ce0 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -104,9 +104,11 @@ static void tegra_drm_context_free(struct tegra_drm_context *context)
static void tegra_drm_lastclose(struct drm_device *drm)
{
+#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_drm *tegra = drm->dev_private;
tegra_fbdev_restore_mode(tegra->fbdev);
+#endif
}
static struct host1x_bo *
@@ -578,7 +580,7 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor)
#endif
static struct drm_driver tegra_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = tegra_drm_load,
.unload = tegra_drm_unload,
.open = tegra_drm_open,
@@ -596,6 +598,12 @@ static struct drm_driver tegra_drm_driver = {
.gem_free_object = tegra_bo_free_object,
.gem_vm_ops = &tegra_bo_vm_ops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = tegra_gem_prime_export,
+ .gem_prime_import = tegra_gem_prime_import,
+
.dumb_create = tegra_bo_dumb_create,
.dumb_map_offset = tegra_bo_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
@@ -653,8 +661,10 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra30-hdmi", },
{ .compatible = "nvidia,tegra30-gr2d", },
{ .compatible = "nvidia,tegra30-gr3d", },
+ { .compatible = "nvidia,tegra114-dsi", },
{ .compatible = "nvidia,tegra114-hdmi", },
{ .compatible = "nvidia,tegra114-gr3d", },
+ { .compatible = "nvidia,tegra124-dc", },
{ /* sentinel */ }
};
@@ -677,10 +687,14 @@ static int __init host1x_drm_init(void)
if (err < 0)
goto unregister_host1x;
- err = platform_driver_register(&tegra_hdmi_driver);
+ err = platform_driver_register(&tegra_dsi_driver);
if (err < 0)
goto unregister_dc;
+ err = platform_driver_register(&tegra_hdmi_driver);
+ if (err < 0)
+ goto unregister_dsi;
+
err = platform_driver_register(&tegra_gr2d_driver);
if (err < 0)
goto unregister_hdmi;
@@ -695,6 +709,8 @@ unregister_gr2d:
platform_driver_unregister(&tegra_gr2d_driver);
unregister_hdmi:
platform_driver_unregister(&tegra_hdmi_driver);
+unregister_dsi:
+ platform_driver_unregister(&tegra_dsi_driver);
unregister_dc:
platform_driver_unregister(&tegra_dc_driver);
unregister_host1x:
@@ -708,6 +724,7 @@ static void __exit host1x_drm_exit(void)
platform_driver_unregister(&tegra_gr3d_driver);
platform_driver_unregister(&tegra_gr2d_driver);
platform_driver_unregister(&tegra_hdmi_driver);
+ platform_driver_unregister(&tegra_dsi_driver);
platform_driver_unregister(&tegra_dc_driver);
host1x_driver_unregister(&host1x_drm_driver);
}
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 7da0b923131f..bf1cac7658f8 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -19,16 +19,20 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fixed.h>
+struct reset_control;
+
struct tegra_fb {
struct drm_framebuffer base;
struct tegra_bo **planes;
unsigned int num_planes;
};
+#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_fbdev {
struct drm_fb_helper base;
struct tegra_fb *fb;
};
+#endif
struct tegra_drm {
struct drm_device *drm;
@@ -36,7 +40,9 @@ struct tegra_drm {
struct mutex clients_lock;
struct list_head clients;
+#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_fbdev *fbdev;
+#endif
};
struct tegra_drm_client;
@@ -82,6 +88,7 @@ extern int tegra_drm_unregister_client(struct tegra_drm *tegra,
extern int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
extern int tegra_drm_exit(struct tegra_drm *tegra);
+struct tegra_dc_soc_info;
struct tegra_output;
struct tegra_dc {
@@ -93,6 +100,7 @@ struct tegra_dc {
int pipe;
struct clk *clk;
+ struct reset_control *rst;
void __iomem *regs;
int irq;
@@ -106,6 +114,8 @@ struct tegra_dc {
/* page-flip handling */
struct drm_pending_vblank_event *event;
+
+ const struct tegra_dc_soc_info *soc;
};
static inline struct tegra_dc *
@@ -174,6 +184,7 @@ struct tegra_output_ops {
enum tegra_output_type {
TEGRA_OUTPUT_RGB,
TEGRA_OUTPUT_HDMI,
+ TEGRA_OUTPUT_DSI,
};
struct tegra_output {
@@ -183,6 +194,7 @@ struct tegra_output {
const struct tegra_output_ops *ops;
enum tegra_output_type type;
+ struct drm_panel *panel;
struct i2c_adapter *ddc;
const struct edid *edid;
unsigned int hpd_irq;
@@ -260,9 +272,12 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
extern int tegra_drm_fb_init(struct drm_device *drm);
extern void tegra_drm_fb_exit(struct drm_device *drm);
+#ifdef CONFIG_DRM_TEGRA_FBDEV
extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
+#endif
extern struct platform_driver tegra_dc_driver;
+extern struct platform_driver tegra_dsi_driver;
extern struct platform_driver tegra_hdmi_driver;
extern struct platform_driver tegra_gr2d_driver;
extern struct platform_driver tegra_gr3d_driver;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
new file mode 100644
index 000000000000..d452faab0235
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -0,0 +1,971 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#include "dc.h"
+#include "drm.h"
+#include "dsi.h"
+#include "mipi-phy.h"
+
+#define DSI_VIDEO_FIFO_DEPTH (1920 / 4)
+#define DSI_HOST_FIFO_DEPTH 64
+
+struct tegra_dsi {
+ struct host1x_client client;
+ struct tegra_output output;
+ struct device *dev;
+
+ void __iomem *regs;
+
+ struct reset_control *rst;
+ struct clk *clk_parent;
+ struct clk *clk_lp;
+ struct clk *clk;
+
+ struct drm_info_list *debugfs_files;
+ struct drm_minor *minor;
+ struct dentry *debugfs;
+
+ enum mipi_dsi_pixel_format format;
+ unsigned int lanes;
+
+ struct tegra_mipi_device *mipi;
+ struct mipi_dsi_host host;
+};
+
+static inline struct tegra_dsi *
+host1x_client_to_dsi(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_dsi, client);
+}
+
+static inline struct tegra_dsi *host_to_tegra(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct tegra_dsi, host);
+}
+
+static inline struct tegra_dsi *to_dsi(struct tegra_output *output)
+{
+ return container_of(output, struct tegra_dsi, output);
+}
+
+static inline unsigned long tegra_dsi_readl(struct tegra_dsi *dsi,
+ unsigned long reg)
+{
+ return readl(dsi->regs + (reg << 2));
+}
+
+static inline void tegra_dsi_writel(struct tegra_dsi *dsi, unsigned long value,
+ unsigned long reg)
+{
+ writel(value, dsi->regs + (reg << 2));
+}
+
+static int tegra_dsi_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_dsi *dsi = node->info_ent->data;
+
+#define DUMP_REG(name) \
+ seq_printf(s, "%-32s %#05x %08lx\n", #name, name, \
+ tegra_dsi_readl(dsi, name))
+
+ DUMP_REG(DSI_INCR_SYNCPT);
+ DUMP_REG(DSI_INCR_SYNCPT_CONTROL);
+ DUMP_REG(DSI_INCR_SYNCPT_ERROR);
+ DUMP_REG(DSI_CTXSW);
+ DUMP_REG(DSI_RD_DATA);
+ DUMP_REG(DSI_WR_DATA);
+ DUMP_REG(DSI_POWER_CONTROL);
+ DUMP_REG(DSI_INT_ENABLE);
+ DUMP_REG(DSI_INT_STATUS);
+ DUMP_REG(DSI_INT_MASK);
+ DUMP_REG(DSI_HOST_CONTROL);
+ DUMP_REG(DSI_CONTROL);
+ DUMP_REG(DSI_SOL_DELAY);
+ DUMP_REG(DSI_MAX_THRESHOLD);
+ DUMP_REG(DSI_TRIGGER);
+ DUMP_REG(DSI_TX_CRC);
+ DUMP_REG(DSI_STATUS);
+
+ DUMP_REG(DSI_INIT_SEQ_CONTROL);
+ DUMP_REG(DSI_INIT_SEQ_DATA_0);
+ DUMP_REG(DSI_INIT_SEQ_DATA_1);
+ DUMP_REG(DSI_INIT_SEQ_DATA_2);
+ DUMP_REG(DSI_INIT_SEQ_DATA_3);
+ DUMP_REG(DSI_INIT_SEQ_DATA_4);
+ DUMP_REG(DSI_INIT_SEQ_DATA_5);
+ DUMP_REG(DSI_INIT_SEQ_DATA_6);
+ DUMP_REG(DSI_INIT_SEQ_DATA_7);
+
+ DUMP_REG(DSI_PKT_SEQ_0_LO);
+ DUMP_REG(DSI_PKT_SEQ_0_HI);
+ DUMP_REG(DSI_PKT_SEQ_1_LO);
+ DUMP_REG(DSI_PKT_SEQ_1_HI);
+ DUMP_REG(DSI_PKT_SEQ_2_LO);
+ DUMP_REG(DSI_PKT_SEQ_2_HI);
+ DUMP_REG(DSI_PKT_SEQ_3_LO);
+ DUMP_REG(DSI_PKT_SEQ_3_HI);
+ DUMP_REG(DSI_PKT_SEQ_4_LO);
+ DUMP_REG(DSI_PKT_SEQ_4_HI);
+ DUMP_REG(DSI_PKT_SEQ_5_LO);
+ DUMP_REG(DSI_PKT_SEQ_5_HI);
+
+ DUMP_REG(DSI_DCS_CMDS);
+
+ DUMP_REG(DSI_PKT_LEN_0_1);
+ DUMP_REG(DSI_PKT_LEN_2_3);
+ DUMP_REG(DSI_PKT_LEN_4_5);
+ DUMP_REG(DSI_PKT_LEN_6_7);
+
+ DUMP_REG(DSI_PHY_TIMING_0);
+ DUMP_REG(DSI_PHY_TIMING_1);
+ DUMP_REG(DSI_PHY_TIMING_2);
+ DUMP_REG(DSI_BTA_TIMING);
+
+ DUMP_REG(DSI_TIMEOUT_0);
+ DUMP_REG(DSI_TIMEOUT_1);
+ DUMP_REG(DSI_TO_TALLY);
+
+ DUMP_REG(DSI_PAD_CONTROL_0);
+ DUMP_REG(DSI_PAD_CONTROL_CD);
+ DUMP_REG(DSI_PAD_CD_STATUS);
+ DUMP_REG(DSI_VIDEO_MODE_CONTROL);
+ DUMP_REG(DSI_PAD_CONTROL_1);
+ DUMP_REG(DSI_PAD_CONTROL_2);
+ DUMP_REG(DSI_PAD_CONTROL_3);
+ DUMP_REG(DSI_PAD_CONTROL_4);
+
+ DUMP_REG(DSI_GANGED_MODE_CONTROL);
+ DUMP_REG(DSI_GANGED_MODE_START);
+ DUMP_REG(DSI_GANGED_MODE_SIZE);
+
+ DUMP_REG(DSI_RAW_DATA_BYTE_COUNT);
+ DUMP_REG(DSI_ULTRA_LOW_POWER_CONTROL);
+
+ DUMP_REG(DSI_INIT_SEQ_DATA_8);
+ DUMP_REG(DSI_INIT_SEQ_DATA_9);
+ DUMP_REG(DSI_INIT_SEQ_DATA_10);
+ DUMP_REG(DSI_INIT_SEQ_DATA_11);
+ DUMP_REG(DSI_INIT_SEQ_DATA_12);
+ DUMP_REG(DSI_INIT_SEQ_DATA_13);
+ DUMP_REG(DSI_INIT_SEQ_DATA_14);
+ DUMP_REG(DSI_INIT_SEQ_DATA_15);
+
+#undef DUMP_REG
+
+ return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+ { "regs", tegra_dsi_show_regs, 0, NULL },
+};
+
+static int tegra_dsi_debugfs_init(struct tegra_dsi *dsi,
+ struct drm_minor *minor)
+{
+ const char *name = dev_name(dsi->dev);
+ unsigned int i;
+ int err;
+
+ dsi->debugfs = debugfs_create_dir(name, minor->debugfs_root);
+ if (!dsi->debugfs)
+ return -ENOMEM;
+
+ dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!dsi->debugfs_files) {
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+ dsi->debugfs_files[i].data = dsi;
+
+ err = drm_debugfs_create_files(dsi->debugfs_files,
+ ARRAY_SIZE(debugfs_files),
+ dsi->debugfs, minor);
+ if (err < 0)
+ goto free;
+
+ dsi->minor = minor;
+
+ return 0;
+
+free:
+ kfree(dsi->debugfs_files);
+ dsi->debugfs_files = NULL;
+remove:
+ debugfs_remove(dsi->debugfs);
+ dsi->debugfs = NULL;
+
+ return err;
+}
+
+static int tegra_dsi_debugfs_exit(struct tegra_dsi *dsi)
+{
+ drm_debugfs_remove_files(dsi->debugfs_files, ARRAY_SIZE(debugfs_files),
+ dsi->minor);
+ dsi->minor = NULL;
+
+ kfree(dsi->debugfs_files);
+ dsi->debugfs_files = NULL;
+
+ debugfs_remove(dsi->debugfs);
+ dsi->debugfs = NULL;
+
+ return 0;
+}
+
+#define PKT_ID0(id) ((((id) & 0x3f) << 3) | (1 << 9))
+#define PKT_LEN0(len) (((len) & 0x07) << 0)
+#define PKT_ID1(id) ((((id) & 0x3f) << 13) | (1 << 19))
+#define PKT_LEN1(len) (((len) & 0x07) << 10)
+#define PKT_ID2(id) ((((id) & 0x3f) << 23) | (1 << 29))
+#define PKT_LEN2(len) (((len) & 0x07) << 20)
+
+#define PKT_LP (1 << 30)
+#define NUM_PKT_SEQ 12
+
+/* non-burst mode with sync-end */
+static const u32 pkt_seq_vnb_syne[NUM_PKT_SEQ] = {
+ [ 0] = PKT_ID0(MIPI_DSI_V_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+ PKT_LP,
+ [ 1] = 0,
+ [ 2] = PKT_ID0(MIPI_DSI_V_SYNC_END) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+ PKT_LP,
+ [ 3] = 0,
+ [ 4] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+ PKT_LP,
+ [ 5] = 0,
+ [ 6] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0),
+ [ 7] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(2) |
+ PKT_ID1(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN1(3) |
+ PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4),
+ [ 8] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+ PKT_LP,
+ [ 9] = 0,
+ [10] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0),
+ [11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(2) |
+ PKT_ID1(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN1(3) |
+ PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4),
+};
+
+static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
+{
+ struct mipi_dphy_timing timing;
+ unsigned long value, period;
+ long rate;
+ int err;
+
+ rate = clk_get_rate(dsi->clk);
+ if (rate < 0)
+ return rate;
+
+ period = DIV_ROUND_CLOSEST(1000000000UL, rate * 2);
+
+ err = mipi_dphy_timing_get_default(&timing, period);
+ if (err < 0)
+ return err;
+
+ err = mipi_dphy_timing_validate(&timing, period);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to validate D-PHY timing: %d\n", err);
+ return err;
+ }
+
+ /*
+ * The D-PHY timing fields below are expressed in byte-clock cycles,
+ * so multiply the period by 8.
+ */
+ period *= 8;
+
+ value = DSI_TIMING_FIELD(timing.hsexit, period, 1) << 24 |
+ DSI_TIMING_FIELD(timing.hstrail, period, 0) << 16 |
+ DSI_TIMING_FIELD(timing.hszero, period, 3) << 8 |
+ DSI_TIMING_FIELD(timing.hsprepare, period, 1);
+ tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_0);
+
+ value = DSI_TIMING_FIELD(timing.clktrail, period, 1) << 24 |
+ DSI_TIMING_FIELD(timing.clkpost, period, 1) << 16 |
+ DSI_TIMING_FIELD(timing.clkzero, period, 1) << 8 |
+ DSI_TIMING_FIELD(timing.lpx, period, 1);
+ tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_1);
+
+ value = DSI_TIMING_FIELD(timing.clkprepare, period, 1) << 16 |
+ DSI_TIMING_FIELD(timing.clkpre, period, 1) << 8 |
+ DSI_TIMING_FIELD(0xff * period, period, 0) << 0;
+ tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_2);
+
+ value = DSI_TIMING_FIELD(timing.taget, period, 1) << 16 |
+ DSI_TIMING_FIELD(timing.tasure, period, 1) << 8 |
+ DSI_TIMING_FIELD(timing.tago, period, 1);
+ tegra_dsi_writel(dsi, value, DSI_BTA_TIMING);
+
+ return 0;
+}
+
+static int tegra_dsi_get_muldiv(enum mipi_dsi_pixel_format format,
+ unsigned int *mulp, unsigned int *divp)
+{
+ switch (format) {
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ case MIPI_DSI_FMT_RGB888:
+ *mulp = 3;
+ *divp = 1;
+ break;
+
+ case MIPI_DSI_FMT_RGB565:
+ *mulp = 2;
+ *divp = 1;
+ break;
+
+ case MIPI_DSI_FMT_RGB666:
+ *mulp = 9;
+ *divp = 4;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tegra_output_dsi_enable(struct tegra_output *output)
+{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct drm_display_mode *mode = &dc->base.mode;
+ unsigned int hact, hsw, hbp, hfp, i, mul, div;
+ struct tegra_dsi *dsi = to_dsi(output);
+ /* FIXME: don't hardcode this */
+ const u32 *pkt_seq = pkt_seq_vnb_syne;
+ unsigned long value;
+ int err;
+
+ err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
+ if (err < 0)
+ return err;
+
+ err = clk_enable(dsi->clk);
+ if (err < 0)
+ return err;
+
+ reset_control_deassert(dsi->rst);
+
+ value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(dsi->format) |
+ DSI_CONTROL_LANES(dsi->lanes - 1) |
+ DSI_CONTROL_SOURCE(dc->pipe);
+ tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+ tegra_dsi_writel(dsi, DSI_VIDEO_FIFO_DEPTH, DSI_MAX_THRESHOLD);
+
+ value = DSI_HOST_CONTROL_HS | DSI_HOST_CONTROL_CS |
+ DSI_HOST_CONTROL_ECC;
+ tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+
+ value = tegra_dsi_readl(dsi, DSI_CONTROL);
+ value |= DSI_CONTROL_HS_CLK_CTRL;
+ value &= ~DSI_CONTROL_TX_TRIG(3);
+ value &= ~DSI_CONTROL_DCS_ENABLE;
+ value |= DSI_CONTROL_VIDEO_ENABLE;
+ value &= ~DSI_CONTROL_HOST_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+ err = tegra_dsi_set_phy_timing(dsi);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < NUM_PKT_SEQ; i++)
+ tegra_dsi_writel(dsi, pkt_seq[i], DSI_PKT_SEQ_0_LO + i);
+
+ /* horizontal active pixels */
+ hact = mode->hdisplay * mul / div;
+
+ /* horizontal sync width */
+ hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
+ hsw -= 10;
+
+ /* horizontal back porch */
+ hbp = (mode->htotal - mode->hsync_end) * mul / div;
+ hbp -= 14;
+
+ /* horizontal front porch */
+ hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
+ hfp -= 8;
+
+ tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
+ tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
+ tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
+ tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
+
+ /* set SOL delay */
+ tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
+
+ /* enable display controller */
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= DSI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ value |= DISP_CTRL_MODE_C_DISPLAY;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ /* enable DSI controller */
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value |= DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ return 0;
+}
+
+static int tegra_output_dsi_disable(struct tegra_output *output)
+{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct tegra_dsi *dsi = to_dsi(output);
+ unsigned long value;
+
+ /* disable DSI controller */
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value &= ~DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ /*
+ * The following accesses registers of the display controller, so make
+ * sure it's only executed when the output is attached to one.
+ */
+ if (dc) {
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~DSI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+ }
+
+ clk_disable(dsi->clk);
+
+ return 0;
+}
+
+static int tegra_output_dsi_setup_clock(struct tegra_output *output,
+ struct clk *clk, unsigned long pclk)
+{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct drm_display_mode *mode = &dc->base.mode;
+ unsigned int timeout, mul, div, vrefresh;
+ struct tegra_dsi *dsi = to_dsi(output);
+ unsigned long bclk, plld, value;
+ struct clk *base;
+ int err;
+
+ err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
+ if (err < 0)
+ return err;
+
+ vrefresh = drm_mode_vrefresh(mode);
+
+ pclk = mode->htotal * mode->vtotal * vrefresh;
+ bclk = (pclk * mul) / (div * dsi->lanes);
+ plld = DIV_ROUND_UP(bclk * 8, 1000000);
+ pclk = (plld * 1000000) / 2;
+
+ err = clk_set_parent(clk, dsi->clk_parent);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to set parent clock: %d\n", err);
+ return err;
+ }
+
+ base = clk_get_parent(dsi->clk_parent);
+
+ /*
+ * This assumes that the parent clock is pll_d_out0 or pll_d2_out
+ * respectively, each of which divides the base pll_d by 2.
+ */
+ err = clk_set_rate(base, pclk * 2);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to set base clock rate to %lu Hz\n",
+ pclk * 2);
+ return err;
+ }
+
+ /*
+ * XXX: Move the below somewhere else so that we don't need to have
+ * access to the vrefresh in this function?
+ */
+
+ /* one frame high-speed transmission timeout */
+ timeout = (bclk / vrefresh) / 512;
+ value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
+ tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
+
+ /* 2 ms peripheral timeout for panel */
+ timeout = 2 * bclk / 512 * 1000;
+ value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
+ tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
+
+ value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
+ tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
+
+ return 0;
+}
+
+static int tegra_output_dsi_check_mode(struct tegra_output *output,
+ struct drm_display_mode *mode,
+ enum drm_mode_status *status)
+{
+ /*
+ * FIXME: For now, always assume that the mode is okay.
+ */
+
+ *status = MODE_OK;
+
+ return 0;
+}
+
+static const struct tegra_output_ops dsi_ops = {
+ .enable = tegra_output_dsi_enable,
+ .disable = tegra_output_dsi_disable,
+ .setup_clock = tegra_output_dsi_setup_clock,
+ .check_mode = tegra_output_dsi_check_mode,
+};
+
+static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
+{
+ unsigned long value;
+
+ value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
+
+ return 0;
+}
+
+static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
+{
+ unsigned long value;
+
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
+
+ /* start calibration */
+ tegra_dsi_pad_enable(dsi);
+
+ value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
+ DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
+ DSI_PAD_OUT_CLK(0x0);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
+
+ return tegra_mipi_calibrate(dsi->mipi);
+}
+
+static int tegra_dsi_init(struct host1x_client *client)
+{
+ struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+ unsigned long value, i;
+ int err;
+
+ dsi->output.type = TEGRA_OUTPUT_DSI;
+ dsi->output.dev = client->dev;
+ dsi->output.ops = &dsi_ops;
+
+ err = tegra_output_init(tegra->drm, &dsi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output setup failed: %d\n", err);
+ return err;
+ }
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_dsi_debugfs_init(dsi, tegra->drm->primary);
+ if (err < 0)
+ dev_err(dsi->dev, "debugfs setup failed: %d\n", err);
+ }
+
+ /*
+ * enable high-speed mode, checksum generation, ECC generation and
+ * disable raw mode
+ */
+ value = tegra_dsi_readl(dsi, DSI_HOST_CONTROL);
+ value |= DSI_HOST_CONTROL_ECC | DSI_HOST_CONTROL_CS |
+ DSI_HOST_CONTROL_HS;
+ value &= ~DSI_HOST_CONTROL_RAW;
+ tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+
+ tegra_dsi_writel(dsi, 0, DSI_SOL_DELAY);
+ tegra_dsi_writel(dsi, 0, DSI_MAX_THRESHOLD);
+
+ tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_CONTROL);
+
+ for (i = 0; i < 8; i++) {
+ tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_0 + i);
+ tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_8 + i);
+ }
+
+ for (i = 0; i < 12; i++)
+ tegra_dsi_writel(dsi, 0, DSI_PKT_SEQ_0_LO + i);
+
+ tegra_dsi_writel(dsi, 0, DSI_DCS_CMDS);
+
+ err = tegra_dsi_pad_calibrate(dsi);
+ if (err < 0) {
+ dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+ return err;
+ }
+
+ tegra_dsi_writel(dsi, DSI_POWER_CONTROL_ENABLE, DSI_POWER_CONTROL);
+ usleep_range(300, 1000);
+
+ return 0;
+}
+
+static int tegra_dsi_exit(struct host1x_client *client)
+{
+ struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+ int err;
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_dsi_debugfs_exit(dsi);
+ if (err < 0)
+ dev_err(dsi->dev, "debugfs cleanup failed: %d\n", err);
+ }
+
+ err = tegra_output_disable(&dsi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output failed to disable: %d\n", err);
+ return err;
+ }
+
+ err = tegra_output_exit(&dsi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output cleanup failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct host1x_client_ops dsi_client_ops = {
+ .init = tegra_dsi_init,
+ .exit = tegra_dsi_exit,
+};
+
+static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
+{
+ struct clk *parent;
+ int err;
+
+ parent = clk_get_parent(dsi->clk);
+ if (!parent)
+ return -EINVAL;
+
+ err = clk_set_parent(parent, dsi->clk_parent);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void tegra_dsi_initialize(struct tegra_dsi *dsi)
+{
+ unsigned int i;
+
+ tegra_dsi_writel(dsi, 0, DSI_POWER_CONTROL);
+
+ tegra_dsi_writel(dsi, 0, DSI_INT_ENABLE);
+ tegra_dsi_writel(dsi, 0, DSI_INT_STATUS);
+ tegra_dsi_writel(dsi, 0, DSI_INT_MASK);
+
+ tegra_dsi_writel(dsi, 0, DSI_HOST_CONTROL);
+ tegra_dsi_writel(dsi, 0, DSI_CONTROL);
+
+ tegra_dsi_writel(dsi, 0, DSI_SOL_DELAY);
+ tegra_dsi_writel(dsi, 0, DSI_MAX_THRESHOLD);
+
+ tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_CONTROL);
+
+ for (i = 0; i < 8; i++) {
+ tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_0 + i);
+ tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_8 + i);
+ }
+
+ for (i = 0; i < 12; i++)
+ tegra_dsi_writel(dsi, 0, DSI_PKT_SEQ_0_LO + i);
+
+ tegra_dsi_writel(dsi, 0, DSI_DCS_CMDS);
+
+ for (i = 0; i < 4; i++)
+ tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_0_1 + i);
+
+ tegra_dsi_writel(dsi, 0x00000000, DSI_PHY_TIMING_0);
+ tegra_dsi_writel(dsi, 0x00000000, DSI_PHY_TIMING_1);
+ tegra_dsi_writel(dsi, 0x000000ff, DSI_PHY_TIMING_2);
+ tegra_dsi_writel(dsi, 0x00000000, DSI_BTA_TIMING);
+
+ tegra_dsi_writel(dsi, 0, DSI_TIMEOUT_0);
+ tegra_dsi_writel(dsi, 0, DSI_TIMEOUT_1);
+ tegra_dsi_writel(dsi, 0, DSI_TO_TALLY);
+
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_CD);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CD_STATUS);
+ tegra_dsi_writel(dsi, 0, DSI_VIDEO_MODE_CONTROL);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
+
+ tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
+ tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_START);
+ tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_SIZE);
+}
+
+static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct tegra_dsi *dsi = host_to_tegra(host);
+ struct tegra_output *output = &dsi->output;
+
+ dsi->format = device->format;
+ dsi->lanes = device->lanes;
+
+ output->panel = of_drm_find_panel(device->dev.of_node);
+ if (output->panel) {
+ if (output->connector.dev)
+ drm_helper_hpd_irq_event(output->connector.dev);
+ }
+
+ return 0;
+}
+
+static int tegra_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct tegra_dsi *dsi = host_to_tegra(host);
+ struct tegra_output *output = &dsi->output;
+
+ if (output->panel && &device->dev == output->panel->dev) {
+ if (output->connector.dev)
+ drm_helper_hpd_irq_event(output->connector.dev);
+
+ output->panel = NULL;
+ }
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops tegra_dsi_host_ops = {
+ .attach = tegra_dsi_host_attach,
+ .detach = tegra_dsi_host_detach,
+};
+
+static int tegra_dsi_probe(struct platform_device *pdev)
+{
+ struct tegra_dsi *dsi;
+ struct resource *regs;
+ int err;
+
+ dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dsi->output.dev = dsi->dev = &pdev->dev;
+
+ err = tegra_output_probe(&dsi->output);
+ if (err < 0)
+ return err;
+
+ /*
+ * Assume these values by default. When a DSI peripheral driver
+ * attaches to the DSI host, the parameters will be taken from
+ * the attached device.
+ */
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
+ if (IS_ERR(dsi->rst))
+ return PTR_ERR(dsi->rst);
+
+ dsi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dsi->clk)) {
+ dev_err(&pdev->dev, "cannot get DSI clock\n");
+ return PTR_ERR(dsi->clk);
+ }
+
+ err = clk_prepare_enable(dsi->clk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cannot enable DSI clock\n");
+ return err;
+ }
+
+ dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
+ if (IS_ERR(dsi->clk_lp)) {
+ dev_err(&pdev->dev, "cannot get low-power clock\n");
+ return PTR_ERR(dsi->clk_lp);
+ }
+
+ err = clk_prepare_enable(dsi->clk_lp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cannot enable low-power clock\n");
+ return err;
+ }
+
+ dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
+ if (IS_ERR(dsi->clk_parent)) {
+ dev_err(&pdev->dev, "cannot get parent clock\n");
+ return PTR_ERR(dsi->clk_parent);
+ }
+
+ err = clk_prepare_enable(dsi->clk_parent);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cannot enable parent clock\n");
+ return err;
+ }
+
+ err = tegra_dsi_setup_clocks(dsi);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cannot setup clocks\n");
+ return err;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(dsi->regs))
+ return PTR_ERR(dsi->regs);
+
+ tegra_dsi_initialize(dsi);
+
+ dsi->mipi = tegra_mipi_request(&pdev->dev);
+ if (IS_ERR(dsi->mipi))
+ return PTR_ERR(dsi->mipi);
+
+ dsi->host.ops = &tegra_dsi_host_ops;
+ dsi->host.dev = &pdev->dev;
+
+ err = mipi_dsi_host_register(&dsi->host);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register DSI host: %d\n", err);
+ return err;
+ }
+
+ INIT_LIST_HEAD(&dsi->client.list);
+ dsi->client.ops = &dsi_client_ops;
+ dsi->client.dev = &pdev->dev;
+
+ err = host1x_client_register(&dsi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, dsi);
+
+ return 0;
+}
+
+static int tegra_dsi_remove(struct platform_device *pdev)
+{
+ struct tegra_dsi *dsi = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&dsi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ mipi_dsi_host_unregister(&dsi->host);
+ tegra_mipi_free(dsi->mipi);
+
+ clk_disable_unprepare(dsi->clk_parent);
+ clk_disable_unprepare(dsi->clk_lp);
+ clk_disable_unprepare(dsi->clk);
+
+ err = tegra_output_remove(&dsi->output);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to remove output: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id tegra_dsi_of_match[] = {
+ { .compatible = "nvidia,tegra114-dsi", },
+ { },
+};
+
+struct platform_driver tegra_dsi_driver = {
+ .driver = {
+ .name = "tegra-dsi",
+ .of_match_table = tegra_dsi_of_match,
+ },
+ .probe = tegra_dsi_probe,
+ .remove = tegra_dsi_remove,
+};
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
new file mode 100644
index 000000000000..00e79c1f448c
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef DRM_TEGRA_DSI_H
+#define DRM_TEGRA_DSI_H
+
+#define DSI_INCR_SYNCPT 0x00
+#define DSI_INCR_SYNCPT_CONTROL 0x01
+#define DSI_INCR_SYNCPT_ERROR 0x02
+#define DSI_CTXSW 0x08
+#define DSI_RD_DATA 0x09
+#define DSI_WR_DATA 0x0a
+#define DSI_POWER_CONTROL 0x0b
+#define DSI_POWER_CONTROL_ENABLE (1 << 0)
+#define DSI_INT_ENABLE 0x0c
+#define DSI_INT_STATUS 0x0d
+#define DSI_INT_MASK 0x0e
+#define DSI_HOST_CONTROL 0x0f
+#define DSI_HOST_CONTROL_RAW (1 << 6)
+#define DSI_HOST_CONTROL_HS (1 << 5)
+#define DSI_HOST_CONTROL_BTA (1 << 2)
+#define DSI_HOST_CONTROL_CS (1 << 1)
+#define DSI_HOST_CONTROL_ECC (1 << 0)
+#define DSI_CONTROL 0x10
+#define DSI_CONTROL_HS_CLK_CTRL (1 << 20)
+#define DSI_CONTROL_CHANNEL(c) (((c) & 0x3) << 16)
+#define DSI_CONTROL_FORMAT(f) (((f) & 0x3) << 12)
+#define DSI_CONTROL_TX_TRIG(x) (((x) & 0x3) << 8)
+#define DSI_CONTROL_LANES(n) (((n) & 0x3) << 4)
+#define DSI_CONTROL_DCS_ENABLE (1 << 3)
+#define DSI_CONTROL_SOURCE(s) (((s) & 0x1) << 2)
+#define DSI_CONTROL_VIDEO_ENABLE (1 << 1)
+#define DSI_CONTROL_HOST_ENABLE (1 << 0)
+#define DSI_SOL_DELAY 0x11
+#define DSI_MAX_THRESHOLD 0x12
+#define DSI_TRIGGER 0x13
+#define DSI_TX_CRC 0x14
+#define DSI_STATUS 0x15
+#define DSI_STATUS_IDLE (1 << 10)
+#define DSI_INIT_SEQ_CONTROL 0x1a
+#define DSI_INIT_SEQ_DATA_0 0x1b
+#define DSI_INIT_SEQ_DATA_1 0x1c
+#define DSI_INIT_SEQ_DATA_2 0x1d
+#define DSI_INIT_SEQ_DATA_3 0x1e
+#define DSI_INIT_SEQ_DATA_4 0x1f
+#define DSI_INIT_SEQ_DATA_5 0x20
+#define DSI_INIT_SEQ_DATA_6 0x21
+#define DSI_INIT_SEQ_DATA_7 0x22
+#define DSI_PKT_SEQ_0_LO 0x23
+#define DSI_PKT_SEQ_0_HI 0x24
+#define DSI_PKT_SEQ_1_LO 0x25
+#define DSI_PKT_SEQ_1_HI 0x26
+#define DSI_PKT_SEQ_2_LO 0x27
+#define DSI_PKT_SEQ_2_HI 0x28
+#define DSI_PKT_SEQ_3_LO 0x29
+#define DSI_PKT_SEQ_3_HI 0x2a
+#define DSI_PKT_SEQ_4_LO 0x2b
+#define DSI_PKT_SEQ_4_HI 0x2c
+#define DSI_PKT_SEQ_5_LO 0x2d
+#define DSI_PKT_SEQ_5_HI 0x2e
+#define DSI_DCS_CMDS 0x33
+#define DSI_PKT_LEN_0_1 0x34
+#define DSI_PKT_LEN_2_3 0x35
+#define DSI_PKT_LEN_4_5 0x36
+#define DSI_PKT_LEN_6_7 0x37
+#define DSI_PHY_TIMING_0 0x3c
+#define DSI_PHY_TIMING_1 0x3d
+#define DSI_PHY_TIMING_2 0x3e
+#define DSI_BTA_TIMING 0x3f
+
+#define DSI_TIMING_FIELD(value, period, hwinc) \
+ ((DIV_ROUND_CLOSEST(value, period) - (hwinc)) & 0xff)
+
+#define DSI_TIMEOUT_0 0x44
+#define DSI_TIMEOUT_LRX(x) (((x) & 0xffff) << 16)
+#define DSI_TIMEOUT_HTX(x) (((x) & 0xffff) << 0)
+#define DSI_TIMEOUT_1 0x45
+#define DSI_TIMEOUT_PR(x) (((x) & 0xffff) << 16)
+#define DSI_TIMEOUT_TA(x) (((x) & 0xffff) << 0)
+#define DSI_TO_TALLY 0x46
+#define DSI_TALLY_TA(x) (((x) & 0xff) << 16)
+#define DSI_TALLY_LRX(x) (((x) & 0xff) << 8)
+#define DSI_TALLY_HTX(x) (((x) & 0xff) << 0)
+#define DSI_PAD_CONTROL_0 0x4b
+#define DSI_PAD_CONTROL_VS1_PDIO(x) (((x) & 0xf) << 0)
+#define DSI_PAD_CONTROL_VS1_PDIO_CLK (1 << 8)
+#define DSI_PAD_CONTROL_VS1_PULLDN(x) (((x) & 0xf) << 16)
+#define DSI_PAD_CONTROL_VS1_PULLDN_CLK (1 << 24)
+#define DSI_PAD_CONTROL_CD 0x4c
+#define DSI_PAD_CD_STATUS 0x4d
+#define DSI_VIDEO_MODE_CONTROL 0x4e
+#define DSI_PAD_CONTROL_1 0x4f
+#define DSI_PAD_CONTROL_2 0x50
+#define DSI_PAD_OUT_CLK(x) (((x) & 0x7) << 0)
+#define DSI_PAD_LP_DN(x) (((x) & 0x7) << 4)
+#define DSI_PAD_LP_UP(x) (((x) & 0x7) << 8)
+#define DSI_PAD_SLEW_DN(x) (((x) & 0x7) << 12)
+#define DSI_PAD_SLEW_UP(x) (((x) & 0x7) << 16)
+#define DSI_PAD_CONTROL_3 0x51
+#define DSI_PAD_CONTROL_4 0x52
+#define DSI_GANGED_MODE_CONTROL 0x53
+#define DSI_GANGED_MODE_START 0x54
+#define DSI_GANGED_MODE_SIZE 0x55
+#define DSI_RAW_DATA_BYTE_COUNT 0x56
+#define DSI_ULTRA_LOW_POWER_CONTROL 0x57
+#define DSI_INIT_SEQ_DATA_8 0x58
+#define DSI_INIT_SEQ_DATA_9 0x59
+#define DSI_INIT_SEQ_DATA_10 0x5a
+#define DSI_INIT_SEQ_DATA_11 0x5b
+#define DSI_INIT_SEQ_DATA_12 0x5c
+#define DSI_INIT_SEQ_DATA_13 0x5d
+#define DSI_INIT_SEQ_DATA_14 0x5e
+#define DSI_INIT_SEQ_DATA_15 0x5f
+
+#endif
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index a3835e7de184..f7fca09d4921 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -18,10 +18,12 @@ static inline struct tegra_fb *to_tegra_fb(struct drm_framebuffer *fb)
return container_of(fb, struct tegra_fb, base);
}
+#ifdef CONFIG_DRM_TEGRA_FBDEV
static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
{
return container_of(helper, struct tegra_fbdev, base);
}
+#endif
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
unsigned int index)
@@ -98,8 +100,10 @@ static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
return ERR_PTR(-ENOMEM);
fb->planes = kzalloc(num_planes * sizeof(*planes), GFP_KERNEL);
- if (!fb->planes)
+ if (!fb->planes) {
+ kfree(fb);
return ERR_PTR(-ENOMEM);
+ }
fb->num_planes = num_planes;
@@ -172,6 +176,7 @@ unreference:
return ERR_PTR(err);
}
+#ifdef CONFIG_DRM_TEGRA_FBDEV
static struct fb_ops tegra_fb_ops = {
.owner = THIS_MODULE,
.fb_fillrect = sys_fillrect,
@@ -339,6 +344,15 @@ static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
kfree(fbdev);
}
+void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
+{
+ if (fbdev) {
+ drm_modeset_lock_all(fbdev->base.dev);
+ drm_fb_helper_restore_fbdev_mode(&fbdev->base);
+ drm_modeset_unlock_all(fbdev->base.dev);
+ }
+}
+
static void tegra_fb_output_poll_changed(struct drm_device *drm)
{
struct tegra_drm *tegra = drm->dev_private;
@@ -346,16 +360,20 @@ static void tegra_fb_output_poll_changed(struct drm_device *drm)
if (tegra->fbdev)
drm_fb_helper_hotplug_event(&tegra->fbdev->base);
}
+#endif
static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
.fb_create = tegra_fb_create,
+#ifdef CONFIG_DRM_TEGRA_FBDEV
.output_poll_changed = tegra_fb_output_poll_changed,
+#endif
};
int tegra_drm_fb_init(struct drm_device *drm)
{
+#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_drm *tegra = drm->dev_private;
- struct tegra_fbdev *fbdev;
+#endif
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
@@ -365,28 +383,21 @@ int tegra_drm_fb_init(struct drm_device *drm)
drm->mode_config.funcs = &tegra_drm_mode_funcs;
- fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
- drm->mode_config.num_connector);
- if (IS_ERR(fbdev))
- return PTR_ERR(fbdev);
-
- tegra->fbdev = fbdev;
+#ifdef CONFIG_DRM_TEGRA_FBDEV
+ tegra->fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (IS_ERR(tegra->fbdev))
+ return PTR_ERR(tegra->fbdev);
+#endif
return 0;
}
void tegra_drm_fb_exit(struct drm_device *drm)
{
+#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_drm *tegra = drm->dev_private;
tegra_fbdev_free(tegra->fbdev);
-}
-
-void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
-{
- if (fbdev) {
- drm_modeset_lock_all(fbdev->base.dev);
- drm_fb_helper_restore_fbdev_mode(&fbdev->base);
- drm_modeset_unlock_all(fbdev->base.dev);
- }
+#endif
}
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 28a9cbc07ab9..ef853e558036 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -18,6 +18,7 @@
* GNU General Public License for more details.
*/
+#include <linux/dma-buf.h>
#include <drm/tegra_drm.h>
#include "gem.h"
@@ -83,7 +84,7 @@ static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
return bo;
}
-const struct host1x_bo_ops tegra_bo_ops = {
+static const struct host1x_bo_ops tegra_bo_ops = {
.get = tegra_bo_get,
.put = tegra_bo_put,
.pin = tegra_bo_pin,
@@ -145,7 +146,6 @@ err_dma:
kfree(bo);
return ERR_PTR(err);
-
}
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
@@ -174,13 +174,87 @@ err:
return ERR_PTR(ret);
}
+struct tegra_bo *tegra_bo_import(struct drm_device *drm, struct dma_buf *buf)
+{
+ struct dma_buf_attachment *attach;
+ struct tegra_bo *bo;
+ ssize_t size;
+ int err;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ host1x_bo_init(&bo->base, &tegra_bo_ops);
+ size = round_up(buf->size, PAGE_SIZE);
+
+ err = drm_gem_object_init(drm, &bo->gem, size);
+ if (err < 0)
+ goto free;
+
+ err = drm_gem_create_mmap_offset(&bo->gem);
+ if (err < 0)
+ goto release;
+
+ attach = dma_buf_attach(buf, drm->dev);
+ if (IS_ERR(attach)) {
+ err = PTR_ERR(attach);
+ goto free_mmap;
+ }
+
+ get_dma_buf(buf);
+
+ bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
+ if (!bo->sgt) {
+ err = -ENOMEM;
+ goto detach;
+ }
+
+ if (IS_ERR(bo->sgt)) {
+ err = PTR_ERR(bo->sgt);
+ goto detach;
+ }
+
+ if (bo->sgt->nents > 1) {
+ err = -EINVAL;
+ goto detach;
+ }
+
+ bo->paddr = sg_dma_address(bo->sgt->sgl);
+ bo->gem.import_attach = attach;
+
+ return bo;
+
+detach:
+ if (!IS_ERR_OR_NULL(bo->sgt))
+ dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);
+
+ dma_buf_detach(buf, attach);
+ dma_buf_put(buf);
+free_mmap:
+ drm_gem_free_mmap_offset(&bo->gem);
+release:
+ drm_gem_object_release(&bo->gem);
+free:
+ kfree(bo);
+
+ return ERR_PTR(err);
+}
+
void tegra_bo_free_object(struct drm_gem_object *gem)
{
struct tegra_bo *bo = to_tegra_bo(gem);
+ if (gem->import_attach) {
+ dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
+ DMA_TO_DEVICE);
+ drm_prime_gem_destroy(gem, NULL);
+ } else {
+ tegra_bo_destroy(gem->dev, bo);
+ }
+
drm_gem_free_mmap_offset(gem);
drm_gem_object_release(gem);
- tegra_bo_destroy(gem->dev, bo);
kfree(bo);
}
@@ -256,3 +330,106 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
}
+
+static struct sg_table *
+tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *gem = attach->dmabuf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+ struct sg_table *sgt;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return NULL;
+
+ if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
+ kfree(sgt);
+ return NULL;
+ }
+
+ sg_dma_address(sgt->sgl) = bo->paddr;
+ sg_dma_len(sgt->sgl) = gem->size;
+
+ return sgt;
+}
+
+static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static void tegra_gem_prime_release(struct dma_buf *buf)
+{
+ drm_gem_dmabuf_release(buf);
+}
+
+static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
+ unsigned long page)
+{
+ return NULL;
+}
+
+static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
+ unsigned long page,
+ void *addr)
+{
+}
+
+static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
+{
+ return NULL;
+}
+
+static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
+ void *addr)
+{
+}
+
+static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
+ .map_dma_buf = tegra_gem_prime_map_dma_buf,
+ .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
+ .release = tegra_gem_prime_release,
+ .kmap_atomic = tegra_gem_prime_kmap_atomic,
+ .kunmap_atomic = tegra_gem_prime_kunmap_atomic,
+ .kmap = tegra_gem_prime_kmap,
+ .kunmap = tegra_gem_prime_kunmap,
+ .mmap = tegra_gem_prime_mmap,
+};
+
+struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
+ struct drm_gem_object *gem,
+ int flags)
+{
+ return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
+ flags);
+}
+
+struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
+ struct dma_buf *buf)
+{
+ struct tegra_bo *bo;
+
+ if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
+ struct drm_gem_object *gem = buf->priv;
+
+ if (gem->dev == drm) {
+ drm_gem_object_reference(gem);
+ return gem;
+ }
+ }
+
+ bo = tegra_bo_import(drm, buf);
+ if (IS_ERR(bo))
+ return ERR_CAST(bo);
+
+ return &bo->gem;
+}
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 7674000bf47d..ffd4f792b410 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -31,6 +31,7 @@ struct tegra_bo {
struct drm_gem_object gem;
struct host1x_bo base;
unsigned long flags;
+ struct sg_table *sgt;
dma_addr_t paddr;
void *vaddr;
};
@@ -40,8 +41,6 @@ static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
return container_of(gem, struct tegra_bo, gem);
}
-extern const struct host1x_bo_ops tegra_bo_ops;
-
struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
unsigned long flags);
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
@@ -59,4 +58,10 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
extern const struct vm_operations_struct tegra_bo_vm_ops;
+struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
+ struct drm_gem_object *gem,
+ int flags);
+struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
+ struct dma_buf *buf);
+
#endif
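
Editorial note, not part of the patch: the prototypes above only take effect once the hooks are plugged into the driver's struct drm_driver (that hookup happens in drm.c, outside the hunks shown here). Below is a fragmentary sketch of what that wiring typically looks like on kernels of this vintage; the field names assume the 3.13-era drm_driver/drm_gem_prime API and the struct is deliberately incomplete.

/* Sketch only: PRIME-related drm_driver fields; other mandatory
 * fields (fops, gem_free_object, ...) omitted for brevity. */
static struct drm_driver tegra_drm_driver_sketch = {
	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,

	/* generic fd <-> handle plumbing from the DRM core */
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,

	/* driver-specific hooks added by this patch */
	.gem_prime_export	= tegra_gem_prime_export,
	.gem_prime_import	= tegra_gem_prime_import,
};
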
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 4cec8f526af7..0cbb24b1ae04 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -11,6 +11,7 @@
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/tegra-powergate.h>
#include "drm.h"
@@ -22,6 +23,8 @@ struct gr3d {
struct host1x_channel *channel;
struct clk *clk_secondary;
struct clk *clk;
+ struct reset_control *rst_secondary;
+ struct reset_control *rst;
DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
};
@@ -255,15 +258,29 @@ static int gr3d_probe(struct platform_device *pdev)
return PTR_ERR(gr3d->clk);
}
+ gr3d->rst = devm_reset_control_get(&pdev->dev, "3d");
+ if (IS_ERR(gr3d->rst)) {
+ dev_err(&pdev->dev, "cannot get reset\n");
+ return PTR_ERR(gr3d->rst);
+ }
+
if (of_device_is_compatible(np, "nvidia,tegra30-gr3d")) {
gr3d->clk_secondary = devm_clk_get(&pdev->dev, "3d2");
if (IS_ERR(gr3d->clk_secondary)) {
dev_err(&pdev->dev, "cannot get secondary clock\n");
return PTR_ERR(gr3d->clk_secondary);
}
+
+ gr3d->rst_secondary = devm_reset_control_get(&pdev->dev,
+ "3d2");
+ if (IS_ERR(gr3d->rst_secondary)) {
+ dev_err(&pdev->dev, "cannot get secondary reset\n");
+ return PTR_ERR(gr3d->rst_secondary);
+ }
}
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, gr3d->clk);
+ err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, gr3d->clk,
+ gr3d->rst);
if (err < 0) {
dev_err(&pdev->dev, "failed to power up 3D unit\n");
return err;
@@ -271,7 +288,8 @@ static int gr3d_probe(struct platform_device *pdev)
if (gr3d->clk_secondary) {
err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D1,
- gr3d->clk_secondary);
+ gr3d->clk_secondary,
+ gr3d->rst_secondary);
if (err < 0) {
dev_err(&pdev->dev,
"failed to power up secondary 3D unit\n");
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 0cd9bc2056e8..6928015d11a4 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -8,10 +8,10 @@
*/
#include <linux/clk.h>
-#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
#include <linux/hdmi.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include "hdmi.h"
#include "drm.h"
@@ -40,6 +40,7 @@ struct tegra_hdmi {
struct host1x_client client;
struct tegra_output output;
struct device *dev;
+ bool enabled;
struct regulator *vdd;
struct regulator *pll;
@@ -49,6 +50,7 @@ struct tegra_hdmi {
struct clk *clk_parent;
struct clk *clk;
+ struct reset_control *rst;
const struct tegra_hdmi_config *config;
@@ -378,7 +380,7 @@ static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
if (f > 96000)
delta = 2;
- else if (f > 480000)
+ else if (f > 48000)
delta = 6;
else
delta = 9;
@@ -698,6 +700,9 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
int retries = 1000;
int err;
+ if (hdmi->enabled)
+ return 0;
+
hdmi->dvi = !tegra_output_is_hdmi(output);
pclk = mode->clock * 1000;
@@ -731,9 +736,9 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
return err;
}
- tegra_periph_reset_assert(hdmi->clk);
+ reset_control_assert(hdmi->rst);
usleep_range(1000, 2000);
- tegra_periph_reset_deassert(hdmi->clk);
+ reset_control_deassert(hdmi->rst);
tegra_dc_writel(dc, VSYNC_H_POSITION(1),
DC_DISP_DISP_TIMING_OPTIONS);
@@ -838,10 +843,6 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
value |= SOR_CSTM_ROTCLK(2);
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM);
- tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
- tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
-
/* start SOR */
tegra_hdmi_writel(hdmi,
SOR_PWR_NORMAL_STATE_PU |
@@ -891,31 +892,67 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
HDMI_NV_PDISP_SOR_STATE1);
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
- tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
-
- value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
- PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= HDMI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
- value = DISP_CTRL_MODE_C_DISPLAY;
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ value |= DISP_CTRL_MODE_C_DISPLAY;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
/* TODO: add HDCP support */
+ hdmi->enabled = true;
+
return 0;
}
static int tegra_output_hdmi_disable(struct tegra_output *output)
{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
struct tegra_hdmi *hdmi = to_hdmi(output);
+ unsigned long value;
- tegra_periph_reset_assert(hdmi->clk);
+ if (!hdmi->enabled)
+ return 0;
+
+ /*
+ * The following accesses registers of the display controller, so make
+ * sure it's only executed when the output is attached to one.
+ */
+ if (dc) {
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~HDMI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+ }
+
+ reset_control_assert(hdmi->rst);
clk_disable(hdmi->clk);
regulator_disable(hdmi->pll);
+ hdmi->enabled = false;
+
return 0;
}
@@ -959,7 +996,7 @@ static int tegra_output_hdmi_check_mode(struct tegra_output *output,
parent = clk_get_parent(hdmi->clk_parent);
err = clk_round_rate(parent, pclk * 4);
- if (err < 0)
+ if (err <= 0)
*status = MODE_NOCLOCK;
else
*status = MODE_OK;
@@ -1338,6 +1375,12 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
return PTR_ERR(hdmi->clk);
}
+ hdmi->rst = devm_reset_control_get(&pdev->dev, "hdmi");
+ if (IS_ERR(hdmi->rst)) {
+ dev_err(&pdev->dev, "failed to get reset\n");
+ return PTR_ERR(hdmi->rst);
+ }
+
err = clk_prepare(hdmi->clk);
if (err < 0)
return err;
@@ -1375,9 +1418,6 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
return err;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs)
- return -ENXIO;
-
hdmi->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
diff --git a/drivers/gpu/drm/tegra/mipi-phy.c b/drivers/gpu/drm/tegra/mipi-phy.c
new file mode 100644
index 000000000000..e2c4aedaee78
--- /dev/null
+++ b/drivers/gpu/drm/tegra/mipi-phy.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#include "mipi-phy.h"
+
+/*
+ * Default D-PHY timings based on MIPI D-PHY specification. Derived from
+ * the valid ranges specified in Section 5.9 of the D-PHY specification
+ * with minor adjustments.
+ */
+int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
+ unsigned long period)
+{
+ timing->clkmiss = 0;
+ timing->clkpost = 70 + 52 * period;
+ timing->clkpre = 8;
+ timing->clkprepare = 65;
+ timing->clksettle = 95;
+ timing->clktermen = 0;
+ timing->clktrail = 80;
+ timing->clkzero = 260;
+ timing->dtermen = 0;
+ timing->eot = 0;
+ timing->hsexit = 120;
+ timing->hsprepare = 65 + 5 * period;
+ timing->hszero = 145 + 5 * period;
+ timing->hssettle = 85 + 6 * period;
+ timing->hsskip = 40;
+ timing->hstrail = max(8 * period, 60 + 4 * period);
+ timing->init = 100000;
+ timing->lpx = 60;
+ timing->taget = 5 * timing->lpx;
+ timing->tago = 4 * timing->lpx;
+ timing->tasure = 2 * timing->lpx;
+ timing->wakeup = 1000000;
+
+ return 0;
+}
+
+/*
+ * Validate D-PHY timing according to MIPI Alliance Specification for D-PHY,
+ * Section 5.9 "Global Operation Timing Parameters".
+ */
+int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing,
+ unsigned long period)
+{
+ if (timing->clkmiss > 60)
+ return -EINVAL;
+
+ if (timing->clkpost < (60 + 52 * period))
+ return -EINVAL;
+
+ if (timing->clkpre < 8)
+ return -EINVAL;
+
+ if (timing->clkprepare < 38 || timing->clkprepare > 95)
+ return -EINVAL;
+
+ if (timing->clksettle < 95 || timing->clksettle > 300)
+ return -EINVAL;
+
+ if (timing->clktermen > 38)
+ return -EINVAL;
+
+ if (timing->clktrail < 60)
+ return -EINVAL;
+
+ if (timing->clkprepare + timing->clkzero < 300)
+ return -EINVAL;
+
+ if (timing->dtermen > 35 + 4 * period)
+ return -EINVAL;
+
+ if (timing->eot > 105 + 12 * period)
+ return -EINVAL;
+
+ if (timing->hsexit < 100)
+ return -EINVAL;
+
+ if (timing->hsprepare < 40 + 4 * period ||
+ timing->hsprepare > 85 + 6 * period)
+ return -EINVAL;
+
+ if (timing->hsprepare + timing->hszero < 145 + 10 * period)
+ return -EINVAL;
+
+ if ((timing->hssettle < 85 + 6 * period) ||
+ (timing->hssettle > 145 + 10 * period))
+ return -EINVAL;
+
+ if (timing->hsskip < 40 || timing->hsskip > 55 + 4 * period)
+ return -EINVAL;
+
+ if (timing->hstrail < max(8 * period, 60 + 4 * period))
+ return -EINVAL;
+
+ if (timing->init < 100000)
+ return -EINVAL;
+
+ if (timing->lpx < 50)
+ return -EINVAL;
+
+ if (timing->taget != 5 * timing->lpx)
+ return -EINVAL;
+
+ if (timing->tago != 4 * timing->lpx)
+ return -EINVAL;
+
+ if (timing->tasure < timing->lpx || timing->tasure > 2 * timing->lpx)
+ return -EINVAL;
+
+ if (timing->wakeup < 1000000)
+ return -EINVAL;
+
+ return 0;
+}
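
Editorial note, not part of the patch: the two helpers added above take a "period" argument; since every timing field is documented as nanoseconds and the constants scale linearly with it, the sketch below assumes it is the high-speed bit period in nanoseconds. A minimal, hypothetical caller (names are illustrative, not from the commit):

/* Hypothetical usage sketch of the new mipi-phy helpers. */
static int example_dphy_setup(unsigned long bitrate_hz)
{
	struct mipi_dphy_timing timing;
	unsigned long period;
	int err;

	/* HS bit period in ns, rounded up; e.g. 500 MHz -> 2 ns */
	period = DIV_ROUND_UP(1000000000UL, bitrate_hz);

	err = mipi_dphy_timing_get_default(&timing, period);
	if (err < 0)
		return err;

	/* the defaults are chosen inside the valid ranges, so this is
	 * mostly a sanity check before programming the hardware */
	return mipi_dphy_timing_validate(&timing, period);
}
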
diff --git a/drivers/gpu/drm/tegra/mipi-phy.h b/drivers/gpu/drm/tegra/mipi-phy.h
new file mode 100644
index 000000000000..d3591694432d
--- /dev/null
+++ b/drivers/gpu/drm/tegra/mipi-phy.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef DRM_TEGRA_MIPI_PHY_H
+#define DRM_TEGRA_MIPI_PHY_H
+
+/*
+ * D-PHY timing parameters
+ *
+ * A detailed description of these parameters can be found in the MIPI
+ * Alliance Specification for D-PHY, Section 5.9 "Global Operation Timing
+ * Parameters".
+ *
+ * All parameters are specified in nanoseconds.
+ */
+struct mipi_dphy_timing {
+ unsigned int clkmiss;
+ unsigned int clkpost;
+ unsigned int clkpre;
+ unsigned int clkprepare;
+ unsigned int clksettle;
+ unsigned int clktermen;
+ unsigned int clktrail;
+ unsigned int clkzero;
+ unsigned int dtermen;
+ unsigned int eot;
+ unsigned int hsexit;
+ unsigned int hsprepare;
+ unsigned int hszero;
+ unsigned int hssettle;
+ unsigned int hsskip;
+ unsigned int hstrail;
+ unsigned int init;
+ unsigned int lpx;
+ unsigned int taget;
+ unsigned int tago;
+ unsigned int tasure;
+ unsigned int wakeup;
+};
+
+int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
+ unsigned long period);
+int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing,
+ unsigned long period);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 2cb0065e0578..57cecbd18ca8 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -9,6 +9,7 @@
#include <linux/of_gpio.h>
+#include <drm/drm_panel.h>
#include "drm.h"
static int tegra_connector_get_modes(struct drm_connector *connector)
@@ -17,6 +18,16 @@ static int tegra_connector_get_modes(struct drm_connector *connector)
struct edid *edid = NULL;
int err = 0;
+ /*
+ * If the panel provides one or more modes, use them exclusively and
+ * ignore any other means of obtaining a mode.
+ */
+ if (output->panel) {
+ err = output->panel->funcs->get_modes(output->panel);
+ if (err > 0)
+ return err;
+ }
+
if (output->edid)
edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
else if (output->ddc)
@@ -72,6 +83,11 @@ tegra_connector_detect(struct drm_connector *connector, bool force)
else
status = connector_status_connected;
} else {
+ if (!output->panel)
+ status = connector_status_disconnected;
+ else
+ status = connector_status_connected;
+
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
status = connector_status_connected;
}
@@ -115,6 +131,16 @@ static const struct drm_encoder_funcs encoder_funcs = {
static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct drm_panel *panel = output->panel;
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ drm_panel_disable(panel);
+ tegra_output_disable(output);
+ } else {
+ tegra_output_enable(output);
+ drm_panel_enable(panel);
+ }
}
static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
@@ -163,14 +189,22 @@ static irqreturn_t hpd_irq(int irq, void *data)
int tegra_output_probe(struct tegra_output *output)
{
+ struct device_node *ddc, *panel;
enum of_gpio_flags flags;
- struct device_node *ddc;
- size_t size;
- int err;
+ int err, size;
if (!output->of_node)
output->of_node = output->dev->of_node;
+ panel = of_parse_phandle(output->of_node, "nvidia,panel", 0);
+ if (panel) {
+ output->panel = of_drm_find_panel(panel);
+ if (!output->panel)
+ return -EPROBE_DEFER;
+
+ of_node_put(panel);
+ }
+
output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
@@ -185,9 +219,6 @@ int tegra_output_probe(struct tegra_output *output)
of_node_put(ddc);
}
- if (!output->edid && !output->ddc)
- return -ENODEV;
-
output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
"nvidia,hpd-gpio", 0,
&flags);
@@ -256,6 +287,11 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
encoder = DRM_MODE_ENCODER_TMDS;
break;
+ case TEGRA_OUTPUT_DSI:
+ connector = DRM_MODE_CONNECTOR_DSI;
+ encoder = DRM_MODE_ENCODER_DSI;
+ break;
+
default:
connector = DRM_MODE_CONNECTOR_Unknown;
encoder = DRM_MODE_ENCODER_NONE;
@@ -267,6 +303,9 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
drm_connector_helper_add(&output->connector, &connector_helper_funcs);
output->connector.dpms = DRM_MODE_DPMS_OFF;
+ if (output->panel)
+ drm_panel_attach(output->panel, &output->connector);
+
drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 3b29018913a5..338f7f6561d7 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -87,15 +87,60 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
static int tegra_output_rgb_enable(struct tegra_output *output)
{
struct tegra_rgb *rgb = to_rgb(output);
+ unsigned long value;
tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
+ value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
+ tegra_dc_writel(rgb->dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
+
+ /* XXX: parameterize? */
+ value = tegra_dc_readl(rgb->dc, DC_COM_PIN_OUTPUT_POLARITY(1));
+ value &= ~LVS_OUTPUT_POLARITY_LOW;
+ value &= ~LHS_OUTPUT_POLARITY_LOW;
+ tegra_dc_writel(rgb->dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
+
+ /* XXX: parameterize? */
+ value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
+ DISP_ORDER_RED_BLUE;
+ tegra_dc_writel(rgb->dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
+
+ /* XXX: parameterize? */
+ value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE;
+ tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS);
+
+ value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ value |= DISP_CTRL_MODE_C_DISPLAY;
+ tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+ tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
return 0;
}
static int tegra_output_rgb_disable(struct tegra_output *output)
{
struct tegra_rgb *rgb = to_rgb(output);
+ unsigned long value;
+
+ value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+ tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
@@ -213,7 +258,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
* RGB outputs are an exception, so we make sure they can be attached
* to only their parent display controller.
*/
- rgb->output.encoder.possible_crtcs = 1 << dc->pipe;
+ rgb->output.encoder.possible_crtcs = drm_crtc_mask(&dc->base);
return 0;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 116da199b942..171a8203892c 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -311,7 +311,7 @@ static void tilcdc_lastclose(struct drm_device *dev)
drm_fbdev_cma_restore_mode(priv->fbdev);
}
-static irqreturn_t tilcdc_irq(DRM_IRQ_ARGS)
+static irqreturn_t tilcdc_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
struct tilcdc_drm_private *priv = dev->dev_private;
@@ -444,7 +444,7 @@ static int tilcdc_mm_show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- return drm_mm_dump_table(m, dev->mm_private);
+ return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
}
static struct drm_info_list tilcdc_debugfs_list[] = {
@@ -594,7 +594,7 @@ static int tilcdc_pdev_probe(struct platform_device *pdev)
static int tilcdc_pdev_remove(struct platform_device *pdev)
{
- drm_platform_exit(&tilcdc_driver, pdev);
+ drm_put_dev(platform_get_drvdata(pdev));
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 07e02c4bf5a8..a06651309388 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -957,7 +957,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_mem_space);
-int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
bool interruptible,
bool no_wait_gpu)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 15b86a94949d..1df856f78568 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -187,7 +187,7 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
}
}
-int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -219,7 +219,7 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
return 0;
}
-void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void *virtual)
{
struct ttm_mem_type_manager *man;
@@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
* Don't move nonexistent data. Clear destination instead.
*/
if (old_iomap == NULL &&
- (ttm == NULL || ttm->state == tt_unpopulated)) {
+ (ttm == NULL || (ttm->state == tt_unpopulated &&
+ !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
goto out2;
}
@@ -593,7 +594,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
if (start_page > bo->num_pages)
return -EINVAL;
#if 0
- if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+ if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
return -EPERM;
#endif
(void) ttm_mem_io_lock(man, false);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index b249ab9b1eb2..801231c9ae48 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -132,6 +132,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
}
+ /*
+ * Refuse to fault imported pages. This should be handled
+ * (if at all) by redirecting mmap to the exporter.
+ */
+ if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ retval = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
if (bdev->driver->fault_reserve_notify) {
ret = bdev->driver->fault_reserve_notify(bo);
switch (ret) {
@@ -169,9 +178,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
- drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
- page_last = vma_pages(vma) +
- drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
+ vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+ page_last = vma_pages(vma) + vma->vm_pgoff -
+ drm_vma_node_start(&bo->vma_node);
if (unlikely(page_offset >= bo->num_pages)) {
retval = VM_FAULT_SIGBUS;
@@ -217,10 +226,17 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
} else if (unlikely(!page)) {
break;
}
+ page->mapping = vma->vm_file->f_mapping;
+ page->index = drm_vma_node_start(&bo->vma_node) +
+ page_offset;
pfn = page_to_pfn(page);
}
- ret = vm_insert_mixed(&cvma, address, pfn);
+ if (vma->vm_flags & VM_MIXEDMAP)
+ ret = vm_insert_mixed(&cvma, address, pfn);
+ else
+ ret = vm_insert_pfn(&cvma, address, pfn);
+
/*
* Somebody beat us to this PTE or prefaulting to
* an already populated PTE, or prefaulting error.
@@ -250,6 +266,8 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
struct ttm_buffer_object *bo =
(struct ttm_buffer_object *)vma->vm_private_data;
+ WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
+
(void)ttm_bo_reference(bo);
}
@@ -319,7 +337,14 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
*/
vma->vm_private_data = bo;
- vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+
+ /*
+ * PFNMAP is faster than MIXEDMAP due to reduced page
+ * administration. So use MIXEDMAP only if private VMA, where
+ * we need to support COW.
+ */
+ vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
return 0;
out_unref:
ttm_bo_unref(&bo);
@@ -334,7 +359,8 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
vma->vm_ops = &ttm_bo_vm_ops;
vma->vm_private_data = ttm_bo_reference(bo);
- vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND;
return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index 3daa9a3930b8..6a954544727f 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -186,14 +186,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
}
EXPORT_SYMBOL(ttm_write_lock);
-void ttm_write_lock_downgrade(struct ttm_lock *lock)
-{
- spin_lock(&lock->lock);
- lock->rw = 1;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
-}
-
static int __ttm_vt_unlock(struct ttm_lock *lock)
{
int ret = 0;
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 6fe7b92a82d1..53b51c4e671a 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -68,7 +68,7 @@
struct ttm_object_file {
struct ttm_object_device *tdev;
- rwlock_t lock;
+ spinlock_t lock;
struct list_head ref_list;
struct drm_open_hash ref_hash[TTM_REF_NUM];
struct kref refcount;
@@ -118,6 +118,7 @@ struct ttm_object_device {
*/
struct ttm_ref_object {
+ struct rcu_head rcu_head;
struct drm_hash_item hash;
struct list_head head;
struct kref kref;
@@ -210,10 +211,9 @@ static void ttm_release_base(struct kref *kref)
* call_rcu() or ttm_base_object_kfree().
*/
- if (base->refcount_release) {
- ttm_object_file_unref(&base->tfile);
+ ttm_object_file_unref(&base->tfile);
+ if (base->refcount_release)
base->refcount_release(&base);
- }
}
void ttm_base_object_unref(struct ttm_base_object **p_base)
@@ -229,32 +229,46 @@ EXPORT_SYMBOL(ttm_base_object_unref);
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint32_t key)
{
- struct ttm_object_device *tdev = tfile->tdev;
- struct ttm_base_object *uninitialized_var(base);
+ struct ttm_base_object *base = NULL;
struct drm_hash_item *hash;
+ struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
int ret;
rcu_read_lock();
- ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
+ ret = drm_ht_find_item_rcu(ht, key, &hash);
if (likely(ret == 0)) {
- base = drm_hash_entry(hash, struct ttm_base_object, hash);
- ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
+ base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+ if (!kref_get_unless_zero(&base->refcount))
+ base = NULL;
}
rcu_read_unlock();
- if (unlikely(ret != 0))
- return NULL;
+ return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup);
- if (tfile != base->tfile && !base->shareable) {
- pr_err("Attempted access of non-shareable object\n");
- ttm_base_object_unref(&base);
- return NULL;
+struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
+{
+ struct ttm_base_object *base = NULL;
+ struct drm_hash_item *hash;
+ struct drm_open_hash *ht = &tdev->object_hash;
+ int ret;
+
+ rcu_read_lock();
+ ret = drm_ht_find_item_rcu(ht, key, &hash);
+
+ if (likely(ret == 0)) {
+ base = drm_hash_entry(hash, struct ttm_base_object, hash);
+ if (!kref_get_unless_zero(&base->refcount))
+ base = NULL;
}
+ rcu_read_unlock();
return base;
}
-EXPORT_SYMBOL(ttm_base_object_lookup);
+EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
@@ -266,21 +280,25 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
int ret = -EINVAL;
+ if (base->tfile != tfile && !base->shareable)
+ return -EPERM;
+
if (existed != NULL)
*existed = true;
while (ret == -EINVAL) {
- read_lock(&tfile->lock);
- ret = drm_ht_find_item(ht, base->hash.key, &hash);
+ rcu_read_lock();
+ ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
- kref_get(&ref->kref);
- read_unlock(&tfile->lock);
- break;
+ if (kref_get_unless_zero(&ref->kref)) {
+ rcu_read_unlock();
+ break;
+ }
}
- read_unlock(&tfile->lock);
+ rcu_read_unlock();
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
false, false);
if (unlikely(ret != 0))
@@ -297,19 +315,19 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
ref->ref_type = ref_type;
kref_init(&ref->kref);
- write_lock(&tfile->lock);
- ret = drm_ht_insert_item(ht, &ref->hash);
+ spin_lock(&tfile->lock);
+ ret = drm_ht_insert_item_rcu(ht, &ref->hash);
if (likely(ret == 0)) {
list_add_tail(&ref->head, &tfile->ref_list);
kref_get(&base->refcount);
- write_unlock(&tfile->lock);
+ spin_unlock(&tfile->lock);
if (existed != NULL)
*existed = false;
break;
}
- write_unlock(&tfile->lock);
+ spin_unlock(&tfile->lock);
BUG_ON(ret != -EINVAL);
ttm_mem_global_free(mem_glob, sizeof(*ref));
@@ -330,17 +348,17 @@ static void ttm_ref_object_release(struct kref *kref)
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
ht = &tfile->ref_hash[ref->ref_type];
- (void)drm_ht_remove_item(ht, &ref->hash);
+ (void)drm_ht_remove_item_rcu(ht, &ref->hash);
list_del(&ref->head);
- write_unlock(&tfile->lock);
+ spin_unlock(&tfile->lock);
if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
base->ref_obj_release(base, ref->ref_type);
ttm_base_object_unref(&ref->obj);
ttm_mem_global_free(mem_glob, sizeof(*ref));
- kfree(ref);
- write_lock(&tfile->lock);
+ kfree_rcu(ref, rcu_head);
+ spin_lock(&tfile->lock);
}
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
@@ -351,15 +369,15 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
struct drm_hash_item *hash;
int ret;
- write_lock(&tfile->lock);
+ spin_lock(&tfile->lock);
ret = drm_ht_find_item(ht, key, &hash);
if (unlikely(ret != 0)) {
- write_unlock(&tfile->lock);
+ spin_unlock(&tfile->lock);
return -EINVAL;
}
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
kref_put(&ref->kref, ttm_ref_object_release);
- write_unlock(&tfile->lock);
+ spin_unlock(&tfile->lock);
return 0;
}
EXPORT_SYMBOL(ttm_ref_object_base_unref);
@@ -372,7 +390,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
struct ttm_object_file *tfile = *p_tfile;
*p_tfile = NULL;
- write_lock(&tfile->lock);
+ spin_lock(&tfile->lock);
/*
* Since we release the lock within the loop, we have to
@@ -388,7 +406,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
for (i = 0; i < TTM_REF_NUM; ++i)
drm_ht_remove(&tfile->ref_hash[i]);
- write_unlock(&tfile->lock);
+ spin_unlock(&tfile->lock);
ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);
@@ -404,7 +422,7 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
if (unlikely(tfile == NULL))
return NULL;
- rwlock_init(&tfile->lock);
+ spin_lock_init(&tfile->lock);
tfile->tdev = tdev;
kref_init(&tfile->refcount);
INIT_LIST_HEAD(&tfile->ref_list);
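
Editorial note, not part of the patch: the ttm_object.c changes above replace the tfile rwlock with a spinlock for writers and RCU for the lookup paths; ttm_ref_object is freed with kfree_rcu() and readers take a reference with kref_get_unless_zero(), which fails once the object is on its way out. A generic sketch of that lookup pattern, with hypothetical names (obj, obj_hash_find):

/*
 * Generic RCU lookup sketch (hypothetical names). The hash walk runs
 * lockless under rcu_read_lock(); kref_get_unless_zero() only succeeds
 * while the refcount is still non-zero, and because the object is freed
 * via kfree_rcu() the reader never touches memory that has already been
 * reclaimed.
 */
static struct obj *obj_lookup(struct obj_hash *ht, u32 key)
{
	struct obj *o;

	rcu_read_lock();
	o = obj_hash_find(ht, key);		/* lockless hash walk */
	if (o && !kref_get_unless_zero(&o->kref))
		o = NULL;			/* already being released */
	rcu_read_unlock();

	return o;
}
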
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 210d50365162..75f319090043 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -170,9 +170,8 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
ttm_tt_unbind(ttm);
}
- if (ttm->state == tt_unbound) {
- ttm->bdev->driver->ttm_tt_unpopulate(ttm);
- }
+ if (ttm->state == tt_unbound)
+ ttm_tt_unpopulate(ttm);
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
ttm->swap_storage)
@@ -362,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
page_cache_release(to_page);
}
- ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+ ttm_tt_unpopulate(ttm);
ttm->swap_storage = swap_storage;
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
if (persistent_swap_storage)
@@ -375,3 +374,26 @@ out_err:
return ret;
}
+
+static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
+{
+ pgoff_t i;
+ struct page **page = ttm->pages;
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+ return;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ (*page)->mapping = NULL;
+ (*page++)->index = 0;
+ }
+}
+
+void ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ if (ttm->state == tt_unpopulated)
+ return;
+
+ ttm_tt_clear_mapping(ttm);
+ ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+}
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 97e9d614700f..dbadd49e4c4a 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -403,15 +403,17 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
int i;
int ret = 0;
+ drm_modeset_lock_all(fb->dev);
+
if (!ufb->active_16)
- return 0;
+ goto unlock;
if (ufb->obj->base.import_attach) {
ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
0, ufb->obj->base.size,
DMA_FROM_DEVICE);
if (ret)
- return ret;
+ goto unlock;
}
for (i = 0; i < num_clips; i++) {
@@ -419,7 +421,7 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
clips[i].x2 - clips[i].x1,
clips[i].y2 - clips[i].y1);
if (ret)
- break;
+ goto unlock;
}
if (ufb->obj->base.import_attach) {
@@ -427,6 +429,10 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
0, ufb->obj->base.size,
DMA_FROM_DEVICE);
}
+
+ unlock:
+ drm_modeset_unlock_all(fb->dev);
+
return ret;
}
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 652f9b43ec9d..a18479c6b6da 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -60,7 +60,7 @@
dev_priv->dma_low += 8; \
}
-#define via_flush_write_combine() DRM_MEMORYBARRIER()
+#define via_flush_write_combine() mb()
#define VIA_OUT_RING_QW(w1, w2) do { \
*vb++ = (w1); \
@@ -234,13 +234,13 @@ static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *fil
switch (init->func) {
case VIA_INIT_DMA:
- if (!DRM_SUSER(DRM_CURPROC))
+ if (!capable(CAP_SYS_ADMIN))
retcode = -EPERM;
else
retcode = via_initialize(dev, dev_priv, init);
break;
case VIA_CLEANUP_DMA:
- if (!DRM_SUSER(DRM_CURPROC))
+ if (!capable(CAP_SYS_ADMIN))
retcode = -EPERM;
else
retcode = via_dma_cleanup(dev);
@@ -273,7 +273,7 @@ static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *c
if (cmd->size > VIA_PCI_BUF_SIZE)
return -ENOMEM;
- if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
+ if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
return -EFAULT;
/*
@@ -346,7 +346,7 @@ static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
if (cmd->size > VIA_PCI_BUF_SIZE)
return -ENOMEM;
- if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
+ if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
return -EFAULT;
if ((ret =
@@ -543,7 +543,7 @@ static void via_cmdbuf_start(drm_via_private_t *dev_priv)
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
- DRM_WRITEMEMORYBARRIER();
+ wmb();
VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
VIA_READ(VIA_REG_TRANSPACE);
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 8b0f25904e6d..ba33cf679180 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -217,7 +217,7 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
- DRM_WRITEMEMORYBARRIER();
+ wmb();
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
}
@@ -338,7 +338,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
blitq->blits[cur]->aborted = blitq->aborting;
blitq->done_blit_handle++;
- DRM_WAKEUP(blitq->blit_queue + cur);
+ wake_up(blitq->blit_queue + cur);
cur++;
if (cur >= VIA_NUM_BLIT_SLOTS)
@@ -363,7 +363,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
via_abort_dmablit(dev, engine);
blitq->aborting = 1;
- blitq->end = jiffies + DRM_HZ;
+ blitq->end = jiffies + HZ;
}
if (!blitq->is_active) {
@@ -372,7 +372,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
blitq->is_active = 1;
blitq->cur = cur;
blitq->num_outstanding--;
- blitq->end = jiffies + DRM_HZ;
+ blitq->end = jiffies + HZ;
if (!timer_pending(&blitq->poll_timer))
mod_timer(&blitq->poll_timer, jiffies + 1);
} else {
@@ -436,7 +436,7 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
int ret = 0;
if (via_dmablit_active(blitq, engine, handle, &queue)) {
- DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, *queue, 3 * HZ,
!via_dmablit_active(blitq, engine, handle, NULL));
}
DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
@@ -521,7 +521,7 @@ via_dmablit_workqueue(struct work_struct *work)
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- DRM_WAKEUP(&blitq->busy_queue);
+ wake_up(&blitq->busy_queue);
via_free_sg_info(dev->pdev, cur_sg);
kfree(cur_sg);
@@ -561,8 +561,8 @@ via_init_dmablit(struct drm_device *dev)
blitq->aborting = 0;
spin_lock_init(&blitq->blit_lock);
for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
- DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
- DRM_INIT_WAITQUEUE(&blitq->busy_queue);
+ init_waitqueue_head(blitq->blit_queue + j);
+ init_waitqueue_head(&blitq->busy_queue);
INIT_WORK(&blitq->wq, via_dmablit_workqueue);
setup_timer(&blitq->poll_timer, via_dmablit_timer,
(unsigned long)blitq);
@@ -688,7 +688,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
while (blitq->num_free == 0) {
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
+ DRM_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
if (ret)
return (-EINTR == ret) ? -EAGAIN : ret;
@@ -713,7 +713,7 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
blitq->num_free++;
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- DRM_WAKEUP(&blitq->busy_queue);
+ wake_up(&blitq->busy_queue);
}
/*
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index 92684a9b7e34..50abc2adfaee 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -46,7 +46,7 @@ static int via_driver_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
+static void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct via_file_private *file_priv = file->driver_priv;
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index a811ef2b505f..ad0273256beb 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -138,7 +138,7 @@ extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
extern int via_enable_vblank(struct drm_device *dev, int crtc);
extern void via_disable_vblank(struct drm_device *dev, int crtc);
-extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t via_driver_irq_handler(int irq, void *arg);
extern void via_driver_irq_preinstall(struct drm_device *dev);
extern int via_driver_irq_postinstall(struct drm_device *dev);
extern void via_driver_irq_uninstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index ac98964297cf..1319433816d3 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -104,7 +104,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
return atomic_read(&dev_priv->vbl_received);
}
-irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t via_driver_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
for (i = 0; i < dev_priv->num_irqs; ++i) {
if (status & cur_irq->pending_mask) {
atomic_inc(&cur_irq->irq_received);
- DRM_WAKEUP(&cur_irq->irq_queue);
+ wake_up(&cur_irq->irq_queue);
handled = 1;
if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
via_dmablit_handler(dev, 0, 1);
@@ -239,12 +239,12 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
cur_irq = dev_priv->via_irqs + real_irq;
if (masks[real_irq][2] && !force_sequence) {
- DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
masks[irq][4]));
cur_irq_sequence = atomic_read(&cur_irq->irq_received);
} else {
- DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
(((cur_irq_sequence =
atomic_read(&cur_irq->irq_received)) -
*sequence) <= (1 << 23)));
@@ -287,7 +287,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
atomic_set(&cur_irq->irq_received, 0);
cur_irq->enable_mask = dev_priv->irq_masks[i][0];
cur_irq->pending_mask = dev_priv->irq_masks[i][1];
- DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
+ init_waitqueue_head(&cur_irq->irq_queue);
dev_priv->irq_enable_mask |= cur_irq->enable_mask;
dev_priv->irq_pending_mask |= cur_irq->pending_mask;
cur_irq++;
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
index 6569efa2ff6e..a9ffbad1cfdd 100644
--- a/drivers/gpu/drm/via/via_video.c
+++ b/drivers/gpu/drm/via/via_video.c
@@ -36,7 +36,7 @@ void via_init_futex(drm_via_private_t *dev_priv)
DRM_DEBUG("\n");
for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
- DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
+ init_waitqueue_head(&(dev_priv->decoder_queue[i]));
XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
}
}
@@ -58,7 +58,7 @@ void via_release_futex(drm_via_private_t *dev_priv, int context)
if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
if (_DRM_LOCK_IS_HELD(*lock)
&& (*lock & _DRM_LOCK_CONT)) {
- DRM_WAKEUP(&(dev_priv->decoder_queue[i]));
+ wake_up(&(dev_priv->decoder_queue[i]));
}
*lock = 0;
}
@@ -83,10 +83,10 @@ int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_
switch (fx->func) {
case VIA_FUTEX_WAIT:
DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
- (fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val);
+ (fx->ms / 10) * (HZ / 100), *lock != fx->val);
return ret;
case VIA_FUTEX_WAKE:
- DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock]));
+ wake_up(&(dev_priv->decoder_queue[fx->lock]));
return 0;
}
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 9f8b690bcf52..458cdf6d81e8 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
- vmwgfx_surface.o vmwgfx_prime.o
+ vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index d0e085ee8249..b645647b7776 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -34,6 +34,8 @@
#include "svga_reg.h"
+typedef uint32 PPN;
+typedef __le64 PPN64;
/*
* 3D Hardware Version
@@ -71,6 +73,9 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
#define SVGA3D_MAX_CONTEXT_IDS 256
#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
+#define SVGA3D_NUM_TEXTURE_UNITS 32
+#define SVGA3D_NUM_LIGHTS 8
+
/*
* Surface formats.
*
@@ -81,6 +86,7 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
*/
typedef enum SVGA3dSurfaceFormat {
+ SVGA3D_FORMAT_MIN = 0,
SVGA3D_FORMAT_INVALID = 0,
SVGA3D_X8R8G8B8 = 1,
@@ -134,12 +140,6 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_RG_S10E5 = 35,
SVGA3D_RG_S23E8 = 36,
- /*
- * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
- * the most efficient format to use when creating new surfaces
- * expressly for index or vertex data.
- */
-
SVGA3D_BUFFER = 37,
SVGA3D_Z_D24X8 = 38,
@@ -159,15 +159,114 @@ typedef enum SVGA3dSurfaceFormat {
/* Video format with alpha */
SVGA3D_AYUV = 45,
+ SVGA3D_R32G32B32A32_TYPELESS = 46,
+ SVGA3D_R32G32B32A32_FLOAT = 25,
+ SVGA3D_R32G32B32A32_UINT = 47,
+ SVGA3D_R32G32B32A32_SINT = 48,
+ SVGA3D_R32G32B32_TYPELESS = 49,
+ SVGA3D_R32G32B32_FLOAT = 50,
+ SVGA3D_R32G32B32_UINT = 51,
+ SVGA3D_R32G32B32_SINT = 52,
+ SVGA3D_R16G16B16A16_TYPELESS = 53,
+ SVGA3D_R16G16B16A16_FLOAT = 24,
+ SVGA3D_R16G16B16A16_UNORM = 41,
+ SVGA3D_R16G16B16A16_UINT = 54,
+ SVGA3D_R16G16B16A16_SNORM = 55,
+ SVGA3D_R16G16B16A16_SINT = 56,
+ SVGA3D_R32G32_TYPELESS = 57,
+ SVGA3D_R32G32_FLOAT = 36,
+ SVGA3D_R32G32_UINT = 58,
+ SVGA3D_R32G32_SINT = 59,
+ SVGA3D_R32G8X24_TYPELESS = 60,
+ SVGA3D_D32_FLOAT_S8X24_UINT = 61,
+ SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
+ SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
+ SVGA3D_R10G10B10A2_TYPELESS = 64,
+ SVGA3D_R10G10B10A2_UNORM = 26,
+ SVGA3D_R10G10B10A2_UINT = 65,
+ SVGA3D_R11G11B10_FLOAT = 66,
+ SVGA3D_R8G8B8A8_TYPELESS = 67,
+ SVGA3D_R8G8B8A8_UNORM = 68,
+ SVGA3D_R8G8B8A8_UNORM_SRGB = 69,
+ SVGA3D_R8G8B8A8_UINT = 70,
+ SVGA3D_R8G8B8A8_SNORM = 28,
+ SVGA3D_R8G8B8A8_SINT = 71,
+ SVGA3D_R16G16_TYPELESS = 72,
+ SVGA3D_R16G16_FLOAT = 35,
+ SVGA3D_R16G16_UNORM = 40,
+ SVGA3D_R16G16_UINT = 73,
+ SVGA3D_R16G16_SNORM = 39,
+ SVGA3D_R16G16_SINT = 74,
+ SVGA3D_R32_TYPELESS = 75,
+ SVGA3D_D32_FLOAT = 76,
+ SVGA3D_R32_FLOAT = 34,
+ SVGA3D_R32_UINT = 77,
+ SVGA3D_R32_SINT = 78,
+ SVGA3D_R24G8_TYPELESS = 79,
+ SVGA3D_D24_UNORM_S8_UINT = 80,
+ SVGA3D_R24_UNORM_X8_TYPELESS = 81,
+ SVGA3D_X24_TYPELESS_G8_UINT = 82,
+ SVGA3D_R8G8_TYPELESS = 83,
+ SVGA3D_R8G8_UNORM = 84,
+ SVGA3D_R8G8_UINT = 85,
+ SVGA3D_R8G8_SNORM = 27,
+ SVGA3D_R8G8_SINT = 86,
+ SVGA3D_R16_TYPELESS = 87,
+ SVGA3D_R16_FLOAT = 33,
+ SVGA3D_D16_UNORM = 8,
+ SVGA3D_R16_UNORM = 88,
+ SVGA3D_R16_UINT = 89,
+ SVGA3D_R16_SNORM = 90,
+ SVGA3D_R16_SINT = 91,
+ SVGA3D_R8_TYPELESS = 92,
+ SVGA3D_R8_UNORM = 93,
+ SVGA3D_R8_UINT = 94,
+ SVGA3D_R8_SNORM = 95,
+ SVGA3D_R8_SINT = 96,
+ SVGA3D_A8_UNORM = 32,
+ SVGA3D_R1_UNORM = 97,
+ SVGA3D_R9G9B9E5_SHAREDEXP = 98,
+ SVGA3D_R8G8_B8G8_UNORM = 99,
+ SVGA3D_G8R8_G8B8_UNORM = 100,
+ SVGA3D_BC1_TYPELESS = 101,
+ SVGA3D_BC1_UNORM = 15,
+ SVGA3D_BC1_UNORM_SRGB = 102,
+ SVGA3D_BC2_TYPELESS = 103,
+ SVGA3D_BC2_UNORM = 17,
+ SVGA3D_BC2_UNORM_SRGB = 104,
+ SVGA3D_BC3_TYPELESS = 105,
+ SVGA3D_BC3_UNORM = 19,
+ SVGA3D_BC3_UNORM_SRGB = 106,
+ SVGA3D_BC4_TYPELESS = 107,
SVGA3D_BC4_UNORM = 108,
+ SVGA3D_BC4_SNORM = 109,
+ SVGA3D_BC5_TYPELESS = 110,
SVGA3D_BC5_UNORM = 111,
+ SVGA3D_BC5_SNORM = 112,
+ SVGA3D_B5G6R5_UNORM = 3,
+ SVGA3D_B5G5R5A1_UNORM = 5,
+ SVGA3D_B8G8R8A8_UNORM = 2,
+ SVGA3D_B8G8R8X8_UNORM = 1,
+ SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113,
+ SVGA3D_B8G8R8A8_TYPELESS = 114,
+ SVGA3D_B8G8R8A8_UNORM_SRGB = 115,
+ SVGA3D_B8G8R8X8_TYPELESS = 116,
+ SVGA3D_B8G8R8X8_UNORM_SRGB = 117,
/* Advanced D3D9 depth formats. */
SVGA3D_Z_DF16 = 118,
SVGA3D_Z_DF24 = 119,
SVGA3D_Z_D24S8_INT = 120,
- SVGA3D_FORMAT_MAX
+ /* Planar video formats. */
+ SVGA3D_YV12 = 121,
+
+ /* Shader constant formats. */
+ SVGA3D_SURFACE_SHADERCONST_FLOAT = 122,
+ SVGA3D_SURFACE_SHADERCONST_INT = 123,
+ SVGA3D_SURFACE_SHADERCONST_BOOL = 124,
+
+ SVGA3D_FORMAT_MAX = 125,
} SVGA3dSurfaceFormat;
typedef uint32 SVGA3dColor; /* a, r, g, b */
@@ -957,15 +1056,21 @@ typedef enum {
} SVGA3dCubeFace;
typedef enum {
+ SVGA3D_SHADERTYPE_INVALID = 0,
+ SVGA3D_SHADERTYPE_MIN = 1,
SVGA3D_SHADERTYPE_VS = 1,
SVGA3D_SHADERTYPE_PS = 2,
- SVGA3D_SHADERTYPE_MAX
+ SVGA3D_SHADERTYPE_MAX = 3,
+ SVGA3D_SHADERTYPE_GS = 3,
} SVGA3dShaderType;
+#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
+
typedef enum {
SVGA3D_CONST_TYPE_FLOAT = 0,
SVGA3D_CONST_TYPE_INT = 1,
SVGA3D_CONST_TYPE_BOOL = 2,
+ SVGA3D_CONST_TYPE_MAX
} SVGA3dShaderConstType;
#define SVGA3D_MAX_SURFACE_FACES 6
@@ -1056,9 +1161,74 @@ typedef enum {
#define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31
#define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40
#define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41
-#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 42
-
-#define SVGA_3D_CMD_FUTURE_MAX 2000
+#define SVGA_3D_CMD_SCREEN_DMA 1082
+#define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083
+#define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE 1084
+
+#define SVGA_3D_CMD_LOGICOPS_BITBLT 1085
+#define SVGA_3D_CMD_LOGICOPS_TRANSBLT 1086
+#define SVGA_3D_CMD_LOGICOPS_STRETCHBLT 1087
+#define SVGA_3D_CMD_LOGICOPS_COLORFILL 1088
+#define SVGA_3D_CMD_LOGICOPS_ALPHABLEND 1089
+#define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND 1090
+
+#define SVGA_3D_CMD_SET_OTABLE_BASE 1091
+#define SVGA_3D_CMD_READBACK_OTABLE 1092
+
+#define SVGA_3D_CMD_DEFINE_GB_MOB 1093
+#define SVGA_3D_CMD_DESTROY_GB_MOB 1094
+#define SVGA_3D_CMD_REDEFINE_GB_MOB 1095
+#define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING 1096
+
+#define SVGA_3D_CMD_DEFINE_GB_SURFACE 1097
+#define SVGA_3D_CMD_DESTROY_GB_SURFACE 1098
+#define SVGA_3D_CMD_BIND_GB_SURFACE 1099
+#define SVGA_3D_CMD_COND_BIND_GB_SURFACE 1100
+#define SVGA_3D_CMD_UPDATE_GB_IMAGE 1101
+#define SVGA_3D_CMD_UPDATE_GB_SURFACE 1102
+#define SVGA_3D_CMD_READBACK_GB_IMAGE 1103
+#define SVGA_3D_CMD_READBACK_GB_SURFACE 1104
+#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1105
+#define SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1106
+
+#define SVGA_3D_CMD_DEFINE_GB_CONTEXT 1107
+#define SVGA_3D_CMD_DESTROY_GB_CONTEXT 1108
+#define SVGA_3D_CMD_BIND_GB_CONTEXT 1109
+#define SVGA_3D_CMD_READBACK_GB_CONTEXT 1110
+#define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT 1111
+
+#define SVGA_3D_CMD_DEFINE_GB_SHADER 1112
+#define SVGA_3D_CMD_DESTROY_GB_SHADER 1113
+#define SVGA_3D_CMD_BIND_GB_SHADER 1114
+
+#define SVGA_3D_CMD_SET_OTABLE_BASE64 1115
+
+#define SVGA_3D_CMD_BEGIN_GB_QUERY 1116
+#define SVGA_3D_CMD_END_GB_QUERY 1117
+#define SVGA_3D_CMD_WAIT_FOR_GB_QUERY 1118
+
+#define SVGA_3D_CMD_NOP 1119
+
+#define SVGA_3D_CMD_ENABLE_GART 1120
+#define SVGA_3D_CMD_DISABLE_GART 1121
+#define SVGA_3D_CMD_MAP_MOB_INTO_GART 1122
+#define SVGA_3D_CMD_UNMAP_GART_RANGE 1123
+
+#define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET 1124
+#define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET 1125
+#define SVGA_3D_CMD_BIND_GB_SCREENTARGET 1126
+#define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET 1127
+
+#define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL 1128
+#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
+
+#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130
+
+#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135
+#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136
+
+#define SVGA_3D_CMD_MAX 1142
+#define SVGA_3D_CMD_FUTURE_MAX 3000
/*
* Common substructures used in multiple FIFO commands:
@@ -1750,6 +1920,495 @@ struct {
/*
+ * Guest-backed surface definitions.
+ */
+
+typedef uint32 SVGAMobId;
+
+typedef enum SVGAMobFormat {
+ SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
+ SVGA3D_MOBFMT_PTDEPTH_0 = 0,
+ SVGA3D_MOBFMT_PTDEPTH_1 = 1,
+ SVGA3D_MOBFMT_PTDEPTH_2 = 2,
+ SVGA3D_MOBFMT_RANGE = 3,
+ SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
+ SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
+ SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
+ SVGA3D_MOBFMT_MAX,
+} SVGAMobFormat;
+
+/*
+ * Sizes of opaque types.
+ */
+
+#define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16
+#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8
+#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64
+#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16
+#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64
+#define SVGA3D_CONTEXT_DATA_SIZE 16384
+
+/*
+ * SVGA3dCmdSetOTableBase --
+ *
+ * This command allows the guest to specify the base PPN of the
+ * specified object table.
+ */
+
+typedef enum {
+ SVGA_OTABLE_MOB = 0,
+ SVGA_OTABLE_MIN = 0,
+ SVGA_OTABLE_SURFACE = 1,
+ SVGA_OTABLE_CONTEXT = 2,
+ SVGA_OTABLE_SHADER = 3,
+ SVGA_OTABLE_SCREEN_TARGET = 4,
+ SVGA_OTABLE_DX9_MAX = 5,
+ SVGA_OTABLE_MAX = 8
+} SVGAOTableType;
+
+typedef
+struct {
+ SVGAOTableType type;
+ PPN baseAddress;
+ uint32 sizeInBytes;
+ uint32 validSizeInBytes;
+ SVGAMobFormat ptDepth;
+}
+__attribute__((__packed__))
+SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
+
+typedef
+struct {
+ SVGAOTableType type;
+ PPN64 baseAddress;
+ uint32 sizeInBytes;
+ uint32 validSizeInBytes;
+ SVGAMobFormat ptDepth;
+}
+__attribute__((__packed__))
+SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
+
+typedef
+struct {
+ SVGAOTableType type;
+}
+__attribute__((__packed__))
+SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
+
+/*
+ * Define a memory object (Mob) in the OTable.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBMob {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN base;
+ uint32 sizeInBytes;
+}
+__attribute__((__packed__))
+SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
+
+
+/*
+ * Destroys an object in the OTable.
+ */
+
+typedef
+struct SVGA3dCmdDestroyGBMob {
+ SVGAMobId mobid;
+}
+__attribute__((__packed__))
+SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
+
+/*
+ * Redefine an object in the OTable.
+ */
+
+typedef
+struct SVGA3dCmdRedefineGBMob {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN base;
+ uint32 sizeInBytes;
+}
+__attribute__((__packed__))
+SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */
+
+/*
+ * Define a memory object (Mob) in the OTable with a PPN64 base.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBMob64 {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN64 base;
+ uint32 sizeInBytes;
+}
+__attribute__((__packed__))
+SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
+
+/*
+ * Redefine an object in the OTable with PPN64 base.
+ */
+
+typedef
+struct SVGA3dCmdRedefineGBMob64 {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN64 base;
+ uint32 sizeInBytes;
+}
+__attribute__((__packed__))
+SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
+
+/*
+ * Notification that the page tables have been modified.
+ */
+
+typedef
+struct SVGA3dCmdUpdateGBMobMapping {
+ SVGAMobId mobid;
+}
+__attribute__((__packed__))
+SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
+
+/*
+ * Define a guest-backed surface.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBSurface {
+ uint32 sid;
+ SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+} SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
+
+/*
+ * Destroy a guest-backed surface.
+ */
+
+typedef
+struct SVGA3dCmdDestroyGBSurface {
+ uint32 sid;
+} SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
+
+/*
+ * Bind a guest-backed surface to an object.
+ */
+
+typedef
+struct SVGA3dCmdBindGBSurface {
+ uint32 sid;
+ SVGAMobId mobid;
+} SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
+
+/*
+ * Conditionally bind a mob to a guest backed surface if testMobid
+ * matches the currently bound mob. Optionally issue a readback on
+ * the surface while it is still bound to the old mobid if the mobid
+ * is changed by this command.
+ */
+
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
+
+typedef
+struct {
+ uint32 sid;
+ SVGAMobId testMobid;
+ SVGAMobId mobid;
+ uint32 flags;
+}
+SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
+
+/*
+ * Update an image in a guest-backed surface.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+
+typedef
+struct SVGA3dCmdUpdateGBImage {
+ SVGA3dSurfaceImageId image;
+ SVGA3dBox box;
+} SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
+
+/*
+ * Update an entire guest-backed surface.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+
+typedef
+struct SVGA3dCmdUpdateGBSurface {
+ uint32 sid;
+} SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
+
+/*
+ * Readback an image in a guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBImage {
+ SVGA3dSurfaceImageId image;
+} SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
+
+/*
+ * Readback an entire guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBSurface {
+ uint32 sid;
+} SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
+
+/*
+ * Readback a sub rect of an image in a guest-backed surface. After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface, or rendering is not guaranteed.
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBImagePartial {
+ SVGA3dSurfaceImageId image;
+ SVGA3dBox box;
+ uint32 invertBox;
+}
+SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+
+typedef
+struct SVGA3dCmdInvalidateGBImage {
+ SVGA3dSurfaceImageId image;
+} SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
+
+/*
+ * Invalidate an entire guest-backed surface.
+ * (Notify the device that the contents of all images can be lost.)
+ */
+
+typedef
+struct SVGA3dCmdInvalidateGBSurface {
+ uint32 sid;
+} SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
+
+/*
+ * Invalidate a sub rect of an image in a guest-backed surface. After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface, or rendering is not guaranteed.
+ */
+
+typedef
+struct SVGA3dCmdInvalidateGBImagePartial {
+ SVGA3dSurfaceImageId image;
+ SVGA3dBox box;
+ uint32 invertBox;
+}
+SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
+
+/*
+ * Define a guest-backed context.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBContext {
+ uint32 cid;
+} SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
+
+/*
+ * Destroy a guest-backed context.
+ */
+
+typedef
+struct SVGA3dCmdDestroyGBContext {
+ uint32 cid;
+} SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
+
+/*
+ * Bind a guest-backed context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context which is getting paged
+ * back on to the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+
+typedef
+struct SVGA3dCmdBindGBContext {
+ uint32 cid;
+ SVGAMobId mobid;
+ uint32 validContents;
+} SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
+
+/*
+ * Readback a guest-backed context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBContext {
+ uint32 cid;
+} SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+struct SVGA3dCmdInvalidateGBContext {
+ uint32 cid;
+} SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
+
+/*
+ * Define a guest-backed shader.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBShader {
+ uint32 shid;
+ SVGA3dShaderType type;
+ uint32 sizeInBytes;
+} SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
+
+/*
+ * Bind a guest-backed shader.
+ */
+
+typedef struct SVGA3dCmdBindGBShader {
+ uint32 shid;
+ SVGAMobId mobid;
+ uint32 offsetInBytes;
+} SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
+
+/*
+ * Destroy a guest-backed shader.
+ */
+
+typedef struct SVGA3dCmdDestroyGBShader {
+ uint32 shid;
+} SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 regStart;
+ SVGA3dShaderType shaderType;
+ SVGA3dShaderConstType constType;
+
+ /*
+ * Followed by a variable number of shader constants.
+ *
+ * Note that FLOAT and INT constants are 4-dwords in length, while
+ * BOOL constants are 1-dword in length.
+ */
+} SVGA3dCmdSetGBShaderConstInline;
+/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+} SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+ SVGAMobId mobid;
+ uint32 offset;
+} SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
+
+
+/*
+ * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
+ *
+ * The semantics of this command are identical to the
+ * SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
+ * to a Mob instead of a GMR.
+ */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+ SVGAMobId mobid;
+ uint32 offset;
+} SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
+
+typedef
+struct {
+ SVGAMobId mobid;
+ uint32 fbOffset;
+ uint32 initalized;
+}
+SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
+
+typedef
+struct {
+ SVGAMobId mobid;
+ uint32 gartOffset;
+}
+SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
+
+
+typedef
+struct {
+ uint32 gartOffset;
+ uint32 numPages;
+}
+SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
+
+
+/*
+ * Screen Targets
+ */
+#define SVGA_STFLAG_PRIMARY (1 << 0)
+
+typedef
+struct {
+ uint32 stid;
+ uint32 width;
+ uint32 height;
+ int32 xRoot;
+ int32 yRoot;
+ uint32 flags;
+}
+SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
+
+typedef
+struct {
+ uint32 stid;
+}
+SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
+
+typedef
+struct {
+ uint32 stid;
+ SVGA3dSurfaceImageId image;
+}
+SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
+
+typedef
+struct {
+ uint32 stid;
+ SVGA3dBox box;
+}
+SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
+
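Each of the guest-backed command bodies defined above travels behind an SVGA3dCmdHeader carrying the SVGA_3D_CMD_* id and the body size, which is the pattern the driver code later in this patch uses (see vmw_gb_context_create()). A minimal sketch with the one-field SVGA3dCmdUpdateGBSurface body; the FIFO helpers are the driver's existing vmw_fifo_reserve()/vmw_fifo_commit(), and the wrapper function itself is hypothetical:

/* Sketch only: frame an UPDATE_GB_SURFACE command for surface @sid. */
static int sketch_update_gb_surface(struct vmw_private *dev_priv, uint32 sid)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = sid;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}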
+/*
* Capability query index.
*
* Notes:
@@ -1879,10 +2538,41 @@ typedef enum {
SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83,
/*
- * Don't add new caps into the previous section; the values in this
- * enumeration must not change. You can put new values right before
- * SVGA3D_DEVCAP_MAX.
+ * Deprecated.
+ */
+ SVGA3D_DEVCAP_VGPU10 = 84,
+
+ /*
+ * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
+ * ored together, one for every type of video decoding supported.
+ */
+ SVGA3D_DEVCAP_VIDEO_DECODE = 85,
+
+ /*
+ * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
+ * ored together, one for every type of video processing supported.
+ */
+ SVGA3D_DEVCAP_VIDEO_PROCESS = 86,
+
+ SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */
+ SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */
+ SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */
+ SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */
+
+ SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91,
+
+ /*
+ * Does the host support the SVGA logic ops commands?
+ */
+ SVGA3D_DEVCAP_LOGICOPS = 92,
+
+ /*
+ * What support does the host have for screen targets?
+ *
+ * See the SVGA3D_SCREENTARGET_CAP bits below.
*/
+ SVGA3D_DEVCAP_SCREENTARGETS = 93,
+
SVGA3D_DEVCAP_MAX /* This must be the last index. */
} SVGA3dDevCapIndex;
@@ -1893,4 +2583,28 @@ typedef union {
float f;
} SVGA3dDevCapResult;
+typedef enum {
+ SVGA3DCAPS_RECORD_UNKNOWN = 0,
+ SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
+ SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
+ SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
+} SVGA3dCapsRecordType;
+
+typedef
+struct SVGA3dCapsRecordHeader {
+ uint32 length;
+ SVGA3dCapsRecordType type;
+}
+SVGA3dCapsRecordHeader;
+
+typedef
+struct SVGA3dCapsRecord {
+ SVGA3dCapsRecordHeader header;
+ uint32 data[1];
+}
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
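The caps records above form a self-describing list of dwords. A hedged sketch of walking it, assuming header.length counts uint32 units including the header and that a zero-length record terminates the list — neither assumption is spelled out in this hunk:

/*
 * Sketch only: find a caps record of a given type in a caps buffer.
 * Assumes header.length is in uint32 units (header included) and that
 * a record with length == 0 ends the list.
 */
static const SVGA3dCapsRecord *
sketch_find_caps_record(const uint32 *caps, SVGA3dCapsRecordType type)
{
	const SVGA3dCapsRecord *rec = (const SVGA3dCapsRecord *)caps;

	while (rec->header.length != 0) {
		if (rec->header.type == type)
			return rec;
		rec = (const SVGA3dCapsRecord *)
			((const uint32 *)rec + rec->header.length);
	}
	return NULL;
}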
#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 01f63cb49678..71defa4d2d75 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -169,7 +169,10 @@ enum {
SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */
SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
- SVGA_REG_TOP = 48, /* Must be 1 more than the last register */
+ SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */
+ SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
+ SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
+ SVGA_REG_TOP = 53, /* Must be 1 more than the last register */
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
@@ -431,7 +434,10 @@ struct SVGASignedPoint {
#define SVGA_CAP_TRACES 0x00200000
#define SVGA_CAP_GMR2 0x00400000
#define SVGA_CAP_SCREEN_OBJECT_2 0x00800000
-
+#define SVGA_CAP_COMMAND_BUFFERS 0x01000000
+#define SVGA_CAP_DEAD1 0x02000000
+#define SVGA_CAP_CMD_BUFFERS_2 0x04000000
+#define SVGA_CAP_GBOBJECTS 0x08000000
/*
* FIFO register indices.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 0489c6152482..6327cfc36805 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -40,6 +40,10 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
TTM_PL_FLAG_CACHED;
+static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM |
+ TTM_PL_FLAG_CACHED |
+ TTM_PL_FLAG_NO_EVICT;
+
static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
TTM_PL_FLAG_CACHED;
@@ -47,6 +51,9 @@ static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_NO_EVICT;
+static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB |
+ TTM_PL_FLAG_CACHED;
+
struct ttm_placement vmw_vram_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -116,16 +123,26 @@ struct ttm_placement vmw_sys_placement = {
.busy_placement = &sys_placement_flags
};
+struct ttm_placement vmw_sys_ne_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .placement = &sys_ne_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &sys_ne_placement_flags
+};
+
static uint32_t evictable_placement_flags[] = {
TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
- VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+ VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
+ VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};
struct ttm_placement vmw_evictable_placement = {
.fpfn = 0,
.lpfn = 0,
- .num_placement = 3,
+ .num_placement = 4,
.placement = evictable_placement_flags,
.num_busy_placement = 1,
.busy_placement = &sys_placement_flags
@@ -140,10 +157,21 @@ struct ttm_placement vmw_srf_placement = {
.busy_placement = gmr_vram_placement_flags
};
+struct ttm_placement vmw_mob_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .num_busy_placement = 1,
+ .placement = &mob_placement_flags,
+ .busy_placement = &mob_placement_flags
+};
+
struct vmw_ttm_tt {
struct ttm_dma_tt dma_ttm;
struct vmw_private *dev_priv;
int gmr_id;
+ struct vmw_mob *mob;
+ int mem_type;
struct sg_table sgt;
struct vmw_sg_table vsgt;
uint64_t sg_alloc_size;
@@ -244,6 +272,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
viter->dma_address = &__vmw_piter_dma_addr;
viter->page = &__vmw_piter_non_sg_page;
viter->addrs = vsgt->addrs;
+ viter->pages = vsgt->pages;
break;
case vmw_dma_map_populate:
case vmw_dma_map_bind:
@@ -424,6 +453,63 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
vmw_tt->mapped = false;
}
+
+/**
+ * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ * Note that the buffer object must be either pinned or reserved before
+ * calling this function.
+ */
+int vmw_bo_map_dma(struct ttm_buffer_object *bo)
+{
+ struct vmw_ttm_tt *vmw_tt =
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+ return vmw_ttm_map_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_unmap_dma - Unmap buffer object pages so they are no longer visible to the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ */
+void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
+{
+ struct vmw_ttm_tt *vmw_tt =
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+ vmw_ttm_unmap_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
+ * TTM buffer object
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Returns a pointer to a struct vmw_sg_table object. The object should
+ * not be freed after use.
+ * Note that for the device addresses to be valid, the buffer object must
+ * either be reserved or pinned.
+ */
+const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
+{
+ struct vmw_ttm_tt *vmw_tt =
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+ return &vmw_tt->vsgt;
+}
+
+
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
struct vmw_ttm_tt *vmw_be =
@@ -435,9 +521,27 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
return ret;
vmw_be->gmr_id = bo_mem->start;
+ vmw_be->mem_type = bo_mem->mem_type;
+
+ switch (bo_mem->mem_type) {
+ case VMW_PL_GMR:
+ return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
+ ttm->num_pages, vmw_be->gmr_id);
+ case VMW_PL_MOB:
+ if (unlikely(vmw_be->mob == NULL)) {
+ vmw_be->mob =
+ vmw_mob_create(ttm->num_pages);
+ if (unlikely(vmw_be->mob == NULL))
+ return -ENOMEM;
+ }
- return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
- ttm->num_pages, vmw_be->gmr_id);
+ return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
+ &vmw_be->vsgt, ttm->num_pages,
+ vmw_be->gmr_id);
+ default:
+ BUG();
+ }
+ return 0;
}
static int vmw_ttm_unbind(struct ttm_tt *ttm)
@@ -445,7 +549,16 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm)
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
- vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+ switch (vmw_be->mem_type) {
+ case VMW_PL_GMR:
+ vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+ break;
+ case VMW_PL_MOB:
+ vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
+ break;
+ default:
+ BUG();
+ }
if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
vmw_ttm_unmap_dma(vmw_be);
@@ -453,6 +566,7 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm)
return 0;
}
+
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
@@ -463,9 +577,14 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
ttm_dma_tt_fini(&vmw_be->dma_ttm);
else
ttm_tt_fini(ttm);
+
+ if (vmw_be->mob)
+ vmw_mob_destroy(vmw_be->mob);
+
kfree(vmw_be);
}
+
static int vmw_ttm_populate(struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_tt =
@@ -500,6 +619,12 @@ static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
struct vmw_private *dev_priv = vmw_tt->dev_priv;
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+
+ if (vmw_tt->mob) {
+ vmw_mob_destroy(vmw_tt->mob);
+ vmw_tt->mob = NULL;
+ }
+
vmw_ttm_unmap_dma(vmw_tt);
if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
size_t size =
@@ -517,7 +642,7 @@ static struct ttm_backend_func vmw_ttm_func = {
.destroy = vmw_ttm_destroy,
};
-struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
@@ -530,6 +655,7 @@ struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
+ vmw_be->mob = NULL;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
@@ -546,12 +672,12 @@ out_no_init:
return NULL;
}
-int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
}
-int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
switch (type) {
@@ -571,6 +697,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case VMW_PL_GMR:
+ case VMW_PL_MOB:
/*
* "Guest Memory Regions" is an aperture like feature with
* one slot per bo. There is an upper limit of the number of
@@ -589,7 +716,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
return 0;
}
-void vmw_evict_flags(struct ttm_buffer_object *bo,
+static void vmw_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
*placement = vmw_sys_placement;
@@ -618,6 +745,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
case VMW_PL_GMR:
+ case VMW_PL_MOB:
return 0;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -677,6 +805,38 @@ static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
VMW_FENCE_WAIT_TIMEOUT);
}
+/**
+ * vmw_move_notify - TTM move_notify callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
+ *
+ * Calls move_notify for all subsystems needing it.
+ * (currently only resources).
+ */
+static void vmw_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
+{
+ vmw_resource_move_notify(bo, mem);
+}
+
+
+/**
+ * vmw_swap_notify - TTM swap_notify callback
+ *
+ * @bo: The TTM buffer object about to be swapped out.
+ */
+static void vmw_swap_notify(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ spin_lock(&bdev->fence_lock);
+ ttm_bo_wait(bo, false, false, false);
+ spin_unlock(&bdev->fence_lock);
+}
+
+
struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create,
.ttm_tt_populate = &vmw_ttm_populate,
@@ -691,8 +851,8 @@ struct ttm_bo_driver vmw_bo_driver = {
.sync_obj_flush = vmw_sync_obj_flush,
.sync_obj_unref = vmw_sync_obj_unref,
.sync_obj_ref = vmw_sync_obj_ref,
- .move_notify = NULL,
- .swap_notify = NULL,
+ .move_notify = vmw_move_notify,
+ .swap_notify = vmw_swap_notify,
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 00ae0925aca8..9426c53fb483 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -32,12 +32,30 @@
struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
+ struct vmw_ctx_binding_state cbs;
};
+
+
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
+
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);
+static int vmw_gb_context_create(struct vmw_resource *res);
+static int vmw_gb_context_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_context_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_context_destroy(struct vmw_resource *res);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
+static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static uint64_t vmw_user_context_size;
static const struct vmw_user_resource_conv user_context_conv = {
@@ -62,6 +80,23 @@ static const struct vmw_res_func vmw_legacy_context_func = {
.unbind = NULL
};
+static const struct vmw_res_func vmw_gb_context_func = {
+ .res_type = vmw_res_context,
+ .needs_backup = true,
+ .may_evict = true,
+ .type_name = "guest backed contexts",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_gb_context_create,
+ .destroy = vmw_gb_context_destroy,
+ .bind = vmw_gb_context_bind,
+ .unbind = vmw_gb_context_unbind
+};
+
+static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
+ [vmw_ctx_binding_shader] = vmw_context_scrub_shader,
+ [vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
+ [vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+
/**
* Context management:
*/
@@ -76,6 +111,20 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
} *cmd;
+ if (res->func->destroy == vmw_gb_context_destroy) {
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+ mutex_lock(&dev_priv->binding_mutex);
+ (void) vmw_context_binding_state_kill
+ (&container_of(res, struct vmw_user_context, res)->cbs);
+ (void) vmw_gb_context_destroy(res);
+ if (dev_priv->pinned_bo != NULL &&
+ !dev_priv->query_cid_valid)
+ __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+ mutex_unlock(&dev_priv->binding_mutex);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+ return;
+ }
+
vmw_execbuf_release_pinned_bo(dev_priv);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
@@ -92,6 +141,33 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
vmw_3d_resource_dec(dev_priv, false);
}
+static int vmw_gb_context_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+ struct vmw_user_context *uctx =
+ container_of(res, struct vmw_user_context, res);
+
+ ret = vmw_resource_init(dev_priv, res, true,
+ res_free, &vmw_gb_context_func);
+ res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+
+ if (unlikely(ret != 0)) {
+ if (res_free)
+ res_free(res);
+ else
+ kfree(res);
+ return ret;
+ }
+
+ memset(&uctx->cbs, 0, sizeof(uctx->cbs));
+ INIT_LIST_HEAD(&uctx->cbs.list);
+
+ vmw_resource_activate(res, vmw_hw_context_destroy);
+ return 0;
+}
+
static int vmw_context_init(struct vmw_private *dev_priv,
struct vmw_resource *res,
void (*res_free) (struct vmw_resource *res))
@@ -103,6 +179,9 @@ static int vmw_context_init(struct vmw_private *dev_priv,
SVGA3dCmdDefineContext body;
} *cmd;
+ if (dev_priv->has_mob)
+ return vmw_gb_context_init(dev_priv, res, res_free);
+
ret = vmw_resource_init(dev_priv, res, false,
res_free, &vmw_legacy_context_func);
@@ -154,6 +233,180 @@ struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
return (ret == 0) ? res : NULL;
}
+
+static int vmw_gb_context_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBContext body;
+ } *cmd;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a context id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ (void) vmw_3d_resource_inc(dev_priv, false);
+
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ return ret;
+}
+
+static int vmw_gb_context_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBContext body;
+ } *cmd;
+ struct ttm_buffer_object *bo = val_buf->bo;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ cmd->body.mobid = bo->mem.start;
+ cmd->body.validContents = res->backup_dirty;
+ res->backup_dirty = false;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+static int vmw_gb_context_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+ struct vmw_fence_obj *fence;
+ struct vmw_user_context *uctx =
+ container_of(res, struct vmw_user_context, res);
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdReadbackGBContext body;
+ } *cmd1;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBContext body;
+ } *cmd2;
+ uint32_t submit_size;
+ uint8_t *cmd;
+
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_context_binding_state_scrub(&uctx->cbs);
+
+ submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "unbinding.\n");
+ mutex_unlock(&dev_priv->binding_mutex);
+ return -ENOMEM;
+ }
+
+ cmd2 = (void *) cmd;
+ if (readback) {
+ cmd1 = (void *) cmd;
+ cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.cid = res->id;
+ cmd2 = (void *) (&cmd1[1]);
+ }
+ cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
+ cmd2->header.size = sizeof(cmd2->body);
+ cmd2->body.cid = res->id;
+ cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+ vmw_fifo_commit(dev_priv, submit_size);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+static int vmw_gb_context_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyGBContext body;
+ } *cmd;
+
+ if (likely(res->id == -1))
+ return 0;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "destruction.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ if (dev_priv->query_cid == res->id)
+ dev_priv->query_cid_valid = false;
+ vmw_resource_release_id(res);
+ vmw_3d_resource_dec(dev_priv, false);
+
+ return 0;
+}
+
/**
* User-space context management:
*/
@@ -272,3 +525,383 @@ out_unlock:
return ret;
}
+
+/**
+ * vmw_context_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShader body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = bi->ctx->id;
+ cmd->body.type = bi->i1.shader_type;
+ cmd->body.shid =
+ cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_context_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
+{
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetRenderTarget body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for render target "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = bi->ctx->id;
+ cmd->body.type = bi->i1.rt_type;
+ cmd->body.target.sid =
+ cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ cmd->body.target.face = 0;
+ cmd->body.target.mipmap = 0;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_context_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them to a single command.
+ */
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
+{
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ struct {
+ SVGA3dCmdSetTextureState c;
+ SVGA3dTextureState s1;
+ } body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for texture "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+
+ cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.c.cid = bi->ctx->id;
+ cmd->body.s1.stage = bi->i1.texture_stage;
+ cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
+ cmd->body.s1.value =
+ cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_context_binding_drop: Stop tracking a context binding
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
+{
+ list_del(&cb->ctx_list);
+ if (!list_empty(&cb->res_list))
+ list_del(&cb->res_list);
+ cb->bi.ctx = NULL;
+}
+
+/**
+ * vmw_context_binding_add: Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ * Performs basic checks on the binding to make sure arguments are within
+ * bounds and then starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi)
+{
+ struct vmw_ctx_binding *loc;
+
+ switch (bi->bt) {
+ case vmw_ctx_binding_rt:
+ if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
+ DRM_ERROR("Illegal render target type %u.\n",
+ (unsigned) bi->i1.rt_type);
+ return -EINVAL;
+ }
+ loc = &cbs->render_targets[bi->i1.rt_type];
+ break;
+ case vmw_ctx_binding_tex:
+ if (unlikely((unsigned)bi->i1.texture_stage >=
+ SVGA3D_NUM_TEXTURE_UNITS)) {
+ DRM_ERROR("Illegal texture/sampler unit %u.\n",
+ (unsigned) bi->i1.texture_stage);
+ return -EINVAL;
+ }
+ loc = &cbs->texture_units[bi->i1.texture_stage];
+ break;
+ case vmw_ctx_binding_shader:
+ if (unlikely((unsigned)bi->i1.shader_type >=
+ SVGA3D_SHADERTYPE_MAX)) {
+ DRM_ERROR("Illegal shader type %u.\n",
+ (unsigned) bi->i1.shader_type);
+ return -EINVAL;
+ }
+ loc = &cbs->shaders[bi->i1.shader_type];
+ break;
+ default:
+ BUG();
+ }
+
+ if (loc->bi.ctx != NULL)
+ vmw_context_binding_drop(loc);
+
+ loc->bi = *bi;
+ loc->bi.scrubbed = false;
+ list_add_tail(&loc->ctx_list, &cbs->list);
+ INIT_LIST_HEAD(&loc->res_list);
+
+ return 0;
+}
+
+/**
+ * vmw_context_binding_transfer: Transfer a context binding tracking entry.
+ *
+ * @cbs: Pointer to the persistent context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ */
+static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi)
+{
+ struct vmw_ctx_binding *loc;
+
+ switch (bi->bt) {
+ case vmw_ctx_binding_rt:
+ loc = &cbs->render_targets[bi->i1.rt_type];
+ break;
+ case vmw_ctx_binding_tex:
+ loc = &cbs->texture_units[bi->i1.texture_stage];
+ break;
+ case vmw_ctx_binding_shader:
+ loc = &cbs->shaders[bi->i1.shader_type];
+ break;
+ default:
+ BUG();
+ }
+
+ if (loc->bi.ctx != NULL)
+ vmw_context_binding_drop(loc);
+
+ if (bi->res != NULL) {
+ loc->bi = *bi;
+ list_add_tail(&loc->ctx_list, &cbs->list);
+ list_add_tail(&loc->res_list, &bi->res->binding_head);
+ }
+}
+
+/**
+ * vmw_context_binding_kill - Kill a binding on the device
+ * and stop tracking it.
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Emits FIFO commands to scrub a binding represented by @cb.
+ * Then stops tracking the binding and re-initializes its storage.
+ */
+static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
+{
+ if (!cb->bi.scrubbed) {
+ (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
+ cb->bi.scrubbed = true;
+ }
+ vmw_context_binding_drop(cb);
+}
+
+/**
+ * vmw_context_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_binding *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+ vmw_context_binding_kill(entry);
+}
+
+/**
+ * vmw_context_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_binding *entry;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (!entry->bi.scrubbed) {
+ (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+ entry->bi.scrubbed = true;
+ }
+ }
+}
+
+/**
+ * vmw_context_binding_res_list_kill - Kill all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Kills all bindings associated with a specific resource. Typically
+ * called before the resource is destroyed.
+ */
+void vmw_context_binding_res_list_kill(struct list_head *head)
+{
+ struct vmw_ctx_binding *entry, *next;
+
+ list_for_each_entry_safe(entry, next, head, res_list)
+ vmw_context_binding_kill(entry);
+}
+
+/**
+ * vmw_context_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_context_binding_res_list_scrub(struct list_head *head)
+{
+ struct vmw_ctx_binding *entry;
+
+ list_for_each_entry(entry, head, res_list) {
+ if (!entry->bi.scrubbed) {
+ (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+ entry->bi.scrubbed = true;
+ }
+ }
+}
+
+/**
+ * vmw_context_binding_state_transfer - Commit staged binding info
+ *
+ * @ctx: Pointer to context to commit the staged binding info to.
+ * @from: Staged binding info built during execbuf.
+ *
+ * Transfers binding info from a temporary structure to the persistent
+ * structure in the context. This can be done once the commands that set up
+ * the staged bindings have been submitted to the device.
+ */
+void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
+ struct vmw_ctx_binding_state *from)
+{
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
+ struct vmw_ctx_binding *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &from->list, ctx_list)
+ vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
+}
+
+/**
+ * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @ctx: The context resource
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_context_rebind_all(struct vmw_resource *ctx)
+{
+ struct vmw_ctx_binding *entry;
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
+ struct vmw_ctx_binding_state *cbs = &uctx->cbs;
+ int ret;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (likely(!entry->bi.scrubbed))
+ continue;
+
+ if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
+ SVGA3D_INVALID_ID))
+ continue;
+
+ ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ entry->bi.scrubbed = false;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+ return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+}
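The binding tracker added above is fed from command validation: a vmw_ctx_bindinfo is filled in for each binding-type command and handed to vmw_context_binding_add() on a staging tracker, which is later committed to the context with vmw_context_binding_state_transfer(). A hedged sketch of staging one shader binding; the staging tracker, context and shader resources are assumed to come from the execbuf path, and the wrapper function is hypothetical:

/*
 * Sketch only: stage a shader binding for later commit.
 * @staged, @ctx and @shader are assumed to be set up by the caller.
 */
static int sketch_stage_shader_binding(struct vmw_ctx_binding_state *staged,
				       struct vmw_resource *ctx,
				       struct vmw_resource *shader,
				       SVGA3dShaderType type)
{
	struct vmw_ctx_bindinfo bi;

	memset(&bi, 0, sizeof(bi));
	bi.ctx = ctx;
	bi.res = shader;
	bi.bt = vmw_ctx_binding_shader;
	bi.i1.shader_type = type;

	/* Range-checks the shader type and records the binding in @staged. */
	return vmw_context_binding_add(staged, &bi);
}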
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index d4e54fcc0acd..a75840211b3c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -290,8 +290,7 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
/**
* vmw_bo_pin - Pin or unpin a buffer object without moving it.
*
- * @bo: The buffer object. Must be reserved, and present either in VRAM
- * or GMR memory.
+ * @bo: The buffer object. Must be reserved.
* @pin: Whether to pin or unpin.
*
*/
@@ -303,10 +302,9 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
int ret;
lockdep_assert_held(&bo->resv->lock.base);
- BUG_ON(old_mem_type != TTM_PL_VRAM &&
- old_mem_type != VMW_PL_GMR);
- pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
+ pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
+ | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
if (pin)
pl_flags |= TTM_PL_FLAG_NO_EVICT;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index c7a549694e59..3bdc0adc656d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -112,6 +112,21 @@
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
struct drm_vmw_update_layout_arg)
+#define DRM_IOCTL_VMW_CREATE_SHADER \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
+ struct drm_vmw_shader_create_arg)
+#define DRM_IOCTL_VMW_UNREF_SHADER \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
+ struct drm_vmw_shader_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
+ union drm_vmw_gb_surface_create_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_REF \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
+ union drm_vmw_gb_surface_reference_arg)
+#define DRM_IOCTL_VMW_SYNCCPU \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
+ struct drm_vmw_synccpu_arg)
/**
* The core DRM version of this macro doesn't account for
@@ -177,6 +192,21 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
vmw_kms_update_layout_ioctl,
DRM_MASTER | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_CREATE_SHADER,
+ vmw_shader_define_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_UNREF_SHADER,
+ vmw_shader_destroy_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
+ vmw_gb_surface_define_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
+ vmw_gb_surface_reference_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_SYNCCPU,
+ vmw_user_dmabuf_synccpu_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
};
static struct pci_device_id vmw_pci_id_list[] = {
@@ -189,6 +219,7 @@ static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
+static int vmw_restrict_dma_mask;
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
@@ -203,6 +234,8 @@ MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
static void vmw_print_capabilities(uint32_t capabilities)
@@ -240,38 +273,52 @@ static void vmw_print_capabilities(uint32_t capabilities)
DRM_INFO(" GMR2.\n");
if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
DRM_INFO(" Screen Object 2.\n");
+ if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
+ DRM_INFO(" Command Buffers.\n");
+ if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
+ DRM_INFO(" Command Buffers 2.\n");
+ if (capabilities & SVGA_CAP_GBOBJECTS)
+ DRM_INFO(" Guest Backed Resources.\n");
}
-
/**
- * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at
- * the start of a buffer object.
+ * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
*
- * @dev_priv: The device private structure.
+ * @dev_priv: A device private structure.
*
- * This function will idle the buffer using an uninterruptible wait, then
- * map the first page and initialize a pending occlusion query result structure,
- * Finally it will unmap the buffer.
+ * This function creates a small buffer object that holds the query
+ * result for dummy queries emitted as query barriers.
+ * The function will then map the first page and initialize a pending
+ * occlusion query result structure. Finally, it will unmap the buffer.
+ * No interruptible waits are done within this function.
*
- * TODO: Since we're only mapping a single page, we should optimize the map
- * to use kmap_atomic / iomap_atomic.
+ * Returns an error if bo creation or initialization fails.
*/
-static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
+static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
+ int ret;
+ struct ttm_buffer_object *bo;
struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result;
bool dummy;
- int ret;
- struct ttm_bo_device *bdev = &dev_priv->bdev;
- struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
- ttm_bo_reserve(bo, false, false, false, 0);
- spin_lock(&bdev->fence_lock);
- ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bdev->fence_lock);
+ /*
+ * Create the bo as pinned, so that a tryreserve will
+ * immediately succeed. This is because we're the only
+ * user of the bo currently.
+ */
+ ret = ttm_bo_create(&dev_priv->bdev,
+ PAGE_SIZE,
+ ttm_bo_type_device,
+ &vmw_sys_ne_placement,
+ 0, false, NULL,
+ &bo);
+
if (unlikely(ret != 0))
- (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
- 10*HZ);
+ return ret;
+
+ ret = ttm_bo_reserve(bo, false, true, false, 0);
+ BUG_ON(ret != 0);
ret = ttm_bo_kmap(bo, 0, 1, &map);
if (likely(ret == 0)) {
@@ -280,34 +327,19 @@ static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
result->state = SVGA3D_QUERYSTATE_PENDING;
result->result32 = 0xff;
ttm_bo_kunmap(&map);
- } else
- DRM_ERROR("Dummy query buffer map failed.\n");
+ }
+ vmw_bo_pin(bo, false);
ttm_bo_unreserve(bo);
-}
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Dummy query buffer map failed.\n");
+ ttm_bo_unref(&bo);
+ } else
+ dev_priv->dummy_query_bo = bo;
-/**
- * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
- *
- * @dev_priv: A device private structure.
- *
- * This function creates a small buffer object that holds the query
- * result for dummy queries emitted as query barriers.
- * No interruptible waits are done within this function.
- *
- * Returns an error if bo creation fails.
- */
-static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
-{
- return ttm_bo_create(&dev_priv->bdev,
- PAGE_SIZE,
- ttm_bo_type_device,
- &vmw_vram_sys_placement,
- 0, false, NULL,
- &dev_priv->dummy_query_bo);
+ return ret;
}
-
static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
@@ -318,14 +350,24 @@ static int vmw_request_device(struct vmw_private *dev_priv)
return ret;
}
vmw_fence_fifo_up(dev_priv->fman);
+ if (dev_priv->has_mob) {
+ ret = vmw_otables_setup(dev_priv);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Unable to initialize "
+ "guest Memory OBjects.\n");
+ goto out_no_mob;
+ }
+ }
ret = vmw_dummy_query_bo_create(dev_priv);
if (unlikely(ret != 0))
goto out_no_query_bo;
- vmw_dummy_query_bo_prepare(dev_priv);
return 0;
out_no_query_bo:
+ if (dev_priv->has_mob)
+ vmw_otables_takedown(dev_priv);
+out_no_mob:
vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
return ret;
@@ -341,10 +383,13 @@ static void vmw_release_device(struct vmw_private *dev_priv)
BUG_ON(dev_priv->pinned_bo != NULL);
ttm_bo_unref(&dev_priv->dummy_query_bo);
+ if (dev_priv->has_mob)
+ vmw_otables_takedown(dev_priv);
vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
+
/**
* Increase the 3d resource refcount.
* If the count was prevously zero, initialize the fifo, switching to svga
@@ -510,6 +555,33 @@ out_fixup:
return 0;
}
+/**
+ * vmw_dma_masks - set required page- and dma masks
+ *
+ * @dev_priv: Pointer to struct vmw_private
+ *
+ * With 32-bit we can only handle 32 bit PFNs. Optionally set that
+ * restriction also for 64-bit systems.
+ */
+#ifdef CONFIG_INTEL_IOMMU
+static int vmw_dma_masks(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (intel_iommu_enabled &&
+ (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
+ DRM_INFO("Restricting DMA addresses to 44 bits.\n");
+ return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+ }
+ return 0;
+}
+#else
+static int vmw_dma_masks(struct vmw_private *dev_priv)
+{
+ return 0;
+}
+#endif
+
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
struct vmw_private *dev_priv;
@@ -532,6 +604,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
+ mutex_init(&dev_priv->binding_mutex);
rwlock_init(&dev_priv->resource_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
@@ -578,14 +651,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
vmw_get_initial_size(dev_priv);
- if (dev_priv->capabilities & SVGA_CAP_GMR) {
- dev_priv->max_gmr_descriptors =
- vmw_read(dev_priv,
- SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
+ if (dev_priv->capabilities & SVGA_CAP_GMR2) {
dev_priv->max_gmr_ids =
vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
- }
- if (dev_priv->capabilities & SVGA_CAP_GMR2) {
dev_priv->max_gmr_pages =
vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
dev_priv->memory_size =
@@ -598,23 +666,42 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
*/
dev_priv->memory_size = 512*1024*1024;
}
+ dev_priv->max_mob_pages = 0;
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+ uint64_t mem_size =
+ vmw_read(dev_priv,
+ SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+
+ dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
+ dev_priv->prim_bb_mem =
+ vmw_read(dev_priv,
+ SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
+ } else
+ dev_priv->prim_bb_mem = dev_priv->vram_size;
+
+ ret = vmw_dma_masks(dev_priv);
+ if (unlikely(ret != 0)) {
+ mutex_unlock(&dev_priv->hw_mutex);
+ goto out_err0;
+ }
+
+ if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
+ dev_priv->prim_bb_mem = dev_priv->vram_size;
mutex_unlock(&dev_priv->hw_mutex);
vmw_print_capabilities(dev_priv->capabilities);
- if (dev_priv->capabilities & SVGA_CAP_GMR) {
+ if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max GMR ids is %u\n",
(unsigned)dev_priv->max_gmr_ids);
- DRM_INFO("Max GMR descriptors is %u\n",
- (unsigned)dev_priv->max_gmr_descriptors);
- }
- if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max number of GMR pages is %u\n",
(unsigned)dev_priv->max_gmr_pages);
DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
(unsigned)dev_priv->memory_size / 1024);
}
+ DRM_INFO("Maximum display memory size is %u kiB\n",
+ dev_priv->prim_bb_mem / 1024);
DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
dev_priv->vram_start, dev_priv->vram_size / 1024);
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
@@ -649,12 +736,22 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->has_gmr = true;
if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
- dev_priv->max_gmr_ids) != 0) {
+ VMW_PL_GMR) != 0) {
DRM_INFO("No GMR memory available. "
"Graphics memory resources are very limited.\n");
dev_priv->has_gmr = false;
}
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+ dev_priv->has_mob = true;
+ if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
+ VMW_PL_MOB) != 0) {
+ DRM_INFO("No MOB memory available. "
+ "3D will be disabled.\n");
+ dev_priv->has_mob = false;
+ }
+ }
+
dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
dev_priv->mmio_size);
@@ -757,6 +854,8 @@ out_err4:
iounmap(dev_priv->mmio_virt);
out_err3:
arch_phys_wc_del(dev_priv->mmio_mtrr);
+ if (dev_priv->has_mob)
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
if (dev_priv->has_gmr)
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
@@ -801,6 +900,8 @@ static int vmw_driver_unload(struct drm_device *dev)
ttm_object_device_release(&dev_priv->tdev);
iounmap(dev_priv->mmio_virt);
arch_phys_wc_del(dev_priv->mmio_mtrr);
+ if (dev_priv->has_mob)
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
if (dev_priv->has_gmr)
(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
@@ -840,6 +941,7 @@ static void vmw_postclose(struct drm_device *dev,
drm_master_put(&vmw_fp->locked_master);
}
+ vmw_compat_shader_man_destroy(vmw_fp->shman);
ttm_object_file_release(&vmw_fp->tfile);
kfree(vmw_fp);
}
@@ -859,11 +961,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
if (unlikely(vmw_fp->tfile == NULL))
goto out_no_tfile;
+ vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
+ if (IS_ERR(vmw_fp->shman))
+ goto out_no_shman;
+
file_priv->driver_priv = vmw_fp;
dev_priv->bdev.dev_mapping = dev->dev_mapping;
return 0;
+out_no_shman:
+ ttm_object_file_release(&vmw_fp->tfile);
out_no_tfile:
kfree(vmw_fp);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 20890ad8408b..ecaa302a6154 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,9 +40,9 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20120209"
+#define VMWGFX_DRIVER_DATE "20121114"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 4
+#define VMWGFX_DRIVER_MINOR 5
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -50,19 +50,39 @@
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
+#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
+
+/*
+ * Perhaps we should have sysfs entries for these.
+ */
+#define VMWGFX_NUM_GB_CONTEXT 256
+#define VMWGFX_NUM_GB_SHADER 20000
+#define VMWGFX_NUM_GB_SURFACE 32768
+#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
+ VMWGFX_NUM_GB_SHADER +\
+ VMWGFX_NUM_GB_SURFACE +\
+ VMWGFX_NUM_GB_SCREEN_TARGET)
#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
+#define VMW_PL_MOB TTM_PL_PRIV1
+#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
+#define VMW_RES_SHADER ttm_driver_type4
+
+struct vmw_compat_shader_manager;
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
struct list_head fence_events;
+ bool gb_aware;
+ struct vmw_compat_shader_manager *shman;
};
struct vmw_dma_buffer {
@@ -82,6 +102,7 @@ struct vmw_dma_buffer {
struct vmw_validate_buffer {
struct ttm_validate_buffer base;
struct drm_hash_item hash;
+ bool validate_as_mob;
};
struct vmw_res_func;
@@ -98,6 +119,7 @@ struct vmw_resource {
const struct vmw_res_func *func;
struct list_head lru_head; /* Protected by the resource lock */
struct list_head mob_head; /* Protected by @backup reserved */
+ struct list_head binding_head; /* Protected by binding_mutex */
void (*res_free) (struct vmw_resource *res);
void (*hw_destroy) (struct vmw_resource *res);
};
@@ -106,6 +128,7 @@ enum vmw_res_type {
vmw_res_context,
vmw_res_surface,
vmw_res_stream,
+ vmw_res_shader,
vmw_res_max
};
@@ -154,6 +177,7 @@ struct vmw_fifo_state {
};
struct vmw_relocation {
+ SVGAMobId *mob_loc;
SVGAGuestPtr *location;
uint32_t index;
};
@@ -229,11 +253,77 @@ struct vmw_piter {
struct page *(*page)(struct vmw_piter *);
};
+/*
+ * enum vmw_ctx_binding_type - abstract resource to context binding types
+ */
+enum vmw_ctx_binding_type {
+ vmw_ctx_binding_shader,
+ vmw_ctx_binding_rt,
+ vmw_ctx_binding_tex,
+ vmw_ctx_binding_max
+};
+
+/**
+ * struct vmw_ctx_bindinfo - structure representing a single context binding
+ *
+ * @ctx: Pointer to the context structure. NULL means the binding is not
+ * active.
+ * @res: Non ref-counted pointer to the bound resource.
+ * @bt: The binding type.
+ * @i1: Union of information needed to unbind.
+ */
+struct vmw_ctx_bindinfo {
+ struct vmw_resource *ctx;
+ struct vmw_resource *res;
+ enum vmw_ctx_binding_type bt;
+ bool scrubbed;
+ union {
+ SVGA3dShaderType shader_type;
+ SVGA3dRenderTargetType rt_type;
+ uint32 texture_stage;
+ } i1;
+};
+
+/**
+ * struct vmw_ctx_binding - structure representing a single context binding
+ * - suitable for tracking in a context
+ *
+ * @ctx_list: List head for context.
+ * @res_list: List head for bound resource.
+ * @bi: Binding info
+ */
+struct vmw_ctx_binding {
+ struct list_head ctx_list;
+ struct list_head res_list;
+ struct vmw_ctx_bindinfo bi;
+};
+
+
+/**
+ * struct vmw_ctx_binding_state - context binding state
+ *
+ * @list: linked list of individual bindings.
+ * @render_targets: Render target bindings.
+ * @texture_units: Texture units/samplers bindings.
+ * @shaders: Shader bindings.
+ *
+ * Note that this structure also provides storage space for the individual
+ * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
+ * for individual bindings.
+ *
+ */
+struct vmw_ctx_binding_state {
+ struct list_head list;
+ struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
+ struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
+ struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
+};
+
struct vmw_sw_context{
struct drm_open_hash res_ht;
bool res_ht_initialized;
	bool kernel; /**< is the call made from the kernel */
- struct ttm_object_file *tfile;
+ struct vmw_fpriv *fp;
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
@@ -250,6 +340,8 @@ struct vmw_sw_context{
struct vmw_resource *last_query_ctx;
bool needs_post_query_barrier;
struct vmw_resource *error_resource;
+ struct vmw_ctx_binding_state staged_bindings;
+ struct list_head staged_shaders;
};
struct vmw_legacy_display;
@@ -281,6 +373,7 @@ struct vmw_private {
unsigned int io_start;
uint32_t vram_start;
uint32_t vram_size;
+ uint32_t prim_bb_mem;
uint32_t mmio_start;
uint32_t mmio_size;
uint32_t fb_max_width;
@@ -290,11 +383,12 @@ struct vmw_private {
__le32 __iomem *mmio_virt;
int mmio_mtrr;
uint32_t capabilities;
- uint32_t max_gmr_descriptors;
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
+ uint32_t max_mob_pages;
uint32_t memory_size;
bool has_gmr;
+ bool has_mob;
struct mutex hw_mutex;
/*
@@ -370,6 +464,7 @@ struct vmw_private {
struct vmw_sw_context ctx;
struct mutex cmdbuf_mutex;
+ struct mutex binding_mutex;
/**
* Operating mode.
@@ -415,6 +510,12 @@ struct vmw_private {
* DMA mapping stuff.
*/
enum vmw_dma_map_mode map_mode;
+
+ /*
+ * Guest Backed stuff
+ */
+ struct ttm_buffer_object *otable_bo;
+ struct vmw_otable *otables;
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -471,23 +572,14 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
* Resource utilities - vmwgfx_resource.c
*/
struct vmw_user_resource_conv;
-extern const struct vmw_user_resource_conv *user_surface_converter;
-extern const struct vmw_user_resource_conv *user_context_converter;
-extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
-extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_context_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- int id,
- struct vmw_resource **p_res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
@@ -499,18 +591,6 @@ extern int vmw_user_resource_lookup_handle(
uint32_t handle,
const struct vmw_user_resource_conv *converter,
struct vmw_resource **p_res);
-extern void vmw_surface_res_free(struct vmw_resource *res);
-extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_surface_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, int *id);
-extern int vmw_surface_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct vmw_dma_buffer *vmw_bo,
@@ -519,10 +599,21 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
struct ttm_object_file *tfile);
+extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t size,
+ bool shareable,
+ uint32_t *handle,
+ struct vmw_dma_buffer **p_dma_buf);
+extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+ struct vmw_dma_buffer *dma_buf,
+ uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
@@ -622,10 +713,16 @@ extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
+extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
+extern struct ttm_placement vmw_mob_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
+extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
+extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
+extern const struct vmw_sg_table *
+vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
const struct vmw_sg_table *vsgt,
unsigned long p_offs);
@@ -701,7 +798,7 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 * IRQs and waiting - vmwgfx_irq.c
*/
-extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
uint32_t seqno, bool interruptible,
unsigned long timeout);
@@ -832,6 +929,101 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
uint32_t handle, uint32_t flags,
int *prime_fd);
+/*
+ * Memory object (MOB) management - vmwgfx_mob.c
+ */
+struct vmw_mob;
+extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
+ const struct vmw_sg_table *vsgt,
+ unsigned long num_data_pages, int32_t mob_id);
+extern void vmw_mob_unbind(struct vmw_private *dev_priv,
+ struct vmw_mob *mob);
+extern void vmw_mob_destroy(struct vmw_mob *mob);
+extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
+extern int vmw_otables_setup(struct vmw_private *dev_priv);
+extern void vmw_otables_takedown(struct vmw_private *dev_priv);
+
+/*
+ * Context management - vmwgfx_context.c
+ */
+
+extern const struct vmw_user_resource_conv *user_context_converter;
+
+extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
+
+extern int vmw_context_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ int id,
+ struct vmw_resource **p_res);
+extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *ci);
+extern void
+vmw_context_binding_state_transfer(struct vmw_resource *res,
+ struct vmw_ctx_binding_state *cbs);
+extern void vmw_context_binding_res_list_kill(struct list_head *head);
+extern void vmw_context_binding_res_list_scrub(struct list_head *head);
+extern int vmw_context_rebind_all(struct vmw_resource *ctx);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
+
+/*
+ * Surface management - vmwgfx_surface.c
+ */
+
+extern const struct vmw_user_resource_conv *user_surface_converter;
+
+extern void vmw_surface_res_free(struct vmw_resource *res);
+extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle, int *id);
+extern int vmw_surface_validate(struct vmw_private *dev_priv,
+ struct vmw_surface *srf);
+
+/*
+ * Shader management - vmwgfx_shader.c
+ */
+
+extern const struct vmw_user_resource_conv *user_shader_converter;
+
+extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+ SVGA3dShaderType shader_type,
+ u32 *user_key);
+extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+ struct list_head *list);
+extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+ struct list_head *list);
+extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+ u32 user_key,
+ SVGA3dShaderType shader_type,
+ struct list_head *list);
+extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+ u32 user_key, const void *bytecode,
+ SVGA3dShaderType shader_type,
+ size_t size,
+ struct ttm_object_file *tfile,
+ struct list_head *list);
+extern struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv);
+extern void
+vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+
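The compat shader manager declared above lets legacy SHADER_DEFINE/DESTROY commands be backed by guest-backed shaders behind user space's back; additions and removals made while checking a command batch are staged on a list and only committed into the manager if the whole batch goes through, otherwise they are reverted. A minimal user-space sketch of that stage/commit/revert pattern (the toy_* names are illustrative stand-ins, not the driver's types):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the real manager and shader objects. */
struct toy_shader {
	unsigned key;
	struct toy_shader *next;
};

struct toy_manager {
	struct toy_shader *committed;	/* visible to later lookups */
};

/* Stage an addition on @staged; the manager itself is untouched. */
static int toy_shader_add(struct toy_shader **staged, unsigned key)
{
	struct toy_shader *s = malloc(sizeof(*s));

	if (!s)
		return -1;
	s->key = key;
	s->next = *staged;
	*staged = s;
	return 0;
}

/* Commit: splice the staged entries into the manager. */
static void toy_shaders_commit(struct toy_manager *man, struct toy_shader **staged)
{
	while (*staged) {
		struct toy_shader *s = *staged;

		*staged = s->next;
		s->next = man->committed;
		man->committed = s;
	}
}

/* Revert: throw the staged entries away. */
static void toy_shaders_revert(struct toy_shader **staged)
{
	while (*staged) {
		struct toy_shader *s = *staged;

		*staged = s->next;
		free(s);
	}
}

int main(void)
{
	struct toy_manager man = { .committed = NULL };
	struct toy_shader *staged = NULL;

	toy_shader_add(&staged, 42);
	toy_shaders_commit(&man, &staged);	/* command batch succeeded */

	toy_shader_add(&staged, 43);
	toy_shaders_revert(&staged);		/* command batch failed */

	printf("committed key: %u\n", man.committed->key);
	toy_shaders_revert(&man.committed);	/* clean up */
	return 0;
}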
/**
* Inline helper functions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 599f6469a1eb..269b85cc875a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -54,6 +54,8 @@ struct vmw_resource_relocation {
* @res: Ref-counted pointer to the resource.
* @switch_backup: Boolean whether to switch backup buffer on unreserve.
* @new_backup: Refcounted pointer to the new backup buffer.
+ * @staged_bindings: If @res is a context, tracks bindings set up during
+ * the command batch. Otherwise NULL.
+ * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
* @first_usage: Set to true the first time the resource is referenced in
* the command stream.
@@ -65,12 +67,32 @@ struct vmw_resource_val_node {
struct drm_hash_item hash;
struct vmw_resource *res;
struct vmw_dma_buffer *new_backup;
+ struct vmw_ctx_binding_state *staged_bindings;
unsigned long new_backup_offset;
bool first_usage;
bool no_buffer_needed;
};
/**
+ * struct vmw_cmd_entry - Describe a command for the verifier
+ *
+ * @func: Call-back used to validate the command.
+ * @user_allow: Whether allowed from the execbuf ioctl.
+ * @gb_disable: Whether disabled if guest-backed objects are available.
+ * @gb_enable: Whether enabled iff guest-backed objects are available.
+ */
+struct vmw_cmd_entry {
+ int (*func) (struct vmw_private *, struct vmw_sw_context *,
+ SVGA3dCmdHeader *);
+ bool user_allow;
+ bool gb_disable;
+ bool gb_enable;
+};
+
+#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
+ [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
+ (_gb_disable), (_gb_enable)}
+
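The VMW_CMD_DEF() initializer above builds a table indexed by (opcode - SVGA_3D_CMD_BASE), so command checking becomes a bounds check plus a per-entry policy: is the command allowed from user space, and is it valid with or without guest-backed objects. A self-contained sketch of the same table-driven verifier (toy opcodes, handlers and flag values; the error codes here are arbitrary):

#include <stdio.h>
#include <stdbool.h>

#define CMD_BASE 1000
#define CMD_MAX  1003

struct cmd_entry {
	int  (*func)(int arg);
	bool user_allow;	/* allowed from the execbuf ioctl   */
	bool gb_disable;	/* rejected when GB objects present */
	bool gb_enable;		/* rejected when GB objects absent  */
};

static int cmd_ok(int arg)      { (void)arg; return 0; }
static int cmd_invalid(int arg) { (void)arg; return -1; }

#define CMD_DEF(_cmd, _func, _user, _gbd, _gbe) \
	[(_cmd) - CMD_BASE] = { (_func), (_user), (_gbd), (_gbe) }

static const struct cmd_entry cmd_entries[CMD_MAX - CMD_BASE] = {
	CMD_DEF(1000, cmd_ok,      true,  false, false),
	CMD_DEF(1001, cmd_invalid, false, false, false),
	CMD_DEF(1002, cmd_ok,      true,  false, true),
};

static int check_cmd(unsigned id, bool from_user, bool has_gb)
{
	const struct cmd_entry *e;

	if (id < CMD_BASE || id >= CMD_MAX)
		return -1;			/* unknown opcode */
	e = &cmd_entries[id - CMD_BASE];
	if (from_user && !e->user_allow)
		return -2;			/* privileged command */
	if (has_gb && e->gb_disable)
		return -3;			/* deprecated on GB hardware */
	if (!has_gb && e->gb_enable)
		return -4;			/* needs GB hardware */
	return e->func(0);
}

int main(void)
{
	printf("%d %d %d\n",
	       check_cmd(1000, true, false),
	       check_cmd(1001, true, false),
	       check_cmd(1002, true, false));
	return 0;
}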
+/**
* vmw_resource_unreserve - unreserve resources previously reserved for
* command submission.
*
@@ -87,6 +109,18 @@ static void vmw_resource_list_unreserve(struct list_head *list,
struct vmw_dma_buffer *new_backup =
backoff ? NULL : val->new_backup;
+ /*
+ * Transfer staged context bindings to the
+ * persistent context binding tracker.
+ */
+ if (unlikely(val->staged_bindings)) {
+ if (!backoff) {
+ vmw_context_binding_state_transfer
+ (val->res, val->staged_bindings);
+ }
+ kfree(val->staged_bindings);
+ val->staged_bindings = NULL;
+ }
vmw_resource_unreserve(res, new_backup,
val->new_backup_offset);
vmw_dmabuf_unreference(&val->new_backup);
@@ -146,6 +180,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
}
/**
+ * vmw_resource_context_res_add - Put resources previously bound to a context on
+ * the validation list
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @sw_context: Pointer to a software context used for this command submission
+ * @ctx: Pointer to the context resource
+ *
+ * This function puts all resources that were previously bound to @ctx on
+ * the resource validation list. This is part of the context state reemission
+ */
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ struct vmw_resource *ctx)
+{
+ struct list_head *binding_list;
+ struct vmw_ctx_binding *entry;
+ int ret = 0;
+ struct vmw_resource *res;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ binding_list = vmw_context_binding_list(ctx);
+
+ list_for_each_entry(entry, binding_list, ctx_list) {
+ res = vmw_resource_reference_unless_doomed(entry->bi.res);
+ if (unlikely(res == NULL))
+ continue;
+
+ ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+ vmw_resource_unreference(&res);
+ if (unlikely(ret != 0))
+ break;
+ }
+
+ mutex_unlock(&dev_priv->binding_mutex);
+ return ret;
+}
+
+/**
* vmw_resource_relocation_add - Add a relocation to the relocation list
*
* @list: Pointer to head of relocation list.
@@ -201,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
{
struct vmw_resource_relocation *rel;
- list_for_each_entry(rel, list, head)
- cb[rel->offset] = rel->res->id;
+ list_for_each_entry(rel, list, head) {
+ if (likely(rel->res != NULL))
+ cb[rel->offset] = rel->res->id;
+ else
+ cb[rel->offset] = SVGA_3D_CMD_NOP;
+ }
}
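Because a checked command can now be neutralized after parsing (compat shader define/destroy are handled entirely in the kernel and must not reach the device), a relocation whose resource pointer is NULL patches the opcode to SVGA_3D_CMD_NOP instead of writing a resource id. A small stand-alone illustration of applying such a relocation list (toy types and constants only):

#include <stdio.h>
#include <stddef.h>

#define TOY_CMD_NOP 0xffffu	/* stand-in for SVGA_3D_CMD_NOP */

struct toy_res {
	unsigned id;
};

struct toy_reloc {
	size_t offset;			/* index into the command buffer */
	const struct toy_res *res;	/* NULL means "turn into a NOP"  */
};

static void apply_relocs(unsigned *cb, const struct toy_reloc *rel, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		cb[rel[i].offset] = rel[i].res ? rel[i].res->id : TOY_CMD_NOP;
}

int main(void)
{
	unsigned cb[4] = { 1, 2, 3, 4 };
	struct toy_res surf = { .id = 77 };
	struct toy_reloc relocs[] = {
		{ .offset = 1, .res = &surf },
		{ .offset = 3, .res = NULL },
	};

	apply_relocs(cb, relocs, 2);
	printf("%u %u %u %u\n", cb[0], cb[1], cb[2], cb[3]);	/* 1 77 3 65535 */
	return 0;
}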
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
@@ -224,6 +300,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
*
* @sw_context: The software context used for this command submission batch.
* @bo: The buffer object to add.
+ * @validate_as_mob: Validate this buffer as a MOB.
* @p_val_node: If non-NULL Will be updated with the validate node number
* on return.
*
@@ -232,6 +309,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct ttm_buffer_object *bo,
+ bool validate_as_mob,
uint32_t *p_val_node)
{
uint32_t val_node;
@@ -244,6 +322,10 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
&hash) == 0)) {
vval_buf = container_of(hash, struct vmw_validate_buffer,
hash);
+ if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
+ DRM_ERROR("Inconsistent buffer usage.\n");
+ return -EINVAL;
+ }
val_buf = &vval_buf->base;
val_node = vval_buf - sw_context->val_bufs;
} else {
@@ -266,6 +348,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
val_buf->bo = ttm_bo_reference(bo);
val_buf->reserved = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+ vval_buf->validate_as_mob = validate_as_mob;
}
sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
@@ -302,7 +385,8 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
struct ttm_buffer_object *bo = &res->backup->base;
ret = vmw_bo_to_validate_list
- (sw_context, bo, NULL);
+ (sw_context, bo,
+ vmw_resource_needs_backup(res), NULL);
if (unlikely(ret != 0))
return ret;
@@ -339,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
}
/**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
 * @converter: User-space visible type specific information.
- * @id: Pointer to the location in the command buffer currently being
+ * @id: user-space resource id handle.
+ * @id_loc: Pointer to the location in the command buffer currently being
* parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
*/
-static int vmw_cmd_res_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
- const struct vmw_user_resource_conv *converter,
- uint32_t *id,
- struct vmw_resource_val_node **p_val)
+static int
+vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t id,
+ uint32_t *id_loc,
+ struct vmw_resource_val_node **p_val)
{
struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[res_type];
@@ -362,15 +451,22 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_resource_val_node *node;
int ret;
- if (*id == SVGA3D_INVALID_ID)
+ if (id == SVGA3D_INVALID_ID) {
+ if (p_val)
+ *p_val = NULL;
+ if (res_type == vmw_res_context) {
+ DRM_ERROR("Illegal context invalid id.\n");
+ return -EINVAL;
+ }
return 0;
+ }
/*
* Fastpath in case of repeated commands referencing the same
* resource
*/
- if (likely(rcache->valid && *id == rcache->handle)) {
+ if (likely(rcache->valid && id == rcache->handle)) {
const struct vmw_resource *res = rcache->res;
rcache->node->first_usage = false;
@@ -379,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
- id - sw_context->buf_start);
+ id_loc - sw_context->buf_start);
}
ret = vmw_user_resource_lookup_handle(dev_priv,
- sw_context->tfile,
- *id,
+ sw_context->fp->tfile,
+ id,
converter,
&res);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n",
- (unsigned) *id);
+ (unsigned) id);
dump_stack();
return ret;
}
rcache->valid = true;
rcache->res = res;
- rcache->handle = *id;
+ rcache->handle = id;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
- id - sw_context->buf_start);
+ id_loc - sw_context->buf_start);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -411,6 +507,22 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
rcache->node = node;
if (p_val)
*p_val = node;
+
+ if (dev_priv->has_mob && node->first_usage &&
+ res_type == vmw_res_context) {
+ ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
+ node->staged_bindings =
+ kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
+ if (node->staged_bindings == NULL) {
+ DRM_ERROR("Failed to allocate context binding "
+ "information.\n");
+ goto out_no_reloc;
+ }
+ INIT_LIST_HEAD(&node->staged_bindings->list);
+ }
+
vmw_resource_unreference(&res);
return 0;
@@ -422,6 +534,59 @@ out_no_reloc:
}
/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type specific information.
+ * @id_loc: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
+ */
+static int
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t *id_loc,
+ struct vmw_resource_val_node **p_val)
+{
+ return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
+ converter, *id_loc, id_loc, p_val);
+}
+
+/**
+ * vmw_rebind_contexts - Rebind all resources previously bound to
+ * referenced contexts.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Rebind context binding points that have been scrubbed because of eviction.
+ */
+static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
+{
+ struct vmw_resource_val_node *val;
+ int ret;
+
+ list_for_each_entry(val, &sw_context->resource_list, head) {
+ if (likely(!val->staged_bindings))
+ continue;
+
+ ret = vmw_context_rebind_all(val->res);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to rebind context.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
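Bindings can have been scrubbed if a context was evicted since the last submission, so on guest-backed hardware the driver walks the per-batch resource list and re-emits bindings for every context node that carries staged binding state, stopping at the first failure. A stripped-down user-space sketch of that loop (toy node type; the rebind callback and error handling are illustrative only):

#include <errno.h>
#include <stdio.h>
#include <stdbool.h>

struct toy_node {
	bool has_staged_bindings;	/* only context nodes carry these */
	int  ctx_id;
};

/* Stand-in for re-emitting all scrubbed bindings of one context. */
static int toy_rebind_one(const struct toy_node *node)
{
	printf("re-emitting bindings for context %d\n", node->ctx_id);
	return 0;
}

static int toy_rebind_contexts(const struct toy_node *nodes, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		if (!nodes[i].has_staged_bindings)
			continue;

		ret = toy_rebind_one(&nodes[i]);
		if (ret != 0) {
			if (ret != -EINTR)	/* stay quiet if interrupted */
				fprintf(stderr, "rebind failed: %d\n", ret);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	struct toy_node nodes[] = {
		{ .has_staged_bindings = false, .ctx_id = 1 },
		{ .has_staged_bindings = true,  .ctx_id = 2 },
	};

	return toy_rebind_contexts(nodes, 2);
}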
+/**
* vmw_cmd_cid_check - Check a command header for valid context information.
*
* @dev_priv: Pointer to a device private structure.
@@ -453,17 +618,35 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdSetRenderTarget body;
} *cmd;
+ struct vmw_resource_val_node *ctx_node;
+ struct vmw_resource_val_node *res_node;
int ret;
- ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ &ctx_node);
if (unlikely(ret != 0))
return ret;
- cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->body.target.sid, NULL);
- return ret;
+ &cmd->body.target.sid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (dev_priv->has_mob) {
+ struct vmw_ctx_bindinfo bi;
+
+ bi.ctx = ctx_node->res;
+ bi.res = res_node ? res_node->res : NULL;
+ bi.bt = vmw_ctx_binding_rt;
+ bi.i1.rt_type = cmd->body.type;
+ return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+ }
+
+ return 0;
}
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
@@ -519,11 +702,6 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
cmd = container_of(header, struct vmw_sid_cmd, header);
- if (unlikely(!sw_context->kernel)) {
- DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
- return -EPERM;
- }
-
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.srcImage.sid, NULL);
@@ -541,11 +719,6 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
cmd = container_of(header, struct vmw_sid_cmd, header);
- if (unlikely(!sw_context->kernel)) {
- DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
- return -EPERM;
- }
-
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, &cmd->body.sid,
NULL);
@@ -586,7 +759,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
sw_context->needs_post_query_barrier = true;
ret = vmw_bo_to_validate_list(sw_context,
sw_context->cur_query_bo,
- NULL);
+ dev_priv->has_mob, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -594,7 +767,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
ret = vmw_bo_to_validate_list(sw_context,
dev_priv->dummy_query_bo,
- NULL);
+ dev_priv->has_mob, NULL);
if (unlikely(ret != 0))
return ret;
@@ -672,6 +845,66 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
}
/**
+ * vmw_translate_mob_pointer - Prepare to translate a user-space buffer
+ * handle to a MOB id.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @id: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return, will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @id.
+ *
+ * This function saves information needed to translate a user-space buffer
+ * handle to a MOB id. The translation does not take place immediately, but
+ * during a call to vmw_apply_relocations(). This function builds a relocation
+ * list and a list of buffers to validate. The former needs to be freed using
+ * either vmw_apply_relocations() or vmw_free_relocations(). The latter
+ * needs to be freed using vmw_clear_validations.
+ */
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGAMobId *id,
+ struct vmw_dma_buffer **vmw_bo_p)
+{
+ struct vmw_dma_buffer *vmw_bo = NULL;
+ struct ttm_buffer_object *bo;
+ uint32_t handle = *id;
+ struct vmw_relocation *reloc;
+ int ret;
+
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not find or use MOB buffer.\n");
+ return -EINVAL;
+ }
+ bo = &vmw_bo->base;
+
+ if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
+ DRM_ERROR("Max number relocations per submission"
+ " exceeded\n");
+ ret = -EINVAL;
+ goto out_no_reloc;
+ }
+
+ reloc = &sw_context->relocs[sw_context->cur_reloc++];
+ reloc->mob_loc = id;
+ reloc->location = NULL;
+
+ ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
+
+ *vmw_bo_p = vmw_bo;
+ return 0;
+
+out_no_reloc:
+ vmw_dmabuf_unreference(&vmw_bo);
+	*vmw_bo_p = NULL;
+ return ret;
+}
+
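As the kernel-doc above notes, translating a user handle to a MOB id is deferred: at parse time the driver only records where in the command stream the id must be patched and queues the backing buffer for validation; the id itself is written by vmw_apply_relocations() once the buffer has a placement. A toy sketch of that record-now, patch-later flow (simplified context and lookup; all names are illustrative):

#include <stdio.h>
#include <stdint.h>

#define MAX_RELOCS 4

struct toy_reloc {
	uint32_t *mob_loc;	/* where the MOB id will be written later */
	uint32_t  bo_index;	/* index into the validation list         */
};

struct toy_sw_context {
	struct toy_reloc relocs[MAX_RELOCS];
	unsigned cur_reloc;
};

/* Record that *id must be replaced with the MOB id of buffer @bo_index. */
static int record_mob_reloc(struct toy_sw_context *ctx, uint32_t *id, uint32_t bo_index)
{
	struct toy_reloc *rel;

	if (ctx->cur_reloc >= MAX_RELOCS)
		return -1;	/* relocation budget exceeded */

	rel = &ctx->relocs[ctx->cur_reloc++];
	rel->mob_loc = id;
	rel->bo_index = bo_index;
	return 0;
}

/* Later, once every buffer has a placement, patch the command stream. */
static void apply_mob_relocs(struct toy_sw_context *ctx, const uint32_t *mob_ids)
{
	unsigned i;

	for (i = 0; i < ctx->cur_reloc; i++)
		*ctx->relocs[i].mob_loc = mob_ids[ctx->relocs[i].bo_index];
}

int main(void)
{
	struct toy_sw_context ctx = { .cur_reloc = 0 };
	uint32_t cmd_stream[2] = { 0xdead, 0xbeef };	/* user handles      */
	uint32_t mob_ids[1] = { 512 };			/* set at validation */

	record_mob_reloc(&ctx, &cmd_stream[1], 0);
	apply_mob_relocs(&ctx, mob_ids);
	printf("%x %u\n", (unsigned)cmd_stream[0], (unsigned)cmd_stream[1]);
	return 0;
}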
+/**
* vmw_translate_guest_pointer - Prepare to translate a user-space buffer
* handle to a valid SVGAGuestPtr
*
@@ -701,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
return -EINVAL;
@@ -718,7 +951,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
- ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
+ ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -732,6 +965,30 @@ out_no_reloc:
}
/**
+ * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_begin_gb_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBeginGBQuery q;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_begin_gb_query_cmd,
+ header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->q.cid,
+ NULL);
+}
+
+/**
* vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
@@ -750,12 +1007,64 @@ static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
cmd = container_of(header, struct vmw_begin_query_cmd,
header);
+ if (unlikely(dev_priv->has_mob)) {
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBeginGBQuery q;
+ } gb_cmd;
+
+ BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+ gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
+ gb_cmd.header.size = cmd->header.size;
+ gb_cmd.q.cid = cmd->q.cid;
+ gb_cmd.q.type = cmd->q.type;
+
+ memcpy(cmd, &gb_cmd, sizeof(*cmd));
+ return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
+ }
+
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->q.cid,
NULL);
}
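When the device has MOB support, the legacy query commands are rewritten in place into their guest-backed equivalents; this is only safe because the GB command happens to have the same size, which the BUG_ON asserts. A user-space sketch of the same in-place rewrite, with toy command layouts chosen to be the same size (all names and ids below are made up for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_header { uint32_t id; uint32_t size; };

struct toy_legacy_query { struct toy_header header; uint32_t cid; uint32_t guest_ptr; };
struct toy_gb_query     { struct toy_header header; uint32_t cid; uint32_t mobid;     };

#define TOY_CMD_BEGIN_QUERY     10u
#define TOY_CMD_BEGIN_GB_QUERY  20u

static void rewrite_to_gb(struct toy_legacy_query *cmd)
{
	struct toy_gb_query gb;

	/* Mirrors the BUG_ON in the driver: layouts must match in size. */
	assert(sizeof(gb) == sizeof(*cmd));

	gb.header.id   = TOY_CMD_BEGIN_GB_QUERY;
	gb.header.size = cmd->header.size;
	gb.cid         = cmd->cid;
	gb.mobid       = cmd->guest_ptr;	/* handle translated later */

	memcpy(cmd, &gb, sizeof(*cmd));
}

int main(void)
{
	struct toy_legacy_query q = {
		.header = { .id = TOY_CMD_BEGIN_QUERY, .size = 8 },
		.cid = 3, .guest_ptr = 0xabcd,
	};

	rewrite_to_gb(&q);
	printf("id=%u cid=%u\n", (unsigned)q.header.id, (unsigned)q.cid);
	return 0;
}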
/**
+ * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ struct vmw_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdEndGBQuery q;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_query_cmd, header);
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+ &cmd->q.mobid,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+
+ vmw_dmabuf_unreference(&vmw_bo);
+ return ret;
+}
+
+/**
* vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
@@ -774,6 +1083,25 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_query_cmd, header);
+ if (dev_priv->has_mob) {
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdEndGBQuery q;
+ } gb_cmd;
+
+ BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+ gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
+ gb_cmd.header.size = cmd->header.size;
+ gb_cmd.q.cid = cmd->q.cid;
+ gb_cmd.q.type = cmd->q.type;
+ gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
+ gb_cmd.q.offset = cmd->q.guestResult.offset;
+
+ memcpy(cmd, &gb_cmd, sizeof(*cmd));
+ return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
+ }
+
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
@@ -790,7 +1118,40 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
return ret;
}
-/*
+/**
+ * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ struct vmw_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdWaitForGBQuery q;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_query_cmd, header);
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+ &cmd->q.mobid,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_dmabuf_unreference(&vmw_bo);
+ return 0;
+}
+
+/**
* vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
@@ -809,6 +1170,25 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_query_cmd, header);
+ if (dev_priv->has_mob) {
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdWaitForGBQuery q;
+ } gb_cmd;
+
+ BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+ gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+ gb_cmd.header.size = cmd->header.size;
+ gb_cmd.q.cid = cmd->q.cid;
+ gb_cmd.q.type = cmd->q.type;
+ gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
+ gb_cmd.q.offset = cmd->q.guestResult.offset;
+
+ memcpy(cmd, &gb_cmd, sizeof(*cmd));
+ return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
+ }
+
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
@@ -853,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
+ vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
+ header);
out_no_surface:
vmw_dmabuf_unreference(&vmw_bo);
@@ -921,15 +1302,22 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
struct vmw_tex_state_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSetTextureState state;
- };
+ } *cmd;
SVGA3dTextureState *last_state = (SVGA3dTextureState *)
((unsigned long) header + header->size + sizeof(header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+ struct vmw_resource_val_node *ctx_node;
+ struct vmw_resource_val_node *res_node;
int ret;
- ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ cmd = container_of(header, struct vmw_tex_state_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->state.cid,
+ &ctx_node);
if (unlikely(ret != 0))
return ret;
@@ -939,9 +1327,20 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cur_state->value, NULL);
+ &cur_state->value, &res_node);
if (unlikely(ret != 0))
return ret;
+
+ if (dev_priv->has_mob) {
+ struct vmw_ctx_bindinfo bi;
+
+ bi.ctx = ctx_node->res;
+ bi.res = res_node ? res_node->res : NULL;
+ bi.bt = vmw_ctx_binding_tex;
+ bi.i1.texture_stage = cur_state->stage;
+ vmw_context_binding_add(ctx_node->staged_bindings,
+ &bi);
+ }
}
return 0;
@@ -971,6 +1370,314 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
}
/**
+ * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @res_type: The resource type.
+ * @converter: Information about user-space binding for this resource type.
+ * @res_id: Pointer to the user-space resource handle in the command stream.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers
+ * in the resource metadata just prior to unreserving.
+ */
+static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv
+ *converter,
+ uint32_t *res_id,
+ uint32_t *buf_id,
+ unsigned long backup_offset)
+{
+ int ret;
+ struct vmw_dma_buffer *dma_buf;
+ struct vmw_resource_val_node *val_node;
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
+ converter, res_id, &val_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+ if (unlikely(ret != 0))
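Note that struct vmw_ctx_binding_state preallocates one vmw_ctx_binding slot per possible binding point, so tracking a binding never allocates memory; the list head simply strings together whichever slots are currently in use. A small stand-alone illustration of that preallocated-slots-plus-active-list idea (toy sizes and types, not the SVGA limits):

#include <stdio.h>
#include <stdbool.h>

#define TOY_NUM_RT   4
#define TOY_NUM_TEX  8

struct toy_binding {
	bool active;
	int  res_id;			/* bound resource, valid if active */
	struct toy_binding *next;	/* link in the active list         */
};

struct toy_binding_state {
	struct toy_binding *active;	/* list of in-use slots */
	struct toy_binding rt[TOY_NUM_RT];
	struct toy_binding tex[TOY_NUM_TEX];
};

/* Claim a preallocated slot; no dynamic allocation needed. */
static void toy_bind(struct toy_binding_state *s, struct toy_binding *slot, int res_id)
{
	if (!slot->active) {
		slot->active = true;
		slot->next = s->active;
		s->active = slot;
	}
	slot->res_id = res_id;
}

int main(void)
{
	struct toy_binding_state s = { .active = NULL };
	const struct toy_binding *b;

	toy_bind(&s, &s.rt[0], 100);	/* render target 0 -> resource 100 */
	toy_bind(&s, &s.tex[3], 200);	/* texture stage 3 -> resource 200 */

	for (b = s.active; b; b = b->next)
		printf("bound resource %d\n", b->res_id);
	return 0;
}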
+ return ret;
+
+ if (val_node->first_usage)
+ val_node->no_buffer_needed = true;
+
+ vmw_dmabuf_unreference(&val_node->new_backup);
+ val_node->new_backup = dma_buf;
+ val_node->new_backup_offset = backup_offset;
+
+ return 0;
+}
+
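Backup-buffer switching is likewise deferred: the parser validates the resource, translates the new buffer handle and stores the (buffer, offset) pair on the validation node, and the switch only becomes visible in the resource metadata when the node is unreserved after a successful submission. A toy sketch of staging and then committing such a switch (illustrative types only; no reference counting shown):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_buf { int id; };

struct toy_res {
	struct toy_buf *backup;
	unsigned long   backup_offset;
};

struct toy_val_node {
	struct toy_res *res;
	struct toy_buf *new_backup;		/* staged, not yet visible */
	unsigned long   new_backup_offset;
	bool            first_usage;
	bool            no_buffer_needed;
};

/* Parse time: remember the requested switch on the validation node. */
static void stage_backup_switch(struct toy_val_node *node, struct toy_buf *buf,
				unsigned long offset)
{
	if (node->first_usage)
		node->no_buffer_needed = true;	/* old backup never read */
	node->new_backup = buf;
	node->new_backup_offset = offset;
}

/* Unreserve time: make the switch visible in the resource metadata. */
static void commit_backup_switch(struct toy_val_node *node)
{
	if (node->new_backup) {
		node->res->backup = node->new_backup;
		node->res->backup_offset = node->new_backup_offset;
		node->new_backup = NULL;
	}
}

int main(void)
{
	struct toy_buf mob = { .id = 7 };
	struct toy_res res = { .backup = NULL, .backup_offset = 0 };
	struct toy_val_node node = { .res = &res, .first_usage = true };

	stage_backup_switch(&node, &mob, 4096);
	commit_backup_switch(&node);
	printf("backup %d at offset %lu\n", res.backup->id, res.backup_offset);
	return 0;
}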
+/**
+ * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_bind_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBSurface body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
+
+ return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, &cmd->body.mobid,
+ 0);
+}
+
+/**
+ * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBImage body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBSurface body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdReadbackGBImage body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdReadbackGBSurface body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdInvalidateGBImage body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_invalidate_gb_surface - Validate an
+ * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdInvalidateGBSurface body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, NULL);
+}
+
+
+/**
+ * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_shader_define_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineShader body;
+ } *cmd;
+ int ret;
+ size_t size;
+
+ cmd = container_of(header, struct vmw_shader_define_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(!dev_priv->has_mob))
+ return 0;
+
+ size = cmd->header.size - sizeof(cmd->body);
+ ret = vmw_compat_shader_add(sw_context->fp->shman,
+ cmd->body.shid, cmd + 1,
+ cmd->body.type, size,
+ sw_context->fp->tfile,
+ &sw_context->staged_shaders);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ NULL, &cmd->header.id -
+ sw_context->buf_start);
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_shader_destroy_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyShader body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_shader_destroy_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(!dev_priv->has_mob))
+ return 0;
+
+ ret = vmw_compat_shader_remove(sw_context->fp->shman,
+ cmd->body.shid,
+ cmd->body.type,
+ &sw_context->staged_shaders);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ NULL, &cmd->header.id -
+ sw_context->buf_start);
+
+ return 0;
+}
+
+/**
* vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
* command
*
@@ -986,18 +1693,105 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdSetShader body;
} *cmd;
+ struct vmw_resource_val_node *ctx_node;
int ret;
cmd = container_of(header, struct vmw_set_shader_cmd,
header);
- ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ &ctx_node);
if (unlikely(ret != 0))
return ret;
+ if (dev_priv->has_mob) {
+ struct vmw_ctx_bindinfo bi;
+ struct vmw_resource_val_node *res_node;
+ u32 shid = cmd->body.shid;
+
+ if (shid != SVGA3D_INVALID_ID)
+ (void) vmw_compat_shader_lookup(sw_context->fp->shman,
+ cmd->body.type,
+ &shid);
+
+ ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
+ vmw_res_shader,
+ user_shader_converter,
+ shid,
+ &cmd->body.shid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ bi.ctx = ctx_node->res;
+ bi.res = res_node ? res_node->res : NULL;
+ bi.bt = vmw_ctx_binding_shader;
+ bi.i1.shader_type = cmd->body.type;
+ return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+ }
+
return 0;
}
+/**
+ * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_set_shader_const_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShaderConst body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_set_shader_const_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (dev_priv->has_mob)
+ header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_bind_gb_shader_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBShader body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
+ header);
+
+ return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
+ user_shader_converter,
+ &cmd->body.shid, &cmd->body.mobid,
+ cmd->body.offsetInBytes);
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -1041,50 +1835,173 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
return 0;
}
-typedef int (*vmw_cmd_func) (struct vmw_private *,
- struct vmw_sw_context *,
- SVGA3dCmdHeader *);
-
-#define VMW_CMD_DEF(cmd, func) \
- [cmd - SVGA_3D_CMD_BASE] = func
-
-static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
- VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
+static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
+ true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
- &vmw_cmd_set_render_target_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
- VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
- VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
- VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
- VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
- VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
+ &vmw_cmd_set_render_target_check, true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
+ true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
- &vmw_cmd_blt_surf_screen_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
+ &vmw_cmd_blt_surf_screen_check, false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
+ &vmw_cmd_update_gb_surface, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
+ &vmw_cmd_readback_gb_image, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
+ &vmw_cmd_readback_gb_surface, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
+ &vmw_cmd_invalidate_gb_image, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
+ &vmw_cmd_invalidate_gb_surface, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
+ true, false, true)
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -1095,6 +2012,8 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
uint32_t size_remaining = *size;
SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
int ret;
+ const struct vmw_cmd_entry *entry;
+ bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
/* Handle any none 3D commands */
@@ -1107,18 +2026,40 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
cmd_id -= SVGA_3D_CMD_BASE;
if (unlikely(*size > size_remaining))
- goto out_err;
+ goto out_invalid;
if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
- goto out_err;
+ goto out_invalid;
+
+ entry = &vmw_cmd_entries[cmd_id];
+ if (unlikely(!entry->user_allow && !sw_context->kernel))
+ goto out_privileged;
+
+ if (unlikely(entry->gb_disable && gb))
+ goto out_old;
+
+ if (unlikely(entry->gb_enable && !gb))
+ goto out_new;
- ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
+ ret = entry->func(dev_priv, sw_context, header);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_invalid;
return 0;
-out_err:
- DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
+out_invalid:
+ DRM_ERROR("Invalid SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
+ return -EINVAL;
+out_privileged:
+ DRM_ERROR("Privileged SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
+ return -EPERM;
+out_old:
+ DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
+ return -EINVAL;
+out_new:
+ DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
cmd_id + SVGA_3D_CMD_BASE);
return -EINVAL;
}
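The verifier above is table-driven: each SVGA3D command id indexes a vmw_cmd_entry carrying a handler plus three flags (user_allow, gb_disable, gb_enable), and vmw_cmd_check walks those flags before dispatching. As a reading aid, here is a minimal standalone C sketch of the same flag logic; the type and function names are invented for illustration and are not the driver's:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct cmd_entry {
	int (*func)(void);	/* command handler */
	bool user_allow;	/* may come from user space */
	bool gb_disable;	/* rejected when guest-backed objects are on */
	bool gb_enable;		/* requires guest-backed objects */
};

static int handler_ok(void) { return 0; }

static const struct cmd_entry table[] = {
	{ handler_ok, true,  false, true },	/* user-visible, GB-only command */
	{ handler_ok, false, false, true },	/* kernel-only command */
};

/* Mirrors the check order in vmw_cmd_check(): privilege first, then the
 * old/new hardware mismatch, then the per-command handler. */
static int check(const struct cmd_entry *e, bool from_kernel, bool gb)
{
	if (!e->user_allow && !from_kernel)
		return -EPERM;		/* "Privileged SVGA3D command" */
	if (e->gb_disable && gb)
		return -EINVAL;		/* deprecated on guest-backed hardware */
	if (e->gb_enable && !gb)
		return -EINVAL;		/* needs guest-backed hardware */
	return e->func();
}

int main(void)
{
	printf("%d\n", check(&table[0], false, true));	/* 0 */
	printf("%d\n", check(&table[1], false, true));	/* -EPERM */
	printf("%d\n", check(&table[0], false, false));	/* -EINVAL */
	return 0;
}

The ordering matters: a privilege violation is reported as -EPERM before any hardware-generation mismatch is reported as -EINVAL, matching the out_privileged, out_old and out_new labels in the hunk above.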
@@ -1174,6 +2115,9 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
case VMW_PL_GMR:
reloc->location->gmrId = bo->mem.start;
break;
+ case VMW_PL_MOB:
+ *reloc->mob_loc = bo->mem.start;
+ break;
default:
BUG();
}
@@ -1198,6 +2142,8 @@ static void vmw_resource_list_unreference(struct list_head *list)
list_for_each_entry_safe(val, val_next, list, head) {
list_del_init(&val->head);
vmw_resource_unreference(&val->res);
+ if (unlikely(val->staged_bindings))
+ kfree(val->staged_bindings);
kfree(val);
}
}
@@ -1224,7 +2170,8 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
}
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo)
+ struct ttm_buffer_object *bo,
+ bool validate_as_mob)
{
int ret;
@@ -1238,6 +2185,9 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
dev_priv->dummy_query_bo_pinned))
return 0;
+ if (validate_as_mob)
+ return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
+
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
* If there is no space in VRAM and GMR ids are all used up,
@@ -1259,7 +2209,6 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
return ret;
}
-
static int vmw_validate_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
@@ -1267,7 +2216,8 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
int ret;
list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
- ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
+ ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
+ entry->validate_as_mob);
if (unlikely(ret != 0))
return ret;
}
@@ -1461,7 +2411,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} else
sw_context->kernel = true;
- sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+ sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0;
@@ -1478,16 +2428,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock;
sw_context->res_ht_initialized = true;
}
+ INIT_LIST_HEAD(&sw_context->staged_shaders);
INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = vmw_resources_reserve(sw_context);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
if (unlikely(ret != 0))
@@ -1509,11 +2460,23 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_err;
}
+ ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
+ if (unlikely(ret != 0)) {
+ ret = -ERESTARTSYS;
+ goto out_err;
+ }
+
+ if (dev_priv->has_mob) {
+ ret = vmw_rebind_contexts(sw_context);
+ if (unlikely(ret != 0))
+ goto out_err;
+ }
+
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
ret = -ENOMEM;
- goto out_err;
+ goto out_unlock_binding;
}
vmw_apply_relocations(sw_context);
@@ -1538,6 +2501,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
DRM_ERROR("Fence submission error. Syncing.\n");
vmw_resource_list_unreserve(&sw_context->resource_list, false);
+ mutex_unlock(&dev_priv->binding_mutex);
+
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
@@ -1558,6 +2523,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
}
list_splice_init(&sw_context->resource_list, &resource_list);
+ vmw_compat_shaders_commit(sw_context->fp->shman,
+ &sw_context->staged_shaders);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
@@ -1568,11 +2535,14 @@ int vmw_execbuf_process(struct drm_file *file_priv,
return 0;
+out_unlock_binding:
+ mutex_unlock(&dev_priv->binding_mutex);
out_err:
- vmw_resource_relocations_free(&sw_context->res_relocations);
- vmw_free_relocations(sw_context);
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+out_err_nores:
vmw_resource_list_unreserve(&sw_context->resource_list, true);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+ vmw_free_relocations(sw_context);
vmw_clear_validations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
@@ -1581,6 +2551,8 @@ out_unlock:
list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource;
sw_context->error_resource = NULL;
+ vmw_compat_shaders_revert(sw_context->fp->shman,
+ &sw_context->staged_shaders);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index c62d20e8a6f1..436b013b4231 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -271,7 +271,7 @@ void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
spin_unlock_irq(&fman->lock);
}
-void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
+static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
struct list_head *list)
{
struct vmw_fence_action *action, *next_action;
@@ -897,7 +897,7 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
* Note that the action callbacks may be executed before this function
* returns.
*/
-void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
+static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
struct vmw_fence_action *action)
{
struct vmw_fence_manager *fman = fence->fman;
@@ -993,7 +993,7 @@ struct vmw_event_fence_pending {
struct drm_vmw_event_fence event;
};
-int vmw_event_fence_action_create(struct drm_file *file_priv,
+static int vmw_event_fence_action_create(struct drm_file *file_priv,
struct vmw_fence_obj *fence,
uint32_t flags,
uint64_t user_data,
@@ -1080,7 +1080,8 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
*/
if (arg->handle) {
struct ttm_base_object *base =
- ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
+ ttm_base_object_lookup_for_ref(dev_priv->tdev,
+ arg->handle);
if (unlikely(base == NULL)) {
DRM_ERROR("Fence event invalid fence object handle "
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 3eb148667d63..6ccd993e26bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -35,6 +35,23 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+ if (!(dev_priv->capabilities & SVGA_CAP_3D))
+ return false;
+
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+ uint32_t result;
+
+ if (!dev_priv->has_mob)
+ return false;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
+ result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return (result != 0);
+ }
+
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
@@ -511,24 +528,16 @@ out_err:
}
/**
- * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
+ * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
+ * legacy query commands.
*
* @dev_priv: The device private structure.
* @cid: The hardware context id used for the query.
*
- * This function is used to emit a dummy occlusion query with
- * no primitives rendered between query begin and query end.
- * It's used to provide a query barrier, in order to know that when
- * this query is finished, all preceding queries are also finished.
- *
- * A Query results structure should have been initialized at the start
- * of the dev_priv->dummy_query_bo buffer object. And that buffer object
- * must also be either reserved or pinned when this function is called.
- *
- * Returns -ENOMEM on failure to reserve fifo space.
+ * See the vmw_fifo_emit_dummy_query documentation.
*/
-int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
- uint32_t cid)
+static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
+ uint32_t cid)
{
/*
* A query wait without a preceding query end will
@@ -566,3 +575,75 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
return 0;
}
+
+/**
+ * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
+ * guest-backed resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * See the vmw_fifo_emit_dummy_query documentation.
+ */
+static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
+ uint32_t cid)
+{
+ /*
+ * A query wait without a preceding query end will
+ * actually finish all queries for this cid
+ * without writing to the query result structure.
+ */
+
+ struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdWaitForGBQuery body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of fifo space for dummy query.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = cid;
+ cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ cmd->body.mobid = bo->mem.start;
+ cmd->body.offset = 0;
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+
+/**
+ * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
+ * appropriate resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * This function is used to emit a dummy occlusion query with
+ * no primitives rendered between query begin and query end.
+ * It's used to provide a query barrier, in order to know that when
+ * this query is finished, all preceding queries are also finished.
+ *
+ * A Query results structure should have been initialized at the start
+ * of the dev_priv->dummy_query_bo buffer object. And that buffer object
+ * must also be either reserved or pinned when this function is called.
+ *
+ * Returns -ENOMEM on failure to reserve fifo space.
+ */
+int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+ uint32_t cid)
+{
+ if (dev_priv->has_mob)
+ return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);
+
+ return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 6ef0b035becb..61d8d803199f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -125,181 +125,27 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
}
-static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
- struct list_head *desc_pages)
-{
- struct page *page, *next;
- struct svga_guest_mem_descriptor *page_virtual;
- unsigned int desc_per_page = PAGE_SIZE /
- sizeof(struct svga_guest_mem_descriptor) - 1;
-
- if (list_empty(desc_pages))
- return;
-
- list_for_each_entry_safe(page, next, desc_pages, lru) {
- list_del_init(&page->lru);
-
- if (likely(desc_dma != DMA_ADDR_INVALID)) {
- dma_unmap_page(dev, desc_dma, PAGE_SIZE,
- DMA_TO_DEVICE);
- }
-
- page_virtual = kmap_atomic(page);
- desc_dma = (dma_addr_t)
- le32_to_cpu(page_virtual[desc_per_page].ppn) <<
- PAGE_SHIFT;
- kunmap_atomic(page_virtual);
-
- __free_page(page);
- }
-}
-
-/**
- * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
- * the number of used descriptors.
- *
- */
-
-static int vmw_gmr_build_descriptors(struct device *dev,
- struct list_head *desc_pages,
- struct vmw_piter *iter,
- unsigned long num_pages,
- dma_addr_t *first_dma)
-{
- struct page *page;
- struct svga_guest_mem_descriptor *page_virtual = NULL;
- struct svga_guest_mem_descriptor *desc_virtual = NULL;
- unsigned int desc_per_page;
- unsigned long prev_pfn;
- unsigned long pfn;
- int ret;
- dma_addr_t desc_dma;
-
- desc_per_page = PAGE_SIZE /
- sizeof(struct svga_guest_mem_descriptor) - 1;
-
- while (likely(num_pages != 0)) {
- page = alloc_page(__GFP_HIGHMEM);
- if (unlikely(page == NULL)) {
- ret = -ENOMEM;
- goto out_err;
- }
-
- list_add_tail(&page->lru, desc_pages);
- page_virtual = kmap_atomic(page);
- desc_virtual = page_virtual - 1;
- prev_pfn = ~(0UL);
-
- while (likely(num_pages != 0)) {
- pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
-
- if (pfn != prev_pfn + 1) {
-
- if (desc_virtual - page_virtual ==
- desc_per_page - 1)
- break;
-
- (++desc_virtual)->ppn = cpu_to_le32(pfn);
- desc_virtual->num_pages = cpu_to_le32(1);
- } else {
- uint32_t tmp =
- le32_to_cpu(desc_virtual->num_pages);
- desc_virtual->num_pages = cpu_to_le32(tmp + 1);
- }
- prev_pfn = pfn;
- --num_pages;
- vmw_piter_next(iter);
- }
-
- (++desc_virtual)->ppn = DMA_PAGE_INVALID;
- desc_virtual->num_pages = cpu_to_le32(0);
- kunmap_atomic(page_virtual);
- }
-
- desc_dma = 0;
- list_for_each_entry_reverse(page, desc_pages, lru) {
- page_virtual = kmap_atomic(page);
- page_virtual[desc_per_page].ppn = cpu_to_le32
- (desc_dma >> PAGE_SHIFT);
- kunmap_atomic(page_virtual);
- desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
- DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(dev, desc_dma)))
- goto out_err;
- }
- *first_dma = desc_dma;
-
- return 0;
-out_err:
- vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
- return ret;
-}
-
-static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
- int gmr_id, dma_addr_t desc_dma)
-{
- mutex_lock(&dev_priv->hw_mutex);
-
- vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
- wmb();
- vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
- mb();
-
- mutex_unlock(&dev_priv->hw_mutex);
-
-}
-
int vmw_gmr_bind(struct vmw_private *dev_priv,
const struct vmw_sg_table *vsgt,
unsigned long num_pages,
int gmr_id)
{
- struct list_head desc_pages;
- dma_addr_t desc_dma = 0;
- struct device *dev = dev_priv->dev->dev;
struct vmw_piter data_iter;
- int ret;
vmw_piter_start(&data_iter, vsgt, 0);
if (unlikely(!vmw_piter_next(&data_iter)))
return 0;
- if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
- return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
-
- if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
- return -EINVAL;
-
- if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
+ if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2)))
return -EINVAL;
- INIT_LIST_HEAD(&desc_pages);
-
- ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
- num_pages, &desc_dma);
- if (unlikely(ret != 0))
- return ret;
-
- vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
- vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);
-
- return 0;
+ return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
}
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
- if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
+ if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
vmw_gmr2_unbind(dev_priv, gmr_id);
- return;
- }
-
- mutex_lock(&dev_priv->hw_mutex);
- vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
- wmb();
- vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
- mb();
- mutex_unlock(&dev_priv->hw_mutex);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index c5c054ae9056..b1273e8e9a69 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -125,10 +125,21 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
return -ENOMEM;
spin_lock_init(&gman->lock);
- gman->max_gmr_pages = dev_priv->max_gmr_pages;
gman->used_gmr_pages = 0;
ida_init(&gman->gmr_ida);
- gman->max_gmr_ids = p_size;
+
+ switch (p_size) {
+ case VMW_PL_GMR:
+ gman->max_gmr_ids = dev_priv->max_gmr_ids;
+ gman->max_gmr_pages = dev_priv->max_gmr_pages;
+ break;
+ case VMW_PL_MOB:
+ gman->max_gmr_ids = VMWGFX_NUM_MOB;
+ gman->max_gmr_pages = dev_priv->max_mob_pages;
+ break;
+ default:
+ BUG();
+ }
man->priv = (void *) gman;
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index a51f48e3e917..f9881f9e62bd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -29,12 +29,18 @@
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"
+struct svga_3d_compat_cap {
+ SVGA3dCapsRecordHeader header;
+ SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
+};
+
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_getparam_arg *param =
(struct drm_vmw_getparam_arg *)data;
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
switch (param->param) {
case DRM_VMW_PARAM_NUM_STREAMS:
@@ -53,13 +59,18 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
param->value = dev_priv->fifo.capabilities;
break;
case DRM_VMW_PARAM_MAX_FB_SIZE:
- param->value = dev_priv->vram_size;
+ param->value = dev_priv->prim_bb_mem;
break;
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
+ param->value = SVGA3D_HWVERSION_WS8_B1;
+ break;
+ }
+
param->value =
ioread32(fifo_mem +
((fifo->capabilities &
@@ -68,6 +79,29 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
SVGA_FIFO_3D_HWVERSION));
break;
}
+ case DRM_VMW_PARAM_MAX_SURF_MEMORY:
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+ !vmw_fp->gb_aware)
+ param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
+ else
+ param->value = dev_priv->memory_size;
+ break;
+ case DRM_VMW_PARAM_3D_CAPS_SIZE:
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+ vmw_fp->gb_aware)
+ param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+ else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+ param->value = sizeof(struct svga_3d_compat_cap) +
+ sizeof(uint32_t);
+ else
+ param->value = (SVGA_FIFO_3D_CAPS_LAST -
+ SVGA_FIFO_3D_CAPS + 1) *
+ sizeof(uint32_t);
+ break;
+ case DRM_VMW_PARAM_MAX_MOB_MEMORY:
+ vmw_fp->gb_aware = true;
+ param->value = dev_priv->max_mob_pages * PAGE_SIZE;
+ break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
@@ -77,6 +111,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
+ size_t size)
+{
+ struct svga_3d_compat_cap *compat_cap =
+ (struct svga_3d_compat_cap *) bounce;
+ unsigned int i;
+ size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
+ unsigned int max_size;
+
+ if (size < pair_offset)
+ return -EINVAL;
+
+ max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
+
+ if (max_size > SVGA3D_DEVCAP_MAX)
+ max_size = SVGA3D_DEVCAP_MAX;
+
+ compat_cap->header.length =
+ (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
+ compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ for (i = 0; i < max_size; ++i) {
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+ compat_cap->pairs[i][0] = i;
+ compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return 0;
+}
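vmw_fill_compat_cap() above packages the per-index device caps into a single SVGA3DCAPS_RECORD_DEVCAPS record for user space that is not guest-backed aware, with header.length counted in 32-bit words. A small standalone sketch of that length arithmetic follows; the two struct layouts and the 260-entry cap count are assumptions made only for this example (the real definitions live in the SVGA3D headers):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed layouts, for illustration only. */
struct caps_record_header {
	uint32_t length;	/* record length in 32-bit words */
	uint32_t type;
};
typedef uint32_t cap_pair[2];	/* { devcap index, devcap value } */

#define NUM_DEVCAPS 260		/* placeholder for SVGA3D_DEVCAP_MAX */

struct compat_cap {
	struct caps_record_header header;
	cap_pair pairs[NUM_DEVCAPS];
};

int main(void)
{
	size_t pair_offset = offsetof(struct compat_cap, pairs);
	uint32_t length = (pair_offset + NUM_DEVCAPS * sizeof(cap_pair)) /
			  sizeof(uint32_t);

	/* 2 header dwords + 2 dwords per pair = 522 with these assumptions */
	printf("record length = %u dwords\n", length);
	return 0;
}

With those assumptions the record is two header dwords plus two dwords per cap pair, which is exactly what the division by sizeof(u32) in the driver computes.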
+
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -89,29 +155,58 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
void *bounce;
int ret;
+ bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
if (unlikely(arg->pad64 != 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
- size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2;
+ if (gb_objects && vmw_fp->gb_aware)
+ size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+ else if (gb_objects)
+ size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
+ else
+ size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
+ sizeof(uint32_t);
if (arg->max_size < size)
size = arg->max_size;
- bounce = vmalloc(size);
+ bounce = vzalloc(size);
if (unlikely(bounce == NULL)) {
DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
return -ENOMEM;
}
- fifo_mem = dev_priv->mmio_virt;
- memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
+ if (gb_objects && vmw_fp->gb_aware) {
+ int i, num;
+ uint32_t *bounce32 = (uint32_t *) bounce;
+
+ num = size / sizeof(uint32_t);
+ if (num > SVGA3D_DEVCAP_MAX)
+ num = SVGA3D_DEVCAP_MAX;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ for (i = 0; i < num; ++i) {
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+ *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+ } else if (gb_objects) {
+ ret = vmw_fill_compat_cap(dev_priv, bounce, size);
+ if (unlikely(ret != 0))
+ goto out_err;
+ } else {
+ fifo_mem = dev_priv->mmio_virt;
+ memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
+ }
ret = copy_to_user(buffer, bounce, size);
if (ret)
ret = -EFAULT;
+out_err:
vfree(bounce);
if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 4640adbcaf91..0c423766c441 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -30,7 +30,7 @@
#define VMW_FENCE_WRAP (1 << 24)
-irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t vmw_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
struct vmw_private *dev_priv = vmw_priv(dev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 03f1c2038631..8a650413dea5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -40,7 +40,7 @@ struct vmw_clip_rect {
* Clip @num_rects number of @rects against @clip storing the
* results in @out_rects and the number of passed rects in @out_num.
*/
-void vmw_clip_cliprects(struct drm_clip_rect *rects,
+static void vmw_clip_cliprects(struct drm_clip_rect *rects,
int num_rects,
struct vmw_clip_rect clip,
SVGASignedRect *out_rects,
@@ -423,7 +423,7 @@ struct vmw_framebuffer_surface {
struct drm_master *master;
};
-void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
@@ -589,7 +589,7 @@ out_free_tmp:
return ret;
}
-int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
+static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
@@ -609,9 +609,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
if (!dev_priv->sou_priv)
return -EINVAL;
+ drm_modeset_lock_all(dev_priv->dev);
+
ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
+ drm_modeset_unlock_all(dev_priv->dev);
return ret;
+ }
if (!num_clips) {
num_clips = 1;
@@ -629,6 +633,9 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
clips, num_clips, inc, NULL);
ttm_read_unlock(&vmaster->lock);
+
+ drm_modeset_unlock_all(dev_priv->dev);
+
return 0;
}
@@ -665,9 +672,9 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
if (unlikely(surface->mip_levels[0] != 1 ||
surface->num_sizes != 1 ||
- surface->sizes[0].width < mode_cmd->width ||
- surface->sizes[0].height < mode_cmd->height ||
- surface->sizes[0].depth != 1)) {
+ surface->base_size.width < mode_cmd->width ||
+ surface->base_size.height < mode_cmd->height ||
+ surface->base_size.depth != 1)) {
DRM_ERROR("Incompatible surface dimensions "
"for requested mode.\n");
return -EINVAL;
@@ -754,7 +761,7 @@ struct vmw_framebuffer_dmabuf {
struct vmw_dma_buffer *buffer;
};
-void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
@@ -940,7 +947,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
return ret;
}
-int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
@@ -953,9 +960,13 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
struct drm_clip_rect norect;
int ret, increment = 1;
+ drm_modeset_lock_all(dev_priv->dev);
+
ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
+ drm_modeset_unlock_all(dev_priv->dev);
return ret;
+ }
if (!num_clips) {
num_clips = 1;
@@ -979,6 +990,9 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
}
ttm_read_unlock(&vmaster->lock);
+
+ drm_modeset_unlock_all(dev_priv->dev);
+
return ret;
}
@@ -1631,7 +1645,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height)
{
- return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
+ return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem;
}
@@ -1663,7 +1677,7 @@ void vmw_disable_vblank(struct drm_device *dev, int crtc)
* Small shared kms functions.
*/
-int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
+static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
struct drm_vmw_rect *rects)
{
struct drm_device *dev = dev_priv->dev;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
new file mode 100644
index 000000000000..d4a5a19cb8c3
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -0,0 +1,653 @@
+/**************************************************************************
+ *
+ * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+
+/*
+ * If we set up the screen target otable, screen objects stop working.
+ */
+
+#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1)
+
+#ifdef CONFIG_64BIT
+#define VMW_PPN_SIZE 8
+#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
+#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
+#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
+#else
+#define VMW_PPN_SIZE 4
+#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
+#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
+#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
+#endif
+
+/*
+ * struct vmw_mob - Structure containing page table and metadata for a
+ * Guest Memory OBject.
+ *
+ * @num_pages: Number of pages that make up the page table.
+ * @pt_level: The indirection level of the page table. 0-2.
+ * @pt_root_page: DMA address of the level 0 page of the page table.
+ */
+struct vmw_mob {
+ struct ttm_buffer_object *pt_bo;
+ unsigned long num_pages;
+ unsigned pt_level;
+ dma_addr_t pt_root_page;
+ uint32_t id;
+};
+
+/*
+ * struct vmw_otable - Guest Memory OBject table metadata
+ *
+ * @size: Size of the table (page-aligned).
+ * @page_table: Pointer to a struct vmw_mob holding the page table.
+ */
+struct vmw_otable {
+ unsigned long size;
+ struct vmw_mob *page_table;
+};
+
+static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
+ struct vmw_mob *mob);
+static void vmw_mob_pt_setup(struct vmw_mob *mob,
+ struct vmw_piter data_iter,
+ unsigned long num_data_pages);
+
+/*
+ * vmw_setup_otable_base - Issue an object table base setup command to
+ * the device
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @type: Type of object table base
+ * @offset: Start of table offset into dev_priv::otable_bo
+ * @otable: Pointer to otable metadata.
+ *
+ * This function returns -ENOMEM if it fails to reserve fifo space,
+ * and may block waiting for fifo space.
+ */
+static int vmw_setup_otable_base(struct vmw_private *dev_priv,
+ SVGAOTableType type,
+ unsigned long offset,
+ struct vmw_otable *otable)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetOTableBase64 body;
+ } *cmd;
+ struct vmw_mob *mob;
+ const struct vmw_sg_table *vsgt;
+ struct vmw_piter iter;
+ int ret;
+
+ BUG_ON(otable->page_table != NULL);
+
+ vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
+ vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
+ WARN_ON(!vmw_piter_next(&iter));
+
+ mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
+ if (unlikely(mob == NULL)) {
+ DRM_ERROR("Failed creating OTable page table.\n");
+ return -ENOMEM;
+ }
+
+ if (otable->size <= PAGE_SIZE) {
+ mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
+ mob->pt_root_page = vmw_piter_dma_addr(&iter);
+ } else if (vsgt->num_regions == 1) {
+ mob->pt_level = SVGA3D_MOBFMT_RANGE;
+ mob->pt_root_page = vmw_piter_dma_addr(&iter);
+ } else {
+ ret = vmw_mob_pt_populate(dev_priv, mob);
+ if (unlikely(ret != 0))
+ goto out_no_populate;
+
+ vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
+ mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.type = type;
+ cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+ cmd->body.sizeInBytes = otable->size;
+ cmd->body.validSizeInBytes = 0;
+ cmd->body.ptDepth = mob->pt_level;
+
+ /*
+ * The device doesn't support this, but the otable size is
+ * determined at compile-time, so this BUG shouldn't trigger
+ * randomly.
+ */
+ BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ otable->page_table = mob;
+
+ return 0;
+
+out_no_fifo:
+out_no_populate:
+ vmw_mob_destroy(mob);
+ return ret;
+}
+
+/*
+ * vmw_takedown_otable_base - Issue an object table base takedown command
+ * to the device
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @type: Type of object table base
+ *
+ */
+static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
+ SVGAOTableType type,
+ struct vmw_otable *otable)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetOTableBase body;
+ } *cmd;
+ struct ttm_buffer_object *bo;
+
+ if (otable->page_table == NULL)
+ return;
+
+ bo = otable->page_table->pt_bo;
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for OTable takedown.\n");
+ } else {
+ /* Only emit the takedown command if FIFO space was reserved;
+ * still fence and destroy the page table below. */
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.type = type;
+ cmd->body.baseAddress = 0;
+ cmd->body.sizeInBytes = 0;
+ cmd->body.validSizeInBytes = 0;
+ cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ }
+
+ if (bo) {
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ BUG_ON(ret != 0);
+
+ vmw_fence_single_bo(bo, NULL);
+ ttm_bo_unreserve(bo);
+ }
+
+ vmw_mob_destroy(otable->page_table);
+ otable->page_table = NULL;
+}
+
+/*
+ * vmw_otables_setup - Set up guest backed memory object tables
+ *
+ * @dev_priv: Pointer to a device private structure
+ *
+ * Takes care of the device guest backed surface
+ * initialization by setting up the guest backed memory object tables.
+ * Returns 0 on success and various error codes on failure. A successful return
+ * means the object tables can be taken down using the vmw_otables_takedown
+ * function.
+ */
+int vmw_otables_setup(struct vmw_private *dev_priv)
+{
+ unsigned long offset;
+ unsigned long bo_size;
+ struct vmw_otable *otables;
+ SVGAOTableType i;
+ int ret;
+
+ otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
+ GFP_KERNEL);
+ if (unlikely(otables == NULL)) {
+ DRM_ERROR("Failed to allocate space for otable "
+ "metadata.\n");
+ return -ENOMEM;
+ }
+
+ otables[SVGA_OTABLE_MOB].size =
+ VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
+ otables[SVGA_OTABLE_SURFACE].size =
+ VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
+ otables[SVGA_OTABLE_CONTEXT].size =
+ VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
+ otables[SVGA_OTABLE_SHADER].size =
+ VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
+ otables[SVGA_OTABLE_SCREEN_TARGET].size =
+ VMWGFX_NUM_GB_SCREEN_TARGET *
+ SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;
+
+ bo_size = 0;
+ for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
+ otables[i].size =
+ (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
+ bo_size += otables[i].size;
+ }
+
+ ret = ttm_bo_create(&dev_priv->bdev, bo_size,
+ ttm_bo_type_device,
+ &vmw_sys_ne_placement,
+ 0, false, NULL,
+ &dev_priv->otable_bo);
+
+ if (unlikely(ret != 0))
+ goto out_no_bo;
+
+ ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
+ BUG_ON(ret != 0);
+ ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
+ if (unlikely(ret != 0))
+ goto out_unreserve;
+ ret = vmw_bo_map_dma(dev_priv->otable_bo);
+ if (unlikely(ret != 0))
+ goto out_unreserve;
+
+ ttm_bo_unreserve(dev_priv->otable_bo);
+
+ offset = 0;
+ for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
+ ret = vmw_setup_otable_base(dev_priv, i, offset,
+ &otables[i]);
+ if (unlikely(ret != 0))
+ goto out_no_setup;
+ offset += otables[i].size;
+ }
+
+ dev_priv->otables = otables;
+ return 0;
+
+out_unreserve:
+ ttm_bo_unreserve(dev_priv->otable_bo);
+out_no_setup:
+ for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
+ vmw_takedown_otable_base(dev_priv, i, &otables[i]);
+
+ ttm_bo_unref(&dev_priv->otable_bo);
+out_no_bo:
+ kfree(otables);
+ return ret;
+}
+
+
+/*
+ * vmw_otables_takedown - Take down guest backed memory object tables
+ *
+ * @dev_priv: Pointer to a device private structure
+ *
+ * Take down the Guest Memory Object tables.
+ */
+void vmw_otables_takedown(struct vmw_private *dev_priv)
+{
+ SVGAOTableType i;
+ struct ttm_buffer_object *bo = dev_priv->otable_bo;
+ int ret;
+
+ for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
+ vmw_takedown_otable_base(dev_priv, i,
+ &dev_priv->otables[i]);
+
+ ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ BUG_ON(ret != 0);
+
+ vmw_fence_single_bo(bo, NULL);
+ ttm_bo_unreserve(bo);
+
+ ttm_bo_unref(&dev_priv->otable_bo);
+ kfree(dev_priv->otables);
+ dev_priv->otables = NULL;
+}
+
+
+/*
+ * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
+ * needed for a guest backed memory object.
+ *
+ * @data_pages: Number of data pages in the memory object buffer.
+ */
+static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
+{
+ unsigned long data_size = data_pages * PAGE_SIZE;
+ unsigned long tot_size = 0;
+
+ while (likely(data_size > PAGE_SIZE)) {
+ data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
+ data_size *= VMW_PPN_SIZE;
+ tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
+ }
+
+ return tot_size >> PAGE_SHIFT;
+}
+
+/*
+ * vmw_mob_create - Create a mob, but don't populate it.
+ *
+ * @data_pages: Number of data pages of the underlying buffer object.
+ */
+struct vmw_mob *vmw_mob_create(unsigned long data_pages)
+{
+ struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);
+
+ if (unlikely(mob == NULL))
+ return NULL;
+
+ mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
+
+ return mob;
+}
+
+/*
+ * vmw_mob_pt_populate - Populate the mob pagetable
+ *
+ * @mob: Pointer to the mob the pagetable of which we want to
+ * populate.
+ *
+ * This function allocates memory to be used for the pagetable, and
+ * adjusts TTM memory accounting accordingly. Returns ENOMEM if
+ * memory resources aren't sufficient and may cause TTM buffer objects
+ * to be swapped out by using the TTM memory accounting function.
+ */
+static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
+ struct vmw_mob *mob)
+{
+ int ret;
+ BUG_ON(mob->pt_bo != NULL);
+
+ ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
+ ttm_bo_type_device,
+ &vmw_sys_ne_placement,
+ 0, false, NULL, &mob->pt_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL);
+
+ BUG_ON(ret != 0);
+ ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
+ if (unlikely(ret != 0))
+ goto out_unreserve;
+ ret = vmw_bo_map_dma(mob->pt_bo);
+ if (unlikely(ret != 0))
+ goto out_unreserve;
+
+ ttm_bo_unreserve(mob->pt_bo);
+
+ return 0;
+
+out_unreserve:
+ ttm_bo_unreserve(mob->pt_bo);
+ ttm_bo_unref(&mob->pt_bo);
+
+ return ret;
+}
+
+/**
+ * vmw_mob_assign_ppn - Assign a value to a page table entry
+ *
+ * @addr: Pointer to pointer to page table entry.
+ * @val: The page table entry
+ *
+ * Assigns a value to a page table entry pointed to by *@addr and increments
+ * *@addr according to the page table entry size.
+ */
+#if (VMW_PPN_SIZE == 8)
+static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+{
+ *((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
+ *addr += 2;
+}
+#else
+static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+{
+ *(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
+}
+#endif
+
+/*
+ * vmw_mob_build_pt - Build a pagetable
+ *
+ * @data_addr: Array of DMA addresses to the underlying buffer
+ * object's data pages.
+ * @num_data_pages: Number of buffer object data pages.
+ * @pt_pages: Array of page pointers to the page table pages.
+ *
+ * Returns the number of page table pages actually used.
+ * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
+ */
+static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
+ unsigned long num_data_pages,
+ struct vmw_piter *pt_iter)
+{
+ unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
+ unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
+ unsigned long pt_page;
+ __le32 *addr, *save_addr;
+ unsigned long i;
+ struct page *page;
+
+ for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
+ page = vmw_piter_page(pt_iter);
+
+ save_addr = addr = kmap_atomic(page);
+
+ for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
+ vmw_mob_assign_ppn(&addr,
+ vmw_piter_dma_addr(data_iter));
+ if (unlikely(--num_data_pages == 0))
+ break;
+ WARN_ON(!vmw_piter_next(data_iter));
+ }
+ kunmap_atomic(save_addr);
+ vmw_piter_next(pt_iter);
+ }
+
+ return num_pt_pages;
+}
+
+/*
+ * vmw_mob_pt_setup - Set up a multilevel mob pagetable
+ *
+ * @mob: Pointer to a mob whose page table needs setting up.
+ * @data_addr: Array of DMA addresses to the buffer object's data
+ * pages.
+ * @num_data_pages: Number of buffer object data pages.
+ *
+ * Uses tail recursion to set up a multilevel mob page table.
+ */
+static void vmw_mob_pt_setup(struct vmw_mob *mob,
+ struct vmw_piter data_iter,
+ unsigned long num_data_pages)
+{
+ unsigned long num_pt_pages = 0;
+ struct ttm_buffer_object *bo = mob->pt_bo;
+ struct vmw_piter save_pt_iter;
+ struct vmw_piter pt_iter;
+ const struct vmw_sg_table *vsgt;
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ BUG_ON(ret != 0);
+
+ vsgt = vmw_bo_sg_table(bo);
+ vmw_piter_start(&pt_iter, vsgt, 0);
+ BUG_ON(!vmw_piter_next(&pt_iter));
+ mob->pt_level = 0;
+ while (likely(num_data_pages > 1)) {
+ ++mob->pt_level;
+ BUG_ON(mob->pt_level > 2);
+ save_pt_iter = pt_iter;
+ num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
+ &pt_iter);
+ data_iter = save_pt_iter;
+ num_data_pages = num_pt_pages;
+ }
+
+ mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
+ ttm_bo_unreserve(bo);
+}
+
+/*
+ * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
+ *
+ * @mob: Pointer to a mob to destroy.
+ */
+void vmw_mob_destroy(struct vmw_mob *mob)
+{
+ if (mob->pt_bo)
+ ttm_bo_unref(&mob->pt_bo);
+ kfree(mob);
+}
+
+/*
+ * vmw_mob_unbind - Hide a mob from the device.
+ *
+ * @dev_priv: Pointer to a device private.
+ * @mob: Pointer to the mob to unbind.
+ */
+void vmw_mob_unbind(struct vmw_private *dev_priv,
+ struct vmw_mob *mob)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyGBMob body;
+ } *cmd;
+ int ret;
+ struct ttm_buffer_object *bo = mob->pt_bo;
+
+ if (bo) {
+ ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ /*
+ * No one else should be using this buffer.
+ */
+ BUG_ON(ret != 0);
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for Memory "
+ "Object unbinding.\n");
+ } else {
+ /* Only emit the destroy command if FIFO space was reserved. */
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.mobid = mob->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ }
+ if (bo) {
+ vmw_fence_single_bo(bo, NULL);
+ ttm_bo_unreserve(bo);
+ }
+ vmw_3d_resource_dec(dev_priv, false);
+}
+
+/*
+ * vmw_mob_bind - Make a mob visible to the device after first
+ * populating it if necessary.
+ *
+ * @dev_priv: Pointer to a device private.
+ * @mob: Pointer to the mob we're making visible.
+ * @data_addr: Array of DMA addresses to the data pages of the underlying
+ * buffer object.
+ * @num_data_pages: Number of data pages of the underlying buffer
+ * object.
+ * @mob_id: Device id of the mob to bind
+ *
+ * This function is intended to be interfaced with the ttm_tt backend
+ * code.
+ */
+int vmw_mob_bind(struct vmw_private *dev_priv,
+ struct vmw_mob *mob,
+ const struct vmw_sg_table *vsgt,
+ unsigned long num_data_pages,
+ int32_t mob_id)
+{
+ int ret;
+ bool pt_set_up = false;
+ struct vmw_piter data_iter;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBMob64 body;
+ } *cmd;
+
+ mob->id = mob_id;
+ vmw_piter_start(&data_iter, vsgt, 0);
+ if (unlikely(!vmw_piter_next(&data_iter)))
+ return 0;
+
+ if (likely(num_data_pages == 1)) {
+ mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
+ mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
+ } else if (vsgt->num_regions == 1) {
+ mob->pt_level = SVGA3D_MOBFMT_RANGE;
+ mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
+ } else if (unlikely(mob->pt_bo == NULL)) {
+ ret = vmw_mob_pt_populate(dev_priv, mob);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_mob_pt_setup(mob, data_iter, num_data_pages);
+ pt_set_up = true;
+ mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
+ }
+
+ (void) vmw_3d_resource_inc(dev_priv, false);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for Memory "
+ "Object binding.\n");
+ goto out_no_cmd_space;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.mobid = mob_id;
+ cmd->body.ptDepth = mob->pt_level;
+ cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+ cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+
+out_no_cmd_space:
+ vmw_3d_resource_dec(dev_priv, false);
+ if (pt_set_up)
+ ttm_bo_unref(&mob->pt_bo);
+
+ return -ENOMEM;
+}
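The MOB code above sizes and builds its page table bottom-up: vmw_mob_calculate_pt_pages() adds one level of entries per iteration until a level fits in a single page, and vmw_mob_pt_setup() then writes the levels with vmw_mob_build_pt(), keeping the top level's first page as pt_root_page. A standalone sketch of the size calculation with a worked example follows; it assumes a 4 KiB PAGE_SIZE and the 8-byte VMW_PPN_SIZE of the 64-bit build:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)	/* assumes 4 KiB pages */
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define VMW_PPN_SIZE 8UL		/* 64-bit build, see the define above */

/* Same arithmetic as vmw_mob_calculate_pt_pages(): each pass adds one
 * page-table level holding one entry per page of the level below. */
static unsigned long pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * PAGE_SIZE;
	unsigned long tot_size = 0;

	while (data_size > PAGE_SIZE) {
		data_size = (data_size + PAGE_SIZE - 1) / PAGE_SIZE; /* pages below */
		data_size *= VMW_PPN_SIZE;                           /* bytes of entries */
		tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK; /* page-align */
	}
	return tot_size >> PAGE_SHIFT;
}

int main(void)
{
	/* 1 GiB buffer = 262144 data pages: level 1 uses 512 pages of
	 * entries, level 2 uses 1 page, so 513 page-table pages. */
	printf("%lu\n", pt_pages(262144));	/* prints 513 */
	return 0;
}

So a 1 GiB backing buffer ends up with a two-level table (513 extra pages, roughly 0.2% overhead), while anything up to 2 MiB needs only a single level and a one-page buffer needs no page table at all.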
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 9b5ea2ac7ddf..2aa4bc6a4d60 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
return res;
}
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+ return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
/**
* vmw_resource_release_id - release a resource id to the id manager.
@@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
vmw_dmabuf_unreference(&res->backup);
}
- if (likely(res->hw_destroy != NULL))
+ if (likely(res->hw_destroy != NULL)) {
res->hw_destroy(res);
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_context_binding_res_list_kill(&res->binding_head);
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
id = res->id;
if (res->res_free != NULL)
@@ -215,6 +224,7 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
res->func = func;
INIT_LIST_HEAD(&res->lru_head);
INIT_LIST_HEAD(&res->mob_head);
+ INIT_LIST_HEAD(&res->binding_head);
res->id = -1;
res->backup = NULL;
res->backup_offset = 0;
@@ -441,6 +451,21 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
ttm_bo_unref(&bo);
}
+static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
+ enum ttm_ref_type ref_type)
+{
+ struct vmw_user_dma_buffer *user_bo;
+ user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
+
+ switch (ref_type) {
+ case TTM_REF_SYNCCPU_WRITE:
+ ttm_bo_synccpu_write_release(&user_bo->dma.base);
+ break;
+ default:
+ BUG();
+ }
+}
+
/**
* vmw_user_dmabuf_alloc - Allocate a user dma buffer
*
@@ -471,6 +496,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
}
ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+ (dev_priv->has_mob) ?
+ &vmw_sys_placement :
&vmw_vram_sys_placement, true,
&vmw_user_dmabuf_destroy);
if (unlikely(ret != 0))
@@ -482,7 +509,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
&user_bo->prime,
shareable,
ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
+ &vmw_user_dmabuf_release,
+ &vmw_user_dmabuf_ref_obj_release);
if (unlikely(ret != 0)) {
ttm_bo_unref(&tmp);
goto out_no_base_object;
@@ -515,6 +543,130 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}
+/**
+ * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
+ struct ttm_object_file *tfile,
+ uint32_t flags)
+{
+ struct ttm_buffer_object *bo = &user_bo->dma.base;
+ bool existed;
+ int ret;
+
+ if (flags & drm_vmw_synccpu_allow_cs) {
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ spin_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, true,
+ !!(flags & drm_vmw_synccpu_dontblock));
+ spin_unlock(&bdev->fence_lock);
+ return ret;
+ }
+
+ ret = ttm_bo_synccpu_write_grab
+ (bo, !!(flags & drm_vmw_synccpu_dontblock));
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+ TTM_REF_SYNCCPU_WRITE, &existed);
+ if (ret != 0 || existed)
+ ttm_bo_synccpu_write_release(&user_bo->dma.base);
+
+ return ret;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
+ struct ttm_object_file *tfile,
+ uint32_t flags)
+{
+ if (!(flags & drm_vmw_synccpu_allow_cs))
+ return ttm_ref_object_base_unref(tfile, handle,
+ TTM_REF_SYNCCPU_WRITE);
+
+ return 0;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_synccpu_arg *arg =
+ (struct drm_vmw_synccpu_arg *) data;
+ struct vmw_dma_buffer *dma_buf;
+ struct vmw_user_dma_buffer *user_bo;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+
+ if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+ || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+ drm_vmw_synccpu_dontblock |
+ drm_vmw_synccpu_allow_cs)) != 0) {
+ DRM_ERROR("Illegal synccpu flags.\n");
+ return -EINVAL;
+ }
+
+ switch (arg->op) {
+ case drm_vmw_synccpu_grab:
+ ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+ if (unlikely(ret != 0))
+ return ret;
+
+ user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
+ dma);
+ ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
+ vmw_dmabuf_unreference(&dma_buf);
+ if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+ ret != -EBUSY)) {
+ DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+ (unsigned int) arg->handle);
+ return ret;
+ }
+ break;
+ case drm_vmw_synccpu_release:
+ ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
+ arg->flags);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+ (unsigned int) arg->handle);
+ return ret;
+ }
+ break;
+ default:
+ DRM_ERROR("Invalid synccpu operation.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -591,7 +743,8 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
}
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
- struct vmw_dma_buffer *dma_buf)
+ struct vmw_dma_buffer *dma_buf,
+ uint32_t *handle)
{
struct vmw_user_dma_buffer *user_bo;
@@ -599,6 +752,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
return -EINVAL;
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+
+ *handle = user_bo->prime.base.hash.key;
return ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_USAGE, NULL);
}
@@ -1291,11 +1446,54 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 * @mem: The struct ttm_mem_reg indicating to what memory
* region the move is taking place.
*
- * For now does nothing.
+ * Evicts the Guest Backed hardware resource if the backup
+ * buffer is being moved out of MOB memory.
+ * Note that this function should not race with the resource
+ * validation code as long as it accesses only members of struct
+ * resource that remain static while bo::res is !NULL and
+ * while we have @bo reserved. struct resource::backup is *not* a
+ * static member. The resource validation code will take care
+ * to set @bo::res to NULL, while having @bo reserved when the
+ * buffer is no longer bound to the resource, so @bo:res can be
+ * used to determine whether there is a need to unbind and whether
+ * it is safe to unbind.
*/
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
+ struct vmw_dma_buffer *dma_buf;
+
+ if (mem == NULL)
+ return;
+
+ if (bo->destroy != vmw_dmabuf_bo_free &&
+ bo->destroy != vmw_user_dmabuf_destroy)
+ return;
+
+ dma_buf = container_of(bo, struct vmw_dma_buffer, base);
+
+ if (mem->mem_type != VMW_PL_MOB) {
+ struct vmw_resource *res, *n;
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_validate_buffer val_buf;
+
+ val_buf.bo = bo;
+
+ list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
+
+ if (unlikely(res->func->unbind == NULL))
+ continue;
+
+ (void) res->func->unbind(res, true, &val_buf);
+ res->backup_dirty = true;
+ res->res_dirty = false;
+ list_del_init(&res->mob_head);
+ }
+
+ spin_lock(&bdev->fence_lock);
+ (void) ttm_bo_wait(bo, false, false, false);
+ spin_unlock(&bdev->fence_lock);
+ }
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
new file mode 100644
index 000000000000..217d941b8176
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -0,0 +1,810 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "ttm/ttm_placement.h"
+
+#define VMW_COMPAT_SHADER_HT_ORDER 12
+
+struct vmw_shader {
+ struct vmw_resource res;
+ SVGA3dShaderType type;
+ uint32_t size;
+};
+
+struct vmw_user_shader {
+ struct ttm_base_object base;
+ struct vmw_shader shader;
+};
+
+/**
+ * enum vmw_compat_shader_state - Staging state for compat shaders
+ */
+enum vmw_compat_shader_state {
+ VMW_COMPAT_COMMITED,
+ VMW_COMPAT_ADD,
+ VMW_COMPAT_DEL
+};
+
+/**
+ * struct vmw_compat_shader - Metadata for compat shaders.
+ *
+ * @handle: The TTM handle of the guest backed shader.
+ * @tfile: The struct ttm_object_file the guest backed shader is registered
+ * with.
+ * @hash: Hash item for lookup.
+ * @head: List head for staging lists or the compat shader manager list.
+ * @state: Staging state.
+ *
+ * The structure is protected by the cmdbuf lock.
+ */
+struct vmw_compat_shader {
+ u32 handle;
+ struct ttm_object_file *tfile;
+ struct drm_hash_item hash;
+ struct list_head head;
+ enum vmw_compat_shader_state state;
+};
+
+/**
+ * struct vmw_compat_shader_manager - Compat shader manager.
+ *
+ * @shaders: Hash table containing staged and committed compat shaders
+ * @list: List of committed shaders.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @shaders and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_compat_shader_manager {
+ struct drm_open_hash shaders;
+ struct list_head list;
+ struct vmw_private *dev_priv;
+};
+
+static void vmw_user_shader_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_shader_base_to_res(struct ttm_base_object *base);
+
+static int vmw_gb_shader_create(struct vmw_resource *res);
+static int vmw_gb_shader_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_shader_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_shader_destroy(struct vmw_resource *res);
+
+static uint64_t vmw_user_shader_size;
+
+static const struct vmw_user_resource_conv user_shader_conv = {
+ .object_type = VMW_RES_SHADER,
+ .base_obj_to_res = vmw_user_shader_base_to_res,
+ .res_free = vmw_user_shader_free
+};
+
+const struct vmw_user_resource_conv *user_shader_converter =
+ &user_shader_conv;
+
+
+static const struct vmw_res_func vmw_gb_shader_func = {
+ .res_type = vmw_res_shader,
+ .needs_backup = true,
+ .may_evict = true,
+ .type_name = "guest backed shaders",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_gb_shader_create,
+ .destroy = vmw_gb_shader_destroy,
+ .bind = vmw_gb_shader_bind,
+ .unbind = vmw_gb_shader_unbind
+};
+
+/**
+ * Shader management:
+ */
+
+static inline struct vmw_shader *
+vmw_res_to_shader(struct vmw_resource *res)
+{
+ return container_of(res, struct vmw_shader, res);
+}
+
+static void vmw_hw_shader_destroy(struct vmw_resource *res)
+{
+ (void) vmw_gb_shader_destroy(res);
+}
+
+static int vmw_gb_shader_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ uint32_t size,
+ uint64_t offset,
+ SVGA3dShaderType type,
+ struct vmw_dma_buffer *byte_code,
+ void (*res_free) (struct vmw_resource *res))
+{
+ struct vmw_shader *shader = vmw_res_to_shader(res);
+ int ret;
+
+ ret = vmw_resource_init(dev_priv, res, true,
+ res_free, &vmw_gb_shader_func);
+
+
+ if (unlikely(ret != 0)) {
+ if (res_free)
+ res_free(res);
+ else
+ kfree(res);
+ return ret;
+ }
+
+ res->backup_size = size;
+ if (byte_code) {
+ res->backup = vmw_dmabuf_reference(byte_code);
+ res->backup_offset = offset;
+ }
+ shader->size = size;
+ shader->type = type;
+
+ vmw_resource_activate(res, vmw_hw_shader_destroy);
+ return 0;
+}
+
+static int vmw_gb_shader_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_shader *shader = vmw_res_to_shader(res);
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBShader body;
+ } *cmd;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a shader id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.shid = res->id;
+ cmd->body.type = shader->type;
+ cmd->body.sizeInBytes = shader->size;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ (void) vmw_3d_resource_inc(dev_priv, false);
+
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ return ret;
+}
+
+static int vmw_gb_shader_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBShader body;
+ } *cmd;
+ struct ttm_buffer_object *bo = val_buf->bo;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.shid = res->id;
+ cmd->body.mobid = bo->mem.start;
+ cmd->body.offsetInBytes = 0;
+ res->backup_dirty = false;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+static int vmw_gb_shader_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBShader body;
+ } *cmd;
+ struct vmw_fence_obj *fence;
+
+ BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.shid = res->id;
+ cmd->body.mobid = SVGA3D_INVALID_ID;
+ cmd->body.offsetInBytes = 0;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(val_buf->bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+static int vmw_gb_shader_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyGBShader body;
+ } *cmd;
+
+ if (likely(res->id == -1))
+ return 0;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_context_binding_res_list_scrub(&res->binding_head);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "destruction.\n");
+ mutex_unlock(&dev_priv->binding_mutex);
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.shid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ mutex_unlock(&dev_priv->binding_mutex);
+ vmw_resource_release_id(res);
+ vmw_3d_resource_dec(dev_priv, false);
+
+ return 0;
+}
+
+/**
+ * User-space shader management:
+ */
+
+static struct vmw_resource *
+vmw_user_shader_base_to_res(struct ttm_base_object *base)
+{
+ return &(container_of(base, struct vmw_user_shader, base)->
+ shader.res);
+}
+
+static void vmw_user_shader_free(struct vmw_resource *res)
+{
+ struct vmw_user_shader *ushader =
+ container_of(res, struct vmw_user_shader, shader.res);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ ttm_base_object_kfree(ushader, base);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_shader_size);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_resource *res = vmw_user_shader_base_to_res(base);
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_ref_object_base_unref(tfile, arg->handle,
+ TTM_REF_USAGE);
+}
+
+int vmw_shader_alloc(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buffer,
+ size_t shader_size,
+ size_t offset,
+ SVGA3dShaderType shader_type,
+ struct ttm_object_file *tfile,
+ u32 *handle)
+{
+ struct vmw_user_shader *ushader;
+ struct vmw_resource *res, *tmp;
+ int ret;
+
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+	 * by the maximum number of shaders anyway.
+ */
+ if (unlikely(vmw_user_shader_size == 0))
+ vmw_user_shader_size =
+ ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_user_shader_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for shader "
+ "creation.\n");
+ goto out;
+ }
+
+ ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
+ if (unlikely(ushader == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_shader_size);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ res = &ushader->shader.res;
+ ushader->base.shareable = false;
+ ushader->base.tfile = NULL;
+
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+
+ ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+ offset, shader_type, buffer,
+ vmw_user_shader_free);
+ if (unlikely(ret != 0))
+ goto out;
+
+ tmp = vmw_resource_reference(res);
+ ret = ttm_base_object_init(tfile, &ushader->base, false,
+ VMW_RES_SHADER,
+ &vmw_user_shader_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ goto out_err;
+ }
+
+ if (handle)
+ *handle = ushader->base.hash.key;
+out_err:
+ vmw_resource_unreference(&res);
+out:
+ return ret;
+}
+
+
+int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_shader_create_arg *arg =
+ (struct drm_vmw_shader_create_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ struct vmw_dma_buffer *buffer = NULL;
+ SVGA3dShaderType shader_type;
+ int ret;
+
+ if (arg->buffer_handle != SVGA3D_INVALID_ID) {
+ ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
+ &buffer);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not find buffer for shader "
+ "creation.\n");
+ return ret;
+ }
+
+ if ((u64)buffer->base.num_pages * PAGE_SIZE <
+ (u64)arg->size + (u64)arg->offset) {
+ DRM_ERROR("Illegal buffer- or shader size.\n");
+ ret = -EINVAL;
+ goto out_bad_arg;
+ }
+ }
+
+ switch (arg->shader_type) {
+ case drm_vmw_shader_type_vs:
+ shader_type = SVGA3D_SHADERTYPE_VS;
+ break;
+ case drm_vmw_shader_type_ps:
+ shader_type = SVGA3D_SHADERTYPE_PS;
+ break;
+ case drm_vmw_shader_type_gs:
+ shader_type = SVGA3D_SHADERTYPE_GS;
+ break;
+ default:
+ DRM_ERROR("Illegal shader type.\n");
+ ret = -EINVAL;
+ goto out_bad_arg;
+ }
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ goto out_bad_arg;
+
+ ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
+ shader_type, tfile, &arg->shader_handle);
+
+ ttm_read_unlock(&vmaster->lock);
+out_bad_arg:
+ vmw_dmabuf_unreference(&buffer);
+ return ret;
+}
+
+/**
+ * vmw_compat_shader_lookup - Look up a compat shader
+ *
+ * @man: Pointer to the compat shader manager.
+ * @shader_type: The shader type, that combined with the user_key identifies
+ * the shader.
+ * @user_key: On entry, points to the user key. On successful exit, the
+ * value pointed to is the guest-backed shader's TTM handle.
+ *
+ * Returns 0 on success. Non-zero on failure, in which case the value pointed
+ * to by @user_key is unmodified.
+ */
+int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+ SVGA3dShaderType shader_type,
+ u32 *user_key)
+{
+ struct drm_hash_item *hash;
+ int ret;
+ unsigned long key = *user_key | (shader_type << 24);
+
+ ret = drm_ht_find_item(&man->shaders, key, &hash);
+ if (unlikely(ret != 0))
+ return ret;
+
+ *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
+ hash)->handle;
+
+ return 0;
+}
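The combined hash key packs the shader type into bits 24-31 above the 24-bit user key, which is why vmw_compat_shader_add() below rejects user keys wider than 24 bits. A minimal sketch of the packing, for illustration only (the helper name is made up and is not part of the driver):

	static inline unsigned long example_compat_shader_key(u32 user_key,
							       SVGA3dShaderType type)
	{
		/* user_key occupies bits 0..23; the shader type bits 24..31. */
		return (unsigned long)(user_key | ((u32)type << 24));
	}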
+
+/**
+ * vmw_compat_shader_free - Free a compat shader.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @entry: Pointer to a struct vmw_compat_shader.
+ *
+ * Frees a struct vmw_compat_shader entry and drops its reference to the
+ * guest backed shader.
+ */
+static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
+ struct vmw_compat_shader *entry)
+{
+ list_del(&entry->head);
+ WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
+ WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
+ TTM_REF_USAGE));
+ kfree(entry);
+}
+
+/**
+ * vmw_compat_shaders_commit - Commit a list of compat shader actions.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function commits a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has committed the FIFO contents to the device.
+ */
+void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry, *next;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ list_del(&entry->head);
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ entry->state = VMW_COMPAT_COMMITED;
+ list_add_tail(&entry->head, &man->list);
+ break;
+ case VMW_COMPAT_DEL:
+ ttm_ref_object_base_unref(entry->tfile, entry->handle,
+ TTM_REF_USAGE);
+ kfree(entry);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+}
+
+/**
+ * vmw_compat_shaders_revert - Revert a list of compat shader actions
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function reverts a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry, *next;
+ int ret;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ vmw_compat_shader_free(man, entry);
+ break;
+ case VMW_COMPAT_DEL:
+ ret = drm_ht_insert_item(&man->shaders, &entry->hash);
+ list_del(&entry->head);
+ list_add_tail(&entry->head, &man->list);
+ entry->state = VMW_COMPAT_COMMITED;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+}
+
+/**
+ * vmw_compat_shader_remove - Stage a compat shader for removal.
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @shader_type: Shader type.
+ * @list: Caller's list of staged shader actions.
+ *
+ * This function stages a compat shader for removal and removes the key from
+ * the shader manager's hash table. If the shader was previously only staged
+ * for addition it is completely removed (but the execbuf code may keep a
+ * reference if it was bound to a context between addition and removal). If
+ * it was previously committed to the manager, it is staged for removal.
+ */
+int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry;
+ struct drm_hash_item *hash;
+ int ret;
+
+ ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
+ &hash);
+ if (likely(ret != 0))
+ return -EINVAL;
+
+ entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
+
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ vmw_compat_shader_free(man, entry);
+ break;
+ case VMW_COMPAT_COMMITED:
+ (void) drm_ht_remove_item(&man->shaders, &entry->hash);
+ list_del(&entry->head);
+ entry->state = VMW_COMPAT_DEL;
+ list_add_tail(&entry->head, list);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_compat_shader_add - Create a compat shader and add the
+ * key to the manager
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @bytecode: Pointer to the bytecode of the shader.
+ * @shader_type: Shader type.
+ * @size: Size of the shader bytecode, in bytes.
+ * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
+ * to be created with.
+ * @list: Caller's list of staged shader actions.
+ *
+ * Note that only the key is added to the shader manager's hash table.
+ * The shader is not yet added to the shader manager's list of shaders.
+ */
+int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+ u32 user_key, const void *bytecode,
+ SVGA3dShaderType shader_type,
+ size_t size,
+ struct ttm_object_file *tfile,
+ struct list_head *list)
+{
+ struct vmw_dma_buffer *buf;
+ struct ttm_bo_kmap_obj map;
+ bool is_iomem;
+ struct vmw_compat_shader *compat;
+ u32 handle;
+ int ret;
+
+ if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
+ return -EINVAL;
+
+ /* Allocate and pin a DMA buffer */
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (unlikely(buf == NULL))
+ return -ENOMEM;
+
+ ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
+ true, vmw_dmabuf_bo_free);
+ if (unlikely(ret != 0))
+ goto out;
+
+ ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
+ if (unlikely(ret != 0))
+ goto no_reserve;
+
+ /* Map and copy shader bytecode. */
+ ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
+ &map);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unreserve(&buf->base);
+ goto no_reserve;
+ }
+
+ memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
+ WARN_ON(is_iomem);
+
+ ttm_bo_kunmap(&map);
+ ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+ WARN_ON(ret != 0);
+ ttm_bo_unreserve(&buf->base);
+
+ /* Create a guest-backed shader container backed by the dma buffer */
+ ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
+ tfile, &handle);
+ vmw_dmabuf_unreference(&buf);
+ if (unlikely(ret != 0))
+ goto no_reserve;
+ /*
+ * Create a compat shader structure and stage it for insertion
+ * in the manager
+ */
+ compat = kzalloc(sizeof(*compat), GFP_KERNEL);
+	if (compat == NULL) {
+		ret = -ENOMEM;
+		goto no_compat;
+	}
+
+ compat->hash.key = user_key | (shader_type << 24);
+ ret = drm_ht_insert_item(&man->shaders, &compat->hash);
+ if (unlikely(ret != 0))
+ goto out_invalid_key;
+
+ compat->state = VMW_COMPAT_ADD;
+ compat->handle = handle;
+ compat->tfile = tfile;
+ list_add_tail(&compat->head, list);
+
+ return 0;
+
+out_invalid_key:
+ kfree(compat);
+no_compat:
+ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+no_reserve:
+out:
+ return ret;
+}
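Taken together, vmw_compat_shader_add(), vmw_compat_shaders_commit() and vmw_compat_shaders_revert() form a two-phase protocol around command submission: stage entries on a caller-owned list, then commit them on success or revert them on failure. The sketch below is illustrative only (the function, the zero user key and the PS shader type are made up for the example) and assumes the caller already holds the cmdbuf mutex:

	static int example_compat_shader_update(struct vmw_compat_shader_manager *man,
						struct ttm_object_file *tfile,
						const void *bytecode, size_t size)
	{
		struct list_head staged;	/* caller-owned staging list */
		int ret;

		INIT_LIST_HEAD(&staged);

		/* Stage an addition; only the hash key becomes visible to lookups. */
		ret = vmw_compat_shader_add(man, 0, bytecode,
					    SVGA3D_SHADERTYPE_PS, size, tfile,
					    &staged);
		if (ret != 0) {
			vmw_compat_shaders_revert(man, &staged);
			return ret;
		}

		/* ... build and submit the FIFO command stream here ... */

		/* On successful submission, move the staged entries to the manager. */
		vmw_compat_shaders_commit(man, &staged);
		return 0;
	}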
+
+/**
+ * vmw_compat_shader_man_create - Create a compat shader manager
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Typically done at file open time. If successful, returns a pointer to a
+ * compat shader manager. Otherwise returns an error pointer.
+ */
+struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv)
+{
+ struct vmw_compat_shader_manager *man;
+ int ret;
+
+	man = kzalloc(sizeof(*man), GFP_KERNEL);
+	if (man == NULL)
+		return ERR_PTR(-ENOMEM);
+
+ man->dev_priv = dev_priv;
+ INIT_LIST_HEAD(&man->list);
+ ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
+ if (ret == 0)
+ return man;
+
+ kfree(man);
+ return ERR_PTR(ret);
+}
+
+/**
+ * vmw_compat_shader_man_destroy - Destroy a compat shader manager
+ *
+ * @man: Pointer to the shader manager to destroy.
+ *
+ * Typically done at file close time.
+ */
+void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
+{
+ struct vmw_compat_shader *entry, *next;
+
+ mutex_lock(&man->dev_priv->cmdbuf_mutex);
+ list_for_each_entry_safe(entry, next, &man->list, head)
+ vmw_compat_shader_free(man, entry);
+
+ mutex_unlock(&man->dev_priv->cmdbuf_mutex);
+ kfree(man);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7de2ea8bd553..82468d902915 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -41,7 +41,6 @@ struct vmw_user_surface {
struct ttm_prime_object prime;
struct vmw_surface srf;
uint32_t size;
- uint32_t backup_handle;
};
/**
@@ -68,6 +67,14 @@ static int vmw_legacy_srf_unbind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+static int vmw_gb_surface_create(struct vmw_resource *res);
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_destroy(struct vmw_resource *res);
+
static const struct vmw_user_resource_conv user_surface_conv = {
.object_type = VMW_RES_SURFACE,
@@ -93,6 +100,18 @@ static const struct vmw_res_func vmw_legacy_surface_func = {
.unbind = &vmw_legacy_srf_unbind
};
+static const struct vmw_res_func vmw_gb_surface_func = {
+ .res_type = vmw_res_surface,
+ .needs_backup = true,
+ .may_evict = true,
+ .type_name = "guest backed surfaces",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_gb_surface_create,
+ .destroy = vmw_gb_surface_destroy,
+ .bind = vmw_gb_surface_bind,
+ .unbind = vmw_gb_surface_unbind
+};
+
/**
* struct vmw_surface_dma - SVGA3D DMA command
*/
@@ -291,6 +310,11 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
struct vmw_surface *srf;
void *cmd;
+ if (res->func->destroy == vmw_gb_surface_destroy) {
+ (void) vmw_gb_surface_destroy(res);
+ return;
+ }
+
if (res->id != -1) {
cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
@@ -549,12 +573,15 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_resource *res = &srf->res;
BUG_ON(res_free == NULL);
- (void) vmw_3d_resource_inc(dev_priv, false);
+ if (!dev_priv->has_mob)
+ (void) vmw_3d_resource_inc(dev_priv, false);
ret = vmw_resource_init(dev_priv, res, true, res_free,
+ (dev_priv->has_mob) ? &vmw_gb_surface_func :
&vmw_legacy_surface_func);
if (unlikely(ret != 0)) {
- vmw_3d_resource_dec(dev_priv, false);
+ if (!dev_priv->has_mob)
+ vmw_3d_resource_dec(dev_priv, false);
res_free(res);
return ret;
}
@@ -750,7 +777,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->base_size = *srf->sizes;
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
- srf->multisample_count = 1;
+ srf->multisample_count = 0;
cur_bo_offset = 0;
cur_offset = srf->offsets;
@@ -843,6 +870,7 @@ out_unlock:
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vmw_private *dev_priv = vmw_priv(dev);
union drm_vmw_surface_reference_arg *arg =
(union drm_vmw_surface_reference_arg *)data;
struct drm_vmw_surface_arg *req = &arg->req;
@@ -854,7 +882,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
struct ttm_base_object *base;
int ret = -EINVAL;
- base = ttm_base_object_lookup(tfile, req->sid);
+ base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
if (unlikely(base == NULL)) {
DRM_ERROR("Could not find surface to reference.\n");
return -EINVAL;
@@ -880,8 +908,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
rep->size_addr;
if (user_sizes)
- ret = copy_to_user(user_sizes, srf->sizes,
- srf->num_sizes * sizeof(*srf->sizes));
+ ret = copy_to_user(user_sizes, &srf->base_size,
+ sizeof(srf->base_size));
if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
@@ -893,3 +921,436 @@ out_no_reference:
return ret;
}
+
+/**
+ * vmw_gb_surface_create - Define (create) a guest-backed surface on the device.
+ *
+ * @res: Pointer to the surface resource.
+ */
+static int vmw_gb_surface_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+ uint32_t cmd_len, submit_len;
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBSurface body;
+ } *cmd;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ (void) vmw_3d_resource_inc(dev_priv, false);
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a surface id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ cmd_len = sizeof(cmd->body);
+ submit_len = sizeof(*cmd);
+ cmd = vmw_fifo_reserve(dev_priv, submit_len);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+ cmd->header.size = cmd_len;
+ cmd->body.sid = srf->res.id;
+ cmd->body.surfaceFlags = srf->flags;
+ cmd->body.format = cpu_to_le32(srf->format);
+ cmd->body.numMipLevels = srf->mip_levels[0];
+ cmd->body.multisampleCount = srf->multisample_count;
+ cmd->body.autogenFilter = srf->autogen_filter;
+ cmd->body.size.width = srf->base_size.width;
+ cmd->body.size.height = srf->base_size.height;
+ cmd->body.size.depth = srf->base_size.depth;
+ vmw_fifo_commit(dev_priv, submit_len);
+
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ vmw_3d_resource_dec(dev_priv, false);
+ return ret;
+}
+
+
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBSurface body;
+ } *cmd1;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBSurface body;
+ } *cmd2;
+ uint32_t submit_size;
+ struct ttm_buffer_object *bo = val_buf->bo;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
+
+ cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd1 == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.sid = res->id;
+ cmd1->body.mobid = bo->mem.start;
+ if (res->backup_dirty) {
+ cmd2 = (void *) &cmd1[1];
+ cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
+ cmd2->header.size = sizeof(cmd2->body);
+ cmd2->body.sid = res->id;
+ res->backup_dirty = false;
+ }
+ vmw_fifo_commit(dev_priv, submit_size);
+
+ return 0;
+}
+
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+ struct vmw_fence_obj *fence;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdReadbackGBSurface body;
+ } *cmd1;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdInvalidateGBSurface body;
+ } *cmd2;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBSurface body;
+ } *cmd3;
+ uint32_t submit_size;
+ uint8_t *cmd;
+
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ if (readback) {
+ cmd1 = (void *) cmd;
+ cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.sid = res->id;
+ cmd3 = (void *) &cmd1[1];
+ } else {
+ cmd2 = (void *) cmd;
+ cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
+ cmd2->header.size = sizeof(cmd2->body);
+ cmd2->body.sid = res->id;
+ cmd3 = (void *) &cmd2[1];
+ }
+
+ cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+ cmd3->header.size = sizeof(cmd3->body);
+ cmd3->body.sid = res->id;
+ cmd3->body.mobid = SVGA3D_INVALID_ID;
+
+ vmw_fifo_commit(dev_priv, submit_size);
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(val_buf->bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+static int vmw_gb_surface_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyGBSurface body;
+ } *cmd;
+
+ if (likely(res->id == -1))
+ return 0;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_context_binding_res_list_scrub(&res->binding_head);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ mutex_unlock(&dev_priv->binding_mutex);
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.sid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ mutex_unlock(&dev_priv->binding_mutex);
+ vmw_resource_release_id(res);
+ vmw_3d_resource_dec(dev_priv, false);
+
+ return 0;
+}
+
+/**
+ * vmw_gb_surface_define_ioctl - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_surface *user_srf;
+ struct vmw_surface *srf;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ union drm_vmw_gb_surface_create_arg *arg =
+ (union drm_vmw_gb_surface_create_arg *)data;
+ struct drm_vmw_gb_surface_create_req *req = &arg->req;
+ struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+ uint32_t size;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ const struct svga3d_surface_desc *desc;
+ uint32_t backup_handle;
+
+ if (unlikely(vmw_user_surface_size == 0))
+ vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+ 128;
+
+ size = vmw_user_surface_size + 128;
+
+ desc = svga3dsurface_get_desc(req->format);
+ if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+ DRM_ERROR("Invalid surface format for surface creation.\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ size, false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for surface"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+ if (unlikely(user_srf == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_user_srf;
+ }
+
+ srf = &user_srf->srf;
+ res = &srf->res;
+
+ srf->flags = req->svga3d_flags;
+ srf->format = req->format;
+ srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
+ srf->mip_levels[0] = req->mip_levels;
+ srf->num_sizes = 1;
+ srf->sizes = NULL;
+ srf->offsets = NULL;
+ user_srf->size = size;
+ srf->base_size = req->base_size;
+ srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+ srf->multisample_count = req->multisample_count;
+ res->backup_size = svga3dsurface_get_serialized_size
+ (srf->format, srf->base_size, srf->mip_levels[0],
+ srf->flags & SVGA3D_SURFACE_CUBEMAP);
+
+ user_srf->prime.base.shareable = false;
+ user_srf->prime.base.tfile = NULL;
+
+ /**
+ * From this point, the generic resource management functions
+ * destroy the object on failure.
+ */
+
+ ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+
+ if (req->buffer_handle != SVGA3D_INVALID_ID) {
+ ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
+ &res->backup);
+ } else if (req->drm_surface_flags &
+ drm_vmw_surface_flag_create_buffer)
+ ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+ res->backup_size,
+ req->drm_surface_flags &
+ drm_vmw_surface_flag_shareable,
+ &backup_handle,
+ &res->backup);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ tmp = vmw_resource_reference(&srf->res);
+ ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+ req->drm_surface_flags &
+ drm_vmw_surface_flag_shareable,
+ VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ rep->handle = user_srf->prime.base.hash.key;
+ rep->backup_size = res->backup_size;
+ if (res->backup) {
+ rep->buffer_map_handle =
+ drm_vma_node_offset_addr(&res->backup->base.vma_node);
+ rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+ rep->buffer_handle = backup_handle;
+ } else {
+ rep->buffer_map_handle = 0;
+ rep->buffer_size = 0;
+ rep->buffer_handle = SVGA3D_INVALID_ID;
+ }
+
+ vmw_resource_unreference(&res);
+
+ ttm_read_unlock(&vmaster->lock);
+ return 0;
+out_no_user_srf:
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+}
+
+/**
+ * vmw_gb_surface_reference_ioctl - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ union drm_vmw_gb_surface_reference_arg *arg =
+ (union drm_vmw_gb_surface_reference_arg *)data;
+ struct drm_vmw_surface_arg *req = &arg->req;
+ struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_surface *srf;
+ struct vmw_user_surface *user_srf;
+ struct ttm_base_object *base;
+ uint32_t backup_handle;
+ int ret = -EINVAL;
+
+ base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Could not find surface to reference.\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
+ goto out_bad_resource;
+
+ user_srf = container_of(base, struct vmw_user_surface, prime.base);
+ srf = &user_srf->srf;
+ if (srf->res.backup == NULL) {
+ DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
+ goto out_bad_resource;
+ }
+
+ ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
+ TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a GB surface.\n");
+ goto out_bad_resource;
+ }
+
+ mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
+ ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
+ &backup_handle);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a GB surface "
+ "backup buffer.\n");
+ (void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ req->sid,
+ TTM_REF_USAGE);
+ goto out_bad_resource;
+ }
+
+ rep->creq.svga3d_flags = srf->flags;
+ rep->creq.format = srf->format;
+ rep->creq.mip_levels = srf->mip_levels[0];
+ rep->creq.drm_surface_flags = 0;
+ rep->creq.multisample_count = srf->multisample_count;
+ rep->creq.autogen_filter = srf->autogen_filter;
+ rep->creq.buffer_handle = backup_handle;
+ rep->creq.base_size = srf->base_size;
+ rep->crep.handle = user_srf->prime.base.hash.key;
+ rep->crep.backup_size = srf->res.backup_size;
+ rep->crep.buffer_handle = backup_handle;
+ rep->crep.buffer_map_handle =
+ drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+ rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+
+out_bad_resource:
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index 7d6bed222542..b2fd029d67b3 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -1,6 +1,6 @@
config TEGRA_HOST1X
tristate "NVIDIA Tegra host1x driver"
- depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
+ depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
help
Driver for the NVIDIA Tegra host1x hardware.
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index afa1e9e4e512..c1189f004441 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -7,7 +7,9 @@ host1x-y = \
channel.o \
job.o \
debug.o \
+ mipi.o \
hw/host1x01.o \
- hw/host1x02.o
+ hw/host1x02.o \
+ hw/host1x04.o
obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 6a929591aa73..ccdd2e6da5e3 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -188,6 +188,7 @@ int host1x_device_init(struct host1x_device *device)
return 0;
}
+EXPORT_SYMBOL(host1x_device_init);
int host1x_device_exit(struct host1x_device *device)
{
@@ -213,6 +214,7 @@ int host1x_device_exit(struct host1x_device *device)
return 0;
}
+EXPORT_SYMBOL(host1x_device_exit);
static int host1x_register_client(struct host1x *host1x,
struct host1x_client *client)
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
index 83ea51b9f0fc..b4ae3affb987 100644
--- a/drivers/gpu/host1x/channel.c
+++ b/drivers/gpu/host1x/channel.c
@@ -43,6 +43,7 @@ int host1x_job_submit(struct host1x_job *job)
return host1x_hw_channel_submit(host, job);
}
+EXPORT_SYMBOL(host1x_job_submit);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel)
{
@@ -60,6 +61,7 @@ struct host1x_channel *host1x_channel_get(struct host1x_channel *channel)
return err ? NULL : channel;
}
+EXPORT_SYMBOL(host1x_channel_get);
void host1x_channel_put(struct host1x_channel *channel)
{
@@ -76,6 +78,7 @@ void host1x_channel_put(struct host1x_channel *channel)
mutex_unlock(&channel->reflock);
}
+EXPORT_SYMBOL(host1x_channel_put);
struct host1x_channel *host1x_channel_request(struct device *dev)
{
@@ -115,6 +118,7 @@ fail:
mutex_unlock(&host->chlist_mutex);
return NULL;
}
+EXPORT_SYMBOL(host1x_channel_request);
void host1x_channel_free(struct host1x_channel *channel)
{
@@ -124,3 +128,4 @@ void host1x_channel_free(struct host1x_channel *channel)
list_del(&channel->list);
kfree(channel);
}
+EXPORT_SYMBOL(host1x_channel_free);
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index 3ec7d77de24d..ee3d12b51c50 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -96,7 +96,6 @@ static void show_all(struct host1x *m, struct output *o)
show_channels(ch, o, true);
}
-#ifdef CONFIG_DEBUG_FS
static void show_all_no_fifo(struct host1x *host1x, struct output *o)
{
struct host1x_channel *ch;
@@ -153,7 +152,7 @@ static const struct file_operations host1x_debug_fops = {
.release = single_release,
};
-void host1x_debug_init(struct host1x *host1x)
+static void host1x_debugfs_init(struct host1x *host1x)
{
struct dentry *de = debugfs_create_dir("tegra-host1x", NULL);
@@ -180,18 +179,22 @@ void host1x_debug_init(struct host1x *host1x)
&host1x_debug_force_timeout_channel);
}
-void host1x_debug_deinit(struct host1x *host1x)
+static void host1x_debugfs_exit(struct host1x *host1x)
{
debugfs_remove_recursive(host1x->debugfs);
}
-#else
+
void host1x_debug_init(struct host1x *host1x)
{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ host1x_debugfs_init(host1x);
}
+
void host1x_debug_deinit(struct host1x *host1x)
{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ host1x_debugfs_exit(host1x);
}
-#endif
void host1x_debug_dump(struct host1x *host1x)
{
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 80da003d63de..2529908d304b 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -34,6 +34,7 @@
#include "debug.h"
#include "hw/host1x01.h"
#include "hw/host1x02.h"
+#include "hw/host1x04.h"
void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
@@ -77,7 +78,17 @@ static const struct host1x_info host1x02_info = {
.sync_offset = 0x3000,
};
+static const struct host1x_info host1x04_info = {
+ .nb_channels = 12,
+ .nb_pts = 192,
+ .nb_mlocks = 16,
+ .nb_bases = 64,
+ .init = host1x04_init,
+ .sync_offset = 0x2100,
+};
+
static struct of_device_id host1x_of_match[] = {
+ { .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
@@ -210,17 +221,26 @@ static int __init tegra_host1x_init(void)
return err;
err = platform_driver_register(&tegra_host1x_driver);
- if (err < 0) {
- host1x_bus_exit();
- return err;
- }
+ if (err < 0)
+ goto unregister_bus;
+
+ err = platform_driver_register(&tegra_mipi_driver);
+ if (err < 0)
+ goto unregister_host1x;
return 0;
+
+unregister_host1x:
+ platform_driver_unregister(&tegra_host1x_driver);
+unregister_bus:
+ host1x_bus_exit();
+ return err;
}
module_init(tegra_host1x_init);
static void __exit tegra_host1x_exit(void)
{
+ platform_driver_unregister(&tegra_mipi_driver);
platform_driver_unregister(&tegra_host1x_driver);
host1x_bus_exit();
}
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index a61a976e7a42..0b6e8e9629c5 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -306,4 +306,6 @@ static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
host->debug_op->show_mlocks(host, o);
}
+extern struct platform_driver tegra_mipi_driver;
+
#endif
diff --git a/drivers/gpu/host1x/hw/host1x02.c b/drivers/gpu/host1x/hw/host1x02.c
index e98caca0ca42..928946c2144b 100644
--- a/drivers/gpu/host1x/hw/host1x02.c
+++ b/drivers/gpu/host1x/hw/host1x02.c
@@ -17,8 +17,8 @@
*/
/* include hw specification */
-#include "host1x01.h"
-#include "host1x01_hardware.h"
+#include "host1x02.h"
+#include "host1x02_hardware.h"
/* include code */
#include "cdma_hw.c"
diff --git a/drivers/gpu/host1x/hw/host1x02_hardware.h b/drivers/gpu/host1x/hw/host1x02_hardware.h
new file mode 100644
index 000000000000..154901860bc6
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x02_hardware.h
@@ -0,0 +1,142 @@
+/*
+ * Tegra host1x Register Offsets for Tegra114
+ *
+ * Copyright (c) 2010-2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X02_HARDWARE_H
+#define __HOST1X_HOST1X02_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x02_channel.h"
+#include "hw_host1x02_sync.h"
+#include "hw_host1x02_uclass.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_wait_syncpt_indx_f(indx)
+ | host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+ | host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+ unsigned indx, unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_wait_syncpt_base_indx_f(indx)
+ | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+ unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+ unsigned cond, unsigned indx)
+{
+ return host1x_uclass_incr_syncpt_cond_f(cond)
+ | host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indbe_f(0xf)
+ | host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset);
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset)
+ | host1x_uclass_indoff_rwn_read_v();
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+ unsigned class_id, unsigned offset, unsigned mask)
+{
+ return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+ return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+ return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+ return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+ return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+ return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+ host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+ return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+ return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
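Each helper above encodes one host1x command word: bits 31:28 select the opcode and the low bits carry the offset, count or mask fields, with data words following an incr/nonincr header. As an illustration only (the function is made up and not part of this patch), a short push-buffer sequence could be assembled like this:

	/* Illustrative only: write one register in a class, then increment a syncpoint. */
	static inline void example_fill_pushbuf(u32 *buf, unsigned class_id,
						unsigned reg, u32 value,
						unsigned cond, unsigned syncpt)
	{
		buf[0] = host1x_opcode_setclass(class_id, 0, 0);
		buf[1] = host1x_opcode_incr(reg, 1);	/* one data word follows */
		buf[2] = value;
		buf[3] = host1x_opcode_imm_incr_syncpt(cond, syncpt);
	}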
diff --git a/drivers/gpu/host1x/hw/host1x04.c b/drivers/gpu/host1x/hw/host1x04.c
new file mode 100644
index 000000000000..8007c70fa9c4
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x04.c
@@ -0,0 +1,42 @@
+/*
+ * Host1x init for Tegra124 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "host1x04.h"
+#include "host1x04_hardware.h"
+
+/* include code */
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
+
+#include "../dev.h"
+
+int host1x04_init(struct host1x *host)
+{
+ host->channel_op = &host1x_channel_ops;
+ host->cdma_op = &host1x_cdma_ops;
+ host->cdma_pb_op = &host1x_pushbuffer_ops;
+ host->syncpt_op = &host1x_syncpt_ops;
+ host->intr_op = &host1x_intr_ops;
+ host->debug_op = &host1x_debug_ops;
+
+ return 0;
+}
diff --git a/drivers/gpu/host1x/hw/host1x04.h b/drivers/gpu/host1x/hw/host1x04.h
new file mode 100644
index 000000000000..a9ab7496c06e
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x04.h
@@ -0,0 +1,26 @@
+/*
+ * Host1x init for Tegra124 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_HOST1X04_H
+#define HOST1X_HOST1X04_H
+
+struct host1x;
+
+int host1x04_init(struct host1x *host);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/host1x04_hardware.h b/drivers/gpu/host1x/hw/host1x04_hardware.h
new file mode 100644
index 000000000000..de1a38175328
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x04_hardware.h
@@ -0,0 +1,142 @@
+/*
+ * Tegra host1x Register Offsets for Tegra124
+ *
+ * Copyright (c) 2010-2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X04_HARDWARE_H
+#define __HOST1X_HOST1X04_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x04_channel.h"
+#include "hw_host1x04_sync.h"
+#include "hw_host1x04_uclass.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_wait_syncpt_indx_f(indx)
+ | host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+ | host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+ unsigned indx, unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_wait_syncpt_base_indx_f(indx)
+ | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+ unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+ unsigned cond, unsigned indx)
+{
+ return host1x_uclass_incr_syncpt_cond_f(cond)
+ | host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indbe_f(0xf)
+ | host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset);
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset)
+ | host1x_uclass_indoff_rwn_read_v();
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+ unsigned class_id, unsigned offset, unsigned mask)
+{
+ return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+ return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+ return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+ return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+ return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+ return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+ host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+ return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+ return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
index a3b3c9874413..028e49d9bac9 100644
--- a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
@@ -111,6 +111,12 @@ static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
}
#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+ return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+ host1x_uclass_load_syncpt_base_r()
static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
{
return (v & 0xff) << 24;
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_channel.h b/drivers/gpu/host1x/hw/hw_host1x04_channel.h
new file mode 100644
index 000000000000..95e6f96142b9
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x04_channel.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
+#ifndef HOST1X_HW_HOST1X04_CHANNEL_H
+#define HOST1X_HW_HOST1X04_CHANNEL_H
+
+static inline u32 host1x_channel_fifostat_r(void)
+{
+ return 0x0;
+}
+#define HOST1X_CHANNEL_FIFOSTAT \
+ host1x_channel_fifostat_r()
+static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
+{
+ return (r >> 11) & 0x1;
+}
+#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
+ host1x_channel_fifostat_cfempty_v(r)
+static inline u32 host1x_channel_dmastart_r(void)
+{
+ return 0x14;
+}
+#define HOST1X_CHANNEL_DMASTART \
+ host1x_channel_dmastart_r()
+static inline u32 host1x_channel_dmaput_r(void)
+{
+ return 0x18;
+}
+#define HOST1X_CHANNEL_DMAPUT \
+ host1x_channel_dmaput_r()
+static inline u32 host1x_channel_dmaget_r(void)
+{
+ return 0x1c;
+}
+#define HOST1X_CHANNEL_DMAGET \
+ host1x_channel_dmaget_r()
+static inline u32 host1x_channel_dmaend_r(void)
+{
+ return 0x20;
+}
+#define HOST1X_CHANNEL_DMAEND \
+ host1x_channel_dmaend_r()
+static inline u32 host1x_channel_dmactrl_r(void)
+{
+ return 0x24;
+}
+#define HOST1X_CHANNEL_DMACTRL \
+ host1x_channel_dmactrl_r()
+static inline u32 host1x_channel_dmactrl_dmastop(void)
+{
+ return 1 << 0;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
+ host1x_channel_dmactrl_dmastop()
+static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
+{
+ return (r >> 0) & 0x1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
+ host1x_channel_dmactrl_dmastop_v(r)
+static inline u32 host1x_channel_dmactrl_dmagetrst(void)
+{
+ return 1 << 1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
+ host1x_channel_dmactrl_dmagetrst()
+static inline u32 host1x_channel_dmactrl_dmainitget(void)
+{
+ return 1 << 2;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
+ host1x_channel_dmactrl_dmainitget()
+
+#endif
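Following the accessor naming convention documented at the top of this file, _r() helpers return register offsets and _v() helpers extract a field from a register value. A hedged sketch of typical use (host1x_ch_readl() is assumed here and may not match the driver's actual channel accessor):

	/* Illustrative only: test whether the channel command FIFO is empty. */
	static inline bool example_channel_fifo_empty(struct host1x_channel *ch)
	{
		u32 stat = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);

		return HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(stat) != 0;
	}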
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_sync.h b/drivers/gpu/host1x/hw/hw_host1x04_sync.h
new file mode 100644
index 000000000000..ef2275b5407a
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x04_sync.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
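A minimal sketch (not part of this header) of how the indexed accessors below are used: each _r(id) helper expands to a base offset plus id * REGISTER_STRIDE, so one macro addresses every syncpoint. host1x_sync_readl() and host1x_sync_writel() are assumed register helpers from the surrounding host1x code:

	/* Arm an interrupt 'incrs' increments past the current syncpoint value. */
	static void syncpt_set_threshold(struct host1x *host, unsigned int id,
					 u32 incrs)
	{
		u32 cur = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT(id));

		host1x_sync_writel(host, cur + incrs,
				   HOST1X_SYNC_SYNCPT_INT_THRESH(id));
	}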
+
+#ifndef HOST1X_HW_HOST1X04_SYNC_H
+#define HOST1X_HW_HOST1X04_SYNC_H
+
+#define REGISTER_STRIDE 4
+
+static inline u32 host1x_sync_syncpt_r(unsigned int id)
+{
+ return 0xf80 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT(id) \
+ host1x_sync_syncpt_r(id)
+static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
+{
+ return 0xe80 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
+ host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
+{
+ return 0xf00 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
+ host1x_sync_syncpt_thresh_int_disable_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
+{
+ return 0xf20 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
+ host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
+static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
+{
+ return 0xc00 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CF_SETUP(channel) \
+ host1x_sync_cf_setup_r(channel)
+static inline u32 host1x_sync_cf_setup_base_v(u32 r)
+{
+ return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
+ host1x_sync_cf_setup_base_v(r)
+static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
+{
+ return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
+ host1x_sync_cf_setup_limit_v(r)
+static inline u32 host1x_sync_cmdproc_stop_r(void)
+{
+ return 0xac;
+}
+#define HOST1X_SYNC_CMDPROC_STOP \
+ host1x_sync_cmdproc_stop_r()
+static inline u32 host1x_sync_ch_teardown_r(void)
+{
+ return 0xb0;
+}
+#define HOST1X_SYNC_CH_TEARDOWN \
+ host1x_sync_ch_teardown_r()
+static inline u32 host1x_sync_usec_clk_r(void)
+{
+ return 0x1a4;
+}
+#define HOST1X_SYNC_USEC_CLK \
+ host1x_sync_usec_clk_r()
+static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
+{
+ return 0x1a8;
+}
+#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
+ host1x_sync_ctxsw_timeout_cfg_r()
+static inline u32 host1x_sync_ip_busy_timeout_r(void)
+{
+ return 0x1bc;
+}
+#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
+ host1x_sync_ip_busy_timeout_r()
+static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
+{
+ return 0x340 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_MLOCK_OWNER(id) \
+ host1x_sync_mlock_owner_r(id)
+static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
+{
+ return (v & 0xf) << 8;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
+ host1x_sync_mlock_owner_chid_f(v)
+static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
+{
+ return (r >> 1) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
+ host1x_sync_mlock_owner_cpu_owns_v(r)
+static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
+{
+ return (r >> 0) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
+ host1x_sync_mlock_owner_ch_owns_v(r)
+static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
+{
+ return 0x1380 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
+ host1x_sync_syncpt_int_thresh_r(id)
+static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
+{
+ return 0x600 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_BASE(id) \
+ host1x_sync_syncpt_base_r(id)
+static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
+{
+ return 0xf60 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
+ host1x_sync_syncpt_cpu_incr_r(id)
+static inline u32 host1x_sync_cbread_r(unsigned int channel)
+{
+ return 0xc80 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBREAD(channel) \
+ host1x_sync_cbread_r(channel)
+static inline u32 host1x_sync_cfpeek_ctrl_r(void)
+{
+ return 0x74c;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL \
+ host1x_sync_cfpeek_ctrl_r()
+static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
+{
+ return (v & 0x3ff) << 0;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
+ host1x_sync_cfpeek_ctrl_addr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
+{
+ return (v & 0xf) << 16;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
+ host1x_sync_cfpeek_ctrl_channr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
+{
+ return (v & 0x1) << 31;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
+ host1x_sync_cfpeek_ctrl_ena_f(v)
+static inline u32 host1x_sync_cfpeek_read_r(void)
+{
+ return 0x750;
+}
+#define HOST1X_SYNC_CFPEEK_READ \
+ host1x_sync_cfpeek_read_r()
+static inline u32 host1x_sync_cfpeek_ptrs_r(void)
+{
+ return 0x754;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS \
+ host1x_sync_cfpeek_ptrs_r()
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
+{
+ return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
+ host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
+{
+ return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
+ host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
+static inline u32 host1x_sync_cbstat_r(unsigned int channel)
+{
+ return 0xcc0 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBSTAT(channel) \
+ host1x_sync_cbstat_r(channel)
+static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
+{
+ return (r >> 0) & 0xffff;
+}
+#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
+ host1x_sync_cbstat_cboffset_v(r)
+static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
+{
+ return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
+ host1x_sync_cbstat_cbclass_v(r)
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_uclass.h b/drivers/gpu/host1x/hw/hw_host1x04_uclass.h
new file mode 100644
index 000000000000..d1460e971493
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x04_uclass.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
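The uclass offsets below are host class method addresses used when building command streams rather than plain MMIO offsets. As a hedged sketch (not part of this header), the _f() helpers are |'d together to form a method's data word; host1x_cdma_push(), host1x_opcode_setclass() and HOST1X_CLASS_HOST1X are assumed to come from the surrounding host1x code:

	/* Queue a WAIT_SYNCPT method: wait until syncpoint 'id' reaches 'thresh'. */
	static void push_syncpt_wait(struct host1x_cdma *cdma, u32 id, u32 thresh)
	{
		u32 arg = host1x_uclass_wait_syncpt_indx_f(id) |
			  host1x_uclass_wait_syncpt_thresh_f(thresh);

		host1x_cdma_push(cdma,
				 host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
							host1x_uclass_wait_syncpt_r(), 1),
				 arg);
	}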
+
+#ifndef HOST1X_HW_HOST1X04_UCLASS_H
+#define HOST1X_HW_HOST1X04_UCLASS_H
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+ return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+ host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+ return (v & 0xff) << 8;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+ host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+ host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+ return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+ host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+ host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+ host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+ return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+ host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+ return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+ host1x_uclass_load_syncpt_base_r()
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+ host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+ return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+ host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+ return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+ host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+ return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+ host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+ return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+ host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+ return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+ host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+ return 1;
+}
+#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
+ host1x_uclass_indoff_rwn_read_v()
+
+#endif
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index b26dcc83bc1b..db9017adfe2b 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -20,7 +20,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
-#include <asm/mach/irq.h>
#include "../intr.h"
#include "../dev.h"
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index de5ec333ce1a..1146e3bba6e1 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -75,12 +75,14 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
return job;
}
+EXPORT_SYMBOL(host1x_job_alloc);
struct host1x_job *host1x_job_get(struct host1x_job *job)
{
kref_get(&job->ref);
return job;
}
+EXPORT_SYMBOL(host1x_job_get);
static void job_free(struct kref *ref)
{
@@ -93,6 +95,7 @@ void host1x_job_put(struct host1x_job *job)
{
kref_put(&job->ref, job_free);
}
+EXPORT_SYMBOL(host1x_job_put);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
u32 words, u32 offset)
@@ -104,6 +107,7 @@ void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
cur_gather->offset = offset;
job->num_gathers++;
}
+EXPORT_SYMBOL(host1x_job_add_gather);
/*
* NULL an already satisfied WAIT_SYNCPT host method, by patching its
@@ -560,6 +564,7 @@ out:
return err;
}
+EXPORT_SYMBOL(host1x_job_pin);
void host1x_job_unpin(struct host1x_job *job)
{
@@ -577,6 +582,7 @@ void host1x_job_unpin(struct host1x_job *job)
job->gather_copy_mapped,
job->gather_copy);
}
+EXPORT_SYMBOL(host1x_job_unpin);
/*
* Debug routine used to dump job entries
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
new file mode 100644
index 000000000000..9882ea122024
--- /dev/null
+++ b/drivers/gpu/host1x/mipi.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/host1x.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dev.h"
+
+#define MIPI_CAL_CTRL 0x00
+#define MIPI_CAL_CTRL_START (1 << 0)
+
+#define MIPI_CAL_AUTOCAL_CTRL 0x01
+
+#define MIPI_CAL_STATUS 0x02
+#define MIPI_CAL_STATUS_DONE (1 << 16)
+#define MIPI_CAL_STATUS_ACTIVE (1 << 0)
+
+#define MIPI_CAL_CONFIG_CSIA 0x05
+#define MIPI_CAL_CONFIG_CSIB 0x06
+#define MIPI_CAL_CONFIG_CSIC 0x07
+#define MIPI_CAL_CONFIG_CSID 0x08
+#define MIPI_CAL_CONFIG_CSIE 0x09
+#define MIPI_CAL_CONFIG_DSIA 0x0e
+#define MIPI_CAL_CONFIG_DSIB 0x0f
+#define MIPI_CAL_CONFIG_DSIC 0x10
+#define MIPI_CAL_CONFIG_DSID 0x11
+
+#define MIPI_CAL_CONFIG_SELECT (1 << 21)
+#define MIPI_CAL_CONFIG_HSPDOS(x) (((x) & 0x1f) << 16)
+#define MIPI_CAL_CONFIG_HSPUOS(x) (((x) & 0x1f) << 8)
+#define MIPI_CAL_CONFIG_TERMOS(x) (((x) & 0x1f) << 0)
+
+#define MIPI_CAL_BIAS_PAD_CFG0 0x16
+#define MIPI_CAL_BIAS_PAD_PDVCLAMP (1 << 1)
+#define MIPI_CAL_BIAS_PAD_E_VCLAMP_REF (1 << 0)
+
+#define MIPI_CAL_BIAS_PAD_CFG1 0x17
+
+#define MIPI_CAL_BIAS_PAD_CFG2 0x18
+#define MIPI_CAL_BIAS_PAD_PDVREG (1 << 1)
+
+static const struct module {
+ unsigned long reg;
+} modules[] = {
+ { .reg = MIPI_CAL_CONFIG_CSIA },
+ { .reg = MIPI_CAL_CONFIG_CSIB },
+ { .reg = MIPI_CAL_CONFIG_CSIC },
+ { .reg = MIPI_CAL_CONFIG_CSID },
+ { .reg = MIPI_CAL_CONFIG_CSIE },
+ { .reg = MIPI_CAL_CONFIG_DSIA },
+ { .reg = MIPI_CAL_CONFIG_DSIB },
+ { .reg = MIPI_CAL_CONFIG_DSIC },
+ { .reg = MIPI_CAL_CONFIG_DSID },
+};
+
+struct tegra_mipi {
+ void __iomem *regs;
+ struct mutex lock;
+ struct clk *clk;
+};
+
+struct tegra_mipi_device {
+ struct platform_device *pdev;
+ struct tegra_mipi *mipi;
+ struct device *device;
+ unsigned long pads;
+};
+
+static inline unsigned long tegra_mipi_readl(struct tegra_mipi *mipi,
+ unsigned long reg)
+{
+ return readl(mipi->regs + (reg << 2));
+}
+
+static inline void tegra_mipi_writel(struct tegra_mipi *mipi,
+ unsigned long value, unsigned long reg)
+{
+ writel(value, mipi->regs + (reg << 2));
+}
+
+struct tegra_mipi_device *tegra_mipi_request(struct device *device)
+{
+ struct device_node *np = device->of_node;
+ struct tegra_mipi_device *dev;
+ struct of_phandle_args args;
+ int err;
+
+ err = of_parse_phandle_with_args(np, "nvidia,mipi-calibrate",
+ "#nvidia,mipi-calibrate-cells", 0,
+ &args);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ of_node_put(args.np);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dev->pdev = of_find_device_by_node(args.np);
+ if (!dev->pdev) {
+ of_node_put(args.np);
+ err = -ENODEV;
+ goto free;
+ }
+
+ of_node_put(args.np);
+
+ dev->mipi = platform_get_drvdata(dev->pdev);
+ if (!dev->mipi) {
+ err = -EPROBE_DEFER;
+ goto pdev_put;
+ }
+
+ dev->pads = args.args[0];
+ dev->device = device;
+
+ return dev;
+
+pdev_put:
+ platform_device_put(dev->pdev);
+free:
+ kfree(dev);
+out:
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(tegra_mipi_request);
+
+void tegra_mipi_free(struct tegra_mipi_device *device)
+{
+ platform_device_put(device->pdev);
+ kfree(device);
+}
+EXPORT_SYMBOL(tegra_mipi_free);
+
+static int tegra_mipi_wait(struct tegra_mipi *mipi)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(250);
+ unsigned long value;
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_mipi_readl(mipi, MIPI_CAL_STATUS);
+ if ((value & MIPI_CAL_STATUS_ACTIVE) == 0 &&
+ (value & MIPI_CAL_STATUS_DONE) != 0)
+ return 0;
+
+ usleep_range(10, 50);
+ }
+
+ return -ETIMEDOUT;
+}
+
+int tegra_mipi_calibrate(struct tegra_mipi_device *device)
+{
+ unsigned long value;
+ unsigned int i;
+ int err;
+
+ err = clk_enable(device->mipi->clk);
+ if (err < 0)
+ return err;
+
+ mutex_lock(&device->mipi->lock);
+
+ value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG0);
+ value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
+ value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
+ tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+
+ value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2);
+ value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
+ tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
+
+ for (i = 0; i < ARRAY_SIZE(modules); i++) {
+ if (device->pads & BIT(i))
+ value = MIPI_CAL_CONFIG_SELECT |
+ MIPI_CAL_CONFIG_HSPDOS(0) |
+ MIPI_CAL_CONFIG_HSPUOS(4) |
+ MIPI_CAL_CONFIG_TERMOS(5);
+ else
+ value = 0;
+
+ tegra_mipi_writel(device->mipi, value, modules[i].reg);
+ }
+
+ tegra_mipi_writel(device->mipi, MIPI_CAL_CTRL_START, MIPI_CAL_CTRL);
+
+ err = tegra_mipi_wait(device->mipi);
+
+ mutex_unlock(&device->mipi->lock);
+ clk_disable(device->mipi->clk);
+
+ return err;
+}
+EXPORT_SYMBOL(tegra_mipi_calibrate);
+
+static int tegra_mipi_probe(struct platform_device *pdev)
+{
+ struct tegra_mipi *mipi;
+ struct resource *res;
+ int err;
+
+ mipi = devm_kzalloc(&pdev->dev, sizeof(*mipi), GFP_KERNEL);
+ if (!mipi)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mipi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mipi->regs))
+ return PTR_ERR(mipi->regs);
+
+ mutex_init(&mipi->lock);
+
+ mipi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(mipi->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(mipi->clk);
+ }
+
+ err = clk_prepare(mipi->clk);
+ if (err < 0)
+ return err;
+
+ platform_set_drvdata(pdev, mipi);
+
+ return 0;
+}
+
+static int tegra_mipi_remove(struct platform_device *pdev)
+{
+ struct tegra_mipi *mipi = platform_get_drvdata(pdev);
+
+ clk_unprepare(mipi->clk);
+
+ return 0;
+}
+
+static struct of_device_id tegra_mipi_of_match[] = {
+ { .compatible = "nvidia,tegra114-mipi", },
+ { },
+};
+
+struct platform_driver tegra_mipi_driver = {
+ .driver = {
+ .name = "tegra-mipi",
+ .of_match_table = tegra_mipi_of_match,
+ },
+ .probe = tegra_mipi_probe,
+ .remove = tegra_mipi_remove,
+};
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 159c479829c9..bfb09d802abd 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -93,6 +93,7 @@ u32 host1x_syncpt_id(struct host1x_syncpt *sp)
{
return sp->id;
}
+EXPORT_SYMBOL(host1x_syncpt_id);
/*
* Updates the value sent to hardware.
@@ -168,6 +169,7 @@ int host1x_syncpt_incr(struct host1x_syncpt *sp)
{
return host1x_hw_syncpt_cpu_incr(sp->host, sp);
}
+EXPORT_SYMBOL(host1x_syncpt_incr);
/*
 * Updates sync point from hardware, and returns true if syncpoint is expired,
@@ -377,6 +379,7 @@ struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
struct host1x *host = dev_get_drvdata(dev->parent);
return host1x_syncpt_alloc(host, dev, flags);
}
+EXPORT_SYMBOL(host1x_syncpt_request);
void host1x_syncpt_free(struct host1x_syncpt *sp)
{
@@ -390,6 +393,7 @@ void host1x_syncpt_free(struct host1x_syncpt *sp)
sp->name = NULL;
sp->client_managed = false;
}
+EXPORT_SYMBOL(host1x_syncpt_free);
void host1x_syncpt_deinit(struct host1x *host)
{
@@ -408,6 +412,7 @@ u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
smp_rmb();
return (u32)atomic_read(&sp->max_val);
}
+EXPORT_SYMBOL(host1x_syncpt_read_max);
/*
* Read min, which is a shadow of the current sync point value in hardware.
@@ -417,6 +422,7 @@ u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
smp_rmb();
return (u32)atomic_read(&sp->min_val);
}
+EXPORT_SYMBOL(host1x_syncpt_read_min);
int host1x_syncpt_nb_pts(struct host1x *host)
{
@@ -439,13 +445,16 @@ struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
return NULL;
return host->syncpt + id;
}
+EXPORT_SYMBOL(host1x_syncpt_get);
struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
{
return sp ? sp->base : NULL;
}
+EXPORT_SYMBOL(host1x_syncpt_get_base);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
{
return base->id;
}
+EXPORT_SYMBOL(host1x_syncpt_base_id);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 34e2d39d4ce8..f7220011a00b 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -344,6 +344,7 @@ config HID_LOGITECH
config HID_LOGITECH_DJ
tristate "Logitech Unifying receivers full support"
+ depends on HIDRAW
depends on HID_LOGITECH
---help---
Say Y if you want support for Logitech Unifying receivers and devices.
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 253fe23ef7fe..3bfac3accd22 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1715,6 +1715,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) },
@@ -1823,8 +1824,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS817_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
@@ -1832,6 +1831,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
@@ -2117,6 +2118,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) },
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 8453214ec376..53b771d5683c 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -768,6 +768,8 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_ALTERASE] = "AlternateErase", [KEY_CANCEL] = "Cancel",
[KEY_BRIGHTNESSDOWN] = "BrightnessDown", [KEY_BRIGHTNESSUP] = "BrightnessUp",
[KEY_MEDIA] = "Media", [KEY_UNKNOWN] = "Unknown",
+ [BTN_DPAD_UP] = "BtnDPadUp", [BTN_DPAD_DOWN] = "BtnDPadDown",
+ [BTN_DPAD_LEFT] = "BtnDPadLeft", [BTN_DPAD_RIGHT] = "BtnDPadRight",
[BTN_0] = "Btn0", [BTN_1] = "Btn1",
[BTN_2] = "Btn2", [BTN_3] = "Btn3",
[BTN_4] = "Btn4", [BTN_5] = "Btn5",
@@ -797,7 +799,8 @@ static const char *keys[KEY_MAX + 1] = {
[BTN_TOOL_MOUSE] = "ToolMouse", [BTN_TOOL_LENS] = "ToolLens",
[BTN_TOUCH] = "Touch", [BTN_STYLUS] = "Stylus",
[BTN_STYLUS2] = "Stylus2", [BTN_TOOL_DOUBLETAP] = "ToolDoubleTap",
- [BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_GEAR_DOWN] = "WheelBtn",
+ [BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_TOOL_QUADTAP] = "ToolQuadrupleTap",
+ [BTN_GEAR_DOWN] = "WheelBtn",
[BTN_GEAR_UP] = "Gear up", [KEY_OK] = "Ok",
[KEY_SELECT] = "Select", [KEY_GOTO] = "Goto",
[KEY_CLEAR] = "Clear", [KEY_POWER2] = "Power2",
diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
index 0caa676de622..d60fbd0adc0c 100644
--- a/drivers/hid/hid-holtek-mouse.c
+++ b/drivers/hid/hid-holtek-mouse.c
@@ -49,6 +49,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
break;
case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A:
+ case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070:
case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081:
if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f
&& rdesc[111] == 0xff && rdesc[112] == 0x7f) {
@@ -65,6 +66,8 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static const struct hid_device_id holtek_mouse_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+ USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f9304cb37154..5a5248f2cc07 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -241,6 +241,8 @@
#define USB_VENDOR_ID_CYGNAL 0x10c4
#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a
+#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244
+
#define USB_VENDOR_ID_CYPRESS 0x04b4
#define USB_DEVICE_ID_CYPRESS_MOUSE 0x0001
#define USB_DEVICE_ID_CYPRESS_HIDCOM 0x5500
@@ -445,6 +447,10 @@
#define USB_VENDOR_ID_ILITEK 0x222a
#define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001
+#define USB_VENDOR_ID_INTEL_0 0x8086
+#define USB_VENDOR_ID_INTEL_1 0x8087
+#define USB_DEVICE_ID_INTEL_HID_SENSOR 0x09fa
+
#define USB_VENDOR_ID_ION 0x15e4
#define USB_DEVICE_ID_ICADE 0x0132
@@ -455,6 +461,7 @@
#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067
+#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070 0xa070
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072 0xa072
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081
@@ -552,6 +559,7 @@
#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD 0xc20a
#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD 0xc211
#define USB_DEVICE_ID_LOGITECH_EXTREME_3D 0xc215
+#define USB_DEVICE_ID_LOGITECH_DUAL_ACTION 0xc216
#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2 0xc218
#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2 0xc219
#define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283
@@ -755,9 +763,11 @@
#define USB_VENDOR_ID_SIGMATEL 0x066F
#define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780
-#define USB_VENDOR_ID_SIS2_TOUCH 0x0457
+#define USB_VENDOR_ID_SIS_TOUCH 0x0457
#define USB_DEVICE_ID_SIS9200_TOUCH 0x9200
#define USB_DEVICE_ID_SIS817_TOUCH 0x0817
+#define USB_DEVICE_ID_SIS_TS 0x1013
+#define USB_DEVICE_ID_SIS1030_TOUCH 0x1030
#define USB_VENDOR_ID_SKYCABLE 0x1223
#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
@@ -767,6 +777,7 @@
#define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE 0x0374
#define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306
#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002
#define USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER 0x1000
@@ -809,6 +820,8 @@
#define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013
#define USB_DEVICE_ID_SYNAPTICS_LTS1 0x0af8
#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
+#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
+#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
#define USB_VENDOR_ID_THINGM 0x27b8
#define USB_DEVICE_ID_BLINK1 0x01ed
@@ -939,7 +952,5 @@
#define USB_VENDOR_ID_PRIMAX 0x0461
#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
-#define USB_VENDOR_ID_SIS 0x0457
-#define USB_DEVICE_ID_SIS_TS 0x1013
#endif
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index d97f2323af57..d50e7313b171 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1279,7 +1279,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid)
input_dev->id.vendor = hid->vendor;
input_dev->id.product = hid->product;
input_dev->id.version = hid->version;
- input_dev->dev.parent = hid->dev.parent;
+ input_dev->dev.parent = &hid->dev;
hidinput->input = input_dev;
list_add_tail(&hidinput->list, &hid->inputs);
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 06eb45fa6331..9fe9d4ac3114 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -758,6 +758,8 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D),
.driver_data = LG_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION),
+ .driver_data = LG_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL),
.driver_data = LG_NOGET | LG_FF4 },
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index a7947d8251a8..f45279c3b11a 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -516,6 +516,14 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
kfree(dj_report);
+
+ /*
+ * Ugly sleep to work around a USB 3.0 bug when the receiver is still
+ * processing the "switch-to-dj" command while we send another command.
+ * 50 msec should give the receiver enough time to be ready.
+ */
+ msleep(50);
+
return retval;
}
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 551795b7da1d..c6ef6eed3091 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -73,6 +73,7 @@ static int ms_ergonomy_kb_quirk(struct hid_input *hi, struct hid_usage *usage,
set_bit(KEY_F16, input->keybit);
set_bit(KEY_F17, input->keybit);
set_bit(KEY_F18, input->keybit);
+ break;
default:
return 0;
}
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index d83b1e8b505b..f134d73beca1 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1301,11 +1301,14 @@ static const struct hid_device_id mt_devices[] = {
/* SiS panels */
{ .driver_data = MT_CLS_DEFAULT,
- HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH,
+ HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH,
USB_DEVICE_ID_SIS9200_TOUCH) },
{ .driver_data = MT_CLS_DEFAULT,
- HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH,
+ HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH,
USB_DEVICE_ID_SIS817_TOUCH) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH,
+ USB_DEVICE_ID_SIS1030_TOUCH) },
/* Stantum panels */
{ .driver_data = MT_CLS_CONFIDENCE,
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 8fab82829f8b..46f4480035bc 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -26,6 +26,8 @@
#include <linux/hid-sensor-hub.h>
#include "hid-ids.h"
+#define HID_SENSOR_HUB_ENUM_QUIRK 0x01
+
/**
* struct sensor_hub_pending - Synchronous read pending information
* @status: Pending status true/false.
@@ -64,6 +66,7 @@ struct sensor_hub_data {
spinlock_t dyn_callback_lock;
struct mfd_cell *hid_sensor_hub_client_devs;
int hid_sensor_client_cnt;
+ unsigned long quirks;
};
/**
@@ -497,6 +500,40 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev)
}
EXPORT_SYMBOL_GPL(sensor_hub_device_close);
+static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ int index;
+ struct sensor_hub_data *sd = hid_get_drvdata(hdev);
+ unsigned char report_block[] = {
+ 0x0a, 0x16, 0x03, 0x15, 0x00, 0x25, 0x05};
+ unsigned char power_block[] = {
+ 0x0a, 0x19, 0x03, 0x15, 0x00, 0x25, 0x05};
+
+ if (!(sd->quirks & HID_SENSOR_HUB_ENUM_QUIRK)) {
+ hid_dbg(hdev, "No Enum quirks\n");
+ return rdesc;
+ }
+
+ /* Look for the power and report state usage IDs and force them to 1 */
+ for (index = 0; index < *rsize; ++index) {
+ if (((*rsize - index) > sizeof(report_block)) &&
+ !memcmp(&rdesc[index], report_block,
+ sizeof(report_block))) {
+ rdesc[index + 4] = 0x01;
+ index += sizeof(report_block);
+ }
+ if (((*rsize - index) > sizeof(power_block)) &&
+ !memcmp(&rdesc[index], power_block,
+ sizeof(power_block))) {
+ rdesc[index + 4] = 0x01;
+ index += sizeof(power_block);
+ }
+ }
+
+ return rdesc;
+}
+
static int sensor_hub_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -520,6 +557,7 @@ static int sensor_hub_probe(struct hid_device *hdev,
return -ENOMEM;
}
hid_set_drvdata(hdev, sd);
+ sd->quirks = id->driver_data;
sd->hsdev->hdev = hdev;
sd->hsdev->vendor_id = hdev->vendor;
sd->hsdev->product_id = hdev->product;
@@ -621,6 +659,12 @@ static void sensor_hub_remove(struct hid_device *hdev)
}
static const struct hid_device_id sensor_hub_devices[] = {
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_0,
+ USB_DEVICE_ID_INTEL_HID_SENSOR),
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1,
+ USB_DEVICE_ID_INTEL_HID_SENSOR),
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
HID_ANY_ID) },
{ }
@@ -633,6 +677,7 @@ static struct hid_driver sensor_hub_driver = {
.probe = sensor_hub_probe,
.remove = sensor_hub_remove,
.raw_event = sensor_hub_raw_event,
+ .report_fixup = sensor_hub_report_fixup,
#ifdef CONFIG_PM
.suspend = sensor_hub_suspend,
.resume = sensor_hub_resume,
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 098af2f84b8c..12354055d474 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -33,11 +33,17 @@
#include "hid-ids.h"
-#define VAIO_RDESC_CONSTANT (1 << 0)
-#define SIXAXIS_CONTROLLER_USB (1 << 1)
-#define SIXAXIS_CONTROLLER_BT (1 << 2)
-#define BUZZ_CONTROLLER (1 << 3)
-#define PS3REMOTE (1 << 4)
+#define VAIO_RDESC_CONSTANT BIT(0)
+#define SIXAXIS_CONTROLLER_USB BIT(1)
+#define SIXAXIS_CONTROLLER_BT BIT(2)
+#define BUZZ_CONTROLLER BIT(3)
+#define PS3REMOTE BIT(4)
+#define DUALSHOCK4_CONTROLLER_USB BIT(5)
+#define DUALSHOCK4_CONTROLLER_BT BIT(6)
+
+#define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER_USB | BUZZ_CONTROLLER | DUALSHOCK4_CONTROLLER_USB)
+
+#define MAX_LEDS 4
static const u8 sixaxis_rdesc_fixup[] = {
0x95, 0x13, 0x09, 0x01, 0x81, 0x02, 0x95, 0x0C,
@@ -67,6 +73,265 @@ static const u8 sixaxis_rdesc_fixup2[] = {
0xb1, 0x02, 0xc0, 0xc0,
};
+/* The default descriptor doesn't provide mapping for the accelerometers
+ * or orientation sensors. This fixed descriptor maps the accelerometers
+ * to usage values 0x40, 0x41 and 0x42 and maps the orientation sensors
+ * to usage values 0x43, 0x44 and 0x45.
+ */
+static u8 dualshock4_usb_rdesc[] = {
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x05, /* Usage (Gamepad), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x01, /* Report ID (1), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x09, 0x32, /* Usage (Z), */
+ 0x09, 0x35, /* Usage (Rz), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x04, /* Report Count (4), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x39, /* Usage (Hat Switch), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x07, /* Logical Maximum (7), */
+ 0x35, 0x00, /* Physical Minimum (0), */
+ 0x46, 0x3B, 0x01, /* Physical Maximum (315), */
+ 0x65, 0x14, /* Unit (Degrees), */
+ 0x75, 0x04, /* Report Size (4), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x42, /* Input (Variable, Null State), */
+ 0x65, 0x00, /* Unit, */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x0E, /* Usage Maximum (0Eh), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x0E, /* Report Count (14), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x09, 0x20, /* Usage (20h), */
+ 0x75, 0x06, /* Report Size (6), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x7F, /* Logical Maximum (127), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x33, /* Usage (Rx), */
+ 0x09, 0x34, /* Usage (Ry), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x09, 0x21, /* Usage (21h), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x19, 0x40, /* Usage Minimum (40h), */
+ 0x29, 0x42, /* Usage Maximum (42h), */
+ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
+ 0x26, 0x00, 0x7F, /* Logical Maximum (32767), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x19, 0x43, /* Usage Minimum (43h), */
+ 0x29, 0x45, /* Usage Maximum (45h), */
+ 0x16, 0xFF, 0xBF, /* Logical Minimum (-16385), */
+ 0x26, 0x00, 0x40, /* Logical Maximum (16384), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x09, 0x21, /* Usage (21h), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0xFF, /* Logical Maximum (255), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x27, /* Report Count (39), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x85, 0x05, /* Report ID (5), */
+ 0x09, 0x22, /* Usage (22h), */
+ 0x95, 0x1F, /* Report Count (31), */
+ 0x91, 0x02, /* Output (Variable), */
+ 0x85, 0x04, /* Report ID (4), */
+ 0x09, 0x23, /* Usage (23h), */
+ 0x95, 0x24, /* Report Count (36), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x02, /* Report ID (2), */
+ 0x09, 0x24, /* Usage (24h), */
+ 0x95, 0x24, /* Report Count (36), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x08, /* Report ID (8), */
+ 0x09, 0x25, /* Usage (25h), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x10, /* Report ID (16), */
+ 0x09, 0x26, /* Usage (26h), */
+ 0x95, 0x04, /* Report Count (4), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x11, /* Report ID (17), */
+ 0x09, 0x27, /* Usage (27h), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x12, /* Report ID (18), */
+ 0x06, 0x02, 0xFF, /* Usage Page (FF02h), */
+ 0x09, 0x21, /* Usage (21h), */
+ 0x95, 0x0F, /* Report Count (15), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x13, /* Report ID (19), */
+ 0x09, 0x22, /* Usage (22h), */
+ 0x95, 0x16, /* Report Count (22), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x14, /* Report ID (20), */
+ 0x06, 0x05, 0xFF, /* Usage Page (FF05h), */
+ 0x09, 0x20, /* Usage (20h), */
+ 0x95, 0x10, /* Report Count (16), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x15, /* Report ID (21), */
+ 0x09, 0x21, /* Usage (21h), */
+ 0x95, 0x2C, /* Report Count (44), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x06, 0x80, 0xFF, /* Usage Page (FF80h), */
+ 0x85, 0x80, /* Report ID (128), */
+ 0x09, 0x20, /* Usage (20h), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x81, /* Report ID (129), */
+ 0x09, 0x21, /* Usage (21h), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x82, /* Report ID (130), */
+ 0x09, 0x22, /* Usage (22h), */
+ 0x95, 0x05, /* Report Count (5), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x83, /* Report ID (131), */
+ 0x09, 0x23, /* Usage (23h), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x84, /* Report ID (132), */
+ 0x09, 0x24, /* Usage (24h), */
+ 0x95, 0x04, /* Report Count (4), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x85, /* Report ID (133), */
+ 0x09, 0x25, /* Usage (25h), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x86, /* Report ID (134), */
+ 0x09, 0x26, /* Usage (26h), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x87, /* Report ID (135), */
+ 0x09, 0x27, /* Usage (27h), */
+ 0x95, 0x23, /* Report Count (35), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x88, /* Report ID (136), */
+ 0x09, 0x28, /* Usage (28h), */
+ 0x95, 0x22, /* Report Count (34), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x89, /* Report ID (137), */
+ 0x09, 0x29, /* Usage (29h), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x90, /* Report ID (144), */
+ 0x09, 0x30, /* Usage (30h), */
+ 0x95, 0x05, /* Report Count (5), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x91, /* Report ID (145), */
+ 0x09, 0x31, /* Usage (31h), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x92, /* Report ID (146), */
+ 0x09, 0x32, /* Usage (32h), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0x93, /* Report ID (147), */
+ 0x09, 0x33, /* Usage (33h), */
+ 0x95, 0x0C, /* Report Count (12), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xA0, /* Report ID (160), */
+ 0x09, 0x40, /* Usage (40h), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xA1, /* Report ID (161), */
+ 0x09, 0x41, /* Usage (41h), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xA2, /* Report ID (162), */
+ 0x09, 0x42, /* Usage (42h), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xA3, /* Report ID (163), */
+ 0x09, 0x43, /* Usage (43h), */
+ 0x95, 0x30, /* Report Count (48), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xA4, /* Report ID (164), */
+ 0x09, 0x44, /* Usage (44h), */
+ 0x95, 0x0D, /* Report Count (13), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xA5, /* Report ID (165), */
+ 0x09, 0x45, /* Usage (45h), */
+ 0x95, 0x15, /* Report Count (21), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xA6, /* Report ID (166), */
+ 0x09, 0x46, /* Usage (46h), */
+ 0x95, 0x15, /* Report Count (21), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xF0, /* Report ID (240), */
+ 0x09, 0x47, /* Usage (47h), */
+ 0x95, 0x3F, /* Report Count (63), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xF1, /* Report ID (241), */
+ 0x09, 0x48, /* Usage (48h), */
+ 0x95, 0x3F, /* Report Count (63), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xF2, /* Report ID (242), */
+ 0x09, 0x49, /* Usage (49h), */
+ 0x95, 0x0F, /* Report Count (15), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xA7, /* Report ID (167), */
+ 0x09, 0x4A, /* Usage (4Ah), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xA8, /* Report ID (168), */
+ 0x09, 0x4B, /* Usage (4Bh), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xA9, /* Report ID (169), */
+ 0x09, 0x4C, /* Usage (4Ch), */
+ 0x95, 0x08, /* Report Count (8), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xAA, /* Report ID (170), */
+ 0x09, 0x4E, /* Usage (4Eh), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xAB, /* Report ID (171), */
+ 0x09, 0x4F, /* Usage (4Fh), */
+ 0x95, 0x39, /* Report Count (57), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xAC, /* Report ID (172), */
+ 0x09, 0x50, /* Usage (50h), */
+ 0x95, 0x39, /* Report Count (57), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xAD, /* Report ID (173), */
+ 0x09, 0x51, /* Usage (51h), */
+ 0x95, 0x0B, /* Report Count (11), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xAE, /* Report ID (174), */
+ 0x09, 0x52, /* Usage (52h), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xAF, /* Report ID (175), */
+ 0x09, 0x53, /* Usage (53h), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x85, 0xB0, /* Report ID (176), */
+ 0x09, 0x54, /* Usage (54h), */
+ 0x95, 0x3F, /* Report Count (63), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0 /* End Collection */
+};
+
static __u8 ps3remote_rdesc[] = {
0x05, 0x01, /* GUsagePage Generic Desktop */
0x09, 0x05, /* LUsage 0x05 [Game Pad] */
@@ -223,21 +488,19 @@ static const unsigned int buzz_keymap[] = {
};
struct sony_sc {
+ struct hid_device *hdev;
+ struct led_classdev *leds[MAX_LEDS];
+ struct hid_report *output_report;
unsigned long quirks;
+ struct work_struct state_worker;
#ifdef CONFIG_SONY_FF
- struct work_struct rumble_worker;
- struct hid_device *hdev;
__u8 left;
__u8 right;
#endif
- void *extra;
-};
-
-struct buzz_extra {
- int led_state;
- struct led_classdev *leds[4];
+ __u8 led_state[MAX_LEDS];
+ __u8 led_count;
};
static __u8 *ps3remote_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -304,6 +567,17 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[55] = 0x06;
}
+ /*
+ * The default Dualshock 4 USB descriptor doesn't assign
+ * the gyroscope values to corresponding axes so we need a
+ * modified one.
+ */
+ if ((sc->quirks & DUALSHOCK4_CONTROLLER_USB) && *rsize == 467) {
+ hid_info(hdev, "Using modified Dualshock 4 report descriptor with gyroscope axes\n");
+ rdesc = dualshock4_usb_rdesc;
+ *rsize = sizeof(dualshock4_usb_rdesc);
+ }
+
/* The HID descriptor exposed over BT has a trailing zero byte */
if ((((sc->quirks & SIXAXIS_CONTROLLER_USB) && *rsize == 148) ||
((sc->quirks & SIXAXIS_CONTROLLER_BT) && *rsize == 149)) &&
@@ -448,7 +722,7 @@ static int sixaxis_set_operational_bt(struct hid_device *hdev)
return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
}
-static void buzz_set_leds(struct hid_device *hdev, int leds)
+static void buzz_set_leds(struct hid_device *hdev, const __u8 *leds)
{
struct list_head *report_list =
&hdev->report_enum[HID_OUTPUT_REPORT].report_list;
@@ -457,67 +731,76 @@ static void buzz_set_leds(struct hid_device *hdev, int leds)
__s32 *value = report->field[0]->value;
value[0] = 0x00;
- value[1] = (leds & 1) ? 0xff : 0x00;
- value[2] = (leds & 2) ? 0xff : 0x00;
- value[3] = (leds & 4) ? 0xff : 0x00;
- value[4] = (leds & 8) ? 0xff : 0x00;
+ value[1] = leds[0] ? 0xff : 0x00;
+ value[2] = leds[1] ? 0xff : 0x00;
+ value[3] = leds[2] ? 0xff : 0x00;
+ value[4] = leds[3] ? 0xff : 0x00;
value[5] = 0x00;
value[6] = 0x00;
hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
}
-static void buzz_led_set_brightness(struct led_classdev *led,
+static void sony_set_leds(struct hid_device *hdev, const __u8 *leds, int count)
+{
+ struct sony_sc *drv_data = hid_get_drvdata(hdev);
+ int n;
+
+ BUG_ON(count > MAX_LEDS);
+
+ if (drv_data->quirks & BUZZ_CONTROLLER && count == 4) {
+ buzz_set_leds(hdev, leds);
+ } else if ((drv_data->quirks & SIXAXIS_CONTROLLER_USB) ||
+ (drv_data->quirks & DUALSHOCK4_CONTROLLER_USB)) {
+ for (n = 0; n < count; n++)
+ drv_data->led_state[n] = leds[n];
+ schedule_work(&drv_data->state_worker);
+ }
+}
+
+static void sony_led_set_brightness(struct led_classdev *led,
enum led_brightness value)
{
struct device *dev = led->dev->parent;
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct sony_sc *drv_data;
- struct buzz_extra *buzz;
int n;
drv_data = hid_get_drvdata(hdev);
- if (!drv_data || !drv_data->extra) {
+ if (!drv_data) {
hid_err(hdev, "No device data\n");
return;
}
- buzz = drv_data->extra;
-
- for (n = 0; n < 4; n++) {
- if (led == buzz->leds[n]) {
- int on = !! (buzz->led_state & (1 << n));
- if (value == LED_OFF && on) {
- buzz->led_state &= ~(1 << n);
- buzz_set_leds(hdev, buzz->led_state);
- } else if (value != LED_OFF && !on) {
- buzz->led_state |= (1 << n);
- buzz_set_leds(hdev, buzz->led_state);
+
+ for (n = 0; n < drv_data->led_count; n++) {
+ if (led == drv_data->leds[n]) {
+ if (value != drv_data->led_state[n]) {
+ drv_data->led_state[n] = value;
+ sony_set_leds(hdev, drv_data->led_state, drv_data->led_count);
}
break;
}
}
}
-static enum led_brightness buzz_led_get_brightness(struct led_classdev *led)
+static enum led_brightness sony_led_get_brightness(struct led_classdev *led)
{
struct device *dev = led->dev->parent;
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct sony_sc *drv_data;
- struct buzz_extra *buzz;
int n;
int on = 0;
drv_data = hid_get_drvdata(hdev);
- if (!drv_data || !drv_data->extra) {
+ if (!drv_data) {
hid_err(hdev, "No device data\n");
return LED_OFF;
}
- buzz = drv_data->extra;
- for (n = 0; n < 4; n++) {
- if (led == buzz->leds[n]) {
- on = !! (buzz->led_state & (1 << n));
+ for (n = 0; n < drv_data->led_count; n++) {
+ if (led == drv_data->leds[n]) {
+ on = !!(drv_data->led_state[n]);
break;
}
}
@@ -525,110 +808,122 @@ static enum led_brightness buzz_led_get_brightness(struct led_classdev *led)
return on ? LED_FULL : LED_OFF;
}
-static int buzz_init(struct hid_device *hdev)
+static void sony_leds_remove(struct hid_device *hdev)
+{
+ struct sony_sc *drv_data;
+ struct led_classdev *led;
+ int n;
+
+ drv_data = hid_get_drvdata(hdev);
+ BUG_ON(!(drv_data->quirks & SONY_LED_SUPPORT));
+
+ for (n = 0; n < drv_data->led_count; n++) {
+ led = drv_data->leds[n];
+ drv_data->leds[n] = NULL;
+ if (!led)
+ continue;
+ led_classdev_unregister(led);
+ kfree(led);
+ }
+
+ drv_data->led_count = 0;
+}
+
+static int sony_leds_init(struct hid_device *hdev)
{
struct sony_sc *drv_data;
- struct buzz_extra *buzz;
int n, ret = 0;
+ int max_brightness;
+ int use_colors;
struct led_classdev *led;
size_t name_sz;
char *name;
+ size_t name_len;
+ const char *name_fmt;
+ static const char * const color_str[] = { "red", "green", "blue" };
+ static const __u8 initial_values[MAX_LEDS] = { 0x00, 0x00, 0x00, 0x00 };
drv_data = hid_get_drvdata(hdev);
- BUG_ON(!(drv_data->quirks & BUZZ_CONTROLLER));
-
- /* Validate expected report characteristics. */
- if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 7))
- return -ENODEV;
-
- buzz = kzalloc(sizeof(*buzz), GFP_KERNEL);
- if (!buzz) {
- hid_err(hdev, "Insufficient memory, cannot allocate driver data\n");
- return -ENOMEM;
+ BUG_ON(!(drv_data->quirks & SONY_LED_SUPPORT));
+
+ if (drv_data->quirks & BUZZ_CONTROLLER) {
+ drv_data->led_count = 4;
+ max_brightness = 1;
+ use_colors = 0;
+ name_len = strlen("::buzz#");
+ name_fmt = "%s::buzz%d";
+ /* Validate expected report characteristics. */
+ if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 7))
+ return -ENODEV;
+ } else if (drv_data->quirks & DUALSHOCK4_CONTROLLER_USB) {
+ drv_data->led_count = 3;
+ max_brightness = 255;
+ use_colors = 1;
+ name_len = 0;
+ name_fmt = "%s:%s";
+ } else {
+ drv_data->led_count = 4;
+ max_brightness = 1;
+ use_colors = 0;
+ name_len = strlen("::sony#");
+ name_fmt = "%s::sony%d";
}
- drv_data->extra = buzz;
/* Clear LEDs as we have no way of reading their initial state. This is
* only relevant if the driver is loaded after somebody actively set the
* LEDs to on */
- buzz_set_leds(hdev, 0x00);
+ sony_set_leds(hdev, initial_values, drv_data->led_count);
- name_sz = strlen(dev_name(&hdev->dev)) + strlen("::buzz#") + 1;
+ name_sz = strlen(dev_name(&hdev->dev)) + name_len + 1;
+
+ for (n = 0; n < drv_data->led_count; n++) {
+
+ if (use_colors)
+ name_sz = strlen(dev_name(&hdev->dev)) + strlen(color_str[n]) + 2;
- for (n = 0; n < 4; n++) {
led = kzalloc(sizeof(struct led_classdev) + name_sz, GFP_KERNEL);
if (!led) {
hid_err(hdev, "Couldn't allocate memory for LED %d\n", n);
+ ret = -ENOMEM;
goto error_leds;
}
name = (void *)(&led[1]);
- snprintf(name, name_sz, "%s::buzz%d", dev_name(&hdev->dev), n + 1);
+ if (use_colors)
+ snprintf(name, name_sz, name_fmt, dev_name(&hdev->dev), color_str[n]);
+ else
+ snprintf(name, name_sz, name_fmt, dev_name(&hdev->dev), n + 1);
led->name = name;
led->brightness = 0;
- led->max_brightness = 1;
- led->brightness_get = buzz_led_get_brightness;
- led->brightness_set = buzz_led_set_brightness;
+ led->max_brightness = max_brightness;
+ led->brightness_get = sony_led_get_brightness;
+ led->brightness_set = sony_led_set_brightness;
- if (led_classdev_register(&hdev->dev, led)) {
+ ret = led_classdev_register(&hdev->dev, led);
+ if (ret) {
hid_err(hdev, "Failed to register LED %d\n", n);
kfree(led);
goto error_leds;
}
- buzz->leds[n] = led;
+ drv_data->leds[n] = led;
}
return ret;
error_leds:
- for (n = 0; n < 4; n++) {
- led = buzz->leds[n];
- buzz->leds[n] = NULL;
- if (!led)
- continue;
- led_classdev_unregister(led);
- kfree(led);
- }
+ sony_leds_remove(hdev);
- kfree(drv_data->extra);
- drv_data->extra = NULL;
return ret;
}
-static void buzz_remove(struct hid_device *hdev)
-{
- struct sony_sc *drv_data;
- struct buzz_extra *buzz;
- struct led_classdev *led;
- int n;
-
- drv_data = hid_get_drvdata(hdev);
- BUG_ON(!(drv_data->quirks & BUZZ_CONTROLLER));
-
- buzz = drv_data->extra;
-
- for (n = 0; n < 4; n++) {
- led = buzz->leds[n];
- buzz->leds[n] = NULL;
- if (!led)
- continue;
- led_classdev_unregister(led);
- kfree(led);
- }
-
- kfree(drv_data->extra);
- drv_data->extra = NULL;
-}
-
-#ifdef CONFIG_SONY_FF
-static void sony_rumble_worker(struct work_struct *work)
+static void sixaxis_state_worker(struct work_struct *work)
{
- struct sony_sc *sc = container_of(work, struct sony_sc, rumble_worker);
+ struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
unsigned char buf[] = {
0x01,
0x00, 0xff, 0x00, 0xff, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
0xff, 0x27, 0x10, 0x00, 0x32,
0xff, 0x27, 0x10, 0x00, 0x32,
0xff, 0x27, 0x10, 0x00, 0x32,
@@ -636,13 +931,42 @@ static void sony_rumble_worker(struct work_struct *work)
0x00, 0x00, 0x00, 0x00, 0x00
};
- buf[3] = sc->right;
+#ifdef CONFIG_SONY_FF
+ buf[3] = sc->right ? 1 : 0;
buf[5] = sc->left;
+#endif
+
+ buf[10] |= sc->led_state[0] << 1;
+ buf[10] |= sc->led_state[1] << 2;
+ buf[10] |= sc->led_state[2] << 3;
+ buf[10] |= sc->led_state[3] << 4;
sc->hdev->hid_output_raw_report(sc->hdev, buf, sizeof(buf),
HID_OUTPUT_REPORT);
}
+static void dualshock4_state_worker(struct work_struct *work)
+{
+ struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
+ struct hid_device *hdev = sc->hdev;
+ struct hid_report *report = sc->output_report;
+ __s32 *value = report->field[0]->value;
+
+ value[0] = 0x03;
+
+#ifdef CONFIG_SONY_FF
+ value[3] = sc->right;
+ value[4] = sc->left;
+#endif
+
+ value[5] = sc->led_state[0];
+ value[6] = sc->led_state[1];
+ value[7] = sc->led_state[2];
+
+ hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
+}
+
+#ifdef CONFIG_SONY_FF
static int sony_play_effect(struct input_dev *dev, void *data,
struct ff_effect *effect)
{
@@ -653,9 +977,9 @@ static int sony_play_effect(struct input_dev *dev, void *data,
return 0;
sc->left = effect->u.rumble.strong_magnitude / 256;
- sc->right = effect->u.rumble.weak_magnitude ? 1 : 0;
+ sc->right = effect->u.rumble.weak_magnitude / 256;
- schedule_work(&sc->rumble_worker);
+ schedule_work(&sc->state_worker);
return 0;
}
@@ -664,10 +988,6 @@ static int sony_init_ff(struct hid_device *hdev)
struct hid_input *hidinput = list_entry(hdev->inputs.next,
struct hid_input, list);
struct input_dev *input_dev = hidinput->input;
- struct sony_sc *sc = hid_get_drvdata(hdev);
-
- sc->hdev = hdev;
- INIT_WORK(&sc->rumble_worker, sony_rumble_worker);
input_set_capability(input_dev, EV_FF, FF_RUMBLE);
return input_ff_create_memless(input_dev, NULL, sony_play_effect);
@@ -677,7 +997,7 @@ static void sony_destroy_ff(struct hid_device *hdev)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
- cancel_work_sync(&sc->rumble_worker);
+ cancel_work_sync(&sc->state_worker);
}
#else
@@ -691,6 +1011,33 @@ static void sony_destroy_ff(struct hid_device *hdev)
}
#endif
+static int sony_set_output_report(struct sony_sc *sc, int req_id, int req_size)
+{
+ struct list_head *head, *list;
+ struct hid_report *report;
+ struct hid_device *hdev = sc->hdev;
+
+ list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
+
+ list_for_each(head, list) {
+ report = list_entry(head, struct hid_report, list);
+
+ if (report->id == req_id) {
+ if (report->size < req_size) {
+ hid_err(hdev, "Output report 0x%02x (%i bits) is smaller than requested size (%i bits)\n",
+ req_id, report->size, req_size);
+ return -EINVAL;
+ }
+ sc->output_report = report;
+ return 0;
+ }
+ }
+
+ hid_err(hdev, "Unable to locate output report 0x%02x\n", req_id);
+
+ return -EINVAL;
+}
+
static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
@@ -706,6 +1053,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
sc->quirks = quirks;
hid_set_drvdata(hdev, sc);
+ sc->hdev = hdev;
ret = hid_parse(hdev);
if (ret) {
@@ -729,23 +1077,38 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
hdev->hid_output_raw_report = sixaxis_usb_output_raw_report;
ret = sixaxis_set_operational_usb(hdev);
+ INIT_WORK(&sc->state_worker, sixaxis_state_worker);
}
else if (sc->quirks & SIXAXIS_CONTROLLER_BT)
ret = sixaxis_set_operational_bt(hdev);
- else if (sc->quirks & BUZZ_CONTROLLER)
- ret = buzz_init(hdev);
- else
+ else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
+ /* Report 5 (31 bytes) is used to send data to the controller via USB */
+ ret = sony_set_output_report(sc, 0x05, 248);
+ if (ret < 0)
+ goto err_stop;
+
+ INIT_WORK(&sc->state_worker, dualshock4_state_worker);
+ } else {
ret = 0;
+ }
if (ret < 0)
goto err_stop;
+ if (sc->quirks & SONY_LED_SUPPORT) {
+ ret = sony_leds_init(hdev);
+ if (ret < 0)
+ goto err_stop;
+ }
+
ret = sony_init_ff(hdev);
if (ret < 0)
goto err_stop;
return 0;
err_stop:
+ if (sc->quirks & SONY_LED_SUPPORT)
+ sony_leds_remove(hdev);
hid_hw_stop(hdev);
return ret;
}
@@ -754,8 +1117,8 @@ static void sony_remove(struct hid_device *hdev)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
- if (sc->quirks & BUZZ_CONTROLLER)
- buzz_remove(hdev);
+ if (sc->quirks & SONY_LED_SUPPORT)
+ sony_leds_remove(hdev);
sony_destroy_ff(hdev);
@@ -785,6 +1148,11 @@ static const struct hid_device_id sony_devices[] = {
/* Logitech Harmony Adapter for PS3 */
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3),
.driver_data = PS3REMOTE },
+ /* Sony Dualshock 4 controllers for PS4 */
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
+ .driver_data = DUALSHOCK4_CONTROLLER_BT },
{ }
};
MODULE_DEVICE_TABLE(hid, sony_devices);
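A minimal sketch, outside the patch, of the LED bit packing that sixaxis_state_worker() performs above: LEDs 1-4 land in bits 1-4 of byte 10 of the Sixaxis output report. The standalone harness and the sample values are hypothetical; only the shift arithmetic mirrors the driver.

#include <stdio.h>

int main(void)
{
	unsigned char buf[36] = { 0x01 };		/* report id, rest zeroed */
	unsigned char led_state[4] = { 1, 0, 1, 0 };	/* hypothetical LED on/off values */
	int n;

	/* Same packing as sixaxis_state_worker(): LED n maps to bit n + 1 of buf[10]. */
	for (n = 0; n < 4; n++)
		buf[10] |= (led_state[n] & 0x01) << (n + 1);

	printf("buf[10] = 0x%02x\n", buf[10]);		/* prints 0x0a for these values */
	return 0;
}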
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 6a6dd5cd7833..cb0137b3718d 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -6,7 +6,7 @@
* to work on raw hid events as they want to, and avoids a need to
 * use transport-specific userspace libhid/libusb libraries.
*
- * Copyright (c) 2007 Jiri Kosina
+ * Copyright (c) 2007-2014 Jiri Kosina
*/
/*
@@ -104,8 +104,11 @@ out:
return ret;
}
-/* The first byte is expected to be a report number.
- * This function is to be called with the minors_lock mutex held */
+/*
+ * The first byte of the report buffer is expected to be a report number.
+ *
+ * This function is to be called with the minors_lock mutex held.
+ */
static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, size_t count, unsigned char report_type)
{
unsigned int minor = iminor(file_inode(file));
@@ -157,7 +160,6 @@ out:
return ret;
}
-/* the first byte is expected to be a report number */
static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
ssize_t ret;
@@ -168,12 +170,15 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t
}
-/* This function performs a Get_Report transfer over the control endpoint
+/*
+ * This function performs a Get_Report transfer over the control endpoint
* per section 7.2.1 of the HID specification, version 1.1. The first byte
 * of buffer is the report number to request, or 0x0 if the device does not
* use numbered reports. The report_type parameter can be HID_FEATURE_REPORT
- * or HID_INPUT_REPORT. This function is to be called with the minors_lock
- * mutex held. */
+ * or HID_INPUT_REPORT.
+ *
+ * This function is to be called with the minors_lock mutex held.
+ */
static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t count, unsigned char report_type)
{
unsigned int minor = iminor(file_inode(file));
@@ -209,8 +214,10 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
goto out;
}
- /* Read the first byte from the user. This is the report number,
- * which is passed to dev->hid_get_raw_report(). */
+ /*
+ * Read the first byte from the user. This is the report number,
+ * which is passed to dev->hid_get_raw_report().
+ */
if (copy_from_user(&report_number, buffer, 1)) {
ret = -EFAULT;
goto out_free;
@@ -498,7 +505,7 @@ int hidraw_connect(struct hid_device *hid)
int minor, result;
struct hidraw *dev;
- /* we accept any HID device, no matter the applications */
+ /* we accept any HID device, all applications */
dev = kzalloc(sizeof(struct hidraw), GFP_KERNEL);
if (!dev)
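For reference, a user-space sketch of the Get_Report path documented in the comment above, assuming a hidraw node at a hypothetical path: the first byte of the buffer handed to HIDIOCGFEATURE selects the report number, or 0x0 if the device does not use numbered reports.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hidraw.h>

int main(void)
{
	unsigned char buf[64] = { 0 };
	int fd = open("/dev/hidraw0", O_RDWR);	/* hypothetical device node */

	if (fd < 0)
		return 1;

	buf[0] = 0x0;	/* report number; 0x0 for devices without numbered reports */
	if (ioctl(fd, HIDIOCGFEATURE(sizeof(buf)), buf) < 0)
		perror("HIDIOCGFEATURE");
	else
		printf("first data byte: 0x%02x\n", buf[1]);

	close(fd);
	return 0;
}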
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 5f7e55f4b7f0..d1f81f52481a 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -850,37 +850,23 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45,
0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE,
};
- union acpi_object params[4];
- struct acpi_object_list input;
+ union acpi_object *obj;
struct acpi_device *adev;
- unsigned long long value;
acpi_handle handle;
handle = ACPI_HANDLE(&client->dev);
if (!handle || acpi_bus_get_device(handle, &adev))
return -ENODEV;
- input.count = ARRAY_SIZE(params);
- input.pointer = params;
-
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(i2c_hid_guid);
- params[0].buffer.pointer = i2c_hid_guid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = 1;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = 1; /* HID function */
- params[3].type = ACPI_TYPE_PACKAGE;
- params[3].package.count = 0;
- params[3].package.elements = NULL;
-
- if (ACPI_FAILURE(acpi_evaluate_integer(handle, "_DSM", &input,
- &value))) {
+ obj = acpi_evaluate_dsm_typed(handle, i2c_hid_guid, 1, 1, NULL,
+ ACPI_TYPE_INTEGER);
+ if (!obj) {
dev_err(&client->dev, "device _DSM execution failed\n");
return -ENODEV;
}
- pdata->hid_descriptor_address = value;
+ pdata->hid_descriptor_address = obj->integer.value;
+ ACPI_FREE(obj);
return 0;
}
@@ -1061,6 +1047,7 @@ static int i2c_hid_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
+ disable_irq(client->irq);
if (device_may_wakeup(&client->dev))
enable_irq_wake(client->irq);
@@ -1075,6 +1062,7 @@ static int i2c_hid_resume(struct device *dev)
int ret;
struct i2c_client *client = to_i2c_client(dev);
+ enable_irq(client->irq);
ret = i2c_hid_hwreset(client);
if (ret)
return ret;
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 0db9a67278ba..175ec0afb70c 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -84,8 +84,10 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIS2_TOUCH, USB_DEVICE_ID_SIS817_TOUCH, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS817_TOUCH, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS1030_TOUCH, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
@@ -114,7 +116,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SIS, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
{ 0, 0 }
};
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index 796086980f4a..9a332e683db7 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -146,7 +146,7 @@ static void usb_kbd_irq(struct urb *urb)
input_report_key(kbd->dev, usb_kbd_keycode[kbd->new[i]], 1);
else
hid_info(urb->dev,
- "Unknown key (scancode %#x) released.\n",
+ "Unknown key (scancode %#x) pressed.\n",
kbd->new[i]);
}
}
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index cea623c36ae2..69ea36f07b4d 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -209,7 +209,6 @@ static int create_gpadl_header(void *kbuffer, u32 size,
{
int i;
int pagecount;
- unsigned long long pfn;
struct vmbus_channel_gpadl_header *gpadl_header;
struct vmbus_channel_gpadl_body *gpadl_body;
struct vmbus_channel_msginfo *msgheader;
@@ -219,7 +218,6 @@ static int create_gpadl_header(void *kbuffer, u32 size,
int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
pagecount = size >> PAGE_SHIFT;
- pfn = virt_to_phys(kbuffer) >> PAGE_SHIFT;
/* do we need a gpadl body msg */
pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
@@ -248,7 +246,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range[0].byte_offset = 0;
gpadl_header->range[0].byte_count = size;
for (i = 0; i < pfncount; i++)
- gpadl_header->range[0].pfn_array[i] = pfn+i;
+ gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
+ kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
*msginfo = msgheader;
*messagecount = 1;
@@ -301,7 +300,9 @@ static int create_gpadl_header(void *kbuffer, u32 size,
 * so the hypervisor guarantees that this is ok.
*/
for (i = 0; i < pfncurr; i++)
- gpadl_body->pfn[i] = pfn + pfnsum + i;
+ gpadl_body->pfn[i] = slow_virt_to_phys(
+ kbuffer + PAGE_SIZE * (pfnsum + i)) >>
+ PAGE_SHIFT;
/* add to msg header */
list_add_tail(&msgbody->msglistentry,
@@ -327,7 +328,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range[0].byte_offset = 0;
gpadl_header->range[0].byte_count = size;
for (i = 0; i < pagecount; i++)
- gpadl_header->range[0].pfn_array[i] = pfn+i;
+ gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
+ kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
*msginfo = msgheader;
*messagecount = 1;
@@ -344,7 +346,7 @@ nomem:
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
*
* @channel: a channel
- * @kbuffer: from kmalloc
+ * @kbuffer: from kmalloc or vmalloc
* @size: page-size multiple
* @gpadl_handle: some funky thing
*/
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index af6edf9b1936..f2d7bf90c9fe 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -67,7 +67,6 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
int ret = 0;
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
- int t;
init_completion(&msginfo->waitevent);
@@ -78,6 +77,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
+ if (version == VERSION_WIN8)
+ msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
/*
* Add to list before we send the request since we may
@@ -100,15 +101,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
}
/* Wait for the connection response */
- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
- if (t == 0) {
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
- flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
- flags);
- return -ETIMEDOUT;
- }
+ wait_for_completion(&msginfo->waitevent);
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index f0c5e07c25ec..bcb49502c3bf 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -301,7 +301,7 @@ err:
return -ENOMEM;
}
-void hv_synic_free_cpu(int cpu)
+static void hv_synic_free_cpu(int cpu)
{
kfree(hv_context.event_dpc[cpu]);
if (hv_context.synic_event_page[cpu])
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 48aad4faea06..077bb1bdac34 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -30,7 +30,6 @@
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
@@ -39,7 +38,6 @@
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
-
static struct acpi_device *hv_acpi_dev;
static struct tasklet_struct msg_dpc;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 52d548f1dc1d..5ce43d8dfa98 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -573,8 +573,8 @@ config SENSORS_IT87
help
If you say yes here you get support for ITE IT8705F, IT8712F,
IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E,
- IT8771E, IT8772E, IT8782F, and IT8783E/F sensor chips, and the
- SiS950 clone.
+ IT8771E, IT8772E, IT8782F, IT8783E/F and IT8603E sensor chips,
+ and the SiS950 clone.
This driver can also be built as a module. If so, the module
will be called it87.
@@ -650,6 +650,7 @@ config SENSORS_LM73
config SENSORS_LM75
tristate "National Semiconductor LM75 and compatibles"
depends on I2C
+ depends on THERMAL || !THERMAL_OF
help
If you say yes here you get support for one common type of
temperature sensor chip, with models including:
@@ -1285,6 +1286,7 @@ config SENSORS_THMC50
config SENSORS_TMP102
tristate "Texas Instruments TMP102"
depends on I2C
+ depends on THERMAL || !THERMAL_OF
help
If you say yes here you get support for Texas Instruments TMP102
sensor chips.
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 6a34f7f48eb9..579bdf93be43 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -30,8 +30,7 @@
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/err.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
+#include <linux/acpi.h>
#define ACPI_POWER_METER_NAME "power_meter"
ACPI_MODULE_NAME(ACPI_POWER_METER_NAME);
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index 7e16e5d07bc6..9ffc4c8ca8b5 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -2,7 +2,7 @@
* adm1025.c
*
* Copyright (C) 2000 Chen-Yuan Wu <gwu@esoft.com>
- * Copyright (C) 2003-2009 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2009 Jean Delvare <jdelvare@suse.de>
*
* The ADM1025 is a sensor chip made by Analog Devices. It reports up to 6
* voltages (including its own power source) and up to two temperatures
@@ -615,6 +615,6 @@ static struct adm1025_data *adm1025_update_device(struct device *dev)
module_i2c_driver(adm1025_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("ADM1025 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 9ee5e066423b..d19c790e410a 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2006 Corentin LABBE <corentin.labbe@geomatys.fr>
*
- * Based on LM83 Driver by Jean Delvare <khali@linux-fr.org>
+ * Based on LM83 Driver by Jean Delvare <jdelvare@suse.de>
*
* Give only processor, motherboard temperatures and fan tachs
* Very rare chip please let me know if you use it
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 253ea396106d..a8a540ca8c34 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -4,7 +4,7 @@
* Based on lm75.c and lm85.c
* Supports adm1030 / adm1031
* Copyright (C) 2004 Alexandre d'Alton <alex@alexdalton.org>
- * Reworked by Jean Delvare <khali@linux-fr.org>
+ * Reworked by Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 22d008bbdc10..3cefd1aeb24f 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -3,7 +3,7 @@
* Copyright (C) 2007-2008, Advanced Micro Devices, Inc.
* Copyright (C) 2008 Jordan Crouse <jordan@cosmicpenguin.net>
* Copyright (C) 2008 Hans de Goede <hdegoede@redhat.com>
- * Copyright (C) 2009 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2009 Jean Delvare <jdelvare@suse.de>
*
* Derived from the lm83 driver by Jean Delvare
*
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index dafc63c6932d..ae208f612198 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -16,11 +16,7 @@
#include <linux/dmi.h>
#include <linux/jiffies.h>
#include <linux/err.h>
-
-#include <acpi/acpi.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
-
+#include <linux/acpi.h>
#define ATK_HID "ATK0110"
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 78be66176840..bbb0b0d463f7 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -36,6 +36,7 @@
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/moduleparam.h>
+#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpu_device_id.h>
@@ -52,7 +53,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
-#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
+#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
@@ -176,20 +177,33 @@ static ssize_t show_temp(struct device *dev,
/* Check whether the time interval has elapsed */
if (!tdata->valid || time_after(jiffies, tdata->last_updated + HZ)) {
rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
- tdata->valid = 0;
- /* Check whether the data is valid */
- if (eax & 0x80000000) {
- tdata->temp = tdata->tjmax -
- ((eax >> 16) & 0x7f) * 1000;
- tdata->valid = 1;
- }
+ /*
+ * Ignore the valid bit. In all observed cases the register
+ * value is either low or zero if the valid bit is 0.
+ * Return it instead of reporting an error which doesn't
+ * really help at all.
+ */
+ tdata->temp = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
+ tdata->valid = 1;
tdata->last_updated = jiffies;
}
mutex_unlock(&tdata->update_lock);
- return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
+ return sprintf(buf, "%d\n", tdata->temp);
}
+struct tjmax_pci {
+ unsigned int device;
+ int tjmax;
+};
+
+static const struct tjmax_pci tjmax_pci_table[] = {
+ { 0x0708, 110000 }, /* CE41x0 (Sodaville) */
+ { 0x0c72, 102000 }, /* Atom S1240 (Centerton) */
+ { 0x0c73, 95000 }, /* Atom S1220 (Centerton) */
+ { 0x0c75, 95000 }, /* Atom S1260 (Centerton) */
+};
+
struct tjmax {
char const *id;
int tjmax;
@@ -198,9 +212,6 @@ struct tjmax {
static const struct tjmax tjmax_table[] = {
{ "CPU 230", 100000 }, /* Model 0x1c, stepping 2 */
{ "CPU 330", 125000 }, /* Model 0x1c, stepping 2 */
- { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 Sodaville */
- { "CPU CE4150", 110000 }, /* Model 0x1c, stepping 10 */
- { "CPU CE4170", 110000 }, /* Model 0x1c, stepping 10 */
};
struct tjmax_model {
@@ -222,8 +233,11 @@ static const struct tjmax_model tjmax_model_table[] = {
* is undetectable by software
*/
{ 0x27, ANY, 90000 }, /* Atom Medfield (Z2460) */
- { 0x35, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z2760) */
- { 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx) */
+ { 0x35, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z27x0) */
+ { 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx)
+ * Also matches S12x0 (stepping 9), covered by
+ * PCI table
+ */
};
static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
@@ -236,8 +250,20 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
int err;
u32 eax, edx;
int i;
+ struct pci_dev *host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+
+ /*
+ * Explicit tjmax table entries override heuristics.
+ * First try PCI host bridge IDs, followed by model ID strings
+ * and model/stepping information.
+ */
+ if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) {
+ for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) {
+ if (host_bridge->device == tjmax_pci_table[i].device)
+ return tjmax_pci_table[i].tjmax;
+ }
+ }
- /* explicit tjmax table entries override heuristics */
for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
if (strstr(c->x86_model_id, tjmax_table[i].id))
return tjmax_table[i].tjmax;
@@ -343,12 +369,12 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
if (cpu_has_tjmax(c))
dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
} else {
- val = (eax >> 16) & 0xff;
+ val = (eax >> 16) & 0x7f;
/*
* If the TjMax is not plausible, an assumption
* will be used
*/
- if (val) {
+ if (val >= 85) {
dev_dbg(dev, "TjMax is %d degrees C\n", val);
return val * 1000;
}
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index 960fac3fb166..afd31042b452 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -45,7 +45,7 @@ static const char * const input_names[] = {
/* Conversion function for VDDOUT and VBAT */
static inline int volt_reg_to_mv(int value)
{
- return DIV_ROUND_CLOSEST(value * 1000, 512) + 2500;
+ return DIV_ROUND_CLOSEST(value * 2000, 1023) + 2500;
}
/* Conversion function for ADC channels 4, 5 and 6 */
@@ -57,7 +57,7 @@ static inline int input_reg_to_mv(int value)
/* Conversion function for VBBAT */
static inline int vbbat_reg_to_mv(int value)
{
- return DIV_ROUND_CLOSEST(value * 2500, 512);
+ return DIV_ROUND_CLOSEST(value * 5000, 1023);
}
static inline int da9052_enable_vddout_channel(struct da9052 *da9052)
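A quick end-point check of the re-scaled conversions above, assuming the DA9052 ADC is 10-bit so that code 1023 is full scale (the harness is illustrative, not part of the patch): VDDOUT/VBAT now spans exactly 2500-4500 mV and VBBAT exactly 0-5000 mV.

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

int main(void)
{
	/* VDDOUT/VBAT: volt_reg_to_mv() end points with the new scaling */
	printf("volt: %d..%d mV\n",
	       DIV_ROUND_CLOSEST(0 * 2000, 1023) + 2500,
	       DIV_ROUND_CLOSEST(1023 * 2000, 1023) + 2500);
	/* VBBAT: vbbat_reg_to_mv() end points with the new scaling */
	printf("vbbat: %d..%d mV\n",
	       DIV_ROUND_CLOSEST(0 * 5000, 1023),
	       DIV_ROUND_CLOSEST(1023 * 5000, 1023));
	return 0;
}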
diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
index 029ecabc4380..73b3865f1207 100644
--- a/drivers/hwmon/da9055-hwmon.c
+++ b/drivers/hwmon/da9055-hwmon.c
@@ -278,10 +278,6 @@ static int da9055_hwmon_probe(struct platform_device *pdev)
if (hwmon_irq < 0)
return hwmon_irq;
- hwmon_irq = regmap_irq_get_virq(hwmon->da9055->irq_data, hwmon_irq);
- if (hwmon_irq < 0)
- return hwmon_irq;
-
ret = devm_request_threaded_irq(&pdev->dev, hwmon_irq,
NULL, da9055_auxadc_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index 872d76744e30..fc6f5d54e7f7 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -4,7 +4,7 @@
* Christian W. Zuckschwerdt <zany@triq.net> 2000-11-23
* based on lm75.c by Frodo Looijaard <frodol@dds.nl>
* Ported to Linux 2.6 by Aurelien Jarno <aurelien@aurel32.net> with
- * the help of Jean Delvare <khali@linux-fr.org>
+ * the help of Jean Delvare <jdelvare@suse.de>
*
* The DS1621 device is a digital temperature/thermometer with 9-bit
* resolution, a thermal alarm output (Tout), and user-defined minimum
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index 82e661e8241b..f76a74cb6dc4 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -1,6 +1,6 @@
/*
* emc6w201.c - Hardware monitoring driver for the SMSC EMC6W201
- * Copyright (C) 2011 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2011 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -548,6 +548,6 @@ static struct i2c_driver emc6w201_driver = {
module_i2c_driver(emc6w201_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("SMSC EMC6W201 hardware monitoring driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 15b7f5281def..1a8aa1265262 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -1,7 +1,7 @@
/*
* f71805f.c - driver for the Fintek F71805F/FG and F71872F/FG Super-I/O
* chips integrated hardware monitoring features
- * Copyright (C) 2005-2006 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2006 Jean Delvare <jdelvare@suse.de>
*
* The F71805F/FG is a LPC Super-I/O chip made by Fintek. It integrates
* complete hardware monitoring features: voltage, fan and temperature
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index dff841085baf..6040121a405a 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -249,7 +249,7 @@ static void fam15h_power_remove(struct pci_dev *pdev)
sysfs_remove_group(&dev->kobj, &fam15h_power_attr_group);
}
-static DEFINE_PCI_DEVICE_TABLE(fam15h_power_id_table) = {
+static const struct pci_device_id fam15h_power_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
{}
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 95257a5621d8..1e9830513045 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -4,7 +4,7 @@
* Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and
* Kyosti Malkki <kmalkki@cc.hut.fi>
* Copyright (C) 2004 Hong-Gunn Chew <hglinux@gunnet.org> and
- * Jean Delvare <khali@linux-fr.org>
+ * Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 29ffa27c60b8..70749fc15a4f 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -10,7 +10,8 @@
* This driver supports only the Environment Controller in the IT8705F and
* similar parts. The other devices are supported by different drivers.
*
- * Supports: IT8705F Super I/O chip w/LPC interface
+ * Supports: IT8603E Super I/O chip w/LPC interface
+ * IT8705F Super I/O chip w/LPC interface
* IT8712F Super I/O chip w/LPC interface
* IT8716F Super I/O chip w/LPC interface
* IT8718F Super I/O chip w/LPC interface
@@ -26,7 +27,7 @@
* Sis950 A clone of the IT8705F
*
* Copyright (C) 2001 Chris Gauthron
- * Copyright (C) 2005-2010 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2010 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -64,7 +65,7 @@
#define DRVNAME "it87"
enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8771,
- it8772, it8782, it8783 };
+ it8772, it8782, it8783, it8603 };
static unsigned short force_id;
module_param(force_id, ushort, 0);
@@ -146,6 +147,7 @@ static inline void superio_exit(void)
#define IT8772E_DEVID 0x8772
#define IT8782F_DEVID 0x8782
#define IT8783E_DEVID 0x8783
+#define IT8306E_DEVID 0x8603
#define IT87_ACT_REG 0x30
#define IT87_BASE_REG 0x60
@@ -315,6 +317,12 @@ static const struct it87_devices it87_devices[] = {
| FEAT_TEMP_OLD_PECI,
.old_peci_mask = 0x4,
},
+ [it8603] = {
+ .name = "it8603",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
+ .peci_mask = 0x07,
+ },
};
#define has_16bit_fans(data) ((data)->features & FEAT_16BIT_FANS)
@@ -361,7 +369,7 @@ struct it87_data {
unsigned long last_updated; /* In jiffies */
u16 in_scaled; /* Internal voltage sensors are scaled */
- u8 in[9][3]; /* [nr][0]=in, [1]=min, [2]=max */
+ u8 in[10][3]; /* [nr][0]=in, [1]=min, [2]=max */
u8 has_fan; /* Bitfield, fans enabled */
u16 fan[5][2]; /* Register values, [nr][0]=fan, [1]=min */
u8 has_temp; /* Bitfield, temp sensors enabled */
@@ -578,6 +586,7 @@ static SENSOR_DEVICE_ATTR_2(in7_max, S_IRUGO | S_IWUSR, show_in, set_in,
7, 2);
static SENSOR_DEVICE_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 8, 0);
+static SENSOR_DEVICE_ATTR_2(in9_input, S_IRUGO, show_in, NULL, 9, 0);
/* 3 temperatures */
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
@@ -734,7 +743,7 @@ static int pwm_mode(const struct it87_data *data, int nr)
{
int ctrl = data->fan_main_ctrl & (1 << nr);
- if (ctrl == 0) /* Full speed */
+ if (ctrl == 0 && data->type != it8603) /* Full speed */
return 0;
if (data->pwm_ctrl[nr] & 0x80) /* Automatic mode */
return 2;
@@ -929,6 +938,10 @@ static ssize_t set_pwm_enable(struct device *dev,
return -EINVAL;
}
+ /* IT8603E does not have on/off mode */
+ if (val == 0 && data->type == it8603)
+ return -EINVAL;
+
mutex_lock(&data->update_lock);
if (val == 0) {
@@ -948,10 +961,13 @@ static ssize_t set_pwm_enable(struct device *dev,
else /* Automatic mode */
data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]);
- /* set SmartGuardian mode */
- data->fan_main_ctrl |= (1 << nr);
- it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
- data->fan_main_ctrl);
+
+ if (data->type != it8603) {
+ /* set SmartGuardian mode */
+ data->fan_main_ctrl |= (1 << nr);
+ it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
+ data->fan_main_ctrl);
+ }
}
mutex_unlock(&data->update_lock);
@@ -1415,6 +1431,8 @@ static ssize_t show_label(struct device *dev, struct device_attribute *attr,
static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_label, NULL, 2);
+/* special AVCC3 IT8306E in9 */
+static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL, 0);
static ssize_t show_name(struct device *dev, struct device_attribute
*devattr, char *buf)
@@ -1424,7 +1442,7 @@ static ssize_t show_name(struct device *dev, struct device_attribute
}
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static struct attribute *it87_attributes_in[9][5] = {
+static struct attribute *it87_attributes_in[10][5] = {
{
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_min.dev_attr.attr,
@@ -1476,9 +1494,12 @@ static struct attribute *it87_attributes_in[9][5] = {
}, {
&sensor_dev_attr_in8_input.dev_attr.attr,
NULL
+}, {
+ &sensor_dev_attr_in9_input.dev_attr.attr,
+ NULL
} };
-static const struct attribute_group it87_group_in[9] = {
+static const struct attribute_group it87_group_in[10] = {
{ .attrs = it87_attributes_in[0] },
{ .attrs = it87_attributes_in[1] },
{ .attrs = it87_attributes_in[2] },
@@ -1488,6 +1509,7 @@ static const struct attribute_group it87_group_in[9] = {
{ .attrs = it87_attributes_in[6] },
{ .attrs = it87_attributes_in[7] },
{ .attrs = it87_attributes_in[8] },
+ { .attrs = it87_attributes_in[9] },
};
static struct attribute *it87_attributes_temp[3][6] = {
@@ -1546,7 +1568,8 @@ static struct attribute *it87_attributes_in_beep[] = {
&sensor_dev_attr_in5_beep.dev_attr.attr,
&sensor_dev_attr_in6_beep.dev_attr.attr,
&sensor_dev_attr_in7_beep.dev_attr.attr,
- NULL
+ NULL,
+ NULL,
};
static struct attribute *it87_attributes_temp_beep[] = {
@@ -1685,6 +1708,7 @@ static struct attribute *it87_attributes_label[] = {
&sensor_dev_attr_in3_label.dev_attr.attr,
&sensor_dev_attr_in7_label.dev_attr.attr,
&sensor_dev_attr_in8_label.dev_attr.attr,
+ &sensor_dev_attr_in9_label.dev_attr.attr,
NULL
};
@@ -1742,6 +1766,9 @@ static int __init it87_find(unsigned short *address,
case IT8783E_DEVID:
sio_data->type = it8783;
break;
+ case IT8306E_DEVID:
+ sio_data->type = it8603;
+ break;
case 0xffff: /* No device at all */
goto exit;
default:
@@ -1763,11 +1790,16 @@ static int __init it87_find(unsigned short *address,
err = 0;
sio_data->revision = superio_inb(DEVREV) & 0x0f;
- pr_info("Found IT%04xF chip at 0x%x, revision %d\n",
- chip_type, *address, sio_data->revision);
+ pr_info("Found IT%04x%c chip at 0x%x, revision %d\n", chip_type,
+ chip_type == 0x8771 || chip_type == 0x8772 ||
+ chip_type == 0x8603 ? 'E' : 'F', *address,
+ sio_data->revision);
/* in8 (Vbat) is always internal */
sio_data->internal = (1 << 2);
+ /* Only the IT8603E has in9 */
+ if (sio_data->type != it8603)
+ sio_data->skip_in |= (1 << 9);
/* Read GPIO config and VID value from LDN 7 (GPIO) */
if (sio_data->type == it87) {
@@ -1844,7 +1876,38 @@ static int __init it87_find(unsigned short *address,
sio_data->internal |= (1 << 1);
sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+ } else if (sio_data->type == it8603) {
+ int reg27, reg29;
+
+ sio_data->skip_vid = 1; /* No VID */
+ superio_select(GPIO);
+ reg27 = superio_inb(IT87_SIO_GPIO3_REG);
+
+ /* Check if fan3 is there or not */
+ if (reg27 & (1 << 6))
+ sio_data->skip_pwm |= (1 << 2);
+ if (reg27 & (1 << 7))
+ sio_data->skip_fan |= (1 << 2);
+
+ /* Check if fan2 is there or not */
+ reg29 = superio_inb(IT87_SIO_GPIO5_REG);
+ if (reg29 & (1 << 1))
+ sio_data->skip_pwm |= (1 << 1);
+ if (reg29 & (1 << 2))
+ sio_data->skip_fan |= (1 << 1);
+
+ sio_data->skip_in |= (1 << 5); /* No VIN5 */
+ sio_data->skip_in |= (1 << 6); /* No VIN6 */
+
+ /* no fan4 */
+ sio_data->skip_pwm |= (1 << 3);
+ sio_data->skip_fan |= (1 << 3);
+
+ sio_data->internal |= (1 << 1); /* in7 is VSB */
+ sio_data->internal |= (1 << 3); /* in9 is AVCC */
+
+ sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
} else {
int reg;
bool uart6;
@@ -1966,7 +2029,7 @@ static void it87_remove_files(struct device *dev)
int i;
sysfs_remove_group(&dev->kobj, &it87_group);
- for (i = 0; i < 9; i++) {
+ for (i = 0; i < 10; i++) {
if (sio_data->skip_in & (1 << i))
continue;
sysfs_remove_group(&dev->kobj, &it87_group_in[i]);
@@ -2080,6 +2143,8 @@ static int it87_probe(struct platform_device *pdev)
data->in_scaled |= (1 << 7); /* in7 is VSB */
if (sio_data->internal & (1 << 2))
data->in_scaled |= (1 << 8); /* in8 is Vbat */
+ if (sio_data->internal & (1 << 3))
+ data->in_scaled |= (1 << 9); /* in9 is AVCC */
} else if (sio_data->type == it8782 || sio_data->type == it8783) {
if (sio_data->internal & (1 << 0))
data->in_scaled |= (1 << 3); /* in3 is VCC5V */
@@ -2102,7 +2167,7 @@ static int it87_probe(struct platform_device *pdev)
if (err)
return err;
- for (i = 0; i < 9; i++) {
+ for (i = 0; i < 10; i++) {
if (sio_data->skip_in & (1 << i))
continue;
err = sysfs_create_group(&dev->kobj, &it87_group_in[i]);
@@ -2202,7 +2267,7 @@ static int it87_probe(struct platform_device *pdev)
}
/* Export labels for internal sensors */
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < 4; i++) {
if (!(sio_data->internal & (1 << i)))
continue;
err = sysfs_create_file(&dev->kobj,
@@ -2383,8 +2448,9 @@ static void it87_init_device(struct platform_device *pdev)
}
data->has_fan = (data->fan_main_ctrl >> 4) & 0x07;
- /* Set tachometers to 16-bit mode if needed */
- if (has_16bit_fans(data)) {
+ /* Set tachometers to 16-bit mode if needed, IT8603E (and IT8728F?)
+ * has it by default */
+ if (has_16bit_fans(data) && data->type != it8603) {
tmp = it87_read_value(data, IT87_REG_FAN_16BIT);
if (~tmp & 0x07 & data->has_fan) {
dev_dbg(&pdev->dev,
@@ -2464,6 +2530,8 @@ static struct it87_data *it87_update_device(struct device *dev)
}
/* in8 (battery) has no limit registers */
data->in[8][0] = it87_read_value(data, IT87_REG_VIN(8));
+ if (data->type == it8603)
+ data->in[9][0] = it87_read_value(data, 0x2f);
for (i = 0; i < 5; i++) {
/* Skip disabled fans */
@@ -2620,7 +2688,7 @@ static void __exit sm_it87_exit(void)
}
-MODULE_AUTHOR("Chris Gauthron, Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Chris Gauthron, Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("IT8705F/IT871xF/IT872xF hardware monitoring driver");
module_param(update_vbat, bool, 0);
MODULE_PARM_DESC(update_vbat, "Update vbat if set else return powerup value");
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index d65f3fd895dd..baf375b5ab0d 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -204,12 +204,13 @@ static void k10temp_remove(struct pci_dev *pdev)
&sensor_dev_attr_temp1_crit_hyst.dev_attr);
}
-static DEFINE_PCI_DEVICE_TABLE(k10temp_id_table) = {
+static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{}
};
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 5b50e9e4f96b..734d55d48cc8 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -135,7 +135,7 @@ static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 1, 0);
static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 1, 1);
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static DEFINE_PCI_DEVICE_TABLE(k8temp_ids) = {
+static const struct pci_device_id k8temp_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ 0 },
};
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index d0def50ea860..b4ad598feb6c 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -1,7 +1,7 @@
/*
* lm63.c - driver for the National Semiconductor LM63 temperature sensor
* with integrated fan control
- * Copyright (C) 2004-2008 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004-2008 Jean Delvare <jdelvare@suse.de>
* Based on the lm90 driver.
*
* The LM63 is a sensor chip made by National Semiconductor. It measures
@@ -1202,6 +1202,6 @@ static struct i2c_driver lm63_driver = {
module_i2c_driver(lm63_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("LM63 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 7e3ef134f1d2..84a55eacd903 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -27,6 +27,8 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/thermal.h>
#include "lm75.h"
@@ -71,6 +73,7 @@ static const u8 LM75_REG_TEMP[3] = {
/* Each client has this additional data */
struct lm75_data {
struct device *hwmon_dev;
+ struct thermal_zone_device *tz;
struct mutex update_lock;
u8 orig_conf;
u8 resolution; /* In bits, between 9 and 12 */
@@ -91,22 +94,36 @@ static struct lm75_data *lm75_update_device(struct device *dev);
/*-----------------------------------------------------------------------*/
+static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
+{
+ return ((temp >> (16 - resolution)) * 1000) >> (resolution - 8);
+}
+
/* sysfs attributes for hwmon */
+static int lm75_read_temp(void *dev, long *temp)
+{
+ struct lm75_data *data = lm75_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ *temp = lm75_reg_to_mc(data->temp[0], data->resolution);
+
+ return 0;
+}
+
static ssize_t show_temp(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm75_data *data = lm75_update_device(dev);
- long temp;
if (IS_ERR(data))
return PTR_ERR(data);
- temp = ((data->temp[attr->index] >> (16 - data->resolution)) * 1000)
- >> (data->resolution - 8);
-
- return sprintf(buf, "%ld\n", temp);
+ return sprintf(buf, "%ld\n", lm75_reg_to_mc(data->temp[attr->index],
+ data->resolution));
}
static ssize_t set_temp(struct device *dev, struct device_attribute *da,
@@ -273,6 +290,13 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
goto exit_remove;
}
+ data->tz = thermal_zone_of_sensor_register(&client->dev,
+ 0,
+ &client->dev,
+ lm75_read_temp, NULL);
+ if (IS_ERR(data->tz))
+ data->tz = NULL;
+
dev_info(&client->dev, "%s: sensor '%s'\n",
dev_name(data->hwmon_dev), client->name);
@@ -287,6 +311,7 @@ static int lm75_remove(struct i2c_client *client)
{
struct lm75_data *data = i2c_get_clientdata(client);
+ thermal_zone_of_sensor_unregister(&client->dev, data->tz);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm75_group);
lm75_write_value(client, LM75_REG_CONF, data->orig_conf);
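A worked example of the lm75_reg_to_mc() helper factored out above, assuming a 12-bit part: the register value is left-justified in 16 bits, so a raw reading of 0x1900 (400 counts of 62.5 m°C) decodes to 25000 millidegrees. The harness is illustrative only.

#include <stdio.h>

/* Same arithmetic as the lm75_reg_to_mc() helper introduced above. */
static long lm75_reg_to_mc(short temp, unsigned char resolution)
{
	return ((temp >> (16 - resolution)) * 1000L) >> (resolution - 8);
}

int main(void)
{
	printf("%ld m°C\n", lm75_reg_to_mc(0x1900, 12));	/* prints 25000 */
	return 0;
}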
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index a2f3b4a365e4..9efadfc851bc 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -2,7 +2,7 @@
* lm78.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
* Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
- * Copyright (c) 2007, 2011 Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2007, 2011 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1108,7 +1108,7 @@ static void __exit sm_lm78_exit(void)
i2c_del_driver(&lm78_driver);
}
-MODULE_AUTHOR("Frodo Looijaard, Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Frodo Looijaard, Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("LM78/LM79 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index e998034f1f11..abd270243ba7 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -1,7 +1,7 @@
/*
* lm83.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
- * Copyright (C) 2003-2009 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2009 Jean Delvare <jdelvare@suse.de>
*
* Heavily inspired from the lm78, lm75 and adm1021 drivers. The LM83 is
* a sensor chip made by National Semiconductor. It reports up to four
@@ -427,6 +427,6 @@ static struct lm83_data *lm83_update_device(struct device *dev)
module_i2c_driver(lm83_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("LM83 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 3894c408fda3..bed4af358308 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -5,7 +5,7 @@
* Copyright (c) 2002, 2003 Philip Pokorny <ppokorny@penguincomputing.com>
* Copyright (c) 2003 Margit Schubert-While <margitsw@t-online.de>
* Copyright (c) 2004 Justin Thiessen <jthiessen@penguincomputing.com>
- * Copyright (C) 2007--2009 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2007--2009 Jean Delvare <jdelvare@suse.de>
*
* Chip details at <http://www.national.com/ds/LM/LM85.pdf>
*
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 333092ce2465..4c5f20231c1a 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -5,7 +5,7 @@
* Philip Edelbrock <phil@netroedge.com>
* Stephen Rousset <stephen.rousset@rocketlogix.com>
* Dan Eaton <dan.eaton@rocketlogix.com>
- * Copyright (C) 2004-2008 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004-2008 Jean Delvare <jdelvare@suse.de>
*
* Original port to Linux 2.6 by Jeff Oliver.
*
@@ -1011,6 +1011,6 @@ static struct i2c_driver lm87_driver = {
module_i2c_driver(lm87_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org> and others");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de> and others");
MODULE_DESCRIPTION("LM87 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 8b8f3aa49726..701e952ae523 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1,7 +1,7 @@
/*
* lm90.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
- * Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2010 Jean Delvare <jdelvare@suse.de>
*
* Based on the lm83 driver. The LM90 is a sensor chip made by National
* Semiconductor. It reports up to two temperatures (its own plus up to
@@ -1679,6 +1679,6 @@ static struct i2c_driver lm90_driver = {
module_i2c_driver(lm90_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("LM90/ADM1032 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 71626f3c8742..9d0e87a4f0cb 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -1,6 +1,6 @@
/*
* lm92 - Hardware monitoring driver
- * Copyright (C) 2005-2008 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2008 Jean Delvare <jdelvare@suse.de>
*
* Based on the lm90 driver, with some ideas taken from the lm_sensors
* lm92 driver as well.
@@ -440,6 +440,6 @@ static struct i2c_driver lm92_driver = {
module_i2c_driver(lm92_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("LM92/MAX6635 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index a6f46058b1be..6f1c6c0dbaf5 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -12,7 +12,7 @@
* Copyright (c) 2003 Margit Schubert-While <margitsw@t-online.de>
*
* derived in part from w83l785ts.c:
- * Copyright (c) 2003-2004 Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2003-2004 Jean Delvare <jdelvare@suse.de>
*
* Ported to Linux 2.6 by Eric J. Bowersox <ericb@aspsys.com>
* Copyright (c) 2005 Aspen Systems, Inc.
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 445e5d40ac82..6638e997f83f 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -2,7 +2,7 @@
* max1619.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
* Copyright (C) 2003-2004 Oleksij Rempel <bug-track@fisher-privat.net>
- * Jean Delvare <khali@linux-fr.org>
+ * Jean Delvare <jdelvare@suse.de>
*
* Based on the lm90 driver. The MAX1619 is a sensor chip made by Maxim.
* It reports up to two temperatures (its own plus up to
@@ -357,7 +357,6 @@ static struct max1619_data *max1619_update_device(struct device *dev)
module_i2c_driver(max1619_driver);
-MODULE_AUTHOR("Oleksij Rempel <bug-track@fisher-privat.net> and "
- "Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Oleksij Rempel <bug-track@fisher-privat.net>, Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("MAX1619 sensor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 8326fbd60150..6520bc51d02a 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -8,7 +8,7 @@
*
* Based on the max1619 driver.
* Copyright (C) 2003-2004 Oleksij Rempel <bug-track@fisher-privat.net>
- * Jean Delvare <khali@linux-fr.org>
+ * Jean Delvare <jdelvare@suse.de>
*
* The MAX6642 is a sensor chip made by Maxim.
* It reports up to two temperatures (its own plus up to
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index cf811c1a1475..38d5a6334053 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -5,7 +5,7 @@
* Copyright (C) 2012 Guenter Roeck <linux@roeck-us.net>
*
* Derived from w83627ehf driver
- * Copyright (C) 2005-2012 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2012 Jean Delvare <jdelvare@suse.de>
* Copyright (C) 2006 Yuan Mu (Winbond),
* Rudolf Marek <r.marek@assembler.cz>
* David Hubbard <david.c.hubbard@gmail.com>
@@ -3936,6 +3936,18 @@ static int nct6775_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
+static void nct6791_enable_io_mapping(int sioaddr)
+{
+ int val;
+
+ val = superio_inb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE);
+ if (val & 0x10) {
+ pr_info("Enabling hardware monitor logical device mappings.\n");
+ superio_outb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE,
+ val & ~0x10);
+ }
+}
+
#ifdef CONFIG_PM
static int nct6775_suspend(struct device *dev)
{
@@ -3955,11 +3967,20 @@ static int nct6775_suspend(struct device *dev)
static int nct6775_resume(struct device *dev)
{
struct nct6775_data *data = dev_get_drvdata(dev);
- int i, j;
+ int i, j, err = 0;
mutex_lock(&data->update_lock);
data->bank = 0xff; /* Force initial bank selection */
+ if (data->kind == nct6791) {
+ err = superio_enter(data->sioreg);
+ if (err)
+ goto abort;
+
+ nct6791_enable_io_mapping(data->sioreg);
+ superio_exit(data->sioreg);
+ }
+
/* Restore limits */
for (i = 0; i < data->in_num; i++) {
if (!(data->have_in & (1 << i)))
@@ -3996,11 +4017,12 @@ static int nct6775_resume(struct device *dev)
nct6775_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
}
+abort:
/* Force re-reading all values */
data->valid = false;
mutex_unlock(&data->update_lock);
- return 0;
+ return err;
}
static const struct dev_pm_ops nct6775_dev_pm_ops = {
@@ -4088,15 +4110,9 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
pr_warn("Forcibly enabling Super-I/O. Sensor is probably unusable.\n");
superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
}
- if (sio_data->kind == nct6791) {
- val = superio_inb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE);
- if (val & 0x10) {
- pr_info("Enabling hardware monitor logical device mappings.\n");
- superio_outb(sioaddr,
- NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE,
- val & ~0x10);
- }
- }
+
+ if (sio_data->kind == nct6791)
+ nct6791_enable_io_mapping(sioaddr);
superio_exit(sioaddr);
pr_info("Found %s or compatible chip at %#x:%#x\n",
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8c23203915af..8a17f01e8672 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -145,7 +145,7 @@ struct ntc_data {
static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
{
struct iio_channel *channel = pdata->chan;
- unsigned int result;
+ s64 result;
int val, ret;
ret = iio_read_channel_raw(channel, &val);
@@ -155,10 +155,10 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
}
/* unit: mV */
- result = pdata->pullup_uv * val;
+ result = pdata->pullup_uv * (s64) val;
result >>= 12;
- return result;
+ return (int)result;
}
static const struct of_device_id ntc_match[] = {
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index aa615ba73d4b..330fe117e219 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -1,7 +1,7 @@
/*
* pc87360.c - Part of lm_sensors, Linux kernel modules
* for hardware monitoring
- * Copyright (C) 2004, 2007 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004, 2007 Jean Delvare <jdelvare@suse.de>
*
* Copied from smsc47m1.c:
* Copyright (C) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
@@ -1808,7 +1808,7 @@ static void __exit pc87360_exit(void)
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("PC8736x hardware monitor");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 6e6ea4437bb6..d847e0a084e0 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -1,7 +1,7 @@
/*
* pc87427.c - hardware monitoring driver for the
* National Semiconductor PC87427 Super-I/O chip
- * Copyright (C) 2006, 2008, 2010 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2006, 2008, 2010 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1347,7 +1347,7 @@ static void __exit pc87427_exit(void)
platform_driver_unregister(&pc87427_driver);
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("PC87427 hardware monitoring driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index 825883d29002..5740888c6242 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2001-2004 Aurelien Jarno <aurelien@aurel32.net>
* Ported to Linux 2.6 by Aurelien Jarno <aurelien@aurel32.net> with
- * the help of Jean Delvare <khali@linux-fr.org>
+ * the help of Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 3cbf66e9d861..291d11fe93e7 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -90,7 +90,8 @@ struct pmbus_data {
u32 flags; /* from platform data */
- int exponent; /* linear mode: exponent for output voltages */
+ int exponent[PMBUS_PAGES];
+ /* linear mode: exponent for output voltages */
const struct pmbus_driver_info *info;
@@ -410,7 +411,7 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
long val;
if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */
- exponent = data->exponent;
+ exponent = data->exponent[sensor->page];
mantissa = (u16) sensor->data;
} else { /* LINEAR11 */
exponent = ((s16)sensor->data) >> 11;
@@ -516,7 +517,7 @@ static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
#define MIN_MANTISSA (511 * 1000)
static u16 pmbus_data2reg_linear(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
s16 exponent = 0, mantissa;
bool negative = false;
@@ -525,7 +526,7 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
if (val == 0)
return 0;
- if (class == PSC_VOLTAGE_OUT) {
+ if (sensor->class == PSC_VOLTAGE_OUT) {
/* LINEAR16 does not support negative voltages */
if (val < 0)
return 0;
@@ -534,10 +535,10 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
* For a static exponents, we don't have a choice
* but to adjust the value to it.
*/
- if (data->exponent < 0)
- val <<= -data->exponent;
+ if (data->exponent[sensor->page] < 0)
+ val <<= -data->exponent[sensor->page];
else
- val >>= data->exponent;
+ val >>= data->exponent[sensor->page];
val = DIV_ROUND_CLOSEST(val, 1000);
return val & 0xffff;
}
@@ -548,14 +549,14 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
}
/* Power is in uW. Convert to mW before converting. */
- if (class == PSC_POWER)
+ if (sensor->class == PSC_POWER)
val = DIV_ROUND_CLOSEST(val, 1000L);
/*
* For simplicity, convert fan data to milli-units
* before calculating the exponent.
*/
- if (class == PSC_FAN)
+ if (sensor->class == PSC_FAN)
val = val * 1000;
/* Reduce large mantissa until it fits into 10 bit */
@@ -585,22 +586,22 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
}
static u16 pmbus_data2reg_direct(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
long m, b, R;
- m = data->info->m[class];
- b = data->info->b[class];
- R = data->info->R[class];
+ m = data->info->m[sensor->class];
+ b = data->info->b[sensor->class];
+ R = data->info->R[sensor->class];
/* Power is in uW. Adjust R and b. */
- if (class == PSC_POWER) {
+ if (sensor->class == PSC_POWER) {
R -= 3;
b *= 1000;
}
/* Calculate Y = (m * X + b) * 10^R */
- if (class != PSC_FAN) {
+ if (sensor->class != PSC_FAN) {
R -= 3; /* Adjust R and b for data in milli-units */
b *= 1000;
}
@@ -619,7 +620,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
}
static u16 pmbus_data2reg_vid(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
val = clamp_val(val, 500, 1600);
@@ -627,20 +628,20 @@ static u16 pmbus_data2reg_vid(struct pmbus_data *data,
}
static u16 pmbus_data2reg(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
u16 regval;
- switch (data->info->format[class]) {
+ switch (data->info->format[sensor->class]) {
case direct:
- regval = pmbus_data2reg_direct(data, class, val);
+ regval = pmbus_data2reg_direct(data, sensor, val);
break;
case vid:
- regval = pmbus_data2reg_vid(data, class, val);
+ regval = pmbus_data2reg_vid(data, sensor, val);
break;
case linear:
default:
- regval = pmbus_data2reg_linear(data, class, val);
+ regval = pmbus_data2reg_linear(data, sensor, val);
break;
}
return regval;
@@ -746,7 +747,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
return -EINVAL;
mutex_lock(&data->update_lock);
- regval = pmbus_data2reg(data, sensor->class, val);
+ regval = pmbus_data2reg(data, sensor, val);
ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
if (ret < 0)
rv = ret;
@@ -1643,12 +1644,13 @@ static int pmbus_find_attributes(struct i2c_client *client,
* This function is called for all chips.
*/
static int pmbus_identify_common(struct i2c_client *client,
- struct pmbus_data *data)
+ struct pmbus_data *data, int page)
{
int vout_mode = -1;
- if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE))
- vout_mode = _pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ if (pmbus_check_byte_register(client, page, PMBUS_VOUT_MODE))
+ vout_mode = _pmbus_read_byte_data(client, page,
+ PMBUS_VOUT_MODE);
if (vout_mode >= 0 && vout_mode != 0xff) {
/*
* Not all chips support the VOUT_MODE command,
@@ -1659,7 +1661,7 @@ static int pmbus_identify_common(struct i2c_client *client,
if (data->info->format[PSC_VOLTAGE_OUT] != linear)
return -ENODEV;
- data->exponent = ((s8)(vout_mode << 3)) >> 3;
+ data->exponent[page] = ((s8)(vout_mode << 3)) >> 3;
break;
case 1: /* VID mode */
if (data->info->format[PSC_VOLTAGE_OUT] != vid)
@@ -1674,7 +1676,7 @@ static int pmbus_identify_common(struct i2c_client *client,
}
}
- pmbus_clear_fault_page(client, 0);
+ pmbus_clear_fault_page(client, page);
return 0;
}
@@ -1682,7 +1684,7 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
struct pmbus_driver_info *info)
{
struct device *dev = &client->dev;
- int ret;
+ int page, ret;
/*
* Some PMBus chips don't support PMBUS_STATUS_BYTE, so try
@@ -1715,10 +1717,12 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
return -ENODEV;
}
- ret = pmbus_identify_common(client, data);
- if (ret < 0) {
- dev_err(dev, "Failed to identify chip capabilities\n");
- return ret;
+ for (page = 0; page < info->pages; page++) {
+ ret = pmbus_identify_common(client, data, page);
+ if (ret < 0) {
+ dev_err(dev, "Failed to identify chip capabilities\n");
+ return ret;
+ }
}
return 0;
}
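
Note on the pmbus_core changes above: passing the whole pmbus_sensor (rather than just its class) into the data2reg helpers lets LINEAR16 conversions pick the exponent for the sensor's page, and VOUT_MODE, which supplies that exponent, is now read once per page in pmbus_identify_common(). For reference, the two "linear" encodings involved are: LINEAR11 packs a signed 5-bit exponent in bits [15:11] and a signed 11-bit mantissa in bits [10:0] of one word; LINEAR16 uses the whole 16-bit word as an unsigned mantissa and takes the exponent from the sign-extended low 5 bits of VOUT_MODE, which is exactly what ((s8)(vout_mode << 3)) >> 3 computes. A standalone sketch with illustrative values (not driver code):

#include <stdint.h>
#include <stdio.h>

static long linear11_to_milli(uint16_t word)
{
        int exponent = (int16_t)word >> 11;                    /* top 5 bits, signed */
        int mantissa = ((int16_t)((word & 0x7ff) << 5)) >> 5;  /* low 11 bits, signed */
        long val = mantissa * 1000L;                           /* report milli-units */

        return exponent >= 0 ? val << exponent : val >> -exponent;
}

static long linear16_to_millivolt(uint16_t word, uint8_t vout_mode)
{
        int exponent = ((int8_t)(vout_mode << 3)) >> 3;        /* sign-extend low 5 bits */
        long val = (long)word * 1000L;

        return exponent >= 0 ? val << exponent : val >> -exponent;
}

int main(void)
{
        /* 0xCA40: exponent -7, mantissa 576 -> 576 * 2^-7 = 4.5 -> 4500 milli-units */
        printf("LINEAR11 0xCA40 -> %ld\n", linear11_to_milli(0xCA40));
        /* 0x6000 with VOUT_MODE 0x13 (exponent -13): 24576 * 2^-13 = 3.0 V -> 3000 mV */
        printf("LINEAR16 0x6000 @ mode 0x13 -> %ld mV\n",
               linear16_to_millivolt(0x6000, 0x13));
        return 0;
}
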
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 72a889702f0d..3532026e25da 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -6,7 +6,7 @@
* Kyösti Mälkki <kmalkki@cc.hut.fi>, and
* Mark D. Studebaker <mdsxyz123@yahoo.com>
* Ported to Linux 2.6 by Aurelien Jarno <aurelien@aurel32.net> with
- * the help of Jean Delvare <khali@linux-fr.org>
+ * the help of Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -754,7 +754,7 @@ static struct sis5595_data *sis5595_update_device(struct device *dev)
return data;
}
-static DEFINE_PCI_DEVICE_TABLE(sis5595_pci_ids) = {
+static const struct pci_device_id sis5595_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ 0, }
};
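
Note: this hunk, and the matching via686a and vt8231 hunks below, replace the DEFINE_PCI_DEVICE_TABLE() convenience macro with the open-coded declaration that checkpatch now prefers; the table contents and the MODULE_DEVICE_TABLE() hookup (outside the visible context) are unchanged. A minimal sketch of the open-coded form, using a hypothetical example_pci_ids name:

static const struct pci_device_id example_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
        { }                             /* sentinel entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);
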
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index 81348fadf3b6..bd89e87bd6ae 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -9,7 +9,7 @@
*
* derived in part from smsc47m1.c:
* Copyright (C) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
- * Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 05cb814539cb..23a22c4eee51 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -7,7 +7,7 @@
* Super-I/O chips.
*
* Copyright (C) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
- * Copyright (C) 2004-2007 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004-2007 Jean Delvare <jdelvare@suse.de>
* Ported to Linux 2.6 by Gabriele Gorla <gorlik@yahoo.com>
* and Jean Delvare
*
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index d7b47abf37fe..6748b4583e7b 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -27,6 +27,8 @@
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/jiffies.h>
+#include <linux/thermal.h>
+#include <linux/of.h>
#define DRIVER_NAME "tmp102"
@@ -50,6 +52,7 @@
struct tmp102 {
struct device *hwmon_dev;
+ struct thermal_zone_device *tz;
struct mutex lock;
u16 config_orig;
unsigned long last_update;
@@ -93,6 +96,15 @@ static struct tmp102 *tmp102_update_device(struct i2c_client *client)
return tmp102;
}
+static int tmp102_read_temp(void *dev, long *temp)
+{
+ struct tmp102 *tmp102 = tmp102_update_device(to_i2c_client(dev));
+
+ *temp = tmp102->temp[0];
+
+ return 0;
+}
+
static ssize_t tmp102_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -204,6 +216,12 @@ static int tmp102_probe(struct i2c_client *client,
goto fail_remove_sysfs;
}
+ tmp102->tz = thermal_zone_of_sensor_register(&client->dev, 0,
+ &client->dev,
+ tmp102_read_temp, NULL);
+ if (IS_ERR(tmp102->tz))
+ tmp102->tz = NULL;
+
dev_info(&client->dev, "initialized\n");
return 0;
@@ -220,6 +238,7 @@ static int tmp102_remove(struct i2c_client *client)
{
struct tmp102 *tmp102 = i2c_get_clientdata(client);
+ thermal_zone_of_sensor_unregister(&client->dev, tmp102->tz);
hwmon_device_unregister(tmp102->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group);
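
Note on the tmp102 hunks above: the driver now exposes its reading as a device-tree-declared thermal sensor, and registration failure is deliberately non-fatal (tz is reset to NULL) so the driver still works on systems with no matching thermal zone. A hedged sketch of the same pattern in a hypothetical "foo" platform driver; the get_temp callback takes a long * in this kernel generation, and all names below are illustrative:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>

struct foo_data {
        struct thermal_zone_device *tz;
        long last_temp;                 /* millidegrees Celsius */
};

static int foo_read_temp(void *arg, long *temp)
{
        struct foo_data *data = arg;

        *temp = data->last_temp;
        return 0;
}

static int foo_probe(struct platform_device *pdev)
{
        struct foo_data *data;

        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        /* Sensor id 0; failure is non-fatal, as in the tmp102 hunk above. */
        data->tz = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
                                                   foo_read_temp, NULL);
        if (IS_ERR(data->tz))
                data->tz = NULL;

        platform_set_drvdata(pdev, data);
        return 0;
}

static int foo_remove(struct platform_device *pdev)
{
        struct foo_data *data = platform_get_drvdata(pdev);

        thermal_zone_of_sensor_unregister(&pdev->dev, data->tz);
        return 0;
}
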
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index c9dcce8c3dc3..babd732b4e18 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -824,7 +824,7 @@ static struct via686a_data *via686a_update_device(struct device *dev)
return data;
}
-static DEFINE_PCI_DEVICE_TABLE(via686a_pci_ids) = {
+static const struct pci_device_id via686a_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) },
{ }
};
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index aee14e2192f8..b3babe3326fb 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -766,7 +766,7 @@ static struct platform_driver vt8231_driver = {
.remove = vt8231_remove,
};
-static DEFINE_PCI_DEVICE_TABLE(vt8231_pci_ids) = {
+static const struct pci_device_id vt8231_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) },
{ 0, }
};
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 23ff210513d3..f0ab61db7a0d 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1,7 +1,7 @@
/*
* w83627ehf - Driver for the hardware monitoring functionality of
* the Winbond W83627EHF Super-I/O chip
- * Copyright (C) 2005-2012 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2012 Jean Delvare <jdelvare@suse.de>
* Copyright (C) 2006 Yuan Mu (Winbond),
* Rudolf Marek <r.marek@assembler.cz>
* David Hubbard <david.c.hubbard@gmail.com>
@@ -2889,7 +2889,7 @@ static void __exit sensors_w83627ehf_exit(void)
platform_driver_unregister(&w83627ehf_driver);
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("W83627EHF driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index cb9cd326ecb5..c1726be3654c 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -5,7 +5,7 @@
* Philip Edelbrock <phil@netroedge.com>,
* and Mark Studebaker <mdsxyz123@yahoo.com>
* Ported to 2.6 by Bernhard C. Schrenk <clemy@clemy.org>
- * Copyright (c) 2007 - 1012 Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2007 - 1012 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index f9d513949a38..84911616d8c0 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -4,7 +4,7 @@
* Copyright (c) 1998 - 2001 Frodo Looijaard <frodol@dds.nl>,
* Philip Edelbrock <phil@netroedge.com>,
* and Mark Studebaker <mdsxyz123@yahoo.com>
- * Copyright (c) 2007 - 2008 Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2007 - 2008 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 908209d24664..21894131190f 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -2,7 +2,7 @@
* w83795.c - Linux kernel driver for hardware monitoring
* Copyright (C) 2008 Nuvoton Technology Corp.
* Wei Song
- * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -2282,6 +2282,6 @@ static struct i2c_driver w83795_driver = {
module_i2c_driver(w83795_driver);
-MODULE_AUTHOR("Wei Song, Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Wei Song, Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("W83795G/ADG hardware monitoring driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 39dbe990dc10..6384b268f590 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -1,7 +1,7 @@
/*
* w83l785ts.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
- * Copyright (C) 2003-2009 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2009 Jean Delvare <jdelvare@suse.de>
*
* Inspired from the lm83 driver. The W83L785TS-S is a sensor chip made
* by Winbond. It reports a single external temperature with a 1 deg
@@ -10,7 +10,7 @@
* http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/W83L785TS-S.pdf
*
* Ported to Linux 2.6 by Wolfgang Ziegler <nuppla@gmx.at> and Jean Delvare
- * <khali@linux-fr.org>.
+ * <jdelvare@suse.de>.
*
* Thanks to James Bolt <james@evilpenguin.com> for benchmarking the read
* error handling mechanism.
@@ -299,6 +299,6 @@ static struct w83l785ts_data *w83l785ts_update_device(struct device *dev)
module_i2c_driver(w83l785ts_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("W83L785TS-S driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index fad22b0bb5b0..65ef9664d5da 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -20,12 +20,11 @@
* ------------------------------------------------------------------------- */
/* With some changes from Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki
- <kmalkki@cc.hut.fi> and Jean Delvare <khali@linux-fr.org> */
+ <kmalkki@cc.hut.fi> and Jean Delvare <jdelvare@suse.de> */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/i2c.h>
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index f892a424009b..8b10f88b13d9 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -24,7 +24,6 @@
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pca.h>
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index 5c2379522aa9..34370090b753 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pcf.h>
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 3b26129f6055..f5ed03164d86 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -152,6 +152,7 @@ config I2C_PIIX4
ATI SB700/SP5100
ATI SB800
AMD Hudson-2
+ AMD ML
AMD CZ
Serverworks OSB4
Serverworks CSB5
@@ -412,7 +413,6 @@ config I2C_DESIGNWARE_CORE
config I2C_DESIGNWARE_PLATFORM
tristate "Synopsys DesignWare Platform"
- depends on HAVE_CLK
select I2C_DESIGNWARE_CORE
help
If you say yes to this option, support will be included for the
@@ -648,6 +648,16 @@ config I2C_PXA_SLAVE
is necessary for systems where the PXA may be a target on the
I2C bus.
+config I2C_RIIC
+ tristate "Renesas RIIC adapter"
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ help
+ If you say yes to this option, support will be included for the
+ Renesas RIIC I2C interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-riic.
+
config HAVE_S3C2410_I2C
bool
help
@@ -683,7 +693,7 @@ config I2C_SH7760
config I2C_SH_MOBILE
tristate "SuperH Mobile I2C Controller"
- depends on SUPERH || ARM || COMPILE_TEST
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Renesas SH-Mobile processor.
@@ -796,7 +806,7 @@ config I2C_XLR
config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
- depends on ARM || COMPILE_TEST
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
If you say yes to this option, support will be included for the
R-Car I2C controller.
@@ -865,6 +875,16 @@ config I2C_PARPORT_LIGHT
This support is also available as a module. If so, the module
will be called i2c-parport-light.
+config I2C_ROBOTFUZZ_OSIF
+ tristate "RobotFuzz Open Source InterFace USB adapter"
+ depends on USB
+ help
+ If you say yes to this option, support will be included for the
+ RobotFuzz Open Source InterFace USB to I2C interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-osif.
+
config I2C_TAOS_EVM
tristate "TAOS evaluation module"
depends on TTY
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index c73eb0ea788e..a08931fe73e1 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o
obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o
+obj-$(CONFIG_I2C_RIIC) += i2c-riic.o
obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
obj-$(CONFIG_I2C_S6000) += i2c-s6000.o
obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
@@ -83,6 +84,7 @@ obj-$(CONFIG_I2C_RCAR) += i2c-rcar.o
obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o
+obj-$(CONFIG_I2C_ROBOTFUZZ_OSIF) += i2c-robotfuzz-osif.o
obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o
obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o
obj-$(CONFIG_I2C_VIPERBOARD) += i2c-viperboard.o
diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
index ed9f48d566db..9d7be5af2bf2 100644
--- a/drivers/i2c/busses/i2c-acorn.c
+++ b/drivers/i2c/busses/i2c-acorn.c
@@ -12,7 +12,7 @@
* On Acorn machines, the following i2c devices are on the bus:
* - PCF8583 real time clock & static RAM
*/
-#include <linux/init.h>
+#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 3f491815e2c4..7d60d3a1f621 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -58,7 +58,6 @@
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index 84ccd9496a5e..4611e4754a67 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -20,7 +20,6 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/acpi.h>
#define ALI1563_MAX_TIMEOUT 500
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 26bcc6127cee..4823206a4870 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -65,7 +65,6 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
index 07f01ac853ff..41fc6837fb8b 100644
--- a/drivers/i2c/busses/i2c-amd756-s4882.c
+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
@@ -1,7 +1,7 @@
/*
* i2c-amd756-s4882.c - i2c-amd756 extras for the Tyan S4882 motherboard
*
- * Copyright (C) 2004, 2008 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004, 2008 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -250,7 +250,7 @@ static void __exit amd756_s4882_exit(void)
"Physical bus restoration failed\n");
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("S4882 SMBus multiplexing");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index e13e2aa2d05d..819d3c1062a7 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -41,7 +41,6 @@
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index a44e6e77c5a1..f3d4d79855b5 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/acpi.h>
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 8edba9de76df..843d01268ae9 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -589,6 +589,9 @@ static const struct of_device_id atmel_twi_dt_ids[] = {
.compatible = "atmel,at91sam9260-i2c",
.data = &at91sam9260_config,
} , {
+ .compatible = "atmel,at91sam9261-i2c",
+ .data = &at91sam9261_config,
+ } , {
.compatible = "atmel,at91sam9g20-i2c",
.data = &at91sam9g20_config,
} , {
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index b5b89239d622..8762458ca7da 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -31,7 +31,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/slab.h>
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
index ce7ffba2b020..bdf040fd8675 100644
--- a/drivers/i2c/busses/i2c-cbus-gpio.c
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -19,7 +19,6 @@
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/errno.h>
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 3e5ea2c87a6e..be7f0a20d634 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -33,7 +33,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/stddef.h>
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index e89e3e2145e5..14c4b30d4ccc 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -26,7 +26,6 @@
*
*/
#include <linux/export.h>
-#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/i2c.h>
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index ff15ae90aaf5..e08e458bab02 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -18,7 +18,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/fs.h>
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index 044f85b01d06..9fd711c03dd2 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index 436b0f254916..512fcfabc18e 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -12,7 +12,6 @@
* of this archive for more details.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index 79c3d9069a48..e248257fe517 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <asm/hydra.h>
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 737e29866887..349c2d35e792 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -2,7 +2,7 @@
Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl>,
Philip Edelbrock <phil@netroedge.com>, and Mark D. Studebaker
<mdsxyz123@yahoo.com>
- Copyright (C) 2007 - 2012 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2007 - 2012 Jean Delvare <jdelvare@suse.de>
Copyright (C) 2010 Intel Corporation,
David Woodhouse <dwmw2@infradead.org>
@@ -1312,8 +1312,7 @@ static void __exit i2c_i801_exit(void)
pci_unregister_driver(&i801_driver);
}
-MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>, "
- "Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>, Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("I801 SMBus driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index f7444100f397..274312c96b12 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -36,7 +36,6 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index d0cfbb4cb964..db895fb22e65 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -607,7 +607,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "can't get irq number\n");
- return -ENOENT;
+ return irq;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index dd24aa0424a9..3d16c2f60a5e 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -34,7 +34,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 8c38aaa7417c..cf99dbf21fd1 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -33,7 +33,6 @@
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/acpi.h>
@@ -275,7 +274,8 @@ static int smbus_sch_probe(struct platform_device *dev)
if (!res)
return -EBUSY;
- if (!request_region(res->start, resource_size(res), dev->name)) {
+ if (!devm_request_region(&dev->dev, res->start, resource_size(res),
+ dev->name)) {
dev_err(&dev->dev, "SMBus region 0x%x already in use!\n",
sch_smba);
return -EBUSY;
@@ -294,7 +294,6 @@ static int smbus_sch_probe(struct platform_device *dev)
retval = i2c_add_adapter(&sch_adapter);
if (retval) {
dev_err(&dev->dev, "Couldn't register adapter!\n");
- release_region(res->start, resource_size(res));
sch_smba = 0;
}
@@ -303,11 +302,8 @@ static int smbus_sch_probe(struct platform_device *dev)
static int smbus_sch_remove(struct platform_device *pdev)
{
- struct resource *res;
if (sch_smba) {
i2c_del_adapter(&sch_adapter);
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(res->start, resource_size(res));
sch_smba = 0;
}
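
Note: the i2c-isch change is the same managed-resource idea applied more fully to i2c-pnx further down (see the sketch after those hunks). devm_request_region() ties the I/O region's lifetime to the device, so neither the probe error path nor smbus_sch_remove() needs an explicit release_region() any more.
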
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 0043ede234c2..8ce4f517fc56 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -62,7 +62,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
@@ -344,6 +343,7 @@ static int ismt_process_desc(const struct ismt_desc *desc,
data->word = dma_buffer[0] | (dma_buffer[1] << 8);
break;
case I2C_SMBUS_BLOCK_DATA:
+ case I2C_SMBUS_I2C_BLOCK_DATA:
memcpy(&data->block[1], dma_buffer, desc->rxbytes);
data->block[0] = desc->rxbytes;
break;
@@ -509,6 +509,41 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
}
break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ /* Make sure the length is valid */
+ if (data->block[0] < 1)
+ data->block[0] = 1;
+
+ if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
+ data->block[0] = I2C_SMBUS_BLOCK_MAX;
+
+ if (read_write == I2C_SMBUS_WRITE) {
+ /* i2c Block Write */
+ dev_dbg(dev, "I2C_SMBUS_I2C_BLOCK_DATA: WRITE\n");
+ dma_size = data->block[0] + 1;
+ dma_direction = DMA_TO_DEVICE;
+ desc->wr_len_cmd = dma_size;
+ desc->control |= ISMT_DESC_I2C;
+ priv->dma_buffer[0] = command;
+ memcpy(&priv->dma_buffer[1], &data->block[1], dma_size);
+ } else {
+ /* i2c Block Read */
+ dev_dbg(dev, "I2C_SMBUS_I2C_BLOCK_DATA: READ\n");
+ dma_size = data->block[0];
+ dma_direction = DMA_FROM_DEVICE;
+ desc->rd_len = dma_size;
+ desc->wr_len_cmd = command;
+ desc->control |= (ISMT_DESC_I2C | ISMT_DESC_CWRL);
+ /*
+ * Per the "Table 15-15. I2C Commands",
+ * in the External Design Specification (EDS),
+ * (Document Number: 508084, Revision: 2.0),
+ * the _rw bit must be 0
+ */
+ desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, 0);
+ }
+ break;
+
default:
dev_err(dev, "Unsupported transaction %d\n",
size);
@@ -582,6 +617,7 @@ static u32 ismt_func(struct i2c_adapter *adap)
I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_PROC_CALL |
I2C_FUNC_SMBUS_BLOCK_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK |
I2C_FUNC_SMBUS_PEC;
}
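
Note on the i2c-ismt hunks above: with ISMT_DESC_I2C set, the descriptor performs an I2C-style block transfer, so there is no count byte on the wire and the read length comes from the caller; advertising I2C_FUNC_SMBUS_I2C_BLOCK lets clients reach this through the usual SMBus emulation helper. A hedged client-side sketch, with a hypothetical register and length:

#include <linux/i2c.h>

static int example_read_regs(struct i2c_client *client)
{
        u8 buf[4];
        int ret;

        /* 4-byte I2C block read starting at hypothetical register 0x10 */
        ret = i2c_smbus_read_i2c_block_data(client, 0x10, sizeof(buf), buf);
        if (ret < 0)
                return ret;

        dev_dbg(&client->dev, "read %d bytes\n", ret);
        return 0;
}
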
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index b6a741caf4f6..f5391633b53a 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 8be7e42aa4de..d52d84937ad3 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -97,7 +97,6 @@ enum {
enum {
MV64XXX_I2C_ACTION_INVALID,
MV64XXX_I2C_ACTION_CONTINUE,
- MV64XXX_I2C_ACTION_OFFLOAD_SEND_START,
MV64XXX_I2C_ACTION_SEND_START,
MV64XXX_I2C_ACTION_SEND_RESTART,
MV64XXX_I2C_ACTION_OFFLOAD_RESTART,
@@ -204,6 +203,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
unsigned long ctrl_reg;
struct i2c_msg *msg = drv_data->msgs;
+ if (!drv_data->offload_enabled)
+ return -EOPNOTSUPP;
+
drv_data->msg = msg;
drv_data->byte_posn = 0;
drv_data->bytes_left = msg->len;
@@ -433,8 +435,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
drv_data->msgs++;
drv_data->num_msgs--;
- if (!(drv_data->offload_enabled &&
- mv64xxx_i2c_offload_msg(drv_data))) {
+ if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START;
writel(drv_data->cntl_bits,
drv_data->reg_base + drv_data->reg_offsets.control);
@@ -458,15 +459,14 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
drv_data->reg_base + drv_data->reg_offsets.control);
break;
- case MV64XXX_I2C_ACTION_OFFLOAD_SEND_START:
- if (!mv64xxx_i2c_offload_msg(drv_data))
- break;
- else
- drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
- /* FALLTHRU */
case MV64XXX_I2C_ACTION_SEND_START:
- writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
- drv_data->reg_base + drv_data->reg_offsets.control);
+ /* Can we offload this msg ? */
+ if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
+ /* No, switch to standard path */
+ mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
+ writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
+ drv_data->reg_base + drv_data->reg_offsets.control);
+ }
break;
case MV64XXX_I2C_ACTION_SEND_ADDR_1:
@@ -625,15 +625,10 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
unsigned long flags;
spin_lock_irqsave(&drv_data->lock, flags);
- if (drv_data->offload_enabled) {
- drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_START;
- drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
- } else {
- mv64xxx_i2c_prepare_for_io(drv_data, msg);
- drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
- drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
- }
+ drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
+ drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+
drv_data->send_stop = is_last;
drv_data->block = 1;
mv64xxx_i2c_do_action(drv_data);
@@ -692,6 +687,7 @@ static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
{ .compatible = "allwinner,sun4i-i2c", .data = &mv64xxx_i2c_regs_sun4i},
{ .compatible = "marvell,mv64xxx-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
{ .compatible = "marvell,mv78230-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
+ { .compatible = "marvell,mv78230-a0-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
{}
};
MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
@@ -783,6 +779,10 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
drv_data->errata_delay = true;
}
+ if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) {
+ drv_data->offload_enabled = false;
+ drv_data->errata_delay = true;
+ }
out:
return rc;
#endif
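
Note on the mv64xxx hunks above: moving the capability check into mv64xxx_i2c_offload_msg() itself (it now returns -EOPNOTSUPP when offloading is disabled) lets both call sites use one "try to offload, fall back to programmed I/O on failure" pattern, which is why the separate MV64XXX_I2C_ACTION_OFFLOAD_SEND_START state can be dropped.
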
diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
index 2ca268d6140b..b170bdffb5de 100644
--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
@@ -1,7 +1,7 @@
/*
* i2c-nforce2-s4985.c - i2c-nforce2 extras for the Tyan S4985 motherboard
*
- * Copyright (C) 2008 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2008 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -245,7 +245,7 @@ static void __exit nforce2_s4985_exit(void)
"Physical bus restoration failed\n");
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("S4985 SMBus multiplexing");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index ac88f4000cc2..0038c451095c 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -51,7 +51,6 @@
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/dmi.h>
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 8bf9ac01301a..4443613514ee 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -22,7 +22,6 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
-#include <linux/platform_data/i2c-nomadik.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
@@ -104,6 +103,29 @@
/* maximum threshold value */
#define MAX_I2C_FIFO_THRESHOLD 15
+enum i2c_freq_mode {
+ I2C_FREQ_MODE_STANDARD, /* up to 100 Kb/s */
+ I2C_FREQ_MODE_FAST, /* up to 400 Kb/s */
+ I2C_FREQ_MODE_HIGH_SPEED, /* up to 3.4 Mb/s */
+ I2C_FREQ_MODE_FAST_PLUS, /* up to 1 Mb/s */
+};
+
+/**
+ * struct nmk_i2c_controller - client specific controller configuration
+ * @clk_freq: clock frequency for the operation mode
+ * @tft: Tx FIFO Threshold in bytes
+ * @rft: Rx FIFO Threshold in bytes
+ * @timeout: Slave response timeout (ms)
+ * @sm: speed mode
+ */
+struct nmk_i2c_controller {
+ u32 clk_freq;
+ unsigned char tft;
+ unsigned char rft;
+ int timeout;
+ enum i2c_freq_mode sm;
+};
+
/**
* struct i2c_vendor_data - per-vendor variations
* @has_mtdws: variant has the MTDWS bit
@@ -340,6 +362,8 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
{
u32 brcr1, brcr2;
u32 i2c_clk, div;
+ u32 ns;
+ u16 slsu;
writel(0x0, dev->virtbase + I2C_CR);
writel(0x0, dev->virtbase + I2C_HSMCR);
@@ -347,18 +371,38 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
writel(0x0, dev->virtbase + I2C_RFTR);
writel(0x0, dev->virtbase + I2C_DMAR);
+ i2c_clk = clk_get_rate(dev->clk);
+
/*
* set the slsu:
*
* slsu defines the data setup time after SCL clock
- * stretching in terms of i2c clk cycles. The
- * needed setup time for the three modes are 250ns,
- * 100ns, 10ns respectively thus leading to the values
- * of 14, 6, 2 for a 48 MHz i2c clk.
+ * stretching in terms of i2c clk cycles + 1 (zero means
+ * "wait one cycle"), the needed setup time for the three
+ * modes are 250ns, 100ns, 10ns respectively.
+ *
+ * As the time for one cycle T in nanoseconds is
+ * T = (1/f) * 1000000000 =>
+ * slsu = cycles / (1000000000 / f) + 1
*/
- writel(dev->cfg.slsu << 16, dev->virtbase + I2C_SCR);
+ ns = DIV_ROUND_UP_ULL(1000000000ULL, i2c_clk);
+ switch (dev->cfg.sm) {
+ case I2C_FREQ_MODE_FAST:
+ case I2C_FREQ_MODE_FAST_PLUS:
+ slsu = DIV_ROUND_UP(100, ns); /* Fast */
+ break;
+ case I2C_FREQ_MODE_HIGH_SPEED:
+ slsu = DIV_ROUND_UP(10, ns); /* High */
+ break;
+ case I2C_FREQ_MODE_STANDARD:
+ default:
+ slsu = DIV_ROUND_UP(250, ns); /* Standard */
+ break;
+ }
+ slsu += 1;
- i2c_clk = clk_get_rate(dev->clk);
+ dev_dbg(&dev->adev->dev, "calculated SLSU = %04x\n", slsu);
+ writel(slsu << 16, dev->virtbase + I2C_SCR);
/*
* The spec says, in case of std. mode the divider is
@@ -915,11 +959,6 @@ static const struct i2c_algorithm nmk_i2c_algo = {
};
static struct nmk_i2c_controller u8500_i2c = {
- /*
- * Slave data setup time; 250ns, 100ns, and 10ns, which
- * is 14, 6 and 2 respectively for a 48Mhz i2c clock.
- */
- .slsu = 0xe,
.tft = 1, /* Tx FIFO threshold */
.rft = 8, /* Rx FIFO threshold */
.clk_freq = 400000, /* fast mode operation */
@@ -1027,7 +1066,6 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
/* fetch the controller configuration from machine */
dev->cfg.clk_freq = pdata->clk_freq;
- dev->cfg.slsu = pdata->slsu;
dev->cfg.tft = pdata->tft;
dev->cfg.rft = pdata->rft;
dev->cfg.sm = pdata->sm;
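
Note on the i2c-nomadik hunks above: SLSU (data setup time after SCL stretching, expressed in i2c clock cycles plus one) is now computed from the actual clock rate at probe time instead of coming from hard-coded platform data. Plugging in the 48 MHz clock the removed comment assumed gives, illustratively:

        ns   = DIV_ROUND_UP_ULL(1000000000ULL, 48000000);  /* 21 ns per cycle */
        slsu = DIV_ROUND_UP(250, ns) + 1;   /* standard mode: 13 */
        slsu = DIV_ROUND_UP(100, ns) + 1;   /* fast / fast+:   6 */
        slsu = DIV_ROUND_UP(10, ns) + 1;    /* high speed:     2 */

which lands on, or next to, the 14/6/2 values the removed u8500 platform data hard-coded for that clock.
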
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index c61f37a10a07..80e06fa45720 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -15,7 +15,6 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index b929ba271b47..81042b08a947 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -18,7 +18,6 @@
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/of.h>
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index aa9577881925..62f55fe624cb 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -1,7 +1,7 @@
/* ------------------------------------------------------------------------ *
* i2c-parport-light.c I2C bus over parallel port *
* ------------------------------------------------------------------------ *
- Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2003-2010 Jean Delvare <jdelvare@suse.de>
Based on older i2c-velleman.c driver
Copyright (C) 1995-2000 Simon G. Vogl
@@ -273,7 +273,7 @@ static void __exit i2c_parport_exit(void)
release_region(base, 3);
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("I2C bus over parallel port (light)");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 81d887869620..a27aae2d6757 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -1,7 +1,7 @@
/* ------------------------------------------------------------------------ *
* i2c-parport.c I2C bus over parallel port *
* ------------------------------------------------------------------------ *
- Copyright (C) 2003-2011 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2003-2011 Jean Delvare <jdelvare@suse.de>
Based on older i2c-philips-par.c driver
Copyright (C) 1995-2000 Simon G. Vogl
@@ -298,7 +298,7 @@ static void __exit i2c_parport_exit(void)
parport_unregister_driver(&i2c_parport_driver);
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("I2C bus over parallel port");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h
index 3fe652302ea7..e572f3aac0f7 100644
--- a/drivers/i2c/busses/i2c-parport.h
+++ b/drivers/i2c/busses/i2c-parport.h
@@ -1,7 +1,7 @@
/* ------------------------------------------------------------------------ *
* i2c-parport.h I2C bus over parallel port *
* ------------------------------------------------------------------------ *
- Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2003-2010 Jean Delvare <jdelvare@suse.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 39e2755e3f25..845f12598e79 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -12,7 +12,6 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index a028617b8f13..39dd8ec60dfd 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -22,7 +22,7 @@
Intel PIIX4, 440MX
Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
ATI IXP200, IXP300, IXP400, SB600, SB700/SP5100, SB800
- AMD Hudson-2, CZ
+ AMD Hudson-2, ML, CZ
SMSC Victory66
Note: we assume there can only be one device, with one or more
@@ -38,7 +38,6 @@
#include <linux/ioport.h>
#include <linux/i2c.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/io.h>
@@ -208,16 +207,16 @@ static int piix4_setup(struct pci_dev *PIIX4_dev,
"WARNING: SMBus interface has been FORCEFULLY ENABLED!\n");
} else {
dev_err(&PIIX4_dev->dev,
- "Host SMBus controller not enabled!\n");
+ "SMBus Host Controller not enabled!\n");
release_region(piix4_smba, SMBIOSIZE);
return -ENODEV;
}
}
if (((temp & 0x0E) == 8) || ((temp & 0x0E) == 2))
- dev_dbg(&PIIX4_dev->dev, "Using Interrupt 9 for SMBus.\n");
+ dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus\n");
else if ((temp & 0x0E) == 0)
- dev_dbg(&PIIX4_dev->dev, "Using Interrupt SMI# for SMBus.\n");
+ dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus\n");
else
dev_err(&PIIX4_dev->dev, "Illegal Interrupt configuration "
"(or code out of date)!\n");
@@ -235,7 +234,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
{
unsigned short piix4_smba;
unsigned short smba_idx = 0xcd6;
- u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en;
+ u8 smba_en_lo, smba_en_hi, smb_en, smb_en_status;
+ u8 i2ccfg, i2ccfg_offset = 0x10;
/* SB800 and later SMBus does not support forcing address */
if (force || force_addr) {
@@ -245,7 +245,15 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
}
/* Determine the address of the SMBus areas */
- smb_en = (aux) ? 0x28 : 0x2c;
+ if ((PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
+ PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+ PIIX4_dev->revision >= 0x41) ||
+ (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
+ PIIX4_dev->device == 0x790b &&
+ PIIX4_dev->revision >= 0x49))
+ smb_en = 0x00;
+ else
+ smb_en = (aux) ? 0x28 : 0x2c;
if (!request_region(smba_idx, 2, "smba_idx")) {
dev_err(&PIIX4_dev->dev, "SMBus base address index region "
@@ -258,13 +266,22 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
smba_en_hi = inb_p(smba_idx + 1);
release_region(smba_idx, 2);
- if ((smba_en_lo & 1) == 0) {
+ if (!smb_en) {
+ smb_en_status = smba_en_lo & 0x10;
+ piix4_smba = smba_en_hi << 8;
+ if (aux)
+ piix4_smba |= 0x20;
+ } else {
+ smb_en_status = smba_en_lo & 0x01;
+ piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
+ }
+
+ if (!smb_en_status) {
dev_err(&PIIX4_dev->dev,
- "Host SMBus controller not enabled!\n");
+ "SMBus Host Controller not enabled!\n");
return -ENODEV;
}
- piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
return -ENODEV;
@@ -277,7 +294,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
/* Aux SMBus does not support IRQ information */
if (aux) {
dev_info(&PIIX4_dev->dev,
- "SMBus Host Controller at 0x%x\n", piix4_smba);
+ "Auxiliary SMBus Host Controller at 0x%x\n",
+ piix4_smba);
return piix4_smba;
}
@@ -292,9 +310,9 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
release_region(piix4_smba + i2ccfg_offset, 1);
if (i2ccfg & 1)
- dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus.\n");
+ dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus\n");
else
- dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus.\n");
+ dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus\n");
dev_info(&PIIX4_dev->dev,
"SMBus Host Controller at 0x%x, revision %d\n",
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index f6389e2c9d02..8564768fee32 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index c9a352f0a9a5..dc7ff829ad78 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -628,11 +628,9 @@ static int i2c_pnx_probe(struct platform_device *pdev)
struct resource *res;
u32 speed = I2C_PNX_SPEED_KHZ_DEFAULT * 1000;
- alg_data = kzalloc(sizeof(*alg_data), GFP_KERNEL);
- if (!alg_data) {
- ret = -ENOMEM;
- goto err_kzalloc;
- }
+ alg_data = devm_kzalloc(&pdev->dev, sizeof(*alg_data), GFP_KERNEL);
+ if (!alg_data)
+ return -ENOMEM;
platform_set_drvdata(pdev, alg_data);
@@ -657,11 +655,9 @@ static int i2c_pnx_probe(struct platform_device *pdev)
*/
}
#endif
- alg_data->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(alg_data->clk)) {
- ret = PTR_ERR(alg_data->clk);
- goto out_drvdata;
- }
+ alg_data->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(alg_data->clk))
+ return PTR_ERR(alg_data->clk);
init_timer(&alg_data->mif.timer);
alg_data->mif.timer.function = i2c_pnx_timeout;
@@ -672,31 +668,13 @@ static int i2c_pnx_probe(struct platform_device *pdev)
/* Register I/O resource */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Unable to get mem resource.\n");
- ret = -EBUSY;
- goto out_clkget;
- }
- if (!request_mem_region(res->start, I2C_PNX_REGION_SIZE,
- pdev->name)) {
- dev_err(&pdev->dev,
- "I/O region 0x%08x for I2C already in use.\n",
- res->start);
- ret = -ENOMEM;
- goto out_clkget;
- }
-
- alg_data->base = res->start;
- alg_data->ioaddr = ioremap(res->start, I2C_PNX_REGION_SIZE);
- if (!alg_data->ioaddr) {
- dev_err(&pdev->dev, "Couldn't ioremap I2C I/O region\n");
- ret = -ENOMEM;
- goto out_release;
- }
+ alg_data->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(alg_data->ioaddr))
+ return PTR_ERR(alg_data->ioaddr);
ret = clk_enable(alg_data->clk);
if (ret)
- goto out_unmap;
+ return ret;
freq = clk_get_rate(alg_data->clk);
@@ -730,8 +708,8 @@ static int i2c_pnx_probe(struct platform_device *pdev)
ret = alg_data->irq;
goto out_clock;
}
- ret = request_irq(alg_data->irq, i2c_pnx_interrupt,
- 0, pdev->name, alg_data);
+ ret = devm_request_irq(&pdev->dev, alg_data->irq, i2c_pnx_interrupt,
+ 0, pdev->name, alg_data);
if (ret)
goto out_clock;
@@ -739,7 +717,7 @@ static int i2c_pnx_probe(struct platform_device *pdev)
ret = i2c_add_numbered_adapter(&alg_data->adapter);
if (ret < 0) {
dev_err(&pdev->dev, "I2C: Failed to add bus\n");
- goto out_irq;
+ goto out_clock;
}
dev_dbg(&pdev->dev, "%s: Master at %#8x, irq %d.\n",
@@ -747,19 +725,8 @@ static int i2c_pnx_probe(struct platform_device *pdev)
return 0;
-out_irq:
- free_irq(alg_data->irq, alg_data);
out_clock:
clk_disable(alg_data->clk);
-out_unmap:
- iounmap(alg_data->ioaddr);
-out_release:
- release_mem_region(res->start, I2C_PNX_REGION_SIZE);
-out_clkget:
- clk_put(alg_data->clk);
-out_drvdata:
- kfree(alg_data);
-err_kzalloc:
return ret;
}
@@ -767,13 +734,8 @@ static int i2c_pnx_remove(struct platform_device *pdev)
{
struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
- free_irq(alg_data->irq, alg_data);
i2c_del_adapter(&alg_data->adapter);
clk_disable(alg_data->clk);
- iounmap(alg_data->ioaddr);
- release_mem_region(alg_data->base, I2C_PNX_REGION_SIZE);
- clk_put(alg_data->clk);
- kfree(alg_data);
return 0;
}
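
Note on the i2c-pnx hunks above: this is a textbook devm conversion. The allocation, clock handle, MMIO mapping and IRQ are all tied to the device, so the probe error ladder collapses to just disabling the clock, and remove() only undoes what devm cannot manage (the adapter registration and the clock enable). A minimal sketch of the resulting probe shape for a hypothetical driver; names are illustrative, and the clk_prepare_enable() call is the generic idiom (i2c-pnx itself still uses plain clk_enable()):

#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct foo_priv {
        void __iomem *base;
        struct clk *clk;
        struct i2c_adapter adapter;
};

static irqreturn_t foo_irq(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        struct foo_priv *priv;
        struct resource *res;
        int irq, ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk))
                return PTR_ERR(priv->clk);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->base))
                return PTR_ERR(priv->base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;
        ret = devm_request_irq(&pdev->dev, irq, foo_irq, 0, pdev->name, priv);
        if (ret)
                return ret;

        /* Only what devm cannot manage needs explicit unwinding. */
        ret = clk_prepare_enable(priv->clk);
        if (ret)
                return ret;

        ret = i2c_add_numbered_adapter(&priv->adapter);
        if (ret)
                clk_disable_unprepare(priv->clk);
        return ret;
}
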
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 8c87f4a9793b..01e967763c2a 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -24,7 +24,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of_irq.h>
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index ac80199885be..c83fc3ccdd2b 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -17,7 +17,6 @@
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 2c2fd7c2b116..0282d4d42805 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -26,7 +26,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/i2c.h>
@@ -111,6 +110,7 @@ struct rcar_i2c_priv {
void __iomem *io;
struct i2c_adapter adap;
struct i2c_msg *msg;
+ struct clk *clk;
spinlock_t lock;
wait_queue_head_t wait;
@@ -227,18 +227,12 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
u32 bus_speed,
struct device *dev)
{
- struct clk *clkp = clk_get(dev, NULL);
u32 scgd, cdf;
u32 round, ick;
u32 scl;
u32 cdf_width;
unsigned long rate;
- if (IS_ERR(clkp)) {
- dev_err(dev, "couldn't get clock\n");
- return PTR_ERR(clkp);
- }
-
switch (priv->devtype) {
case I2C_RCAR_GEN1:
cdf_width = 2;
@@ -266,7 +260,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
* clkp : peripheral_clk
* F[] : integer up-valuation
*/
- rate = clk_get_rate(clkp);
+ rate = clk_get_rate(priv->clk);
cdf = rate / 20000000;
if (cdf >= 1 << cdf_width) {
dev_err(dev, "Input clock %lu too high\n", rate);
@@ -308,7 +302,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
scgd_find:
dev_dbg(dev, "clk %d/%d(%lu), round %u, CDF:0x%x, SCGD: 0x%x\n",
- scl, bus_speed, clk_get_rate(clkp), round, cdf, scgd);
+ scl, bus_speed, clk_get_rate(priv->clk), round, cdf, scgd);
/*
* keep icccr value
@@ -604,7 +598,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
* error handling
*/
if (rcar_i2c_flags_has(priv, ID_NACK)) {
- ret = -EREMOTEIO;
+ ret = -ENXIO;
break;
}
@@ -623,7 +617,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
pm_runtime_put(dev);
- if (ret < 0)
+ if (ret < 0 && ret != -ENXIO)
dev_err(dev, "error %d : %x\n", ret, priv->flags);
return ret;
@@ -664,6 +658,12 @@ static int rcar_i2c_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "cannot get clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
bus_speed = 100000; /* default 100 kHz */
ret = of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed);
if (ret < 0 && pdata && pdata->bus_speed)
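
Note on the i2c-rcar hunks above: the clock is now obtained once at probe with devm_clk_get() and cached in the private data rather than looked up on every rate calculation, and a NACK now yields -ENXIO instead of -EREMOTEIO, the fault code the I2C core documentation recommends when a transfer is not acknowledged. Since probing for devices legitimately produces NACKs, the driver also stops printing an error for that specific case.
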
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
new file mode 100644
index 000000000000..9e1f8bacfb39
--- /dev/null
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -0,0 +1,427 @@
+/*
+ * Renesas RIIC driver
+ *
+ * Copyright (C) 2013 Wolfram Sang <wsa@sang-engineering.com>
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+/*
+ * This i2c core has a lot of interrupts, namely 8. We use their chaining as
+ * some kind of state machine.
+ *
+ * 1) The main xfer routine kicks off a transmission by putting the start bit
+ * (or repeated start) on the bus and enabling the transmit interrupt (TIE)
+ * since we need to send the slave address + RW bit in every case.
+ *
+ * 2) TIE sends slave address + RW bit and selects how to continue.
+ *
+ * 3a) Write case: We keep utilizing TIE as long as we have data to send. If we
+ * are done, we switch over to the transmission done interrupt (TEIE) and mark
+ * the message as completed (includes sending STOP) there.
+ *
+ * 3b) Read case: We switch over to receive interrupt (RIE). One dummy read is
+ * needed to start clocking, then we keep receiving until we are done. Note
+ * that we use the RDRFS mode all the time, i.e. we ACK/NACK every byte by
+ * writing to the ACKBT bit. I tried using the RDRFS mode only at the end of a
+ * message to create the final NACK as sketched in the datasheet. This caused
+ * some subtle races (when byte n was processed and byte n+1 was already
+ * waiting), though, and I started with the safe approach.
+ *
+ * 4) If we got a NACK somewhere, we flag the error and stop the transmission
+ * via NAKIE.
+ *
+ * Also check the comments in the interrupt routines for some gory details.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define RIIC_ICCR1 0x00
+#define RIIC_ICCR2 0x04
+#define RIIC_ICMR1 0x08
+#define RIIC_ICMR3 0x10
+#define RIIC_ICSER 0x18
+#define RIIC_ICIER 0x1c
+#define RIIC_ICSR2 0x24
+#define RIIC_ICBRL 0x34
+#define RIIC_ICBRH 0x38
+#define RIIC_ICDRT 0x3c
+#define RIIC_ICDRR 0x40
+
+#define ICCR1_ICE 0x80
+#define ICCR1_IICRST 0x40
+#define ICCR1_SOWP 0x10
+
+#define ICCR2_BBSY 0x80
+#define ICCR2_SP 0x08
+#define ICCR2_RS 0x04
+#define ICCR2_ST 0x02
+
+#define ICMR1_CKS_MASK 0x70
+#define ICMR1_BCWP 0x08
+#define ICMR1_CKS(_x) ((((_x) << 4) & ICMR1_CKS_MASK) | ICMR1_BCWP)
+
+#define ICMR3_RDRFS 0x20
+#define ICMR3_ACKWP 0x10
+#define ICMR3_ACKBT 0x08
+
+#define ICIER_TIE 0x80
+#define ICIER_TEIE 0x40
+#define ICIER_RIE 0x20
+#define ICIER_NAKIE 0x10
+
+#define ICSR2_NACKF 0x10
+
+/* ICBRx (@ PCLK 33MHz) */
+#define ICBR_RESERVED 0xe0 /* Should be 1 on writes */
+#define ICBRL_SP100K (19 | ICBR_RESERVED)
+#define ICBRH_SP100K (16 | ICBR_RESERVED)
+#define ICBRL_SP400K (21 | ICBR_RESERVED)
+#define ICBRH_SP400K (9 | ICBR_RESERVED)
+
+#define RIIC_INIT_MSG -1
+
+struct riic_dev {
+ void __iomem *base;
+ u8 *buf;
+ struct i2c_msg *msg;
+ int bytes_left;
+ int err;
+ int is_last;
+ struct completion msg_done;
+ struct i2c_adapter adapter;
+ struct clk *clk;
+};
+
+struct riic_irq_desc {
+ int res_num;
+ irq_handler_t isr;
+ char *name;
+};
+
+static inline void riic_clear_set_bit(struct riic_dev *riic, u8 clear, u8 set, u8 reg)
+{
+ writeb((readb(riic->base + reg) & ~clear) | set, riic->base + reg);
+}
+
+static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+ unsigned long time_left;
+ int i, ret;
+ u8 start_bit;
+
+ ret = clk_prepare_enable(riic->clk);
+ if (ret)
+ return ret;
+
+ if (readb(riic->base + RIIC_ICCR2) & ICCR2_BBSY) {
+ riic->err = -EBUSY;
+ goto out;
+ }
+
+ reinit_completion(&riic->msg_done);
+ riic->err = 0;
+
+ writeb(0, riic->base + RIIC_ICSR2);
+
+ for (i = 0, start_bit = ICCR2_ST; i < num; i++) {
+ riic->bytes_left = RIIC_INIT_MSG;
+ riic->buf = msgs[i].buf;
+ riic->msg = &msgs[i];
+ riic->is_last = (i == num - 1);
+
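+ /* Arm the NACK and transmit interrupts; TIE will send the address byte. */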
+ writeb(ICIER_NAKIE | ICIER_TIE, riic->base + RIIC_ICIER);
+
+ writeb(start_bit, riic->base + RIIC_ICCR2);
+
+ time_left = wait_for_completion_timeout(&riic->msg_done, riic->adapter.timeout);
+ if (time_left == 0)
+ riic->err = -ETIMEDOUT;
+
+ if (riic->err)
+ break;
+
+ start_bit = ICCR2_RS;
+ }
+
+ out:
+ clk_disable_unprepare(riic->clk);
+
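+ /* Return the number of messages on success, a negative error otherwise. */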
+ return riic->err ?: num;
+}
+
+static irqreturn_t riic_tdre_isr(int irq, void *data)
+{
+ struct riic_dev *riic = data;
+ u8 val;
+
+ if (!riic->bytes_left)
+ return IRQ_NONE;
+
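+ /* First TIE of a message: prepare the slave address + RW bit. */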
+ if (riic->bytes_left == RIIC_INIT_MSG) {
+ val = !!(riic->msg->flags & I2C_M_RD);
+ if (val)
+ /* On read, switch over to receive interrupt */
+ riic_clear_set_bit(riic, ICIER_TIE, ICIER_RIE, RIIC_ICIER);
+ else
+ /* On write, initialize length */
+ riic->bytes_left = riic->msg->len;
+
+ val |= (riic->msg->addr << 1);
+ } else {
+ val = *riic->buf;
+ riic->buf++;
+ riic->bytes_left--;
+ }
+
+ /*
+ * Switch to the transmission-ended interrupt when done. Do this check
+ * here, after bytes_left has been initialized, to support SMBUS_QUICK
+ * (such a message has length 0).
+ */
+ if (riic->bytes_left == 0)
+ riic_clear_set_bit(riic, ICIER_TIE, ICIER_TEIE, RIIC_ICIER);
+
+ /*
+ * This acks the TIE interrupt. We get another TIE immediately if our
+ * value could be moved to the shadow shift register right away. So
+ * this must be after updates to ICIER (where we want to disable TIE)!
+ */
+ writeb(val, riic->base + RIIC_ICDRT);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t riic_tend_isr(int irq, void *data)
+{
+ struct riic_dev *riic = data;
+
+ if (readb(riic->base + RIIC_ICSR2) & ICSR2_NACKF) {
+ /* We got a NACK (NAKIE interrupt) */
+ readb(riic->base + RIIC_ICDRR); /* dummy read */
+ riic->err = -ENXIO;
+ } else if (riic->bytes_left) {
+ return IRQ_NONE;
+ }
+
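+ /* Send a STOP condition after the last message or on an error. */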
+ if (riic->is_last || riic->err)
+ writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
+
+ writeb(0, riic->base + RIIC_ICIER);
+ complete(&riic->msg_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t riic_rdrf_isr(int irq, void *data)
+{
+ struct riic_dev *riic = data;
+
+ if (!riic->bytes_left)
+ return IRQ_NONE;
+
+ if (riic->bytes_left == RIIC_INIT_MSG) {
+ riic->bytes_left = riic->msg->len;
+ readb(riic->base + RIIC_ICDRR); /* dummy read */
+ return IRQ_HANDLED;
+ }
+
+ if (riic->bytes_left == 1) {
+ /* STOP must come before we set ACKBT! */
+ if (riic->is_last)
+ writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
+
+ riic_clear_set_bit(riic, 0, ICMR3_ACKBT, RIIC_ICMR3);
+
+ writeb(0, riic->base + RIIC_ICIER);
+ complete(&riic->msg_done);
+ } else {
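+ /* More data expected: ACK this byte by clearing ACKBT. */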
+ riic_clear_set_bit(riic, ICMR3_ACKBT, 0, RIIC_ICMR3);
+ }
+
+ /* Reading acks the RIE interrupt */
+ *riic->buf = readb(riic->base + RIIC_ICDRR);
+ riic->buf++;
+ riic->bytes_left--;
+
+ return IRQ_HANDLED;
+}
+
+static u32 riic_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm riic_algo = {
+ .master_xfer = riic_xfer,
+ .functionality = riic_func,
+};
+
+static int riic_init_hw(struct riic_dev *riic, u32 spd)
+{
+ int ret;
+ unsigned long rate;
+
+ ret = clk_prepare_enable(riic->clk);
+ if (ret)
+ return ret;
+
+ /*
+ * TODO: Implement formula to calculate the timing values depending on
+ * variable parent clock rate and arbitrary bus speed
+ */
+ rate = clk_get_rate(riic->clk);
+ if (rate != 33325000) {
+ dev_err(&riic->adapter.dev,
+ "invalid parent clk (%lu). Must be 33325000Hz\n", rate);
+ clk_disable_unprepare(riic->clk);
+ return -EINVAL;
+ }
+
+ /* Changing the order of accessing IICRST and ICE may break things! */
+ writeb(ICCR1_IICRST | ICCR1_SOWP, riic->base + RIIC_ICCR1);
+ riic_clear_set_bit(riic, 0, ICCR1_ICE, RIIC_ICCR1);
+
+ switch (spd) {
+ case 100000:
+ writeb(ICMR1_CKS(3), riic->base + RIIC_ICMR1);
+ writeb(ICBRH_SP100K, riic->base + RIIC_ICBRH);
+ writeb(ICBRL_SP100K, riic->base + RIIC_ICBRL);
+ break;
+ case 400000:
+ writeb(ICMR1_CKS(1), riic->base + RIIC_ICMR1);
+ writeb(ICBRH_SP400K, riic->base + RIIC_ICBRH);
+ writeb(ICBRL_SP400K, riic->base + RIIC_ICBRL);
+ break;
+ default:
+ dev_err(&riic->adapter.dev,
+ "unsupported bus speed (%dHz). Use 100000 or 400000\n", spd);
+ clk_disable_unprepare(riic->clk);
+ return -EINVAL;
+ }
+
+ writeb(0, riic->base + RIIC_ICSER);
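+ /* RDRFS mode with ACKBT writes allowed (ACKWP), as the notes above require. */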
+ writeb(ICMR3_ACKWP | ICMR3_RDRFS, riic->base + RIIC_ICMR3);
+
+ riic_clear_set_bit(riic, ICCR1_IICRST, 0, RIIC_ICCR1);
+
+ clk_disable_unprepare(riic->clk);
+
+ return 0;
+}
+
+static struct riic_irq_desc riic_irqs[] = {
+ { .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" },
+ { .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" },
+ { .res_num = 2, .isr = riic_tdre_isr, .name = "riic-tdre" },
+ { .res_num = 5, .isr = riic_tend_isr, .name = "riic-nack" },
+};
+
+static int riic_i2c_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct riic_dev *riic;
+ struct i2c_adapter *adap;
+ struct resource *res;
+ u32 bus_rate = 0;
+ int i, ret;
+
+ riic = devm_kzalloc(&pdev->dev, sizeof(*riic), GFP_KERNEL);
+ if (!riic)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ riic->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(riic->base))
+ return PTR_ERR(riic->base);
+
+ riic->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(riic->clk)) {
+ dev_err(&pdev->dev, "missing controller clock");
+ return PTR_ERR(riic->clk);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(riic_irqs); i++) {
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, riic_irqs[i].res_num);
+ if (!res)
+ return -ENODEV;
+
+ ret = devm_request_irq(&pdev->dev, res->start, riic_irqs[i].isr,
+ 0, riic_irqs[i].name, riic);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq %s\n", riic_irqs[i].name);
+ return ret;
+ }
+ }
+
+ adap = &riic->adapter;
+ i2c_set_adapdata(adap, riic);
+ strlcpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name));
+ adap->owner = THIS_MODULE;
+ adap->algo = &riic_algo;
+ adap->dev.parent = &pdev->dev;
+ adap->dev.of_node = pdev->dev.of_node;
+
+ init_completion(&riic->msg_done);
+
+ of_property_read_u32(np, "clock-frequency", &bus_rate);
+ ret = riic_init_hw(riic, bus_rate);
+ if (ret)
+ return ret;
+
+ ret = i2c_add_adapter(adap);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add adapter\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, riic);
+
+ dev_info(&pdev->dev, "registered with %dHz bus speed\n", bus_rate);
+ return 0;
+}
+
+static int riic_i2c_remove(struct platform_device *pdev)
+{
+ struct riic_dev *riic = platform_get_drvdata(pdev);
+
+ writeb(0, riic->base + RIIC_ICIER);
+ i2c_del_adapter(&riic->adapter);
+
+ return 0;
+}
+
+static struct of_device_id riic_i2c_dt_ids[] = {
+ { .compatible = "renesas,riic-rz" },
+ { /* Sentinel */ },
+};
+
+static struct platform_driver riic_i2c_driver = {
+ .probe = riic_i2c_probe,
+ .remove = riic_i2c_remove,
+ .driver = {
+ .name = "i2c-riic",
+ .owner = THIS_MODULE,
+ .of_match_table = riic_i2c_dt_ids,
+ },
+};
+
+module_platform_driver(riic_i2c_driver);
+
+MODULE_DESCRIPTION("Renesas RIIC adapter");
+MODULE_AUTHOR("Wolfram Sang <wsa@sang-engineering.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, riic_i2c_dt_ids);
diff --git a/drivers/i2c/busses/i2c-robotfuzz-osif.c b/drivers/i2c/busses/i2c-robotfuzz-osif.c
new file mode 100644
index 000000000000..ced9c6a308d1
--- /dev/null
+++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c
@@ -0,0 +1,202 @@
+/*
+ * Driver for RobotFuzz OSIF
+ *
+ * Copyright (c) 2013 Andrew Lunn <andrew@lunn.ch>
+ * Copyright (c) 2007 Barry Carter <Barry.Carter@robotfuzz.com>
+ *
+ * Based on the i2c-tiny-usb by
+ *
+ * Copyright (C) 2006 Til Harbaum (Till@Harbaum.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#define OSIFI2C_READ 20
+#define OSIFI2C_WRITE 21
+#define OSIFI2C_STOP 22
+#define OSIFI2C_STATUS 23
+#define OSIFI2C_SET_BIT_RATE 24
+
+#define STATUS_ADDRESS_ACK 0
+#define STATUS_ADDRESS_NAK 2
+
+struct osif_priv {
+ struct usb_device *usb_dev;
+ struct usb_interface *interface;
+ struct i2c_adapter adapter;
+ unsigned char status;
+};
+
+static int osif_usb_read(struct i2c_adapter *adapter, int cmd,
+ int value, int index, void *data, int len)
+{
+ struct osif_priv *priv = adapter->algo_data;
+
+ return usb_control_msg(priv->usb_dev, usb_rcvctrlpipe(priv->usb_dev, 0),
+ cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
+ USB_DIR_IN, value, index, data, len, 2000);
+}
+
+static int osif_usb_write(struct i2c_adapter *adapter, int cmd,
+ int value, int index, void *data, int len)
+{
+
+ struct osif_priv *priv = adapter->algo_data;
+
+ return usb_control_msg(priv->usb_dev, usb_sndctrlpipe(priv->usb_dev, 0),
+ cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ value, index, data, len, 2000);
+}
+
+static int osif_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ int num)
+{
+ struct osif_priv *priv = adapter->algo_data;
+ struct i2c_msg *pmsg;
+ int ret = 0;
+ int i, cmd;
+
+ for (i = 0; ret >= 0 && i < num; i++) {
+ pmsg = &msgs[i];
+
+ if (pmsg->flags & I2C_M_RD) {
+ cmd = OSIFI2C_READ;
+
+ ret = osif_usb_read(adapter, cmd, pmsg->flags,
+ pmsg->addr, pmsg->buf,
+ pmsg->len);
+ if (ret != pmsg->len) {
+ dev_err(&adapter->dev, "failure reading data\n");
+ return -EREMOTEIO;
+ }
+ } else {
+ cmd = OSIFI2C_WRITE;
+
+ ret = osif_usb_write(adapter, cmd, pmsg->flags,
+ pmsg->addr, pmsg->buf, pmsg->len);
+ if (ret != pmsg->len) {
+ dev_err(&adapter->dev, "failure writing data\n");
+ return -EREMOTEIO;
+ }
+ }
+
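+ /* Terminate this message with a STOP on the bus. */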
+ ret = osif_usb_read(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
+ if (ret) {
+ dev_err(&adapter->dev, "failure sending STOP\n");
+ return -EREMOTEIO;
+ }
+
+ /* read status */
+ ret = osif_usb_read(adapter, OSIFI2C_STATUS, 0, 0,
+ &priv->status, 1);
+ if (ret != 1) {
+ dev_err(&adapter->dev, "failure reading status\n");
+ return -EREMOTEIO;
+ }
+
+ if (priv->status != STATUS_ADDRESS_ACK) {
+ dev_dbg(&adapter->dev, "status = %d\n", priv->status);
+ return -EREMOTEIO;
+ }
+ }
+
+ return i;
+}
+
+static u32 osif_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm osif_algorithm = {
+ .master_xfer = osif_xfer,
+ .functionality = osif_func,
+};
+
+#define USB_OSIF_VENDOR_ID 0x1964
+#define USB_OSIF_PRODUCT_ID 0x0001
+
+static struct usb_device_id osif_table[] = {
+ { USB_DEVICE(USB_OSIF_VENDOR_ID, USB_OSIF_PRODUCT_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, osif_table);
+
+static int osif_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ int ret;
+ struct osif_priv *priv;
+ u16 version;
+
+ priv = devm_kzalloc(&interface->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->usb_dev = usb_get_dev(interface_to_usbdev(interface));
+ priv->interface = interface;
+
+ usb_set_intfdata(interface, priv);
+
+ priv->adapter.owner = THIS_MODULE;
+ priv->adapter.class = I2C_CLASS_HWMON;
+ priv->adapter.algo = &osif_algorithm;
+ priv->adapter.algo_data = priv;
+ snprintf(priv->adapter.name, sizeof(priv->adapter.name),
+ "OSIF at bus %03d device %03d",
+ priv->usb_dev->bus->busnum, priv->usb_dev->devnum);
+
+ /*
+ * Set the bus frequency. The frequency is:
+ * 120,000,000 / (16 + 2 * div * 4^prescale).
+ * Using div = 52, prescale = 0 gives 100 kHz.
+ */
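+ /* With div = 52 and prescale = 0 the divisor is 16 + 2 * 52 * 1 = 120. */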
+ ret = osif_usb_read(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
+ NULL, 0);
+ if (ret) {
+ dev_err(&interface->dev, "failure sending bit rate");
+ usb_put_dev(priv->usb_dev);
+ return ret;
+ }
+
+ i2c_add_adapter(&(priv->adapter));
+
+ version = le16_to_cpu(priv->usb_dev->descriptor.bcdDevice);
+ dev_info(&interface->dev,
+ "version %x.%02x found at bus %03d address %03d",
+ version >> 8, version & 0xff,
+ priv->usb_dev->bus->busnum, priv->usb_dev->devnum);
+
+ return 0;
+}
+
+static void osif_disconnect(struct usb_interface *interface)
+{
+ struct osif_priv *priv = usb_get_intfdata(interface);
+
+ i2c_del_adapter(&(priv->adapter));
+ usb_set_intfdata(interface, NULL);
+ usb_put_dev(priv->usb_dev);
+}
+
+static struct usb_driver osif_driver = {
+ .name = "RobotFuzz Open Source InterFace, OSIF",
+ .probe = osif_probe,
+ .disconnect = osif_disconnect,
+ .id_table = osif_table,
+};
+
+module_usb_driver(osif_driver);
+
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
+MODULE_AUTHOR("Barry Carter <barry.carter@robotfuzz.com>");
+MODULE_DESCRIPTION("RobotFuzz OSIF driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index bf8fb94ebc5d..684d21e71e4a 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -86,6 +86,7 @@
#define QUIRK_S3C2440 (1 << 0)
#define QUIRK_HDMIPHY (1 << 1)
#define QUIRK_NO_GPIO (1 << 2)
+#define QUIRK_POLL (1 << 3)
/* Max time to wait for bus to become idle after a xfer (in us) */
#define S3C2410_IDLE_TIMEOUT 5000
@@ -101,7 +102,7 @@ enum s3c24xx_i2c_state {
struct s3c24xx_i2c {
wait_queue_head_t wait;
- unsigned int quirks;
+ kernel_ulong_t quirks;
unsigned int suspended:1;
struct i2c_msg *msg;
@@ -123,7 +124,7 @@ struct s3c24xx_i2c {
struct s3c2410_platform_i2c *pdata;
int gpios[2];
struct pinctrl *pctrl;
-#ifdef CONFIG_CPU_FREQ
+#if defined(CONFIG_ARM_S3C24XX_CPUFREQ)
struct notifier_block freq_transition;
#endif
};
@@ -142,6 +143,8 @@ static struct platform_device_id s3c24xx_driver_ids[] = {
};
MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
+static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat);
+
#ifdef CONFIG_OF
static const struct of_device_id s3c24xx_i2c_match[] = {
{ .compatible = "samsung,s3c2410-i2c", .data = (void *)0 },
@@ -150,6 +153,8 @@ static const struct of_device_id s3c24xx_i2c_match[] = {
.data = (void *)(QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO) },
{ .compatible = "samsung,exynos5440-i2c",
.data = (void *)(QUIRK_S3C2440 | QUIRK_NO_GPIO) },
+ { .compatible = "samsung,exynos5-sata-phy-i2c",
+ .data = (void *)(QUIRK_S3C2440 | QUIRK_POLL | QUIRK_NO_GPIO) },
{},
};
MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
@@ -160,12 +165,12 @@ MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
* Get controller type either from device tree or platform device variant.
*/
-static inline unsigned int s3c24xx_get_device_quirks(struct platform_device *pdev)
+static inline kernel_ulong_t s3c24xx_get_device_quirks(struct platform_device *pdev)
{
if (pdev->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(s3c24xx_i2c_match, pdev->dev.of_node);
- return (unsigned int)match->data;
+ return (kernel_ulong_t)match->data;
}
return platform_get_device_id(pdev)->driver_data;
@@ -188,7 +193,8 @@ static inline void s3c24xx_i2c_master_complete(struct s3c24xx_i2c *i2c, int ret)
if (ret)
i2c->msg_idx = ret;
- wake_up(&i2c->wait);
+ if (!(i2c->quirks & QUIRK_POLL))
+ wake_up(&i2c->wait);
}
static inline void s3c24xx_i2c_disable_ack(struct s3c24xx_i2c *i2c)
@@ -225,6 +231,22 @@ static inline void s3c24xx_i2c_enable_irq(struct s3c24xx_i2c *i2c)
writel(tmp | S3C2410_IICCON_IRQEN, i2c->regs + S3C2410_IICCON);
}
+static bool is_ack(struct s3c24xx_i2c *i2c)
+{
+ int tries;
+
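+ /* Poll for a pending interrupt; a cleared LASTBIT means the slave ACKed. */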
+ for (tries = 50; tries; --tries) {
+ if (readl(i2c->regs + S3C2410_IICCON)
+ & S3C2410_IICCON_IRQPEND) {
+ if (!(readl(i2c->regs + S3C2410_IICSTAT)
+ & S3C2410_IICSTAT_LASTBIT))
+ return true;
+ }
+ usleep_range(1000, 2000);
+ }
+ dev_err(i2c->dev, "ack was not recieved\n");
+ return false;
+}
/* s3c24xx_i2c_message_start
*
@@ -269,6 +291,16 @@ static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c,
stat |= S3C2410_IICSTAT_START;
writel(stat, i2c->regs + S3C2410_IICSTAT);
+
+ if (i2c->quirks & QUIRK_POLL) {
+ while ((i2c->msg_num != 0) && is_ack(i2c)) {
+ i2c_s3c_irq_nextbyte(i2c, stat);
+ stat = readl(i2c->regs + S3C2410_IICSTAT);
+
+ if (stat & S3C2410_IICSTAT_ARBITR)
+ dev_err(i2c->dev, "deal with arbitration loss\n");
+ }
+ }
}
static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret)
@@ -676,6 +708,15 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
s3c24xx_i2c_enable_irq(i2c);
s3c24xx_i2c_message_start(i2c, msgs);
+ if (i2c->quirks & QUIRK_POLL) {
+ ret = i2c->msg_idx;
+
+ if (ret != num)
+ dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
+
+ goto out;
+ }
+
timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
ret = i2c->msg_idx;
@@ -821,6 +862,9 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
if (div1 == 512)
iiccon |= S3C2410_IICCON_TXDIV_512;
+ if (i2c->quirks & QUIRK_POLL)
+ iiccon |= S3C2410_IICCON_SCALE(2);
+
writel(iiccon, i2c->regs + S3C2410_IICCON);
if (i2c->quirks & QUIRK_S3C2440) {
@@ -843,7 +887,7 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
return 0;
}
-#ifdef CONFIG_CPU_FREQ
+#if defined(CONFIG_ARM_S3C24XX_CPUFREQ)
#define freq_to_i2c(_n) container_of(_n, struct s3c24xx_i2c, freq_transition)
@@ -1118,18 +1162,20 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
* ensure no current IRQs pending
*/
- i2c->irq = ret = platform_get_irq(pdev, 0);
- if (ret <= 0) {
- dev_err(&pdev->dev, "cannot find IRQ\n");
- return ret;
- }
+ if (!(i2c->quirks & QUIRK_POLL)) {
+ i2c->irq = ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
+ dev_err(&pdev->dev, "cannot find IRQ\n");
+ return ret;
+ }
ret = devm_request_irq(&pdev->dev, i2c->irq, s3c24xx_i2c_irq, 0,
- dev_name(&pdev->dev), i2c);
+ dev_name(&pdev->dev), i2c);
- if (ret != 0) {
- dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
- return ret;
+ if (ret != 0) {
+ dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
+ return ret;
+ }
}
ret = s3c24xx_i2c_register_cpufreq(i2c);
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 599235514138..dfc98df7b1b6 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index 5e8f136e233f..d76f3d9737ec 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 4fc87e7c94c9..294c80f21d65 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 36a9556d7cfa..19b8505d0cdd 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -45,7 +45,6 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index b9faf9b6002b..f8aa0c29f02b 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -36,7 +36,6 @@
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index 6ffa56e08517..057602683553 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -3,7 +3,7 @@
* These devices include an I2C master which can be controlled over the
* serial port.
*
- * Copyright (C) 2007 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2007 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -321,7 +321,7 @@ static void __exit taos_exit(void)
serio_unregister_driver(&taos_drv);
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("TAOS evaluation module driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index e661edee4d0c..9704537aee3c 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -27,7 +27,7 @@
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/module.h>
-#include <linux/clk/tegra.h>
+#include <linux/reset.h>
#include <asm/unaligned.h>
@@ -160,6 +160,7 @@ struct tegra_i2c_dev {
struct i2c_adapter adapter;
struct clk *div_clk;
struct clk *fast_clk;
+ struct reset_control *rst;
void __iomem *base;
int cont_id;
int irq;
@@ -415,9 +416,9 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
return err;
}
- tegra_periph_reset_assert(i2c_dev->div_clk);
+ reset_control_assert(i2c_dev->rst);
udelay(2);
- tegra_periph_reset_deassert(i2c_dev->div_clk);
+ reset_control_deassert(i2c_dev->rst);
if (i2c_dev->is_dvc)
tegra_dvc_init(i2c_dev);
@@ -743,6 +744,12 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->cont_id = pdev->id;
i2c_dev->dev = &pdev->dev;
+ i2c_dev->rst = devm_reset_control_get(&pdev->dev, "i2c");
+ if (IS_ERR(i2c_dev->rst)) {
+ dev_err(&pdev->dev, "missing controller reset");
+ return PTR_ERR(i2c_dev->rst);
+ }
+
ret = of_property_read_u32(i2c_dev->dev->of_node, "clock-frequency",
&i2c_dev->bus_clk_rate);
if (ret)
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index e7d3b755af3b..0ed77eeff31e 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -162,7 +162,6 @@ static const struct i2c_algorithm usb_algorithm = {
static const struct usb_device_id i2c_tiny_usb_table[] = {
{ USB_DEVICE(0x0403, 0xc631) }, /* FTDI */
{ USB_DEVICE(0x1c40, 0x0534) }, /* EZPrototypes */
- { USB_DEVICE(0x1964, 0x0001) }, /* Robofuzz OSIF */
{ } /* Terminating entry */
};
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index be662511c58b..49d7f14b9d27 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index b2d90e105f41..40d36df678de 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -2,7 +2,7 @@
Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl>,
Philip Edelbrock <phil@netroedge.com>, Kyösti Mälkki <kmalkki@cc.hut.fi>,
Mark D. Studebaker <mdsxyz123@yahoo.com>
- Copyright (C) 2005 - 2008 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2005 - 2008 Jean Delvare <jdelvare@suse.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -503,7 +503,7 @@ static void __exit i2c_vt596_exit(void)
MODULE_AUTHOR("Kyosti Malkki <kmalkki@cc.hut.fi>, "
"Mark D. Studebaker <mdsxyz123@yahoo.com> and "
- "Jean Delvare <khali@linux-fr.org>");
+ "Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("vt82c596 SMBus driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index c68450cd8d5f..7533fa34d737 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -118,8 +118,7 @@ static int vprbrd_i2c_addr(struct usb_device *usb_dev,
static int vprbrd_i2c_read(struct vprbrd *vb, struct i2c_msg *msg)
{
int ret;
- u16 remain_len, bytes_xfer, len1, len2,
- start = 0x0000;
+ u16 remain_len, len1, len2, start = 0x0000;
struct vprbrd_i2c_read_msg *rmsg =
(struct vprbrd_i2c_read_msg *)vb->buf;
@@ -166,7 +165,6 @@ static int vprbrd_i2c_read(struct vprbrd *vb, struct i2c_msg *msg)
rmsg->header.len3 = remain_len - 512;
rmsg->header.len4 = 0x00;
rmsg->header.len5 = 0x00;
- bytes_xfer = remain_len;
remain_len = 0;
} else if (remain_len <= 1022) {
len1 = 512;
@@ -367,7 +365,7 @@ static int vprbrd_i2c_probe(struct platform_device *pdev)
int ret;
int pipe;
- vb_i2c = kzalloc(sizeof(*vb_i2c), GFP_KERNEL);
+ vb_i2c = devm_kzalloc(&pdev->dev, sizeof(*vb_i2c), GFP_KERNEL);
if (vb_i2c == NULL)
return -ENOMEM;
@@ -394,14 +392,12 @@ static int vprbrd_i2c_probe(struct platform_device *pdev)
if (ret != 1) {
dev_err(&pdev->dev,
"failure setting i2c_bus_freq to %d\n", i2c_bus_freq);
- ret = -EIO;
- goto error;
+ return -EIO;
}
} else {
dev_err(&pdev->dev,
"invalid i2c_bus_freq setting:%d\n", i2c_bus_freq);
- ret = -EIO;
- goto error;
+ return -EIO;
}
vb_i2c->i2c.dev.parent = &pdev->dev;
@@ -412,10 +408,6 @@ static int vprbrd_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vb_i2c);
return 0;
-
-error:
- kfree(vb_i2c);
- return ret;
}
static int vprbrd_i2c_remove(struct platform_device *pdev)
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index fc2716afdfd9..28107502517f 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -30,8 +30,8 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/errno.h>
+#include <linux/err.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
@@ -69,7 +69,7 @@ struct xiic_i2c {
struct i2c_adapter adap;
struct i2c_msg *tx_msg;
spinlock_t lock;
- unsigned int tx_pos;
+ unsigned int tx_pos;
unsigned int nmsgs;
enum xilinx_i2c_state state;
struct i2c_msg *rx_msg;
@@ -272,8 +272,8 @@ static void xiic_read_rx(struct xiic_i2c *i2c)
bytes_in_fifo = xiic_getreg8(i2c, XIIC_RFO_REG_OFFSET) + 1;
- dev_dbg(i2c->adap.dev.parent, "%s entry, bytes in fifo: %d, msg: %d"
- ", SR: 0x%x, CR: 0x%x\n",
+ dev_dbg(i2c->adap.dev.parent,
+ "%s entry, bytes in fifo: %d, msg: %d, SR: 0x%x, CR: 0x%x\n",
__func__, bytes_in_fifo, xiic_rx_space(i2c),
xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
@@ -340,9 +340,10 @@ static void xiic_process(struct xiic_i2c *i2c)
ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
pend = isr & ier;
- dev_dbg(i2c->adap.dev.parent, "%s entry, IER: 0x%x, ISR: 0x%x, "
- "pend: 0x%x, SR: 0x%x, msg: %p, nmsgs: %d\n",
- __func__, ier, isr, pend, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
+ dev_dbg(i2c->adap.dev.parent, "%s: IER: 0x%x, ISR: 0x%x, pend: 0x%x\n",
+ __func__, ier, isr, pend);
+ dev_dbg(i2c->adap.dev.parent, "%s: SR: 0x%x, msg: %p, nmsgs: %d\n",
+ __func__, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
i2c->tx_msg, i2c->nmsgs);
/* Do not processes a devices interrupts if the device has no
@@ -542,9 +543,10 @@ static void xiic_start_send(struct xiic_i2c *i2c)
xiic_irq_clr(i2c, XIIC_INTR_TX_ERROR_MASK);
- dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d, "
- "ISR: 0x%x, CR: 0x%x\n",
- __func__, msg, msg->len, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
+ dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d",
+ __func__, msg, msg->len);
+ dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n",
+ __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
if (!(msg->flags & I2C_M_NOSTART)) {
@@ -695,33 +697,21 @@ static int xiic_i2c_probe(struct platform_device *pdev)
int ret, irq;
u8 i;
+ i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- goto resource_missing;
+ i2c->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(i2c->base))
+ return PTR_ERR(i2c->base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- goto resource_missing;
+ return irq;
pdata = dev_get_platdata(&pdev->dev);
- i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
- if (!i2c)
- return -ENOMEM;
-
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "Memory region busy\n");
- ret = -EBUSY;
- goto request_mem_failed;
- }
-
- i2c->base = ioremap(res->start, resource_size(res));
- if (!i2c->base) {
- dev_err(&pdev->dev, "Unable to map registers\n");
- ret = -EIO;
- goto map_failed;
- }
-
/* hook up driver to tree */
platform_set_drvdata(pdev, i2c);
i2c->adap = xiic_adapter;
@@ -729,21 +719,23 @@ static int xiic_i2c_probe(struct platform_device *pdev)
i2c->adap.dev.parent = &pdev->dev;
i2c->adap.dev.of_node = pdev->dev.of_node;
- xiic_reinit(i2c);
-
spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
- ret = request_irq(irq, xiic_isr, 0, pdev->name, i2c);
- if (ret) {
+
+ ret = devm_request_irq(&pdev->dev, irq, xiic_isr, 0, pdev->name, i2c);
+ if (ret < 0) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- goto request_irq_failed;
+ return ret;
}
+ xiic_reinit(i2c);
+
/* add i2c adapter to i2c tree */
ret = i2c_add_adapter(&i2c->adap);
if (ret) {
dev_err(&pdev->dev, "Failed to add adapter\n");
- goto add_adapter_failed;
+ xiic_deinit(i2c);
+ return ret;
}
if (pdata) {
@@ -753,43 +745,17 @@ static int xiic_i2c_probe(struct platform_device *pdev)
}
return 0;
-
-add_adapter_failed:
- free_irq(irq, i2c);
-request_irq_failed:
- xiic_deinit(i2c);
- iounmap(i2c->base);
-map_failed:
- release_mem_region(res->start, resource_size(res));
-request_mem_failed:
- kfree(i2c);
-
- return ret;
-resource_missing:
- dev_err(&pdev->dev, "IRQ or Memory resource is missing\n");
- return -ENOENT;
}
static int xiic_i2c_remove(struct platform_device *pdev)
{
struct xiic_i2c *i2c = platform_get_drvdata(pdev);
- struct resource *res;
/* remove adapter & data */
i2c_del_adapter(&i2c->adap);
xiic_deinit(i2c);
- free_irq(platform_get_irq(pdev, 0), i2c);
-
- iounmap(i2c->base);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
- release_mem_region(res->start, resource_size(res));
-
- kfree(i2c);
-
return 0;
}
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 7945b05d3ea0..17f7352eca6b 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/errno.h>
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
index ae1258b95d60..8eadf0f47ad7 100644
--- a/drivers/i2c/busses/scx200_i2c.c
+++ b/drivers/i2c/busses/scx200_i2c.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/io.h>
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index d74c0b34248e..5fb80b8962a2 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -21,7 +21,7 @@
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi>.
All SMBus-related things are written by Frodo Looijaard <frodol@dds.nl>
SMBus 2.0 support by Mark Studebaker <mdsxyz123@yahoo.com> and
- Jean Delvare <khali@linux-fr.org>
+ Jean Delvare <jdelvare@suse.de>
Mux support by Rodolfo Giometti <giometti@enneenne.com> and
Michael Lawnick <michael.lawnick.ext@nsn.com>
OF support is copyright (c) 2008 Jochen Friedrich <jochen@scram.de>
@@ -104,6 +104,11 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct i2c_client *client = to_i2c_client(dev);
+ int rc;
+
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
if (add_uevent_var(env, "MODALIAS=%s%s",
I2C_MODULE_PREFIX, client->name))
@@ -256,10 +261,9 @@ static int i2c_device_probe(struct device *dev)
acpi_dev_pm_attach(&client->dev, true);
status = driver->probe(client, i2c_match_id(driver->id_table, client));
- if (status) {
- i2c_set_clientdata(client, NULL);
+ if (status)
acpi_dev_pm_detach(&client->dev, true);
- }
+
return status;
}
@@ -267,7 +271,7 @@ static int i2c_device_remove(struct device *dev)
{
struct i2c_client *client = i2c_verify_client(dev);
struct i2c_driver *driver;
- int status;
+ int status = 0;
if (!client || !dev->driver)
return 0;
@@ -276,12 +280,8 @@ static int i2c_device_remove(struct device *dev)
if (driver->remove) {
dev_dbg(dev, "remove\n");
status = driver->remove(client);
- } else {
- dev->driver = NULL;
- status = 0;
}
- if (status == 0)
- i2c_set_clientdata(client, NULL);
+
acpi_dev_pm_detach(&client->dev, true);
return status;
}
@@ -409,6 +409,12 @@ static ssize_t
show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
+ int len;
+
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
+
return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name);
}
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index c99b22987366..fc99f0d6b4a5 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -2,7 +2,7 @@
* i2c-smbus.c - SMBus extensions to the I2C protocol
*
* Copyright (C) 2008 David Brownell
- * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -246,6 +246,6 @@ EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert);
module_i2c_driver(smbalert_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("SMBus protocol extensions support");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/i2c-stub.c b/drivers/i2c/i2c-stub.c
index d0a9c590c3cd..77e4849d2f2a 100644
--- a/drivers/i2c/i2c-stub.c
+++ b/drivers/i2c/i2c-stub.c
@@ -2,7 +2,7 @@
i2c-stub.c - I2C/SMBus chip emulator
Copyright (c) 2004 Mark M. Hoffman <mhoffman@lightlink.com>
- Copyright (C) 2007, 2012 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2007, 2012 Jean Delvare <jdelvare@suse.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index c58e093b6032..69afffa8f427 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 8a8c56f4b026..d8989c823f50 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -12,7 +12,6 @@
#include <linux/i2c-mux.h>
#include <linux/i2c-mux-gpio.h>
#include <linux/platform_device.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gpio.h>
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index c4f08ad31183..cb772775da43 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -17,7 +17,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index bad5b84a5985..550bd36aa5d6 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -28,21 +28,21 @@
* Based on:
* i2c-virtual_cb.c from Brian Kuschak <bkuschak@yahoo.com>
* and
- * pca9540.c from Jean Delvare <khali@linux-fr.org>.
+ * pca9540.c from Jean Delvare <jdelvare@suse.de>.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
#include <linux/device.h>
+#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
-
#include <linux/i2c/pca954x.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/slab.h>
#define PCA954X_MAX_NCHANS 8
@@ -186,28 +186,43 @@ static int pca954x_probe(struct i2c_client *client,
{
struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct device_node *np = client->dev.of_node;
int num, force, class;
struct pca954x *data;
- int ret = -ENODEV;
+ int ret;
if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE))
- goto err;
+ return -ENODEV;
- data = kzalloc(sizeof(struct pca954x), GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto err;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct pca954x), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
+ if (IS_ENABLED(CONFIG_OF) && np) {
+ enum of_gpio_flags flags;
+ int gpio;
+
+ /* Get the mux out of reset if a reset GPIO is specified. */
+ gpio = of_get_named_gpio_flags(np, "reset-gpio", 0, &flags);
+ if (gpio_is_valid(gpio)) {
+ ret = devm_gpio_request_one(&client->dev, gpio,
+ flags & OF_GPIO_ACTIVE_LOW ?
+ GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+ "pca954x reset");
+ if (ret < 0)
+ return ret;
+ }
+ }
+
/* Write the mux register at addr to verify
* that the mux is in fact present. This also
* initializes the mux to disconnected state.
*/
if (i2c_smbus_write_byte(client, 0) < 0) {
dev_warn(&client->dev, "probe failed\n");
- goto exit_free;
+ return -ENODEV;
}
data->type = id->driver_data;
@@ -252,9 +267,6 @@ static int pca954x_probe(struct i2c_client *client,
virt_reg_failed:
for (num--; num >= 0; num--)
i2c_del_mux_adapter(data->virt_adaps[num]);
-exit_free:
- kfree(data);
-err:
return ret;
}
@@ -270,7 +282,6 @@ static int pca954x_remove(struct i2c_client *client)
data->virt_adaps[i] = NULL;
}
- kfree(data);
return 0;
}
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index d7978dc4ad0b..4ff0ef3e07a6 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -18,7 +18,6 @@
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/i2c-mux-pinctrl.h>
diff --git a/drivers/ide/buddha.c b/drivers/ide/buddha.c
index b1d38590ac01..46eaf58d881b 100644
--- a/drivers/ide/buddha.c
+++ b/drivers/ide/buddha.c
@@ -198,7 +198,7 @@ fail_base2:
continue;
}
}
- buddha_board = ZTWO_VADDR(board);
+ buddha_board = (unsigned long)ZTWO_VADDR(board);
/* write to BUDDHA_IRQ_MR to enable the board IRQ */
/* X-Surf doesn't have this. IRQs are always on */
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index d9e1f7ccfe6f..b6940992a6ff 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -14,7 +14,6 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <acpi/acpi.h>
#include <linux/ide.h>
#include <linux/pci.h>
#include <linux/dmi.h>
@@ -98,6 +97,17 @@ bool ide_port_acpi(ide_hwif_t *hwif)
return ide_noacpi == 0 && hwif->acpidata;
}
+static acpi_handle acpi_get_child(acpi_handle handle, u64 addr)
+{
+ struct acpi_device *adev;
+
+ if (!handle || acpi_bus_get_device(handle, &adev))
+ return NULL;
+
+ adev = acpi_find_child_device(adev, addr, false);
+ return adev ? adev->handle : NULL;
+}
+
/**
* ide_get_dev_handle - finds acpi_handle and PCI device.function
* @dev: device to locate
diff --git a/drivers/ide/ide-cd_verbose.c b/drivers/ide/ide-cd_verbose.c
index 6490a2dea96b..f079ca2f260b 100644
--- a/drivers/ide/ide-cd_verbose.c
+++ b/drivers/ide/ide-cd_verbose.c
@@ -9,7 +9,9 @@
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/cdrom.h>
+#include <linux/ide.h>
#include <scsi/scsi.h>
+#include "ide-cd.h"
#ifndef CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS
void ide_cd_log_error(const char *name, struct request *failed_command,
diff --git a/drivers/ide/ide-pio-blacklist.c b/drivers/ide/ide-pio-blacklist.c
index a8c2c8f8660a..40e683a84ff9 100644
--- a/drivers/ide/ide-pio-blacklist.c
+++ b/drivers/ide/ide-pio-blacklist.c
@@ -7,6 +7,7 @@
*/
#include <linux/string.h>
+#include <linux/ide.h>
static struct ide_pio_info {
const char *name;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 92d1206482a6..8e1939f564f4 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -123,7 +123,7 @@ static struct cpuidle_state *cpuidle_state_table;
* which is also the index into the MWAIT hint array.
* Thus C0 is a dummy.
*/
-static struct cpuidle_state nehalem_cstates[] __initdata = {
+static struct cpuidle_state nehalem_cstates[] = {
{
.name = "C1-NHM",
.desc = "MWAIT 0x00",
@@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.enter = NULL }
};
-static struct cpuidle_state snb_cstates[] __initdata = {
+static struct cpuidle_state snb_cstates[] = {
{
.name = "C1-SNB",
.desc = "MWAIT 0x00",
@@ -196,7 +196,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.enter = NULL }
};
-static struct cpuidle_state ivb_cstates[] __initdata = {
+static struct cpuidle_state ivb_cstates[] = {
{
.name = "C1-IVB",
.desc = "MWAIT 0x00",
@@ -236,7 +236,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.enter = NULL }
};
-static struct cpuidle_state hsw_cstates[] __initdata = {
+static struct cpuidle_state hsw_cstates[] = {
{
.name = "C1-HSW",
.desc = "MWAIT 0x00",
@@ -297,7 +297,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.enter = NULL }
};
-static struct cpuidle_state atom_cstates[] __initdata = {
+static struct cpuidle_state atom_cstates[] = {
{
.name = "C1E-ATM",
.desc = "MWAIT 0x00",
@@ -329,7 +329,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
{
.enter = NULL }
};
-static struct cpuidle_state avn_cstates[] __initdata = {
+static struct cpuidle_state avn_cstates[] = {
{
.name = "C1-AVN",
.desc = "MWAIT 0x00",
@@ -344,6 +344,8 @@ static struct cpuidle_state avn_cstates[] __initdata = {
.exit_latency = 15,
.target_residency = 45,
.enter = &intel_idle },
+ {
+ .enter = NULL }
};
/**
@@ -375,13 +377,7 @@ static int intel_idle(struct cpuidle_device *dev,
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
- if (!current_set_polling_and_test()) {
-
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __mwait(eax, ecx);
- }
+ mwait_idle_with_hints(eax, ecx);
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
@@ -639,39 +635,10 @@ static int __init intel_idle_cpuidle_driver_init(void)
*/
static int intel_idle_cpu_init(int cpu)
{
- int cstate;
struct cpuidle_device *dev;
dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
- dev->state_count = 1;
-
- for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
- int num_substates, mwait_hint, mwait_cstate, mwait_substate;
-
- if (cpuidle_state_table[cstate].enter == NULL)
- break;
-
- if (cstate + 1 > max_cstate) {
- printk(PREFIX "max_cstate %d reached\n", max_cstate);
- break;
- }
-
- mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
- mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
- mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
-
- /* does the state exist in CPUID.MWAIT? */
- num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
- & MWAIT_SUBSTATE_MASK;
-
- /* if sub-state in table is not enumerated by CPUID */
- if ((mwait_substate + 1) > num_substates)
- continue;
-
- dev->state_count += 1;
- }
-
dev->cpu = cpu;
if (cpuidle_register_device(dev)) {
@@ -683,6 +650,9 @@ static int intel_idle_cpu_init(int cpu)
if (icpu->auto_demotion_disable_flags)
smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
+ if (icpu->disable_promotion_to_c1e)
+ smp_call_function_single(cpu, c1e_promotion_disable, NULL, 1);
+
return 0;
}
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 90cf0cda50c4..5dd0e120a504 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -65,9 +65,11 @@ source "drivers/iio/common/Kconfig"
source "drivers/iio/dac/Kconfig"
source "drivers/iio/frequency/Kconfig"
source "drivers/iio/gyro/Kconfig"
+source "drivers/iio/humidity/Kconfig"
source "drivers/iio/imu/Kconfig"
source "drivers/iio/light/Kconfig"
source "drivers/iio/magnetometer/Kconfig"
+source "drivers/iio/orientation/Kconfig"
if IIO_TRIGGER
source "drivers/iio/trigger/Kconfig"
endif #IIO_TRIGGER
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index bcf7e9e3b053..887d39090d75 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -18,9 +18,11 @@ obj-y += common/
obj-y += dac/
obj-y += gyro/
obj-y += frequency/
+obj-y += humidity/
obj-y += imu/
obj-y += light/
obj-y += magnetometer/
+obj-y += orientation/
obj-y += pressure/
obj-y += temperature/
obj-y += trigger/
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 28b39283bccf..bfec313492b3 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -447,23 +447,28 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
{ },
};
-#define BMA180_CHANNEL(_index) { \
+#define BMA180_CHANNEL(_axis) { \
.type = IIO_ACCEL, \
- .indexed = 1, \
- .channel = (_index), \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .scan_index = (_index), \
- .scan_type = IIO_ST('s', 14, 16, 2), \
+ .scan_index = AXIS_##_axis, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 14, \
+ .storagebits = 16, \
+ .shift = 2, \
+ }, \
.ext_info = bma180_ext_info, \
}
static const struct iio_chan_spec bma180_channels[] = {
- BMA180_CHANNEL(AXIS_X),
- BMA180_CHANNEL(AXIS_Y),
- BMA180_CHANNEL(AXIS_Z),
- IIO_CHAN_SOFT_TIMESTAMP(4),
+ BMA180_CHANNEL(X),
+ BMA180_CHANNEL(Y),
+ BMA180_CHANNEL(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
};
static irqreturn_t bma180_trigger_handler(int irq, void *p)
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 1cae4e920c9b..3dcdbad65456 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -262,6 +262,18 @@ static int accel_3d_parse_report(struct platform_device *pdev,
st->accel[1].index, st->accel[1].report_id,
st->accel[2].index, st->accel[2].report_id);
+ /* Set Sensitivity field ids when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_ACCELERATION,
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
+
return ret;
}
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 58e945594c7b..70f78c3062a7 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -43,19 +43,22 @@ struct ad7266_state {
* The buffer needs to be large enough to hold two samples (4 bytes) and
* the naturally aligned timestamp (8 bytes).
*/
- uint8_t data[ALIGN(4, sizeof(s64)) + sizeof(s64)] ____cacheline_aligned;
+ struct {
+ __be16 sample[2];
+ s64 timestamp;
+ } data ____cacheline_aligned;
};
static int ad7266_wakeup(struct ad7266_state *st)
{
/* Any read with >= 2 bytes will wake the device */
- return spi_read(st->spi, st->data, 2);
+ return spi_read(st->spi, &st->data.sample[0], 2);
}
static int ad7266_powerdown(struct ad7266_state *st)
{
/* Any read with < 2 bytes will powerdown the device */
- return spi_read(st->spi, st->data, 1);
+ return spi_read(st->spi, &st->data.sample[0], 1);
}
static int ad7266_preenable(struct iio_dev *indio_dev)
@@ -84,9 +87,9 @@ static irqreturn_t ad7266_trigger_handler(int irq, void *p)
struct ad7266_state *st = iio_priv(indio_dev);
int ret;
- ret = spi_read(st->spi, st->data, 4);
+ ret = spi_read(st->spi, st->data.sample, 4);
if (ret == 0) {
- iio_push_to_buffers_with_timestamp(indio_dev, st->data,
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->data,
pf->timestamp);
}
@@ -137,7 +140,7 @@ static int ad7266_read_single(struct ad7266_state *st, int *val,
ad7266_select_input(st, address);
ret = spi_sync(st->spi, &st->single_msg);
- *val = be16_to_cpu(st->data[address % 2]);
+ *val = be16_to_cpu(st->data.sample[address % 2]);
return ret;
}
@@ -442,15 +445,15 @@ static int ad7266_probe(struct spi_device *spi)
ad7266_init_channels(indio_dev);
/* wakeup */
- st->single_xfer[0].rx_buf = &st->data;
+ st->single_xfer[0].rx_buf = &st->data.sample[0];
st->single_xfer[0].len = 2;
st->single_xfer[0].cs_change = 1;
/* conversion */
- st->single_xfer[1].rx_buf = &st->data;
+ st->single_xfer[1].rx_buf = st->data.sample;
st->single_xfer[1].len = 4;
st->single_xfer[1].cs_change = 1;
/* powerdown */
- st->single_xfer[2].tx_buf = &st->data;
+ st->single_xfer[2].tx_buf = &st->data.sample[0];
st->single_xfer[2].len = 1;
spi_message_init(&st->single_msg);
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index acb7f90359a3..749a6cadab8b 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -200,7 +200,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.address = 1,
.scan_index = 1,
- .scan_type = IIO_ST('u', 12, 16, 0),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 12,
+ .storagebits = 16,
+ .shift = 0,
+ .endianness = IIO_BE,
+ },
},
.channel[1] = {
.type = IIO_VOLTAGE,
@@ -210,7 +216,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.address = 0,
.scan_index = 0,
- .scan_type = IIO_ST('u', 12, 16, 0),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 12,
+ .storagebits = 16,
+ .shift = 0,
+ .endianness = IIO_BE,
+ },
},
.channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
.int_vref_mv = 2500,
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 6118dced02b6..360259266d4f 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1039,10 +1039,10 @@ static const struct iio_info max1238_info = {
};
static const struct iio_info max1363_info = {
- .read_event_value_new = &max1363_read_thresh,
- .write_event_value_new = &max1363_write_thresh,
- .read_event_config_new = &max1363_read_event_config,
- .write_event_config_new = &max1363_write_event_config,
+ .read_event_value = &max1363_read_thresh,
+ .write_event_value = &max1363_write_thresh,
+ .read_event_config = &max1363_read_event_config,
+ .write_event_config = &max1363_write_event_config,
.read_raw = &max1363_read_raw,
.update_scan_mode = &max1363_update_scan_mode,
.driver_module = THIS_MODULE,
@@ -1560,7 +1560,7 @@ static int max1363_probe(struct i2c_client *client,
st->client = client;
st->vref_uv = st->chip_info->int_vref_mv * 1000;
- vref = devm_regulator_get(&client->dev, "vref");
+ vref = devm_regulator_get_optional(&client->dev, "vref");
if (!IS_ERR(vref)) {
int vref_uv;
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index c8c1baaec6c1..47dcb34ff44c 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -362,7 +362,7 @@ static int mcp3422_probe(struct i2c_client *client,
| MCP3422_SAMPLE_RATE_VALUE(MCP3422_SRATE_240));
mcp3422_update_config(adc, config);
- err = iio_device_register(indio_dev);
+ err = devm_iio_device_register(&client->dev, indio_dev);
if (err < 0)
return err;
@@ -371,12 +371,6 @@ static int mcp3422_probe(struct i2c_client *client,
return 0;
}
-static int mcp3422_remove(struct i2c_client *client)
-{
- iio_device_unregister(i2c_get_clientdata(client));
- return 0;
-}
-
static const struct i2c_device_id mcp3422_id[] = {
{ "mcp3422", 2 },
{ "mcp3423", 3 },
@@ -400,7 +394,6 @@ static struct i2c_driver mcp3422_driver = {
.of_match_table = of_match_ptr(mcp3422_of_match),
},
.probe = mcp3422_probe,
- .remove = mcp3422_remove,
.id_table = mcp3422_id,
};
module_i2c_driver(mcp3422_driver);
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index d4d748214e4b..31e786e3999b 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -60,6 +60,24 @@ static u32 get_adc_step_mask(struct tiadc_device *adc_dev)
return step_en;
}
+static u32 get_adc_chan_step_mask(struct tiadc_device *adc_dev,
+ struct iio_chan_spec const *chan)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(adc_dev->channel_step); i++) {
+ if (chan->channel == adc_dev->channel_line[i]) {
+ u32 step;
+
+ step = adc_dev->channel_step[i];
+ /* +1 for the charger */
+ return 1 << (step + 1);
+ }
+ }
+ WARN_ON(1);
+ return 0;
+}
+
static u32 get_adc_step_bit(struct tiadc_device *adc_dev, int chan)
{
return 1 << adc_dev->channel_step[chan];
@@ -181,7 +199,7 @@ static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
enb |= (get_adc_step_bit(adc_dev, bit) << 1);
adc_dev->buffer_en_ch_steps = enb;
- am335x_tsc_se_set(adc_dev->mfd_tscadc, enb);
+ am335x_tsc_se_set_cache(adc_dev->mfd_tscadc, enb);
tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES
| IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW);
@@ -199,6 +217,7 @@ static int tiadc_buffer_predisable(struct iio_dev *indio_dev)
tiadc_writel(adc_dev, REG_IRQCLR, (IRQENB_FIFO1THRES |
IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW));
am335x_tsc_se_clr(adc_dev->mfd_tscadc, adc_dev->buffer_en_ch_steps);
+ adc_dev->buffer_en_ch_steps = 0;
/* Flush FIFO of leftover data in the time it takes to disable adc */
fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
@@ -328,34 +347,43 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
unsigned int fifo1count, read, stepid;
bool found = false;
u32 step_en;
- unsigned long timeout = jiffies + usecs_to_jiffies
- (IDLE_TIMEOUT * adc_dev->channels);
+ unsigned long timeout;
if (iio_buffer_enabled(indio_dev))
return -EBUSY;
- step_en = get_adc_step_mask(adc_dev);
- am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en);
+ step_en = get_adc_chan_step_mask(adc_dev, chan);
+ if (!step_en)
+ return -EINVAL;
+
+ fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
+ while (fifo1count--)
+ tiadc_readl(adc_dev, REG_FIFO1);
+
+ am335x_tsc_se_set_once(adc_dev->mfd_tscadc, step_en);
- /* Wait for ADC sequencer to complete sampling */
- while (tiadc_readl(adc_dev, REG_ADCFSM) & SEQ_STATUS) {
- if (time_after(jiffies, timeout))
+ timeout = jiffies + usecs_to_jiffies
+ (IDLE_TIMEOUT * adc_dev->channels);
+ /* Wait for Fifo threshold interrupt */
+ while (1) {
+ fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
+ if (fifo1count)
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
return -EAGAIN;
}
+ }
map_val = chan->channel + TOTAL_CHANNELS;
/*
- * When the sub-system is first enabled,
- * the sequencer will always start with the
- * lowest step (1) and continue until step (16).
- * For ex: If we have enabled 4 ADC channels and
- * currently use only 1 out of them, the
- * sequencer still configures all the 4 steps,
- * leading to 3 unwanted data.
- * Hence we need to flush out this data.
+ * We check the complete FIFO. We programmed just one entry, but in case
+ * something went wrong (we left empty-handed with -EAGAIN above) and the
+ * value then appeared in the FIFO anyway, we would have two entries.
+ * Therefore we read every item and keep only the latest version of the
+ * requested channel.
*/
-
- fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
for (i = 0; i < fifo1count; i++) {
read = tiadc_readl(adc_dev, REG_FIFO1);
stepid = read & FIFOREAD_CHNLID_MASK;
@@ -367,6 +395,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
*val = (u16) read;
}
}
+ am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
if (found == false)
return -EBUSY;
@@ -494,7 +523,8 @@ static int tiadc_resume(struct device *dev)
tiadc_writel(adc_dev, REG_CTRL, restore);
tiadc_step_config(indio_dev);
-
+ am335x_tsc_se_set_cache(adc_dev->mfd_tscadc,
+ adc_dev->buffer_en_ch_steps);
return 0;
}
diff --git a/drivers/iio/adc/viperboard_adc.c b/drivers/iio/adc/viperboard_adc.c
index 09727a71e9fa..d0add8f9416b 100644
--- a/drivers/iio/adc/viperboard_adc.c
+++ b/drivers/iio/adc/viperboard_adc.c
@@ -42,12 +42,6 @@ struct vprbrd_adc {
.indexed = 1, \
.channel = _index, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .scan_index = _index, \
- .scan_type = { \
- .sign = 'u', \
- .realbits = 8, \
- .storagebits = 8, \
- }, \
}
static struct iio_chan_spec const vprbrd_adc_iio_channels[] = {
@@ -73,7 +67,7 @@ static int vprbrd_iio_read_raw(struct iio_dev *iio_dev,
mutex_lock(&vb->lock);
admsg->cmd = VPRBRD_ADC_CMD_GET;
- admsg->chan = chan->scan_index;
+ admsg->chan = chan->channel;
admsg->val = 0x00;
ret = usb_control_msg(vb->usb_dev,
@@ -139,7 +133,7 @@ static int vprbrd_adc_probe(struct platform_device *pdev)
indio_dev->channels = vprbrd_adc_iio_channels;
indio_dev->num_channels = ARRAY_SIZE(vprbrd_adc_iio_channels);
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(&pdev->dev, indio_dev);
if (ret) {
dev_err(&pdev->dev, "could not register iio (adc)");
return ret;
@@ -150,22 +144,12 @@ static int vprbrd_adc_probe(struct platform_device *pdev)
return 0;
}
-static int vprbrd_adc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
-
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-
static struct platform_driver vprbrd_adc_driver = {
.driver = {
.name = "viperboard-adc",
.owner = THIS_MODULE,
},
.probe = vprbrd_adc_probe,
- .remove = vprbrd_adc_remove,
};
module_platform_driver(vprbrd_adc_driver);
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index cb9c6366032c..f03b92fd3803 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -299,7 +299,12 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE), \
.address = addr, \
- .scan_type = IIO_ST('u', (bits), 16, 20 - (bits)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 20 - (bits), \
+ }, \
.ext_info = ad5064_ext_info, \
}
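Every .scan_type conversion in this series follows the same mechanical pattern: the retired IIO_ST() helper filled the struct positionally (roughly as sketched below; treat the macro body as an approximation rather than a quote from iio.h), so sign, realbits, storagebits and shift carry over one-to-one, and the open-coded form additionally leaves room for fields the macro could not express, such as the .endianness used further down for adjd_s311 and adis16448.

/* Approximate shape of the old positional helper: */
#define IIO_ST(si, rb, sb, sh) \
	{ .sign = si, .realbits = rb, .storagebits = sb, .shift = sh }

/* So the ad5064 macro above is converted from */
.scan_type = IIO_ST('u', (bits), 16, 20 - (bits)),
/* to the equivalent, but extensible, designated-initializer form: */
.scan_type = {
	.sign = 'u',
	.realbits = (bits),
	.storagebits = 16,
	.shift = 20 - (bits),
},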
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index b968af50db0a..64634d7f578e 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -107,7 +107,12 @@ enum ad5360_type {
BIT(IIO_CHAN_INFO_OFFSET) | \
BIT(IIO_CHAN_INFO_CALIBSCALE) | \
BIT(IIO_CHAN_INFO_CALIBBIAS), \
- .scan_type = IIO_ST('u', (bits), 16, 16 - (bits)) \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 16 - (bits), \
+ }, \
}
static const struct ad5360_chip_info ad5360_chip_info_tbl[] = {
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index a59ff0e7b888..9de4c4d38280 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -261,7 +261,12 @@ static struct iio_chan_spec_ext_info ad5380_ext_info[] = {
BIT(IIO_CHAN_INFO_CALIBSCALE) | \
BIT(IIO_CHAN_INFO_CALIBBIAS), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .scan_type = IIO_ST('u', (_bits), 16, 14 - (_bits)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 14 - (_bits), \
+ }, \
.ext_info = ad5380_ext_info, \
}
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index 3eeaa82075f7..787ef1d859c6 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -75,7 +75,7 @@ struct ad5421_state {
* transfer buffers to live in their own cache lines.
*/
union {
- u32 d32;
+ __be32 d32;
u8 d8[4];
} data[2] ____cacheline_aligned;
};
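The d32 fields switched to __be32 here (and in ad5686/ad5755 below) belong to a common pattern worth spelling out: the wire-format transfer buffers live inside the device state, cache-line aligned, so that spi_write()/spi_sync() may safely DMA from them (stack buffers are not DMA-safe) and sparse can type-check the big-endian conversion. A minimal sketch of the pattern, with placeholder names not taken from the patch:

#include <linux/spi/spi.h>

struct example_dac_state {
	struct spi_device *spi;
	/* ... other driver state ... */

	/*
	 * Wire-format buffer in its own cache line: safe to DMA from and
	 * endianness-checked by sparse.
	 */
	union {
		__be32 d32;
		u8 d8[4];
	} data ____cacheline_aligned;
};

static int example_dac_write(struct example_dac_state *st, u32 cmd)
{
	st->data.d32 = cpu_to_be32(cmd);	/* big-endian on the wire */
	return spi_write(st->spi, st->data.d8, sizeof(st->data.d8));
}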
@@ -114,7 +114,11 @@ static const struct iio_chan_spec ad5421_channels[] = {
BIT(IIO_CHAN_INFO_CALIBBIAS),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OFFSET),
- .scan_type = IIO_ST('u', 16, 16, 0),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ },
.event_spec = ad5421_current_event,
.num_event_specs = ARRAY_SIZE(ad5421_current_event),
},
@@ -458,9 +462,9 @@ static int ad5421_read_event_value(struct iio_dev *indio_dev,
static const struct iio_info ad5421_info = {
.read_raw = ad5421_read_raw,
.write_raw = ad5421_write_raw,
- .read_event_config_new = ad5421_read_event_config,
- .write_event_config_new = ad5421_write_event_config,
- .read_event_value_new = ad5421_read_event_value,
+ .read_event_config = ad5421_read_event_config,
+ .write_event_config = ad5421_write_event_config,
+ .read_event_value = ad5421_read_event_value,
.driver_module = THIS_MODULE,
};
@@ -514,16 +518,7 @@ static int ad5421_probe(struct spi_device *spi)
return ret;
}
- return iio_device_register(indio_dev);
-}
-
-static int ad5421_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- iio_device_unregister(indio_dev);
-
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static struct spi_driver ad5421_driver = {
@@ -532,7 +527,6 @@ static struct spi_driver ad5421_driver = {
.owner = THIS_MODULE,
},
.probe = ad5421_probe,
- .remove = ad5421_remove,
};
module_spi_driver(ad5421_driver);
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 1263b0e5ad84..46bb62a5c1d4 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -139,14 +139,19 @@ static const struct iio_chan_spec_ext_info ad5446_ext_info_powerdown[] = {
{ },
};
-#define _AD5446_CHANNEL(bits, storage, shift, ext) { \
+#define _AD5446_CHANNEL(bits, storage, _shift, ext) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.output = 1, \
.channel = 0, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .scan_type = IIO_ST('u', (bits), (storage), (shift)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = (storage), \
+ .shift = (_shift), \
+ }, \
.ext_info = (ext), \
}
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index 82e208f6cde2..64d7256cbb6d 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -204,7 +204,12 @@ static const struct iio_info ad5449_info = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE), \
.address = (chan), \
- .scan_type = IIO_ST('u', (bits), 16, 12 - (bits)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 12 - (bits), \
+ }, \
}
#define DECLARE_AD5449_CHANNELS(name, bits) \
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index c0957a918e17..1e6449346b50 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -47,14 +47,16 @@
* @vref_mv: actual reference voltage used
* @pwr_down_mask power down mask
* @pwr_down_mode current power down mode
+ * @data: transfer buffer
*/
-
struct ad5504_state {
struct spi_device *spi;
struct regulator *reg;
unsigned short vref_mv;
unsigned pwr_down_mask;
unsigned pwr_down_mode;
+
+ __be16 data[2] ____cacheline_aligned;
};
/**
@@ -66,31 +68,29 @@ enum ad5504_supported_device_ids {
ID_AD5501,
};
-static int ad5504_spi_write(struct spi_device *spi, u8 addr, u16 val)
+static int ad5504_spi_write(struct ad5504_state *st, u8 addr, u16 val)
{
- u16 tmp = cpu_to_be16(AD5504_CMD_WRITE |
- AD5504_ADDR(addr) |
+ st->data[0] = cpu_to_be16(AD5504_CMD_WRITE | AD5504_ADDR(addr) |
(val & AD5504_RES_MASK));
- return spi_write(spi, (u8 *)&tmp, 2);
+ return spi_write(st->spi, &st->data[0], 2);
}
-static int ad5504_spi_read(struct spi_device *spi, u8 addr)
+static int ad5504_spi_read(struct ad5504_state *st, u8 addr)
{
- u16 tmp = cpu_to_be16(AD5504_CMD_READ | AD5504_ADDR(addr));
- u16 val;
int ret;
- struct spi_transfer t = {
- .tx_buf = &tmp,
- .rx_buf = &val,
- .len = 2,
- };
- ret = spi_sync_transfer(spi, &t, 1);
-
+ struct spi_transfer t = {
+ .tx_buf = &st->data[0],
+ .rx_buf = &st->data[1],
+ .len = 2,
+ };
+
+ st->data[0] = cpu_to_be16(AD5504_CMD_READ | AD5504_ADDR(addr));
+ ret = spi_sync_transfer(st->spi, &t, 1);
if (ret < 0)
return ret;
- return be16_to_cpu(val) & AD5504_RES_MASK;
+ return be16_to_cpu(st->data[1]) & AD5504_RES_MASK;
}
static int ad5504_read_raw(struct iio_dev *indio_dev,
@@ -104,7 +104,7 @@ static int ad5504_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = ad5504_spi_read(st->spi, chan->address);
+ ret = ad5504_spi_read(st, chan->address);
if (ret < 0)
return ret;
@@ -133,7 +133,7 @@ static int ad5504_write_raw(struct iio_dev *indio_dev,
if (val >= (1 << chan->scan_type.realbits) || val < 0)
return -EINVAL;
- return ad5504_spi_write(st->spi, chan->address, val);
+ return ad5504_spi_write(st, chan->address, val);
default:
ret = -EINVAL;
}
@@ -197,12 +197,12 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
else
st->pwr_down_mask &= ~(1 << chan->channel);
- ret = ad5504_spi_write(st->spi, AD5504_ADDR_CTRL,
+ ret = ad5504_spi_write(st, AD5504_ADDR_CTRL,
AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
AD5504_DAC_PWR(st->pwr_down_mask));
/* writes to the CTRL register must be followed by a NOOP */
- ad5504_spi_write(st->spi, AD5504_ADDR_NOOP, 0);
+ ad5504_spi_write(st, AD5504_ADDR_NOOP, 0);
return ret ? ret : len;
}
@@ -261,7 +261,11 @@ static const struct iio_chan_spec_ext_info ad5504_ext_info[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.address = AD5504_ADDR_DAC(_chan), \
- .scan_type = IIO_ST('u', 12, 16, 0), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ }, \
.ext_info = ad5504_ext_info, \
}
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 774dd968145b..e8199cce2aea 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -176,7 +176,12 @@ static const struct iio_chan_spec_ext_info ad5624r_ext_info[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.address = (_chan), \
- .scan_type = IIO_ST('u', (_bits), 16, 16 - (_bits)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 16 - (_bits), \
+ }, \
.ext_info = ad5624r_ext_info, \
}
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 30e506e37dd2..17aca4d9bd06 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -78,7 +78,7 @@ struct ad5686_state {
*/
union {
- u32 d32;
+ __be32 d32;
u8 d8[4];
} data[3] ____cacheline_aligned;
};
@@ -267,7 +267,7 @@ static const struct iio_chan_spec_ext_info ad5686_ext_info[] = {
{ },
};
-#define AD5868_CHANNEL(chan, bits, shift) { \
+#define AD5868_CHANNEL(chan, bits, _shift) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.output = 1, \
@@ -275,7 +275,12 @@ static const struct iio_chan_spec_ext_info ad5686_ext_info[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
.address = AD5686_ADDR_DAC(chan), \
- .scan_type = IIO_ST('u', bits, 16, shift), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = (_shift), \
+ }, \
.ext_info = ad5686_ext_info, \
}
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 9a78d5abb2f6..a7c851f62d7c 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -97,7 +97,7 @@ struct ad5755_state {
*/
union {
- u32 d32;
+ __be32 d32;
u8 d8[4];
} data[2] ____cacheline_aligned;
};
@@ -392,7 +392,12 @@ static const struct iio_chan_spec_ext_info ad5755_ext_info[] = {
BIT(IIO_CHAN_INFO_OFFSET) | \
BIT(IIO_CHAN_INFO_CALIBSCALE) | \
BIT(IIO_CHAN_INFO_CALIBBIAS), \
- .scan_type = IIO_ST('u', (_bits), 16, 16 - (_bits)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 16 - (_bits), \
+ }, \
.ext_info = ad5755_ext_info, \
}
@@ -589,16 +594,7 @@ static int ad5755_probe(struct spi_device *spi)
if (ret)
return ret;
- return iio_device_register(indio_dev);
-}
-
-static int ad5755_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- iio_device_unregister(indio_dev);
-
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id ad5755_id[] = {
@@ -617,7 +613,6 @@ static struct spi_driver ad5755_driver = {
.owner = THIS_MODULE,
},
.probe = ad5755_probe,
- .remove = ad5755_remove,
.id_table = ad5755_id,
};
module_spi_driver(ad5755_driver);
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index a8ff5b2ed13e..d0d38165339d 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -83,7 +83,12 @@ enum ad5764_type {
BIT(IIO_CHAN_INFO_CALIBSCALE) | \
BIT(IIO_CHAN_INFO_CALIBBIAS), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET), \
- .scan_type = IIO_ST('u', (_bits), 16, 16 - (_bits)) \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 16 - (_bits), \
+ }, \
}
#define DECLARE_AD5764_CHANNELS(_name, _bits) \
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index d64acbd89482..ae49afe2b380 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -91,6 +91,11 @@ struct ad5791_state {
unsigned ctrl;
unsigned pwr_down_mode;
bool pwr_down;
+
+ union {
+ __be32 d32;
+ u8 d8[4];
+ } data[3] ____cacheline_aligned;
};
/**
@@ -104,48 +109,39 @@ enum ad5791_supported_device_ids {
ID_AD5791,
};
-static int ad5791_spi_write(struct spi_device *spi, u8 addr, u32 val)
+static int ad5791_spi_write(struct ad5791_state *st, u8 addr, u32 val)
{
- union {
- u32 d32;
- u8 d8[4];
- } data;
-
- data.d32 = cpu_to_be32(AD5791_CMD_WRITE |
+ st->data[0].d32 = cpu_to_be32(AD5791_CMD_WRITE |
AD5791_ADDR(addr) |
(val & AD5791_DAC_MASK));
- return spi_write(spi, &data.d8[1], 3);
+ return spi_write(st->spi, &st->data[0].d8[1], 3);
}
-static int ad5791_spi_read(struct spi_device *spi, u8 addr, u32 *val)
+static int ad5791_spi_read(struct ad5791_state *st, u8 addr, u32 *val)
{
- union {
- u32 d32;
- u8 d8[4];
- } data[3];
int ret;
struct spi_transfer xfers[] = {
{
- .tx_buf = &data[0].d8[1],
+ .tx_buf = &st->data[0].d8[1],
.bits_per_word = 8,
.len = 3,
.cs_change = 1,
}, {
- .tx_buf = &data[1].d8[1],
- .rx_buf = &data[2].d8[1],
+ .tx_buf = &st->data[1].d8[1],
+ .rx_buf = &st->data[2].d8[1],
.bits_per_word = 8,
.len = 3,
},
};
- data[0].d32 = cpu_to_be32(AD5791_CMD_READ |
+ st->data[0].d32 = cpu_to_be32(AD5791_CMD_READ |
AD5791_ADDR(addr));
- data[1].d32 = cpu_to_be32(AD5791_ADDR(AD5791_ADDR_NOOP));
+ st->data[1].d32 = cpu_to_be32(AD5791_ADDR(AD5791_ADDR_NOOP));
- ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
- *val = be32_to_cpu(data[2].d32);
+ *val = be32_to_cpu(st->data[2].d32);
return ret;
}
@@ -210,7 +206,7 @@ static ssize_t ad5791_write_dac_powerdown(struct iio_dev *indio_dev,
}
st->pwr_down = pwr_down;
- ret = ad5791_spi_write(st->spi, AD5791_ADDR_CTRL, st->ctrl);
+ ret = ad5791_spi_write(st, AD5791_ADDR_CTRL, st->ctrl);
return ret ? ret : len;
}
@@ -263,7 +259,7 @@ static int ad5791_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = ad5791_spi_read(st->spi, chan->address, val);
+ ret = ad5791_spi_read(st, chan->address, val);
if (ret)
return ret;
*val &= AD5791_DAC_MASK;
@@ -297,7 +293,7 @@ static const struct iio_chan_spec_ext_info ad5791_ext_info[] = {
{ },
};
-#define AD5791_CHAN(bits, shift) { \
+#define AD5791_CHAN(bits, _shift) { \
.type = IIO_VOLTAGE, \
.output = 1, \
.indexed = 1, \
@@ -306,7 +302,12 @@ static const struct iio_chan_spec_ext_info ad5791_ext_info[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_OFFSET), \
- .scan_type = IIO_ST('u', bits, 24, shift), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 24, \
+ .shift = (_shift), \
+ }, \
.ext_info = ad5791_ext_info, \
}
@@ -330,7 +331,7 @@ static int ad5791_write_raw(struct iio_dev *indio_dev,
val &= AD5791_RES_MASK(chan->scan_type.realbits);
val <<= chan->scan_type.shift;
- return ad5791_spi_write(st->spi, chan->address, val);
+ return ad5791_spi_write(st, chan->address, val);
default:
return -EINVAL;
@@ -393,7 +394,7 @@ static int ad5791_probe(struct spi_device *spi)
dev_warn(&spi->dev, "reference voltage unspecified\n");
}
- ret = ad5791_spi_write(spi, AD5791_ADDR_SW_CTRL, AD5791_SWCTRL_RESET);
+ ret = ad5791_spi_write(st, AD5791_ADDR_SW_CTRL, AD5791_SWCTRL_RESET);
if (ret)
goto error_disable_reg_neg;
@@ -405,7 +406,7 @@ static int ad5791_probe(struct spi_device *spi)
| ((pdata && pdata->use_rbuf_gain2) ? 0 : AD5791_CTRL_RBUF) |
AD5791_CTRL_BIN2SC;
- ret = ad5791_spi_write(spi, AD5791_ADDR_CTRL, st->ctrl |
+ ret = ad5791_spi_write(st, AD5791_ADDR_CTRL, st->ctrl |
AD5791_CTRL_OPGND | AD5791_CTRL_DACTRI);
if (ret)
goto error_disable_reg_neg;
diff --git a/drivers/iio/dac/max517.c b/drivers/iio/dac/max517.c
index 6e1903537950..de76e6a34c1e 100644
--- a/drivers/iio/dac/max517.c
+++ b/drivers/iio/dac/max517.c
@@ -146,7 +146,6 @@ static const struct iio_info max517_info = {
.channel = (chan), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE), \
- .scan_type = IIO_ST('u', 8, 8, 0), \
}
static const struct iio_chan_spec max517_channels[] = {
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 9f57ae84ab89..7d9f5c31d2fc 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -209,7 +209,6 @@ static const struct iio_chan_spec mcp4725_channel = {
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
- .scan_type = IIO_ST('u', 12, 16, 0),
.ext_info = mcp4725_ext_info,
};
diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c
index 445c2aecfadd..8d08c7ed1ea6 100644
--- a/drivers/iio/gyro/adis16130.c
+++ b/drivers/iio/gyro/adis16130.c
@@ -161,13 +161,7 @@ static int adis16130_probe(struct spi_device *spi)
indio_dev->info = &adis16130_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- return iio_device_register(indio_dev);
-}
-
-static int adis16130_remove(struct spi_device *spi)
-{
- iio_device_unregister(spi_get_drvdata(spi));
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static struct spi_driver adis16130_driver = {
@@ -176,7 +170,6 @@ static struct spi_driver adis16130_driver = {
.owner = THIS_MODULE,
},
.probe = adis16130_probe,
- .remove = adis16130_remove,
};
module_spi_driver(adis16130_driver);
diff --git a/drivers/iio/gyro/adxrs450.c b/drivers/iio/gyro/adxrs450.c
index 1e546ba7ba45..eb0e08ec9e20 100644
--- a/drivers/iio/gyro/adxrs450.c
+++ b/drivers/iio/gyro/adxrs450.c
@@ -434,23 +434,14 @@ static int adxrs450_probe(struct spi_device *spi)
indio_dev->num_channels = ARRAY_SIZE(adxrs450_channels);
indio_dev->name = spi->dev.driver->name;
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret)
return ret;
/* Get the device into a sane initial state */
ret = adxrs450_initial_setup(indio_dev);
if (ret)
- goto error_initial;
- return 0;
-error_initial:
- iio_device_unregister(indio_dev);
- return ret;
-}
-
-static int adxrs450_remove(struct spi_device *spi)
-{
- iio_device_unregister(spi_get_drvdata(spi));
+ return ret;
return 0;
}
@@ -468,7 +459,6 @@ static struct spi_driver adxrs450_driver = {
.owner = THIS_MODULE,
},
.probe = adxrs450_probe,
- .remove = adxrs450_remove,
.id_table = adxrs450_id,
};
module_spi_driver(adxrs450_driver);
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index e54f0f4959d3..59d6bc3e04df 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -262,6 +262,17 @@ static int gyro_3d_parse_report(struct platform_device *pdev,
st->gyro[1].index, st->gyro[1].report_id,
st->gyro[2].index, st->gyro[2].report_id);
+ /* Set Sensitivity field ids, when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_ANGL_VELOCITY,
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
return ret;
}
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
new file mode 100644
index 000000000000..463c4d9da79e
--- /dev/null
+++ b/drivers/iio/humidity/Kconfig
@@ -0,0 +1,15 @@
+#
+# humidity sensor drivers
+#
+menu "Humidity sensors"
+
+config DHT11
+ tristate "DHT11 (and compatible sensors) driver"
+ depends on GPIOLIB
+ help
+ This driver supports reading data via a single
+ interrupt-generating GPIO line. Currently tested are DHT11 and
+ DHT22. Other sensors should work as well, as long as they speak
+ the same protocol.
+
+endmenu
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
new file mode 100644
index 000000000000..d5d36c0c95f9
--- /dev/null
+++ b/drivers/iio/humidity/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for IIO humidity sensor drivers
+#
+
+obj-$(CONFIG_DHT11) += dht11.o
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
new file mode 100644
index 000000000000..d8771f546bf2
--- /dev/null
+++ b/drivers/iio/humidity/dht11.c
@@ -0,0 +1,294 @@
+/*
+ * DHT11/DHT22 bit banging GPIO driver
+ *
+ * Copyright (c) Harald Geyer <harald@ccbib.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/sysfs.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#include <linux/iio/iio.h>
+
+#define DRIVER_NAME "dht11"
+
+#define DHT11_DATA_VALID_TIME 2000000000 /* 2s in ns */
+
+#define DHT11_EDGES_PREAMBLE 4
+#define DHT11_BITS_PER_READ 40
+#define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1)
+
+/* Data transmission timing (nanoseconds, unless noted otherwise) */
+#define DHT11_START_TRANSMISSION 18 /* ms */
+#define DHT11_SENSOR_RESPONSE 80000
+#define DHT11_START_BIT 50000
+#define DHT11_DATA_BIT_LOW 27000
+#define DHT11_DATA_BIT_HIGH 70000
+
+struct dht11 {
+ struct device *dev;
+
+ int gpio;
+ int irq;
+
+ struct completion completion;
+
+ s64 timestamp;
+ int temperature;
+ int humidity;
+
+ /* num_edges: -1 means "no transmission in progress" */
+ int num_edges;
+ struct {s64 ts; int value; } edges[DHT11_EDGES_PER_READ];
+};
+
+static unsigned char dht11_decode_byte(int *timing, int threshold)
+{
+ unsigned char ret = 0;
+ int i;
+
+ for (i = 0; i < 8; ++i) {
+ ret <<= 1;
+ if (timing[i] >= threshold)
+ ++ret;
+ }
+
+ return ret;
+}
+
+static int dht11_decode(struct dht11 *dht11, int offset)
+{
+ int i, t, timing[DHT11_BITS_PER_READ], threshold,
+ timeres = DHT11_SENSOR_RESPONSE;
+ unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum;
+
+ /* Calculate timestamp resolution */
+ for (i = 1; i < dht11->num_edges; ++i) {
+ t = dht11->edges[i].ts - dht11->edges[i-1].ts;
+ if (t > 0 && t < timeres)
+ timeres = t;
+ }
+ if (2*timeres > DHT11_DATA_BIT_HIGH) {
+ pr_err("dht11: timeresolution %d too bad for decoding\n",
+ timeres);
+ return -EIO;
+ }
+ threshold = DHT11_DATA_BIT_HIGH / timeres;
+ if (DHT11_DATA_BIT_LOW/timeres + 1 >= threshold)
+ pr_err("dht11: WARNING: decoding ambiguous\n");
+
+ /* scale down with timeres and check validity */
+ for (i = 0; i < DHT11_BITS_PER_READ; ++i) {
+ t = dht11->edges[offset + 2*i + 2].ts -
+ dht11->edges[offset + 2*i + 1].ts;
+ if (!dht11->edges[offset + 2*i + 1].value)
+ return -EIO; /* lost synchronisation */
+ timing[i] = t / timeres;
+ }
+
+ hum_int = dht11_decode_byte(timing, threshold);
+ hum_dec = dht11_decode_byte(&timing[8], threshold);
+ temp_int = dht11_decode_byte(&timing[16], threshold);
+ temp_dec = dht11_decode_byte(&timing[24], threshold);
+ checksum = dht11_decode_byte(&timing[32], threshold);
+
+ if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum)
+ return -EIO;
+
+ dht11->timestamp = iio_get_time_ns();
+ if (hum_int < 20) { /* DHT22 */
+ dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) *
+ ((temp_int & 0x80) ? -100 : 100);
+ dht11->humidity = ((hum_int << 8) + hum_dec) * 100;
+ } else if (temp_dec == 0 && hum_dec == 0) { /* DHT11 */
+ dht11->temperature = temp_int * 1000;
+ dht11->humidity = hum_int * 1000;
+ } else {
+ dev_err(dht11->dev,
+ "Don't know how to decode data: %d %d %d %d\n",
+ hum_int, hum_dec, temp_int, temp_dec);
+ return -EIO;
+ }
+
+ return 0;
+}
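dht11_decode() above classifies each of the 40 data bits purely by the length of its high pulse, roughly 27 us for a 0 and 70 us for a 1, after scaling by the measured timestamp resolution. A self-contained host-side sketch of the same byte decoding, using made-up pulse durations (the values are illustrative, not captured data):

#include <stdio.h>

#define DATA_BIT_LOW	27000	/* high pulse of a 0 bit, in ns */
#define DATA_BIT_HIGH	70000	/* high pulse of a 1 bit, in ns */

/* Same shape as dht11_decode_byte(): MSB first, a long pulse counts as 1. */
static unsigned char decode_byte(const int *timing, int threshold)
{
	unsigned char ret = 0;
	int i;

	for (i = 0; i < 8; ++i) {
		ret <<= 1;
		if (timing[i] >= threshold)
			++ret;
	}
	return ret;
}

int main(void)
{
	/* Hypothetical high-pulse durations for the bit pattern 0101 0011. */
	int timing[8] = { DATA_BIT_LOW, DATA_BIT_HIGH, DATA_BIT_LOW,
			  DATA_BIT_HIGH, DATA_BIT_LOW, DATA_BIT_LOW,
			  DATA_BIT_HIGH, DATA_BIT_HIGH };

	printf("decoded byte: 0x%02x\n", decode_byte(timing, DATA_BIT_HIGH));
	return 0;
}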
+
+static int dht11_read_raw(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long m)
+{
+ struct dht11 *dht11 = iio_priv(iio_dev);
+ int ret;
+
+ if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) {
+ reinit_completion(&dht11->completion);
+
+ dht11->num_edges = 0;
+ ret = gpio_direction_output(dht11->gpio, 0);
+ if (ret)
+ goto err;
+ msleep(DHT11_START_TRANSMISSION);
+ ret = gpio_direction_input(dht11->gpio);
+ if (ret)
+ goto err;
+
+ ret = wait_for_completion_killable_timeout(&dht11->completion,
+ HZ);
+ if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) {
+ dev_err(&iio_dev->dev,
+ "Only %d signal edges detected\n",
+ dht11->num_edges);
+ ret = -ETIMEDOUT;
+ }
+ if (ret < 0)
+ goto err;
+
+ ret = dht11_decode(dht11,
+ dht11->num_edges == DHT11_EDGES_PER_READ ?
+ DHT11_EDGES_PREAMBLE :
+ DHT11_EDGES_PREAMBLE - 2);
+ if (ret)
+ goto err;
+ }
+
+ ret = IIO_VAL_INT;
+ if (chan->type == IIO_TEMP)
+ *val = dht11->temperature;
+ else if (chan->type == IIO_HUMIDITYRELATIVE)
+ *val = dht11->humidity;
+ else
+ ret = -EINVAL;
+err:
+ dht11->num_edges = -1;
+ return ret;
+}
+
+static const struct iio_info dht11_iio_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = dht11_read_raw,
+};
+
+/*
+ * IRQ handler called on GPIO edges
+ */
+static irqreturn_t dht11_handle_irq(int irq, void *data)
+{
+ struct iio_dev *iio = data;
+ struct dht11 *dht11 = iio_priv(iio);
+
+ /* TODO: Consider making the handler safe for IRQ sharing */
+ if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
+ dht11->edges[dht11->num_edges].ts = iio_get_time_ns();
+ dht11->edges[dht11->num_edges++].value =
+ gpio_get_value(dht11->gpio);
+
+ if (dht11->num_edges >= DHT11_EDGES_PER_READ)
+ complete(&dht11->completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_chan_spec dht11_chan_spec[] = {
+ { .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), },
+ { .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), }
+};
+
+static const struct of_device_id dht11_dt_ids[] = {
+ { .compatible = "dht11", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, dht11_dt_ids);
+
+static int dht11_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct dht11 *dht11;
+ struct iio_dev *iio;
+ int ret;
+
+ iio = devm_iio_device_alloc(dev, sizeof(*dht11));
+ if (!iio) {
+ dev_err(dev, "Failed to allocate IIO device\n");
+ return -ENOMEM;
+ }
+
+ dht11 = iio_priv(iio);
+ dht11->dev = dev;
+
+ dht11->gpio = ret = of_get_gpio(node, 0);
+ if (ret < 0)
+ return ret;
+ ret = devm_gpio_request_one(dev, dht11->gpio, GPIOF_IN, pdev->name);
+ if (ret)
+ return ret;
+
+ dht11->irq = gpio_to_irq(dht11->gpio);
+ if (dht11->irq < 0) {
+ dev_err(dev, "GPIO %d has no interrupt\n", dht11->gpio);
+ return -EINVAL;
+ }
+ ret = devm_request_irq(dev, dht11->irq, dht11_handle_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ pdev->name, iio);
+ if (ret)
+ return ret;
+
+ dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1;
+ dht11->num_edges = -1;
+
+ platform_set_drvdata(pdev, iio);
+
+ init_completion(&dht11->completion);
+ iio->name = pdev->name;
+ iio->dev.parent = &pdev->dev;
+ iio->info = &dht11_iio_info;
+ iio->modes = INDIO_DIRECT_MODE;
+ iio->channels = dht11_chan_spec;
+ iio->num_channels = ARRAY_SIZE(dht11_chan_spec);
+
+ return devm_iio_device_register(dev, iio);
+}
+
+static struct platform_driver dht11_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = dht11_dt_ids,
+ },
+ .probe = dht11_probe,
+};
+
+module_platform_driver(dht11_driver);
+
+MODULE_AUTHOR("Harald Geyer <harald@ccbib.org>");
+MODULE_DESCRIPTION("DHT11 humidity/temperature sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
index 2f8f9d632386..0916bf6b6c31 100644
--- a/drivers/iio/imu/adis16400.h
+++ b/drivers/iio/imu/adis16400.h
@@ -189,6 +189,7 @@ enum {
ADIS16300_SCAN_INCLI_X,
ADIS16300_SCAN_INCLI_Y,
ADIS16400_SCAN_ADC,
+ ADIS16400_SCAN_TIMESTAMP,
};
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 3fb7757a1028..7c582f7ae34e 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -632,7 +632,7 @@ static const struct iio_chan_spec adis16400_channels[] = {
ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14),
ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12),
ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12),
- IIO_CHAN_SOFT_TIMESTAMP(12)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16448_channels[] = {
@@ -651,10 +651,15 @@ static const struct iio_chan_spec adis16448_channels[] = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.address = ADIS16448_BARO_OUT,
.scan_index = ADIS16400_SCAN_BARO,
- .scan_type = IIO_ST('s', 16, 16, 0),
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
},
ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(11)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16350_channels[] = {
@@ -672,7 +677,7 @@ static const struct iio_chan_spec adis16350_channels[] = {
ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12),
ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12),
ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(11)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16300_channels[] = {
@@ -685,7 +690,7 @@ static const struct iio_chan_spec adis16300_channels[] = {
ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12),
ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13),
ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13),
- IIO_CHAN_SOFT_TIMESTAMP(14)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16334_channels[] = {
@@ -696,7 +701,7 @@ static const struct iio_chan_spec adis16334_channels[] = {
ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(8)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static struct attribute *adis16400_attributes[] = {
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 7f9152c3c4d3..c67d83bdc8f0 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -37,6 +37,14 @@ static bool iio_buffer_is_active(struct iio_buffer *buf)
return !list_empty(&buf->buffer_list);
}
+static bool iio_buffer_data_available(struct iio_buffer *buf)
+{
+ if (buf->access->data_available)
+ return buf->access->data_available(buf);
+
+ return buf->stufftoread;
+}
+
/**
* iio_buffer_read_first_n_outer() - chrdev read for buffer access
*
@@ -48,13 +56,34 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
{
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
+ int ret;
if (!indio_dev->info)
return -ENODEV;
if (!rb || !rb->access->read_first_n)
return -EINVAL;
- return rb->access->read_first_n(rb, n, buf);
+
+ do {
+ if (!iio_buffer_data_available(rb)) {
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(rb->pollq,
+ iio_buffer_data_available(rb) ||
+ indio_dev->info == NULL);
+ if (ret)
+ return ret;
+ if (indio_dev->info == NULL)
+ return -ENODEV;
+ }
+
+ ret = rb->access->read_first_n(rb, n, buf);
+ if (ret == 0 && (filp->f_flags & O_NONBLOCK))
+ ret = -EAGAIN;
+ } while (ret == 0);
+
+ return ret;
}
/**
@@ -70,7 +99,7 @@ unsigned int iio_buffer_poll(struct file *filp,
return -ENODEV;
poll_wait(filp, &rb->pollq, wait);
- if (rb->stufftoread)
+ if (iio_buffer_data_available(rb))
return POLLIN | POLLRDNORM;
/* need a way of knowing if there may be enough data... */
return 0;
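With the hunk above, a blocking read() on the buffer character device now sleeps until data_available() reports data (or the device disappears), and O_NONBLOCK readers get -EAGAIN instead of a zero-length read. A minimal userspace consumer built on that behaviour might look as follows; the device node and scan size are assumptions for illustration only.

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char scan[16];		/* assumed scan element size */
	int fd = open("/dev/iio:device0", O_RDONLY | O_NONBLOCK);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (fd < 0)
		return 1;

	for (;;) {
		/* poll() keys off the buffer's new data_available() hook. */
		if (poll(&pfd, 1, -1) <= 0)
			break;

		ssize_t n = read(fd, scan, sizeof(scan));
		if (n > 0)
			printf("read %zd bytes of scan data\n", n);
		else if (n < 0 && errno != EAGAIN)
			break;
	}

	close(fd);
	return 0;
}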
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 18f72e3d0ed6..acc911a836ca 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -69,6 +69,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_ALTVOLTAGE] = "altvoltage",
[IIO_CCT] = "cct",
[IIO_PRESSURE] = "pressure",
+ [IIO_HUMIDITYRELATIVE] = "humidityrelative",
};
static const char * const iio_modifier_names[] = {
@@ -107,6 +108,11 @@ static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_INT_TIME] = "integration_time",
};
+/**
+ * iio_find_channel_from_si() - get channel from its scan index
+ * @indio_dev: device
+ * @si: scan index to match
+ */
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
{
@@ -922,6 +928,10 @@ struct device_type iio_device_type = {
.release = iio_dev_release,
};
+/**
+ * iio_device_alloc() - allocate an iio_dev from a driver
+ * @sizeof_priv: Space to allocate for private structure.
+ **/
struct iio_dev *iio_device_alloc(int sizeof_priv)
{
struct iio_dev *dev;
@@ -962,6 +972,10 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
}
EXPORT_SYMBOL(iio_device_alloc);
+/**
+ * iio_device_free() - free an iio_dev from a driver
+ * @dev: the iio_dev associated with the device
+ **/
void iio_device_free(struct iio_dev *dev)
{
if (dev)
@@ -984,6 +998,20 @@ static int devm_iio_device_match(struct device *dev, void *res, void *data)
return *r == data;
}
+/**
+ * devm_iio_device_alloc - Resource-managed iio_device_alloc()
+ * @dev: Device to allocate iio_dev for
+ * @sizeof_priv: Space to allocate for private structure.
+ *
+ * Managed iio_device_alloc. iio_dev allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * If an iio_dev allocated with this function needs to be freed separately,
+ * devm_iio_device_free() must be used.
+ *
+ * RETURNS:
+ * Pointer to allocated iio_dev on success, NULL on failure.
+ */
struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv)
{
struct iio_dev **ptr, *iio_dev;
@@ -1006,6 +1034,13 @@ struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv)
}
EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
+/**
+ * devm_iio_device_free - Resource-managed iio_device_free()
+ * @dev: Device this iio_dev belongs to
+ * @iio_dev: the iio_dev associated with the device
+ *
+ * Free iio_dev allocated with devm_iio_device_alloc().
+ */
void devm_iio_device_free(struct device *dev, struct iio_dev *iio_dev)
{
int rc;
@@ -1080,6 +1115,10 @@ static const struct file_operations iio_buffer_fileops = {
static const struct iio_buffer_setup_ops noop_ring_setup_ops;
+/**
+ * iio_device_register() - register a device with the IIO subsystem
+ * @indio_dev: Device structure filled by the device driver
+ **/
int iio_device_register(struct iio_dev *indio_dev)
{
int ret;
@@ -1141,6 +1180,10 @@ error_ret:
}
EXPORT_SYMBOL(iio_device_register);
+/**
+ * iio_device_unregister() - unregister a device from the IIO subsystem
+ * @indio_dev: Device structure representing the device.
+ **/
void iio_device_unregister(struct iio_dev *indio_dev)
{
mutex_lock(&indio_dev->info_exist_lock);
@@ -1161,6 +1204,65 @@ void iio_device_unregister(struct iio_dev *indio_dev)
mutex_unlock(&indio_dev->info_exist_lock);
}
EXPORT_SYMBOL(iio_device_unregister);
+
+static void devm_iio_device_unreg(struct device *dev, void *res)
+{
+ iio_device_unregister(*(struct iio_dev **)res);
+}
+
+/**
+ * devm_iio_device_register - Resource-managed iio_device_register()
+ * @dev: Device to allocate iio_dev for
+ * @indio_dev: Device structure filled by the device driver
+ *
+ * Managed iio_device_register. The IIO device registered with this
+ * function is automatically unregistered on driver detach. This function
+ * calls iio_device_register() internally. Refer to that function for more
+ * information.
+ *
+ * If an iio_dev registered with this function needs to be unregistered
+ * separately, devm_iio_device_unregister() must be used.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev)
+{
+ struct iio_dev **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_iio_device_unreg, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ *ptr = indio_dev;
+ ret = iio_device_register(indio_dev);
+ if (!ret)
+ devres_add(dev, ptr);
+ else
+ devres_free(ptr);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_iio_device_register);
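This pairing of devm_iio_device_alloc() and devm_iio_device_register() is what lets the drivers converted earlier in this diff (viperboard_adc, ad5421, ad5755, adis16130, adxrs450, ...) drop their remove() callbacks. A minimal probe routine written against the managed API looks roughly like this; the foo names and channel layout are placeholders, not taken from the patch.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/iio/iio.h>

struct foo_priv {
	void __iomem *base;
};

static const struct iio_chan_spec foo_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
	},
};

static const struct iio_info foo_info = {
	.driver_module = THIS_MODULE,
};

static int foo_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;

	/* Freed automatically on driver detach. */
	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct foo_priv));
	if (!indio_dev)
		return -ENOMEM;

	indio_dev->name = "foo-adc";
	indio_dev->dev.parent = &pdev->dev;
	indio_dev->info = &foo_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = foo_channels;
	indio_dev->num_channels = ARRAY_SIZE(foo_channels);

	/* Unregistered automatically as well, so no .remove is needed. */
	return devm_iio_device_register(&pdev->dev, indio_dev);
}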
+
+/**
+ * devm_iio_device_unregister - Resource-managed iio_device_unregister()
+ * @dev: Device this iio_dev belongs to
+ * @indio_dev: the iio_dev associated with the device
+ *
+ * Unregister iio_dev registered with devm_iio_device_register().
+ */
+void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_iio_device_unreg,
+ devm_iio_device_match, indio_dev);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_device_unregister);
+
subsys_initcall(iio_init);
module_exit(iio_exit);
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index c10eab64bc05..c9c1419fe6e0 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -42,6 +42,12 @@ struct iio_event_interface {
struct attribute_group group;
};
+/**
+ * iio_push_event() - try to add event to the list for userspace reading
+ * @indio_dev: IIO device structure
+ * @ev_code: What event
+ * @timestamp: When the event occurred
+ **/
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
struct iio_event_interface *ev_int = indio_dev->event_interface;
@@ -236,13 +242,9 @@ static ssize_t iio_ev_state_store(struct device *dev,
if (ret < 0)
return ret;
- if (indio_dev->info->write_event_config)
- ret = indio_dev->info->write_event_config(indio_dev,
- this_attr->address, val);
- else
- ret = indio_dev->info->write_event_config_new(indio_dev,
- this_attr->c, iio_ev_attr_type(this_attr),
- iio_ev_attr_dir(this_attr), val);
+ ret = indio_dev->info->write_event_config(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), val);
return (ret < 0) ? ret : len;
}
@@ -255,13 +257,9 @@ static ssize_t iio_ev_state_show(struct device *dev,
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int val;
- if (indio_dev->info->read_event_config)
- val = indio_dev->info->read_event_config(indio_dev,
- this_attr->address);
- else
- val = indio_dev->info->read_event_config_new(indio_dev,
- this_attr->c, iio_ev_attr_type(this_attr),
- iio_ev_attr_dir(this_attr));
+ val = indio_dev->info->read_event_config(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr));
if (val < 0)
return val;
else
@@ -277,21 +275,13 @@ static ssize_t iio_ev_value_show(struct device *dev,
int val, val2;
int ret;
- if (indio_dev->info->read_event_value) {
- ret = indio_dev->info->read_event_value(indio_dev,
- this_attr->address, &val);
- if (ret < 0)
- return ret;
- return sprintf(buf, "%d\n", val);
- } else {
- ret = indio_dev->info->read_event_value_new(indio_dev,
- this_attr->c, iio_ev_attr_type(this_attr),
- iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
- &val, &val2);
- if (ret < 0)
- return ret;
- return iio_format_value(buf, ret, val, val2);
- }
+ ret = indio_dev->info->read_event_value(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
+ &val, &val2);
+ if (ret < 0)
+ return ret;
+ return iio_format_value(buf, ret, val, val2);
}
static ssize_t iio_ev_value_store(struct device *dev,
@@ -304,25 +294,16 @@ static ssize_t iio_ev_value_store(struct device *dev,
int val, val2;
int ret;
- if (!indio_dev->info->write_event_value &&
- !indio_dev->info->write_event_value_new)
+ if (!indio_dev->info->write_event_value)
return -EINVAL;
- if (indio_dev->info->write_event_value) {
- ret = kstrtoint(buf, 10, &val);
- if (ret)
- return ret;
- ret = indio_dev->info->write_event_value(indio_dev,
- this_attr->address, val);
- } else {
- ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
- if (ret)
- return ret;
- ret = indio_dev->info->write_event_value_new(indio_dev,
- this_attr->c, iio_ev_attr_type(this_attr),
- iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
- val, val2);
- }
+ ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
+ if (ret)
+ return ret;
+ ret = indio_dev->info->write_event_value(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
+ val, val2);
if (ret < 0)
return ret;
@@ -371,7 +352,7 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
return attrcount;
}
-static int iio_device_add_event_sysfs_new(struct iio_dev *indio_dev,
+static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
int ret = 0, i, attrcount = 0;
@@ -414,89 +395,6 @@ error_ret:
return ret;
}
-static int iio_device_add_event_sysfs_old(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan)
-{
- int ret = 0, i, attrcount = 0;
- u64 mask = 0;
- char *postfix;
- if (!chan->event_mask)
- return 0;
-
- for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
- postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
- iio_ev_type_text[i/IIO_EV_DIR_MAX],
- iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
- if (postfix == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- if (chan->modified)
- mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel2,
- i/IIO_EV_DIR_MAX,
- i%IIO_EV_DIR_MAX);
- else if (chan->differential)
- mask = IIO_EVENT_CODE(chan->type,
- 0, 0,
- i%IIO_EV_DIR_MAX,
- i/IIO_EV_DIR_MAX,
- 0,
- chan->channel,
- chan->channel2);
- else
- mask = IIO_UNMOD_EVENT_CODE(chan->type,
- chan->channel,
- i/IIO_EV_DIR_MAX,
- i%IIO_EV_DIR_MAX);
-
- ret = __iio_add_chan_devattr(postfix,
- chan,
- &iio_ev_state_show,
- iio_ev_state_store,
- mask,
- 0,
- &indio_dev->dev,
- &indio_dev->event_interface->
- dev_attr_list);
- kfree(postfix);
- if (ret)
- goto error_ret;
- attrcount++;
- postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
- iio_ev_type_text[i/IIO_EV_DIR_MAX],
- iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
- if (postfix == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = __iio_add_chan_devattr(postfix, chan,
- iio_ev_value_show,
- iio_ev_value_store,
- mask,
- 0,
- &indio_dev->dev,
- &indio_dev->event_interface->
- dev_attr_list);
- kfree(postfix);
- if (ret)
- goto error_ret;
- attrcount++;
- }
- ret = attrcount;
-error_ret:
- return ret;
-}
-
-
-static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan)
-{
- if (chan->event_mask)
- return iio_device_add_event_sysfs_old(indio_dev, chan);
- else
- return iio_device_add_event_sysfs_new(indio_dev, chan);
-}
-
static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
int j, ret, attrcount = 0;
@@ -517,8 +415,6 @@ static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
int j;
for (j = 0; j < indio_dev->num_channels; j++) {
- if (indio_dev->channels[j].event_mask != 0)
- return true;
if (indio_dev->channels[j].num_event_specs != 0)
return true;
}
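For drivers still carrying the *_new callbacks, the converted call sites above pin down the signatures that remain after this cleanup; the prototypes below are inferred from those call sites (channel plus explicit type/direction and, for the value accessors, an info selector and a fixed-point val/val2 pair) rather than quoted from iio.h.

int (*read_event_config)(struct iio_dev *indio_dev,
			 const struct iio_chan_spec *chan,
			 enum iio_event_type type,
			 enum iio_event_direction dir);
int (*write_event_config)(struct iio_dev *indio_dev,
			  const struct iio_chan_spec *chan,
			  enum iio_event_type type,
			  enum iio_event_direction dir,
			  int state);
int (*read_event_value)(struct iio_dev *indio_dev,
			const struct iio_chan_spec *chan,
			enum iio_event_type type,
			enum iio_event_direction dir,
			enum iio_event_info info,
			int *val, int *val2);
int (*write_event_value)(struct iio_dev *indio_dev,
			 const struct iio_chan_spec *chan,
			 enum iio_event_type type,
			 enum iio_event_direction dir,
			 enum iio_event_info info,
			 int val, int val2);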
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index bf5e70a32d3f..766fab24b720 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -55,15 +55,7 @@ static struct attribute *iio_trig_dev_attrs[] = {
&dev_attr_name.attr,
NULL,
};
-
-static struct attribute_group iio_trig_attr_group = {
- .attrs = iio_trig_dev_attrs,
-};
-
-static const struct attribute_group *iio_trig_attr_groups[] = {
- &iio_trig_attr_group,
- NULL
-};
+ATTRIBUTE_GROUPS(iio_trig_dev);
int iio_trigger_register(struct iio_trigger *trig_info)
{
@@ -318,7 +310,7 @@ static ssize_t iio_trigger_read_current(struct device *dev,
* iio_trigger_write_current() - trigger consumer sysfs set current trigger
*
* For trigger consumers the current_trigger interface allows the trigger
- * used for this device to be specified at run time based on the triggers
+ * used for this device to be specified at run time based on the trigger's
* name.
**/
static ssize_t iio_trigger_write_current(struct device *dev,
@@ -356,7 +348,7 @@ static ssize_t iio_trigger_write_current(struct device *dev,
indio_dev->trig = trig;
- if (oldtrig && indio_dev->trig != oldtrig)
+ if (oldtrig)
iio_trigger_put(oldtrig);
if (indio_dev->trig)
iio_trigger_get(indio_dev->trig);
@@ -403,7 +395,7 @@ static void iio_trig_release(struct device *device)
static struct device_type iio_trig_type = {
.release = iio_trig_release,
- .groups = iio_trig_attr_groups,
+ .groups = iio_trig_dev_groups,
};
static void iio_trig_subirqmask(struct irq_data *d)
@@ -506,6 +498,23 @@ static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
return *r == data;
}
+/**
+ * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
+ * @dev: Device to allocate iio_trigger for
+ * @fmt: trigger name format. If it includes format
+ * specifiers, the additional arguments following
+ * format are formatted and inserted in the resulting
+ * string replacing their respective specifiers.
+ *
+ * Managed iio_trigger_alloc. iio_trigger allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * If an iio_trigger allocated with this function needs to be freed separately,
+ * devm_iio_trigger_free() must be used.
+ *
+ * RETURNS:
+ * Pointer to allocated iio_trigger on success, NULL on failure.
+ */
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
const char *fmt, ...)
{
@@ -532,6 +541,13 @@ struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
+/**
+ * devm_iio_trigger_free - Resource-managed iio_trigger_free()
+ * @dev: Device this iio_trigger belongs to
+ * @iio_trig: the iio_trigger associated with the device
+ *
+ * Free iio_trigger allocated with devm_iio_trigger_alloc().
+ */
void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
{
int rc;
diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c
index 95c6fc81c2c7..7134e8ada09a 100644
--- a/drivers/iio/kfifo_buf.c
+++ b/drivers/iio/kfifo_buf.c
@@ -42,7 +42,6 @@ static int iio_request_update_kfifo(struct iio_buffer *r)
} else {
kfifo_reset_out(&buf->kf);
}
- r->stufftoread = false;
mutex_unlock(&buf->user_lock);
return ret;
@@ -108,7 +107,7 @@ static int iio_store_to_kfifo(struct iio_buffer *r,
ret = kfifo_in(&kf->kf, data, 1);
if (ret != 1)
return -EBUSY;
- r->stufftoread = true;
+
wake_up_interruptible_poll(&r->pollq, POLLIN | POLLRDNORM);
return 0;
@@ -127,13 +126,6 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
ret = -EINVAL;
else
ret = kfifo_to_user(&kf->kf, buf, n, &copied);
-
- if (kfifo_is_empty(&kf->kf))
- r->stufftoread = false;
- /* verify it is still empty to avoid race */
- if (!kfifo_is_empty(&kf->kf))
- r->stufftoread = true;
-
mutex_unlock(&kf->user_lock);
if (ret < 0)
return ret;
@@ -141,6 +133,18 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
return copied;
}
+static bool iio_kfifo_buf_data_available(struct iio_buffer *r)
+{
+ struct iio_kfifo *kf = iio_to_kfifo(r);
+ bool empty;
+
+ mutex_lock(&kf->user_lock);
+ empty = kfifo_is_empty(&kf->kf);
+ mutex_unlock(&kf->user_lock);
+
+ return !empty;
+}
+
static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
{
struct iio_kfifo *kf = iio_to_kfifo(buffer);
@@ -153,6 +157,7 @@ static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
static const struct iio_buffer_access_funcs kfifo_access_funcs = {
.store_to = &iio_store_to_kfifo,
.read_first_n = &iio_read_first_n_kfifo,
+ .data_available = iio_kfifo_buf_data_available,
.request_update = &iio_request_update_kfifo,
.get_bytes_per_datum = &iio_get_bytes_per_datum_kfifo,
.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index a022f27c6690..d12b2a0dbfbc 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -27,6 +27,17 @@ config APDS9300
To compile this driver as a module, choose M here: the
module will be called apds9300.
+config CM32181
+ depends on I2C
+ tristate "CM32181 driver"
+ help
+ Say Y here if you use cm32181.
+ This option enables support for the Capella cm32181
+ ambient light sensor.
+
+ To compile this driver as a module, choose M here:
+ the module will be called cm32181.
+
config CM36651
depends on I2C
tristate "CM36651 driver"
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index daa327f39e04..60e35ac07ff0 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -5,6 +5,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ADJD_S311) += adjd_s311.o
obj-$(CONFIG_APDS9300) += apds9300.o
+obj-$(CONFIG_CM32181) += cm32181.o
obj-$(CONFIG_CM36651) += cm36651.o
obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o
obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 83d15c5baf64..f3068477b466 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -155,7 +155,12 @@ done:
BIT(IIO_CHAN_INFO_INT_TIME), \
.channel2 = (IIO_MOD_LIGHT_##_color), \
.scan_index = (_scan_idx), \
- .scan_type = IIO_ST('u', 10, 16, 0), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 10, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
}
static const struct iio_chan_spec adjd_s311_channels[] = {
diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c
index 51097bbd59c9..9ddde0ca9c34 100644
--- a/drivers/iio/light/apds9300.c
+++ b/drivers/iio/light/apds9300.c
@@ -344,10 +344,10 @@ static const struct iio_info apds9300_info_no_irq = {
static const struct iio_info apds9300_info = {
.driver_module = THIS_MODULE,
.read_raw = apds9300_read_raw,
- .read_event_value_new = apds9300_read_thresh,
- .write_event_value_new = apds9300_write_thresh,
- .read_event_config_new = apds9300_read_interrupt_config,
- .write_event_config_new = apds9300_write_interrupt_config,
+ .read_event_value = apds9300_read_thresh,
+ .write_event_value = apds9300_write_thresh,
+ .read_event_config = apds9300_read_interrupt_config,
+ .write_event_config = apds9300_write_interrupt_config,
};
static const struct iio_event_spec apds9300_event_spec[] = {
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
new file mode 100644
index 000000000000..f17b4e6183c6
--- /dev/null
+++ b/drivers/iio/light/cm32181.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright (C) 2013 Capella Microsystems Inc.
+ * Author: Kevin Tsai <ktsai@capellamicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2, as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/init.h>
+
+/* Registers Address */
+#define CM32181_REG_ADDR_CMD 0x00
+#define CM32181_REG_ADDR_ALS 0x04
+#define CM32181_REG_ADDR_STATUS 0x06
+#define CM32181_REG_ADDR_ID 0x07
+
+/* Number of Configurable Registers */
+#define CM32181_CONF_REG_NUM 0x01
+
+/* CMD register */
+#define CM32181_CMD_ALS_ENABLE 0x00
+#define CM32181_CMD_ALS_DISABLE 0x01
+#define CM32181_CMD_ALS_INT_EN 0x02
+
+#define CM32181_CMD_ALS_IT_SHIFT 6
+#define CM32181_CMD_ALS_IT_MASK (0x0F << CM32181_CMD_ALS_IT_SHIFT)
+#define CM32181_CMD_ALS_IT_DEFAULT (0x00 << CM32181_CMD_ALS_IT_SHIFT)
+
+#define CM32181_CMD_ALS_SM_SHIFT 11
+#define CM32181_CMD_ALS_SM_MASK (0x03 << CM32181_CMD_ALS_SM_SHIFT)
+#define CM32181_CMD_ALS_SM_DEFAULT (0x01 << CM32181_CMD_ALS_SM_SHIFT)
+
+#define CM32181_MLUX_PER_BIT 5 /* ALS_SM=01 IT=800ms */
+#define CM32181_MLUX_PER_BIT_BASE_IT 800000 /* Based on IT=800ms */
+#define CM32181_CALIBSCALE_DEFAULT 1000
+#define CM32181_CALIBSCALE_RESOLUTION 1000
+#define MLUX_PER_LUX 1000
+
+static const u8 cm32181_reg[CM32181_CONF_REG_NUM] = {
+ CM32181_REG_ADDR_CMD,
+};
+
+static const int als_it_bits[] = {12, 8, 0, 1, 2, 3};
+static const int als_it_value[] = {25000, 50000, 100000, 200000, 400000,
+ 800000};
+
+struct cm32181_chip {
+ struct i2c_client *client;
+ struct mutex lock;
+ u16 conf_regs[CM32181_CONF_REG_NUM];
+ int calibscale;
+};
+
+/**
+ * cm32181_reg_init() - Initialize CM32181 registers
+ * @cm32181: pointer of struct cm32181.
+ *
+ * Initialize CM32181 ambient light sensor register to default values.
+ *
+ * Return: 0 for success; otherwise an error code.
+ */
+static int cm32181_reg_init(struct cm32181_chip *cm32181)
+{
+ struct i2c_client *client = cm32181->client;
+ int i;
+ s32 ret;
+
+ ret = i2c_smbus_read_word_data(client, CM32181_REG_ADDR_ID);
+ if (ret < 0)
+ return ret;
+
+ /* check device ID */
+ if ((ret & 0xFF) != 0x81)
+ return -ENODEV;
+
+ /* Default Values */
+ cm32181->conf_regs[CM32181_REG_ADDR_CMD] = CM32181_CMD_ALS_ENABLE |
+ CM32181_CMD_ALS_IT_DEFAULT | CM32181_CMD_ALS_SM_DEFAULT;
+ cm32181->calibscale = CM32181_CALIBSCALE_DEFAULT;
+
+ /* Initialize registers */
+ for (i = 0; i < CM32181_CONF_REG_NUM; i++) {
+ ret = i2c_smbus_write_word_data(client, cm32181_reg[i],
+ cm32181->conf_regs[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * cm32181_read_als_it() - Get sensor integration time (ms)
+ * @cm32181: pointer of struct cm32181
+ * @val: pointer of int to load the als_it value.
+ *
+ * Report the current integration time in milliseconds.
+ *
+ * Return: IIO_VAL_INT for success, otherwise -EINVAL.
+ */
+static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val)
+{
+ u16 als_it;
+ int i;
+
+ als_it = cm32181->conf_regs[CM32181_REG_ADDR_CMD];
+ als_it &= CM32181_CMD_ALS_IT_MASK;
+ als_it >>= CM32181_CMD_ALS_IT_SHIFT;
+ for (i = 0; i < ARRAY_SIZE(als_it_bits); i++) {
+ if (als_it == als_it_bits[i]) {
+ *val = als_it_value[i];
+ return IIO_VAL_INT;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * cm32181_write_als_it() - Write sensor integration time
+ * @cm32181: pointer of struct cm32181.
+ * @val: integration time in milliseconds.
+ *
+ * Convert integration time (ms) to sensor value.
+ *
+ * Return: i2c_smbus_write_word_data command return value.
+ */
+static int cm32181_write_als_it(struct cm32181_chip *cm32181, int val)
+{
+ struct i2c_client *client = cm32181->client;
+ u16 als_it;
+ int ret, i, n;
+
+ n = ARRAY_SIZE(als_it_value);
+ for (i = 0; i < n; i++)
+ if (val <= als_it_value[i])
+ break;
+ if (i >= n)
+ i = n - 1;
+
+ als_it = als_it_bits[i];
+ als_it <<= CM32181_CMD_ALS_IT_SHIFT;
+
+ mutex_lock(&cm32181->lock);
+ cm32181->conf_regs[CM32181_REG_ADDR_CMD] &=
+ ~CM32181_CMD_ALS_IT_MASK;
+ cm32181->conf_regs[CM32181_REG_ADDR_CMD] |=
+ als_it;
+ ret = i2c_smbus_write_word_data(client, CM32181_REG_ADDR_CMD,
+ cm32181->conf_regs[CM32181_REG_ADDR_CMD]);
+ mutex_unlock(&cm32181->lock);
+
+ return ret;
+}
+
+/**
+ * cm32181_get_lux() - report current lux value
+ * @cm32181: pointer of struct cm32181.
+ *
+ * Convert sensor raw data to lux. The result depends on the integration
+ * time and the calibscale value.
+ *
+ * Return: lux value on success, otherwise a negative error code.
+ */
+static int cm32181_get_lux(struct cm32181_chip *cm32181)
+{
+ struct i2c_client *client = cm32181->client;
+ int ret;
+ int als_it;
+ unsigned long lux;
+
+ ret = cm32181_read_als_it(cm32181, &als_it);
+ if (ret < 0)
+ return -EINVAL;
+
+ lux = CM32181_MLUX_PER_BIT;
+ lux *= CM32181_MLUX_PER_BIT_BASE_IT;
+ lux /= als_it;
+
+ ret = i2c_smbus_read_word_data(client, CM32181_REG_ADDR_ALS);
+ if (ret < 0)
+ return ret;
+
+ lux *= ret;
+ lux *= cm32181->calibscale;
+ lux /= CM32181_CALIBSCALE_RESOLUTION;
+ lux /= MLUX_PER_LUX;
+
+ if (lux > 0xFFFF)
+ lux = 0xFFFF;
+
+ return lux;
+}
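Working the arithmetic above through once makes the scaling visible: with the defaults (ALS_SM=01, IT=800 ms) each count is worth 5 mlux, so an assumed raw reading of 1000 counts at calibscale=1000 comes out as 5000 mlux, i.e. 5 lux. A standalone restatement of the calculation:

#include <stdio.h>

#define MLUX_PER_BIT		5	/* ALS_SM=01, IT=800ms */
#define MLUX_PER_BIT_BASE_IT	800000	/* reference IT, same unit as als_it */
#define CALIBSCALE_RESOLUTION	1000
#define MLUX_PER_LUX		1000

int main(void)
{
	unsigned long raw = 1000;	/* assumed ALS register reading */
	unsigned long als_it = 800000;	/* current IT, same unit as the table */
	unsigned long calibscale = 1000; /* default calibration */
	unsigned long lux;

	lux = MLUX_PER_BIT * MLUX_PER_BIT_BASE_IT / als_it;	/* mlux/count */
	lux = lux * raw * calibscale / CALIBSCALE_RESOLUTION;	/* mlux */
	lux /= MLUX_PER_LUX;					/* lux */

	printf("%lu lux\n", lux);	/* prints 5 for these inputs */
	return 0;
}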
+
+static int cm32181_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct cm32181_chip *cm32181 = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = cm32181_get_lux(cm32181);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ *val = cm32181->calibscale;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_INT_TIME:
+ ret = cm32181_read_als_it(cm32181, val);
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+static int cm32181_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct cm32181_chip *cm32181 = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ cm32181->calibscale = val;
+ return 0;
+ case IIO_CHAN_INFO_INT_TIME:
+ ret = cm32181_write_als_it(cm32181, val);
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * cm32181_get_it_available() - Get available ALS IT value
+ * @dev: pointer to struct device.
+ * @attr: pointer to struct device_attribute.
+ * @buf: pointer to the output string buffer.
+ *
+ * Display the available integration time values in milliseconds.
+ *
+ * Return: string length.
+ */
+static ssize_t cm32181_get_it_available(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, n, len;
+
+ n = ARRAY_SIZE(als_it_value);
+ for (i = 0, len = 0; i < n; i++)
+ len += sprintf(buf + len, "%d ", als_it_value[i]);
+ return len + sprintf(buf + len, "\n");
+}
+
+static const struct iio_chan_spec cm32181_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ }
+};
+
+static IIO_DEVICE_ATTR(in_illuminance_integration_time_available,
+ S_IRUGO, cm32181_get_it_available, NULL, 0);
+
+static struct attribute *cm32181_attributes[] = {
+ &iio_dev_attr_in_illuminance_integration_time_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group cm32181_attribute_group = {
+ .attrs = cm32181_attributes
+};
+
+static const struct iio_info cm32181_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &cm32181_read_raw,
+ .write_raw = &cm32181_write_raw,
+ .attrs = &cm32181_attribute_group,
+};
+
+static int cm32181_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct cm32181_chip *cm32181;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*cm32181));
+ if (!indio_dev) {
+ dev_err(&client->dev, "devm_iio_device_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ cm32181 = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ cm32181->client = client;
+
+ mutex_init(&cm32181->lock);
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = cm32181_channels;
+ indio_dev->num_channels = ARRAY_SIZE(cm32181_channels);
+ indio_dev->info = &cm32181_info;
+ indio_dev->name = id->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = cm32181_reg_init(cm32181);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s: register init failed\n",
+ __func__);
+ return ret;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s: regist device failed\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cm32181_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ return 0;
+}
+
+static const struct i2c_device_id cm32181_id[] = {
+ { "cm32181", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, cm32181_id);
+
+static const struct of_device_id cm32181_of_match[] = {
+ { .compatible = "capella,cm32181" },
+ { }
+};
+
+static struct i2c_driver cm32181_driver = {
+ .driver = {
+ .name = "cm32181",
+ .of_match_table = of_match_ptr(cm32181_of_match),
+ .owner = THIS_MODULE,
+ },
+ .id_table = cm32181_id,
+ .probe = cm32181_probe,
+ .remove = cm32181_remove,
+};
+
+module_i2c_driver(cm32181_driver);
+
+MODULE_AUTHOR("Kevin Tsai <ktsai@capellamicro.com>");
+MODULE_DESCRIPTION("CM32181 ambient light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 21df57130018..0a142af83e25 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651,
return -EINVAL;
}
- return IIO_VAL_INT_PLUS_MICRO;
+ return IIO_VAL_INT;
}
static int cm36651_write_int_time(struct cm36651_data *cm36651,
@@ -488,7 +488,11 @@ static int cm36651_write_raw(struct iio_dev *indio_dev,
}
static int cm36651_read_prox_thresh(struct iio_dev *indio_dev,
- u64 event_code, int *val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
{
struct cm36651_data *cm36651 = iio_priv(indio_dev);
@@ -498,7 +502,11 @@ static int cm36651_read_prox_thresh(struct iio_dev *indio_dev,
}
static int cm36651_write_prox_thresh(struct iio_dev *indio_dev,
- u64 event_code, int val)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
{
struct cm36651_data *cm36651 = iio_priv(indio_dev);
struct i2c_client *client = cm36651->client;
@@ -520,7 +528,10 @@ static int cm36651_write_prox_thresh(struct iio_dev *indio_dev,
}
static int cm36651_write_prox_event_config(struct iio_dev *indio_dev,
- u64 event_code, int state)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
{
struct cm36651_data *cm36651 = iio_priv(indio_dev);
int cmd, ret = -EINVAL;
@@ -536,7 +547,9 @@ static int cm36651_write_prox_event_config(struct iio_dev *indio_dev,
}
static int cm36651_read_prox_event_config(struct iio_dev *indio_dev,
- u64 event_code)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
{
struct cm36651_data *cm36651 = iio_priv(indio_dev);
int event_en;
@@ -559,12 +572,22 @@ static int cm36651_read_prox_event_config(struct iio_dev *indio_dev,
.channel2 = IIO_MOD_LIGHT_##_color, \
} \
+static const struct iio_event_spec cm36651_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }
+};
+
static const struct iio_chan_spec cm36651_channels[] = {
{
.type = IIO_PROXIMITY,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_INT_TIME),
- .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER)
+ .event_spec = cm36651_event_spec,
+ .num_event_specs = ARRAY_SIZE(cm36651_event_spec),
},
CM36651_LIGHT_CHANNEL(RED, CM36651_LIGHT_CHANNEL_IDX_RED),
CM36651_LIGHT_CHANNEL(GREEN, CM36651_LIGHT_CHANNEL_IDX_GREEN),
@@ -693,7 +716,7 @@ static const struct of_device_id cm36651_of_match[] = {
static struct i2c_driver cm36651_driver = {
.driver = {
.name = "cm36651",
- .of_match_table = of_match_ptr(cm36651_of_match),
+ .of_match_table = cm36651_of_match,
.owner = THIS_MODULE,
},
.probe = cm36651_probe,
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index dc79835be308..5ea4a03c7e71 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -1388,10 +1388,10 @@ static const struct iio_chan_spec gp2ap020a00f_channels[] = {
static const struct iio_info gp2ap020a00f_info = {
.read_raw = &gp2ap020a00f_read_raw,
- .read_event_value_new = &gp2ap020a00f_read_event_val,
- .read_event_config_new = &gp2ap020a00f_read_event_config,
- .write_event_value_new = &gp2ap020a00f_write_event_val,
- .write_event_config_new = &gp2ap020a00f_write_event_config,
+ .read_event_value = &gp2ap020a00f_read_event_val,
+ .read_event_config = &gp2ap020a00f_read_event_config,
+ .write_event_value = &gp2ap020a00f_write_event_val,
+ .write_event_config = &gp2ap020a00f_write_event_config,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 8e8b9d722853..621541fb10a9 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -229,6 +229,17 @@ static int als_parse_report(struct platform_device *pdev,
dev_dbg(&pdev->dev, "als %x:%x\n", st->als_illum.index,
st->als_illum.report_id);
+ /* Set Sensitivity field ids, when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_LIGHT,
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
return ret;
}
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 45df2204614a..887fecf1f9bb 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -67,7 +67,12 @@ struct tcs3472_data {
.channel2 = IIO_MOD_LIGHT_##_color, \
.address = _addr, \
.scan_index = _si, \
- .scan_type = IIO_ST('u', 16, 16, 0), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
}
static const int tcs3472_agains[] = { 1, 4, 16, 60 };
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 5e5d9dea22c5..94daa9fc1247 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -460,10 +460,14 @@ static int tsl2563_write_raw(struct iio_dev *indio_dev,
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
- if (chan->channel == IIO_MOD_LIGHT_BOTH)
+ if (mask != IIO_CHAN_INFO_CALIBSCALE)
+ return -EINVAL;
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
chip->calib0 = calib_from_sysfs(val);
- else
+ else if (chan->channel2 == IIO_MOD_LIGHT_IR)
chip->calib1 = calib_from_sysfs(val);
+ else
+ return -EINVAL;
return 0;
}
@@ -472,14 +476,14 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
int *val2,
- long m)
+ long mask)
{
int ret = -EINVAL;
u32 calib0, calib1;
struct tsl2563_chip *chip = iio_priv(indio_dev);
mutex_lock(&chip->lock);
- switch (m) {
+ switch (mask) {
case IIO_CHAN_INFO_RAW:
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
@@ -498,7 +502,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
ret = tsl2563_get_adc(chip);
if (ret)
goto error_ret;
- if (chan->channel == 0)
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
*val = chip->data0;
else
*val = chip->data1;
@@ -510,7 +514,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_CALIBSCALE:
- if (chan->channel == 0)
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
*val = calib_to_sysfs(chip->calib0);
else
*val = calib_to_sysfs(chip->calib1);
@@ -702,10 +706,10 @@ static const struct iio_info tsl2563_info = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2563_read_raw,
.write_raw = &tsl2563_write_raw,
- .read_event_value_new = &tsl2563_read_thresh,
- .write_event_value_new = &tsl2563_write_thresh,
- .read_event_config_new = &tsl2563_read_interrupt_config,
- .write_event_config_new = &tsl2563_write_interrupt_config,
+ .read_event_value = &tsl2563_read_thresh,
+ .write_event_value = &tsl2563_write_thresh,
+ .read_event_config = &tsl2563_read_interrupt_config,
+ .write_event_config = &tsl2563_write_interrupt_config,
};
static int tsl2563_probe(struct i2c_client *client,
@@ -714,6 +718,7 @@ static int tsl2563_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
struct tsl2563_chip *chip;
struct tsl2563_platform_data *pdata = client->dev.platform_data;
+ struct device_node *np = client->dev.of_node;
int err = 0;
u8 id = 0;
@@ -750,6 +755,9 @@ static int tsl2563_probe(struct i2c_client *client,
if (pdata)
chip->cover_comp_gain = pdata->cover_comp_gain;
+ else if (np)
+ of_property_read_u32(np, "amstaos,cover-comp-gain",
+ &chip->cover_comp_gain);
else
chip->cover_comp_gain = 1;
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index ecb3341ef9c0..d948c4778ba6 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -56,7 +56,7 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
u8 rdy_mask, u8 data_reg, int *val)
{
int tries = 20;
- u16 buf;
+ __be16 buf;
int ret;
ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND,
@@ -179,13 +179,7 @@ static int vcnl4000_probe(struct i2c_client *client,
indio_dev->name = VCNL4000_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
- return iio_device_register(indio_dev);
-}
-
-static int vcnl4000_remove(struct i2c_client *client)
-{
- iio_device_unregister(i2c_get_clientdata(client));
- return 0;
+ return devm_iio_device_register(&client->dev, indio_dev);
}
static struct i2c_driver vcnl4000_driver = {
@@ -194,7 +188,6 @@ static struct i2c_driver vcnl4000_driver = {
.owner = THIS_MODULE,
},
.probe = vcnl4000_probe,
- .remove = vcnl4000_remove,
.id_table = vcnl4000_id,
};
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index ff284e5afd95..05423543f89d 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -85,6 +85,7 @@
#define AK8975_MAX_CONVERSION_TIMEOUT 500
#define AK8975_CONVERSION_DONE_POLL_TIME 10
#define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000)
+#define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256)
/*
* Per-instance context data for the device.
@@ -265,15 +266,15 @@ static int ak8975_setup(struct i2c_client *client)
*
* Since 1uT = 0.01 gauss, our final scale factor becomes:
*
- * Hadj = H * ((ASA + 128) / 256) * 3/10 * 100
- * Hadj = H * ((ASA + 128) * 30 / 256
+ * Hadj = H * ((ASA + 128) / 256) * 3/10 * 1/100
+ * Hadj = H * ((ASA + 128) * 0.003) / 256
*
* Since ASA doesn't change, we cache the resultant scale factor into the
* device context in ak8975_setup().
*/
- data->raw_to_gauss[0] = ((data->asa[0] + 128) * 30) >> 8;
- data->raw_to_gauss[1] = ((data->asa[1] + 128) * 30) >> 8;
- data->raw_to_gauss[2] = ((data->asa[2] + 128) * 30) >> 8;
+ data->raw_to_gauss[0] = RAW_TO_GAUSS(data->asa[0]);
+ data->raw_to_gauss[1] = RAW_TO_GAUSS(data->asa[1]);
+ data->raw_to_gauss[2] = RAW_TO_GAUSS(data->asa[2]);
return 0;
}
@@ -428,8 +429,9 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
return ak8975_read_axis(indio_dev, chan->address, val);
case IIO_CHAN_INFO_SCALE:
- *val = data->raw_to_gauss[chan->address];
- return IIO_VAL_INT;
+ *val = 0;
+ *val2 = data->raw_to_gauss[chan->address];
+ return IIO_VAL_INT_PLUS_MICRO;
}
return -EINVAL;
}
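
To make the change above concrete: the scale is now reported as an (integer, micro) pair instead of a truncated integer. The short user-space sketch below, with a made-up ASA byte, shows what RAW_TO_GAUSS() yields and how the IIO_VAL_INT_PLUS_MICRO pair reads:

#include <stdio.h>

#define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256)

int main(void)
{
	int asa = 128;			/* hypothetical sensitivity adjustment byte */
	int val = 0;			/* integer part reported by read_raw */
	int val2 = RAW_TO_GAUSS(asa);	/* micro part: micro-gauss per LSB */

	/* IIO_VAL_INT_PLUS_MICRO means scale = val + val2 * 1e-6, so for
	 * ASA = 128 the scale is 0.003000 G per LSB (0.3 uT per LSB). */
	printf("scale = %d.%06d gauss/LSB\n", val, val2);
	return 0;
}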
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index b26e1028a0a0..6d162b7e7af5 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -263,6 +263,18 @@ static int magn_3d_parse_report(struct platform_device *pdev,
st->magn[1].index, st->magn[1].report_id,
st->magn[2].index, st->magn[2].report_id);
+ /* Set Sensitivity field ids, when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_ORIENTATION,
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
+
return ret;
}
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index becf54496967..f66955fb3509 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -106,7 +106,7 @@ static ssize_t mag3110_show_int_plus_micros(char *buf,
while (n-- > 0)
len += scnprintf(buf + len, PAGE_SIZE - len,
- "%d.%d ", vals[n][0], vals[n][1]);
+ "%d.%06d ", vals[n][0], vals[n][1]);
/* replace trailing space by newline */
buf[len - 1] = '\n';
@@ -154,6 +154,9 @@ static int mag3110_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
switch (chan->type) {
case IIO_MAGN: /* in 0.1 uT / LSB */
ret = mag3110_read(data, buffer);
@@ -199,6 +202,9 @@ static int mag3110_write_raw(struct iio_dev *indio_dev,
struct mag3110_data *data = iio_priv(indio_dev);
int rate;
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
rate = mag3110_get_samp_freq_index(data, val, val2);
@@ -266,7 +272,11 @@ static const struct iio_chan_spec mag3110_channels[] = {
.type = IIO_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.scan_index = 3,
- .scan_type = IIO_ST('s', 8, 8, 0),
+ .scan_type = {
+ .sign = 's',
+ .realbits = 8,
+ .storagebits = 8,
+ },
},
IIO_CHAN_SOFT_TIMESTAMP(4),
};
diff --git a/drivers/iio/orientation/Kconfig b/drivers/iio/orientation/Kconfig
new file mode 100644
index 000000000000..58c62c837e12
--- /dev/null
+++ b/drivers/iio/orientation/Kconfig
@@ -0,0 +1,19 @@
+#
+# Inclinometer sensors
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Inclinometer sensors"
+
+config HID_SENSOR_INCLINOMETER_3D
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID Inclinometer 3D"
+ help
+ Say yes here to build support for the HID SENSOR
+ Inclinometer 3D.
+
+endmenu
diff --git a/drivers/iio/orientation/Makefile b/drivers/iio/orientation/Makefile
new file mode 100644
index 000000000000..2c97572ee919
--- /dev/null
+++ b/drivers/iio/orientation/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for industrial I/O Inclinometer sensor drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_HID_SENSOR_INCLINOMETER_3D) += hid-sensor-incl-3d.o
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
new file mode 100644
index 000000000000..070feab08faa
--- /dev/null
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -0,0 +1,428 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+enum incl_3d_channel {
+ CHANNEL_SCAN_INDEX_X,
+ CHANNEL_SCAN_INDEX_Y,
+ CHANNEL_SCAN_INDEX_Z,
+ INCLI_3D_CHANNEL_MAX,
+};
+
+struct incl_3d_state {
+ struct hid_sensor_hub_callbacks callbacks;
+ struct hid_sensor_common common_attributes;
+ struct hid_sensor_hub_attribute_info incl[INCLI_3D_CHANNEL_MAX];
+ u32 incl_val[INCLI_3D_CHANNEL_MAX];
+};
+
+static const u32 incl_3d_addresses[INCLI_3D_CHANNEL_MAX] = {
+ HID_USAGE_SENSOR_ORIENT_TILT_X,
+ HID_USAGE_SENSOR_ORIENT_TILT_Y,
+ HID_USAGE_SENSOR_ORIENT_TILT_Z
+};
+
+/* Channel definitions */
+static const struct iio_chan_spec incl_3d_channels[] = {
+ {
+ .type = IIO_INCLI,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_X,
+ }, {
+ .type = IIO_INCLI,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_Y,
+ }, {
+ .type = IIO_INCLI,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_Z,
+ }
+};
+
+/* Adjust channel real bits based on report descriptor */
+static void incl_3d_adjust_channel_bit_mask(struct iio_chan_spec *chan,
+ int size)
+{
+ chan->scan_type.sign = 's';
+ /* Real bits will change based on the report descriptor. */
+ chan->scan_type.realbits = size * 8;
+ /* Maximum size of a sample to capture is u32 */
+ chan->scan_type.storagebits = sizeof(u32) * 8;
+}
+
+/* Channel read_raw handler */
+static int incl_3d_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct incl_3d_state *incl_state = iio_priv(indio_dev);
+ int report_id = -1;
+ u32 address;
+ int ret_type;
+
+ *val = 0;
+ *val2 = 0;
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ report_id =
+ incl_state->incl[chan->scan_index].report_id;
+ address = incl_3d_addresses[chan->scan_index];
+ if (report_id >= 0)
+ *val = sensor_hub_input_attr_get_raw_value(
+ incl_state->common_attributes.hsdev,
+ HID_USAGE_SENSOR_INCLINOMETER_3D, address,
+ report_id);
+ else {
+ return -EINVAL;
+ }
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ *val = incl_state->incl[CHANNEL_SCAN_INDEX_X].units;
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = hid_sensor_convert_exponent(
+ incl_state->incl[CHANNEL_SCAN_INDEX_X].unit_expo);
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret_type = hid_sensor_read_samp_freq_value(
+ &incl_state->common_attributes, val, val2);
+ break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+ ret_type = hid_sensor_read_raw_hyst_value(
+ &incl_state->common_attributes, val, val2);
+ break;
+ default:
+ ret_type = -EINVAL;
+ break;
+ }
+
+ return ret_type;
+}
+
+/* Channel write_raw handler */
+static int incl_3d_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct incl_3d_state *incl_state = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = hid_sensor_write_samp_freq_value(
+ &incl_state->common_attributes, val, val2);
+ break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+ ret = hid_sensor_write_raw_hyst_value(
+ &incl_state->common_attributes, val, val2);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct iio_info incl_3d_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &incl_3d_read_raw,
+ .write_raw = &incl_3d_write_raw,
+};
+
+/* Function to push data to buffer */
+static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
+{
+ dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
+ iio_push_to_buffers(indio_dev, (u8 *)data);
+}
+
+/* Callback handler to send event after all samples are received and captured */
+static int incl_3d_proc_event(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct incl_3d_state *incl_state = iio_priv(indio_dev);
+
+ dev_dbg(&indio_dev->dev, "incl_3d_proc_event [%d]\n",
+ incl_state->common_attributes.data_ready);
+ if (incl_state->common_attributes.data_ready)
+ hid_sensor_push_data(indio_dev,
+ (u8 *)incl_state->incl_val,
+ sizeof(incl_state->incl_val));
+
+ return 0;
+}
+
+/* Capture samples in local storage */
+static int incl_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ size_t raw_len, char *raw_data,
+ void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct incl_3d_state *incl_state = iio_priv(indio_dev);
+ int ret = 0;
+
+ switch (usage_id) {
+ case HID_USAGE_SENSOR_ORIENT_TILT_X:
+ incl_state->incl_val[CHANNEL_SCAN_INDEX_X] = *(u32 *)raw_data;
+ break;
+ case HID_USAGE_SENSOR_ORIENT_TILT_Y:
+ incl_state->incl_val[CHANNEL_SCAN_INDEX_Y] = *(u32 *)raw_data;
+ break;
+ case HID_USAGE_SENSOR_ORIENT_TILT_Z:
+ incl_state->incl_val[CHANNEL_SCAN_INDEX_Z] = *(u32 *)raw_data;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Parse report which is specific to a usage id */
+static int incl_3d_parse_report(struct platform_device *pdev,
+ struct hid_sensor_hub_device *hsdev,
+ struct iio_chan_spec *channels,
+ unsigned usage_id,
+ struct incl_3d_state *st)
+{
+ int ret;
+
+ ret = sensor_hub_input_get_attribute_info(hsdev,
+ HID_INPUT_REPORT,
+ usage_id,
+ HID_USAGE_SENSOR_ORIENT_TILT_X,
+ &st->incl[CHANNEL_SCAN_INDEX_X]);
+ if (ret)
+ return ret;
+ incl_3d_adjust_channel_bit_mask(&channels[CHANNEL_SCAN_INDEX_X],
+ st->incl[CHANNEL_SCAN_INDEX_X].size);
+
+ ret = sensor_hub_input_get_attribute_info(hsdev,
+ HID_INPUT_REPORT,
+ usage_id,
+ HID_USAGE_SENSOR_ORIENT_TILT_Y,
+ &st->incl[CHANNEL_SCAN_INDEX_Y]);
+ if (ret)
+ return ret;
+ incl_3d_adjust_channel_bit_mask(&channels[CHANNEL_SCAN_INDEX_Y],
+ st->incl[CHANNEL_SCAN_INDEX_Y].size);
+
+ ret = sensor_hub_input_get_attribute_info(hsdev,
+ HID_INPUT_REPORT,
+ usage_id,
+ HID_USAGE_SENSOR_ORIENT_TILT_Z,
+ &st->incl[CHANNEL_SCAN_INDEX_Z]);
+ if (ret)
+ return ret;
+ incl_3d_adjust_channel_bit_mask(&channels[CHANNEL_SCAN_INDEX_Z],
+ st->incl[CHANNEL_SCAN_INDEX_Z].size);
+
+ dev_dbg(&pdev->dev, "incl_3d %x:%x, %x:%x, %x:%x\n",
+ st->incl[0].index,
+ st->incl[0].report_id,
+ st->incl[1].index, st->incl[1].report_id,
+ st->incl[2].index, st->incl[2].report_id);
+
+ /* Set Sensitivity field ids, when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_ORIENTATION,
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
+ return ret;
+}
+
+/* Function to initialize the processing for usage id */
+static int hid_incl_3d_probe(struct platform_device *pdev)
+{
+ int ret;
+ static char *name = "incli_3d";
+ struct iio_dev *indio_dev;
+ struct incl_3d_state *incl_state;
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_chan_spec *channels;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(struct incl_3d_state));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ incl_state = iio_priv(indio_dev);
+ incl_state->common_attributes.hsdev = hsdev;
+ incl_state->common_attributes.pdev = pdev;
+
+ ret = hid_sensor_parse_common_attributes(hsdev,
+ HID_USAGE_SENSOR_INCLINOMETER_3D,
+ &incl_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup common attributes\n");
+ return ret;
+ }
+
+ channels = kmemdup(incl_3d_channels, sizeof(incl_3d_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ dev_err(&pdev->dev, "failed to duplicate channels\n");
+ return -ENOMEM;
+ }
+
+ ret = incl_3d_parse_report(pdev, hsdev, channels,
+ HID_USAGE_SENSOR_INCLINOMETER_3D, incl_state);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup attributes\n");
+ goto error_free_dev_mem;
+ }
+
+ indio_dev->channels = channels;
+ indio_dev->num_channels = ARRAY_SIZE(incl_3d_channels);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &incl_3d_info;
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
+ goto error_free_dev_mem;
+ }
+ incl_state->common_attributes.data_ready = false;
+ ret = hid_sensor_setup_trigger(indio_dev, name,
+ &incl_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "trigger setup failed\n");
+ goto error_unreg_buffer_funcs;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "device register failed\n");
+ goto error_remove_trigger;
+ }
+
+ incl_state->callbacks.send_event = incl_3d_proc_event;
+ incl_state->callbacks.capture_sample = incl_3d_capture_sample;
+ incl_state->callbacks.pdev = pdev;
+ ret = sensor_hub_register_callback(hsdev,
+ HID_USAGE_SENSOR_INCLINOMETER_3D,
+ &incl_state->callbacks);
+ if (ret) {
+ dev_err(&pdev->dev, "callback reg failed\n");
+ goto error_iio_unreg;
+ }
+
+ return 0;
+
+error_iio_unreg:
+ iio_device_unregister(indio_dev);
+error_remove_trigger:
+ hid_sensor_remove_trigger(&incl_state->common_attributes);
+error_unreg_buffer_funcs:
+ iio_triggered_buffer_cleanup(indio_dev);
+error_free_dev_mem:
+ kfree(indio_dev->channels);
+ return ret;
+}
+
+/* Function to deinitialize the processing for usage id */
+static int hid_incl_3d_remove(struct platform_device *pdev)
+{
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct incl_3d_state *incl_state = iio_priv(indio_dev);
+
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_INCLINOMETER_3D);
+ iio_device_unregister(indio_dev);
+ hid_sensor_remove_trigger(&incl_state->common_attributes);
+ iio_triggered_buffer_cleanup(indio_dev);
+ kfree(indio_dev->channels);
+
+ return 0;
+}
+
+static struct platform_device_id hid_incl_3d_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-200086",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_incl_3d_ids);
+
+static struct platform_driver hid_incl_3d_platform_driver = {
+ .id_table = hid_incl_3d_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = hid_incl_3d_probe,
+ .remove = hid_incl_3d_remove,
+};
+module_platform_driver(hid_incl_3d_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor Inclinometer 3D");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 4f2e0f9bad8c..a8b9cae5c173 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -5,6 +5,18 @@
menu "Pressure sensors"
+config MPL3115
+ tristate "Freescale MPL3115A2 pressure sensor driver"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the Freescale MPL3115A2
+ pressure sensor / altimeter.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mpl3115.
+
config IIO_ST_PRESS
tristate "STMicroelectronics pressure sensor Driver"
depends on (I2C || SPI_MASTER) && SYSFS
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index be71464c2752..42bb9fcf5436 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -3,6 +3,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_MPL3115) += mpl3115.o
obj-$(CONFIG_IIO_ST_PRESS) += st_pressure.o
st_pressure-y := st_pressure_core.o
st_pressure-$(CONFIG_IIO_BUFFER) += st_pressure_buffer.o
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
new file mode 100644
index 000000000000..ac8c8ab723e5
--- /dev/null
+++ b/drivers/iio/pressure/mpl3115.c
@@ -0,0 +1,329 @@
+/*
+ * mpl3115.c - Support for Freescale MPL3115A2 pressure/temperature sensor
+ *
+ * Copyright (c) 2013 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * (7-bit I2C slave address 0x60)
+ *
+ * TODO: FIFO buffer, altimeter mode, oversampling, continuous mode,
+ * interrupts, user offset correction, raw mode
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/delay.h>
+
+#define MPL3115_STATUS 0x00
+#define MPL3115_OUT_PRESS 0x01 /* MSB first, 20 bit */
+#define MPL3115_OUT_TEMP 0x04 /* MSB first, 12 bit */
+#define MPL3115_WHO_AM_I 0x0c
+#define MPL3115_CTRL_REG1 0x26
+
+#define MPL3115_DEVICE_ID 0xc4
+
+#define MPL3115_STATUS_PRESS_RDY BIT(2)
+#define MPL3115_STATUS_TEMP_RDY BIT(1)
+
+#define MPL3115_CTRL_RESET BIT(2) /* software reset */
+#define MPL3115_CTRL_OST BIT(1) /* initiate measurement */
+#define MPL3115_CTRL_ACTIVE BIT(0) /* continuous measurement */
+#define MPL3115_CTRL_OS_258MS (BIT(5) | BIT(4)) /* 64x oversampling */
+
+struct mpl3115_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ u8 ctrl_reg1;
+};
+
+static int mpl3115_request(struct mpl3115_data *data)
+{
+ int ret, tries = 15;
+
+ /* trigger measurement */
+ ret = i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG1,
+ data->ctrl_reg1 | MPL3115_CTRL_OST);
+ if (ret < 0)
+ return ret;
+
+ while (tries-- > 0) {
+ ret = i2c_smbus_read_byte_data(data->client, MPL3115_CTRL_REG1);
+ if (ret < 0)
+ return ret;
+ /* wait for data ready, i.e. OST cleared */
+ if (!(ret & MPL3115_CTRL_OST))
+ break;
+ msleep(20);
+ }
+
+ if (tries < 0) {
+ dev_err(&data->client->dev, "data not ready\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mpl3115_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mpl3115_data *data = iio_priv(indio_dev);
+ s32 tmp = 0;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ switch (chan->type) {
+ case IIO_PRESSURE: /* in 0.25 pascal / LSB */
+ mutex_lock(&data->lock);
+ ret = mpl3115_request(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_OUT_PRESS, 3, (u8 *) &tmp);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(be32_to_cpu(tmp) >> 12, 23);
+ return IIO_VAL_INT;
+ case IIO_TEMP: /* in 0.0625 celsius / LSB */
+ mutex_lock(&data->lock);
+ ret = mpl3115_request(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_OUT_TEMP, 2, (u8 *) &tmp);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(be32_to_cpu(tmp) >> 20, 15);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ *val = 0;
+ *val2 = 250; /* want kilopascal */
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = 0;
+ *val2 = 62500;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ }
+ return -EINVAL;
+}
+
+static irqreturn_t mpl3115_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct mpl3115_data *data = iio_priv(indio_dev);
+ u8 buffer[16]; /* 32-bit channel + 16-bit channel + padding + ts */
+ int ret, pos = 0;
+
+ mutex_lock(&data->lock);
+ ret = mpl3115_request(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ goto done;
+ }
+
+ memset(buffer, 0, sizeof(buffer));
+ if (test_bit(0, indio_dev->active_scan_mask)) {
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_OUT_PRESS, 3, &buffer[pos]);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ goto done;
+ }
+ pos += 4;
+ }
+
+ if (test_bit(1, indio_dev->active_scan_mask)) {
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_OUT_TEMP, 2, &buffer[pos]);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ goto done;
+ }
+ }
+ mutex_unlock(&data->lock);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ iio_get_time_ns());
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static const struct iio_chan_spec mpl3115_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 20,
+ .storagebits = 32,
+ .shift = 12,
+ .endianness = IIO_BE,
+ }
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 12,
+ .storagebits = 16,
+ .shift = 4,
+ .endianness = IIO_BE,
+ }
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static const struct iio_info mpl3115_info = {
+ .read_raw = &mpl3115_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int mpl3115_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mpl3115_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, MPL3115_WHO_AM_I);
+ if (ret < 0)
+ return ret;
+ if (ret != MPL3115_DEVICE_ID)
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ i2c_set_clientdata(client, indio_dev);
+ indio_dev->info = &mpl3115_info;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = mpl3115_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mpl3115_channels);
+
+ /* software reset; the chip aborts the I2C transfer, so the write appears to fail */
+ i2c_smbus_write_byte_data(client, MPL3115_CTRL_REG1,
+ MPL3115_CTRL_RESET);
+ msleep(50);
+
+ data->ctrl_reg1 = MPL3115_CTRL_OS_258MS;
+ ret = i2c_smbus_write_byte_data(client, MPL3115_CTRL_REG1,
+ data->ctrl_reg1);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ mpl3115_trigger_handler, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto buffer_cleanup;
+ return 0;
+
+buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
+}
+
+static int mpl3115_standby(struct mpl3115_data *data)
+{
+ return i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG1,
+ data->ctrl_reg1 & ~MPL3115_CTRL_ACTIVE);
+}
+
+static int mpl3115_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ mpl3115_standby(iio_priv(indio_dev));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mpl3115_suspend(struct device *dev)
+{
+ return mpl3115_standby(iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev))));
+}
+
+static int mpl3115_resume(struct device *dev)
+{
+ struct mpl3115_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+
+ return i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG1,
+ data->ctrl_reg1);
+}
+
+static SIMPLE_DEV_PM_OPS(mpl3115_pm_ops, mpl3115_suspend, mpl3115_resume);
+#define MPL3115_PM_OPS (&mpl3115_pm_ops)
+#else
+#define MPL3115_PM_OPS NULL
+#endif
+
+static const struct i2c_device_id mpl3115_id[] = {
+ { "mpl3115", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mpl3115_id);
+
+static struct i2c_driver mpl3115_driver = {
+ .driver = {
+ .name = "mpl3115",
+ .pm = MPL3115_PM_OPS,
+ },
+ .probe = mpl3115_probe,
+ .remove = mpl3115_remove,
+ .id_table = mpl3115_id,
+};
+module_i2c_driver(mpl3115_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Freescale MPL3115 pressure/temperature driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 5ceda710f516..77089399359b 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -3,6 +3,8 @@ menuconfig INFINIBAND
depends on PCI || BROKEN
depends on HAS_IOMEM
depends on NET
+ depends on INET
+ depends on m || IPV6 != m
---help---
Core support for InfiniBand (IB). Make sure to also select
any protocols you wish to use as well as drivers for your
@@ -38,8 +40,7 @@ config INFINIBAND_USER_MEM
config INFINIBAND_ADDR_TRANS
bool
- depends on INET
- depends on !(INFINIBAND = y && IPV6 = m)
+ depends on INFINIBAND
default y
source "drivers/infiniband/hw/mthca/Kconfig"
@@ -53,6 +54,7 @@ source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
source "drivers/infiniband/hw/nes/Kconfig"
source "drivers/infiniband/hw/ocrdma/Kconfig"
+source "drivers/infiniband/hw/usnic/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index 1fe69888515f..bf508b5550c4 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
obj-$(CONFIG_MLX5_INFINIBAND) += hw/mlx5/
obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
obj-$(CONFIG_INFINIBAND_OCRDMA) += hw/ocrdma/
+obj-$(CONFIG_INFINIBAND_USNIC) += hw/usnic/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index c8bbaef1becb..3ab3865544bb 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,8 +1,9 @@
-infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o
+infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_cm.o
user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
- ib_cm.o iw_cm.o $(infiniband-y)
+ ib_cm.o iw_cm.o ib_addr.o \
+ $(infiniband-y)
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
$(user_access-y)
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index e90f2b2eabd7..8172d37f9add 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -86,6 +86,8 @@ int rdma_addr_size(struct sockaddr *addr)
}
EXPORT_SYMBOL(rdma_addr_size);
+static struct rdma_addr_client self;
+
void rdma_addr_register_client(struct rdma_addr_client *client)
{
atomic_set(&client->refcount, 1);
@@ -119,7 +121,8 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
}
EXPORT_SYMBOL(rdma_copy_addr);
-int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
+int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr,
+ u16 *vlan_id)
{
struct net_device *dev;
int ret = -EADDRNOTAVAIL;
@@ -142,6 +145,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
return ret;
ret = rdma_copy_addr(dev_addr, dev, NULL);
+ if (vlan_id)
+ *vlan_id = rdma_vlan_dev_vlan_id(dev);
dev_put(dev);
break;
@@ -153,6 +158,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
&((struct sockaddr_in6 *) addr)->sin6_addr,
dev, 1)) {
ret = rdma_copy_addr(dev_addr, dev, NULL);
+ if (vlan_id)
+ *vlan_id = rdma_vlan_dev_vlan_id(dev);
break;
}
}
@@ -238,7 +245,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
src_in->sin_addr.s_addr = fl4.saddr;
if (rt->dst.dev->flags & IFF_LOOPBACK) {
- ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
+ ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL);
if (!ret)
memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
goto put;
@@ -286,7 +293,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
}
if (dst->dev->flags & IFF_LOOPBACK) {
- ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
+ ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL);
if (!ret)
memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
goto put;
@@ -437,6 +444,88 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
}
EXPORT_SYMBOL(rdma_addr_cancel);
+struct resolve_cb_context {
+ struct rdma_dev_addr *addr;
+ struct completion comp;
+};
+
+static void resolve_cb(int status, struct sockaddr *src_addr,
+ struct rdma_dev_addr *addr, void *context)
+{
+ memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct
+ rdma_dev_addr));
+ complete(&((struct resolve_cb_context *)context)->comp);
+}
+
+int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac,
+ u16 *vlan_id)
+{
+ int ret = 0;
+ struct rdma_dev_addr dev_addr;
+ struct resolve_cb_context ctx;
+ struct net_device *dev;
+
+ union {
+ struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid_addr, dgid_addr;
+
+
+ ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+ if (ret)
+ return ret;
+
+ ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid);
+ if (ret)
+ return ret;
+
+ memset(&dev_addr, 0, sizeof(dev_addr));
+
+ ctx.addr = &dev_addr;
+ init_completion(&ctx.comp);
+ ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
+ &dev_addr, 1000, resolve_cb, &ctx);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&ctx.comp);
+
+ memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
+ dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
+ if (!dev)
+ return -ENODEV;
+ if (vlan_id)
+ *vlan_id = rdma_vlan_dev_vlan_id(dev);
+ dev_put(dev);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_addr_find_dmac_by_grh);
+
+int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
+{
+ int ret = 0;
+ struct rdma_dev_addr dev_addr;
+ union {
+ struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } gid_addr;
+
+ ret = rdma_gid2ip(&gid_addr._sockaddr, sgid);
+
+ if (ret)
+ return ret;
+ memset(&dev_addr, 0, sizeof(dev_addr));
+ ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
+ if (ret)
+ return ret;
+
+ memcpy(smac, dev_addr.src_dev_addr, ETH_ALEN);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_addr_find_smac_by_sgid);
+
static int netevent_callback(struct notifier_block *self, unsigned long event,
void *ctx)
{
@@ -461,11 +550,13 @@ static int __init addr_init(void)
return -ENOMEM;
register_netevent_notifier(&nb);
+ rdma_addr_register_client(&self);
return 0;
}
static void __exit addr_cleanup(void)
{
+ rdma_addr_unregister_client(&self);
unregister_netevent_notifier(&nb);
destroy_workqueue(addr_wq);
}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f2ef7ef0f36f..0601b9daf840 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -47,6 +47,7 @@
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
+#include <linux/etherdevice.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
@@ -177,6 +178,8 @@ struct cm_av {
struct ib_ah_attr ah_attr;
u16 pkey_index;
u8 timeout;
+ u8 valid;
+ u8 smac[ETH_ALEN];
};
struct cm_work {
@@ -346,6 +349,23 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
grh, &av->ah_attr);
}
+int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac)
+{
+ struct cm_id_private *cm_id_priv;
+
+ cm_id_priv = container_of(id, struct cm_id_private, id);
+
+ if (smac != NULL)
+ memcpy(cm_id_priv->av.smac, smac, sizeof(cm_id_priv->av.smac));
+
+ if (alt_smac != NULL)
+ memcpy(cm_id_priv->alt_av.smac, alt_smac,
+ sizeof(cm_id_priv->alt_av.smac));
+
+ return 0;
+}
+EXPORT_SYMBOL(ib_update_cm_av);
+
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
struct cm_device *cm_dev;
@@ -376,6 +396,9 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
&av->ah_attr);
av->timeout = path->packet_life_time + 1;
+ memcpy(av->smac, path->smac, sizeof(av->smac));
+
+ av->valid = 1;
return 0;
}
@@ -1554,6 +1577,9 @@ static int cm_req_handler(struct cm_work *work)
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
+
+ memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
+ work->path[0].vlan_id = cm_id_priv->av.ah_attr.vlan_id;
ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
if (ret) {
ib_get_cached_gid(work->port->cm_dev->ib_device,
@@ -3500,6 +3526,32 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
IB_QP_DEST_QPN | IB_QP_RQ_PSN;
qp_attr->ah_attr = cm_id_priv->av.ah_attr;
+ if (!cm_id_priv->av.valid) {
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return -EINVAL;
+ }
+ if (cm_id_priv->av.ah_attr.vlan_id != 0xffff) {
+ qp_attr->vlan_id = cm_id_priv->av.ah_attr.vlan_id;
+ *qp_attr_mask |= IB_QP_VID;
+ }
+ if (!is_zero_ether_addr(cm_id_priv->av.smac)) {
+ memcpy(qp_attr->smac, cm_id_priv->av.smac,
+ sizeof(qp_attr->smac));
+ *qp_attr_mask |= IB_QP_SMAC;
+ }
+ if (cm_id_priv->alt_av.valid) {
+ if (cm_id_priv->alt_av.ah_attr.vlan_id != 0xffff) {
+ qp_attr->alt_vlan_id =
+ cm_id_priv->alt_av.ah_attr.vlan_id;
+ *qp_attr_mask |= IB_QP_ALT_VID;
+ }
+ if (!is_zero_ether_addr(cm_id_priv->alt_av.smac)) {
+ memcpy(qp_attr->alt_smac,
+ cm_id_priv->alt_av.smac,
+ sizeof(qp_attr->alt_smac));
+ *qp_attr_mask |= IB_QP_ALT_SMAC;
+ }
+ }
qp_attr->path_mtu = cm_id_priv->path_mtu;
qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 8e49db690f33..199958d9ddc8 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -340,7 +340,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
int ret;
if (addr->sa_family != AF_IB) {
- ret = rdma_translate_ip(addr, dev_addr);
+ ret = rdma_translate_ip(addr, dev_addr, NULL);
} else {
cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
ret = 0;
@@ -365,7 +365,9 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
return -EINVAL;
mutex_lock(&lock);
- iboe_addr_get_sgid(dev_addr, &iboe_gid);
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ &iboe_gid);
+
memcpy(&gid, dev_addr->src_dev_addr +
rdma_addr_gid_offset(dev_addr), sizeof gid);
if (listen_id_priv &&
@@ -603,6 +605,7 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
{
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
+ union ib_gid sgid;
mutex_lock(&id_priv->qp_mutex);
if (!id_priv->id.qp) {
@@ -625,6 +628,20 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
if (ret)
goto out;
+ ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
+ qp_attr.ah_attr.grh.sgid_index, &sgid);
+ if (ret)
+ goto out;
+
+ if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
+ == RDMA_TRANSPORT_IB &&
+ rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
+ == IB_LINK_LAYER_ETHERNET) {
+ ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
+
+ if (ret)
+ goto out;
+ }
if (conn_param)
qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
@@ -725,6 +742,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
else
ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
qp_attr_mask);
+
if (qp_attr->qp_state == IB_QPS_RTR)
qp_attr->rq_psn = id_priv->seq_num;
break;
@@ -1266,6 +1284,15 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
struct rdma_id_private *listen_id, *conn_id;
struct rdma_cm_event event;
int offset, ret;
+ u8 smac[ETH_ALEN];
+ u8 alt_smac[ETH_ALEN];
+ u8 *psmac = smac;
+ u8 *palt_smac = alt_smac;
+ int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) ==
+ RDMA_TRANSPORT_IB) &&
+ (rdma_port_get_link_layer(cm_id->device,
+ ib_event->param.req_rcvd.port) ==
+ IB_LINK_LAYER_ETHERNET));
listen_id = cm_id->context;
if (!cma_check_req_qp_type(&listen_id->id, ib_event))
@@ -1310,12 +1337,29 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
if (ret)
goto err3;
+ if (is_iboe) {
+ if (ib_event->param.req_rcvd.primary_path != NULL)
+ rdma_addr_find_smac_by_sgid(
+ &ib_event->param.req_rcvd.primary_path->sgid,
+ psmac, NULL);
+ else
+ psmac = NULL;
+ if (ib_event->param.req_rcvd.alternate_path != NULL)
+ rdma_addr_find_smac_by_sgid(
+ &ib_event->param.req_rcvd.alternate_path->sgid,
+ palt_smac, NULL);
+ else
+ palt_smac = NULL;
+ }
/*
* Acquire mutex to prevent user executing rdma_destroy_id()
* while we're accessing the cm_id.
*/
mutex_lock(&lock);
- if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
+ if (is_iboe)
+ ib_update_cm_av(cm_id, psmac, palt_smac);
+ if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
+ (conn_id->id.qp_type != IB_QPT_UD))
ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
mutex_unlock(&lock);
mutex_unlock(&conn_id->handler_mutex);
@@ -1474,7 +1518,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
conn_id->state = RDMA_CM_CONNECT;
- ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
+ ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
if (ret) {
mutex_unlock(&conn_id->handler_mutex);
rdma_destroy_id(new_cm_id);
@@ -1873,7 +1917,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
struct cma_work *work;
int ret;
struct net_device *ndev = NULL;
- u16 vid;
+
work = kzalloc(sizeof *work, GFP_KERNEL);
if (!work)
@@ -1897,10 +1941,14 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err2;
}
- vid = rdma_vlan_dev_vlan_id(ndev);
+ route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
+ memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
+ memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);
- iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
- iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ &route->path_rec->sgid);
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
+ &route->path_rec->dgid);
route->path_rec->hop_limit = 1;
route->path_rec->reversible = 1;
@@ -2063,6 +2111,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
RDMA_CM_ADDR_RESOLVED))
goto out;
+ memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
if (!status && !id_priv->cma_dev)
status = cma_acquire_dev(id_priv, NULL);
@@ -2072,10 +2121,8 @@ static void addr_handler(int status, struct sockaddr *src_addr,
goto out;
event.event = RDMA_CM_EVENT_ADDR_ERROR;
event.status = status;
- } else {
- memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
+ } else
event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
- }
if (id_priv->id.event_handler(&id_priv->id, &event)) {
cma_exch(id_priv, RDMA_CM_DESTROYING);
@@ -2310,7 +2357,7 @@ static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
inet_get_local_port_range(&init_net, &low, &high);
remaining = (high - low) + 1;
- rover = net_random() % remaining + low;
+ rover = prandom_u32() % remaining + low;
retry:
if (last_used_port != rover &&
!idr_find(ps, (unsigned short) rover)) {
@@ -2480,8 +2527,11 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
return 0;
sin6 = (struct sockaddr_in6 *) addr;
- if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
- !sin6->sin6_scope_id)
+
+ if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
+ return 0;
+
+ if (!sin6->sin6_scope_id)
return -EINVAL;
dev_addr->bound_dev_if = sin6->sin6_scope_id;
@@ -2556,6 +2606,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
if (ret)
goto err1;
+ memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
if (!cma_any_addr(addr)) {
ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
if (ret)
@@ -2566,7 +2617,6 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
goto err1;
}
- memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
if (addr->sa_family == AF_INET)
id_priv->afonly = 1;
@@ -3295,7 +3345,8 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
err = -EINVAL;
goto out2;
}
- iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ &mc->multicast.ib->rec.port_gid);
work->id = id_priv;
work->mc = mc;
INIT_WORK(&work->work, iboe_mcast_work_handler);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index a565af5c2d2e..87d1936f5c1c 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -49,4 +49,6 @@ void ib_sysfs_cleanup(void);
int ib_cache_setup(void);
void ib_cache_cleanup(void);
+int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
+ struct ib_qp_attr *qp_attr, int *qp_attr_mask);
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index c47c2034ca71..3d2e489ab732 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -181,9 +181,16 @@ static void add_ref(struct iw_cm_id *cm_id)
static void rem_ref(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
+ int cb_destroy;
+
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
- if (iwcm_deref_id(cm_id_priv) &&
- test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
+
+ /*
+ * Test bit before deref in case the cm_id gets freed on another
+ * thread.
+ */
+ cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
+ if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
BUG_ON(!list_empty(&cm_id_priv->work_list));
free_cm_id(cm_id_priv);
}
@@ -327,7 +334,6 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
unsigned long flags;
- int ret;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
/*
@@ -343,7 +349,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
cm_id_priv->state = IW_CM_STATE_DESTROYING;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
/* destroy the listening endpoint */
- ret = cm_id->device->iwcm->destroy_listen(cm_id);
+ cm_id->device->iwcm->destroy_listen(cm_id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
break;
case IW_CM_STATE_ESTABLISHED:
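
The rem_ref() change above closes a use-after-free window: once iwcm_deref_id() drops the last reference, another thread may free the cm_id, so the IWCM_F_CALLBACK_DESTROY bit has to be sampled before the deref. A toy sketch of the same ordering rule, with hypothetical names and C11 atomics standing in for the kernel primitives:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct sketch_obj {
	atomic_int refcount;
	atomic_bool cb_destroy;
};

static void sketch_obj_put(struct sketch_obj *o)
{
	/* Snapshot the flag while we still hold a reference;
	 * after the final deref the object may already be gone. */
	bool destroy = atomic_load(&o->cb_destroy);

	if (atomic_fetch_sub(&o->refcount, 1) == 1 && destroy)
		free(o);	/* we dropped the last reference */
}
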
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 9838ca484389..f820958e4047 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -42,7 +42,7 @@
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
-
+#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include "sa.h"
@@ -556,6 +556,13 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
ah_attr->grh.hop_limit = rec->hop_limit;
ah_attr->grh.traffic_class = rec->traffic_class;
}
+ if (force_grh) {
+ memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);
+ ah_attr->vlan_id = rec->vlan_id;
+ } else {
+ ah_attr->vlan_id = 0xffff;
+ }
+
return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);
@@ -670,6 +677,9 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
mad->data, &rec);
+ rec.vlan_id = 0xffff;
+ memset(rec.dmac, 0, ETH_ALEN);
+ memset(rec.smac, 0, ETH_ALEN);
query->callback(status, &rec, query->context);
} else
query->callback(status, NULL, query->context);
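
The path-record hunk above initializes vlan_id to 0xffff and zeroes dmac/smac before handing the record to the callback. The series appears to treat any value outside the 12-bit VLAN range as a "no VLAN" sentinel; a one-line sketch of that convention (helper name hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* VLAN IDs are 12-bit, so 0..4095 are real tags and 0xffff means "none". */
static bool sketch_vlan_present(uint16_t vlan_id)
{
	return vlan_id < 0x1000;
}
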
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index faad2caf22b1..7d3292c7b4b4 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -613,6 +613,7 @@ static ssize_t show_node_type(struct device *device,
case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type);
case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type);
case RDMA_NODE_USNIC: return sprintf(buf, "%d: usNIC\n", dev->node_type);
+ case RDMA_NODE_USNIC_UDP: return sprintf(buf, "%d: usNIC UDP\n", dev->node_type);
case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
default: return sprintf(buf, "%d: <unknown>\n", dev->node_type);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ab8b1c30b36b..56a4b7ca7ee3 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -655,24 +655,14 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
struct rdma_route *route)
{
- struct rdma_dev_addr *dev_addr;
- struct net_device *dev;
- u16 vid = 0;
resp->num_paths = route->num_paths;
switch (route->num_paths) {
case 0:
- dev_addr = &route->addr.dev_addr;
- dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
- if (dev) {
- vid = rdma_vlan_dev_vlan_id(dev);
- dev_put(dev);
- }
-
- iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
- dev_addr->dst_dev_addr, vid);
- iboe_addr_get_sgid(dev_addr,
- (union ib_gid *) &resp->ib_route[0].sgid);
+ rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
+ (union ib_gid *)&resp->ib_route[0].dgid);
+ rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
+ (union ib_gid *)&resp->ib_route[0].sgid);
resp->ib_route[0].pkey = cpu_to_be16(0xffff);
break;
case 2:
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index bdc842e9faef..a283274a5a09 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -49,12 +49,20 @@
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
do { \
- (udata)->inbuf = (void __user *) (ibuf); \
+ (udata)->inbuf = (const void __user *) (ibuf); \
(udata)->outbuf = (void __user *) (obuf); \
(udata)->inlen = (ilen); \
(udata)->outlen = (olen); \
} while (0)
+#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen) \
+ do { \
+ (udata)->inbuf = (ilen) ? (const void __user *) (ibuf) : NULL; \
+ (udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL; \
+ (udata)->inlen = (ilen); \
+ (udata)->outlen = (olen); \
+ } while (0)
+
/*
* Our lifetime rules for these structs are the following:
*
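
INIT_UDATA_BUF_OR_NULL differs from INIT_UDATA only in that a zero length forces the corresponding buffer pointer to NULL, so later copy helpers can tell "no buffer supplied" apart from a stale user address. A plain-C sketch of the same rule with simplified types (names hypothetical):

#include <stddef.h>

struct sketch_udata {
	const void *inbuf;
	void *outbuf;
	size_t inlen, outlen;
};

static void sketch_init_udata_buf_or_null(struct sketch_udata *u,
					   const void *ibuf, void *obuf,
					   size_t ilen, size_t olen)
{
	u->inbuf  = ilen ? ibuf : NULL;	/* zero length means "no buffer" */
	u->outbuf = olen ? obuf : NULL;
	u->inlen  = ilen;
	u->outlen = olen;
}
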
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 65f6e7dc380c..ea6203ee7bcc 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -40,6 +40,7 @@
#include <asm/uaccess.h>
#include "uverbs.h"
+#include "core_priv.h"
struct uverbs_lock_class {
struct lock_class_key key;
@@ -1961,6 +1962,9 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
if (qp->real_qp == qp) {
+ ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
+ if (ret)
+ goto out;
ret = qp->device->modify_qp(qp, attr,
modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
} else {
@@ -2593,6 +2597,9 @@ out_put:
static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
union ib_flow_spec *ib_spec)
{
+ if (kern_spec->reserved)
+ return -EINVAL;
+
ib_spec->type = kern_spec->type;
switch (ib_spec->type) {
@@ -2646,6 +2653,9 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
void *ib_spec;
int i;
+ if (ucore->inlen < sizeof(cmd))
+ return -EINVAL;
+
if (ucore->outlen < sizeof(resp))
return -ENOSPC;
@@ -2671,6 +2681,10 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
(cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
return -EINVAL;
+ if (cmd.flow_attr.reserved[0] ||
+ cmd.flow_attr.reserved[1])
+ return -EINVAL;
+
if (cmd.flow_attr.num_of_specs) {
kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
GFP_KERNEL);
@@ -2731,6 +2745,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
i, cmd.flow_attr.size);
+ err = -EINVAL;
goto err_free;
}
flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
@@ -2791,10 +2806,16 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
struct ib_uobject *uobj;
int ret;
+ if (ucore->inlen < sizeof(cmd))
+ return -EINVAL;
+
ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
if (ret)
return ret;
+ if (cmd.comp_mask)
+ return -EINVAL;
+
uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
file->ucontext);
if (!uobj)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 34386943ebcf..08219fb3338b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -668,25 +668,30 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count)
return -EINVAL;
+ if (ex_hdr.cmd_hdr_reserved)
+ return -EINVAL;
+
if (ex_hdr.response) {
if (!hdr.out_words && !ex_hdr.provider_out_words)
return -EINVAL;
+
+ if (!access_ok(VERIFY_WRITE,
+ (void __user *) (unsigned long) ex_hdr.response,
+ (hdr.out_words + ex_hdr.provider_out_words) * 8))
+ return -EFAULT;
} else {
if (hdr.out_words || ex_hdr.provider_out_words)
return -EINVAL;
}
- INIT_UDATA(&ucore,
- (hdr.in_words) ? buf : 0,
- (unsigned long)ex_hdr.response,
- hdr.in_words * 8,
- hdr.out_words * 8);
-
- INIT_UDATA(&uhw,
- (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0,
- (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0,
- ex_hdr.provider_in_words * 8,
- ex_hdr.provider_out_words * 8);
+ INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
+ hdr.in_words * 8, hdr.out_words * 8);
+
+ INIT_UDATA_BUF_OR_NULL(&uhw,
+ buf + ucore.inlen,
+ (unsigned long) ex_hdr.response + ucore.outlen,
+ ex_hdr.provider_in_words * 8,
+ ex_hdr.provider_out_words * 8);
err = uverbs_ex_cmd_table[command](file,
&ucore,
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index d4f6ddf72ffa..3ac795115438 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -44,6 +44,9 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
+#include <rdma/ib_addr.h>
+
+#include "core_priv.h"
int ib_rate_to_mult(enum ib_rate rate)
{
@@ -116,6 +119,8 @@ rdma_node_get_transport(enum rdma_node_type node_type)
return RDMA_TRANSPORT_IWARP;
case RDMA_NODE_USNIC:
return RDMA_TRANSPORT_USNIC;
+ case RDMA_NODE_USNIC_UDP:
+ return RDMA_TRANSPORT_USNIC_UDP;
default:
BUG();
return 0;
@@ -133,6 +138,7 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_
return IB_LINK_LAYER_INFINIBAND;
case RDMA_TRANSPORT_IWARP:
case RDMA_TRANSPORT_USNIC:
+ case RDMA_TRANSPORT_USNIC_UDP:
return IB_LINK_LAYER_ETHERNET;
default:
return IB_LINK_LAYER_UNSPECIFIED;
@@ -192,8 +198,28 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
u32 flow_class;
u16 gid_index;
int ret;
+ int is_eth = (rdma_port_get_link_layer(device, port_num) ==
+ IB_LINK_LAYER_ETHERNET);
memset(ah_attr, 0, sizeof *ah_attr);
+ if (is_eth) {
+ if (!(wc->wc_flags & IB_WC_GRH))
+ return -EPROTOTYPE;
+
+ if (wc->wc_flags & IB_WC_WITH_SMAC &&
+ wc->wc_flags & IB_WC_WITH_VLAN) {
+ memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
+ ah_attr->vlan_id = wc->vlan_id;
+ } else {
+ ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
+ ah_attr->dmac, &ah_attr->vlan_id);
+ if (ret)
+ return ret;
+ }
+ } else {
+ ah_attr->vlan_id = 0xffff;
+ }
+
ah_attr->dlid = wc->slid;
ah_attr->sl = wc->sl;
ah_attr->src_path_bits = wc->dlid_path_bits;
@@ -476,7 +502,9 @@ EXPORT_SYMBOL(ib_create_qp);
static const struct {
int valid;
enum ib_qp_attr_mask req_param[IB_QPT_MAX];
+ enum ib_qp_attr_mask req_param_add_eth[IB_QPT_MAX];
enum ib_qp_attr_mask opt_param[IB_QPT_MAX];
+ enum ib_qp_attr_mask opt_param_add_eth[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
[IB_QPS_RESET] = {
[IB_QPS_RESET] = { .valid = 1 },
@@ -557,6 +585,12 @@ static const struct {
IB_QP_MAX_DEST_RD_ATOMIC |
IB_QP_MIN_RNR_TIMER),
},
+ .req_param_add_eth = {
+ [IB_QPT_RC] = (IB_QP_SMAC),
+ [IB_QPT_UC] = (IB_QP_SMAC),
+ [IB_QPT_XRC_INI] = (IB_QP_SMAC),
+ [IB_QPT_XRC_TGT] = (IB_QP_SMAC)
+ },
.opt_param = {
[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
IB_QP_QKEY),
@@ -576,7 +610,21 @@ static const struct {
IB_QP_QKEY),
[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
IB_QP_QKEY),
- }
+ },
+ .opt_param_add_eth = {
+ [IB_QPT_RC] = (IB_QP_ALT_SMAC |
+ IB_QP_VID |
+ IB_QP_ALT_VID),
+ [IB_QPT_UC] = (IB_QP_ALT_SMAC |
+ IB_QP_VID |
+ IB_QP_ALT_VID),
+ [IB_QPT_XRC_INI] = (IB_QP_ALT_SMAC |
+ IB_QP_VID |
+ IB_QP_ALT_VID),
+ [IB_QPT_XRC_TGT] = (IB_QP_ALT_SMAC |
+ IB_QP_VID |
+ IB_QP_ALT_VID)
+ }
}
},
[IB_QPS_RTR] = {
@@ -779,7 +827,8 @@ static const struct {
};
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
- enum ib_qp_type type, enum ib_qp_attr_mask mask)
+ enum ib_qp_type type, enum ib_qp_attr_mask mask,
+ enum rdma_link_layer ll)
{
enum ib_qp_attr_mask req_param, opt_param;
@@ -798,6 +847,13 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
req_param = qp_state_table[cur_state][next_state].req_param[type];
opt_param = qp_state_table[cur_state][next_state].opt_param[type];
+ if (ll == IB_LINK_LAYER_ETHERNET) {
+ req_param |= qp_state_table[cur_state][next_state].
+ req_param_add_eth[type];
+ opt_param |= qp_state_table[cur_state][next_state].
+ opt_param_add_eth[type];
+ }
+
if ((mask & req_param) != req_param)
return 0;
@@ -808,10 +864,51 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
+int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
+ struct ib_qp_attr *qp_attr, int *qp_attr_mask)
+{
+ int ret = 0;
+ union ib_gid sgid;
+
+ if ((*qp_attr_mask & IB_QP_AV) &&
+ (rdma_port_get_link_layer(qp->device, qp_attr->ah_attr.port_num) == IB_LINK_LAYER_ETHERNET)) {
+ ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
+ qp_attr->ah_attr.grh.sgid_index, &sgid);
+ if (ret)
+ goto out;
+ if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
+ rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac);
+ rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac);
+ qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
+ } else {
+ ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid,
+ qp_attr->ah_attr.dmac, &qp_attr->vlan_id);
+ if (ret)
+ goto out;
+ ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr->smac, NULL);
+ if (ret)
+ goto out;
+ }
+ *qp_attr_mask |= IB_QP_SMAC;
+ if (qp_attr->vlan_id < 0xFFFF)
+ *qp_attr_mask |= IB_QP_VID;
+ }
+out:
+ return ret;
+}
+EXPORT_SYMBOL(ib_resolve_eth_l2_attrs);
+
+
int ib_modify_qp(struct ib_qp *qp,
struct ib_qp_attr *qp_attr,
int qp_attr_mask)
{
+ int ret;
+
+ ret = ib_resolve_eth_l2_attrs(qp, qp_attr, &qp_attr_mask);
+ if (ret)
+ return ret;
+
return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
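
ib_modify_qp_is_ok() now receives the port's link layer and, for Ethernet, ORs the req_param_add_eth/opt_param_add_eth entries into the base masks before validating the caller's attribute mask. A compact sketch of that composition with made-up mask values; names and types are hypothetical, only the check mirrors the logic in the hunk above:

#include <stdbool.h>

enum sketch_ll { SKETCH_LL_IB, SKETCH_LL_ETH };

struct sketch_transition {
	unsigned req, opt;		/* base required/optional masks */
	unsigned req_eth, opt_eth;	/* extra masks when the link layer is Ethernet */
};

static bool sketch_mask_ok(const struct sketch_transition *t,
			   unsigned mask, enum sketch_ll ll)
{
	unsigned req = t->req, opt = t->opt;

	if (ll == SKETCH_LL_ETH) {
		req |= t->req_eth;
		opt |= t->opt_eth;
	}
	if ((mask & req) != req)		/* every required bit present? */
		return false;
	return (mask & ~(req | opt)) == 0;	/* nothing outside req|opt set */
}
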
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index d53cf519f42a..00400c352c1a 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -1082,6 +1082,7 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
/* Initialize network device */
if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
+ ret = -ENOMEM;
iounmap(mmio_regs);
goto bail4;
}
@@ -1151,7 +1152,8 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
goto bail10;
}
- if (c2_register_device(c2dev))
+ ret = c2_register_device(c2dev);
+ if (ret)
goto bail10;
return 0;
diff --git a/drivers/infiniband/hw/amso1100/c2_intr.c b/drivers/infiniband/hw/amso1100/c2_intr.c
index 8951db4ae29d..3a17d9b36dba 100644
--- a/drivers/infiniband/hw/amso1100/c2_intr.c
+++ b/drivers/infiniband/hw/amso1100/c2_intr.c
@@ -169,7 +169,8 @@ static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
* We should never get here, as the adapter should
* never send us a reply that we're not expecting.
*/
- vq_repbuf_free(c2dev, host_msg);
+ if (reply_msg != NULL)
+ vq_repbuf_free(c2dev, host_msg);
pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n");
return;
}
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index b7c986990053..d2a6d961344b 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -576,7 +576,8 @@ int c2_rnic_init(struct c2_dev *c2dev)
goto bail4;
/* Initialize cached the adapter limits */
- if (c2_rnic_query(c2dev, &c2dev->props))
+ err = c2_rnic_query(c2dev, &c2dev->props);
+ if (err)
goto bail5;
/* Initialize the PD pool */
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 12fef76c791c..d286bdebe2ab 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
-#define VLAN_NONE 0xfff
-#define FILTER_SEL_VLAN_NONE 0xffff
-#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
-#define FILTER_SEL_WIDTH_VIN_P_FC \
- (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
-#define FILTER_SEL_WIDTH_TAG_P_FC \
- (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
-#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
-
-static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
- struct l2t_entry *l2t)
-{
- unsigned int ntuple = 0;
- u32 viid;
-
- switch (dev->rdev.lldi.filt_mode) {
-
- /* default filter mode */
- case HW_TPL_FR_MT_PR_IV_P_FC:
- if (l2t->vlan == VLAN_NONE)
- ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
- else {
- ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
- ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC;
- }
- ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
- FILTER_SEL_WIDTH_VLD_TAG_P_FC;
- break;
- case HW_TPL_FR_MT_PR_OV_P_FC: {
- viid = cxgb4_port_viid(l2t->neigh->dev);
-
- ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
- ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
- ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
- ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
- FILTER_SEL_WIDTH_VLD_TAG_P_FC;
- break;
- }
- default:
- break;
- }
- return ntuple;
-}
-
static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req;
@@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep)
req->local_ip = la->sin_addr.s_addr;
req->peer_ip = ra->sin_addr.s_addr;
req->opt0 = cpu_to_be64(opt0);
- req->params = cpu_to_be32(select_ntuple(ep->com.dev,
- ep->dst, ep->l2t));
+ req->params = cpu_to_be32(cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
+ ep->l2t));
req->opt2 = cpu_to_be32(opt2);
} else {
req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
@@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep)
req6->peer_ip_lo = *((__be64 *)
(ra6->sin6_addr.s6_addr + 8));
req6->opt0 = cpu_to_be64(opt0);
- req6->params = cpu_to_be32(
- select_ntuple(ep->com.dev, ep->dst,
- ep->l2t));
+ req6->params = cpu_to_be32(cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
+ ep->l2t));
req6->opt2 = cpu_to_be32(opt2);
}
} else {
@@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep)
t5_req->peer_ip = ra->sin_addr.s_addr;
t5_req->opt0 = cpu_to_be64(opt0);
t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
- select_ntuple(ep->com.dev,
- ep->dst, ep->l2t)));
+ cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
+ ep->l2t)));
t5_req->opt2 = cpu_to_be32(opt2);
} else {
t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep)
(ra6->sin6_addr.s6_addr + 8));
t5_req6->opt0 = cpu_to_be64(opt0);
t5_req6->params = (__force __be64)cpu_to_be32(
- select_ntuple(ep->com.dev, ep->dst, ep->l2t));
+ cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
+ ep->l2t));
t5_req6->opt2 = cpu_to_be32(opt2);
}
}
@@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
memset(req, 0, sizeof(*req));
req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
- req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
+ req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
sin = (struct sockaddr_in *)&ep->com.local_addr;
req->le.lport = sin->sin_port;
@@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
/*
* Allocate a server TID.
*/
- if (dev->rdev.lldi.enable_fw_ofld_conn)
+ if (dev->rdev.lldi.enable_fw_ofld_conn &&
+ ep->com.local_addr.ss_family == AF_INET)
ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
cm_id->local_addr.ss_family, ep);
else
@@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
/*
* Calculate the server tid from filter hit index from cpl_rx_pkt.
*/
- stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
- - dev->rdev.lldi.tids->sftid_base
- + dev->rdev.lldi.tids->nstids;
+ stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
if (!lep) {
@@ -3392,12 +3352,15 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
goto free_dst;
}
+ neigh_release(neigh);
step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
window = (__force u16) htons((__force u16)tcph->window);
/* Calculate filter portion for LE region. */
- filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
+ filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
+ dev->rdev.lldi.ports[0],
+ e));
/*
* Synthesize the cpl_pass_accept_req. We have everything except the
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 4cb8eb24497c..41b11951a30a 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -76,7 +76,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
INIT_ULPTX_WR(req, wr_len, 0, 0);
req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
(wait ? FW_WR_COMPL(1) : 0));
- req->wr.wr_lo = wait ? (__force __be64)&wr_wait : 0;
+ req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
@@ -173,7 +173,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
return ret;
}
-int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
u32 remain = len;
u32 dmalen;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 00d6861a6a18..2e89356c46fa 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -1329,7 +1329,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
if (!smi_reset2init &&
!ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
- attr_mask)) {
+ attr_mask, IB_LINK_LAYER_UNSPECIFIED)) {
ret = -EINVAL;
ehca_err(ibqp->device,
"Invalid qp transition new_state=%x cur_state=%x "
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 0857a9c3cd3d..face87602dc1 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -463,7 +463,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask))
+ attr_mask, IB_LINK_LAYER_UNSPECIFIED))
goto inval;
if (attr_mask & IB_QP_AV) {
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index 24ab11a9ad1e..fc01deac1d3c 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -1,6 +1,6 @@
config MLX4_INFINIBAND
tristate "Mellanox ConnectX HCA support"
- depends on NETDEVICES && ETHERNET && PCI
+ depends on NETDEVICES && ETHERNET && PCI && INET
select NET_VENDOR_MELLANOX
select MLX4_CORE
---help---
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index a251becdaa98..170dca608042 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -39,25 +39,6 @@
#include "mlx4_ib.h"
-int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
- u8 *mac, int *is_mcast, u8 port)
-{
- struct in6_addr in6;
-
- *is_mcast = 0;
-
- memcpy(&in6, ah_attr->grh.dgid.raw, sizeof in6);
- if (rdma_link_local_addr(&in6))
- rdma_get_ll_mac(&in6, mac);
- else if (rdma_is_multicast_addr(&in6)) {
- rdma_get_mcast_mac(&in6, mac);
- *is_mcast = 1;
- } else
- return -EINVAL;
-
- return 0;
-}
-
static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
struct mlx4_ib_ah *ah)
{
@@ -92,21 +73,18 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
{
struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
struct mlx4_dev *dev = ibdev->dev;
- union ib_gid sgid;
- u8 mac[6];
- int err;
int is_mcast;
+ struct in6_addr in6;
u16 vlan_tag;
- err = mlx4_ib_resolve_grh(ibdev, ah_attr, mac, &is_mcast, ah_attr->port_num);
- if (err)
- return ERR_PTR(err);
-
- memcpy(ah->av.eth.mac, mac, 6);
- err = ib_get_cached_gid(pd->device, ah_attr->port_num, ah_attr->grh.sgid_index, &sgid);
- if (err)
- return ERR_PTR(err);
- vlan_tag = rdma_get_vlan_id(&sgid);
+ memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+ if (rdma_is_multicast_addr(&in6)) {
+ is_mcast = 1;
+ rdma_get_mcast_mac(&in6, ah->av.eth.mac);
+ } else {
+ memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
+ }
+ vlan_tag = ah_attr->vlan_id;
if (vlan_tag < 0x1000)
vlan_tag |= (ah_attr->sl & 7) << 13;
ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
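
create_iboe_ah() now takes the destination MAC and VLAN straight from the address handle attributes; when a real VLAN id is present (below 0x1000) the 3-bit service level is packed into the priority bits of the tag, as in an 802.1Q TCI. A sketch of just that packing (function name hypothetical):

#include <stdint.h>

static uint16_t sketch_pack_vlan_sl(uint16_t vlan_id, uint8_t sl)
{
	if (vlan_id < 0x1000)			/* a real VLAN is present */
		vlan_id |= (uint16_t)(sl & 7) << 13;	/* SL into the PCP bits */
	return vlan_id;
}
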
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 66dbf8062374..cc40f08ca8f1 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -798,6 +798,15 @@ repoll:
wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
else
wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
+ if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_VLAN_PRESENT_MASK) {
+ wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
+ MLX4_CQE_VID_MASK;
+ } else {
+ wc->vlan_id = 0xffff;
+ }
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ memcpy(wc->smac, cqe->smac, ETH_ALEN);
+ wc->wc_flags |= IB_WC_WITH_SMAC;
}
return 0;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1958c5ca792a..e81c5547e647 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -39,6 +39,8 @@
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
+#include <net/ipv6.h>
+#include <net/addrconf.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
@@ -55,6 +57,7 @@
#define DRV_RELDATE "April 4, 2008"
#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
+#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -92,21 +95,27 @@ static union ib_gid zgid;
static int check_flow_steering_support(struct mlx4_dev *dev)
{
+ int eth_num_ports = 0;
int ib_num_ports = 0;
- int i;
-
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
- ib_num_ports++;
- if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
- if (ib_num_ports || mlx4_is_mfunc(dev)) {
- pr_warn("Device managed flow steering is unavailable "
- "for IB ports or in multifunction env.\n");
- return 0;
+ int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;
+
+ if (dmfs) {
+ int i;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+ eth_num_ports++;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ ib_num_ports++;
+ dmfs &= (!ib_num_ports ||
+ (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
+ (!eth_num_ports ||
+ (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
+ if (ib_num_ports && mlx4_is_mfunc(dev)) {
+ pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
+ dmfs = 0;
}
- return 1;
}
- return 0;
+ return dmfs;
}
static int mlx4_ib_query_device(struct ib_device *ibdev,
@@ -165,7 +174,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
else
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
- if (check_flow_steering_support(dev->dev))
+ if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
}
@@ -338,7 +347,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
IB_WIDTH_4X : IB_WIDTH_1X;
props->active_speed = IB_SPEED_QDR;
- props->port_cap_flags = IB_PORT_CM_SUP;
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
props->max_msg_sz = mdev->dev->caps.max_msg_sz;
props->pkey_tbl_len = 1;
@@ -787,7 +796,6 @@ static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
union ib_gid *gid)
{
- u8 mac[6];
struct net_device *ndev;
int ret = 0;
@@ -801,11 +809,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
spin_unlock(&mdev->iboe.lock);
if (ndev) {
- rdma_get_mcast_mac((struct in6_addr *)gid, mac);
- rtnl_lock();
- dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
ret = 1;
- rtnl_unlock();
dev_put(ndev);
}
@@ -819,6 +823,7 @@ struct mlx4_ib_steering {
};
static int parse_flow_attr(struct mlx4_dev *dev,
+ u32 qp_num,
union ib_flow_spec *ib_spec,
struct _rule_hw *mlx4_spec)
{
@@ -834,6 +839,14 @@ static int parse_flow_attr(struct mlx4_dev *dev,
mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
break;
+ case IB_FLOW_SPEC_IB:
+ type = MLX4_NET_TRANS_RULE_ID_IB;
+ mlx4_spec->ib.l3_qpn =
+ cpu_to_be32(qp_num);
+ mlx4_spec->ib.qpn_mask =
+ cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
+ break;
+
case IB_FLOW_SPEC_IPV4:
type = MLX4_NET_TRANS_RULE_ID_IPV4;
@@ -865,6 +878,115 @@ static int parse_flow_attr(struct mlx4_dev *dev,
return mlx4_hw_rule_sz(dev, type);
}
+struct default_rules {
+ __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
+ __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
+ __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
+ __u8 link_layer;
+};
+static const struct default_rules default_table[] = {
+ {
+ .mandatory_fields = {IB_FLOW_SPEC_IPV4},
+ .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
+ .rules_create_list = {IB_FLOW_SPEC_IB},
+ .link_layer = IB_LINK_LAYER_INFINIBAND
+ }
+};
+
+static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
+ struct ib_flow_attr *flow_attr)
+{
+ int i, j, k;
+ void *ib_flow;
+ const struct default_rules *pdefault_rules = default_table;
+ u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
+
+ for (i = 0; i < sizeof(default_table)/sizeof(default_table[0]); i++,
+ pdefault_rules++) {
+ __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
+ memset(&field_types, 0, sizeof(field_types));
+
+ if (link_layer != pdefault_rules->link_layer)
+ continue;
+
+ ib_flow = flow_attr + 1;
+ /* we assume the specs are sorted */
+ for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
+ j < flow_attr->num_of_specs; k++) {
+ union ib_flow_spec *current_flow =
+ (union ib_flow_spec *)ib_flow;
+
+ /* same layer but different type */
+ if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
+ (pdefault_rules->mandatory_fields[k] &
+ IB_FLOW_SPEC_LAYER_MASK)) &&
+ (current_flow->type !=
+ pdefault_rules->mandatory_fields[k]))
+ goto out;
+
+ /* same layer, try match next one */
+ if (current_flow->type ==
+ pdefault_rules->mandatory_fields[k]) {
+ j++;
+ ib_flow +=
+ ((union ib_flow_spec *)ib_flow)->size;
+ }
+ }
+
+ ib_flow = flow_attr + 1;
+ for (j = 0; j < flow_attr->num_of_specs;
+ j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
+ for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
+ /* same layer and same type */
+ if (((union ib_flow_spec *)ib_flow)->type ==
+ pdefault_rules->mandatory_not_fields[k])
+ goto out;
+
+ return i;
+ }
+out:
+ return -1;
+}
+
+static int __mlx4_ib_create_default_rules(
+ struct mlx4_ib_dev *mdev,
+ struct ib_qp *qp,
+ const struct default_rules *pdefault_rules,
+ struct _rule_hw *mlx4_spec) {
+ int size = 0;
+ int i;
+
+ for (i = 0; i < sizeof(pdefault_rules->rules_create_list)/
+ sizeof(pdefault_rules->rules_create_list[0]); i++) {
+ int ret;
+ union ib_flow_spec ib_spec;
+ switch (pdefault_rules->rules_create_list[i]) {
+ case 0:
+ /* no rule */
+ continue;
+ case IB_FLOW_SPEC_IB:
+ ib_spec.type = IB_FLOW_SPEC_IB;
+ ib_spec.size = sizeof(struct ib_flow_spec_ib);
+
+ break;
+ default:
+ /* invalid rule */
+ return -EINVAL;
+ }
+ /* We must put empty rule, qpn is being ignored */
+ ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
+ mlx4_spec);
+ if (ret < 0) {
+ pr_info("invalid parsing\n");
+ return -EINVAL;
+ }
+
+ mlx4_spec = (void *)mlx4_spec + ret;
+ size += ret;
+ }
+ return size;
+}
+
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
int domain,
enum mlx4_net_trans_promisc_mode flow_type,
@@ -876,6 +998,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
struct mlx4_ib_dev *mdev = to_mdev(qp->device);
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
+ int default_flow;
static const u16 __mlx4_domain[] = {
[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
@@ -910,8 +1033,21 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
ib_flow = flow_attr + 1;
size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
+ /* Add default flows */
+ default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
+ if (default_flow >= 0) {
+ ret = __mlx4_ib_create_default_rules(
+ mdev, qp, default_table + default_flow,
+ mailbox->buf + size);
+ if (ret < 0) {
+ mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+ return -EINVAL;
+ }
+ size += ret;
+ }
for (i = 0; i < flow_attr->num_of_specs; i++) {
- ret = parse_flow_attr(mdev->dev, ib_flow, mailbox->buf + size);
+ ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
+ mailbox->buf + size);
if (ret < 0) {
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return -EINVAL;
@@ -1025,6 +1161,8 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
u64 reg_id;
struct mlx4_ib_steering *ib_steering = NULL;
+ enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
+ MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1036,7 +1174,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
!!(mqp->flags &
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
- MLX4_PROT_IB_IPV6, &reg_id);
+ prot, &reg_id);
if (err)
goto err_malloc;
@@ -1055,7 +1193,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
err_add:
mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
- MLX4_PROT_IB_IPV6, reg_id);
+ prot, reg_id);
err_malloc:
kfree(ib_steering);
@@ -1083,10 +1221,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int err;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
- u8 mac[6];
struct net_device *ndev;
struct mlx4_ib_gid_entry *ge;
u64 reg_id = 0;
+ enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
+ MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1109,7 +1248,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
}
err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
- MLX4_PROT_IB_IPV6, reg_id);
+ prot, reg_id);
if (err)
return err;
@@ -1121,13 +1260,8 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
if (ndev)
dev_hold(ndev);
spin_unlock(&mdev->iboe.lock);
- rdma_get_mcast_mac((struct in6_addr *)gid, mac);
- if (ndev) {
- rtnl_lock();
- dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
- rtnl_unlock();
+ if (ndev)
dev_put(ndev);
- }
list_del(&ge->list);
kfree(ge);
} else
@@ -1223,7 +1357,8 @@ static struct device_attribute *mlx4_class_attributes[] = {
&dev_attr_board_id
};
-static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
+static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
+ struct net_device *dev)
{
memcpy(eui, dev->dev_addr, 3);
memcpy(eui + 5, dev->dev_addr + 3, 3);
@@ -1259,161 +1394,377 @@ static void update_gids_task(struct work_struct *work)
MLX4_CMD_WRAPPED);
if (err)
pr_warn("set port command failed\n");
- else {
- memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
+ else
mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ kfree(gw);
+}
+
+static void reset_gids_task(struct work_struct *work)
+{
+ struct update_gid_work *gw =
+ container_of(work, struct update_gid_work, work);
+ struct mlx4_cmd_mailbox *mailbox;
+ union ib_gid *gids;
+ int err;
+ struct mlx4_dev *dev = gw->dev->dev;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ pr_warn("reset gid table failed\n");
+ goto free;
+ }
+
+ gids = mailbox->buf;
+ memcpy(gids, gw->gids, sizeof(gw->gids));
+
+ if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
+ IB_LINK_LAYER_ETHERNET) {
+ err = mlx4_cmd(dev, mailbox->dma,
+ MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
+ 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ pr_warn(KERN_WARNING
+ "set port %d command failed\n", gw->port);
}
mlx4_free_cmd_mailbox(dev, mailbox);
+free:
kfree(gw);
}
-static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
+static int update_gid_table(struct mlx4_ib_dev *dev, int port,
+ union ib_gid *gid, int clear,
+ int default_gid)
{
- struct net_device *ndev = dev->iboe.netdevs[port - 1];
struct update_gid_work *work;
- struct net_device *tmp;
int i;
- u8 *hits;
- int ret;
- union ib_gid gid;
- int free;
- int found;
int need_update = 0;
- u16 vid;
-
- work = kzalloc(sizeof *work, GFP_ATOMIC);
- if (!work)
- return -ENOMEM;
-
- hits = kzalloc(128, GFP_ATOMIC);
- if (!hits) {
- ret = -ENOMEM;
- goto out;
- }
+ int free = -1;
+ int found = -1;
+ int max_gids;
- rcu_read_lock();
- for_each_netdev_rcu(&init_net, tmp) {
- if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
- gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
- vid = rdma_vlan_dev_vlan_id(tmp);
- mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
- found = 0;
- free = -1;
- for (i = 0; i < 128; ++i) {
- if (free < 0 &&
- !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
- free = i;
- if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
- hits[i] = 1;
- found = 1;
+ if (default_gid) {
+ free = 0;
+ } else {
+ max_gids = dev->dev->caps.gid_table_len[port];
+ for (i = 1; i < max_gids; ++i) {
+ if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
+ sizeof(*gid)))
+ found = i;
+
+ if (clear) {
+ if (found >= 0) {
+ need_update = 1;
+ dev->iboe.gid_table[port - 1][found] =
+ zgid;
break;
}
- }
+ } else {
+ if (found >= 0)
+ break;
- if (!found) {
- if (tmp == ndev &&
- (memcmp(&dev->iboe.gid_table[port - 1][0],
- &gid, sizeof gid) ||
- !memcmp(&dev->iboe.gid_table[port - 1][0],
- &zgid, sizeof gid))) {
- dev->iboe.gid_table[port - 1][0] = gid;
- ++need_update;
- hits[0] = 1;
- } else if (free >= 0) {
- dev->iboe.gid_table[port - 1][free] = gid;
- hits[free] = 1;
- ++need_update;
- }
+ if (free < 0 &&
+ !memcmp(&dev->iboe.gid_table[port - 1][i],
+ &zgid, sizeof(*gid)))
+ free = i;
}
}
}
- rcu_read_unlock();
- for (i = 0; i < 128; ++i)
- if (!hits[i]) {
- if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
- ++need_update;
- dev->iboe.gid_table[port - 1][i] = zgid;
- }
+ if (found == -1 && !clear && free >= 0) {
+ dev->iboe.gid_table[port - 1][free] = *gid;
+ need_update = 1;
+ }
- if (need_update) {
- memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
- INIT_WORK(&work->work, update_gids_task);
- work->port = port;
- work->dev = dev;
- queue_work(wq, &work->work);
- } else
- kfree(work);
+ if (!need_update)
+ return 0;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM;
+
+ memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
+ INIT_WORK(&work->work, update_gids_task);
+ work->port = port;
+ work->dev = dev;
+ queue_work(wq, &work->work);
- kfree(hits);
return 0;
+}
-out:
- kfree(work);
- return ret;
+static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid)
+{
+ gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
}
-static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
+
+static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
{
- switch (event) {
- case NETDEV_UP:
- case NETDEV_CHANGEADDR:
- update_ipv6_gids(dev, port, 0);
- break;
+ struct update_gid_work *work;
- case NETDEV_DOWN:
- update_ipv6_gids(dev, port, 1);
- dev->iboe.netdevs[port - 1] = NULL;
- }
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM;
+
+ memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
+ memset(work->gids, 0, sizeof(work->gids));
+ INIT_WORK(&work->work, reset_gids_task);
+ work->dev = dev;
+ work->port = port;
+ queue_work(wq, &work->work);
+ return 0;
}
-static void netdev_added(struct mlx4_ib_dev *dev, int port)
+static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
+ struct mlx4_ib_dev *ibdev, union ib_gid *gid)
{
- update_ipv6_gids(dev, port, 0);
+ struct mlx4_ib_iboe *iboe;
+ int port = 0;
+ struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
+ rdma_vlan_dev_real_dev(event_netdev) :
+ event_netdev;
+ union ib_gid default_gid;
+
+ mlx4_make_default_gid(real_dev, &default_gid);
+
+ if (!memcmp(gid, &default_gid, sizeof(*gid)))
+ return 0;
+
+ if (event != NETDEV_DOWN && event != NETDEV_UP)
+ return 0;
+
+ if ((real_dev != event_netdev) &&
+ (event == NETDEV_DOWN) &&
+ rdma_link_local_addr((struct in6_addr *)gid))
+ return 0;
+
+ iboe = &ibdev->iboe;
+ spin_lock(&iboe->lock);
+
+ for (port = 1; port <= MLX4_MAX_PORTS; ++port)
+ if ((netif_is_bond_master(real_dev) &&
+ (real_dev == iboe->masters[port - 1])) ||
+ (!netif_is_bond_master(real_dev) &&
+ (real_dev == iboe->netdevs[port - 1])))
+ update_gid_table(ibdev, port, gid,
+ event == NETDEV_DOWN, 0);
+
+ spin_unlock(&iboe->lock);
+ return 0;
+
}
-static void netdev_removed(struct mlx4_ib_dev *dev, int port)
+static u8 mlx4_ib_get_dev_port(struct net_device *dev,
+ struct mlx4_ib_dev *ibdev)
{
- update_ipv6_gids(dev, port, 1);
+ u8 port = 0;
+ struct mlx4_ib_iboe *iboe;
+ struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ?
+ rdma_vlan_dev_real_dev(dev) : dev;
+
+ iboe = &ibdev->iboe;
+
+ for (port = 1; port <= MLX4_MAX_PORTS; ++port)
+ if ((netif_is_bond_master(real_dev) &&
+ (real_dev == iboe->masters[port - 1])) ||
+ (!netif_is_bond_master(real_dev) &&
+ (real_dev == iboe->netdevs[port - 1])))
+ break;
+
+ if ((port == 0) || (port > MLX4_MAX_PORTS))
+ return 0;
+ else
+ return port;
}
-static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
+static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct mlx4_ib_dev *ibdev;
+ struct in_ifaddr *ifa = ptr;
+ union ib_gid gid;
+ struct net_device *event_netdev = ifa->ifa_dev->dev;
+
+ ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
+
+ ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet);
+
+ mlx4_ib_addr_event(event, event_netdev, ibdev, &gid);
+ return NOTIFY_DONE;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct mlx4_ib_dev *ibdev;
- struct net_device *oldnd;
+ struct inet6_ifaddr *ifa = ptr;
+ union ib_gid *gid = (union ib_gid *)&ifa->addr;
+ struct net_device *event_netdev = ifa->idev->dev;
+
+ ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6);
+
+ mlx4_ib_addr_event(event, event_netdev, ibdev, gid);
+ return NOTIFY_DONE;
+}
+#endif
+
+static void mlx4_ib_get_dev_addr(struct net_device *dev,
+ struct mlx4_ib_dev *ibdev, u8 port)
+{
+ struct in_device *in_dev;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_dev *in6_dev;
+ union ib_gid *pgid;
+ struct inet6_ifaddr *ifp;
+#endif
+ union ib_gid gid;
+
+
+ if ((port == 0) || (port > MLX4_MAX_PORTS))
+ return;
+
+ /* IPv4 gids */
+ in_dev = in_dev_get(dev);
+ if (in_dev) {
+ for_ifa(in_dev) {
+ /*ifa->ifa_address;*/
+ ipv6_addr_set_v4mapped(ifa->ifa_address,
+ (struct in6_addr *)&gid);
+ update_gid_table(ibdev, port, &gid, 0, 0);
+ }
+ endfor_ifa(in_dev);
+ in_dev_put(in_dev);
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ /* IPv6 gids */
+ in6_dev = in6_dev_get(dev);
+ if (in6_dev) {
+ read_lock_bh(&in6_dev->lock);
+ list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
+ pgid = (union ib_gid *)&ifp->addr;
+ update_gid_table(ibdev, port, pgid, 0, 0);
+ }
+ read_unlock_bh(&in6_dev->lock);
+ in6_dev_put(in6_dev);
+ }
+#endif
+}
+
+static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
+ struct net_device *dev, u8 port)
+{
+ union ib_gid gid;
+ mlx4_make_default_gid(dev, &gid);
+ update_gid_table(ibdev, port, &gid, 0, 1);
+}
+
+static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
+{
+ struct net_device *dev;
+ struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+ int i;
+
+ for (i = 1; i <= ibdev->num_ports; ++i)
+ if (reset_gid_table(ibdev, i))
+ return -1;
+
+ read_lock(&dev_base_lock);
+ spin_lock(&iboe->lock);
+
+ for_each_netdev(&init_net, dev) {
+ u8 port = mlx4_ib_get_dev_port(dev, ibdev);
+ if (port)
+ mlx4_ib_get_dev_addr(dev, ibdev, port);
+ }
+
+ spin_unlock(&iboe->lock);
+ read_unlock(&dev_base_lock);
+
+ return 0;
+}
+
+static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
+{
struct mlx4_ib_iboe *iboe;
int port;
- if (!net_eq(dev_net(dev), &init_net))
- return NOTIFY_DONE;
-
- ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
iboe = &ibdev->iboe;
spin_lock(&iboe->lock);
mlx4_foreach_ib_transport_port(port, ibdev->dev) {
- oldnd = iboe->netdevs[port - 1];
+ enum ib_port_state port_state = IB_PORT_NOP;
+ struct net_device *old_master = iboe->masters[port - 1];
+ struct net_device *curr_netdev;
+ struct net_device *curr_master;
+
iboe->netdevs[port - 1] =
mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
- if (oldnd != iboe->netdevs[port - 1]) {
- if (iboe->netdevs[port - 1])
- netdev_added(ibdev, port);
- else
- netdev_removed(ibdev, port);
+ if (iboe->netdevs[port - 1])
+ mlx4_ib_set_default_gid(ibdev,
+ iboe->netdevs[port - 1], port);
+ curr_netdev = iboe->netdevs[port - 1];
+
+ if (iboe->netdevs[port - 1] &&
+ netif_is_bond_slave(iboe->netdevs[port - 1])) {
+ iboe->masters[port - 1] = netdev_master_upper_dev_get(
+ iboe->netdevs[port - 1]);
+ } else {
+ iboe->masters[port - 1] = NULL;
+ }
+ curr_master = iboe->masters[port - 1];
+
+ if (curr_netdev) {
+ port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ } else {
+ reset_gid_table(ibdev, port);
+ }
/* if using bonding/team and a slave port is down, we don't want the bond IP
+ * based gids in the table since flows that select port by gid may get
+ * the down port.
+ */
+ if (curr_master && (port_state == IB_PORT_DOWN)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ }
+ /* if bonding is used it is possible that we add it to masters
+ * only after IP address is assigned to the net bonding
+ * interface.
+ */
+ if (curr_master && (old_master != curr_master)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ mlx4_ib_get_dev_addr(curr_master, ibdev, port);
}
- }
- if (dev == iboe->netdevs[0] ||
- (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
- handle_en_event(ibdev, 1, event);
- else if (dev == iboe->netdevs[1]
- || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
- handle_en_event(ibdev, 2, event);
+ if (!curr_master && (old_master != curr_master)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+ }
+ }
spin_unlock(&iboe->lock);
+}
+
+static int mlx4_ib_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct mlx4_ib_dev *ibdev;
+
+ if (!net_eq(dev_net(dev), &init_net))
+ return NOTIFY_DONE;
+
+ ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
+ mlx4_ib_scan_netdevs(ibdev);
return NOTIFY_DONE;
}
@@ -1533,6 +1884,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
int i, j;
int err;
struct mlx4_ib_iboe *iboe;
+ int ib_num_ports = 0;
pr_info_once("%s", mlx4_ib_version);
@@ -1682,6 +2034,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
}
if (check_flow_steering_support(dev)) {
+ ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
@@ -1707,11 +2060,42 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->counters[i] = -1;
}
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ ib_num_ports++;
+
spin_lock_init(&ibdev->sm_lock);
mutex_init(&ibdev->cap_mask_mutex);
+ if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+ ib_num_ports) {
+ ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
+ err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
+ MLX4_IB_UC_STEER_QPN_ALIGN,
+ &ibdev->steer_qpn_base);
+ if (err)
+ goto err_counter;
+
+ ibdev->ib_uc_qpns_bitmap =
+ kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
+ sizeof(long),
+ GFP_KERNEL);
+ if (!ibdev->ib_uc_qpns_bitmap) {
+ dev_err(&dev->pdev->dev, "bit map alloc failed\n");
+ goto err_steer_qp_release;
+ }
+
+ bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
+
+ err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
+ dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_base +
+ ibdev->steer_qpn_count - 1);
+ if (err)
+ goto err_steer_free_bitmap;
+ }
+
if (ib_register_device(&ibdev->ib_dev, NULL))
- goto err_counter;
+ goto err_steer_free_bitmap;
if (mlx4_ib_mad_init(ibdev))
goto err_reg;
@@ -1719,11 +2103,39 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (mlx4_ib_init_sriov(ibdev))
goto err_mad;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
- iboe->nb.notifier_call = mlx4_ib_netdev_event;
- err = register_netdevice_notifier(&iboe->nb);
- if (err)
- goto err_sriov;
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
+ if (!iboe->nb.notifier_call) {
+ iboe->nb.notifier_call = mlx4_ib_netdev_event;
+ err = register_netdevice_notifier(&iboe->nb);
+ if (err) {
+ iboe->nb.notifier_call = NULL;
+ goto err_notif;
+ }
+ }
+ if (!iboe->nb_inet.notifier_call) {
+ iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
+ err = register_inetaddr_notifier(&iboe->nb_inet);
+ if (err) {
+ iboe->nb_inet.notifier_call = NULL;
+ goto err_notif;
+ }
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ if (!iboe->nb_inet6.notifier_call) {
+ iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event;
+ err = register_inet6addr_notifier(&iboe->nb_inet6);
+ if (err) {
+ iboe->nb_inet6.notifier_call = NULL;
+ goto err_notif;
+ }
+ }
+#endif
+ for (i = 1 ; i <= ibdev->num_ports ; ++i)
+ reset_gid_table(ibdev, i);
+ rtnl_lock();
+ mlx4_ib_scan_netdevs(ibdev);
+ rtnl_unlock();
+ mlx4_ib_init_gid_table(ibdev);
}
for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -1749,11 +2161,25 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
return ibdev;
err_notif:
- if (unregister_netdevice_notifier(&ibdev->iboe.nb))
- pr_warn("failure unregistering notifier\n");
+ if (ibdev->iboe.nb.notifier_call) {
+ if (unregister_netdevice_notifier(&ibdev->iboe.nb))
+ pr_warn("failure unregistering notifier\n");
+ ibdev->iboe.nb.notifier_call = NULL;
+ }
+ if (ibdev->iboe.nb_inet.notifier_call) {
+ if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
+ pr_warn("failure unregistering notifier\n");
+ ibdev->iboe.nb_inet.notifier_call = NULL;
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ibdev->iboe.nb_inet6.notifier_call) {
+ if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
+ pr_warn("failure unregistering notifier\n");
+ ibdev->iboe.nb_inet6.notifier_call = NULL;
+ }
+#endif
flush_workqueue(wq);
-err_sriov:
mlx4_ib_close_sriov(ibdev);
err_mad:
@@ -1762,6 +2188,13 @@ err_mad:
err_reg:
ib_unregister_device(&ibdev->ib_dev);
+err_steer_free_bitmap:
+ kfree(ibdev->ib_uc_qpns_bitmap);
+
+err_steer_qp_release:
+ if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
err_counter:
for (; i; --i)
if (ibdev->counters[i - 1] != -1)
@@ -1782,6 +2215,69 @@ err_dealloc:
return NULL;
}
+int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
+{
+ int offset;
+
+ WARN_ON(!dev->ib_uc_qpns_bitmap);
+
+ offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
+ dev->steer_qpn_count,
+ get_count_order(count));
+ if (offset < 0)
+ return offset;
+
+ *qpn = dev->steer_qpn_base + offset;
+ return 0;
+}
+
+void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
+{
+ if (!qpn ||
+ dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return;
+
+ BUG_ON(qpn < dev->steer_qpn_base);
+
+ bitmap_release_region(dev->ib_uc_qpns_bitmap,
+ qpn - dev->steer_qpn_base,
+ get_count_order(count));
+}
+
+int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
+ int is_attach)
+{
+ int err;
+ size_t flow_size;
+ struct ib_flow_attr *flow = NULL;
+ struct ib_flow_spec_ib *ib_spec;
+
+ if (is_attach) {
+ flow_size = sizeof(struct ib_flow_attr) +
+ sizeof(struct ib_flow_spec_ib);
+ flow = kzalloc(flow_size, GFP_KERNEL);
+ if (!flow)
+ return -ENOMEM;
+ flow->port = mqp->port;
+ flow->num_of_specs = 1;
+ flow->size = flow_size;
+ ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
+ ib_spec->type = IB_FLOW_SPEC_IB;
+ ib_spec->size = sizeof(struct ib_flow_spec_ib);
+ /* Add an empty rule for IB L2 */
+ memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
+
+ err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
+ IB_FLOW_DOMAIN_NIC,
+ MLX4_FS_REGULAR,
+ &mqp->reg_id);
+ } else {
+ err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
+ }
+ kfree(flow);
+ return err;
+}
+
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
struct mlx4_ib_dev *ibdev = ibdev_ptr;
@@ -1795,6 +2291,26 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
pr_warn("failure unregistering notifier\n");
ibdev->iboe.nb.notifier_call = NULL;
}
+
+ if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
+ kfree(ibdev->ib_uc_qpns_bitmap);
+ }
+
+ if (ibdev->iboe.nb_inet.notifier_call) {
+ if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
+ pr_warn("failure unregistering notifier\n");
+ ibdev->iboe.nb_inet.notifier_call = NULL;
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ibdev->iboe.nb_inet6.notifier_call) {
+ if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
+ pr_warn("failure unregistering notifier\n");
+ ibdev->iboe.nb_inet6.notifier_call = NULL;
+ }
+#endif
+
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
if (ibdev->counters[p] != -1)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 036b663dd26e..a230683af940 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -68,6 +68,8 @@ enum {
/*module param to indicate if SM assigns the alias_GUID*/
extern int mlx4_ib_sm_guid_assign;
+#define MLX4_IB_UC_STEER_QPN_ALIGN 1
+#define MLX4_IB_UC_MAX_NUM_QPS 256
struct mlx4_ib_ucontext {
struct ib_ucontext ibucontext;
struct mlx4_uar uar;
@@ -153,6 +155,7 @@ struct mlx4_ib_wq {
enum mlx4_ib_qp_flags {
MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
+ MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
MLX4_IB_SRIOV_SQP = 1 << 31,
};
@@ -270,6 +273,7 @@ struct mlx4_ib_qp {
struct list_head gid_list;
struct list_head steering_rules;
struct mlx4_ib_buf *sqp_proxy_rcv;
+ u64 reg_id;
};
@@ -428,7 +432,10 @@ struct mlx4_ib_sriov {
struct mlx4_ib_iboe {
spinlock_t lock;
struct net_device *netdevs[MLX4_MAX_PORTS];
+ struct net_device *masters[MLX4_MAX_PORTS];
struct notifier_block nb;
+ struct notifier_block nb_inet;
+ struct notifier_block nb_inet6;
union ib_gid gid_table[MLX4_MAX_PORTS][128];
};
@@ -494,6 +501,10 @@ struct mlx4_ib_dev {
struct kobject *dev_ports_parent[MLX4_MFUNC_MAX];
struct mlx4_ib_iov_port iov_ports[MLX4_MAX_PORTS];
struct pkey_mgt pkeys;
+ unsigned long *ib_uc_qpns_bitmap;
+ int steer_qpn_count;
+ int steer_qpn_base;
+ int steering_support;
};
struct ib_event_work {
@@ -675,9 +686,6 @@ int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid, int netw_view);
-int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
- u8 *mac, int *is_mcast, u8 port);
-
static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
@@ -752,5 +760,9 @@ void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device);
__be64 mlx4_ib_gen_node_guid(void);
+int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
+void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
+int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
+ int is_attach);
#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 4f10af2905b5..d8f4d1fe8494 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -90,6 +90,21 @@ enum {
MLX4_RAW_QP_MSGMAX = 31,
};
+#ifndef ETH_ALEN
+#define ETH_ALEN 6
+#endif
+static inline u64 mlx4_mac_to_u64(u8 *addr)
+{
+ u64 mac = 0;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac <<= 8;
+ mac |= addr[i];
+ }
+ return mac;
+}
+
static const __be32 mlx4_ib_opcode[] = {
[IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
[IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
@@ -716,6 +731,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
qp->flags |= MLX4_IB_QP_LSO;
+ if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
+ if (dev->steering_support ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ qp->flags |= MLX4_IB_QP_NETIF;
+ else
+ goto err;
+ }
+
err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
if (err)
goto err;
@@ -765,7 +788,11 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (init_attr->qp_type == IB_QPT_RAW_PACKET)
err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
else
- err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
+ if (qp->flags & MLX4_IB_QP_NETIF)
+ err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
+ else
+ err = mlx4_qp_reserve_range(dev->dev, 1, 1,
+ &qpn);
if (err)
goto err_proxy;
}
@@ -790,8 +817,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
return 0;
err_qpn:
- if (!sqpn)
- mlx4_qp_release_range(dev->dev, qpn, 1);
+ if (!sqpn) {
+ if (qp->flags & MLX4_IB_QP_NETIF)
+ mlx4_ib_steer_qp_free(dev, qpn, 1);
+ else
+ mlx4_qp_release_range(dev->dev, qpn, 1);
+ }
err_proxy:
if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
free_proxy_bufs(pd->device, qp);
@@ -932,8 +963,12 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
mlx4_qp_free(dev->dev, &qp->mqp);
- if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp))
- mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
+ if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
+ if (qp->flags & MLX4_IB_QP_NETIF)
+ mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
+ else
+ mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
+ }
mlx4_mtt_cleanup(dev->dev, &qp->mtt);
@@ -987,9 +1022,16 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
*/
if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
- MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP))
+ MLX4_IB_SRIOV_TUNNEL_QP |
+ MLX4_IB_SRIOV_SQP |
+ MLX4_IB_QP_NETIF))
return ERR_PTR(-EINVAL);
+ if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
+ if (init_attr->qp_type != IB_QPT_UD)
+ return ERR_PTR(-EINVAL);
+ }
+
if (init_attr->create_flags &&
(udata ||
((init_attr->create_flags & ~MLX4_IB_SRIOV_SQP) &&
@@ -1144,16 +1186,15 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}
-static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
- struct mlx4_qp_path *path, u8 port)
+static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
+ u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
+ u8 port)
{
- int err;
int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
IB_LINK_LAYER_ETHERNET;
- u8 mac[6];
- int is_mcast;
- u16 vlan_tag;
int vidx;
+ int smac_index;
+
path->grh_mylmc = ah->src_path_bits & 0x7f;
path->rlid = cpu_to_be16(ah->dlid);
@@ -1188,22 +1229,27 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
if (!(ah->ah_flags & IB_AH_GRH))
return -1;
- err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port);
- if (err)
- return err;
-
- memcpy(path->dmac, mac, 6);
+ memcpy(path->dmac, ah->dmac, ETH_ALEN);
path->ackto = MLX4_IB_LINK_TYPE_ETH;
- /* use index 0 into MAC table for IBoE */
- path->grh_mylmc &= 0x80;
+ /* find the index into MAC table for IBoE */
+ if (!is_zero_ether_addr((const u8 *)&smac)) {
+ if (mlx4_find_cached_mac(dev->dev, port, smac,
+ &smac_index))
+ return -ENOENT;
+ } else {
+ smac_index = 0;
+ }
+
+ path->grh_mylmc &= 0x80 | smac_index;
- vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]);
+ path->feup |= MLX4_FEUP_FORCE_ETH_UP;
if (vlan_tag < 0x1000) {
if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
return -ENOENT;
path->vlan_index = vidx;
path->fl = 1 << 6;
+ path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
}
} else
path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
@@ -1212,6 +1258,28 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
return 0;
}
+static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
+ enum ib_qp_attr_mask qp_attr_mask,
+ struct mlx4_qp_path *path, u8 port)
+{
+ return _mlx4_set_path(dev, &qp->ah_attr,
+ mlx4_mac_to_u64((u8 *)qp->smac),
+ (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff,
+ path, port);
+}
+
+static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
+ const struct ib_qp_attr *qp,
+ enum ib_qp_attr_mask qp_attr_mask,
+ struct mlx4_qp_path *path, u8 port)
+{
+ return _mlx4_set_path(dev, &qp->alt_ah_attr,
+ mlx4_mac_to_u64((u8 *)qp->alt_smac),
+ (qp_attr_mask & IB_QP_ALT_VID) ?
+ qp->alt_vlan_id : 0xffff,
+ path, port);
+}
+
static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
struct mlx4_ib_gid_entry *ge, *tmp;
@@ -1235,6 +1303,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
struct mlx4_qp_context *context;
enum mlx4_qp_optpar optpar = 0;
int sqd_event;
+ int steer_qp = 0;
int err = -EINVAL;
context = kzalloc(sizeof *context, GFP_KERNEL);
@@ -1319,6 +1388,11 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
} else
context->pri_path.counter_index = 0xff;
+
+ if (qp->flags & MLX4_IB_QP_NETIF) {
+ mlx4_ib_steer_qp_reg(dev, qp, 1);
+ steer_qp = 1;
+ }
}
if (attr_mask & IB_QP_PKEY_INDEX) {
@@ -1329,7 +1403,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_AV) {
- if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
+ if (mlx4_set_path(dev, attr, attr_mask, &context->pri_path,
attr_mask & IB_QP_PORT ?
attr->port_num : qp->port))
goto out;
@@ -1352,8 +1426,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
dev->dev->caps.pkey_table_len[attr->alt_port_num])
goto out;
- if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
- attr->alt_port_num))
+ if (mlx4_set_alt_path(dev, attr, attr_mask, &context->alt_path,
+ attr->alt_port_num))
goto out;
context->alt_path.pkey_index = attr->alt_pkey_index;
@@ -1464,6 +1538,17 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
MLX4_IB_LINK_TYPE_ETH;
+ if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
+ int is_eth = rdma_port_get_link_layer(
+ &dev->ib_dev, qp->port) ==
+ IB_LINK_LAYER_ETHERNET;
+ if (is_eth) {
+ context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
+ optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
+ }
+ }
+
+
if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
sqd_event = 1;
@@ -1547,9 +1632,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
qp->sq_next_wqe = 0;
if (qp->rq.wqe_cnt)
*qp->db.db = 0;
+
+ if (qp->flags & MLX4_IB_QP_NETIF)
+ mlx4_ib_steer_qp_reg(dev, qp, 0);
}
out:
+ if (err && steer_qp)
+ mlx4_ib_steer_qp_reg(dev, qp, 0);
kfree(context);
return err;
}
@@ -1561,13 +1651,21 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct mlx4_ib_qp *qp = to_mqp(ibqp);
enum ib_qp_state cur_state, new_state;
int err = -EINVAL;
-
+ int ll;
mutex_lock(&qp->mutex);
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+ if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+ ll = IB_LINK_LAYER_UNSPECIFIED;
+ } else {
+ int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+ ll = rdma_port_get_link_layer(&dev->ib_dev, port);
+ }
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask, ll)) {
pr_debug("qpn 0x%x: invalid attribute mask specified "
"for transition %d to %d. qp_type %d,"
" attr_mask 0x%x\n",
@@ -1784,8 +1882,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
return err;
}
- vlan = rdma_get_vlan_id(&sgid);
- is_vlan = vlan < 0x1000;
+ if (ah->av.eth.vlan != 0xffff) {
+ vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
+ is_vlan = 1;
+ }
}
ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);
@@ -2762,6 +2862,9 @@ done:
if (qp->flags & MLX4_IB_QP_LSO)
qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
+ if (qp->flags & MLX4_IB_QP_NETIF)
+ qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP;
+
qp_init_attr->sq_sig_type =
qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
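For reference, the mlx4_mac_to_u64() helper introduced in qp.c above packs the six MAC bytes, most significant first, into the low 48 bits of a u64. A self-contained sketch (hypothetical test harness, not part of the patch) showing the result for a sample address:

#include <stdint.h>
#include <stdio.h>

/* same byte-packing as the driver helper: MSB first into the low 48 bits */
static uint64_t mac_to_u64(const uint8_t *addr)
{
	uint64_t mac = 0;
	int i;

	for (i = 0; i < 6; i++)
		mac = (mac << 8) | addr[i];
	return mac;
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("0x%012llx\n", (unsigned long long)mac_to_u64(mac));
	/* prints 0x001122334455 */
	return 0;
}
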
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index 97516eb363b7..db2ea31df832 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -582,8 +582,10 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
p->pkey_group.attrs =
alloc_group_attrs(show_port_pkey, store_port_pkey,
dev->dev->caps.pkey_table_len[port_num]);
- if (!p->pkey_group.attrs)
+ if (!p->pkey_group.attrs) {
+ ret = -ENOMEM;
goto err_alloc;
+ }
ret = sysfs_create_group(&p->kobj, &p->pkey_group);
if (ret)
@@ -591,8 +593,10 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
p->gid_group.name = "gid_idx";
p->gid_group.attrs = alloc_group_attrs(show_port_gid_idx, NULL, 1);
- if (!p->gid_group.attrs)
+ if (!p->gid_group.attrs) {
+ ret = -ENOMEM;
goto err_free_pkey;
+ }
ret = sysfs_create_group(&p->kobj, &p->gid_group);
if (ret)
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index 8e6aebfaf8a4..10df386c6344 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,6 +1,6 @@
config MLX5_INFINIBAND
tristate "Mellanox Connect-IB HCA support"
- depends on NETDEVICES && ETHERNET && PCI && X86
+ depends on NETDEVICES && ETHERNET && PCI
select NET_VENDOR_MELLANOX
select MLX5_CORE
---help---
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index b72627429745..b1705ce6eb88 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -73,14 +73,24 @@ static void *get_cqe(struct mlx5_ib_cq *cq, int n)
return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
}
+static u8 sw_ownership_bit(int n, int nent)
+{
+ return (n & nent) ? 1 : 0;
+}
+
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
struct mlx5_cqe64 *cqe64;
cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
- return ((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^
- !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
+
+ if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
+ !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
+ return cqe;
+ } else {
+ return NULL;
+ }
}
static void *next_cqe_sw(struct mlx5_ib_cq *cq)
@@ -351,6 +361,11 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
qp->sq.last_poll = tail;
}
+static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
+{
+ mlx5_buf_free(&dev->mdev, &buf->buf);
+}
+
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_ib_qp **cur_qp,
struct ib_wc *wc)
@@ -366,6 +381,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
void *cqe;
int idx;
+repoll:
cqe = next_cqe_sw(cq);
if (!cqe)
return -EAGAIN;
@@ -379,7 +395,18 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
*/
rmb();
- /* TBD: resize CQ */
+ opcode = cqe64->op_own >> 4;
+ if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
+ if (likely(cq->resize_buf)) {
+ free_cq_buf(dev, &cq->buf);
+ cq->buf = *cq->resize_buf;
+ kfree(cq->resize_buf);
+ cq->resize_buf = NULL;
+ goto repoll;
+ } else {
+ mlx5_ib_warn(dev, "unexpected resize cqe\n");
+ }
+ }
qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
@@ -398,7 +425,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
}
wc->qp = &(*cur_qp)->ibqp;
- opcode = cqe64->op_own >> 4;
switch (opcode) {
case MLX5_CQE_REQ:
wq = &(*cur_qp)->sq;
@@ -503,15 +529,11 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
return err;
buf->cqe_size = cqe_size;
+ buf->nent = nent;
return 0;
}
-static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
-{
- mlx5_buf_free(&dev->mdev, &buf->buf);
-}
-
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
struct ib_ucontext *context, struct mlx5_ib_cq *cq,
int entries, struct mlx5_create_cq_mbox_in **cqb,
@@ -576,16 +598,16 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
ib_umem_release(cq->buf.umem);
}
-static void init_cq_buf(struct mlx5_ib_cq *cq, int nent)
+static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
{
int i;
void *cqe;
struct mlx5_cqe64 *cqe64;
- for (i = 0; i < nent; i++) {
- cqe = get_cqe(cq, i);
- cqe64 = (cq->buf.cqe_size == 64) ? cqe : cqe + 64;
- cqe64->op_own = 0xf1;
+ for (i = 0; i < buf->nent; i++) {
+ cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
+ cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
+ cqe64->op_own = MLX5_CQE_INVALID << 4;
}
}
@@ -610,7 +632,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (err)
goto err_db;
- init_cq_buf(cq, entries);
+ init_cq_buf(cq, &cq->buf);
*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
*cqb = mlx5_vzalloc(*inlen);
@@ -818,12 +840,266 @@ void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
- return -ENOSYS;
+ struct mlx5_modify_cq_mbox_in *in;
+ struct mlx5_ib_dev *dev = to_mdev(cq->device);
+ struct mlx5_ib_cq *mcq = to_mcq(cq);
+ int err;
+ u32 fsel;
+
+ if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+ return -ENOSYS;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ in->cqn = cpu_to_be32(mcq->mcq.cqn);
+ fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
+ in->ctx.cq_period = cpu_to_be16(cq_period);
+ in->ctx.cq_max_count = cpu_to_be16(cq_count);
+ in->field_select = cpu_to_be32(fsel);
+ err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in));
+ kfree(in);
+
+ if (err)
+ mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
+
+ return err;
+}
+
+static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ int entries, struct ib_udata *udata, int *npas,
+ int *page_shift, int *cqe_size)
+{
+ struct mlx5_ib_resize_cq ucmd;
+ struct ib_umem *umem;
+ int err;
+ int npages;
+ struct ib_ucontext *context = cq->buf.umem->context;
+
+ err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
+ if (err)
+ return err;
+
+ if (ucmd.reserved0 || ucmd.reserved1)
+ return -EINVAL;
+
+ umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(umem)) {
+ err = PTR_ERR(umem);
+ return err;
+ }
+
+ mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
+ npas, NULL);
+
+ cq->resize_umem = umem;
+ *cqe_size = ucmd.cqe_size;
+
+ return 0;
+}
+
+static void un_resize_user(struct mlx5_ib_cq *cq)
+{
+ ib_umem_release(cq->resize_umem);
+}
+
+static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ int entries, int cqe_size)
+{
+ int err;
+
+ cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
+ if (!cq->resize_buf)
+ return -ENOMEM;
+
+ err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
+ if (err)
+ goto ex;
+
+ init_cq_buf(cq, cq->resize_buf);
+
+ return 0;
+
+ex:
+ kfree(cq->resize_buf);
+ return err;
+}
+
+static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
+{
+ free_cq_buf(dev, cq->resize_buf);
+ cq->resize_buf = NULL;
+}
+
+static int copy_resize_cqes(struct mlx5_ib_cq *cq)
+{
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct mlx5_cqe64 *scqe64;
+ struct mlx5_cqe64 *dcqe64;
+ void *start_cqe;
+ void *scqe;
+ void *dcqe;
+ int ssize;
+ int dsize;
+ int i;
+ u8 sw_own;
+
+ ssize = cq->buf.cqe_size;
+ dsize = cq->resize_buf->cqe_size;
+ if (ssize != dsize) {
+ mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
+ return -EINVAL;
+ }
+
+ i = cq->mcq.cons_index;
+ scqe = get_sw_cqe(cq, i);
+ scqe64 = ssize == 64 ? scqe : scqe + 64;
+ start_cqe = scqe;
+ if (!scqe) {
+ mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
+ return -EINVAL;
+ }
+
+ while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
+ dcqe = get_cqe_from_buf(cq->resize_buf,
+ (i + 1) & (cq->resize_buf->nent),
+ dsize);
+ dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
+ sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
+ memcpy(dcqe, scqe, dsize);
+ dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;
+
+ ++i;
+ scqe = get_sw_cqe(cq, i);
+ scqe64 = ssize == 64 ? scqe : scqe + 64;
+ if (!scqe) {
+ mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
+ return -EINVAL;
+ }
+
+ if (scqe == start_cqe) {
+ pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
+ cq->mcq.cqn);
+ return -ENOMEM;
+ }
+ }
+ ++cq->mcq.cons_index;
+ return 0;
}
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
- return -ENOSYS;
+ struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+ struct mlx5_modify_cq_mbox_in *in;
+ int err;
+ int npas;
+ int page_shift;
+ int inlen;
+ int uninitialized_var(cqe_size);
+ unsigned long flags;
+
+ if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+ pr_info("Firmware does not support resize CQ\n");
+ return -ENOSYS;
+ }
+
+ if (entries < 1)
+ return -EINVAL;
+
+ entries = roundup_pow_of_two(entries + 1);
+ if (entries > dev->mdev.caps.max_cqes + 1)
+ return -EINVAL;
+
+ if (entries == ibcq->cqe + 1)
+ return 0;
+
+ mutex_lock(&cq->resize_mutex);
+ if (udata) {
+ err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
+ &cqe_size);
+ } else {
+ cqe_size = 64;
+ err = resize_kernel(dev, cq, entries, cqe_size);
+ if (!err) {
+ npas = cq->resize_buf->buf.npages;
+ page_shift = cq->resize_buf->buf.page_shift;
+ }
+ }
+
+ if (err)
+ goto ex;
+
+ inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ err = -ENOMEM;
+ goto ex_resize;
+ }
+
+ if (udata)
+ mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
+ in->pas, 0);
+ else
+ mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);
+
+ in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE |
+ MLX5_MODIFY_CQ_MASK_PG_OFFSET |
+ MLX5_MODIFY_CQ_MASK_PG_SIZE);
+ in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
+ in->ctx.page_offset = 0;
+ in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
+ in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
+ in->cqn = cpu_to_be32(cq->mcq.cqn);
+
+ err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen);
+ if (err)
+ goto ex_alloc;
+
+ if (udata) {
+ cq->ibcq.cqe = entries - 1;
+ ib_umem_release(cq->buf.umem);
+ cq->buf.umem = cq->resize_umem;
+ cq->resize_umem = NULL;
+ } else {
+ struct mlx5_ib_cq_buf tbuf;
+ int resized = 0;
+
+ spin_lock_irqsave(&cq->lock, flags);
+ if (cq->resize_buf) {
+ err = copy_resize_cqes(cq);
+ if (!err) {
+ tbuf = cq->buf;
+ cq->buf = *cq->resize_buf;
+ kfree(cq->resize_buf);
+ cq->resize_buf = NULL;
+ resized = 1;
+ }
+ }
+ cq->ibcq.cqe = entries - 1;
+ spin_unlock_irqrestore(&cq->lock, flags);
+ if (resized)
+ free_cq_buf(dev, &tbuf);
+ }
+ mutex_unlock(&cq->resize_mutex);
+
+ mlx5_vfree(in);
+ return 0;
+
+ex_alloc:
+ mlx5_vfree(in);
+
+ex_resize:
+ if (udata)
+ un_resize_user(cq);
+ else
+ un_resize_kernel(dev, cq);
+ex:
+ mutex_unlock(&cq->resize_mutex);
+ return err;
}
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
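The resize path added above relies on sw_ownership_bit(n, nent) returning (n & nent), so for a power-of-two resize buffer the software ownership bit toggles each time the consumer index wraps the buffer; copy_resize_cqes() stamps that bit into every copied CQE. A small standalone illustration (not driver code) of the resulting pattern:

#include <stdio.h>

static unsigned int sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}

int main(void)
{
	int n, nent = 8;	/* 8-entry resize buffer */

	for (n = 0; n < 24; n++)
		printf("cqe %2d -> owner %u\n", n, sw_ownership_bit(n, nent));
	/* indices 0..7 -> 0, 8..15 -> 1, 16..23 -> 0: the bit flips on
	 * every wrap of the consumer index */
	return 0;
}
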
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 306534109627..aa03e732b6a8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -261,8 +261,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_SYS_IMAGE_GUID |
- IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+ IB_DEVICE_RC_RNR_NAK_GEN;
flags = dev->mdev.caps.flags;
if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
@@ -536,34 +535,51 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_ib_alloc_ucontext_req req;
+ struct mlx5_ib_alloc_ucontext_req_v2 req;
struct mlx5_ib_alloc_ucontext_resp resp;
struct mlx5_ib_ucontext *context;
struct mlx5_uuar_info *uuari;
struct mlx5_uar *uars;
+ int gross_uuars;
int num_uars;
+ int ver;
int uuarn;
int err;
int i;
+ int reqlen;
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
- err = ib_copy_from_udata(&req, udata, sizeof(req));
+ memset(&req, 0, sizeof(req));
+ reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
+ if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+ ver = 0;
+ else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+ ver = 2;
+ else
+ return ERR_PTR(-EINVAL);
+
+ err = ib_copy_from_udata(&req, udata, reqlen);
if (err)
return ERR_PTR(err);
+ if (req.flags || req.reserved)
+ return ERR_PTR(-EINVAL);
+
if (req.total_num_uuars > MLX5_MAX_UUARS)
return ERR_PTR(-ENOMEM);
if (req.total_num_uuars == 0)
return ERR_PTR(-EINVAL);
- req.total_num_uuars = ALIGN(req.total_num_uuars, MLX5_BF_REGS_PER_PAGE);
+ req.total_num_uuars = ALIGN(req.total_num_uuars,
+ MLX5_NON_FP_BF_REGS_PER_PAGE);
if (req.num_low_latency_uuars > req.total_num_uuars - 1)
return ERR_PTR(-EINVAL);
- num_uars = req.total_num_uuars / MLX5_BF_REGS_PER_PAGE;
+ num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
+ gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
resp.qp_tab_size = 1 << dev->mdev.caps.log_max_qp;
resp.bf_reg_size = dev->mdev.caps.bf_reg_size;
resp.cache_line_size = L1_CACHE_BYTES;
@@ -585,7 +601,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
goto out_ctx;
}
- uuari->bitmap = kcalloc(BITS_TO_LONGS(req.total_num_uuars),
+ uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
sizeof(*uuari->bitmap),
GFP_KERNEL);
if (!uuari->bitmap) {
@@ -595,13 +611,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
/*
* clear all fast path uuars
*/
- for (i = 0; i < req.total_num_uuars; i++) {
+ for (i = 0; i < gross_uuars; i++) {
uuarn = i & 3;
if (uuarn == 2 || uuarn == 3)
set_bit(i, uuari->bitmap);
}
- uuari->count = kcalloc(req.total_num_uuars, sizeof(*uuari->count), GFP_KERNEL);
+ uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
if (!uuari->count) {
err = -ENOMEM;
goto out_bitmap;
@@ -623,6 +639,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (err)
goto out_uars;
+ uuari->ver = ver;
uuari->num_low_latency_uuars = req.num_low_latency_uuars;
uuari->uars = uars;
uuari->num_uars = num_uars;
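The reworked ucontext allocation above rounds total_num_uuars up to a multiple of MLX5_NON_FP_BF_REGS_PER_PAGE, derives the number of UAR pages from that, and then sizes the bitmap and count arrays to gross_uuars (every register on those pages, fast-path slots included). A sketch of the arithmetic, assuming the usual mlx5 values of 4 blue-flame registers per UAR page with 2 of them non-fast-path; those two constants are an assumption here, not taken from this hunk:

#include <stdio.h>

#define BF_REGS_PER_PAGE	4
#define NON_FP_BF_REGS_PER_PAGE	2

int main(void)
{
	int req = 5;	/* total_num_uuars requested by userspace */
	int total = ((req + NON_FP_BF_REGS_PER_PAGE - 1) /
		     NON_FP_BF_REGS_PER_PAGE) * NON_FP_BF_REGS_PER_PAGE;
	int num_uars = total / NON_FP_BF_REGS_PER_PAGE;
	int gross_uuars = num_uars * BF_REGS_PER_PAGE;

	/* prints: total 6, uar pages 3, gross uuars 12 */
	printf("total %d, uar pages %d, gross uuars %d\n",
	       total, num_uars, gross_uuars);
	return 0;
}
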
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 4c134d93d4fc..389e31965773 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -195,6 +195,7 @@ struct mlx5_ib_cq_buf {
struct mlx5_buf buf;
struct ib_umem *umem;
int cqe_size;
+ int nent;
};
enum mlx5_ib_qp_flags {
@@ -220,7 +221,7 @@ struct mlx5_ib_cq {
/* protect resize cq
*/
struct mutex resize_mutex;
- struct mlx5_ib_cq_resize *resize_buf;
+ struct mlx5_ib_cq_buf *resize_buf;
struct ib_umem *resize_umem;
int cqe_size;
};
@@ -264,7 +265,6 @@ struct mlx5_ib_mr {
enum ib_wc_status status;
struct mlx5_ib_dev *dev;
struct mlx5_create_mkey_mbox_out out;
- unsigned long start;
};
struct mlx5_ib_fast_reg_page_list {
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 039c3e40fcb4..7c95ca1f0c25 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -146,7 +146,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
spin_lock_irq(&ent->lock);
ent->pending++;
spin_unlock_irq(&ent->lock);
- mr->start = jiffies;
err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in,
sizeof(*in), reg_mr_callback,
mr, &mr->out);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 7c6b4ba49bec..7dfe8a1c84cf 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -216,7 +216,9 @@ static int sq_overhead(enum ib_qp_type qp_type)
case IB_QPT_UC:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg);
+ sizeof(struct mlx5_wqe_raddr_seg) +
+ sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg);
break;
case IB_QPT_UD:
@@ -340,14 +342,57 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
return 1;
}
+static int first_med_uuar(void)
+{
+ return 1;
+}
+
+static int next_uuar(int n)
+{
+ n++;
+
+ while (((n % 4) & 2))
+ n++;
+
+ return n;
+}
+
+static int num_med_uuar(struct mlx5_uuar_info *uuari)
+{
+ int n;
+
+ n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
+ uuari->num_low_latency_uuars - 1;
+
+ return n >= 0 ? n : 0;
+}
+
+static int max_uuari(struct mlx5_uuar_info *uuari)
+{
+ return uuari->num_uars * 4;
+}
+
+static int first_hi_uuar(struct mlx5_uuar_info *uuari)
+{
+ int med;
+ int i;
+ int t;
+
+ med = num_med_uuar(uuari);
+ for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
+ t++;
+ if (t == med)
+ return next_uuar(i);
+ }
+
+ return 0;
+}
+
static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
- int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
- int start_uuar;
int i;
- start_uuar = nuuars - uuari->num_low_latency_uuars;
- for (i = start_uuar; i < nuuars; i++) {
+ for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
if (!test_bit(i, uuari->bitmap)) {
set_bit(i, uuari->bitmap);
uuari->count[i]++;
@@ -360,19 +405,10 @@ static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
- int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
- int minidx = 1;
- int uuarn;
- int end;
+ int minidx = first_med_uuar();
int i;
- end = nuuars - uuari->num_low_latency_uuars;
-
- for (i = 1; i < end; i++) {
- uuarn = i & 3;
- if (uuarn == 2 || uuarn == 3)
- continue;
-
+ for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
if (uuari->count[i] < uuari->count[minidx])
minidx = i;
}
@@ -394,11 +430,17 @@ static int alloc_uuar(struct mlx5_uuar_info *uuari,
break;
case MLX5_IB_LATENCY_CLASS_MEDIUM:
- uuarn = alloc_med_class_uuar(uuari);
+ if (uuari->ver < 2)
+ uuarn = -ENOMEM;
+ else
+ uuarn = alloc_med_class_uuar(uuari);
break;
case MLX5_IB_LATENCY_CLASS_HIGH:
- uuarn = alloc_high_class_uuar(uuari);
+ if (uuari->ver < 2)
+ uuarn = -ENOMEM;
+ else
+ uuarn = alloc_high_class_uuar(uuari);
break;
case MLX5_IB_LATENCY_CLASS_FAST_PATH:
@@ -489,12 +531,12 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
{
struct mlx5_ib_ucontext *context;
struct mlx5_ib_create_qp ucmd;
- int page_shift;
+ int page_shift = 0;
int uar_index;
int npages;
- u32 offset;
+ u32 offset = 0;
int uuarn;
- int ncont;
+ int ncont = 0;
int err;
err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
@@ -510,11 +552,16 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
if (uuarn < 0) {
mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
- mlx5_ib_dbg(dev, "reverting to high latency\n");
- uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
+ mlx5_ib_dbg(dev, "reverting to medium latency\n");
+ uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
if (uuarn < 0) {
- mlx5_ib_dbg(dev, "uuar allocation failed\n");
- return uuarn;
+ mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
+ mlx5_ib_dbg(dev, "reverting to high latency\n");
+ uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
+ if (uuarn < 0) {
+ mlx5_ib_warn(dev, "uuar allocation failed\n");
+ return uuarn;
+ }
}
}
@@ -525,23 +572,29 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (err)
goto err_uuar;
- qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
- qp->buf_size, 0, 0);
- if (IS_ERR(qp->umem)) {
- mlx5_ib_dbg(dev, "umem_get failed\n");
- err = PTR_ERR(qp->umem);
- goto err_uuar;
+ if (ucmd.buf_addr && qp->buf_size) {
+ qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
+ qp->buf_size, 0, 0);
+ if (IS_ERR(qp->umem)) {
+ mlx5_ib_dbg(dev, "umem_get failed\n");
+ err = PTR_ERR(qp->umem);
+ goto err_uuar;
+ }
+ } else {
+ qp->umem = NULL;
}
- mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
- &ncont, NULL);
- err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
- if (err) {
- mlx5_ib_warn(dev, "bad offset\n");
- goto err_umem;
+ if (qp->umem) {
+ mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
+ &ncont, NULL);
+ err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
+ if (err) {
+ mlx5_ib_warn(dev, "bad offset\n");
+ goto err_umem;
+ }
+ mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
+ ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
}
- mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
- ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
*in = mlx5_vzalloc(*inlen);
@@ -549,7 +602,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
err = -ENOMEM;
goto err_umem;
}
- mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
+ if (qp->umem)
+ mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
(*in)->ctx.log_pg_sz_remote_qpn =
cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
(*in)->ctx.params2 = cpu_to_be32(offset << 6);
@@ -580,7 +634,8 @@ err_free:
mlx5_vfree(*in);
err_umem:
- ib_umem_release(qp->umem);
+ if (qp->umem)
+ ib_umem_release(qp->umem);
err_uuar:
free_uuar(&context->uuari, uuarn);
@@ -593,7 +648,8 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
context = to_mucontext(pd->uobject->context);
mlx5_ib_db_unmap_user(context, &qp->db);
- ib_umem_release(qp->umem);
+ if (qp->umem)
+ ib_umem_release(qp->umem);
free_uuar(&context->uuari, qp->uuarn);
}
@@ -609,8 +665,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
int err;
uuari = &dev->mdev.priv.uuari;
- if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
- qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
+ if (init_attr->create_flags)
+ return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
@@ -1616,7 +1672,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
- !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
+ !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
+ IB_LINK_LAYER_UNSPECIFIED))
goto out;
if ((attr_mask & IB_QP_PORT) &&
@@ -2212,6 +2269,10 @@ out:
qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
+ /* Make sure doorbell record is visible to the HCA before
+ * we hit doorbell */
+ wmb();
+
if (bf->need_lock)
spin_lock(&bf->lock);
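The UUAR selection helpers added to mlx5/qp.c walk indices with next_uuar(), which skips any slot whose index is 2 or 3 modulo 4 (the fast-path slots cleared in the bitmap at context allocation). A self-contained demonstration (not part of the patch) of the index sequence starting from first_med_uuar() == 1:

#include <stdio.h>

static int next_uuar(int n)
{
	n++;
	while ((n % 4) & 2)	/* skip fast-path slots: 2, 3 mod 4 */
		n++;
	return n;
}

int main(void)
{
	int i, n = 1;	/* first_med_uuar() */

	for (i = 0; i < 8; i++) {
		printf("%d ", n);
		n = next_uuar(n);
	}
	printf("\n");	/* prints: 1 4 5 8 9 12 13 16 */
	return 0;
}
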
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index a886de3e593c..0f4f8e42a17f 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -62,6 +62,13 @@ struct mlx5_ib_alloc_ucontext_req {
__u32 num_low_latency_uuars;
};
+struct mlx5_ib_alloc_ucontext_req_v2 {
+ __u32 total_num_uuars;
+ __u32 num_low_latency_uuars;
+ __u32 flags;
+ __u32 reserved;
+};
+
struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
@@ -93,6 +100,9 @@ struct mlx5_ib_create_cq_resp {
struct mlx5_ib_resize_cq {
__u64 buf_addr;
+ __u16 cqe_size;
+ __u16 reserved0;
+ __u32 reserved1;
};
struct mlx5_ib_create_srq {
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 26a684536109..e354b2f04ad9 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -860,7 +860,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
+ IB_LINK_LAYER_UNSPECIFIED)) {
mthca_dbg(dev, "Bad QP transition (transport %d) "
"%d->%d with attr 0x%08x\n",
qp->transport, cur_state, new_state,
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 429141078eec..353c7b05a90a 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -675,8 +675,11 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
/* Initialize network devices */
- if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
+ netdev = nes_netdev_init(nesdev, mmio_regs);
+ if (netdev == NULL) {
+ ret = -ENOMEM;
goto bail7;
+ }
/* Register network device */
ret = register_netdev(netdev);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 6b29249aa85a..9c9f2f57e960 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1354,8 +1354,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
neigh->ha, ntohl(rt->rt_gateway));
if (arpindex >= 0) {
- if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
- neigh->ha, ETH_ALEN)) {
+ if (ether_addr_equal(nesadapter->arp_table[arpindex].mac_addr, neigh->ha)) {
/* Mac address same as in nes_arp_table */
goto out;
}
diff --git a/drivers/infiniband/hw/ocrdma/Kconfig b/drivers/infiniband/hw/ocrdma/Kconfig
index b5b6056c8518..c0cddc0192d1 100644
--- a/drivers/infiniband/hw/ocrdma/Kconfig
+++ b/drivers/infiniband/hw/ocrdma/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_OCRDMA
tristate "Emulex One Connect HCA support"
- depends on ETHERNET && NETDEVICES && PCI && (IPV6 || IPV6=n)
+ depends on ETHERNET && NETDEVICES && PCI && INET && (IPV6 || IPV6=n)
select NET_VENDOR_EMULEX
select BE2NET
---help---
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 294dd27b601e..7c001b97b23f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -423,5 +423,17 @@ static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
OCRDMA_CQE_WRITE_IMM) ? 1 : 0;
}
+static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
+ struct ib_ah_attr *ah_attr, u8 *mac_addr)
+{
+ struct in6_addr in6;
+
+ memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+ if (rdma_is_multicast_addr(&in6))
+ rdma_get_mcast_mac(&in6, mac_addr);
+ else
+ memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
+ return 0;
+}
#endif
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index ee499d942257..34071143006e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -49,7 +49,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
ah->sgid_index = attr->grh.sgid_index;
- vlan_tag = rdma_get_vlan_id(&attr->grh.dgid);
+ vlan_tag = attr->vlan_id;
if (!vlan_tag || (vlan_tag > 0xFFF))
vlan_tag = dev->pvid;
if (vlan_tag && (vlan_tag < 0x1000)) {
@@ -64,7 +64,8 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
eth_sz = sizeof(struct ocrdma_eth_basic);
}
memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
- status = ocrdma_resolve_dgid(dev, &attr->grh.dgid, &eth.dmac[0]);
+ memcpy(&eth.dmac[0], attr->dmac, ETH_ALEN);
+ status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
if (status)
return status;
status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index,
@@ -84,6 +85,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
if (vlan_enabled)
ah->av->valid |= OCRDMA_AV_VLAN_VALID;
+ ah->av->valid = cpu_to_le32(ah->av->valid);
return status;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 56bf32fcb62c..1664d648cbfc 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -2076,23 +2076,6 @@ mbx_err:
return status;
}
-int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
- u8 *mac_addr)
-{
- struct in6_addr in6;
-
- memcpy(&in6, dgid, sizeof in6);
- if (rdma_is_multicast_addr(&in6)) {
- rdma_get_mcast_mac(&in6, mac_addr);
- } else if (rdma_link_local_addr(&in6)) {
- rdma_get_ll_mac(&in6, mac_addr);
- } else {
- pr_err("%s() fail to resolve mac_addr.\n", __func__);
- return -EINVAL;
- }
- return 0;
-}
-
static int ocrdma_set_av_params(struct ocrdma_qp *qp,
struct ocrdma_modify_qp *cmd,
struct ib_qp_attr *attrs)
@@ -2126,14 +2109,14 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
qp->sgid_idx = ah_attr->grh.sgid_index;
memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
- ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]);
+ ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]);
cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
(mac_addr[2] << 16) | (mac_addr[3] << 24);
/* convert them to LE format. */
ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
- vlan_id = rdma_get_vlan_id(&sgid);
+ vlan_id = ah_attr->vlan_id;
if (vlan_id && (vlan_id < 0x1000)) {
cmd->params.vlan_dmac_b4_to_b5 |=
vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index f2a89d4cc7c4..82fe332ae6c6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -94,7 +94,6 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed);
int ocrdma_query_config(struct ocrdma_dev *,
struct ocrdma_mbx_query_config *config);
-int ocrdma_resolve_dgid(struct ocrdma_dev *, union ib_gid *dgid, u8 *mac_addr);
int ocrdma_mbx_alloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);
int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 91443bcb9e0e..1a8a945efa60 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -67,46 +67,24 @@ void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
guid[7] = mac_addr[5];
}
-static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
- bool is_vlan, u16 vlan_id)
-{
- sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
- sgid->raw[8] = mac_addr[0] ^ 2;
- sgid->raw[9] = mac_addr[1];
- sgid->raw[10] = mac_addr[2];
- if (is_vlan) {
- sgid->raw[11] = vlan_id >> 8;
- sgid->raw[12] = vlan_id & 0xff;
- } else {
- sgid->raw[11] = 0xff;
- sgid->raw[12] = 0xfe;
- }
- sgid->raw[13] = mac_addr[3];
- sgid->raw[14] = mac_addr[4];
- sgid->raw[15] = mac_addr[5];
-}
-
-static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
- bool is_vlan, u16 vlan_id)
+static bool ocrdma_add_sgid(struct ocrdma_dev *dev, union ib_gid *new_sgid)
{
int i;
- union ib_gid new_sgid;
unsigned long flags;
memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
- ocrdma_build_sgid_mac(&new_sgid, mac_addr, is_vlan, vlan_id);
spin_lock_irqsave(&dev->sgid_lock, flags);
for (i = 0; i < OCRDMA_MAX_SGID; i++) {
if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
sizeof(union ib_gid))) {
/* found free entry */
- memcpy(&dev->sgid_tbl[i], &new_sgid,
+ memcpy(&dev->sgid_tbl[i], new_sgid,
sizeof(union ib_gid));
spin_unlock_irqrestore(&dev->sgid_lock, flags);
return true;
- } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
+ } else if (!memcmp(&dev->sgid_tbl[i], new_sgid,
sizeof(union ib_gid))) {
/* entry already present, no addition is required. */
spin_unlock_irqrestore(&dev->sgid_lock, flags);
@@ -117,20 +95,17 @@ static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
return false;
}
-static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
- bool is_vlan, u16 vlan_id)
+static bool ocrdma_del_sgid(struct ocrdma_dev *dev, union ib_gid *sgid)
{
int found = false;
int i;
- union ib_gid sgid;
unsigned long flags;
- ocrdma_build_sgid_mac(&sgid, mac_addr, is_vlan, vlan_id);
spin_lock_irqsave(&dev->sgid_lock, flags);
/* first is default sgid, which cannot be deleted. */
for (i = 1; i < OCRDMA_MAX_SGID; i++) {
- if (!memcmp(&dev->sgid_tbl[i], &sgid, sizeof(union ib_gid))) {
+ if (!memcmp(&dev->sgid_tbl[i], sgid, sizeof(union ib_gid))) {
/* found matching entry */
memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid));
found = true;
@@ -141,75 +116,18 @@ static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
return found;
}
-static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
-{
- /* GID Index 0 - Invariant manufacturer-assigned EUI-64 */
- union ib_gid *sgid = &dev->sgid_tbl[0];
-
- sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
- ocrdma_get_guid(dev, &sgid->raw[8]);
-}
-
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
-static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
-{
- struct net_device *netdev, *tmp;
- u16 vlan_id;
- bool is_vlan;
-
- netdev = dev->nic_info.netdev;
-
- rcu_read_lock();
- for_each_netdev_rcu(&init_net, tmp) {
- if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
- if (!netif_running(tmp) || !netif_oper_up(tmp))
- continue;
- if (netdev != tmp) {
- vlan_id = vlan_dev_vlan_id(tmp);
- is_vlan = true;
- } else {
- is_vlan = false;
- vlan_id = 0;
- tmp = netdev;
- }
- ocrdma_add_sgid(dev, tmp->dev_addr, is_vlan, vlan_id);
- }
- }
- rcu_read_unlock();
-}
-#else
-static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
-{
-
-}
-#endif /* VLAN */
-
-static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+static int ocrdma_addr_event(unsigned long event, struct net_device *netdev,
+ union ib_gid *gid)
{
- ocrdma_add_default_sgid(dev);
- ocrdma_add_vlan_sgids(dev);
- return 0;
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-
-static int ocrdma_inet6addr_event(struct notifier_block *notifier,
- unsigned long event, void *ptr)
-{
- struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
- struct net_device *netdev = ifa->idev->dev;
struct ib_event gid_event;
struct ocrdma_dev *dev;
bool found = false;
bool updated = false;
bool is_vlan = false;
- u16 vid = 0;
is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
- if (is_vlan) {
- vid = vlan_dev_vlan_id(netdev);
- netdev = vlan_dev_real_dev(netdev);
- }
+ if (is_vlan)
+ netdev = rdma_vlan_dev_real_dev(netdev);
rcu_read_lock();
list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
@@ -222,16 +140,14 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
if (!found)
return NOTIFY_DONE;
- if (!rdma_link_local_addr((struct in6_addr *)&ifa->addr))
- return NOTIFY_DONE;
mutex_lock(&dev->dev_lock);
switch (event) {
case NETDEV_UP:
- updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
+ updated = ocrdma_add_sgid(dev, gid);
break;
case NETDEV_DOWN:
- updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
+ updated = ocrdma_del_sgid(dev, gid);
break;
default:
break;
@@ -247,6 +163,32 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
return NOTIFY_OK;
}
+static int ocrdma_inetaddr_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ union ib_gid gid;
+ struct net_device *netdev = ifa->ifa_dev->dev;
+
+ ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
+ return ocrdma_addr_event(event, netdev, &gid);
+}
+
+static struct notifier_block ocrdma_inetaddr_notifier = {
+ .notifier_call = ocrdma_inetaddr_event
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+static int ocrdma_inet6addr_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
+ union ib_gid *gid = (union ib_gid *)&ifa->addr;
+ struct net_device *netdev = ifa->idev->dev;
+ return ocrdma_addr_event(event, netdev, gid);
+}
+
static struct notifier_block ocrdma_inet6addr_notifier = {
.notifier_call = ocrdma_inet6addr_event
};
@@ -423,10 +365,6 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
if (status)
goto alloc_err;
- status = ocrdma_build_sgid_tbl(dev);
- if (status)
- goto alloc_err;
-
status = ocrdma_register_device(dev);
if (status)
goto alloc_err;
@@ -553,6 +491,10 @@ static int __init ocrdma_init_module(void)
{
int status;
+ status = register_inetaddr_notifier(&ocrdma_inetaddr_notifier);
+ if (status)
+ return status;
+
#if IS_ENABLED(CONFIG_IPV6)
status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
if (status)
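With the new ocrdma_inetaddr_event() handler, IPv4 addresses enter the SGID table as IPv4-mapped IPv6 GIDs via ipv6_addr_set_v4mapped(): ten zero bytes, then 0xff 0xff, then the address in network order. A standalone illustration (not driver code) of the byte layout for 192.0.2.1:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint8_t gid[16] = { 0 };
	uint8_t v4[4] = { 192, 0, 2, 1 };	/* 192.0.2.1, documentation prefix */
	int i;

	gid[10] = 0xff;
	gid[11] = 0xff;
	memcpy(&gid[12], v4, 4);	/* IPv4 address in network order */

	for (i = 0; i < 16; i++)
		printf("%02x%s", gid[i], i == 15 ? "\n" : ":");
	/* 00:...:00:ff:ff:c0:00:02:01, i.e. the GID ::ffff:192.0.2.1 */
	return 0;
}
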
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 9f9570ec3c2e..60d5ac23ea80 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -31,7 +31,7 @@
#define Bit(_b) (1 << (_b))
#define OCRDMA_GEN1_FAMILY 0xB
-#define OCRDMA_GEN2_FAMILY 0x2
+#define OCRDMA_GEN2_FAMILY 0x0F
#define OCRDMA_SUBSYS_ROCE 10
enum {
@@ -1694,7 +1694,7 @@ struct ocrdma_grh {
u16 rsvd;
} __packed;
-#define OCRDMA_AV_VALID Bit(0)
+#define OCRDMA_AV_VALID Bit(7)
#define OCRDMA_AV_VLAN_VALID Bit(1)
struct ocrdma_av {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 7686dceadd29..e0cc201be41a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -176,7 +176,7 @@ int ocrdma_query_port(struct ib_device *ibdev,
props->port_cap_flags =
IB_PORT_CM_SUP |
IB_PORT_REINIT_SUP |
- IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
+ IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = OCRDMA_MAX_SGID;
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
@@ -1326,7 +1326,8 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_qps = old_qps;
spin_unlock_irqrestore(&qp->q_lock, flags);
- if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
+ if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
+ IB_LINK_LAYER_ETHERNET)) {
pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
"qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
__func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
@@ -1415,7 +1416,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
- OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
+ OCRDMA_QP_PARAMS_TCLASS_MASK) >>
OCRDMA_QP_PARAMS_TCLASS_SHIFT;
qp_attr->ah_attr.ah_flags = IB_AH_GRH;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bfc02f450e6..d1bd21319d7d 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2395,6 +2395,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
qib_write_kreg(dd, kr_scratch, 0ULL);
+ /* ensure previous Tx parameters are not still forced */
+ qib_write_kreg_port(ppd, krp_tx_deemph_override,
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ reset_tx_deemphasis_override));
+
if (qib_compat_ddr_negotiate) {
ppd->cpspec->ibdeltainprog = 1;
ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 3cca55b51e54..0cad0c40d742 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -585,7 +585,7 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask))
+ attr_mask, IB_LINK_LAYER_UNSPECIFIED))
goto inval;
if (attr_mask & IB_QP_AV) {
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index d6c7fe7f88d5..3ad651c3356c 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -57,13 +57,20 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
struct qib_sge *sge;
struct ib_wc wc;
u32 length;
+ enum ib_qp_type sqptype, dqptype;
qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
if (!qp) {
ibp->n_pkt_drops++;
return;
}
- if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
+
+ sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
+ IB_QPT_UD : sqp->ibqp.qp_type;
+ dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
+ IB_QPT_UD : qp->ibqp.qp_type;
+
+ if (dqptype != sqptype ||
!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
ibp->n_pkt_drops++;
goto drop;
diff --git a/drivers/infiniband/hw/usnic/Kconfig b/drivers/infiniband/hw/usnic/Kconfig
new file mode 100644
index 000000000000..29ab11c34f3f
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/Kconfig
@@ -0,0 +1,10 @@
+config INFINIBAND_USNIC
+ tristate "Verbs support for Cisco VIC"
+ depends on NETDEVICES && ETHERNET && INET && PCI && INTEL_IOMMU
+ select ENIC
+ select NET_VENDOR_CISCO
+ select PCI_IOV
+ select INFINIBAND_USER_ACCESS
+ ---help---
+ This is a low-level driver for Cisco's Virtual Interface
+ Cards (VICs), including the VIC 1240 and 1280 cards.
diff --git a/drivers/infiniband/hw/usnic/Makefile b/drivers/infiniband/hw/usnic/Makefile
new file mode 100644
index 000000000000..99fb2db47cd5
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/Makefile
@@ -0,0 +1,15 @@
+ccflags-y := -Idrivers/net/ethernet/cisco/enic
+
+obj-$(CONFIG_INFINIBAND_USNIC) += usnic_verbs.o
+
+usnic_verbs-y=\
+usnic_fwd.o \
+usnic_transport.o \
+usnic_uiom.o \
+usnic_uiom_interval_tree.o \
+usnic_vnic.o \
+usnic_ib_main.o \
+usnic_ib_qp_grp.o \
+usnic_ib_sysfs.o \
+usnic_ib_verbs.o \
+usnic_debugfs.o \
diff --git a/drivers/infiniband/hw/usnic/usnic.h b/drivers/infiniband/hw/usnic/usnic.h
new file mode 100644
index 000000000000..5be13d8991bc
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_H_
+#define USNIC_H_
+
+#define DRV_NAME "usnic_verbs"
+
+#define PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC 0x00cf /* User space NIC */
+
+#define DRV_VERSION "1.0.3"
+#define DRV_RELDATE "December 19, 2013"
+
+#endif /* USNIC_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_abi.h b/drivers/infiniband/hw/usnic/usnic_abi.h
new file mode 100644
index 000000000000..04a66229584e
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_abi.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+
+#ifndef USNIC_ABI_H
+#define USNIC_ABI_H
+
+/* ABI between userspace and kernel */
+#define USNIC_UVERBS_ABI_VERSION 4
+
+#define USNIC_QP_GRP_MAX_WQS 8
+#define USNIC_QP_GRP_MAX_RQS 8
+#define USNIC_QP_GRP_MAX_CQS 16
+
+enum usnic_transport_type {
+ USNIC_TRANSPORT_UNKNOWN = 0,
+ USNIC_TRANSPORT_ROCE_CUSTOM = 1,
+ USNIC_TRANSPORT_IPV4_UDP = 2,
+ USNIC_TRANSPORT_MAX = 3,
+};
+
+struct usnic_transport_spec {
+ enum usnic_transport_type trans_type;
+ union {
+ struct {
+ uint16_t port_num;
+ } usnic_roce;
+ struct {
+ uint32_t sock_fd;
+ } udp;
+ };
+};
+
+struct usnic_ib_create_qp_cmd {
+ struct usnic_transport_spec spec;
+};
+
+/*TODO: Future - usnic_modify_qp needs to pass in generic filters */
+struct usnic_ib_create_qp_resp {
+ u32 vfid;
+ u32 qp_grp_id;
+ u64 bar_bus_addr;
+ u32 bar_len;
+/*
+ * WQ, RQ, CQ are explicitly specified because exposing a generic resources interface
+ * expands the scope of ABI to many files.
+ */
+ u32 wq_cnt;
+ u32 rq_cnt;
+ u32 cq_cnt;
+ u32 wq_idx[USNIC_QP_GRP_MAX_WQS];
+ u32 rq_idx[USNIC_QP_GRP_MAX_RQS];
+ u32 cq_idx[USNIC_QP_GRP_MAX_CQS];
+ u32 transport;
+ u32 reserved[9];
+};
+
+#endif /* USNIC_ABI_H */
diff --git a/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h
new file mode 100644
index 000000000000..393567266142
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_CMN_PKT_HDR_H
+#define USNIC_CMN_PKT_HDR_H
+
+#define USNIC_ROCE_ETHERTYPE (0x8915)
+#define USNIC_ROCE_GRH_VER (8)
+#define USNIC_PROTO_VER (1)
+#define USNIC_ROCE_GRH_VER_SHIFT (4)
+
+#endif /* USNIC_COMMON_PKT_HDR_H */
diff --git a/drivers/infiniband/hw/usnic/usnic_common_util.h b/drivers/infiniband/hw/usnic/usnic_common_util.h
new file mode 100644
index 000000000000..9d737ed5e55d
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_common_util.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_CMN_UTIL_H
+#define USNIC_CMN_UTIL_H
+
+static inline void
+usnic_mac_to_gid(const char *const mac, char *raw_gid)
+{
+ raw_gid[0] = 0xfe;
+ raw_gid[1] = 0x80;
+ memset(&raw_gid[2], 0, 6);
+ raw_gid[8] = mac[0]^2;
+ raw_gid[9] = mac[1];
+ raw_gid[10] = mac[2];
+ raw_gid[11] = 0xff;
+ raw_gid[12] = 0xfe;
+ raw_gid[13] = mac[3];
+ raw_gid[14] = mac[4];
+ raw_gid[15] = mac[5];
+}
+
+static inline void
+usnic_mac_ip_to_gid(const char *const mac, const __be32 inaddr, char *raw_gid)
+{
+ raw_gid[0] = 0xfe;
+ raw_gid[1] = 0x80;
+ memset(&raw_gid[2], 0, 2);
+ memcpy(&raw_gid[4], &inaddr, 4);
+ raw_gid[8] = mac[0]^2;
+ raw_gid[9] = mac[1];
+ raw_gid[10] = mac[2];
+ raw_gid[11] = 0xff;
+ raw_gid[12] = 0xfe;
+ raw_gid[13] = mac[3];
+ raw_gid[14] = mac[4];
+ raw_gid[15] = mac[5];
+}
+
+static inline void
+usnic_write_gid_if_id_from_mac(char *mac, char *raw_gid)
+{
+ raw_gid[8] = mac[0]^2;
+ raw_gid[9] = mac[1];
+ raw_gid[10] = mac[2];
+ raw_gid[11] = 0xff;
+ raw_gid[12] = 0xfe;
+ raw_gid[13] = mac[3];
+ raw_gid[14] = mac[4];
+ raw_gid[15] = mac[5];
+}
+
+#endif /* USNIC_CMN_UTIL_H */
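
The helpers above build a modified EUI-64 interface id: the universal/local bit of the first MAC octet is flipped (mac[0] ^ 2) and the bytes 0xff, 0xfe are inserted between the OUI and the device part; usnic_mac_ip_to_gid() additionally embeds the IPv4 address in bytes 4-7 instead of zeroes. A standalone worked example (editor's sketch) for MAC 00:25:b5:aa:bb:cc, which yields the link-local GID fe80:0000:0000:0000:0225:b5ff:feaa:bbcc:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const unsigned char mac[6] = { 0x00, 0x25, 0xb5, 0xaa, 0xbb, 0xcc };
		unsigned char gid[16];
		int i;

		gid[0] = 0xfe;			/* link-local prefix fe80::/64 */
		gid[1] = 0x80;
		memset(&gid[2], 0, 6);
		gid[8]  = mac[0] ^ 2;		/* flip the universal/local bit */
		gid[9]  = mac[1];
		gid[10] = mac[2];
		gid[11] = 0xff;			/* EUI-64 filler bytes */
		gid[12] = 0xfe;
		gid[13] = mac[3];
		gid[14] = mac[4];
		gid[15] = mac[5];

		for (i = 0; i < 16; i += 2)
			printf("%02x%02x%s", gid[i], gid[i + 1], i == 14 ? "\n" : ":");
		return 0;
	}
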
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.c b/drivers/infiniband/hw/usnic/usnic_debugfs.c
new file mode 100644
index 000000000000..5d13860161a4
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_debugfs.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+#include "usnic.h"
+#include "usnic_log.h"
+#include "usnic_debugfs.h"
+#include "usnic_ib_qp_grp.h"
+#include "usnic_transport.h"
+
+static struct dentry *debugfs_root;
+static struct dentry *flows_dentry;
+
+static ssize_t usnic_debugfs_buildinfo_read(struct file *f, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ char buf[500];
+ int res;
+
+ if (*ppos > 0)
+ return 0;
+
+ res = scnprintf(buf, sizeof(buf),
+ "version: %s\n"
+ "build date: %s\n",
+ DRV_VERSION, DRV_RELDATE);
+
+ return simple_read_from_buffer(data, count, ppos, buf, res);
+}
+
+static const struct file_operations usnic_debugfs_buildinfo_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = usnic_debugfs_buildinfo_read
+};
+
+static ssize_t flowinfo_read(struct file *f, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct usnic_ib_qp_grp_flow *qp_flow;
+ int n;
+ int left;
+ char *ptr;
+ char buf[512];
+
+ qp_flow = f->private_data;
+ ptr = buf;
+ left = count;
+
+ if (*ppos > 0)
+ return 0;
+
+ spin_lock(&qp_flow->qp_grp->lock);
+ n = scnprintf(ptr, left,
+ "QP Grp ID: %d Transport: %s ",
+ qp_flow->qp_grp->grp_id,
+ usnic_transport_to_str(qp_flow->trans_type));
+ UPDATE_PTR_LEFT(n, ptr, left);
+ if (qp_flow->trans_type == USNIC_TRANSPORT_ROCE_CUSTOM) {
+ n = scnprintf(ptr, left, "Port_Num:%hu\n",
+ qp_flow->usnic_roce.port_num);
+ UPDATE_PTR_LEFT(n, ptr, left);
+ } else if (qp_flow->trans_type == USNIC_TRANSPORT_IPV4_UDP) {
+ n = usnic_transport_sock_to_str(ptr, left,
+ qp_flow->udp.sock);
+ UPDATE_PTR_LEFT(n, ptr, left);
+ n = scnprintf(ptr, left, "\n");
+ UPDATE_PTR_LEFT(n, ptr, left);
+ }
+ spin_unlock(&qp_flow->qp_grp->lock);
+
+ return simple_read_from_buffer(data, count, ppos, buf, ptr - buf);
+}
+
+static const struct file_operations flowinfo_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = flowinfo_read,
+};
+
+void usnic_debugfs_init(void)
+{
+ debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
+ if (IS_ERR(debugfs_root)) {
+ usnic_err("Failed to create debugfs root dir, check if debugfs is enabled in kernel configuration\n");
+ goto out_clear_root;
+ }
+
+ flows_dentry = debugfs_create_dir("flows", debugfs_root);
+ if (IS_ERR_OR_NULL(flows_dentry)) {
+ usnic_err("Failed to create debugfs flow dir with err %ld\n",
+ PTR_ERR(flows_dentry));
+ goto out_free_root;
+ }
+
+ debugfs_create_file("build-info", S_IRUGO, debugfs_root,
+ NULL, &usnic_debugfs_buildinfo_ops);
+ return;
+
+out_free_root:
+ debugfs_remove_recursive(debugfs_root);
+out_clear_root:
+ debugfs_root = NULL;
+}
+
+void usnic_debugfs_exit(void)
+{
+ if (!debugfs_root)
+ return;
+
+ debugfs_remove_recursive(debugfs_root);
+ debugfs_root = NULL;
+}
+
+void usnic_debugfs_flow_add(struct usnic_ib_qp_grp_flow *qp_flow)
+{
+ if (IS_ERR_OR_NULL(flows_dentry))
+ return;
+
+ scnprintf(qp_flow->dentry_name, sizeof(qp_flow->dentry_name),
+ "%u", qp_flow->flow->flow_id);
+ qp_flow->dbgfs_dentry = debugfs_create_file(qp_flow->dentry_name,
+ S_IRUGO,
+ flows_dentry,
+ qp_flow,
+ &flowinfo_ops);
+ if (IS_ERR_OR_NULL(qp_flow->dbgfs_dentry)) {
+ usnic_err("Failed to create dbg fs entry for flow %u\n",
+ qp_flow->flow->flow_id);
+ }
+}
+
+void usnic_debugfs_flow_remove(struct usnic_ib_qp_grp_flow *qp_flow)
+{
+ if (!IS_ERR_OR_NULL(qp_flow->dbgfs_dentry))
+ debugfs_remove(qp_flow->dbgfs_dentry);
+}
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.h b/drivers/infiniband/hw/usnic/usnic_debugfs.h
new file mode 100644
index 000000000000..4087d24a88f6
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_debugfs.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#ifndef USNIC_DEBUGFS_H_
+#define USNIC_DEBUGFS_H_
+
+#include "usnic_ib_qp_grp.h"
+
+void usnic_debugfs_init(void);
+
+void usnic_debugfs_exit(void);
+void usnic_debugfs_flow_add(struct usnic_ib_qp_grp_flow *qp_flow);
+void usnic_debugfs_flow_remove(struct usnic_ib_qp_grp_flow *qp_flow);
+
+#endif /*!USNIC_DEBUGFS_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c
new file mode 100644
index 000000000000..e3c9bd9d3ba3
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+#include "enic_api.h"
+#include "usnic_common_pkt_hdr.h"
+#include "usnic_fwd.h"
+#include "usnic_log.h"
+
+static int usnic_fwd_devcmd_locked(struct usnic_fwd_dev *ufdev, int vnic_idx,
+ enum vnic_devcmd_cmd cmd, u64 *a0,
+ u64 *a1)
+{
+ int status;
+ struct net_device *netdev = ufdev->netdev;
+
+ lockdep_assert_held(&ufdev->lock);
+
+ status = enic_api_devcmd_proxy_by_index(netdev,
+ vnic_idx,
+ cmd,
+ a0, a1,
+ 1000);
+ if (status) {
+ if (status == ERR_EINVAL && cmd == CMD_DEL_FILTER) {
+ usnic_dbg("Dev %s vnic idx %u cmd %u already deleted",
+ ufdev->name, vnic_idx, cmd);
+ } else {
+ usnic_err("Dev %s vnic idx %u cmd %u failed with status %d\n",
+ ufdev->name, vnic_idx, cmd,
+ status);
+ }
+ } else {
+ usnic_dbg("Dev %s vnic idx %u cmd %u success",
+ ufdev->name, vnic_idx, cmd);
+ }
+
+ return status;
+}
+
+static int usnic_fwd_devcmd(struct usnic_fwd_dev *ufdev, int vnic_idx,
+ enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1)
+{
+ int status;
+
+ spin_lock(&ufdev->lock);
+ status = usnic_fwd_devcmd_locked(ufdev, vnic_idx, cmd, a0, a1);
+ spin_unlock(&ufdev->lock);
+
+ return status;
+}
+
+struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev)
+{
+ struct usnic_fwd_dev *ufdev;
+
+ ufdev = kzalloc(sizeof(*ufdev), GFP_KERNEL);
+ if (!ufdev)
+ return NULL;
+
+ ufdev->pdev = pdev;
+ ufdev->netdev = pci_get_drvdata(pdev);
+ spin_lock_init(&ufdev->lock);
+ strncpy(ufdev->name, netdev_name(ufdev->netdev),
+ sizeof(ufdev->name) - 1);
+
+ return ufdev;
+}
+
+void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev)
+{
+ kfree(ufdev);
+}
+
+void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN])
+{
+ spin_lock(&ufdev->lock);
+ memcpy(&ufdev->mac, mac, sizeof(ufdev->mac));
+ spin_unlock(&ufdev->lock);
+}
+
+int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr)
+{
+ int status;
+
+ spin_lock(&ufdev->lock);
+ if (ufdev->inaddr == 0) {
+ ufdev->inaddr = inaddr;
+ status = 0;
+ } else {
+ status = -EFAULT;
+ }
+ spin_unlock(&ufdev->lock);
+
+ return status;
+}
+
+void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev)
+{
+ spin_lock(&ufdev->lock);
+ ufdev->inaddr = 0;
+ spin_unlock(&ufdev->lock);
+}
+
+void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev)
+{
+ spin_lock(&ufdev->lock);
+ ufdev->link_up = 1;
+ spin_unlock(&ufdev->lock);
+}
+
+void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev)
+{
+ spin_lock(&ufdev->lock);
+ ufdev->link_up = 0;
+ spin_unlock(&ufdev->lock);
+}
+
+void usnic_fwd_set_mtu(struct usnic_fwd_dev *ufdev, unsigned int mtu)
+{
+ spin_lock(&ufdev->lock);
+ ufdev->mtu = mtu;
+ spin_unlock(&ufdev->lock);
+}
+
+static int usnic_fwd_dev_ready_locked(struct usnic_fwd_dev *ufdev)
+{
+ lockdep_assert_held(&ufdev->lock);
+
+ if (!ufdev->link_up)
+ return -EPERM;
+
+ return 0;
+}
+
+static int validate_filter_locked(struct usnic_fwd_dev *ufdev,
+ struct filter *filter)
+{
+
+ lockdep_assert_held(&ufdev->lock);
+
+ if (filter->type == FILTER_IPV4_5TUPLE) {
+ if (!(filter->u.ipv4.flags & FILTER_FIELD_5TUP_DST_AD))
+ return -EACCES;
+ if (!(filter->u.ipv4.flags & FILTER_FIELD_5TUP_DST_PT))
+ return -EBUSY;
+ else if (ufdev->inaddr == 0)
+ return -EINVAL;
+ else if (filter->u.ipv4.dst_port == 0)
+ return -ERANGE;
+ else if (ntohl(ufdev->inaddr) != filter->u.ipv4.dst_addr)
+ return -EFAULT;
+ else
+ return 0;
+ }
+
+ return 0;
+}
+
+static void fill_tlv(struct filter_tlv *tlv, struct filter *filter,
+ struct filter_action *action)
+{
+ tlv->type = CLSF_TLV_FILTER;
+ tlv->length = sizeof(struct filter);
+ *((struct filter *)&tlv->val) = *filter;
+
+ tlv = (struct filter_tlv *)((char *)tlv + sizeof(struct filter_tlv) +
+ sizeof(struct filter));
+ tlv->type = CLSF_TLV_ACTION;
+ tlv->length = sizeof(struct filter_action);
+ *((struct filter_action *)&tlv->val) = *action;
+}
+
+struct usnic_fwd_flow*
+usnic_fwd_alloc_flow(struct usnic_fwd_dev *ufdev, struct filter *filter,
+ struct usnic_filter_action *uaction)
+{
+ struct filter_tlv *tlv;
+ struct pci_dev *pdev;
+ struct usnic_fwd_flow *flow;
+ uint64_t a0, a1;
+ uint64_t tlv_size;
+ dma_addr_t tlv_pa;
+ int status;
+
+ pdev = ufdev->pdev;
+ tlv_size = (2*sizeof(struct filter_tlv) + sizeof(struct filter) +
+ sizeof(struct filter_action));
+
+ flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+ if (!flow)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = pci_alloc_consistent(pdev, tlv_size, &tlv_pa);
+ if (!tlv) {
+ usnic_err("Failed to allocate memory\n");
+ status = -ENOMEM;
+ goto out_free_flow;
+ }
+
+ fill_tlv(tlv, filter, &uaction->action);
+
+ spin_lock(&ufdev->lock);
+ status = usnic_fwd_dev_ready_locked(ufdev);
+ if (status) {
+ usnic_err("Forwarding dev %s not ready with status %d\n",
+ ufdev->name, status);
+ goto out_free_tlv;
+ }
+
+ status = validate_filter_locked(ufdev, filter);
+ if (status) {
+ usnic_err("Failed to validate filter with status %d\n",
+ status);
+ goto out_free_tlv;
+ }
+
+ /* Issue Devcmd */
+ a0 = tlv_pa;
+ a1 = tlv_size;
+ status = usnic_fwd_devcmd_locked(ufdev, uaction->vnic_idx,
+ CMD_ADD_FILTER, &a0, &a1);
+ if (status) {
+ usnic_err("VF %s Filter add failed with status:%d",
+ ufdev->name, status);
+ status = -EFAULT;
+ goto out_free_tlv;
+ } else {
+ usnic_dbg("VF %s FILTER ID:%llu", ufdev->name, a0);
+ }
+
+ flow->flow_id = (uint32_t) a0;
+ flow->vnic_idx = uaction->vnic_idx;
+ flow->ufdev = ufdev;
+
+out_free_tlv:
+ spin_unlock(&ufdev->lock);
+ pci_free_consistent(pdev, tlv_size, tlv, tlv_pa);
+ if (!status)
+ return flow;
+out_free_flow:
+ kfree(flow);
+ return ERR_PTR(status);
+}
+
+int usnic_fwd_dealloc_flow(struct usnic_fwd_flow *flow)
+{
+ int status;
+ u64 a0, a1;
+
+ a0 = flow->flow_id;
+
+ status = usnic_fwd_devcmd(flow->ufdev, flow->vnic_idx,
+ CMD_DEL_FILTER, &a0, &a1);
+ if (status) {
+ if (status == ERR_EINVAL) {
+ usnic_dbg("Filter %u already deleted for VF Idx %u pf: %s status: %d",
+ flow->flow_id, flow->vnic_idx,
+ flow->ufdev->name, status);
+ } else {
+ usnic_err("PF %s VF Idx %u Filter: %u FILTER DELETE failed with status %d",
+ flow->ufdev->name, flow->vnic_idx,
+ flow->flow_id, status);
+ }
+ status = 0;
+ /*
+ * Log the error and fake success to the caller because if
+ * a flow fails to be deleted in the firmware, it is an
+ * unrecoverable error.
+ */
+ } else {
+ usnic_dbg("PF %s VF Idx %u Filter: %u FILTER DELETED",
+ flow->ufdev->name, flow->vnic_idx,
+ flow->flow_id);
+ }
+
+ kfree(flow);
+ return status;
+}
+
+int usnic_fwd_enable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx)
+{
+ int status;
+ struct net_device *pf_netdev;
+ u64 a0, a1;
+
+ pf_netdev = ufdev->netdev;
+ a0 = qp_idx;
+ a1 = CMD_QP_RQWQ;
+
+ status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_ENABLE,
+ &a0, &a1);
+ if (status) {
+ usnic_err("PF %s VNIC Index %u RQ Index: %u ENABLE Failed with status %d",
+ netdev_name(pf_netdev),
+ vnic_idx,
+ qp_idx,
+ status);
+ } else {
+ usnic_dbg("PF %s VNIC Index %u RQ Index: %u ENABLED",
+ netdev_name(pf_netdev),
+ vnic_idx, qp_idx);
+ }
+
+ return status;
+}
+
+int usnic_fwd_disable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx)
+{
+ int status;
+ u64 a0, a1;
+ struct net_device *pf_netdev;
+
+ pf_netdev = ufdev->netdev;
+ a0 = qp_idx;
+ a1 = CMD_QP_RQWQ;
+
+ status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_DISABLE,
+ &a0, &a1);
+ if (status) {
+ usnic_err("PF %s VNIC Index %u RQ Index: %u DISABLE Failed with status %d",
+ netdev_name(pf_netdev),
+ vnic_idx,
+ qp_idx,
+ status);
+ } else {
+ usnic_dbg("PF %s VNIC Index %u RQ Index: %u DISABLED",
+ netdev_name(pf_netdev),
+ vnic_idx,
+ qp_idx);
+ }
+
+ return status;
+}
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h
new file mode 100644
index 000000000000..93713a2230b3
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_FWD_H_
+#define USNIC_FWD_H_
+
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/in.h>
+
+#include "usnic_abi.h"
+#include "usnic_common_pkt_hdr.h"
+#include "vnic_devcmd.h"
+
+struct usnic_fwd_dev {
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ spinlock_t lock;
+ /*
+ * The following fields can be read directly off the device.
+ * However, they should be set by an accessor function, except name,
+ * which cannot be changed.
+ */
+ bool link_up;
+ char mac[ETH_ALEN];
+ unsigned int mtu;
+ __be32 inaddr;
+ char name[IFNAMSIZ+1];
+};
+
+struct usnic_fwd_flow {
+ uint32_t flow_id;
+ struct usnic_fwd_dev *ufdev;
+ unsigned int vnic_idx;
+};
+
+struct usnic_filter_action {
+ int vnic_idx;
+ struct filter_action action;
+};
+
+struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev);
+void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev);
+
+void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN]);
+int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
+void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev);
+void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev);
+void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev);
+void usnic_fwd_set_mtu(struct usnic_fwd_dev *ufdev, unsigned int mtu);
+
+/*
+ * Allocate a flow on this forwarding device. Whoever calls this function
+ * must monitor netdev events on ufdev's netdevice. If NETDEV_REBOOT or
+ * NETDEV_DOWN is seen, the flow will no longer function and must be
+ * immediately freed by calling usnic_fwd_dealloc_flow.
+ */
+struct usnic_fwd_flow*
+usnic_fwd_alloc_flow(struct usnic_fwd_dev *ufdev, struct filter *filter,
+ struct usnic_filter_action *action);
+int usnic_fwd_dealloc_flow(struct usnic_fwd_flow *flow);
+int usnic_fwd_enable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx);
+int usnic_fwd_disable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx);
+
+static inline void usnic_fwd_init_usnic_filter(struct filter *filter,
+ uint32_t usnic_id)
+{
+ filter->type = FILTER_USNIC_ID;
+ filter->u.usnic.ethtype = USNIC_ROCE_ETHERTYPE;
+ filter->u.usnic.flags = FILTER_FIELD_USNIC_ETHTYPE |
+ FILTER_FIELD_USNIC_ID |
+ FILTER_FIELD_USNIC_PROTO;
+ filter->u.usnic.proto_version = (USNIC_ROCE_GRH_VER <<
+ USNIC_ROCE_GRH_VER_SHIFT) |
+ USNIC_PROTO_VER;
+ filter->u.usnic.usnic_id = usnic_id;
+}
+
+static inline void usnic_fwd_init_udp_filter(struct filter *filter,
+ uint32_t daddr, uint16_t dport)
+{
+ filter->type = FILTER_IPV4_5TUPLE;
+ filter->u.ipv4.flags = FILTER_FIELD_5TUP_PROTO;
+ filter->u.ipv4.protocol = PROTO_UDP;
+
+ if (daddr) {
+ filter->u.ipv4.flags |= FILTER_FIELD_5TUP_DST_AD;
+ filter->u.ipv4.dst_addr = daddr;
+ }
+
+ if (dport) {
+ filter->u.ipv4.flags |= FILTER_FIELD_5TUP_DST_PT;
+ filter->u.ipv4.dst_port = dport;
+ }
+}
+
+#endif /* !USNIC_FWD_H_ */
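
Putting this API together, a caller builds a filter plus an RQ-steering action and allocates the flow; the driver's own create_udp_flow() and init_filter_action() in usnic_ib_qp_grp.c follow the same shape. A condensed in-kernel sketch (editor's illustration; ufdev, vnic_idx and rq_idx are assumed to be supplied by the caller, and daddr is in the host byte order that validate_filter_locked() checks against):

	#include <linux/err.h>
	#include <linux/string.h>
	#include "usnic_fwd.h"

	/* Steer UDP traffic destined to daddr:dport onto RQ rq_idx of VF vnic_idx. */
	static int example_steer_udp(struct usnic_fwd_dev *ufdev, int vnic_idx,
				     int rq_idx, uint32_t daddr, uint16_t dport)
	{
		struct filter filter;
		struct usnic_filter_action uaction;
		struct usnic_fwd_flow *flow;

		usnic_fwd_init_udp_filter(&filter, daddr, dport);

		memset(&uaction, 0, sizeof(uaction));
		uaction.vnic_idx = vnic_idx;
		uaction.action.type = FILTER_ACTION_RQ_STEERING;
		uaction.action.u.rq_idx = rq_idx;

		flow = usnic_fwd_alloc_flow(ufdev, &filter, &uaction);
		if (IS_ERR_OR_NULL(flow))
			return flow ? PTR_ERR(flow) : -EFAULT;

		/* ... RX packets matching the 5-tuple now land on rq_idx ... */

		return usnic_fwd_dealloc_flow(flow);
	}
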
diff --git a/drivers/infiniband/hw/usnic/usnic_ib.h b/drivers/infiniband/hw/usnic/usnic_ib.h
new file mode 100644
index 000000000000..e5a9297dd1bd
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_IB_H_
+#define USNIC_IB_H_
+
+#include <linux/iommu.h>
+#include <linux/netdevice.h>
+
+#include <rdma/ib_verbs.h>
+
+
+#include "usnic.h"
+#include "usnic_abi.h"
+#include "usnic_vnic.h"
+
+#define USNIC_IB_PORT_CNT 1
+#define USNIC_IB_NUM_COMP_VECTORS 1
+
+extern unsigned int usnic_ib_share_vf;
+
+struct usnic_ib_ucontext {
+ struct ib_ucontext ibucontext;
+ /* Protected by usnic_ib_dev->usdev_lock */
+ struct list_head qp_grp_list;
+ struct list_head link;
+};
+
+struct usnic_ib_pd {
+ struct ib_pd ibpd;
+ struct usnic_uiom_pd *umem_pd;
+};
+
+struct usnic_ib_mr {
+ struct ib_mr ibmr;
+ struct usnic_uiom_reg *umem;
+};
+
+struct usnic_ib_dev {
+ struct ib_device ib_dev;
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ struct usnic_fwd_dev *ufdev;
+ struct list_head ib_dev_link;
+ struct list_head vf_dev_list;
+ struct list_head ctx_list;
+ struct mutex usdev_lock;
+
+ /* provisioning information */
+ struct kref vf_cnt;
+ unsigned int vf_res_cnt[USNIC_VNIC_RES_TYPE_MAX];
+
+ /* sysfs vars for QPN reporting */
+ struct kobject *qpn_kobj;
+};
+
+struct usnic_ib_vf {
+ struct usnic_ib_dev *pf;
+ spinlock_t lock;
+ struct usnic_vnic *vnic;
+ unsigned int qp_grp_ref_cnt;
+ struct usnic_ib_pd *pd;
+ struct list_head link;
+};
+
+static inline
+struct usnic_ib_dev *to_usdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct usnic_ib_dev, ib_dev);
+}
+
+static inline
+struct usnic_ib_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct usnic_ib_ucontext, ibucontext);
+}
+
+static inline
+struct usnic_ib_pd *to_upd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct usnic_ib_pd, ibpd);
+}
+
+static inline
+struct usnic_ib_ucontext *to_uucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct usnic_ib_ucontext, ibucontext);
+}
+
+static inline
+struct usnic_ib_mr *to_umr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct usnic_ib_mr, ibmr);
+}
+void usnic_ib_log_vf(struct usnic_ib_vf *vf);
+
+#define UPDATE_PTR_LEFT(N, P, L) \
+do { \
+ L -= (N); \
+ P += (N); \
+} while (0)
+
+#endif /* USNIC_IB_H_ */
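
UPDATE_PTR_LEFT is the small cursor helper used by the dump and debugfs code: after each scnprintf() it advances the buffer pointer and shrinks the remaining length. A standalone sketch of the idiom (editor's illustration; userspace snprintf stands in for the kernel's scnprintf here):

	#include <stdio.h>

	#define UPDATE_PTR_LEFT(N, P, L) \
	do { \
		L -= (N); \
		P += (N); \
	} while (0)

	int main(void)
	{
		char buf[64];
		char *ptr = buf;
		int left = sizeof(buf);
		int n;

		n = snprintf(ptr, left, "QP Grp ID: %d ", 7);
		UPDATE_PTR_LEFT(n, ptr, left);
		n = snprintf(ptr, left, "Transport: %s\n", "udp");
		UPDATE_PTR_LEFT(n, ptr, left);

		fputs(buf, stdout);	/* prints: QP Grp ID: 7 Transport: udp */
		return 0;
	}
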
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
new file mode 100644
index 000000000000..fb6d026f92cd
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -0,0 +1,682 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Author: Upinder Malhi <umalhi@cisco.com>
+ * Author: Anant Deepak <anadeepa@cisco.com>
+ * Author: Cesare Cantu' <cantuc@cisco.com>
+ * Author: Jeff Squyres <jsquyres@cisco.com>
+ * Author: Kiran Thirumalai <kithirum@cisco.com>
+ * Author: Xuyang Wang <xuywang@cisco.com>
+ * Author: Reese Faucette <rfaucett@cisco.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
+
+#include "usnic_abi.h"
+#include "usnic_common_util.h"
+#include "usnic_ib.h"
+#include "usnic_ib_qp_grp.h"
+#include "usnic_log.h"
+#include "usnic_fwd.h"
+#include "usnic_debugfs.h"
+#include "usnic_ib_verbs.h"
+#include "usnic_transport.h"
+#include "usnic_uiom.h"
+#include "usnic_ib_sysfs.h"
+
+unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR;
+unsigned int usnic_ib_share_vf = 1;
+
+static const char usnic_version[] =
+ DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v"
+ DRV_VERSION " (" DRV_RELDATE ")\n";
+
+static DEFINE_MUTEX(usnic_ib_ibdev_list_lock);
+static LIST_HEAD(usnic_ib_ibdev_list);
+
+/* Callback dump funcs */
+static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
+{
+ struct usnic_ib_vf *vf = obj;
+ return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name);
+}
+/* End callback dump funcs */
+
+static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
+{
+ usnic_vnic_dump(vf->vnic, buf, buf_sz, vf,
+ usnic_ib_dump_vf_hdr,
+ usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows);
+}
+
+void usnic_ib_log_vf(struct usnic_ib_vf *vf)
+{
+ char buf[1000];
+ usnic_ib_dump_vf(vf, buf, sizeof(buf));
+ usnic_dbg("%s\n", buf);
+}
+
+/* Start of netdev section */
+static inline const char *usnic_ib_netdev_event_to_string(unsigned long event)
+{
+ const char *event2str[] = {"NETDEV_NONE", "NETDEV_UP", "NETDEV_DOWN",
+ "NETDEV_REBOOT", "NETDEV_CHANGE",
+ "NETDEV_REGISTER", "NETDEV_UNREGISTER", "NETDEV_CHANGEMTU",
+ "NETDEV_CHANGEADDR", "NETDEV_GOING_DOWN", "NETDEV_FEAT_CHANGE",
+ "NETDEV_BONDING_FAILOVER", "NETDEV_PRE_UP",
+ "NETDEV_PRE_TYPE_CHANGE", "NETDEV_POST_TYPE_CHANGE",
+ "NETDEV_POST_INT", "NETDEV_UNREGISTER_FINAL", "NETDEV_RELEASE",
+ "NETDEV_NOTIFY_PEERS", "NETDEV_JOIN"
+ };
+
+ if (event >= ARRAY_SIZE(event2str))
+ return "UNKNOWN_NETDEV_EVENT";
+ else
+ return event2str[event];
+}
+
+static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
+{
+ struct usnic_ib_ucontext *ctx;
+ struct usnic_ib_qp_grp *qp_grp;
+ enum ib_qp_state cur_state;
+ int status;
+
+ BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));
+
+ list_for_each_entry(ctx, &us_ibdev->ctx_list, link) {
+ list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) {
+ cur_state = qp_grp->state;
+ if (cur_state == IB_QPS_INIT ||
+ cur_state == IB_QPS_RTR ||
+ cur_state == IB_QPS_RTS) {
+ status = usnic_ib_qp_grp_modify(qp_grp,
+ IB_QPS_ERR,
+ NULL);
+ if (status) {
+ usnic_err("Failed to transition qp grp %u from %s to %s\n",
+ qp_grp->grp_id,
+ usnic_ib_qp_grp_state_to_string
+ (cur_state),
+ usnic_ib_qp_grp_state_to_string
+ (IB_QPS_ERR));
+ }
+ }
+ }
+ }
+}
+
+static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
+ unsigned long event)
+{
+ struct net_device *netdev;
+ struct ib_event ib_event;
+
+ memset(&ib_event, 0, sizeof(ib_event));
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ netdev = us_ibdev->netdev;
+ switch (event) {
+ case NETDEV_REBOOT:
+ usnic_info("PF Reset on %s\n", us_ibdev->ib_dev.name);
+ usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
+ ib_event.event = IB_EVENT_PORT_ERR;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ break;
+ case NETDEV_UP:
+ case NETDEV_DOWN:
+ case NETDEV_CHANGE:
+ if (!us_ibdev->ufdev->link_up &&
+ netif_carrier_ok(netdev)) {
+ usnic_fwd_carrier_up(us_ibdev->ufdev);
+ usnic_info("Link UP on %s\n", us_ibdev->ib_dev.name);
+ ib_event.event = IB_EVENT_PORT_ACTIVE;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ } else if (us_ibdev->ufdev->link_up &&
+ !netif_carrier_ok(netdev)) {
+ usnic_fwd_carrier_down(us_ibdev->ufdev);
+ usnic_info("Link DOWN on %s\n", us_ibdev->ib_dev.name);
+ usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
+ ib_event.event = IB_EVENT_PORT_ERR;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ } else {
+ usnic_dbg("Ignoring %s on %s\n",
+ usnic_ib_netdev_event_to_string(event),
+ us_ibdev->ib_dev.name);
+ }
+ break;
+ case NETDEV_CHANGEADDR:
+ if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
+ sizeof(us_ibdev->ufdev->mac))) {
+ usnic_dbg("Ignoring addr change on %s\n",
+ us_ibdev->ib_dev.name);
+ } else {
+ usnic_info(" %s old mac: %pM new mac: %pM\n",
+ us_ibdev->ib_dev.name,
+ us_ibdev->ufdev->mac,
+ netdev->dev_addr);
+ usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
+ usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
+ ib_event.event = IB_EVENT_GID_CHANGE;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ }
+
+ break;
+ case NETDEV_CHANGEMTU:
+ if (us_ibdev->ufdev->mtu != netdev->mtu) {
+ usnic_info("MTU Change on %s old: %u new: %u\n",
+ us_ibdev->ib_dev.name,
+ us_ibdev->ufdev->mtu, netdev->mtu);
+ usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
+ usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
+ } else {
+ usnic_dbg("Ignoring MTU change on %s\n",
+ us_ibdev->ib_dev.name);
+ }
+ break;
+ default:
+ usnic_dbg("Ignoring event %s on %s",
+ usnic_ib_netdev_event_to_string(event),
+ us_ibdev->ib_dev.name);
+ }
+ mutex_unlock(&us_ibdev->usdev_lock);
+}
+
+static int usnic_ib_netdevice_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct usnic_ib_dev *us_ibdev;
+
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ mutex_lock(&usnic_ib_ibdev_list_lock);
+ list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
+ if (us_ibdev->netdev == netdev) {
+ usnic_ib_handle_usdev_event(us_ibdev, event);
+ break;
+ }
+ }
+ mutex_unlock(&usnic_ib_ibdev_list_lock);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block usnic_ib_netdevice_notifier = {
+ .notifier_call = usnic_ib_netdevice_event
+};
+/* End of netdev section */
+
+/* Start of inet section */
+static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ struct ib_event ib_event;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+
+ switch (event) {
+ case NETDEV_DOWN:
+ usnic_info("%s via ip notifiers",
+ usnic_ib_netdev_event_to_string(event));
+ usnic_fwd_del_ipaddr(us_ibdev->ufdev);
+ usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
+ ib_event.event = IB_EVENT_GID_CHANGE;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ break;
+ case NETDEV_UP:
+ usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
+ usnic_info("%s via ip notifiers: ip %pI4",
+ usnic_ib_netdev_event_to_string(event),
+ &us_ibdev->ufdev->inaddr);
+ ib_event.event = IB_EVENT_GID_CHANGE;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ break;
+ default:
+ usnic_info("Ignoring event %s on %s",
+ usnic_ib_netdev_event_to_string(event),
+ us_ibdev->ib_dev.name);
+ }
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return NOTIFY_DONE;
+}
+
+static int usnic_ib_inetaddr_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct usnic_ib_dev *us_ibdev;
+ struct in_ifaddr *ifa = ptr;
+ struct net_device *netdev = ifa->ifa_dev->dev;
+
+ mutex_lock(&usnic_ib_ibdev_list_lock);
+ list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
+ if (us_ibdev->netdev == netdev) {
+ usnic_ib_handle_inet_event(us_ibdev, event, ptr);
+ break;
+ }
+ }
+ mutex_unlock(&usnic_ib_ibdev_list_lock);
+
+ return NOTIFY_DONE;
+}
+static struct notifier_block usnic_ib_inetaddr_notifier = {
+ .notifier_call = usnic_ib_inetaddr_event
+};
+/* End of inet section */
+
+/* Start of PF discovery section */
+static void *usnic_ib_device_add(struct pci_dev *dev)
+{
+ struct usnic_ib_dev *us_ibdev;
+ union ib_gid gid;
+ struct in_ifaddr *in;
+ struct net_device *netdev;
+
+ usnic_dbg("\n");
+ netdev = pci_get_drvdata(dev);
+
+ us_ibdev = (struct usnic_ib_dev *)ib_alloc_device(sizeof(*us_ibdev));
+ if (IS_ERR_OR_NULL(us_ibdev)) {
+ usnic_err("Device %s context alloc failed\n",
+ netdev_name(pci_get_drvdata(dev)));
+ return ERR_PTR(us_ibdev ? PTR_ERR(us_ibdev) : -EFAULT);
+ }
+
+ us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
+ if (IS_ERR_OR_NULL(us_ibdev->ufdev)) {
+ usnic_err("Failed to alloc ufdev for %s with err %ld\n",
+ pci_name(dev), PTR_ERR(us_ibdev->ufdev));
+ goto err_dealloc;
+ }
+
+ mutex_init(&us_ibdev->usdev_lock);
+ INIT_LIST_HEAD(&us_ibdev->vf_dev_list);
+ INIT_LIST_HEAD(&us_ibdev->ctx_list);
+
+ us_ibdev->pdev = dev;
+ us_ibdev->netdev = pci_get_drvdata(dev);
+ us_ibdev->ib_dev.owner = THIS_MODULE;
+ us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
+ us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
+ us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
+ us_ibdev->ib_dev.dma_device = &dev->dev;
+ us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;
+ strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX);
+
+ us_ibdev->ib_dev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_OPEN_QP);
+
+ us_ibdev->ib_dev.query_device = usnic_ib_query_device;
+ us_ibdev->ib_dev.query_port = usnic_ib_query_port;
+ us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey;
+ us_ibdev->ib_dev.query_gid = usnic_ib_query_gid;
+ us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer;
+ us_ibdev->ib_dev.alloc_pd = usnic_ib_alloc_pd;
+ us_ibdev->ib_dev.dealloc_pd = usnic_ib_dealloc_pd;
+ us_ibdev->ib_dev.create_qp = usnic_ib_create_qp;
+ us_ibdev->ib_dev.modify_qp = usnic_ib_modify_qp;
+ us_ibdev->ib_dev.query_qp = usnic_ib_query_qp;
+ us_ibdev->ib_dev.destroy_qp = usnic_ib_destroy_qp;
+ us_ibdev->ib_dev.create_cq = usnic_ib_create_cq;
+ us_ibdev->ib_dev.destroy_cq = usnic_ib_destroy_cq;
+ us_ibdev->ib_dev.reg_user_mr = usnic_ib_reg_mr;
+ us_ibdev->ib_dev.dereg_mr = usnic_ib_dereg_mr;
+ us_ibdev->ib_dev.alloc_ucontext = usnic_ib_alloc_ucontext;
+ us_ibdev->ib_dev.dealloc_ucontext = usnic_ib_dealloc_ucontext;
+ us_ibdev->ib_dev.mmap = usnic_ib_mmap;
+ us_ibdev->ib_dev.create_ah = usnic_ib_create_ah;
+ us_ibdev->ib_dev.destroy_ah = usnic_ib_destroy_ah;
+ us_ibdev->ib_dev.post_send = usnic_ib_post_send;
+ us_ibdev->ib_dev.post_recv = usnic_ib_post_recv;
+ us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq;
+ us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq;
+ us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr;
+
+
+ if (ib_register_device(&us_ibdev->ib_dev, NULL))
+ goto err_fwd_dealloc;
+
+ usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
+ usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr);
+ if (netif_carrier_ok(us_ibdev->netdev))
+ usnic_fwd_carrier_up(us_ibdev->ufdev);
+
+ in = ((struct in_device *)(netdev->ip_ptr))->ifa_list;
+ if (in != NULL)
+ usnic_fwd_add_ipaddr(us_ibdev->ufdev, in->ifa_address);
+
+ usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
+ us_ibdev->ufdev->inaddr, &gid.raw[0]);
+ memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
+ sizeof(gid.global.interface_id));
+ kref_init(&us_ibdev->vf_cnt);
+
+ usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
+ us_ibdev->ib_dev.name, netdev_name(us_ibdev->netdev),
+ us_ibdev->ufdev->mac, us_ibdev->ufdev->link_up,
+ us_ibdev->ufdev->mtu);
+ return us_ibdev;
+
+err_fwd_dealloc:
+ usnic_fwd_dev_free(us_ibdev->ufdev);
+err_dealloc:
+ usnic_err("failed -- deallocing device\n");
+ ib_dealloc_device(&us_ibdev->ib_dev);
+ return NULL;
+}
+
+static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
+{
+ usnic_info("Unregistering %s\n", us_ibdev->ib_dev.name);
+ usnic_ib_sysfs_unregister_usdev(us_ibdev);
+ usnic_fwd_dev_free(us_ibdev->ufdev);
+ ib_unregister_device(&us_ibdev->ib_dev);
+ ib_dealloc_device(&us_ibdev->ib_dev);
+}
+
+static void usnic_ib_undiscover_pf(struct kref *kref)
+{
+ struct usnic_ib_dev *us_ibdev, *tmp;
+ struct pci_dev *dev;
+ bool found = false;
+
+ dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev;
+ mutex_lock(&usnic_ib_ibdev_list_lock);
+ list_for_each_entry_safe(us_ibdev, tmp,
+ &usnic_ib_ibdev_list, ib_dev_link) {
+ if (us_ibdev->pdev == dev) {
+ list_del(&us_ibdev->ib_dev_link);
+ usnic_ib_device_remove(us_ibdev);
+ found = true;
+ break;
+ }
+ }
+
+ WARN(!found, "Failed to remove PF %s\n", pci_name(dev));
+
+ mutex_unlock(&usnic_ib_ibdev_list_lock);
+}
+
+static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
+{
+ struct usnic_ib_dev *us_ibdev;
+ struct pci_dev *parent_pci, *vf_pci;
+ int err;
+
+ vf_pci = usnic_vnic_get_pdev(vnic);
+ parent_pci = pci_physfn(vf_pci);
+
+ BUG_ON(!parent_pci);
+
+ mutex_lock(&usnic_ib_ibdev_list_lock);
+ list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
+ if (us_ibdev->pdev == parent_pci) {
+ kref_get(&us_ibdev->vf_cnt);
+ goto out;
+ }
+ }
+
+ us_ibdev = usnic_ib_device_add(parent_pci);
+ if (IS_ERR_OR_NULL(us_ibdev)) {
+ us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
+ goto out;
+ }
+
+ err = usnic_ib_sysfs_register_usdev(us_ibdev);
+ if (err) {
+ usnic_ib_device_remove(us_ibdev);
+ us_ibdev = ERR_PTR(err);
+ goto out;
+ }
+
+ list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list);
+out:
+ mutex_unlock(&usnic_ib_ibdev_list_lock);
+ return us_ibdev;
+}
+/* End of PF discovery section */
+
+/* Start of PCI section */
+
+static DEFINE_PCI_DEVICE_TABLE(usnic_ib_pci_ids) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
+ {0,}
+};
+
+static int usnic_ib_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int err;
+ struct usnic_ib_dev *pf;
+ struct usnic_ib_vf *vf;
+ enum usnic_vnic_res_type res_type;
+
+ vf = kzalloc(sizeof(*vf), GFP_KERNEL);
+ if (!vf)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ usnic_err("Failed to enable %s with err %d\n",
+ pci_name(pdev), err);
+ goto out_clean_vf;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ usnic_err("Failed to request region for %s with err %d\n",
+ pci_name(pdev), err);
+ goto out_disable_device;
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, vf);
+
+ vf->vnic = usnic_vnic_alloc(pdev);
+ if (IS_ERR_OR_NULL(vf->vnic)) {
+ err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM;
+ usnic_err("Failed to alloc vnic for %s with err %d\n",
+ pci_name(pdev), err);
+ goto out_release_regions;
+ }
+
+ pf = usnic_ib_discover_pf(vf->vnic);
+ if (IS_ERR_OR_NULL(pf)) {
+ usnic_err("Failed to discover pf of vnic %s with err %ld\n",
+ pci_name(pdev), PTR_ERR(pf));
+ err = pf ? PTR_ERR(pf) : -EFAULT;
+ goto out_clean_vnic;
+ }
+
+ vf->pf = pf;
+ spin_lock_init(&vf->lock);
+ mutex_lock(&pf->usdev_lock);
+ list_add_tail(&vf->link, &pf->vf_dev_list);
+ /*
+ * Save max settings (will be the same for each VF; easier to re-write
+ * than to say "if (!set) { set_values(); set = 1; }").
+ */
+ for (res_type = USNIC_VNIC_RES_TYPE_EOL+1;
+ res_type < USNIC_VNIC_RES_TYPE_MAX;
+ res_type++) {
+ pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic,
+ res_type);
+ }
+
+ mutex_unlock(&pf->usdev_lock);
+
+ usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
+ pf->ib_dev.name);
+ usnic_ib_log_vf(vf);
+ return 0;
+
+out_clean_vnic:
+ usnic_vnic_free(vf->vnic);
+out_release_regions:
+ pci_set_drvdata(pdev, NULL);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+out_disable_device:
+ pci_disable_device(pdev);
+out_clean_vf:
+ kfree(vf);
+ return err;
+}
+
+static void usnic_ib_pci_remove(struct pci_dev *pdev)
+{
+ struct usnic_ib_vf *vf = pci_get_drvdata(pdev);
+ struct usnic_ib_dev *pf = vf->pf;
+
+ mutex_lock(&pf->usdev_lock);
+ list_del(&vf->link);
+ mutex_unlock(&pf->usdev_lock);
+
+ kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
+ usnic_vnic_free(vf->vnic);
+ pci_set_drvdata(pdev, NULL);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ kfree(vf);
+
+ usnic_info("Removed VF %s\n", pci_name(pdev));
+}
+
+/* PCI driver entry points */
+static struct pci_driver usnic_ib_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = usnic_ib_pci_ids,
+ .probe = usnic_ib_pci_probe,
+ .remove = usnic_ib_pci_remove,
+};
+/* End of PCI section */
+
+/* Start of module section */
+static int __init usnic_ib_init(void)
+{
+ int err;
+
+ printk_once(KERN_INFO "%s", usnic_version);
+
+ err = usnic_uiom_init(DRV_NAME);
+ if (err) {
+ usnic_err("Unable to initialize umem with err %d\n", err);
+ return err;
+ }
+
+ if (pci_register_driver(&usnic_ib_pci_driver)) {
+ usnic_err("Unable to register with PCI\n");
+ goto out_umem_fini;
+ }
+
+ err = register_netdevice_notifier(&usnic_ib_netdevice_notifier);
+ if (err) {
+ usnic_err("Failed to register netdev notifier\n");
+ goto out_pci_unreg;
+ }
+
+ err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
+ if (err) {
+ usnic_err("Failed to register inet addr notifier\n");
+ goto out_unreg_netdev_notifier;
+ }
+
+ err = usnic_transport_init();
+ if (err) {
+ usnic_err("Failed to initialize transport\n");
+ goto out_unreg_inetaddr_notifier;
+ }
+
+ usnic_debugfs_init();
+
+ return 0;
+
+out_unreg_inetaddr_notifier:
+ unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
+out_unreg_netdev_notifier:
+ unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
+out_pci_unreg:
+ pci_unregister_driver(&usnic_ib_pci_driver);
+out_umem_fini:
+ usnic_uiom_fini();
+
+ return err;
+}
+
+static void __exit usnic_ib_destroy(void)
+{
+ usnic_dbg("\n");
+ usnic_debugfs_exit();
+ usnic_transport_fini();
+ unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
+ unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
+ pci_unregister_driver(&usnic_ib_pci_driver);
+ usnic_uiom_fini();
+}
+
+MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
+MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
+module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
+MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs");
+MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids);
+
+module_init(usnic_ib_init);
+module_exit(usnic_ib_destroy);
+/* End of module section */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
new file mode 100644
index 000000000000..f8dfd76be89f
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -0,0 +1,761 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+#include "usnic_log.h"
+#include "usnic_vnic.h"
+#include "usnic_fwd.h"
+#include "usnic_uiom.h"
+#include "usnic_debugfs.h"
+#include "usnic_ib_qp_grp.h"
+#include "usnic_ib_sysfs.h"
+#include "usnic_transport.h"
+
+#define DFLT_RQ_IDX 0
+
+const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
+{
+ switch (state) {
+ case IB_QPS_RESET:
+ return "Rst";
+ case IB_QPS_INIT:
+ return "Init";
+ case IB_QPS_RTR:
+ return "RTR";
+ case IB_QPS_RTS:
+ return "RTS";
+ case IB_QPS_SQD:
+ return "SQD";
+ case IB_QPS_SQE:
+ return "SQE";
+ case IB_QPS_ERR:
+ return "ERR";
+ default:
+ return "UNKOWN STATE";
+
+ }
+}
+
+int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
+{
+ return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
+}
+
+int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
+{
+ struct usnic_ib_qp_grp *qp_grp = obj;
+ struct usnic_ib_qp_grp_flow *default_flow;
+ if (obj) {
+ default_flow = list_first_entry(&qp_grp->flows_lst,
+ struct usnic_ib_qp_grp_flow, link);
+ return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
+ qp_grp->ibqp.qp_num,
+ usnic_ib_qp_grp_state_to_string(
+ qp_grp->state),
+ qp_grp->owner_pid,
+ usnic_vnic_get_index(qp_grp->vf->vnic),
+ default_flow->flow->flow_id);
+ } else {
+ return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
+ }
+}
+
+static struct usnic_vnic_res_chunk *
+get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
+{
+ lockdep_assert_held(&qp_grp->lock);
+ /*
+ * The QP res chunk, used to derive qp indices,
+ * is just the set of RQ indices
+ */
+ return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
+}
+
+static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
+{
+
+ int status;
+ int i, vnic_idx;
+ struct usnic_vnic_res_chunk *res_chunk;
+ struct usnic_vnic_res *res;
+
+ lockdep_assert_held(&qp_grp->lock);
+
+ vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
+
+ res_chunk = get_qp_res_chunk(qp_grp);
+ if (IS_ERR_OR_NULL(res_chunk)) {
+ usnic_err("Unable to get qp res with err %ld\n",
+ PTR_ERR(res_chunk));
+ return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+ }
+
+ for (i = 0; i < res_chunk->cnt; i++) {
+ res = res_chunk->res[i];
+ status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
+ res->vnic_idx);
+ if (status) {
+ usnic_err("Failed to enable qp %d of %s:%d with err %d\n",
+ res->vnic_idx, qp_grp->ufdev->name,
+ vnic_idx, status);
+ goto out_err;
+ }
+ }
+
+ return 0;
+
+out_err:
+ for (i--; i >= 0; i--) {
+ res = res_chunk->res[i];
+ usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
+ res->vnic_idx);
+ }
+
+ return status;
+}
+
+static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
+{
+ int i, vnic_idx;
+ struct usnic_vnic_res_chunk *res_chunk;
+ struct usnic_vnic_res *res;
+ int status = 0;
+
+ lockdep_assert_held(&qp_grp->lock);
+ vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
+
+ res_chunk = get_qp_res_chunk(qp_grp);
+ if (IS_ERR_OR_NULL(res_chunk)) {
+ usnic_err("Unable to get qp res with err %ld\n",
+ PTR_ERR(res_chunk));
+ return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+ }
+
+ for (i = 0; i < res_chunk->cnt; i++) {
+ res = res_chunk->res[i];
+ status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
+ res->vnic_idx);
+ if (status) {
+ usnic_err("Failed to disable rq %d of %s:%d with err %d\n",
+ res->vnic_idx,
+ qp_grp->ufdev->name,
+ vnic_idx, status);
+ }
+ }
+
+ return status;
+
+}
+
+static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
+ struct usnic_filter_action *uaction)
+{
+ struct usnic_vnic_res_chunk *res_chunk;
+
+ res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
+ if (IS_ERR_OR_NULL(res_chunk)) {
+ usnic_err("Unable to get %s with err %ld\n",
+ usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
+ PTR_ERR(res_chunk));
+ return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+ }
+
+ uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
+ uaction->action.type = FILTER_ACTION_RQ_STEERING;
+ uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;
+
+ return 0;
+}
+
+static struct usnic_ib_qp_grp_flow*
+create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
+ struct usnic_transport_spec *trans_spec)
+{
+ uint16_t port_num;
+ int err;
+ struct filter filter;
+ struct usnic_filter_action uaction;
+ struct usnic_ib_qp_grp_flow *qp_flow;
+ struct usnic_fwd_flow *flow;
+ enum usnic_transport_type trans_type;
+
+ trans_type = trans_spec->trans_type;
+ port_num = trans_spec->usnic_roce.port_num;
+
+ /* Reserve Port */
+ port_num = usnic_transport_rsrv_port(trans_type, port_num);
+ if (port_num == 0)
+ return ERR_PTR(-EINVAL);
+
+ /* Create Flow */
+ usnic_fwd_init_usnic_filter(&filter, port_num);
+ err = init_filter_action(qp_grp, &uaction);
+ if (err)
+ goto out_unreserve_port;
+
+ flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
+ if (IS_ERR_OR_NULL(flow)) {
+ usnic_err("Failed to alloc flow with err %ld\n",
+ PTR_ERR(flow));
+ err = flow ? PTR_ERR(flow) : -EFAULT;
+ goto out_unreserve_port;
+ }
+
+ /* Create Flow Handle */
+ qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
+ if (IS_ERR_OR_NULL(qp_flow)) {
+ err = qp_flow ? PTR_ERR(qp_flow) : -ENOMEM;
+ goto out_dealloc_flow;
+ }
+ qp_flow->flow = flow;
+ qp_flow->trans_type = trans_type;
+ qp_flow->usnic_roce.port_num = port_num;
+ qp_flow->qp_grp = qp_grp;
+ return qp_flow;
+
+out_dealloc_flow:
+ usnic_fwd_dealloc_flow(flow);
+out_unreserve_port:
+ usnic_transport_unrsrv_port(trans_type, port_num);
+ return ERR_PTR(err);
+}
+
+static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
+{
+ usnic_fwd_dealloc_flow(qp_flow->flow);
+ usnic_transport_unrsrv_port(qp_flow->trans_type,
+ qp_flow->usnic_roce.port_num);
+ kfree(qp_flow);
+}
+
+static struct usnic_ib_qp_grp_flow*
+create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
+ struct usnic_transport_spec *trans_spec)
+{
+ struct socket *sock;
+ int sock_fd;
+ int err;
+ struct filter filter;
+ struct usnic_filter_action uaction;
+ struct usnic_ib_qp_grp_flow *qp_flow;
+ struct usnic_fwd_flow *flow;
+ enum usnic_transport_type trans_type;
+ uint32_t addr;
+ uint16_t port_num;
+ int proto;
+
+ trans_type = trans_spec->trans_type;
+ sock_fd = trans_spec->udp.sock_fd;
+
+ /* Get and check socket */
+ sock = usnic_transport_get_socket(sock_fd);
+ if (IS_ERR_OR_NULL(sock))
+ return ERR_CAST(sock);
+
+ err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
+ if (err)
+ goto out_put_sock;
+
+ if (proto != IPPROTO_UDP) {
+ usnic_err("Protocol for fd %d is not UDP", sock_fd);
+ err = -EPERM;
+ goto out_put_sock;
+ }
+
+ /* Create flow */
+ usnic_fwd_init_udp_filter(&filter, addr, port_num);
+ err = init_filter_action(qp_grp, &uaction);
+ if (err)
+ goto out_put_sock;
+
+ flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
+ if (IS_ERR_OR_NULL(flow)) {
+ usnic_err("Failed to alloc flow with err %ld\n",
+ PTR_ERR(flow));
+ err = flow ? PTR_ERR(flow) : -EFAULT;
+ goto out_put_sock;
+ }
+
+ /* Create qp_flow */
+ qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
+ if (IS_ERR_OR_NULL(qp_flow)) {
+ err = qp_flow ? PTR_ERR(qp_flow) : -ENOMEM;
+ goto out_dealloc_flow;
+ }
+ qp_flow->flow = flow;
+ qp_flow->trans_type = trans_type;
+ qp_flow->udp.sock = sock;
+ qp_flow->qp_grp = qp_grp;
+ return qp_flow;
+
+out_dealloc_flow:
+ usnic_fwd_dealloc_flow(flow);
+out_put_sock:
+ usnic_transport_put_socket(sock);
+ return ERR_PTR(err);
+}
+
+static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
+{
+ usnic_fwd_dealloc_flow(qp_flow->flow);
+ usnic_transport_put_socket(qp_flow->udp.sock);
+ kfree(qp_flow);
+}
+
+static struct usnic_ib_qp_grp_flow*
+create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
+ struct usnic_transport_spec *trans_spec)
+{
+ struct usnic_ib_qp_grp_flow *qp_flow;
+ enum usnic_transport_type trans_type;
+
+ trans_type = trans_spec->trans_type;
+ switch (trans_type) {
+ case USNIC_TRANSPORT_ROCE_CUSTOM:
+ qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
+ break;
+ case USNIC_TRANSPORT_IPV4_UDP:
+ qp_flow = create_udp_flow(qp_grp, trans_spec);
+ break;
+ default:
+ usnic_err("Unsupported transport %u\n",
+ trans_spec->trans_type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!IS_ERR_OR_NULL(qp_flow)) {
+ list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
+ usnic_debugfs_flow_add(qp_flow);
+ }
+
+
+ return qp_flow;
+}
+
+static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
+{
+ usnic_debugfs_flow_remove(qp_flow);
+ list_del(&qp_flow->link);
+
+ switch (qp_flow->trans_type) {
+ case USNIC_TRANSPORT_ROCE_CUSTOM:
+ release_roce_custom_flow(qp_flow);
+ break;
+ case USNIC_TRANSPORT_IPV4_UDP:
+ release_udp_flow(qp_flow);
+ break;
+ default:
+ WARN(1, "Unsupported transport %u\n",
+ qp_flow->trans_type);
+ break;
+ }
+}
+
+static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
+{
+ struct usnic_ib_qp_grp_flow *qp_flow, *tmp;
+ list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
+ release_and_remove_flow(qp_flow);
+}
+
+int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
+ enum ib_qp_state new_state,
+ void *data)
+{
+ int status = 0;
+ int vnic_idx;
+ struct ib_event ib_event;
+ enum ib_qp_state old_state;
+ struct usnic_transport_spec *trans_spec;
+ struct usnic_ib_qp_grp_flow *qp_flow;
+
+ old_state = qp_grp->state;
+ vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
+ trans_spec = (struct usnic_transport_spec *) data;
+
+ spin_lock(&qp_grp->lock);
+ switch (new_state) {
+ case IB_QPS_RESET:
+ switch (old_state) {
+ case IB_QPS_RESET:
+ /* NO-OP */
+ break;
+ case IB_QPS_INIT:
+ release_and_remove_all_flows(qp_grp);
+ status = 0;
+ break;
+ case IB_QPS_RTR:
+ case IB_QPS_RTS:
+ case IB_QPS_ERR:
+ status = disable_qp_grp(qp_grp);
+ release_and_remove_all_flows(qp_grp);
+ break;
+ default:
+ status = -EINVAL;
+ }
+ break;
+ case IB_QPS_INIT:
+ switch (old_state) {
+ case IB_QPS_RESET:
+ if (trans_spec) {
+ qp_flow = create_and_add_flow(qp_grp,
+ trans_spec);
+ if (IS_ERR_OR_NULL(qp_flow)) {
+ status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
+ break;
+ }
+ } else {
+ /*
+ * Optional to specify filters.
+ */
+ status = 0;
+ }
+ break;
+ case IB_QPS_INIT:
+ if (trans_spec) {
+ qp_flow = create_and_add_flow(qp_grp,
+ trans_spec);
+ if (IS_ERR_OR_NULL(qp_flow)) {
+ status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
+ break;
+ }
+ } else {
+ /*
+ * Doesn't make sense to go into INIT state
+ * from INIT state w/o adding filters.
+ */
+ status = -EINVAL;
+ }
+ break;
+ case IB_QPS_RTR:
+ status = disable_qp_grp(qp_grp);
+ break;
+ case IB_QPS_RTS:
+ status = disable_qp_grp(qp_grp);
+ break;
+ default:
+ status = -EINVAL;
+ }
+ break;
+ case IB_QPS_RTR:
+ switch (old_state) {
+ case IB_QPS_INIT:
+ status = enable_qp_grp(qp_grp);
+ break;
+ default:
+ status = -EINVAL;
+ }
+ break;
+ case IB_QPS_RTS:
+ switch (old_state) {
+ case IB_QPS_RTR:
+ /* NO-OP FOR NOW */
+ break;
+ default:
+ status = -EINVAL;
+ }
+ break;
+ case IB_QPS_ERR:
+ ib_event.device = &qp_grp->vf->pf->ib_dev;
+ ib_event.element.qp = &qp_grp->ibqp;
+ ib_event.event = IB_EVENT_QP_FATAL;
+
+ switch (old_state) {
+ case IB_QPS_RESET:
+ qp_grp->ibqp.event_handler(&ib_event,
+ qp_grp->ibqp.qp_context);
+ break;
+ case IB_QPS_INIT:
+ release_and_remove_all_flows(qp_grp);
+ qp_grp->ibqp.event_handler(&ib_event,
+ qp_grp->ibqp.qp_context);
+ break;
+ case IB_QPS_RTR:
+ case IB_QPS_RTS:
+ status = disable_qp_grp(qp_grp);
+ release_and_remove_all_flows(qp_grp);
+ qp_grp->ibqp.event_handler(&ib_event,
+ qp_grp->ibqp.qp_context);
+ break;
+ default:
+ status = -EINVAL;
+ }
+ break;
+ default:
+ status = -EINVAL;
+ }
+ spin_unlock(&qp_grp->lock);
+
+ if (!status) {
+ qp_grp->state = new_state;
+ usnic_info("Transitioned %u from %s to %s",
+ qp_grp->grp_id,
+ usnic_ib_qp_grp_state_to_string(old_state),
+ usnic_ib_qp_grp_state_to_string(new_state));
+ } else {
+ usnic_err("Failed to transition %u from %s to %s",
+ qp_grp->grp_id,
+ usnic_ib_qp_grp_state_to_string(old_state),
+ usnic_ib_qp_grp_state_to_string(new_state));
+ }
+
+ return status;
+}
+
+static struct usnic_vnic_res_chunk**
+alloc_res_chunk_list(struct usnic_vnic *vnic,
+ struct usnic_vnic_res_spec *res_spec, void *owner_obj)
+{
+ enum usnic_vnic_res_type res_type;
+ struct usnic_vnic_res_chunk **res_chunk_list;
+ int err, i, res_cnt, res_lst_sz;
+
+ for (res_lst_sz = 0;
+ res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
+ res_lst_sz++) {
+ /* Do Nothing */
+ }
+
+ res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
+ GFP_ATOMIC);
+ if (!res_chunk_list)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
+ i++) {
+ res_type = res_spec->resources[i].type;
+ res_cnt = res_spec->resources[i].cnt;
+
+ res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
+ res_cnt, owner_obj);
+ if (IS_ERR_OR_NULL(res_chunk_list[i])) {
+ err = res_chunk_list[i] ?
+ PTR_ERR(res_chunk_list[i]) : -ENOMEM;
+ usnic_err("Failed to get %s from %s with err %d\n",
+ usnic_vnic_res_type_to_str(res_type),
+ usnic_vnic_pci_name(vnic),
+ err);
+ goto out_free_res;
+ }
+ }
+
+ return res_chunk_list;
+
+out_free_res:
+	for (i--; i >= 0; i--)
+ usnic_vnic_put_resources(res_chunk_list[i]);
+ kfree(res_chunk_list);
+ return ERR_PTR(err);
+}
+
+static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
+{
+ int i;
+ for (i = 0; res_chunk_list[i]; i++)
+ usnic_vnic_put_resources(res_chunk_list[i]);
+ kfree(res_chunk_list);
+}
+
+static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
+ struct usnic_ib_pd *pd,
+ struct usnic_ib_qp_grp *qp_grp)
+{
+ int err;
+ struct pci_dev *pdev;
+
+ lockdep_assert_held(&vf->lock);
+
+ pdev = usnic_vnic_get_pdev(vf->vnic);
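+	/* The first QP group on this VF attaches its device to the PD. */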
+ if (vf->qp_grp_ref_cnt == 0) {
+ err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
+ if (err) {
+ usnic_err("Failed to attach %s to domain\n",
+ pci_name(pdev));
+ return err;
+ }
+ vf->pd = pd;
+ }
+ vf->qp_grp_ref_cnt++;
+
+ WARN_ON(vf->pd != pd);
+ qp_grp->vf = vf;
+
+ return 0;
+}
+
+static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
+{
+ struct pci_dev *pdev;
+ struct usnic_ib_pd *pd;
+
+ lockdep_assert_held(&qp_grp->vf->lock);
+
+ pd = qp_grp->vf->pd;
+ pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
+ if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
+ qp_grp->vf->pd = NULL;
+ usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
+ }
+ qp_grp->vf = NULL;
+}
+
+static void log_spec(struct usnic_vnic_res_spec *res_spec)
+{
+ char buf[512];
+ usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
+ usnic_dbg("%s\n", buf);
+}
+
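+/*
+ * Derive the QP group id (exposed to userspace as qp_num) from the flow:
+ * the ROCE custom port number, or the UDP socket's local port.
+ */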
+static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
+ uint32_t *id)
+{
+ enum usnic_transport_type trans_type = qp_flow->trans_type;
+ int err;
+ uint16_t port_num = 0;
+
+ switch (trans_type) {
+ case USNIC_TRANSPORT_ROCE_CUSTOM:
+ *id = qp_flow->usnic_roce.port_num;
+ break;
+ case USNIC_TRANSPORT_IPV4_UDP:
+ err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
+ NULL, NULL,
+ &port_num);
+ if (err)
+ return err;
+ /*
+ * Copy port_num to stack first and then to *id,
+ * so that the short to int cast works for little
+ * and big endian systems.
+ */
+ *id = port_num;
+ break;
+ default:
+ usnic_err("Unsupported transport %u\n", trans_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct usnic_ib_qp_grp *
+usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
+ struct usnic_ib_pd *pd,
+ struct usnic_vnic_res_spec *res_spec,
+ struct usnic_transport_spec *transport_spec)
+{
+ struct usnic_ib_qp_grp *qp_grp;
+ int err;
+ enum usnic_transport_type transport = transport_spec->trans_type;
+ struct usnic_ib_qp_grp_flow *qp_flow;
+
+ lockdep_assert_held(&vf->lock);
+
+ err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
+ res_spec);
+ if (err) {
+		usnic_err("Spec does not meet minimum req for transport %d\n",
+ transport);
+ log_spec(res_spec);
+ return ERR_PTR(err);
+ }
+
+ qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
+ if (!qp_grp) {
+ usnic_err("Unable to alloc qp_grp - Out of memory\n");
+ return NULL;
+ }
+
+ qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
+ qp_grp);
+ if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
+ err = qp_grp->res_chunk_list ?
+ PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
+ usnic_err("Unable to alloc res for %d with err %d\n",
+ qp_grp->grp_id, err);
+ goto out_free_qp_grp;
+ }
+
+ err = qp_grp_and_vf_bind(vf, pd, qp_grp);
+ if (err)
+ goto out_free_res;
+
+ INIT_LIST_HEAD(&qp_grp->flows_lst);
+ spin_lock_init(&qp_grp->lock);
+ qp_grp->ufdev = ufdev;
+ qp_grp->state = IB_QPS_RESET;
+ qp_grp->owner_pid = current->pid;
+
+ qp_flow = create_and_add_flow(qp_grp, transport_spec);
+ if (IS_ERR_OR_NULL(qp_flow)) {
+ usnic_err("Unable to create and add flow with err %ld\n",
+ PTR_ERR(qp_flow));
+ err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
+ goto out_qp_grp_vf_unbind;
+ }
+
+ err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
+ if (err)
+ goto out_release_flow;
+ qp_grp->ibqp.qp_num = qp_grp->grp_id;
+
+ usnic_ib_sysfs_qpn_add(qp_grp);
+
+ return qp_grp;
+
+out_release_flow:
+ release_and_remove_flow(qp_flow);
+out_qp_grp_vf_unbind:
+ qp_grp_and_vf_unbind(qp_grp);
+out_free_res:
+ free_qp_grp_res(qp_grp->res_chunk_list);
+out_free_qp_grp:
+ kfree(qp_grp);
+
+ return ERR_PTR(err);
+}
+
+void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
+{
+
+ WARN_ON(qp_grp->state != IB_QPS_RESET);
+ lockdep_assert_held(&qp_grp->vf->lock);
+
+ release_and_remove_all_flows(qp_grp);
+ usnic_ib_sysfs_qpn_remove(qp_grp);
+ qp_grp_and_vf_unbind(qp_grp);
+ free_qp_grp_res(qp_grp->res_chunk_list);
+ kfree(qp_grp);
+}
+
+struct usnic_vnic_res_chunk*
+usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
+ enum usnic_vnic_res_type res_type)
+{
+ int i;
+
+ for (i = 0; qp_grp->res_chunk_list[i]; i++) {
+ if (qp_grp->res_chunk_list[i]->type == res_type)
+ return qp_grp->res_chunk_list[i];
+ }
+
+ return ERR_PTR(-EINVAL);
+}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
new file mode 100644
index 000000000000..b0aafe8db0c3
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_IB_QP_GRP_H_
+#define USNIC_IB_QP_GRP_H_
+
+#include <linux/debugfs.h>
+#include <rdma/ib_verbs.h>
+
+#include "usnic_ib.h"
+#include "usnic_abi.h"
+#include "usnic_fwd.h"
+#include "usnic_vnic.h"
+
+/*
+ * The qp group struct represents all the hw resources needed to present an ib_qp
+ */
+struct usnic_ib_qp_grp {
+ struct ib_qp ibqp;
+ enum ib_qp_state state;
+ int grp_id;
+
+ struct usnic_fwd_dev *ufdev;
+ struct usnic_ib_ucontext *ctx;
+ struct list_head flows_lst;
+
+ struct usnic_vnic_res_chunk **res_chunk_list;
+
+ pid_t owner_pid;
+ struct usnic_ib_vf *vf;
+ struct list_head link;
+
+ spinlock_t lock;
+
+ struct kobject kobj;
+};
+
+struct usnic_ib_qp_grp_flow {
+ struct usnic_fwd_flow *flow;
+ enum usnic_transport_type trans_type;
+ union {
+ struct {
+ uint16_t port_num;
+ } usnic_roce;
+ struct {
+ struct socket *sock;
+ } udp;
+ };
+ struct usnic_ib_qp_grp *qp_grp;
+ struct list_head link;
+
+ /* Debug FS */
+ struct dentry *dbgfs_dentry;
+ char dentry_name[32];
+};
+
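+/* Minimum vNIC resources required to back a QP for each transport type. */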
+static const struct
+usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX] = {
+ { /*USNIC_TRANSPORT_UNKNOWN*/
+ .resources = {
+ {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
+ },
+ },
+ { /*USNIC_TRANSPORT_ROCE_CUSTOM*/
+ .resources = {
+ {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
+ },
+ },
+ { /*USNIC_TRANSPORT_IPV4_UDP*/
+ .resources = {
+ {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
+ },
+ },
+};
+
+const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state);
+int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz);
+int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz);
+struct usnic_ib_qp_grp *
+usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
+ struct usnic_ib_pd *pd,
+ struct usnic_vnic_res_spec *res_spec,
+ struct usnic_transport_spec *trans_spec);
+void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp);
+int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
+ enum ib_qp_state new_state,
+ void *data);
+struct usnic_vnic_res_chunk
+*usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
+ enum usnic_vnic_res_type type);
+static inline
+struct usnic_ib_qp_grp *to_uqp_grp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct usnic_ib_qp_grp, ibqp);
+}
+#endif /* USNIC_IB_QP_GRP_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
new file mode 100644
index 000000000000..27dc67c1689f
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
+
+#include "usnic_common_util.h"
+#include "usnic_ib.h"
+#include "usnic_ib_qp_grp.h"
+#include "usnic_vnic.h"
+#include "usnic_ib_verbs.h"
+#include "usnic_log.h"
+
+static ssize_t usnic_ib_show_fw_ver(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usnic_ib_dev *us_ibdev =
+ container_of(device, struct usnic_ib_dev, ib_dev.dev);
+ struct ethtool_drvinfo info;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", info.fw_version);
+}
+
+static ssize_t usnic_ib_show_board(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usnic_ib_dev *us_ibdev =
+ container_of(device, struct usnic_ib_dev, ib_dev.dev);
+ unsigned short subsystem_device_id;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ subsystem_device_id = us_ibdev->pdev->subsystem_device;
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%hu\n", subsystem_device_id);
+}
+
+/*
+ * Report the configuration for this PF
+ */
+static ssize_t
+usnic_ib_show_config(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct usnic_ib_dev *us_ibdev;
+ char *ptr;
+ unsigned left;
+ unsigned n;
+ enum usnic_vnic_res_type res_type;
+
+ us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
+
+ /* Buffer space limit is 1 page */
+ ptr = buf;
+ left = PAGE_SIZE;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ if (atomic_read(&us_ibdev->vf_cnt.refcount) > 0) {
+ char *busname;
+
+ /*
+		 * The bus name seems to come with an annoying prefix.
+		 * Remove it if it is predictable.
+ */
+ busname = us_ibdev->pdev->bus->name;
+ if (strncmp(busname, "PCI Bus ", 8) == 0)
+ busname += 8;
+
+ n = scnprintf(ptr, left,
+ "%s: %s:%d.%d, %s, %pM, %u VFs\n Per VF:",
+ us_ibdev->ib_dev.name,
+ busname,
+ PCI_SLOT(us_ibdev->pdev->devfn),
+ PCI_FUNC(us_ibdev->pdev->devfn),
+ netdev_name(us_ibdev->netdev),
+ us_ibdev->ufdev->mac,
+ atomic_read(&us_ibdev->vf_cnt.refcount));
+ UPDATE_PTR_LEFT(n, ptr, left);
+
+ for (res_type = USNIC_VNIC_RES_TYPE_EOL;
+ res_type < USNIC_VNIC_RES_TYPE_MAX;
+ res_type++) {
+ if (us_ibdev->vf_res_cnt[res_type] == 0)
+ continue;
+ n = scnprintf(ptr, left, " %d %s%s",
+ us_ibdev->vf_res_cnt[res_type],
+ usnic_vnic_res_type_to_str(res_type),
+ (res_type < (USNIC_VNIC_RES_TYPE_MAX - 1)) ?
+ "," : "");
+ UPDATE_PTR_LEFT(n, ptr, left);
+ }
+ n = scnprintf(ptr, left, "\n");
+ UPDATE_PTR_LEFT(n, ptr, left);
+ } else {
+ n = scnprintf(ptr, left, "%s: no VFs\n",
+ us_ibdev->ib_dev.name);
+ UPDATE_PTR_LEFT(n, ptr, left);
+ }
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return ptr - buf;
+}
+
+static ssize_t
+usnic_ib_show_iface(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct usnic_ib_dev *us_ibdev;
+
+ us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ netdev_name(us_ibdev->netdev));
+}
+
+static ssize_t
+usnic_ib_show_max_vf(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct usnic_ib_dev *us_ibdev;
+
+ us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ atomic_read(&us_ibdev->vf_cnt.refcount));
+}
+
+static ssize_t
+usnic_ib_show_qp_per_vf(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct usnic_ib_dev *us_ibdev;
+ int qp_per_vf;
+
+ us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
+ qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
+ us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
+
+ return scnprintf(buf, PAGE_SIZE,
+ "%d\n", qp_per_vf);
+}
+
+static ssize_t
+usnic_ib_show_cq_per_vf(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct usnic_ib_dev *us_ibdev;
+
+ us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]);
+}
+
+static DEVICE_ATTR(fw_ver, S_IRUGO, usnic_ib_show_fw_ver, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, usnic_ib_show_board, NULL);
+static DEVICE_ATTR(config, S_IRUGO, usnic_ib_show_config, NULL);
+static DEVICE_ATTR(iface, S_IRUGO, usnic_ib_show_iface, NULL);
+static DEVICE_ATTR(max_vf, S_IRUGO, usnic_ib_show_max_vf, NULL);
+static DEVICE_ATTR(qp_per_vf, S_IRUGO, usnic_ib_show_qp_per_vf, NULL);
+static DEVICE_ATTR(cq_per_vf, S_IRUGO, usnic_ib_show_cq_per_vf, NULL);
+
+static struct device_attribute *usnic_class_attributes[] = {
+ &dev_attr_fw_ver,
+ &dev_attr_board_id,
+ &dev_attr_config,
+ &dev_attr_iface,
+ &dev_attr_max_vf,
+ &dev_attr_qp_per_vf,
+ &dev_attr_cq_per_vf,
+};
+
+struct qpn_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct usnic_ib_qp_grp *, char *buf);
+};
+
+/*
+ * Definitions for supporting QPN entries in sysfs
+ */
+static ssize_t
+usnic_ib_qpn_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct usnic_ib_qp_grp *qp_grp;
+ struct qpn_attribute *qpn_attr;
+
+ qp_grp = container_of(kobj, struct usnic_ib_qp_grp, kobj);
+ qpn_attr = container_of(attr, struct qpn_attribute, attr);
+
+ return qpn_attr->show(qp_grp, buf);
+}
+
+static const struct sysfs_ops usnic_ib_qpn_sysfs_ops = {
+ .show = usnic_ib_qpn_attr_show
+};
+
+#define QPN_ATTR_RO(NAME) \
+struct qpn_attribute qpn_attr_##NAME = __ATTR_RO(NAME)
+
+static ssize_t context_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "0x%p\n", qp_grp->ctx);
+}
+
+static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
+{
+ int i, j, n;
+ int left;
+ char *ptr;
+ struct usnic_vnic_res_chunk *res_chunk;
+ struct usnic_vnic_res *vnic_res;
+
+ left = PAGE_SIZE;
+ ptr = buf;
+
+ n = scnprintf(ptr, left,
+ "QPN: %d State: (%s) PID: %u VF Idx: %hu ",
+ qp_grp->ibqp.qp_num,
+ usnic_ib_qp_grp_state_to_string(qp_grp->state),
+ qp_grp->owner_pid,
+ usnic_vnic_get_index(qp_grp->vf->vnic));
+ UPDATE_PTR_LEFT(n, ptr, left);
+
+ for (i = 0; qp_grp->res_chunk_list[i]; i++) {
+ res_chunk = qp_grp->res_chunk_list[i];
+ for (j = 0; j < res_chunk->cnt; j++) {
+ vnic_res = res_chunk->res[j];
+ n = scnprintf(ptr, left, "%s[%d] ",
+ usnic_vnic_res_type_to_str(vnic_res->type),
+ vnic_res->vnic_idx);
+ UPDATE_PTR_LEFT(n, ptr, left);
+ }
+ }
+
+ n = scnprintf(ptr, left, "\n");
+ UPDATE_PTR_LEFT(n, ptr, left);
+
+ return ptr - buf;
+}
+
+static QPN_ATTR_RO(context);
+static QPN_ATTR_RO(summary);
+
+static struct attribute *usnic_ib_qpn_default_attrs[] = {
+ &qpn_attr_context.attr,
+ &qpn_attr_summary.attr,
+ NULL
+};
+
+static struct kobj_type usnic_ib_qpn_type = {
+ .sysfs_ops = &usnic_ib_qpn_sysfs_ops,
+ .default_attrs = usnic_ib_qpn_default_attrs
+};
+
+int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
+{
+ int i;
+ int err;
+ for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) {
+ err = device_create_file(&us_ibdev->ib_dev.dev,
+ usnic_class_attributes[i]);
+ if (err) {
+			usnic_err("Failed to create device file %d for %s with err %d",
+ i, us_ibdev->ib_dev.name, err);
+ return -EINVAL;
+ }
+ }
+
+ /* create kernel object for looking at individual QPs */
+ kobject_get(&us_ibdev->ib_dev.dev.kobj);
+ us_ibdev->qpn_kobj = kobject_create_and_add("qpn",
+ &us_ibdev->ib_dev.dev.kobj);
+ if (us_ibdev->qpn_kobj == NULL) {
+ kobject_put(&us_ibdev->ib_dev.dev.kobj);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) {
+ device_remove_file(&us_ibdev->ib_dev.dev,
+ usnic_class_attributes[i]);
+ }
+
+ kobject_put(us_ibdev->qpn_kobj);
+}
+
+void usnic_ib_sysfs_qpn_add(struct usnic_ib_qp_grp *qp_grp)
+{
+ struct usnic_ib_dev *us_ibdev;
+ int err;
+
+ us_ibdev = qp_grp->vf->pf;
+
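+	/*
+	 * Take a reference on the parent "qpn" kobject; it is dropped on
+	 * error here or in usnic_ib_sysfs_qpn_remove().
+	 */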
+ err = kobject_init_and_add(&qp_grp->kobj, &usnic_ib_qpn_type,
+ kobject_get(us_ibdev->qpn_kobj),
+ "%d", qp_grp->grp_id);
+ if (err) {
+ kobject_put(us_ibdev->qpn_kobj);
+ return;
+ }
+}
+
+void usnic_ib_sysfs_qpn_remove(struct usnic_ib_qp_grp *qp_grp)
+{
+ struct usnic_ib_dev *us_ibdev;
+
+ us_ibdev = qp_grp->vf->pf;
+
+ kobject_put(&qp_grp->kobj);
+ kobject_put(us_ibdev->qpn_kobj);
+}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
new file mode 100644
index 000000000000..0d09b493cd02
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_IB_SYSFS_H_
+#define USNIC_IB_SYSFS_H_
+
+#include "usnic_ib.h"
+
+int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev);
+void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev);
+void usnic_ib_sysfs_qpn_add(struct usnic_ib_qp_grp *qp_grp);
+void usnic_ib_sysfs_qpn_remove(struct usnic_ib_qp_grp *qp_grp);
+
+#endif /* !USNIC_IB_SYSFS_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
new file mode 100644
index 000000000000..d48d2c0a2e3c
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -0,0 +1,765 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
+
+#include "usnic_abi.h"
+#include "usnic_ib.h"
+#include "usnic_common_util.h"
+#include "usnic_ib_qp_grp.h"
+#include "usnic_fwd.h"
+#include "usnic_log.h"
+#include "usnic_uiom.h"
+#include "usnic_transport.h"
+
+#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
+
+static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
+{
+ *fw_ver = (u64) *fw_ver_str;
+}
+
+static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
+ struct ib_udata *udata)
+{
+ struct usnic_ib_dev *us_ibdev;
+ struct usnic_ib_create_qp_resp resp;
+ struct pci_dev *pdev;
+ struct vnic_dev_bar *bar;
+ struct usnic_vnic_res_chunk *chunk;
+ struct usnic_ib_qp_grp_flow *default_flow;
+ int i, err;
+
+ memset(&resp, 0, sizeof(resp));
+
+ us_ibdev = qp_grp->vf->pf;
+ pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
+ if (!pdev) {
+ usnic_err("Failed to get pdev of qp_grp %d\n",
+ qp_grp->grp_id);
+ return -EFAULT;
+ }
+
+ bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
+ if (!bar) {
+ usnic_err("Failed to get bar0 of qp_grp %d vf %s",
+ qp_grp->grp_id, pci_name(pdev));
+ return -EFAULT;
+ }
+
+ resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
+ resp.bar_bus_addr = bar->bus_addr;
+ resp.bar_len = bar->len;
+
+ chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
+ if (IS_ERR_OR_NULL(chunk)) {
+ usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
+ usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
+ qp_grp->grp_id,
+ PTR_ERR(chunk));
+ return chunk ? PTR_ERR(chunk) : -ENOMEM;
+ }
+
+ WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
+ resp.rq_cnt = chunk->cnt;
+ for (i = 0; i < chunk->cnt; i++)
+ resp.rq_idx[i] = chunk->res[i]->vnic_idx;
+
+ chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
+ if (IS_ERR_OR_NULL(chunk)) {
+ usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
+ usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
+ qp_grp->grp_id,
+ PTR_ERR(chunk));
+ return chunk ? PTR_ERR(chunk) : -ENOMEM;
+ }
+
+ WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
+ resp.wq_cnt = chunk->cnt;
+ for (i = 0; i < chunk->cnt; i++)
+ resp.wq_idx[i] = chunk->res[i]->vnic_idx;
+
+ chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
+ if (IS_ERR_OR_NULL(chunk)) {
+ usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
+ usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
+ qp_grp->grp_id,
+ PTR_ERR(chunk));
+ return chunk ? PTR_ERR(chunk) : -ENOMEM;
+ }
+
+ WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
+ resp.cq_cnt = chunk->cnt;
+ for (i = 0; i < chunk->cnt; i++)
+ resp.cq_idx[i] = chunk->res[i]->vnic_idx;
+
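+	/* Report the transport type of the QP group's default (first) flow. */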
+ default_flow = list_first_entry(&qp_grp->flows_lst,
+ struct usnic_ib_qp_grp_flow, link);
+ resp.transport = default_flow->trans_type;
+
+ err = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (err) {
+ usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
+ return err;
+ }
+
+ return 0;
+}
+
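+/*
+ * Pick a VF with room for the requested resources: prefer a VF already in
+ * use by this PD (when VF sharing is enabled), otherwise take an unused one.
+ */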
+static struct usnic_ib_qp_grp*
+find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
+ struct usnic_ib_pd *pd,
+ struct usnic_transport_spec *trans_spec,
+ struct usnic_vnic_res_spec *res_spec)
+{
+ struct usnic_ib_vf *vf;
+ struct usnic_vnic *vnic;
+ struct usnic_ib_qp_grp *qp_grp;
+ struct device *dev, **dev_list;
+ int i, found = 0;
+
+ BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));
+
+ if (list_empty(&us_ibdev->vf_dev_list)) {
+ usnic_info("No vfs to allocate\n");
+ return NULL;
+ }
+
+ if (usnic_ib_share_vf) {
+		/* Try to find resources on a used vf which is in pd */
+ dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
+ for (i = 0; dev_list[i]; i++) {
+ dev = dev_list[i];
+ vf = pci_get_drvdata(to_pci_dev(dev));
+ spin_lock(&vf->lock);
+ vnic = vf->vnic;
+ if (!usnic_vnic_check_room(vnic, res_spec)) {
+ usnic_dbg("Found used vnic %s from %s\n",
+ us_ibdev->ib_dev.name,
+ pci_name(usnic_vnic_get_pdev(
+ vnic)));
+ found = 1;
+ break;
+ }
+ spin_unlock(&vf->lock);
+
+ }
+ usnic_uiom_free_dev_list(dev_list);
+ }
+
+ if (!found) {
+ /* Try to find resources on an unused vf */
+ list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
+ spin_lock(&vf->lock);
+ vnic = vf->vnic;
+ if (vf->qp_grp_ref_cnt == 0 &&
+ usnic_vnic_check_room(vnic, res_spec) == 0) {
+ found = 1;
+ break;
+ }
+ spin_unlock(&vf->lock);
+ }
+ }
+
+ if (!found) {
+ usnic_info("No free qp grp found on %s\n",
+ us_ibdev->ib_dev.name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec,
+ trans_spec);
+ spin_unlock(&vf->lock);
+ if (IS_ERR_OR_NULL(qp_grp)) {
+ usnic_err("Failed to allocate qp_grp\n");
+ return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
+ }
+
+ return qp_grp;
+}
+
+static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
+{
+ struct usnic_ib_vf *vf = qp_grp->vf;
+
+ WARN_ON(qp_grp->state != IB_QPS_RESET);
+
+ spin_lock(&vf->lock);
+ usnic_ib_qp_grp_destroy(qp_grp);
+ spin_unlock(&vf->lock);
+}
+
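+/* Map an Ethernet link speed in Mb/s to an approximate IB width/speed pair. */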
+static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
+ u8 *active_width)
+{
+ if (speed <= 10000) {
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_FDR10;
+ } else if (speed <= 20000) {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_DDR;
+ } else if (speed <= 30000) {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_QDR;
+ } else if (speed <= 40000) {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_FDR10;
+ } else {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_EDR;
+ }
+}
+
+static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
+{
+ if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
+ cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Start of ib callback functions */
+
+enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
+ u8 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+int usnic_ib_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props)
+{
+ struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
+ union ib_gid gid;
+ struct ethtool_drvinfo info;
+ struct ethtool_cmd cmd;
+ int qp_per_vf;
+
+ usnic_dbg("\n");
+ mutex_lock(&us_ibdev->usdev_lock);
+ us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
+ us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
+ memset(props, 0, sizeof(*props));
+ usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
+ &gid.raw[0]);
+ memcpy(&props->sys_image_guid, &gid.global.interface_id,
+ sizeof(gid.global.interface_id));
+ usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
+ props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
+ props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
+ props->vendor_id = PCI_VENDOR_ID_CISCO;
+ props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
+ props->hw_ver = us_ibdev->pdev->subsystem_device;
+ qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
+ us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
+ props->max_qp = qp_per_vf *
+ atomic_read(&us_ibdev->vf_cnt.refcount);
+ props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
+ IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+ props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
+ atomic_read(&us_ibdev->vf_cnt.refcount);
+ props->max_pd = USNIC_UIOM_MAX_PD_CNT;
+ props->max_mr = USNIC_UIOM_MAX_MR_CNT;
+ props->local_ca_ack_delay = 0;
+ props->max_pkeys = 0;
+ props->atomic_cap = IB_ATOMIC_NONE;
+ props->masked_atomic_cap = props->atomic_cap;
+ props->max_qp_rd_atom = 0;
+ props->max_qp_init_rd_atom = 0;
+ props->max_res_rd_atom = 0;
+ props->max_srq = 0;
+ props->max_srq_wr = 0;
+ props->max_srq_sge = 0;
+ props->max_fast_reg_page_list_len = 0;
+ props->max_mcast_grp = 0;
+ props->max_mcast_qp_attach = 0;
+ props->max_total_mcast_qp_attach = 0;
+ props->max_map_per_fmr = 0;
+ /* Owned by Userspace
+ * max_qp_wr, max_sge, max_sge_rd, max_cqe */
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return 0;
+}
+
+int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
+ struct ethtool_cmd cmd;
+
+ usnic_dbg("\n");
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
+ memset(props, 0, sizeof(*props));
+
+ props->lid = 0;
+ props->lmc = 1;
+ props->sm_lid = 0;
+ props->sm_sl = 0;
+
+ if (!us_ibdev->ufdev->link_up) {
+ props->state = IB_PORT_DOWN;
+ props->phys_state = 3;
+ } else if (!us_ibdev->ufdev->inaddr) {
+ props->state = IB_PORT_INIT;
+ props->phys_state = 4;
+ } else {
+ props->state = IB_PORT_ACTIVE;
+ props->phys_state = 5;
+ }
+
+ props->port_cap_flags = 0;
+ props->gid_tbl_len = 1;
+ props->pkey_tbl_len = 1;
+ props->bad_pkey_cntr = 0;
+ props->qkey_viol_cntr = 0;
+ eth_speed_to_ib_speed(cmd.speed, &props->active_speed,
+ &props->active_width);
+ props->max_mtu = IB_MTU_4096;
+ props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
+ /* Userspace will adjust for hdrs */
+ props->max_msg_sz = us_ibdev->ufdev->mtu;
+ props->max_vl_num = 1;
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return 0;
+}
+
+int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct usnic_ib_qp_grp *qp_grp;
+ struct usnic_ib_vf *vf;
+ int err;
+
+ usnic_dbg("\n");
+
+ memset(qp_attr, 0, sizeof(*qp_attr));
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+ qp_grp = to_uqp_grp(qp);
+ vf = qp_grp->vf;
+ mutex_lock(&vf->pf->usdev_lock);
+ usnic_dbg("\n");
+ qp_attr->qp_state = qp_grp->state;
+ qp_attr->cur_qp_state = qp_grp->state;
+
+ switch (qp_grp->ibqp.qp_type) {
+ case IB_QPT_UD:
+ qp_attr->qkey = 0;
+ break;
+ default:
+ usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ mutex_unlock(&vf->pf->usdev_lock);
+ return 0;
+
+err_out:
+ mutex_unlock(&vf->pf->usdev_lock);
+ return err;
+}
+
+int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+
+ struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
+ usnic_dbg("\n");
+
+ if (index > 1)
+ return -EINVAL;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ memset(&(gid->raw[0]), 0, sizeof(gid->raw));
+ usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
+ &gid->raw[0]);
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return 0;
+}
+
+int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ if (index > 1)
+ return -EINVAL;
+
+ *pkey = 0xffff;
+ return 0;
+}
+
+struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct usnic_ib_pd *pd;
+ void *umem_pd;
+
+ usnic_dbg("\n");
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
+ if (IS_ERR_OR_NULL(umem_pd)) {
+ kfree(pd);
+ return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
+ }
+
+ usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
+ pd, context, ibdev->name);
+ return &pd->ibpd;
+}
+
+int usnic_ib_dealloc_pd(struct ib_pd *pd)
+{
+ usnic_info("freeing domain 0x%p\n", pd);
+
+ usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
+ kfree(pd);
+ return 0;
+}
+
+struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ int err;
+ struct usnic_ib_dev *us_ibdev;
+ struct usnic_ib_qp_grp *qp_grp;
+ struct usnic_ib_ucontext *ucontext;
+ int cq_cnt;
+ struct usnic_vnic_res_spec res_spec;
+ struct usnic_ib_create_qp_cmd cmd;
+ struct usnic_transport_spec trans_spec;
+
+ usnic_dbg("\n");
+
+ ucontext = to_uucontext(pd->uobject->context);
+ us_ibdev = to_usdev(pd->device);
+
+ err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
+ if (err) {
+ usnic_err("%s: cannot copy udata for create_qp\n",
+ us_ibdev->ib_dev.name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ err = create_qp_validate_user_data(cmd);
+ if (err) {
+ usnic_err("%s: Failed to validate user data\n",
+ us_ibdev->ib_dev.name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (init_attr->qp_type != IB_QPT_UD) {
+ usnic_err("%s asked to make a non-UD QP: %d\n",
+ us_ibdev->ib_dev.name, init_attr->qp_type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ trans_spec = cmd.spec;
+ mutex_lock(&us_ibdev->usdev_lock);
+ cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
+ res_spec = min_transport_spec[trans_spec.trans_type];
+ usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
+ qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
+ &trans_spec,
+ &res_spec);
+ if (IS_ERR_OR_NULL(qp_grp)) {
+ err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
+ goto out_release_mutex;
+ }
+
+ err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
+ if (err) {
+ err = -EBUSY;
+ goto out_release_qp_grp;
+ }
+
+ qp_grp->ctx = ucontext;
+ list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
+ usnic_ib_log_vf(qp_grp->vf);
+ mutex_unlock(&us_ibdev->usdev_lock);
+ return &qp_grp->ibqp;
+
+out_release_qp_grp:
+ qp_grp_destroy(qp_grp);
+out_release_mutex:
+ mutex_unlock(&us_ibdev->usdev_lock);
+ return ERR_PTR(err);
+}
+
+int usnic_ib_destroy_qp(struct ib_qp *qp)
+{
+ struct usnic_ib_qp_grp *qp_grp;
+ struct usnic_ib_vf *vf;
+
+ usnic_dbg("\n");
+
+ qp_grp = to_uqp_grp(qp);
+ vf = qp_grp->vf;
+ mutex_lock(&vf->pf->usdev_lock);
+ if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
+ usnic_err("Failed to move qp grp %u to reset\n",
+ qp_grp->grp_id);
+ }
+
+ list_del(&qp_grp->link);
+ qp_grp_destroy(qp_grp);
+ mutex_unlock(&vf->pf->usdev_lock);
+
+ return 0;
+}
+
+int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct usnic_ib_qp_grp *qp_grp;
+ int status;
+ usnic_dbg("\n");
+
+ qp_grp = to_uqp_grp(ibqp);
+
+ /* TODO: Future Support All States */
+ mutex_lock(&qp_grp->vf->pf->usdev_lock);
+ if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT) {
+ status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_INIT, NULL);
+ } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTR) {
+ status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTR, NULL);
+ } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTS) {
+ status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTS, NULL);
+ } else {
+ usnic_err("Unexpected combination mask: %u state: %u\n",
+ attr_mask & IB_QP_STATE, attr->qp_state);
+ status = -EINVAL;
+ }
+
+ mutex_unlock(&qp_grp->vf->pf->usdev_lock);
+ return status;
+}
+
+struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries,
+ int vector, struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct ib_cq *cq;
+
+ usnic_dbg("\n");
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+ return ERR_PTR(-EBUSY);
+
+ return cq;
+}
+
+int usnic_ib_destroy_cq(struct ib_cq *cq)
+{
+ usnic_dbg("\n");
+ kfree(cq);
+ return 0;
+}
+
+struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata)
+{
+ struct usnic_ib_mr *mr;
+ int err;
+
+ usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
+ virt_addr, length);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(mr))
+ return ERR_PTR(mr ? PTR_ERR(mr) : -ENOMEM);
+
+ mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
+ access_flags, 0);
+ if (IS_ERR_OR_NULL(mr->umem)) {
+ err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
+ goto err_free;
+ }
+
+ mr->ibmr.lkey = mr->ibmr.rkey = 0;
+ return &mr->ibmr;
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
+int usnic_ib_dereg_mr(struct ib_mr *ibmr)
+{
+ struct usnic_ib_mr *mr = to_umr(ibmr);
+
+ usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);
+
+ usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
+ kfree(mr);
+ return 0;
+}
+
+struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct usnic_ib_ucontext *context;
+ struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
+ usnic_dbg("\n");
+
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&context->qp_grp_list);
+ mutex_lock(&us_ibdev->usdev_lock);
+ list_add_tail(&context->link, &us_ibdev->ctx_list);
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return &context->ibucontext;
+}
+
+int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
+ struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);
+ usnic_dbg("\n");
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ BUG_ON(!list_empty(&context->qp_grp_list));
+ list_del(&context->link);
+ mutex_unlock(&us_ibdev->usdev_lock);
+ kfree(context);
+ return 0;
+}
+
+int usnic_ib_mmap(struct ib_ucontext *context,
+ struct vm_area_struct *vma)
+{
+ struct usnic_ib_ucontext *uctx = to_ucontext(context);
+ struct usnic_ib_dev *us_ibdev;
+ struct usnic_ib_qp_grp *qp_grp;
+ struct usnic_ib_vf *vf;
+ struct vnic_dev_bar *bar;
+ dma_addr_t bus_addr;
+ unsigned int len;
+ unsigned int vfid;
+
+ usnic_dbg("\n");
+
+ us_ibdev = to_usdev(context->device);
+ vma->vm_flags |= VM_IO;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
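+	/* The mmap page offset selects which VF's BAR0 should be mapped. */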
+ vfid = vma->vm_pgoff;
+ usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
+ vma->vm_pgoff, PAGE_SHIFT, vfid);
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
+ vf = qp_grp->vf;
+ if (usnic_vnic_get_index(vf->vnic) == vfid) {
+ bar = usnic_vnic_get_bar(vf->vnic, 0);
+ if ((vma->vm_end - vma->vm_start) != bar->len) {
+ usnic_err("Bar0 Len %lu - Request map %lu\n",
+ bar->len,
+ vma->vm_end - vma->vm_start);
+ mutex_unlock(&us_ibdev->usdev_lock);
+ return -EINVAL;
+ }
+ bus_addr = bar->bus_addr;
+ len = bar->len;
+ usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
+ &bus_addr, bar->vaddr, bar->len);
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return remap_pfn_range(vma,
+ vma->vm_start,
+ bus_addr >> PAGE_SHIFT,
+ len, vma->vm_page_prot);
+ }
+ }
+
+ mutex_unlock(&us_ibdev->usdev_lock);
+ usnic_err("No VF %u found\n", vfid);
+ return -EINVAL;
+}
+
+/* In ib callbacks section - Start of stub funcs */
+struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr)
+{
+ usnic_dbg("\n");
+ return ERR_PTR(-EPERM);
+}
+
+int usnic_ib_destroy_ah(struct ib_ah *ah)
+{
+ usnic_dbg("\n");
+ return -EINVAL;
+}
+
+int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ usnic_dbg("\n");
+ return -EINVAL;
+}
+
+int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ usnic_dbg("\n");
+ return -EINVAL;
+}
+
+int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
+ struct ib_wc *wc)
+{
+ usnic_dbg("\n");
+ return -EINVAL;
+}
+
+int usnic_ib_req_notify_cq(struct ib_cq *cq,
+ enum ib_cq_notify_flags flags)
+{
+ usnic_dbg("\n");
+ return -EINVAL;
+}
+
+struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ usnic_dbg("\n");
+ return ERR_PTR(-ENOMEM);
+}
+
+
+/* In ib callbacks section - End of stub funcs */
+/* End of ib callbacks section */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
new file mode 100644
index 000000000000..bb864f5aed70
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_IB_VERBS_H_
+#define USNIC_IB_VERBS_H_
+
+#include "usnic_ib.h"
+
+enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
+ u8 port_num);
+int usnic_ib_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props);
+int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props);
+int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr);
+int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid);
+int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey);
+struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+int usnic_ib_dealloc_pd(struct ib_pd *pd);
+struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int usnic_ib_destroy_qp(struct ib_qp *qp);
+int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries,
+ int vector, struct ib_ucontext *context,
+ struct ib_udata *udata);
+int usnic_ib_destroy_cq(struct ib_cq *cq);
+struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata);
+int usnic_ib_dereg_mr(struct ib_mr *ibmr);
+struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata);
+int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
+int usnic_ib_mmap(struct ib_ucontext *context,
+ struct vm_area_struct *vma);
+struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr);
+int usnic_ib_destroy_ah(struct ib_ah *ah);
+int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
+ struct ib_wc *wc);
+int usnic_ib_req_notify_cq(struct ib_cq *cq,
+ enum ib_cq_notify_flags flags);
+struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc);
+#endif /* !USNIC_IB_VERBS_H */
diff --git a/drivers/infiniband/hw/usnic/usnic_log.h b/drivers/infiniband/hw/usnic/usnic_log.h
new file mode 100644
index 000000000000..75777a66c684
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_log.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_LOG_H_
+#define USNIC_LOG_H_
+
+#include "usnic.h"
+
+extern unsigned int usnic_log_lvl;
+
+#define USNIC_LOG_LVL_NONE (0)
+#define USNIC_LOG_LVL_ERR (1)
+#define USNIC_LOG_LVL_INFO (2)
+#define USNIC_LOG_LVL_DBG (3)
+
+#define usnic_printk(lvl, args...) \
+ do { \
+ printk(lvl "%s:%s:%d: ", DRV_NAME, __func__, \
+ __LINE__); \
+ printk(args); \
+ } while (0)
+
+#define usnic_dbg(args...) \
+ do { \
+ if (unlikely(usnic_log_lvl >= USNIC_LOG_LVL_DBG)) { \
+ usnic_printk(KERN_INFO, args); \
+ } \
+} while (0)
+
+#define usnic_info(args...) \
+do { \
+ if (usnic_log_lvl >= USNIC_LOG_LVL_INFO) { \
+ usnic_printk(KERN_INFO, args); \
+ } \
+} while (0)
+
+#define usnic_err(args...) \
+ do { \
+ if (usnic_log_lvl >= USNIC_LOG_LVL_ERR) { \
+ usnic_printk(KERN_ERR, args); \
+ } \
+ } while (0)
+#endif /* !USNIC_LOG_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.c b/drivers/infiniband/hw/usnic/usnic_transport.c
new file mode 100644
index 000000000000..ddef6f77a78c
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_transport.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/bitmap.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <net/inet_sock.h>
+
+#include "usnic_transport.h"
+#include "usnic_log.h"
+
+/* ROCE */
+static unsigned long *roce_bitmap;
+static u16 roce_next_port = 1;
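+/* One bit for every possible u16 port number: (1 << 16) bits = 8192 bytes. */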
+#define ROCE_BITMAP_SZ ((1 << (8 /*CHAR_BIT*/ * sizeof(u16)))/8 /*CHAR BIT*/)
+static DEFINE_SPINLOCK(roce_bitmap_lock);
+
+const char *usnic_transport_to_str(enum usnic_transport_type type)
+{
+ switch (type) {
+ case USNIC_TRANSPORT_UNKNOWN:
+ return "Unknown";
+ case USNIC_TRANSPORT_ROCE_CUSTOM:
+ return "roce custom";
+ case USNIC_TRANSPORT_IPV4_UDP:
+ return "IPv4 UDP";
+ case USNIC_TRANSPORT_MAX:
+ return "Max?";
+ default:
+ return "Not known";
+ }
+}
+
+int usnic_transport_sock_to_str(char *buf, int buf_sz,
+ struct socket *sock)
+{
+ int err;
+ uint32_t addr;
+ uint16_t port;
+ int proto;
+
+ memset(buf, 0, buf_sz);
+ err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port);
+ if (err)
+ return 0;
+
+ return scnprintf(buf, buf_sz, "Proto:%u Addr:%pI4h Port:%hu",
+ proto, &addr, port);
+}
+
+/*
+ * Reserve a port number. If "0" is specified, we will try to pick one
+ * starting at roce_next_port. roce_next_port will take on the values
+ * 1..4096.
+ */
+u16 usnic_transport_rsrv_port(enum usnic_transport_type type, u16 port_num)
+{
+ if (type == USNIC_TRANSPORT_ROCE_CUSTOM) {
+ spin_lock(&roce_bitmap_lock);
+ if (!port_num) {
+ port_num = bitmap_find_next_zero_area(roce_bitmap,
+ ROCE_BITMAP_SZ,
+ roce_next_port /* start */,
+ 1 /* nr */,
+ 0 /* align */);
+ roce_next_port = (port_num & 4095) + 1;
+ } else if (test_bit(port_num, roce_bitmap)) {
+ usnic_err("Failed to allocate port for %s\n",
+ usnic_transport_to_str(type));
+ spin_unlock(&roce_bitmap_lock);
+ goto out_fail;
+ }
+ bitmap_set(roce_bitmap, port_num, 1);
+ spin_unlock(&roce_bitmap_lock);
+ } else {
+ usnic_err("Failed to allocate port - transport %s unsupported\n",
+ usnic_transport_to_str(type));
+ goto out_fail;
+ }
+
+ usnic_dbg("Allocating port %hu for %s\n", port_num,
+ usnic_transport_to_str(type));
+ return port_num;
+
+out_fail:
+ return 0;
+}
+
+void usnic_transport_unrsrv_port(enum usnic_transport_type type, u16 port_num)
+{
+ if (type == USNIC_TRANSPORT_ROCE_CUSTOM) {
+ spin_lock(&roce_bitmap_lock);
+ if (!port_num) {
+			usnic_err("Unreserving invalid port num 0 for %s\n",
+ usnic_transport_to_str(type));
+ goto out_roce_custom;
+ }
+
+ if (!test_bit(port_num, roce_bitmap)) {
+ usnic_err("Unreserving invalid %hu for %s\n",
+ port_num,
+ usnic_transport_to_str(type));
+ goto out_roce_custom;
+ }
+ bitmap_clear(roce_bitmap, port_num, 1);
+ usnic_dbg("Freeing port %hu for %s\n", port_num,
+ usnic_transport_to_str(type));
+out_roce_custom:
+ spin_unlock(&roce_bitmap_lock);
+ } else {
+ usnic_err("Freeing invalid port %hu for %d\n", port_num, type);
+ }
+}
+
+struct socket *usnic_transport_get_socket(int sock_fd)
+{
+ struct socket *sock;
+ int err;
+ char buf[25];
+
+ /* sockfd_lookup will internally do a fget */
+ sock = sockfd_lookup(sock_fd, &err);
+ if (!sock) {
+ usnic_err("Unable to lookup socket for fd %d with err %d\n",
+ sock_fd, err);
+ return ERR_PTR(-ENOENT);
+ }
+
+ usnic_transport_sock_to_str(buf, sizeof(buf), sock);
+ usnic_dbg("Get sock %s\n", buf);
+
+ return sock;
+}
+
+void usnic_transport_put_socket(struct socket *sock)
+{
+ char buf[100];
+
+ usnic_transport_sock_to_str(buf, sizeof(buf), sock);
+ usnic_dbg("Put sock %s\n", buf);
+ sockfd_put(sock);
+}
+
+int usnic_transport_sock_get_addr(struct socket *sock, int *proto,
+ uint32_t *addr, uint16_t *port)
+{
+ int len;
+ int err;
+ struct sockaddr_in sock_addr;
+
+ err = sock->ops->getname(sock,
+ (struct sockaddr *)&sock_addr,
+ &len, 0);
+ if (err)
+ return err;
+
+ if (sock_addr.sin_family != AF_INET)
+ return -EINVAL;
+
+ if (proto)
+ *proto = sock->sk->sk_protocol;
+ if (port)
+ *port = ntohs(((struct sockaddr_in *)&sock_addr)->sin_port);
+ if (addr)
+ *addr = ntohl(((struct sockaddr_in *)
+ &sock_addr)->sin_addr.s_addr);
+
+ return 0;
+}
+
+int usnic_transport_init(void)
+{
+ roce_bitmap = kzalloc(ROCE_BITMAP_SZ, GFP_KERNEL);
+ if (!roce_bitmap) {
+		usnic_err("Failed to allocate bitmap\n");
+ return -ENOMEM;
+ }
+
+ /* Do not ever allocate bit 0, hence set it here */
+ bitmap_set(roce_bitmap, 0, 1);
+ return 0;
+}
+
+void usnic_transport_fini(void)
+{
+ kfree(roce_bitmap);
+}
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.h b/drivers/infiniband/hw/usnic/usnic_transport.h
new file mode 100644
index 000000000000..7e5dc6d9f462
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_transport.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_TRANSPORT_H_
+#define USNIC_TRANSPORT_H_
+
+#include "usnic_abi.h"
+
+const char *usnic_transport_to_str(enum usnic_transport_type trans_type);
+/*
+ * Returns number of bytes written, excluding null terminator. If
+ * nothing was written, the function returns 0.
+ */
+int usnic_transport_sock_to_str(char *buf, int buf_sz,
+ struct socket *sock);
+/*
+ * Reserve a port. If "port_num" is set, then the function will try
+ * to reserve that particular port.
+ */
+u16 usnic_transport_rsrv_port(enum usnic_transport_type type, u16 port_num);
+void usnic_transport_unrsrv_port(enum usnic_transport_type type, u16 port_num);
+/*
+ * Do an fget on the socket referred to by sock_fd and return the socket.
+ * Socket will not be destroyed before usnic_transport_put_socket has
+ * been called.
+ */
+struct socket *usnic_transport_get_socket(int sock_fd);
+void usnic_transport_put_socket(struct socket *sock);
+/*
+ * Call usnic_transport_get_socket before calling *_sock_get_addr
+ */
+int usnic_transport_sock_get_addr(struct socket *sock, int *proto,
+ uint32_t *addr, uint16_t *port);
+int usnic_transport_init(void);
+void usnic_transport_fini(void);
+#endif /* !USNIC_TRANSPORT_H */
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
new file mode 100644
index 000000000000..16755cdab2c0
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -0,0 +1,604 @@
+/*
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/hugetlb.h>
+#include <linux/dma-attrs.h>
+#include <linux/iommu.h>
+#include <linux/workqueue.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+
+#include "usnic_log.h"
+#include "usnic_uiom.h"
+#include "usnic_uiom_interval_tree.h"
+
+static struct workqueue_struct *usnic_uiom_wq;
+
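+/*
+ * Number of scatterlist entries that fit in the remainder of a page after
+ * the usnic_uiom_chunk header.
+ */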
+#define USNIC_UIOM_PAGE_CHUNK \
+ ((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\
+ ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
+ (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
+
+static void usnic_uiom_reg_account(struct work_struct *work)
+{
+ struct usnic_uiom_reg *umem = container_of(work,
+ struct usnic_uiom_reg, work);
+
+ down_write(&umem->mm->mmap_sem);
+ umem->mm->locked_vm -= umem->diff;
+ up_write(&umem->mm->mmap_sem);
+ mmput(umem->mm);
+ kfree(umem);
+}
+
+static int usnic_uiom_dma_fault(struct iommu_domain *domain,
+ struct device *dev,
+ unsigned long iova, int flags,
+ void *token)
+{
+ usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
+ dev_name(dev),
+ domain, iova, flags);
+ return -ENOSYS;
+}
+
+static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
+{
+ struct usnic_uiom_chunk *chunk, *tmp;
+ struct page *page;
+ struct scatterlist *sg;
+ int i;
+ dma_addr_t pa;
+
+ list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
+ for_each_sg(chunk->page_list, sg, chunk->nents, i) {
+ page = sg_page(sg);
+ pa = sg_phys(sg);
+ if (dirty)
+ set_page_dirty_lock(page);
+ put_page(page);
+ usnic_dbg("pa: %pa\n", &pa);
+ }
+ kfree(chunk);
+ }
+}
+
+static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
+ int dmasync, struct list_head *chunk_list)
+{
+ struct page **page_list;
+ struct scatterlist *sg;
+ struct usnic_uiom_chunk *chunk;
+ unsigned long locked;
+ unsigned long lock_limit;
+ unsigned long cur_base;
+ unsigned long npages;
+ int ret;
+ int off;
+ int i;
+ int flags;
+ dma_addr_t pa;
+ DEFINE_DMA_ATTRS(attrs);
+
+ if (dmasync)
+ dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+
+ if (!can_do_mlock())
+ return -EPERM;
+
+ INIT_LIST_HEAD(chunk_list);
+
+ page_list = (struct page **) __get_free_page(GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
+
+ down_write(&current->mm->mmap_sem);
+
+ locked = npages + current->mm->locked_vm;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ flags = IOMMU_READ | IOMMU_CACHE;
+ flags |= (writable) ? IOMMU_WRITE : 0;
+ cur_base = addr & PAGE_MASK;
+ ret = 0;
+
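+	/* Pin at most one page worth of struct page pointers per iteration */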
+ while (npages) {
+ ret = get_user_pages(current, current->mm, cur_base,
+ min_t(unsigned long, npages,
+ PAGE_SIZE / sizeof(struct page *)),
+ 1, !writable, page_list, NULL);
+
+ if (ret < 0)
+ goto out;
+
+ npages -= ret;
+ off = 0;
+
+ while (ret) {
+ chunk = kmalloc(sizeof(*chunk) +
+ sizeof(struct scatterlist) *
+ min_t(int, ret, USNIC_UIOM_PAGE_CHUNK),
+ GFP_KERNEL);
+ if (!chunk) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
+ sg_init_table(chunk->page_list, chunk->nents);
+ for_each_sg(chunk->page_list, sg, chunk->nents, i) {
+ sg_set_page(sg, page_list[i + off],
+ PAGE_SIZE, 0);
+ pa = sg_phys(sg);
+ usnic_dbg("va: 0x%lx pa: %pa\n",
+ cur_base + i*PAGE_SIZE, &pa);
+ }
+ cur_base += chunk->nents * PAGE_SIZE;
+ ret -= chunk->nents;
+ off += chunk->nents;
+ list_add_tail(&chunk->list, chunk_list);
+ }
+
+ ret = 0;
+ }
+
+out:
+ if (ret < 0)
+ usnic_uiom_put_pages(chunk_list, 0);
+ else
+ current->mm->locked_vm = locked;
+
+ up_write(&current->mm->mmap_sem);
+ free_page((unsigned long) page_list);
+ return ret;
+}
+
+static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
+ struct usnic_uiom_pd *pd)
+{
+ struct usnic_uiom_interval_node *interval, *tmp;
+ long unsigned va, size;
+
+ list_for_each_entry_safe(interval, tmp, intervals, link) {
+ va = interval->start << PAGE_SHIFT;
+ size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
+ while (size > 0) {
+ /* Workaround for RH 970401 */
+ usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
+ iommu_unmap(pd->domain, va, PAGE_SIZE);
+ va += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ }
+}
+
+static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
+ struct usnic_uiom_reg *uiomr,
+ int dirty)
+{
+ int npages;
+ unsigned long vpn_start, vpn_last;
+ struct usnic_uiom_interval_node *interval, *tmp;
+ int writable = 0;
+ LIST_HEAD(rm_intervals);
+
+ npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
+ vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
+ vpn_last = vpn_start + npages - 1;
+
+ spin_lock(&pd->lock);
+ usnic_uiom_remove_interval(&pd->rb_root, vpn_start,
+ vpn_last, &rm_intervals);
+ usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);
+
+ list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
+ if (interval->flags & IOMMU_WRITE)
+ writable = 1;
+ list_del(&interval->link);
+ kfree(interval);
+ }
+
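+	/* Pages are marked dirty only if the region was mapped writable */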
+ usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
+ spin_unlock(&pd->lock);
+}
+
+static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
+ struct usnic_uiom_reg *uiomr)
+{
+ int i, err;
+ size_t size;
+ struct usnic_uiom_chunk *chunk;
+ struct usnic_uiom_interval_node *interval_node;
+ dma_addr_t pa;
+ dma_addr_t pa_start = 0;
+ dma_addr_t pa_end = 0;
+ long int va_start = -EINVAL;
+ struct usnic_uiom_pd *pd = uiomr->pd;
+ long int va = uiomr->va & PAGE_MASK;
+ int flags = IOMMU_READ | IOMMU_CACHE;
+
+ flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
+ chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
+ list);
+ list_for_each_entry(interval_node, intervals, link) {
+iter_chunk:
+ for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
+ pa = sg_phys(&chunk->page_list[i]);
+ if ((va >> PAGE_SHIFT) < interval_node->start)
+ continue;
+
+ if ((va >> PAGE_SHIFT) == interval_node->start) {
+ /* First page of the interval */
+ va_start = va;
+ pa_start = pa;
+ pa_end = pa;
+ }
+
+ WARN_ON(va_start == -EINVAL);
+
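+			/* Flush the run built so far if the next PA is not contiguous */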
+ if ((pa_end + PAGE_SIZE != pa) &&
+ (pa != pa_start)) {
+ /* PAs are not contiguous */
+ size = pa_end - pa_start + PAGE_SIZE;
+ usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
+ va_start, &pa_start, size, flags);
+ err = iommu_map(pd->domain, va_start, pa_start,
+ size, flags);
+ if (err) {
+ usnic_err("Failed to map va 0x%lx pa 0x%pa size 0x%zx with err %d\n",
+ va_start, &pa_start, size, err);
+ goto err_out;
+ }
+ va_start = va;
+ pa_start = pa;
+ pa_end = pa;
+ }
+
+ if ((va >> PAGE_SHIFT) == interval_node->last) {
+ /* Last page of the interval */
+ size = pa - pa_start + PAGE_SIZE;
+ usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
+ va_start, &pa_start, size, flags);
+ err = iommu_map(pd->domain, va_start, pa_start,
+ size, flags);
+ if (err) {
+ usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
+ va_start, &pa_start, size, err);
+ goto err_out;
+ }
+ break;
+ }
+
+ if (pa != pa_start)
+ pa_end += PAGE_SIZE;
+ }
+
+ if (i == chunk->nents) {
+ /*
+ * Hit last entry of the chunk,
+ * hence advance to next chunk
+ */
+ chunk = list_first_entry(&chunk->list,
+ struct usnic_uiom_chunk,
+ list);
+ goto iter_chunk;
+ }
+ }
+
+ return 0;
+
+err_out:
+ usnic_uiom_unmap_sorted_intervals(intervals, pd);
+ return err;
+}
+
+struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
+ unsigned long addr, size_t size,
+ int writable, int dmasync)
+{
+ struct usnic_uiom_reg *uiomr;
+ unsigned long va_base, vpn_start, vpn_last;
+ unsigned long npages;
+ int offset, err;
+ LIST_HEAD(sorted_diff_intervals);
+
+ /*
+	 * The Intel IOMMU returns an error if a translation entry is
+	 * changed from read-only to writable. This module cannot simply
+	 * unmap and remap the entry with the corrected permission,
+	 * because that opens a small window in which hardware DMA may
+	 * page fault. Hence, make all entries writable from the start.
+ */
+ writable = 1;
+
+ va_base = addr & PAGE_MASK;
+ offset = addr & ~PAGE_MASK;
+ npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
+ vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
+ vpn_last = vpn_start + npages - 1;
+
+ uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
+ if (!uiomr)
+ return ERR_PTR(-ENOMEM);
+
+ uiomr->va = va_base;
+ uiomr->offset = offset;
+ uiomr->length = size;
+ uiomr->writable = writable;
+ uiomr->pd = pd;
+
+ err = usnic_uiom_get_pages(addr, size, writable, dmasync,
+ &uiomr->chunk_list);
+ if (err) {
+ usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
+ vpn_start, vpn_last, err);
+ goto out_free_uiomr;
+ }
+
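+	/*
+	 * Find the sub-ranges of [vpn_start, vpn_last] that still need
+	 * to be mapped with the requested permissions.
+	 */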
+ spin_lock(&pd->lock);
+ err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
+ (writable) ? IOMMU_WRITE : 0,
+ IOMMU_WRITE,
+ &pd->rb_root,
+ &sorted_diff_intervals);
+ if (err) {
+ usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
+ vpn_start, vpn_last, err);
+ goto out_put_pages;
+ }
+
+ err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
+ if (err) {
+ usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
+ vpn_start, vpn_last, err);
+ goto out_put_intervals;
+
+ }
+
+ err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last,
+ (writable) ? IOMMU_WRITE : 0);
+ if (err) {
+ usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
+ vpn_start, vpn_last, err);
+ goto out_unmap_intervals;
+ }
+
+ usnic_uiom_put_interval_set(&sorted_diff_intervals);
+ spin_unlock(&pd->lock);
+
+ return uiomr;
+
+out_unmap_intervals:
+ usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
+out_put_intervals:
+ usnic_uiom_put_interval_set(&sorted_diff_intervals);
+out_put_pages:
+ usnic_uiom_put_pages(&uiomr->chunk_list, 0);
+ spin_unlock(&pd->lock);
+out_free_uiomr:
+ kfree(uiomr);
+ return ERR_PTR(err);
+}
+
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
+{
+ struct mm_struct *mm;
+ unsigned long diff;
+
+ __usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
+
+ mm = get_task_mm(current);
+ if (!mm) {
+ kfree(uiomr);
+ return;
+ }
+
+ diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
+
+ /*
+ * We may be called with the mm's mmap_sem already held. This
+ * can happen when a userspace munmap() is the call that drops
+ * the last reference to our file and calls our release
+ * method. If there are memory regions to destroy, we'll end
+ * up here and not be able to take the mmap_sem. In that case
+ * we defer the vm_locked accounting to the system workqueue.
+ */
+ if (closing) {
+ if (!down_write_trylock(&mm->mmap_sem)) {
+ INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
+ uiomr->mm = mm;
+ uiomr->diff = diff;
+
+ queue_work(usnic_uiom_wq, &uiomr->work);
+ return;
+ }
+ } else
+ down_write(&mm->mmap_sem);
+
+ current->mm->locked_vm -= diff;
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ kfree(uiomr);
+}
+
+struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
+{
+ struct usnic_uiom_pd *pd;
+ void *domain;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
+ if (IS_ERR_OR_NULL(domain)) {
+ usnic_err("Failed to allocate IOMMU domain with err %ld\n",
+ PTR_ERR(pd->domain));
+ kfree(pd);
+ return ERR_PTR(domain ? PTR_ERR(domain) : -ENOMEM);
+ }
+
+ iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);
+
+ spin_lock_init(&pd->lock);
+ INIT_LIST_HEAD(&pd->devs);
+
+ return pd;
+}
+
+void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
+{
+ iommu_domain_free(pd->domain);
+ kfree(pd);
+}
+
+int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
+{
+ struct usnic_uiom_dev *uiom_dev;
+ int err;
+
+ uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
+ if (!uiom_dev)
+ return -ENOMEM;
+ uiom_dev->dev = dev;
+
+ err = iommu_attach_device(pd->domain, dev);
+ if (err)
+ goto out_free_dev;
+
+ if (!iommu_domain_has_cap(pd->domain, IOMMU_CAP_CACHE_COHERENCY)) {
+ usnic_err("IOMMU of %s does not support cache coherency\n",
+ dev_name(dev));
+ err = -EINVAL;
+ goto out_detach_device;
+ }
+
+ spin_lock(&pd->lock);
+ list_add_tail(&uiom_dev->link, &pd->devs);
+ pd->dev_cnt++;
+ spin_unlock(&pd->lock);
+
+ return 0;
+
+out_detach_device:
+ iommu_detach_device(pd->domain, dev);
+out_free_dev:
+ kfree(uiom_dev);
+ return err;
+}
+
+void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
+{
+ struct usnic_uiom_dev *uiom_dev;
+ int found = 0;
+
+ spin_lock(&pd->lock);
+ list_for_each_entry(uiom_dev, &pd->devs, link) {
+ if (uiom_dev->dev == dev) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ usnic_err("Unable to free dev %s - not found\n",
+ dev_name(dev));
+ spin_unlock(&pd->lock);
+ return;
+ }
+
+ list_del(&uiom_dev->link);
+ pd->dev_cnt--;
+ spin_unlock(&pd->lock);
+
+ return iommu_detach_device(pd->domain, dev);
+}
+
+struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
+{
+ struct usnic_uiom_dev *uiom_dev;
+ struct device **devs;
+ int i = 0;
+
+ spin_lock(&pd->lock);
+ devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
+ if (!devs) {
+ devs = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ list_for_each_entry(uiom_dev, &pd->devs, link) {
+ devs[i++] = uiom_dev->dev;
+ }
+out:
+ spin_unlock(&pd->lock);
+ return devs;
+}
+
+void usnic_uiom_free_dev_list(struct device **devs)
+{
+ kfree(devs);
+}
+
+int usnic_uiom_init(char *drv_name)
+{
+ if (!iommu_present(&pci_bus_type)) {
+ usnic_err("IOMMU required but not present or enabled. USNIC QPs will not function w/o enabling IOMMU\n");
+ return -EPERM;
+ }
+
+ usnic_uiom_wq = create_workqueue(drv_name);
+ if (!usnic_uiom_wq) {
+ usnic_err("Unable to alloc wq for drv %s\n", drv_name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void usnic_uiom_fini(void)
+{
+ flush_workqueue(usnic_uiom_wq);
+ destroy_workqueue(usnic_uiom_wq);
+}
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h
new file mode 100644
index 000000000000..70440996e8f2
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_UIOM_H_
+#define USNIC_UIOM_H_
+
+#include <linux/list.h>
+#include <linux/scatterlist.h>
+
+#include "usnic_uiom_interval_tree.h"
+
+#define USNIC_UIOM_READ (1)
+#define USNIC_UIOM_WRITE (2)
+
+#define USNIC_UIOM_MAX_PD_CNT (1000)
+#define USNIC_UIOM_MAX_MR_CNT (1000000)
+#define USNIC_UIOM_MAX_MR_SIZE (~0UL)
+#define USNIC_UIOM_PAGE_SIZE (PAGE_SIZE)
+
+struct usnic_uiom_dev {
+ struct device *dev;
+ struct list_head link;
+};
+
+struct usnic_uiom_pd {
+ struct iommu_domain *domain;
+ spinlock_t lock;
+ struct rb_root rb_root;
+ struct list_head devs;
+ int dev_cnt;
+};
+
+struct usnic_uiom_reg {
+ struct usnic_uiom_pd *pd;
+ unsigned long va;
+ size_t length;
+ int offset;
+ int page_size;
+ int writable;
+ struct list_head chunk_list;
+ struct work_struct work;
+ struct mm_struct *mm;
+ unsigned long diff;
+};
+
+struct usnic_uiom_chunk {
+ struct list_head list;
+ int nents;
+ struct scatterlist page_list[0];
+};
+
+struct usnic_uiom_pd *usnic_uiom_alloc_pd(void);
+void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd);
+int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev);
+void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd,
+ struct device *dev);
+struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd);
+void usnic_uiom_free_dev_list(struct device **devs);
+struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
+ unsigned long addr, size_t size,
+ int access, int dmasync);
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing);
+int usnic_uiom_init(char *drv_name);
+void usnic_uiom_fini(void);
+#endif /* USNIC_UIOM_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
new file mode 100644
index 000000000000..d135ad90d914
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
@@ -0,0 +1,236 @@
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+
+#include <linux/interval_tree_generic.h>
+#include "usnic_uiom_interval_tree.h"
+
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+
+#define MAKE_NODE(node, start, end, ref_cnt, flags, err, err_out) \
+ do { \
+ node = usnic_uiom_interval_node_alloc(start, \
+ end, ref_cnt, flags); \
+ if (!node) { \
+ err = -ENOMEM; \
+ goto err_out; \
+ } \
+ } while (0)
+
+#define MARK_FOR_ADD(node, list) (list_add_tail(&node->link, list))
+
+#define MAKE_NODE_AND_APPEND(node, start, end, ref_cnt, flags, err, \
+ err_out, list) \
+ do { \
+ MAKE_NODE(node, start, end, \
+ ref_cnt, flags, err, \
+ err_out); \
+ MARK_FOR_ADD(node, list); \
+ } while (0)
+
+#define FLAGS_EQUAL(flags1, flags2, mask) \
+ (((flags1) & (mask)) == ((flags2) & (mask)))
+
+static struct usnic_uiom_interval_node*
+usnic_uiom_interval_node_alloc(long int start, long int last, int ref_cnt,
+ int flags)
+{
+ struct usnic_uiom_interval_node *interval = kzalloc(sizeof(*interval),
+ GFP_ATOMIC);
+ if (!interval)
+ return NULL;
+
+ interval->start = start;
+ interval->last = last;
+ interval->flags = flags;
+ interval->ref_cnt = ref_cnt;
+
+ return interval;
+}
+
+static int interval_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct usnic_uiom_interval_node *node_a, *node_b;
+
+ node_a = list_entry(a, struct usnic_uiom_interval_node, link);
+ node_b = list_entry(b, struct usnic_uiom_interval_node, link);
+
+	/* Compare explicitly rather than subtracting to avoid long-to-int truncation */
+ if (node_a->start < node_b->start)
+ return -1;
+ else if (node_a->start > node_b->start)
+ return 1;
+
+ return 0;
+}
+
+static void
+find_intervals_intersection_sorted(struct rb_root *root, unsigned long start,
+ unsigned long last,
+ struct list_head *list)
+{
+ struct usnic_uiom_interval_node *node;
+
+ INIT_LIST_HEAD(list);
+
+ for (node = usnic_uiom_interval_tree_iter_first(root, start, last);
+ node;
+ node = usnic_uiom_interval_tree_iter_next(node, start, last))
+ list_add_tail(&node->link, list);
+
+ list_sort(NULL, list, interval_cmp);
+}
+
+int usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last,
+ int flags, int flag_mask,
+ struct rb_root *root,
+ struct list_head *diff_set)
+{
+ struct usnic_uiom_interval_node *interval, *tmp;
+ int err = 0;
+ long int pivot = start;
+ LIST_HEAD(intersection_set);
+
+ INIT_LIST_HEAD(diff_set);
+
+ find_intervals_intersection_sorted(root, start, last,
+ &intersection_set);
+
+ list_for_each_entry(interval, &intersection_set, link) {
+ if (pivot < interval->start) {
+ MAKE_NODE_AND_APPEND(tmp, pivot, interval->start - 1,
+ 1, flags, err, err_out,
+ diff_set);
+ pivot = interval->start;
+ }
+
+ /*
+ * Invariant: Set [start, pivot] is either in diff_set or root,
+ * but not in both.
+ */
+
+ if (pivot > interval->last) {
+ continue;
+ } else if (pivot <= interval->last &&
+ FLAGS_EQUAL(interval->flags, flags,
+ flag_mask)) {
+ pivot = interval->last + 1;
+ }
+ }
+
+ if (pivot <= last)
+ MAKE_NODE_AND_APPEND(tmp, pivot, last, 1, flags, err, err_out,
+ diff_set);
+
+ return 0;
+
+err_out:
+ list_for_each_entry_safe(interval, tmp, diff_set, link) {
+ list_del(&interval->link);
+ kfree(interval);
+ }
+
+ return err;
+}
+
+void usnic_uiom_put_interval_set(struct list_head *intervals)
+{
+ struct usnic_uiom_interval_node *interval, *tmp;
+ list_for_each_entry_safe(interval, tmp, intervals, link)
+ kfree(interval);
+}
+
+int usnic_uiom_insert_interval(struct rb_root *root, unsigned long start,
+ unsigned long last, int flags)
+{
+ struct usnic_uiom_interval_node *interval, *tmp;
+ unsigned long istart, ilast;
+ int iref_cnt, iflags;
+ unsigned long lpivot = start;
+ int err = 0;
+ LIST_HEAD(to_add);
+ LIST_HEAD(intersection_set);
+
+ find_intervals_intersection_sorted(root, start, last,
+ &intersection_set);
+
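+	/*
+	 * Walk the overlapping intervals in sorted order; overlapped portions
+	 * get their reference counts bumped while gaps between them are
+	 * filled with new nodes.
+	 */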
+ list_for_each_entry(interval, &intersection_set, link) {
+ /*
+		 * Invariant: lpivot is the left edge of the next interval
+		 * to be inserted.
+ */
+ istart = interval->start;
+ ilast = interval->last;
+ iref_cnt = interval->ref_cnt;
+ iflags = interval->flags;
+
+ if (istart < lpivot) {
+ MAKE_NODE_AND_APPEND(tmp, istart, lpivot - 1, iref_cnt,
+ iflags, err, err_out, &to_add);
+ } else if (istart > lpivot) {
+ MAKE_NODE_AND_APPEND(tmp, lpivot, istart - 1, 1, flags,
+ err, err_out, &to_add);
+ lpivot = istart;
+ } else {
+ lpivot = istart;
+ }
+
+ if (ilast > last) {
+ MAKE_NODE_AND_APPEND(tmp, lpivot, last, iref_cnt + 1,
+ iflags | flags, err, err_out,
+ &to_add);
+ MAKE_NODE_AND_APPEND(tmp, last + 1, ilast, iref_cnt,
+ iflags, err, err_out, &to_add);
+ } else {
+ MAKE_NODE_AND_APPEND(tmp, lpivot, ilast, iref_cnt + 1,
+ iflags | flags, err, err_out,
+ &to_add);
+ }
+
+ lpivot = ilast + 1;
+ }
+
+ if (lpivot <= last)
+ MAKE_NODE_AND_APPEND(tmp, lpivot, last, 1, flags, err, err_out,
+ &to_add);
+
+ list_for_each_entry_safe(interval, tmp, &intersection_set, link) {
+ usnic_uiom_interval_tree_remove(interval, root);
+ kfree(interval);
+ }
+
+ list_for_each_entry(interval, &to_add, link)
+ usnic_uiom_interval_tree_insert(interval, root);
+
+ return 0;
+
+err_out:
+ list_for_each_entry_safe(interval, tmp, &to_add, link)
+ kfree(interval);
+
+ return err;
+}
+
+void usnic_uiom_remove_interval(struct rb_root *root, unsigned long start,
+ unsigned long last, struct list_head *removed)
+{
+ struct usnic_uiom_interval_node *interval;
+
+ for (interval = usnic_uiom_interval_tree_iter_first(root, start, last);
+ interval;
+ interval = usnic_uiom_interval_tree_iter_next(interval,
+ start,
+ last)) {
+ if (--interval->ref_cnt == 0)
+ list_add_tail(&interval->link, removed);
+ }
+
+ list_for_each_entry(interval, removed, link)
+ usnic_uiom_interval_tree_remove(interval, root);
+}
+
+INTERVAL_TREE_DEFINE(struct usnic_uiom_interval_node, rb,
+ unsigned long, __subtree_last,
+ START, LAST, , usnic_uiom_interval_tree)
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
new file mode 100644
index 000000000000..d4f752e258fd
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_UIOM_INTERVAL_TREE_H_
+#define USNIC_UIOM_INTERVAL_TREE_H_
+
+#include <linux/rbtree.h>
+
+struct usnic_uiom_interval_node {
+ struct rb_node rb;
+ struct list_head link;
+ unsigned long start;
+ unsigned long last;
+ unsigned long __subtree_last;
+ unsigned int ref_cnt;
+ int flags;
+};
+
+extern void
+usnic_uiom_interval_tree_insert(struct usnic_uiom_interval_node *node,
+ struct rb_root *root);
+extern void
+usnic_uiom_interval_tree_remove(struct usnic_uiom_interval_node *node,
+ struct rb_root *root);
+extern struct usnic_uiom_interval_node *
+usnic_uiom_interval_tree_iter_first(struct rb_root *root,
+ unsigned long start,
+ unsigned long last);
+extern struct usnic_uiom_interval_node *
+usnic_uiom_interval_tree_iter_next(struct usnic_uiom_interval_node *node,
+ unsigned long start, unsigned long last);
+/*
+ * Inserts {start...last} into {root}. If there are overlaps, existing
+ * nodes will be broken up and merged as needed.
+ */
+int usnic_uiom_insert_interval(struct rb_root *root,
+ unsigned long start, unsigned long last,
+ int flags);
+/*
+ * Removes {start...last} from {root}. The removed nodes are returned in
+ * 'removed'. The caller is responsible for freeing the memory of the
+ * nodes in 'removed'.
+ */
+void usnic_uiom_remove_interval(struct rb_root *root,
+ unsigned long start, unsigned long last,
+ struct list_head *removed);
+/*
+ * Returns {start...last} - {root} (relative complement of {start...last} in
+ * {root}) in diff_set, sorted in ascending order
+ */
+int usnic_uiom_get_intervals_diff(unsigned long start,
+ unsigned long last, int flags,
+ int flag_mask,
+ struct rb_root *root,
+ struct list_head *diff_set);
+/* Call this to free diff_set returned by usnic_uiom_get_intervals_diff */
+void usnic_uiom_put_interval_set(struct list_head *intervals);
+#endif /* USNIC_UIOM_INTERVAL_TREE_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.c b/drivers/infiniband/hw/usnic/usnic_vnic.c
new file mode 100644
index 000000000000..656b88c39eda
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_vnic.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "usnic_ib.h"
+#include "vnic_resource.h"
+#include "usnic_log.h"
+#include "usnic_vnic.h"
+
+struct usnic_vnic {
+ struct vnic_dev *vdev;
+ struct vnic_dev_bar bar[PCI_NUM_RESOURCES];
+ struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX];
+ spinlock_t res_lock;
+};
+
+static enum vnic_res_type _to_vnic_res_type(enum usnic_vnic_res_type res_type)
+{
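+	/*
+	 * Expand the USNIC_VNIC_RES_TYPES X-macro into a lookup table
+	 * from usnic resource types to vnic resource types.
+	 */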
+#define DEFINE_USNIC_VNIC_RES_AT(usnic_vnic_res_t, vnic_res_type, desc, val) \
+ vnic_res_type,
+#define DEFINE_USNIC_VNIC_RES(usnic_vnic_res_t, vnic_res_type, desc) \
+ vnic_res_type,
+ static enum vnic_res_type usnic_vnic_type_2_vnic_type[] = {
+ USNIC_VNIC_RES_TYPES};
+#undef DEFINE_USNIC_VNIC_RES
+#undef DEFINE_USNIC_VNIC_RES_AT
+
+ if (res_type >= USNIC_VNIC_RES_TYPE_MAX)
+ return RES_TYPE_MAX;
+
+ return usnic_vnic_type_2_vnic_type[res_type];
+}
+
+const char *usnic_vnic_res_type_to_str(enum usnic_vnic_res_type res_type)
+{
+#define DEFINE_USNIC_VNIC_RES_AT(usnic_vnic_res_t, vnic_res_type, desc, val) \
+ desc,
+#define DEFINE_USNIC_VNIC_RES(usnic_vnic_res_t, vnic_res_type, desc) \
+ desc,
+ static const char * const usnic_vnic_res_type_desc[] = {
+ USNIC_VNIC_RES_TYPES};
+#undef DEFINE_USNIC_VNIC_RES
+#undef DEFINE_USNIC_VNIC_RES_AT
+
+ if (res_type >= USNIC_VNIC_RES_TYPE_MAX)
+ return "unknown";
+
+ return usnic_vnic_res_type_desc[res_type];
+
+}
+
+const char *usnic_vnic_pci_name(struct usnic_vnic *vnic)
+{
+ return pci_name(usnic_vnic_get_pdev(vnic));
+}
+
+int usnic_vnic_dump(struct usnic_vnic *vnic, char *buf,
+ int buf_sz,
+ void *hdr_obj,
+ int (*printtitle)(void *, char*, int),
+ int (*printcols)(char *, int),
+ int (*printrow)(void *, char *, int))
+{
+ struct usnic_vnic_res_chunk *chunk;
+ struct usnic_vnic_res *res;
+ struct vnic_dev_bar *bar0;
+ int i, j, offset;
+
+ offset = 0;
+ bar0 = usnic_vnic_get_bar(vnic, 0);
+ offset += scnprintf(buf + offset, buf_sz - offset,
+ "VF:%hu BAR0 bus_addr=%pa vaddr=0x%p size=%ld ",
+ usnic_vnic_get_index(vnic),
+ &bar0->bus_addr,
+ bar0->vaddr, bar0->len);
+ if (printtitle)
+ offset += printtitle(hdr_obj, buf + offset, buf_sz - offset);
+ offset += scnprintf(buf + offset, buf_sz - offset, "\n");
+ offset += scnprintf(buf + offset, buf_sz - offset,
+ "|RES\t|CTRL_PIN\t\t|IN_USE\t");
+ if (printcols)
+ offset += printcols(buf + offset, buf_sz - offset);
+ offset += scnprintf(buf + offset, buf_sz - offset, "\n");
+
+ spin_lock(&vnic->res_lock);
+ for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) {
+ chunk = &vnic->chunks[i];
+ for (j = 0; j < chunk->cnt; j++) {
+ res = chunk->res[j];
+ offset += scnprintf(buf + offset, buf_sz - offset,
+ "|%s[%u]\t|0x%p\t|%u\t",
+ usnic_vnic_res_type_to_str(res->type),
+ res->vnic_idx, res->ctrl, !!res->owner);
+ if (printrow) {
+ offset += printrow(res->owner, buf + offset,
+ buf_sz - offset);
+ }
+ offset += scnprintf(buf + offset, buf_sz - offset,
+ "\n");
+ }
+ }
+ spin_unlock(&vnic->res_lock);
+ return offset;
+}
+
+void usnic_vnic_res_spec_update(struct usnic_vnic_res_spec *spec,
+ enum usnic_vnic_res_type trgt_type,
+ u16 cnt)
+{
+ int i;
+
+ for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) {
+ if (spec->resources[i].type == trgt_type) {
+ spec->resources[i].cnt = cnt;
+ return;
+ }
+ }
+
+ WARN_ON(1);
+}
+
+int usnic_vnic_res_spec_satisfied(const struct usnic_vnic_res_spec *min_spec,
+ struct usnic_vnic_res_spec *res_spec)
+{
+ int found, i, j;
+
+ for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) {
+ found = 0;
+
+ for (j = 0; j < USNIC_VNIC_RES_TYPE_MAX; j++) {
+ if (res_spec->resources[i].type !=
+ min_spec->resources[i].type)
+ continue;
+ found = 1;
+ if (min_spec->resources[i].cnt >
+ res_spec->resources[i].cnt)
+ return -EINVAL;
+ break;
+ }
+
+ if (!found)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int usnic_vnic_spec_dump(char *buf, int buf_sz,
+ struct usnic_vnic_res_spec *res_spec)
+{
+ enum usnic_vnic_res_type res_type;
+ int res_cnt;
+ int i;
+ int offset = 0;
+
+ for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) {
+ res_type = res_spec->resources[i].type;
+ res_cnt = res_spec->resources[i].cnt;
+ offset += scnprintf(buf + offset, buf_sz - offset,
+ "Res: %s Cnt: %d ",
+ usnic_vnic_res_type_to_str(res_type),
+ res_cnt);
+ }
+
+ return offset;
+}
+
+int usnic_vnic_check_room(struct usnic_vnic *vnic,
+ struct usnic_vnic_res_spec *res_spec)
+{
+ int i;
+ enum usnic_vnic_res_type res_type;
+ int res_cnt;
+
+ for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) {
+ res_type = res_spec->resources[i].type;
+ res_cnt = res_spec->resources[i].cnt;
+
+ if (res_type == USNIC_VNIC_RES_TYPE_EOL)
+ break;
+
+ if (res_cnt > usnic_vnic_res_free_cnt(vnic, res_type))
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int usnic_vnic_res_cnt(struct usnic_vnic *vnic,
+ enum usnic_vnic_res_type type)
+{
+ return vnic->chunks[type].cnt;
+}
+
+int usnic_vnic_res_free_cnt(struct usnic_vnic *vnic,
+ enum usnic_vnic_res_type type)
+{
+ return vnic->chunks[type].free_cnt;
+}
+
+struct usnic_vnic_res_chunk *
+usnic_vnic_get_resources(struct usnic_vnic *vnic, enum usnic_vnic_res_type type,
+ int cnt, void *owner)
+{
+ struct usnic_vnic_res_chunk *src, *ret;
+ struct usnic_vnic_res *res;
+ int i;
+
+ if (usnic_vnic_res_free_cnt(vnic, type) < cnt || cnt < 1 || !owner)
+ return ERR_PTR(-EINVAL);
+
+ ret = kzalloc(sizeof(*ret), GFP_ATOMIC);
+ if (!ret) {
+ usnic_err("Failed to allocate chunk for %s - Out of memory\n",
+ usnic_vnic_pci_name(vnic));
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret->res = kzalloc(sizeof(*(ret->res))*cnt, GFP_ATOMIC);
+ if (!ret->res) {
+ usnic_err("Failed to allocate resources for %s. Out of memory\n",
+ usnic_vnic_pci_name(vnic));
+ kfree(ret);
+ return ERR_PTR(-ENOMEM);
+ }
+
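+	/* Claim up to cnt unowned resources of the requested type */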
+ spin_lock(&vnic->res_lock);
+ src = &vnic->chunks[type];
+ for (i = 0; i < src->cnt && ret->cnt < cnt; i++) {
+ res = src->res[i];
+ if (!res->owner) {
+ src->free_cnt--;
+ res->owner = owner;
+ ret->res[ret->cnt++] = res;
+ }
+ }
+
+ spin_unlock(&vnic->res_lock);
+ ret->type = type;
+ ret->vnic = vnic;
+ WARN_ON(ret->cnt != cnt);
+
+ return ret;
+}
+
+void usnic_vnic_put_resources(struct usnic_vnic_res_chunk *chunk)
+{
+
+ struct usnic_vnic_res *res;
+ int i;
+ struct usnic_vnic *vnic = chunk->vnic;
+
+ spin_lock(&vnic->res_lock);
+ while ((i = --chunk->cnt) >= 0) {
+ res = chunk->res[i];
+ chunk->res[i] = NULL;
+ res->owner = NULL;
+ vnic->chunks[res->type].free_cnt++;
+ }
+ spin_unlock(&vnic->res_lock);
+
+ kfree(chunk->res);
+ kfree(chunk);
+}
+
+u16 usnic_vnic_get_index(struct usnic_vnic *vnic)
+{
+ return usnic_vnic_get_pdev(vnic)->devfn - 1;
+}
+
+static int usnic_vnic_alloc_res_chunk(struct usnic_vnic *vnic,
+ enum usnic_vnic_res_type type,
+ struct usnic_vnic_res_chunk *chunk)
+{
+ int cnt, err, i;
+ struct usnic_vnic_res *res;
+
+ cnt = vnic_dev_get_res_count(vnic->vdev, _to_vnic_res_type(type));
+ if (cnt < 1)
+ return -EINVAL;
+
+ chunk->cnt = chunk->free_cnt = cnt;
+ chunk->res = kzalloc(sizeof(*(chunk->res))*cnt, GFP_KERNEL);
+ if (!chunk->res)
+ return -ENOMEM;
+
+ for (i = 0; i < cnt; i++) {
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ err = -ENOMEM;
+ goto fail;
+ }
+ res->type = type;
+ res->vnic_idx = i;
+ res->vnic = vnic;
+ res->ctrl = vnic_dev_get_res(vnic->vdev,
+ _to_vnic_res_type(type), i);
+ chunk->res[i] = res;
+ }
+
+ chunk->vnic = vnic;
+ return 0;
+fail:
+ for (i--; i >= 0; i--)
+ kfree(chunk->res[i]);
+ kfree(chunk->res);
+ return err;
+}
+
+static void usnic_vnic_free_res_chunk(struct usnic_vnic_res_chunk *chunk)
+{
+ int i;
+ for (i = 0; i < chunk->cnt; i++)
+ kfree(chunk->res[i]);
+ kfree(chunk->res);
+}
+
+static int usnic_vnic_discover_resources(struct pci_dev *pdev,
+ struct usnic_vnic *vnic)
+{
+ enum usnic_vnic_res_type res_type;
+ int i;
+ int err = 0;
+
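+	/* Map each memory BAR of the PCI function before registering the vnic */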
+ for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) {
+ if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
+ continue;
+ vnic->bar[i].len = pci_resource_len(pdev, i);
+ vnic->bar[i].vaddr = pci_iomap(pdev, i, vnic->bar[i].len);
+ if (!vnic->bar[i].vaddr) {
+ usnic_err("Cannot memory-map BAR %d, aborting\n",
+ i);
+ err = -ENODEV;
+ goto out_clean_bar;
+ }
+ vnic->bar[i].bus_addr = pci_resource_start(pdev, i);
+ }
+
+ vnic->vdev = vnic_dev_register(NULL, pdev, pdev, vnic->bar,
+ ARRAY_SIZE(vnic->bar));
+ if (!vnic->vdev) {
+ usnic_err("Failed to register device %s\n",
+ pci_name(pdev));
+ err = -EINVAL;
+ goto out_clean_bar;
+ }
+
+ for (res_type = USNIC_VNIC_RES_TYPE_EOL + 1;
+ res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) {
+ err = usnic_vnic_alloc_res_chunk(vnic, res_type,
+ &vnic->chunks[res_type]);
+ if (err) {
+ usnic_err("Failed to alloc res %s with err %d\n",
+ usnic_vnic_res_type_to_str(res_type),
+ err);
+ goto out_clean_chunks;
+ }
+ }
+
+ return 0;
+
+out_clean_chunks:
+ for (res_type--; res_type > USNIC_VNIC_RES_TYPE_EOL; res_type--)
+ usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);
+ vnic_dev_unregister(vnic->vdev);
+out_clean_bar:
+ for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) {
+ if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
+ continue;
+ if (!vnic->bar[i].vaddr)
+ break;
+
+ iounmap(vnic->bar[i].vaddr);
+ }
+
+ return err;
+}
+
+struct pci_dev *usnic_vnic_get_pdev(struct usnic_vnic *vnic)
+{
+ return vnic_dev_get_pdev(vnic->vdev);
+}
+
+struct vnic_dev_bar *usnic_vnic_get_bar(struct usnic_vnic *vnic,
+ int bar_num)
+{
+ return (bar_num < ARRAY_SIZE(vnic->bar)) ? &vnic->bar[bar_num] : NULL;
+}
+
+static void usnic_vnic_release_resources(struct usnic_vnic *vnic)
+{
+ int i;
+ struct pci_dev *pdev;
+ enum usnic_vnic_res_type res_type;
+
+ pdev = usnic_vnic_get_pdev(vnic);
+
+ for (res_type = USNIC_VNIC_RES_TYPE_EOL + 1;
+ res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++)
+ usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);
+
+ vnic_dev_unregister(vnic->vdev);
+
+ for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) {
+ if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
+ continue;
+ iounmap(vnic->bar[i].vaddr);
+ }
+}
+
+struct usnic_vnic *usnic_vnic_alloc(struct pci_dev *pdev)
+{
+ struct usnic_vnic *vnic;
+ int err = 0;
+
+ if (!pci_is_enabled(pdev)) {
+ usnic_err("PCI dev %s is disabled\n", pci_name(pdev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ vnic = kzalloc(sizeof(*vnic), GFP_KERNEL);
+ if (!vnic) {
+ usnic_err("Failed to alloc vnic for %s - out of memory\n",
+ pci_name(pdev));
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_init(&vnic->res_lock);
+
+ err = usnic_vnic_discover_resources(pdev, vnic);
+ if (err) {
+ usnic_err("Failed to discover %s resources with err %d\n",
+ pci_name(pdev), err);
+ goto out_free_vnic;
+ }
+
+ usnic_dbg("Allocated vnic for %s\n", usnic_vnic_pci_name(vnic));
+
+ return vnic;
+
+out_free_vnic:
+ kfree(vnic);
+
+ return ERR_PTR(err);
+}
+
+void usnic_vnic_free(struct usnic_vnic *vnic)
+{
+ usnic_vnic_release_resources(vnic);
+ kfree(vnic);
+}
diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.h b/drivers/infiniband/hw/usnic/usnic_vnic.h
new file mode 100644
index 000000000000..14d931a8829d
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_vnic.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_VNIC_H_
+#define USNIC_VNIC_H_
+
+#include <linux/pci.h>
+
+#include "vnic_dev.h"
+
+/* =USNIC_VNIC_RES_TYPE= =VNIC_RES= =DESC= */
+#define USNIC_VNIC_RES_TYPES \
+ DEFINE_USNIC_VNIC_RES_AT(EOL, RES_TYPE_EOL, "EOL", 0) \
+ DEFINE_USNIC_VNIC_RES(WQ, RES_TYPE_WQ, "WQ") \
+ DEFINE_USNIC_VNIC_RES(RQ, RES_TYPE_RQ, "RQ") \
+ DEFINE_USNIC_VNIC_RES(CQ, RES_TYPE_CQ, "CQ") \
+ DEFINE_USNIC_VNIC_RES(INTR, RES_TYPE_INTR_CTRL, "INT") \
+ DEFINE_USNIC_VNIC_RES(MAX, RES_TYPE_MAX, "MAX")\
+
+#define DEFINE_USNIC_VNIC_RES_AT(usnic_vnic_res_t, vnic_res_type, desc, val) \
+ USNIC_VNIC_RES_TYPE_##usnic_vnic_res_t = val,
+#define DEFINE_USNIC_VNIC_RES(usnic_vnic_res_t, vnic_res_type, desc) \
+ USNIC_VNIC_RES_TYPE_##usnic_vnic_res_t,
+enum usnic_vnic_res_type {
+ USNIC_VNIC_RES_TYPES
+};
+#undef DEFINE_USNIC_VNIC_RES
+#undef DEFINE_USNIC_VNIC_RES_AT
+
+struct usnic_vnic_res {
+ enum usnic_vnic_res_type type;
+ unsigned int vnic_idx;
+ struct usnic_vnic *vnic;
+ void __iomem *ctrl;
+ void *owner;
+};
+
+struct usnic_vnic_res_chunk {
+ enum usnic_vnic_res_type type;
+ int cnt;
+ int free_cnt;
+ struct usnic_vnic_res **res;
+ struct usnic_vnic *vnic;
+};
+
+struct usnic_vnic_res_desc {
+ enum usnic_vnic_res_type type;
+ uint16_t cnt;
+};
+
+struct usnic_vnic_res_spec {
+ struct usnic_vnic_res_desc resources[USNIC_VNIC_RES_TYPE_MAX];
+};
+
+const char *usnic_vnic_res_type_to_str(enum usnic_vnic_res_type res_type);
+const char *usnic_vnic_pci_name(struct usnic_vnic *vnic);
+int usnic_vnic_dump(struct usnic_vnic *vnic, char *buf, int buf_sz,
+ void *hdr_obj,
+ int (*printtitle)(void *, char*, int),
+ int (*printcols)(char *, int),
+ int (*printrow)(void *, char *, int));
+void usnic_vnic_res_spec_update(struct usnic_vnic_res_spec *spec,
+ enum usnic_vnic_res_type trgt_type,
+ u16 cnt);
+int usnic_vnic_res_spec_satisfied(const struct usnic_vnic_res_spec *min_spec,
+ struct usnic_vnic_res_spec *res_spec);
+int usnic_vnic_spec_dump(char *buf, int buf_sz,
+ struct usnic_vnic_res_spec *res_spec);
+int usnic_vnic_check_room(struct usnic_vnic *vnic,
+ struct usnic_vnic_res_spec *res_spec);
+int usnic_vnic_res_cnt(struct usnic_vnic *vnic,
+ enum usnic_vnic_res_type type);
+int usnic_vnic_res_free_cnt(struct usnic_vnic *vnic,
+ enum usnic_vnic_res_type type);
+struct usnic_vnic_res_chunk *
+usnic_vnic_get_resources(struct usnic_vnic *vnic,
+ enum usnic_vnic_res_type type,
+ int cnt,
+ void *owner);
+void usnic_vnic_put_resources(struct usnic_vnic_res_chunk *chunk);
+struct pci_dev *usnic_vnic_get_pdev(struct usnic_vnic *vnic);
+struct vnic_dev_bar *usnic_vnic_get_bar(struct usnic_vnic *vnic,
+ int bar_num);
+struct usnic_vnic *usnic_vnic_alloc(struct pci_dev *pdev);
+void usnic_vnic_free(struct usnic_vnic *vnic);
+u16 usnic_vnic_get_index(struct usnic_vnic *vnic);
+
+#endif /*!USNIC_VNIC_H_*/
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index d64ed05fb082..5786a78ff8bc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -104,6 +104,8 @@ int ipoib_open(struct net_device *dev)
ipoib_dbg(priv, "bringing up interface\n");
+ netif_carrier_off(dev);
+
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
if (ipoib_pkey_dev_delay_open(dev))
@@ -1366,8 +1368,6 @@ void ipoib_setup(struct net_device *dev)
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
- netif_carrier_off(dev);
-
priv->dev = dev;
spin_lock_init(&priv->lock);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index c29b5c838833..cdc7df4fdb8a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -31,6 +31,7 @@
*/
#include <linux/netdevice.h>
+#include <linux/if_arp.h> /* For ARPHRD_xxx */
#include <linux/module.h>
#include <net/rtnetlink.h>
#include "ipoib.h"
@@ -103,7 +104,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
return -EINVAL;
pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
- if (!pdev)
+ if (!pdev || pdev->type != ARPHRD_INFINIBAND)
return -ENODEV;
ppriv = netdev_priv(pdev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 049a997caff3..c56d5d44c53b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -192,6 +192,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
if (priv->hca_caps & IB_DEVICE_BLOCK_MULTICAST_LOOPBACK)
init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
+ if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
+ init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
+
if (dev->features & NETIF_F_SG)
init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 538822684d5b..334f34b1cd46 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -610,11 +610,12 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc,
ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
kmem_cache_free(ig.desc_cache, tx_desc);
+ tx_desc = NULL;
}
atomic_dec(&ib_conn->post_send_buf_count);
- if (tx_desc->type == ISCSI_TX_CONTROL) {
+ if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
/* this arithmetic is legal by libiscsi dd_data allocation */
task = (void *) ((long)(void *)tx_desc -
sizeof(struct iscsi_task));
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index afe95674008b..ca37edef2791 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -652,9 +652,13 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
/* getting here when the state is UP means that the conn is being *
* terminated asynchronously from the iSCSI layer's perspective. */
if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
- ISER_CONN_TERMINATING))
- iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
- ISCSI_ERR_CONN_FAILED);
+ ISER_CONN_TERMINATING)){
+ if (ib_conn->iser_conn)
+ iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+ ISCSI_ERR_CONN_FAILED);
+ else
+ iser_err("iscsi_iser connection isn't bound\n");
+ }
/* Complete the termination process if no posts are pending */
if (ib_conn->post_recv_buf_count == 0 &&
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6be57c38638d..d18d08a076e8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -47,10 +47,10 @@ static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct isert_rdma_wr *wr);
static void
-isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
-isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr);
+isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct isert_rdma_wr *wr);
static void
isert_qp_event_callback(struct ib_event *e, void *context)
@@ -207,7 +207,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
isert_conn->conn_rx_descs = NULL;
}
+static void isert_cq_tx_work(struct work_struct *);
static void isert_cq_tx_callback(struct ib_cq *, void *);
+static void isert_cq_rx_work(struct work_struct *);
static void isert_cq_rx_callback(struct ib_cq *, void *);
static int
@@ -225,11 +227,11 @@ isert_create_device_ib_res(struct isert_device *device)
/* asign function handlers */
if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
- device->use_frwr = 1;
- device->reg_rdma_mem = isert_reg_rdma_frwr;
- device->unreg_rdma_mem = isert_unreg_rdma_frwr;
+ device->use_fastreg = 1;
+ device->reg_rdma_mem = isert_reg_rdma;
+ device->unreg_rdma_mem = isert_unreg_rdma;
} else {
- device->use_frwr = 0;
+ device->use_fastreg = 0;
device->reg_rdma_mem = isert_map_rdma;
device->unreg_rdma_mem = isert_unmap_cmd;
}
@@ -237,9 +239,10 @@ isert_create_device_ib_res(struct isert_device *device)
device->cqs_used = min_t(int, num_online_cpus(),
device->ib_device->num_comp_vectors);
device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
- pr_debug("Using %d CQs, device %s supports %d vectors support FRWR %d\n",
+ pr_debug("Using %d CQs, device %s supports %d vectors support "
+ "Fast registration %d\n",
device->cqs_used, device->ib_device->name,
- device->ib_device->num_comp_vectors, device->use_frwr);
+ device->ib_device->num_comp_vectors, device->use_fastreg);
device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
device->cqs_used, GFP_KERNEL);
if (!device->cq_desc) {
@@ -248,47 +251,43 @@ isert_create_device_ib_res(struct isert_device *device)
}
cq_desc = device->cq_desc;
- device->dev_pd = ib_alloc_pd(ib_dev);
- if (IS_ERR(device->dev_pd)) {
- ret = PTR_ERR(device->dev_pd);
- pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
- goto out_cq_desc;
- }
-
for (i = 0; i < device->cqs_used; i++) {
cq_desc[i].device = device;
cq_desc[i].cq_index = i;
+ INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
isert_cq_rx_callback,
isert_cq_event_callback,
(void *)&cq_desc[i],
ISER_MAX_RX_CQ_LEN, i);
- if (IS_ERR(device->dev_rx_cq[i]))
+ if (IS_ERR(device->dev_rx_cq[i])) {
+ ret = PTR_ERR(device->dev_rx_cq[i]);
+ device->dev_rx_cq[i] = NULL;
goto out_cq;
+ }
+ INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
isert_cq_tx_callback,
isert_cq_event_callback,
(void *)&cq_desc[i],
ISER_MAX_TX_CQ_LEN, i);
- if (IS_ERR(device->dev_tx_cq[i]))
+ if (IS_ERR(device->dev_tx_cq[i])) {
+ ret = PTR_ERR(device->dev_tx_cq[i]);
+ device->dev_tx_cq[i] = NULL;
goto out_cq;
+ }
- if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
+ ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
+ if (ret)
goto out_cq;
- if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
+ ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+ if (ret)
goto out_cq;
}
- device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(device->dev_mr)) {
- ret = PTR_ERR(device->dev_mr);
- pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
- goto out_cq;
- }
-
return 0;
out_cq:
@@ -304,9 +303,6 @@ out_cq:
ib_destroy_cq(device->dev_tx_cq[j]);
}
}
- ib_dealloc_pd(device->dev_pd);
-
-out_cq_desc:
kfree(device->cq_desc);
return ret;
@@ -329,8 +325,6 @@ isert_free_device_ib_res(struct isert_device *device)
device->dev_tx_cq[i] = NULL;
}
- ib_dereg_mr(device->dev_mr);
- ib_dealloc_pd(device->dev_pd);
kfree(device->cq_desc);
}
@@ -386,18 +380,18 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
}
static void
-isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
+isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
struct fast_reg_descriptor *fr_desc, *tmp;
int i = 0;
- if (list_empty(&isert_conn->conn_frwr_pool))
+ if (list_empty(&isert_conn->conn_fr_pool))
return;
- pr_debug("Freeing conn %p frwr pool", isert_conn);
+ pr_debug("Freeing conn %p fastreg pool", isert_conn);
list_for_each_entry_safe(fr_desc, tmp,
- &isert_conn->conn_frwr_pool, list) {
+ &isert_conn->conn_fr_pool, list) {
list_del(&fr_desc->list);
ib_free_fast_reg_page_list(fr_desc->data_frpl);
ib_dereg_mr(fr_desc->data_mr);
@@ -405,20 +399,47 @@ isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
++i;
}
- if (i < isert_conn->conn_frwr_pool_size)
+ if (i < isert_conn->conn_fr_pool_size)
pr_warn("Pool still has %d regions registered\n",
- isert_conn->conn_frwr_pool_size - i);
+ isert_conn->conn_fr_pool_size - i);
+}
+
+static int
+isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
+ struct fast_reg_descriptor *fr_desc)
+{
+ fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
+ ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(fr_desc->data_frpl)) {
+ pr_err("Failed to allocate data frpl err=%ld\n",
+ PTR_ERR(fr_desc->data_frpl));
+ return PTR_ERR(fr_desc->data_frpl);
+ }
+
+ fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(fr_desc->data_mr)) {
+ pr_err("Failed to allocate data frmr err=%ld\n",
+ PTR_ERR(fr_desc->data_mr));
+ ib_free_fast_reg_page_list(fr_desc->data_frpl);
+ return PTR_ERR(fr_desc->data_mr);
+ }
+ pr_debug("Create fr_desc %p page_list %p\n",
+ fr_desc, fr_desc->data_frpl->page_list);
+
+ fr_desc->valid = true;
+
+ return 0;
}
static int
-isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
struct fast_reg_descriptor *fr_desc;
struct isert_device *device = isert_conn->conn_device;
int i, ret;
- INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
- isert_conn->conn_frwr_pool_size = 0;
+ INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
+ isert_conn->conn_fr_pool_size = 0;
for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
if (!fr_desc) {
@@ -427,40 +448,26 @@ isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
goto err;
}
- fr_desc->data_frpl =
- ib_alloc_fast_reg_page_list(device->ib_device,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(fr_desc->data_frpl)) {
- pr_err("Failed to allocate fr_pg_list err=%ld\n",
- PTR_ERR(fr_desc->data_frpl));
- ret = PTR_ERR(fr_desc->data_frpl);
- goto err;
- }
-
- fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(fr_desc->data_mr)) {
- pr_err("Failed to allocate frmr err=%ld\n",
- PTR_ERR(fr_desc->data_mr));
- ret = PTR_ERR(fr_desc->data_mr);
- ib_free_fast_reg_page_list(fr_desc->data_frpl);
+ ret = isert_create_fr_desc(device->ib_device,
+ isert_conn->conn_pd, fr_desc);
+ if (ret) {
+ pr_err("Failed to create fastreg descriptor err=%d\n",
+ ret);
+ kfree(fr_desc);
goto err;
}
- pr_debug("Create fr_desc %p page_list %p\n",
- fr_desc, fr_desc->data_frpl->page_list);
- fr_desc->valid = true;
- list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
- isert_conn->conn_frwr_pool_size++;
+ list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
+ isert_conn->conn_fr_pool_size++;
}
- pr_debug("Creating conn %p frwr pool size=%d",
- isert_conn, isert_conn->conn_frwr_pool_size);
+ pr_debug("Creating conn %p fastreg pool size=%d",
+ isert_conn, isert_conn->conn_fr_pool_size);
return 0;
err:
- isert_conn_free_frwr_pool(isert_conn);
+ isert_conn_free_fastreg_pool(isert_conn);
return ret;
}
@@ -546,14 +553,29 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
}
isert_conn->conn_device = device;
- isert_conn->conn_pd = device->dev_pd;
- isert_conn->conn_mr = device->dev_mr;
+ isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
+ if (IS_ERR(isert_conn->conn_pd)) {
+ ret = PTR_ERR(isert_conn->conn_pd);
+ pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
+ isert_conn, ret);
+ goto out_pd;
+ }
- if (device->use_frwr) {
- ret = isert_conn_create_frwr_pool(isert_conn);
+ isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(isert_conn->conn_mr)) {
+ ret = PTR_ERR(isert_conn->conn_mr);
+ pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
+ isert_conn, ret);
+ goto out_mr;
+ }
+
+ if (device->use_fastreg) {
+ ret = isert_conn_create_fastreg_pool(isert_conn);
if (ret) {
- pr_err("Conn: %p failed to create frwr_pool\n", isert_conn);
- goto out_frwr;
+ pr_err("Conn: %p failed to create fastreg pool\n",
+ isert_conn);
+ goto out_fastreg;
}
}
@@ -570,9 +592,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
return 0;
out_conn_dev:
- if (device->use_frwr)
- isert_conn_free_frwr_pool(isert_conn);
-out_frwr:
+ if (device->use_fastreg)
+ isert_conn_free_fastreg_pool(isert_conn);
+out_fastreg:
+ ib_dereg_mr(isert_conn->conn_mr);
+out_mr:
+ ib_dealloc_pd(isert_conn->conn_pd);
+out_pd:
isert_device_try_release(device);
out_rsp_dma_map:
ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
@@ -596,8 +622,8 @@ isert_connect_release(struct isert_conn *isert_conn)
pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
- if (device && device->use_frwr)
- isert_conn_free_frwr_pool(isert_conn);
+ if (device && device->use_fastreg)
+ isert_conn_free_fastreg_pool(isert_conn);
if (isert_conn->conn_qp) {
cq_index = ((struct isert_cq_desc *)
@@ -611,6 +637,9 @@ isert_connect_release(struct isert_conn *isert_conn)
isert_free_rx_descriptors(isert_conn);
rdma_destroy_id(isert_conn->conn_cm_id);
+ ib_dereg_mr(isert_conn->conn_mr);
+ ib_dealloc_pd(isert_conn->conn_pd);
+
if (isert_conn->login_buf) {
ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
@@ -1012,13 +1041,13 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
}
static struct iscsi_cmd
-*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
+*isert_allocate_cmd(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct isert_cmd *isert_cmd;
struct iscsi_cmd *cmd;
- cmd = iscsit_allocate_cmd(conn, gfp);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd) {
pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
return NULL;
@@ -1207,7 +1236,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
switch (opcode) {
case ISCSI_OP_SCSI_CMD:
- cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+ cmd = isert_allocate_cmd(conn);
if (!cmd)
break;
@@ -1221,7 +1250,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
rx_desc, (unsigned char *)hdr);
break;
case ISCSI_OP_NOOP_OUT:
- cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+ cmd = isert_allocate_cmd(conn);
if (!cmd)
break;
@@ -1234,7 +1263,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
(unsigned char *)hdr);
break;
case ISCSI_OP_SCSI_TMFUNC:
- cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+ cmd = isert_allocate_cmd(conn);
if (!cmd)
break;
@@ -1242,7 +1271,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
(unsigned char *)hdr);
break;
case ISCSI_OP_LOGOUT:
- cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+ cmd = isert_allocate_cmd(conn);
if (!cmd)
break;
@@ -1253,7 +1282,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
HZ);
break;
case ISCSI_OP_TEXT:
- cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+ cmd = isert_allocate_cmd(conn);
if (!cmd)
break;
@@ -1392,25 +1421,25 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
}
static void
-isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
+isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
LIST_HEAD(unmap_list);
- pr_debug("unreg_frwr_cmd: %p\n", isert_cmd);
+ pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
if (wr->fr_desc) {
- pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n",
+ pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
isert_cmd, wr->fr_desc);
spin_lock_bh(&isert_conn->conn_lock);
- list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
+ list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
spin_unlock_bh(&isert_conn->conn_lock);
wr->fr_desc = NULL;
}
if (wr->sge) {
- pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd);
+ pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
@@ -1724,7 +1753,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context)
{
struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
- INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
}
@@ -1768,7 +1796,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context)
{
struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
- INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
}
@@ -2153,26 +2180,22 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
static int
isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
- struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
- struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
+ struct isert_conn *isert_conn, struct scatterlist *sg_start,
+ struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
+ unsigned int data_len)
{
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- struct scatterlist *sg_start;
- u32 sg_off, page_off;
struct ib_send_wr fr_wr, inv_wr;
struct ib_send_wr *bad_wr, *wr = NULL;
+ int ret, pagelist_len;
+ u32 page_off;
u8 key;
- int ret, sg_nents, pagelist_len;
- sg_off = offset / PAGE_SIZE;
- sg_start = &cmd->se_cmd.t_data_sg[sg_off];
- sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
- ISCSI_ISER_SG_TABLESIZE);
+ sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
page_off = offset % PAGE_SIZE;
- pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
- isert_cmd, fr_desc, sg_nents, sg_off, offset);
+ pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
+ fr_desc, sg_nents, offset);
pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
&fr_desc->data_frpl->page_list[0]);
@@ -2222,8 +2245,8 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
}
static int
-isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr)
+isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct isert_rdma_wr *wr)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
@@ -2241,9 +2264,9 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
data_left = se_cmd->data_length;
} else {
- sg_off = cmd->write_data_done / PAGE_SIZE;
- data_left = se_cmd->data_length - cmd->write_data_done;
offset = cmd->write_data_done;
+ sg_off = offset / PAGE_SIZE;
+ data_left = se_cmd->data_length - cmd->write_data_done;
isert_cmd->tx_desc.isert_cmd = isert_cmd;
}
@@ -2301,16 +2324,16 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
wr->fr_desc = NULL;
} else {
spin_lock_irqsave(&isert_conn->conn_lock, flags);
- fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+ fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
struct fast_reg_descriptor, list);
list_del(&fr_desc->list);
spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
wr->fr_desc = fr_desc;
- ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
- ib_sge, offset, data_len);
+ ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
+ ib_sge, sg_nents, offset, data_len);
if (ret) {
- list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+ list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
goto unmap_sg;
}
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 691f90ff2d83..708a069002f3 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -119,9 +119,9 @@ struct isert_conn {
wait_queue_head_t conn_wait;
wait_queue_head_t conn_wait_comp_err;
struct kref conn_kref;
- struct list_head conn_frwr_pool;
- int conn_frwr_pool_size;
- /* lock to protect frwr_pool */
+ struct list_head conn_fr_pool;
+ int conn_fr_pool_size;
+ /* lock to protect fastreg pool */
spinlock_t conn_lock;
#define ISERT_COMP_BATCH_COUNT 8
int conn_comp_batch;
@@ -139,13 +139,11 @@ struct isert_cq_desc {
};
struct isert_device {
- int use_frwr;
+ int use_fastreg;
int cqs_used;
int refcount;
int cq_active_qps[ISERT_MAX_CQ];
struct ib_device *ib_device;
- struct ib_pd *dev_pd;
- struct ib_mr *dev_mr;
struct ib_cq *dev_rx_cq[ISERT_MAX_CQ];
struct ib_cq *dev_tx_cq[ISERT_MAX_CQ];
struct isert_cq_desc *cq_desc;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a88631918e85..529b6bcdca7a 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -660,6 +660,7 @@ static void srp_remove_target(struct srp_target_port *target)
srp_rport_get(target->rport);
srp_remove_host(target->scsi_host);
scsi_remove_host(target->scsi_host);
+ srp_stop_rport_timers(target->rport);
srp_disconnect_target(target);
ib_destroy_cm_id(target->cm_id);
srp_free_target_ib(target);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 520a7e5a490b..0e537d8d0e47 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3666,9 +3666,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
unsigned long val;
int ret;
- ret = strict_strtoul(page, 0, &val);
+ ret = kstrtoul(page, 0, &val);
if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_RDMA_SIZE) {
@@ -3706,9 +3706,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
unsigned long val;
int ret;
- ret = strict_strtoul(page, 0, &val);
+ ret = kstrtoul(page, 0, &val);
if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_RSP_SIZE) {
@@ -3746,9 +3746,9 @@ static ssize_t srpt_tpg_attrib_store_srp_sq_size(
unsigned long val;
int ret;
- ret = strict_strtoul(page, 0, &val);
+ ret = kstrtoul(page, 0, &val);
if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_SRQ_SIZE) {
@@ -3793,7 +3793,7 @@ static ssize_t srpt_tpg_store_enable(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
return -EINVAL;
diff --git a/drivers/input/gameport/emu10k1-gp.c b/drivers/input/gameport/emu10k1-gp.c
index fa7a95c1da0e..2909e9561cf3 100644
--- a/drivers/input/gameport/emu10k1-gp.c
+++ b/drivers/input/gameport/emu10k1-gp.c
@@ -30,7 +30,6 @@
#include <linux/module.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/gameport.h>
#include <linux/slab.h>
#include <linux/pci.h>
diff --git a/drivers/input/gameport/fm801-gp.c b/drivers/input/gameport/fm801-gp.c
index ae912d3aee4e..7c03114158e0 100644
--- a/drivers/input/gameport/fm801-gp.c
+++ b/drivers/input/gameport/fm801-gp.c
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/gameport.h>
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 846ccdd905b1..1c4c0db05550 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1653,35 +1653,36 @@ static void input_dev_toggle(struct input_dev *dev, bool activate)
*/
void input_reset_device(struct input_dev *dev)
{
- mutex_lock(&dev->mutex);
+ unsigned long flags;
- if (dev->users) {
- input_dev_toggle(dev, true);
+ mutex_lock(&dev->mutex);
+ spin_lock_irqsave(&dev->event_lock, flags);
- /*
- * Keys that have been pressed at suspend time are unlikely
- * to be still pressed when we resume.
- */
- spin_lock_irq(&dev->event_lock);
- input_dev_release_keys(dev);
- spin_unlock_irq(&dev->event_lock);
- }
+ input_dev_toggle(dev, true);
+ input_dev_release_keys(dev);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_reset_device);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int input_dev_suspend(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- mutex_lock(&input_dev->mutex);
+ spin_lock_irq(&input_dev->event_lock);
- if (input_dev->users)
- input_dev_toggle(input_dev, false);
+ /*
+ * Keys that are pressed now are unlikely to be
+ * still pressed when we resume.
+ */
+ input_dev_release_keys(input_dev);
+
+ /* Turn off LEDs and sounds, if any are active. */
+ input_dev_toggle(input_dev, false);
- mutex_unlock(&input_dev->mutex);
+ spin_unlock_irq(&input_dev->event_lock);
return 0;
}
@@ -1690,7 +1691,43 @@ static int input_dev_resume(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- input_reset_device(input_dev);
+ spin_lock_irq(&input_dev->event_lock);
+
+ /* Restore state of LEDs and sounds, if any were active. */
+ input_dev_toggle(input_dev, true);
+
+ spin_unlock_irq(&input_dev->event_lock);
+
+ return 0;
+}
+
+static int input_dev_freeze(struct device *dev)
+{
+ struct input_dev *input_dev = to_input_dev(dev);
+
+ spin_lock_irq(&input_dev->event_lock);
+
+ /*
+ * Keys that are pressed now are unlikely to be
+ * still pressed when we resume.
+ */
+ input_dev_release_keys(input_dev);
+
+ spin_unlock_irq(&input_dev->event_lock);
+
+ return 0;
+}
+
+static int input_dev_poweroff(struct device *dev)
+{
+ struct input_dev *input_dev = to_input_dev(dev);
+
+ spin_lock_irq(&input_dev->event_lock);
+
+ /* Turn off LEDs and sounds, if any are active. */
+ input_dev_toggle(input_dev, false);
+
+ spin_unlock_irq(&input_dev->event_lock);
return 0;
}
@@ -1698,7 +1735,8 @@ static int input_dev_resume(struct device *dev)
static const struct dev_pm_ops input_dev_pm_ops = {
.suspend = input_dev_suspend,
.resume = input_dev_resume,
- .poweroff = input_dev_suspend,
+ .freeze = input_dev_freeze,
+ .poweroff = input_dev_poweroff,
.restore = input_dev_resume,
};
#endif /* CONFIG_PM */
@@ -1707,7 +1745,7 @@ static struct device_type input_dev_type = {
.groups = input_dev_attr_groups,
.release = input_dev_release,
.uevent = input_dev_uevent,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.pm = &input_dev_pm_ops,
#endif
};
@@ -1871,6 +1909,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
break;
case EV_ABS:
+ input_alloc_absinfo(dev);
+ if (!dev->absinfo)
+ return;
+
__set_bit(code, dev->absbit);
break;
diff --git a/drivers/input/joystick/a3d.c b/drivers/input/joystick/a3d.c
index 85bc8dc07cfc..55efdfc7eb62 100644
--- a/drivers/input/joystick/a3d.c
+++ b/drivers/input/joystick/a3d.c
@@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/gameport.h>
#include <linux/input.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index 0cbfd2dfabf4..b78425765d3e 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -33,7 +33,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/gameport.h>
-#include <linux/init.h>
#include <linux/jiffies.h>
#define DRIVER_DESC "Logitech ADI joystick family driver"
diff --git a/drivers/input/joystick/cobra.c b/drivers/input/joystick/cobra.c
index 65367e44d715..ae3ee24a2368 100644
--- a/drivers/input/joystick/cobra.c
+++ b/drivers/input/joystick/cobra.c
@@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/gameport.h>
#include <linux/input.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index ab1cf2882004..0f519db64748 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/gameport.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/joystick/grip.c b/drivers/input/joystick/grip.c
index 9e1beff57c33..eac9c5b8d73e 100644
--- a/drivers/input/joystick/grip.c
+++ b/drivers/input/joystick/grip.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/gameport.h>
#include <linux/input.h>
diff --git a/drivers/input/joystick/grip_mp.c b/drivers/input/joystick/grip_mp.c
index c0f9c7b7eb4e..573191dd78e8 100644
--- a/drivers/input/joystick/grip_mp.c
+++ b/drivers/input/joystick/grip_mp.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/gameport.h>
#include <linux/input.h>
diff --git a/drivers/input/joystick/guillemot.c b/drivers/input/joystick/guillemot.c
index 55196f730af6..a9ac2f9cfce0 100644
--- a/drivers/input/joystick/guillemot.c
+++ b/drivers/input/joystick/guillemot.c
@@ -30,7 +30,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/gameport.h>
#include <linux/input.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index b1d7d9b0eb86..96ae4f5bd0eb 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/serio.h>
diff --git a/drivers/input/joystick/interact.c b/drivers/input/joystick/interact.c
index 88c22623a2e8..17c2c800743c 100644
--- a/drivers/input/joystick/interact.c
+++ b/drivers/input/joystick/interact.c
@@ -33,7 +33,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/gameport.h>
#include <linux/input.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/joystick/joydump.c b/drivers/input/joystick/joydump.c
index 7eb878bab968..d1c6e4846a4a 100644
--- a/drivers/input/joystick/joydump.c
+++ b/drivers/input/joystick/joydump.c
@@ -31,7 +31,6 @@
#include <linux/gameport.h>
#include <linux/kernel.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/slab.h>
#define DRIVER_DESC "Gameport data dumper module"
diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c
index 9fb153eef2fc..c5358ba1f571 100644
--- a/drivers/input/joystick/magellan.c
+++ b/drivers/input/joystick/magellan.c
@@ -31,7 +31,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Magellan and SpaceMouse 6dof controller driver"
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index 04c69af37148..4a95b224169f 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/gameport.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index 80a7b27a457a..f4445a4e8d6a 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -33,7 +33,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/serio.h>
diff --git a/drivers/input/joystick/spaceorb.c b/drivers/input/joystick/spaceorb.c
index a41f291652e6..f2667820e8c5 100644
--- a/drivers/input/joystick/spaceorb.c
+++ b/drivers/input/joystick/spaceorb.c
@@ -32,7 +32,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/serio.h>
diff --git a/drivers/input/joystick/stinger.c b/drivers/input/joystick/stinger.c
index 0f51a60e14a7..099c6d7b5e08 100644
--- a/drivers/input/joystick/stinger.c
+++ b/drivers/input/joystick/stinger.c
@@ -32,7 +32,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Gravis Stinger gamepad driver"
diff --git a/drivers/input/joystick/tmdc.c b/drivers/input/joystick/tmdc.c
index 5ef9bcdb0345..7e17cde464f0 100644
--- a/drivers/input/joystick/tmdc.c
+++ b/drivers/input/joystick/tmdc.c
@@ -33,7 +33,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/gameport.h>
#include <linux/input.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/joystick/twidjoy.c b/drivers/input/joystick/twidjoy.c
index 2556a8193579..7f7e5ab3f9e3 100644
--- a/drivers/input/joystick/twidjoy.c
+++ b/drivers/input/joystick/twidjoy.c
@@ -52,7 +52,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Handykey Twiddler keyboard as a joystick driver"
diff --git a/drivers/input/joystick/warrior.c b/drivers/input/joystick/warrior.c
index 23b3071abb6e..e13a9144a25d 100644
--- a/drivers/input/joystick/warrior.c
+++ b/drivers/input/joystick/warrior.c
@@ -31,7 +31,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Logitech WingMan Warrior joystick driver"
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 75e3b102ce45..603fe0dd3682 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -74,7 +74,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/module.h>
@@ -125,6 +124,8 @@ static const struct xpad_device {
{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+ { 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
+ { 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 },
{ 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX },
{ 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX },
@@ -166,8 +167,8 @@ static const struct xpad_device {
{ 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 },
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
- { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x1689, 0xfd01, "Razer Onza Classic Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
+ { 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
diff --git a/drivers/input/joystick/zhenhua.c b/drivers/input/joystick/zhenhua.c
index c4de4388fd7f..30af2e8c670c 100644
--- a/drivers/input/joystick/zhenhua.c
+++ b/drivers/input/joystick/zhenhua.c
@@ -49,7 +49,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "RC transmitter with 5-byte Zhen Hua protocol joystick driver"
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index bb174c1a9886..a673c9f3a0b9 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -525,7 +525,7 @@ config KEYBOARD_SUNKBD
config KEYBOARD_SH_KEYSC
tristate "SuperH KEYSC keypad support"
- depends on SUPERH || ARM || COMPILE_TEST
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
help
Say Y here if you want to use a keypad attached to the KEYSC block
on SuperH processors such as sh7722 and sh7343.
diff --git a/drivers/input/keyboard/adp5520-keys.c b/drivers/input/keyboard/adp5520-keys.c
index ef26b17fb159..4cc14c2fa7d5 100644
--- a/drivers/input/keyboard/adp5520-keys.c
+++ b/drivers/input/keyboard/adp5520-keys.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/mfd/adp5520.h>
@@ -71,7 +70,7 @@ static int adp5520_keys_notifier(struct notifier_block *nb,
static int adp5520_keys_probe(struct platform_device *pdev)
{
- struct adp5520_keys_platform_data *pdata = pdev->dev.platform_data;
+ struct adp5520_keys_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct input_dev *input;
struct adp5520_keys *dev;
int ret, i;
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 3ed23513d881..bb3b57bea8ba 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -9,7 +9,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/workqueue.h>
@@ -173,7 +172,7 @@ static int adp5588_build_gpiomap(struct adp5588_kpad *kpad,
static int adp5588_gpio_add(struct adp5588_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
- const struct adp5588_kpad_platform_data *pdata = dev->platform_data;
+ const struct adp5588_kpad_platform_data *pdata = dev_get_platdata(dev);
const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
int i, error;
@@ -227,7 +226,7 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
static void adp5588_gpio_remove(struct adp5588_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
- const struct adp5588_kpad_platform_data *pdata = dev->platform_data;
+ const struct adp5588_kpad_platform_data *pdata = dev_get_platdata(dev);
const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
int error;
@@ -321,7 +320,8 @@ static irqreturn_t adp5588_irq(int irq, void *handle)
static int adp5588_setup(struct i2c_client *client)
{
- const struct adp5588_kpad_platform_data *pdata = client->dev.platform_data;
+ const struct adp5588_kpad_platform_data *pdata =
+ dev_get_platdata(&client->dev);
const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
int i, ret;
unsigned char evt_mode1 = 0, evt_mode2 = 0, evt_mode3 = 0;
@@ -424,7 +424,8 @@ static int adp5588_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adp5588_kpad *kpad;
- const struct adp5588_kpad_platform_data *pdata = client->dev.platform_data;
+ const struct adp5588_kpad_platform_data *pdata =
+ dev_get_platdata(&client->dev);
struct input_dev *input;
unsigned int revid;
int ret, i;
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 60dafd4fa692..6329549bf6ad 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -8,7 +8,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/workqueue.h>
@@ -499,7 +498,7 @@ static int adp5589_build_gpiomap(struct adp5589_kpad *kpad,
static int adp5589_gpio_add(struct adp5589_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
- const struct adp5589_kpad_platform_data *pdata = dev->platform_data;
+ const struct adp5589_kpad_platform_data *pdata = dev_get_platdata(dev);
const struct adp5589_gpio_platform_data *gpio_data = pdata->gpio_data;
int i, error;
@@ -553,7 +552,7 @@ static int adp5589_gpio_add(struct adp5589_kpad *kpad)
static void adp5589_gpio_remove(struct adp5589_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
- const struct adp5589_kpad_platform_data *pdata = dev->platform_data;
+ const struct adp5589_kpad_platform_data *pdata = dev_get_platdata(dev);
const struct adp5589_gpio_platform_data *gpio_data = pdata->gpio_data;
int error;
@@ -658,7 +657,7 @@ static int adp5589_setup(struct adp5589_kpad *kpad)
{
struct i2c_client *client = kpad->client;
const struct adp5589_kpad_platform_data *pdata =
- client->dev.platform_data;
+ dev_get_platdata(&client->dev);
u8 (*reg) (u8) = kpad->var->reg;
unsigned char evt_mode1 = 0, evt_mode2 = 0, evt_mode3 = 0;
unsigned char pull_mask = 0;
@@ -864,7 +863,7 @@ static int adp5589_probe(struct i2c_client *client,
{
struct adp5589_kpad *kpad;
const struct adp5589_kpad_platform_data *pdata =
- client->dev.platform_data;
+ dev_get_platdata(&client->dev);
struct input_dev *input;
unsigned int revid;
int ret, i;
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index 09b91d093087..e6d46c5994d7 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -30,7 +30,6 @@
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -180,7 +179,7 @@ static irqreturn_t bfin_kpad_isr(int irq, void *dev_id)
static int bfin_kpad_probe(struct platform_device *pdev)
{
struct bf54x_kpad *bf54x_kpad;
- struct bfin_kpad_platform_data *pdata = pdev->dev.platform_data;
+ struct bfin_kpad_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct input_dev *input;
int i, error;
@@ -333,7 +332,7 @@ out:
static int bfin_kpad_remove(struct platform_device *pdev)
{
- struct bfin_kpad_platform_data *pdata = pdev->dev.platform_data;
+ struct bfin_kpad_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct bf54x_kpad *bf54x_kpad = platform_get_drvdata(pdev);
del_timer_sync(&bf54x_kpad->timer);
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 7e8b0a52af25..408379669d3c 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -38,6 +38,7 @@
* @row_shift: log2 or number of rows, rounded up
* @keymap_data: Matrix keymap data used to convert to keyscan values
* @ghost_filter: true to enable the matrix key-ghosting filter
+ * @old_kb_state: bitmap of keys pressed last scan
* @dev: Device pointer
* @idev: Input device
* @ec: Top level ChromeOS device to use to talk to EC
@@ -49,6 +50,7 @@ struct cros_ec_keyb {
int row_shift;
const struct matrix_keymap_data *keymap_data;
bool ghost_filter;
+ uint8_t *old_kb_state;
struct device *dev;
struct input_dev *idev;
@@ -135,6 +137,7 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev,
struct input_dev *idev = ckdev->idev;
int col, row;
int new_state;
+ int old_state;
int num_cols;
num_cols = len;
@@ -153,18 +156,19 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev,
for (row = 0; row < ckdev->rows; row++) {
int pos = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
const unsigned short *keycodes = idev->keycode;
- int code;
- code = keycodes[pos];
new_state = kb_state[col] & (1 << row);
- if (!!new_state != test_bit(code, idev->key)) {
+ old_state = ckdev->old_kb_state[col] & (1 << row);
+ if (new_state != old_state) {
dev_dbg(ckdev->dev,
"changed: [r%d c%d]: byte %02x\n",
row, col, new_state);
- input_report_key(idev, code, new_state);
+ input_report_key(idev, keycodes[pos],
+ new_state);
}
}
+ ckdev->old_kb_state[col] = kb_state[col];
}
input_sync(ckdev->idev);
}
@@ -226,6 +230,9 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
&ckdev->cols);
if (err)
return err;
+ ckdev->old_kb_state = devm_kzalloc(&pdev->dev, ckdev->cols, GFP_KERNEL);
+ if (!ckdev->old_kb_state)
+ return -ENOMEM;
idev = devm_input_allocate_device(&pdev->dev);
if (!idev)
diff --git a/drivers/input/keyboard/davinci_keyscan.c b/drivers/input/keyboard/davinci_keyscan.c
index d15977a8361e..1559dc1cf951 100644
--- a/drivers/input/keyboard/davinci_keyscan.c
+++ b/drivers/input/keyboard/davinci_keyscan.c
@@ -172,7 +172,7 @@ static int __init davinci_ks_probe(struct platform_device *pdev)
struct input_dev *key_dev;
struct resource *res, *mem;
struct device *dev = &pdev->dev;
- struct davinci_ks_platform_data *pdata = pdev->dev.platform_data;
+ struct davinci_ks_platform_data *pdata = dev_get_platdata(&pdev->dev);
int error, i;
if (pdata->device_enable) {
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 47206bdba411..e59876212b8c 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -244,7 +244,7 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
if (!keypad)
return -ENOMEM;
- keypad->pdata = pdev->dev.platform_data;
+ keypad->pdata = dev_get_platdata(&pdev->dev);
if (!keypad->pdata) {
err = -EINVAL;
goto failed_free;
diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c
index 9f60a2ec88db..69e854763370 100644
--- a/drivers/input/keyboard/goldfish_events.c
+++ b/drivers/input/keyboard/goldfish_events.c
@@ -14,7 +14,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/input.h>
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 4e428199e580..e571e194ff84 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input-polldev.h>
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index 589e3c258f3f..610a8af795a1 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -36,7 +36,6 @@
#include <linux/serio.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index 328cfc1eed95..cbf4f8038cba 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -13,7 +13,6 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/input/matrix_keypad.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -425,7 +424,8 @@ MODULE_DEVICE_TABLE(of, imx_keypad_of_match);
static int imx_keypad_probe(struct platform_device *pdev)
{
- const struct matrix_keymap_data *keymap_data = pdev->dev.platform_data;
+ const struct matrix_keymap_data *keymap_data =
+ dev_get_platdata(&pdev->dev);
struct imx_keypad *keypad;
struct input_dev *input_dev;
struct resource *res;
diff --git a/drivers/input/keyboard/jornada680_kbd.c b/drivers/input/keyboard/jornada680_kbd.c
index a2a034c25f0b..69b1f002ff52 100644
--- a/drivers/input/keyboard/jornada680_kbd.c
+++ b/drivers/input/keyboard/jornada680_kbd.c
@@ -16,7 +16,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/input-polldev.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/keyboard/jornada720_kbd.c b/drivers/input/keyboard/jornada720_kbd.c
index b0ad457ca9d8..cd729d485e98 100644
--- a/drivers/input/keyboard/jornada720_kbd.c
+++ b/drivers/input/keyboard/jornada720_kbd.c
@@ -18,7 +18,6 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index fc0a63c2f278..9fcd9f1d5dc8 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -65,7 +65,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/workqueue.h>
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 0de23f41b2d3..0b42118cbf8f 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -627,7 +627,7 @@ static DEVICE_ATTR(disable_kp, 0644, lm8323_show_disable, lm8323_set_disable);
static int lm8323_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct lm8323_platform_data *pdata = client->dev.platform_data;
+ struct lm8323_platform_data *pdata = dev_get_platdata(&client->dev);
struct input_dev *idev;
struct lm8323_chip *lm;
int pwm;
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index 5a8ca35dc9af..9081cbef11ea 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -131,7 +131,8 @@ static irqreturn_t lm8333_irq_thread(int irq, void *data)
static int lm8333_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct lm8333_platform_data *pdata = client->dev.platform_data;
+ const struct lm8333_platform_data *pdata =
+ dev_get_platdata(&client->dev);
struct lm8333 *lm8333;
struct input_dev *input;
int err, active_time;
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 90ff73ace424..8d2e19e81e1e 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -14,7 +14,6 @@
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/keyboard/max7359_keypad.c b/drivers/input/keyboard/max7359_keypad.c
index bc2cdaf563fd..430b54539720 100644
--- a/drivers/input/keyboard/max7359_keypad.c
+++ b/drivers/input/keyboard/max7359_keypad.c
@@ -182,7 +182,8 @@ static void max7359_initialize(struct i2c_client *client)
static int max7359_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct matrix_keymap_data *keymap_data = client->dev.platform_data;
+ const struct matrix_keymap_data *keymap_data =
+ dev_get_platdata(&client->dev);
struct max7359_keypad *keypad;
struct input_dev *input_dev;
int ret;
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 7c236f9c6a51..1da8e0b44b56 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/mcs.h>
#include <linux/interrupt.h>
@@ -108,7 +107,7 @@ static int mcs_touchkey_probe(struct i2c_client *client,
int error;
int i;
- pdata = client->dev.platform_data;
+ pdata = dev_get_platdata(&client->dev);
if (!pdata) {
dev_err(&client->dev, "no platform data defined\n");
return -EINVAL;
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index f7f3e9a9fd3f..009c82256e89 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -13,7 +13,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/slab.h>
@@ -188,7 +187,8 @@ err_i2c_write:
static int mpr_touchkey_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct mpr121_platform_data *pdata = client->dev.platform_data;
+ const struct mpr121_platform_data *pdata =
+ dev_get_platdata(&client->dev);
struct mpr121_touchkey *mpr121;
struct input_dev *input_dev;
int error;
diff --git a/drivers/input/keyboard/newtonkbd.c b/drivers/input/keyboard/newtonkbd.c
index f971898ad591..20f044377990 100644
--- a/drivers/input/keyboard/newtonkbd.c
+++ b/drivers/input/keyboard/newtonkbd.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/input.h>
-#include <linux/init.h>
#include <linux/serio.h>
#define DRIVER_DESC "Newton keyboard driver"
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index c7d505cce72f..63332e2f8628 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -222,7 +222,8 @@ static irqreturn_t ske_keypad_irq(int irq, void *dev_id)
static int __init ske_keypad_probe(struct platform_device *pdev)
{
- const struct ske_keypad_platform_data *plat = pdev->dev.platform_data;
+ const struct ske_keypad_platform_data *plat =
+ dev_get_platdata(&pdev->dev);
struct ske_keypad *keypad;
struct input_dev *input;
struct resource *res;
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index d0d5226d9cd4..b1acc9852eb7 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -25,7 +25,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/input.h>
@@ -248,7 +247,7 @@ static int omap_kp_probe(struct platform_device *pdev)
{
struct omap_kp *omap_kp;
struct input_dev *input_dev;
- struct omap_kp_platform_data *pdata = pdev->dev.platform_data;
+ struct omap_kp_platform_data *pdata = dev_get_platdata(&pdev->dev);
int i, col_idx, row_idx, ret;
unsigned int row_shift, keycodemax;
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 30acfd49fa6c..0400b3f2b4b9 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -22,7 +22,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 186138c720c7..d8241ba0afa0 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/device.h>
diff --git a/drivers/input/keyboard/pxa930_rotary.c b/drivers/input/keyboard/pxa930_rotary.c
index 248cdcf95296..374ca0246c8f 100644
--- a/drivers/input/keyboard/pxa930_rotary.c
+++ b/drivers/input/keyboard/pxa930_rotary.c
@@ -8,7 +8,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/platform_device.h>
@@ -84,7 +83,8 @@ static void pxa930_rotary_close(struct input_dev *dev)
static int pxa930_rotary_probe(struct platform_device *pdev)
{
- struct pxa930_rotary_platform_data *pdata = pdev->dev.platform_data;
+ struct pxa930_rotary_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
struct pxa930_rotary *r;
struct input_dev *input_dev;
struct resource *res;
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index 6c561ec3cc09..52cd6e88acd7 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -25,7 +25,6 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/slab.h>
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 1c0ddad0a1cc..819b22897c13 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -19,7 +19,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index ac43a486c775..5e80fbf7b5ed 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -14,7 +14,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -244,8 +243,8 @@ static void samsung_keypad_close(struct input_dev *input_dev)
}
#ifdef CONFIG_OF
-static struct samsung_keypad_platdata *samsung_keypad_parse_dt(
- struct device *dev)
+static struct samsung_keypad_platdata *
+samsung_keypad_parse_dt(struct device *dev)
{
struct samsung_keypad_platdata *pdata;
struct matrix_keymap_data *keymap_data;
@@ -253,17 +252,22 @@ static struct samsung_keypad_platdata *samsung_keypad_parse_dt(
struct device_node *np = dev->of_node, *key_np;
unsigned int key_count;
+ if (!np) {
+ dev_err(dev, "missing device tree data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(dev, "could not allocate memory for platform data\n");
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
of_property_read_u32(np, "samsung,keypad-num-rows", &num_rows);
of_property_read_u32(np, "samsung,keypad-num-columns", &num_cols);
if (!num_rows || !num_cols) {
dev_err(dev, "number of keypad rows/columns not specified\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
pdata->rows = num_rows;
pdata->cols = num_cols;
@@ -271,7 +275,7 @@ static struct samsung_keypad_platdata *samsung_keypad_parse_dt(
keymap_data = devm_kzalloc(dev, sizeof(*keymap_data), GFP_KERNEL);
if (!keymap_data) {
dev_err(dev, "could not allocate memory for keymap data\n");
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
pdata->keymap_data = keymap_data;
@@ -280,7 +284,7 @@ static struct samsung_keypad_platdata *samsung_keypad_parse_dt(
keymap = devm_kzalloc(dev, sizeof(uint32_t) * key_count, GFP_KERNEL);
if (!keymap) {
dev_err(dev, "could not allocate memory for keymap\n");
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
keymap_data->keymap = keymap;
@@ -294,16 +298,19 @@ static struct samsung_keypad_platdata *samsung_keypad_parse_dt(
if (of_get_property(np, "linux,input-no-autorepeat", NULL))
pdata->no_autorepeat = true;
+
if (of_get_property(np, "linux,input-wakeup", NULL))
pdata->wakeup = true;
return pdata;
}
#else
-static
-struct samsung_keypad_platdata *samsung_keypad_parse_dt(struct device *dev)
+static struct samsung_keypad_platdata *
+samsung_keypad_parse_dt(struct device *dev)
{
- return NULL;
+ dev_err(dev, "no platform data defined\n");
+
+ return ERR_PTR(-EINVAL);
}
#endif
@@ -318,13 +325,11 @@ static int samsung_keypad_probe(struct platform_device *pdev)
unsigned int keymap_size;
int error;
- if (pdev->dev.of_node)
- pdata = samsung_keypad_parse_dt(&pdev->dev);
- else
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
- dev_err(&pdev->dev, "no platform data defined\n");
- return -EINVAL;
+ pdata = samsung_keypad_parse_dt(&pdev->dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
}
keymap_data = pdata->keymap_data;
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index fe0e498d2479..7abf03b4cc9c 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/delay.h>
@@ -171,7 +170,7 @@ static int sh_keysc_probe(struct platform_device *pdev)
int i;
int irq, error;
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform data defined\n");
error = -EINVAL;
goto err0;
@@ -198,7 +197,7 @@ static int sh_keysc_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, priv);
- memcpy(&priv->pdata, pdev->dev.platform_data, sizeof(priv->pdata));
+ memcpy(&priv->pdata, dev_get_platdata(&pdev->dev), sizeof(priv->pdata));
pdata = &priv->pdata;
priv->iomem_base = ioremap_nocache(res->start, resource_size(res));
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index 85ff530d9a91..258af10e5811 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -12,7 +12,6 @@
#include <linux/clk.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/io.h>
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index 5cbec56f7720..c6727dda68f2 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -6,7 +6,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/keyboard/stowaway.c b/drivers/input/keyboard/stowaway.c
index cc612c5d5427..a6e0d565e306 100644
--- a/drivers/input/keyboard/stowaway.c
+++ b/drivers/input/keyboard/stowaway.c
@@ -32,7 +32,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/input.h>
-#include <linux/init.h>
#include <linux/serio.h>
#define DRIVER_DESC "Stowaway keyboard driver"
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index 5f836b1638c1..dc6bb9d5b4f0 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -31,7 +31,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/workqueue.h>
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 208de7cbb7fa..74494a357522 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -10,7 +10,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/platform_device.h>
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index bfc832c35a7c..dc983ab6c0ad 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -213,7 +213,7 @@ static int tca6416_keypad_probe(struct i2c_client *client,
return -ENODEV;
}
- pdata = client->dev.platform_data;
+ pdata = dev_get_platdata(&client->dev);
if (!pdata) {
dev_dbg(&client->dev, "no platform data\n");
return -EINVAL;
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 8508879f6faf..9757a58bc897 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -31,7 +31,7 @@
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/input/matrix_keypad.h>
-#include <linux/clk/tegra.h>
+#include <linux/reset.h>
#include <linux/err.h>
#define KBC_MAX_KPENT 8
@@ -116,6 +116,7 @@ struct tegra_kbc {
u32 wakeup_key;
struct timer_list timer;
struct clk *clk;
+ struct reset_control *rst;
const struct tegra_kbc_hw_support *hw_support;
int max_keys;
int num_rows_and_columns;
@@ -373,9 +374,9 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
clk_prepare_enable(kbc->clk);
/* Reset the KBC controller to clear all previous status.*/
- tegra_periph_reset_assert(kbc->clk);
+ reset_control_assert(kbc->rst);
udelay(100);
- tegra_periph_reset_deassert(kbc->clk);
+ reset_control_deassert(kbc->rst);
udelay(100);
tegra_kbc_config_pins(kbc);
@@ -663,6 +664,12 @@ static int tegra_kbc_probe(struct platform_device *pdev)
return PTR_ERR(kbc->clk);
}
+ kbc->rst = devm_reset_control_get(&pdev->dev, "kbc");
+ if (IS_ERR(kbc->rst)) {
+ dev_err(&pdev->dev, "failed to get keyboard reset\n");
+ return PTR_ERR(kbc->rst);
+ }
+
/*
* The time delay between two consecutive reads of the FIFO is
* the sum of the repeat time and the time taken for scanning
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index 8bd24d52bf1b..086511c2121b 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -162,7 +162,7 @@ static int keypad_probe(struct platform_device *pdev)
int error = 0, sz, row_shift;
u32 rev = 0;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(dev, "cannot find device data\n");
return -EINVAL;
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index d2d178c84ea7..c5a11700a1bf 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -27,12 +27,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/i2c/twl.h>
#include <linux/slab.h>
+#include <linux/of.h>
/*
* The TWL4030 family chips include a keypad controller that supports
@@ -60,6 +60,7 @@
struct twl4030_keypad {
unsigned short keymap[TWL4030_KEYMAP_SIZE];
u16 kp_state[TWL4030_MAX_ROWS];
+ bool autorepeat;
unsigned n_rows;
unsigned n_cols;
unsigned irq;
@@ -330,70 +331,89 @@ static int twl4030_kp_program(struct twl4030_keypad *kp)
*/
static int twl4030_kp_probe(struct platform_device *pdev)
{
- struct twl4030_keypad_data *pdata = pdev->dev.platform_data;
- const struct matrix_keymap_data *keymap_data;
+ struct twl4030_keypad_data *pdata = dev_get_platdata(&pdev->dev);
+ const struct matrix_keymap_data *keymap_data = NULL;
struct twl4030_keypad *kp;
struct input_dev *input;
u8 reg;
int error;
- if (!pdata || !pdata->rows || !pdata->cols || !pdata->keymap_data ||
- pdata->rows > TWL4030_MAX_ROWS || pdata->cols > TWL4030_MAX_COLS) {
- dev_err(&pdev->dev, "Invalid platform_data\n");
- return -EINVAL;
- }
+ kp = devm_kzalloc(&pdev->dev, sizeof(*kp), GFP_KERNEL);
+ if (!kp)
+ return -ENOMEM;
- keymap_data = pdata->keymap_data;
-
- kp = kzalloc(sizeof(*kp), GFP_KERNEL);
- input = input_allocate_device();
- if (!kp || !input) {
- error = -ENOMEM;
- goto err1;
- }
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
- /* Get the debug Device */
- kp->dbg_dev = &pdev->dev;
- kp->input = input;
-
- kp->n_rows = pdata->rows;
- kp->n_cols = pdata->cols;
- kp->irq = platform_get_irq(pdev, 0);
+ /* get the debug device */
+ kp->dbg_dev = &pdev->dev;
+ kp->input = input;
/* setup input device */
input->name = "TWL4030 Keypad";
input->phys = "twl4030_keypad/input0";
- input->dev.parent = &pdev->dev;
input->id.bustype = BUS_HOST;
input->id.vendor = 0x0001;
input->id.product = 0x0001;
input->id.version = 0x0003;
+ if (pdata) {
+ if (!pdata->rows || !pdata->cols || !pdata->keymap_data) {
+ dev_err(&pdev->dev, "Missing platform_data\n");
+ return -EINVAL;
+ }
+
+ kp->n_rows = pdata->rows;
+ kp->n_cols = pdata->cols;
+ kp->autorepeat = pdata->rep;
+ keymap_data = pdata->keymap_data;
+ } else {
+ error = matrix_keypad_parse_of_params(&pdev->dev, &kp->n_rows,
+ &kp->n_cols);
+ if (error)
+ return error;
+
+ kp->autorepeat = true;
+ }
+
+ if (kp->n_rows > TWL4030_MAX_ROWS || kp->n_cols > TWL4030_MAX_COLS) {
+ dev_err(&pdev->dev,
+ "Invalid rows/cols amount specified in platform/devicetree data\n");
+ return -EINVAL;
+ }
+
+ kp->irq = platform_get_irq(pdev, 0);
+ if (!kp->irq) {
+ dev_err(&pdev->dev, "no keyboard irq assigned\n");
+ return -EINVAL;
+ }
+
error = matrix_keypad_build_keymap(keymap_data, NULL,
TWL4030_MAX_ROWS,
1 << TWL4030_ROW_SHIFT,
kp->keymap, input);
if (error) {
dev_err(kp->dbg_dev, "Failed to build keymap\n");
- goto err1;
+ return error;
}
input_set_capability(input, EV_MSC, MSC_SCAN);
/* Enable auto repeat feature of Linux input subsystem */
- if (pdata->rep)
+ if (kp->autorepeat)
__set_bit(EV_REP, input->evbit);
error = input_register_device(input);
if (error) {
dev_err(kp->dbg_dev,
"Unable to register twl4030 keypad device\n");
- goto err1;
+ return error;
}
error = twl4030_kp_program(kp);
if (error)
- goto err2;
+ return error;
/*
* This ISR will always execute in kernel thread context because of
@@ -401,47 +421,33 @@ static int twl4030_kp_probe(struct platform_device *pdev)
*
* NOTE: we assume this host is wired to TWL4040 INT1, not INT2 ...
*/
- error = request_threaded_irq(kp->irq, NULL, do_kp_irq,
- 0, pdev->name, kp);
+ error = devm_request_threaded_irq(&pdev->dev, kp->irq, NULL, do_kp_irq,
+ 0, pdev->name, kp);
if (error) {
- dev_info(kp->dbg_dev, "request_irq failed for irq no=%d\n",
- kp->irq);
- goto err2;
+ dev_info(kp->dbg_dev, "request_irq failed for irq no=%d: %d\n",
+ kp->irq, error);
+ return error;
}
/* Enable KP and TO interrupts now. */
reg = (u8) ~(KEYP_IMR1_KP | KEYP_IMR1_TO);
if (twl4030_kpwrite_u8(kp, reg, KEYP_IMR1)) {
- error = -EIO;
- goto err3;
+ /* mask all events - we don't care about the result */
+ (void) twl4030_kpwrite_u8(kp, 0xff, KEYP_IMR1);
+ return -EIO;
}
platform_set_drvdata(pdev, kp);
return 0;
-
-err3:
- /* mask all events - we don't care about the result */
- (void) twl4030_kpwrite_u8(kp, 0xff, KEYP_IMR1);
- free_irq(kp->irq, kp);
-err2:
- input_unregister_device(input);
- input = NULL;
-err1:
- input_free_device(input);
- kfree(kp);
- return error;
}
-static int twl4030_kp_remove(struct platform_device *pdev)
-{
- struct twl4030_keypad *kp = platform_get_drvdata(pdev);
-
- free_irq(kp->irq, kp);
- input_unregister_device(kp->input);
- kfree(kp);
-
- return 0;
-}
+#ifdef CONFIG_OF
+static const struct of_device_id twl4030_keypad_dt_match_table[] = {
+ { .compatible = "ti,twl4030-keypad" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, twl4030_keypad_dt_match_table);
+#endif
/*
* NOTE: twl4030 are multi-function devices connected via I2C.
@@ -451,10 +457,10 @@ static int twl4030_kp_remove(struct platform_device *pdev)
static struct platform_driver twl4030_kp_driver = {
.probe = twl4030_kp_probe,
- .remove = twl4030_kp_remove,
.driver = {
.name = "twl4030_keypad",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(twl4030_keypad_dt_match_table),
},
};
module_platform_driver(twl4030_kp_driver);
diff --git a/drivers/input/keyboard/w90p910_keypad.c b/drivers/input/keyboard/w90p910_keypad.c
index 7b039162a3f8..e8b9d94daae7 100644
--- a/drivers/input/keyboard/w90p910_keypad.c
+++ b/drivers/input/keyboard/w90p910_keypad.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/device.h>
@@ -121,7 +120,7 @@ static void w90p910_keypad_close(struct input_dev *dev)
static int w90p910_keypad_probe(struct platform_device *pdev)
{
const struct w90p910_keypad_platform_data *pdata =
- pdev->dev.platform_data;
+ dev_get_platdata(&pdev->dev);
const struct matrix_keymap_data *keymap_data;
struct w90p910_keypad *keypad;
struct input_dev *input_dev;
diff --git a/drivers/input/keyboard/xtkbd.c b/drivers/input/keyboard/xtkbd.c
index d050d9d0011b..7c2325bd7408 100644
--- a/drivers/input/keyboard/xtkbd.c
+++ b/drivers/input/keyboard/xtkbd.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/input.h>
-#include <linux/init.h>
#include <linux/serio.h>
#define DRIVER_DESC "XT keyboard driver"
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 5f4967d01bc3..7904ab05527a 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -168,7 +168,7 @@ config INPUT_MAX8997_HAPTIC
config INPUT_MC13783_PWRBUTTON
tristate "MC13783 ON buttons"
- depends on MFD_MC13783
+ depends on MFD_MC13XXX
help
Support the ON buttons of MC13783 PMIC as an input device
reporting power button status.
@@ -222,6 +222,15 @@ config INPUT_GP2A
To compile this driver as a module, choose M here: the
module will be called gp2ap002a00f.
+config INPUT_GPIO_BEEPER
+ tristate "Generic GPIO Beeper support"
+ depends on OF_GPIO
+ help
+ Say Y here if you have a beeper connected to a GPIO pin.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gpio-beeper.
+
config INPUT_GPIO_TILT_POLLED
tristate "Polled GPIO tilt switch"
depends on GPIOLIB
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 0ebfb6dbf0f7..cda71fc52fb3 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_INPUT_DA9052_ONKEY) += da9052_onkey.o
obj-$(CONFIG_INPUT_DA9055_ONKEY) += da9055_onkey.o
obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
+obj-$(CONFIG_INPUT_GPIO_BEEPER) += gpio-beeper.o
obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c
index 2e5d5e1de647..7a61e9ee682c 100644
--- a/drivers/input/misc/ad714x.c
+++ b/drivers/input/misc/ad714x.c
@@ -7,7 +7,6 @@
*/
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
@@ -969,7 +968,7 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
int error;
struct input_dev *input[MAX_DEVICE_NUM];
- struct ad714x_platform_data *plat_data = dev->platform_data;
+ struct ad714x_platform_data *plat_data = dev_get_platdata(dev);
struct ad714x_chip *ad714x;
void *drv_mem;
unsigned long irqflags;
@@ -986,7 +985,7 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
goto err_out;
}
- if (dev->platform_data == NULL) {
+ if (dev_get_platdata(dev) == NULL) {
dev_err(dev, "platform data for ad714x doesn't exist\n");
error = -EINVAL;
goto err_out;
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 1cb1da294419..2b2d02f408bb 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -8,7 +8,6 @@
*/
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
@@ -714,7 +713,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
ac->fifo_delay = fifo_delay_default;
- pdata = dev->platform_data;
+ pdata = dev_get_platdata(dev);
if (!pdata) {
dev_dbg(dev,
"No platform data: Using default initialization\n");
diff --git a/drivers/input/misc/atlas_btns.c b/drivers/input/misc/atlas_btns.c
index 5d4402365a52..638165c78e75 100644
--- a/drivers/input/misc/atlas_btns.c
+++ b/drivers/input/misc/atlas_btns.c
@@ -25,11 +25,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/types.h>
+#include <linux/acpi.h>
#include <asm/uaccess.h>
-#include <acpi/acpi_drivers.h>
#define ACPI_ATLAS_NAME "Atlas ACPI"
#define ACPI_ATLAS_CLASS "Atlas"
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index cd139cb17e32..e69d9bcb37e1 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -6,7 +6,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pm.h>
@@ -92,7 +91,7 @@ static irqreturn_t bfin_rotary_isr(int irq, void *dev_id)
static int bfin_rotary_probe(struct platform_device *pdev)
{
- struct bfin_rotary_platform_data *pdata = pdev->dev.platform_data;
+ struct bfin_rotary_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct bfin_rot *rotary;
struct input_dev *input;
int error;
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 865c2f9d25b9..52d3a9b28f0b 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -526,7 +526,8 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
static int bma150_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct bma150_platform_data *pdata = client->dev.platform_data;
+ const struct bma150_platform_data *pdata =
+ dev_get_platdata(&client->dev);
const struct bma150_cfg *cfg;
struct bma150_data *bma150;
int chip_id;
diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
index df9b756594f8..c7d00748277b 100644
--- a/drivers/input/misc/cma3000_d0x.c
+++ b/drivers/input/misc/cma3000_d0x.c
@@ -284,7 +284,7 @@ EXPORT_SYMBOL(cma3000_resume);
struct cma3000_accl_data *cma3000_init(struct device *dev, int irq,
const struct cma3000_bus_ops *bops)
{
- const struct cma3000_platform_data *pdata = dev->platform_data;
+ const struct cma3000_platform_data *pdata = dev_get_platdata(dev);
struct cma3000_accl_data *data;
struct input_dev *input_dev;
int rev;
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index b5d71d245854..3e11510ff82d 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -17,7 +17,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <linux/init.h>
#include <linux/input-polldev.h>
#include <linux/ioport.h>
#include <linux/module.h>
diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c
index 020569a499f2..1f695f229ea8 100644
--- a/drivers/input/misc/da9052_onkey.c
+++ b/drivers/input/misc/da9052_onkey.c
@@ -11,7 +11,6 @@
* option) any later version.
*/
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/input/misc/da9055_onkey.c b/drivers/input/misc/da9055_onkey.c
index a0af8b2506ce..4b11ede34950 100644
--- a/drivers/input/misc/da9055_onkey.c
+++ b/drivers/input/misc/da9055_onkey.c
@@ -11,7 +11,6 @@
* option) any later version.
*/
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c
index a309a5c0899e..0eba94f581df 100644
--- a/drivers/input/misc/dm355evm_keys.c
+++ b/drivers/input/misc/dm355evm_keys.c
@@ -9,7 +9,6 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
index fe30bd0fe4bd..de21e317da32 100644
--- a/drivers/input/misc/gp2ap002a00f.c
+++ b/drivers/input/misc/gp2ap002a00f.c
@@ -125,7 +125,7 @@ static int gp2a_initialize(struct gp2a_data *dt)
static int gp2a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct gp2a_platform_data *pdata = client->dev.platform_data;
+ const struct gp2a_platform_data *pdata = dev_get_platdata(&client->dev);
struct gp2a_data *dt;
int error;
diff --git a/drivers/input/misc/gpio-beeper.c b/drivers/input/misc/gpio-beeper.c
new file mode 100644
index 000000000000..b757435e2b3d
--- /dev/null
+++ b/drivers/input/misc/gpio-beeper.c
@@ -0,0 +1,127 @@
+/*
+ * Generic GPIO beeper driver
+ *
+ * Copyright (C) 2013 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+
+#define BEEPER_MODNAME "gpio-beeper"
+
+struct gpio_beeper {
+ struct work_struct work;
+ int gpio;
+ bool active_low;
+ bool beeping;
+};
+
+static void gpio_beeper_toggle(struct gpio_beeper *beep, bool on)
+{
+ gpio_set_value_cansleep(beep->gpio, on ^ beep->active_low);
+}
+
+static void gpio_beeper_work(struct work_struct *work)
+{
+ struct gpio_beeper *beep = container_of(work, struct gpio_beeper, work);
+
+ gpio_beeper_toggle(beep, beep->beeping);
+}
+
+static int gpio_beeper_event(struct input_dev *dev, unsigned int type,
+ unsigned int code, int value)
+{
+ struct gpio_beeper *beep = input_get_drvdata(dev);
+
+ if (type != EV_SND || code != SND_BELL)
+ return -ENOTSUPP;
+
+ if (value < 0)
+ return -EINVAL;
+
+ beep->beeping = value;
+ /* Schedule work to actually turn the beeper on or off */
+ schedule_work(&beep->work);
+
+ return 0;
+}
+
+static void gpio_beeper_close(struct input_dev *input)
+{
+ struct gpio_beeper *beep = input_get_drvdata(input);
+
+ cancel_work_sync(&beep->work);
+ gpio_beeper_toggle(beep, false);
+}
+
+static int gpio_beeper_probe(struct platform_device *pdev)
+{
+ struct gpio_beeper *beep;
+ enum of_gpio_flags flags;
+ struct input_dev *input;
+ unsigned long gflags;
+ int err;
+
+ beep = devm_kzalloc(&pdev->dev, sizeof(*beep), GFP_KERNEL);
+ if (!beep)
+ return -ENOMEM;
+
+ beep->gpio = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
+ if (!gpio_is_valid(beep->gpio))
+ return beep->gpio;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ INIT_WORK(&beep->work, gpio_beeper_work);
+
+ input->name = pdev->name;
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+ input->close = gpio_beeper_close;
+ input->event = gpio_beeper_event;
+
+ input_set_capability(input, EV_SND, SND_BELL);
+
+ beep->active_low = flags & OF_GPIO_ACTIVE_LOW;
+ gflags = beep->active_low ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+
+ err = devm_gpio_request_one(&pdev->dev, beep->gpio, gflags, pdev->name);
+ if (err)
+ return err;
+
+ input_set_drvdata(input, beep);
+
+ return input_register_device(input);
+}
+
+static struct of_device_id gpio_beeper_of_match[] = {
+ { .compatible = BEEPER_MODNAME, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpio_beeper_of_match);
+
+static struct platform_driver gpio_beeper_platform_driver = {
+ .driver = {
+ .name = BEEPER_MODNAME,
+ .owner = THIS_MODULE,
+ .of_match_table = gpio_beeper_of_match,
+ },
+ .probe = gpio_beeper_probe,
+};
+module_platform_driver(gpio_beeper_platform_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("Generic GPIO beeper driver");
diff --git a/drivers/input/misc/gpio_tilt_polled.c b/drivers/input/misc/gpio_tilt_polled.c
index 714c68369134..1a81d9115226 100644
--- a/drivers/input/misc/gpio_tilt_polled.c
+++ b/drivers/input/misc/gpio_tilt_polled.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input-polldev.h>
@@ -98,7 +97,8 @@ static void gpio_tilt_polled_close(struct input_polled_dev *dev)
static int gpio_tilt_polled_probe(struct platform_device *pdev)
{
- const struct gpio_tilt_platform_data *pdata = pdev->dev.platform_data;
+ const struct gpio_tilt_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
struct gpio_tilt_polled_dev *tdev;
struct input_polled_dev *poll_dev;
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index 290fa5f97ded..01f3b5b300f3 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb/input.h>
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index a993b67a8a5b..d708478bc5b5 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -509,7 +509,8 @@ out:
static int kxtj9_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct kxtj9_platform_data *pdata = client->dev.platform_data;
+ const struct kxtj9_platform_data *pdata =
+ dev_get_platdata(&client->dev);
struct kxtj9_data *tj9;
int err;
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index e973133212a5..1fea5484941f 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -23,7 +23,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/err.h>
diff --git a/drivers/input/misc/mc13783-pwrbutton.c b/drivers/input/misc/mc13783-pwrbutton.c
index d0277a7b1579..0df6e8d8bd03 100644
--- a/drivers/input/misc/mc13783-pwrbutton.c
+++ b/drivers/input/misc/mc13783-pwrbutton.c
@@ -20,7 +20,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/input.h>
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index 6983ffbbfb94..5e5051351c3a 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -30,7 +30,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
diff --git a/drivers/input/misc/pcap_keys.c b/drivers/input/misc/pcap_keys.c
index 40ac9a5adf89..cd230365166e 100644
--- a/drivers/input/misc/pcap_keys.c
+++ b/drivers/input/misc/pcap_keys.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/input.h>
diff --git a/drivers/input/misc/pcf50633-input.c b/drivers/input/misc/pcf50633-input.c
index 73b13ebabe56..db92f4f3c99b 100644
--- a/drivers/input/misc/pcf50633-input.c
+++ b/drivers/input/misc/pcf50633-input.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/input.h>
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 0deca5a3c87f..97f711a7bd20 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -7,7 +7,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 7288b267613d..674a2cfc3c0e 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i8253.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/timex.h>
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index ec086f6f3cc3..b88b7cbf93e2 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -11,13 +11,12 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/slab.h>
-#include <linux/mfd/pm8xxx/core.h>
+#include <linux/regmap.h>
#define VIB_DRV 0x4A
@@ -35,7 +34,7 @@
* struct pm8xxx_vib - structure to hold vibrator data
* @vib_input_dev: input device supporting force feedback
* @work: work structure to set the vibration parameters
- * @dev: device supporting force feedback
+ * @regmap: regmap for register read/write
* @speed: speed of vibration set from userland
* @active: state of vibrator
* @level: level of vibration to set in the chip
@@ -44,7 +43,7 @@
struct pm8xxx_vib {
struct input_dev *vib_input_dev;
struct work_struct work;
- struct device *dev;
+ struct regmap *regmap;
int speed;
int level;
bool active;
@@ -52,42 +51,6 @@ struct pm8xxx_vib {
};
/**
- * pm8xxx_vib_read_u8 - helper to read a byte from pmic chip
- * @vib: pointer to vibrator structure
- * @data: placeholder for data to be read
- * @reg: register address
- */
-static int pm8xxx_vib_read_u8(struct pm8xxx_vib *vib,
- u8 *data, u16 reg)
-{
- int rc;
-
- rc = pm8xxx_readb(vib->dev->parent, reg, data);
- if (rc < 0)
- dev_warn(vib->dev, "Error reading pm8xxx reg 0x%x(0x%x)\n",
- reg, rc);
- return rc;
-}
-
-/**
- * pm8xxx_vib_write_u8 - helper to write a byte to pmic chip
- * @vib: pointer to vibrator structure
- * @data: data to write
- * @reg: register address
- */
-static int pm8xxx_vib_write_u8(struct pm8xxx_vib *vib,
- u8 data, u16 reg)
-{
- int rc;
-
- rc = pm8xxx_writeb(vib->dev->parent, reg, data);
- if (rc < 0)
- dev_warn(vib->dev, "Error writing pm8xxx reg 0x%x(0x%x)\n",
- reg, rc);
- return rc;
-}
-
-/**
* pm8xxx_vib_set - handler to start/stop vibration
* @vib: pointer to vibrator structure
* @on: state to set
@@ -95,14 +58,14 @@ static int pm8xxx_vib_write_u8(struct pm8xxx_vib *vib,
static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)
{
int rc;
- u8 val = vib->reg_vib_drv;
+ unsigned int val = vib->reg_vib_drv;
if (on)
val |= ((vib->level << VIB_DRV_SEL_SHIFT) & VIB_DRV_SEL_MASK);
else
val &= ~VIB_DRV_SEL_MASK;
- rc = pm8xxx_vib_write_u8(vib, val, VIB_DRV);
+ rc = regmap_write(vib->regmap, VIB_DRV, val);
if (rc < 0)
return rc;
@@ -118,9 +81,9 @@ static void pm8xxx_work_handler(struct work_struct *work)
{
struct pm8xxx_vib *vib = container_of(work, struct pm8xxx_vib, work);
int rc;
- u8 val;
+ unsigned int val;
- rc = pm8xxx_vib_read_u8(vib, &val, VIB_DRV);
+ rc = regmap_read(vib->regmap, VIB_DRV, &val);
if (rc < 0)
return;
@@ -184,34 +147,37 @@ static int pm8xxx_vib_probe(struct platform_device *pdev)
struct pm8xxx_vib *vib;
struct input_dev *input_dev;
int error;
- u8 val;
-
- vib = kzalloc(sizeof(*vib), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!vib || !input_dev) {
- dev_err(&pdev->dev, "couldn't allocate memory\n");
- error = -ENOMEM;
- goto err_free_mem;
- }
+ unsigned int val;
+
+ vib = devm_kzalloc(&pdev->dev, sizeof(*vib), GFP_KERNEL);
+ if (!vib)
+ return -ENOMEM;
+
+ vib->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!vib->regmap)
+ return -ENODEV;
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
+ return -ENOMEM;
INIT_WORK(&vib->work, pm8xxx_work_handler);
- vib->dev = &pdev->dev;
vib->vib_input_dev = input_dev;
/* operate in manual mode */
- error = pm8xxx_vib_read_u8(vib, &val, VIB_DRV);
+ error = regmap_read(vib->regmap, VIB_DRV, &val);
if (error < 0)
- goto err_free_mem;
+ return error;
+
val &= ~VIB_DRV_EN_MANUAL_MASK;
- error = pm8xxx_vib_write_u8(vib, val, VIB_DRV);
+ error = regmap_write(vib->regmap, VIB_DRV, val);
if (error < 0)
- goto err_free_mem;
+ return error;
vib->reg_vib_drv = val;
input_dev->name = "pm8xxx_vib_ffmemless";
input_dev->id.version = 1;
- input_dev->dev.parent = &pdev->dev;
input_dev->close = pm8xxx_vib_close;
input_set_drvdata(input_dev, vib);
input_set_capability(vib->vib_input_dev, EV_FF, FF_RUMBLE);
@@ -221,35 +187,17 @@ static int pm8xxx_vib_probe(struct platform_device *pdev)
if (error) {
dev_err(&pdev->dev,
"couldn't register vibrator as FF device\n");
- goto err_free_mem;
+ return error;
}
error = input_register_device(input_dev);
if (error) {
dev_err(&pdev->dev, "couldn't register input device\n");
- goto err_destroy_memless;
+ return error;
}
platform_set_drvdata(pdev, vib);
return 0;
-
-err_destroy_memless:
- input_ff_destroy(input_dev);
-err_free_mem:
- input_free_device(input_dev);
- kfree(vib);
-
- return error;
-}
-
-static int pm8xxx_vib_remove(struct platform_device *pdev)
-{
- struct pm8xxx_vib *vib = platform_get_drvdata(pdev);
-
- input_unregister_device(vib->vib_input_dev);
- kfree(vib);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -268,7 +216,6 @@ static SIMPLE_DEV_PM_OPS(pm8xxx_vib_pm_ops, pm8xxx_vib_suspend, NULL);
static struct platform_driver pm8xxx_vib_driver = {
.probe = pm8xxx_vib_probe,
- .remove = pm8xxx_vib_remove,
.driver = {
.name = "pm8xxx-vib",
.owner = THIS_MODULE,
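With the driver now using regmap, the probe-time read/modify/write of VIB_DRV could also be collapsed into a single call. The following is a hypothetical helper under the same register definitions, not what the patch does, since probe wants the raw register value cached in reg_vib_drv:

	static int pm8xxx_vib_force_manual_mode(struct pm8xxx_vib *vib)
	{
		/* clear VIB_DRV_EN_MANUAL_MASK in one read-modify-write */
		return regmap_update_bits(vib->regmap, VIB_DRV,
					  VIB_DRV_EN_MANUAL_MASK, 0);
	}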
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index b49b738aa9c6..0e1a05f95858 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -11,16 +11,15 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/log2.h>
-#include <linux/mfd/pm8xxx/core.h>
#include <linux/input/pmic8xxx-pwrkey.h>
#define PON_CNTL_1 0x1C
@@ -32,26 +31,25 @@
* @key_press_irq: key press irq number
*/
struct pmic8xxx_pwrkey {
- struct input_dev *pwr;
int key_press_irq;
};
-static irqreturn_t pwrkey_press_irq(int irq, void *_pwrkey)
+static irqreturn_t pwrkey_press_irq(int irq, void *_pwr)
{
- struct pmic8xxx_pwrkey *pwrkey = _pwrkey;
+ struct input_dev *pwr = _pwr;
- input_report_key(pwrkey->pwr, KEY_POWER, 1);
- input_sync(pwrkey->pwr);
+ input_report_key(pwr, KEY_POWER, 1);
+ input_sync(pwr);
return IRQ_HANDLED;
}
-static irqreturn_t pwrkey_release_irq(int irq, void *_pwrkey)
+static irqreturn_t pwrkey_release_irq(int irq, void *_pwr)
{
- struct pmic8xxx_pwrkey *pwrkey = _pwrkey;
+ struct input_dev *pwr = _pwr;
- input_report_key(pwrkey->pwr, KEY_POWER, 0);
- input_sync(pwrkey->pwr);
+ input_report_key(pwr, KEY_POWER, 0);
+ input_sync(pwr);
return IRQ_HANDLED;
}
@@ -88,7 +86,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
int key_press_irq = platform_get_irq(pdev, 1);
int err;
unsigned int delay;
- u8 pon_cntl;
+ unsigned int pon_cntl;
+ struct regmap *regmap;
struct pmic8xxx_pwrkey *pwrkey;
const struct pm8xxx_pwrkey_platform_data *pdata =
dev_get_platdata(&pdev->dev);
@@ -103,30 +102,36 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
return -EINVAL;
}
- pwrkey = kzalloc(sizeof(*pwrkey), GFP_KERNEL);
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap) {
+ dev_err(&pdev->dev, "failed to locate regmap for the device\n");
+ return -ENODEV;
+ }
+
+ pwrkey = devm_kzalloc(&pdev->dev, sizeof(*pwrkey), GFP_KERNEL);
if (!pwrkey)
return -ENOMEM;
- pwr = input_allocate_device();
+ pwrkey->key_press_irq = key_press_irq;
+
+ pwr = devm_input_allocate_device(&pdev->dev);
if (!pwr) {
dev_dbg(&pdev->dev, "Can't allocate power button\n");
- err = -ENOMEM;
- goto free_pwrkey;
+ return -ENOMEM;
}
input_set_capability(pwr, EV_KEY, KEY_POWER);
pwr->name = "pmic8xxx_pwrkey";
pwr->phys = "pmic8xxx_pwrkey/input0";
- pwr->dev.parent = &pdev->dev;
delay = (pdata->kpd_trigger_delay_us << 10) / USEC_PER_SEC;
delay = 1 + ilog2(delay);
- err = pm8xxx_readb(pdev->dev.parent, PON_CNTL_1, &pon_cntl);
+ err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
if (err < 0) {
dev_err(&pdev->dev, "failed reading PON_CNTL_1 err=%d\n", err);
- goto free_input_dev;
+ return err;
}
pon_cntl &= ~PON_CNTL_TRIG_DELAY_MASK;
@@ -136,69 +141,46 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
else
pon_cntl &= ~PON_CNTL_PULL_UP;
- err = pm8xxx_writeb(pdev->dev.parent, PON_CNTL_1, pon_cntl);
+ err = regmap_write(regmap, PON_CNTL_1, pon_cntl);
if (err < 0) {
dev_err(&pdev->dev, "failed writing PON_CNTL_1 err=%d\n", err);
- goto free_input_dev;
+ return err;
}
- err = input_register_device(pwr);
+ err = devm_request_irq(&pdev->dev, key_press_irq, pwrkey_press_irq,
+ IRQF_TRIGGER_RISING,
+ "pmic8xxx_pwrkey_press", pwr);
if (err) {
- dev_dbg(&pdev->dev, "Can't register power key: %d\n", err);
- goto free_input_dev;
+ dev_err(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
+ key_press_irq, err);
+ return err;
}
- pwrkey->key_press_irq = key_press_irq;
- pwrkey->pwr = pwr;
-
- platform_set_drvdata(pdev, pwrkey);
-
- err = request_irq(key_press_irq, pwrkey_press_irq,
- IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_press", pwrkey);
- if (err < 0) {
- dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
- key_press_irq, err);
- goto unreg_input_dev;
+ err = devm_request_irq(&pdev->dev, key_release_irq, pwrkey_release_irq,
+ IRQF_TRIGGER_RISING,
+ "pmic8xxx_pwrkey_release", pwr);
+ if (err) {
+ dev_err(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
+ key_release_irq, err);
+ return err;
}
- err = request_irq(key_release_irq, pwrkey_release_irq,
- IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_release", pwrkey);
- if (err < 0) {
- dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
- key_release_irq, err);
-
- goto free_press_irq;
+ err = input_register_device(pwr);
+ if (err) {
+ dev_err(&pdev->dev, "Can't register power key: %d\n", err);
+ return err;
}
+ platform_set_drvdata(pdev, pwrkey);
device_init_wakeup(&pdev->dev, pdata->wakeup);
return 0;
-
-free_press_irq:
- free_irq(key_press_irq, pwrkey);
-unreg_input_dev:
- input_unregister_device(pwr);
- pwr = NULL;
-free_input_dev:
- input_free_device(pwr);
-free_pwrkey:
- kfree(pwrkey);
- return err;
}
static int pmic8xxx_pwrkey_remove(struct platform_device *pdev)
{
- struct pmic8xxx_pwrkey *pwrkey = platform_get_drvdata(pdev);
- int key_release_irq = platform_get_irq(pdev, 0);
- int key_press_irq = platform_get_irq(pdev, 1);
-
device_init_wakeup(&pdev->dev, 0);
- free_irq(key_press_irq, pwrkey);
- free_irq(key_release_irq, pwrkey);
- input_unregister_device(pwrkey->pwr);
- kfree(pwrkey);
-
return 0;
}
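Both PM8xxx input drivers now obtain their register map with dev_get_regmap(pdev->dev.parent, NULL), which only works if the parent PMIC driver has registered a regmap against its own struct device. A generic sketch of that provider side; the driver name, bus type and regmap_config values are placeholders, not the actual pm8xxx core driver:

	#include <linux/err.h>
	#include <linux/i2c.h>
	#include <linux/regmap.h>

	static const struct regmap_config example_pmic_regmap_config = {
		.reg_bits = 16,		/* placeholder register layout */
		.val_bits = 8,
	};

	static int example_pmic_probe(struct i2c_client *client,
				      const struct i2c_device_id *id)
	{
		struct regmap *map;

		/* devm_regmap_init_i2c() attaches the map to &client->dev, so
		 * platform children created under this device can later look
		 * it up with dev_get_regmap(dev->parent, NULL). */
		map = devm_regmap_init_i2c(client, &example_pmic_regmap_config);
		if (IS_ERR(map))
			return PTR_ERR(map);

		/* ... register the pwrkey/vibrator child devices here ... */
		return 0;
	}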
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index 49c0c3ebd321..63b539d3daba 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -31,7 +31,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/usb/input.h>
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 940566e7be13..8ef288e7c971 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -68,7 +68,7 @@ static int pwm_beeper_event(struct input_dev *input,
static int pwm_beeper_probe(struct platform_device *pdev)
{
- unsigned long pwm_id = (unsigned long)pdev->dev.platform_data;
+ unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev);
struct pwm_beeper *beeper;
int error;
diff --git a/drivers/input/misc/retu-pwrbutton.c b/drivers/input/misc/retu-pwrbutton.c
index 7ca09baa0016..4bff1aa9b0db 100644
--- a/drivers/input/misc/retu-pwrbutton.c
+++ b/drivers/input/misc/retu-pwrbutton.c
@@ -17,7 +17,6 @@
*/
#include <linux/irq.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/input.h>
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index f920ba7ab51f..99b9e42aa748 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/device.h>
diff --git a/drivers/input/misc/sgi_btns.c b/drivers/input/misc/sgi_btns.c
index 95cf299ef9a3..f10474937a64 100644
--- a/drivers/input/misc/sgi_btns.c
+++ b/drivers/input/misc/sgi_btns.c
@@ -17,7 +17,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <linux/init.h>
#include <linux/input-polldev.h>
#include <linux/ioport.h>
#include <linux/module.h>
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index 7b8b03e0d0be..e8897c36d21b 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -7,7 +7,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index b9a05fda03e4..fb3b63b2f85c 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -52,15 +52,15 @@ static irqreturn_t powerbutton_irq(int irq, void *_pwr)
return IRQ_HANDLED;
}
-static int __init twl4030_pwrbutton_probe(struct platform_device *pdev)
+static int twl4030_pwrbutton_probe(struct platform_device *pdev)
{
struct input_dev *pwr;
int irq = platform_get_irq(pdev, 0);
int err;
- pwr = input_allocate_device();
+ pwr = devm_input_allocate_device(&pdev->dev);
if (!pwr) {
- dev_dbg(&pdev->dev, "Can't allocate power button\n");
+ dev_err(&pdev->dev, "Can't allocate power button\n");
return -ENOMEM;
}
@@ -70,52 +70,42 @@ static int __init twl4030_pwrbutton_probe(struct platform_device *pdev)
pwr->phys = "twl4030_pwrbutton/input0";
pwr->dev.parent = &pdev->dev;
- err = request_threaded_irq(irq, NULL, powerbutton_irq,
+ err = devm_request_threaded_irq(&pwr->dev, irq, NULL, powerbutton_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"twl4030_pwrbutton", pwr);
if (err < 0) {
- dev_dbg(&pdev->dev, "Can't get IRQ for pwrbutton: %d\n", err);
- goto free_input_dev;
+ dev_err(&pdev->dev, "Can't get IRQ for pwrbutton: %d\n", err);
+ return err;
}
err = input_register_device(pwr);
if (err) {
- dev_dbg(&pdev->dev, "Can't register power button: %d\n", err);
- goto free_irq;
+ dev_err(&pdev->dev, "Can't register power button: %d\n", err);
+ return err;
}
platform_set_drvdata(pdev, pwr);
return 0;
-
-free_irq:
- free_irq(irq, pwr);
-free_input_dev:
- input_free_device(pwr);
- return err;
}
-static int __exit twl4030_pwrbutton_remove(struct platform_device *pdev)
-{
- struct input_dev *pwr = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
-
- free_irq(irq, pwr);
- input_unregister_device(pwr);
-
- return 0;
-}
+#ifdef CONFIG_OF
+static const struct of_device_id twl4030_pwrbutton_dt_match_table[] = {
+ { .compatible = "ti,twl4030-pwrbutton" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, twl4030_pwrbutton_dt_match_table);
+#endif
static struct platform_driver twl4030_pwrbutton_driver = {
- .remove = __exit_p(twl4030_pwrbutton_remove),
+ .probe = twl4030_pwrbutton_probe,
.driver = {
.name = "twl4030_pwrbutton",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(twl4030_pwrbutton_dt_match_table),
},
};
-
-module_platform_driver_probe(twl4030_pwrbutton_driver,
- twl4030_pwrbutton_probe);
+module_platform_driver(twl4030_pwrbutton_driver);
MODULE_ALIAS("platform:twl4030_pwrbutton");
MODULE_DESCRIPTION("Triton2 Power Button");
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 68a5f33152a8..960ef2a70910 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -185,15 +185,17 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
if (pdata && pdata->coexist)
return true;
- if (of_find_node_by_name(node, "codec"))
+ if (of_find_node_by_name(node, "codec")) {
+ of_node_put(node);
return true;
+ }
return false;
}
static int twl4030_vibra_probe(struct platform_device *pdev)
{
- struct twl4030_vibra_data *pdata = pdev->dev.platform_data;
+ struct twl4030_vibra_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *twl4030_core_node = pdev->dev.parent->of_node;
struct vibra_info *info;
int ret;
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 7864b0c3ebb3..77dc23b94eb1 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -258,17 +258,14 @@ static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
static int twl6040_vibra_probe(struct platform_device *pdev)
{
struct device *twl6040_core_dev = pdev->dev.parent;
- struct device_node *twl6040_core_node = NULL;
+ struct device_node *twl6040_core_node;
struct vibra_info *info;
int vddvibl_uV = 0;
int vddvibr_uV = 0;
int ret;
-#ifdef CONFIG_OF
twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
"vibra");
-#endif
-
if (!twl6040_core_node) {
dev_err(&pdev->dev, "parent of node is missing?\n");
return -EINVAL;
@@ -276,6 +273,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info) {
+ of_node_put(twl6040_core_node);
dev_err(&pdev->dev, "couldn't allocate memory\n");
return -ENOMEM;
}
@@ -295,6 +293,8 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
of_property_read_u32(twl6040_core_node, "ti,vddvibl-uV", &vddvibl_uV);
of_property_read_u32(twl6040_core_node, "ti,vddvibr-uV", &vddvibr_uV);
+ of_node_put(twl6040_core_node);
+
if ((!info->vibldrv_res && !info->viblmotor_res) ||
(!info->vibrdrv_res && !info->vibrmotor_res)) {
dev_err(info->dev, "invalid vibra driver/motor resistance\n");
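The vibra changes above follow the usual OF refcounting rule: of_find_node_by_name() returns its result with the refcount raised, so every path that stops using the node, including early error returns, must drop it with of_node_put(). A minimal sketch of the pattern; the function and property names are illustrative only:

	#include <linux/errno.h>
	#include <linux/of.h>

	static int example_read_vibra_prop(struct device_node *parent, u32 *val)
	{
		struct device_node *np;
		int ret;

		np = of_find_node_by_name(parent, "vibra");
		if (!np)
			return -ENODEV;

		ret = of_property_read_u32(np, "ti,vddvibl-uV", val);

		of_node_put(np);	/* drop the reference on every exit path */
		return ret;
	}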
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index caa2c4068f09..173b6dcca0da 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -18,7 +18,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index e21c1816a8f9..fbfdc10573be 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -29,6 +29,7 @@
#include <xen/interface/io/fbif.h>
#include <xen/interface/io/kbdif.h>
#include <xen/xenbus.h>
+#include <xen/platform_pci.h>
struct xenkbd_info {
struct input_dev *kbd;
@@ -380,6 +381,9 @@ static int __init xenkbd_init(void)
if (xen_initial_domain())
return -ENODEV;
+ if (!xen_has_pv_devices())
+ return -ENODEV;
+
return xenbus_register_frontend(&xenkbd_driver);
}
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 285a5bd6cbc9..79c964c075f1 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -47,7 +47,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/rwsem.h>
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 5cf62e315218..fb15c64ffb95 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -277,6 +277,57 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
}
/*
+ * Process bitmap data for V5 protocols. This function does not return a value.
+ *
+ * The bitmaps don't have enough data to track fingers, so this function
+ * only generates points representing a bounding box of at most two contacts.
+ * These two points are returned in x1, y1, x2, and y2.
+ */
+static void alps_process_bitmap_dolphin(struct alps_data *priv,
+ struct alps_fields *fields,
+ int *x1, int *y1, int *x2, int *y2)
+{
+ int box_middle_x, box_middle_y;
+ unsigned int x_map, y_map;
+ unsigned char start_bit, end_bit;
+ unsigned char x_msb, x_lsb, y_msb, y_lsb;
+
+ x_map = fields->x_map;
+ y_map = fields->y_map;
+
+ if (!x_map || !y_map)
+ return;
+
+ /* Get Most-significant and Least-significant bit */
+ x_msb = fls(x_map);
+ x_lsb = ffs(x_map);
+ y_msb = fls(y_map);
+ y_lsb = ffs(y_map);
+
+ /* Most-significant bit should never exceed max sensor line number */
+ if (x_msb > priv->x_bits || y_msb > priv->y_bits)
+ return;
+
+ *x1 = *y1 = *x2 = *y2 = 0;
+
+ if (fields->fingers > 1) {
+ start_bit = priv->x_bits - x_msb;
+ end_bit = priv->x_bits - x_lsb;
+ box_middle_x = (priv->x_max * (start_bit + end_bit)) /
+ (2 * (priv->x_bits - 1));
+
+ start_bit = y_lsb - 1;
+ end_bit = y_msb - 1;
+ box_middle_y = (priv->y_max * (start_bit + end_bit)) /
+ (2 * (priv->y_bits - 1));
+ *x1 = fields->x;
+ *y1 = fields->y;
+ *x2 = 2 * box_middle_x - *x1;
+ *y2 = 2 * box_middle_y - *y1;
+ }
+}
+
+/*
* Process bitmap data from v3 and v4 protocols. Returns the number of
* fingers detected. A return value of 0 means at least one of the
* bitmaps was empty.
@@ -481,7 +532,8 @@ static void alps_decode_buttons_v3(struct alps_fields *f, unsigned char *p)
f->ts_middle = !!(p[3] & 0x40);
}
-static void alps_decode_pinnacle(struct alps_fields *f, unsigned char *p)
+static void alps_decode_pinnacle(struct alps_fields *f, unsigned char *p,
+ struct psmouse *psmouse)
{
f->first_mp = !!(p[4] & 0x40);
f->is_mp = !!(p[0] & 0x40);
@@ -502,48 +554,61 @@ static void alps_decode_pinnacle(struct alps_fields *f, unsigned char *p)
alps_decode_buttons_v3(f, p);
}
-static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p)
+static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p,
+ struct psmouse *psmouse)
{
- alps_decode_pinnacle(f, p);
+ alps_decode_pinnacle(f, p, psmouse);
f->x_map |= (p[5] & 0x10) << 11;
f->y_map |= (p[5] & 0x20) << 6;
}
-static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p)
+static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p,
+ struct psmouse *psmouse)
{
+ u64 palm_data = 0;
+ struct alps_data *priv = psmouse->private;
+
f->first_mp = !!(p[0] & 0x02);
f->is_mp = !!(p[0] & 0x20);
- f->fingers = ((p[0] & 0x6) >> 1 |
+ if (!f->is_mp) {
+ f->x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7));
+ f->y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3));
+ f->z = (p[0] & 4) ? 0 : p[5] & 0x7f;
+ alps_decode_buttons_v3(f, p);
+ } else {
+ f->fingers = ((p[0] & 0x6) >> 1 |
(p[0] & 0x10) >> 2);
- f->x_map = ((p[2] & 0x60) >> 5) |
- ((p[4] & 0x7f) << 2) |
- ((p[5] & 0x7f) << 9) |
- ((p[3] & 0x07) << 16) |
- ((p[3] & 0x70) << 15) |
- ((p[0] & 0x01) << 22);
- f->y_map = (p[1] & 0x7f) |
- ((p[2] & 0x1f) << 7);
-
- f->x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7));
- f->y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3));
- f->z = (p[0] & 4) ? 0 : p[5] & 0x7f;
- alps_decode_buttons_v3(f, p);
+ palm_data = (p[1] & 0x7f) |
+ ((p[2] & 0x7f) << 7) |
+ ((p[4] & 0x7f) << 14) |
+ ((p[5] & 0x7f) << 21) |
+ ((p[3] & 0x07) << 28) |
+ (((u64)p[3] & 0x70) << 27) |
+ (((u64)p[0] & 0x01) << 34);
+
+ /* Y-profile is stored in p(0) to p(n-1), n = y_bits; */
+ f->y_map = palm_data & (BIT(priv->y_bits) - 1);
+
+ /* X-profile is stored in p(n) to p(n+m-1), m = x_bits; */
+ f->x_map = (palm_data >> priv->y_bits) &
+ (BIT(priv->x_bits) - 1);
+ }
}
-static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
+static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
unsigned char *packet = psmouse->packet;
struct input_dev *dev = psmouse->dev;
struct input_dev *dev2 = priv->dev2;
int x1 = 0, y1 = 0, x2 = 0, y2 = 0;
- int fingers = 0, bmap_fingers;
- struct alps_fields f;
+ int fingers = 0, bmap_fn;
+ struct alps_fields f = {0};
- priv->decode_fields(&f, packet);
+ priv->decode_fields(&f, packet, psmouse);
/*
* There's no single feature of touchpad position and bitmap packets
@@ -560,19 +625,38 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
*/
if (f.is_mp) {
fingers = f.fingers;
- bmap_fingers = alps_process_bitmap(priv,
- f.x_map, f.y_map,
- &x1, &y1, &x2, &y2);
-
- /*
- * We shouldn't report more than one finger if
- * we don't have two coordinates.
- */
- if (fingers > 1 && bmap_fingers < 2)
- fingers = bmap_fingers;
-
- /* Now process position packet */
- priv->decode_fields(&f, priv->multi_data);
+ if (priv->proto_version == ALPS_PROTO_V3) {
+ bmap_fn = alps_process_bitmap(priv, f.x_map,
+ f.y_map, &x1, &y1,
+ &x2, &y2);
+
+ /*
+ * We shouldn't report more than one finger if
+ * we don't have two coordinates.
+ */
+ if (fingers > 1 && bmap_fn < 2)
+ fingers = bmap_fn;
+
+ /* Now process position packet */
+ priv->decode_fields(&f, priv->multi_data,
+ psmouse);
+ } else {
+ /*
+ * Dolphin uses the position packet's
+ * coordinate data as Pt1 and uses it to
+ * calculate Pt2, so we need to decode the
+ * position packet first.
+ */
+ priv->decode_fields(&f, priv->multi_data,
+ psmouse);
+
+ /*
+ * Since Dolphin's finger number is reliable,
+ * there is no need to compare with bmap_fn.
+ */
+ alps_process_bitmap_dolphin(priv, &f, &x1, &y1,
+ &x2, &y2);
+ }
} else {
priv->multi_packet = 0;
}
@@ -662,7 +746,7 @@ static void alps_process_packet_v3(struct psmouse *psmouse)
return;
}
- alps_process_touchpad_packet_v3(psmouse);
+ alps_process_touchpad_packet_v3_v5(psmouse);
}
static void alps_process_packet_v6(struct psmouse *psmouse)
@@ -1709,6 +1793,52 @@ error:
return -1;
}
+static int alps_dolphin_get_device_area(struct psmouse *psmouse,
+ struct alps_data *priv)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ unsigned char param[4] = {0};
+ int num_x_electrode, num_y_electrode;
+
+ if (alps_enter_command_mode(psmouse))
+ return -1;
+
+ param[0] = 0x0a;
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETPOLL) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETPOLL) ||
+ ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE))
+ return -1;
+
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
+ return -1;
+
+ /*
+ * Dolphin's sensor line number is not fixed. It can be calculated
+ * by adding DOLPHIN_PROFILE_X/YOFFSET to the device's register value.
+ * Furthermore, we can get the device's x_max and y_max by multiplying
+ * the sensor line number by DOLPHIN_COUNT_PER_ELECTRODE.
+ *
+ * e.g. When we get register's sensor_x = 11 & sensor_y = 8,
+ * real sensor line number X = 11 + 8 = 19, and
+ * real sensor line number Y = 8 + 1 = 9.
+ * So, x_max = (19 - 1) * 64 = 1152, and
+ * y_max = (9 - 1) * 64 = 512.
+ */
+ num_x_electrode = DOLPHIN_PROFILE_XOFFSET + (param[2] & 0x0F);
+ num_y_electrode = DOLPHIN_PROFILE_YOFFSET + ((param[2] >> 4) & 0x0F);
+ priv->x_bits = num_x_electrode;
+ priv->y_bits = num_y_electrode;
+ priv->x_max = (num_x_electrode - 1) * DOLPHIN_COUNT_PER_ELECTRODE;
+ priv->y_max = (num_y_electrode - 1) * DOLPHIN_COUNT_PER_ELECTRODE;
+
+ if (alps_exit_command_mode(psmouse))
+ return -1;
+
+ return 0;
+}
+
static int alps_hw_init_dolphin_v1(struct psmouse *psmouse)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
@@ -1763,13 +1893,13 @@ static void alps_set_defaults(struct alps_data *priv)
break;
case ALPS_PROTO_V5:
priv->hw_init = alps_hw_init_dolphin_v1;
- priv->process_packet = alps_process_packet_v3;
+ priv->process_packet = alps_process_touchpad_packet_v3_v5;
priv->decode_fields = alps_decode_dolphin;
priv->set_abs_params = alps_set_abs_params_mt;
priv->nibble_commands = alps_v3_nibble_commands;
priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
priv->byte0 = 0xc8;
- priv->mask0 = 0xc8;
+ priv->mask0 = 0xd8;
priv->flags = 0;
priv->x_max = 1360;
priv->y_max = 660;
@@ -1845,11 +1975,13 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
if (alps_match_table(psmouse, priv, e7, ec) == 0) {
return 0;
} else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
- ec[0] == 0x73 && ec[1] == 0x01) {
+ ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) {
priv->proto_version = ALPS_PROTO_V5;
alps_set_defaults(priv);
-
- return 0;
+ if (alps_dolphin_get_device_area(psmouse, priv))
+ return -EIO;
+ else
+ return 0;
} else if (ec[0] == 0x88 && ec[1] == 0x08) {
priv->proto_version = ALPS_PROTO_V3;
alps_set_defaults(priv);
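A worked example of the bounding-box mirroring in alps_process_bitmap_dolphin() above, using hypothetical values consistent with the sensor-area comment in alps_dolphin_get_device_area() (x_bits = 19, x_max = 1152) and an X bitmap whose lowest and highest set bits are 5 and 9:

	x_lsb = ffs(x_map) = 5, x_msb = fls(x_map) = 9
	start_bit = x_bits - x_msb = 19 - 9 = 10
	end_bit   = x_bits - x_lsb = 19 - 5 = 14
	box_middle_x = x_max * (start_bit + end_bit) / (2 * (x_bits - 1))
	             = 1152 * 24 / 36 = 768
	x2 = 2 * box_middle_x - x1, so x1 = 700 from the position packet gives x2 = 836

The second reported contact is thus the mirror image of the position-packet contact about the centre of the bitmap's bounding box, as the function comment describes.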
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 704f0f924307..03f88b6940c7 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -19,6 +19,10 @@
#define ALPS_PROTO_V5 5
#define ALPS_PROTO_V6 6
+#define DOLPHIN_COUNT_PER_ELECTRODE 64
+#define DOLPHIN_PROFILE_XOFFSET 8 /* x-electrode offset */
+#define DOLPHIN_PROFILE_YOFFSET 1 /* y-electrode offset */
+
/**
* struct alps_model_info - touchpad ID table
* @signature: E7 response string to match.
@@ -146,7 +150,8 @@ struct alps_data {
int (*hw_init)(struct psmouse *psmouse);
void (*process_packet)(struct psmouse *psmouse);
- void (*decode_fields)(struct alps_fields *f, unsigned char *p);
+ void (*decode_fields)(struct alps_fields *f, unsigned char *p,
+ struct psmouse *psmouse);
void (*set_abs_params)(struct alps_data *priv, struct input_dev *dev1);
int prev_fin;
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index e42f1fa8cdc0..800ca7dfafc2 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb/input.h>
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index a73f9618b0ad..c329cdb0b91a 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -34,7 +34,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb/input.h>
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index a5869a856ea5..87095e2f5153 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -15,7 +15,6 @@
* the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 597e9b8fc18d..ef1cf52f8bb9 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -486,6 +486,7 @@ static void elantech_input_sync_v4(struct psmouse *psmouse)
unsigned char *packet = psmouse->packet;
input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
input_mt_report_pointer_emulation(dev, true);
input_sync(dev);
}
@@ -984,6 +985,44 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
}
/*
+ * Advertise INPUT_PROP_BUTTONPAD for clickpads. The testing of bit 12 in
+ * fw_version for this is based on the following fw_version & caps table:
+ *
+ * Laptop-model: fw_version: caps: buttons:
+ * Acer S3 0x461f00 10, 13, 0e clickpad
+ * Acer S7-392 0x581f01 50, 17, 0d clickpad
+ * Acer V5-131 0x461f02 01, 16, 0c clickpad
+ * Acer V5-551 0x461f00 ? clickpad
+ * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
+ * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
+ * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
+ * Asus UX31 0x361f00 20, 15, 0e clickpad
+ * Asus UX32VD 0x361f02 00, 15, 0e clickpad
+ * Avatar AVIU-145A2 0x361f00 ? clickpad
+ * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
+ * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
+ * Samsung NF210 0x150b00 78, 14, 0a 2 hw buttons
+ * Samsung NP770Z5E 0x575f01 10, 15, 0f clickpad
+ * Samsung NP700Z5B 0x361f06 21, 15, 0f clickpad
+ * Samsung NP900X3E-A02 0x575f03 ? clickpad
+ * Samsung NP-QX410 0x851b00 19, 14, 0c clickpad
+ * Samsung RC512 0x450f00 08, 15, 0c 2 hw buttons
+ * Samsung RF710 0x450f00 ? 2 hw buttons
+ * System76 Pangolin 0x250f01 ? 2 hw buttons
+ * (*) + 3 trackpoint buttons
+ */
+static void elantech_set_buttonpad_prop(struct psmouse *psmouse)
+{
+ struct input_dev *dev = psmouse->dev;
+ struct elantech_data *etd = psmouse->private;
+
+ if (etd->fw_version & 0x001000) {
+ __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
+ __clear_bit(BTN_RIGHT, dev->keybit);
+ }
+}
+
+/*
* Set the appropriate event bits for the input subsystem
*/
static int elantech_set_input_params(struct psmouse *psmouse)
@@ -1026,6 +1065,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
__set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
/* fall through */
case 3:
+ if (etd->hw_version == 3)
+ elantech_set_buttonpad_prop(psmouse);
input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
if (etd->reports_pressure) {
@@ -1047,9 +1088,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
*/
psmouse_warn(psmouse, "couldn't query resolution data.\n");
}
- /* v4 is clickpad, with only one button. */
- __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
- __clear_bit(BTN_RIGHT, dev->keybit);
+ elantech_set_buttonpad_prop(psmouse);
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
/* For X to recognize me as touchpad. */
input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 6b44413f54e3..8c7d94200bdb 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -8,7 +8,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/input-polldev.h>
@@ -48,7 +47,7 @@ static void gpio_mouse_scan(struct input_polled_dev *dev)
static int gpio_mouse_probe(struct platform_device *pdev)
{
- struct gpio_mouse_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_mouse_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct input_polled_dev *input_poll;
struct input_dev *input;
int pin, i;
diff --git a/drivers/input/mouse/logips2pp.c b/drivers/input/mouse/logips2pp.c
index 84de2fc6acc1..136e222e2a16 100644
--- a/drivers/input/mouse/logips2pp.c
+++ b/drivers/input/mouse/logips2pp.c
@@ -220,7 +220,7 @@ static const struct ps2pp_info *get_model_info(unsigned char model)
{ 61, PS2PP_KIND_MX, /* MX700 */
PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN |
PS2PP_EXTRA_BTN | PS2PP_NAV_BTN },
- { 66, PS2PP_KIND_MX, /* MX3100 reciver */
+ { 66, PS2PP_KIND_MX, /* MX3100 receiver */
PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN |
PS2PP_EXTRA_BTN | PS2PP_NAV_BTN | PS2PP_HWHEEL },
{ 72, PS2PP_KIND_TRACKMAN, 0 }, /* T-CH11: TrackMan Marble */
diff --git a/drivers/input/mouse/navpoint.c b/drivers/input/mouse/navpoint.c
index 0b8d33591dee..1ccc88af1f0b 100644
--- a/drivers/input/mouse/navpoint.c
+++ b/drivers/input/mouse/navpoint.c
@@ -9,7 +9,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index 0ecb9e7945eb..9b4d9a59e229 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -10,7 +10,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -166,7 +165,7 @@ static int pxa930_trkball_probe(struct platform_device *pdev)
if (!trkball)
return -ENOMEM;
- trkball->pdata = pdev->dev.platform_data;
+ trkball->pdata = dev_get_platdata(&pdev->dev);
if (!trkball->pdata) {
dev_err(&pdev->dev, "no platform data defined\n");
error = -EINVAL;
diff --git a/drivers/input/mouse/sermouse.c b/drivers/input/mouse/sermouse.c
index d5928fd0c914..8df526620ebf 100644
--- a/drivers/input/mouse/sermouse.c
+++ b/drivers/input/mouse/sermouse.c
@@ -32,7 +32,6 @@
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Serial mouse driver"
diff --git a/drivers/input/mouse/synaptics_usb.c b/drivers/input/mouse/synaptics_usb.c
index 64cf34ea7604..e122bda16aab 100644
--- a/drivers/input/mouse/synaptics_usb.c
+++ b/drivers/input/mouse/synaptics_usb.c
@@ -39,7 +39,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/input/mouse/vsxxxaa.c b/drivers/input/mouse/vsxxxaa.c
index e900d465aaf6..38298232124f 100644
--- a/drivers/input/mouse/vsxxxaa.c
+++ b/drivers/input/mouse/vsxxxaa.c
@@ -82,7 +82,6 @@
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Driver for DEC VSXXX-AA and -GA mice and VSXXX-AB tablet"
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 8541f949778d..aec54e283580 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -16,14 +16,19 @@ config SERIO
To compile this driver as a module, choose M here: the
module will be called serio.
+config ARCH_MIGHT_HAVE_PC_SERIO
+ bool
+ help
+ Select this config option from the architecture Kconfig if
+ the architecture might use a PC serio device (i8042) to
+ communicate with keyboard, mouse, etc.
+
if SERIO
config SERIO_I8042
tristate "i8042 PC Keyboard controller"
default y
- depends on !PARISC && (!ARM || FOOTBRIDGE_HOST) && \
- (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \
- !ARC
+ depends on ARCH_MIGHT_HAVE_PC_SERIO
help
i8042 is the chip over which the standard AT keyboard and PS/2
mouse are connected to the computer. If you use these devices,
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index 4777a73cd390..cce69d6b9587 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 4e2fd44865e1..762b08432de0 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -10,7 +10,6 @@
* (at your option) any later version.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -167,8 +166,6 @@ static int amba_kmi_remove(struct amba_device *dev)
{
struct amba_kmi_port *kmi = amba_get_drvdata(dev);
- amba_set_drvdata(dev, NULL);
-
serio_unregister_port(kmi->io);
clk_put(kmi->clk);
iounmap(kmi->base);
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 3a83c3c14b23..613261994621 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -160,7 +160,9 @@ static void hv_kbd_on_receive(struct hv_device *hv_dev,
if (info & IS_E0)
serio_interrupt(kbd_dev->hv_serio,
XTKBD_EMUL0, 0);
-
+ if (info & IS_E1)
+ serio_interrupt(kbd_dev->hv_serio,
+ XTKBD_EMUL1, 0);
scan_code = __le16_to_cpu(ks_msg->make_code);
if (info & IS_BREAK)
scan_code |= XTKBD_RELEASE;
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 07a8363f3c5c..75516996db20 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -18,7 +18,6 @@
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/i8042.h>
-#include <linux/init.h>
#include <linux/libps2.h>
#define DRIVER_DESC "PS/2 driver library"
diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c
index 51b1d40cc286..5d2fe7ece7ca 100644
--- a/drivers/input/serio/olpc_apsp.c
+++ b/drivers/input/serio/olpc_apsp.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/serio.h>
#include <linux/err.h>
#include <linux/platform_device.h>
diff --git a/drivers/input/serio/pcips2.c b/drivers/input/serio/pcips2.c
index 76f83836fd5a..e862c6ea9d9e 100644
--- a/drivers/input/serio/pcips2.c
+++ b/drivers/input/serio/pcips2.c
@@ -16,7 +16,6 @@
#include <linux/input.h>
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/serio.h>
#include <linux/delay.h>
#include <asm/io.h>
@@ -181,7 +180,6 @@ static void pcips2_remove(struct pci_dev *dev)
struct pcips2_data *ps2if = pci_get_drvdata(dev);
serio_unregister_port(ps2if->io);
- pci_set_drvdata(dev, NULL);
kfree(ps2if);
pci_release_regions(dev);
pci_disable_device(dev);
diff --git a/drivers/input/serio/q40kbd.c b/drivers/input/serio/q40kbd.c
index 7a65a1bc5226..594256c38554 100644
--- a/drivers/input/serio/q40kbd.c
+++ b/drivers/input/serio/q40kbd.c
@@ -30,7 +30,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/serio.h>
#include <linux/interrupt.h>
#include <linux/err.h>
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index 567566ae0dae..e462e7791bb8 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -29,7 +29,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/serio.h>
#include <linux/err.h>
#include <linux/platform_device.h>
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index 59df2e7317a3..c9a02fe57576 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -15,7 +15,6 @@
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/serio.h>
-#include <linux/init.h>
#include <linux/major.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 8755f5f3ad37..0cb7ef59071b 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -124,7 +124,7 @@ static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *c
{
struct serport *serport = (struct serport*) tty->disc_data;
unsigned long flags;
- unsigned int ch_flags;
+ unsigned int ch_flags = 0;
int i;
spin_lock_irqsave(&serport->lock, flags);
@@ -133,18 +133,20 @@ static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *c
goto out;
for (i = 0; i < count; i++) {
- switch (fp[i]) {
- case TTY_FRAME:
- ch_flags = SERIO_FRAME;
- break;
-
- case TTY_PARITY:
- ch_flags = SERIO_PARITY;
- break;
-
- default:
- ch_flags = 0;
- break;
+ if (fp) {
+ switch (fp[i]) {
+ case TTY_FRAME:
+ ch_flags = SERIO_FRAME;
+ break;
+
+ case TTY_PARITY:
+ ch_flags = SERIO_PARITY;
+ break;
+
+ default:
+ ch_flags = 0;
+ break;
+ }
}
serio_interrupt(serport->serio, cp[i], ch_flags);
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index dfbcd872f95e..e6cf52ebad87 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -20,7 +20,6 @@
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of_address.h>
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index e062ec899ca1..889f6b77e8cb 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/usb/input.h>
/*
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index ee83c3904ee8..e7f966da6efa 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -74,7 +74,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/usb/input.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 29e01ab6859f..caecffe8caff 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -53,7 +53,6 @@ Scott Hill shill@gtcocalcomp.com
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/usb.h>
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
index 5cc04124995c..cd852059b99e 100644
--- a/drivers/input/tablet/hanwang.c
+++ b/drivers/input/tablet/hanwang.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/usb/input.h>
#define DRIVER_AUTHOR "Xing Wei <weixing@hanwang.com.cn>"
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 3fba74b9b602..d2ac7c2b5b82 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -1,7 +1,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/usb/input.h>
#include <asm/unaligned.h>
diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
index b79d45198d82..9ebf0ed3b3b3 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -86,7 +86,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/init.h>
#include <linux/usb/input.h>
#include <linux/power_supply.h>
#include <asm/unaligned.h>
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 867e7c33ac55..b16ebef5b911 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -304,7 +304,7 @@ static int wacom_parse_hid(struct usb_interface *intf,
struct usb_device *dev = interface_to_usbdev(intf);
char limit = 0;
/* result has to be defined as int for some devices */
- int result = 0;
+ int result = 0, touch_max = 0;
int i = 0, usage = WCM_UNDEFINED, finger = 0, pen = 0;
unsigned char *report;
@@ -351,7 +351,8 @@ static int wacom_parse_hid(struct usb_interface *intf,
if (usage == WCM_DESKTOP) {
if (finger) {
features->device_type = BTN_TOOL_FINGER;
-
+ /* touch device at least supports one touch point */
+ touch_max = 1;
switch (features->type) {
case TABLETPC2FG:
features->pktlen = WACOM_PKGLEN_TPC2FG;
@@ -504,6 +505,8 @@ static int wacom_parse_hid(struct usb_interface *intf,
}
out:
+ if (!features->touch_max && touch_max)
+ features->touch_max = touch_max;
result = 0;
kfree(report);
return result;
@@ -1194,12 +1197,15 @@ static void wacom_wireless_work(struct work_struct *work)
wacom_wac1->features.device_type = BTN_TOOL_PEN;
snprintf(wacom_wac1->name, WACOM_NAME_MAX, "%s (WL) Pen",
wacom_wac1->features.name);
+ wacom_wac1->shared->touch_max = wacom_wac1->features.touch_max;
+ wacom_wac1->shared->type = wacom_wac1->features.type;
error = wacom_register_input(wacom1);
if (error)
goto fail;
/* Touch interface */
- if (wacom_wac1->features.touch_max) {
+ if (wacom_wac1->features.touch_max ||
+ wacom_wac1->features.type == INTUOSHT) {
wacom_wac2->features =
*((struct wacom_features *)id->driver_info);
wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
@@ -1214,6 +1220,10 @@ static void wacom_wireless_work(struct work_struct *work)
error = wacom_register_input(wacom2);
if (error)
goto fail;
+
+ if (wacom_wac1->features.type == INTUOSHT &&
+ wacom_wac1->features.touch_max)
+ wacom_wac->shared->touch_input = wacom_wac2->input;
}
error = wacom_initialize_battery(wacom);
@@ -1322,7 +1332,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
* HID descriptor. If this is the touch interface (wMaxPacketSize
* of WACOM_PKGLEN_BBTOUCH3), override the table values.
*/
- if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
+ if (features->type >= INTUOS5S && features->type <= INTUOSHT) {
if (endpoint->wMaxPacketSize == WACOM_PKGLEN_BBTOUCH3) {
features->device_type = BTN_TOOL_FINGER;
features->pktlen = WACOM_PKGLEN_BBTOUCH3;
@@ -1393,6 +1403,11 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
}
}
+ if (wacom_wac->features.type == INTUOSHT && wacom_wac->features.touch_max) {
+ if (wacom_wac->features.device_type == BTN_TOOL_FINGER)
+ wacom_wac->shared->touch_input = wacom_wac->input;
+ }
+
return 0;
fail5: wacom_destroy_leds(wacom);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 782c2535f1d8..05f371df6c40 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -210,6 +210,62 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
return 1;
}
+static int wacom_dtus_irq(struct wacom_wac *wacom)
+{
+ char *data = wacom->data;
+ struct input_dev *input = wacom->input;
+ unsigned short prox, pressure = 0;
+
+ if (data[0] != WACOM_REPORT_DTUS && data[0] != WACOM_REPORT_DTUSPAD) {
+ dev_dbg(input->dev.parent,
+ "%s: received unknown report #%d", __func__, data[0]);
+ return 0;
+ } else if (data[0] == WACOM_REPORT_DTUSPAD) {
+ input_report_key(input, BTN_0, (data[1] & 0x01));
+ input_report_key(input, BTN_1, (data[1] & 0x02));
+ input_report_key(input, BTN_2, (data[1] & 0x04));
+ input_report_key(input, BTN_3, (data[1] & 0x08));
+ input_report_abs(input, ABS_MISC,
+ data[1] & 0x0f ? PAD_DEVICE_ID : 0);
+ /*
+ * Serial number is required when expresskeys are
+ * reported through pen interface.
+ */
+ input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
+ return 1;
+ } else {
+ prox = data[1] & 0x80;
+ if (prox) {
+ switch ((data[1] >> 3) & 3) {
+ case 1: /* Rubber */
+ wacom->tool[0] = BTN_TOOL_RUBBER;
+ wacom->id[0] = ERASER_DEVICE_ID;
+ break;
+
+ case 2: /* Pen */
+ wacom->tool[0] = BTN_TOOL_PEN;
+ wacom->id[0] = STYLUS_DEVICE_ID;
+ break;
+ }
+ }
+
+ input_report_key(input, BTN_STYLUS, data[1] & 0x20);
+ input_report_key(input, BTN_STYLUS2, data[1] & 0x40);
+ input_report_abs(input, ABS_X, get_unaligned_be16(&data[3]));
+ input_report_abs(input, ABS_Y, get_unaligned_be16(&data[5]));
+ pressure = ((data[1] & 0x03) << 8) | (data[2] & 0xff);
+ input_report_abs(input, ABS_PRESSURE, pressure);
+ input_report_key(input, BTN_TOUCH, pressure > 10);
+
+ if (!prox) /* out-prox */
+ wacom->id[0] = 0;
+ input_report_key(input, wacom->tool[0], prox);
+ input_report_abs(input, ABS_MISC, wacom->id[0]);
+ input_event(input, EV_MSC, MSC_SERIAL, 1);
+ return 1;
+ }
+}
+
static int wacom_graphire_irq(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
@@ -331,7 +387,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
/* Enter report */
if ((data[1] & 0xfc) == 0xc0) {
- if (features->quirks == WACOM_QUIRK_MULTI_INPUT)
+ if (features->quirks & WACOM_QUIRK_MULTI_INPUT)
wacom->shared->stylus_in_proximity = true;
/* serial number of the tool */
@@ -436,7 +492,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
/* Exit report */
if ((data[1] & 0xfe) == 0x80) {
- if (features->quirks == WACOM_QUIRK_MULTI_INPUT)
+ if (features->quirks & WACOM_QUIRK_MULTI_INPUT)
wacom->shared->stylus_in_proximity = false;
/*
@@ -1151,8 +1207,8 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
int width, height;
if (features->type >= INTUOSPS && features->type <= INTUOSPL) {
- width = data[5];
- height = data[6];
+ width = data[5] * 100;
+ height = data[6] * 100;
} else {
/*
* "a" is a scaled-down area which we assume is
@@ -1176,10 +1232,16 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data)
{
struct input_dev *input = wacom->input;
+ struct wacom_features *features = &wacom->features;
- input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0);
+ if (features->type == INTUOSHT) {
+ input_report_key(input, BTN_LEFT, (data[1] & 0x02) != 0);
+ input_report_key(input, BTN_BACK, (data[1] & 0x08) != 0);
+ } else {
+ input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0);
+ input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0);
+ }
input_report_key(input, BTN_FORWARD, (data[1] & 0x04) != 0);
- input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0);
input_report_key(input, BTN_RIGHT, (data[1] & 0x01) != 0);
}
@@ -1213,13 +1275,23 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
static int wacom_bpt_pen(struct wacom_wac *wacom)
{
+ struct wacom_features *features = &wacom->features;
struct input_dev *input = wacom->input;
unsigned char *data = wacom->data;
int prox = 0, x = 0, y = 0, p = 0, d = 0, pen = 0, btn1 = 0, btn2 = 0;
- if (data[0] != 0x02)
+ if (data[0] != WACOM_REPORT_PENABLED && data[0] != WACOM_REPORT_USB)
return 0;
+ if (data[0] == WACOM_REPORT_USB) {
+ if (features->type == INTUOSHT && features->touch_max) {
+ input_report_switch(wacom->shared->touch_input,
+ SW_MUTE_DEVICE, data[8] & 0x40);
+ input_sync(wacom->shared->touch_input);
+ }
+ return 0;
+ }
+
prox = (data[1] & 0x20) == 0x20;
/*
@@ -1252,8 +1324,8 @@ static int wacom_bpt_pen(struct wacom_wac *wacom)
* touching and applying pressure; do not report negative
* distance.
*/
- if (data[8] <= wacom->features.distance_max)
- d = wacom->features.distance_max - data[8];
+ if (data[8] <= features->distance_max)
+ d = features->distance_max - data[8];
pen = data[1] & 0x01;
btn1 = data[1] & 0x02;
@@ -1297,13 +1369,20 @@ static int wacom_wireless_irq(struct wacom_wac *wacom, size_t len)
unsigned char *data = wacom->data;
int connected;
- if (len != WACOM_PKGLEN_WIRELESS || data[0] != 0x80)
+ if (len != WACOM_PKGLEN_WIRELESS || data[0] != WACOM_REPORT_WL)
return 0;
connected = data[1] & 0x01;
if (connected) {
int pid, battery;
+ if ((wacom->shared->type == INTUOSHT) &&
+ wacom->shared->touch_max) {
+ input_report_switch(wacom->shared->touch_input,
+ SW_MUTE_DEVICE, data[5] & 0x40);
+ input_sync(wacom->shared->touch_input);
+ }
+
pid = get_unaligned_be16(&data[6]);
battery = data[5] & 0x3f;
if (wacom->pid != pid) {
@@ -1348,6 +1427,10 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
sync = wacom_dtu_irq(wacom_wac);
break;
+ case DTUS:
+ sync = wacom_dtus_irq(wacom_wac);
+ break;
+
case INTUOS:
case INTUOS3S:
case INTUOS3:
@@ -1391,6 +1474,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
break;
case BAMBOO_PT:
+ case INTUOSHT:
sync = wacom_bpt_irq(wacom_wac, len);
break;
@@ -1459,7 +1543,7 @@ void wacom_setup_device_quirks(struct wacom_features *features)
/* these device have multiple inputs */
if (features->type >= WIRELESS ||
- (features->type >= INTUOS5S && features->type <= INTUOSPL) ||
+ (features->type >= INTUOS5S && features->type <= INTUOSHT) ||
(features->oVid && features->oPid))
features->quirks |= WACOM_QUIRK_MULTI_INPUT;
@@ -1538,7 +1622,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
wacom_abs_set_axis(input_dev, wacom_wac);
- switch (wacom_wac->features.type) {
+ switch (features->type) {
case WACOM_MO:
input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
/* fall through */
@@ -1749,8 +1833,14 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
/* fall through */
+ case DTUS:
case PL:
case DTU:
+ if (features->type == DTUS) {
+ input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
+ for (i = 0; i < 3; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+ }
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
__set_bit(BTN_STYLUS, input_dev->keybit);
@@ -1771,33 +1861,50 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
break;
+ case INTUOSHT:
+ if (features->touch_max &&
+ features->device_type == BTN_TOOL_FINGER) {
+ input_dev->evbit[0] |= BIT_MASK(EV_SW);
+ __set_bit(SW_MUTE_DEVICE, input_dev->swbit);
+ }
+ /* fall through */
+
case BAMBOO_PT:
__clear_bit(ABS_MISC, input_dev->absbit);
- __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
-
if (features->device_type == BTN_TOOL_FINGER) {
- unsigned int flags = INPUT_MT_POINTER;
__set_bit(BTN_LEFT, input_dev->keybit);
__set_bit(BTN_FORWARD, input_dev->keybit);
__set_bit(BTN_BACK, input_dev->keybit);
__set_bit(BTN_RIGHT, input_dev->keybit);
- if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
- input_set_abs_params(input_dev,
+ if (features->touch_max) {
+ /* touch interface */
+ unsigned int flags = INPUT_MT_POINTER;
+
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+ if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
+ input_set_abs_params(input_dev,
ABS_MT_TOUCH_MAJOR,
0, features->x_max, 0, 0);
- input_set_abs_params(input_dev,
+ input_set_abs_params(input_dev,
ABS_MT_TOUCH_MINOR,
0, features->y_max, 0, 0);
+ } else {
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+ flags = 0;
+ }
+ input_mt_init_slots(input_dev, features->touch_max, flags);
} else {
- __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
- __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- flags = 0;
+ /* buttons/keys only interface */
+ __clear_bit(ABS_X, input_dev->absbit);
+ __clear_bit(ABS_Y, input_dev->absbit);
+ __clear_bit(BTN_TOUCH, input_dev->keybit);
}
- input_mt_init_slots(input_dev, features->touch_max, flags);
} else if (features->device_type == BTN_TOOL_PEN) {
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
__set_bit(BTN_STYLUS, input_dev->keybit);
@@ -2055,6 +2162,9 @@ static const struct wacom_features wacom_features_0xCE =
static const struct wacom_features wacom_features_0xF0 =
{ "Wacom DTU1631", WACOM_PKGLEN_GRAPHIRE, 34623, 19553, 511,
0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xFB =
+ { "Wacom DTU1031", WACOM_PKGLEN_DTUS, 22096, 13960, 511,
+ 0, DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x57 =
{ "Wacom DTK2241", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES};
@@ -2200,6 +2310,17 @@ static const struct wacom_features wacom_features_0x300 =
static const struct wacom_features wacom_features_0x301 =
{ "Wacom Bamboo One M", WACOM_PKGLEN_BBPEN, 21648, 13530, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x302 =
+ { "Wacom Intuos PT S", WACOM_PKGLEN_BBPEN, 15200, 9500, 1023,
+ 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 16 };
+static const struct wacom_features wacom_features_0x303 =
+ { "Wacom Intuos PT M", WACOM_PKGLEN_BBPEN, 21600, 13500, 1023,
+ 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 16 };
+static const struct wacom_features wacom_features_0x30E =
+ { "Wacom Intuos S", WACOM_PKGLEN_BBPEN, 15200, 9500, 1023,
+ 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2337,6 +2458,9 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x10F) },
{ USB_DEVICE_WACOM(0x300) },
{ USB_DEVICE_WACOM(0x301) },
+ { USB_DEVICE_DETAILED(0x302, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_DETAILED(0x303, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_DETAILED(0x30E, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_WACOM(0x304) },
{ USB_DEVICE_DETAILED(0x314, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_DETAILED(0x315, USB_CLASS_HID, 0, 0) },
@@ -2347,6 +2471,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xF8) },
{ USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_WACOM(0xFA) },
+ { USB_DEVICE_WACOM(0xFB) },
{ USB_DEVICE_WACOM(0x0307) },
{ USB_DEVICE_DETAILED(0x0309, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_LENOVO(0x6004) },
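
The new DTUS handler reads its coordinates as big-endian values straight out of the report, which is why get_unaligned_be16() appears above. A minimal sketch of that parsing idiom with illustrative offsets (not the full handler):

#include <linux/input.h>
#include <asm/unaligned.h>

static void demo_report_pen(struct input_dev *input, const u8 *data)
{
	unsigned int pressure = ((data[1] & 0x03) << 8) | data[2];

	/* 16-bit big-endian X/Y, possibly unaligned within the packet */
	input_report_abs(input, ABS_X, get_unaligned_be16(&data[3]));
	input_report_abs(input, ABS_Y, get_unaligned_be16(&data[5]));
	input_report_abs(input, ABS_PRESSURE, pressure);
	input_report_key(input, BTN_TOUCH, pressure > 10);
	input_sync(input);
}
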
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index fd23a3790605..f69c0ebe7fa9 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -12,7 +12,7 @@
#include <linux/types.h>
/* maximum packet length for USB devices */
-#define WACOM_PKGLEN_MAX 64
+#define WACOM_PKGLEN_MAX 68
#define WACOM_NAME_MAX 64
@@ -29,6 +29,7 @@
#define WACOM_PKGLEN_WIRELESS 32
#define WACOM_PKGLEN_MTOUCH 62
#define WACOM_PKGLEN_MTTPC 40
+#define WACOM_PKGLEN_DTUS 68
/* wacom data size per MT contact */
#define WACOM_BYTES_PER_MT_PACKET 11
@@ -47,13 +48,17 @@
#define WACOM_REPORT_INTUOSWRITE 6
#define WACOM_REPORT_INTUOSPAD 12
#define WACOM_REPORT_INTUOS5PAD 3
+#define WACOM_REPORT_DTUSPAD 21
#define WACOM_REPORT_TPC1FG 6
#define WACOM_REPORT_TPC2FG 13
#define WACOM_REPORT_TPCMT 13
#define WACOM_REPORT_TPCHID 15
#define WACOM_REPORT_TPCST 16
+#define WACOM_REPORT_DTUS 17
#define WACOM_REPORT_TPC1FGE 18
#define WACOM_REPORT_24HDT 1
+#define WACOM_REPORT_WL 128
+#define WACOM_REPORT_USB 192
/* device quirks */
#define WACOM_QUIRK_MULTI_INPUT 0x0001
@@ -68,6 +73,7 @@ enum {
PTU,
PL,
DTU,
+ DTUS,
INTUOS,
INTUOS3S,
INTUOS3,
@@ -81,6 +87,7 @@ enum {
INTUOSPS,
INTUOSPM,
INTUOSPL,
+ INTUOSHT,
WACOM_21UX2,
WACOM_22HD,
DTK,
@@ -129,6 +136,10 @@ struct wacom_features {
struct wacom_shared {
bool stylus_in_proximity;
bool touch_down;
+ /* for wireless device to access USB interfaces */
+ unsigned touch_max;
+ int type;
+ struct input_dev *touch_input;
};
struct wacom_wac {
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
index f7de14a268bf..544e20c551f8 100644
--- a/drivers/input/touchscreen/88pm860x-ts.c
+++ b/drivers/input/touchscreen/88pm860x-ts.c
@@ -172,7 +172,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
static int pm860x_touch_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct pm860x_touch_pdata *pdata = pdev->dev.platform_data;
+ struct pm860x_touch_pdata *pdata = dev_get_platdata(&pdev->dev);
struct pm860x_touch *touch;
struct i2c_client *i2c = (chip->id == CHIP_PM8607) ? chip->client \
: chip->companion;
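
This and most of the touchscreen conversions below swap direct ->platform_data dereferences for the dev_get_platdata() accessor; behaviour is unchanged. A small sketch of the idiom in a hypothetical platform driver ("foo" names are placeholders):

#include <linux/platform_device.h>

struct foo_pdata {
	int irq_gpio;		/* example board-supplied field */
};

static int foo_probe(struct platform_device *pdev)
{
	/* same pointer as pdev->dev.platform_data, via the accessor */
	struct foo_pdata *pdata = dev_get_platdata(&pdev->dev);

	if (!pdata)
		return -EINVAL;	/* board code supplied no platform data */

	dev_info(&pdev->dev, "pen-down gpio %d\n", pdata->irq_gpio);
	return 0;
}
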
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 961d58d32647..07e9e82029d1 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -717,7 +717,7 @@ config TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_MC13783
tristate "Freescale MC13783 touchscreen input driver"
- depends on MFD_MC13783
+ depends on MFD_MC13XXX
help
Say Y here if you have an Freescale MC13783 PMIC on your
board and want to use its touchscreen
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 69834dd3c313..6793c85903ae 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -37,7 +37,6 @@
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
@@ -686,7 +685,7 @@ static int ad7877_probe(struct spi_device *spi)
{
struct ad7877 *ts;
struct input_dev *input_dev;
- struct ad7877_platform_data *pdata = spi->dev.platform_data;
+ struct ad7877_platform_data *pdata = dev_get_platdata(&spi->dev);
int err;
u16 verify;
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index facd3057b62d..fce590677b7b 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -22,7 +22,6 @@
*/
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
@@ -470,7 +469,7 @@ static int ad7879_gpio_add(struct ad7879 *ts,
static void ad7879_gpio_remove(struct ad7879 *ts)
{
- const struct ad7879_platform_data *pdata = ts->dev->platform_data;
+ const struct ad7879_platform_data *pdata = dev_get_platdata(ts->dev);
int ret;
if (pdata->gpio_export) {
@@ -495,7 +494,7 @@ static inline void ad7879_gpio_remove(struct ad7879 *ts)
struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
const struct ad7879_bus_ops *bops)
{
- struct ad7879_platform_data *pdata = dev->platform_data;
+ struct ad7879_platform_data *pdata = dev_get_platdata(dev);
struct ad7879 *ts;
struct input_dev *input_dev;
int err;
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index ea195360747e..45a06e495ed2 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -19,7 +19,6 @@
*/
#include <linux/types.h>
#include <linux/hwmon.h>
-#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/delay.h>
@@ -101,8 +100,7 @@ struct ads7846 {
struct spi_device *spi;
struct regulator *reg;
-#if defined(CONFIG_HWMON) || defined(CONFIG_HWMON_MODULE)
- struct attribute_group *attr_group;
+#if IS_ENABLED(CONFIG_HWMON)
struct device *hwmon;
#endif
@@ -421,7 +419,7 @@ static int ads7845_read12_ser(struct device *dev, unsigned command)
return status;
}
-#if defined(CONFIG_HWMON) || defined(CONFIG_HWMON_MODULE)
+#if IS_ENABLED(CONFIG_HWMON)
#define SHOW(name, var, adjust) static ssize_t \
name ## _show(struct device *dev, struct device_attribute *attr, char *buf) \
@@ -479,42 +477,36 @@ static inline unsigned vbatt_adjust(struct ads7846 *ts, ssize_t v)
SHOW(in0_input, vaux, vaux_adjust)
SHOW(in1_input, vbatt, vbatt_adjust)
-static struct attribute *ads7846_attributes[] = {
- &dev_attr_temp0.attr,
- &dev_attr_temp1.attr,
- &dev_attr_in0_input.attr,
- &dev_attr_in1_input.attr,
- NULL,
-};
-
-static struct attribute_group ads7846_attr_group = {
- .attrs = ads7846_attributes,
-};
+static umode_t ads7846_is_visible(struct kobject *kobj, struct attribute *attr,
+ int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct ads7846 *ts = dev_get_drvdata(dev);
-static struct attribute *ads7843_attributes[] = {
- &dev_attr_in0_input.attr,
- &dev_attr_in1_input.attr,
- NULL,
-};
+ if (ts->model == 7843 && index < 2) /* in0, in1 */
+ return 0;
+ if (ts->model == 7845 && index != 2) /* in0 */
+ return 0;
-static struct attribute_group ads7843_attr_group = {
- .attrs = ads7843_attributes,
-};
+ return attr->mode;
+}
-static struct attribute *ads7845_attributes[] = {
- &dev_attr_in0_input.attr,
+static struct attribute *ads7846_attributes[] = {
+ &dev_attr_temp0.attr, /* 0 */
+ &dev_attr_temp1.attr, /* 1 */
+ &dev_attr_in0_input.attr, /* 2 */
+ &dev_attr_in1_input.attr, /* 3 */
NULL,
};
-static struct attribute_group ads7845_attr_group = {
- .attrs = ads7845_attributes,
+static struct attribute_group ads7846_attr_group = {
+ .attrs = ads7846_attributes,
+ .is_visible = ads7846_is_visible,
};
+__ATTRIBUTE_GROUPS(ads7846_attr);
static int ads784x_hwmon_register(struct spi_device *spi, struct ads7846 *ts)
{
- struct device *hwmon;
- int err;
-
/* hwmon sensors need a reference voltage */
switch (ts->model) {
case 7846:
@@ -535,43 +527,19 @@ static int ads784x_hwmon_register(struct spi_device *spi, struct ads7846 *ts)
break;
}
- /* different chips have different sensor groups */
- switch (ts->model) {
- case 7846:
- ts->attr_group = &ads7846_attr_group;
- break;
- case 7845:
- ts->attr_group = &ads7845_attr_group;
- break;
- case 7843:
- ts->attr_group = &ads7843_attr_group;
- break;
- default:
- dev_dbg(&spi->dev, "ADS%d not recognized\n", ts->model);
- return 0;
- }
-
- err = sysfs_create_group(&spi->dev.kobj, ts->attr_group);
- if (err)
- return err;
-
- hwmon = hwmon_device_register(&spi->dev);
- if (IS_ERR(hwmon)) {
- sysfs_remove_group(&spi->dev.kobj, ts->attr_group);
- return PTR_ERR(hwmon);
- }
+ ts->hwmon = hwmon_device_register_with_groups(&spi->dev, spi->modalias,
+ ts, ads7846_attr_groups);
+ if (IS_ERR(ts->hwmon))
+ return PTR_ERR(ts->hwmon);
- ts->hwmon = hwmon;
return 0;
}
static void ads784x_hwmon_unregister(struct spi_device *spi,
struct ads7846 *ts)
{
- if (ts->hwmon) {
- sysfs_remove_group(&spi->dev.kobj, ts->attr_group);
+ if (ts->hwmon)
hwmon_device_unregister(ts->hwmon);
- }
}
#else
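
The ads7846 hunk above replaces three per-model attribute groups with one group whose .is_visible callback hides unsupported sensors, and registers it through hwmon_device_register_with_groups(). A self-contained sketch of that pattern for a made-up "demo" chip (indices, fields and the reported value are placeholders):

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/hwmon.h>
#include <linux/err.h>

struct demo_chip {
	int model;
	struct device *hwmon;
};

static ssize_t in0_input_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct demo_chip *chip = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", chip->model);	/* placeholder reading */
}
static DEVICE_ATTR_RO(in0_input);

static umode_t demo_is_visible(struct kobject *kobj, struct attribute *attr,
			       int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct demo_chip *chip = dev_get_drvdata(dev);

	/* hide attributes a given model cannot measure */
	if (chip->model == 7843 && index == 0)
		return 0;

	return attr->mode;
}

static struct attribute *demo_attrs[] = {
	&dev_attr_in0_input.attr,	/* index 0 */
	NULL,
};

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
	.is_visible = demo_is_visible,
};
__ATTRIBUTE_GROUPS(demo);

static int demo_hwmon_register(struct device *parent, struct demo_chip *chip)
{
	chip->hwmon = hwmon_device_register_with_groups(parent, "demo",
							chip, demo_groups);
	return IS_ERR(chip->hwmon) ? PTR_ERR(chip->hwmon) : 0;
}

Because drvdata is passed at registration, both the show routine and the visibility callback can reach the chip data from the hwmon device, so no separate sysfs_create_group()/hwmon_device_register() pairing or manual cleanup is needed.
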
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 59aa24002c7b..a70400754e92 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
@@ -1130,7 +1129,7 @@ static void mxt_input_close(struct input_dev *dev)
static int mxt_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct mxt_platform_data *pdata = client->dev.platform_data;
+ const struct mxt_platform_data *pdata = dev_get_platdata(&client->dev);
struct mxt_data *data;
struct input_dev *input_dev;
int error;
diff --git a/drivers/input/touchscreen/atmel_tsadcc.c b/drivers/input/touchscreen/atmel_tsadcc.c
index bddabc595077..a7c9d6967d1e 100644
--- a/drivers/input/touchscreen/atmel_tsadcc.c
+++ b/drivers/input/touchscreen/atmel_tsadcc.c
@@ -12,7 +12,6 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -182,7 +181,7 @@ static int atmel_tsadcc_probe(struct platform_device *pdev)
struct atmel_tsadcc *ts_dev;
struct input_dev *input_dev;
struct resource *res;
- struct at91_tsadcc_data *pdata = pdev->dev.platform_data;
+ struct at91_tsadcc_data *pdata = dev_get_platdata(&pdev->dev);
int err;
unsigned int prsc;
unsigned int reg;
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index 8c651985a5c4..5bf1aeeea825 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -178,7 +178,7 @@ static irqreturn_t cy8ctmg110_irq_thread(int irq, void *dev_id)
static int cy8ctmg110_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct cy8ctmg110_pdata *pdata = client->dev.platform_data;
+ const struct cy8ctmg110_pdata *pdata = dev_get_platdata(&client->dev);
struct cy8ctmg110 *ts;
struct input_dev *input_dev;
int err;
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index 4204841cdc49..eee656f77a2e 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -534,7 +534,7 @@ static void cyttsp_close(struct input_dev *dev)
struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
struct device *dev, int irq, size_t xfer_buf_size)
{
- const struct cyttsp_platform_data *pdata = dev->platform_data;
+ const struct cyttsp_platform_data *pdata = dev_get_platdata(dev);
struct cyttsp *ts;
struct input_dev *input_dev;
int error;
@@ -553,7 +553,7 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
ts->dev = dev;
ts->input = input_dev;
- ts->pdata = dev->platform_data;
+ ts->pdata = dev_get_platdata(dev);
ts->bus_ops = bus_ops;
ts->irq = irq;
diff --git a/drivers/input/touchscreen/cyttsp_i2c_common.c b/drivers/input/touchscreen/cyttsp_i2c_common.c
index 1d7b6f154168..ccefa56ca212 100644
--- a/drivers/input/touchscreen/cyttsp_i2c_common.c
+++ b/drivers/input/touchscreen/cyttsp_i2c_common.c
@@ -31,6 +31,8 @@
#include <linux/module.h>
#include <linux/types.h>
+#include "cyttsp4_core.h"
+
int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf,
u16 addr, u8 length, void *values)
{
diff --git a/drivers/input/touchscreen/da9034-ts.c b/drivers/input/touchscreen/da9034-ts.c
index 34ad84105e6e..8ccf7bb4028a 100644
--- a/drivers/input/touchscreen/da9034-ts.c
+++ b/drivers/input/touchscreen/da9034-ts.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/input.h>
@@ -299,7 +298,7 @@ static void da9034_touch_close(struct input_dev *dev)
static int da9034_touch_probe(struct platform_device *pdev)
{
- struct da9034_touch_pdata *pdata = pdev->dev.platform_data;
+ struct da9034_touch_pdata *pdata = dev_get_platdata(&pdev->dev);
struct da9034_touch *touch;
struct input_dev *input_dev;
int ret;
diff --git a/drivers/input/touchscreen/dynapro.c b/drivers/input/touchscreen/dynapro.c
index 1809677a6513..86237a910876 100644
--- a/drivers/input/touchscreen/dynapro.c
+++ b/drivers/input/touchscreen/dynapro.c
@@ -24,7 +24,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Dynapro serial touchscreen driver"
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 83fa1b15a97f..412a85ec9ba5 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -623,8 +623,9 @@ static int edt_ft5x06_ts_reset(struct i2c_client *client,
if (gpio_is_valid(reset_pin)) {
/* this pulls reset down, enabling the low active reset */
- error = gpio_request_one(reset_pin, GPIOF_OUT_INIT_LOW,
- "edt-ft5x06 reset");
+ error = devm_gpio_request_one(&client->dev, reset_pin,
+ GPIOF_OUT_INIT_LOW,
+ "edt-ft5x06 reset");
if (error) {
dev_err(&client->dev,
"Failed to request GPIO %d as reset pin, error %d\n",
@@ -705,7 +706,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct edt_ft5x06_platform_data *pdata =
- client->dev.platform_data;
+ dev_get_platdata(&client->dev);
struct edt_ft5x06_ts_data *tsdata;
struct input_dev *input;
int error;
@@ -723,8 +724,8 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
return error;
if (gpio_is_valid(pdata->irq_pin)) {
- error = gpio_request_one(pdata->irq_pin,
- GPIOF_IN, "edt-ft5x06 irq");
+ error = devm_gpio_request_one(&client->dev, pdata->irq_pin,
+ GPIOF_IN, "edt-ft5x06 irq");
if (error) {
dev_err(&client->dev,
"Failed to request GPIO %d, error %d\n",
@@ -733,12 +734,16 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
}
}
- tsdata = kzalloc(sizeof(*tsdata), GFP_KERNEL);
- input = input_allocate_device();
- if (!tsdata || !input) {
+ tsdata = devm_kzalloc(&client->dev, sizeof(*tsdata), GFP_KERNEL);
+ if (!tsdata) {
dev_err(&client->dev, "failed to allocate driver data.\n");
- error = -ENOMEM;
- goto err_free_mem;
+ return -ENOMEM;
+ }
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input) {
+ dev_err(&client->dev, "failed to allocate input device.\n");
+ return -ENOMEM;
}
mutex_init(&tsdata->mutex);
@@ -749,7 +754,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
error = edt_ft5x06_ts_identify(client, tsdata->name, fw_version);
if (error) {
dev_err(&client->dev, "touchscreen probe failed\n");
- goto err_free_mem;
+ return error;
}
edt_ft5x06_ts_get_defaults(tsdata, pdata);
@@ -776,27 +781,30 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
error = input_mt_init_slots(input, MAX_SUPPORT_POINTS, 0);
if (error) {
dev_err(&client->dev, "Unable to init MT slots.\n");
- goto err_free_mem;
+ return error;
}
input_set_drvdata(input, tsdata);
i2c_set_clientdata(client, tsdata);
- error = request_threaded_irq(client->irq, NULL, edt_ft5x06_ts_isr,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- client->name, tsdata);
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, edt_ft5x06_ts_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->name, tsdata);
if (error) {
dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
- goto err_free_mem;
+ return error;
}
error = sysfs_create_group(&client->dev.kobj, &edt_ft5x06_attr_group);
if (error)
- goto err_free_irq;
+ return error;
error = input_register_device(input);
- if (error)
- goto err_remove_attrs;
+ if (error) {
+ sysfs_remove_group(&client->dev.kobj, &edt_ft5x06_attr_group);
+ return error;
+ }
edt_ft5x06_ts_prepare_debugfs(tsdata, dev_driver_string(&client->dev));
device_init_wakeup(&client->dev, 1);
@@ -806,40 +814,15 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
pdata->irq_pin, pdata->reset_pin);
return 0;
-
-err_remove_attrs:
- sysfs_remove_group(&client->dev.kobj, &edt_ft5x06_attr_group);
-err_free_irq:
- free_irq(client->irq, tsdata);
-err_free_mem:
- input_free_device(input);
- kfree(tsdata);
-
- if (gpio_is_valid(pdata->irq_pin))
- gpio_free(pdata->irq_pin);
-
- return error;
}
static int edt_ft5x06_ts_remove(struct i2c_client *client)
{
- const struct edt_ft5x06_platform_data *pdata =
- dev_get_platdata(&client->dev);
struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
edt_ft5x06_ts_teardown_debugfs(tsdata);
sysfs_remove_group(&client->dev.kobj, &edt_ft5x06_attr_group);
- free_irq(client->irq, tsdata);
- input_unregister_device(tsdata->input);
-
- if (gpio_is_valid(pdata->irq_pin))
- gpio_free(pdata->irq_pin);
- if (gpio_is_valid(pdata->reset_pin))
- gpio_free(pdata->reset_pin);
-
- kfree(tsdata);
-
return 0;
}
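
The edt-ft5x06 conversion above switches to device-managed resources, which is why the error-unwind labels and most of the remove() path vanish. A compact sketch of the same structure for a hypothetical I2C touchscreen (all "demo" names and the report handler are placeholders):

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

struct demo_ts {
	struct i2c_client *client;
	struct input_dev *input;
};

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	/* read the controller and report touches here */
	return IRQ_HANDLED;
}

static int demo_probe(struct i2c_client *client,
		      const struct i2c_device_id *id)
{
	struct demo_ts *ts;
	int error;

	ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
	if (!ts)
		return -ENOMEM;

	ts->client = client;

	ts->input = devm_input_allocate_device(&client->dev);
	if (!ts->input)
		return -ENOMEM;

	ts->input->name = "demo touchscreen";
	input_set_abs_params(ts->input, ABS_X, 0, 1023, 0, 0);
	input_set_abs_params(ts->input, ABS_Y, 0, 1023, 0, 0);

	error = devm_request_threaded_irq(&client->dev, client->irq,
					  NULL, demo_isr,
					  IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					  client->name, ts);
	if (error)
		return error;

	/* managed allocation: unregistered and freed automatically */
	return input_register_device(ts->input);
}

On any failure the already-acquired resources are released by the devres core in reverse order, so every error path becomes a plain return.
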
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 1ce3d29ffca5..b1884ddd7a84 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -157,7 +157,7 @@ static void eeti_ts_close(struct input_dev *dev)
static int eeti_ts_probe(struct i2c_client *client,
const struct i2c_device_id *idp)
{
- struct eeti_ts_platform_data *pdata = client->dev.platform_data;
+ struct eeti_ts_platform_data *pdata = dev_get_platdata(&client->dev);
struct eeti_ts_priv *priv;
struct input_dev *input;
unsigned int irq_flags;
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 054d22583248..e6bcb13680b2 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -18,7 +18,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/input.h>
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
index 957423d1471d..8051a4b704ea 100644
--- a/drivers/input/touchscreen/elo.c
+++ b/drivers/input/touchscreen/elo.c
@@ -22,7 +22,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#include <linux/ctype.h>
#define DRIVER_DESC "Elo serial touchscreen driver"
diff --git a/drivers/input/touchscreen/fujitsu_ts.c b/drivers/input/touchscreen/fujitsu_ts.c
index 10794ddbdf58..d0e46a7e183b 100644
--- a/drivers/input/touchscreen/fujitsu_ts.c
+++ b/drivers/input/touchscreen/fujitsu_ts.c
@@ -16,7 +16,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Fujitsu serial touchscreen driver"
diff --git a/drivers/input/touchscreen/gunze.c b/drivers/input/touchscreen/gunze.c
index 41c71766bf18..e2ee62615273 100644
--- a/drivers/input/touchscreen/gunze.c
+++ b/drivers/input/touchscreen/gunze.c
@@ -32,7 +32,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Gunze AHL-51S touchscreen driver"
diff --git a/drivers/input/touchscreen/hampshire.c b/drivers/input/touchscreen/hampshire.c
index 0cc47ea98acf..ecb1e0e01328 100644
--- a/drivers/input/touchscreen/hampshire.c
+++ b/drivers/input/touchscreen/hampshire.c
@@ -23,7 +23,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Hampshire serial touchscreen driver"
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 1418bdda61bb..2a5089139818 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -184,7 +184,7 @@ static int ili210x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- const struct ili210x_platform_data *pdata = dev->platform_data;
+ const struct ili210x_platform_data *pdata = dev_get_platdata(dev);
struct ili210x *priv;
struct input_dev *input;
struct panel_info panel;
diff --git a/drivers/input/touchscreen/inexio.c b/drivers/input/touchscreen/inexio.c
index a29c99c32245..adb80b65a259 100644
--- a/drivers/input/touchscreen/inexio.c
+++ b/drivers/input/touchscreen/inexio.c
@@ -23,7 +23,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "iNexio serial touchscreen driver"
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
index e30d837dae2f..4f6b156144e9 100644
--- a/drivers/input/touchscreen/intel-mid-touch.c
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -27,7 +27,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/err.h>
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index e463a79ffecc..7324c5c0fb86 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -14,7 +14,6 @@
*/
#include <linux/platform_device.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index 9101ee529c92..2058253b55d9 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -15,7 +15,6 @@
*/
#include <linux/platform_device.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index 7d2b2136e5ad..0786010d7ed0 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -25,7 +25,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c
index 9f84fcd08732..a68ec142ee9a 100644
--- a/drivers/input/touchscreen/max11801_ts.c
+++ b/drivers/input/touchscreen/max11801_ts.c
@@ -33,7 +33,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/input.h>
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
index f9f4e0c56eda..647e36f5930e 100644
--- a/drivers/input/touchscreen/mcs5000_ts.c
+++ b/drivers/input/touchscreen/mcs5000_ts.c
@@ -14,7 +14,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/mcs.h>
#include <linux/interrupt.h>
@@ -194,7 +193,7 @@ static int mcs5000_ts_probe(struct i2c_client *client,
struct input_dev *input_dev;
int ret;
- if (!client->dev.platform_data)
+ if (!dev_get_platdata(&client->dev))
return -EINVAL;
data = kzalloc(sizeof(struct mcs5000_ts_data), GFP_KERNEL);
@@ -207,7 +206,7 @@ static int mcs5000_ts_probe(struct i2c_client *client,
data->client = client;
data->input_dev = input_dev;
- data->platform_data = client->dev.platform_data;
+ data->platform_data = dev_get_platdata(&client->dev);
input_dev->name = "MELPAS MCS-5000 Touchscreen";
input_dev->id.bustype = BUS_I2C;
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 1443532fe6c4..8a598c065391 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -8,7 +8,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/i2c.h>
diff --git a/drivers/input/touchscreen/mtouch.c b/drivers/input/touchscreen/mtouch.c
index eb66b7c37c2f..9b5552a26169 100644
--- a/drivers/input/touchscreen/mtouch.c
+++ b/drivers/input/touchscreen/mtouch.c
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "MicroTouch serial touchscreen driver"
diff --git a/drivers/input/touchscreen/pcap_ts.c b/drivers/input/touchscreen/pcap_ts.c
index f22e04dd4e16..cff2376817e5 100644
--- a/drivers/input/touchscreen/pcap_ts.c
+++ b/drivers/input/touchscreen/pcap_ts.c
@@ -11,7 +11,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/slab.h>
diff --git a/drivers/input/touchscreen/penmount.c b/drivers/input/touchscreen/penmount.c
index b49f0b836925..417d87379265 100644
--- a/drivers/input/touchscreen/penmount.c
+++ b/drivers/input/touchscreen/penmount.c
@@ -21,7 +21,6 @@
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "PenMount serial touchscreen driver"
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 6cc6b36663ff..02392d2061d6 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -128,7 +128,8 @@ static SIMPLE_DEV_PM_OPS(pixcir_dev_pm_ops,
static int pixcir_i2c_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct pixcir_ts_platform_data *pdata = client->dev.platform_data;
+ const struct pixcir_ts_platform_data *pdata =
+ dev_get_platdata(&client->dev);
struct pixcir_i2c_ts_data *tsdata;
struct input_dev *input;
int error;
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index b061af2c8376..19cb247dbb86 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/input.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@@ -251,7 +250,7 @@ static int s3c2410ts_probe(struct platform_device *pdev)
ts.dev = dev;
- info = pdev->dev.platform_data;
+ info = dev_get_platdata(&pdev->dev);
if (!info) {
dev_err(dev, "no platform data, cannot attach\n");
return -EINVAL;
@@ -392,7 +391,7 @@ static int s3c2410ts_suspend(struct device *dev)
static int s3c2410ts_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
- struct s3c2410_ts_mach_info *info = pdev->dev.platform_data;
+ struct s3c2410_ts_mach_info *info = dev_get_platdata(&pdev->dev);
clk_enable(ts.clock);
enable_irq(ts.irq_tc);
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 2f03b2f289dd..5c342b3139e8 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -154,7 +154,7 @@ static int st1232_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct st1232_ts_data *ts;
- struct st1232_pdata *pdata = client->dev.platform_data;
+ struct st1232_pdata *pdata = dev_get_platdata(&client->dev);
struct input_dev *input_dev;
int error;
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index 59e81b00f244..42ce31afa259 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 68beadaabceb..4e793a17361f 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -14,7 +14,6 @@
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -198,7 +197,7 @@ static void titsc_step_config(struct titsc *ts_dev)
/* The steps1 … end and bit 0 for TS_Charge */
stepenable = (1 << (end_step + 2)) - 1;
ts_dev->step_mask = stepenable;
- am335x_tsc_se_set(ts_dev->mfd_tscadc, ts_dev->step_mask);
+ am335x_tsc_se_set_cache(ts_dev->mfd_tscadc, ts_dev->step_mask);
}
static void titsc_read_coordinates(struct titsc *ts_dev,
@@ -322,7 +321,7 @@ static irqreturn_t titsc_irq(int irq, void *dev)
if (irqclr) {
titsc_writel(ts_dev, REG_IRQSTATUS, irqclr);
- am335x_tsc_se_set(ts_dev->mfd_tscadc, ts_dev->step_mask);
+ am335x_tsc_se_set_cache(ts_dev->mfd_tscadc, ts_dev->step_mask);
return IRQ_HANDLED;
}
return IRQ_NONE;
diff --git a/drivers/input/touchscreen/touchit213.c b/drivers/input/touchscreen/touchit213.c
index 5f29e5b8e1c1..c27cf8f3d1ca 100644
--- a/drivers/input/touchscreen/touchit213.c
+++ b/drivers/input/touchscreen/touchit213.c
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Sahara TouchIT-213 serial touchscreen driver"
diff --git a/drivers/input/touchscreen/touchright.c b/drivers/input/touchscreen/touchright.c
index 8a2887daf194..4000e5205407 100644
--- a/drivers/input/touchscreen/touchright.c
+++ b/drivers/input/touchscreen/touchright.c
@@ -20,7 +20,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Touchright serial touchscreen driver"
diff --git a/drivers/input/touchscreen/touchwin.c b/drivers/input/touchscreen/touchwin.c
index 588cdcb839dd..ba90f447df75 100644
--- a/drivers/input/touchscreen/touchwin.c
+++ b/drivers/input/touchscreen/touchwin.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Touchwindow serial touchscreen driver"
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 811353353917..550adcbbfc23 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -571,7 +571,7 @@ static void tsc2005_setup_spi_xfer(struct tsc2005 *ts)
static int tsc2005_probe(struct spi_device *spi)
{
- const struct tsc2005_platform_data *pdata = spi->dev.platform_data;
+ const struct tsc2005_platform_data *pdata = dev_get_platdata(&spi->dev);
struct tsc2005 *ts;
struct input_dev *input_dev;
unsigned int max_x, max_y, max_p;
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index 0b67ba476b4c..1bf9906b5a3f 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -26,6 +26,9 @@
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/i2c/tsc2007.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#define TSC2007_MEASURE_TEMP0 (0x0 << 4)
#define TSC2007_MEASURE_AUX (0x2 << 4)
@@ -72,15 +75,18 @@ struct tsc2007 {
u16 model;
u16 x_plate_ohms;
u16 max_rt;
- unsigned long poll_delay;
unsigned long poll_period;
+ int fuzzx;
+ int fuzzy;
+ int fuzzz;
+ unsigned gpio;
int irq;
wait_queue_head_t wait;
bool stopped;
- int (*get_pendown_state)(void);
+ int (*get_pendown_state)(struct device *);
void (*clear_penirq)(void);
};
@@ -161,7 +167,7 @@ static bool tsc2007_is_pen_down(struct tsc2007 *ts)
if (!ts->get_pendown_state)
return true;
- return ts->get_pendown_state();
+ return ts->get_pendown_state(&ts->client->dev);
}
static irqreturn_t tsc2007_soft_irq(int irq, void *handle)
@@ -178,7 +184,7 @@ static irqreturn_t tsc2007_soft_irq(int irq, void *handle)
rt = tsc2007_calculate_pressure(ts, &tc);
- if (rt == 0 && !ts->get_pendown_state) {
+ if (!rt && !ts->get_pendown_state) {
/*
* If pressure reported is 0 and we don't have
* callback to check pendown state, we have to
@@ -228,7 +234,7 @@ static irqreturn_t tsc2007_hard_irq(int irq, void *handle)
{
struct tsc2007 *ts = handle;
- if (!ts->get_pendown_state || likely(ts->get_pendown_state()))
+ if (tsc2007_is_pen_down(ts))
return IRQ_WAKE_THREAD;
if (ts->clear_penirq)
@@ -273,49 +279,134 @@ static void tsc2007_close(struct input_dev *input_dev)
tsc2007_stop(ts);
}
-static int tsc2007_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+#ifdef CONFIG_OF
+static int tsc2007_get_pendown_state_gpio(struct device *dev)
{
- struct tsc2007 *ts;
- struct tsc2007_platform_data *pdata = client->dev.platform_data;
- struct input_dev *input_dev;
- int err;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tsc2007 *ts = i2c_get_clientdata(client);
+
+ return !gpio_get_value(ts->gpio);
+}
+
+static int tsc2007_probe_dt(struct i2c_client *client, struct tsc2007 *ts)
+{
+ struct device_node *np = client->dev.of_node;
+ u32 val32;
+ u64 val64;
- if (!pdata) {
- dev_err(&client->dev, "platform data is required!\n");
+ if (!np) {
+ dev_err(&client->dev, "missing device tree data\n");
return -EINVAL;
}
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_READ_WORD_DATA))
- return -EIO;
+ if (!of_property_read_u32(np, "ti,max-rt", &val32))
+ ts->max_rt = val32;
+ else
+ ts->max_rt = MAX_12BIT;
+
+ if (!of_property_read_u32(np, "ti,fuzzx", &val32))
+ ts->fuzzx = val32;
+
+ if (!of_property_read_u32(np, "ti,fuzzy", &val32))
+ ts->fuzzy = val32;
+
+ if (!of_property_read_u32(np, "ti,fuzzz", &val32))
+ ts->fuzzz = val32;
+
+ if (!of_property_read_u64(np, "ti,poll-period", &val64))
+ ts->poll_period = val64;
+ else
+ ts->poll_period = 1;
- ts = kzalloc(sizeof(struct tsc2007), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ts || !input_dev) {
- err = -ENOMEM;
- goto err_free_mem;
+ if (!of_property_read_u32(np, "ti,x-plate-ohms", &val32)) {
+ ts->x_plate_ohms = val32;
+ } else {
+ dev_err(&client->dev, "missing ti,x-plate-ohms devicetree property.");
+ return -EINVAL;
}
- ts->client = client;
- ts->irq = client->irq;
- ts->input = input_dev;
- init_waitqueue_head(&ts->wait);
+ ts->gpio = of_get_gpio(np, 0);
+ if (gpio_is_valid(ts->gpio))
+ ts->get_pendown_state = tsc2007_get_pendown_state_gpio;
+ else
+ dev_warn(&client->dev,
+ "GPIO not specified in DT (of_get_gpio returned %d)\n",
+ ts->gpio);
+
+ return 0;
+}
+#else
+static int tsc2007_probe_dt(struct i2c_client *client, struct tsc2007 *ts)
+{
+ dev_err(&client->dev, "platform data is required!\n");
+ return -EINVAL;
+}
+#endif
+static int tsc2007_probe_pdev(struct i2c_client *client, struct tsc2007 *ts,
+ const struct tsc2007_platform_data *pdata,
+ const struct i2c_device_id *id)
+{
ts->model = pdata->model;
ts->x_plate_ohms = pdata->x_plate_ohms;
ts->max_rt = pdata->max_rt ? : MAX_12BIT;
- ts->poll_delay = pdata->poll_delay ? : 1;
ts->poll_period = pdata->poll_period ? : 1;
ts->get_pendown_state = pdata->get_pendown_state;
ts->clear_penirq = pdata->clear_penirq;
+ ts->fuzzx = pdata->fuzzx;
+ ts->fuzzy = pdata->fuzzy;
+ ts->fuzzz = pdata->fuzzz;
if (pdata->x_plate_ohms == 0) {
dev_err(&client->dev, "x_plate_ohms is not set up in platform data");
- err = -EINVAL;
- goto err_free_mem;
+ return -EINVAL;
}
+ return 0;
+}
+
+static void tsc2007_call_exit_platform_hw(void *data)
+{
+ struct device *dev = data;
+ const struct tsc2007_platform_data *pdata = dev_get_platdata(dev);
+
+ pdata->exit_platform_hw();
+}
+
+static int tsc2007_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct tsc2007_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct tsc2007 *ts;
+ struct input_dev *input_dev;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA))
+ return -EIO;
+
+ ts = devm_kzalloc(&client->dev, sizeof(struct tsc2007), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ if (pdata)
+ err = tsc2007_probe_pdev(client, ts, pdata, id);
+ else
+ err = tsc2007_probe_dt(client, ts);
+ if (err)
+ return err;
+
+ input_dev = devm_input_allocate_device(&client->dev);
+ if (!input_dev)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, ts);
+
+ ts->client = client;
+ ts->irq = client->irq;
+ ts->input = input_dev;
+ init_waitqueue_head(&ts->wait);
+
snprintf(ts->phys, sizeof(ts->phys),
"%s/input0", dev_name(&client->dev));
@@ -331,53 +422,46 @@ static int tsc2007_probe(struct i2c_client *client,
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
- input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, pdata->fuzzx, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, pdata->fuzzy, 0);
+ input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, ts->fuzzx, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, ts->fuzzy, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT,
- pdata->fuzzz, 0);
+ ts->fuzzz, 0);
+
+ if (pdata) {
+ if (pdata->exit_platform_hw) {
+ err = devm_add_action(&client->dev,
+ tsc2007_call_exit_platform_hw,
+ &client->dev);
+ if (err) {
+ dev_err(&client->dev,
+ "Failed to register exit_platform_hw action, %d\n",
+ err);
+ return err;
+ }
+ }
- if (pdata->init_platform_hw)
- pdata->init_platform_hw();
+ if (pdata->init_platform_hw)
+ pdata->init_platform_hw();
+ }
- err = request_threaded_irq(ts->irq, tsc2007_hard_irq, tsc2007_soft_irq,
- IRQF_ONESHOT, client->dev.driver->name, ts);
- if (err < 0) {
- dev_err(&client->dev, "irq %d busy?\n", ts->irq);
- goto err_free_mem;
+ err = devm_request_threaded_irq(&client->dev, ts->irq,
+ tsc2007_hard_irq, tsc2007_soft_irq,
+ IRQF_ONESHOT,
+ client->dev.driver->name, ts);
+ if (err) {
+ dev_err(&client->dev, "Failed to request irq %d: %d\n",
+ ts->irq, err);
+ return err;
}
tsc2007_stop(ts);
err = input_register_device(input_dev);
- if (err)
- goto err_free_irq;
-
- i2c_set_clientdata(client, ts);
-
- return 0;
-
- err_free_irq:
- free_irq(ts->irq, ts);
- if (pdata->exit_platform_hw)
- pdata->exit_platform_hw();
- err_free_mem:
- input_free_device(input_dev);
- kfree(ts);
- return err;
-}
-
-static int tsc2007_remove(struct i2c_client *client)
-{
- struct tsc2007 *ts = i2c_get_clientdata(client);
- struct tsc2007_platform_data *pdata = client->dev.platform_data;
-
- free_irq(ts->irq, ts);
-
- if (pdata->exit_platform_hw)
- pdata->exit_platform_hw();
-
- input_unregister_device(ts->input);
- kfree(ts);
+ if (err) {
+ dev_err(&client->dev,
+ "Failed to register input device: %d\n", err);
+ return err;
+ }
return 0;
}
@@ -389,14 +473,22 @@ static const struct i2c_device_id tsc2007_idtable[] = {
MODULE_DEVICE_TABLE(i2c, tsc2007_idtable);
+#ifdef CONFIG_OF
+static const struct of_device_id tsc2007_of_match[] = {
+ { .compatible = "ti,tsc2007" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tsc2007_of_match);
+#endif
+
static struct i2c_driver tsc2007_driver = {
.driver = {
.owner = THIS_MODULE,
- .name = "tsc2007"
+ .name = "tsc2007",
+ .of_match_table = of_match_ptr(tsc2007_of_match),
},
.id_table = tsc2007_idtable,
.probe = tsc2007_probe,
- .remove = tsc2007_remove,
};
module_i2c_driver(tsc2007_driver);
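
The tsc2007 rework adds device-tree probing next to the legacy platform-data path. A sketch of the DT side only, using the property and compatible names visible in the hunk (the surrounding driver is a stub):

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

struct demo_cfg {
	u32 x_plate_ohms;
	u32 max_rt;
	int gpio;
};

static int demo_parse_dt(struct i2c_client *client, struct demo_cfg *cfg)
{
	struct device_node *np = client->dev.of_node;

	if (!np)
		return -EINVAL;

	/* required property: refuse to probe without it */
	if (of_property_read_u32(np, "ti,x-plate-ohms", &cfg->x_plate_ohms)) {
		dev_err(&client->dev, "missing ti,x-plate-ohms property\n");
		return -EINVAL;
	}

	/* optional property with a fallback default */
	if (of_property_read_u32(np, "ti,max-rt", &cfg->max_rt))
		cfg->max_rt = 4095;

	/* first GPIO of the node, used for pen-down detection if valid */
	cfg->gpio = of_get_gpio(np, 0);

	return 0;
}

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "ti,tsc2007" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

Pointing .of_match_table at such a table through of_match_ptr() keeps the reference compiled out when CONFIG_OF is disabled, which is what the driver-struct hunk does.
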
diff --git a/drivers/input/touchscreen/tsc40.c b/drivers/input/touchscreen/tsc40.c
index eb96f168fb9d..29687872cb94 100644
--- a/drivers/input/touchscreen/tsc40.c
+++ b/drivers/input/touchscreen/tsc40.c
@@ -11,7 +11,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define PACKET_LENGTH 5
struct tsc_ser {
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 1271f97b4079..b46c55cd1bbb 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -19,7 +19,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/wait.h>
@@ -320,7 +319,7 @@ static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
static int ucb1400_ts_probe(struct platform_device *pdev)
{
- struct ucb1400_ts *ucb = pdev->dev.platform_data;
+ struct ucb1400_ts *ucb = dev_get_platdata(&pdev->dev);
int error, x_res, y_res;
u16 fcsr;
@@ -399,7 +398,7 @@ err:
static int ucb1400_ts_remove(struct platform_device *pdev)
{
- struct ucb1400_ts *ucb = pdev->dev.platform_data;
+ struct ucb1400_ts *ucb = dev_get_platdata(&pdev->dev);
free_irq(ucb->irq, ucb);
input_unregister_device(ucb->ts_idev);
@@ -410,7 +409,7 @@ static int ucb1400_ts_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int ucb1400_ts_suspend(struct device *dev)
{
- struct ucb1400_ts *ucb = dev->platform_data;
+ struct ucb1400_ts *ucb = dev_get_platdata(dev);
struct input_dev *idev = ucb->ts_idev;
mutex_lock(&idev->mutex);
@@ -424,7 +423,7 @@ static int ucb1400_ts_suspend(struct device *dev)
static int ucb1400_ts_resume(struct device *dev)
{
- struct ucb1400_ts *ucb = dev->platform_data;
+ struct ucb1400_ts *ucb = dev_get_platdata(dev);
struct input_dev *idev = ucb->ts_idev;
mutex_lock(&idev->mutex);
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 5f87bed05467..a0966331a89b 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -51,7 +51,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <linux/hid.h>
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 9a83be6b6584..2792ca397dd0 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -18,7 +18,6 @@
#include <linux/slab.h>
#include <linux/input/mt.h>
#include <linux/serio.h>
-#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/delay.h>
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index 6be2eb6a153a..1b953a066b2c 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/string.h>
#include <linux/pm.h>
#include <linux/input.h>
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 7e45c9f6e6b7..d0ef91fc87d1 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -584,7 +584,7 @@ static void wm97xx_ts_input_close(struct input_dev *idev)
static int wm97xx_probe(struct device *dev)
{
struct wm97xx *wm;
- struct wm97xx_pdata *pdata = dev->platform_data;
+ struct wm97xx_pdata *pdata = dev_get_platdata(dev);
int ret = 0, id = 0;
wm = kzalloc(sizeof(struct wm97xx), GFP_KERNEL);
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 75762d6ff3ba..2175f3419002 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -279,7 +279,8 @@ static int zforce_start(struct zforce_ts *ts)
goto error;
}
- if (zforce_setconfig(ts, SETCONFIG_DUALTOUCH)) {
+ ret = zforce_setconfig(ts, SETCONFIG_DUALTOUCH);
+ if (ret) {
dev_err(&client->dev, "Unable to set config\n");
goto error;
}
@@ -455,7 +456,18 @@ static void zforce_complete(struct zforce_ts *ts, int cmd, int result)
}
}
-static irqreturn_t zforce_interrupt(int irq, void *dev_id)
+static irqreturn_t zforce_irq(int irq, void *dev_id)
+{
+ struct zforce_ts *ts = dev_id;
+ struct i2c_client *client = ts->client;
+
+ if (ts->suspended && device_may_wakeup(&client->dev))
+ pm_wakeup_event(&client->dev, 500);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
{
struct zforce_ts *ts = dev_id;
struct i2c_client *client = ts->client;
@@ -465,12 +477,10 @@ static irqreturn_t zforce_interrupt(int irq, void *dev_id)
u8 *payload;
/*
- * When suspended, emit a wakeup signal if necessary and return.
+ * When still suspended, return.
* Due to the level-interrupt we will get re-triggered later.
*/
if (ts->suspended) {
- if (device_may_wakeup(&client->dev))
- pm_wakeup_event(&client->dev, 500);
msleep(20);
return IRQ_HANDLED;
}
@@ -763,8 +773,8 @@ static int zforce_probe(struct i2c_client *client,
* Therefore we can trigger the interrupt anytime it is low and do
* not need to limit it to the interrupt edge.
*/
- ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
- zforce_interrupt,
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ zforce_irq, zforce_irq_thread,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
input_dev->name, ts);
if (ret) {
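The zforce change above splits the single handler into a hard-irq handler plus a threaded handler: zforce_irq() runs in atomic context, only emits a wakeup event while the device is suspended, and returns IRQ_WAKE_THREAD so that zforce_irq_thread(), which does the sleeping I2C work, runs afterwards. A minimal sketch of the pattern follows; struct foo_priv, foo_handle_packet() and the priv fields are illustrative, not the driver's own names.

    static irqreturn_t foo_hardirq(int irq, void *dev_id)
    {
            struct foo_priv *priv = dev_id;

            /* atomic context: only cheap work, e.g. signalling a wakeup */
            if (priv->suspended && device_may_wakeup(priv->dev))
                    pm_wakeup_event(priv->dev, 500);

            return IRQ_WAKE_THREAD;         /* run foo_thread() next */
    }

    static irqreturn_t foo_thread(int irq, void *dev_id)
    {
            struct foo_priv *priv = dev_id;

            /* process context: sleeping work such as I2C transfers is fine */
            foo_handle_packet(priv);
            return IRQ_HANDLED;
    }

    /* in probe(): both handlers are registered in one call */
    ret = devm_request_threaded_irq(dev, irq, foo_hardirq, foo_thread,
                                    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                    "foo", priv);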
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c
index bf0869a7a78e..e2ccd683de6e 100644
--- a/drivers/input/touchscreen/zylonite-wm97xx.c
+++ b/drivers/input/touchscreen/zylonite-wm97xx.c
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/irq.h>
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 3e7fdbb4916b..79bbc21c1d01 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -207,6 +207,7 @@ config SHMOBILE_IOMMU
bool "IOMMU for Renesas IPMMU/IPMMUI"
default n
depends on ARM
+ depends on SH_MOBILE || COMPILE_TEST
select IOMMU_API
select ARM_DMA_USE_IOMMU
select SHMOBILE_IPMMU
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 72531f008a5e..faf0da4bb3a2 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -248,8 +248,8 @@ static bool check_device(struct device *dev)
if (!dev || !dev->dma_mask)
return false;
- /* No device or no PCI device */
- if (dev->bus != &pci_bus_type)
+ /* No PCI device */
+ if (!dev_is_pci(dev))
return false;
devid = get_device_id(dev);
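This hunk, like the matching ones in fsl_pamu_domain.c and intel-iommu.c further down, replaces the open-coded bus comparison with the dev_is_pci() helper. The check is equivalent; the helper is essentially the one-liner sketched below (and evaluates to false when PCI support is not built in), so !dev_is_pci(dev) rejects non-PCI devices exactly as the old test did.

    /* sketch of the helper, as defined for CONFIG_PCI builds */
    #define dev_is_pci(d)   ((d)->bus == &pci_bus_type)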
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 8f798be6e398..28b4bea7c109 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -26,7 +26,6 @@
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
-#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e46a88700b68..8911850c9444 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -24,7 +24,7 @@
* - v7/v8 long-descriptor format
* - Non-secure access to the SMMU
* - 4k and 64k pages, with contiguous pte hints.
- * - Up to 39-bit addressing
+ * - Up to 42-bit addressing (dependent on VA_BITS)
* - Context fault reporting
*/
@@ -61,12 +61,13 @@
#define ARM_SMMU_GR1(smmu) ((smmu)->base + (smmu)->pagesize)
/* Page table bits */
-#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0)
+#define ARM_SMMU_PTE_XN (((pteval_t)3) << 53)
#define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52)
#define ARM_SMMU_PTE_AF (((pteval_t)1) << 10)
#define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8)
#define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8)
#define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8)
+#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0)
#if PAGE_SIZE == SZ_4K
#define ARM_SMMU_PTE_CONT_ENTRIES 16
@@ -1205,7 +1206,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
unsigned long pfn, int flags, int stage)
{
pte_t *pte, *start;
- pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
+ pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
if (pmd_none(*pmd)) {
/* Allocate a new set of tables */
@@ -1244,7 +1245,9 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
}
/* If no access, create a faulting entry to avoid TLB fills */
- if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
+ if (flags & IOMMU_EXEC)
+ pteval &= ~ARM_SMMU_PTE_XN;
+ else if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
pteval &= ~ARM_SMMU_PTE_PAGE;
pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1494,6 +1497,13 @@ static int arm_smmu_add_device(struct device *dev)
{
struct arm_smmu_device *child, *parent, *smmu;
struct arm_smmu_master *master = NULL;
+ struct iommu_group *group;
+ int ret;
+
+ if (dev->archdata.iommu) {
+ dev_warn(dev, "IOMMU driver already assigned to device\n");
+ return -EINVAL;
+ }
spin_lock(&arm_smmu_devices_lock);
list_for_each_entry(parent, &arm_smmu_devices, list) {
@@ -1526,13 +1536,23 @@ static int arm_smmu_add_device(struct device *dev)
if (!master)
return -ENODEV;
+ group = iommu_group_alloc();
+ if (IS_ERR(group)) {
+ dev_err(dev, "Failed to allocate IOMMU group\n");
+ return PTR_ERR(group);
+ }
+
+ ret = iommu_group_add_device(group, dev);
+ iommu_group_put(group);
dev->archdata.iommu = smmu;
- return 0;
+
+ return ret;
}
static void arm_smmu_remove_device(struct device *dev)
{
dev->archdata.iommu = NULL;
+ iommu_group_remove_device(dev);
}
static struct iommu_ops arm_smmu_ops = {
@@ -1730,7 +1750,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
* allocation (PTRS_PER_PGD).
*/
#ifdef CONFIG_64BIT
- /* Current maximum output size of 39 bits */
smmu->s1_output_size = min(39UL, size);
#else
smmu->s1_output_size = min(32UL, size);
@@ -1745,7 +1764,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
} else {
#ifdef CONFIG_64BIT
size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
- size = min(39, arm_smmu_id_size_to_bits(size));
+ size = min(VA_BITS, arm_smmu_id_size_to_bits(size));
#else
size = 32;
#endif
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 8b452c9676d9..158156543410 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -52,6 +52,9 @@ LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
+static int alloc_iommu(struct dmar_drhd_unit *drhd);
+static void free_iommu(struct intel_iommu *iommu);
+
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
/*
@@ -100,7 +103,6 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
if (!pdev) {
pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
segment, scope->bus, path->device, path->function);
- *dev = NULL;
return 0;
}
if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
@@ -151,7 +153,7 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
ret = dmar_parse_one_dev_scope(scope,
&(*devices)[index], segment);
if (ret) {
- kfree(*devices);
+ dmar_free_dev_scope(devices, cnt);
return ret;
}
index ++;
@@ -162,6 +164,17 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
return 0;
}
+void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
+{
+ if (*devices && *cnt) {
+ while (--*cnt >= 0)
+ pci_dev_put((*devices)[*cnt]);
+ kfree(*devices);
+ *devices = NULL;
+ *cnt = 0;
+ }
+}
+
/**
* dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
* structure which uniquely represent one DMA remapping hardware unit
@@ -193,25 +206,28 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
return 0;
}
+static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
+{
+ if (dmaru->devices && dmaru->devices_cnt)
+ dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
+ if (dmaru->iommu)
+ free_iommu(dmaru->iommu);
+ kfree(dmaru);
+}
+
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
struct acpi_dmar_hardware_unit *drhd;
- int ret = 0;
drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
if (dmaru->include_all)
return 0;
- ret = dmar_parse_dev_scope((void *)(drhd + 1),
- ((void *)drhd) + drhd->header.length,
- &dmaru->devices_cnt, &dmaru->devices,
- drhd->segment);
- if (ret) {
- list_del(&dmaru->list);
- kfree(dmaru);
- }
- return ret;
+ return dmar_parse_dev_scope((void *)(drhd + 1),
+ ((void *)drhd) + drhd->header.length,
+ &dmaru->devices_cnt, &dmaru->devices,
+ drhd->segment);
}
#ifdef CONFIG_ACPI_NUMA
@@ -423,7 +439,7 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
int __init dmar_dev_scope_init(void)
{
static int dmar_dev_scope_initialized;
- struct dmar_drhd_unit *drhd, *drhd_n;
+ struct dmar_drhd_unit *drhd;
int ret = -ENODEV;
if (dmar_dev_scope_initialized)
@@ -432,7 +448,7 @@ int __init dmar_dev_scope_init(void)
if (list_empty(&dmar_drhd_units))
goto fail;
- list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
+ list_for_each_entry(drhd, &dmar_drhd_units, list) {
ret = dmar_parse_dev(drhd);
if (ret)
goto fail;
@@ -456,24 +472,23 @@ int __init dmar_table_init(void)
static int dmar_table_initialized;
int ret;
- if (dmar_table_initialized)
- return 0;
-
- dmar_table_initialized = 1;
-
- ret = parse_dmar_table();
- if (ret) {
- if (ret != -ENODEV)
- pr_info("parse DMAR table failure.\n");
- return ret;
- }
+ if (dmar_table_initialized == 0) {
+ ret = parse_dmar_table();
+ if (ret < 0) {
+ if (ret != -ENODEV)
+ pr_info("parse DMAR table failure.\n");
+ } else if (list_empty(&dmar_drhd_units)) {
+ pr_info("No DMAR devices found\n");
+ ret = -ENODEV;
+ }
- if (list_empty(&dmar_drhd_units)) {
- pr_info("No DMAR devices found\n");
- return -ENODEV;
+ if (ret < 0)
+ dmar_table_initialized = ret;
+ else
+ dmar_table_initialized = 1;
}
- return 0;
+ return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
@@ -488,7 +503,7 @@ static void warn_invalid_dmar(u64 addr, const char *message)
dmi_get_system_info(DMI_PRODUCT_VERSION));
}
-int __init check_zero_address(void)
+static int __init check_zero_address(void)
{
struct acpi_table_dmar *dmar;
struct acpi_dmar_header *entry_header;
@@ -546,14 +561,6 @@ int __init detect_intel_iommu(void)
if (ret)
ret = check_zero_address();
{
- struct acpi_table_dmar *dmar;
-
- dmar = (struct acpi_table_dmar *) dmar_tbl;
-
- if (ret && irq_remapping_enabled && cpu_has_x2apic &&
- dmar->flags & 0x1)
- pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
-
if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
iommu_detected = 1;
/* Make sure ACS will be enabled */
@@ -565,7 +572,7 @@ int __init detect_intel_iommu(void)
x86_init.iommu.iommu_init = intel_iommu_init;
#endif
}
- early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
+ early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
dmar_tbl = NULL;
return ret ? 1 : -ENODEV;
@@ -647,7 +654,7 @@ out:
return err;
}
-int alloc_iommu(struct dmar_drhd_unit *drhd)
+static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
u32 ver, sts;
@@ -721,12 +728,19 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
return err;
}
-void free_iommu(struct intel_iommu *iommu)
+static void free_iommu(struct intel_iommu *iommu)
{
- if (!iommu)
- return;
+ if (iommu->irq) {
+ free_irq(iommu->irq, iommu);
+ irq_set_handler_data(iommu->irq, NULL);
+ destroy_irq(iommu->irq);
+ }
- free_dmar_iommu(iommu);
+ if (iommu->qi) {
+ free_page((unsigned long)iommu->qi->desc);
+ kfree(iommu->qi->desc_status);
+ kfree(iommu->qi);
+ }
if (iommu->reg)
unmap_iommu(iommu);
@@ -1050,7 +1064,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
if (!desc_page) {
kfree(qi);
- iommu->qi = 0;
+ iommu->qi = NULL;
return -ENOMEM;
}
@@ -1060,7 +1074,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
if (!qi->desc_status) {
free_page((unsigned long) qi->desc);
kfree(qi);
- iommu->qi = 0;
+ iommu->qi = NULL;
return -ENOMEM;
}
@@ -1111,9 +1125,7 @@ static const char *irq_remap_fault_reasons[] =
"Blocked an interrupt request due to source-id verification failure",
};
-#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
-
-const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
+static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
if (fault_reason >= 0x20 && (fault_reason - 0x20 <
ARRAY_SIZE(irq_remap_fault_reasons))) {
@@ -1303,15 +1315,14 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
int __init enable_drhd_fault_handling(void)
{
struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
/*
* Enable fault control interrupt.
*/
- for_each_drhd_unit(drhd) {
- int ret;
- struct intel_iommu *iommu = drhd->iommu;
+ for_each_iommu(iommu, drhd) {
u32 fault_status;
- ret = dmar_set_interrupt(iommu);
+ int ret = dmar_set_interrupt(iommu);
if (ret) {
pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
@@ -1366,4 +1377,22 @@ int __init dmar_ir_support(void)
return 0;
return dmar->flags & 0x1;
}
+
+static int __init dmar_free_unused_resources(void)
+{
+ struct dmar_drhd_unit *dmaru, *dmaru_n;
+
+ /* DMAR units are in use */
+ if (irq_remapping_enabled || intel_iommu_enabled)
+ return 0;
+
+ list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
+ list_del(&dmaru->list);
+ dmar_free_drhd(dmaru);
+ }
+
+ return 0;
+}
+
+late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index c857c30da979..93072ba44b1d 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -691,7 +691,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
* Use LIODN of the PCI controller while attaching a
* PCI device.
*/
- if (dev->bus == &pci_bus_type) {
+ if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
pci_ctl = pci_bus_to_host(pdev->bus);
/*
@@ -729,7 +729,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
* Use LIODN of the PCI controller while detaching a
* PCI device.
*/
- if (dev->bus == &pci_bus_type) {
+ if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
pci_ctl = pci_bus_to_host(pdev->bus);
/*
@@ -1056,7 +1056,7 @@ static int fsl_pamu_add_device(struct device *dev)
* For platform devices we allocate a separate group for
* each of the devices.
*/
- if (dev->bus == &pci_bus_type) {
+ if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
/* Don't create device groups for virtual PCI bridges */
if (pdev->subordinate)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 43b9bfea48fa..a22c86c867fa 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -63,6 +63,7 @@
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
#define MAX_AGAW_WIDTH 64
+#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
@@ -106,12 +107,12 @@ static inline int agaw_to_level(int agaw)
static inline int agaw_to_width(int agaw)
{
- return 30 + agaw * LEVEL_STRIDE;
+ return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}
static inline int width_to_agaw(int width)
{
- return (width - 30) / LEVEL_STRIDE;
+ return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
static inline unsigned int level_to_offset_bits(int level)
@@ -141,7 +142,7 @@ static inline unsigned long align_to_level(unsigned long pfn, int level)
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
- return 1 << ((lvl - 1) * LEVEL_STRIDE);
+ return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
@@ -288,26 +289,6 @@ static inline void dma_clear_pte(struct dma_pte *pte)
pte->val = 0;
}
-static inline void dma_set_pte_readable(struct dma_pte *pte)
-{
- pte->val |= DMA_PTE_READ;
-}
-
-static inline void dma_set_pte_writable(struct dma_pte *pte)
-{
- pte->val |= DMA_PTE_WRITE;
-}
-
-static inline void dma_set_pte_snp(struct dma_pte *pte)
-{
- pte->val |= DMA_PTE_SNP;
-}
-
-static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
-{
- pte->val = (pte->val & ~3) | (prot & 3);
-}
-
static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
@@ -318,11 +299,6 @@ static inline u64 dma_pte_addr(struct dma_pte *pte)
#endif
}
-static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
-{
- pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
-}
-
static inline bool dma_pte_present(struct dma_pte *pte)
{
return (pte->val & 3) != 0;
@@ -406,7 +382,7 @@ struct device_domain_info {
static void flush_unmaps_timeout(unsigned long data);
-DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
+static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
@@ -652,9 +628,7 @@ static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
struct dmar_drhd_unit *drhd = NULL;
int i;
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
+ for_each_active_drhd_unit(drhd) {
if (segment != drhd->segment)
continue;
@@ -865,7 +839,6 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
unsigned int large_page = 1;
struct dma_pte *first_pte, *pte;
- int order;
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -890,8 +863,7 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
} while (start_pfn && start_pfn <= last_pfn);
- order = (large_page - 1) * 9;
- return order;
+ return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH);
}
static void dma_pte_free_level(struct dmar_domain *domain, int level,
@@ -917,7 +889,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
/* If range covers entire pagetable, free it */
if (!(start_pfn > level_pfn ||
- last_pfn < level_pfn + level_size(level))) {
+ last_pfn < level_pfn + level_size(level) - 1)) {
dma_clear_pte(pte);
domain_flush_cache(domain, pte, sizeof(*pte));
free_pgtable_page(level_pte);
@@ -1255,8 +1227,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
unsigned long nlongs;
ndomains = cap_ndoms(iommu->cap);
- pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
- ndomains);
+ pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
+ iommu->seq_id, ndomains);
nlongs = BITS_TO_LONGS(ndomains);
spin_lock_init(&iommu->lock);
@@ -1266,13 +1238,17 @@ static int iommu_init_domains(struct intel_iommu *iommu)
*/
iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
if (!iommu->domain_ids) {
- printk(KERN_ERR "Allocating domain id array failed\n");
+ pr_err("IOMMU%d: allocating domain id array failed\n",
+ iommu->seq_id);
return -ENOMEM;
}
iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
GFP_KERNEL);
if (!iommu->domains) {
- printk(KERN_ERR "Allocating domain array failed\n");
+ pr_err("IOMMU%d: allocating domain array failed\n",
+ iommu->seq_id);
+ kfree(iommu->domain_ids);
+ iommu->domain_ids = NULL;
return -ENOMEM;
}
@@ -1289,10 +1265,10 @@ static int iommu_init_domains(struct intel_iommu *iommu)
static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);
-void free_dmar_iommu(struct intel_iommu *iommu)
+static void free_dmar_iommu(struct intel_iommu *iommu)
{
struct dmar_domain *domain;
- int i;
+ int i, count;
unsigned long flags;
if ((iommu->domains) && (iommu->domain_ids)) {
@@ -1301,28 +1277,24 @@ void free_dmar_iommu(struct intel_iommu *iommu)
clear_bit(i, iommu->domain_ids);
spin_lock_irqsave(&domain->iommu_lock, flags);
- if (--domain->iommu_count == 0) {
+ count = --domain->iommu_count;
+ spin_unlock_irqrestore(&domain->iommu_lock, flags);
+ if (count == 0) {
if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
vm_domain_exit(domain);
else
domain_exit(domain);
}
- spin_unlock_irqrestore(&domain->iommu_lock, flags);
}
}
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
- if (iommu->irq) {
- irq_set_handler_data(iommu->irq, NULL);
- /* This will mask the irq */
- free_irq(iommu->irq, iommu);
- destroy_irq(iommu->irq);
- }
-
kfree(iommu->domains);
kfree(iommu->domain_ids);
+ iommu->domains = NULL;
+ iommu->domain_ids = NULL;
g_iommus[iommu->seq_id] = NULL;
@@ -2245,8 +2217,6 @@ static int __init si_domain_init(int hw)
if (!si_domain)
return -EFAULT;
- pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
-
for_each_active_iommu(iommu, drhd) {
ret = iommu_attach_domain(si_domain, iommu);
if (ret) {
@@ -2261,6 +2231,8 @@ static int __init si_domain_init(int hw)
}
si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+ pr_debug("IOMMU: identity mapping domain is domain %d\n",
+ si_domain->id);
if (hw)
return 0;
@@ -2492,11 +2464,7 @@ static int __init init_dmars(void)
goto error;
}
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
-
- iommu = drhd->iommu;
+ for_each_active_iommu(iommu, drhd) {
g_iommus[iommu->seq_id] = iommu;
ret = iommu_init_domains(iommu);
@@ -2520,12 +2488,7 @@ static int __init init_dmars(void)
/*
* Start from the sane iommu hardware state.
*/
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
-
- iommu = drhd->iommu;
-
+ for_each_active_iommu(iommu, drhd) {
/*
* If the queued invalidation is already initialized by us
* (for example, while enabling interrupt-remapping) then
@@ -2545,12 +2508,7 @@ static int __init init_dmars(void)
dmar_disable_qi(iommu);
}
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
-
- iommu = drhd->iommu;
-
+ for_each_active_iommu(iommu, drhd) {
if (dmar_enable_qi(iommu)) {
/*
* Queued Invalidate not enabled, use Register Based
@@ -2633,17 +2591,16 @@ static int __init init_dmars(void)
* global invalidate iotlb
* enable translation
*/
- for_each_drhd_unit(drhd) {
+ for_each_iommu(iommu, drhd) {
if (drhd->ignored) {
/*
* we always have to disable PMRs or DMA may fail on
* this device
*/
if (force_on)
- iommu_disable_protect_mem_regions(drhd->iommu);
+ iommu_disable_protect_mem_regions(iommu);
continue;
}
- iommu = drhd->iommu;
iommu_flush_write_buffer(iommu);
@@ -2665,12 +2622,9 @@ static int __init init_dmars(void)
return 0;
error:
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
- iommu = drhd->iommu;
- free_iommu(iommu);
- }
+ for_each_active_iommu(iommu, drhd)
+ free_dmar_iommu(iommu);
+ kfree(deferred_flush);
kfree(g_iommus);
return ret;
}
@@ -2758,7 +2712,7 @@ static int iommu_no_mapping(struct device *dev)
struct pci_dev *pdev;
int found;
- if (unlikely(dev->bus != &pci_bus_type))
+ if (unlikely(!dev_is_pci(dev)))
return 1;
pdev = to_pci_dev(dev);
@@ -3318,9 +3272,9 @@ static void __init init_no_remapping_devices(void)
}
}
- for_each_drhd_unit(drhd) {
+ for_each_active_drhd_unit(drhd) {
int i;
- if (drhd->ignored || drhd->include_all)
+ if (drhd->include_all)
continue;
for (i = 0; i < drhd->devices_cnt; i++)
@@ -3514,18 +3468,12 @@ static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
struct acpi_dmar_reserved_memory *rmrr;
- int ret;
rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
- ret = dmar_parse_dev_scope((void *)(rmrr + 1),
- ((void *)rmrr) + rmrr->header.length,
- &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
-
- if (ret || (rmrru->devices_cnt == 0)) {
- list_del(&rmrru->list);
- kfree(rmrru);
- }
- return ret;
+ return dmar_parse_dev_scope((void *)(rmrr + 1),
+ ((void *)rmrr) + rmrr->header.length,
+ &rmrru->devices_cnt, &rmrru->devices,
+ rmrr->segment);
}
static LIST_HEAD(dmar_atsr_units);
@@ -3550,23 +3498,39 @@ int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
- int rc;
struct acpi_dmar_atsr *atsr;
if (atsru->include_all)
return 0;
atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
- rc = dmar_parse_dev_scope((void *)(atsr + 1),
- (void *)atsr + atsr->header.length,
- &atsru->devices_cnt, &atsru->devices,
- atsr->segment);
- if (rc || !atsru->devices_cnt) {
- list_del(&atsru->list);
- kfree(atsru);
+ return dmar_parse_dev_scope((void *)(atsr + 1),
+ (void *)atsr + atsr->header.length,
+ &atsru->devices_cnt, &atsru->devices,
+ atsr->segment);
+}
+
+static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
+{
+ dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
+ kfree(atsru);
+}
+
+static void intel_iommu_free_dmars(void)
+{
+ struct dmar_rmrr_unit *rmrru, *rmrr_n;
+ struct dmar_atsr_unit *atsru, *atsr_n;
+
+ list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
+ list_del(&rmrru->list);
+ dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+ kfree(rmrru);
}
- return rc;
+ list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
+ list_del(&atsru->list);
+ intel_iommu_free_atsr(atsru);
+ }
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
@@ -3610,17 +3574,17 @@ found:
int __init dmar_parse_rmrr_atsr_dev(void)
{
- struct dmar_rmrr_unit *rmrr, *rmrr_n;
- struct dmar_atsr_unit *atsr, *atsr_n;
+ struct dmar_rmrr_unit *rmrr;
+ struct dmar_atsr_unit *atsr;
int ret = 0;
- list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
+ list_for_each_entry(rmrr, &dmar_rmrr_units, list) {
ret = rmrr_parse_dev(rmrr);
if (ret)
return ret;
}
- list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
+ list_for_each_entry(atsr, &dmar_atsr_units, list) {
ret = atsr_parse_dev(atsr);
if (ret)
return ret;
@@ -3667,8 +3631,9 @@ static struct notifier_block device_nb = {
int __init intel_iommu_init(void)
{
- int ret = 0;
+ int ret = -ENODEV;
struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
/* VT-d is required for a TXT/tboot launch, so enforce that */
force_on = tboot_force_iommu();
@@ -3676,36 +3641,29 @@ int __init intel_iommu_init(void)
if (dmar_table_init()) {
if (force_on)
panic("tboot: Failed to initialize DMAR table\n");
- return -ENODEV;
+ goto out_free_dmar;
}
/*
* Disable translation if already enabled prior to OS handover.
*/
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu;
-
- if (drhd->ignored)
- continue;
-
- iommu = drhd->iommu;
+ for_each_active_iommu(iommu, drhd)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
- }
if (dmar_dev_scope_init() < 0) {
if (force_on)
panic("tboot: Failed to initialize DMAR device scope\n");
- return -ENODEV;
+ goto out_free_dmar;
}
if (no_iommu || dmar_disabled)
- return -ENODEV;
+ goto out_free_dmar;
if (iommu_init_mempool()) {
if (force_on)
panic("tboot: Failed to initialize iommu memory\n");
- return -ENODEV;
+ goto out_free_dmar;
}
if (list_empty(&dmar_rmrr_units))
@@ -3717,7 +3675,7 @@ int __init intel_iommu_init(void)
if (dmar_init_reserved_ranges()) {
if (force_on)
panic("tboot: Failed to reserve iommu ranges\n");
- return -ENODEV;
+ goto out_free_mempool;
}
init_no_remapping_devices();
@@ -3727,9 +3685,7 @@ int __init intel_iommu_init(void)
if (force_on)
panic("tboot: Failed to initialize DMARs\n");
printk(KERN_ERR "IOMMU: dmar init failed\n");
- put_iova_domain(&reserved_iova_list);
- iommu_exit_mempool();
- return ret;
+ goto out_free_reserved_range;
}
printk(KERN_INFO
"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
@@ -3749,6 +3705,14 @@ int __init intel_iommu_init(void)
intel_iommu_enabled = 1;
return 0;
+
+out_free_reserved_range:
+ put_iova_domain(&reserved_iova_list);
+out_free_mempool:
+ iommu_exit_mempool();
+out_free_dmar:
+ intel_iommu_free_dmars();
+ return ret;
}
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
@@ -3877,7 +3841,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
}
/* domain id for virtual machine, it won't be set in context */
-static unsigned long vm_domid;
+static atomic_t vm_domid = ATOMIC_INIT(0);
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
@@ -3887,7 +3851,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
if (!domain)
return NULL;
- domain->id = vm_domid++;
+ domain->id = atomic_inc_return(&vm_domid);
domain->nid = -1;
memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
@@ -3934,11 +3898,7 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
unsigned long i;
unsigned long ndomains;
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
- iommu = drhd->iommu;
-
+ for_each_active_iommu(iommu, drhd) {
ndomains = cap_ndoms(iommu->cap);
for_each_set_bit(i, iommu->domain_ids, ndomains) {
if (iommu->domains[i] == domain) {
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index bab10b1002fb..ef5f65dbafe9 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -6,11 +6,11 @@
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
+#include <linux/intel-iommu.h>
+#include <linux/acpi.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
-#include <linux/intel-iommu.h>
-#include <acpi/acpi.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>
@@ -40,13 +40,15 @@ static int ir_ioapic_num, ir_hpet_num;
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
+static int __init parse_ioapics_under_ir(void);
+
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
struct irq_cfg *cfg = irq_get_chip_data(irq);
return cfg ? &cfg->irq_2_iommu : NULL;
}
-int get_irte(int irq, struct irte *entry)
+static int get_irte(int irq, struct irte *entry)
{
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
unsigned long flags;
@@ -69,19 +71,13 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
struct ir_table *table = iommu->ir_table;
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
struct irq_cfg *cfg = irq_get_chip_data(irq);
- u16 index, start_index;
unsigned int mask = 0;
unsigned long flags;
- int i;
+ int index;
if (!count || !irq_iommu)
return -1;
- /*
- * start the IRTE search from index 0.
- */
- index = start_index = 0;
-
if (count > 1) {
count = __roundup_pow_of_two(count);
mask = ilog2(count);
@@ -96,32 +92,17 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
}
raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
- do {
- for (i = index; i < index + count; i++)
- if (table->base[i].present)
- break;
- /* empty index found */
- if (i == index + count)
- break;
-
- index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
-
- if (index == start_index) {
- raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
- printk(KERN_ERR "can't allocate an IRTE\n");
- return -1;
- }
- } while (1);
-
- for (i = index; i < index + count; i++)
- table->base[i].present = 1;
-
- cfg->remapped = 1;
- irq_iommu->iommu = iommu;
- irq_iommu->irte_index = index;
- irq_iommu->sub_handle = 0;
- irq_iommu->irte_mask = mask;
-
+ index = bitmap_find_free_region(table->bitmap,
+ INTR_REMAP_TABLE_ENTRIES, mask);
+ if (index < 0) {
+ pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
+ } else {
+ cfg->remapped = 1;
+ irq_iommu->iommu = iommu;
+ irq_iommu->irte_index = index;
+ irq_iommu->sub_handle = 0;
+ irq_iommu->irte_mask = mask;
+ }
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return index;
@@ -254,6 +235,8 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
set_64bit(&entry->low, 0);
set_64bit(&entry->high, 0);
}
+ bitmap_release_region(iommu->ir_table->bitmap, index,
+ irq_iommu->irte_mask);
return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
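The IRTE allocator above drops its open-coded linear scan in favour of the generic bitmap region helpers: bitmap_find_free_region() reserves a naturally aligned block of 2^order bits (here order is the mask computed from ilog2(count)) and returns the first index, or a negative value if no block is free, while bitmap_release_region() hands the block back when clear_entries() tears the mapping down. A stand-alone sketch of the pair, with an illustrative table size:

    #include <linux/bitmap.h>

    #define NR_ENTRIES      1024            /* illustrative table size */

    static DECLARE_BITMAP(table_bitmap, NR_ENTRIES);

    static int reserve_entries(int order)
    {
            /* marks 2^order contiguous, naturally aligned bits as used */
            int idx = bitmap_find_free_region(table_bitmap, NR_ENTRIES, order);

            if (idx < 0)
                    return idx;             /* no free block of that size */

            /* entries idx .. idx + (1 << order) - 1 now belong to the caller */
            return idx;
    }

    static void release_entries(int idx, int order)
    {
            bitmap_release_region(table_bitmap, idx, order);
    }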
@@ -336,7 +319,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
return -1;
}
- set_irte_sid(irte, 1, 0, sid);
+ set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
return 0;
}
@@ -453,6 +436,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
struct ir_table *ir_table;
struct page *pages;
+ unsigned long *bitmap;
ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
GFP_ATOMIC);
@@ -464,13 +448,23 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
INTR_REMAP_PAGE_ORDER);
if (!pages) {
- printk(KERN_ERR "failed to allocate pages of order %d\n",
- INTR_REMAP_PAGE_ORDER);
+ pr_err("IR%d: failed to allocate pages of order %d\n",
+ iommu->seq_id, INTR_REMAP_PAGE_ORDER);
kfree(iommu->ir_table);
return -ENOMEM;
}
+ bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
+ sizeof(long), GFP_ATOMIC);
+ if (bitmap == NULL) {
+ pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
+ __free_pages(pages, INTR_REMAP_PAGE_ORDER);
+ kfree(ir_table);
+ return -ENOMEM;
+ }
+
ir_table->base = page_address(pages);
+ ir_table->bitmap = bitmap;
iommu_set_irq_remapping(iommu, mode);
return 0;
@@ -521,6 +515,7 @@ static int __init dmar_x2apic_optout(void)
static int __init intel_irq_remapping_supported(void)
{
struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
if (disable_irq_remap)
return 0;
@@ -539,12 +534,9 @@ static int __init intel_irq_remapping_supported(void)
if (!dmar_ir_support())
return 0;
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
+ for_each_iommu(iommu, drhd)
if (!ecap_ir_support(iommu->ecap))
return 0;
- }
return 1;
}
@@ -552,6 +544,7 @@ static int __init intel_irq_remapping_supported(void)
static int __init intel_enable_irq_remapping(void)
{
struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
bool x2apic_present;
int setup = 0;
int eim = 0;
@@ -564,6 +557,8 @@ static int __init intel_enable_irq_remapping(void)
}
if (x2apic_present) {
+ pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
+
eim = !dmar_x2apic_optout();
if (!eim)
printk(KERN_WARNING
@@ -572,9 +567,7 @@ static int __init intel_enable_irq_remapping(void)
"Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
}
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
+ for_each_iommu(iommu, drhd) {
/*
* If the queued invalidation is already initialized,
* shouldn't disable it.
@@ -599,9 +592,7 @@ static int __init intel_enable_irq_remapping(void)
/*
* check for the Interrupt-remapping support
*/
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
+ for_each_iommu(iommu, drhd) {
if (!ecap_ir_support(iommu->ecap))
continue;
@@ -615,10 +606,8 @@ static int __init intel_enable_irq_remapping(void)
/*
* Enable queued invalidation for all the DRHD's.
*/
- for_each_drhd_unit(drhd) {
- int ret;
- struct intel_iommu *iommu = drhd->iommu;
- ret = dmar_enable_qi(iommu);
+ for_each_iommu(iommu, drhd) {
+ int ret = dmar_enable_qi(iommu);
if (ret) {
printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
@@ -631,9 +620,7 @@ static int __init intel_enable_irq_remapping(void)
/*
* Setup Interrupt-remapping for all the DRHD's now.
*/
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
+ for_each_iommu(iommu, drhd) {
if (!ecap_ir_support(iommu->ecap))
continue;
@@ -774,22 +761,20 @@ static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
 * Finds the association between IOAPICs and their Interrupt-remapping
* hardware unit.
*/
-int __init parse_ioapics_under_ir(void)
+static int __init parse_ioapics_under_ir(void)
{
struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
int ir_supported = 0;
int ioapic_idx;
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
+ for_each_iommu(iommu, drhd)
if (ecap_ir_support(iommu->ecap)) {
if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
return -1;
ir_supported = 1;
}
- }
if (!ir_supported)
return 0;
@@ -807,7 +792,7 @@ int __init parse_ioapics_under_ir(void)
return 1;
}
-int __init ir_dev_scope_init(void)
+static int __init ir_dev_scope_init(void)
{
if (!irq_remapping_enabled)
return 0;
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 39f81aeefcd6..228632c99adb 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -150,7 +150,7 @@ static int irq_remapping_setup_msi_irqs(struct pci_dev *dev,
return do_setup_msix_irqs(dev, nvec);
}
-void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
+static void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
{
/*
* Intr-remapping uses pin number as the virtual vector
@@ -295,8 +295,8 @@ int setup_ioapic_remapped_entry(int irq,
vector, attr);
}
-int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
- bool force)
+static int set_remapped_irq_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
{
if (!config_enabled(CONFIG_SMP) || !remap_ops ||
!remap_ops->set_affinity)
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index ee249bc959f8..e550ccb7634e 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -20,6 +20,7 @@
#include <linux/export.h>
#include <linux/limits.h>
#include <linux/of.h>
+#include <linux/of_iommu.h>
/**
* of_get_dma_window - Parse *dma-window property and returns 0 if found.
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
index d572863dfccd..7a3b928fad1c 100644
--- a/drivers/iommu/shmobile-iommu.c
+++ b/drivers/iommu/shmobile-iommu.c
@@ -380,14 +380,13 @@ int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
kmem_cache_destroy(l1cache);
return -ENOMEM;
}
- archdata = kmalloc(sizeof(*archdata), GFP_KERNEL);
+ archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
if (!archdata) {
kmem_cache_destroy(l1cache);
kmem_cache_destroy(l2cache);
return -ENOMEM;
}
spin_lock_init(&archdata->attach_lock);
- archdata->attached = NULL;
archdata->ipmmu = ipmmu;
ipmmu_archdata = archdata;
bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops);
diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c
index 8321f89596c4..e3bc2e19b6dd 100644
--- a/drivers/iommu/shmobile-ipmmu.c
+++ b/drivers/iommu/shmobile-ipmmu.c
@@ -35,12 +35,12 @@ void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu)
if (!ipmmu)
return;
- mutex_lock(&ipmmu->flush_lock);
+ spin_lock(&ipmmu->flush_lock);
if (ipmmu->tlb_enabled)
ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN);
else
ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH);
- mutex_unlock(&ipmmu->flush_lock);
+ spin_unlock(&ipmmu->flush_lock);
}
void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
@@ -49,7 +49,7 @@ void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
if (!ipmmu)
return;
- mutex_lock(&ipmmu->flush_lock);
+ spin_lock(&ipmmu->flush_lock);
switch (size) {
default:
ipmmu->tlb_enabled = 0;
@@ -85,7 +85,7 @@ void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
}
ipmmu_reg_write(ipmmu, IMTTBR, phys);
ipmmu_reg_write(ipmmu, IMASID, asid);
- mutex_unlock(&ipmmu->flush_lock);
+ spin_unlock(&ipmmu->flush_lock);
}
static int ipmmu_probe(struct platform_device *pdev)
@@ -104,7 +104,7 @@ static int ipmmu_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot allocate device data\n");
return -ENOMEM;
}
- mutex_init(&ipmmu->flush_lock);
+ spin_lock_init(&ipmmu->flush_lock);
ipmmu->dev = &pdev->dev;
ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start,
resource_size(res));
diff --git a/drivers/iommu/shmobile-ipmmu.h b/drivers/iommu/shmobile-ipmmu.h
index 4d53684673e1..9524743ca1fb 100644
--- a/drivers/iommu/shmobile-ipmmu.h
+++ b/drivers/iommu/shmobile-ipmmu.h
@@ -14,7 +14,7 @@ struct shmobile_ipmmu {
struct device *dev;
void __iomem *ipmmu_base;
int tlb_enabled;
- struct mutex flush_lock;
+ spinlock_t flush_lock;
const char * const *dev_names;
unsigned int num_dev_names;
};
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 3792a1aa52b8..61ffdca96e25 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -30,6 +30,10 @@ config ARM_VIC_NR
The maximum number of VICs available in the system, for
power management.
+config DW_APB_ICTL
+ bool
+ select IRQ_DOMAIN
+
config IMGPDC_IRQ
bool
select GENERIC_IRQ_CHIP
@@ -61,3 +65,7 @@ config VERSATILE_FPGA_IRQ_NR
int
default 4
depends on VERSATILE_FPGA_IRQ
+
+config XTENSA_MX
+ bool
+ select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index c60b9010b152..5194afb39e78 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o
obj-$(CONFIG_ARCH_MXS) += irq-mxs.o
obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o
+obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o
obj-$(CONFIG_METAG) += irq-metag-ext.o
obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o
obj-$(CONFIG_ARCH_MOXART) += irq-moxart.o
@@ -20,5 +21,8 @@ obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
+obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
+obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
+obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index 868ed40cb6bf..40e6440348ff 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -171,8 +171,7 @@ static struct irq_domain_ops combiner_irq_domain_ops = {
static void __init combiner_init(void __iomem *combiner_base,
struct device_node *np,
- unsigned int max_nr,
- int irq_base)
+ unsigned int max_nr)
{
int i, irq;
unsigned int nr_irq;
@@ -186,7 +185,7 @@ static void __init combiner_init(void __iomem *combiner_base,
return;
}
- combiner_irq_domain = irq_domain_add_simple(np, nr_irq, irq_base,
+ combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
&combiner_irq_domain_ops, combiner_data);
if (WARN_ON(!combiner_irq_domain)) {
pr_warning("%s: irq domain init failed\n", __func__);
@@ -207,7 +206,6 @@ static int __init combiner_of_init(struct device_node *np,
{
void __iomem *combiner_base;
unsigned int max_nr = 20;
- int irq_base = -1;
combiner_base = of_iomap(np, 0);
if (!combiner_base) {
@@ -221,14 +219,7 @@ static int __init combiner_of_init(struct device_node *np,
__func__, max_nr);
}
- /*
- * FIXME: This is a hardwired COMBINER_IRQ(0,0). Once all devices
- * get their IRQ from DT, remove this in order to get dynamic
- * allocation.
- */
- irq_base = 160;
-
- combiner_init(combiner_base, np, max_nr, irq_base);
+ combiner_init(combiner_base, np, max_nr);
return 0;
}
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 433cc8568dec..540956465ed2 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -59,8 +59,6 @@
#define PCI_MSI_DOORBELL_END (32)
#define PCI_MSI_DOORBELL_MASK 0xFFFF0000
-static DEFINE_RAW_SPINLOCK(irq_controller_lock);
-
static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
@@ -239,6 +237,8 @@ static inline int armada_370_xp_msi_init(struct device_node *node,
#endif
#ifdef CONFIG_SMP
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
static int armada_xp_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
@@ -381,7 +381,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
& PCI_MSI_DOORBELL_MASK;
- writel(~PCI_MSI_DOORBELL_MASK, per_cpu_int_base +
+ writel(~msimask, per_cpu_int_base +
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
for (msinr = PCI_MSI_DOORBELL_START;
@@ -407,7 +407,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
& IPI_DOORBELL_MASK;
- writel(~IPI_DOORBELL_MASK, per_cpu_int_base +
+ writel(~ipimask, per_cpu_int_base +
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
/* Handle all pending doorbells */
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
new file mode 100644
index 000000000000..31e231e1f566
--- /dev/null
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -0,0 +1,150 @@
+/*
+ * Synopsys DW APB ICTL irqchip driver.
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * based on GPL'ed 2.6 kernel sources
+ * (c) Marvell International Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include "irqchip.h"
+
+#define APB_INT_ENABLE_L 0x00
+#define APB_INT_ENABLE_H 0x04
+#define APB_INT_MASK_L 0x08
+#define APB_INT_MASK_H 0x0c
+#define APB_INT_FINALSTATUS_L 0x30
+#define APB_INT_FINALSTATUS_H 0x34
+
+static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip_generic *gc = irq_get_handler_data(irq);
+ struct irq_domain *d = gc->private;
+ u32 stat;
+ int n;
+
+ chained_irq_enter(chip, desc);
+
+ for (n = 0; n < gc->num_ct; n++) {
+ stat = readl_relaxed(gc->reg_base +
+ APB_INT_FINALSTATUS_L + 4 * n);
+ while (stat) {
+ u32 hwirq = ffs(stat) - 1;
+ generic_handle_irq(irq_find_mapping(d,
+ gc->irq_base + hwirq + 32 * n));
+ stat &= ~(1 << hwirq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int __init dw_apb_ictl_init(struct device_node *np,
+ struct device_node *parent)
+{
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct resource r;
+ struct irq_domain *domain;
+ struct irq_chip_generic *gc;
+ void __iomem *iobase;
+ int ret, nrirqs, irq;
+ u32 reg;
+
+ /* Map the parent interrupt for the chained handler */
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ pr_err("%s: unable to parse irq\n", np->full_name);
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(np, 0, &r);
+ if (ret) {
+ pr_err("%s: unable to get resource\n", np->full_name);
+ return ret;
+ }
+
+ if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
+ pr_err("%s: unable to request mem region\n", np->full_name);
+ return -ENOMEM;
+ }
+
+ iobase = ioremap(r.start, resource_size(&r));
+ if (!iobase) {
+ pr_err("%s: unable to map resource\n", np->full_name);
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ /*
+ * The DW IP can be configured to allow 2-64 irqs. We can determine
+ * the number of irqs supported by writing into the enable register
+ * and looking for bits not set, as the corresponding flip-flops
+ * will have been removed by the synthesis tool.
+ */
+
+ /* mask and enable all interrupts */
+ writel(~0, iobase + APB_INT_MASK_L);
+ writel(~0, iobase + APB_INT_MASK_H);
+ writel(~0, iobase + APB_INT_ENABLE_L);
+ writel(~0, iobase + APB_INT_ENABLE_H);
+
+ reg = readl(iobase + APB_INT_ENABLE_H);
+ if (reg)
+ nrirqs = 32 + fls(reg);
+ else
+ nrirqs = fls(readl(iobase + APB_INT_ENABLE_L));
+
+ domain = irq_domain_add_linear(np, nrirqs,
+ &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ pr_err("%s: unable to add irq domain\n", np->full_name);
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ ret = irq_alloc_domain_generic_chips(domain, 32, (nrirqs > 32) ? 2 : 1,
+ np->name, handle_level_irq, clr, 0,
+ IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
+ goto err_unmap;
+ }
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->private = domain;
+ gc->reg_base = iobase;
+
+ gc->chip_types[0].regs.mask = APB_INT_MASK_L;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+
+ if (nrirqs > 32) {
+ gc->chip_types[1].regs.mask = APB_INT_MASK_H;
+ gc->chip_types[1].chip.irq_mask = irq_gc_mask_set_bit;
+ gc->chip_types[1].chip.irq_unmask = irq_gc_mask_clr_bit;
+ }
+
+ irq_set_handler_data(irq, gc);
+ irq_set_chained_handler(irq, dw_apb_ictl_handler);
+
+ return 0;
+
+err_unmap:
+ iounmap(iobase);
+err_release:
+ release_mem_region(r.start, resource_size(&r));
+ return ret;
+}
+IRQCHIP_DECLARE(dw_apb_ictl,
+ "snps,dw-apb-ictl", dw_apb_ictl_init);
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 82cec63a9011..3ee78f02e5d7 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -149,8 +149,9 @@ static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
int irq, int do_mask)
{
- int bitfield_width = 4; /* PRIO assumed to have fixed bitfield width */
- int shift = (7 - irq) * bitfield_width; /* PRIO assumed to be 32-bit */
+ /* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */
+ int bitfield_width = 4;
+ int shift = 32 - (irq + 1) * bitfield_width;
intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
shift, bitfield_width,
@@ -159,8 +160,9 @@ static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
{
+ /* The SENSE register is assumed to be 32-bit. */
int bitfield_width = p->config.sense_bitfield_width;
- int shift = (7 - irq) * bitfield_width; /* SENSE assumed to be 32-bit */
+ int shift = 32 - (irq + 1) * bitfield_width;
dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);
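The shift rework above is worth spelling out. The old formula, shift = (7 - irq) * bitfield_width, assumed eight fields per 32-bit register, which only holds for 4-bit fields: with width 4 it gives 28 - 4*irq, exactly what the new shift = 32 - (irq + 1) * bitfield_width also gives. For any other width the two diverge; for example, a 2-bit SENSE field for irq 0 used to land at shift (7 - 0) * 2 = 14, in the middle of the register, whereas the new formula puts it at 32 - 2 = 30, packing fields down from the most significant bits as the 32-bit layout described in the updated comments implies.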
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 2f404ba61c6c..8777065012a5 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -81,15 +81,12 @@ static void irqc_irq_disable(struct irq_data *d)
iowrite32(BIT(hw_irq), p->cpu_int_base + IRQC_EN_STS);
}
-#define INTC_IRQ_SENSE_VALID 0x10
-#define INTC_IRQ_SENSE(x) (x + INTC_IRQ_SENSE_VALID)
-
static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
- [IRQ_TYPE_LEVEL_LOW] = INTC_IRQ_SENSE(0x01),
- [IRQ_TYPE_LEVEL_HIGH] = INTC_IRQ_SENSE(0x02),
- [IRQ_TYPE_EDGE_FALLING] = INTC_IRQ_SENSE(0x04), /* Synchronous */
- [IRQ_TYPE_EDGE_RISING] = INTC_IRQ_SENSE(0x08), /* Synchronous */
- [IRQ_TYPE_EDGE_BOTH] = INTC_IRQ_SENSE(0x0c), /* Synchronous */
+ [IRQ_TYPE_LEVEL_LOW] = 0x01,
+ [IRQ_TYPE_LEVEL_HIGH] = 0x02,
+ [IRQ_TYPE_EDGE_FALLING] = 0x04, /* Synchronous */
+ [IRQ_TYPE_EDGE_RISING] = 0x08, /* Synchronous */
+ [IRQ_TYPE_EDGE_BOTH] = 0x0c, /* Synchronous */
};
static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
@@ -101,12 +98,12 @@ static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
irqc_dbg(&p->irq[hw_irq], "sense");
- if (!(value & INTC_IRQ_SENSE_VALID))
+ if (!value)
return -EINVAL;
tmp = ioread32(p->iomem + IRQC_CONFIG(hw_irq));
tmp &= ~0x3f;
- tmp |= value ^ INTC_IRQ_SENSE_VALID;
+ tmp |= value;
iowrite32(tmp, p->iomem + IRQC_CONFIG(hw_irq));
return 0;
}
@@ -212,10 +209,8 @@ static int irqc_probe(struct platform_device *pdev)
irq_chip->name = name;
irq_chip->irq_mask = irqc_irq_disable;
irq_chip->irq_unmask = irqc_irq_enable;
- irq_chip->irq_enable = irqc_irq_enable;
- irq_chip->irq_disable = irqc_irq_disable;
irq_chip->irq_set_type = irqc_irq_set_type;
- irq_chip->flags = IRQCHIP_SKIP_SET_WAKE;
+ irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
p->number_of_irqs,
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c
index 4851afae38dc..3a070c587ed9 100644
--- a/drivers/irqchip/irq-sirfsoc.c
+++ b/drivers/irqchip/irq-sirfsoc.c
@@ -34,9 +34,10 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
struct irq_chip_type *ct;
int ret;
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ unsigned int set = IRQ_LEVEL;
ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc",
- handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ handle_level_irq, clr, set, IRQ_GC_INIT_MASK_CACHE);
gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start);
gc->reg_base = base;
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 47a52ab580d8..3ae2bb8d9cf2 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
@@ -167,8 +168,12 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
f->used_irqs++;
}
- pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs\n",
+ pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs",
fpga_irq_id, name, base, f->used_irqs);
+ if (parent_irq != -1)
+ pr_cont(", parent IRQ: %d\n", parent_irq);
+ else
+ pr_cont("\n");
fpga_irq_id++;
}
@@ -180,6 +185,7 @@ int __init fpga_irq_of_init(struct device_node *node,
void __iomem *base;
u32 clear_mask;
u32 valid_mask;
+ int parent_irq;
if (WARN_ON(!node))
return -ENODEV;
@@ -193,7 +199,12 @@ int __init fpga_irq_of_init(struct device_node *node,
if (of_property_read_u32(node, "valid-mask", &valid_mask))
valid_mask = 0;
- fpga_irq_init(base, node->name, 0, -1, valid_mask, node);
+ /* Some chips are cascaded from a parent IRQ */
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq)
+ parent_irq = -1;
+
+ fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
writel(clear_mask, base + IRQ_ENABLE_CLEAR);
writel(clear_mask, base + FIQ_ENABLE_CLEAR);
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
new file mode 100644
index 000000000000..f693f1bc1348
--- /dev/null
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -0,0 +1,164 @@
+/*
+ * Xtensa MX interrupt distributor
+ *
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+
+#include <asm/mxregs.h>
+
+#include "irqchip.h"
+
+#define HW_IRQ_IPI_COUNT 2
+#define HW_IRQ_MX_BASE 2
+#define HW_IRQ_EXTERN_BASE 3
+
+static DEFINE_PER_CPU(unsigned int, cached_irq_mask);
+
+static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ if (hw < HW_IRQ_IPI_COUNT) {
+ struct irq_chip *irq_chip = d->host_data;
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_percpu_irq, "ipi");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ return 0;
+ }
+ return xtensa_irq_map(d, irq, hw);
+}
+
+/*
+ * Device Tree IRQ specifier translation function which works with one or
+ * two cell bindings. The first cell value maps directly to the hwirq number.
+ * The second cell, if present, specifies whether the hwirq number is
+ * external (1) or internal (0).
+ */
+static int xtensa_mx_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ return xtensa_irq_domain_xlate(intspec, intsize,
+ intspec[0], intspec[0] + HW_IRQ_EXTERN_BASE,
+ out_hwirq, out_type);
+}
+
+static const struct irq_domain_ops xtensa_mx_irq_domain_ops = {
+ .xlate = xtensa_mx_irq_domain_xlate,
+ .map = xtensa_mx_irq_map,
+};
+
+void secondary_init_irq(void)
+{
+ __this_cpu_write(cached_irq_mask,
+ XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
+ set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
+}
+
+static void xtensa_mx_irq_mask(struct irq_data *d)
+{
+ unsigned int mask = 1u << d->hwirq;
+
+ if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
+ set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
+ HW_IRQ_MX_BASE), MIENG);
+ } else {
+ mask = __this_cpu_read(cached_irq_mask) & ~mask;
+ __this_cpu_write(cached_irq_mask, mask);
+ set_sr(mask, intenable);
+ }
+}
+
+static void xtensa_mx_irq_unmask(struct irq_data *d)
+{
+ unsigned int mask = 1u << d->hwirq;
+
+ if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
+ set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
+ HW_IRQ_MX_BASE), MIENGSET);
+ } else {
+ mask |= __this_cpu_read(cached_irq_mask);
+ __this_cpu_write(cached_irq_mask, mask);
+ set_sr(mask, intenable);
+ }
+}
+
+static void xtensa_mx_irq_enable(struct irq_data *d)
+{
+ variant_irq_enable(d->hwirq);
+ xtensa_mx_irq_unmask(d);
+}
+
+static void xtensa_mx_irq_disable(struct irq_data *d)
+{
+ xtensa_mx_irq_mask(d);
+ variant_irq_disable(d->hwirq);
+}
+
+static void xtensa_mx_irq_ack(struct irq_data *d)
+{
+ set_sr(1 << d->hwirq, intclear);
+}
+
+static int xtensa_mx_irq_retrigger(struct irq_data *d)
+{
+ set_sr(1 << d->hwirq, intset);
+ return 1;
+}
+
+static int xtensa_mx_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *dest, bool force)
+{
+ unsigned mask = 1u << cpumask_any(dest);
+
+ set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
+ return 0;
+
+}
+
+static struct irq_chip xtensa_mx_irq_chip = {
+ .name = "xtensa-mx",
+ .irq_enable = xtensa_mx_irq_enable,
+ .irq_disable = xtensa_mx_irq_disable,
+ .irq_mask = xtensa_mx_irq_mask,
+ .irq_unmask = xtensa_mx_irq_unmask,
+ .irq_ack = xtensa_mx_irq_ack,
+ .irq_retrigger = xtensa_mx_irq_retrigger,
+ .irq_set_affinity = xtensa_mx_irq_set_affinity,
+};
+
+int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
+{
+ struct irq_domain *root_domain =
+ irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+ &xtensa_mx_irq_domain_ops,
+ &xtensa_mx_irq_chip);
+ irq_set_default_host(root_domain);
+ secondary_init_irq();
+ return 0;
+}
+
+static int __init xtensa_mx_init(struct device_node *np,
+ struct device_node *interrupt_parent)
+{
+ struct irq_domain *root_domain =
+ irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
+ &xtensa_mx_irq_chip);
+ irq_set_default_host(root_domain);
+ secondary_init_irq();
+ return 0;
+}
+IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
new file mode 100644
index 000000000000..7d71126d1ce5
--- /dev/null
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -0,0 +1,108 @@
+/*
+ * Xtensa built-in interrupt controller
+ *
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Kevin Chea
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+
+#include "irqchip.h"
+
+unsigned int cached_irq_mask;
+
+/*
+ * Device Tree IRQ specifier translation function which works with one or
+ * two cell bindings. The first cell maps directly to the hwirq number.
+ * The second cell, if present, specifies whether the hwirq is external (1)
+ * or internal (0).
+ */
+static int xtensa_pic_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ return xtensa_irq_domain_xlate(intspec, intsize,
+ intspec[0], intspec[0],
+ out_hwirq, out_type);
+}
+
+static const struct irq_domain_ops xtensa_irq_domain_ops = {
+ .xlate = xtensa_pic_irq_domain_xlate,
+ .map = xtensa_irq_map,
+};
+
+static void xtensa_irq_mask(struct irq_data *d)
+{
+ cached_irq_mask &= ~(1 << d->hwirq);
+ set_sr(cached_irq_mask, intenable);
+}
+
+static void xtensa_irq_unmask(struct irq_data *d)
+{
+ cached_irq_mask |= 1 << d->hwirq;
+ set_sr(cached_irq_mask, intenable);
+}
+
+static void xtensa_irq_enable(struct irq_data *d)
+{
+ variant_irq_enable(d->hwirq);
+ xtensa_irq_unmask(d);
+}
+
+static void xtensa_irq_disable(struct irq_data *d)
+{
+ xtensa_irq_mask(d);
+ variant_irq_disable(d->hwirq);
+}
+
+static void xtensa_irq_ack(struct irq_data *d)
+{
+ set_sr(1 << d->hwirq, intclear);
+}
+
+static int xtensa_irq_retrigger(struct irq_data *d)
+{
+ set_sr(1 << d->hwirq, intset);
+ return 1;
+}
+
+static struct irq_chip xtensa_irq_chip = {
+ .name = "xtensa",
+ .irq_enable = xtensa_irq_enable,
+ .irq_disable = xtensa_irq_disable,
+ .irq_mask = xtensa_irq_mask,
+ .irq_unmask = xtensa_irq_unmask,
+ .irq_ack = xtensa_irq_ack,
+ .irq_retrigger = xtensa_irq_retrigger,
+};
+
+int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
+{
+ struct irq_domain *root_domain =
+ irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+ &xtensa_irq_domain_ops, &xtensa_irq_chip);
+ irq_set_default_host(root_domain);
+ return 0;
+}
+
+static int __init xtensa_pic_init(struct device_node *np,
+ struct device_node *interrupt_parent)
+{
+ struct irq_domain *root_domain =
+ irq_domain_add_linear(np, NR_IRQS, &xtensa_irq_domain_ops,
+ &xtensa_irq_chip);
+ irq_set_default_host(root_domain);
+ return 0;
+}
+IRQCHIP_DECLARE(xtensa_irq_chip, "cdns,xtensa-pic", xtensa_pic_init);
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
new file mode 100644
index 000000000000..8ed04c4a43ee
--- /dev/null
+++ b/drivers/irqchip/irq-zevio.c
@@ -0,0 +1,127 @@
+/*
+ * linux/drivers/irqchip/irq-zevio.c
+ *
+ * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/mach/irq.h>
+#include <asm/exception.h>
+
+#include "irqchip.h"
+
+#define IO_STATUS 0x000
+#define IO_RAW_STATUS 0x004
+#define IO_ENABLE 0x008
+#define IO_DISABLE 0x00C
+#define IO_CURRENT 0x020
+#define IO_RESET 0x028
+#define IO_MAX_PRIOTY 0x02C
+
+#define IO_IRQ_BASE 0x000
+#define IO_FIQ_BASE 0x100
+
+#define IO_INVERT_SEL 0x200
+#define IO_STICKY_SEL 0x204
+#define IO_PRIORITY_SEL 0x300
+
+#define MAX_INTRS 32
+#define FIQ_START MAX_INTRS
+
+static struct irq_domain *zevio_irq_domain;
+static void __iomem *zevio_irq_io;
+
+static void zevio_irq_ack(struct irq_data *irqd)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(irqd);
+ struct irq_chip_regs *regs =
+ &container_of(irqd->chip, struct irq_chip_type, chip)->regs;
+
+ readl(gc->reg_base + regs->ack);
+}
+
+static asmlinkage void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
+{
+ int irqnr;
+
+ while (readl(zevio_irq_io + IO_STATUS)) {
+ irqnr = readl(zevio_irq_io + IO_CURRENT);
+ irqnr = irq_find_mapping(zevio_irq_domain, irqnr);
+ handle_IRQ(irqnr, regs);
+	}
+}
+
+static void __init zevio_init_irq_base(void __iomem *base)
+{
+ /* Disable all interrupts */
+ writel(~0, base + IO_DISABLE);
+
+ /* Accept interrupts of all priorities */
+ writel(0xF, base + IO_MAX_PRIOTY);
+
+ /* Reset existing interrupts */
+ readl(base + IO_RESET);
+}
+
+static int __init zevio_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct irq_chip_generic *gc;
+ int ret;
+
+ if (WARN_ON(zevio_irq_io || zevio_irq_domain))
+ return -EBUSY;
+
+ zevio_irq_io = of_iomap(node, 0);
+ BUG_ON(!zevio_irq_io);
+
+ /* Do not invert interrupt status bits */
+ writel(~0, zevio_irq_io + IO_INVERT_SEL);
+
+ /* Disable sticky interrupts */
+ writel(0, zevio_irq_io + IO_STICKY_SEL);
+
+ /* We don't use IRQ priorities. Set each IRQ to highest priority. */
+ memset_io(zevio_irq_io + IO_PRIORITY_SEL, 0, MAX_INTRS * sizeof(u32));
+
+ /* Init IRQ and FIQ */
+ zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE);
+ zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE);
+
+ zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS,
+ &irq_generic_chip_ops, NULL);
+ BUG_ON(!zevio_irq_domain);
+
+ ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1,
+ "zevio_intc", handle_level_irq,
+ clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ BUG_ON(ret);
+
+ gc = irq_get_domain_generic_chip(zevio_irq_domain, 0);
+ gc->reg_base = zevio_irq_io;
+ gc->chip_types[0].chip.irq_ack = zevio_irq_ack;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
+ gc->chip_types[0].regs.mask = IO_IRQ_BASE + IO_ENABLE;
+ gc->chip_types[0].regs.enable = IO_IRQ_BASE + IO_ENABLE;
+ gc->chip_types[0].regs.disable = IO_IRQ_BASE + IO_DISABLE;
+ gc->chip_types[0].regs.ack = IO_IRQ_BASE + IO_RESET;
+
+ set_handle_irq(zevio_handle_irq);
+
+ pr_info("TI-NSPIRE classic IRQ controller\n");
+ return 0;
+}
+
+IRQCHIP_DECLARE(zevio_irq, "lsi,zevio-intc", zevio_of_init);
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 497bd026c237..4a4825528188 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1643,10 +1643,6 @@ setup_hfcpci(struct IsdnCard *card)
int i;
struct pci_dev *tmp_hfcpci = NULL;
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
strcpy(tmp, hfcpci_revision);
printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index af1b020a81f1..b420f8bd862e 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -810,7 +810,7 @@ prfeatureind(char *dest, u_char *p)
dp += sprintf(dp, " octet 3 ");
dp += prbits(dp, *p, 8, 8);
*dp++ = '\n';
- if (!(*p++ & 80)) {
+ if (!(*p++ & 0x80)) {
dp += sprintf(dp, " octet 4 ");
dp += prbits(dp, *p++, 8, 8);
*dp++ = '\n';
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c
index f6ab63aa6995..33eeb4602c7e 100644
--- a/drivers/isdn/hisax/telespci.c
+++ b/drivers/isdn/hisax/telespci.c
@@ -290,10 +290,6 @@ int setup_telespci(struct IsdnCard *card)
struct IsdnCardState *cs = card->cs;
char tmp[64];
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
strcpy(tmp, telespci_revision);
printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
if (cs->typ != ISDN_CTYPE_TELESPCI)
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 8b98d53d9976..d9aebbc510cc 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1371,7 +1371,7 @@ isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev)
eth = eth_hdr(skb);
if (*eth->h_dest & 1) {
- if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
+ if (ether_addr_equal(eth->h_dest, dev->broadcast))
skb->pkt_type = PACKET_BROADCAST;
else
skb->pkt_type = PACKET_MULTICAST;
@@ -1382,7 +1382,7 @@ isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev)
*/
else if (dev->flags & (IFF_PROMISC /*| IFF_ALLMULTI*/)) {
- if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
+ if (!ether_addr_equal(eth->h_dest, dev->dev_addr))
skb->pkt_type = PACKET_OTHERHOST;
}
if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 5cefb479c707..1be82284cf9d 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -135,7 +135,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
return err;
if (msg->msg_name) {
- struct sockaddr_mISDN *maddr = msg->msg_name;
+ DECLARE_SOCKADDR(struct sockaddr_mISDN *, maddr, msg->msg_name);
maddr->family = AF_ISDN;
maddr->dev = _pms(sk)->dev->id;
@@ -179,7 +179,6 @@ mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct sock *sk = sock->sk;
struct sk_buff *skb;
int err = -ENOMEM;
- struct sockaddr_mISDN *maddr;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s: len %d flags %x ch %d proto %x\n",
@@ -214,7 +213,7 @@ mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
/* if we have a address, we use it */
- maddr = (struct sockaddr_mISDN *)msg->msg_name;
+ DECLARE_SOCKADDR(struct sockaddr_mISDN *, maddr, msg->msg_name);
mISDN_HEAD_ID(skb) = maddr->channel;
} else { /* use default for L2 messages */
if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
diff --git a/drivers/isdn/sc/event.c b/drivers/isdn/sc/event.c
index 717003a3bdf4..833d96c2cf92 100644
--- a/drivers/isdn/sc/event.c
+++ b/drivers/isdn/sc/event.c
@@ -57,7 +57,7 @@ int indicate_status(int card, int event, ulong Channel, char *Data)
memcpy(&cmd.parm.setup, Data, sizeof(cmd.parm.setup));
break;
default:
- strcpy(cmd.parm.num, Data);
+ strlcpy(cmd.parm.num, Data, sizeof(cmd.parm.num));
}
}
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 3c972b2f9893..e387f41a9cb7 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -242,18 +242,14 @@ EXPORT_SYMBOL_GPL(led_trigger_unregister);
void led_trigger_event(struct led_trigger *trig,
enum led_brightness brightness)
{
- struct list_head *entry;
+ struct led_classdev *led_cdev;
if (!trig)
return;
read_lock(&trig->leddev_list_lock);
- list_for_each(entry, &trig->led_cdevs) {
- struct led_classdev *led_cdev;
-
- led_cdev = list_entry(entry, struct led_classdev, trig_list);
+ list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list)
led_set_brightness(led_cdev, brightness);
- }
read_unlock(&trig->leddev_list_lock);
}
EXPORT_SYMBOL_GPL(led_trigger_event);
@@ -264,16 +260,13 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
int oneshot,
int invert)
{
- struct list_head *entry;
+ struct led_classdev *led_cdev;
if (!trig)
return;
read_lock(&trig->leddev_list_lock);
- list_for_each(entry, &trig->led_cdevs) {
- struct led_classdev *led_cdev;
-
- led_cdev = list_entry(entry, struct led_classdev, trig_list);
+ list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
if (oneshot)
led_blink_set_oneshot(led_cdev, delay_on, delay_off,
invert);
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 05188351711d..2ec34cfcedce 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -152,12 +152,26 @@ static void lp5521_load_engine(struct lp55xx_chip *chip)
lp5521_wait_opmode_done();
}
-static void lp5521_stop_engine(struct lp55xx_chip *chip)
+static void lp5521_stop_all_engines(struct lp55xx_chip *chip)
{
lp55xx_write(chip, LP5521_REG_OP_MODE, 0);
lp5521_wait_opmode_done();
}
+static void lp5521_stop_engine(struct lp55xx_chip *chip)
+{
+ enum lp55xx_engine_index idx = chip->engine_idx;
+ u8 mask[] = {
+ [LP55XX_ENGINE_1] = LP5521_MODE_R_M,
+ [LP55XX_ENGINE_2] = LP5521_MODE_G_M,
+ [LP55XX_ENGINE_3] = LP5521_MODE_B_M,
+ };
+
+ lp55xx_update_bits(chip, LP5521_REG_OP_MODE, mask[idx], 0);
+
+ lp5521_wait_opmode_done();
+}
+
static void lp5521_run_engine(struct lp55xx_chip *chip, bool start)
{
int ret;
@@ -244,18 +258,12 @@ static int lp5521_update_program_memory(struct lp55xx_chip *chip,
if (i % 2)
goto err;
- mutex_lock(&chip->lock);
-
for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) {
ret = lp55xx_write(chip, addr[idx] + i, pattern[i]);
- if (ret) {
- mutex_unlock(&chip->lock);
+ if (ret)
return -EINVAL;
- }
}
- mutex_unlock(&chip->lock);
-
return size;
err:
@@ -427,15 +435,17 @@ static ssize_t store_engine_load(struct device *dev,
{
struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
struct lp55xx_chip *chip = led->chip;
+ int ret;
mutex_lock(&chip->lock);
chip->engine_idx = nr;
lp5521_load_engine(chip);
+ ret = lp5521_update_program_memory(chip, buf, len);
mutex_unlock(&chip->lock);
- return lp5521_update_program_memory(chip, buf, len);
+ return ret;
}
store_load(1)
store_load(2)
@@ -568,7 +578,7 @@ static int lp5521_remove(struct i2c_client *client)
struct lp55xx_led *led = i2c_get_clientdata(client);
struct lp55xx_chip *chip = led->chip;
- lp5521_stop_engine(chip);
+ lp5521_stop_all_engines(chip);
lp55xx_unregister_sysfs(chip);
lp55xx_unregister_leds(led, chip);
lp55xx_deinit_device(chip);
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 2f97651c6224..4ade66a2d9d4 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -195,12 +195,26 @@ static void lp5523_load_engine_and_select_page(struct lp55xx_chip *chip)
lp55xx_write(chip, LP5523_REG_PROG_PAGE_SEL, page_sel[idx]);
}
-static void lp5523_stop_engine(struct lp55xx_chip *chip)
+static void lp5523_stop_all_engines(struct lp55xx_chip *chip)
{
lp55xx_write(chip, LP5523_REG_OP_MODE, 0);
lp5523_wait_opmode_done();
}
+static void lp5523_stop_engine(struct lp55xx_chip *chip)
+{
+ enum lp55xx_engine_index idx = chip->engine_idx;
+ u8 mask[] = {
+ [LP55XX_ENGINE_1] = LP5523_MODE_ENG1_M,
+ [LP55XX_ENGINE_2] = LP5523_MODE_ENG2_M,
+ [LP55XX_ENGINE_3] = LP5523_MODE_ENG3_M,
+ };
+
+ lp55xx_update_bits(chip, LP5523_REG_OP_MODE, mask[idx], 0);
+
+ lp5523_wait_opmode_done();
+}
+
static void lp5523_turn_off_channels(struct lp55xx_chip *chip)
{
int i;
@@ -311,7 +325,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
}
out:
- lp5523_stop_engine(chip);
+ lp5523_stop_all_engines(chip);
return ret;
}
@@ -345,18 +359,12 @@ static int lp5523_update_program_memory(struct lp55xx_chip *chip,
if (i % 2)
goto err;
- mutex_lock(&chip->lock);
-
for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) {
ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]);
- if (ret) {
- mutex_unlock(&chip->lock);
+ if (ret)
return -EINVAL;
- }
}
- mutex_unlock(&chip->lock);
-
return size;
err:
@@ -556,15 +564,17 @@ static ssize_t store_engine_load(struct device *dev,
{
struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
struct lp55xx_chip *chip = led->chip;
+ int ret;
mutex_lock(&chip->lock);
chip->engine_idx = nr;
lp5523_load_engine_and_select_page(chip);
+ ret = lp5523_update_program_memory(chip, buf, len);
mutex_unlock(&chip->lock);
- return lp5523_update_program_memory(chip, buf, len);
+ return ret;
}
store_load(1)
store_load(2)
@@ -786,7 +796,7 @@ static int lp5523_remove(struct i2c_client *client)
struct lp55xx_led *led = i2c_get_clientdata(client);
struct lp55xx_chip *chip = led->chip;
- lp5523_stop_engine(chip);
+ lp5523_stop_all_engines(chip);
lp55xx_unregister_sysfs(chip);
lp55xx_unregister_leds(led, chip);
lp55xx_deinit_device(chip);
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index 9acc6bb7deef..88317b4f7bf3 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -210,6 +210,7 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
{
struct lp55xx_chip *chip = context;
struct device *dev = &chip->cl->dev;
+ enum lp55xx_engine_index idx = chip->engine_idx;
if (!fw) {
dev_err(dev, "firmware request failed\n");
@@ -219,6 +220,7 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
/* handling firmware data is chip dependent */
mutex_lock(&chip->lock);
+ chip->engines[idx - 1].mode = LP55XX_ENGINE_LOAD;
chip->fw = fw;
if (chip->cfg->firmware_cb)
chip->cfg->firmware_cb(chip);
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index fa9b439323bd..ca87a1b4a0db 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -117,9 +117,7 @@ static void mc13xxx_led_work(struct work_struct *work)
BUG();
}
- mc13xxx_lock(led->master);
mc13xxx_reg_rmw(led->master, reg, mask << shift, value << shift);
- mc13xxx_unlock(led->master);
}
static void mc13xxx_led_set(struct led_classdev *led_cdev,
@@ -132,75 +130,6 @@ static void mc13xxx_led_set(struct led_classdev *led_cdev,
schedule_work(&led->work);
}
-static int __init mc13xxx_led_setup(struct mc13xxx_led *led, int max_current)
-{
- int shift, mask, reg, ret, bank;
-
- switch (led->id) {
- case MC13783_LED_MD:
- reg = MC13XXX_REG_LED_CONTROL(2);
- shift = 0;
- mask = 0x07;
- break;
- case MC13783_LED_AD:
- reg = MC13XXX_REG_LED_CONTROL(2);
- shift = 3;
- mask = 0x07;
- break;
- case MC13783_LED_KP:
- reg = MC13XXX_REG_LED_CONTROL(2);
- shift = 6;
- mask = 0x07;
- break;
- case MC13783_LED_R1:
- case MC13783_LED_G1:
- case MC13783_LED_B1:
- case MC13783_LED_R2:
- case MC13783_LED_G2:
- case MC13783_LED_B2:
- case MC13783_LED_R3:
- case MC13783_LED_G3:
- case MC13783_LED_B3:
- bank = (led->id - MC13783_LED_R1) / 3;
- reg = MC13XXX_REG_LED_CONTROL(3) + bank;
- shift = ((led->id - MC13783_LED_R1) - bank * 3) * 2;
- mask = 0x03;
- break;
- case MC13892_LED_MD:
- reg = MC13XXX_REG_LED_CONTROL(0);
- shift = 9;
- mask = 0x07;
- break;
- case MC13892_LED_AD:
- reg = MC13XXX_REG_LED_CONTROL(0);
- shift = 21;
- mask = 0x07;
- break;
- case MC13892_LED_KP:
- reg = MC13XXX_REG_LED_CONTROL(1);
- shift = 9;
- mask = 0x07;
- break;
- case MC13892_LED_R:
- case MC13892_LED_G:
- case MC13892_LED_B:
- bank = (led->id - MC13892_LED_R) / 2;
- reg = MC13XXX_REG_LED_CONTROL(2) + bank;
- shift = ((led->id - MC13892_LED_R) - bank * 2) * 12 + 9;
- mask = 0x07;
- break;
- default:
- BUG();
- }
-
- mc13xxx_lock(led->master);
- ret = mc13xxx_reg_rmw(led->master, reg, mask << shift,
- max_current << shift);
- mc13xxx_unlock(led->master);
-
- return ret;
-}
-
static int __init mc13xxx_led_probe(struct platform_device *pdev)
{
struct mc13xxx_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -233,31 +162,22 @@ static int __init mc13xxx_led_probe(struct platform_device *pdev)
leds->num_leds = num_leds;
platform_set_drvdata(pdev, leds);
- mc13xxx_lock(mcdev);
for (i = 0; i < devtype->num_regs; i++) {
reg = pdata->led_control[i];
WARN_ON(reg >= (1 << 24));
ret = mc13xxx_reg_write(mcdev, MC13XXX_REG_LED_CONTROL(i), reg);
if (ret)
- break;
- }
- mc13xxx_unlock(mcdev);
-
- if (ret) {
- dev_err(&pdev->dev, "Unable to init LED driver\n");
- return ret;
+ return ret;
}
for (i = 0; i < num_leds; i++) {
const char *name, *trig;
- char max_current;
ret = -EINVAL;
id = pdata->led[i].id;
name = pdata->led[i].name;
trig = pdata->led[i].default_trigger;
- max_current = pdata->led[i].max_current;
if ((id > devtype->led_max) || (id < devtype->led_min)) {
dev_err(&pdev->dev, "Invalid ID %i\n", id);
@@ -280,11 +200,6 @@ static int __init mc13xxx_led_probe(struct platform_device *pdev)
INIT_WORK(&leds->led[i].work, mc13xxx_led_work);
- ret = mc13xxx_led_setup(&leds->led[i], max_current);
- if (ret) {
- dev_err(&pdev->dev, "Unable to setup LED %i\n", id);
- break;
- }
ret = led_classdev_register(pdev->dev.parent,
&leds->led[i].cdev);
if (ret) {
@@ -313,10 +228,8 @@ static int mc13xxx_led_remove(struct platform_device *pdev)
cancel_work_sync(&leds->led[i].work);
}
- mc13xxx_lock(mcdev);
for (i = 0; i < leds->devtype->num_regs; i++)
mc13xxx_reg_write(mcdev, MC13XXX_REG_LED_CONTROL(i), 0);
- mc13xxx_unlock(mcdev);
return 0;
}
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index b31d8e99c419..605047428b5a 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -66,9 +66,11 @@ static void led_pwm_set(struct led_classdev *led_cdev,
struct led_pwm_data *led_dat =
container_of(led_cdev, struct led_pwm_data, cdev);
unsigned int max = led_dat->cdev.max_brightness;
- unsigned int period = led_dat->period;
+ unsigned long long duty = led_dat->period;
- led_dat->duty = brightness * period / max;
+ duty *= brightness;
+ do_div(duty, max);
+ led_dat->duty = duty;
if (led_dat->can_sleep)
schedule_work(&led_dat->work);
@@ -85,11 +87,10 @@ static inline size_t sizeof_pwm_leds_priv(int num_leds)
static int led_pwm_create_of(struct platform_device *pdev,
struct led_pwm_priv *priv)
{
- struct device_node *node = pdev->dev.of_node;
struct device_node *child;
int ret;
- for_each_child_of_node(node, child) {
+ for_each_child_of_node(pdev->dev.of_node, child) {
struct led_pwm_data *led_dat = &priv->leds[priv->num_leds];
led_dat->cdev.name = of_get_property(child, "label",
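
The led_pwm_set() hunk above widens the duty-cycle computation to a 64-bit intermediate before dividing by max_brightness. A minimal standalone sketch of why, assuming a hypothetical ~1-second PWM period and a max_brightness of 255 (sample values chosen for illustration, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t period = 1000000000;	/* ns; hypothetical ~1 Hz PWM period */
	uint32_t brightness = 200, max = 255;

	/* the 32-bit product brightness * period wraps around... */
	uint32_t bad = brightness * period / max;
	/* ...so the driver now computes the duty in 64 bits, via do_div() */
	uint64_t good = (uint64_t)brightness * period / max;

	printf("32-bit duty: %u ns, 64-bit duty: %llu ns\n",
	       bad, (unsigned long long)good);
	return 0;
}
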
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index 76483fb5ee45..98174e7240ee 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -18,10 +18,10 @@
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/platform_data/leds-s3c24xx.h>
-#include <mach/hardware.h>
#include <mach/regs-gpio.h>
-#include <linux/platform_data/leds-s3c24xx.h>
+#include <plat/gpio-cfg.h>
/* our context */
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 8cc304f36728..3d9e267a56c4 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -4,77 +4,87 @@
* The TCA6507 is a programmable LED controller that can drive 7
* separate lines either by holding them low, or by pulsing them
* with modulated width.
- * The modulation can be varied in a simple pattern to produce a blink or
- * double-blink.
+ * The modulation can be varied in a simple pattern to produce a
+ * blink or double-blink.
*
- * This driver can configure each line either as a 'GPIO' which is out-only
- * (no pull-up) or as an LED with variable brightness and hardware-assisted
- * blinking.
+ * This driver can configure each line either as a 'GPIO' which is
+ * out-only (pull-up resistor required) or as an LED with variable
+ * brightness and hardware-assisted blinking.
*
- * Apart from OFF and ON there are three programmable brightness levels which
- * can be programmed from 0 to 15 and indicate how many 500usec intervals in
- * each 8msec that the led is 'on'. The levels are named MASTER, BANK0 and
- * BANK1.
+ * Apart from OFF and ON there are three programmable brightness
+ * levels which can be programmed from 0 to 15 and indicate how many
+ * 500usec intervals in each 8msec that the led is 'on'. The levels
+ * are named MASTER, BANK0 and BANK1.
*
- * There are two different blink rates that can be programmed, each with
- * separate time for rise, on, fall, off and second-off. Thus if 3 or more
- * different non-trivial rates are required, software must be used for the extra
- * rates. The two different blink rates must align with the two levels BANK0 and
- * BANK1.
- * This driver does not support double-blink so 'second-off' always matches
- * 'off'.
+ * There are two different blink rates that can be programmed, each
+ * with separate time for rise, on, fall, off and second-off. Thus if
+ * 3 or more different non-trivial rates are required, software must
+ * be used for the extra rates. The two different blink rates must
+ * align with the two levels BANK0 and BANK1. This driver does not
+ * support double-blink so 'second-off' always matches 'off'.
*
- * Only 16 different times can be programmed in a roughly logarithmic scale from
- * 64ms to 16320ms. To be precise the possible times are:
+ * Only 16 different times can be programmed in a roughly logarithmic
+ * scale from 64ms to 16320ms. To be precise the possible times are:
* 0, 64, 128, 192, 256, 384, 512, 768,
* 1024, 1536, 2048, 3072, 4096, 5760, 8128, 16320
*
- * Times that cannot be closely matched with these must be
- * handled in software. This driver allows 12.5% error in matching.
+ * Times that cannot be closely matched with these must be handled in
+ * software. This driver allows 12.5% error in matching.
*
- * This driver does not allow rise/fall rates to be set explicitly. When trying
- * to match a given 'on' or 'off' period, an appropriate pair of 'change' and
- * 'hold' times are chosen to get a close match. If the target delay is even,
- * the 'change' number will be the smaller; if odd, the 'hold' number will be
- * the smaller.
-
- * Choosing pairs of delays with 12.5% errors allows us to match delays in the
- * ranges: 56-72, 112-144, 168-216, 224-27504, 28560-36720.
- * 26% of the achievable sums can be matched by multiple pairings. For example
- * 1536 == 1536+0, 1024+512, or 768+768. This driver will always choose the
- * pairing with the least maximum - 768+768 in this case. Other pairings are
- * not available.
+ * This driver does not allow rise/fall rates to be set explicitly.
+ * When trying to match a given 'on' or 'off' period, an appropriate
+ * pair of 'change' and 'hold' times are chosen to get a close match.
+ * If the target delay is even, the 'change' number will be the
+ * smaller; if odd, the 'hold' number will be the smaller.
+
+ * Choosing pairs of delays with 12.5% errors allows us to match
+ * delays in the ranges: 56-72, 112-144, 168-216, 224-27504,
+ * 28560-36720.
+ * 26% of the achievable sums can be matched by multiple pairings.
+ * For example 1536 == 1536+0, 1024+512, or 768+768.
+ * This driver will always choose the pairing with the least
+ * maximum - 768+768 in this case. Other pairings are not available.
*
- * Access to the 3 levels and 2 blinks are on a first-come, first-served basis.
- * Access can be shared by multiple leds if they have the same level and
- * either same blink rates, or some don't blink.
- * When a led changes, it relinquishes access and tries again, so it might
- * lose access to hardware blink.
- * If a blink engine cannot be allocated, software blink is used.
- * If the desired brightness cannot be allocated, the closest available non-zero
- * brightness is used. As 'full' is always available, the worst case would be
- * to have two different blink rates at '1', with Max at '2', then other leds
- * will have to choose between '2' and '16'. Hopefully this is not likely.
+ * Access to the 3 levels and 2 blinks are on a first-come,
+ * first-served basis. Access can be shared by multiple leds if they
+ * have the same level and either same blink rates, or some don't
+ * blink. When a led changes, it relinquishes access and tries again,
+ * so it might lose access to hardware blink.
*
- * Each bank (BANK0 and BANK1) has two usage counts - LEDs using the brightness
- * and LEDs using the blink. It can only be reprogrammed when the appropriate
- * counter is zero. The MASTER level has a single usage count.
+ * If a blink engine cannot be allocated, software blink is used. If
+ * the desired brightness cannot be allocated, the closest available
+ * non-zero brightness is used. As 'full' is always available, the
+ * worst case would be to have two different blink rates at '1', with
+ * Max at '2', then other leds will have to choose between '2' and
+ * '16'. Hopefully this is not likely.
*
- * Each Led has programmable 'on' and 'off' time as milliseconds. With each
- * there is a flag saying if it was explicitly requested or defaulted.
- * Similarly the banks know if each time was explicit or a default. Defaults
- * are permitted to be changed freely - they are not recognised when matching.
+ * Each bank (BANK0 and BANK1) has two usage counts - LEDs using the
+ * brightness and LEDs using the blink. It can only be reprogrammed
+ * when the appropriate counter is zero. The MASTER level has a
+ * single usage count.
*
+ * Each LED has programmable 'on' and 'off' time as milliseconds.
+ * With each there is a flag saying if it was explicitly requested or
+ * defaulted. Similarly the banks know if each time was explicit or a
+ * default. Defaults are permitted to be changed freely - they are
+ * not recognised when matching.
*
- * An led-tca6507 device must be provided with platform data. This data
- * lists for each output: the name, default trigger, and whether the signal
- * is being used as a GPiO rather than an led. 'struct led_plaform_data'
- * is used for this. If 'name' is NULL, the output isn't used. If 'flags'
- * is TCA6507_MAKE_CPIO, the output is a GPO.
- * The "struct led_platform_data" can be embedded in a
- * "struct tca6507_platform_data" which adds a 'gpio_base' for the GPiOs,
- * and a 'setup' callback which is called once the GPiOs are available.
*
+ * An led-tca6507 device must be provided with platform data or
+ * configured via devicetree.
+ *
+ * The platform-data lists for each output: the name, default trigger,
+ * and whether the signal is being used as a GPIO rather than an LED.
+ * 'struct led_platform_data' is used for this. If 'name' is NULL, the
+ * output isn't used. If 'flags' is TCA6507_MAKE_GPIO, the output is
+ * a GPO. The "struct led_platform_data" can be embedded in a "struct
+ * tca6507_platform_data" which adds a 'gpio_base' for the GPIOs, and
+ * a 'setup' callback which is called once the GPIOs are available.
+ *
+ * When configured via devicetree there is one child for each output.
+ * The "reg" determines the output number and "compatible" determines
+ * whether it is an LED or a GPIO. "linux,default-trigger" can set a
+ * default trigger.
*/
#include <linux/module.h>
@@ -192,17 +202,18 @@ MODULE_DEVICE_TABLE(i2c, tca6507_id);
static int choose_times(int msec, int *c1p, int *c2p)
{
/*
- * Choose two timecodes which add to 'msec' as near as possible.
- * The first returned is the 'on' or 'off' time. The second is to be
- * used as a 'fade-on' or 'fade-off' time. If 'msec' is even,
- * the first will not be smaller than the second. If 'msec' is odd,
- * the first will not be larger than the second.
- * If we cannot get a sum within 1/8 of 'msec' fail with -EINVAL,
- * otherwise return the sum that was achieved, plus 1 if the first is
- * smaller.
- * If two possibilities are equally good (e.g. 512+0, 256+256), choose
- * the first pair so there is more change-time visible (i.e. it is
- * softer).
+ * Choose two timecodes which add to 'msec' as near as
+ * possible. The first returned is the 'on' or 'off' time.
+ * The second is to be used as a 'fade-on' or 'fade-off' time.
+ * If 'msec' is even, the first will not be smaller than the
+ * second. If 'msec' is odd, the first will not be larger
+ * than the second.
+ * If we cannot get a sum within 1/8 of 'msec' fail with
+ * -EINVAL, otherwise return the sum that was achieved, plus 1
+ * if the first is smaller.
+ * If two possibilities are equally good (e.g. 512+0,
+ * 256+256), choose the first pair so there is more
+ * change-time visible (i.e. it is softer).
*/
int c1, c2;
int tmax = msec * 9 / 8;
@@ -255,8 +266,8 @@ static int choose_times(int msec, int *c1p, int *c2p)
}
/*
- * Update the register file with the appropriate 3-bit state for
- * the given led.
+ * Update the register file with the appropriate 3-bit state for the
+ * given led.
*/
static void set_select(struct tca6507_chip *tca, int led, int val)
{
@@ -274,9 +285,9 @@ static void set_select(struct tca6507_chip *tca, int led, int val)
}
}
-/* Update the register file with the appropriate 4-bit code for
- * one bank or other. This can be used for timers, for levels, or
- * for initialisation.
+/* Update the register file with the appropriate 4-bit code for one
+ * bank or other. This can be used for timers, for levels, or for
+ * initialization.
*/
static void set_code(struct tca6507_chip *tca, int reg, int bank, int new)
{
@@ -309,7 +320,7 @@ static void set_level(struct tca6507_chip *tca, int bank, int level)
tca->bank[bank].level = level;
}
-/* Record all relevant time code for a given bank */
+/* Record all relevant time codes for a given bank */
static void set_times(struct tca6507_chip *tca, int bank)
{
int c1, c2;
@@ -317,7 +328,8 @@ static void set_times(struct tca6507_chip *tca, int bank)
result = choose_times(tca->bank[bank].ontime, &c1, &c2);
dev_dbg(&tca->client->dev,
- "Chose on times %d(%d) %d(%d) for %dms\n", c1, time_codes[c1],
+ "Chose on times %d(%d) %d(%d) for %dms\n",
+ c1, time_codes[c1],
c2, time_codes[c2], tca->bank[bank].ontime);
set_code(tca, TCA6507_FADE_ON, bank, c2);
set_code(tca, TCA6507_FULL_ON, bank, c1);
@@ -325,7 +337,8 @@ static void set_times(struct tca6507_chip *tca, int bank)
result = choose_times(tca->bank[bank].offtime, &c1, &c2);
dev_dbg(&tca->client->dev,
- "Chose off times %d(%d) %d(%d) for %dms\n", c1, time_codes[c1],
+ "Chose off times %d(%d) %d(%d) for %dms\n",
+ c1, time_codes[c1],
c2, time_codes[c2], tca->bank[bank].offtime);
set_code(tca, TCA6507_FADE_OFF, bank, c2);
set_code(tca, TCA6507_FIRST_OFF, bank, c1);
@@ -373,7 +386,8 @@ static void led_release(struct tca6507_led *led)
static int led_prepare(struct tca6507_led *led)
{
- /* Assign this led to a bank, configuring that bank if necessary. */
+ /* Assign this led to a bank, configuring that bank if
+ * necessary. */
int level = TO_LEVEL(led->led_cdev.brightness);
struct tca6507_chip *tca = led->chip;
int c1, c2;
@@ -389,10 +403,10 @@ static int led_prepare(struct tca6507_led *led)
if (led->ontime == 0 || led->offtime == 0) {
/*
- * Just set the brightness, choosing first usable bank.
- * If none perfect, choose best.
- * Count backwards so we check MASTER bank first
- * to avoid wasting a timer.
+ * Just set the brightness, choosing first usable
+ * bank. If none perfect, choose best. Count
+ * backwards so we check MASTER bank first to avoid
+ * wasting a timer.
*/
int best = -1;/* full-on */
int diff = 15-level;
@@ -433,9 +447,9 @@ static int led_prepare(struct tca6507_led *led)
}
/*
- * We have on/off time so we need to try to allocate a timing bank.
- * First check if times are compatible with hardware and give up if
- * not.
+ * We have on/off time so we need to try to allocate a timing
+ * bank. First check if times are compatible with hardware
+ * and give up if not.
*/
if (choose_times(led->ontime, &c1, &c2) < 0)
return -EINVAL;
@@ -523,8 +537,8 @@ static int led_assign(struct tca6507_led *led)
err = led_prepare(led);
if (err) {
/*
- * Can only fail on timer setup. In that case we need to
- * re-establish as steady level.
+ * Can only fail on timer setup. In that case we need
+ * to re-establish as steady level.
*/
led->ontime = 0;
led->offtime = 0;
@@ -594,8 +608,8 @@ static void tca6507_gpio_set_value(struct gpio_chip *gc,
spin_lock_irqsave(&tca->lock, flags);
/*
- * 'OFF' is floating high, and 'ON' is pulled down, so it has the
- * inverse sense of 'val'.
+ * 'OFF' is floating high, and 'ON' is pulled down, so it has
+ * the inverse sense of 'val'.
*/
set_select(tca, tca->gpio_map[offset],
val ? TCA6507_LS_LED_OFF : TCA6507_LS_LED_ON);
@@ -638,6 +652,9 @@ static int tca6507_probe_gpios(struct i2c_client *client,
tca->gpio.direction_output = tca6507_gpio_direction_output;
tca->gpio.set = tca6507_gpio_set_value;
tca->gpio.dev = &client->dev;
+#ifdef CONFIG_OF_GPIO
+ tca->gpio.of_node = of_node_get(client->dev.of_node);
+#endif
err = gpiochip_add(&tca->gpio);
if (err) {
tca->gpio.ngpio = 0;
@@ -682,7 +699,7 @@ tca6507_led_dt_init(struct i2c_client *client)
return ERR_PTR(-ENODEV);
tca_leds = devm_kzalloc(&client->dev,
- sizeof(struct led_info) * count, GFP_KERNEL);
+ sizeof(struct led_info) * NUM_LEDS, GFP_KERNEL);
if (!tca_leds)
return ERR_PTR(-ENOMEM);
@@ -695,9 +712,11 @@ tca6507_led_dt_init(struct i2c_client *client)
of_get_property(child, "label", NULL) ? : child->name;
led.default_trigger =
of_get_property(child, "linux,default-trigger", NULL);
-
+ led.flags = 0;
+ if (of_property_match_string(child, "compatible", "gpio") >= 0)
+ led.flags |= TCA6507_MAKE_GPIO;
ret = of_property_read_u32(child, "reg", &reg);
- if (ret != 0)
+ if (ret != 0 || reg < 0 || reg >= NUM_LEDS)
continue;
tca_leds[reg] = led;
@@ -708,8 +727,10 @@ tca6507_led_dt_init(struct i2c_client *client)
return ERR_PTR(-ENOMEM);
pdata->leds.leds = tca_leds;
- pdata->leds.num_leds = count;
-
+ pdata->leds.num_leds = NUM_LEDS;
+#ifdef CONFIG_GPIOLIB
+ pdata->gpio_base = -1;
+#endif
return pdata;
}
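
The choose_times() comments above describe picking two hardware timecodes whose sum matches a requested delay to within 12.5%, preferring the pairing with the smallest maximum (1536 ms becomes 768+768 rather than 1536+0 or 1024+512). A rough standalone sketch of that search, using the time table listed in the driver header; pick_pair() is a hypothetical helper and ignores the real driver's even/odd ordering of the two codes:

#include <stdio.h>
#include <stdlib.h>

static const int time_codes[] = {
	0, 64, 128, 192, 256, 384, 512, 768,
	1024, 1536, 2048, 3072, 4096, 5760, 8128, 16320,
};

/* Find two codes summing to within 1/8 of msec, minimising the larger code. */
static int pick_pair(int msec, int *c1, int *c2)
{
	int best = -1, i, j;

	for (i = 0; i < 16; i++)
		for (j = 0; j <= i; j++) {
			int sum = time_codes[i] + time_codes[j];

			if (abs(sum - msec) * 8 <= msec &&
			    (best < 0 || time_codes[i] < time_codes[best])) {
				best = i;
				*c1 = i;
				*c2 = j;
			}
		}
	return best < 0 ? -1 : time_codes[*c1] + time_codes[*c2];
}

int main(void)
{
	int c1, c2, sum = pick_pair(1536, &c1, &c2);

	printf("1536ms -> %d+%d = %dms\n",
	       time_codes[c1], time_codes[c2], sum);	/* 768+768 = 1536ms */
	return 0;
}
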
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index d26a312f117a..3067d56b11a6 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -32,7 +32,7 @@ config ADB_MACII
config ADB_MACIISI
bool "Include Mac IIsi ADB driver"
- depends on ADB && MAC
+ depends on ADB && MAC && BROKEN
help
Say Y here if you want your kernel to support Macintosh systems that use
the Mac IIsi style ADB. This includes the IIsi, IIvi, IIvx, Classic
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index 9ef32b3df91f..590214ba736c 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -133,7 +133,7 @@ static int wf_lm75_probe(struct i2c_client *client,
lm->inited = 0;
lm->ds1775 = ds1775;
lm->i2c = client;
- lm->sens.name = (char *)name; /* XXX fix constness in structure */
+ lm->sens.name = name;
lm->sens.ops = &wf_lm75_ops;
i2c_set_clientdata(client, lm);
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index 945a25b2f31e..87e439b10318 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -95,7 +95,7 @@ static int wf_max6690_probe(struct i2c_client *client,
}
max->i2c = client;
- max->sens.name = (char *)name; /* XXX fix constness in structure */
+ max->sens.name = name;
max->sens.ops = &wf_max6690_ops;
i2c_set_clientdata(client, max);
diff --git a/drivers/mailbox/omap-mbox.h b/drivers/mailbox/omap-mbox.h
index 6cd38fc68599..86d7518cd13b 100644
--- a/drivers/mailbox/omap-mbox.h
+++ b/drivers/mailbox/omap-mbox.h
@@ -52,7 +52,7 @@ struct omap_mbox_queue {
struct omap_mbox {
const char *name;
- unsigned int irq;
+ int irq;
struct omap_mbox_queue *txq, *rxq;
struct omap_mbox_ops *ops;
struct device *dev;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index f2ccbc3b9fe4..9a06fe883766 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -176,8 +176,12 @@ config MD_FAULTY
source "drivers/md/bcache/Kconfig"
+config BLK_DEV_DM_BUILTIN
+ boolean
+
config BLK_DEV_DM
tristate "Device mapper support"
+ select BLK_DEV_DM_BUILTIN
---help---
Device-mapper is a low level volume manager. It works by allowing
people to specify mappings for ranges of logical sectors. Various
@@ -238,6 +242,7 @@ config DM_CRYPT
config DM_SNAPSHOT
tristate "Snapshot target"
depends on BLK_DEV_DM
+ select DM_BUFIO
---help---
Allow volume managers to take writable snapshots of a device.
@@ -250,12 +255,12 @@ config DM_THIN_PROVISIONING
Provides thin provisioning and snapshots that share a data store.
config DM_DEBUG_BLOCK_STACK_TRACING
- boolean "Keep stack trace of thin provisioning block lock holders"
- depends on STACKTRACE_SUPPORT && DM_THIN_PROVISIONING
+ boolean "Keep stack trace of persistent data block lock holders"
+ depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
select STACKTRACE
---help---
Enable this for messages that may help debug problems with the
- block manager locking used by thin provisioning.
+ block manager locking used by thin provisioning and caching.
If unsure, say N.
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 2acc43fe0229..f26d83292579 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_MD_FAULTY) += faulty.o
obj-$(CONFIG_BCACHE) += bcache/
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
+obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o
obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile
index 0e9c82523be6..c488b846f831 100644
--- a/drivers/md/bcache/Makefile
+++ b/drivers/md/bcache/Makefile
@@ -1,7 +1,8 @@
obj-$(CONFIG_BCACHE) += bcache.o
-bcache-y := alloc.o btree.o bset.o io.o journal.o writeback.o\
- movinggc.o request.o super.o sysfs.o debug.o util.o trace.o stats.o closure.o
+bcache-y := alloc.o bset.o btree.o closure.o debug.o extents.o\
+ io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
+ util.o writeback.o
CFLAGS_request.o += -Iblock
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 2b46bf1d7e40..c0d37d082443 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -132,10 +132,16 @@ bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
{
BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));
- if (fifo_used(&ca->free) > ca->watermark[WATERMARK_MOVINGGC] &&
- CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO)
- return false;
+ if (CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO) {
+ unsigned i;
+
+ for (i = 0; i < RESERVE_NONE; i++)
+ if (!fifo_full(&ca->free[i]))
+ goto add;
+ return false;
+ }
+add:
b->prio = 0;
if (can_inc_bucket_gen(b) &&
@@ -162,8 +168,21 @@ static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
fifo_push(&ca->free_inc, b - ca->buckets);
}
-#define bucket_prio(b) \
- (((unsigned) (b->prio - ca->set->min_prio)) * GC_SECTORS_USED(b))
+/*
+ * Determines what order we're going to reuse buckets, smallest bucket_prio()
+ * first: we also take into account the number of sectors of live data in that
+ * bucket, and in order for that multiply to make sense we have to scale the
+ * bucket priorities.
+ *
+ * Thus, we scale the bucket priorities so that the bucket with the smallest
+ * prio is worth 1/8th of what INITIAL_PRIO is worth.
+ */
+
+#define bucket_prio(b) \
+({ \
+ unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
+ \
+ (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
+})
#define bucket_max_cmp(l, r) (bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r) (bucket_prio(l) > bucket_prio(r))
@@ -304,6 +323,21 @@ do { \
__set_current_state(TASK_RUNNING); \
} while (0)
+static int bch_allocator_push(struct cache *ca, long bucket)
+{
+ unsigned i;
+
+ /* Prios/gens are actually the most important reserve */
+ if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
+ return true;
+
+ for (i = 0; i < RESERVE_NR; i++)
+ if (fifo_push(&ca->free[i], bucket))
+ return true;
+
+ return false;
+}
+
static int bch_allocator_thread(void *arg)
{
struct cache *ca = arg;
@@ -336,9 +370,7 @@ static int bch_allocator_thread(void *arg)
mutex_lock(&ca->set->bucket_lock);
}
- allocator_wait(ca, !fifo_full(&ca->free));
-
- fifo_push(&ca->free, bucket);
+ allocator_wait(ca, bch_allocator_push(ca, bucket));
wake_up(&ca->set->bucket_wait);
}
@@ -365,34 +397,29 @@ static int bch_allocator_thread(void *arg)
}
}
-long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
+long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
{
DEFINE_WAIT(w);
struct bucket *b;
long r;
/* fastpath */
- if (fifo_used(&ca->free) > ca->watermark[watermark]) {
- fifo_pop(&ca->free, r);
+ if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
+ fifo_pop(&ca->free[reserve], r))
goto out;
- }
if (!wait)
return -1;
- while (1) {
- if (fifo_used(&ca->free) > ca->watermark[watermark]) {
- fifo_pop(&ca->free, r);
- break;
- }
-
+ do {
prepare_to_wait(&ca->set->bucket_wait, &w,
TASK_UNINTERRUPTIBLE);
mutex_unlock(&ca->set->bucket_lock);
schedule();
mutex_lock(&ca->set->bucket_lock);
- }
+ } while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
+ !fifo_pop(&ca->free[reserve], r));
finish_wait(&ca->set->bucket_wait, &w);
out:
@@ -401,12 +428,14 @@ out:
if (expensive_debug_checks(ca->set)) {
size_t iter;
long i;
+ unsigned j;
for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
- fifo_for_each(i, &ca->free, iter)
- BUG_ON(i == r);
+ for (j = 0; j < RESERVE_NR; j++)
+ fifo_for_each(i, &ca->free[j], iter)
+ BUG_ON(i == r);
fifo_for_each(i, &ca->free_inc, iter)
BUG_ON(i == r);
fifo_for_each(i, &ca->unused, iter)
@@ -419,11 +448,13 @@ out:
SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
- if (watermark <= WATERMARK_METADATA) {
+ if (reserve <= RESERVE_PRIO) {
SET_GC_MARK(b, GC_MARK_METADATA);
+ SET_GC_MOVE(b, 0);
b->prio = BTREE_PRIO;
} else {
SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ SET_GC_MOVE(b, 0);
b->prio = INITIAL_PRIO;
}
@@ -443,7 +474,7 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
}
}
-int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
struct bkey *k, int n, bool wait)
{
int i;
@@ -457,7 +488,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
for (i = 0; i < n; i++) {
struct cache *ca = c->cache_by_alloc[i];
- long b = bch_bucket_alloc(ca, watermark, wait);
+ long b = bch_bucket_alloc(ca, reserve, wait);
if (b == -1)
goto err;
@@ -476,12 +507,12 @@ err:
return -1;
}
-int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
struct bkey *k, int n, bool wait)
{
int ret;
mutex_lock(&c->bucket_lock);
- ret = __bch_bucket_alloc_set(c, watermark, k, n, wait);
+ ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
mutex_unlock(&c->bucket_lock);
return ret;
}
@@ -571,8 +602,8 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
unsigned watermark = write_prio
- ? WATERMARK_MOVINGGC
- : WATERMARK_NONE;
+ ? RESERVE_MOVINGGC
+ : RESERVE_NONE;
spin_unlock(&c->data_bucket_lock);
@@ -687,7 +718,7 @@ int bch_cache_allocator_init(struct cache *ca)
* Then 8 for btree allocations
* Then half for the moving garbage collector
*/
-
+#if 0
ca->watermark[WATERMARK_PRIO] = 0;
ca->watermark[WATERMARK_METADATA] = prio_buckets(ca);
@@ -697,6 +728,6 @@ int bch_cache_allocator_init(struct cache *ca)
ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
ca->watermark[WATERMARK_MOVINGGC];
-
+#endif
return 0;
}
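
The new bucket_prio() macro above adds a scaled offset so the coldest buckets no longer collapse to priority zero, which would discard the live-data (GC_SECTORS_USED) factor the comment describes. A minimal sketch of the arithmetic with the macro rewritten as a plain function; the parameter names and sample numbers are illustrative only:

#include <stdio.h>

#define INITIAL_PRIO	32768U

static unsigned int bucket_prio(unsigned int prio, unsigned int set_min_prio,
				unsigned int gc_sectors_used)
{
	/* the smallest prio ends up worth roughly 1/8th of INITIAL_PRIO */
	unsigned int min_prio = (INITIAL_PRIO - set_min_prio) / 8;

	return (prio - set_min_prio + min_prio) * gc_sectors_used;
}

int main(void)
{
	/* two equally cold buckets (prio == min_prio), different live data */
	printf("few live sectors:  %u\n", bucket_prio(1000, 1000, 10));
	printf("many live sectors: %u\n", bucket_prio(1000, 1000, 500));
	return 0;
}
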
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 4beb55a0ff30..a4c7306ff43d 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -187,6 +187,7 @@
#include <linux/types.h>
#include <linux/workqueue.h>
+#include "bset.h"
#include "util.h"
#include "closure.h"
@@ -197,7 +198,7 @@ struct bucket {
uint8_t disk_gen;
uint8_t last_gc; /* Most out of date gen in the btree */
uint8_t gc_gen;
- uint16_t gc_mark;
+ uint16_t gc_mark; /* Bitfield used by GC. See below for field */
};
/*
@@ -209,7 +210,10 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE 0
#define GC_MARK_DIRTY 1
#define GC_MARK_METADATA 2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
+#define GC_SECTORS_USED_SIZE 13
+#define MAX_GC_SECTORS_USED (~(~0ULL << GC_SECTORS_USED_SIZE))
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
+BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
#include "journal.h"
#include "stats.h"
@@ -279,7 +283,6 @@ struct bcache_device {
unsigned long sectors_dirty_last;
long sectors_dirty_derivative;
- mempool_t *unaligned_bvec;
struct bio_set *bio_split;
unsigned data_csum:1;
@@ -309,7 +312,8 @@ struct cached_dev {
struct cache_sb sb;
struct bio sb_bio;
struct bio_vec sb_bv[1];
- struct closure_with_waitlist sb_write;
+ struct closure sb_write;
+ struct semaphore sb_write_mutex;
/* Refcount on the cache set. Always nonzero when we're caching. */
atomic_t count;
@@ -372,22 +376,22 @@ struct cached_dev {
unsigned char writeback_percent;
unsigned writeback_delay;
- int writeback_rate_change;
- int64_t writeback_rate_derivative;
uint64_t writeback_rate_target;
+ int64_t writeback_rate_proportional;
+ int64_t writeback_rate_derivative;
+ int64_t writeback_rate_change;
unsigned writeback_rate_update_seconds;
unsigned writeback_rate_d_term;
unsigned writeback_rate_p_term_inverse;
- unsigned writeback_rate_d_smooth;
};
-enum alloc_watermarks {
- WATERMARK_PRIO,
- WATERMARK_METADATA,
- WATERMARK_MOVINGGC,
- WATERMARK_NONE,
- WATERMARK_MAX
+enum alloc_reserve {
+ RESERVE_BTREE,
+ RESERVE_PRIO,
+ RESERVE_MOVINGGC,
+ RESERVE_NONE,
+ RESERVE_NR,
};
struct cache {
@@ -399,8 +403,6 @@ struct cache {
struct kobject kobj;
struct block_device *bdev;
- unsigned watermark[WATERMARK_MAX];
-
struct task_struct *alloc_thread;
struct closure prio;
@@ -429,7 +431,7 @@ struct cache {
* because all the data they contained was overwritten), so we only
* need to discard them before they can be moved to the free list.
*/
- DECLARE_FIFO(long, free);
+ DECLARE_FIFO(long, free)[RESERVE_NR];
DECLARE_FIFO(long, free_inc);
DECLARE_FIFO(long, unused);
@@ -445,7 +447,6 @@ struct cache {
* call prio_write() to keep gens from wrapping.
*/
uint8_t need_save_prio;
- unsigned gc_move_threshold;
/*
* If nonzero, we know we aren't going to find any buckets to invalidate
@@ -515,7 +516,8 @@ struct cache_set {
uint64_t cached_dev_sectors;
struct closure caching;
- struct closure_with_waitlist sb_write;
+ struct closure sb_write;
+ struct semaphore sb_write_mutex;
mempool_t *search;
mempool_t *bio_meta;
@@ -630,13 +632,15 @@ struct cache_set {
#ifdef CONFIG_BCACHE_DEBUG
struct btree *verify_data;
+ struct bset *verify_ondisk;
struct mutex verify_lock;
#endif
unsigned nr_uuids;
struct uuid_entry *uuids;
BKEY_PADDED(uuid_bucket);
- struct closure_with_waitlist uuid_write;
+ struct closure uuid_write;
+ struct semaphore uuid_write_mutex;
/*
* A btree node on disk could have too many bsets for an iterator to fit
@@ -644,13 +648,7 @@ struct cache_set {
*/
mempool_t *fill_iter;
- /*
- * btree_sort() is a merge sort and requires temporary space - single
- * element mempool
- */
- struct mutex sort_lock;
- struct bset *sort;
- unsigned sort_crit_factor;
+ struct bset_sort_state sort;
/* List of buckets we're currently writing data to */
struct list_head data_buckets;
@@ -666,7 +664,6 @@ struct cache_set {
unsigned congested_read_threshold_us;
unsigned congested_write_threshold_us;
- struct time_stats sort_time;
struct time_stats btree_gc_time;
struct time_stats btree_split_time;
struct time_stats btree_read_time;
@@ -684,9 +681,9 @@ struct cache_set {
unsigned error_decay;
unsigned short journal_delay_ms;
+ bool expensive_debug_checks;
unsigned verify:1;
unsigned key_merging_disabled:1;
- unsigned expensive_debug_checks:1;
unsigned gc_always_rewrite:1;
unsigned shrinker_disabled:1;
unsigned copy_gc_enabled:1;
@@ -708,13 +705,8 @@ struct bbio {
struct bio bio;
};
-static inline unsigned local_clock_us(void)
-{
- return local_clock() >> 10;
-}
-
#define BTREE_PRIO USHRT_MAX
-#define INITIAL_PRIO 32768
+#define INITIAL_PRIO 32768U
#define btree_bytes(c) ((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b) \
@@ -727,21 +719,6 @@ static inline unsigned local_clock_us(void)
#define bucket_bytes(c) ((c)->sb.bucket_size << 9)
#define block_bytes(c) ((c)->sb.block_size << 9)
-#define __set_bytes(i, k) (sizeof(*(i)) + (k) * sizeof(uint64_t))
-#define set_bytes(i) __set_bytes(i, i->keys)
-
-#define __set_blocks(i, k, c) DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c))
-#define set_blocks(i, c) __set_blocks(i, (i)->keys, c)
-
-#define node(i, j) ((struct bkey *) ((i)->d + (j)))
-#define end(i) node(i, (i)->keys)
-
-#define index(i, b) \
- ((size_t) (((void *) i - (void *) (b)->sets[0].data) / \
- block_bytes(b->c)))
-
-#define btree_data_space(b) (PAGE_SIZE << (b)->page_order)
-
#define prios_per_bucket(c) \
((bucket_bytes(c) - sizeof(struct prio_set)) / \
sizeof(struct bucket_disk))
@@ -784,20 +761,34 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}
-/* Btree key macros */
+static inline uint8_t gen_after(uint8_t a, uint8_t b)
+{
+ uint8_t r = a - b;
+ return r > 128U ? 0 : r;
+}
-static inline void bkey_init(struct bkey *k)
+static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
+ unsigned i)
{
- *k = ZERO_KEY;
+ return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}
+static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
+ unsigned i)
+{
+ return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
+}
+
+/* Btree key macros */
+
/*
* This is used for various on disk data structures - cache_sb, prio_set, bset,
* jset: The checksum is _always_ the first 8 bytes of these structs
*/
#define csum_set(i) \
bch_crc64(((void *) (i)) + sizeof(uint64_t), \
- ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t)))
+ ((void *) bset_bkey_last(i)) - \
+ (((void *) (i)) + sizeof(uint64_t)))
/* Error handling macros */
@@ -902,7 +893,6 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
void bch_bbio_free(struct bio *, struct cache_set *);
struct bio *bch_bbio_alloc(struct cache_set *);
-struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
void bch_generic_make_request(struct bio *, struct bio_split_pool *);
void __bch_submit_bbio(struct bio *, struct cache_set *);
void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 7d388b8bb50e..3f74b4b0747b 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -5,30 +5,134 @@
* Copyright 2012 Google, Inc.
*/
-#include "bcache.h"
-#include "btree.h"
-#include "debug.h"
+#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
+#include "util.h"
+#include "bset.h"
+
+#include <linux/console.h>
#include <linux/random.h>
#include <linux/prefetch.h>
+#ifdef CONFIG_BCACHE_DEBUG
+
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
+{
+ struct bkey *k, *next;
+
+ for (k = i->start; k < bset_bkey_last(i); k = next) {
+ next = bkey_next(k);
+
+ printk(KERN_ERR "block %u key %li/%u: ", set,
+ (uint64_t *) k - i->d, i->keys);
+
+ if (b->ops->key_dump)
+ b->ops->key_dump(b, k);
+ else
+ printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
+
+ if (next < bset_bkey_last(i) &&
+ bkey_cmp(k, b->ops->is_extents ?
+ &START_KEY(next) : next) > 0)
+ printk(KERN_ERR "Key skipped backwards\n");
+ }
+}
+
+void bch_dump_bucket(struct btree_keys *b)
+{
+ unsigned i;
+
+ console_lock();
+ for (i = 0; i <= b->nsets; i++)
+ bch_dump_bset(b, b->set[i].data,
+ bset_sector_offset(b, b->set[i].data));
+ console_unlock();
+}
+
+int __bch_count_data(struct btree_keys *b)
+{
+ unsigned ret = 0;
+ struct btree_iter iter;
+ struct bkey *k;
+
+ if (b->ops->is_extents)
+ for_each_key(b, k, &iter)
+ ret += KEY_SIZE(k);
+ return ret;
+}
+
+void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
+{
+ va_list args;
+ struct bkey *k, *p = NULL;
+ struct btree_iter iter;
+ const char *err;
+
+ for_each_key(b, k, &iter) {
+ if (b->ops->is_extents) {
+ err = "Keys out of order";
+ if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
+ goto bug;
+
+ if (bch_ptr_invalid(b, k))
+ continue;
+
+ err = "Overlapping keys";
+ if (p && bkey_cmp(p, &START_KEY(k)) > 0)
+ goto bug;
+ } else {
+ if (bch_ptr_bad(b, k))
+ continue;
+
+ err = "Duplicate keys";
+ if (p && !bkey_cmp(p, k))
+ goto bug;
+ }
+ p = k;
+ }
+#if 0
+ err = "Key larger than btree node key";
+ if (p && bkey_cmp(p, &b->key) > 0)
+ goto bug;
+#endif
+ return;
+bug:
+ bch_dump_bucket(b);
+
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+
+ panic("bch_check_keys error: %s:\n", err);
+}
+
+static void bch_btree_iter_next_check(struct btree_iter *iter)
+{
+ struct bkey *k = iter->data->k, *next = bkey_next(k);
+
+ if (next < iter->data->end &&
+ bkey_cmp(k, iter->b->ops->is_extents ?
+ &START_KEY(next) : next) > 0) {
+ bch_dump_bucket(iter->b);
+ panic("Key skipped backwards\n");
+ }
+}
+
+#else
+
+static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
+
+#endif
+
/* Keylists */
-int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
+int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
{
size_t oldsize = bch_keylist_nkeys(l);
- size_t newsize = oldsize + 2 + nptrs;
+ size_t newsize = oldsize + u64s;
uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
uint64_t *new_keys;
- /* The journalling code doesn't handle the case where the keys to insert
- * is bigger than an empty write: If we just return -ENOMEM here,
- * bio_insert() and bio_invalidate() will insert the keys created so far
- * and finish the rest when the keylist is empty.
- */
- if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
- return -ENOMEM;
-
newsize = roundup_pow_of_two(newsize);
if (newsize <= KEYLIST_INLINE ||
@@ -71,136 +175,6 @@ void bch_keylist_pop_front(struct keylist *l)
bch_keylist_bytes(l));
}
-/* Pointer validation */
-
-static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
-{
- unsigned i;
-
- for (i = 0; i < KEY_PTRS(k); i++)
- if (ptr_available(c, k, i)) {
- struct cache *ca = PTR_CACHE(c, k, i);
- size_t bucket = PTR_BUCKET_NR(c, k, i);
- size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
- if (KEY_SIZE(k) + r > c->sb.bucket_size ||
- bucket < ca->sb.first_bucket ||
- bucket >= ca->sb.nbuckets)
- return true;
- }
-
- return false;
-}
-
-bool bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
-{
- char buf[80];
-
- if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
- goto bad;
-
- if (__ptr_invalid(c, k))
- goto bad;
-
- return false;
-bad:
- bch_bkey_to_text(buf, sizeof(buf), k);
- cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
- return true;
-}
-
-bool bch_extent_ptr_invalid(struct cache_set *c, const struct bkey *k)
-{
- char buf[80];
-
- if (!KEY_SIZE(k))
- return true;
-
- if (KEY_SIZE(k) > KEY_OFFSET(k))
- goto bad;
-
- if (__ptr_invalid(c, k))
- goto bad;
-
- return false;
-bad:
- bch_bkey_to_text(buf, sizeof(buf), k);
- cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
- return true;
-}
-
-static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k,
- unsigned ptr)
-{
- struct bucket *g = PTR_BUCKET(b->c, k, ptr);
- char buf[80];
-
- if (mutex_trylock(&b->c->bucket_lock)) {
- if (b->level) {
- if (KEY_DIRTY(k) ||
- g->prio != BTREE_PRIO ||
- (b->c->gc_mark_valid &&
- GC_MARK(g) != GC_MARK_METADATA))
- goto err;
-
- } else {
- if (g->prio == BTREE_PRIO)
- goto err;
-
- if (KEY_DIRTY(k) &&
- b->c->gc_mark_valid &&
- GC_MARK(g) != GC_MARK_DIRTY)
- goto err;
- }
- mutex_unlock(&b->c->bucket_lock);
- }
-
- return false;
-err:
- mutex_unlock(&b->c->bucket_lock);
- bch_bkey_to_text(buf, sizeof(buf), k);
- btree_bug(b,
-"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
- buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
- g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
- return true;
-}
-
-bool bch_ptr_bad(struct btree *b, const struct bkey *k)
-{
- struct bucket *g;
- unsigned i, stale;
-
- if (!bkey_cmp(k, &ZERO_KEY) ||
- !KEY_PTRS(k) ||
- bch_ptr_invalid(b, k))
- return true;
-
- for (i = 0; i < KEY_PTRS(k); i++) {
- if (!ptr_available(b->c, k, i))
- return true;
-
- g = PTR_BUCKET(b->c, k, i);
- stale = ptr_stale(b->c, k, i);
-
- btree_bug_on(stale > 96, b,
- "key too stale: %i, need_gc %u",
- stale, b->c->need_gc);
-
- btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
- b, "stale dirty pointer");
-
- if (stale)
- return true;
-
- if (expensive_debug_checks(b->c) &&
- ptr_bad_expensive_checks(b, k, i))
- return true;
- }
-
- return false;
-}
-
/* Key/pointer manipulation */
void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
@@ -255,56 +229,138 @@ bool __bch_cut_back(const struct bkey *where, struct bkey *k)
return true;
}
-static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
+/* Auxiliary search trees */
+
+/* 32 bits total: */
+#define BKEY_MID_BITS 3
+#define BKEY_EXPONENT_BITS 7
+#define BKEY_MANTISSA_BITS (32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
+#define BKEY_MANTISSA_MASK ((1 << BKEY_MANTISSA_BITS) - 1)
+
+struct bkey_float {
+ unsigned exponent:BKEY_EXPONENT_BITS;
+ unsigned m:BKEY_MID_BITS;
+ unsigned mantissa:BKEY_MANTISSA_BITS;
+} __packed;
+
+/*
+ * BSET_CACHELINE was originally intended to match the hardware cacheline size -
+ * it used to be 64, but I realized the lookup code would touch slightly less
+ * memory if it was 128.
+ *
+ * It defines the number of bytes (in struct bset) per struct bkey_float in
+ * the auxiliary search tree - when we're done searching the bset_float tree we
+ * have this many bytes left that we do a linear search over.
+ *
+ * Since (after level 5) every level of the bset_tree is on a new cacheline,
+ * we're touching one fewer cacheline in the bset tree in exchange for one more
+ * cacheline in the linear search - but the linear search might stop before it
+ * gets to the second cacheline.
+ */
+
+#define BSET_CACHELINE 128
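/*
 * Worked example (editor's sketch, not part of this patch), using the helpers
 * added just below and assuming PAGE_SIZE is 4096 and a node of page_order 2:
 *
 *	btree_keys_bytes()	= 4096 << 2			= 16384 bytes of keys
 *	btree_keys_cachelines()	= 16384 / BSET_CACHELINE	= 128
 *	bset_tree_bytes()	= 128 * sizeof(struct bkey_float) = 512 bytes
 *	bset_prev_bytes()	= 128 * sizeof(uint8_t)		= 128 bytes
 *
 * i.e. the auxiliary search tree costs one 4 byte bkey_float plus one prev
 * byte per BSET_CACHELINE (128) bytes of keys.
 */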
+
+/* Space required for the btree node keys */
+static inline size_t btree_keys_bytes(struct btree_keys *b)
{
- return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
- ~((uint64_t)1 << 63);
+ return PAGE_SIZE << b->page_order;
}
-/* Tries to merge l and r: l should be lower than r
- * Returns true if we were able to merge. If we did merge, l will be the merged
- * key, r will be untouched.
- */
-bool bch_bkey_try_merge(struct btree *b, struct bkey *l, struct bkey *r)
+static inline size_t btree_keys_cachelines(struct btree_keys *b)
{
- unsigned i;
+ return btree_keys_bytes(b) / BSET_CACHELINE;
+}
- if (key_merging_disabled(b->c))
- return false;
+/* Space required for the auxiliary search trees */
+static inline size_t bset_tree_bytes(struct btree_keys *b)
+{
+ return btree_keys_cachelines(b) * sizeof(struct bkey_float);
+}
- if (KEY_PTRS(l) != KEY_PTRS(r) ||
- KEY_DIRTY(l) != KEY_DIRTY(r) ||
- bkey_cmp(l, &START_KEY(r)))
- return false;
+/* Space required for the prev pointers */
+static inline size_t bset_prev_bytes(struct btree_keys *b)
+{
+ return btree_keys_cachelines(b) * sizeof(uint8_t);
+}
- for (i = 0; i < KEY_PTRS(l); i++)
- if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
- PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
- return false;
+/* Memory allocation */
- /* Keys with no pointers aren't restricted to one bucket and could
- * overflow KEY_SIZE
- */
- if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
- SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
- SET_KEY_SIZE(l, USHRT_MAX);
+void bch_btree_keys_free(struct btree_keys *b)
+{
+ struct bset_tree *t = b->set;
- bch_cut_front(l, r);
- return false;
- }
+ if (bset_prev_bytes(b) < PAGE_SIZE)
+ kfree(t->prev);
+ else
+ free_pages((unsigned long) t->prev,
+ get_order(bset_prev_bytes(b)));
- if (KEY_CSUM(l)) {
- if (KEY_CSUM(r))
- l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
- else
- SET_KEY_CSUM(l, 0);
- }
+ if (bset_tree_bytes(b) < PAGE_SIZE)
+ kfree(t->tree);
+ else
+ free_pages((unsigned long) t->tree,
+ get_order(bset_tree_bytes(b)));
- SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
- SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));
+ free_pages((unsigned long) t->data, b->page_order);
- return true;
+ t->prev = NULL;
+ t->tree = NULL;
+ t->data = NULL;
+}
+EXPORT_SYMBOL(bch_btree_keys_free);
+
+int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
+{
+ struct bset_tree *t = b->set;
+
+ BUG_ON(t->data);
+
+ b->page_order = page_order;
+
+ t->data = (void *) __get_free_pages(gfp, b->page_order);
+ if (!t->data)
+ goto err;
+
+ t->tree = bset_tree_bytes(b) < PAGE_SIZE
+ ? kmalloc(bset_tree_bytes(b), gfp)
+ : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
+ if (!t->tree)
+ goto err;
+
+ t->prev = bset_prev_bytes(b) < PAGE_SIZE
+ ? kmalloc(bset_prev_bytes(b), gfp)
+ : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
+ if (!t->prev)
+ goto err;
+
+ return 0;
+err:
+ bch_btree_keys_free(b);
+ return -ENOMEM;
}
+EXPORT_SYMBOL(bch_btree_keys_alloc);
+
+void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
+ bool *expensive_debug_checks)
+{
+ unsigned i;
+
+ b->ops = ops;
+ b->expensive_debug_checks = expensive_debug_checks;
+ b->nsets = 0;
+ b->last_set_unwritten = 0;
+
+ /* XXX: shouldn't be needed */
+ for (i = 0; i < MAX_BSETS; i++)
+ b->set[i].size = 0;
+ /*
+ * Second loop starts at 1 because b->set[0].data is the memory we
+ * allocated
+ */
+ for (i = 1; i < MAX_BSETS; i++)
+ b->set[i].data = NULL;
+}
+EXPORT_SYMBOL(bch_btree_keys_init);
/* Binary tree stuff for auxiliary search trees */
@@ -455,9 +511,11 @@ static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}
-static unsigned bkey_to_cacheline_offset(struct bkey *k)
+static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
+ unsigned cacheline,
+ struct bkey *k)
{
- return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t);
+ return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}
static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
@@ -504,7 +562,7 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
: tree_to_prev_bkey(t, j >> ffs(j));
struct bkey *r = is_power_of_2(j + 1)
- ? node(t->data, t->data->keys - bkey_u64s(&t->end))
+ ? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
: tree_to_bkey(t, j >> (ffz(j) + 1));
BUG_ON(m < l || m > r);
@@ -528,9 +586,9 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
f->exponent = 127;
}
-static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
+static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
- if (t != b->sets) {
+ if (t != b->set) {
unsigned j = roundup(t[-1].size,
64 / sizeof(struct bkey_float));
@@ -538,33 +596,54 @@ static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
t->prev = t[-1].prev + j;
}
- while (t < b->sets + MAX_BSETS)
+ while (t < b->set + MAX_BSETS)
t++->size = 0;
}
-static void bset_build_unwritten_tree(struct btree *b)
+static void bch_bset_build_unwritten_tree(struct btree_keys *b)
{
- struct bset_tree *t = b->sets + b->nsets;
+ struct bset_tree *t = bset_tree_last(b);
+
+ BUG_ON(b->last_set_unwritten);
+ b->last_set_unwritten = 1;
bset_alloc_tree(b, t);
- if (t->tree != b->sets->tree + bset_tree_space(b)) {
- t->prev[0] = bkey_to_cacheline_offset(t->data->start);
+ if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
+ t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
t->size = 1;
}
}
-static void bset_build_written_tree(struct btree *b)
+void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
+{
+ if (i != b->set->data) {
+ b->set[++b->nsets].data = i;
+ i->seq = b->set->data->seq;
+ } else
+ get_random_bytes(&i->seq, sizeof(uint64_t));
+
+ i->magic = magic;
+ i->version = 0;
+ i->keys = 0;
+
+ bch_bset_build_unwritten_tree(b);
+}
+EXPORT_SYMBOL(bch_bset_init_next);
+
+void bch_bset_build_written_tree(struct btree_keys *b)
{
- struct bset_tree *t = b->sets + b->nsets;
- struct bkey *k = t->data->start;
+ struct bset_tree *t = bset_tree_last(b);
+ struct bkey *prev = NULL, *k = t->data->start;
unsigned j, cacheline = 1;
+ b->last_set_unwritten = 0;
+
bset_alloc_tree(b, t);
t->size = min_t(unsigned,
- bkey_to_cacheline(t, end(t->data)),
- b->sets->tree + bset_tree_space(b) - t->tree);
+ bkey_to_cacheline(t, bset_bkey_last(t->data)),
+ b->set->tree + btree_keys_cachelines(b) - t->tree);
if (t->size < 2) {
t->size = 0;
@@ -577,16 +656,14 @@ static void bset_build_written_tree(struct btree *b)
for (j = inorder_next(0, t->size);
j;
j = inorder_next(j, t->size)) {
- while (bkey_to_cacheline(t, k) != cacheline)
- k = bkey_next(k);
+ while (bkey_to_cacheline(t, k) < cacheline)
+ prev = k, k = bkey_next(k);
- t->prev[j] = bkey_u64s(k);
- k = bkey_next(k);
- cacheline++;
- t->tree[j].m = bkey_to_cacheline_offset(k);
+ t->prev[j] = bkey_u64s(prev);
+ t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
}
- while (bkey_next(k) != end(t->data))
+ while (bkey_next(k) != bset_bkey_last(t->data))
k = bkey_next(k);
t->end = *k;
@@ -597,14 +674,17 @@ static void bset_build_written_tree(struct btree *b)
j = inorder_next(j, t->size))
make_bfloat(t, j);
}
+EXPORT_SYMBOL(bch_bset_build_written_tree);
-void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
+/* Insert */
+
+void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
struct bset_tree *t;
unsigned inorder, j = 1;
- for (t = b->sets; t <= &b->sets[b->nsets]; t++)
- if (k < end(t->data))
+ for (t = b->set; t <= bset_tree_last(b); t++)
+ if (k < bset_bkey_last(t->data))
goto found_set;
BUG();
@@ -617,7 +697,7 @@ found_set:
if (k == t->data->start)
goto fix_left;
- if (bkey_next(k) == end(t->data)) {
+ if (bkey_next(k) == bset_bkey_last(t->data)) {
t->end = *k;
goto fix_right;
}
@@ -642,10 +722,12 @@ fix_right: do {
j = j * 2 + 1;
} while (j < t->size);
}
+EXPORT_SYMBOL(bch_bset_fix_invalidated_key);
-void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
+static void bch_bset_fix_lookup_table(struct btree_keys *b,
+ struct bset_tree *t,
+ struct bkey *k)
{
- struct bset_tree *t = &b->sets[b->nsets];
unsigned shift = bkey_u64s(k);
unsigned j = bkey_to_cacheline(t, k);
@@ -657,8 +739,8 @@ void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
* lookup table for the first key that is strictly greater than k:
* it's either k's cacheline or the next one
*/
- if (j < t->size &&
- table_to_bkey(t, j) <= k)
+ while (j < t->size &&
+ table_to_bkey(t, j) <= k)
j++;
/* Adjust all the lookup table entries, and find a new key for any that
@@ -673,54 +755,124 @@ void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
while (k < cacheline_to_bkey(t, j, 0))
k = bkey_next(k);
- t->prev[j] = bkey_to_cacheline_offset(k);
+ t->prev[j] = bkey_to_cacheline_offset(t, j, k);
}
}
- if (t->size == b->sets->tree + bset_tree_space(b) - t->tree)
+ if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
return;
/* Possibly add a new entry to the end of the lookup table */
for (k = table_to_bkey(t, t->size - 1);
- k != end(t->data);
+ k != bset_bkey_last(t->data);
k = bkey_next(k))
if (t->size == bkey_to_cacheline(t, k)) {
- t->prev[t->size] = bkey_to_cacheline_offset(k);
+ t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k);
t->size++;
}
}
-void bch_bset_init_next(struct btree *b)
+/*
+ * Tries to merge l and r: l should be lower than r
+ * Returns true if we were able to merge. If we did merge, l will be the merged
+ * key, r will be untouched.
+ */
+bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
{
- struct bset *i = write_block(b);
+ if (!b->ops->key_merge)
+ return false;
- if (i != b->sets[0].data) {
- b->sets[++b->nsets].data = i;
- i->seq = b->sets[0].data->seq;
- } else
- get_random_bytes(&i->seq, sizeof(uint64_t));
+ /*
+ * Generic header checks
+ * Assumes left and right are in order
+ * Left and right must be exactly aligned
+ */
+ if (!bch_bkey_equal_header(l, r) ||
+ bkey_cmp(l, &START_KEY(r)))
+ return false;
- i->magic = bset_magic(&b->c->sb);
- i->version = 0;
- i->keys = 0;
+ return b->ops->key_merge(b, l, r);
+}
+EXPORT_SYMBOL(bch_bkey_try_merge);
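/*
 * Illustrative use only (editor's sketch, not part of this patch); the names
 * prev and k are hypothetical.  A caller holding two keys already known to be
 * in sorted order can attempt a merge and simply keep both on failure:
 */
#if 0
	if (bch_bkey_try_merge(b, prev, k)) {
		/* k was folded into prev; nothing left to insert */
	} else {
		/* no key_merge hook, or headers differ: keep both keys */
	}
#endif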
+
+void bch_bset_insert(struct btree_keys *b, struct bkey *where,
+ struct bkey *insert)
+{
+ struct bset_tree *t = bset_tree_last(b);
+
+ BUG_ON(!b->last_set_unwritten);
+ BUG_ON(bset_byte_offset(b, t->data) +
+ __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
+ PAGE_SIZE << b->page_order);
+
+ memmove((uint64_t *) where + bkey_u64s(insert),
+ where,
+ (void *) bset_bkey_last(t->data) - (void *) where);
+
+ t->data->keys += bkey_u64s(insert);
+ bkey_copy(where, insert);
+ bch_bset_fix_lookup_table(b, t, where);
+}
+EXPORT_SYMBOL(bch_bset_insert);
+
+unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+ struct bkey *replace_key)
+{
+ unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
+ struct bset *i = bset_tree_last(b)->data;
+ struct bkey *m, *prev = NULL;
+ struct btree_iter iter;
+
+ BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
+
+ m = bch_btree_iter_init(b, &iter, b->ops->is_extents
+ ? PRECEDING_KEY(&START_KEY(k))
+ : PRECEDING_KEY(k));
+
+ if (b->ops->insert_fixup(b, k, &iter, replace_key))
+ return status;
- bset_build_unwritten_tree(b);
+ status = BTREE_INSERT_STATUS_INSERT;
+
+ while (m != bset_bkey_last(i) &&
+ bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
+ prev = m, m = bkey_next(m);
+
+ /* prev is in the tree, if we merge we're done */
+ status = BTREE_INSERT_STATUS_BACK_MERGE;
+ if (prev &&
+ bch_bkey_try_merge(b, prev, k))
+ goto merged;
+#if 0
+ status = BTREE_INSERT_STATUS_OVERWROTE;
+ if (m != bset_bkey_last(i) &&
+ KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
+ goto copy;
+#endif
+ status = BTREE_INSERT_STATUS_FRONT_MERGE;
+ if (m != bset_bkey_last(i) &&
+ bch_bkey_try_merge(b, k, m))
+ goto copy;
+
+ bch_bset_insert(b, m, k);
+copy: bkey_copy(m, k);
+merged:
+ return status;
}
+EXPORT_SYMBOL(bch_btree_insert_key);
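/*
 * Editor's sketch, not part of this patch: a hypothetical caller (b being a
 * struct btree_keys *) can use the returned status, see the
 * BTREE_INSERT_STATUS_* enum in bset.h, to account for how the key was
 * resolved:
 */
#if 0
	switch (bch_btree_insert_key(b, k, replace_key)) {
	case BTREE_INSERT_STATUS_NO_INSERT:
		/* insert_fixup() consumed or rejected the key */
		break;
	case BTREE_INSERT_STATUS_BACK_MERGE:
	case BTREE_INSERT_STATUS_FRONT_MERGE:
		/* key was merged into an existing neighbour */
		break;
	default:
		/* plain insert */
		break;
	}
#endif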
+
+/* Lookup */
struct bset_search_iter {
struct bkey *l, *r;
};
-static struct bset_search_iter bset_search_write_set(struct btree *b,
- struct bset_tree *t,
+static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
const struct bkey *search)
{
unsigned li = 0, ri = t->size;
- BUG_ON(!b->nsets &&
- t->size < bkey_to_cacheline(t, end(t->data)));
-
while (li + 1 != ri) {
unsigned m = (li + ri) >> 1;
@@ -732,12 +884,11 @@ static struct bset_search_iter bset_search_write_set(struct btree *b,
return (struct bset_search_iter) {
table_to_bkey(t, li),
- ri < t->size ? table_to_bkey(t, ri) : end(t->data)
+ ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
};
}
-static struct bset_search_iter bset_search_tree(struct btree *b,
- struct bset_tree *t,
+static struct bset_search_iter bset_search_tree(struct bset_tree *t,
const struct bkey *search)
{
struct bkey *l, *r;
@@ -784,7 +935,7 @@ static struct bset_search_iter bset_search_tree(struct btree *b,
f = &t->tree[inorder_next(j, t->size)];
r = cacheline_to_bkey(t, inorder, f->m);
} else
- r = end(t->data);
+ r = bset_bkey_last(t->data);
} else {
r = cacheline_to_bkey(t, inorder, f->m);
@@ -798,7 +949,7 @@ static struct bset_search_iter bset_search_tree(struct btree *b,
return (struct bset_search_iter) {l, r};
}
-struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
+struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
const struct bkey *search)
{
struct bset_search_iter i;
@@ -820,7 +971,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
if (unlikely(!t->size)) {
i.l = t->data->start;
- i.r = end(t->data);
+ i.r = bset_bkey_last(t->data);
} else if (bset_written(b, t)) {
/*
* Each node in the auxiliary search tree covers a certain range
@@ -830,23 +981,27 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
*/
if (unlikely(bkey_cmp(search, &t->end) >= 0))
- return end(t->data);
+ return bset_bkey_last(t->data);
if (unlikely(bkey_cmp(search, t->data->start) < 0))
return t->data->start;
- i = bset_search_tree(b, t, search);
- } else
- i = bset_search_write_set(b, t, search);
+ i = bset_search_tree(t, search);
+ } else {
+ BUG_ON(!b->nsets &&
+ t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
- if (expensive_debug_checks(b->c)) {
+ i = bset_search_write_set(t, search);
+ }
+
+ if (btree_keys_expensive_checks(b)) {
BUG_ON(bset_written(b, t) &&
i.l != t->data->start &&
bkey_cmp(tree_to_prev_bkey(t,
inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
search) > 0);
- BUG_ON(i.r != end(t->data) &&
+ BUG_ON(i.r != bset_bkey_last(t->data) &&
bkey_cmp(i.r, search) <= 0);
}
@@ -856,22 +1011,17 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
return i.l;
}
+EXPORT_SYMBOL(__bch_bset_search);
/* Btree iterator */
-/*
- * Returns true if l > r - unless l == r, in which case returns true if l is
- * older than r.
- *
- * Necessary for btree_sort_fixup() - if there are multiple keys that compare
- * equal in different sets, we have to process them newest to oldest.
- */
+typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
+ struct btree_iter_set);
+
static inline bool btree_iter_cmp(struct btree_iter_set l,
struct btree_iter_set r)
{
- int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
-
- return c ? c > 0 : l.k < r.k;
+ return bkey_cmp(l.k, r.k) > 0;
}
static inline bool btree_iter_end(struct btree_iter *iter)
@@ -888,8 +1038,10 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
btree_iter_cmp));
}
-struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
- struct bkey *search, struct bset_tree *start)
+static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
+ struct btree_iter *iter,
+ struct bkey *search,
+ struct bset_tree *start)
{
struct bkey *ret = NULL;
iter->size = ARRAY_SIZE(iter->data);
@@ -899,15 +1051,24 @@ struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
iter->b = b;
#endif
- for (; start <= &b->sets[b->nsets]; start++) {
+ for (; start <= bset_tree_last(b); start++) {
ret = bch_bset_search(b, start, search);
- bch_btree_iter_push(iter, ret, end(start->data));
+ bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
}
return ret;
}
-struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+struct bkey *bch_btree_iter_init(struct btree_keys *b,
+ struct btree_iter *iter,
+ struct bkey *search)
+{
+ return __bch_btree_iter_init(b, iter, search, b->set);
+}
+EXPORT_SYMBOL(bch_btree_iter_init);
+
+static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
+ btree_iter_cmp_fn *cmp)
{
struct btree_iter_set unused;
struct bkey *ret = NULL;
@@ -924,16 +1085,23 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
}
if (iter->data->k == iter->data->end)
- heap_pop(iter, unused, btree_iter_cmp);
+ heap_pop(iter, unused, cmp);
else
- heap_sift(iter, 0, btree_iter_cmp);
+ heap_sift(iter, 0, cmp);
}
return ret;
}
+struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+{
+ return __bch_btree_iter_next(iter, btree_iter_cmp);
+
+}
+EXPORT_SYMBOL(bch_btree_iter_next);
+
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
- struct btree *b, ptr_filter_fn fn)
+ struct btree_keys *b, ptr_filter_fn fn)
{
struct bkey *ret;
@@ -946,70 +1114,58 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
/* Mergesort */
-static void sort_key_next(struct btree_iter *iter,
- struct btree_iter_set *i)
+void bch_bset_sort_state_free(struct bset_sort_state *state)
{
- i->k = bkey_next(i->k);
-
- if (i->k == i->end)
- *i = iter->data[--iter->used];
+ if (state->pool)
+ mempool_destroy(state->pool);
}
-static void btree_sort_fixup(struct btree_iter *iter)
+int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
{
- while (iter->used > 1) {
- struct btree_iter_set *top = iter->data, *i = top + 1;
-
- if (iter->used > 2 &&
- btree_iter_cmp(i[0], i[1]))
- i++;
-
- if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
- break;
+ spin_lock_init(&state->time.lock);
- if (!KEY_SIZE(i->k)) {
- sort_key_next(iter, i);
- heap_sift(iter, i - top, btree_iter_cmp);
- continue;
- }
+ state->page_order = page_order;
+ state->crit_factor = int_sqrt(1 << page_order);
- if (top->k > i->k) {
- if (bkey_cmp(top->k, i->k) >= 0)
- sort_key_next(iter, i);
- else
- bch_cut_front(top->k, i->k);
+ state->pool = mempool_create_page_pool(1, page_order);
+ if (!state->pool)
+ return -ENOMEM;
- heap_sift(iter, i - top, btree_iter_cmp);
- } else {
- /* can't happen because of comparison func */
- BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
- bch_cut_back(&START_KEY(i->k), top->k);
- }
- }
+ return 0;
}
+EXPORT_SYMBOL(bch_bset_sort_state_init);
-static void btree_mergesort(struct btree *b, struct bset *out,
+static void btree_mergesort(struct btree_keys *b, struct bset *out,
struct btree_iter *iter,
bool fixup, bool remove_stale)
{
+ int i;
struct bkey *k, *last = NULL;
- bool (*bad)(struct btree *, const struct bkey *) = remove_stale
+ BKEY_PADDED(k) tmp;
+ bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
? bch_ptr_bad
: bch_ptr_invalid;
+ /* Heapify the iterator, using our comparison function */
+ for (i = iter->used / 2 - 1; i >= 0; --i)
+ heap_sift(iter, i, b->ops->sort_cmp);
+
while (!btree_iter_end(iter)) {
- if (fixup && !b->level)
- btree_sort_fixup(iter);
+ if (b->ops->sort_fixup && fixup)
+ k = b->ops->sort_fixup(iter, &tmp.k);
+ else
+ k = NULL;
+
+ if (!k)
+ k = __bch_btree_iter_next(iter, b->ops->sort_cmp);
- k = bch_btree_iter_next(iter);
if (bad(b, k))
continue;
if (!last) {
last = out->start;
bkey_copy(last, k);
- } else if (b->level ||
- !bch_bkey_try_merge(b, last, k)) {
+ } else if (!bch_bkey_try_merge(b, last, k)) {
last = bkey_next(last);
bkey_copy(last, k);
}
@@ -1020,27 +1176,30 @@ static void btree_mergesort(struct btree *b, struct bset *out,
pr_debug("sorted %i keys", out->keys);
}
-static void __btree_sort(struct btree *b, struct btree_iter *iter,
- unsigned start, unsigned order, bool fixup)
+static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
+ unsigned start, unsigned order, bool fixup,
+ struct bset_sort_state *state)
{
uint64_t start_time;
- bool remove_stale = !b->written;
+ bool used_mempool = false;
struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
order);
if (!out) {
- mutex_lock(&b->c->sort_lock);
- out = b->c->sort;
- order = ilog2(bucket_pages(b->c));
+ struct page *outp;
+
+ BUG_ON(order > state->page_order);
+
+ outp = mempool_alloc(state->pool, GFP_NOIO);
+ out = page_address(outp);
+ used_mempool = true;
+ order = state->page_order;
}
start_time = local_clock();
- btree_mergesort(b, out, iter, fixup, remove_stale);
+ btree_mergesort(b, out, iter, fixup, false);
b->nsets = start;
- if (!fixup && !start && b->written)
- bch_btree_verify(b, out);
-
if (!start && order == b->page_order) {
/*
* Our temporary buffer is the same size as the btree node's
@@ -1048,84 +1207,76 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
* memcpy()
*/
- out->magic = bset_magic(&b->c->sb);
- out->seq = b->sets[0].data->seq;
- out->version = b->sets[0].data->version;
- swap(out, b->sets[0].data);
-
- if (b->c->sort == b->sets[0].data)
- b->c->sort = out;
+ out->magic = b->set->data->magic;
+ out->seq = b->set->data->seq;
+ out->version = b->set->data->version;
+ swap(out, b->set->data);
} else {
- b->sets[start].data->keys = out->keys;
- memcpy(b->sets[start].data->start, out->start,
- (void *) end(out) - (void *) out->start);
+ b->set[start].data->keys = out->keys;
+ memcpy(b->set[start].data->start, out->start,
+ (void *) bset_bkey_last(out) - (void *) out->start);
}
- if (out == b->c->sort)
- mutex_unlock(&b->c->sort_lock);
+ if (used_mempool)
+ mempool_free(virt_to_page(out), state->pool);
else
free_pages((unsigned long) out, order);
- if (b->written)
- bset_build_written_tree(b);
+ bch_bset_build_written_tree(b);
if (!start)
- bch_time_stats_update(&b->c->sort_time, start_time);
+ bch_time_stats_update(&state->time, start_time);
}
-void bch_btree_sort_partial(struct btree *b, unsigned start)
+void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
+ struct bset_sort_state *state)
{
size_t order = b->page_order, keys = 0;
struct btree_iter iter;
int oldsize = bch_count_data(b);
- __bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
-
- BUG_ON(b->sets[b->nsets].data == write_block(b) &&
- (b->sets[b->nsets].size || b->nsets));
-
+ __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
if (start) {
unsigned i;
for (i = start; i <= b->nsets; i++)
- keys += b->sets[i].data->keys;
+ keys += b->set[i].data->keys;
- order = roundup_pow_of_two(__set_bytes(b->sets->data,
- keys)) / PAGE_SIZE;
- if (order)
- order = ilog2(order);
+ order = get_order(__set_bytes(b->set->data, keys));
}
- __btree_sort(b, &iter, start, order, false);
+ __btree_sort(b, &iter, start, order, false, state);
- EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
+ EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
+EXPORT_SYMBOL(bch_btree_sort_partial);
-void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
+void bch_btree_sort_and_fix_extents(struct btree_keys *b,
+ struct btree_iter *iter,
+ struct bset_sort_state *state)
{
- BUG_ON(!b->written);
- __btree_sort(b, iter, 0, b->page_order, true);
+ __btree_sort(b, iter, 0, b->page_order, true, state);
}
-void bch_btree_sort_into(struct btree *b, struct btree *new)
+void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
+ struct bset_sort_state *state)
{
uint64_t start_time = local_clock();
struct btree_iter iter;
bch_btree_iter_init(b, &iter, NULL);
- btree_mergesort(b, new->sets->data, &iter, false, true);
+ btree_mergesort(b, new->set->data, &iter, false, true);
- bch_time_stats_update(&b->c->sort_time, start_time);
+ bch_time_stats_update(&state->time, start_time);
- bkey_copy_key(&new->key, &b->key);
- new->sets->size = 0;
+ new->set->size = 0; /* XXX: why? */
}
#define SORT_CRIT (4096 / sizeof(uint64_t))
-void bch_btree_sort_lazy(struct btree *b)
+void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
unsigned crit = SORT_CRIT;
int i;
@@ -1134,50 +1285,32 @@ void bch_btree_sort_lazy(struct btree *b)
if (!b->nsets)
goto out;
- /* If not a leaf node, always sort */
- if (b->level) {
- bch_btree_sort(b);
- return;
- }
-
for (i = b->nsets - 1; i >= 0; --i) {
- crit *= b->c->sort_crit_factor;
+ crit *= state->crit_factor;
- if (b->sets[i].data->keys < crit) {
- bch_btree_sort_partial(b, i);
+ if (b->set[i].data->keys < crit) {
+ bch_btree_sort_partial(b, i, state);
return;
}
}
/* Sort if we'd overflow */
if (b->nsets + 1 == MAX_BSETS) {
- bch_btree_sort(b);
+ bch_btree_sort(b, state);
return;
}
out:
- bset_build_written_tree(b);
+ bch_bset_build_written_tree(b);
}
+EXPORT_SYMBOL(bch_btree_sort_lazy);
-/* Sysfs stuff */
-
-struct bset_stats {
- struct btree_op op;
- size_t nodes;
- size_t sets_written, sets_unwritten;
- size_t bytes_written, bytes_unwritten;
- size_t floats, failed;
-};
-
-static int btree_bset_stats(struct btree_op *op, struct btree *b)
+void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
- struct bset_stats *stats = container_of(op, struct bset_stats, op);
unsigned i;
- stats->nodes++;
-
for (i = 0; i <= b->nsets; i++) {
- struct bset_tree *t = &b->sets[i];
+ struct bset_tree *t = &b->set[i];
size_t bytes = t->data->keys * sizeof(uint64_t);
size_t j;
@@ -1195,32 +1328,4 @@ static int btree_bset_stats(struct btree_op *op, struct btree *b)
stats->bytes_unwritten += bytes;
}
}
-
- return MAP_CONTINUE;
-}
-
-int bch_bset_print_stats(struct cache_set *c, char *buf)
-{
- struct bset_stats t;
- int ret;
-
- memset(&t, 0, sizeof(struct bset_stats));
- bch_btree_op_init(&t.op, -1);
-
- ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
- if (ret < 0)
- return ret;
-
- return snprintf(buf, PAGE_SIZE,
- "btree nodes: %zu\n"
- "written sets: %zu\n"
- "unwritten sets: %zu\n"
- "written key bytes: %zu\n"
- "unwritten key bytes: %zu\n"
- "floats: %zu\n"
- "failed: %zu\n",
- t.nodes,
- t.sets_written, t.sets_unwritten,
- t.bytes_written, t.bytes_unwritten,
- t.floats, t.failed);
}
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 1d3c24f9fa0e..003260f4ddf6 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -1,7 +1,11 @@
#ifndef _BCACHE_BSET_H
#define _BCACHE_BSET_H
-#include <linux/slab.h>
+#include <linux/bcache.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "util.h" /* for time_stats */
/*
* BKEYS:
@@ -142,20 +146,13 @@
* first key in that range of bytes again.
*/
-/* Btree key comparison/iteration */
+struct btree_keys;
+struct btree_iter;
+struct btree_iter_set;
+struct bkey_float;
#define MAX_BSETS 4U
-struct btree_iter {
- size_t size, used;
-#ifdef CONFIG_BCACHE_DEBUG
- struct btree *b;
-#endif
- struct btree_iter_set {
- struct bkey *k, *end;
- } data[MAX_BSETS];
-};
-
struct bset_tree {
/*
* We construct a binary tree in an array as if the array
@@ -165,14 +162,14 @@ struct bset_tree {
*/
/* size of the binary tree and prev array */
- unsigned size;
+ unsigned size;
/* function of size - precalculated for to_inorder() */
- unsigned extra;
+ unsigned extra;
/* copy of the last key in the set */
- struct bkey end;
- struct bkey_float *tree;
+ struct bkey end;
+ struct bkey_float *tree;
/*
* The nodes in the bset tree point to specific keys - this
@@ -182,12 +179,219 @@ struct bset_tree {
* to keep bkey_float to 4 bytes and prev isn't used in the fast
* path.
*/
- uint8_t *prev;
+ uint8_t *prev;
/* The actual btree node, with pointers to each sorted set */
- struct bset *data;
+ struct bset *data;
+};
+
+struct btree_keys_ops {
+ bool (*sort_cmp)(struct btree_iter_set,
+ struct btree_iter_set);
+ struct bkey *(*sort_fixup)(struct btree_iter *, struct bkey *);
+ bool (*insert_fixup)(struct btree_keys *, struct bkey *,
+ struct btree_iter *, struct bkey *);
+ bool (*key_invalid)(struct btree_keys *,
+ const struct bkey *);
+ bool (*key_bad)(struct btree_keys *, const struct bkey *);
+ bool (*key_merge)(struct btree_keys *,
+ struct bkey *, struct bkey *);
+ void (*key_to_text)(char *, size_t, const struct bkey *);
+ void (*key_dump)(struct btree_keys *, const struct bkey *);
+
+ /*
+ * Only used for deciding whether to use START_KEY(k) or just the key
+ * itself in a couple places
+ */
+ bool is_extents;
+};
+
+struct btree_keys {
+ const struct btree_keys_ops *ops;
+ uint8_t page_order;
+ uint8_t nsets;
+ unsigned last_set_unwritten:1;
+ bool *expensive_debug_checks;
+
+ /*
+ * Sets of sorted keys - the real btree node - plus a binary search tree
+ *
+ * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
+ * to the memory we have allocated for this btree node. Additionally,
+ * set[0]->data points to the entire btree node as it exists on disk.
+ */
+ struct bset_tree set[MAX_BSETS];
+};
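/*
 * Editor's sketch, not part of this patch, of the expected lifecycle of a
 * struct btree_keys; my_ops, page_order, expensive_checks and magic are
 * hypothetical names:
 */
#if 0
	struct btree_keys *b = ...;

	if (bch_btree_keys_alloc(b, page_order, GFP_KERNEL))
		return -ENOMEM;
	bch_btree_keys_init(b, &my_ops, &expensive_checks);

	/* start the first, unwritten bset in the freshly allocated node */
	bch_bset_init_next(b, b->set->data, magic);
#endif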
+
+static inline struct bset_tree *bset_tree_last(struct btree_keys *b)
+{
+ return b->set + b->nsets;
+}
+
+static inline bool bset_written(struct btree_keys *b, struct bset_tree *t)
+{
+ return t <= b->set + b->nsets - b->last_set_unwritten;
+}
+
+static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
+{
+ return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
+}
+
+static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i)
+{
+ return ((size_t) i) - ((size_t) b->set->data);
+}
+
+static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i)
+{
+ return bset_byte_offset(b, i) >> 9;
+}
+
+#define __set_bytes(i, k) (sizeof(*(i)) + (k) * sizeof(uint64_t))
+#define set_bytes(i) __set_bytes(i, i->keys)
+
+#define __set_blocks(i, k, block_bytes) \
+ DIV_ROUND_UP(__set_bytes(i, k), block_bytes)
+#define set_blocks(i, block_bytes) \
+ __set_blocks(i, (i)->keys, block_bytes)
+
+static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
+{
+ struct bset_tree *t = bset_tree_last(b);
+
+ BUG_ON((PAGE_SIZE << b->page_order) <
+ (bset_byte_offset(b, t->data) + set_bytes(t->data)));
+
+ if (!b->last_set_unwritten)
+ return 0;
+
+ return ((PAGE_SIZE << b->page_order) -
+ (bset_byte_offset(b, t->data) + set_bytes(t->data))) /
+ sizeof(u64);
+}
+
+static inline struct bset *bset_next_set(struct btree_keys *b,
+ unsigned block_bytes)
+{
+ struct bset *i = bset_tree_last(b)->data;
+
+ return ((void *) i) + roundup(set_bytes(i), block_bytes);
+}
+
+void bch_btree_keys_free(struct btree_keys *);
+int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t);
+void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *,
+ bool *);
+
+void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t);
+void bch_bset_build_written_tree(struct btree_keys *);
+void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
+bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *);
+void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);
+unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *,
+ struct bkey *);
+
+enum {
+ BTREE_INSERT_STATUS_NO_INSERT = 0,
+ BTREE_INSERT_STATUS_INSERT,
+ BTREE_INSERT_STATUS_BACK_MERGE,
+ BTREE_INSERT_STATUS_OVERWROTE,
+ BTREE_INSERT_STATUS_FRONT_MERGE,
};
+/* Btree key iteration */
+
+struct btree_iter {
+ size_t size, used;
+#ifdef CONFIG_BCACHE_DEBUG
+ struct btree_keys *b;
+#endif
+ struct btree_iter_set {
+ struct bkey *k, *end;
+ } data[MAX_BSETS];
+};
+
+typedef bool (*ptr_filter_fn)(struct btree_keys *, const struct bkey *);
+
+struct bkey *bch_btree_iter_next(struct btree_iter *);
+struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
+ struct btree_keys *, ptr_filter_fn);
+
+void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
+struct bkey *bch_btree_iter_init(struct btree_keys *, struct btree_iter *,
+ struct bkey *);
+
+struct bkey *__bch_bset_search(struct btree_keys *, struct bset_tree *,
+ const struct bkey *);
+
+/*
+ * Returns the first key that is strictly greater than search
+ */
+static inline struct bkey *bch_bset_search(struct btree_keys *b,
+ struct bset_tree *t,
+ const struct bkey *search)
+{
+ return search ? __bch_bset_search(b, t, search) : t->data->start;
+}
+
+#define for_each_key_filter(b, k, iter, filter) \
+ for (bch_btree_iter_init((b), (iter), NULL); \
+ ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
+
+#define for_each_key(b, k, iter) \
+ for (bch_btree_iter_init((b), (iter), NULL); \
+ ((k) = bch_btree_iter_next(iter));)
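/*
 * Editor's sketch, not part of this patch: walking every live key in a node
 * with the iterator macros above, filtering out bad pointers:
 */
#if 0
	struct btree_iter iter;
	struct bkey *k;

	for_each_key_filter(b, k, &iter, bch_ptr_bad)
		pr_info("key %llu:%llu", KEY_INODE(k), KEY_OFFSET(k));
#endif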
+
+/* Sorting */
+
+struct bset_sort_state {
+ mempool_t *pool;
+
+ unsigned page_order;
+ unsigned crit_factor;
+
+ struct time_stats time;
+};
+
+void bch_bset_sort_state_free(struct bset_sort_state *);
+int bch_bset_sort_state_init(struct bset_sort_state *, unsigned);
+void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *);
+void bch_btree_sort_into(struct btree_keys *, struct btree_keys *,
+ struct bset_sort_state *);
+void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *,
+ struct bset_sort_state *);
+void bch_btree_sort_partial(struct btree_keys *, unsigned,
+ struct bset_sort_state *);
+
+static inline void bch_btree_sort(struct btree_keys *b,
+ struct bset_sort_state *state)
+{
+ bch_btree_sort_partial(b, 0, state);
+}
+
+struct bset_stats {
+ size_t sets_written, sets_unwritten;
+ size_t bytes_written, bytes_unwritten;
+ size_t floats, failed;
+};
+
+void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *);
+
+/* Bkey utility code */
+
+#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, (i)->keys)
+
+static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
+{
+ return bkey_idx(i->start, idx);
+}
+
+static inline void bkey_init(struct bkey *k)
+{
+ *k = ZERO_KEY;
+}
+
static __always_inline int64_t bkey_cmp(const struct bkey *l,
const struct bkey *r)
{
@@ -196,6 +400,62 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
}
+void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
+ unsigned);
+bool __bch_cut_front(const struct bkey *, struct bkey *);
+bool __bch_cut_back(const struct bkey *, struct bkey *);
+
+static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
+{
+ BUG_ON(bkey_cmp(where, k) > 0);
+ return __bch_cut_front(where, k);
+}
+
+static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
+{
+ BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
+ return __bch_cut_back(where, k);
+}
+
+#define PRECEDING_KEY(_k) \
+({ \
+ struct bkey *_ret = NULL; \
+ \
+ if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \
+ _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \
+ \
+ if (!_ret->low) \
+ _ret->high--; \
+ _ret->low--; \
+ } \
+ \
+ _ret; \
+})
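/*
 * Editor's note, not part of this patch: PRECEDING_KEY(_k) builds, on the
 * stack, the largest key that still compares less than _k, so a search or
 * iterator started from it lands on the first key >= _k.  For example,
 * &KEY(1, 16, 0) yields a key at inode 1, offset 15, while the zero key
 * yields NULL (there is nothing before it).
 */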
+
+static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
+{
+ return b->ops->key_invalid(b, k);
+}
+
+static inline bool bch_ptr_bad(struct btree_keys *b, const struct bkey *k)
+{
+ return b->ops->key_bad(b, k);
+}
+
+static inline void bch_bkey_to_text(struct btree_keys *b, char *buf,
+ size_t size, const struct bkey *k)
+{
+ return b->ops->key_to_text(buf, size, k);
+}
+
+static inline bool bch_bkey_equal_header(const struct bkey *l,
+ const struct bkey *r)
+{
+ return (KEY_DIRTY(l) == KEY_DIRTY(r) &&
+ KEY_PTRS(l) == KEY_PTRS(r) &&
+ KEY_CSUM(l) == KEY_CSUM(r));
+}
+
/* Keylists */
struct keylist {
@@ -257,136 +517,44 @@ static inline size_t bch_keylist_bytes(struct keylist *l)
struct bkey *bch_keylist_pop(struct keylist *);
void bch_keylist_pop_front(struct keylist *);
-int bch_keylist_realloc(struct keylist *, int, struct cache_set *);
-
-void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
- unsigned);
-bool __bch_cut_front(const struct bkey *, struct bkey *);
-bool __bch_cut_back(const struct bkey *, struct bkey *);
+int __bch_keylist_realloc(struct keylist *, unsigned);
-static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
-{
- BUG_ON(bkey_cmp(where, k) > 0);
- return __bch_cut_front(where, k);
-}
+/* Debug stuff */
-static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
-{
- BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
- return __bch_cut_back(where, k);
-}
-
-const char *bch_ptr_status(struct cache_set *, const struct bkey *);
-bool bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
-bool bch_extent_ptr_invalid(struct cache_set *, const struct bkey *);
-
-bool bch_ptr_bad(struct btree *, const struct bkey *);
-
-static inline uint8_t gen_after(uint8_t a, uint8_t b)
-{
- uint8_t r = a - b;
- return r > 128U ? 0 : r;
-}
-
-static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
- unsigned i)
-{
- return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
-}
-
-static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
- unsigned i)
-{
- return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
-}
-
-
-typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);
-
-struct bkey *bch_btree_iter_next(struct btree_iter *);
-struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
- struct btree *, ptr_filter_fn);
-
-void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
-struct bkey *__bch_btree_iter_init(struct btree *, struct btree_iter *,
- struct bkey *, struct bset_tree *);
-
-/* 32 bits total: */
-#define BKEY_MID_BITS 3
-#define BKEY_EXPONENT_BITS 7
-#define BKEY_MANTISSA_BITS 22
-#define BKEY_MANTISSA_MASK ((1 << BKEY_MANTISSA_BITS) - 1)
-
-struct bkey_float {
- unsigned exponent:BKEY_EXPONENT_BITS;
- unsigned m:BKEY_MID_BITS;
- unsigned mantissa:BKEY_MANTISSA_BITS;
-} __packed;
-
-/*
- * BSET_CACHELINE was originally intended to match the hardware cacheline size -
- * it used to be 64, but I realized the lookup code would touch slightly less
- * memory if it was 128.
- *
- * It definites the number of bytes (in struct bset) per struct bkey_float in
- * the auxiliar search tree - when we're done searching the bset_float tree we
- * have this many bytes left that we do a linear search over.
- *
- * Since (after level 5) every level of the bset_tree is on a new cacheline,
- * we're touching one fewer cacheline in the bset tree in exchange for one more
- * cacheline in the linear search - but the linear search might stop before it
- * gets to the second cacheline.
- */
-
-#define BSET_CACHELINE 128
-#define bset_tree_space(b) (btree_data_space(b) / BSET_CACHELINE)
+#ifdef CONFIG_BCACHE_DEBUG
-#define bset_tree_bytes(b) (bset_tree_space(b) * sizeof(struct bkey_float))
-#define bset_prev_bytes(b) (bset_tree_space(b) * sizeof(uint8_t))
+int __bch_count_data(struct btree_keys *);
+void __bch_check_keys(struct btree_keys *, const char *, ...);
+void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
+void bch_dump_bucket(struct btree_keys *);
-void bch_bset_init_next(struct btree *);
+#else
-void bch_bset_fix_invalidated_key(struct btree *, struct bkey *);
-void bch_bset_fix_lookup_table(struct btree *, struct bkey *);
+static inline int __bch_count_data(struct btree_keys *b) { return -1; }
+static inline void __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
+static inline void bch_dump_bucket(struct btree_keys *b) {}
+void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
-struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
- const struct bkey *);
+#endif
-/*
- * Returns the first key that is strictly greater than search
- */
-static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
- const struct bkey *search)
+static inline bool btree_keys_expensive_checks(struct btree_keys *b)
{
- return search ? __bch_bset_search(b, t, search) : t->data->start;
+#ifdef CONFIG_BCACHE_DEBUG
+ return *b->expensive_debug_checks;
+#else
+ return false;
+#endif
}
-#define PRECEDING_KEY(_k) \
-({ \
- struct bkey *_ret = NULL; \
- \
- if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \
- _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \
- \
- if (!_ret->low) \
- _ret->high--; \
- _ret->low--; \
- } \
- \
- _ret; \
-})
-
-bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *);
-void bch_btree_sort_lazy(struct btree *);
-void bch_btree_sort_into(struct btree *, struct btree *);
-void bch_btree_sort_and_fix_extents(struct btree *, struct btree_iter *);
-void bch_btree_sort_partial(struct btree *, unsigned);
-
-static inline void bch_btree_sort(struct btree *b)
+static inline int bch_count_data(struct btree_keys *b)
{
- bch_btree_sort_partial(b, 0);
+ return btree_keys_expensive_checks(b) ? __bch_count_data(b) : -1;
}
-int bch_bset_print_stats(struct cache_set *, char *);
+#define bch_check_keys(b, ...) \
+do { \
+ if (btree_keys_expensive_checks(b)) \
+ __bch_check_keys(b, __VA_ARGS__); \
+} while (0)
#endif
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5e2765aadce1..5f9c2a665ca5 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -23,7 +23,7 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
-#include "writeback.h"
+#include "extents.h"
#include <linux/slab.h>
#include <linux/bitops.h>
@@ -89,13 +89,6 @@
* Test module load/unload
*/
-enum {
- BTREE_INSERT_STATUS_INSERT,
- BTREE_INSERT_STATUS_BACK_MERGE,
- BTREE_INSERT_STATUS_OVERWROTE,
- BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
#define MAX_NEED_GC 64
#define MAX_SAVE_PRIO 72
@@ -106,14 +99,6 @@ enum {
static struct workqueue_struct *btree_io_wq;
-static inline bool should_split(struct btree *b)
-{
- struct bset *i = write_block(b);
- return b->written >= btree_blocks(b) ||
- (b->written + __set_blocks(i, i->keys + 15, b->c)
- > btree_blocks(b));
-}
-
#define insert_lock(s, b) ((b)->level <= (s)->lock)
/*
@@ -167,6 +152,8 @@ static inline bool should_split(struct btree *b)
_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
} \
rw_unlock(_w, _b); \
+ if (_r == -EINTR) \
+ schedule(); \
bch_cannibalize_unlock(c); \
if (_r == -ENOSPC) { \
wait_event((c)->try_wait, \
@@ -175,9 +162,15 @@ static inline bool should_split(struct btree *b)
} \
} while (_r == -EINTR); \
\
+ finish_wait(&(c)->bucket_wait, &(op)->wait); \
_r; \
})
+static inline struct bset *write_block(struct btree *b)
+{
+ return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
+}
+
/* Btree key manipulation */
void bkey_put(struct cache_set *c, struct bkey *k)
@@ -194,16 +187,16 @@ void bkey_put(struct cache_set *c, struct bkey *k)
static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
uint64_t crc = b->key.ptr[0];
- void *data = (void *) i + 8, *end = end(i);
+ void *data = (void *) i + 8, *end = bset_bkey_last(i);
crc = bch_crc64_update(crc, data, end - data);
return crc ^ 0xffffffffffffffffULL;
}
-static void bch_btree_node_read_done(struct btree *b)
+void bch_btree_node_read_done(struct btree *b)
{
const char *err = "bad btree header";
- struct bset *i = b->sets[0].data;
+ struct bset *i = btree_bset_first(b);
struct btree_iter *iter;
iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
@@ -211,21 +204,22 @@ static void bch_btree_node_read_done(struct btree *b)
iter->used = 0;
#ifdef CONFIG_BCACHE_DEBUG
- iter->b = b;
+ iter->b = &b->keys;
#endif
if (!i->seq)
goto err;
for (;
- b->written < btree_blocks(b) && i->seq == b->sets[0].data->seq;
+ b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
i = write_block(b)) {
err = "unsupported bset version";
if (i->version > BCACHE_BSET_VERSION)
goto err;
err = "bad btree header";
- if (b->written + set_blocks(i, b->c) > btree_blocks(b))
+ if (b->written + set_blocks(i, block_bytes(b->c)) >
+ btree_blocks(b))
goto err;
err = "bad magic";
@@ -245,39 +239,40 @@ static void bch_btree_node_read_done(struct btree *b)
}
err = "empty set";
- if (i != b->sets[0].data && !i->keys)
+ if (i != b->keys.set[0].data && !i->keys)
goto err;
- bch_btree_iter_push(iter, i->start, end(i));
+ bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
- b->written += set_blocks(i, b->c);
+ b->written += set_blocks(i, block_bytes(b->c));
}
err = "corrupted btree";
for (i = write_block(b);
- index(i, b) < btree_blocks(b);
+ bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
i = ((void *) i) + block_bytes(b->c))
- if (i->seq == b->sets[0].data->seq)
+ if (i->seq == b->keys.set[0].data->seq)
goto err;
- bch_btree_sort_and_fix_extents(b, iter);
+ bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
- i = b->sets[0].data;
+ i = b->keys.set[0].data;
err = "short btree key";
- if (b->sets[0].size &&
- bkey_cmp(&b->key, &b->sets[0].end) < 0)
+ if (b->keys.set[0].size &&
+ bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
goto err;
if (b->written < btree_blocks(b))
- bch_bset_init_next(b);
+ bch_bset_init_next(&b->keys, write_block(b),
+ bset_magic(&b->c->sb));
out:
mempool_free(iter, b->c->fill_iter);
return;
err:
set_btree_node_io_error(b);
- bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys",
+ bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
err, PTR_BUCKET_NR(b->c, &b->key, 0),
- index(i, b), i->keys);
+ bset_block_offset(b, i), i->keys);
goto out;
}
@@ -287,7 +282,7 @@ static void btree_node_read_endio(struct bio *bio, int error)
closure_put(cl);
}
-void bch_btree_node_read(struct btree *b)
+static void bch_btree_node_read(struct btree *b)
{
uint64_t start_time = local_clock();
struct closure cl;
@@ -299,11 +294,11 @@ void bch_btree_node_read(struct btree *b)
bio = bch_bbio_alloc(b->c);
bio->bi_rw = REQ_META|READ_SYNC;
- bio->bi_size = KEY_SIZE(&b->key) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
bio->bi_end_io = btree_node_read_endio;
bio->bi_private = &cl;
- bch_bio_map(bio, b->sets[0].data);
+ bch_bio_map(bio, b->keys.set[0].data);
bch_submit_bbio(bio, b->c, &b->key, 0);
closure_sync(&cl);
@@ -340,9 +335,16 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
w->journal = NULL;
}
+static void btree_node_write_unlock(struct closure *cl)
+{
+ struct btree *b = container_of(cl, struct btree, io);
+
+ up(&b->io_mutex);
+}
+
static void __btree_node_write_done(struct closure *cl)
{
- struct btree *b = container_of(cl, struct btree, io.cl);
+ struct btree *b = container_of(cl, struct btree, io);
struct btree_write *w = btree_prev_write(b);
bch_bbio_free(b->bio, b->c);
@@ -353,16 +355,16 @@ static void __btree_node_write_done(struct closure *cl)
queue_delayed_work(btree_io_wq, &b->work,
msecs_to_jiffies(30000));
- closure_return(cl);
+ closure_return_with_destructor(cl, btree_node_write_unlock);
}
static void btree_node_write_done(struct closure *cl)
{
- struct btree *b = container_of(cl, struct btree, io.cl);
+ struct btree *b = container_of(cl, struct btree, io);
struct bio_vec *bv;
int n;
- __bio_for_each_segment(bv, b->bio, n, 0)
+ bio_for_each_segment_all(bv, b->bio, n)
__free_page(bv->bv_page);
__btree_node_write_done(cl);
@@ -371,7 +373,7 @@ static void btree_node_write_done(struct closure *cl)
static void btree_node_write_endio(struct bio *bio, int error)
{
struct closure *cl = bio->bi_private;
- struct btree *b = container_of(cl, struct btree, io.cl);
+ struct btree *b = container_of(cl, struct btree, io);
if (error)
set_btree_node_io_error(b);
@@ -382,8 +384,8 @@ static void btree_node_write_endio(struct bio *bio, int error)
static void do_btree_node_write(struct btree *b)
{
- struct closure *cl = &b->io.cl;
- struct bset *i = b->sets[b->nsets].data;
+ struct closure *cl = &b->io;
+ struct bset *i = btree_bset_last(b);
BKEY_PADDED(key) k;
i->version = BCACHE_BSET_VERSION;
@@ -395,7 +397,7 @@ static void do_btree_node_write(struct btree *b)
b->bio->bi_end_io = btree_node_write_endio;
b->bio->bi_private = cl;
b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
- b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
+ b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
bch_bio_map(b->bio, i);
/*
@@ -414,14 +416,15 @@ static void do_btree_node_write(struct btree *b)
*/
bkey_copy(&k.key, &b->key);
- SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));
+ SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
+ bset_sector_offset(&b->keys, i));
if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
int j;
struct bio_vec *bv;
void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
- bio_for_each_segment(bv, b->bio, j)
+ bio_for_each_segment_all(bv, b->bio, j)
memcpy(page_address(bv->bv_page),
base + j * PAGE_SIZE, PAGE_SIZE);
@@ -435,40 +438,54 @@ static void do_btree_node_write(struct btree *b)
bch_submit_bbio(b->bio, b->c, &k.key, 0);
closure_sync(cl);
- __btree_node_write_done(cl);
+ continue_at_nobarrier(cl, __btree_node_write_done, NULL);
}
}
void bch_btree_node_write(struct btree *b, struct closure *parent)
{
- struct bset *i = b->sets[b->nsets].data;
+ struct bset *i = btree_bset_last(b);
trace_bcache_btree_write(b);
BUG_ON(current->bio_list);
BUG_ON(b->written >= btree_blocks(b));
BUG_ON(b->written && !i->keys);
- BUG_ON(b->sets->data->seq != i->seq);
- bch_check_keys(b, "writing");
+ BUG_ON(btree_bset_first(b)->seq != i->seq);
+ bch_check_keys(&b->keys, "writing");
cancel_delayed_work(&b->work);
/* If caller isn't waiting for write, parent refcount is cache set */
- closure_lock(&b->io, parent ?: &b->c->cl);
+ down(&b->io_mutex);
+ closure_init(&b->io, parent ?: &b->c->cl);
clear_bit(BTREE_NODE_dirty, &b->flags);
change_bit(BTREE_NODE_write_idx, &b->flags);
do_btree_node_write(b);
- b->written += set_blocks(i, b->c);
- atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
+ atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
- bch_btree_sort_lazy(b);
+ b->written += set_blocks(i, block_bytes(b->c));
+
+ /* If not a leaf node, always sort */
+ if (b->level && b->keys.nsets)
+ bch_btree_sort(&b->keys, &b->c->sort);
+ else
+ bch_btree_sort_lazy(&b->keys, &b->c->sort);
+
+ /*
+ * do verify if there was more than one set initially (i.e. we did a
+ * sort) and we sorted down to a single set:
+ */
+ if (i != b->keys.set->data && !b->keys.nsets)
+ bch_btree_verify(b);
if (b->written < btree_blocks(b))
- bch_bset_init_next(b);
+ bch_bset_init_next(&b->keys, write_block(b),
+ bset_magic(&b->c->sb));
}
static void bch_btree_node_write_sync(struct btree *b)
@@ -493,7 +510,7 @@ static void btree_node_write_work(struct work_struct *w)
static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
- struct bset *i = b->sets[b->nsets].data;
+ struct bset *i = btree_bset_last(b);
struct btree_write *w = btree_current_write(b);
BUG_ON(!b->written);
@@ -528,24 +545,6 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
* mca -> memory cache
*/
-static void mca_reinit(struct btree *b)
-{
- unsigned i;
-
- b->flags = 0;
- b->written = 0;
- b->nsets = 0;
-
- for (i = 0; i < MAX_BSETS; i++)
- b->sets[i].size = 0;
- /*
- * Second loop starts at 1 because b->sets[0]->data is the memory we
- * allocated
- */
- for (i = 1; i < MAX_BSETS; i++)
- b->sets[i].data = NULL;
-}
-
#define mca_reserve(c) (((c->root && c->root->level) \
? c->root->level : 1) * 8 + 16)
#define mca_can_free(c) \
@@ -553,28 +552,12 @@ static void mca_reinit(struct btree *b)
static void mca_data_free(struct btree *b)
{
- struct bset_tree *t = b->sets;
- BUG_ON(!closure_is_unlocked(&b->io.cl));
+ BUG_ON(b->io_mutex.count != 1);
- if (bset_prev_bytes(b) < PAGE_SIZE)
- kfree(t->prev);
- else
- free_pages((unsigned long) t->prev,
- get_order(bset_prev_bytes(b)));
-
- if (bset_tree_bytes(b) < PAGE_SIZE)
- kfree(t->tree);
- else
- free_pages((unsigned long) t->tree,
- get_order(bset_tree_bytes(b)));
+ bch_btree_keys_free(&b->keys);
- free_pages((unsigned long) t->data, b->page_order);
-
- t->prev = NULL;
- t->tree = NULL;
- t->data = NULL;
- list_move(&b->list, &b->c->btree_cache_freed);
b->c->bucket_cache_used--;
+ list_move(&b->list, &b->c->btree_cache_freed);
}
static void mca_bucket_free(struct btree *b)
@@ -593,34 +576,16 @@ static unsigned btree_order(struct bkey *k)
static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
- struct bset_tree *t = b->sets;
- BUG_ON(t->data);
-
- b->page_order = max_t(unsigned,
- ilog2(b->c->btree_pages),
- btree_order(k));
-
- t->data = (void *) __get_free_pages(gfp, b->page_order);
- if (!t->data)
- goto err;
-
- t->tree = bset_tree_bytes(b) < PAGE_SIZE
- ? kmalloc(bset_tree_bytes(b), gfp)
- : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
- if (!t->tree)
- goto err;
-
- t->prev = bset_prev_bytes(b) < PAGE_SIZE
- ? kmalloc(bset_prev_bytes(b), gfp)
- : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
- if (!t->prev)
- goto err;
-
- list_move(&b->list, &b->c->btree_cache);
- b->c->bucket_cache_used++;
- return;
-err:
- mca_data_free(b);
+ if (!bch_btree_keys_alloc(&b->keys,
+ max_t(unsigned,
+ ilog2(b->c->btree_pages),
+ btree_order(k)),
+ gfp)) {
+ b->c->bucket_cache_used++;
+ list_move(&b->list, &b->c->btree_cache);
+ } else {
+ list_move(&b->list, &b->c->btree_cache_freed);
+ }
}
static struct btree *mca_bucket_alloc(struct cache_set *c,
@@ -635,7 +600,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
INIT_LIST_HEAD(&b->list);
INIT_DELAYED_WORK(&b->work, btree_node_write_work);
b->c = c;
- closure_init_unlocked(&b->io);
+ sema_init(&b->io_mutex, 1);
mca_data_alloc(b, k, gfp);
return b;
@@ -651,24 +616,31 @@ static int mca_reap(struct btree *b, unsigned min_order, bool flush)
if (!down_write_trylock(&b->lock))
return -ENOMEM;
- BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
+ BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
- if (b->page_order < min_order ||
- (!flush &&
- (btree_node_dirty(b) ||
- atomic_read(&b->io.cl.remaining) != -1))) {
- rw_unlock(true, b);
- return -ENOMEM;
+ if (b->keys.page_order < min_order)
+ goto out_unlock;
+
+ if (!flush) {
+ if (btree_node_dirty(b))
+ goto out_unlock;
+
+ if (down_trylock(&b->io_mutex))
+ goto out_unlock;
+ up(&b->io_mutex);
}
if (btree_node_dirty(b))
bch_btree_node_write_sync(b);
/* wait for any in flight btree write */
- closure_wait_event(&b->io.wait, &cl,
- atomic_read(&b->io.cl.remaining) == -1);
+ down(&b->io_mutex);
+ up(&b->io_mutex);
return 0;
+out_unlock:
+ rw_unlock(true, b);
+ return -ENOMEM;
}
static unsigned long bch_mca_scan(struct shrinker *shrink,
@@ -714,14 +686,10 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
}
}
- /*
- * Can happen right when we first start up, before we've read in any
- * btree nodes
- */
- if (list_empty(&c->btree_cache))
- goto out;
-
for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
+ if (list_empty(&c->btree_cache))
+ goto out;
+
b = list_first_entry(&c->btree_cache, struct btree, list);
list_rotate_left(&c->btree_cache);
@@ -767,6 +735,8 @@ void bch_btree_cache_free(struct cache_set *c)
#ifdef CONFIG_BCACHE_DEBUG
if (c->verify_data)
list_move(&c->verify_data->list, &c->btree_cache);
+
+ free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
#endif
list_splice(&c->btree_cache_freeable,
@@ -807,10 +777,13 @@ int bch_btree_cache_alloc(struct cache_set *c)
#ifdef CONFIG_BCACHE_DEBUG
mutex_init(&c->verify_lock);
+ c->verify_ondisk = (void *)
+ __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
+
c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
if (c->verify_data &&
- c->verify_data->sets[0].data)
+ c->verify_data->keys.set->data)
list_del_init(&c->verify_data->list);
else
c->verify_data = NULL;
@@ -908,7 +881,7 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
list_for_each_entry(b, &c->btree_cache_freed, list)
if (!mca_reap(b, 0, false)) {
mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
- if (!b->sets[0].data)
+ if (!b->keys.set[0].data)
goto err;
else
goto out;
@@ -919,10 +892,10 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
goto err;
BUG_ON(!down_write_trylock(&b->lock));
- if (!b->sets->data)
+ if (!b->keys.set->data)
goto err;
out:
- BUG_ON(!closure_is_unlocked(&b->io.cl));
+ BUG_ON(b->io_mutex.count != 1);
bkey_copy(&b->key, k);
list_move(&b->list, &c->btree_cache);
@@ -930,10 +903,17 @@ out:
hlist_add_head_rcu(&b->hash, mca_hash(c, k));
lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
- b->level = level;
b->parent = (void *) ~0UL;
+ b->flags = 0;
+ b->written = 0;
+ b->level = level;
- mca_reinit(b);
+ if (!b->level)
+ bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
+ &b->c->expensive_debug_checks);
+ else
+ bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
+ &b->c->expensive_debug_checks);
return b;
err:
@@ -994,13 +974,13 @@ retry:
b->accessed = 1;
- for (; i <= b->nsets && b->sets[i].size; i++) {
- prefetch(b->sets[i].tree);
- prefetch(b->sets[i].data);
+ for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
+ prefetch(b->keys.set[i].tree);
+ prefetch(b->keys.set[i].data);
}
- for (; i <= b->nsets; i++)
- prefetch(b->sets[i].data);
+ for (; i <= b->keys.nsets; i++)
+ prefetch(b->keys.set[i].data);
if (btree_node_io_error(b)) {
rw_unlock(write, b);
@@ -1063,7 +1043,7 @@ struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
mutex_lock(&c->bucket_lock);
retry:
- if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, wait))
+ if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
goto err;
bkey_put(c, &k.key);
@@ -1080,7 +1060,7 @@ retry:
}
b->accessed = 1;
- bch_bset_init_next(b);
+ bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
mutex_unlock(&c->bucket_lock);
@@ -1098,8 +1078,10 @@ err:
static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait)
{
struct btree *n = bch_btree_node_alloc(b->c, b->level, wait);
- if (!IS_ERR_OR_NULL(n))
- bch_btree_sort_into(b, n);
+ if (!IS_ERR_OR_NULL(n)) {
+ bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
+ bkey_copy_key(&n->key, &b->key);
+ }
return n;
}
@@ -1120,6 +1102,28 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
atomic_inc(&b->c->prio_blocked);
}
+static int btree_check_reserve(struct btree *b, struct btree_op *op)
+{
+ struct cache_set *c = b->c;
+ struct cache *ca;
+ unsigned i, reserve = c->root->level * 2 + 1;
+ int ret = 0;
+
+ mutex_lock(&c->bucket_lock);
+
+ for_each_cache(ca, c, i)
+ if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
+ if (op)
+ prepare_to_wait(&c->bucket_wait, &op->wait,
+ TASK_UNINTERRUPTIBLE);
+ ret = -EINTR;
+ break;
+ }
+
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+}
+
/* Garbage collection */
uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
@@ -1163,7 +1167,7 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
/* guard against overflow */
SET_GC_SECTORS_USED(g, min_t(unsigned,
GC_SECTORS_USED(g) + KEY_SIZE(k),
- (1 << 14) - 1));
+ MAX_GC_SECTORS_USED));
BUG_ON(!GC_SECTORS_USED(g));
}
@@ -1183,11 +1187,11 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
gc->nodes++;
- for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
stale = max(stale, btree_mark_key(b, k));
keys++;
- if (bch_ptr_bad(b, k))
+ if (bch_ptr_bad(&b->keys, k))
continue;
gc->key_bytes += bkey_u64s(k);
@@ -1197,9 +1201,9 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
gc->data += KEY_SIZE(k);
}
- for (t = b->sets; t <= &b->sets[b->nsets]; t++)
+ for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
btree_bug_on(t->size &&
- bset_written(b, t) &&
+ bset_written(&b->keys, t) &&
bkey_cmp(&b->key, &t->end) < 0,
b, "found short btree key in gc");
@@ -1243,7 +1247,8 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
blocks = btree_default_blocks(b->c) * 2 / 3;
if (nodes < 2 ||
- __set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
+ __set_blocks(b->keys.set[0].data, keys,
+ block_bytes(b->c)) > blocks * (nodes - 1))
return 0;
for (i = 0; i < nodes; i++) {
@@ -1253,18 +1258,19 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
}
for (i = nodes - 1; i > 0; --i) {
- struct bset *n1 = new_nodes[i]->sets->data;
- struct bset *n2 = new_nodes[i - 1]->sets->data;
+ struct bset *n1 = btree_bset_first(new_nodes[i]);
+ struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
struct bkey *k, *last = NULL;
keys = 0;
if (i > 1) {
for (k = n2->start;
- k < end(n2);
+ k < bset_bkey_last(n2);
k = bkey_next(k)) {
if (__set_blocks(n1, n1->keys + keys +
- bkey_u64s(k), b->c) > blocks)
+ bkey_u64s(k),
+ block_bytes(b->c)) > blocks)
break;
last = k;
@@ -1280,7 +1286,8 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
* though)
*/
if (__set_blocks(n1, n1->keys + n2->keys,
- b->c) > btree_blocks(new_nodes[i]))
+ block_bytes(b->c)) >
+ btree_blocks(new_nodes[i]))
goto out_nocoalesce;
keys = n2->keys;
@@ -1288,27 +1295,28 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
last = &r->b->key;
}
- BUG_ON(__set_blocks(n1, n1->keys + keys,
- b->c) > btree_blocks(new_nodes[i]));
+ BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
+ btree_blocks(new_nodes[i]));
if (last)
bkey_copy_key(&new_nodes[i]->key, last);
- memcpy(end(n1),
+ memcpy(bset_bkey_last(n1),
n2->start,
- (void *) node(n2, keys) - (void *) n2->start);
+ (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
n1->keys += keys;
r[i].keys = n1->keys;
memmove(n2->start,
- node(n2, keys),
- (void *) end(n2) - (void *) node(n2, keys));
+ bset_bkey_idx(n2, keys),
+ (void *) bset_bkey_last(n2) -
+ (void *) bset_bkey_idx(n2, keys));
n2->keys -= keys;
- if (bch_keylist_realloc(keylist,
- KEY_PTRS(&new_nodes[i]->key), b->c))
+ if (__bch_keylist_realloc(keylist,
+ bkey_u64s(&new_nodes[i]->key)))
goto out_nocoalesce;
bch_btree_node_write(new_nodes[i], &cl);
@@ -1316,7 +1324,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
}
for (i = 0; i < nodes; i++) {
- if (bch_keylist_realloc(keylist, KEY_PTRS(&r[i].b->key), b->c))
+ if (__bch_keylist_realloc(keylist, bkey_u64s(&r[i].b->key)))
goto out_nocoalesce;
make_btree_freeing_key(r[i].b, keylist->top);
@@ -1324,7 +1332,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
}
/* We emptied out this node */
- BUG_ON(new_nodes[0]->sets->data->keys);
+ BUG_ON(btree_bset_first(new_nodes[0])->keys);
btree_node_free(new_nodes[0]);
rw_unlock(true, new_nodes[0]);
@@ -1370,7 +1378,7 @@ static unsigned btree_gc_count_keys(struct btree *b)
struct btree_iter iter;
unsigned ret = 0;
- for_each_key_filter(b, k, &iter, bch_ptr_bad)
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
ret += bkey_u64s(k);
return ret;
@@ -1390,13 +1398,13 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
struct gc_merge_info *last = r + GC_MERGE_NODES - 1;
bch_keylist_init(&keys);
- bch_btree_iter_init(b, &iter, &b->c->gc_done);
+ bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
for (i = 0; i < GC_MERGE_NODES; i++)
r[i].b = ERR_PTR(-EINTR);
while (1) {
- k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+ k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
if (k) {
r->b = bch_btree_node_get(b->c, k, b->level - 1, true);
if (IS_ERR(r->b)) {
@@ -1416,7 +1424,8 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
if (!IS_ERR(last->b)) {
should_rewrite = btree_gc_mark_node(last->b, gc);
- if (should_rewrite) {
+ if (should_rewrite &&
+ !btree_check_reserve(b, NULL)) {
n = btree_node_alloc_replacement(last->b,
false);
@@ -1561,6 +1570,28 @@ size_t bch_btree_gc_finish(struct cache_set *c)
SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
GC_MARK_METADATA);
+ /* don't reclaim buckets to which writeback keys point */
+ rcu_read_lock();
+ for (i = 0; i < c->nr_uuids; i++) {
+ struct bcache_device *d = c->devices[i];
+ struct cached_dev *dc;
+ struct keybuf_key *w, *n;
+ unsigned j;
+
+ if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+ dc = container_of(d, struct cached_dev, disk);
+
+ spin_lock(&dc->writeback_keys.lock);
+ rbtree_postorder_for_each_entry_safe(w, n,
+ &dc->writeback_keys.keys, node)
+ for (j = 0; j < KEY_PTRS(&w->key); j++)
+ SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
+ GC_MARK_DIRTY);
+ spin_unlock(&dc->writeback_keys.lock);
+ }
+ rcu_read_unlock();
+
for_each_cache(ca, c, i) {
uint64_t *i;
@@ -1683,7 +1714,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
struct bucket *g;
struct btree_iter iter;
- for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
for (i = 0; i < KEY_PTRS(k); i++) {
if (!ptr_available(b->c, k, i))
continue;
@@ -1706,10 +1737,11 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
}
if (b->level) {
- bch_btree_iter_init(b, &iter, NULL);
+ bch_btree_iter_init(&b->keys, &iter, NULL);
do {
- k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+ k = bch_btree_iter_next_filter(&iter, &b->keys,
+ bch_ptr_bad);
if (k)
btree_node_prefetch(b->c, k, b->level - 1);
@@ -1752,234 +1784,36 @@ err:
/* Btree insertion */
-static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
-{
- struct bset *i = b->sets[b->nsets].data;
-
- memmove((uint64_t *) where + bkey_u64s(insert),
- where,
- (void *) end(i) - (void *) where);
-
- i->keys += bkey_u64s(insert);
- bkey_copy(where, insert);
- bch_bset_fix_lookup_table(b, where);
-}
-
-static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
- struct btree_iter *iter,
- struct bkey *replace_key)
+static bool btree_insert_key(struct btree *b, struct bkey *k,
+ struct bkey *replace_key)
{
- void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
- {
- if (KEY_DIRTY(k))
- bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
- offset, -sectors);
- }
-
- uint64_t old_offset;
- unsigned old_size, sectors_found = 0;
-
- while (1) {
- struct bkey *k = bch_btree_iter_next(iter);
- if (!k ||
- bkey_cmp(&START_KEY(k), insert) >= 0)
- break;
-
- if (bkey_cmp(k, &START_KEY(insert)) <= 0)
- continue;
-
- old_offset = KEY_START(k);
- old_size = KEY_SIZE(k);
-
- /*
- * We might overlap with 0 size extents; we can't skip these
- * because if they're in the set we're inserting to we have to
- * adjust them so they don't overlap with the key we're
- * inserting. But we don't want to check them for replace
- * operations.
- */
-
- if (replace_key && KEY_SIZE(k)) {
- /*
- * k might have been split since we inserted/found the
- * key we're replacing
- */
- unsigned i;
- uint64_t offset = KEY_START(k) -
- KEY_START(replace_key);
-
- /* But it must be a subset of the replace key */
- if (KEY_START(k) < KEY_START(replace_key) ||
- KEY_OFFSET(k) > KEY_OFFSET(replace_key))
- goto check_failed;
-
- /* We didn't find a key that we were supposed to */
- if (KEY_START(k) > KEY_START(insert) + sectors_found)
- goto check_failed;
-
- if (KEY_PTRS(replace_key) != KEY_PTRS(k))
- goto check_failed;
-
- /* skip past gen */
- offset <<= 8;
-
- BUG_ON(!KEY_PTRS(replace_key));
-
- for (i = 0; i < KEY_PTRS(replace_key); i++)
- if (k->ptr[i] != replace_key->ptr[i] + offset)
- goto check_failed;
-
- sectors_found = KEY_OFFSET(k) - KEY_START(insert);
- }
-
- if (bkey_cmp(insert, k) < 0 &&
- bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
- /*
- * We overlapped in the middle of an existing key: that
- * means we have to split the old key. But we have to do
- * slightly different things depending on whether the
- * old key has been written out yet.
- */
-
- struct bkey *top;
-
- subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
-
- if (bkey_written(b, k)) {
- /*
- * We insert a new key to cover the top of the
- * old key, and the old key is modified in place
- * to represent the bottom split.
- *
- * It's completely arbitrary whether the new key
- * is the top or the bottom, but it has to match
- * up with what btree_sort_fixup() does - it
- * doesn't check for this kind of overlap, it
- * depends on us inserting a new key for the top
- * here.
- */
- top = bch_bset_search(b, &b->sets[b->nsets],
- insert);
- shift_keys(b, top, k);
- } else {
- BKEY_PADDED(key) temp;
- bkey_copy(&temp.key, k);
- shift_keys(b, k, &temp.key);
- top = bkey_next(k);
- }
-
- bch_cut_front(insert, top);
- bch_cut_back(&START_KEY(insert), k);
- bch_bset_fix_invalidated_key(b, k);
- return false;
- }
-
- if (bkey_cmp(insert, k) < 0) {
- bch_cut_front(insert, k);
- } else {
- if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
- old_offset = KEY_START(insert);
-
- if (bkey_written(b, k) &&
- bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
- /*
- * Completely overwrote, so we don't have to
- * invalidate the binary search tree
- */
- bch_cut_front(k, k);
- } else {
- __bch_cut_back(&START_KEY(insert), k);
- bch_bset_fix_invalidated_key(b, k);
- }
- }
+ unsigned status;
- subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
- }
+ BUG_ON(bkey_cmp(k, &b->key) > 0);
-check_failed:
- if (replace_key) {
- if (!sectors_found) {
- return true;
- } else if (sectors_found < KEY_SIZE(insert)) {
- SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
- (KEY_SIZE(insert) - sectors_found));
- SET_KEY_SIZE(insert, sectors_found);
- }
- }
+ status = bch_btree_insert_key(&b->keys, k, replace_key);
+ if (status != BTREE_INSERT_STATUS_NO_INSERT) {
+ bch_check_keys(&b->keys, "%u for %s", status,
+ replace_key ? "replace" : "insert");
- return false;
+ trace_bcache_btree_insert_key(b, k, replace_key != NULL,
+ status);
+ return true;
+ } else
+ return false;
}
-static bool btree_insert_key(struct btree *b, struct btree_op *op,
- struct bkey *k, struct bkey *replace_key)
+static size_t insert_u64s_remaining(struct btree *b)
{
- struct bset *i = b->sets[b->nsets].data;
- struct bkey *m, *prev;
- unsigned status = BTREE_INSERT_STATUS_INSERT;
-
- BUG_ON(bkey_cmp(k, &b->key) > 0);
- BUG_ON(b->level && !KEY_PTRS(k));
- BUG_ON(!b->level && !KEY_OFFSET(k));
-
- if (!b->level) {
- struct btree_iter iter;
+ long ret = bch_btree_keys_u64s_remaining(&b->keys);
- /*
- * bset_search() returns the first key that is strictly greater
- * than the search key - but for back merging, we want to find
- * the previous key.
- */
- prev = NULL;
- m = bch_btree_iter_init(b, &iter, PRECEDING_KEY(&START_KEY(k)));
-
- if (fix_overlapping_extents(b, k, &iter, replace_key)) {
- op->insert_collision = true;
- return false;
- }
-
- if (KEY_DIRTY(k))
- bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
- KEY_START(k), KEY_SIZE(k));
-
- while (m != end(i) &&
- bkey_cmp(k, &START_KEY(m)) > 0)
- prev = m, m = bkey_next(m);
-
- if (key_merging_disabled(b->c))
- goto insert;
-
- /* prev is in the tree, if we merge we're done */
- status = BTREE_INSERT_STATUS_BACK_MERGE;
- if (prev &&
- bch_bkey_try_merge(b, prev, k))
- goto merged;
-
- status = BTREE_INSERT_STATUS_OVERWROTE;
- if (m != end(i) &&
- KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
- goto copy;
-
- status = BTREE_INSERT_STATUS_FRONT_MERGE;
- if (m != end(i) &&
- bch_bkey_try_merge(b, k, m))
- goto copy;
- } else {
- BUG_ON(replace_key);
- m = bch_bset_search(b, &b->sets[b->nsets], k);
- }
-
-insert: shift_keys(b, m, k);
-copy: bkey_copy(m, k);
-merged:
- bch_check_keys(b, "%u for %s", status,
- replace_key ? "replace" : "insert");
-
- if (b->level && !KEY_OFFSET(k))
- btree_current_write(b)->prio_blocked++;
-
- trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
+ /*
+ * Might land in the middle of an existing extent and have to split it
+ */
+ if (b->keys.ops->is_extents)
+ ret -= KEY_MAX_U64S;
- return true;
+ return max(ret, 0L);
}
static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
@@ -1987,21 +1821,19 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
struct bkey *replace_key)
{
bool ret = false;
- int oldsize = bch_count_data(b);
+ int oldsize = bch_count_data(&b->keys);
while (!bch_keylist_empty(insert_keys)) {
- struct bset *i = write_block(b);
struct bkey *k = insert_keys->keys;
- if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c)
- > btree_blocks(b))
+ if (bkey_u64s(k) > insert_u64s_remaining(b))
break;
if (bkey_cmp(k, &b->key) <= 0) {
if (!b->level)
bkey_put(b->c, k);
- ret |= btree_insert_key(b, op, k, replace_key);
+ ret |= btree_insert_key(b, k, replace_key);
bch_keylist_pop_front(insert_keys);
} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
BKEY_PADDED(key) temp;
@@ -2010,16 +1842,19 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
bch_cut_back(&b->key, &temp.key);
bch_cut_front(&b->key, insert_keys->keys);
- ret |= btree_insert_key(b, op, &temp.key, replace_key);
+ ret |= btree_insert_key(b, &temp.key, replace_key);
break;
} else {
break;
}
}
+ if (!ret)
+ op->insert_collision = true;
+
BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
- BUG_ON(bch_count_data(b) < oldsize);
+ BUG_ON(bch_count_data(&b->keys) < oldsize);
return ret;
}
@@ -2036,16 +1871,21 @@ static int btree_split(struct btree *b, struct btree_op *op,
closure_init_stack(&cl);
bch_keylist_init(&parent_keys);
+ if (!b->level &&
+ btree_check_reserve(b, op))
+ return -EINTR;
+
n1 = btree_node_alloc_replacement(b, true);
if (IS_ERR(n1))
goto err;
- split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5;
+ split = set_blocks(btree_bset_first(n1),
+ block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
if (split) {
unsigned keys = 0;
- trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
+ trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
n2 = bch_btree_node_alloc(b->c, b->level, true);
if (IS_ERR(n2))
@@ -2064,18 +1904,20 @@ static int btree_split(struct btree *b, struct btree_op *op,
* search tree yet
*/
- while (keys < (n1->sets[0].data->keys * 3) / 5)
- keys += bkey_u64s(node(n1->sets[0].data, keys));
+ while (keys < (btree_bset_first(n1)->keys * 3) / 5)
+ keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
+ keys));
- bkey_copy_key(&n1->key, node(n1->sets[0].data, keys));
- keys += bkey_u64s(node(n1->sets[0].data, keys));
+ bkey_copy_key(&n1->key,
+ bset_bkey_idx(btree_bset_first(n1), keys));
+ keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
- n2->sets[0].data->keys = n1->sets[0].data->keys - keys;
- n1->sets[0].data->keys = keys;
+ btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
+ btree_bset_first(n1)->keys = keys;
- memcpy(n2->sets[0].data->start,
- end(n1->sets[0].data),
- n2->sets[0].data->keys * sizeof(uint64_t));
+ memcpy(btree_bset_first(n2)->start,
+ bset_bkey_last(btree_bset_first(n1)),
+ btree_bset_first(n2)->keys * sizeof(uint64_t));
bkey_copy_key(&n2->key, &b->key);
@@ -2083,7 +1925,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
bch_btree_node_write(n2, &cl);
rw_unlock(true, n2);
} else {
- trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
+ trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
bch_btree_insert_keys(n1, op, insert_keys, replace_key);
}
@@ -2126,18 +1968,21 @@ static int btree_split(struct btree *b, struct btree_op *op,
return 0;
err_free2:
+ bkey_put(b->c, &n2->key);
btree_node_free(n2);
rw_unlock(true, n2);
err_free1:
+ bkey_put(b->c, &n1->key);
btree_node_free(n1);
rw_unlock(true, n1);
err:
+ WARN(1, "bcache: btree split failed");
+
if (n3 == ERR_PTR(-EAGAIN) ||
n2 == ERR_PTR(-EAGAIN) ||
n1 == ERR_PTR(-EAGAIN))
return -EAGAIN;
- pr_warn("couldn't split");
return -ENOMEM;
}
@@ -2148,7 +1993,7 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
{
BUG_ON(b->level && replace_key);
- if (should_split(b)) {
+ if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
if (current->bio_list) {
op->lock = b->c->root->level + 1;
return -EAGAIN;
@@ -2157,11 +2002,13 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
return -EINTR;
} else {
/* Invalidated all iterators */
- return btree_split(b, op, insert_keys, replace_key) ?:
- -EINTR;
+ int ret = btree_split(b, op, insert_keys, replace_key);
+
+ return bch_keylist_empty(insert_keys) ?
+ 0 : ret ?: -EINTR;
}
} else {
- BUG_ON(write_block(b) != b->sets[b->nsets].data);
+ BUG_ON(write_block(b) != btree_bset_last(b));
if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
if (!b->level)
@@ -2217,7 +2064,7 @@ struct btree_insert_op {
struct bkey *replace_key;
};
-int btree_insert_fn(struct btree_op *b_op, struct btree *b)
+static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
struct btree_insert_op *op = container_of(b_op,
struct btree_insert_op, op);
@@ -2300,9 +2147,9 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
struct bkey *k;
struct btree_iter iter;
- bch_btree_iter_init(b, &iter, from);
+ bch_btree_iter_init(&b->keys, &iter, from);
- while ((k = bch_btree_iter_next_filter(&iter, b,
+ while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
bch_ptr_bad))) {
ret = btree(map_nodes_recurse, k, b,
op, from, fn, flags);
@@ -2333,9 +2180,9 @@ static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
struct bkey *k;
struct btree_iter iter;
- bch_btree_iter_init(b, &iter, from);
+ bch_btree_iter_init(&b->keys, &iter, from);
- while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
+ while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
ret = !b->level
? fn(op, b, k)
: btree(map_keys_recurse, k, b, op, from, fn, flags);
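The hunks above replace the old closure-with-waitlist IO lock with the b->io_mutex semaphore: bch_btree_node_write() holds it for the duration of a write, mca_reap() probes it, and waiters simply acquire and release it. A minimal sketch of that pattern, assuming only the io_mutex field this patch adds (the example_* helpers are hypothetical):

#include <linux/semaphore.h>

/* Hypothetical helpers, not part of the patch. */
static void example_wait_for_btree_io(struct btree *b)
{
	down(&b->io_mutex);	/* sleeps until any in-flight write releases it */
	up(&b->io_mutex);
}

static bool example_btree_io_in_flight(struct btree *b)
{
	if (down_trylock(&b->io_mutex))
		return true;	/* held by a writer: a write is still in flight */
	up(&b->io_mutex);
	return false;
}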
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 767e75570896..af065e97e55c 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -130,20 +130,12 @@ struct btree {
unsigned long flags;
uint16_t written; /* would be nice to kill */
uint8_t level;
- uint8_t nsets;
- uint8_t page_order;
-
- /*
- * Set of sorted keys - the real btree node - plus a binary search tree
- *
- * sets[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
- * to the memory we have allocated for this btree node. Additionally,
- * set[0]->data points to the entire btree node as it exists on disk.
- */
- struct bset_tree sets[MAX_BSETS];
+
+ struct btree_keys keys;
/* For outstanding btree writes, used as a lock - protects write_idx */
- struct closure_with_waitlist io;
+ struct closure io;
+ struct semaphore io_mutex;
struct list_head list;
struct delayed_work work;
@@ -179,24 +171,19 @@ static inline struct btree_write *btree_prev_write(struct btree *b)
return b->writes + (btree_node_write_idx(b) ^ 1);
}
-static inline unsigned bset_offset(struct btree *b, struct bset *i)
+static inline struct bset *btree_bset_first(struct btree *b)
{
- return (((size_t) i) - ((size_t) b->sets->data)) >> 9;
+ return b->keys.set->data;
}
-static inline struct bset *write_block(struct btree *b)
+static inline struct bset *btree_bset_last(struct btree *b)
{
- return ((void *) b->sets[0].data) + b->written * block_bytes(b->c);
+ return bset_tree_last(&b->keys)->data;
}
-static inline bool bset_written(struct btree *b, struct bset_tree *t)
+static inline unsigned bset_block_offset(struct btree *b, struct bset *i)
{
- return t->data < write_block(b);
-}
-
-static inline bool bkey_written(struct btree *b, struct bkey *k)
-{
- return k < write_block(b)->start;
+ return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
}
static inline void set_gc_sectors(struct cache_set *c)
@@ -204,21 +191,6 @@ static inline void set_gc_sectors(struct cache_set *c)
atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
}
-static inline struct bkey *bch_btree_iter_init(struct btree *b,
- struct btree_iter *iter,
- struct bkey *search)
-{
- return __bch_btree_iter_init(b, iter, search, b->sets);
-}
-
-static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
-{
- if (b->level)
- return bch_btree_ptr_invalid(b->c, k);
- else
- return bch_extent_ptr_invalid(b->c, k);
-}
-
void bkey_put(struct cache_set *c, struct bkey *k);
/* Looping macros */
@@ -229,17 +201,12 @@ void bkey_put(struct cache_set *c, struct bkey *k);
iter++) \
hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
-#define for_each_key_filter(b, k, iter, filter) \
- for (bch_btree_iter_init((b), (iter), NULL); \
- ((k) = bch_btree_iter_next_filter((iter), b, filter));)
-
-#define for_each_key(b, k, iter) \
- for (bch_btree_iter_init((b), (iter), NULL); \
- ((k) = bch_btree_iter_next(iter));)
-
/* Recursing down the btree */
struct btree_op {
+ /* for waiting on btree reserve in btree_split() */
+ wait_queue_t wait;
+
/* Btree level at which we start taking write locks */
short lock;
@@ -249,6 +216,7 @@ struct btree_op {
static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
{
memset(op, 0, sizeof(struct btree_op));
+ init_wait(&op->wait);
op->lock = write_lock_level;
}
@@ -267,7 +235,7 @@ static inline void rw_unlock(bool w, struct btree *b)
(w ? up_write : up_read)(&b->lock);
}
-void bch_btree_node_read(struct btree *);
+void bch_btree_node_read_done(struct btree *);
void bch_btree_node_write(struct btree *, struct closure *);
void bch_btree_set_root(struct btree *);
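The wait_queue_t embedded in struct btree_op above pairs with btree_check_reserve(), which calls prepare_to_wait() on c->bucket_wait and returns -EINTR, presumably so the operation can sleep and retry once the btree reserve refills. A sketch of the underlying wait-queue idiom, with hypothetical names (my_waitq, reserve_available):

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);	/* hypothetical wait queue */

static void example_wait_for_reserve(void)
{
	DEFINE_WAIT(wait);

	/* register first, then re-check the condition, then sleep */
	prepare_to_wait(&my_waitq, &wait, TASK_UNINTERRUPTIBLE);
	if (!reserve_available())		/* hypothetical condition */
		schedule();
	finish_wait(&my_waitq, &wait);
}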
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index dfff2410322e..7a228de95fd7 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -11,19 +11,6 @@
#include "closure.h"
-#define CL_FIELD(type, field) \
- case TYPE_ ## type: \
- return &container_of(cl, struct type, cl)->field
-
-static struct closure_waitlist *closure_waitlist(struct closure *cl)
-{
- switch (cl->type) {
- CL_FIELD(closure_with_waitlist, wait);
- default:
- return NULL;
- }
-}
-
static inline void closure_put_after_sub(struct closure *cl, int flags)
{
int r = flags & CLOSURE_REMAINING_MASK;
@@ -42,17 +29,10 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
closure_queue(cl);
} else {
struct closure *parent = cl->parent;
- struct closure_waitlist *wait = closure_waitlist(cl);
closure_fn *destructor = cl->fn;
closure_debug_destroy(cl);
- smp_mb();
- atomic_set(&cl->remaining, -1);
-
- if (wait)
- closure_wake_up(wait);
-
if (destructor)
destructor(cl);
@@ -69,19 +49,18 @@ void closure_sub(struct closure *cl, int v)
}
EXPORT_SYMBOL(closure_sub);
+/**
+ * closure_put - decrement a closure's refcount
+ */
void closure_put(struct closure *cl)
{
closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
EXPORT_SYMBOL(closure_put);
-static void set_waiting(struct closure *cl, unsigned long f)
-{
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
- cl->waiting_on = f;
-#endif
-}
-
+/**
+ * closure_wake_up - wake up all closures on a wait list, without memory barrier
+ */
void __closure_wake_up(struct closure_waitlist *wait_list)
{
struct llist_node *list;
@@ -106,27 +85,34 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
cl = container_of(reverse, struct closure, list);
reverse = llist_next(reverse);
- set_waiting(cl, 0);
+ closure_set_waiting(cl, 0);
closure_sub(cl, CLOSURE_WAITING + 1);
}
}
EXPORT_SYMBOL(__closure_wake_up);
-bool closure_wait(struct closure_waitlist *list, struct closure *cl)
+/**
+ * closure_wait - add a closure to a waitlist
+ *
+ * @waitlist will own a ref on @cl, which will be released when
+ * closure_wake_up() is called on @waitlist.
+ *
+ */
+bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
return false;
- set_waiting(cl, _RET_IP_);
+ closure_set_waiting(cl, _RET_IP_);
atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
- llist_add(&cl->list, &list->list);
+ llist_add(&cl->list, &waitlist->list);
return true;
}
EXPORT_SYMBOL(closure_wait);
/**
- * closure_sync() - sleep until a closure a closure has nothing left to wait on
+ * closure_sync - sleep until a closure has nothing left to wait on
*
* Sleeps until the refcount hits 1 - the thread that's running the closure owns
* the last refcount.
@@ -148,46 +134,6 @@ void closure_sync(struct closure *cl)
}
EXPORT_SYMBOL(closure_sync);
-/**
- * closure_trylock() - try to acquire the closure, without waiting
- * @cl: closure to lock
- *
- * Returns true if the closure was succesfully locked.
- */
-bool closure_trylock(struct closure *cl, struct closure *parent)
-{
- if (atomic_cmpxchg(&cl->remaining, -1,
- CLOSURE_REMAINING_INITIALIZER) != -1)
- return false;
-
- smp_mb();
-
- cl->parent = parent;
- if (parent)
- closure_get(parent);
-
- closure_set_ret_ip(cl);
- closure_debug_create(cl);
- return true;
-}
-EXPORT_SYMBOL(closure_trylock);
-
-void __closure_lock(struct closure *cl, struct closure *parent,
- struct closure_waitlist *wait_list)
-{
- struct closure wait;
- closure_init_stack(&wait);
-
- while (1) {
- if (closure_trylock(cl, parent))
- return;
-
- closure_wait_event(wait_list, &wait,
- atomic_read(&cl->remaining) == -1);
- }
-}
-EXPORT_SYMBOL(__closure_lock);
-
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
static LIST_HEAD(closure_list);
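With the lock helpers gone, closure_wait() and closure_sync() remain the way to park on a closure_waitlist. A rough sketch of that usage from process context (not taken from the patch; a real caller would re-check its wake-up condition before sleeping):

/* Sketch only: wait on a waitlist with a stack-allocated closure. */
static void example_wait_on_list(struct closure_waitlist *list)
{
	struct closure cl;

	closure_init_stack(&cl);

	/* closure_wait() takes a ref that closure_wake_up() will drop */
	if (closure_wait(list, &cl))
		closure_sync(&cl);	/* sleep until that ref is dropped */
}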
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 9762f1be3304..7ef7461912be 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -72,30 +72,6 @@
* closure - _always_ use continue_at(). Doing so consistently will help
* eliminate an entire class of particularly pernicious races.
*
- * For a closure to wait on an arbitrary event, we need to introduce waitlists:
- *
- * struct closure_waitlist list;
- * closure_wait_event(list, cl, condition);
- * closure_wake_up(wait_list);
- *
- * These work analagously to wait_event() and wake_up() - except that instead of
- * operating on the current thread (for wait_event()) and lists of threads, they
- * operate on an explicit closure and lists of closures.
- *
- * Because it's a closure we can now wait either synchronously or
- * asynchronously. closure_wait_event() returns the current value of the
- * condition, and if it returned false continue_at() or closure_sync() can be
- * used to wait for it to become true.
- *
- * It's useful for waiting on things when you can't sleep in the context in
- * which you must check the condition (perhaps a spinlock held, or you might be
- * beneath generic_make_request() - in which case you can't sleep on IO).
- *
- * closure_wait_event() will wait either synchronously or asynchronously,
- * depending on whether the closure is in blocking mode or not. You can pick a
- * mode explicitly with closure_wait_event_sync() and
- * closure_wait_event_async(), which do just what you might expect.
- *
* Lastly, you might have a wait list dedicated to a specific event, and have no
* need for specifying the condition - you just want to wait until someone runs
* closure_wake_up() on the appropriate wait list. In that case, just use
@@ -121,40 +97,6 @@
* All this implies that a closure should typically be embedded in a particular
* struct (which its refcount will normally control the lifetime of), and that
* struct can very much be thought of as a stack frame.
- *
- * Locking:
- *
- * Closures are based on work items but they can be thought of as more like
- * threads - in that like threads and unlike work items they have a well
- * defined lifetime; they are created (with closure_init()) and eventually
- * complete after a continue_at(cl, NULL, NULL).
- *
- * Suppose you've got some larger structure with a closure embedded in it that's
- * used for periodically doing garbage collection. You only want one garbage
- * collection happening at a time, so the natural thing to do is protect it with
- * a lock. However, it's difficult to use a lock protecting a closure correctly
- * because the unlock should come after the last continue_to() (additionally, if
- * you're using the closure asynchronously a mutex won't work since a mutex has
- * to be unlocked by the same process that locked it).
- *
- * So to make it less error prone and more efficient, we also have the ability
- * to use closures as locks:
- *
- * closure_init_unlocked();
- * closure_trylock();
- *
- * That's all we need for trylock() - the last closure_put() implicitly unlocks
- * it for you. But for closure_lock(), we also need a wait list:
- *
- * struct closure_with_waitlist frobnicator_cl;
- *
- * closure_init_unlocked(&frobnicator_cl);
- * closure_lock(&frobnicator_cl);
- *
- * A closure_with_waitlist embeds a closure and a wait list - much like struct
- * delayed_work embeds a work item and a timer_list. The important thing is, use
- * it exactly like you would a regular closure and closure_put() will magically
- * handle everything for you.
*/
struct closure;
@@ -164,12 +106,6 @@ struct closure_waitlist {
struct llist_head list;
};
-enum closure_type {
- TYPE_closure = 0,
- TYPE_closure_with_waitlist = 1,
- MAX_CLOSURE_TYPE = 1,
-};
-
enum closure_state {
/*
* CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
@@ -224,8 +160,6 @@ struct closure {
atomic_t remaining;
- enum closure_type type;
-
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
#define CLOSURE_MAGIC_DEAD 0xc054dead
#define CLOSURE_MAGIC_ALIVE 0xc054a11e
@@ -237,34 +171,12 @@ struct closure {
#endif
};
-struct closure_with_waitlist {
- struct closure cl;
- struct closure_waitlist wait;
-};
-
-extern unsigned invalid_closure_type(void);
-
-#define __CLOSURE_TYPE(cl, _t) \
- __builtin_types_compatible_p(typeof(cl), struct _t) \
- ? TYPE_ ## _t : \
-
-#define __closure_type(cl) \
-( \
- __CLOSURE_TYPE(cl, closure) \
- __CLOSURE_TYPE(cl, closure_with_waitlist) \
- invalid_closure_type() \
-)
-
void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
void closure_sync(struct closure *cl);
-bool closure_trylock(struct closure *cl, struct closure *parent);
-void __closure_lock(struct closure *cl, struct closure *parent,
- struct closure_waitlist *wait_list);
-
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
void closure_debug_init(void);
@@ -293,134 +205,97 @@ static inline void closure_set_ret_ip(struct closure *cl)
#endif
}
-static inline void closure_get(struct closure *cl)
+static inline void closure_set_waiting(struct closure *cl, unsigned long f)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
- BUG_ON((atomic_inc_return(&cl->remaining) &
- CLOSURE_REMAINING_MASK) <= 1);
-#else
- atomic_inc(&cl->remaining);
+ cl->waiting_on = f;
#endif
}
-static inline void closure_set_stopped(struct closure *cl)
+static inline void __closure_end_sleep(struct closure *cl)
{
- atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+ __set_current_state(TASK_RUNNING);
+
+ if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
+ atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
}
-static inline bool closure_is_unlocked(struct closure *cl)
+static inline void __closure_start_sleep(struct closure *cl)
{
- return atomic_read(&cl->remaining) == -1;
+ closure_set_ip(cl);
+ cl->task = current;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+
+ if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
+ atomic_add(CLOSURE_SLEEPING, &cl->remaining);
}
-static inline void do_closure_init(struct closure *cl, struct closure *parent,
- bool running)
+static inline void closure_set_stopped(struct closure *cl)
{
- cl->parent = parent;
- if (parent)
- closure_get(parent);
-
- if (running) {
- closure_debug_create(cl);
- atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
- } else
- atomic_set(&cl->remaining, -1);
+ atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+}
+static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
+ struct workqueue_struct *wq)
+{
+ BUG_ON(object_is_on_stack(cl));
closure_set_ip(cl);
+ cl->fn = fn;
+ cl->wq = wq;
+ /* between atomic_dec() in closure_put() */
+ smp_mb__before_atomic_dec();
}
-/*
- * Hack to get at the embedded closure if there is one, by doing an unsafe cast:
- * the result of __closure_type() is thrown away, it's used merely for type
- * checking.
- */
-#define __to_internal_closure(cl) \
-({ \
- BUILD_BUG_ON(__closure_type(*cl) > MAX_CLOSURE_TYPE); \
- (struct closure *) cl; \
-})
-
-#define closure_init_type(cl, parent, running) \
-do { \
- struct closure *_cl = __to_internal_closure(cl); \
- _cl->type = __closure_type(*(cl)); \
- do_closure_init(_cl, parent, running); \
-} while (0)
+static inline void closure_queue(struct closure *cl)
+{
+ struct workqueue_struct *wq = cl->wq;
+ if (wq) {
+ INIT_WORK(&cl->work, cl->work.func);
+ BUG_ON(!queue_work(wq, &cl->work));
+ } else
+ cl->fn(cl);
+}
/**
- * __closure_init() - Initialize a closure, skipping the memset()
- *
- * May be used instead of closure_init() when memory has already been zeroed.
+ * closure_get - increment a closure's refcount
*/
-#define __closure_init(cl, parent) \
- closure_init_type(cl, parent, true)
+static inline void closure_get(struct closure *cl)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+ BUG_ON((atomic_inc_return(&cl->remaining) &
+ CLOSURE_REMAINING_MASK) <= 1);
+#else
+ atomic_inc(&cl->remaining);
+#endif
+}
/**
- * closure_init() - Initialize a closure, setting the refcount to 1
+ * closure_init - Initialize a closure, setting the refcount to 1
* @cl: closure to initialize
* @parent: parent of the new closure. cl will take a refcount on it for its
* lifetime; may be NULL.
*/
-#define closure_init(cl, parent) \
-do { \
- memset((cl), 0, sizeof(*(cl))); \
- __closure_init(cl, parent); \
-} while (0)
-
-static inline void closure_init_stack(struct closure *cl)
+static inline void closure_init(struct closure *cl, struct closure *parent)
{
memset(cl, 0, sizeof(struct closure));
- atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
-}
-
-/**
- * closure_init_unlocked() - Initialize a closure but leave it unlocked.
- * @cl: closure to initialize
- *
- * For when the closure will be used as a lock. The closure may not be used
- * until after a closure_lock() or closure_trylock().
- */
-#define closure_init_unlocked(cl) \
-do { \
- memset((cl), 0, sizeof(*(cl))); \
- closure_init_type(cl, NULL, false); \
-} while (0)
-
-/**
- * closure_lock() - lock and initialize a closure.
- * @cl: the closure to lock
- * @parent: the new parent for this closure
- *
- * The closure must be of one of the types that has a waitlist (otherwise we
- * wouldn't be able to sleep on contention).
- *
- * @parent has exactly the same meaning as in closure_init(); if non null, the
- * closure will take a reference on @parent which will be released when it is
- * unlocked.
- */
-#define closure_lock(cl, parent) \
- __closure_lock(__to_internal_closure(cl), parent, &(cl)->wait)
+ cl->parent = parent;
+ if (parent)
+ closure_get(parent);
-static inline void __closure_end_sleep(struct closure *cl)
-{
- __set_current_state(TASK_RUNNING);
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
- if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
- atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
+ closure_debug_create(cl);
+ closure_set_ip(cl);
}
-static inline void __closure_start_sleep(struct closure *cl)
+static inline void closure_init_stack(struct closure *cl)
{
- closure_set_ip(cl);
- cl->task = current;
- set_current_state(TASK_UNINTERRUPTIBLE);
-
- if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
- atomic_add(CLOSURE_SLEEPING, &cl->remaining);
+ memset(cl, 0, sizeof(struct closure));
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
}
/**
- * closure_wake_up() - wake up all closures on a wait list.
+ * closure_wake_up - wake up all closures on a wait list.
*/
static inline void closure_wake_up(struct closure_waitlist *list)
{
@@ -428,69 +303,19 @@ static inline void closure_wake_up(struct closure_waitlist *list)
__closure_wake_up(list);
}
-/*
- * Wait on an event, synchronously or asynchronously - analogous to wait_event()
- * but for closures.
- *
- * The loop is oddly structured so as to avoid a race; we must check the
- * condition again after we've added ourself to the waitlist. We know if we were
- * already on the waitlist because closure_wait() returns false; thus, we only
- * schedule or break if closure_wait() returns false. If it returns true, we
- * just loop again - rechecking the condition.
- *
- * The __closure_wake_up() is necessary because we may race with the event
- * becoming true; i.e. we see event false -> wait -> recheck condition, but the
- * thread that made the event true may have called closure_wake_up() before we
- * added ourself to the wait list.
- *
- * We have to call closure_sync() at the end instead of just
- * __closure_end_sleep() because a different thread might've called
- * closure_wake_up() before us and gotten preempted before they dropped the
- * refcount on our closure. If this was a stack allocated closure, that would be
- * bad.
+/**
+ * continue_at - jump to another function with barrier
+ *
+ * After @cl is no longer waiting on anything (i.e. all outstanding refs have
+ * been dropped with closure_put()), it will resume execution at @fn running out
+ * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
+ *
+ * NOTE: This macro expands to a return in the calling function!
+ *
+ * This is because after calling continue_at() you no longer have a ref on @cl,
+ * and whatever @cl owns may be freed out from under you - a running closure fn
+ * has a ref on its own closure which continue_at() drops.
*/
-#define closure_wait_event(list, cl, condition) \
-({ \
- typeof(condition) ret; \
- \
- while (1) { \
- ret = (condition); \
- if (ret) { \
- __closure_wake_up(list); \
- closure_sync(cl); \
- break; \
- } \
- \
- __closure_start_sleep(cl); \
- \
- if (!closure_wait(list, cl)) \
- schedule(); \
- } \
- \
- ret; \
-})
-
-static inline void closure_queue(struct closure *cl)
-{
- struct workqueue_struct *wq = cl->wq;
- if (wq) {
- INIT_WORK(&cl->work, cl->work.func);
- BUG_ON(!queue_work(wq, &cl->work));
- } else
- cl->fn(cl);
-}
-
-static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
- struct workqueue_struct *wq)
-{
- BUG_ON(object_is_on_stack(cl));
- closure_set_ip(cl);
- cl->fn = fn;
- cl->wq = wq;
- /* between atomic_dec() in closure_put() */
- smp_mb__before_atomic_dec();
-}
-
#define continue_at(_cl, _fn, _wq) \
do { \
set_closure_fn(_cl, _fn, _wq); \
@@ -498,8 +323,28 @@ do { \
return; \
} while (0)
+/**
+ * closure_return - finish execution of a closure
+ *
+ * This is used to indicate that @cl is finished: when all outstanding refs on
+ * @cl have been dropped @cl's ref on its parent closure (as passed to
+ * closure_init()) will be dropped, if one was specified - thus this can be
+ * thought of as returning to the parent closure.
+ */
#define closure_return(_cl) continue_at((_cl), NULL, NULL)
+/**
+ * continue_at_nobarrier - jump to another function without barrier
+ *
+ * Causes @fn to be executed out of @cl, in @wq context (or called directly if
+ * @wq is NULL).
+ *
+ * NOTE: like continue_at(), this macro expands to a return in the caller!
+ *
+ * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
+ * thus it's not safe to touch anything protected by @cl after a
+ * continue_at_nobarrier().
+ */
#define continue_at_nobarrier(_cl, _fn, _wq) \
do { \
set_closure_fn(_cl, _fn, _wq); \
@@ -507,6 +352,15 @@ do { \
return; \
} while (0)
+/**
+ * closure_return_with_destructor - finish execution of a closure, with destructor
+ *
+ * Works like closure_return(), except @destructor will be called when all
+ * outstanding refs on @cl have been dropped; @destructor may be used to safely
+ * free the memory occupied by @cl, and it is called with the ref on the parent
+ * closure still held - so @destructor could safely return an item to a
+ * freelist protected by @cl's parent.
+ */
#define closure_return_with_destructor(_cl, _destructor) \
do { \
set_closure_fn(_cl, _destructor, NULL); \
@@ -514,6 +368,13 @@ do { \
return; \
} while (0)
+/**
+ * closure_call - execute @fn out of a new, uninitialized closure
+ *
+ * Typically used when running out of one closure, and we want to run @fn
+ * asynchronously out of a new closure - @parent will then wait for @cl to
+ * finish.
+ */
static inline void closure_call(struct closure *cl, closure_fn fn,
struct workqueue_struct *wq,
struct closure *parent)
@@ -522,12 +383,4 @@ static inline void closure_call(struct closure *cl, closure_fn fn,
continue_at_nobarrier(cl, fn, wq);
}
-static inline void closure_trylock_call(struct closure *cl, closure_fn fn,
- struct workqueue_struct *wq,
- struct closure *parent)
-{
- if (closure_trylock(cl, parent))
- continue_at_nobarrier(cl, fn, wq);
-}
-
#endif /* _LINUX_CLOSURE_H */
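Read together, the kernel-doc added above implies a simple usage pattern: closure_init() takes the initial ref (and one on @parent), continue_at() runs the next stage once all outstanding refs are dropped, and closure_return() finishes by releasing the parent's ref. A rough two-stage sketch under those assumptions (my_op, my_stage2 and my_wq are made-up names):

#include "closure.h"

struct my_op {
	struct closure cl;
	/* per-operation state would live here */
};

static struct workqueue_struct *my_wq;	/* hypothetical workqueue */

static void my_stage2(struct closure *cl)
{
	struct my_op *op = container_of(cl, struct my_op, cl);

	/* runs only after every ref taken during stage 1 is dropped */
	pr_debug("op %p finished stage 1\n", op);
	closure_return(cl);	/* drops the ref held on the parent */
}

static void my_op_start(struct my_op *op, struct closure *parent)
{
	closure_init(&op->cl, parent);

	/*
	 * submit async work here, taking refs with closure_get() and
	 * dropping them with closure_put() from the completion handlers
	 */

	continue_at(&op->cl, my_stage2, my_wq);
	/* continue_at() expands to a return, so nothing may follow it */
}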
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 264fcfbd6290..8b1f1d5c1819 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -8,6 +8,7 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
+#include "extents.h"
#include <linux/console.h>
#include <linux/debugfs.h>
@@ -17,163 +18,96 @@
static struct dentry *debug;
-const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
-{
- unsigned i;
-
- for (i = 0; i < KEY_PTRS(k); i++)
- if (ptr_available(c, k, i)) {
- struct cache *ca = PTR_CACHE(c, k, i);
- size_t bucket = PTR_BUCKET_NR(c, k, i);
- size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
- if (KEY_SIZE(k) + r > c->sb.bucket_size)
- return "bad, length too big";
- if (bucket < ca->sb.first_bucket)
- return "bad, short offset";
- if (bucket >= ca->sb.nbuckets)
- return "bad, offset past end of device";
- if (ptr_stale(c, k, i))
- return "stale";
- }
-
- if (!bkey_cmp(k, &ZERO_KEY))
- return "bad, null key";
- if (!KEY_PTRS(k))
- return "bad, no pointers";
- if (!KEY_SIZE(k))
- return "zeroed key";
- return "";
-}
-
-int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
-{
- unsigned i = 0;
- char *out = buf, *end = buf + size;
-
-#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
-
- p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k));
-
- if (KEY_PTRS(k))
- while (1) {
- p("%llu:%llu gen %llu",
- PTR_DEV(k, i), PTR_OFFSET(k, i), PTR_GEN(k, i));
-
- if (++i == KEY_PTRS(k))
- break;
-
- p(", ");
- }
-
- p("]");
-
- if (KEY_DIRTY(k))
- p(" dirty");
- if (KEY_CSUM(k))
- p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
-#undef p
- return out - buf;
-}
-
#ifdef CONFIG_BCACHE_DEBUG
-static void dump_bset(struct btree *b, struct bset *i)
-{
- struct bkey *k, *next;
- unsigned j;
- char buf[80];
-
- for (k = i->start; k < end(i); k = next) {
- next = bkey_next(k);
-
- bch_bkey_to_text(buf, sizeof(buf), k);
- printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
- (uint64_t *) k - i->d, i->keys, buf);
-
- for (j = 0; j < KEY_PTRS(k); j++) {
- size_t n = PTR_BUCKET_NR(b->c, k, j);
- printk(" bucket %zu", n);
-
- if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
- printk(" prio %i",
- PTR_BUCKET(b->c, k, j)->prio);
- }
+#define for_each_written_bset(b, start, i) \
+ for (i = (start); \
+ (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
+ i->seq == (start)->seq; \
+ i = (void *) i + set_blocks(i, block_bytes(b->c)) * \
+ block_bytes(b->c))
- printk(" %s\n", bch_ptr_status(b->c, k));
-
- if (next < end(i) &&
- bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
- printk(KERN_ERR "Key skipped backwards\n");
- }
-}
-
-static void bch_dump_bucket(struct btree *b)
-{
- unsigned i;
-
- console_lock();
- for (i = 0; i <= b->nsets; i++)
- dump_bset(b, b->sets[i].data);
- console_unlock();
-}
-
-void bch_btree_verify(struct btree *b, struct bset *new)
+void bch_btree_verify(struct btree *b)
{
struct btree *v = b->c->verify_data;
- struct closure cl;
- closure_init_stack(&cl);
+ struct bset *ondisk, *sorted, *inmemory;
+ struct bio *bio;
- if (!b->c->verify)
+ if (!b->c->verify || !b->c->verify_ondisk)
return;
- closure_wait_event(&b->io.wait, &cl,
- atomic_read(&b->io.cl.remaining) == -1);
-
+ down(&b->io_mutex);
mutex_lock(&b->c->verify_lock);
+ ondisk = b->c->verify_ondisk;
+ sorted = b->c->verify_data->keys.set->data;
+ inmemory = b->keys.set->data;
+
bkey_copy(&v->key, &b->key);
v->written = 0;
v->level = b->level;
+ v->keys.ops = b->keys.ops;
+
+ bio = bch_bbio_alloc(b->c);
+ bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev;
+ bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
+ bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
+ bch_bio_map(bio, sorted);
- bch_btree_node_read(v);
- closure_wait_event(&v->io.wait, &cl,
- atomic_read(&b->io.cl.remaining) == -1);
+ submit_bio_wait(REQ_META|READ_SYNC, bio);
+ bch_bbio_free(bio, b->c);
- if (new->keys != v->sets[0].data->keys ||
- memcmp(new->start,
- v->sets[0].data->start,
- (void *) end(new) - (void *) new->start)) {
- unsigned i, j;
+ memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);
+
+ bch_btree_node_read_done(v);
+ sorted = v->keys.set->data;
+
+ if (inmemory->keys != sorted->keys ||
+ memcmp(inmemory->start,
+ sorted->start,
+ (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
+ struct bset *i;
+ unsigned j;
console_lock();
- printk(KERN_ERR "*** original memory node:\n");
- for (i = 0; i <= b->nsets; i++)
- dump_bset(b, b->sets[i].data);
+ printk(KERN_ERR "*** in memory:\n");
+ bch_dump_bset(&b->keys, inmemory, 0);
- printk(KERN_ERR "*** sorted memory node:\n");
- dump_bset(b, new);
+ printk(KERN_ERR "*** read back in:\n");
+ bch_dump_bset(&v->keys, sorted, 0);
- printk(KERN_ERR "*** on disk node:\n");
- dump_bset(v, v->sets[0].data);
+ for_each_written_bset(b, ondisk, i) {
+ unsigned block = ((void *) i - (void *) ondisk) /
+ block_bytes(b->c);
+
+ printk(KERN_ERR "*** on disk block %u:\n", block);
+ bch_dump_bset(&b->keys, i, block);
+ }
- for (j = 0; j < new->keys; j++)
- if (new->d[j] != v->sets[0].data->d[j])
+ printk(KERN_ERR "*** block %zu not written\n",
+ ((void *) i - (void *) ondisk) / block_bytes(b->c));
+
+ for (j = 0; j < inmemory->keys; j++)
+ if (inmemory->d[j] != sorted->d[j])
break;
+ printk(KERN_ERR "b->written %u\n", b->written);
+
console_unlock();
panic("verify failed at %u\n", j);
}
mutex_unlock(&b->c->verify_lock);
+ up(&b->io_mutex);
}
void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
char name[BDEVNAME_SIZE];
struct bio *check;
- struct bio_vec *bv;
+ struct bio_vec bv, *bv2;
+ struct bvec_iter iter;
int i;
check = bio_clone(bio, GFP_NOIO);
@@ -185,95 +119,27 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
submit_bio_wait(READ_SYNC, check);
- bio_for_each_segment(bv, bio, i) {
- void *p1 = kmap_atomic(bv->bv_page);
- void *p2 = page_address(check->bi_io_vec[i].bv_page);
+ bio_for_each_segment(bv, bio, iter) {
+ void *p1 = kmap_atomic(bv.bv_page);
+ void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
- cache_set_err_on(memcmp(p1 + bv->bv_offset,
- p2 + bv->bv_offset,
- bv->bv_len),
+ cache_set_err_on(memcmp(p1 + bv.bv_offset,
+ p2 + bv.bv_offset,
+ bv.bv_len),
dc->disk.c,
"verify failed at dev %s sector %llu",
bdevname(dc->bdev, name),
- (uint64_t) bio->bi_sector);
+ (uint64_t) bio->bi_iter.bi_sector);
kunmap_atomic(p1);
}
- bio_for_each_segment_all(bv, check, i)
- __free_page(bv->bv_page);
+ bio_for_each_segment_all(bv2, check, i)
+ __free_page(bv2->bv_page);
out_put:
bio_put(check);
}
-int __bch_count_data(struct btree *b)
-{
- unsigned ret = 0;
- struct btree_iter iter;
- struct bkey *k;
-
- if (!b->level)
- for_each_key(b, k, &iter)
- ret += KEY_SIZE(k);
- return ret;
-}
-
-void __bch_check_keys(struct btree *b, const char *fmt, ...)
-{
- va_list args;
- struct bkey *k, *p = NULL;
- struct btree_iter iter;
- const char *err;
-
- for_each_key(b, k, &iter) {
- if (!b->level) {
- err = "Keys out of order";
- if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
- goto bug;
-
- if (bch_ptr_invalid(b, k))
- continue;
-
- err = "Overlapping keys";
- if (p && bkey_cmp(p, &START_KEY(k)) > 0)
- goto bug;
- } else {
- if (bch_ptr_bad(b, k))
- continue;
-
- err = "Duplicate keys";
- if (p && !bkey_cmp(p, k))
- goto bug;
- }
- p = k;
- }
-
- err = "Key larger than btree node key";
- if (p && bkey_cmp(p, &b->key) > 0)
- goto bug;
-
- return;
-bug:
- bch_dump_bucket(b);
-
- va_start(args, fmt);
- vprintk(fmt, args);
- va_end(args);
-
- panic("bcache error: %s:\n", err);
-}
-
-void bch_btree_iter_next_check(struct btree_iter *iter)
-{
- struct bkey *k = iter->data->k, *next = bkey_next(k);
-
- if (next < iter->data->end &&
- bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) {
- bch_dump_bucket(iter->b);
- panic("Key skipped backwards\n");
- }
-}
-
#endif
#ifdef CONFIG_DEBUG_FS
@@ -320,7 +186,7 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
if (!w)
break;
- bch_bkey_to_text(kbuf, sizeof(kbuf), &w->key);
+ bch_extent_to_text(kbuf, sizeof(kbuf), &w->key);
i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
bch_keybuf_del(&i->keys, w);
}
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
index 2ede60e31874..1f63c195d247 100644
--- a/drivers/md/bcache/debug.h
+++ b/drivers/md/bcache/debug.h
@@ -1,47 +1,30 @@
#ifndef _BCACHE_DEBUG_H
#define _BCACHE_DEBUG_H
-/* Btree/bkey debug printing */
-
-int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k);
+struct bio;
+struct cached_dev;
+struct cache_set;
#ifdef CONFIG_BCACHE_DEBUG
-void bch_btree_verify(struct btree *, struct bset *);
+void bch_btree_verify(struct btree *);
void bch_data_verify(struct cached_dev *, struct bio *);
-int __bch_count_data(struct btree *);
-void __bch_check_keys(struct btree *, const char *, ...);
-void bch_btree_iter_next_check(struct btree_iter *);
-#define EBUG_ON(cond) BUG_ON(cond)
#define expensive_debug_checks(c) ((c)->expensive_debug_checks)
#define key_merging_disabled(c) ((c)->key_merging_disabled)
#define bypass_torture_test(d) ((d)->bypass_torture_test)
#else /* DEBUG */
-static inline void bch_btree_verify(struct btree *b, struct bset *i) {}
+static inline void bch_btree_verify(struct btree *b) {}
static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
-static inline int __bch_count_data(struct btree *b) { return -1; }
-static inline void __bch_check_keys(struct btree *b, const char *fmt, ...) {}
-static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
-#define EBUG_ON(cond) do { if (cond); } while (0)
#define expensive_debug_checks(c) 0
#define key_merging_disabled(c) 0
#define bypass_torture_test(d) 0
#endif
-#define bch_count_data(b) \
- (expensive_debug_checks((b)->c) ? __bch_count_data(b) : -1)
-
-#define bch_check_keys(b, ...) \
-do { \
- if (expensive_debug_checks((b)->c)) \
- __bch_check_keys(b, __VA_ARGS__); \
-} while (0)
-
#ifdef CONFIG_DEBUG_FS
void bch_debug_init_cache_set(struct cache_set *);
#else
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
new file mode 100644
index 000000000000..416d1a3e028e
--- /dev/null
+++ b/drivers/md/bcache/extents.c
@@ -0,0 +1,616 @@
+/*
+ * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
+ *
+ * Uses a block device as cache for other block devices; optimized for SSDs.
+ * All allocation is done in buckets, which should match the erase block size
+ * of the device.
+ *
+ * Buckets containing cached data are kept on a heap sorted by priority;
+ * bucket priority is increased on cache hit, and periodically all the buckets
+ * on the heap have their priority scaled down. This currently is just used as
+ * an LRU but in the future should allow for more intelligent heuristics.
+ *
+ * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
+ * counter. Garbage collection is used to remove stale pointers.
+ *
+ * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
+ * as keys are inserted we only sort the pages that have not yet been written.
+ * When garbage collection is run, we resort the entire node.
+ *
+ * All configuration is done via sysfs; see Documentation/bcache.txt.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "extents.h"
+#include "writeback.h"
+
+static void sort_key_next(struct btree_iter *iter,
+ struct btree_iter_set *i)
+{
+ i->k = bkey_next(i->k);
+
+ if (i->k == i->end)
+ *i = iter->data[--iter->used];
+}
+
+static bool bch_key_sort_cmp(struct btree_iter_set l,
+ struct btree_iter_set r)
+{
+ int64_t c = bkey_cmp(l.k, r.k);
+
+ return c ? c > 0 : l.k < r.k;
+}
+
+static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+ unsigned i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i)) {
+ struct cache *ca = PTR_CACHE(c, k, i);
+ size_t bucket = PTR_BUCKET_NR(c, k, i);
+ size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+ if (KEY_SIZE(k) + r > c->sb.bucket_size ||
+ bucket < ca->sb.first_bucket ||
+ bucket >= ca->sb.nbuckets)
+ return true;
+ }
+
+ return false;
+}
+
+/* Common among btree and extent ptrs */
+
+static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
+{
+ unsigned i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i)) {
+ struct cache *ca = PTR_CACHE(c, k, i);
+ size_t bucket = PTR_BUCKET_NR(c, k, i);
+ size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+ if (KEY_SIZE(k) + r > c->sb.bucket_size)
+ return "bad, length too big";
+ if (bucket < ca->sb.first_bucket)
+ return "bad, short offset";
+ if (bucket >= ca->sb.nbuckets)
+ return "bad, offset past end of device";
+ if (ptr_stale(c, k, i))
+ return "stale";
+ }
+
+ if (!bkey_cmp(k, &ZERO_KEY))
+ return "bad, null key";
+ if (!KEY_PTRS(k))
+ return "bad, no pointers";
+ if (!KEY_SIZE(k))
+ return "zeroed key";
+ return "";
+}
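
__ptr_invalid() and bch_ptr_status() apply the same three bounds checks; the second merely reports which one failed. A rough standalone restatement of that arithmetic, with made-up superblock numbers (bucket_size, first_bucket and nbuckets below are illustrative, not read from a real cache):

#include <stdbool.h>
#include <stdio.h>

/* Simplified check: a pointer is bad if the extent spills past its bucket
 * or the bucket number is outside the device's usable range. */
static const char *ptr_status(unsigned long bucket, unsigned long offset_in_bucket,
			      unsigned long key_size, unsigned long bucket_size,
			      unsigned long first_bucket, unsigned long nbuckets)
{
	if (key_size + offset_in_bucket > bucket_size)
		return "bad, length too big";
	if (bucket < first_bucket)
		return "bad, short offset";
	if (bucket >= nbuckets)
		return "bad, offset past end of device";
	return "";
}

int main(void)
{
	/* 1024-sector buckets, buckets 1..131071 usable (illustrative numbers). */
	printf("%s\n", ptr_status(5, 1000, 128, 1024, 1, 131072));  /* spills over */
	printf("%s\n", ptr_status(0, 0, 8, 1024, 1, 131072));       /* below first */
	printf("[%s]\n", ptr_status(42, 0, 8, 1024, 1, 131072));    /* fine */
	return 0;
}
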
+
+void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
+{
+ unsigned i = 0;
+ char *out = buf, *end = buf + size;
+
+#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
+
+ p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ if (i)
+ p(", ");
+
+ if (PTR_DEV(k, i) == PTR_CHECK_DEV)
+ p("check dev");
+ else
+ p("%llu:%llu gen %llu", PTR_DEV(k, i),
+ PTR_OFFSET(k, i), PTR_GEN(k, i));
+ }
+
+ p("]");
+
+ if (KEY_DIRTY(k))
+ p(" dirty");
+ if (KEY_CSUM(k))
+ p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
+#undef p
+}
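
For orientation, this is the shape of string bch_extent_to_text() produces; the call below is a usage sketch with invented values, and the dirty/csum suffixes only appear when the corresponding key bits are set:

/* Usage sketch, given some struct bkey *k (values invented): */
char buf[80];
bch_extent_to_text(buf, sizeof(buf), k);
/*
 * buf now reads something like:
 *   "5:4096 len 128 -> [0:73728 gen 3] dirty"
 * i.e. inode:start, the extent length, then one "dev:offset gen N" entry
 * per pointer in the key.
 */
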
+
+static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
+{
+ struct btree *b = container_of(keys, struct btree, keys);
+ unsigned j;
+ char buf[80];
+
+ bch_extent_to_text(buf, sizeof(buf), k);
+ printk(" %s", buf);
+
+ for (j = 0; j < KEY_PTRS(k); j++) {
+ size_t n = PTR_BUCKET_NR(b->c, k, j);
+ printk(" bucket %zu", n);
+
+ if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
+ printk(" prio %i",
+ PTR_BUCKET(b->c, k, j)->prio);
+ }
+
+ printk(" %s\n", bch_ptr_status(b->c, k));
+}
+
+/* Btree ptrs */
+
+bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+ char buf[80];
+
+ if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
+ goto bad;
+
+ if (__ptr_invalid(c, k))
+ goto bad;
+
+ return false;
+bad:
+ bch_extent_to_text(buf, sizeof(buf), k);
+ cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
+ return true;
+}
+
+static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+ return __bch_btree_ptr_invalid(b->c, k);
+}
+
+static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
+{
+ unsigned i;
+ char buf[80];
+ struct bucket *g;
+
+ if (mutex_trylock(&b->c->bucket_lock)) {
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(b->c, k, i)) {
+ g = PTR_BUCKET(b->c, k, i);
+
+ if (KEY_DIRTY(k) ||
+ g->prio != BTREE_PRIO ||
+ (b->c->gc_mark_valid &&
+ GC_MARK(g) != GC_MARK_METADATA))
+ goto err;
+ }
+
+ mutex_unlock(&b->c->bucket_lock);
+ }
+
+ return false;
+err:
+ mutex_unlock(&b->c->bucket_lock);
+ bch_extent_to_text(buf, sizeof(buf), k);
+ btree_bug(b,
+"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+ buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
+ g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+ return true;
+}
+
+static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+ unsigned i;
+
+ if (!bkey_cmp(k, &ZERO_KEY) ||
+ !KEY_PTRS(k) ||
+ bch_ptr_invalid(bk, k))
+ return true;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (!ptr_available(b->c, k, i) ||
+ ptr_stale(b->c, k, i))
+ return true;
+
+ if (expensive_debug_checks(b->c) &&
+ btree_ptr_bad_expensive(b, k))
+ return true;
+
+ return false;
+}
+
+static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
+ struct bkey *insert,
+ struct btree_iter *iter,
+ struct bkey *replace_key)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+
+ if (!KEY_OFFSET(insert))
+ btree_current_write(b)->prio_blocked++;
+
+ return false;
+}
+
+const struct btree_keys_ops bch_btree_keys_ops = {
+ .sort_cmp = bch_key_sort_cmp,
+ .insert_fixup = bch_btree_ptr_insert_fixup,
+ .key_invalid = bch_btree_ptr_invalid,
+ .key_bad = bch_btree_ptr_bad,
+ .key_to_text = bch_extent_to_text,
+ .key_dump = bch_bkey_dump,
+};
+
+/* Extents */
+
+/*
+ * Returns true if l > r - unless l == r, in which case returns true if l is
+ * older than r.
+ *
+ * Necessary for btree_sort_fixup() - if there are multiple keys that compare
+ * equal in different sets, we have to process them newest to oldest.
+ */
+static bool bch_extent_sort_cmp(struct btree_iter_set l,
+ struct btree_iter_set r)
+{
+ int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
+
+ return c ? c > 0 : l.k < r.k;
+}
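
A small self-contained illustration of the ordering bch_extent_sort_cmp() establishes for the sort heap: keys compare by start offset, and exact ties are broken so the newer key is processed first (the older one sorts as "greater"). The struct below and its "lower set index means newer" rule are simplifications for illustration, not the kernel's btree_iter_set, where newness is inferred from the key pointers:

#include <stdbool.h>
#include <stdio.h>

struct toy_key { unsigned long start; int set; };  /* lower set == newer (toy rule) */

/* Returns true if l should sort after r: larger start wins, and on a tie
 * the older key loses so the newer one comes off the heap first. */
static bool toy_sort_cmp(struct toy_key l, struct toy_key r)
{
	if (l.start != r.start)
		return l.start > r.start;
	return l.set > r.set;
}

int main(void)
{
	struct toy_key newer = { 64, 0 }, older = { 64, 1 };

	printf("older after newer: %d\n", toy_sort_cmp(older, newer)); /* 1 */
	printf("newer after older: %d\n", toy_sort_cmp(newer, older)); /* 0 */
	return 0;
}
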
+
+static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
+ struct bkey *tmp)
+{
+ while (iter->used > 1) {
+ struct btree_iter_set *top = iter->data, *i = top + 1;
+
+ if (iter->used > 2 &&
+ bch_extent_sort_cmp(i[0], i[1]))
+ i++;
+
+ if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
+ break;
+
+ if (!KEY_SIZE(i->k)) {
+ sort_key_next(iter, i);
+ heap_sift(iter, i - top, bch_extent_sort_cmp);
+ continue;
+ }
+
+ if (top->k > i->k) {
+ if (bkey_cmp(top->k, i->k) >= 0)
+ sort_key_next(iter, i);
+ else
+ bch_cut_front(top->k, i->k);
+
+ heap_sift(iter, i - top, bch_extent_sort_cmp);
+ } else {
+ /* can't happen because of comparison func */
+ BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
+
+ if (bkey_cmp(i->k, top->k) < 0) {
+ bkey_copy(tmp, top->k);
+
+ bch_cut_back(&START_KEY(i->k), tmp);
+ bch_cut_front(i->k, top->k);
+ heap_sift(iter, 0, bch_extent_sort_cmp);
+
+ return tmp;
+ } else {
+ bch_cut_back(&START_KEY(i->k), top->k);
+ }
+ }
+ }
+
+ return NULL;
+}
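
The invariant bch_extent_sort_fixup() maintains is that when the same range exists in both a newer and an older set, the older key is trimmed (or skipped entirely) so the merged output never contains overlapping live extents. A toy run of the front-cut case, assuming plain [start, end) ranges rather than real bkeys:

#include <stdio.h>

struct toy_extent { unsigned long start, end; };   /* [start, end) */

/* The older extent loses whatever the newer one already covers at its front. */
static void toy_cut_front(struct toy_extent newer, struct toy_extent *older)
{
	if (older->start < newer.end)
		older->start = newer.end > older->end ? older->end : newer.end;
}

int main(void)
{
	struct toy_extent newer = { 0, 8 }, older = { 4, 12 };

	toy_cut_front(newer, &older);
	printf("older is now [%lu, %lu)\n", older.start, older.end); /* [8, 12) */
	return 0;
}
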
+
+static bool bch_extent_insert_fixup(struct btree_keys *b,
+ struct bkey *insert,
+ struct btree_iter *iter,
+ struct bkey *replace_key)
+{
+ struct cache_set *c = container_of(b, struct btree, keys)->c;
+
+ void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
+ {
+ if (KEY_DIRTY(k))
+ bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
+ offset, -sectors);
+ }
+
+ uint64_t old_offset;
+ unsigned old_size, sectors_found = 0;
+
+ BUG_ON(!KEY_OFFSET(insert));
+ BUG_ON(!KEY_SIZE(insert));
+
+ while (1) {
+ struct bkey *k = bch_btree_iter_next(iter);
+ if (!k)
+ break;
+
+ if (bkey_cmp(&START_KEY(k), insert) >= 0) {
+ if (KEY_SIZE(k))
+ break;
+ else
+ continue;
+ }
+
+ if (bkey_cmp(k, &START_KEY(insert)) <= 0)
+ continue;
+
+ old_offset = KEY_START(k);
+ old_size = KEY_SIZE(k);
+
+ /*
+ * We might overlap with 0 size extents; we can't skip these
+ * because if they're in the set we're inserting to we have to
+ * adjust them so they don't overlap with the key we're
+ * inserting. But we don't want to check them for replace
+ * operations.
+ */
+
+ if (replace_key && KEY_SIZE(k)) {
+ /*
+ * k might have been split since we inserted/found the
+ * key we're replacing
+ */
+ unsigned i;
+ uint64_t offset = KEY_START(k) -
+ KEY_START(replace_key);
+
+ /* But it must be a subset of the replace key */
+ if (KEY_START(k) < KEY_START(replace_key) ||
+ KEY_OFFSET(k) > KEY_OFFSET(replace_key))
+ goto check_failed;
+
+ /* We didn't find a key that we were supposed to */
+ if (KEY_START(k) > KEY_START(insert) + sectors_found)
+ goto check_failed;
+
+ if (!bch_bkey_equal_header(k, replace_key))
+ goto check_failed;
+
+ /* skip past gen */
+ offset <<= 8;
+
+ BUG_ON(!KEY_PTRS(replace_key));
+
+ for (i = 0; i < KEY_PTRS(replace_key); i++)
+ if (k->ptr[i] != replace_key->ptr[i] + offset)
+ goto check_failed;
+
+ sectors_found = KEY_OFFSET(k) - KEY_START(insert);
+ }
+
+ if (bkey_cmp(insert, k) < 0 &&
+ bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
+ /*
+ * We overlapped in the middle of an existing key: that
+ * means we have to split the old key. But we have to do
+ * slightly different things depending on whether the
+ * old key has been written out yet.
+ */
+
+ struct bkey *top;
+
+ subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
+
+ if (bkey_written(b, k)) {
+ /*
+ * We insert a new key to cover the top of the
+ * old key, and the old key is modified in place
+ * to represent the bottom split.
+ *
+ * It's completely arbitrary whether the new key
+ * is the top or the bottom, but it has to match
+ * up with what btree_sort_fixup() does - it
+ * doesn't check for this kind of overlap, it
+ * depends on us inserting a new key for the top
+ * here.
+ */
+ top = bch_bset_search(b, bset_tree_last(b),
+ insert);
+ bch_bset_insert(b, top, k);
+ } else {
+ BKEY_PADDED(key) temp;
+ bkey_copy(&temp.key, k);
+ bch_bset_insert(b, k, &temp.key);
+ top = bkey_next(k);
+ }
+
+ bch_cut_front(insert, top);
+ bch_cut_back(&START_KEY(insert), k);
+ bch_bset_fix_invalidated_key(b, k);
+ goto out;
+ }
+
+ if (bkey_cmp(insert, k) < 0) {
+ bch_cut_front(insert, k);
+ } else {
+ if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
+ old_offset = KEY_START(insert);
+
+ if (bkey_written(b, k) &&
+ bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
+ /*
+ * Completely overwrote, so we don't have to
+ * invalidate the binary search tree
+ */
+ bch_cut_front(k, k);
+ } else {
+ __bch_cut_back(&START_KEY(insert), k);
+ bch_bset_fix_invalidated_key(b, k);
+ }
+ }
+
+ subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
+ }
+
+check_failed:
+ if (replace_key) {
+ if (!sectors_found) {
+ return true;
+ } else if (sectors_found < KEY_SIZE(insert)) {
+ SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
+ (KEY_SIZE(insert) - sectors_found));
+ SET_KEY_SIZE(insert, sectors_found);
+ }
+ }
+out:
+ if (KEY_DIRTY(insert))
+ bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
+ KEY_START(insert),
+ KEY_SIZE(insert));
+
+ return false;
+}
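
The trickiest branch above is the middle overlap: the incoming extent lands strictly inside an existing one, so the old key has to become two keys. A rough standalone model of just that split, again with plain [start, end) ranges instead of bkeys; the written/unwritten distinction that decides which half becomes the newly inserted key is not modelled here:

#include <stdio.h>

struct toy_extent { unsigned long start, end; };   /* [start, end) */

/* Split @old around @insert: @old keeps the bottom, @top gets what is left
 * above the inserted range. */
static void toy_split(struct toy_extent *old, struct toy_extent insert,
		      struct toy_extent *top)
{
	top->start = insert.end;
	top->end   = old->end;
	old->end   = insert.start;
}

int main(void)
{
	struct toy_extent old = { 0, 100 }, insert = { 40, 60 }, top;

	toy_split(&old, insert, &top);
	printf("bottom [%lu, %lu), insert [%lu, %lu), top [%lu, %lu)\n",
	       old.start, old.end, insert.start, insert.end, top.start, top.end);
	/* bottom [0, 40), insert [40, 60), top [60, 100) */
	return 0;
}
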
+
+static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+ char buf[80];
+
+ if (!KEY_SIZE(k))
+ return true;
+
+ if (KEY_SIZE(k) > KEY_OFFSET(k))
+ goto bad;
+
+ if (__ptr_invalid(b->c, k))
+ goto bad;
+
+ return false;
+bad:
+ bch_extent_to_text(buf, sizeof(buf), k);
+ cache_bug(b->c, "spotted extent %s: %s", buf, bch_ptr_status(b->c, k));
+ return true;
+}
+
+static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
+ unsigned ptr)
+{
+ struct bucket *g = PTR_BUCKET(b->c, k, ptr);
+ char buf[80];
+
+ if (mutex_trylock(&b->c->bucket_lock)) {
+ if (b->c->gc_mark_valid &&
+ ((GC_MARK(g) != GC_MARK_DIRTY &&
+ KEY_DIRTY(k)) ||
+ GC_MARK(g) == GC_MARK_METADATA))
+ goto err;
+
+ if (g->prio == BTREE_PRIO)
+ goto err;
+
+ mutex_unlock(&b->c->bucket_lock);
+ }
+
+ return false;
+err:
+ mutex_unlock(&b->c->bucket_lock);
+ bch_extent_to_text(buf, sizeof(buf), k);
+ btree_bug(b,
+"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+ buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
+ g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+ return true;
+}
+
+static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+ struct bucket *g;
+ unsigned i, stale;
+
+ if (!KEY_PTRS(k) ||
+ bch_extent_invalid(bk, k))
+ return true;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (!ptr_available(b->c, k, i))
+ return true;
+
+ if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
+ return false;
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ g = PTR_BUCKET(b->c, k, i);
+ stale = ptr_stale(b->c, k, i);
+
+ btree_bug_on(stale > 96, b,
+ "key too stale: %i, need_gc %u",
+ stale, b->c->need_gc);
+
+ btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
+ b, "stale dirty pointer");
+
+ if (stale)
+ return true;
+
+ if (expensive_debug_checks(b->c) &&
+ bch_extent_bad_expensive(b, k, i))
+ return true;
+ }
+
+ return false;
+}
+
+static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
+{
+ return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
+ ~((uint64_t)1 << 63);
+}
+
+static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+ unsigned i;
+
+ if (key_merging_disabled(b->c))
+ return false;
+
+ for (i = 0; i < KEY_PTRS(l); i++)
+ if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+ PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
+ return false;
+
+ /* Keys with no pointers aren't restricted to one bucket and could
+ * overflow KEY_SIZE
+ */
+ if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
+ SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
+ SET_KEY_SIZE(l, USHRT_MAX);
+
+ bch_cut_front(l, r);
+ return false;
+ }
+
+ if (KEY_CSUM(l)) {
+ if (KEY_CSUM(r))
+ l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
+ else
+ SET_KEY_CSUM(l, 0);
+ }
+
+ SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
+ SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));
+
+ return true;
+}
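
The USHRT_MAX branch in bch_extent_merge() exists because KEY_SIZE is a 16-bit field: when the combined size would overflow, the left key grows only up to USHRT_MAX and the remainder stays behind in the right key. A worked version of just that arithmetic, keeping the bcache convention that a key's offset is its end sector and everything else simplified:

#include <stdio.h>

#define TOY_USHRT_MAX 65535ULL

/* As in bcache: offset is the end sector, size the length in sectors. */
struct toy_key { unsigned long long offset; unsigned long long size; };

/* Overflow branch only: grow l up to the 16-bit size limit, then cut the
 * front of r so the two keys stay adjacent without overlapping. */
static void toy_merge_overflow(struct toy_key *l, struct toy_key *r)
{
	unsigned long long grow = TOY_USHRT_MAX - l->size;

	l->offset += grow;
	l->size    = TOY_USHRT_MAX;
	r->size    = r->offset - l->offset;   /* what cut_front leaves behind */
}

int main(void)
{
	/* 40000 + 40000 sectors would overflow a 16-bit size field. */
	struct toy_key l = { .offset = 40000, .size = 40000 };
	struct toy_key r = { .offset = 80000, .size = 40000 };

	toy_merge_overflow(&l, &r);
	printf("l: end %llu size %llu, r: end %llu size %llu\n",
	       l.offset, l.size, r.offset, r.size);
	/* l: end 65535 size 65535, r: end 80000 size 14465 */
	return 0;
}
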
+
+const struct btree_keys_ops bch_extent_keys_ops = {
+ .sort_cmp = bch_extent_sort_cmp,
+ .sort_fixup = bch_extent_sort_fixup,
+ .insert_fixup = bch_extent_insert_fixup,
+ .key_invalid = bch_extent_invalid,
+ .key_bad = bch_extent_bad,
+ .key_merge = bch_extent_merge,
+ .key_to_text = bch_extent_to_text,
+ .key_dump = bch_bkey_dump,
+ .is_extents = true,
+};
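
The point of providing two ops tables is that generic btree/bset code no longer needs to know whether a node holds btree pointers or extents; it dispatches through the btree_keys_ops hooks. A hedged sketch of that shape only; the toy_keys struct and its hook set below are invented for illustration, the real struct btree_keys lives elsewhere in bcache:

#include <stdbool.h>
#include <stdio.h>

struct toy_key { int dummy; };

/* Invented, minimal stand-in for btree_keys_ops / btree_keys. */
struct toy_keys_ops {
	bool (*key_bad)(const struct toy_key *);
	bool is_extents;
};

struct toy_keys {
	const struct toy_keys_ops *ops;
};

static bool extent_key_bad(const struct toy_key *k) { (void)k; return false; }

static const struct toy_keys_ops toy_extent_ops = {
	.key_bad    = extent_key_bad,
	.is_extents = true,
};

int main(void)
{
	struct toy_keys b = { .ops = &toy_extent_ops };
	struct toy_key k = { 0 };

	/* Generic code stays type-agnostic and just calls through the table. */
	printf("bad=%d extents=%d\n", b.ops->key_bad(&k), b.ops->is_extents);
	return 0;
}
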
diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h
new file mode 100644
index 000000000000..e4e23409782d
--- /dev/null
+++ b/drivers/md/bcache/extents.h
@@ -0,0 +1,13 @@
+#ifndef _BCACHE_EXTENTS_H
+#define _BCACHE_EXTENTS_H
+
+extern const struct btree_keys_ops bch_btree_keys_ops;
+extern const struct btree_keys_ops bch_extent_keys_ops;
+
+struct bkey;
+struct cache_set;
+
+void bch_extent_to_text(char *, size_t, const struct bkey *);
+bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
+
+#endif /* _BCACHE_EXTENTS_H */
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9056632995b1..fa028fa82df4 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -11,178 +11,40 @@
#include <linux/blkdev.h>
-static void bch_bi_idx_hack_endio(struct bio *bio, int error)
-{
- struct bio *p = bio->bi_private;
-
- bio_endio(p, error);
- bio_put(bio);
-}
-
-static void bch_generic_make_request_hack(struct bio *bio)
-{
- if (bio->bi_idx) {
- struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
-
- memcpy(clone->bi_io_vec,
- bio_iovec(bio),
- bio_segments(bio) * sizeof(struct bio_vec));
-
- clone->bi_sector = bio->bi_sector;
- clone->bi_bdev = bio->bi_bdev;
- clone->bi_rw = bio->bi_rw;
- clone->bi_vcnt = bio_segments(bio);
- clone->bi_size = bio->bi_size;
-
- clone->bi_private = bio;
- clone->bi_end_io = bch_bi_idx_hack_endio;
-
- bio = clone;
- }
-
- /*
- * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
- * bios might have had more than that (before we split them per device
- * limitations).
- *
- * To be taken out once immutable bvec stuff is in.
- */
- bio->bi_max_vecs = bio->bi_vcnt;
-
- generic_make_request(bio);
-}
-
-/**
- * bch_bio_split - split a bio
- * @bio: bio to split
- * @sectors: number of sectors to split from the front of @bio
- * @gfp: gfp mask
- * @bs: bio set to allocate from
- *
- * Allocates and returns a new bio which represents @sectors from the start of
- * @bio, and updates @bio to represent the remaining sectors.
- *
- * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
- * unchanged.
- *
- * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
- * bvec boundary; it is the caller's responsibility to ensure that @bio is not
- * freed before the split.
- */
-struct bio *bch_bio_split(struct bio *bio, int sectors,
- gfp_t gfp, struct bio_set *bs)
-{
- unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
- struct bio_vec *bv;
- struct bio *ret = NULL;
-
- BUG_ON(sectors <= 0);
-
- if (sectors >= bio_sectors(bio))
- return bio;
-
- if (bio->bi_rw & REQ_DISCARD) {
- ret = bio_alloc_bioset(gfp, 1, bs);
- if (!ret)
- return NULL;
- idx = 0;
- goto out;
- }
-
- bio_for_each_segment(bv, bio, idx) {
- vcnt = idx - bio->bi_idx;
-
- if (!nbytes) {
- ret = bio_alloc_bioset(gfp, vcnt, bs);
- if (!ret)
- return NULL;
-
- memcpy(ret->bi_io_vec, bio_iovec(bio),
- sizeof(struct bio_vec) * vcnt);
-
- break;
- } else if (nbytes < bv->bv_len) {
- ret = bio_alloc_bioset(gfp, ++vcnt, bs);
- if (!ret)
- return NULL;
-
- memcpy(ret->bi_io_vec, bio_iovec(bio),
- sizeof(struct bio_vec) * vcnt);
-
- ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
- bv->bv_offset += nbytes;
- bv->bv_len -= nbytes;
- break;
- }
-
- nbytes -= bv->bv_len;
- }
-out:
- ret->bi_bdev = bio->bi_bdev;
- ret->bi_sector = bio->bi_sector;
- ret->bi_size = sectors << 9;
- ret->bi_rw = bio->bi_rw;
- ret->bi_vcnt = vcnt;
- ret->bi_max_vecs = vcnt;
-
- bio->bi_sector += sectors;
- bio->bi_size -= sectors << 9;
- bio->bi_idx = idx;
-
- if (bio_integrity(bio)) {
- if (bio_integrity_clone(ret, bio, gfp)) {
- bio_put(ret);
- return NULL;
- }
-
- bio_integrity_trim(ret, 0, bio_sectors(ret));
- bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
- }
-
- return ret;
-}
-
static unsigned bch_bio_max_sectors(struct bio *bio)
{
- unsigned ret = bio_sectors(bio);
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
- unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
- queue_max_segments(q));
+ struct bio_vec bv;
+ struct bvec_iter iter;
+ unsigned ret = 0, seg = 0;
if (bio->bi_rw & REQ_DISCARD)
- return min(ret, q->limits.max_discard_sectors);
-
- if (bio_segments(bio) > max_segments ||
- q->merge_bvec_fn) {
- struct bio_vec *bv;
- int i, seg = 0;
-
- ret = 0;
-
- bio_for_each_segment(bv, bio, i) {
- struct bvec_merge_data bvm = {
- .bi_bdev = bio->bi_bdev,
- .bi_sector = bio->bi_sector,
- .bi_size = ret << 9,
- .bi_rw = bio->bi_rw,
- };
-
- if (seg == max_segments)
- break;
+ return min(bio_sectors(bio), q->limits.max_discard_sectors);
+
+ bio_for_each_segment(bv, bio, iter) {
+ struct bvec_merge_data bvm = {
+ .bi_bdev = bio->bi_bdev,
+ .bi_sector = bio->bi_iter.bi_sector,
+ .bi_size = ret << 9,
+ .bi_rw = bio->bi_rw,
+ };
+
+ if (seg == min_t(unsigned, BIO_MAX_PAGES,
+ queue_max_segments(q)))
+ break;
- if (q->merge_bvec_fn &&
- q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
- break;
+ if (q->merge_bvec_fn &&
+ q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+ break;
- seg++;
- ret += bv->bv_len >> 9;
- }
+ seg++;
+ ret += bv.bv_len >> 9;
}
ret = min(ret, queue_max_sectors(q));
WARN_ON(!ret);
- ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9);
+ ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);
return ret;
}
@@ -193,7 +55,7 @@ static void bch_bio_submit_split_done(struct closure *cl)
s->bio->bi_end_io = s->bi_end_io;
s->bio->bi_private = s->bi_private;
- bio_endio(s->bio, 0);
+ bio_endio_nodec(s->bio, 0);
closure_debug_destroy(&s->cl);
mempool_free(s, s->p->bio_split_hook);
@@ -232,19 +94,19 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
bio_get(bio);
do {
- n = bch_bio_split(bio, bch_bio_max_sectors(bio),
- GFP_NOIO, s->p->bio_split);
+ n = bio_next_split(bio, bch_bio_max_sectors(bio),
+ GFP_NOIO, s->p->bio_split);
n->bi_end_io = bch_bio_submit_split_endio;
n->bi_private = &s->cl;
closure_get(&s->cl);
- bch_generic_make_request_hack(n);
+ generic_make_request(n);
} while (n != bio);
continue_at(&s->cl, bch_bio_submit_split_done, NULL);
submit:
- bch_generic_make_request_hack(bio);
+ generic_make_request(bio);
}
/* Bios with headers */
@@ -272,8 +134,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
struct bbio *b = container_of(bio, struct bbio, bio);
- bio->bi_sector = PTR_OFFSET(&b->key, 0);
- bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
+ bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
+ bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
b->submit_time_us = local_clock_us();
closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ecdaa671bd50..18039affc306 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -44,17 +44,17 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
closure_init_stack(&cl);
- pr_debug("reading %llu", (uint64_t) bucket);
+ pr_debug("reading %u", bucket_index);
while (offset < ca->sb.bucket_size) {
reread: left = ca->sb.bucket_size - offset;
- len = min_t(unsigned, left, PAGE_SECTORS * 8);
+ len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);
bio_reset(bio);
- bio->bi_sector = bucket + offset;
+ bio->bi_iter.bi_sector = bucket + offset;
bio->bi_bdev = ca->bdev;
bio->bi_rw = READ;
- bio->bi_size = len << 9;
+ bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
bio->bi_private = &cl;
@@ -74,19 +74,28 @@ reread: left = ca->sb.bucket_size - offset;
struct list_head *where;
size_t blocks, bytes = set_bytes(j);
- if (j->magic != jset_magic(&ca->sb))
+ if (j->magic != jset_magic(&ca->sb)) {
+ pr_debug("%u: bad magic", bucket_index);
return ret;
+ }
- if (bytes > left << 9)
+ if (bytes > left << 9 ||
+ bytes > PAGE_SIZE << JSET_BITS) {
+ pr_info("%u: too big, %zu bytes, offset %u",
+ bucket_index, bytes, offset);
return ret;
+ }
if (bytes > len << 9)
goto reread;
- if (j->csum != csum_set(j))
+ if (j->csum != csum_set(j)) {
+ pr_info("%u: bad csum, %zu bytes, offset %u",
+ bucket_index, bytes, offset);
return ret;
+ }
- blocks = set_blocks(j, ca->set);
+ blocks = set_blocks(j, block_bytes(ca->set));
while (!list_empty(list)) {
i = list_first_entry(list,
@@ -275,7 +284,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
}
for (k = i->j.start;
- k < end(&i->j);
+ k < bset_bkey_last(&i->j);
k = bkey_next(k)) {
unsigned j;
@@ -313,7 +322,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
n, i->j.seq - 1, start, end);
for (k = i->j.start;
- k < end(&i->j);
+ k < bset_bkey_last(&i->j);
k = bkey_next(k)) {
trace_bcache_journal_replay_key(k);
@@ -437,13 +446,13 @@ static void do_journal_discard(struct cache *ca)
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
bio_init(bio);
- bio->bi_sector = bucket_to_sector(ca->set,
+ bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
bio->bi_bdev = ca->bdev;
bio->bi_rw = REQ_WRITE|REQ_DISCARD;
bio->bi_max_vecs = 1;
bio->bi_io_vec = bio->bi_inline_vecs;
- bio->bi_size = bucket_bytes(ca);
+ bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = journal_discard_endio;
closure_get(&ca->set->cl);
@@ -555,6 +564,14 @@ static void journal_write_done(struct closure *cl)
continue_at_nobarrier(cl, journal_write, system_wq);
}
+static void journal_write_unlock(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+
+ c->journal.io_in_flight = 0;
+ spin_unlock(&c->journal.lock);
+}
+
static void journal_write_unlocked(struct closure *cl)
__releases(c->journal.lock)
{
@@ -562,22 +579,15 @@ static void journal_write_unlocked(struct closure *cl)
struct cache *ca;
struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key;
- unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;
+ unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
+ c->sb.block_size;
struct bio *bio;
struct bio_list list;
bio_list_init(&list);
if (!w->need_write) {
- /*
- * XXX: have to unlock closure before we unlock journal lock,
- * else we race with bch_journal(). But this way we race
- * against cache set unregister. Doh.
- */
- set_closure_fn(cl, NULL, NULL);
- closure_sub(cl, CLOSURE_RUNNING + 1);
- spin_unlock(&c->journal.lock);
- return;
+ closure_return_with_destructor(cl, journal_write_unlock);
} else if (journal_full(&c->journal)) {
journal_reclaim(c);
spin_unlock(&c->journal.lock);
@@ -586,7 +596,7 @@ static void journal_write_unlocked(struct closure *cl)
continue_at(cl, journal_write, system_wq);
}
- c->journal.blocks_free -= set_blocks(w->data, c);
+ c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
w->data->btree_level = c->root->level;
@@ -608,10 +618,10 @@ static void journal_write_unlocked(struct closure *cl)
atomic_long_add(sectors, &ca->meta_sectors_written);
bio_reset(bio);
- bio->bi_sector = PTR_OFFSET(k, i);
+ bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
bio->bi_bdev = ca->bdev;
bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
- bio->bi_size = sectors << 9;
+ bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
bio->bi_private = w;
@@ -653,10 +663,12 @@ static void journal_try_write(struct cache_set *c)
w->need_write = true;
- if (closure_trylock(cl, &c->cl))
- journal_write_unlocked(cl);
- else
+ if (!c->journal.io_in_flight) {
+ c->journal.io_in_flight = 1;
+ closure_call(cl, journal_write_unlocked, NULL, &c->cl);
+ } else {
spin_unlock(&c->journal.lock);
+ }
}
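
journal_try_write() now serializes journal writes with a plain io_in_flight flag guarded by journal.lock instead of closure_trylock(), and journal_write_unlock() clears the flag when the write's closure finishes. A stripped-down model of that guarded single-dispatch pattern, with a pthread mutex standing in for the spinlock and ordinary function calls standing in for the closure machinery:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int io_in_flight;

static void write_unlocked(void)        /* called with the lock held, drops it */
{
	pthread_mutex_unlock(&lock);
	printf("issuing journal write\n");
}

static void write_done(void)            /* stands in for journal_write_unlock */
{
	pthread_mutex_lock(&lock);
	io_in_flight = 0;
	pthread_mutex_unlock(&lock);
}

/* Only one journal write may be in flight; later callers just back off. */
static void try_write(void)
{
	pthread_mutex_lock(&lock);
	if (!io_in_flight) {
		io_in_flight = 1;
		write_unlocked();               /* consumes the lock */
	} else {
		pthread_mutex_unlock(&lock);    /* a write is already in flight */
	}
}

int main(void)
{
	try_write();
	try_write();    /* backs off: io_in_flight still set */
	write_done();
	try_write();    /* allowed again */
	return 0;
}
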
static struct journal_write *journal_wait_for_write(struct cache_set *c,
@@ -664,6 +676,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
{
size_t sectors;
struct closure cl;
+ bool wait = false;
closure_init_stack(&cl);
@@ -673,16 +686,19 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
struct journal_write *w = c->journal.cur;
sectors = __set_blocks(w->data, w->data->keys + nkeys,
- c) * c->sb.block_size;
+ block_bytes(c)) * c->sb.block_size;
if (sectors <= min_t(size_t,
c->journal.blocks_free * c->sb.block_size,
PAGE_SECTORS << JSET_BITS))
return w;
- /* XXX: tracepoint */
+ if (wait)
+ closure_wait(&c->journal.wait, &cl);
+
if (!journal_full(&c->journal)) {
- trace_bcache_journal_entry_full(c);
+ if (wait)
+ trace_bcache_journal_entry_full(c);
/*
* XXX: If we were inserting so many keys that they
@@ -692,12 +708,11 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
*/
BUG_ON(!w->data->keys);
- closure_wait(&w->wait, &cl);
journal_try_write(c); /* unlocks */
} else {
- trace_bcache_journal_full(c);
+ if (wait)
+ trace_bcache_journal_full(c);
- closure_wait(&c->journal.wait, &cl);
journal_reclaim(c);
spin_unlock(&c->journal.lock);
@@ -706,6 +721,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
closure_sync(&cl);
spin_lock(&c->journal.lock);
+ wait = true;
}
}
@@ -736,7 +752,7 @@ atomic_t *bch_journal(struct cache_set *c,
w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
- memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
+ memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
w->data->keys += bch_keylist_nkeys(keys);
ret = &fifo_back(&c->journal.pin);
@@ -780,7 +796,6 @@ int bch_journal_alloc(struct cache_set *c)
{
struct journal *j = &c->journal;
- closure_init_unlocked(&j->io);
spin_lock_init(&j->lock);
INIT_DELAYED_WORK(&j->work, journal_write_work);
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index a6472fda94b2..9180c4465075 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -104,6 +104,7 @@ struct journal {
/* used when waiting because the journal was full */
struct closure_waitlist wait;
struct closure io;
+ int io_in_flight;
struct delayed_work work;
/* Number of blocks free in the bucket(s) we're currently writing to */
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 7c1275e66025..9eb60d102de8 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
unsigned i;
for (i = 0; i < KEY_PTRS(k); i++) {
- struct cache *ca = PTR_CACHE(c, k, i);
struct bucket *g = PTR_BUCKET(c, k, i);
- if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
+ if (GC_MOVE(g))
return true;
}
@@ -65,11 +64,16 @@ static void write_moving_finish(struct closure *cl)
static void read_moving_endio(struct bio *bio, int error)
{
+ struct bbio *b = container_of(bio, struct bbio, bio);
struct moving_io *io = container_of(bio->bi_private,
struct moving_io, cl);
if (error)
io->op.error = error;
+ else if (!KEY_DIRTY(&b->key) &&
+ ptr_stale(io->op.c, &b->key, 0)) {
+ io->op.error = -EINTR;
+ }
bch_bbio_endio(io->op.c, bio, error, "reading data to move");
}
@@ -82,7 +86,7 @@ static void moving_init(struct moving_io *io)
bio_get(bio);
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
- bio->bi_size = KEY_SIZE(&io->w->key) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
PAGE_SECTORS);
bio->bi_private = &io->cl;
@@ -98,7 +102,7 @@ static void write_moving(struct closure *cl)
if (!op->error) {
moving_init(io);
- io->bio.bio.bi_sector = KEY_START(&io->w->key);
+ io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
op->write_prio = 1;
op->bio = &io->bio.bio;
@@ -141,6 +145,11 @@ static void read_moving(struct cache_set *c)
if (!w)
break;
+ if (ptr_stale(c, &w->key, 0)) {
+ bch_keybuf_del(&c->moving_gc_keys, w);
+ continue;
+ }
+
io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
* DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
GFP_KERNEL);
@@ -184,7 +193,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
static unsigned bucket_heap_top(struct cache *ca)
{
- return GC_SECTORS_USED(heap_peek(&ca->heap));
+ struct bucket *b;
+ return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}
void bch_moving_gc(struct cache_set *c)
@@ -201,7 +211,7 @@ void bch_moving_gc(struct cache_set *c)
for_each_cache(ca, c, i) {
unsigned sectors_to_move = 0;
unsigned reserve_sectors = ca->sb.bucket_size *
- min(fifo_used(&ca->free), ca->free.size / 2);
+ fifo_used(&ca->free[RESERVE_MOVINGGC]);
ca->heap.used = 0;
@@ -226,9 +236,8 @@ void bch_moving_gc(struct cache_set *c)
sectors_to_move -= GC_SECTORS_USED(b);
}
- ca->gc_move_threshold = bucket_heap_top(ca);
-
- pr_debug("threshold %u", ca->gc_move_threshold);
+ while (heap_pop(&ca->heap, b, bucket_cmp))
+ SET_GC_MOVE(b, 1);
}
mutex_unlock(&c->bucket_lock);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index fbcc851ed5a5..5d5d031cf381 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -163,7 +163,6 @@ static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
static void bcachecg_destroy(struct cgroup *cgroup)
{
struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
- free_css_id(&bcache_subsys, &cg->css);
kfree(cg);
}
@@ -198,14 +197,14 @@ static bool verify(struct cached_dev *dc, struct bio *bio)
static void bio_csum(struct bio *bio, struct bkey *k)
{
- struct bio_vec *bv;
+ struct bio_vec bv;
+ struct bvec_iter iter;
uint64_t csum = 0;
- int i;
- bio_for_each_segment(bv, bio, i) {
- void *d = kmap(bv->bv_page) + bv->bv_offset;
- csum = bch_crc64_update(csum, d, bv->bv_len);
- kunmap(bv->bv_page);
+ bio_for_each_segment(bv, bio, iter) {
+ void *d = kmap(bv.bv_page) + bv.bv_offset;
+ csum = bch_crc64_update(csum, d, bv.bv_len);
+ kunmap(bv.bv_page);
}
k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
@@ -255,26 +254,44 @@ static void bch_data_insert_keys(struct closure *cl)
closure_return(cl);
}
+static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
+ struct cache_set *c)
+{
+ size_t oldsize = bch_keylist_nkeys(l);
+ size_t newsize = oldsize + u64s;
+
+ /*
+	 * The journalling code doesn't handle the case where the set of keys to
+	 * insert is bigger than an empty write: if we just return -ENOMEM here,
+ * bio_insert() and bio_invalidate() will insert the keys created so far
+ * and finish the rest when the keylist is empty.
+ */
+ if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
+ return -ENOMEM;
+
+ return __bch_keylist_realloc(l, u64s);
+}
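
The -ENOMEM cutoff in bch_keylist_realloc() bounds a single insert batch to what fits in one journal write: the keys, measured in u64s, must fit in block_bytes(c) minus the jset header. A back-of-the-envelope check with an assumed 4 KiB block and an assumed jset header size; both numbers are illustrative, not read from a superblock:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	size_t block_bytes = 4096;     /* assumed cache block size */
	size_t jset_header = 44;       /* assumed sizeof(struct jset), illustrative */
	size_t oldsize = 0, u64s = 3;  /* e.g. a key: 2 header words + 1 pointer */

	size_t newsize = oldsize + u64s;

	if (newsize * sizeof(uint64_t) > block_bytes - jset_header)
		printf("would not fit in an empty journal write -> -ENOMEM\n");
	else
		printf("fits: %zu of %zu bytes\n",
		       newsize * sizeof(uint64_t), block_bytes - jset_header);
	return 0;
}
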
+
static void bch_data_invalidate(struct closure *cl)
{
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio;
pr_debug("invalidating %i sectors from %llu",
- bio_sectors(bio), (uint64_t) bio->bi_sector);
+ bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
while (bio_sectors(bio)) {
unsigned sectors = min(bio_sectors(bio),
1U << (KEY_SIZE_BITS - 1));
- if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
+ if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
goto out;
- bio->bi_sector += sectors;
- bio->bi_size -= sectors << 9;
+ bio->bi_iter.bi_sector += sectors;
+ bio->bi_iter.bi_size -= sectors << 9;
bch_keylist_add(&op->insert_keys,
- &KEY(op->inode, bio->bi_sector, sectors));
+ &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
}
op->insert_data_done = true;
@@ -336,14 +353,14 @@ static void bch_data_insert_start(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
- if (op->bypass)
- return bch_data_invalidate(cl);
-
if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
set_gc_sectors(op->c);
wake_up_gc(op->c);
}
+ if (op->bypass)
+ return bch_data_invalidate(cl);
+
/*
* Journal writes are marked REQ_FLUSH; if the original write was a
* flush, it'll wait on the journal write.
@@ -357,21 +374,21 @@ static void bch_data_insert_start(struct closure *cl)
/* 1 for the device pointer and 1 for the chksum */
if (bch_keylist_realloc(&op->insert_keys,
- 1 + (op->csum ? 1 : 0),
+ 3 + (op->csum ? 1 : 0),
op->c))
continue_at(cl, bch_data_insert_keys, bcache_wq);
k = op->insert_keys.top;
bkey_init(k);
SET_KEY_INODE(k, op->inode);
- SET_KEY_OFFSET(k, bio->bi_sector);
+ SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
op->write_point, op->write_prio,
op->writeback))
goto err;
- n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
+ n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
n->bi_end_io = bch_data_insert_endio;
n->bi_private = cl;
@@ -522,7 +539,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
(bio->bi_rw & REQ_WRITE)))
goto skip;
- if (bio->bi_sector & (c->sb.block_size - 1) ||
+ if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
bio_sectors(bio) & (c->sb.block_size - 1)) {
pr_debug("skipping unaligned io");
goto skip;
@@ -546,8 +563,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
spin_lock(&dc->io_lock);
- hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
- if (i->last == bio->bi_sector &&
+ hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+ if (i->last == bio->bi_iter.bi_sector &&
time_before(jiffies, i->jiffies))
goto found;
@@ -556,8 +573,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
add_sequential(task);
i->sequential = 0;
found:
- if (i->sequential + bio->bi_size > i->sequential)
- i->sequential += bio->bi_size;
+ if (i->sequential + bio->bi_iter.bi_size > i->sequential)
+ i->sequential += bio->bi_iter.bi_size;
i->last = bio_end_sector(bio);
i->jiffies = jiffies + msecs_to_jiffies(5000);
@@ -597,16 +614,13 @@ struct search {
/* Stack frame for bio_complete */
struct closure cl;
- struct bcache_device *d;
-
struct bbio bio;
struct bio *orig_bio;
struct bio *cache_miss;
+ struct bcache_device *d;
unsigned insert_bio_sectors;
-
unsigned recoverable:1;
- unsigned unaligned_bvec:1;
unsigned write:1;
unsigned read_dirty_data:1;
@@ -631,7 +645,8 @@ static void bch_cache_read_endio(struct bio *bio, int error)
if (error)
s->iop.error = error;
- else if (ptr_stale(s->iop.c, &b->key, 0)) {
+ else if (!KEY_DIRTY(&b->key) &&
+ ptr_stale(s->iop.c, &b->key, 0)) {
atomic_long_inc(&s->iop.c->cache_read_races);
s->iop.error = -EINTR;
}
@@ -650,15 +665,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
struct bkey *bio_key;
unsigned ptr;
- if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+ if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
return MAP_CONTINUE;
if (KEY_INODE(k) != s->iop.inode ||
- KEY_START(k) > bio->bi_sector) {
+ KEY_START(k) > bio->bi_iter.bi_sector) {
unsigned bio_sectors = bio_sectors(bio);
unsigned sectors = KEY_INODE(k) == s->iop.inode
? min_t(uint64_t, INT_MAX,
- KEY_START(k) - bio->bi_sector)
+ KEY_START(k) - bio->bi_iter.bi_sector)
: INT_MAX;
int ret = s->d->cache_miss(b, s, bio, sectors);
@@ -680,14 +695,14 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
if (KEY_DIRTY(k))
s->read_dirty_data = true;
- n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
- KEY_OFFSET(k) - bio->bi_sector),
- GFP_NOIO, s->d->bio_split);
+ n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
+ KEY_OFFSET(k) - bio->bi_iter.bi_sector),
+ GFP_NOIO, s->d->bio_split);
bio_key = &container_of(n, struct bbio, bio)->key;
bch_bkey_copy_single_ptr(bio_key, k, ptr);
- bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+ bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
n->bi_end_io = bch_cache_read_endio;
@@ -712,10 +727,13 @@ static void cache_lookup(struct closure *cl)
{
struct search *s = container_of(cl, struct search, iop.cl);
struct bio *bio = &s->bio.bio;
+ int ret;
+
+ bch_btree_op_init(&s->op, -1);
- int ret = bch_btree_map_keys(&s->op, s->iop.c,
- &KEY(s->iop.inode, bio->bi_sector, 0),
- cache_lookup_fn, MAP_END_KEY);
+ ret = bch_btree_map_keys(&s->op, s->iop.c,
+ &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
+ cache_lookup_fn, MAP_END_KEY);
if (ret == -EAGAIN)
continue_at(cl, cache_lookup, bcache_wq);
@@ -756,13 +774,15 @@ static void bio_complete(struct search *s)
}
}
-static void do_bio_hook(struct search *s)
+static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
struct bio *bio = &s->bio.bio;
- memcpy(bio, s->orig_bio, sizeof(struct bio));
+ bio_init(bio);
+ __bio_clone_fast(bio, orig_bio);
bio->bi_end_io = request_endio;
bio->bi_private = &s->cl;
+
atomic_set(&bio->bi_cnt, 3);
}
@@ -774,43 +794,36 @@ static void search_free(struct closure *cl)
if (s->iop.bio)
bio_put(s->iop.bio);
- if (s->unaligned_bvec)
- mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
-
closure_debug_destroy(cl);
mempool_free(s, s->d->c->search);
}
-static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
+static inline struct search *search_alloc(struct bio *bio,
+ struct bcache_device *d)
{
struct search *s;
- struct bio_vec *bv;
s = mempool_alloc(d->c->search, GFP_NOIO);
- memset(s, 0, offsetof(struct search, iop.insert_keys));
- __closure_init(&s->cl, NULL);
+ closure_init(&s->cl, NULL);
+ do_bio_hook(s, bio);
- s->iop.inode = d->id;
- s->iop.c = d->c;
- s->d = d;
- s->op.lock = -1;
- s->iop.write_point = hash_long((unsigned long) current, 16);
s->orig_bio = bio;
- s->write = (bio->bi_rw & REQ_WRITE) != 0;
- s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+ s->cache_miss = NULL;
+ s->d = d;
s->recoverable = 1;
+ s->write = (bio->bi_rw & REQ_WRITE) != 0;
+ s->read_dirty_data = 0;
s->start_time = jiffies;
- do_bio_hook(s);
- if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
- bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
- memcpy(bv, bio_iovec(bio),
- sizeof(struct bio_vec) * bio_segments(bio));
-
- s->bio.bio.bi_io_vec = bv;
- s->unaligned_bvec = 1;
- }
+ s->iop.c = d->c;
+ s->iop.bio = NULL;
+ s->iop.inode = d->id;
+ s->iop.write_point = hash_long((unsigned long) current, 16);
+ s->iop.write_prio = 0;
+ s->iop.error = 0;
+ s->iop.flags = 0;
+ s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
return s;
}
@@ -850,26 +863,13 @@ static void cached_dev_read_error(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
struct bio *bio = &s->bio.bio;
- struct bio_vec *bv;
- int i;
if (s->recoverable) {
/* Retry from the backing device: */
trace_bcache_read_retry(s->orig_bio);
s->iop.error = 0;
- bv = s->bio.bio.bi_io_vec;
- do_bio_hook(s);
- s->bio.bio.bi_io_vec = bv;
-
- if (!s->unaligned_bvec)
- bio_for_each_segment(bv, s->orig_bio, i)
- bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
- else
- memcpy(s->bio.bio.bi_io_vec,
- bio_iovec(s->orig_bio),
- sizeof(struct bio_vec) *
- bio_segments(s->orig_bio));
+ do_bio_hook(s, s->orig_bio);
/* XXX: invalidate cache */
@@ -894,9 +894,9 @@ static void cached_dev_read_done(struct closure *cl)
if (s->iop.bio) {
bio_reset(s->iop.bio);
- s->iop.bio->bi_sector = s->cache_miss->bi_sector;
+ s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
- s->iop.bio->bi_size = s->insert_bio_sectors << 9;
+ s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
bch_bio_map(s->iop.bio, NULL);
bio_copy_data(s->cache_miss, s->iop.bio);
@@ -905,8 +905,7 @@ static void cached_dev_read_done(struct closure *cl)
s->cache_miss = NULL;
}
- if (verify(dc, &s->bio.bio) && s->recoverable &&
- !s->unaligned_bvec && !s->read_dirty_data)
+ if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
bch_data_verify(dc, s->orig_bio);
bio_complete(s);
@@ -946,7 +945,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct bio *miss, *cache_bio;
if (s->cache_miss || s->iop.bypass) {
- miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+ miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
goto out_submit;
}
@@ -960,7 +959,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
s->iop.replace_key = KEY(s->iop.inode,
- bio->bi_sector + s->insert_bio_sectors,
+ bio->bi_iter.bi_sector + s->insert_bio_sectors,
s->insert_bio_sectors);
ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
@@ -969,7 +968,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
s->iop.replace = true;
- miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+ miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
/* btree_search_recurse()'s btree iterator is no good anymore */
ret = miss == bio ? MAP_DONE : -EINTR;
@@ -980,9 +979,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
if (!cache_bio)
goto out_submit;
- cache_bio->bi_sector = miss->bi_sector;
- cache_bio->bi_bdev = miss->bi_bdev;
- cache_bio->bi_size = s->insert_bio_sectors << 9;
+ cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
+ cache_bio->bi_bdev = miss->bi_bdev;
+ cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
cache_bio->bi_end_io = request_endio;
cache_bio->bi_private = &s->cl;
@@ -1032,7 +1031,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
struct closure *cl = &s->cl;
struct bio *bio = &s->bio.bio;
- struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+ struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
@@ -1088,8 +1087,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
closure_bio_submit(flush, cl, s->d);
}
} else {
- s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
- dc->disk.bio_split);
+ s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
closure_bio_submit(bio, cl, s->d);
}
@@ -1127,13 +1125,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
part_stat_unlock();
bio->bi_bdev = dc->bdev;
- bio->bi_sector += dc->sb.data_offset;
+ bio->bi_iter.bi_sector += dc->sb.data_offset;
if (cached_dev_get(dc)) {
s = search_alloc(bio, d);
trace_bcache_request_start(s->d, bio);
- if (!bio->bi_size) {
+ if (!bio->bi_iter.bi_size) {
/*
* can't call bch_journal_meta from under
* generic_make_request
@@ -1205,24 +1203,24 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
static int flash_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned sectors)
{
- struct bio_vec *bv;
- int i;
+ struct bio_vec bv;
+ struct bvec_iter iter;
/* Zero fill bio */
- bio_for_each_segment(bv, bio, i) {
- unsigned j = min(bv->bv_len >> 9, sectors);
+ bio_for_each_segment(bv, bio, iter) {
+ unsigned j = min(bv.bv_len >> 9, sectors);
- void *p = kmap(bv->bv_page);
- memset(p + bv->bv_offset, 0, j << 9);
- kunmap(bv->bv_page);
+ void *p = kmap(bv.bv_page);
+ memset(p + bv.bv_offset, 0, j << 9);
+ kunmap(bv.bv_page);
sectors -= j;
}
- bio_advance(bio, min(sectors << 9, bio->bi_size));
+ bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
- if (!bio->bi_size)
+ if (!bio->bi_iter.bi_size)
return MAP_DONE;
return MAP_CONTINUE;
@@ -1256,7 +1254,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
trace_bcache_request_start(s->d, bio);
- if (!bio->bi_size) {
+ if (!bio->bi_iter.bi_size) {
/*
* can't call bch_journal_meta from under
* generic_make_request
@@ -1266,7 +1264,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
bcache_wq);
} else if (rw) {
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
- &KEY(d->id, bio->bi_sector, 0),
+ &KEY(d->id, bio->bi_iter.bi_sector, 0),
&KEY(d->id, bio_end_sector(bio), 0));
s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 2cd65bf073c2..39f21dbedc38 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -13,17 +13,22 @@ struct data_insert_op {
uint16_t write_prio;
short error;
- unsigned bypass:1;
- unsigned writeback:1;
- unsigned flush_journal:1;
- unsigned csum:1;
+ union {
+ uint16_t flags;
- unsigned replace:1;
- unsigned replace_collision:1;
+ struct {
+ unsigned bypass:1;
+ unsigned writeback:1;
+ unsigned flush_journal:1;
+ unsigned csum:1;
- unsigned insert_data_done:1;
+ unsigned replace:1;
+ unsigned replace_collision:1;
+
+ unsigned insert_data_done:1;
+ };
+ };
- /* Anything past this point won't get zeroed in search_alloc() */
struct keylist insert_keys;
BKEY_PADDED(replace_key);
};
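
Wrapping the bitfields of struct data_insert_op in an anonymous union with a uint16_t flags member lets the request code clear every flag with a single assignment (as search_alloc() now does with s->iop.flags = 0) instead of zeroing part of the struct. A small standalone illustration of the same trick; the field names mirror the patch but the surrounding struct is reduced to just the union:

#include <stdint.h>
#include <stdio.h>

struct toy_insert_op {
	union {
		uint16_t flags;
		struct {
			unsigned bypass:1;
			unsigned writeback:1;
			unsigned flush_journal:1;
			unsigned csum:1;
		};
	};
};

int main(void)
{
	struct toy_insert_op op = { 0 };

	op.bypass = 1;
	op.csum = 1;
	/* Typically prints 0x9 on x86-64; bitfield layout is ABI-dependent. */
	printf("flags before reset: %#x\n", (unsigned) op.flags);

	op.flags = 0;                   /* one store clears every flag */
	printf("bypass after reset: %u\n", op.bypass);
	return 0;
}
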
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index dec15cd2d797..24a3a1546caa 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -9,6 +9,7 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
+#include "extents.h"
#include "request.h"
#include "writeback.h"
@@ -225,7 +226,7 @@ static void write_bdev_super_endio(struct bio *bio, int error)
struct cached_dev *dc = bio->bi_private;
/* XXX: error checking */
- closure_put(&dc->sb_write.cl);
+ closure_put(&dc->sb_write);
}
static void __write_super(struct cache_sb *sb, struct bio *bio)
@@ -233,9 +234,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
unsigned i;
- bio->bi_sector = SB_SECTOR;
- bio->bi_rw = REQ_SYNC|REQ_META;
- bio->bi_size = SB_SIZE;
+ bio->bi_iter.bi_sector = SB_SECTOR;
+ bio->bi_rw = REQ_SYNC|REQ_META;
+ bio->bi_iter.bi_size = SB_SIZE;
bch_bio_map(bio, NULL);
out->offset = cpu_to_le64(sb->offset);
@@ -263,12 +264,20 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
submit_bio(REQ_WRITE, bio);
}
+static void bch_write_bdev_super_unlock(struct closure *cl)
+{
+ struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
+
+ up(&dc->sb_write_mutex);
+}
+
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
- struct closure *cl = &dc->sb_write.cl;
+ struct closure *cl = &dc->sb_write;
struct bio *bio = &dc->sb_bio;
- closure_lock(&dc->sb_write, parent);
+ down(&dc->sb_write_mutex);
+ closure_init(cl, parent);
bio_reset(bio);
bio->bi_bdev = dc->bdev;
@@ -278,7 +287,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
closure_get(cl);
__write_super(&dc->sb, bio);
- closure_return(cl);
+ closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}
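
The closure_lock()/closure_return() pairs become an explicit semaphore held for the lifetime of the closure: down() before closure_init(), and a destructor (bch_write_bdev_super_unlock() here, with similar helpers for the cache-set superblock and uuid writes below) that releases it when the closure finishes. A rough userspace model of that lifetime, with a POSIX semaphore and a plain callback standing in for the closure machinery:

#include <semaphore.h>
#include <stdio.h>

static sem_t sb_write_mutex;

static void write_super_unlock(void)    /* runs when the "closure" finishes */
{
	sem_post(&sb_write_mutex);           /* up(&dc->sb_write_mutex) */
}

static void write_super(void)
{
	sem_wait(&sb_write_mutex);           /* down(&dc->sb_write_mutex) */
	printf("superblock write in flight\n");
	/* ...the real code issues async I/O here and returns... */
	write_super_unlock();                /* closure_return_with_destructor */
}

int main(void)
{
	sem_init(&sb_write_mutex, 0, 1);     /* sema_init(..., 1) */
	write_super();
	write_super();
	sem_destroy(&sb_write_mutex);
	return 0;
}
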
static void write_super_endio(struct bio *bio, int error)
@@ -286,16 +295,24 @@ static void write_super_endio(struct bio *bio, int error)
struct cache *ca = bio->bi_private;
bch_count_io_errors(ca, error, "writing superblock");
- closure_put(&ca->set->sb_write.cl);
+ closure_put(&ca->set->sb_write);
+}
+
+static void bcache_write_super_unlock(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, sb_write);
+
+ up(&c->sb_write_mutex);
}
void bcache_write_super(struct cache_set *c)
{
- struct closure *cl = &c->sb_write.cl;
+ struct closure *cl = &c->sb_write;
struct cache *ca;
unsigned i;
- closure_lock(&c->sb_write, &c->cl);
+ down(&c->sb_write_mutex);
+ closure_init(cl, &c->cl);
c->sb.seq++;
@@ -317,7 +334,7 @@ void bcache_write_super(struct cache_set *c)
__write_super(&ca->sb, bio);
}
- closure_return(cl);
+ closure_return_with_destructor(cl, bcache_write_super_unlock);
}
/* UUID io */
@@ -325,29 +342,37 @@ void bcache_write_super(struct cache_set *c)
static void uuid_endio(struct bio *bio, int error)
{
struct closure *cl = bio->bi_private;
- struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl);
+ struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
cache_set_err_on(error, c, "accessing uuids");
bch_bbio_free(bio, c);
closure_put(cl);
}
+static void uuid_io_unlock(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
+
+ up(&c->uuid_write_mutex);
+}
+
static void uuid_io(struct cache_set *c, unsigned long rw,
struct bkey *k, struct closure *parent)
{
- struct closure *cl = &c->uuid_write.cl;
+ struct closure *cl = &c->uuid_write;
struct uuid_entry *u;
unsigned i;
char buf[80];
BUG_ON(!parent);
- closure_lock(&c->uuid_write, parent);
+ down(&c->uuid_write_mutex);
+ closure_init(cl, parent);
for (i = 0; i < KEY_PTRS(k); i++) {
struct bio *bio = bch_bbio_alloc(c);
bio->bi_rw = REQ_SYNC|REQ_META|rw;
- bio->bi_size = KEY_SIZE(k) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
bio->bi_end_io = uuid_endio;
bio->bi_private = cl;
@@ -359,7 +384,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
break;
}
- bch_bkey_to_text(buf, sizeof(buf), k);
+ bch_extent_to_text(buf, sizeof(buf), k);
pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);
for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
@@ -368,14 +393,14 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
u - c->uuids, u->uuid, u->label,
u->first_reg, u->last_reg, u->invalidated);
- closure_return(cl);
+ closure_return_with_destructor(cl, uuid_io_unlock);
}
static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
struct bkey *k = &j->uuid_bucket;
- if (bch_btree_ptr_invalid(c, k))
+ if (__bch_btree_ptr_invalid(c, k))
return "bad uuid pointer";
bkey_copy(&c->uuid_bucket, k);
@@ -420,7 +445,7 @@ static int __uuid_write(struct cache_set *c)
lockdep_assert_held(&bch_register_lock);
- if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
+ if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
return 1;
SET_KEY_SIZE(&k.key, c->sb.bucket_size);
@@ -503,10 +528,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
closure_init_stack(cl);
- bio->bi_sector = bucket * ca->sb.bucket_size;
- bio->bi_bdev = ca->bdev;
- bio->bi_rw = REQ_SYNC|REQ_META|rw;
- bio->bi_size = bucket_bytes(ca);
+ bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
+ bio->bi_bdev = ca->bdev;
+ bio->bi_rw = REQ_SYNC|REQ_META|rw;
+ bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = prio_endio;
bio->bi_private = ca;
@@ -538,8 +563,8 @@ void bch_prio_write(struct cache *ca)
atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
&ca->meta_sectors_written);
- pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
- fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+ //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
+ // fifo_used(&ca->free_inc), fifo_used(&ca->unused));
for (i = prio_buckets(ca) - 1; i >= 0; --i) {
long bucket;
@@ -558,7 +583,7 @@ void bch_prio_write(struct cache *ca)
p->magic = pset_magic(&ca->sb);
p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
- bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true);
+ bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
BUG_ON(bucket == -1);
mutex_unlock(&ca->set->bucket_lock);
@@ -739,8 +764,6 @@ static void bcache_device_free(struct bcache_device *d)
}
bio_split_pool_free(&d->bio_split_hook);
- if (d->unaligned_bvec)
- mempool_destroy(d->unaligned_bvec);
if (d->bio_split)
bioset_free(d->bio_split);
if (is_vmalloc_addr(d->full_dirty_stripes))
@@ -793,8 +816,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
return minor;
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
- !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
- sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
bio_split_pool_init(&d->bio_split_hook) ||
!(d->disk = alloc_disk(1))) {
ida_simple_remove(&bcache_minor, minor);
@@ -1102,7 +1123,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
INIT_WORK(&dc->detach, cached_dev_detach_finish);
- closure_init_unlocked(&dc->sb_write);
+ sema_init(&dc->sb_write_mutex, 1);
INIT_LIST_HEAD(&dc->io_lru);
spin_lock_init(&dc->io_lock);
bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
@@ -1114,6 +1135,12 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
}
+ dc->disk.stripe_size = q->limits.io_opt >> 9;
+
+ if (dc->disk.stripe_size)
+ dc->partial_stripes_expensive =
+ q->limits.raid_partial_stripes_expensive;
+
ret = bcache_device_init(&dc->disk, block_size,
dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
if (ret)
@@ -1325,8 +1352,8 @@ static void cache_set_free(struct closure *cl)
if (ca)
kobject_put(&ca->kobj);
+ bch_bset_sort_state_free(&c->sort);
free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
- free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));
if (c->bio_split)
bioset_free(c->bio_split);
@@ -1451,21 +1478,17 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->block_bits = ilog2(sb->block_size);
c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
- c->btree_pages = c->sb.bucket_size / PAGE_SECTORS;
+ c->btree_pages = bucket_pages(c);
if (c->btree_pages > BTREE_MAX_PAGES)
c->btree_pages = max_t(int, c->btree_pages / 4,
BTREE_MAX_PAGES);
- c->sort_crit_factor = int_sqrt(c->btree_pages);
-
- closure_init_unlocked(&c->sb_write);
+ sema_init(&c->sb_write_mutex, 1);
mutex_init(&c->bucket_lock);
init_waitqueue_head(&c->try_wait);
init_waitqueue_head(&c->bucket_wait);
- closure_init_unlocked(&c->uuid_write);
- mutex_init(&c->sort_lock);
+ sema_init(&c->uuid_write_mutex, 1);
- spin_lock_init(&c->sort_time.lock);
spin_lock_init(&c->btree_gc_time.lock);
spin_lock_init(&c->btree_split_time.lock);
spin_lock_init(&c->btree_read_time.lock);
@@ -1493,11 +1516,11 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
bucket_pages(c))) ||
!(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
!(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
- !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
!(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
bch_journal_alloc(c) ||
bch_btree_cache_alloc(c) ||
- bch_open_buckets_alloc(c))
+ bch_open_buckets_alloc(c) ||
+ bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
goto err;
c->congested_read_threshold_us = 2000;
@@ -1553,7 +1576,7 @@ static void run_cache_set(struct cache_set *c)
k = &j->btree_root;
err = "bad btree root";
- if (bch_btree_ptr_invalid(c, k))
+ if (__bch_btree_ptr_invalid(c, k))
goto err;
err = "error reading btree root";
@@ -1676,7 +1699,7 @@ err:
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
return ca->sb.block_size == c->sb.block_size &&
- ca->sb.bucket_size == c->sb.block_size &&
+ ca->sb.bucket_size == c->sb.bucket_size &&
ca->sb.nr_in_set == c->sb.nr_in_set;
}
@@ -1747,6 +1770,7 @@ err:
void bch_cache_release(struct kobject *kobj)
{
struct cache *ca = container_of(kobj, struct cache, kobj);
+ unsigned i;
if (ca->set)
ca->set->cache[ca->sb.nr_this_dev] = NULL;
@@ -1760,7 +1784,9 @@ void bch_cache_release(struct kobject *kobj)
free_heap(&ca->heap);
free_fifo(&ca->unused);
free_fifo(&ca->free_inc);
- free_fifo(&ca->free);
+
+ for (i = 0; i < RESERVE_NR; i++)
+ free_fifo(&ca->free[i]);
if (ca->sb_bio.bi_inline_vecs[0].bv_page)
put_page(ca->sb_bio.bi_io_vec[0].bv_page);
@@ -1786,10 +1812,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
ca->journal.bio.bi_max_vecs = 8;
ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
- free = roundup_pow_of_two(ca->sb.nbuckets) >> 9;
- free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2);
+ free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
- if (!init_fifo(&ca->free, free, GFP_KERNEL) ||
+ if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
+ !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+ !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
+ !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
!init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
!init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
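As a rough illustration of the new sizing (not part of the patch): the single free list becomes per-purpose reserves, and the baseline reserve shrinks from nbuckets/512 to nbuckets/1024. The numbers below assume a hypothetical cache with 2^21 buckets (1 TiB of 512 KiB buckets); RESERVE_PRIO is sized from prio_buckets(ca) and is not computed here.

#include <stdio.h>

static unsigned long roundup_pow_of_two_ul(unsigned long n)
{
	unsigned long r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned long nbuckets = 1UL << 21;	/* hypothetical device */
	unsigned long free = roundup_pow_of_two_ul(nbuckets) >> 10;

	printf("RESERVE_BTREE:      8 buckets\n");
	printf("RESERVE_MOVINGGC:   %lu buckets\n", free);		/* 2048 */
	printf("RESERVE_NONE:       %lu buckets\n", free);		/* 2048 */
	printf("free_inc / unused:  %lu buckets each\n", free << 2);
	return 0;
}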
@@ -2034,7 +2062,8 @@ static void bcache_exit(void)
kobject_put(bcache_kobj);
if (bcache_wq)
destroy_workqueue(bcache_wq);
- unregister_blkdev(bcache_major, "bcache");
+ if (bcache_major)
+ unregister_blkdev(bcache_major, "bcache");
unregister_reboot_notifier(&reboot);
}
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 80d4c2bee18a..d8458d477a12 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -83,7 +83,6 @@ rw_attribute(writeback_rate);
rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
-rw_attribute(writeback_rate_d_smooth);
read_attribute(writeback_rate_debug);
read_attribute(stripe_size);
@@ -103,7 +102,6 @@ rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
-rw_attribute(freelist_percent);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
@@ -129,31 +127,41 @@ SHOW(__bch_cached_dev)
var_printf(writeback_running, "%i");
var_print(writeback_delay);
var_print(writeback_percent);
- sysfs_print(writeback_rate, dc->writeback_rate.rate);
+ sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);
var_print(writeback_rate_update_seconds);
var_print(writeback_rate_d_term);
var_print(writeback_rate_p_term_inverse);
- var_print(writeback_rate_d_smooth);
if (attr == &sysfs_writeback_rate_debug) {
+ char rate[20];
char dirty[20];
- char derivative[20];
char target[20];
- bch_hprint(dirty,
- bcache_dev_sectors_dirty(&dc->disk) << 9);
- bch_hprint(derivative, dc->writeback_rate_derivative << 9);
+ char proportional[20];
+ char derivative[20];
+ char change[20];
+ s64 next_io;
+
+ bch_hprint(rate, dc->writeback_rate.rate << 9);
+ bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
bch_hprint(target, dc->writeback_rate_target << 9);
+ bch_hprint(proportional,dc->writeback_rate_proportional << 9);
+ bch_hprint(derivative, dc->writeback_rate_derivative << 9);
+ bch_hprint(change, dc->writeback_rate_change << 9);
+
+ next_io = div64_s64(dc->writeback_rate.next - local_clock(),
+ NSEC_PER_MSEC);
return sprintf(buf,
- "rate:\t\t%u\n"
- "change:\t\t%i\n"
+ "rate:\t\t%s/sec\n"
"dirty:\t\t%s\n"
+ "target:\t\t%s\n"
+ "proportional:\t%s\n"
"derivative:\t%s\n"
- "target:\t\t%s\n",
- dc->writeback_rate.rate,
- dc->writeback_rate_change,
- dirty, derivative, target);
+ "change:\t\t%s/sec\n"
+ "next io:\t%llims\n",
+ rate, dirty, target, proportional,
+ derivative, change, next_io);
}
sysfs_hprint(dirty_data,
@@ -189,6 +197,7 @@ STORE(__cached_dev)
struct kobj_uevent_env *env;
#define d_strtoul(var) sysfs_strtoul(var, dc->var)
+#define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var) sysfs_hatoi(var, dc->var)
sysfs_strtoul(data_csum, dc->disk.data_csum);
@@ -197,16 +206,15 @@ STORE(__cached_dev)
d_strtoul(writeback_metadata);
d_strtoul(writeback_running);
d_strtoul(writeback_delay);
- sysfs_strtoul_clamp(writeback_rate,
- dc->writeback_rate.rate, 1, 1000000);
+
sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
- d_strtoul(writeback_rate_update_seconds);
+ sysfs_strtoul_clamp(writeback_rate,
+ dc->writeback_rate.rate, 1, INT_MAX);
+
+ d_strtoul_nonzero(writeback_rate_update_seconds);
d_strtoul(writeback_rate_d_term);
- d_strtoul(writeback_rate_p_term_inverse);
- sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
- dc->writeback_rate_p_term_inverse, 1, INT_MAX);
- d_strtoul(writeback_rate_d_smooth);
+ d_strtoul_nonzero(writeback_rate_p_term_inverse);
d_strtoi_h(sequential_cutoff);
d_strtoi_h(readahead);
@@ -313,7 +321,6 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_writeback_rate_update_seconds,
&sysfs_writeback_rate_d_term,
&sysfs_writeback_rate_p_term_inverse,
- &sysfs_writeback_rate_d_smooth,
&sysfs_writeback_rate_debug,
&sysfs_dirty_data,
&sysfs_stripe_size,
@@ -393,6 +400,48 @@ static struct attribute *bch_flash_dev_files[] = {
};
KTYPE(bch_flash_dev);
+struct bset_stats_op {
+ struct btree_op op;
+ size_t nodes;
+ struct bset_stats stats;
+};
+
+static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
+{
+ struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);
+
+ op->nodes++;
+ bch_btree_keys_stats(&b->keys, &op->stats);
+
+ return MAP_CONTINUE;
+}
+
+static int bch_bset_print_stats(struct cache_set *c, char *buf)
+{
+ struct bset_stats_op op;
+ int ret;
+
+ memset(&op, 0, sizeof(op));
+ bch_btree_op_init(&op.op, -1);
+
+ ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, btree_bset_stats);
+ if (ret < 0)
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE,
+ "btree nodes: %zu\n"
+ "written sets: %zu\n"
+ "unwritten sets: %zu\n"
+ "written key bytes: %zu\n"
+ "unwritten key bytes: %zu\n"
+ "floats: %zu\n"
+ "failed: %zu\n",
+ op.nodes,
+ op.stats.sets_written, op.stats.sets_unwritten,
+ op.stats.bytes_written, op.stats.bytes_unwritten,
+ op.stats.floats, op.stats.failed);
+}
+
SHOW(__bch_cache_set)
{
unsigned root_usage(struct cache_set *c)
@@ -411,7 +460,7 @@ lock_root:
rw_lock(false, b, b->level);
} while (b != c->root);
- for_each_key_filter(b, k, &iter, bch_ptr_bad)
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
bytes += bkey_bytes(k);
rw_unlock(false, b);
@@ -426,7 +475,7 @@ lock_root:
mutex_lock(&c->bucket_lock);
list_for_each_entry(b, &c->btree_cache, list)
- ret += 1 << (b->page_order + PAGE_SHIFT);
+ ret += 1 << (b->keys.page_order + PAGE_SHIFT);
mutex_unlock(&c->bucket_lock);
return ret;
@@ -483,7 +532,7 @@ lock_root:
sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
- sysfs_print_time_stats(&c->sort_time, btree_sort, ms, us);
+ sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);
sysfs_print_time_stats(&c->try_harder_time, try_harder, ms, us);
@@ -703,9 +752,6 @@ SHOW(__bch_cache)
sysfs_print(io_errors,
atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);
- sysfs_print(freelist_percent, ca->free.size * 100 /
- ((size_t) ca->sb.nbuckets));
-
if (attr == &sysfs_cache_replacement_policy)
return bch_snprint_string_list(buf, PAGE_SIZE,
cache_replacement_policies,
@@ -812,32 +858,6 @@ STORE(__bch_cache)
}
}
- if (attr == &sysfs_freelist_percent) {
- DECLARE_FIFO(long, free);
- long i;
- size_t p = strtoul_or_return(buf);
-
- p = clamp_t(size_t,
- ((size_t) ca->sb.nbuckets * p) / 100,
- roundup_pow_of_two(ca->sb.nbuckets) >> 9,
- ca->sb.nbuckets / 2);
-
- if (!init_fifo_exact(&free, p, GFP_KERNEL))
- return -ENOMEM;
-
- mutex_lock(&ca->set->bucket_lock);
-
- fifo_move(&free, &ca->free);
- fifo_swap(&free, &ca->free);
-
- mutex_unlock(&ca->set->bucket_lock);
-
- while (fifo_pop(&free, i))
- atomic_dec(&ca->buckets[i].pin);
-
- free_fifo(&free);
- }
-
if (attr == &sysfs_clear_stats) {
atomic_long_set(&ca->sectors_written, 0);
atomic_long_set(&ca->btree_sectors_written, 0);
@@ -861,7 +881,6 @@ static struct attribute *bch_cache_files[] = {
&sysfs_metadata_written,
&sysfs_io_errors,
&sysfs_clear_stats,
- &sysfs_freelist_percent,
&sysfs_cache_replacement_policy,
NULL
};
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 462214eeacbe..db3ae4c2b223 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -209,7 +209,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
{
uint64_t now = local_clock();
- d->next += div_u64(done, d->rate);
+ d->next += div_u64(done * NSEC_PER_SEC, d->rate);
+
+ if (time_before64(now + NSEC_PER_SEC, d->next))
+ d->next = now + NSEC_PER_SEC;
+
+ if (time_after64(now - NSEC_PER_SEC * 2, d->next))
+ d->next = now - NSEC_PER_SEC * 2;
return time_after64(d->next, now)
? div_u64(d->next - now, NSEC_PER_SEC / HZ)
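A minimal userspace sketch of the new delay calculation (illustrative only; HZ and the rate are assumed values). The rate is now in units per second, each completion adds done * NSEC_PER_SEC / rate to the schedule, and the schedule is clamped to at most one second ahead of and two seconds behind the current time.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 250				/* assumed jiffy rate for the example */

int main(void)
{
	uint64_t rate = 1024;		/* units (sectors) per second */
	uint64_t done = 64;		/* units just completed */
	uint64_t now = 0, next = 0;	/* ns; start exactly on schedule */

	next += done * NSEC_PER_SEC / rate;		/* 62.5 ms of budget */

	if (next > now + NSEC_PER_SEC)			/* never schedule >1s ahead */
		next = now + NSEC_PER_SEC;
	if (now > 2 * NSEC_PER_SEC && next < now - 2 * NSEC_PER_SEC)
		next = now - 2 * NSEC_PER_SEC;		/* never fall >2s behind */

	printf("sleep %llu jiffies\n", next > now ?
	       (unsigned long long)((next - now) / (NSEC_PER_SEC / HZ)) : 0ULL);
	return 0;
}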
@@ -218,10 +224,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
void bch_bio_map(struct bio *bio, void *base)
{
- size_t size = bio->bi_size;
+ size_t size = bio->bi_iter.bi_size;
struct bio_vec *bv = bio->bi_io_vec;
- BUG_ON(!bio->bi_size);
+ BUG_ON(!bio->bi_iter.bi_size);
BUG_ON(bio->bi_vcnt);
bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 362c4b3f8b4a..ac7d0d1f70d7 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -2,6 +2,7 @@
#ifndef _BCACHE_UTIL_H
#define _BCACHE_UTIL_H
+#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/llist.h>
@@ -17,11 +18,13 @@ struct closure;
#ifdef CONFIG_BCACHE_DEBUG
+#define EBUG_ON(cond) BUG_ON(cond)
#define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0)
#define atomic_inc_bug(v, i) BUG_ON(atomic_inc_return(v) <= i)
#else /* DEBUG */
+#define EBUG_ON(cond) do { if (cond); } while (0)
#define atomic_dec_bug(v) atomic_dec(v)
#define atomic_inc_bug(v, i) atomic_inc(v)
@@ -110,7 +113,7 @@ do { \
_r; \
})
-#define heap_peek(h) ((h)->size ? (h)->data[0] : NULL)
+#define heap_peek(h) ((h)->used ? (h)->data[0] : NULL)
#define heap_full(h) ((h)->used == (h)->size)
@@ -391,6 +394,11 @@ struct time_stats {
void bch_time_stats_update(struct time_stats *stats, uint64_t time);
+static inline unsigned local_clock_us(void)
+{
+ return local_clock() >> 10;
+}
+
#define NSEC_PER_ns 1L
#define NSEC_PER_us NSEC_PER_USEC
#define NSEC_PER_ms NSEC_PER_MSEC
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 99053b1251be..f4300e4c0114 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -30,38 +30,40 @@ static void __update_writeback_rate(struct cached_dev *dc)
/* PD controller */
- int change = 0;
- int64_t error;
int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
int64_t derivative = dirty - dc->disk.sectors_dirty_last;
+ int64_t proportional = dirty - target;
+ int64_t change;
dc->disk.sectors_dirty_last = dirty;
- derivative *= dc->writeback_rate_d_term;
- derivative = clamp(derivative, -dirty, dirty);
+ /* Scale to sectors per second */
- derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
- dc->writeback_rate_d_smooth, 0);
+ proportional *= dc->writeback_rate_update_seconds;
+ proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);
- /* Avoid divide by zero */
- if (!target)
- goto out;
+ derivative = div_s64(derivative, dc->writeback_rate_update_seconds);
- error = div64_s64((dirty + derivative - target) << 8, target);
+ derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
+ (dc->writeback_rate_d_term /
+ dc->writeback_rate_update_seconds) ?: 1, 0);
+
+ derivative *= dc->writeback_rate_d_term;
+ derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);
- change = div_s64((dc->writeback_rate.rate * error) >> 8,
- dc->writeback_rate_p_term_inverse);
+ change = proportional + derivative;
/* Don't increase writeback rate if the device isn't keeping up */
if (change > 0 &&
time_after64(local_clock(),
- dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
+ dc->writeback_rate.next + NSEC_PER_MSEC))
change = 0;
dc->writeback_rate.rate =
- clamp_t(int64_t, dc->writeback_rate.rate + change,
+ clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
1, NSEC_PER_MSEC);
-out:
+
+ dc->writeback_rate_proportional = proportional;
dc->writeback_rate_derivative = derivative;
dc->writeback_rate_change = change;
dc->writeback_rate_target = target;
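A worked example of the reworked controller (illustrative only; the dirty counts are invented and the EWMA smoothing of the derivative is omitted). With the defaults set later in this patch (update_seconds=5, d_term=30, p_term_inverse=6000), being 600,000 sectors over target while dirtying 50,000 sectors per 5-second window raises the rate by 550 sectors/sec.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* defaults introduced by this patch */
	int64_t update_seconds = 5, d_term = 30, p_term_inverse = 6000;

	int64_t target = 1000000;	/* desired dirty sectors */
	int64_t dirty  = 1600000;	/* dirty sectors now */
	int64_t last   = 1550000;	/* dirty sectors at the previous update */
	int64_t rate   = 1024;		/* current rate, sectors/sec */

	int64_t proportional = (dirty - target) * update_seconds / p_term_inverse;
	int64_t derivative = (dirty - last) / update_seconds;	/* EWMA omitted */

	derivative = derivative * d_term / p_term_inverse;
	rate += proportional + derivative;	/* kernel clamps to [1, NSEC_PER_MSEC] */

	printf("proportional=%lld derivative=%lld rate=%lld sectors/sec\n",
	       (long long)proportional, (long long)derivative, (long long)rate);
	return 0;
}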
@@ -87,15 +89,11 @@ static void update_writeback_rate(struct work_struct *work)
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
- uint64_t ret;
-
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
!dc->writeback_percent)
return 0;
- ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
-
- return min_t(uint64_t, ret, HZ);
+ return bch_next_delay(&dc->writeback_rate, sectors);
}
struct dirty_io {
@@ -113,7 +111,7 @@ static void dirty_init(struct keybuf_key *w)
if (!io->dc->writeback_percent)
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
- bio->bi_size = KEY_SIZE(&w->key) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
bio->bi_private = w;
bio->bi_io_vec = bio->bi_inline_vecs;
@@ -186,7 +184,7 @@ static void write_dirty(struct closure *cl)
dirty_init(w);
io->bio.bi_rw = WRITE;
- io->bio.bi_sector = KEY_START(&w->key);
+ io->bio.bi_iter.bi_sector = KEY_START(&w->key);
io->bio.bi_bdev = io->dc->bdev;
io->bio.bi_end_io = dirty_endio;
@@ -241,7 +239,7 @@ static void read_dirty(struct cached_dev *dc)
if (KEY_START(&w->key) != dc->last_read ||
jiffies_to_msecs(delay) > 50)
while (!kthread_should_stop() && delay)
- delay = schedule_timeout_interruptible(delay);
+ delay = schedule_timeout_uninterruptible(delay);
dc->last_read = KEY_OFFSET(&w->key);
@@ -255,7 +253,7 @@ static void read_dirty(struct cached_dev *dc)
io->dc = dc;
dirty_init(w);
- io->bio.bi_sector = PTR_OFFSET(&w->key, 0);
+ io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
&w->key, 0)->bdev;
io->bio.bi_rw = READ;
@@ -438,7 +436,7 @@ static int bch_writeback_thread(void *arg)
while (delay &&
!kthread_should_stop() &&
!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
- delay = schedule_timeout_interruptible(delay);
+ delay = schedule_timeout_uninterruptible(delay);
}
}
@@ -476,6 +474,8 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
sectors_dirty_init_fn, 0);
+
+ dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
}
int bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -490,18 +490,15 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_delay = 30;
dc->writeback_rate.rate = 1024;
- dc->writeback_rate_update_seconds = 30;
- dc->writeback_rate_d_term = 16;
- dc->writeback_rate_p_term_inverse = 64;
- dc->writeback_rate_d_smooth = 8;
+ dc->writeback_rate_update_seconds = 5;
+ dc->writeback_rate_d_term = 30;
+ dc->writeback_rate_p_term_inverse = 6000;
dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
"bcache_writeback");
if (IS_ERR(dc->writeback_thread))
return PTR_ERR(dc->writeback_thread);
- set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
-
INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
schedule_delayed_work(&dc->writeback_rate_update,
dc->writeback_rate_update_seconds * HZ);
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index c9ddcf4614b9..e2f8598937ac 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
return false;
if (dc->partial_stripes_expensive &&
- bcache_dev_stripe_dirty(dc, bio->bi_sector,
+ bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
bio_sectors(bio)))
return true;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 12dc29ba7399..4195a01b1535 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1635,7 +1635,7 @@ int bitmap_create(struct mddev *mddev)
sector_t blocks = mddev->resync_max_sectors;
struct file *file = mddev->bitmap_info.file;
int err;
- struct sysfs_dirent *bm = NULL;
+ struct kernfs_node *bm = NULL;
BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index df4aeb6ac6f0..30210b9c4ef9 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -225,7 +225,7 @@ struct bitmap {
wait_queue_head_t overflow_wait;
wait_queue_head_t behind_wait;
- struct sysfs_dirent *sysfs_can_clear;
+ struct kernfs_node *sysfs_can_clear;
};
/* the bitmap API */
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index 3a8cfa2645c7..dd3646111561 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -17,55 +17,24 @@
* original bio state.
*/
-struct dm_bio_vec_details {
-#if PAGE_SIZE < 65536
- __u16 bv_len;
- __u16 bv_offset;
-#else
- unsigned bv_len;
- unsigned bv_offset;
-#endif
-};
-
struct dm_bio_details {
- sector_t bi_sector;
struct block_device *bi_bdev;
- unsigned int bi_size;
- unsigned short bi_idx;
unsigned long bi_flags;
- struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES];
+ struct bvec_iter bi_iter;
};
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
{
- unsigned i;
-
- bd->bi_sector = bio->bi_sector;
bd->bi_bdev = bio->bi_bdev;
- bd->bi_size = bio->bi_size;
- bd->bi_idx = bio->bi_idx;
bd->bi_flags = bio->bi_flags;
-
- for (i = 0; i < bio->bi_vcnt; i++) {
- bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len;
- bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset;
- }
+ bd->bi_iter = bio->bi_iter;
}
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
{
- unsigned i;
-
- bio->bi_sector = bd->bi_sector;
bio->bi_bdev = bd->bi_bdev;
- bio->bi_size = bd->bi_size;
- bio->bi_idx = bd->bi_idx;
bio->bi_flags = bd->bi_flags;
-
- for (i = 0; i < bio->bi_vcnt; i++) {
- bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len;
- bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset;
- }
+ bio->bi_iter = bd->bi_iter;
}
#endif
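A short sketch of how a target typically uses the slimmed-down helpers (illustrative only; remap_and_resubmit() is a hypothetical stand-in for target-specific remapping):

struct dm_bio_details bd;

dm_bio_record(&bd, bio);	/* saves bi_bdev, bi_flags and the whole bvec_iter */
remap_and_resubmit(bio);	/* hypothetical: rewrites bio->bi_iter.bi_sector etc. */

/* on failure, undo the remap before handing the bio back or retrying */
dm_bio_restore(&bd, bio);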
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 54bdd923316f..66c5d130c8c2 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -104,6 +104,8 @@ struct dm_bufio_client {
struct list_head reserved_buffers;
unsigned need_reserved_buffers;
+ unsigned minimum_buffers;
+
struct hlist_head *cache_hash;
wait_queue_head_t free_buffer_wait;
@@ -538,7 +540,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
bio_init(&b->bio);
b->bio.bi_io_vec = b->bio_vec;
b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
- b->bio.bi_sector = block << b->c->sectors_per_block_bits;
+ b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
b->bio.bi_bdev = b->c->bdev;
b->bio.bi_end_io = end_io;
@@ -861,8 +863,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
buffers = dm_bufio_cache_size_per_client >>
(c->sectors_per_block_bits + SECTOR_SHIFT);
- if (buffers < DM_BUFIO_MIN_BUFFERS)
- buffers = DM_BUFIO_MIN_BUFFERS;
+ if (buffers < c->minimum_buffers)
+ buffers = c->minimum_buffers;
*limit_buffers = buffers;
*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
@@ -1350,6 +1352,34 @@ retry:
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);
+/*
+ * Free the given buffer.
+ *
+ * This is just a hint, if the buffer is in use or dirty, this function
+ * does nothing.
+ */
+void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
+{
+ struct dm_buffer *b;
+
+ dm_bufio_lock(c);
+
+ b = __find(c, block);
+ if (b && likely(!b->hold_count) && likely(!b->state)) {
+ __unlink_buffer(b);
+ __free_buffer_wake(b);
+ }
+
+ dm_bufio_unlock(c);
+}
+EXPORT_SYMBOL(dm_bufio_forget);
+
+void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
+{
+ c->minimum_buffers = n;
+}
+EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);
+
unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
return c->block_size;
@@ -1546,6 +1576,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
INIT_LIST_HEAD(&c->reserved_buffers);
c->need_reserved_buffers = reserved_buffers;
+ c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;
+
init_waitqueue_head(&c->free_buffer_wait);
c->async_write_error = 0;
diff --git a/drivers/md/dm-bufio.h b/drivers/md/dm-bufio.h
index b142946a9e32..c096779a7292 100644
--- a/drivers/md/dm-bufio.h
+++ b/drivers/md/dm-bufio.h
@@ -108,6 +108,18 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c);
*/
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);
+/*
+ * Free the given buffer.
+ * This is just a hint, if the buffer is in use or dirty, this function
+ * does nothing.
+ */
+void dm_bufio_forget(struct dm_bufio_client *c, sector_t block);
+
+/*
+ * Set the minimum number of buffers before cleanup happens.
+ */
+void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
+
unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
sector_t dm_bufio_get_block_number(struct dm_buffer *b);
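A minimal sketch of the two new hints from a caller's point of view (illustrative only; c is an existing dm_bufio_client and block a hypothetical block number):

dm_bufio_set_minimum_buffers(c, 64);	/* keep at least 64 buffers before cleanup kicks in */

/* once the block is known to be stale, drop it -- a no-op if it is held or dirty */
dm_bufio_forget(c, block);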
diff --git a/drivers/md/dm-builtin.c b/drivers/md/dm-builtin.c
new file mode 100644
index 000000000000..6c9049c51b2b
--- /dev/null
+++ b/drivers/md/dm-builtin.c
@@ -0,0 +1,48 @@
+#include "dm.h"
+
+/*
+ * The kobject release method must not be placed in the module itself,
+ * otherwise we are subject to module unload races.
+ *
+ * The release method is called when the last reference to the kobject is
+ * dropped. It may be called by any other kernel code that drops the last
+ * reference.
+ *
+ * The release method suffers from module unload race. We may prevent the
+ * module from being unloaded at the start of the release method (using
+ * increased module reference count or synchronizing against the release
+ * method), however there is no way to prevent the module from being
+ * unloaded at the end of the release method.
+ *
+ * If this code were placed in the dm module, the following race may
+ * happen:
+ * 1. Some other process takes a reference to dm kobject
+ * 2. The user issues ioctl function to unload the dm device
+ * 3. dm_sysfs_exit calls kobject_put, however the object is not released
+ * because of the other reference taken at step 1
+ * 4. dm_sysfs_exit waits on the completion
+ * 5. The other process that took the reference in step 1 drops it,
+ * dm_kobject_release is called from this process
+ * 6. dm_kobject_release calls complete()
+ * 7. a reschedule happens before dm_kobject_release returns
+ * 8. dm_sysfs_exit continues, the dm device is unloaded, module reference
+ * count is decremented
+ * 9. The user unloads the dm module
+ * 10. The other process that was rescheduled in step 7 continues to run,
+ * it is now executing code in unloaded module, so it crashes
+ *
+ * Note that if the process that takes the foreign reference to dm kobject
+ * has a low priority and the system is sufficiently loaded with
+ * higher-priority processes that prevent the low-priority process from
+ * being scheduled long enough, this bug may really happen.
+ *
+ * In order to fix this module unload race, we place the release method
+ * into a helper code that is compiled directly into the kernel.
+ */
+
+void dm_kobject_release(struct kobject *kobj)
+{
+ complete(dm_get_completion_from_kobject(kobj));
+}
+
+EXPORT_SYMBOL(dm_kobject_release);
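A sketch of the teardown pattern the comment above describes (illustrative only, not the exact dm code; "holder" is a hypothetical structure embedding the completion returned by dm_get_completion_from_kobject()):

kobject_put(kobj);				/* may or may not drop the last reference */
wait_for_completion(&holder->completion);	/* completed by dm_kobject_release() */
/* only now is it safe to let the module that embeds kobj be unloaded */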
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 64780ad73bb0..1e018e986610 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -72,7 +72,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t)
static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
- if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
+ if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
t->nr_seq_samples++;
else {
/*
@@ -87,7 +87,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio)
t->nr_rand_samples++;
}
- t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
+ t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
}
static void iot_check_for_pattern_switch(struct io_tracker *t)
@@ -287,9 +287,8 @@ static struct entry *alloc_entry(struct entry_pool *ep)
static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
{
struct entry *e = ep->entries + from_cblock(cblock);
- list_del(&e->list);
- INIT_LIST_HEAD(&e->list);
+ list_del_init(&e->list);
INIT_HLIST_NODE(&e->hlist);
ep->nr_allocated++;
@@ -391,6 +390,10 @@ struct mq_policy {
*/
unsigned promote_threshold;
+ unsigned discard_promote_adjustment;
+ unsigned read_promote_adjustment;
+ unsigned write_promote_adjustment;
+
/*
* The hash table allows us to quickly find an entry by origin
* block. Both pre_cache and cache entries are in here.
@@ -400,6 +403,10 @@ struct mq_policy {
struct hlist_head *table;
};
+#define DEFAULT_DISCARD_PROMOTE_ADJUSTMENT 1
+#define DEFAULT_READ_PROMOTE_ADJUSTMENT 4
+#define DEFAULT_WRITE_PROMOTE_ADJUSTMENT 8
+
/*----------------------------------------------------------------*/
/*
@@ -642,25 +649,21 @@ static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
* We bias towards reads, since they can be demoted at no cost if they
* haven't been dirtied.
*/
-#define DISCARDED_PROMOTE_THRESHOLD 1
-#define READ_PROMOTE_THRESHOLD 4
-#define WRITE_PROMOTE_THRESHOLD 8
-
static unsigned adjusted_promote_threshold(struct mq_policy *mq,
bool discarded_oblock, int data_dir)
{
if (data_dir == READ)
- return mq->promote_threshold + READ_PROMOTE_THRESHOLD;
+ return mq->promote_threshold + mq->read_promote_adjustment;
if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
/*
* We don't need to do any copying at all, so give this a
* very low threshold.
*/
- return DISCARDED_PROMOTE_THRESHOLD;
+ return mq->discard_promote_adjustment;
}
- return mq->promote_threshold + WRITE_PROMOTE_THRESHOLD;
+ return mq->promote_threshold + mq->write_promote_adjustment;
}
static bool should_promote(struct mq_policy *mq, struct entry *e,
@@ -809,7 +812,7 @@ static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
bool can_migrate, bool discarded_oblock,
int data_dir, struct policy_result *result)
{
- if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) == 1) {
+ if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {
if (can_migrate)
insert_in_cache(mq, oblock, result);
else
@@ -1135,20 +1138,28 @@ static int mq_set_config_value(struct dm_cache_policy *p,
const char *key, const char *value)
{
struct mq_policy *mq = to_mq_policy(p);
- enum io_pattern pattern;
unsigned long tmp;
- if (!strcasecmp(key, "random_threshold"))
- pattern = PATTERN_RANDOM;
- else if (!strcasecmp(key, "sequential_threshold"))
- pattern = PATTERN_SEQUENTIAL;
- else
- return -EINVAL;
-
if (kstrtoul(value, 10, &tmp))
return -EINVAL;
- mq->tracker.thresholds[pattern] = tmp;
+ if (!strcasecmp(key, "random_threshold")) {
+ mq->tracker.thresholds[PATTERN_RANDOM] = tmp;
+
+ } else if (!strcasecmp(key, "sequential_threshold")) {
+ mq->tracker.thresholds[PATTERN_SEQUENTIAL] = tmp;
+
+ } else if (!strcasecmp(key, "discard_promote_adjustment"))
+ mq->discard_promote_adjustment = tmp;
+
+ else if (!strcasecmp(key, "read_promote_adjustment"))
+ mq->read_promote_adjustment = tmp;
+
+ else if (!strcasecmp(key, "write_promote_adjustment"))
+ mq->write_promote_adjustment = tmp;
+
+ else
+ return -EINVAL;
return 0;
}
@@ -1158,9 +1169,16 @@ static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsign
ssize_t sz = 0;
struct mq_policy *mq = to_mq_policy(p);
- DMEMIT("4 random_threshold %u sequential_threshold %u",
+ DMEMIT("10 random_threshold %u "
+ "sequential_threshold %u "
+ "discard_promote_adjustment %u "
+ "read_promote_adjustment %u "
+ "write_promote_adjustment %u",
mq->tracker.thresholds[PATTERN_RANDOM],
- mq->tracker.thresholds[PATTERN_SEQUENTIAL]);
+ mq->tracker.thresholds[PATTERN_SEQUENTIAL],
+ mq->discard_promote_adjustment,
+ mq->read_promote_adjustment,
+ mq->write_promote_adjustment);
return 0;
}
@@ -1213,6 +1231,9 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
mq->hit_count = 0;
mq->generation = 0;
mq->promote_threshold = 0;
+ mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT;
+ mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT;
+ mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT;
mutex_init(&mq->lock);
spin_lock_init(&mq->tick_lock);
@@ -1244,7 +1265,7 @@ bad_pre_cache_init:
static struct dm_cache_policy_type mq_policy_type = {
.name = "mq",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.hint_size = 4,
.owner = THIS_MODULE,
.create = mq_create
@@ -1252,10 +1273,11 @@ static struct dm_cache_policy_type mq_policy_type = {
static struct dm_cache_policy_type default_policy_type = {
.name = "default",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.hint_size = 4,
.owner = THIS_MODULE,
- .create = mq_create
+ .create = mq_create,
+ .real = &mq_policy_type
};
static int __init mq_init(void)
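As a worked example of the new tunables (illustrative only; the current promote_threshold value is invented): with the defaults above, a read must be seen promote_threshold + 4 times and a write promote_threshold + 8 times before promotion, while a discarded block with free or clean cache blocks available only has to beat discard_promote_adjustment. All three values can be changed at runtime through the policy config keys emitted by mq_emit_config_values().

unsigned promote_threshold = 10;	/* hypothetical current value */

unsigned read_needed    = promote_threshold + 4;	/* 14 hits to promote a read */
unsigned write_needed   = promote_threshold + 8;	/* 18 hits to promote a write */
unsigned discard_needed = 1;	/* discarded block with free/clean cblocks available */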
diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c
index d80057968407..c1a3cee99b44 100644
--- a/drivers/md/dm-cache-policy.c
+++ b/drivers/md/dm-cache-policy.c
@@ -146,6 +146,10 @@ const char *dm_cache_policy_get_name(struct dm_cache_policy *p)
{
struct dm_cache_policy_type *t = p->private;
+ /* if t->real is set then an alias was used (e.g. "default") */
+ if (t->real)
+ return t->real->name;
+
return t->name;
}
EXPORT_SYMBOL_GPL(dm_cache_policy_get_name);
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index 052c00a84a5c..f50fe360c546 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -223,6 +223,12 @@ struct dm_cache_policy_type {
unsigned version[CACHE_POLICY_VERSION_SIZE];
/*
+ * For use by an alias dm_cache_policy_type to point to the
+ * real dm_cache_policy_type.
+ */
+ struct dm_cache_policy_type *real;
+
+ /*
* Policies may store a hint for each each cache block.
* Currently the size of this hint must be 0 or 4 bytes but we
* expect to relax this in future.
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1b1469ebe5cb..ffd472e015ca 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -85,6 +85,12 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{
bio->bi_end_io = h->bi_end_io;
bio->bi_private = h->bi_private;
+
+ /*
+ * Must bump bi_remaining to allow bio to complete with
+ * restored bi_end_io.
+ */
+ atomic_inc(&bio->bi_remaining);
}
/*----------------------------------------------------------------*/
@@ -664,15 +670,17 @@ static void remap_to_origin(struct cache *cache, struct bio *bio)
static void remap_to_cache(struct cache *cache, struct bio *bio,
dm_cblock_t cblock)
{
- sector_t bi_sector = bio->bi_sector;
+ sector_t bi_sector = bio->bi_iter.bi_sector;
bio->bi_bdev = cache->cache_dev->bdev;
if (!block_size_is_power_of_two(cache))
- bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
- sector_div(bi_sector, cache->sectors_per_block);
+ bio->bi_iter.bi_sector =
+ (from_cblock(cblock) * cache->sectors_per_block) +
+ sector_div(bi_sector, cache->sectors_per_block);
else
- bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
- (bi_sector & (cache->sectors_per_block - 1));
+ bio->bi_iter.bi_sector =
+ (from_cblock(cblock) << cache->sectors_per_block_shift) |
+ (bi_sector & (cache->sectors_per_block - 1));
}
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
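A worked example of the power-of-two remap above (illustrative only, made-up numbers): with 64 sectors per cache block, origin sector 529 mapped to cache block 3 lands at cache-device sector 209.

uint64_t cblock = 3, sectors_per_block_shift = 6;	/* 64 sectors per block */
uint64_t bi_sector = 529;				/* origin sector being remapped */

uint64_t cache_sector = (cblock << sectors_per_block_shift) |
			(bi_sector & ((1ULL << sectors_per_block_shift) - 1));
/* (3 << 6) | (529 & 63) = 192 + 17 = 209 */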
@@ -712,7 +720,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{
- sector_t block_nr = bio->bi_sector;
+ sector_t block_nr = bio->bi_iter.bi_sector;
if (!block_size_is_power_of_two(cache))
(void) sector_div(block_nr, cache->sectors_per_block);
@@ -1027,7 +1035,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{
return (bio_data_dir(bio) == WRITE) &&
- (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
+ (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}
static void avoid_copy(struct dm_cache_migration *mg)
@@ -1252,7 +1260,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
size_t pb_data_size = get_per_bio_data_size(cache);
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
- BUG_ON(bio->bi_size);
+ BUG_ON(bio->bi_iter.bi_size);
if (!pb->req_nr)
remap_to_origin(cache, bio);
else
@@ -1275,9 +1283,9 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
*/
static void process_discard_bio(struct cache *cache, struct bio *bio)
{
- dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
+ dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
cache->discard_block_size);
- dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
+ dm_block_t end_block = bio_end_sector(bio);
dm_block_t b;
end_block = block_div(end_block, cache->discard_block_size);
@@ -2826,12 +2834,13 @@ static void cache_resume(struct dm_target *ti)
/*
* Status format:
*
- * <#used metadata blocks>/<#total metadata blocks>
+ * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
+ * <cache block size> <#used cache blocks>/<#total cache blocks>
* <#read hits> <#read misses> <#write hits> <#write misses>
- * <#demotions> <#promotions> <#blocks in cache> <#dirty>
+ * <#demotions> <#promotions> <#dirty>
* <#features> <features>*
* <#core args> <core args>
- * <#policy args> <policy args>*
+ * <policy name> <#policy args> <policy args>*
*/
static void cache_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
@@ -2869,17 +2878,20 @@ static void cache_status(struct dm_target *ti, status_type_t type,
residency = policy_residency(cache->policy);
- DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ",
+ DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %llu ",
+ (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
(unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
(unsigned long long)nr_blocks_metadata,
+ cache->sectors_per_block,
+ (unsigned long long) from_cblock(residency),
+ (unsigned long long) from_cblock(cache->cache_size),
(unsigned) atomic_read(&cache->stats.read_hit),
(unsigned) atomic_read(&cache->stats.read_miss),
(unsigned) atomic_read(&cache->stats.write_hit),
(unsigned) atomic_read(&cache->stats.write_miss),
(unsigned) atomic_read(&cache->stats.demotion),
(unsigned) atomic_read(&cache->stats.promotion),
- (unsigned long long) from_cblock(residency),
- cache->nr_dirty);
+ (unsigned long long) from_cblock(cache->nr_dirty));
if (writethrough_mode(&cache->features))
DMEMIT("1 writethrough ");
@@ -2896,6 +2908,8 @@ static void cache_status(struct dm_target *ti, status_type_t type,
}
DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
+
+ DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
if (sz < maxlen) {
r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
if (r)
@@ -3129,7 +3143,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 81b0fa660452..784695d22fde 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -39,10 +39,8 @@ struct convert_context {
struct completion restart;
struct bio *bio_in;
struct bio *bio_out;
- unsigned int offset_in;
- unsigned int offset_out;
- unsigned int idx_in;
- unsigned int idx_out;
+ struct bvec_iter iter_in;
+ struct bvec_iter iter_out;
sector_t cc_sector;
atomic_t cc_pending;
};
@@ -826,10 +824,10 @@ static void crypt_convert_init(struct crypt_config *cc,
{
ctx->bio_in = bio_in;
ctx->bio_out = bio_out;
- ctx->offset_in = 0;
- ctx->offset_out = 0;
- ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
- ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
+ if (bio_in)
+ ctx->iter_in = bio_in->bi_iter;
+ if (bio_out)
+ ctx->iter_out = bio_out->bi_iter;
ctx->cc_sector = sector + cc->iv_offset;
init_completion(&ctx->restart);
}
@@ -857,8 +855,8 @@ static int crypt_convert_block(struct crypt_config *cc,
struct convert_context *ctx,
struct ablkcipher_request *req)
{
- struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
- struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+ struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
+ struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
struct dm_crypt_request *dmreq;
u8 *iv;
int r;
@@ -869,24 +867,15 @@ static int crypt_convert_block(struct crypt_config *cc,
dmreq->iv_sector = ctx->cc_sector;
dmreq->ctx = ctx;
sg_init_table(&dmreq->sg_in, 1);
- sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
- bv_in->bv_offset + ctx->offset_in);
+ sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
+ bv_in.bv_offset);
sg_init_table(&dmreq->sg_out, 1);
- sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
- bv_out->bv_offset + ctx->offset_out);
+ sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
+ bv_out.bv_offset);
- ctx->offset_in += 1 << SECTOR_SHIFT;
- if (ctx->offset_in >= bv_in->bv_len) {
- ctx->offset_in = 0;
- ctx->idx_in++;
- }
-
- ctx->offset_out += 1 << SECTOR_SHIFT;
- if (ctx->offset_out >= bv_out->bv_len) {
- ctx->offset_out = 0;
- ctx->idx_out++;
- }
+ bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
+ bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
if (cc->iv_gen_ops) {
r = cc->iv_gen_ops->generator(cc, iv, dmreq);
@@ -937,8 +926,7 @@ static int crypt_convert(struct crypt_config *cc,
atomic_set(&ctx->cc_pending, 1);
- while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
- ctx->idx_out < ctx->bio_out->bi_vcnt) {
+ while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
crypt_alloc_req(cc, ctx);
@@ -1021,7 +1009,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
size -= len;
}
- if (!clone->bi_size) {
+ if (!clone->bi_iter.bi_size) {
bio_put(clone);
return NULL;
}
@@ -1161,7 +1149,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
crypt_inc_pending(io);
clone_init(io, clone);
- clone->bi_sector = cc->start + io->sector;
+ clone->bi_iter.bi_sector = cc->start + io->sector;
generic_make_request(clone);
return 0;
@@ -1207,9 +1195,9 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
}
/* crypt_convert should have filled the clone bio */
- BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+ BUG_ON(io->ctx.iter_out.bi_size);
- clone->bi_sector = cc->start + io->sector;
+ clone->bi_iter.bi_sector = cc->start + io->sector;
if (async)
kcryptd_queue_io(io);
@@ -1224,7 +1212,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
struct dm_crypt_io *new_io;
int crypt_finished;
unsigned out_of_pages = 0;
- unsigned remaining = io->base_bio->bi_size;
+ unsigned remaining = io->base_bio->bi_iter.bi_size;
sector_t sector = io->sector;
int r;
@@ -1246,9 +1234,9 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
}
io->ctx.bio_out = clone;
- io->ctx.idx_out = 0;
+ io->ctx.iter_out = clone->bi_iter;
- remaining -= clone->bi_size;
+ remaining -= clone->bi_iter.bi_size;
sector += bio_sectors(clone);
crypt_inc_pending(io);
@@ -1290,8 +1278,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
crypt_inc_pending(new_io);
crypt_convert_init(cc, &new_io->ctx, NULL,
io->base_bio, sector);
- new_io->ctx.idx_in = io->ctx.idx_in;
- new_io->ctx.offset_in = io->ctx.offset_in;
+ new_io->ctx.iter_in = io->ctx.iter_in;
/*
* Fragments after the first use the base_io
@@ -1869,11 +1856,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
bio->bi_bdev = cc->dev->bdev;
if (bio_sectors(bio))
- bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector = cc->start +
+ dm_target_offset(ti, bio->bi_iter.bi_sector);
return DM_MAPIO_REMAPPED;
}
- io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));
+ io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
if (bio_data_dir(io->base_bio) == READ) {
if (kcryptd_io_read(io, GFP_NOWAIT))
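The per-sector walk crypt_convert_block() now performs looks roughly like this (illustrative sketch of the bvec_iter pattern, not a copy of the dm-crypt code):

struct bvec_iter iter = bio->bi_iter;

while (iter.bi_size) {
	struct bio_vec bv = bio_iter_iovec(bio, iter);	/* page/offset of the next sector */

	/* ... point a scatterlist at bv.bv_page + bv.bv_offset and crypt one sector ... */

	bio_advance_iter(bio, &iter, 1 << SECTOR_SHIFT);
}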
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 2f91d6d4a2cc..42c3a27a14cc 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -24,7 +24,6 @@ struct delay_c {
struct work_struct flush_expired_bios;
struct list_head delayed_bios;
atomic_t may_delay;
- mempool_t *delayed_pool;
struct dm_dev *dev_read;
sector_t start_read;
@@ -40,14 +39,11 @@ struct delay_c {
struct dm_delay_info {
struct delay_c *context;
struct list_head list;
- struct bio *bio;
unsigned long expires;
};
static DEFINE_MUTEX(delayed_bios_lock);
-static struct kmem_cache *delayed_cache;
-
static void handle_delayed_timer(unsigned long data)
{
struct delay_c *dc = (struct delay_c *)data;
@@ -87,13 +83,14 @@ static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
mutex_lock(&delayed_bios_lock);
list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
if (flush_all || time_after_eq(jiffies, delayed->expires)) {
+ struct bio *bio = dm_bio_from_per_bio_data(delayed,
+ sizeof(struct dm_delay_info));
list_del(&delayed->list);
- bio_list_add(&flush_bios, delayed->bio);
- if ((bio_data_dir(delayed->bio) == WRITE))
+ bio_list_add(&flush_bios, bio);
+ if ((bio_data_dir(bio) == WRITE))
delayed->context->writes--;
else
delayed->context->reads--;
- mempool_free(delayed, dc->delayed_pool);
continue;
}
@@ -185,12 +182,6 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
out:
- dc->delayed_pool = mempool_create_slab_pool(128, delayed_cache);
- if (!dc->delayed_pool) {
- DMERR("Couldn't create delayed bio pool.");
- goto bad_dev_write;
- }
-
dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
if (!dc->kdelayd_wq) {
DMERR("Couldn't start kdelayd");
@@ -206,12 +197,11 @@ out:
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
+ ti->per_bio_data_size = sizeof(struct dm_delay_info);
ti->private = dc;
return 0;
bad_queue:
- mempool_destroy(dc->delayed_pool);
-bad_dev_write:
if (dc->dev_write)
dm_put_device(ti, dc->dev_write);
bad_dev_read:
@@ -232,7 +222,6 @@ static void delay_dtr(struct dm_target *ti)
if (dc->dev_write)
dm_put_device(ti, dc->dev_write);
- mempool_destroy(dc->delayed_pool);
kfree(dc);
}
@@ -244,10 +233,9 @@ static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
if (!delay || !atomic_read(&dc->may_delay))
return 1;
- delayed = mempool_alloc(dc->delayed_pool, GFP_NOIO);
+ delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
delayed->context = dc;
- delayed->bio = bio;
delayed->expires = expires = jiffies + (delay * HZ / 1000);
mutex_lock(&delayed_bios_lock);
@@ -289,14 +277,15 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
bio->bi_bdev = dc->dev_write->bdev;
if (bio_sectors(bio))
- bio->bi_sector = dc->start_write +
- dm_target_offset(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector = dc->start_write +
+ dm_target_offset(ti, bio->bi_iter.bi_sector);
return delay_bio(dc, dc->write_delay, bio);
}
bio->bi_bdev = dc->dev_read->bdev;
- bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector = dc->start_read +
+ dm_target_offset(ti, bio->bi_iter.bi_sector);
return delay_bio(dc, dc->read_delay, bio);
}
@@ -356,13 +345,7 @@ static struct target_type delay_target = {
static int __init dm_delay_init(void)
{
- int r = -ENOMEM;
-
- delayed_cache = KMEM_CACHE(dm_delay_info, 0);
- if (!delayed_cache) {
- DMERR("Couldn't create delayed bio cache.");
- goto bad_memcache;
- }
+ int r;
r = dm_register_target(&delay_target);
if (r < 0) {
@@ -373,15 +356,12 @@ static int __init dm_delay_init(void)
return 0;
bad_register:
- kmem_cache_destroy(delayed_cache);
-bad_memcache:
return r;
}
static void __exit dm_delay_exit(void)
{
dm_unregister_target(&delay_target);
- kmem_cache_destroy(delayed_cache);
}
/* Module hooks */
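A minimal sketch of the per-bio-data pattern dm-delay switches to (illustrative only): the target declares the payload size in its constructor, then converts between the bio and its embedded payload without a mempool.

ti->per_bio_data_size = sizeof(struct dm_delay_info);	/* in delay_ctr() */

struct dm_delay_info *info = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
struct bio *orig = dm_bio_from_per_bio_data(info, sizeof(struct dm_delay_info));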
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index c80a0ec5f126..b257e46876d3 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -248,7 +248,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
bio->bi_bdev = fc->dev->bdev;
if (bio_sectors(bio))
- bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector =
+ flakey_map_sector(ti, bio->bi_iter.bi_sector);
}
static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
@@ -265,8 +266,8 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
"(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
- (bio_data_dir(bio) == WRITE) ? 'w' : 'r',
- bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes);
+ (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
+ (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
}
}
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2a20986a2fec..b2b8a10e8427 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -201,26 +201,29 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
/*
* Functions for getting the pages from a bvec.
*/
-static void bvec_get_page(struct dpages *dp,
+static void bio_get_page(struct dpages *dp,
struct page **p, unsigned long *len, unsigned *offset)
{
- struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
- *p = bvec->bv_page;
- *len = bvec->bv_len;
- *offset = bvec->bv_offset;
+ struct bio *bio = dp->context_ptr;
+ struct bio_vec bvec = bio_iovec(bio);
+ *p = bvec.bv_page;
+ *len = bvec.bv_len;
+ *offset = bvec.bv_offset;
}
-static void bvec_next_page(struct dpages *dp)
+static void bio_next_page(struct dpages *dp)
{
- struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
- dp->context_ptr = bvec + 1;
+ struct bio *bio = dp->context_ptr;
+ struct bio_vec bvec = bio_iovec(bio);
+
+ bio_advance(bio, bvec.bv_len);
}
-static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
+static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
- dp->get_page = bvec_get_page;
- dp->next_page = bvec_next_page;
- dp->context_ptr = bvec;
+ dp->get_page = bio_get_page;
+ dp->next_page = bio_next_page;
+ dp->context_ptr = bio;
}
/*
@@ -304,14 +307,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
- bio->bi_sector = where->sector + (where->count - remaining);
+ bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
bio->bi_end_io = endio;
store_io_and_region_in_bio(bio, io, region);
if (rw & REQ_DISCARD) {
num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
- bio->bi_size = num_sectors << SECTOR_SHIFT;
+ bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
remaining -= num_sectors;
} else if (rw & REQ_WRITE_SAME) {
/*
@@ -320,7 +323,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
dp->get_page(dp, &page, &len, &offset);
bio_add_page(bio, page, logical_block_size, offset);
num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
- bio->bi_size = num_sectors << SECTOR_SHIFT;
+ bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
offset = 0;
remaining -= num_sectors;
@@ -457,8 +460,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
break;
- case DM_IO_BVEC:
- bvec_dp_init(dp, io_req->mem.ptr.bvec);
+ case DM_IO_BIO:
+ bio_dp_init(dp, io_req->mem.ptr.bio);
break;
case DM_IO_VMA:
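A sketch of a caller using the renamed memory type (illustrative only; io_client and region are assumed to exist already, e.g. from dm_io_client_create()):

struct dm_io_request io_req = {
	.bi_rw = READ,
	.mem.type = DM_IO_BIO,		/* pages come straight from the bio */
	.mem.ptr.bio = bio,
	.notify.fn = NULL,		/* synchronous request */
	.client = io_client,
};

int r = dm_io(&io_req, 1, &region, NULL);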
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 4f99d267340c..53e848c10939 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -85,7 +85,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
bio->bi_bdev = lc->dev->bdev;
if (bio_sectors(bio))
- bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector =
+ linear_map_sector(ti, bio->bi_iter.bi_sector);
}
static int linear_map(struct dm_target *ti, struct bio *bio)
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 9429159d9ee3..b953db6cc229 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -10,10 +10,11 @@
#include <linux/device-mapper.h>
#include <linux/dm-log-userspace.h>
#include <linux/module.h>
+#include <linux/workqueue.h>
#include "dm-log-userspace-transfer.h"
-#define DM_LOG_USERSPACE_VSN "1.1.0"
+#define DM_LOG_USERSPACE_VSN "1.3.0"
struct flush_entry {
int type;
@@ -58,6 +59,18 @@ struct log_c {
spinlock_t flush_lock;
struct list_head mark_list;
struct list_head clear_list;
+
+ /*
+ * Workqueue for flush of clear region requests.
+ */
+ struct workqueue_struct *dmlog_wq;
+ struct delayed_work flush_log_work;
+ atomic_t sched_flush;
+
+ /*
+ * Combine userspace flush and mark requests for efficiency.
+ */
+ uint32_t integrated_flush;
};
static mempool_t *flush_entry_pool;
@@ -122,6 +135,9 @@ static int build_constructor_string(struct dm_target *ti,
*ctr_str = NULL;
+ /*
+ * Determine overall size of the string.
+ */
for (i = 0, str_size = 0; i < argc; i++)
str_size += strlen(argv[i]) + 1; /* +1 for space between args */
@@ -141,18 +157,39 @@ static int build_constructor_string(struct dm_target *ti,
return str_size;
}
+static void do_flush(struct work_struct *work)
+{
+ int r;
+ struct log_c *lc = container_of(work, struct log_c, flush_log_work.work);
+
+ atomic_set(&lc->sched_flush, 0);
+
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, NULL, 0, NULL, NULL);
+
+ if (r)
+ dm_table_event(lc->ti->table);
+}
+
/*
* userspace_ctr
*
* argv contains:
- * <UUID> <other args>
- * Where 'other args' is the userspace implementation specific log
- * arguments. An example might be:
- * <UUID> clustered-disk <arg count> <log dev> <region_size> [[no]sync]
+ * <UUID> [integrated_flush] <other args>
+ * Where 'other args' are the userspace implementation-specific log
+ * arguments.
+ *
+ * Example:
+ * <UUID> [integrated_flush] clustered-disk <arg count> <log dev>
+ * <region_size> [[no]sync]
+ *
+ * This module strips off the <UUID> and uses it for identification
+ * purposes when communicating with userspace about a log.
*
- * So, this module will strip off the <UUID> for identification purposes
- * when communicating with userspace about a log; but will pass on everything
- * else.
+ * If integrated_flush is defined, the kernel combines flush
+ * and mark requests.
+ *
+ * The rest of the line, beginning with 'clustered-disk', is passed
+ * to the userspace ctr function.
*/
static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
unsigned argc, char **argv)
@@ -188,12 +225,22 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
return -EINVAL;
}
+ lc->usr_argc = argc;
+
strncpy(lc->uuid, argv[0], DM_UUID_LEN);
+ argc--;
+ argv++;
spin_lock_init(&lc->flush_lock);
INIT_LIST_HEAD(&lc->mark_list);
INIT_LIST_HEAD(&lc->clear_list);
- str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str);
+ if (!strcasecmp(argv[0], "integrated_flush")) {
+ lc->integrated_flush = 1;
+ argc--;
+ argv++;
+ }
+
+ str_size = build_constructor_string(ti, argc, argv, &ctr_str);
if (str_size < 0) {
kfree(lc);
return str_size;
@@ -246,6 +293,19 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
DMERR("Failed to register %s with device-mapper",
devices_rdata);
}
+
+ if (lc->integrated_flush) {
+ lc->dmlog_wq = alloc_workqueue("dmlogd", WQ_MEM_RECLAIM, 0);
+ if (!lc->dmlog_wq) {
+ DMERR("couldn't start dmlogd");
+ r = -ENOMEM;
+ goto out;
+ }
+
+ INIT_DELAYED_WORK(&lc->flush_log_work, do_flush);
+ atomic_set(&lc->sched_flush, 0);
+ }
+
out:
kfree(devices_rdata);
if (r) {
@@ -253,7 +313,6 @@ out:
kfree(ctr_str);
} else {
lc->usr_argv_str = ctr_str;
- lc->usr_argc = argc;
log->context = lc;
}
@@ -264,9 +323,16 @@ static void userspace_dtr(struct dm_dirty_log *log)
{
struct log_c *lc = log->context;
+ if (lc->integrated_flush) {
+ /* flush workqueue */
+ if (atomic_read(&lc->sched_flush))
+ flush_delayed_work(&lc->flush_log_work);
+
+ destroy_workqueue(lc->dmlog_wq);
+ }
+
(void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
- NULL, 0,
- NULL, NULL);
+ NULL, 0, NULL, NULL);
if (lc->log_dev)
dm_put_device(lc->ti, lc->log_dev);
@@ -283,8 +349,7 @@ static int userspace_presuspend(struct dm_dirty_log *log)
struct log_c *lc = log->context;
r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
- NULL, 0,
- NULL, NULL);
+ NULL, 0, NULL, NULL);
return r;
}
@@ -294,9 +359,14 @@ static int userspace_postsuspend(struct dm_dirty_log *log)
int r;
struct log_c *lc = log->context;
+ /*
+ * Run planned flush earlier.
+ */
+ if (lc->integrated_flush && atomic_read(&lc->sched_flush))
+ flush_delayed_work(&lc->flush_log_work);
+
r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
- NULL, 0,
- NULL, NULL);
+ NULL, 0, NULL, NULL);
return r;
}
@@ -308,8 +378,7 @@ static int userspace_resume(struct dm_dirty_log *log)
lc->in_sync_hint = 0;
r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
- NULL, 0,
- NULL, NULL);
+ NULL, 0, NULL, NULL);
return r;
}
@@ -405,7 +474,8 @@ static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
return r;
}
-static int flush_by_group(struct log_c *lc, struct list_head *flush_list)
+static int flush_by_group(struct log_c *lc, struct list_head *flush_list,
+ int flush_with_payload)
{
int r = 0;
int count;
@@ -431,15 +501,29 @@ static int flush_by_group(struct log_c *lc, struct list_head *flush_list)
break;
}
- r = userspace_do_request(lc, lc->uuid, type,
- (char *)(group),
- count * sizeof(uint64_t),
- NULL, NULL);
- if (r) {
- /* Group send failed. Attempt one-by-one. */
- list_splice_init(&tmp_list, flush_list);
- r = flush_one_by_one(lc, flush_list);
- break;
+ if (flush_with_payload) {
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
+ (char *)(group),
+ count * sizeof(uint64_t),
+ NULL, NULL);
+ /*
+ * Integrated flush failed.
+ */
+ if (r)
+ break;
+ } else {
+ r = userspace_do_request(lc, lc->uuid, type,
+ (char *)(group),
+ count * sizeof(uint64_t),
+ NULL, NULL);
+ if (r) {
+ /*
+ * Group send failed. Attempt one-by-one.
+ */
+ list_splice_init(&tmp_list, flush_list);
+ r = flush_one_by_one(lc, flush_list);
+ break;
+ }
}
}
@@ -476,6 +560,8 @@ static int userspace_flush(struct dm_dirty_log *log)
struct log_c *lc = log->context;
LIST_HEAD(mark_list);
LIST_HEAD(clear_list);
+ int mark_list_is_empty;
+ int clear_list_is_empty;
struct flush_entry *fe, *tmp_fe;
spin_lock_irqsave(&lc->flush_lock, flags);
@@ -483,23 +569,51 @@ static int userspace_flush(struct dm_dirty_log *log)
list_splice_init(&lc->clear_list, &clear_list);
spin_unlock_irqrestore(&lc->flush_lock, flags);
- if (list_empty(&mark_list) && list_empty(&clear_list))
+ mark_list_is_empty = list_empty(&mark_list);
+ clear_list_is_empty = list_empty(&clear_list);
+
+ if (mark_list_is_empty && clear_list_is_empty)
return 0;
- r = flush_by_group(lc, &mark_list);
+ r = flush_by_group(lc, &clear_list, 0);
if (r)
- goto fail;
+ goto out;
- r = flush_by_group(lc, &clear_list);
+ if (!lc->integrated_flush) {
+ r = flush_by_group(lc, &mark_list, 0);
+ if (r)
+ goto out;
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
+ NULL, 0, NULL, NULL);
+ goto out;
+ }
+
+ /*
+ * Send integrated flush request with mark_list as payload.
+ */
+ r = flush_by_group(lc, &mark_list, 1);
if (r)
- goto fail;
+ goto out;
- r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
- NULL, 0, NULL, NULL);
+ if (mark_list_is_empty && !atomic_read(&lc->sched_flush)) {
+ /*
+ * When there are only clear region requests,
+ * we schedule a flush in the future.
+ */
+ queue_delayed_work(lc->dmlog_wq, &lc->flush_log_work, 3 * HZ);
+ atomic_set(&lc->sched_flush, 1);
+ } else {
+ /*
+ * Cancel pending flush because we
+ * have already flushed in mark_region.
+ */
+ cancel_delayed_work(&lc->flush_log_work);
+ atomic_set(&lc->sched_flush, 0);
+ }
-fail:
+out:
/*
- * We can safely remove these entries, even if failure.
+ * We can safely remove these entries, even after failure.
* Calling code will receive an error and will know that
* the log facility has failed.
*/
@@ -603,8 +717,7 @@ static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region)
rdata_size = sizeof(pkg);
r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK,
- NULL, 0,
- (char *)&pkg, &rdata_size);
+ NULL, 0, (char *)&pkg, &rdata_size);
*region = pkg.r;
return (r) ? r : (int)pkg.i;
@@ -630,8 +743,7 @@ static void userspace_set_region_sync(struct dm_dirty_log *log,
pkg.i = (int64_t)in_sync;
r = userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
- (char *)&pkg, sizeof(pkg),
- NULL, NULL);
+ (char *)&pkg, sizeof(pkg), NULL, NULL);
/*
* It would be nice to be able to report failures.
@@ -657,8 +769,7 @@ static region_t userspace_get_sync_count(struct dm_dirty_log *log)
rdata_size = sizeof(sync_count);
r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT,
- NULL, 0,
- (char *)&sync_count, &rdata_size);
+ NULL, 0, (char *)&sync_count, &rdata_size);
if (r)
return 0;
@@ -685,8 +796,7 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
switch (status_type) {
case STATUSTYPE_INFO:
r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO,
- NULL, 0,
- result, &sz);
+ NULL, 0, result, &sz);
if (r) {
sz = 0;
@@ -699,8 +809,10 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
BUG_ON(!table_args); /* There will always be a ' ' */
table_args++;
- DMEMIT("%s %u %s %s ", log->type->name, lc->usr_argc,
- lc->uuid, table_args);
+ DMEMIT("%s %u %s ", log->type->name, lc->usr_argc, lc->uuid);
+ if (lc->integrated_flush)
+ DMEMIT("integrated_flush ");
+ DMEMIT("%s ", table_args);
break;
}
return (r) ? 0 : (int)sz;
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9584443c5614..f284e0bfb25f 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -432,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
region_t region = dm_rh_bio_to_region(ms->rh, bio);
if (log->type->in_sync(log, region, 0))
- return choose_mirror(ms, bio->bi_sector) ? 1 : 0;
+ return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;
return 0;
}
@@ -442,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
*/
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
- if (unlikely(!bio->bi_size))
+ if (unlikely(!bio->bi_iter.bi_size))
return 0;
- return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
+ return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
}
static void map_bio(struct mirror *m, struct bio *bio)
{
bio->bi_bdev = m->dev->bdev;
- bio->bi_sector = map_sector(m, bio);
+ bio->bi_iter.bi_sector = map_sector(m, bio);
}
static void map_region(struct dm_io_region *io, struct mirror *m,
@@ -526,8 +526,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
struct dm_io_region io;
struct dm_io_request io_req = {
.bi_rw = READ,
- .mem.type = DM_IO_BVEC,
- .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+ .mem.type = DM_IO_BIO,
+ .mem.ptr.bio = bio,
.notify.fn = read_callback,
.notify.context = bio,
.client = m->ms->io_client,
@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
* We can only read balance if the region is in sync.
*/
if (likely(region_in_sync(ms, region, 1)))
- m = choose_mirror(ms, bio->bi_sector);
+ m = choose_mirror(ms, bio->bi_iter.bi_sector);
else if (m && atomic_read(&m->error_count))
m = NULL;
@@ -629,8 +629,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
struct mirror *m;
struct dm_io_request io_req = {
.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
- .mem.type = DM_IO_BVEC,
- .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+ .mem.type = DM_IO_BIO,
+ .mem.ptr.bio = bio,
.notify.fn = write_callback,
.notify.context = bio,
.client = ms->io_client,
@@ -1181,7 +1181,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
* The region is in-sync and we can perform reads directly.
* Store enough information so we can retry if it fails.
*/
- m = choose_mirror(ms, bio->bi_sector);
+ m = choose_mirror(ms, bio->bi_iter.bi_sector);
if (unlikely(!m))
return -EIO;
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 69732e03eb34..b929fd5f4984 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -126,7 +126,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
- return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
+ return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
+ rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 2d2b1b7588d7..afc3d017de4c 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -13,10 +13,13 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
+#include "dm-bufio.h"
#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32 /* 16KB */
+#define DM_PREFETCH_CHUNKS 12
+
/*-----------------------------------------------------------------
* Persistent snapshots, by persistent we mean that the snapshot
* will survive a reboot.
@@ -257,6 +260,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
INIT_WORK_ONSTACK(&req.work, do_metadata);
queue_work(ps->metadata_wq, &req.work);
flush_workqueue(ps->metadata_wq);
+ destroy_work_on_stack(&req.work);
return req.result;
}
@@ -401,17 +405,18 @@ static int write_header(struct pstore *ps)
/*
* Access functions for the disk exceptions, these do the endian conversions.
*/
-static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
+static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
+ uint32_t index)
{
BUG_ON(index >= ps->exceptions_per_area);
- return ((struct disk_exception *) ps->area) + index;
+ return ((struct disk_exception *) ps_area) + index;
}
-static void read_exception(struct pstore *ps,
+static void read_exception(struct pstore *ps, void *ps_area,
uint32_t index, struct core_exception *result)
{
- struct disk_exception *de = get_exception(ps, index);
+ struct disk_exception *de = get_exception(ps, ps_area, index);
/* copy it */
result->old_chunk = le64_to_cpu(de->old_chunk);
@@ -421,7 +426,7 @@ static void read_exception(struct pstore *ps,
static void write_exception(struct pstore *ps,
uint32_t index, struct core_exception *e)
{
- struct disk_exception *de = get_exception(ps, index);
+ struct disk_exception *de = get_exception(ps, ps->area, index);
/* copy it */
de->old_chunk = cpu_to_le64(e->old_chunk);
@@ -430,7 +435,7 @@ static void write_exception(struct pstore *ps,
static void clear_exception(struct pstore *ps, uint32_t index)
{
- struct disk_exception *de = get_exception(ps, index);
+ struct disk_exception *de = get_exception(ps, ps->area, index);
/* clear it */
de->old_chunk = 0;
@@ -442,7 +447,7 @@ static void clear_exception(struct pstore *ps, uint32_t index)
* 'full' is filled in to indicate if the area has been
* filled.
*/
-static int insert_exceptions(struct pstore *ps,
+static int insert_exceptions(struct pstore *ps, void *ps_area,
int (*callback)(void *callback_context,
chunk_t old, chunk_t new),
void *callback_context,
@@ -456,7 +461,7 @@ static int insert_exceptions(struct pstore *ps,
*full = 1;
for (i = 0; i < ps->exceptions_per_area; i++) {
- read_exception(ps, i, &e);
+ read_exception(ps, ps_area, i, &e);
/*
* If the new_chunk is pointing at the start of
@@ -493,26 +498,72 @@ static int read_exceptions(struct pstore *ps,
void *callback_context)
{
int r, full = 1;
+ struct dm_bufio_client *client;
+ chunk_t prefetch_area = 0;
+
+ client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
+ ps->store->chunk_size << SECTOR_SHIFT,
+ 1, 0, NULL, NULL);
+
+ if (IS_ERR(client))
+ return PTR_ERR(client);
+
+ /*
+ * Setup for one current buffer + desired readahead buffers.
+ */
+ dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);
/*
 * Keep reading chunks and inserting exceptions until
* we find a partially full area.
*/
for (ps->current_area = 0; full; ps->current_area++) {
- r = area_io(ps, READ);
- if (r)
- return r;
+ struct dm_buffer *bp;
+ void *area;
+ chunk_t chunk;
+
+ if (unlikely(prefetch_area < ps->current_area))
+ prefetch_area = ps->current_area;
+
+ if (DM_PREFETCH_CHUNKS) do {
+ chunk_t pf_chunk = area_location(ps, prefetch_area);
+ if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
+ break;
+ dm_bufio_prefetch(client, pf_chunk, 1);
+ prefetch_area++;
+ if (unlikely(!prefetch_area))
+ break;
+ } while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);
+
+ chunk = area_location(ps, ps->current_area);
+
+ area = dm_bufio_read(client, chunk, &bp);
+ if (unlikely(IS_ERR(area))) {
+ r = PTR_ERR(area);
+ goto ret_destroy_bufio;
+ }
- r = insert_exceptions(ps, callback, callback_context, &full);
- if (r)
- return r;
+ r = insert_exceptions(ps, area, callback, callback_context,
+ &full);
+
+ dm_bufio_release(bp);
+
+ dm_bufio_forget(client, chunk);
+
+ if (unlikely(r))
+ goto ret_destroy_bufio;
}
ps->current_area--;
skip_metadata(ps);
- return 0;
+ r = 0;
+
+ret_destroy_bufio:
+ dm_bufio_client_destroy(client);
+
+ return r;
}
static struct pstore *get_info(struct dm_exception_store *store)
@@ -733,7 +784,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
ps->current_committed = ps->exceptions_per_area;
}
- read_exception(ps, ps->current_committed - 1, &ce);
+ read_exception(ps, ps->area, ps->current_committed - 1, &ce);
*last_old_chunk = ce.old_chunk;
*last_new_chunk = ce.new_chunk;
@@ -743,8 +794,8 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
*/
for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
nr_consecutive++) {
- read_exception(ps, ps->current_committed - 1 - nr_consecutive,
- &ce);
+ read_exception(ps, ps->area,
+ ps->current_committed - 1 - nr_consecutive, &ce);
if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
ce.new_chunk != *last_new_chunk - nr_consecutive)
break;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 944690bafd93..ebddef5237e4 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -610,12 +610,12 @@ static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
return NULL;
}
-static struct dm_exception *alloc_completed_exception(void)
+static struct dm_exception *alloc_completed_exception(gfp_t gfp)
{
struct dm_exception *e;
- e = kmem_cache_alloc(exception_cache, GFP_NOIO);
- if (!e)
+ e = kmem_cache_alloc(exception_cache, gfp);
+ if (!e && gfp == GFP_NOIO)
e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);
return e;
@@ -697,7 +697,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
struct dm_snapshot *s = context;
struct dm_exception *e;
- e = alloc_completed_exception();
+ e = alloc_completed_exception(GFP_KERNEL);
if (!e)
return -ENOMEM;
@@ -1405,7 +1405,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
goto out;
}
- e = alloc_completed_exception();
+ e = alloc_completed_exception(GFP_NOIO);
if (!e) {
down_write(&s->lock);
__invalidate_snapshot(s, -ENOMEM);
@@ -1438,6 +1438,7 @@ out:
if (full_bio) {
full_bio->bi_end_io = pe->full_bio_end_io;
full_bio->bi_private = pe->full_bio_private;
+ atomic_inc(&full_bio->bi_remaining);
}
free_pending_exception(pe);
@@ -1619,11 +1620,10 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
struct bio *bio, chunk_t chunk)
{
bio->bi_bdev = s->cow->bdev;
- bio->bi_sector = chunk_to_sector(s->store,
- dm_chunk_number(e->new_chunk) +
- (chunk - e->old_chunk)) +
- (bio->bi_sector &
- s->store->chunk_mask);
+ bio->bi_iter.bi_sector =
+ chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
+ (chunk - e->old_chunk)) +
+ (bio->bi_iter.bi_sector & s->store->chunk_mask);
}
static int snapshot_map(struct dm_target *ti, struct bio *bio)
@@ -1641,7 +1641,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
- chunk = sector_to_chunk(s->store, bio->bi_sector);
+ chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
/* Full snapshots are not usable */
/* To get here the table must be live so s->active is always set. */
@@ -1702,7 +1702,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
r = DM_MAPIO_SUBMITTED;
if (!pe->started &&
- bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
+ bio->bi_iter.bi_size ==
+ (s->store->chunk_size << SECTOR_SHIFT)) {
pe->started = 1;
up_write(&s->lock);
start_full_bio(pe, bio);
@@ -1758,7 +1759,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
- chunk = sector_to_chunk(s->store, bio->bi_sector);
+ chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
down_write(&s->lock);
@@ -2095,7 +2096,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
if (o)
- r = __origin_write(&o->snapshots, bio->bi_sector, bio);
+ r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
up_read(&_origins_lock);
return r;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 73c1712dad96..d1600d2aa2e2 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -259,13 +259,15 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
{
sector_t begin, end;
- stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
+ stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
+ target_stripe, &begin);
stripe_map_range_sector(sc, bio_end_sector(bio),
target_stripe, &end);
if (begin < end) {
bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
- bio->bi_sector = begin + sc->stripe[target_stripe].physical_start;
- bio->bi_size = to_bytes(end - begin);
+ bio->bi_iter.bi_sector = begin +
+ sc->stripe[target_stripe].physical_start;
+ bio->bi_iter.bi_size = to_bytes(end - begin);
return DM_MAPIO_REMAPPED;
} else {
/* The range doesn't map to the target stripe */
@@ -293,9 +295,10 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
return stripe_map_range(sc, bio, target_bio_nr);
}
- stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
+ stripe_map_sector(sc, bio->bi_iter.bi_sector,
+ &stripe, &bio->bi_iter.bi_sector);
- bio->bi_sector += sc->stripe[stripe].physical_start;
+ bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
bio->bi_bdev = sc->stripe[stripe].dev->bdev;
return DM_MAPIO_REMAPPED;
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index ff9ac4be4721..09a688b3d48c 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -311,11 +311,11 @@ error:
static int switch_map(struct dm_target *ti, struct bio *bio)
{
struct switch_ctx *sctx = ti->private;
- sector_t offset = dm_target_offset(ti, bio->bi_sector);
+ sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
unsigned path_nr = switch_get_path_nr(sctx, offset);
bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev;
- bio->bi_sector = sctx->path_list[path_nr].start + offset;
+ bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
return DM_MAPIO_REMAPPED;
}
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index 84d2b91e4efb..c62c5ab6aed5 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -86,6 +86,7 @@ static const struct sysfs_ops dm_sysfs_ops = {
static struct kobj_type dm_ktype = {
.sysfs_ops = &dm_sysfs_ops,
.default_attrs = dm_attrs,
+ .release = dm_kobject_release,
};
/*
@@ -104,5 +105,7 @@ int dm_sysfs_init(struct mapped_device *md)
*/
void dm_sysfs_exit(struct mapped_device *md)
{
- kobject_put(dm_kobject(md));
+ struct kobject *kobj = dm_kobject(md);
+ kobject_put(kobj);
+ wait_for_completion(dm_get_completion_from_kobject(kobj));
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3ba6a3859ce3..6a7f2b83a126 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -155,7 +155,6 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
{
sector_t *n_highs;
struct dm_target *n_targets;
- int n = t->num_targets;
/*
* Allocate both the target array and offset array at once.
@@ -169,12 +168,7 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
n_targets = (struct dm_target *) (n_highs + num);
- if (n) {
- memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
- memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
- }
-
- memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
+ memset(n_highs, -1, sizeof(*n_highs) * num);
vfree(t->highs);
t->num_allocated = num;
@@ -261,17 +255,6 @@ void dm_table_destroy(struct dm_table *t)
}
/*
- * Checks to see if we need to extend highs or targets.
- */
-static inline int check_space(struct dm_table *t)
-{
- if (t->num_targets >= t->num_allocated)
- return alloc_targets(t, t->num_allocated * 2);
-
- return 0;
-}
-
-/*
* See if we've already got a device in the list.
*/
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
@@ -731,8 +714,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
return -EINVAL;
}
- if ((r = check_space(t)))
- return r;
+ BUG_ON(t->num_targets >= t->num_allocated);
tgt = t->targets + t->num_targets;
memset(tgt, 0, sizeof(*tgt));
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 8a30ad54bd46..7da347665552 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1349,6 +1349,12 @@ dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
return td->id;
}
+/*
+ * Check whether @time (of block creation) is older than @td's last snapshot.
+ * If so then the associated block is shared with the last snapshot device.
+ * Any block on a device created *after* the device last got snapshotted is
+ * necessarily not shared.
+ */
static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
{
return td->snapshotted_time > time;
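A concrete reading of the comment above, with illustrative numbers only: if td->snapshotted_time is 5, a block created at time 3 predates the last snapshot and is therefore shared with it (the function returns true), while a block created at time 7 was allocated after that snapshot and cannot be shared (it returns false).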
@@ -1458,6 +1464,20 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
return r;
}
+int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
+{
+ int r;
+ uint32_t ref_count;
+
+ down_read(&pmd->root_lock);
+ r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
+ if (!r)
+ *result = (ref_count != 0);
+ up_read(&pmd->root_lock);
+
+ return r;
+}
+
bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
{
int r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 7bcc0e1d6238..9a368567632f 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -131,7 +131,7 @@ dm_thin_id dm_thin_dev_id(struct dm_thin_device *td);
struct dm_thin_lookup_result {
dm_block_t block;
- unsigned shared:1;
+ bool shared:1;
};
/*
@@ -181,6 +181,8 @@ int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result);
int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
+int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+
/*
* Returns -ENOSPC if the new size is too small and already allocated
* blocks would be lost.
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ee29037ffc2e..faaf944597ab 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -144,6 +144,7 @@ struct pool_features {
bool zero_new_blocks:1;
bool discard_enabled:1;
bool discard_passdown:1;
+ bool error_if_no_space:1;
};
struct thin_c;
@@ -163,8 +164,7 @@ struct pool {
int sectors_per_block_shift;
struct pool_features pf;
- unsigned low_water_triggered:1; /* A dm event has been sent */
- unsigned no_free_space:1; /* A -ENOSPC warning has been issued */
+ bool low_water_triggered:1; /* A dm event has been sent */
struct dm_bio_prison *prison;
struct dm_kcopyd_client *copier;
@@ -198,7 +198,8 @@ struct pool {
};
static enum pool_mode get_pool_mode(struct pool *pool);
-static void set_pool_mode(struct pool *pool, enum pool_mode mode);
+static void out_of_data_space(struct pool *pool);
+static void metadata_operation_failed(struct pool *pool, const char *op, int r);
/*
* Target context for a pool.
@@ -413,7 +414,7 @@ static bool block_size_is_power_of_two(struct pool *pool)
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
- sector_t block_nr = bio->bi_sector;
+ sector_t block_nr = bio->bi_iter.bi_sector;
if (block_size_is_power_of_two(pool))
block_nr >>= pool->sectors_per_block_shift;
@@ -426,14 +427,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
struct pool *pool = tc->pool;
- sector_t bi_sector = bio->bi_sector;
+ sector_t bi_sector = bio->bi_iter.bi_sector;
bio->bi_bdev = tc->pool_dev->bdev;
if (block_size_is_power_of_two(pool))
- bio->bi_sector = (block << pool->sectors_per_block_shift) |
- (bi_sector & (pool->sectors_per_block - 1));
+ bio->bi_iter.bi_sector =
+ (block << pool->sectors_per_block_shift) |
+ (bi_sector & (pool->sectors_per_block - 1));
else
- bio->bi_sector = (block * pool->sectors_per_block) +
+ bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
sector_div(bi_sector, pool->sectors_per_block);
}
@@ -509,15 +511,16 @@ static void remap_and_issue(struct thin_c *tc, struct bio *bio,
struct dm_thin_new_mapping {
struct list_head list;
- unsigned quiesced:1;
- unsigned prepared:1;
- unsigned pass_discard:1;
+ bool quiesced:1;
+ bool prepared:1;
+ bool pass_discard:1;
+ bool definitely_not_shared:1;
+ int err;
struct thin_c *tc;
dm_block_t virt_block;
dm_block_t data_block;
struct dm_bio_prison_cell *cell, *cell2;
- int err;
/*
* If the bio covers the whole area of a block then we can avoid
@@ -534,7 +537,7 @@ static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
struct pool *pool = m->tc->pool;
if (m->quiesced && m->prepared) {
- list_add(&m->list, &pool->prepared_mappings);
+ list_add_tail(&m->list, &pool->prepared_mappings);
wake_worker(pool);
}
}
@@ -548,7 +551,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
m->err = read_err || write_err ? -EIO : 0;
spin_lock_irqsave(&pool->lock, flags);
- m->prepared = 1;
+ m->prepared = true;
__maybe_add_mapping(m);
spin_unlock_irqrestore(&pool->lock, flags);
}
@@ -563,7 +566,7 @@ static void overwrite_endio(struct bio *bio, int err)
m->err = err;
spin_lock_irqsave(&pool->lock, flags);
- m->prepared = 1;
+ m->prepared = true;
__maybe_add_mapping(m);
spin_unlock_irqrestore(&pool->lock, flags);
}
@@ -610,8 +613,10 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
- if (m->bio)
+ if (m->bio) {
m->bio->bi_end_io = m->saved_bi_end_io;
+ atomic_inc(&m->bio->bi_remaining);
+ }
cell_error(m->tc->pool, m->cell);
list_del(&m->list);
mempool_free(m, m->tc->pool->mapping_pool);
@@ -625,8 +630,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
int r;
bio = m->bio;
- if (bio)
+ if (bio) {
bio->bi_end_io = m->saved_bi_end_io;
+ atomic_inc(&bio->bi_remaining);
+ }
if (m->err) {
cell_error(pool, m->cell);
@@ -640,9 +647,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
*/
r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
if (r) {
- DMERR_LIMIT("%s: dm_thin_insert_block() failed: error = %d",
- dm_device_name(pool->pool_md), r);
- set_pool_mode(pool, PM_READ_ONLY);
+ metadata_operation_failed(pool, "dm_thin_insert_block", r);
cell_error(pool, m->cell);
goto out;
}
@@ -683,7 +688,15 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
cell_defer_no_holder(tc, m->cell2);
if (m->pass_discard)
- remap_and_issue(tc, m->bio, m->data_block);
+ if (m->definitely_not_shared)
+ remap_and_issue(tc, m->bio, m->data_block);
+ else {
+ bool used = false;
+ if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
+ bio_endio(m->bio, 0);
+ else
+ remap_and_issue(tc, m->bio, m->data_block);
+ }
else
bio_endio(m->bio, 0);
@@ -723,7 +736,8 @@ static void process_prepared(struct pool *pool, struct list_head *head,
*/
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
- return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
+ return bio->bi_iter.bi_size ==
+ (pool->sectors_per_block << SECTOR_SHIFT);
}
static int io_overwrites_block(struct pool *pool, struct bio *bio)
@@ -751,13 +765,17 @@ static int ensure_next_mapping(struct pool *pool)
static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
- struct dm_thin_new_mapping *r = pool->next_mapping;
+ struct dm_thin_new_mapping *m = pool->next_mapping;
BUG_ON(!pool->next_mapping);
+ memset(m, 0, sizeof(struct dm_thin_new_mapping));
+ INIT_LIST_HEAD(&m->list);
+ m->bio = NULL;
+
pool->next_mapping = NULL;
- return r;
+ return m;
}
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
@@ -769,18 +787,13 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
struct pool *pool = tc->pool;
struct dm_thin_new_mapping *m = get_next_mapping(pool);
- INIT_LIST_HEAD(&m->list);
- m->quiesced = 0;
- m->prepared = 0;
m->tc = tc;
m->virt_block = virt_block;
m->data_block = data_dest;
m->cell = cell;
- m->err = 0;
- m->bio = NULL;
if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
- m->quiesced = 1;
+ m->quiesced = true;
/*
* IO to pool_dev remaps to the pool target's data_dev.
@@ -840,15 +853,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
struct pool *pool = tc->pool;
struct dm_thin_new_mapping *m = get_next_mapping(pool);
- INIT_LIST_HEAD(&m->list);
- m->quiesced = 1;
- m->prepared = 0;
+ m->quiesced = true;
+ m->prepared = false;
m->tc = tc;
m->virt_block = virt_block;
m->data_block = data_block;
m->cell = cell;
- m->err = 0;
- m->bio = NULL;
/*
* If the whole block of data is being overwritten or we are not
@@ -895,41 +905,42 @@ static int commit(struct pool *pool)
return -EINVAL;
r = dm_pool_commit_metadata(pool->pmd);
- if (r) {
- DMERR_LIMIT("%s: dm_pool_commit_metadata failed: error = %d",
- dm_device_name(pool->pool_md), r);
- set_pool_mode(pool, PM_READ_ONLY);
- }
+ if (r)
+ metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
return r;
}
-static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
+static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
- int r;
- dm_block_t free_blocks;
unsigned long flags;
- struct pool *pool = tc->pool;
-
- /*
- * Once no_free_space is set we must not allow allocation to succeed.
- * Otherwise it is difficult to explain, debug, test and support.
- */
- if (pool->no_free_space)
- return -ENOSPC;
-
- r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
- if (r)
- return r;
if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
DMWARN("%s: reached low water mark for data device: sending event.",
dm_device_name(pool->pool_md));
spin_lock_irqsave(&pool->lock, flags);
- pool->low_water_triggered = 1;
+ pool->low_water_triggered = true;
spin_unlock_irqrestore(&pool->lock, flags);
dm_table_event(pool->ti->table);
}
+}
+
+static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
+{
+ int r;
+ dm_block_t free_blocks;
+ struct pool *pool = tc->pool;
+
+ if (get_pool_mode(pool) != PM_WRITE)
+ return -EINVAL;
+
+ r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
+ return r;
+ }
+
+ check_low_water_mark(pool, free_blocks);
if (!free_blocks) {
/*
@@ -941,35 +952,20 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
return r;
r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
- if (r)
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
return r;
+ }
- /*
- * If we still have no space we set a flag to avoid
- * doing all this checking and return -ENOSPC. This
- * flag serves as a latch that disallows allocations from
- * this pool until the admin takes action (e.g. resize or
- * table reload).
- */
if (!free_blocks) {
- DMWARN("%s: no free data space available.",
- dm_device_name(pool->pool_md));
- spin_lock_irqsave(&pool->lock, flags);
- pool->no_free_space = 1;
- spin_unlock_irqrestore(&pool->lock, flags);
+ out_of_data_space(pool);
return -ENOSPC;
}
}
r = dm_pool_alloc_data_block(pool->pmd, result);
if (r) {
- if (r == -ENOSPC &&
- !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
- !free_blocks) {
- DMWARN("%s: no free metadata space available.",
- dm_device_name(pool->pool_md));
- set_pool_mode(pool, PM_READ_ONLY);
- }
+ metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
return r;
}
@@ -992,7 +988,21 @@ static void retry_on_resume(struct bio *bio)
spin_unlock_irqrestore(&pool->lock, flags);
}
-static void no_space(struct pool *pool, struct dm_bio_prison_cell *cell)
+static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
+{
+ /*
+ * When pool is read-only, no cell locking is needed because
+ * nothing is changing.
+ */
+ WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY);
+
+ if (pool->pf.error_if_no_space)
+ bio_io_error(bio);
+ else
+ retry_on_resume(bio);
+}
+
+static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
struct bio *bio;
struct bio_list bios;
@@ -1001,7 +1011,7 @@ static void no_space(struct pool *pool, struct dm_bio_prison_cell *cell)
cell_release(pool, cell, &bios);
while ((bio = bio_list_pop(&bios)))
- retry_on_resume(bio);
+ handle_unserviceable_bio(pool, bio);
}
static void process_discard(struct thin_c *tc, struct bio *bio)
@@ -1040,17 +1050,17 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
*/
m = get_next_mapping(pool);
m->tc = tc;
- m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
+ m->pass_discard = pool->pf.discard_passdown;
+ m->definitely_not_shared = !lookup_result.shared;
m->virt_block = block;
m->data_block = lookup_result.block;
m->cell = cell;
m->cell2 = cell2;
- m->err = 0;
m->bio = bio;
if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
spin_lock_irqsave(&pool->lock, flags);
- list_add(&m->list, &pool->prepared_discards);
+ list_add_tail(&m->list, &pool->prepared_discards);
spin_unlock_irqrestore(&pool->lock, flags);
wake_worker(pool);
}
@@ -1105,13 +1115,12 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
break;
case -ENOSPC:
- no_space(pool, cell);
+ retry_bios_on_resume(pool, cell);
break;
default:
DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
__func__, r);
- set_pool_mode(pool, PM_READ_ONLY);
cell_error(pool, cell);
break;
}
@@ -1133,7 +1142,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
if (bio_detain(pool, &key, bio, &cell))
return;
- if (bio_data_dir(bio) == WRITE && bio->bi_size)
+ if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
break_sharing(tc, bio, block, &key, lookup_result, cell);
else {
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -1156,7 +1165,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
/*
* Remap empty bios (flushes) immediately, without provisioning.
*/
- if (!bio->bi_size) {
+ if (!bio->bi_iter.bi_size) {
inc_all_io_entry(pool, bio);
cell_defer_no_holder(tc, cell);
@@ -1184,13 +1193,12 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
break;
case -ENOSPC:
- no_space(pool, cell);
+ retry_bios_on_resume(pool, cell);
break;
default:
DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
__func__, r);
- set_pool_mode(pool, PM_READ_ONLY);
cell_error(pool, cell);
break;
}
@@ -1256,8 +1264,8 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
switch (r) {
case 0:
- if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
- bio_io_error(bio);
+ if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
+ handle_unserviceable_bio(tc->pool, bio);
else {
inc_all_io_entry(tc->pool, bio);
remap_and_issue(tc, bio, lookup_result.block);
@@ -1266,7 +1274,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
case -ENODATA:
if (rw != READ) {
- bio_io_error(bio);
+ handle_unserviceable_bio(tc->pool, bio);
break;
}
@@ -1390,16 +1398,16 @@ static enum pool_mode get_pool_mode(struct pool *pool)
return pool->pf.mode;
}
-static void set_pool_mode(struct pool *pool, enum pool_mode mode)
+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
{
int r;
+ enum pool_mode old_mode = pool->pf.mode;
- pool->pf.mode = mode;
-
- switch (mode) {
+ switch (new_mode) {
case PM_FAIL:
- DMERR("%s: switching pool to failure mode",
- dm_device_name(pool->pool_md));
+ if (old_mode != new_mode)
+ DMERR("%s: switching pool to failure mode",
+ dm_device_name(pool->pool_md));
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_fail;
pool->process_discard = process_bio_fail;
@@ -1408,13 +1416,15 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
break;
case PM_READ_ONLY:
- DMERR("%s: switching pool to read-only mode",
- dm_device_name(pool->pool_md));
+ if (old_mode != new_mode)
+ DMERR("%s: switching pool to read-only mode",
+ dm_device_name(pool->pool_md));
r = dm_pool_abort_metadata(pool->pmd);
if (r) {
DMERR("%s: aborting transaction failed",
dm_device_name(pool->pool_md));
- set_pool_mode(pool, PM_FAIL);
+ new_mode = PM_FAIL;
+ set_pool_mode(pool, new_mode);
} else {
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_read_only;
@@ -1425,6 +1435,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
break;
case PM_WRITE:
+ if (old_mode != new_mode)
+ DMINFO("%s: switching pool to write mode",
+ dm_device_name(pool->pool_md));
dm_pool_metadata_read_write(pool->pmd);
pool->process_bio = process_bio;
pool->process_discard = process_discard;
@@ -1432,6 +1445,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
pool->process_prepared_discard = process_prepared_discard;
break;
}
+
+ pool->pf.mode = new_mode;
+}
+
+/*
+ * Rather than calling set_pool_mode() directly, use these helpers, which
+ * describe the reason for the mode degradation.
+ */
+static void out_of_data_space(struct pool *pool)
+{
+ DMERR_LIMIT("%s: no free data space available.",
+ dm_device_name(pool->pool_md));
+ set_pool_mode(pool, PM_READ_ONLY);
+}
+
+static void metadata_operation_failed(struct pool *pool, const char *op, int r)
+{
+ dm_block_t free_blocks;
+
+ DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
+ dm_device_name(pool->pool_md), op, r);
+
+ if (r == -ENOSPC &&
+ !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
+ !free_blocks)
+ DMERR_LIMIT("%s: no free metadata space available.",
+ dm_device_name(pool->pool_md));
+
+ set_pool_mode(pool, PM_READ_ONLY);
}
/*----------------------------------------------------------------*/
@@ -1538,9 +1580,9 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
/*
* This block isn't provisioned, and we have no way
- * of doing so. Just error it.
+ * of doing so.
*/
- bio_io_error(bio);
+ handle_unserviceable_bio(tc->pool, bio);
return DM_MAPIO_SUBMITTED;
}
/* fall through */
@@ -1648,6 +1690,17 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
enum pool_mode new_mode = pt->adjusted_pf.mode;
/*
+ * Don't change the pool's mode until set_pool_mode() below.
+ * Otherwise the pool's process_* function pointers may
+ * not match the desired pool mode.
+ */
+ pt->adjusted_pf.mode = old_mode;
+
+ pool->ti = ti;
+ pool->pf = pt->adjusted_pf;
+ pool->low_water_blocks = pt->low_water_blocks;
+
+ /*
* If we were in PM_FAIL mode, rollback of metadata failed. We're
* not going to recover without a thin_repair. So we never let the
* pool move out of the old mode. On the other hand a PM_READ_ONLY
@@ -1657,10 +1710,6 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
if (old_mode == PM_FAIL)
new_mode = old_mode;
- pool->ti = ti;
- pool->low_water_blocks = pt->low_water_blocks;
- pool->pf = pt->adjusted_pf;
-
set_pool_mode(pool, new_mode);
return 0;
@@ -1682,6 +1731,7 @@ static void pool_features_init(struct pool_features *pf)
pf->zero_new_blocks = true;
pf->discard_enabled = true;
pf->discard_passdown = true;
+ pf->error_if_no_space = false;
}
static void __pool_destroy(struct pool *pool)
@@ -1772,8 +1822,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
bio_list_init(&pool->deferred_flush_bios);
INIT_LIST_HEAD(&pool->prepared_mappings);
INIT_LIST_HEAD(&pool->prepared_discards);
- pool->low_water_triggered = 0;
- pool->no_free_space = 0;
+ pool->low_water_triggered = false;
bio_list_init(&pool->retry_on_resume_list);
pool->shared_read_ds = dm_deferred_set_create();
@@ -1898,7 +1947,7 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
const char *arg_name;
static struct dm_arg _args[] = {
- {0, 3, "Invalid number of pool feature arguments"},
+ {0, 4, "Invalid number of pool feature arguments"},
};
/*
@@ -1927,6 +1976,9 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
else if (!strcasecmp(arg_name, "read_only"))
pf->mode = PM_READ_ONLY;
+ else if (!strcasecmp(arg_name, "error_if_no_space"))
+ pf->error_if_no_space = true;
+
else {
ti->error = "Unrecognised pool feature requested";
r = -EINVAL;
@@ -1997,6 +2049,8 @@ static dm_block_t calc_metadata_threshold(struct pool_c *pt)
* skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
* ignore_discard: disable discard
* no_discard_passdown: don't pass discards down to the data device
+ * read_only: Don't allow any changes to be made to the pool metadata.
+ * error_if_no_space: error IOs, instead of queueing, if no space.
*/
static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
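Tying the feature list above to an actual table line: the optional feature words appear after the low-water-mark field, preceded by their count. A sketch with made-up device numbers and sizes (only error_if_no_space and the count of 2 relate to this patch):

    0 838860800 thin-pool 253:1 253:2 128 32768 2 no_discard_passdown error_if_no_space

The {0, 4, ...} bound in parse_pool_features() above is what now permits up to four such feature words, making room for error_if_no_space.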
@@ -2192,11 +2246,13 @@ static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
return -EINVAL;
} else if (data_size > sb_data_size) {
+ if (sb_data_size)
+ DMINFO("%s: growing the data device from %llu to %llu blocks",
+ dm_device_name(pool->pool_md),
+ sb_data_size, (unsigned long long)data_size);
r = dm_pool_resize_data_dev(pool->pmd, data_size);
if (r) {
- DMERR("%s: failed to resize data device",
- dm_device_name(pool->pool_md));
- set_pool_mode(pool, PM_READ_ONLY);
+ metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
return r;
}
@@ -2231,10 +2287,12 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
return -EINVAL;
} else if (metadata_dev_size > sb_metadata_dev_size) {
+ DMINFO("%s: growing the metadata device from %llu to %llu blocks",
+ dm_device_name(pool->pool_md),
+ sb_metadata_dev_size, metadata_dev_size);
r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
if (r) {
- DMERR("%s: failed to resize metadata device",
- dm_device_name(pool->pool_md));
+ metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
return r;
}
@@ -2290,8 +2348,7 @@ static void pool_resume(struct dm_target *ti)
unsigned long flags;
spin_lock_irqsave(&pool->lock, flags);
- pool->low_water_triggered = 0;
- pool->no_free_space = 0;
+ pool->low_water_triggered = false;
__requeue_bios(pool);
spin_unlock_irqrestore(&pool->lock, flags);
@@ -2510,7 +2567,8 @@ static void emit_flags(struct pool_features *pf, char *result,
unsigned sz, unsigned maxlen)
{
unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
- !pf->discard_passdown + (pf->mode == PM_READ_ONLY);
+ !pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
+ pf->error_if_no_space;
DMEMIT("%u ", count);
if (!pf->zero_new_blocks)
@@ -2524,6 +2582,9 @@ static void emit_flags(struct pool_features *pf, char *result,
if (pf->mode == PM_READ_ONLY)
DMEMIT("read_only ");
+
+ if (pf->error_if_no_space)
+ DMEMIT("error_if_no_space ");
}
/*
@@ -2618,11 +2679,16 @@ static void pool_status(struct dm_target *ti, status_type_t type,
DMEMIT("rw ");
if (!pool->pf.discard_enabled)
- DMEMIT("ignore_discard");
+ DMEMIT("ignore_discard ");
else if (pool->pf.discard_passdown)
- DMEMIT("discard_passdown");
+ DMEMIT("discard_passdown ");
+ else
+ DMEMIT("no_discard_passdown ");
+
+ if (pool->pf.error_if_no_space)
+ DMEMIT("error_if_no_space ");
else
- DMEMIT("no_discard_passdown");
+ DMEMIT("queue_if_no_space ");
break;
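The net effect of this hunk on the STATUSTYPE_INFO output: the discard-mode word now carries a trailing space and is followed by either error_if_no_space or queue_if_no_space. For a default writable pool the tail of a dmsetup status line would therefore read (leading fields, untouched by this patch, omitted):

    ... rw discard_passdown queue_if_no_space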
@@ -2721,7 +2787,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 9, 0},
+ .version = {1, 10, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2879,7 +2945,7 @@ out_unlock:
static int thin_map(struct dm_target *ti, struct bio *bio)
{
- bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
return thin_bio_map(ti, bio);
}
@@ -2899,7 +2965,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
spin_lock_irqsave(&pool->lock, flags);
list_for_each_entry_safe(m, tmp, &work, list) {
list_del(&m->list);
- m->quiesced = 1;
+ m->quiesced = true;
__maybe_add_mapping(m);
}
spin_unlock_irqrestore(&pool->lock, flags);
@@ -2911,7 +2977,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
if (!list_empty(&work)) {
spin_lock_irqsave(&pool->lock, flags);
list_for_each_entry_safe(m, tmp, &work, list)
- list_add(&m->list, &pool->prepared_discards);
+ list_add_tail(&m->list, &pool->prepared_discards);
spin_unlock_irqrestore(&pool->lock, flags);
wake_worker(pool);
}
@@ -3008,7 +3074,7 @@ static int thin_iterate_devices(struct dm_target *ti,
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 9, 0},
+ .version = {1, 10, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 4b7941db3aff..796007a5e0e1 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -73,15 +73,10 @@ struct dm_verity_io {
sector_t block;
unsigned n_blocks;
- /* saved bio vector */
- struct bio_vec *io_vec;
- unsigned io_vec_size;
+ struct bvec_iter iter;
struct work_struct work;
- /* A space for short vectors; longer vectors are allocated separately. */
- struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE];
-
/*
* Three variably-size fields follow this struct:
*
@@ -284,9 +279,10 @@ release_ret_r:
static int verity_verify_io(struct dm_verity_io *io)
{
struct dm_verity *v = io->v;
+ struct bio *bio = dm_bio_from_per_bio_data(io,
+ v->ti->per_bio_data_size);
unsigned b;
int i;
- unsigned vector = 0, offset = 0;
for (b = 0; b < io->n_blocks; b++) {
struct shash_desc *desc;
@@ -336,31 +332,22 @@ test_block_hash:
}
todo = 1 << v->data_dev_block_bits;
- do {
- struct bio_vec *bv;
+ while (io->iter.bi_size) {
u8 *page;
- unsigned len;
-
- BUG_ON(vector >= io->io_vec_size);
- bv = &io->io_vec[vector];
- page = kmap_atomic(bv->bv_page);
- len = bv->bv_len - offset;
- if (likely(len >= todo))
- len = todo;
- r = crypto_shash_update(desc,
- page + bv->bv_offset + offset, len);
+ struct bio_vec bv = bio_iter_iovec(bio, io->iter);
+
+ page = kmap_atomic(bv.bv_page);
+ r = crypto_shash_update(desc, page + bv.bv_offset,
+ bv.bv_len);
kunmap_atomic(page);
+
if (r < 0) {
DMERR("crypto_shash_update failed: %d", r);
return r;
}
- offset += len;
- if (likely(offset == bv->bv_len)) {
- offset = 0;
- vector++;
- }
- todo -= len;
- } while (todo);
+
+ bio_advance_iter(bio, &io->iter, bv.bv_len);
+ }
if (!v->version) {
r = crypto_shash_update(desc, v->salt, v->salt_size);
@@ -383,8 +370,6 @@ test_block_hash:
return -EIO;
}
}
- BUG_ON(vector != io->io_vec_size);
- BUG_ON(offset);
return 0;
}
@@ -400,10 +385,7 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_private = io->orig_bi_private;
- if (io->io_vec != io->io_vec_inline)
- mempool_free(io->io_vec, v->vec_mempool);
-
- bio_endio(bio, error);
+ bio_endio_nodec(bio, error);
}
static void verity_work(struct work_struct *w)
@@ -493,9 +475,9 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
struct dm_verity_io *io;
bio->bi_bdev = v->data_dev->bdev;
- bio->bi_sector = verity_map_sector(v, bio->bi_sector);
+ bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
- if (((unsigned)bio->bi_sector | bio_sectors(bio)) &
+ if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
DMERR_LIMIT("unaligned io");
return -EIO;
@@ -514,18 +496,12 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
io->v = v;
io->orig_bi_end_io = bio->bi_end_io;
io->orig_bi_private = bio->bi_private;
- io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
- io->n_blocks = bio->bi_size >> v->data_dev_block_bits;
+ io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
+ io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
bio->bi_end_io = verity_end_io;
bio->bi_private = io;
- io->io_vec_size = bio_segments(bio);
- if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
- io->io_vec = io->io_vec_inline;
- else
- io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO);
- memcpy(io->io_vec, bio_iovec(bio),
- io->io_vec_size * sizeof(struct bio_vec));
+ io->iter = bio->bi_iter;
verity_submit_prefetch(v, io);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0704c523a76b..8c53b09b9a2c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -200,8 +200,8 @@ struct mapped_device {
/* forced geometry settings */
struct hd_geometry geometry;
- /* sysfs handle */
- struct kobject kobj;
+ /* kobject and completion */
+ struct dm_kobject_holder kobj_holder;
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
@@ -575,7 +575,7 @@ static void start_io_acct(struct dm_io *io)
atomic_inc_return(&md->pending[rw]));
if (unlikely(dm_stats_used(&md->stats)))
- dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+ dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
bio_sectors(bio), false, 0, &io->stats_aux);
}
@@ -593,7 +593,7 @@ static void end_io_acct(struct dm_io *io)
part_stat_unlock();
if (unlikely(dm_stats_used(&md->stats)))
- dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+ dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
bio_sectors(bio), true, duration, &io->stats_aux);
/*
@@ -742,7 +742,7 @@ static void dec_pending(struct dm_io *io, int error)
if (io_error == DM_ENDIO_REQUEUE)
return;
- if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
+ if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
/*
* Preflush done for flush with data, reissue
* without REQ_FLUSH.
@@ -797,7 +797,7 @@ static void end_clone_bio(struct bio *clone, int error)
struct dm_rq_clone_bio_info *info = clone->bi_private;
struct dm_rq_target_io *tio = info->tio;
struct bio *bio = info->orig;
- unsigned int nr_bytes = info->orig->bi_size;
+ unsigned int nr_bytes = info->orig->bi_iter.bi_size;
bio_put(clone);
@@ -1128,7 +1128,7 @@ static void __map_bio(struct dm_target_io *tio)
* this io.
*/
atomic_inc(&tio->io->io_count);
- sector = clone->bi_sector;
+ sector = clone->bi_iter.bi_sector;
r = ti->type->map(ti, clone);
if (r == DM_MAPIO_REMAPPED) {
/* the bio has been remapped so dispatch it */
@@ -1155,76 +1155,32 @@ struct clone_info {
struct dm_io *io;
sector_t sector;
sector_t sector_count;
- unsigned short idx;
};
static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
{
- bio->bi_sector = sector;
- bio->bi_size = to_bytes(len);
-}
-
-static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
-{
- bio->bi_idx = idx;
- bio->bi_vcnt = idx + bv_count;
- bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-}
-
-static void clone_bio_integrity(struct bio *bio, struct bio *clone,
- unsigned short idx, unsigned len, unsigned offset,
- unsigned trim)
-{
- if (!bio_integrity(bio))
- return;
-
- bio_integrity_clone(clone, bio, GFP_NOIO);
-
- if (trim)
- bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
-}
-
-/*
- * Creates a little bio that just does part of a bvec.
- */
-static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
- sector_t sector, unsigned short idx,
- unsigned offset, unsigned len)
-{
- struct bio *clone = &tio->clone;
- struct bio_vec *bv = bio->bi_io_vec + idx;
-
- *clone->bi_io_vec = *bv;
-
- bio_setup_sector(clone, sector, len);
-
- clone->bi_bdev = bio->bi_bdev;
- clone->bi_rw = bio->bi_rw;
- clone->bi_vcnt = 1;
- clone->bi_io_vec->bv_offset = offset;
- clone->bi_io_vec->bv_len = clone->bi_size;
- clone->bi_flags |= 1 << BIO_CLONED;
-
- clone_bio_integrity(bio, clone, idx, len, offset, 1);
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_iter.bi_size = to_bytes(len);
}
/*
* Creates a bio that consists of range of complete bvecs.
*/
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
- sector_t sector, unsigned short idx,
- unsigned short bv_count, unsigned len)
+ sector_t sector, unsigned len)
{
struct bio *clone = &tio->clone;
- unsigned trim = 0;
- __bio_clone(clone, bio);
- bio_setup_sector(clone, sector, len);
- bio_setup_bv(clone, idx, bv_count);
+ __bio_clone_fast(clone, bio);
+
+ if (bio_integrity(bio))
+ bio_integrity_clone(clone, bio, GFP_NOIO);
- if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
- trim = 1;
- clone_bio_integrity(bio, clone, idx, len, 0, trim);
+ bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+ clone->bi_iter.bi_size = to_bytes(len);
+
+ if (bio_integrity(bio))
+ bio_integrity_trim(clone, 0, len);
}
static struct dm_target_io *alloc_tio(struct clone_info *ci,
@@ -1257,7 +1213,7 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
* ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
* and discard, so no need for concern about wasted bvec allocations.
*/
- __bio_clone(clone, ci->bio);
+ __bio_clone_fast(clone, ci->bio);
if (len)
bio_setup_sector(clone, ci->sector, len);
@@ -1286,10 +1242,7 @@ static int __send_empty_flush(struct clone_info *ci)
}
static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
- sector_t sector, int nr_iovecs,
- unsigned short idx, unsigned short bv_count,
- unsigned offset, unsigned len,
- unsigned split_bvec)
+ sector_t sector, unsigned len)
{
struct bio *bio = ci->bio;
struct dm_target_io *tio;
@@ -1303,11 +1256,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti
num_target_bios = ti->num_write_bios(ti, bio);
for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
- tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
- if (split_bvec)
- clone_split_bio(tio, bio, sector, idx, offset, len);
- else
- clone_bio(tio, bio, sector, idx, bv_count, len);
+ tio = alloc_tio(ci, ti, 0, target_bio_nr);
+ clone_bio(tio, bio, sector, len);
__map_bio(tio);
}
}
@@ -1379,68 +1329,13 @@ static int __send_write_same(struct clone_info *ci)
}
/*
- * Find maximum number of sectors / bvecs we can process with a single bio.
- */
-static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
-{
- struct bio *bio = ci->bio;
- sector_t bv_len, total_len = 0;
-
- for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
- bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
-
- if (bv_len > max)
- break;
-
- max -= bv_len;
- total_len += bv_len;
- }
-
- return total_len;
-}
-
-static int __split_bvec_across_targets(struct clone_info *ci,
- struct dm_target *ti, sector_t max)
-{
- struct bio *bio = ci->bio;
- struct bio_vec *bv = bio->bi_io_vec + ci->idx;
- sector_t remaining = to_sector(bv->bv_len);
- unsigned offset = 0;
- sector_t len;
-
- do {
- if (offset) {
- ti = dm_table_find_target(ci->map, ci->sector);
- if (!dm_target_is_valid(ti))
- return -EIO;
-
- max = max_io_len(ci->sector, ti);
- }
-
- len = min(remaining, max);
-
- __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
- bv->bv_offset + offset, len, 1);
-
- ci->sector += len;
- ci->sector_count -= len;
- offset += to_bytes(len);
- } while (remaining -= len);
-
- ci->idx++;
-
- return 0;
-}
-
-/*
* Select the correct strategy for processing a non-flush bio.
*/
static int __split_and_process_non_flush(struct clone_info *ci)
{
struct bio *bio = ci->bio;
struct dm_target *ti;
- sector_t len, max;
- int idx;
+ unsigned len;
if (unlikely(bio->bi_rw & REQ_DISCARD))
return __send_discard(ci);
@@ -1451,41 +1346,14 @@ static int __split_and_process_non_flush(struct clone_info *ci)
if (!dm_target_is_valid(ti))
return -EIO;
- max = max_io_len(ci->sector, ti);
-
- /*
- * Optimise for the simple case where we can do all of
- * the remaining io with a single clone.
- */
- if (ci->sector_count <= max) {
- __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
- ci->idx, bio->bi_vcnt - ci->idx, 0,
- ci->sector_count, 0);
- ci->sector_count = 0;
- return 0;
- }
+ len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
- /*
- * There are some bvecs that don't span targets.
- * Do as many of these as possible.
- */
- if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
- len = __len_within_target(ci, max, &idx);
-
- __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
- ci->idx, idx - ci->idx, 0, len, 0);
+ __clone_and_map_data_bio(ci, ti, ci->sector, len);
- ci->sector += len;
- ci->sector_count -= len;
- ci->idx = idx;
+ ci->sector += len;
+ ci->sector_count -= len;
- return 0;
- }
-
- /*
- * Handle a bvec that must be split between two or more targets.
- */
- return __split_bvec_across_targets(ci, ti, max);
+ return 0;
}
/*
@@ -1510,8 +1378,7 @@ static void __split_and_process_bio(struct mapped_device *md,
ci.io->bio = bio;
ci.io->md = md;
spin_lock_init(&ci.io->endio_lock);
- ci.sector = bio->bi_sector;
- ci.idx = bio->bi_idx;
+ ci.sector = bio->bi_iter.bi_sector;
start_io_acct(ci.io);
@@ -2041,6 +1908,7 @@ static struct mapped_device *alloc_dev(int minor)
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
+ init_completion(&md->kobj_holder.completion);
md->disk->major = _major;
md->disk->first_minor = minor;
@@ -2902,20 +2770,14 @@ struct gendisk *dm_disk(struct mapped_device *md)
struct kobject *dm_kobject(struct mapped_device *md)
{
- return &md->kobj;
+ return &md->kobj_holder.kobj;
}
-/*
- * struct mapped_device should not be exported outside of dm.c
- * so use this check to verify that kobj is part of md structure
- */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
struct mapped_device *md;
- md = container_of(kobj, struct mapped_device, kobj);
- if (&md->kobj != kobj)
- return NULL;
+ md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
if (test_bit(DMF_FREEING, &md->flags) ||
dm_deleting_md(md))
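Since the kobject handed out by dm_kobject() is now embedded in a dm_kobject_holder that is itself embedded in struct mapped_device, container_of() in dm_get_from_kobject() can only ever resolve to the owning md, so the old pointer sanity check is redundant; the DMF_FREEING/dm_deleting_md() checks remain the real guard.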
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index c57ba550f69e..c4569f02f50f 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -15,6 +15,8 @@
#include <linux/list.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
#include "dm-stats.h"
@@ -148,12 +150,27 @@ void dm_interface_exit(void);
/*
* sysfs interface
*/
+struct dm_kobject_holder {
+ struct kobject kobj;
+ struct completion completion;
+};
+
+static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
+{
+ return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
+}
+
int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);
/*
+ * The kobject helper
+ */
+void dm_kobject_release(struct kobject *kobj);
+
+/*
* Targets for linear and striped mappings
*/
int dm_linear_init(void);
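The holder pairs the sysfs kobject with a completion so that the mapped device cannot be freed while sysfs still holds a reference. Below is a minimal sketch of how the two halves are typically wired together; the body of dm_kobject_release() and the teardown helper are illustrative assumptions, not a copy of the dm code.

	/* release side: runs when the last kobject reference is dropped */
	void dm_kobject_release(struct kobject *kobj)
	{
		complete(dm_get_completion_from_kobject(kobj));
	}

	/* teardown side (sketch): drop our reference, then wait for the last user */
	static void example_teardown(struct dm_kobject_holder *holder)
	{
		struct completion *c = &holder->completion;

		kobject_put(&holder->kobj);
		wait_for_completion(c);
	}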
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 3193aefe982b..e8b4574956c7 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -74,8 +74,8 @@ static void faulty_fail(struct bio *bio, int error)
{
struct bio *b = bio->bi_private;
- b->bi_size = bio->bi_size;
- b->bi_sector = bio->bi_sector;
+ b->bi_iter.bi_size = bio->bi_iter.bi_size;
+ b->bi_iter.bi_sector = bio->bi_iter.bi_sector;
bio_put(bio);
@@ -185,26 +185,31 @@ static void make_request(struct mddev *mddev, struct bio *bio)
return;
}
- if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE))
+ if (check_sector(conf, bio->bi_iter.bi_sector,
+ bio_end_sector(bio), WRITE))
failit = 1;
if (check_mode(conf, WritePersistent)) {
- add_sector(conf, bio->bi_sector, WritePersistent);
+ add_sector(conf, bio->bi_iter.bi_sector,
+ WritePersistent);
failit = 1;
}
if (check_mode(conf, WriteTransient))
failit = 1;
} else {
/* read request */
- if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ))
+ if (check_sector(conf, bio->bi_iter.bi_sector,
+ bio_end_sector(bio), READ))
failit = 1;
if (check_mode(conf, ReadTransient))
failit = 1;
if (check_mode(conf, ReadPersistent)) {
- add_sector(conf, bio->bi_sector, ReadPersistent);
+ add_sector(conf, bio->bi_iter.bi_sector,
+ ReadPersistent);
failit = 1;
}
if (check_mode(conf, ReadFixable)) {
- add_sector(conf, bio->bi_sector, ReadFixable);
+ add_sector(conf, bio->bi_iter.bi_sector,
+ ReadFixable);
failit = 1;
}
}
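faulty.c is a purely mechanical part of the conversion: fields that used to live directly in struct bio (bi_sector, bi_size, bi_idx) are now members of the embedded struct bvec_iter. A sketch of the new field layout, for illustration only:

	static inline void dump_bio(struct bio *bio)
	{
		pr_debug("bio %p: sector %llu, %u bytes, idx %u\n", bio,
			 (unsigned long long)bio->bi_iter.bi_sector,
			 bio->bi_iter.bi_size, bio->bi_iter.bi_idx);
	}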
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index f03fabd2b37b..56f534b4a2d2 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -288,65 +288,65 @@ static int linear_stop (struct mddev *mddev)
static void linear_make_request(struct mddev *mddev, struct bio *bio)
{
+ char b[BDEVNAME_SIZE];
struct dev_info *tmp_dev;
- sector_t start_sector;
+ struct bio *split;
+ sector_t start_sector, end_sector, data_offset;
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
return;
}
- rcu_read_lock();
- tmp_dev = which_dev(mddev, bio->bi_sector);
- start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
-
-
- if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
- || (bio->bi_sector < start_sector))) {
- char b[BDEVNAME_SIZE];
-
- printk(KERN_ERR
- "md/linear:%s: make_request: Sector %llu out of bounds on "
- "dev %s: %llu sectors, offset %llu\n",
- mdname(mddev),
- (unsigned long long)bio->bi_sector,
- bdevname(tmp_dev->rdev->bdev, b),
- (unsigned long long)tmp_dev->rdev->sectors,
- (unsigned long long)start_sector);
- rcu_read_unlock();
- bio_io_error(bio);
- return;
- }
- if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) {
- /* This bio crosses a device boundary, so we have to
- * split it.
- */
- struct bio_pair *bp;
- sector_t end_sector = tmp_dev->end_sector;
+ do {
+ rcu_read_lock();
- rcu_read_unlock();
-
- bp = bio_split(bio, end_sector - bio->bi_sector);
+ tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
+ start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+ end_sector = tmp_dev->end_sector;
+ data_offset = tmp_dev->rdev->data_offset;
+ bio->bi_bdev = tmp_dev->rdev->bdev;
- linear_make_request(mddev, &bp->bio1);
- linear_make_request(mddev, &bp->bio2);
- bio_pair_release(bp);
- return;
- }
-
- bio->bi_bdev = tmp_dev->rdev->bdev;
- bio->bi_sector = bio->bi_sector - start_sector
- + tmp_dev->rdev->data_offset;
- rcu_read_unlock();
+ rcu_read_unlock();
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
- /* Just ignore it */
- bio_endio(bio, 0);
- return;
- }
+ if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
+ bio->bi_iter.bi_sector < start_sector))
+ goto out_of_bounds;
+
+ if (unlikely(bio_end_sector(bio) > end_sector)) {
+ /* This bio crosses a device boundary, so we have to
+ * split it.
+ */
+ split = bio_split(bio, end_sector -
+ bio->bi_iter.bi_sector,
+ GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
- generic_make_request(bio);
+ split->bi_iter.bi_sector = split->bi_iter.bi_sector -
+ start_sector + data_offset;
+
+ if (unlikely((split->bi_rw & REQ_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+ /* Just ignore it */
+ bio_endio(split, 0);
+ } else
+ generic_make_request(split);
+ } while (split != bio);
+ return;
+
+out_of_bounds:
+ printk(KERN_ERR
+ "md/linear:%s: make_request: Sector %llu out of bounds on "
+ "dev %s: %llu sectors, offset %llu\n",
+ mdname(mddev),
+ (unsigned long long)bio->bi_iter.bi_sector,
+ bdevname(tmp_dev->rdev->bdev, b),
+ (unsigned long long)tmp_dev->rdev->sectors,
+ (unsigned long long)start_sector);
+ bio_io_error(bio);
}
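linear_make_request() now relies on bio_split()/bio_chain() instead of the old bio_pair machinery: each pass splits off the part of the bio that fits on the current member device, chains it to the parent so the parent completes only after every piece, and resubmits. Reduced to its skeleton (next_boundary() and remap_and_submit() are assumptions of this sketch, standing in for which_dev() and the per-driver remapping):

	static void split_and_chain(struct bio *bio)
	{
		struct bio *split;

		do {
			/* how far can the bio go before it crosses a boundary? */
			sector_t boundary = next_boundary(bio->bi_iter.bi_sector);

			if (bio_end_sector(bio) > boundary) {
				split = bio_split(bio, boundary - bio->bi_iter.bi_sector,
						  GFP_NOIO, fs_bio_set);
				bio_chain(split, bio);	/* parent completes after all pieces */
			} else {
				split = bio;
			}

			remap_and_submit(split);	/* ends in generic_make_request() */
		} while (split != bio);
	}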
static void linear_status (struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 21f4d7ff0da2..4ad5cc4e63e8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -393,7 +393,7 @@ static void md_submit_flush_data(struct work_struct *ws)
struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct bio *bio = mddev->flush_bio;
- if (bio->bi_size == 0)
+ if (bio->bi_iter.bi_size == 0)
/* an empty barrier - all done */
bio_endio(bio, 0);
else {
@@ -754,7 +754,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
@@ -782,18 +782,16 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
int ret;
- rw |= REQ_SYNC;
-
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
rdev->meta_bdev : rdev->bdev;
if (metadata_op)
- bio->bi_sector = sector + rdev->sb_start;
+ bio->bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
(rdev->mddev->reshape_backwards ==
(sector >= rdev->mddev->reshape_position)))
- bio->bi_sector = sector + rdev->new_data_offset;
+ bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
else
- bio->bi_sector = sector + rdev->data_offset;
+ bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
submit_bio_wait(rw, bio);
@@ -1077,6 +1075,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
+ clear_bit(Bitmap_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
@@ -1155,6 +1154,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
+ if (ev1 < mddev->events)
+ set_bit(Bitmap_sync, &rdev->flags);
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
@@ -1170,6 +1171,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
desc->raid_disk < mddev->raid_disks */) {
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = desc->raid_disk;
+ rdev->saved_raid_disk = desc->raid_disk;
} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
/* active but not in sync implies recovery up to
* reshape position. We don't know exactly where
@@ -1563,6 +1565,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
+ clear_bit(Bitmap_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
@@ -1645,6 +1648,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
+ if (ev1 < mddev->events)
+ set_bit(Bitmap_sync, &rdev->flags);
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
@@ -1665,10 +1670,14 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
set_bit(Faulty, &rdev->flags);
break;
default:
+ rdev->saved_raid_disk = role;
if ((le32_to_cpu(sb->feature_map) &
- MD_FEATURE_RECOVERY_OFFSET))
+ MD_FEATURE_RECOVERY_OFFSET)) {
rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
- else
+ if (!(le32_to_cpu(sb->feature_map) &
+ MD_FEATURE_RECOVERY_BITMAP))
+ rdev->saved_raid_disk = -1;
+ } else
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = role;
break;
@@ -1730,6 +1739,9 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
sb->recovery_offset =
cpu_to_le64(rdev->recovery_offset);
+ if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
+ sb->feature_map |=
+ cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
}
if (test_bit(Replacement, &rdev->flags))
sb->feature_map |=
@@ -2471,8 +2483,7 @@ repeat:
if (rdev->sb_loaded != 1)
continue; /* no noise on spare devices */
- if (!test_bit(Faulty, &rdev->flags) &&
- rdev->saved_raid_disk == -1) {
+ if (!test_bit(Faulty, &rdev->flags)) {
md_super_write(mddev,rdev,
rdev->sb_start, rdev->sb_size,
rdev->sb_page);
@@ -2488,11 +2499,9 @@ repeat:
rdev->badblocks.size = 0;
}
- } else if (test_bit(Faulty, &rdev->flags))
+ } else
pr_debug("md: %s (skipping faulty)\n",
bdevname(rdev->bdev, b));
- else
- pr_debug("(skipping incremental s/r ");
if (mddev->level == LEVEL_MULTIPATH)
/* only need to write one superblock... */
@@ -2608,6 +2617,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
* blocked - sets the Blocked flags
* -blocked - clears the Blocked and possibly simulates an error
* insync - sets Insync providing device isn't active
+ * -insync - clear Insync for a device with a slot assigned,
+ * so that it gets rebuilt based on bitmap
* write_error - sets WriteErrorSeen
* -write_error - clears WriteErrorSeen
*/
@@ -2656,6 +2667,11 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
set_bit(In_sync, &rdev->flags);
err = 0;
+ } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) {
+ clear_bit(In_sync, &rdev->flags);
+ rdev->saved_raid_disk = rdev->raid_disk;
+ rdev->raid_disk = -1;
+ err = 0;
} else if (cmd_match(buf, "write_error")) {
set_bit(WriteErrorSeen, &rdev->flags);
err = 0;
@@ -2788,6 +2804,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
else
rdev->saved_raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
+ clear_bit(Bitmap_sync, &rdev->flags);
err = rdev->mddev->pers->
hot_add_disk(rdev->mddev, rdev);
if (err) {
@@ -3582,6 +3599,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
pers->run(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev_resume(mddev);
+ if (!mddev->thread)
+ md_update_sb(mddev, 1);
sysfs_notify(&mddev->kobj, NULL, "level");
md_new_event(mddev);
return rv;
@@ -5760,8 +5779,10 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
info->raid_disk < mddev->raid_disks) {
rdev->raid_disk = info->raid_disk;
set_bit(In_sync, &rdev->flags);
+ clear_bit(Bitmap_sync, &rdev->flags);
} else
rdev->raid_disk = -1;
+ rdev->saved_raid_disk = rdev->raid_disk;
} else
super_types[mddev->major_version].
validate_super(mddev, rdev);
@@ -5774,11 +5795,6 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
return -EINVAL;
}
- if (test_bit(In_sync, &rdev->flags))
- rdev->saved_raid_disk = rdev->raid_disk;
- else
- rdev->saved_raid_disk = -1;
-
clear_bit(In_sync, &rdev->flags); /* just to be sure */
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
@@ -6328,6 +6344,32 @@ static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
+static inline bool md_ioctl_valid(unsigned int cmd)
+{
+ switch (cmd) {
+ case ADD_NEW_DISK:
+ case BLKROSET:
+ case GET_ARRAY_INFO:
+ case GET_BITMAP_FILE:
+ case GET_DISK_INFO:
+ case HOT_ADD_DISK:
+ case HOT_REMOVE_DISK:
+ case PRINT_RAID_DEBUG:
+ case RAID_AUTORUN:
+ case RAID_VERSION:
+ case RESTART_ARRAY_RW:
+ case RUN_ARRAY:
+ case SET_ARRAY_INFO:
+ case SET_BITMAP_FILE:
+ case SET_DISK_FAULTY:
+ case STOP_ARRAY:
+ case STOP_ARRAY_RO:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int md_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
@@ -6336,6 +6378,9 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
struct mddev *mddev = NULL;
int ro;
+ if (!md_ioctl_valid(cmd))
+ return -ENOTTY;
+
switch (cmd) {
case RAID_VERSION:
case GET_ARRAY_INFO:
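md_ioctl_valid() is a plain whitelist: any command not known to md is rejected with -ENOTTY before the existing switch (and before any locking), instead of falling through to the default case deep inside md_ioctl().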
@@ -7706,10 +7751,12 @@ static int remove_and_add_spares(struct mddev *mddev,
if (test_bit(Faulty, &rdev->flags))
continue;
if (mddev->ro &&
- rdev->saved_raid_disk < 0)
+ ! (rdev->saved_raid_disk >= 0 &&
+ !test_bit(Bitmap_sync, &rdev->flags)))
continue;
- rdev->recovery_offset = 0;
+ if (rdev->saved_raid_disk < 0)
+ rdev->recovery_offset = 0;
if (mddev->pers->
hot_add_disk(mddev, rdev) == 0) {
if (sysfs_link_rdev(mddev, rdev))
@@ -7787,9 +7834,12 @@ void md_check_recovery(struct mddev *mddev)
* As we only add devices that are already in-sync,
* we can activate the spares immediately.
*/
- clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
remove_and_add_spares(mddev, NULL);
- mddev->pers->spare_active(mddev);
+ /* There is no thread, but we need to call
+ * ->spare_active and clear saved_raid_disk
+ */
+ md_reap_sync_thread(mddev);
+ clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
@@ -7926,14 +7976,10 @@ void md_reap_sync_thread(struct mddev *mddev)
mddev->pers->finish_reshape(mddev);
/* If array is no-longer degraded, then any saved_raid_disk
- * information must be scrapped. Also if any device is now
- * In_sync we must scrape the saved_raid_disk for that device
- * do the superblock for an incrementally recovered device
- * written out.
+ * information must be scrapped.
*/
- rdev_for_each(rdev, mddev)
- if (!mddev->degraded ||
- test_bit(In_sync, &rdev->flags))
+ if (!mddev->degraded)
+ rdev_for_each(rdev, mddev)
rdev->saved_raid_disk = -1;
md_update_sb(mddev, 1);
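The Bitmap_sync flag threads through the md.c hunks above: a device whose event count lags the array but is still covered by the write-intent bitmap keeps its saved_raid_disk (recorded on disk via MD_FEATURE_RECOVERY_BITMAP), so a later re-add can use bitmap-based recovery, and remove_and_add_spares()/md_reap_sync_thread() no longer throw that information away prematurely, even on read-only arrays.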
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2f5cc8a7ef3e..07bba96de260 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -106,7 +106,7 @@ struct md_rdev {
*/
struct work_struct del_work; /* used for delayed sysfs removal */
- struct sysfs_dirent *sysfs_state; /* handle for 'state'
+ struct kernfs_node *sysfs_state; /* handle for 'state'
* sysfs entry */
struct badblocks {
@@ -129,6 +129,9 @@ struct md_rdev {
enum flag_bits {
Faulty, /* device is known to have a fault */
In_sync, /* device is in_sync with rest of array */
+ Bitmap_sync, /* ..actually, not quite In_sync. Need a
+ * bitmap-based recovery to get fully in sync
+ */
Unmerged, /* device is being added to array and should
* be considered for bvec_merge_fn but not

* yet for actual IO
@@ -376,10 +379,10 @@ struct mddev {
sector_t resync_max; /* resync should pause
* when it gets here */
- struct sysfs_dirent *sysfs_state; /* handle for 'array_state'
+ struct kernfs_node *sysfs_state; /* handle for 'array_state'
* file in sysfs.
*/
- struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */
+ struct kernfs_node *sysfs_action; /* handle for 'sync_action' */
struct work_struct del_work; /* used for delayed sysfs removal */
@@ -498,13 +501,13 @@ struct md_sysfs_entry {
};
extern struct attribute_group md_bitmap_group;
-static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name)
+static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
if (sd)
return sysfs_get_dirent(sd, name);
return sd;
}
-static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
+static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
{
if (sd)
sysfs_notify_dirent(sd);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 1642eae75a33..849ad39f547b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -100,7 +100,7 @@ static void multipath_end_request(struct bio *bio, int error)
md_error (mp_bh->mddev, rdev);
printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
bdevname(rdev->bdev,b),
- (unsigned long long)bio->bi_sector);
+ (unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh);
} else
multipath_end_bh_io(mp_bh, error);
@@ -132,7 +132,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
multipath = conf->multipaths + mp_bh->path;
mp_bh->bio = *bio;
- mp_bh->bio.bi_sector += multipath->rdev->data_offset;
+ mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
@@ -355,21 +355,22 @@ static void multipathd(struct md_thread *thread)
spin_unlock_irqrestore(&conf->device_lock, flags);
bio = &mp_bh->bio;
- bio->bi_sector = mp_bh->master_bio->bi_sector;
+ bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
if ((mp_bh->path = multipath_map (conf))<0) {
printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
" error for block %llu\n",
bdevname(bio->bi_bdev,b),
- (unsigned long long)bio->bi_sector);
+ (unsigned long long)bio->bi_iter.bi_sector);
multipath_end_bh_io(mp_bh, -EIO);
} else {
printk(KERN_ERR "multipath: %s: redirecting sector %llu"
" to another IO path\n",
bdevname(bio->bi_bdev,b),
- (unsigned long long)bio->bi_sector);
+ (unsigned long long)bio->bi_iter.bi_sector);
*bio = *(mp_bh->master_bio);
- bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
+ bio->bi_iter.bi_sector +=
+ conf->multipaths[mp_bh->path].rdev->data_offset;
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
bio->bi_end_io = multipath_end_request;
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 064a3c271baa..455f79279a16 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -104,7 +104,7 @@ static int __check_holder(struct block_lock *lock)
for (i = 0; i < MAX_HOLDERS; i++) {
if (lock->holders[i] == current) {
- DMERR("recursive lock detected in pool metadata");
+ DMERR("recursive lock detected in metadata");
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
DMERR("previously held here:");
print_stack_trace(lock->traces + i, 4);
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 468e371ee9b2..416060c25709 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -770,8 +770,8 @@ EXPORT_SYMBOL_GPL(dm_btree_insert_notify);
/*----------------------------------------------------------------*/
-static int find_highest_key(struct ro_spine *s, dm_block_t block,
- uint64_t *result_key, dm_block_t *next_block)
+static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest,
+ uint64_t *result_key, dm_block_t *next_block)
{
int i, r;
uint32_t flags;
@@ -788,7 +788,11 @@ static int find_highest_key(struct ro_spine *s, dm_block_t block,
else
i--;
- *result_key = le64_to_cpu(ro_node(s)->keys[i]);
+ if (find_highest)
+ *result_key = le64_to_cpu(ro_node(s)->keys[i]);
+ else
+ *result_key = le64_to_cpu(ro_node(s)->keys[0]);
+
if (next_block || flags & INTERNAL_NODE)
block = value64(ro_node(s), i);
@@ -799,16 +803,16 @@ static int find_highest_key(struct ro_spine *s, dm_block_t block,
return 0;
}
-int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
- uint64_t *result_keys)
+static int dm_btree_find_key(struct dm_btree_info *info, dm_block_t root,
+ bool find_highest, uint64_t *result_keys)
{
int r = 0, count = 0, level;
struct ro_spine spine;
init_ro_spine(&spine, info);
for (level = 0; level < info->levels; level++) {
- r = find_highest_key(&spine, root, result_keys + level,
- level == info->levels - 1 ? NULL : &root);
+ r = find_key(&spine, root, find_highest, result_keys + level,
+ level == info->levels - 1 ? NULL : &root);
if (r == -ENODATA) {
r = 0;
break;
@@ -822,8 +826,23 @@ int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
return r ? r : count;
}
+
+int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *result_keys)
+{
+ return dm_btree_find_key(info, root, true, result_keys);
+}
EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);
+int dm_btree_find_lowest_key(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *result_keys)
+{
+ return dm_btree_find_key(info, root, false, result_keys);
+}
+EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key);
+
+/*----------------------------------------------------------------*/
+
/*
* FIXME: We shouldn't use a recursive algorithm when we have limited stack
* space. Also this only works for single level trees.
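dm_btree_find_highest_key() and the new dm_btree_find_lowest_key() are now thin wrappers around the same walker, differing only in the find_highest flag. A hedged usage sketch of the new export (the array size is an assumption; one slot is needed per btree level):

	uint64_t lowest[4];	/* assumes info->levels <= 4 */
	int r;

	r = dm_btree_find_lowest_key(info, root, lowest);
	if (r < 0)
		return r;		/* lookup failed */
	if (r == 0)
		return -ENODATA;	/* empty tree: no lowest key */
	/* lowest[0..r-1] now hold the smallest key at each level */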
diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
index 8672d159e0b5..dacfc34180b4 100644
--- a/drivers/md/persistent-data/dm-btree.h
+++ b/drivers/md/persistent-data/dm-btree.h
@@ -137,6 +137,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
/*
* Returns < 0 on failure. Otherwise the number of key entries that have
* been filled out. Remember trees can have zero entries, and as such have
+ * no lowest key.
+ */
+int dm_btree_find_lowest_key(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *result_keys);
+
+/*
+ * Returns < 0 on failure. Otherwise the number of key entries that have
+ * been filled out. Remember trees can have zero entries, and as such have
* no highest key.
*/
int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 466a60bbd716..aacbe70c2c2e 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -245,6 +245,10 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
return -EINVAL;
}
+ /*
+ * We need to set this before the dm_tm_new_block() call below.
+ */
+ ll->nr_blocks = nr_blocks;
for (i = old_blocks; i < blocks; i++) {
struct dm_block *b;
struct disk_index_entry idx;
@@ -252,6 +256,7 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
if (r < 0)
return r;
+
idx.blocknr = cpu_to_le64(dm_block_location(b));
r = dm_tm_unlock(ll->tm, b);
@@ -266,7 +271,6 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
return r;
}
- ll->nr_blocks = nr_blocks;
return 0;
}
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 58fc1eef7499..536782e3bcb7 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -385,13 +385,13 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
int r = sm_metadata_new_block_(sm, b);
if (r) {
- DMERR("unable to allocate new metadata block");
+ DMERR_LIMIT("unable to allocate new metadata block");
return r;
}
r = sm_metadata_get_nr_free(sm, &count);
if (r) {
- DMERR("couldn't get free block count");
+ DMERR_LIMIT("couldn't get free block count");
return r;
}
@@ -608,20 +608,38 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
* Flick into a mode where all blocks get allocated in the new area.
*/
smm->begin = old_len;
- memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
+ memcpy(sm, &bootstrap_ops, sizeof(*sm));
/*
* Extend.
*/
r = sm_ll_extend(&smm->ll, extra_blocks);
+ if (r)
+ goto out;
/*
- * Switch back to normal behaviour.
+ * We repeatedly increment then commit until the commit doesn't
+ * allocate any new blocks.
*/
- memcpy(&smm->sm, &ops, sizeof(smm->sm));
- for (i = old_len; !r && i < smm->begin; i++)
- r = sm_ll_inc(&smm->ll, i, &ev);
+ do {
+ for (i = old_len; !r && i < smm->begin; i++) {
+ r = sm_ll_inc(&smm->ll, i, &ev);
+ if (r)
+ goto out;
+ }
+ old_len = smm->begin;
+
+ r = sm_ll_commit(&smm->ll);
+ if (r)
+ goto out;
+
+ } while (old_len != smm->begin);
+out:
+ /*
+ * Switch back to normal behaviour.
+ */
+ memcpy(sm, &ops, sizeof(*sm));
return r;
}
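Extending the metadata space map can itself allocate metadata blocks, so sm_metadata_extend() now stays in bootstrap mode while it repeatedly bumps the reference counts of the new blocks and commits, looping until a commit allocates nothing further, and only then restores the normal ops; any failure along the way still drops back to the normal ops via the out label.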
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c4d420b7d2f4..407a99e46f69 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -501,10 +501,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
unsigned int chunk_sects, struct bio *bio)
{
if (likely(is_power_of_2(chunk_sects))) {
- return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
+ return chunk_sects >=
+ ((bio->bi_iter.bi_sector & (chunk_sects-1))
+ bio_sectors(bio));
} else{
- sector_t sector = bio->bi_sector;
+ sector_t sector = bio->bi_iter.bi_sector;
return chunk_sects >= (sector_div(sector, chunk_sects)
+ bio_sectors(bio));
}
@@ -512,64 +513,44 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
- unsigned int chunk_sects;
- sector_t sector_offset;
struct strip_zone *zone;
struct md_rdev *tmp_dev;
+ struct bio *split;
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
return;
}
- chunk_sects = mddev->chunk_sectors;
- if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
- sector_t sector = bio->bi_sector;
- struct bio_pair *bp;
- /* Sanity check -- queue functions should prevent this happening */
- if (bio_segments(bio) > 1)
- goto bad_map;
- /* This is a one page bio that upper layers
- * refuse to split for us, so we need to split it.
- */
- if (likely(is_power_of_2(chunk_sects)))
- bp = bio_split(bio, chunk_sects - (sector &
- (chunk_sects-1)));
- else
- bp = bio_split(bio, chunk_sects -
- sector_div(sector, chunk_sects));
- raid0_make_request(mddev, &bp->bio1);
- raid0_make_request(mddev, &bp->bio2);
- bio_pair_release(bp);
- return;
- }
+ do {
+ sector_t sector = bio->bi_iter.bi_sector;
+ unsigned chunk_sects = mddev->chunk_sectors;
- sector_offset = bio->bi_sector;
- zone = find_zone(mddev->private, &sector_offset);
- tmp_dev = map_sector(mddev, zone, bio->bi_sector,
- &sector_offset);
- bio->bi_bdev = tmp_dev->bdev;
- bio->bi_sector = sector_offset + zone->dev_start +
- tmp_dev->data_offset;
-
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
- /* Just ignore it */
- bio_endio(bio, 0);
- return;
- }
+ unsigned sectors = chunk_sects -
+ (likely(is_power_of_2(chunk_sects))
+ ? (sector & (chunk_sects-1))
+ : sector_div(sector, chunk_sects));
- generic_make_request(bio);
- return;
-
-bad_map:
- printk("md/raid0:%s: make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n",
- mdname(mddev), chunk_sects / 2,
- (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
+ if (sectors < bio_sectors(bio)) {
+ split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
- bio_io_error(bio);
- return;
+ zone = find_zone(mddev->private, &sector);
+ tmp_dev = map_sector(mddev, zone, sector, &sector);
+ split->bi_bdev = tmp_dev->bdev;
+ split->bi_iter.bi_sector = sector + zone->dev_start +
+ tmp_dev->data_offset;
+
+ if (unlikely((split->bi_rw & REQ_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+ /* Just ignore it */
+ bio_endio(split, 0);
+ } else
+ generic_make_request(split);
+ } while (split != bio);
}
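raid0 adopts the same split-and-chain loop as linear: a request that crosses a chunk boundary is simply split at the boundary and resubmitted, so the old single-page restriction and the "can't convert block across chunks" error path are gone.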
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1e5a540995e9..4a6ca1cb2e78 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -229,7 +229,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
int done;
struct r1conf *conf = r1_bio->mddev->private;
sector_t start_next_window = r1_bio->start_next_window;
- sector_t bi_sector = bio->bi_sector;
+ sector_t bi_sector = bio->bi_iter.bi_sector;
if (bio->bi_phys_segments) {
unsigned long flags;
@@ -265,9 +265,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
(bio_data_dir(bio) == WRITE) ? "write" : "read",
- (unsigned long long) bio->bi_sector,
- (unsigned long long) bio->bi_sector +
- bio_sectors(bio) - 1);
+ (unsigned long long) bio->bi_iter.bi_sector,
+ (unsigned long long) bio_end_sector(bio) - 1);
call_bio_endio(r1_bio);
}
@@ -466,9 +465,8 @@ static void raid1_end_write_request(struct bio *bio, int error)
struct bio *mbio = r1_bio->master_bio;
pr_debug("raid1: behind end write sectors"
" %llu-%llu\n",
- (unsigned long long) mbio->bi_sector,
- (unsigned long long) mbio->bi_sector +
- bio_sectors(mbio) - 1);
+ (unsigned long long) mbio->bi_iter.bi_sector,
+ (unsigned long long) bio_end_sector(mbio) - 1);
call_bio_endio(r1_bio);
}
}
@@ -875,7 +873,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
>= bio_end_sector(bio)) ||
(conf->next_resync + NEXT_NORMALIO_DISTANCE
- <= bio->bi_sector))
+ <= bio->bi_iter.bi_sector))
wait = false;
else
wait = true;
@@ -913,20 +911,19 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
if (bio && bio_data_dir(bio) == WRITE) {
if (conf->next_resync + NEXT_NORMALIO_DISTANCE
- <= bio->bi_sector) {
+ <= bio->bi_iter.bi_sector) {
if (conf->start_next_window == MaxSector)
conf->start_next_window =
conf->next_resync +
NEXT_NORMALIO_DISTANCE;
if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
- <= bio->bi_sector)
+ <= bio->bi_iter.bi_sector)
conf->next_window_requests++;
else
conf->current_window_requests++;
- }
- if (bio->bi_sector >= conf->start_next_window)
sector = conf->start_next_window;
+ }
}
conf->nr_pending++;
@@ -1028,7 +1025,8 @@ do_sync_io:
if (bvecs[i].bv_page)
put_page(bvecs[i].bv_page);
kfree(bvecs);
- pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
+ pr_debug("%dB behind alloc failed, doing sync I/O\n",
+ bio->bi_iter.bi_size);
}
struct raid1_plug_cb {
@@ -1108,7 +1106,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
if (bio_data_dir(bio) == WRITE &&
bio_end_sector(bio) > mddev->suspend_lo &&
- bio->bi_sector < mddev->suspend_hi) {
+ bio->bi_iter.bi_sector < mddev->suspend_hi) {
/* As the suspend_* range is controlled by
* userspace, we want an interruptible
* wait.
@@ -1119,7 +1117,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
prepare_to_wait(&conf->wait_barrier,
&w, TASK_INTERRUPTIBLE);
if (bio_end_sector(bio) <= mddev->suspend_lo ||
- bio->bi_sector >= mddev->suspend_hi)
+ bio->bi_iter.bi_sector >= mddev->suspend_hi)
break;
schedule();
}
@@ -1141,7 +1139,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
r1_bio->sectors = bio_sectors(bio);
r1_bio->state = 0;
r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_sector;
+ r1_bio->sector = bio->bi_iter.bi_sector;
/* We might need to issue multiple reads to different
* devices if there are bad blocks around, so we keep
@@ -1181,12 +1179,13 @@ read_again:
r1_bio->read_disk = rdisk;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(read_bio, r1_bio->sector - bio->bi_sector,
+ bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
max_sectors);
r1_bio->bios[rdisk] = read_bio;
- read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
+ read_bio->bi_iter.bi_sector = r1_bio->sector +
+ mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid1_end_read_request;
read_bio->bi_rw = READ | do_sync;
@@ -1198,7 +1197,7 @@ read_again:
*/
sectors_handled = (r1_bio->sector + max_sectors
- - bio->bi_sector);
+ - bio->bi_iter.bi_sector);
r1_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
if (bio->bi_phys_segments == 0)
@@ -1219,7 +1218,8 @@ read_again:
r1_bio->sectors = bio_sectors(bio) - sectors_handled;
r1_bio->state = 0;
r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_sector + sectors_handled;
+ r1_bio->sector = bio->bi_iter.bi_sector +
+ sectors_handled;
goto read_again;
} else
generic_make_request(read_bio);
@@ -1322,7 +1322,7 @@ read_again:
if (r1_bio->bios[j])
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
r1_bio->state = 0;
- allow_barrier(conf, start_next_window, bio->bi_sector);
+ allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
start_next_window = wait_barrier(conf, bio);
/*
@@ -1349,7 +1349,7 @@ read_again:
bio->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock);
}
- sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
+ sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
atomic_set(&r1_bio->remaining, 1);
atomic_set(&r1_bio->behind_remaining, 0);
@@ -1361,7 +1361,7 @@ read_again:
continue;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
+ bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
if (first_clone) {
/* do behind I/O ?
@@ -1395,7 +1395,7 @@ read_again:
r1_bio->bios[i] = mbio;
- mbio->bi_sector = (r1_bio->sector +
+ mbio->bi_iter.bi_sector = (r1_bio->sector +
conf->mirrors[i].rdev->data_offset);
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
@@ -1435,7 +1435,7 @@ read_again:
r1_bio->sectors = bio_sectors(bio) - sectors_handled;
r1_bio->state = 0;
r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_sector + sectors_handled;
+ r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
goto retry_write;
}
@@ -1953,20 +1953,24 @@ static int process_checks(struct r1bio *r1_bio)
for (i = 0; i < conf->raid_disks * 2; i++) {
int j;
int size;
+ int uptodate;
struct bio *b = r1_bio->bios[i];
if (b->bi_end_io != end_sync_read)
continue;
- /* fixup the bio for reuse */
+ /* fixup the bio for reuse, but preserve BIO_UPTODATE */
+ uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
bio_reset(b);
+ if (!uptodate)
+ clear_bit(BIO_UPTODATE, &b->bi_flags);
b->bi_vcnt = vcnt;
- b->bi_size = r1_bio->sectors << 9;
- b->bi_sector = r1_bio->sector +
+ b->bi_iter.bi_size = r1_bio->sectors << 9;
+ b->bi_iter.bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset;
b->bi_bdev = conf->mirrors[i].rdev->bdev;
b->bi_end_io = end_sync_read;
b->bi_private = r1_bio;
- size = b->bi_size;
+ size = b->bi_iter.bi_size;
for (j = 0; j < vcnt ; j++) {
struct bio_vec *bi;
bi = &b->bi_io_vec[j];
@@ -1990,11 +1994,14 @@ static int process_checks(struct r1bio *r1_bio)
int j;
struct bio *pbio = r1_bio->bios[primary];
struct bio *sbio = r1_bio->bios[i];
+ int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
if (sbio->bi_end_io != end_sync_read)
continue;
+ /* Now we can 'fixup' the BIO_UPTODATE flag */
+ set_bit(BIO_UPTODATE, &sbio->bi_flags);
- if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
+ if (uptodate) {
for (j = vcnt; j-- ; ) {
struct page *p, *s;
p = pbio->bi_io_vec[j].bv_page;
@@ -2009,7 +2016,7 @@ static int process_checks(struct r1bio *r1_bio)
if (j >= 0)
atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
- && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
+ && uptodate)) {
/* No need to write to this device. */
sbio->bi_end_io = NULL;
rdev_dec_pending(conf->mirrors[i].rdev, mddev);
@@ -2221,11 +2228,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
}
wbio->bi_rw = WRITE;
- wbio->bi_sector = r1_bio->sector;
- wbio->bi_size = r1_bio->sectors << 9;
+ wbio->bi_iter.bi_sector = r1_bio->sector;
+ wbio->bi_iter.bi_size = r1_bio->sectors << 9;
bio_trim(wbio, sector - r1_bio->sector, sectors);
- wbio->bi_sector += rdev->data_offset;
+ wbio->bi_iter.bi_sector += rdev->data_offset;
wbio->bi_bdev = rdev->bdev;
if (submit_bio_wait(WRITE, wbio) == 0)
/* failure! */
@@ -2339,7 +2346,8 @@ read_more:
}
r1_bio->read_disk = disk;
bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
- bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors);
+ bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev;
printk_ratelimited(KERN_ERR
@@ -2348,7 +2356,7 @@ read_more:
mdname(mddev),
(unsigned long long)r1_bio->sector,
bdevname(rdev->bdev, b));
- bio->bi_sector = r1_bio->sector + rdev->data_offset;
+ bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
bio->bi_rw = READ | do_sync;
@@ -2357,7 +2365,7 @@ read_more:
/* Drat - have to split this up more */
struct bio *mbio = r1_bio->master_bio;
int sectors_handled = (r1_bio->sector + max_sectors
- - mbio->bi_sector);
+ - mbio->bi_iter.bi_sector);
r1_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
if (mbio->bi_phys_segments == 0)
@@ -2375,7 +2383,8 @@ read_more:
r1_bio->state = 0;
set_bit(R1BIO_ReadError, &r1_bio->state);
r1_bio->mddev = mddev;
- r1_bio->sector = mbio->bi_sector + sectors_handled;
+ r1_bio->sector = mbio->bi_iter.bi_sector +
+ sectors_handled;
goto read_more;
} else
@@ -2599,7 +2608,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
}
if (bio->bi_end_io) {
atomic_inc(&rdev->nr_pending);
- bio->bi_sector = sector_nr + rdev->data_offset;
+ bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_private = r1_bio;
}
@@ -2699,7 +2708,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
continue;
/* remove last page from this bio */
bio->bi_vcnt--;
- bio->bi_size -= len;
+ bio->bi_iter.bi_size -= len;
bio->bi_flags &= ~(1<< BIO_SEG_VALID);
}
goto bio_full;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c504e8389e69..33fc408e5eac 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1152,14 +1152,12 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
kfree(plug);
}
-static void make_request(struct mddev *mddev, struct bio * bio)
+static void __make_request(struct mddev *mddev, struct bio *bio)
{
struct r10conf *conf = mddev->private;
struct r10bio *r10_bio;
struct bio *read_bio;
int i;
- sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
- int chunk_sects = chunk_mask + 1;
const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
@@ -1174,88 +1172,27 @@ static void make_request(struct mddev *mddev, struct bio * bio)
int max_sectors;
int sectors;
- if (unlikely(bio->bi_rw & REQ_FLUSH)) {
- md_flush_request(mddev, bio);
- return;
- }
-
- /* If this request crosses a chunk boundary, we need to
- * split it. This will only happen for 1 PAGE (or less) requests.
- */
- if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
- > chunk_sects
- && (conf->geo.near_copies < conf->geo.raid_disks
- || conf->prev.near_copies < conf->prev.raid_disks))) {
- struct bio_pair *bp;
- /* Sanity check -- queue functions should prevent this happening */
- if (bio_segments(bio) > 1)
- goto bad_map;
- /* This is a one page bio that upper layers
- * refuse to split for us, so we need to split it.
- */
- bp = bio_split(bio,
- chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
-
- /* Each of these 'make_request' calls will call 'wait_barrier'.
- * If the first succeeds but the second blocks due to the resync
- * thread raising the barrier, we will deadlock because the
- * IO to the underlying device will be queued in generic_make_request
- * and will never complete, so will never reduce nr_pending.
- * So increment nr_waiting here so no new raise_barriers will
- * succeed, and so the second wait_barrier cannot block.
- */
- spin_lock_irq(&conf->resync_lock);
- conf->nr_waiting++;
- spin_unlock_irq(&conf->resync_lock);
-
- make_request(mddev, &bp->bio1);
- make_request(mddev, &bp->bio2);
-
- spin_lock_irq(&conf->resync_lock);
- conf->nr_waiting--;
- wake_up(&conf->wait_barrier);
- spin_unlock_irq(&conf->resync_lock);
-
- bio_pair_release(bp);
- return;
- bad_map:
- printk("md/raid10:%s: make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
- (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
-
- bio_io_error(bio);
- return;
- }
-
- md_write_start(mddev, bio);
-
- /*
- * Register the new request and wait if the reconstruction
- * thread has put up a bar for new requests.
- * Continue immediately if no resync is active currently.
- */
- wait_barrier(conf);
-
sectors = bio_sectors(bio);
while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
- bio->bi_sector < conf->reshape_progress &&
- bio->bi_sector + sectors > conf->reshape_progress) {
+ bio->bi_iter.bi_sector < conf->reshape_progress &&
+ bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
/* IO spans the reshape position. Need to wait for
* reshape to pass
*/
allow_barrier(conf);
wait_event(conf->wait_barrier,
- conf->reshape_progress <= bio->bi_sector ||
- conf->reshape_progress >= bio->bi_sector + sectors);
+ conf->reshape_progress <= bio->bi_iter.bi_sector ||
+ conf->reshape_progress >= bio->bi_iter.bi_sector +
+ sectors);
wait_barrier(conf);
}
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
bio_data_dir(bio) == WRITE &&
(mddev->reshape_backwards
- ? (bio->bi_sector < conf->reshape_safe &&
- bio->bi_sector + sectors > conf->reshape_progress)
- : (bio->bi_sector + sectors > conf->reshape_safe &&
- bio->bi_sector < conf->reshape_progress))) {
+ ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
+ bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
+ : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
+ bio->bi_iter.bi_sector < conf->reshape_progress))) {
/* Need to update reshape_position in metadata */
mddev->reshape_position = conf->reshape_progress;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1273,7 +1210,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
r10_bio->sectors = sectors;
r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_sector;
+ r10_bio->sector = bio->bi_iter.bi_sector;
r10_bio->state = 0;
/* We might need to issue multiple reads to different
@@ -1302,13 +1239,13 @@ read_again:
slot = r10_bio->read_slot;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(read_bio, r10_bio->sector - bio->bi_sector,
+ bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
max_sectors);
r10_bio->devs[slot].bio = read_bio;
r10_bio->devs[slot].rdev = rdev;
- read_bio->bi_sector = r10_bio->devs[slot].addr +
+ read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
choose_data_offset(r10_bio, rdev);
read_bio->bi_bdev = rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
@@ -1319,15 +1256,15 @@ read_again:
/* Could not read all from this device, so we will
* need another r10_bio.
*/
- sectors_handled = (r10_bio->sectors + max_sectors
- - bio->bi_sector);
+ sectors_handled = (r10_bio->sector + max_sectors
+ - bio->bi_iter.bi_sector);
r10_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
if (bio->bi_phys_segments == 0)
bio->bi_phys_segments = 2;
else
bio->bi_phys_segments++;
- spin_unlock(&conf->device_lock);
+ spin_unlock_irq(&conf->device_lock);
/* Cannot call generic_make_request directly
* as that will be queued in __generic_make_request
* and subsequent mempool_alloc might block
@@ -1341,7 +1278,8 @@ read_again:
r10_bio->sectors = bio_sectors(bio) - sectors_handled;
r10_bio->state = 0;
r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_sector + sectors_handled;
+ r10_bio->sector = bio->bi_iter.bi_sector +
+ sectors_handled;
goto read_again;
} else
generic_make_request(read_bio);
@@ -1499,7 +1437,8 @@ retry_write:
bio->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock);
}
- sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
+ sectors_handled = r10_bio->sector + max_sectors -
+ bio->bi_iter.bi_sector;
atomic_set(&r10_bio->remaining, 1);
bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
@@ -1510,11 +1449,11 @@ retry_write:
if (r10_bio->devs[i].bio) {
struct md_rdev *rdev = conf->mirrors[d].rdev;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+ bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
max_sectors);
r10_bio->devs[i].bio = mbio;
- mbio->bi_sector = (r10_bio->devs[i].addr+
+ mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
choose_data_offset(r10_bio,
rdev));
mbio->bi_bdev = rdev->bdev;
@@ -1553,11 +1492,11 @@ retry_write:
rdev = conf->mirrors[d].rdev;
}
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+ bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
max_sectors);
r10_bio->devs[i].repl_bio = mbio;
- mbio->bi_sector = (r10_bio->devs[i].addr +
+ mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
choose_data_offset(
r10_bio, rdev));
mbio->bi_bdev = rdev->bdev;
@@ -1591,11 +1530,57 @@ retry_write:
r10_bio->sectors = bio_sectors(bio) - sectors_handled;
r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_sector + sectors_handled;
+ r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
r10_bio->state = 0;
goto retry_write;
}
one_write_done(r10_bio);
+}
+
+static void make_request(struct mddev *mddev, struct bio *bio)
+{
+ struct r10conf *conf = mddev->private;
+ sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
+ int chunk_sects = chunk_mask + 1;
+
+ struct bio *split;
+
+ if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+ md_flush_request(mddev, bio);
+ return;
+ }
+
+ md_write_start(mddev, bio);
+
+ /*
+ * Register the new request and wait if the reconstruction
+ * thread has put up a bar for new requests.
+ * Continue immediately if no resync is active currently.
+ */
+ wait_barrier(conf);
+
+ do {
+
+ /*
+ * If this request crosses a chunk boundary, we need to split
+ * it.
+ */
+ if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
+ bio_sectors(bio) > chunk_sects
+ && (conf->geo.near_copies < conf->geo.raid_disks
+ || conf->prev.near_copies <
+ conf->prev.raid_disks))) {
+ split = bio_split(bio, chunk_sects -
+ (bio->bi_iter.bi_sector &
+ (chunk_sects - 1)),
+ GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
+
+ __make_request(mddev, split);
+ } while (split != bio);
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
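raid10 moves the chunk-boundary handling out of __make_request() into this wrapper: instead of recursing into make_request() for each half of a bio_pair (which needed the nr_waiting dance to avoid deadlocking against the resync barrier), the wrapper splits and chains at chunk boundaries so __make_request() only ever sees a bio that fits within one chunk.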
@@ -2124,10 +2109,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
bio_reset(tbio);
tbio->bi_vcnt = vcnt;
- tbio->bi_size = r10_bio->sectors << 9;
+ tbio->bi_iter.bi_size = r10_bio->sectors << 9;
tbio->bi_rw = WRITE;
tbio->bi_private = r10_bio;
- tbio->bi_sector = r10_bio->devs[i].addr;
+ tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
for (j=0; j < vcnt ; j++) {
tbio->bi_io_vec[j].bv_offset = 0;
@@ -2144,7 +2129,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
- tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
+ tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
generic_make_request(tbio);
}
@@ -2614,8 +2599,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
sectors = sect_to_write;
/* Write at 'sector' for 'sectors' */
wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(wbio, sector - bio->bi_sector, sectors);
- wbio->bi_sector = (r10_bio->devs[i].addr+
+ bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
+ wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
choose_data_offset(r10_bio, rdev) +
(sector - r10_bio->sector));
wbio->bi_bdev = rdev->bdev;
@@ -2687,10 +2672,10 @@ read_more:
(unsigned long long)r10_bio->sector);
bio = bio_clone_mddev(r10_bio->master_bio,
GFP_NOIO, mddev);
- bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors);
+ bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
r10_bio->devs[slot].bio = bio;
r10_bio->devs[slot].rdev = rdev;
- bio->bi_sector = r10_bio->devs[slot].addr
+ bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
+ choose_data_offset(r10_bio, rdev);
bio->bi_bdev = rdev->bdev;
bio->bi_rw = READ | do_sync;
@@ -2701,7 +2686,7 @@ read_more:
struct bio *mbio = r10_bio->master_bio;
int sectors_handled =
r10_bio->sector + max_sectors
- - mbio->bi_sector;
+ - mbio->bi_iter.bi_sector;
r10_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
if (mbio->bi_phys_segments == 0)
@@ -2719,7 +2704,7 @@ read_more:
set_bit(R10BIO_ReadError,
&r10_bio->state);
r10_bio->mddev = mddev;
- r10_bio->sector = mbio->bi_sector
+ r10_bio->sector = mbio->bi_iter.bi_sector
+ sectors_handled;
goto read_more;
@@ -3157,7 +3142,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_end_io = end_sync_read;
bio->bi_rw = READ;
from_addr = r10_bio->devs[j].addr;
- bio->bi_sector = from_addr + rdev->data_offset;
+ bio->bi_iter.bi_sector = from_addr +
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
atomic_inc(&rdev->nr_pending);
/* and we write to 'i' (if not in_sync) */
@@ -3181,7 +3167,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio->bi_rw = WRITE;
- bio->bi_sector = to_addr
+ bio->bi_iter.bi_sector = to_addr
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
atomic_inc(&r10_bio->remaining);
@@ -3210,7 +3196,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio->bi_rw = WRITE;
- bio->bi_sector = to_addr + rdev->data_offset;
+ bio->bi_iter.bi_sector = to_addr +
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
atomic_inc(&r10_bio->remaining);
break;
@@ -3218,10 +3205,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (j == conf->copies) {
/* Cannot recover, so abort the recovery or
* record a bad block */
- put_buf(r10_bio);
- if (rb2)
- atomic_dec(&rb2->remaining);
- r10_bio = rb2;
if (any_working) {
/* problem is that there are bad blocks
* on other device(s)
@@ -3253,6 +3236,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
mirror->recovery_disabled
= mddev->recovery_disabled;
}
+ put_buf(r10_bio);
+ if (rb2)
+ atomic_dec(&rb2->remaining);
+ r10_bio = rb2;
break;
}
}
@@ -3328,7 +3315,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
bio->bi_rw = READ;
- bio->bi_sector = sector +
+ bio->bi_iter.bi_sector = sector +
conf->mirrors[d].rdev->data_offset;
bio->bi_bdev = conf->mirrors[d].rdev->bdev;
count++;
@@ -3350,7 +3337,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio->bi_rw = WRITE;
- bio->bi_sector = sector +
+ bio->bi_iter.bi_sector = sector +
conf->mirrors[d].replacement->data_offset;
bio->bi_bdev = conf->mirrors[d].replacement->bdev;
count++;
@@ -3397,7 +3384,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio2 = bio2->bi_next) {
/* remove last page from this bio */
bio2->bi_vcnt--;
- bio2->bi_size -= len;
+ bio2->bi_iter.bi_size -= len;
bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
}
goto bio_full;
@@ -3747,7 +3734,8 @@ static int run(struct mddev *mddev)
!test_bit(In_sync, &disk->rdev->flags)) {
disk->head_position = 0;
mddev->degraded++;
- if (disk->rdev)
+ if (disk->rdev &&
+ disk->rdev->saved_raid_disk < 0)
conf->fullsync = 1;
}
disk->recovery_disabled = mddev->recovery_disabled - 1;
@@ -4417,7 +4405,7 @@ read_more:
read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
read_bio->bi_bdev = rdev->bdev;
- read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset);
read_bio->bi_private = r10_bio;
read_bio->bi_end_io = end_sync_read;
@@ -4425,7 +4413,7 @@ read_more:
read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
read_bio->bi_flags |= 1 << BIO_UPTODATE;
read_bio->bi_vcnt = 0;
- read_bio->bi_size = 0;
+ read_bio->bi_iter.bi_size = 0;
r10_bio->master_bio = read_bio;
r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
@@ -4451,7 +4439,8 @@ read_more:
bio_reset(b);
b->bi_bdev = rdev2->bdev;
- b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
+ b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
+ rdev2->new_data_offset;
b->bi_private = r10_bio;
b->bi_end_io = end_reshape_write;
b->bi_rw = WRITE;
@@ -4478,7 +4467,7 @@ read_more:
bio2 = bio2->bi_next) {
/* Remove last page from this bio */
bio2->bi_vcnt--;
- bio2->bi_size -= len;
+ bio2->bi_iter.bi_size -= len;
bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
}
goto bio_full;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index cc055da02e2a..16f5c21963db 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -133,7 +133,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
int sectors = bio_sectors(bio);
- if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
+ if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
return bio->bi_next;
else
return NULL;
@@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi)
return_bi = bi->bi_next;
bi->bi_next = NULL;
- bi->bi_size = 0;
+ bi->bi_iter.bi_size = 0;
trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
bi, 0);
bio_endio(bi, 0);
@@ -675,8 +675,10 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
|| !conf->inactive_blocked),
*(conf->hash_locks + hash));
conf->inactive_blocked = 0;
- } else
+ } else {
init_stripe(sh, sector, previous);
+ atomic_inc(&sh->count);
+ }
} else {
spin_lock(&conf->device_lock);
if (atomic_read(&sh->count)) {
@@ -687,20 +689,19 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
} else {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
- BUG_ON(list_empty(&sh->lru));
+ BUG_ON(list_empty(&sh->lru) &&
+ !test_bit(STRIPE_EXPANDING, &sh->state));
list_del_init(&sh->lru);
if (sh->group) {
sh->group->stripes_cnt--;
sh->group = NULL;
}
}
+ atomic_inc(&sh->count);
spin_unlock(&conf->device_lock);
}
} while (sh == NULL);
- if (sh)
- atomic_inc(&sh->count);
-
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
@@ -851,10 +852,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_rw, i);
atomic_inc(&sh->count);
if (use_new_offset(conf, sh))
- bi->bi_sector = (sh->sector
+ bi->bi_iter.bi_sector = (sh->sector
+ rdev->new_data_offset);
else
- bi->bi_sector = (sh->sector
+ bi->bi_iter.bi_sector = (sh->sector
+ rdev->data_offset);
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
bi->bi_rw |= REQ_NOMERGE;
@@ -862,7 +863,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_vcnt = 1;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
bi->bi_io_vec[0].bv_offset = 0;
- bi->bi_size = STRIPE_SIZE;
+ bi->bi_iter.bi_size = STRIPE_SIZE;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
@@ -898,15 +899,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
rbi->bi_rw, i);
atomic_inc(&sh->count);
if (use_new_offset(conf, sh))
- rbi->bi_sector = (sh->sector
+ rbi->bi_iter.bi_sector = (sh->sector
+ rrdev->new_data_offset);
else
- rbi->bi_sector = (sh->sector
+ rbi->bi_iter.bi_sector = (sh->sector
+ rrdev->data_offset);
rbi->bi_vcnt = 1;
rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
rbi->bi_io_vec[0].bv_offset = 0;
- rbi->bi_size = STRIPE_SIZE;
+ rbi->bi_iter.bi_size = STRIPE_SIZE;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
@@ -934,24 +935,24 @@ static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
sector_t sector, struct dma_async_tx_descriptor *tx)
{
- struct bio_vec *bvl;
+ struct bio_vec bvl;
+ struct bvec_iter iter;
struct page *bio_page;
- int i;
int page_offset;
struct async_submit_ctl submit;
enum async_tx_flags flags = 0;
- if (bio->bi_sector >= sector)
- page_offset = (signed)(bio->bi_sector - sector) * 512;
+ if (bio->bi_iter.bi_sector >= sector)
+ page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
else
- page_offset = (signed)(sector - bio->bi_sector) * -512;
+ page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
if (frombio)
flags |= ASYNC_TX_FENCE;
init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
- bio_for_each_segment(bvl, bio, i) {
- int len = bvl->bv_len;
+ bio_for_each_segment(bvl, bio, iter) {
+ int len = bvl.bv_len;
int clen;
int b_offset = 0;
@@ -967,8 +968,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
clen = len;
if (clen > 0) {
- b_offset += bvl->bv_offset;
- bio_page = bvl->bv_page;
+ b_offset += bvl.bv_offset;
+ bio_page = bvl.bv_page;
if (frombio)
tx = async_memcpy(page, bio_page, page_offset,
b_offset, clen, &submit);
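
As part of the same conversion, bio_for_each_segment() now hands back a struct bio_vec by value and keeps its position in a caller-supplied struct bvec_iter, which is why async_copy_data() switches from bvl->bv_len to bvl.bv_len. A tiny usage sketch under that assumption:

	struct bio_vec bv;	/* a copy of each segment, not a pointer */
	struct bvec_iter iter;
	unsigned total = 0;

	bio_for_each_segment(bv, bio, iter)
		total += bv.bv_len;	/* members accessed with '.', not '->' */
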
@@ -1011,7 +1012,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
BUG_ON(!dev->read);
rbi = dev->read;
dev->read = NULL;
- while (rbi && rbi->bi_sector <
+ while (rbi && rbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
rbi2 = r5_next_bio(rbi, dev->sector);
if (!raid5_dec_bi_active_stripes(rbi)) {
@@ -1047,7 +1048,7 @@ static void ops_run_biofill(struct stripe_head *sh)
dev->read = rbi = dev->toread;
dev->toread = NULL;
spin_unlock_irq(&sh->stripe_lock);
- while (rbi && rbi->bi_sector <
+ while (rbi && rbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
tx = async_copy_data(0, rbi, dev->page,
dev->sector, tx);
@@ -1389,7 +1390,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
wbi = dev->written = chosen;
spin_unlock_irq(&sh->stripe_lock);
- while (wbi && wbi->bi_sector <
+ while (wbi && wbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
if (wbi->bi_rw & REQ_FUA)
set_bit(R5_WantFUA, &dev->flags);
@@ -2110,6 +2111,7 @@ static void raid5_end_write_request(struct bio *bi, int error)
set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
} else {
if (!uptodate) {
+ set_bit(STRIPE_DEGRADED, &sh->state);
set_bit(WriteErrorSeen, &rdev->flags);
set_bit(R5_WriteError, &sh->dev[i].flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
@@ -2613,7 +2615,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
int firstwrite=0;
pr_debug("adding bi b#%llu to stripe s#%llu\n",
- (unsigned long long)bi->bi_sector,
+ (unsigned long long)bi->bi_iter.bi_sector,
(unsigned long long)sh->sector);
/*
@@ -2631,12 +2633,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
firstwrite = 1;
} else
bip = &sh->dev[dd_idx].toread;
- while (*bip && (*bip)->bi_sector < bi->bi_sector) {
- if (bio_end_sector(*bip) > bi->bi_sector)
+ while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
+ if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
goto overlap;
bip = & (*bip)->bi_next;
}
- if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
+ if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
goto overlap;
BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2650,7 +2652,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
sector_t sector = sh->dev[dd_idx].sector;
for (bi=sh->dev[dd_idx].towrite;
sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
- bi && bi->bi_sector <= sector;
+ bi && bi->bi_iter.bi_sector <= sector;
bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
if (bio_end_sector(bi) >= sector)
sector = bio_end_sector(bi);
@@ -2660,7 +2662,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
}
pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
- (unsigned long long)(*bip)->bi_sector,
+ (unsigned long long)(*bip)->bi_iter.bi_sector,
(unsigned long long)sh->sector, dd_idx);
spin_unlock_irq(&sh->stripe_lock);
@@ -2735,7 +2737,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
- while (bi && bi->bi_sector <
+ while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2754,7 +2756,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
bi = sh->dev[i].written;
sh->dev[i].written = NULL;
if (bi) bitmap_end = 1;
- while (bi && bi->bi_sector <
+ while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2778,7 +2780,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
spin_unlock_irq(&sh->stripe_lock);
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
- while (bi && bi->bi_sector <
+ while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi =
r5_next_bio(bi, sh->dev[i].sector);
@@ -3002,7 +3004,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
clear_bit(R5_UPTODATE, &dev->flags);
wbi = dev->written;
dev->written = NULL;
- while (wbi && wbi->bi_sector <
+ while (wbi && wbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
wbi2 = r5_next_bio(wbi, dev->sector);
if (!raid5_dec_bi_active_stripes(wbi)) {
@@ -3608,7 +3610,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
*/
set_bit(R5_Insync, &dev->flags);
- if (rdev && test_bit(R5_WriteError, &dev->flags)) {
+ if (test_bit(R5_WriteError, &dev->flags)) {
/* This flag does not apply to '.replacement'
* only to .rdev, so make sure to check that*/
struct md_rdev *rdev2 = rcu_dereference(
@@ -3621,7 +3623,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
} else
clear_bit(R5_WriteError, &dev->flags);
}
- if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
+ if (test_bit(R5_MadeGood, &dev->flags)) {
/* This flag does not apply to '.replacement'
* only to .rdev, so make sure to check that*/
struct md_rdev *rdev2 = rcu_dereference(
@@ -4094,7 +4096,7 @@ static int raid5_mergeable_bvec(struct request_queue *q,
static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
- sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+ sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bio_sectors(bio);
@@ -4231,9 +4233,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
/*
* compute position
*/
- align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
- 0,
- &dd_idx, NULL);
+ align_bi->bi_iter.bi_sector =
+ raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
+ 0, &dd_idx, NULL);
end_sector = bio_end_sector(align_bi);
rcu_read_lock();
@@ -4258,7 +4260,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
if (!bio_fits_rdev(align_bi) ||
- is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
+ is_badblock(rdev, align_bi->bi_iter.bi_sector,
+ bio_sectors(align_bi),
&first_bad, &bad_sectors)) {
/* too big in some way, or has a known bad block */
bio_put(align_bi);
@@ -4267,7 +4270,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
}
/* No reshape active, so we can trust rdev->data_offset */
- align_bi->bi_sector += rdev->data_offset;
+ align_bi->bi_iter.bi_sector += rdev->data_offset;
spin_lock_irq(&conf->device_lock);
wait_event_lock_irq(conf->wait_for_stripe,
@@ -4279,7 +4282,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
align_bi, disk_devt(mddev->gendisk),
- raid_bio->bi_sector);
+ raid_bio->bi_iter.bi_sector);
generic_make_request(align_bi);
return 1;
} else {
@@ -4462,8 +4465,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
/* Skip discard while reshape is happening */
return;
- logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
- last_sector = bi->bi_sector + (bi->bi_size>>9);
+ logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+ last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -4567,7 +4570,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
return;
}
- logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+ logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
last_sector = bio_end_sector(bi);
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -5051,7 +5054,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
int remaining;
int handled = 0;
- logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+ logical_sector = raid_bio->bi_iter.bi_sector &
+ ~((sector_t)STRIPE_SECTORS-1);
sector = raid5_compute_sector(conf, logical_sector,
0, &dd_idx, NULL);
last_sector = bio_end_sector(raid_bio);
@@ -5510,23 +5514,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
return sectors * (raid_disks - conf->max_degraded);
}
+static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+ safe_put_page(percpu->spare_page);
+ kfree(percpu->scribble);
+ percpu->spare_page = NULL;
+ percpu->scribble = NULL;
+}
+
+static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+ if (conf->level == 6 && !percpu->spare_page)
+ percpu->spare_page = alloc_page(GFP_KERNEL);
+ if (!percpu->scribble)
+ percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+
+ if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
+ free_scratch_buffer(conf, percpu);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static void raid5_free_percpu(struct r5conf *conf)
{
- struct raid5_percpu *percpu;
unsigned long cpu;
if (!conf->percpu)
return;
- get_online_cpus();
- for_each_possible_cpu(cpu) {
- percpu = per_cpu_ptr(conf->percpu, cpu);
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
- }
#ifdef CONFIG_HOTPLUG_CPU
unregister_cpu_notifier(&conf->cpu_notify);
#endif
+
+ get_online_cpus();
+ for_each_possible_cpu(cpu)
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
put_online_cpus();
free_percpu(conf->percpu);
@@ -5553,15 +5577,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- if (conf->level == 6 && !percpu->spare_page)
- percpu->spare_page = alloc_page(GFP_KERNEL);
- if (!percpu->scribble)
- percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-
- if (!percpu->scribble ||
- (conf->level == 6 && !percpu->spare_page)) {
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
+ if (alloc_scratch_buffer(conf, percpu)) {
pr_err("%s: failed memory allocation for cpu%ld\n",
__func__, cpu);
return notifier_from_errno(-ENOMEM);
@@ -5569,10 +5585,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
- percpu->spare_page = NULL;
- percpu->scribble = NULL;
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
break;
default:
break;
@@ -5584,40 +5597,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
static int raid5_alloc_percpu(struct r5conf *conf)
{
unsigned long cpu;
- struct page *spare_page;
- struct raid5_percpu __percpu *allcpus;
- void *scribble;
- int err;
+ int err = 0;
- allcpus = alloc_percpu(struct raid5_percpu);
- if (!allcpus)
+ conf->percpu = alloc_percpu(struct raid5_percpu);
+ if (!conf->percpu)
return -ENOMEM;
- conf->percpu = allcpus;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ conf->cpu_notify.notifier_call = raid456_cpu_notify;
+ conf->cpu_notify.priority = 0;
+ err = register_cpu_notifier(&conf->cpu_notify);
+ if (err)
+ return err;
+#endif
get_online_cpus();
- err = 0;
for_each_present_cpu(cpu) {
- if (conf->level == 6) {
- spare_page = alloc_page(GFP_KERNEL);
- if (!spare_page) {
- err = -ENOMEM;
- break;
- }
- per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
- }
- scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
- if (!scribble) {
- err = -ENOMEM;
+ err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+ if (err) {
+ pr_err("%s: failed memory allocation for cpu%ld\n",
+ __func__, cpu);
break;
}
- per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
}
-#ifdef CONFIG_HOTPLUG_CPU
- conf->cpu_notify.notifier_call = raid456_cpu_notify;
- conf->cpu_notify.priority = 0;
- if (err == 0)
- err = register_cpu_notifier(&conf->cpu_notify);
-#endif
put_online_cpus();
return err;
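
The raid5 per-CPU rework funnels both the boot-time loop and the hotplug callback through the same alloc_scratch_buffer()/free_scratch_buffer() helpers, and registers the CPU notifier before allocating for the CPUs that are already present. The point, as far as I can tell, is that a CPU appearing while (or after) the loop runs is then handled by raid456_cpu_notify() instead of being missed. Roughly (error handling omitted):

	register_cpu_notifier(&conf->cpu_notify);	/* covers CPUs that show up later */
	for_each_present_cpu(cpu)			/* covers CPUs already there */
		alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
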
@@ -6099,6 +6101,7 @@ static int run(struct mddev *mddev)
blk_queue_io_min(mddev->queue, chunk_size);
blk_queue_io_opt(mddev->queue, chunk_size *
(conf->raid_disks - conf->max_degraded));
+ mddev->queue->limits.raid_partial_stripes_expensive = 1;
/*
* We can only discard a whole stripe. It doesn't make sense to
* discard data disk but write parity disk
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 8270388e2a0d..1d0758aeb8e4 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -172,6 +172,9 @@ comment "Media ancillary drivers (tuners, sensors, i2c, frontends)"
config MEDIA_SUBDRV_AUTOSELECT
bool "Autoselect ancillary drivers (tuners, sensors, i2c, frontends)"
depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_CAMERA_SUPPORT
+ depends on HAS_IOMEM
+ select I2C
+ select I2C_MUX
default y
help
By default, a media driver auto-selects all possible ancillary
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index 419a2d6b4349..f19a2ccd1e4b 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -239,6 +239,7 @@
#define USB_PID_AVERMEDIA_A835B_4835 0x4835
#define USB_PID_AVERMEDIA_1867 0x1867
#define USB_PID_AVERMEDIA_A867 0xa867
+#define USB_PID_AVERMEDIA_H335 0x0335
#define USB_PID_AVERMEDIA_TWINSTAR 0x0825
#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
@@ -317,6 +318,7 @@
#define USB_PID_WINFAST_DTV_DONGLE_H 0x60f6
#define USB_PID_WINFAST_DTV_DONGLE_STK7700P_2 0x6f01
#define USB_PID_WINFAST_DTV_DONGLE_GOLD 0x6029
+#define USB_PID_WINFAST_DTV_DONGLE_MINID 0x6f0f
#define USB_PID_GENPIX_8PSK_REV_1_COLD 0x0200
#define USB_PID_GENPIX_8PSK_REV_1_WARM 0x0201
#define USB_PID_GENPIX_8PSK_REV_2 0x0202
@@ -365,6 +367,7 @@
#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac
#define USB_PID_TECHNISAT_USB2_HDCI_V1 0x0001
#define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002
+#define USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI 0x0003
#define USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2 0x0004
#define USB_PID_TECHNISAT_USB2_DVB_S2 0x0500
#define USB_PID_CPYTO_REDI_PC50A 0xa803
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index f91c80c0e9ec..8a86b3025637 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -179,7 +179,7 @@ static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
eth = eth_hdr(skb);
if (*eth->h_dest & 1) {
- if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
+ if(ether_addr_equal(eth->h_dest,dev->broadcast))
skb->pkt_type=PACKET_BROADCAST;
else
skb->pkt_type=PACKET_MULTICAST;
@@ -674,11 +674,13 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
if (priv->rx_mode != RX_MODE_PROMISC) {
if (priv->ule_skb->data[0] & 0x01) {
/* multicast or broadcast */
- if (memcmp(priv->ule_skb->data, bc_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(priv->ule_skb->data, bc_addr)) {
/* multicast */
if (priv->rx_mode == RX_MODE_MULTI) {
int i;
- for(i = 0; i < priv->multi_num && memcmp(priv->ule_skb->data, priv->multi_macs[i], ETH_ALEN); i++)
+ for(i = 0; i < priv->multi_num &&
+ !ether_addr_equal(priv->ule_skb->data,
+ priv->multi_macs[i]); i++)
;
if (i == priv->multi_num)
drop = 1;
@@ -688,7 +690,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
}
/* else: broadcast */
}
- else if (memcmp(priv->ule_skb->data, dev->dev_addr, ETH_ALEN))
+ else if (!ether_addr_equal(priv->ule_skb->data, dev->dev_addr))
drop = 1;
/* else: destination address matches the MAC address of our receiver device */
}
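
One thing worth keeping in mind when reading the dvb_net.c hunks: memcmp() returns 0 on a match while ether_addr_equal() returns true on a match, so every converted test flips its logical sense. Schematically (a sketch, not from the patch):

	if (memcmp(a, b, ETH_ALEN) == 0)	/* old style: zero means "addresses match" */
		drop = 0;
	if (ether_addr_equal(a, b))		/* new style: true means "addresses match" */
		drop = 0;
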
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index bddbab43a2df..dd12a1ebda82 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -35,6 +35,13 @@ config DVB_STV6110x
help
A Silicon tuner that supports DVB-S and DVB-S2 modes
+config DVB_M88DS3103
+ tristate "Montage M88DS3103"
+ depends on DVB_CORE && I2C && I2C_MUX
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y when you want to support this frontend.
+
comment "Multistandard (cable + terrestrial) frontends"
depends on DVB_CORE
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index f9cb43d9aed9..0c75a6aafb9d 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -85,6 +85,7 @@ obj-$(CONFIG_DVB_STV6110) += stv6110.o
obj-$(CONFIG_DVB_STV0900) += stv0900.o
obj-$(CONFIG_DVB_STV090x) += stv090x.o
obj-$(CONFIG_DVB_STV6110x) += stv6110x.o
+obj-$(CONFIG_DVB_M88DS3103) += m88ds3103.o
obj-$(CONFIG_DVB_ISL6423) += isl6423.o
obj-$(CONFIG_DVB_EC100) += ec100.o
obj-$(CONFIG_DVB_HD29L2) += hd29l2.o
diff --git a/drivers/media/dvb-frontends/a8293.c b/drivers/media/dvb-frontends/a8293.c
index 74fbb5d58bed..780da58132f1 100644
--- a/drivers/media/dvb-frontends/a8293.c
+++ b/drivers/media/dvb-frontends/a8293.c
@@ -96,6 +96,8 @@ static int a8293_set_voltage(struct dvb_frontend *fe,
if (ret)
goto err;
+ usleep_range(1500, 50000);
+
return ret;
err:
dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index 476b422ccf19..a6c3c9e2e897 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -135,15 +135,33 @@
enum cmds {
- CMD_SET_VCO = 0x10,
- CMD_TUNEREQUEST = 0x11,
- CMD_MPEGCONFIG = 0x13,
- CMD_TUNERINIT = 0x14,
- CMD_LNBSEND = 0x21, /* Formerly CMD_SEND_DISEQC */
- CMD_LNBDCLEVEL = 0x22,
- CMD_SET_TONE = 0x23,
- CMD_UPDFWVERS = 0x35,
- CMD_TUNERSLEEP = 0x36,
+ CMD_SET_VCOFREQ = 0x10,
+ CMD_TUNEREQUEST = 0x11,
+ CMD_GLOBAL_MPEGCFG = 0x13,
+ CMD_MPEGCFG = 0x14,
+ CMD_TUNERINIT = 0x15,
+ CMD_GET_SRATE = 0x18,
+ CMD_SET_GOLDCODE = 0x19,
+ CMD_GET_AGCACC = 0x1a,
+ CMD_DEMODINIT = 0x1b,
+ CMD_GETCTLACC = 0x1c,
+
+ CMD_LNBCONFIG = 0x20,
+ CMD_LNBSEND = 0x21,
+ CMD_LNBDCLEVEL = 0x22,
+ CMD_LNBPCBCONFIG = 0x23,
+ CMD_LNBSENDTONEBST = 0x24,
+ CMD_LNBUPDREPLY = 0x25,
+
+ CMD_SET_GPIOMODE = 0x30,
+ CMD_SET_GPIOEN = 0x31,
+ CMD_SET_GPIODIR = 0x32,
+ CMD_SET_GPIOOUT = 0x33,
+ CMD_ENABLERSCORR = 0x34,
+ CMD_FWVERSION = 0x35,
+ CMD_SET_SLEEPMODE = 0x36,
+ CMD_BERCTRL = 0x3c,
+ CMD_EVENTCTRL = 0x3d,
};
static LIST_HEAD(hybrid_tuner_instance_list);
@@ -619,8 +637,8 @@ static int cx24117_load_firmware(struct dvb_frontend *fe,
cx24117_writereg(state, 0xf7, 0x0c);
cx24117_writereg(state, 0xe0, 0x00);
- /* CMD 1B */
- cmd.args[0] = 0x1b;
+ /* Init demodulator */
+ cmd.args[0] = CMD_DEMODINIT;
cmd.args[1] = 0x00;
cmd.args[2] = 0x01;
cmd.args[3] = 0x00;
@@ -629,8 +647,8 @@ static int cx24117_load_firmware(struct dvb_frontend *fe,
if (ret != 0)
goto error;
- /* CMD 10 */
- cmd.args[0] = CMD_SET_VCO;
+ /* Set VCO frequency */
+ cmd.args[0] = CMD_SET_VCOFREQ;
cmd.args[1] = 0x06;
cmd.args[2] = 0x2b;
cmd.args[3] = 0xd8;
@@ -648,8 +666,8 @@ static int cx24117_load_firmware(struct dvb_frontend *fe,
if (ret != 0)
goto error;
- /* CMD 15 */
- cmd.args[0] = 0x15;
+ /* Tuner init */
+ cmd.args[0] = CMD_TUNERINIT;
cmd.args[1] = 0x00;
cmd.args[2] = 0x01;
cmd.args[3] = 0x00;
@@ -667,8 +685,8 @@ static int cx24117_load_firmware(struct dvb_frontend *fe,
if (ret != 0)
goto error;
- /* CMD 13 */
- cmd.args[0] = CMD_MPEGCONFIG;
+ /* Global MPEG config */
+ cmd.args[0] = CMD_GLOBAL_MPEGCFG;
cmd.args[1] = 0x00;
cmd.args[2] = 0x00;
cmd.args[3] = 0x00;
@@ -679,9 +697,9 @@ static int cx24117_load_firmware(struct dvb_frontend *fe,
if (ret != 0)
goto error;
- /* CMD 14 */
+ /* MPEG config for each demod */
for (i = 0; i < 2; i++) {
- cmd.args[0] = CMD_TUNERINIT;
+ cmd.args[0] = CMD_MPEGCFG;
cmd.args[1] = (u8) i;
cmd.args[2] = 0x00;
cmd.args[3] = 0x05;
@@ -699,8 +717,8 @@ static int cx24117_load_firmware(struct dvb_frontend *fe,
cx24117_writereg(state, 0xcf, 0x00);
cx24117_writereg(state, 0xe5, 0x04);
- /* Firmware CMD 35: Get firmware version */
- cmd.args[0] = CMD_UPDFWVERS;
+ /* Get firmware version */
+ cmd.args[0] = CMD_FWVERSION;
cmd.len = 2;
for (i = 0; i < 4; i++) {
cmd.args[1] = i;
@@ -779,8 +797,8 @@ static int cx24117_read_signal_strength(struct dvb_frontend *fe,
u8 reg = (state->demod == 0) ?
CX24117_REG_SSTATUS0 : CX24117_REG_SSTATUS1;
- /* Firmware CMD 1A */
- cmd.args[0] = 0x1a;
+ /* Read AGC accumulator register */
+ cmd.args[0] = CMD_GET_AGCACC;
cmd.args[1] = (u8) state->demod;
cmd.len = 2;
ret = cx24117_cmd_execute(fe, &cmd);
@@ -899,22 +917,15 @@ static int cx24117_set_voltage(struct dvb_frontend *fe,
voltage == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" :
"SEC_VOLTAGE_OFF");
- /* CMD 32 */
- cmd.args[0] = 0x32;
- cmd.args[1] = reg;
- cmd.args[2] = reg;
+ /* Prepare a set GPIO logic level CMD */
+ cmd.args[0] = CMD_SET_GPIOOUT;
+ cmd.args[2] = reg; /* mask */
cmd.len = 3;
- ret = cx24117_cmd_execute(fe, &cmd);
- if (ret)
- return ret;
if ((voltage == SEC_VOLTAGE_13) ||
(voltage == SEC_VOLTAGE_18)) {
- /* CMD 33 */
- cmd.args[0] = 0x33;
+ /* power on LNB */
cmd.args[1] = reg;
- cmd.args[2] = reg;
- cmd.len = 3;
ret = cx24117_cmd_execute(fe, &cmd);
if (ret != 0)
return ret;
@@ -926,22 +937,22 @@ static int cx24117_set_voltage(struct dvb_frontend *fe,
/* Wait for voltage/min repeat delay */
msleep(100);
- /* CMD 22 - CMD_LNBDCLEVEL */
+ /* Set 13V/18V select pin */
cmd.args[0] = CMD_LNBDCLEVEL;
cmd.args[1] = state->demod ? 0 : 1;
cmd.args[2] = (voltage == SEC_VOLTAGE_18 ? 0x01 : 0x00);
cmd.len = 3;
+ ret = cx24117_cmd_execute(fe, &cmd);
/* Min delay time before DiSEqC send */
msleep(20);
} else {
- cmd.args[0] = 0x33;
+ /* power off LNB */
cmd.args[1] = 0x00;
- cmd.args[2] = reg;
- cmd.len = 3;
+ ret = cx24117_cmd_execute(fe, &cmd);
}
- return cx24117_cmd_execute(fe, &cmd);
+ return ret;
}
static int cx24117_set_tone(struct dvb_frontend *fe,
@@ -968,8 +979,7 @@ static int cx24117_set_tone(struct dvb_frontend *fe,
msleep(20);
/* Set the tone */
- /* CMD 23 - CMD_SET_TONE */
- cmd.args[0] = CMD_SET_TONE;
+ cmd.args[0] = CMD_LNBPCBCONFIG;
cmd.args[1] = (state->demod ? 0 : 1);
cmd.args[2] = 0x00;
cmd.args[3] = 0x00;
@@ -1166,7 +1176,7 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
switch (demod) {
case 0:
- dev_err(&state->priv->i2c->dev,
+ dev_err(&i2c->dev,
"%s: Error attaching frontend %d\n",
KBUILD_MODNAME, demod);
goto error1;
@@ -1190,12 +1200,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
state->demod = demod - 1;
state->priv = priv;
- /* test i2c bus for ack */
- if (demod == 0) {
- if (cx24117_readreg(state, 0x00) < 0)
- goto error3;
- }
-
dev_info(&state->priv->i2c->dev,
"%s: Attaching frontend %d\n",
KBUILD_MODNAME, state->demod);
@@ -1206,8 +1210,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
state->frontend.demodulator_priv = state;
return &state->frontend;
-error3:
- kfree(state);
error2:
cx24117_release_priv(priv);
error1:
@@ -1231,8 +1233,8 @@ static int cx24117_initfe(struct dvb_frontend *fe)
mutex_lock(&state->priv->fe_lock);
- /* Firmware CMD 36: Power config */
- cmd.args[0] = CMD_TUNERSLEEP;
+ /* Set sleep mode off */
+ cmd.args[0] = CMD_SET_SLEEPMODE;
cmd.args[1] = (state->demod ? 1 : 0);
cmd.args[2] = 0;
cmd.len = 3;
@@ -1244,8 +1246,8 @@ static int cx24117_initfe(struct dvb_frontend *fe)
if (ret != 0)
goto exit;
- /* CMD 3C */
- cmd.args[0] = 0x3c;
+ /* Set BER control */
+ cmd.args[0] = CMD_BERCTRL;
cmd.args[1] = (state->demod ? 1 : 0);
cmd.args[2] = 0x10;
cmd.args[3] = 0x10;
@@ -1254,12 +1256,22 @@ static int cx24117_initfe(struct dvb_frontend *fe)
if (ret != 0)
goto exit;
- /* CMD 34 */
- cmd.args[0] = 0x34;
+ /* Set RS correction (enable/disable) */
+ cmd.args[0] = CMD_ENABLERSCORR;
cmd.args[1] = (state->demod ? 1 : 0);
cmd.args[2] = CX24117_OCC;
cmd.len = 3;
ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto exit;
+
+ /* Set GPIO direction */
+ /* Set as output - controls LNB power on/off */
+ cmd.args[0] = CMD_SET_GPIODIR;
+ cmd.args[1] = 0x30;
+ cmd.args[2] = 0x30;
+ cmd.len = 3;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
exit:
mutex_unlock(&state->priv->fe_lock);
@@ -1278,8 +1290,8 @@ static int cx24117_sleep(struct dvb_frontend *fe)
dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n",
__func__, state->demod);
- /* Firmware CMD 36: Power config */
- cmd.args[0] = CMD_TUNERSLEEP;
+ /* Set sleep mode on */
+ cmd.args[0] = CMD_SET_SLEEPMODE;
cmd.args[1] = (state->demod ? 1 : 0);
cmd.args[2] = 1;
cmd.len = 3;
@@ -1558,7 +1570,8 @@ static int cx24117_get_frontend(struct dvb_frontend *fe)
u8 buf[0x1f-4];
- cmd.args[0] = 0x1c;
+ /* Read current tune parameters */
+ cmd.args[0] = CMD_GETCTLACC;
cmd.args[1] = (u8) state->demod;
cmd.len = 2;
ret = cx24117_cmd_execute(fe, &cmd);
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 6dbbee453ee1..1632d78a5479 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
+#include <asm/div64.h>
#include "dvb_math.h"
@@ -118,6 +119,12 @@ struct dib8000_state {
u8 longest_intlv_layer;
u16 output_mode;
+ /* for DVBv5 stats */
+ s64 init_ucb;
+ unsigned long per_jiffies_stats;
+ unsigned long ber_jiffies_stats;
+ unsigned long ber_jiffies_stats_layer[3];
+
#ifdef DIB8000_AGC_FREEZE
u16 agc1_max;
u16 agc1_min;
@@ -157,15 +164,10 @@ static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
return ret;
}
-static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
+static u16 __dib8000_read_word(struct dib8000_state *state, u16 reg)
{
u16 ret;
- if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
- return 0;
- }
-
state->i2c_write_buffer[0] = reg >> 8;
state->i2c_write_buffer[1] = reg & 0xff;
@@ -183,6 +185,21 @@ static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
dprintk("i2c read error on %d", reg);
ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+
+ return ret;
+}
+
+static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
+{
+ u16 ret;
+
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return 0;
+ }
+
+ ret = __dib8000_read_word(state, reg);
+
mutex_unlock(&state->i2c_buffer_lock);
return ret;
@@ -192,8 +209,15 @@ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
{
u16 rw[2];
- rw[0] = dib8000_read_word(state, reg + 0);
- rw[1] = dib8000_read_word(state, reg + 1);
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return 0;
+ }
+
+ rw[0] = __dib8000_read_word(state, reg + 0);
+ rw[1] = __dib8000_read_word(state, reg + 1);
+
+ mutex_unlock(&state->i2c_buffer_lock);
return ((rw[0] << 16) | (rw[1]));
}
@@ -787,7 +811,7 @@ int dib8000_update_pll(struct dvb_frontend *fe,
dprintk("PLL: Update ratio (prediv: %d, ratio: %d)", state->cfg.pll->pll_prediv, ratio);
dib8000_write_word(state, 901, (state->cfg.pll->pll_prediv << 8) | (ratio << 0)); /* only the PLL ratio is updated. */
}
-}
+ }
return 0;
}
@@ -966,6 +990,45 @@ static u16 dib8000_identify(struct i2c_device *client)
return value;
}
+static int dib8000_read_unc_blocks(struct dvb_frontend *fe, u32 *unc);
+
+static void dib8000_reset_stats(struct dvb_frontend *fe)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache;
+ u32 ucb;
+
+ memset(&c->strength, 0, sizeof(c->strength));
+ memset(&c->cnr, 0, sizeof(c->cnr));
+ memset(&c->post_bit_error, 0, sizeof(c->post_bit_error));
+ memset(&c->post_bit_count, 0, sizeof(c->post_bit_count));
+ memset(&c->block_error, 0, sizeof(c->block_error));
+
+ c->strength.len = 1;
+ c->cnr.len = 1;
+ c->block_error.len = 1;
+ c->block_count.len = 1;
+ c->post_bit_error.len = 1;
+ c->post_bit_count.len = 1;
+
+ c->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ c->strength.stat[0].uvalue = 0;
+
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ dib8000_read_unc_blocks(fe, &ucb);
+
+ state->init_ucb = -ucb;
+ state->ber_jiffies_stats = 0;
+ state->per_jiffies_stats = 0;
+ memset(&state->ber_jiffies_stats_layer, 0,
+ sizeof(state->ber_jiffies_stats_layer));
+}
+
static int dib8000_reset(struct dvb_frontend *fe)
{
struct dib8000_state *state = fe->demodulator_priv;
@@ -1071,6 +1134,8 @@ static int dib8000_reset(struct dvb_frontend *fe)
dib8000_set_power_mode(state, DIB8000_POWER_INTERFACE_ONLY);
+ dib8000_reset_stats(fe);
+
return 0;
}
@@ -2445,7 +2510,8 @@ static int dib8000_autosearch_start(struct dvb_frontend *fe)
if (state->revision == 0x8090)
internal = dib8000_read32(state, 23) / 1000;
- if (state->autosearch_state == AS_SEARCHING_FFT) {
+ if ((state->revision >= 0x8002) &&
+ (state->autosearch_state == AS_SEARCHING_FFT)) {
dib8000_write_word(state, 37, 0x0065); /* P_ctrl_pha_off_max default values */
dib8000_write_word(state, 116, 0x0000); /* P_ana_gain to 0 */
@@ -2481,7 +2547,8 @@ static int dib8000_autosearch_start(struct dvb_frontend *fe)
dib8000_write_word(state, 770, (dib8000_read_word(state, 770) & 0xdfff) | (1 << 13)); /* P_restart_ccg = 1 */
dib8000_write_word(state, 770, (dib8000_read_word(state, 770) & 0xdfff) | (0 << 13)); /* P_restart_ccg = 0 */
dib8000_write_word(state, 0, (dib8000_read_word(state, 0) & 0x7ff) | (0 << 15) | (1 << 13)); /* P_restart_search = 0; */
- } else if (state->autosearch_state == AS_SEARCHING_GUARD) {
+ } else if ((state->revision >= 0x8002) &&
+ (state->autosearch_state == AS_SEARCHING_GUARD)) {
c->transmission_mode = TRANSMISSION_MODE_8K;
c->guard_interval = GUARD_INTERVAL_1_8;
c->inversion = 0;
@@ -2583,7 +2650,8 @@ static int dib8000_autosearch_irq(struct dvb_frontend *fe)
struct dib8000_state *state = fe->demodulator_priv;
u16 irq_pending = dib8000_read_word(state, 1284);
- if (state->autosearch_state == AS_SEARCHING_FFT) {
+ if ((state->revision >= 0x8002) &&
+ (state->autosearch_state == AS_SEARCHING_FFT)) {
if (irq_pending & 0x1) {
dprintk("dib8000_autosearch_irq: max correlation result available");
return 3;
@@ -2853,6 +2921,91 @@ static int dib8090p_init_sdram(struct dib8000_state *state)
return 0;
}
+/**
+ * is_manual_mode - Check if TMCC should be used for parameter settings
+ * @c: struct dtv_frontend_properties
+ *
+ * By default, the TMCC table should be used for parameter settings in
+ * most use cases. However, sometimes it is desirable to lock the demod
+ * to the manual parameters.
+ *
+ * In manual mode, the current dib8000_tune state machine is very strict:
+ * it requires both per-layer and per-transponder parameters to be
+ * properly specified, otherwise the device won't lock.
+ *
+ * Check if all those conditions are properly satisfied before allowing
+ * the device to use the manual frequency lock mode.
+ */
+static int is_manual_mode(struct dtv_frontend_properties *c)
+{
+ int i, n_segs = 0;
+
+ /* Use auto mode on DVB-T compat mode */
+ if (c->delivery_system != SYS_ISDBT)
+ return 0;
+
+ /*
+	 * Transmission mode is currently only detected in auto mode
+ */
+ if (c->transmission_mode == TRANSMISSION_MODE_AUTO) {
+ dprintk("transmission mode auto");
+ return 0;
+ }
+
+ /*
+	 * Guard interval is currently only detected in auto mode
+ */
+ if (c->guard_interval == GUARD_INTERVAL_AUTO) {
+ dprintk("guard interval auto");
+ return 0;
+ }
+
+ /*
+ * If no layer is enabled, assume auto mode, as at least one
+ * layer should be enabled
+ */
+ if (!c->isdbt_layer_enabled) {
+ dprintk("no layer modulation specified");
+ return 0;
+ }
+
+ /*
+ * Check if the per-layer parameters aren't auto and
+ * disable a layer if segment count is 0 or invalid.
+ */
+ for (i = 0; i < 3; i++) {
+ if (!(c->isdbt_layer_enabled & 1 << i))
+ continue;
+
+ if ((c->layer[i].segment_count > 13) ||
+ (c->layer[i].segment_count == 0)) {
+ c->isdbt_layer_enabled &= ~(1 << i);
+ continue;
+ }
+
+ n_segs += c->layer[i].segment_count;
+
+ if ((c->layer[i].modulation == QAM_AUTO) ||
+ (c->layer[i].fec == FEC_AUTO)) {
+ dprintk("layer %c has either modulation or FEC auto",
+ 'A' + i);
+ return 0;
+ }
+ }
+
+ /*
+	 * Userspace specified an invalid number of segments.
+	 * Fall back to auto mode.
+ */
+ if (n_segs == 0 || n_segs > 13) {
+ dprintk("number of segments is invalid");
+ return 0;
+ }
+
+ /* Everything looks ok for manual mode */
+ return 1;
+}
+
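As a concrete (hypothetical) example, a property cache filled like this would make is_manual_mode() return 1, while leaving any of these fields at their *_AUTO value drops the demod back to auto search:

	c->delivery_system        = SYS_ISDBT;
	c->transmission_mode      = TRANSMISSION_MODE_8K;	/* not _AUTO */
	c->guard_interval         = GUARD_INTERVAL_1_8;		/* not _AUTO */
	c->isdbt_layer_enabled    = 1 << 0;			/* layer A only */
	c->layer[0].segment_count = 13;				/* 1..13 */
	c->layer[0].modulation    = QAM_64;			/* not QAM_AUTO */
	c->layer[0].fec           = FEC_3_4;			/* not FEC_AUTO */
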
static int dib8000_tune(struct dvb_frontend *fe)
{
struct dib8000_state *state = fe->demodulator_priv;
@@ -2878,40 +3031,19 @@ static int dib8000_tune(struct dvb_frontend *fe)
switch (*tune_state) {
case CT_DEMOD_START: /* 30 */
+ dib8000_reset_stats(fe);
+
if (state->revision == 0x8090)
dib8090p_init_sdram(state);
state->status = FE_STATUS_TUNE_PENDING;
- if ((c->delivery_system != SYS_ISDBT) ||
- (c->inversion == INVERSION_AUTO) ||
- (c->transmission_mode == TRANSMISSION_MODE_AUTO) ||
- (c->guard_interval == GUARD_INTERVAL_AUTO) ||
- (((c->isdbt_layer_enabled & (1 << 0)) != 0) &&
- (c->layer[0].segment_count != 0xff) &&
- (c->layer[0].segment_count != 0) &&
- ((c->layer[0].modulation == QAM_AUTO) ||
- (c->layer[0].fec == FEC_AUTO))) ||
- (((c->isdbt_layer_enabled & (1 << 1)) != 0) &&
- (c->layer[1].segment_count != 0xff) &&
- (c->layer[1].segment_count != 0) &&
- ((c->layer[1].modulation == QAM_AUTO) ||
- (c->layer[1].fec == FEC_AUTO))) ||
- (((c->isdbt_layer_enabled & (1 << 2)) != 0) &&
- (c->layer[2].segment_count != 0xff) &&
- (c->layer[2].segment_count != 0) &&
- ((c->layer[2].modulation == QAM_AUTO) ||
- (c->layer[2].fec == FEC_AUTO))) ||
- (((c->layer[0].segment_count == 0) ||
- ((c->isdbt_layer_enabled & (1 << 0)) == 0)) &&
- ((c->layer[1].segment_count == 0) ||
- ((c->isdbt_layer_enabled & (2 << 0)) == 0)) &&
- ((c->layer[2].segment_count == 0) || ((c->isdbt_layer_enabled & (3 << 0)) == 0))))
- state->channel_parameters_set = 0; /* auto search */
- else
- state->channel_parameters_set = 1; /* channel parameters are known */
+ state->channel_parameters_set = is_manual_mode(c);
+
+ dprintk("Tuning channel on %s search mode",
+ state->channel_parameters_set ? "manual" : "auto");
dib8000_viterbi_state(state, 0); /* force chan dec in restart */
- /* Layer monit */
+ /* Layer monitor */
dib8000_write_word(state, 285, dib8000_read_word(state, 285) & 0x60);
dib8000_set_frequency_offset(state);
@@ -3256,15 +3388,27 @@ static int dib8000_sleep(struct dvb_frontend *fe)
return dib8000_set_adc_state(state, DIBX000_SLOW_ADC_OFF) | dib8000_set_adc_state(state, DIBX000_ADC_OFF);
}
+static int dib8000_read_status(struct dvb_frontend *fe, fe_status_t * stat);
+
static int dib8000_get_frontend(struct dvb_frontend *fe)
{
struct dib8000_state *state = fe->demodulator_priv;
u16 i, val = 0;
- fe_status_t stat;
+ fe_status_t stat = 0;
u8 index_frontend, sub_index_frontend;
fe->dtv_property_cache.bandwidth_hz = 6000000;
+ /*
+	 * If called too early, get_frontend makes dib8000_tune either
+	 * not lock or not sync. This causes dvbv5-scan/dvbv5-zap to fail.
+	 * So, let's just return if frontend 0 has not locked.
+ */
+ dib8000_read_status(fe, &stat);
+ if (!(stat & FE_HAS_SYNC))
+ return 0;
+
+ dprintk("TMCC lock");
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat);
if (stat&FE_HAS_SYNC) {
@@ -3335,9 +3479,13 @@ static int dib8000_get_frontend(struct dvb_frontend *fe)
fe->dtv_property_cache.layer[i].segment_count = val & 0x0F;
dprintk("dib8000_get_frontend : Layer %d segments = %d ", i, fe->dtv_property_cache.layer[i].segment_count);
- val = dib8000_read_word(state, 499 + i);
- fe->dtv_property_cache.layer[i].interleaving = val & 0x3;
- dprintk("dib8000_get_frontend : Layer %d time_intlv = %d ", i, fe->dtv_property_cache.layer[i].interleaving);
+ val = dib8000_read_word(state, 499 + i) & 0x3;
+ /* Interleaving can be 0, 1, 2 or 4 */
+ if (val == 3)
+ val = 4;
+ fe->dtv_property_cache.layer[i].interleaving = val;
+ dprintk("dib8000_get_frontend : Layer %d time_intlv = %d ",
+ i, fe->dtv_property_cache.layer[i].interleaving);
val = dib8000_read_word(state, 481 + i);
switch (val & 0x7) {
@@ -3556,6 +3704,8 @@ static int dib8000_set_frontend(struct dvb_frontend *fe)
return 0;
}
+static int dib8000_get_stats(struct dvb_frontend *fe, fe_status_t stat);
+
static int dib8000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
{
struct dib8000_state *state = fe->demodulator_priv;
@@ -3593,6 +3743,7 @@ static int dib8000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
if (lock & 0x01)
*stat |= FE_HAS_VITERBI;
}
+ dib8000_get_stats(fe, *stat);
return 0;
}
@@ -3699,6 +3850,357 @@ static int dib8000_read_snr(struct dvb_frontend *fe, u16 * snr)
return 0;
}
+struct per_layer_regs {
+ u16 lock, ber, per;
+};
+
+static const struct per_layer_regs per_layer_regs[] = {
+ { 554, 560, 562 },
+ { 555, 576, 578 },
+ { 556, 581, 583 },
+};
+
+struct linear_segments {
+ unsigned x;
+ signed y;
+};
+
+/*
+ * Table to estimate signal strength in dBm.
+ * This table was empirically determined by measuring the signal
+ * strength generated by a DTA-2111 RF generator connected directly to
+ * a dib8076 device (a PixelView PV-D231U stick), using a good quality
+ * 3-meter RC6 cable and good RC6 connectors.
+ * The real value can actually be different on other devices, depending
+ * on several factors, such as whether the LNA is enabled, whether
+ * diversity is enabled, the type of connectors, etc.
+ * Yet, it is better to use this measure in dB than a random non-linear
+ * percentage value, especially for antenna adjustments.
+ * In my tests, the precision of the measure using this table is about
+ * 0.5 dB, which sounds reasonable enough.
+ */
+static struct linear_segments strength_to_db_table[] = {
+ { 55953, 108500 }, /* -22.5 dBm */
+ { 55394, 108000 },
+ { 53834, 107000 },
+ { 52863, 106000 },
+ { 52239, 105000 },
+ { 52012, 104000 },
+ { 51803, 103000 },
+ { 51566, 102000 },
+ { 51356, 101000 },
+ { 51112, 100000 },
+ { 50869, 99000 },
+ { 50600, 98000 },
+ { 50363, 97000 },
+ { 50117, 96000 }, /* -35 dBm */
+ { 49889, 95000 },
+ { 49680, 94000 },
+ { 49493, 93000 },
+ { 49302, 92000 },
+ { 48929, 91000 },
+ { 48416, 90000 },
+ { 48035, 89000 },
+ { 47593, 88000 },
+ { 47282, 87000 },
+ { 46953, 86000 },
+ { 46698, 85000 },
+ { 45617, 84000 },
+ { 44773, 83000 },
+ { 43845, 82000 },
+ { 43020, 81000 },
+ { 42010, 80000 }, /* -51 dBm */
+ { 0, 0 },
+};
+
+static u32 interpolate_value(u32 value, struct linear_segments *segments,
+ unsigned len)
+{
+ u64 tmp64;
+ u32 dx;
+ s32 dy;
+ int i, ret;
+
+ if (value >= segments[0].x)
+ return segments[0].y;
+ if (value < segments[len-1].x)
+ return segments[len-1].y;
+
+ for (i = 1; i < len - 1; i++) {
+ /* If value is identical, no need to interpolate */
+ if (value == segments[i].x)
+ return segments[i].y;
+ if (value > segments[i].x)
+ break;
+ }
+
+ /* Linear interpolation between the two (x,y) points */
+ dy = segments[i - 1].y - segments[i].y;
+ dx = segments[i - 1].x - segments[i].x;
+
+ tmp64 = value - segments[i].x;
+ tmp64 *= dy;
+ do_div(tmp64, dx);
+ ret = segments[i].y + tmp64;
+
+ return ret;
+}
+
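interpolate_value() is plain two-point linear interpolation over a table sorted by decreasing x; restated, this is just the math behind the code above (do_div() is used so the 64-bit division also builds on 32-bit architectures):

	/* for segments[i].x <= value < segments[i-1].x */
	y = y[i] + (value - x[i]) * (y[i-1] - y[i]) / (x[i-1] - x[i])
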
+static u32 dib8000_get_time_us(struct dvb_frontend *fe, int layer)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache;
+ int ini_layer, end_layer, i;
+ u64 time_us, tmp64;
+ u32 tmp, denom;
+ int guard, rate_num, rate_denum = 1, bits_per_symbol, nsegs;
+ int interleaving = 0, fft_div;
+
+ if (layer >= 0) {
+ ini_layer = layer;
+ end_layer = layer + 1;
+ } else {
+ ini_layer = 0;
+ end_layer = 3;
+ }
+
+ switch (c->guard_interval) {
+ case GUARD_INTERVAL_1_4:
+ guard = 4;
+ break;
+ case GUARD_INTERVAL_1_8:
+ guard = 8;
+ break;
+ case GUARD_INTERVAL_1_16:
+ guard = 16;
+ break;
+ default:
+ case GUARD_INTERVAL_1_32:
+ guard = 32;
+ break;
+ }
+
+ switch (c->transmission_mode) {
+ case TRANSMISSION_MODE_2K:
+ fft_div = 4;
+ break;
+ case TRANSMISSION_MODE_4K:
+ fft_div = 2;
+ break;
+ default:
+ case TRANSMISSION_MODE_8K:
+ fft_div = 1;
+ break;
+ }
+
+ denom = 0;
+ for (i = ini_layer; i < end_layer; i++) {
+ nsegs = c->layer[i].segment_count;
+ if (nsegs == 0 || nsegs > 13)
+ continue;
+
+ switch (c->layer[i].modulation) {
+ case DQPSK:
+ case QPSK:
+ bits_per_symbol = 2;
+ break;
+ case QAM_16:
+ bits_per_symbol = 4;
+ break;
+ default:
+ case QAM_64:
+ bits_per_symbol = 6;
+ break;
+ }
+
+ switch (c->layer[i].fec) {
+ case FEC_1_2:
+ rate_num = 1;
+ rate_denum = 2;
+ break;
+ case FEC_2_3:
+ rate_num = 2;
+ rate_denum = 3;
+ break;
+ case FEC_3_4:
+ rate_num = 3;
+ rate_denum = 4;
+ break;
+ case FEC_5_6:
+ rate_num = 5;
+ rate_denum = 6;
+ break;
+ default:
+ case FEC_7_8:
+ rate_num = 7;
+ rate_denum = 8;
+ break;
+ }
+
+ interleaving = c->layer[i].interleaving;
+
+ denom += bits_per_symbol * rate_num * fft_div * nsegs * 384;
+ }
+
+ /* If all goes wrong, wait for 1s for the next stats */
+ if (!denom)
+ return 0;
+
+ /* Estimate the period for the total bit rate */
+ time_us = rate_denum * (1008 * 1562500L);
+ tmp64 = time_us;
+ do_div(tmp64, guard);
+ time_us = time_us + tmp64;
+ time_us += denom / 2;
+ do_div(time_us, denom);
+
+ tmp = 1008 * 96 * interleaving;
+ time_us += tmp + tmp / guard;
+
+ return time_us;
+}
+
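A rough worked example of dib8000_get_time_us(), assuming a single 13-segment layer with 64-QAM, FEC 3/4, 8K FFT (fft_div = 1), guard 1/8 and time interleaving 2 (the numbers are illustrative, not from the patch):

	denom   = 6 * 3 * 1 * 13 * 384             =      89,856
	time_us = 4 * 1008 * 1562500               = 6,300,000,000
	        + 6,300,000,000 / 8  (guard term)  = 7,087,500,000
	        (+ denom/2, then / denom)          ≈      78,876 us
	        + 1008 * 96 * 2 * (1 + 1/8)        ≈     296,604 us  (~0.3 s per stats window)
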
+static int dib8000_get_stats(struct dvb_frontend *fe, fe_status_t stat)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache;
+ int i;
+ int show_per_stats = 0;
+ u32 time_us = 0, snr, val;
+ u64 blocks;
+ s32 db;
+ u16 strength;
+
+ /* Get Signal strength */
+ dib8000_read_signal_strength(fe, &strength);
+ val = strength;
+ db = interpolate_value(val,
+ strength_to_db_table,
+ ARRAY_SIZE(strength_to_db_table)) - 131000;
+ c->strength.stat[0].svalue = db;
+
+ /* UCB/BER/CNR measures require lock */
+ if (!(stat & FE_HAS_LOCK)) {
+ c->cnr.len = 1;
+ c->block_count.len = 1;
+ c->block_error.len = 1;
+ c->post_bit_error.len = 1;
+ c->post_bit_count.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ return 0;
+ }
+
+ /* Check if time for stats was elapsed */
+ if (time_after(jiffies, state->per_jiffies_stats)) {
+ state->per_jiffies_stats = jiffies + msecs_to_jiffies(1000);
+
+ /* Get SNR */
+ snr = dib8000_get_snr(fe);
+ for (i = 1; i < MAX_NUMBER_OF_FRONTENDS; i++) {
+ if (state->fe[i])
+ snr += dib8000_get_snr(state->fe[i]);
+ }
+ snr = snr >> 16;
+
+ if (snr) {
+ snr = 10 * intlog10(snr);
+ snr = (1000L * snr) >> 24;
+ } else {
+ snr = 0;
+ }
+ c->cnr.stat[0].svalue = snr;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+
+ /* Get UCB measures */
+ dib8000_read_unc_blocks(fe, &val);
+ if (val < state->init_ucb)
+ state->init_ucb += 0x100000000LL;
+
+ c->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_error.stat[0].uvalue = val + state->init_ucb;
+
+ /* Estimate the number of packets based on bitrate */
+ if (!time_us)
+ time_us = dib8000_get_time_us(fe, -1);
+
+ if (time_us) {
+ blocks = 1250000ULL * 1000000ULL;
+ do_div(blocks, time_us * 8 * 204);
+ c->block_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_count.stat[0].uvalue += blocks;
+ }
+
+ show_per_stats = 1;
+ }
+
+ /* Get post-BER measures */
+ if (time_after(jiffies, state->ber_jiffies_stats)) {
+ time_us = dib8000_get_time_us(fe, -1);
+ state->ber_jiffies_stats = jiffies + msecs_to_jiffies((time_us + 500) / 1000);
+
+ dprintk("Next all layers stats available in %u us.", time_us);
+
+ dib8000_read_ber(fe, &val);
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue += val;
+
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue += 100000000;
+ }
+
+ if (state->revision < 0x8002)
+ return 0;
+
+ c->block_error.len = 4;
+ c->post_bit_error.len = 4;
+ c->post_bit_count.len = 4;
+
+ for (i = 0; i < 3; i++) {
+ unsigned nsegs = c->layer[i].segment_count;
+
+ if (nsegs == 0 || nsegs > 13)
+ continue;
+
+ time_us = 0;
+
+ if (time_after(jiffies, state->ber_jiffies_stats_layer[i])) {
+ time_us = dib8000_get_time_us(fe, i);
+
+ state->ber_jiffies_stats_layer[i] = jiffies + msecs_to_jiffies((time_us + 500) / 1000);
+ dprintk("Next layer %c stats will be available in %u us\n",
+ 'A' + i, time_us);
+
+ val = dib8000_read_word(state, per_layer_regs[i].ber);
+ c->post_bit_error.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[1 + i].uvalue += val;
+
+ c->post_bit_count.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[1 + i].uvalue += 100000000;
+ }
+
+ if (show_per_stats) {
+ val = dib8000_read_word(state, per_layer_regs[i].per);
+
+ c->block_error.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->block_error.stat[1 + i].uvalue += val;
+
+ if (!time_us)
+ time_us = dib8000_get_time_us(fe, i);
+ if (time_us) {
+ blocks = 1250000ULL * 1000000ULL;
+ do_div(blocks, time_us * 8 * 204);
+ c->block_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_count.stat[0].uvalue += blocks;
+ }
+ }
+ }
+ return 0;
+}
+
int dib8000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave)
{
struct dib8000_state *state = fe->demodulator_priv;
diff --git a/drivers/media/dvb-frontends/drxk.h b/drivers/media/dvb-frontends/drxk.h
index f22eb9f13ad5..f6cb34660327 100644
--- a/drivers/media/dvb-frontends/drxk.h
+++ b/drivers/media/dvb-frontends/drxk.h
@@ -29,7 +29,6 @@
* A value of 0 (default) or lower indicates that
* the correct number of parameters will be
* automatically detected.
- * @load_firmware_sync: Force the firmware load to be synchronous.
*
* On the *_gpio vars, bit 0 is UIO-1, bit 1 is UIO-2 and bit 2 is
* UIO-3.
@@ -41,7 +40,6 @@ struct drxk_config {
bool parallel_ts;
bool dynamic_clk;
bool enable_merr_cfg;
- bool load_firmware_sync;
bool antenna_dvbt;
u16 antenna_gpio;
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index bf29a3f0e6f0..cce94a75b2e1 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -6830,25 +6830,13 @@ struct dvb_frontend *drxk_attach(const struct drxk_config *config,
/* Load firmware and initialize DRX-K */
if (state->microcode_name) {
- if (config->load_firmware_sync) {
- const struct firmware *fw = NULL;
+ const struct firmware *fw = NULL;
- status = request_firmware(&fw, state->microcode_name,
- state->i2c->dev.parent);
- if (status < 0)
- fw = NULL;
- load_firmware_cb(fw, state);
- } else {
- status = request_firmware_nowait(THIS_MODULE, 1,
- state->microcode_name,
- state->i2c->dev.parent,
- GFP_KERNEL,
- state, load_firmware_cb);
- if (status < 0) {
- pr_err("failed to request a firmware\n");
- return NULL;
- }
- }
+ status = request_firmware(&fw, state->microcode_name,
+ state->i2c->dev.parent);
+ if (status < 0)
+ fw = NULL;
+ load_firmware_cb(fw, state);
} else if (init_drxk(state) < 0)
goto error;
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
new file mode 100644
index 000000000000..b8a7897e7bd8
--- /dev/null
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -0,0 +1,1311 @@
+/*
+ * Montage M88DS3103 demodulator driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "m88ds3103_priv.h"
+
+static struct dvb_frontend_ops m88ds3103_ops;
+
+/* write multiple registers */
+static int m88ds3103_wr_regs(struct m88ds3103_priv *priv,
+ u8 reg, const u8 *val, int len)
+{
+#define MAX_WR_LEN 32
+#define MAX_WR_XFER_LEN (MAX_WR_LEN + 1)
+ int ret;
+ u8 buf[MAX_WR_XFER_LEN];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg->i2c_addr,
+ .flags = 0,
+ .len = 1 + len,
+ .buf = buf,
+ }
+ };
+
+ if (WARN_ON(len > MAX_WR_LEN))
+ return -EINVAL;
+
+ buf[0] = reg;
+ memcpy(&buf[1], val, len);
+
+ mutex_lock(&priv->i2c_mutex);
+ ret = i2c_transfer(priv->i2c, msg, 1);
+ mutex_unlock(&priv->i2c_mutex);
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+/* read multiple registers */
+static int m88ds3103_rd_regs(struct m88ds3103_priv *priv,
+ u8 reg, u8 *val, int len)
+{
+#define MAX_RD_LEN 3
+#define MAX_RD_XFER_LEN (MAX_RD_LEN)
+ int ret;
+ u8 buf[MAX_RD_XFER_LEN];
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->cfg->i2c_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &reg,
+ }, {
+ .addr = priv->cfg->i2c_addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf,
+ }
+ };
+
+ if (WARN_ON(len > MAX_RD_LEN))
+ return -EINVAL;
+
+ mutex_lock(&priv->i2c_mutex);
+ ret = i2c_transfer(priv->i2c, msg, 2);
+ mutex_unlock(&priv->i2c_mutex);
+ if (ret == 2) {
+ memcpy(val, buf, len);
+ ret = 0;
+ } else {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c rd failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+/* write single register */
+static int m88ds3103_wr_reg(struct m88ds3103_priv *priv, u8 reg, u8 val)
+{
+ return m88ds3103_wr_regs(priv, reg, &val, 1);
+}
+
+/* read single register */
+static int m88ds3103_rd_reg(struct m88ds3103_priv *priv, u8 reg, u8 *val)
+{
+ return m88ds3103_rd_regs(priv, reg, val, 1);
+}
+
+/* write single register with mask */
+static int m88ds3103_wr_reg_mask(struct m88ds3103_priv *priv,
+ u8 reg, u8 val, u8 mask)
+{
+ int ret;
+ u8 u8tmp;
+
+ /* no need for read if whole reg is written */
+ if (mask != 0xff) {
+ ret = m88ds3103_rd_regs(priv, reg, &u8tmp, 1);
+ if (ret)
+ return ret;
+
+ val &= mask;
+ u8tmp &= ~mask;
+ val |= u8tmp;
+ }
+
+ return m88ds3103_wr_regs(priv, reg, &val, 1);
+}
+
+/* read single register with mask */
+static int m88ds3103_rd_reg_mask(struct m88ds3103_priv *priv,
+ u8 reg, u8 *val, u8 mask)
+{
+ int ret, i;
+ u8 u8tmp;
+
+ ret = m88ds3103_rd_regs(priv, reg, &u8tmp, 1);
+ if (ret)
+ return ret;
+
+ u8tmp &= mask;
+
+ /* find position of the first bit */
+ for (i = 0; i < 8; i++) {
+ if ((mask >> i) & 0x01)
+ break;
+ }
+ *val = u8tmp >> i;
+
+ return 0;
+}
+
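The two _mask helpers are the usual read-modify-write / field-extract pattern: m88ds3103_wr_reg_mask() rewrites only the bits set in mask, and m88ds3103_rd_reg_mask() returns the masked field shifted down to bit 0. For example, calls as they appear later in this patch:

	ret = m88ds3103_wr_reg_mask(priv, 0x22, u8tmp1 << 6, 0xc0);	/* touch only bits 7:6 */
	ret = m88ds3103_rd_reg_mask(priv, 0xd1, &u8tmp, 0x07);		/* DVB-S lock flags -> bits 2:0 */
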
+/* write reg val table using reg addr auto increment */
+static int m88ds3103_wr_reg_val_tab(struct m88ds3103_priv *priv,
+ const struct m88ds3103_reg_val *tab, int tab_len)
+{
+ int ret, i, j;
+ u8 buf[83];
+ dev_dbg(&priv->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
+
+ if (tab_len > 83) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0, j = 0; i < tab_len; i++, j++) {
+ buf[j] = tab[i].val;
+
+ if (i == tab_len - 1 || tab[i].reg != tab[i + 1].reg - 1 ||
+ !((j + 1) % (priv->cfg->i2c_wr_max - 1))) {
+ ret = m88ds3103_wr_regs(priv, tab[i].reg - j, buf, j + 1);
+ if (ret)
+ goto err;
+
+ j = -1;
+ }
+ }
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ds3103_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ u8 u8tmp;
+
+ *status = 0;
+
+ if (!priv->warm) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ ret = m88ds3103_rd_reg_mask(priv, 0xd1, &u8tmp, 0x07);
+ if (ret)
+ goto err;
+
+ if (u8tmp == 0x07)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC |
+ FE_HAS_LOCK;
+ break;
+ case SYS_DVBS2:
+ ret = m88ds3103_rd_reg_mask(priv, 0x0d, &u8tmp, 0x8f);
+ if (ret)
+ goto err;
+
+ if (u8tmp == 0x8f)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC |
+ FE_HAS_LOCK;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
+ __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ priv->fe_status = *status;
+
+ dev_dbg(&priv->i2c->dev, "%s: lock=%02x status=%02x\n",
+ __func__, u8tmp, *status);
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ds3103_set_frontend(struct dvb_frontend *fe)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, len;
+ const struct m88ds3103_reg_val *init;
+ u8 u8tmp, u8tmp1, u8tmp2;
+ u8 buf[2];
+ u16 u16tmp, divide_ratio;
+ u32 tuner_frequency, target_mclk, ts_clk;
+ s32 s32tmp;
+ dev_dbg(&priv->i2c->dev,
+ "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
+ __func__, c->delivery_system,
+ c->modulation, c->frequency, c->symbol_rate,
+ c->inversion, c->pilot, c->rolloff);
+
+ if (!priv->warm) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ /* program tuner */
+ if (fe->ops.tuner_ops.set_params) {
+ ret = fe->ops.tuner_ops.set_params(fe);
+ if (ret)
+ goto err;
+ }
+
+ if (fe->ops.tuner_ops.get_frequency) {
+ ret = fe->ops.tuner_ops.get_frequency(fe, &tuner_frequency);
+ if (ret)
+ goto err;
+ }
+
+ /* reset */
+ ret = m88ds3103_wr_reg(priv, 0x07, 0x80);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0xb2, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ len = ARRAY_SIZE(m88ds3103_dvbs_init_reg_vals);
+ init = m88ds3103_dvbs_init_reg_vals;
+ target_mclk = 96000;
+ break;
+ case SYS_DVBS2:
+ len = ARRAY_SIZE(m88ds3103_dvbs2_init_reg_vals);
+ init = m88ds3103_dvbs2_init_reg_vals;
+
+ switch (priv->cfg->ts_mode) {
+ case M88DS3103_TS_SERIAL:
+ case M88DS3103_TS_SERIAL_D7:
+ if (c->symbol_rate < 18000000)
+ target_mclk = 96000;
+ else
+ target_mclk = 144000;
+ break;
+ case M88DS3103_TS_PARALLEL:
+ case M88DS3103_TS_PARALLEL_12:
+ case M88DS3103_TS_PARALLEL_16:
+ case M88DS3103_TS_PARALLEL_19_2:
+ case M88DS3103_TS_CI:
+ if (c->symbol_rate < 18000000)
+ target_mclk = 96000;
+ else if (c->symbol_rate < 28000000)
+ target_mclk = 144000;
+ else
+ target_mclk = 192000;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid ts_mode\n",
+ __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
+ __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* program init table */
+ if (c->delivery_system != priv->delivery_system) {
+ ret = m88ds3103_wr_reg_val_tab(priv, init, len);
+ if (ret)
+ goto err;
+ }
+
+ u8tmp1 = 0; /* silence compiler warning */
+ switch (priv->cfg->ts_mode) {
+ case M88DS3103_TS_SERIAL:
+ u8tmp1 = 0x00;
+ ts_clk = 0;
+ u8tmp = 0x46;
+ break;
+ case M88DS3103_TS_SERIAL_D7:
+ u8tmp1 = 0x20;
+ ts_clk = 0;
+ u8tmp = 0x46;
+ break;
+ case M88DS3103_TS_PARALLEL:
+ ts_clk = 24000;
+ u8tmp = 0x42;
+ break;
+ case M88DS3103_TS_PARALLEL_12:
+ ts_clk = 12000;
+ u8tmp = 0x42;
+ break;
+ case M88DS3103_TS_PARALLEL_16:
+ ts_clk = 16000;
+ u8tmp = 0x42;
+ break;
+ case M88DS3103_TS_PARALLEL_19_2:
+ ts_clk = 19200;
+ u8tmp = 0x42;
+ break;
+ case M88DS3103_TS_CI:
+ ts_clk = 6000;
+ u8tmp = 0x43;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid ts_mode\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* TS mode */
+ ret = m88ds3103_wr_reg(priv, 0xfd, u8tmp);
+ if (ret)
+ goto err;
+
+ switch (priv->cfg->ts_mode) {
+ case M88DS3103_TS_SERIAL:
+ case M88DS3103_TS_SERIAL_D7:
+ ret = m88ds3103_wr_reg_mask(priv, 0x29, u8tmp1, 0x20);
+ if (ret)
+ goto err;
+ }
+
+ if (ts_clk) {
+ divide_ratio = DIV_ROUND_UP(target_mclk, ts_clk);
+ u8tmp1 = divide_ratio / 2;
+ u8tmp2 = DIV_ROUND_UP(divide_ratio, 2);
+ } else {
+ divide_ratio = 0;
+ u8tmp1 = 0;
+ u8tmp2 = 0;
+ }
+
+ dev_dbg(&priv->i2c->dev,
+ "%s: target_mclk=%d ts_clk=%d divide_ratio=%d\n",
+ __func__, target_mclk, ts_clk, divide_ratio);
+
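+	/*
+	 * Illustrative example (values assumed, not from a specific board):
+	 * parallel TS with target_mclk = 144000 kHz and ts_clk = 24000 kHz
+	 * gives divide_ratio = 6, so u8tmp1 = 3 and u8tmp2 = 3; the
+	 * registers below are programmed with "divider minus one".
+	 */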
+ u8tmp1--;
+ u8tmp2--;
+ /* u8tmp1[5:2] => fe[3:0], u8tmp1[1:0] => ea[7:6] */
+ u8tmp1 &= 0x3f;
+ /* u8tmp2[5:0] => ea[5:0] */
+ u8tmp2 &= 0x3f;
+
+ ret = m88ds3103_rd_reg(priv, 0xfe, &u8tmp);
+ if (ret)
+ goto err;
+
+ u8tmp = ((u8tmp & 0xf0) << 0) | u8tmp1 >> 2;
+ ret = m88ds3103_wr_reg(priv, 0xfe, u8tmp);
+ if (ret)
+ goto err;
+
+ u8tmp = ((u8tmp1 & 0x03) << 6) | u8tmp2 >> 0;
+ ret = m88ds3103_wr_reg(priv, 0xea, u8tmp);
+ if (ret)
+ goto err;
+
+ switch (target_mclk) {
+ case 72000:
+ u8tmp1 = 0x00; /* 0b00 */
+ u8tmp2 = 0x03; /* 0b11 */
+ break;
+ case 96000:
+ u8tmp1 = 0x02; /* 0b10 */
+ u8tmp2 = 0x01; /* 0b01 */
+ break;
+ case 115200:
+ u8tmp1 = 0x01; /* 0b01 */
+ u8tmp2 = 0x01; /* 0b01 */
+ break;
+ case 144000:
+ u8tmp1 = 0x00; /* 0b00 */
+ u8tmp2 = 0x01; /* 0b01 */
+ break;
+ case 192000:
+ u8tmp1 = 0x03; /* 0b11 */
+ u8tmp2 = 0x00; /* 0b00 */
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid target_mclk\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x22, u8tmp1 << 6, 0xc0);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x24, u8tmp2 << 6, 0xc0);
+ if (ret)
+ goto err;
+
+ if (c->symbol_rate <= 3000000)
+ u8tmp = 0x20;
+ else if (c->symbol_rate <= 10000000)
+ u8tmp = 0x10;
+ else
+ u8tmp = 0x06;
+
+ ret = m88ds3103_wr_reg(priv, 0xc3, 0x08);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0xc8, u8tmp);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0xc4, 0x08);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0xc7, 0x00);
+ if (ret)
+ goto err;
+
+ u16tmp = DIV_ROUND_CLOSEST((c->symbol_rate / 1000) << 15, M88DS3103_MCLK_KHZ / 2);
+ buf[0] = (u16tmp >> 0) & 0xff;
+ buf[1] = (u16tmp >> 8) & 0xff;
+ ret = m88ds3103_wr_regs(priv, 0x61, buf, 2);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x4d, priv->cfg->spec_inv << 1, 0x02);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x30, priv->cfg->agc_inv << 4, 0x10);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0x33, priv->cfg->agc);
+ if (ret)
+ goto err;
+
+ dev_dbg(&priv->i2c->dev, "%s: carrier offset=%d\n", __func__,
+ (tuner_frequency - c->frequency));
+
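+	/*
+	 * Illustrative example (values assumed): with M88DS3103_MCLK_KHZ =
+	 * 96000 and the tuner 500 kHz above the requested frequency,
+	 * s32tmp = DIV_ROUND_CLOSEST(0x10000 * 500, 96000) = 341 = 0x0155,
+	 * written LSB first: 0x55 to reg 0x5e and 0x01 to reg 0x5f.
+	 */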
+ s32tmp = 0x10000 * (tuner_frequency - c->frequency);
+ s32tmp = DIV_ROUND_CLOSEST(s32tmp, M88DS3103_MCLK_KHZ);
+ if (s32tmp < 0)
+ s32tmp += 0x10000;
+
+ buf[0] = (s32tmp >> 0) & 0xff;
+ buf[1] = (s32tmp >> 8) & 0xff;
+ ret = m88ds3103_wr_regs(priv, 0x5e, buf, 2);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0x00, 0x00);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0xb2, 0x00);
+ if (ret)
+ goto err;
+
+ priv->delivery_system = c->delivery_system;
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ds3103_init(struct dvb_frontend *fe)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ int ret, len, remaining;
+ const struct firmware *fw = NULL;
+ u8 *fw_file = M88DS3103_FIRMWARE;
+ u8 u8tmp;
+ dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+
+ /* set cold state by default */
+ priv->warm = false;
+
+ /* wake up device from sleep */
+ ret = m88ds3103_wr_reg_mask(priv, 0x08, 0x01, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x04, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x23, 0x00, 0x10);
+ if (ret)
+ goto err;
+
+ /* reset */
+ ret = m88ds3103_wr_reg(priv, 0x07, 0x60);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
+ if (ret)
+ goto err;
+
+ /* firmware status */
+ ret = m88ds3103_rd_reg(priv, 0xb9, &u8tmp);
+ if (ret)
+ goto err;
+
+ dev_dbg(&priv->i2c->dev, "%s: firmware=%02x\n", __func__, u8tmp);
+
+ if (u8tmp)
+ goto skip_fw_download;
+
+ /* cold state - try to download firmware */
+ dev_info(&priv->i2c->dev, "%s: found a '%s' in cold state\n",
+ KBUILD_MODNAME, m88ds3103_ops.info.name);
+
+	/* request the firmware; this call blocks until it completes or times out */
+ ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
+ if (ret) {
+		dev_err(&priv->i2c->dev, "%s: firmware file '%s' not found\n",
+ KBUILD_MODNAME, fw_file);
+ goto err;
+ }
+
+ dev_info(&priv->i2c->dev, "%s: downloading firmware from file '%s'\n",
+ KBUILD_MODNAME, fw_file);
+
+ ret = m88ds3103_wr_reg(priv, 0xb2, 0x01);
+ if (ret)
+ goto err;
+
+ for (remaining = fw->size; remaining > 0;
+ remaining -= (priv->cfg->i2c_wr_max - 1)) {
+ len = remaining;
+ if (len > (priv->cfg->i2c_wr_max - 1))
+ len = (priv->cfg->i2c_wr_max - 1);
+
+ ret = m88ds3103_wr_regs(priv, 0xb0,
+ &fw->data[fw->size - remaining], len);
+ if (ret) {
+ dev_err(&priv->i2c->dev,
+ "%s: firmware download failed=%d\n",
+ KBUILD_MODNAME, ret);
+ goto err;
+ }
+ }
+
+ ret = m88ds3103_wr_reg(priv, 0xb2, 0x00);
+ if (ret)
+ goto err;
+
+ release_firmware(fw);
+ fw = NULL;
+
+ ret = m88ds3103_rd_reg(priv, 0xb9, &u8tmp);
+ if (ret)
+ goto err;
+
+ if (!u8tmp) {
+ dev_info(&priv->i2c->dev, "%s: firmware did not run\n",
+ KBUILD_MODNAME);
+ ret = -EFAULT;
+ goto err;
+ }
+
+ dev_info(&priv->i2c->dev, "%s: found a '%s' in warm state\n",
+ KBUILD_MODNAME, m88ds3103_ops.info.name);
+ dev_info(&priv->i2c->dev, "%s: firmware version %X.%X\n",
+			KBUILD_MODNAME, (u8tmp >> 4) & 0xf, (u8tmp >> 0) & 0xf);
+
+skip_fw_download:
+ /* warm state */
+ priv->warm = true;
+
+ return 0;
+err:
+ if (fw)
+ release_firmware(fw);
+
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ds3103_sleep(struct dvb_frontend *fe)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ int ret;
+ dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+
+ priv->delivery_system = SYS_UNDEFINED;
+
+ /* TS Hi-Z */
+ ret = m88ds3103_wr_reg_mask(priv, 0x27, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ /* sleep */
+ ret = m88ds3103_wr_reg_mask(priv, 0x08, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x04, 0x01, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x23, 0x10, 0x10);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ds3103_get_frontend(struct dvb_frontend *fe)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ u8 buf[3];
+ dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+
+ if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ ret = m88ds3103_rd_reg(priv, 0xe0, &buf[0]);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_rd_reg(priv, 0xe6, &buf[1]);
+ if (ret)
+ goto err;
+
+ switch ((buf[0] >> 2) & 0x01) {
+ case 0:
+ c->inversion = INVERSION_OFF;
+ break;
+ case 1:
+ c->inversion = INVERSION_ON;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid inversion\n",
+ __func__);
+ }
+
+ switch ((buf[1] >> 5) & 0x07) {
+ case 0:
+ c->fec_inner = FEC_7_8;
+ break;
+ case 1:
+ c->fec_inner = FEC_5_6;
+ break;
+ case 2:
+ c->fec_inner = FEC_3_4;
+ break;
+ case 3:
+ c->fec_inner = FEC_2_3;
+ break;
+ case 4:
+ c->fec_inner = FEC_1_2;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid fec_inner\n",
+ __func__);
+ }
+
+ c->modulation = QPSK;
+
+ break;
+ case SYS_DVBS2:
+ ret = m88ds3103_rd_reg(priv, 0x7e, &buf[0]);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_rd_reg(priv, 0x89, &buf[1]);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_rd_reg(priv, 0xf2, &buf[2]);
+ if (ret)
+ goto err;
+
+ switch ((buf[0] >> 0) & 0x0f) {
+ case 2:
+ c->fec_inner = FEC_2_5;
+ break;
+ case 3:
+ c->fec_inner = FEC_1_2;
+ break;
+ case 4:
+ c->fec_inner = FEC_3_5;
+ break;
+ case 5:
+ c->fec_inner = FEC_2_3;
+ break;
+ case 6:
+ c->fec_inner = FEC_3_4;
+ break;
+ case 7:
+ c->fec_inner = FEC_4_5;
+ break;
+ case 8:
+ c->fec_inner = FEC_5_6;
+ break;
+ case 9:
+ c->fec_inner = FEC_8_9;
+ break;
+ case 10:
+ c->fec_inner = FEC_9_10;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid fec_inner\n",
+ __func__);
+ }
+
+ switch ((buf[0] >> 5) & 0x01) {
+ case 0:
+ c->pilot = PILOT_OFF;
+ break;
+ case 1:
+ c->pilot = PILOT_ON;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid pilot\n",
+ __func__);
+ }
+
+ switch ((buf[0] >> 6) & 0x07) {
+ case 0:
+ c->modulation = QPSK;
+ break;
+ case 1:
+ c->modulation = PSK_8;
+ break;
+ case 2:
+ c->modulation = APSK_16;
+ break;
+ case 3:
+ c->modulation = APSK_32;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid modulation\n",
+ __func__);
+ }
+
+ switch ((buf[1] >> 7) & 0x01) {
+ case 0:
+ c->inversion = INVERSION_OFF;
+ break;
+ case 1:
+ c->inversion = INVERSION_ON;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid inversion\n",
+ __func__);
+ }
+
+ switch ((buf[2] >> 0) & 0x03) {
+ case 0:
+ c->rolloff = ROLLOFF_35;
+ break;
+ case 1:
+ c->rolloff = ROLLOFF_25;
+ break;
+ case 2:
+ c->rolloff = ROLLOFF_20;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid rolloff\n",
+ __func__);
+ }
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
+ __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = m88ds3103_rd_regs(priv, 0x6d, buf, 2);
+ if (ret)
+ goto err;
+
+ c->symbol_rate = 1ull * ((buf[1] << 8) | (buf[0] << 0)) *
+ M88DS3103_MCLK_KHZ * 1000 / 0x10000;
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ds3103_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, i, tmp;
+ u8 buf[3];
+ u16 noise, signal;
+ u32 noise_tot, signal_tot;
+ dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+ /* reports SNR in resolution of 0.1 dB */
+
+ /* more iterations for more accurate estimation */
+ #define M88DS3103_SNR_ITERATIONS 3
+
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ tmp = 0;
+
+ for (i = 0; i < M88DS3103_SNR_ITERATIONS; i++) {
+ ret = m88ds3103_rd_reg(priv, 0xff, &buf[0]);
+ if (ret)
+ goto err;
+
+ tmp += buf[0];
+ }
+
+ /* use of one register limits max value to 15 dB */
+ /* SNR(X) dB = 10 * ln(X) / ln(10) dB */
+ tmp = DIV_ROUND_CLOSEST(tmp, 8 * M88DS3103_SNR_ITERATIONS);
+ if (tmp)
+ *snr = 100ul * intlog2(tmp) / intlog2(10);
+ else
+ *snr = 0;
+ break;
+ case SYS_DVBS2:
+ noise_tot = 0;
+ signal_tot = 0;
+
+ for (i = 0; i < M88DS3103_SNR_ITERATIONS; i++) {
+ ret = m88ds3103_rd_regs(priv, 0x8c, buf, 3);
+ if (ret)
+ goto err;
+
+ noise = buf[1] << 6; /* [13:6] */
+ noise |= buf[0] & 0x3f; /* [5:0] */
+ noise >>= 2;
+ signal = buf[2] * buf[2];
+ signal >>= 1;
+
+ noise_tot += noise;
+ signal_tot += signal;
+ }
+
+ noise = noise_tot / M88DS3103_SNR_ITERATIONS;
+ signal = signal_tot / M88DS3103_SNR_ITERATIONS;
+
+ /* SNR(X) dB = 10 * log10(X) dB */
+ if (signal > noise) {
+ tmp = signal / noise;
+ *snr = 100ul * intlog10(tmp) / (1 << 24);
+ } else {
+ *snr = 0;
+ }
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
+ __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+
+static int m88ds3103_set_tone(struct dvb_frontend *fe,
+ fe_sec_tone_mode_t fe_sec_tone_mode)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ int ret;
+ u8 u8tmp, tone, reg_a1_mask;
+ dev_dbg(&priv->i2c->dev, "%s: fe_sec_tone_mode=%d\n", __func__,
+ fe_sec_tone_mode);
+
+ if (!priv->warm) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ switch (fe_sec_tone_mode) {
+ case SEC_TONE_ON:
+ tone = 0;
+ reg_a1_mask = 0x87;
+ break;
+ case SEC_TONE_OFF:
+ tone = 1;
+ reg_a1_mask = 0x00;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_tone_mode\n",
+ __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ u8tmp = tone << 7 | priv->cfg->envelope_mode << 5;
+ ret = m88ds3103_wr_reg_mask(priv, 0xa2, u8tmp, 0xe0);
+ if (ret)
+ goto err;
+
+ u8tmp = 1 << 2;
+ ret = m88ds3103_wr_reg_mask(priv, 0xa1, u8tmp, reg_a1_mask);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ds3103_diseqc_send_master_cmd(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *diseqc_cmd)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ int ret, i;
+ u8 u8tmp;
+ dev_dbg(&priv->i2c->dev, "%s: msg=%*ph\n", __func__,
+ diseqc_cmd->msg_len, diseqc_cmd->msg);
+
+ if (!priv->warm) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ if (diseqc_cmd->msg_len < 3 || diseqc_cmd->msg_len > 6) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ u8tmp = priv->cfg->envelope_mode << 5;
+ ret = m88ds3103_wr_reg_mask(priv, 0xa2, u8tmp, 0xe0);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_regs(priv, 0xa3, diseqc_cmd->msg,
+ diseqc_cmd->msg_len);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0xa1,
+ (diseqc_cmd->msg_len - 1) << 3 | 0x07);
+ if (ret)
+ goto err;
+
+ /* DiSEqC message typical period is 54 ms */
+ usleep_range(40000, 60000);
+
+ /* wait DiSEqC TX ready */
+ for (i = 20, u8tmp = 1; i && u8tmp; i--) {
+ usleep_range(5000, 10000);
+
+ ret = m88ds3103_rd_reg_mask(priv, 0xa1, &u8tmp, 0x40);
+ if (ret)
+ goto err;
+ }
+
+ dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
+
+ if (i == 0) {
+ dev_dbg(&priv->i2c->dev, "%s: diseqc tx timeout\n", __func__);
+
+ ret = m88ds3103_wr_reg_mask(priv, 0xa1, 0x40, 0xc0);
+ if (ret)
+ goto err;
+ }
+
+ ret = m88ds3103_wr_reg_mask(priv, 0xa2, 0x80, 0xc0);
+ if (ret)
+ goto err;
+
+ if (i == 0) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ds3103_diseqc_send_burst(struct dvb_frontend *fe,
+ fe_sec_mini_cmd_t fe_sec_mini_cmd)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ int ret, i;
+ u8 u8tmp, burst;
+ dev_dbg(&priv->i2c->dev, "%s: fe_sec_mini_cmd=%d\n", __func__,
+ fe_sec_mini_cmd);
+
+ if (!priv->warm) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ u8tmp = priv->cfg->envelope_mode << 5;
+ ret = m88ds3103_wr_reg_mask(priv, 0xa2, u8tmp, 0xe0);
+ if (ret)
+ goto err;
+
+ switch (fe_sec_mini_cmd) {
+ case SEC_MINI_A:
+ burst = 0x02;
+ break;
+ case SEC_MINI_B:
+ burst = 0x01;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_mini_cmd\n",
+ __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = m88ds3103_wr_reg(priv, 0xa1, burst);
+ if (ret)
+ goto err;
+
+ /* DiSEqC ToneBurst period is 12.5 ms */
+ usleep_range(11000, 20000);
+
+ /* wait DiSEqC TX ready */
+ for (i = 5, u8tmp = 1; i && u8tmp; i--) {
+ usleep_range(800, 2000);
+
+ ret = m88ds3103_rd_reg_mask(priv, 0xa1, &u8tmp, 0x40);
+ if (ret)
+ goto err;
+ }
+
+ dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
+
+ ret = m88ds3103_wr_reg_mask(priv, 0xa2, 0x80, 0xc0);
+ if (ret)
+ goto err;
+
+ if (i == 0) {
+ dev_dbg(&priv->i2c->dev, "%s: diseqc tx timeout\n", __func__);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ds3103_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *s)
+{
+ s->min_delay_ms = 3000;
+
+ return 0;
+}
+
+static void m88ds3103_release(struct dvb_frontend *fe)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ i2c_del_mux_adapter(priv->i2c_adapter);
+ kfree(priv);
+}
+
+static int m88ds3103_select(struct i2c_adapter *adap, void *mux_priv, u32 chan)
+{
+ struct m88ds3103_priv *priv = mux_priv;
+ int ret;
+ struct i2c_msg gate_open_msg[1] = {
+ {
+ .addr = priv->cfg->i2c_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = "\x03\x11",
+ }
+ };
+
+ mutex_lock(&priv->i2c_mutex);
+
+ /* open tuner I2C repeater for 1 xfer, closes automatically */
+ ret = __i2c_transfer(priv->i2c, gate_open_msg, 1);
+ if (ret != 1) {
+ dev_warn(&priv->i2c->dev, "%s: i2c wr failed=%d\n",
+ KBUILD_MODNAME, ret);
+ if (ret >= 0)
+ ret = -EREMOTEIO;
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int m88ds3103_deselect(struct i2c_adapter *adap, void *mux_priv,
+ u32 chan)
+{
+ struct m88ds3103_priv *priv = mux_priv;
+
+ mutex_unlock(&priv->i2c_mutex);
+
+ return 0;
+}
+
+struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
+ struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter)
+{
+ int ret;
+ struct m88ds3103_priv *priv;
+ u8 chip_id, u8tmp;
+
+ /* allocate memory for the internal priv */
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ dev_err(&i2c->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
+ goto err;
+ }
+
+ priv->cfg = cfg;
+ priv->i2c = i2c;
+ mutex_init(&priv->i2c_mutex);
+
+ ret = m88ds3103_rd_reg(priv, 0x01, &chip_id);
+ if (ret)
+ goto err;
+
+ dev_dbg(&priv->i2c->dev, "%s: chip_id=%02x\n", __func__, chip_id);
+
+ switch (chip_id) {
+ case 0xd0:
+ break;
+	default:
+		ret = -ENODEV;
+		goto err;
+ }
+
+ switch (priv->cfg->clock_out) {
+ case M88DS3103_CLOCK_OUT_DISABLED:
+ u8tmp = 0x80;
+ break;
+ case M88DS3103_CLOCK_OUT_ENABLED:
+ u8tmp = 0x00;
+ break;
+ case M88DS3103_CLOCK_OUT_ENABLED_DIV2:
+ u8tmp = 0x10;
+ break;
+	default:
+		ret = -EINVAL;
+		goto err;
+ }
+
+ ret = m88ds3103_wr_reg(priv, 0x29, u8tmp);
+ if (ret)
+ goto err;
+
+ /* sleep */
+ ret = m88ds3103_wr_reg_mask(priv, 0x08, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x04, 0x01, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x23, 0x10, 0x10);
+ if (ret)
+ goto err;
+
+ /* create mux i2c adapter for tuner */
+ priv->i2c_adapter = i2c_add_mux_adapter(i2c, &i2c->dev, priv, 0, 0, 0,
+ m88ds3103_select, m88ds3103_deselect);
+	if (priv->i2c_adapter == NULL) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+ *tuner_i2c_adapter = priv->i2c_adapter;
+
+ /* create dvb_frontend */
+ memcpy(&priv->fe.ops, &m88ds3103_ops, sizeof(struct dvb_frontend_ops));
+ priv->fe.demodulator_priv = priv;
+
+ return &priv->fe;
+err:
+ dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret);
+ kfree(priv);
+ return NULL;
+}
+EXPORT_SYMBOL(m88ds3103_attach);
+
+static struct dvb_frontend_ops m88ds3103_ops = {
+ .delsys = { SYS_DVBS, SYS_DVBS2 },
+ .info = {
+ .name = "Montage M88DS3103",
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ .frequency_tolerance = 5000,
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 45000000,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_4_5 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_6_7 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_8_9 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_RECOVER |
+ FE_CAN_2G_MODULATION
+ },
+
+ .release = m88ds3103_release,
+
+ .get_tune_settings = m88ds3103_get_tune_settings,
+
+ .init = m88ds3103_init,
+ .sleep = m88ds3103_sleep,
+
+ .set_frontend = m88ds3103_set_frontend,
+ .get_frontend = m88ds3103_get_frontend,
+
+ .read_status = m88ds3103_read_status,
+ .read_snr = m88ds3103_read_snr,
+
+ .diseqc_send_master_cmd = m88ds3103_diseqc_send_master_cmd,
+ .diseqc_send_burst = m88ds3103_diseqc_send_burst,
+
+ .set_tone = m88ds3103_set_tone,
+};
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Montage M88DS3103 DVB-S/S2 demodulator driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(M88DS3103_FIRMWARE);
diff --git a/drivers/media/dvb-frontends/m88ds3103.h b/drivers/media/dvb-frontends/m88ds3103.h
new file mode 100644
index 000000000000..bbb7e3aa5675
--- /dev/null
+++ b/drivers/media/dvb-frontends/m88ds3103.h
@@ -0,0 +1,114 @@
+/*
+ * Montage M88DS3103 demodulator driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef M88DS3103_H
+#define M88DS3103_H
+
+#include <linux/dvb/frontend.h>
+
+struct m88ds3103_config {
+ /*
+ * I2C address
+ * Default: none, must set
+ * 0x68, ...
+ */
+ u8 i2c_addr;
+
+ /*
+ * clock
+ * Default: none, must set
+ * 27000000
+ */
+ u32 clock;
+
+ /*
+ * max bytes I2C provider is asked to write at once
+ * Default: none, must set
+ * 33, 65, ...
+ */
+ u16 i2c_wr_max;
+
+ /*
+ * TS output mode
+ * Default: M88DS3103_TS_SERIAL
+ */
+#define M88DS3103_TS_SERIAL 0 /* TS output pin D0, normal */
+#define M88DS3103_TS_SERIAL_D7 1 /* TS output pin D7 */
+#define M88DS3103_TS_PARALLEL 2 /* 24 MHz, normal */
+#define M88DS3103_TS_PARALLEL_12 3 /* 12 MHz */
+#define M88DS3103_TS_PARALLEL_16 4 /* 16 MHz */
+#define M88DS3103_TS_PARALLEL_19_2 5 /* 19.2 MHz */
+#define M88DS3103_TS_CI 6 /* 6 MHz */
+ u8 ts_mode;
+
+ /*
+ * spectrum inversion
+ * Default: 0
+ */
+ u8 spec_inv:1;
+
+ /*
+ * AGC polarity
+ * Default: 0
+ */
+ u8 agc_inv:1;
+
+ /*
+ * clock output
+ * Default: M88DS3103_CLOCK_OUT_DISABLED
+ */
+#define M88DS3103_CLOCK_OUT_DISABLED 0
+#define M88DS3103_CLOCK_OUT_ENABLED 1
+#define M88DS3103_CLOCK_OUT_ENABLED_DIV2 2
+ u8 clock_out;
+
+ /*
+ * DiSEqC envelope mode
+ * Default: 0
+ */
+ u8 envelope_mode:1;
+
+ /*
+ * AGC configuration
+ * Default: none, must set
+ */
+ u8 agc;
+};
+
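+/*
+ * Minimal configuration sketch. All values below are illustrative,
+ * board-specific assumptions (notably .agc), not defaults of this driver:
+ *
+ *	static const struct m88ds3103_config board_m88ds3103_config = {
+ *		.i2c_addr = 0x68,
+ *		.clock = 27000000,
+ *		.i2c_wr_max = 33,
+ *		.ts_mode = M88DS3103_TS_SERIAL,
+ *		.clock_out = M88DS3103_CLOCK_OUT_DISABLED,
+ *		.agc = 0x99,
+ *	};
+ */
+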
+/*
+ * The driver implements its own I2C adapter for tuner I2C access, because
+ * the chip has an I2C-gate control which closes the gate automatically after
+ * each I2C transfer. Using our own I2C adapter lets us work around that.
+ */
+
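+/*
+ * Usage sketch (not part of this patch; variable names and the config symbol
+ * are illustrative assumptions only): attach the demod and pass the returned
+ * I2C adapter to the tuner driver so its transfers go through the I2C gate.
+ *
+ *	struct i2c_adapter *tuner_i2c;
+ *	struct dvb_frontend *fe;
+ *
+ *	fe = m88ds3103_attach(&board_m88ds3103_config, i2c, &tuner_i2c);
+ *	if (fe == NULL)
+ *		return -ENODEV;
+ *	attach the tuner on 'tuner_i2c', then register the frontend as usual.
+ */
+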
+#if defined(CONFIG_DVB_M88DS3103) || \
+ (defined(CONFIG_DVB_M88DS3103_MODULE) && defined(MODULE))
+extern struct dvb_frontend *m88ds3103_attach(
+ const struct m88ds3103_config *config,
+ struct i2c_adapter *i2c,
+ struct i2c_adapter **tuner_i2c);
+#else
+static inline struct dvb_frontend *m88ds3103_attach(
+ const struct m88ds3103_config *config,
+ struct i2c_adapter *i2c,
+ struct i2c_adapter **tuner_i2c)
+{
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/dvb-frontends/m88ds3103_priv.h b/drivers/media/dvb-frontends/m88ds3103_priv.h
new file mode 100644
index 000000000000..84c3c06df622
--- /dev/null
+++ b/drivers/media/dvb-frontends/m88ds3103_priv.h
@@ -0,0 +1,215 @@
+/*
+ * Montage M88DS3103 demodulator driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef M88DS3103_PRIV_H
+#define M88DS3103_PRIV_H
+
+#include "dvb_frontend.h"
+#include "m88ds3103.h"
+#include "dvb_math.h"
+#include <linux/firmware.h>
+#include <linux/i2c-mux.h>
+
+#define M88DS3103_FIRMWARE "dvb-demod-m88ds3103.fw"
+#define M88DS3103_MCLK_KHZ 96000
+
+struct m88ds3103_priv {
+ struct i2c_adapter *i2c;
+ /* mutex needed due to own tuner I2C adapter */
+ struct mutex i2c_mutex;
+ const struct m88ds3103_config *cfg;
+ struct dvb_frontend fe;
+ fe_delivery_system_t delivery_system;
+ fe_status_t fe_status;
+ bool warm; /* FW running */
+ struct i2c_adapter *i2c_adapter;
+};
+
+struct m88ds3103_reg_val {
+ u8 reg;
+ u8 val;
+};
+
+static const struct m88ds3103_reg_val m88ds3103_dvbs_init_reg_vals[] = {
+ {0x23, 0x07},
+ {0x08, 0x03},
+ {0x0c, 0x02},
+ {0x21, 0x54},
+ {0x25, 0x8a},
+ {0x27, 0x31},
+ {0x30, 0x08},
+ {0x31, 0x40},
+ {0x32, 0x32},
+ {0x35, 0xff},
+ {0x3a, 0x00},
+ {0x37, 0x10},
+ {0x38, 0x10},
+ {0x39, 0x02},
+ {0x42, 0x60},
+ {0x4a, 0x80},
+ {0x4b, 0x04},
+ {0x4d, 0x91},
+ {0x5d, 0xc8},
+ {0x50, 0x36},
+ {0x51, 0x36},
+ {0x52, 0x36},
+ {0x53, 0x36},
+ {0x56, 0x01},
+ {0x63, 0x0f},
+ {0x64, 0x30},
+ {0x65, 0x40},
+ {0x68, 0x26},
+ {0x69, 0x4c},
+ {0x70, 0x20},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x40},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x60},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x80},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0xa0},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x1f},
+ {0x76, 0x38},
+ {0x77, 0xa6},
+ {0x78, 0x0c},
+ {0x79, 0x80},
+ {0x7f, 0x14},
+ {0x7c, 0x00},
+ {0xae, 0x82},
+ {0x80, 0x64},
+ {0x81, 0x66},
+ {0x82, 0x44},
+ {0x85, 0x04},
+ {0xcd, 0xf4},
+ {0x90, 0x33},
+ {0xa0, 0x44},
+ {0xc0, 0x08},
+ {0xc3, 0x10},
+ {0xc4, 0x08},
+ {0xc5, 0xf0},
+ {0xc6, 0xff},
+ {0xc7, 0x00},
+ {0xc8, 0x1a},
+ {0xc9, 0x80},
+ {0xe0, 0xf8},
+ {0xe6, 0x8b},
+ {0xd0, 0x40},
+ {0xf8, 0x20},
+ {0xfa, 0x0f},
+ {0x00, 0x00},
+ {0xbd, 0x01},
+ {0xb8, 0x00},
+};
+
+static const struct m88ds3103_reg_val m88ds3103_dvbs2_init_reg_vals[] = {
+ {0x23, 0x07},
+ {0x08, 0x07},
+ {0x0c, 0x02},
+ {0x21, 0x54},
+ {0x25, 0x8a},
+ {0x27, 0x31},
+ {0x30, 0x08},
+ {0x32, 0x32},
+ {0x35, 0xff},
+ {0x3a, 0x00},
+ {0x37, 0x10},
+ {0x38, 0x10},
+ {0x39, 0x02},
+ {0x42, 0x60},
+ {0x4a, 0x80},
+ {0x4b, 0x04},
+ {0x4d, 0x91},
+ {0x5d, 0xc8},
+ {0x50, 0x36},
+ {0x51, 0x36},
+ {0x52, 0x36},
+ {0x53, 0x36},
+ {0x56, 0x01},
+ {0x63, 0x0f},
+ {0x64, 0x10},
+ {0x65, 0x20},
+ {0x68, 0x46},
+ {0x69, 0xcd},
+ {0x70, 0x20},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x40},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x60},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x80},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0xa0},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x1f},
+ {0x76, 0x38},
+ {0x77, 0xa6},
+ {0x78, 0x0c},
+ {0x79, 0x80},
+ {0x7f, 0x14},
+ {0x85, 0x08},
+ {0xcd, 0xf4},
+ {0x90, 0x33},
+ {0x86, 0x00},
+ {0x87, 0x0f},
+ {0x89, 0x00},
+ {0x8b, 0x44},
+ {0x8c, 0x66},
+ {0x9d, 0xc1},
+ {0x8a, 0x10},
+ {0xad, 0x40},
+ {0xa0, 0x44},
+ {0xc0, 0x08},
+ {0xc1, 0x10},
+ {0xc2, 0x08},
+ {0xc3, 0x10},
+ {0xc4, 0x08},
+ {0xc5, 0xf0},
+ {0xc6, 0xff},
+ {0xc7, 0x00},
+ {0xc8, 0x1a},
+ {0xc9, 0x80},
+ {0xca, 0x23},
+ {0xcb, 0x24},
+ {0xcc, 0xf4},
+ {0xce, 0x74},
+ {0x00, 0x00},
+ {0xbd, 0x01},
+ {0xb8, 0x00},
+};
+
+#endif
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index 4da5272075cb..b2351466b0da 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -110,28 +110,94 @@ static u8 m88rs2000_readreg(struct m88rs2000_state *state, u8 reg)
return b1[0];
}
+static u32 m88rs2000_get_mclk(struct dvb_frontend *fe)
+{
+ struct m88rs2000_state *state = fe->demodulator_priv;
+ u32 mclk;
+ u8 reg;
+ /* Must not be 0x00 or 0xff */
+ reg = m88rs2000_readreg(state, 0x86);
+ if (!reg || reg == 0xff)
+ return 0;
+
+ reg /= 2;
+ reg += 1;
+
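+	/*
+	 * Worked example: with reg 0xc6 (the value set_frontend programs by
+	 * default), 0xc6 / 2 + 1 = 100 and (100 * 27000 + 14) / 28 = 96429,
+	 * i.e. roughly the default 96.4285 MHz mclk.
+	 */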
+ mclk = (u32)(reg * RS2000_FE_CRYSTAL_KHZ + 28 / 2) / 28;
+
+ return mclk;
+}
+
+static int m88rs2000_set_carrieroffset(struct dvb_frontend *fe, s16 offset)
+{
+ struct m88rs2000_state *state = fe->demodulator_priv;
+ u32 mclk;
+ s32 tmp;
+ u8 reg;
+ int ret;
+
+ mclk = m88rs2000_get_mclk(fe);
+ if (!mclk)
+ return -EINVAL;
+
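+	/*
+	 * Illustrative example (values assumed): mclk = 96429 kHz and
+	 * offset = +1500 kHz give tmp = (1500 * 4096 + 48214) / 96429 = 64,
+	 * so 0x04 is written to reg 0x9c and 0x0 to the high nibble of 0x9d.
+	 */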
+ tmp = (offset * 4096 + (s32)mclk / 2) / (s32)mclk;
+ if (tmp < 0)
+ tmp += 4096;
+
+ /* Carrier Offset */
+ ret = m88rs2000_writereg(state, 0x9c, (u8)(tmp >> 4));
+
+ reg = m88rs2000_readreg(state, 0x9d);
+ reg &= 0xf;
+ reg |= (u8)(tmp & 0xf) << 4;
+
+ ret |= m88rs2000_writereg(state, 0x9d, reg);
+
+ return ret;
+}
+
static int m88rs2000_set_symbolrate(struct dvb_frontend *fe, u32 srate)
{
struct m88rs2000_state *state = fe->demodulator_priv;
int ret;
- u32 temp;
+ u64 temp;
+ u32 mclk;
u8 b[3];
if ((srate < 1000000) || (srate > 45000000))
return -EINVAL;
+ mclk = m88rs2000_get_mclk(fe);
+ if (!mclk)
+ return -EINVAL;
+
temp = srate / 1000;
- temp *= 11831;
- temp /= 68;
- temp -= 3;
+ temp *= 1 << 24;
+
+ do_div(temp, mclk);
b[0] = (u8) (temp >> 16) & 0xff;
b[1] = (u8) (temp >> 8) & 0xff;
b[2] = (u8) temp & 0xff;
+
ret = m88rs2000_writereg(state, 0x93, b[2]);
ret |= m88rs2000_writereg(state, 0x94, b[1]);
ret |= m88rs2000_writereg(state, 0x95, b[0]);
+ if (srate > 10000000)
+ ret |= m88rs2000_writereg(state, 0xa0, 0x20);
+ else
+ ret |= m88rs2000_writereg(state, 0xa0, 0x60);
+
+ ret |= m88rs2000_writereg(state, 0xa1, 0xe0);
+
+ if (srate > 12000000)
+ ret |= m88rs2000_writereg(state, 0xa3, 0x20);
+ else if (srate > 2800000)
+ ret |= m88rs2000_writereg(state, 0xa3, 0x98);
+ else
+ ret |= m88rs2000_writereg(state, 0xa3, 0x90);
+
deb_info("m88rs2000: m88rs2000_set_symbolrate\n");
return ret;
}
@@ -261,8 +327,6 @@ struct inittab m88rs2000_shutdown[] = {
struct inittab fe_reset[] = {
{DEMOD_WRITE, 0x00, 0x01},
- {DEMOD_WRITE, 0xf1, 0xbf},
- {DEMOD_WRITE, 0x00, 0x01},
{DEMOD_WRITE, 0x20, 0x81},
{DEMOD_WRITE, 0x21, 0x80},
{DEMOD_WRITE, 0x10, 0x33},
@@ -305,9 +369,6 @@ struct inittab fe_trigger[] = {
{DEMOD_WRITE, 0x9b, 0x64},
{DEMOD_WRITE, 0x9e, 0x00},
{DEMOD_WRITE, 0x9f, 0xf8},
- {DEMOD_WRITE, 0xa0, 0x20},
- {DEMOD_WRITE, 0xa1, 0xe0},
- {DEMOD_WRITE, 0xa3, 0x38},
{DEMOD_WRITE, 0x98, 0xff},
{DEMOD_WRITE, 0xc0, 0x0f},
{DEMOD_WRITE, 0x89, 0x01},
@@ -408,7 +469,7 @@ static int m88rs2000_read_status(struct dvb_frontend *fe, fe_status_t *status)
*status = 0;
- if ((reg & 0x7) == 0x7) {
+ if ((reg & 0xee) == 0xee) {
*status = FE_HAS_CARRIER | FE_HAS_SIGNAL | FE_HAS_VITERBI
| FE_HAS_SYNC | FE_HAS_LOCK;
if (state->config->set_ts_params)
@@ -480,33 +541,38 @@ static int m88rs2000_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
static int m88rs2000_set_fec(struct m88rs2000_state *state,
fe_code_rate_t fec)
{
- u16 fec_set;
+ u8 fec_set, reg;
+ int ret;
+
switch (fec) {
- /* This is not confirmed kept for reference */
-/* case FEC_1_2:
- fec_set = 0x88;
+ case FEC_1_2:
+ fec_set = 0x8;
break;
case FEC_2_3:
- fec_set = 0x68;
+ fec_set = 0x10;
break;
case FEC_3_4:
- fec_set = 0x48;
+ fec_set = 0x20;
break;
case FEC_5_6:
- fec_set = 0x28;
+ fec_set = 0x40;
break;
case FEC_7_8:
- fec_set = 0x18;
- break; */
+ fec_set = 0x80;
+ break;
case FEC_AUTO:
default:
- fec_set = 0x08;
+ fec_set = 0x0;
}
- m88rs2000_writereg(state, 0x76, fec_set);
- return 0;
-}
+ reg = m88rs2000_readreg(state, 0x70);
+ reg &= 0x7;
+ ret = m88rs2000_writereg(state, 0x70, reg | fec_set);
+ ret |= m88rs2000_writereg(state, 0x76, 0x8);
+
+ return ret;
+}
static fe_code_rate_t m88rs2000_get_fec(struct m88rs2000_state *state)
{
@@ -515,18 +581,20 @@ static fe_code_rate_t m88rs2000_get_fec(struct m88rs2000_state *state)
reg = m88rs2000_readreg(state, 0x76);
m88rs2000_writereg(state, 0x9a, 0xb0);
+ reg &= 0xf0;
+ reg >>= 5;
+
switch (reg) {
- case 0x88:
+ case 0x4:
return FEC_1_2;
- case 0x68:
+ case 0x3:
return FEC_2_3;
- case 0x48:
+ case 0x2:
return FEC_3_4;
- case 0x28:
+ case 0x1:
return FEC_5_6;
- case 0x18:
+ case 0x0:
return FEC_7_8;
- case 0x08:
default:
break;
}
@@ -540,9 +608,8 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
fe_status_t status;
int i, ret = 0;
- s32 tmp;
u32 tuner_freq;
- u16 offset = 0;
+ s16 offset = 0;
u8 reg;
state->no_lock_count = 0;
@@ -567,38 +634,31 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
if (ret < 0)
return -ENODEV;
- offset = tuner_freq - c->frequency;
-
- /* calculate offset assuming 96000kHz*/
- tmp = offset;
- tmp *= 65536;
-
- tmp = (2 * tmp + 96000) / (2 * 96000);
- if (tmp < 0)
- tmp += 65536;
+ offset = (s16)((s32)tuner_freq - c->frequency);
- offset = tmp & 0xffff;
+	/* default mclk is 96.4285 MHz; 2 * mclk = 96.4285 * 2 * 1000 = 192857 kHz */
+ if (((c->frequency % 192857) >= (192857 - 3000)) ||
+ (c->frequency % 192857) <= 3000)
+ ret = m88rs2000_writereg(state, 0x86, 0xc2);
+ else
+ ret = m88rs2000_writereg(state, 0x86, 0xc6);
- ret = m88rs2000_writereg(state, 0x9a, 0x30);
- /* Unknown usually 0xc6 sometimes 0xc1 */
- reg = m88rs2000_readreg(state, 0x86);
- ret |= m88rs2000_writereg(state, 0x86, reg);
- /* Offset lower nibble always 0 */
- ret |= m88rs2000_writereg(state, 0x9c, (offset >> 8));
- ret |= m88rs2000_writereg(state, 0x9d, offset & 0xf0);
+ ret |= m88rs2000_set_carrieroffset(fe, offset);
+ if (ret < 0)
+ return -ENODEV;
+ /* Reset demod by symbol rate */
+ if (c->symbol_rate > 27500000)
+ ret = m88rs2000_writereg(state, 0xf1, 0xa4);
+ else
+ ret = m88rs2000_writereg(state, 0xf1, 0xbf);
- /* Reset Demod */
- ret = m88rs2000_tab_set(state, fe_reset);
+ ret |= m88rs2000_tab_set(state, fe_reset);
if (ret < 0)
return -ENODEV;
- /* Unknown */
- reg = m88rs2000_readreg(state, 0x70);
- ret = m88rs2000_writereg(state, 0x70, reg);
-
/* Set FEC */
- ret |= m88rs2000_set_fec(state, c->fec_inner);
+ ret = m88rs2000_set_fec(state, c->fec_inner);
ret |= m88rs2000_writereg(state, 0x85, 0x1);
ret |= m88rs2000_writereg(state, 0x8a, 0xbf);
ret |= m88rs2000_writereg(state, 0x8d, 0x1e);
@@ -620,7 +680,7 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
for (i = 0; i < 25; i++) {
reg = m88rs2000_readreg(state, 0x8c);
- if ((reg & 0x7) == 0x7) {
+ if ((reg & 0xee) == 0xee) {
status = FE_HAS_LOCK;
break;
}
diff --git a/drivers/media/dvb-frontends/m88rs2000.h b/drivers/media/dvb-frontends/m88rs2000.h
index 14ce31e76ae6..0a50ea90736b 100644
--- a/drivers/media/dvb-frontends/m88rs2000.h
+++ b/drivers/media/dvb-frontends/m88rs2000.h
@@ -53,6 +53,8 @@ static inline struct dvb_frontend *m88rs2000_attach(
}
#endif /* CONFIG_DVB_M88RS2000 */
+#define RS2000_FE_CRYSTAL_KHZ 27000
+
enum {
DEMOD_WRITE = 0x1,
WRITE_DELAY = 0x10,
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index fbca9856313a..8a8e1ecb762d 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -2,7 +2,7 @@
* Support for NXT2002 and NXT2004 - VSB/QAM
*
* Copyright (C) 2005 Kirk Lapray <kirk.lapray@gmail.com>
- * Copyright (C) 2006 Michael Krufky <mkrufky@m1k.net>
+ * Copyright (C) 2006-2014 Michael Krufky <mkrufky@linuxtv.org>
* based on nxt2002 by Taylor Jacob <rtjacob@earthlink.net>
* and nxt2004 by Jean-Francois Thibert <jeanfrancois@sagetv.com>
*
@@ -40,7 +40,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 64
+#define MAX_XFER_SIZE 256
#define NXT2002_DEFAULT_FIRMWARE "dvb-fe-nxt2002.fw"
#define NXT2004_DEFAULT_FIRMWARE "dvb-fe-nxt2004.fw"
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 842654d33317..4aa9c5311cc5 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -555,14 +555,6 @@ config VIDEO_MT9V032
This is a Video4Linux2 sensor-level driver for the Micron
MT9V032 752x480 CMOS sensor.
-config VIDEO_TCM825X
- tristate "TCM825x camera sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_INT_DEVICE
- depends on MEDIA_CAMERA_SUPPORT
- ---help---
- This is a driver for the Toshiba TCM825x VGA camera sensor.
- It is used for example in Nokia N800.
-
config VIDEO_SR030PC30
tristate "Siliconfile SR030PC30 sensor support"
depends on I2C && VIDEO_V4L2
@@ -594,6 +586,13 @@ config VIDEO_S5K4ECGX
This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
camera sensor with an embedded SoC image signal processor.
+config VIDEO_S5K5BAF
+ tristate "Samsung S5K5BAF sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ This is a V4L2 sensor-level driver for Samsung S5K5BAF 2M
+ camera sensor with an embedded SoC image signal processor.
+
source "drivers/media/i2c/smiapp/Kconfig"
config VIDEO_S5C73M3
@@ -655,6 +654,18 @@ config VIDEO_UPD64083
To compile this driver as a module, choose M here: the
module will be called upd64083.
+comment "Audio/Video compression chips"
+
+config VIDEO_SAA6752HS
+ tristate "Philips SAA6752HS MPEG-2 Audio/Video Encoder"
+ depends on VIDEO_V4L2 && I2C
+ ---help---
+ Support for the Philips SAA6752HS MPEG-2 video and MPEG-audio/AC-3
+ audio encoder with multiplexer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called saa6752hs.
+
comment "Miscellaneous helper chips"
config VIDEO_THS7303
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index e03f1776f4f4..48888ae876fb 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_VIDEO_SAA717X) += saa717x.o
obj-$(CONFIG_VIDEO_SAA7127) += saa7127.o
obj-$(CONFIG_VIDEO_SAA7185) += saa7185.o
obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o
+obj-$(CONFIG_VIDEO_SAA6752HS) += saa6752hs.o
obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
@@ -57,7 +58,6 @@ obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
obj-$(CONFIG_VIDEO_OV7640) += ov7640.o
obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
-obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o
obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o
obj-$(CONFIG_VIDEO_MT9T001) += mt9t001.o
@@ -67,6 +67,7 @@ obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
obj-$(CONFIG_VIDEO_S5K6AA) += s5k6aa.o
obj-$(CONFIG_VIDEO_S5K4ECGX) += s5k4ecgx.o
+obj-$(CONFIG_VIDEO_S5K5BAF) += s5k5baf.o
obj-$(CONFIG_VIDEO_S5C73M3) += s5c73m3/
obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
obj-$(CONFIG_VIDEO_AS3645A) += as3645a.o
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index b06a7e54ee0d..83225d6a0dd9 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -66,11 +66,6 @@ MODULE_LICENSE("GPL");
**********************************************************************
*/
-struct i2c_reg_value {
- u8 reg;
- u8 value;
-};
-
struct ad9389b_state_edid {
/* total number of blocks */
u32 blocks;
@@ -143,14 +138,14 @@ static int ad9389b_wr(struct v4l2_subdev *sd, u8 reg, u8 val)
if (ret == 0)
return 0;
}
- v4l2_err(sd, "I2C Write Problem\n");
+ v4l2_err(sd, "%s: failed reg 0x%x, val 0x%x\n", __func__, reg, val);
return ret;
}
/* To set specific bits in the register, a clear-mask is given (to be AND-ed),
and then the value-mask (to be OR-ed). */
static inline void ad9389b_wr_and_or(struct v4l2_subdev *sd, u8 reg,
- u8 clr_mask, u8 val_mask)
+ u8 clr_mask, u8 val_mask)
{
ad9389b_wr(sd, reg, (ad9389b_rd(sd, reg) & clr_mask) | val_mask);
}
@@ -321,12 +316,12 @@ static int ad9389b_s_ctrl(struct v4l2_ctrl *ctrl)
struct ad9389b_state *state = get_ad9389b_state(sd);
v4l2_dbg(1, debug, sd,
- "%s: ctrl id: %d, ctrl->val %d\n", __func__, ctrl->id, ctrl->val);
+ "%s: ctrl id: %d, ctrl->val %d\n", __func__, ctrl->id, ctrl->val);
if (state->hdmi_mode_ctrl == ctrl) {
/* Set HDMI or DVI-D */
ad9389b_wr_and_or(sd, 0xaf, 0xfd,
- ctrl->val == V4L2_DV_TX_MODE_HDMI ? 0x02 : 0x00);
+ ctrl->val == V4L2_DV_TX_MODE_HDMI ? 0x02 : 0x00);
return 0;
}
if (state->rgb_quantization_range_ctrl == ctrl)
@@ -387,61 +382,57 @@ static int ad9389b_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "chip revision %d\n", state->chip_revision);
v4l2_info(sd, "power %s\n", state->power_on ? "on" : "off");
v4l2_info(sd, "%s hotplug, %s Rx Sense, %s EDID (%d block(s))\n",
- (ad9389b_rd(sd, 0x42) & MASK_AD9389B_HPD_DETECT) ?
- "detected" : "no",
- (ad9389b_rd(sd, 0x42) & MASK_AD9389B_MSEN_DETECT) ?
- "detected" : "no",
- edid->segments ? "found" : "no", edid->blocks);
- if (state->have_monitor) {
- v4l2_info(sd, "%s output %s\n",
- (ad9389b_rd(sd, 0xaf) & 0x02) ?
- "HDMI" : "DVI-D",
- (ad9389b_rd(sd, 0xa1) & 0x3c) ?
- "disabled" : "enabled");
- }
+ (ad9389b_rd(sd, 0x42) & MASK_AD9389B_HPD_DETECT) ?
+ "detected" : "no",
+ (ad9389b_rd(sd, 0x42) & MASK_AD9389B_MSEN_DETECT) ?
+ "detected" : "no",
+ edid->segments ? "found" : "no", edid->blocks);
+ v4l2_info(sd, "%s output %s\n",
+ (ad9389b_rd(sd, 0xaf) & 0x02) ?
+ "HDMI" : "DVI-D",
+ (ad9389b_rd(sd, 0xa1) & 0x3c) ?
+ "disabled" : "enabled");
v4l2_info(sd, "ad9389b: %s\n", (ad9389b_rd(sd, 0xb8) & 0x40) ?
- "encrypted" : "no encryption");
+ "encrypted" : "no encryption");
v4l2_info(sd, "state: %s, error: %s, detect count: %u, msk/irq: %02x/%02x\n",
- states[ad9389b_rd(sd, 0xc8) & 0xf],
- errors[ad9389b_rd(sd, 0xc8) >> 4],
- state->edid_detect_counter,
- ad9389b_rd(sd, 0x94), ad9389b_rd(sd, 0x96));
+ states[ad9389b_rd(sd, 0xc8) & 0xf],
+ errors[ad9389b_rd(sd, 0xc8) >> 4],
+ state->edid_detect_counter,
+ ad9389b_rd(sd, 0x94), ad9389b_rd(sd, 0x96));
manual_gear = ad9389b_rd(sd, 0x98) & 0x80;
v4l2_info(sd, "ad9389b: RGB quantization: %s range\n",
- ad9389b_rd(sd, 0x3b) & 0x01 ? "limited" : "full");
+ ad9389b_rd(sd, 0x3b) & 0x01 ? "limited" : "full");
v4l2_info(sd, "ad9389b: %s gear %d\n",
manual_gear ? "manual" : "automatic",
manual_gear ? ((ad9389b_rd(sd, 0x98) & 0x70) >> 4) :
- ((ad9389b_rd(sd, 0x9e) & 0x0e) >> 1));
- if (state->have_monitor) {
- if (ad9389b_rd(sd, 0xaf) & 0x02) {
- /* HDMI only */
- u8 manual_cts = ad9389b_rd(sd, 0x0a) & 0x80;
- u32 N = (ad9389b_rd(sd, 0x01) & 0xf) << 16 |
- ad9389b_rd(sd, 0x02) << 8 |
- ad9389b_rd(sd, 0x03);
- u8 vic_detect = ad9389b_rd(sd, 0x3e) >> 2;
- u8 vic_sent = ad9389b_rd(sd, 0x3d) & 0x3f;
- u32 CTS;
-
- if (manual_cts)
- CTS = (ad9389b_rd(sd, 0x07) & 0xf) << 16 |
- ad9389b_rd(sd, 0x08) << 8 |
- ad9389b_rd(sd, 0x09);
- else
- CTS = (ad9389b_rd(sd, 0x04) & 0xf) << 16 |
- ad9389b_rd(sd, 0x05) << 8 |
- ad9389b_rd(sd, 0x06);
- N = (ad9389b_rd(sd, 0x01) & 0xf) << 16 |
- ad9389b_rd(sd, 0x02) << 8 |
- ad9389b_rd(sd, 0x03);
-
- v4l2_info(sd, "ad9389b: CTS %s mode: N %d, CTS %d\n",
- manual_cts ? "manual" : "automatic", N, CTS);
-
- v4l2_info(sd, "ad9389b: VIC: detected %d, sent %d\n",
- vic_detect, vic_sent);
- }
+ ((ad9389b_rd(sd, 0x9e) & 0x0e) >> 1));
+ if (ad9389b_rd(sd, 0xaf) & 0x02) {
+ /* HDMI only */
+ u8 manual_cts = ad9389b_rd(sd, 0x0a) & 0x80;
+ u32 N = (ad9389b_rd(sd, 0x01) & 0xf) << 16 |
+ ad9389b_rd(sd, 0x02) << 8 |
+ ad9389b_rd(sd, 0x03);
+ u8 vic_detect = ad9389b_rd(sd, 0x3e) >> 2;
+ u8 vic_sent = ad9389b_rd(sd, 0x3d) & 0x3f;
+ u32 CTS;
+
+ if (manual_cts)
+ CTS = (ad9389b_rd(sd, 0x07) & 0xf) << 16 |
+ ad9389b_rd(sd, 0x08) << 8 |
+ ad9389b_rd(sd, 0x09);
+ else
+ CTS = (ad9389b_rd(sd, 0x04) & 0xf) << 16 |
+ ad9389b_rd(sd, 0x05) << 8 |
+ ad9389b_rd(sd, 0x06);
+ N = (ad9389b_rd(sd, 0x01) & 0xf) << 16 |
+ ad9389b_rd(sd, 0x02) << 8 |
+ ad9389b_rd(sd, 0x03);
+
+ v4l2_info(sd, "ad9389b: CTS %s mode: N %d, CTS %d\n",
+ manual_cts ? "manual" : "automatic", N, CTS);
+
+ v4l2_info(sd, "ad9389b: VIC: detected %d, sent %d\n",
+ vic_detect, vic_sent);
}
if (state->dv_timings.type == V4L2_DV_BT_656_1120)
v4l2_print_dv_timings(sd->name, "timings: ",
@@ -486,7 +477,7 @@ static int ad9389b_s_power(struct v4l2_subdev *sd, int on)
}
if (i > 1)
v4l2_dbg(1, debug, sd,
- "needed %d retries to powerup the ad9389b\n", i);
+ "needed %d retries to powerup the ad9389b\n", i);
/* Select chip: AD9389B */
ad9389b_wr_and_or(sd, 0xba, 0xef, 0x10);
@@ -556,14 +547,16 @@ static int ad9389b_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
irq_status = ad9389b_rd(sd, 0x96);
/* clear detected interrupts */
ad9389b_wr(sd, 0x96, irq_status);
+ /* enable interrupts */
+ ad9389b_set_isr(sd, true);
+
+ v4l2_dbg(1, debug, sd, "%s: irq_status 0x%x\n", __func__, irq_status);
- if (irq_status & (MASK_AD9389B_HPD_INT | MASK_AD9389B_MSEN_INT))
+ if (irq_status & (MASK_AD9389B_HPD_INT))
ad9389b_check_monitor_present_status(sd);
if (irq_status & MASK_AD9389B_EDID_RDY_INT)
ad9389b_check_edid_status(sd);
- /* enable interrupts */
- ad9389b_set_isr(sd, true);
*handled = true;
return 0;
}
@@ -599,7 +592,7 @@ static int ad9389b_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edi
if (edid->blocks + edid->start_block >= state->edid.segments * 2)
edid->blocks = state->edid.segments * 2 - edid->start_block;
memcpy(edid->edid, &state->edid.data[edid->start_block * 128],
- 128 * edid->blocks);
+ 128 * edid->blocks);
return 0;
}
@@ -612,8 +605,6 @@ static const struct v4l2_subdev_pad_ops ad9389b_pad_ops = {
/* Enable/disable ad9389b output */
static int ad9389b_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct ad9389b_state *state = get_ad9389b_state(sd);
-
v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
ad9389b_wr_and_or(sd, 0xa1, ~0x3c, (enable ? 0 : 0x3c));
@@ -621,7 +612,6 @@ static int ad9389b_s_stream(struct v4l2_subdev *sd, int enable)
ad9389b_check_monitor_present_status(sd);
} else {
ad9389b_s_power(sd, 0);
- state->have_monitor = false;
}
return 0;
}
@@ -686,14 +676,14 @@ static int ad9389b_g_dv_timings(struct v4l2_subdev *sd,
}
static int ad9389b_enum_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_enum_dv_timings *timings)
+ struct v4l2_enum_dv_timings *timings)
{
return v4l2_enum_dv_timings_cap(timings, &ad9389b_timings_cap,
NULL, NULL);
}
static int ad9389b_dv_timings_cap(struct v4l2_subdev *sd,
- struct v4l2_dv_timings_cap *cap)
+ struct v4l2_dv_timings_cap *cap)
{
*cap = ad9389b_timings_cap;
return 0;
@@ -724,15 +714,15 @@ static int ad9389b_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
u32 N;
switch (freq) {
- case 32000: N = 4096; break;
- case 44100: N = 6272; break;
- case 48000: N = 6144; break;
- case 88200: N = 12544; break;
- case 96000: N = 12288; break;
+ case 32000: N = 4096; break;
+ case 44100: N = 6272; break;
+ case 48000: N = 6144; break;
+ case 88200: N = 12544; break;
+ case 96000: N = 12288; break;
case 176400: N = 25088; break;
case 192000: N = 24576; break;
default:
- return -EINVAL;
+ return -EINVAL;
}
/* Set N (used with CTS to regenerate the audio clock) */
@@ -748,15 +738,15 @@ static int ad9389b_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq)
u32 i2s_sf;
switch (freq) {
- case 32000: i2s_sf = 0x30; break;
- case 44100: i2s_sf = 0x00; break;
- case 48000: i2s_sf = 0x20; break;
- case 88200: i2s_sf = 0x80; break;
- case 96000: i2s_sf = 0xa0; break;
+ case 32000: i2s_sf = 0x30; break;
+ case 44100: i2s_sf = 0x00; break;
+ case 48000: i2s_sf = 0x20; break;
+ case 88200: i2s_sf = 0x80; break;
+ case 96000: i2s_sf = 0xa0; break;
case 176400: i2s_sf = 0xc0; break;
case 192000: i2s_sf = 0xe0; break;
default:
- return -EINVAL;
+ return -EINVAL;
}
/* Set sampling frequency for I2S audio to 48 kHz */
@@ -800,7 +790,7 @@ static const struct v4l2_subdev_ops ad9389b_ops = {
/* ----------------------------------------------------------------------- */
static void ad9389b_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd,
- int segment, u8 *buf)
+ int segment, u8 *buf)
{
int i, j;
@@ -826,8 +816,8 @@ static void ad9389b_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd,
static void ad9389b_edid_handler(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct ad9389b_state *state = container_of(dwork,
- struct ad9389b_state, edid_handler);
+ struct ad9389b_state *state =
+ container_of(dwork, struct ad9389b_state, edid_handler);
struct v4l2_subdev *sd = &state->sd;
struct ad9389b_edid_detect ed;
@@ -845,11 +835,10 @@ static void ad9389b_edid_handler(struct work_struct *work)
if (state->edid.read_retries) {
state->edid.read_retries--;
v4l2_dbg(1, debug, sd, "%s: edid read failed\n", __func__);
- state->have_monitor = false;
ad9389b_s_power(sd, false);
ad9389b_s_power(sd, true);
queue_delayed_work(state->work_queue,
- &state->edid_handler, EDID_DELAY);
+ &state->edid_handler, EDID_DELAY);
return;
}
}
@@ -915,49 +904,35 @@ static void ad9389b_notify_monitor_detect(struct v4l2_subdev *sd)
v4l2_subdev_notify(sd, AD9389B_MONITOR_DETECT, (void *)&mdt);
}
-static void ad9389b_check_monitor_present_status(struct v4l2_subdev *sd)
+static void ad9389b_update_monitor_present_status(struct v4l2_subdev *sd)
{
struct ad9389b_state *state = get_ad9389b_state(sd);
/* read hotplug and rx-sense state */
u8 status = ad9389b_rd(sd, 0x42);
v4l2_dbg(1, debug, sd, "%s: status: 0x%x%s%s\n",
- __func__,
- status,
- status & MASK_AD9389B_HPD_DETECT ? ", hotplug" : "",
- status & MASK_AD9389B_MSEN_DETECT ? ", rx-sense" : "");
+ __func__,
+ status,
+ status & MASK_AD9389B_HPD_DETECT ? ", hotplug" : "",
+ status & MASK_AD9389B_MSEN_DETECT ? ", rx-sense" : "");
- if ((status & MASK_AD9389B_HPD_DETECT) &&
- ((status & MASK_AD9389B_MSEN_DETECT) || state->edid.segments)) {
- v4l2_dbg(1, debug, sd,
- "%s: hotplug and (rx-sense or edid)\n", __func__);
- if (!state->have_monitor) {
- v4l2_dbg(1, debug, sd, "%s: monitor detected\n", __func__);
- state->have_monitor = true;
- ad9389b_set_isr(sd, true);
- if (!ad9389b_s_power(sd, true)) {
- v4l2_dbg(1, debug, sd,
- "%s: monitor detected, powerup failed\n", __func__);
- return;
- }
- ad9389b_setup(sd);
- ad9389b_notify_monitor_detect(sd);
- state->edid.read_retries = EDID_MAX_RETRIES;
- queue_delayed_work(state->work_queue,
- &state->edid_handler, EDID_DELAY);
- }
- } else if (status & MASK_AD9389B_HPD_DETECT) {
+ if (status & MASK_AD9389B_HPD_DETECT) {
v4l2_dbg(1, debug, sd, "%s: hotplug detected\n", __func__);
+ state->have_monitor = true;
+ if (!ad9389b_s_power(sd, true)) {
+ v4l2_dbg(1, debug, sd,
+ "%s: monitor detected, powerup failed\n", __func__);
+ return;
+ }
+ ad9389b_setup(sd);
+ ad9389b_notify_monitor_detect(sd);
state->edid.read_retries = EDID_MAX_RETRIES;
queue_delayed_work(state->work_queue,
- &state->edid_handler, EDID_DELAY);
+ &state->edid_handler, EDID_DELAY);
} else if (!(status & MASK_AD9389B_HPD_DETECT)) {
v4l2_dbg(1, debug, sd, "%s: hotplug not detected\n", __func__);
- if (state->have_monitor) {
- v4l2_dbg(1, debug, sd, "%s: monitor not detected\n", __func__);
- state->have_monitor = false;
- ad9389b_notify_monitor_detect(sd);
- }
+ state->have_monitor = false;
+ ad9389b_notify_monitor_detect(sd);
ad9389b_s_power(sd, false);
memset(&state->edid, 0, sizeof(struct ad9389b_state_edid));
}
@@ -966,6 +941,35 @@ static void ad9389b_check_monitor_present_status(struct v4l2_subdev *sd)
v4l2_ctrl_s_ctrl(state->hotplug_ctrl, ad9389b_have_hotplug(sd) ? 0x1 : 0x0);
v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, ad9389b_have_rx_sense(sd) ? 0x1 : 0x0);
v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
+
+ /* update with setting from ctrls */
+ ad9389b_s_ctrl(state->rgb_quantization_range_ctrl);
+ ad9389b_s_ctrl(state->hdmi_mode_ctrl);
+}
+
+static void ad9389b_check_monitor_present_status(struct v4l2_subdev *sd)
+{
+ struct ad9389b_state *state = get_ad9389b_state(sd);
+ int retry = 0;
+
+ ad9389b_update_monitor_present_status(sd);
+
+ /*
+ * Rapid toggling of the hotplug may leave the chip powered off,
+ * even if we think it is on. In that case reset and power up again.
+ */
+ while (state->power_on && (ad9389b_rd(sd, 0x41) & 0x40)) {
+ if (++retry > 5) {
+			v4l2_err(sd, "retried %d times, giving up\n", retry);
+ return;
+ }
+ v4l2_dbg(1, debug, sd, "%s: reset and re-check status (%d)\n", __func__, retry);
+ ad9389b_notify_monitor_detect(sd);
+ cancel_delayed_work_sync(&state->edid_handler);
+ memset(&state->edid, 0, sizeof(struct ad9389b_state_edid));
+ ad9389b_s_power(sd, false);
+ ad9389b_update_monitor_present_status(sd);
+ }
}
static bool edid_block_verify_crc(u8 *edid_block)
@@ -978,7 +982,7 @@ static bool edid_block_verify_crc(u8 *edid_block)
return sum == 0;
}
-static bool edid_segment_verify_crc(struct v4l2_subdev *sd, u32 segment)
+static bool edid_verify_crc(struct v4l2_subdev *sd, u32 segment)
{
struct ad9389b_state *state = get_ad9389b_state(sd);
u32 blocks = state->edid.blocks;
@@ -992,6 +996,25 @@ static bool edid_segment_verify_crc(struct v4l2_subdev *sd, u32 segment)
return false;
}
+static bool edid_verify_header(struct v4l2_subdev *sd, u32 segment)
+{
+ static const u8 hdmi_header[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+ };
+ struct ad9389b_state *state = get_ad9389b_state(sd);
+ u8 *data = state->edid.data;
+ int i;
+
+ if (segment)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_header); i++)
+ if (data[i] != hdmi_header[i])
+ return false;
+
+ return true;
+}
+
static bool ad9389b_check_edid_status(struct v4l2_subdev *sd)
{
struct ad9389b_state *state = get_ad9389b_state(sd);
@@ -1000,7 +1023,7 @@ static bool ad9389b_check_edid_status(struct v4l2_subdev *sd)
u8 edidRdy = ad9389b_rd(sd, 0xc5);
v4l2_dbg(1, debug, sd, "%s: edid ready (retries: %d)\n",
- __func__, EDID_MAX_RETRIES - state->edid.read_retries);
+ __func__, EDID_MAX_RETRIES - state->edid.read_retries);
if (!(edidRdy & MASK_AD9389B_EDID_RDY))
return false;
@@ -1013,16 +1036,16 @@ static bool ad9389b_check_edid_status(struct v4l2_subdev *sd)
v4l2_dbg(1, debug, sd, "%s: got segment %d\n", __func__, segment);
ad9389b_edid_rd(sd, 256, &state->edid.data[segment * 256]);
ad9389b_dbg_dump_edid(2, debug, sd, segment,
- &state->edid.data[segment * 256]);
+ &state->edid.data[segment * 256]);
if (segment == 0) {
state->edid.blocks = state->edid.data[0x7e] + 1;
v4l2_dbg(1, debug, sd, "%s: %d blocks in total\n",
- __func__, state->edid.blocks);
+ __func__, state->edid.blocks);
}
- if (!edid_segment_verify_crc(sd, segment)) {
+ if (!edid_verify_crc(sd, segment) ||
+ !edid_verify_header(sd, segment)) {
/* edid crc error, force reread of edid segment */
- v4l2_err(sd, "%s: edid crc error\n", __func__);
- state->have_monitor = false;
+ v4l2_err(sd, "%s: edid crc or header error\n", __func__);
ad9389b_s_power(sd, false);
ad9389b_s_power(sd, true);
return false;
@@ -1032,12 +1055,12 @@ static bool ad9389b_check_edid_status(struct v4l2_subdev *sd)
if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
/* Request next EDID segment */
v4l2_dbg(1, debug, sd, "%s: request segment %d\n",
- __func__, state->edid.segments);
+ __func__, state->edid.segments);
ad9389b_wr(sd, 0xc9, 0xf);
ad9389b_wr(sd, 0xc4, state->edid.segments);
state->edid.read_retries = EDID_MAX_RETRIES;
queue_delayed_work(state->work_queue,
- &state->edid_handler, EDID_DELAY);
+ &state->edid_handler, EDID_DELAY);
return false;
}
@@ -1081,7 +1104,7 @@ static int ad9389b_probe(struct i2c_client *client, const struct i2c_device_id *
return -EIO;
v4l_dbg(1, debug, client, "detecting ad9389b client on address 0x%x\n",
- client->addr << 1);
+ client->addr << 1);
state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
if (!state)
@@ -1140,7 +1163,7 @@ static int ad9389b_probe(struct i2c_client *client, const struct i2c_device_id *
goto err_entity;
}
v4l2_dbg(1, debug, sd, "reg 0x41 0x%x, chip version (reg 0x00) 0x%x\n",
- ad9389b_rd(sd, 0x41), state->chip_revision);
+ ad9389b_rd(sd, 0x41), state->chip_revision);
state->edid_i2c_client = i2c_new_dummy(client->adapter, (0x7e>>1));
if (state->edid_i2c_client == NULL) {
@@ -1163,7 +1186,7 @@ static int ad9389b_probe(struct i2c_client *client, const struct i2c_device_id *
ad9389b_set_isr(sd, true);
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
- client->addr << 1, client->adapter->name);
+ client->addr << 1, client->adapter->name);
return 0;
err_unreg:
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 7c8d971f1f61..ee618942cb8e 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -452,6 +452,29 @@ static int adv7511_log_status(struct v4l2_subdev *sd)
errors[adv7511_rd(sd, 0xc8) >> 4], state->edid_detect_counter,
adv7511_rd(sd, 0x94), adv7511_rd(sd, 0x96));
v4l2_info(sd, "RGB quantization: %s range\n", adv7511_rd(sd, 0x18) & 0x80 ? "limited" : "full");
+ if (adv7511_rd(sd, 0xaf) & 0x02) {
+ /* HDMI only */
+ u8 manual_cts = adv7511_rd(sd, 0x0a) & 0x80;
+ u32 N = (adv7511_rd(sd, 0x01) & 0xf) << 16 |
+ adv7511_rd(sd, 0x02) << 8 |
+ adv7511_rd(sd, 0x03);
+ u8 vic_detect = adv7511_rd(sd, 0x3e) >> 2;
+ u8 vic_sent = adv7511_rd(sd, 0x3d) & 0x3f;
+ u32 CTS;
+
+ if (manual_cts)
+ CTS = (adv7511_rd(sd, 0x07) & 0xf) << 16 |
+ adv7511_rd(sd, 0x08) << 8 |
+ adv7511_rd(sd, 0x09);
+ else
+ CTS = (adv7511_rd(sd, 0x04) & 0xf) << 16 |
+ adv7511_rd(sd, 0x05) << 8 |
+ adv7511_rd(sd, 0x06);
+ v4l2_info(sd, "CTS %s mode: N %d, CTS %d\n",
+ manual_cts ? "manual" : "automatic", N, CTS);
+ v4l2_info(sd, "VIC: detected %d, sent %d\n",
+ vic_detect, vic_sent);
+ }
if (state->dv_timings.type == V4L2_DV_BT_656_1120)
v4l2_print_dv_timings(sd->name, "timings: ",
&state->dv_timings, false);
@@ -942,26 +965,38 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
static bool edid_block_verify_crc(uint8_t *edid_block)
{
- int i;
uint8_t sum = 0;
+ int i;
for (i = 0; i < 128; i++)
- sum += *(edid_block + i);
- return (sum == 0);
+ sum += edid_block[i];
+ return sum == 0;
}
-static bool edid_segment_verify_crc(struct v4l2_subdev *sd, u32 segment)
+static bool edid_verify_crc(struct v4l2_subdev *sd, u32 segment)
{
struct adv7511_state *state = get_adv7511_state(sd);
u32 blocks = state->edid.blocks;
uint8_t *data = state->edid.data;
- if (edid_block_verify_crc(&data[segment * 256])) {
- if ((segment + 1) * 2 <= blocks)
- return edid_block_verify_crc(&data[segment * 256 + 128]);
+ if (!edid_block_verify_crc(&data[segment * 256]))
+ return false;
+ if ((segment + 1) * 2 <= blocks)
+ return edid_block_verify_crc(&data[segment * 256 + 128]);
+ return true;
+}
+
+static bool edid_verify_header(struct v4l2_subdev *sd, u32 segment)
+{
+ static const u8 hdmi_header[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+ };
+ struct adv7511_state *state = get_adv7511_state(sd);
+ u8 *data = state->edid.data;
+
+ if (segment != 0)
return true;
- }
- return false;
+ return !memcmp(data, hdmi_header, sizeof(hdmi_header));
}
static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
@@ -990,9 +1025,10 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
state->edid.blocks = state->edid.data[0x7e] + 1;
v4l2_dbg(1, debug, sd, "%s: %d blocks in total\n", __func__, state->edid.blocks);
}
- if (!edid_segment_verify_crc(sd, segment)) {
+ if (!edid_verify_crc(sd, segment) ||
+ !edid_verify_header(sd, segment)) {
/* edid crc error, force reread of edid segment */
- v4l2_dbg(1, debug, sd, "%s: edid crc error\n", __func__);
+ v4l2_err(sd, "%s: edid crc or header error\n", __func__);
state->have_monitor = false;
adv7511_s_power(sd, false);
adv7511_s_power(sd, true);
@@ -1038,6 +1074,12 @@ static void adv7511_init_setup(struct v4l2_subdev *sd)
/* clear all interrupts */
adv7511_wr(sd, 0x96, 0xff);
+ /*
+ * Stop HPD from resetting a lot of registers.
+ * It might otherwise leave the chip in a partly uninitialized state,
+ * in particular with regard to hotplug bounces.
+ */
+ adv7511_wr_and_or(sd, 0xd6, 0x3f, 0xc0);
memset(edid, 0, sizeof(struct adv7511_state_edid));
state->have_monitor = false;
adv7511_set_isr(sd, false);
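
The CTS/N logging added to adv7511_log_status() above reassembles two 20-bit audio clock regeneration values, each spread over three 8-bit registers (bits 19:16 in the low nibble of the first register, followed by two full bytes). A generic sketch of that bit packing follows, with a hypothetical read_reg() standing in for the driver's adv7511_rd() accessor.

#include <stdint.h>

/* Hypothetical register accessor standing in for the driver's i2c read. */
extern uint8_t read_reg(uint8_t reg);

/* Reassemble a 20-bit value from three consecutive registers:
 * high[3:0] -> bits 19:16, mid -> bits 15:8, low -> bits 7:0. */
static uint32_t read_20bit(uint8_t high, uint8_t mid, uint8_t low)
{
	return (uint32_t)(read_reg(high) & 0x0f) << 16 |
	       (uint32_t)read_reg(mid) << 8 |
	       read_reg(low);
}

/* As in the hunk above: N lives in 0x01..0x03, automatic CTS in
 * 0x04..0x06 and manually programmed CTS in 0x07..0x09. */
static void log_audio_clock_example(void)
{
	uint32_t n = read_20bit(0x01, 0x02, 0x03);
	uint32_t cts = read_20bit(0x04, 0x05, 0x06);

	(void)n;
	(void)cts;
}
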
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index a324106b9f11..71c8570bd9ea 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -53,8 +53,6 @@ MODULE_LICENSE("GPL");
/* ADV7604 system clock frequency */
#define ADV7604_fsc (28636360)
-#define DIGITAL_INPUT (state->mode == ADV7604_MODE_HDMI)
-
/*
**********************************************************************
*
@@ -67,17 +65,19 @@ struct adv7604_state {
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler hdl;
- enum adv7604_mode mode;
+ enum adv7604_input_port selected_input;
struct v4l2_dv_timings timings;
- u8 edid[256];
- unsigned edid_blocks;
+ struct {
+ u8 edid[256];
+ u32 present;
+ unsigned blocks;
+ } edid;
+ u16 spa_port_a[2];
struct v4l2_fract aspect_ratio;
u32 rgb_quantization_range;
struct workqueue_struct *work_queues;
struct delayed_work delayed_work_enable_hotplug;
- bool connector_hdmi;
bool restart_stdi_once;
- u32 prev_input_status;
/* i2c clients */
struct i2c_client *i2c_avlink;
@@ -160,6 +160,7 @@ static const struct v4l2_dv_timings adv7604_timings[] = {
V4L2_DV_BT_DMT_1792X1344P60,
V4L2_DV_BT_DMT_1856X1392P60,
V4L2_DV_BT_DMT_1920X1200P60_RB,
+ V4L2_DV_BT_DMT_1366X768P60_RB,
V4L2_DV_BT_DMT_1366X768P60,
V4L2_DV_BT_DMT_1920X1080P60,
{ },
@@ -507,57 +508,31 @@ static inline int edid_read_block(struct v4l2_subdev *sd, unsigned len, u8 *val)
return 0;
}
-static void adv7604_delayed_work_enable_hotplug(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct adv7604_state *state = container_of(dwork, struct adv7604_state,
- delayed_work_enable_hotplug);
- struct v4l2_subdev *sd = &state->sd;
-
- v4l2_dbg(2, debug, sd, "%s: enable hotplug\n", __func__);
-
- v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)1);
-}
-
static inline int edid_write_block(struct v4l2_subdev *sd,
unsigned len, const u8 *val)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct adv7604_state *state = to_state(sd);
int err = 0;
int i;
v4l2_dbg(2, debug, sd, "%s: write EDID block (%d byte)\n", __func__, len);
- v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)0);
-
- /* Disables I2C access to internal EDID ram from DDC port */
- rep_write_and_or(sd, 0x77, 0xf0, 0x0);
-
for (i = 0; !err && i < len; i += I2C_SMBUS_BLOCK_MAX)
err = adv_smbus_write_i2c_block_data(state->i2c_edid, i,
I2C_SMBUS_BLOCK_MAX, val + i);
- if (err)
- return err;
+ return err;
+}
- /* adv7604 calculates the checksums and enables I2C access to internal
- EDID ram from DDC port. */
- rep_write_and_or(sd, 0x77, 0xf0, 0x1);
+static void adv7604_delayed_work_enable_hotplug(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct adv7604_state *state = container_of(dwork, struct adv7604_state,
+ delayed_work_enable_hotplug);
+ struct v4l2_subdev *sd = &state->sd;
- for (i = 0; i < 1000; i++) {
- if (rep_read(sd, 0x7d) & 1)
- break;
- mdelay(1);
- }
- if (i == 1000) {
- v4l_err(client, "error enabling edid\n");
- return -EIO;
- }
+ v4l2_dbg(2, debug, sd, "%s: enable hotplug\n", __func__);
- /* enable hotplug after 100 ms */
- queue_delayed_work(state->work_queues,
- &state->delayed_work_enable_hotplug, HZ / 10);
- return 0;
+ v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)&state->edid.present);
}
static inline int hdmi_read(struct v4l2_subdev *sd, u8 reg)
@@ -574,6 +549,11 @@ static inline int hdmi_write(struct v4l2_subdev *sd, u8 reg, u8 val)
return adv_smbus_write_byte_data(state->i2c_hdmi, reg, val);
}
+static inline int hdmi_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+{
+ return hdmi_write(sd, reg, (hdmi_read(sd, reg) & mask) | val);
+}
+
static inline int test_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv7604_state *state = to_state(sd);
@@ -623,6 +603,26 @@ static inline int vdp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
/* ----------------------------------------------------------------------- */
+static inline bool is_analog_input(struct v4l2_subdev *sd)
+{
+ struct adv7604_state *state = to_state(sd);
+
+ return state->selected_input == ADV7604_INPUT_VGA_RGB ||
+ state->selected_input == ADV7604_INPUT_VGA_COMP;
+}
+
+static inline bool is_digital_input(struct v4l2_subdev *sd)
+{
+ struct adv7604_state *state = to_state(sd);
+
+ return state->selected_input == ADV7604_INPUT_HDMI_PORT_A ||
+ state->selected_input == ADV7604_INPUT_HDMI_PORT_B ||
+ state->selected_input == ADV7604_INPUT_HDMI_PORT_C ||
+ state->selected_input == ADV7604_INPUT_HDMI_PORT_D;
+}
+
+/* ----------------------------------------------------------------------- */
+
#ifdef CONFIG_VIDEO_ADV_DEBUG
static void adv7604_inv_register(struct v4l2_subdev *sd)
{
@@ -696,45 +696,47 @@ static int adv7604_g_register(struct v4l2_subdev *sd,
static int adv7604_s_register(struct v4l2_subdev *sd,
const struct v4l2_dbg_register *reg)
{
+ u8 val = reg->val & 0xff;
+
switch (reg->reg >> 8) {
case 0:
- io_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ io_write(sd, reg->reg & 0xff, val);
break;
case 1:
- avlink_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ avlink_write(sd, reg->reg & 0xff, val);
break;
case 2:
- cec_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ cec_write(sd, reg->reg & 0xff, val);
break;
case 3:
- infoframe_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ infoframe_write(sd, reg->reg & 0xff, val);
break;
case 4:
- esdp_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ esdp_write(sd, reg->reg & 0xff, val);
break;
case 5:
- dpp_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ dpp_write(sd, reg->reg & 0xff, val);
break;
case 6:
- afe_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ afe_write(sd, reg->reg & 0xff, val);
break;
case 7:
- rep_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ rep_write(sd, reg->reg & 0xff, val);
break;
case 8:
- edid_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ edid_write(sd, reg->reg & 0xff, val);
break;
case 9:
- hdmi_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ hdmi_write(sd, reg->reg & 0xff, val);
break;
case 0xa:
- test_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ test_write(sd, reg->reg & 0xff, val);
break;
case 0xb:
- cp_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ cp_write(sd, reg->reg & 0xff, val);
break;
case 0xc:
- vdp_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ vdp_write(sd, reg->reg & 0xff, val);
break;
default:
v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
@@ -748,10 +750,13 @@ static int adv7604_s_register(struct v4l2_subdev *sd,
static int adv7604_s_detect_tx_5v_ctrl(struct v4l2_subdev *sd)
{
struct adv7604_state *state = to_state(sd);
+ u8 reg_io_6f = io_read(sd, 0x6f);
- /* port A only */
return v4l2_ctrl_s_ctrl(state->detect_tx_5v_ctrl,
- ((io_read(sd, 0x6f) & 0x10) >> 4));
+ ((reg_io_6f & 0x10) >> 4) |
+ ((reg_io_6f & 0x08) >> 2) |
+ (reg_io_6f & 0x04) |
+ ((reg_io_6f & 0x02) << 2));
}
static int find_and_set_predefined_video_timings(struct v4l2_subdev *sd,
@@ -759,12 +764,11 @@ static int find_and_set_predefined_video_timings(struct v4l2_subdev *sd,
const struct adv7604_video_standards *predef_vid_timings,
const struct v4l2_dv_timings *timings)
{
- struct adv7604_state *state = to_state(sd);
int i;
for (i = 0; predef_vid_timings[i].timings.bt.width; i++) {
if (!v4l2_match_dv_timings(timings, &predef_vid_timings[i].timings,
- DIGITAL_INPUT ? 250000 : 1000000))
+ is_digital_input(sd) ? 250000 : 1000000))
continue;
io_write(sd, 0x00, predef_vid_timings[i].vid_std); /* video std */
io_write(sd, 0x01, (predef_vid_timings[i].v_freq << 4) +
@@ -799,27 +803,22 @@ static int configure_predefined_video_timings(struct v4l2_subdev *sd,
cp_write(sd, 0xab, 0x00);
cp_write(sd, 0xac, 0x00);
- switch (state->mode) {
- case ADV7604_MODE_COMP:
- case ADV7604_MODE_GR:
+ if (is_analog_input(sd)) {
err = find_and_set_predefined_video_timings(sd,
0x01, adv7604_prim_mode_comp, timings);
if (err)
err = find_and_set_predefined_video_timings(sd,
0x02, adv7604_prim_mode_gr, timings);
- break;
- case ADV7604_MODE_HDMI:
+ } else if (is_digital_input(sd)) {
err = find_and_set_predefined_video_timings(sd,
0x05, adv7604_prim_mode_hdmi_comp, timings);
if (err)
err = find_and_set_predefined_video_timings(sd,
0x06, adv7604_prim_mode_hdmi_gr, timings);
- break;
- default:
- v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
- __func__, state->mode);
+ } else {
+ v4l2_dbg(2, debug, sd, "%s: Unknown port %d selected\n",
+ __func__, state->selected_input);
err = -1;
- break;
}
@@ -846,9 +845,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
v4l2_dbg(2, debug, sd, "%s\n", __func__);
- switch (state->mode) {
- case ADV7604_MODE_COMP:
- case ADV7604_MODE_GR:
+ if (is_analog_input(sd)) {
/* auto graphics */
io_write(sd, 0x00, 0x07); /* video std */
io_write(sd, 0x01, 0x02); /* prim mode */
@@ -858,33 +855,28 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
/* Should only be set in auto-graphics mode [REF_02, p. 91-92] */
/* setup PLL_DIV_MAN_EN and PLL_DIV_RATIO */
/* IO-map reg. 0x16 and 0x17 should be written in sequence */
- if (adv_smbus_write_i2c_block_data(client, 0x16, 2, pll)) {
+ if (adv_smbus_write_i2c_block_data(client, 0x16, 2, pll))
v4l2_err(sd, "writing to reg 0x16 and 0x17 failed\n");
- break;
- }
/* active video - horizontal timing */
cp_write(sd, 0xa2, (cp_start_sav >> 4) & 0xff);
cp_write(sd, 0xa3, ((cp_start_sav & 0x0f) << 4) |
- ((cp_start_eav >> 8) & 0x0f));
+ ((cp_start_eav >> 8) & 0x0f));
cp_write(sd, 0xa4, cp_start_eav & 0xff);
/* active video - vertical timing */
cp_write(sd, 0xa5, (cp_start_vbi >> 4) & 0xff);
cp_write(sd, 0xa6, ((cp_start_vbi & 0xf) << 4) |
- ((cp_end_vbi >> 8) & 0xf));
+ ((cp_end_vbi >> 8) & 0xf));
cp_write(sd, 0xa7, cp_end_vbi & 0xff);
- break;
- case ADV7604_MODE_HDMI:
+ } else if (is_digital_input(sd)) {
/* set default prim_mode/vid_std for HDMI
according to [REF_03, c. 4.2] */
io_write(sd, 0x00, 0x02); /* video std */
io_write(sd, 0x01, 0x06); /* prim mode */
- break;
- default:
- v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
- __func__, state->mode);
- break;
+ } else {
+ v4l2_dbg(2, debug, sd, "%s: Unknown port %d selected\n",
+ __func__, state->selected_input);
}
cp_write(sd, 0x8f, (ch1_fr_ll >> 8) & 0x7);
@@ -893,43 +885,149 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
cp_write(sd, 0xac, (height & 0x0f) << 4);
}
+static void adv7604_set_offset(struct v4l2_subdev *sd, bool auto_offset, u16 offset_a, u16 offset_b, u16 offset_c)
+{
+ struct adv7604_state *state = to_state(sd);
+ u8 offset_buf[4];
+
+ if (auto_offset) {
+ offset_a = 0x3ff;
+ offset_b = 0x3ff;
+ offset_c = 0x3ff;
+ }
+
+ v4l2_dbg(2, debug, sd, "%s: %s offset: a = 0x%x, b = 0x%x, c = 0x%x\n",
+ __func__, auto_offset ? "Auto" : "Manual",
+ offset_a, offset_b, offset_c);
+
+ offset_buf[0] = (cp_read(sd, 0x77) & 0xc0) | ((offset_a & 0x3f0) >> 4);
+ offset_buf[1] = ((offset_a & 0x00f) << 4) | ((offset_b & 0x3c0) >> 6);
+ offset_buf[2] = ((offset_b & 0x03f) << 2) | ((offset_c & 0x300) >> 8);
+ offset_buf[3] = offset_c & 0x0ff;
+
+ /* Registers must be written in this order with no i2c access in between */
+ if (adv_smbus_write_i2c_block_data(state->i2c_cp, 0x77, 4, offset_buf))
+ v4l2_err(sd, "%s: i2c error writing to CP reg 0x77, 0x78, 0x79, 0x7a\n", __func__);
+}
+
+static void adv7604_set_gain(struct v4l2_subdev *sd, bool auto_gain, u16 gain_a, u16 gain_b, u16 gain_c)
+{
+ struct adv7604_state *state = to_state(sd);
+ u8 gain_buf[4];
+ u8 gain_man = 1;
+ u8 agc_mode_man = 1;
+
+ if (auto_gain) {
+ gain_man = 0;
+ agc_mode_man = 0;
+ gain_a = 0x100;
+ gain_b = 0x100;
+ gain_c = 0x100;
+ }
+
+ v4l2_dbg(2, debug, sd, "%s: %s gain: a = 0x%x, b = 0x%x, c = 0x%x\n",
+ __func__, auto_gain ? "Auto" : "Manual",
+ gain_a, gain_b, gain_c);
+
+ gain_buf[0] = ((gain_man << 7) | (agc_mode_man << 6) | ((gain_a & 0x3f0) >> 4));
+ gain_buf[1] = (((gain_a & 0x00f) << 4) | ((gain_b & 0x3c0) >> 6));
+ gain_buf[2] = (((gain_b & 0x03f) << 2) | ((gain_c & 0x300) >> 8));
+ gain_buf[3] = ((gain_c & 0x0ff));
+
+ /* Registers must be written in this order with no i2c access in between */
+ if (adv_smbus_write_i2c_block_data(state->i2c_cp, 0x73, 4, gain_buf))
+ v4l2_err(sd, "%s: i2c error writing to CP reg 0x73, 0x74, 0x75, 0x76\n", __func__);
+}
+
static void set_rgb_quantization_range(struct v4l2_subdev *sd)
{
struct adv7604_state *state = to_state(sd);
+ bool rgb_output = io_read(sd, 0x02) & 0x02;
+ bool hdmi_signal = hdmi_read(sd, 0x05) & 0x80;
+
+ v4l2_dbg(2, debug, sd, "%s: RGB quantization range: %d, RGB out: %d, HDMI: %d\n",
+ __func__, state->rgb_quantization_range,
+ rgb_output, hdmi_signal);
+
+ adv7604_set_gain(sd, true, 0x0, 0x0, 0x0);
+ adv7604_set_offset(sd, true, 0x0, 0x0, 0x0);
switch (state->rgb_quantization_range) {
case V4L2_DV_RGB_RANGE_AUTO:
- /* automatic */
- if (DIGITAL_INPUT && !(hdmi_read(sd, 0x05) & 0x80)) {
- /* receiving DVI-D signal */
+ if (state->selected_input == ADV7604_INPUT_VGA_RGB) {
+ /* Receiving analog RGB signal
+ * Set RGB full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ break;
+ }
+
+ if (state->selected_input == ADV7604_INPUT_VGA_COMP) {
+ /* Receiving analog YPbPr signal
+ * Set automode */
+ io_write_and_or(sd, 0x02, 0x0f, 0xf0);
+ break;
+ }
+
+ if (hdmi_signal) {
+ /* Receiving HDMI signal
+ * Set automode */
+ io_write_and_or(sd, 0x02, 0x0f, 0xf0);
+ break;
+ }
- /* ADV7604 selects RGB limited range regardless of
- input format (CE/IT) in automatic mode */
- if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
- /* RGB limited range (16-235) */
- io_write_and_or(sd, 0x02, 0x0f, 0x00);
+ /* Receiving DVI-D signal
+ * ADV7604 selects RGB limited range regardless of
+ * input format (CE/IT) in automatic mode */
+ if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
+ /* RGB limited range (16-235) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x00);
+ } else {
+ /* RGB full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ if (is_digital_input(sd) && rgb_output) {
+ adv7604_set_offset(sd, false, 0x40, 0x40, 0x40);
} else {
- /* RGB full range (0-255) */
- io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ adv7604_set_gain(sd, false, 0xe0, 0xe0, 0xe0);
+ adv7604_set_offset(sd, false, 0x70, 0x70, 0x70);
}
- } else {
- /* receiving HDMI or analog signal, set automode */
- io_write_and_or(sd, 0x02, 0x0f, 0xf0);
}
break;
case V4L2_DV_RGB_RANGE_LIMITED:
+ if (state->selected_input == ADV7604_INPUT_VGA_COMP) {
+ /* YCrCb limited range (16-235) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x20);
+ break;
+ }
+
/* RGB limited range (16-235) */
io_write_and_or(sd, 0x02, 0x0f, 0x00);
+
break;
case V4L2_DV_RGB_RANGE_FULL:
+ if (state->selected_input == ADV7604_INPUT_VGA_COMP) {
+ /* YCrCb full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x60);
+ break;
+ }
+
/* RGB full range (0-255) */
io_write_and_or(sd, 0x02, 0x0f, 0x10);
+
+ if (is_analog_input(sd) || hdmi_signal)
+ break;
+
+ /* Adjust gain/offset for DVI-D signals only */
+ if (rgb_output) {
+ adv7604_set_offset(sd, false, 0x40, 0x40, 0x40);
+ } else {
+ adv7604_set_gain(sd, false, 0xe0, 0xe0, 0xe0);
+ adv7604_set_offset(sd, false, 0x70, 0x70, 0x70);
+ }
break;
}
}
-
static int adv7604_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
@@ -983,8 +1081,9 @@ static inline bool no_power(struct v4l2_subdev *sd)
static inline bool no_signal_tmds(struct v4l2_subdev *sd)
{
- /* TODO port B, C and D */
- return !(io_read(sd, 0x6a) & 0x10);
+ struct adv7604_state *state = to_state(sd);
+
+ return !(io_read(sd, 0x6a) & (0x10 >> state->selected_input));
}
static inline bool no_lock_tmds(struct v4l2_subdev *sd)
@@ -1011,7 +1110,6 @@ static inline bool no_lock_stdi(struct v4l2_subdev *sd)
static inline bool no_signal(struct v4l2_subdev *sd)
{
- struct adv7604_state *state = to_state(sd);
bool ret;
ret = no_power(sd);
@@ -1019,7 +1117,7 @@ static inline bool no_signal(struct v4l2_subdev *sd)
ret |= no_lock_stdi(sd);
ret |= no_lock_sspd(sd);
- if (DIGITAL_INPUT) {
+ if (is_digital_input(sd)) {
ret |= no_lock_tmds(sd);
ret |= no_signal_tmds(sd);
}
@@ -1036,13 +1134,11 @@ static inline bool no_lock_cp(struct v4l2_subdev *sd)
static int adv7604_g_input_status(struct v4l2_subdev *sd, u32 *status)
{
- struct adv7604_state *state = to_state(sd);
-
*status = 0;
*status |= no_power(sd) ? V4L2_IN_ST_NO_POWER : 0;
*status |= no_signal(sd) ? V4L2_IN_ST_NO_SIGNAL : 0;
if (no_lock_cp(sd))
- *status |= DIGITAL_INPUT ? V4L2_IN_ST_NO_SYNC : V4L2_IN_ST_NO_H_LOCK;
+ *status |= is_digital_input(sd) ? V4L2_IN_ST_NO_SYNC : V4L2_IN_ST_NO_H_LOCK;
v4l2_dbg(1, debug, sd, "%s: status = 0x%x\n", __func__, *status);
@@ -1157,13 +1253,11 @@ static int adv7604_enum_dv_timings(struct v4l2_subdev *sd,
static int adv7604_dv_timings_cap(struct v4l2_subdev *sd,
struct v4l2_dv_timings_cap *cap)
{
- struct adv7604_state *state = to_state(sd);
-
cap->type = V4L2_DV_BT_656_1120;
cap->bt.max_width = 1920;
cap->bt.max_height = 1200;
cap->bt.min_pixelclock = 25000000;
- if (DIGITAL_INPUT)
+ if (is_digital_input(sd))
cap->bt.max_pixelclock = 225000000;
else
cap->bt.max_pixelclock = 170000000;
@@ -1179,12 +1273,11 @@ static int adv7604_dv_timings_cap(struct v4l2_subdev *sd,
static void adv7604_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
- struct adv7604_state *state = to_state(sd);
int i;
for (i = 0; adv7604_timings[i].bt.width; i++) {
if (v4l2_match_dv_timings(timings, &adv7604_timings[i],
- DIGITAL_INPUT ? 250000 : 1000000)) {
+ is_digital_input(sd) ? 250000 : 1000000)) {
*timings = adv7604_timings[i];
break;
}
@@ -1204,6 +1297,7 @@ static int adv7604_query_dv_timings(struct v4l2_subdev *sd,
memset(timings, 0, sizeof(struct v4l2_dv_timings));
if (no_signal(sd)) {
+ state->restart_stdi_once = true;
v4l2_dbg(1, debug, sd, "%s: no valid signal\n", __func__);
return -ENOLINK;
}
@@ -1216,7 +1310,7 @@ static int adv7604_query_dv_timings(struct v4l2_subdev *sd,
bt->interlaced = stdi.interlaced ?
V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
- if (DIGITAL_INPUT) {
+ if (is_digital_input(sd)) {
uint32_t freq;
timings->type = V4L2_DV_BT_656_1120;
@@ -1305,8 +1399,8 @@ found:
return -ENOLINK;
}
- if ((!DIGITAL_INPUT && bt->pixelclock > 170000000) ||
- (DIGITAL_INPUT && bt->pixelclock > 225000000)) {
+ if ((is_analog_input(sd) && bt->pixelclock > 170000000) ||
+ (is_digital_input(sd) && bt->pixelclock > 225000000)) {
v4l2_dbg(1, debug, sd, "%s: pixelclock out of range %d\n",
__func__, (u32)bt->pixelclock);
return -ERANGE;
@@ -1329,10 +1423,15 @@ static int adv7604_s_dv_timings(struct v4l2_subdev *sd,
if (!timings)
return -EINVAL;
+ if (v4l2_match_dv_timings(&state->timings, timings, 0)) {
+ v4l2_dbg(1, debug, sd, "%s: no change\n", __func__);
+ return 0;
+ }
+
bt = &timings->bt;
- if ((!DIGITAL_INPUT && bt->pixelclock > 170000000) ||
- (DIGITAL_INPUT && bt->pixelclock > 225000000)) {
+ if ((is_analog_input(sd) && bt->pixelclock > 170000000) ||
+ (is_digital_input(sd) && bt->pixelclock > 225000000)) {
v4l2_dbg(1, debug, sd, "%s: pixelclock out of range %d\n",
__func__, (u32)bt->pixelclock);
return -ERANGE;
@@ -1354,7 +1453,6 @@ static int adv7604_s_dv_timings(struct v4l2_subdev *sd,
set_rgb_quantization_range(sd);
-
if (debug > 1)
v4l2_print_dv_timings(sd->name, "adv7604_s_dv_timings: ",
timings, true);
@@ -1374,30 +1472,24 @@ static void enable_input(struct v4l2_subdev *sd)
{
struct adv7604_state *state = to_state(sd);
- switch (state->mode) {
- case ADV7604_MODE_COMP:
- case ADV7604_MODE_GR:
- /* enable */
+ if (is_analog_input(sd)) {
io_write(sd, 0x15, 0xb0); /* Disable Tristate of Pins (no audio) */
- break;
- case ADV7604_MODE_HDMI:
- /* enable */
- hdmi_write(sd, 0x1a, 0x0a); /* Unmute audio */
+ } else if (is_digital_input(sd)) {
+ hdmi_write_and_or(sd, 0x00, 0xfc, state->selected_input);
hdmi_write(sd, 0x01, 0x00); /* Enable HDMI clock terminators */
io_write(sd, 0x15, 0xa0); /* Disable Tristate of Pins */
- break;
- default:
- v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
- __func__, state->mode);
- break;
+ hdmi_write_and_or(sd, 0x1a, 0xef, 0x00); /* Unmute audio */
+ } else {
+ v4l2_dbg(2, debug, sd, "%s: Unknown port %d selected\n",
+ __func__, state->selected_input);
}
}
static void disable_input(struct v4l2_subdev *sd)
{
- /* disable */
+ hdmi_write_and_or(sd, 0x1a, 0xef, 0x10); /* Mute audio */
+ msleep(16); /* 512 samples with >= 32 kHz sample rate [REF_03, c. 7.16.10] */
io_write(sd, 0x15, 0xbe); /* Tristate all outputs from video core */
- hdmi_write(sd, 0x1a, 0x1a); /* Mute audio */
hdmi_write(sd, 0x01, 0x78); /* Disable HDMI clock terminators */
}
@@ -1405,9 +1497,7 @@ static void select_input(struct v4l2_subdev *sd)
{
struct adv7604_state *state = to_state(sd);
- switch (state->mode) {
- case ADV7604_MODE_COMP:
- case ADV7604_MODE_GR:
+ if (is_analog_input(sd)) {
/* reset ADI recommended settings for HDMI: */
/* "ADV7604 Register Settings Recommendations (rev. 2.5, June 2010)" p. 4. */
hdmi_write(sd, 0x0d, 0x04); /* HDMI filter optimization */
@@ -1433,9 +1523,9 @@ static void select_input(struct v4l2_subdev *sd)
cp_write(sd, 0x3e, 0x04); /* CP core pre-gain control */
cp_write(sd, 0xc3, 0x39); /* CP coast control. Graphics mode */
cp_write(sd, 0x40, 0x5c); /* CP core pre-gain control. Graphics mode */
- break;
+ } else if (is_digital_input(sd)) {
+ hdmi_write(sd, 0x00, state->selected_input & 0x03);
- case ADV7604_MODE_HDMI:
/* set ADI recommended settings for HDMI: */
/* "ADV7604 Register Settings Recommendations (rev. 2.5, June 2010)" p. 4. */
hdmi_write(sd, 0x0d, 0x84); /* HDMI filter optimization */
@@ -1461,12 +1551,9 @@ static void select_input(struct v4l2_subdev *sd)
cp_write(sd, 0x3e, 0x00); /* CP core pre-gain control */
cp_write(sd, 0xc3, 0x39); /* CP coast control. Graphics mode */
cp_write(sd, 0x40, 0x80); /* CP core pre-gain control. Graphics mode */
-
- break;
- default:
- v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
- __func__, state->mode);
- break;
+ } else {
+ v4l2_dbg(2, debug, sd, "%s: Unknown port %d selected\n",
+ __func__, state->selected_input);
}
}
@@ -1475,9 +1562,13 @@ static int adv7604_s_routing(struct v4l2_subdev *sd,
{
struct adv7604_state *state = to_state(sd);
- v4l2_dbg(2, debug, sd, "%s: input %d", __func__, input);
+ v4l2_dbg(2, debug, sd, "%s: input %d, selected input %d",
+ __func__, input, state->selected_input);
+
+ if (input == state->selected_input)
+ return 0;
- state->mode = input;
+ state->selected_input = input;
disable_input(sd);
@@ -1516,36 +1607,47 @@ static int adv7604_g_mbus_fmt(struct v4l2_subdev *sd,
static int adv7604_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
{
- struct adv7604_state *state = to_state(sd);
- u8 fmt_change, fmt_change_digital, tx_5v;
- u32 input_status;
+ const u8 irq_reg_0x43 = io_read(sd, 0x43);
+ const u8 irq_reg_0x6b = io_read(sd, 0x6b);
+ const u8 irq_reg_0x70 = io_read(sd, 0x70);
+ u8 fmt_change_digital;
+ u8 fmt_change;
+ u8 tx_5v;
+
+ if (irq_reg_0x43)
+ io_write(sd, 0x44, irq_reg_0x43);
+ if (irq_reg_0x70)
+ io_write(sd, 0x71, irq_reg_0x70);
+ if (irq_reg_0x6b)
+ io_write(sd, 0x6c, irq_reg_0x6b);
+
+ v4l2_dbg(2, debug, sd, "%s: ", __func__);
/* format change */
- fmt_change = io_read(sd, 0x43) & 0x98;
- if (fmt_change)
- io_write(sd, 0x44, fmt_change);
- fmt_change_digital = DIGITAL_INPUT ? (io_read(sd, 0x6b) & 0xc0) : 0;
- if (fmt_change_digital)
- io_write(sd, 0x6c, fmt_change_digital);
+ fmt_change = irq_reg_0x43 & 0x98;
+ fmt_change_digital = is_digital_input(sd) ? (irq_reg_0x6b & 0xc0) : 0;
+
if (fmt_change || fmt_change_digital) {
v4l2_dbg(1, debug, sd,
"%s: fmt_change = 0x%x, fmt_change_digital = 0x%x\n",
__func__, fmt_change, fmt_change_digital);
- adv7604_g_input_status(sd, &input_status);
- if (input_status != state->prev_input_status) {
- v4l2_dbg(1, debug, sd,
- "%s: input_status = 0x%x, prev_input_status = 0x%x\n",
- __func__, input_status, state->prev_input_status);
- state->prev_input_status = input_status;
- v4l2_subdev_notify(sd, ADV7604_FMT_CHANGE, NULL);
- }
+ v4l2_subdev_notify(sd, ADV7604_FMT_CHANGE, NULL);
if (handled)
*handled = true;
}
+ /* HDMI/DVI mode */
+ if (irq_reg_0x6b & 0x01) {
+ v4l2_dbg(1, debug, sd, "%s: irq %s mode\n", __func__,
+ (io_read(sd, 0x6a) & 0x01) ? "HDMI" : "DVI");
+ set_rgb_quantization_range(sd);
+ if (handled)
+ *handled = true;
+ }
+
/* tx 5v detect */
- tx_5v = io_read(sd, 0x70) & 0x10;
+ tx_5v = io_read(sd, 0x70) & 0x1e;
if (tx_5v) {
v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
io_write(sd, 0x71, tx_5v);
@@ -1559,55 +1661,178 @@ static int adv7604_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
static int adv7604_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
struct adv7604_state *state = to_state(sd);
+ u8 *data = NULL;
- if (edid->pad != 0)
+ if (edid->pad > ADV7604_EDID_PORT_D)
return -EINVAL;
if (edid->blocks == 0)
return -EINVAL;
- if (edid->start_block >= state->edid_blocks)
+ if (edid->blocks > 2)
+ return -EINVAL;
+ if (edid->start_block > 1)
return -EINVAL;
- if (edid->start_block + edid->blocks > state->edid_blocks)
- edid->blocks = state->edid_blocks - edid->start_block;
+ if (edid->start_block == 1)
+ edid->blocks = 1;
if (!edid->edid)
return -EINVAL;
- memcpy(edid->edid + edid->start_block * 128,
- state->edid + edid->start_block * 128,
+
+ if (edid->blocks > state->edid.blocks)
+ edid->blocks = state->edid.blocks;
+
+ switch (edid->pad) {
+ case ADV7604_EDID_PORT_A:
+ case ADV7604_EDID_PORT_B:
+ case ADV7604_EDID_PORT_C:
+ case ADV7604_EDID_PORT_D:
+ if (state->edid.present & (1 << edid->pad))
+ data = state->edid.edid;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ if (!data)
+ return -ENODATA;
+
+ memcpy(edid->edid,
+ data + edid->start_block * 128,
edid->blocks * 128);
return 0;
}
+static int get_edid_spa_location(const u8 *edid)
+{
+ u8 d;
+
+ if ((edid[0x7e] != 1) ||
+ (edid[0x80] != 0x02) ||
+ (edid[0x81] != 0x03)) {
+ return -1;
+ }
+
+ /* search Vendor Specific Data Block (tag 3) */
+ d = edid[0x82] & 0x7f;
+ if (d > 4) {
+ int i = 0x84;
+ int end = 0x80 + d;
+
+ do {
+ u8 tag = edid[i] >> 5;
+ u8 len = edid[i] & 0x1f;
+
+ if ((tag == 3) && (len >= 5))
+ return i + 4;
+ i += len + 1;
+ } while (i < end);
+ }
+ return -1;
+}
+
static int adv7604_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
struct adv7604_state *state = to_state(sd);
+ int spa_loc;
+ int tmp = 0;
int err;
+ int i;
- if (edid->pad != 0)
+ if (edid->pad > ADV7604_EDID_PORT_D)
return -EINVAL;
if (edid->start_block != 0)
return -EINVAL;
if (edid->blocks == 0) {
- /* Pull down the hotplug pin */
- v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)0);
- /* Disables I2C access to internal EDID ram from DDC port */
- rep_write_and_or(sd, 0x77, 0xf0, 0x0);
- state->edid_blocks = 0;
+ /* Disable hotplug and I2C access to EDID RAM from DDC port */
+ state->edid.present &= ~(1 << edid->pad);
+ v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)&state->edid.present);
+ rep_write_and_or(sd, 0x77, 0xf0, state->edid.present);
+
/* Fall back to a 16:9 aspect ratio */
state->aspect_ratio.numerator = 16;
state->aspect_ratio.denominator = 9;
+
+ if (!state->edid.present)
+ state->edid.blocks = 0;
+
+ v4l2_dbg(2, debug, sd, "%s: clear EDID pad %d, edid.present = 0x%x\n",
+ __func__, edid->pad, state->edid.present);
return 0;
}
- if (edid->blocks > 2)
+ if (edid->blocks > 2) {
+ edid->blocks = 2;
return -E2BIG;
+ }
if (!edid->edid)
return -EINVAL;
- memcpy(state->edid, edid->edid, 128 * edid->blocks);
- state->edid_blocks = edid->blocks;
+
+ v4l2_dbg(2, debug, sd, "%s: write EDID pad %d, edid.present = 0x%x\n",
+ __func__, edid->pad, state->edid.present);
+
+ /* Disable hotplug and I2C access to EDID RAM from DDC port */
+ cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
+ v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)&tmp);
+ rep_write_and_or(sd, 0x77, 0xf0, 0x00);
+
+ spa_loc = get_edid_spa_location(edid->edid);
+ if (spa_loc < 0)
+ spa_loc = 0xc0; /* Default value [REF_02, p. 116] */
+
+ switch (edid->pad) {
+ case ADV7604_EDID_PORT_A:
+ state->spa_port_a[0] = edid->edid[spa_loc];
+ state->spa_port_a[1] = edid->edid[spa_loc + 1];
+ break;
+ case ADV7604_EDID_PORT_B:
+ rep_write(sd, 0x70, edid->edid[spa_loc]);
+ rep_write(sd, 0x71, edid->edid[spa_loc + 1]);
+ break;
+ case ADV7604_EDID_PORT_C:
+ rep_write(sd, 0x72, edid->edid[spa_loc]);
+ rep_write(sd, 0x73, edid->edid[spa_loc + 1]);
+ break;
+ case ADV7604_EDID_PORT_D:
+ rep_write(sd, 0x74, edid->edid[spa_loc]);
+ rep_write(sd, 0x75, edid->edid[spa_loc + 1]);
+ break;
+ default:
+ return -EINVAL;
+ }
+ rep_write(sd, 0x76, spa_loc & 0xff);
+ rep_write_and_or(sd, 0x77, 0xbf, (spa_loc >> 2) & 0x40);
+
+ edid->edid[spa_loc] = state->spa_port_a[0];
+ edid->edid[spa_loc + 1] = state->spa_port_a[1];
+
+ memcpy(state->edid.edid, edid->edid, 128 * edid->blocks);
+ state->edid.blocks = edid->blocks;
state->aspect_ratio = v4l2_calc_aspect_ratio(edid->edid[0x15],
edid->edid[0x16]);
- err = edid_write_block(sd, 128 * edid->blocks, state->edid);
- if (err < 0)
- v4l2_err(sd, "error %d writing edid\n", err);
- return err;
+ state->edid.present |= 1 << edid->pad;
+
+ err = edid_write_block(sd, 128 * edid->blocks, state->edid.edid);
+ if (err < 0) {
+ v4l2_err(sd, "error %d writing edid pad %d\n", err, edid->pad);
+ return err;
+ }
+
+ /* adv7604 calculates the checksums and enables I2C access to internal
+ EDID RAM from DDC port. */
+ rep_write_and_or(sd, 0x77, 0xf0, state->edid.present);
+
+ for (i = 0; i < 1000; i++) {
+ if (rep_read(sd, 0x7d) & state->edid.present)
+ break;
+ mdelay(1);
+ }
+ if (i == 1000) {
+ v4l2_err(sd, "error enabling edid (0x%x)\n", state->edid.present);
+ return -EIO;
+ }
+
+
+ /* enable hotplug after 100 ms */
+ queue_delayed_work(state->work_queues,
+ &state->delayed_work_enable_hotplug, HZ / 10);
+ return 0;
}
/*********** avi info frame CEA-861-E **************/
@@ -1670,7 +1895,7 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
char *input_color_space_txt[16] = {
"RGB limited range (16-235)", "RGB full range (0-255)",
"YCbCr Bt.601 (16-235)", "YCbCr Bt.709 (16-235)",
- "XvYCC Bt.601", "XvYCC Bt.709",
+ "xvYCC Bt.601", "xvYCC Bt.709",
"YCbCr Bt.601 (0-255)", "YCbCr Bt.709 (0-255)",
"invalid", "invalid", "invalid", "invalid", "invalid",
"invalid", "invalid", "automatic"
@@ -1689,16 +1914,20 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "-----Chip status-----\n");
v4l2_info(sd, "Chip power: %s\n", no_power(sd) ? "off" : "on");
- v4l2_info(sd, "Connector type: %s\n", state->connector_hdmi ?
- "HDMI" : (DIGITAL_INPUT ? "DVI-D" : "DVI-A"));
- v4l2_info(sd, "EDID: %s\n", ((rep_read(sd, 0x7d) & 0x01) &&
- (rep_read(sd, 0x77) & 0x01)) ? "enabled" : "disabled ");
+ v4l2_info(sd, "EDID enabled port A: %s, B: %s, C: %s, D: %s\n",
+ ((rep_read(sd, 0x7d) & 0x01) ? "Yes" : "No"),
+ ((rep_read(sd, 0x7d) & 0x02) ? "Yes" : "No"),
+ ((rep_read(sd, 0x7d) & 0x04) ? "Yes" : "No"),
+ ((rep_read(sd, 0x7d) & 0x08) ? "Yes" : "No"));
v4l2_info(sd, "CEC: %s\n", !!(cec_read(sd, 0x2a) & 0x01) ?
"enabled" : "disabled");
v4l2_info(sd, "-----Signal status-----\n");
- v4l2_info(sd, "Cable detected (+5V power): %s\n",
- (io_read(sd, 0x6f) & 0x10) ? "true" : "false");
+ v4l2_info(sd, "Cable detected (+5V power) port A: %s, B: %s, C: %s, D: %s\n",
+ ((io_read(sd, 0x6f) & 0x10) ? "Yes" : "No"),
+ ((io_read(sd, 0x6f) & 0x08) ? "Yes" : "No"),
+ ((io_read(sd, 0x6f) & 0x04) ? "Yes" : "No"),
+ ((io_read(sd, 0x6f) & 0x02) ? "Yes" : "No"));
v4l2_info(sd, "TMDS signal detected: %s\n",
no_signal_tmds(sd) ? "false" : "true");
v4l2_info(sd, "TMDS signal locked: %s\n",
@@ -1744,11 +1973,14 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "Color space conversion: %s\n",
csc_coeff_sel_rb[cp_read(sd, 0xfc) >> 4]);
- if (!DIGITAL_INPUT)
+ if (!is_digital_input(sd))
return 0;
v4l2_info(sd, "-----%s status-----\n", is_hdmi(sd) ? "HDMI" : "DVI-D");
- v4l2_info(sd, "HDCP encrypted content: %s\n", (hdmi_read(sd, 0x05) & 0x40) ? "true" : "false");
+ v4l2_info(sd, "Digital video port selected: %c\n",
+ (hdmi_read(sd, 0x00) & 0x03) + 'A');
+ v4l2_info(sd, "HDCP encrypted content: %s\n",
+ (hdmi_read(sd, 0x05) & 0x40) ? "true" : "false");
v4l2_info(sd, "HDCP keys read: %s%s\n",
(hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no",
(hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : "");
@@ -1894,10 +2126,16 @@ static int adv7604_core_init(struct v4l2_subdev *sd)
pdata->replicate_av_codes << 1 |
pdata->invert_cbcr << 0);
- /* TODO from platform data */
cp_write(sd, 0x69, 0x30); /* Enable CP CSC */
- io_write(sd, 0x06, 0xa6); /* positive VS and HS */
- io_write(sd, 0x14, 0x7f); /* Drive strength adjusted to max */
+
+ /* VS, HS polarities */
+ io_write(sd, 0x06, 0xa0 | pdata->inv_vs_pol << 2 | pdata->inv_hs_pol << 1);
+
+ /* Adjust drive strength */
+ io_write(sd, 0x14, 0x40 | pdata->dr_str_data << 4 |
+ pdata->dr_str_clk << 2 |
+ pdata->dr_str_sync);
+
cp_write(sd, 0xba, (pdata->hdmi_free_run_mode << 1) | 0x01); /* HDMI free run */
cp_write(sd, 0xf3, 0xdc); /* Low threshold to enter/exit free run mode */
cp_write(sd, 0xf9, 0x23); /* STDI ch. 1 - LCVS change threshold -
@@ -1907,6 +2145,11 @@ static int adv7604_core_init(struct v4l2_subdev *sd)
cp_write(sd, 0xc9, 0x2d); /* use prim_mode and vid_std as free run resolution
for digital formats */
+ /* HDMI audio */
+ hdmi_write_and_or(sd, 0x15, 0xfc, 0x03); /* Mute on FIFO over-/underflow [REF_01, c. 1.2.18] */
+ hdmi_write_and_or(sd, 0x1a, 0xf1, 0x08); /* Wait 1 s before unmute */
+ hdmi_write_and_or(sd, 0x68, 0xf9, 0x06); /* FIFO reset on over-/underflow [REF_01, c. 1.2.19] */
+
/* TODO from platform data */
afe_write(sd, 0xb5, 0x01); /* Setting MCLK to 256Fs */
@@ -1917,8 +2160,8 @@ static int adv7604_core_init(struct v4l2_subdev *sd)
io_write(sd, 0x40, 0xc2); /* Configure INT1 */
io_write(sd, 0x41, 0xd7); /* STDI irq for any change, disable INT2 */
io_write(sd, 0x46, 0x98); /* Enable SSPD, STDI and CP unlocked interrupts */
- io_write(sd, 0x6e, 0xc0); /* Enable V_LOCKED and DE_REGEN_LCK interrupts */
- io_write(sd, 0x73, 0x10); /* Enable CABLE_DET_A_ST (+5v) interrupt */
+ io_write(sd, 0x6e, 0xc1); /* Enable V_LOCKED, DE_REGEN_LCK, HDMI_MODE interrupts */
+ io_write(sd, 0x73, 0x1e); /* Enable CABLE_DET_A_ST (+5v) interrupts */
return v4l2_ctrl_handler_setup(sd->ctrl_handler);
}
@@ -1964,6 +2207,8 @@ static struct i2c_client *adv7604_dummy_client(struct v4l2_subdev *sd,
static int adv7604_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ static const struct v4l2_dv_timings cea640x480 =
+ V4L2_DV_BT_CEA_640X480P59_94;
struct adv7604_state *state;
struct adv7604_platform_data *pdata = client->dev.platform_data;
struct v4l2_ctrl_handler *hdl;
@@ -1984,19 +2229,19 @@ static int adv7604_probe(struct i2c_client *client,
/* initialize variables */
state->restart_stdi_once = true;
- state->prev_input_status = ~0;
+ state->selected_input = ~0;
/* platform data */
if (!pdata) {
v4l_err(client, "No platform data!\n");
return -ENODEV;
}
- memcpy(&state->pdata, pdata, sizeof(state->pdata));
+ state->pdata = *pdata;
+ state->timings = cea640x480;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7604_ops);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- state->connector_hdmi = pdata->connector_hdmi;
/* i2c access to adv7604? */
if (adv_smbus_read_byte_data_check(client, 0xfb, false) != 0x68) {
@@ -2020,7 +2265,7 @@ static int adv7604_probe(struct i2c_client *client,
/* private controls */
state->detect_tx_5v_ctrl = v4l2_ctrl_new_std(hdl, NULL,
- V4L2_CID_DV_RX_POWER_PRESENT, 0, 1, 0, 0);
+ V4L2_CID_DV_RX_POWER_PRESENT, 0, 0x0f, 0, 0);
state->rgb_quantization_range_ctrl =
v4l2_ctrl_new_std_menu(hdl, &adv7604_ctrl_ops,
V4L2_CID_DV_RX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
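
The get_edid_spa_location() helper introduced above walks the CEA-861 extension (the second EDID block) looking for the Vendor-Specific Data Block, whose Source Physical Address the driver then patches per input port. A commented sketch of the same walk over a two-block EDID buffer, kept outside the driver for clarity:

#include <stdint.h>

/* Return the byte offset of the Source Physical Address inside a
 * 256-byte, two-block EDID, or -1 if it cannot be located. */
static int spa_location(const uint8_t *edid)
{
	uint8_t dtd_offset;
	int i, end;

	/* Exactly one extension block, tagged CEA-861 revision 3. */
	if (edid[0x7e] != 1 || edid[0x80] != 0x02 || edid[0x81] != 0x03)
		return -1;

	dtd_offset = edid[0x82] & 0x7f;
	if (dtd_offset <= 4)
		return -1;	/* no data block collection present */

	/* Data blocks start at 0x84 and end where the detailed timings begin. */
	end = 0x80 + dtd_offset;
	for (i = 0x84; i < end; i += (edid[i] & 0x1f) + 1) {
		uint8_t tag = edid[i] >> 5;
		uint8_t len = edid[i] & 0x1f;

		if (tag == 3 && len >= 5)	/* Vendor-Specific Data Block */
			return i + 4;		/* SPA follows the 3-byte IEEE OUI */
	}
	return -1;
}
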
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index b154f36740b4..9bbd6656fb8f 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -20,10 +20,13 @@
/*
* References (c = chapter, p = page):
- * REF_01 - Analog devices, ADV7842, Register Settings Recommendations,
- * Revision 2.5, June 2010
- * REF_02 - Analog devices, Register map documentation, Documentation of
- * the register maps, Software manual, Rev. F, June 2010
+ * REF_01 - Analog devices, ADV7842,
+ * Register Settings Recommendations, Rev. 1.9, April 2011
+ * REF_02 - Analog devices, Software User Guide, UG-206,
+ * ADV7842 I2C Register Maps, Rev. 0, November 2010
+ * REF_03 - Analog devices, Hardware User Guide, UG-214,
+ * ADV7842 Fast Switching 2:1 HDMI 1.4 Receiver with 3D-Comb
+ * Decoder and Digitizer, Rev. 0, January 2011
*/
@@ -61,6 +64,7 @@ MODULE_LICENSE("GPL");
*/
struct adv7842_state {
+ struct adv7842_platform_data pdata;
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler hdl;
@@ -81,7 +85,7 @@ struct adv7842_state {
bool is_cea_format;
struct workqueue_struct *work_queues;
struct delayed_work delayed_work_enable_hotplug;
- bool connector_hdmi;
+ bool restart_stdi_once;
bool hdmi_port_a;
/* i2c clients */
@@ -491,6 +495,11 @@ static inline int hdmi_write(struct v4l2_subdev *sd, u8 reg, u8 val)
return adv_smbus_write_byte_data(state->i2c_hdmi, reg, val);
}
+static inline int hdmi_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+{
+ return hdmi_write(sd, reg, (hdmi_read(sd, reg) & mask) | val);
+}
+
static inline int cp_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv7842_state *state = to_state(sd);
@@ -532,7 +541,7 @@ static void main_reset(struct v4l2_subdev *sd)
adv_smbus_write_byte_no_check(client, 0xff, 0x80);
- mdelay(2);
+ mdelay(5);
}
/* ----------------------------------------------------------------------- */
@@ -587,10 +596,10 @@ static void adv7842_delayed_work_enable_hotplug(struct work_struct *work)
v4l2_dbg(2, debug, sd, "%s: enable hotplug on ports: 0x%x\n",
__func__, present);
- if (present & 0x1)
- mask |= 0x20; /* port A */
- if (present & 0x2)
- mask |= 0x10; /* port B */
+ if (present & (0x04 << ADV7842_EDID_PORT_A))
+ mask |= 0x20;
+ if (present & (0x04 << ADV7842_EDID_PORT_B))
+ mask |= 0x10;
io_write_and_or(sd, 0x20, 0xcf, mask);
}
@@ -679,14 +688,12 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct adv7842_state *state = to_state(sd);
const u8 *val = state->hdmi_edid.edid;
- u8 cur_mask = rep_read(sd, 0x77) & 0x0c;
- u8 mask = port == 0 ? 0x4 : 0x8;
int spa_loc = edid_spa_location(val);
int err = 0;
int i;
- v4l2_dbg(2, debug, sd, "%s: write EDID on port %d (spa at 0x%x)\n",
- __func__, port, spa_loc);
+ v4l2_dbg(2, debug, sd, "%s: write EDID on port %c (spa at 0x%x)\n",
+ __func__, (port == ADV7842_EDID_PORT_A) ? 'A' : 'B', spa_loc);
/* HPA disable on port A and B */
io_write_and_or(sd, 0x20, 0xcf, 0x00);
@@ -694,6 +701,9 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
/* Disable I2C access to internal EDID ram from HDMI DDC ports */
rep_write_and_or(sd, 0x77, 0xf3, 0x00);
+ if (!state->hdmi_edid.present)
+ return 0;
+
/* edid segment pointer '0' for HDMI ports */
rep_write_and_or(sd, 0x77, 0xef, 0x00);
@@ -703,44 +713,32 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
if (err)
return err;
- if (spa_loc > 0) {
- if (port == 0) {
- /* port A SPA */
- rep_write(sd, 0x72, val[spa_loc]);
- rep_write(sd, 0x73, val[spa_loc + 1]);
- } else {
- /* port B SPA */
- rep_write(sd, 0x74, val[spa_loc]);
- rep_write(sd, 0x75, val[spa_loc + 1]);
- }
- rep_write(sd, 0x76, spa_loc);
+ if (spa_loc < 0)
+ spa_loc = 0xc0; /* Default value [REF_02, p. 199] */
+
+ if (port == ADV7842_EDID_PORT_A) {
+ rep_write(sd, 0x72, val[spa_loc]);
+ rep_write(sd, 0x73, val[spa_loc + 1]);
} else {
- /* default register values for SPA */
- if (port == 0) {
- /* port A SPA */
- rep_write(sd, 0x72, 0);
- rep_write(sd, 0x73, 0);
- } else {
- /* port B SPA */
- rep_write(sd, 0x74, 0);
- rep_write(sd, 0x75, 0);
- }
- rep_write(sd, 0x76, 0xc0);
+ rep_write(sd, 0x74, val[spa_loc]);
+ rep_write(sd, 0x75, val[spa_loc + 1]);
}
- rep_write_and_or(sd, 0x77, 0xbf, 0x00);
+ rep_write(sd, 0x76, spa_loc & 0xff);
+ rep_write_and_or(sd, 0x77, 0xbf, (spa_loc >> 2) & 0x40);
/* Calculates the checksums and enables I2C access to internal
* EDID ram from HDMI DDC ports
*/
- rep_write_and_or(sd, 0x77, 0xf3, mask | cur_mask);
+ rep_write_and_or(sd, 0x77, 0xf3, state->hdmi_edid.present);
for (i = 0; i < 1000; i++) {
- if (rep_read(sd, 0x7d) & mask)
+ if (rep_read(sd, 0x7d) & state->hdmi_edid.present)
break;
mdelay(1);
}
if (i == 1000) {
- v4l_err(client, "error enabling edid on port %d\n", port);
+ v4l_err(client, "error enabling edid on port %c\n",
+ (port == ADV7842_EDID_PORT_A) ? 'A' : 'B');
return -EIO;
}
@@ -927,7 +925,7 @@ static int configure_predefined_video_timings(struct v4l2_subdev *sd,
cp_write(sd, 0x27, 0x00);
cp_write(sd, 0x28, 0x00);
cp_write(sd, 0x29, 0x00);
- cp_write(sd, 0x8f, 0x00);
+ cp_write(sd, 0x8f, 0x40);
cp_write(sd, 0x90, 0x00);
cp_write(sd, 0xa5, 0x00);
cp_write(sd, 0xa6, 0x00);
@@ -1033,34 +1031,60 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
{
struct adv7842_state *state = to_state(sd);
+ v4l2_dbg(2, debug, sd, "%s: rgb_quantization_range = %d\n",
+ __func__, state->rgb_quantization_range);
+
switch (state->rgb_quantization_range) {
case V4L2_DV_RGB_RANGE_AUTO:
- /* automatic */
- if (is_digital_input(sd) && !(hdmi_read(sd, 0x05) & 0x80)) {
- /* receiving DVI-D signal */
-
- /* ADV7842 selects RGB limited range regardless of
- input format (CE/IT) in automatic mode */
- if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
- /* RGB limited range (16-235) */
- io_write_and_or(sd, 0x02, 0x0f, 0x00);
-
- } else {
- /* RGB full range (0-255) */
- io_write_and_or(sd, 0x02, 0x0f, 0x10);
- }
- } else {
- /* receiving HDMI or analog signal, set automode */
+ if (state->mode == ADV7842_MODE_RGB) {
+ /* Receiving analog RGB signal
+ * Set RGB full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ break;
+ }
+
+ if (state->mode == ADV7842_MODE_COMP) {
+ /* Receiving analog YPbPr signal
+ * Set automode */
io_write_and_or(sd, 0x02, 0x0f, 0xf0);
+ break;
+ }
+
+ if (hdmi_read(sd, 0x05) & 0x80) {
+ /* Receiving HDMI signal
+ * Set automode */
+ io_write_and_or(sd, 0x02, 0x0f, 0xf0);
+ break;
+ }
+
+ /* Receiving DVI-D signal
+ * ADV7842 selects RGB limited range regardless of
+ * input format (CE/IT) in automatic mode */
+ if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
+ /* RGB limited range (16-235) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x00);
+ } else {
+ /* RGB full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
}
break;
case V4L2_DV_RGB_RANGE_LIMITED:
- /* RGB limited range (16-235) */
- io_write_and_or(sd, 0x02, 0x0f, 0x00);
+ if (state->mode == ADV7842_MODE_COMP) {
+ /* YCrCb limited range (16-235) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x20);
+ } else {
+ /* RGB limited range (16-235) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x00);
+ }
break;
case V4L2_DV_RGB_RANGE_FULL:
- /* RGB full range (0-255) */
- io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ if (state->mode == ADV7842_MODE_COMP) {
+ /* YCrCb full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x60);
+ } else {
+ /* RGB full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ }
break;
}
}
@@ -1298,7 +1322,7 @@ static int adv7842_dv_timings_cap(struct v4l2_subdev *sd,
}
/* Fill the optional fields .standards and .flags in struct v4l2_dv_timings
- if the format is listed in adv7604_timings[] */
+ if the format is listed in adv7842_timings[] */
static void adv7842_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
@@ -1314,119 +1338,106 @@ static int adv7842_query_dv_timings(struct v4l2_subdev *sd,
struct v4l2_bt_timings *bt = &timings->bt;
struct stdi_readback stdi = { 0 };
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
/* SDP block */
if (state->mode == ADV7842_MODE_SDP)
return -ENODATA;
/* read STDI */
if (read_stdi(sd, &stdi)) {
+ state->restart_stdi_once = true;
v4l2_dbg(1, debug, sd, "%s: no valid signal\n", __func__);
return -ENOLINK;
}
bt->interlaced = stdi.interlaced ?
V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
- bt->polarities = ((hdmi_read(sd, 0x05) & 0x10) ? V4L2_DV_VSYNC_POS_POL : 0) |
- ((hdmi_read(sd, 0x05) & 0x20) ? V4L2_DV_HSYNC_POS_POL : 0);
- bt->vsync = stdi.lcvs;
if (is_digital_input(sd)) {
- bool lock = hdmi_read(sd, 0x04) & 0x02;
- bool interlaced = hdmi_read(sd, 0x0b) & 0x20;
- unsigned w = (hdmi_read(sd, 0x07) & 0x1f) * 256 + hdmi_read(sd, 0x08);
- unsigned h = (hdmi_read(sd, 0x09) & 0x1f) * 256 + hdmi_read(sd, 0x0a);
- unsigned w_total = (hdmi_read(sd, 0x1e) & 0x3f) * 256 +
- hdmi_read(sd, 0x1f);
- unsigned h_total = ((hdmi_read(sd, 0x26) & 0x3f) * 256 +
- hdmi_read(sd, 0x27)) / 2;
- unsigned freq = (((hdmi_read(sd, 0x51) << 1) +
- (hdmi_read(sd, 0x52) >> 7)) * 1000000) +
- ((hdmi_read(sd, 0x52) & 0x7f) * 1000000) / 128;
- int i;
+ uint32_t freq;
- if (is_hdmi(sd)) {
- /* adjust for deep color mode */
- freq = freq * 8 / (((hdmi_read(sd, 0x0b) & 0xc0)>>6) * 2 + 8);
- }
-
- /* No lock? */
- if (!lock) {
- v4l2_dbg(1, debug, sd, "%s: no lock on TMDS signal\n", __func__);
- return -ENOLCK;
- }
- /* Interlaced? */
- if (interlaced) {
- v4l2_dbg(1, debug, sd, "%s: interlaced video not supported\n", __func__);
- return -ERANGE;
- }
-
- for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
- const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
-
- if (!v4l2_valid_dv_timings(&v4l2_dv_timings_presets[i],
- adv7842_get_dv_timings_cap(sd),
- adv7842_check_dv_timings, NULL))
- continue;
- if (w_total != htotal(bt) || h_total != vtotal(bt))
- continue;
+ timings->type = V4L2_DV_BT_656_1120;
- if (w != bt->width || h != bt->height)
- continue;
+ bt->width = (hdmi_read(sd, 0x07) & 0x0f) * 256 + hdmi_read(sd, 0x08);
+ bt->height = (hdmi_read(sd, 0x09) & 0x0f) * 256 + hdmi_read(sd, 0x0a);
+ freq = (hdmi_read(sd, 0x06) * 1000000) +
+ ((hdmi_read(sd, 0x3b) & 0x30) >> 4) * 250000;
- if (abs(freq - bt->pixelclock) > 1000000)
- continue;
- *timings = v4l2_dv_timings_presets[i];
- return 0;
+ if (is_hdmi(sd)) {
+ /* adjust for deep color mode */
+ freq = freq * 8 / (((hdmi_read(sd, 0x0b) & 0xc0) >> 5) + 8);
}
-
- timings->type = V4L2_DV_BT_656_1120;
-
- bt->width = w;
- bt->height = h;
- bt->interlaced = (hdmi_read(sd, 0x0b) & 0x20) ?
- V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
- bt->polarities = ((hdmi_read(sd, 0x05) & 0x10) ?
- V4L2_DV_VSYNC_POS_POL : 0) | ((hdmi_read(sd, 0x05) & 0x20) ?
- V4L2_DV_HSYNC_POS_POL : 0);
- bt->pixelclock = (((hdmi_read(sd, 0x51) << 1) +
- (hdmi_read(sd, 0x52) >> 7)) * 1000000) +
- ((hdmi_read(sd, 0x52) & 0x7f) * 1000000) / 128;
- bt->hfrontporch = (hdmi_read(sd, 0x20) & 0x1f) * 256 +
+ bt->pixelclock = freq;
+ bt->hfrontporch = (hdmi_read(sd, 0x20) & 0x03) * 256 +
hdmi_read(sd, 0x21);
- bt->hsync = (hdmi_read(sd, 0x22) & 0x1f) * 256 +
+ bt->hsync = (hdmi_read(sd, 0x22) & 0x03) * 256 +
hdmi_read(sd, 0x23);
- bt->hbackporch = (hdmi_read(sd, 0x24) & 0x1f) * 256 +
+ bt->hbackporch = (hdmi_read(sd, 0x24) & 0x03) * 256 +
hdmi_read(sd, 0x25);
- bt->vfrontporch = ((hdmi_read(sd, 0x2a) & 0x3f) * 256 +
- hdmi_read(sd, 0x2b)) / 2;
- bt->il_vfrontporch = ((hdmi_read(sd, 0x2c) & 0x3f) * 256 +
- hdmi_read(sd, 0x2d)) / 2;
- bt->vsync = ((hdmi_read(sd, 0x2e) & 0x3f) * 256 +
- hdmi_read(sd, 0x2f)) / 2;
- bt->il_vsync = ((hdmi_read(sd, 0x30) & 0x3f) * 256 +
- hdmi_read(sd, 0x31)) / 2;
- bt->vbackporch = ((hdmi_read(sd, 0x32) & 0x3f) * 256 +
- hdmi_read(sd, 0x33)) / 2;
- bt->il_vbackporch = ((hdmi_read(sd, 0x34) & 0x3f) * 256 +
- hdmi_read(sd, 0x35)) / 2;
-
- bt->standards = 0;
- bt->flags = 0;
- } else {
- /* Interlaced? */
- if (stdi.interlaced) {
- v4l2_dbg(1, debug, sd, "%s: interlaced video not supported\n", __func__);
- return -ERANGE;
+ bt->vfrontporch = ((hdmi_read(sd, 0x2a) & 0x1f) * 256 +
+ hdmi_read(sd, 0x2b)) / 2;
+ bt->vsync = ((hdmi_read(sd, 0x2e) & 0x1f) * 256 +
+ hdmi_read(sd, 0x2f)) / 2;
+ bt->vbackporch = ((hdmi_read(sd, 0x32) & 0x1f) * 256 +
+ hdmi_read(sd, 0x33)) / 2;
+ bt->polarities = ((hdmi_read(sd, 0x05) & 0x10) ? V4L2_DV_VSYNC_POS_POL : 0) |
+ ((hdmi_read(sd, 0x05) & 0x20) ? V4L2_DV_HSYNC_POS_POL : 0);
+ if (bt->interlaced == V4L2_DV_INTERLACED) {
+ bt->height += (hdmi_read(sd, 0x0b) & 0x0f) * 256 +
+ hdmi_read(sd, 0x0c);
+ bt->il_vfrontporch = ((hdmi_read(sd, 0x2c) & 0x1f) * 256 +
+ hdmi_read(sd, 0x2d)) / 2;
+ bt->il_vsync = ((hdmi_read(sd, 0x30) & 0x1f) * 256 +
+ hdmi_read(sd, 0x31)) / 2;
+ bt->vbackporch = ((hdmi_read(sd, 0x34) & 0x1f) * 256 +
+ hdmi_read(sd, 0x35)) / 2;
}
-
+ adv7842_fill_optional_dv_timings_fields(sd, timings);
+ } else {
+ /* find format
+ * Since LCVS values are inaccurate [REF_03, p. 339-340],
+ * stdi2dv_timings() is called with lcvs +-1 if the first attempt fails.
+ */
+ if (!stdi2dv_timings(sd, &stdi, timings))
+ goto found;
+ stdi.lcvs += 1;
+ v4l2_dbg(1, debug, sd, "%s: lcvs + 1 = %d\n", __func__, stdi.lcvs);
+ if (!stdi2dv_timings(sd, &stdi, timings))
+ goto found;
+ stdi.lcvs -= 2;
+ v4l2_dbg(1, debug, sd, "%s: lcvs - 1 = %d\n", __func__, stdi.lcvs);
if (stdi2dv_timings(sd, &stdi, timings)) {
+ /*
+ * The STDI block may measure wrong values, especially
+ * for lcvs and lcf. If the driver cannot find any
+ * valid timing, the STDI block is restarted to measure
+ * the video timings again. The function will return an
+ * error, but the restart of STDI will generate a new
+ * STDI interrupt and the format detection process will
+ * restart.
+ */
+ if (state->restart_stdi_once) {
+ v4l2_dbg(1, debug, sd, "%s: restart STDI\n", __func__);
+ /* TODO restart STDI for Sync Channel 2 */
+ /* enter one-shot mode */
+ cp_write_and_or(sd, 0x86, 0xf9, 0x00);
+ /* trigger STDI restart */
+ cp_write_and_or(sd, 0x86, 0xf9, 0x04);
+ /* reset to continuous mode */
+ cp_write_and_or(sd, 0x86, 0xf9, 0x02);
+ state->restart_stdi_once = false;
+ return -ENOLINK;
+ }
v4l2_dbg(1, debug, sd, "%s: format not supported\n", __func__);
return -ERANGE;
}
+ state->restart_stdi_once = true;
}
+found:
if (debug > 1)
- v4l2_print_dv_timings(sd->name, "adv7842_query_dv_timings: ",
- timings, true);
+ v4l2_print_dv_timings(sd->name, "adv7842_query_dv_timings:",
+ timings, true);
return 0;
}
@@ -1437,9 +1448,16 @@ static int adv7842_s_dv_timings(struct v4l2_subdev *sd,
struct v4l2_bt_timings *bt;
int err;
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
if (state->mode == ADV7842_MODE_SDP)
return -ENODATA;
+ if (v4l2_match_dv_timings(&state->timings, timings, 0)) {
+ v4l2_dbg(1, debug, sd, "%s: no change\n", __func__);
+ return 0;
+ }
+
bt = &timings->bt;
if (!v4l2_valid_dv_timings(timings, adv7842_get_dv_timings_cap(sd),
@@ -1450,7 +1468,7 @@ static int adv7842_s_dv_timings(struct v4l2_subdev *sd,
state->timings = *timings;
- cp_write(sd, 0x91, bt->interlaced ? 0x50 : 0x10);
+ cp_write(sd, 0x91, bt->interlaced ? 0x40 : 0x00);
/* Use prim_mode and vid_std when available */
err = configure_predefined_video_timings(sd, timings);
@@ -1483,18 +1501,18 @@ static int adv7842_g_dv_timings(struct v4l2_subdev *sd,
static void enable_input(struct v4l2_subdev *sd)
{
struct adv7842_state *state = to_state(sd);
+
+ set_rgb_quantization_range(sd);
switch (state->mode) {
case ADV7842_MODE_SDP:
case ADV7842_MODE_COMP:
case ADV7842_MODE_RGB:
- /* enable */
io_write(sd, 0x15, 0xb0); /* Disable Tristate of Pins (no audio) */
break;
case ADV7842_MODE_HDMI:
- /* enable */
- hdmi_write(sd, 0x1a, 0x0a); /* Unmute audio */
hdmi_write(sd, 0x01, 0x00); /* Enable HDMI clock terminators */
io_write(sd, 0x15, 0xa0); /* Disable Tristate of Pins */
+ hdmi_write_and_or(sd, 0x1a, 0xef, 0x00); /* Unmute audio */
break;
default:
v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
@@ -1505,9 +1523,9 @@ static void enable_input(struct v4l2_subdev *sd)
static void disable_input(struct v4l2_subdev *sd)
{
- /* disable */
+ hdmi_write_and_or(sd, 0x1a, 0xef, 0x10); /* Mute audio [REF_01, c. 2.2.2] */
+ msleep(16); /* 512 samples with >= 32 kHz sample rate [REF_03, c. 8.29] */
io_write(sd, 0x15, 0xbe); /* Tristate all outputs from video core */
- hdmi_write(sd, 0x1a, 0x1a); /* Mute audio */
hdmi_write(sd, 0x01, 0x78); /* Disable HDMI clock terminators */
}
@@ -1575,9 +1593,6 @@ static void select_input(struct v4l2_subdev *sd,
afe_write(sd, 0x00, 0x00); /* power up ADC */
afe_write(sd, 0xc8, 0x00); /* phase control */
- io_write(sd, 0x19, 0x83); /* LLC DLL phase */
- io_write(sd, 0x33, 0x40); /* LLC DLL enable */
-
io_write(sd, 0xdd, 0x90); /* Manual 2x output clock */
/* script says register 0xde, which don't exist in manual */
@@ -1611,8 +1626,6 @@ static void select_input(struct v4l2_subdev *sd,
/* deinterlacer enabled and 3D comb */
sdp_write_and_or(sd, 0x12, 0xf6, 0x09);
- sdp_write(sd, 0xdd, 0x08); /* free run auto */
-
break;
case ADV7842_MODE_COMP:
@@ -1627,6 +1640,13 @@ static void select_input(struct v4l2_subdev *sd,
afe_write(sd, 0x00, 0x00); /* power up ADC */
afe_write(sd, 0xc8, 0x00); /* phase control */
+ if (state->mode == ADV7842_MODE_COMP) {
+ /* force to YCrCb */
+ io_write_and_or(sd, 0x02, 0x0f, 0x60);
+ } else {
+ /* force to RGB */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ }
/* set ADI recommended settings for digitizer */
/* "ADV7842 Register Settings Recommendations
@@ -1722,19 +1742,19 @@ static int adv7842_s_routing(struct v4l2_subdev *sd,
switch (input) {
case ADV7842_SELECT_HDMI_PORT_A:
- /* TODO select HDMI_COMP or HDMI_GR */
state->mode = ADV7842_MODE_HDMI;
state->vid_std_select = ADV7842_HDMI_COMP_VID_STD_HD_1250P;
state->hdmi_port_a = true;
break;
case ADV7842_SELECT_HDMI_PORT_B:
- /* TODO select HDMI_COMP or HDMI_GR */
state->mode = ADV7842_MODE_HDMI;
state->vid_std_select = ADV7842_HDMI_COMP_VID_STD_HD_1250P;
state->hdmi_port_a = false;
break;
case ADV7842_SELECT_VGA_COMP:
- v4l2_info(sd, "%s: VGA component: todo\n", __func__);
+ state->mode = ADV7842_MODE_COMP;
+ state->vid_std_select = ADV7842_RGB_VID_STD_AUTO_GRAPH_MODE;
+ break;
case ADV7842_SELECT_VGA_RGB:
state->mode = ADV7842_MODE_RGB;
state->vid_std_select = ADV7842_RGB_VID_STD_AUTO_GRAPH_MODE;
@@ -1814,12 +1834,15 @@ static void adv7842_irq_enable(struct v4l2_subdev *sd, bool enable)
io_write(sd, 0x78, 0x03);
/* Enable SDP Standard Detection Change and SDP Video Detected */
io_write(sd, 0xa0, 0x09);
+ /* Enable HDMI_MODE interrupt */
+ io_write(sd, 0x69, 0x08);
} else {
io_write(sd, 0x46, 0x0);
io_write(sd, 0x5a, 0x0);
io_write(sd, 0x73, 0x0);
io_write(sd, 0x78, 0x0);
io_write(sd, 0xa0, 0x0);
+ io_write(sd, 0x69, 0x0);
}
}
@@ -1827,11 +1850,9 @@ static int adv7842_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
{
struct adv7842_state *state = to_state(sd);
u8 fmt_change_cp, fmt_change_digital, fmt_change_sdp;
- u8 irq_status[5];
- u8 irq_cfg = io_read(sd, 0x40);
+ u8 irq_status[6];
- /* disable irq-pin output */
- io_write(sd, 0x40, irq_cfg | 0x3);
+ adv7842_irq_enable(sd, false);
/* read status */
irq_status[0] = io_read(sd, 0x43);
@@ -1839,6 +1860,7 @@ static int adv7842_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
irq_status[2] = io_read(sd, 0x70);
irq_status[3] = io_read(sd, 0x75);
irq_status[4] = io_read(sd, 0x9d);
+ irq_status[5] = io_read(sd, 0x66);
/* and clear */
if (irq_status[0])
@@ -1851,10 +1873,14 @@ static int adv7842_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
io_write(sd, 0x76, irq_status[3]);
if (irq_status[4])
io_write(sd, 0x9e, irq_status[4]);
+ if (irq_status[5])
+ io_write(sd, 0x67, irq_status[5]);
- v4l2_dbg(1, debug, sd, "%s: irq %x, %x, %x, %x, %x\n", __func__,
+ adv7842_irq_enable(sd, true);
+
+ v4l2_dbg(1, debug, sd, "%s: irq %x, %x, %x, %x, %x, %x\n", __func__,
irq_status[0], irq_status[1], irq_status[2],
- irq_status[3], irq_status[4]);
+ irq_status[3], irq_status[4], irq_status[5]);
/* format change CP */
fmt_change_cp = irq_status[0] & 0x9c;
@@ -1871,25 +1897,72 @@ static int adv7842_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
else
fmt_change_digital = 0;
- /* notify */
+ /* format change */
if (fmt_change_cp || fmt_change_digital || fmt_change_sdp) {
v4l2_dbg(1, debug, sd,
"%s: fmt_change_cp = 0x%x, fmt_change_digital = 0x%x, fmt_change_sdp = 0x%x\n",
__func__, fmt_change_cp, fmt_change_digital,
fmt_change_sdp);
v4l2_subdev_notify(sd, ADV7842_FMT_CHANGE, NULL);
+ if (handled)
+ *handled = true;
}
- /* 5v cable detect */
- if (irq_status[2])
+ /* HDMI/DVI mode */
+ if (irq_status[5] & 0x08) {
+ v4l2_dbg(1, debug, sd, "%s: irq %s mode\n", __func__,
+ (io_read(sd, 0x65) & 0x08) ? "HDMI" : "DVI");
+ if (handled)
+ *handled = true;
+ }
+
+ /* tx 5v detect */
+ if (irq_status[2] & 0x3) {
+ v4l2_dbg(1, debug, sd, "%s: irq tx_5v\n", __func__);
adv7842_s_detect_tx_5v_ctrl(sd);
+ if (handled)
+ *handled = true;
+ }
+ return 0;
+}
+
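+/*
+ * Return the EDID that was previously set for the selected pad (HDMI port A,
+ * HDMI port B or VGA). Only blocks 0 and 1 exist; requesting start_block 1
+ * limits the read to that single block.
+ */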
+static int adv7842_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+{
+ struct adv7842_state *state = to_state(sd);
+ u8 *data = NULL;
- if (handled)
- *handled = true;
+ if (edid->pad > ADV7842_EDID_PORT_VGA)
+ return -EINVAL;
+ if (edid->blocks == 0)
+ return -EINVAL;
+ if (edid->blocks > 2)
+ return -EINVAL;
+ if (edid->start_block > 1)
+ return -EINVAL;
+ if (edid->start_block == 1)
+ edid->blocks = 1;
+ if (!edid->edid)
+ return -EINVAL;
- /* re-enable irq-pin output */
- io_write(sd, 0x40, irq_cfg);
+ switch (edid->pad) {
+ case ADV7842_EDID_PORT_A:
+ case ADV7842_EDID_PORT_B:
+ if (state->hdmi_edid.present & (0x04 << edid->pad))
+ data = state->hdmi_edid.edid;
+ break;
+ case ADV7842_EDID_PORT_VGA:
+ if (state->vga_edid.present)
+ data = state->vga_edid.edid;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (!data)
+ return -ENODATA;
+ memcpy(edid->edid,
+ data + edid->start_block * 128,
+ edid->blocks * 128);
return 0;
}
@@ -1898,7 +1971,7 @@ static int adv7842_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *e)
struct adv7842_state *state = to_state(sd);
int err = 0;
- if (e->pad > 2)
+ if (e->pad > ADV7842_EDID_PORT_VGA)
return -EINVAL;
if (e->start_block != 0)
return -EINVAL;
@@ -1911,20 +1984,25 @@ static int adv7842_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *e)
state->aspect_ratio = v4l2_calc_aspect_ratio(e->edid[0x15],
e->edid[0x16]);
- if (e->pad == 2) {
+ switch (e->pad) {
+ case ADV7842_EDID_PORT_VGA:
memset(&state->vga_edid.edid, 0, 256);
state->vga_edid.present = e->blocks ? 0x1 : 0x0;
memcpy(&state->vga_edid.edid, e->edid, 128 * e->blocks);
err = edid_write_vga_segment(sd);
- } else {
- u32 mask = 0x1<<e->pad;
+ break;
+ case ADV7842_EDID_PORT_A:
+ case ADV7842_EDID_PORT_B:
memset(&state->hdmi_edid.edid, 0, 256);
if (e->blocks)
- state->hdmi_edid.present |= mask;
+ state->hdmi_edid.present |= 0x04 << e->pad;
else
- state->hdmi_edid.present &= ~mask;
- memcpy(&state->hdmi_edid.edid, e->edid, 128*e->blocks);
+ state->hdmi_edid.present &= ~(0x04 << e->pad);
+ memcpy(&state->hdmi_edid.edid, e->edid, 128 * e->blocks);
err = edid_write_hdmi_segment(sd, e->pad);
+ break;
+ default:
+ return -EINVAL;
}
if (err < 0)
v4l2_err(sd, "error %d writing edid on port %d\n", err, e->pad);
@@ -2156,7 +2234,7 @@ static int adv7842_cp_log_status(struct v4l2_subdev *sd)
static const char * const input_color_space_txt[16] = {
"RGB limited range (16-235)", "RGB full range (0-255)",
"YCbCr Bt.601 (16-235)", "YCbCr Bt.709 (16-235)",
- "XvYCC Bt.601", "XvYCC Bt.709",
+ "xvYCC Bt.601", "xvYCC Bt.709",
"YCbCr Bt.601 (0-255)", "YCbCr Bt.709 (0-255)",
"invalid", "invalid", "invalid", "invalid", "invalid",
"invalid", "invalid", "automatic"
@@ -2175,8 +2253,6 @@ static int adv7842_cp_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "-----Chip status-----\n");
v4l2_info(sd, "Chip power: %s\n", no_power(sd) ? "off" : "on");
- v4l2_info(sd, "Connector type: %s\n", state->connector_hdmi ?
- "HDMI" : (is_digital_input(sd) ? "DVI-D" : "DVI-A"));
v4l2_info(sd, "HDMI/DVI-D port selected: %s\n",
state->hdmi_port_a ? "A" : "B");
v4l2_info(sd, "EDID A %s, B %s\n",
@@ -2354,15 +2430,63 @@ static int adv7842_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
return 0;
}
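+/*
+ * Program the SDP I/O sync adjustment registers from platform data, or
+ * restore the chip defaults when no adjustment is requested.
+ */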
+static void adv7842_s_sdp_io(struct v4l2_subdev *sd, struct adv7842_sdp_io_sync_adjustment *s)
+{
+ if (s && s->adjust) {
+ sdp_io_write(sd, 0x94, (s->hs_beg >> 8) & 0xf);
+ sdp_io_write(sd, 0x95, s->hs_beg & 0xff);
+ sdp_io_write(sd, 0x96, (s->hs_width >> 8) & 0xf);
+ sdp_io_write(sd, 0x97, s->hs_width & 0xff);
+ sdp_io_write(sd, 0x98, (s->de_beg >> 8) & 0xf);
+ sdp_io_write(sd, 0x99, s->de_beg & 0xff);
+ sdp_io_write(sd, 0x9a, (s->de_end >> 8) & 0xf);
+ sdp_io_write(sd, 0x9b, s->de_end & 0xff);
+ sdp_io_write(sd, 0xa8, s->vs_beg_o);
+ sdp_io_write(sd, 0xa9, s->vs_beg_e);
+ sdp_io_write(sd, 0xaa, s->vs_end_o);
+ sdp_io_write(sd, 0xab, s->vs_end_e);
+ sdp_io_write(sd, 0xac, s->de_v_beg_o);
+ sdp_io_write(sd, 0xad, s->de_v_beg_e);
+ sdp_io_write(sd, 0xae, s->de_v_end_o);
+ sdp_io_write(sd, 0xaf, s->de_v_end_e);
+ } else {
+ /* set to default */
+ sdp_io_write(sd, 0x94, 0x00);
+ sdp_io_write(sd, 0x95, 0x00);
+ sdp_io_write(sd, 0x96, 0x00);
+ sdp_io_write(sd, 0x97, 0x20);
+ sdp_io_write(sd, 0x98, 0x00);
+ sdp_io_write(sd, 0x99, 0x00);
+ sdp_io_write(sd, 0x9a, 0x00);
+ sdp_io_write(sd, 0x9b, 0x00);
+ sdp_io_write(sd, 0xa8, 0x04);
+ sdp_io_write(sd, 0xa9, 0x04);
+ sdp_io_write(sd, 0xaa, 0x04);
+ sdp_io_write(sd, 0xab, 0x04);
+ sdp_io_write(sd, 0xac, 0x04);
+ sdp_io_write(sd, 0xad, 0x04);
+ sdp_io_write(sd, 0xae, 0x04);
+ sdp_io_write(sd, 0xaf, 0x04);
+ }
+}
+
static int adv7842_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
{
struct adv7842_state *state = to_state(sd);
+ struct adv7842_platform_data *pdata = &state->pdata;
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
if (state->mode != ADV7842_MODE_SDP)
return -ENODATA;
+ if (norm & V4L2_STD_625_50)
+ adv7842_s_sdp_io(sd, &pdata->sdp_io_sync_625);
+ else if (norm & V4L2_STD_525_60)
+ adv7842_s_sdp_io(sd, &pdata->sdp_io_sync_525);
+ else
+ adv7842_s_sdp_io(sd, NULL);
+
if (norm & V4L2_STD_ALL) {
state->norm = norm;
return 0;
@@ -2385,9 +2509,10 @@ static int adv7842_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
/* ----------------------------------------------------------------------- */
-static int adv7842_core_init(struct v4l2_subdev *sd,
- const struct adv7842_platform_data *pdata)
+static int adv7842_core_init(struct v4l2_subdev *sd)
{
+ struct adv7842_state *state = to_state(sd);
+ struct adv7842_platform_data *pdata = &state->pdata;
hdmi_write(sd, 0x48,
(pdata->disable_pwrdnb ? 0x80 : 0) |
(pdata->disable_cable_det_rst ? 0x40 : 0));
@@ -2400,7 +2525,7 @@ static int adv7842_core_init(struct v4l2_subdev *sd,
/* video format */
io_write(sd, 0x02,
- pdata->inp_color_space << 4 |
+ 0xf0 |
pdata->alt_gamma << 3 |
pdata->op_656_range << 2 |
pdata->rgb_out << 1 |
@@ -2412,13 +2537,24 @@ static int adv7842_core_init(struct v4l2_subdev *sd,
pdata->replicate_av_codes << 1 |
pdata->invert_cbcr << 0);
+ /* HDMI audio */
+ hdmi_write_and_or(sd, 0x1a, 0xf1, 0x08); /* Wait 1 s before unmute */
+
/* Drive strength */
- io_write_and_or(sd, 0x14, 0xc0, pdata->drive_strength.data<<4 |
- pdata->drive_strength.clock<<2 |
- pdata->drive_strength.sync);
+ io_write_and_or(sd, 0x14, 0xc0,
+ pdata->dr_str_data << 4 |
+ pdata->dr_str_clk << 2 |
+ pdata->dr_str_sync);
/* HDMI free run */
- cp_write(sd, 0xba, (pdata->hdmi_free_run_mode << 1) | 0x01);
+ cp_write_and_or(sd, 0xba, 0xfc, pdata->hdmi_free_run_enable |
+ (pdata->hdmi_free_run_mode << 1));
+
+ /* SPD free run */
+ sdp_write_and_or(sd, 0xdd, 0xf0, pdata->sdp_free_run_force |
+ (pdata->sdp_free_run_cbar_en << 1) |
+ (pdata->sdp_free_run_man_col_en << 2) |
+ (pdata->sdp_free_run_auto << 3));
/* TODO from platform data */
cp_write(sd, 0x69, 0x14); /* Enable CP CSC */
@@ -2431,18 +2567,6 @@ static int adv7842_core_init(struct v4l2_subdev *sd,
sdp_csc_coeff(sd, &pdata->sdp_csc_coeff);
- if (pdata->sdp_io_sync.adjust) {
- const struct adv7842_sdp_io_sync_adjustment *s = &pdata->sdp_io_sync;
- sdp_io_write(sd, 0x94, (s->hs_beg>>8) & 0xf);
- sdp_io_write(sd, 0x95, s->hs_beg & 0xff);
- sdp_io_write(sd, 0x96, (s->hs_width>>8) & 0xf);
- sdp_io_write(sd, 0x97, s->hs_width & 0xff);
- sdp_io_write(sd, 0x98, (s->de_beg>>8) & 0xf);
- sdp_io_write(sd, 0x99, s->de_beg & 0xff);
- sdp_io_write(sd, 0x9a, (s->de_end>>8) & 0xf);
- sdp_io_write(sd, 0x9b, s->de_end & 0xff);
- }
-
/* todo, improve settings for sdram */
if (pdata->sd_ram_size >= 128) {
sdp_write(sd, 0x12, 0x0d); /* Frame TBC,3D comb enabled */
@@ -2483,12 +2607,11 @@ static int adv7842_core_init(struct v4l2_subdev *sd,
io_write_and_or(sd, 0x20, 0xcf, 0x00);
/* LLC */
- /* Set phase to 16. TODO: get this from platform_data */
- io_write(sd, 0x19, 0x90);
+ io_write(sd, 0x19, 0x80 | pdata->llc_dll_phase);
io_write(sd, 0x33, 0x40);
/* interrupts */
- io_write(sd, 0x40, 0xe2); /* Configure INT1 */
+ io_write(sd, 0x40, 0xf2); /* Configure INT1 */
adv7842_irq_enable(sd, true);
@@ -2588,6 +2711,7 @@ static int adv7842_command_ram_test(struct v4l2_subdev *sd)
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct adv7842_state *state = to_state(sd);
struct adv7842_platform_data *pdata = client->dev.platform_data;
+ struct v4l2_dv_timings timings;
int ret = 0;
if (!pdata)
@@ -2610,7 +2734,7 @@ static int adv7842_command_ram_test(struct v4l2_subdev *sd)
adv7842_rewrite_i2c_addresses(sd, pdata);
/* and re-init chip and state */
- adv7842_core_init(sd, pdata);
+ adv7842_core_init(sd);
disable_input(sd);
@@ -2618,11 +2742,15 @@ static int adv7842_command_ram_test(struct v4l2_subdev *sd)
enable_input(sd);
- adv7842_s_dv_timings(sd, &state->timings);
-
edid_write_vga_segment(sd);
- edid_write_hdmi_segment(sd, 0);
- edid_write_hdmi_segment(sd, 1);
+ edid_write_hdmi_segment(sd, ADV7842_EDID_PORT_A);
+ edid_write_hdmi_segment(sd, ADV7842_EDID_PORT_B);
+
+ timings = state->timings;
+
+ memset(&state->timings, 0, sizeof(struct v4l2_dv_timings));
+
+ adv7842_s_dv_timings(sd, &timings);
return ret;
}
@@ -2670,6 +2798,7 @@ static const struct v4l2_subdev_video_ops adv7842_video_ops = {
};
static const struct v4l2_subdev_pad_ops adv7842_pad_ops = {
+ .get_edid = adv7842_get_edid,
.set_edid = adv7842_set_edid,
};
@@ -2712,8 +2841,9 @@ static const struct v4l2_ctrl_config adv7842_ctrl_free_run_color = {
};
-static void adv7842_unregister_clients(struct adv7842_state *state)
+static void adv7842_unregister_clients(struct v4l2_subdev *sd)
{
+ struct adv7842_state *state = to_state(sd);
if (state->i2c_avlink)
i2c_unregister_device(state->i2c_avlink);
if (state->i2c_cec)
@@ -2736,21 +2866,79 @@ static void adv7842_unregister_clients(struct adv7842_state *state)
i2c_unregister_device(state->i2c_cp);
if (state->i2c_vdp)
i2c_unregister_device(state->i2c_vdp);
+
+ state->i2c_avlink = NULL;
+ state->i2c_cec = NULL;
+ state->i2c_infoframe = NULL;
+ state->i2c_sdp_io = NULL;
+ state->i2c_sdp = NULL;
+ state->i2c_afe = NULL;
+ state->i2c_repeater = NULL;
+ state->i2c_edid = NULL;
+ state->i2c_hdmi = NULL;
+ state->i2c_cp = NULL;
+ state->i2c_vdp = NULL;
}
-static struct i2c_client *adv7842_dummy_client(struct v4l2_subdev *sd,
+static struct i2c_client *adv7842_dummy_client(struct v4l2_subdev *sd, const char *desc,
u8 addr, u8 io_reg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct i2c_client *cp;
io_write(sd, io_reg, addr << 1);
- return i2c_new_dummy(client->adapter, io_read(sd, io_reg) >> 1);
+
+ if (addr == 0) {
+ v4l2_err(sd, "no %s i2c addr configured\n", desc);
+ return NULL;
+ }
+
+ cp = i2c_new_dummy(client->adapter, io_read(sd, io_reg) >> 1);
+ if (!cp)
+ v4l2_err(sd, "register %s on i2c addr 0x%x failed\n", desc, addr);
+
+ return cp;
+}
+
+static int adv7842_register_clients(struct v4l2_subdev *sd)
+{
+ struct adv7842_state *state = to_state(sd);
+ struct adv7842_platform_data *pdata = &state->pdata;
+
+ state->i2c_avlink = adv7842_dummy_client(sd, "avlink", pdata->i2c_avlink, 0xf3);
+ state->i2c_cec = adv7842_dummy_client(sd, "cec", pdata->i2c_cec, 0xf4);
+ state->i2c_infoframe = adv7842_dummy_client(sd, "infoframe", pdata->i2c_infoframe, 0xf5);
+ state->i2c_sdp_io = adv7842_dummy_client(sd, "sdp_io", pdata->i2c_sdp_io, 0xf2);
+ state->i2c_sdp = adv7842_dummy_client(sd, "sdp", pdata->i2c_sdp, 0xf1);
+ state->i2c_afe = adv7842_dummy_client(sd, "afe", pdata->i2c_afe, 0xf8);
+ state->i2c_repeater = adv7842_dummy_client(sd, "repeater", pdata->i2c_repeater, 0xf9);
+ state->i2c_edid = adv7842_dummy_client(sd, "edid", pdata->i2c_edid, 0xfa);
+ state->i2c_hdmi = adv7842_dummy_client(sd, "hdmi", pdata->i2c_hdmi, 0xfb);
+ state->i2c_cp = adv7842_dummy_client(sd, "cp", pdata->i2c_cp, 0xfd);
+ state->i2c_vdp = adv7842_dummy_client(sd, "vdp", pdata->i2c_vdp, 0xfe);
+
+ if (!state->i2c_avlink ||
+ !state->i2c_cec ||
+ !state->i2c_infoframe ||
+ !state->i2c_sdp_io ||
+ !state->i2c_sdp ||
+ !state->i2c_afe ||
+ !state->i2c_repeater ||
+ !state->i2c_edid ||
+ !state->i2c_hdmi ||
+ !state->i2c_cp ||
+ !state->i2c_vdp)
+ return -1;
+
+ return 0;
}
static int adv7842_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adv7842_state *state;
+ static const struct v4l2_dv_timings cea640x480 =
+ V4L2_DV_BT_CEA_640X480P59_94;
struct adv7842_platform_data *pdata = client->dev.platform_data;
struct v4l2_ctrl_handler *hdl;
struct v4l2_subdev *sd;
@@ -2775,13 +2963,17 @@ static int adv7842_probe(struct i2c_client *client,
return -ENOMEM;
}
+ /* platform data */
+ state->pdata = *pdata;
+ state->timings = cea640x480;
+
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7842_ops);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- state->connector_hdmi = pdata->connector_hdmi;
state->mode = pdata->mode;
- state->hdmi_port_a = true;
+ state->hdmi_port_a = pdata->input == ADV7842_SELECT_HDMI_PORT_A;
+ state->restart_stdi_once = true;
/* i2c access to adv7842? */
rev = adv_smbus_read_byte_data_check(client, 0xea, false) << 8 |
@@ -2843,21 +3035,7 @@ static int adv7842_probe(struct i2c_client *client,
goto err_hdl;
}
- state->i2c_avlink = adv7842_dummy_client(sd, pdata->i2c_avlink, 0xf3);
- state->i2c_cec = adv7842_dummy_client(sd, pdata->i2c_cec, 0xf4);
- state->i2c_infoframe = adv7842_dummy_client(sd, pdata->i2c_infoframe, 0xf5);
- state->i2c_sdp_io = adv7842_dummy_client(sd, pdata->i2c_sdp_io, 0xf2);
- state->i2c_sdp = adv7842_dummy_client(sd, pdata->i2c_sdp, 0xf1);
- state->i2c_afe = adv7842_dummy_client(sd, pdata->i2c_afe, 0xf8);
- state->i2c_repeater = adv7842_dummy_client(sd, pdata->i2c_repeater, 0xf9);
- state->i2c_edid = adv7842_dummy_client(sd, pdata->i2c_edid, 0xfa);
- state->i2c_hdmi = adv7842_dummy_client(sd, pdata->i2c_hdmi, 0xfb);
- state->i2c_cp = adv7842_dummy_client(sd, pdata->i2c_cp, 0xfd);
- state->i2c_vdp = adv7842_dummy_client(sd, pdata->i2c_vdp, 0xfe);
- if (!state->i2c_avlink || !state->i2c_cec || !state->i2c_infoframe ||
- !state->i2c_sdp_io || !state->i2c_sdp || !state->i2c_afe ||
- !state->i2c_repeater || !state->i2c_edid || !state->i2c_hdmi ||
- !state->i2c_cp || !state->i2c_vdp) {
+ if (adv7842_register_clients(sd) < 0) {
err = -ENOMEM;
v4l2_err(sd, "failed to create all i2c clients\n");
goto err_i2c;
@@ -2879,7 +3057,7 @@ static int adv7842_probe(struct i2c_client *client,
if (err)
goto err_work_queues;
- err = adv7842_core_init(sd, pdata);
+ err = adv7842_core_init(sd);
if (err)
goto err_entity;
@@ -2893,7 +3071,7 @@ err_work_queues:
cancel_delayed_work(&state->delayed_work_enable_hotplug);
destroy_workqueue(state->work_queues);
err_i2c:
- adv7842_unregister_clients(state);
+ adv7842_unregister_clients(sd);
err_hdl:
v4l2_ctrl_handler_free(hdl);
return err;
@@ -2912,7 +3090,7 @@ static int adv7842_remove(struct i2c_client *client)
destroy_workqueue(state->work_queues);
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
- adv7842_unregister_clients(to_state(sd));
+ adv7842_unregister_clients(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
return 0;
}
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
index 3317a9ae3961..d98ca3aebe23 100644
--- a/drivers/media/i2c/lm3560.c
+++ b/drivers/media/i2c/lm3560.c
@@ -172,28 +172,28 @@ static int lm3560_flash_brt_ctrl(struct lm3560_flash *flash,
static int lm3560_get_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no)
{
struct lm3560_flash *flash = to_lm3560_flash(ctrl, led_no);
+ int rval = -EINVAL;
mutex_lock(&flash->lock);
if (ctrl->id == V4L2_CID_FLASH_FAULT) {
- int rval;
s32 fault = 0;
unsigned int reg_val;
rval = regmap_read(flash->regmap, REG_FLAG, &reg_val);
if (rval < 0)
- return rval;
- if (rval & FAULT_SHORT_CIRCUIT)
+ goto out;
+ if (reg_val & FAULT_SHORT_CIRCUIT)
fault |= V4L2_FLASH_FAULT_SHORT_CIRCUIT;
- if (rval & FAULT_OVERTEMP)
+ if (reg_val & FAULT_OVERTEMP)
fault |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
- if (rval & FAULT_TIMEOUT)
+ if (reg_val & FAULT_TIMEOUT)
fault |= V4L2_FLASH_FAULT_TIMEOUT;
ctrl->cur.val = fault;
- return 0;
}
+out:
mutex_unlock(&flash->lock);
- return -EINVAL;
+ return rval;
}
static int lm3560_set_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no)
@@ -219,15 +219,19 @@ static int lm3560_set_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no)
break;
case V4L2_CID_FLASH_STROBE:
- if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
- return -EBUSY;
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) {
+ rval = -EBUSY;
+ goto err_out;
+ }
flash->led_mode = V4L2_FLASH_LED_MODE_FLASH;
rval = lm3560_mode_ctrl(flash);
break;
case V4L2_CID_FLASH_STROBE_STOP:
- if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
- return -EBUSY;
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) {
+ rval = -EBUSY;
+ goto err_out;
+ }
flash->led_mode = V4L2_FLASH_LED_MODE_NONE;
rval = lm3560_mode_ctrl(flash);
break;
@@ -247,8 +251,8 @@ static int lm3560_set_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no)
break;
}
- mutex_unlock(&flash->lock);
err_out:
+ mutex_unlock(&flash->lock);
return rval;
}
@@ -444,14 +448,14 @@ static int lm3560_probe(struct i2c_client *client,
if (rval < 0)
return rval;
+ i2c_set_clientdata(client, flash);
+
return 0;
}
static int lm3560_remove(struct i2c_client *client)
{
- struct v4l2_subdev *subdev = i2c_get_clientdata(client);
- struct lm3560_flash *flash = container_of(subdev, struct lm3560_flash,
- subdev_led[LM3560_LED_MAX]);
+ struct lm3560_flash *flash = i2c_get_clientdata(client);
unsigned int i;
for (i = LM3560_LED0; i < LM3560_LED_MAX; i++) {
diff --git a/drivers/media/i2c/mt9m032.c b/drivers/media/i2c/mt9m032.c
index 846b15f0bf64..85ec3bacdf1c 100644
--- a/drivers/media/i2c/mt9m032.c
+++ b/drivers/media/i2c/mt9m032.c
@@ -459,13 +459,15 @@ static int mt9m032_set_pad_crop(struct v4l2_subdev *subdev,
MT9M032_COLUMN_START_MAX);
rect.top = clamp(ALIGN(crop->rect.top, 2), MT9M032_ROW_START_MIN,
MT9M032_ROW_START_MAX);
- rect.width = clamp(ALIGN(crop->rect.width, 2), MT9M032_COLUMN_SIZE_MIN,
- MT9M032_COLUMN_SIZE_MAX);
- rect.height = clamp(ALIGN(crop->rect.height, 2), MT9M032_ROW_SIZE_MIN,
- MT9M032_ROW_SIZE_MAX);
-
- rect.width = min(rect.width, MT9M032_PIXEL_ARRAY_WIDTH - rect.left);
- rect.height = min(rect.height, MT9M032_PIXEL_ARRAY_HEIGHT - rect.top);
+ rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
+ MT9M032_COLUMN_SIZE_MIN, MT9M032_COLUMN_SIZE_MAX);
+ rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
+ MT9M032_ROW_SIZE_MIN, MT9M032_ROW_SIZE_MAX);
+
+ rect.width = min_t(unsigned int, rect.width,
+ MT9M032_PIXEL_ARRAY_WIDTH - rect.left);
+ rect.height = min_t(unsigned int, rect.height,
+ MT9M032_PIXEL_ARRAY_HEIGHT - rect.top);
__crop = __mt9m032_get_pad_crop(sensor, fh, crop->which);
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 1c2303d18bf4..e5ddf47030fd 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -519,11 +519,13 @@ static int mt9p031_set_format(struct v4l2_subdev *subdev,
/* Clamp the width and height to avoid dividing by zero. */
width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
- max(__crop->width / 7, MT9P031_WINDOW_WIDTH_MIN),
+ max_t(unsigned int, __crop->width / 7,
+ MT9P031_WINDOW_WIDTH_MIN),
__crop->width);
height = clamp_t(unsigned int, ALIGN(format->format.height, 2),
- max(__crop->height / 8, MT9P031_WINDOW_HEIGHT_MIN),
- __crop->height);
+ max_t(unsigned int, __crop->height / 8,
+ MT9P031_WINDOW_HEIGHT_MIN),
+ __crop->height);
hratio = DIV_ROUND_CLOSEST(__crop->width, width);
vratio = DIV_ROUND_CLOSEST(__crop->height, height);
@@ -565,15 +567,17 @@ static int mt9p031_set_crop(struct v4l2_subdev *subdev,
MT9P031_COLUMN_START_MAX);
rect.top = clamp(ALIGN(crop->rect.top, 2), MT9P031_ROW_START_MIN,
MT9P031_ROW_START_MAX);
- rect.width = clamp(ALIGN(crop->rect.width, 2),
- MT9P031_WINDOW_WIDTH_MIN,
- MT9P031_WINDOW_WIDTH_MAX);
- rect.height = clamp(ALIGN(crop->rect.height, 2),
- MT9P031_WINDOW_HEIGHT_MIN,
- MT9P031_WINDOW_HEIGHT_MAX);
-
- rect.width = min(rect.width, MT9P031_PIXEL_ARRAY_WIDTH - rect.left);
- rect.height = min(rect.height, MT9P031_PIXEL_ARRAY_HEIGHT - rect.top);
+ rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
+ MT9P031_WINDOW_WIDTH_MIN,
+ MT9P031_WINDOW_WIDTH_MAX);
+ rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
+ MT9P031_WINDOW_HEIGHT_MIN,
+ MT9P031_WINDOW_HEIGHT_MAX);
+
+ rect.width = min_t(unsigned int, rect.width,
+ MT9P031_PIXEL_ARRAY_WIDTH - rect.left);
+ rect.height = min_t(unsigned int, rect.height,
+ MT9P031_PIXEL_ARRAY_HEIGHT - rect.top);
__crop = __mt9p031_get_pad_crop(mt9p031, fh, crop->pad, crop->which);
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index 796463466ef0..d41c70eaf838 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -291,10 +291,12 @@ static int mt9t001_set_format(struct v4l2_subdev *subdev,
/* Clamp the width and height to avoid dividing by zero. */
width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
- max(__crop->width / 8, MT9T001_WINDOW_HEIGHT_MIN + 1),
+ max_t(unsigned int, __crop->width / 8,
+ MT9T001_WINDOW_HEIGHT_MIN + 1),
__crop->width);
height = clamp_t(unsigned int, ALIGN(format->format.height, 2),
- max(__crop->height / 8, MT9T001_WINDOW_HEIGHT_MIN + 1),
+ max_t(unsigned int, __crop->height / 8,
+ MT9T001_WINDOW_HEIGHT_MIN + 1),
__crop->height);
hratio = DIV_ROUND_CLOSEST(__crop->width, width);
@@ -339,15 +341,17 @@ static int mt9t001_set_crop(struct v4l2_subdev *subdev,
rect.top = clamp(ALIGN(crop->rect.top, 2),
MT9T001_ROW_START_MIN,
MT9T001_ROW_START_MAX);
- rect.width = clamp(ALIGN(crop->rect.width, 2),
- MT9T001_WINDOW_WIDTH_MIN + 1,
- MT9T001_WINDOW_WIDTH_MAX + 1);
- rect.height = clamp(ALIGN(crop->rect.height, 2),
- MT9T001_WINDOW_HEIGHT_MIN + 1,
- MT9T001_WINDOW_HEIGHT_MAX + 1);
-
- rect.width = min(rect.width, MT9T001_PIXEL_ARRAY_WIDTH - rect.left);
- rect.height = min(rect.height, MT9T001_PIXEL_ARRAY_HEIGHT - rect.top);
+ rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
+ MT9T001_WINDOW_WIDTH_MIN + 1,
+ MT9T001_WINDOW_WIDTH_MAX + 1);
+ rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
+ MT9T001_WINDOW_HEIGHT_MIN + 1,
+ MT9T001_WINDOW_HEIGHT_MAX + 1);
+
+ rect.width = min_t(unsigned int, rect.width,
+ MT9T001_PIXEL_ARRAY_WIDTH - rect.left);
+ rect.height = min_t(unsigned int, rect.height,
+ MT9T001_PIXEL_ARRAY_HEIGHT - rect.top);
__crop = __mt9t001_get_pad_crop(mt9t001, fh, crop->pad, crop->which);
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 2c50effaa334..36c504b78f2c 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -27,14 +27,16 @@
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
-#define MT9V032_PIXEL_ARRAY_HEIGHT 492
-#define MT9V032_PIXEL_ARRAY_WIDTH 782
+/* The first four rows are black rows. The active area spans 753x481 pixels. */
+#define MT9V032_PIXEL_ARRAY_HEIGHT 485
+#define MT9V032_PIXEL_ARRAY_WIDTH 753
#define MT9V032_SYSCLK_FREQ_DEF 26600000
#define MT9V032_CHIP_VERSION 0x00
#define MT9V032_CHIP_ID_REV1 0x1311
#define MT9V032_CHIP_ID_REV3 0x1313
+#define MT9V034_CHIP_ID_REV1 0x1324
#define MT9V032_COLUMN_START 0x01
#define MT9V032_COLUMN_START_MIN 1
#define MT9V032_COLUMN_START_DEF 1
@@ -53,12 +55,15 @@
#define MT9V032_WINDOW_WIDTH_MAX 752
#define MT9V032_HORIZONTAL_BLANKING 0x05
#define MT9V032_HORIZONTAL_BLANKING_MIN 43
+#define MT9V034_HORIZONTAL_BLANKING_MIN 61
#define MT9V032_HORIZONTAL_BLANKING_DEF 94
#define MT9V032_HORIZONTAL_BLANKING_MAX 1023
#define MT9V032_VERTICAL_BLANKING 0x06
#define MT9V032_VERTICAL_BLANKING_MIN 4
+#define MT9V034_VERTICAL_BLANKING_MIN 2
#define MT9V032_VERTICAL_BLANKING_DEF 45
#define MT9V032_VERTICAL_BLANKING_MAX 3000
+#define MT9V034_VERTICAL_BLANKING_MAX 32288
#define MT9V032_CHIP_CONTROL 0x07
#define MT9V032_CHIP_CONTROL_MASTER_MODE (1 << 3)
#define MT9V032_CHIP_CONTROL_DOUT_ENABLE (1 << 7)
@@ -68,8 +73,10 @@
#define MT9V032_SHUTTER_WIDTH_CONTROL 0x0a
#define MT9V032_TOTAL_SHUTTER_WIDTH 0x0b
#define MT9V032_TOTAL_SHUTTER_WIDTH_MIN 1
+#define MT9V034_TOTAL_SHUTTER_WIDTH_MIN 0
#define MT9V032_TOTAL_SHUTTER_WIDTH_DEF 480
#define MT9V032_TOTAL_SHUTTER_WIDTH_MAX 32767
+#define MT9V034_TOTAL_SHUTTER_WIDTH_MAX 32765
#define MT9V032_RESET 0x0c
#define MT9V032_READ_MODE 0x0d
#define MT9V032_READ_MODE_ROW_BIN_MASK (3 << 0)
@@ -81,6 +88,8 @@
#define MT9V032_READ_MODE_DARK_COLUMNS (1 << 6)
#define MT9V032_READ_MODE_DARK_ROWS (1 << 7)
#define MT9V032_PIXEL_OPERATION_MODE 0x0f
+#define MT9V034_PIXEL_OPERATION_MODE_HDR (1 << 0)
+#define MT9V034_PIXEL_OPERATION_MODE_COLOR (1 << 1)
#define MT9V032_PIXEL_OPERATION_MODE_COLOR (1 << 2)
#define MT9V032_PIXEL_OPERATION_MODE_HDR (1 << 6)
#define MT9V032_ANALOG_GAIN 0x35
@@ -96,9 +105,12 @@
#define MT9V032_DARK_AVG_HIGH_THRESH_MASK (255 << 8)
#define MT9V032_DARK_AVG_HIGH_THRESH_SHIFT 8
#define MT9V032_ROW_NOISE_CORR_CONTROL 0x70
+#define MT9V034_ROW_NOISE_CORR_ENABLE (1 << 0)
+#define MT9V034_ROW_NOISE_CORR_USE_BLK_AVG (1 << 1)
#define MT9V032_ROW_NOISE_CORR_ENABLE (1 << 5)
#define MT9V032_ROW_NOISE_CORR_USE_BLK_AVG (1 << 7)
#define MT9V032_PIXEL_CLOCK 0x74
+#define MT9V034_PIXEL_CLOCK 0x72
#define MT9V032_PIXEL_CLOCK_INV_LINE (1 << 0)
#define MT9V032_PIXEL_CLOCK_INV_FRAME (1 << 1)
#define MT9V032_PIXEL_CLOCK_XOR_LINE (1 << 2)
@@ -120,12 +132,88 @@
#define MT9V032_AGC_ENABLE (1 << 1)
#define MT9V032_THERMAL_INFO 0xc1
+enum mt9v032_model {
+ MT9V032_MODEL_V032_COLOR,
+ MT9V032_MODEL_V032_MONO,
+ MT9V032_MODEL_V034_COLOR,
+ MT9V032_MODEL_V034_MONO,
+};
+
+struct mt9v032_model_version {
+ unsigned int version;
+ const char *name;
+};
+
+struct mt9v032_model_data {
+ unsigned int min_row_time;
+ unsigned int min_hblank;
+ unsigned int min_vblank;
+ unsigned int max_vblank;
+ unsigned int min_shutter;
+ unsigned int max_shutter;
+ unsigned int pclk_reg;
+};
+
+struct mt9v032_model_info {
+ const struct mt9v032_model_data *data;
+ bool color;
+};
+
+static const struct mt9v032_model_version mt9v032_versions[] = {
+ { MT9V032_CHIP_ID_REV1, "MT9V032 rev1/2" },
+ { MT9V032_CHIP_ID_REV3, "MT9V032 rev3" },
+ { MT9V034_CHIP_ID_REV1, "MT9V034 rev1" },
+};
+
+static const struct mt9v032_model_data mt9v032_model_data[] = {
+ {
+ /* MT9V032 revisions 1/2/3 */
+ .min_row_time = 660,
+ .min_hblank = MT9V032_HORIZONTAL_BLANKING_MIN,
+ .min_vblank = MT9V032_VERTICAL_BLANKING_MIN,
+ .max_vblank = MT9V032_VERTICAL_BLANKING_MAX,
+ .min_shutter = MT9V032_TOTAL_SHUTTER_WIDTH_MIN,
+ .max_shutter = MT9V032_TOTAL_SHUTTER_WIDTH_MAX,
+ .pclk_reg = MT9V032_PIXEL_CLOCK,
+ }, {
+ /* MT9V034 */
+ .min_row_time = 690,
+ .min_hblank = MT9V034_HORIZONTAL_BLANKING_MIN,
+ .min_vblank = MT9V034_VERTICAL_BLANKING_MIN,
+ .max_vblank = MT9V034_VERTICAL_BLANKING_MAX,
+ .min_shutter = MT9V034_TOTAL_SHUTTER_WIDTH_MIN,
+ .max_shutter = MT9V034_TOTAL_SHUTTER_WIDTH_MAX,
+ .pclk_reg = MT9V034_PIXEL_CLOCK,
+ },
+};
+
+static const struct mt9v032_model_info mt9v032_models[] = {
+ [MT9V032_MODEL_V032_COLOR] = {
+ .data = &mt9v032_model_data[0],
+ .color = true,
+ },
+ [MT9V032_MODEL_V032_MONO] = {
+ .data = &mt9v032_model_data[0],
+ .color = false,
+ },
+ [MT9V032_MODEL_V034_COLOR] = {
+ .data = &mt9v032_model_data[1],
+ .color = true,
+ },
+ [MT9V032_MODEL_V034_MONO] = {
+ .data = &mt9v032_model_data[1],
+ .color = false,
+ },
+};
+
struct mt9v032 {
struct v4l2_subdev subdev;
struct media_pad pad;
struct v4l2_mbus_framefmt format;
struct v4l2_rect crop;
+ unsigned int hratio;
+ unsigned int vratio;
struct v4l2_ctrl_handler ctrls;
struct {
@@ -139,6 +227,8 @@ struct mt9v032 {
struct clk *clk;
struct mt9v032_platform_data *pdata;
+ const struct mt9v032_model_info *model;
+ const struct mt9v032_model_version *version;
u32 sysclk;
u16 chip_control;
@@ -210,12 +300,17 @@ mt9v032_update_hblank(struct mt9v032 *mt9v032)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9v032->subdev);
struct v4l2_rect *crop = &mt9v032->crop;
+ unsigned int min_hblank = mt9v032->model->data->min_hblank;
+ unsigned int hblank;
- return mt9v032_write(client, MT9V032_HORIZONTAL_BLANKING,
- max_t(s32, mt9v032->hblank, 660 - crop->width));
-}
+ if (mt9v032->version->version == MT9V034_CHIP_ID_REV1)
+ min_hblank += (mt9v032->hratio - 1) * 10;
+ min_hblank = max_t(int, mt9v032->model->data->min_row_time - crop->width,
+ min_hblank);
+ hblank = max_t(unsigned int, mt9v032->hblank, min_hblank);
-#define EXT_CLK 25000000
+ return mt9v032_write(client, MT9V032_HORIZONTAL_BLANKING, hblank);
+}
static int mt9v032_power_on(struct mt9v032 *mt9v032)
{
@@ -259,7 +354,7 @@ static int __mt9v032_set_power(struct mt9v032 *mt9v032, bool on)
/* Configure the pixel clock polarity */
if (mt9v032->pdata && mt9v032->pdata->clk_pol) {
- ret = mt9v032_write(client, MT9V032_PIXEL_CLOCK,
+ ret = mt9v032_write(client, mt9v032->model->data->pclk_reg,
MT9V032_PIXEL_CLOCK_INV_PXL_CLK);
if (ret < 0)
return ret;
@@ -312,22 +407,20 @@ static int mt9v032_s_stream(struct v4l2_subdev *subdev, int enable)
| MT9V032_CHIP_CONTROL_SEQUENTIAL;
struct i2c_client *client = v4l2_get_subdevdata(subdev);
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
- struct v4l2_mbus_framefmt *format = &mt9v032->format;
struct v4l2_rect *crop = &mt9v032->crop;
- unsigned int hratio;
- unsigned int vratio;
+ unsigned int hbin;
+ unsigned int vbin;
int ret;
if (!enable)
return mt9v032_set_chip_control(mt9v032, mode, 0);
/* Configure the window size and row/column bin */
- hratio = DIV_ROUND_CLOSEST(crop->width, format->width);
- vratio = DIV_ROUND_CLOSEST(crop->height, format->height);
-
+ hbin = fls(mt9v032->hratio) - 1;
+ vbin = fls(mt9v032->vratio) - 1;
ret = mt9v032_write(client, MT9V032_READ_MODE,
- (hratio - 1) << MT9V032_READ_MODE_ROW_BIN_SHIFT |
- (vratio - 1) << MT9V032_READ_MODE_COLUMN_BIN_SHIFT);
+ hbin << MT9V032_READ_MODE_COLUMN_BIN_SHIFT |
+ vbin << MT9V032_READ_MODE_ROW_BIN_SHIFT);
if (ret < 0)
return ret;
@@ -370,12 +463,12 @@ static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_fh *fh,
struct v4l2_subdev_frame_size_enum *fse)
{
- if (fse->index >= 8 || fse->code != V4L2_MBUS_FMT_SGRBG10_1X10)
+ if (fse->index >= 3 || fse->code != V4L2_MBUS_FMT_SGRBG10_1X10)
return -EINVAL;
- fse->min_width = MT9V032_WINDOW_WIDTH_DEF / fse->index;
+ fse->min_width = MT9V032_WINDOW_WIDTH_DEF / (1 << fse->index);
fse->max_width = fse->min_width;
- fse->min_height = MT9V032_WINDOW_HEIGHT_DEF / fse->index;
+ fse->min_height = MT9V032_WINDOW_HEIGHT_DEF / (1 << fse->index);
fse->max_height = fse->min_height;
return 0;
@@ -392,18 +485,30 @@ static int mt9v032_get_format(struct v4l2_subdev *subdev,
return 0;
}
-static void mt9v032_configure_pixel_rate(struct mt9v032 *mt9v032,
- unsigned int hratio)
+static void mt9v032_configure_pixel_rate(struct mt9v032 *mt9v032)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9v032->subdev);
int ret;
ret = v4l2_ctrl_s_ctrl_int64(mt9v032->pixel_rate,
- mt9v032->sysclk / hratio);
+ mt9v032->sysclk / mt9v032->hratio);
if (ret < 0)
dev_warn(&client->dev, "failed to set pixel rate (%d)\n", ret);
}
+static unsigned int mt9v032_calc_ratio(unsigned int input, unsigned int output)
+{
+ /* Compute the power-of-two binning factor closest to the input size to
+ * output size ratio. Given that the output size is bounded by input/4
+ * and input, a generic implementation would be an ineffective luxury.
+ */
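+ /* For instance, with a 752 pixel wide crop, output widths of 502 and
+ * above select a ratio of 1, 252 to 500 select 2 and anything narrower
+ * selects 4.
+ */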
+ if (output * 3 > input * 2)
+ return 1;
+ if (output * 3 > input)
+ return 2;
+ return 4;
+}
+
static int mt9v032_set_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_fh *fh,
struct v4l2_subdev_format *format)
@@ -420,22 +525,28 @@ static int mt9v032_set_format(struct v4l2_subdev *subdev,
format->which);
/* Clamp the width and height to avoid dividing by zero. */
- width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
- max(__crop->width / 8, MT9V032_WINDOW_WIDTH_MIN),
- __crop->width);
- height = clamp_t(unsigned int, ALIGN(format->format.height, 2),
- max(__crop->height / 8, MT9V032_WINDOW_HEIGHT_MIN),
- __crop->height);
-
- hratio = DIV_ROUND_CLOSEST(__crop->width, width);
- vratio = DIV_ROUND_CLOSEST(__crop->height, height);
+ width = clamp(ALIGN(format->format.width, 2),
+ max_t(unsigned int, __crop->width / 4,
+ MT9V032_WINDOW_WIDTH_MIN),
+ __crop->width);
+ height = clamp(ALIGN(format->format.height, 2),
+ max_t(unsigned int, __crop->height / 4,
+ MT9V032_WINDOW_HEIGHT_MIN),
+ __crop->height);
+
+ hratio = mt9v032_calc_ratio(__crop->width, width);
+ vratio = mt9v032_calc_ratio(__crop->height, height);
__format = __mt9v032_get_pad_format(mt9v032, fh, format->pad,
format->which);
__format->width = __crop->width / hratio;
__format->height = __crop->height / vratio;
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- mt9v032_configure_pixel_rate(mt9v032, hratio);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ mt9v032->hratio = hratio;
+ mt9v032->vratio = vratio;
+ mt9v032_configure_pixel_rate(mt9v032);
+ }
format->format = *__format;
@@ -471,15 +582,17 @@ static int mt9v032_set_crop(struct v4l2_subdev *subdev,
rect.top = clamp(ALIGN(crop->rect.top + 1, 2) - 1,
MT9V032_ROW_START_MIN,
MT9V032_ROW_START_MAX);
- rect.width = clamp(ALIGN(crop->rect.width, 2),
- MT9V032_WINDOW_WIDTH_MIN,
- MT9V032_WINDOW_WIDTH_MAX);
- rect.height = clamp(ALIGN(crop->rect.height, 2),
- MT9V032_WINDOW_HEIGHT_MIN,
- MT9V032_WINDOW_HEIGHT_MAX);
-
- rect.width = min(rect.width, MT9V032_PIXEL_ARRAY_WIDTH - rect.left);
- rect.height = min(rect.height, MT9V032_PIXEL_ARRAY_HEIGHT - rect.top);
+ rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
+ MT9V032_WINDOW_WIDTH_MIN,
+ MT9V032_WINDOW_WIDTH_MAX);
+ rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
+ MT9V032_WINDOW_HEIGHT_MIN,
+ MT9V032_WINDOW_HEIGHT_MAX);
+
+ rect.width = min_t(unsigned int,
+ rect.width, MT9V032_PIXEL_ARRAY_WIDTH - rect.left);
+ rect.height = min_t(unsigned int,
+ rect.height, MT9V032_PIXEL_ARRAY_HEIGHT - rect.top);
__crop = __mt9v032_get_pad_crop(mt9v032, fh, crop->pad, crop->which);
@@ -491,8 +604,11 @@ static int mt9v032_set_crop(struct v4l2_subdev *subdev,
crop->which);
__format->width = rect.width;
__format->height = rect.height;
- if (crop->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- mt9v032_configure_pixel_rate(mt9v032, 1);
+ if (crop->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ mt9v032->hratio = 1;
+ mt9v032->vratio = 1;
+ mt9v032_configure_pixel_rate(mt9v032);
+ }
}
*__crop = rect;
@@ -641,7 +757,8 @@ static int mt9v032_registered(struct v4l2_subdev *subdev)
{
struct i2c_client *client = v4l2_get_subdevdata(subdev);
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
- s32 data;
+ unsigned int i;
+ s32 version;
int ret;
dev_info(&client->dev, "Probing MT9V032 at address 0x%02x\n",
@@ -654,25 +771,38 @@ static int mt9v032_registered(struct v4l2_subdev *subdev)
}
/* Read and check the sensor version */
- data = mt9v032_read(client, MT9V032_CHIP_VERSION);
- if (data != MT9V032_CHIP_ID_REV1 && data != MT9V032_CHIP_ID_REV3) {
- dev_err(&client->dev, "MT9V032 not detected, wrong version "
- "0x%04x\n", data);
+ version = mt9v032_read(client, MT9V032_CHIP_VERSION);
+ if (version < 0) {
+ dev_err(&client->dev, "Failed reading chip version\n");
+ return version;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt9v032_versions); ++i) {
+ if (mt9v032_versions[i].version == version) {
+ mt9v032->version = &mt9v032_versions[i];
+ break;
+ }
+ }
+
+ if (mt9v032->version == NULL) {
+ dev_err(&client->dev, "Unsupported chip version 0x%04x\n",
+ version);
return -ENODEV;
}
mt9v032_power_off(mt9v032);
- dev_info(&client->dev, "MT9V032 detected at address 0x%02x\n",
- client->addr);
+ dev_info(&client->dev, "%s detected at address 0x%02x\n",
+ mt9v032->version->name, client->addr);
- mt9v032_configure_pixel_rate(mt9v032, 1);
+ mt9v032_configure_pixel_rate(mt9v032);
return ret;
}
static int mt9v032_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
{
+ struct mt9v032 *mt9v032 = to_mt9v032(subdev);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
@@ -683,7 +813,12 @@ static int mt9v032_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
crop->height = MT9V032_WINDOW_HEIGHT_DEF;
format = v4l2_subdev_get_try_format(fh, 0);
- format->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+ if (mt9v032->model->color)
+ format->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ else
+ format->code = V4L2_MBUS_FMT_Y10_1X10;
+
format->width = MT9V032_WINDOW_WIDTH_DEF;
format->height = MT9V032_WINDOW_HEIGHT_DEF;
format->field = V4L2_FIELD_NONE;
@@ -755,6 +890,7 @@ static int mt9v032_probe(struct i2c_client *client,
mutex_init(&mt9v032->power_lock);
mt9v032->pdata = pdata;
+ mt9v032->model = (const void *)did->driver_data;
v4l2_ctrl_handler_init(&mt9v032->ctrls, 10);
@@ -767,16 +903,16 @@ static int mt9v032_probe(struct i2c_client *client,
V4L2_CID_EXPOSURE_AUTO, V4L2_EXPOSURE_MANUAL, 0,
V4L2_EXPOSURE_AUTO);
v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops,
- V4L2_CID_EXPOSURE, MT9V032_TOTAL_SHUTTER_WIDTH_MIN,
- MT9V032_TOTAL_SHUTTER_WIDTH_MAX, 1,
+ V4L2_CID_EXPOSURE, mt9v032->model->data->min_shutter,
+ mt9v032->model->data->max_shutter, 1,
MT9V032_TOTAL_SHUTTER_WIDTH_DEF);
v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops,
- V4L2_CID_HBLANK, MT9V032_HORIZONTAL_BLANKING_MIN,
+ V4L2_CID_HBLANK, mt9v032->model->data->min_hblank,
MT9V032_HORIZONTAL_BLANKING_MAX, 1,
MT9V032_HORIZONTAL_BLANKING_DEF);
v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops,
- V4L2_CID_VBLANK, MT9V032_VERTICAL_BLANKING_MIN,
- MT9V032_VERTICAL_BLANKING_MAX, 1,
+ V4L2_CID_VBLANK, mt9v032->model->data->min_vblank,
+ mt9v032->model->data->max_vblank, 1,
MT9V032_VERTICAL_BLANKING_DEF);
mt9v032->test_pattern = v4l2_ctrl_new_std_menu_items(&mt9v032->ctrls,
&mt9v032_ctrl_ops, V4L2_CID_TEST_PATTERN,
@@ -819,12 +955,19 @@ static int mt9v032_probe(struct i2c_client *client,
mt9v032->crop.width = MT9V032_WINDOW_WIDTH_DEF;
mt9v032->crop.height = MT9V032_WINDOW_HEIGHT_DEF;
- mt9v032->format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ if (mt9v032->model->color)
+ mt9v032->format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ else
+ mt9v032->format.code = V4L2_MBUS_FMT_Y10_1X10;
+
mt9v032->format.width = MT9V032_WINDOW_WIDTH_DEF;
mt9v032->format.height = MT9V032_WINDOW_HEIGHT_DEF;
mt9v032->format.field = V4L2_FIELD_NONE;
mt9v032->format.colorspace = V4L2_COLORSPACE_SRGB;
+ mt9v032->hratio = 1;
+ mt9v032->vratio = 1;
+
mt9v032->aec_agc = MT9V032_AEC_ENABLE | MT9V032_AGC_ENABLE;
mt9v032->hblank = MT9V032_HORIZONTAL_BLANKING_DEF;
mt9v032->sysclk = MT9V032_SYSCLK_FREQ_DEF;
@@ -855,7 +998,10 @@ static int mt9v032_remove(struct i2c_client *client)
}
static const struct i2c_device_id mt9v032_id[] = {
- { "mt9v032", 0 },
+ { "mt9v032", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V032_COLOR] },
+ { "mt9v032m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V032_MONO] },
+ { "mt9v034", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V034_COLOR] },
+ { "mt9v034m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V034_MONO] },
{ }
};
MODULE_DEVICE_TABLE(i2c, mt9v032_id);
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
new file mode 100644
index 000000000000..77e10e0fd8d6
--- /dev/null
+++ b/drivers/media/i2c/s5k5baf.c
@@ -0,0 +1,2053 @@
+/*
+ * Driver for Samsung S5K5BAF UXGA 1/5" 2M CMOS Image Sensor
+ * with embedded SoC ISP.
+ *
+ * Copyright (C) 2013, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Based on S5K6AA driver authored by Sylwester Nawrocki
+ * Copyright (C) 2013, Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/media.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-of.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define S5K5BAF_DRIVER_NAME "s5k5baf"
+#define S5K5BAF_DEFAULT_MCLK_FREQ 24000000U
+#define S5K5BAF_CLK_NAME "mclk"
+
+#define S5K5BAF_FW_FILENAME "s5k5baf-cfg.bin"
+#define S5K5BAF_FW_TAG "SF00"
+#define S5K5BAG_FW_TAG_LEN 2
+#define S5K5BAG_FW_MAX_COUNT 16
+
+#define S5K5BAF_CIS_WIDTH 1600
+#define S5K5BAF_CIS_HEIGHT 1200
+#define S5K5BAF_WIN_WIDTH_MIN 8
+#define S5K5BAF_WIN_HEIGHT_MIN 8
+#define S5K5BAF_GAIN_RED_DEF 127
+#define S5K5BAF_GAIN_GREEN_DEF 95
+#define S5K5BAF_GAIN_BLUE_DEF 180
+/* Default number of MIPI CSI-2 data lanes used */
+#define S5K5BAF_DEF_NUM_LANES 1
+
+#define AHB_MSB_ADDR_PTR 0xfcfc
+
+/*
+ * Register interface pages (the most significant word of the address)
+ */
+#define PAGE_IF_HW 0xd000
+#define PAGE_IF_SW 0x7000
+
+/*
+ * H/W register Interface (PAGE_IF_HW)
+ */
+#define REG_SW_LOAD_COMPLETE 0x0014
+#define REG_CMDWR_PAGE 0x0028
+#define REG_CMDWR_ADDR 0x002a
+#define REG_CMDRD_PAGE 0x002c
+#define REG_CMDRD_ADDR 0x002e
+#define REG_CMD_BUF 0x0f12
+#define REG_SET_HOST_INT 0x1000
+#define REG_CLEAR_HOST_INT 0x1030
+#define REG_PATTERN_SET 0x3100
+#define REG_PATTERN_WIDTH 0x3118
+#define REG_PATTERN_HEIGHT 0x311a
+#define REG_PATTERN_PARAM 0x311c
+
+/*
+ * S/W register interface (PAGE_IF_SW)
+ */
+
+/* Firmware revision information */
+#define REG_FW_APIVER 0x012e
+#define S5K5BAF_FW_APIVER 0x0001
+#define REG_FW_REVISION 0x0130
+#define REG_FW_SENSOR_ID 0x0152
+
+/* Initialization parameters */
+/* Master clock frequency in KHz */
+#define REG_I_INCLK_FREQ_L 0x01b8
+#define REG_I_INCLK_FREQ_H 0x01ba
+#define MIN_MCLK_FREQ_KHZ 6000U
+#define MAX_MCLK_FREQ_KHZ 48000U
+#define REG_I_USE_NPVI_CLOCKS 0x01c6
+#define NPVI_CLOCKS 1
+#define REG_I_USE_NMIPI_CLOCKS 0x01c8
+#define NMIPI_CLOCKS 1
+#define REG_I_BLOCK_INTERNAL_PLL_CALC 0x01ca
+
+/* Clock configurations, n = 0..2. REG_I_* frequency unit is 4 kHz. */
+#define REG_I_OPCLK_4KHZ(n) ((n) * 6 + 0x01cc)
+#define REG_I_MIN_OUTRATE_4KHZ(n) ((n) * 6 + 0x01ce)
+#define REG_I_MAX_OUTRATE_4KHZ(n) ((n) * 6 + 0x01d0)
+#define SCLK_PVI_FREQ 24000
+#define SCLK_MIPI_FREQ 48000
+#define PCLK_MIN_FREQ 6000
+#define PCLK_MAX_FREQ 48000
+#define REG_I_USE_REGS_API 0x01de
+#define REG_I_INIT_PARAMS_UPDATED 0x01e0
+#define REG_I_ERROR_INFO 0x01e2
+
+/* General purpose parameters */
+#define REG_USER_BRIGHTNESS 0x01e4
+#define REG_USER_CONTRAST 0x01e6
+#define REG_USER_SATURATION 0x01e8
+#define REG_USER_SHARPBLUR 0x01ea
+
+#define REG_G_SPEC_EFFECTS 0x01ee
+#define REG_G_ENABLE_PREV 0x01f0
+#define REG_G_ENABLE_PREV_CHG 0x01f2
+#define REG_G_NEW_CFG_SYNC 0x01f8
+#define REG_G_PREVREQ_IN_WIDTH 0x01fa
+#define REG_G_PREVREQ_IN_HEIGHT 0x01fc
+#define REG_G_PREVREQ_IN_XOFFS 0x01fe
+#define REG_G_PREVREQ_IN_YOFFS 0x0200
+#define REG_G_PREVZOOM_IN_WIDTH 0x020a
+#define REG_G_PREVZOOM_IN_HEIGHT 0x020c
+#define REG_G_PREVZOOM_IN_XOFFS 0x020e
+#define REG_G_PREVZOOM_IN_YOFFS 0x0210
+#define REG_G_INPUTS_CHANGE_REQ 0x021a
+#define REG_G_ACTIVE_PREV_CFG 0x021c
+#define REG_G_PREV_CFG_CHG 0x021e
+#define REG_G_PREV_OPEN_AFTER_CH 0x0220
+#define REG_G_PREV_CFG_ERROR 0x0222
+#define CFG_ERROR_RANGE 0x0b
+#define REG_G_PREV_CFG_BYPASS_CHANGED 0x022a
+#define REG_G_ACTUAL_P_FR_TIME 0x023a
+#define REG_G_ACTUAL_P_OUT_RATE 0x023c
+#define REG_G_ACTUAL_C_FR_TIME 0x023e
+#define REG_G_ACTUAL_C_OUT_RATE 0x0240
+
+/* Preview control section. n = 0...4. */
+#define PREG(n, x) ((n) * 0x26 + x)
+#define REG_P_OUT_WIDTH(n) PREG(n, 0x0242)
+#define REG_P_OUT_HEIGHT(n) PREG(n, 0x0244)
+#define REG_P_FMT(n) PREG(n, 0x0246)
+#define REG_P_MAX_OUT_RATE(n) PREG(n, 0x0248)
+#define REG_P_MIN_OUT_RATE(n) PREG(n, 0x024a)
+#define REG_P_PVI_MASK(n) PREG(n, 0x024c)
+#define PVI_MASK_MIPI 0x52
+#define REG_P_CLK_INDEX(n) PREG(n, 0x024e)
+#define CLK_PVI_INDEX 0
+#define CLK_MIPI_INDEX NPVI_CLOCKS
+#define REG_P_FR_RATE_TYPE(n) PREG(n, 0x0250)
+#define FR_RATE_DYNAMIC 0
+#define FR_RATE_FIXED 1
+#define FR_RATE_FIXED_ACCURATE 2
+#define REG_P_FR_RATE_Q_TYPE(n) PREG(n, 0x0252)
+#define FR_RATE_Q_DYNAMIC 0
+#define FR_RATE_Q_BEST_FRRATE 1 /* Binning enabled */
+#define FR_RATE_Q_BEST_QUALITY 2 /* Binning disabled */
+/* Frame period in 0.1 ms units */
+#define REG_P_MAX_FR_TIME(n) PREG(n, 0x0254)
+#define REG_P_MIN_FR_TIME(n) PREG(n, 0x0256)
+#define S5K5BAF_MIN_FR_TIME 333 /* x100 us */
+#define S5K5BAF_MAX_FR_TIME 6500 /* x100 us */
+/* The below 5 registers are for "device correction" values */
+#define REG_P_SATURATION(n) PREG(n, 0x0258)
+#define REG_P_SHARP_BLUR(n) PREG(n, 0x025a)
+#define REG_P_GLAMOUR(n) PREG(n, 0x025c)
+#define REG_P_COLORTEMP(n) PREG(n, 0x025e)
+#define REG_P_GAMMA_INDEX(n) PREG(n, 0x0260)
+#define REG_P_PREV_MIRROR(n) PREG(n, 0x0262)
+#define REG_P_CAP_MIRROR(n) PREG(n, 0x0264)
+#define REG_P_CAP_ROTATION(n) PREG(n, 0x0266)
+
+/* Extended image property controls */
+/* Exposure time in 10 us units */
+#define REG_SF_USR_EXPOSURE_L 0x03bc
+#define REG_SF_USR_EXPOSURE_H 0x03be
+#define REG_SF_USR_EXPOSURE_CHG 0x03c0
+#define REG_SF_USR_TOT_GAIN 0x03c2
+#define REG_SF_USR_TOT_GAIN_CHG 0x03c4
+#define REG_SF_RGAIN 0x03c6
+#define REG_SF_RGAIN_CHG 0x03c8
+#define REG_SF_GGAIN 0x03ca
+#define REG_SF_GGAIN_CHG 0x03cc
+#define REG_SF_BGAIN 0x03ce
+#define REG_SF_BGAIN_CHG 0x03d0
+#define REG_SF_WBGAIN_CHG 0x03d2
+#define REG_SF_FLICKER_QUANT 0x03d4
+#define REG_SF_FLICKER_QUANT_CHG 0x03d6
+
+/* Output interface (parallel/MIPI) setup */
+#define REG_OIF_EN_MIPI_LANES 0x03f2
+#define REG_OIF_EN_PACKETS 0x03f4
+#define EN_PACKETS_CSI2 0xc3
+#define REG_OIF_CFG_CHG 0x03f6
+
+/* Auto-algorithms enable mask */
+#define REG_DBG_AUTOALG_EN 0x03f8
+#define AALG_ALL_EN BIT(0)
+#define AALG_AE_EN BIT(1)
+#define AALG_DIVLEI_EN BIT(2)
+#define AALG_WB_EN BIT(3)
+#define AALG_USE_WB_FOR_ISP BIT(4)
+#define AALG_FLICKER_EN BIT(5)
+#define AALG_FIT_EN BIT(6)
+#define AALG_WRHW_EN BIT(7)
+
+/* Pointers to color correction matrices */
+#define REG_PTR_CCM_HORIZON 0x06d0
+#define REG_PTR_CCM_INCANDESCENT 0x06d4
+#define REG_PTR_CCM_WARM_WHITE 0x06d8
+#define REG_PTR_CCM_COOL_WHITE 0x06dc
+#define REG_PTR_CCM_DL50 0x06e0
+#define REG_PTR_CCM_DL65 0x06e4
+#define REG_PTR_CCM_OUTDOOR 0x06ec
+
+#define REG_ARR_CCM(n) (0x2800 + 36 * (n))
+
+static const char * const s5k5baf_supply_names[] = {
+ "vdda", /* Analog power supply 2.8V (2.6V to 3.0V) */
+ "vddreg", /* Regulator input power supply 1.8V (1.7V to 1.9V)
+ or 2.8V (2.6V to 3.0V) */
+ "vddio", /* I/O power supply 1.8V (1.65V to 1.95V)
+ or 2.8V (2.5V to 3.1V) */
+};
+#define S5K5BAF_NUM_SUPPLIES ARRAY_SIZE(s5k5baf_supply_names)
+
+struct s5k5baf_gpio {
+ int gpio;
+ int level;
+};
+
+enum s5k5baf_gpio_id {
+ STBY,
+ RST,
+ NUM_GPIOS,
+};
+
+#define PAD_CIS 0
+#define PAD_OUT 1
+#define NUM_CIS_PADS 1
+#define NUM_ISP_PADS 2
+
+struct s5k5baf_pixfmt {
+ enum v4l2_mbus_pixelcode code;
+ u32 colorspace;
+ /* REG_P_FMT(x) register value */
+ u16 reg_p_fmt;
+};
+
+struct s5k5baf_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct { /* Auto / manual white balance cluster */
+ struct v4l2_ctrl *awb;
+ struct v4l2_ctrl *gain_red;
+ struct v4l2_ctrl *gain_blue;
+ };
+ struct { /* Mirror cluster */
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ };
+ struct { /* Auto exposure / manual exposure and gain cluster */
+ struct v4l2_ctrl *auto_exp;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *gain;
+ };
+};
+
+enum {
+ S5K5BAF_FW_ID_PATCH,
+ S5K5BAF_FW_ID_CCM,
+ S5K5BAF_FW_ID_CIS,
+};
+
+struct s5k5baf_fw {
+ u16 count;
+ struct {
+ u16 id;
+ u16 offset;
+ } seq[0];
+ u16 data[0];
+};
+
+struct s5k5baf {
+ struct s5k5baf_gpio gpios[NUM_GPIOS];
+ enum v4l2_mbus_type bus_type;
+ u8 nlanes;
+ struct regulator_bulk_data supplies[S5K5BAF_NUM_SUPPLIES];
+
+ struct clk *clock;
+ u32 mclk_frequency;
+
+ struct s5k5baf_fw *fw;
+
+ struct v4l2_subdev cis_sd;
+ struct media_pad cis_pad;
+
+ struct v4l2_subdev sd;
+ struct media_pad pads[NUM_ISP_PADS];
+
+ /* protects the struct members below */
+ struct mutex lock;
+
+ int error;
+
+ struct v4l2_rect crop_sink;
+ struct v4l2_rect compose;
+ struct v4l2_rect crop_source;
+ /* index to s5k5baf_formats array */
+ int pixfmt;
+ /* actual frame interval in 100us */
+ u16 fiv;
+ /* requested frame interval in 100us */
+ u16 req_fiv;
+ /* cache for REG_DBG_AUTOALG_EN register */
+ u16 auto_alg;
+
+ struct s5k5baf_ctrls ctrls;
+
+ unsigned int streaming:1;
+ unsigned int apply_cfg:1;
+ unsigned int apply_crop:1;
+ unsigned int valid_auto_alg:1;
+ unsigned int power;
+};
+
+static const struct s5k5baf_pixfmt s5k5baf_formats[] = {
+ { V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG, 5 },
+ /* range 16-240 */
+ { V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_REC709, 6 },
+ { V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_JPEG, 0 },
+};
+
+static struct v4l2_rect s5k5baf_cis_rect = {
+ 0, 0, S5K5BAF_CIS_WIDTH, S5K5BAF_CIS_HEIGHT
+};
+
+/* The setfile contains a set of I2C command sequences. Each sequence has its ID.
+ * setfile format:
+ * u8 magic[4];
+ * u16 count; number of sequences
+ * struct {
+ * u16 id; sequence id
+ * u16 offset; sequence offset in data array
+ * } seq[count];
+ * u16 data[*]; array containing sequences
+ *
+ *
+ */
+static int s5k5baf_fw_parse(struct device *dev, struct s5k5baf_fw **fw,
+ size_t count, const u16 *data)
+{
+ struct s5k5baf_fw *f;
+ u16 *d, i, *end;
+ int ret;
+
+ if (count < S5K5BAG_FW_TAG_LEN + 1) {
+ dev_err(dev, "firmware file too short (%zu)\n", count);
+ return -EINVAL;
+ }
+
+ ret = memcmp(data, S5K5BAF_FW_TAG, S5K5BAG_FW_TAG_LEN * sizeof(u16));
+ if (ret != 0) {
+ dev_err(dev, "invalid firmware magic number\n");
+ return -EINVAL;
+ }
+
+ data += S5K5BAG_FW_TAG_LEN;
+ count -= S5K5BAG_FW_TAG_LEN;
+
+ d = devm_kzalloc(dev, count * sizeof(u16), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ for (i = 0; i < count; ++i)
+ d[i] = le16_to_cpu(data[i]);
+
+ f = (struct s5k5baf_fw *)d;
+ if (count < 1 + 2 * f->count) {
+ dev_err(dev, "invalid firmware header (count=%d size=%zu)\n",
+ f->count, 2 * (count + S5K5BAG_FW_TAG_LEN));
+ return -EINVAL;
+ }
+ end = d + count;
+ d += 1 + 2 * f->count;
+
+ for (i = 0; i < f->count; ++i) {
+ if (f->seq[i].offset + d <= end)
+ continue;
+ dev_err(dev, "invalid firmware header (seq=%d)\n", i);
+ return -EINVAL;
+ }
+
+ *fw = f;
+
+ return 0;
+}
+
+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct s5k5baf, ctrls.handler)->sd;
+}
+
+static inline bool s5k5baf_is_cis_subdev(struct v4l2_subdev *sd)
+{
+ return sd->entity.type == MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+}
+
+static inline struct s5k5baf *to_s5k5baf(struct v4l2_subdev *sd)
+{
+ if (s5k5baf_is_cis_subdev(sd))
+ return container_of(sd, struct s5k5baf, cis_sd);
+ else
+ return container_of(sd, struct s5k5baf, sd);
+}
+
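+/*
+ * Read a 16-bit register: a write message carrying the big-endian address
+ * followed by a read message returning the big-endian value. I2C errors are
+ * latched in state->error so a series of accesses can be checked once.
+ */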
+static u16 s5k5baf_i2c_read(struct s5k5baf *state, u16 addr)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+ __be16 w, r;
+ struct i2c_msg msg[] = {
+ { .addr = c->addr, .flags = 0,
+ .len = 2, .buf = (u8 *)&w },
+ { .addr = c->addr, .flags = I2C_M_RD,
+ .len = 2, .buf = (u8 *)&r },
+ };
+ int ret;
+
+ if (state->error)
+ return 0;
+
+ w = cpu_to_be16(addr);
+ ret = i2c_transfer(c->adapter, msg, 2);
+ r = be16_to_cpu(r);
+
+ v4l2_dbg(3, debug, c, "i2c_read: 0x%04x : 0x%04x\n", addr, r);
+
+ if (ret != 2) {
+ v4l2_err(c, "i2c_read: error during transfer (%d)\n", ret);
+ state->error = ret;
+ }
+ return r;
+}
+
+static void s5k5baf_i2c_write(struct s5k5baf *state, u16 addr, u16 val)
+{
+ u8 buf[4] = { addr >> 8, addr & 0xFF, val >> 8, val & 0xFF };
+ struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+ int ret;
+
+ if (state->error)
+ return;
+
+ ret = i2c_master_send(c, buf, 4);
+ v4l2_dbg(3, debug, c, "i2c_write: 0x%04x : 0x%04x\n", addr, val);
+
+ if (ret != 4) {
+ v4l2_err(c, "i2c_write: error during transfer (%d)\n", ret);
+ state->error = ret;
+ }
+}
+
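+/*
+ * Indirect register access: the target address is first latched via
+ * REG_CMDRD_ADDR/REG_CMDWR_ADDR and the value is then transferred through
+ * REG_CMD_BUF.
+ */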
+static u16 s5k5baf_read(struct s5k5baf *state, u16 addr)
+{
+ s5k5baf_i2c_write(state, REG_CMDRD_ADDR, addr);
+ return s5k5baf_i2c_read(state, REG_CMD_BUF);
+}
+
+static void s5k5baf_write(struct s5k5baf *state, u16 addr, u16 val)
+{
+ s5k5baf_i2c_write(state, REG_CMDWR_ADDR, addr);
+ s5k5baf_i2c_write(state, REG_CMD_BUF, val);
+}
+
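+/*
+ * Stream @count words to firmware memory starting at @addr. The command
+ * write address is set once and the data words are then pushed through
+ * REG_CMD_BUF in bursts of at most 64 words per I2C transfer (buf[0] holds
+ * the REG_CMD_BUF register address).
+ */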
+static void s5k5baf_write_arr_seq(struct s5k5baf *state, u16 addr,
+ u16 count, const u16 *seq)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+ __be16 buf[65];
+
+ s5k5baf_i2c_write(state, REG_CMDWR_ADDR, addr);
+ if (state->error)
+ return;
+
+ v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count,
+ min(2 * count, 64), seq);
+
+ buf[0] = __constant_cpu_to_be16(REG_CMD_BUF);
+
+ while (count > 0) {
+ int n = min_t(int, count, ARRAY_SIZE(buf) - 1);
+ int ret, i;
+
+ for (i = 1; i <= n; ++i)
+ buf[i] = cpu_to_be16(*seq++);
+
+ i *= 2;
+ ret = i2c_master_send(c, (char *)buf, i);
+ if (ret != i) {
+ v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret);
+ state->error = ret;
+ break;
+ }
+
+ count -= n;
+ }
+}
+
+#define s5k5baf_write_seq(state, addr, seq...) \
+ s5k5baf_write_arr_seq(state, addr, sizeof((char[]){ seq }), \
+ (const u16 []){ seq });
+
+/* add items count at the beginning of the list */
+#define NSEQ(seq...) sizeof((char[]){ seq }), seq
+
+/*
+ * s5k5baf_write_nseq() - Writes sequences of values to sensor memory via i2c
+ * @nseq: sequence of u16 words in format:
+ * (N, address, value[1]...value[N-1])*,0
+ * Ex.:
+ * u16 seq[] = { NSEQ(0x4000, 1, 1), NSEQ(0x4010, 640, 480), 0 };
+ * s5k5baf_write_nseq(state, seq);
+ */
+static void s5k5baf_write_nseq(struct s5k5baf *state, const u16 *nseq)
+{
+ int count;
+
+ while ((count = *nseq++)) {
+ u16 addr = *nseq++;
+ --count;
+
+ s5k5baf_write_arr_seq(state, addr, count, nseq);
+ nseq += count;
+ }
+}
+
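+/*
+ * Write 1 to @addr and poll until the firmware clears it again, giving up
+ * after @timeout milliseconds.
+ */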
+static void s5k5baf_synchronize(struct s5k5baf *state, int timeout, u16 addr)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(timeout);
+ u16 reg;
+
+ s5k5baf_write(state, addr, 1);
+ do {
+ reg = s5k5baf_read(state, addr);
+ if (state->error || !reg)
+ return;
+ usleep_range(5000, 10000);
+ } while (time_is_after_jiffies(end));
+
+ v4l2_err(&state->sd, "timeout on register synchronize (%#x)\n", addr);
+ state->error = -ETIMEDOUT;
+}
+
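+/* Find a command sequence by id in the parsed setfile; the data area starts
+ * right after the sequence table.
+ */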
+static u16 *s5k5baf_fw_get_seq(struct s5k5baf *state, u16 seq_id)
+{
+ struct s5k5baf_fw *fw = state->fw;
+ u16 *data;
+ int i;
+
+ if (fw == NULL)
+ return NULL;
+
+ data = fw->data + 2 * fw->count;
+
+ for (i = 0; i < fw->count; ++i) {
+ if (fw->seq[i].id == seq_id)
+ return data + fw->seq[i].offset;
+ }
+
+ return NULL;
+}
+
+static void s5k5baf_hw_patch(struct s5k5baf *state)
+{
+ u16 *seq = s5k5baf_fw_get_seq(state, S5K5BAF_FW_ID_PATCH);
+
+ if (seq)
+ s5k5baf_write_nseq(state, seq);
+}
+
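+/*
+ * Send the master clock frequency and the PVI/MIPI output clock limits to
+ * the firmware, then wait for it to recalculate its PLL configuration and
+ * check REG_I_ERROR_INFO for failures.
+ */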
+static void s5k5baf_hw_set_clocks(struct s5k5baf *state)
+{
+ unsigned long mclk = state->mclk_frequency / 1000;
+ u16 status;
+ static const u16 nseq_clk_cfg[] = {
+ NSEQ(REG_I_USE_NPVI_CLOCKS,
+ NPVI_CLOCKS, NMIPI_CLOCKS, 0,
+ SCLK_PVI_FREQ / 4, PCLK_MIN_FREQ / 4, PCLK_MAX_FREQ / 4,
+ SCLK_MIPI_FREQ / 4, PCLK_MIN_FREQ / 4, PCLK_MAX_FREQ / 4),
+ NSEQ(REG_I_USE_REGS_API, 1),
+ 0
+ };
+
+ s5k5baf_write_seq(state, REG_I_INCLK_FREQ_L, mclk & 0xffff, mclk >> 16);
+ s5k5baf_write_nseq(state, nseq_clk_cfg);
+
+ s5k5baf_synchronize(state, 250, REG_I_INIT_PARAMS_UPDATED);
+ status = s5k5baf_read(state, REG_I_ERROR_INFO);
+ if (!state->error && status) {
+ v4l2_err(&state->sd, "error configuring PLL (%d)\n", status);
+ state->error = -EINVAL;
+ }
+}
+
+/* set custom color correction matrices for various illuminations */
+static void s5k5baf_hw_set_ccm(struct s5k5baf *state)
+{
+ u16 *seq = s5k5baf_fw_get_seq(state, S5K5BAF_FW_ID_CCM);
+
+ if (seq)
+ s5k5baf_write_nseq(state, seq);
+}
+
+/* CIS sensor tuning, based on undocumented android driver code */
+static void s5k5baf_hw_set_cis(struct s5k5baf *state)
+{
+ u16 *seq = s5k5baf_fw_get_seq(state, S5K5BAF_FW_ID_CIS);
+
+ if (!seq)
+ return;
+
+ s5k5baf_i2c_write(state, REG_CMDWR_PAGE, PAGE_IF_HW);
+ s5k5baf_write_nseq(state, seq);
+ s5k5baf_i2c_write(state, REG_CMDWR_PAGE, PAGE_IF_SW);
+}
+
+static void s5k5baf_hw_sync_cfg(struct s5k5baf *state)
+{
+ s5k5baf_write(state, REG_G_PREV_CFG_CHG, 1);
+ if (state->apply_crop) {
+ s5k5baf_write(state, REG_G_INPUTS_CHANGE_REQ, 1);
+ s5k5baf_write(state, REG_G_PREV_CFG_BYPASS_CHANGED, 1);
+ }
+ s5k5baf_synchronize(state, 500, REG_G_NEW_CFG_SYNC);
+}
+
+/* Set horizontal and vertical image flipping */
+static void s5k5baf_hw_set_mirror(struct s5k5baf *state)
+{
+ u16 flip = state->ctrls.hflip->val | (state->ctrls.vflip->val << 1);
+
+ s5k5baf_write(state, REG_P_PREV_MIRROR(0), flip);
+ if (state->streaming)
+ s5k5baf_hw_sync_cfg(state);
+}
+
+static void s5k5baf_hw_set_alg(struct s5k5baf *state, u16 alg, bool enable)
+{
+ u16 cur_alg, new_alg;
+
+ if (!state->valid_auto_alg)
+ cur_alg = s5k5baf_read(state, REG_DBG_AUTOALG_EN);
+ else
+ cur_alg = state->auto_alg;
+
+ new_alg = enable ? (cur_alg | alg) : (cur_alg & ~alg);
+
+ if (new_alg != cur_alg)
+ s5k5baf_write(state, REG_DBG_AUTOALG_EN, new_alg);
+
+ if (state->error)
+ return;
+
+ state->valid_auto_alg = 1;
+ state->auto_alg = new_alg;
+}
+
+/* Configure auto/manual white balance and R/G/B gains */
+static void s5k5baf_hw_set_awb(struct s5k5baf *state, int awb)
+{
+ struct s5k5baf_ctrls *ctrls = &state->ctrls;
+
+ if (!awb)
+ s5k5baf_write_seq(state, REG_SF_RGAIN,
+ ctrls->gain_red->val, 1,
+ S5K5BAF_GAIN_GREEN_DEF, 1,
+ ctrls->gain_blue->val, 1,
+ 1);
+
+ s5k5baf_hw_set_alg(state, AALG_WB_EN, awb);
+}
+
+/* Program FW with exposure time, 'exposure' in us units */
+static void s5k5baf_hw_set_user_exposure(struct s5k5baf *state, int exposure)
+{
+ unsigned int time = exposure / 10;
+
+ s5k5baf_write_seq(state, REG_SF_USR_EXPOSURE_L,
+ time & 0xffff, time >> 16, 1);
+}
+
+static void s5k5baf_hw_set_user_gain(struct s5k5baf *state, int gain)
+{
+ s5k5baf_write_seq(state, REG_SF_USR_TOT_GAIN, gain, 1);
+}
+
+/* Set auto/manual exposure and total gain */
+static void s5k5baf_hw_set_auto_exposure(struct s5k5baf *state, int value)
+{
+ if (value == V4L2_EXPOSURE_AUTO) {
+ s5k5baf_hw_set_alg(state, AALG_AE_EN | AALG_DIVLEI_EN, true);
+ } else {
+ unsigned int exp_time = state->ctrls.exposure->val;
+
+ s5k5baf_hw_set_user_exposure(state, exp_time);
+ s5k5baf_hw_set_user_gain(state, state->ctrls.gain->val);
+ s5k5baf_hw_set_alg(state, AALG_AE_EN | AALG_DIVLEI_EN, false);
+ }
+}
+
+static void s5k5baf_hw_set_anti_flicker(struct s5k5baf *state, int v)
+{
+ if (v == V4L2_CID_POWER_LINE_FREQUENCY_AUTO) {
+ s5k5baf_hw_set_alg(state, AALG_FLICKER_EN, true);
+ } else {
+ /* The V4L2_CID_POWER_LINE_FREQUENCY control values
+ * match the register values */
+ s5k5baf_write_seq(state, REG_SF_FLICKER_QUANT, v, 1);
+ s5k5baf_hw_set_alg(state, AALG_FLICKER_EN, false);
+ }
+}
+
+static void s5k5baf_hw_set_colorfx(struct s5k5baf *state, int val)
+{
+ static const u16 colorfx[] = {
+ [V4L2_COLORFX_NONE] = 0,
+ [V4L2_COLORFX_BW] = 1,
+ [V4L2_COLORFX_NEGATIVE] = 2,
+ [V4L2_COLORFX_SEPIA] = 3,
+ [V4L2_COLORFX_SKY_BLUE] = 4,
+ [V4L2_COLORFX_SKETCH] = 5,
+ };
+
+ s5k5baf_write(state, REG_G_SPEC_EFFECTS, colorfx[val]);
+}
+
+static int s5k5baf_find_pixfmt(struct v4l2_mbus_framefmt *mf)
+{
+ int i, c = -1;
+
+ for (i = 0; i < ARRAY_SIZE(s5k5baf_formats); i++) {
+ if (mf->colorspace != s5k5baf_formats[i].colorspace)
+ continue;
+ if (mf->code == s5k5baf_formats[i].code)
+ return i;
+ if (c < 0)
+ c = i;
+ }
+ return (c < 0) ? 0 : c;
+}
+
+static int s5k5baf_clear_error(struct s5k5baf *state)
+{
+ int ret = state->error;
+
+ state->error = 0;
+ return ret;
+}
+
+static int s5k5baf_hw_set_video_bus(struct s5k5baf *state)
+{
+ u16 en_pkts;
+
+ if (state->bus_type == V4L2_MBUS_CSI2)
+ en_pkts = EN_PACKETS_CSI2;
+ else
+ en_pkts = 0;
+
+ s5k5baf_write_seq(state, REG_OIF_EN_MIPI_LANES,
+ state->nlanes, en_pkts, 1);
+
+ return s5k5baf_clear_error(state);
+}
+
+static u16 s5k5baf_get_cfg_error(struct s5k5baf *state)
+{
+ u16 err = s5k5baf_read(state, REG_G_PREV_CFG_ERROR);
+ if (err)
+ s5k5baf_write(state, REG_G_PREV_CFG_ERROR, 0);
+ return err;
+}
+
+static void s5k5baf_hw_set_fiv(struct s5k5baf *state, u16 fiv)
+{
+ s5k5baf_write(state, REG_P_MAX_FR_TIME(0), fiv);
+ s5k5baf_hw_sync_cfg(state);
+}
+
+static void s5k5baf_hw_find_min_fiv(struct s5k5baf *state)
+{
+ u16 err, fiv;
+ int n;
+
+ fiv = s5k5baf_read(state, REG_G_ACTUAL_P_FR_TIME);
+ if (state->error)
+ return;
+
+ for (n = 5; n > 0; --n) {
+ s5k5baf_hw_set_fiv(state, fiv);
+ err = s5k5baf_get_cfg_error(state);
+ if (state->error)
+ return;
+ switch (err) {
+ case CFG_ERROR_RANGE:
+ ++fiv;
+ break;
+ case 0:
+ state->fiv = fiv;
+ v4l2_info(&state->sd,
+ "found valid frame interval: %d00us\n", fiv);
+ return;
+ default:
+ v4l2_err(&state->sd,
+ "error setting frame interval: %d\n", err);
+ state->error = -EINVAL;
+ }
+ }
+ v4l2_err(&state->sd, "cannot find correct frame interval\n");
+ state->error = -ERANGE;
+}
+
+static void s5k5baf_hw_validate_cfg(struct s5k5baf *state)
+{
+ u16 err;
+
+ err = s5k5baf_get_cfg_error(state);
+ if (state->error)
+ return;
+
+ switch (err) {
+ case 0:
+ state->apply_cfg = 1;
+ return;
+ case CFG_ERROR_RANGE:
+ s5k5baf_hw_find_min_fiv(state);
+ if (!state->error)
+ state->apply_cfg = 1;
+ return;
+ default:
+ v4l2_err(&state->sd,
+ "error setting format: %d\n", err);
+ state->error = -EINVAL;
+ }
+}
+
+static void s5k5baf_rescale(struct v4l2_rect *r, const struct v4l2_rect *v,
+ const struct v4l2_rect *n,
+ const struct v4l2_rect *d)
+{
+ r->left = v->left * n->width / d->width;
+ r->top = v->top * n->height / d->height;
+ r->width = v->width * n->width / d->width;
+ r->height = v->height * n->height / d->height;
+}
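
As an illustration of the proportional mapping above (a stand-alone sketch with arbitrary sample values, not taken from the driver): a 400x300 rectangle at (100,50) described relative to an 800x600 compose window maps into a 1600x1200 sink crop as follows.

#include <stdio.h>

struct rect { int left, top, width, height; };

/* Same arithmetic as s5k5baf_rescale(): scale rectangle v from the
 * coordinate space of d into the coordinate space of n, per axis. */
static void rescale(struct rect *r, const struct rect *v,
		    const struct rect *n, const struct rect *d)
{
	r->left = v->left * n->width / d->width;
	r->top = v->top * n->height / d->height;
	r->width = v->width * n->width / d->width;
	r->height = v->height * n->height / d->height;
}

int main(void)
{
	struct rect v = { 100, 50, 400, 300 };	/* source crop */
	struct rect n = { 0, 0, 1600, 1200 };	/* sink crop */
	struct rect d = { 0, 0, 800, 600 };	/* compose */
	struct rect r;

	rescale(&r, &v, &n, &d);
	printf("%d,%d %dx%d\n", r.left, r.top, r.width, r.height);
	/* prints: 200,100 800x600 */
	return 0;
}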
+
+static int s5k5baf_hw_set_crop_rects(struct s5k5baf *state)
+{
+ struct v4l2_rect *p, r;
+ u16 err;
+ int ret;
+
+ p = &state->crop_sink;
+ s5k5baf_write_seq(state, REG_G_PREVREQ_IN_WIDTH, p->width, p->height,
+ p->left, p->top);
+
+ s5k5baf_rescale(&r, &state->crop_source, &state->crop_sink,
+ &state->compose);
+ s5k5baf_write_seq(state, REG_G_PREVZOOM_IN_WIDTH, r.width, r.height,
+ r.left, r.top);
+
+ s5k5baf_synchronize(state, 500, REG_G_INPUTS_CHANGE_REQ);
+ s5k5baf_synchronize(state, 500, REG_G_PREV_CFG_BYPASS_CHANGED);
+ err = s5k5baf_get_cfg_error(state);
+ ret = s5k5baf_clear_error(state);
+ if (ret < 0)
+ return ret;
+
+ switch (err) {
+ case 0:
+ break;
+ case CFG_ERROR_RANGE:
+ /* retry crop with frame interval set to max */
+ s5k5baf_hw_set_fiv(state, S5K5BAF_MAX_FR_TIME);
+ err = s5k5baf_get_cfg_error(state);
+ ret = s5k5baf_clear_error(state);
+ if (ret < 0)
+ return ret;
+ if (err) {
+ v4l2_err(&state->sd,
+ "crop error on max frame interval: %d\n", err);
+ state->error = -EINVAL;
+ }
+ s5k5baf_hw_set_fiv(state, state->req_fiv);
+ s5k5baf_hw_validate_cfg(state);
+ break;
+ default:
+ v4l2_err(&state->sd, "crop error: %d\n", err);
+ return -EINVAL;
+ }
+
+ if (!state->apply_cfg)
+ return 0;
+
+ p = &state->crop_source;
+ s5k5baf_write_seq(state, REG_P_OUT_WIDTH(0), p->width, p->height);
+ s5k5baf_hw_set_fiv(state, state->req_fiv);
+ s5k5baf_hw_validate_cfg(state);
+
+ return s5k5baf_clear_error(state);
+}
+
+static void s5k5baf_hw_set_config(struct s5k5baf *state)
+{
+ u16 reg_fmt = s5k5baf_formats[state->pixfmt].reg_p_fmt;
+ struct v4l2_rect *r = &state->crop_source;
+
+ s5k5baf_write_seq(state, REG_P_OUT_WIDTH(0),
+ r->width, r->height, reg_fmt,
+ PCLK_MAX_FREQ >> 2, PCLK_MIN_FREQ >> 2,
+ PVI_MASK_MIPI, CLK_MIPI_INDEX,
+ FR_RATE_FIXED, FR_RATE_Q_DYNAMIC,
+ state->req_fiv, S5K5BAF_MIN_FR_TIME);
+ s5k5baf_hw_sync_cfg(state);
+ s5k5baf_hw_validate_cfg(state);
+}
+
+static void s5k5baf_hw_set_test_pattern(struct s5k5baf *state, int id)
+{
+ s5k5baf_i2c_write(state, REG_PATTERN_WIDTH, 800);
+ s5k5baf_i2c_write(state, REG_PATTERN_HEIGHT, 511);
+ s5k5baf_i2c_write(state, REG_PATTERN_PARAM, 0);
+ s5k5baf_i2c_write(state, REG_PATTERN_SET, id);
+}
+
+static void s5k5baf_gpio_assert(struct s5k5baf *state, int id)
+{
+ struct s5k5baf_gpio *gpio = &state->gpios[id];
+
+ gpio_set_value(gpio->gpio, gpio->level);
+}
+
+static void s5k5baf_gpio_deassert(struct s5k5baf *state, int id)
+{
+ struct s5k5baf_gpio *gpio = &state->gpios[id];
+
+ gpio_set_value(gpio->gpio, !gpio->level);
+}
+
+static int s5k5baf_power_on(struct s5k5baf *state)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(S5K5BAF_NUM_SUPPLIES, state->supplies);
+ if (ret < 0)
+ goto err;
+
+ ret = clk_set_rate(state->clock, state->mclk_frequency);
+ if (ret < 0)
+ goto err_reg_dis;
+
+ ret = clk_prepare_enable(state->clock);
+ if (ret < 0)
+ goto err_reg_dis;
+
+ v4l2_dbg(1, debug, &state->sd, "clock frequency: %lu\n",
+ clk_get_rate(state->clock));
+
+ s5k5baf_gpio_deassert(state, STBY);
+ usleep_range(50, 100);
+ s5k5baf_gpio_deassert(state, RST);
+ return 0;
+
+err_reg_dis:
+ regulator_bulk_disable(S5K5BAF_NUM_SUPPLIES, state->supplies);
+err:
+ v4l2_err(&state->sd, "%s() failed (%d)\n", __func__, ret);
+ return ret;
+}
+
+static int s5k5baf_power_off(struct s5k5baf *state)
+{
+ int ret;
+
+ state->streaming = 0;
+ state->apply_cfg = 0;
+ state->apply_crop = 0;
+
+ s5k5baf_gpio_assert(state, RST);
+ s5k5baf_gpio_assert(state, STBY);
+
+ if (!IS_ERR(state->clock))
+ clk_disable_unprepare(state->clock);
+
+ ret = regulator_bulk_disable(S5K5BAF_NUM_SUPPLIES,
+ state->supplies);
+ if (ret < 0)
+ v4l2_err(&state->sd, "failed to disable regulators\n");
+
+ return 0;
+}
+
+static void s5k5baf_hw_init(struct s5k5baf *state)
+{
+ s5k5baf_i2c_write(state, AHB_MSB_ADDR_PTR, PAGE_IF_HW);
+ s5k5baf_i2c_write(state, REG_CLEAR_HOST_INT, 0);
+ s5k5baf_i2c_write(state, REG_SW_LOAD_COMPLETE, 1);
+ s5k5baf_i2c_write(state, REG_CMDRD_PAGE, PAGE_IF_SW);
+ s5k5baf_i2c_write(state, REG_CMDWR_PAGE, PAGE_IF_SW);
+}
+
+/*
+ * V4L2 subdev core and video operations
+ */
+
+static void s5k5baf_initialize_data(struct s5k5baf *state)
+{
+ state->pixfmt = 0;
+ state->req_fiv = 10000 / 15;
+ state->fiv = state->req_fiv;
+ state->valid_auto_alg = 0;
+}
+
+static int s5k5baf_load_setfile(struct s5k5baf *state)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, S5K5BAF_FW_FILENAME, &c->dev);
+ if (ret < 0) {
+ dev_warn(&c->dev, "firmware file (%s) not loaded\n",
+ S5K5BAF_FW_FILENAME);
+ return ret;
+ }
+
+ ret = s5k5baf_fw_parse(&c->dev, &state->fw, fw->size / 2,
+ (u16 *)fw->data);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int s5k5baf_set_power(struct v4l2_subdev *sd, int on)
+{
+ struct s5k5baf *state = to_s5k5baf(sd);
+ int ret = 0;
+
+ mutex_lock(&state->lock);
+
+ if (!on != state->power)
+ goto out;
+
+ if (on) {
+ if (state->fw == NULL)
+ s5k5baf_load_setfile(state);
+
+ s5k5baf_initialize_data(state);
+ ret = s5k5baf_power_on(state);
+ if (ret < 0)
+ goto out;
+
+ s5k5baf_hw_init(state);
+ s5k5baf_hw_patch(state);
+ s5k5baf_i2c_write(state, REG_SET_HOST_INT, 1);
+ s5k5baf_hw_set_clocks(state);
+
+ ret = s5k5baf_hw_set_video_bus(state);
+ if (ret < 0)
+ goto out;
+
+ s5k5baf_hw_set_cis(state);
+ s5k5baf_hw_set_ccm(state);
+
+ ret = s5k5baf_clear_error(state);
+ if (!ret)
+ state->power++;
+ } else {
+ s5k5baf_power_off(state);
+ state->power--;
+ }
+
+out:
+ mutex_unlock(&state->lock);
+
+ if (!ret && on)
+ ret = v4l2_ctrl_handler_setup(&state->ctrls.handler);
+
+ return ret;
+}
+
+static void s5k5baf_hw_set_stream(struct s5k5baf *state, int enable)
+{
+ s5k5baf_write_seq(state, REG_G_ENABLE_PREV, enable, 1);
+}
+
+static int s5k5baf_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct s5k5baf *state = to_s5k5baf(sd);
+ int ret;
+
+ mutex_lock(&state->lock);
+
+ if (state->streaming == !!on) {
+ ret = 0;
+ goto out;
+ }
+
+ if (on) {
+ s5k5baf_hw_set_config(state);
+ ret = s5k5baf_hw_set_crop_rects(state);
+ if (ret < 0)
+ goto out;
+ s5k5baf_hw_set_stream(state, 1);
+ s5k5baf_i2c_write(state, 0xb0cc, 0x000b);
+ } else {
+ s5k5baf_hw_set_stream(state, 0);
+ }
+ ret = s5k5baf_clear_error(state);
+ if (!ret)
+ state->streaming = !state->streaming;
+
+out:
+ mutex_unlock(&state->lock);
+
+ return ret;
+}
+
+static int s5k5baf_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct s5k5baf *state = to_s5k5baf(sd);
+
+ mutex_lock(&state->lock);
+ fi->interval.numerator = state->fiv;
+ fi->interval.denominator = 10000;
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
+static void s5k5baf_set_frame_interval(struct s5k5baf *state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct v4l2_fract *i = &fi->interval;
+
+ if (fi->interval.denominator == 0)
+ state->req_fiv = S5K5BAF_MAX_FR_TIME;
+ else
+ state->req_fiv = clamp_t(u32,
+ i->numerator * 10000 / i->denominator,
+ S5K5BAF_MIN_FR_TIME,
+ S5K5BAF_MAX_FR_TIME);
+
+ state->fiv = state->req_fiv;
+ if (state->apply_cfg) {
+ s5k5baf_hw_set_fiv(state, state->req_fiv);
+ s5k5baf_hw_validate_cfg(state);
+ }
+ *i = (struct v4l2_fract){ state->fiv, 10000 };
+ if (state->fiv == state->req_fiv)
+ v4l2_info(&state->sd, "frame interval changed to %d00us\n",
+ state->fiv);
+}
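
The fiv values handled above are in 100 us units, so a requested interval of numerator/denominator seconds becomes numerator * 10000 / denominator before clamping. A tiny stand-alone sketch of that conversion (sample values only):

#include <stdio.h>

int main(void)
{
	/* v4l2_fract-style request: 1/30 s per frame */
	unsigned int numerator = 1, denominator = 30;

	/* same conversion as in s5k5baf_set_frame_interval(), pre-clamp */
	unsigned int fiv = numerator * 10000 / denominator;

	printf("fiv=%u (%u00 us per frame)\n", fiv, fiv);
	/* prints: fiv=333 (33300 us per frame) */
	return 0;
}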
+
+static int s5k5baf_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct s5k5baf *state = to_s5k5baf(sd);
+
+ mutex_lock(&state->lock);
+ s5k5baf_set_frame_interval(state, fi);
+ mutex_unlock(&state->lock);
+ return 0;
+}
+
+/*
+ * V4L2 subdev pad level and video operations
+ */
+static int s5k5baf_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ if (fie->index > S5K5BAF_MAX_FR_TIME - S5K5BAF_MIN_FR_TIME ||
+ fie->pad != PAD_CIS)
+ return -EINVAL;
+
+ v4l_bound_align_image(&fie->width, S5K5BAF_WIN_WIDTH_MIN,
+ S5K5BAF_CIS_WIDTH, 1,
+ &fie->height, S5K5BAF_WIN_HEIGHT_MIN,
+ S5K5BAF_CIS_HEIGHT, 1, 0);
+
+ fie->interval.numerator = S5K5BAF_MIN_FR_TIME + fie->index;
+ fie->interval.denominator = 10000;
+
+ return 0;
+}
+
+static int s5k5baf_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad == PAD_CIS) {
+ if (code->index > 0)
+ return -EINVAL;
+ code->code = V4L2_MBUS_FMT_FIXED;
+ return 0;
+ }
+
+ if (code->index >= ARRAY_SIZE(s5k5baf_formats))
+ return -EINVAL;
+
+ code->code = s5k5baf_formats[code->index].code;
+ return 0;
+}
+
+static int s5k5baf_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int i;
+
+ if (fse->index > 0)
+ return -EINVAL;
+
+ if (fse->pad == PAD_CIS) {
+ fse->code = V4L2_MBUS_FMT_FIXED;
+ fse->min_width = S5K5BAF_CIS_WIDTH;
+ fse->max_width = S5K5BAF_CIS_WIDTH;
+ fse->min_height = S5K5BAF_CIS_HEIGHT;
+ fse->max_height = S5K5BAF_CIS_HEIGHT;
+ return 0;
+ }
+
+ i = ARRAY_SIZE(s5k5baf_formats);
+ while (--i)
+ if (fse->code == s5k5baf_formats[i].code)
+ break;
+ fse->code = s5k5baf_formats[i].code;
+ fse->min_width = S5K5BAF_WIN_WIDTH_MIN;
+ fse->max_width = S5K5BAF_CIS_WIDTH;
+ fse->min_height = S5K5BAF_WIN_HEIGHT_MIN;
+ fse->max_height = S5K5BAF_CIS_HEIGHT;
+
+ return 0;
+}
+
+static void s5k5baf_try_cis_format(struct v4l2_mbus_framefmt *mf)
+{
+ mf->width = S5K5BAF_CIS_WIDTH;
+ mf->height = S5K5BAF_CIS_HEIGHT;
+ mf->code = V4L2_MBUS_FMT_FIXED;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mf->field = V4L2_FIELD_NONE;
+}
+
+static int s5k5baf_try_isp_format(struct v4l2_mbus_framefmt *mf)
+{
+ int pixfmt;
+
+ v4l_bound_align_image(&mf->width, S5K5BAF_WIN_WIDTH_MIN,
+ S5K5BAF_CIS_WIDTH, 1,
+ &mf->height, S5K5BAF_WIN_HEIGHT_MIN,
+ S5K5BAF_CIS_HEIGHT, 1, 0);
+
+ pixfmt = s5k5baf_find_pixfmt(mf);
+
+ mf->colorspace = s5k5baf_formats[pixfmt].colorspace;
+ mf->code = s5k5baf_formats[pixfmt].code;
+ mf->field = V4L2_FIELD_NONE;
+
+ return pixfmt;
+}
+
+static int s5k5baf_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct s5k5baf *state = to_s5k5baf(sd);
+ const struct s5k5baf_pixfmt *pixfmt;
+ struct v4l2_mbus_framefmt *mf;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ fmt->format = *mf;
+ return 0;
+ }
+
+ mf = &fmt->format;
+ if (fmt->pad == PAD_CIS) {
+ s5k5baf_try_cis_format(mf);
+ return 0;
+ }
+ mf->field = V4L2_FIELD_NONE;
+ mutex_lock(&state->lock);
+ pixfmt = &s5k5baf_formats[state->pixfmt];
+ mf->width = state->crop_source.width;
+ mf->height = state->crop_source.height;
+ mf->code = pixfmt->code;
+ mf->colorspace = pixfmt->colorspace;
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
+static int s5k5baf_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct s5k5baf *state = to_s5k5baf(sd);
+ const struct s5k5baf_pixfmt *pixfmt;
+ int ret = 0;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(fh, fmt->pad) = *mf;
+ return 0;
+ }
+
+ if (fmt->pad == PAD_CIS) {
+ s5k5baf_try_cis_format(mf);
+ return 0;
+ }
+
+ mutex_lock(&state->lock);
+
+ if (state->streaming) {
+ mutex_unlock(&state->lock);
+ return -EBUSY;
+ }
+
+ state->pixfmt = s5k5baf_try_isp_format(mf);
+ pixfmt = &s5k5baf_formats[state->pixfmt];
+ mf->code = pixfmt->code;
+ mf->colorspace = pixfmt->colorspace;
+ mf->width = state->crop_source.width;
+ mf->height = state->crop_source.height;
+
+ mutex_unlock(&state->lock);
+ return ret;
+}
+
+enum selection_rect { R_CIS, R_CROP_SINK, R_COMPOSE, R_CROP_SOURCE, R_INVALID };
+
+static enum selection_rect s5k5baf_get_sel_rect(u32 pad, u32 target)
+{
+ switch (target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ return pad ? R_COMPOSE : R_CIS;
+ case V4L2_SEL_TGT_CROP:
+ return pad ? R_CROP_SOURCE : R_CROP_SINK;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ return pad ? R_INVALID : R_CROP_SINK;
+ case V4L2_SEL_TGT_COMPOSE:
+ return pad ? R_INVALID : R_COMPOSE;
+ default:
+ return R_INVALID;
+ }
+}
+
+static int s5k5baf_is_bound_target(u32 target)
+{
+ return target == V4L2_SEL_TGT_CROP_BOUNDS ||
+ target == V4L2_SEL_TGT_COMPOSE_BOUNDS;
+}
+
+static int s5k5baf_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ enum selection_rect rtype;
+ struct s5k5baf *state = to_s5k5baf(sd);
+
+ rtype = s5k5baf_get_sel_rect(sel->pad, sel->target);
+
+ switch (rtype) {
+ case R_INVALID:
+ return -EINVAL;
+ case R_CIS:
+ sel->r = s5k5baf_cis_rect;
+ return 0;
+ default:
+ break;
+ }
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ if (rtype == R_COMPOSE)
+ sel->r = *v4l2_subdev_get_try_compose(fh, sel->pad);
+ else
+ sel->r = *v4l2_subdev_get_try_crop(fh, sel->pad);
+ return 0;
+ }
+
+ mutex_lock(&state->lock);
+ switch (rtype) {
+ case R_CROP_SINK:
+ sel->r = state->crop_sink;
+ break;
+ case R_COMPOSE:
+ sel->r = state->compose;
+ break;
+ case R_CROP_SOURCE:
+ sel->r = state->crop_source;
+ break;
+ default:
+ break;
+ }
+ if (s5k5baf_is_bound_target(sel->target)) {
+ sel->r.left = 0;
+ sel->r.top = 0;
+ }
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
+/* bounds range [start, start+len) to [0, max) and aligns to 2 */
+static void s5k5baf_bound_range(u32 *start, u32 *len, u32 max)
+{
+ if (*len > max)
+ *len = max;
+ if (*start + *len > max)
+ *start = max - *len;
+ *start &= ~1;
+ *len &= ~1;
+ if (*len < S5K5BAF_WIN_WIDTH_MIN)
+ *len = S5K5BAF_WIN_WIDTH_MIN;
+}
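
For reference, the same clamp-and-align logic applied to sample numbers (a stand-alone sketch; WIN_MIN is an assumed stand-in for S5K5BAF_WIN_WIDTH_MIN, not the driver's actual value):

#include <stdio.h>

#define WIN_MIN	8	/* assumed stand-in, illustration only */

/* Mirrors s5k5baf_bound_range(): clamp [start, start+len) into [0, max),
 * align both values down to a multiple of 2 and enforce a minimum length. */
static void bound_range(unsigned int *start, unsigned int *len,
			unsigned int max)
{
	if (*len > max)
		*len = max;
	if (*start + *len > max)
		*start = max - *len;
	*start &= ~1u;
	*len &= ~1u;
	if (*len < WIN_MIN)
		*len = WIN_MIN;
}

int main(void)
{
	unsigned int start = 101, len = 1700;

	bound_range(&start, &len, 1600);	/* 1600 = assumed sensor width */
	printf("start=%u len=%u\n", start, len);	/* start=0 len=1600 */
	return 0;
}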
+
+static void s5k5baf_bound_rect(struct v4l2_rect *r, u32 width, u32 height)
+{
+ s5k5baf_bound_range(&r->left, &r->width, width);
+ s5k5baf_bound_range(&r->top, &r->height, height);
+}
+
+static void s5k5baf_set_rect_and_adjust(struct v4l2_rect **rects,
+ enum selection_rect first,
+ struct v4l2_rect *v)
+{
+ struct v4l2_rect *r, *br;
+ enum selection_rect i = first;
+
+ *rects[first] = *v;
+ do {
+ r = rects[i];
+ br = rects[i - 1];
+ s5k5baf_bound_rect(r, br->width, br->height);
+ } while (++i != R_INVALID);
+ *v = *rects[first];
+}
+
+static bool s5k5baf_cmp_rect(const struct v4l2_rect *r1,
+ const struct v4l2_rect *r2)
+{
+ return !memcmp(r1, r2, sizeof(*r1));
+}
+
+static int s5k5baf_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ enum selection_rect rtype;
+ struct s5k5baf *state = to_s5k5baf(sd);
+ struct v4l2_rect **rects;
+ int ret = 0;
+
+ rtype = s5k5baf_get_sel_rect(sel->pad, sel->target);
+ if (rtype == R_INVALID || s5k5baf_is_bound_target(sel->target))
+ return -EINVAL;
+
+ /* allow only scaling on compose */
+ if (rtype == R_COMPOSE) {
+ sel->r.left = 0;
+ sel->r.top = 0;
+ }
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ rects = (struct v4l2_rect * []) {
+ &s5k5baf_cis_rect,
+ v4l2_subdev_get_try_crop(fh, PAD_CIS),
+ v4l2_subdev_get_try_compose(fh, PAD_CIS),
+ v4l2_subdev_get_try_crop(fh, PAD_OUT)
+ };
+ s5k5baf_set_rect_and_adjust(rects, rtype, &sel->r);
+ return 0;
+ }
+
+ rects = (struct v4l2_rect * []) {
+ &s5k5baf_cis_rect,
+ &state->crop_sink,
+ &state->compose,
+ &state->crop_source
+ };
+ mutex_lock(&state->lock);
+ if (state->streaming) {
+ /* adjust sel->r to avoid output resolution change */
+ if (rtype < R_CROP_SOURCE) {
+ if (sel->r.width < state->crop_source.width)
+ sel->r.width = state->crop_source.width;
+ if (sel->r.height < state->crop_source.height)
+ sel->r.height = state->crop_source.height;
+ } else {
+ sel->r.width = state->crop_source.width;
+ sel->r.height = state->crop_source.height;
+ }
+ }
+ s5k5baf_set_rect_and_adjust(rects, rtype, &sel->r);
+ if (!s5k5baf_cmp_rect(&state->crop_sink, &s5k5baf_cis_rect) ||
+ !s5k5baf_cmp_rect(&state->compose, &s5k5baf_cis_rect))
+ state->apply_crop = 1;
+ if (state->streaming)
+ ret = s5k5baf_hw_set_crop_rects(state);
+ mutex_unlock(&state->lock);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_pad_ops s5k5baf_cis_pad_ops = {
+ .enum_mbus_code = s5k5baf_enum_mbus_code,
+ .enum_frame_size = s5k5baf_enum_frame_size,
+ .get_fmt = s5k5baf_get_fmt,
+ .set_fmt = s5k5baf_set_fmt,
+};
+
+static const struct v4l2_subdev_pad_ops s5k5baf_pad_ops = {
+ .enum_mbus_code = s5k5baf_enum_mbus_code,
+ .enum_frame_size = s5k5baf_enum_frame_size,
+ .enum_frame_interval = s5k5baf_enum_frame_interval,
+ .get_fmt = s5k5baf_get_fmt,
+ .set_fmt = s5k5baf_set_fmt,
+ .get_selection = s5k5baf_get_selection,
+ .set_selection = s5k5baf_set_selection,
+};
+
+static const struct v4l2_subdev_video_ops s5k5baf_video_ops = {
+ .g_frame_interval = s5k5baf_g_frame_interval,
+ .s_frame_interval = s5k5baf_s_frame_interval,
+ .s_stream = s5k5baf_s_stream,
+};
+
+/*
+ * V4L2 subdev controls
+ */
+
+static int s5k5baf_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct s5k5baf *state = to_s5k5baf(sd);
+ int ret;
+
+ v4l2_dbg(1, debug, sd, "ctrl: %s, value: %d\n", ctrl->name, ctrl->val);
+
+ mutex_lock(&state->lock);
+
+ if (state->power == 0)
+ goto unlock;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ s5k5baf_hw_set_awb(state, ctrl->val);
+ break;
+
+ case V4L2_CID_BRIGHTNESS:
+ s5k5baf_write(state, REG_USER_BRIGHTNESS, ctrl->val);
+ break;
+
+ case V4L2_CID_COLORFX:
+ s5k5baf_hw_set_colorfx(state, ctrl->val);
+ break;
+
+ case V4L2_CID_CONTRAST:
+ s5k5baf_write(state, REG_USER_CONTRAST, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE_AUTO:
+ s5k5baf_hw_set_auto_exposure(state, ctrl->val);
+ break;
+
+ case V4L2_CID_HFLIP:
+ s5k5baf_hw_set_mirror(state);
+ break;
+
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ s5k5baf_hw_set_anti_flicker(state, ctrl->val);
+ break;
+
+ case V4L2_CID_SATURATION:
+ s5k5baf_write(state, REG_USER_SATURATION, ctrl->val);
+ break;
+
+ case V4L2_CID_SHARPNESS:
+ s5k5baf_write(state, REG_USER_SHARPBLUR, ctrl->val);
+ break;
+
+ case V4L2_CID_WHITE_BALANCE_TEMPERATURE:
+ s5k5baf_write(state, REG_P_COLORTEMP(0), ctrl->val);
+ if (state->apply_cfg)
+ s5k5baf_hw_sync_cfg(state);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ s5k5baf_hw_set_test_pattern(state, ctrl->val);
+ break;
+ }
+unlock:
+ ret = s5k5baf_clear_error(state);
+ mutex_unlock(&state->lock);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops s5k5baf_ctrl_ops = {
+ .s_ctrl = s5k5baf_s_ctrl,
+};
+
+static const char * const s5k5baf_test_pattern_menu[] = {
+ "Disabled",
+ "Blank",
+ "Bars",
+ "Gradients",
+ "Textile",
+ "Textile2",
+ "Squares"
+};
+
+static int s5k5baf_initialize_ctrls(struct s5k5baf *state)
+{
+ const struct v4l2_ctrl_ops *ops = &s5k5baf_ctrl_ops;
+ struct s5k5baf_ctrls *ctrls = &state->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(hdl, 16);
+ if (ret < 0) {
+ v4l2_err(&state->sd, "cannot init ctrl handler (%d)\n", ret);
+ return ret;
+ }
+
+ /* Auto white balance cluster */
+ ctrls->awb = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTO_WHITE_BALANCE,
+ 0, 1, 1, 1);
+ ctrls->gain_red = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RED_BALANCE,
+ 0, 255, 1, S5K5BAF_GAIN_RED_DEF);
+ ctrls->gain_blue = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BLUE_BALANCE,
+ 0, 255, 1, S5K5BAF_GAIN_BLUE_DEF);
+ v4l2_ctrl_auto_cluster(3, &ctrls->awb, 0, false);
+
+ ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_cluster(2, &ctrls->hflip);
+
+ ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL, 0, V4L2_EXPOSURE_AUTO);
+ /* Exposure time: x 1 us */
+ ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
+ 0, 6000000U, 1, 100000U);
+ /* Total gain: 256 <=> 1x */
+ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
+ 0, 256, 1, 256);
+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_exp, 0, false);
+
+ v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO);
+
+ v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_COLORFX,
+ V4L2_COLORFX_SKY_BLUE, ~0x6f, V4L2_COLORFX_NONE);
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_WHITE_BALANCE_TEMPERATURE,
+ 0, 256, 1, 0);
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION, -127, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, -127, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -127, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SHARPNESS, -127, 127, 1, 0);
+
+ v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(s5k5baf_test_pattern_menu) - 1,
+ 0, 0, s5k5baf_test_pattern_menu);
+
+ if (hdl->error) {
+ v4l2_err(&state->sd, "error creating controls (%d)\n",
+ hdl->error);
+ ret = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ return ret;
+ }
+
+ state->sd.ctrl_handler = hdl;
+ return 0;
+}
+
+/*
+ * V4L2 subdev internal operations
+ */
+static int s5k5baf_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = v4l2_subdev_get_try_format(fh, PAD_CIS);
+ s5k5baf_try_cis_format(mf);
+
+ if (s5k5baf_is_cis_subdev(sd))
+ return 0;
+
+ mf = v4l2_subdev_get_try_format(fh, PAD_OUT);
+ mf->colorspace = s5k5baf_formats[0].colorspace;
+ mf->code = s5k5baf_formats[0].code;
+ mf->width = s5k5baf_cis_rect.width;
+ mf->height = s5k5baf_cis_rect.height;
+ mf->field = V4L2_FIELD_NONE;
+
+ *v4l2_subdev_get_try_crop(fh, PAD_CIS) = s5k5baf_cis_rect;
+ *v4l2_subdev_get_try_compose(fh, PAD_CIS) = s5k5baf_cis_rect;
+ *v4l2_subdev_get_try_crop(fh, PAD_OUT) = s5k5baf_cis_rect;
+
+ return 0;
+}
+
+static int s5k5baf_check_fw_revision(struct s5k5baf *state)
+{
+ u16 api_ver = 0, fw_rev = 0, s_id = 0;
+ int ret;
+
+ api_ver = s5k5baf_read(state, REG_FW_APIVER);
+ fw_rev = s5k5baf_read(state, REG_FW_REVISION) & 0xff;
+ s_id = s5k5baf_read(state, REG_FW_SENSOR_ID);
+ ret = s5k5baf_clear_error(state);
+ if (ret < 0)
+ return ret;
+
+ v4l2_info(&state->sd, "FW API=%#x, revision=%#x sensor_id=%#x\n",
+ api_ver, fw_rev, s_id);
+
+ if (api_ver != S5K5BAF_FW_APIVER) {
+ v4l2_err(&state->sd, "FW API version not supported\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int s5k5baf_registered(struct v4l2_subdev *sd)
+{
+ struct s5k5baf *state = to_s5k5baf(sd);
+ int ret;
+
+ ret = v4l2_device_register_subdev(sd->v4l2_dev, &state->cis_sd);
+ if (ret < 0)
+ v4l2_err(sd, "failed to register subdev %s\n",
+ state->cis_sd.name);
+ else
+ ret = media_entity_create_link(&state->cis_sd.entity, PAD_CIS,
+ &state->sd.entity, PAD_CIS,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ return ret;
+}
+
+static void s5k5baf_unregistered(struct v4l2_subdev *sd)
+{
+ struct s5k5baf *state = to_s5k5baf(sd);
+
+ v4l2_device_unregister_subdev(&state->cis_sd);
+}
+
+static const struct v4l2_subdev_ops s5k5baf_cis_subdev_ops = {
+ .pad = &s5k5baf_cis_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops s5k5baf_cis_subdev_internal_ops = {
+ .open = s5k5baf_open,
+};
+
+static const struct v4l2_subdev_internal_ops s5k5baf_subdev_internal_ops = {
+ .registered = s5k5baf_registered,
+ .unregistered = s5k5baf_unregistered,
+ .open = s5k5baf_open,
+};
+
+static const struct v4l2_subdev_core_ops s5k5baf_core_ops = {
+ .s_power = s5k5baf_set_power,
+ .log_status = v4l2_ctrl_subdev_log_status,
+};
+
+static const struct v4l2_subdev_ops s5k5baf_subdev_ops = {
+ .core = &s5k5baf_core_ops,
+ .pad = &s5k5baf_pad_ops,
+ .video = &s5k5baf_video_ops,
+};
+
+static int s5k5baf_configure_gpios(struct s5k5baf *state)
+{
+ static const char * const name[] = { "S5K5BAF_STBY", "S5K5BAF_RST" };
+ struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+ struct s5k5baf_gpio *g = state->gpios;
+ int ret, i;
+
+ for (i = 0; i < NUM_GPIOS; ++i) {
+ int flags = GPIOF_DIR_OUT;
+ if (g[i].level)
+ flags |= GPIOF_INIT_HIGH;
+ ret = devm_gpio_request_one(&c->dev, g[i].gpio, flags, name[i]);
+ if (ret < 0) {
+ v4l2_err(c, "failed to request gpio %s\n", name[i]);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int s5k5baf_parse_gpios(struct s5k5baf_gpio *gpios, struct device *dev)
+{
+ static const char * const names[] = {
+ "stbyn-gpios",
+ "rstn-gpios",
+ };
+ struct device_node *node = dev->of_node;
+ enum of_gpio_flags flags;
+ int ret, i;
+
+ for (i = 0; i < NUM_GPIOS; ++i) {
+ ret = of_get_named_gpio_flags(node, names[i], 0, &flags);
+ if (ret < 0) {
+ dev_err(dev, "no %s GPIO pin provided\n", names[i]);
+ return ret;
+ }
+ gpios[i].gpio = ret;
+ gpios[i].level = !(flags & OF_GPIO_ACTIVE_LOW);
+ }
+
+ return 0;
+}
+
+static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev)
+{
+ struct device_node *node = dev->of_node;
+ struct device_node *node_ep;
+ struct v4l2_of_endpoint ep;
+ int ret;
+
+ if (!node) {
+ dev_err(dev, "no device-tree node provided\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "clock-frequency",
+ &state->mclk_frequency);
+ if (ret < 0) {
+ state->mclk_frequency = S5K5BAF_DEFAULT_MCLK_FREQ;
+ dev_info(dev, "using default %u Hz clock frequency\n",
+ state->mclk_frequency);
+ }
+
+ ret = s5k5baf_parse_gpios(state->gpios, dev);
+ if (ret < 0)
+ return ret;
+
+ node_ep = v4l2_of_get_next_endpoint(node, NULL);
+ if (!node_ep) {
+ dev_err(dev, "no endpoint defined at node %s\n",
+ node->full_name);
+ return -EINVAL;
+ }
+
+ v4l2_of_parse_endpoint(node_ep, &ep);
+ of_node_put(node_ep);
+ state->bus_type = ep.bus_type;
+
+ switch (state->bus_type) {
+ case V4L2_MBUS_CSI2:
+ state->nlanes = ep.bus.mipi_csi2.num_data_lanes;
+ break;
+ case V4L2_MBUS_PARALLEL:
+ break;
+ default:
+ dev_err(dev, "unsupported bus in endpoint defined at node %s\n",
+ node->full_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int s5k5baf_configure_subdevs(struct s5k5baf *state,
+ struct i2c_client *c)
+{
+ struct v4l2_subdev *sd;
+ int ret;
+
+ sd = &state->cis_sd;
+ v4l2_subdev_init(sd, &s5k5baf_cis_subdev_ops);
+ sd->owner = THIS_MODULE;
+ v4l2_set_subdevdata(sd, state);
+ snprintf(sd->name, sizeof(sd->name), "S5K5BAF-CIS %d-%04x",
+ i2c_adapter_id(c->adapter), c->addr);
+
+ sd->internal_ops = &s5k5baf_cis_subdev_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ state->cis_pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+ ret = media_entity_init(&sd->entity, NUM_CIS_PADS, &state->cis_pad, 0);
+ if (ret < 0)
+ goto err;
+
+ sd = &state->sd;
+ v4l2_i2c_subdev_init(sd, c, &s5k5baf_subdev_ops);
+ snprintf(sd->name, sizeof(sd->name), "S5K5BAF-ISP %d-%04x",
+ i2c_adapter_id(c->adapter), c->addr);
+
+ sd->internal_ops = &s5k5baf_subdev_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ state->pads[PAD_CIS].flags = MEDIA_PAD_FL_SINK;
+ state->pads[PAD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ ret = media_entity_init(&sd->entity, NUM_ISP_PADS, state->pads, 0);
+
+ if (!ret)
+ return 0;
+
+ media_entity_cleanup(&state->cis_sd.entity);
+err:
+ dev_err(&c->dev, "cannot init media entity %s\n", sd->name);
+ return ret;
+}
+
+static int s5k5baf_configure_regulators(struct s5k5baf *state)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+ int ret;
+ int i;
+
+ for (i = 0; i < S5K5BAF_NUM_SUPPLIES; i++)
+ state->supplies[i].supply = s5k5baf_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&c->dev, S5K5BAF_NUM_SUPPLIES,
+ state->supplies);
+ if (ret < 0)
+ v4l2_err(c, "failed to get regulators\n");
+ return ret;
+}
+
+static int s5k5baf_probe(struct i2c_client *c,
+ const struct i2c_device_id *id)
+{
+ struct s5k5baf *state;
+ int ret;
+
+ state = devm_kzalloc(&c->dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ mutex_init(&state->lock);
+ state->crop_sink = s5k5baf_cis_rect;
+ state->compose = s5k5baf_cis_rect;
+ state->crop_source = s5k5baf_cis_rect;
+
+ ret = s5k5baf_parse_device_node(state, &c->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = s5k5baf_configure_subdevs(state, c);
+ if (ret < 0)
+ return ret;
+
+ ret = s5k5baf_configure_gpios(state);
+ if (ret < 0)
+ goto err_me;
+
+ ret = s5k5baf_configure_regulators(state);
+ if (ret < 0)
+ goto err_me;
+
+ state->clock = devm_clk_get(state->sd.dev, S5K5BAF_CLK_NAME);
+ if (IS_ERR(state->clock)) {
+ ret = -EPROBE_DEFER;
+ goto err_me;
+ }
+
+ ret = s5k5baf_power_on(state);
+ if (ret < 0) {
+ ret = -EPROBE_DEFER;
+ goto err_me;
+ }
+ s5k5baf_hw_init(state);
+ ret = s5k5baf_check_fw_revision(state);
+
+ s5k5baf_power_off(state);
+ if (ret < 0)
+ goto err_me;
+
+ ret = s5k5baf_initialize_ctrls(state);
+ if (ret < 0)
+ goto err_me;
+
+ ret = v4l2_async_register_subdev(&state->sd);
+ if (ret < 0)
+ goto err_ctrl;
+
+ return 0;
+
+err_ctrl:
+ v4l2_ctrl_handler_free(state->sd.ctrl_handler);
+err_me:
+ media_entity_cleanup(&state->sd.entity);
+ media_entity_cleanup(&state->cis_sd.entity);
+ return ret;
+}
+
+static int s5k5baf_remove(struct i2c_client *c)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(c);
+ struct s5k5baf *state = to_s5k5baf(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ media_entity_cleanup(&sd->entity);
+
+ sd = &state->cis_sd;
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+
+ return 0;
+}
+
+static const struct i2c_device_id s5k5baf_id[] = {
+ { S5K5BAF_DRIVER_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, s5k5baf_id);
+
+static const struct of_device_id s5k5baf_of_match[] = {
+ { .compatible = "samsung,s5k5baf" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s5k5baf_of_match);
+
+static struct i2c_driver s5k5baf_i2c_driver = {
+ .driver = {
+ .of_match_table = s5k5baf_of_match,
+ .name = S5K5BAF_DRIVER_NAME
+ },
+ .probe = s5k5baf_probe,
+ .remove = s5k5baf_remove,
+ .id_table = s5k5baf_id,
+};
+
+module_i2c_driver(s5k5baf_i2c_driver);
+
+MODULE_DESCRIPTION("Samsung S5K5BAF(X) UXGA camera driver");
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c
index 70bc72e795d0..2960b5a8362a 100644
--- a/drivers/media/i2c/saa6588.c
+++ b/drivers/media/i2c/saa6588.c
@@ -150,14 +150,14 @@ static inline struct saa6588 *to_saa6588(struct v4l2_subdev *sd)
/* ---------------------------------------------------------------------- */
-static int block_to_user_buf(struct saa6588 *s, unsigned char __user *user_buf)
+static bool block_from_buf(struct saa6588 *s, unsigned char *buf)
{
int i;
if (s->rd_index == s->wr_index) {
if (debug > 2)
dprintk(PREFIX "Read: buffer empty.\n");
- return 0;
+ return false;
}
if (debug > 2) {
@@ -166,8 +166,7 @@ static int block_to_user_buf(struct saa6588 *s, unsigned char __user *user_buf)
dprintk("0x%02x ", s->buffer[i]);
}
- if (copy_to_user(user_buf, &s->buffer[s->rd_index], 3))
- return -EFAULT;
+ memcpy(buf, &s->buffer[s->rd_index], 3);
s->rd_index += 3;
if (s->rd_index >= s->buf_size)
@@ -177,22 +176,22 @@ static int block_to_user_buf(struct saa6588 *s, unsigned char __user *user_buf)
if (debug > 2)
dprintk("%d blocks total.\n", s->block_count);
- return 1;
+ return true;
}
static void read_from_buf(struct saa6588 *s, struct saa6588_command *a)
{
- unsigned long flags;
-
unsigned char __user *buf_ptr = a->buffer;
- unsigned int i;
+ unsigned char buf[3];
+ unsigned long flags;
unsigned int rd_blocks;
+ unsigned int i;
a->result = 0;
if (!a->buffer)
return;
- while (!s->data_available_for_read) {
+ while (!a->nonblocking && !s->data_available_for_read) {
int ret = wait_event_interruptible(s->read_queue,
s->data_available_for_read);
if (ret == -ERESTARTSYS) {
@@ -201,24 +200,31 @@ static void read_from_buf(struct saa6588 *s, struct saa6588_command *a)
}
}
- spin_lock_irqsave(&s->lock, flags);
rd_blocks = a->block_count;
+ spin_lock_irqsave(&s->lock, flags);
if (rd_blocks > s->block_count)
rd_blocks = s->block_count;
+ spin_unlock_irqrestore(&s->lock, flags);
- if (!rd_blocks) {
- spin_unlock_irqrestore(&s->lock, flags);
+ if (!rd_blocks)
return;
- }
for (i = 0; i < rd_blocks; i++) {
- if (block_to_user_buf(s, buf_ptr)) {
- buf_ptr += 3;
- a->result++;
- } else
+ bool got_block;
+
+ spin_lock_irqsave(&s->lock, flags);
+ got_block = block_from_buf(s, buf);
+ spin_unlock_irqrestore(&s->lock, flags);
+ if (!got_block)
break;
+ if (copy_to_user(buf_ptr, buf, 3)) {
+ a->result = -EFAULT;
+ return;
+ }
+ buf_ptr += 3;
+ a->result += 3;
}
- a->result *= 3;
+ spin_lock_irqsave(&s->lock, flags);
s->data_available_for_read = (s->block_count > 0);
spin_unlock_irqrestore(&s->lock, flags);
}
@@ -394,14 +400,11 @@ static long saa6588_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
struct saa6588_command *a = arg;
switch (cmd) {
- /* --- open() for /dev/radio --- */
- case SAA6588_CMD_OPEN:
- a->result = 0; /* return error if chip doesn't work ??? */
- break;
/* --- close() for /dev/radio --- */
case SAA6588_CMD_CLOSE:
s->data_available_for_read = 1;
wake_up_interruptible(&s->read_queue);
+ s->data_available_for_read = 0;
a->result = 0;
break;
/* --- read() for /dev/radio --- */
@@ -411,9 +414,8 @@ static long saa6588_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
/* --- poll() for /dev/radio --- */
case SAA6588_CMD_POLL:
a->result = 0;
- if (s->data_available_for_read) {
+ if (s->data_available_for_read)
a->result |= POLLIN | POLLRDNORM;
- }
poll_wait(a->instance, &s->read_queue, a->event_list);
break;
diff --git a/drivers/media/pci/saa7134/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index 8ac4b1f2322d..8272c0b9c5bf 100644
--- a/drivers/media/pci/saa7134/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -33,11 +33,11 @@
#include <linux/i2c.h>
#include <linux/types.h>
#include <linux/videodev2.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-common.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
#define MPEG_VIDEO_TARGET_BITRATE_MAX 27000
#define MPEG_VIDEO_MAX_BITRATE_MAX 27000
@@ -124,7 +124,7 @@ static inline struct saa6752hs_state *to_state(struct v4l2_subdev *sd)
/* ---------------------------------------------------------------------- */
-static u8 PAT[] = {
+static const u8 PAT[] = {
0xc2, /* i2c register */
0x00, /* table number for encoder */
@@ -150,7 +150,7 @@ static u8 PAT[] = {
0x00, 0x00, 0x00, 0x00 /* CRC32 */
};
-static u8 PMT[] = {
+static const u8 PMT[] = {
0xc2, /* i2c register */
0x01, /* table number for encoder */
@@ -179,7 +179,7 @@ static u8 PMT[] = {
0x00, 0x00, 0x00, 0x00 /* CRC32 */
};
-static u8 PMT_AC3[] = {
+static const u8 PMT_AC3[] = {
0xc2, /* i2c register */
0x01, /* table number for encoder(1) */
0x47, /* sync */
@@ -212,7 +212,7 @@ static u8 PMT_AC3[] = {
0xED, 0xDE, 0x2D, 0xF3 /* CRC32 BE */
};
-static struct saa6752hs_mpeg_params param_defaults =
+static const struct saa6752hs_mpeg_params param_defaults =
{
.ts_pid_pmt = 16,
.ts_pid_video = 260,
@@ -643,13 +643,6 @@ static const struct v4l2_ctrl_ops saa6752hs_ctrl_ops = {
static const struct v4l2_subdev_core_ops saa6752hs_core_ops = {
.init = saa6752hs_init,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
.s_std = saa6752hs_s_std,
};
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index ae66d91bf713..8741cae9c9f2 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -399,7 +399,6 @@ static void smiapp_update_mbus_formats(struct smiapp_sensor *sensor)
BUG_ON(max(internal_csi_format_idx, csi_format_idx) + pixel_order
>= ARRAY_SIZE(smiapp_csi_data_formats));
- BUG_ON(min(internal_csi_format_idx, csi_format_idx) < 0);
dev_dbg(&client->dev, "new pixel order %s\n",
pixel_order_str[pixel_order]);
@@ -2028,8 +2027,8 @@ static int smiapp_set_crop(struct v4l2_subdev *subdev,
sel->r.width = min(sel->r.width, src_size->width);
sel->r.height = min(sel->r.height, src_size->height);
- sel->r.left = min(sel->r.left, src_size->width - sel->r.width);
- sel->r.top = min(sel->r.top, src_size->height - sel->r.height);
+ sel->r.left = min_t(int, sel->r.left, src_size->width - sel->r.width);
+ sel->r.top = min_t(int, sel->r.top, src_size->height - sel->r.height);
*crops[sel->pad] = sel->r;
@@ -2121,8 +2120,8 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
sel->r.left = max(0, sel->r.left & ~1);
sel->r.top = max(0, sel->r.top & ~1);
- sel->r.width = max(0, SMIAPP_ALIGN_DIM(sel->r.width, sel->flags));
- sel->r.height = max(0, SMIAPP_ALIGN_DIM(sel->r.height, sel->flags));
+ sel->r.width = SMIAPP_ALIGN_DIM(sel->r.width, sel->flags);
+ sel->r.height = SMIAPP_ALIGN_DIM(sel->r.height, sel->flags);
sel->r.width = max_t(unsigned int,
sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE],
diff --git a/drivers/media/i2c/soc_camera/mt9m111.c b/drivers/media/i2c/soc_camera/mt9m111.c
index 6f4056668bbc..ccf59406a172 100644
--- a/drivers/media/i2c/soc_camera/mt9m111.c
+++ b/drivers/media/i2c/soc_camera/mt9m111.c
@@ -208,8 +208,8 @@ struct mt9m111 {
struct mt9m111_context *ctx;
struct v4l2_rect rect; /* cropping rectangle */
struct v4l2_clk *clk;
- int width; /* output */
- int height; /* sizes */
+ unsigned int width; /* output */
+ unsigned int height; /* sizes */
struct mutex power_lock; /* lock to protect power_count */
int power_count;
const struct mt9m111_datafmt *fmt;
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 89c0b13463b7..542d2528b3f9 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -58,21 +58,17 @@ static int tvp5150_read(struct v4l2_subdev *sd, unsigned char addr)
struct i2c_client *c = v4l2_get_subdevdata(sd);
unsigned char buffer[1];
int rc;
-
- buffer[0] = addr;
-
- rc = i2c_master_send(c, buffer, 1);
- if (rc < 0) {
- v4l2_err(sd, "i2c i/o error: rc == %d (should be 1)\n", rc);
- return rc;
- }
-
- msleep(10);
-
- rc = i2c_master_recv(c, buffer, 1);
- if (rc < 0) {
- v4l2_err(sd, "i2c i/o error: rc == %d (should be 1)\n", rc);
- return rc;
+ struct i2c_msg msg[] = {
+ { .addr = c->addr, .flags = 0,
+ .buf = &addr, .len = 1 },
+ { .addr = c->addr, .flags = I2C_M_RD,
+ .buf = buffer, .len = 1 }
+ };
+
+ rc = i2c_transfer(c->adapter, msg, 2);
+ if (rc < 0 || rc != 2) {
+ v4l2_err(sd, "i2c i/o error: rc == %d (should be 2)\n", rc);
+ return rc < 0 ? rc : -EIO;
}
v4l2_dbg(2, debug, sd, "tvp5150: read 0x%02x = 0x%02x\n", addr, buffer[0]);
@@ -867,7 +863,7 @@ static int tvp5150_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
struct v4l2_rect rect = a->c;
struct tvp5150 *decoder = to_tvp5150(sd);
v4l2_std_id std;
- int hmax;
+ unsigned int hmax;
v4l2_dbg(1, debug, sd, "%s left=%d, top=%d, width=%d, height=%d\n",
__func__, rect.left, rect.top, rect.width, rect.height);
@@ -877,9 +873,9 @@ static int tvp5150_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
/* tvp5150 has some special limits */
rect.left = clamp(rect.left, 0, TVP5150_MAX_CROP_LEFT);
- rect.width = clamp(rect.width,
- TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
- TVP5150_H_MAX - rect.left);
+ rect.width = clamp_t(unsigned int, rect.width,
+ TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
+ TVP5150_H_MAX - rect.left);
rect.top = clamp(rect.top, 0, TVP5150_MAX_CROP_TOP);
/* Calculate height based on current standard */
@@ -893,9 +889,9 @@ static int tvp5150_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
else
hmax = TVP5150_V_MAX_OTHERS;
- rect.height = clamp(rect.height,
- hmax - TVP5150_MAX_CROP_TOP - rect.top,
- hmax - rect.top);
+ rect.height = clamp_t(unsigned int, rect.height,
+ hmax - TVP5150_MAX_CROP_TOP - rect.top,
+ hmax - rect.top);
tvp5150_write(sd, TVP5150_VERT_BLANKING_START, rect.top);
tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP,
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index 25bdd9312fea..23f4f65fccd7 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -503,6 +503,7 @@ static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
return &container_of(ctrl->handler, struct vs6624, hdl)->sd;
}
+#ifdef CONFIG_VIDEO_ADV_DEBUG
static int vs6624_read(struct v4l2_subdev *sd, u16 index)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -515,6 +516,7 @@ static int vs6624_read(struct v4l2_subdev *sd, u16 index)
return buf[0];
}
+#endif
static int vs6624_write(struct v4l2_subdev *sd, u16 index,
u8 value)
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 2c286c307145..37c334edc7e8 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -235,6 +235,8 @@ __must_check int media_entity_pipeline_start(struct media_entity *entity,
media_entity_graph_walk_start(&graph, entity);
while ((entity = media_entity_graph_walk_next(&graph))) {
+ DECLARE_BITMAP(active, entity->num_pads);
+ DECLARE_BITMAP(has_no_links, entity->num_pads);
unsigned int i;
entity->stream_count++;
@@ -248,21 +250,46 @@ __must_check int media_entity_pipeline_start(struct media_entity *entity,
if (!entity->ops || !entity->ops->link_validate)
continue;
+ bitmap_zero(active, entity->num_pads);
+ bitmap_fill(has_no_links, entity->num_pads);
+
for (i = 0; i < entity->num_links; i++) {
struct media_link *link = &entity->links[i];
-
- /* Is this pad part of an enabled link? */
- if (!(link->flags & MEDIA_LNK_FL_ENABLED))
- continue;
-
- /* Are we the sink or not? */
- if (link->sink->entity != entity)
+ struct media_pad *pad = link->sink->entity == entity
+ ? link->sink : link->source;
+
+ /* Mark that a pad is connected by a link. */
+ bitmap_clear(has_no_links, pad->index, 1);
+
+ /*
+ * Pads that either do not need to connect or
+ * are connected through an enabled link are
+ * fine.
+ */
+ if (!(pad->flags & MEDIA_PAD_FL_MUST_CONNECT) ||
+ link->flags & MEDIA_LNK_FL_ENABLED)
+ bitmap_set(active, pad->index, 1);
+
+ /*
+ * Link validation will only take place for
+ * sink ends of the link that are enabled.
+ */
+ if (link->sink != pad ||
+ !(link->flags & MEDIA_LNK_FL_ENABLED))
continue;
ret = entity->ops->link_validate(link);
if (ret < 0 && ret != -ENOIOCTLCMD)
goto error;
}
+
+ /* Either no links or validated links are fine. */
+ bitmap_or(active, active, has_no_links, entity->num_pads);
+
+ if (!bitmap_full(active, entity->num_pads)) {
+ ret = -EPIPE;
+ goto error;
+ }
}
mutex_unlock(&mdev->graph_mutex);
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index d85cb0ace4dc..6662b495b22c 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -2426,7 +2426,7 @@ struct tvcard bttv_tvcards[] = {
},
/* ---- card 0x87---------------------------------- */
[BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE] = {
- /* Michael Krufky <mkrufky@m1k.net> */
+ /* Michael Krufky <mkrufky@linuxtv.org> */
.name = "DViCO FusionHDTV 5 Lite",
.tuner_type = TUNER_LG_TDVS_H06XF, /* TDVS-H064F */
.tuner_addr = ADDR_UNSET,
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 92a06fd85865..afcd53bfcf8e 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -1126,9 +1126,9 @@ bttv_crop_calc_limits(struct bttv_crop *c)
c->min_scaled_height = 32;
} else {
c->min_scaled_width =
- (max(48, c->rect.width >> 4) + 3) & ~3;
+ (max_t(unsigned int, 48, c->rect.width >> 4) + 3) & ~3;
c->min_scaled_height =
- max(32, c->rect.height >> 4);
+ max_t(unsigned int, 32, c->rect.height >> 4);
}
c->max_scaled_width = c->rect.width & ~3;
@@ -2024,7 +2024,7 @@ limit_scaled_size_lock (struct bttv_fh * fh,
/* We cannot scale up. When the scaled image is larger
than crop.rect we adjust the crop.rect as required
by the V4L2 spec, hence cropcap.bounds are our limit. */
- max_width = min(b->width, (__s32) MAX_HACTIVE);
+ max_width = min_t(unsigned int, b->width, MAX_HACTIVE);
max_height = b->height;
/* We cannot capture the same line as video and VBI data.
@@ -3266,7 +3266,9 @@ static ssize_t radio_read(struct file *file, char __user *data,
struct bttv_fh *fh = file->private_data;
struct bttv *btv = fh->btv;
struct saa6588_command cmd;
- cmd.block_count = count/3;
+
+ cmd.block_count = count / 3;
+ cmd.nonblocking = file->f_flags & O_NONBLOCK;
cmd.buffer = data;
cmd.instance = file;
cmd.result = -ENODEV;
diff --git a/drivers/media/pci/bt8xx/bttv-gpio.c b/drivers/media/pci/bt8xx/bttv-gpio.c
index 922e8233fd0b..3f364b7062b9 100644
--- a/drivers/media/pci/bt8xx/bttv-gpio.c
+++ b/drivers/media/pci/bt8xx/bttv-gpio.c
@@ -98,7 +98,7 @@ int bttv_sub_add_device(struct bttv_core *core, char *name)
err = device_register(&sub->dev);
if (0 != err) {
- kfree(sub);
+ put_device(&sub->dev);
return err;
}
pr_info("%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev));
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index c1f8cc6f14b2..716bdc57fac6 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -327,13 +327,16 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
struct i2c_client *c;
u8 eedata[256];
+ memset(tv, 0, sizeof(*tv));
+
c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return;
strlcpy(c->name, "cx18 tveeprom tmp", sizeof(c->name));
c->adapter = &cx->i2c_adap[0];
c->addr = 0xa0 >> 1;
- memset(tv, 0, sizeof(*tv));
if (tveeprom_read(c, eedata, sizeof(eedata)))
goto ret;
diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c
index 6e91e84d6bf9..b1e08c3e55cd 100644
--- a/drivers/media/pci/cx25821/cx25821-alsa.c
+++ b/drivers/media/pci/cx25821/cx25821-alsa.c
@@ -618,7 +618,7 @@ static int snd_cx25821_pcm(struct cx25821_audio_dev *chip, int device,
* Only boards with eeprom and byte 1 at eeprom=1 have it
*/
-static DEFINE_PCI_DEVICE_TABLE(cx25821_audio_pci_tbl) = {
+static const struct pci_device_id cx25821_audio_pci_tbl[] = {
{0x14f1, 0x0920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index b762c5b2ca10..e81173c41e5a 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -1361,7 +1361,7 @@ static void cx25821_finidev(struct pci_dev *pci_dev)
kfree(dev);
}
-static DEFINE_PCI_DEVICE_TABLE(cx25821_pci_tbl) = {
+static const struct pci_device_id cx25821_pci_tbl[] = {
{
/* CX25821 Athena */
.vendor = 0x14f1,
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 400eb1c42d3f..d014206e7176 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -931,9 +931,9 @@ error:
*/
static void cx88_audio_finidev(struct pci_dev *pci)
{
- struct cx88_audio_dev *card = pci_get_drvdata(pci);
+ struct snd_card *card = pci_get_drvdata(pci);
- snd_card_free((void *)card);
+ snd_card_free(card);
devno--;
}
diff --git a/drivers/media/pci/saa7134/Kconfig b/drivers/media/pci/saa7134/Kconfig
index 15b90d6e9130..7883393571e5 100644
--- a/drivers/media/pci/saa7134/Kconfig
+++ b/drivers/media/pci/saa7134/Kconfig
@@ -6,6 +6,7 @@ config VIDEO_SAA7134
select VIDEO_TVEEPROM
select CRC32
select VIDEO_SAA6588 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_SAA6752HS if MEDIA_SUBDRV_AUTOSELECT
---help---
This is a video4linux driver for Philips SAA713x based
TV cards.
diff --git a/drivers/media/pci/saa7134/Makefile b/drivers/media/pci/saa7134/Makefile
index 35375480ed4d..58de9b085689 100644
--- a/drivers/media/pci/saa7134/Makefile
+++ b/drivers/media/pci/saa7134/Makefile
@@ -4,7 +4,7 @@ saa7134-y += saa7134-ts.o saa7134-tvaudio.o saa7134-vbi.o
saa7134-y += saa7134-video.o
saa7134-$(CONFIG_VIDEO_SAA7134_RC) += saa7134-input.o
-obj-$(CONFIG_VIDEO_SAA7134) += saa6752hs.o saa7134.o saa7134-empress.o
+obj-$(CONFIG_VIDEO_SAA7134) += saa7134.o saa7134-empress.o
obj-$(CONFIG_VIDEO_SAA7134_ALSA) += saa7134-alsa.o
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index d45e7f6ff332..c9b2350e92c8 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -2590,7 +2590,7 @@ struct saa7134_board saa7134_boards[] = {
}},
},
[SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180] = {
- /* Michael Krufky <mkrufky@m1k.net>
+ /* Michael Krufky <mkrufky@linuxtv.org>
* Uses Alps Electric TDHU2, containing NXT2004 ATSC Decoder
* AFAIK, there is no analog demod, thus,
* no support for analog television.
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 27d7ee709c58..1362b4aab473 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -751,6 +751,7 @@ static int saa7134_hwfini(struct saa7134_dev *dev)
saa7134_input_fini(dev);
saa7134_vbi_fini(dev);
saa7134_tvaudio_fini(dev);
+ saa7134_video_fini(dev);
return 0;
}
@@ -802,7 +803,6 @@ static struct video_device *vdev_init(struct saa7134_dev *dev,
*vfd = *template;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
- vfd->debug = video_debug;
snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
dev->name, type, saa7134_boards[dev->board].name);
set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
@@ -1008,13 +1008,13 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
/* load i2c helpers */
if (card_is_empress(dev)) {
- struct v4l2_subdev *sd =
+ dev->empress_sd =
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
"saa6752hs",
saa7134_boards[dev->board].empress_addr, NULL);
- if (sd)
- sd->grp_id = GRP_EMPRESS;
+ if (dev->empress_sd)
+ dev->empress_sd->grp_id = GRP_EMPRESS;
}
if (saa7134_boards[dev->board].rds_addr) {
@@ -1046,6 +1046,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
printk(KERN_INFO "%s: Overlay support disabled.\n", dev->name);
dev->video_dev = vdev_init(dev,&saa7134_video_template,"video");
+ dev->video_dev->ctrl_handler = &dev->ctrl_handler;
err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER,
video_nr[dev->nr]);
if (err < 0) {
@@ -1057,6 +1058,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
dev->name, video_device_node_name(dev->video_dev));
dev->vbi_dev = vdev_init(dev, &saa7134_video_template, "vbi");
+ dev->vbi_dev->ctrl_handler = &dev->ctrl_handler;
err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
vbi_nr[dev->nr]);
@@ -1067,6 +1069,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
if (card_has_radio(dev)) {
dev->radio_dev = vdev_init(dev,&saa7134_radio_template,"radio");
+ dev->radio_dev->ctrl_handler = &dev->radio_ctrl_handler;
err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO,
radio_nr[dev->nr]);
if (err < 0)
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index 3022eb2a7925..0a9047e754b9 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -23,12 +23,12 @@
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+
#include "saa7134-reg.h"
#include "saa7134.h"
-#include <media/saa6752hs.h>
-#include <media/v4l2-common.h>
-
/* ------------------------------------------------------------------ */
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
@@ -85,52 +85,54 @@ static int ts_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct saa7134_dev *dev = video_drvdata(file);
- int err;
+ struct saa7134_fh *fh;
- dprintk("open dev=%s\n", video_device_node_name(vdev));
- err = -EBUSY;
- if (!mutex_trylock(&dev->empress_tsq.vb_lock))
- return err;
- if (atomic_read(&dev->empress_users))
- goto done;
+ /* allocate + initialize per filehandle data */
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (NULL == fh)
+ return -ENOMEM;
+
+ v4l2_fh_init(&fh->fh, vdev);
+ file->private_data = fh;
+ fh->is_empress = true;
+ v4l2_fh_add(&fh->fh);
/* Unmute audio */
saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
saa_readb(SAA7134_AUDIO_MUTE_CTRL) & ~(1 << 6));
- atomic_inc(&dev->empress_users);
- file->private_data = dev;
- err = 0;
-
-done:
- mutex_unlock(&dev->empress_tsq.vb_lock);
- return err;
+ return 0;
}
static int ts_release(struct file *file)
{
- struct saa7134_dev *dev = file->private_data;
+ struct saa7134_dev *dev = video_drvdata(file);
+ struct saa7134_fh *fh = file->private_data;
- videobuf_stop(&dev->empress_tsq);
- videobuf_mmap_free(&dev->empress_tsq);
+ if (res_check(fh, RESOURCE_EMPRESS)) {
+ videobuf_stop(&dev->empress_tsq);
+ videobuf_mmap_free(&dev->empress_tsq);
- /* stop the encoder */
- ts_reset_encoder(dev);
+ /* stop the encoder */
+ ts_reset_encoder(dev);
- /* Mute audio */
- saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
- saa_readb(SAA7134_AUDIO_MUTE_CTRL) | (1 << 6));
-
- atomic_dec(&dev->empress_users);
+ /* Mute audio */
+ saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
+ saa_readb(SAA7134_AUDIO_MUTE_CTRL) | (1 << 6));
+ }
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
return 0;
}
static ssize_t
ts_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
- struct saa7134_dev *dev = file->private_data;
+ struct saa7134_dev *dev = video_drvdata(file);
+ if (res_locked(dev, RESOURCE_EMPRESS))
+ return -EBUSY;
if (!dev->empress_started)
ts_init_encoder(dev);
@@ -142,68 +144,27 @@ ts_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
static unsigned int
ts_poll(struct file *file, struct poll_table_struct *wait)
{
- struct saa7134_dev *dev = file->private_data;
+ unsigned long req_events = poll_requested_events(wait);
+ struct saa7134_dev *dev = video_drvdata(file);
+ struct saa7134_fh *fh = file->private_data;
+ unsigned int rc = 0;
- return videobuf_poll_stream(file, &dev->empress_tsq, wait);
+ if (v4l2_event_pending(&fh->fh))
+ rc = POLLPRI;
+ else if (req_events & POLLPRI)
+ poll_wait(file, &fh->fh.wait, wait);
+ return rc | videobuf_poll_stream(file, &dev->empress_tsq, wait);
}
static int
ts_mmap(struct file *file, struct vm_area_struct * vma)
{
- struct saa7134_dev *dev = file->private_data;
+ struct saa7134_dev *dev = video_drvdata(file);
return videobuf_mmap_mapper(&dev->empress_tsq, vma);
}
-/*
- * This function is _not_ called directly, but from
- * video_generic_ioctl (and maybe others). userspace
- * copying is done already, arg is a kernel pointer.
- */
-
-static int empress_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct saa7134_dev *dev = file->private_data;
-
- strcpy(cap->driver, "saa7134");
- strlcpy(cap->card, saa7134_boards[dev->board].name,
- sizeof(cap->card));
- sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
- cap->capabilities =
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
- return 0;
-}
-
-static int empress_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
-{
- if (i->index != 0)
- return -EINVAL;
-
- i->type = V4L2_INPUT_TYPE_CAMERA;
- strcpy(i->name, "CCIR656");
-
- return 0;
-}
-
-static int empress_g_input(struct file *file, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int empress_s_input(struct file *file, void *priv, unsigned int i)
-{
- if (i != 0)
- return -EINVAL;
-
- return 0;
-}
-
static int empress_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
@@ -219,7 +180,7 @@ static int empress_enum_fmt_vid_cap(struct file *file, void *priv,
static int empress_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_dev *dev = file->private_data;
+ struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_mbus_framefmt mbus_fmt;
saa_call_all(dev, video, g_mbus_fmt, &mbus_fmt);
@@ -236,7 +197,7 @@ static int empress_g_fmt_vid_cap(struct file *file, void *priv,
static int empress_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_dev *dev = file->private_data;
+ struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_mbus_framefmt mbus_fmt;
v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED);
@@ -254,7 +215,7 @@ static int empress_s_fmt_vid_cap(struct file *file, void *priv,
static int empress_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_dev *dev = file->private_data;
+ struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_mbus_framefmt mbus_fmt;
v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED);
@@ -269,175 +230,6 @@ static int empress_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int empress_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return videobuf_reqbufs(&dev->empress_tsq, p);
-}
-
-static int empress_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *b)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return videobuf_querybuf(&dev->empress_tsq, b);
-}
-
-static int empress_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return videobuf_qbuf(&dev->empress_tsq, b);
-}
-
-static int empress_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return videobuf_dqbuf(&dev->empress_tsq, b,
- file->f_flags & O_NONBLOCK);
-}
-
-static int empress_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return videobuf_streamon(&dev->empress_tsq);
-}
-
-static int empress_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return videobuf_streamoff(&dev->empress_tsq);
-}
-
-static int empress_s_ext_ctrls(struct file *file, void *priv,
- struct v4l2_ext_controls *ctrls)
-{
- struct saa7134_dev *dev = file->private_data;
- int err;
-
- /* count == 0 is abused in saa6752hs.c, so that special
- case is handled here explicitly. */
- if (ctrls->count == 0)
- return 0;
-
- if (ctrls->ctrl_class != V4L2_CTRL_CLASS_MPEG)
- return -EINVAL;
-
- err = saa_call_empress(dev, core, s_ext_ctrls, ctrls);
- ts_init_encoder(dev);
-
- return err;
-}
-
-static int empress_g_ext_ctrls(struct file *file, void *priv,
- struct v4l2_ext_controls *ctrls)
-{
- struct saa7134_dev *dev = file->private_data;
-
- if (ctrls->ctrl_class != V4L2_CTRL_CLASS_MPEG)
- return -EINVAL;
- return saa_call_empress(dev, core, g_ext_ctrls, ctrls);
-}
-
-static int empress_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *c)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return saa7134_g_ctrl_internal(dev, NULL, c);
-}
-
-static int empress_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *c)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return saa7134_s_ctrl_internal(dev, NULL, c);
-}
-
-static int empress_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *c)
-{
- /* Must be sorted from low to high control ID! */
- static const u32 user_ctrls[] = {
- V4L2_CID_USER_CLASS,
- V4L2_CID_BRIGHTNESS,
- V4L2_CID_CONTRAST,
- V4L2_CID_SATURATION,
- V4L2_CID_HUE,
- V4L2_CID_AUDIO_VOLUME,
- V4L2_CID_AUDIO_MUTE,
- V4L2_CID_HFLIP,
- 0
- };
-
- /* Must be sorted from low to high control ID! */
- static const u32 mpeg_ctrls[] = {
- V4L2_CID_MPEG_CLASS,
- V4L2_CID_MPEG_STREAM_TYPE,
- V4L2_CID_MPEG_STREAM_PID_PMT,
- V4L2_CID_MPEG_STREAM_PID_AUDIO,
- V4L2_CID_MPEG_STREAM_PID_VIDEO,
- V4L2_CID_MPEG_STREAM_PID_PCR,
- V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ,
- V4L2_CID_MPEG_AUDIO_ENCODING,
- V4L2_CID_MPEG_AUDIO_L2_BITRATE,
- V4L2_CID_MPEG_VIDEO_ENCODING,
- V4L2_CID_MPEG_VIDEO_ASPECT,
- V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
- V4L2_CID_MPEG_VIDEO_BITRATE,
- V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
- 0
- };
- static const u32 *ctrl_classes[] = {
- user_ctrls,
- mpeg_ctrls,
- NULL
- };
- struct saa7134_dev *dev = file->private_data;
-
- c->id = v4l2_ctrl_next(ctrl_classes, c->id);
- if (c->id == 0)
- return -EINVAL;
- if (c->id == V4L2_CID_USER_CLASS || c->id == V4L2_CID_MPEG_CLASS)
- return v4l2_ctrl_query_fill(c, 0, 0, 0, 0);
- if (V4L2_CTRL_ID2CLASS(c->id) != V4L2_CTRL_CLASS_MPEG)
- return saa7134_queryctrl(file, priv, c);
- return saa_call_empress(dev, core, queryctrl, c);
-}
-
-static int empress_querymenu(struct file *file, void *priv,
- struct v4l2_querymenu *c)
-{
- struct saa7134_dev *dev = file->private_data;
-
- if (V4L2_CTRL_ID2CLASS(c->id) != V4L2_CTRL_CLASS_MPEG)
- return -EINVAL;
- return saa_call_empress(dev, core, querymenu, c);
-}
-
-static int empress_s_std(struct file *file, void *priv, v4l2_std_id id)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return saa7134_s_std_internal(dev, NULL, id);
-}
-
-static int empress_g_std(struct file *file, void *priv, v4l2_std_id *id)
-{
- struct saa7134_dev *dev = file->private_data;
-
- *id = dev->tvnorm->id;
- return 0;
-}
-
static const struct v4l2_file_operations ts_fops =
{
.owner = THIS_MODULE,
@@ -450,28 +242,29 @@ static const struct v4l2_file_operations ts_fops =
};
static const struct v4l2_ioctl_ops ts_ioctl_ops = {
- .vidioc_querycap = empress_querycap,
+ .vidioc_querycap = saa7134_querycap,
.vidioc_enum_fmt_vid_cap = empress_enum_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = empress_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = empress_s_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = empress_g_fmt_vid_cap,
- .vidioc_reqbufs = empress_reqbufs,
- .vidioc_querybuf = empress_querybuf,
- .vidioc_qbuf = empress_qbuf,
- .vidioc_dqbuf = empress_dqbuf,
- .vidioc_streamon = empress_streamon,
- .vidioc_streamoff = empress_streamoff,
- .vidioc_s_ext_ctrls = empress_s_ext_ctrls,
- .vidioc_g_ext_ctrls = empress_g_ext_ctrls,
- .vidioc_enum_input = empress_enum_input,
- .vidioc_g_input = empress_g_input,
- .vidioc_s_input = empress_s_input,
- .vidioc_queryctrl = empress_queryctrl,
- .vidioc_querymenu = empress_querymenu,
- .vidioc_g_ctrl = empress_g_ctrl,
- .vidioc_s_ctrl = empress_s_ctrl,
- .vidioc_s_std = empress_s_std,
- .vidioc_g_std = empress_g_std,
+ .vidioc_reqbufs = saa7134_reqbufs,
+ .vidioc_querybuf = saa7134_querybuf,
+ .vidioc_qbuf = saa7134_qbuf,
+ .vidioc_dqbuf = saa7134_dqbuf,
+ .vidioc_streamon = saa7134_streamon,
+ .vidioc_streamoff = saa7134_streamoff,
+ .vidioc_g_frequency = saa7134_g_frequency,
+ .vidioc_s_frequency = saa7134_s_frequency,
+ .vidioc_g_tuner = saa7134_g_tuner,
+ .vidioc_s_tuner = saa7134_s_tuner,
+ .vidioc_enum_input = saa7134_enum_input,
+ .vidioc_g_input = saa7134_g_input,
+ .vidioc_s_input = saa7134_s_input,
+ .vidioc_s_std = saa7134_s_std,
+ .vidioc_g_std = saa7134_g_std,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/* ----------------------------------------------------------- */
@@ -501,9 +294,26 @@ static void empress_signal_change(struct saa7134_dev *dev)
schedule_work(&dev->empress_workqueue);
}
+static bool empress_ctrl_filter(const struct v4l2_ctrl *ctrl)
+{
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ case V4L2_CID_HUE:
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_SATURATION:
+ case V4L2_CID_AUDIO_MUTE:
+ case V4L2_CID_AUDIO_VOLUME:
+ case V4L2_CID_PRIVATE_INVERT:
+ case V4L2_CID_PRIVATE_AUTOMUTE:
+ return true;
+ default:
+ return false;
+ }
+}
static int empress_init(struct saa7134_dev *dev)
{
+ struct v4l2_ctrl_handler *hdl = &dev->empress_ctrl_handler;
int err;
dprintk("%s: %s\n",dev->name,__func__);
@@ -516,6 +326,16 @@ static int empress_init(struct saa7134_dev *dev)
snprintf(dev->empress_dev->name, sizeof(dev->empress_dev->name),
"%s empress (%s)", dev->name,
saa7134_boards[dev->board].name);
+ set_bit(V4L2_FL_USE_FH_PRIO, &dev->empress_dev->flags);
+ v4l2_ctrl_handler_init(hdl, 21);
+ v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler, empress_ctrl_filter);
+ if (dev->empress_sd)
+ v4l2_ctrl_add_handler(hdl, dev->empress_sd->ctrl_handler, NULL);
+ if (hdl->error) {
+ video_device_release(dev->empress_dev);
+ return hdl->error;
+ }
+ dev->empress_dev->ctrl_handler = hdl;
INIT_WORK(&dev->empress_workqueue, empress_signal_update);
@@ -551,6 +371,7 @@ static int empress_fini(struct saa7134_dev *dev)
return 0;
flush_work(&dev->empress_workqueue);
video_unregister_device(dev->empress_dev);
+ v4l2_ctrl_handler_free(&dev->empress_ctrl_handler);
dev->empress_dev = NULL;
return 0;
}
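/* The ts_open()/ts_release()/ts_poll() hunks above move the empress node
 * onto the standard v4l2_fh file-handle object so that control events can
 * be delivered per open file.  Below is a minimal sketch of that
 * open/release/poll pattern under the same framework, not the driver's own
 * code: the demo_* names are hypothetical, while v4l2_fh_init/add/del/exit,
 * v4l2_event_pending() and poll_requested_events() are the real calls the
 * patch uses.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

struct demo_fh {
	struct v4l2_fh fh;	/* embedded file-handle object */
};

static int demo_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct demo_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);

	if (!fh)
		return -ENOMEM;
	v4l2_fh_init(&fh->fh, vdev);
	file->private_data = fh;
	v4l2_fh_add(&fh->fh);	/* from now on events are queued for this fh */
	return 0;
}

static int demo_release(struct file *file)
{
	struct demo_fh *fh = file->private_data;

	v4l2_fh_del(&fh->fh);
	v4l2_fh_exit(&fh->fh);
	kfree(fh);
	return 0;
}

static unsigned int demo_poll(struct file *file, struct poll_table_struct *wait)
{
	struct demo_fh *fh = file->private_data;
	unsigned int rc = 0;

	if (v4l2_event_pending(&fh->fh))
		rc = POLLPRI;		/* a queued event (e.g. control change) */
	else if (poll_requested_events(wait) & POLLPRI)
		poll_wait(file, &fh->fh.wait, wait);
	return rc;
}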
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index e9aa94b807f1..d4da18d049f3 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -117,8 +117,7 @@ static int buffer_prepare(struct videobuf_queue *q,
struct videobuf_buffer *vb,
enum v4l2_field field)
{
- struct saa7134_fh *fh = q->priv_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = q->priv_data;
struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb);
struct saa7134_tvnorm *norm = dev->tvnorm;
unsigned int lines, llength, size;
@@ -141,7 +140,7 @@ static int buffer_prepare(struct videobuf_queue *q,
buf->vb.width = llength;
buf->vb.height = lines;
buf->vb.size = size;
- buf->pt = &fh->pt_vbi;
+ buf->pt = &dev->pt_vbi;
err = videobuf_iolock(q,&buf->vb,NULL);
if (err)
@@ -166,8 +165,7 @@ static int buffer_prepare(struct videobuf_queue *q,
static int
buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
{
- struct saa7134_fh *fh = q->priv_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = q->priv_data;
int llength,lines;
lines = dev->tvnorm->vbi_v_stop_0 - dev->tvnorm->vbi_v_start_0 +1;
@@ -181,8 +179,7 @@ buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
- struct saa7134_fh *fh = q->priv_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = q->priv_data;
struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb);
saa7134_buffer_queue(dev,&dev->vbi_q,buf);
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index fb60da85bc2c..eb472b5b26a0 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -27,11 +27,13 @@
#include <linux/slab.h>
#include <linux/sort.h>
-#include "saa7134-reg.h"
-#include "saa7134.h"
#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
#include <media/saa6588.h>
+#include "saa7134-reg.h"
+#include "saa7134.h"
+
/* ------------------------------------------------------------------ */
unsigned int video_debug;
@@ -369,117 +371,6 @@ static struct saa7134_tvnorm tvnorms[] = {
};
#define TVNORMS ARRAY_SIZE(tvnorms)
-#define V4L2_CID_PRIVATE_INVERT (V4L2_CID_PRIVATE_BASE + 0)
-#define V4L2_CID_PRIVATE_Y_ODD (V4L2_CID_PRIVATE_BASE + 1)
-#define V4L2_CID_PRIVATE_Y_EVEN (V4L2_CID_PRIVATE_BASE + 2)
-#define V4L2_CID_PRIVATE_AUTOMUTE (V4L2_CID_PRIVATE_BASE + 3)
-#define V4L2_CID_PRIVATE_LASTP1 (V4L2_CID_PRIVATE_BASE + 4)
-
-static const struct v4l2_queryctrl no_ctrl = {
- .name = "42",
- .flags = V4L2_CTRL_FLAG_DISABLED,
-};
-static const struct v4l2_queryctrl video_ctrls[] = {
- /* --- video --- */
- {
- .id = V4L2_CID_BRIGHTNESS,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 128,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },{
- .id = V4L2_CID_CONTRAST,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 127,
- .step = 1,
- .default_value = 68,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },{
- .id = V4L2_CID_SATURATION,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 127,
- .step = 1,
- .default_value = 64,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },{
- .id = V4L2_CID_HUE,
- .name = "Hue",
- .minimum = -128,
- .maximum = 127,
- .step = 1,
- .default_value = 0,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },{
- .id = V4L2_CID_HFLIP,
- .name = "Mirror",
- .minimum = 0,
- .maximum = 1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- },
- /* --- audio --- */
- {
- .id = V4L2_CID_AUDIO_MUTE,
- .name = "Mute",
- .minimum = 0,
- .maximum = 1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- },{
- .id = V4L2_CID_AUDIO_VOLUME,
- .name = "Volume",
- .minimum = -15,
- .maximum = 15,
- .step = 1,
- .default_value = 0,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },
- /* --- private --- */
- {
- .id = V4L2_CID_PRIVATE_INVERT,
- .name = "Invert",
- .minimum = 0,
- .maximum = 1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- },{
- .id = V4L2_CID_PRIVATE_Y_ODD,
- .name = "y offset odd field",
- .minimum = 0,
- .maximum = 128,
- .step = 1,
- .default_value = 0,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },{
- .id = V4L2_CID_PRIVATE_Y_EVEN,
- .name = "y offset even field",
- .minimum = 0,
- .maximum = 128,
- .step = 1,
- .default_value = 0,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },{
- .id = V4L2_CID_PRIVATE_AUTOMUTE,
- .name = "automute",
- .minimum = 0,
- .maximum = 1,
- .default_value = 1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- }
-};
-static const unsigned int CTRLS = ARRAY_SIZE(video_ctrls);
-
-static const struct v4l2_queryctrl* ctrl_by_id(unsigned int id)
-{
- unsigned int i;
-
- for (i = 0; i < CTRLS; i++)
- if (video_ctrls[i].id == id)
- return video_ctrls+i;
- return NULL;
-}
-
static struct saa7134_format* format_by_fourcc(unsigned int fourcc)
{
unsigned int i;
@@ -514,16 +405,6 @@ static int res_get(struct saa7134_dev *dev, struct saa7134_fh *fh, unsigned int
return 1;
}
-static int res_check(struct saa7134_fh *fh, unsigned int bit)
-{
- return (fh->resources & bit);
-}
-
-static int res_locked(struct saa7134_dev *dev, unsigned int bit)
-{
- return (dev->resources & bit);
-}
-
static
void res_free(struct saa7134_dev *dev, struct saa7134_fh *fh, unsigned int bits)
{
@@ -868,7 +749,7 @@ static int verify_preview(struct saa7134_dev *dev, struct v4l2_window *win, bool
return 0;
}
-static int start_preview(struct saa7134_dev *dev, struct saa7134_fh *fh)
+static int start_preview(struct saa7134_dev *dev)
{
unsigned long base,control,bpl;
int err;
@@ -923,7 +804,7 @@ static int start_preview(struct saa7134_dev *dev, struct saa7134_fh *fh)
return 0;
}
-static int stop_preview(struct saa7134_dev *dev, struct saa7134_fh *fh)
+static int stop_preview(struct saa7134_dev *dev)
{
dev->ovenable = 0;
saa7134_set_dmabits(dev);
@@ -1018,8 +899,7 @@ static int buffer_prepare(struct videobuf_queue *q,
struct videobuf_buffer *vb,
enum v4l2_field field)
{
- struct saa7134_fh *fh = q->priv_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = q->priv_data;
struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb);
unsigned int size;
int err;
@@ -1057,7 +937,7 @@ static int buffer_prepare(struct videobuf_queue *q,
buf->vb.size = size;
buf->vb.field = field;
buf->fmt = dev->fmt;
- buf->pt = &fh->pt_cap;
+ buf->pt = &dev->pt_cap;
dev->video_q.curr = NULL;
err = videobuf_iolock(q,&buf->vb,&dev->ovbuf);
@@ -1082,8 +962,7 @@ static int buffer_prepare(struct videobuf_queue *q,
static int
buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
{
- struct saa7134_fh *fh = q->priv_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = q->priv_data;
*size = dev->fmt->depth * dev->width * dev->height >> 3;
if (0 == *count)
@@ -1094,10 +973,10 @@ buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
- struct saa7134_fh *fh = q->priv_data;
+ struct saa7134_dev *dev = q->priv_data;
struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb);
- saa7134_buffer_queue(fh->dev,&fh->dev->video_q,buf);
+ saa7134_buffer_queue(dev, &dev->video_q, buf);
}
static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
@@ -1116,133 +995,56 @@ static struct videobuf_queue_ops video_qops = {
/* ------------------------------------------------------------------ */
-int saa7134_g_ctrl_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, struct v4l2_control *c)
+static int saa7134_s_ctrl(struct v4l2_ctrl *ctrl)
{
- const struct v4l2_queryctrl* ctrl;
-
- ctrl = ctrl_by_id(c->id);
- if (NULL == ctrl)
- return -EINVAL;
- switch (c->id) {
- case V4L2_CID_BRIGHTNESS:
- c->value = dev->ctl_bright;
- break;
- case V4L2_CID_HUE:
- c->value = dev->ctl_hue;
- break;
- case V4L2_CID_CONTRAST:
- c->value = dev->ctl_contrast;
- break;
- case V4L2_CID_SATURATION:
- c->value = dev->ctl_saturation;
- break;
- case V4L2_CID_AUDIO_MUTE:
- c->value = dev->ctl_mute;
- break;
- case V4L2_CID_AUDIO_VOLUME:
- c->value = dev->ctl_volume;
- break;
- case V4L2_CID_PRIVATE_INVERT:
- c->value = dev->ctl_invert;
- break;
- case V4L2_CID_HFLIP:
- c->value = dev->ctl_mirror;
- break;
- case V4L2_CID_PRIVATE_Y_EVEN:
- c->value = dev->ctl_y_even;
- break;
- case V4L2_CID_PRIVATE_Y_ODD:
- c->value = dev->ctl_y_odd;
- break;
- case V4L2_CID_PRIVATE_AUTOMUTE:
- c->value = dev->ctl_automute;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(saa7134_g_ctrl_internal);
-
-static int saa7134_g_ctrl(struct file *file, void *priv, struct v4l2_control *c)
-{
- struct saa7134_fh *fh = priv;
-
- return saa7134_g_ctrl_internal(fh->dev, fh, c);
-}
-
-int saa7134_s_ctrl_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, struct v4l2_control *c)
-{
- const struct v4l2_queryctrl* ctrl;
+ struct saa7134_dev *dev = container_of(ctrl->handler, struct saa7134_dev, ctrl_handler);
unsigned long flags;
int restart_overlay = 0;
- int err;
- err = -EINVAL;
-
- mutex_lock(&dev->lock);
-
- ctrl = ctrl_by_id(c->id);
- if (NULL == ctrl)
- goto error;
-
- dprintk("set_control name=%s val=%d\n",ctrl->name,c->value);
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_BOOLEAN:
- case V4L2_CTRL_TYPE_MENU:
- case V4L2_CTRL_TYPE_INTEGER:
- if (c->value < ctrl->minimum)
- c->value = ctrl->minimum;
- if (c->value > ctrl->maximum)
- c->value = ctrl->maximum;
- break;
- default:
- /* nothing */;
- }
- switch (c->id) {
+ switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- dev->ctl_bright = c->value;
- saa_writeb(SAA7134_DEC_LUMA_BRIGHT, dev->ctl_bright);
+ dev->ctl_bright = ctrl->val;
+ saa_writeb(SAA7134_DEC_LUMA_BRIGHT, ctrl->val);
break;
case V4L2_CID_HUE:
- dev->ctl_hue = c->value;
- saa_writeb(SAA7134_DEC_CHROMA_HUE, dev->ctl_hue);
+ dev->ctl_hue = ctrl->val;
+ saa_writeb(SAA7134_DEC_CHROMA_HUE, ctrl->val);
break;
case V4L2_CID_CONTRAST:
- dev->ctl_contrast = c->value;
+ dev->ctl_contrast = ctrl->val;
saa_writeb(SAA7134_DEC_LUMA_CONTRAST,
dev->ctl_invert ? -dev->ctl_contrast : dev->ctl_contrast);
break;
case V4L2_CID_SATURATION:
- dev->ctl_saturation = c->value;
+ dev->ctl_saturation = ctrl->val;
saa_writeb(SAA7134_DEC_CHROMA_SATURATION,
dev->ctl_invert ? -dev->ctl_saturation : dev->ctl_saturation);
break;
case V4L2_CID_AUDIO_MUTE:
- dev->ctl_mute = c->value;
+ dev->ctl_mute = ctrl->val;
saa7134_tvaudio_setmute(dev);
break;
case V4L2_CID_AUDIO_VOLUME:
- dev->ctl_volume = c->value;
+ dev->ctl_volume = ctrl->val;
saa7134_tvaudio_setvolume(dev,dev->ctl_volume);
break;
case V4L2_CID_PRIVATE_INVERT:
- dev->ctl_invert = c->value;
+ dev->ctl_invert = ctrl->val;
saa_writeb(SAA7134_DEC_LUMA_CONTRAST,
dev->ctl_invert ? -dev->ctl_contrast : dev->ctl_contrast);
saa_writeb(SAA7134_DEC_CHROMA_SATURATION,
dev->ctl_invert ? -dev->ctl_saturation : dev->ctl_saturation);
break;
case V4L2_CID_HFLIP:
- dev->ctl_mirror = c->value;
+ dev->ctl_mirror = ctrl->val;
restart_overlay = 1;
break;
case V4L2_CID_PRIVATE_Y_EVEN:
- dev->ctl_y_even = c->value;
+ dev->ctl_y_even = ctrl->val;
restart_overlay = 1;
break;
case V4L2_CID_PRIVATE_Y_ODD:
- dev->ctl_y_odd = c->value;
+ dev->ctl_y_odd = ctrl->val;
restart_overlay = 1;
break;
case V4L2_CID_PRIVATE_AUTOMUTE:
@@ -1252,7 +1054,7 @@ int saa7134_s_ctrl_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, str
tda9887_cfg.tuner = TUNER_TDA9887;
tda9887_cfg.priv = &dev->tda9887_conf;
- dev->ctl_automute = c->value;
+ dev->ctl_automute = ctrl->val;
if (dev->tda9887_conf) {
if (dev->ctl_automute)
dev->tda9887_conf |= TDA9887_AUTOMUTE;
@@ -1264,27 +1066,15 @@ int saa7134_s_ctrl_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, str
break;
}
default:
- goto error;
+ return -EINVAL;
}
- if (restart_overlay && fh && res_check(fh, RESOURCE_OVERLAY)) {
- spin_lock_irqsave(&dev->slock,flags);
- stop_preview(dev,fh);
- start_preview(dev,fh);
- spin_unlock_irqrestore(&dev->slock,flags);
+ if (restart_overlay && res_locked(dev, RESOURCE_OVERLAY)) {
+ spin_lock_irqsave(&dev->slock, flags);
+ stop_preview(dev);
+ start_preview(dev);
+ spin_unlock_irqrestore(&dev->slock, flags);
}
- err = 0;
-
-error:
- mutex_unlock(&dev->lock);
- return err;
-}
-EXPORT_SYMBOL_GPL(saa7134_s_ctrl_internal);
-
-static int saa7134_s_ctrl(struct file *file, void *f, struct v4l2_control *c)
-{
- struct saa7134_fh *fh = f;
-
- return saa7134_s_ctrl_internal(fh->dev, fh, c);
+ return 0;
}
/* ------------------------------------------------------------------ */
@@ -1292,15 +1082,16 @@ static int saa7134_s_ctrl(struct file *file, void *f, struct v4l2_control *c)
static struct videobuf_queue *saa7134_queue(struct file *file)
{
struct video_device *vdev = video_devdata(file);
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa7134_fh *fh = file->private_data;
struct videobuf_queue *q = NULL;
switch (vdev->vfl_type) {
case VFL_TYPE_GRABBER:
- q = &fh->cap;
+ q = fh->is_empress ? &dev->empress_tsq : &dev->cap;
break;
case VFL_TYPE_VBI:
- q = &fh->vbi;
+ q = &dev->vbi;
break;
default:
BUG();
@@ -1311,9 +1102,10 @@ static struct videobuf_queue *saa7134_queue(struct file *file)
static int saa7134_resource(struct file *file)
{
struct video_device *vdev = video_devdata(file);
+ struct saa7134_fh *fh = file->private_data;
if (vdev->vfl_type == VFL_TYPE_GRABBER)
- return RESOURCE_VIDEO;
+ return fh->is_empress ? RESOURCE_EMPRESS : RESOURCE_VIDEO;
if (vdev->vfl_type == VFL_TYPE_VBI)
return RESOURCE_VBI;
@@ -1335,22 +1127,6 @@ static int video_open(struct file *file)
v4l2_fh_init(&fh->fh, vdev);
file->private_data = fh;
- fh->dev = dev;
-
- videobuf_queue_sg_init(&fh->cap, &video_qops,
- &dev->pci->dev, &dev->slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct saa7134_buf),
- fh, NULL);
- videobuf_queue_sg_init(&fh->vbi, &saa7134_vbi_qops,
- &dev->pci->dev, &dev->slock,
- V4L2_BUF_TYPE_VBI_CAPTURE,
- V4L2_FIELD_SEQ_TB,
- sizeof(struct saa7134_buf),
- fh, NULL);
- saa7134_pgtable_alloc(dev->pci,&fh->pt_cap);
- saa7134_pgtable_alloc(dev->pci,&fh->pt_vbi);
if (vdev->vfl_type == VFL_TYPE_RADIO) {
/* switch to radio mode */
@@ -1369,17 +1145,18 @@ static ssize_t
video_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
struct video_device *vdev = video_devdata(file);
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa7134_fh *fh = file->private_data;
switch (vdev->vfl_type) {
case VFL_TYPE_GRABBER:
- if (res_locked(fh->dev,RESOURCE_VIDEO))
+ if (res_locked(dev, RESOURCE_VIDEO))
return -EBUSY;
return videobuf_read_one(saa7134_queue(file),
data, count, ppos,
file->f_flags & O_NONBLOCK);
case VFL_TYPE_VBI:
- if (!res_get(fh->dev,fh,RESOURCE_VBI))
+ if (!res_get(dev, fh, RESOURCE_VBI))
return -EBUSY;
return videobuf_read_stream(saa7134_queue(file),
data, count, ppos, 1,
@@ -1394,52 +1171,59 @@ video_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
static unsigned int
video_poll(struct file *file, struct poll_table_struct *wait)
{
+ unsigned long req_events = poll_requested_events(wait);
struct video_device *vdev = video_devdata(file);
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa7134_fh *fh = file->private_data;
struct videobuf_buffer *buf = NULL;
unsigned int rc = 0;
+ if (v4l2_event_pending(&fh->fh))
+ rc = POLLPRI;
+ else if (req_events & POLLPRI)
+ poll_wait(file, &fh->fh.wait, wait);
+
if (vdev->vfl_type == VFL_TYPE_VBI)
- return videobuf_poll_stream(file, &fh->vbi, wait);
+ return rc | videobuf_poll_stream(file, &dev->vbi, wait);
- if (res_check(fh,RESOURCE_VIDEO)) {
- mutex_lock(&fh->cap.vb_lock);
- if (!list_empty(&fh->cap.stream))
- buf = list_entry(fh->cap.stream.next, struct videobuf_buffer, stream);
+ if (res_check(fh, RESOURCE_VIDEO)) {
+ mutex_lock(&dev->cap.vb_lock);
+ if (!list_empty(&dev->cap.stream))
+ buf = list_entry(dev->cap.stream.next, struct videobuf_buffer, stream);
} else {
- mutex_lock(&fh->cap.vb_lock);
- if (UNSET == fh->cap.read_off) {
+ mutex_lock(&dev->cap.vb_lock);
+ if (UNSET == dev->cap.read_off) {
/* need to capture a new frame */
- if (res_locked(fh->dev,RESOURCE_VIDEO))
+ if (res_locked(dev, RESOURCE_VIDEO))
goto err;
- if (0 != fh->cap.ops->buf_prepare(&fh->cap,fh->cap.read_buf,fh->cap.field))
+ if (0 != dev->cap.ops->buf_prepare(&dev->cap,
+ dev->cap.read_buf, dev->cap.field))
goto err;
- fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf);
- fh->cap.read_off = 0;
+ dev->cap.ops->buf_queue(&dev->cap, dev->cap.read_buf);
+ dev->cap.read_off = 0;
}
- buf = fh->cap.read_buf;
+ buf = dev->cap.read_buf;
}
if (!buf)
goto err;
poll_wait(file, &buf->done, wait);
- if (buf->state == VIDEOBUF_DONE ||
- buf->state == VIDEOBUF_ERROR)
- rc = POLLIN|POLLRDNORM;
- mutex_unlock(&fh->cap.vb_lock);
+ if (buf->state == VIDEOBUF_DONE || buf->state == VIDEOBUF_ERROR)
+ rc |= POLLIN | POLLRDNORM;
+ mutex_unlock(&dev->cap.vb_lock);
return rc;
err:
- mutex_unlock(&fh->cap.vb_lock);
- return POLLERR;
+ mutex_unlock(&dev->cap.vb_lock);
+ return rc | POLLERR;
}
static int video_release(struct file *file)
{
struct video_device *vdev = video_devdata(file);
- struct saa7134_fh *fh = file->private_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
+ struct saa7134_fh *fh = file->private_data;
struct saa6588_command cmd;
unsigned long flags;
@@ -1448,26 +1232,28 @@ static int video_release(struct file *file)
/* turn off overlay */
if (res_check(fh, RESOURCE_OVERLAY)) {
spin_lock_irqsave(&dev->slock,flags);
- stop_preview(dev,fh);
+ stop_preview(dev);
spin_unlock_irqrestore(&dev->slock,flags);
- res_free(dev,fh,RESOURCE_OVERLAY);
+ res_free(dev, fh, RESOURCE_OVERLAY);
}
/* stop video capture */
if (res_check(fh, RESOURCE_VIDEO)) {
pm_qos_remove_request(&dev->qos_request);
- videobuf_streamoff(&fh->cap);
- res_free(dev,fh,RESOURCE_VIDEO);
+ videobuf_streamoff(&dev->cap);
+ res_free(dev, fh, RESOURCE_VIDEO);
+ videobuf_mmap_free(&dev->cap);
}
- if (fh->cap.read_buf) {
- buffer_release(&fh->cap,fh->cap.read_buf);
- kfree(fh->cap.read_buf);
+ if (dev->cap.read_buf) {
+ buffer_release(&dev->cap, dev->cap.read_buf);
+ kfree(dev->cap.read_buf);
}
/* stop vbi capture */
if (res_check(fh, RESOURCE_VBI)) {
- videobuf_stop(&fh->vbi);
- res_free(dev,fh,RESOURCE_VBI);
+ videobuf_stop(&dev->vbi);
+ res_free(dev, fh, RESOURCE_VBI);
+ videobuf_mmap_free(&dev->vbi);
}
/* ts-capture will not work in planar mode, so turn it off Hac: 04.05*/
@@ -1480,12 +1266,6 @@ static int video_release(struct file *file)
if (vdev->vfl_type == VFL_TYPE_RADIO)
saa_call_all(dev, core, ioctl, SAA6588_CMD_CLOSE, &cmd);
- /* free stuff */
- videobuf_mmap_free(&fh->cap);
- videobuf_mmap_free(&fh->vbi);
- saa7134_pgtable_free(dev->pci,&fh->pt_cap);
- saa7134_pgtable_free(dev->pci,&fh->pt_vbi);
-
v4l2_fh_del(&fh->fh);
v4l2_fh_exit(&fh->fh);
file->private_data = NULL;
@@ -1501,11 +1281,11 @@ static int video_mmap(struct file *file, struct vm_area_struct * vma)
static ssize_t radio_read(struct file *file, char __user *data,
size_t count, loff_t *ppos)
{
- struct saa7134_fh *fh = file->private_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa6588_command cmd;
cmd.block_count = count/3;
+ cmd.nonblocking = file->f_flags & O_NONBLOCK;
cmd.buffer = data;
cmd.instance = file;
cmd.result = -ENODEV;
@@ -1517,16 +1297,16 @@ static ssize_t radio_read(struct file *file, char __user *data,
static unsigned int radio_poll(struct file *file, poll_table *wait)
{
- struct saa7134_fh *fh = file->private_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa6588_command cmd;
+ unsigned int rc = v4l2_ctrl_poll(file, wait);
cmd.instance = file;
cmd.event_list = wait;
- cmd.result = -ENODEV;
+ cmd.result = 0;
saa_call_all(dev, core, ioctl, SAA6588_CMD_POLL, &cmd);
- return cmd.result;
+ return rc | cmd.result;
}
/* ------------------------------------------------------------------ */
@@ -1534,8 +1314,7 @@ static unsigned int radio_poll(struct file *file, poll_table *wait)
static int saa7134_try_get_set_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa7134_tvnorm *norm = dev->tvnorm;
memset(&f->fmt.vbi.reserved, 0, sizeof(f->fmt.vbi.reserved));
@@ -1555,12 +1334,11 @@ static int saa7134_try_get_set_fmt_vbi_cap(struct file *file, void *priv,
static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
- f->fmt.pix.field = fh->cap.field;
+ f->fmt.pix.field = dev->cap.field;
f->fmt.pix.pixelformat = dev->fmt->fourcc;
f->fmt.pix.bytesperline =
(f->fmt.pix.width * dev->fmt->depth) >> 3;
@@ -1574,8 +1352,7 @@ static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
static int saa7134_g_fmt_vid_overlay(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_clip __user *clips = f->fmt.win.clips;
u32 clipcount = f->fmt.win.clipcount;
int err = 0;
@@ -1607,8 +1384,7 @@ static int saa7134_g_fmt_vid_overlay(struct file *file, void *priv,
static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa7134_format *fmt;
enum v4l2_field field;
unsigned int maxw, maxh;
@@ -1659,8 +1435,7 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
static int saa7134_try_fmt_vid_overlay(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
if (saa7134_no_overlay > 0) {
printk(KERN_ERR "V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n");
@@ -1675,8 +1450,7 @@ static int saa7134_try_fmt_vid_overlay(struct file *file, void *priv,
static int saa7134_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
int err;
err = saa7134_try_fmt_vid_cap(file, priv, f);
@@ -1686,15 +1460,14 @@ static int saa7134_s_fmt_vid_cap(struct file *file, void *priv,
dev->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
dev->width = f->fmt.pix.width;
dev->height = f->fmt.pix.height;
- fh->cap.field = f->fmt.pix.field;
+ dev->cap.field = f->fmt.pix.field;
return 0;
}
static int saa7134_s_fmt_vid_overlay(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
int err;
unsigned long flags;
@@ -1719,10 +1492,10 @@ static int saa7134_s_fmt_vid_overlay(struct file *file, void *priv,
return -EFAULT;
}
- if (res_check(fh, RESOURCE_OVERLAY)) {
+ if (res_check(priv, RESOURCE_OVERLAY)) {
spin_lock_irqsave(&dev->slock, flags);
- stop_preview(dev, fh);
- start_preview(dev, fh);
+ stop_preview(dev);
+ start_preview(dev);
spin_unlock_irqrestore(&dev->slock, flags);
}
@@ -1730,26 +1503,9 @@ static int saa7134_s_fmt_vid_overlay(struct file *file, void *priv,
return 0;
}
-int saa7134_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *c)
-{
- const struct v4l2_queryctrl *ctrl;
-
- if ((c->id < V4L2_CID_BASE ||
- c->id >= V4L2_CID_LASTP1) &&
- (c->id < V4L2_CID_PRIVATE_BASE ||
- c->id >= V4L2_CID_PRIVATE_LASTP1))
- return -EINVAL;
- ctrl = ctrl_by_id(c->id);
- *c = (NULL != ctrl) ? *ctrl : no_ctrl;
- return 0;
-}
-EXPORT_SYMBOL_GPL(saa7134_queryctrl);
-
-static int saa7134_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
+int saa7134_enum_input(struct file *file, void *priv, struct v4l2_input *i)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
unsigned int n;
n = i->index;
@@ -1769,27 +1525,27 @@ static int saa7134_enum_input(struct file *file, void *priv,
if (0 != (v1 & 0x40))
i->status |= V4L2_IN_ST_NO_H_LOCK;
if (0 != (v2 & 0x40))
- i->status |= V4L2_IN_ST_NO_SYNC;
+ i->status |= V4L2_IN_ST_NO_SIGNAL;
if (0 != (v2 & 0x0e))
i->status |= V4L2_IN_ST_MACROVISION;
}
i->std = SAA7134_NORMS;
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_enum_input);
-static int saa7134_g_input(struct file *file, void *priv, unsigned int *i)
+int saa7134_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
*i = dev->ctl_input;
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_g_input);
-static int saa7134_s_input(struct file *file, void *priv, unsigned int i)
+int saa7134_s_input(struct file *file, void *priv, unsigned int i)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
if (i >= SAA7134_INPUT_MAX)
return -EINVAL;
@@ -1800,13 +1556,14 @@ static int saa7134_s_input(struct file *file, void *priv, unsigned int i)
mutex_unlock(&dev->lock);
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_s_input);
-static int saa7134_querycap(struct file *file, void *priv,
+int saa7134_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
struct video_device *vdev = video_devdata(file);
+ struct saa7134_fh *fh = priv;
u32 radio_caps, video_caps, vbi_caps;
unsigned int tuner_type = dev->tuner_type;
@@ -1825,7 +1582,7 @@ static int saa7134_querycap(struct file *file, void *priv,
radio_caps |= V4L2_CAP_RDS_CAPTURE;
video_caps = V4L2_CAP_VIDEO_CAPTURE;
- if (saa7134_no_overlay <= 0)
+ if (saa7134_no_overlay <= 0 && !fh->is_empress)
video_caps |= V4L2_CAP_VIDEO_OVERLAY;
vbi_caps = V4L2_CAP_VBI_CAPTURE;
@@ -1851,14 +1608,17 @@ static int saa7134_querycap(struct file *file, void *priv,
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_querycap);
-int saa7134_s_std_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, v4l2_std_id id)
+int saa7134_s_std(struct file *file, void *priv, v4l2_std_id id)
{
+ struct saa7134_dev *dev = video_drvdata(file);
+ struct saa7134_fh *fh = priv;
unsigned long flags;
unsigned int i;
v4l2_std_id fixup;
- if (!fh && res_locked(dev, RESOURCE_OVERLAY)) {
+ if (fh->is_empress && res_locked(dev, RESOURCE_OVERLAY)) {
/* Don't change the std from the mpeg device
if overlay is active. */
return -EBUSY;
@@ -1898,15 +1658,15 @@ int saa7134_s_std_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, v4l2_
id = tvnorms[i].id;
mutex_lock(&dev->lock);
- if (fh && res_check(fh, RESOURCE_OVERLAY)) {
+ if (!fh->is_empress && res_check(fh, RESOURCE_OVERLAY)) {
spin_lock_irqsave(&dev->slock, flags);
- stop_preview(dev, fh);
+ stop_preview(dev);
spin_unlock_irqrestore(&dev->slock, flags);
set_tvnorm(dev, &tvnorms[i]);
spin_lock_irqsave(&dev->slock, flags);
- start_preview(dev, fh);
+ start_preview(dev);
spin_unlock_irqrestore(&dev->slock, flags);
} else
set_tvnorm(dev, &tvnorms[i]);
@@ -1915,29 +1675,21 @@ int saa7134_s_std_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, v4l2_
mutex_unlock(&dev->lock);
return 0;
}
-EXPORT_SYMBOL_GPL(saa7134_s_std_internal);
+EXPORT_SYMBOL_GPL(saa7134_s_std);
-static int saa7134_s_std(struct file *file, void *priv, v4l2_std_id id)
+int saa7134_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
- struct saa7134_fh *fh = priv;
-
- return saa7134_s_std_internal(fh->dev, fh, id);
-}
-
-static int saa7134_g_std(struct file *file, void *priv, v4l2_std_id *id)
-{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
*id = dev->tvnorm->id;
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_g_std);
static int saa7134_cropcap(struct file *file, void *priv,
struct v4l2_cropcap *cap)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
@@ -1959,8 +1711,7 @@ static int saa7134_cropcap(struct file *file, void *priv,
static int saa7134_g_crop(struct file *file, void *f, struct v4l2_crop *crop)
{
- struct saa7134_fh *fh = f;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
@@ -1971,22 +1722,17 @@ static int saa7134_g_crop(struct file *file, void *f, struct v4l2_crop *crop)
static int saa7134_s_crop(struct file *file, void *f, const struct v4l2_crop *crop)
{
- struct saa7134_fh *fh = f;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_rect *b = &dev->crop_bounds;
struct v4l2_rect *c = &dev->crop_current;
if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
return -EINVAL;
- if (crop->c.height < 0)
- return -EINVAL;
- if (crop->c.width < 0)
- return -EINVAL;
- if (res_locked(fh->dev, RESOURCE_OVERLAY))
+ if (res_locked(dev, RESOURCE_OVERLAY))
return -EBUSY;
- if (res_locked(fh->dev, RESOURCE_VIDEO))
+ if (res_locked(dev, RESOURCE_VIDEO))
return -EBUSY;
*c = crop->c;
@@ -2006,11 +1752,10 @@ static int saa7134_s_crop(struct file *file, void *f, const struct v4l2_crop *cr
return 0;
}
-static int saa7134_g_tuner(struct file *file, void *priv,
+int saa7134_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
int n;
if (0 != t->index)
@@ -2037,12 +1782,12 @@ static int saa7134_g_tuner(struct file *file, void *priv,
t->signal = 0xffff;
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_g_tuner);
-static int saa7134_s_tuner(struct file *file, void *priv,
+int saa7134_s_tuner(struct file *file, void *priv,
const struct v4l2_tuner *t)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
int rx, mode;
if (0 != t->index)
@@ -2058,12 +1803,12 @@ static int saa7134_s_tuner(struct file *file, void *priv,
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_s_tuner);
-static int saa7134_g_frequency(struct file *file, void *priv,
+int saa7134_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
if (0 != f->tuner)
return -EINVAL;
@@ -2072,12 +1817,12 @@ static int saa7134_g_frequency(struct file *file, void *priv,
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_g_frequency);
-static int saa7134_s_frequency(struct file *file, void *priv,
+int saa7134_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
if (0 != f->tuner)
return -EINVAL;
@@ -2089,6 +1834,7 @@ static int saa7134_s_frequency(struct file *file, void *priv,
mutex_unlock(&dev->lock);
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_s_frequency);
static int saa7134_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
@@ -2126,8 +1872,7 @@ static int saa7134_enum_fmt_vid_overlay(struct file *file, void *priv,
static int saa7134_g_fbuf(struct file *file, void *f,
struct v4l2_framebuffer *fb)
{
- struct saa7134_fh *fh = f;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
*fb = dev->ovbuf;
fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
@@ -2138,8 +1883,7 @@ static int saa7134_g_fbuf(struct file *file, void *f,
static int saa7134_s_fbuf(struct file *file, void *f,
const struct v4l2_framebuffer *fb)
{
- struct saa7134_fh *fh = f;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa7134_format *fmt;
if (!capable(CAP_SYS_ADMIN) &&
@@ -2160,10 +1904,9 @@ static int saa7134_s_fbuf(struct file *file, void *f,
return 0;
}
-static int saa7134_overlay(struct file *file, void *f, unsigned int on)
+static int saa7134_overlay(struct file *file, void *priv, unsigned int on)
{
- struct saa7134_fh *fh = f;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
unsigned long flags;
if (on) {
@@ -2172,54 +1915,57 @@ static int saa7134_overlay(struct file *file, void *f, unsigned int on)
return -EINVAL;
}
- if (!res_get(dev, fh, RESOURCE_OVERLAY))
+ if (!res_get(dev, priv, RESOURCE_OVERLAY))
return -EBUSY;
spin_lock_irqsave(&dev->slock, flags);
- start_preview(dev, fh);
+ start_preview(dev);
spin_unlock_irqrestore(&dev->slock, flags);
}
if (!on) {
- if (!res_check(fh, RESOURCE_OVERLAY))
+ if (!res_check(priv, RESOURCE_OVERLAY))
return -EINVAL;
spin_lock_irqsave(&dev->slock, flags);
- stop_preview(dev, fh);
+ stop_preview(dev);
spin_unlock_irqrestore(&dev->slock, flags);
- res_free(dev, fh, RESOURCE_OVERLAY);
+ res_free(dev, priv, RESOURCE_OVERLAY);
}
return 0;
}
-static int saa7134_reqbufs(struct file *file, void *priv,
+int saa7134_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p)
{
return videobuf_reqbufs(saa7134_queue(file), p);
}
+EXPORT_SYMBOL_GPL(saa7134_reqbufs);
-static int saa7134_querybuf(struct file *file, void *priv,
+int saa7134_querybuf(struct file *file, void *priv,
struct v4l2_buffer *b)
{
return videobuf_querybuf(saa7134_queue(file), b);
}
+EXPORT_SYMBOL_GPL(saa7134_querybuf);
-static int saa7134_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+int saa7134_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
return videobuf_qbuf(saa7134_queue(file), b);
}
+EXPORT_SYMBOL_GPL(saa7134_qbuf);
-static int saa7134_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+int saa7134_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
return videobuf_dqbuf(saa7134_queue(file), b,
file->f_flags & O_NONBLOCK);
}
+EXPORT_SYMBOL_GPL(saa7134_dqbuf);
-static int saa7134_streamon(struct file *file, void *priv,
+int saa7134_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
int res = saa7134_resource(file);
- if (!res_get(dev, fh, res))
+ if (!res_get(dev, priv, res))
return -EBUSY;
/* The SAA7134 has a 1K FIFO; the datasheet suggests that when
@@ -2229,36 +1975,37 @@ static int saa7134_streamon(struct file *file, void *priv,
* Unfortunately, I lack register-level documentation to check the
* Linux FIFO setup and confirm the perfect value.
*/
- pm_qos_add_request(&dev->qos_request,
- PM_QOS_CPU_DMA_LATENCY,
- 20);
+ if (res != RESOURCE_EMPRESS)
+ pm_qos_add_request(&dev->qos_request,
+ PM_QOS_CPU_DMA_LATENCY, 20);
return videobuf_streamon(saa7134_queue(file));
}
+EXPORT_SYMBOL_GPL(saa7134_streamon);
-static int saa7134_streamoff(struct file *file, void *priv,
+int saa7134_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
+ struct saa7134_dev *dev = video_drvdata(file);
int err;
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
int res = saa7134_resource(file);
- pm_qos_remove_request(&dev->qos_request);
+ if (res != RESOURCE_EMPRESS)
+ pm_qos_remove_request(&dev->qos_request);
err = videobuf_streamoff(saa7134_queue(file));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ res_free(dev, priv, res);
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_streamoff);
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int vidioc_g_register (struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
reg->val = saa_readb(reg->reg & 0xffffff);
reg->size = 1;
@@ -2268,8 +2015,7 @@ static int vidioc_g_register (struct file *file, void *priv,
static int vidioc_s_register (struct file *file, void *priv,
const struct v4l2_dbg_register *reg)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
saa_writeb(reg->reg & 0xffffff, reg->val);
return 0;
@@ -2279,8 +2025,7 @@ static int vidioc_s_register (struct file *file, void *priv,
static int radio_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
- struct saa7134_fh *fh = file->private_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
if (0 != t->index)
return -EINVAL;
@@ -2299,8 +2044,7 @@ static int radio_g_tuner(struct file *file, void *priv,
static int radio_s_tuner(struct file *file, void *priv,
const struct v4l2_tuner *t)
{
- struct saa7134_fh *fh = file->private_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
if (0 != t->index)
return -EINVAL;
@@ -2309,50 +2053,6 @@ static int radio_s_tuner(struct file *file, void *priv,
return 0;
}
-static int radio_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
-{
- if (i->index != 0)
- return -EINVAL;
-
- strcpy(i->name, "Radio");
- i->type = V4L2_INPUT_TYPE_TUNER;
-
- return 0;
-}
-
-static int radio_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int radio_s_input(struct file *filp, void *priv, unsigned int i)
-{
- return 0;
-}
-
-static int radio_s_std(struct file *file, void *fh, v4l2_std_id norm)
-{
- return 0;
-}
-
-static int radio_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *c)
-{
- const struct v4l2_queryctrl *ctrl;
-
- if (c->id < V4L2_CID_BASE ||
- c->id >= V4L2_CID_LASTP1)
- return -EINVAL;
- if (c->id == V4L2_CID_AUDIO_MUTE) {
- ctrl = ctrl_by_id(c->id);
- *c = *ctrl;
- } else
- *c = no_ctrl;
- return 0;
-}
-
static const struct v4l2_file_operations video_fops =
{
.owner = THIS_MODULE,
@@ -2387,9 +2087,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_enum_input = saa7134_enum_input,
.vidioc_g_input = saa7134_g_input,
.vidioc_s_input = saa7134_s_input,
- .vidioc_queryctrl = saa7134_queryctrl,
- .vidioc_g_ctrl = saa7134_g_ctrl,
- .vidioc_s_ctrl = saa7134_s_ctrl,
.vidioc_streamon = saa7134_streamon,
.vidioc_streamoff = saa7134_streamoff,
.vidioc_g_tuner = saa7134_g_tuner,
@@ -2405,6 +2102,9 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_g_register = vidioc_g_register,
.vidioc_s_register = vidioc_s_register,
#endif
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static const struct v4l2_file_operations radio_fops = {
@@ -2419,16 +2119,11 @@ static const struct v4l2_file_operations radio_fops = {
static const struct v4l2_ioctl_ops radio_ioctl_ops = {
.vidioc_querycap = saa7134_querycap,
.vidioc_g_tuner = radio_g_tuner,
- .vidioc_enum_input = radio_enum_input,
.vidioc_s_tuner = radio_s_tuner,
- .vidioc_s_input = radio_s_input,
- .vidioc_s_std = radio_s_std,
- .vidioc_queryctrl = radio_queryctrl,
- .vidioc_g_input = radio_g_input,
- .vidioc_g_ctrl = saa7134_g_ctrl,
- .vidioc_s_ctrl = saa7134_s_ctrl,
.vidioc_g_frequency = saa7134_g_frequency,
.vidioc_s_frequency = saa7134_s_frequency,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/* ----------------------------------------------------------- */
@@ -2447,8 +2142,55 @@ struct video_device saa7134_radio_template = {
.ioctl_ops = &radio_ioctl_ops,
};
+static const struct v4l2_ctrl_ops saa7134_ctrl_ops = {
+ .s_ctrl = saa7134_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config saa7134_ctrl_invert = {
+ .ops = &saa7134_ctrl_ops,
+ .id = V4L2_CID_PRIVATE_INVERT,
+ .name = "Invert",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config saa7134_ctrl_y_odd = {
+ .ops = &saa7134_ctrl_ops,
+ .id = V4L2_CID_PRIVATE_Y_ODD,
+ .name = "Y Offset Odd Field",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 128,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config saa7134_ctrl_y_even = {
+ .ops = &saa7134_ctrl_ops,
+ .id = V4L2_CID_PRIVATE_Y_EVEN,
+ .name = "Y Offset Even Field",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 128,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config saa7134_ctrl_automute = {
+ .ops = &saa7134_ctrl_ops,
+ .id = V4L2_CID_PRIVATE_AUTOMUTE,
+ .name = "Automute",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+};
+
int saa7134_video_init1(struct saa7134_dev *dev)
{
+ struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
+
/* sanitycheck insmod options */
if (gbuffers < 2 || gbuffers > VIDEO_MAX_FRAME)
gbuffers = 2;
@@ -2456,17 +2198,38 @@ int saa7134_video_init1(struct saa7134_dev *dev)
gbufsize = gbufsize_max;
gbufsize = (gbufsize + PAGE_SIZE - 1) & PAGE_MASK;
- /* put some sensible defaults into the data structures ... */
- dev->ctl_bright = ctrl_by_id(V4L2_CID_BRIGHTNESS)->default_value;
- dev->ctl_contrast = ctrl_by_id(V4L2_CID_CONTRAST)->default_value;
- dev->ctl_hue = ctrl_by_id(V4L2_CID_HUE)->default_value;
- dev->ctl_saturation = ctrl_by_id(V4L2_CID_SATURATION)->default_value;
- dev->ctl_volume = ctrl_by_id(V4L2_CID_AUDIO_VOLUME)->default_value;
- dev->ctl_mute = 1; // ctrl_by_id(V4L2_CID_AUDIO_MUTE)->default_value;
- dev->ctl_invert = ctrl_by_id(V4L2_CID_PRIVATE_INVERT)->default_value;
- dev->ctl_automute = ctrl_by_id(V4L2_CID_PRIVATE_AUTOMUTE)->default_value;
-
- if (dev->tda9887_conf && dev->ctl_automute)
+ v4l2_ctrl_handler_init(hdl, 11);
+ v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 127, 1, 68);
+ v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, -15, 15, 1, 0);
+ v4l2_ctrl_new_custom(hdl, &saa7134_ctrl_invert, NULL);
+ v4l2_ctrl_new_custom(hdl, &saa7134_ctrl_y_odd, NULL);
+ v4l2_ctrl_new_custom(hdl, &saa7134_ctrl_y_even, NULL);
+ v4l2_ctrl_new_custom(hdl, &saa7134_ctrl_automute, NULL);
+ if (hdl->error)
+ return hdl->error;
+ if (card_has_radio(dev)) {
+ hdl = &dev->radio_ctrl_handler;
+ v4l2_ctrl_handler_init(hdl, 2);
+ v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler,
+ v4l2_ctrl_radio_filter);
+ if (hdl->error)
+ return hdl->error;
+ }
+ dev->ctl_mute = 1;
+
+ if (dev->tda9887_conf && saa7134_ctrl_automute.def)
dev->tda9887_conf |= TDA9887_AUTOMUTE;
dev->automute = 0;
@@ -2489,9 +2252,34 @@ int saa7134_video_init1(struct saa7134_dev *dev)
if (saa7134_boards[dev->board].video_out)
saa7134_videoport_init(dev);
+ videobuf_queue_sg_init(&dev->cap, &video_qops,
+ &dev->pci->dev, &dev->slock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_INTERLACED,
+ sizeof(struct saa7134_buf),
+ dev, NULL);
+ videobuf_queue_sg_init(&dev->vbi, &saa7134_vbi_qops,
+ &dev->pci->dev, &dev->slock,
+ V4L2_BUF_TYPE_VBI_CAPTURE,
+ V4L2_FIELD_SEQ_TB,
+ sizeof(struct saa7134_buf),
+ dev, NULL);
+ saa7134_pgtable_alloc(dev->pci, &dev->pt_cap);
+ saa7134_pgtable_alloc(dev->pci, &dev->pt_vbi);
+
return 0;
}
+void saa7134_video_fini(struct saa7134_dev *dev)
+{
+ /* free stuff */
+ saa7134_pgtable_free(dev->pci, &dev->pt_cap);
+ saa7134_pgtable_free(dev->pci, &dev->pt_vbi);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ if (card_has_radio(dev))
+ v4l2_ctrl_handler_free(&dev->radio_ctrl_handler);
+}
+
int saa7134_videoport_init(struct saa7134_dev *dev)
{
/* enable video output */
@@ -2533,6 +2321,7 @@ int saa7134_video_init2(struct saa7134_dev *dev)
/* init video hw */
set_tvnorm(dev,&tvnorms[0]);
video_mux(dev,0);
+ v4l2_ctrl_handler_setup(&dev->ctrl_handler);
saa7134_tvaudio_setmute(dev);
saa7134_tvaudio_setvolume(dev,dev->ctl_volume);
return 0;
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 8d1453a48014..2474e848f2c0 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -37,6 +37,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
#include <media/tuner.h>
#include <media/rc-core.h>
#include <media/ir-kbd-i2c.h>
@@ -410,12 +411,18 @@ struct saa7134_board {
#define card(dev) (saa7134_boards[dev->board])
#define card_in(dev,n) (saa7134_boards[dev->board].inputs[n])
+#define V4L2_CID_PRIVATE_INVERT (V4L2_CID_USER_SAA7134_BASE + 0)
+#define V4L2_CID_PRIVATE_Y_ODD (V4L2_CID_USER_SAA7134_BASE + 1)
+#define V4L2_CID_PRIVATE_Y_EVEN (V4L2_CID_USER_SAA7134_BASE + 2)
+#define V4L2_CID_PRIVATE_AUTOMUTE (V4L2_CID_USER_SAA7134_BASE + 3)
+
/* ----------------------------------------------------------- */
/* device / file handle status */
#define RESOURCE_OVERLAY 1
#define RESOURCE_VIDEO 2
#define RESOURCE_VBI 4
+#define RESOURCE_EMPRESS 8
#define INTERLACE_AUTO 0
#define INTERLACE_ON 1
@@ -470,16 +477,8 @@ struct saa7134_dmaqueue {
/* video filehandle status */
struct saa7134_fh {
struct v4l2_fh fh;
- struct saa7134_dev *dev;
+ bool is_empress;
unsigned int resources;
-
- /* video capture */
- struct videobuf_queue cap;
- struct saa7134_pgtable pt_cap;
-
- /* vbi capture */
- struct videobuf_queue vbi;
- struct saa7134_pgtable pt_vbi;
};
/* dmasound dsp status */
@@ -589,7 +588,11 @@ struct saa7134_dev {
/* video+ts+vbi capture */
struct saa7134_dmaqueue video_q;
+ struct videobuf_queue cap;
+ struct saa7134_pgtable pt_cap;
struct saa7134_dmaqueue vbi_q;
+ struct videobuf_queue vbi;
+ struct saa7134_pgtable pt_vbi;
unsigned int video_fieldcount;
unsigned int vbi_fieldcount;
struct saa7134_format *fmt;
@@ -599,6 +602,7 @@ struct saa7134_dev {
/* various v4l controls */
struct saa7134_tvnorm *tvnorm; /* video */
struct saa7134_tvaudio *tvaudio;
+ struct v4l2_ctrl_handler ctrl_handler;
unsigned int ctl_input;
int ctl_bright;
int ctl_contrast;
@@ -626,6 +630,7 @@ struct saa7134_dev {
int last_carrier;
int nosignal;
unsigned int insuspend;
+ struct v4l2_ctrl_handler radio_ctrl_handler;
/* I2C keyboard data */
struct IR_i2c_init_data init_data;
@@ -638,10 +643,11 @@ struct saa7134_dev {
/* SAA7134_MPEG_EMPRESS only */
struct video_device *empress_dev;
+ struct v4l2_subdev *empress_sd;
struct videobuf_queue empress_tsq;
- atomic_t empress_users;
struct work_struct empress_workqueue;
int empress_started;
+ struct v4l2_ctrl_handler empress_ctrl_handler;
#if IS_ENABLED(CONFIG_VIDEO_SAA7134_DVB)
/* SAA7134_MPEG_DVB only */
@@ -699,6 +705,16 @@ struct saa7134_dev {
_rc; \
})
+static inline int res_check(struct saa7134_fh *fh, unsigned int bit)
+{
+ return fh->resources & bit;
+}
+
+static inline int res_locked(struct saa7134_dev *dev, unsigned int bit)
+{
+ return dev->resources & bit;
+}
+
/* ----------------------------------------------------------- */
/* saa7134-core.c */
@@ -761,10 +777,31 @@ extern unsigned int video_debug;
extern struct video_device saa7134_video_template;
extern struct video_device saa7134_radio_template;
-int saa7134_s_ctrl_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, struct v4l2_control *c);
-int saa7134_g_ctrl_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, struct v4l2_control *c);
-int saa7134_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *c);
-int saa7134_s_std_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, v4l2_std_id id);
+int saa7134_s_std(struct file *file, void *priv, v4l2_std_id id);
+int saa7134_g_std(struct file *file, void *priv, v4l2_std_id *id);
+int saa7134_enum_input(struct file *file, void *priv, struct v4l2_input *i);
+int saa7134_g_input(struct file *file, void *priv, unsigned int *i);
+int saa7134_s_input(struct file *file, void *priv, unsigned int i);
+int saa7134_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap);
+int saa7134_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t);
+int saa7134_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *t);
+int saa7134_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f);
+int saa7134_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *f);
+int saa7134_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p);
+int saa7134_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *b);
+int saa7134_qbuf(struct file *file, void *priv, struct v4l2_buffer *b);
+int saa7134_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b);
+int saa7134_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type);
+int saa7134_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type);
int saa7134_videoport_init(struct saa7134_dev *dev);
void saa7134_set_tvnorm_hw(struct saa7134_dev *dev);
@@ -773,6 +810,7 @@ int saa7134_video_init1(struct saa7134_dev *dev);
int saa7134_video_init2(struct saa7134_dev *dev);
void saa7134_irq_video_signalchange(struct saa7134_dev *dev);
void saa7134_irq_video_done(struct saa7134_dev *dev, unsigned long status);
+void saa7134_video_fini(struct saa7134_dev *dev);
/* ----------------------------------------------------------- */
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 77edc113e485..e5cfb6cfa18d 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -1303,7 +1303,7 @@ static int sta2x11_vip_resume(struct pci_dev *pdev)
#endif
-static DEFINE_PCI_DEVICE_TABLE(sta2x11_vip_pci_tbl) = {
+static const struct pci_device_id sta2x11_vip_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_VIP)},
{0,}
};
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index d7f0249e4050..b2a4403940c5 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -36,7 +36,8 @@ source "drivers/media/platform/blackfin/Kconfig"
config VIDEO_SH_VOU
tristate "SuperH VOU video output driver"
depends on MEDIA_CAMERA_SUPPORT
- depends on VIDEO_DEV && ARCH_SHMOBILE && I2C
+ depends on VIDEO_DEV && I2C
+ depends on ARCH_SHMOBILE || COMPILE_TEST
select VIDEOBUF_DMA_CONTIG
help
Support for the Video Output Unit (VOU) on SuperH SoCs.
@@ -90,13 +91,6 @@ config VIDEO_M32R_AR_M64278
To compile this driver as a module, choose M here: the
module will be called arv.
-config VIDEO_OMAP2
- tristate "OMAP2 Camera Capture Interface driver"
- depends on VIDEO_DEV && ARCH_OMAP2 && VIDEO_V4L2_INT_DEVICE
- select VIDEOBUF_DMA_SG
- ---help---
- This is a v4l2 driver for the TI OMAP2 camera capture interface
-
config VIDEO_OMAP3
tristate "OMAP 3 Camera support"
depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 1348ba1faf92..e5269da91906 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -2,8 +2,6 @@
# Makefile for the video capture/playback device drivers.
#
-omap2cam-objs := omap24xxcam.o omap24xxcam-dma.o
-
obj-$(CONFIG_VIDEO_VINO) += indycam.o
obj-$(CONFIG_VIDEO_VINO) += vino.o
@@ -14,7 +12,6 @@ obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o
obj-$(CONFIG_VIDEO_CAFE_CCIC) += marvell-ccic/
obj-$(CONFIG_VIDEO_MMP_CAMERA) += marvell-ccic/
-obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
obj-$(CONFIG_VIDEO_OMAP3) += omap3isp/
obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index eac472b5ae83..b02aba488826 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -347,7 +347,7 @@ static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
/* If buffer queue is empty, return error */
if (list_empty(&layer->dma_queue)) {
v4l2_err(&vpbe_dev->v4l2_dev, "buffer queue is empty\n");
- return -EINVAL;
+ return -ENOBUFS;
}
/* Get the next frame from the buffer queue */
layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 52ac5e6c8625..735ec47601a9 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -277,7 +277,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
if (list_empty(&common->dma_queue)) {
spin_unlock_irqrestore(&common->irqlock, flags);
vpif_dbg(1, debug, "buffer queue is empty\n");
- return -EIO;
+ return -ENOBUFS;
}
/* Get the next frame from the buffer queue */
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index c31bcf129a5d..9d115cdc6bdb 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -239,7 +239,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
if (list_empty(&common->dma_queue)) {
spin_unlock_irqrestore(&common->irqlock, flags);
vpif_err("buffer queue is empty\n");
- return -EIO;
+ return -ENOBUFS;
}
/* Get the next frame from the buffer queue */
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
index d2d3b4b61435..01ed1ecdff7e 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -1,7 +1,7 @@
config VIDEO_SAMSUNG_EXYNOS4_IS
bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && PM_RUNTIME
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on (PLAT_S5P || ARCH_EXYNOS)
help
Say Y here to enable camera host interface devices for
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index fb27ff7e1e07..8a712ca91d11 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -549,7 +549,7 @@ static int fimc_capture_release(struct file *file)
vc->streaming = false;
}
- ret = vb2_fop_release(file);
+ ret = _vb2_fop_release(file, NULL);
if (close) {
clear_bit(ST_CAPT_BUSY, &fimc->state);
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index f7915695c907..da2fc86cc524 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -998,41 +998,46 @@ static int fimc_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, res->start, fimc_irq_handler,
0, dev_name(dev), fimc);
- if (ret) {
+ if (ret < 0) {
dev_err(dev, "failed to install irq (%d)\n", ret);
- goto err_clk;
+ goto err_sclk;
}
ret = fimc_initialize_capture_subdev(fimc);
- if (ret)
- goto err_clk;
+ if (ret < 0)
+ goto err_sclk;
platform_set_drvdata(pdev, fimc);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- goto err_sd;
+
+ if (!pm_runtime_enabled(dev)) {
+ ret = clk_enable(fimc->clock[CLK_GATE]);
+ if (ret < 0)
+ goto err_sd;
+ }
+
/* Initialize contiguous memory allocator */
fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
if (IS_ERR(fimc->alloc_ctx)) {
ret = PTR_ERR(fimc->alloc_ctx);
- goto err_pm;
+ goto err_gclk;
}
dev_dbg(dev, "FIMC.%d registered successfully\n", fimc->id);
-
- pm_runtime_put(dev);
return 0;
-err_pm:
- pm_runtime_put(dev);
+
+err_gclk:
+ if (!pm_runtime_enabled(dev))
+ clk_disable(fimc->clock[CLK_GATE]);
err_sd:
fimc_unregister_capture_subdev(fimc);
-err_clk:
+err_sclk:
clk_disable(fimc->clock[CLK_BUS]);
fimc_clk_put(fimc);
return ret;
}
+#ifdef CONFIG_PM_RUNTIME
static int fimc_runtime_resume(struct device *dev)
{
struct fimc_dev *fimc = dev_get_drvdata(dev);
@@ -1065,6 +1070,7 @@ static int fimc_runtime_suspend(struct device *dev)
dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
return ret;
}
+#endif
#ifdef CONFIG_PM_SLEEP
static int fimc_resume(struct device *dev)
diff --git a/drivers/media/platform/exynos4-is/fimc-core.h b/drivers/media/platform/exynos4-is/fimc-core.h
index 3d376faec777..1790fb4e32ea 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.h
+++ b/drivers/media/platform/exynos4-is/fimc-core.h
@@ -481,7 +481,6 @@ struct fimc_ctrls {
* @flags: additional flags for image conversion
* @state: flags to keep track of user configuration
* @fimc_dev: the FIMC device this context applies to
- * @m2m_ctx: memory-to-memory device context
* @fh: v4l2 file handle
* @ctrls: v4l2 controls structure
*/
@@ -502,7 +501,6 @@ struct fimc_ctx {
u32 flags;
u32 state;
struct fimc_dev *fimc_dev;
- struct v4l2_m2m_ctx *m2m_ctx;
struct v4l2_fh fh;
struct fimc_ctrls ctrls;
};
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.c b/drivers/media/platform/exynos4-is/fimc-is-regs.c
index f758e2694fa3..2628733c4e10 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-regs.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-regs.c
@@ -33,47 +33,23 @@ void fimc_is_hw_set_intgr0_gd0(struct fimc_is *is)
mcuctl_write(INTGR0_INTGD(0), is, MCUCTL_REG_INTGR0);
}
-int fimc_is_hw_wait_intsr0_intsd0(struct fimc_is *is)
-{
- unsigned int timeout = 2000;
- u32 cfg, status;
-
- cfg = mcuctl_read(is, MCUCTL_REG_INTSR0);
- status = INTSR0_GET_INTSD(0, cfg);
-
- while (status) {
- cfg = mcuctl_read(is, MCUCTL_REG_INTSR0);
- status = INTSR0_GET_INTSD(0, cfg);
- if (timeout == 0) {
- dev_warn(&is->pdev->dev, "%s timeout\n",
- __func__);
- return -ETIME;
- }
- timeout--;
- udelay(1);
- }
- return 0;
-}
-
int fimc_is_hw_wait_intmsr0_intmsd0(struct fimc_is *is)
{
unsigned int timeout = 2000;
u32 cfg, status;
- cfg = mcuctl_read(is, MCUCTL_REG_INTMSR0);
- status = INTMSR0_GET_INTMSD(0, cfg);
-
- while (status) {
+ do {
cfg = mcuctl_read(is, MCUCTL_REG_INTMSR0);
status = INTMSR0_GET_INTMSD(0, cfg);
- if (timeout == 0) {
+
+ if (--timeout == 0) {
dev_warn(&is->pdev->dev, "%s timeout\n",
__func__);
- return -ETIME;
+ return -ETIMEDOUT;
}
- timeout--;
udelay(1);
- }
+ } while (status != 0);
+
return 0;
}
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.h b/drivers/media/platform/exynos4-is/fimc-is-regs.h
index 5fa2fda46742..1d9d4ffc6ad5 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-regs.h
+++ b/drivers/media/platform/exynos4-is/fimc-is-regs.h
@@ -145,7 +145,6 @@ void fimc_is_fw_clear_irq2(struct fimc_is *is);
int fimc_is_hw_get_params(struct fimc_is *is, unsigned int num);
void fimc_is_hw_set_intgr0_gd0(struct fimc_is *is);
-int fimc_is_hw_wait_intsr0_intsd0(struct fimc_is *is);
int fimc_is_hw_wait_intmsr0_intmsd0(struct fimc_is *is);
void fimc_is_hw_set_sensor_num(struct fimc_is *is);
void fimc_is_hw_stream_on(struct fimc_is *is);
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 9770fa98d6a1..13a4228952e3 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -781,6 +781,9 @@ static int fimc_is_debugfs_create(struct fimc_is *is)
return is->debugfs_entry == NULL ? -EIO : 0;
}
+static int fimc_is_runtime_resume(struct device *dev);
+static int fimc_is_runtime_suspend(struct device *dev);
+
static int fimc_is_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -835,14 +838,20 @@ static int fimc_is_probe(struct platform_device *pdev)
}
pm_runtime_enable(dev);
+ if (!pm_runtime_enabled(dev)) {
+ ret = fimc_is_runtime_resume(dev);
+ if (ret < 0)
+ goto err_irq;
+ }
+
ret = pm_runtime_get_sync(dev);
if (ret < 0)
- goto err_irq;
+ goto err_pm;
is->alloc_ctx = vb2_dma_contig_init_ctx(dev);
if (IS_ERR(is->alloc_ctx)) {
ret = PTR_ERR(is->alloc_ctx);
- goto err_irq;
+ goto err_pm;
}
/*
* Register FIMC-IS V4L2 subdevs to this driver. The video nodes
@@ -867,10 +876,13 @@ static int fimc_is_probe(struct platform_device *pdev)
err_dfs:
fimc_is_debugfs_remove(is);
-err_vb:
- vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
err_sd:
fimc_is_unregister_subdevs(is);
+err_vb:
+ vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
+err_pm:
+ if (!pm_runtime_enabled(dev))
+ fimc_is_runtime_suspend(dev);
err_irq:
free_irq(is->irq, is);
err_clk:
@@ -919,10 +931,13 @@ static int fimc_is_suspend(struct device *dev)
static int fimc_is_remove(struct platform_device *pdev)
{
- struct fimc_is *is = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct fimc_is *is = dev_get_drvdata(dev);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ if (!pm_runtime_status_suspended(dev))
+ fimc_is_runtime_suspend(dev);
free_irq(is->irq, is);
fimc_is_unregister_subdevs(is);
vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
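
The fimc-core, fimc-is, fimc-lite and mipi-csis hunks all add the same probe-time fallback: when runtime PM is compiled out, pm_runtime_enabled() returns false, so the driver powers the block up itself instead of relying on a get/put pair. A minimal sketch of that idiom, with foo_runtime_resume() standing in for the drivers' real runtime-PM callbacks (an assumption, not code from this patch):

#include <linux/pm_runtime.h>

/* Hypothetical runtime-PM callback; the real drivers reuse their
 * *_runtime_resume() handlers here. */
static int foo_runtime_resume(struct device *dev);

static int foo_enable_power(struct device *dev)
{
	int ret;

	pm_runtime_enable(dev);

	/* With runtime PM compiled out, pm_runtime_enable() is a no-op and
	 * pm_runtime_enabled() returns false, so the block must be powered
	 * up here instead of lazily on first use. */
	if (!pm_runtime_enabled(dev)) {
		ret = foo_runtime_resume(dev);
		if (ret < 0) {
			pm_runtime_disable(dev);
			return ret;
		}
	}
	return 0;
}

The matching cleanup (calling the suspend path when runtime PM is disabled) is what the new err_pm/err_gclk labels and the fimc_is_remove() changes above take care of.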
diff --git a/drivers/media/platform/exynos4-is/fimc-lite-reg.c b/drivers/media/platform/exynos4-is/fimc-lite-reg.c
index 72a343e3b5e8..d0dc7ee04452 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite-reg.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite-reg.c
@@ -133,7 +133,7 @@ void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
int i = ARRAY_SIZE(src_pixfmt_map);
u32 cfg;
- while (--i >= 0) {
+ while (--i) {
if (src_pixfmt_map[i][0] == pixelcode)
break;
}
@@ -240,7 +240,7 @@ static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
int i = ARRAY_SIZE(pixcode);
- while (--i >= 0)
+ while (--i)
if (pixcode[i][0] == f->fmt->mbus_code)
break;
cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK;
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index e5798f70d149..779ec3cd259d 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -546,7 +546,7 @@ static int fimc_lite_release(struct file *file)
mutex_unlock(&entity->parent->graph_mutex);
}
- vb2_fop_release(file);
+ _vb2_fop_release(file, NULL);
pm_runtime_put(&fimc->pdev->dev);
clear_bit(ST_FLITE_SUSPENDED, &fimc->state);
@@ -1549,42 +1549,46 @@ static int fimc_lite_probe(struct platform_device *pdev)
0, dev_name(dev), fimc);
if (ret) {
dev_err(dev, "Failed to install irq (%d)\n", ret);
- goto err_clk;
+ goto err_clk_put;
}
/* The video node will be created within the subdev's registered() op */
ret = fimc_lite_create_capture_subdev(fimc);
if (ret)
- goto err_clk;
+ goto err_clk_put;
platform_set_drvdata(pdev, fimc);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- goto err_sd;
+
+ if (!pm_runtime_enabled(dev)) {
+ ret = clk_enable(fimc->clock);
+ if (ret < 0)
+ goto err_sd;
+ }
fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
if (IS_ERR(fimc->alloc_ctx)) {
ret = PTR_ERR(fimc->alloc_ctx);
- goto err_pm;
+ goto err_clk_dis;
}
- pm_runtime_put(dev);
-
fimc_lite_set_default_config(fimc);
dev_dbg(dev, "FIMC-LITE.%d registered successfully\n",
fimc->index);
return 0;
-err_pm:
- pm_runtime_put(dev);
+
+err_clk_dis:
+ if (!pm_runtime_enabled(dev))
+ clk_disable(fimc->clock);
err_sd:
fimc_lite_unregister_capture_subdev(fimc);
-err_clk:
+err_clk_put:
fimc_lite_clk_put(fimc);
return ret;
}
+#ifdef CONFIG_PM_RUNTIME
static int fimc_lite_runtime_resume(struct device *dev)
{
struct fimc_lite *fimc = dev_get_drvdata(dev);
@@ -1600,6 +1604,7 @@ static int fimc_lite_runtime_suspend(struct device *dev)
clk_disable(fimc->clock);
return 0;
}
+#endif
#ifdef CONFIG_PM_SLEEP
static int fimc_lite_resume(struct device *dev)
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index 8d33b68c76ba..9da95bd14820 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -44,17 +44,17 @@ void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
{
struct vb2_buffer *src_vb, *dst_vb;
- if (!ctx || !ctx->m2m_ctx)
+ if (!ctx || !ctx->fh.m2m_ctx)
return;
- src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
if (src_vb && dst_vb) {
v4l2_m2m_buf_done(src_vb, vb_state);
v4l2_m2m_buf_done(dst_vb, vb_state);
v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev,
- ctx->m2m_ctx);
+ ctx->fh.m2m_ctx);
}
}
@@ -123,12 +123,12 @@ static void fimc_device_run(void *priv)
fimc_prepare_dma_offset(ctx, df);
}
- src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
ret = fimc_prepare_addr(ctx, src_vb, sf, &sf->paddr);
if (ret)
goto dma_unlock;
- dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
ret = fimc_prepare_addr(ctx, dst_vb, df, &df->paddr);
if (ret)
goto dma_unlock;
@@ -219,31 +219,15 @@ static int fimc_buf_prepare(struct vb2_buffer *vb)
static void fimc_buf_queue(struct vb2_buffer *vb)
{
struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
-
- dbg("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
-
- if (ctx->m2m_ctx)
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
-}
-
-static void fimc_lock(struct vb2_queue *vq)
-{
- struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
- mutex_lock(&ctx->fimc_dev->lock);
-}
-
-static void fimc_unlock(struct vb2_queue *vq)
-{
- struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
- mutex_unlock(&ctx->fimc_dev->lock);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
}
static struct vb2_ops fimc_qops = {
.queue_setup = fimc_queue_setup,
.buf_prepare = fimc_buf_prepare,
.buf_queue = fimc_buf_queue,
- .wait_prepare = fimc_unlock,
- .wait_finish = fimc_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.stop_streaming = stop_streaming,
.start_streaming = start_streaming,
};
@@ -385,7 +369,7 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
if (ret)
return ret;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (vb2_is_busy(vq)) {
v4l2_err(&fimc->m2m.vfd, "queue (%d) busy\n", f->type);
@@ -410,56 +394,6 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
return 0;
}
-static int fimc_m2m_reqbufs(struct file *file, void *fh,
- struct v4l2_requestbuffers *reqbufs)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int fimc_m2m_querybuf(struct file *file, void *fh,
- struct v4l2_buffer *buf)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int fimc_m2m_qbuf(struct file *file, void *fh,
- struct v4l2_buffer *buf)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int fimc_m2m_dqbuf(struct file *file, void *fh,
- struct v4l2_buffer *buf)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int fimc_m2m_expbuf(struct file *file, void *fh,
- struct v4l2_exportbuffer *eb)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
-}
-
-
-static int fimc_m2m_streamon(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int fimc_m2m_streamoff(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
static int fimc_m2m_cropcap(struct file *file, void *fh,
struct v4l2_cropcap *cr)
{
@@ -598,13 +532,13 @@ static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
.vidioc_try_fmt_vid_out_mplane = fimc_m2m_try_fmt_mplane,
.vidioc_s_fmt_vid_cap_mplane = fimc_m2m_s_fmt_mplane,
.vidioc_s_fmt_vid_out_mplane = fimc_m2m_s_fmt_mplane,
- .vidioc_reqbufs = fimc_m2m_reqbufs,
- .vidioc_querybuf = fimc_m2m_querybuf,
- .vidioc_qbuf = fimc_m2m_qbuf,
- .vidioc_dqbuf = fimc_m2m_dqbuf,
- .vidioc_expbuf = fimc_m2m_expbuf,
- .vidioc_streamon = fimc_m2m_streamon,
- .vidioc_streamoff = fimc_m2m_streamoff,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_g_crop = fimc_m2m_g_crop,
.vidioc_s_crop = fimc_m2m_s_crop,
.vidioc_cropcap = fimc_m2m_cropcap
@@ -624,6 +558,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->fimc_dev->lock;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -636,6 +571,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->fimc_dev->lock;
return vb2_queue_init(dst_vq);
}
@@ -708,9 +644,9 @@ static int fimc_m2m_open(struct file *file)
ctx->out_path = FIMC_IO_DMA;
ctx->scaler.enabled = 1;
- ctx->m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
- if (IS_ERR(ctx->m2m_ctx)) {
- ret = PTR_ERR(ctx->m2m_ctx);
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
goto error_c;
}
@@ -725,7 +661,7 @@ static int fimc_m2m_open(struct file *file)
return 0;
error_m2m_ctx:
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
error_c:
fimc_ctrls_delete(ctx);
error_fh:
@@ -747,7 +683,7 @@ static int fimc_m2m_release(struct file *file)
mutex_lock(&fimc->lock);
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
fimc_ctrls_delete(ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
@@ -760,45 +696,13 @@ static int fimc_m2m_release(struct file *file)
return 0;
}
-static unsigned int fimc_m2m_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
- struct fimc_dev *fimc = ctx->fimc_dev;
- int ret;
-
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
-
- ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
- mutex_unlock(&fimc->lock);
-
- return ret;
-}
-
-
-static int fimc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
- struct fimc_dev *fimc = ctx->fimc_dev;
- int ret;
-
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
-
- ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
- mutex_unlock(&fimc->lock);
-
- return ret;
-}
-
static const struct v4l2_file_operations fimc_m2m_fops = {
.owner = THIS_MODULE,
.open = fimc_m2m_open,
.release = fimc_m2m_release,
- .poll = fimc_m2m_poll,
+ .poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
- .mmap = fimc_m2m_mmap,
+ .mmap = v4l2_m2m_fop_mmap,
};
static struct v4l2_m2m_ops m2m_ops = {
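
The fimc-m2m hunks above drop the per-driver reqbufs/qbuf/poll/mmap wrappers by storing the mem2mem context in the v4l2_fh (ctx->fh.m2m_ctx), which is where the core v4l2_m2m_ioctl_* and v4l2_m2m_fop_* helpers look for it. A hedged sketch of the open() wiring, with invented foo_* names and a queue-setup callback assumed to exist elsewhere (see the foo_queue_init() sketch further below):

#include <linux/err.h>
#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>

/* Hypothetical driver state, trimmed to what the sketch needs. */
struct foo_dev {
	struct v4l2_m2m_dev *m2m_dev;
};

struct foo_ctx {
	struct v4l2_fh fh;		/* embedded file handle */
	struct foo_dev *dev;
};

/* Assumed to be defined elsewhere in the hypothetical driver. */
static int foo_queue_init(void *priv, struct vb2_queue *src_vq,
			  struct vb2_queue *dst_vq);

static int foo_open(struct file *file)
{
	struct foo_dev *dev = video_drvdata(file);
	struct foo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	int ret;

	if (!ctx)
		return -ENOMEM;
	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;	/* the core helpers expect a v4l2_fh here */
	ctx->dev = dev;

	/* Storing the context in fh.m2m_ctx is what lets the stock
	 * v4l2_m2m_ioctl_* and v4l2_m2m_fop_* helpers find it. */
	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, foo_queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		ret = PTR_ERR(ctx->fh.m2m_ctx);
		v4l2_fh_exit(&ctx->fh);
		kfree(ctx);
		return ret;
	}
	v4l2_fh_add(&ctx->fh);
	return 0;
}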
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 9fc2af6a0446..f3c3591fdc5d 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -91,7 +91,7 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
#define S5PCSIS_INTSRC_ODD_BEFORE (1 << 29)
#define S5PCSIS_INTSRC_ODD_AFTER (1 << 28)
#define S5PCSIS_INTSRC_ODD (0x3 << 28)
-#define S5PCSIS_INTSRC_NON_IMAGE_DATA (0xff << 28)
+#define S5PCSIS_INTSRC_NON_IMAGE_DATA (0xf << 28)
#define S5PCSIS_INTSRC_FRAME_START (1 << 27)
#define S5PCSIS_INTSRC_FRAME_END (1 << 26)
#define S5PCSIS_INTSRC_ERR_SOT_HS (0xf << 12)
@@ -790,6 +790,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
#define s5pcsis_parse_dt(pdev, state) (-ENOSYS)
#endif
+static int s5pcsis_pm_resume(struct device *dev, bool runtime);
static const struct of_device_id s5pcsis_of_match[];
static int s5pcsis_probe(struct platform_device *pdev)
@@ -902,13 +903,21 @@ static int s5pcsis_probe(struct platform_device *pdev)
/* .. and a pointer to the subdev. */
platform_set_drvdata(pdev, &state->sd);
memcpy(state->events, s5pcsis_events, sizeof(state->events));
+
pm_runtime_enable(dev);
+ if (!pm_runtime_enabled(dev)) {
+ ret = s5pcsis_pm_resume(dev, true);
+ if (ret < 0)
+ goto e_m_ent;
+ }
dev_info(&pdev->dev, "lanes: %d, hs_settle: %d, wclk: %d, freq: %u\n",
state->num_lanes, state->hs_settle, state->wclk_ext,
state->clk_frequency);
return 0;
+e_m_ent:
+ media_entity_cleanup(&state->sd.entity);
e_clkdis:
clk_disable(state->clock[CSIS_CLK_MUX]);
e_clkput:
@@ -1014,7 +1023,7 @@ static int s5pcsis_remove(struct platform_device *pdev)
struct csis_state *state = sd_to_csis_state(sd);
pm_runtime_disable(&pdev->dev);
- s5pcsis_pm_suspend(&pdev->dev, false);
+ s5pcsis_pm_suspend(&pdev->dev, true);
clk_disable(state->clock[CSIS_CLK_MUX]);
pm_runtime_set_suspended(&pdev->dev);
s5pcsis_clk_put(state);
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index 6a232239ee8c..dbf0ce38a8e7 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1580,7 +1580,7 @@ static int viu_of_probe(struct platform_device *op)
}
/* enable VIU clock */
- clk = devm_clk_get(&op->dev, "viu_clk");
+ clk = devm_clk_get(&op->dev, "ipg");
if (IS_ERR(clk)) {
dev_err(&op->dev, "failed to lookup the clock!\n");
ret = PTR_ERR(clk);
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 65cab70fefcb..6bb86b581a34 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -918,7 +918,7 @@ static int deinterlace_open(struct file *file)
return ret;
}
- ctx->xt = kzalloc(sizeof(struct dma_async_tx_descriptor) +
+ ctx->xt = kzalloc(sizeof(struct dma_interleaved_template) +
sizeof(struct data_chunk), GFP_KERNEL);
if (!ctx->xt) {
kfree(ctx);
diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/mem2mem_testdev.c
index 8df5975b700a..08e24379b794 100644
--- a/drivers/media/platform/mem2mem_testdev.c
+++ b/drivers/media/platform/mem2mem_testdev.c
@@ -177,8 +177,6 @@ struct m2mtest_ctx {
enum v4l2_colorspace colorspace;
- struct v4l2_m2m_ctx *m2m_ctx;
-
/* Source and destination queue data */
struct m2mtest_q_data q_data[2];
};
@@ -342,8 +340,8 @@ static int job_ready(void *priv)
{
struct m2mtest_ctx *ctx = priv;
- if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < ctx->translen
- || v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < ctx->translen) {
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen
+ || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen) {
dprintk(ctx->dev, "Not enough buffers available\n");
return 0;
}
@@ -359,21 +357,6 @@ static void job_abort(void *priv)
ctx->aborting = 1;
}
-static void m2mtest_lock(void *priv)
-{
- struct m2mtest_ctx *ctx = priv;
- struct m2mtest_dev *dev = ctx->dev;
- mutex_lock(&dev->dev_mutex);
-}
-
-static void m2mtest_unlock(void *priv)
-{
- struct m2mtest_ctx *ctx = priv;
- struct m2mtest_dev *dev = ctx->dev;
- mutex_unlock(&dev->dev_mutex);
-}
-
-
/* device_run() - prepares and starts the device
*
* This simulates all the immediate preparations required before starting
@@ -386,8 +369,8 @@ static void device_run(void *priv)
struct m2mtest_dev *dev = ctx->dev;
struct vb2_buffer *src_buf, *dst_buf;
- src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
device_process(ctx, src_buf, dst_buf);
@@ -409,8 +392,8 @@ static void device_isr(unsigned long priv)
return;
}
- src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
- dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
curr_ctx->num_processed++;
@@ -423,7 +406,7 @@ static void device_isr(unsigned long priv)
|| curr_ctx->aborting) {
dprintk(curr_ctx->dev, "Finishing transaction\n");
curr_ctx->num_processed = 0;
- v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->m2m_ctx);
+ v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->fh.m2m_ctx);
} else {
device_run(curr_ctx);
}
@@ -491,7 +474,7 @@ static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
struct vb2_queue *vq;
struct m2mtest_q_data *q_data;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
@@ -594,7 +577,7 @@ static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
struct m2mtest_q_data *q_data;
struct vb2_queue *vq;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
@@ -648,52 +631,6 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
return ret;
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *reqbufs)
-{
- struct m2mtest_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct m2mtest_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct m2mtest_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct m2mtest_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct m2mtest_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct m2mtest_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
static int m2mtest_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct m2mtest_ctx *ctx =
@@ -748,14 +685,14 @@ static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
.vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
.vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -818,27 +755,15 @@ static int m2mtest_buf_prepare(struct vb2_buffer *vb)
static void m2mtest_buf_queue(struct vb2_buffer *vb)
{
struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
-}
-
-static void m2mtest_wait_prepare(struct vb2_queue *q)
-{
- struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
- m2mtest_unlock(ctx);
-}
-
-static void m2mtest_wait_finish(struct vb2_queue *q)
-{
- struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
- m2mtest_lock(ctx);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
}
static struct vb2_ops m2mtest_qops = {
.queue_setup = m2mtest_queue_setup,
.buf_prepare = m2mtest_buf_prepare,
.buf_queue = m2mtest_buf_queue,
- .wait_prepare = m2mtest_wait_prepare,
- .wait_finish = m2mtest_wait_finish,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
@@ -853,6 +778,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
src_vq->ops = &m2mtest_qops;
src_vq->mem_ops = &vb2_vmalloc_memops;
src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -865,6 +791,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
dst_vq->ops = &m2mtest_qops;
dst_vq->mem_ops = &vb2_vmalloc_memops;
dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
return vb2_queue_init(dst_vq);
}
@@ -936,10 +863,10 @@ static int m2mtest_open(struct file *file)
ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
ctx->colorspace = V4L2_COLORSPACE_REC709;
- ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
- if (IS_ERR(ctx->m2m_ctx)) {
- rc = PTR_ERR(ctx->m2m_ctx);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ rc = PTR_ERR(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(hdl);
kfree(ctx);
@@ -949,7 +876,8 @@ static int m2mtest_open(struct file *file)
v4l2_fh_add(&ctx->fh);
atomic_inc(&dev->num_inst);
- dprintk(dev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
+ dprintk(dev, "Created instance: %p, m2m_ctx: %p\n",
+ ctx, ctx->fh.m2m_ctx);
open_unlock:
mutex_unlock(&dev->dev_mutex);
@@ -967,7 +895,7 @@ static int m2mtest_release(struct file *file)
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
mutex_lock(&dev->dev_mutex);
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
mutex_unlock(&dev->dev_mutex);
kfree(ctx);
@@ -976,34 +904,13 @@ static int m2mtest_release(struct file *file)
return 0;
}
-static unsigned int m2mtest_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct m2mtest_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
-}
-
-static int m2mtest_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct m2mtest_dev *dev = video_drvdata(file);
- struct m2mtest_ctx *ctx = file2ctx(file);
- int res;
-
- if (mutex_lock_interruptible(&dev->dev_mutex))
- return -ERESTARTSYS;
- res = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
- mutex_unlock(&dev->dev_mutex);
- return res;
-}
-
static const struct v4l2_file_operations m2mtest_fops = {
.owner = THIS_MODULE,
.open = m2mtest_open,
.release = m2mtest_release,
- .poll = m2mtest_poll,
+ .poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
- .mmap = m2mtest_mmap,
+ .mmap = v4l2_m2m_fop_mmap,
};
static struct video_device m2mtest_videodev = {
@@ -1019,8 +926,6 @@ static struct v4l2_m2m_ops m2m_ops = {
.device_run = device_run,
.job_ready = job_ready,
.job_abort = job_abort,
- .lock = m2mtest_lock,
- .unlock = m2mtest_unlock,
};
static int m2mtest_probe(struct platform_device *pdev)
@@ -1133,4 +1038,3 @@ static int __init m2mtest_init(void)
module_init(m2mtest_init);
module_exit(m2mtest_exit);
-
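
Both the fimc-m2m and mem2mem_testdev hunks delete their wait_prepare/wait_finish and poll/mmap wrappers by pointing the vb2 queue at the driver mutex and using the stock helpers. A minimal sketch of such a queue-setup callback, under the assumption of the same hypothetical foo_* driver as above (names and the choice of vmalloc memops are illustrative, not from this patch):

#include <linux/mutex.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-mem2mem.h>

/* Hypothetical device/context types, trimmed to what the sketch needs. */
struct foo_dev { struct mutex dev_mutex; };
struct foo_ctx { struct foo_dev *dev; };

static const struct vb2_ops foo_qops = {
	/* a real driver also fills in queue_setup/buf_prepare/buf_queue */
	.wait_prepare	= vb2_ops_wait_prepare,	/* drops vq->lock around waits */
	.wait_finish	= vb2_ops_wait_finish,	/* re-takes vq->lock */
};

static int foo_queue_init(void *priv, struct vb2_queue *src_vq,
			  struct vb2_queue *dst_vq)
{
	struct foo_ctx *ctx = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	src_vq->io_modes = VB2_MMAP;
	src_vq->drv_priv = ctx;
	src_vq->ops = &foo_qops;
	src_vq->mem_ops = &vb2_vmalloc_memops;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &ctx->dev->dev_mutex;	/* lets the core lock for us */
	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	dst_vq->io_modes = VB2_MMAP;
	dst_vq->drv_priv = ctx;
	dst_vq->ops = &foo_qops;
	dst_vq->mem_ops = &vb2_vmalloc_memops;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &ctx->dev->dev_mutex;
	return vb2_queue_init(dst_vq);
}

With vq->lock set, v4l2_m2m_fop_poll() and v4l2_m2m_fop_mmap() can also take over for the deleted poll/mmap wrappers, which is exactly the substitution made in the v4l2_file_operations hunks above.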
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 561bce8ffb1b..5807185262fe 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -290,9 +290,11 @@ static int isp_xclk_init(struct isp_device *isp)
struct clk_init_data init;
unsigned int i;
+ for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
+ isp->xclks[i].clk = ERR_PTR(-EINVAL);
+
for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
struct isp_xclk *xclk = &isp->xclks[i];
- struct clk *clk;
xclk->isp = isp;
xclk->id = i == 0 ? ISP_XCLK_A : ISP_XCLK_B;
@@ -305,10 +307,15 @@ static int isp_xclk_init(struct isp_device *isp)
init.num_parents = 1;
xclk->hw.init = &init;
-
- clk = devm_clk_register(isp->dev, &xclk->hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ /*
+ * The first argument is NULL in order to avoid circular
+ * reference, as this driver takes reference on the
+ * sensor subdevice modules and the sensors would take
+ * reference on this module through clk_get().
+ */
+ xclk->clk = clk_register(NULL, &xclk->hw);
+ if (IS_ERR(xclk->clk))
+ return PTR_ERR(xclk->clk);
if (pdata->xclks[i].con_id == NULL &&
pdata->xclks[i].dev_id == NULL)
@@ -320,7 +327,7 @@ static int isp_xclk_init(struct isp_device *isp)
xclk->lookup->con_id = pdata->xclks[i].con_id;
xclk->lookup->dev_id = pdata->xclks[i].dev_id;
- xclk->lookup->clk = clk;
+ xclk->lookup->clk = xclk->clk;
clkdev_add(xclk->lookup);
}
@@ -335,6 +342,9 @@ static void isp_xclk_cleanup(struct isp_device *isp)
for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
struct isp_xclk *xclk = &isp->xclks[i];
+ if (!IS_ERR(xclk->clk))
+ clk_unregister(xclk->clk);
+
if (xclk->lookup)
clkdev_drop(xclk->lookup);
}
@@ -863,15 +873,12 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
unsigned long flags;
int ret;
- /* If the preview engine crashed it might not respond to read/write
- * operations on the L4 bus. This would result in a bus fault and a
- * kernel oops. Refuse to start streaming in that case. This check must
- * be performed before the loop below to avoid starting entities if the
- * pipeline won't start anyway (those entities would then likely fail to
- * stop, making the problem worse).
+ /* Refuse to start streaming if an entity included in the pipeline has
+ * crashed. This check must be performed before the loop below to avoid
+ * starting entities if the pipeline won't start anyway (those entities
+ * would then likely fail to stop, making the problem worse).
*/
- if ((pipe->entities & isp->crashed) &
- (1U << isp->isp_prev.subdev.entity.id))
+ if (pipe->entities & isp->crashed)
return -EIO;
spin_lock_irqsave(&pipe->lock, flags);
@@ -1004,13 +1011,23 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
else
ret = 0;
+ /* Handle stop failures. An entity that fails to stop can
+ * usually just be restarted. Flag the stop failure nonetheless
+ * to trigger an ISP reset the next time the device is released,
+ * just in case.
+ *
+ * The preview engine is a special case. A failure to stop can
+ * mean a hardware crash. When that happens the preview engine
+ * won't respond to read/write operations on the L4 bus anymore,
+ * resulting in a bus fault and a kernel oops next time it gets
+ * accessed. Mark it as crashed to prevent pipelines including
+ * it from being started.
+ */
if (ret) {
dev_info(isp->dev, "Unable to stop %s\n", subdev->name);
- /* If the entity failed to stopped, assume it has
- * crashed. Mark it as such, the ISP will be reset when
- * applications will release it.
- */
- isp->crashed |= 1U << subdev->entity.id;
+ isp->stop_failure = true;
+ if (subdev == &isp->isp_prev.subdev)
+ isp->crashed |= 1U << subdev->entity.id;
failure = -ETIMEDOUT;
}
}
@@ -1047,6 +1064,23 @@ int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
}
/*
+ * omap3isp_pipeline_cancel_stream - Cancel stream on a pipeline
+ * @pipe: ISP pipeline
+ *
+ * Cancelling a stream marks all buffers on all video nodes in the pipeline as
+ * erroneous and makes sure no new buffer can be queued. This function is called
+ * when a fatal error that prevents any further operation on the pipeline
+ * occurs.
+ */
+void omap3isp_pipeline_cancel_stream(struct isp_pipeline *pipe)
+{
+ if (pipe->input)
+ omap3isp_video_cancel_stream(pipe->input);
+ if (pipe->output)
+ omap3isp_video_cancel_stream(pipe->output);
+}
+
+/*
* isp_pipeline_resume - Resume streaming on a pipeline
* @pipe: ISP pipeline
*
@@ -1198,6 +1232,7 @@ static int isp_reset(struct isp_device *isp)
udelay(1);
}
+ isp->stop_failure = false;
isp->crashed = 0;
return 0;
}
@@ -1609,7 +1644,7 @@ void omap3isp_put(struct isp_device *isp)
/* Reset the ISP if an entity has failed to stop. This is the
* only way to recover from such conditions.
*/
- if (isp->crashed)
+ if (isp->crashed || isp->stop_failure)
isp_reset(isp);
isp_disable_clocks(isp);
}
@@ -2120,28 +2155,13 @@ static int isp_map_mem_resource(struct platform_device *pdev,
/* request the mem region for the camera registers */
mem = platform_get_resource(pdev, IORESOURCE_MEM, res);
- if (!mem) {
- dev_err(isp->dev, "no mem resource?\n");
- return -ENODEV;
- }
-
- if (!devm_request_mem_region(isp->dev, mem->start, resource_size(mem),
- pdev->name)) {
- dev_err(isp->dev,
- "cannot reserve camera register I/O region\n");
- return -ENODEV;
- }
- isp->mmio_base_phys[res] = mem->start;
- isp->mmio_size[res] = resource_size(mem);
/* map the region */
- isp->mmio_base[res] = devm_ioremap_nocache(isp->dev,
- isp->mmio_base_phys[res],
- isp->mmio_size[res]);
- if (!isp->mmio_base[res]) {
- dev_err(isp->dev, "cannot map camera register I/O region\n");
- return -ENODEV;
- }
+ isp->mmio_base[res] = devm_ioremap_resource(isp->dev, mem);
+ if (IS_ERR(isp->mmio_base[res]))
+ return PTR_ERR(isp->mmio_base[res]);
+
+ isp->mmio_base_phys[res] = mem->start;
return 0;
}
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index ce65d3ae1aa7..081f5ec5a663 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -135,6 +135,7 @@ struct isp_xclk {
struct isp_device *isp;
struct clk_hw hw;
struct clk_lookup *lookup;
+ struct clk *clk;
enum isp_xclk_id id;
spinlock_t lock; /* Protects enabled and divider */
@@ -151,9 +152,9 @@ struct isp_xclk {
* regions.
* @mmio_base_phys: Array with physical L4 bus addresses for ISP register
* regions.
- * @mmio_size: Array with ISP register regions size in bytes.
* @stat_lock: Spinlock for handling statistics
* @isp_mutex: Mutex for serializing requests to ISP.
+ * @stop_failure: Indicates that an entity failed to stop.
* @crashed: Bitmask of crashed entities (indexed by entity ID)
* @has_context: Context has been saved at least once and can be restored.
* @ref_count: Reference count for handling multiple ISP requests.
@@ -187,11 +188,11 @@ struct isp_device {
void __iomem *mmio_base[OMAP3_ISP_IOMEM_LAST];
unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
- resource_size_t mmio_size[OMAP3_ISP_IOMEM_LAST];
/* ISP Obj */
spinlock_t stat_lock; /* common lock for statistic drivers */
struct mutex isp_mutex; /* For handling ref_count field */
+ bool stop_failure;
u32 crashed;
int has_context;
int ref_count;
@@ -237,6 +238,7 @@ int omap3isp_module_sync_is_stopping(wait_queue_head_t *wait,
int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
enum isp_pipeline_stream_state state);
+void omap3isp_pipeline_cancel_stream(struct isp_pipeline *pipe);
void omap3isp_configure_bridge(struct isp_device *isp,
enum ccdc_input_entity input,
const struct isp_parallel_platform_data *pdata,
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 907a205da5a5..5db2c88b9ad8 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -1516,6 +1516,8 @@ static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
if (ccdc_sbl_wait_idle(ccdc, 1000)) {
dev_info(isp->dev, "CCDC won't become idle!\n");
+ isp->crashed |= 1U << ccdc->subdev.entity.id;
+ omap3isp_pipeline_cancel_stream(pipe);
goto done;
}
@@ -2484,7 +2486,8 @@ static int ccdc_init_entities(struct isp_ccdc_device *ccdc)
v4l2_set_subdevdata(sd, ccdc);
sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
- pads[CCDC_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[CCDC_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
pads[CCDC_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
pads[CCDC_PAD_SOURCE_OF].flags = MEDIA_PAD_FL_SOURCE;
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index e71651429dda..e84fe0543e47 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -1076,7 +1076,8 @@ static int ccp2_init_entities(struct isp_ccp2_device *ccp2)
v4l2_set_subdevdata(sd, ccp2);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- pads[CCP2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[CCP2_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
pads[CCP2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
me->ops = &ccp2_media_ops;
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index 6db245d84bbb..620560828a48 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -1245,7 +1245,8 @@ static int csi2_init_entities(struct isp_csi2_device *csi2)
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
- pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
me->ops = &csi2_media_ops;
ret = media_entity_init(me, CSI2_PADS_NUM, pads, 0);
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index cd8831aebdeb..1c776c1186f1 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -2283,7 +2283,8 @@ static int preview_init_entities(struct isp_prev_device *prev)
v4l2_ctrl_handler_setup(&prev->ctrls);
sd->ctrl_handler = &prev->ctrls;
- pads[PREV_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[PREV_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
pads[PREV_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
me->ops = &preview_media_ops;
diff --git a/drivers/media/platform/omap3isp/ispqueue.c b/drivers/media/platform/omap3isp/ispqueue.c
index e15f01342058..5f0f8fab1d17 100644
--- a/drivers/media/platform/omap3isp/ispqueue.c
+++ b/drivers/media/platform/omap3isp/ispqueue.c
@@ -553,8 +553,10 @@ static void isp_video_buffer_query(struct isp_video_buffer *buf,
switch (buf->state) {
case ISP_BUF_STATE_ERROR:
vbuf->flags |= V4L2_BUF_FLAG_ERROR;
+ /* Fallthrough */
case ISP_BUF_STATE_DONE:
vbuf->flags |= V4L2_BUF_FLAG_DONE;
+ break;
case ISP_BUF_STATE_QUEUED:
case ISP_BUF_STATE_ACTIVE:
vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
index d11fb261d530..0d36b8bc9f98 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -1532,6 +1532,20 @@ static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
return 0;
}
+static int resizer_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct isp_res_device *res = v4l2_get_subdevdata(sd);
+ struct isp_pipeline *pipe = to_isp_pipeline(&sd->entity);
+
+ omap3isp_resizer_max_rate(res, &pipe->max_rate);
+
+ return v4l2_subdev_link_validate_default(sd, link,
+ source_fmt, sink_fmt);
+}
+
/*
* resizer_init_formats - Initialize formats on all pads
* @sd: ISP resizer V4L2 subdevice
@@ -1570,6 +1584,7 @@ static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = {
.set_fmt = resizer_set_format,
.get_selection = resizer_get_selection,
.set_selection = resizer_set_selection,
+ .link_validate = resizer_link_validate,
};
/* subdev operations */
@@ -1701,7 +1716,8 @@ static int resizer_init_entities(struct isp_res_device *res)
v4l2_set_subdevdata(sd, res);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- pads[RESZ_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[RESZ_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
pads[RESZ_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
me->ops = &resizer_media_ops;
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index 61e17f9bd8b9..a75407c3a726 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -1067,7 +1067,7 @@ static int isp_stat_init_entities(struct ispstat *stat, const char *name,
subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
v4l2_set_subdevdata(subdev, stat);
- stat->pad.flags = MEDIA_PAD_FL_SINK;
+ stat->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
me->ops = NULL;
return media_entity_init(me, 1, &stat->pad, 0);
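
Several omap3isp hunks add MEDIA_PAD_FL_MUST_CONNECT to sink pads; a sink pad carrying this flag makes pipeline start fail (typically with -ENOLINK) unless an enabled link feeds it. A small sketch of the pad setup, with invented names and a two-pad entity assumed purely for illustration:

#include <media/media-entity.h>

static int foo_init_entity(struct media_entity *me, struct media_pad *pads)
{
	/* The sink must have an enabled link before streaming can start. */
	pads[0].flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
	pads[1].flags = MEDIA_PAD_FL_SOURCE;

	return media_entity_init(me, 2, pads, 0);
}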
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index f6304bb074f5..856fdf554035 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -278,55 +278,6 @@ static int isp_video_get_graph_data(struct isp_video *video,
return 0;
}
-/*
- * Validate a pipeline by checking both ends of all links for format
- * discrepancies.
- *
- * Compute the minimum time per frame value as the maximum of time per frame
- * limits reported by every block in the pipeline.
- *
- * Return 0 if all formats match, or -EPIPE if at least one link is found with
- * different formats on its two ends or if the pipeline doesn't start with a
- * video source (either a subdev with no input pad, or a non-subdev entity).
- */
-static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
-{
- struct isp_device *isp = pipe->output->isp;
- struct media_pad *pad;
- struct v4l2_subdev *subdev;
-
- subdev = isp_video_remote_subdev(pipe->output, NULL);
- if (subdev == NULL)
- return -EPIPE;
-
- while (1) {
- /* Retrieve the sink format */
- pad = &subdev->entity.pads[0];
- if (!(pad->flags & MEDIA_PAD_FL_SINK))
- break;
-
- /* Update the maximum frame rate */
- if (subdev == &isp->isp_res.subdev)
- omap3isp_resizer_max_rate(&isp->isp_res,
- &pipe->max_rate);
-
- /* Retrieve the source format. Return an error if no source
- * entity can be found, and stop checking the pipeline if the
- * source entity isn't a subdev.
- */
- pad = media_entity_remote_pad(pad);
- if (pad == NULL)
- return -EPIPE;
-
- if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
- break;
-
- subdev = media_entity_to_v4l2_subdev(pad->entity);
- }
-
- return 0;
-}
-
static int
__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
{
@@ -460,6 +411,15 @@ static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
struct isp_video *video = vfh->video;
unsigned long addr;
+ /* Refuse to prepare the buffer if the video node has registered an
+ * error. We don't need to take any lock here as the operation is
+ * inherently racy. The authoritative check will be performed in the
+ * queue handler, which can't return an error; this check is just a best
+ * effort to notify userspace as early as possible.
+ */
+ if (unlikely(video->error))
+ return -EIO;
+
addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
if (IS_ERR_VALUE(addr))
return -EIO;
@@ -496,6 +456,12 @@ static void isp_video_buffer_queue(struct isp_video_buffer *buf)
unsigned int empty;
unsigned int start;
+ if (unlikely(video->error)) {
+ buf->state = ISP_BUF_STATE_ERROR;
+ wake_up(&buf->wait);
+ return;
+ }
+
empty = list_empty(&video->dmaqueue);
list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);
@@ -618,6 +584,36 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
}
/*
+ * omap3isp_video_cancel_stream - Cancel stream on a video node
+ * @video: ISP video object
+ *
+ * Cancelling a stream marks all buffers on the video node as erroneous and makes
+ * sure no new buffer can be queued.
+ */
+void omap3isp_video_cancel_stream(struct isp_video *video)
+{
+ struct isp_video_queue *queue = video->queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->irqlock, flags);
+
+ while (!list_empty(&video->dmaqueue)) {
+ struct isp_video_buffer *buf;
+
+ buf = list_first_entry(&video->dmaqueue,
+ struct isp_video_buffer, irqlist);
+ list_del(&buf->irqlist);
+
+ buf->state = ISP_BUF_STATE_ERROR;
+ wake_up(&buf->wait);
+ }
+
+ video->error = true;
+
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+}
+
+/*
* omap3isp_video_resume - Perform resume operation on the buffers
* @video: ISP video object
* @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
@@ -1051,11 +1047,6 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
if (ret < 0)
goto err_check_format;
- /* Validate the pipeline and update its state. */
- ret = isp_video_validate_pipeline(pipe);
- if (ret < 0)
- goto err_check_format;
-
pipe->error = false;
spin_lock_irqsave(&pipe->lock, flags);
@@ -1159,6 +1150,7 @@ isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
omap3isp_video_queue_streamoff(&vfh->queue);
video->queue = NULL;
video->streaming = 0;
+ video->error = false;
if (video->isp->pdata->set_constraints)
video->isp->pdata->set_constraints(video->isp, false);
@@ -1332,11 +1324,13 @@ int omap3isp_video_init(struct isp_video *video, const char *name)
switch (video->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
direction = "output";
- video->pad.flags = MEDIA_PAD_FL_SINK;
+ video->pad.flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
direction = "input";
- video->pad.flags = MEDIA_PAD_FL_SOURCE;
+ video->pad.flags = MEDIA_PAD_FL_SOURCE
+ | MEDIA_PAD_FL_MUST_CONNECT;
video->video.vfl_dir = VFL_DIR_TX;
break;
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
index 1ad470ec2b9d..4e194076cc60 100644
--- a/drivers/media/platform/omap3isp/ispvideo.h
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -178,6 +178,7 @@ struct isp_video {
/* Pipeline state */
struct isp_pipeline pipe;
struct mutex stream_lock; /* pipeline and stream states */
+ bool error;
/* Video buffers queue */
struct isp_video_queue *queue;
@@ -207,6 +208,7 @@ int omap3isp_video_register(struct isp_video *video,
struct v4l2_device *vdev);
void omap3isp_video_unregister(struct isp_video *video);
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video);
+void omap3isp_video_cancel_stream(struct isp_video *video);
void omap3isp_video_resume(struct isp_video *video, int continuous);
struct media_pad *omap3isp_video_remote_pad(struct isp_video *video);
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 0b2948376aee..0fcf7d75e841 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -136,10 +136,9 @@ static int g2d_buf_prepare(struct vb2_buffer *vb)
static void g2d_buf_queue(struct vb2_buffer *vb)
{
struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
}
-
static struct vb2_ops g2d_qops = {
.queue_setup = g2d_queue_setup,
.buf_prepare = g2d_buf_prepare,
@@ -159,6 +158,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->mutex;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -171,6 +171,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->mutex;
return vb2_queue_init(dst_vq);
}
@@ -253,9 +254,9 @@ static int g2d_open(struct file *file)
kfree(ctx);
return -ERESTARTSYS;
}
- ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
- if (IS_ERR(ctx->m2m_ctx)) {
- ret = PTR_ERR(ctx->m2m_ctx);
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
mutex_unlock(&dev->mutex);
kfree(ctx);
return ret;
@@ -324,7 +325,7 @@ static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
struct vb2_queue *vq;
struct g2d_frame *frm;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
frm = get_frame(ctx, f->type);
@@ -384,7 +385,7 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
ret = vidioc_try_fmt(file, prv, f);
if (ret)
return ret;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (vb2_is_busy(vq)) {
v4l2_err(&dev->v4l2_dev, "queue (%d) bust\n", f->type);
return -EBUSY;
@@ -410,72 +411,6 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static unsigned int g2d_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct g2d_ctx *ctx = fh2ctx(file->private_data);
- struct g2d_dev *dev = ctx->dev;
- unsigned int res;
-
- mutex_lock(&dev->mutex);
- res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
- mutex_unlock(&dev->mutex);
- return res;
-}
-
-static int g2d_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct g2d_ctx *ctx = fh2ctx(file->private_data);
- struct g2d_dev *dev = ctx->dev;
- int ret;
-
- if (mutex_lock_interruptible(&dev->mutex))
- return -ERESTARTSYS;
- ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
- mutex_unlock(&dev->mutex);
- return ret;
-}
-
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *reqbufs)
-{
- struct g2d_ctx *ctx = priv;
- return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct g2d_ctx *ctx = priv;
- return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct g2d_ctx *ctx = priv;
- return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct g2d_ctx *ctx = priv;
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct g2d_ctx *ctx = priv;
- return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct g2d_ctx *ctx = priv;
- return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
static int vidioc_cropcap(struct file *file, void *priv,
struct v4l2_cropcap *cr)
{
@@ -551,20 +486,6 @@ static int vidioc_s_crop(struct file *file, void *prv, const struct v4l2_crop *c
return 0;
}
-static void g2d_lock(void *prv)
-{
- struct g2d_ctx *ctx = prv;
- struct g2d_dev *dev = ctx->dev;
- mutex_lock(&dev->mutex);
-}
-
-static void g2d_unlock(void *prv)
-{
- struct g2d_ctx *ctx = prv;
- struct g2d_dev *dev = ctx->dev;
- mutex_unlock(&dev->mutex);
-}
-
static void job_abort(void *prv)
{
struct g2d_ctx *ctx = prv;
@@ -589,8 +510,8 @@ static void device_run(void *prv)
dev->curr = ctx;
- src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
clk_enable(dev->gate);
g2d_reset(dev);
@@ -631,8 +552,8 @@ static irqreturn_t g2d_isr(int irq, void *prv)
BUG_ON(ctx == NULL);
- src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
BUG_ON(src == NULL);
BUG_ON(dst == NULL);
@@ -642,7 +563,7 @@ static irqreturn_t g2d_isr(int irq, void *prv)
v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
- v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
+ v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
dev->curr = NULL;
wake_up(&dev->irq_queue);
@@ -653,9 +574,9 @@ static const struct v4l2_file_operations g2d_fops = {
.owner = THIS_MODULE,
.open = g2d_open,
.release = g2d_release,
- .poll = g2d_poll,
+ .poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
- .mmap = g2d_mmap,
+ .mmap = v4l2_m2m_fop_mmap,
};
static const struct v4l2_ioctl_ops g2d_ioctl_ops = {
@@ -671,14 +592,13 @@ static const struct v4l2_ioctl_ops g2d_ioctl_ops = {
.vidioc_try_fmt_vid_out = vidioc_try_fmt,
.vidioc_s_fmt_vid_out = vidioc_s_fmt,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
-
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_g_crop = vidioc_g_crop,
.vidioc_s_crop = vidioc_s_crop,
@@ -697,8 +617,6 @@ static struct video_device g2d_videodev = {
static struct v4l2_m2m_ops g2d_m2m_ops = {
.device_run = device_run,
.job_abort = job_abort,
- .lock = g2d_lock,
- .unlock = g2d_unlock,
};
static const struct of_device_id exynos_g2d_match[];
diff --git a/drivers/media/platform/s5p-g2d/g2d.h b/drivers/media/platform/s5p-g2d/g2d.h
index 300ca05ba404..b0e52ab7ecdb 100644
--- a/drivers/media/platform/s5p-g2d/g2d.h
+++ b/drivers/media/platform/s5p-g2d/g2d.h
@@ -57,7 +57,6 @@ struct g2d_frame {
struct g2d_ctx {
struct v4l2_fh fh;
struct g2d_dev *dev;
- struct v4l2_m2m_ctx *m2m_ctx;
struct g2d_frame in;
struct g2d_frame out;
struct v4l2_ctrl *ctrl_hflip;
diff --git a/drivers/media/platform/s5p-jpeg/Makefile b/drivers/media/platform/s5p-jpeg/Makefile
index d18cb5edd2d5..a1a9169254c3 100644
--- a/drivers/media/platform/s5p-jpeg/Makefile
+++ b/drivers/media/platform/s5p-jpeg/Makefile
@@ -1,2 +1,2 @@
-s5p-jpeg-objs := jpeg-core.o
+s5p-jpeg-objs := jpeg-core.o jpeg-hw-exynos4.o jpeg-hw-s5p.o
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 9b88a4601007..7d68d0b9966a 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1,9 +1,10 @@
/* linux/drivers/media/platform/s5p-jpeg/jpeg-core.c
*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2011-2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -17,6 +18,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -28,70 +30,234 @@
#include <media/videobuf2-dma-contig.h>
#include "jpeg-core.h"
-#include "jpeg-hw.h"
+#include "jpeg-hw-s5p.h"
+#include "jpeg-hw-exynos4.h"
+#include "jpeg-regs.h"
-static struct s5p_jpeg_fmt formats_enc[] = {
+static struct s5p_jpeg_fmt sjpeg_formats[] = {
{
.name = "JPEG JFIF",
.fourcc = V4L2_PIX_FMT_JPEG,
+ .flags = SJPEG_FMT_FLAG_ENC_CAPTURE |
+ SJPEG_FMT_FLAG_DEC_OUTPUT |
+ SJPEG_FMT_FLAG_S5P |
+ SJPEG_FMT_FLAG_EXYNOS4,
+ },
+ {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
.colplanes = 1,
- .types = MEM2MEM_CAPTURE,
+ .h_align = 4,
+ .v_align = 3,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_S5P |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
.name = "YUV 4:2:2 packed, YCbYCr",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.colplanes = 1,
- .types = MEM2MEM_OUTPUT,
+ .h_align = 1,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ },
+ {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .depth = 16,
+ .colplanes = 1,
+ .h_align = 1,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
.name = "RGB565",
.fourcc = V4L2_PIX_FMT_RGB565,
.depth = 16,
.colplanes = 1,
- .types = MEM2MEM_OUTPUT,
+ .h_align = 0,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
-};
-#define NUM_FORMATS_ENC ARRAY_SIZE(formats_enc)
-
-static struct s5p_jpeg_fmt formats_dec[] = {
{
- .name = "YUV 4:2:0 planar, YCbCr",
- .fourcc = V4L2_PIX_FMT_YUV420,
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .depth = 16,
+ .colplanes = 1,
+ .h_align = 0,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_S5P |
+ SJPEG_FMT_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ },
+ {
+ .name = "ARGB8888, 32 bpp",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .depth = 32,
+ .colplanes = 1,
+ .h_align = 0,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ },
+ {
+ .name = "YUV 4:4:4 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV24,
+ .depth = 24,
+ .colplanes = 2,
+ .h_align = 0,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ },
+ {
+ .name = "YUV 4:4:4 planar, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV42,
+ .depth = 24,
+ .colplanes = 2,
+ .h_align = 0,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ },
+ {
+ .name = "YUV 4:2:2 planar, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .depth = 16,
+ .colplanes = 2,
+ .h_align = 1,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ },
+ {
+ .name = "YUV 4:2:2 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .depth = 16,
+ .colplanes = 2,
+ .h_align = 1,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ },
+ {
+ .name = "YUV 4:2:0 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12,
.depth = 12,
- .colplanes = 3,
+ .colplanes = 2,
+ .h_align = 1,
+ .v_align = 1,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+ },
+ {
+ .name = "YUV 4:2:0 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .depth = 12,
+ .colplanes = 2,
.h_align = 4,
.v_align = 4,
- .types = MEM2MEM_CAPTURE,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_S5P |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
- .name = "YUV 4:2:2 packed, YCbYCr",
- .fourcc = V4L2_PIX_FMT_YUYV,
- .depth = 16,
- .colplanes = 1,
- .h_align = 4,
- .v_align = 3,
- .types = MEM2MEM_CAPTURE,
+ .name = "YUV 4:2:0 planar, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .depth = 12,
+ .colplanes = 2,
+ .h_align = 1,
+ .v_align = 1,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
- .name = "JPEG JFIF",
- .fourcc = V4L2_PIX_FMT_JPEG,
+ .name = "YUV 4:2:0 contiguous 3-planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .colplanes = 3,
+ .h_align = 1,
+ .v_align = 1,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+ },
+ {
+ .name = "Gray",
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .depth = 8,
.colplanes = 1,
- .types = MEM2MEM_OUTPUT,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY,
},
};
-#define NUM_FORMATS_DEC ARRAY_SIZE(formats_dec)
+#define SJPEG_NUM_FORMATS ARRAY_SIZE(sjpeg_formats)
static const unsigned char qtbl_luminance[4][64] = {
- {/* level 1 - high quality */
- 8, 6, 6, 8, 12, 14, 16, 17,
- 6, 6, 6, 8, 10, 13, 12, 15,
- 6, 6, 7, 8, 13, 14, 18, 24,
- 8, 8, 8, 14, 13, 19, 24, 35,
- 12, 10, 13, 13, 20, 26, 34, 39,
- 14, 13, 14, 19, 26, 34, 39, 39,
- 16, 12, 18, 24, 34, 39, 39, 39,
- 17, 15, 24, 35, 39, 39, 39, 39
+ {/*level 0 - high compression quality */
+ 20, 16, 25, 39, 50, 46, 62, 68,
+ 16, 18, 23, 38, 38, 53, 65, 68,
+ 25, 23, 31, 38, 53, 65, 68, 68,
+ 39, 38, 38, 53, 65, 68, 68, 68,
+ 50, 38, 53, 65, 68, 68, 68, 68,
+ 46, 53, 65, 68, 68, 68, 68, 68,
+ 62, 65, 68, 68, 68, 68, 68, 68,
+ 68, 68, 68, 68, 68, 68, 68, 68
+ },
+ {/* level 1 */
+ 16, 11, 11, 16, 23, 27, 31, 30,
+ 11, 12, 12, 15, 20, 23, 23, 30,
+ 11, 12, 13, 16, 23, 26, 35, 47,
+ 16, 15, 16, 23, 26, 37, 47, 64,
+ 23, 20, 23, 26, 39, 51, 64, 64,
+ 27, 23, 26, 37, 51, 64, 64, 64,
+ 31, 23, 35, 47, 64, 64, 64, 64,
+ 30, 30, 47, 64, 64, 64, 64, 64
},
{/* level 2 */
12, 8, 8, 12, 17, 21, 24, 23,
@@ -103,38 +269,38 @@ static const unsigned char qtbl_luminance[4][64] = {
24, 18, 27, 36, 51, 59, 59, 59,
23, 23, 36, 53, 59, 59, 59, 59
},
- {/* level 3 */
- 16, 11, 11, 16, 23, 27, 31, 30,
- 11, 12, 12, 15, 20, 23, 23, 30,
- 11, 12, 13, 16, 23, 26, 35, 47,
- 16, 15, 16, 23, 26, 37, 47, 64,
- 23, 20, 23, 26, 39, 51, 64, 64,
- 27, 23, 26, 37, 51, 64, 64, 64,
- 31, 23, 35, 47, 64, 64, 64, 64,
- 30, 30, 47, 64, 64, 64, 64, 64
- },
- {/*level 4 - low quality */
- 20, 16, 25, 39, 50, 46, 62, 68,
- 16, 18, 23, 38, 38, 53, 65, 68,
- 25, 23, 31, 38, 53, 65, 68, 68,
- 39, 38, 38, 53, 65, 68, 68, 68,
- 50, 38, 53, 65, 68, 68, 68, 68,
- 46, 53, 65, 68, 68, 68, 68, 68,
- 62, 65, 68, 68, 68, 68, 68, 68,
- 68, 68, 68, 68, 68, 68, 68, 68
+ {/* level 3 - low compression quality */
+ 8, 6, 6, 8, 12, 14, 16, 17,
+ 6, 6, 6, 8, 10, 13, 12, 15,
+ 6, 6, 7, 8, 13, 14, 18, 24,
+ 8, 8, 8, 14, 13, 19, 24, 35,
+ 12, 10, 13, 13, 20, 26, 34, 39,
+ 14, 13, 14, 19, 26, 34, 39, 39,
+ 16, 12, 18, 24, 34, 39, 39, 39,
+ 17, 15, 24, 35, 39, 39, 39, 39
}
};
static const unsigned char qtbl_chrominance[4][64] = {
- {/* level 1 - high quality */
- 9, 8, 9, 11, 14, 17, 19, 24,
- 8, 10, 9, 11, 14, 13, 17, 22,
- 9, 9, 13, 14, 13, 15, 23, 26,
- 11, 11, 14, 14, 15, 20, 26, 33,
- 14, 14, 13, 15, 20, 24, 33, 39,
- 17, 13, 15, 20, 24, 32, 39, 39,
- 19, 17, 23, 26, 33, 39, 39, 39,
- 24, 22, 26, 33, 39, 39, 39, 39
+ {/*level 0 - high compression quality */
+ 21, 25, 32, 38, 54, 68, 68, 68,
+ 25, 28, 24, 38, 54, 68, 68, 68,
+ 32, 24, 32, 43, 66, 68, 68, 68,
+ 38, 38, 43, 53, 68, 68, 68, 68,
+ 54, 54, 66, 68, 68, 68, 68, 68,
+ 68, 68, 68, 68, 68, 68, 68, 68,
+ 68, 68, 68, 68, 68, 68, 68, 68,
+ 68, 68, 68, 68, 68, 68, 68, 68
+ },
+ {/* level 1 */
+ 17, 15, 17, 21, 20, 26, 38, 48,
+ 15, 19, 18, 17, 20, 26, 35, 43,
+ 17, 18, 20, 22, 26, 30, 46, 53,
+ 21, 17, 22, 28, 30, 39, 53, 64,
+ 20, 20, 26, 30, 39, 48, 64, 64,
+ 26, 26, 30, 39, 48, 63, 64, 64,
+ 38, 35, 46, 53, 64, 64, 64, 64,
+ 48, 43, 53, 64, 64, 64, 64, 64
},
{/* level 2 */
13, 11, 13, 16, 20, 20, 29, 37,
@@ -146,25 +312,15 @@ static const unsigned char qtbl_chrominance[4][64] = {
29, 26, 35, 40, 50, 59, 59, 59,
37, 32, 40, 50, 59, 59, 59, 59
},
- {/* level 3 */
- 17, 15, 17, 21, 20, 26, 38, 48,
- 15, 19, 18, 17, 20, 26, 35, 43,
- 17, 18, 20, 22, 26, 30, 46, 53,
- 21, 17, 22, 28, 30, 39, 53, 64,
- 20, 20, 26, 30, 39, 48, 64, 64,
- 26, 26, 30, 39, 48, 63, 64, 64,
- 38, 35, 46, 53, 64, 64, 64, 64,
- 48, 43, 53, 64, 64, 64, 64, 64
- },
- {/*level 4 - low quality */
- 21, 25, 32, 38, 54, 68, 68, 68,
- 25, 28, 24, 38, 54, 68, 68, 68,
- 32, 24, 32, 43, 66, 68, 68, 68,
- 38, 38, 43, 53, 68, 68, 68, 68,
- 54, 54, 66, 68, 68, 68, 68, 68,
- 68, 68, 68, 68, 68, 68, 68, 68,
- 68, 68, 68, 68, 68, 68, 68, 68,
- 68, 68, 68, 68, 68, 68, 68, 68
+ {/* level 3 - low compression quality */
+ 9, 8, 9, 11, 14, 17, 19, 24,
+ 8, 10, 9, 11, 14, 13, 17, 22,
+ 9, 9, 13, 14, 13, 15, 23, 26,
+ 11, 11, 14, 14, 15, 20, 26, 33,
+ 14, 14, 13, 15, 20, 24, 33, 39,
+ 17, 13, 15, 20, 24, 32, 39, 39,
+ 19, 17, 23, 26, 33, 39, 39, 39,
+ 24, 22, 26, 33, 39, 39, 39, 39
}
};
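/*
 * Note on the reordering above: the quantization tables now run from the
 * coarsest (level 0, high compression) to the finest (level 3, low
 * compression), so the V4L2_CID_JPEG_COMPRESSION_QUALITY control value can
 * be used directly as the table index (see the s5p_jpeg_s_ctrl() change
 * below) instead of being inverted with S5P_JPEG_COMPR_QUAL_WORST.
 */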
@@ -202,6 +358,106 @@ static const unsigned char hactblg0[162] = {
0xf9, 0xfa
};
+/*
+ * Fourcc downgrade schema lookup tables for 422 and 420
+ * chroma subsampling - the fourcc at each position corresponds to the
+ * fourcc at the same index in the fourcc_to_dwngrd_schema_id table,
+ * which makes it possible to pick the most suitable fourcc counterpart
+ * for the given downgraded subsampling property.
+ */
+static const u32 subs422_fourcc_dwngrd_schema[] = {
+ V4L2_PIX_FMT_NV16,
+ V4L2_PIX_FMT_NV61,
+};
+
+static const u32 subs420_fourcc_dwngrd_schema[] = {
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+ V4L2_PIX_FMT_GREY,
+ V4L2_PIX_FMT_GREY,
+ V4L2_PIX_FMT_GREY,
+ V4L2_PIX_FMT_GREY,
+};
+
+/*
+ * Lookup table for translation of a fourcc to the position
+ * of its downgraded counterpart in the *fourcc_dwngrd_schema
+ * tables.
+ */
+static const u32 fourcc_to_dwngrd_schema_id[] = {
+ V4L2_PIX_FMT_NV24,
+ V4L2_PIX_FMT_NV42,
+ V4L2_PIX_FMT_NV16,
+ V4L2_PIX_FMT_NV61,
+ V4L2_PIX_FMT_YUYV,
+ V4L2_PIX_FMT_YVYU,
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_GREY,
+};
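/*
 * Worked example of the two tables above: when a decoded JPEG reports 4:2:0
 * subsampling and userspace asks for V4L2_PIX_FMT_NV61 (4:2:2), NV61 is
 * found at index 3 of fourcc_to_dwngrd_schema_id, so
 * subs420_fourcc_dwngrd_schema[3] selects V4L2_PIX_FMT_NV21 as the
 * downgraded capture format.
 */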
+
+static int s5p_jpeg_get_dwngrd_sch_id_by_fourcc(u32 fourcc)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(fourcc_to_dwngrd_schema_id); ++i) {
+ if (fourcc_to_dwngrd_schema_id[i] == fourcc)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int s5p_jpeg_adjust_fourcc_to_subsampling(
+ enum v4l2_jpeg_chroma_subsampling subs,
+ u32 in_fourcc,
+ u32 *out_fourcc,
+ struct s5p_jpeg_ctx *ctx)
+{
+ int dwngrd_sch_id;
+
+ if (ctx->subsampling != V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY) {
+ dwngrd_sch_id =
+ s5p_jpeg_get_dwngrd_sch_id_by_fourcc(in_fourcc);
+ if (dwngrd_sch_id < 0)
+ return -EINVAL;
+ }
+
+ switch (ctx->subsampling) {
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY:
+ *out_fourcc = V4L2_PIX_FMT_GREY;
+ break;
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_420:
+ if (dwngrd_sch_id >
+ ARRAY_SIZE(subs420_fourcc_dwngrd_schema) - 1)
+ return -EINVAL;
+ *out_fourcc = subs420_fourcc_dwngrd_schema[dwngrd_sch_id];
+ break;
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_422:
+ if (dwngrd_sch_id >
+ ARRAY_SIZE(subs422_fourcc_dwngrd_schema) - 1)
+ return -EINVAL;
+ *out_fourcc = subs422_fourcc_dwngrd_schema[dwngrd_sch_id];
+ break;
+ default:
+ *out_fourcc = V4L2_PIX_FMT_GREY;
+ break;
+ }
+
+ return 0;
+}
+
+static int exynos4x12_decoded_subsampling[] = {
+ V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+};
+
static inline struct s5p_jpeg_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
{
return container_of(c->handler, struct s5p_jpeg_ctx, ctrl_handler);
@@ -212,8 +468,24 @@ static inline struct s5p_jpeg_ctx *fh_to_ctx(struct v4l2_fh *fh)
return container_of(fh, struct s5p_jpeg_ctx, fh);
}
-static inline void jpeg_set_qtbl(void __iomem *regs, const unsigned char *qtbl,
- unsigned long tab, int len)
+static int s5p_jpeg_to_user_subsampling(struct s5p_jpeg_ctx *ctx)
+{
+ WARN_ON(ctx->subsampling > 3);
+
+ if (ctx->jpeg->variant->version == SJPEG_S5P) {
+ if (ctx->subsampling > 2)
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
+ return ctx->subsampling;
+ } else {
+ if (ctx->subsampling > 2)
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_420;
+ return exynos4x12_decoded_subsampling[ctx->subsampling];
+ }
+}
+
+static inline void s5p_jpeg_set_qtbl(void __iomem *regs,
+ const unsigned char *qtbl,
+ unsigned long tab, int len)
{
int i;
@@ -221,22 +493,25 @@ static inline void jpeg_set_qtbl(void __iomem *regs, const unsigned char *qtbl,
writel((unsigned int)qtbl[i], regs + tab + (i * 0x04));
}
-static inline void jpeg_set_qtbl_lum(void __iomem *regs, int quality)
+static inline void s5p_jpeg_set_qtbl_lum(void __iomem *regs, int quality)
{
/* this driver fills quantisation table 0 with data for luma */
- jpeg_set_qtbl(regs, qtbl_luminance[quality], S5P_JPG_QTBL_CONTENT(0),
- ARRAY_SIZE(qtbl_luminance[quality]));
+ s5p_jpeg_set_qtbl(regs, qtbl_luminance[quality],
+ S5P_JPG_QTBL_CONTENT(0),
+ ARRAY_SIZE(qtbl_luminance[quality]));
}
-static inline void jpeg_set_qtbl_chr(void __iomem *regs, int quality)
+static inline void s5p_jpeg_set_qtbl_chr(void __iomem *regs, int quality)
{
/* this driver fills quantisation table 1 with data for chroma */
- jpeg_set_qtbl(regs, qtbl_chrominance[quality], S5P_JPG_QTBL_CONTENT(1),
- ARRAY_SIZE(qtbl_chrominance[quality]));
+ s5p_jpeg_set_qtbl(regs, qtbl_chrominance[quality],
+ S5P_JPG_QTBL_CONTENT(1),
+ ARRAY_SIZE(qtbl_chrominance[quality]));
}
-static inline void jpeg_set_htbl(void __iomem *regs, const unsigned char *htbl,
- unsigned long tab, int len)
+static inline void s5p_jpeg_set_htbl(void __iomem *regs,
+ const unsigned char *htbl,
+ unsigned long tab, int len)
{
int i;
@@ -244,28 +519,84 @@ static inline void jpeg_set_htbl(void __iomem *regs, const unsigned char *htbl,
writel((unsigned int)htbl[i], regs + tab + (i * 0x04));
}
-static inline void jpeg_set_hdctbl(void __iomem *regs)
+static inline void s5p_jpeg_set_hdctbl(void __iomem *regs)
{
/* this driver fills table 0 for this component */
- jpeg_set_htbl(regs, hdctbl0, S5P_JPG_HDCTBL(0), ARRAY_SIZE(hdctbl0));
+ s5p_jpeg_set_htbl(regs, hdctbl0, S5P_JPG_HDCTBL(0),
+ ARRAY_SIZE(hdctbl0));
}
-static inline void jpeg_set_hdctblg(void __iomem *regs)
+static inline void s5p_jpeg_set_hdctblg(void __iomem *regs)
{
/* this driver fills table 0 for this component */
- jpeg_set_htbl(regs, hdctblg0, S5P_JPG_HDCTBLG(0), ARRAY_SIZE(hdctblg0));
+ s5p_jpeg_set_htbl(regs, hdctblg0, S5P_JPG_HDCTBLG(0),
+ ARRAY_SIZE(hdctblg0));
}
-static inline void jpeg_set_hactbl(void __iomem *regs)
+static inline void s5p_jpeg_set_hactbl(void __iomem *regs)
{
/* this driver fills table 0 for this component */
- jpeg_set_htbl(regs, hactbl0, S5P_JPG_HACTBL(0), ARRAY_SIZE(hactbl0));
+ s5p_jpeg_set_htbl(regs, hactbl0, S5P_JPG_HACTBL(0),
+ ARRAY_SIZE(hactbl0));
}
-static inline void jpeg_set_hactblg(void __iomem *regs)
+static inline void s5p_jpeg_set_hactblg(void __iomem *regs)
{
/* this driver fills table 0 for this component */
- jpeg_set_htbl(regs, hactblg0, S5P_JPG_HACTBLG(0), ARRAY_SIZE(hactblg0));
+ s5p_jpeg_set_htbl(regs, hactblg0, S5P_JPG_HACTBLG(0),
+ ARRAY_SIZE(hactblg0));
+}
+
+static inline void exynos4_jpeg_set_tbl(void __iomem *regs,
+ const unsigned char *tbl,
+ unsigned long tab, int len)
+{
+ int i;
+ unsigned int dword;
+
+ for (i = 0; i < len; i += 4) {
+ dword = tbl[i] |
+ (tbl[i + 1] << 8) |
+ (tbl[i + 2] << 16) |
+ (tbl[i + 3] << 24);
+ writel(dword, regs + tab + i);
+ }
+}
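/*
 * Unlike s5p_jpeg_set_htbl(), which writes one table byte per 32-bit
 * register, the helper above packs four consecutive bytes into each word:
 * for tbl[] = {a, b, c, d} the first write is (a | b << 8 | c << 16 |
 * d << 24) at offset tab.
 */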
+
+static inline void exynos4_jpeg_set_qtbl_lum(void __iomem *regs, int quality)
+{
+ /* this driver fills quantisation table 0 with data for luma */
+ exynos4_jpeg_set_tbl(regs, qtbl_luminance[quality],
+ EXYNOS4_QTBL_CONTENT(0),
+ ARRAY_SIZE(qtbl_luminance[quality]));
+}
+
+static inline void exynos4_jpeg_set_qtbl_chr(void __iomem *regs, int quality)
+{
+ /* this driver fills quantisation table 1 with data for chroma */
+ exynos4_jpeg_set_tbl(regs, qtbl_chrominance[quality],
+ EXYNOS4_QTBL_CONTENT(1),
+ ARRAY_SIZE(qtbl_chrominance[quality]));
+}
+
+void exynos4_jpeg_set_huff_tbl(void __iomem *base)
+{
+ exynos4_jpeg_set_tbl(base, hdctbl0, EXYNOS4_HUFF_TBL_HDCLL,
+ ARRAY_SIZE(hdctbl0));
+ exynos4_jpeg_set_tbl(base, hdctbl0, EXYNOS4_HUFF_TBL_HDCCL,
+ ARRAY_SIZE(hdctbl0));
+ exynos4_jpeg_set_tbl(base, hdctblg0, EXYNOS4_HUFF_TBL_HDCLV,
+ ARRAY_SIZE(hdctblg0));
+ exynos4_jpeg_set_tbl(base, hdctblg0, EXYNOS4_HUFF_TBL_HDCCV,
+ ARRAY_SIZE(hdctblg0));
+ exynos4_jpeg_set_tbl(base, hactbl0, EXYNOS4_HUFF_TBL_HACLL,
+ ARRAY_SIZE(hactbl0));
+ exynos4_jpeg_set_tbl(base, hactbl0, EXYNOS4_HUFF_TBL_HACCL,
+ ARRAY_SIZE(hactbl0));
+ exynos4_jpeg_set_tbl(base, hactblg0, EXYNOS4_HUFF_TBL_HACLV,
+ ARRAY_SIZE(hactblg0));
+ exynos4_jpeg_set_tbl(base, hactblg0, EXYNOS4_HUFF_TBL_HACCV,
+ ARRAY_SIZE(hactblg0));
}
/*
@@ -276,8 +607,8 @@ static inline void jpeg_set_hactblg(void __iomem *regs)
static int queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq);
-static struct s5p_jpeg_fmt *s5p_jpeg_find_format(unsigned int mode,
- __u32 pixelformat);
+static struct s5p_jpeg_fmt *s5p_jpeg_find_format(struct s5p_jpeg_ctx *ctx,
+ __u32 pixelformat, unsigned int fmt_type);
static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx);
static int s5p_jpeg_open(struct file *file)
@@ -285,7 +616,7 @@ static int s5p_jpeg_open(struct file *file)
struct s5p_jpeg *jpeg = video_drvdata(file);
struct video_device *vfd = video_devdata(file);
struct s5p_jpeg_ctx *ctx;
- struct s5p_jpeg_fmt *out_fmt;
+ struct s5p_jpeg_fmt *out_fmt, *cap_fmt;
int ret = 0;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -306,24 +637,31 @@ static int s5p_jpeg_open(struct file *file)
ctx->jpeg = jpeg;
if (vfd == jpeg->vfd_encoder) {
ctx->mode = S5P_JPEG_ENCODE;
- out_fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_RGB565);
+ out_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_RGB565,
+ FMT_TYPE_OUTPUT);
+ cap_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_JPEG,
+ FMT_TYPE_CAPTURE);
} else {
ctx->mode = S5P_JPEG_DECODE;
- out_fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_JPEG);
+ out_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_JPEG,
+ FMT_TYPE_OUTPUT);
+ cap_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_YUYV,
+ FMT_TYPE_CAPTURE);
}
- ret = s5p_jpeg_controls_create(ctx);
- if (ret < 0)
- goto error;
-
- ctx->m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx, queue_init);
- if (IS_ERR(ctx->m2m_ctx)) {
- ret = PTR_ERR(ctx->m2m_ctx);
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx, queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
goto error;
}
ctx->out_q.fmt = out_fmt;
- ctx->cap_q.fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_YUYV);
+ ctx->cap_q.fmt = cap_fmt;
+
+ ret = s5p_jpeg_controls_create(ctx);
+ if (ret < 0)
+ goto error;
+
mutex_unlock(&jpeg->lock);
return 0;
@@ -342,49 +680,23 @@ static int s5p_jpeg_release(struct file *file)
struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
mutex_lock(&jpeg->lock);
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
- mutex_unlock(&jpeg->lock);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
-
- return 0;
-}
-
-static unsigned int s5p_jpeg_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct s5p_jpeg *jpeg = video_drvdata(file);
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
- unsigned int res;
-
- mutex_lock(&jpeg->lock);
- res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
mutex_unlock(&jpeg->lock);
- return res;
-}
-
-static int s5p_jpeg_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct s5p_jpeg *jpeg = video_drvdata(file);
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
- int ret;
- if (mutex_lock_interruptible(&jpeg->lock))
- return -ERESTARTSYS;
- ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
- mutex_unlock(&jpeg->lock);
- return ret;
+ return 0;
}
static const struct v4l2_file_operations s5p_jpeg_fops = {
.owner = THIS_MODULE,
.open = s5p_jpeg_open,
.release = s5p_jpeg_release,
- .poll = s5p_jpeg_poll,
+ .poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
- .mmap = s5p_jpeg_mmap,
+ .mmap = v4l2_m2m_fop_mmap,
};
/*
@@ -427,10 +739,11 @@ static void skip(struct s5p_jpeg_buffer *buf, long len)
}
static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
- unsigned long buffer, unsigned long size)
+ unsigned long buffer, unsigned long size,
+ struct s5p_jpeg_ctx *ctx)
{
int c, components, notfound;
- unsigned int height, width, word;
+ unsigned int height, width, word, subsampling = 0;
long length;
struct s5p_jpeg_buffer jpeg_buffer;
@@ -469,7 +782,15 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
break;
notfound = 0;
- skip(&jpeg_buffer, components * 3);
+ if (components == 1) {
+ subsampling = 0x33;
+ } else {
+ skip(&jpeg_buffer, 1);
+ subsampling = get_byte(&jpeg_buffer);
+ skip(&jpeg_buffer, 1);
+ }
+
+ skip(&jpeg_buffer, components * 2);
break;
/* skip payload-less markers */
@@ -491,6 +812,24 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
result->w = width;
result->h = height;
result->size = components;
+
+ switch (subsampling) {
+ case 0x11:
+ ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444;
+ break;
+ case 0x21:
+ ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422;
+ break;
+ case 0x22:
+ ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420;
+ break;
+ case 0x33:
+ ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
+ break;
+ default:
+ return false;
+ }
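/*
 * The value tested above is the sampling-factor byte of the first (luma)
 * component in the SOF0 marker, laid out as (horizontal << 4 | vertical).
 * Luma sampled 2x2 relative to chroma (0x22) therefore means 4:2:0,
 * 2x1 (0x21) means 4:2:2, 1x1 (0x11) means 4:4:4, and the synthetic 0x33
 * assigned to single-component images maps to grayscale.
 */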
+
return !notfound;
}
@@ -521,13 +860,13 @@ static int s5p_jpeg_querycap(struct file *file, void *priv,
return 0;
}
-static int enum_fmt(struct s5p_jpeg_fmt *formats, int n,
+static int enum_fmt(struct s5p_jpeg_fmt *sjpeg_formats, int n,
struct v4l2_fmtdesc *f, u32 type)
{
int i, num = 0;
for (i = 0; i < n; ++i) {
- if (formats[i].types & type) {
+ if (sjpeg_formats[i].flags & type) {
/* index-th format of type type found ? */
if (num == f->index)
break;
@@ -541,8 +880,8 @@ static int enum_fmt(struct s5p_jpeg_fmt *formats, int n,
if (i >= n)
return -EINVAL;
- strlcpy(f->description, formats[i].name, sizeof(f->description));
- f->pixelformat = formats[i].fourcc;
+ strlcpy(f->description, sjpeg_formats[i].name, sizeof(f->description));
+ f->pixelformat = sjpeg_formats[i].fourcc;
return 0;
}
@@ -553,10 +892,11 @@ static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (ctx->mode == S5P_JPEG_ENCODE)
- return enum_fmt(formats_enc, NUM_FORMATS_ENC, f,
- MEM2MEM_CAPTURE);
+ return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_ENC_CAPTURE);
- return enum_fmt(formats_dec, NUM_FORMATS_DEC, f, MEM2MEM_CAPTURE);
+ return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_DEC_CAPTURE);
}
static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
@@ -565,10 +905,11 @@ static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (ctx->mode == S5P_JPEG_ENCODE)
- return enum_fmt(formats_enc, NUM_FORMATS_ENC, f,
- MEM2MEM_OUTPUT);
+ return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_ENC_OUTPUT);
- return enum_fmt(formats_dec, NUM_FORMATS_DEC, f, MEM2MEM_OUTPUT);
+ return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_DEC_OUTPUT);
}
static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx,
@@ -589,7 +930,7 @@ static int s5p_jpeg_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
struct v4l2_pix_format *pix = &f->fmt.pix;
struct s5p_jpeg_ctx *ct = fh_to_ctx(priv);
- vq = v4l2_m2m_get_vq(ct->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ct->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
@@ -615,29 +956,35 @@ static int s5p_jpeg_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
return 0;
}
-static struct s5p_jpeg_fmt *s5p_jpeg_find_format(unsigned int mode,
- u32 pixelformat)
+static struct s5p_jpeg_fmt *s5p_jpeg_find_format(struct s5p_jpeg_ctx *ctx,
+ u32 pixelformat, unsigned int fmt_type)
{
- unsigned int k;
- struct s5p_jpeg_fmt *formats;
- int n;
+ unsigned int k, fmt_flag, ver_flag;
- if (mode == S5P_JPEG_ENCODE) {
- formats = formats_enc;
- n = NUM_FORMATS_ENC;
- } else {
- formats = formats_dec;
- n = NUM_FORMATS_DEC;
- }
+ if (ctx->mode == S5P_JPEG_ENCODE)
+ fmt_flag = (fmt_type == FMT_TYPE_OUTPUT) ?
+ SJPEG_FMT_FLAG_ENC_OUTPUT :
+ SJPEG_FMT_FLAG_ENC_CAPTURE;
+ else
+ fmt_flag = (fmt_type == FMT_TYPE_OUTPUT) ?
+ SJPEG_FMT_FLAG_DEC_OUTPUT :
+ SJPEG_FMT_FLAG_DEC_CAPTURE;
+
+ if (ctx->jpeg->variant->version == SJPEG_S5P)
+ ver_flag = SJPEG_FMT_FLAG_S5P;
+ else
+ ver_flag = SJPEG_FMT_FLAG_EXYNOS4;
- for (k = 0; k < n; k++) {
- struct s5p_jpeg_fmt *fmt = &formats[k];
- if (fmt->fourcc == pixelformat)
+ for (k = 0; k < ARRAY_SIZE(sjpeg_formats); k++) {
+ struct s5p_jpeg_fmt *fmt = &sjpeg_formats[k];
+ if (fmt->fourcc == pixelformat &&
+ fmt->flags & fmt_flag &&
+ fmt->flags & ver_flag) {
return fmt;
+ }
}
return NULL;
-
}
static void jpeg_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
@@ -673,7 +1020,7 @@ static int vidioc_try_fmt(struct v4l2_format *f, struct s5p_jpeg_fmt *fmt,
/* V4L2 specification suggests the driver corrects the format struct
* if any of the dimensions is unsupported */
- if (q_type == MEM2MEM_OUTPUT)
+ if (q_type == FMT_TYPE_OUTPUT)
jpeg_bound_align_image(&pix->width, S5P_JPEG_MIN_WIDTH,
S5P_JPEG_MAX_WIDTH, 0,
&pix->height, S5P_JPEG_MIN_HEIGHT,
@@ -695,7 +1042,7 @@ static int vidioc_try_fmt(struct v4l2_format *f, struct s5p_jpeg_fmt *fmt,
bpl = pix->width; /* planar */
if (fmt->colplanes == 1 && /* packed */
- (bpl << 3) * fmt->depth < pix->width)
+ (bpl << 3) / fmt->depth < pix->width)
bpl = (pix->width * fmt->depth) >> 3;
pix->bytesperline = bpl;
@@ -709,17 +1056,41 @@ static int s5p_jpeg_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
struct s5p_jpeg_fmt *fmt;
+ int ret;
- fmt = s5p_jpeg_find_format(ctx->mode, f->fmt.pix.pixelformat);
- if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) {
+ fmt = s5p_jpeg_find_format(ctx, f->fmt.pix.pixelformat,
+ FMT_TYPE_CAPTURE);
+ if (!fmt) {
v4l2_err(&ctx->jpeg->v4l2_dev,
"Fourcc format (0x%08x) invalid.\n",
f->fmt.pix.pixelformat);
return -EINVAL;
}
- return vidioc_try_fmt(f, fmt, ctx, MEM2MEM_CAPTURE);
+ /*
+ * The exynos4x12 device requires the resulting YUV image
+ * subsampling not to be lower than the input JPEG subsampling.
+ * If this requirement is not met, downgrade the requested
+ * capture format to one whose subsampling matches the input JPEG.
+ */
+ if ((ctx->jpeg->variant->version != SJPEG_S5P) &&
+ (ctx->mode == S5P_JPEG_DECODE) &&
+ (fmt->flags & SJPEG_FMT_NON_RGB) &&
+ (fmt->subsampling < ctx->subsampling)) {
+ ret = s5p_jpeg_adjust_fourcc_to_subsampling(ctx->subsampling,
+ fmt->fourcc,
+ &pix->pixelformat,
+ ctx);
+ if (ret < 0)
+ pix->pixelformat = V4L2_PIX_FMT_GREY;
+
+ fmt = s5p_jpeg_find_format(ctx, pix->pixelformat,
+ FMT_TYPE_CAPTURE);
+ }
+
+ return vidioc_try_fmt(f, fmt, ctx, FMT_TYPE_CAPTURE);
}
static int s5p_jpeg_try_fmt_vid_out(struct file *file, void *priv,
@@ -728,15 +1099,16 @@ static int s5p_jpeg_try_fmt_vid_out(struct file *file, void *priv,
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
struct s5p_jpeg_fmt *fmt;
- fmt = s5p_jpeg_find_format(ctx->mode, f->fmt.pix.pixelformat);
- if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) {
+ fmt = s5p_jpeg_find_format(ctx, f->fmt.pix.pixelformat,
+ FMT_TYPE_OUTPUT);
+ if (!fmt) {
v4l2_err(&ctx->jpeg->v4l2_dev,
"Fourcc format (0x%08x) invalid.\n",
f->fmt.pix.pixelformat);
return -EINVAL;
}
- return vidioc_try_fmt(f, fmt, ctx, MEM2MEM_OUTPUT);
+ return vidioc_try_fmt(f, fmt, ctx, FMT_TYPE_OUTPUT);
}
static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f)
@@ -744,8 +1116,10 @@ static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f)
struct vb2_queue *vq;
struct s5p_jpeg_q_data *q_data = NULL;
struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_ctrl *ctrl_subs;
+ unsigned int f_type;
- vq = v4l2_m2m_get_vq(ct->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ct->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
@@ -757,7 +1131,10 @@ static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f)
return -EBUSY;
}
- q_data->fmt = s5p_jpeg_find_format(ct->mode, pix->pixelformat);
+ f_type = V4L2_TYPE_IS_OUTPUT(f->type) ?
+ FMT_TYPE_OUTPUT : FMT_TYPE_CAPTURE;
+
+ q_data->fmt = s5p_jpeg_find_format(ct, pix->pixelformat, f_type);
q_data->w = pix->width;
q_data->h = pix->height;
if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG)
@@ -765,6 +1142,13 @@ static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f)
else
q_data->size = pix->sizeimage;
+ if (f_type == FMT_TYPE_OUTPUT) {
+ ctrl_subs = v4l2_ctrl_find(&ct->ctrl_handler,
+ V4L2_CID_JPEG_CHROMA_SUBSAMPLING);
+ if (ctrl_subs)
+ v4l2_ctrl_s_ctrl(ctrl_subs, q_data->fmt->subsampling);
+ }
+
return 0;
}
@@ -792,60 +1176,14 @@ static int s5p_jpeg_s_fmt_vid_out(struct file *file, void *priv,
return s5p_jpeg_s_fmt(fh_to_ctx(priv), f);
}
-static int s5p_jpeg_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *reqbufs)
-{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
- return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int s5p_jpeg_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
- return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int s5p_jpeg_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
- return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int s5p_jpeg_dqbuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int s5p_jpeg_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
- return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int s5p_jpeg_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
- return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
static int s5p_jpeg_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
- s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ ctx->jpeg->variant->version != SJPEG_S5P)
return -EINVAL;
/* For JPEG blob active == default == bounds */
@@ -884,12 +1222,7 @@ static int s5p_jpeg_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
spin_lock_irqsave(&jpeg->slock, flags);
-
- WARN_ON(ctx->subsampling > S5P_SUBSAMPLING_MODE_GRAY);
- if (ctx->subsampling > 2)
- ctrl->val = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
- else
- ctrl->val = ctx->subsampling;
+ ctrl->val = s5p_jpeg_to_user_subsampling(ctx);
spin_unlock_irqrestore(&jpeg->slock, flags);
break;
}
@@ -897,6 +1230,40 @@ static int s5p_jpeg_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
return 0;
}
+static int s5p_jpeg_try_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ctx->jpeg->slock, flags);
+
+ if (ctrl->id == V4L2_CID_JPEG_CHROMA_SUBSAMPLING) {
+ if (ctx->jpeg->variant->version == SJPEG_S5P)
+ goto error_free;
+ /*
+ * The exynos4x12 device requires input raw image fourcc
+ * to be V4L2_PIX_FMT_GREY if gray jpeg format
+ * is to be set.
+ */
+ if (ctx->out_q.fmt->fourcc != V4L2_PIX_FMT_GREY &&
+ ctrl->val == V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY) {
+ ret = -EINVAL;
+ goto error_free;
+ }
+ /*
+ * The exynos4x12 device requires resulting jpeg subsampling
+ * not to be lower than the input raw image subsampling.
+ */
+ if (ctx->out_q.fmt->subsampling > ctrl->val)
+ ctrl->val = ctx->out_q.fmt->subsampling;
+ }
+
+error_free:
+ spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
+ return ret;
+}
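/*
 * Example of the adjustment above: encoding from a V4L2_PIX_FMT_NV16
 * (4:2:2) source while userspace requests 4:4:4 JPEG subsampling raises
 * ctrl->val to V4L2_JPEG_CHROMA_SUBSAMPLING_422, since the enum values
 * grow as the subsampling gets coarser.
 */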
+
static int s5p_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
@@ -906,7 +1273,7 @@ static int s5p_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_JPEG_COMPRESSION_QUALITY:
- ctx->compr_quality = S5P_JPEG_COMPR_QUAL_WORST - ctrl->val;
+ ctx->compr_quality = ctrl->val;
break;
case V4L2_CID_JPEG_RESTART_INTERVAL:
ctx->restart_interval = ctrl->val;
@@ -922,6 +1289,7 @@ static int s5p_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
static const struct v4l2_ctrl_ops s5p_jpeg_ctrl_ops = {
.g_volatile_ctrl = s5p_jpeg_g_volatile_ctrl,
+ .try_ctrl = s5p_jpeg_try_ctrl,
.s_ctrl = s5p_jpeg_s_ctrl,
};
@@ -929,18 +1297,20 @@ static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx)
{
unsigned int mask = ~0x27; /* 444, 422, 420, GRAY */
struct v4l2_ctrl *ctrl;
+ int ret;
v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3);
if (ctx->mode == S5P_JPEG_ENCODE) {
v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
V4L2_CID_JPEG_COMPRESSION_QUALITY,
- 0, 3, 1, 3);
+ 0, 3, 1, S5P_JPEG_COMPR_QUAL_WORST);
v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
V4L2_CID_JPEG_RESTART_INTERVAL,
0, 3, 0xffff, 0);
- mask = ~0x06; /* 422, 420 */
+ if (ctx->jpeg->variant->version == SJPEG_S5P)
+ mask = ~0x06; /* 422, 420 */
}
ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
@@ -948,13 +1318,24 @@ static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx)
V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY, mask,
V4L2_JPEG_CHROMA_SUBSAMPLING_422);
- if (ctx->ctrl_handler.error)
- return ctx->ctrl_handler.error;
+ if (ctx->ctrl_handler.error) {
+ ret = ctx->ctrl_handler.error;
+ goto error_free;
+ }
if (ctx->mode == S5P_JPEG_DECODE)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE |
V4L2_CTRL_FLAG_READ_ONLY;
- return 0;
+
+ ret = v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+ if (ret < 0)
+ goto error_free;
+
+ return ret;
+
+error_free:
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return ret;
}
static const struct v4l2_ioctl_ops s5p_jpeg_ioctl_ops = {
@@ -972,14 +1353,13 @@ static const struct v4l2_ioctl_ops s5p_jpeg_ioctl_ops = {
.vidioc_s_fmt_vid_cap = s5p_jpeg_s_fmt_vid_cap,
.vidioc_s_fmt_vid_out = s5p_jpeg_s_fmt_vid_out,
- .vidioc_reqbufs = s5p_jpeg_reqbufs,
- .vidioc_querybuf = s5p_jpeg_querybuf,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
- .vidioc_qbuf = s5p_jpeg_qbuf,
- .vidioc_dqbuf = s5p_jpeg_dqbuf,
-
- .vidioc_streamon = s5p_jpeg_streamon,
- .vidioc_streamoff = s5p_jpeg_streamoff,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_g_selection = s5p_jpeg_g_selection,
};
@@ -995,74 +1375,181 @@ static void s5p_jpeg_device_run(void *priv)
struct s5p_jpeg_ctx *ctx = priv;
struct s5p_jpeg *jpeg = ctx->jpeg;
struct vb2_buffer *src_buf, *dst_buf;
- unsigned long src_addr, dst_addr;
+ unsigned long src_addr, dst_addr, flags;
+
+ spin_lock_irqsave(&ctx->jpeg->slock, flags);
- src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
- jpeg_reset(jpeg->regs);
- jpeg_poweron(jpeg->regs);
- jpeg_proc_mode(jpeg->regs, ctx->mode);
+ s5p_jpeg_reset(jpeg->regs);
+ s5p_jpeg_poweron(jpeg->regs);
+ s5p_jpeg_proc_mode(jpeg->regs, ctx->mode);
if (ctx->mode == S5P_JPEG_ENCODE) {
if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB565)
- jpeg_input_raw_mode(jpeg->regs, S5P_JPEG_RAW_IN_565);
+ s5p_jpeg_input_raw_mode(jpeg->regs,
+ S5P_JPEG_RAW_IN_565);
else
- jpeg_input_raw_mode(jpeg->regs, S5P_JPEG_RAW_IN_422);
- jpeg_subsampling_mode(jpeg->regs, ctx->subsampling);
- jpeg_dri(jpeg->regs, ctx->restart_interval);
- jpeg_x(jpeg->regs, ctx->out_q.w);
- jpeg_y(jpeg->regs, ctx->out_q.h);
- jpeg_imgadr(jpeg->regs, src_addr);
- jpeg_jpgadr(jpeg->regs, dst_addr);
+ s5p_jpeg_input_raw_mode(jpeg->regs,
+ S5P_JPEG_RAW_IN_422);
+ s5p_jpeg_subsampling_mode(jpeg->regs, ctx->subsampling);
+ s5p_jpeg_dri(jpeg->regs, ctx->restart_interval);
+ s5p_jpeg_x(jpeg->regs, ctx->out_q.w);
+ s5p_jpeg_y(jpeg->regs, ctx->out_q.h);
+ s5p_jpeg_imgadr(jpeg->regs, src_addr);
+ s5p_jpeg_jpgadr(jpeg->regs, dst_addr);
/* ultimately comes from sizeimage from userspace */
- jpeg_enc_stream_int(jpeg->regs, ctx->cap_q.size);
+ s5p_jpeg_enc_stream_int(jpeg->regs, ctx->cap_q.size);
/* JPEG RGB to YCbCr conversion matrix */
- jpeg_coef(jpeg->regs, 1, 1, S5P_JPEG_COEF11);
- jpeg_coef(jpeg->regs, 1, 2, S5P_JPEG_COEF12);
- jpeg_coef(jpeg->regs, 1, 3, S5P_JPEG_COEF13);
- jpeg_coef(jpeg->regs, 2, 1, S5P_JPEG_COEF21);
- jpeg_coef(jpeg->regs, 2, 2, S5P_JPEG_COEF22);
- jpeg_coef(jpeg->regs, 2, 3, S5P_JPEG_COEF23);
- jpeg_coef(jpeg->regs, 3, 1, S5P_JPEG_COEF31);
- jpeg_coef(jpeg->regs, 3, 2, S5P_JPEG_COEF32);
- jpeg_coef(jpeg->regs, 3, 3, S5P_JPEG_COEF33);
+ s5p_jpeg_coef(jpeg->regs, 1, 1, S5P_JPEG_COEF11);
+ s5p_jpeg_coef(jpeg->regs, 1, 2, S5P_JPEG_COEF12);
+ s5p_jpeg_coef(jpeg->regs, 1, 3, S5P_JPEG_COEF13);
+ s5p_jpeg_coef(jpeg->regs, 2, 1, S5P_JPEG_COEF21);
+ s5p_jpeg_coef(jpeg->regs, 2, 2, S5P_JPEG_COEF22);
+ s5p_jpeg_coef(jpeg->regs, 2, 3, S5P_JPEG_COEF23);
+ s5p_jpeg_coef(jpeg->regs, 3, 1, S5P_JPEG_COEF31);
+ s5p_jpeg_coef(jpeg->regs, 3, 2, S5P_JPEG_COEF32);
+ s5p_jpeg_coef(jpeg->regs, 3, 3, S5P_JPEG_COEF33);
/*
* JPEG IP allows storing 4 quantization tables
* We fill table 0 for luma and table 1 for chroma
*/
- jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
- jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
+ s5p_jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
+ s5p_jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
/* use table 0 for Y */
- jpeg_qtbl(jpeg->regs, 1, 0);
+ s5p_jpeg_qtbl(jpeg->regs, 1, 0);
/* use table 1 for Cb and Cr*/
- jpeg_qtbl(jpeg->regs, 2, 1);
- jpeg_qtbl(jpeg->regs, 3, 1);
+ s5p_jpeg_qtbl(jpeg->regs, 2, 1);
+ s5p_jpeg_qtbl(jpeg->regs, 3, 1);
/* Y, Cb, Cr use Huffman table 0 */
- jpeg_htbl_ac(jpeg->regs, 1);
- jpeg_htbl_dc(jpeg->regs, 1);
- jpeg_htbl_ac(jpeg->regs, 2);
- jpeg_htbl_dc(jpeg->regs, 2);
- jpeg_htbl_ac(jpeg->regs, 3);
- jpeg_htbl_dc(jpeg->regs, 3);
+ s5p_jpeg_htbl_ac(jpeg->regs, 1);
+ s5p_jpeg_htbl_dc(jpeg->regs, 1);
+ s5p_jpeg_htbl_ac(jpeg->regs, 2);
+ s5p_jpeg_htbl_dc(jpeg->regs, 2);
+ s5p_jpeg_htbl_ac(jpeg->regs, 3);
+ s5p_jpeg_htbl_dc(jpeg->regs, 3);
} else { /* S5P_JPEG_DECODE */
- jpeg_rst_int_enable(jpeg->regs, true);
- jpeg_data_num_int_enable(jpeg->regs, true);
- jpeg_final_mcu_num_int_enable(jpeg->regs, true);
+ s5p_jpeg_rst_int_enable(jpeg->regs, true);
+ s5p_jpeg_data_num_int_enable(jpeg->regs, true);
+ s5p_jpeg_final_mcu_num_int_enable(jpeg->regs, true);
if (ctx->cap_q.fmt->fourcc == V4L2_PIX_FMT_YUYV)
- jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_422);
+ s5p_jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_422);
else
- jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_420);
- jpeg_jpgadr(jpeg->regs, src_addr);
- jpeg_imgadr(jpeg->regs, dst_addr);
+ s5p_jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_420);
+ s5p_jpeg_jpgadr(jpeg->regs, src_addr);
+ s5p_jpeg_imgadr(jpeg->regs, dst_addr);
}
- jpeg_start(jpeg->regs);
+ s5p_jpeg_start(jpeg->regs);
+
+ spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
+}
+
+static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct s5p_jpeg_fmt *fmt;
+ struct vb2_buffer *vb;
+ struct s5p_jpeg_addr jpeg_addr;
+ u32 pix_size, padding_bytes = 0;
+
+ pix_size = ctx->cap_q.w * ctx->cap_q.h;
+
+ if (ctx->mode == S5P_JPEG_ENCODE) {
+ vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ fmt = ctx->out_q.fmt;
+ if (ctx->out_q.w % 2 && fmt->h_align > 0)
+ padding_bytes = ctx->out_q.h;
+ } else {
+ fmt = ctx->cap_q.fmt;
+ vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ }
+
+ jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ if (fmt->colplanes == 2) {
+ jpeg_addr.cb = jpeg_addr.y + pix_size - padding_bytes;
+ } else if (fmt->colplanes == 3) {
+ jpeg_addr.cb = jpeg_addr.y + pix_size;
+ if (fmt->fourcc == V4L2_PIX_FMT_YUV420)
+ jpeg_addr.cr = jpeg_addr.cb + pix_size / 4;
+ else
+ jpeg_addr.cr = jpeg_addr.cb + pix_size / 2;
+ }
+
+ exynos4_jpeg_set_frame_buf_address(jpeg->regs, &jpeg_addr);
+}
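/*
 * Worked example for the address computation above: with a 640x480 NV12
 * destination (two planes, no padding) pix_size is 307200, so the CbCr
 * plane starts at jpeg_addr.y + 307200; for three-plane YUV420 the Cr
 * plane starts a further pix_size / 4 = 76800 bytes after Cb.
 */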
+
+static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct vb2_buffer *vb;
+ unsigned int jpeg_addr = 0;
+
+ if (ctx->mode == S5P_JPEG_ENCODE)
+ vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ else
+ vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+
+ jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ exynos4_jpeg_set_stream_buf_address(jpeg->regs, jpeg_addr);
+}
+
+static void exynos4_jpeg_device_run(void *priv)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ unsigned int bitstream_size;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->jpeg->slock, flags);
+
+ if (ctx->mode == S5P_JPEG_ENCODE) {
+ exynos4_jpeg_sw_reset(jpeg->regs);
+ exynos4_jpeg_set_interrupt(jpeg->regs);
+ exynos4_jpeg_set_huf_table_enable(jpeg->regs, 1);
+
+ exynos4_jpeg_set_huff_tbl(jpeg->regs);
+
+ /*
+ * JPEG IP allows storing 4 quantization tables
+ * We fill table 0 for luma and table 1 for chroma
+ */
+ exynos4_jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
+ exynos4_jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
+
+ exynos4_jpeg_set_encode_tbl_select(jpeg->regs,
+ ctx->compr_quality);
+ exynos4_jpeg_set_stream_size(jpeg->regs, ctx->cap_q.w,
+ ctx->cap_q.h);
+
+ exynos4_jpeg_set_enc_out_fmt(jpeg->regs, ctx->subsampling);
+ exynos4_jpeg_set_img_fmt(jpeg->regs, ctx->out_q.fmt->fourcc);
+ exynos4_jpeg_set_img_addr(ctx);
+ exynos4_jpeg_set_jpeg_addr(ctx);
+ exynos4_jpeg_set_encode_hoff_cnt(jpeg->regs,
+ ctx->out_q.fmt->fourcc);
+ } else {
+ exynos4_jpeg_sw_reset(jpeg->regs);
+ exynos4_jpeg_set_interrupt(jpeg->regs);
+ exynos4_jpeg_set_img_addr(ctx);
+ exynos4_jpeg_set_jpeg_addr(ctx);
+ exynos4_jpeg_set_img_fmt(jpeg->regs, ctx->cap_q.fmt->fourcc);
+
+ bitstream_size = DIV_ROUND_UP(ctx->out_q.size, 32);
+
+ exynos4_jpeg_set_dec_bitstream_size(jpeg->regs, bitstream_size);
+ }
+
+ exynos4_jpeg_set_enc_dec_mode(jpeg->regs, ctx->mode);
+
+ spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
}
static int s5p_jpeg_job_ready(void *priv)
@@ -1082,6 +1569,12 @@ static struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
.device_run = s5p_jpeg_device_run,
.job_ready = s5p_jpeg_job_ready,
.job_abort = s5p_jpeg_job_abort,
+};
+
+static struct v4l2_m2m_ops exynos_jpeg_m2m_ops = {
+ .device_run = exynos4_jpeg_device_run,
+ .job_ready = s5p_jpeg_job_ready,
+ .job_abort = s5p_jpeg_job_abort,
};
/*
@@ -1149,7 +1642,7 @@ static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
ctx->hdr_parsed = s5p_jpeg_parse_hdr(&tmp,
(unsigned long)vb2_plane_vaddr(vb, 0),
min((unsigned long)ctx->out_q.size,
- vb2_get_plane_payload(vb, 0)));
+ vb2_get_plane_payload(vb, 0)), ctx);
if (!ctx->hdr_parsed) {
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
return;
@@ -1162,30 +1655,9 @@ static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
q_data = &ctx->cap_q;
q_data->w = tmp.w;
q_data->h = tmp.h;
-
- jpeg_bound_align_image(&q_data->w, S5P_JPEG_MIN_WIDTH,
- S5P_JPEG_MAX_WIDTH, q_data->fmt->h_align,
- &q_data->h, S5P_JPEG_MIN_HEIGHT,
- S5P_JPEG_MAX_HEIGHT, q_data->fmt->v_align
- );
- q_data->size = q_data->w * q_data->h * q_data->fmt->depth >> 3;
}
- if (ctx->m2m_ctx)
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
-}
-
-static void s5p_jpeg_wait_prepare(struct vb2_queue *vq)
-{
- struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
- mutex_unlock(&ctx->jpeg->lock);
-}
-
-static void s5p_jpeg_wait_finish(struct vb2_queue *vq)
-{
- struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
-
- mutex_lock(&ctx->jpeg->lock);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
}
static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
@@ -1211,8 +1683,8 @@ static struct vb2_ops s5p_jpeg_qops = {
.queue_setup = s5p_jpeg_queue_setup,
.buf_prepare = s5p_jpeg_buf_prepare,
.buf_queue = s5p_jpeg_buf_queue,
- .wait_prepare = s5p_jpeg_wait_prepare,
- .wait_finish = s5p_jpeg_wait_finish,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.start_streaming = s5p_jpeg_start_streaming,
.stop_streaming = s5p_jpeg_stop_streaming,
};
@@ -1230,6 +1702,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->ops = &s5p_jpeg_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->jpeg->lock;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -1242,6 +1715,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->ops = &s5p_jpeg_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->jpeg->lock;
return vb2_queue_init(dst_vq);
}
@@ -1267,26 +1741,27 @@ static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id)
curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
- src_buf = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
- dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+ src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
if (curr_ctx->mode == S5P_JPEG_ENCODE)
- enc_jpeg_too_large = jpeg_enc_stream_stat(jpeg->regs);
- timer_elapsed = jpeg_timer_stat(jpeg->regs);
- op_completed = jpeg_result_stat_ok(jpeg->regs);
+ enc_jpeg_too_large = s5p_jpeg_enc_stream_stat(jpeg->regs);
+ timer_elapsed = s5p_jpeg_timer_stat(jpeg->regs);
+ op_completed = s5p_jpeg_result_stat_ok(jpeg->regs);
if (curr_ctx->mode == S5P_JPEG_DECODE)
- op_completed = op_completed && jpeg_stream_stat_ok(jpeg->regs);
+ op_completed = op_completed &&
+ s5p_jpeg_stream_stat_ok(jpeg->regs);
if (enc_jpeg_too_large) {
state = VB2_BUF_STATE_ERROR;
- jpeg_clear_enc_stream_stat(jpeg->regs);
+ s5p_jpeg_clear_enc_stream_stat(jpeg->regs);
} else if (timer_elapsed) {
state = VB2_BUF_STATE_ERROR;
- jpeg_clear_timer_stat(jpeg->regs);
+ s5p_jpeg_clear_timer_stat(jpeg->regs);
} else if (!op_completed) {
state = VB2_BUF_STATE_ERROR;
} else {
- payload_size = jpeg_compressed_size(jpeg->regs);
+ payload_size = s5p_jpeg_compressed_size(jpeg->regs);
}
dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
@@ -1296,16 +1771,79 @@ static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id)
if (curr_ctx->mode == S5P_JPEG_ENCODE)
vb2_set_plane_payload(dst_buf, 0, payload_size);
v4l2_m2m_buf_done(dst_buf, state);
- v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->m2m_ctx);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
- curr_ctx->subsampling = jpeg_get_subsampling_mode(jpeg->regs);
+ curr_ctx->subsampling = s5p_jpeg_get_subsampling_mode(jpeg->regs);
spin_unlock(&jpeg->slock);
- jpeg_clear_int(jpeg->regs);
+ s5p_jpeg_clear_int(jpeg->regs);
return IRQ_HANDLED;
}
+static irqreturn_t exynos4_jpeg_irq(int irq, void *priv)
+{
+ unsigned int int_status;
+ struct vb2_buffer *src_vb, *dst_vb;
+ struct s5p_jpeg *jpeg = priv;
+ struct s5p_jpeg_ctx *curr_ctx;
+ unsigned long payload_size = 0;
+
+ spin_lock(&jpeg->slock);
+
+ curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
+
+ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
+
+ int_status = exynos4_jpeg_get_int_status(jpeg->regs);
+
+ if (int_status) {
+ switch (int_status & 0x1f) {
+ case 0x1:
+ jpeg->irq_ret = ERR_PROT;
+ break;
+ case 0x2:
+ jpeg->irq_ret = OK_ENC_OR_DEC;
+ break;
+ case 0x4:
+ jpeg->irq_ret = ERR_DEC_INVALID_FORMAT;
+ break;
+ case 0x8:
+ jpeg->irq_ret = ERR_MULTI_SCAN;
+ break;
+ case 0x10:
+ jpeg->irq_ret = ERR_FRAME;
+ break;
+ default:
+ jpeg->irq_ret = ERR_UNKNOWN;
+ break;
+ }
+ } else {
+ jpeg->irq_ret = ERR_UNKNOWN;
+ }
+
+ if (jpeg->irq_ret == OK_ENC_OR_DEC) {
+ if (curr_ctx->mode == S5P_JPEG_ENCODE) {
+ payload_size = exynos4_jpeg_get_stream_size(jpeg->regs);
+ vb2_set_plane_payload(dst_vb, 0, payload_size);
+ }
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+ } else {
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
+ }
+
+ v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
+ curr_ctx->subsampling = exynos4_jpeg_get_frame_fmt(jpeg->regs);
+
+ spin_unlock(&jpeg->slock);
+ return IRQ_HANDLED;
+}
+
+static void *jpeg_get_drv_data(struct platform_device *pdev);
+
/*
* ============================================================================
* Driver basic infrastructure
@@ -1316,13 +1854,19 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
{
struct s5p_jpeg *jpeg;
struct resource *res;
+ struct v4l2_m2m_ops *samsung_jpeg_m2m_ops;
int ret;
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
/* JPEG IP abstraction struct */
jpeg = devm_kzalloc(&pdev->dev, sizeof(struct s5p_jpeg), GFP_KERNEL);
if (!jpeg)
return -ENOMEM;
+ jpeg->variant = jpeg_get_drv_data(pdev);
+
mutex_init(&jpeg->lock);
spin_lock_init(&jpeg->slock);
jpeg->dev = &pdev->dev;
@@ -1341,8 +1885,8 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_request_irq(&pdev->dev, jpeg->irq, s5p_jpeg_irq, 0,
- dev_name(&pdev->dev), jpeg);
+ ret = devm_request_irq(&pdev->dev, jpeg->irq, jpeg->variant->jpeg_irq,
+ 0, dev_name(&pdev->dev), jpeg);
if (ret) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpeg->irq);
return ret;
@@ -1356,7 +1900,6 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
return ret;
}
dev_dbg(&pdev->dev, "clock source %p\n", jpeg->clk);
- clk_prepare_enable(jpeg->clk);
/* v4l2 device */
ret = v4l2_device_register(&pdev->dev, &jpeg->v4l2_dev);
@@ -1365,8 +1908,13 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
goto clk_get_rollback;
}
+ if (jpeg->variant->version == SJPEG_S5P)
+ samsung_jpeg_m2m_ops = &s5p_jpeg_m2m_ops;
+ else
+ samsung_jpeg_m2m_ops = &exynos_jpeg_m2m_ops;
+
/* mem2mem device */
- jpeg->m2m_dev = v4l2_m2m_init(&s5p_jpeg_m2m_ops);
+ jpeg->m2m_dev = v4l2_m2m_init(samsung_jpeg_m2m_ops);
if (IS_ERR(jpeg->m2m_dev)) {
v4l2_err(&jpeg->v4l2_dev, "Failed to init mem2mem device\n");
ret = PTR_ERR(jpeg->m2m_dev);
@@ -1387,8 +1935,8 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto vb2_allocator_rollback;
}
- strlcpy(jpeg->vfd_encoder->name, S5P_JPEG_M2M_NAME,
- sizeof(jpeg->vfd_encoder->name));
+ snprintf(jpeg->vfd_encoder->name, sizeof(jpeg->vfd_encoder->name),
+ "%s-enc", S5P_JPEG_M2M_NAME);
jpeg->vfd_encoder->fops = &s5p_jpeg_fops;
jpeg->vfd_encoder->ioctl_ops = &s5p_jpeg_ioctl_ops;
jpeg->vfd_encoder->minor = -1;
@@ -1415,8 +1963,8 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto enc_vdev_register_rollback;
}
- strlcpy(jpeg->vfd_decoder->name, S5P_JPEG_M2M_NAME,
- sizeof(jpeg->vfd_decoder->name));
+ snprintf(jpeg->vfd_decoder->name, sizeof(jpeg->vfd_decoder->name),
+ "%s-dec", S5P_JPEG_M2M_NAME);
jpeg->vfd_decoder->fops = &s5p_jpeg_fops;
jpeg->vfd_decoder->ioctl_ops = &s5p_jpeg_ioctl_ops;
jpeg->vfd_decoder->minor = -1;
@@ -1464,7 +2012,6 @@ device_register_rollback:
v4l2_device_unregister(&jpeg->v4l2_dev);
clk_get_rollback:
- clk_disable_unprepare(jpeg->clk);
clk_put(jpeg->clk);
return ret;
@@ -1484,7 +2031,9 @@ static int s5p_jpeg_remove(struct platform_device *pdev)
v4l2_m2m_release(jpeg->m2m_dev);
v4l2_device_unregister(&jpeg->v4l2_dev);
- clk_disable_unprepare(jpeg->clk);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ clk_disable_unprepare(jpeg->clk);
+
clk_put(jpeg->clk);
return 0;
@@ -1492,41 +2041,119 @@ static int s5p_jpeg_remove(struct platform_device *pdev)
static int s5p_jpeg_runtime_suspend(struct device *dev)
{
+ struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(jpeg->clk);
+
return 0;
}
static int s5p_jpeg_runtime_resume(struct device *dev)
{
struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
+ unsigned long flags;
+ int ret;
+
+ ret = clk_prepare_enable(jpeg->clk);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&jpeg->slock, flags);
+
/*
* JPEG IP allows storing two Huffman tables for each component
- * We fill table 0 for each component
+ * We fill table 0 for each component and do this here only
+ * for the S5PC210 device, as Exynos4x12 requires programming its
+ * Huffman tables each time the encoding process is initialized.
*/
- jpeg_set_hdctbl(jpeg->regs);
- jpeg_set_hdctblg(jpeg->regs);
- jpeg_set_hactbl(jpeg->regs);
- jpeg_set_hactblg(jpeg->regs);
+ if (jpeg->variant->version == SJPEG_S5P) {
+ s5p_jpeg_set_hdctbl(jpeg->regs);
+ s5p_jpeg_set_hdctblg(jpeg->regs);
+ s5p_jpeg_set_hactbl(jpeg->regs);
+ s5p_jpeg_set_hactblg(jpeg->regs);
+ }
+
+ spin_unlock_irqrestore(&jpeg->slock, flags);
+
return 0;
}
+static int s5p_jpeg_suspend(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return s5p_jpeg_runtime_suspend(dev);
+}
+
+static int s5p_jpeg_resume(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return s5p_jpeg_runtime_resume(dev);
+}
+
static const struct dev_pm_ops s5p_jpeg_pm_ops = {
- .runtime_suspend = s5p_jpeg_runtime_suspend,
- .runtime_resume = s5p_jpeg_runtime_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(s5p_jpeg_suspend, s5p_jpeg_resume)
+ SET_RUNTIME_PM_OPS(s5p_jpeg_runtime_suspend, s5p_jpeg_runtime_resume, NULL)
+};
+
+#ifdef CONFIG_OF
+static struct s5p_jpeg_variant s5p_jpeg_drvdata = {
+ .version = SJPEG_S5P,
+ .jpeg_irq = s5p_jpeg_irq,
+};
+
+static struct s5p_jpeg_variant exynos4_jpeg_drvdata = {
+ .version = SJPEG_EXYNOS4,
+ .jpeg_irq = exynos4_jpeg_irq,
+};
+
+static const struct of_device_id samsung_jpeg_match[] = {
+ {
+ .compatible = "samsung,s5pv210-jpeg",
+ .data = &s5p_jpeg_drvdata,
+ }, {
+ .compatible = "samsung,exynos4210-jpeg",
+ .data = &s5p_jpeg_drvdata,
+ }, {
+ .compatible = "samsung,exynos4212-jpeg",
+ .data = &exynos4_jpeg_drvdata,
+ },
+ {},
};
+MODULE_DEVICE_TABLE(of, samsung_jpeg_match);
+
+static void *jpeg_get_drv_data(struct platform_device *pdev)
+{
+ struct s5p_jpeg_variant *driver_data = NULL;
+ const struct of_device_id *match;
+
+ match = of_match_node(of_match_ptr(samsung_jpeg_match),
+ pdev->dev.of_node);
+ if (match)
+ driver_data = (struct s5p_jpeg_variant *)match->data;
+
+ return driver_data;
+}
+#endif
+
static struct platform_driver s5p_jpeg_driver = {
.probe = s5p_jpeg_probe,
.remove = s5p_jpeg_remove,
.driver = {
- .owner = THIS_MODULE,
- .name = S5P_JPEG_M2M_NAME,
- .pm = &s5p_jpeg_pm_ops,
+ .of_match_table = of_match_ptr(samsung_jpeg_match),
+ .owner = THIS_MODULE,
+ .name = S5P_JPEG_M2M_NAME,
+ .pm = &s5p_jpeg_pm_ops,
},
};
module_platform_driver(s5p_jpeg_driver);
MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzej.p@samsung.com>");
+MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
MODULE_DESCRIPTION("Samsung JPEG codec driver");
MODULE_LICENSE("GPL");
-
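The probe changes above select per-SoC behaviour through a small variant descriptor (a version number plus an IRQ handler) looked up from the device-tree compatible string. A rough, self-contained C sketch of that dispatch pattern — with illustrative names only, not the driver's actual symbols — looks like this:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the driver's variant struct and OF match table. */
struct jpeg_variant {
	unsigned int version;
	void (*irq_handler)(void);
};

static void s5p_irq(void)     { puts("S5P-style IRQ handling"); }
static void exynos4_irq(void) { puts("Exynos4-style IRQ handling"); }

static const struct jpeg_variant s5p_drvdata     = { .version = 1, .irq_handler = s5p_irq };
static const struct jpeg_variant exynos4_drvdata = { .version = 2, .irq_handler = exynos4_irq };

struct of_match {
	const char *compatible;
	const struct jpeg_variant *data;
};

static const struct of_match match_table[] = {
	{ "samsung,s5pv210-jpeg",    &s5p_drvdata },
	{ "samsung,exynos4210-jpeg", &s5p_drvdata },
	{ "samsung,exynos4212-jpeg", &exynos4_drvdata },
	{ NULL, NULL },
};

/* Walk the table and return the matching variant, as the probe path does. */
static const struct jpeg_variant *get_drv_data(const char *compatible)
{
	const struct of_match *m;

	for (m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	const struct jpeg_variant *v = get_drv_data("samsung,exynos4212-jpeg");

	if (v)
		v->irq_handler();	/* picks the Exynos4-specific handler */
	return 0;
}

In the patch itself, samsung_jpeg_match[] and jpeg_get_drv_data() play the roles of match_table[] and get_drv_data() here, and the selected handler is passed to devm_request_irq() in s5p_jpeg_probe().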
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index 8a4013e3aee7..f482dbf55d5f 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -13,6 +13,7 @@
#ifndef JPEG_CORE_H_
#define JPEG_CORE_H_
+#include <linux/interrupt.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ctrls.h>
@@ -43,8 +44,45 @@
#define DHP 0xde
/* Flags that indicate a format can be used for capture/output */
-#define MEM2MEM_CAPTURE (1 << 0)
-#define MEM2MEM_OUTPUT (1 << 1)
+#define SJPEG_FMT_FLAG_ENC_CAPTURE (1 << 0)
+#define SJPEG_FMT_FLAG_ENC_OUTPUT (1 << 1)
+#define SJPEG_FMT_FLAG_DEC_CAPTURE (1 << 2)
+#define SJPEG_FMT_FLAG_DEC_OUTPUT (1 << 3)
+#define SJPEG_FMT_FLAG_S5P (1 << 4)
+#define SJPEG_FMT_FLAG_EXYNOS4 (1 << 5)
+#define SJPEG_FMT_RGB (1 << 6)
+#define SJPEG_FMT_NON_RGB (1 << 7)
+
+#define S5P_JPEG_ENCODE 0
+#define S5P_JPEG_DECODE 1
+
+#define FMT_TYPE_OUTPUT 0
+#define FMT_TYPE_CAPTURE 1
+
+#define SJPEG_SUBSAMPLING_444 0x11
+#define SJPEG_SUBSAMPLING_422 0x21
+#define SJPEG_SUBSAMPLING_420 0x22
+
+/* Version numbers */
+
+#define SJPEG_S5P 1
+#define SJPEG_EXYNOS4 2
+
+enum exynos4_jpeg_result {
+ OK_ENC_OR_DEC,
+ ERR_PROT,
+ ERR_DEC_INVALID_FORMAT,
+ ERR_MULTI_SCAN,
+ ERR_FRAME,
+ ERR_UNKNOWN,
+};
+
+enum exynos4_jpeg_img_quality_level {
+ QUALITY_LEVEL_1 = 0, /* high */
+ QUALITY_LEVEL_2,
+ QUALITY_LEVEL_3,
+ QUALITY_LEVEL_4, /* low */
+};
/**
* struct s5p_jpeg - JPEG IP abstraction
@@ -71,9 +109,16 @@ struct s5p_jpeg {
void __iomem *regs;
unsigned int irq;
+ enum exynos4_jpeg_result irq_ret;
struct clk *clk;
struct device *dev;
void *alloc_ctx;
+ struct s5p_jpeg_variant *variant;
+};
+
+struct s5p_jpeg_variant {
+ unsigned int version;
+ irqreturn_t (*jpeg_irq)(int irq, void *priv);
};
/**
@@ -84,16 +129,18 @@ struct s5p_jpeg {
* @colplanes: number of color planes (1 for packed formats)
* @h_align: horizontal alignment order (align to 2^h_align)
* @v_align: vertical alignment order (align to 2^v_align)
- * @types: types of queue this format is applicable to
+ * @flags: flags describing format applicability
*/
struct s5p_jpeg_fmt {
char *name;
u32 fourcc;
int depth;
int colplanes;
+ int memplanes;
int h_align;
int v_align;
- u32 types;
+ int subsampling;
+ u32 flags;
};
/**
@@ -115,7 +162,6 @@ struct s5p_jpeg_q_data {
* @jpeg: JPEG IP device for this context
* @mode: compression (encode) operation or decompression (decode)
* @compr_quality: destination image quality in compression (encode) mode
- * @m2m_ctx: mem2mem device context
* @out_q: source (output) queue information
 * @cap_fmt: destination (capture) queue information
* @hdr_parsed: set if header has been parsed during decompression
@@ -127,7 +173,6 @@ struct s5p_jpeg_ctx {
unsigned short compr_quality;
unsigned short restart_interval;
unsigned short subsampling;
- struct v4l2_m2m_ctx *m2m_ctx;
struct s5p_jpeg_q_data out_q;
struct s5p_jpeg_q_data cap_q;
struct v4l2_fh fh;
@@ -147,4 +192,16 @@ struct s5p_jpeg_buffer {
unsigned long data;
};
+/**
+ * struct s5p_jpeg_addr - JPEG converter physical address set for DMA
+ * @y: luminance plane physical address
+ * @cb: Cb plane physical address
+ * @cr: Cr plane physical address
+ */
+struct s5p_jpeg_addr {
+ u32 y;
+ u32 cb;
+ u32 cr;
+};
+
#endif /* JPEG_CORE_H */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
new file mode 100644
index 000000000000..da8d6a1a984f
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
@@ -0,0 +1,279 @@
+/* Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * Register interface file for JPEG driver on Exynos4x12.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include "jpeg-core.h"
+#include "jpeg-hw-exynos4.h"
+#include "jpeg-regs.h"
+
+void exynos4_jpeg_sw_reset(void __iomem *base)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_JPEG_CNTL_REG);
+ writel(reg & ~EXYNOS4_SOFT_RESET_HI, base + EXYNOS4_JPEG_CNTL_REG);
+
+ ndelay(100000);
+
+ writel(reg | EXYNOS4_SOFT_RESET_HI, base + EXYNOS4_JPEG_CNTL_REG);
+}
+
+void exynos4_jpeg_set_enc_dec_mode(void __iomem *base, unsigned int mode)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_JPEG_CNTL_REG);
+ /* set exynos4_jpeg mod register */
+ if (mode == S5P_JPEG_DECODE) {
+ writel((reg & EXYNOS4_ENC_DEC_MODE_MASK) |
+ EXYNOS4_DEC_MODE,
+ base + EXYNOS4_JPEG_CNTL_REG);
+ } else {/* encode */
+ writel((reg & EXYNOS4_ENC_DEC_MODE_MASK) |
+ EXYNOS4_ENC_MODE,
+ base + EXYNOS4_JPEG_CNTL_REG);
+ }
+}
+
+void exynos4_jpeg_set_img_fmt(void __iomem *base, unsigned int img_fmt)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_IMG_FMT_REG) &
+ EXYNOS4_ENC_IN_FMT_MASK; /* clear except enc format */
+
+ switch (img_fmt) {
+ case V4L2_PIX_FMT_GREY:
+ reg = reg | EXYNOS4_ENC_GRAY_IMG | EXYNOS4_GRAY_IMG_IP;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ reg = reg | EXYNOS4_ENC_RGB_IMG |
+ EXYNOS4_RGB_IP_RGB_32BIT_IMG;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ reg = reg | EXYNOS4_ENC_RGB_IMG |
+ EXYNOS4_RGB_IP_RGB_16BIT_IMG;
+ break;
+ case V4L2_PIX_FMT_NV24:
+ reg = reg | EXYNOS4_ENC_YUV_444_IMG |
+ EXYNOS4_YUV_444_IP_YUV_444_2P_IMG |
+ EXYNOS4_SWAP_CHROMA_CBCR;
+ break;
+ case V4L2_PIX_FMT_NV42:
+ reg = reg | EXYNOS4_ENC_YUV_444_IMG |
+ EXYNOS4_YUV_444_IP_YUV_444_2P_IMG |
+ EXYNOS4_SWAP_CHROMA_CRCB;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ reg = reg | EXYNOS4_DEC_YUV_422_IMG |
+ EXYNOS4_YUV_422_IP_YUV_422_1P_IMG |
+ EXYNOS4_SWAP_CHROMA_CBCR;
+ break;
+
+ case V4L2_PIX_FMT_YVYU:
+ reg = reg | EXYNOS4_DEC_YUV_422_IMG |
+ EXYNOS4_YUV_422_IP_YUV_422_1P_IMG |
+ EXYNOS4_SWAP_CHROMA_CRCB;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ reg = reg | EXYNOS4_DEC_YUV_422_IMG |
+ EXYNOS4_YUV_422_IP_YUV_422_2P_IMG |
+ EXYNOS4_SWAP_CHROMA_CBCR;
+ break;
+ case V4L2_PIX_FMT_NV61:
+ reg = reg | EXYNOS4_DEC_YUV_422_IMG |
+ EXYNOS4_YUV_422_IP_YUV_422_2P_IMG |
+ EXYNOS4_SWAP_CHROMA_CRCB;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ reg = reg | EXYNOS4_DEC_YUV_420_IMG |
+ EXYNOS4_YUV_420_IP_YUV_420_2P_IMG |
+ EXYNOS4_SWAP_CHROMA_CBCR;
+ break;
+ case V4L2_PIX_FMT_NV21:
+ reg = reg | EXYNOS4_DEC_YUV_420_IMG |
+ EXYNOS4_YUV_420_IP_YUV_420_2P_IMG |
+ EXYNOS4_SWAP_CHROMA_CRCB;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ reg = reg | EXYNOS4_DEC_YUV_420_IMG |
+ EXYNOS4_YUV_420_IP_YUV_420_3P_IMG |
+ EXYNOS4_SWAP_CHROMA_CBCR;
+ break;
+ default:
+ break;
+
+ }
+
+ writel(reg, base + EXYNOS4_IMG_FMT_REG);
+}
+
+void exynos4_jpeg_set_enc_out_fmt(void __iomem *base, unsigned int out_fmt)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_IMG_FMT_REG) &
+ ~EXYNOS4_ENC_FMT_MASK; /* clear enc format */
+
+ switch (out_fmt) {
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY:
+ reg = reg | EXYNOS4_ENC_FMT_GRAY;
+ break;
+
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_444:
+ reg = reg | EXYNOS4_ENC_FMT_YUV_444;
+ break;
+
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_422:
+ reg = reg | EXYNOS4_ENC_FMT_YUV_422;
+ break;
+
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_420:
+ reg = reg | EXYNOS4_ENC_FMT_YUV_420;
+ break;
+
+ default:
+ break;
+ }
+
+ writel(reg, base + EXYNOS4_IMG_FMT_REG);
+}
+
+void exynos4_jpeg_set_interrupt(void __iomem *base)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_INT_EN_REG) & ~EXYNOS4_INT_EN_MASK;
+ writel(EXYNOS4_INT_EN_ALL, base + EXYNOS4_INT_EN_REG);
+}
+
+unsigned int exynos4_jpeg_get_int_status(void __iomem *base)
+{
+ unsigned int int_status;
+
+ int_status = readl(base + EXYNOS4_INT_STATUS_REG);
+
+ return int_status;
+}
+
+unsigned int exynos4_jpeg_get_fifo_status(void __iomem *base)
+{
+ unsigned int fifo_status;
+
+ fifo_status = readl(base + EXYNOS4_FIFO_STATUS_REG);
+
+ return fifo_status;
+}
+
+void exynos4_jpeg_set_huf_table_enable(void __iomem *base, int value)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_JPEG_CNTL_REG) & ~EXYNOS4_HUF_TBL_EN;
+
+ if (value == 1)
+ writel(reg | EXYNOS4_HUF_TBL_EN,
+ base + EXYNOS4_JPEG_CNTL_REG);
+ else
+ writel(reg | ~EXYNOS4_HUF_TBL_EN,
+ base + EXYNOS4_JPEG_CNTL_REG);
+}
+
+void exynos4_jpeg_set_sys_int_enable(void __iomem *base, int value)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_JPEG_CNTL_REG) & ~(EXYNOS4_SYS_INT_EN);
+
+ if (value == 1)
+ writel(EXYNOS4_SYS_INT_EN, base + EXYNOS4_JPEG_CNTL_REG);
+ else
+ writel(~EXYNOS4_SYS_INT_EN, base + EXYNOS4_JPEG_CNTL_REG);
+}
+
+void exynos4_jpeg_set_stream_buf_address(void __iomem *base,
+ unsigned int address)
+{
+ writel(address, base + EXYNOS4_OUT_MEM_BASE_REG);
+}
+
+void exynos4_jpeg_set_stream_size(void __iomem *base,
+ unsigned int x_value, unsigned int y_value)
+{
+ writel(0x0, base + EXYNOS4_JPEG_IMG_SIZE_REG); /* clear */
+ writel(EXYNOS4_X_SIZE(x_value) | EXYNOS4_Y_SIZE(y_value),
+ base + EXYNOS4_JPEG_IMG_SIZE_REG);
+}
+
+void exynos4_jpeg_set_frame_buf_address(void __iomem *base,
+ struct s5p_jpeg_addr *exynos4_jpeg_addr)
+{
+ writel(exynos4_jpeg_addr->y, base + EXYNOS4_IMG_BA_PLANE_1_REG);
+ writel(exynos4_jpeg_addr->cb, base + EXYNOS4_IMG_BA_PLANE_2_REG);
+ writel(exynos4_jpeg_addr->cr, base + EXYNOS4_IMG_BA_PLANE_3_REG);
+}
+
+void exynos4_jpeg_set_encode_tbl_select(void __iomem *base,
+ enum exynos4_jpeg_img_quality_level level)
+{
+ unsigned int reg;
+
+ reg = EXYNOS4_Q_TBL_COMP1_0 | EXYNOS4_Q_TBL_COMP2_1 |
+ EXYNOS4_Q_TBL_COMP3_1 |
+ EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_1 |
+ EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_0 |
+ EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_1;
+
+ writel(reg, base + EXYNOS4_TBL_SEL_REG);
+}
+
+void exynos4_jpeg_set_encode_hoff_cnt(void __iomem *base, unsigned int fmt)
+{
+ if (fmt == V4L2_PIX_FMT_GREY)
+ writel(0xd2, base + EXYNOS4_HUFF_CNT_REG);
+ else
+ writel(0x1a2, base + EXYNOS4_HUFF_CNT_REG);
+}
+
+unsigned int exynos4_jpeg_get_stream_size(void __iomem *base)
+{
+ unsigned int size;
+
+ size = readl(base + EXYNOS4_BITSTREAM_SIZE_REG);
+ return size;
+}
+
+void exynos4_jpeg_set_dec_bitstream_size(void __iomem *base, unsigned int size)
+{
+ writel(size, base + EXYNOS4_BITSTREAM_SIZE_REG);
+}
+
+void exynos4_jpeg_get_frame_size(void __iomem *base,
+ unsigned int *width, unsigned int *height)
+{
+ *width = (readl(base + EXYNOS4_DECODE_XY_SIZE_REG) &
+ EXYNOS4_DECODED_SIZE_MASK);
+ *height = (readl(base + EXYNOS4_DECODE_XY_SIZE_REG) >> 16) &
+ EXYNOS4_DECODED_SIZE_MASK;
+}
+
+unsigned int exynos4_jpeg_get_frame_fmt(void __iomem *base)
+{
+ return readl(base + EXYNOS4_DECODE_IMG_FMT_REG) &
+ EXYNOS4_JPEG_DECODED_IMG_FMT_MASK;
+}
+
+void exynos4_jpeg_set_timer_count(void __iomem *base, unsigned int size)
+{
+ writel(size, base + EXYNOS4_INT_TIMER_COUNT_REG);
+}
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h
new file mode 100644
index 000000000000..c228d28a4bc7
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * Header file of the register interface for JPEG driver on Exynos4x12.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef JPEG_HW_EXYNOS4_H_
+#define JPEG_HW_EXYNOS4_H_
+
+void exynos4_jpeg_sw_reset(void __iomem *base);
+void exynos4_jpeg_set_enc_dec_mode(void __iomem *base, unsigned int mode);
+void exynos4_jpeg_set_img_fmt(void __iomem *base, unsigned int img_fmt);
+void exynos4_jpeg_set_enc_out_fmt(void __iomem *base, unsigned int out_fmt);
+void exynos4_jpeg_set_enc_tbl(void __iomem *base);
+void exynos4_jpeg_set_interrupt(void __iomem *base);
+unsigned int exynos4_jpeg_get_int_status(void __iomem *base);
+void exynos4_jpeg_set_huf_table_enable(void __iomem *base, int value);
+void exynos4_jpeg_set_sys_int_enable(void __iomem *base, int value);
+void exynos4_jpeg_set_stream_buf_address(void __iomem *base,
+ unsigned int address);
+void exynos4_jpeg_set_stream_size(void __iomem *base,
+ unsigned int x_value, unsigned int y_value);
+void exynos4_jpeg_set_frame_buf_address(void __iomem *base,
+ struct s5p_jpeg_addr *jpeg_addr);
+void exynos4_jpeg_set_encode_tbl_select(void __iomem *base,
+ enum exynos4_jpeg_img_quality_level level);
+void exynos4_jpeg_set_encode_hoff_cnt(void __iomem *base, unsigned int fmt);
+void exynos4_jpeg_set_dec_bitstream_size(void __iomem *base, unsigned int size);
+unsigned int exynos4_jpeg_get_stream_size(void __iomem *base);
+void exynos4_jpeg_get_frame_size(void __iomem *base,
+ unsigned int *width, unsigned int *height);
+unsigned int exynos4_jpeg_get_frame_fmt(void __iomem *base);
+unsigned int exynos4_jpeg_get_fifo_status(void __iomem *base);
+void exynos4_jpeg_set_timer_count(void __iomem *base, unsigned int size);
+
+#endif /* JPEG_HW_EXYNOS4_H_ */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
index b47e887b6138..52407d790726 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
@@ -9,27 +9,15 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#ifndef JPEG_HW_H_
-#define JPEG_HW_H_
#include <linux/io.h>
#include <linux/videodev2.h>
-#include "jpeg-hw.h"
+#include "jpeg-core.h"
#include "jpeg-regs.h"
+#include "jpeg-hw-s5p.h"
-#define S5P_JPEG_MIN_WIDTH 32
-#define S5P_JPEG_MIN_HEIGHT 32
-#define S5P_JPEG_MAX_WIDTH 8192
-#define S5P_JPEG_MAX_HEIGHT 8192
-#define S5P_JPEG_ENCODE 0
-#define S5P_JPEG_DECODE 1
-#define S5P_JPEG_RAW_IN_565 0
-#define S5P_JPEG_RAW_IN_422 1
-#define S5P_JPEG_RAW_OUT_422 0
-#define S5P_JPEG_RAW_OUT_420 1
-
-static inline void jpeg_reset(void __iomem *regs)
+void s5p_jpeg_reset(void __iomem *regs)
{
unsigned long reg;
@@ -42,12 +30,12 @@ static inline void jpeg_reset(void __iomem *regs)
}
}
-static inline void jpeg_poweron(void __iomem *regs)
+void s5p_jpeg_poweron(void __iomem *regs)
{
writel(S5P_POWER_ON, regs + S5P_JPGCLKCON);
}
-static inline void jpeg_input_raw_mode(void __iomem *regs, unsigned long mode)
+void s5p_jpeg_input_raw_mode(void __iomem *regs, unsigned long mode)
{
unsigned long reg, m;
@@ -63,7 +51,7 @@ static inline void jpeg_input_raw_mode(void __iomem *regs, unsigned long mode)
writel(reg, regs + S5P_JPGCMOD);
}
-static inline void jpeg_input_raw_y16(void __iomem *regs, bool y16)
+void s5p_jpeg_input_raw_y16(void __iomem *regs, bool y16)
{
unsigned long reg;
@@ -75,7 +63,7 @@ static inline void jpeg_input_raw_y16(void __iomem *regs, bool y16)
writel(reg, regs + S5P_JPGCMOD);
}
-static inline void jpeg_proc_mode(void __iomem *regs, unsigned long mode)
+void s5p_jpeg_proc_mode(void __iomem *regs, unsigned long mode)
{
unsigned long reg, m;
@@ -90,7 +78,7 @@ static inline void jpeg_proc_mode(void __iomem *regs, unsigned long mode)
writel(reg, regs + S5P_JPGMOD);
}
-static inline void jpeg_subsampling_mode(void __iomem *regs, unsigned int mode)
+void s5p_jpeg_subsampling_mode(void __iomem *regs, unsigned int mode)
{
unsigned long reg, m;
@@ -105,12 +93,12 @@ static inline void jpeg_subsampling_mode(void __iomem *regs, unsigned int mode)
writel(reg, regs + S5P_JPGMOD);
}
-static inline unsigned int jpeg_get_subsampling_mode(void __iomem *regs)
+unsigned int s5p_jpeg_get_subsampling_mode(void __iomem *regs)
{
return readl(regs + S5P_JPGMOD) & S5P_SUBSAMPLING_MODE_MASK;
}
-static inline void jpeg_dri(void __iomem *regs, unsigned int dri)
+void s5p_jpeg_dri(void __iomem *regs, unsigned int dri)
{
unsigned long reg;
@@ -125,7 +113,7 @@ static inline void jpeg_dri(void __iomem *regs, unsigned int dri)
writel(reg, regs + S5P_JPGDRI_L);
}
-static inline void jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n)
+void s5p_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n)
{
unsigned long reg;
@@ -135,7 +123,7 @@ static inline void jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n)
writel(reg, regs + S5P_JPG_QTBL);
}
-static inline void jpeg_htbl_ac(void __iomem *regs, unsigned int t)
+void s5p_jpeg_htbl_ac(void __iomem *regs, unsigned int t)
{
unsigned long reg;
@@ -146,7 +134,7 @@ static inline void jpeg_htbl_ac(void __iomem *regs, unsigned int t)
writel(reg, regs + S5P_JPG_HTBL);
}
-static inline void jpeg_htbl_dc(void __iomem *regs, unsigned int t)
+void s5p_jpeg_htbl_dc(void __iomem *regs, unsigned int t)
{
unsigned long reg;
@@ -157,7 +145,7 @@ static inline void jpeg_htbl_dc(void __iomem *regs, unsigned int t)
writel(reg, regs + S5P_JPG_HTBL);
}
-static inline void jpeg_y(void __iomem *regs, unsigned int y)
+void s5p_jpeg_y(void __iomem *regs, unsigned int y)
{
unsigned long reg;
@@ -172,7 +160,7 @@ static inline void jpeg_y(void __iomem *regs, unsigned int y)
writel(reg, regs + S5P_JPGY_L);
}
-static inline void jpeg_x(void __iomem *regs, unsigned int x)
+void s5p_jpeg_x(void __iomem *regs, unsigned int x)
{
unsigned long reg;
@@ -187,7 +175,7 @@ static inline void jpeg_x(void __iomem *regs, unsigned int x)
writel(reg, regs + S5P_JPGX_L);
}
-static inline void jpeg_rst_int_enable(void __iomem *regs, bool enable)
+void s5p_jpeg_rst_int_enable(void __iomem *regs, bool enable)
{
unsigned long reg;
@@ -198,7 +186,7 @@ static inline void jpeg_rst_int_enable(void __iomem *regs, bool enable)
writel(reg, regs + S5P_JPGINTSE);
}
-static inline void jpeg_data_num_int_enable(void __iomem *regs, bool enable)
+void s5p_jpeg_data_num_int_enable(void __iomem *regs, bool enable)
{
unsigned long reg;
@@ -209,7 +197,7 @@ static inline void jpeg_data_num_int_enable(void __iomem *regs, bool enable)
writel(reg, regs + S5P_JPGINTSE);
}
-static inline void jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl)
+void s5p_jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl)
{
unsigned long reg;
@@ -220,7 +208,7 @@ static inline void jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl)
writel(reg, regs + S5P_JPGINTSE);
}
-static inline void jpeg_timer_enable(void __iomem *regs, unsigned long val)
+void s5p_jpeg_timer_enable(void __iomem *regs, unsigned long val)
{
unsigned long reg;
@@ -231,7 +219,7 @@ static inline void jpeg_timer_enable(void __iomem *regs, unsigned long val)
writel(reg, regs + S5P_JPG_TIMER_SE);
}
-static inline void jpeg_timer_disable(void __iomem *regs)
+void s5p_jpeg_timer_disable(void __iomem *regs)
{
unsigned long reg;
@@ -240,13 +228,13 @@ static inline void jpeg_timer_disable(void __iomem *regs)
writel(reg, regs + S5P_JPG_TIMER_SE);
}
-static inline int jpeg_timer_stat(void __iomem *regs)
+int s5p_jpeg_timer_stat(void __iomem *regs)
{
return (int)((readl(regs + S5P_JPG_TIMER_ST) & S5P_TIMER_INT_STAT_MASK)
>> S5P_TIMER_INT_STAT_SHIFT);
}
-static inline void jpeg_clear_timer_stat(void __iomem *regs)
+void s5p_jpeg_clear_timer_stat(void __iomem *regs)
{
unsigned long reg;
@@ -255,7 +243,7 @@ static inline void jpeg_clear_timer_stat(void __iomem *regs)
writel(reg, regs + S5P_JPG_TIMER_SE);
}
-static inline void jpeg_enc_stream_int(void __iomem *regs, unsigned long size)
+void s5p_jpeg_enc_stream_int(void __iomem *regs, unsigned long size)
{
unsigned long reg;
@@ -266,13 +254,13 @@ static inline void jpeg_enc_stream_int(void __iomem *regs, unsigned long size)
writel(reg, regs + S5P_JPG_ENC_STREAM_INTSE);
}
-static inline int jpeg_enc_stream_stat(void __iomem *regs)
+int s5p_jpeg_enc_stream_stat(void __iomem *regs)
{
return (int)(readl(regs + S5P_JPG_ENC_STREAM_INTST) &
S5P_ENC_STREAM_INT_STAT_MASK);
}
-static inline void jpeg_clear_enc_stream_stat(void __iomem *regs)
+void s5p_jpeg_clear_enc_stream_stat(void __iomem *regs)
{
unsigned long reg;
@@ -281,7 +269,7 @@ static inline void jpeg_clear_enc_stream_stat(void __iomem *regs)
writel(reg, regs + S5P_JPG_ENC_STREAM_INTSE);
}
-static inline void jpeg_outform_raw(void __iomem *regs, unsigned long format)
+void s5p_jpeg_outform_raw(void __iomem *regs, unsigned long format)
{
unsigned long reg, f;
@@ -296,17 +284,17 @@ static inline void jpeg_outform_raw(void __iomem *regs, unsigned long format)
writel(reg, regs + S5P_JPG_OUTFORM);
}
-static inline void jpeg_jpgadr(void __iomem *regs, unsigned long addr)
+void s5p_jpeg_jpgadr(void __iomem *regs, unsigned long addr)
{
writel(addr, regs + S5P_JPG_JPGADR);
}
-static inline void jpeg_imgadr(void __iomem *regs, unsigned long addr)
+void s5p_jpeg_imgadr(void __iomem *regs, unsigned long addr)
{
writel(addr, regs + S5P_JPG_IMGADR);
}
-static inline void jpeg_coef(void __iomem *regs, unsigned int i,
+void s5p_jpeg_coef(void __iomem *regs, unsigned int i,
unsigned int j, unsigned int coef)
{
unsigned long reg;
@@ -317,24 +305,24 @@ static inline void jpeg_coef(void __iomem *regs, unsigned int i,
writel(reg, regs + S5P_JPG_COEF(i));
}
-static inline void jpeg_start(void __iomem *regs)
+void s5p_jpeg_start(void __iomem *regs)
{
writel(1, regs + S5P_JSTART);
}
-static inline int jpeg_result_stat_ok(void __iomem *regs)
+int s5p_jpeg_result_stat_ok(void __iomem *regs)
{
return (int)((readl(regs + S5P_JPGINTST) & S5P_RESULT_STAT_MASK)
>> S5P_RESULT_STAT_SHIFT);
}
-static inline int jpeg_stream_stat_ok(void __iomem *regs)
+int s5p_jpeg_stream_stat_ok(void __iomem *regs)
{
return !(int)((readl(regs + S5P_JPGINTST) & S5P_STREAM_STAT_MASK)
>> S5P_STREAM_STAT_SHIFT);
}
-static inline void jpeg_clear_int(void __iomem *regs)
+void s5p_jpeg_clear_int(void __iomem *regs)
{
unsigned long reg;
@@ -343,7 +331,7 @@ static inline void jpeg_clear_int(void __iomem *regs)
reg = readl(regs + S5P_JPGOPR);
}
-static inline unsigned int jpeg_compressed_size(void __iomem *regs)
+unsigned int s5p_jpeg_compressed_size(void __iomem *regs)
{
unsigned long jpeg_size = 0;
@@ -353,5 +341,3 @@ static inline unsigned int jpeg_compressed_size(void __iomem *regs)
return (unsigned int)jpeg_size;
}
-
-#endif /* JPEG_HW_H_ */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
new file mode 100644
index 000000000000..c11ebe86b9c9
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
@@ -0,0 +1,63 @@
+/* linux/drivers/media/platform/s5p-jpeg/jpeg-hw.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef JPEG_HW_S5P_H_
+#define JPEG_HW_S5P_H_
+
+#include <linux/io.h>
+#include <linux/videodev2.h>
+
+#include "jpeg-regs.h"
+
+#define S5P_JPEG_MIN_WIDTH 32
+#define S5P_JPEG_MIN_HEIGHT 32
+#define S5P_JPEG_MAX_WIDTH 8192
+#define S5P_JPEG_MAX_HEIGHT 8192
+#define S5P_JPEG_RAW_IN_565 0
+#define S5P_JPEG_RAW_IN_422 1
+#define S5P_JPEG_RAW_OUT_422 0
+#define S5P_JPEG_RAW_OUT_420 1
+
+void s5p_jpeg_reset(void __iomem *regs);
+void s5p_jpeg_poweron(void __iomem *regs);
+void s5p_jpeg_input_raw_mode(void __iomem *regs, unsigned long mode);
+void s5p_jpeg_input_raw_y16(void __iomem *regs, bool y16);
+void s5p_jpeg_proc_mode(void __iomem *regs, unsigned long mode);
+void s5p_jpeg_subsampling_mode(void __iomem *regs, unsigned int mode);
+unsigned int s5p_jpeg_get_subsampling_mode(void __iomem *regs);
+void s5p_jpeg_dri(void __iomem *regs, unsigned int dri);
+void s5p_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n);
+void s5p_jpeg_htbl_ac(void __iomem *regs, unsigned int t);
+void s5p_jpeg_htbl_dc(void __iomem *regs, unsigned int t);
+void s5p_jpeg_y(void __iomem *regs, unsigned int y);
+void s5p_jpeg_x(void __iomem *regs, unsigned int x);
+void s5p_jpeg_rst_int_enable(void __iomem *regs, bool enable);
+void s5p_jpeg_data_num_int_enable(void __iomem *regs, bool enable);
+void s5p_jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl);
+void s5p_jpeg_timer_enable(void __iomem *regs, unsigned long val);
+void s5p_jpeg_timer_disable(void __iomem *regs);
+int s5p_jpeg_timer_stat(void __iomem *regs);
+void s5p_jpeg_clear_timer_stat(void __iomem *regs);
+void s5p_jpeg_enc_stream_int(void __iomem *regs, unsigned long size);
+int s5p_jpeg_enc_stream_stat(void __iomem *regs);
+void s5p_jpeg_clear_enc_stream_stat(void __iomem *regs);
+void s5p_jpeg_outform_raw(void __iomem *regs, unsigned long format);
+void s5p_jpeg_jpgadr(void __iomem *regs, unsigned long addr);
+void s5p_jpeg_imgadr(void __iomem *regs, unsigned long addr);
+void s5p_jpeg_coef(void __iomem *regs, unsigned int i,
+ unsigned int j, unsigned int coef);
+void s5p_jpeg_start(void __iomem *regs);
+int s5p_jpeg_result_stat_ok(void __iomem *regs);
+int s5p_jpeg_stream_stat_ok(void __iomem *regs);
+void s5p_jpeg_clear_int(void __iomem *regs);
+unsigned int s5p_jpeg_compressed_size(void __iomem *regs);
+
+#endif /* JPEG_HW_S5P_H_ */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-regs.h b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
index 38e50815668c..33f2c7374cfd 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-regs.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
@@ -2,10 +2,11 @@
*
* Register definition file for Samsung JPEG codec driver
*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2011-2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -15,6 +16,8 @@
#ifndef JPEG_REGS_H_
#define JPEG_REGS_H_
+/* Register and bit definitions for S5PC210 */
+
/* JPEG mode register */
#define S5P_JPGMOD 0x00
#define S5P_PROC_MODE_MASK (0x1 << 3)
@@ -166,5 +169,209 @@
/* JPEG AC Huffman table register */
#define S5P_JPG_HACTBLG(n) (0x8c0 + (n) * 0x400)
+
+/* Register and bit definitions for Exynos 4x12 */
+
+/* JPEG Codec Control Registers */
+#define EXYNOS4_JPEG_CNTL_REG 0x00
+#define EXYNOS4_INT_EN_REG 0x04
+#define EXYNOS4_INT_TIMER_COUNT_REG 0x08
+#define EXYNOS4_INT_STATUS_REG 0x0c
+#define EXYNOS4_OUT_MEM_BASE_REG 0x10
+#define EXYNOS4_JPEG_IMG_SIZE_REG 0x14
+#define EXYNOS4_IMG_BA_PLANE_1_REG 0x18
+#define EXYNOS4_IMG_SO_PLANE_1_REG 0x1c
+#define EXYNOS4_IMG_PO_PLANE_1_REG 0x20
+#define EXYNOS4_IMG_BA_PLANE_2_REG 0x24
+#define EXYNOS4_IMG_SO_PLANE_2_REG 0x28
+#define EXYNOS4_IMG_PO_PLANE_2_REG 0x2c
+#define EXYNOS4_IMG_BA_PLANE_3_REG 0x30
+#define EXYNOS4_IMG_SO_PLANE_3_REG 0x34
+#define EXYNOS4_IMG_PO_PLANE_3_REG 0x38
+
+#define EXYNOS4_TBL_SEL_REG 0x3c
+
+#define EXYNOS4_IMG_FMT_REG 0x40
+
+#define EXYNOS4_BITSTREAM_SIZE_REG 0x44
+#define EXYNOS4_PADDING_REG 0x48
+#define EXYNOS4_HUFF_CNT_REG 0x4c
+#define EXYNOS4_FIFO_STATUS_REG 0x50
+#define EXYNOS4_DECODE_XY_SIZE_REG 0x54
+#define EXYNOS4_DECODE_IMG_FMT_REG 0x58
+
+#define EXYNOS4_QUAN_TBL_ENTRY_REG 0x100
+#define EXYNOS4_HUFF_TBL_ENTRY_REG 0x200
+
+
+/****************************************************************/
+/* Bit definition part */
+/****************************************************************/
+
+/* JPEG CNTL Register bit */
+#define EXYNOS4_ENC_DEC_MODE_MASK (0xfffffffc << 0)
+#define EXYNOS4_DEC_MODE (1 << 0)
+#define EXYNOS4_ENC_MODE (1 << 1)
+#define EXYNOS4_AUTO_RST_MARKER (1 << 2)
+#define EXYNOS4_RST_INTERVAL_SHIFT 3
+#define EXYNOS4_RST_INTERVAL(x) (((x) & 0xffff) \
+ << EXYNOS4_RST_INTERVAL_SHIFT)
+#define EXYNOS4_HUF_TBL_EN (1 << 19)
+#define EXYNOS4_HOR_SCALING_SHIFT 20
+#define EXYNOS4_HOR_SCALING_MASK (3 << EXYNOS4_HOR_SCALING_SHIFT)
+#define EXYNOS4_HOR_SCALING(x) (((x) & 0x3) \
+ << EXYNOS4_HOR_SCALING_SHIFT)
+#define EXYNOS4_VER_SCALING_SHIFT 22
+#define EXYNOS4_VER_SCALING_MASK (3 << EXYNOS4_VER_SCALING_SHIFT)
+#define EXYNOS4_VER_SCALING(x) (((x) & 0x3) \
+ << EXYNOS4_VER_SCALING_SHIFT)
+#define EXYNOS4_PADDING (1 << 27)
+#define EXYNOS4_SYS_INT_EN (1 << 28)
+#define EXYNOS4_SOFT_RESET_HI (1 << 29)
+
+/* JPEG INT Register bit */
+#define EXYNOS4_INT_EN_MASK (0x1f << 0)
+#define EXYNOS4_PROT_ERR_INT_EN (1 << 0)
+#define EXYNOS4_IMG_COMPLETION_INT_EN (1 << 1)
+#define EXYNOS4_DEC_INVALID_FORMAT_EN (1 << 2)
+#define EXYNOS4_MULTI_SCAN_ERROR_EN (1 << 3)
+#define EXYNOS4_FRAME_ERR_EN (1 << 4)
+#define EXYNOS4_INT_EN_ALL (0x1f << 0)
+
+#define EXYNOS4_MOD_REG_PROC_ENC (0 << 3)
+#define EXYNOS4_MOD_REG_PROC_DEC (1 << 3)
+
+#define EXYNOS4_MOD_REG_SUBSAMPLE_444 (0 << 0)
+#define EXYNOS4_MOD_REG_SUBSAMPLE_422 (1 << 0)
+#define EXYNOS4_MOD_REG_SUBSAMPLE_420 (2 << 0)
+#define EXYNOS4_MOD_REG_SUBSAMPLE_GRAY (3 << 0)
+
+
+/* JPEG IMAGE SIZE Register bit */
+#define EXYNOS4_X_SIZE_SHIFT 0
+#define EXYNOS4_X_SIZE_MASK (0xffff << EXYNOS4_X_SIZE_SHIFT)
+#define EXYNOS4_X_SIZE(x) (((x) & 0xffff) << EXYNOS4_X_SIZE_SHIFT)
+#define EXYNOS4_Y_SIZE_SHIFT 16
+#define EXYNOS4_Y_SIZE_MASK (0xffff << EXYNOS4_Y_SIZE_SHIFT)
+#define EXYNOS4_Y_SIZE(x) (((x) & 0xffff) << EXYNOS4_Y_SIZE_SHIFT)
+
+/* JPEG IMAGE FORMAT Register bit */
+#define EXYNOS4_ENC_IN_FMT_MASK 0xffff0000
+#define EXYNOS4_ENC_GRAY_IMG (0 << 0)
+#define EXYNOS4_ENC_RGB_IMG (1 << 0)
+#define EXYNOS4_ENC_YUV_444_IMG (2 << 0)
+#define EXYNOS4_ENC_YUV_422_IMG (3 << 0)
+#define EXYNOS4_ENC_YUV_440_IMG (4 << 0)
+
+#define EXYNOS4_DEC_GRAY_IMG (0 << 0)
+#define EXYNOS4_DEC_RGB_IMG (1 << 0)
+#define EXYNOS4_DEC_YUV_444_IMG (2 << 0)
+#define EXYNOS4_DEC_YUV_422_IMG (3 << 0)
+#define EXYNOS4_DEC_YUV_420_IMG (4 << 0)
+
+#define EXYNOS4_GRAY_IMG_IP_SHIFT 3
+#define EXYNOS4_GRAY_IMG_IP_MASK (7 << EXYNOS4_GRAY_IMG_IP_SHIFT)
+#define EXYNOS4_GRAY_IMG_IP (4 << EXYNOS4_GRAY_IMG_IP_SHIFT)
+
+#define EXYNOS4_RGB_IP_SHIFT 6
+#define EXYNOS4_RGB_IP_MASK (7 << EXYNOS4_RGB_IP_SHIFT)
+#define EXYNOS4_RGB_IP_RGB_16BIT_IMG (4 << EXYNOS4_RGB_IP_SHIFT)
+#define EXYNOS4_RGB_IP_RGB_32BIT_IMG (5 << EXYNOS4_RGB_IP_SHIFT)
+
+#define EXYNOS4_YUV_444_IP_SHIFT 9
+#define EXYNOS4_YUV_444_IP_MASK (7 << EXYNOS4_YUV_444_IP_SHIFT)
+#define EXYNOS4_YUV_444_IP_YUV_444_2P_IMG (4 << EXYNOS4_YUV_444_IP_SHIFT)
+#define EXYNOS4_YUV_444_IP_YUV_444_3P_IMG (5 << EXYNOS4_YUV_444_IP_SHIFT)
+
+#define EXYNOS4_YUV_422_IP_SHIFT 12
+#define EXYNOS4_YUV_422_IP_MASK (7 << EXYNOS4_YUV_422_IP_SHIFT)
+#define EXYNOS4_YUV_422_IP_YUV_422_1P_IMG (4 << EXYNOS4_YUV_422_IP_SHIFT)
+#define EXYNOS4_YUV_422_IP_YUV_422_2P_IMG (5 << EXYNOS4_YUV_422_IP_SHIFT)
+#define EXYNOS4_YUV_422_IP_YUV_422_3P_IMG (6 << EXYNOS4_YUV_422_IP_SHIFT)
+
+#define EXYNOS4_YUV_420_IP_SHIFT 15
+#define EXYNOS4_YUV_420_IP_MASK (7 << EXYNOS4_YUV_420_IP_SHIFT)
+#define EXYNOS4_YUV_420_IP_YUV_420_2P_IMG (4 << EXYNOS4_YUV_420_IP_SHIFT)
+#define EXYNOS4_YUV_420_IP_YUV_420_3P_IMG (5 << EXYNOS4_YUV_420_IP_SHIFT)
+
+#define EXYNOS4_ENC_FMT_SHIFT 24
+#define EXYNOS4_ENC_FMT_MASK (3 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS4_ENC_FMT_GRAY (0 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS4_ENC_FMT_YUV_444 (1 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS4_ENC_FMT_YUV_422 (2 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS4_ENC_FMT_YUV_420 (3 << EXYNOS4_ENC_FMT_SHIFT)
+
+#define EXYNOS4_JPEG_DECODED_IMG_FMT_MASK 0x03
+
+#define EXYNOS4_SWAP_CHROMA_CRCB (1 << 26)
+#define EXYNOS4_SWAP_CHROMA_CBCR (0 << 26)
+
+/* JPEG HUFF count Register bit */
+#define EXYNOS4_HUFF_COUNT_MASK 0xffff
+
+/* JPEG Decoded_img_x_y_size Register bit */
+#define EXYNOS4_DECODED_SIZE_MASK 0x0000ffff
+
+/* JPEG Decoded image format Register bit */
+#define EXYNOS4_DECODED_IMG_FMT_MASK 0x3
+
+/* JPEG TBL SEL Register bit */
+#define EXYNOS4_Q_TBL_COMP1_0 (0 << 0)
+#define EXYNOS4_Q_TBL_COMP1_1 (1 << 0)
+#define EXYNOS4_Q_TBL_COMP1_2 (2 << 0)
+#define EXYNOS4_Q_TBL_COMP1_3 (3 << 0)
+
+#define EXYNOS4_Q_TBL_COMP2_0 (0 << 2)
+#define EXYNOS4_Q_TBL_COMP2_1 (1 << 2)
+#define EXYNOS4_Q_TBL_COMP2_2 (2 << 2)
+#define EXYNOS4_Q_TBL_COMP2_3 (3 << 2)
+
+#define EXYNOS4_Q_TBL_COMP3_0 (0 << 4)
+#define EXYNOS4_Q_TBL_COMP3_1 (1 << 4)
+#define EXYNOS4_Q_TBL_COMP3_2 (2 << 4)
+#define EXYNOS4_Q_TBL_COMP3_3 (3 << 4)
+
+#define EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_0 (0 << 6)
+#define EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_1 (1 << 6)
+#define EXYNOS4_HUFF_TBL_COMP1_AC_1_DC_0 (2 << 6)
+#define EXYNOS4_HUFF_TBL_COMP1_AC_1_DC_1 (3 << 6)
+
+#define EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_0 (0 << 8)
+#define EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_1 (1 << 8)
+#define EXYNOS4_HUFF_TBL_COMP2_AC_1_DC_0 (2 << 8)
+#define EXYNOS4_HUFF_TBL_COMP2_AC_1_DC_1 (3 << 8)
+
+#define EXYNOS4_HUFF_TBL_COMP3_AC_0_DC_0 (0 << 10)
+#define EXYNOS4_HUFF_TBL_COMP3_AC_0_DC_1 (1 << 10)
+#define EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_0 (2 << 10)
+#define EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_1 (3 << 10)
+
+/* JPEG quantizer table register */
+#define EXYNOS4_QTBL_CONTENT(n) (0x100 + (n) * 0x40)
+
+/* JPEG DC luminance (code length) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HDCLL 0x200
+
+/* JPEG DC luminance (values) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HDCLV 0x210
+
+/* JPEG DC chrominance (code length) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HDCCL 0x220
+
+/* JPEG DC chrominance (values) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HDCCV 0x230
+
+/* JPEG AC luminance (code length) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HACLL 0x240
+
+/* JPEG AC luminance (values) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HACLV 0x250
+
+/* JPEG AC chrominance (code length) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HACCL 0x300
+
+/* JPEG AC chrominance (values) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HACCV 0x310
+
#endif /* JPEG_REGS_H_ */
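The new Exynos4 register map above packs several fields into single 32-bit registers through shift/mask helper macros. As a quick, userspace-only illustration (using local copies of two of those macros, not the header itself), this is how a width/height pair ends up in EXYNOS4_JPEG_IMG_SIZE_REG, mirroring what exynos4_jpeg_set_stream_size() does in jpeg-hw-exynos4.c:

#include <stdio.h>

/* Local copies of two helpers from jpeg-regs.h, for illustration only. */
#define EXYNOS4_X_SIZE_SHIFT	0
#define EXYNOS4_X_SIZE(x)	(((x) & 0xffff) << EXYNOS4_X_SIZE_SHIFT)
#define EXYNOS4_Y_SIZE_SHIFT	16
#define EXYNOS4_Y_SIZE(y)	(((y) & 0xffff) << EXYNOS4_Y_SIZE_SHIFT)

int main(void)
{
	unsigned int width = 1280, height = 720;
	/* The driver writes exactly this kind of packed value to the register. */
	unsigned int img_size = EXYNOS4_X_SIZE(width) | EXYNOS4_Y_SIZE(height);

	printf("EXYNOS4_JPEG_IMG_SIZE_REG value: 0x%08x\n", img_size);
	return 0;
}

For 1280x720 this prints 0x02d00500: height in the upper 16 bits, width in the lower 16 bits.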
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index e46067a57853..e2aac592d29f 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -177,21 +177,6 @@ unlock:
mutex_unlock(&dev->mfc_mutex);
}
-static enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (!vdev) {
- mfc_err("failed to get video_device");
- return MFCNODE_INVALID;
- }
- if (vdev->index == 0)
- return MFCNODE_DECODER;
- else if (vdev->index == 1)
- return MFCNODE_ENCODER;
- return MFCNODE_INVALID;
-}
-
static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
{
mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
@@ -705,6 +690,7 @@ irq_cleanup_hw:
/* Open an MFC node */
static int s5p_mfc_open(struct file *file)
{
+ struct video_device *vdev = video_devdata(file);
struct s5p_mfc_dev *dev = video_drvdata(file);
struct s5p_mfc_ctx *ctx = NULL;
struct vb2_queue *q;
@@ -742,7 +728,7 @@ static int s5p_mfc_open(struct file *file)
/* Mark context as idle */
clear_work_bit_irqsave(ctx);
dev->ctx[ctx->num] = ctx;
- if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+ if (vdev == dev->vfd_dec) {
ctx->type = MFCINST_DECODER;
ctx->c_ops = get_dec_codec_ops();
s5p_mfc_dec_init(ctx);
@@ -752,7 +738,7 @@ static int s5p_mfc_open(struct file *file)
mfc_err("Failed to setup mfc controls\n");
goto err_ctrls_setup;
}
- } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+ } else if (vdev == dev->vfd_enc) {
ctx->type = MFCINST_ENCODER;
ctx->c_ops = get_enc_codec_ops();
/* only for encoder */
@@ -797,10 +783,10 @@ static int s5p_mfc_open(struct file *file)
q = &ctx->vq_dst;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
q->drv_priv = &ctx->fh;
- if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+ if (vdev == dev->vfd_dec) {
q->io_modes = VB2_MMAP;
q->ops = get_dec_queue_ops();
- } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+ } else if (vdev == dev->vfd_enc) {
q->io_modes = VB2_MMAP | VB2_USERPTR;
q->ops = get_enc_queue_ops();
} else {
@@ -819,10 +805,10 @@ static int s5p_mfc_open(struct file *file)
q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
q->io_modes = VB2_MMAP;
q->drv_priv = &ctx->fh;
- if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+ if (vdev == dev->vfd_dec) {
q->io_modes = VB2_MMAP;
q->ops = get_dec_queue_ops();
- } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+ } else if (vdev == dev->vfd_enc) {
q->io_modes = VB2_MMAP | VB2_USERPTR;
q->ops = get_enc_queue_ops();
} else {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 6920b546181a..f723f1f2f578 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -115,15 +115,6 @@ enum s5p_mfc_fmt_type {
};
/**
- * enum s5p_mfc_node_type - The type of an MFC device node.
- */
-enum s5p_mfc_node_type {
- MFCNODE_INVALID = -1,
- MFCNODE_DECODER = 0,
- MFCNODE_ENCODER = 1,
-};
-
-/**
* enum s5p_mfc_inst_type - The type of an MFC instance.
*/
enum s5p_mfc_inst_type {
@@ -422,6 +413,11 @@ struct s5p_mfc_vp8_enc_params {
enum v4l2_vp8_golden_frame_sel golden_frame_sel;
u8 hier_layer;
u8 hier_layer_qp[3];
+ u8 rc_min_qp;
+ u8 rc_max_qp;
+ u8 rc_frame_qp;
+ u8 rc_p_frame_qp;
+ u8 profile;
};
/**
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 4ff3b6cd6842..91b6e020ddf3 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -618,6 +618,46 @@ static struct mfc_control controls[] = {
.default_value = V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV,
.menu_skip_mask = 0,
},
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_MAX_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 127,
+ .step = 1,
+ .default_value = 127,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_MIN_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 11,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 127,
+ .step = 1,
+ .default_value = 10,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 127,
+ .step = 1,
+ .default_value = 10,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_PROFILE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 3,
+ .step = 1,
+ .default_value = 0,
+ },
};
#define NUM_CTRLS ARRAY_SIZE(controls)
@@ -1557,6 +1597,21 @@ static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
p->codec.vp8.golden_frame_sel = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP:
+ p->codec.vp8.rc_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP:
+ p->codec.vp8.rc_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP:
+ p->codec.vp8.rc_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP:
+ p->codec.vp8.rc_p_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ p->codec.vp8.profile = ctrl->val;
+ break;
default:
v4l2_err(&dev->v4l2_dev, "Invalid control, id=%d, val=%d\n",
ctrl->id, ctrl->val);
@@ -1863,7 +1918,7 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
if (ctx->src_bufs_cnt < ctx->pb_count) {
mfc_err("Need minimum %d OUTPUT buffers\n",
ctx->pb_count);
- return -EINVAL;
+ return -ENOBUFS;
}
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 461358c4a790..f6ff2dbf3a1d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -1197,10 +1197,8 @@ static int s5p_mfc_set_enc_params_vp8(struct s5p_mfc_ctx *ctx)
reg |= ((p->num_b_frame & 0x3) << 16);
WRITEL(reg, S5P_FIMV_E_GOP_CONFIG_V6);
- /* profile & level */
- reg = 0;
- /** profile */
- reg |= (0x1 << 4);
+ /* profile - 0 ~ 3 */
+ reg = p_vp8->profile & 0x3;
WRITEL(reg, S5P_FIMV_E_PICTURE_PROFILE_V6);
/* rate control config. */
@@ -1218,6 +1216,26 @@ static int s5p_mfc_set_enc_params_vp8(struct s5p_mfc_ctx *ctx)
WRITEL(reg, S5P_FIMV_E_RC_FRAME_RATE_V6);
}
+ /* frame QP */
+ reg &= ~(0x7F);
+ reg |= p_vp8->rc_frame_qp & 0x7F;
+ WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6);
+
+ /* other QPs */
+ WRITEL(0x0, S5P_FIMV_E_FIXED_PICTURE_QP_V6);
+ if (!p->rc_frame && !p->rc_mb) {
+ reg = 0;
+ reg |= ((p_vp8->rc_p_frame_qp & 0x7F) << 8);
+ reg |= p_vp8->rc_frame_qp & 0x7F;
+ WRITEL(reg, S5P_FIMV_E_FIXED_PICTURE_QP_V6);
+ }
+
+ /* max QP */
+ reg = ((p_vp8->rc_max_qp & 0x7F) << 8);
+ /* min QP */
+ reg |= p_vp8->rc_min_qp & 0x7F;
+ WRITEL(reg, S5P_FIMV_E_RC_QP_BOUND_V6);
+
/* vbv buffer size */
if (p->frame_skip_mode ==
V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c
index 51805a5e2beb..bc08b5f28e44 100644
--- a/drivers/media/platform/s5p-tv/mixer_drv.c
+++ b/drivers/media/platform/s5p-tv/mixer_drv.c
@@ -347,19 +347,41 @@ static int mxr_runtime_resume(struct device *dev)
{
struct mxr_device *mdev = to_mdev(dev);
struct mxr_resources *res = &mdev->res;
+ int ret;
mxr_dbg(mdev, "resume - start\n");
mutex_lock(&mdev->mutex);
/* turn clocks on */
- clk_enable(res->mixer);
- clk_enable(res->vp);
- clk_enable(res->sclk_mixer);
+ ret = clk_prepare_enable(res->mixer);
+ if (ret < 0) {
+ dev_err(mdev->dev, "clk_prepare_enable(mixer) failed\n");
+ goto fail;
+ }
+ ret = clk_prepare_enable(res->vp);
+ if (ret < 0) {
+ dev_err(mdev->dev, "clk_prepare_enable(vp) failed\n");
+ goto fail_mixer;
+ }
+ ret = clk_prepare_enable(res->sclk_mixer);
+ if (ret < 0) {
+ dev_err(mdev->dev, "clk_prepare_enable(sclk_mixer) failed\n");
+ goto fail_vp;
+ }
/* apply default configuration */
mxr_reg_reset(mdev);
mxr_dbg(mdev, "resume - finished\n");
mutex_unlock(&mdev->mutex);
return 0;
+
+fail_vp:
+ clk_disable_unprepare(res->vp);
+fail_mixer:
+ clk_disable_unprepare(res->mixer);
+fail:
+ mutex_unlock(&mdev->mutex);
+ dev_err(mdev->dev, "resume failed\n");
+ return ret;
}
static int mxr_runtime_suspend(struct device *dev)
@@ -369,9 +391,9 @@ static int mxr_runtime_suspend(struct device *dev)
mxr_dbg(mdev, "suspend - start\n");
mutex_lock(&mdev->mutex);
/* turn clocks off */
- clk_disable(res->sclk_mixer);
- clk_disable(res->vp);
- clk_disable(res->mixer);
+ clk_disable_unprepare(res->sclk_mixer);
+ clk_disable_unprepare(res->vp);
+ clk_disable_unprepare(res->mixer);
mutex_unlock(&mdev->mutex);
mxr_dbg(mdev, "suspend - finished\n");
return 0;
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index 81b97db111d8..c5059ba0d733 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -948,7 +948,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
if (count == 0) {
mxr_dbg(mdev, "no output buffers queued\n");
- return -EINVAL;
+ return -ENOBUFS;
}
/* block any changes in output configuration */
diff --git a/drivers/media/platform/s5p-tv/sdo_drv.c b/drivers/media/platform/s5p-tv/sdo_drv.c
index 0afa90f0f6ab..5a7c3796f22e 100644
--- a/drivers/media/platform/s5p-tv/sdo_drv.c
+++ b/drivers/media/platform/s5p-tv/sdo_drv.c
@@ -55,6 +55,8 @@ struct sdo_device {
struct clk *dacphy;
/** clock for control of VPLL */
struct clk *fout_vpll;
+ /** vpll rate before sdo stream was on */
+ unsigned long vpll_rate;
/** regulator for SDO IP power */
struct regulator *vdac;
/** regulator for SDO plug detection */
@@ -193,17 +195,33 @@ static int sdo_s_power(struct v4l2_subdev *sd, int on)
static int sdo_streamon(struct sdo_device *sdev)
{
+ int ret;
+
/* set proper clock for Timing Generator */
- clk_set_rate(sdev->fout_vpll, 54000000);
+ sdev->vpll_rate = clk_get_rate(sdev->fout_vpll);
+ ret = clk_set_rate(sdev->fout_vpll, 54000000);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Failed to set vpll rate\n");
+ return ret;
+ }
dev_info(sdev->dev, "fout_vpll.rate = %lu\n",
clk_get_rate(sdev->fout_vpll));
/* enable clock in SDO */
sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_CLOCK_ON);
- clk_enable(sdev->dacphy);
+ ret = clk_prepare_enable(sdev->dacphy);
+ if (ret < 0) {
+ dev_err(sdev->dev, "clk_prepare_enable(dacphy) failed\n");
+ goto fail;
+ }
/* enable DAC */
sdo_write_mask(sdev, SDO_DAC, ~0, SDO_POWER_ON_DAC);
sdo_reg_debug(sdev);
return 0;
+
+fail:
+ sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_CLOCK_ON);
+ clk_set_rate(sdev->fout_vpll, sdev->vpll_rate);
+ return ret;
}
static int sdo_streamoff(struct sdo_device *sdev)
@@ -211,7 +229,7 @@ static int sdo_streamoff(struct sdo_device *sdev)
int tries;
sdo_write_mask(sdev, SDO_DAC, 0, SDO_POWER_ON_DAC);
- clk_disable(sdev->dacphy);
+ clk_disable_unprepare(sdev->dacphy);
sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_CLOCK_ON);
for (tries = 100; tries; --tries) {
if (sdo_read(sdev, SDO_CLKCON) & SDO_TVOUT_CLOCK_READY)
@@ -220,6 +238,7 @@ static int sdo_streamoff(struct sdo_device *sdev)
}
if (tries == 0)
dev_err(sdev->dev, "failed to stop streaming\n");
+ clk_set_rate(sdev->fout_vpll, sdev->vpll_rate);
return tries ? 0 : -EIO;
}
@@ -254,7 +273,7 @@ static int sdo_runtime_suspend(struct device *dev)
dev_info(dev, "suspend\n");
regulator_disable(sdev->vdet);
regulator_disable(sdev->vdac);
- clk_disable(sdev->sclk_dac);
+ clk_disable_unprepare(sdev->sclk_dac);
return 0;
}
@@ -266,7 +285,7 @@ static int sdo_runtime_resume(struct device *dev)
dev_info(dev, "resume\n");
- ret = clk_enable(sdev->sclk_dac);
+ ret = clk_prepare_enable(sdev->sclk_dac);
if (ret < 0)
return ret;
@@ -299,7 +318,7 @@ static int sdo_runtime_resume(struct device *dev)
vdac_r_dis:
regulator_disable(sdev->vdac);
dac_clk_dis:
- clk_disable(sdev->sclk_dac);
+ clk_disable_unprepare(sdev->sclk_dac);
return ret;
}
@@ -405,7 +424,11 @@ static int sdo_probe(struct platform_device *pdev)
}
/* enable gate for dac clock, because mixer uses it */
- clk_enable(sdev->dac);
+ ret = clk_prepare_enable(sdev->dac);
+ if (ret < 0) {
+ dev_err(dev, "clk_prepare_enable(dac) failed\n");
+ goto fail_fout_vpll;
+ }
/* configure power management */
pm_runtime_enable(dev);
@@ -444,7 +467,7 @@ static int sdo_remove(struct platform_device *pdev)
struct sdo_device *sdev = sd_to_sdev(sd);
pm_runtime_disable(&pdev->dev);
- clk_disable(sdev->dac);
+ clk_disable_unprepare(sdev->dac);
clk_put(sdev->fout_vpll);
clk_put(sdev->dacphy);
clk_put(sdev->dac);
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 4f30341dc2ab..e5f1d4c14f2c 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -286,7 +286,7 @@ static int sh_vou_buf_prepare(struct videobuf_queue *vq,
vb->size = vb->height * bytes_per_line;
if (vb->baddr && vb->bsize < vb->size) {
/* User buffer too small */
- dev_warn(vq->dev, "User buffer too small: [%u] @ %lx\n",
+ dev_warn(vq->dev, "User buffer too small: [%zu] @ %lx\n",
vb->bsize, vb->baddr);
return -EINVAL;
}
@@ -302,9 +302,10 @@ static int sh_vou_buf_prepare(struct videobuf_queue *vq,
}
dev_dbg(vou_dev->v4l2_dev.dev,
- "%s(): fmt #%d, %u bytes per line, phys 0x%x, type %d, state %d\n",
+ "%s(): fmt #%d, %u bytes per line, phys %pad, type %d, state %d\n",
__func__, vou_dev->pix_idx, bytes_per_line,
- videobuf_to_dma_contig(vb), vb->memory, vb->state);
+ ({ dma_addr_t addr = videobuf_to_dma_contig(vb); &addr; }),
+ vb->memory, vb->state);
return 0;
}
@@ -442,7 +443,7 @@ static void sh_vou_configure_geometry(struct sh_vou_device *vou_dev,
int pix_idx, int w_idx, int h_idx)
{
struct sh_vou_fmt *fmt = vou_fmt + pix_idx;
- unsigned int black_left, black_top, width_max, height_max,
+ unsigned int black_left, black_top, width_max,
frame_in_height, frame_out_height, frame_out_top;
struct v4l2_rect *rect = &vou_dev->rect;
struct v4l2_pix_format *pix = &vou_dev->pix;
@@ -450,10 +451,10 @@ static void sh_vou_configure_geometry(struct sh_vou_device *vou_dev,
if (vou_dev->std & V4L2_STD_525_60) {
width_max = 858;
- height_max = 262;
+ /* height_max = 262; */
} else {
width_max = 864;
- height_max = 312;
+ /* height_max = 312; */
}
frame_in_height = pix->height / 2;
@@ -1052,7 +1053,6 @@ static irqreturn_t sh_vou_isr(int irq, void *dev_id)
static unsigned long j;
struct videobuf_buffer *vb;
static int cnt;
- static int side;
u32 irq_status = sh_vou_reg_a_read(vou_dev, VOUIR), masked;
u32 vou_status = sh_vou_reg_a_read(vou_dev, VOUSTR);
@@ -1080,7 +1080,7 @@ static irqreturn_t sh_vou_isr(int irq, void *dev_id)
irq_status, masked, vou_status, cnt);
cnt++;
- side = vou_status & 0x10000;
+ /* side = vou_status & 0x10000; */
/* Clear only set interrupts */
sh_vou_reg_a_write(vou_dev, VOUIR, masked);
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 104485632501..4835173d7f80 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -34,13 +34,6 @@
#define MIN_FRAME_RATE 15
#define FRAME_INTERVAL_MILLI_SEC (1000 / MIN_FRAME_RATE)
-/* ISI states */
-enum {
- ISI_STATE_IDLE = 0,
- ISI_STATE_READY,
- ISI_STATE_WAIT_SOF,
-};
-
/* Frame buffer descriptor */
struct fbd {
/* Physical address of the frame buffer */
@@ -75,11 +68,6 @@ struct atmel_isi {
void __iomem *regs;
int sequence;
- /* State of the ISI module in capturing mode */
- int state;
-
- /* Wait queue for waiting for SOF */
- wait_queue_head_t vsync_wq;
struct vb2_alloc_ctx *alloc_ctx;
@@ -124,16 +112,16 @@ static int configure_geometry(struct atmel_isi *isi, u32 width,
case V4L2_MBUS_FMT_Y8_1X8:
cr = ISI_CFG2_GRAYSCALE;
break;
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_VYUY8_2X8:
cr = ISI_CFG2_YCC_SWAP_MODE_3;
break;
- case V4L2_MBUS_FMT_VYUY8_2X8:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
cr = ISI_CFG2_YCC_SWAP_MODE_2;
break;
- case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_YVYU8_2X8:
cr = ISI_CFG2_YCC_SWAP_MODE_1;
break;
- case V4L2_MBUS_FMT_YVYU8_2X8:
+ case V4L2_MBUS_FMT_YUYV8_2X8:
cr = ISI_CFG2_YCC_SWAP_DEFAULT;
break;
/* RGB, TODO */
@@ -144,6 +132,8 @@ static int configure_geometry(struct atmel_isi *isi, u32 width,
isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
cfg2 = isi_readl(isi, ISI_CFG2);
+ /* Set YCC swap mode */
+ cfg2 &= ~ISI_CFG2_YCC_SWAP_MODE_MASK;
cfg2 |= cr;
/* Set width */
cfg2 &= ~(ISI_CFG2_IM_HSIZE_MASK);
@@ -207,12 +197,6 @@ static irqreturn_t isi_interrupt(int irq, void *dev_id)
isi_writel(isi, ISI_INTDIS, ISI_CTRL_DIS);
ret = IRQ_HANDLED;
} else {
- if ((pending & ISI_SR_VSYNC) &&
- (isi->state == ISI_STATE_IDLE)) {
- isi->state = ISI_STATE_READY;
- wake_up_interruptible(&isi->vsync_wq);
- ret = IRQ_HANDLED;
- }
if (likely(pending & ISI_SR_CXFR_DONE))
ret = atmel_isi_handle_streaming(isi);
}
@@ -259,16 +243,6 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct atmel_isi *isi = ici->priv;
unsigned long size;
- int ret;
-
- /* Reset ISI */
- ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
- if (ret < 0) {
- dev_err(icd->parent, "Reset ISI timed out\n");
- return ret;
- }
- /* Disable all interrupts */
- isi_writel(isi, ISI_INTDIS, ~0UL);
size = icd->sizeimage;
@@ -374,6 +348,7 @@ static void start_dma(struct atmel_isi *isi, struct frame_buffer *buffer)
isi_writel(isi, ISI_DMA_C_CTRL, ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
+ cfg1 &= ~ISI_CFG1_FRATE_DIV_MASK;
/* Enable linked list */
cfg1 |= isi->pdata->frate | ISI_CFG1_DISCR;
@@ -407,43 +382,27 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct atmel_isi *isi = ici->priv;
-
u32 sr = 0;
int ret;
- spin_lock_irq(&isi->lock);
- isi->state = ISI_STATE_IDLE;
- /* Clear any pending SOF interrupt */
- sr = isi_readl(isi, ISI_STATUS);
- /* Enable VSYNC interrupt for SOF */
- isi_writel(isi, ISI_INTEN, ISI_SR_VSYNC);
- isi_writel(isi, ISI_CTRL, ISI_CTRL_EN);
- spin_unlock_irq(&isi->lock);
-
- dev_dbg(icd->parent, "Waiting for SOF\n");
- ret = wait_event_interruptible(isi->vsync_wq,
- isi->state != ISI_STATE_IDLE);
- if (ret)
- goto err;
-
- if (isi->state != ISI_STATE_READY) {
- ret = -EIO;
- goto err;
+ /* Reset ISI */
+ ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
+ if (ret < 0) {
+ dev_err(icd->parent, "Reset ISI timed out\n");
+ return ret;
}
+ /* Disable all interrupts */
+ isi_writel(isi, ISI_INTDIS, ~0UL);
spin_lock_irq(&isi->lock);
- isi->state = ISI_STATE_WAIT_SOF;
- isi_writel(isi, ISI_INTDIS, ISI_SR_VSYNC);
+ /* Clear any pending interrupt */
+ sr = isi_readl(isi, ISI_STATUS);
+
if (count)
start_dma(isi, isi->active);
spin_unlock_irq(&isi->lock);
return 0;
-err:
- isi->active = NULL;
- isi->sequence = 0;
- INIT_LIST_HEAD(&isi->video_buffer_list);
- return ret;
}
/* abort streaming and wait for last buffer */
@@ -765,14 +724,16 @@ static int isi_camera_clock_start(struct soc_camera_host *ici)
struct atmel_isi *isi = ici->priv;
int ret;
- ret = clk_enable(isi->pclk);
+ ret = clk_prepare_enable(isi->pclk);
if (ret)
return ret;
- ret = clk_enable(isi->mck);
- if (ret) {
- clk_disable(isi->pclk);
- return ret;
+ if (!IS_ERR(isi->mck)) {
+ ret = clk_prepare_enable(isi->mck);
+ if (ret) {
+ clk_disable_unprepare(isi->pclk);
+ return ret;
+ }
}
return 0;
@@ -783,8 +744,9 @@ static void isi_camera_clock_stop(struct soc_camera_host *ici)
{
struct atmel_isi *isi = ici->priv;
- clk_disable(isi->mck);
- clk_disable(isi->pclk);
+ if (!IS_ERR(isi->mck))
+ clk_disable_unprepare(isi->mck);
+ clk_disable_unprepare(isi->pclk);
}
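Because clk_get() and devm_clk_get() return an ERR_PTR when a clock is not provided, the driver can keep that error pointer around and guard every use with IS_ERR() instead of failing probe; this is how isi_mck becomes optional above. A sketch of the pattern with illustrative names:

#include <linux/clk.h>
#include <linux/err.h>

/* Sketch of the "optional clock" pattern: keep the ERR_PTR from clk_get()
 * and guard every use with IS_ERR(). Names are illustrative. */
struct example_ctx {
	struct clk *optional_clk;	/* may hold an ERR_PTR */
};

static int example_clock_start(struct example_ctx *ctx)
{
	if (IS_ERR(ctx->optional_clk))
		return 0;	/* clock not provided: nothing to do */

	return clk_prepare_enable(ctx->optional_clk);
}

static void example_clock_stop(struct example_ctx *ctx)
{
	if (!IS_ERR(ctx->optional_clk))
		clk_disable_unprepare(ctx->optional_clk);
}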
static unsigned int isi_camera_poll(struct file *file, poll_table *pt)
@@ -906,7 +868,6 @@ static int atmel_isi_remove(struct platform_device *pdev)
struct atmel_isi *isi = container_of(soc_host,
struct atmel_isi, soc_host);
- free_irq(isi->irq, isi);
soc_camera_host_unregister(soc_host);
vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
dma_free_coherent(&pdev->dev,
@@ -914,13 +875,6 @@ static int atmel_isi_remove(struct platform_device *pdev)
isi->p_fb_descriptors,
isi->fb_descriptors_phys);
- iounmap(isi->regs);
- clk_unprepare(isi->mck);
- clk_put(isi->mck);
- clk_unprepare(isi->pclk);
- clk_put(isi->pclk);
- kfree(isi);
-
return 0;
}
@@ -928,7 +882,6 @@ static int atmel_isi_probe(struct platform_device *pdev)
{
unsigned int irq;
struct atmel_isi *isi;
- struct clk *pclk;
struct resource *regs;
int ret, i;
struct device *dev = &pdev->dev;
@@ -936,64 +889,50 @@ static int atmel_isi_probe(struct platform_device *pdev)
struct isi_platform_data *pdata;
pdata = dev->platform_data;
- if (!pdata || !pdata->data_width_flags || !pdata->mck_hz) {
+ if (!pdata || !pdata->data_width_flags) {
dev_err(&pdev->dev,
"No config available for Atmel ISI\n");
return -EINVAL;
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs)
- return -ENXIO;
-
- pclk = clk_get(&pdev->dev, "isi_clk");
- if (IS_ERR(pclk))
- return PTR_ERR(pclk);
-
- ret = clk_prepare(pclk);
- if (ret)
- goto err_clk_prepare_pclk;
-
- isi = kzalloc(sizeof(struct atmel_isi), GFP_KERNEL);
+ isi = devm_kzalloc(&pdev->dev, sizeof(struct atmel_isi), GFP_KERNEL);
if (!isi) {
- ret = -ENOMEM;
dev_err(&pdev->dev, "Can't allocate interface!\n");
- goto err_alloc_isi;
+ return -ENOMEM;
}
- isi->pclk = pclk;
+ isi->pclk = devm_clk_get(&pdev->dev, "isi_clk");
+ if (IS_ERR(isi->pclk))
+ return PTR_ERR(isi->pclk);
+
isi->pdata = pdata;
isi->active = NULL;
spin_lock_init(&isi->lock);
- init_waitqueue_head(&isi->vsync_wq);
INIT_LIST_HEAD(&isi->video_buffer_list);
INIT_LIST_HEAD(&isi->dma_desc_head);
- /* Get ISI_MCK, provided by programmable clock or external clock */
- isi->mck = clk_get(dev, "isi_mck");
- if (IS_ERR(isi->mck)) {
- dev_err(dev, "Failed to get isi_mck\n");
- ret = PTR_ERR(isi->mck);
- goto err_clk_get;
+ /* ISI_MCK is the sensor master clock. It should be handled by the
+ * sensor driver directly, as the ISI has no use for that clock. Make
+ * the clock optional here while platforms transition to the correct
+ * model.
+ */
+ isi->mck = devm_clk_get(dev, "isi_mck");
+ if (!IS_ERR(isi->mck)) {
+ /* Set ISI_MCK's frequency, it should be faster than pixel
+ * clock.
+ */
+ ret = clk_set_rate(isi->mck, pdata->mck_hz);
+ if (ret < 0)
+ return ret;
}
- ret = clk_prepare(isi->mck);
- if (ret)
- goto err_clk_prepare_mck;
-
- /* Set ISI_MCK's frequency, it should be faster than pixel clock */
- ret = clk_set_rate(isi->mck, pdata->mck_hz);
- if (ret < 0)
- goto err_set_mck_rate;
-
isi->p_fb_descriptors = dma_alloc_coherent(&pdev->dev,
sizeof(struct fbd) * MAX_BUFFER_NUM,
&isi->fb_descriptors_phys,
GFP_KERNEL);
if (!isi->p_fb_descriptors) {
- ret = -ENOMEM;
dev_err(&pdev->dev, "Can't allocate descriptors!\n");
- goto err_alloc_descriptors;
+ return -ENOMEM;
}
for (i = 0; i < MAX_BUFFER_NUM; i++) {
@@ -1009,9 +948,10 @@ static int atmel_isi_probe(struct platform_device *pdev)
goto err_alloc_ctx;
}
- isi->regs = ioremap(regs->start, resource_size(regs));
- if (!isi->regs) {
- ret = -ENOMEM;
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ isi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(isi->regs)) {
+ ret = PTR_ERR(isi->regs);
goto err_ioremap;
}
@@ -1028,7 +968,7 @@ static int atmel_isi_probe(struct platform_device *pdev)
goto err_req_irq;
}
- ret = request_irq(irq, isi_interrupt, 0, "isi", isi);
+ ret = devm_request_irq(&pdev->dev, irq, isi_interrupt, 0, "isi", isi);
if (ret) {
dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
goto err_req_irq;
@@ -1050,9 +990,7 @@ static int atmel_isi_probe(struct platform_device *pdev)
return 0;
err_register_soc_camera_host:
- free_irq(isi->irq, isi);
err_req_irq:
- iounmap(isi->regs);
err_ioremap:
vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
err_alloc_ctx:
@@ -1060,17 +998,6 @@ err_alloc_ctx:
sizeof(struct fbd) * MAX_BUFFER_NUM,
isi->p_fb_descriptors,
isi->fb_descriptors_phys);
-err_alloc_descriptors:
-err_set_mck_rate:
- clk_unprepare(isi->mck);
-err_clk_prepare_mck:
- clk_put(isi->mck);
-err_clk_get:
- kfree(isi);
-err_alloc_isi:
- clk_unprepare(pclk);
-err_clk_prepare_pclk:
- clk_put(pclk);
return ret;
}
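The probe/remove rework above moves the register mapping, clocks, interrupt and the private structure to devm_* managed allocations, so the hand-rolled error labels and the matching frees in remove() disappear: the driver core releases everything when probe fails or the device is unbound. A condensed, hypothetical sketch of the resulting probe shape:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv {
	struct clk *pclk;
	void __iomem *regs;
};

/* Sketch: with devm_* allocations, every early "return" automatically
 * releases what was already acquired; no unwind labels are needed.
 * The "pclk" clock name is illustrative. */
static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;
	struct resource *res;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(priv->pclk))
		return PTR_ERR(priv->pclk);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	platform_set_drvdata(pdev, priv);
	return 0;
}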
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index 45a0276be4e5..d73abca9c6ee 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -659,7 +659,7 @@ static int mx2_start_streaming(struct vb2_queue *q, unsigned int count)
unsigned long flags;
if (count < 2)
- return -EINVAL;
+ return -ENOBUFS;
spin_lock_irqsave(&pcdev->lock, flags);
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 6866bb4fbebc..3b1c05a72d00 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -106,7 +106,7 @@
#define VIN_MAX_HEIGHT 2048
enum chip_id {
- RCAR_H2,
+ RCAR_GEN2,
RCAR_H1,
RCAR_M1,
RCAR_E1,
@@ -302,7 +302,7 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
dmr = 0;
break;
case V4L2_PIX_FMT_RGB32:
- if (priv->chip == RCAR_H2 || priv->chip == RCAR_H1 ||
+ if (priv->chip == RCAR_GEN2 || priv->chip == RCAR_H1 ||
priv->chip == RCAR_E1) {
dmr = VNDMR_EXRGB;
break;
@@ -1384,7 +1384,8 @@ static struct soc_camera_host_ops rcar_vin_host_ops = {
};
static struct platform_device_id rcar_vin_id_table[] = {
- { "r8a7790-vin", RCAR_H2 },
+ { "r8a7791-vin", RCAR_GEN2 },
+ { "r8a7790-vin", RCAR_GEN2 },
{ "r8a7779-vin", RCAR_H1 },
{ "r8a7778-vin", RCAR_M1 },
{ "uPD35004-vin", RCAR_E1 },
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
index cbd3a34f4f3f..8e74fb7f2a07 100644
--- a/drivers/media/platform/soc_camera/soc_scale_crop.c
+++ b/drivers/media/platform/soc_camera/soc_scale_crop.c
@@ -141,8 +141,8 @@ int soc_camera_client_s_crop(struct v4l2_subdev *sd,
* Popular special case - some cameras can only handle fixed sizes like
* QVGA, VGA,... Take care to avoid infinite loop.
*/
- width = max(cam_rect->width, 2);
- height = max(cam_rect->height, 2);
+ width = max_t(unsigned int, cam_rect->width, 2);
+ height = max_t(unsigned int, cam_rect->height, 2);
/*
* Loop as long as sensor is not covering the requested rectangle and
diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti-vpe/Makefile
index cbf0a806ba1d..be680f839e77 100644
--- a/drivers/media/platform/ti-vpe/Makefile
+++ b/drivers/media/platform/ti-vpe/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
-ti-vpe-y := vpe.o vpdma.o
+ti-vpe-y := vpe.o sc.o csc.o vpdma.o
ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti-vpe/csc.c
new file mode 100644
index 000000000000..acfea500710e
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/csc.c
@@ -0,0 +1,196 @@
+/*
+ * Color space converter library
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include "csc.h"
+
+/*
+ * 12 coefficients in the order:
+ * a0, b0, c0, a1, b1, c1, a2, b2, c2, d0, d1, d2
+ * (we may need to pass non-default values from user space later on, and we
+ * might need to make the coefficient struct easier to populate)
+ */
+struct colorspace_coeffs {
+ u16 sd[12];
+ u16 hd[12];
+};
+
+/* VIDEO_RANGE: limited range, GRAPHICS_RANGE: full range */
+#define CSC_COEFFS_VIDEO_RANGE_Y2R 0
+#define CSC_COEFFS_GRAPHICS_RANGE_Y2R 1
+#define CSC_COEFFS_VIDEO_RANGE_R2Y 2
+#define CSC_COEFFS_GRAPHICS_RANGE_R2Y 3
+
+/* default colorspace coefficients */
+static struct colorspace_coeffs colorspace_coeffs[4] = {
+ [CSC_COEFFS_VIDEO_RANGE_Y2R] = {
+ {
+ /* SDTV */
+ 0x0400, 0x0000, 0x057D, 0x0400, 0x1EA7, 0x1D35,
+ 0x0400, 0x06EF, 0x1FFE, 0x0D40, 0x0210, 0x0C88,
+ },
+ {
+ /* HDTV */
+ 0x0400, 0x0000, 0x0629, 0x0400, 0x1F45, 0x1E2B,
+ 0x0400, 0x0742, 0x0000, 0x0CEC, 0x0148, 0x0C60,
+ },
+ },
+ [CSC_COEFFS_GRAPHICS_RANGE_Y2R] = {
+ {
+ /* SDTV */
+ 0x04A8, 0x1FFE, 0x0662, 0x04A8, 0x1E6F, 0x1CBF,
+ 0x04A8, 0x0812, 0x1FFF, 0x0C84, 0x0220, 0x0BAC,
+ },
+ {
+ /* HDTV */
+ 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
+ 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
+ },
+ },
+ [CSC_COEFFS_VIDEO_RANGE_R2Y] = {
+ {
+ /* SDTV */
+ 0x0132, 0x0259, 0x0075, 0x1F50, 0x1EA5, 0x020B,
+ 0x020B, 0x1E4A, 0x1FAB, 0x0000, 0x0200, 0x0200,
+ },
+ {
+ /* HDTV */
+ 0x00DA, 0x02DC, 0x004A, 0x1F88, 0x1E6C, 0x020C,
+ 0x020C, 0x1E24, 0x1FD0, 0x0000, 0x0200, 0x0200,
+ },
+ },
+ [CSC_COEFFS_GRAPHICS_RANGE_R2Y] = {
+ {
+ /* SDTV */
+ 0x0107, 0x0204, 0x0064, 0x1F68, 0x1ED6, 0x01C2,
+ 0x01C2, 0x1E87, 0x1FB7, 0x0040, 0x0200, 0x0200,
+ },
+ {
+ /* HDTV */
+ 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
+ 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
+ },
+ },
+};
+
+void csc_dump_regs(struct csc_data *csc)
+{
+ struct device *dev = &csc->pdev->dev;
+
+#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, \
+	ioread32(csc->base + CSC_##r))
+
+ DUMPREG(CSC00);
+ DUMPREG(CSC01);
+ DUMPREG(CSC02);
+ DUMPREG(CSC03);
+ DUMPREG(CSC04);
+ DUMPREG(CSC05);
+
+#undef DUMPREG
+}
+
+void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5)
+{
+ *csc_reg5 |= CSC_BYPASS;
+}
+
+/*
+ * set the color space converter coefficient shadow register values
+ */
+void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
+ enum v4l2_colorspace src_colorspace,
+ enum v4l2_colorspace dst_colorspace)
+{
+ u32 *csc_reg5 = csc_reg0 + 5;
+ u32 *shadow_csc = csc_reg0;
+ struct colorspace_coeffs *sd_hd_coeffs;
+ u16 *coeff, *end_coeff;
+ enum v4l2_colorspace yuv_colorspace;
+ int sel = 0;
+
+ /*
+ * support only the graphics data range (full range) for now; a control
+ * ioctl would be nice here
+ */
+ /* Y2R */
+ if (dst_colorspace == V4L2_COLORSPACE_SRGB &&
+ (src_colorspace == V4L2_COLORSPACE_SMPTE170M ||
+ src_colorspace == V4L2_COLORSPACE_REC709)) {
+ /* Y2R */
+ sel = 1;
+ yuv_colorspace = src_colorspace;
+ } else if ((dst_colorspace == V4L2_COLORSPACE_SMPTE170M ||
+ dst_colorspace == V4L2_COLORSPACE_REC709) &&
+ src_colorspace == V4L2_COLORSPACE_SRGB) {
+ /* R2Y */
+ sel = 3;
+ yuv_colorspace = dst_colorspace;
+ } else {
+ *csc_reg5 |= CSC_BYPASS;
+ return;
+ }
+
+ sd_hd_coeffs = &colorspace_coeffs[sel];
+
+ /* select between SD or HD coefficients */
+ if (yuv_colorspace == V4L2_COLORSPACE_SMPTE170M)
+ coeff = sd_hd_coeffs->sd;
+ else
+ coeff = sd_hd_coeffs->hd;
+
+ end_coeff = coeff + 12;
+
+ for (; coeff < end_coeff; coeff += 2)
+ *shadow_csc++ = (*(coeff + 1) << 16) | *coeff;
+}
+
+struct csc_data *csc_create(struct platform_device *pdev)
+{
+ struct csc_data *csc;
+
+ dev_dbg(&pdev->dev, "csc_create\n");
+
+ csc = devm_kzalloc(&pdev->dev, sizeof(*csc), GFP_KERNEL);
+ if (!csc) {
+ dev_err(&pdev->dev, "couldn't alloc csc_data\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ csc->pdev = pdev;
+
+ csc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "vpe_csc");
+ if (csc->res == NULL) {
+ dev_err(&pdev->dev, "missing platform resources data\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ csc->base = devm_ioremap_resource(&pdev->dev, csc->res);
+ if (IS_ERR(csc->base)) {
+ dev_err(&pdev->dev, "failed to ioremap\n");
+ return ERR_CAST(csc->base);
+ }
+
+ return csc;
+}
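csc_set_coeff() above writes the selected 12-entry coefficient row into six 32-bit shadow words, two 16-bit values per word with the even-indexed coefficient in the low half. A small user-space sketch of that packing, using the VIDEO_RANGE_Y2R SDTV row from the table purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Sketch (user space, illustration only): pack 12 coefficients two per 32-bit
 * word, low coefficient in bits 0-15, next coefficient in bits 16-31, the way
 * the csc_set_coeff() loop fills the CSC00-CSC05 shadow registers. */
int main(void)
{
	/* CSC_COEFFS_VIDEO_RANGE_Y2R, SDTV row from the table above */
	const uint16_t coeff[12] = {
		0x0400, 0x0000, 0x057D, 0x0400, 0x1EA7, 0x1D35,
		0x0400, 0x06EF, 0x1FFE, 0x0D40, 0x0210, 0x0C88,
	};
	uint32_t shadow[6];
	int i;

	for (i = 0; i < 6; i++)
		shadow[i] = ((uint32_t)coeff[2 * i + 1] << 16) | coeff[2 * i];

	for (i = 0; i < 6; i++)
		printf("CSC%02d = 0x%08x\n", i, shadow[i]);
	return 0;
}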
diff --git a/drivers/media/platform/ti-vpe/csc.h b/drivers/media/platform/ti-vpe/csc.h
new file mode 100644
index 000000000000..1ad2b6dad561
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/csc.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef TI_CSC_H
+#define TI_CSC_H
+
+/* VPE color space converter regs */
+#define CSC_CSC00 0x00
+#define CSC_A0_MASK 0x1fff
+#define CSC_A0_SHIFT 0
+#define CSC_B0_MASK 0x1fff
+#define CSC_B0_SHIFT 16
+
+#define CSC_CSC01 0x04
+#define CSC_C0_MASK 0x1fff
+#define CSC_C0_SHIFT 0
+#define CSC_A1_MASK 0x1fff
+#define CSC_A1_SHIFT 16
+
+#define CSC_CSC02 0x08
+#define CSC_B1_MASK 0x1fff
+#define CSC_B1_SHIFT 0
+#define CSC_C1_MASK 0x1fff
+#define CSC_C1_SHIFT 16
+
+#define CSC_CSC03 0x0c
+#define CSC_A2_MASK 0x1fff
+#define CSC_A2_SHIFT 0
+#define CSC_B2_MASK 0x1fff
+#define CSC_B2_SHIFT 16
+
+#define CSC_CSC04 0x10
+#define CSC_C2_MASK 0x1fff
+#define CSC_C2_SHIFT 0
+#define CSC_D0_MASK 0x0fff
+#define CSC_D0_SHIFT 16
+
+#define CSC_CSC05 0x14
+#define CSC_D1_MASK 0x0fff
+#define CSC_D1_SHIFT 0
+#define CSC_D2_MASK 0x0fff
+#define CSC_D2_SHIFT 16
+
+#define CSC_BYPASS (1 << 28)
+
+struct csc_data {
+ void __iomem *base;
+ struct resource *res;
+
+ struct platform_device *pdev;
+};
+
+void csc_dump_regs(struct csc_data *csc);
+void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5);
+void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
+ enum v4l2_colorspace src_colorspace,
+ enum v4l2_colorspace dst_colorspace);
+struct csc_data *csc_create(struct platform_device *pdev);
+
+#endif
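Each CSC_CSCnn register packs two 13-bit coefficient fields, described by the mask/shift pairs above. A user-space sketch decoding a packed CSC00 word back into a0 and b0 with those constants (the sample value is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Sketch: decode a packed CSC00 word into its a0/b0 fields using the mask and
 * shift values from csc.h (13-bit coefficient fields, a0 low, b0 high). */
#define CSC_A0_MASK	0x1fff
#define CSC_A0_SHIFT	0
#define CSC_B0_MASK	0x1fff
#define CSC_B0_SHIFT	16

int main(void)
{
	uint32_t csc00 = (0x0000u << CSC_B0_SHIFT) | 0x0400u; /* b0 = 0, a0 = 0x400 */
	uint16_t a0 = (csc00 >> CSC_A0_SHIFT) & CSC_A0_MASK;
	uint16_t b0 = (csc00 >> CSC_B0_SHIFT) & CSC_B0_MASK;

	printf("a0 = 0x%04x, b0 = 0x%04x\n", a0, b0);
	return 0;
}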
diff --git a/drivers/media/platform/ti-vpe/sc.c b/drivers/media/platform/ti-vpe/sc.c
new file mode 100644
index 000000000000..93f0af546b76
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/sc.c
@@ -0,0 +1,311 @@
+/*
+ * Scaler library
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "sc.h"
+#include "sc_coeff.h"
+
+void sc_dump_regs(struct sc_data *sc)
+{
+ struct device *dev = &sc->pdev->dev;
+
+#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, \
+	ioread32(sc->base + CFG_##r))
+
+ DUMPREG(SC0);
+ DUMPREG(SC1);
+ DUMPREG(SC2);
+ DUMPREG(SC3);
+ DUMPREG(SC4);
+ DUMPREG(SC5);
+ DUMPREG(SC6);
+ DUMPREG(SC8);
+ DUMPREG(SC9);
+ DUMPREG(SC10);
+ DUMPREG(SC11);
+ DUMPREG(SC12);
+ DUMPREG(SC13);
+ DUMPREG(SC17);
+ DUMPREG(SC18);
+ DUMPREG(SC19);
+ DUMPREG(SC20);
+ DUMPREG(SC21);
+ DUMPREG(SC22);
+ DUMPREG(SC23);
+ DUMPREG(SC24);
+ DUMPREG(SC25);
+
+#undef DUMPREG
+}
+
+/*
+ * set the horizontal scaler coefficients according to the ratio of output to
+ * input widths, after accounting for up to two levels of decimation
+ */
+void sc_set_hs_coeffs(struct sc_data *sc, void *addr, unsigned int src_w,
+ unsigned int dst_w)
+{
+ int sixteenths;
+ int idx;
+ int i, j;
+ u16 *coeff_h = addr;
+ const u16 *cp;
+
+ if (dst_w > src_w) {
+ idx = HS_UP_SCALE;
+ } else {
+ if ((dst_w << 1) < src_w)
+ dst_w <<= 1; /* first level decimation */
+ if ((dst_w << 1) < src_w)
+ dst_w <<= 1; /* second level decimation */
+
+ if (dst_w == src_w) {
+ idx = HS_LE_16_16_SCALE;
+ } else {
+ sixteenths = (dst_w << 4) / src_w;
+ if (sixteenths < 8)
+ sixteenths = 8;
+ idx = HS_LT_9_16_SCALE + sixteenths - 8;
+ }
+ }
+
+ if (idx == sc->hs_index)
+ return;
+
+ cp = scaler_hs_coeffs[idx];
+
+ for (i = 0; i < SC_NUM_PHASES * 2; i++) {
+ for (j = 0; j < SC_H_NUM_TAPS; j++)
+ *coeff_h++ = *cp++;
+ /*
+ * for each phase, the scaler expects space for 8 coefficients
+ * in its memory. For the horizontal scaler, we copy the first
+ * 7 coefficients and skip the last slot to move to the next
+ * row to hold coefficients for the next phase
+ */
+ coeff_h += SC_NUM_TAPS_MEM_ALIGN - SC_H_NUM_TAPS;
+ }
+
+ sc->hs_index = idx;
+
+ sc->load_coeff_h = true;
+}
+
+/*
+ * set the vertical scaler coefficients according to the ratio of output to
+ * input heights
+ */
+void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
+ unsigned int dst_h)
+{
+ int sixteenths;
+ int idx;
+ int i, j;
+ u16 *coeff_v = addr;
+ const u16 *cp;
+
+ if (dst_h > src_h) {
+ idx = VS_UP_SCALE;
+ } else if (dst_h == src_h) {
+ idx = VS_1_TO_1_SCALE;
+ } else {
+ sixteenths = (dst_h << 4) / src_h;
+ if (sixteenths < 8)
+ sixteenths = 8;
+ idx = VS_LT_9_16_SCALE + sixteenths - 8;
+ }
+
+ if (idx == sc->vs_index)
+ return;
+
+ cp = scaler_vs_coeffs[idx];
+
+ for (i = 0; i < SC_NUM_PHASES * 2; i++) {
+ for (j = 0; j < SC_V_NUM_TAPS; j++)
+ *coeff_v++ = *cp++;
+ /*
+ * for the vertical scaler, we copy the first 5 coefficients and
+ * skip the last 3 slots to move to the next row to hold
+ * coefficients for the next phase
+ */
+ coeff_v += SC_NUM_TAPS_MEM_ALIGN - SC_V_NUM_TAPS;
+ }
+
+ sc->vs_index = idx;
+ sc->load_coeff_v = true;
+}
+
+void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
+ u32 *sc_reg17, unsigned int src_w, unsigned int src_h,
+ unsigned int dst_w, unsigned int dst_h)
+{
+ struct device *dev = &sc->pdev->dev;
+ u32 val;
+ int dcm_x, dcm_shift;
+ bool use_rav;
+ unsigned long lltmp;
+ u32 lin_acc_inc, lin_acc_inc_u;
+ u32 col_acc_offset;
+ u16 factor = 0;
+ int row_acc_init_rav = 0, row_acc_init_rav_b = 0;
+ u32 row_acc_inc = 0, row_acc_offset = 0, row_acc_offset_b = 0;
+ /*
+ * location of SC register in payload memory with respect to the first
+ * register in the mmr address data block
+ */
+ u32 *sc_reg9 = sc_reg8 + 1;
+ u32 *sc_reg12 = sc_reg8 + 4;
+ u32 *sc_reg13 = sc_reg8 + 5;
+ u32 *sc_reg24 = sc_reg17 + 7;
+
+ val = sc_reg0[0];
+
+ /* clear all the features (they may get enabled elsewhere later) */
+ val &= ~(CFG_SELFGEN_FID | CFG_TRIM | CFG_ENABLE_SIN2_VER_INTP |
+ CFG_INTERLACE_I | CFG_DCM_4X | CFG_DCM_2X | CFG_AUTO_HS |
+ CFG_ENABLE_EV | CFG_USE_RAV | CFG_INVT_FID | CFG_SC_BYPASS |
+ CFG_INTERLACE_O | CFG_Y_PK_EN | CFG_HP_BYPASS | CFG_LINEAR);
+
+ if (src_w == dst_w && src_h == dst_h) {
+ val |= CFG_SC_BYPASS;
+ sc_reg0[0] = val;
+ return;
+ }
+
+ /* we only support linear scaling for now */
+ val |= CFG_LINEAR;
+
+ /* configure horizontal scaler */
+
+ /* enable 2X or 4X decimation */
+ dcm_x = src_w / dst_w;
+ if (dcm_x > 4) {
+ val |= CFG_DCM_4X;
+ dcm_shift = 2;
+ } else if (dcm_x > 2) {
+ val |= CFG_DCM_2X;
+ dcm_shift = 1;
+ } else {
+ dcm_shift = 0;
+ }
+
+ lltmp = dst_w - 1;
+ lin_acc_inc = div64_u64(((u64)(src_w >> dcm_shift) - 1) << 24, lltmp);
+ lin_acc_inc_u = 0;
+ col_acc_offset = 0;
+
+ dev_dbg(dev, "hs config: src_w = %d, dst_w = %d, decimation = %s, lin_acc_inc = %08x\n",
+ src_w, dst_w, dcm_shift == 2 ? "4x" :
+ (dcm_shift == 1 ? "2x" : "none"), lin_acc_inc);
+
+ /* configure vertical scaler */
+
+ /* use RAV for vertical scaler if vertical downscaling is > 4x */
+ if (dst_h < (src_h >> 2)) {
+ use_rav = true;
+ val |= CFG_USE_RAV;
+ } else {
+ use_rav = false;
+ }
+
+ if (use_rav) {
+ /* use RAV */
+ factor = (u16) ((dst_h << 10) / src_h);
+
+ row_acc_init_rav = factor + ((1 + factor) >> 1);
+ if (row_acc_init_rav >= 1024)
+ row_acc_init_rav -= 1024;
+
+ row_acc_init_rav_b = row_acc_init_rav +
+ (1 + (row_acc_init_rav >> 1)) -
+ (1024 >> 1);
+
+ if (row_acc_init_rav_b < 0) {
+ row_acc_init_rav_b += row_acc_init_rav;
+ row_acc_init_rav *= 2;
+ }
+
+ dev_dbg(dev, "vs config(RAV): src_h = %d, dst_h = %d, factor = %d, acc_init = %08x, acc_init_b = %08x\n",
+ src_h, dst_h, factor, row_acc_init_rav,
+ row_acc_init_rav_b);
+ } else {
+ /* use polyphase */
+ row_acc_inc = ((src_h - 1) << 16) / (dst_h - 1);
+ row_acc_offset = 0;
+ row_acc_offset_b = 0;
+
+ dev_dbg(dev, "vs config(POLY): src_h = %d, dst_h = %d,row_acc_inc = %08x\n",
+ src_h, dst_h, row_acc_inc);
+ }
+
+ sc_reg0[0] = val;
+ sc_reg0[1] = row_acc_inc;
+ sc_reg0[2] = row_acc_offset;
+ sc_reg0[3] = row_acc_offset_b;
+
+ sc_reg0[4] = ((lin_acc_inc_u & CFG_LIN_ACC_INC_U_MASK) <<
+ CFG_LIN_ACC_INC_U_SHIFT) | (dst_w << CFG_TAR_W_SHIFT) |
+ (dst_h << CFG_TAR_H_SHIFT);
+
+ sc_reg0[5] = (src_w << CFG_SRC_W_SHIFT) | (src_h << CFG_SRC_H_SHIFT);
+
+ sc_reg0[6] = (row_acc_init_rav_b << CFG_ROW_ACC_INIT_RAV_B_SHIFT) |
+ (row_acc_init_rav << CFG_ROW_ACC_INIT_RAV_SHIFT);
+
+ *sc_reg9 = lin_acc_inc;
+
+ *sc_reg12 = col_acc_offset << CFG_COL_ACC_OFFSET_SHIFT;
+
+ *sc_reg13 = factor;
+
+ *sc_reg24 = (src_w << CFG_ORG_W_SHIFT) | (src_h << CFG_ORG_H_SHIFT);
+}
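In sc_config_scaler() the horizontal step lin_acc_inc is a fixed-point increment with 24 fractional bits, computed from the source width after the optional 2x/4x pre-decimation. A user-space sketch of that arithmetic for a hypothetical 1920 -> 400 downscale, which takes the CFG_DCM_2X branch above:

#include <stdint.h>
#include <stdio.h>

/* Sketch: reproduce the lin_acc_inc arithmetic from sc_config_scaler() for a
 * hypothetical 1920 -> 400 horizontal downscale. dcm_x = 1920/400 = 4, which
 * selects 2x pre-decimation (dcm_shift = 1) in the code above. */
int main(void)
{
	unsigned int src_w = 1920, dst_w = 400;
	int dcm_shift = 1;	/* CFG_DCM_2X branch: 2 < dcm_x <= 4 */
	uint64_t lltmp = dst_w - 1;
	uint32_t lin_acc_inc =
		(uint32_t)(((((uint64_t)(src_w >> dcm_shift)) - 1) << 24) / lltmp);

	printf("lin_acc_inc = 0x%08x\n", lin_acc_inc);
	return 0;
}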
+
+struct sc_data *sc_create(struct platform_device *pdev)
+{
+ struct sc_data *sc;
+
+ dev_dbg(&pdev->dev, "sc_create\n");
+
+ sc = devm_kzalloc(&pdev->dev, sizeof(*sc), GFP_KERNEL);
+ if (!sc) {
+ dev_err(&pdev->dev, "couldn't alloc sc_data\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sc->pdev = pdev;
+
+ sc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sc");
+ if (!sc->res) {
+ dev_err(&pdev->dev, "missing platform resources data\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ sc->base = devm_ioremap_resource(&pdev->dev, sc->res);
+ if (IS_ERR(sc->base)) {
+ dev_err(&pdev->dev, "failed to ioremap\n");
+ return ERR_CAST(sc->base);
+ }
+
+ return sc;
+}
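sc_set_hs_coeffs() first folds up to two levels of 2x decimation into the output width and then selects one of the ratio-indexed coefficient sets by expressing the remaining downscale in sixteenths, clamped at 8/16. A user-space sketch of just that index calculation (the enum values mirror sc_coeff.h; the widths are arbitrary examples):

#include <stdio.h>

/* Sketch of the horizontal coefficient-set selection in sc_set_hs_coeffs():
 * fold up to two 2x decimation steps into dst_w, then index by the remaining
 * downscale ratio expressed in sixteenths (clamped to >= 8/16). */
enum { HS_UP_SCALE = 0, HS_LT_9_16_SCALE = 1, HS_LE_16_16_SCALE = 8 };

static int hs_coeff_index(unsigned int src_w, unsigned int dst_w)
{
	int sixteenths;

	if (dst_w > src_w)
		return HS_UP_SCALE;

	if ((dst_w << 1) < src_w)
		dst_w <<= 1;		/* first level decimation */
	if ((dst_w << 1) < src_w)
		dst_w <<= 1;		/* second level decimation */

	if (dst_w == src_w)
		return HS_LE_16_16_SCALE;

	sixteenths = (dst_w << 4) / src_w;
	if (sixteenths < 8)
		sixteenths = 8;
	return HS_LT_9_16_SCALE + sixteenths - 8;
}

int main(void)
{
	printf("1920 -> 1280: idx %d\n", hs_coeff_index(1920, 1280));
	printf("1920 ->  480: idx %d\n", hs_coeff_index(1920, 480));
	return 0;
}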
diff --git a/drivers/media/platform/ti-vpe/sc.h b/drivers/media/platform/ti-vpe/sc.h
new file mode 100644
index 000000000000..60e411e05c30
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/sc.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef TI_SC_H
+#define TI_SC_H
+
+/* Scaler regs */
+#define CFG_SC0 0x0
+#define CFG_INTERLACE_O (1 << 0)
+#define CFG_LINEAR (1 << 1)
+#define CFG_SC_BYPASS (1 << 2)
+#define CFG_INVT_FID (1 << 3)
+#define CFG_USE_RAV (1 << 4)
+#define CFG_ENABLE_EV (1 << 5)
+#define CFG_AUTO_HS (1 << 6)
+#define CFG_DCM_2X (1 << 7)
+#define CFG_DCM_4X (1 << 8)
+#define CFG_HP_BYPASS (1 << 9)
+#define CFG_INTERLACE_I (1 << 10)
+#define CFG_ENABLE_SIN2_VER_INTP (1 << 11)
+#define CFG_Y_PK_EN (1 << 14)
+#define CFG_TRIM (1 << 15)
+#define CFG_SELFGEN_FID (1 << 16)
+
+#define CFG_SC1 0x4
+#define CFG_ROW_ACC_INC_MASK 0x07ffffff
+#define CFG_ROW_ACC_INC_SHIFT 0
+
+#define CFG_SC2 0x08
+#define CFG_ROW_ACC_OFFSET_MASK 0x0fffffff
+#define CFG_ROW_ACC_OFFSET_SHIFT 0
+
+#define CFG_SC3 0x0c
+#define CFG_ROW_ACC_OFFSET_B_MASK 0x0fffffff
+#define CFG_ROW_ACC_OFFSET_B_SHIFT 0
+
+#define CFG_SC4 0x10
+#define CFG_TAR_H_MASK 0x07ff
+#define CFG_TAR_H_SHIFT 0
+#define CFG_TAR_W_MASK 0x07ff
+#define CFG_TAR_W_SHIFT 12
+#define CFG_LIN_ACC_INC_U_MASK 0x07
+#define CFG_LIN_ACC_INC_U_SHIFT 24
+#define CFG_NLIN_ACC_INIT_U_MASK 0x07
+#define CFG_NLIN_ACC_INIT_U_SHIFT 28
+
+#define CFG_SC5 0x14
+#define CFG_SRC_H_MASK 0x07ff
+#define CFG_SRC_H_SHIFT 0
+#define CFG_SRC_W_MASK 0x07ff
+#define CFG_SRC_W_SHIFT 12
+#define CFG_NLIN_ACC_INC_U_MASK 0x07
+#define CFG_NLIN_ACC_INC_U_SHIFT 24
+
+#define CFG_SC6 0x18
+#define CFG_ROW_ACC_INIT_RAV_MASK 0x03ff
+#define CFG_ROW_ACC_INIT_RAV_SHIFT 0
+#define CFG_ROW_ACC_INIT_RAV_B_MASK 0x03ff
+#define CFG_ROW_ACC_INIT_RAV_B_SHIFT 10
+
+#define CFG_SC8 0x20
+#define CFG_NLIN_LEFT_MASK 0x07ff
+#define CFG_NLIN_LEFT_SHIFT 0
+#define CFG_NLIN_RIGHT_MASK 0x07ff
+#define CFG_NLIN_RIGHT_SHIFT 12
+
+#define CFG_SC9 0x24
+#define CFG_LIN_ACC_INC CFG_SC9
+
+#define CFG_SC10 0x28
+#define CFG_NLIN_ACC_INIT CFG_SC10
+
+#define CFG_SC11 0x2c
+#define CFG_NLIN_ACC_INC CFG_SC11
+
+#define CFG_SC12 0x30
+#define CFG_COL_ACC_OFFSET_MASK 0x01ffffff
+#define CFG_COL_ACC_OFFSET_SHIFT 0
+
+#define CFG_SC13 0x34
+#define CFG_SC_FACTOR_RAV_MASK 0xff
+#define CFG_SC_FACTOR_RAV_SHIFT 0
+#define CFG_CHROMA_INTP_THR_MASK 0x03ff
+#define CFG_CHROMA_INTP_THR_SHIFT 12
+#define CFG_DELTA_CHROMA_THR_MASK 0x0f
+#define CFG_DELTA_CHROMA_THR_SHIFT 24
+
+#define CFG_SC17 0x44
+#define CFG_EV_THR_MASK 0x03ff
+#define CFG_EV_THR_SHIFT 12
+#define CFG_DELTA_LUMA_THR_MASK 0x0f
+#define CFG_DELTA_LUMA_THR_SHIFT 24
+#define CFG_DELTA_EV_THR_MASK 0x0f
+#define CFG_DELTA_EV_THR_SHIFT 28
+
+#define CFG_SC18 0x48
+#define CFG_HS_FACTOR_MASK 0x03ff
+#define CFG_HS_FACTOR_SHIFT 0
+#define CFG_CONF_DEFAULT_MASK 0x01ff
+#define CFG_CONF_DEFAULT_SHIFT 16
+
+#define CFG_SC19 0x4c
+#define CFG_HPF_COEFF0_MASK 0xff
+#define CFG_HPF_COEFF0_SHIFT 0
+#define CFG_HPF_COEFF1_MASK 0xff
+#define CFG_HPF_COEFF1_SHIFT 8
+#define CFG_HPF_COEFF2_MASK 0xff
+#define CFG_HPF_COEFF2_SHIFT 16
+#define CFG_HPF_COEFF3_MASK 0xff
+#define CFG_HPF_COEFF3_SHIFT 23
+
+#define CFG_SC20 0x50
+#define CFG_HPF_COEFF4_MASK 0xff
+#define CFG_HPF_COEFF4_SHIFT 0
+#define CFG_HPF_COEFF5_MASK 0xff
+#define CFG_HPF_COEFF5_SHIFT 8
+#define CFG_HPF_NORM_SHIFT_MASK 0x07
+#define CFG_HPF_NORM_SHIFT_SHIFT 16
+#define CFG_NL_LIMIT_MASK 0x1ff
+#define CFG_NL_LIMIT_SHIFT 20
+
+#define CFG_SC21 0x54
+#define CFG_NL_LO_THR_MASK 0x01ff
+#define CFG_NL_LO_THR_SHIFT 0
+#define CFG_NL_LO_SLOPE_MASK 0xff
+#define CFG_NL_LO_SLOPE_SHIFT 16
+
+#define CFG_SC22 0x58
+#define CFG_NL_HI_THR_MASK 0x01ff
+#define CFG_NL_HI_THR_SHIFT 0
+#define CFG_NL_HI_SLOPE_SH_MASK 0x07
+#define CFG_NL_HI_SLOPE_SH_SHIFT 16
+
+#define CFG_SC23 0x5c
+#define CFG_GRADIENT_THR_MASK 0x07ff
+#define CFG_GRADIENT_THR_SHIFT 0
+#define CFG_GRADIENT_THR_RANGE_MASK 0x0f
+#define CFG_GRADIENT_THR_RANGE_SHIFT 12
+#define CFG_MIN_GY_THR_MASK 0xff
+#define CFG_MIN_GY_THR_SHIFT 16
+#define CFG_MIN_GY_THR_RANGE_MASK 0x0f
+#define CFG_MIN_GY_THR_RANGE_SHIFT 28
+
+#define CFG_SC24 0x60
+#define CFG_ORG_H_MASK 0x07ff
+#define CFG_ORG_H_SHIFT 0
+#define CFG_ORG_W_MASK 0x07ff
+#define CFG_ORG_W_SHIFT 16
+
+#define CFG_SC25 0x64
+#define CFG_OFF_H_MASK 0x07ff
+#define CFG_OFF_H_SHIFT 0
+#define CFG_OFF_W_MASK 0x07ff
+#define CFG_OFF_W_SHIFT 16
+
+/* number of phases supported by the polyphase scalers */
+#define SC_NUM_PHASES 32
+
+/* number of taps used by horizontal polyphase scaler */
+#define SC_H_NUM_TAPS 7
+
+/* number of taps used by vertical polyphase scaler */
+#define SC_V_NUM_TAPS 5
+
+/* number of taps expected by the scaler in its coefficient memory */
+#define SC_NUM_TAPS_MEM_ALIGN 8
+
+/*
+ * coefficient memory size in bytes:
+ * num phases x num sets (luma and chroma) x num taps (aligned) x coeff size
+ */
+#define SC_COEF_SRAM_SIZE (SC_NUM_PHASES * 2 * SC_NUM_TAPS_MEM_ALIGN * 2)
+
+struct sc_data {
+ void __iomem *base;
+ struct resource *res;
+
+ dma_addr_t loaded_coeff_h; /* loaded h coeffs in SC */
+ dma_addr_t loaded_coeff_v; /* loaded v coeffs in SC */
+
+ bool load_coeff_h; /* have new h SC coeffs */
+ bool load_coeff_v; /* have new v SC coeffs */
+
+ unsigned int hs_index; /* h SC coeffs selector */
+ unsigned int vs_index; /* v SC coeffs selector */
+
+ struct platform_device *pdev;
+};
+
+void sc_dump_regs(struct sc_data *sc);
+void sc_set_hs_coeffs(struct sc_data *sc, void *addr, unsigned int src_w,
+ unsigned int dst_w);
+void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
+ unsigned int dst_h);
+void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
+ u32 *sc_reg17, unsigned int src_w, unsigned int src_h,
+ unsigned int dst_w, unsigned int dst_h);
+struct sc_data *sc_create(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/media/platform/ti-vpe/sc_coeff.h b/drivers/media/platform/ti-vpe/sc_coeff.h
new file mode 100644
index 000000000000..5bfa5c03aec6
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/sc_coeff.h
@@ -0,0 +1,1342 @@
+/*
+ * VPE SC coefs
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __TI_SC_COEFF_H
+#define __TI_SC_COEFF_H
+
+/* horizontal scaler coefficients */
+enum {
+ HS_UP_SCALE = 0,
+ HS_LT_9_16_SCALE,
+ HS_LT_10_16_SCALE,
+ HS_LT_11_16_SCALE,
+ HS_LT_12_16_SCALE,
+ HS_LT_13_16_SCALE,
+ HS_LT_14_16_SCALE,
+ HS_LT_15_16_SCALE,
+ HS_LE_16_16_SCALE,
+};
+
+static const u16 scaler_hs_coeffs[13][SC_NUM_PHASES * 2 * SC_H_NUM_TAPS] = {
+ [HS_UP_SCALE] = {
+ /* Luma */
+ 0x001F, 0x1F90, 0x00D2, 0x06FE, 0x00D2, 0x1F90, 0x001F,
+ 0x001C, 0x1F9E, 0x009F, 0x06FB, 0x0108, 0x1F82, 0x0022,
+ 0x0019, 0x1FAC, 0x006F, 0x06F3, 0x0140, 0x1F74, 0x0025,
+ 0x0016, 0x1FB9, 0x0041, 0x06E7, 0x017B, 0x1F66, 0x0028,
+ 0x0013, 0x1FC6, 0x0017, 0x06D6, 0x01B7, 0x1F58, 0x002B,
+ 0x0010, 0x1FD3, 0x1FEF, 0x06C0, 0x01F6, 0x1F4B, 0x002D,
+ 0x000E, 0x1FDF, 0x1FCB, 0x06A5, 0x0235, 0x1F3F, 0x002F,
+ 0x000B, 0x1FEA, 0x1FAA, 0x0686, 0x0277, 0x1F33, 0x0031,
+ 0x0009, 0x1FF5, 0x1F8C, 0x0663, 0x02B8, 0x1F28, 0x0033,
+ 0x0007, 0x1FFF, 0x1F72, 0x063A, 0x02FB, 0x1F1F, 0x0034,
+ 0x0005, 0x0008, 0x1F5A, 0x060F, 0x033E, 0x1F17, 0x0035,
+ 0x0003, 0x0010, 0x1F46, 0x05E0, 0x0382, 0x1F10, 0x0035,
+ 0x0002, 0x0017, 0x1F34, 0x05AF, 0x03C5, 0x1F0B, 0x0034,
+ 0x0001, 0x001E, 0x1F26, 0x0579, 0x0407, 0x1F08, 0x0033,
+ 0x0000, 0x0023, 0x1F1A, 0x0541, 0x0449, 0x1F07, 0x0032,
+ 0x1FFF, 0x0028, 0x1F12, 0x0506, 0x048A, 0x1F08, 0x002F,
+ 0x002C, 0x1F0C, 0x04C8, 0x04C8, 0x1F0C, 0x002C, 0x0000,
+ 0x002F, 0x1F08, 0x048A, 0x0506, 0x1F12, 0x0028, 0x1FFF,
+ 0x0032, 0x1F07, 0x0449, 0x0541, 0x1F1A, 0x0023, 0x0000,
+ 0x0033, 0x1F08, 0x0407, 0x0579, 0x1F26, 0x001E, 0x0001,
+ 0x0034, 0x1F0B, 0x03C5, 0x05AF, 0x1F34, 0x0017, 0x0002,
+ 0x0035, 0x1F10, 0x0382, 0x05E0, 0x1F46, 0x0010, 0x0003,
+ 0x0035, 0x1F17, 0x033E, 0x060F, 0x1F5A, 0x0008, 0x0005,
+ 0x0034, 0x1F1F, 0x02FB, 0x063A, 0x1F72, 0x1FFF, 0x0007,
+ 0x0033, 0x1F28, 0x02B8, 0x0663, 0x1F8C, 0x1FF5, 0x0009,
+ 0x0031, 0x1F33, 0x0277, 0x0686, 0x1FAA, 0x1FEA, 0x000B,
+ 0x002F, 0x1F3F, 0x0235, 0x06A5, 0x1FCB, 0x1FDF, 0x000E,
+ 0x002D, 0x1F4B, 0x01F6, 0x06C0, 0x1FEF, 0x1FD3, 0x0010,
+ 0x002B, 0x1F58, 0x01B7, 0x06D6, 0x0017, 0x1FC6, 0x0013,
+ 0x0028, 0x1F66, 0x017B, 0x06E7, 0x0041, 0x1FB9, 0x0016,
+ 0x0025, 0x1F74, 0x0140, 0x06F3, 0x006F, 0x1FAC, 0x0019,
+ 0x0022, 0x1F82, 0x0108, 0x06FB, 0x009F, 0x1F9E, 0x001C,
+ /* Chroma */
+ 0x001F, 0x1F90, 0x00D2, 0x06FE, 0x00D2, 0x1F90, 0x001F,
+ 0x001C, 0x1F9E, 0x009F, 0x06FB, 0x0108, 0x1F82, 0x0022,
+ 0x0019, 0x1FAC, 0x006F, 0x06F3, 0x0140, 0x1F74, 0x0025,
+ 0x0016, 0x1FB9, 0x0041, 0x06E7, 0x017B, 0x1F66, 0x0028,
+ 0x0013, 0x1FC6, 0x0017, 0x06D6, 0x01B7, 0x1F58, 0x002B,
+ 0x0010, 0x1FD3, 0x1FEF, 0x06C0, 0x01F6, 0x1F4B, 0x002D,
+ 0x000E, 0x1FDF, 0x1FCB, 0x06A5, 0x0235, 0x1F3F, 0x002F,
+ 0x000B, 0x1FEA, 0x1FAA, 0x0686, 0x0277, 0x1F33, 0x0031,
+ 0x0009, 0x1FF5, 0x1F8C, 0x0663, 0x02B8, 0x1F28, 0x0033,
+ 0x0007, 0x1FFF, 0x1F72, 0x063A, 0x02FB, 0x1F1F, 0x0034,
+ 0x0005, 0x0008, 0x1F5A, 0x060F, 0x033E, 0x1F17, 0x0035,
+ 0x0003, 0x0010, 0x1F46, 0x05E0, 0x0382, 0x1F10, 0x0035,
+ 0x0002, 0x0017, 0x1F34, 0x05AF, 0x03C5, 0x1F0B, 0x0034,
+ 0x0001, 0x001E, 0x1F26, 0x0579, 0x0407, 0x1F08, 0x0033,
+ 0x0000, 0x0023, 0x1F1A, 0x0541, 0x0449, 0x1F07, 0x0032,
+ 0x1FFF, 0x0028, 0x1F12, 0x0506, 0x048A, 0x1F08, 0x002F,
+ 0x002C, 0x1F0C, 0x04C8, 0x04C8, 0x1F0C, 0x002C, 0x0000,
+ 0x002F, 0x1F08, 0x048A, 0x0506, 0x1F12, 0x0028, 0x1FFF,
+ 0x0032, 0x1F07, 0x0449, 0x0541, 0x1F1A, 0x0023, 0x0000,
+ 0x0033, 0x1F08, 0x0407, 0x0579, 0x1F26, 0x001E, 0x0001,
+ 0x0034, 0x1F0B, 0x03C5, 0x05AF, 0x1F34, 0x0017, 0x0002,
+ 0x0035, 0x1F10, 0x0382, 0x05E0, 0x1F46, 0x0010, 0x0003,
+ 0x0035, 0x1F17, 0x033E, 0x060F, 0x1F5A, 0x0008, 0x0005,
+ 0x0034, 0x1F1F, 0x02FB, 0x063A, 0x1F72, 0x1FFF, 0x0007,
+ 0x0033, 0x1F28, 0x02B8, 0x0663, 0x1F8C, 0x1FF5, 0x0009,
+ 0x0031, 0x1F33, 0x0277, 0x0686, 0x1FAA, 0x1FEA, 0x000B,
+ 0x002F, 0x1F3F, 0x0235, 0x06A5, 0x1FCB, 0x1FDF, 0x000E,
+ 0x002D, 0x1F4B, 0x01F6, 0x06C0, 0x1FEF, 0x1FD3, 0x0010,
+ 0x002B, 0x1F58, 0x01B7, 0x06D6, 0x0017, 0x1FC6, 0x0013,
+ 0x0028, 0x1F66, 0x017B, 0x06E7, 0x0041, 0x1FB9, 0x0016,
+ 0x0025, 0x1F74, 0x0140, 0x06F3, 0x006F, 0x1FAC, 0x0019,
+ 0x0022, 0x1F82, 0x0108, 0x06FB, 0x009F, 0x1F9E, 0x001C,
+ },
+ [HS_LT_9_16_SCALE] = {
+ /* Luma */
+ 0x1FA3, 0x005E, 0x024A, 0x036A, 0x024A, 0x005E, 0x1FA3,
+ 0x1FA3, 0x0052, 0x023A, 0x036A, 0x0259, 0x006A, 0x1FA4,
+ 0x1FA3, 0x0046, 0x022A, 0x036A, 0x0269, 0x0076, 0x1FA4,
+ 0x1FA3, 0x003B, 0x021A, 0x0368, 0x0278, 0x0083, 0x1FA5,
+ 0x1FA4, 0x0031, 0x020A, 0x0365, 0x0286, 0x0090, 0x1FA6,
+ 0x1FA5, 0x0026, 0x01F9, 0x0362, 0x0294, 0x009E, 0x1FA8,
+ 0x1FA6, 0x001C, 0x01E8, 0x035E, 0x02A3, 0x00AB, 0x1FAA,
+ 0x1FA7, 0x0013, 0x01D7, 0x035A, 0x02B0, 0x00B9, 0x1FAC,
+ 0x1FA9, 0x000A, 0x01C6, 0x0354, 0x02BD, 0x00C7, 0x1FAF,
+ 0x1FAA, 0x0001, 0x01B6, 0x034E, 0x02C9, 0x00D6, 0x1FB2,
+ 0x1FAC, 0x1FF9, 0x01A5, 0x0347, 0x02D5, 0x00E5, 0x1FB5,
+ 0x1FAE, 0x1FF1, 0x0194, 0x0340, 0x02E1, 0x00F3, 0x1FB9,
+ 0x1FB0, 0x1FEA, 0x0183, 0x0338, 0x02EC, 0x0102, 0x1FBD,
+ 0x1FB2, 0x1FE3, 0x0172, 0x0330, 0x02F6, 0x0112, 0x1FC1,
+ 0x1FB4, 0x1FDC, 0x0161, 0x0327, 0x0301, 0x0121, 0x1FC6,
+ 0x1FB7, 0x1FD6, 0x0151, 0x031D, 0x030A, 0x0130, 0x1FCB,
+ 0x1FD2, 0x0136, 0x02F8, 0x02F8, 0x0136, 0x1FD2, 0x0000,
+ 0x1FCB, 0x0130, 0x030A, 0x031D, 0x0151, 0x1FD6, 0x1FB7,
+ 0x1FC6, 0x0121, 0x0301, 0x0327, 0x0161, 0x1FDC, 0x1FB4,
+ 0x1FC1, 0x0112, 0x02F6, 0x0330, 0x0172, 0x1FE3, 0x1FB2,
+ 0x1FBD, 0x0102, 0x02EC, 0x0338, 0x0183, 0x1FEA, 0x1FB0,
+ 0x1FB9, 0x00F3, 0x02E1, 0x0340, 0x0194, 0x1FF1, 0x1FAE,
+ 0x1FB5, 0x00E5, 0x02D5, 0x0347, 0x01A5, 0x1FF9, 0x1FAC,
+ 0x1FB2, 0x00D6, 0x02C9, 0x034E, 0x01B6, 0x0001, 0x1FAA,
+ 0x1FAF, 0x00C7, 0x02BD, 0x0354, 0x01C6, 0x000A, 0x1FA9,
+ 0x1FAC, 0x00B9, 0x02B0, 0x035A, 0x01D7, 0x0013, 0x1FA7,
+ 0x1FAA, 0x00AB, 0x02A3, 0x035E, 0x01E8, 0x001C, 0x1FA6,
+ 0x1FA8, 0x009E, 0x0294, 0x0362, 0x01F9, 0x0026, 0x1FA5,
+ 0x1FA6, 0x0090, 0x0286, 0x0365, 0x020A, 0x0031, 0x1FA4,
+ 0x1FA5, 0x0083, 0x0278, 0x0368, 0x021A, 0x003B, 0x1FA3,
+ 0x1FA4, 0x0076, 0x0269, 0x036A, 0x022A, 0x0046, 0x1FA3,
+ 0x1FA4, 0x006A, 0x0259, 0x036A, 0x023A, 0x0052, 0x1FA3,
+ /* Chroma */
+ 0x1FA3, 0x005E, 0x024A, 0x036A, 0x024A, 0x005E, 0x1FA3,
+ 0x1FA3, 0x0052, 0x023A, 0x036A, 0x0259, 0x006A, 0x1FA4,
+ 0x1FA3, 0x0046, 0x022A, 0x036A, 0x0269, 0x0076, 0x1FA4,
+ 0x1FA3, 0x003B, 0x021A, 0x0368, 0x0278, 0x0083, 0x1FA5,
+ 0x1FA4, 0x0031, 0x020A, 0x0365, 0x0286, 0x0090, 0x1FA6,
+ 0x1FA5, 0x0026, 0x01F9, 0x0362, 0x0294, 0x009E, 0x1FA8,
+ 0x1FA6, 0x001C, 0x01E8, 0x035E, 0x02A3, 0x00AB, 0x1FAA,
+ 0x1FA7, 0x0013, 0x01D7, 0x035A, 0x02B0, 0x00B9, 0x1FAC,
+ 0x1FA9, 0x000A, 0x01C6, 0x0354, 0x02BD, 0x00C7, 0x1FAF,
+ 0x1FAA, 0x0001, 0x01B6, 0x034E, 0x02C9, 0x00D6, 0x1FB2,
+ 0x1FAC, 0x1FF9, 0x01A5, 0x0347, 0x02D5, 0x00E5, 0x1FB5,
+ 0x1FAE, 0x1FF1, 0x0194, 0x0340, 0x02E1, 0x00F3, 0x1FB9,
+ 0x1FB0, 0x1FEA, 0x0183, 0x0338, 0x02EC, 0x0102, 0x1FBD,
+ 0x1FB2, 0x1FE3, 0x0172, 0x0330, 0x02F6, 0x0112, 0x1FC1,
+ 0x1FB4, 0x1FDC, 0x0161, 0x0327, 0x0301, 0x0121, 0x1FC6,
+ 0x1FB7, 0x1FD6, 0x0151, 0x031D, 0x030A, 0x0130, 0x1FCB,
+ 0x1FD2, 0x0136, 0x02F8, 0x02F8, 0x0136, 0x1FD2, 0x0000,
+ 0x1FCB, 0x0130, 0x030A, 0x031D, 0x0151, 0x1FD6, 0x1FB7,
+ 0x1FC6, 0x0121, 0x0301, 0x0327, 0x0161, 0x1FDC, 0x1FB4,
+ 0x1FC1, 0x0112, 0x02F6, 0x0330, 0x0172, 0x1FE3, 0x1FB2,
+ 0x1FBD, 0x0102, 0x02EC, 0x0338, 0x0183, 0x1FEA, 0x1FB0,
+ 0x1FB9, 0x00F3, 0x02E1, 0x0340, 0x0194, 0x1FF1, 0x1FAE,
+ 0x1FB5, 0x00E5, 0x02D5, 0x0347, 0x01A5, 0x1FF9, 0x1FAC,
+ 0x1FB2, 0x00D6, 0x02C9, 0x034E, 0x01B6, 0x0001, 0x1FAA,
+ 0x1FAF, 0x00C7, 0x02BD, 0x0354, 0x01C6, 0x000A, 0x1FA9,
+ 0x1FAC, 0x00B9, 0x02B0, 0x035A, 0x01D7, 0x0013, 0x1FA7,
+ 0x1FAA, 0x00AB, 0x02A3, 0x035E, 0x01E8, 0x001C, 0x1FA6,
+ 0x1FA8, 0x009E, 0x0294, 0x0362, 0x01F9, 0x0026, 0x1FA5,
+ 0x1FA6, 0x0090, 0x0286, 0x0365, 0x020A, 0x0031, 0x1FA4,
+ 0x1FA5, 0x0083, 0x0278, 0x0368, 0x021A, 0x003B, 0x1FA3,
+ 0x1FA4, 0x0076, 0x0269, 0x036A, 0x022A, 0x0046, 0x1FA3,
+ 0x1FA4, 0x006A, 0x0259, 0x036A, 0x023A, 0x0052, 0x1FA3,
+ },
+ [HS_LT_10_16_SCALE] = {
+ /* Luma */
+ 0x1F8D, 0x000C, 0x026A, 0x03FA, 0x026A, 0x000C, 0x1F8D,
+ 0x1F8F, 0x0000, 0x0255, 0x03FA, 0x027F, 0x0019, 0x1F8A,
+ 0x1F92, 0x1FF5, 0x023F, 0x03F8, 0x0293, 0x0027, 0x1F88,
+ 0x1F95, 0x1FEA, 0x022A, 0x03F6, 0x02A7, 0x0034, 0x1F86,
+ 0x1F99, 0x1FDF, 0x0213, 0x03F2, 0x02BB, 0x0043, 0x1F85,
+ 0x1F9C, 0x1FD5, 0x01FE, 0x03ED, 0x02CF, 0x0052, 0x1F83,
+ 0x1FA0, 0x1FCC, 0x01E8, 0x03E7, 0x02E1, 0x0061, 0x1F83,
+ 0x1FA4, 0x1FC3, 0x01D2, 0x03E0, 0x02F4, 0x0071, 0x1F82,
+ 0x1FA7, 0x1FBB, 0x01BC, 0x03D9, 0x0306, 0x0081, 0x1F82,
+ 0x1FAB, 0x1FB4, 0x01A6, 0x03D0, 0x0317, 0x0092, 0x1F82,
+ 0x1FAF, 0x1FAD, 0x0190, 0x03C7, 0x0327, 0x00A3, 0x1F83,
+ 0x1FB3, 0x1FA7, 0x017A, 0x03BC, 0x0337, 0x00B5, 0x1F84,
+ 0x1FB8, 0x1FA1, 0x0165, 0x03B0, 0x0346, 0x00C7, 0x1F85,
+ 0x1FBC, 0x1F9C, 0x0150, 0x03A4, 0x0354, 0x00D9, 0x1F87,
+ 0x1FC0, 0x1F98, 0x013A, 0x0397, 0x0361, 0x00EC, 0x1F8A,
+ 0x1FC4, 0x1F93, 0x0126, 0x0389, 0x036F, 0x00FE, 0x1F8D,
+ 0x1F93, 0x010A, 0x0363, 0x0363, 0x010A, 0x1F93, 0x0000,
+ 0x1F8D, 0x00FE, 0x036F, 0x0389, 0x0126, 0x1F93, 0x1FC4,
+ 0x1F8A, 0x00EC, 0x0361, 0x0397, 0x013A, 0x1F98, 0x1FC0,
+ 0x1F87, 0x00D9, 0x0354, 0x03A4, 0x0150, 0x1F9C, 0x1FBC,
+ 0x1F85, 0x00C7, 0x0346, 0x03B0, 0x0165, 0x1FA1, 0x1FB8,
+ 0x1F84, 0x00B5, 0x0337, 0x03BC, 0x017A, 0x1FA7, 0x1FB3,
+ 0x1F83, 0x00A3, 0x0327, 0x03C7, 0x0190, 0x1FAD, 0x1FAF,
+ 0x1F82, 0x0092, 0x0317, 0x03D0, 0x01A6, 0x1FB4, 0x1FAB,
+ 0x1F82, 0x0081, 0x0306, 0x03D9, 0x01BC, 0x1FBB, 0x1FA7,
+ 0x1F82, 0x0071, 0x02F4, 0x03E0, 0x01D2, 0x1FC3, 0x1FA4,
+ 0x1F83, 0x0061, 0x02E1, 0x03E7, 0x01E8, 0x1FCC, 0x1FA0,
+ 0x1F83, 0x0052, 0x02CF, 0x03ED, 0x01FE, 0x1FD5, 0x1F9C,
+ 0x1F85, 0x0043, 0x02BB, 0x03F2, 0x0213, 0x1FDF, 0x1F99,
+ 0x1F86, 0x0034, 0x02A7, 0x03F6, 0x022A, 0x1FEA, 0x1F95,
+ 0x1F88, 0x0027, 0x0293, 0x03F8, 0x023F, 0x1FF5, 0x1F92,
+ 0x1F8A, 0x0019, 0x027F, 0x03FA, 0x0255, 0x0000, 0x1F8F,
+ /* Chroma */
+ 0x1F8D, 0x000C, 0x026A, 0x03FA, 0x026A, 0x000C, 0x1F8D,
+ 0x1F8F, 0x0000, 0x0255, 0x03FA, 0x027F, 0x0019, 0x1F8A,
+ 0x1F92, 0x1FF5, 0x023F, 0x03F8, 0x0293, 0x0027, 0x1F88,
+ 0x1F95, 0x1FEA, 0x022A, 0x03F6, 0x02A7, 0x0034, 0x1F86,
+ 0x1F99, 0x1FDF, 0x0213, 0x03F2, 0x02BB, 0x0043, 0x1F85,
+ 0x1F9C, 0x1FD5, 0x01FE, 0x03ED, 0x02CF, 0x0052, 0x1F83,
+ 0x1FA0, 0x1FCC, 0x01E8, 0x03E7, 0x02E1, 0x0061, 0x1F83,
+ 0x1FA4, 0x1FC3, 0x01D2, 0x03E0, 0x02F4, 0x0071, 0x1F82,
+ 0x1FA7, 0x1FBB, 0x01BC, 0x03D9, 0x0306, 0x0081, 0x1F82,
+ 0x1FAB, 0x1FB4, 0x01A6, 0x03D0, 0x0317, 0x0092, 0x1F82,
+ 0x1FAF, 0x1FAD, 0x0190, 0x03C7, 0x0327, 0x00A3, 0x1F83,
+ 0x1FB3, 0x1FA7, 0x017A, 0x03BC, 0x0337, 0x00B5, 0x1F84,
+ 0x1FB8, 0x1FA1, 0x0165, 0x03B0, 0x0346, 0x00C7, 0x1F85,
+ 0x1FBC, 0x1F9C, 0x0150, 0x03A4, 0x0354, 0x00D9, 0x1F87,
+ 0x1FC0, 0x1F98, 0x013A, 0x0397, 0x0361, 0x00EC, 0x1F8A,
+ 0x1FC4, 0x1F93, 0x0126, 0x0389, 0x036F, 0x00FE, 0x1F8D,
+ 0x1F93, 0x010A, 0x0363, 0x0363, 0x010A, 0x1F93, 0x0000,
+ 0x1F8D, 0x00FE, 0x036F, 0x0389, 0x0126, 0x1F93, 0x1FC4,
+ 0x1F8A, 0x00EC, 0x0361, 0x0397, 0x013A, 0x1F98, 0x1FC0,
+ 0x1F87, 0x00D9, 0x0354, 0x03A4, 0x0150, 0x1F9C, 0x1FBC,
+ 0x1F85, 0x00C7, 0x0346, 0x03B0, 0x0165, 0x1FA1, 0x1FB8,
+ 0x1F84, 0x00B5, 0x0337, 0x03BC, 0x017A, 0x1FA7, 0x1FB3,
+ 0x1F83, 0x00A3, 0x0327, 0x03C7, 0x0190, 0x1FAD, 0x1FAF,
+ 0x1F82, 0x0092, 0x0317, 0x03D0, 0x01A6, 0x1FB4, 0x1FAB,
+ 0x1F82, 0x0081, 0x0306, 0x03D9, 0x01BC, 0x1FBB, 0x1FA7,
+ 0x1F82, 0x0071, 0x02F4, 0x03E0, 0x01D2, 0x1FC3, 0x1FA4,
+ 0x1F83, 0x0061, 0x02E1, 0x03E7, 0x01E8, 0x1FCC, 0x1FA0,
+ 0x1F83, 0x0052, 0x02CF, 0x03ED, 0x01FE, 0x1FD5, 0x1F9C,
+ 0x1F85, 0x0043, 0x02BB, 0x03F2, 0x0213, 0x1FDF, 0x1F99,
+ 0x1F86, 0x0034, 0x02A7, 0x03F6, 0x022A, 0x1FEA, 0x1F95,
+ 0x1F88, 0x0027, 0x0293, 0x03F8, 0x023F, 0x1FF5, 0x1F92,
+ 0x1F8A, 0x0019, 0x027F, 0x03FA, 0x0255, 0x0000, 0x1F8F,
+ },
+ [HS_LT_11_16_SCALE] = {
+ /* Luma */
+ 0x1F95, 0x1FB5, 0x0272, 0x0488, 0x0272, 0x1FB5, 0x1F95,
+ 0x1F9B, 0x1FAA, 0x0257, 0x0486, 0x028D, 0x1FC1, 0x1F90,
+ 0x1FA0, 0x1FA0, 0x023C, 0x0485, 0x02A8, 0x1FCD, 0x1F8A,
+ 0x1FA6, 0x1F96, 0x0221, 0x0481, 0x02C2, 0x1FDB, 0x1F85,
+ 0x1FAC, 0x1F8E, 0x0205, 0x047C, 0x02DC, 0x1FE9, 0x1F80,
+ 0x1FB1, 0x1F86, 0x01E9, 0x0476, 0x02F6, 0x1FF8, 0x1F7C,
+ 0x1FB7, 0x1F7F, 0x01CE, 0x046E, 0x030F, 0x0008, 0x1F77,
+ 0x1FBD, 0x1F79, 0x01B3, 0x0465, 0x0326, 0x0019, 0x1F73,
+ 0x1FC3, 0x1F73, 0x0197, 0x045B, 0x033E, 0x002A, 0x1F70,
+ 0x1FC8, 0x1F6F, 0x017D, 0x044E, 0x0355, 0x003C, 0x1F6D,
+ 0x1FCE, 0x1F6B, 0x0162, 0x0441, 0x036B, 0x004F, 0x1F6A,
+ 0x1FD3, 0x1F68, 0x0148, 0x0433, 0x0380, 0x0063, 0x1F67,
+ 0x1FD8, 0x1F65, 0x012E, 0x0424, 0x0395, 0x0077, 0x1F65,
+ 0x1FDE, 0x1F63, 0x0115, 0x0413, 0x03A8, 0x008B, 0x1F64,
+ 0x1FE3, 0x1F62, 0x00FC, 0x0403, 0x03BA, 0x00A0, 0x1F62,
+ 0x1FE7, 0x1F62, 0x00E4, 0x03EF, 0x03CC, 0x00B6, 0x1F62,
+ 0x1F63, 0x00CA, 0x03D3, 0x03D3, 0x00CA, 0x1F63, 0x0000,
+ 0x1F62, 0x00B6, 0x03CC, 0x03EF, 0x00E4, 0x1F62, 0x1FE7,
+ 0x1F62, 0x00A0, 0x03BA, 0x0403, 0x00FC, 0x1F62, 0x1FE3,
+ 0x1F64, 0x008B, 0x03A8, 0x0413, 0x0115, 0x1F63, 0x1FDE,
+ 0x1F65, 0x0077, 0x0395, 0x0424, 0x012E, 0x1F65, 0x1FD8,
+ 0x1F67, 0x0063, 0x0380, 0x0433, 0x0148, 0x1F68, 0x1FD3,
+ 0x1F6A, 0x004F, 0x036B, 0x0441, 0x0162, 0x1F6B, 0x1FCE,
+ 0x1F6D, 0x003C, 0x0355, 0x044E, 0x017D, 0x1F6F, 0x1FC8,
+ 0x1F70, 0x002A, 0x033E, 0x045B, 0x0197, 0x1F73, 0x1FC3,
+ 0x1F73, 0x0019, 0x0326, 0x0465, 0x01B3, 0x1F79, 0x1FBD,
+ 0x1F77, 0x0008, 0x030F, 0x046E, 0x01CE, 0x1F7F, 0x1FB7,
+ 0x1F7C, 0x1FF8, 0x02F6, 0x0476, 0x01E9, 0x1F86, 0x1FB1,
+ 0x1F80, 0x1FE9, 0x02DC, 0x047C, 0x0205, 0x1F8E, 0x1FAC,
+ 0x1F85, 0x1FDB, 0x02C2, 0x0481, 0x0221, 0x1F96, 0x1FA6,
+ 0x1F8A, 0x1FCD, 0x02A8, 0x0485, 0x023C, 0x1FA0, 0x1FA0,
+ 0x1F90, 0x1FC1, 0x028D, 0x0486, 0x0257, 0x1FAA, 0x1F9B,
+ /* Chroma */
+ 0x1F95, 0x1FB5, 0x0272, 0x0488, 0x0272, 0x1FB5, 0x1F95,
+ 0x1F9B, 0x1FAA, 0x0257, 0x0486, 0x028D, 0x1FC1, 0x1F90,
+ 0x1FA0, 0x1FA0, 0x023C, 0x0485, 0x02A8, 0x1FCD, 0x1F8A,
+ 0x1FA6, 0x1F96, 0x0221, 0x0481, 0x02C2, 0x1FDB, 0x1F85,
+ 0x1FAC, 0x1F8E, 0x0205, 0x047C, 0x02DC, 0x1FE9, 0x1F80,
+ 0x1FB1, 0x1F86, 0x01E9, 0x0476, 0x02F6, 0x1FF8, 0x1F7C,
+ 0x1FB7, 0x1F7F, 0x01CE, 0x046E, 0x030F, 0x0008, 0x1F77,
+ 0x1FBD, 0x1F79, 0x01B3, 0x0465, 0x0326, 0x0019, 0x1F73,
+ 0x1FC3, 0x1F73, 0x0197, 0x045B, 0x033E, 0x002A, 0x1F70,
+ 0x1FC8, 0x1F6F, 0x017D, 0x044E, 0x0355, 0x003C, 0x1F6D,
+ 0x1FCE, 0x1F6B, 0x0162, 0x0441, 0x036B, 0x004F, 0x1F6A,
+ 0x1FD3, 0x1F68, 0x0148, 0x0433, 0x0380, 0x0063, 0x1F67,
+ 0x1FD8, 0x1F65, 0x012E, 0x0424, 0x0395, 0x0077, 0x1F65,
+ 0x1FDE, 0x1F63, 0x0115, 0x0413, 0x03A8, 0x008B, 0x1F64,
+ 0x1FE3, 0x1F62, 0x00FC, 0x0403, 0x03BA, 0x00A0, 0x1F62,
+ 0x1FE7, 0x1F62, 0x00E4, 0x03EF, 0x03CC, 0x00B6, 0x1F62,
+ 0x1F63, 0x00CA, 0x03D3, 0x03D3, 0x00CA, 0x1F63, 0x0000,
+ 0x1F62, 0x00B6, 0x03CC, 0x03EF, 0x00E4, 0x1F62, 0x1FE7,
+ 0x1F62, 0x00A0, 0x03BA, 0x0403, 0x00FC, 0x1F62, 0x1FE3,
+ 0x1F64, 0x008B, 0x03A8, 0x0413, 0x0115, 0x1F63, 0x1FDE,
+ 0x1F65, 0x0077, 0x0395, 0x0424, 0x012E, 0x1F65, 0x1FD8,
+ 0x1F67, 0x0063, 0x0380, 0x0433, 0x0148, 0x1F68, 0x1FD3,
+ 0x1F6A, 0x004F, 0x036B, 0x0441, 0x0162, 0x1F6B, 0x1FCE,
+ 0x1F6D, 0x003C, 0x0355, 0x044E, 0x017D, 0x1F6F, 0x1FC8,
+ 0x1F70, 0x002A, 0x033E, 0x045B, 0x0197, 0x1F73, 0x1FC3,
+ 0x1F73, 0x0019, 0x0326, 0x0465, 0x01B3, 0x1F79, 0x1FBD,
+ 0x1F77, 0x0008, 0x030F, 0x046E, 0x01CE, 0x1F7F, 0x1FB7,
+ 0x1F7C, 0x1FF8, 0x02F6, 0x0476, 0x01E9, 0x1F86, 0x1FB1,
+ 0x1F80, 0x1FE9, 0x02DC, 0x047C, 0x0205, 0x1F8E, 0x1FAC,
+ 0x1F85, 0x1FDB, 0x02C2, 0x0481, 0x0221, 0x1F96, 0x1FA6,
+ 0x1F8A, 0x1FCD, 0x02A8, 0x0485, 0x023C, 0x1FA0, 0x1FA0,
+ 0x1F90, 0x1FC1, 0x028D, 0x0486, 0x0257, 0x1FAA, 0x1F9B,
+ },
+ [HS_LT_12_16_SCALE] = {
+ /* Luma */
+ 0x1FBB, 0x1F65, 0x025E, 0x0504, 0x025E, 0x1F65, 0x1FBB,
+ 0x1FC3, 0x1F5D, 0x023C, 0x0503, 0x027F, 0x1F6E, 0x1FB4,
+ 0x1FCA, 0x1F56, 0x021B, 0x0501, 0x02A0, 0x1F78, 0x1FAC,
+ 0x1FD1, 0x1F50, 0x01FA, 0x04FD, 0x02C0, 0x1F83, 0x1FA5,
+ 0x1FD8, 0x1F4B, 0x01D9, 0x04F6, 0x02E1, 0x1F90, 0x1F9D,
+ 0x1FDF, 0x1F47, 0x01B8, 0x04EF, 0x0301, 0x1F9D, 0x1F95,
+ 0x1FE6, 0x1F43, 0x0198, 0x04E5, 0x0321, 0x1FAB, 0x1F8E,
+ 0x1FEC, 0x1F41, 0x0178, 0x04DA, 0x0340, 0x1FBB, 0x1F86,
+ 0x1FF2, 0x1F40, 0x0159, 0x04CC, 0x035E, 0x1FCC, 0x1F7F,
+ 0x1FF8, 0x1F40, 0x013A, 0x04BE, 0x037B, 0x1FDD, 0x1F78,
+ 0x1FFE, 0x1F40, 0x011B, 0x04AD, 0x0398, 0x1FF0, 0x1F72,
+ 0x0003, 0x1F41, 0x00FD, 0x049C, 0x03B4, 0x0004, 0x1F6B,
+ 0x0008, 0x1F43, 0x00E0, 0x0489, 0x03CE, 0x0019, 0x1F65,
+ 0x000D, 0x1F46, 0x00C4, 0x0474, 0x03E8, 0x002E, 0x1F5F,
+ 0x0011, 0x1F49, 0x00A9, 0x045E, 0x0400, 0x0045, 0x1F5A,
+ 0x0015, 0x1F4D, 0x008E, 0x0447, 0x0418, 0x005C, 0x1F55,
+ 0x1F4F, 0x0076, 0x043B, 0x043B, 0x0076, 0x1F4F, 0x0000,
+ 0x1F55, 0x005C, 0x0418, 0x0447, 0x008E, 0x1F4D, 0x0015,
+ 0x1F5A, 0x0045, 0x0400, 0x045E, 0x00A9, 0x1F49, 0x0011,
+ 0x1F5F, 0x002E, 0x03E8, 0x0474, 0x00C4, 0x1F46, 0x000D,
+ 0x1F65, 0x0019, 0x03CE, 0x0489, 0x00E0, 0x1F43, 0x0008,
+ 0x1F6B, 0x0004, 0x03B4, 0x049C, 0x00FD, 0x1F41, 0x0003,
+ 0x1F72, 0x1FF0, 0x0398, 0x04AD, 0x011B, 0x1F40, 0x1FFE,
+ 0x1F78, 0x1FDD, 0x037B, 0x04BE, 0x013A, 0x1F40, 0x1FF8,
+ 0x1F7F, 0x1FCC, 0x035E, 0x04CC, 0x0159, 0x1F40, 0x1FF2,
+ 0x1F86, 0x1FBB, 0x0340, 0x04DA, 0x0178, 0x1F41, 0x1FEC,
+ 0x1F8E, 0x1FAB, 0x0321, 0x04E5, 0x0198, 0x1F43, 0x1FE6,
+ 0x1F95, 0x1F9D, 0x0301, 0x04EF, 0x01B8, 0x1F47, 0x1FDF,
+ 0x1F9D, 0x1F90, 0x02E1, 0x04F6, 0x01D9, 0x1F4B, 0x1FD8,
+ 0x1FA5, 0x1F83, 0x02C0, 0x04FD, 0x01FA, 0x1F50, 0x1FD1,
+ 0x1FAC, 0x1F78, 0x02A0, 0x0501, 0x021B, 0x1F56, 0x1FCA,
+ 0x1FB4, 0x1F6E, 0x027F, 0x0503, 0x023C, 0x1F5D, 0x1FC3,
+ /* Chroma */
+ 0x1FBB, 0x1F65, 0x025E, 0x0504, 0x025E, 0x1F65, 0x1FBB,
+ 0x1FC3, 0x1F5D, 0x023C, 0x0503, 0x027F, 0x1F6E, 0x1FB4,
+ 0x1FCA, 0x1F56, 0x021B, 0x0501, 0x02A0, 0x1F78, 0x1FAC,
+ 0x1FD1, 0x1F50, 0x01FA, 0x04FD, 0x02C0, 0x1F83, 0x1FA5,
+ 0x1FD8, 0x1F4B, 0x01D9, 0x04F6, 0x02E1, 0x1F90, 0x1F9D,
+ 0x1FDF, 0x1F47, 0x01B8, 0x04EF, 0x0301, 0x1F9D, 0x1F95,
+ 0x1FE6, 0x1F43, 0x0198, 0x04E5, 0x0321, 0x1FAB, 0x1F8E,
+ 0x1FEC, 0x1F41, 0x0178, 0x04DA, 0x0340, 0x1FBB, 0x1F86,
+ 0x1FF2, 0x1F40, 0x0159, 0x04CC, 0x035E, 0x1FCC, 0x1F7F,
+ 0x1FF8, 0x1F40, 0x013A, 0x04BE, 0x037B, 0x1FDD, 0x1F78,
+ 0x1FFE, 0x1F40, 0x011B, 0x04AD, 0x0398, 0x1FF0, 0x1F72,
+ 0x0003, 0x1F41, 0x00FD, 0x049C, 0x03B4, 0x0004, 0x1F6B,
+ 0x0008, 0x1F43, 0x00E0, 0x0489, 0x03CE, 0x0019, 0x1F65,
+ 0x000D, 0x1F46, 0x00C4, 0x0474, 0x03E8, 0x002E, 0x1F5F,
+ 0x0011, 0x1F49, 0x00A9, 0x045E, 0x0400, 0x0045, 0x1F5A,
+ 0x0015, 0x1F4D, 0x008E, 0x0447, 0x0418, 0x005C, 0x1F55,
+ 0x1F4F, 0x0076, 0x043B, 0x043B, 0x0076, 0x1F4F, 0x0000,
+ 0x1F55, 0x005C, 0x0418, 0x0447, 0x008E, 0x1F4D, 0x0015,
+ 0x1F5A, 0x0045, 0x0400, 0x045E, 0x00A9, 0x1F49, 0x0011,
+ 0x1F5F, 0x002E, 0x03E8, 0x0474, 0x00C4, 0x1F46, 0x000D,
+ 0x1F65, 0x0019, 0x03CE, 0x0489, 0x00E0, 0x1F43, 0x0008,
+ 0x1F6B, 0x0004, 0x03B4, 0x049C, 0x00FD, 0x1F41, 0x0003,
+ 0x1F72, 0x1FF0, 0x0398, 0x04AD, 0x011B, 0x1F40, 0x1FFE,
+ 0x1F78, 0x1FDD, 0x037B, 0x04BE, 0x013A, 0x1F40, 0x1FF8,
+ 0x1F7F, 0x1FCC, 0x035E, 0x04CC, 0x0159, 0x1F40, 0x1FF2,
+ 0x1F86, 0x1FBB, 0x0340, 0x04DA, 0x0178, 0x1F41, 0x1FEC,
+ 0x1F8E, 0x1FAB, 0x0321, 0x04E5, 0x0198, 0x1F43, 0x1FE6,
+ 0x1F95, 0x1F9D, 0x0301, 0x04EF, 0x01B8, 0x1F47, 0x1FDF,
+ 0x1F9D, 0x1F90, 0x02E1, 0x04F6, 0x01D9, 0x1F4B, 0x1FD8,
+ 0x1FA5, 0x1F83, 0x02C0, 0x04FD, 0x01FA, 0x1F50, 0x1FD1,
+ 0x1FAC, 0x1F78, 0x02A0, 0x0501, 0x021B, 0x1F56, 0x1FCA,
+ 0x1FB4, 0x1F6E, 0x027F, 0x0503, 0x023C, 0x1F5D, 0x1FC3,
+ },
+ [HS_LT_13_16_SCALE] = {
+ /* Luma */
+ 0x1FF4, 0x1F29, 0x022D, 0x056C, 0x022D, 0x1F29, 0x1FF4,
+ 0x1FFC, 0x1F26, 0x0206, 0x056A, 0x0254, 0x1F2E, 0x1FEC,
+ 0x0003, 0x1F24, 0x01E0, 0x0567, 0x027A, 0x1F34, 0x1FE4,
+ 0x000A, 0x1F23, 0x01BA, 0x0561, 0x02A2, 0x1F3B, 0x1FDB,
+ 0x0011, 0x1F22, 0x0194, 0x055B, 0x02C9, 0x1F43, 0x1FD2,
+ 0x0017, 0x1F23, 0x016F, 0x0551, 0x02F0, 0x1F4D, 0x1FC9,
+ 0x001D, 0x1F25, 0x014B, 0x0545, 0x0316, 0x1F58, 0x1FC0,
+ 0x0022, 0x1F28, 0x0127, 0x0538, 0x033C, 0x1F65, 0x1FB6,
+ 0x0027, 0x1F2C, 0x0104, 0x0528, 0x0361, 0x1F73, 0x1FAD,
+ 0x002B, 0x1F30, 0x00E2, 0x0518, 0x0386, 0x1F82, 0x1FA3,
+ 0x002F, 0x1F36, 0x00C2, 0x0504, 0x03AA, 0x1F92, 0x1F99,
+ 0x0032, 0x1F3C, 0x00A2, 0x04EF, 0x03CD, 0x1FA4, 0x1F90,
+ 0x0035, 0x1F42, 0x0083, 0x04D9, 0x03EF, 0x1FB8, 0x1F86,
+ 0x0038, 0x1F49, 0x0065, 0x04C0, 0x0410, 0x1FCD, 0x1F7D,
+ 0x003A, 0x1F51, 0x0048, 0x04A6, 0x0431, 0x1FE3, 0x1F73,
+ 0x003C, 0x1F59, 0x002D, 0x048A, 0x0450, 0x1FFA, 0x1F6A,
+ 0x1F5D, 0x0014, 0x048F, 0x048F, 0x0014, 0x1F5D, 0x0000,
+ 0x1F6A, 0x1FFA, 0x0450, 0x048A, 0x002D, 0x1F59, 0x003C,
+ 0x1F73, 0x1FE3, 0x0431, 0x04A6, 0x0048, 0x1F51, 0x003A,
+ 0x1F7D, 0x1FCD, 0x0410, 0x04C0, 0x0065, 0x1F49, 0x0038,
+ 0x1F86, 0x1FB8, 0x03EF, 0x04D9, 0x0083, 0x1F42, 0x0035,
+ 0x1F90, 0x1FA4, 0x03CD, 0x04EF, 0x00A2, 0x1F3C, 0x0032,
+ 0x1F99, 0x1F92, 0x03AA, 0x0504, 0x00C2, 0x1F36, 0x002F,
+ 0x1FA3, 0x1F82, 0x0386, 0x0518, 0x00E2, 0x1F30, 0x002B,
+ 0x1FAD, 0x1F73, 0x0361, 0x0528, 0x0104, 0x1F2C, 0x0027,
+ 0x1FB6, 0x1F65, 0x033C, 0x0538, 0x0127, 0x1F28, 0x0022,
+ 0x1FC0, 0x1F58, 0x0316, 0x0545, 0x014B, 0x1F25, 0x001D,
+ 0x1FC9, 0x1F4D, 0x02F0, 0x0551, 0x016F, 0x1F23, 0x0017,
+ 0x1FD2, 0x1F43, 0x02C9, 0x055B, 0x0194, 0x1F22, 0x0011,
+ 0x1FDB, 0x1F3B, 0x02A2, 0x0561, 0x01BA, 0x1F23, 0x000A,
+ 0x1FE4, 0x1F34, 0x027A, 0x0567, 0x01E0, 0x1F24, 0x0003,
+ 0x1FEC, 0x1F2E, 0x0254, 0x056A, 0x0206, 0x1F26, 0x1FFC,
+ /* Chroma */
+ 0x1FF4, 0x1F29, 0x022D, 0x056C, 0x022D, 0x1F29, 0x1FF4,
+ 0x1FFC, 0x1F26, 0x0206, 0x056A, 0x0254, 0x1F2E, 0x1FEC,
+ 0x0003, 0x1F24, 0x01E0, 0x0567, 0x027A, 0x1F34, 0x1FE4,
+ 0x000A, 0x1F23, 0x01BA, 0x0561, 0x02A2, 0x1F3B, 0x1FDB,
+ 0x0011, 0x1F22, 0x0194, 0x055B, 0x02C9, 0x1F43, 0x1FD2,
+ 0x0017, 0x1F23, 0x016F, 0x0551, 0x02F0, 0x1F4D, 0x1FC9,
+ 0x001D, 0x1F25, 0x014B, 0x0545, 0x0316, 0x1F58, 0x1FC0,
+ 0x0022, 0x1F28, 0x0127, 0x0538, 0x033C, 0x1F65, 0x1FB6,
+ 0x0027, 0x1F2C, 0x0104, 0x0528, 0x0361, 0x1F73, 0x1FAD,
+ 0x002B, 0x1F30, 0x00E2, 0x0518, 0x0386, 0x1F82, 0x1FA3,
+ 0x002F, 0x1F36, 0x00C2, 0x0504, 0x03AA, 0x1F92, 0x1F99,
+ 0x0032, 0x1F3C, 0x00A2, 0x04EF, 0x03CD, 0x1FA4, 0x1F90,
+ 0x0035, 0x1F42, 0x0083, 0x04D9, 0x03EF, 0x1FB8, 0x1F86,
+ 0x0038, 0x1F49, 0x0065, 0x04C0, 0x0410, 0x1FCD, 0x1F7D,
+ 0x003A, 0x1F51, 0x0048, 0x04A6, 0x0431, 0x1FE3, 0x1F73,
+ 0x003C, 0x1F59, 0x002D, 0x048A, 0x0450, 0x1FFA, 0x1F6A,
+ 0x1F5D, 0x0014, 0x048F, 0x048F, 0x0014, 0x1F5D, 0x0000,
+ 0x1F6A, 0x1FFA, 0x0450, 0x048A, 0x002D, 0x1F59, 0x003C,
+ 0x1F73, 0x1FE3, 0x0431, 0x04A6, 0x0048, 0x1F51, 0x003A,
+ 0x1F7D, 0x1FCD, 0x0410, 0x04C0, 0x0065, 0x1F49, 0x0038,
+ 0x1F86, 0x1FB8, 0x03EF, 0x04D9, 0x0083, 0x1F42, 0x0035,
+ 0x1F90, 0x1FA4, 0x03CD, 0x04EF, 0x00A2, 0x1F3C, 0x0032,
+ 0x1F99, 0x1F92, 0x03AA, 0x0504, 0x00C2, 0x1F36, 0x002F,
+ 0x1FA3, 0x1F82, 0x0386, 0x0518, 0x00E2, 0x1F30, 0x002B,
+ 0x1FAD, 0x1F73, 0x0361, 0x0528, 0x0104, 0x1F2C, 0x0027,
+ 0x1FB6, 0x1F65, 0x033C, 0x0538, 0x0127, 0x1F28, 0x0022,
+ 0x1FC0, 0x1F58, 0x0316, 0x0545, 0x014B, 0x1F25, 0x001D,
+ 0x1FC9, 0x1F4D, 0x02F0, 0x0551, 0x016F, 0x1F23, 0x0017,
+ 0x1FD2, 0x1F43, 0x02C9, 0x055B, 0x0194, 0x1F22, 0x0011,
+ 0x1FDB, 0x1F3B, 0x02A2, 0x0561, 0x01BA, 0x1F23, 0x000A,
+ 0x1FE4, 0x1F34, 0x027A, 0x0567, 0x01E0, 0x1F24, 0x0003,
+ 0x1FEC, 0x1F2E, 0x0254, 0x056A, 0x0206, 0x1F26, 0x1FFC,
+ },
+ [HS_LT_14_16_SCALE] = {
+ /* Luma */
+ 0x002F, 0x1F0B, 0x01E7, 0x05BE, 0x01E7, 0x1F0B, 0x002F,
+ 0x0035, 0x1F0D, 0x01BC, 0x05BD, 0x0213, 0x1F0A, 0x0028,
+ 0x003A, 0x1F11, 0x0191, 0x05BA, 0x023F, 0x1F0A, 0x0021,
+ 0x003F, 0x1F15, 0x0167, 0x05B3, 0x026C, 0x1F0C, 0x001A,
+ 0x0043, 0x1F1B, 0x013E, 0x05AA, 0x0299, 0x1F0F, 0x0012,
+ 0x0046, 0x1F21, 0x0116, 0x05A1, 0x02C6, 0x1F13, 0x0009,
+ 0x0049, 0x1F28, 0x00EF, 0x0593, 0x02F4, 0x1F19, 0x0000,
+ 0x004C, 0x1F30, 0x00C9, 0x0584, 0x0321, 0x1F20, 0x1FF6,
+ 0x004E, 0x1F39, 0x00A4, 0x0572, 0x034D, 0x1F2A, 0x1FEC,
+ 0x004F, 0x1F43, 0x0080, 0x055E, 0x037A, 0x1F34, 0x1FE2,
+ 0x0050, 0x1F4D, 0x005E, 0x0548, 0x03A5, 0x1F41, 0x1FD7,
+ 0x0050, 0x1F57, 0x003D, 0x0531, 0x03D1, 0x1F4F, 0x1FCB,
+ 0x0050, 0x1F62, 0x001E, 0x0516, 0x03FB, 0x1F5F, 0x1FC0,
+ 0x004F, 0x1F6D, 0x0000, 0x04FA, 0x0425, 0x1F71, 0x1FB4,
+ 0x004E, 0x1F79, 0x1FE4, 0x04DC, 0x044D, 0x1F84, 0x1FA8,
+ 0x004D, 0x1F84, 0x1FCA, 0x04BC, 0x0474, 0x1F99, 0x1F9C,
+ 0x1F8C, 0x1FAE, 0x04C6, 0x04C6, 0x1FAE, 0x1F8C, 0x0000,
+ 0x1F9C, 0x1F99, 0x0474, 0x04BC, 0x1FCA, 0x1F84, 0x004D,
+ 0x1FA8, 0x1F84, 0x044D, 0x04DC, 0x1FE4, 0x1F79, 0x004E,
+ 0x1FB4, 0x1F71, 0x0425, 0x04FA, 0x0000, 0x1F6D, 0x004F,
+ 0x1FC0, 0x1F5F, 0x03FB, 0x0516, 0x001E, 0x1F62, 0x0050,
+ 0x1FCB, 0x1F4F, 0x03D1, 0x0531, 0x003D, 0x1F57, 0x0050,
+ 0x1FD7, 0x1F41, 0x03A5, 0x0548, 0x005E, 0x1F4D, 0x0050,
+ 0x1FE2, 0x1F34, 0x037A, 0x055E, 0x0080, 0x1F43, 0x004F,
+ 0x1FEC, 0x1F2A, 0x034D, 0x0572, 0x00A4, 0x1F39, 0x004E,
+ 0x1FF6, 0x1F20, 0x0321, 0x0584, 0x00C9, 0x1F30, 0x004C,
+ 0x0000, 0x1F19, 0x02F4, 0x0593, 0x00EF, 0x1F28, 0x0049,
+ 0x0009, 0x1F13, 0x02C6, 0x05A1, 0x0116, 0x1F21, 0x0046,
+ 0x0012, 0x1F0F, 0x0299, 0x05AA, 0x013E, 0x1F1B, 0x0043,
+ 0x001A, 0x1F0C, 0x026C, 0x05B3, 0x0167, 0x1F15, 0x003F,
+ 0x0021, 0x1F0A, 0x023F, 0x05BA, 0x0191, 0x1F11, 0x003A,
+ 0x0028, 0x1F0A, 0x0213, 0x05BD, 0x01BC, 0x1F0D, 0x0035,
+ /* Chroma */
+ 0x002F, 0x1F0B, 0x01E7, 0x05BE, 0x01E7, 0x1F0B, 0x002F,
+ 0x0035, 0x1F0D, 0x01BC, 0x05BD, 0x0213, 0x1F0A, 0x0028,
+ 0x003A, 0x1F11, 0x0191, 0x05BA, 0x023F, 0x1F0A, 0x0021,
+ 0x003F, 0x1F15, 0x0167, 0x05B3, 0x026C, 0x1F0C, 0x001A,
+ 0x0043, 0x1F1B, 0x013E, 0x05AA, 0x0299, 0x1F0F, 0x0012,
+ 0x0046, 0x1F21, 0x0116, 0x05A1, 0x02C6, 0x1F13, 0x0009,
+ 0x0049, 0x1F28, 0x00EF, 0x0593, 0x02F4, 0x1F19, 0x0000,
+ 0x004C, 0x1F30, 0x00C9, 0x0584, 0x0321, 0x1F20, 0x1FF6,
+ 0x004E, 0x1F39, 0x00A4, 0x0572, 0x034D, 0x1F2A, 0x1FEC,
+ 0x004F, 0x1F43, 0x0080, 0x055E, 0x037A, 0x1F34, 0x1FE2,
+ 0x0050, 0x1F4D, 0x005E, 0x0548, 0x03A5, 0x1F41, 0x1FD7,
+ 0x0050, 0x1F57, 0x003D, 0x0531, 0x03D1, 0x1F4F, 0x1FCB,
+ 0x0050, 0x1F62, 0x001E, 0x0516, 0x03FB, 0x1F5F, 0x1FC0,
+ 0x004F, 0x1F6D, 0x0000, 0x04FA, 0x0425, 0x1F71, 0x1FB4,
+ 0x004E, 0x1F79, 0x1FE4, 0x04DC, 0x044D, 0x1F84, 0x1FA8,
+ 0x004D, 0x1F84, 0x1FCA, 0x04BC, 0x0474, 0x1F99, 0x1F9C,
+ 0x1F8C, 0x1FAE, 0x04C6, 0x04C6, 0x1FAE, 0x1F8C, 0x0000,
+ 0x1F9C, 0x1F99, 0x0474, 0x04BC, 0x1FCA, 0x1F84, 0x004D,
+ 0x1FA8, 0x1F84, 0x044D, 0x04DC, 0x1FE4, 0x1F79, 0x004E,
+ 0x1FB4, 0x1F71, 0x0425, 0x04FA, 0x0000, 0x1F6D, 0x004F,
+ 0x1FC0, 0x1F5F, 0x03FB, 0x0516, 0x001E, 0x1F62, 0x0050,
+ 0x1FCB, 0x1F4F, 0x03D1, 0x0531, 0x003D, 0x1F57, 0x0050,
+ 0x1FD7, 0x1F41, 0x03A5, 0x0548, 0x005E, 0x1F4D, 0x0050,
+ 0x1FE2, 0x1F34, 0x037A, 0x055E, 0x0080, 0x1F43, 0x004F,
+ 0x1FEC, 0x1F2A, 0x034D, 0x0572, 0x00A4, 0x1F39, 0x004E,
+ 0x1FF6, 0x1F20, 0x0321, 0x0584, 0x00C9, 0x1F30, 0x004C,
+ 0x0000, 0x1F19, 0x02F4, 0x0593, 0x00EF, 0x1F28, 0x0049,
+ 0x0009, 0x1F13, 0x02C6, 0x05A1, 0x0116, 0x1F21, 0x0046,
+ 0x0012, 0x1F0F, 0x0299, 0x05AA, 0x013E, 0x1F1B, 0x0043,
+ 0x001A, 0x1F0C, 0x026C, 0x05B3, 0x0167, 0x1F15, 0x003F,
+ 0x0021, 0x1F0A, 0x023F, 0x05BA, 0x0191, 0x1F11, 0x003A,
+ 0x0028, 0x1F0A, 0x0213, 0x05BD, 0x01BC, 0x1F0D, 0x0035,
+ },
+ [HS_LT_15_16_SCALE] = {
+ /* Luma */
+ 0x005B, 0x1F0A, 0x0195, 0x060C, 0x0195, 0x1F0A, 0x005B,
+ 0x005D, 0x1F13, 0x0166, 0x0609, 0x01C6, 0x1F03, 0x0058,
+ 0x005F, 0x1F1C, 0x0138, 0x0605, 0x01F7, 0x1EFD, 0x0054,
+ 0x0060, 0x1F26, 0x010B, 0x05FF, 0x0229, 0x1EF8, 0x004F,
+ 0x0060, 0x1F31, 0x00DF, 0x05F5, 0x025C, 0x1EF5, 0x004A,
+ 0x0060, 0x1F3D, 0x00B5, 0x05E8, 0x028F, 0x1EF3, 0x0044,
+ 0x005F, 0x1F49, 0x008C, 0x05DA, 0x02C3, 0x1EF2, 0x003D,
+ 0x005E, 0x1F56, 0x0065, 0x05C7, 0x02F6, 0x1EF4, 0x0036,
+ 0x005C, 0x1F63, 0x003F, 0x05B3, 0x032B, 0x1EF7, 0x002D,
+ 0x0059, 0x1F71, 0x001B, 0x059D, 0x035F, 0x1EFB, 0x0024,
+ 0x0057, 0x1F7F, 0x1FF9, 0x0583, 0x0392, 0x1F02, 0x001A,
+ 0x0053, 0x1F8D, 0x1FD9, 0x0567, 0x03C5, 0x1F0B, 0x0010,
+ 0x0050, 0x1F9B, 0x1FBB, 0x0548, 0x03F8, 0x1F15, 0x0005,
+ 0x004C, 0x1FA9, 0x1F9E, 0x0528, 0x042A, 0x1F22, 0x1FF9,
+ 0x0048, 0x1FB7, 0x1F84, 0x0505, 0x045A, 0x1F31, 0x1FED,
+ 0x0043, 0x1FC5, 0x1F6C, 0x04E0, 0x048A, 0x1F42, 0x1FE0,
+ 0x1FD1, 0x1F50, 0x04DF, 0x04DF, 0x1F50, 0x1FD1, 0x0000,
+ 0x1FE0, 0x1F42, 0x048A, 0x04E0, 0x1F6C, 0x1FC5, 0x0043,
+ 0x1FED, 0x1F31, 0x045A, 0x0505, 0x1F84, 0x1FB7, 0x0048,
+ 0x1FF9, 0x1F22, 0x042A, 0x0528, 0x1F9E, 0x1FA9, 0x004C,
+ 0x0005, 0x1F15, 0x03F8, 0x0548, 0x1FBB, 0x1F9B, 0x0050,
+ 0x0010, 0x1F0B, 0x03C5, 0x0567, 0x1FD9, 0x1F8D, 0x0053,
+ 0x001A, 0x1F02, 0x0392, 0x0583, 0x1FF9, 0x1F7F, 0x0057,
+ 0x0024, 0x1EFB, 0x035F, 0x059D, 0x001B, 0x1F71, 0x0059,
+ 0x002D, 0x1EF7, 0x032B, 0x05B3, 0x003F, 0x1F63, 0x005C,
+ 0x0036, 0x1EF4, 0x02F6, 0x05C7, 0x0065, 0x1F56, 0x005E,
+ 0x003D, 0x1EF2, 0x02C3, 0x05DA, 0x008C, 0x1F49, 0x005F,
+ 0x0044, 0x1EF3, 0x028F, 0x05E8, 0x00B5, 0x1F3D, 0x0060,
+ 0x004A, 0x1EF5, 0x025C, 0x05F5, 0x00DF, 0x1F31, 0x0060,
+ 0x004F, 0x1EF8, 0x0229, 0x05FF, 0x010B, 0x1F26, 0x0060,
+ 0x0054, 0x1EFD, 0x01F7, 0x0605, 0x0138, 0x1F1C, 0x005F,
+ 0x0058, 0x1F03, 0x01C6, 0x0609, 0x0166, 0x1F13, 0x005D,
+ /* Chroma */
+ 0x005B, 0x1F0A, 0x0195, 0x060C, 0x0195, 0x1F0A, 0x005B,
+ 0x005D, 0x1F13, 0x0166, 0x0609, 0x01C6, 0x1F03, 0x0058,
+ 0x005F, 0x1F1C, 0x0138, 0x0605, 0x01F7, 0x1EFD, 0x0054,
+ 0x0060, 0x1F26, 0x010B, 0x05FF, 0x0229, 0x1EF8, 0x004F,
+ 0x0060, 0x1F31, 0x00DF, 0x05F5, 0x025C, 0x1EF5, 0x004A,
+ 0x0060, 0x1F3D, 0x00B5, 0x05E8, 0x028F, 0x1EF3, 0x0044,
+ 0x005F, 0x1F49, 0x008C, 0x05DA, 0x02C3, 0x1EF2, 0x003D,
+ 0x005E, 0x1F56, 0x0065, 0x05C7, 0x02F6, 0x1EF4, 0x0036,
+ 0x005C, 0x1F63, 0x003F, 0x05B3, 0x032B, 0x1EF7, 0x002D,
+ 0x0059, 0x1F71, 0x001B, 0x059D, 0x035F, 0x1EFB, 0x0024,
+ 0x0057, 0x1F7F, 0x1FF9, 0x0583, 0x0392, 0x1F02, 0x001A,
+ 0x0053, 0x1F8D, 0x1FD9, 0x0567, 0x03C5, 0x1F0B, 0x0010,
+ 0x0050, 0x1F9B, 0x1FBB, 0x0548, 0x03F8, 0x1F15, 0x0005,
+ 0x004C, 0x1FA9, 0x1F9E, 0x0528, 0x042A, 0x1F22, 0x1FF9,
+ 0x0048, 0x1FB7, 0x1F84, 0x0505, 0x045A, 0x1F31, 0x1FED,
+ 0x0043, 0x1FC5, 0x1F6C, 0x04E0, 0x048A, 0x1F42, 0x1FE0,
+ 0x1FD1, 0x1F50, 0x04DF, 0x04DF, 0x1F50, 0x1FD1, 0x0000,
+ 0x1FE0, 0x1F42, 0x048A, 0x04E0, 0x1F6C, 0x1FC5, 0x0043,
+ 0x1FED, 0x1F31, 0x045A, 0x0505, 0x1F84, 0x1FB7, 0x0048,
+ 0x1FF9, 0x1F22, 0x042A, 0x0528, 0x1F9E, 0x1FA9, 0x004C,
+ 0x0005, 0x1F15, 0x03F8, 0x0548, 0x1FBB, 0x1F9B, 0x0050,
+ 0x0010, 0x1F0B, 0x03C5, 0x0567, 0x1FD9, 0x1F8D, 0x0053,
+ 0x001A, 0x1F02, 0x0392, 0x0583, 0x1FF9, 0x1F7F, 0x0057,
+ 0x0024, 0x1EFB, 0x035F, 0x059D, 0x001B, 0x1F71, 0x0059,
+ 0x002D, 0x1EF7, 0x032B, 0x05B3, 0x003F, 0x1F63, 0x005C,
+ 0x0036, 0x1EF4, 0x02F6, 0x05C7, 0x0065, 0x1F56, 0x005E,
+ 0x003D, 0x1EF2, 0x02C3, 0x05DA, 0x008C, 0x1F49, 0x005F,
+ 0x0044, 0x1EF3, 0x028F, 0x05E8, 0x00B5, 0x1F3D, 0x0060,
+ 0x004A, 0x1EF5, 0x025C, 0x05F5, 0x00DF, 0x1F31, 0x0060,
+ 0x004F, 0x1EF8, 0x0229, 0x05FF, 0x010B, 0x1F26, 0x0060,
+ 0x0054, 0x1EFD, 0x01F7, 0x0605, 0x0138, 0x1F1C, 0x005F,
+ 0x0058, 0x1F03, 0x01C6, 0x0609, 0x0166, 0x1F13, 0x005D,
+ },
+ [HS_LE_16_16_SCALE] = {
+ /* Luma */
+ 0x006E, 0x1F24, 0x013E, 0x0660, 0x013E, 0x1F24, 0x006E,
+ 0x006C, 0x1F33, 0x010B, 0x065D, 0x0172, 0x1F17, 0x0070,
+ 0x0069, 0x1F41, 0x00DA, 0x0659, 0x01A8, 0x1F0B, 0x0070,
+ 0x0066, 0x1F51, 0x00AA, 0x0650, 0x01DF, 0x1F00, 0x0070,
+ 0x0062, 0x1F61, 0x007D, 0x0644, 0x0217, 0x1EF6, 0x006F,
+ 0x005E, 0x1F71, 0x0051, 0x0636, 0x0250, 0x1EED, 0x006D,
+ 0x0059, 0x1F81, 0x0028, 0x0624, 0x028A, 0x1EE5, 0x006B,
+ 0x0054, 0x1F91, 0x0000, 0x060F, 0x02C5, 0x1EE0, 0x0067,
+ 0x004E, 0x1FA2, 0x1FDB, 0x05F6, 0x0300, 0x1EDC, 0x0063,
+ 0x0049, 0x1FB2, 0x1FB8, 0x05DB, 0x033B, 0x1EDA, 0x005D,
+ 0x0043, 0x1FC3, 0x1F98, 0x05BC, 0x0376, 0x1ED9, 0x0057,
+ 0x003D, 0x1FD3, 0x1F7A, 0x059B, 0x03B1, 0x1EDB, 0x004F,
+ 0x0036, 0x1FE2, 0x1F5E, 0x0578, 0x03EC, 0x1EDF, 0x0047,
+ 0x0030, 0x1FF1, 0x1F45, 0x0551, 0x0426, 0x1EE6, 0x003D,
+ 0x002A, 0x0000, 0x1F2E, 0x0528, 0x045F, 0x1EEE, 0x0033,
+ 0x0023, 0x000E, 0x1F19, 0x04FD, 0x0498, 0x1EFA, 0x0027,
+ 0x001B, 0x1F04, 0x04E1, 0x04E1, 0x1F04, 0x001B, 0x0000,
+ 0x0027, 0x1EFA, 0x0498, 0x04FD, 0x1F19, 0x000E, 0x0023,
+ 0x0033, 0x1EEE, 0x045F, 0x0528, 0x1F2E, 0x0000, 0x002A,
+ 0x003D, 0x1EE6, 0x0426, 0x0551, 0x1F45, 0x1FF1, 0x0030,
+ 0x0047, 0x1EDF, 0x03EC, 0x0578, 0x1F5E, 0x1FE2, 0x0036,
+ 0x004F, 0x1EDB, 0x03B1, 0x059B, 0x1F7A, 0x1FD3, 0x003D,
+ 0x0057, 0x1ED9, 0x0376, 0x05BC, 0x1F98, 0x1FC3, 0x0043,
+ 0x005D, 0x1EDA, 0x033B, 0x05DB, 0x1FB8, 0x1FB2, 0x0049,
+ 0x0063, 0x1EDC, 0x0300, 0x05F6, 0x1FDB, 0x1FA2, 0x004E,
+ 0x0067, 0x1EE0, 0x02C5, 0x060F, 0x0000, 0x1F91, 0x0054,
+ 0x006B, 0x1EE5, 0x028A, 0x0624, 0x0028, 0x1F81, 0x0059,
+ 0x006D, 0x1EED, 0x0250, 0x0636, 0x0051, 0x1F71, 0x005E,
+ 0x006F, 0x1EF6, 0x0217, 0x0644, 0x007D, 0x1F61, 0x0062,
+ 0x0070, 0x1F00, 0x01DF, 0x0650, 0x00AA, 0x1F51, 0x0066,
+ 0x0070, 0x1F0B, 0x01A8, 0x0659, 0x00DA, 0x1F41, 0x0069,
+ 0x0070, 0x1F17, 0x0172, 0x065D, 0x010B, 0x1F33, 0x006C,
+ /* Chroma */
+ 0x006E, 0x1F24, 0x013E, 0x0660, 0x013E, 0x1F24, 0x006E,
+ 0x006C, 0x1F33, 0x010B, 0x065D, 0x0172, 0x1F17, 0x0070,
+ 0x0069, 0x1F41, 0x00DA, 0x0659, 0x01A8, 0x1F0B, 0x0070,
+ 0x0066, 0x1F51, 0x00AA, 0x0650, 0x01DF, 0x1F00, 0x0070,
+ 0x0062, 0x1F61, 0x007D, 0x0644, 0x0217, 0x1EF6, 0x006F,
+ 0x005E, 0x1F71, 0x0051, 0x0636, 0x0250, 0x1EED, 0x006D,
+ 0x0059, 0x1F81, 0x0028, 0x0624, 0x028A, 0x1EE5, 0x006B,
+ 0x0054, 0x1F91, 0x0000, 0x060F, 0x02C5, 0x1EE0, 0x0067,
+ 0x004E, 0x1FA2, 0x1FDB, 0x05F6, 0x0300, 0x1EDC, 0x0063,
+ 0x0049, 0x1FB2, 0x1FB8, 0x05DB, 0x033B, 0x1EDA, 0x005D,
+ 0x0043, 0x1FC3, 0x1F98, 0x05BC, 0x0376, 0x1ED9, 0x0057,
+ 0x003D, 0x1FD3, 0x1F7A, 0x059B, 0x03B1, 0x1EDB, 0x004F,
+ 0x0036, 0x1FE2, 0x1F5E, 0x0578, 0x03EC, 0x1EDF, 0x0047,
+ 0x0030, 0x1FF1, 0x1F45, 0x0551, 0x0426, 0x1EE6, 0x003D,
+ 0x002A, 0x0000, 0x1F2E, 0x0528, 0x045F, 0x1EEE, 0x0033,
+ 0x0023, 0x000E, 0x1F19, 0x04FD, 0x0498, 0x1EFA, 0x0027,
+ 0x001B, 0x1F04, 0x04E1, 0x04E1, 0x1F04, 0x001B, 0x0000,
+ 0x0027, 0x1EFA, 0x0498, 0x04FD, 0x1F19, 0x000E, 0x0023,
+ 0x0033, 0x1EEE, 0x045F, 0x0528, 0x1F2E, 0x0000, 0x002A,
+ 0x003D, 0x1EE6, 0x0426, 0x0551, 0x1F45, 0x1FF1, 0x0030,
+ 0x0047, 0x1EDF, 0x03EC, 0x0578, 0x1F5E, 0x1FE2, 0x0036,
+ 0x004F, 0x1EDB, 0x03B1, 0x059B, 0x1F7A, 0x1FD3, 0x003D,
+ 0x0057, 0x1ED9, 0x0376, 0x05BC, 0x1F98, 0x1FC3, 0x0043,
+ 0x005D, 0x1EDA, 0x033B, 0x05DB, 0x1FB8, 0x1FB2, 0x0049,
+ 0x0063, 0x1EDC, 0x0300, 0x05F6, 0x1FDB, 0x1FA2, 0x004E,
+ 0x0067, 0x1EE0, 0x02C5, 0x060F, 0x0000, 0x1F91, 0x0054,
+ 0x006B, 0x1EE5, 0x028A, 0x0624, 0x0028, 0x1F81, 0x0059,
+ 0x006D, 0x1EED, 0x0250, 0x0636, 0x0051, 0x1F71, 0x005E,
+ 0x006F, 0x1EF6, 0x0217, 0x0644, 0x007D, 0x1F61, 0x0062,
+ 0x0070, 0x1F00, 0x01DF, 0x0650, 0x00AA, 0x1F51, 0x0066,
+ 0x0070, 0x1F0B, 0x01A8, 0x0659, 0x00DA, 0x1F41, 0x0069,
+ 0x0070, 0x1F17, 0x0172, 0x065D, 0x010B, 0x1F33, 0x006C,
+ },
+};
+
+/* vertical scaler coefficients */
+enum {
+ VS_UP_SCALE = 0,
+ VS_LT_9_16_SCALE,
+ VS_LT_10_16_SCALE,
+ VS_LT_11_16_SCALE,
+ VS_LT_12_16_SCALE,
+ VS_LT_13_16_SCALE,
+ VS_LT_14_16_SCALE,
+ VS_LT_15_16_SCALE,
+ VS_LT_16_16_SCALE,
+ VS_1_TO_1_SCALE,
+};
+
+static const u16 scaler_vs_coeffs[15][SC_NUM_PHASES * 2 * SC_V_NUM_TAPS] = {
+ [VS_UP_SCALE] = {
+ /* Luma */
+ 0x1FD1, 0x00B1, 0x06FC, 0x00B1, 0x1FD1,
+ 0x1FD8, 0x0085, 0x06F9, 0x00E1, 0x1FC9,
+ 0x1FDF, 0x005B, 0x06F2, 0x0114, 0x1FC0,
+ 0x1FE5, 0x0035, 0x06E5, 0x014A, 0x1FB7,
+ 0x1FEB, 0x0012, 0x06D3, 0x0182, 0x1FAE,
+ 0x1FF1, 0x1FF3, 0x06BA, 0x01BD, 0x1FA5,
+ 0x1FF5, 0x1FD7, 0x069D, 0x01FB, 0x1F9C,
+ 0x1FF9, 0x1FBE, 0x067C, 0x023A, 0x1F93,
+ 0x1FFD, 0x1FA8, 0x0656, 0x027B, 0x1F8A,
+ 0x0000, 0x1F95, 0x062B, 0x02BF, 0x1F81,
+ 0x0002, 0x1F86, 0x05FC, 0x0303, 0x1F79,
+ 0x0004, 0x1F79, 0x05CA, 0x0347, 0x1F72,
+ 0x0005, 0x1F6F, 0x0594, 0x038D, 0x1F6B,
+ 0x0006, 0x1F67, 0x055B, 0x03D2, 0x1F66,
+ 0x0007, 0x1F62, 0x051E, 0x0417, 0x1F62,
+ 0x0007, 0x1F5F, 0x04DF, 0x045C, 0x1F5F,
+ 0x1F5E, 0x04A2, 0x04A2, 0x1F5E, 0x0000,
+ 0x1F5F, 0x045C, 0x04DF, 0x1F5F, 0x0007,
+ 0x1F62, 0x0417, 0x051E, 0x1F62, 0x0007,
+ 0x1F66, 0x03D2, 0x055B, 0x1F67, 0x0006,
+ 0x1F6B, 0x038D, 0x0594, 0x1F6F, 0x0005,
+ 0x1F72, 0x0347, 0x05CA, 0x1F79, 0x0004,
+ 0x1F79, 0x0303, 0x05FC, 0x1F86, 0x0002,
+ 0x1F81, 0x02BF, 0x062B, 0x1F95, 0x0000,
+ 0x1F8A, 0x027B, 0x0656, 0x1FA8, 0x1FFD,
+ 0x1F93, 0x023A, 0x067C, 0x1FBE, 0x1FF9,
+ 0x1F9C, 0x01FB, 0x069D, 0x1FD7, 0x1FF5,
+ 0x1FA5, 0x01BD, 0x06BA, 0x1FF3, 0x1FF1,
+ 0x1FAE, 0x0182, 0x06D3, 0x0012, 0x1FEB,
+ 0x1FB7, 0x014A, 0x06E5, 0x0035, 0x1FE5,
+ 0x1FC0, 0x0114, 0x06F2, 0x005B, 0x1FDF,
+ 0x1FC9, 0x00E1, 0x06F9, 0x0085, 0x1FD8,
+ /* Chroma */
+ 0x1FD1, 0x00B1, 0x06FC, 0x00B1, 0x1FD1,
+ 0x1FD8, 0x0085, 0x06F9, 0x00E1, 0x1FC9,
+ 0x1FDF, 0x005B, 0x06F2, 0x0114, 0x1FC0,
+ 0x1FE5, 0x0035, 0x06E5, 0x014A, 0x1FB7,
+ 0x1FEB, 0x0012, 0x06D3, 0x0182, 0x1FAE,
+ 0x1FF1, 0x1FF3, 0x06BA, 0x01BD, 0x1FA5,
+ 0x1FF5, 0x1FD7, 0x069D, 0x01FB, 0x1F9C,
+ 0x1FF9, 0x1FBE, 0x067C, 0x023A, 0x1F93,
+ 0x1FFD, 0x1FA8, 0x0656, 0x027B, 0x1F8A,
+ 0x0000, 0x1F95, 0x062B, 0x02BF, 0x1F81,
+ 0x0002, 0x1F86, 0x05FC, 0x0303, 0x1F79,
+ 0x0004, 0x1F79, 0x05CA, 0x0347, 0x1F72,
+ 0x0005, 0x1F6F, 0x0594, 0x038D, 0x1F6B,
+ 0x0006, 0x1F67, 0x055B, 0x03D2, 0x1F66,
+ 0x0007, 0x1F62, 0x051E, 0x0417, 0x1F62,
+ 0x0007, 0x1F5F, 0x04DF, 0x045C, 0x1F5F,
+ 0x1F5E, 0x04A2, 0x04A2, 0x1F5E, 0x0000,
+ 0x1F5F, 0x045C, 0x04DF, 0x1F5F, 0x0007,
+ 0x1F62, 0x0417, 0x051E, 0x1F62, 0x0007,
+ 0x1F66, 0x03D2, 0x055B, 0x1F67, 0x0006,
+ 0x1F6B, 0x038D, 0x0594, 0x1F6F, 0x0005,
+ 0x1F72, 0x0347, 0x05CA, 0x1F79, 0x0004,
+ 0x1F79, 0x0303, 0x05FC, 0x1F86, 0x0002,
+ 0x1F81, 0x02BF, 0x062B, 0x1F95, 0x0000,
+ 0x1F8A, 0x027B, 0x0656, 0x1FA8, 0x1FFD,
+ 0x1F93, 0x023A, 0x067C, 0x1FBE, 0x1FF9,
+ 0x1F9C, 0x01FB, 0x069D, 0x1FD7, 0x1FF5,
+ 0x1FA5, 0x01BD, 0x06BA, 0x1FF3, 0x1FF1,
+ 0x1FAE, 0x0182, 0x06D3, 0x0012, 0x1FEB,
+ 0x1FB7, 0x014A, 0x06E5, 0x0035, 0x1FE5,
+ 0x1FC0, 0x0114, 0x06F2, 0x005B, 0x1FDF,
+ 0x1FC9, 0x00E1, 0x06F9, 0x0085, 0x1FD8,
+ },
+ [VS_LT_9_16_SCALE] = {
+ /* Luma */
+ 0x001C, 0x01F6, 0x03DC, 0x01F6, 0x001C,
+ 0x0018, 0x01DF, 0x03DB, 0x020C, 0x0022,
+ 0x0013, 0x01C9, 0x03D9, 0x0223, 0x0028,
+ 0x000F, 0x01B3, 0x03D6, 0x023A, 0x002E,
+ 0x000C, 0x019D, 0x03D2, 0x0250, 0x0035,
+ 0x0009, 0x0188, 0x03CC, 0x0266, 0x003D,
+ 0x0006, 0x0173, 0x03C5, 0x027D, 0x0045,
+ 0x0004, 0x015E, 0x03BD, 0x0293, 0x004E,
+ 0x0002, 0x014A, 0x03B4, 0x02A8, 0x0058,
+ 0x0000, 0x0136, 0x03AA, 0x02BE, 0x0062,
+ 0x1FFF, 0x0123, 0x039E, 0x02D3, 0x006D,
+ 0x1FFE, 0x0110, 0x0392, 0x02E8, 0x0078,
+ 0x1FFD, 0x00FE, 0x0384, 0x02FC, 0x0085,
+ 0x1FFD, 0x00ED, 0x0376, 0x030F, 0x0091,
+ 0x1FFC, 0x00DC, 0x0367, 0x0322, 0x009F,
+ 0x1FFC, 0x00CC, 0x0357, 0x0334, 0x00AD,
+ 0x00BC, 0x0344, 0x0344, 0x00BC, 0x0000,
+ 0x00AD, 0x0334, 0x0357, 0x00CC, 0x1FFC,
+ 0x009F, 0x0322, 0x0367, 0x00DC, 0x1FFC,
+ 0x0091, 0x030F, 0x0376, 0x00ED, 0x1FFD,
+ 0x0085, 0x02FC, 0x0384, 0x00FE, 0x1FFD,
+ 0x0078, 0x02E8, 0x0392, 0x0110, 0x1FFE,
+ 0x006D, 0x02D3, 0x039E, 0x0123, 0x1FFF,
+ 0x0062, 0x02BE, 0x03AA, 0x0136, 0x0000,
+ 0x0058, 0x02A8, 0x03B4, 0x014A, 0x0002,
+ 0x004E, 0x0293, 0x03BD, 0x015E, 0x0004,
+ 0x0045, 0x027D, 0x03C5, 0x0173, 0x0006,
+ 0x003D, 0x0266, 0x03CC, 0x0188, 0x0009,
+ 0x0035, 0x0250, 0x03D2, 0x019D, 0x000C,
+ 0x002E, 0x023A, 0x03D6, 0x01B3, 0x000F,
+ 0x0028, 0x0223, 0x03D9, 0x01C9, 0x0013,
+ 0x0022, 0x020C, 0x03DB, 0x01DF, 0x0018,
+ /* Chroma */
+ 0x001C, 0x01F6, 0x03DC, 0x01F6, 0x001C,
+ 0x0018, 0x01DF, 0x03DB, 0x020C, 0x0022,
+ 0x0013, 0x01C9, 0x03D9, 0x0223, 0x0028,
+ 0x000F, 0x01B3, 0x03D6, 0x023A, 0x002E,
+ 0x000C, 0x019D, 0x03D2, 0x0250, 0x0035,
+ 0x0009, 0x0188, 0x03CC, 0x0266, 0x003D,
+ 0x0006, 0x0173, 0x03C5, 0x027D, 0x0045,
+ 0x0004, 0x015E, 0x03BD, 0x0293, 0x004E,
+ 0x0002, 0x014A, 0x03B4, 0x02A8, 0x0058,
+ 0x0000, 0x0136, 0x03AA, 0x02BE, 0x0062,
+ 0x1FFF, 0x0123, 0x039E, 0x02D3, 0x006D,
+ 0x1FFE, 0x0110, 0x0392, 0x02E8, 0x0078,
+ 0x1FFD, 0x00FE, 0x0384, 0x02FC, 0x0085,
+ 0x1FFD, 0x00ED, 0x0376, 0x030F, 0x0091,
+ 0x1FFC, 0x00DC, 0x0367, 0x0322, 0x009F,
+ 0x1FFC, 0x00CC, 0x0357, 0x0334, 0x00AD,
+ 0x00BC, 0x0344, 0x0344, 0x00BC, 0x0000,
+ 0x00AD, 0x0334, 0x0357, 0x00CC, 0x1FFC,
+ 0x009F, 0x0322, 0x0367, 0x00DC, 0x1FFC,
+ 0x0091, 0x030F, 0x0376, 0x00ED, 0x1FFD,
+ 0x0085, 0x02FC, 0x0384, 0x00FE, 0x1FFD,
+ 0x0078, 0x02E8, 0x0392, 0x0110, 0x1FFE,
+ 0x006D, 0x02D3, 0x039E, 0x0123, 0x1FFF,
+ 0x0062, 0x02BE, 0x03AA, 0x0136, 0x0000,
+ 0x0058, 0x02A8, 0x03B4, 0x014A, 0x0002,
+ 0x004E, 0x0293, 0x03BD, 0x015E, 0x0004,
+ 0x0045, 0x027D, 0x03C5, 0x0173, 0x0006,
+ 0x003D, 0x0266, 0x03CC, 0x0188, 0x0009,
+ 0x0035, 0x0250, 0x03D2, 0x019D, 0x000C,
+ 0x002E, 0x023A, 0x03D6, 0x01B3, 0x000F,
+ 0x0028, 0x0223, 0x03D9, 0x01C9, 0x0013,
+ 0x0022, 0x020C, 0x03DB, 0x01DF, 0x0018,
+ },
+ [VS_LT_10_16_SCALE] = {
+ /* Luma */
+ 0x0003, 0x01E9, 0x0428, 0x01E9, 0x0003,
+ 0x0000, 0x01D0, 0x0426, 0x0203, 0x0007,
+ 0x1FFD, 0x01B7, 0x0424, 0x021C, 0x000C,
+ 0x1FFB, 0x019E, 0x0420, 0x0236, 0x0011,
+ 0x1FF9, 0x0186, 0x041A, 0x0250, 0x0017,
+ 0x1FF7, 0x016E, 0x0414, 0x026A, 0x001D,
+ 0x1FF6, 0x0157, 0x040B, 0x0284, 0x0024,
+ 0x1FF5, 0x0140, 0x0401, 0x029E, 0x002C,
+ 0x1FF4, 0x012A, 0x03F6, 0x02B7, 0x0035,
+ 0x1FF4, 0x0115, 0x03E9, 0x02D0, 0x003E,
+ 0x1FF4, 0x0100, 0x03DB, 0x02E9, 0x0048,
+ 0x1FF4, 0x00EC, 0x03CC, 0x0301, 0x0053,
+ 0x1FF4, 0x00D9, 0x03BC, 0x0318, 0x005F,
+ 0x1FF5, 0x00C7, 0x03AA, 0x032F, 0x006B,
+ 0x1FF6, 0x00B5, 0x0398, 0x0345, 0x0078,
+ 0x1FF6, 0x00A5, 0x0384, 0x035B, 0x0086,
+ 0x0094, 0x036C, 0x036C, 0x0094, 0x0000,
+ 0x0086, 0x035B, 0x0384, 0x00A5, 0x1FF6,
+ 0x0078, 0x0345, 0x0398, 0x00B5, 0x1FF6,
+ 0x006B, 0x032F, 0x03AA, 0x00C7, 0x1FF5,
+ 0x005F, 0x0318, 0x03BC, 0x00D9, 0x1FF4,
+ 0x0053, 0x0301, 0x03CC, 0x00EC, 0x1FF4,
+ 0x0048, 0x02E9, 0x03DB, 0x0100, 0x1FF4,
+ 0x003E, 0x02D0, 0x03E9, 0x0115, 0x1FF4,
+ 0x0035, 0x02B7, 0x03F6, 0x012A, 0x1FF4,
+ 0x002C, 0x029E, 0x0401, 0x0140, 0x1FF5,
+ 0x0024, 0x0284, 0x040B, 0x0157, 0x1FF6,
+ 0x001D, 0x026A, 0x0414, 0x016E, 0x1FF7,
+ 0x0017, 0x0250, 0x041A, 0x0186, 0x1FF9,
+ 0x0011, 0x0236, 0x0420, 0x019E, 0x1FFB,
+ 0x000C, 0x021C, 0x0424, 0x01B7, 0x1FFD,
+ 0x0007, 0x0203, 0x0426, 0x01D0, 0x0000,
+ /* Chroma */
+ 0x0003, 0x01E9, 0x0428, 0x01E9, 0x0003,
+ 0x0000, 0x01D0, 0x0426, 0x0203, 0x0007,
+ 0x1FFD, 0x01B7, 0x0424, 0x021C, 0x000C,
+ 0x1FFB, 0x019E, 0x0420, 0x0236, 0x0011,
+ 0x1FF9, 0x0186, 0x041A, 0x0250, 0x0017,
+ 0x1FF7, 0x016E, 0x0414, 0x026A, 0x001D,
+ 0x1FF6, 0x0157, 0x040B, 0x0284, 0x0024,
+ 0x1FF5, 0x0140, 0x0401, 0x029E, 0x002C,
+ 0x1FF4, 0x012A, 0x03F6, 0x02B7, 0x0035,
+ 0x1FF4, 0x0115, 0x03E9, 0x02D0, 0x003E,
+ 0x1FF4, 0x0100, 0x03DB, 0x02E9, 0x0048,
+ 0x1FF4, 0x00EC, 0x03CC, 0x0301, 0x0053,
+ 0x1FF4, 0x00D9, 0x03BC, 0x0318, 0x005F,
+ 0x1FF5, 0x00C7, 0x03AA, 0x032F, 0x006B,
+ 0x1FF6, 0x00B5, 0x0398, 0x0345, 0x0078,
+ 0x1FF6, 0x00A5, 0x0384, 0x035B, 0x0086,
+ 0x0094, 0x036C, 0x036C, 0x0094, 0x0000,
+ 0x0086, 0x035B, 0x0384, 0x00A5, 0x1FF6,
+ 0x0078, 0x0345, 0x0398, 0x00B5, 0x1FF6,
+ 0x006B, 0x032F, 0x03AA, 0x00C7, 0x1FF5,
+ 0x005F, 0x0318, 0x03BC, 0x00D9, 0x1FF4,
+ 0x0053, 0x0301, 0x03CC, 0x00EC, 0x1FF4,
+ 0x0048, 0x02E9, 0x03DB, 0x0100, 0x1FF4,
+ 0x003E, 0x02D0, 0x03E9, 0x0115, 0x1FF4,
+ 0x0035, 0x02B7, 0x03F6, 0x012A, 0x1FF4,
+ 0x002C, 0x029E, 0x0401, 0x0140, 0x1FF5,
+ 0x0024, 0x0284, 0x040B, 0x0157, 0x1FF6,
+ 0x001D, 0x026A, 0x0414, 0x016E, 0x1FF7,
+ 0x0017, 0x0250, 0x041A, 0x0186, 0x1FF9,
+ 0x0011, 0x0236, 0x0420, 0x019E, 0x1FFB,
+ 0x000C, 0x021C, 0x0424, 0x01B7, 0x1FFD,
+ 0x0007, 0x0203, 0x0426, 0x01D0, 0x0000,
+ },
+ [VS_LT_11_16_SCALE] = {
+ /* Luma */
+ 0x1FEC, 0x01D6, 0x047C, 0x01D6, 0x1FEC,
+ 0x1FEA, 0x01BA, 0x047B, 0x01F3, 0x1FEE,
+ 0x1FE9, 0x019D, 0x0478, 0x0211, 0x1FF1,
+ 0x1FE8, 0x0182, 0x0473, 0x022E, 0x1FF5,
+ 0x1FE8, 0x0167, 0x046C, 0x024C, 0x1FF9,
+ 0x1FE8, 0x014D, 0x0464, 0x026A, 0x1FFD,
+ 0x1FE8, 0x0134, 0x0459, 0x0288, 0x0003,
+ 0x1FE9, 0x011B, 0x044D, 0x02A6, 0x0009,
+ 0x1FE9, 0x0104, 0x0440, 0x02C3, 0x0010,
+ 0x1FEA, 0x00ED, 0x0430, 0x02E1, 0x0018,
+ 0x1FEB, 0x00D7, 0x0420, 0x02FD, 0x0021,
+ 0x1FED, 0x00C2, 0x040D, 0x0319, 0x002B,
+ 0x1FEE, 0x00AE, 0x03F9, 0x0336, 0x0035,
+ 0x1FF0, 0x009C, 0x03E3, 0x0350, 0x0041,
+ 0x1FF1, 0x008A, 0x03CD, 0x036B, 0x004D,
+ 0x1FF3, 0x0079, 0x03B5, 0x0384, 0x005B,
+ 0x0069, 0x0397, 0x0397, 0x0069, 0x0000,
+ 0x005B, 0x0384, 0x03B5, 0x0079, 0x1FF3,
+ 0x004D, 0x036B, 0x03CD, 0x008A, 0x1FF1,
+ 0x0041, 0x0350, 0x03E3, 0x009C, 0x1FF0,
+ 0x0035, 0x0336, 0x03F9, 0x00AE, 0x1FEE,
+ 0x002B, 0x0319, 0x040D, 0x00C2, 0x1FED,
+ 0x0021, 0x02FD, 0x0420, 0x00D7, 0x1FEB,
+ 0x0018, 0x02E1, 0x0430, 0x00ED, 0x1FEA,
+ 0x0010, 0x02C3, 0x0440, 0x0104, 0x1FE9,
+ 0x0009, 0x02A6, 0x044D, 0x011B, 0x1FE9,
+ 0x0003, 0x0288, 0x0459, 0x0134, 0x1FE8,
+ 0x1FFD, 0x026A, 0x0464, 0x014D, 0x1FE8,
+ 0x1FF9, 0x024C, 0x046C, 0x0167, 0x1FE8,
+ 0x1FF5, 0x022E, 0x0473, 0x0182, 0x1FE8,
+ 0x1FF1, 0x0211, 0x0478, 0x019D, 0x1FE9,
+ 0x1FEE, 0x01F3, 0x047B, 0x01BA, 0x1FEA,
+ /* Chroma */
+ 0x1FEC, 0x01D6, 0x047C, 0x01D6, 0x1FEC,
+ 0x1FEA, 0x01BA, 0x047B, 0x01F3, 0x1FEE,
+ 0x1FE9, 0x019D, 0x0478, 0x0211, 0x1FF1,
+ 0x1FE8, 0x0182, 0x0473, 0x022E, 0x1FF5,
+ 0x1FE8, 0x0167, 0x046C, 0x024C, 0x1FF9,
+ 0x1FE8, 0x014D, 0x0464, 0x026A, 0x1FFD,
+ 0x1FE8, 0x0134, 0x0459, 0x0288, 0x0003,
+ 0x1FE9, 0x011B, 0x044D, 0x02A6, 0x0009,
+ 0x1FE9, 0x0104, 0x0440, 0x02C3, 0x0010,
+ 0x1FEA, 0x00ED, 0x0430, 0x02E1, 0x0018,
+ 0x1FEB, 0x00D7, 0x0420, 0x02FD, 0x0021,
+ 0x1FED, 0x00C2, 0x040D, 0x0319, 0x002B,
+ 0x1FEE, 0x00AE, 0x03F9, 0x0336, 0x0035,
+ 0x1FF0, 0x009C, 0x03E3, 0x0350, 0x0041,
+ 0x1FF1, 0x008A, 0x03CD, 0x036B, 0x004D,
+ 0x1FF3, 0x0079, 0x03B5, 0x0384, 0x005B,
+ 0x0069, 0x0397, 0x0397, 0x0069, 0x0000,
+ 0x005B, 0x0384, 0x03B5, 0x0079, 0x1FF3,
+ 0x004D, 0x036B, 0x03CD, 0x008A, 0x1FF1,
+ 0x0041, 0x0350, 0x03E3, 0x009C, 0x1FF0,
+ 0x0035, 0x0336, 0x03F9, 0x00AE, 0x1FEE,
+ 0x002B, 0x0319, 0x040D, 0x00C2, 0x1FED,
+ 0x0021, 0x02FD, 0x0420, 0x00D7, 0x1FEB,
+ 0x0018, 0x02E1, 0x0430, 0x00ED, 0x1FEA,
+ 0x0010, 0x02C3, 0x0440, 0x0104, 0x1FE9,
+ 0x0009, 0x02A6, 0x044D, 0x011B, 0x1FE9,
+ 0x0003, 0x0288, 0x0459, 0x0134, 0x1FE8,
+ 0x1FFD, 0x026A, 0x0464, 0x014D, 0x1FE8,
+ 0x1FF9, 0x024C, 0x046C, 0x0167, 0x1FE8,
+ 0x1FF5, 0x022E, 0x0473, 0x0182, 0x1FE8,
+ 0x1FF1, 0x0211, 0x0478, 0x019D, 0x1FE9,
+ 0x1FEE, 0x01F3, 0x047B, 0x01BA, 0x1FEA,
+ },
+ [VS_LT_12_16_SCALE] = {
+ /* Luma */
+ 0x1FD8, 0x01BC, 0x04D8, 0x01BC, 0x1FD8,
+ 0x1FD8, 0x019C, 0x04D8, 0x01DC, 0x1FD8,
+ 0x1FD8, 0x017D, 0x04D4, 0x01FE, 0x1FD9,
+ 0x1FD9, 0x015E, 0x04CF, 0x0220, 0x1FDA,
+ 0x1FDB, 0x0141, 0x04C7, 0x0241, 0x1FDC,
+ 0x1FDC, 0x0125, 0x04BC, 0x0264, 0x1FDF,
+ 0x1FDE, 0x0109, 0x04B0, 0x0286, 0x1FE3,
+ 0x1FE0, 0x00EF, 0x04A1, 0x02A9, 0x1FE7,
+ 0x1FE2, 0x00D6, 0x0491, 0x02CB, 0x1FEC,
+ 0x1FE4, 0x00BE, 0x047E, 0x02EE, 0x1FF2,
+ 0x1FE6, 0x00A7, 0x046A, 0x030F, 0x1FFA,
+ 0x1FE9, 0x0092, 0x0453, 0x0330, 0x0002,
+ 0x1FEB, 0x007E, 0x043B, 0x0351, 0x000B,
+ 0x1FED, 0x006B, 0x0421, 0x0372, 0x0015,
+ 0x1FEF, 0x005A, 0x0406, 0x0391, 0x0020,
+ 0x1FF1, 0x0049, 0x03EA, 0x03AF, 0x002D,
+ 0x003A, 0x03C6, 0x03C6, 0x003A, 0x0000,
+ 0x002D, 0x03AF, 0x03EA, 0x0049, 0x1FF1,
+ 0x0020, 0x0391, 0x0406, 0x005A, 0x1FEF,
+ 0x0015, 0x0372, 0x0421, 0x006B, 0x1FED,
+ 0x000B, 0x0351, 0x043B, 0x007E, 0x1FEB,
+ 0x0002, 0x0330, 0x0453, 0x0092, 0x1FE9,
+ 0x1FFA, 0x030F, 0x046A, 0x00A7, 0x1FE6,
+ 0x1FF2, 0x02EE, 0x047E, 0x00BE, 0x1FE4,
+ 0x1FEC, 0x02CB, 0x0491, 0x00D6, 0x1FE2,
+ 0x1FE7, 0x02A9, 0x04A1, 0x00EF, 0x1FE0,
+ 0x1FE3, 0x0286, 0x04B0, 0x0109, 0x1FDE,
+ 0x1FDF, 0x0264, 0x04BC, 0x0125, 0x1FDC,
+ 0x1FDC, 0x0241, 0x04C7, 0x0141, 0x1FDB,
+ 0x1FDA, 0x0220, 0x04CF, 0x015E, 0x1FD9,
+ 0x1FD9, 0x01FE, 0x04D4, 0x017D, 0x1FD8,
+ 0x1FD8, 0x01DC, 0x04D8, 0x019C, 0x1FD8,
+ /* Chroma */
+ 0x1FD8, 0x01BC, 0x04D8, 0x01BC, 0x1FD8,
+ 0x1FD8, 0x019C, 0x04D8, 0x01DC, 0x1FD8,
+ 0x1FD8, 0x017D, 0x04D4, 0x01FE, 0x1FD9,
+ 0x1FD9, 0x015E, 0x04CF, 0x0220, 0x1FDA,
+ 0x1FDB, 0x0141, 0x04C7, 0x0241, 0x1FDC,
+ 0x1FDC, 0x0125, 0x04BC, 0x0264, 0x1FDF,
+ 0x1FDE, 0x0109, 0x04B0, 0x0286, 0x1FE3,
+ 0x1FE0, 0x00EF, 0x04A1, 0x02A9, 0x1FE7,
+ 0x1FE2, 0x00D6, 0x0491, 0x02CB, 0x1FEC,
+ 0x1FE4, 0x00BE, 0x047E, 0x02EE, 0x1FF2,
+ 0x1FE6, 0x00A7, 0x046A, 0x030F, 0x1FFA,
+ 0x1FE9, 0x0092, 0x0453, 0x0330, 0x0002,
+ 0x1FEB, 0x007E, 0x043B, 0x0351, 0x000B,
+ 0x1FED, 0x006B, 0x0421, 0x0372, 0x0015,
+ 0x1FEF, 0x005A, 0x0406, 0x0391, 0x0020,
+ 0x1FF1, 0x0049, 0x03EA, 0x03AF, 0x002D,
+ 0x003A, 0x03C6, 0x03C6, 0x003A, 0x0000,
+ 0x002D, 0x03AF, 0x03EA, 0x0049, 0x1FF1,
+ 0x0020, 0x0391, 0x0406, 0x005A, 0x1FEF,
+ 0x0015, 0x0372, 0x0421, 0x006B, 0x1FED,
+ 0x000B, 0x0351, 0x043B, 0x007E, 0x1FEB,
+ 0x0002, 0x0330, 0x0453, 0x0092, 0x1FE9,
+ 0x1FFA, 0x030F, 0x046A, 0x00A7, 0x1FE6,
+ 0x1FF2, 0x02EE, 0x047E, 0x00BE, 0x1FE4,
+ 0x1FEC, 0x02CB, 0x0491, 0x00D6, 0x1FE2,
+ 0x1FE7, 0x02A9, 0x04A1, 0x00EF, 0x1FE0,
+ 0x1FE3, 0x0286, 0x04B0, 0x0109, 0x1FDE,
+ 0x1FDF, 0x0264, 0x04BC, 0x0125, 0x1FDC,
+ 0x1FDC, 0x0241, 0x04C7, 0x0141, 0x1FDB,
+ 0x1FDA, 0x0220, 0x04CF, 0x015E, 0x1FD9,
+ 0x1FD9, 0x01FE, 0x04D4, 0x017D, 0x1FD8,
+ 0x1FD8, 0x01DC, 0x04D8, 0x019C, 0x1FD8,
+ },
+ [VS_LT_13_16_SCALE] = {
+ /* Luma */
+ 0x1FC8, 0x0199, 0x053E, 0x0199, 0x1FC8,
+ 0x1FCA, 0x0175, 0x053E, 0x01BD, 0x1FC6,
+ 0x1FCD, 0x0153, 0x0539, 0x01E2, 0x1FC5,
+ 0x1FCF, 0x0132, 0x0532, 0x0209, 0x1FC4,
+ 0x1FD2, 0x0112, 0x0529, 0x022F, 0x1FC4,
+ 0x1FD5, 0x00F4, 0x051C, 0x0256, 0x1FC5,
+ 0x1FD8, 0x00D7, 0x050D, 0x027E, 0x1FC6,
+ 0x1FDC, 0x00BB, 0x04FB, 0x02A6, 0x1FC8,
+ 0x1FDF, 0x00A1, 0x04E7, 0x02CE, 0x1FCB,
+ 0x1FE2, 0x0089, 0x04D1, 0x02F5, 0x1FCF,
+ 0x1FE5, 0x0072, 0x04B8, 0x031D, 0x1FD4,
+ 0x1FE8, 0x005D, 0x049E, 0x0344, 0x1FD9,
+ 0x1FEB, 0x0049, 0x0480, 0x036B, 0x1FE1,
+ 0x1FEE, 0x0037, 0x0462, 0x0390, 0x1FE9,
+ 0x1FF0, 0x0026, 0x0442, 0x03B6, 0x1FF2,
+ 0x1FF2, 0x0017, 0x0420, 0x03DA, 0x1FFD,
+ 0x0009, 0x03F7, 0x03F7, 0x0009, 0x0000,
+ 0x1FFD, 0x03DA, 0x0420, 0x0017, 0x1FF2,
+ 0x1FF2, 0x03B6, 0x0442, 0x0026, 0x1FF0,
+ 0x1FE9, 0x0390, 0x0462, 0x0037, 0x1FEE,
+ 0x1FE1, 0x036B, 0x0480, 0x0049, 0x1FEB,
+ 0x1FD9, 0x0344, 0x049E, 0x005D, 0x1FE8,
+ 0x1FD4, 0x031D, 0x04B8, 0x0072, 0x1FE5,
+ 0x1FCF, 0x02F5, 0x04D1, 0x0089, 0x1FE2,
+ 0x1FCB, 0x02CE, 0x04E7, 0x00A1, 0x1FDF,
+ 0x1FC8, 0x02A6, 0x04FB, 0x00BB, 0x1FDC,
+ 0x1FC6, 0x027E, 0x050D, 0x00D7, 0x1FD8,
+ 0x1FC5, 0x0256, 0x051C, 0x00F4, 0x1FD5,
+ 0x1FC4, 0x022F, 0x0529, 0x0112, 0x1FD2,
+ 0x1FC4, 0x0209, 0x0532, 0x0132, 0x1FCF,
+ 0x1FC5, 0x01E2, 0x0539, 0x0153, 0x1FCD,
+ 0x1FC6, 0x01BD, 0x053E, 0x0175, 0x1FCA,
+ /* Chroma */
+ 0x1FC8, 0x0199, 0x053E, 0x0199, 0x1FC8,
+ 0x1FCA, 0x0175, 0x053E, 0x01BD, 0x1FC6,
+ 0x1FCD, 0x0153, 0x0539, 0x01E2, 0x1FC5,
+ 0x1FCF, 0x0132, 0x0532, 0x0209, 0x1FC4,
+ 0x1FD2, 0x0112, 0x0529, 0x022F, 0x1FC4,
+ 0x1FD5, 0x00F4, 0x051C, 0x0256, 0x1FC5,
+ 0x1FD8, 0x00D7, 0x050D, 0x027E, 0x1FC6,
+ 0x1FDC, 0x00BB, 0x04FB, 0x02A6, 0x1FC8,
+ 0x1FDF, 0x00A1, 0x04E7, 0x02CE, 0x1FCB,
+ 0x1FE2, 0x0089, 0x04D1, 0x02F5, 0x1FCF,
+ 0x1FE5, 0x0072, 0x04B8, 0x031D, 0x1FD4,
+ 0x1FE8, 0x005D, 0x049E, 0x0344, 0x1FD9,
+ 0x1FEB, 0x0049, 0x0480, 0x036B, 0x1FE1,
+ 0x1FEE, 0x0037, 0x0462, 0x0390, 0x1FE9,
+ 0x1FF0, 0x0026, 0x0442, 0x03B6, 0x1FF2,
+ 0x1FF2, 0x0017, 0x0420, 0x03DA, 0x1FFD,
+ 0x0009, 0x03F7, 0x03F7, 0x0009, 0x0000,
+ 0x1FFD, 0x03DA, 0x0420, 0x0017, 0x1FF2,
+ 0x1FF2, 0x03B6, 0x0442, 0x0026, 0x1FF0,
+ 0x1FE9, 0x0390, 0x0462, 0x0037, 0x1FEE,
+ 0x1FE1, 0x036B, 0x0480, 0x0049, 0x1FEB,
+ 0x1FD9, 0x0344, 0x049E, 0x005D, 0x1FE8,
+ 0x1FD4, 0x031D, 0x04B8, 0x0072, 0x1FE5,
+ 0x1FCF, 0x02F5, 0x04D1, 0x0089, 0x1FE2,
+ 0x1FCB, 0x02CE, 0x04E7, 0x00A1, 0x1FDF,
+ 0x1FC8, 0x02A6, 0x04FB, 0x00BB, 0x1FDC,
+ 0x1FC6, 0x027E, 0x050D, 0x00D7, 0x1FD8,
+ 0x1FC5, 0x0256, 0x051C, 0x00F4, 0x1FD5,
+ 0x1FC4, 0x022F, 0x0529, 0x0112, 0x1FD2,
+ 0x1FC4, 0x0209, 0x0532, 0x0132, 0x1FCF,
+ 0x1FC5, 0x01E2, 0x0539, 0x0153, 0x1FCD,
+ 0x1FC6, 0x01BD, 0x053E, 0x0175, 0x1FCA,
+ },
+ [VS_LT_14_16_SCALE] = {
+ /* Luma */
+ 0x1FBF, 0x016C, 0x05AA, 0x016C, 0x1FBF,
+ 0x1FC3, 0x0146, 0x05A8, 0x0194, 0x1FBB,
+ 0x1FC7, 0x0121, 0x05A3, 0x01BD, 0x1FB8,
+ 0x1FCB, 0x00FD, 0x059B, 0x01E8, 0x1FB5,
+ 0x1FD0, 0x00DC, 0x058F, 0x0213, 0x1FB2,
+ 0x1FD4, 0x00BC, 0x0580, 0x0240, 0x1FB0,
+ 0x1FD8, 0x009E, 0x056E, 0x026D, 0x1FAF,
+ 0x1FDC, 0x0082, 0x055A, 0x029A, 0x1FAE,
+ 0x1FE0, 0x0067, 0x0542, 0x02C9, 0x1FAE,
+ 0x1FE4, 0x004F, 0x0528, 0x02F6, 0x1FAF,
+ 0x1FE8, 0x0038, 0x050A, 0x0325, 0x1FB1,
+ 0x1FEB, 0x0024, 0x04EB, 0x0352, 0x1FB4,
+ 0x1FEE, 0x0011, 0x04C8, 0x0380, 0x1FB9,
+ 0x1FF1, 0x0000, 0x04A4, 0x03AC, 0x1FBF,
+ 0x1FF4, 0x1FF1, 0x047D, 0x03D8, 0x1FC6,
+ 0x1FF6, 0x1FE4, 0x0455, 0x0403, 0x1FCE,
+ 0x1FD8, 0x0428, 0x0428, 0x1FD8, 0x0000,
+ 0x1FCE, 0x0403, 0x0455, 0x1FE4, 0x1FF6,
+ 0x1FC6, 0x03D8, 0x047D, 0x1FF1, 0x1FF4,
+ 0x1FBF, 0x03AC, 0x04A4, 0x0000, 0x1FF1,
+ 0x1FB9, 0x0380, 0x04C8, 0x0011, 0x1FEE,
+ 0x1FB4, 0x0352, 0x04EB, 0x0024, 0x1FEB,
+ 0x1FB1, 0x0325, 0x050A, 0x0038, 0x1FE8,
+ 0x1FAF, 0x02F6, 0x0528, 0x004F, 0x1FE4,
+ 0x1FAE, 0x02C9, 0x0542, 0x0067, 0x1FE0,
+ 0x1FAE, 0x029A, 0x055A, 0x0082, 0x1FDC,
+ 0x1FAF, 0x026D, 0x056E, 0x009E, 0x1FD8,
+ 0x1FB0, 0x0240, 0x0580, 0x00BC, 0x1FD4,
+ 0x1FB2, 0x0213, 0x058F, 0x00DC, 0x1FD0,
+ 0x1FB5, 0x01E8, 0x059B, 0x00FD, 0x1FCB,
+ 0x1FB8, 0x01BD, 0x05A3, 0x0121, 0x1FC7,
+ 0x1FBB, 0x0194, 0x05A8, 0x0146, 0x1FC3,
+ /* Chroma */
+ 0x1FBF, 0x016C, 0x05AA, 0x016C, 0x1FBF,
+ 0x1FC3, 0x0146, 0x05A8, 0x0194, 0x1FBB,
+ 0x1FC7, 0x0121, 0x05A3, 0x01BD, 0x1FB8,
+ 0x1FCB, 0x00FD, 0x059B, 0x01E8, 0x1FB5,
+ 0x1FD0, 0x00DC, 0x058F, 0x0213, 0x1FB2,
+ 0x1FD4, 0x00BC, 0x0580, 0x0240, 0x1FB0,
+ 0x1FD8, 0x009E, 0x056E, 0x026D, 0x1FAF,
+ 0x1FDC, 0x0082, 0x055A, 0x029A, 0x1FAE,
+ 0x1FE0, 0x0067, 0x0542, 0x02C9, 0x1FAE,
+ 0x1FE4, 0x004F, 0x0528, 0x02F6, 0x1FAF,
+ 0x1FE8, 0x0038, 0x050A, 0x0325, 0x1FB1,
+ 0x1FEB, 0x0024, 0x04EB, 0x0352, 0x1FB4,
+ 0x1FEE, 0x0011, 0x04C8, 0x0380, 0x1FB9,
+ 0x1FF1, 0x0000, 0x04A4, 0x03AC, 0x1FBF,
+ 0x1FF4, 0x1FF1, 0x047D, 0x03D8, 0x1FC6,
+ 0x1FF6, 0x1FE4, 0x0455, 0x0403, 0x1FCE,
+ 0x1FD8, 0x0428, 0x0428, 0x1FD8, 0x0000,
+ 0x1FCE, 0x0403, 0x0455, 0x1FE4, 0x1FF6,
+ 0x1FC6, 0x03D8, 0x047D, 0x1FF1, 0x1FF4,
+ 0x1FBF, 0x03AC, 0x04A4, 0x0000, 0x1FF1,
+ 0x1FB9, 0x0380, 0x04C8, 0x0011, 0x1FEE,
+ 0x1FB4, 0x0352, 0x04EB, 0x0024, 0x1FEB,
+ 0x1FB1, 0x0325, 0x050A, 0x0038, 0x1FE8,
+ 0x1FAF, 0x02F6, 0x0528, 0x004F, 0x1FE4,
+ 0x1FAE, 0x02C9, 0x0542, 0x0067, 0x1FE0,
+ 0x1FAE, 0x029A, 0x055A, 0x0082, 0x1FDC,
+ 0x1FAF, 0x026D, 0x056E, 0x009E, 0x1FD8,
+ 0x1FB0, 0x0240, 0x0580, 0x00BC, 0x1FD4,
+ 0x1FB2, 0x0213, 0x058F, 0x00DC, 0x1FD0,
+ 0x1FB5, 0x01E8, 0x059B, 0x00FD, 0x1FCB,
+ 0x1FB8, 0x01BD, 0x05A3, 0x0121, 0x1FC7,
+ 0x1FBB, 0x0194, 0x05A8, 0x0146, 0x1FC3,
+ },
+ [VS_LT_15_16_SCALE] = {
+ /* Luma */
+ 0x1FBD, 0x0136, 0x061A, 0x0136, 0x1FBD,
+ 0x1FC3, 0x010D, 0x0617, 0x0161, 0x1FB8,
+ 0x1FC9, 0x00E6, 0x0611, 0x018E, 0x1FB2,
+ 0x1FCE, 0x00C1, 0x0607, 0x01BD, 0x1FAD,
+ 0x1FD4, 0x009E, 0x05F9, 0x01ED, 0x1FA8,
+ 0x1FD9, 0x007D, 0x05E8, 0x021F, 0x1FA3,
+ 0x1FDE, 0x005E, 0x05D3, 0x0252, 0x1F9F,
+ 0x1FE2, 0x0042, 0x05BC, 0x0285, 0x1F9B,
+ 0x1FE7, 0x0029, 0x059F, 0x02B9, 0x1F98,
+ 0x1FEA, 0x0011, 0x0580, 0x02EF, 0x1F96,
+ 0x1FEE, 0x1FFC, 0x055D, 0x0324, 0x1F95,
+ 0x1FF1, 0x1FE9, 0x0538, 0x0359, 0x1F95,
+ 0x1FF4, 0x1FD8, 0x0510, 0x038E, 0x1F96,
+ 0x1FF7, 0x1FC9, 0x04E5, 0x03C2, 0x1F99,
+ 0x1FF9, 0x1FBD, 0x04B8, 0x03F5, 0x1F9D,
+ 0x1FFB, 0x1FB2, 0x0489, 0x0428, 0x1FA2,
+ 0x1FAA, 0x0456, 0x0456, 0x1FAA, 0x0000,
+ 0x1FA2, 0x0428, 0x0489, 0x1FB2, 0x1FFB,
+ 0x1F9D, 0x03F5, 0x04B8, 0x1FBD, 0x1FF9,
+ 0x1F99, 0x03C2, 0x04E5, 0x1FC9, 0x1FF7,
+ 0x1F96, 0x038E, 0x0510, 0x1FD8, 0x1FF4,
+ 0x1F95, 0x0359, 0x0538, 0x1FE9, 0x1FF1,
+ 0x1F95, 0x0324, 0x055D, 0x1FFC, 0x1FEE,
+ 0x1F96, 0x02EF, 0x0580, 0x0011, 0x1FEA,
+ 0x1F98, 0x02B9, 0x059F, 0x0029, 0x1FE7,
+ 0x1F9B, 0x0285, 0x05BC, 0x0042, 0x1FE2,
+ 0x1F9F, 0x0252, 0x05D3, 0x005E, 0x1FDE,
+ 0x1FA3, 0x021F, 0x05E8, 0x007D, 0x1FD9,
+ 0x1FA8, 0x01ED, 0x05F9, 0x009E, 0x1FD4,
+ 0x1FAD, 0x01BD, 0x0607, 0x00C1, 0x1FCE,
+ 0x1FB2, 0x018E, 0x0611, 0x00E6, 0x1FC9,
+ 0x1FB8, 0x0161, 0x0617, 0x010D, 0x1FC3,
+ /* Chroma */
+ 0x1FBD, 0x0136, 0x061A, 0x0136, 0x1FBD,
+ 0x1FC3, 0x010D, 0x0617, 0x0161, 0x1FB8,
+ 0x1FC9, 0x00E6, 0x0611, 0x018E, 0x1FB2,
+ 0x1FCE, 0x00C1, 0x0607, 0x01BD, 0x1FAD,
+ 0x1FD4, 0x009E, 0x05F9, 0x01ED, 0x1FA8,
+ 0x1FD9, 0x007D, 0x05E8, 0x021F, 0x1FA3,
+ 0x1FDE, 0x005E, 0x05D3, 0x0252, 0x1F9F,
+ 0x1FE2, 0x0042, 0x05BC, 0x0285, 0x1F9B,
+ 0x1FE7, 0x0029, 0x059F, 0x02B9, 0x1F98,
+ 0x1FEA, 0x0011, 0x0580, 0x02EF, 0x1F96,
+ 0x1FEE, 0x1FFC, 0x055D, 0x0324, 0x1F95,
+ 0x1FF1, 0x1FE9, 0x0538, 0x0359, 0x1F95,
+ 0x1FF4, 0x1FD8, 0x0510, 0x038E, 0x1F96,
+ 0x1FF7, 0x1FC9, 0x04E5, 0x03C2, 0x1F99,
+ 0x1FF9, 0x1FBD, 0x04B8, 0x03F5, 0x1F9D,
+ 0x1FFB, 0x1FB2, 0x0489, 0x0428, 0x1FA2,
+ 0x1FAA, 0x0456, 0x0456, 0x1FAA, 0x0000,
+ 0x1FA2, 0x0428, 0x0489, 0x1FB2, 0x1FFB,
+ 0x1F9D, 0x03F5, 0x04B8, 0x1FBD, 0x1FF9,
+ 0x1F99, 0x03C2, 0x04E5, 0x1FC9, 0x1FF7,
+ 0x1F96, 0x038E, 0x0510, 0x1FD8, 0x1FF4,
+ 0x1F95, 0x0359, 0x0538, 0x1FE9, 0x1FF1,
+ 0x1F95, 0x0324, 0x055D, 0x1FFC, 0x1FEE,
+ 0x1F96, 0x02EF, 0x0580, 0x0011, 0x1FEA,
+ 0x1F98, 0x02B9, 0x059F, 0x0029, 0x1FE7,
+ 0x1F9B, 0x0285, 0x05BC, 0x0042, 0x1FE2,
+ 0x1F9F, 0x0252, 0x05D3, 0x005E, 0x1FDE,
+ 0x1FA3, 0x021F, 0x05E8, 0x007D, 0x1FD9,
+ 0x1FA8, 0x01ED, 0x05F9, 0x009E, 0x1FD4,
+ 0x1FAD, 0x01BD, 0x0607, 0x00C1, 0x1FCE,
+ 0x1FB2, 0x018E, 0x0611, 0x00E6, 0x1FC9,
+ 0x1FB8, 0x0161, 0x0617, 0x010D, 0x1FC3,
+ },
+ [VS_LT_16_16_SCALE] = {
+ /* Luma */
+ 0x1FC3, 0x00F8, 0x068A, 0x00F8, 0x1FC3,
+ 0x1FCA, 0x00CC, 0x0689, 0x0125, 0x1FBC,
+ 0x1FD1, 0x00A3, 0x0681, 0x0156, 0x1FB5,
+ 0x1FD7, 0x007D, 0x0676, 0x0188, 0x1FAE,
+ 0x1FDD, 0x005A, 0x0666, 0x01BD, 0x1FA6,
+ 0x1FE3, 0x0039, 0x0652, 0x01F3, 0x1F9F,
+ 0x1FE8, 0x001B, 0x0639, 0x022C, 0x1F98,
+ 0x1FEC, 0x0000, 0x061D, 0x0265, 0x1F92,
+ 0x1FF0, 0x1FE8, 0x05FC, 0x02A0, 0x1F8C,
+ 0x1FF4, 0x1FD2, 0x05D7, 0x02DC, 0x1F87,
+ 0x1FF7, 0x1FBF, 0x05AF, 0x0319, 0x1F82,
+ 0x1FFA, 0x1FAF, 0x0583, 0x0356, 0x1F7E,
+ 0x1FFC, 0x1FA1, 0x0554, 0x0393, 0x1F7C,
+ 0x1FFE, 0x1F95, 0x0523, 0x03CF, 0x1F7B,
+ 0x0000, 0x1F8C, 0x04EE, 0x040B, 0x1F7B,
+ 0x0001, 0x1F85, 0x04B8, 0x0446, 0x1F7C,
+ 0x1F80, 0x0480, 0x0480, 0x1F80, 0x0000,
+ 0x1F7C, 0x0446, 0x04B8, 0x1F85, 0x0001,
+ 0x1F7B, 0x040B, 0x04EE, 0x1F8C, 0x0000,
+ 0x1F7B, 0x03CF, 0x0523, 0x1F95, 0x1FFE,
+ 0x1F7C, 0x0393, 0x0554, 0x1FA1, 0x1FFC,
+ 0x1F7E, 0x0356, 0x0583, 0x1FAF, 0x1FFA,
+ 0x1F82, 0x0319, 0x05AF, 0x1FBF, 0x1FF7,
+ 0x1F87, 0x02DC, 0x05D7, 0x1FD2, 0x1FF4,
+ 0x1F8C, 0x02A0, 0x05FC, 0x1FE8, 0x1FF0,
+ 0x1F92, 0x0265, 0x061D, 0x0000, 0x1FEC,
+ 0x1F98, 0x022C, 0x0639, 0x001B, 0x1FE8,
+ 0x1F9F, 0x01F3, 0x0652, 0x0039, 0x1FE3,
+ 0x1FA6, 0x01BD, 0x0666, 0x005A, 0x1FDD,
+ 0x1FAE, 0x0188, 0x0676, 0x007D, 0x1FD7,
+ 0x1FB5, 0x0156, 0x0681, 0x00A3, 0x1FD1,
+ 0x1FBC, 0x0125, 0x0689, 0x00CC, 0x1FCA,
+ /* Chroma */
+ 0x1FC3, 0x00F8, 0x068A, 0x00F8, 0x1FC3,
+ 0x1FCA, 0x00CC, 0x0689, 0x0125, 0x1FBC,
+ 0x1FD1, 0x00A3, 0x0681, 0x0156, 0x1FB5,
+ 0x1FD7, 0x007D, 0x0676, 0x0188, 0x1FAE,
+ 0x1FDD, 0x005A, 0x0666, 0x01BD, 0x1FA6,
+ 0x1FE3, 0x0039, 0x0652, 0x01F3, 0x1F9F,
+ 0x1FE8, 0x001B, 0x0639, 0x022C, 0x1F98,
+ 0x1FEC, 0x0000, 0x061D, 0x0265, 0x1F92,
+ 0x1FF0, 0x1FE8, 0x05FC, 0x02A0, 0x1F8C,
+ 0x1FF4, 0x1FD2, 0x05D7, 0x02DC, 0x1F87,
+ 0x1FF7, 0x1FBF, 0x05AF, 0x0319, 0x1F82,
+ 0x1FFA, 0x1FAF, 0x0583, 0x0356, 0x1F7E,
+ 0x1FFC, 0x1FA1, 0x0554, 0x0393, 0x1F7C,
+ 0x1FFE, 0x1F95, 0x0523, 0x03CF, 0x1F7B,
+ 0x0000, 0x1F8C, 0x04EE, 0x040B, 0x1F7B,
+ 0x0001, 0x1F85, 0x04B8, 0x0446, 0x1F7C,
+ 0x1F80, 0x0480, 0x0480, 0x1F80, 0x0000,
+ 0x1F7C, 0x0446, 0x04B8, 0x1F85, 0x0001,
+ 0x1F7B, 0x040B, 0x04EE, 0x1F8C, 0x0000,
+ 0x1F7B, 0x03CF, 0x0523, 0x1F95, 0x1FFE,
+ 0x1F7C, 0x0393, 0x0554, 0x1FA1, 0x1FFC,
+ 0x1F7E, 0x0356, 0x0583, 0x1FAF, 0x1FFA,
+ 0x1F82, 0x0319, 0x05AF, 0x1FBF, 0x1FF7,
+ 0x1F87, 0x02DC, 0x05D7, 0x1FD2, 0x1FF4,
+ 0x1F8C, 0x02A0, 0x05FC, 0x1FE8, 0x1FF0,
+ 0x1F92, 0x0265, 0x061D, 0x0000, 0x1FEC,
+ 0x1F98, 0x022C, 0x0639, 0x001B, 0x1FE8,
+ 0x1F9F, 0x01F3, 0x0652, 0x0039, 0x1FE3,
+ 0x1FA6, 0x01BD, 0x0666, 0x005A, 0x1FDD,
+ 0x1FAE, 0x0188, 0x0676, 0x007D, 0x1FD7,
+ 0x1FB5, 0x0156, 0x0681, 0x00A3, 0x1FD1,
+ 0x1FBC, 0x0125, 0x0689, 0x00CC, 0x1FCA,
+ },
+ [VS_1_TO_1_SCALE] = {
+ /* Luma */
+ 0x0000, 0x0000, 0x0800, 0x0000, 0x0000,
+ 0x1FD8, 0x0085, 0x06F9, 0x00E1, 0x1FC9,
+ 0x1FDF, 0x005B, 0x06F2, 0x0114, 0x1FC0,
+ 0x1FE5, 0x0035, 0x06E5, 0x014A, 0x1FB7,
+ 0x1FEB, 0x0012, 0x06D3, 0x0182, 0x1FAE,
+ 0x1FF1, 0x1FF3, 0x06BA, 0x01BD, 0x1FA5,
+ 0x1FF5, 0x1FD7, 0x069D, 0x01FB, 0x1F9C,
+ 0x1FF9, 0x1FBE, 0x067C, 0x023A, 0x1F93,
+ 0x1FFD, 0x1FA8, 0x0656, 0x027B, 0x1F8A,
+ 0x0000, 0x1F95, 0x062B, 0x02BF, 0x1F81,
+ 0x0002, 0x1F86, 0x05FC, 0x0303, 0x1F79,
+ 0x0004, 0x1F79, 0x05CA, 0x0347, 0x1F72,
+ 0x0005, 0x1F6F, 0x0594, 0x038D, 0x1F6B,
+ 0x0006, 0x1F67, 0x055B, 0x03D2, 0x1F66,
+ 0x0007, 0x1F62, 0x051E, 0x0417, 0x1F62,
+ 0x0007, 0x1F5F, 0x04DF, 0x045C, 0x1F5F,
+ 0x1F5E, 0x04A2, 0x04A2, 0x1F5E, 0x0000,
+ 0x1F5F, 0x045C, 0x04DF, 0x1F5F, 0x0007,
+ 0x1F62, 0x0417, 0x051E, 0x1F62, 0x0007,
+ 0x1F66, 0x03D2, 0x055B, 0x1F67, 0x0006,
+ 0x1F6B, 0x038D, 0x0594, 0x1F6F, 0x0005,
+ 0x1F72, 0x0347, 0x05CA, 0x1F79, 0x0004,
+ 0x1F79, 0x0303, 0x05FC, 0x1F86, 0x0002,
+ 0x1F81, 0x02BF, 0x062B, 0x1F95, 0x0000,
+ 0x1F8A, 0x027B, 0x0656, 0x1FA8, 0x1FFD,
+ 0x1F93, 0x023A, 0x067C, 0x1FBE, 0x1FF9,
+ 0x1F9C, 0x01FB, 0x069D, 0x1FD7, 0x1FF5,
+ 0x1FA5, 0x01BD, 0x06BA, 0x1FF3, 0x1FF1,
+ 0x1FAE, 0x0182, 0x06D3, 0x0012, 0x1FEB,
+ 0x1FB7, 0x014A, 0x06E5, 0x0035, 0x1FE5,
+ 0x1FC0, 0x0114, 0x06F2, 0x005B, 0x1FDF,
+ 0x1FC9, 0x00E1, 0x06F9, 0x0085, 0x1FD8,
+ /* Chroma */
+ 0x0000, 0x0000, 0x0800, 0x0000, 0x0000,
+ 0x1FD8, 0x0085, 0x06F9, 0x00E1, 0x1FC9,
+ 0x1FDF, 0x005B, 0x06F2, 0x0114, 0x1FC0,
+ 0x1FE5, 0x0035, 0x06E5, 0x014A, 0x1FB7,
+ 0x1FEB, 0x0012, 0x06D3, 0x0182, 0x1FAE,
+ 0x1FF1, 0x1FF3, 0x06BA, 0x01BD, 0x1FA5,
+ 0x1FF5, 0x1FD7, 0x069D, 0x01FB, 0x1F9C,
+ 0x1FF9, 0x1FBE, 0x067C, 0x023A, 0x1F93,
+ 0x1FFD, 0x1FA8, 0x0656, 0x027B, 0x1F8A,
+ 0x0000, 0x1F95, 0x062B, 0x02BF, 0x1F81,
+ 0x0002, 0x1F86, 0x05FC, 0x0303, 0x1F79,
+ 0x0004, 0x1F79, 0x05CA, 0x0347, 0x1F72,
+ 0x0005, 0x1F6F, 0x0594, 0x038D, 0x1F6B,
+ 0x0006, 0x1F67, 0x055B, 0x03D2, 0x1F66,
+ 0x0007, 0x1F62, 0x051E, 0x0417, 0x1F62,
+ 0x0007, 0x1F5F, 0x04DF, 0x045C, 0x1F5F,
+ 0x1F5E, 0x04A2, 0x04A2, 0x1F5E, 0x0000,
+ 0x1F5F, 0x045C, 0x04DF, 0x1F5F, 0x0007,
+ 0x1F62, 0x0417, 0x051E, 0x1F62, 0x0007,
+ 0x1F66, 0x03D2, 0x055B, 0x1F67, 0x0006,
+ 0x1F6B, 0x038D, 0x0594, 0x1F6F, 0x0005,
+ 0x1F72, 0x0347, 0x05CA, 0x1F79, 0x0004,
+ 0x1F79, 0x0303, 0x05FC, 0x1F86, 0x0002,
+ 0x1F81, 0x02BF, 0x062B, 0x1F95, 0x0000,
+ 0x1F8A, 0x027B, 0x0656, 0x1FA8, 0x1FFD,
+ 0x1F93, 0x023A, 0x067C, 0x1FBE, 0x1FF9,
+ 0x1F9C, 0x01FB, 0x069D, 0x1FD7, 0x1FF5,
+ 0x1FA5, 0x01BD, 0x06BA, 0x1FF3, 0x1FF1,
+ 0x1FAE, 0x0182, 0x06D3, 0x0012, 0x1FEB,
+ 0x1FB7, 0x014A, 0x06E5, 0x0035, 0x1FE5,
+ 0x1FC0, 0x0114, 0x06F2, 0x005B, 0x1FDF,
+ 0x1FC9, 0x00E1, 0x06F9, 0x0085, 0x1FD8,
+ },
+};
+#endif
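
A note on the coefficient tables above (editorial, not part of the patch): judging from the data, each set holds 32 phases of luma taps followed by 32 phases of chroma taps, and the taps of every phase sum to 0x0800, i.e. unity gain in a signed fixed-point format where values of 0x1000 and above are negative (the 1:1 vertical set starts with the pass-through phase 0x0000, 0x0000, 0x0800, 0x0000, 0x0000). The sketch below shows one plausible way a helper such as sc_set_vs_coeffs() (used later in this patch) could pick a vertical set from the scaling ratio; the thresholds and the helper name are assumptions, not the driver's actual code.

/*
 * Editorial sketch, not part of the patch: select a vertical coefficient
 * set from the downscale ratio using the enum above. The thresholds are
 * assumed; the real selection lives in sc.c.
 */
static const u16 *pick_vs_coeffs(unsigned int src_h, unsigned int dst_h)
{
	unsigned int idx;

	if (dst_h == src_h) {
		idx = VS_1_TO_1_SCALE;
	} else if (dst_h > src_h) {
		idx = VS_UP_SCALE;
	} else {
		/* smallest n in 9..16 with dst_h/src_h < n/16 */
		unsigned int n = (16 * dst_h) / src_h + 1;

		if (n < 9)
			n = 9;
		idx = VS_LT_9_16_SCALE + (n - 9);
	}

	return scaler_vs_coeffs[idx];
}
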
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
index fcbe48a09cf8..e8175e7938ed 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -30,38 +30,47 @@
const struct vpdma_data_format vpdma_yuv_fmts[] = {
[VPDMA_DATA_FMT_Y444] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_Y444,
.depth = 8,
},
[VPDMA_DATA_FMT_Y422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_Y422,
.depth = 8,
},
[VPDMA_DATA_FMT_Y420] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_Y420,
.depth = 8,
},
[VPDMA_DATA_FMT_C444] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_C444,
.depth = 8,
},
[VPDMA_DATA_FMT_C422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_C422,
.depth = 8,
},
[VPDMA_DATA_FMT_C420] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_C420,
.depth = 4,
},
[VPDMA_DATA_FMT_YC422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_YC422,
.depth = 16,
},
[VPDMA_DATA_FMT_YC444] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_YC444,
.depth = 24,
},
[VPDMA_DATA_FMT_CY422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_CY422,
.depth = 16,
},
@@ -69,82 +78,102 @@ const struct vpdma_data_format vpdma_yuv_fmts[] = {
const struct vpdma_data_format vpdma_rgb_fmts[] = {
[VPDMA_DATA_FMT_RGB565] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_RGB16_565,
.depth = 16,
},
[VPDMA_DATA_FMT_ARGB16_1555] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_ARGB_1555,
.depth = 16,
},
[VPDMA_DATA_FMT_ARGB16] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_ARGB_4444,
.depth = 16,
},
[VPDMA_DATA_FMT_RGBA16_5551] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_RGBA_5551,
.depth = 16,
},
[VPDMA_DATA_FMT_RGBA16] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_RGBA_4444,
.depth = 16,
},
[VPDMA_DATA_FMT_ARGB24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_ARGB24_6666,
.depth = 24,
},
[VPDMA_DATA_FMT_RGB24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_RGB24_888,
.depth = 24,
},
[VPDMA_DATA_FMT_ARGB32] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_ARGB32_8888,
.depth = 32,
},
[VPDMA_DATA_FMT_RGBA24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_RGBA24_6666,
.depth = 24,
},
[VPDMA_DATA_FMT_RGBA32] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_RGBA32_8888,
.depth = 32,
},
[VPDMA_DATA_FMT_BGR565] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_BGR16_565,
.depth = 16,
},
[VPDMA_DATA_FMT_ABGR16_1555] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_ABGR_1555,
.depth = 16,
},
[VPDMA_DATA_FMT_ABGR16] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_ABGR_4444,
.depth = 16,
},
[VPDMA_DATA_FMT_BGRA16_5551] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_BGRA_5551,
.depth = 16,
},
[VPDMA_DATA_FMT_BGRA16] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_BGRA_4444,
.depth = 16,
},
[VPDMA_DATA_FMT_ABGR24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_ABGR24_6666,
.depth = 24,
},
[VPDMA_DATA_FMT_BGR24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_BGR24_888,
.depth = 24,
},
[VPDMA_DATA_FMT_ABGR32] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_ABGR32_8888,
.depth = 32,
},
[VPDMA_DATA_FMT_BGRA24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_BGRA24_6666,
.depth = 24,
},
[VPDMA_DATA_FMT_BGRA32] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_BGRA32_8888,
.depth = 32,
},
@@ -152,6 +181,7 @@ const struct vpdma_data_format vpdma_rgb_fmts[] = {
const struct vpdma_data_format vpdma_misc_fmts[] = {
[VPDMA_DATA_FMT_MV] = {
+ .type = VPDMA_DATA_FMT_TYPE_MISC,
.data_type = DATA_TYPE_MV,
.depth = 4,
},
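
Editorial note (not part of the patch): the numeric data_type codes are defined per namespace, so a YUV code can collide with an RGB code of the same value (DATA_TYPE_C420 is 0x6 in the YUV table, and the RGB table counts up from 0x0 as well). That is why the vpdma.c hunks below test both fields before treating a format as C420; a minimal helper expressing the same check, with a made-up name, would look like this.

/* hypothetical helper: C420 check that cannot match an RGB format which
 * happens to share the same numeric data_type code
 */
static bool vpdma_fmt_is_c420(const struct vpdma_data_format *fmt)
{
	return fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
	       fmt->data_type == DATA_TYPE_C420;
}
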
@@ -599,10 +629,11 @@ void vpdma_add_out_dtd(struct vpdma_desc_list *list, struct v4l2_rect *c_rect,
channel = next_chan = chan_info[chan].num;
- if (fmt->data_type == DATA_TYPE_C420)
+ if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
+ fmt->data_type == DATA_TYPE_C420)
depth = 8;
- stride = (depth * c_rect->width) >> 3;
+ stride = ALIGN((depth * c_rect->width) >> 3, VPDMA_STRIDE_ALIGN);
dma_addr += (c_rect->left * depth) >> 3;
dtd = list->next;
@@ -649,13 +680,14 @@ void vpdma_add_in_dtd(struct vpdma_desc_list *list, int frame_width,
channel = next_chan = chan_info[chan].num;
- if (fmt->data_type == DATA_TYPE_C420) {
+ if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
+ fmt->data_type == DATA_TYPE_C420) {
height >>= 1;
frame_height >>= 1;
depth = 8;
}
- stride = (depth * c_rect->width) >> 3;
+ stride = ALIGN((depth * c_rect->width) >> 3, VPDMA_STRIDE_ALIGN);
dma_addr += (c_rect->left * depth) >> 3;
dtd = list->next;
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
index eaa2a71a5db9..cf40f11b3c8f 100644
--- a/drivers/media/platform/ti-vpe/vpdma.h
+++ b/drivers/media/platform/ti-vpe/vpdma.h
@@ -39,13 +39,23 @@ struct vpdma_data {
bool ready;
};
+enum vpdma_data_format_type {
+ VPDMA_DATA_FMT_TYPE_YUV,
+ VPDMA_DATA_FMT_TYPE_RGB,
+ VPDMA_DATA_FMT_TYPE_MISC,
+};
+
struct vpdma_data_format {
+ enum vpdma_data_format_type type;
int data_type;
u8 depth;
};
#define VPDMA_DESC_ALIGN 16 /* 16-byte descriptor alignment */
-
+/* line stride of source and dest buffers should be 16 byte aligned */
+#define VPDMA_STRIDE_ALIGN	16
#define VPDMA_DTD_DESC_SIZE 32 /* 8 words */
#define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */
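
Editorial illustration (not part of the patch): the vpdma.c hunks above round the line stride up to VPDMA_STRIDE_ALIGN with the kernel's ALIGN() macro instead of using the raw byte width. The helper below, whose name is made up for illustration, mirrors that computation; for an 8 bpp luma plane 1282 pixels wide it yields ALIGN(1282, 16) = 1296 bytes instead of 1282.

#include <linux/kernel.h>	/* ALIGN() */

/* hypothetical helper mirroring the stride computation in vpdma_add_*_dtd() */
static inline unsigned int vpdma_line_stride(unsigned int width_pixels,
					     unsigned int depth_bits)
{
	/* bytes per line, rounded up to the 16-byte VPDMA requirement */
	return ALIGN((depth_bits * width_pixels) >> 3, VPDMA_STRIDE_ALIGN);
}
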
diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti-vpe/vpdma_priv.h
index f0e9a8038c1b..c1a6ce1884f3 100644
--- a/drivers/media/platform/ti-vpe/vpdma_priv.h
+++ b/drivers/media/platform/ti-vpe/vpdma_priv.h
@@ -78,7 +78,7 @@
#define DATA_TYPE_C420 0x6
#define DATA_TYPE_YC422 0x7
#define DATA_TYPE_YC444 0x8
-#define DATA_TYPE_CY422 0x23
+#define DATA_TYPE_CY422 0x27
#define DATA_TYPE_RGB16_565 0x0
#define DATA_TYPE_ARGB_1555 0x1
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 4e58069e24ff..1296c5386231 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -30,6 +30,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <linux/log2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
@@ -42,6 +43,8 @@
#include "vpdma.h"
#include "vpe_regs.h"
+#include "sc.h"
+#include "csc.h"
#define VPE_MODULE_NAME "vpe"
@@ -54,10 +57,6 @@
/* required alignments */
#define S_ALIGN 0 /* multiple of 1 */
#define H_ALIGN 1 /* multiple of 2 */
-#define W_ALIGN 1 /* multiple of 2 */
-
-/* multiple of 128 bits, line stride, 16 bytes */
-#define L_ALIGN 4
/* flags that indicate a format can be used for capture/output */
#define VPE_FMT_TYPE_CAPTURE (1 << 0)
@@ -268,6 +267,38 @@ static struct vpe_fmt vpe_formats[] = {
.vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422],
},
},
+ {
+ .name = "RGB888 packed",
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB24],
+ },
+ },
+ {
+ .name = "ARGB32",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ARGB32],
+ },
+ },
+ {
+ .name = "BGR888 packed",
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_BGR24],
+ },
+ },
+ {
+ .name = "ABGR32",
+ .fourcc = V4L2_PIX_FMT_BGR32,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32],
+ },
+ },
};
/*
@@ -327,9 +358,12 @@ struct vpe_dev {
int irq;
void __iomem *base;
+ struct resource *res;
struct vb2_alloc_ctx *alloc_ctx;
struct vpdma_data *vpdma; /* vpdma data handle */
+ struct sc_data *sc; /* scaler data handle */
+ struct csc_data *csc; /* csc data handle */
};
/*
@@ -356,6 +390,8 @@ struct vpe_ctx {
void *mv_buf[2]; /* virtual addrs of motion vector bufs */
size_t mv_buf_size; /* current motion vector buffer size */
struct vpdma_buf mmr_adb; /* shadow reg addr/data block */
+ struct vpdma_buf sc_coeff_h; /* h coeff buffer */
+ struct vpdma_buf sc_coeff_v; /* v coeff buffer */
struct vpdma_desc_list desc_list; /* DMA descriptor list */
bool deinterlacing; /* using de-interlacer */
@@ -438,14 +474,23 @@ struct vpe_mmr_adb {
u32 us3_regs[8];
struct vpdma_adb_hdr dei_hdr;
u32 dei_regs[8];
- struct vpdma_adb_hdr sc_hdr;
- u32 sc_regs[1];
- u32 sc_pad[3];
+ struct vpdma_adb_hdr sc_hdr0;
+ u32 sc_regs0[7];
+ u32 sc_pad0[1];
+ struct vpdma_adb_hdr sc_hdr8;
+ u32 sc_regs8[6];
+ u32 sc_pad8[2];
+ struct vpdma_adb_hdr sc_hdr17;
+ u32 sc_regs17[9];
+ u32 sc_pad17[3];
struct vpdma_adb_hdr csc_hdr;
u32 csc_regs[6];
u32 csc_pad[2];
};
+#define GET_OFFSET_TOP(ctx, obj, reg) \
+ ((obj)->res->start - ctx->dev->res->start + reg)
+
#define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a) \
VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
/*
@@ -458,8 +503,14 @@ static void init_adb_hdrs(struct vpe_ctx *ctx)
VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0);
VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0);
VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE);
- VPE_SET_MMR_ADB_HDR(ctx, sc_hdr, sc_regs, VPE_SC_MP_SC0);
- VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs, VPE_CSC_CSC00);
+ VPE_SET_MMR_ADB_HDR(ctx, sc_hdr0, sc_regs0,
+ GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC0));
+ VPE_SET_MMR_ADB_HDR(ctx, sc_hdr8, sc_regs8,
+ GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC8));
+ VPE_SET_MMR_ADB_HDR(ctx, sc_hdr17, sc_regs17,
+ GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC17));
+ VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs,
+ GET_OFFSET_TOP(ctx, ctx->dev->csc, CSC_CSC00));
};
/*
@@ -670,17 +721,20 @@ static void set_src_registers(struct vpe_ctx *ctx)
static void set_dst_registers(struct vpe_ctx *ctx)
{
struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ enum v4l2_colorspace clrspc = ctx->q_data[Q_DATA_DST].colorspace;
struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
u32 val = 0;
- /* select RGB path when color space conversion is supported in future */
- if (fmt->fourcc == V4L2_PIX_FMT_RGB24)
- val |= VPE_RGB_OUT_SELECT | VPE_CSC_SRC_DEI_SCALER;
+ if (clrspc == V4L2_COLORSPACE_SRGB)
+ val |= VPE_RGB_OUT_SELECT;
else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
val |= VPE_COLOR_SEPARATE_422;
- /* The source of CHR_DS is always the scaler, whether it's used or not */
- val |= VPE_DS_SRC_DEI_SCALER;
+ /*
+ * the source of CHR_DS and CSC is always the scaler, irrespective of
+ * whether it's used or not
+ */
+ val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER;
if (fmt->fourcc != V4L2_PIX_FMT_NV12)
val |= VPE_DS_BYPASS;
@@ -742,28 +796,6 @@ static void set_dei_shadow_registers(struct vpe_ctx *ctx)
ctx->load_mmrs = true;
}
-static void set_csc_coeff_bypass(struct vpe_ctx *ctx)
-{
- struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
- u32 *shadow_csc_reg5 = &mmr_adb->csc_regs[5];
-
- *shadow_csc_reg5 |= VPE_CSC_BYPASS;
-
- ctx->load_mmrs = true;
-}
-
-static void set_sc_regs_bypass(struct vpe_ctx *ctx)
-{
- struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
- u32 *sc_reg0 = &mmr_adb->sc_regs[0];
- u32 val = 0;
-
- val |= VPE_SC_BYPASS;
- *sc_reg0 = val;
-
- ctx->load_mmrs = true;
-}
-
/*
* Set the shadow registers whose values are modified when either the
* source or destination format is changed.
@@ -772,6 +804,11 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
{
struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ unsigned int src_w = s_q_data->c_rect.width;
+ unsigned int src_h = s_q_data->c_rect.height;
+ unsigned int dst_w = d_q_data->c_rect.width;
+ unsigned int dst_h = d_q_data->c_rect.height;
size_t mv_buf_size;
int ret;
@@ -780,12 +817,23 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
if ((s_q_data->flags & Q_DATA_INTERLACED) &&
!(d_q_data->flags & Q_DATA_INTERLACED)) {
+ int bytes_per_line;
const struct vpdma_data_format *mv =
&vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
+ /*
+ * we make sure that the source image has a 16 byte aligned
+	 * stride; we need to do the same for the motion vector buffer
+	 * by aligning its stride to the next 16 byte boundary. this
+ * extra space will not be used by the de-interlacer, but will
+ * ensure that vpdma operates correctly
+ */
+ bytes_per_line = ALIGN((s_q_data->width * mv->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
+ mv_buf_size = bytes_per_line * s_q_data->height;
+
ctx->deinterlacing = 1;
- mv_buf_size =
- (s_q_data->width * s_q_data->height * mv->depth) >> 3;
+ src_h <<= 1;
} else {
ctx->deinterlacing = 0;
mv_buf_size = 0;
@@ -799,8 +847,16 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
set_cfg_and_line_modes(ctx);
set_dei_regs(ctx);
- set_csc_coeff_bypass(ctx);
- set_sc_regs_bypass(ctx);
+
+ csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
+ s_q_data->colorspace, d_q_data->colorspace);
+
+ sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w);
+ sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h);
+
+ sc_config_scaler(ctx->dev->sc, &mmr_adb->sc_regs0[0],
+ &mmr_adb->sc_regs8[0], &mmr_adb->sc_regs17[0],
+ src_w, src_h, dst_w, dst_h);
return 0;
}
@@ -916,35 +972,10 @@ static void vpe_dump_regs(struct vpe_dev *dev)
DUMPREG(DEI_FMD_STATUS_R0);
DUMPREG(DEI_FMD_STATUS_R1);
DUMPREG(DEI_FMD_STATUS_R2);
- DUMPREG(SC_MP_SC0);
- DUMPREG(SC_MP_SC1);
- DUMPREG(SC_MP_SC2);
- DUMPREG(SC_MP_SC3);
- DUMPREG(SC_MP_SC4);
- DUMPREG(SC_MP_SC5);
- DUMPREG(SC_MP_SC6);
- DUMPREG(SC_MP_SC8);
- DUMPREG(SC_MP_SC9);
- DUMPREG(SC_MP_SC10);
- DUMPREG(SC_MP_SC11);
- DUMPREG(SC_MP_SC12);
- DUMPREG(SC_MP_SC13);
- DUMPREG(SC_MP_SC17);
- DUMPREG(SC_MP_SC18);
- DUMPREG(SC_MP_SC19);
- DUMPREG(SC_MP_SC20);
- DUMPREG(SC_MP_SC21);
- DUMPREG(SC_MP_SC22);
- DUMPREG(SC_MP_SC23);
- DUMPREG(SC_MP_SC24);
- DUMPREG(SC_MP_SC25);
- DUMPREG(CSC_CSC00);
- DUMPREG(CSC_CSC01);
- DUMPREG(CSC_CSC02);
- DUMPREG(CSC_CSC03);
- DUMPREG(CSC_CSC04);
- DUMPREG(CSC_CSC05);
#undef DUMPREG
+
+ sc_dump_regs(dev->sc);
+ csc_dump_regs(dev->csc);
}
static void add_out_dtd(struct vpe_ctx *ctx, int port)
@@ -1053,6 +1084,7 @@ static void disable_irqs(struct vpe_ctx *ctx)
static void device_run(void *priv)
{
struct vpe_ctx *ctx = priv;
+ struct sc_data *sc = ctx->dev->sc;
struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
@@ -1075,13 +1107,37 @@ static void device_run(void *priv)
ctx->load_mmrs = false;
}
+ if (sc->loaded_coeff_h != ctx->sc_coeff_h.dma_addr ||
+ sc->load_coeff_h) {
+ vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_h);
+ vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
+ &ctx->sc_coeff_h, 0);
+
+ sc->loaded_coeff_h = ctx->sc_coeff_h.dma_addr;
+ sc->load_coeff_h = false;
+ }
+
+ if (sc->loaded_coeff_v != ctx->sc_coeff_v.dma_addr ||
+ sc->load_coeff_v) {
+ vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_v);
+ vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
+ &ctx->sc_coeff_v, SC_COEF_SRAM_SIZE >> 4);
+
+ sc->loaded_coeff_v = ctx->sc_coeff_v.dma_addr;
+ sc->load_coeff_v = false;
+ }
+
/* output data descriptors */
if (ctx->deinterlacing)
add_out_dtd(ctx, VPE_PORT_MV_OUT);
- add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
- if (d_q_data->fmt->coplanar)
- add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
+ if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
+ add_out_dtd(ctx, VPE_PORT_RGB_OUT);
+ } else {
+ add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
+ if (d_q_data->fmt->coplanar)
+ add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
+ }
/* input data descriptors */
if (ctx->deinterlacing) {
@@ -1117,9 +1173,16 @@ static void device_run(void *priv)
}
/* sync on channel control descriptors for output ports */
- vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA_OUT);
- if (d_q_data->fmt->coplanar)
- vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA_OUT);
+ if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_RGB_OUT);
+ } else {
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_LUMA_OUT);
+ if (d_q_data->fmt->coplanar)
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_CHROMA_OUT);
+ }
if (ctx->deinterlacing)
vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT);
@@ -1198,6 +1261,8 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
vpdma_reset_desc_list(&ctx->desc_list);
@@ -1352,7 +1417,8 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
{
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
struct v4l2_plane_pix_format *plane_fmt;
- int i;
+ unsigned int w_align;
+ int i, depth, depth_bytes;
if (!fmt || !(fmt->types & type)) {
vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
@@ -1363,35 +1429,57 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE)
pix->field = V4L2_FIELD_NONE;
- v4l_bound_align_image(&pix->width, MIN_W, MAX_W, W_ALIGN,
+ depth = fmt->vpdma_fmt[VPE_LUMA]->depth;
+
+ /*
+	 * the line stride should be 16 byte aligned for VPDMA to work; based
+	 * on the bytes per pixel, figure out how much the width should be
+	 * aligned to make sure the line stride is 16 byte aligned
+ */
+ depth_bytes = depth >> 3;
+
+ if (depth_bytes == 3)
+ /*
+ * if bpp is 3 (as in some RGB formats), the pixel width doesn't
+ * really help in ensuring the line stride is 16 byte aligned
+ */
+ w_align = 4;
+ else
+ /*
+ * for the remaining bpp values (4, 2 and 1), the pixel width alignment
+ * can ensure a line stride alignment of 16 bytes. For example,
+ * if bpp is 2, then the line stride can be 16 byte aligned if
+ * the width is aligned to 8 pixels
+ */
+ w_align = order_base_2(VPDMA_DESC_ALIGN / depth_bytes);
+
+ v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
&pix->height, MIN_H, MAX_H, H_ALIGN,
S_ALIGN);
pix->num_planes = fmt->coplanar ? 2 : 1;
pix->pixelformat = fmt->fourcc;
- if (type == VPE_FMT_TYPE_CAPTURE) {
- struct vpe_q_data *s_q_data;
-
- /* get colorspace from the source queue */
- s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
-
- pix->colorspace = s_q_data->colorspace;
- } else {
- if (!pix->colorspace)
- pix->colorspace = V4L2_COLORSPACE_SMPTE240M;
+ if (!pix->colorspace) {
+ if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
+ fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
+ fmt->fourcc == V4L2_PIX_FMT_RGB32 ||
+ fmt->fourcc == V4L2_PIX_FMT_BGR32) {
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ } else {
+ if (pix->height > 1280) /* HD */
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ else /* SD */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ }
}
for (i = 0; i < pix->num_planes; i++) {
- int depth;
-
plane_fmt = &pix->plane_fmt[i];
depth = fmt->vpdma_fmt[i]->depth;
if (i == VPE_LUMA)
- plane_fmt->bytesperline =
- round_up((pix->width * depth) >> 3,
- 1 << L_ALIGN);
+ plane_fmt->bytesperline = (pix->width * depth) >> 3;
else
plane_fmt->bytesperline = pix->width;
@@ -1749,6 +1837,14 @@ static int vpe_open(struct file *file)
if (ret != 0)
goto free_desc_list;
+ ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_h, SC_COEF_SRAM_SIZE);
+ if (ret != 0)
+ goto free_mmr_adb;
+
+ ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE);
+ if (ret != 0)
+ goto free_sc_h;
+
init_adb_hdrs(ctx);
v4l2_fh_init(&ctx->fh, video_devdata(file));
@@ -1770,7 +1866,7 @@ static int vpe_open(struct file *file)
s_q_data->height = 1080;
s_q_data->sizeimage[VPE_LUMA] = (s_q_data->width * s_q_data->height *
s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
- s_q_data->colorspace = V4L2_COLORSPACE_SMPTE240M;
+ s_q_data->colorspace = V4L2_COLORSPACE_SMPTE170M;
s_q_data->field = V4L2_FIELD_NONE;
s_q_data->c_rect.left = 0;
s_q_data->c_rect.top = 0;
@@ -1817,6 +1913,10 @@ static int vpe_open(struct file *file)
exit_fh:
v4l2_ctrl_handler_free(hdl);
v4l2_fh_exit(&ctx->fh);
+ vpdma_free_desc_buf(&ctx->sc_coeff_v);
+free_sc_h:
+ vpdma_free_desc_buf(&ctx->sc_coeff_h);
+free_mmr_adb:
vpdma_free_desc_buf(&ctx->mmr_adb);
free_desc_list:
vpdma_free_desc_list(&ctx->desc_list);
@@ -1938,12 +2038,11 @@ static int vpe_probe(struct platform_device *pdev)
{
struct vpe_dev *dev;
struct video_device *vfd;
- struct resource *res;
int ret, irq, func;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ if (!dev)
+ return -ENOMEM;
spin_lock_init(&dev->lock);
@@ -1954,16 +2053,17 @@ static int vpe_probe(struct platform_device *pdev)
atomic_set(&dev->num_instances, 0);
mutex_init(&dev->dev_mutex);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpe_top");
+ dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "vpe_top");
/*
* HACK: we get resource info from device tree in the form of a list of
* VPE sub blocks, the driver currently uses only the base of vpe_top
* for register access, the driver should be changed later to access
* registers based on the sub block base addresses
*/
- dev->base = devm_ioremap(&pdev->dev, res->start, SZ_32K);
- if (IS_ERR(dev->base)) {
- ret = PTR_ERR(dev->base);
+ dev->base = devm_ioremap(&pdev->dev, dev->res->start, SZ_32K);
+ if (!dev->base) {
+ ret = -ENOMEM;
goto v4l2_dev_unreg;
}
@@ -2006,9 +2106,23 @@ static int vpe_probe(struct platform_device *pdev)
vpe_top_vpdma_reset(dev);
+ dev->sc = sc_create(pdev);
+ if (IS_ERR(dev->sc)) {
+ ret = PTR_ERR(dev->sc);
+ goto runtime_put;
+ }
+
+ dev->csc = csc_create(pdev);
+ if (IS_ERR(dev->csc)) {
+ ret = PTR_ERR(dev->csc);
+ goto runtime_put;
+ }
+
dev->vpdma = vpdma_create(pdev);
- if (IS_ERR(dev->vpdma))
+ if (IS_ERR(dev->vpdma)) {
+ ret = PTR_ERR(dev->vpdma);
goto runtime_put;
+ }
vfd = &dev->vfd;
*vfd = vpe_videodev;
@@ -2081,18 +2195,7 @@ static struct platform_driver vpe_pdrv = {
},
};
-static void __exit vpe_exit(void)
-{
- platform_driver_unregister(&vpe_pdrv);
-}
-
-static int __init vpe_init(void)
-{
- return platform_driver_register(&vpe_pdrv);
-}
-
-module_init(vpe_init);
-module_exit(vpe_exit);
+module_platform_driver(vpe_pdrv);
MODULE_DESCRIPTION("TI VPE driver");
MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
diff --git a/drivers/media/platform/ti-vpe/vpe_regs.h b/drivers/media/platform/ti-vpe/vpe_regs.h
index ed214e828398..74283d79eae1 100644
--- a/drivers/media/platform/ti-vpe/vpe_regs.h
+++ b/drivers/media/platform/ti-vpe/vpe_regs.h
@@ -306,191 +306,4 @@
#define VPE_FMD_FRAME_DIFF_MASK 0x000fffff
#define VPE_FMD_FRAME_DIFF_SHIFT 0
-/* VPE scaler regs */
-#define VPE_SC_MP_SC0 0x0700
-#define VPE_INTERLACE_O (1 << 0)
-#define VPE_LINEAR (1 << 1)
-#define VPE_SC_BYPASS (1 << 2)
-#define VPE_INVT_FID (1 << 3)
-#define VPE_USE_RAV (1 << 4)
-#define VPE_ENABLE_EV (1 << 5)
-#define VPE_AUTO_HS (1 << 6)
-#define VPE_DCM_2X (1 << 7)
-#define VPE_DCM_4X (1 << 8)
-#define VPE_HP_BYPASS (1 << 9)
-#define VPE_INTERLACE_I (1 << 10)
-#define VPE_ENABLE_SIN2_VER_INTP (1 << 11)
-#define VPE_Y_PK_EN (1 << 14)
-#define VPE_TRIM (1 << 15)
-#define VPE_SELFGEN_FID (1 << 16)
-
-#define VPE_SC_MP_SC1 0x0704
-#define VPE_ROW_ACC_INC_MASK 0x07ffffff
-#define VPE_ROW_ACC_INC_SHIFT 0
-
-#define VPE_SC_MP_SC2 0x0708
-#define VPE_ROW_ACC_OFFSET_MASK 0x0fffffff
-#define VPE_ROW_ACC_OFFSET_SHIFT 0
-
-#define VPE_SC_MP_SC3 0x070c
-#define VPE_ROW_ACC_OFFSET_B_MASK 0x0fffffff
-#define VPE_ROW_ACC_OFFSET_B_SHIFT 0
-
-#define VPE_SC_MP_SC4 0x0710
-#define VPE_TAR_H_MASK 0x07ff
-#define VPE_TAR_H_SHIFT 0
-#define VPE_TAR_W_MASK 0x07ff
-#define VPE_TAR_W_SHIFT 12
-#define VPE_LIN_ACC_INC_U_MASK 0x07
-#define VPE_LIN_ACC_INC_U_SHIFT 24
-#define VPE_NLIN_ACC_INIT_U_MASK 0x07
-#define VPE_NLIN_ACC_INIT_U_SHIFT 28
-
-#define VPE_SC_MP_SC5 0x0714
-#define VPE_SRC_H_MASK 0x07ff
-#define VPE_SRC_H_SHIFT 0
-#define VPE_SRC_W_MASK 0x07ff
-#define VPE_SRC_W_SHIFT 12
-#define VPE_NLIN_ACC_INC_U_MASK 0x07
-#define VPE_NLIN_ACC_INC_U_SHIFT 24
-
-#define VPE_SC_MP_SC6 0x0718
-#define VPE_ROW_ACC_INIT_RAV_MASK 0x03ff
-#define VPE_ROW_ACC_INIT_RAV_SHIFT 0
-#define VPE_ROW_ACC_INIT_RAV_B_MASK 0x03ff
-#define VPE_ROW_ACC_INIT_RAV_B_SHIFT 10
-
-#define VPE_SC_MP_SC8 0x0720
-#define VPE_NLIN_LEFT_MASK 0x07ff
-#define VPE_NLIN_LEFT_SHIFT 0
-#define VPE_NLIN_RIGHT_MASK 0x07ff
-#define VPE_NLIN_RIGHT_SHIFT 12
-
-#define VPE_SC_MP_SC9 0x0724
-#define VPE_LIN_ACC_INC VPE_SC_MP_SC9
-
-#define VPE_SC_MP_SC10 0x0728
-#define VPE_NLIN_ACC_INIT VPE_SC_MP_SC10
-
-#define VPE_SC_MP_SC11 0x072c
-#define VPE_NLIN_ACC_INC VPE_SC_MP_SC11
-
-#define VPE_SC_MP_SC12 0x0730
-#define VPE_COL_ACC_OFFSET_MASK 0x01ffffff
-#define VPE_COL_ACC_OFFSET_SHIFT 0
-
-#define VPE_SC_MP_SC13 0x0734
-#define VPE_SC_FACTOR_RAV_MASK 0x03ff
-#define VPE_SC_FACTOR_RAV_SHIFT 0
-#define VPE_CHROMA_INTP_THR_MASK 0x03ff
-#define VPE_CHROMA_INTP_THR_SHIFT 12
-#define VPE_DELTA_CHROMA_THR_MASK 0x0f
-#define VPE_DELTA_CHROMA_THR_SHIFT 24
-
-#define VPE_SC_MP_SC17 0x0744
-#define VPE_EV_THR_MASK 0x03ff
-#define VPE_EV_THR_SHIFT 12
-#define VPE_DELTA_LUMA_THR_MASK 0x0f
-#define VPE_DELTA_LUMA_THR_SHIFT 24
-#define VPE_DELTA_EV_THR_MASK 0x0f
-#define VPE_DELTA_EV_THR_SHIFT 28
-
-#define VPE_SC_MP_SC18 0x0748
-#define VPE_HS_FACTOR_MASK 0x03ff
-#define VPE_HS_FACTOR_SHIFT 0
-#define VPE_CONF_DEFAULT_MASK 0x01ff
-#define VPE_CONF_DEFAULT_SHIFT 16
-
-#define VPE_SC_MP_SC19 0x074c
-#define VPE_HPF_COEFF0_MASK 0xff
-#define VPE_HPF_COEFF0_SHIFT 0
-#define VPE_HPF_COEFF1_MASK 0xff
-#define VPE_HPF_COEFF1_SHIFT 8
-#define VPE_HPF_COEFF2_MASK 0xff
-#define VPE_HPF_COEFF2_SHIFT 16
-#define VPE_HPF_COEFF3_MASK 0xff
-#define VPE_HPF_COEFF3_SHIFT 23
-
-#define VPE_SC_MP_SC20 0x0750
-#define VPE_HPF_COEFF4_MASK 0xff
-#define VPE_HPF_COEFF4_SHIFT 0
-#define VPE_HPF_COEFF5_MASK 0xff
-#define VPE_HPF_COEFF5_SHIFT 8
-#define VPE_HPF_NORM_SHIFT_MASK 0x07
-#define VPE_HPF_NORM_SHIFT_SHIFT 16
-#define VPE_NL_LIMIT_MASK 0x1ff
-#define VPE_NL_LIMIT_SHIFT 20
-
-#define VPE_SC_MP_SC21 0x0754
-#define VPE_NL_LO_THR_MASK 0x01ff
-#define VPE_NL_LO_THR_SHIFT 0
-#define VPE_NL_LO_SLOPE_MASK 0xff
-#define VPE_NL_LO_SLOPE_SHIFT 16
-
-#define VPE_SC_MP_SC22 0x0758
-#define VPE_NL_HI_THR_MASK 0x01ff
-#define VPE_NL_HI_THR_SHIFT 0
-#define VPE_NL_HI_SLOPE_SH_MASK 0x07
-#define VPE_NL_HI_SLOPE_SH_SHIFT 16
-
-#define VPE_SC_MP_SC23 0x075c
-#define VPE_GRADIENT_THR_MASK 0x07ff
-#define VPE_GRADIENT_THR_SHIFT 0
-#define VPE_GRADIENT_THR_RANGE_MASK 0x0f
-#define VPE_GRADIENT_THR_RANGE_SHIFT 12
-#define VPE_MIN_GY_THR_MASK 0xff
-#define VPE_MIN_GY_THR_SHIFT 16
-#define VPE_MIN_GY_THR_RANGE_MASK 0x0f
-#define VPE_MIN_GY_THR_RANGE_SHIFT 28
-
-#define VPE_SC_MP_SC24 0x0760
-#define VPE_ORG_H_MASK 0x07ff
-#define VPE_ORG_H_SHIFT 0
-#define VPE_ORG_W_MASK 0x07ff
-#define VPE_ORG_W_SHIFT 16
-
-#define VPE_SC_MP_SC25 0x0764
-#define VPE_OFF_H_MASK 0x07ff
-#define VPE_OFF_H_SHIFT 0
-#define VPE_OFF_W_MASK 0x07ff
-#define VPE_OFF_W_SHIFT 16
-
-/* VPE color space converter regs */
-#define VPE_CSC_CSC00 0x5700
-#define VPE_CSC_A0_MASK 0x1fff
-#define VPE_CSC_A0_SHIFT 0
-#define VPE_CSC_B0_MASK 0x1fff
-#define VPE_CSC_B0_SHIFT 16
-
-#define VPE_CSC_CSC01 0x5704
-#define VPE_CSC_C0_MASK 0x1fff
-#define VPE_CSC_C0_SHIFT 0
-#define VPE_CSC_A1_MASK 0x1fff
-#define VPE_CSC_A1_SHIFT 16
-
-#define VPE_CSC_CSC02 0x5708
-#define VPE_CSC_B1_MASK 0x1fff
-#define VPE_CSC_B1_SHIFT 0
-#define VPE_CSC_C1_MASK 0x1fff
-#define VPE_CSC_C1_SHIFT 16
-
-#define VPE_CSC_CSC03 0x570c
-#define VPE_CSC_A2_MASK 0x1fff
-#define VPE_CSC_A2_SHIFT 0
-#define VPE_CSC_B2_MASK 0x1fff
-#define VPE_CSC_B2_SHIFT 16
-
-#define VPE_CSC_CSC04 0x5710
-#define VPE_CSC_C2_MASK 0x1fff
-#define VPE_CSC_C2_SHIFT 0
-#define VPE_CSC_D0_MASK 0x0fff
-#define VPE_CSC_D0_SHIFT 16
-
-#define VPE_CSC_CSC05 0x5714
-#define VPE_CSC_D1_MASK 0x0fff
-#define VPE_CSC_D1_SHIFT 0
-#define VPE_CSC_D2_MASK 0x0fff
-#define VPE_CSC_D2_SHIFT 16
-#define VPE_CSC_BYPASS (1 << 28)
-
#endif
diff --git a/drivers/media/platform/vsp1/Makefile b/drivers/media/platform/vsp1/Makefile
index 4da226169e15..151cecd0ea25 100644
--- a/drivers/media/platform/vsp1/Makefile
+++ b/drivers/media/platform/vsp1/Makefile
@@ -1,5 +1,6 @@
vsp1-y := vsp1_drv.o vsp1_entity.o vsp1_video.o
vsp1-y += vsp1_rpf.o vsp1_rwpf.o vsp1_wpf.o
-vsp1-y += vsp1_lif.o vsp1_uds.o
+vsp1-y += vsp1_hsit.o vsp1_lif.o vsp1_lut.o
+vsp1-y += vsp1_sru.o vsp1_uds.o
obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1.o
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
index d6c6ecd039ff..94d1b02680c5 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -28,8 +28,11 @@ struct clk;
struct device;
struct vsp1_platform_data;
+struct vsp1_hsit;
struct vsp1_lif;
+struct vsp1_lut;
struct vsp1_rwpf;
+struct vsp1_sru;
struct vsp1_uds;
#define VPS1_MAX_RPF 5
@@ -47,8 +50,12 @@ struct vsp1_device {
struct mutex lock;
int ref_count;
+ struct vsp1_hsit *hsi;
+ struct vsp1_hsit *hst;
struct vsp1_lif *lif;
+ struct vsp1_lut *lut;
struct vsp1_rwpf *rpf[VPS1_MAX_RPF];
+ struct vsp1_sru *sru;
struct vsp1_uds *uds[VPS1_MAX_UDS];
struct vsp1_rwpf *wpf[VPS1_MAX_WPF];
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index d16bf0f41e24..0df0a994e575 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -20,8 +20,11 @@
#include <linux/videodev2.h>
#include "vsp1.h"
+#include "vsp1_hsit.h"
#include "vsp1_lif.h"
+#include "vsp1_lut.h"
#include "vsp1_rwpf.h"
+#include "vsp1_sru.h"
#include "vsp1_uds.h"
/* -----------------------------------------------------------------------------
@@ -152,6 +155,22 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
}
/* Instantiate all the entities. */
+ vsp1->hsi = vsp1_hsit_create(vsp1, true);
+ if (IS_ERR(vsp1->hsi)) {
+ ret = PTR_ERR(vsp1->hsi);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hsi->entity.list_dev, &vsp1->entities);
+
+ vsp1->hst = vsp1_hsit_create(vsp1, false);
+ if (IS_ERR(vsp1->hst)) {
+ ret = PTR_ERR(vsp1->hst);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
+
if (vsp1->pdata->features & VSP1_HAS_LIF) {
vsp1->lif = vsp1_lif_create(vsp1);
if (IS_ERR(vsp1->lif)) {
@@ -162,6 +181,16 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->lif->entity.list_dev, &vsp1->entities);
}
+ if (vsp1->pdata->features & VSP1_HAS_LUT) {
+ vsp1->lut = vsp1_lut_create(vsp1);
+ if (IS_ERR(vsp1->lut)) {
+ ret = PTR_ERR(vsp1->lut);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->lut->entity.list_dev, &vsp1->entities);
+ }
+
for (i = 0; i < vsp1->pdata->rpf_count; ++i) {
struct vsp1_rwpf *rpf;
@@ -175,6 +204,16 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&rpf->entity.list_dev, &vsp1->entities);
}
+ if (vsp1->pdata->features & VSP1_HAS_SRU) {
+ vsp1->sru = vsp1_sru_create(vsp1);
+ if (IS_ERR(vsp1->sru)) {
+ ret = PTR_ERR(vsp1->sru);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->sru->entity.list_dev, &vsp1->entities);
+ }
+
for (i = 0; i < vsp1->pdata->uds_count; ++i) {
struct vsp1_uds *uds;
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index 9028f9d524f4..0226e47df6d9 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -15,6 +15,7 @@
#include <linux/gfp.h>
#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>
#include "vsp1.h"
@@ -122,12 +123,16 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
unsigned int id;
unsigned int reg;
} routes[] = {
+ { VI6_DPR_NODE_HSI, VI6_DPR_HSI_ROUTE },
+ { VI6_DPR_NODE_HST, VI6_DPR_HST_ROUTE },
{ VI6_DPR_NODE_LIF, 0 },
+ { VI6_DPR_NODE_LUT, VI6_DPR_LUT_ROUTE },
{ VI6_DPR_NODE_RPF(0), VI6_DPR_RPF_ROUTE(0) },
{ VI6_DPR_NODE_RPF(1), VI6_DPR_RPF_ROUTE(1) },
{ VI6_DPR_NODE_RPF(2), VI6_DPR_RPF_ROUTE(2) },
{ VI6_DPR_NODE_RPF(3), VI6_DPR_RPF_ROUTE(3) },
{ VI6_DPR_NODE_RPF(4), VI6_DPR_RPF_ROUTE(4) },
+ { VI6_DPR_NODE_SRU, VI6_DPR_SRU_ROUTE },
{ VI6_DPR_NODE_UDS(0), VI6_DPR_UDS_ROUTE(0) },
{ VI6_DPR_NODE_UDS(1), VI6_DPR_UDS_ROUTE(1) },
{ VI6_DPR_NODE_UDS(2), VI6_DPR_UDS_ROUTE(2) },
@@ -177,5 +182,7 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
void vsp1_entity_destroy(struct vsp1_entity *entity)
{
+ if (entity->subdev.ctrl_handler)
+ v4l2_ctrl_handler_free(entity->subdev.ctrl_handler);
media_entity_cleanup(&entity->subdev.entity);
}
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index c4feab2cbb81..e152798d7f38 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -20,8 +20,12 @@
struct vsp1_device;
enum vsp1_entity_type {
+ VSP1_ENTITY_HSI,
+ VSP1_ENTITY_HST,
VSP1_ENTITY_LIF,
+ VSP1_ENTITY_LUT,
VSP1_ENTITY_RPF,
+ VSP1_ENTITY_SRU,
VSP1_ENTITY_UDS,
VSP1_ENTITY_WPF,
};
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
new file mode 100644
index 000000000000..285485350d82
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -0,0 +1,222 @@
+/*
+ * vsp1_hsit.c -- R-Car VSP1 Hue Saturation value (Inverse) Transform
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_hsit.h"
+
+#define HSIT_MIN_SIZE 4U
+#define HSIT_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_hsit_read(struct vsp1_hsit *hsit, u32 reg)
+{
+ return vsp1_read(hsit->entity.vsp1, reg);
+}
+
+static inline void vsp1_hsit_write(struct vsp1_hsit *hsit, u32 reg, u32 data)
+{
+ vsp1_write(hsit->entity.vsp1, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+static int hsit_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct vsp1_hsit *hsit = to_hsit(subdev);
+
+ if (!enable)
+ return 0;
+
+ if (hsit->inverse)
+ vsp1_hsit_write(hsit, VI6_HSI_CTRL, VI6_HSI_CTRL_EN);
+ else
+ vsp1_hsit_write(hsit, VI6_HST_CTRL, VI6_HST_CTRL_EN);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int hsit_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct vsp1_hsit *hsit = to_hsit(subdev);
+
+ if (code->index > 0)
+ return -EINVAL;
+
+ if ((code->pad == HSIT_PAD_SINK && !hsit->inverse) ||
+ (code->pad == HSIT_PAD_SOURCE && hsit->inverse))
+ code->code = V4L2_MBUS_FMT_ARGB8888_1X32;
+ else
+ code->code = V4L2_MBUS_FMT_AHSV8888_1X32;
+
+ return 0;
+}
+
+static int hsit_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(fh, fse->pad);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ if (fse->pad == HSIT_PAD_SINK) {
+ fse->min_width = HSIT_MIN_SIZE;
+ fse->max_width = HSIT_MAX_SIZE;
+ fse->min_height = HSIT_MIN_SIZE;
+ fse->max_height = HSIT_MAX_SIZE;
+ } else {
+ /* The size on the source pad is fixed and always identical to
+ * the size on the sink pad.
+ */
+ fse->min_width = format->width;
+ fse->max_width = format->width;
+ fse->min_height = format->height;
+ fse->max_height = format->height;
+ }
+
+ return 0;
+}
+
+static int hsit_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_hsit *hsit = to_hsit(subdev);
+
+ fmt->format = *vsp1_entity_get_pad_format(&hsit->entity, fh, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static int hsit_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_hsit *hsit = to_hsit(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = vsp1_entity_get_pad_format(&hsit->entity, fh, fmt->pad,
+ fmt->which);
+
+ if (fmt->pad == HSIT_PAD_SOURCE) {
+ /* The HST and HSI output format code and resolution can't be
+ * modified.
+ */
+ fmt->format = *format;
+ return 0;
+ }
+
+ format->code = hsit->inverse ? V4L2_MBUS_FMT_AHSV8888_1X32
+ : V4L2_MBUS_FMT_ARGB8888_1X32;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ HSIT_MIN_SIZE, HSIT_MAX_SIZE);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ HSIT_MIN_SIZE, HSIT_MAX_SIZE);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&hsit->entity, fh, HSIT_PAD_SOURCE,
+ fmt->which);
+ *format = fmt->format;
+ format->code = hsit->inverse ? V4L2_MBUS_FMT_ARGB8888_1X32
+ : V4L2_MBUS_FMT_AHSV8888_1X32;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static struct v4l2_subdev_video_ops hsit_video_ops = {
+ .s_stream = hsit_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops hsit_pad_ops = {
+ .enum_mbus_code = hsit_enum_mbus_code,
+ .enum_frame_size = hsit_enum_frame_size,
+ .get_fmt = hsit_get_format,
+ .set_fmt = hsit_set_format,
+};
+
+static struct v4l2_subdev_ops hsit_ops = {
+ .video = &hsit_video_ops,
+ .pad = &hsit_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse)
+{
+ struct v4l2_subdev *subdev;
+ struct vsp1_hsit *hsit;
+ int ret;
+
+ hsit = devm_kzalloc(vsp1->dev, sizeof(*hsit), GFP_KERNEL);
+ if (hsit == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ hsit->inverse = inverse;
+
+ if (inverse) {
+ hsit->entity.type = VSP1_ENTITY_HSI;
+ hsit->entity.id = VI6_DPR_NODE_HSI;
+ } else {
+ hsit->entity.type = VSP1_ENTITY_HST;
+ hsit->entity.id = VI6_DPR_NODE_HST;
+ }
+
+ ret = vsp1_entity_init(vsp1, &hsit->entity, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the V4L2 subdev. */
+ subdev = &hsit->entity.subdev;
+ v4l2_subdev_init(subdev, &hsit_ops);
+
+ subdev->entity.ops = &vsp1_media_ops;
+ subdev->internal_ops = &vsp1_subdev_internal_ops;
+ snprintf(subdev->name, sizeof(subdev->name), "%s %s",
+ dev_name(vsp1->dev), inverse ? "hsi" : "hst");
+ v4l2_set_subdevdata(subdev, hsit);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ vsp1_entity_init_formats(subdev, NULL);
+
+ return hsit;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.h b/drivers/media/platform/vsp1/vsp1_hsit.h
new file mode 100644
index 000000000000..82f1c8426900
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_hsit.h
@@ -0,0 +1,38 @@
+/*
+ * vsp1_hsit.h -- R-Car VSP1 Hue Saturation value (Inverse) Transform
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_HSIT_H__
+#define __VSP1_HSIT_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define HSIT_PAD_SINK 0
+#define HSIT_PAD_SOURCE 1
+
+struct vsp1_hsit {
+ struct vsp1_entity entity;
+ bool inverse;
+};
+
+static inline struct vsp1_hsit *to_hsit(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_hsit, entity.subdev);
+}
+
+struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse);
+
+#endif /* __VSP1_HSIT_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c
new file mode 100644
index 000000000000..4e9dc7c86ef8
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_lut.c
@@ -0,0 +1,252 @@
+/*
+ * vsp1_lut.c -- R-Car VSP1 Look-Up Table
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/vsp1.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_lut.h"
+
+#define LUT_MIN_SIZE 4U
+#define LUT_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_lut_read(struct vsp1_lut *lut, u32 reg)
+{
+ return vsp1_read(lut->entity.vsp1, reg);
+}
+
+static inline void vsp1_lut_write(struct vsp1_lut *lut, u32 reg, u32 data)
+{
+ vsp1_write(lut->entity.vsp1, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+static void lut_configure(struct vsp1_lut *lut, struct vsp1_lut_config *config)
+{
+ memcpy_toio(lut->entity.vsp1->mmio + VI6_LUT_TABLE, config->lut,
+ sizeof(config->lut));
+}
+
+static long lut_ioctl(struct v4l2_subdev *subdev, unsigned int cmd, void *arg)
+{
+ struct vsp1_lut *lut = to_lut(subdev);
+
+ switch (cmd) {
+ case VIDIOC_VSP1_LUT_CONFIG:
+ lut_configure(lut, arg);
+ return 0;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Video Operations
+ */
+
+static int lut_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct vsp1_lut *lut = to_lut(subdev);
+
+ if (!enable)
+ return 0;
+
+ vsp1_lut_write(lut, VI6_LUT_CTRL, VI6_LUT_CTRL_EN);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int lut_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const unsigned int codes[] = {
+ V4L2_MBUS_FMT_ARGB8888_1X32,
+ V4L2_MBUS_FMT_AHSV8888_1X32,
+ V4L2_MBUS_FMT_AYUV8_1X32,
+ };
+ struct v4l2_mbus_framefmt *format;
+
+ if (code->pad == LUT_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(codes))
+ return -EINVAL;
+
+ code->code = codes[code->index];
+ } else {
+ /* The LUT can't perform format conversion, the sink format is
+ * always identical to the source format.
+ */
+ if (code->index)
+ return -EINVAL;
+
+ format = v4l2_subdev_get_try_format(fh, LUT_PAD_SINK);
+ code->code = format->code;
+ }
+
+ return 0;
+}
+
+static int lut_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(fh, fse->pad);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ if (fse->pad == LUT_PAD_SINK) {
+ fse->min_width = LUT_MIN_SIZE;
+ fse->max_width = LUT_MAX_SIZE;
+ fse->min_height = LUT_MIN_SIZE;
+ fse->max_height = LUT_MAX_SIZE;
+ } else {
+ /* The size on the source pad is fixed and always identical to
+ * the size on the sink pad.
+ */
+ fse->min_width = format->width;
+ fse->max_width = format->width;
+ fse->min_height = format->height;
+ fse->max_height = format->height;
+ }
+
+ return 0;
+}
+
+static int lut_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_lut *lut = to_lut(subdev);
+
+ fmt->format = *vsp1_entity_get_pad_format(&lut->entity, fh, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static int lut_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_lut *lut = to_lut(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Default to YUV if the requested format is not supported. */
+ if (fmt->format.code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
+ fmt->format.code != V4L2_MBUS_FMT_AHSV8888_1X32 &&
+ fmt->format.code != V4L2_MBUS_FMT_AYUV8_1X32)
+ fmt->format.code = V4L2_MBUS_FMT_AYUV8_1X32;
+
+ format = vsp1_entity_get_pad_format(&lut->entity, fh, fmt->pad,
+ fmt->which);
+
+ if (fmt->pad == LUT_PAD_SOURCE) {
+ /* The LUT output format can't be modified. */
+ fmt->format = *format;
+ return 0;
+ }
+
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ LUT_MIN_SIZE, LUT_MAX_SIZE);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ LUT_MIN_SIZE, LUT_MAX_SIZE);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&lut->entity, fh, LUT_PAD_SOURCE,
+ fmt->which);
+ *format = fmt->format;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static struct v4l2_subdev_core_ops lut_core_ops = {
+ .ioctl = lut_ioctl,
+};
+
+static struct v4l2_subdev_video_ops lut_video_ops = {
+ .s_stream = lut_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops lut_pad_ops = {
+ .enum_mbus_code = lut_enum_mbus_code,
+ .enum_frame_size = lut_enum_frame_size,
+ .get_fmt = lut_get_format,
+ .set_fmt = lut_set_format,
+};
+
+static struct v4l2_subdev_ops lut_ops = {
+ .core = &lut_core_ops,
+ .video = &lut_video_ops,
+ .pad = &lut_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1)
+{
+ struct v4l2_subdev *subdev;
+ struct vsp1_lut *lut;
+ int ret;
+
+ lut = devm_kzalloc(vsp1->dev, sizeof(*lut), GFP_KERNEL);
+ if (lut == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ lut->entity.type = VSP1_ENTITY_LUT;
+ lut->entity.id = VI6_DPR_NODE_LUT;
+
+ ret = vsp1_entity_init(vsp1, &lut->entity, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the V4L2 subdev. */
+ subdev = &lut->entity.subdev;
+ v4l2_subdev_init(subdev, &lut_ops);
+
+ subdev->entity.ops = &vsp1_media_ops;
+ subdev->internal_ops = &vsp1_subdev_internal_ops;
+ snprintf(subdev->name, sizeof(subdev->name), "%s lut",
+ dev_name(vsp1->dev));
+ v4l2_set_subdevdata(subdev, lut);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ vsp1_entity_init_formats(subdev, NULL);
+
+ return lut;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_lut.h b/drivers/media/platform/vsp1/vsp1_lut.h
new file mode 100644
index 000000000000..f92ffb867350
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_lut.h
@@ -0,0 +1,38 @@
+/*
+ * vsp1_lut.h -- R-Car VSP1 Look-Up Table
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_LUT_H__
+#define __VSP1_LUT_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define LUT_PAD_SINK 0
+#define LUT_PAD_SOURCE 1
+
+struct vsp1_lut {
+ struct vsp1_entity entity;
+ u32 lut[256];
+};
+
+static inline struct vsp1_lut *to_lut(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_lut, entity.subdev);
+}
+
+struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_LUT_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index 1d3304f1365b..28650806c20f 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -336,8 +336,21 @@
*/
#define VI6_SRU_CTRL0 0x2200
+#define VI6_SRU_CTRL0_PARAM0_SHIFT 16
+#define VI6_SRU_CTRL0_PARAM1_SHIFT 8
+#define VI6_SRU_CTRL0_MODE_UPSCALE (4 << 4)
+#define VI6_SRU_CTRL0_PARAM2 (1 << 3)
+#define VI6_SRU_CTRL0_PARAM3 (1 << 2)
+#define VI6_SRU_CTRL0_PARAM4 (1 << 1)
+#define VI6_SRU_CTRL0_EN (1 << 0)
+
#define VI6_SRU_CTRL1 0x2204
+#define VI6_SRU_CTRL1_PARAM5 0x7ff
+
#define VI6_SRU_CTRL2 0x2208
+#define VI6_SRU_CTRL2_PARAM6_SHIFT 16
+#define VI6_SRU_CTRL2_PARAM7_SHIFT 8
+#define VI6_SRU_CTRL2_PARAM8_SHIFT 0
/* -----------------------------------------------------------------------------
* UDS Control Registers
@@ -412,6 +425,7 @@
*/
#define VI6_LUT_CTRL 0x2800
+#define VI6_LUT_CTRL_EN (1 << 0)
/* -----------------------------------------------------------------------------
* CLU Control Registers
@@ -424,12 +438,14 @@
*/
#define VI6_HST_CTRL 0x2a00
+#define VI6_HST_CTRL_EN (1 << 0)
/* -----------------------------------------------------------------------------
* HSI Control Registers
*/
#define VI6_HSI_CTRL 0x2b00
+#define VI6_HSI_CTRL_EN (1 << 0)
/* -----------------------------------------------------------------------------
* BRU Control Registers
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index 254871d3423e..bce2be5466b9 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -47,25 +47,36 @@ static int rpf_s_stream(struct v4l2_subdev *subdev, int enable)
struct vsp1_rwpf *rpf = to_rwpf(subdev);
const struct vsp1_format_info *fmtinfo = rpf->video.fmtinfo;
const struct v4l2_pix_format_mplane *format = &rpf->video.format;
+ const struct v4l2_rect *crop = &rpf->crop;
u32 pstride;
u32 infmt;
if (!enable)
return 0;
- /* Source size and stride. Cropping isn't supported yet. */
+ /* Source size, stride and crop offsets.
+ *
+ * The crop offsets correspond to the location of the crop rectangle top
+ * left corner in the plane buffer. Only two offsets are needed, as
+ * planes 2 and 3 always have identical strides.
+ */
vsp1_rpf_write(rpf, VI6_RPF_SRC_BSIZE,
- (format->width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
- (format->height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
+ (crop->width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
+ (crop->height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
vsp1_rpf_write(rpf, VI6_RPF_SRC_ESIZE,
- (format->width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
- (format->height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));
+ (crop->width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
+ (crop->height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));
+ rpf->offsets[0] = crop->top * format->plane_fmt[0].bytesperline
+ + crop->left * fmtinfo->bpp[0] / 8;
pstride = format->plane_fmt[0].bytesperline
<< VI6_RPF_SRCM_PSTRIDE_Y_SHIFT;
- if (format->num_planes > 1)
+ if (format->num_planes > 1) {
+ rpf->offsets[1] = crop->top * format->plane_fmt[1].bytesperline
+ + crop->left * fmtinfo->bpp[1] / 8;
pstride |= format->plane_fmt[1].bytesperline
<< VI6_RPF_SRCM_PSTRIDE_C_SHIFT;
+ }
vsp1_rpf_write(rpf, VI6_RPF_SRCM_PSTRIDE, pstride);
@@ -113,6 +124,8 @@ static struct v4l2_subdev_pad_ops rpf_pad_ops = {
.enum_frame_size = vsp1_rwpf_enum_frame_size,
.get_fmt = vsp1_rwpf_get_format,
.set_fmt = vsp1_rwpf_set_format,
+ .get_selection = vsp1_rwpf_get_selection,
+ .set_selection = vsp1_rwpf_set_selection,
};
static struct v4l2_subdev_ops rpf_ops = {
@@ -129,11 +142,14 @@ static void rpf_vdev_queue(struct vsp1_video *video,
{
struct vsp1_rwpf *rpf = container_of(video, struct vsp1_rwpf, video);
- vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y, buf->addr[0]);
+ vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y,
+ buf->addr[0] + rpf->offsets[0]);
if (buf->buf.num_planes > 1)
- vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0, buf->addr[1]);
+ vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0,
+ buf->addr[1] + rpf->offsets[1]);
if (buf->buf.num_planes > 2)
- vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1, buf->addr[2]);
+ vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1,
+ buf->addr[2] + rpf->offsets[1]);
}
static const struct vsp1_video_operations rpf_vdev_ops = {
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index 9752d5516ceb..782f770daee5 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -71,6 +71,19 @@ int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
return 0;
}
+static struct v4l2_rect *
+vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf, struct v4l2_subdev_fh *fh, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_crop(fh, RWPF_PAD_SINK);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &rwpf->crop;
+ default:
+ return NULL;
+ }
+}
+
int vsp1_rwpf_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
struct v4l2_subdev_format *fmt)
{
@@ -87,6 +100,7 @@ int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
/* Default to YUV if the requested format is not supported. */
if (fmt->format.code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
@@ -115,6 +129,13 @@ int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
fmt->format = *format;
+ /* Update the sink crop rectangle. */
+ crop = vsp1_rwpf_get_crop(rwpf, fh, fmt->which);
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+
/* Propagate the format to the source pad. */
format = vsp1_entity_get_pad_format(&rwpf->entity, fh, RWPF_PAD_SOURCE,
fmt->which);
@@ -122,3 +143,78 @@ int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
return 0;
}
+
+int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Cropping is implemented on the sink pad. */
+ if (sel->pad != RWPF_PAD_SINK)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *vsp1_rwpf_get_crop(rwpf, fh, sel->which);
+ break;
+
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ format = vsp1_entity_get_pad_format(&rwpf->entity, fh,
+ RWPF_PAD_SINK, sel->which);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = format->width;
+ sel->r.height = format->height;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+
+ /* Cropping is implemented on the sink pad. */
+ if (sel->pad != RWPF_PAD_SINK)
+ return -EINVAL;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ /* Make sure the crop rectangle is entirely contained in the image. The
+ * WPF top and left offsets are limited to 255.
+ */
+ format = vsp1_entity_get_pad_format(&rwpf->entity, fh, RWPF_PAD_SINK,
+ sel->which);
+ sel->r.left = min_t(unsigned int, sel->r.left, format->width - 2);
+ sel->r.top = min_t(unsigned int, sel->r.top, format->height - 2);
+ if (rwpf->entity.type == VSP1_ENTITY_WPF) {
+ sel->r.left = min_t(unsigned int, sel->r.left, 255);
+ sel->r.top = min_t(unsigned int, sel->r.top, 255);
+ }
+ sel->r.width = min_t(unsigned int, sel->r.width,
+ format->width - sel->r.left);
+ sel->r.height = min_t(unsigned int, sel->r.height,
+ format->height - sel->r.top);
+
+ crop = vsp1_rwpf_get_crop(rwpf, fh, sel->which);
+ *crop = sel->r;
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&rwpf->entity, fh, RWPF_PAD_SOURCE,
+ sel->which);
+ format->width = crop->width;
+ format->height = crop->height;
+
+ return 0;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index c182d85f36b3..6cbdb547470b 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -29,6 +29,10 @@ struct vsp1_rwpf {
unsigned int max_width;
unsigned int max_height;
+
+ struct v4l2_rect crop;
+
+ unsigned int offsets[2];
};
static inline struct vsp1_rwpf *to_rwpf(struct v4l2_subdev *subdev)
@@ -49,5 +53,11 @@ int vsp1_rwpf_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
struct v4l2_subdev_format *fmt);
int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
struct v4l2_subdev_format *fmt);
+int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel);
+int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel);
#endif /* __VSP1_RWPF_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
new file mode 100644
index 000000000000..7ab1a0b2d656
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -0,0 +1,356 @@
+/*
+ * vsp1_sru.c -- R-Car VSP1 Super Resolution Unit
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_sru.h"
+
+#define SRU_MIN_SIZE 4U
+#define SRU_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_sru_read(struct vsp1_sru *sru, u32 reg)
+{
+ return vsp1_read(sru->entity.vsp1, reg);
+}
+
+static inline void vsp1_sru_write(struct vsp1_sru *sru, u32 reg, u32 data)
+{
+ vsp1_write(sru->entity.vsp1, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+#define V4L2_CID_VSP1_SRU_INTENSITY (V4L2_CID_USER_BASE + 1)
+
+static int sru_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_sru *sru =
+ container_of(ctrl->handler, struct vsp1_sru, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VSP1_SRU_INTENSITY:
+ sru->intensity = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops sru_ctrl_ops = {
+ .s_ctrl = sru_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config sru_intensity_control = {
+ .ops = &sru_ctrl_ops,
+ .id = V4L2_CID_VSP1_SRU_INTENSITY,
+ .name = "Intensity",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 6,
+ .step = 1,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+struct vsp1_sru_param {
+ u32 ctrl0;
+ u32 ctrl2;
+};
+
+#define VI6_SRU_CTRL0_PARAMS(p0, p1) \
+ (((p0) << VI6_SRU_CTRL0_PARAM0_SHIFT) | \
+ ((p1) << VI6_SRU_CTRL0_PARAM1_SHIFT))
+
+#define VI6_SRU_CTRL2_PARAMS(p6, p7, p8) \
+ (((p6) << VI6_SRU_CTRL2_PARAM6_SHIFT) | \
+ ((p7) << VI6_SRU_CTRL2_PARAM7_SHIFT) | \
+ ((p8) << VI6_SRU_CTRL2_PARAM8_SHIFT))
+
+static const struct vsp1_sru_param vsp1_sru_params[] = {
+ {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(256, 4) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(24, 40, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(256, 4) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(8, 16, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(384, 5) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(36, 60, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(384, 5) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(12, 27, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(511, 6) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(48, 80, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(511, 6) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(16, 36, 255),
+ },
+};
+
+static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct vsp1_sru *sru = to_sru(subdev);
+ const struct vsp1_sru_param *param;
+ struct v4l2_mbus_framefmt *input;
+ struct v4l2_mbus_framefmt *output;
+ bool upscale;
+ u32 ctrl0;
+
+ if (!enable)
+ return 0;
+
+ input = &sru->entity.formats[SRU_PAD_SINK];
+ output = &sru->entity.formats[SRU_PAD_SOURCE];
+ upscale = input->width != output->width;
+ param = &vsp1_sru_params[sru->intensity];
+
+ if (input->code == V4L2_MBUS_FMT_ARGB8888_1X32)
+ ctrl0 = VI6_SRU_CTRL0_PARAM2 | VI6_SRU_CTRL0_PARAM3
+ | VI6_SRU_CTRL0_PARAM4;
+ else
+ ctrl0 = VI6_SRU_CTRL0_PARAM3;
+
+ vsp1_sru_write(sru, VI6_SRU_CTRL0, param->ctrl0 | ctrl0 |
+ (upscale ? VI6_SRU_CTRL0_MODE_UPSCALE : 0));
+ vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
+ vsp1_sru_write(sru, VI6_SRU_CTRL2, param->ctrl2);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const unsigned int codes[] = {
+ V4L2_MBUS_FMT_ARGB8888_1X32,
+ V4L2_MBUS_FMT_AYUV8_1X32,
+ };
+ struct v4l2_mbus_framefmt *format;
+
+ if (code->pad == SRU_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(codes))
+ return -EINVAL;
+
+ code->code = codes[code->index];
+ } else {
+ /* The SRU can't perform format conversion, the sink format is
+ * always identical to the source format.
+ */
+ if (code->index)
+ return -EINVAL;
+
+ format = v4l2_subdev_get_try_format(fh, SRU_PAD_SINK);
+ code->code = format->code;
+ }
+
+ return 0;
+}
+
+static int sru_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(fh, SRU_PAD_SINK);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ if (fse->pad == SRU_PAD_SINK) {
+ fse->min_width = SRU_MIN_SIZE;
+ fse->max_width = SRU_MAX_SIZE;
+ fse->min_height = SRU_MIN_SIZE;
+ fse->max_height = SRU_MAX_SIZE;
+ } else {
+ fse->min_width = format->width;
+ fse->min_height = format->height;
+ if (format->width <= SRU_MAX_SIZE / 2 &&
+ format->height <= SRU_MAX_SIZE / 2) {
+ fse->max_width = format->width * 2;
+ fse->max_height = format->height * 2;
+ } else {
+ fse->max_width = format->width;
+ fse->max_height = format->height;
+ }
+ }
+
+ return 0;
+}
+
+static int sru_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_sru *sru = to_sru(subdev);
+
+ fmt->format = *vsp1_entity_get_pad_format(&sru->entity, fh, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static void sru_try_format(struct vsp1_sru *sru, struct v4l2_subdev_fh *fh,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ struct v4l2_mbus_framefmt *format;
+ unsigned int input_area;
+ unsigned int output_area;
+
+ switch (pad) {
+ case SRU_PAD_SINK:
+ /* Default to YUV if the requested format is not supported. */
+ if (fmt->code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
+ fmt->code != V4L2_MBUS_FMT_AYUV8_1X32)
+ fmt->code = V4L2_MBUS_FMT_AYUV8_1X32;
+
+ fmt->width = clamp(fmt->width, SRU_MIN_SIZE, SRU_MAX_SIZE);
+ fmt->height = clamp(fmt->height, SRU_MIN_SIZE, SRU_MAX_SIZE);
+ break;
+
+ case SRU_PAD_SOURCE:
+ /* The SRU can't perform format conversion. */
+ format = vsp1_entity_get_pad_format(&sru->entity, fh,
+ SRU_PAD_SINK, which);
+ fmt->code = format->code;
+
+ /* We can upscale by 2 in both directions, but not independently.
+ * Compare the input and output rectangle areas (avoiding
+ * integer overflows on the output): if the requested output
+ * area is larger than 1.5^2 times the input area, upscale by two,
+ * otherwise don't scale.
+ */
+ input_area = format->width * format->height;
+ output_area = min(fmt->width, SRU_MAX_SIZE)
+ * min(fmt->height, SRU_MAX_SIZE);
+
+ if (fmt->width <= SRU_MAX_SIZE / 2 &&
+ fmt->height <= SRU_MAX_SIZE / 2 &&
+ output_area > input_area * 9 / 4) {
+ fmt->width = format->width * 2;
+ fmt->height = format->height * 2;
+ } else {
+ fmt->width = format->width;
+ fmt->height = format->height;
+ }
+ break;
+ }
+
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+static int sru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_sru *sru = to_sru(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ sru_try_format(sru, fh, fmt->pad, &fmt->format, fmt->which);
+
+ format = vsp1_entity_get_pad_format(&sru->entity, fh, fmt->pad,
+ fmt->which);
+ *format = fmt->format;
+
+ if (fmt->pad == SRU_PAD_SINK) {
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&sru->entity, fh,
+ SRU_PAD_SOURCE, fmt->which);
+ *format = fmt->format;
+
+ sru_try_format(sru, fh, SRU_PAD_SOURCE, format, fmt->which);
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static struct v4l2_subdev_video_ops sru_video_ops = {
+ .s_stream = sru_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops sru_pad_ops = {
+ .enum_mbus_code = sru_enum_mbus_code,
+ .enum_frame_size = sru_enum_frame_size,
+ .get_fmt = sru_get_format,
+ .set_fmt = sru_set_format,
+};
+
+static struct v4l2_subdev_ops sru_ops = {
+ .video = &sru_video_ops,
+ .pad = &sru_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1)
+{
+ struct v4l2_subdev *subdev;
+ struct vsp1_sru *sru;
+ int ret;
+
+ sru = devm_kzalloc(vsp1->dev, sizeof(*sru), GFP_KERNEL);
+ if (sru == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ sru->entity.type = VSP1_ENTITY_SRU;
+ sru->entity.id = VI6_DPR_NODE_SRU;
+
+ ret = vsp1_entity_init(vsp1, &sru->entity, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the V4L2 subdev. */
+ subdev = &sru->entity.subdev;
+ v4l2_subdev_init(subdev, &sru_ops);
+
+ subdev->entity.ops = &vsp1_media_ops;
+ subdev->internal_ops = &vsp1_subdev_internal_ops;
+ snprintf(subdev->name, sizeof(subdev->name), "%s sru",
+ dev_name(vsp1->dev));
+ v4l2_set_subdevdata(subdev, sru);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ vsp1_entity_init_formats(subdev, NULL);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&sru->ctrls, 1);
+ v4l2_ctrl_new_custom(&sru->ctrls, &sru_intensity_control, NULL);
+ v4l2_ctrl_handler_setup(&sru->ctrls);
+ sru->entity.subdev.ctrl_handler = &sru->ctrls;
+
+ return sru;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_sru.h b/drivers/media/platform/vsp1/vsp1_sru.h
new file mode 100644
index 000000000000..381870b74780
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_sru.h
@@ -0,0 +1,41 @@
+/*
+ * vsp1_sru.h -- R-Car VSP1 Super Resolution Unit
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_SRU_H__
+#define __VSP1_SRU_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define SRU_PAD_SINK 0
+#define SRU_PAD_SOURCE 1
+
+struct vsp1_sru {
+ struct vsp1_entity entity;
+
+ struct v4l2_ctrl_handler ctrls;
+ unsigned int intensity;
+};
+
+static inline struct vsp1_sru *to_sru(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_sru, entity.subdev);
+}
+
+struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_SRU_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 4b0ac07af662..b4687a834f85 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -488,11 +488,17 @@ static bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe)
* This function completes the current buffer by filling its sequence number,
* time stamp and payload size, and hands it back to the videobuf core.
*
+ * When operating in DU output mode (deep pipeline to the DU through the LIF),
+ * the VSP1 needs to constantly supply frames to the display. In that case, if
+ * no other buffer is queued, reuse the one that has just been processed instead
+ * of handing it back to the videobuf core.
+ *
* Return the next queued buffer or NULL if the queue is empty.
*/
static struct vsp1_video_buffer *
vsp1_video_complete_buffer(struct vsp1_video *video)
{
+ struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
struct vsp1_video_buffer *next = NULL;
struct vsp1_video_buffer *done;
unsigned long flags;
@@ -507,6 +513,13 @@ vsp1_video_complete_buffer(struct vsp1_video *video)
done = list_first_entry(&video->irqqueue,
struct vsp1_video_buffer, queue);
+
+ /* In DU output mode reuse the buffer if the list is singular. */
+ if (pipe->lif && list_is_singular(&video->irqqueue)) {
+ spin_unlock_irqrestore(&video->irqlock, flags);
+ return done;
+ }
+
list_del(&done->queue);
if (!list_empty(&video->irqqueue))
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index db4b85ee05fc..7baed81ff005 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -48,8 +48,7 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
struct vsp1_pipeline *pipe =
to_vsp1_pipeline(&wpf->entity.subdev.entity);
struct vsp1_device *vsp1 = wpf->entity.vsp1;
- const struct v4l2_mbus_framefmt *format =
- &wpf->entity.formats[RWPF_PAD_SOURCE];
+ const struct v4l2_rect *crop = &wpf->crop;
unsigned int i;
u32 srcrpf = 0;
u32 outfmt = 0;
@@ -68,7 +67,7 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
vsp1_wpf_write(wpf, VI6_WPF_SRCRPF, srcrpf);
- /* Destination stride. Cropping isn't supported yet. */
+ /* Destination stride. */
if (!pipe->lif) {
struct v4l2_pix_format_mplane *format = &wpf->video.format;
@@ -79,10 +78,12 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
format->plane_fmt[1].bytesperline);
}
- vsp1_wpf_write(wpf, VI6_WPF_HSZCLIP,
- format->width << VI6_WPF_SZCLIP_SIZE_SHIFT);
- vsp1_wpf_write(wpf, VI6_WPF_VSZCLIP,
- format->height << VI6_WPF_SZCLIP_SIZE_SHIFT);
+ vsp1_wpf_write(wpf, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN |
+ (crop->left << VI6_WPF_SZCLIP_OFST_SHIFT) |
+ (crop->width << VI6_WPF_SZCLIP_SIZE_SHIFT));
+ vsp1_wpf_write(wpf, VI6_WPF_VSZCLIP, VI6_WPF_SZCLIP_EN |
+ (crop->top << VI6_WPF_SZCLIP_OFST_SHIFT) |
+ (crop->height << VI6_WPF_SZCLIP_SIZE_SHIFT));
/* Format */
if (!pipe->lif) {
@@ -130,6 +131,8 @@ static struct v4l2_subdev_pad_ops wpf_pad_ops = {
.enum_frame_size = vsp1_rwpf_enum_frame_size,
.get_fmt = vsp1_rwpf_get_format,
.set_fmt = vsp1_rwpf_set_format,
+ .get_selection = vsp1_rwpf_get_selection,
+ .set_selection = vsp1_rwpf_set_selection,
};
static struct v4l2_subdev_ops wpf_ops = {
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 6ecdc39bb366..192f36f2f4aa 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -21,6 +21,12 @@ config RADIO_SI470X
source "drivers/media/radio/si470x/Kconfig"
+config RADIO_SI4713
+ tristate "Silicon Labs Si4713 FM Radio with RDS Transmitter support"
+ depends on VIDEO_V4L2
+
+source "drivers/media/radio/si4713/Kconfig"
+
config RADIO_SI476X
tristate "Silicon Laboratories Si476x I2C FM Radio"
depends on I2C && VIDEO_V4L2
@@ -113,29 +119,6 @@ config RADIO_SHARK2
To compile this driver as a module, choose M here: the
module will be called radio-shark2.
-config I2C_SI4713
- tristate "I2C driver for Silicon Labs Si4713 device"
- depends on I2C && VIDEO_V4L2
- ---help---
- Say Y here if you want support to Si4713 I2C device.
- This device driver supports only i2c bus.
-
- To compile this driver as a module, choose M here: the
- module will be called si4713.
-
-config RADIO_SI4713
- tristate "Silicon Labs Si4713 FM Radio Transmitter support"
- depends on I2C && VIDEO_V4L2
- select I2C_SI4713
- ---help---
- Say Y here if you want support to Si4713 FM Radio Transmitter.
- This device can transmit audio through FM. It can transmit
- RDS and RBDS signals as well. This module is the v4l2 radio
- interface for the i2c driver of this device.
-
- To compile this driver as a module, choose M here: the
- module will be called radio-si4713.
-
config USB_KEENE
tristate "Keene FM Transmitter USB support"
depends on USB && VIDEO_V4L2
@@ -146,6 +129,20 @@ config USB_KEENE
To compile this driver as a module, choose M here: the
module will be called radio-keene.
+config USB_RAREMONO
+ tristate "Thanko's Raremono AM/FM/SW radio support"
+ depends on USB && VIDEO_V4L2
+ ---help---
+ The 'Thanko's Raremono' device contains the Si4734 chip from Silicon Labs Inc.
+ It is one of the very few, perhaps the only, consumer USB radio devices
+ that can receive the AM/FM/SW bands.
+
+ Say Y here if you want to connect this type of AM/FM/SW receiver
+ to your computer's USB port.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-raremono.
+
config USB_MA901
tristate "Masterkit MA901 USB FM radio support"
depends on USB && VIDEO_V4L2
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index 3b645601800d..120e791199b2 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -17,12 +17,11 @@ obj-$(CONFIG_RADIO_RTRACK) += radio-aimslab.o
obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o
obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o
obj-$(CONFIG_RADIO_TRUST) += radio-trust.o
-obj-$(CONFIG_I2C_SI4713) += si4713-i2c.o
-obj-$(CONFIG_RADIO_SI4713) += radio-si4713.o
obj-$(CONFIG_RADIO_SI476X) += radio-si476x.o
obj-$(CONFIG_RADIO_MIROPCM20) += radio-miropcm20.o
obj-$(CONFIG_USB_DSBR) += dsbr100.o
obj-$(CONFIG_RADIO_SI470X) += si470x/
+obj-$(CONFIG_RADIO_SI4713) += si4713/
obj-$(CONFIG_USB_MR800) += radio-mr800.o
obj-$(CONFIG_USB_KEENE) += radio-keene.o
obj-$(CONFIG_USB_MA901) += radio-ma901.o
@@ -33,6 +32,7 @@ obj-$(CONFIG_RADIO_TIMBERDALE) += radio-timb.o
obj-$(CONFIG_RADIO_WL1273) += radio-wl1273.o
obj-$(CONFIG_RADIO_WL128X) += wl128x/
obj-$(CONFIG_RADIO_TEA575X) += tea575x.o
+obj-$(CONFIG_USB_RAREMONO) += radio-raremono.o
shark2-objs := radio-shark2.o radio-tea5777.o
diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c
new file mode 100644
index 000000000000..7b3bdbb1be73
--- /dev/null
+++ b/drivers/media/radio/radio-raremono.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/usb.h>
+#include <linux/hid.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <asm/unaligned.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+
+/*
+ * 'Thanko's Raremono' is a Japanese si4734-based AM/FM/SW USB receiver:
+ *
+ * http://www.raremono.jp/product/484.html/
+ *
+ * The USB protocol has been reverse engineered using Wireshark, initially
+ * by Dinesh Ram <dinesh.ram@cern.ch> and finished by Hans Verkuil
+ * <hverkuil@xs4all.nl>.
+ *
+ * Sadly the firmware used in this product hides lots of goodies since the
+ * si4734 has more features than are supported by the firmware. Oh well...
+ */
+
+/* driver and module definitions */
+MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_DESCRIPTION("Thanko's Raremono AM/FM/SW Receiver USB driver");
+MODULE_LICENSE("GPL v2");
+
+/*
+ * The Device announces itself as Cygnal Integrated Products, Inc.
+ *
+ * The vendor and product IDs (and in fact all other lsusb information as
+ * well) are identical to the si470x Silicon Labs USB FM Radio Reference
+ * Design board, even though this card has a si4734 device. Clearly the
+ * designer of this product never bothered to change the USB IDs.
+ */
+
+/* USB Device ID List */
+static struct usb_device_id usb_raremono_device_table[] = {
+ {USB_DEVICE_AND_INTERFACE_INFO(0x10c4, 0x818a, USB_CLASS_HID, 0, 0) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, usb_raremono_device_table);
+
+#define BUFFER_LENGTH 64
+
+/* Timeout is set to a high value, could probably be reduced. Need more tests */
+#define USB_TIMEOUT 10000
+
+/* Frequency limits in kHz */
+#define FM_FREQ_RANGE_LOW 64000
+#define FM_FREQ_RANGE_HIGH 108000
+
+#define AM_FREQ_RANGE_LOW 520
+#define AM_FREQ_RANGE_HIGH 1710
+
+#define SW_FREQ_RANGE_LOW 2300
+#define SW_FREQ_RANGE_HIGH 26100
+
+enum { BAND_FM, BAND_AM, BAND_SW };
+
+static const struct v4l2_frequency_band bands[] = {
+ /* Band FM */
+ {
+ .type = V4L2_TUNER_RADIO,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = FM_FREQ_RANGE_LOW * 16,
+ .rangehigh = FM_FREQ_RANGE_HIGH * 16,
+ .modulation = V4L2_BAND_MODULATION_FM,
+ },
+ /* Band AM */
+ {
+ .type = V4L2_TUNER_RADIO,
+ .index = 1,
+ .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = AM_FREQ_RANGE_LOW * 16,
+ .rangehigh = AM_FREQ_RANGE_HIGH * 16,
+ .modulation = V4L2_BAND_MODULATION_AM,
+ },
+ /* Band SW */
+ {
+ .type = V4L2_TUNER_RADIO,
+ .index = 2,
+ .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = SW_FREQ_RANGE_LOW * 16,
+ .rangehigh = SW_FREQ_RANGE_HIGH * 16,
+ .modulation = V4L2_BAND_MODULATION_AM,
+ },
+};
+
+struct raremono_device {
+ struct usb_device *usbdev;
+ struct usb_interface *intf;
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+ struct mutex lock;
+
+ u8 *buffer;
+ u32 band;
+ unsigned curfreq;
+};
+
+static inline struct raremono_device *to_raremono_dev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct raremono_device, v4l2_dev);
+}
+
+/* Set frequency. */
+static int raremono_cmd_main(struct raremono_device *radio, unsigned band, unsigned freq)
+{
+ unsigned band_offset;
+ int ret;
+
+ switch (band) {
+ case BAND_FM:
+ band_offset = 1;
+ freq /= 10;
+ break;
+ case BAND_AM:
+ band_offset = 0;
+ break;
+ default:
+ band_offset = 2;
+ break;
+ }
+ radio->buffer[0] = 0x04 + band_offset;
+ radio->buffer[1] = freq >> 8;
+ radio->buffer[2] = freq & 0xff;
+
+ ret = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0),
+ HID_REQ_SET_REPORT,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ 0x0300 + radio->buffer[0], 2,
+ radio->buffer, 3, USB_TIMEOUT);
+
+ if (ret < 0) {
+ dev_warn(radio->v4l2_dev.dev, "%s failed (%d)\n", __func__, ret);
+ return ret;
+ }
+ radio->curfreq = (band == BAND_FM) ? freq * 10 : freq;
+ return 0;
+}
+
+/* Handle unplugging the device.
+ * We call video_unregister_device in any case.
+ * The last function called in this procedure is
+ * usb_raremono_device_release.
+ */
+static void usb_raremono_disconnect(struct usb_interface *intf)
+{
+ struct raremono_device *radio = to_raremono_dev(usb_get_intfdata(intf));
+
+ dev_info(&intf->dev, "Thanko's Raremono disconnected\n");
+
+ mutex_lock(&radio->lock);
+ usb_set_intfdata(intf, NULL);
+ video_unregister_device(&radio->vdev);
+ v4l2_device_disconnect(&radio->v4l2_dev);
+ mutex_unlock(&radio->lock);
+ v4l2_device_put(&radio->v4l2_dev);
+}
+
+/*
+ * Linux Video interface
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *v)
+{
+ struct raremono_device *radio = video_drvdata(file);
+
+ strlcpy(v->driver, "radio-raremono", sizeof(v->driver));
+ strlcpy(v->card, "Thanko's Raremono", sizeof(v->card));
+ usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
+ v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
+ v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int vidioc_enum_freq_bands(struct file *file, void *priv,
+ struct v4l2_frequency_band *band)
+{
+ if (band->tuner != 0)
+ return -EINVAL;
+
+ if (band->index >= ARRAY_SIZE(bands))
+ return -EINVAL;
+
+ *band = bands[band->index];
+
+ return 0;
+}
+
+static int vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *v)
+{
+ struct raremono_device *radio = video_drvdata(file);
+ int ret;
+
+ if (v->index > 0)
+ return -EINVAL;
+
+ strlcpy(v->name, "AM/FM/SW", sizeof(v->name));
+ v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_FREQ_BANDS;
+ v->rangelow = AM_FREQ_RANGE_LOW * 16;
+ v->rangehigh = FM_FREQ_RANGE_HIGH * 16;
+ v->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO;
+ v->audmode = (radio->curfreq < FM_FREQ_RANGE_LOW) ?
+ V4L2_TUNER_MODE_MONO : V4L2_TUNER_MODE_STEREO;
+ memset(radio->buffer, 1, BUFFER_LENGTH);
+ ret = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
+ 1, 0xa1, 0x030d, 2, radio->buffer, BUFFER_LENGTH, USB_TIMEOUT);
+
+ if (ret < 0) {
+ dev_warn(radio->v4l2_dev.dev, "%s failed (%d)\n", __func__, ret);
+ return ret;
+ }
+ v->signal = ((radio->buffer[1] & 0xf) << 8 | radio->buffer[2]) << 4;
+ return 0;
+}
+
+static int vidioc_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *v)
+{
+ return v->index ? -EINVAL : 0;
+}
+
+static int vidioc_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *f)
+{
+ struct raremono_device *radio = video_drvdata(file);
+ u32 freq = f->frequency;
+ unsigned band;
+
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
+
+ if (f->frequency >= (FM_FREQ_RANGE_LOW + SW_FREQ_RANGE_HIGH) * 8)
+ band = BAND_FM;
+ else if (f->frequency <= (AM_FREQ_RANGE_HIGH + SW_FREQ_RANGE_LOW) * 8)
+ band = BAND_AM;
+ else
+ band = BAND_SW;
+
+ freq = clamp_t(u32, f->frequency, bands[band].rangelow, bands[band].rangehigh);
+ return raremono_cmd_main(radio, band, freq / 16);
+}
+
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct raremono_device *radio = video_drvdata(file);
+
+ if (f->tuner != 0)
+ return -EINVAL;
+ f->type = V4L2_TUNER_RADIO;
+ f->frequency = radio->curfreq * 16;
+ return 0;
+}
+
+/* File system interface */
+static const struct v4l2_file_operations usb_raremono_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops usb_raremono_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_enum_freq_bands = vidioc_enum_freq_bands,
+};
+
+/* check if the device is present and register with v4l and usb if it is */
+static int usb_raremono_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct raremono_device *radio;
+ int retval = 0;
+
+ radio = devm_kzalloc(&intf->dev, sizeof(struct raremono_device), GFP_KERNEL);
+ if (radio)
+ radio->buffer = devm_kmalloc(&intf->dev, BUFFER_LENGTH, GFP_KERNEL);
+
+ if (!radio || !radio->buffer)
+ return -ENOMEM;
+
+ radio->usbdev = interface_to_usbdev(intf);
+ radio->intf = intf;
+
+ /*
+ * This device uses the same USB IDs as the si470x SiLabs reference
+ * design. So do an additional check: attempt to read the device ID
+ * from the si470x: the lower 12 bits are 0x0242 for the si470x. The
+ * Raremono always returns 0x0800 (the meaning of that is unknown, but
+ * at least it works).
+ *
+ * We use this check to determine which device we are dealing with.
+ */
+ msleep(20);
+ retval = usb_control_msg(radio->usbdev,
+ usb_rcvctrlpipe(radio->usbdev, 0),
+ HID_REQ_GET_REPORT,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ 1, 2,
+ radio->buffer, 3, 500);
+ if (retval != 3 ||
+ (get_unaligned_be16(&radio->buffer[1]) & 0xfff) == 0x0242) {
+ dev_info(&intf->dev, "this is not Thanko's Raremono.\n");
+ return -ENODEV;
+ }
+
+ dev_info(&intf->dev, "Thanko's Raremono connected: (%04X:%04X)\n",
+ id->idVendor, id->idProduct);
+
+ retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
+ if (retval < 0) {
+ dev_err(&intf->dev, "couldn't register v4l2_device\n");
+ return retval;
+ }
+
+ mutex_init(&radio->lock);
+
+ strlcpy(radio->vdev.name, radio->v4l2_dev.name,
+ sizeof(radio->vdev.name));
+ radio->vdev.v4l2_dev = &radio->v4l2_dev;
+ radio->vdev.fops = &usb_raremono_fops;
+ radio->vdev.ioctl_ops = &usb_raremono_ioctl_ops;
+ radio->vdev.lock = &radio->lock;
+ radio->vdev.release = video_device_release_empty;
+
+ usb_set_intfdata(intf, &radio->v4l2_dev);
+
+ video_set_drvdata(&radio->vdev, radio);
+ set_bit(V4L2_FL_USE_FH_PRIO, &radio->vdev.flags);
+
+ raremono_cmd_main(radio, BAND_FM, 95160);
+
+ retval = video_register_device(&radio->vdev, VFL_TYPE_RADIO, -1);
+ if (retval == 0) {
+ dev_info(&intf->dev, "V4L2 device registered as %s\n",
+ video_device_node_name(&radio->vdev));
+ return 0;
+ }
+ dev_err(&intf->dev, "could not register video device\n");
+ v4l2_device_unregister(&radio->v4l2_dev);
+ return retval;
+}
+
+/* USB subsystem interface */
+static struct usb_driver usb_raremono_driver = {
+ .name = "radio-raremono",
+ .probe = usb_raremono_probe,
+ .disconnect = usb_raremono_disconnect,
+ .id_table = usb_raremono_device_table,
+};
+
+module_usb_driver(usb_raremono_driver);
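
The band selection in vidioc_s_frequency() above leans on V4L2's fixed-point frequency units: with V4L2_TUNER_CAP_LOW set, one unit is 62.5 Hz, so a kHz value is multiplied by 16, and the two thresholds (FM_FREQ_RANGE_LOW + SW_FREQ_RANGE_HIGH) * 8 and (AM_FREQ_RANGE_HIGH + SW_FREQ_RANGE_LOW) * 8 are simply the midpoints between adjacent band edges expressed in those units, so a requested frequency is mapped to the nearest band before being clamped to that band's range. A minimal userspace sketch of that arithmetic (illustrative only, not part of the patch; it merely reuses the constants defined in radio-raremono.c):

/*
 * Sketch: classify a V4L2 frequency (units of 62.5 Hz) into the Raremono's
 * FM/AM/SW bands the same way vidioc_s_frequency() does.
 */
#include <stdint.h>
#include <stdio.h>

#define FM_FREQ_RANGE_LOW	64000	/* kHz */
#define AM_FREQ_RANGE_HIGH	1710
#define SW_FREQ_RANGE_LOW	2300
#define SW_FREQ_RANGE_HIGH	26100

enum { BAND_FM, BAND_AM, BAND_SW };

static int classify(uint32_t v4l2_freq)
{
	/* Above the FM/SW midpoint -> FM, below the AM/SW midpoint -> AM. */
	if (v4l2_freq >= (FM_FREQ_RANGE_LOW + SW_FREQ_RANGE_HIGH) * 8)
		return BAND_FM;
	if (v4l2_freq <= (AM_FREQ_RANGE_HIGH + SW_FREQ_RANGE_LOW) * 8)
		return BAND_AM;
	return BAND_SW;
}

int main(void)
{
	uint32_t f = 95160 * 16;	/* 95.16 MHz, the driver's power-on default */

	printf("band = %d (0=FM 1=AM 2=SW)\n", classify(f));
	return 0;
}
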
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index d6d4d60261d5..07ef40595efd 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -137,6 +137,8 @@ MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*");
/* interrupt out endpoint 2 every 1 millisecond */
#define UNUSED_REPORT 23
+#define MAX_REPORT_SIZE 64
+
/**************************************************************************
@@ -208,7 +210,7 @@ MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*");
*/
static int si470x_get_report(struct si470x_device *radio, void *buf, int size)
{
- unsigned char *report = (unsigned char *) buf;
+ unsigned char *report = buf;
int retval;
retval = usb_control_msg(radio->usbdev,
@@ -231,7 +233,7 @@ static int si470x_get_report(struct si470x_device *radio, void *buf, int size)
*/
static int si470x_set_report(struct si470x_device *radio, void *buf, int size)
{
- unsigned char *report = (unsigned char *) buf;
+ unsigned char *report = buf;
int retval;
retval = usb_control_msg(radio->usbdev,
@@ -254,15 +256,14 @@ static int si470x_set_report(struct si470x_device *radio, void *buf, int size)
*/
int si470x_get_register(struct si470x_device *radio, int regnr)
{
- unsigned char buf[REGISTER_REPORT_SIZE];
int retval;
- buf[0] = REGISTER_REPORT(regnr);
+ radio->usb_buf[0] = REGISTER_REPORT(regnr);
- retval = si470x_get_report(radio, (void *) &buf, sizeof(buf));
+ retval = si470x_get_report(radio, radio->usb_buf, REGISTER_REPORT_SIZE);
if (retval >= 0)
- radio->registers[regnr] = get_unaligned_be16(&buf[1]);
+ radio->registers[regnr] = get_unaligned_be16(&radio->usb_buf[1]);
return (retval < 0) ? -EINVAL : 0;
}
@@ -273,13 +274,12 @@ int si470x_get_register(struct si470x_device *radio, int regnr)
*/
int si470x_set_register(struct si470x_device *radio, int regnr)
{
- unsigned char buf[REGISTER_REPORT_SIZE];
int retval;
- buf[0] = REGISTER_REPORT(regnr);
- put_unaligned_be16(radio->registers[regnr], &buf[1]);
+ radio->usb_buf[0] = REGISTER_REPORT(regnr);
+ put_unaligned_be16(radio->registers[regnr], &radio->usb_buf[1]);
- retval = si470x_set_report(radio, (void *) &buf, sizeof(buf));
+ retval = si470x_set_report(radio, radio->usb_buf, REGISTER_REPORT_SIZE);
return (retval < 0) ? -EINVAL : 0;
}
@@ -295,18 +295,17 @@ int si470x_set_register(struct si470x_device *radio, int regnr)
*/
static int si470x_get_all_registers(struct si470x_device *radio)
{
- unsigned char buf[ENTIRE_REPORT_SIZE];
int retval;
unsigned char regnr;
- buf[0] = ENTIRE_REPORT;
+ radio->usb_buf[0] = ENTIRE_REPORT;
- retval = si470x_get_report(radio, (void *) &buf, sizeof(buf));
+ retval = si470x_get_report(radio, radio->usb_buf, ENTIRE_REPORT_SIZE);
if (retval >= 0)
for (regnr = 0; regnr < RADIO_REGISTER_NUM; regnr++)
radio->registers[regnr] = get_unaligned_be16(
- &buf[regnr * RADIO_REGISTER_SIZE + 1]);
+ &radio->usb_buf[regnr * RADIO_REGISTER_SIZE + 1]);
return (retval < 0) ? -EINVAL : 0;
}
@@ -323,14 +322,13 @@ static int si470x_get_all_registers(struct si470x_device *radio)
static int si470x_set_led_state(struct si470x_device *radio,
unsigned char led_state)
{
- unsigned char buf[LED_REPORT_SIZE];
int retval;
- buf[0] = LED_REPORT;
- buf[1] = LED_COMMAND;
- buf[2] = led_state;
+ radio->usb_buf[0] = LED_REPORT;
+ radio->usb_buf[1] = LED_COMMAND;
+ radio->usb_buf[2] = led_state;
- retval = si470x_set_report(radio, (void *) &buf, sizeof(buf));
+ retval = si470x_set_report(radio, radio->usb_buf, LED_REPORT_SIZE);
return (retval < 0) ? -EINVAL : 0;
}
@@ -346,19 +344,18 @@ static int si470x_set_led_state(struct si470x_device *radio,
*/
static int si470x_get_scratch_page_versions(struct si470x_device *radio)
{
- unsigned char buf[SCRATCH_REPORT_SIZE];
int retval;
- buf[0] = SCRATCH_REPORT;
+ radio->usb_buf[0] = SCRATCH_REPORT;
- retval = si470x_get_report(radio, (void *) &buf, sizeof(buf));
+ retval = si470x_get_report(radio, radio->usb_buf, SCRATCH_REPORT_SIZE);
if (retval < 0)
dev_warn(&radio->intf->dev, "si470x_get_scratch: "
"si470x_get_report returned %d\n", retval);
else {
- radio->software_version = buf[1];
- radio->hardware_version = buf[2];
+ radio->software_version = radio->usb_buf[1];
+ radio->hardware_version = radio->usb_buf[2];
}
return (retval < 0) ? -EINVAL : 0;
@@ -509,6 +506,7 @@ static void si470x_usb_release(struct v4l2_device *v4l2_dev)
v4l2_device_unregister(&radio->v4l2_dev);
kfree(radio->int_in_buffer);
kfree(radio->buffer);
+ kfree(radio->usb_buf);
kfree(radio);
}
@@ -593,6 +591,11 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
retval = -ENOMEM;
goto err_initial;
}
+ radio->usb_buf = kmalloc(MAX_REPORT_SIZE, GFP_KERNEL);
+ if (radio->usb_buf == NULL) {
+ retval = -ENOMEM;
+ goto err_radio;
+ }
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
radio->band = 1; /* Default to 76 - 108 MHz */
@@ -612,7 +615,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
if (!radio->int_in_endpoint) {
dev_info(&intf->dev, "could not find interrupt in endpoint\n");
retval = -EIO;
- goto err_radio;
+ goto err_usbbuf;
}
int_end_size = le16_to_cpu(radio->int_in_endpoint->wMaxPacketSize);
@@ -621,7 +624,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
if (!radio->int_in_buffer) {
dev_info(&intf->dev, "could not allocate int_in_buffer");
retval = -ENOMEM;
- goto err_radio;
+ goto err_usbbuf;
}
radio->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -632,6 +635,30 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
}
radio->v4l2_dev.release = si470x_usb_release;
+
+ /*
+ * The si470x SiLabs reference design uses the same USB IDs as
+ * 'Thanko's Raremono' si4734 based receiver. So check here which we
+ * have: attempt to read the device ID from the si470x: the lower 12
+ * bits should be 0x0242 for the si470x.
+ *
+ * We use this check to determine which device we are dealing with.
+ */
+ if (id->idVendor == 0x10c4 && id->idProduct == 0x818a) {
+ retval = usb_control_msg(radio->usbdev,
+ usb_rcvctrlpipe(radio->usbdev, 0),
+ HID_REQ_GET_REPORT,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ 1, 2,
+ radio->usb_buf, 3, 500);
+ if (retval != 3 ||
+ (get_unaligned_be16(&radio->usb_buf[1]) & 0xfff) != 0x0242) {
+ dev_info(&intf->dev, "this is not a si470x device.\n");
+ retval = -ENODEV;
+ goto err_urb;
+ }
+ }
+
retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
if (retval < 0) {
dev_err(&intf->dev, "couldn't register v4l2_device\n");
@@ -743,6 +770,8 @@ err_urb:
usb_free_urb(radio->int_in_urb);
err_intbuffer:
kfree(radio->int_in_buffer);
+err_usbbuf:
+ kfree(radio->usb_buf);
err_radio:
kfree(radio);
err_initial:
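
A note on the usb_buf conversion in this file: buffers passed to usb_control_msg() must be DMA-capable, and on-stack arrays are not guaranteed to be, so the usual pattern (followed here) is to kmalloc() one report-sized buffer at probe time, reuse it for every transfer, and free it in the release path. A kernel-style sketch of that pattern (struct and function names are made up for illustration; the real driver keeps usb_buf in struct si470x_device):

/*
 * Sketch: allocate a heap buffer once and reuse it for HID report transfers.
 */
#include <linux/errno.h>
#include <linux/hid.h>
#include <linux/slab.h>
#include <linux/usb.h>

struct demo_radio {
	struct usb_device *usbdev;
	u8 *usb_buf;			/* reused for every HID report */
};

static int demo_alloc_buf(struct demo_radio *radio)
{
	/* kmalloc()ed memory may be DMA-mapped; stack arrays may not. */
	radio->usb_buf = kmalloc(64, GFP_KERNEL);
	return radio->usb_buf ? 0 : -ENOMEM;
}

static int demo_get_report(struct demo_radio *radio, u8 report_id, int size)
{
	radio->usb_buf[0] = report_id;
	return usb_control_msg(radio->usbdev,
			       usb_rcvctrlpipe(radio->usbdev, 0),
			       HID_REQ_GET_REPORT,
			       USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
			       report_id, 2,	/* wValue = report id, wIndex = interface */
			       radio->usb_buf, size, 500);
}

static void demo_free_buf(struct demo_radio *radio)
{
	kfree(radio->usb_buf);		/* called from the release callback */
}
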
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index 467e95575488..4b7660470e2f 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -167,6 +167,7 @@ struct si470x_device {
/* reference to USB and video device */
struct usb_device *usbdev;
struct usb_interface *intf;
+ char *usb_buf;
/* Interrupt endpoint handling */
char *int_in_buffer;
diff --git a/drivers/media/radio/si4713/Kconfig b/drivers/media/radio/si4713/Kconfig
new file mode 100644
index 000000000000..a7c3ba85d12b
--- /dev/null
+++ b/drivers/media/radio/si4713/Kconfig
@@ -0,0 +1,40 @@
+config USB_SI4713
+ tristate "Silicon Labs Si4713 FM Radio Transmitter support with USB"
+ depends on USB && RADIO_SI4713
+ select SI4713
+ ---help---
+ This is a driver for USB devices with the Silicon Labs SI4713
+	  chip. Currently these devices are known to work:
+ - 10c4:8244: Silicon Labs FM Transmitter USB device.
+
+ Say Y here if you want to connect this type of radio to your
+ computer's USB port.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-usb-si4713.
+
+config PLATFORM_SI4713
+ tristate "Silicon Labs Si4713 FM Radio Transmitter support with I2C"
+ depends on I2C && RADIO_SI4713
+ select SI4713
+ ---help---
+ This is a driver for I2C devices with the Silicon Labs SI4713
+ chip.
+
+ Say Y here if you want to connect this type of radio to your
+ computer's I2C port.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-platform-si4713.
+
+config I2C_SI4713
+ tristate "Silicon Labs Si4713 FM Radio Transmitter support"
+ depends on I2C && RADIO_SI4713
+ ---help---
+	  Say Y here if you want support for the Si4713 FM Radio Transmitter.
+ This device can transmit audio through FM. It can transmit
+ RDS and RBDS signals as well. This module is the v4l2 radio
+ interface for the i2c driver of this device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called si4713.
diff --git a/drivers/media/radio/si4713/Makefile b/drivers/media/radio/si4713/Makefile
new file mode 100644
index 000000000000..ddaaf925e883
--- /dev/null
+++ b/drivers/media/radio/si4713/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for radios with Silicon Labs Si4713 FM Radio Transmitters
+#
+
+obj-$(CONFIG_I2C_SI4713) += si4713.o
+obj-$(CONFIG_USB_SI4713) += radio-usb-si4713.o
+obj-$(CONFIG_PLATFORM_SI4713) += radio-platform-si4713.o
diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/si4713/radio-platform-si4713.c
index ba4cfc946868..ba4cfc946868 100644
--- a/drivers/media/radio/radio-si4713.c
+++ b/drivers/media/radio/si4713/radio-platform-si4713.c
diff --git a/drivers/media/radio/si4713/radio-usb-si4713.c b/drivers/media/radio/si4713/radio-usb-si4713.c
new file mode 100644
index 000000000000..779855b74bcd
--- /dev/null
+++ b/drivers/media/radio/si4713/radio-usb-si4713.c
@@ -0,0 +1,540 @@
+/*
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates.
+ * All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* kernel includes */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/mutex.h>
+#include <linux/i2c.h>
+/* V4l includes */
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/si4713.h>
+
+#include "si4713.h"
+
+/* driver and module definitions */
+MODULE_AUTHOR("Dinesh Ram <dinesh.ram@cern.ch>");
+MODULE_DESCRIPTION("Si4713 FM Transmitter USB driver");
+MODULE_LICENSE("GPL v2");
+
+/* The Device announces itself as Cygnal Integrated Products, Inc. */
+#define USB_SI4713_VENDOR 0x10c4
+#define USB_SI4713_PRODUCT 0x8244
+
+#define BUFFER_LENGTH 64
+#define USB_TIMEOUT 1000
+#define USB_RESP_TIMEOUT 50000
+
+/* USB Device ID List */
+static struct usb_device_id usb_si4713_usb_device_table[] = {
+ {USB_DEVICE_AND_INTERFACE_INFO(USB_SI4713_VENDOR, USB_SI4713_PRODUCT,
+ USB_CLASS_HID, 0, 0) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, usb_si4713_usb_device_table);
+
+struct si4713_usb_device {
+ struct usb_device *usbdev;
+ struct usb_interface *intf;
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_subdev *v4l2_subdev;
+ struct mutex lock;
+ struct i2c_adapter i2c_adapter;
+
+ u8 *buffer;
+};
+
+static inline struct si4713_usb_device *to_si4713_dev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct si4713_usb_device, v4l2_dev);
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *v)
+{
+ struct si4713_usb_device *radio = video_drvdata(file);
+
+ strlcpy(v->driver, "radio-usb-si4713", sizeof(v->driver));
+ strlcpy(v->card, "Si4713 FM Transmitter", sizeof(v->card));
+ usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
+ v->device_caps = V4L2_CAP_MODULATOR | V4L2_CAP_RDS_OUTPUT;
+ v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int vidioc_g_modulator(struct file *file, void *priv,
+ struct v4l2_modulator *vm)
+{
+ struct si4713_usb_device *radio = video_drvdata(file);
+
+ return v4l2_subdev_call(radio->v4l2_subdev, tuner, g_modulator, vm);
+}
+
+static int vidioc_s_modulator(struct file *file, void *priv,
+ const struct v4l2_modulator *vm)
+{
+ struct si4713_usb_device *radio = video_drvdata(file);
+
+ return v4l2_subdev_call(radio->v4l2_subdev, tuner, s_modulator, vm);
+}
+
+static int vidioc_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *vf)
+{
+ struct si4713_usb_device *radio = video_drvdata(file);
+
+ return v4l2_subdev_call(radio->v4l2_subdev, tuner, s_frequency, vf);
+}
+
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *vf)
+{
+ struct si4713_usb_device *radio = video_drvdata(file);
+
+ return v4l2_subdev_call(radio->v4l2_subdev, tuner, g_frequency, vf);
+}
+
+static const struct v4l2_ioctl_ops usb_si4713_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_g_modulator = vidioc_g_modulator,
+ .vidioc_s_modulator = vidioc_s_modulator,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* File system interface */
+static const struct v4l2_file_operations usb_si4713_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = v4l2_ctrl_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static void usb_si4713_video_device_release(struct v4l2_device *v4l2_dev)
+{
+ struct si4713_usb_device *radio = to_si4713_dev(v4l2_dev);
+ struct i2c_adapter *adapter = &radio->i2c_adapter;
+
+ i2c_del_adapter(adapter);
+ v4l2_device_unregister(&radio->v4l2_dev);
+ kfree(radio->buffer);
+ kfree(radio);
+}
+
+/*
+ * This command sequence emulates the behaviour of the Windows driver.
+ * The structure of these commands was determined by sniffing the
+ * USB traffic of the device during startup.
+ * Most likely, these commands make some queries to the device.
+ * Commands are sent to query parameters such as the bus mode,
+ * component revision, boot mode, the device serial number, etc.
+ *
+ * These commands must be sent in this order during startup.
+ * The device fails to power up if these commands are not sent.
+ *
+ * The complete list of startup commands is given in the start_seq table below.
+ */
+static int si4713_send_startup_command(struct si4713_usb_device *radio)
+{
+ unsigned long until_jiffies = jiffies + usecs_to_jiffies(USB_RESP_TIMEOUT) + 1;
+ u8 *buffer = radio->buffer;
+ int retval;
+
+ /* send the command */
+ retval = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0),
+ 0x09, 0x21, 0x033f, 0, radio->buffer,
+ BUFFER_LENGTH, USB_TIMEOUT);
+ if (retval < 0)
+ return retval;
+
+ for (;;) {
+ /* receive the response */
+ retval = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
+ 0x01, 0xa1, 0x033f, 0, radio->buffer,
+ BUFFER_LENGTH, USB_TIMEOUT);
+ if (retval < 0)
+ return retval;
+ if (!radio->buffer[1]) {
+ /* USB traffic sniffing showed that some commands require
+ * additional checks. */
+ switch (buffer[1]) {
+ case 0x32:
+ if (radio->buffer[2] == 0)
+ return 0;
+ break;
+ case 0x14:
+ case 0x12:
+ if (radio->buffer[2] & SI4713_CTS)
+ return 0;
+ break;
+ case 0x06:
+ if ((radio->buffer[2] & SI4713_CTS) && radio->buffer[9] == 0x08)
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ }
+ if (time_is_before_jiffies(until_jiffies))
+ return -EIO;
+ msleep(3);
+ }
+
+ return retval;
+}
+
+struct si4713_start_seq_table {
+ int len;
+ u8 payload[8];
+};
+
+/*
+ * Some of the startup commands that could be recognized are :
+ * (0x03): Get serial number of the board (Response : CB000-00-00)
+ * (0x06, 0x03, 0x03, 0x08, 0x01, 0x0f) : Get Component revision
+ */
+static struct si4713_start_seq_table start_seq[] = {
+
+ { 1, { 0x03 } },
+ { 2, { 0x32, 0x7f } },
+ { 6, { 0x06, 0x03, 0x03, 0x08, 0x01, 0x0f } },
+ { 2, { 0x14, 0x02 } },
+ { 2, { 0x09, 0x90 } },
+ { 3, { 0x08, 0x90, 0xfa } },
+ { 2, { 0x36, 0x01 } },
+ { 2, { 0x05, 0x03 } },
+ { 7, { 0x06, 0x00, 0x06, 0x0e, 0x01, 0x0f, 0x05 } },
+ { 1, { 0x12 } },
+ /* Commands that are sent after pressing the 'Initialize'
+	   button in the Windows application */
+ { 1, { 0x03 } },
+ { 1, { 0x01 } },
+ { 2, { 0x09, 0x90 } },
+ { 3, { 0x08, 0x90, 0xfa } },
+ { 1, { 0x34 } },
+ { 2, { 0x35, 0x01 } },
+ { 2, { 0x36, 0x01 } },
+ { 2, { 0x30, 0x09 } },
+ { 4, { 0x30, 0x06, 0x00, 0xe2 } },
+ { 3, { 0x31, 0x01, 0x30 } },
+ { 3, { 0x31, 0x04, 0x09 } },
+ { 2, { 0x05, 0x02 } },
+ { 6, { 0x06, 0x03, 0x03, 0x08, 0x01, 0x0f } },
+};
+
+static int si4713_start_seq(struct si4713_usb_device *radio)
+{
+ int retval = 0;
+ int i;
+
+ radio->buffer[0] = 0x3f;
+
+ for (i = 0; i < ARRAY_SIZE(start_seq); i++) {
+ int len = start_seq[i].len;
+ u8 *payload = start_seq[i].payload;
+
+ memcpy(radio->buffer + 1, payload, len);
+ memset(radio->buffer + len + 1, 0, BUFFER_LENGTH - 1 - len);
+ retval = si4713_send_startup_command(radio);
+ }
+
+ return retval;
+}
+
+static struct i2c_board_info si4713_board_info = {
+ I2C_BOARD_INFO("si4713", SI4713_I2C_ADDR_BUSEN_HIGH),
+};
+
+struct si4713_command_table {
+ int command_id;
+ u8 payload[8];
+};
+
+/*
+ * Structure of a command :
+ * Byte 1 : 0x3f (always)
+ * Byte 2 : 0x06 (send a command)
+ * Byte 3 : Unknown
+ * Byte 4 : Number of arguments + 1 (for the command byte)
+ * Byte 5 : Number of response bytes
+ */
+static struct si4713_command_table command_table[] = {
+
+ { SI4713_CMD_POWER_UP, { 0x00, SI4713_PWUP_NARGS + 1, SI4713_PWUP_NRESP} },
+ { SI4713_CMD_GET_REV, { 0x03, 0x01, SI4713_GETREV_NRESP } },
+ { SI4713_CMD_POWER_DOWN, { 0x00, 0x01, SI4713_PWDN_NRESP} },
+ { SI4713_CMD_SET_PROPERTY, { 0x00, SI4713_SET_PROP_NARGS + 1, SI4713_SET_PROP_NRESP } },
+ { SI4713_CMD_GET_PROPERTY, { 0x00, SI4713_GET_PROP_NARGS + 1, SI4713_GET_PROP_NRESP } },
+ { SI4713_CMD_TX_TUNE_FREQ, { 0x03, SI4713_TXFREQ_NARGS + 1, SI4713_TXFREQ_NRESP } },
+ { SI4713_CMD_TX_TUNE_POWER, { 0x03, SI4713_TXPWR_NARGS + 1, SI4713_TXPWR_NRESP } },
+ { SI4713_CMD_TX_TUNE_MEASURE, { 0x03, SI4713_TXMEA_NARGS + 1, SI4713_TXMEA_NRESP } },
+ { SI4713_CMD_TX_TUNE_STATUS, { 0x00, SI4713_TXSTATUS_NARGS + 1, SI4713_TXSTATUS_NRESP } },
+ { SI4713_CMD_TX_ASQ_STATUS, { 0x03, SI4713_ASQSTATUS_NARGS + 1, SI4713_ASQSTATUS_NRESP } },
+ { SI4713_CMD_GET_INT_STATUS, { 0x03, 0x01, SI4713_GET_STATUS_NRESP } },
+ { SI4713_CMD_TX_RDS_BUFF, { 0x03, SI4713_RDSBUFF_NARGS + 1, SI4713_RDSBUFF_NRESP } },
+ { SI4713_CMD_TX_RDS_PS, { 0x00, SI4713_RDSPS_NARGS + 1, SI4713_RDSPS_NRESP } },
+};
+
+static int send_command(struct si4713_usb_device *radio, u8 *payload, char *data, int len)
+{
+ int retval;
+
+ radio->buffer[0] = 0x3f;
+ radio->buffer[1] = 0x06;
+
+ memcpy(radio->buffer + 2, payload, 3);
+ memcpy(radio->buffer + 5, data, len);
+ memset(radio->buffer + 5 + len, 0, BUFFER_LENGTH - 5 - len);
+
+ /* send the command */
+ retval = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0),
+ 0x09, 0x21, 0x033f, 0, radio->buffer,
+ BUFFER_LENGTH, USB_TIMEOUT);
+
+ return retval < 0 ? retval : 0;
+}
+
+static int si4713_i2c_read(struct si4713_usb_device *radio, char *data, int len)
+{
+ unsigned long until_jiffies = jiffies + usecs_to_jiffies(USB_RESP_TIMEOUT) + 1;
+ int retval;
+
+ /* receive the response */
+ for (;;) {
+ retval = usb_control_msg(radio->usbdev,
+ usb_rcvctrlpipe(radio->usbdev, 0),
+ 0x01, 0xa1, 0x033f, 0, radio->buffer,
+ BUFFER_LENGTH, USB_TIMEOUT);
+ if (retval < 0)
+ return retval;
+
+ /*
+ * Check that we get a valid reply back (buffer[1] == 0) and
+ * that CTS is set before returning, otherwise we wait and try
+ * again. The i2c driver also does the CTS check, but the timeouts
+ * used there are much too small for this USB driver, so we wait
+ * for it here.
+ */
+ if (radio->buffer[1] == 0 && (radio->buffer[2] & SI4713_CTS)) {
+ memcpy(data, radio->buffer + 2, len);
+ return 0;
+ }
+ if (time_is_before_jiffies(until_jiffies)) {
+ /* Zero the status value, ensuring CTS isn't set */
+ data[0] = 0;
+ return 0;
+ }
+ msleep(3);
+ }
+}
+
+static int si4713_i2c_write(struct si4713_usb_device *radio, char *data, int len)
+{
+ int retval = -EINVAL;
+ int i;
+
+ if (len > BUFFER_LENGTH - 5)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(command_table); i++) {
+ if (data[0] == command_table[i].command_id)
+ retval = send_command(radio, command_table[i].payload,
+ data, len);
+ }
+
+ return retval < 0 ? retval : 0;
+}
+
+static int si4713_transfer(struct i2c_adapter *i2c_adapter,
+ struct i2c_msg *msgs, int num)
+{
+ struct si4713_usb_device *radio = i2c_get_adapdata(i2c_adapter);
+ int retval = -EINVAL;
+ int i;
+
+ if (num <= 0)
+ return 0;
+
+ for (i = 0; i < num; i++) {
+ if (msgs[i].flags & I2C_M_RD)
+ retval = si4713_i2c_read(radio, msgs[i].buf, msgs[i].len);
+ else
+ retval = si4713_i2c_write(radio, msgs[i].buf, msgs[i].len);
+ if (retval)
+ break;
+ }
+
+ return retval ? retval : num;
+}
+
+static u32 si4713_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm si4713_algo = {
+ .master_xfer = si4713_transfer,
+ .functionality = si4713_functionality,
+};
+
+/* This name value shows up in the sysfs filename associated
+ with this I2C adapter */
+static struct i2c_adapter si4713_i2c_adapter_template = {
+ .name = "si4713-i2c",
+ .owner = THIS_MODULE,
+ .algo = &si4713_algo,
+};
+
+static int si4713_register_i2c_adapter(struct si4713_usb_device *radio)
+{
+ radio->i2c_adapter = si4713_i2c_adapter_template;
+ /* set up sysfs linkage to our parent device */
+ radio->i2c_adapter.dev.parent = &radio->usbdev->dev;
+ i2c_set_adapdata(&radio->i2c_adapter, radio);
+
+ return i2c_add_adapter(&radio->i2c_adapter);
+}
+
+/* check if the device is present and register with v4l and usb if it is */
+static int usb_si4713_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct si4713_usb_device *radio;
+ struct i2c_adapter *adapter;
+ struct v4l2_subdev *sd;
+ int retval = -ENOMEM;
+
+ dev_info(&intf->dev, "Si4713 development board discovered: (%04X:%04X)\n",
+ id->idVendor, id->idProduct);
+
+ /* Initialize local device structure */
+ radio = kzalloc(sizeof(struct si4713_usb_device), GFP_KERNEL);
+ if (radio)
+ radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL);
+
+ if (!radio || !radio->buffer) {
+ dev_err(&intf->dev, "kmalloc for si4713_usb_device failed\n");
+ kfree(radio);
+ return -ENOMEM;
+ }
+
+ mutex_init(&radio->lock);
+
+ radio->usbdev = interface_to_usbdev(intf);
+ radio->intf = intf;
+ usb_set_intfdata(intf, &radio->v4l2_dev);
+
+ retval = si4713_start_seq(radio);
+ if (retval < 0)
+ goto err_v4l2;
+
+ retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
+ if (retval < 0) {
+ dev_err(&intf->dev, "couldn't register v4l2_device\n");
+ goto err_v4l2;
+ }
+
+ retval = si4713_register_i2c_adapter(radio);
+ if (retval < 0) {
+ dev_err(&intf->dev, "could not register i2c device\n");
+ goto err_i2cdev;
+ }
+
+ adapter = &radio->i2c_adapter;
+ sd = v4l2_i2c_new_subdev_board(&radio->v4l2_dev, adapter,
+ &si4713_board_info, NULL);
+ radio->v4l2_subdev = sd;
+ if (!sd) {
+ dev_err(&intf->dev, "cannot get v4l2 subdevice\n");
+ retval = -ENODEV;
+ goto del_adapter;
+ }
+
+ radio->vdev.ctrl_handler = sd->ctrl_handler;
+ radio->v4l2_dev.release = usb_si4713_video_device_release;
+ strlcpy(radio->vdev.name, radio->v4l2_dev.name,
+ sizeof(radio->vdev.name));
+ radio->vdev.v4l2_dev = &radio->v4l2_dev;
+ radio->vdev.fops = &usb_si4713_fops;
+ radio->vdev.ioctl_ops = &usb_si4713_ioctl_ops;
+ radio->vdev.lock = &radio->lock;
+ radio->vdev.release = video_device_release_empty;
+ radio->vdev.vfl_dir = VFL_DIR_TX;
+
+ video_set_drvdata(&radio->vdev, radio);
+ set_bit(V4L2_FL_USE_FH_PRIO, &radio->vdev.flags);
+
+ retval = video_register_device(&radio->vdev, VFL_TYPE_RADIO, -1);
+ if (retval < 0) {
+ dev_err(&intf->dev, "could not register video device\n");
+ goto del_adapter;
+ }
+
+ dev_info(&intf->dev, "V4L2 device registered as %s\n",
+ video_device_node_name(&radio->vdev));
+
+ return 0;
+
+del_adapter:
+ i2c_del_adapter(adapter);
+err_i2cdev:
+ v4l2_device_unregister(&radio->v4l2_dev);
+err_v4l2:
+ kfree(radio->buffer);
+ kfree(radio);
+ return retval;
+}
+
+static void usb_si4713_disconnect(struct usb_interface *intf)
+{
+ struct si4713_usb_device *radio = to_si4713_dev(usb_get_intfdata(intf));
+
+ dev_info(&intf->dev, "Si4713 development board now disconnected\n");
+
+ mutex_lock(&radio->lock);
+ usb_set_intfdata(intf, NULL);
+ video_unregister_device(&radio->vdev);
+ v4l2_device_disconnect(&radio->v4l2_dev);
+ mutex_unlock(&radio->lock);
+ v4l2_device_put(&radio->v4l2_dev);
+}
+
+/* USB subsystem interface */
+static struct usb_driver usb_si4713_driver = {
+ .name = "radio-usb-si4713",
+ .probe = usb_si4713_probe,
+ .disconnect = usb_si4713_disconnect,
+ .id_table = usb_si4713_usb_device_table,
+};
+
+module_usb_driver(usb_si4713_driver);
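
The 64-byte report built by send_command() follows the layout described in the comment above command_table: a fixed 0x3f/0x06 header, the three per-command bytes taken from the table, then the raw Si4713 command, zero-padded to the report length. A small userspace sketch of that packing (illustrative only; the command bytes below are hypothetical placeholders, the real ones come from command_table and the si4713 core driver):

/*
 * Sketch: pack one 64-byte HID report the way radio-usb-si4713 does before
 * handing it to usb_control_msg().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BUFFER_LENGTH 64

static void pack_report(uint8_t *report, const uint8_t payload[3],
			const uint8_t *cmd, size_t len)
{
	report[0] = 0x3f;			/* fixed first byte                 */
	report[1] = 0x06;			/* "send a command" opcode          */
	memcpy(report + 2, payload, 3);		/* per-command bytes from the table */
	memcpy(report + 5, cmd, len);		/* the actual si4713 command        */
	memset(report + 5 + len, 0, BUFFER_LENGTH - 5 - len);
}

int main(void)
{
	/* Hypothetical values, for illustration only. */
	const uint8_t payload[3] = { 0x03, 0x04, 0x01 };
	const uint8_t cmd[4] = { 0x30, 0x00, 0x27, 0x7e };
	uint8_t report[BUFFER_LENGTH];

	pack_report(report, payload, cmd, sizeof(cmd));
	printf("%02x %02x %02x %02x %02x %02x\n", report[0], report[1],
	       report[2], report[3], report[4], report[5]);
	return 0;
}
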
diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713/si4713.c
index 9ec48ccbcf0b..07d5153811e8 100644
--- a/drivers/media/radio/si4713-i2c.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -27,13 +27,12 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/gpio.h>
-#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-common.h>
-#include "si4713-i2c.h"
+#include "si4713.h"
/* module parameters */
static int debug;
@@ -45,23 +44,18 @@ MODULE_AUTHOR("Eduardo Valentin <eduardo.valentin@nokia.com>");
MODULE_DESCRIPTION("I2C driver for Si4713 FM Radio Transmitter");
MODULE_VERSION("0.0.1");
-static const char *si4713_supply_names[SI4713_NUM_SUPPLIES] = {
- "vio",
- "vdd",
-};
-
#define DEFAULT_RDS_PI 0x00
#define DEFAULT_RDS_PTY 0x00
#define DEFAULT_RDS_DEVIATION 0x00C8
#define DEFAULT_RDS_PS_REPEAT_COUNT 0x0003
#define DEFAULT_LIMITER_RTIME 0x1392
#define DEFAULT_LIMITER_DEV 0x102CA
-#define DEFAULT_PILOT_FREQUENCY 0x4A38
+#define DEFAULT_PILOT_FREQUENCY 0x4A38
#define DEFAULT_PILOT_DEVIATION 0x1A5E
#define DEFAULT_ACOMP_ATIME 0x0000
#define DEFAULT_ACOMP_RTIME 0xF4240L
#define DEFAULT_ACOMP_GAIN 0x0F
-#define DEFAULT_ACOMP_THRESHOLD (-0x28)
+#define DEFAULT_ACOMP_THRESHOLD (-0x28)
#define DEFAULT_MUTE 0x01
#define DEFAULT_POWER_LEVEL 88
#define DEFAULT_FREQUENCY 8800
@@ -213,6 +207,7 @@ static int si4713_send_command(struct si4713_device *sdev, const u8 command,
u8 response[], const int respn, const int usecs)
{
struct i2c_client *client = v4l2_get_subdevdata(&sdev->sd);
+ unsigned long until_jiffies;
u8 data1[MAX_ARGS + 1];
int err;
@@ -228,30 +223,42 @@ static int si4713_send_command(struct si4713_device *sdev, const u8 command,
if (err != argn + 1) {
v4l2_err(&sdev->sd, "Error while sending command 0x%02x\n",
command);
- return (err > 0) ? -EIO : err;
+ return err < 0 ? err : -EIO;
}
+ until_jiffies = jiffies + usecs_to_jiffies(usecs) + 1;
+
/* Wait response from interrupt */
- if (!wait_for_completion_timeout(&sdev->work,
+ if (client->irq) {
+ if (!wait_for_completion_timeout(&sdev->work,
usecs_to_jiffies(usecs) + 1))
- v4l2_warn(&sdev->sd,
+ v4l2_warn(&sdev->sd,
"(%s) Device took too much time to answer.\n",
__func__);
-
- /* Then get the response */
- err = i2c_master_recv(client, response, respn);
- if (err != respn) {
- v4l2_err(&sdev->sd,
- "Error while reading response for command 0x%02x\n",
- command);
- return (err > 0) ? -EIO : err;
}
- DBG_BUFFER(&sdev->sd, "Response", response, respn);
- if (check_command_failed(response[0]))
- return -EBUSY;
+ do {
+ err = i2c_master_recv(client, response, respn);
+ if (err != respn) {
+ v4l2_err(&sdev->sd,
+ "Error %d while reading response for command 0x%02x\n",
+ err, command);
+ return err < 0 ? err : -EIO;
+ }
- return 0;
+ DBG_BUFFER(&sdev->sd, "Response", response, respn);
+ if (!check_command_failed(response[0]))
+ return 0;
+
+ if (client->irq)
+ return -EBUSY;
+ if (usecs <= 1000)
+ usleep_range(usecs, 1000);
+ else
+ usleep_range(1000, 2000);
+ } while (time_is_after_jiffies(until_jiffies));
+
+ return -EBUSY;
}
/*
@@ -265,9 +272,9 @@ static int si4713_read_property(struct si4713_device *sdev, u16 prop, u32 *pv)
int err;
u8 val[SI4713_GET_PROP_NRESP];
/*
- * .First byte = 0
- * .Second byte = property's MSB
- * .Third byte = property's LSB
+ * .First byte = 0
+ * .Second byte = property's MSB
+ * .Third byte = property's LSB
*/
const u8 args[SI4713_GET_PROP_NARGS] = {
0x00,
@@ -302,11 +309,11 @@ static int si4713_write_property(struct si4713_device *sdev, u16 prop, u16 val)
int rval;
u8 resp[SI4713_SET_PROP_NRESP];
/*
- * .First byte = 0
- * .Second byte = property's MSB
- * .Third byte = property's LSB
- * .Fourth byte = value's MSB
- * .Fifth byte = value's LSB
+ * .First byte = 0
+ * .Second byte = property's MSB
+ * .Third byte = property's LSB
+ * .Fourth byte = value's MSB
+ * .Fifth byte = value's LSB
*/
const u8 args[SI4713_SET_PROP_NARGS] = {
0x00,
@@ -344,31 +351,36 @@ static int si4713_write_property(struct si4713_device *sdev, u16 prop, u16 val)
*/
static int si4713_powerup(struct si4713_device *sdev)
{
+ struct i2c_client *client = v4l2_get_subdevdata(&sdev->sd);
int err;
u8 resp[SI4713_PWUP_NRESP];
/*
- * .First byte = Enabled interrupts and boot function
- * .Second byte = Input operation mode
+ * .First byte = Enabled interrupts and boot function
+ * .Second byte = Input operation mode
*/
- const u8 args[SI4713_PWUP_NARGS] = {
- SI4713_PWUP_CTSIEN | SI4713_PWUP_GPO2OEN | SI4713_PWUP_FUNC_TX,
+ u8 args[SI4713_PWUP_NARGS] = {
+ SI4713_PWUP_GPO2OEN | SI4713_PWUP_FUNC_TX,
SI4713_PWUP_OPMOD_ANALOG,
};
if (sdev->power_state)
return 0;
- err = regulator_bulk_enable(ARRAY_SIZE(sdev->supplies),
- sdev->supplies);
- if (err) {
- v4l2_err(&sdev->sd, "Failed to enable supplies: %d\n", err);
- return err;
+ if (sdev->supplies) {
+ err = regulator_bulk_enable(sdev->supplies, sdev->supply_data);
+ if (err) {
+ v4l2_err(&sdev->sd, "Failed to enable supplies: %d\n", err);
+ return err;
+ }
}
if (gpio_is_valid(sdev->gpio_reset)) {
udelay(50);
gpio_set_value(sdev->gpio_reset, 1);
}
+ if (client->irq)
+ args[0] |= SI4713_PWUP_CTSIEN;
+
err = si4713_send_command(sdev, SI4713_CMD_POWER_UP,
args, ARRAY_SIZE(args),
resp, ARRAY_SIZE(resp),
@@ -380,13 +392,15 @@ static int si4713_powerup(struct si4713_device *sdev)
v4l2_dbg(1, debug, &sdev->sd, "Device in power up mode\n");
sdev->power_state = POWER_ON;
- err = si4713_write_property(sdev, SI4713_GPO_IEN,
+ if (client->irq)
+ err = si4713_write_property(sdev, SI4713_GPO_IEN,
SI4713_STC_INT | SI4713_CTS);
- } else {
- if (gpio_is_valid(sdev->gpio_reset))
- gpio_set_value(sdev->gpio_reset, 0);
- err = regulator_bulk_disable(ARRAY_SIZE(sdev->supplies),
- sdev->supplies);
+ return err;
+ }
+ if (gpio_is_valid(sdev->gpio_reset))
+ gpio_set_value(sdev->gpio_reset, 0);
+ if (sdev->supplies) {
+ err = regulator_bulk_disable(sdev->supplies, sdev->supply_data);
if (err)
v4l2_err(&sdev->sd,
"Failed to disable supplies: %d\n", err);
@@ -418,11 +432,13 @@ static int si4713_powerdown(struct si4713_device *sdev)
v4l2_dbg(1, debug, &sdev->sd, "Device in reset mode\n");
if (gpio_is_valid(sdev->gpio_reset))
gpio_set_value(sdev->gpio_reset, 0);
- err = regulator_bulk_disable(ARRAY_SIZE(sdev->supplies),
- sdev->supplies);
- if (err)
- v4l2_err(&sdev->sd,
- "Failed to disable supplies: %d\n", err);
+ if (sdev->supplies) {
+ err = regulator_bulk_disable(sdev->supplies,
+ sdev->supply_data);
+ if (err)
+ v4l2_err(&sdev->sd,
+ "Failed to disable supplies: %d\n", err);
+ }
sdev->power_state = POWER_OFF;
}
@@ -451,7 +467,7 @@ static int si4713_checkrev(struct si4713_device *sdev)
v4l2_info(&sdev->sd, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
} else {
- v4l2_err(&sdev->sd, "Invalid product number\n");
+ v4l2_err(&sdev->sd, "Invalid product number 0x%X\n", resp[1]);
rval = -EINVAL;
}
return rval;
@@ -465,39 +481,45 @@ static int si4713_checkrev(struct si4713_device *sdev)
*/
static int si4713_wait_stc(struct si4713_device *sdev, const int usecs)
{
- int err;
+ struct i2c_client *client = v4l2_get_subdevdata(&sdev->sd);
u8 resp[SI4713_GET_STATUS_NRESP];
+ unsigned long start_jiffies = jiffies;
+ int err;
- /* Wait response from STC interrupt */
- if (!wait_for_completion_timeout(&sdev->work,
- usecs_to_jiffies(usecs) + 1))
+ if (client->irq &&
+ !wait_for_completion_timeout(&sdev->work, usecs_to_jiffies(usecs) + 1))
v4l2_warn(&sdev->sd,
- "%s: device took too much time to answer (%d usec).\n",
- __func__, usecs);
-
- /* Clear status bits */
- err = si4713_send_command(sdev, SI4713_CMD_GET_INT_STATUS,
- NULL, 0,
- resp, ARRAY_SIZE(resp),
- DEFAULT_TIMEOUT);
-
- if (err < 0)
- goto exit;
-
- v4l2_dbg(1, debug, &sdev->sd,
- "%s: status bits: 0x%02x\n", __func__, resp[0]);
-
- if (!(resp[0] & SI4713_STC_INT))
- err = -EIO;
-
-exit:
- return err;
+ "(%s) Device took too much time to answer.\n", __func__);
+
+ for (;;) {
+ /* Clear status bits */
+ err = si4713_send_command(sdev, SI4713_CMD_GET_INT_STATUS,
+ NULL, 0,
+ resp, ARRAY_SIZE(resp),
+ DEFAULT_TIMEOUT);
+ /* The USB device returns errors when it waits for the
+ * STC bit to be set. Hence polling */
+ if (err >= 0) {
+ v4l2_dbg(1, debug, &sdev->sd,
+ "%s: status bits: 0x%02x\n", __func__, resp[0]);
+
+ if (resp[0] & SI4713_STC_INT)
+ return 0;
+ }
+ if (jiffies_to_usecs(jiffies - start_jiffies) > usecs)
+ return err < 0 ? err : -EIO;
+ /* We sleep here for 3-4 ms in order to avoid flooding the device
+ * with USB requests. The si4713 USB driver was developed
+	 * by reverse engineering the Windows USB driver. The Windows
+ * driver also has a ~2.5 ms delay between responses. */
+ usleep_range(3000, 4000);
+ }
}
/*
* si4713_tx_tune_freq - Sets the state of the RF carrier and sets the tuning
- * frequency between 76 and 108 MHz in 10 kHz units and
- * steps of 50 kHz.
+ * frequency between 76 and 108 MHz in 10 kHz units and
+ * steps of 50 kHz.
* @sdev: si4713_device structure for the device we are communicating
* @frequency: desired frequency (76 - 108 MHz, unit 10 KHz, step 50 kHz)
*/
@@ -506,9 +528,9 @@ static int si4713_tx_tune_freq(struct si4713_device *sdev, u16 frequency)
int err;
u8 val[SI4713_TXFREQ_NRESP];
/*
- * .First byte = 0
- * .Second byte = frequency's MSB
- * .Third byte = frequency's LSB
+ * .First byte = 0
+ * .Second byte = frequency's MSB
+ * .Third byte = frequency's LSB
*/
const u8 args[SI4713_TXFREQ_NARGS] = {
0x00,
@@ -535,14 +557,14 @@ static int si4713_tx_tune_freq(struct si4713_device *sdev, u16 frequency)
}
/*
- * si4713_tx_tune_power - Sets the RF voltage level between 88 and 115 dBuV in
- * 1 dB units. A value of 0x00 indicates off. The command
- * also sets the antenna tuning capacitance. A value of 0
- * indicates autotuning, and a value of 1 - 191 indicates
- * a manual override, which results in a tuning
- * capacitance of 0.25 pF x @antcap.
+ * si4713_tx_tune_power - Sets the RF voltage level between 88 and 120 dBuV in
+ * 1 dB units. A value of 0x00 indicates off. The command
+ * also sets the antenna tuning capacitance. A value of 0
+ * indicates autotuning, and a value of 1 - 191 indicates
+ * a manual override, which results in a tuning
+ * capacitance of 0.25 pF x @antcap.
* @sdev: si4713_device structure for the device we are communicating
- * @power: tuning power (88 - 115 dBuV, unit/step 1 dB)
+ * @power: tuning power (88 - 120 dBuV, unit/step 1 dB)
* @antcap: value of antenna tuning capacitor (0 - 191)
*/
static int si4713_tx_tune_power(struct si4713_device *sdev, u8 power,
@@ -551,21 +573,21 @@ static int si4713_tx_tune_power(struct si4713_device *sdev, u8 power,
int err;
u8 val[SI4713_TXPWR_NRESP];
/*
- * .First byte = 0
- * .Second byte = 0
- * .Third byte = power
- * .Fourth byte = antcap
+ * .First byte = 0
+ * .Second byte = 0
+ * .Third byte = power
+ * .Fourth byte = antcap
*/
- const u8 args[SI4713_TXPWR_NARGS] = {
+ u8 args[SI4713_TXPWR_NARGS] = {
0x00,
0x00,
power,
antcap,
};
- if (((power > 0) && (power < SI4713_MIN_POWER)) ||
- power > SI4713_MAX_POWER || antcap > SI4713_MAX_ANTCAP)
- return -EDOM;
+ /* Map power values 1-87 to MIN_POWER (88) */
+ if (power > 0 && power < SI4713_MIN_POWER)
+ args[2] = power = SI4713_MIN_POWER;
err = si4713_send_command(sdev, SI4713_CMD_TX_TUNE_POWER,
args, ARRAY_SIZE(args), val,
@@ -583,12 +605,12 @@ static int si4713_tx_tune_power(struct si4713_device *sdev, u8 power,
/*
* si4713_tx_tune_measure - Enters receive mode and measures the received noise
- * level in units of dBuV on the selected frequency.
- * The Frequency must be between 76 and 108 MHz in 10 kHz
- * units and steps of 50 kHz. The command also sets the
- * antenna tuning capacitance. A value of 0 means
- * autotuning, and a value of 1 to 191 indicates manual
- * override.
+ * level in units of dBuV on the selected frequency.
+ * The Frequency must be between 76 and 108 MHz in 10 kHz
+ * units and steps of 50 kHz. The command also sets the
+ * antenna tuning capacitance. A value of 0 means
+ * autotuning, and a value of 1 to 191 indicates manual
+ * override.
* @sdev: si4713_device structure for the device we are communicating
* @frequency: desired frequency (76 - 108 MHz, unit 10 KHz, step 50 kHz)
* @antcap: value of antenna tuning capacitor (0 - 191)
@@ -599,10 +621,10 @@ static int si4713_tx_tune_measure(struct si4713_device *sdev, u16 frequency,
int err;
u8 val[SI4713_TXMEA_NRESP];
/*
- * .First byte = 0
- * .Second byte = frequency's MSB
- * .Third byte = frequency's LSB
- * .Fourth byte = antcap
+ * .First byte = 0
+ * .Second byte = frequency's MSB
+ * .Third byte = frequency's LSB
+ * .Fourth byte = antcap
*/
const u8 args[SI4713_TXMEA_NARGS] = {
0x00,
@@ -632,11 +654,11 @@ static int si4713_tx_tune_measure(struct si4713_device *sdev, u16 frequency,
/*
* si4713_tx_tune_status- Returns the status of the tx_tune_freq, tx_tune_mea or
- * tx_tune_power commands. This command return the current
- * frequency, output voltage in dBuV, the antenna tunning
- * capacitance value and the received noise level. The
- * command also clears the stcint interrupt bit when the
- * first bit of its arguments is high.
+ * tx_tune_power commands. This command return the current
+ * frequency, output voltage in dBuV, the antenna tunning
+ * capacitance value and the received noise level. The
+ * command also clears the stcint interrupt bit when the
+ * first bit of its arguments is high.
* @sdev: si4713_device structure for the device we are communicating
* @intack: 0x01 to clear the seek/tune complete interrupt status indicator.
* @frequency: returned frequency
@@ -651,7 +673,7 @@ static int si4713_tx_tune_status(struct si4713_device *sdev, u8 intack,
int err;
u8 val[SI4713_TXSTATUS_NRESP];
/*
- * .First byte = intack bit
+ * .First byte = intack bit
*/
const u8 args[SI4713_TXSTATUS_NARGS] = {
intack & SI4713_INTACK_MASK,
@@ -812,8 +834,9 @@ static int si4713_set_rds_ps_name(struct si4713_device *sdev, char *ps_name)
return rval;
}
-static int si4713_set_rds_radio_text(struct si4713_device *sdev, char *rt)
+static int si4713_set_rds_radio_text(struct si4713_device *sdev, const char *rt)
{
+ static const char cr[RDS_RADIOTEXT_BLK_SIZE] = { RDS_CARRIAGE_RETURN, 0 };
int rval = 0, i;
u16 t_index = 0;
u8 b_index = 0, cr_inserted = 0;
@@ -837,7 +860,7 @@ static int si4713_set_rds_radio_text(struct si4713_device *sdev, char *rt)
for (i = 0; i < RDS_RADIOTEXT_BLK_SIZE; i++) {
if (!rt[t_index + i] ||
rt[t_index + i] == RDS_CARRIAGE_RETURN) {
- rt[t_index + i] = RDS_CARRIAGE_RETURN;
+ rt = cr;
cr_inserted = 1;
break;
}
@@ -1024,7 +1047,6 @@ static int si4713_initialize(struct si4713_device *sdev)
if (rval < 0)
return rval;
-
sdev->frequency = DEFAULT_FREQUENCY;
sdev->stereo = 1;
sdev->tune_rnl = DEFAULT_TUNE_RNL;
@@ -1345,7 +1367,7 @@ static int si4713_probe(struct i2c_client *client,
struct v4l2_ctrl_handler *hdl;
int rval, i;
- sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
if (!sdev) {
dev_err(&client->dev, "Failed to alloc video device.\n");
rval = -ENOMEM;
@@ -1362,13 +1384,14 @@ static int si4713_probe(struct i2c_client *client,
}
sdev->gpio_reset = pdata->gpio_reset;
gpio_direction_output(sdev->gpio_reset, 0);
+ sdev->supplies = pdata->supplies;
}
- for (i = 0; i < ARRAY_SIZE(sdev->supplies); i++)
- sdev->supplies[i].supply = si4713_supply_names[i];
+ for (i = 0; i < sdev->supplies; i++)
+ sdev->supply_data[i].supply = pdata->supply_names[i];
- rval = regulator_bulk_get(&client->dev, ARRAY_SIZE(sdev->supplies),
- sdev->supplies);
+ rval = regulator_bulk_get(&client->dev, sdev->supplies,
+ sdev->supply_data);
if (rval) {
dev_err(&client->dev, "Cannot get regulators: %d\n", rval);
goto free_gpio;
@@ -1420,8 +1443,8 @@ static int si4713_probe(struct i2c_client *client,
V4L2_CID_AUDIO_COMPRESSION_GAIN, 0, MAX_ACOMP_GAIN, 1,
DEFAULT_ACOMP_GAIN);
sdev->compression_threshold = v4l2_ctrl_new_std(hdl, &si4713_ctrl_ops,
- V4L2_CID_AUDIO_COMPRESSION_THRESHOLD, MIN_ACOMP_THRESHOLD,
- MAX_ACOMP_THRESHOLD, 1,
+ V4L2_CID_AUDIO_COMPRESSION_THRESHOLD,
+ MIN_ACOMP_THRESHOLD, MAX_ACOMP_THRESHOLD, 1,
DEFAULT_ACOMP_THRESHOLD);
sdev->compression_attack_time = v4l2_ctrl_new_std(hdl, &si4713_ctrl_ops,
V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME, 0,
@@ -1443,9 +1466,11 @@ static int si4713_probe(struct i2c_client *client,
V4L2_CID_TUNE_PREEMPHASIS,
V4L2_PREEMPHASIS_75_uS, 0, V4L2_PREEMPHASIS_50_uS);
sdev->tune_pwr_level = v4l2_ctrl_new_std(hdl, &si4713_ctrl_ops,
- V4L2_CID_TUNE_POWER_LEVEL, 0, 120, 1, DEFAULT_POWER_LEVEL);
+ V4L2_CID_TUNE_POWER_LEVEL, 0, SI4713_MAX_POWER,
+ 1, DEFAULT_POWER_LEVEL);
sdev->tune_ant_cap = v4l2_ctrl_new_std(hdl, &si4713_ctrl_ops,
- V4L2_CID_TUNE_ANTENNA_CAPACITOR, 0, 191, 1, 0);
+ V4L2_CID_TUNE_ANTENNA_CAPACITOR, 0, SI4713_MAX_ANTCAP,
+ 1, 0);
if (hdl->error) {
rval = hdl->error;
@@ -1481,7 +1506,7 @@ free_irq:
free_ctrls:
v4l2_ctrl_handler_free(hdl);
put_reg:
- regulator_bulk_free(ARRAY_SIZE(sdev->supplies), sdev->supplies);
+ regulator_bulk_free(sdev->supplies, sdev->supply_data);
free_gpio:
if (gpio_is_valid(sdev->gpio_reset))
gpio_free(sdev->gpio_reset);
@@ -1505,7 +1530,7 @@ static int si4713_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- regulator_bulk_free(ARRAY_SIZE(sdev->supplies), sdev->supplies);
+ regulator_bulk_free(sdev->supplies, sdev->supply_data);
if (gpio_is_valid(sdev->gpio_reset))
gpio_free(sdev->gpio_reset);
kfree(sdev);
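
The interrupt-less paths added to si4713_send_command() and si4713_wait_stc() in this file share one pattern: derive a deadline from the caller's microsecond timeout, retry the status read until CTS (or STC) is reported, sleep a few milliseconds between attempts, and give up once the deadline passes. A standalone userspace sketch of that retry loop (illustrative only; try_once() stands in for the i2c_master_recv() call plus its status check):

/*
 * Sketch: poll with a deadline and a small sleep between attempts.
 */
#include <errno.h>
#include <stdbool.h>
#include <time.h>
#include <unistd.h>

static bool try_once(void)
{
	/* Placeholder: "read the response and check the CTS bit". */
	static int attempts;
	return ++attempts >= 3;
}

static int wait_for_cts(long timeout_us)
{
	struct timespec now, deadline;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec  += timeout_us / 1000000;
	deadline.tv_nsec += (timeout_us % 1000000) * 1000;
	if (deadline.tv_nsec >= 1000000000) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000;
	}

	for (;;) {
		if (try_once())
			return 0;

		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > deadline.tv_sec ||
		    (now.tv_sec == deadline.tv_sec &&
		     now.tv_nsec >= deadline.tv_nsec))
			return -EBUSY;	/* mirrors the driver's timeout result */

		usleep(3000);		/* mirrors the driver's 3-4 ms backoff */
	}
}

int main(void)
{
	return wait_for_cts(50000) ? 1 : 0;
}
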
diff --git a/drivers/media/radio/si4713-i2c.h b/drivers/media/radio/si4713/si4713.h
index 25cdea26343b..4837cf6e0e1b 100644
--- a/drivers/media/radio/si4713-i2c.h
+++ b/drivers/media/radio/si4713/si4713.h
@@ -15,6 +15,7 @@
#ifndef SI4713_I2C_H
#define SI4713_I2C_H
+#include <linux/regulator/consumer.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-ctrls.h>
#include <media/si4713.h>
@@ -226,7 +227,8 @@ struct si4713_device {
struct v4l2_ctrl *tune_ant_cap;
};
struct completion work;
- struct regulator_bulk_data supplies[SI4713_NUM_SUPPLIES];
+ unsigned supplies;
+ struct regulator_bulk_data supply_data[SI4713_NUM_SUPPLIES];
int gpio_reset;
u32 power_state;
u32 rds_enabled;
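
With the fields added above, the supply count and names now come from platform data instead of a fixed two-entry table, and the regulator_bulk_* helpers are called with that count (skipping regulator handling entirely when it is zero). A kernel-style sketch of the pattern (field and parameter names are assumptions, not the driver's exact ones):

/*
 * Sketch: fill a regulator_bulk_data array from platform-provided names and
 * operate on the whole set at once.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/regulator/consumer.h>

#define DEMO_MAX_SUPPLIES	2

struct demo_dev {
	unsigned int num_supplies;
	struct regulator_bulk_data supply_data[DEMO_MAX_SUPPLIES];
};

static int demo_get_supplies(struct device *dev, struct demo_dev *d,
			     const char * const *names, unsigned int n)
{
	unsigned int i;

	if (n > DEMO_MAX_SUPPLIES)
		return -EINVAL;

	d->num_supplies = n;
	for (i = 0; i < n; i++)
		d->supply_data[i].supply = names[i];

	/* Acquire all regulators at once; fails if any one is missing. */
	return regulator_bulk_get(dev, d->num_supplies, d->supply_data);
}

static int demo_power_on(struct demo_dev *d)
{
	/* A zero count simply means "no supplies to manage". */
	if (!d->num_supplies)
		return 0;
	return regulator_bulk_enable(d->num_supplies, d->supply_data);
}
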
diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
index cef06981b7c9..7c14060a40b8 100644
--- a/drivers/media/radio/tea575x.c
+++ b/drivers/media/radio/tea575x.c
@@ -20,12 +20,12 @@
*
*/
-#include <asm/io.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <asm/io.h>
#include <media/v4l2-device.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index f329485c6629..822b9f47ca72 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1909,10 +1909,8 @@ static struct input_dev *imon_init_idev(struct imon_context *ictx)
int ret, i;
idev = input_allocate_device();
- if (!idev) {
- dev_err(ictx->dev, "input dev allocation failed\n");
+ if (!idev)
goto out;
- }
snprintf(ictx->name_idev, sizeof(ictx->name_idev),
"iMON Panel, Knob and Mouse(%04x:%04x)",
@@ -1960,10 +1958,8 @@ static struct input_dev *imon_init_touch(struct imon_context *ictx)
int ret;
touch = input_allocate_device();
- if (!touch) {
- dev_err(ictx->dev, "touchscreen input dev allocation failed\n");
+ if (!touch)
goto touch_alloc_failed;
- }
snprintf(ictx->name_touch, sizeof(ictx->name_touch),
"iMON USB Touchscreen (%04x:%04x)",
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index b1cde8c0422b..0b8c54919010 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -98,4 +98,5 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-videomate-s350.o \
rc-videomate-tv-pvr.o \
rc-winfast.o \
- rc-winfast-usbii-deluxe.o
+ rc-winfast-usbii-deluxe.o \
+ rc-su3000.o
diff --git a/drivers/media/rc/keymaps/rc-su3000.c b/drivers/media/rc/keymaps/rc-su3000.c
new file mode 100644
index 000000000000..8dbd3e9bc951
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-su3000.c
@@ -0,0 +1,75 @@
+/* rc-su3000.c - Keytable for Geniatech HDStar Remote Controller
+ *
+ * Copyright (c) 2013 by Evgeny Plehov <Evgeny Plehov@ukr.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+static struct rc_map_table su3000[] = {
+ { 0x25, KEY_POWER }, /* right-bottom Red */
+ { 0x0a, KEY_MUTE }, /* -/-- */
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x00, KEY_0 },
+ { 0x20, KEY_UP }, /* CH+ */
+ { 0x21, KEY_DOWN }, /* CH- */
+ { 0x12, KEY_VOLUMEUP }, /* Brightness Up */
+ { 0x13, KEY_VOLUMEDOWN },/* Brightness Down */
+ { 0x1f, KEY_RECORD },
+ { 0x17, KEY_PLAY },
+ { 0x16, KEY_PAUSE },
+ { 0x0b, KEY_STOP },
+ { 0x27, KEY_FASTFORWARD },/* >> */
+ { 0x26, KEY_REWIND }, /* << */
+ { 0x0d, KEY_OK }, /* Mute */
+ { 0x11, KEY_LEFT }, /* VOL- */
+ { 0x10, KEY_RIGHT }, /* VOL+ */
+ { 0x29, KEY_BACK }, /* button under 9 */
+ { 0x2c, KEY_MENU }, /* TTX */
+ { 0x2b, KEY_EPG }, /* EPG */
+ { 0x1e, KEY_RED }, /* OSD */
+ { 0x0e, KEY_GREEN }, /* Window */
+ { 0x2d, KEY_YELLOW }, /* button under << */
+ { 0x0f, KEY_BLUE }, /* bottom yellow button */
+ { 0x14, KEY_AUDIO }, /* Snapshot */
+ { 0x38, KEY_TV }, /* TV/Radio */
+ { 0x0c, KEY_ESC } /* upper Red button */
+};
+
+static struct rc_map_list su3000_map = {
+ .map = {
+ .scan = su3000,
+ .size = ARRAY_SIZE(su3000),
+ .rc_type = RC_TYPE_RC5,
+ .name = RC_MAP_SU3000,
+ }
+};
+
+static int __init init_rc_map_su3000(void)
+{
+ return rc_map_register(&su3000_map);
+}
+
+static void __exit exit_rc_map_su3000(void)
+{
+ rc_map_unregister(&su3000_map);
+}
+
+module_init(init_rc_map_su3000)
+module_exit(exit_rc_map_su3000)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Evgeny Plehov <Evgeny Plehov@ukr.net>");
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 3c761014d3ce..a25bb1581e46 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -199,6 +199,7 @@ static bool debug;
#define VENDOR_TIVO 0x105a
#define VENDOR_CONEXANT 0x0572
#define VENDOR_TWISTEDMELON 0x2596
+#define VENDOR_HAUPPAUGE 0x2040
enum mceusb_model_type {
MCE_GEN2 = 0, /* Most boards */
@@ -210,6 +211,7 @@ enum mceusb_model_type {
MULTIFUNCTION,
TIVO_KIT,
MCE_GEN2_NO_TX,
+ HAUPPAUGE_CX_HYBRID_TV,
};
struct mceusb_model {
@@ -258,6 +260,11 @@ static const struct mceusb_model mceusb_model[] = {
.no_tx = 1, /* tx isn't wired up at all */
.name = "Conexant Hybrid TV (cx231xx) MCE IR",
},
+ [HAUPPAUGE_CX_HYBRID_TV] = {
+ .rc_map = RC_MAP_HAUPPAUGE,
+ .no_tx = 1, /* eeprom says it has no tx */
+ .name = "Conexant Hybrid TV (cx231xx) MCE IR no TX",
+ },
[MULTIFUNCTION] = {
.mce_gen2 = 1,
.ir_intfnum = 2,
@@ -399,6 +406,9 @@ static struct usb_device_id mceusb_dev_table[] = {
{ USB_DEVICE(VENDOR_TWISTEDMELON, 0x8016) },
/* Twisted Melon Inc. - Manta Transceiver */
{ USB_DEVICE(VENDOR_TWISTEDMELON, 0x8042) },
+ /* Hauppauge WinTV-HVR-930C-HD - based on cx231xx */
+ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb130),
+ .driver_info = HAUPPAUGE_CX_HYBRID_TV },
/* Terminating entry */
{ }
};
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 46da365c9c84..02e2f38c9c85 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -22,6 +22,10 @@
#include <linux/module.h>
#include "rc-core-priv.h"
+/* Bitmap to store allocated device numbers from 0 to IRRCV_NUM_DEVICES - 1 */
+#define IRRCV_NUM_DEVICES 256
+DECLARE_BITMAP(ir_core_dev_number, IRRCV_NUM_DEVICES);
+
/* Sizes are in bytes, 256 bytes allows for 32 entries on x64 */
#define IR_TAB_MIN_SIZE 256
#define IR_TAB_MAX_SIZE 8192
@@ -1065,10 +1069,9 @@ EXPORT_SYMBOL_GPL(rc_free_device);
int rc_register_device(struct rc_dev *dev)
{
static bool raw_init = false; /* raw decoders loaded? */
- static atomic_t devno = ATOMIC_INIT(0);
struct rc_map *rc_map;
const char *path;
- int rc;
+ int rc, devno;
if (!dev || !dev->map_name)
return -EINVAL;
@@ -1096,7 +1099,15 @@ int rc_register_device(struct rc_dev *dev)
*/
mutex_lock(&dev->lock);
- dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
+ do {
+ devno = find_first_zero_bit(ir_core_dev_number,
+ IRRCV_NUM_DEVICES);
+ /* No free device slots */
+ if (devno >= IRRCV_NUM_DEVICES)
+ return -ENOMEM;
+ } while (test_and_set_bit(devno, ir_core_dev_number));
+
+ dev->devno = devno;
dev_set_name(&dev->dev, "rc%ld", dev->devno);
dev_set_drvdata(&dev->dev, dev);
rc = device_add(&dev->dev);
@@ -1186,6 +1197,7 @@ out_dev:
device_del(&dev->dev);
out_unlock:
mutex_unlock(&dev->lock);
+ clear_bit(dev->devno, ir_core_dev_number);
return rc;
}
EXPORT_SYMBOL_GPL(rc_register_device);
@@ -1197,6 +1209,8 @@ void rc_unregister_device(struct rc_dev *dev)
del_timer_sync(&dev->timer_keyup);
+ clear_bit(dev->devno, ir_core_dev_number);
+
if (dev->driver_type == RC_DRIVER_IR_RAW)
ir_raw_event_unregister(dev);
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
index 65120c2d47ad..8f0cddb9e8f2 100644
--- a/drivers/media/rc/st_rc.c
+++ b/drivers/media/rc/st_rc.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <media/rc-core.h>
#include <linux/pinctrl/consumer.h>
@@ -28,6 +29,7 @@ struct st_rc_device {
int sample_mult;
int sample_div;
bool rxuhfmode;
+ struct reset_control *rstc;
};
/* Registers */
@@ -161,6 +163,10 @@ static void st_rc_hardware_init(struct st_rc_device *dev)
unsigned int rx_max_symbol_per = MAX_SYMB_TIME;
unsigned int rx_sampling_freq_div;
+ /* Enable the IP */
+ if (dev->rstc)
+ reset_control_deassert(dev->rstc);
+
clk_prepare_enable(dev->sys_clock);
baseclock = clk_get_rate(dev->sys_clock);
@@ -271,6 +277,11 @@ static int st_rc_probe(struct platform_device *pdev)
else
rc_dev->rx_base = rc_dev->base;
+
+ rc_dev->rstc = reset_control_get(dev, NULL);
+ if (IS_ERR(rc_dev->rstc))
+ rc_dev->rstc = NULL;
+
rc_dev->dev = dev;
platform_set_drvdata(pdev, rc_dev);
st_rc_hardware_init(rc_dev);
@@ -338,6 +349,8 @@ static int st_rc_suspend(struct device *dev)
writel(0x00, rc_dev->rx_base + IRB_RX_EN);
writel(0x00, rc_dev->rx_base + IRB_RX_INT_EN);
clk_disable_unprepare(rc_dev->sys_clock);
+ if (rc_dev->rstc)
+ reset_control_assert(rc_dev->rstc);
}
return 0;
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index 15665debc572..ba2e365296cf 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -215,6 +215,13 @@ config MEDIA_TUNER_FC2580
help
FCI FC2580 silicon tuner driver.
+config MEDIA_TUNER_M88TS2022
+ tristate "Montage M88TS2022 silicon tuner"
+ depends on MEDIA_SUPPORT && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Montage M88TS2022 silicon tuner driver.
+
config MEDIA_TUNER_TUA9001
tristate "Infineon TUA 9001 silicon tuner"
depends on MEDIA_SUPPORT && I2C
diff --git a/drivers/media/tuners/Makefile b/drivers/media/tuners/Makefile
index 308f108eadba..efe82a904b12 100644
--- a/drivers/media/tuners/Makefile
+++ b/drivers/media/tuners/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_MEDIA_TUNER_TDA18212) += tda18212.o
obj-$(CONFIG_MEDIA_TUNER_E4000) += e4000.o
obj-$(CONFIG_MEDIA_TUNER_FC2580) += fc2580.o
obj-$(CONFIG_MEDIA_TUNER_TUA9001) += tua9001.o
+obj-$(CONFIG_MEDIA_TUNER_M88TS2022) += m88ts2022.o
obj-$(CONFIG_MEDIA_TUNER_FC0011) += fc0011.o
obj-$(CONFIG_MEDIA_TUNER_FC0012) += fc0012.o
obj-$(CONFIG_MEDIA_TUNER_FC0013) += fc0013.o
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index 72971a8d3c37..40c1da707d15 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -243,8 +243,10 @@ static int e4000_set_params(struct dvb_frontend *fe)
break;
}
- if (i == ARRAY_SIZE(e4000_pll_lut))
+ if (i == ARRAY_SIZE(e4000_pll_lut)) {
+ ret = -EINVAL;
goto err;
+ }
/*
* Note: Currently f_vco overflows when c->frequency is 1 073 741 824 Hz
@@ -271,8 +273,10 @@ static int e4000_set_params(struct dvb_frontend *fe)
break;
}
- if (i == ARRAY_SIZE(e400_lna_filter_lut))
+ if (i == ARRAY_SIZE(e400_lna_filter_lut)) {
+ ret = -EINVAL;
goto err;
+ }
ret = e4000_wr_reg(priv, 0x10, e400_lna_filter_lut[i].val);
if (ret < 0)
@@ -284,8 +288,10 @@ static int e4000_set_params(struct dvb_frontend *fe)
break;
}
- if (i == ARRAY_SIZE(e4000_if_filter_lut))
+ if (i == ARRAY_SIZE(e4000_if_filter_lut)) {
+ ret = -EINVAL;
goto err;
+ }
buf[0] = e4000_if_filter_lut[i].reg11_val;
buf[1] = e4000_if_filter_lut[i].reg12_val;
@@ -300,8 +306,10 @@ static int e4000_set_params(struct dvb_frontend *fe)
break;
}
- if (i == ARRAY_SIZE(e4000_band_lut))
+ if (i == ARRAY_SIZE(e4000_band_lut)) {
+ ret = -EINVAL;
goto err;
+ }
ret = e4000_wr_reg(priv, 0x07, e4000_band_lut[i].reg07_val);
if (ret < 0)
diff --git a/drivers/media/tuners/m88ts2022.c b/drivers/media/tuners/m88ts2022.c
new file mode 100644
index 000000000000..40c42dec721b
--- /dev/null
+++ b/drivers/media/tuners/m88ts2022.c
@@ -0,0 +1,674 @@
+/*
+ * Montage M88TS2022 silicon tuner driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Some calculations are taken from existing TS2020 driver.
+ */
+
+#include "m88ts2022_priv.h"
+
+/* write multiple registers */
+static int m88ts2022_wr_regs(struct m88ts2022_priv *priv,
+ u8 reg, const u8 *val, int len)
+{
+#define MAX_WR_LEN 3
+#define MAX_WR_XFER_LEN (MAX_WR_LEN + 1)
+ int ret;
+ u8 buf[MAX_WR_XFER_LEN];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->client->addr,
+ .flags = 0,
+ .len = 1 + len,
+ .buf = buf,
+ }
+ };
+
+ if (WARN_ON(len > MAX_WR_LEN))
+ return -EINVAL;
+
+ buf[0] = reg;
+ memcpy(&buf[1], val, len);
+
+ ret = i2c_transfer(priv->client->adapter, msg, 1);
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ dev_warn(&priv->client->dev,
+ "%s: i2c wr failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+/* read multiple registers */
+static int m88ts2022_rd_regs(struct m88ts2022_priv *priv, u8 reg,
+ u8 *val, int len)
+{
+#define MAX_RD_LEN 1
+#define MAX_RD_XFER_LEN (MAX_RD_LEN)
+ int ret;
+ u8 buf[MAX_RD_XFER_LEN];
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &reg,
+ }, {
+ .addr = priv->client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf,
+ }
+ };
+
+ if (WARN_ON(len > MAX_RD_LEN))
+ return -EINVAL;
+
+ ret = i2c_transfer(priv->client->adapter, msg, 2);
+ if (ret == 2) {
+ memcpy(val, buf, len);
+ ret = 0;
+ } else {
+ dev_warn(&priv->client->dev,
+ "%s: i2c rd failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+/* write single register */
+static int m88ts2022_wr_reg(struct m88ts2022_priv *priv, u8 reg, u8 val)
+{
+ return m88ts2022_wr_regs(priv, reg, &val, 1);
+}
+
+/* read single register */
+static int m88ts2022_rd_reg(struct m88ts2022_priv *priv, u8 reg, u8 *val)
+{
+ return m88ts2022_rd_regs(priv, reg, val, 1);
+}
+
+/* write single register with mask */
+static int m88ts2022_wr_reg_mask(struct m88ts2022_priv *priv,
+ u8 reg, u8 val, u8 mask)
+{
+ int ret;
+ u8 u8tmp;
+
+ /* no need for read if whole reg is written */
+ if (mask != 0xff) {
+ ret = m88ts2022_rd_regs(priv, reg, &u8tmp, 1);
+ if (ret)
+ return ret;
+
+ val &= mask;
+ u8tmp &= ~mask;
+ val |= u8tmp;
+ }
+
+ return m88ts2022_wr_regs(priv, reg, &val, 1);
+}
+
+static int m88ts2022_cmd(struct dvb_frontend *fe,
+ int op, int sleep, u8 reg, u8 mask, u8 val, u8 *reg_val)
+{
+ struct m88ts2022_priv *priv = fe->tuner_priv;
+ int ret, i;
+ u8 u8tmp;
+ struct m88ts2022_reg_val reg_vals[] = {
+ {0x51, 0x1f - op},
+ {0x51, 0x1f},
+ {0x50, 0x00 + op},
+ {0x50, 0x00},
+ };
+
+ for (i = 0; i < 2; i++) {
+ dev_dbg(&priv->client->dev,
+ "%s: i=%d op=%02x reg=%02x mask=%02x val=%02x\n",
+ __func__, i, op, reg, mask, val);
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ ret = m88ts2022_wr_reg(priv, reg_vals[i].reg,
+ reg_vals[i].val);
+ if (ret)
+ goto err;
+ }
+
+ usleep_range(sleep * 1000, sleep * 10000);
+
+ ret = m88ts2022_rd_reg(priv, reg, &u8tmp);
+ if (ret)
+ goto err;
+
+ if ((u8tmp & mask) != val)
+ break;
+ }
+
+ if (reg_val)
+ *reg_val = u8tmp;
+err:
+ return ret;
+}
+
+static int m88ts2022_set_params(struct dvb_frontend *fe)
+{
+ struct m88ts2022_priv *priv = fe->tuner_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ unsigned int frequency_khz, frequency_offset_khz, f_3db_hz;
+ unsigned int f_ref_khz, f_vco_khz, div_ref, div_out, pll_n, gdiv28;
+ u8 buf[3], u8tmp, cap_code, lpf_gm, lpf_mxdiv, div_max, div_min;
+ u16 u16tmp;
+ dev_dbg(&priv->client->dev,
+ "%s: frequency=%d symbol_rate=%d rolloff=%d\n",
+ __func__, c->frequency, c->symbol_rate, c->rolloff);
+ /*
+ * Integer-N PLL synthesizer
+ * kHz is used for all calculations to keep calculations within 32-bit
+ */
+ f_ref_khz = DIV_ROUND_CLOSEST(priv->cfg.clock, 1000);
+ div_ref = DIV_ROUND_CLOSEST(f_ref_khz, 2000);
+
+ if (c->symbol_rate < 5000000)
+ frequency_offset_khz = 3000; /* 3 MHz */
+ else
+ frequency_offset_khz = 0;
+
+ frequency_khz = c->frequency + frequency_offset_khz;
+
+ if (frequency_khz < 1103000) {
+ div_out = 4;
+ u8tmp = 0x1b;
+ } else {
+ div_out = 2;
+ u8tmp = 0x0b;
+ }
+
+ buf[0] = u8tmp;
+ buf[1] = 0x40;
+ ret = m88ts2022_wr_regs(priv, 0x10, buf, 2);
+ if (ret)
+ goto err;
+
+ f_vco_khz = frequency_khz * div_out;
+ pll_n = f_vco_khz * div_ref / f_ref_khz;
+ pll_n += pll_n % 2;
+ priv->frequency_khz = pll_n * f_ref_khz / div_ref / div_out;
+
+ if (pll_n < 4095)
+ u16tmp = pll_n - 1024;
+ else if (pll_n < 6143)
+ u16tmp = pll_n + 1024;
+ else
+ u16tmp = pll_n + 3072;
+
+ buf[0] = (u16tmp >> 8) & 0x3f;
+ buf[1] = (u16tmp >> 0) & 0xff;
+ buf[2] = div_ref - 8;
+ ret = m88ts2022_wr_regs(priv, 0x01, buf, 3);
+ if (ret)
+ goto err;
+
+ dev_dbg(&priv->client->dev,
+ "%s: frequency=%u offset=%d f_vco_khz=%u pll_n=%u div_ref=%u div_out=%u\n",
+ __func__, priv->frequency_khz,
+ priv->frequency_khz - c->frequency, f_vco_khz, pll_n,
+ div_ref, div_out);
+
+ ret = m88ts2022_cmd(fe, 0x10, 5, 0x15, 0x40, 0x00, NULL);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_rd_reg(priv, 0x14, &u8tmp);
+ if (ret)
+ goto err;
+
+ u8tmp &= 0x7f;
+ if (u8tmp < 64) {
+ ret = m88ts2022_wr_reg_mask(priv, 0x10, 0x80, 0x80);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x11, 0x6f);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_cmd(fe, 0x10, 5, 0x15, 0x40, 0x00, NULL);
+ if (ret)
+ goto err;
+ }
+
+ ret = m88ts2022_rd_reg(priv, 0x14, &u8tmp);
+ if (ret)
+ goto err;
+
+ u8tmp &= 0x1f;
+ if (u8tmp > 19) {
+ ret = m88ts2022_wr_reg_mask(priv, 0x10, 0x00, 0x02);
+ if (ret)
+ goto err;
+ }
+
+ ret = m88ts2022_cmd(fe, 0x08, 5, 0x3c, 0xff, 0x00, NULL);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x25, 0x00);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x27, 0x70);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x41, 0x09);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x08, 0x0b);
+ if (ret)
+ goto err;
+
+ /* filters */
+ gdiv28 = DIV_ROUND_CLOSEST(f_ref_khz * 1694U, 1000000U);
+
+ ret = m88ts2022_wr_reg(priv, 0x04, gdiv28);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_cmd(fe, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
+ if (ret)
+ goto err;
+
+ cap_code = u8tmp & 0x3f;
+
+ ret = m88ts2022_wr_reg(priv, 0x41, 0x0d);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_cmd(fe, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
+ if (ret)
+ goto err;
+
+ u8tmp &= 0x3f;
+ cap_code = (cap_code + u8tmp) / 2;
+ gdiv28 = gdiv28 * 207 / (cap_code * 2 + 151);
+ div_max = gdiv28 * 135 / 100;
+ div_min = gdiv28 * 78 / 100;
+ div_max = clamp_val(div_max, 0U, 63U);
+
+ f_3db_hz = c->symbol_rate * 135UL / 200UL;
+ f_3db_hz += 2000000U + (frequency_offset_khz * 1000U);
+ f_3db_hz = clamp(f_3db_hz, 7000000U, 40000000U);
+
+#define LPF_COEFF 3200U
+ lpf_gm = DIV_ROUND_CLOSEST(f_3db_hz * gdiv28, LPF_COEFF * f_ref_khz);
+ lpf_gm = clamp_val(lpf_gm, 1U, 23U);
+
+ lpf_mxdiv = DIV_ROUND_CLOSEST(lpf_gm * LPF_COEFF * f_ref_khz, f_3db_hz);
+ if (lpf_mxdiv < div_min)
+ lpf_mxdiv = DIV_ROUND_CLOSEST(++lpf_gm * LPF_COEFF * f_ref_khz, f_3db_hz);
+ lpf_mxdiv = clamp_val(lpf_mxdiv, 0U, div_max);
+
+ ret = m88ts2022_wr_reg(priv, 0x04, lpf_mxdiv);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x06, lpf_gm);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_cmd(fe, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
+ if (ret)
+ goto err;
+
+ cap_code = u8tmp & 0x3f;
+
+ ret = m88ts2022_wr_reg(priv, 0x41, 0x09);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_cmd(fe, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
+ if (ret)
+ goto err;
+
+ u8tmp &= 0x3f;
+ cap_code = (cap_code + u8tmp) / 2;
+
+ u8tmp = cap_code | 0x80;
+ ret = m88ts2022_wr_reg(priv, 0x25, u8tmp);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x27, 0x30);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x08, 0x09);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_cmd(fe, 0x01, 20, 0x21, 0xff, 0x00, NULL);
+ if (ret)
+ goto err;
+err:
+ if (ret)
+ dev_dbg(&priv->client->dev, "%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int m88ts2022_init(struct dvb_frontend *fe)
+{
+ struct m88ts2022_priv *priv = fe->tuner_priv;
+ int ret, i;
+ u8 u8tmp;
+ static const struct m88ts2022_reg_val reg_vals[] = {
+ {0x7d, 0x9d},
+ {0x7c, 0x9a},
+ {0x7a, 0x76},
+ {0x3b, 0x01},
+ {0x63, 0x88},
+ {0x61, 0x85},
+ {0x22, 0x30},
+ {0x30, 0x40},
+ {0x20, 0x23},
+ {0x24, 0x02},
+ {0x12, 0xa0},
+ };
+ dev_dbg(&priv->client->dev, "%s:\n", __func__);
+
+ ret = m88ts2022_wr_reg(priv, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x00, 0x03);
+ if (ret)
+ goto err;
+
+ switch (priv->cfg.clock_out) {
+ case M88TS2022_CLOCK_OUT_DISABLED:
+ u8tmp = 0x60;
+ break;
+ case M88TS2022_CLOCK_OUT_ENABLED:
+ u8tmp = 0x70;
+ ret = m88ts2022_wr_reg(priv, 0x05, priv->cfg.clock_out_div);
+ if (ret)
+ goto err;
+ break;
+ case M88TS2022_CLOCK_OUT_ENABLED_XTALOUT:
+ u8tmp = 0x6c;
+ break;
+ default:
+ goto err;
+ }
+
+ ret = m88ts2022_wr_reg(priv, 0x42, u8tmp);
+ if (ret)
+ goto err;
+
+ if (priv->cfg.loop_through)
+ u8tmp = 0xec;
+ else
+ u8tmp = 0x6c;
+
+ ret = m88ts2022_wr_reg(priv, 0x62, u8tmp);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ ret = m88ts2022_wr_reg(priv, reg_vals[i].reg, reg_vals[i].val);
+ if (ret)
+ goto err;
+ }
+err:
+ if (ret)
+ dev_dbg(&priv->client->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ts2022_sleep(struct dvb_frontend *fe)
+{
+ struct m88ts2022_priv *priv = fe->tuner_priv;
+ int ret;
+ dev_dbg(&priv->client->dev, "%s:\n", __func__);
+
+ ret = m88ts2022_wr_reg(priv, 0x00, 0x00);
+ if (ret)
+ goto err;
+err:
+ if (ret)
+ dev_dbg(&priv->client->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ts2022_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct m88ts2022_priv *priv = fe->tuner_priv;
+ dev_dbg(&priv->client->dev, "%s:\n", __func__);
+
+ *frequency = priv->frequency_khz;
+ return 0;
+}
+
+static int m88ts2022_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct m88ts2022_priv *priv = fe->tuner_priv;
+ dev_dbg(&priv->client->dev, "%s:\n", __func__);
+
+ *frequency = 0; /* Zero-IF */
+ return 0;
+}
+
+static int m88ts2022_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ struct m88ts2022_priv *priv = fe->tuner_priv;
+ int ret;
+ u8 u8tmp;
+ u16 gain, u16tmp;
+ unsigned int gain1, gain2, gain3;
+
+ ret = m88ts2022_rd_reg(priv, 0x3d, &u8tmp);
+ if (ret)
+ goto err;
+
+ gain1 = (u8tmp >> 0) & 0x1f;
+ gain1 = clamp(gain1, 0U, 15U);
+
+ ret = m88ts2022_rd_reg(priv, 0x21, &u8tmp);
+ if (ret)
+ goto err;
+
+ gain2 = (u8tmp >> 0) & 0x1f;
+ gain2 = clamp(gain2, 2U, 16U);
+
+ ret = m88ts2022_rd_reg(priv, 0x66, &u8tmp);
+ if (ret)
+ goto err;
+
+ gain3 = (u8tmp >> 3) & 0x07;
+ gain3 = clamp(gain3, 0U, 6U);
+
+ gain = gain1 * 265 + gain2 * 338 + gain3 * 285;
+
+ /* scale value to 0x0000-0xffff */
+ u16tmp = (0xffff - gain);
+ u16tmp = clamp_val(u16tmp, 59000U, 61500U);
+
+ *strength = (u16tmp - 59000) * 0xffff / (61500 - 59000);
+err:
+ if (ret)
+ dev_dbg(&priv->client->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static const struct dvb_tuner_ops m88ts2022_tuner_ops = {
+ .info = {
+ .name = "Montage M88TS2022",
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ },
+
+ .init = m88ts2022_init,
+ .sleep = m88ts2022_sleep,
+ .set_params = m88ts2022_set_params,
+
+ .get_frequency = m88ts2022_get_frequency,
+ .get_if_frequency = m88ts2022_get_if_frequency,
+ .get_rf_strength = m88ts2022_get_rf_strength,
+};
+
+static int m88ts2022_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct m88ts2022_config *cfg = client->dev.platform_data;
+ struct dvb_frontend *fe = cfg->fe;
+ struct m88ts2022_priv *priv;
+ int ret;
+ u8 chip_id, u8tmp;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ dev_err(&client->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
+ goto err;
+ }
+
+ memcpy(&priv->cfg, cfg, sizeof(struct m88ts2022_config));
+ priv->client = client;
+
+ /* check if the tuner is there */
+ ret = m88ts2022_rd_reg(priv, 0x00, &u8tmp);
+ if (ret)
+ goto err;
+
+ if ((u8tmp & 0x03) == 0x00) {
+ ret = m88ts2022_wr_reg(priv, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ usleep_range(2000, 50000);
+ }
+
+ ret = m88ts2022_wr_reg(priv, 0x00, 0x03);
+ if (ret)
+ goto err;
+
+ usleep_range(2000, 50000);
+
+ ret = m88ts2022_rd_reg(priv, 0x00, &chip_id);
+ if (ret)
+ goto err;
+
+ dev_dbg(&priv->client->dev, "%s: chip_id=%02x\n", __func__, chip_id);
+
+ switch (chip_id) {
+ case 0xc3:
+ case 0x83:
+ break;
+ default:
+ goto err;
+ }
+
+ switch (priv->cfg.clock_out) {
+ case M88TS2022_CLOCK_OUT_DISABLED:
+ u8tmp = 0x60;
+ break;
+ case M88TS2022_CLOCK_OUT_ENABLED:
+ u8tmp = 0x70;
+ ret = m88ts2022_wr_reg(priv, 0x05, priv->cfg.clock_out_div);
+ if (ret)
+ goto err;
+ break;
+ case M88TS2022_CLOCK_OUT_ENABLED_XTALOUT:
+ u8tmp = 0x6c;
+ break;
+ default:
+ goto err;
+ }
+
+ ret = m88ts2022_wr_reg(priv, 0x42, u8tmp);
+ if (ret)
+ goto err;
+
+ if (priv->cfg.loop_through)
+ u8tmp = 0xec;
+ else
+ u8tmp = 0x6c;
+
+ ret = m88ts2022_wr_reg(priv, 0x62, u8tmp);
+ if (ret)
+ goto err;
+
+ /* sleep */
+ ret = m88ts2022_wr_reg(priv, 0x00, 0x00);
+ if (ret)
+ goto err;
+
+ dev_info(&priv->client->dev,
+ "%s: Montage M88TS2022 successfully identified\n",
+ KBUILD_MODNAME);
+
+ fe->tuner_priv = priv;
+ memcpy(&fe->ops.tuner_ops, &m88ts2022_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
+ i2c_set_clientdata(client, priv);
+ return 0;
+err:
+ dev_dbg(&client->dev, "%s: failed=%d\n", __func__, ret);
+ kfree(priv);
+ return ret;
+}
+
+static int m88ts2022_remove(struct i2c_client *client)
+{
+ struct m88ts2022_priv *priv = i2c_get_clientdata(client);
+ struct dvb_frontend *fe = priv->cfg.fe;
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
+ fe->tuner_priv = NULL;
+ kfree(priv);
+
+ return 0;
+}
+
+static const struct i2c_device_id m88ts2022_id[] = {
+ {"m88ts2022", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, m88ts2022_id);
+
+static struct i2c_driver m88ts2022_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "m88ts2022",
+ },
+ .probe = m88ts2022_probe,
+ .remove = m88ts2022_remove,
+ .id_table = m88ts2022_id,
+};
+
+module_i2c_driver(m88ts2022_driver);
+
+MODULE_DESCRIPTION("Montage M88TS2022 silicon tuner driver");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/tuners/m88ts2022.h b/drivers/media/tuners/m88ts2022.h
new file mode 100644
index 000000000000..659fa1b1633a
--- /dev/null
+++ b/drivers/media/tuners/m88ts2022.h
@@ -0,0 +1,54 @@
+/*
+ * Montage M88TS2022 silicon tuner driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef M88TS2022_H
+#define M88TS2022_H
+
+#include "dvb_frontend.h"
+
+struct m88ts2022_config {
+ /*
+ * clock
+ * 16000000 - 32000000
+ */
+ u32 clock;
+
+ /*
+ * RF loop-through
+ */
+ u8 loop_through:1;
+
+ /*
+ * clock output
+ */
+#define M88TS2022_CLOCK_OUT_DISABLED 0
+#define M88TS2022_CLOCK_OUT_ENABLED 1
+#define M88TS2022_CLOCK_OUT_ENABLED_XTALOUT 2
+ u8 clock_out:2;
+
+ /*
+ * clock output divider
+ * 1 - 31
+ */
+ u8 clock_out_div:5;
+
+ /*
+ * pointer to DVB frontend
+ */
+ struct dvb_frontend *fe;
+};
+
+#endif
diff --git a/drivers/media/tuners/m88ts2022_priv.h b/drivers/media/tuners/m88ts2022_priv.h
new file mode 100644
index 000000000000..0363dd866a2d
--- /dev/null
+++ b/drivers/media/tuners/m88ts2022_priv.h
@@ -0,0 +1,34 @@
+/*
+ * Montage M88TS2022 silicon tuner driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef M88TS2022_PRIV_H
+#define M88TS2022_PRIV_H
+
+#include "m88ts2022.h"
+
+struct m88ts2022_priv {
+ struct m88ts2022_config cfg;
+ struct i2c_client *client;
+ struct dvb_frontend *fe;
+ u32 frequency_khz;
+};
+
+struct m88ts2022_reg_val {
+ u8 reg;
+ u8 val;
+};
+
+#endif
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 4be5cf808a40..cca508d4aafb 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -134,15 +134,6 @@ struct xc2028_data {
_rc; \
})
-#define i2c_rcv(priv, buf, size) ({ \
- int _rc; \
- _rc = tuner_i2c_xfer_recv(&priv->i2c_props, buf, size); \
- if (size != _rc) \
- tuner_err("i2c input error: rc = %d (should be %d)\n", \
- _rc, (int)size); \
- _rc; \
-})
-
#define i2c_send_recv(priv, obuf, osize, ibuf, isize) ({ \
int _rc; \
_rc = tuner_i2c_xfer_send_recv(&priv->i2c_props, obuf, osize, \
@@ -276,6 +267,7 @@ static int check_device_status(struct xc2028_data *priv)
case XC2028_WAITING_FIRMWARE:
return -EAGAIN;
case XC2028_ACTIVE:
+ return 1;
case XC2028_SLEEP:
return 0;
case XC2028_NODEV:
@@ -718,6 +710,8 @@ static int load_scode(struct dvb_frontend *fe, unsigned int type,
return 0;
}
+static int xc2028_sleep(struct dvb_frontend *fe);
+
static int check_firmware(struct dvb_frontend *fe, unsigned int type,
v4l2_std_id std, __u16 int_freq)
{
@@ -890,7 +884,7 @@ read_not_reliable:
return 0;
fail:
- priv->state = XC2028_SLEEP;
+ priv->state = XC2028_NO_FIRMWARE;
memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
if (retry_count < 8) {
@@ -900,6 +894,9 @@ fail:
goto retry;
}
+ /* Firmware didn't load. Put the device to sleep */
+ xc2028_sleep(fe);
+
if (rc == -ENOENT)
rc = -EINVAL;
return rc;
@@ -917,6 +914,12 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
if (rc < 0)
return rc;
+ /* If the device is sleeping, no channel is tuned */
+ if (!rc) {
+ *strength = 0;
+ return 0;
+ }
+
mutex_lock(&priv->lock);
/* Sync Lock Indicator */
@@ -964,6 +967,12 @@ static int xc2028_get_afc(struct dvb_frontend *fe, s32 *afc)
if (rc < 0)
return rc;
+ /* If the device is sleeping, no channel is tuned */
+ if (!rc) {
+ *afc = 0;
+ return 0;
+ }
+
mutex_lock(&priv->lock);
/* Sync Lock Indicator */
@@ -1281,6 +1290,10 @@ static int xc2028_sleep(struct dvb_frontend *fe)
if (rc < 0)
return rc;
+ /* Device is already in sleep mode */
+ if (!rc)
+ return 0;
+
/* Avoid firmware reload on slow devices or if PM disabled */
if (no_poweroff || priv->ctrl.disable_power_mgmt)
return 0;
@@ -1298,7 +1311,8 @@ static int xc2028_sleep(struct dvb_frontend *fe)
else
rc = send_seq(priv, {0x80, XREG_POWER_DOWN, 0x00, 0x00});
- priv->state = XC2028_SLEEP;
+ if (rc >= 0)
+ priv->state = XC2028_SLEEP;
mutex_unlock(&priv->lock);
@@ -1366,7 +1380,7 @@ static void load_firmware_cb(const struct firmware *fw,
if (rc < 0)
return;
- priv->state = XC2028_SLEEP;
+ priv->state = XC2028_ACTIVE;
}
static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index cfe8056b91aa..39d824e2bb69 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -17,7 +17,6 @@ source "drivers/media/usb/cpia2/Kconfig"
source "drivers/media/usb/zr364xx/Kconfig"
source "drivers/media/usb/stkwebcam/Kconfig"
source "drivers/media/usb/s2255/Kconfig"
-source "drivers/media/usb/sn9c102/Kconfig"
source "drivers/media/usb/usbtv/Kconfig"
endif
diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile
index 0935f47497a6..7ac4b143dce8 100644
--- a/drivers/media/usb/Makefile
+++ b/drivers/media/usb/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
obj-$(CONFIG_USB_GSPCA) += gspca/
obj-$(CONFIG_USB_PWC) += pwc/
obj-$(CONFIG_VIDEO_CPIA2) += cpia2/
-obj-$(CONFIG_USB_SN9C102) += sn9c102/
obj-$(CONFIG_VIDEO_AU0828) += au0828/
obj-$(CONFIG_VIDEO_HDPVR) += hdpvr/
obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index bd9d19a73efd..ab45a6f9dcc9 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -173,9 +173,8 @@ static int au0828_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
int ifnum;
-#ifdef CONFIG_VIDEO_AU0828_V4L2
- int retval;
-#endif
+ int retval = 0;
+
struct au0828_dev *dev;
struct usb_device *usbdev = interface_to_usbdev(interface);
@@ -257,7 +256,11 @@ static int au0828_usb_probe(struct usb_interface *interface,
#endif
/* Digital TV */
- au0828_dvb_register(dev);
+ retval = au0828_dvb_register(dev);
+ if (retval)
+ pr_err("%s() au0282_dev_register failed\n",
+ __func__);
+
/* Store the pointer to the au0828_dev so it can be accessed in
au0828_usb_disconnect */
@@ -268,7 +271,7 @@ static int au0828_usb_probe(struct usb_interface *interface,
mutex_unlock(&dev->lock);
- return 0;
+ return retval;
}
static struct usb_driver au0828_usb_driver = {
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index 9a6f15613a38..4ae8b1074649 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -33,6 +33,10 @@
#include "mxl5007t.h"
#include "tda18271.h"
+static int preallocate_big_buffers;
+module_param_named(preallocate_big_buffers, preallocate_big_buffers, int, 0644);
+MODULE_PARM_DESC(preallocate_big_buffers, "Preallocate the larger transfer buffers at module load time");
+
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define _AU0828_BULKPIPE 0x83
@@ -153,9 +157,13 @@ static int stop_urb_transfer(struct au0828_dev *dev)
dev->urb_streaming = 0;
for (i = 0; i < URB_COUNT; i++) {
- usb_kill_urb(dev->urbs[i]);
- kfree(dev->urbs[i]->transfer_buffer);
- usb_free_urb(dev->urbs[i]);
+ if (dev->urbs[i]) {
+ usb_kill_urb(dev->urbs[i]);
+ if (!preallocate_big_buffers)
+ kfree(dev->urbs[i]->transfer_buffer);
+
+ usb_free_urb(dev->urbs[i]);
+ }
}
return 0;
@@ -181,10 +189,18 @@ static int start_urb_transfer(struct au0828_dev *dev)
purb = dev->urbs[i];
- purb->transfer_buffer = kzalloc(URB_BUFSIZE, GFP_KERNEL);
+ if (preallocate_big_buffers)
+ purb->transfer_buffer = dev->dig_transfer_buffer[i];
+ else
+ purb->transfer_buffer = kzalloc(URB_BUFSIZE,
+ GFP_KERNEL);
+
if (!purb->transfer_buffer) {
usb_free_urb(purb);
dev->urbs[i] = NULL;
+ printk(KERN_ERR
+ "%s: failed big buffer allocation, err = %d\n",
+ __func__, ret);
goto err;
}
@@ -217,6 +233,27 @@ err:
return ret;
}
+static void au0828_start_transport(struct au0828_dev *dev)
+{
+ au0828_write(dev, 0x608, 0x90);
+ au0828_write(dev, 0x609, 0x72);
+ au0828_write(dev, 0x60a, 0x71);
+ au0828_write(dev, 0x60b, 0x01);
+
+}
+
+static void au0828_stop_transport(struct au0828_dev *dev, int full_stop)
+{
+ if (full_stop) {
+ au0828_write(dev, 0x608, 0x00);
+ au0828_write(dev, 0x609, 0x00);
+ au0828_write(dev, 0x60a, 0x00);
+ }
+ au0828_write(dev, 0x60b, 0x00);
+}
+
+
+
static int au0828_dvb_start_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
@@ -231,13 +268,17 @@ static int au0828_dvb_start_feed(struct dvb_demux_feed *feed)
if (dvb) {
mutex_lock(&dvb->lock);
+ dvb->start_count++;
+ dprintk(1, "%s(), start_count: %d, stop_count: %d\n", __func__,
+ dvb->start_count, dvb->stop_count);
if (dvb->feeding++ == 0) {
/* Start transport */
- au0828_write(dev, 0x608, 0x90);
- au0828_write(dev, 0x609, 0x72);
- au0828_write(dev, 0x60a, 0x71);
- au0828_write(dev, 0x60b, 0x01);
+ au0828_start_transport(dev);
ret = start_urb_transfer(dev);
+ if (ret < 0) {
+ au0828_stop_transport(dev, 0);
+ dvb->feeding--; /* We ran out of memory... */
+ }
}
mutex_unlock(&dvb->lock);
}
@@ -256,10 +297,16 @@ static int au0828_dvb_stop_feed(struct dvb_demux_feed *feed)
if (dvb) {
mutex_lock(&dvb->lock);
- if (--dvb->feeding == 0) {
- /* Stop transport */
- ret = stop_urb_transfer(dev);
- au0828_write(dev, 0x60b, 0x00);
+ dvb->stop_count++;
+ dprintk(1, "%s(), start_count: %d, stop_count: %d\n", __func__,
+ dvb->start_count, dvb->stop_count);
+ if (dvb->feeding > 0) {
+ dvb->feeding--;
+ if (dvb->feeding == 0) {
+ /* Stop transport */
+ ret = stop_urb_transfer(dev);
+ au0828_stop_transport(dev, 0);
+ }
}
mutex_unlock(&dvb->lock);
}
@@ -282,16 +329,10 @@ static void au0828_restart_dvb_streaming(struct work_struct *work)
/* Stop transport */
stop_urb_transfer(dev);
- au0828_write(dev, 0x608, 0x00);
- au0828_write(dev, 0x609, 0x00);
- au0828_write(dev, 0x60a, 0x00);
- au0828_write(dev, 0x60b, 0x00);
+ au0828_stop_transport(dev, 1);
/* Start transport */
- au0828_write(dev, 0x608, 0x90);
- au0828_write(dev, 0x609, 0x72);
- au0828_write(dev, 0x60a, 0x71);
- au0828_write(dev, 0x60b, 0x01);
+ au0828_start_transport(dev);
start_urb_transfer(dev);
mutex_unlock(&dvb->lock);
@@ -304,6 +345,23 @@ static int dvb_register(struct au0828_dev *dev)
dprintk(1, "%s()\n", __func__);
+ if (preallocate_big_buffers) {
+ int i;
+ for (i = 0; i < URB_COUNT; i++) {
+ dev->dig_transfer_buffer[i] = kzalloc(URB_BUFSIZE,
+ GFP_KERNEL);
+
+ if (!dev->dig_transfer_buffer[i]) {
+ result = -ENOMEM;
+
+ printk(KERN_ERR
+ "%s: failed buffer allocation (errno = %d)\n",
+ DRIVER_NAME, result);
+ goto fail_adapter;
+ }
+ }
+ }
+
INIT_WORK(&dev->restart_streaming, au0828_restart_dvb_streaming);
/* register adapter */
@@ -375,6 +433,9 @@ static int dvb_register(struct au0828_dev *dev)
/* register network adapter */
dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx);
+
+ dvb->start_count = 0;
+ dvb->stop_count = 0;
return 0;
fail_fe_conn:
@@ -391,6 +452,13 @@ fail_frontend:
dvb_frontend_detach(dvb->frontend);
dvb_unregister_adapter(&dvb->adapter);
fail_adapter:
+
+ if (preallocate_big_buffers) {
+ int i;
+ for (i = 0; i < URB_COUNT; i++)
+ kfree(dev->dig_transfer_buffer[i]);
+ }
+
return result;
}
@@ -411,6 +479,14 @@ void au0828_dvb_unregister(struct au0828_dev *dev)
dvb_unregister_frontend(dvb->frontend);
dvb_frontend_detach(dvb->frontend);
dvb_unregister_adapter(&dvb->adapter);
+
+ if (preallocate_big_buffers) {
+ int i;
+ for (i = 0; i < URB_COUNT; i++)
+ kfree(dev->dig_transfer_buffer[i]);
+ }
+
+
}
/* All the DVB attach calls go here, this function gets modified
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index ef1f57f22be7..5439772c1551 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -102,6 +102,8 @@ struct au0828_dvb {
struct dmx_frontend fe_mem;
struct dvb_net net;
int feeding;
+ int start_count;
+ int stop_count;
};
enum au0828_stream_state {
@@ -260,6 +262,10 @@ struct au0828_dev {
/* USB / URB Related */
int urb_streaming;
struct urb *urbs[URB_COUNT];
+
+ /* Preallocated digital transfer buffers */
+
+ char *dig_transfer_buffer[URB_COUNT];
};
/* ----------------------------------------------------------- */
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 86feeeaf61c2..f14c5e89a567 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -45,6 +45,8 @@ config VIDEO_CX231XX_DVB
select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
select DVB_MB86A20S if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_LGDT3305 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TDA18271C2DD if MEDIA_SUBDRV_AUTOSELECT
---help---
This adds support for DVB cards based on the
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 528cce958a82..2ee03e4ddd86 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -709,6 +709,8 @@ const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
/* table of devices that work with this driver */
struct usb_device_id cx231xx_id_table[] = {
+ {USB_DEVICE(0x1D19, 0x6109),
+ .driver_info = CX231XX_BOARD_PV_XCAPTURE_USB},
{USB_DEVICE(0x0572, 0x5A3C),
.driver_info = CX231XX_BOARD_UNKNOWN},
{USB_DEVICE(0x0572, 0x58A2),
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index 96a5a0965399..7c0f797f1057 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -371,9 +371,9 @@ static int cx231xx_i2c_xfer(struct i2c_adapter *i2c_adap,
mutex_lock(&dev->i2c_lock);
for (i = 0; i < num; i++) {
- addr = msgs[i].addr >> 1;
+ addr = msgs[i].addr;
- dprintk2(2, "%s %s addr=%x len=%d:",
+ dprintk2(2, "%s %s addr=0x%x len=%d:",
(msgs[i].flags & I2C_M_RD) ? "read" : "write",
i == num - 1 ? "stop" : "nonstop", addr, msgs[i].len);
if (!msgs[i].len) {
@@ -390,32 +390,41 @@ static int cx231xx_i2c_xfer(struct i2c_adapter *i2c_adap,
rc = cx231xx_i2c_recv_bytes(i2c_adap, &msgs[i]);
if (i2c_debug >= 2) {
for (byte = 0; byte < msgs[i].len; byte++)
- printk(" %02x", msgs[i].buf[byte]);
+ printk(KERN_CONT " %02x", msgs[i].buf[byte]);
}
} else if (i + 1 < num && (msgs[i + 1].flags & I2C_M_RD) &&
msgs[i].addr == msgs[i + 1].addr
&& (msgs[i].len <= 2) && (bus->nr < 3)) {
+ /* write bytes */
+ if (i2c_debug >= 2) {
+ for (byte = 0; byte < msgs[i].len; byte++)
+ printk(KERN_CONT " %02x", msgs[i].buf[byte]);
+ printk(KERN_CONT "\n");
+ }
/* read bytes */
+ dprintk2(2, "plus %s %s addr=0x%x len=%d:",
+ (msgs[i+1].flags & I2C_M_RD) ? "read" : "write",
+ i+1 == num - 1 ? "stop" : "nonstop", addr, msgs[i+1].len);
rc = cx231xx_i2c_recv_bytes_with_saddr(i2c_adap,
&msgs[i],
&msgs[i + 1]);
if (i2c_debug >= 2) {
- for (byte = 0; byte < msgs[i].len; byte++)
- printk(" %02x", msgs[i].buf[byte]);
+ for (byte = 0; byte < msgs[i+1].len; byte++)
+ printk(KERN_CONT " %02x", msgs[i+1].buf[byte]);
}
i++;
} else {
/* write bytes */
if (i2c_debug >= 2) {
for (byte = 0; byte < msgs[i].len; byte++)
- printk(" %02x", msgs[i].buf[byte]);
+ printk(KERN_CONT " %02x", msgs[i].buf[byte]);
}
rc = cx231xx_i2c_send_bytes(i2c_adap, &msgs[i]);
}
if (rc < 0)
goto err;
if (i2c_debug >= 2)
- printk("\n");
+ printk(KERN_CONT "\n");
}
mutex_unlock(&dev->i2c_lock);
return num;
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 8f9b2cea88f0..8ede8ea762e6 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1539,6 +1539,8 @@ static const struct usb_device_id af9035_id_table[] = {
&af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
{ DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
&af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
+ { DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900,
+ &af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, af9035_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index 90cfa35ef6e6..eeab79bdd2aa 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -442,6 +442,7 @@ static struct cxd2820r_config anysee_cxd2820r_config = {
* IOD[0] ZL10353 1=enabled
* IOE[0] tuner 0=enabled
* tuner is behind ZL10353 I2C-gate
+ * tuner is behind TDA10023 I2C-gate
*
* E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)"
* PCB: 508TC (rev0.6)
@@ -956,7 +957,7 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
if (fe && adap->fe[1]) {
/* attach tuner for 2nd FE */
- fe = dvb_attach(dvb_pll_attach, adap->fe[0],
+ fe = dvb_attach(dvb_pll_attach, adap->fe[1],
(0xc0 >> 1), &d->i2c_adap,
DVB_PLL_SAMSUNG_DTOS403IH102A);
}
diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
index 44c64ef361bf..c1051c347744 100644
--- a/drivers/media/usb/dvb-usb-v2/az6007.c
+++ b/drivers/media/usb/dvb-usb-v2/az6007.c
@@ -68,6 +68,19 @@ static struct drxk_config terratec_h7_drxk = {
.microcode_name = "dvb-usb-terratec-h7-drxk.fw",
};
+static struct drxk_config cablestar_hdci_drxk = {
+ .adr = 0x29,
+ .parallel_ts = true,
+ .dynamic_clk = true,
+ .single_master = true,
+ .enable_merr_cfg = true,
+ .no_i2c_bridge = false,
+ .chunk_size = 64,
+ .mpeg_out_clk_strength = 0x02,
+ .qam_demod_parameter_count = 2,
+ .microcode_name = "dvb-usb-technisat-cablestar-hdci-drxk.fw",
+};
+
static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct az6007_device_state *st = fe_to_priv(fe);
@@ -630,6 +643,27 @@ static int az6007_frontend_attach(struct dvb_usb_adapter *adap)
return 0;
}
+static int az6007_cablestar_hdci_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct az6007_device_state *st = adap_to_priv(adap);
+ struct dvb_usb_device *d = adap_to_d(adap);
+
+ pr_debug("attaching demod drxk\n");
+
+ adap->fe[0] = dvb_attach(drxk_attach, &cablestar_hdci_drxk,
+ &d->i2c_adap);
+ if (!adap->fe[0])
+ return -EINVAL;
+
+ adap->fe[0]->sec_priv = adap;
+ st->gate_ctrl = adap->fe[0]->ops.i2c_gate_ctrl;
+ adap->fe[0]->ops.i2c_gate_ctrl = drxk_gate_ctrl;
+
+ az6007_ci_init(adap);
+
+ return 0;
+}
+
static int az6007_tuner_attach(struct dvb_usb_adapter *adap)
{
struct dvb_usb_device *d = adap_to_d(adap);
@@ -868,6 +902,29 @@ static struct dvb_usb_device_properties az6007_props = {
}
};
+static struct dvb_usb_device_properties az6007_cablestar_hdci_props = {
+ .driver_name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .firmware = AZ6007_FIRMWARE,
+
+ .adapter_nr = adapter_nr,
+ .size_of_priv = sizeof(struct az6007_device_state),
+ .i2c_algo = &az6007_i2c_algo,
+ .tuner_attach = az6007_tuner_attach,
+ .frontend_attach = az6007_cablestar_hdci_frontend_attach,
+ .streaming_ctrl = az6007_streaming_ctrl,
+/* ditch get_rc_config as it can't work (TS35 remote, I believe it's rc5) */
+ .get_rc_config = NULL,
+ .read_mac_address = az6007_read_mac_addr,
+ .download_firmware = az6007_download_firmware,
+ .identify_state = az6007_identify_state,
+ .power_ctrl = az6007_power_ctrl,
+ .num_adapters = 1,
+ .adapter = {
+ { .stream = DVB_USB_STREAM_BULK(0x02, 10, 4096), }
+ }
+};
+
static struct usb_device_id az6007_usb_table[] = {
{DVB_USB_DEVICE(USB_VID_AZUREWAVE, USB_PID_AZUREWAVE_6007,
&az6007_props, "Azurewave 6007", RC_MAP_EMPTY)},
@@ -875,6 +932,8 @@ static struct usb_device_id az6007_usb_table[] = {
&az6007_props, "Terratec H7", RC_MAP_NEC_TERRATEC_CINERGY_XS)},
{DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_H7_2,
&az6007_props, "Terratec H7", RC_MAP_NEC_TERRATEC_CINERGY_XS)},
+ {DVB_USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI,
+ &az6007_cablestar_hdci_props, "Technisat CableStar Combo HD CI", RC_MAP_EMPTY)},
{0},
};
diff --git a/drivers/media/usb/dvb-usb-v2/ec168.c b/drivers/media/usb/dvb-usb-v2/ec168.c
index 5c68f3918bc8..0c2b377704ff 100644
--- a/drivers/media/usb/dvb-usb-v2/ec168.c
+++ b/drivers/media/usb/dvb-usb-v2/ec168.c
@@ -170,7 +170,7 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
error:
mutex_unlock(&d->i2c_mutex);
- return i;
+ return ret;
}
static u32 ec168_i2c_func(struct i2c_adapter *adapter)
diff --git a/drivers/media/usb/dvb-usb-v2/it913x.c b/drivers/media/usb/dvb-usb-v2/it913x.c
index 1cb6899cf797..fe95a586dd5d 100644
--- a/drivers/media/usb/dvb-usb-v2/it913x.c
+++ b/drivers/media/usb/dvb-usb-v2/it913x.c
@@ -799,6 +799,9 @@ static const struct usb_device_id it913x_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CTVDIGDUAL_V2,
&it913x_properties, "Digital Dual TV Receiver CTVDIGDUAL_V2",
RC_MAP_IT913X_V1) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_H335,
+ &it913x_properties, "Avermedia H335",
+ RC_MAP_IT913X_V2) },
{} /* Terminating entry */
};
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
index d83df4bb72d3..0a98d04c53e4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-demod.c - driver for the MaxLinear MXL111SF DVB-T demodulator
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -601,7 +601,7 @@ struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state,
EXPORT_SYMBOL_GPL(mxl111sf_demod_attach);
MODULE_DESCRIPTION("MaxLinear MxL111SF DVB-T demodulator driver");
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
index 3f3f8bfd190b..2d4530f5be54 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-demod.h - driver for the MaxLinear MXL111SF DVB-T demodulator
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
index e4121cb8f5ef..a619410adde4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-gpio.c - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
index 0220f54299a5..b85a5772d771 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-gpio.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index 34434557ef65..a101d06eb143 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-i2c.c - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
index a57a45ffb9e4..465762145ad2 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-i2c.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
index b741b3a7a325..f6b348024bec 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-phy.c - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
index f0756071d347..0643738de7de 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-phy.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
index 17831b0fb9db..89bf115e927e 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-reg.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index 879c529640f7..a8d2c7053674 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-tuner.c - driver for the MaxLinear MXL111SF CMOS tuner
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -512,7 +512,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
EXPORT_SYMBOL_GPL(mxl111sf_tuner_attach);
MODULE_DESCRIPTION("MaxLinear MxL111SF CMOS tuner driver");
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
index 90f583e5d6a6..2046db22519e 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-tuner.h - driver for the MaxLinear MXL111SF CMOS tuner
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -68,7 +68,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
#else
static inline
struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
- struct mxl111sf_state *mxl_state
+ struct mxl111sf_state *mxl_state,
struct mxl111sf_tuner_config *cfg)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 08240e498451..c7304fa8ab73 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com)
+ * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -105,7 +105,7 @@ int mxl111sf_read_reg(struct mxl111sf_state *state, u8 addr, u8 *data)
ret = -EINVAL;
}
- pr_debug("R: (0x%02x, 0x%02x)\n", addr, *data);
+ pr_debug("R: (0x%02x, 0x%02x)\n", addr, buf[1]);
fail:
return ret;
}
@@ -1421,7 +1421,7 @@ static struct usb_driver mxl111sf_usb_driver = {
module_usb_driver(mxl111sf_usb_driver);
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_DESCRIPTION("Driver for MaxLinear MxL111SF");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.h b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
index 9816de86e48c..8516c011b7cc 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com)
+ * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index ecca03667f98..fda5c64ba0e8 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1407,6 +1407,8 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
&rtl2832u_props, "Dexatek DK DVB-T Dongle", NULL) },
{ DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6680,
&rtl2832u_props, "DigitalNow Quad DVB-T Receiver", NULL) },
+ { DVB_USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_MINID,
+ &rtl2832u_props, "Leadtek Winfast DTV Dongle Mini D", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x00d3,
&rtl2832u_props, "TerraTec Cinergy T Stick RC (Rev. 3)", NULL) },
{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1102,
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 20e345d9fe8f..a1c641e18362 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -149,6 +149,7 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ int ret;
int i;
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
@@ -173,7 +174,8 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (1 + msg[i].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[i].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = 0;
obuf[1] = msg[i].len;
@@ -193,12 +195,14 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (3 + msg[i].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[i].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
if (1 + msg[i + 1].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[i + 1].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[i].len;
obuf[1] = msg[i+1].len;
@@ -223,7 +227,8 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (2 + msg[i].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[i].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[i].addr;
obuf[1] = msg[i].len;
@@ -237,8 +242,14 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
}
}
+ if (i == num)
+ ret = num;
+ else
+ ret = -EREMOTEIO;
+
+unlock:
mutex_unlock(&d->i2c_mutex);
- return i == num ? num : -EREMOTEIO;
+ return ret;
}
static u32 cxusb_i2c_func(struct i2c_adapter *adapter)
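The cxusb_i2c_xfer hunks above (and the dw2102/dw2104/dw3101/s6x0 ones that follow) all apply the same fix: the early "return -EOPNOTSUPP" exits used to leave d->i2c_mutex held, so the error paths now set a return value and jump to a common unlock label. The userspace sketch below is only an illustration of that single-exit locking pattern under hypothetical names (demo_dev, demo_xfer); it is not code from the driver.

/*
 * Minimal sketch of the single-exit locking pattern introduced above:
 * every early error path jumps to a common "unlock" label so the mutex
 * taken at the top of the transfer function is always released.
 * Build with: cc demo.c -lpthread
 */
#include <stdio.h>
#include <stddef.h>
#include <pthread.h>

struct demo_dev {
	pthread_mutex_t lock;
};

static int demo_xfer(struct demo_dev *d, const unsigned char *buf,
		     size_t len, size_t bufsize)
{
	int ret;

	pthread_mutex_lock(&d->lock);

	if (len > bufsize) {
		/* Before the fix this was a bare "return" and leaked the lock. */
		ret = -1;
		goto unlock;
	}

	/* ... perform the transfer using buf[0..len-1] ... */
	ret = (int)len;

unlock:
	pthread_mutex_unlock(&d->lock);
	return ret;
}

int main(void)
{
	struct demo_dev d = { .lock = PTHREAD_MUTEX_INITIALIZER };
	unsigned char buf[4] = { 0 };

	printf("ok path: %d\n", demo_xfer(&d, buf, sizeof(buf), sizeof(buf)));
	printf("err path: %d\n", demo_xfer(&d, buf, 8, sizeof(buf)));
	return 0;
}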
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index c1a63b2a6baa..ae0f56a32e4d 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -2,7 +2,7 @@
* DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101,
* TeVii S600, S630, S650, S660, S480, S421, S632
* Prof 1100, 7500,
- * Geniatech SU3000 Cards
+ * Geniatech SU3000, T220 Cards
* Copyright (C) 2008-2012 Igor M. Liplianin (liplianin@me.by)
*
* This program is free software; you can redistribute it and/or modify it
@@ -29,6 +29,8 @@
#include "stb6100.h"
#include "stb6100_proc.h"
#include "m88rs2000.h"
+#include "tda18271.h"
+#include "cxd2820r.h"
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 64
@@ -110,11 +112,6 @@
"Please see linux/Documentation/dvb/ for more details " \
"on firmware-problems."
-struct rc_map_dvb_usb_table_table {
- struct rc_map_table *rc_keys;
- int rc_keys_size;
-};
-
struct su3000_state {
u8 initialized;
};
@@ -129,12 +126,6 @@ module_param_named(debug, dvb_usb_dw2102_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (1=info 2=xfer 4=rc(or-able))."
DVB_USB_DEBUG_STATUS);
-/* keymaps */
-static int ir_keymap;
-module_param_named(keymap, ir_keymap, int, 0644);
-MODULE_PARM_DESC(keymap, "set keymap 0=default 1=dvbworld 2=tevii 3=tbs ..."
- " 256=none");
-
/* demod probe */
static int demod_probe = 1;
module_param_named(demod, demod_probe, int, 0644);
@@ -301,6 +292,7 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ int ret;
if (!d)
return -ENODEV;
@@ -316,7 +308,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
if (2 + msg[1].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[1].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[0].addr << 1;
@@ -340,7 +333,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
if (2 + msg[0].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[1].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[0].addr << 1;
@@ -357,7 +351,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
if (2 + msg[0].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[1].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[0].addr << 1;
@@ -386,15 +381,17 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
break;
}
+ ret = num;
+unlock:
mutex_unlock(&d->i2c_mutex);
- return num;
+ return ret;
}
static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int len, i, j;
+ int len, i, j, ret;
if (!d)
return -ENODEV;
@@ -430,7 +427,8 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
if (2 + msg[j].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[j].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
dw210x_op_rw(d->udev, 0xc3,
@@ -466,7 +464,8 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
if (2 + msg[j].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[j].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[j].addr << 1;
@@ -481,15 +480,18 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
}
}
+ ret = num;
+unlock:
mutex_unlock(&d->i2c_mutex);
- return num;
+ return ret;
}
static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ int ret;
int i;
if (!d)
@@ -506,7 +508,8 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (2 + msg[1].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[1].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
@@ -530,7 +533,8 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (2 + msg[0].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[0].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
@@ -556,9 +560,11 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
msg[i].flags == 0 ? ">>>" : "<<<");
debug_dump(msg[i].buf, msg[i].len, deb_xfer);
}
+ ret = num;
+unlock:
mutex_unlock(&d->i2c_mutex);
- return num;
+ return ret;
}
static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
@@ -566,7 +572,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
struct usb_device *udev;
- int len, i, j;
+ int len, i, j, ret;
if (!d)
return -ENODEV;
@@ -618,7 +624,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (msg[j].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[j].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
dw210x_op_rw(d->udev, 0x91, 0, 0,
@@ -652,7 +659,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (2 + msg[j].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[j].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[j + 1].len;
@@ -671,7 +679,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (2 + msg[j].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[j].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[j].len + 1;
obuf[1] = (msg[j].addr << 1);
@@ -685,9 +694,11 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
}
}
}
+ ret = num;
+unlock:
mutex_unlock(&d->i2c_mutex);
- return num;
+ return ret;
}
static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
@@ -1095,6 +1106,16 @@ static struct ds3000_config su3000_ds3000_config = {
.set_lock_led = dw210x_led_ctrl,
};
+static struct cxd2820r_config cxd2820r_config = {
+ .i2c_address = 0x6c, /* (0xd8 >> 1) */
+ .ts_mode = 0x38,
+};
+
+static struct tda18271_config tda18271_config = {
+ .output_opt = TDA18271_OUTPUT_LT_OFF,
+ .gate = TDA18271_GATE_DIGITAL,
+};
+
static u8 m88rs2000_inittab[] = {
DEMOD_WRITE, 0x9a, 0x30,
DEMOD_WRITE, 0x00, 0x01,
@@ -1364,6 +1385,49 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d)
return -EIO;
}
+static int t220_frontend_attach(struct dvb_usb_adapter *d)
+{
+ u8 obuf[3] = { 0xe, 0x80, 0 };
+ u8 ibuf[] = { 0 };
+
+ if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ err("command 0x0e transfer failed.");
+
+ obuf[0] = 0xe;
+ obuf[1] = 0x83;
+ obuf[2] = 0;
+
+ if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ err("command 0x0e transfer failed.");
+
+ msleep(100);
+
+ obuf[0] = 0xe;
+ obuf[1] = 0x80;
+ obuf[2] = 1;
+
+ if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ err("command 0x0e transfer failed.");
+
+ obuf[0] = 0x51;
+
+ if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+ err("command 0x51 transfer failed.");
+
+ d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
+ &d->dev->i2c_adap, NULL);
+ if (d->fe_adap[0].fe != NULL) {
+ if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60,
+ &d->dev->i2c_adap, &tda18271_config)) {
+ info("Attached TDA18271HD/CXD2820R!\n");
+ return 0;
+ }
+ }
+
+ info("Failed to attach TDA18271HD/CXD2820R!\n");
+ return -EIO;
+}
+
static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
{
u8 obuf[] = { 0x51 };
@@ -1404,174 +1468,29 @@ static int dw3101_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static struct rc_map_table rc_map_dw210x_table[] = {
- { 0xf80a, KEY_POWER2 }, /*power*/
- { 0xf80c, KEY_MUTE }, /*mute*/
- { 0xf811, KEY_1 },
- { 0xf812, KEY_2 },
- { 0xf813, KEY_3 },
- { 0xf814, KEY_4 },
- { 0xf815, KEY_5 },
- { 0xf816, KEY_6 },
- { 0xf817, KEY_7 },
- { 0xf818, KEY_8 },
- { 0xf819, KEY_9 },
- { 0xf810, KEY_0 },
- { 0xf81c, KEY_CHANNELUP }, /*ch+*/
- { 0xf80f, KEY_CHANNELDOWN }, /*ch-*/
- { 0xf81a, KEY_VOLUMEUP }, /*vol+*/
- { 0xf80e, KEY_VOLUMEDOWN }, /*vol-*/
- { 0xf804, KEY_RECORD }, /*rec*/
- { 0xf809, KEY_FAVORITES }, /*fav*/
- { 0xf808, KEY_REWIND }, /*rewind*/
- { 0xf807, KEY_FASTFORWARD }, /*fast*/
- { 0xf80b, KEY_PAUSE }, /*pause*/
- { 0xf802, KEY_ESC }, /*cancel*/
- { 0xf803, KEY_TAB }, /*tab*/
- { 0xf800, KEY_UP }, /*up*/
- { 0xf81f, KEY_OK }, /*ok*/
- { 0xf801, KEY_DOWN }, /*down*/
- { 0xf805, KEY_CAMERA }, /*cap*/
- { 0xf806, KEY_STOP }, /*stop*/
- { 0xf840, KEY_ZOOM }, /*full*/
- { 0xf81e, KEY_TV }, /*tvmode*/
- { 0xf81b, KEY_LAST }, /*recall*/
-};
-
-static struct rc_map_table rc_map_tevii_table[] = {
- { 0xf80a, KEY_POWER },
- { 0xf80c, KEY_MUTE },
- { 0xf811, KEY_1 },
- { 0xf812, KEY_2 },
- { 0xf813, KEY_3 },
- { 0xf814, KEY_4 },
- { 0xf815, KEY_5 },
- { 0xf816, KEY_6 },
- { 0xf817, KEY_7 },
- { 0xf818, KEY_8 },
- { 0xf819, KEY_9 },
- { 0xf810, KEY_0 },
- { 0xf81c, KEY_MENU },
- { 0xf80f, KEY_VOLUMEDOWN },
- { 0xf81a, KEY_LAST },
- { 0xf80e, KEY_OPEN },
- { 0xf804, KEY_RECORD },
- { 0xf809, KEY_VOLUMEUP },
- { 0xf808, KEY_CHANNELUP },
- { 0xf807, KEY_PVR },
- { 0xf80b, KEY_TIME },
- { 0xf802, KEY_RIGHT },
- { 0xf803, KEY_LEFT },
- { 0xf800, KEY_UP },
- { 0xf81f, KEY_OK },
- { 0xf801, KEY_DOWN },
- { 0xf805, KEY_TUNER },
- { 0xf806, KEY_CHANNELDOWN },
- { 0xf840, KEY_PLAYPAUSE },
- { 0xf81e, KEY_REWIND },
- { 0xf81b, KEY_FAVORITES },
- { 0xf81d, KEY_BACK },
- { 0xf84d, KEY_FASTFORWARD },
- { 0xf844, KEY_EPG },
- { 0xf84c, KEY_INFO },
- { 0xf841, KEY_AB },
- { 0xf843, KEY_AUDIO },
- { 0xf845, KEY_SUBTITLE },
- { 0xf84a, KEY_LIST },
- { 0xf846, KEY_F1 },
- { 0xf847, KEY_F2 },
- { 0xf85e, KEY_F3 },
- { 0xf85c, KEY_F4 },
- { 0xf852, KEY_F5 },
- { 0xf85a, KEY_F6 },
- { 0xf856, KEY_MODE },
- { 0xf858, KEY_SWITCHVIDEOMODE },
-};
-
-static struct rc_map_table rc_map_tbs_table[] = {
- { 0xf884, KEY_POWER },
- { 0xf894, KEY_MUTE },
- { 0xf887, KEY_1 },
- { 0xf886, KEY_2 },
- { 0xf885, KEY_3 },
- { 0xf88b, KEY_4 },
- { 0xf88a, KEY_5 },
- { 0xf889, KEY_6 },
- { 0xf88f, KEY_7 },
- { 0xf88e, KEY_8 },
- { 0xf88d, KEY_9 },
- { 0xf892, KEY_0 },
- { 0xf896, KEY_CHANNELUP },
- { 0xf891, KEY_CHANNELDOWN },
- { 0xf893, KEY_VOLUMEUP },
- { 0xf88c, KEY_VOLUMEDOWN },
- { 0xf883, KEY_RECORD },
- { 0xf898, KEY_PAUSE },
- { 0xf899, KEY_OK },
- { 0xf89a, KEY_SHUFFLE },
- { 0xf881, KEY_UP },
- { 0xf890, KEY_LEFT },
- { 0xf882, KEY_RIGHT },
- { 0xf888, KEY_DOWN },
- { 0xf895, KEY_FAVORITES },
- { 0xf897, KEY_SUBTITLE },
- { 0xf89d, KEY_ZOOM },
- { 0xf89f, KEY_EXIT },
- { 0xf89e, KEY_MENU },
- { 0xf89c, KEY_EPG },
- { 0xf880, KEY_PREVIOUS },
- { 0xf89b, KEY_MODE }
-};
+static int dw2102_rc_query(struct dvb_usb_device *d)
+{
+ u8 key[2];
+ struct i2c_msg msg = {
+ .addr = DW2102_RC_QUERY,
+ .flags = I2C_M_RD,
+ .buf = key,
+ .len = 2
+ };
-static struct rc_map_table rc_map_su3000_table[] = {
- { 0x25, KEY_POWER }, /* right-bottom Red */
- { 0x0a, KEY_MUTE }, /* -/-- */
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x00, KEY_0 },
- { 0x20, KEY_UP }, /* CH+ */
- { 0x21, KEY_DOWN }, /* CH+ */
- { 0x12, KEY_VOLUMEUP }, /* Brightness Up */
- { 0x13, KEY_VOLUMEDOWN },/* Brightness Down */
- { 0x1f, KEY_RECORD },
- { 0x17, KEY_PLAY },
- { 0x16, KEY_PAUSE },
- { 0x0b, KEY_STOP },
- { 0x27, KEY_FASTFORWARD },/* >> */
- { 0x26, KEY_REWIND }, /* << */
- { 0x0d, KEY_OK }, /* Mute */
- { 0x11, KEY_LEFT }, /* VOL- */
- { 0x10, KEY_RIGHT }, /* VOL+ */
- { 0x29, KEY_BACK }, /* button under 9 */
- { 0x2c, KEY_MENU }, /* TTX */
- { 0x2b, KEY_EPG }, /* EPG */
- { 0x1e, KEY_RED }, /* OSD */
- { 0x0e, KEY_GREEN }, /* Window */
- { 0x2d, KEY_YELLOW }, /* button under << */
- { 0x0f, KEY_BLUE }, /* bottom yellow button */
- { 0x14, KEY_AUDIO }, /* Snapshot */
- { 0x38, KEY_TV }, /* TV/Radio */
- { 0x0c, KEY_ESC } /* upper Red button */
-};
+ if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
+ if (msg.buf[0] != 0xff) {
+ deb_rc("%s: rc code: %x, %x\n",
+ __func__, key[0], key[1]);
+ rc_keydown(d->rc_dev, key[0], 1);
+ }
+ }
-static struct rc_map_dvb_usb_table_table keys_tables[] = {
- { rc_map_dw210x_table, ARRAY_SIZE(rc_map_dw210x_table) },
- { rc_map_tevii_table, ARRAY_SIZE(rc_map_tevii_table) },
- { rc_map_tbs_table, ARRAY_SIZE(rc_map_tbs_table) },
- { rc_map_su3000_table, ARRAY_SIZE(rc_map_su3000_table) },
-};
+ return 0;
+}
-static int dw2102_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static int prof_rc_query(struct dvb_usb_device *d)
{
- struct rc_map_table *keymap = d->props.rc.legacy.rc_map_table;
- int keymap_size = d->props.rc.legacy.rc_map_size;
u8 key[2];
struct i2c_msg msg = {
.addr = DW2102_RC_QUERY,
@@ -1579,32 +1498,34 @@ static int dw2102_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
.buf = key,
.len = 2
};
- int i;
- /* override keymap */
- if ((ir_keymap > 0) && (ir_keymap <= ARRAY_SIZE(keys_tables))) {
- keymap = keys_tables[ir_keymap - 1].rc_keys ;
- keymap_size = keys_tables[ir_keymap - 1].rc_keys_size;
- } else if (ir_keymap > ARRAY_SIZE(keys_tables))
- return 0; /* none */
-
- *state = REMOTE_NO_KEY_PRESSED;
- if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
- for (i = 0; i < keymap_size ; i++) {
- if (rc5_data(&keymap[i]) == msg.buf[0]) {
- *state = REMOTE_KEY_PRESSED;
- *event = keymap[i].keycode;
- break;
- }
+ if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
+ if (msg.buf[0] != 0xff) {
+ deb_rc("%s: rc code: %x, %x\n",
+ __func__, key[0], key[1]);
+ rc_keydown(d->rc_dev, key[0]^0xff, 1);
}
+ }
- if ((*state) == REMOTE_KEY_PRESSED)
- deb_rc("%s: found rc key: %x, %x, event: %x\n",
- __func__, key[0], key[1], (*event));
- else if (key[0] != 0xff)
- deb_rc("%s: unknown rc key: %x, %x\n",
- __func__, key[0], key[1]);
+ return 0;
+}
+static int su3000_rc_query(struct dvb_usb_device *d)
+{
+ u8 key[2];
+ struct i2c_msg msg = {
+ .addr = DW2102_RC_QUERY,
+ .flags = I2C_M_RD,
+ .buf = key,
+ .len = 2
+ };
+
+ if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
+ if (msg.buf[0] != 0xff) {
+ deb_rc("%s: rc code: %x, %x\n",
+ __func__, key[0], key[1]);
+ rc_keydown(d->rc_dev, key[1] << 8 | key[0], 1);
+ }
}
return 0;
@@ -1630,6 +1551,7 @@ enum dw2102_table_entry {
TEVII_S632,
TERRATEC_CINERGY_S2_R2,
GOTVIEW_SAT_HD,
+ GENIATECH_T220,
};
static struct usb_device_id dw2102_table[] = {
@@ -1652,6 +1574,7 @@ static struct usb_device_id dw2102_table[] = {
[TEVII_S632] = {USB_DEVICE(0x9022, USB_PID_TEVII_S632)},
[TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, 0x00b0)},
[GOTVIEW_SAT_HD] = {USB_DEVICE(0x1FE1, USB_PID_GOTVIEW_SAT_HD)},
+ [GENIATECH_T220] = {USB_DEVICE(0x1f4d, 0xD220)},
{ }
};
@@ -1711,9 +1634,7 @@ static int dw2102_load_firmware(struct usb_device *dev,
/* init registers */
switch (dev->descriptor.idProduct) {
case USB_PID_TEVII_S650:
- dw2104_properties.rc.legacy.rc_map_table = rc_map_tevii_table;
- dw2104_properties.rc.legacy.rc_map_size =
- ARRAY_SIZE(rc_map_tevii_table);
+ dw2104_properties.rc.core.rc_codes = RC_MAP_TEVII_NEC;
case USB_PID_DW2104:
reset = 1;
dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1,
@@ -1777,10 +1698,11 @@ static struct dvb_usb_device_properties dw2102_properties = {
.i2c_algo = &dw2102_serit_i2c_algo,
- .rc.legacy = {
- .rc_map_table = rc_map_dw210x_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dw210x_table),
+ .rc.core = {
.rc_interval = 150,
+ .rc_codes = RC_MAP_DM1105_NEC,
+ .module_name = "dw2102",
+ .allowed_protos = RC_BIT_NEC,
.rc_query = dw2102_rc_query,
},
@@ -1831,10 +1753,11 @@ static struct dvb_usb_device_properties dw2104_properties = {
.no_reconnect = 1,
.i2c_algo = &dw2104_i2c_algo,
- .rc.legacy = {
- .rc_map_table = rc_map_dw210x_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dw210x_table),
+ .rc.core = {
.rc_interval = 150,
+ .rc_codes = RC_MAP_DM1105_NEC,
+ .module_name = "dw2102",
+ .allowed_protos = RC_BIT_NEC,
.rc_query = dw2102_rc_query,
},
@@ -1881,10 +1804,11 @@ static struct dvb_usb_device_properties dw3101_properties = {
.no_reconnect = 1,
.i2c_algo = &dw3101_i2c_algo,
- .rc.legacy = {
- .rc_map_table = rc_map_dw210x_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dw210x_table),
+ .rc.core = {
.rc_interval = 150,
+ .rc_codes = RC_MAP_DM1105_NEC,
+ .module_name = "dw2102",
+ .allowed_protos = RC_BIT_NEC,
.rc_query = dw2102_rc_query,
},
@@ -1929,10 +1853,11 @@ static struct dvb_usb_device_properties s6x0_properties = {
.no_reconnect = 1,
.i2c_algo = &s6x0_i2c_algo,
- .rc.legacy = {
- .rc_map_table = rc_map_tevii_table,
- .rc_map_size = ARRAY_SIZE(rc_map_tevii_table),
+ .rc.core = {
.rc_interval = 150,
+ .rc_codes = RC_MAP_TEVII_NEC,
+ .module_name = "dw2102",
+ .allowed_protos = RC_BIT_NEC,
.rc_query = dw2102_rc_query,
},
@@ -2022,11 +1947,12 @@ static struct dvb_usb_device_properties su3000_properties = {
.identify_state = su3000_identify_state,
.i2c_algo = &su3000_i2c_algo,
- .rc.legacy = {
- .rc_map_table = rc_map_su3000_table,
- .rc_map_size = ARRAY_SIZE(rc_map_su3000_table),
+ .rc.core = {
.rc_interval = 150,
- .rc_query = dw2102_rc_query,
+ .rc_codes = RC_MAP_SU3000,
+ .module_name = "dw2102",
+ .allowed_protos = RC_BIT_RC5,
+ .rc_query = su3000_rc_query,
},
.read_mac_address = su3000_read_mac_address,
@@ -2077,6 +2003,55 @@ static struct dvb_usb_device_properties su3000_properties = {
}
};
+static struct dvb_usb_device_properties t220_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .size_of_priv = sizeof(struct su3000_state),
+ .power_ctrl = su3000_power_ctrl,
+ .num_adapters = 1,
+ .identify_state = su3000_identify_state,
+ .i2c_algo = &su3000_i2c_algo,
+
+ .rc.core = {
+ .rc_interval = 150,
+ .rc_codes = RC_MAP_SU3000,
+ .module_name = "dw2102",
+ .allowed_protos = RC_BIT_RC5,
+ .rc_query = su3000_rc_query,
+ },
+
+ .read_mac_address = su3000_read_mac_address,
+
+ .generic_bulk_ctrl_endpoint = 0x01,
+
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = { {
+ .streaming_ctrl = su3000_streaming_ctrl,
+ .frontend_attach = t220_frontend_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 8,
+ .endpoint = 0x82,
+ .u = {
+ .bulk = {
+ .buffersize = 4096,
+ }
+ }
+ }
+ } },
+ }
+ },
+ .num_device_descs = 1,
+ .devices = {
+ { "Geniatech T220 DVB-T/T2 USB2.0",
+ { &dw2102_table[GENIATECH_T220], NULL },
+ { NULL },
+ },
+ }
+};
+
static int dw2102_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -2088,8 +2063,8 @@ static int dw2102_probe(struct usb_interface *intf,
/* fill only different fields */
p1100->firmware = P1100_FIRMWARE;
p1100->devices[0] = d1100;
- p1100->rc.legacy.rc_map_table = rc_map_tbs_table;
- p1100->rc.legacy.rc_map_size = ARRAY_SIZE(rc_map_tbs_table);
+ p1100->rc.core.rc_query = prof_rc_query;
+ p1100->rc.core.rc_codes = RC_MAP_TBS_NEC;
p1100->adapter->fe[0].frontend_attach = stv0288_frontend_attach;
s660 = kmemdup(&s6x0_properties,
@@ -2114,8 +2089,8 @@ static int dw2102_probe(struct usb_interface *intf,
}
p7500->firmware = P7500_FIRMWARE;
p7500->devices[0] = d7500;
- p7500->rc.legacy.rc_map_table = rc_map_tbs_table;
- p7500->rc.legacy.rc_map_size = ARRAY_SIZE(rc_map_tbs_table);
+ p7500->rc.core.rc_query = prof_rc_query;
+ p7500->rc.core.rc_codes = RC_MAP_TBS_NEC;
p7500->adapter->fe[0].frontend_attach = prof_7500_frontend_attach;
@@ -2149,7 +2124,9 @@ static int dw2102_probe(struct usb_interface *intf,
0 == dvb_usb_device_init(intf, s421,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &su3000_properties,
- THIS_MODULE, NULL, adapter_nr))
+ THIS_MODULE, NULL, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf, &t220_properties,
+ THIS_MODULE, NULL, adapter_nr))
return 0;
return -ENODEV;
@@ -2169,7 +2146,7 @@ MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
" DVB-C 3101 USB2.0,"
" TeVii S600, S630, S650, S660, S480, S421, S632"
" Prof 1100, 7500 USB2.0,"
- " Geniatech SU3000 devices");
+ " Geniatech SU3000, T220 devices");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(DW2101_FIRMWARE);
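The dw2102.c changes above also convert the driver from the legacy rc.legacy keymap arrays (rc_map_dw210x_table and friends, now removed) to rc-core: each rc_query callback merely reports a scancode via rc_keydown(), and the keymap lookup is done by the shared map named in .rc_codes. The standalone sketch below, with a made-up two-entry keymap, only illustrates that division of labour; the scancode composition (key[1] << 8 | key[0]) mirrors su3000_rc_query, everything else is hypothetical.

/*
 * Userspace illustration of the rc-core split: the driver reports a
 * scancode, the framework owns the keymap.  The table here is invented
 * for the example and is not one of the kernel RC maps.
 */
#include <stdio.h>
#include <stdint.h>

struct map_entry {
	uint16_t scancode;
	const char *key;
};

static const struct map_entry demo_map[] = {
	{ 0x0025, "KEY_POWER" },
	{ 0x000a, "KEY_MUTE" },
};

static const char *demo_lookup(uint8_t lo, uint8_t hi)
{
	uint16_t scancode = (uint16_t)(hi << 8 | lo);	/* as in su3000_rc_query */
	size_t i;

	for (i = 0; i < sizeof(demo_map) / sizeof(demo_map[0]); i++)
		if (demo_map[i].scancode == scancode)
			return demo_map[i].key;
	return "unmapped";
}

int main(void)
{
	printf("%s\n", demo_lookup(0x25, 0x00));	/* -> KEY_POWER */
	printf("%s\n", demo_lookup(0xff, 0x00));	/* -> unmapped */
	return 0;
}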
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index ca5ee6aceb62..a1fccf3096de 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -1,8 +1,12 @@
config VIDEO_EM28XX
- tristate "Empia EM28xx USB video capture support"
+ tristate "Empia EM28xx USB devices support"
depends on VIDEO_DEV && I2C
select VIDEO_TUNER
select VIDEO_TVEEPROM
+
+config VIDEO_EM28XX_V4L2
+ tristate "Empia EM28xx analog TV, video capture and/or webcam support"
+ depends on VIDEO_EM28XX
select VIDEOBUF2_VMALLOC
select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
@@ -49,6 +53,8 @@ config VIDEO_EM28XX_DVB
select DVB_MB86A20S if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT
---help---
This adds support for DVB cards based on the
Empiatech em28xx chips.
diff --git a/drivers/media/usb/em28xx/Makefile b/drivers/media/usb/em28xx/Makefile
index ad6d48557940..3f850d5063d0 100644
--- a/drivers/media/usb/em28xx/Makefile
+++ b/drivers/media/usb/em28xx/Makefile
@@ -1,10 +1,11 @@
-em28xx-y += em28xx-video.o em28xx-i2c.o em28xx-cards.o
-em28xx-y += em28xx-core.o em28xx-vbi.o em28xx-camera.o
+em28xx-y += em28xx-core.o em28xx-i2c.o em28xx-cards.o em28xx-camera.o
+em28xx-v4l-objs := em28xx-video.o em28xx-vbi.o
em28xx-alsa-objs := em28xx-audio.o
em28xx-rc-objs := em28xx-input.o
obj-$(CONFIG_VIDEO_EM28XX) += em28xx.o
+obj-$(CONFIG_VIDEO_EM28XX_V4L2) += em28xx-v4l.o
obj-$(CONFIG_VIDEO_EM28XX_ALSA) += em28xx-alsa.o
obj-$(CONFIG_VIDEO_EM28XX_DVB) += em28xx-dvb.o
obj-$(CONFIG_VIDEO_EM28XX_RC) += em28xx-rc.o
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 2fdb66ee44ab..05e9bd11a3ff 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2006 Markus Rechberger <mrechberger@gmail.com>
*
- * Copyright (C) 2007-2011 Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2007-2014 Mauro Carvalho Chehab
* - Port to work with the in-kernel driver
* - Cleanups, fixes, alsa-controls, etc.
*
@@ -50,6 +50,9 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "activates debug info");
+#define EM28XX_MAX_AUDIO_BUFS 5
+#define EM28XX_MIN_AUDIO_PACKETS 64
+
#define dprintk(fmt, arg...) do { \
if (debug) \
printk(KERN_INFO "em28xx-audio %s: " fmt, \
@@ -63,17 +66,13 @@ static int em28xx_deinit_isoc_audio(struct em28xx *dev)
int i;
dprintk("Stopping isoc\n");
- for (i = 0; i < EM28XX_AUDIO_BUFS; i++) {
+ for (i = 0; i < dev->adev.num_urb; i++) {
+ struct urb *urb = dev->adev.urb[i];
+
if (!irqs_disabled())
- usb_kill_urb(dev->adev.urb[i]);
+ usb_kill_urb(urb);
else
- usb_unlink_urb(dev->adev.urb[i]);
-
- usb_free_urb(dev->adev.urb[i]);
- dev->adev.urb[i] = NULL;
-
- kfree(dev->adev.transfer_buffer[i]);
- dev->adev.transfer_buffer[i] = NULL;
+ usb_unlink_urb(urb);
}
return 0;
@@ -91,6 +90,12 @@ static void em28xx_audio_isocirq(struct urb *urb)
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
+ if (dev->disconnected) {
+ dprintk("device disconnected while streaming. URB status=%d.\n", urb->status);
+ atomic_set(&dev->stream_started, 0);
+ return;
+ }
+
switch (urb->status) {
case 0: /* success */
case -ETIMEDOUT: /* NAK */
@@ -158,63 +163,27 @@ static void em28xx_audio_isocirq(struct urb *urb)
urb->status = 0;
status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status < 0) {
+ if (status < 0)
em28xx_errdev("resubmit of audio urb failed (error=%i)\n",
status);
- }
return;
}
static int em28xx_init_audio_isoc(struct em28xx *dev)
{
int i, errCode;
- const int sb_size = EM28XX_NUM_AUDIO_PACKETS *
- EM28XX_AUDIO_MAX_PACKET_SIZE;
dprintk("Starting isoc transfers\n");
- for (i = 0; i < EM28XX_AUDIO_BUFS; i++) {
- struct urb *urb;
- int j, k;
-
- dev->adev.transfer_buffer[i] = kmalloc(sb_size, GFP_ATOMIC);
- if (!dev->adev.transfer_buffer[i])
- return -ENOMEM;
-
- memset(dev->adev.transfer_buffer[i], 0x80, sb_size);
- urb = usb_alloc_urb(EM28XX_NUM_AUDIO_PACKETS, GFP_ATOMIC);
- if (!urb) {
- em28xx_errdev("usb_alloc_urb failed!\n");
- for (j = 0; j < i; j++) {
- usb_free_urb(dev->adev.urb[j]);
- kfree(dev->adev.transfer_buffer[j]);
- }
- return -ENOMEM;
- }
-
- urb->dev = dev->udev;
- urb->context = dev;
- urb->pipe = usb_rcvisocpipe(dev->udev, EM28XX_EP_AUDIO);
- urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = dev->adev.transfer_buffer[i];
- urb->interval = 1;
- urb->complete = em28xx_audio_isocirq;
- urb->number_of_packets = EM28XX_NUM_AUDIO_PACKETS;
- urb->transfer_buffer_length = sb_size;
-
- for (j = k = 0; j < EM28XX_NUM_AUDIO_PACKETS;
- j++, k += EM28XX_AUDIO_MAX_PACKET_SIZE) {
- urb->iso_frame_desc[j].offset = k;
- urb->iso_frame_desc[j].length =
- EM28XX_AUDIO_MAX_PACKET_SIZE;
- }
- dev->adev.urb[i] = urb;
- }
+ /* Start streaming */
+ for (i = 0; i < dev->adev.num_urb; i++) {
+ memset(dev->adev.transfer_buffer[i], 0x80,
+ dev->adev.urb[i]->transfer_buffer_length);
- for (i = 0; i < EM28XX_AUDIO_BUFS; i++) {
errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC);
if (errCode) {
- em28xx_errdev("submit of audio urb failed\n");
+ em28xx_errdev("submit of audio urb failed (error=%i)\n",
+ errCode);
em28xx_deinit_isoc_audio(dev);
atomic_set(&dev->stream_started, 0);
return errCode;
@@ -255,15 +224,26 @@ static struct snd_pcm_hardware snd_em28xx_hw_capture = {
.formats = SNDRV_PCM_FMTBIT_S16_LE,
- .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_KNOT,
+ .rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = 62720 * 8, /* just about the value in usbaudio.c */
- .period_bytes_min = 64, /* 12544/2, */
- .period_bytes_max = 12544,
+
+
+ /*
+ * The period is 12288 bytes. Allow a 10% variation around this
+ * value, in order to avoid overruns/underruns due to some clock
+ * drift.
+ *
+ * FIXME: This period assumes 64 packets, and a 48000 PCM rate.
+ * Calculate it dynamically.
+ */
+ .period_bytes_min = 11059,
+ .period_bytes_max = 13516,
+
.periods_min = 2,
.periods_max = 98, /* 12544, */
};
@@ -274,28 +254,48 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
int ret = 0;
- dprintk("opening device and trying to acquire exclusive lock\n");
-
if (!dev) {
em28xx_err("BUG: em28xx can't find device struct."
" Can't proceed with open\n");
return -ENODEV;
}
+ if (dev->disconnected)
+ return -ENODEV;
+
+ dprintk("opening device and trying to acquire exclusive lock\n");
+
runtime->hw = snd_em28xx_hw_capture;
- if ((dev->alt == 0 || dev->audio_ifnum) && dev->adev.users == 0) {
- if (dev->audio_ifnum)
+ if ((dev->alt == 0 || dev->is_audio_only) && dev->adev.users == 0) {
+ int nonblock = !!(substream->f_flags & O_NONBLOCK);
+
+ if (nonblock) {
+ if (!mutex_trylock(&dev->lock))
+ return -EAGAIN;
+ } else
+ mutex_lock(&dev->lock);
+ if (dev->is_audio_only)
+ /* vendor audio is on a separate interface */
dev->alt = 1;
else
+ /* vendor audio is on the same interface as video */
dev->alt = 7;
+ /*
+ * FIXME: The intention seems to be to select the alt
+ * setting with the largest wMaxPacketSize for the video
+ * endpoint.
+ * At least dev->alt should be used instead, but we
+ * should probably not touch it at all if it is
+ * already >0, because wMaxPacketSize of the audio
+ * endpoints seems to be the same for all.
+ */
dprintk("changing alternate number on interface %d to %d\n",
- dev->audio_ifnum, dev->alt);
- usb_set_interface(dev->udev, dev->audio_ifnum, dev->alt);
+ dev->ifnum, dev->alt);
+ usb_set_interface(dev->udev, dev->ifnum, dev->alt);
/* Sets volume, mute, etc */
dev->mute = 0;
- mutex_lock(&dev->lock);
ret = em28xx_audio_analog_set(dev);
if (ret < 0)
goto err;
@@ -304,7 +304,12 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
mutex_unlock(&dev->lock);
}
+ /* Dynamically adjust the period size */
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ dev->adev.period * 95 / 100,
+ dev->adev.period * 105 / 100);
+
dev->adev.capture_pcm_substream = substream;
return 0;
@@ -344,6 +349,10 @@ static int snd_em28xx_hw_capture_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
int ret;
+ struct em28xx *dev = snd_pcm_substream_chip(substream);
+
+ if (dev->disconnected)
+ return -ENODEV;
dprintk("Setting capture parameters\n");
@@ -383,6 +392,9 @@ static int snd_em28xx_prepare(struct snd_pcm_substream *substream)
{
struct em28xx *dev = snd_pcm_substream_chip(substream);
+ if (dev->disconnected)
+ return -ENODEV;
+
dev->adev.hwptr_done_capture = 0;
dev->adev.capture_transfer_done = 0;
@@ -408,6 +420,9 @@ static int snd_em28xx_capture_trigger(struct snd_pcm_substream *substream,
struct em28xx *dev = snd_pcm_substream_chip(substream);
int retval = 0;
+ if (dev->disconnected)
+ return -ENODEV;
+
switch (cmd) {
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
@@ -434,6 +449,9 @@ static snd_pcm_uframes_t snd_em28xx_capture_pointer(struct snd_pcm_substream
snd_pcm_uframes_t hwptr_done;
dev = snd_pcm_substream_chip(substream);
+ if (dev->disconnected)
+ return SNDRV_PCM_POS_XRUN;
+
spin_lock_irqsave(&dev->adev.slock, flags);
hwptr_done = dev->adev.hwptr_done_capture;
spin_unlock_irqrestore(&dev->adev.slock, flags);
@@ -455,6 +473,11 @@ static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
static int em28xx_vol_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *info)
{
+ struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+
+ if (dev->disconnected)
+ return -ENODEV;
+
info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
info->count = 2;
info->value.integer.min = 0;
@@ -467,11 +490,22 @@ static int em28xx_vol_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+ struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream;
u16 val = (0x1f - (value->value.integer.value[0] & 0x1f)) |
(0x1f - (value->value.integer.value[1] & 0x1f)) << 8;
+ int nonblock = 0;
int rc;
- mutex_lock(&dev->lock);
+ if (dev->disconnected)
+ return -ENODEV;
+
+ if (substream)
+ nonblock = !!(substream->f_flags & O_NONBLOCK);
+ if (nonblock) {
+ if (!mutex_trylock(&dev->lock))
+ return -EAGAIN;
+ } else
+ mutex_lock(&dev->lock);
rc = em28xx_read_ac97(dev, kcontrol->private_value);
if (rc < 0)
goto err;
@@ -496,9 +530,20 @@ static int em28xx_vol_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+ struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream;
+ int nonblock = 0;
int val;
- mutex_lock(&dev->lock);
+ if (dev->disconnected)
+ return -ENODEV;
+
+ if (substream)
+ nonblock = !!(substream->f_flags & O_NONBLOCK);
+ if (nonblock) {
+ if (!mutex_trylock(&dev->lock))
+ return -EAGAIN;
+ } else
+ mutex_lock(&dev->lock);
val = em28xx_read_ac97(dev, kcontrol->private_value);
mutex_unlock(&dev->lock);
if (val < 0)
@@ -520,9 +565,20 @@ static int em28xx_vol_put_mute(struct snd_kcontrol *kcontrol,
{
struct em28xx *dev = snd_kcontrol_chip(kcontrol);
u16 val = value->value.integer.value[0];
+ struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream;
+ int nonblock = 0;
int rc;
- mutex_lock(&dev->lock);
+ if (dev->disconnected)
+ return -ENODEV;
+
+ if (substream)
+ nonblock = !!(substream->f_flags & O_NONBLOCK);
+ if (nonblock) {
+ if (!mutex_trylock(&dev->lock))
+ return -EAGAIN;
+ } else
+ mutex_lock(&dev->lock);
rc = em28xx_read_ac97(dev, kcontrol->private_value);
if (rc < 0)
goto err;
@@ -550,9 +606,20 @@ static int em28xx_vol_get_mute(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+ struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream;
+ int nonblock = 0;
int val;
- mutex_lock(&dev->lock);
+ if (dev->disconnected)
+ return -ENODEV;
+
+ if (substream)
+ nonblock = !!(substream->f_flags & O_NONBLOCK);
+ if (nonblock) {
+ if (!mutex_trylock(&dev->lock))
+ return -EAGAIN;
+ } else
+ mutex_lock(&dev->lock);
val = em28xx_read_ac97(dev, kcontrol->private_value);
mutex_unlock(&dev->lock);
if (val < 0)
@@ -634,25 +701,204 @@ static struct snd_pcm_ops snd_em28xx_pcm_capture = {
.page = snd_pcm_get_vmalloc_page,
};
+static void em28xx_audio_free_urb(struct em28xx *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->adev.num_urb; i++) {
+ struct urb *urb = dev->adev.urb[i];
+
+ if (!urb)
+ continue;
+
+ usb_free_coherent(dev->udev, urb->transfer_buffer_length,
+ dev->adev.transfer_buffer[i],
+ urb->transfer_dma);
+
+ usb_free_urb(urb);
+ }
+ kfree(dev->adev.urb);
+ kfree(dev->adev.transfer_buffer);
+ dev->adev.num_urb = 0;
+}
+
+/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
+static int em28xx_audio_ep_packet_size(struct usb_device *udev,
+ struct usb_endpoint_descriptor *e)
+{
+ int size = le16_to_cpu(e->wMaxPacketSize);
+
+ if (udev->speed == USB_SPEED_HIGH)
+ return (size & 0x7ff) * (1 + (((size) >> 11) & 0x03));
+
+ return size & 0x7ff;
+}
+
+static int em28xx_audio_urb_init(struct em28xx *dev)
+{
+ struct usb_interface *intf;
+ struct usb_endpoint_descriptor *e, *ep = NULL;
+ int i, ep_size, interval, num_urb, npackets;
+ int urb_size, bytes_per_transfer;
+ u8 alt;
+
+ if (dev->ifnum)
+ alt = 1;
+ else
+ alt = 7;
+
+ intf = usb_ifnum_to_if(dev->udev, dev->ifnum);
+
+ if (intf->num_altsetting <= alt) {
+ em28xx_errdev("alt %d doesn't exist on interface %d\n",
+ alt, dev->ifnum);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < intf->altsetting[alt].desc.bNumEndpoints; i++) {
+ e = &intf->altsetting[alt].endpoint[i].desc;
+ if (!usb_endpoint_dir_in(e))
+ continue;
+ if (e->bEndpointAddress == EM28XX_EP_AUDIO) {
+ ep = e;
+ break;
+ }
+ }
+
+ if (!ep) {
+ em28xx_errdev("Couldn't find an audio endpoint");
+ return -ENODEV;
+ }
+
+ ep_size = em28xx_audio_ep_packet_size(dev->udev, ep);
+ interval = 1 << (ep->bInterval - 1);
+
+ em28xx_info("Endpoint 0x%02x %s on intf %d alt %d interval = %d, size %d\n",
+ EM28XX_EP_AUDIO, usb_speed_string(dev->udev->speed),
+ dev->ifnum, alt,
+ interval,
+ ep_size);
+
+ /* Calculate the number and size of URBs to better fit the audio samples */
+
+ /*
+ * Estimate the number of bytes per DMA transfer.
+ *
+ * This is given by the sample rate (for now, only 48000 Hz) multiplied
+ * by 2 channels and 2 bytes/sample, divided by the number of microframe
+ * intervals and by the microframe rate (125 us).
+ */
+ bytes_per_transfer = DIV_ROUND_UP(48000 * 2 * 2, 125 * interval);
+
+ /*
+ * Estimate the number of transfer URBs. Don't let it go past the
+ * maximum number of URBs that is known to be supported by the device.
+ */
+ num_urb = DIV_ROUND_UP(bytes_per_transfer, ep_size);
+ if (num_urb > EM28XX_MAX_AUDIO_BUFS)
+ num_urb = EM28XX_MAX_AUDIO_BUFS;
+
+ /*
+ * Now that we know the number of bytes per transfer and the number of
+ * URBs, estimate the typical size of an URB, in order to adjust the
+ * minimal number of packets.
+ */
+ urb_size = bytes_per_transfer / num_urb;
+
+ /*
+ * Now, calculate the number of audio packets to be filled on each
+ * URB. In order to preserve the old behaviour, use a minimal
+ * threshold for this value.
+ */
+ npackets = EM28XX_MIN_AUDIO_PACKETS;
+ if (urb_size > ep_size * npackets)
+ npackets = DIV_ROUND_UP(urb_size, ep_size);
+
+ em28xx_info("Number of URBs: %d, with %d packets and %d size",
+ num_urb, npackets, urb_size);
+
+ /* Estimate the bytes per period */
+ dev->adev.period = urb_size * npackets;
+
+ /* Allocate space to store the number of URBs to be used */
+
+ dev->adev.transfer_buffer = kcalloc(num_urb,
+ sizeof(*dev->adev.transfer_buffer),
+ GFP_ATOMIC);
+ if (!dev->adev.transfer_buffer) {
+ return -ENOMEM;
+ }
+
+ dev->adev.urb = kcalloc(num_urb, sizeof(*dev->adev.urb), GFP_ATOMIC);
+ if (!dev->adev.urb) {
+ kfree(dev->adev.transfer_buffer);
+ return -ENOMEM;
+ }
+
+ /* Alloc memory for each URB and for each transfer buffer */
+ dev->adev.num_urb = num_urb;
+ for (i = 0; i < num_urb; i++) {
+ struct urb *urb;
+ int j, k;
+ void *buf;
+
+ urb = usb_alloc_urb(npackets, GFP_ATOMIC);
+ if (!urb) {
+ em28xx_errdev("usb_alloc_urb failed!\n");
+ em28xx_audio_free_urb(dev);
+ return -ENOMEM;
+ }
+ dev->adev.urb[i] = urb;
+
+ buf = usb_alloc_coherent(dev->udev, npackets * ep_size, GFP_ATOMIC,
+ &urb->transfer_dma);
+ if (!buf) {
+ em28xx_errdev("usb_alloc_coherent failed!\n");
+ em28xx_audio_free_urb(dev);
+ return -ENOMEM;
+ }
+ dev->adev.transfer_buffer[i] = buf;
+
+ urb->dev = dev->udev;
+ urb->context = dev;
+ urb->pipe = usb_rcvisocpipe(dev->udev, EM28XX_EP_AUDIO);
+ urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_buffer = buf;
+ urb->interval = interval;
+ urb->complete = em28xx_audio_isocirq;
+ urb->number_of_packets = npackets;
+ urb->transfer_buffer_length = ep_size * npackets;
+
+ for (j = k = 0; j < npackets; j++, k += ep_size) {
+ urb->iso_frame_desc[j].offset = k;
+ urb->iso_frame_desc[j].length = ep_size;
+ }
+ }
+
+ return 0;
+}
+
static int em28xx_audio_init(struct em28xx *dev)
{
struct em28xx_audio *adev = &dev->adev;
struct snd_pcm *pcm;
struct snd_card *card;
static int devnr;
- int err;
+ int err;
- if (!dev->has_alsa_audio || dev->audio_ifnum < 0) {
+ if (!dev->has_alsa_audio) {
/* This device does not support the extension (in this case
the device is expecting the snd-usb-audio module or
doesn't have analog audio support at all) */
return 0;
}
- printk(KERN_INFO "em28xx-audio.c: probing for em28xx Audio Vendor Class\n");
+ em28xx_info("Binding audio extension\n");
+
printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2006 Markus "
"Rechberger\n");
- printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2007-2011 Mauro Carvalho Chehab\n");
+ printk(KERN_INFO
+ "em28xx-audio.c: Copyright (C) 2007-2014 Mauro Carvalho Chehab\n");
err = snd_card_create(index[devnr], "Em28xx Audio", THIS_MODULE, 0,
&card);
@@ -660,11 +906,12 @@ static int em28xx_audio_init(struct em28xx *dev)
return err;
spin_lock_init(&adev->slock);
+ adev->sndcard = card;
+ adev->udev = dev->udev;
+
err = snd_pcm_new(card, "Em28xx Audio", 0, 0, 1, &pcm);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
+ if (err < 0)
+ goto card_free;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_em28xx_pcm_capture);
pcm->info_flags = 0;
@@ -694,15 +941,25 @@ static int em28xx_audio_init(struct em28xx *dev)
em28xx_cvol_new(card, dev, "Surround", AC97_SURROUND_MASTER);
}
+ err = em28xx_audio_urb_init(dev);
+ if (err)
+ goto card_free;
+
err = snd_card_register(card);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
- adev->sndcard = card;
- adev->udev = dev->udev;
+ if (err < 0)
+ goto urb_free;
+ em28xx_info("Audio extension successfully initialized\n");
return 0;
+
+urb_free:
+ em28xx_audio_free_urb(dev);
+
+card_free:
+ snd_card_free(card);
+ adev->sndcard = NULL;
+
+ return err;
}
static int em28xx_audio_fini(struct em28xx *dev)
@@ -717,7 +974,14 @@ static int em28xx_audio_fini(struct em28xx *dev)
return 0;
}
+ em28xx_info("Closing audio extension");
+
if (dev->adev.sndcard) {
+ snd_card_disconnect(dev->adev.sndcard);
+ flush_work(&dev->wq_trigger);
+
+ em28xx_audio_free_urb(dev);
+
snd_card_free(dev->adev.sndcard);
dev->adev.sndcard = NULL;
}
@@ -745,7 +1009,8 @@ static void __exit em28xx_alsa_unregister(void)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Markus Rechberger <mrechberger@gmail.com>");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
-MODULE_DESCRIPTION("Em28xx Audio driver");
+MODULE_DESCRIPTION(DRIVER_DESC " - audio interface");
+MODULE_VERSION(EM28XX_VERSION);
module_init(em28xx_alsa_register);
module_exit(em28xx_alsa_unregister);
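The URB sizing logic added to em28xx_audio_urb_init() above is easiest to follow with concrete numbers. The standalone sketch below reproduces the same arithmetic outside the kernel; the endpoint descriptor inputs (wMaxPacketSize = 0x00c4, bInterval = 1) are invented for illustration and are not taken from any real em28xx device, while the formulas and the two limits mirror the patch.

/*
 * Sketch of the isochronous audio URB sizing arithmetic from the patch:
 * decode the (possibly high-bandwidth) wMaxPacketSize, estimate the
 * bytes per transfer at 48 kHz / 2 ch / 16 bit, then derive the number
 * of URBs, packets per URB and the resulting ALSA period size.
 */
#include <stdio.h>

#define MAX_AUDIO_BUFS		5	/* EM28XX_MAX_AUDIO_BUFS in the patch */
#define MIN_AUDIO_PACKETS	64	/* EM28XX_MIN_AUDIO_PACKETS in the patch */

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* high-bandwidth multiplier, as encoded in high-speed endpoint descriptors */
static int ep_packet_size(int high_speed, int wMaxPacketSize)
{
	if (high_speed)
		return (wMaxPacketSize & 0x7ff) *
		       (1 + ((wMaxPacketSize >> 11) & 0x03));
	return wMaxPacketSize & 0x7ff;
}

int main(void)
{
	int ep_size = ep_packet_size(1, 0x00c4);	/* hypothetical descriptor */
	int interval = 1 << (1 - 1);			/* bInterval = 1 */

	/* 48000 Hz, 2 channels, 2 bytes/sample, per 125 us microframe interval */
	int bytes_per_transfer = DIV_ROUND_UP(48000 * 2 * 2, 125 * interval);

	int num_urb = DIV_ROUND_UP(bytes_per_transfer, ep_size);
	if (num_urb > MAX_AUDIO_BUFS)
		num_urb = MAX_AUDIO_BUFS;

	int urb_size = bytes_per_transfer / num_urb;

	int npackets = MIN_AUDIO_PACKETS;
	if (urb_size > ep_size * npackets)
		npackets = DIV_ROUND_UP(urb_size, ep_size);

	printf("ep_size=%d bytes, %d URBs, %d packets/URB, period=%d bytes\n",
	       ep_size, num_urb, npackets, urb_size * npackets);
	return 0;
}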
diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c
index d666741797d4..c29f5c4e7b40 100644
--- a/drivers/media/usb/em28xx/em28xx-camera.c
+++ b/drivers/media/usb/em28xx/em28xx-camera.c
@@ -454,3 +454,4 @@ int em28xx_init_camera(struct em28xx *dev)
return ret;
}
+EXPORT_SYMBOL_GPL(em28xx_init_camera);
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index a5196697627f..4d97a76cc3b0 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -36,7 +36,6 @@
#include <media/tvaudio.h>
#include <media/i2c-addr.h>
#include <media/tveeprom.h>
-#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
#include "em28xx.h"
@@ -67,7 +66,7 @@ MODULE_PARM_DESC(usb_xfer_mode,
/* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS - 1 */
-static unsigned long em28xx_devused;
+DECLARE_BITMAP(em28xx_devused, EM28XX_MAXBOARDS);
struct em28xx_hash_table {
unsigned long hash;
@@ -356,6 +355,28 @@ static struct em28xx_reg_seq c3tech_digital_duo_digital[] = {
{ -1, -1, -1, -1},
};
+/*
+ * 2013:0258 PCTV DVB-S2 Stick (461e)
+ * GPIO 0 = POWER_ON
+ * GPIO 1 = BOOST
+ * GPIO 2 = VUV_LNB (red LED)
+ * GPIO 3 = #EXT_12V
+ * GPIO 4 = INT_DEM
+ * GPIO 5 = INT_LNB
+ * GPIO 6 = #RESET_DEM
+ * GPIO 7 = P07_LED (green LED)
+ */
+static struct em28xx_reg_seq pctv_461e[] = {
+ {EM2874_R80_GPIO_P0_CTRL, 0x7f, 0xff, 0},
+ {0x0d, 0xff, 0xff, 0},
+ {EM2874_R80_GPIO_P0_CTRL, 0x3f, 0xff, 100}, /* reset demod */
+ {EM2874_R80_GPIO_P0_CTRL, 0x7f, 0xff, 200}, /* reset demod */
+ {0x0d, 0x42, 0xff, 0},
+ {EM2874_R80_GPIO_P0_CTRL, 0xeb, 0xff, 0},
+ {EM2874_R5F_TS_ENABLE, 0x84, 0x84, 0}, /* parallel? | null discard */
+ { -1, -1, -1, -1},
+};
+
#if 0
static struct em28xx_reg_seq hauppauge_930c_gpio[] = {
{EM2874_R80_GPIO_P0_CTRL, 0x6f, 0xff, 10},
@@ -412,6 +433,70 @@ static struct em28xx_reg_seq pctv_520e[] = {
{ -1, -1, -1, -1},
};
+/* 1ae7:9003/9004 SpeedLink Vicious And Devine Laplace webcam
+ * reg 0x80/0x84:
+ * GPIO_0: capturing LED, 0=on, 1=off
+ * GPIO_2: AV mute button, 0=pressed, 1=unpressed
+ * GPIO_3: illumination button, 0=pressed, 1=unpressed
+ * GPIO_6: illumination/flash LED, 0=on, 1=off
+ * reg 0x81/0x85:
+ * GPIO_7: snapshot button, 0=pressed, 1=unpressed
+ */
+static struct em28xx_reg_seq speedlink_vad_laplace_reg_seq[] = {
+ {EM2820_R08_GPIO_CTRL, 0xf7, 0xff, 10},
+ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xb2, 10},
+ { -1, -1, -1, -1},
+};
+
+/*
+ * Button definitions
+ */
+static struct em28xx_button std_snapshot_button[] = {
+ {
+ .role = EM28XX_BUTTON_SNAPSHOT,
+ .reg_r = EM28XX_R0C_USBSUSP,
+ .reg_clearing = EM28XX_R0C_USBSUSP,
+ .mask = EM28XX_R0C_USBSUSP_SNAPSHOT,
+ .inverted = 0,
+ },
+ {-1, 0, 0, 0, 0},
+};
+
+static struct em28xx_button speedlink_vad_laplace_buttons[] = {
+ {
+ .role = EM28XX_BUTTON_SNAPSHOT,
+ .reg_r = EM2874_R85_GPIO_P1_STATE,
+ .mask = 0x80,
+ .inverted = 1,
+ },
+ {
+ .role = EM28XX_BUTTON_ILLUMINATION,
+ .reg_r = EM2874_R84_GPIO_P0_STATE,
+ .mask = 0x08,
+ .inverted = 1,
+ },
+ {-1, 0, 0, 0, 0},
+};
+
+/*
+ * LED definitions
+ */
+static struct em28xx_led speedlink_vad_laplace_leds[] = {
+ {
+ .role = EM28XX_LED_ANALOG_CAPTURING,
+ .gpio_reg = EM2874_R80_GPIO_P0_CTRL,
+ .gpio_mask = 0x01,
+ .inverted = 1,
+ },
+ {
+ .role = EM28XX_LED_ILLUMINATION,
+ .gpio_reg = EM2874_R80_GPIO_P0_CTRL,
+ .gpio_mask = 0x40,
+ .inverted = 1,
+ },
+ {-1, 0, 0, 0},
+};
+
/*
* Board definitions
*/
@@ -1391,7 +1476,7 @@ struct em28xx_board em28xx_boards[] = {
},
[EM2820_BOARD_PROLINK_PLAYTV_USB2] = {
.name = "SIIG AVTuner-PVR / Pixelview Prolink PlayTV USB 2.0",
- .has_snapshot_button = 1,
+ .buttons = std_snapshot_button,
.tda9887_conf = TDA9887_PRESENT,
.tuner_type = TUNER_YMEC_TVF_5533MF,
.decoder = EM28XX_SAA711X,
@@ -1413,7 +1498,7 @@ struct em28xx_board em28xx_boards[] = {
},
[EM2860_BOARD_SAA711X_REFERENCE_DESIGN] = {
.name = "EM2860/SAA711X Reference Design",
- .has_snapshot_button = 1,
+ .buttons = std_snapshot_button,
.tuner_type = TUNER_ABSENT,
.decoder = EM28XX_SAA711X,
.input = { {
@@ -2020,7 +2105,7 @@ struct em28xx_board em28xx_boards[] = {
},
/* 1b80:e1cc Delock 61959
* Empia EM2874B + Micronas DRX 3913KA2 + NXP TDA18271HDC2
- * mostly the same as MaxMedia UB-425-TC but different remote */
+ * mostly the same as MaxMedia UB-425-TC but different remote */
[EM2874_BOARD_DELOCK_61959] = {
.name = "Delock 61959",
.tuner_type = TUNER_ABSENT,
@@ -2043,7 +2128,38 @@ struct em28xx_board em28xx_boards[] = {
.tuner_gpio = default_tuner_gpio,
.def_i2c_bus = 1,
},
+ /* 1ae7:9003/9004 SpeedLink Vicious And Devine Laplace webcam
+ * Empia EM2765 + OmniVision OV2640 */
+ [EM2765_BOARD_SPEEDLINK_VAD_LAPLACE] = {
+ .name = "SpeedLink Vicious And Devine Laplace webcam",
+ .xclk = EM28XX_XCLK_FREQUENCY_24MHZ,
+ .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
+ EM28XX_I2C_FREQ_100_KHZ,
+ .def_i2c_bus = 1,
+ .tuner_type = TUNER_ABSENT,
+ .is_webcam = 1,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .amux = EM28XX_AMUX_VIDEO,
+ .gpio = speedlink_vad_laplace_reg_seq,
+ } },
+ .buttons = speedlink_vad_laplace_buttons,
+ .leds = speedlink_vad_laplace_leds,
+ },
+ /* 2013:0258 PCTV DVB-S2 Stick (461e)
+ * Empia EM28178, Montage M88DS3103, Montage M88TS2022, Allegro A8293 */
+ [EM28178_BOARD_PCTV_461E] = {
+ .def_i2c_bus = 1,
+ .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ,
+ .name = "PCTV DVB-S2 Stick (461e)",
+ .tuner_type = TUNER_ABSENT,
+ .tuner_gpio = pctv_461e,
+ .has_dvb = 1,
+ .ir_codes = RC_MAP_PINNACLE_PCTV_HD,
+ },
};
+EXPORT_SYMBOL_GPL(em28xx_boards);
+
const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
/* table of devices that work with this driver */
@@ -2208,6 +2324,12 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2884_BOARD_PCTV_520E },
{ USB_DEVICE(0x1b80, 0xe1cc),
.driver_info = EM2874_BOARD_DELOCK_61959 },
+ { USB_DEVICE(0x1ae7, 0x9003),
+ .driver_info = EM2765_BOARD_SPEEDLINK_VAD_LAPLACE },
+ { USB_DEVICE(0x1ae7, 0x9004),
+ .driver_info = EM2765_BOARD_SPEEDLINK_VAD_LAPLACE },
+ { USB_DEVICE(0x2013, 0x0258),
+ .driver_info = EM28178_BOARD_PCTV_461E },
{ },
};
MODULE_DEVICE_TABLE(usb, em28xx_id_table);
@@ -2239,24 +2361,6 @@ static struct em28xx_hash_table em28xx_i2c_hash[] = {
};
/* NOTE: introduce a separate hash table for devices with 16 bit eeproms */
-/* I2C possible address to saa7115, tvp5150, msp3400, tvaudio */
-static unsigned short saa711x_addrs[] = {
- 0x4a >> 1, 0x48 >> 1, /* SAA7111, SAA7111A and SAA7113 */
- 0x42 >> 1, 0x40 >> 1, /* SAA7114, SAA7115 and SAA7118 */
- I2C_CLIENT_END };
-
-static unsigned short tvp5150_addrs[] = {
- 0xb8 >> 1,
- 0xba >> 1,
- I2C_CLIENT_END
-};
-
-static unsigned short msp3400_addrs[] = {
- 0x80 >> 1,
- 0x88 >> 1,
- I2C_CLIENT_END
-};
-
int em28xx_tuner_callback(void *ptr, int component, int command, int arg)
{
struct em28xx_i2c_bus *i2c_bus = ptr;
@@ -2408,113 +2512,6 @@ static void em28xx_pre_card_setup(struct em28xx *dev)
em28xx_set_mode(dev, EM28XX_SUSPEND);
}
-static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
-{
- memset(ctl, 0, sizeof(*ctl));
-
- ctl->fname = XC2028_DEFAULT_FIRMWARE;
- ctl->max_len = 64;
- ctl->mts = em28xx_boards[dev->model].mts_firmware;
-
- switch (dev->model) {
- case EM2880_BOARD_EMPIRE_DUAL_TV:
- case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
- case EM2882_BOARD_TERRATEC_HYBRID_XS:
- ctl->demod = XC3028_FE_ZARLINK456;
- break;
- case EM2880_BOARD_TERRATEC_HYBRID_XS:
- case EM2880_BOARD_TERRATEC_HYBRID_XS_FR:
- case EM2881_BOARD_PINNACLE_HYBRID_PRO:
- ctl->demod = XC3028_FE_ZARLINK456;
- break;
- case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2:
- case EM2882_BOARD_PINNACLE_HYBRID_PRO_330E:
- ctl->demod = XC3028_FE_DEFAULT;
- break;
- case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600:
- ctl->demod = XC3028_FE_DEFAULT;
- ctl->fname = XC3028L_DEFAULT_FIRMWARE;
- break;
- case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850:
- case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
- case EM2880_BOARD_PINNACLE_PCTV_HD_PRO:
- /* FIXME: Better to specify the needed IF */
- ctl->demod = XC3028_FE_DEFAULT;
- break;
- case EM2883_BOARD_KWORLD_HYBRID_330U:
- case EM2882_BOARD_DIKOM_DK300:
- case EM2882_BOARD_KWORLD_VS_DVBT:
- ctl->demod = XC3028_FE_CHINA;
- ctl->fname = XC2028_DEFAULT_FIRMWARE;
- break;
- case EM2882_BOARD_EVGA_INDTUBE:
- ctl->demod = XC3028_FE_CHINA;
- ctl->fname = XC3028L_DEFAULT_FIRMWARE;
- break;
- default:
- ctl->demod = XC3028_FE_OREN538;
- }
-}
-
-static void em28xx_tuner_setup(struct em28xx *dev)
-{
- struct tuner_setup tun_setup;
- struct v4l2_frequency f;
-
- if (dev->tuner_type == TUNER_ABSENT)
- return;
-
- memset(&tun_setup, 0, sizeof(tun_setup));
-
- tun_setup.mode_mask = T_ANALOG_TV | T_RADIO;
- tun_setup.tuner_callback = em28xx_tuner_callback;
-
- if (dev->board.radio.type) {
- tun_setup.type = dev->board.radio.type;
- tun_setup.addr = dev->board.radio_addr;
-
- v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup);
- }
-
- if ((dev->tuner_type != TUNER_ABSENT) && (dev->tuner_type)) {
- tun_setup.type = dev->tuner_type;
- tun_setup.addr = dev->tuner_addr;
-
- v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup);
- }
-
- if (dev->tda9887_conf) {
- struct v4l2_priv_tun_config tda9887_cfg;
-
- tda9887_cfg.tuner = TUNER_TDA9887;
- tda9887_cfg.priv = &dev->tda9887_conf;
-
- v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config, &tda9887_cfg);
- }
-
- if (dev->tuner_type == TUNER_XC2028) {
- struct v4l2_priv_tun_config xc2028_cfg;
- struct xc2028_ctrl ctl;
-
- memset(&xc2028_cfg, 0, sizeof(xc2028_cfg));
- memset(&ctl, 0, sizeof(ctl));
-
- em28xx_setup_xc3028(dev, &ctl);
-
- xc2028_cfg.tuner = TUNER_XC2028;
- xc2028_cfg.priv = &ctl;
-
- v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config, &xc2028_cfg);
- }
-
- /* configure tuner */
- f.tuner = 0;
- f.type = V4L2_TUNER_ANALOG_TV;
- f.frequency = 9076; /* just a magic number */
- dev->ctl_freq = f.frequency;
- v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
-}
-
static int em28xx_hint_board(struct em28xx *dev)
{
int i;
@@ -2768,57 +2765,56 @@ static void em28xx_card_setup(struct em28xx *dev)
/* Allow override tuner type by a module parameter */
if (tuner >= 0)
dev->tuner_type = tuner;
+}
- /* request some modules */
- if (dev->board.has_msp34xx)
- v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
- "msp3400", 0, msp3400_addrs);
-
- if (dev->board.decoder == EM28XX_SAA711X)
- v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
- "saa7115_auto", 0, saa711x_addrs);
-
- if (dev->board.decoder == EM28XX_TVP5150)
- v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
- "tvp5150", 0, tvp5150_addrs);
-
- if (dev->board.adecoder == EM28XX_TVAUDIO)
- v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
- "tvaudio", dev->board.tvaudio_addr, NULL);
-
- if (dev->board.tuner_type != TUNER_ABSENT) {
- int has_demod = (dev->tda9887_conf & TDA9887_PRESENT);
-
- if (dev->board.radio.type)
- v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
- "tuner", dev->board.radio_addr, NULL);
-
- if (has_demod)
- v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap[dev->def_i2c_bus], "tuner",
- 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
- if (dev->tuner_addr == 0) {
- enum v4l2_i2c_tuner_type type =
- has_demod ? ADDRS_TV_WITH_DEMOD : ADDRS_TV;
- struct v4l2_subdev *sd;
-
- sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap[dev->def_i2c_bus], "tuner",
- 0, v4l2_i2c_tuner_addrs(type));
-
- if (sd)
- dev->tuner_addr = v4l2_i2c_subdev_addr(sd);
- } else {
- v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
- "tuner", dev->tuner_addr, NULL);
- }
- }
+void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
+{
+ memset(ctl, 0, sizeof(*ctl));
- em28xx_tuner_setup(dev);
+ ctl->fname = XC2028_DEFAULT_FIRMWARE;
+ ctl->max_len = 64;
+ ctl->mts = em28xx_boards[dev->model].mts_firmware;
- em28xx_init_camera(dev);
+ switch (dev->model) {
+ case EM2880_BOARD_EMPIRE_DUAL_TV:
+ case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
+ case EM2882_BOARD_TERRATEC_HYBRID_XS:
+ ctl->demod = XC3028_FE_ZARLINK456;
+ break;
+ case EM2880_BOARD_TERRATEC_HYBRID_XS:
+ case EM2880_BOARD_TERRATEC_HYBRID_XS_FR:
+ case EM2881_BOARD_PINNACLE_HYBRID_PRO:
+ ctl->demod = XC3028_FE_ZARLINK456;
+ break;
+ case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2:
+ case EM2882_BOARD_PINNACLE_HYBRID_PRO_330E:
+ ctl->demod = XC3028_FE_DEFAULT;
+ break;
+ case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600:
+ ctl->demod = XC3028_FE_DEFAULT;
+ ctl->fname = XC3028L_DEFAULT_FIRMWARE;
+ break;
+ case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850:
+ case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
+ case EM2880_BOARD_PINNACLE_PCTV_HD_PRO:
+ /* FIXME: Better to specify the needed IF */
+ ctl->demod = XC3028_FE_DEFAULT;
+ break;
+ case EM2883_BOARD_KWORLD_HYBRID_330U:
+ case EM2882_BOARD_DIKOM_DK300:
+ case EM2882_BOARD_KWORLD_VS_DVBT:
+ ctl->demod = XC3028_FE_CHINA;
+ ctl->fname = XC2028_DEFAULT_FIRMWARE;
+ break;
+ case EM2882_BOARD_EVGA_INDTUBE:
+ ctl->demod = XC3028_FE_CHINA;
+ ctl->fname = XC3028L_DEFAULT_FIRMWARE;
+ break;
+ default:
+ ctl->demod = XC3028_FE_OREN538;
+ }
}
-
+EXPORT_SYMBOL_GPL(em28xx_setup_xc3028);
static void request_module_async(struct work_struct *work)
{
@@ -2831,17 +2827,30 @@ static void request_module_async(struct work_struct *work)
* can be initialised right now. Otherwise, the module init
* code will do it.
*/
+
+ /*
+ * Devices with an audio-only interface also have a V4L/DVB/RC
+ * interface. Don't register extensions twice on those devices.
+ */
+ if (dev->is_audio_only) {
+#if defined(CONFIG_MODULES) && defined(MODULE)
+ request_module("em28xx-alsa");
+#endif
+ return;
+ }
+
em28xx_init_extension(dev);
#if defined(CONFIG_MODULES) && defined(MODULE)
+ if (dev->has_video)
+ request_module("em28xx-v4l");
if (dev->has_audio_class)
request_module("snd-usb-audio");
else if (dev->has_alsa_audio)
request_module("em28xx-alsa");
-
if (dev->board.has_dvb)
request_module("em28xx-dvb");
- if (dev->board.has_snapshot_button ||
+ if (dev->board.buttons ||
((dev->board.ir_codes || dev->board.has_ir_i2c) && !disable_ir))
request_module("em28xx-rc");
#endif /* CONFIG_MODULES */
@@ -2867,23 +2876,20 @@ void em28xx_release_resources(struct em28xx *dev)
{
/*FIXME: I2C IR should be disconnected */
- em28xx_release_analog_resources(dev);
+ mutex_lock(&dev->lock);
if (dev->def_i2c_bus)
em28xx_i2c_unregister(dev, 1);
em28xx_i2c_unregister(dev, 0);
- if (dev->clk)
- v4l2_clk_unregister_fixed(dev->clk);
-
- v4l2_ctrl_handler_free(&dev->ctrl_handler);
-
- v4l2_device_unregister(&dev->v4l2_dev);
usb_put_dev(dev->udev);
/* Mark device as unused */
- clear_bit(dev->devno, &em28xx_devused);
+ clear_bit(dev->devno, em28xx_devused);
+
+ mutex_unlock(&dev->lock);
};
+EXPORT_SYMBOL_GPL(em28xx_release_resources);
/*
* em28xx_init_dev()
@@ -2893,7 +2899,6 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
struct usb_interface *interface,
int minor)
{
- struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
int retval;
static const char *default_chip_name = "em28xx";
const char *chip_name = default_chip_name;
@@ -2968,6 +2973,11 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
dev->wait_after_write = 0;
dev->eeprom_addrwidth_16bit = 1;
break;
+ case CHIP_ID_EM28178:
+ chip_name = "em28178";
+ dev->wait_after_write = 0;
+ dev->eeprom_addrwidth_16bit = 1;
+ break;
case CHIP_ID_EM2883:
chip_name = "em2882/3";
dev->wait_after_write = 0;
@@ -2983,6 +2993,16 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
}
}
+ if (dev->chip_id == CHIP_ID_EM2870 ||
+ dev->chip_id == CHIP_ID_EM2874 ||
+ dev->chip_id == CHIP_ID_EM28174 ||
+ dev->chip_id == CHIP_ID_EM28178) {
+ /* Digital only device - don't load any alsa module */
+ dev->audio_mode.has_audio = false;
+ dev->has_audio_class = false;
+ dev->has_alsa_audio = false;
+ }
+
if (chip_name != default_chip_name)
printk(KERN_INFO DRIVER_NAME
": chip ID is %s\n", chip_name);
@@ -3015,15 +3035,6 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
}
}
- retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev);
- if (retval < 0) {
- em28xx_errdev("Call to v4l2_device_register() failed!\n");
- return retval;
- }
-
- v4l2_ctrl_handler_init(hdl, 8);
- dev->v4l2_dev.ctrl_handler = hdl;
-
rt_mutex_init(&dev->i2c_bus_lock);
/* register i2c bus 0 */
@@ -3034,7 +3045,7 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
if (retval < 0) {
em28xx_errdev("%s: em28xx_i2c_register bus 0 - error [%d]!\n",
__func__, retval);
- goto unregister_dev;
+ return retval;
}
/* register i2c bus 1 */
@@ -3048,88 +3059,17 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
if (retval < 0) {
em28xx_errdev("%s: em28xx_i2c_register bus 1 - error [%d]!\n",
__func__, retval);
- goto unregister_dev;
- }
- }
-
- /*
- * Default format, used for tvp5150 or saa711x output formats
- */
- dev->vinmode = 0x10;
- dev->vinctl = EM28XX_VINCTRL_INTERLACED |
- EM28XX_VINCTRL_CCIR656_ENABLE;
- /* Do board specific init and eeprom reading */
- em28xx_card_setup(dev);
+ em28xx_i2c_unregister(dev, 0);
- /* Configure audio */
- retval = em28xx_audio_setup(dev);
- if (retval < 0) {
- em28xx_errdev("%s: Error while setting audio - error [%d]!\n",
- __func__, retval);
- goto fail;
- }
- if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
- v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
- V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
- v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
- V4L2_CID_AUDIO_VOLUME, 0, 0x1f, 1, 0x1f);
- } else {
- /* install the em28xx notify callback */
- v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_MUTE),
- em28xx_ctrl_notify, dev);
- v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_VOLUME),
- em28xx_ctrl_notify, dev);
- }
-
- /* wake i2c devices */
- em28xx_wake_i2c(dev);
-
- /* init video dma queues */
- INIT_LIST_HEAD(&dev->vidq.active);
- INIT_LIST_HEAD(&dev->vbiq.active);
-
- if (dev->board.has_msp34xx) {
- /* Send a reset to other chips via gpio */
- retval = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xf7);
- if (retval < 0) {
- em28xx_errdev("%s: em28xx_write_reg - "
- "msp34xx(1) failed! error [%d]\n",
- __func__, retval);
- goto fail;
- }
- msleep(3);
-
- retval = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xff);
- if (retval < 0) {
- em28xx_errdev("%s: em28xx_write_reg - "
- "msp34xx(2) failed! error [%d]\n",
- __func__, retval);
- goto fail;
+ return retval;
}
- msleep(3);
- }
-
- retval = em28xx_register_analog_devices(dev);
- if (retval < 0) {
- goto fail;
}
- /* Save some power by putting tuner to sleep */
- v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_power, 0);
+ /* Do board specific init and eeprom reading */
+ em28xx_card_setup(dev);
return 0;
-
-fail:
- if (dev->def_i2c_bus)
- em28xx_i2c_unregister(dev, 1);
- em28xx_i2c_unregister(dev, 0);
- v4l2_ctrl_handler_free(&dev->ctrl_handler);
-
-unregister_dev:
- v4l2_device_unregister(&dev->v4l2_dev);
-
- return retval;
}
/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
@@ -3154,7 +3094,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* Check to see next free device and mark as used */
do {
- nr = find_first_zero_bit(&em28xx_devused, EM28XX_MAXBOARDS);
+ nr = find_first_zero_bit(em28xx_devused, EM28XX_MAXBOARDS);
if (nr >= EM28XX_MAXBOARDS) {
/* No free device slots */
printk(DRIVER_NAME ": Supports only %i em28xx boards.\n",
@@ -3162,7 +3102,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
retval = -ENOMEM;
goto err_no_slot;
}
- } while (test_and_set_bit(nr, &em28xx_devused));
+ } while (test_and_set_bit(nr, em28xx_devused));
/* Don't register audio interfaces */
if (interface->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
@@ -3332,7 +3272,9 @@ static int em28xx_usb_probe(struct usb_interface *interface,
dev->alt = -1;
dev->is_audio_only = has_audio && !(has_video || has_dvb);
dev->has_alsa_audio = has_audio;
- dev->audio_ifnum = ifnum;
+ dev->audio_mode.has_audio = has_audio;
+ dev->has_video = has_video;
+ dev->ifnum = ifnum;
/* Checks if audio is provided by some interface */
for (i = 0; i < udev->config->desc.bNumInterfaces; i++) {
@@ -3369,15 +3311,11 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
- /* initialize videobuf2 stuff */
- em28xx_vb2_setup(dev);
-
/* allocate device struct */
mutex_init(&dev->lock);
- mutex_lock(&dev->lock);
retval = em28xx_init_dev(dev, udev, interface, nr);
if (retval) {
- goto unlock_and_free;
+ goto err_free;
}
if (usb_xfer_mode < 0) {
@@ -3402,26 +3340,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
em28xx_info("dvb set to %s mode.\n",
dev->dvb_xfer_bulk ? "bulk" : "isoc");
-
- /* pre-allocate DVB usb transfer buffers */
- if (dev->dvb_xfer_bulk) {
- retval = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
- dev->dvb_xfer_bulk,
- EM28XX_DVB_NUM_BUFS,
- 512,
- EM28XX_DVB_BULK_PACKET_MULTIPLIER);
- } else {
- retval = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
- dev->dvb_xfer_bulk,
- EM28XX_DVB_NUM_BUFS,
- dev->dvb_max_pkt_size_isoc,
- EM28XX_DVB_NUM_ISOC_PACKETS);
- }
- if (retval) {
- printk(DRIVER_NAME
- ": Failed to pre-allocate USB transfer buffers for DVB.\n");
- goto unlock_and_free;
- }
}
request_modules(dev);
@@ -3429,19 +3347,15 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* Should be the last thing to do, to avoid newer udev's to
open the device before fully initializing it
*/
- mutex_unlock(&dev->lock);
return 0;
-unlock_and_free:
- mutex_unlock(&dev->lock);
-
err_free:
kfree(dev->alt_max_pkt_size_isoc);
kfree(dev);
err:
- clear_bit(nr, &em28xx_devused);
+ clear_bit(nr, em28xx_devused);
err_no_slot:
usb_put_dev(udev);
@@ -3465,36 +3379,13 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
dev->disconnected = 1;
- if (dev->is_audio_only) {
- mutex_lock(&dev->lock);
- em28xx_close_extension(dev);
- mutex_unlock(&dev->lock);
- return;
- }
-
- em28xx_info("disconnecting %s\n", dev->vdev->name);
+ em28xx_info("Disconnecting %s\n", dev->name);
flush_request_modules(dev);
- mutex_lock(&dev->lock);
-
- v4l2_device_disconnect(&dev->v4l2_dev);
-
- if (dev->users) {
- em28xx_warn("device %s is open! Deregistration and memory deallocation are deferred on close.\n",
- video_device_node_name(dev->vdev));
-
- em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
- em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
- }
-
em28xx_close_extension(dev);
- /* NOTE: must be called BEFORE the resources are released */
-
- if (!dev->users)
- em28xx_release_resources(dev);
- mutex_unlock(&dev->lock);
+ em28xx_release_resources(dev);
if (!dev->users) {
kfree(dev->alt_max_pkt_size_isoc);
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index fc157af5234a..898fb9bd88a2 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -23,6 +23,7 @@
*/
#include <linux/init.h>
+#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -33,6 +34,16 @@
#include "em28xx.h"
+#define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \
+ "Markus Rechberger <mrechberger@gmail.com>, " \
+ "Mauro Carvalho Chehab <mchehab@infradead.org>, " \
+ "Sascha Sommer <saschasommer@freenet.de>"
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(EM28XX_VERSION);
+
/* #define ENABLE_DEBUG_ISOC_FRAMES */
static unsigned int core_debug;
@@ -53,14 +64,6 @@ MODULE_PARM_DESC(reg_debug, "enable debug messages [URB reg]");
printk(KERN_INFO "%s %s :"fmt, \
dev->name, __func__ , ##arg); } while (0)
-static int alt;
-module_param(alt, int, 0644);
-MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
-
-static unsigned int disable_vbi;
-module_param(disable_vbi, int, 0644);
-MODULE_PARM_DESC(disable_vbi, "disable vbi support");
-
/* FIXME */
#define em28xx_isocdbg(fmt, arg...) do {\
if (core_debug) \
@@ -226,21 +229,42 @@ int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
EXPORT_SYMBOL_GPL(em28xx_write_reg_bits);
/*
+ * em28xx_toggle_reg_bits()
+ * toggles/inverts the bits (specified by bitmask) of a register
+ */
+int em28xx_toggle_reg_bits(struct em28xx *dev, u16 reg, u8 bitmask)
+{
+ int oldval;
+ u8 newval;
+
+ oldval = em28xx_read_reg(dev, reg);
+ if (oldval < 0)
+ return oldval;
+
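+ /* invert the bits selected by bitmask; leave the remaining bits unchanged */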
+ newval = (~oldval & bitmask) | (oldval & ~bitmask);
+
+ return em28xx_write_reg(dev, reg, newval);
+}
+EXPORT_SYMBOL_GPL(em28xx_toggle_reg_bits);
+
+/*
* em28xx_is_ac97_ready()
* Checks if ac97 is ready
*/
static int em28xx_is_ac97_ready(struct em28xx *dev)
{
- int ret, i;
+ unsigned long timeout = jiffies + msecs_to_jiffies(EM28XX_AC97_XFER_TIMEOUT);
+ int ret;
/* Wait up to 50 ms for AC97 command to complete */
- for (i = 0; i < 10; i++, msleep(5)) {
+ while (time_is_after_jiffies(timeout)) {
ret = em28xx_read_reg(dev, EM28XX_R43_AC97BUSY);
if (ret < 0)
return ret;
if (!(ret & 0x01))
return 0;
+ msleep(5);
}
em28xx_warn("AC97 command still being executed: not handled properly!\n");
@@ -482,16 +506,8 @@ int em28xx_audio_setup(struct em28xx *dev)
int vid1, vid2, feat, cfg;
u32 vid;
- if (dev->chip_id == CHIP_ID_EM2870 || dev->chip_id == CHIP_ID_EM2874
- || dev->chip_id == CHIP_ID_EM28174) {
- /* Digital only device - don't load any alsa module */
- dev->audio_mode.has_audio = false;
- dev->has_audio_class = false;
- dev->has_alsa_audio = false;
+ if (!dev->audio_mode.has_audio)
return 0;
- }
-
- dev->audio_mode.has_audio = true;
/* See how this device is configured */
cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
@@ -504,17 +520,19 @@ int em28xx_audio_setup(struct em28xx *dev)
dev->has_alsa_audio = false;
dev->audio_mode.has_audio = false;
return 0;
- } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
- EM28XX_CHIPCFG_I2S_3_SAMPRATES) {
- em28xx_info("I2S Audio (3 sample rates)\n");
- dev->audio_mode.i2s_3rates = 1;
- } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
- EM28XX_CHIPCFG_I2S_5_SAMPRATES) {
- em28xx_info("I2S Audio (5 sample rates)\n");
- dev->audio_mode.i2s_5rates = 1;
- }
-
- if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) != EM28XX_CHIPCFG_AC97) {
+ } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) != EM28XX_CHIPCFG_AC97) {
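+ /*
+ * The CHIPCFG I2S bits encode a different number of supported
+ * sample rates on em2820/40 (1 or 3) than on em2860 and newer
+ * chips (3 or 5); see the EM2820_/EM2860_CHIPCFG_I2S_* defines.
+ */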
+ if (dev->chip_id < CHIP_ID_EM2860 &&
+ (cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
+ EM2820_CHIPCFG_I2S_1_SAMPRATE)
+ dev->audio_mode.i2s_samplerates = 1;
+ else if (dev->chip_id >= CHIP_ID_EM2860 &&
+ (cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
+ EM2860_CHIPCFG_I2S_5_SAMPRATES)
+ dev->audio_mode.i2s_samplerates = 5;
+ else
+ dev->audio_mode.i2s_samplerates = 3;
+ em28xx_info("I2S Audio (%d sample rate(s))\n",
+ dev->audio_mode.i2s_samplerates);
/* Skip the code that does AC97 vendor detection */
dev->audio_mode.ac97 = EM28XX_NO_AC97;
goto init_audio;
@@ -582,23 +600,21 @@ init_audio:
}
EXPORT_SYMBOL_GPL(em28xx_audio_setup);
-int em28xx_colorlevels_set_default(struct em28xx *dev)
+const struct em28xx_led *em28xx_find_led(struct em28xx *dev,
+ enum em28xx_led_role role)
{
- em28xx_write_reg(dev, EM28XX_R20_YGAIN, CONTRAST_DEFAULT);
- em28xx_write_reg(dev, EM28XX_R21_YOFFSET, BRIGHTNESS_DEFAULT);
- em28xx_write_reg(dev, EM28XX_R22_UVGAIN, SATURATION_DEFAULT);
- em28xx_write_reg(dev, EM28XX_R23_UOFFSET, BLUE_BALANCE_DEFAULT);
- em28xx_write_reg(dev, EM28XX_R24_VOFFSET, RED_BALANCE_DEFAULT);
- em28xx_write_reg(dev, EM28XX_R25_SHARPNESS, SHARPNESS_DEFAULT);
-
- em28xx_write_reg(dev, EM28XX_R14_GAMMA, 0x20);
- em28xx_write_reg(dev, EM28XX_R15_RGAIN, 0x20);
- em28xx_write_reg(dev, EM28XX_R16_GGAIN, 0x20);
- em28xx_write_reg(dev, EM28XX_R17_BGAIN, 0x20);
- em28xx_write_reg(dev, EM28XX_R18_ROFFSET, 0x00);
- em28xx_write_reg(dev, EM28XX_R19_GOFFSET, 0x00);
- return em28xx_write_reg(dev, EM28XX_R1A_BOFFSET, 0x00);
+ if (dev->board.leds) {
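+ /* the board LED table is terminated by an entry with an out-of-range role */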
+ u8 k = 0;
+ while (dev->board.leds[k].role >= 0 &&
+ dev->board.leds[k].role < EM28XX_NUM_LED_ROLES) {
+ if (dev->board.leds[k].role == role)
+ return &dev->board.leds[k];
+ k++;
+ }
+ }
+ return NULL;
}
+EXPORT_SYMBOL_GPL(em28xx_find_led);
int em28xx_capture_start(struct em28xx *dev, int start)
{
@@ -606,271 +622,57 @@ int em28xx_capture_start(struct em28xx *dev, int start)
if (dev->chip_id == CHIP_ID_EM2874 ||
dev->chip_id == CHIP_ID_EM2884 ||
- dev->chip_id == CHIP_ID_EM28174) {
+ dev->chip_id == CHIP_ID_EM28174 ||
+ dev->chip_id == CHIP_ID_EM28178) {
/* The Transport Stream Enable Register moved in em2874 */
- if (!start) {
- rc = em28xx_write_reg_bits(dev, EM2874_R5F_TS_ENABLE,
- 0x00,
- EM2874_TS1_CAPTURE_ENABLE);
- return rc;
- }
-
- /* Enable Transport Stream */
rc = em28xx_write_reg_bits(dev, EM2874_R5F_TS_ENABLE,
- EM2874_TS1_CAPTURE_ENABLE,
+ start ?
+ EM2874_TS1_CAPTURE_ENABLE : 0x00,
EM2874_TS1_CAPTURE_ENABLE);
- return rc;
- }
-
-
- /* FIXME: which is the best order? */
- /* video registers are sampled by VREF */
- rc = em28xx_write_reg_bits(dev, EM28XX_R0C_USBSUSP,
- start ? 0x10 : 0x00, 0x10);
- if (rc < 0)
- return rc;
-
- if (!start) {
- /* disable video capture */
- rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x27);
- return rc;
- }
-
- if (dev->board.is_webcam)
- rc = em28xx_write_reg(dev, 0x13, 0x0c);
-
- /* enable video capture */
- rc = em28xx_write_reg(dev, 0x48, 0x00);
-
- if (dev->mode == EM28XX_ANALOG_MODE)
- rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x67);
- else
- rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x37);
-
- msleep(6);
-
- return rc;
-}
-
-int em28xx_vbi_supported(struct em28xx *dev)
-{
- /* Modprobe option to manually disable */
- if (disable_vbi == 1)
- return 0;
-
- if (dev->board.is_webcam)
- return 0;
-
- /* FIXME: check subdevices for VBI support */
-
- if (dev->chip_id == CHIP_ID_EM2860 ||
- dev->chip_id == CHIP_ID_EM2883)
- return 1;
-
- /* Version of em28xx that does not support VBI */
- return 0;
-}
-
-int em28xx_set_outfmt(struct em28xx *dev)
-{
- int ret;
- u8 fmt, vinctrl;
-
- fmt = dev->format->reg;
- if (!dev->is_em25xx)
- fmt |= 0x20;
- /*
- * NOTE: it's not clear if this is really needed !
- * The datasheets say bit 5 is a reserved bit and devices seem to work
- * fine without it. But the Windows driver sets it for em2710/50+em28xx
- * devices and we've always been setting it, too.
- *
- * em2765 (em25xx, em276x/7x/8x) devices do NOT work with this bit set,
- * it's likely used for an additional (compressed ?) format there.
- */
- ret = em28xx_write_reg(dev, EM28XX_R27_OUTFMT, fmt);
- if (ret < 0)
- return ret;
-
- ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, dev->vinmode);
- if (ret < 0)
- return ret;
-
- vinctrl = dev->vinctl;
- if (em28xx_vbi_supported(dev) == 1) {
- vinctrl |= EM28XX_VINCTRL_VBI_RAW;
- em28xx_write_reg(dev, EM28XX_R34_VBI_START_H, 0x00);
- em28xx_write_reg(dev, EM28XX_R36_VBI_WIDTH, dev->vbi_width/4);
- em28xx_write_reg(dev, EM28XX_R37_VBI_HEIGHT, dev->vbi_height);
- if (dev->norm & V4L2_STD_525_60) {
- /* NTSC */
- em28xx_write_reg(dev, EM28XX_R35_VBI_START_V, 0x09);
- } else if (dev->norm & V4L2_STD_625_50) {
- /* PAL */
- em28xx_write_reg(dev, EM28XX_R35_VBI_START_V, 0x07);
- }
- }
-
- return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, vinctrl);
-}
-
-static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax,
- u8 ymin, u8 ymax)
-{
- em28xx_coredbg("em28xx Scale: (%d,%d)-(%d,%d)\n",
- xmin, ymin, xmax, ymax);
-
- em28xx_write_regs(dev, EM28XX_R28_XMIN, &xmin, 1);
- em28xx_write_regs(dev, EM28XX_R29_XMAX, &xmax, 1);
- em28xx_write_regs(dev, EM28XX_R2A_YMIN, &ymin, 1);
- return em28xx_write_regs(dev, EM28XX_R2B_YMAX, &ymax, 1);
-}
-
-static void em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart,
- u16 width, u16 height)
-{
- u8 cwidth = width >> 2;
- u8 cheight = height >> 2;
- u8 overflow = (height >> 9 & 0x02) | (width >> 10 & 0x01);
- /* NOTE: size limit: 2047x1023 = 2MPix */
-
- em28xx_coredbg("capture area set to (%d,%d): %dx%d\n",
- hstart, vstart,
- ((overflow & 2) << 9 | cwidth << 2),
- ((overflow & 1) << 10 | cheight << 2));
-
- em28xx_write_regs(dev, EM28XX_R1C_HSTART, &hstart, 1);
- em28xx_write_regs(dev, EM28XX_R1D_VSTART, &vstart, 1);
- em28xx_write_regs(dev, EM28XX_R1E_CWIDTH, &cwidth, 1);
- em28xx_write_regs(dev, EM28XX_R1F_CHEIGHT, &cheight, 1);
- em28xx_write_regs(dev, EM28XX_R1B_OFLOW, &overflow, 1);
-
- /* FIXME: function/meaning of these registers ? */
- /* FIXME: align width+height to multiples of 4 ?! */
- if (dev->is_em25xx) {
- em28xx_write_reg(dev, 0x34, width >> 4);
- em28xx_write_reg(dev, 0x35, height >> 4);
- }
-}
-
-static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v)
-{
- u8 mode;
- /* the em2800 scaler only supports scaling down to 50% */
-
- if (dev->board.is_em2800) {
- mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00);
} else {
- u8 buf[2];
-
- buf[0] = h;
- buf[1] = h >> 8;
- em28xx_write_regs(dev, EM28XX_R30_HSCALELOW, (char *)buf, 2);
-
- buf[0] = v;
- buf[1] = v >> 8;
- em28xx_write_regs(dev, EM28XX_R32_VSCALELOW, (char *)buf, 2);
- /* it seems that both H and V scalers must be active
- to work correctly */
- mode = (h || v) ? 0x30 : 0x00;
- }
- return em28xx_write_reg_bits(dev, EM28XX_R26_COMPR, mode, 0x30);
-}
-
-/* FIXME: this only function read values from dev */
-int em28xx_resolution_set(struct em28xx *dev)
-{
- int width, height;
- width = norm_maxw(dev);
- height = norm_maxh(dev);
-
- /* Properly setup VBI */
- dev->vbi_width = 720;
- if (dev->norm & V4L2_STD_525_60)
- dev->vbi_height = 12;
- else
- dev->vbi_height = 18;
-
- em28xx_set_outfmt(dev);
+ /* FIXME: which is the best order? */
+ /* video registers are sampled by VREF */
+ rc = em28xx_write_reg_bits(dev, EM28XX_R0C_USBSUSP,
+ start ? 0x10 : 0x00, 0x10);
+ if (rc < 0)
+ return rc;
- em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2);
+ if (start) {
+ if (dev->board.is_webcam)
+ rc = em28xx_write_reg(dev, 0x13, 0x0c);
- /* If we don't set the start position to 2 in VBI mode, we end up
- with line 20/21 being YUYV encoded instead of being in 8-bit
- greyscale. The core of the issue is that line 21 (and line 23 for
- PAL WSS) are inside of active video region, and as a result they
- get the pixelformatting associated with that area. So by cropping
- it out, we end up with the same format as the rest of the VBI
- region */
- if (em28xx_vbi_supported(dev) == 1)
- em28xx_capture_area_set(dev, 0, 2, width, height);
- else
- em28xx_capture_area_set(dev, 0, 0, width, height);
+ /* Enable video capture */
+ rc = em28xx_write_reg(dev, 0x48, 0x00);
- return em28xx_scaler_set(dev, dev->hscale, dev->vscale);
-}
+ if (dev->mode == EM28XX_ANALOG_MODE)
+ rc = em28xx_write_reg(dev,
+ EM28XX_R12_VINENABLE, 0x67);
+ else
+ rc = em28xx_write_reg(dev,
+ EM28XX_R12_VINENABLE, 0x37);
-/* Set USB alternate setting for analog video */
-int em28xx_set_alternate(struct em28xx *dev)
-{
- int errCode;
- int i;
- unsigned int min_pkt_size = dev->width * 2 + 4;
-
- /* NOTE: for isoc transfers, only alt settings > 0 are allowed
- bulk transfers seem to work only with alt=0 ! */
- dev->alt = 0;
- if ((alt > 0) && (alt < dev->num_alt)) {
- em28xx_coredbg("alternate forced to %d\n", dev->alt);
- dev->alt = alt;
- goto set_alt;
+ msleep(6);
+ } else {
+ /* disable video capture */
+ rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x27);
+ }
}
- if (dev->analog_xfer_bulk)
- goto set_alt;
- /* When image size is bigger than a certain value,
- the frame size should be increased, otherwise, only
- green screen will be received.
- */
- if (dev->width * 2 * dev->height > 720 * 240 * 2)
- min_pkt_size *= 2;
+ if (rc < 0)
+ return rc;
- for (i = 0; i < dev->num_alt; i++) {
- /* stop when the selected alt setting offers enough bandwidth */
- if (dev->alt_max_pkt_size_isoc[i] >= min_pkt_size) {
- dev->alt = i;
- break;
- /* otherwise make sure that we end up with the maximum bandwidth
- because the min_pkt_size equation might be wrong...
- */
- } else if (dev->alt_max_pkt_size_isoc[i] >
- dev->alt_max_pkt_size_isoc[dev->alt])
- dev->alt = i;
+ /* Switch (explicitly controlled) analog capturing LED on/off */
+ if (dev->mode == EM28XX_ANALOG_MODE) {
+ const struct em28xx_led *led;
+ led = em28xx_find_led(dev, EM28XX_LED_ANALOG_CAPTURING);
+ if (led)
+ em28xx_write_reg_bits(dev, led->gpio_reg,
+ (!start ^ led->inverted) ?
+ ~led->gpio_mask : led->gpio_mask,
+ led->gpio_mask);
}
-set_alt:
- /* NOTE: for bulk transfers, we need to call usb_set_interface()
- * even if the previous settings were the same. Otherwise streaming
- * fails with all urbs having status = -EOVERFLOW ! */
- if (dev->analog_xfer_bulk) {
- dev->max_pkt_size = 512; /* USB 2.0 spec */
- dev->packet_multiplier = EM28XX_BULK_PACKET_MULTIPLIER;
- } else { /* isoc */
- em28xx_coredbg("minimum isoc packet size: %u (alt=%d)\n",
- min_pkt_size, dev->alt);
- dev->max_pkt_size =
- dev->alt_max_pkt_size_isoc[dev->alt];
- dev->packet_multiplier = EM28XX_NUM_ISOC_PACKETS;
- }
- em28xx_coredbg("setting alternate %d with wMaxPacketSize=%u\n",
- dev->alt, dev->max_pkt_size);
- errCode = usb_set_interface(dev->udev, 0, dev->alt);
- if (errCode < 0) {
- em28xx_errdev("cannot change alternate number to %d (error=%i)\n",
- dev->alt, errCode);
- return errCode;
- }
- return 0;
+ return rc;
}
int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio)
@@ -1238,18 +1040,6 @@ int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
EXPORT_SYMBOL_GPL(em28xx_init_usb_xfer);
/*
- * em28xx_wake_i2c()
- * configure i2c attached devices
- */
-void em28xx_wake_i2c(struct em28xx *dev)
-{
- v4l2_device_call_all(&dev->v4l2_dev, 0, core, reset, 0);
- v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_routing,
- INPUT(dev->ctl_input)->vmux, 0, 0);
- v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
-}
-
-/*
* Device control list
*/
@@ -1272,7 +1062,7 @@ int em28xx_register_extension(struct em28xx_ops *ops)
ops->init(dev);
}
mutex_unlock(&em28xx_devlist_mutex);
- printk(KERN_INFO "Em28xx: Initialized (%s) extension\n", ops->name);
+ printk(KERN_INFO "em28xx: Registered (%s) extension\n", ops->name);
return 0;
}
EXPORT_SYMBOL(em28xx_register_extension);
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 344042bb845c..a0a669e81362 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -51,10 +51,14 @@
#include "a8293.h"
#include "qt1010.h"
#include "mb86a20s.h"
+#include "m88ds3103.h"
+#include "m88ts2022.h"
-MODULE_DESCRIPTION("driver for em28xx based DVB cards");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC " - digital TV interface");
+MODULE_VERSION(EM28XX_VERSION);
+
static unsigned int debug;
module_param(debug, int, 0644);
@@ -87,6 +91,7 @@ struct em28xx_dvb {
struct semaphore pll_mutex;
bool dont_attach_fe1;
int lna_gpio;
+ struct i2c_client *i2c_client_tuner;
};
@@ -198,7 +203,7 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
dvb_alt = dev->dvb_alt_isoc;
}
- usb_set_interface(dev->udev, 0, dvb_alt);
+ usb_set_interface(dev->udev, dev->ifnum, dvb_alt);
rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
if (rc < 0)
return rc;
@@ -271,7 +276,7 @@ static int em28xx_stop_feed(struct dvb_demux_feed *feed)
static int em28xx_dvb_bus_ctrl(struct dvb_frontend *fe, int acquire)
{
struct em28xx_i2c_bus *i2c_bus = fe->dvb->priv;
- struct em28xx *dev = i2c_bus->dev;
+ struct em28xx *dev = i2c_bus->dev;
if (acquire)
return em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
@@ -370,7 +375,6 @@ static struct drxk_config terratec_h5_drxk = {
.no_i2c_bridge = 1,
.microcode_name = "dvb-usb-terratec-h5-drxk.fw",
.qam_demod_parameter_count = 2,
- .load_firmware_sync = true,
};
static struct drxk_config hauppauge_930c_drxk = {
@@ -380,7 +384,6 @@ static struct drxk_config hauppauge_930c_drxk = {
.microcode_name = "dvb-usb-hauppauge-hvr930c-drxk.fw",
.chunk_size = 56,
.qam_demod_parameter_count = 2,
- .load_firmware_sync = true,
};
static struct drxk_config terratec_htc_stick_drxk = {
@@ -394,7 +397,6 @@ static struct drxk_config terratec_htc_stick_drxk = {
.antenna_dvbt = true,
/* The windows driver uses the same. This will disable LNA. */
.antenna_gpio = 0x6,
- .load_firmware_sync = true,
};
static struct drxk_config maxmedia_ub425_tc_drxk = {
@@ -403,7 +405,6 @@ static struct drxk_config maxmedia_ub425_tc_drxk = {
.no_i2c_bridge = 1,
.microcode_name = "dvb-demod-drxk-01.fw",
.chunk_size = 62,
- .load_firmware_sync = true,
.qam_demod_parameter_count = 2,
};
@@ -415,7 +416,6 @@ static struct drxk_config pctv_520e_drxk = {
.chunk_size = 58,
.antenna_dvbt = true, /* disable LNA */
.antenna_gpio = (1 << 2), /* disable LNA */
- .load_firmware_sync = true,
};
static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
@@ -808,6 +808,14 @@ static struct tda18271_config c3tech_duo_tda18271_config = {
.small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
};
+static const struct m88ds3103_config pctv_461e_m88ds3103_config = {
+ .i2c_addr = 0x68,
+ .clock = 27000000,
+ .i2c_wr_max = 33,
+ .clock_out = 0,
+ .ts_mode = M88DS3103_TS_PARALLEL_16,
+ .agc = 0x99,
+};
/* ------------------------------------------------------------------ */
@@ -815,11 +823,16 @@ static int em28xx_attach_xc3028(u8 addr, struct em28xx *dev)
{
struct dvb_frontend *fe;
struct xc2028_config cfg;
+ struct xc2028_ctrl ctl;
memset(&cfg, 0, sizeof(cfg));
cfg.i2c_adap = &dev->i2c_adap[dev->def_i2c_bus];
cfg.i2c_addr = addr;
+ memset(&ctl, 0, sizeof(ctl));
+ em28xx_setup_xc3028(dev, &ctl);
+ cfg.ctrl = &ctl;
+
if (!dev->dvb->fe[0]) {
em28xx_errdev("/2: dvb frontend not attached. "
"Can't attach xc3028\n");
@@ -979,12 +992,18 @@ static int em28xx_dvb_init(struct em28xx *dev)
int result = 0, mfe_shared = 0;
struct em28xx_dvb *dvb;
+ if (dev->is_audio_only) {
+ /* Shouldn't initialize the DVB extension for this interface */
+ return 0;
+ }
+
if (!dev->board.has_dvb) {
/* This device does not support the extension */
- printk(KERN_INFO "em28xx_dvb: This device does not support the extension\n");
return 0;
}
+ em28xx_info("Binding DVB extension\n");
+
dvb = kzalloc(sizeof(struct em28xx_dvb), GFP_KERNEL);
if (dvb == NULL) {
@@ -994,6 +1013,27 @@ static int em28xx_dvb_init(struct em28xx *dev)
dev->dvb = dvb;
dvb->fe[0] = dvb->fe[1] = NULL;
+ /* pre-allocate DVB usb transfer buffers */
+ if (dev->dvb_xfer_bulk) {
+ result = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
+ dev->dvb_xfer_bulk,
+ EM28XX_DVB_NUM_BUFS,
+ 512,
+ EM28XX_DVB_BULK_PACKET_MULTIPLIER);
+ } else {
+ result = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
+ dev->dvb_xfer_bulk,
+ EM28XX_DVB_NUM_BUFS,
+ dev->dvb_max_pkt_size_isoc,
+ EM28XX_DVB_NUM_ISOC_PACKETS);
+ }
+ if (result) {
+ em28xx_errdev("em28xx_dvb: failed to pre-allocate USB transfer buffers for DVB.\n");
+ kfree(dvb);
+ dev->dvb = NULL;
+ return result;
+ }
+
mutex_lock(&dev->lock);
em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
/* init frontend */
@@ -1330,6 +1370,48 @@ static int em28xx_dvb_init(struct em28xx *dev)
goto out_free;
}
break;
+ case EM28178_BOARD_PCTV_461E:
+ {
+ /* demod I2C adapter */
+ struct i2c_adapter *i2c_adapter;
+ struct i2c_board_info info;
+ struct m88ts2022_config m88ts2022_config = {
+ .clock = 27000000,
+ };
+ memset(&info, 0, sizeof(struct i2c_board_info));
+
+ /* attach demod */
+ dvb->fe[0] = dvb_attach(m88ds3103_attach,
+ &pctv_461e_m88ds3103_config,
+ &dev->i2c_adap[dev->def_i2c_bus],
+ &i2c_adapter);
+ if (dvb->fe[0] == NULL) {
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ /* attach tuner */
+ m88ts2022_config.fe = dvb->fe[0];
+ strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &m88ts2022_config;
+ request_module("m88ts2022");
+ dvb->i2c_client_tuner = i2c_new_device(i2c_adapter, &info);
+
+ /* delegate signal strength measurement to tuner */
+ dvb->fe[0]->ops.read_signal_strength =
+ dvb->fe[0]->ops.tuner_ops.get_rf_strength;
+
+ /* attach SEC */
+ if (!dvb_attach(a8293_attach, dvb->fe[0],
+ &dev->i2c_adap[dev->def_i2c_bus],
+ &em28xx_a8293_config)) {
+ dvb_frontend_detach(dvb->fe[0]);
+ result = -ENODEV;
+ goto out_free;
+ }
+ }
+ break;
default:
em28xx_errdev("/2: The frontend of your DVB/ATSC card"
" isn't supported yet\n");
@@ -1354,7 +1436,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
/* MFE lock */
dvb->adapter.mfe_shared = mfe_shared;
- em28xx_info("Successfully loaded em28xx-dvb\n");
+ em28xx_info("DVB extension successfully initialized\n");
ret:
em28xx_set_mode(dev, EM28XX_SUSPEND);
mutex_unlock(&dev->lock);
@@ -1375,14 +1457,23 @@ static inline void prevent_sleep(struct dvb_frontend_ops *ops)
static int em28xx_dvb_fini(struct em28xx *dev)
{
+ if (dev->is_audio_only) {
+ /* Nothing to release: the DVB extension isn't initialized for this interface */
+ return 0;
+ }
+
if (!dev->board.has_dvb) {
/* This device does not support the extension */
return 0;
}
+ em28xx_info("Closing DVB extension");
+
if (dev->dvb) {
struct em28xx_dvb *dvb = dev->dvb;
+ em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
+
if (dev->disconnected) {
/* We cannot tell the device to sleep
* once it has been unplugged. */
@@ -1392,6 +1483,7 @@ static int em28xx_dvb_fini(struct em28xx *dev)
prevent_sleep(&dvb->fe[1]->ops);
}
+ i2c_release_client(dvb->i2c_client_tuner);
em28xx_unregister_dvb(dvb);
kfree(dvb);
dev->dvb = NULL;
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index c4ff9739a7ae..7e1724076ac4 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -26,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/i2c.h>
+#include <linux/jiffies.h>
#include "em28xx.h"
#include "tuner-xc2028.h"
@@ -40,7 +41,7 @@ MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time");
static unsigned int i2c_debug;
module_param(i2c_debug, int, 0644);
-MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
+MODULE_PARM_DESC(i2c_debug, "i2c debug message level (1: normal debug, 2: show I2C transfers)");
/*
* em2800_i2c_send_bytes()
@@ -48,8 +49,8 @@ MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
*/
static int em2800_i2c_send_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
{
+ unsigned long timeout = jiffies + msecs_to_jiffies(EM28XX_I2C_XFER_TIMEOUT);
int ret;
- int write_timeout;
u8 b2[6];
if (len < 1 || len > 4)
@@ -74,22 +75,26 @@ static int em2800_i2c_send_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
return (ret < 0) ? ret : -EIO;
}
/* wait for completion */
- for (write_timeout = EM2800_I2C_XFER_TIMEOUT; write_timeout > 0;
- write_timeout -= 5) {
+ while (time_is_after_jiffies(timeout)) {
ret = dev->em28xx_read_reg(dev, 0x05);
- if (ret == 0x80 + len - 1) {
+ if (ret == 0x80 + len - 1)
return len;
- } else if (ret == 0x94 + len - 1) {
- return -ENODEV;
- } else if (ret < 0) {
+ if (ret == 0x94 + len - 1) {
+ if (i2c_debug == 1)
+ em28xx_warn("R05 returned 0x%02x: I2C timeout",
+ ret);
+ return -ENXIO;
+ }
+ if (ret < 0) {
em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
ret);
return ret;
}
msleep(5);
}
- em28xx_warn("write to i2c device at 0x%x timed out\n", addr);
- return -EIO;
+ if (i2c_debug)
+ em28xx_warn("write to i2c device at 0x%x timed out\n", addr);
+ return -ETIMEDOUT;
}
/*
@@ -98,9 +103,9 @@ static int em2800_i2c_send_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
*/
static int em2800_i2c_recv_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
{
+ unsigned long timeout = jiffies + msecs_to_jiffies(EM28XX_I2C_XFER_TIMEOUT);
u8 buf2[4];
int ret;
- int read_timeout;
int i;
if (len < 1 || len > 4)
@@ -117,22 +122,28 @@ static int em2800_i2c_recv_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
}
/* wait for completion */
- for (read_timeout = EM2800_I2C_XFER_TIMEOUT; read_timeout > 0;
- read_timeout -= 5) {
+ while (time_is_after_jiffies(timeout)) {
ret = dev->em28xx_read_reg(dev, 0x05);
- if (ret == 0x84 + len - 1) {
+ if (ret == 0x84 + len - 1)
break;
- } else if (ret == 0x94 + len - 1) {
- return -ENODEV;
- } else if (ret < 0) {
+ if (ret == 0x94 + len - 1) {
+ if (i2c_debug == 1)
+ em28xx_warn("R05 returned 0x%02x: I2C timeout",
+ ret);
+ return -ENXIO;
+ }
+ if (ret < 0) {
em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
ret);
return ret;
}
msleep(5);
}
- if (ret != 0x84 + len - 1)
- em28xx_warn("read from i2c device at 0x%x timed out\n", addr);
+ if (ret != 0x84 + len - 1) {
+ if (i2c_debug)
+ em28xx_warn("read from i2c device at 0x%x timed out\n",
+ addr);
+ }
/* get the received message */
ret = dev->em28xx_read_reg_req_len(dev, 0x00, 4-len, buf2, len);
@@ -168,7 +179,8 @@ static int em2800_i2c_check_for_device(struct em28xx *dev, u8 addr)
static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
u16 len, int stop)
{
- int write_timeout, ret;
+ unsigned long timeout = jiffies + msecs_to_jiffies(EM28XX_I2C_XFER_TIMEOUT);
+ int ret;
if (len < 1 || len > 64)
return -EOPNOTSUPP;
@@ -191,16 +203,19 @@ static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
}
}
- /* Check success of the i2c operation */
- for (write_timeout = EM2800_I2C_XFER_TIMEOUT; write_timeout > 0;
- write_timeout -= 5) {
+ /* wait for completion */
+ while (time_is_after_jiffies(timeout)) {
ret = dev->em28xx_read_reg(dev, 0x05);
- if (ret == 0) { /* success */
+ if (ret == 0) /* success */
return len;
- } else if (ret == 0x10) {
- return -ENODEV;
- } else if (ret < 0) {
- em28xx_warn("failed to read i2c transfer status from bridge (error=%i)\n",
+ if (ret == 0x10) {
+ if (i2c_debug == 1)
+ em28xx_warn("I2C transfer timeout on writing to addr 0x%02x",
+ addr);
+ return -ENXIO;
+ }
+ if (ret < 0) {
+ em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
ret);
return ret;
}
@@ -211,8 +226,10 @@ static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
* (even with high payload) ...
*/
}
- em28xx_warn("write to i2c device at 0x%x timed out\n", addr);
- return -EIO;
+ if (i2c_debug)
+ em28xx_warn("write to i2c device at 0x%x timed out (status=%i)\n",
+ addr, ret);
+ return -ETIMEDOUT;
}
/*
@@ -242,26 +259,28 @@ static int em28xx_i2c_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf, u16 len)
* bytes if we are on bus B AND there was no write attempt to the
* specified slave address before AND no device is present at the
* requested slave address.
- * Anyway, the next check will fail with -ENODEV in this case, so avoid
+ * Anyway, the next check will fail with -ENXIO in this case, so avoid
* spamming the system log on device probing and do nothing here.
*/
/* Check success of the i2c operation */
ret = dev->em28xx_read_reg(dev, 0x05);
+ if (ret == 0) /* success */
+ return len;
if (ret < 0) {
- em28xx_warn("failed to read i2c transfer status from bridge (error=%i)\n",
+ em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
ret);
return ret;
}
- if (ret > 0) {
- if (ret == 0x10) {
- return -ENODEV;
- } else {
- em28xx_warn("unknown i2c error (status=%i)\n", ret);
- return -EIO;
- }
+ if (ret == 0x10) {
+ if (i2c_debug == 1)
+ em28xx_warn("I2C transfer timeout on writing to addr 0x%02x",
+ addr);
+ return -ENXIO;
}
- return len;
+
+ em28xx_warn("unknown i2c error (status=%i)\n", ret);
+ return -ETIMEDOUT;
}
/*
@@ -316,8 +335,12 @@ static int em25xx_bus_B_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
*/
if (!ret)
return len;
- else if (ret > 0)
- return -ENODEV;
+ else if (ret > 0) {
+ if (i2c_debug == 1)
+ em28xx_warn("Bus B R08 returned 0x%02x: I2C timeout",
+ ret);
+ return -ENXIO;
+ }
return ret;
/*
@@ -355,7 +378,7 @@ static int em25xx_bus_B_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf,
* bytes if we are on bus B AND there was no write attempt to the
* specified slave address before AND no device is present at the
* requested slave address.
- * Anyway, the next check will fail with -ENODEV in this case, so avoid
+ * Anyway, the next check will fail with -ENXIO in this case, so avoid
* spamming the system log on device probing and do nothing here.
*/
@@ -367,8 +390,12 @@ static int em25xx_bus_B_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf,
*/
if (!ret)
return len;
- else if (ret > 0)
- return -ENODEV;
+ else if (ret > 0) {
+ if (i2c_debug == 1)
+ em28xx_warn("Bus B R08 returned 0x%02x: I2C timeout",
+ ret);
+ return -ENXIO;
+ }
return ret;
/*
@@ -409,10 +436,6 @@ static inline int i2c_check_for_device(struct em28xx_i2c_bus *i2c_bus, u16 addr)
rc = em2800_i2c_check_for_device(dev, addr);
else if (i2c_bus->algo_type == EM28XX_I2C_ALGO_EM25XX_BUS_B)
rc = em25xx_bus_B_check_for_device(dev, addr);
- if (rc == -ENODEV) {
- if (i2c_debug)
- printk(" no device\n");
- }
return rc;
}
@@ -421,7 +444,7 @@ static inline int i2c_recv_bytes(struct em28xx_i2c_bus *i2c_bus,
{
struct em28xx *dev = i2c_bus->dev;
u16 addr = msg.addr << 1;
- int byte, rc = -EOPNOTSUPP;
+ int rc = -EOPNOTSUPP;
if (i2c_bus->algo_type == EM28XX_I2C_ALGO_EM28XX)
rc = em28xx_i2c_recv_bytes(dev, addr, msg.buf, msg.len);
@@ -429,10 +452,6 @@ static inline int i2c_recv_bytes(struct em28xx_i2c_bus *i2c_bus,
rc = em2800_i2c_recv_bytes(dev, addr, msg.buf, msg.len);
else if (i2c_bus->algo_type == EM28XX_I2C_ALGO_EM25XX_BUS_B)
rc = em25xx_bus_B_recv_bytes(dev, addr, msg.buf, msg.len);
- if (i2c_debug) {
- for (byte = 0; byte < msg.len; byte++)
- printk(" %02x", msg.buf[byte]);
- }
return rc;
}
@@ -441,12 +460,8 @@ static inline int i2c_send_bytes(struct em28xx_i2c_bus *i2c_bus,
{
struct em28xx *dev = i2c_bus->dev;
u16 addr = msg.addr << 1;
- int byte, rc = -EOPNOTSUPP;
+ int rc = -EOPNOTSUPP;
- if (i2c_debug) {
- for (byte = 0; byte < msg.len; byte++)
- printk(" %02x", msg.buf[byte]);
- }
if (i2c_bus->algo_type == EM28XX_I2C_ALGO_EM28XX)
rc = em28xx_i2c_send_bytes(dev, addr, msg.buf, msg.len, stop);
else if (i2c_bus->algo_type == EM28XX_I2C_ALGO_EM2800)
@@ -491,33 +506,53 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap,
}
for (i = 0; i < num; i++) {
addr = msgs[i].addr << 1;
- if (i2c_debug)
+ if (i2c_debug > 1)
printk(KERN_DEBUG "%s at %s: %s %s addr=%02x len=%d:",
dev->name, __func__ ,
(msgs[i].flags & I2C_M_RD) ? "read" : "write",
i == num - 1 ? "stop" : "nonstop",
addr, msgs[i].len);
- if (!msgs[i].len) { /* no len: check only for device presence */
+ if (!msgs[i].len) {
+ /*
+ * no len: check only for device presence
+ * This code is only called during device probe.
+ */
rc = i2c_check_for_device(i2c_bus, addr);
- if (rc == -ENODEV) {
+ if (rc < 0) {
+ if (rc == -ENXIO) {
+ if (i2c_debug > 1)
+ printk(KERN_CONT " no device\n");
+ rc = -ENODEV;
+ } else {
+ if (i2c_debug > 1)
+ printk(KERN_CONT " ERROR: %i\n", rc);
+ }
rt_mutex_unlock(&dev->i2c_bus_lock);
return rc;
}
} else if (msgs[i].flags & I2C_M_RD) {
/* read bytes */
rc = i2c_recv_bytes(i2c_bus, msgs[i]);
+
+ if (i2c_debug > 1 && rc >= 0)
+ printk(KERN_CONT " %*ph",
+ msgs[i].len, msgs[i].buf);
} else {
+ if (i2c_debug > 1)
+ printk(KERN_CONT " %*ph",
+ msgs[i].len, msgs[i].buf);
+
/* write bytes */
rc = i2c_send_bytes(i2c_bus, msgs[i], i == num - 1);
}
if (rc < 0) {
- if (i2c_debug)
- printk(" ERROR: %i\n", rc);
+ if (i2c_debug > 1)
+ printk(KERN_CONT " ERROR: %i\n", rc);
rt_mutex_unlock(&dev->i2c_bus_lock);
return rc;
}
- if (i2c_debug)
- printk("\n");
+ if (i2c_debug > 1)
+ printk(KERN_CONT "\n");
}
rt_mutex_unlock(&dev->i2c_bus_lock);
@@ -600,7 +635,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
* calculation and returned device dataset. Simplifies the code a lot,
* but we might have to deal with multiple sizes in the future !
*/
- int i, err;
+ int err;
struct em28xx_eeprom *dev_config;
u8 buf, *data;
@@ -631,20 +666,14 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
goto error;
}
- /* Display eeprom content */
- for (i = 0; i < len; i++) {
- if (0 == (i % 16)) {
- if (dev->eeprom_addrwidth_16bit)
- em28xx_info("i2c eeprom %04x:", i);
- else
- em28xx_info("i2c eeprom %02x:", i);
- }
- printk(" %02x", data[i]);
- if (15 == (i % 16))
- printk("\n");
+ if (i2c_debug) {
+ /* Display eeprom content */
+ print_hex_dump(KERN_INFO, "eeprom ", DUMP_PREFIX_OFFSET,
+ 16, 1, data, len, true);
+
+ if (dev->eeprom_addrwidth_16bit)
+ em28xx_info("eeprom %06x: ... (skipped)\n", 256);
}
- if (dev->eeprom_addrwidth_16bit)
- em28xx_info("i2c eeprom %04x: ... (skipped)\n", i);
if (dev->eeprom_addrwidth_16bit &&
data[0] == 0x26 && data[3] == 0x00) {
@@ -736,10 +765,16 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
em28xx_info("\tAC97 audio (5 sample rates)\n");
break;
case 2:
- em28xx_info("\tI2S audio, sample rate=32k\n");
+ if (dev->chip_id < CHIP_ID_EM2860)
+ em28xx_info("\tI2S audio, sample rate=32k\n");
+ else
+ em28xx_info("\tI2S audio, 3 sample rates\n");
break;
case 3:
- em28xx_info("\tI2S audio, 3 sample rates\n");
+ if (dev->chip_id < CHIP_ID_EM2860)
+ em28xx_info("\tI2S audio, 3 sample rates\n");
+ else
+ em28xx_info("\tI2S audio, 5 sample rates\n");
break;
}
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index ea181e4b68c5..18f65d89d4bc 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -30,8 +30,9 @@
#include "em28xx.h"
-#define EM28XX_SNAPSHOT_KEY KEY_CAMERA
-#define EM28XX_SBUTTON_QUERY_INTERVAL 500
+#define EM28XX_SNAPSHOT_KEY KEY_CAMERA
+#define EM28XX_BUTTONS_DEBOUNCED_QUERY_INTERVAL 500 /* [ms] */
+#define EM28XX_BUTTONS_VOLATILE_QUERY_INTERVAL 100 /* [ms] */
static unsigned int ir_debug;
module_param(ir_debug, int, 0644);
@@ -442,6 +443,7 @@ static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
case CHIP_ID_EM2884:
case CHIP_ID_EM2874:
case CHIP_ID_EM28174:
+ case CHIP_ID_EM28178:
return em2874_ir_change_protocol(rc_dev, rc_type);
default:
printk("Unrecognized em28xx chip id 0x%02x: IR not supported\n",
@@ -470,54 +472,98 @@ static int em28xx_probe_i2c_ir(struct em28xx *dev)
}
/**********************************************************
- Handle Webcam snapshot button
+ Handle buttons
**********************************************************/
-static void em28xx_query_sbutton(struct work_struct *work)
+static void em28xx_query_buttons(struct work_struct *work)
{
- /* Poll the register and see if the button is depressed */
struct em28xx *dev =
- container_of(work, struct em28xx, sbutton_query_work.work);
- int ret;
-
- ret = em28xx_read_reg(dev, EM28XX_R0C_USBSUSP);
-
- if (ret & EM28XX_R0C_USBSUSP_SNAPSHOT) {
- u8 cleared;
- /* Button is depressed, clear the register */
- cleared = ((u8) ret) & ~EM28XX_R0C_USBSUSP_SNAPSHOT;
- em28xx_write_regs(dev, EM28XX_R0C_USBSUSP, &cleared, 1);
-
- /* Not emulate the keypress */
- input_report_key(dev->sbutton_input_dev, EM28XX_SNAPSHOT_KEY,
- 1);
- /* Now unpress the key */
- input_report_key(dev->sbutton_input_dev, EM28XX_SNAPSHOT_KEY,
- 0);
+ container_of(work, struct em28xx, buttons_query_work.work);
+ u8 i, j;
+ int regval;
+ bool is_pressed, was_pressed;
+ const struct em28xx_led *led;
+
+ /* Poll and evaluate all addresses */
+ for (i = 0; i < dev->num_button_polling_addresses; i++) {
+ /* Read value from register */
+ regval = em28xx_read_reg(dev, dev->button_polling_addresses[i]);
+ if (regval < 0)
+ continue;
+ /* Check states of the buttons and act */
+ j = 0;
+ while (dev->board.buttons[j].role >= 0 &&
+ dev->board.buttons[j].role < EM28XX_NUM_BUTTON_ROLES) {
+ struct em28xx_button *button = &dev->board.buttons[j];
+ /* Check if button uses the current address */
+ if (button->reg_r != dev->button_polling_addresses[i]) {
+ j++;
+ continue;
+ }
+ /* Determine if button is and was pressed last time */
+ is_pressed = regval & button->mask;
+ was_pressed = dev->button_polling_last_values[i]
+ & button->mask;
+ if (button->inverted) {
+ is_pressed = !is_pressed;
+ was_pressed = !was_pressed;
+ }
+ /* Clear button state (if needed) */
+ if (is_pressed && button->reg_clearing)
+ em28xx_write_reg(dev, button->reg_clearing,
+ (~regval & button->mask)
+ | (regval & ~button->mask));
+ /* Handle button state */
+ if (!is_pressed || was_pressed) {
+ j++;
+ continue;
+ }
+ switch (button->role) {
+ case EM28XX_BUTTON_SNAPSHOT:
+ /* Emulate the keypress */
+ input_report_key(dev->sbutton_input_dev,
+ EM28XX_SNAPSHOT_KEY, 1);
+ /* Unpress the key */
+ input_report_key(dev->sbutton_input_dev,
+ EM28XX_SNAPSHOT_KEY, 0);
+ break;
+ case EM28XX_BUTTON_ILLUMINATION:
+ led = em28xx_find_led(dev,
+ EM28XX_LED_ILLUMINATION);
+ /* Switch illumination LED on/off */
+ if (led)
+ em28xx_toggle_reg_bits(dev,
+ led->gpio_reg,
+ led->gpio_mask);
+ break;
+ default:
+ WARN_ONCE(1, "BUG: unhandled button role.");
+ }
+ /* Next button */
+ j++;
+ }
+ /* Save current value for comparison during the next polling */
+ dev->button_polling_last_values[i] = regval;
}
-
/* Schedule next poll */
- schedule_delayed_work(&dev->sbutton_query_work,
- msecs_to_jiffies(EM28XX_SBUTTON_QUERY_INTERVAL));
+ schedule_delayed_work(&dev->buttons_query_work,
+ msecs_to_jiffies(dev->button_polling_interval));
}
-static void em28xx_register_snapshot_button(struct em28xx *dev)
+static int em28xx_register_snapshot_button(struct em28xx *dev)
{
struct input_dev *input_dev;
int err;
em28xx_info("Registering snapshot button...\n");
input_dev = input_allocate_device();
- if (!input_dev) {
- em28xx_errdev("input_allocate_device failed\n");
- return;
- }
+ if (!input_dev)
+ return -ENOMEM;
usb_make_path(dev->udev, dev->snapshot_button_path,
sizeof(dev->snapshot_button_path));
strlcat(dev->snapshot_button_path, "/sbutton",
sizeof(dev->snapshot_button_path));
- INIT_DELAYED_WORK(&dev->sbutton_query_work, em28xx_query_sbutton);
input_dev->name = "em28xx snapshot button";
input_dev->phys = dev->snapshot_button_path;
@@ -535,25 +581,86 @@ static void em28xx_register_snapshot_button(struct em28xx *dev)
if (err) {
em28xx_errdev("input_register_device failed\n");
input_free_device(input_dev);
- return;
+ return err;
}
dev->sbutton_input_dev = input_dev;
- schedule_delayed_work(&dev->sbutton_query_work,
- msecs_to_jiffies(EM28XX_SBUTTON_QUERY_INTERVAL));
- return;
+ return 0;
+}
+static void em28xx_init_buttons(struct em28xx *dev)
+{
+ u8 i = 0, j = 0;
+ bool addr_new = 0;
+
+ dev->button_polling_interval = EM28XX_BUTTONS_DEBOUNCED_QUERY_INTERVAL;
+ while (dev->board.buttons[i].role >= 0 &&
+ dev->board.buttons[i].role < EM28XX_NUM_BUTTON_ROLES) {
+ struct em28xx_button *button = &dev->board.buttons[i];
+ /* Check if polling address is already on the list */
+ addr_new = 1;
+ for (j = 0; j < dev->num_button_polling_addresses; j++) {
+ if (button->reg_r == dev->button_polling_addresses[j]) {
+ addr_new = 0;
+ break;
+ }
+ }
+ /* Check if max. number of polling addresses is exceeded */
+ if (addr_new && dev->num_button_polling_addresses
+ >= EM28XX_NUM_BUTTON_ADDRESSES_MAX) {
+ WARN_ONCE(1, "BUG: maximum number of button polling addresses exceeded.");
+ goto next_button;
+ }
+ /* Button role specific checks and actions */
+ if (button->role == EM28XX_BUTTON_SNAPSHOT) {
+ /* Register input device */
+ if (em28xx_register_snapshot_button(dev) < 0)
+ goto next_button;
+ } else if (button->role == EM28XX_BUTTON_ILLUMINATION) {
+ /* Check sanity */
+ if (!em28xx_find_led(dev, EM28XX_LED_ILLUMINATION)) {
+ em28xx_errdev("BUG: illumination button defined, but no illumination LED.\n");
+ goto next_button;
+ }
+ }
+ /* Add read address to list of polling addresses */
+ if (addr_new) {
+ unsigned int index = dev->num_button_polling_addresses;
+ dev->button_polling_addresses[index] = button->reg_r;
+ dev->num_button_polling_addresses++;
+ }
+ /* Reduce polling interval if necessary */
+ if (!button->reg_clearing)
+ dev->button_polling_interval =
+ EM28XX_BUTTONS_VOLATILE_QUERY_INTERVAL;
+next_button:
+ /* Next button */
+ i++;
+ }
+
+ /* Start polling */
+ if (dev->num_button_polling_addresses) {
+ memset(dev->button_polling_last_values, 0,
+ EM28XX_NUM_BUTTON_ADDRESSES_MAX);
+ INIT_DELAYED_WORK(&dev->buttons_query_work,
+ em28xx_query_buttons);
+ schedule_delayed_work(&dev->buttons_query_work,
+ msecs_to_jiffies(dev->button_polling_interval));
+ }
}
-static void em28xx_deregister_snapshot_button(struct em28xx *dev)
+static void em28xx_shutdown_buttons(struct em28xx *dev)
{
+ /* Cancel polling */
+ cancel_delayed_work_sync(&dev->buttons_query_work);
+ /* Clear polling addresses list */
+ dev->num_button_polling_addresses = 0;
+ /* Deregister input devices */
if (dev->sbutton_input_dev != NULL) {
em28xx_info("Deregistering snapshot button\n");
- cancel_delayed_work_sync(&dev->sbutton_query_work);
input_unregister_device(dev->sbutton_input_dev);
dev->sbutton_input_dev = NULL;
}
- return;
}
static int em28xx_ir_init(struct em28xx *dev)
@@ -564,8 +671,13 @@ static int em28xx_ir_init(struct em28xx *dev)
u64 rc_type;
u16 i2c_rc_dev_addr = 0;
- if (dev->board.has_snapshot_button)
- em28xx_register_snapshot_button(dev);
+ if (dev->is_audio_only) {
+ /* Shouldn't initialize IR for this interface */
+ return 0;
+ }
+
+ if (dev->board.buttons)
+ em28xx_init_buttons(dev);
if (dev->board.has_ir_i2c) {
i2c_rc_dev_addr = em28xx_probe_i2c_ir(dev);
@@ -583,6 +695,8 @@ static int em28xx_ir_init(struct em28xx *dev)
return 0;
}
+ em28xx_info("Registering input extension\n");
+
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
rc = rc_allocate_device();
if (!ir || !rc)
@@ -633,6 +747,7 @@ static int em28xx_ir_init(struct em28xx *dev)
case CHIP_ID_EM2884:
case CHIP_ID_EM2874:
case CHIP_ID_EM28174:
+ case CHIP_ID_EM28178:
ir->get_key = em2874_polling_getkey;
rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC |
RC_BIT_RC6_0;
@@ -675,6 +790,8 @@ static int em28xx_ir_init(struct em28xx *dev)
if (err)
goto error;
+ em28xx_info("Input extension successfully initalized\n");
+
return 0;
error:
@@ -688,7 +805,14 @@ static int em28xx_ir_fini(struct em28xx *dev)
{
struct em28xx_IR *ir = dev->ir;
- em28xx_deregister_snapshot_button(dev);
+ if (dev->is_audio_only) {
+ /* Shouldn't initialize IR for this interface */
+ return 0;
+ }
+
+ em28xx_info("Closing input extension");
+
+ em28xx_shutdown_buttons(dev);
/* skip detach on non attached boards */
if (!ir)
@@ -722,7 +846,8 @@ static void __exit em28xx_rc_unregister(void)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
-MODULE_DESCRIPTION("Em28xx Input driver");
+MODULE_DESCRIPTION(DRIVER_DESC " - input interface");
+MODULE_VERSION(EM28XX_VERSION);
module_init(em28xx_rc_register);
module_exit(em28xx_rc_unregister);
diff --git a/drivers/media/usb/em28xx/em28xx-reg.h b/drivers/media/usb/em28xx/em28xx-reg.h
index 0e0477847965..311fb349dafa 100644
--- a/drivers/media/usb/em28xx/em28xx-reg.h
+++ b/drivers/media/usb/em28xx/em28xx-reg.h
@@ -25,10 +25,12 @@
#define EM28XX_R00_CHIPCFG 0x00
/* em28xx Chip Configuration 0x00 */
-#define EM28XX_CHIPCFG_VENDOR_AUDIO 0x80
-#define EM28XX_CHIPCFG_I2S_VOLUME_CAPABLE 0x40
-#define EM28XX_CHIPCFG_I2S_5_SAMPRATES 0x30
-#define EM28XX_CHIPCFG_I2S_3_SAMPRATES 0x20
+#define EM2860_CHIPCFG_VENDOR_AUDIO 0x80
+#define EM2860_CHIPCFG_I2S_VOLUME_CAPABLE 0x40
+#define EM2820_CHIPCFG_I2S_3_SAMPRATES 0x30
+#define EM2860_CHIPCFG_I2S_5_SAMPRATES 0x30
+#define EM2820_CHIPCFG_I2S_1_SAMPRATE 0x20
+#define EM2860_CHIPCFG_I2S_3_SAMPRATES 0x20
#define EM28XX_CHIPCFG_AC97 0x10
#define EM28XX_CHIPCFG_AUDIOMASK 0x30
@@ -245,6 +247,7 @@ enum em28xx_chip_id {
CHIP_ID_EM2874 = 65,
CHIP_ID_EM2884 = 68,
CHIP_ID_EM28174 = 113,
+ CHIP_ID_EM28178 = 114,
};
/*
diff --git a/drivers/media/usb/em28xx/em28xx-v4l.h b/drivers/media/usb/em28xx/em28xx-v4l.h
new file mode 100644
index 000000000000..bce438691e0e
--- /dev/null
+++ b/drivers/media/usb/em28xx/em28xx-v4l.h
@@ -0,0 +1,20 @@
+/*
+ em28xx-video.c - driver for Empia EM2800/EM2820/2840 USB
+ video capture devices
+
+ Copyright (C) 2013-2014 Mauro Carvalho Chehab <m.chehab@samsung.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ */
+
+
+int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count);
+int em28xx_stop_vbi_streaming(struct vb2_queue *vq);
+extern struct vb2_ops em28xx_vbi_qops;
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 39f39c527c13..db3d655600df 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -27,6 +27,7 @@
#include <linux/init.h>
#include "em28xx.h"
+#include "em28xx-v4l.h"
static unsigned int vbibufs = 5;
module_param(vbibufs, int, 0644);
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index dd19c9ff76e0..c3c928937dcd 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -38,9 +38,11 @@
#include <linux/slab.h>
#include "em28xx.h"
+#include "em28xx-v4l.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
+#include <media/v4l2-clk.h>
#include <media/msp3400.h>
#include <media/tuner.h>
@@ -49,19 +51,23 @@
"Mauro Carvalho Chehab <mchehab@infradead.org>, " \
"Sascha Sommer <saschasommer@freenet.de>"
-#define DRIVER_DESC "Empia em28xx based USB video device driver"
+static unsigned int isoc_debug;
+module_param(isoc_debug, int, 0644);
+MODULE_PARM_DESC(isoc_debug, "enable debug messages [isoc transfers]");
+
+static unsigned int disable_vbi;
+module_param(disable_vbi, int, 0644);
+MODULE_PARM_DESC(disable_vbi, "disable vbi support");
-#define EM28XX_VERSION "0.2.0"
+static int alt;
+module_param(alt, int, 0644);
+MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
#define em28xx_videodbg(fmt, arg...) do {\
if (video_debug) \
printk(KERN_INFO "%s %s :"fmt, \
dev->name, __func__ , ##arg); } while (0)
-static unsigned int isoc_debug;
-module_param(isoc_debug, int, 0644);
-MODULE_PARM_DESC(isoc_debug, "enable debug messages [isoc transfers]");
-
#define em28xx_isocdbg(fmt, arg...) \
do {\
if (isoc_debug) { \
@@ -71,7 +77,7 @@ do {\
} while (0)
MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_DESCRIPTION(DRIVER_DESC " - v4l2 interface");
MODULE_LICENSE("GPL");
MODULE_VERSION(EM28XX_VERSION);
@@ -135,6 +141,257 @@ static struct em28xx_fmt format[] = {
},
};
+static int em28xx_vbi_supported(struct em28xx *dev)
+{
+ /* Modprobe option to manually disable */
+ if (disable_vbi == 1)
+ return 0;
+
+ if (dev->board.is_webcam)
+ return 0;
+
+ /* FIXME: check subdevices for VBI support */
+
+ if (dev->chip_id == CHIP_ID_EM2860 ||
+ dev->chip_id == CHIP_ID_EM2883)
+ return 1;
+
+ /* Version of em28xx that does not support VBI */
+ return 0;
+}
+
+/*
+ * em28xx_wake_i2c()
+ * configure i2c attached devices
+ */
+static void em28xx_wake_i2c(struct em28xx *dev)
+{
+ v4l2_device_call_all(&dev->v4l2_dev, 0, core, reset, 0);
+ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_routing,
+ INPUT(dev->ctl_input)->vmux, 0, 0);
+ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
+}
+
+static int em28xx_colorlevels_set_default(struct em28xx *dev)
+{
+ em28xx_write_reg(dev, EM28XX_R20_YGAIN, CONTRAST_DEFAULT);
+ em28xx_write_reg(dev, EM28XX_R21_YOFFSET, BRIGHTNESS_DEFAULT);
+ em28xx_write_reg(dev, EM28XX_R22_UVGAIN, SATURATION_DEFAULT);
+ em28xx_write_reg(dev, EM28XX_R23_UOFFSET, BLUE_BALANCE_DEFAULT);
+ em28xx_write_reg(dev, EM28XX_R24_VOFFSET, RED_BALANCE_DEFAULT);
+ em28xx_write_reg(dev, EM28XX_R25_SHARPNESS, SHARPNESS_DEFAULT);
+
+ em28xx_write_reg(dev, EM28XX_R14_GAMMA, 0x20);
+ em28xx_write_reg(dev, EM28XX_R15_RGAIN, 0x20);
+ em28xx_write_reg(dev, EM28XX_R16_GGAIN, 0x20);
+ em28xx_write_reg(dev, EM28XX_R17_BGAIN, 0x20);
+ em28xx_write_reg(dev, EM28XX_R18_ROFFSET, 0x00);
+ em28xx_write_reg(dev, EM28XX_R19_GOFFSET, 0x00);
+ return em28xx_write_reg(dev, EM28XX_R1A_BOFFSET, 0x00);
+}
+
+static int em28xx_set_outfmt(struct em28xx *dev)
+{
+ int ret;
+ u8 fmt, vinctrl;
+
+ fmt = dev->format->reg;
+ if (!dev->is_em25xx)
+ fmt |= 0x20;
+ /*
+ * NOTE: it's not clear if this is really needed !
+ * The datasheets say bit 5 is a reserved bit and devices seem to work
+ * fine without it. But the Windows driver sets it for em2710/50+em28xx
+ * devices and we've always been setting it, too.
+ *
+ * em2765 (em25xx, em276x/7x/8x) devices do NOT work with this bit set,
+ * it's likely used for an additional (compressed ?) format there.
+ */
+ ret = em28xx_write_reg(dev, EM28XX_R27_OUTFMT, fmt);
+ if (ret < 0)
+ return ret;
+
+ ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, dev->vinmode);
+ if (ret < 0)
+ return ret;
+
+ vinctrl = dev->vinctl;
+ if (em28xx_vbi_supported(dev) == 1) {
+ vinctrl |= EM28XX_VINCTRL_VBI_RAW;
+ em28xx_write_reg(dev, EM28XX_R34_VBI_START_H, 0x00);
+ em28xx_write_reg(dev, EM28XX_R36_VBI_WIDTH, dev->vbi_width/4);
+ em28xx_write_reg(dev, EM28XX_R37_VBI_HEIGHT, dev->vbi_height);
+ if (dev->norm & V4L2_STD_525_60) {
+ /* NTSC */
+ em28xx_write_reg(dev, EM28XX_R35_VBI_START_V, 0x09);
+ } else if (dev->norm & V4L2_STD_625_50) {
+ /* PAL */
+ em28xx_write_reg(dev, EM28XX_R35_VBI_START_V, 0x07);
+ }
+ }
+
+ return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, vinctrl);
+}
+
+static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax,
+ u8 ymin, u8 ymax)
+{
+ em28xx_videodbg("em28xx Scale: (%d,%d)-(%d,%d)\n",
+ xmin, ymin, xmax, ymax);
+
+ em28xx_write_regs(dev, EM28XX_R28_XMIN, &xmin, 1);
+ em28xx_write_regs(dev, EM28XX_R29_XMAX, &xmax, 1);
+ em28xx_write_regs(dev, EM28XX_R2A_YMIN, &ymin, 1);
+ return em28xx_write_regs(dev, EM28XX_R2B_YMAX, &ymax, 1);
+}
+
+static void em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart,
+ u16 width, u16 height)
+{
+ u8 cwidth = width >> 2;
+ u8 cheight = height >> 2;
+ u8 overflow = (height >> 9 & 0x02) | (width >> 10 & 0x01);
+ /* NOTE: size limit: 2047x1023 = 2MPix */
+
+ em28xx_videodbg("capture area set to (%d,%d): %dx%d\n",
+ hstart, vstart,
+ ((overflow & 2) << 9 | cwidth << 2),
+ ((overflow & 1) << 10 | cheight << 2));
+
+ em28xx_write_regs(dev, EM28XX_R1C_HSTART, &hstart, 1);
+ em28xx_write_regs(dev, EM28XX_R1D_VSTART, &vstart, 1);
+ em28xx_write_regs(dev, EM28XX_R1E_CWIDTH, &cwidth, 1);
+ em28xx_write_regs(dev, EM28XX_R1F_CHEIGHT, &cheight, 1);
+ em28xx_write_regs(dev, EM28XX_R1B_OFLOW, &overflow, 1);
+
+ /* FIXME: function/meaning of these registers ? */
+ /* FIXME: align width+height to multiples of 4 ?! */
+ if (dev->is_em25xx) {
+ em28xx_write_reg(dev, 0x34, width >> 4);
+ em28xx_write_reg(dev, 0x35, height >> 4);
+ }
+}
+
+static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v)
+{
+ u8 mode;
+ /* the em2800 scaler only supports scaling down to 50% */
+
+ if (dev->board.is_em2800) {
+ mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00);
+ } else {
+ u8 buf[2];
+
+ buf[0] = h;
+ buf[1] = h >> 8;
+ em28xx_write_regs(dev, EM28XX_R30_HSCALELOW, (char *)buf, 2);
+
+ buf[0] = v;
+ buf[1] = v >> 8;
+ em28xx_write_regs(dev, EM28XX_R32_VSCALELOW, (char *)buf, 2);
+ /* it seems that both H and V scalers must be active
+ to work correctly */
+ mode = (h || v) ? 0x30 : 0x00;
+ }
+ return em28xx_write_reg_bits(dev, EM28XX_R26_COMPR, mode, 0x30);
+}
+
+/* FIXME: this function only reads values from dev */
+static int em28xx_resolution_set(struct em28xx *dev)
+{
+ int width, height;
+ width = norm_maxw(dev);
+ height = norm_maxh(dev);
+
+ /* Properly setup VBI */
+ dev->vbi_width = 720;
+ if (dev->norm & V4L2_STD_525_60)
+ dev->vbi_height = 12;
+ else
+ dev->vbi_height = 18;
+
+ em28xx_set_outfmt(dev);
+
+ em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2);
+
+ /* If we don't set the start position to 2 in VBI mode, we end up
+ with line 20/21 being YUYV encoded instead of being in 8-bit
+ greyscale. The core of the issue is that line 21 (and line 23 for
+	   PAL WSS) are inside the active video region, and as a result they
+	   get the pixel formatting associated with that area. So by cropping
+ it out, we end up with the same format as the rest of the VBI
+ region */
+ if (em28xx_vbi_supported(dev) == 1)
+ em28xx_capture_area_set(dev, 0, 2, width, height);
+ else
+ em28xx_capture_area_set(dev, 0, 0, width, height);
+
+ return em28xx_scaler_set(dev, dev->hscale, dev->vscale);
+}
+
+/* Set USB alternate setting for analog video */
+static int em28xx_set_alternate(struct em28xx *dev)
+{
+ int errCode;
+ int i;
+ unsigned int min_pkt_size = dev->width * 2 + 4;
+
+	/* NOTE: for isoc transfers, only alt settings > 0 are allowed;
+		 bulk transfers seem to work only with alt=0 ! */
+ dev->alt = 0;
+ if ((alt > 0) && (alt < dev->num_alt)) {
+ em28xx_videodbg("alternate forced to %d\n", dev->alt);
+ dev->alt = alt;
+ goto set_alt;
+ }
+ if (dev->analog_xfer_bulk)
+ goto set_alt;
+
+	/* When the image size is bigger than a certain value,
+	   the frame size should be increased; otherwise, only
+	   a green screen will be received.
+	 */
+ if (dev->width * 2 * dev->height > 720 * 240 * 2)
+ min_pkt_size *= 2;
+
+ for (i = 0; i < dev->num_alt; i++) {
+ /* stop when the selected alt setting offers enough bandwidth */
+ if (dev->alt_max_pkt_size_isoc[i] >= min_pkt_size) {
+ dev->alt = i;
+ break;
+ /* otherwise make sure that we end up with the maximum bandwidth
+ because the min_pkt_size equation might be wrong...
+ */
+ } else if (dev->alt_max_pkt_size_isoc[i] >
+ dev->alt_max_pkt_size_isoc[dev->alt])
+ dev->alt = i;
+ }
+
+set_alt:
+ /* NOTE: for bulk transfers, we need to call usb_set_interface()
+ * even if the previous settings were the same. Otherwise streaming
+ * fails with all urbs having status = -EOVERFLOW ! */
+ if (dev->analog_xfer_bulk) {
+ dev->max_pkt_size = 512; /* USB 2.0 spec */
+ dev->packet_multiplier = EM28XX_BULK_PACKET_MULTIPLIER;
+ } else { /* isoc */
+ em28xx_videodbg("minimum isoc packet size: %u (alt=%d)\n",
+ min_pkt_size, dev->alt);
+ dev->max_pkt_size =
+ dev->alt_max_pkt_size_isoc[dev->alt];
+ dev->packet_multiplier = EM28XX_NUM_ISOC_PACKETS;
+ }
+ em28xx_videodbg("setting alternate %d with wMaxPacketSize=%u\n",
+ dev->alt, dev->max_pkt_size);
+ errCode = usb_set_interface(dev->udev, dev->ifnum, dev->alt);
+ if (errCode < 0) {
+ em28xx_errdev("cannot change alternate number to %d (error=%i)\n",
+ dev->alt, errCode);
+ return errCode;
+ }
+ return 0;
+}
+
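
Note (not part of the patch): the alternate-setting selection above boils down to "take the first setting whose isoc payload covers the required bandwidth, otherwise fall back to the setting with the largest payload". A minimal standalone sketch of that logic, with made-up function and array names:

static unsigned int pick_alt_setting(const unsigned int *max_pkt_size,
				     unsigned int num_alt,
				     unsigned int min_pkt_size)
{
	unsigned int i, best = 0;

	for (i = 0; i < num_alt; i++) {
		/* first alt setting that offers enough bandwidth wins */
		if (max_pkt_size[i] >= min_pkt_size)
			return i;
		/* otherwise remember the setting with the largest payload */
		if (max_pkt_size[i] > max_pkt_size[best])
			best = i;
	}
	return best;
}
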
/* ------------------------------------------------------------------
DMA and thread functions
------------------------------------------------------------------*/
@@ -763,7 +1020,7 @@ static struct vb2_ops em28xx_video_qops = {
.wait_finish = vb2_ops_wait_finish,
};
-int em28xx_vb2_setup(struct em28xx *dev)
+static int em28xx_vb2_setup(struct em28xx *dev)
{
int rc;
struct vb2_queue *q;
@@ -831,7 +1088,7 @@ static void video_mux(struct em28xx *dev, int index)
em28xx_audio_analog_set(dev);
}
-void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
+static void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
{
struct em28xx *dev = priv;
@@ -890,7 +1147,7 @@ static int em28xx_s_ctrl(struct v4l2_ctrl *ctrl)
return (ret < 0) ? ret : 0;
}
-const struct v4l2_ctrl_ops em28xx_ctrl_ops = {
+static const struct v4l2_ctrl_ops em28xx_ctrl_ops = {
.s_ctrl = em28xx_s_ctrl,
};
@@ -1368,7 +1625,7 @@ static int vidioc_g_register(struct file *file, void *priv,
reg->val = ret;
} else {
__le16 val = 0;
- ret = em28xx_read_reg_req_len(dev, USB_REQ_GET_STATUS,
+ ret = dev->em28xx_read_reg_req_len(dev, USB_REQ_GET_STATUS,
reg->reg, (char *)&val, 2);
if (ret < 0)
return ret;
@@ -1570,6 +1827,10 @@ static int em28xx_v4l2_open(struct file *filp)
case VFL_TYPE_VBI:
fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
break;
+ case VFL_TYPE_RADIO:
+ break;
+ default:
+ return -EINVAL;
}
em28xx_videodbg("open dev=%s type=%s users=%d\n",
@@ -1590,15 +1851,17 @@ static int em28xx_v4l2_open(struct file *filp)
fh->type = fh_type;
filp->private_data = fh;
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) {
+ if (dev->users == 0) {
em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
- em28xx_resolution_set(dev);
- /* Needed, since GPIO might have disabled power of
- some i2c device
+ if (vdev->vfl_type != VFL_TYPE_RADIO)
+ em28xx_resolution_set(dev);
+
+ /*
+ * Needed, since GPIO might have disabled power
+ * of some i2c devices
*/
em28xx_wake_i2c(dev);
-
}
if (vdev->vfl_type == VFL_TYPE_RADIO) {
@@ -1615,40 +1878,59 @@ static int em28xx_v4l2_open(struct file *filp)
}
/*
- * em28xx_realease_resources()
+ * em28xx_v4l2_fini()
* unregisters the v4l2,i2c and usb devices
* called when the device gets disconnected or at module unload
*/
-void em28xx_release_analog_resources(struct em28xx *dev)
+static int em28xx_v4l2_fini(struct em28xx *dev)
{
+ if (dev->is_audio_only) {
+		/* Nothing to do: the V4L2 extension is not used on this interface */
+ return 0;
+ }
+
+ if (!dev->has_video) {
+ /* This device does not support the v4l2 extension */
+ return 0;
+ }
- /*FIXME: I2C IR should be disconnected */
+ em28xx_info("Closing video extension");
+
+ mutex_lock(&dev->lock);
+
+ v4l2_device_disconnect(&dev->v4l2_dev);
+
+ em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
if (dev->radio_dev) {
- if (video_is_registered(dev->radio_dev))
- video_unregister_device(dev->radio_dev);
- else
- video_device_release(dev->radio_dev);
- dev->radio_dev = NULL;
+ em28xx_info("V4L2 device %s deregistered\n",
+ video_device_node_name(dev->radio_dev));
+ video_unregister_device(dev->radio_dev);
}
if (dev->vbi_dev) {
em28xx_info("V4L2 device %s deregistered\n",
video_device_node_name(dev->vbi_dev));
- if (video_is_registered(dev->vbi_dev))
- video_unregister_device(dev->vbi_dev);
- else
- video_device_release(dev->vbi_dev);
- dev->vbi_dev = NULL;
+ video_unregister_device(dev->vbi_dev);
}
if (dev->vdev) {
em28xx_info("V4L2 device %s deregistered\n",
video_device_node_name(dev->vdev));
- if (video_is_registered(dev->vdev))
- video_unregister_device(dev->vdev);
- else
- video_device_release(dev->vdev);
- dev->vdev = NULL;
+ video_unregister_device(dev->vdev);
}
+
+ if (dev->clk) {
+ v4l2_clk_unregister_fixed(dev->clk);
+ dev->clk = NULL;
+ }
+
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ if (dev->users)
+ em28xx_warn("Device is open ! Memory deallocation is deferred on last close.\n");
+ mutex_unlock(&dev->lock);
+
+ return 0;
}
/*
@@ -1668,14 +1950,10 @@ static int em28xx_v4l2_close(struct file *filp)
mutex_lock(&dev->lock);
if (dev->users == 1) {
- /* the device is already disconnect,
- free the remaining resources */
+ /* free the remaining resources if device is disconnected */
if (dev->disconnected) {
- em28xx_release_resources(dev);
kfree(dev->alt_max_pkt_size_isoc);
- mutex_unlock(&dev->lock);
- kfree(dev);
- return 0;
+ goto exit;
}
/* Save some power by putting tuner to sleep */
@@ -1694,11 +1972,29 @@ static int em28xx_v4l2_close(struct file *filp)
}
}
+exit:
dev->users--;
mutex_unlock(&dev->lock);
return 0;
}
+/*
+ * em28xx_videodevice_release()
+ * called when the last user of the video device exits and frees the memory
+ */
+static void em28xx_videodevice_release(struct video_device *vdev)
+{
+ struct em28xx *dev = video_get_drvdata(vdev);
+
+ video_device_release(vdev);
+ if (vdev == dev->vdev)
+ dev->vdev = NULL;
+ else if (vdev == dev->vbi_dev)
+ dev->vbi_dev = NULL;
+ else if (vdev == dev->radio_dev)
+ dev->radio_dev = NULL;
+}
+
static const struct v4l2_file_operations em28xx_v4l_fops = {
.owner = THIS_MODULE,
.open = em28xx_v4l2_open,
@@ -1753,11 +2049,10 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
};
static const struct video_device em28xx_video_template = {
- .fops = &em28xx_v4l_fops,
- .release = video_device_release_empty,
- .ioctl_ops = &video_ioctl_ops,
-
- .tvnorms = V4L2_STD_ALL,
+ .fops = &em28xx_v4l_fops,
+ .ioctl_ops = &video_ioctl_ops,
+ .release = em28xx_videodevice_release,
+ .tvnorms = V4L2_STD_ALL,
};
static const struct v4l2_file_operations radio_fops = {
@@ -1783,14 +2078,30 @@ static const struct v4l2_ioctl_ops radio_ioctl_ops = {
};
static struct video_device em28xx_radio_template = {
- .name = "em28xx-radio",
- .fops = &radio_fops,
- .ioctl_ops = &radio_ioctl_ops,
+ .fops = &radio_fops,
+ .ioctl_ops = &radio_ioctl_ops,
+ .release = em28xx_videodevice_release,
};
-/******************************** usb interface ******************************/
+/* Possible I2C addresses of the saa7115, tvp5150, msp3400 and tvaudio chips */
+static unsigned short saa711x_addrs[] = {
+ 0x4a >> 1, 0x48 >> 1, /* SAA7111, SAA7111A and SAA7113 */
+ 0x42 >> 1, 0x40 >> 1, /* SAA7114, SAA7115 and SAA7118 */
+ I2C_CLIENT_END };
+static unsigned short tvp5150_addrs[] = {
+ 0xb8 >> 1,
+ 0xba >> 1,
+ I2C_CLIENT_END
+};
+static unsigned short msp3400_addrs[] = {
+ 0x80 >> 1,
+ 0x88 >> 1,
+ I2C_CLIENT_END
+};
+
+/******************************** usb interface ******************************/
static struct video_device *em28xx_vdev_init(struct em28xx *dev,
const struct video_device *template,
@@ -1817,14 +2128,198 @@ static struct video_device *em28xx_vdev_init(struct em28xx *dev,
return vfd;
}
-int em28xx_register_analog_devices(struct em28xx *dev)
+static void em28xx_tuner_setup(struct em28xx *dev)
+{
+ struct tuner_setup tun_setup;
+ struct v4l2_frequency f;
+
+ if (dev->tuner_type == TUNER_ABSENT)
+ return;
+
+ memset(&tun_setup, 0, sizeof(tun_setup));
+
+ tun_setup.mode_mask = T_ANALOG_TV | T_RADIO;
+ tun_setup.tuner_callback = em28xx_tuner_callback;
+
+ if (dev->board.radio.type) {
+ tun_setup.type = dev->board.radio.type;
+ tun_setup.addr = dev->board.radio_addr;
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup);
+ }
+
+ if ((dev->tuner_type != TUNER_ABSENT) && (dev->tuner_type)) {
+ tun_setup.type = dev->tuner_type;
+ tun_setup.addr = dev->tuner_addr;
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup);
+ }
+
+ if (dev->tda9887_conf) {
+ struct v4l2_priv_tun_config tda9887_cfg;
+
+ tda9887_cfg.tuner = TUNER_TDA9887;
+ tda9887_cfg.priv = &dev->tda9887_conf;
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config, &tda9887_cfg);
+ }
+
+ if (dev->tuner_type == TUNER_XC2028) {
+ struct v4l2_priv_tun_config xc2028_cfg;
+ struct xc2028_ctrl ctl;
+
+ memset(&xc2028_cfg, 0, sizeof(xc2028_cfg));
+ memset(&ctl, 0, sizeof(ctl));
+
+ em28xx_setup_xc3028(dev, &ctl);
+
+ xc2028_cfg.tuner = TUNER_XC2028;
+ xc2028_cfg.priv = &ctl;
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config, &xc2028_cfg);
+ }
+
+ /* configure tuner */
+ f.tuner = 0;
+ f.type = V4L2_TUNER_ANALOG_TV;
+ f.frequency = 9076; /* just a magic number */
+ dev->ctl_freq = f.frequency;
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
+}
+
+static int em28xx_v4l2_init(struct em28xx *dev)
{
u8 val;
int ret;
unsigned int maxw;
+ struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
+
+ if (dev->is_audio_only) {
+		/* Shouldn't initialize the V4L2 extension for this interface */
+ return 0;
+ }
+
+ if (!dev->has_video) {
+ /* This device does not support the v4l2 extension */
+ return 0;
+ }
+
+ em28xx_info("Registering V4L2 extension\n");
+
+ mutex_lock(&dev->lock);
+
+ ret = v4l2_device_register(&dev->udev->dev, &dev->v4l2_dev);
+ if (ret < 0) {
+ em28xx_errdev("Call to v4l2_device_register() failed!\n");
+ goto err;
+ }
+
+ v4l2_ctrl_handler_init(hdl, 8);
+ dev->v4l2_dev.ctrl_handler = hdl;
+
+ /*
+ * Default format, used for tvp5150 or saa711x output formats
+ */
+ dev->vinmode = 0x10;
+ dev->vinctl = EM28XX_VINCTRL_INTERLACED |
+ EM28XX_VINCTRL_CCIR656_ENABLE;
+
+ /* request some modules */
+
+ if (dev->board.has_msp34xx)
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+ "msp3400", 0, msp3400_addrs);
+
+ if (dev->board.decoder == EM28XX_SAA711X)
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+ "saa7115_auto", 0, saa711x_addrs);
+
+ if (dev->board.decoder == EM28XX_TVP5150)
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+ "tvp5150", 0, tvp5150_addrs);
+
+ if (dev->board.adecoder == EM28XX_TVAUDIO)
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+ "tvaudio", dev->board.tvaudio_addr, NULL);
+
+ /* Initialize tuner and camera */
+
+ if (dev->board.tuner_type != TUNER_ABSENT) {
+ int has_demod = (dev->tda9887_conf & TDA9887_PRESENT);
+
+ if (dev->board.radio.type)
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+ "tuner", dev->board.radio_addr, NULL);
+
+ if (has_demod)
+ v4l2_i2c_new_subdev(&dev->v4l2_dev,
+ &dev->i2c_adap[dev->def_i2c_bus], "tuner",
+ 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
+ if (dev->tuner_addr == 0) {
+ enum v4l2_i2c_tuner_type type =
+ has_demod ? ADDRS_TV_WITH_DEMOD : ADDRS_TV;
+ struct v4l2_subdev *sd;
+
+ sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
+ &dev->i2c_adap[dev->def_i2c_bus], "tuner",
+ 0, v4l2_i2c_tuner_addrs(type));
+
+ if (sd)
+ dev->tuner_addr = v4l2_i2c_subdev_addr(sd);
+ } else {
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+ "tuner", dev->tuner_addr, NULL);
+ }
+ }
+
+ em28xx_tuner_setup(dev);
+ em28xx_init_camera(dev);
+
+ /* Configure audio */
+ ret = em28xx_audio_setup(dev);
+ if (ret < 0) {
+ em28xx_errdev("%s: Error while setting audio - error [%d]!\n",
+ __func__, ret);
+ goto unregister_dev;
+ }
+ if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
+ v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, 0, 0x1f, 1, 0x1f);
+ } else {
+ /* install the em28xx notify callback */
+ v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_MUTE),
+ em28xx_ctrl_notify, dev);
+ v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_VOLUME),
+ em28xx_ctrl_notify, dev);
+ }
+
+ /* wake i2c devices */
+ em28xx_wake_i2c(dev);
+
+ /* init video dma queues */
+ INIT_LIST_HEAD(&dev->vidq.active);
+ INIT_LIST_HEAD(&dev->vbiq.active);
- printk(KERN_INFO "%s: v4l2 driver version %s\n",
- dev->name, EM28XX_VERSION);
+ if (dev->board.has_msp34xx) {
+ /* Send a reset to other chips via gpio */
+ ret = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xf7);
+ if (ret < 0) {
+ em28xx_errdev("%s: em28xx_write_reg - msp34xx(1) failed! error [%d]\n",
+ __func__, ret);
+ goto unregister_dev;
+ }
+ msleep(3);
+
+ ret = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xff);
+ if (ret < 0) {
+ em28xx_errdev("%s: em28xx_write_reg - msp34xx(2) failed! error [%d]\n",
+ __func__, ret);
+ goto unregister_dev;
+ }
+ msleep(3);
+ }
/* set default norm */
dev->norm = V4L2_STD_PAL;
@@ -1888,14 +2383,16 @@ int em28xx_register_analog_devices(struct em28xx *dev)
/* Reset image controls */
em28xx_colorlevels_set_default(dev);
v4l2_ctrl_handler_setup(&dev->ctrl_handler);
- if (dev->ctrl_handler.error)
- return dev->ctrl_handler.error;
+ ret = dev->ctrl_handler.error;
+ if (ret)
+ goto unregister_dev;
/* allocate and fill video video_device struct */
dev->vdev = em28xx_vdev_init(dev, &em28xx_video_template, "video");
if (!dev->vdev) {
em28xx_errdev("cannot allocate video_device.\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto unregister_dev;
}
dev->vdev->queue = &dev->vb_vidq;
dev->vdev->queue->lock = &dev->vb_queue_lock;
@@ -1925,7 +2422,7 @@ int em28xx_register_analog_devices(struct em28xx *dev)
if (ret) {
em28xx_errdev("unable to register video device (error=%i).\n",
ret);
- return ret;
+ goto unregister_dev;
}
/* Allocate and fill vbi video_device struct */
@@ -1954,7 +2451,7 @@ int em28xx_register_analog_devices(struct em28xx *dev)
vbi_nr[dev->devno]);
if (ret < 0) {
em28xx_errdev("unable to register vbi device\n");
- return ret;
+ goto unregister_dev;
}
}
@@ -1963,13 +2460,14 @@ int em28xx_register_analog_devices(struct em28xx *dev)
"radio");
if (!dev->radio_dev) {
em28xx_errdev("cannot allocate video_device.\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto unregister_dev;
}
ret = video_register_device(dev->radio_dev, VFL_TYPE_RADIO,
radio_nr[dev->devno]);
if (ret < 0) {
em28xx_errdev("can't register radio device\n");
- return ret;
+ goto unregister_dev;
}
em28xx_info("Registered radio device as %s\n",
video_device_node_name(dev->radio_dev));
@@ -1982,5 +2480,41 @@ int em28xx_register_analog_devices(struct em28xx *dev)
em28xx_info("V4L2 VBI device registered as %s\n",
video_device_node_name(dev->vbi_dev));
+ /* Save some power by putting tuner to sleep */
+ v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_power, 0);
+
+ /* initialize videobuf2 stuff */
+ em28xx_vb2_setup(dev);
+
+ em28xx_info("V4L2 extension successfully initialized\n");
+
+ mutex_unlock(&dev->lock);
return 0;
+
+unregister_dev:
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ v4l2_device_unregister(&dev->v4l2_dev);
+err:
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static struct em28xx_ops v4l2_ops = {
+ .id = EM28XX_V4L2,
+ .name = "Em28xx v4l2 Extension",
+ .init = em28xx_v4l2_init,
+ .fini = em28xx_v4l2_fini,
+};
+
+static int __init em28xx_video_register(void)
+{
+ return em28xx_register_extension(&v4l2_ops);
+}
+
+static void __exit em28xx_video_unregister(void)
+{
+ em28xx_unregister_extension(&v4l2_ops);
}
+
+module_init(em28xx_video_register);
+module_exit(em28xx_video_unregister);
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index f8726ad5d0a8..32d8a4bb7961 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -26,6 +26,9 @@
#ifndef _EM28XX_H
#define _EM28XX_H
+#define EM28XX_VERSION "0.2.1"
+#define DRIVER_DESC "Empia em28xx device driver"
+
#include <linux/workqueue.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
@@ -132,6 +135,8 @@
#define EM2884_BOARD_C3TECH_DIGITAL_DUO 88
#define EM2874_BOARD_DELOCK_61959 89
#define EM2874_BOARD_KWORLD_UB435Q_V2 90
+#define EM2765_BOARD_SPEEDLINK_VAD_LAPLACE 91
+#define EM28178_BOARD_PCTV_461E 92
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
@@ -178,8 +183,27 @@
#define EM28XX_INTERLACED_DEFAULT 1
-/* time in msecs to wait for i2c writes to finish */
-#define EM2800_I2C_XFER_TIMEOUT 20
+/*
+ * Time in msecs to wait for i2c xfers to finish.
+ * 35 ms is the maximum time an SMBus device may stretch the clock
+ * while waiting. As the transfer itself will take some time to
+ * happen, set the timeout slightly above that (36 ms).
+ *
+ * Ok, I2C doesn't specify any limit. So, eventually, we may need
+ * to increase this timeout.
+ *
+ * FIXME: this assumes that an I2C message is not longer than 1ms.
+ * This is actually dependent on the I2C bus speed, although most
+ * devices use a 100kHz clock. So, this assumption is true most of
+ * the time.
+ */
+#define EM28XX_I2C_XFER_TIMEOUT 36
+
+/* time in msecs to wait for AC97 xfers to finish */
+#define EM28XX_AC97_XFER_TIMEOUT 100
+
+/* max. number of button state polling addresses */
+#define EM28XX_NUM_BUTTON_ADDRESSES_MAX 5
enum em28xx_mode {
EM28XX_SUSPEND,
@@ -287,8 +311,7 @@ struct em28xx_audio_mode {
unsigned int has_audio:1;
- unsigned int i2s_3rates:1;
- unsigned int i2s_5rates:1;
+ u8 i2s_samplerates;
};
/* em28xx has two audio inputs: tuner and line in.
@@ -374,6 +397,33 @@ enum em28xx_adecoder {
EM28XX_TVAUDIO,
};
+enum em28xx_led_role {
+ EM28XX_LED_ANALOG_CAPTURING = 0,
+ EM28XX_LED_ILLUMINATION,
+ EM28XX_NUM_LED_ROLES, /* must be the last */
+};
+
+struct em28xx_led {
+ enum em28xx_led_role role;
+ u8 gpio_reg;
+ u8 gpio_mask;
+ bool inverted;
+};
+
+enum em28xx_button_role {
+ EM28XX_BUTTON_SNAPSHOT = 0,
+ EM28XX_BUTTON_ILLUMINATION,
+ EM28XX_NUM_BUTTON_ROLES, /* must be the last */
+};
+
+struct em28xx_button {
+ enum em28xx_button_role role;
+ u8 reg_r;
+ u8 reg_clearing;
+ u8 mask;
+ bool inverted;
+};
+
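
Note (not part of the patch): for illustration only, a board definition could describe its LED and snapshot button with the new structures roughly as below. The register and mask values are placeholders, and how the core detects the end of these arrays is not shown here.

static struct em28xx_led my_board_leds[] = {
	{
		.role      = EM28XX_LED_ANALOG_CAPTURING,
		.gpio_reg  = EM2820_R08_GPIO_CTRL,	/* placeholder register */
		.gpio_mask = 0x01,			/* placeholder mask */
		.inverted  = true,
	},
};

static struct em28xx_button my_board_buttons[] = {
	{
		.role         = EM28XX_BUTTON_SNAPSHOT,
		.reg_r        = 0x0c,	/* placeholder polling register */
		.reg_clearing = 0x0c,
		.mask         = 0x20,	/* placeholder bit */
	},
};

/* ...and the em28xx_board entry would then point at them:
 *	.leds    = my_board_leds,
 *	.buttons = my_board_buttons,
 */
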
struct em28xx_board {
char *name;
int vchannels;
@@ -395,7 +445,6 @@ struct em28xx_board {
unsigned int mts_firmware:1;
unsigned int max_range_640_480:1;
unsigned int has_dvb:1;
- unsigned int has_snapshot_button:1;
unsigned int is_webcam:1;
unsigned int valid:1;
unsigned int has_ir_i2c:1;
@@ -410,6 +459,12 @@ struct em28xx_board {
struct em28xx_input input[MAX_EM28XX_INPUT];
struct em28xx_input radio;
char *ir_codes;
+
+ /* LEDs that need to be controlled explicitly */
+ struct em28xx_led *leds;
+
+ /* Buttons */
+ struct em28xx_button *buttons;
};
struct em28xx_eeprom {
@@ -426,15 +481,13 @@ struct em28xx_eeprom {
u8 string_idx_table;
};
-#define EM28XX_AUDIO_BUFS 5
-#define EM28XX_NUM_AUDIO_PACKETS 64
-#define EM28XX_AUDIO_MAX_PACKET_SIZE 196 /* static value */
#define EM28XX_CAPTURE_STREAM_EN 1
/* em28xx extensions */
#define EM28XX_AUDIO 0x10
#define EM28XX_DVB 0x20
#define EM28XX_RC 0x30
+#define EM28XX_V4L2 0x40
/* em28xx resource types (used for res_get/res_lock etc */
#define EM28XX_RESOURCE_VIDEO 0x01
@@ -442,8 +495,9 @@ struct em28xx_eeprom {
struct em28xx_audio {
char name[50];
- char *transfer_buffer[EM28XX_AUDIO_BUFS];
- struct urb *urb[EM28XX_AUDIO_BUFS];
+ unsigned num_urb;
+ char **transfer_buffer;
+ struct urb **urb;
struct usb_device *udev;
unsigned int capture_transfer_done;
struct snd_pcm_substream *capture_pcm_substream;
@@ -451,6 +505,8 @@ struct em28xx_audio {
unsigned int hwptr_done_capture;
struct snd_card *sndcard;
+ size_t period;
+
int users;
spinlock_t slock;
};
@@ -485,11 +541,13 @@ struct em28xx {
int model; /* index in the device_data struct */
int devno; /* marks the number of this device */
enum em28xx_chip_id chip_id;
- unsigned int is_em25xx:1; /* em25xx/em276x/7x/8x family bridge */
+ unsigned int is_em25xx:1; /* em25xx/em276x/7x/8x family bridge */
unsigned char disconnected:1; /* device has been disconnected */
-
- int audio_ifnum;
+ unsigned int has_video:1;
+ unsigned int has_audio_class:1;
+ unsigned int has_alsa_audio:1;
+ unsigned int is_audio_only:1;
struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler ctrl_handler;
@@ -507,10 +565,6 @@ struct em28xx {
/* Vinmode/Vinctl used at the driver */
int vinmode, vinctl;
- unsigned int has_audio_class:1;
- unsigned int has_alsa_audio:1;
- unsigned int is_audio_only:1;
-
/* Controls audio streaming */
struct work_struct wq_trigger; /* Trigger to start/stop audio for alsa module */
atomic_t stream_started; /* stream should be running if true */
@@ -608,6 +662,7 @@ struct em28xx {
/* usb transfer */
struct usb_device *udev; /* the usb device */
+ u8 ifnum; /* number of the assigned usb interface */
u8 analog_ep_isoc; /* address of isoc endpoint for analog */
u8 analog_ep_bulk; /* address of bulk endpoint for analog */
u8 dvb_ep_isoc; /* address of isoc endpoint for DVB */
@@ -639,10 +694,15 @@ struct em28xx {
enum em28xx_mode mode;
- /* Snapshot button */
+ /* Button state polling */
+ struct delayed_work buttons_query_work;
+ u8 button_polling_addresses[EM28XX_NUM_BUTTON_ADDRESSES_MAX];
+ u8 button_polling_last_values[EM28XX_NUM_BUTTON_ADDRESSES_MAX];
+ u8 num_button_polling_addresses;
+ u16 button_polling_interval; /* [ms] */
+ /* Snapshot button input device */
char snapshot_button_path[30]; /* path of the input dev */
struct input_dev *sbutton_input_dev;
- struct delayed_work sbutton_query_work;
struct em28xx_dvb *dvb;
};
@@ -672,6 +732,7 @@ int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len);
int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val);
int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
u8 bitmask);
+int em28xx_toggle_reg_bits(struct em28xx *dev, u16 reg, u8 bitmask);
int em28xx_read_ac97(struct em28xx *dev, u8 reg);
int em28xx_write_ac97(struct em28xx *dev, u8 reg, u16 val);
@@ -679,12 +740,9 @@ int em28xx_write_ac97(struct em28xx *dev, u8 reg, u16 val);
int em28xx_audio_analog_set(struct em28xx *dev);
int em28xx_audio_setup(struct em28xx *dev);
-int em28xx_colorlevels_set_default(struct em28xx *dev);
+const struct em28xx_led *em28xx_find_led(struct em28xx *dev,
+ enum em28xx_led_role role);
int em28xx_capture_start(struct em28xx *dev, int start);
-int em28xx_vbi_supported(struct em28xx *dev);
-int em28xx_set_outfmt(struct em28xx *dev);
-int em28xx_resolution_set(struct em28xx *dev);
-int em28xx_set_alternate(struct em28xx *dev);
int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
int num_bufs, int max_pkt_size, int packet_multiplier);
int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
@@ -696,30 +754,18 @@ void em28xx_uninit_usb_xfer(struct em28xx *dev, enum em28xx_mode mode);
void em28xx_stop_urbs(struct em28xx *dev);
int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode);
int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio);
-void em28xx_wake_i2c(struct em28xx *dev);
int em28xx_register_extension(struct em28xx_ops *dev);
void em28xx_unregister_extension(struct em28xx_ops *dev);
void em28xx_init_extension(struct em28xx *dev);
void em28xx_close_extension(struct em28xx *dev);
-/* Provided by em28xx-video.c */
-int em28xx_vb2_setup(struct em28xx *dev);
-int em28xx_register_analog_devices(struct em28xx *dev);
-void em28xx_release_analog_resources(struct em28xx *dev);
-void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv);
-int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count);
-int em28xx_stop_vbi_streaming(struct vb2_queue *vq);
-extern const struct v4l2_ctrl_ops em28xx_ctrl_ops;
-
/* Provided by em28xx-cards.c */
extern struct em28xx_board em28xx_boards[];
extern struct usb_device_id em28xx_id_table[];
int em28xx_tuner_callback(void *ptr, int component, int command, int arg);
+void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl);
void em28xx_release_resources(struct em28xx *dev);
-/* Provided by em28xx-vbi.c */
-extern struct vb2_ops em28xx_vbi_qops;
-
/* Provided by em28xx-camera.c */
int em28xx_detect_sensor(struct em28xx *dev);
int em28xx_init_camera(struct em28xx *dev);
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 2f0c89cbac76..c5638964c3f2 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -198,7 +198,6 @@ static int device_authorization(struct hdpvr_device *dev)
hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0);
v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %s\n",
print_buf);
- kfree(print_buf);
#endif
msleep(100);
@@ -214,6 +213,9 @@ static int device_authorization(struct hdpvr_device *dev)
retval = ret != 8;
unlock:
mutex_unlock(&dev->usbc_mutex);
+#ifdef HDPVR_DEBUG
+ kfree(print_buf);
+#endif
return retval;
}
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 78c9bc8e7f56..abf365ab025d 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -1078,7 +1078,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
/* register webcam snapshot button input device */
pdev->button_dev = input_allocate_device();
if (!pdev->button_dev) {
- PWC_ERROR("Err, insufficient memory for webcam snapshot button device.");
rc = -ENOMEM;
goto err_video_unreg;
}
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 8c05565a240e..2189bfb2e828 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -83,14 +83,3 @@ config VIDEOBUF2_DMA_SG
#depends on HAS_DMA
select VIDEOBUF2_CORE
select VIDEOBUF2_MEMOPS
-
-config VIDEO_V4L2_INT_DEVICE
- tristate "V4L2 int device (DEPRECATED)"
- depends on VIDEO_V4L2
- ---help---
- An early framework for a hardware-independent interface for
- image sensors and bridges etc. Currently used by omap24xxcam and
- tcm825x drivers that should be converted to V4L2 subdev.
-
- Do not use for new developments.
-
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index 1a85eee581f8..c6ae7bad951e 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -15,7 +15,6 @@ ifeq ($(CONFIG_OF),y)
endif
obj-$(CONFIG_VIDEO_V4L2) += videodev.o
-obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
obj-$(CONFIG_VIDEO_V4L2) += v4l2-dv-timings.o
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index fb46790d0eca..6ff002bd5909 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -745,6 +745,11 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS: return "VPX Deblocking Effect Control";
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD: return "VPX Golden Frame Refresh Period";
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return "VPX Golden Frame Indicator";
+ case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP: return "VPX Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_PROFILE: return "VPX Profile";
/* CAMERA controls */
/* Keep the order of the 'case's the same as in videodev2.h! */
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index b5aaaac427ad..0a30dbf3d05c 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -872,8 +872,8 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
/* Should not happen since we thought this minor was free */
WARN_ON(video_device[vdev->minor] != NULL);
- video_device[vdev->minor] = vdev;
vdev->index = get_index(vdev);
+ video_device[vdev->minor] = vdev;
mutex_unlock(&videodev_lock);
if (vdev->ioctl_ops)
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index ee52b9f4a944..f7902fe8a526 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -515,6 +515,7 @@ bool v4l2_detect_gtf(unsigned frame_height,
aspect.denominator = 9;
}
image_width = ((image_height * aspect.numerator) / aspect.denominator);
+ image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1);
/* Horizontal */
if (default_gtf)
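
Note (not part of the patch): the added line rounds the computed image width to the nearest multiple of the GTF cell granularity. A minimal sketch of that rounding idiom, assuming the granularity is a power of two as the bitmask trick requires; e.g. with a granularity of 8, a computed width of 1366 becomes 1368.

static unsigned int round_to_gran(unsigned int width, unsigned int gran)
{
	/* add half the granularity, then clear the low-order bits */
	return (width + gran / 2) & ~(gran - 1);
}
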
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 68e6b5e912ff..707aef705a47 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -28,6 +28,9 @@
#include <media/v4l2-device.h>
#include <media/videobuf2-core.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/v4l2.h>
+
/* Zero out the end of the struct pointed to by p. Everything after, but
* not including, the specified field is cleared. */
#define CLEAR_AFTER_FIELD(p, field) \
@@ -2338,6 +2341,12 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
err = func(file, cmd, parg);
if (err == -ENOIOCTLCMD)
err = -ENOTTY;
+ if (err == 0) {
+ if (cmd == VIDIOC_DQBUF)
+ trace_v4l2_dqbuf(video_devdata(file)->minor, parg);
+ else if (cmd == VIDIOC_QBUF)
+ trace_v4l2_qbuf(video_devdata(file)->minor, parg);
+ }
if (has_array_args) {
*kernel_ptr = user_ptr;
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 73035ee0f4de..178ce96556c6 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -558,6 +558,8 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
if (m2m_ctx->m2m_dev->m2m_ops->unlock)
m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
+ else if (m2m_ctx->q_lock)
+ mutex_unlock(m2m_ctx->q_lock);
if (list_empty(&src_q->done_list))
poll_wait(file, &src_q->done_wq, wait);
@@ -566,6 +568,8 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
if (m2m_ctx->m2m_dev->m2m_ops->lock)
m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
+ else if (m2m_ctx->q_lock)
+ mutex_lock(m2m_ctx->q_lock);
spin_lock_irqsave(&src_q->done_lock, flags);
if (!list_empty(&src_q->done_list))
@@ -693,6 +697,13 @@ struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
if (ret)
goto err;
+ /*
+	 * If both queues use the same mutex, assign it as the common buffer
+	 * queue lock in the m2m context. This lock is used by the
+ * v4l2_m2m_ioctl_* helpers.
+ */
+ if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
+ m2m_ctx->q_lock = out_q_ctx->q.lock;
return m2m_ctx;
err:
@@ -740,3 +751,118 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
+/* Videobuf2 ioctl helpers */
+
+int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);
+
+int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);
+
+int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);
+
+int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);
+
+int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);
+
+int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);
+
+int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);
+
+int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
+
+/*
+ * v4l2_file_operations helpers. It is assumed here that the same lock is
+ * used for the output and the capture buffer queues.
+ */
+
+int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
+ int ret;
+
+ if (m2m_ctx->q_lock && mutex_lock_interruptible(m2m_ctx->q_lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_m2m_mmap(file, m2m_ctx, vma);
+
+ if (m2m_ctx->q_lock)
+ mutex_unlock(m2m_ctx->q_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
+
+unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
+ unsigned int ret;
+
+ if (m2m_ctx->q_lock)
+ mutex_lock(m2m_ctx->q_lock);
+
+ ret = v4l2_m2m_poll(file, m2m_ctx, wait);
+
+ if (m2m_ctx->q_lock)
+ mutex_unlock(m2m_ctx->q_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
+
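
Note (not part of the patch): a hedged usage sketch of the new helpers. A mem2mem driver that shares one mutex between its capture and output vb2 queues (so that v4l2_m2m_ctx_init() sets m2m_ctx->q_lock) could wire them into its tables roughly as follows; the struct names are hypothetical and <media/v4l2-ioctl.h> plus <media/v4l2-mem2mem.h> are assumed to be included.

static const struct v4l2_ioctl_ops my_m2m_ioctl_ops = {
	/* ...format negotiation ioctls omitted... */
	.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
	.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
	.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
	.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
	.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
};

static const struct v4l2_file_operations my_m2m_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= v4l2_m2m_fop_mmap,	/* takes m2m_ctx->q_lock */
	.poll		= v4l2_m2m_fop_poll,	/* takes m2m_ctx->q_lock */
};
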
diff --git a/drivers/media/v4l2-core/v4l2-of.c b/drivers/media/v4l2-core/v4l2-of.c
index a6478dca0cde..42e3e8a5e361 100644
--- a/drivers/media/v4l2-core/v4l2-of.c
+++ b/drivers/media/v4l2-core/v4l2-of.c
@@ -121,9 +121,11 @@ static void v4l2_of_parse_parallel_bus(const struct device_node *node,
* the bus as serial CSI-2 and clock-noncontinuous isn't set, we set the
* V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag.
* The caller should hold a reference to @node.
+ *
+ * Return: 0.
*/
-void v4l2_of_parse_endpoint(const struct device_node *node,
- struct v4l2_of_endpoint *endpoint)
+int v4l2_of_parse_endpoint(const struct device_node *node,
+ struct v4l2_of_endpoint *endpoint)
{
struct device_node *port_node = of_get_parent(node);
@@ -146,6 +148,8 @@ void v4l2_of_parse_endpoint(const struct device_node *node,
v4l2_of_parse_parallel_bus(node, endpoint);
of_node_put(port_node);
+
+ return 0;
}
EXPORT_SYMBOL(v4l2_of_parse_endpoint);
@@ -262,6 +266,6 @@ struct device_node *v4l2_of_get_remote_port(const struct device_node *node)
np = of_parse_phandle(node, "remote-endpoint", 0);
if (!np)
return NULL;
- return of_get_parent(np);
+ return of_get_next_parent(np);
}
EXPORT_SYMBOL(v4l2_of_get_remote_port);
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index 65411adcd0ea..7e6b209b7002 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -66,14 +66,11 @@ static void __videobuf_dc_free(struct device *dev,
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
- dev_dbg(q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
+ dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
map->count++;
- videobuf_queue_unlock(q);
}
static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -85,11 +82,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
- if (!--map->count) {
+ map->count--;
+ if (0 == map->count) {
struct videobuf_dma_contig_memory *mem;
dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
/* We need first to cancel streams, before unmapping */
if (q->streaming)
@@ -128,8 +126,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
kfree(map);
+ videobuf_queue_unlock(q);
}
- videobuf_queue_unlock(q);
}
static const struct vm_operations_struct videobuf_vm_ops = {
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 9db674ccdc68..828e7c10bd70 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -338,14 +338,11 @@ EXPORT_SYMBOL_GPL(videobuf_dma_free);
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
map->count++;
- videobuf_queue_unlock(q);
}
static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -358,9 +355,10 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
- if (!--map->count) {
+ map->count--;
+ if (0 == map->count) {
dprintk(1, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
@@ -376,9 +374,9 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
q->bufs[i]->baddr = 0;
q->ops->buf_release(q, q->bufs[i]);
}
+ videobuf_queue_unlock(q);
kfree(map);
}
- videobuf_queue_unlock(q);
return;
}
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
index 1365c651c177..2ff7fcc77b11 100644
--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
@@ -54,14 +54,11 @@ MODULE_LICENSE("GPL");
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
map->count++;
- videobuf_queue_unlock(q);
}
static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -73,11 +70,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
- if (!--map->count) {
+ map->count--;
+ if (0 == map->count) {
struct videobuf_vmalloc_memory *mem;
dprintk(1, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
/* We need first to cancel streams, before unmapping */
if (q->streaming)
@@ -116,8 +114,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
kfree(map);
+ videobuf_queue_unlock(q);
}
- videobuf_queue_unlock(q);
return;
}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 0edc165f418d..a127925c9d61 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -298,10 +298,28 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
* related information, if no buffers are left return the queue to an
* uninitialized state. Might be called even if the queue has already been freed.
*/
-static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
+static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
unsigned int buffer;
+ /*
+ * Sanity check: when preparing a buffer the queue lock is released for
+ * a short while (see __buf_prepare for the details), which would allow
+	 * a race with a reqbufs call, which can end up calling this function. Removing the
+ * buffers from underneath __buf_prepare is obviously a bad idea, so we
+ * check if any of the buffers is in the state PREPARING, and if so we
+ * just return -EAGAIN.
+ */
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ if (q->bufs[buffer] == NULL)
+ continue;
+ if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
+ dprintk(1, "reqbufs: preparing buffers, cannot free\n");
+ return -EAGAIN;
+ }
+ }
+
/* Call driver-provided cleanup function for each buffer, if provided */
if (q->ops->buf_cleanup) {
for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
@@ -326,6 +344,7 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
if (!q->num_buffers)
q->memory = 0;
INIT_LIST_HEAD(&q->queued_list);
+ return 0;
}
/**
@@ -481,6 +500,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
case VB2_BUF_STATE_PREPARED:
b->flags |= V4L2_BUF_FLAG_PREPARED;
break;
+ case VB2_BUF_STATE_PREPARING:
case VB2_BUF_STATE_DEQUEUED:
/* nothing */
break;
@@ -657,7 +677,9 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
return -EBUSY;
}
- __vb2_queue_free(q, q->num_buffers);
+ ret = __vb2_queue_free(q, q->num_buffers);
+ if (ret)
+ return ret;
/*
* In case of REQBUFS(0) return immediately without calling
@@ -1116,7 +1138,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
int ret;
int write = !V4L2_TYPE_IS_OUTPUT(q->type);
- /* Verify and copy relevant information provided by the userspace */
+ /* Copy relevant information provided by the userspace */
__fill_vb2_buffer(vb, b, planes);
for (plane = 0; plane < vb->num_planes; ++plane) {
@@ -1135,6 +1157,8 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
if (planes[plane].length < planes[plane].data_offset +
q->plane_sizes[plane]) {
+ dprintk(1, "qbuf: invalid dmabuf length for plane %d\n",
+ plane);
ret = -EINVAL;
goto err;
}
@@ -1226,6 +1250,7 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)
static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
struct vb2_queue *q = vb->vb2_queue;
+ struct rw_semaphore *mmap_sem;
int ret;
ret = __verify_length(vb, b);
@@ -1235,12 +1260,32 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
return ret;
}
+ vb->state = VB2_BUF_STATE_PREPARING;
switch (q->memory) {
case V4L2_MEMORY_MMAP:
ret = __qbuf_mmap(vb, b);
break;
case V4L2_MEMORY_USERPTR:
+ /*
+ * In case of user pointer buffers vb2 allocators need to get
+ * direct access to userspace pages. This requires getting
+ * the mmap semaphore for read access in the current process
+ * structure. The same semaphore is taken before calling mmap
+ * operation, while both qbuf/prepare_buf and mmap are called
+ * by the driver or v4l2 core with the driver's lock held.
+ * To avoid an AB-BA deadlock (mmap_sem then driver's lock in
+ * mmap and driver's lock then mmap_sem in qbuf/prepare_buf),
+ * the videobuf2 core releases the driver's lock, takes
+ * mmap_sem and then takes the driver's lock again.
+ */
+ mmap_sem = &current->mm->mmap_sem;
+ call_qop(q, wait_prepare, q);
+ down_read(mmap_sem);
+ call_qop(q, wait_finish, q);
+
ret = __qbuf_userptr(vb, b);
+
+ up_read(mmap_sem);
break;
case V4L2_MEMORY_DMABUF:
ret = __qbuf_dmabuf(vb, b);
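
Note (not part of the patch): the comment moved into __buf_prepare above describes a classic lock-ordering fix. A generic sketch of the same dance, using a plain mutex/rwsem pair rather than the actual vb2 wait_prepare/wait_finish callbacks:

/* Both paths must take the two locks in the same order: mmap() already
 * holds mmap_sem and then takes the driver lock, so the qbuf path drops
 * the driver lock before taking mmap_sem and then re-acquires it.
 */
static int pin_userptr_pages(struct mutex *driver_lock,
			     struct rw_semaphore *mmap_sem,
			     int (*get_pages)(void))
{
	int ret;

	mutex_unlock(driver_lock);	/* wait_prepare()   */
	down_read(mmap_sem);
	mutex_lock(driver_lock);	/* wait_finish()    */

	ret = get_pages();		/* __qbuf_userptr() */

	up_read(mmap_sem);
	return ret;			/* driver lock stays held, as before */
}
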
@@ -1254,105 +1299,36 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
ret = call_qop(q, buf_prepare, vb);
if (ret)
dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
- else
- vb->state = VB2_BUF_STATE_PREPARED;
+ vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;
return ret;
}
static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
- const char *opname,
- int (*handler)(struct vb2_queue *,
- struct v4l2_buffer *,
- struct vb2_buffer *))
+ const char *opname)
{
- struct rw_semaphore *mmap_sem = NULL;
- struct vb2_buffer *vb;
- int ret;
-
- /*
- * In case of user pointer buffers vb2 allocators need to get direct
- * access to userspace pages. This requires getting the mmap semaphore
- * for read access in the current process structure. The same semaphore
- * is taken before calling mmap operation, while both qbuf/prepare_buf
- * and mmap are called by the driver or v4l2 core with the driver's lock
- * held. To avoid an AB-BA deadlock (mmap_sem then driver's lock in mmap
- * and driver's lock then mmap_sem in qbuf/prepare_buf) the videobuf2
- * core releases the driver's lock, takes mmap_sem and then takes the
- * driver's lock again.
- *
- * To avoid racing with other vb2 calls, which might be called after
- * releasing the driver's lock, this operation is performed at the
- * beginning of qbuf/prepare_buf processing. This way the queue status
- * is consistent after getting the driver's lock back.
- */
- if (q->memory == V4L2_MEMORY_USERPTR) {
- mmap_sem = &current->mm->mmap_sem;
- call_qop(q, wait_prepare, q);
- down_read(mmap_sem);
- call_qop(q, wait_finish, q);
- }
-
- if (q->fileio) {
- dprintk(1, "%s(): file io in progress\n", opname);
- ret = -EBUSY;
- goto unlock;
- }
-
if (b->type != q->type) {
dprintk(1, "%s(): invalid buffer type\n", opname);
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
if (b->index >= q->num_buffers) {
dprintk(1, "%s(): buffer index out of range\n", opname);
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
- vb = q->bufs[b->index];
- if (NULL == vb) {
+ if (q->bufs[b->index] == NULL) {
/* Should never happen */
dprintk(1, "%s(): buffer is NULL\n", opname);
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
if (b->memory != q->memory) {
dprintk(1, "%s(): invalid memory type\n", opname);
- ret = -EINVAL;
- goto unlock;
- }
-
- ret = __verify_planes_array(vb, b);
- if (ret)
- goto unlock;
-
- ret = handler(q, b, vb);
- if (ret)
- goto unlock;
-
- /* Fill buffer information for the userspace */
- __fill_v4l2_buffer(vb, b);
-
- dprintk(1, "%s() of buffer %d succeeded\n", opname, vb->v4l2_buf.index);
-unlock:
- if (mmap_sem)
- up_read(mmap_sem);
- return ret;
-}
-
-static int __vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
- struct vb2_buffer *vb)
-{
- if (vb->state != VB2_BUF_STATE_DEQUEUED) {
- dprintk(1, "%s(): invalid buffer state %d\n", __func__,
- vb->state);
return -EINVAL;
}
- return __buf_prepare(vb, b);
+ return __verify_planes_array(q->bufs[b->index], b);
}
/**
@@ -1372,22 +1348,95 @@ static int __vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
*/
int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
{
- return vb2_queue_or_prepare_buf(q, b, "prepare_buf", __vb2_prepare_buf);
+ struct vb2_buffer *vb;
+ int ret;
+
+ if (q->fileio) {
+ dprintk(1, "%s(): file io in progress\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
+ if (ret)
+ return ret;
+
+ vb = q->bufs[b->index];
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(1, "%s(): invalid buffer state %d\n", __func__,
+ vb->state);
+ return -EINVAL;
+ }
+
+ ret = __buf_prepare(vb, b);
+ if (!ret) {
+ /* Fill buffer information for the userspace */
+ __fill_v4l2_buffer(vb, b);
+
+ dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index);
+ }
+ return ret;
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);
-static int __vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b,
- struct vb2_buffer *vb)
+/**
+ * vb2_start_streaming() - Attempt to start streaming.
+ * @q: videobuf2 queue
+ *
+ * If there are not enough buffers, then retry_start_streaming is set to
+ * 1 and 0 is returned. The next time a buffer is queued and
+ * retry_start_streaming is 1, this function will be called again to
+ * retry starting the DMA engine.
+ */
+static int vb2_start_streaming(struct vb2_queue *q)
{
int ret;
+ /* Tell the driver to start streaming */
+ ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
+
+ /*
+ * If there are not enough buffers queued to start streaming, then
+ * the start_streaming operation will return -ENOBUFS and you have to
+ * retry when the next buffer is queued.
+ */
+ if (ret == -ENOBUFS) {
+ dprintk(1, "qbuf: not enough buffers, retry when more buffers are queued.\n");
+ q->retry_start_streaming = 1;
+ return 0;
+ }
+ if (ret)
+ dprintk(1, "qbuf: driver refused to start streaming\n");
+ else
+ q->retry_start_streaming = 0;
+ return ret;
+}
+
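
Note (not part of the patch): with the retry logic above, a driver's start_streaming() callback can simply return -ENOBUFS until enough buffers have been queued. Illustrative sketch; my_dev and my_hw_start_dma() are hypothetical.

static int my_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct my_dev *dev = vb2_get_drv_priv(q);

	if (count < 3)			/* need at least 3 queued buffers */
		return -ENOBUFS;	/* vb2 retries on the next qbuf */

	return my_hw_start_dma(dev);	/* hypothetical DMA kick-off */
}
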
+static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
+ struct vb2_buffer *vb;
+
+ if (ret)
+ return ret;
+
+ vb = q->bufs[b->index];
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(1, "%s(): invalid buffer state %d\n", __func__,
+ vb->state);
+ return -EINVAL;
+ }
+
switch (vb->state) {
case VB2_BUF_STATE_DEQUEUED:
ret = __buf_prepare(vb, b);
if (ret)
return ret;
+ break;
case VB2_BUF_STATE_PREPARED:
break;
+ case VB2_BUF_STATE_PREPARING:
+ dprintk(1, "qbuf: buffer still being prepared\n");
+ return -EINVAL;
default:
dprintk(1, "qbuf: buffer already in use\n");
return -EINVAL;
@@ -1407,6 +1456,16 @@ static int __vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b,
if (q->streaming)
__enqueue_in_driver(vb);
+ /* Fill buffer information for the userspace */
+ __fill_v4l2_buffer(vb, b);
+
+ if (q->retry_start_streaming) {
+ ret = vb2_start_streaming(q);
+ if (ret)
+ return ret;
+ }
+
+ dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index);
return 0;
}
@@ -1429,7 +1488,12 @@ static int __vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b,
*/
int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
- return vb2_queue_or_prepare_buf(q, b, "qbuf", __vb2_qbuf);
+ if (q->fileio) {
+ dprintk(1, "%s(): file io in progress\n", __func__);
+ return -EBUSY;
+ }
+
+ return vb2_internal_qbuf(q, b);
}
EXPORT_SYMBOL_GPL(vb2_qbuf);
@@ -1550,7 +1614,8 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q)
return -EINVAL;
}
- wait_event(q->done_wq, !atomic_read(&q->queued_count));
+ if (!q->retry_start_streaming)
+ wait_event(q->done_wq, !atomic_read(&q->queued_count));
return 0;
}
EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
@@ -1579,37 +1644,11 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
}
}
-/**
- * vb2_dqbuf() - Dequeue a buffer to the userspace
- * @q: videobuf2 queue
- * @b: buffer structure passed from userspace to vidioc_dqbuf handler
- * in driver
- * @nonblocking: if true, this call will not sleep waiting for a buffer if no
- * buffers ready for dequeuing are present. Normally the driver
- * would be passing (file->f_flags & O_NONBLOCK) here
- *
- * Should be called from vidioc_dqbuf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) calls buf_finish callback in the driver (if provided), in which
- * driver can perform any additional operations that may be required before
- * returning the buffer to userspace, such as cache sync,
- * 3) the buffer struct members are filled with relevant information for
- * the userspace.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_dqbuf handler in driver.
- */
-int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
struct vb2_buffer *vb = NULL;
int ret;
- if (q->fileio) {
- dprintk(1, "dqbuf: file io in progress\n");
- return -EBUSY;
- }
-
if (b->type != q->type) {
dprintk(1, "dqbuf: invalid buffer type\n");
return -EINVAL;
@@ -1648,6 +1687,36 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
return 0;
}
+
+/**
+ * vb2_dqbuf() - Dequeue a buffer to the userspace
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to vidioc_dqbuf handler
+ * in driver
+ * @nonblocking: if true, this call will not sleep waiting for a buffer if no
+ * buffers ready for dequeuing are present. Normally the driver
+ * would be passing (file->f_flags & O_NONBLOCK) here
+ *
+ * Should be called from vidioc_dqbuf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) calls buf_finish callback in the driver (if provided), in which
+ * driver can perform any additional operations that may be required before
+ * returning the buffer to userspace, such as cache sync,
+ * 3) the buffer struct members are filled with relevant information for
+ * the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_dqbuf handler in driver.
+ */
+int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+{
+ if (q->fileio) {
+ dprintk(1, "dqbuf: file io in progress\n");
+ return -EBUSY;
+ }
+ return vb2_internal_dqbuf(q, b, nonblocking);
+}
EXPORT_SYMBOL_GPL(vb2_dqbuf);
/**
@@ -1660,6 +1729,11 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
{
unsigned int i;
+ if (q->retry_start_streaming) {
+ q->retry_start_streaming = 0;
+ q->streaming = 0;
+ }
+
/*
* Tell driver to stop all transactions and release all queued
* buffers.
@@ -1687,37 +1761,24 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
__vb2_dqbuf(q->bufs[i]);
}
-/**
- * vb2_streamon - start streaming
- * @q: videobuf2 queue
- * @type: type argument passed from userspace to vidioc_streamon handler
- *
- * Should be called from vidioc_streamon handler of a driver.
- * This function:
- * 1) verifies current state
- * 2) passes any previously queued buffers to the driver and starts streaming
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_streamon handler in the driver.
- */
-int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
+static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
struct vb2_buffer *vb;
int ret;
- if (q->fileio) {
- dprintk(1, "streamon: file io in progress\n");
- return -EBUSY;
- }
-
if (type != q->type) {
dprintk(1, "streamon: invalid stream type\n");
return -EINVAL;
}
if (q->streaming) {
- dprintk(1, "streamon: already streaming\n");
- return -EBUSY;
+ dprintk(3, "streamon successful: already streaming\n");
+ return 0;
+ }
+
+ if (!q->num_buffers) {
+ dprintk(1, "streamon: no buffers have been allocated\n");
+ return -EINVAL;
}
/*
@@ -1727,12 +1788,9 @@ int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
list_for_each_entry(vb, &q->queued_list, queued_entry)
__enqueue_in_driver(vb);
- /*
- * Let driver notice that streaming state has been enabled.
- */
- ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
+ /* Tell driver to start streaming. */
+ ret = vb2_start_streaming(q);
if (ret) {
- dprintk(1, "streamon: driver refused to start streaming\n");
__vb2_queue_cancel(q);
return ret;
}
@@ -1742,39 +1800,40 @@ int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
dprintk(3, "Streamon successful\n");
return 0;
}
-EXPORT_SYMBOL_GPL(vb2_streamon);
-
/**
- * vb2_streamoff - stop streaming
+ * vb2_streamon - start streaming
* @q: videobuf2 queue
- * @type: type argument passed from userspace to vidioc_streamoff handler
+ * @type: type argument passed from userspace to vidioc_streamon handler
*
- * Should be called from vidioc_streamoff handler of a driver.
+ * Should be called from vidioc_streamon handler of a driver.
* This function:
- * 1) verifies current state,
- * 2) stop streaming and dequeues any queued buffers, including those previously
- * passed to the driver (after waiting for the driver to finish).
+ * 1) verifies current state
+ * 2) passes any previously queued buffers to the driver and starts streaming
*
- * This call can be used for pausing playback.
* The return values from this function are intended to be directly returned
- * from vidioc_streamoff handler in the driver
+ * from vidioc_streamon handler in the driver.
*/
-int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
if (q->fileio) {
- dprintk(1, "streamoff: file io in progress\n");
+ dprintk(1, "streamon: file io in progress\n");
return -EBUSY;
}
+ return vb2_internal_streamon(q, type);
+}
+EXPORT_SYMBOL_GPL(vb2_streamon);
+static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+{
if (type != q->type) {
dprintk(1, "streamoff: invalid stream type\n");
return -EINVAL;
}
if (!q->streaming) {
- dprintk(1, "streamoff: not streaming\n");
- return -EINVAL;
+ dprintk(3, "streamoff successful: not streaming\n");
+ return 0;
}
/*
@@ -1786,6 +1845,30 @@ int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
dprintk(3, "Streamoff successful\n");
return 0;
}
+
+/**
+ * vb2_streamoff - stop streaming
+ * @q: videobuf2 queue
+ * @type: type argument passed from userspace to vidioc_streamoff handler
+ *
+ * Should be called from vidioc_streamoff handler of a driver.
+ * This function:
+ * 1) verifies current state,
+ * 2) stop streaming and dequeues any queued buffers, including those previously
+ * passed to the driver (after waiting for the driver to finish).
+ *
+ * This call can be used for pausing playback.
+ * The return values from this function are intended to be directly returned
+ * from vidioc_streamoff handler in the driver
+ */
+int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+ if (q->fileio) {
+ dprintk(1, "streamoff: file io in progress\n");
+ return -EBUSY;
+ }
+ return vb2_internal_streamoff(q, type);
+}
EXPORT_SYMBOL_GPL(vb2_streamoff);
/**
@@ -2277,15 +2360,16 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
goto err_reqbufs;
fileio->bufs[i].queued = 1;
}
-
- /*
- * Start streaming.
- */
- ret = vb2_streamon(q, q->type);
- if (ret)
- goto err_reqbufs;
+ fileio->index = q->num_buffers;
}
+ /*
+ * Start streaming.
+ */
+ ret = vb2_streamon(q, q->type);
+ if (ret)
+ goto err_reqbufs;
+
q->fileio = fileio;
return ret;
@@ -2308,13 +2392,8 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q)
struct vb2_fileio_data *fileio = q->fileio;
if (fileio) {
- /*
- * Hack fileio context to enable direct calls to vb2 ioctl
- * interface.
- */
+ vb2_internal_streamoff(q, q->type);
q->fileio = NULL;
-
- vb2_streamoff(q, q->type);
fileio->req.count = 0;
vb2_reqbufs(q, &fileio->req);
kfree(fileio);
@@ -2358,39 +2437,34 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
fileio = q->fileio;
/*
- * Hack fileio context to enable direct calls to vb2 ioctl interface.
- * The pointer will be restored before returning from this function.
- */
- q->fileio = NULL;
-
- index = fileio->index;
- buf = &fileio->bufs[index];
-
- /*
* Check if we need to dequeue the buffer.
*/
- if (buf->queued) {
- struct vb2_buffer *vb;
-
+ index = fileio->index;
+ if (index >= q->num_buffers) {
/*
* Call vb2_dqbuf to get buffer back.
*/
memset(&fileio->b, 0, sizeof(fileio->b));
fileio->b.type = q->type;
fileio->b.memory = q->memory;
- fileio->b.index = index;
- ret = vb2_dqbuf(q, &fileio->b, nonblock);
+ ret = vb2_internal_dqbuf(q, &fileio->b, nonblock);
dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
if (ret)
- goto end;
+ return ret;
fileio->dq_count += 1;
+ index = fileio->b.index;
+ buf = &fileio->bufs[index];
+
/*
* Get number of bytes filled by the driver
*/
- vb = q->bufs[index];
- buf->size = vb2_get_plane_payload(vb, 0);
+ buf->pos = 0;
buf->queued = 0;
+ buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
+ : vb2_plane_size(q->bufs[index], 0);
+ } else {
+ buf = &fileio->bufs[index];
}
/*
@@ -2412,8 +2486,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
ret = copy_from_user(buf->vaddr + buf->pos, data, count);
if (ret) {
dprintk(3, "file io: error copying data\n");
- ret = -EFAULT;
- goto end;
+ return -EFAULT;
}
/*
@@ -2433,10 +2506,6 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
fileio->dq_count == 1) {
dprintk(3, "file io: read limit reached\n");
- /*
- * Restore fileio pointer and release the context.
- */
- q->fileio = fileio;
return __vb2_cleanup_fileio(q);
}
@@ -2448,32 +2517,20 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
fileio->b.memory = q->memory;
fileio->b.index = index;
fileio->b.bytesused = buf->pos;
- ret = vb2_qbuf(q, &fileio->b);
+ ret = vb2_internal_qbuf(q, &fileio->b);
dprintk(5, "file io: vb2_dbuf result: %d\n", ret);
if (ret)
- goto end;
+ return ret;
/*
* Buffer has been queued, update the status
*/
buf->pos = 0;
buf->queued = 1;
- buf->size = q->bufs[0]->v4l2_planes[0].length;
+ buf->size = vb2_plane_size(q->bufs[index], 0);
fileio->q_count += 1;
-
- /*
- * Switch to the next buffer
- */
- fileio->index = (index + 1) % q->num_buffers;
-
- /*
- * Start streaming if required.
- */
- if (!read && !q->streaming) {
- ret = vb2_streamon(q, q->type);
- if (ret)
- goto end;
- }
+ if (fileio->index < q->num_buffers)
+ fileio->index++;
}
/*
@@ -2481,11 +2538,6 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
*/
if (ret == 0)
ret = count;
-end:
- /*
- * Restore the fileio context and block vb2 ioctl interface.
- */
- q->fileio = fileio;
return ret;
}
@@ -2649,16 +2701,29 @@ int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);
-int vb2_fop_release(struct file *file)
+int _vb2_fop_release(struct file *file, struct mutex *lock)
{
struct video_device *vdev = video_devdata(file);
if (file->private_data == vdev->queue->owner) {
+ if (lock)
+ mutex_lock(lock);
vb2_queue_release(vdev->queue);
vdev->queue->owner = NULL;
+ if (lock)
+ mutex_unlock(lock);
}
return v4l2_fh_release(file);
}
+EXPORT_SYMBOL_GPL(_vb2_fop_release);
+
+int vb2_fop_release(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+
+ return _vb2_fop_release(file, lock);
+}
EXPORT_SYMBOL_GPL(vb2_fop_release);
ssize_t vb2_fop_write(struct file *file, const char __user *buf,
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 0d3a8ffe47a3..c779f210d2c6 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -40,6 +40,7 @@ struct vb2_dma_sg_buf {
unsigned int num_pages;
atomic_t refcount;
struct vb2_vmarea_handler handler;
+ struct vm_area_struct *vma;
};
static void vb2_dma_sg_put(void *buf_priv);
@@ -155,12 +156,18 @@ static void vb2_dma_sg_put(void *buf_priv)
}
}
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write)
{
struct vb2_dma_sg_buf *buf;
unsigned long first, last;
int num_pages_from_user;
+ struct vm_area_struct *vma;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
@@ -180,7 +187,38 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
if (!buf->pages)
goto userptr_fail_alloc_pages;
- num_pages_from_user = get_user_pages(current, current->mm,
+ vma = find_vma(current->mm, vaddr);
+ if (!vma) {
+ dprintk(1, "no vma for address %lu\n", vaddr);
+ goto userptr_fail_find_vma;
+ }
+
+ if (vma->vm_end < vaddr + size) {
+ dprintk(1, "vma at %lu is too small for %lu bytes\n",
+ vaddr, size);
+ goto userptr_fail_find_vma;
+ }
+
+ buf->vma = vb2_get_vma(vma);
+ if (!buf->vma) {
+ dprintk(1, "failed to copy vma\n");
+ goto userptr_fail_find_vma;
+ }
+
+ if (vma_is_io(buf->vma)) {
+ for (num_pages_from_user = 0;
+ num_pages_from_user < buf->num_pages;
+ ++num_pages_from_user, vaddr += PAGE_SIZE) {
+ unsigned long pfn;
+
+ if (follow_pfn(buf->vma, vaddr, &pfn)) {
+ dprintk(1, "no page for address %lu\n", vaddr);
+ break;
+ }
+ buf->pages[num_pages_from_user] = pfn_to_page(pfn);
+ }
+ } else
+ num_pages_from_user = get_user_pages(current, current->mm,
vaddr & PAGE_MASK,
buf->num_pages,
write,
@@ -200,9 +238,12 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
dprintk(1, "get_user_pages requested/got: %d/%d]\n",
- num_pages_from_user, buf->num_pages);
- while (--num_pages_from_user >= 0)
- put_page(buf->pages[num_pages_from_user]);
+ buf->num_pages, num_pages_from_user);
+ if (!vma_is_io(buf->vma))
+ while (--num_pages_from_user >= 0)
+ put_page(buf->pages[num_pages_from_user]);
+ vb2_put_vma(buf->vma);
+userptr_fail_find_vma:
kfree(buf->pages);
userptr_fail_alloc_pages:
kfree(buf);
@@ -226,9 +267,11 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
while (--i >= 0) {
if (buf->write)
set_page_dirty_lock(buf->pages[i]);
- put_page(buf->pages[i]);
+ if (!vma_is_io(buf->vma))
+ put_page(buf->pages[i]);
}
kfree(buf->pages);
+ vb2_put_vma(buf->vma);
kfree(buf);
}
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index 25f8f93decb6..2a635b6fdaf7 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -145,6 +145,8 @@ static int ms_transfer_data(struct realtek_pci_ms *host, unsigned char data_dir,
unsigned int length = sg->length;
u16 sec_cnt = (u16)(length / 512);
u8 val, trans_mode, dma_dir;
+ struct memstick_dev *card = host->msh->card;
+ bool pro_card = card->id.type == MEMSTICK_TYPE_PRO;
dev_dbg(ms_dev(host), "%s: tpc = 0x%02x, data_dir = %s, length = %d\n",
__func__, tpc, (data_dir == READ) ? "READ" : "WRITE",
@@ -152,19 +154,21 @@ static int ms_transfer_data(struct realtek_pci_ms *host, unsigned char data_dir,
if (data_dir == READ) {
dma_dir = DMA_DIR_FROM_CARD;
- trans_mode = MS_TM_AUTO_READ;
+ trans_mode = pro_card ? MS_TM_AUTO_READ : MS_TM_NORMAL_READ;
} else {
dma_dir = DMA_DIR_TO_CARD;
- trans_mode = MS_TM_AUTO_WRITE;
+ trans_mode = pro_card ? MS_TM_AUTO_WRITE : MS_TM_NORMAL_WRITE;
}
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_H,
- 0xFF, (u8)(sec_cnt >> 8));
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_L,
- 0xFF, (u8)sec_cnt);
+ if (pro_card) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_H,
+ 0xFF, (u8)(sec_cnt >> 8));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_L,
+ 0xFF, (u8)sec_cnt);
+ }
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
@@ -192,8 +196,14 @@ static int ms_transfer_data(struct realtek_pci_ms *host, unsigned char data_dir,
}
rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val);
- if (val & (MS_INT_CMDNK | MS_INT_ERR | MS_CRC16_ERR | MS_RDY_TIMEOUT))
- return -EIO;
+ if (pro_card) {
+ if (val & (MS_INT_CMDNK | MS_INT_ERR |
+ MS_CRC16_ERR | MS_RDY_TIMEOUT))
+ return -EIO;
+ } else {
+ if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT))
+ return -EIO;
+ }
return 0;
}
@@ -462,8 +472,8 @@ static int rtsx_pci_ms_set_param(struct memstick_host *msh,
clock = 19000000;
ssc_depth = RTSX_SSC_DEPTH_500K;
- err = rtsx_pci_write_register(pcr, MS_CFG,
- 0x18, MS_BUS_WIDTH_1);
+ err = rtsx_pci_write_register(pcr, MS_CFG, 0x58,
+ MS_BUS_WIDTH_1 | PUSH_TIME_DEFAULT);
if (err < 0)
return err;
} else if (value == MEMSTICK_PAR4) {
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 767ff4d839f4..570b18a113ff 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -346,7 +346,7 @@ static int mpt_remove_dead_ioc_func(void *arg)
if ((pdev == NULL))
return -1;
- pci_stop_and_remove_bus_device(pdev);
+ pci_stop_and_remove_bus_device_locked(pdev);
return 0;
}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index dd239bdbfcb4..00d339c361fc 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2235,10 +2235,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
/* do we need to support multiple segments? */
- if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
- printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
- ioc->name, __func__, bio_segments(req->bio), blk_rq_bytes(req),
- bio_segments(rsp->bio), blk_rq_bytes(rsp));
+ if (bio_multiple_segments(req->bio) ||
+ bio_multiple_segments(rsp->bio)) {
+ printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u, rsp %u\n",
+ ioc->name, __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
return -EINVAL;
}
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index a60c188c2bd9..04bd3b6de401 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -754,19 +754,19 @@ static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
unsigned long arg)
{
int ret;
- mutex_lock(&i2o_cfg_mutex);
switch (cmd) {
case I2OGETIOPS:
ret = i2o_cfg_ioctl(file, cmd, arg);
break;
case I2OPASSTHRU32:
+ mutex_lock(&i2o_cfg_mutex);
ret = i2o_cfg_passthru32(file, cmd, arg);
+ mutex_unlock(&i2o_cfg_mutex);
break;
default:
ret = -ENOIOCTLCMD;
break;
}
- mutex_unlock(&i2o_cfg_mutex);
return ret;
}
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index a65447d65605..7dca1e640970 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -148,7 +148,7 @@ static struct resource onkey_resources[] = {
},
};
-static struct mfd_cell onkey_devs[] = {
+static const struct mfd_cell onkey_devs[] = {
{
.name = "88pm80x-onkey",
.num_resources = 1,
@@ -157,7 +157,7 @@ static struct mfd_cell onkey_devs[] = {
},
};
-static struct mfd_cell regulator_devs[] = {
+static const struct mfd_cell regulator_devs[] = {
{
.name = "88pm80x-regulator",
.id = -1,
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index 8a5b6ffb5afb..64751c2a1ace 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -77,7 +77,7 @@ static struct resource codec_resources[] = {
},
};
-static struct mfd_cell codec_devs[] = {
+static const struct mfd_cell codec_devs[] = {
{
.name = "88pm80x-codec",
.num_resources = ARRAY_SIZE(codec_resources),
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index dd671582c9a1..49bb445d846a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -80,7 +80,7 @@ config MFD_CROS_EC_I2C
config MFD_CROS_EC_SPI
tristate "ChromeOS Embedded Controller (SPI)"
- depends on MFD_CROS_EC && SPI
+ depends on MFD_CROS_EC && SPI && OF
---help---
If you say Y here, you get support for talking to the ChromeOS EC
@@ -163,14 +163,10 @@ config MFD_DA9063
Additional drivers must be enabled in order to use the functionality
of the device.
-config MFD_MC13783
- tristate
-
config MFD_MC13XXX
tristate
depends on (SPI_MASTER || I2C)
select MFD_CORE
- select MFD_MC13783
help
Enable support for the Freescale MC13783 and MC13892 PMICs.
This driver provides common support for accessing the device,
@@ -321,6 +317,19 @@ config MFD_88PM860X
select individual components like voltage regulators, RTC and
battery-charger under the corresponding menus.
+config MFD_MAX14577
+ bool "Maxim Semiconductor MAX14577 MUIC + Charger Support"
+ depends on I2C=y
+ select MFD_CORE
+ select REGMAP_I2C
+ select IRQ_DOMAIN
+ help
+	  Say yes here to add support for the Maxim Semiconductor MAX14577.
+	  This is a Micro-USB IC with charger controls on chip.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
config MFD_MAX77686
bool "Maxim Semiconductor MAX77686 PMIC Support"
depends on I2C=y
@@ -725,6 +734,17 @@ config MFD_DM355EVM_MSP
boards. MSP430 firmware manages resets and power sequencing,
inputs from buttons and the IR remote, LEDs, an RTC, and more.
+config MFD_LP3943
+ tristate "TI/National Semiconductor LP3943 MFD Driver"
+ depends on I2C
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Support for the TI/National Semiconductor LP3943.
+ This driver consists of GPIO and PWM drivers.
+	  With these functionalities, it can be used for LED string control or
+	  for general use, such as a GPIO controller and a PWM controller.
+
config MFD_LP8788
bool "TI LP8788 Power Management Unit Driver"
depends on I2C=y
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 8a28dc90fe78..5aea5ef0a62f 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -102,6 +102,7 @@ obj-$(CONFIG_PMIC_DA9052) += da9052-core.o
obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o
obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o
+obj-$(CONFIG_MFD_LP3943) += lp3943.o
obj-$(CONFIG_MFD_LP8788) += lp8788.o lp8788-irq.o
da9055-objs := da9055-core.o da9055-i2c.o
@@ -110,6 +111,7 @@ obj-$(CONFIG_MFD_DA9055) += da9055.o
da9063-objs := da9063-core.o da9063-irq.o da9063-i2c.o
obj-$(CONFIG_MFD_DA9063) += da9063.o
+obj-$(CONFIG_MFD_MAX14577) += max14577.o
obj-$(CONFIG_MFD_MAX77686) += max77686.o max77686-irq.o
obj-$(CONFIG_MFD_MAX77693) += max77693.o max77693-irq.o
obj-$(CONFIG_MFD_MAX8907) += max8907.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index b6c2cdc76091..aaff683cd37d 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -491,7 +491,7 @@ static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
if (line == AB8540_INT_GPIO43F || line == AB8540_INT_GPIO44F)
line += 1;
- handle_nested_irq(ab8500->irq_base + line);
+ handle_nested_irq(irq_create_mapping(ab8500->domain, line));
}
return 0;
@@ -1017,7 +1017,7 @@ static struct resource ab8500_temp_resources[] = {
},
};
-static struct mfd_cell ab8500_bm_devs[] = {
+static const struct mfd_cell ab8500_bm_devs[] = {
{
.name = "ab8500-charger",
.of_compatible = "stericsson,ab8500-charger",
@@ -1052,7 +1052,7 @@ static struct mfd_cell ab8500_bm_devs[] = {
},
};
-static struct mfd_cell ab8500_devs[] = {
+static const struct mfd_cell ab8500_devs[] = {
#ifdef CONFIG_DEBUG_FS
{
.name = "ab8500-debug",
@@ -1143,7 +1143,7 @@ static struct mfd_cell ab8500_devs[] = {
},
};
-static struct mfd_cell ab9540_devs[] = {
+static const struct mfd_cell ab9540_devs[] = {
#ifdef CONFIG_DEBUG_FS
{
.name = "ab8500-debug",
@@ -1214,7 +1214,7 @@ static struct mfd_cell ab9540_devs[] = {
};
/* Device list for ab8505 */
-static struct mfd_cell ab8505_devs[] = {
+static const struct mfd_cell ab8505_devs[] = {
#ifdef CONFIG_DEBUG_FS
{
.name = "ab8500-debug",
@@ -1275,7 +1275,7 @@ static struct mfd_cell ab8505_devs[] = {
},
};
-static struct mfd_cell ab8540_devs[] = {
+static const struct mfd_cell ab8540_devs[] = {
#ifdef CONFIG_DEBUG_FS
{
.name = "ab8500-debug",
@@ -1339,7 +1339,7 @@ static struct mfd_cell ab8540_devs[] = {
},
};
-static struct mfd_cell ab8540_cut1_devs[] = {
+static const struct mfd_cell ab8540_cut1_devs[] = {
{
.name = "ab8500-rtc",
.of_compatible = "stericsson,ab8500-rtc",
@@ -1348,7 +1348,7 @@ static struct mfd_cell ab8540_cut1_devs[] = {
},
};
-static struct mfd_cell ab8540_cut2_devs[] = {
+static const struct mfd_cell ab8540_cut2_devs[] = {
{
.name = "ab8540-rtc",
.of_compatible = "stericsson,ab8540-rtc",
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index e33e385af0a2..d1a22aae2df5 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -1600,7 +1600,6 @@ static int ab8500_interrupts_print(struct seq_file *s, void *p)
for (line = 0; line < num_interrupt_lines; line++) {
struct irq_desc *desc = irq_to_desc(line + irq_first);
- struct irqaction *action = desc->action;
seq_printf(s, "%3i: %6i %4i", line,
num_interrupts[line],
@@ -1608,7 +1607,9 @@ static int ab8500_interrupts_print(struct seq_file *s, void *p)
if (desc && desc->name)
seq_printf(s, "-%-8s", desc->name);
- if (action) {
+ if (desc && desc->action) {
+ struct irqaction *action = desc->action;
+
seq_printf(s, " %s", action->name);
while ((action = action->next) != NULL)
seq_printf(s, ", %s", action->name);
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 75e180ceecf3..a45aab9f6bb1 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -565,7 +565,7 @@ static inline int arizona_of_get_core_pdata(struct arizona *arizona)
}
#endif
-static struct mfd_cell early_devs[] = {
+static const struct mfd_cell early_devs[] = {
{ .name = "arizona-ldo1" },
};
@@ -577,7 +577,7 @@ static const char *wm5102_supplies[] = {
"SPKVDDR",
};
-static struct mfd_cell wm5102_devs[] = {
+static const struct mfd_cell wm5102_devs[] = {
{ .name = "arizona-micsupp" },
{ .name = "arizona-extcon" },
{ .name = "arizona-gpio" },
@@ -590,7 +590,7 @@ static struct mfd_cell wm5102_devs[] = {
},
};
-static struct mfd_cell wm5110_devs[] = {
+static const struct mfd_cell wm5110_devs[] = {
{ .name = "arizona-micsupp" },
{ .name = "arizona-extcon" },
{ .name = "arizona-gpio" },
@@ -609,7 +609,7 @@ static const char *wm8997_supplies[] = {
"SPKVDD",
};
-static struct mfd_cell wm8997_devs[] = {
+static const struct mfd_cell wm8997_devs[] = {
{ .name = "arizona-micsupp" },
{ .name = "arizona-extcon" },
{ .name = "arizona-gpio" },
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index f161f2e00df7..c71ff0af1547 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -54,7 +54,7 @@ static const struct resource as3722_adc_resource[] = {
},
};
-static struct mfd_cell as3722_devs[] = {
+static const struct mfd_cell as3722_devs[] = {
{
.name = "as3722-pinctrl",
},
@@ -74,6 +74,9 @@ static struct mfd_cell as3722_devs[] = {
{
.name = "as3722-power-off",
},
+ {
+ .name = "as3722-wdt",
+ },
};
static const struct regmap_irq as3722_irqs[] = {
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index fa22154c84e4..9f6294f2a070 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -695,7 +695,7 @@ static int ds1wm_disable(struct platform_device *pdev)
return 0;
}
-static struct mfd_cell asic3_cell_ds1wm = {
+static const struct mfd_cell asic3_cell_ds1wm = {
.name = "ds1wm",
.enable = ds1wm_enable,
.disable = ds1wm_disable,
@@ -797,7 +797,7 @@ static int asic3_mmc_disable(struct platform_device *pdev)
return 0;
}
-static struct mfd_cell asic3_cell_mmc = {
+static const struct mfd_cell asic3_cell_mmc = {
.name = "tmio-mmc",
.enable = asic3_mmc_enable,
.disable = asic3_mmc_disable,
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index 1f36885d674b..783fe2e73e1e 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -84,7 +84,7 @@ static irqreturn_t ec_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static struct mfd_cell cros_devs[] = {
+static const struct mfd_cell cros_devs[] = {
{
.name = "cros-ec-keyb",
.id = 1,
diff --git a/drivers/mfd/cros_ec_i2c.c b/drivers/mfd/cros_ec_i2c.c
index 123044608b63..4f71be99a183 100644
--- a/drivers/mfd/cros_ec_i2c.c
+++ b/drivers/mfd/cros_ec_i2c.c
@@ -120,7 +120,7 @@ static int cros_ec_command_xfer(struct cros_ec_device *ec_dev,
return ret;
}
-static int cros_ec_probe_i2c(struct i2c_client *client,
+static int cros_ec_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
struct device *dev = &client->dev;
@@ -150,7 +150,7 @@ static int cros_ec_probe_i2c(struct i2c_client *client,
return 0;
}
-static int cros_ec_remove_i2c(struct i2c_client *client)
+static int cros_ec_i2c_remove(struct i2c_client *client)
{
struct cros_ec_device *ec_dev = i2c_get_clientdata(client);
@@ -190,8 +190,8 @@ static struct i2c_driver cros_ec_driver = {
.owner = THIS_MODULE,
.pm = &cros_ec_i2c_pm_ops,
},
- .probe = cros_ec_probe_i2c,
- .remove = cros_ec_remove_i2c,
+ .probe = cros_ec_i2c_probe,
+ .remove = cros_ec_i2c_remove,
.id_table = cros_ec_i2c_id,
};
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 367ccb58ecb1..84af8d7a4295 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/mfd/cros_ec.h>
#include <linux/mfd/cros_ec_commands.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
@@ -50,10 +51,11 @@
/*
* Time between raising the SPI chip select (for the end of a
* transaction) and dropping it again (for the next transaction).
- * If we go too fast, the EC will miss the transaction. It seems
- * that 50us is enough with the 16MHz STM32 EC.
+ * If we go too fast, the EC will miss the transaction. We know that we
+ * need at least 70 us with the 16 MHz STM32 EC, so go with 200 us to be
+ * safe.
*/
-#define EC_SPI_RECOVERY_TIME_NS (50 * 1000)
+#define EC_SPI_RECOVERY_TIME_NS (200 * 1000)
/**
* struct cros_ec_spi - information about a SPI-connected EC
@@ -61,10 +63,13 @@
* @spi: SPI device we are connected to
* @last_transfer_ns: time that we last finished a transfer, or 0 if there
* if no record
+ * @end_of_msg_delay: used to set the delay_usecs on the spi_transfer that
+ * is sent when we want to turn off CS at the end of a transaction.
*/
struct cros_ec_spi {
struct spi_device *spi;
s64 last_transfer_ns;
+ unsigned int end_of_msg_delay;
};
static void debug_packet(struct device *dev, const char *name, u8 *ptr,
@@ -75,7 +80,9 @@ static void debug_packet(struct device *dev, const char *name, u8 *ptr,
dev_dbg(dev, "%s: ", name);
for (i = 0; i < len; i++)
- dev_cont(dev, " %02x", ptr[i]);
+ pr_cont(" %02x", ptr[i]);
+
+ pr_cont("\n");
#endif
}
@@ -105,7 +112,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
/* Receive data until we see the header byte */
deadline = jiffies + msecs_to_jiffies(EC_MSG_DEADLINE_MS);
do {
- memset(&trans, '\0', sizeof(trans));
+ memset(&trans, 0, sizeof(trans));
trans.cs_change = 1;
trans.rx_buf = ptr = ec_dev->din;
trans.len = EC_MSG_PREAMBLE_COUNT;
@@ -157,7 +164,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%zd\n",
todo, need_len, ptr - ec_dev->din);
- memset(&trans, '\0', sizeof(trans));
+ memset(&trans, 0, sizeof(trans));
trans.cs_change = 1;
trans.rx_buf = ptr;
trans.len = todo;
@@ -217,7 +224,7 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
/* Transmit phase - send our message */
debug_packet(ec_dev->dev, "out", ec_dev->dout, len);
- memset(&trans, '\0', sizeof(trans));
+ memset(&trans, 0, sizeof(trans));
trans.tx_buf = ec_dev->dout;
trans.len = len;
trans.cs_change = 1;
@@ -235,6 +242,17 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
/* turn off CS */
spi_message_init(&msg);
+
+ if (ec_spi->end_of_msg_delay) {
+ /*
+ * Add delay for last transaction, to ensure the rising edge
+ * doesn't come too soon after the end of the data.
+ */
+ memset(&trans, 0, sizeof(trans));
+ trans.delay_usecs = ec_spi->end_of_msg_delay;
+ spi_message_add_tail(&trans, &msg);
+ }
+
final_ret = spi_sync(ec_spi->spi, &msg);
ktime_get_ts(&ts);
ec_spi->last_transfer_ns = timespec_to_ns(&ts);
@@ -281,7 +299,18 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
return 0;
}
-static int cros_ec_probe_spi(struct spi_device *spi)
+static void cros_ec_spi_dt_probe(struct cros_ec_spi *ec_spi, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ u32 val;
+ int ret;
+
+ ret = of_property_read_u32(np, "google,cros-ec-spi-msg-delay", &val);
+ if (!ret)
+ ec_spi->end_of_msg_delay = val;
+}
+
+static int cros_ec_spi_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct cros_ec_device *ec_dev;
@@ -302,6 +331,9 @@ static int cros_ec_probe_spi(struct spi_device *spi)
if (!ec_dev)
return -ENOMEM;
+ /* Check for any DT properties */
+ cros_ec_spi_dt_probe(ec_spi, dev);
+
spi_set_drvdata(spi, ec_dev);
ec_dev->name = "SPI";
ec_dev->dev = dev;
@@ -323,7 +355,7 @@ static int cros_ec_probe_spi(struct spi_device *spi)
return 0;
}
-static int cros_ec_remove_spi(struct spi_device *spi)
+static int cros_ec_spi_remove(struct spi_device *spi)
{
struct cros_ec_device *ec_dev;
@@ -364,12 +396,12 @@ static struct spi_driver cros_ec_driver_spi = {
.owner = THIS_MODULE,
.pm = &cros_ec_spi_pm_ops,
},
- .probe = cros_ec_probe_spi,
- .remove = cros_ec_remove_spi,
+ .probe = cros_ec_spi_probe,
+ .remove = cros_ec_spi_remove,
.id_table = cros_ec_spi_id,
};
module_spi_driver(cros_ec_driver_spi);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChromeOS EC multi function device (SPI)");
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
index 2e4752a9220a..17c13012686a 100644
--- a/drivers/mfd/cs5535-mfd.c
+++ b/drivers/mfd/cs5535-mfd.c
@@ -172,7 +172,7 @@ static void cs5535_mfd_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static DEFINE_PCI_DEVICE_TABLE(cs5535_mfd_pci_tbl) = {
+static const struct pci_device_id cs5535_mfd_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
{ 0, }
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index ea28a33576e4..25838f10b35b 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -427,7 +427,7 @@ int da9052_adc_read_temp(struct da9052 *da9052)
}
EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
-static struct mfd_cell da9052_subdev_info[] = {
+static const struct mfd_cell da9052_subdev_info[] = {
{
.name = "da9052-regulator",
.id = 1,
diff --git a/drivers/mfd/da9055-core.c b/drivers/mfd/da9055-core.c
index d3670cd3c3c6..caf8dcffd0ad 100644
--- a/drivers/mfd/da9055-core.c
+++ b/drivers/mfd/da9055-core.c
@@ -294,7 +294,7 @@ static struct resource da9055_ld05_6_resource = {
.flags = IORESOURCE_IRQ,
};
-static struct mfd_cell da9055_devs[] = {
+static const struct mfd_cell da9055_devs[] = {
{
.of_compatible = "dialog,da9055-gpio",
.name = "da9055-gpio",
diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c
index c9cf8d988406..26937cd01071 100644
--- a/drivers/mfd/da9063-core.c
+++ b/drivers/mfd/da9063-core.c
@@ -75,7 +75,7 @@ static struct resource da9063_hwmon_resources[] = {
};
-static struct mfd_cell da9063_devs[] = {
+static const struct mfd_cell da9063_devs[] = {
{
.name = DA9063_DRVNAME_REGULATORS,
.num_resources = ARRAY_SIZE(da9063_regulators_resources),
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index b9ce60c301de..e43e6e821117 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -3070,7 +3070,7 @@ static struct db8500_thsens_platform_data db8500_thsens_data = {
.num_trips = 4,
};
-static struct mfd_cell common_prcmu_devs[] = {
+static const struct mfd_cell common_prcmu_devs[] = {
{
.name = "ux500_wdt",
.platform_data = &db8500_wdt_pdata,
@@ -3079,7 +3079,7 @@ static struct mfd_cell common_prcmu_devs[] = {
},
};
-static struct mfd_cell db8500_prcmu_devs[] = {
+static const struct mfd_cell db8500_prcmu_devs[] = {
{
.name = "db8500-prcmu-regulators",
.of_compatible = "stericsson,db8500-prcmu-regulator",
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index 6bf92a507b95..e88d4f6fef4c 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -114,7 +114,7 @@ static struct resource ds1wm_resources[] __initdata = {
},
};
-static struct mfd_cell ds1wm_cell __initdata = {
+static const struct mfd_cell ds1wm_cell __initconst = {
.name = "ds1wm",
.enable = ds1wm_enable,
.disable = ds1wm_disable,
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index 9203d47cdbb1..049fd23af54a 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -178,7 +178,7 @@ static struct mfd_cell msic_devs[] = {
* These devices appear only after the MSIC driver itself is initialized so
* we can guarantee that the SCU IPC interface is ready.
*/
-static struct mfd_cell msic_other_devs[] = {
+static const struct mfd_cell msic_other_devs[] = {
/* Audio codec in the MSIC */
{
.id = -1,
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index fcbb2e9dfd37..81b7d88af313 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -265,7 +265,7 @@ static void cmodio_pci_remove(struct pci_dev *dev)
#define PCI_VENDOR_ID_JANZ 0x13c3
/* The list of devices that this module will support */
-static DEFINE_PCI_DEVICE_TABLE(cmodio_pci_ids) = {
+static const struct pci_device_id cmodio_pci_ids[] = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0101 },
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0100 },
{ 0, }
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 3c0e8cf6916b..7a51c0d0d4f1 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -181,7 +181,7 @@ static struct resource jz4740_battery_resources[] = {
},
};
-static struct mfd_cell jz4740_adc_cells[] = {
+static const struct mfd_cell jz4740_adc_cells[] = {
{
.id = 0,
.name = "jz4740-hwmon",
diff --git a/drivers/mfd/lp3943.c b/drivers/mfd/lp3943.c
new file mode 100644
index 000000000000..e32226836fb4
--- /dev/null
+++ b/drivers/mfd/lp3943.c
@@ -0,0 +1,167 @@
+/*
+ * TI/National Semiconductor LP3943 MFD Core Driver
+ *
+ * Copyright 2013 Texas Instruments
+ *
+ * Author: Milo Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Driver structure:
+ * LP3943 is an integrated device capable of driving 16 output channels.
+ * It can be used for a GPIO expander and PWM generators.
+ *
+ * LED control General usage for a device
+ * ___________ ____________________________
+ *
+ * LP3943 MFD ---- GPIO expander leds-gpio eg) HW enable pin
+ * |
+ * --- PWM generator leds-pwm eg) PWM input
+ *
+ * Two internal PWM channels are used for the LED dimming effect,
+ * and each output pin can be used as a GPIO as well.
+ * The LED functionality can work with GPIOs or PWMs.
+ * LEDs can be controlled with the legacy leds-gpio (static brightness) or
+ * leds-pwm (dynamic brightness control) drivers.
+ * Alternatively, it can be used as a generic GPIO and PWM controller.
+ * For example, a GPIO can serve as the HW enable pin of a device,
+ * and a PWM can drive the input pin of a backlight device.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/lp3943.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#define LP3943_MAX_REGISTERS 0x09
+
+/* Register configuration for pin MUX */
+static const struct lp3943_reg_cfg lp3943_mux_cfg[] = {
+ /* address, mask, shift */
+ { LP3943_REG_MUX0, 0x03, 0 },
+ { LP3943_REG_MUX0, 0x0C, 2 },
+ { LP3943_REG_MUX0, 0x30, 4 },
+ { LP3943_REG_MUX0, 0xC0, 6 },
+ { LP3943_REG_MUX1, 0x03, 0 },
+ { LP3943_REG_MUX1, 0x0C, 2 },
+ { LP3943_REG_MUX1, 0x30, 4 },
+ { LP3943_REG_MUX1, 0xC0, 6 },
+ { LP3943_REG_MUX2, 0x03, 0 },
+ { LP3943_REG_MUX2, 0x0C, 2 },
+ { LP3943_REG_MUX2, 0x30, 4 },
+ { LP3943_REG_MUX2, 0xC0, 6 },
+ { LP3943_REG_MUX3, 0x03, 0 },
+ { LP3943_REG_MUX3, 0x0C, 2 },
+ { LP3943_REG_MUX3, 0x30, 4 },
+ { LP3943_REG_MUX3, 0xC0, 6 },
+};
+
+static struct mfd_cell lp3943_devs[] = {
+ {
+ .name = "lp3943-pwm",
+ .of_compatible = "ti,lp3943-pwm",
+ },
+ {
+ .name = "lp3943-gpio",
+ .of_compatible = "ti,lp3943-gpio",
+ },
+};
+
+int lp3943_read_byte(struct lp3943 *lp3943, u8 reg, u8 *read)
+{
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(lp3943->regmap, reg, &val);
+ if (ret < 0)
+ return ret;
+
+ *read = (u8)val;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lp3943_read_byte);
+
+int lp3943_write_byte(struct lp3943 *lp3943, u8 reg, u8 data)
+{
+ return regmap_write(lp3943->regmap, reg, data);
+}
+EXPORT_SYMBOL_GPL(lp3943_write_byte);
+
+int lp3943_update_bits(struct lp3943 *lp3943, u8 reg, u8 mask, u8 data)
+{
+ return regmap_update_bits(lp3943->regmap, reg, mask, data);
+}
+EXPORT_SYMBOL_GPL(lp3943_update_bits);
+
+static const struct regmap_config lp3943_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = LP3943_MAX_REGISTERS,
+};
+
+static int lp3943_probe(struct i2c_client *cl, const struct i2c_device_id *id)
+{
+ struct lp3943 *lp3943;
+ struct device *dev = &cl->dev;
+
+ lp3943 = devm_kzalloc(dev, sizeof(*lp3943), GFP_KERNEL);
+ if (!lp3943)
+ return -ENOMEM;
+
+ lp3943->regmap = devm_regmap_init_i2c(cl, &lp3943_regmap_config);
+ if (IS_ERR(lp3943->regmap))
+ return PTR_ERR(lp3943->regmap);
+
+ lp3943->pdata = dev_get_platdata(dev);
+ lp3943->dev = dev;
+ lp3943->mux_cfg = lp3943_mux_cfg;
+ i2c_set_clientdata(cl, lp3943);
+
+ return mfd_add_devices(dev, -1, lp3943_devs, ARRAY_SIZE(lp3943_devs),
+ NULL, 0, NULL);
+}
+
+static int lp3943_remove(struct i2c_client *cl)
+{
+ struct lp3943 *lp3943 = i2c_get_clientdata(cl);
+
+ mfd_remove_devices(lp3943->dev);
+ return 0;
+}
+
+static const struct i2c_device_id lp3943_ids[] = {
+ { "lp3943", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lp3943_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id lp3943_of_match[] = {
+ { .compatible = "ti,lp3943", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lp3943_of_match);
+#endif
+
+static struct i2c_driver lp3943_driver = {
+ .probe = lp3943_probe,
+ .remove = lp3943_remove,
+ .driver = {
+ .name = "lp3943",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(lp3943_of_match),
+ },
+ .id_table = lp3943_ids,
+};
+
+module_i2c_driver(lp3943_driver);
+
+MODULE_DESCRIPTION("LP3943 MFD Core Driver");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c
index 0f1221911018..a30bc15fe5ba 100644
--- a/drivers/mfd/lp8788.c
+++ b/drivers/mfd/lp8788.c
@@ -71,7 +71,7 @@ static struct resource rtc_irqs[] = {
},
};
-static struct mfd_cell lp8788_devs[] = {
+static const struct mfd_cell lp8788_devs[] = {
/* 4 bucks */
MFD_DEV_WITH_ID(BUCK, 1),
MFD_DEV_WITH_ID(BUCK, 2),
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 37edf9e989b0..be93fa261ded 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -517,7 +517,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
* pci_driver, because the I/O Controller Hub has also other
* functions that probably will be registered by other drivers.
*/
-static DEFINE_PCI_DEVICE_TABLE(lpc_ich_ids) = {
+static const struct pci_device_id lpc_ich_ids[] = {
{ PCI_VDEVICE(INTEL, 0x2410), LPC_ICH},
{ PCI_VDEVICE(INTEL, 0x2420), LPC_ICH0},
{ PCI_VDEVICE(INTEL, 0x2440), LPC_ICH2},
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index fbfbf0b7f97a..3bb05c03c68d 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -76,7 +76,7 @@ static struct mfd_cell wdt_sch_cell = {
.ignore_resource_conflicts = true,
};
-static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
+static const struct pci_device_id lpc_sch_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CENTERTON_ILB) },
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
new file mode 100644
index 000000000000..ac514fb2b877
--- /dev/null
+++ b/drivers/mfd/max14577.c
@@ -0,0 +1,245 @@
+/*
+ * max14577.c - mfd core driver for the Maxim 14577
+ *
+ * Copyright (C) 2013 Samsung Electronics
+ * Chanwoo Choi <cw00.choi@samsung.com>
+ * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver is based on max8997.c
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max14577.h>
+#include <linux/mfd/max14577-private.h>
+
+static struct mfd_cell max14577_devs[] = {
+ { .name = "max14577-muic", },
+ {
+ .name = "max14577-regulator",
+ .of_compatible = "maxim,max14577-regulator",
+ },
+ { .name = "max14577-charger", },
+};
+
+static bool max14577_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX14577_REG_INT1 ... MAX14577_REG_STATUS3:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static const struct regmap_config max14577_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_reg = max14577_volatile_reg,
+ .max_register = MAX14577_REG_END,
+};
+
+static const struct regmap_irq max14577_irqs[] = {
+ /* INT1 interrupts */
+ { .reg_offset = 0, .mask = INT1_ADC_MASK, },
+ { .reg_offset = 0, .mask = INT1_ADCLOW_MASK, },
+ { .reg_offset = 0, .mask = INT1_ADCERR_MASK, },
+ /* INT2 interrupts */
+ { .reg_offset = 1, .mask = INT2_CHGTYP_MASK, },
+ { .reg_offset = 1, .mask = INT2_CHGDETRUN_MASK, },
+ { .reg_offset = 1, .mask = INT2_DCDTMR_MASK, },
+ { .reg_offset = 1, .mask = INT2_DBCHG_MASK, },
+ { .reg_offset = 1, .mask = INT2_VBVOLT_MASK, },
+ /* INT3 interrupts */
+ { .reg_offset = 2, .mask = INT3_EOC_MASK, },
+ { .reg_offset = 2, .mask = INT3_CGMBC_MASK, },
+ { .reg_offset = 2, .mask = INT3_OVP_MASK, },
+ { .reg_offset = 2, .mask = INT3_MBCCHGERR_MASK, },
+};
+
+static const struct regmap_irq_chip max14577_irq_chip = {
+ .name = "max14577",
+ .status_base = MAX14577_REG_INT1,
+ .mask_base = MAX14577_REG_INTMASK1,
+ .mask_invert = 1,
+ .num_regs = 3,
+ .irqs = max14577_irqs,
+ .num_irqs = ARRAY_SIZE(max14577_irqs),
+};
+
+static int max14577_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct max14577 *max14577;
+ struct max14577_platform_data *pdata = dev_get_platdata(&i2c->dev);
+ struct device_node *np = i2c->dev.of_node;
+ u8 reg_data;
+ int ret = 0;
+
+ if (np) {
+ pdata = devm_kzalloc(&i2c->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ i2c->dev.platform_data = pdata;
+ }
+
+ if (!pdata) {
+ dev_err(&i2c->dev, "No platform data found.\n");
+ return -EINVAL;
+ }
+
+ max14577 = devm_kzalloc(&i2c->dev, sizeof(*max14577), GFP_KERNEL);
+ if (!max14577)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, max14577);
+ max14577->dev = &i2c->dev;
+ max14577->i2c = i2c;
+ max14577->irq = i2c->irq;
+
+ max14577->regmap = devm_regmap_init_i2c(i2c, &max14577_regmap_config);
+ if (IS_ERR(max14577->regmap)) {
+ ret = PTR_ERR(max14577->regmap);
+ dev_err(max14577->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = max14577_read_reg(max14577->regmap, MAX14577_REG_DEVICEID,
+ &reg_data);
+ if (ret) {
+ dev_err(max14577->dev, "Device not found on this channel: %d\n",
+ ret);
+ return ret;
+ }
+ max14577->vendor_id = ((reg_data & DEVID_VENDORID_MASK) >>
+ DEVID_VENDORID_SHIFT);
+ max14577->device_id = ((reg_data & DEVID_DEVICEID_MASK) >>
+ DEVID_DEVICEID_SHIFT);
+ dev_info(max14577->dev, "Device ID: 0x%x, vendor: 0x%x\n",
+ max14577->device_id, max14577->vendor_id);
+
+ ret = regmap_add_irq_chip(max14577->regmap, max14577->irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 0,
+ &max14577_irq_chip,
+ &max14577->irq_data);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n",
+ max14577->irq, ret);
+ return ret;
+ }
+
+ ret = mfd_add_devices(max14577->dev, -1, max14577_devs,
+ ARRAY_SIZE(max14577_devs), NULL, 0,
+ regmap_irq_get_domain(max14577->irq_data));
+ if (ret < 0)
+ goto err_mfd;
+
+ device_init_wakeup(max14577->dev, 1);
+
+ return 0;
+
+err_mfd:
+ regmap_del_irq_chip(max14577->irq, max14577->irq_data);
+
+ return ret;
+}
+
+static int max14577_i2c_remove(struct i2c_client *i2c)
+{
+ struct max14577 *max14577 = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(max14577->dev);
+ regmap_del_irq_chip(max14577->irq, max14577->irq_data);
+
+ return 0;
+}
+
+static const struct i2c_device_id max14577_i2c_id[] = {
+ { "max14577", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max14577_i2c_id);
+
+static int max14577_suspend(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max14577 *max14577 = i2c_get_clientdata(i2c);
+
+ if (device_may_wakeup(dev)) {
+ enable_irq_wake(max14577->irq);
+ /*
+ * MUIC IRQ must be disabled during suspend if this is
+ * a wake up source because it will be handled before
+ * resuming I2C.
+ *
+		 * When the device is woken up from suspend (e.g. by an ADC change),
+		 * an interrupt occurs before the I2C bus controller resumes.
+		 * The interrupt handler tries to read registers, but this read
+		 * will fail because I2C is still suspended.
+ */
+ disable_irq(max14577->irq);
+ }
+
+ return 0;
+}
+
+static int max14577_resume(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max14577 *max14577 = i2c_get_clientdata(i2c);
+
+ if (device_may_wakeup(dev)) {
+ disable_irq_wake(max14577->irq);
+ enable_irq(max14577->irq);
+ }
+
+ return 0;
+}
+
+static struct of_device_id max14577_dt_match[] = {
+ { .compatible = "maxim,max14577", },
+ {},
+};
+
+static SIMPLE_DEV_PM_OPS(max14577_pm, max14577_suspend, max14577_resume);
+
+static struct i2c_driver max14577_i2c_driver = {
+ .driver = {
+ .name = "max14577",
+ .owner = THIS_MODULE,
+ .pm = &max14577_pm,
+ .of_match_table = max14577_dt_match,
+ },
+ .probe = max14577_i2c_probe,
+ .remove = max14577_i2c_remove,
+ .id_table = max14577_i2c_id,
+};
+
+static int __init max14577_i2c_init(void)
+{
+ return i2c_add_driver(&max14577_i2c_driver);
+}
+subsys_initcall(max14577_i2c_init);
+
+static void __exit max14577_i2c_exit(void)
+{
+ i2c_del_driver(&max14577_i2c_driver);
+}
+module_exit(max14577_i2c_exit);
+
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>, Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_DESCRIPTION("MAXIM 14577 multi-function core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index 34520cbe8afb..f53d5823a3f7 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -35,7 +35,7 @@
#define I2C_ADDR_RTC (0x0C >> 1)
-static struct mfd_cell max77686_devs[] = {
+static const struct mfd_cell max77686_devs[] = {
{ .name = "max77686-pmic", },
{ .name = "max77686-rtc", },
{ .name = "max77686-clk", },
@@ -104,7 +104,7 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
max77686->irq_gpio = pdata->irq_gpio;
max77686->irq = i2c->irq;
- max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config);
+ max77686->regmap = devm_regmap_init_i2c(i2c, &max77686_regmap_config);
if (IS_ERR(max77686->regmap)) {
ret = PTR_ERR(max77686->regmap);
dev_err(max77686->dev, "Failed to allocate register map: %d\n",
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index 9f92463f4f7e..e0859987ab6b 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -41,7 +41,7 @@
#define I2C_ADDR_MUIC (0x4A >> 1)
#define I2C_ADDR_HAPTIC (0x90 >> 1)
-static struct mfd_cell max77693_devs[] = {
+static const struct mfd_cell max77693_devs[] = {
{ .name = "max77693-pmic", },
{ .name = "max77693-charger", },
{ .name = "max77693-flash", },
@@ -107,6 +107,12 @@ static const struct regmap_config max77693_regmap_config = {
.max_register = MAX77693_PMIC_REG_END,
};
+static const struct regmap_config max77693_regmap_muic_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77693_MUIC_REG_END,
+};
+
static int max77693_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -153,7 +159,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
* before call max77693-muic probe() function.
*/
max77693->regmap_muic = devm_regmap_init_i2c(max77693->muic,
- &max77693_regmap_config);
+ &max77693_regmap_muic_config);
if (IS_ERR(max77693->regmap_muic)) {
ret = PTR_ERR(max77693->regmap_muic);
dev_err(max77693->dev,
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index 3bbfedc07f41..07740314b29d 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -22,7 +22,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-static struct mfd_cell max8907_cells[] = {
+static const struct mfd_cell max8907_cells[] = {
{ .name = "max8907-regulator", },
{ .name = "max8907-rtc", },
};
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index f0cc40296d8c..f3faf0c45ddd 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -45,7 +45,7 @@ static struct resource touch_resources[] = {
},
};
-static struct mfd_cell touch_devs[] = {
+static const struct mfd_cell touch_devs[] = {
{
.name = "max8925-touch",
.num_resources = 1,
@@ -63,7 +63,7 @@ static struct resource power_supply_resources[] = {
},
};
-static struct mfd_cell power_devs[] = {
+static const struct mfd_cell power_devs[] = {
{
.name = "max8925-power",
.num_resources = 1,
@@ -81,7 +81,7 @@ static struct resource rtc_resources[] = {
},
};
-static struct mfd_cell rtc_devs[] = {
+static const struct mfd_cell rtc_devs[] = {
{
.name = "max8925-rtc",
.num_resources = 1,
@@ -104,7 +104,7 @@ static struct resource onkey_resources[] = {
},
};
-static struct mfd_cell onkey_devs[] = {
+static const struct mfd_cell onkey_devs[] = {
{
.name = "max8925-onkey",
.num_resources = 2,
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 791aea3e96ce..be88a3bf7b85 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -40,7 +40,7 @@
#define I2C_ADDR_RTC (0x0C >> 1)
#define I2C_ADDR_HAPTIC (0x90 >> 1)
-static struct mfd_cell max8997_devs[] = {
+static const struct mfd_cell max8997_devs[] = {
{ .name = "max8997-pmic", },
{ .name = "max8997-rtc", },
{ .name = "max8997-battery", },
@@ -133,7 +133,6 @@ int max8997_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask)
}
EXPORT_SYMBOL_GPL(max8997_update_reg);
-#ifdef CONFIG_OF
/*
* Only the common platform data elements for max8997 are parsed here from the
* device tree. Other sub-modules of max8997 such as pmic, rtc and others have
@@ -164,24 +163,15 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
return pd;
}
-#else
-static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
- struct device *dev)
-{
- return 0;
-}
-#endif
static inline int max8997_i2c_get_driver_data(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
-#ifdef CONFIG_OF
- if (i2c->dev.of_node) {
+ if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(max8997_pmic_dt_match, i2c->dev.of_node);
return (int)match->data;
}
-#endif
return (int)id->driver_data;
}
@@ -203,7 +193,7 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
max8997->type = max8997_i2c_get_driver_data(i2c, id);
max8997->irq = i2c->irq;
- if (max8997->dev->of_node) {
+ if (IS_ENABLED(CONFIG_OF) && max8997->dev->of_node) {
pdata = max8997_i2c_parse_dt_pdata(max8997->dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
@@ -228,18 +218,19 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
max8997_irq_init(max8997);
- mfd_add_devices(max8997->dev, -1, max8997_devs,
+ ret = mfd_add_devices(max8997->dev, -1, max8997_devs,
ARRAY_SIZE(max8997_devs),
NULL, 0, NULL);
+ if (ret < 0) {
+ dev_err(max8997->dev, "failed to add MFD devices %d\n", ret);
+ goto err_mfd;
+ }
/*
* TODO: enable others (flash, muic, rtc, battery, ...) and
* check the return value
*/
- if (ret < 0)
- goto err_mfd;
-
/* MAX8997 has a power button input. */
device_init_wakeup(max8997->dev, pdata->wakeup);
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index fe6332dcabee..612ca404e150 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -37,7 +37,7 @@
#define RTC_I2C_ADDR (0x0c >> 1)
-static struct mfd_cell max8998_devs[] = {
+static const struct mfd_cell max8998_devs[] = {
{
.name = "max8998-pmic",
}, {
@@ -47,7 +47,7 @@ static struct mfd_cell max8998_devs[] = {
},
};
-static struct mfd_cell lp3974_devs[] = {
+static const struct mfd_cell lp3974_devs[] = {
{
.name = "lp3974-pmic",
}, {
@@ -175,7 +175,7 @@ static inline int max8998_i2c_get_driver_data(struct i2c_client *i2c,
if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(max8998_dt_match, i2c->dev.of_node);
- return (int)match->data;
+ return (int)(long)match->data;
}
return (int)id->driver_data;
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index dbbf8ee3f592..06e64b6fcb89 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -158,9 +158,6 @@ int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
{
int ret;
- if (offset > MC13XXX_NUMREGS)
- return -EINVAL;
-
ret = regmap_read(mc13xxx->regmap, offset, val);
dev_vdbg(mc13xxx->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
@@ -172,7 +169,7 @@ int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val)
{
dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x\n", offset, val);
- if (offset > MC13XXX_NUMREGS || val > 0xffffff)
+ if (val >= BIT(24))
return -EINVAL;
return regmap_write(mc13xxx->regmap, offset, val);
@@ -639,42 +636,36 @@ static inline int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx)
}
#endif
-int mc13xxx_common_init(struct mc13xxx *mc13xxx,
- struct mc13xxx_platform_data *pdata, int irq)
+int mc13xxx_common_init(struct device *dev)
{
+ struct mc13xxx_platform_data *pdata = dev_get_platdata(dev);
+ struct mc13xxx *mc13xxx = dev_get_drvdata(dev);
int ret;
u32 revision;
- mc13xxx_lock(mc13xxx);
+ mc13xxx->dev = dev;
ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision);
if (ret)
- goto err_revision;
+ return ret;
mc13xxx->variant->print_revision(mc13xxx, revision);
/* mask all irqs */
ret = mc13xxx_reg_write(mc13xxx, MC13XXX_IRQMASK0, 0x00ffffff);
if (ret)
- goto err_mask;
+ return ret;
ret = mc13xxx_reg_write(mc13xxx, MC13XXX_IRQMASK1, 0x00ffffff);
if (ret)
- goto err_mask;
+ return ret;
- ret = request_threaded_irq(irq, NULL, mc13xxx_irq_thread,
+ ret = request_threaded_irq(mc13xxx->irq, NULL, mc13xxx_irq_thread,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH, "mc13xxx", mc13xxx);
-
- if (ret) {
-err_mask:
-err_revision:
- mc13xxx_unlock(mc13xxx);
+ if (ret)
return ret;
- }
- mc13xxx->irq = irq;
-
- mc13xxx_unlock(mc13xxx);
+ mutex_init(&mc13xxx->lock);
if (mc13xxx_probe_flags_dt(mc13xxx) < 0 && pdata)
mc13xxx->flags = pdata->flags;
@@ -710,13 +701,17 @@ err_revision:
}
EXPORT_SYMBOL_GPL(mc13xxx_common_init);
-void mc13xxx_common_cleanup(struct mc13xxx *mc13xxx)
+int mc13xxx_common_exit(struct device *dev)
{
+ struct mc13xxx *mc13xxx = dev_get_drvdata(dev);
+
free_irq(mc13xxx->irq, mc13xxx);
+ mfd_remove_devices(dev);
+ mutex_destroy(&mc13xxx->lock);
- mfd_remove_devices(mc13xxx->dev);
+ return 0;
}
-EXPORT_SYMBOL_GPL(mc13xxx_common_cleanup);
+EXPORT_SYMBOL_GPL(mc13xxx_common_exit);
MODULE_DESCRIPTION("Core driver for Freescale MC13XXX PMIC");
MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index 898bd335cd8e..ae3addb153a2 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -10,7 +10,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/mutex.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mc13xxx.h>
#include <linux/of.h>
@@ -60,7 +59,6 @@ static int mc13xxx_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct mc13xxx *mc13xxx;
- struct mc13xxx_platform_data *pdata = dev_get_platdata(&client->dev);
int ret;
mc13xxx = devm_kzalloc(&client->dev, sizeof(*mc13xxx), GFP_KERNEL);
@@ -69,15 +67,13 @@ static int mc13xxx_i2c_probe(struct i2c_client *client,
dev_set_drvdata(&client->dev, mc13xxx);
- mc13xxx->dev = &client->dev;
- mutex_init(&mc13xxx->lock);
+ mc13xxx->irq = client->irq;
mc13xxx->regmap = devm_regmap_init_i2c(client,
&mc13xxx_regmap_i2c_config);
if (IS_ERR(mc13xxx->regmap)) {
ret = PTR_ERR(mc13xxx->regmap);
- dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
- ret);
+ dev_err(&client->dev, "Failed to initialize regmap: %d\n", ret);
return ret;
}
@@ -89,18 +85,12 @@ static int mc13xxx_i2c_probe(struct i2c_client *client,
mc13xxx->variant = (void *)id->driver_data;
}
- ret = mc13xxx_common_init(mc13xxx, pdata, client->irq);
-
- return ret;
+ return mc13xxx_common_init(&client->dev);
}
static int mc13xxx_i2c_remove(struct i2c_client *client)
{
- struct mc13xxx *mc13xxx = dev_get_drvdata(&client->dev);
-
- mc13xxx_common_cleanup(mc13xxx);
-
- return 0;
+ return mc13xxx_common_exit(&client->dev);
}
static struct i2c_driver mc13xxx_i2c_driver = {
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index 5f14ef6693c2..38ab67829791 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -13,7 +13,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mc13xxx.h>
@@ -129,27 +128,24 @@ static struct regmap_bus regmap_mc13xxx_bus = {
static int mc13xxx_spi_probe(struct spi_device *spi)
{
struct mc13xxx *mc13xxx;
- struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
int ret;
mc13xxx = devm_kzalloc(&spi->dev, sizeof(*mc13xxx), GFP_KERNEL);
if (!mc13xxx)
return -ENOMEM;
- spi_set_drvdata(spi, mc13xxx);
+ dev_set_drvdata(&spi->dev, mc13xxx);
+
spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
- mc13xxx->dev = &spi->dev;
- mutex_init(&mc13xxx->lock);
+ mc13xxx->irq = spi->irq;
mc13xxx->regmap = devm_regmap_init(&spi->dev, &regmap_mc13xxx_bus,
&spi->dev,
&mc13xxx_regmap_spi_config);
if (IS_ERR(mc13xxx->regmap)) {
ret = PTR_ERR(mc13xxx->regmap);
- dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
- ret);
- spi_set_drvdata(spi, NULL);
+ dev_err(&spi->dev, "Failed to initialize regmap: %d\n", ret);
return ret;
}
@@ -164,16 +160,12 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
mc13xxx->variant = (void *)id_entry->driver_data;
}
- return mc13xxx_common_init(mc13xxx, pdata, spi->irq);
+ return mc13xxx_common_init(&spi->dev);
}
static int mc13xxx_spi_remove(struct spi_device *spi)
{
- struct mc13xxx *mc13xxx = spi_get_drvdata(spi);
-
- mc13xxx_common_cleanup(mc13xxx);
-
- return 0;
+ return mc13xxx_common_exit(&spi->dev);
}
static struct spi_driver mc13xxx_spi_driver = {
diff --git a/drivers/mfd/mc13xxx.h b/drivers/mfd/mc13xxx.h
index 460ec5c7b18c..ae7f1659f5d1 100644
--- a/drivers/mfd/mc13xxx.h
+++ b/drivers/mfd/mc13xxx.h
@@ -43,9 +43,7 @@ struct mc13xxx {
int adcflags;
};
-int mc13xxx_common_init(struct mc13xxx *mc13xxx,
- struct mc13xxx_platform_data *pdata, int irq);
-
-void mc13xxx_common_cleanup(struct mc13xxx *mc13xxx);
+int mc13xxx_common_init(struct device *dev);
+int mc13xxx_common_exit(struct device *dev);
#endif /* __DRIVERS_MFD_MC13XXX_H */
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 142650fdc058..90b630ccc8bc 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -121,22 +121,22 @@ static u64 usbhs_dmamask = DMA_BIT_MASK(32);
static inline void usbhs_write(void __iomem *base, u32 reg, u32 val)
{
- __raw_writel(val, base + reg);
+ writel_relaxed(val, base + reg);
}
static inline u32 usbhs_read(void __iomem *base, u32 reg)
{
- return __raw_readl(base + reg);
+ return readl_relaxed(base + reg);
}
static inline void usbhs_writeb(void __iomem *base, u8 reg, u8 val)
{
- __raw_writeb(val, base + reg);
+ writeb_relaxed(val, base + reg);
}
static inline u8 usbhs_readb(void __iomem *base, u8 reg)
{
- return __raw_readb(base + reg);
+ return readb_relaxed(base + reg);
}
/*-------------------------------------------------------------------------*/
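
The omap-usb-host accessors above move from __raw_readl()/__raw_writel() to readl_relaxed()/writel_relaxed(); the _relaxed forms still avoid the full barriers of readl()/writel() but perform the little-endian conversion the __raw_* variants skip, so the wrappers behave the same on big-endian kernels. A sketch of the wrapper idea in plain C, with simulated accessors and a fake register window instead of real MMIO:

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-ins for readl_relaxed()/writel_relaxed(): on a little-endian host
 * they degenerate to plain loads/stores; a big-endian kernel would swap.
 */
static inline void writel_relaxed_sim(uint32_t val, volatile uint32_t *addr)
{
	*addr = val;
}

static inline uint32_t readl_relaxed_sim(volatile uint32_t *addr)
{
	return *addr;
}

/* Per-IP wrappers mirroring usbhs_write()/usbhs_read(). */
static inline void usbhs_write(volatile uint32_t *base, unsigned reg, uint32_t val)
{
	writel_relaxed_sim(val, base + reg / 4);
}

static inline uint32_t usbhs_read(volatile uint32_t *base, unsigned reg)
{
	return readl_relaxed_sim(base + reg / 4);
}

int main(void)
{
	static volatile uint32_t regs[16];	/* fake register window */

	usbhs_write(regs, 0x08, 0xdeadbeef);
	printf("0x%08x\n", (unsigned)usbhs_read(regs, 0x08));
	return 0;
}
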
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 0d946ae14453..5ee50f779ef6 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -121,22 +121,22 @@ static DEFINE_SPINLOCK(tll_lock); /* serialize access to tll_dev */
static inline void usbtll_write(void __iomem *base, u32 reg, u32 val)
{
- __raw_writel(val, base + reg);
+ writel_relaxed(val, base + reg);
}
static inline u32 usbtll_read(void __iomem *base, u32 reg)
{
- return __raw_readl(base + reg);
+ return readl_relaxed(base + reg);
}
static inline void usbtll_writeb(void __iomem *base, u8 reg, u8 val)
{
- __raw_writeb(val, base + reg);
+ writeb_relaxed(val, base + reg);
}
static inline u8 usbtll_readb(void __iomem *base, u8 reg)
{
- return __raw_readb(base + reg);
+ return readb_relaxed(base + reg);
}
/*-------------------------------------------------------------------------*/
@@ -333,21 +333,17 @@ int omap_tll_init(struct usbhs_omap_platform_data *pdata)
unsigned reg;
struct usbtll_omap *tll;
- spin_lock(&tll_lock);
-
- if (!tll_dev) {
- spin_unlock(&tll_lock);
+ if (!tll_dev)
return -ENODEV;
- }
- tll = dev_get_drvdata(tll_dev);
+ pm_runtime_get_sync(tll_dev);
+ spin_lock(&tll_lock);
+ tll = dev_get_drvdata(tll_dev);
needs_tll = false;
for (i = 0; i < tll->nch; i++)
needs_tll |= omap_usb_mode_needs_tll(pdata->port_mode[i]);
- pm_runtime_get_sync(tll_dev);
-
if (needs_tll) {
void __iomem *base = tll->base;
@@ -398,9 +394,8 @@ int omap_tll_init(struct usbhs_omap_platform_data *pdata)
}
}
- pm_runtime_put_sync(tll_dev);
-
spin_unlock(&tll_lock);
+ pm_runtime_put_sync(tll_dev);
return 0;
}
@@ -411,17 +406,14 @@ int omap_tll_enable(struct usbhs_omap_platform_data *pdata)
int i;
struct usbtll_omap *tll;
- spin_lock(&tll_lock);
-
- if (!tll_dev) {
- spin_unlock(&tll_lock);
+ if (!tll_dev)
return -ENODEV;
- }
-
- tll = dev_get_drvdata(tll_dev);
pm_runtime_get_sync(tll_dev);
+ spin_lock(&tll_lock);
+ tll = dev_get_drvdata(tll_dev);
+
for (i = 0; i < tll->nch; i++) {
if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
int r;
@@ -448,13 +440,10 @@ int omap_tll_disable(struct usbhs_omap_platform_data *pdata)
int i;
struct usbtll_omap *tll;
- spin_lock(&tll_lock);
-
- if (!tll_dev) {
- spin_unlock(&tll_lock);
+ if (!tll_dev)
return -ENODEV;
- }
+ spin_lock(&tll_lock);
tll = dev_get_drvdata(tll_dev);
for (i = 0; i < tll->nch; i++) {
@@ -464,9 +453,8 @@ int omap_tll_disable(struct usbhs_omap_platform_data *pdata)
}
}
- pm_runtime_put_sync(tll_dev);
-
spin_unlock(&tll_lock);
+ pm_runtime_put_sync(tll_dev);
return 0;
}
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 6841d6805fd6..41ab5e34d2ac 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -245,7 +245,7 @@ static int pcf50633_probe(struct i2c_client *client,
for (i = 0; i < PCF50633_NUM_REGULATORS; i++) {
struct platform_device *pdev;
- pdev = platform_device_alloc("pcf50633-regltr", i);
+ pdev = platform_device_alloc("pcf50633-regulator", i);
if (!pdev) {
dev_err(pcf->dev, "Cannot create regulator %d\n", i);
continue;
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index 346330176afc..df276ad9f40b 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -74,7 +74,7 @@ static struct deepsleep_control_data deepsleep_data[] = {
#define EXT_PWR_REQ \
(RC5T583_EXT_PWRREQ1_CONTROL | RC5T583_EXT_PWRREQ2_CONTROL)
-static struct mfd_cell rc5t583_subdevs[] = {
+static const struct mfd_cell rc5t583_subdevs[] = {
{.name = "rc5t583-gpio",},
{.name = "rc5t583-regulator",},
{.name = "rc5t583-rtc", },
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
index 21b7bef73507..d346146249a2 100644
--- a/drivers/mfd/rdc321x-southbridge.c
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -56,7 +56,7 @@ static struct resource rdc321x_gpio_resources[] = {
}
};
-static struct mfd_cell rdc321x_sb_cells[] = {
+static const struct mfd_cell rdc321x_sb_cells[] = {
{
.name = "rdc321x-wdt",
.resources = rdc321x_wdt_resource,
@@ -96,7 +96,7 @@ static void rdc321x_sb_remove(struct pci_dev *pdev)
mfd_remove_devices(&pdev->dev);
}
-static DEFINE_PCI_DEVICE_TABLE(rdc321x_sb_table) = {
+static const struct pci_device_id rdc321x_sb_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030) },
{}
};
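
Several files in this series (rdc321x, sm501, sta2x11, timberdale, rtsx_pcr) replace DEFINE_PCI_DEVICE_TABLE() with an open-coded const struct pci_device_id array, the form the kernel was standardising on. The essential shape of such an ID table, with a zeroed sentinel entry terminating it, looks like the following stand-alone sketch (hypothetical IDs and a simplified struct):

#include <stdio.h>

struct pci_device_id_sketch {
	unsigned vendor, device;
};

static const struct pci_device_id_sketch example_tbl[] = {
	{ 0x17f3, 0x6030 },	/* hypothetical southbridge ID pair */
	{ 0, 0 }		/* sentinel: all-zero entry ends the table */
};

int main(void)
{
	const struct pci_device_id_sketch *id;

	for (id = example_tbl; id->vendor; id++)
		printf("match %04x:%04x\n", id->vendor, id->device);
	return 0;
}
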
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
index a1830986eeb7..c8f345f7e9a2 100644
--- a/drivers/mfd/retu-mfd.c
+++ b/drivers/mfd/retu-mfd.c
@@ -55,7 +55,7 @@ static struct resource retu_pwrbutton_res[] = {
},
};
-static struct mfd_cell retu_devs[] = {
+static const struct mfd_cell retu_devs[] = {
{
.name = "retu-wdt"
},
@@ -94,7 +94,7 @@ static struct resource tahvo_usb_res[] = {
},
};
-static struct mfd_cell tahvo_devs[] = {
+static const struct mfd_cell tahvo_devs[] = {
{
.name = "tahvo-usb",
.resources = tahvo_usb_res,
@@ -122,7 +122,7 @@ static const struct retu_data {
char *chip_name;
char *companion_name;
struct regmap_irq_chip *irq_chip;
- struct mfd_cell *children;
+ const struct mfd_cell *children;
int nchildren;
} retu_data[] = {
[0] = {
diff --git a/drivers/mfd/rtl8411.c b/drivers/mfd/rtl8411.c
index 52801351864d..fdd34c883d86 100644
--- a/drivers/mfd/rtl8411.c
+++ b/drivers/mfd/rtl8411.c
@@ -49,8 +49,8 @@ static int rtl8411b_is_qfn48(struct rtsx_pcr *pcr)
static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
- u32 reg1;
- u8 reg3;
+ u32 reg1 = 0;
+ u8 reg3 = 0;
rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg1);
dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg1);
@@ -71,7 +71,7 @@ static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr)
static void rtl8411b_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
- u32 reg;
+ u32 reg = 0;
rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
@@ -191,24 +191,25 @@ static int rtl8411_card_power_off(struct rtsx_pcr *pcr, int card)
BPP_LDO_POWB, BPP_LDO_SUSPEND);
}
-static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+static int rtl8411_do_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage,
+ int bpp_tuned18_shift, int bpp_asic_1v8)
{
u8 mask, val;
int err;
- mask = (BPP_REG_TUNED18 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_MASK;
+ mask = (BPP_REG_TUNED18 << bpp_tuned18_shift) | BPP_PAD_MASK;
if (voltage == OUTPUT_3V3) {
err = rtsx_pci_write_register(pcr,
SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3);
if (err < 0)
return err;
- val = (BPP_ASIC_3V3 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_3V3;
+ val = (BPP_ASIC_3V3 << bpp_tuned18_shift) | BPP_PAD_3V3;
} else if (voltage == OUTPUT_1V8) {
err = rtsx_pci_write_register(pcr,
SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8);
if (err < 0)
return err;
- val = (BPP_ASIC_1V8 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_1V8;
+ val = (bpp_asic_1v8 << bpp_tuned18_shift) | BPP_PAD_1V8;
} else {
return -EINVAL;
}
@@ -216,6 +217,18 @@ static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
return rtsx_pci_write_register(pcr, LDO_CTL, mask, val);
}
+static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ return rtl8411_do_switch_output_voltage(pcr, voltage,
+ BPP_TUNED18_SHIFT_8411, BPP_ASIC_1V8);
+}
+
+static int rtl8402_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ return rtl8411_do_switch_output_voltage(pcr, voltage,
+ BPP_TUNED18_SHIFT_8402, BPP_ASIC_2V0);
+}
+
static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)
{
unsigned int card_exist;
@@ -295,6 +308,22 @@ static const struct pcr_ops rtl8411_pcr_ops = {
.force_power_down = rtl8411_force_power_down,
};
+static const struct pcr_ops rtl8402_pcr_ops = {
+ .fetch_vendor_settings = rtl8411_fetch_vendor_settings,
+ .extra_init_hw = rtl8411_extra_init_hw,
+ .optimize_phy = NULL,
+ .turn_on_led = rtl8411_turn_on_led,
+ .turn_off_led = rtl8411_turn_off_led,
+ .enable_auto_blink = rtl8411_enable_auto_blink,
+ .disable_auto_blink = rtl8411_disable_auto_blink,
+ .card_power_on = rtl8411_card_power_on,
+ .card_power_off = rtl8411_card_power_off,
+ .switch_output_voltage = rtl8402_switch_output_voltage,
+ .cd_deglitch = rtl8411_cd_deglitch,
+ .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
+ .force_power_down = rtl8411_force_power_down,
+};
+
static const struct pcr_ops rtl8411b_pcr_ops = {
.fetch_vendor_settings = rtl8411b_fetch_vendor_settings,
.extra_init_hw = rtl8411b_extra_init_hw,
@@ -441,12 +470,10 @@ static const u32 rtl8411b_qfn48_ms_pull_ctl_disable_tbl[] = {
0,
};
-void rtl8411_init_params(struct rtsx_pcr *pcr)
+static void rtl8411_init_common_params(struct rtsx_pcr *pcr)
{
pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
pcr->num_slots = 2;
- pcr->ops = &rtl8411_pcr_ops;
-
pcr->flags = 0;
pcr->card_drive_sel = RTL8411_CARD_DRIVE_DEFAULT;
pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
@@ -454,47 +481,29 @@ void rtl8411_init_params(struct rtsx_pcr *pcr)
pcr->aspm_en = ASPM_L1_EN;
pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14);
pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10);
-
pcr->ic_version = rtl8411_get_ic_version(pcr);
- pcr->sd_pull_ctl_enable_tbl = rtl8411_sd_pull_ctl_enable_tbl;
- pcr->sd_pull_ctl_disable_tbl = rtl8411_sd_pull_ctl_disable_tbl;
- pcr->ms_pull_ctl_enable_tbl = rtl8411_ms_pull_ctl_enable_tbl;
- pcr->ms_pull_ctl_disable_tbl = rtl8411_ms_pull_ctl_disable_tbl;
+}
+
+void rtl8411_init_params(struct rtsx_pcr *pcr)
+{
+ rtl8411_init_common_params(pcr);
+ pcr->ops = &rtl8411_pcr_ops;
+ set_pull_ctrl_tables(pcr, rtl8411);
}
void rtl8411b_init_params(struct rtsx_pcr *pcr)
{
- pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
- pcr->num_slots = 2;
+ rtl8411_init_common_params(pcr);
pcr->ops = &rtl8411b_pcr_ops;
+ if (rtl8411b_is_qfn48(pcr))
+ set_pull_ctrl_tables(pcr, rtl8411b_qfn48);
+ else
+ set_pull_ctrl_tables(pcr, rtl8411b_qfn64);
+}
- pcr->flags = 0;
- pcr->card_drive_sel = RTL8411_CARD_DRIVE_DEFAULT;
- pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
- pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
- pcr->aspm_en = ASPM_L1_EN;
- pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14);
- pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10);
-
- pcr->ic_version = rtl8411_get_ic_version(pcr);
-
- if (rtl8411b_is_qfn48(pcr)) {
- pcr->sd_pull_ctl_enable_tbl =
- rtl8411b_qfn48_sd_pull_ctl_enable_tbl;
- pcr->sd_pull_ctl_disable_tbl =
- rtl8411b_qfn48_sd_pull_ctl_disable_tbl;
- pcr->ms_pull_ctl_enable_tbl =
- rtl8411b_qfn48_ms_pull_ctl_enable_tbl;
- pcr->ms_pull_ctl_disable_tbl =
- rtl8411b_qfn48_ms_pull_ctl_disable_tbl;
- } else {
- pcr->sd_pull_ctl_enable_tbl =
- rtl8411b_qfn64_sd_pull_ctl_enable_tbl;
- pcr->sd_pull_ctl_disable_tbl =
- rtl8411b_qfn64_sd_pull_ctl_disable_tbl;
- pcr->ms_pull_ctl_enable_tbl =
- rtl8411b_qfn64_ms_pull_ctl_enable_tbl;
- pcr->ms_pull_ctl_disable_tbl =
- rtl8411b_qfn64_ms_pull_ctl_disable_tbl;
- }
+void rtl8402_init_params(struct rtsx_pcr *pcr)
+{
+ rtl8411_init_common_params(pcr);
+ pcr->ops = &rtl8402_pcr_ops;
+ set_pull_ctrl_tables(pcr, rtl8411);
}
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 11e20afbdcac..1d15735f9ef9 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -50,13 +50,14 @@ static struct mfd_cell rtsx_pcr_cells[] = {
},
};
-static DEFINE_PCI_DEVICE_TABLE(rtsx_pci_ids) = {
+static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -1061,6 +1062,10 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5287:
rtl8411b_init_params(pcr);
break;
+
+ case 0x5286:
+ rtl8402_init_params(pcr);
+ break;
}
dev_dbg(&(pcr->pci->dev), "PID: 0x%04x, IC version: 0x%02x\n",
@@ -1228,8 +1233,14 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
pcr->remove_pci = true;
- cancel_delayed_work(&pcr->carddet_work);
- cancel_delayed_work(&pcr->idle_work);
+ /* Disable interrupts at the pcr level */
+ spin_lock_irq(&pcr->lock);
+ rtsx_pci_writel(pcr, RTSX_BIER, 0);
+ pcr->bier = 0;
+ spin_unlock_irq(&pcr->lock);
+
+ cancel_delayed_work_sync(&pcr->carddet_work);
+ cancel_delayed_work_sync(&pcr->idle_work);
mfd_remove_devices(&pcidev->dev);
diff --git a/drivers/mfd/rtsx_pcr.h b/drivers/mfd/rtsx_pcr.h
index 947e79b05ceb..07e4c2ebf05a 100644
--- a/drivers/mfd/rtsx_pcr.h
+++ b/drivers/mfd/rtsx_pcr.h
@@ -30,6 +30,7 @@
void rts5209_init_params(struct rtsx_pcr *pcr);
void rts5229_init_params(struct rtsx_pcr *pcr);
void rtl8411_init_params(struct rtsx_pcr *pcr);
+void rtl8402_init_params(struct rtsx_pcr *pcr);
void rts5227_init_params(struct rtsx_pcr *pcr);
void rts5249_init_params(struct rtsx_pcr *pcr);
void rtl8411b_init_params(struct rtsx_pcr *pcr);
@@ -63,4 +64,12 @@ static inline u8 map_sd_drive(int idx)
#define rtl8411_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x07)
#define rtl8411b_reg_to_sd30_drive_sel_3v3(reg) ((reg) & 0x03)
+#define set_pull_ctrl_tables(pcr, __device) \
+do { \
+ pcr->sd_pull_ctl_enable_tbl = __device##_sd_pull_ctl_enable_tbl; \
+ pcr->sd_pull_ctl_disable_tbl = __device##_sd_pull_ctl_disable_tbl; \
+ pcr->ms_pull_ctl_enable_tbl = __device##_ms_pull_ctl_enable_tbl; \
+ pcr->ms_pull_ctl_disable_tbl = __device##_ms_pull_ctl_disable_tbl; \
+} while (0)
+
#endif
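
The set_pull_ctrl_tables() macro added to rtsx_pcr.h uses ## token pasting so a single argument names all of a chip variant's pull-control tables, which is what lets the rtl8411/rtl8411b/rtl8402 init functions shrink to a few lines. A small stand-alone sketch of the same trick, with hypothetical table names and only two tables instead of four:

#include <stdio.h>

struct pcr {
	const unsigned *sd_pull_ctl_enable_tbl;
	const unsigned *sd_pull_ctl_disable_tbl;
};

/* Hypothetical per-variant tables; the real driver has four per variant. */
static const unsigned chip_a_sd_pull_ctl_enable_tbl[]  = { 0xaa, 0 };
static const unsigned chip_a_sd_pull_ctl_disable_tbl[] = { 0x55, 0 };

/* One argument expands into every table name for that variant. */
#define set_pull_ctrl_tables(pcr, __device)					\
do {										\
	(pcr)->sd_pull_ctl_enable_tbl  = __device##_sd_pull_ctl_enable_tbl;	\
	(pcr)->sd_pull_ctl_disable_tbl = __device##_sd_pull_ctl_disable_tbl;	\
} while (0)

int main(void)
{
	struct pcr pcr;

	set_pull_ctrl_tables(&pcr, chip_a);
	printf("enable[0] = 0x%x\n", pcr.sd_pull_ctl_enable_tbl[0]);
	return 0;
}
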
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 54cc25546592..a139798b8065 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -31,7 +31,7 @@
#include <linux/mfd/samsung/s5m8767.h>
#include <linux/regmap.h>
-static struct mfd_cell s5m8751_devs[] = {
+static const struct mfd_cell s5m8751_devs[] = {
{
.name = "s5m8751-pmic",
}, {
@@ -41,7 +41,7 @@ static struct mfd_cell s5m8751_devs[] = {
},
};
-static struct mfd_cell s5m8763_devs[] = {
+static const struct mfd_cell s5m8763_devs[] = {
{
.name = "s5m8763-pmic",
}, {
@@ -51,15 +51,17 @@ static struct mfd_cell s5m8763_devs[] = {
},
};
-static struct mfd_cell s5m8767_devs[] = {
+static const struct mfd_cell s5m8767_devs[] = {
{
.name = "s5m8767-pmic",
}, {
.name = "s5m-rtc",
- },
+ }, {
+ .name = "s5m8767-clk",
+ }
};
-static struct mfd_cell s2mps11_devs[] = {
+static const struct mfd_cell s2mps11_devs[] = {
{
.name = "s2mps11-pmic",
}, {
@@ -79,36 +81,6 @@ static struct of_device_id sec_dt_match[] = {
};
#endif
-int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest)
-{
- return regmap_read(sec_pmic->regmap_pmic, reg, dest);
-}
-EXPORT_SYMBOL_GPL(sec_reg_read);
-
-int sec_bulk_read(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf)
-{
- return regmap_bulk_read(sec_pmic->regmap_pmic, reg, buf, count);
-}
-EXPORT_SYMBOL_GPL(sec_bulk_read);
-
-int sec_reg_write(struct sec_pmic_dev *sec_pmic, u8 reg, u8 value)
-{
- return regmap_write(sec_pmic->regmap_pmic, reg, value);
-}
-EXPORT_SYMBOL_GPL(sec_reg_write);
-
-int sec_bulk_write(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf)
-{
- return regmap_raw_write(sec_pmic->regmap_pmic, reg, buf, count);
-}
-EXPORT_SYMBOL_GPL(sec_bulk_write);
-
-int sec_reg_update(struct sec_pmic_dev *sec_pmic, u8 reg, u8 val, u8 mask)
-{
- return regmap_update_bits(sec_pmic->regmap_pmic, reg, mask, val);
-}
-EXPORT_SYMBOL_GPL(sec_reg_update);
-
static bool s2mps11_volatile(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -134,12 +106,12 @@ static bool s5m8763_volatile(struct device *dev, unsigned int reg)
}
}
-static struct regmap_config sec_regmap_config = {
+static const struct regmap_config sec_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
-static struct regmap_config s2mps11_regmap_config = {
+static const struct regmap_config s2mps11_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -148,7 +120,7 @@ static struct regmap_config s2mps11_regmap_config = {
.cache_type = REGCACHE_FLAT,
};
-static struct regmap_config s5m8763_regmap_config = {
+static const struct regmap_config s5m8763_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -157,7 +129,7 @@ static struct regmap_config s5m8763_regmap_config = {
.cache_type = REGCACHE_FLAT,
};
-static struct regmap_config s5m8767_regmap_config = {
+static const struct regmap_config s5m8767_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -204,7 +176,7 @@ static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata(
static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata(
struct device *dev)
{
- return 0;
+ return NULL;
}
#endif
@@ -323,6 +295,8 @@ static int sec_pmic_probe(struct i2c_client *i2c,
if (ret)
goto err;
+ device_init_wakeup(sec_pmic->dev, sec_pmic->wakeup);
+
return ret;
err:
@@ -341,6 +315,43 @@ static int sec_pmic_remove(struct i2c_client *i2c)
return 0;
}
+static int sec_pmic_suspend(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct sec_pmic_dev *sec_pmic = i2c_get_clientdata(i2c);
+
+ if (device_may_wakeup(dev)) {
+ enable_irq_wake(sec_pmic->irq);
+ /*
+	 * The PMIC IRQ must be disabled during suspend for the RTC alarm
+	 * to work properly.
+	 * When the device is woken up from suspend by the RTC alarm, an
+	 * interrupt occurs before the I2C bus controller is resumed.
+	 * The interrupt is handled by regmap_irq_thread, which tries to
+	 * read the RTC registers. This read fails (I2C is still suspended)
+	 * and the RTC alarm interrupt ends up disabled.
+ */
+ disable_irq(sec_pmic->irq);
+ }
+
+ return 0;
+}
+
+static int sec_pmic_resume(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct sec_pmic_dev *sec_pmic = i2c_get_clientdata(i2c);
+
+ if (device_may_wakeup(dev)) {
+ disable_irq_wake(sec_pmic->irq);
+ enable_irq(sec_pmic->irq);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops, sec_pmic_suspend, sec_pmic_resume);
+
static const struct i2c_device_id sec_pmic_id[] = {
{ "sec_pmic", 0 },
{ }
@@ -351,6 +362,7 @@ static struct i2c_driver sec_pmic_driver = {
.driver = {
.name = "sec_pmic",
.owner = THIS_MODULE,
+ .pm = &sec_pmic_pm_ops,
.of_match_table = of_match_ptr(sec_dt_match),
},
.probe = sec_pmic_probe,
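
The sec-core hunks add a suspend/resume pair that first arms the PMIC interrupt as a wakeup source and then masks it, so the regmap-irq thread cannot try to touch the still-suspended I2C bus when the RTC alarm fires; resume reverses both steps. A stripped-down sketch of that ordering, with printf stubs standing in for the kernel's enable_irq()/disable_irq() family:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the kernel IRQ API. */
static void enable_irq_wake(int irq)  { printf("irq %d: wake armed\n", irq); }
static void disable_irq_wake(int irq) { printf("irq %d: wake disarmed\n", irq); }
static void disable_irq(int irq)      { printf("irq %d: masked\n", irq); }
static void enable_irq(int irq)       { printf("irq %d: unmasked\n", irq); }

struct pmic { int irq; bool may_wakeup; };

static int pmic_suspend(struct pmic *pmic)
{
	if (pmic->may_wakeup) {
		enable_irq_wake(pmic->irq);
		/* Keep the handler from running before I2C is resumed. */
		disable_irq(pmic->irq);
	}
	return 0;
}

static int pmic_resume(struct pmic *pmic)
{
	if (pmic->may_wakeup) {
		disable_irq_wake(pmic->irq);
		enable_irq(pmic->irq);
	}
	return 0;
}

int main(void)
{
	struct pmic pmic = { .irq = 7, .may_wakeup = true };

	pmic_suspend(&pmic);
	pmic_resume(&pmic);
	return 0;
}
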
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index b441b1be27cb..4de494f51d40 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -22,7 +22,7 @@
#include <linux/mfd/samsung/s5m8763.h>
#include <linux/mfd/samsung/s5m8767.h>
-static struct regmap_irq s2mps11_irqs[] = {
+static const struct regmap_irq s2mps11_irqs[] = {
[S2MPS11_IRQ_PWRONF] = {
.reg_offset = 0,
.mask = S2MPS11_IRQ_PWRONF_MASK,
@@ -90,7 +90,7 @@ static struct regmap_irq s2mps11_irqs[] = {
};
-static struct regmap_irq s5m8767_irqs[] = {
+static const struct regmap_irq s5m8767_irqs[] = {
[S5M8767_IRQ_PWRR] = {
.reg_offset = 0,
.mask = S5M8767_IRQ_PWRR_MASK,
@@ -161,7 +161,7 @@ static struct regmap_irq s5m8767_irqs[] = {
},
};
-static struct regmap_irq s5m8763_irqs[] = {
+static const struct regmap_irq s5m8763_irqs[] = {
[S5M8763_IRQ_DCINF] = {
.reg_offset = 0,
.mask = S5M8763_IRQ_DCINF_MASK,
@@ -236,7 +236,7 @@ static struct regmap_irq s5m8763_irqs[] = {
},
};
-static struct regmap_irq_chip s2mps11_irq_chip = {
+static const struct regmap_irq_chip s2mps11_irq_chip = {
.name = "s2mps11",
.irqs = s2mps11_irqs,
.num_irqs = ARRAY_SIZE(s2mps11_irqs),
@@ -246,7 +246,7 @@ static struct regmap_irq_chip s2mps11_irq_chip = {
.ack_base = S2MPS11_REG_INT1,
};
-static struct regmap_irq_chip s5m8767_irq_chip = {
+static const struct regmap_irq_chip s5m8767_irq_chip = {
.name = "s5m8767",
.irqs = s5m8767_irqs,
.num_irqs = ARRAY_SIZE(s5m8767_irqs),
@@ -256,7 +256,7 @@ static struct regmap_irq_chip s5m8767_irq_chip = {
.ack_base = S5M8767_REG_INT1,
};
-static struct regmap_irq_chip s5m8763_irq_chip = {
+static const struct regmap_irq_chip s5m8763_irq_chip = {
.name = "s5m8763",
.irqs = s5m8763_irqs,
.num_irqs = ARRAY_SIZE(s5m8763_irqs),
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index c2c8c91c6c7b..e7dc441a8f8a 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1710,7 +1710,7 @@ static int sm501_plat_remove(struct platform_device *dev)
return 0;
}
-static DEFINE_PCI_DEVICE_TABLE(sm501_pci_tbl) = {
+static const struct pci_device_id sm501_pci_tbl[] = {
{ 0x126f, 0x0501, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, },
};
diff --git a/drivers/mfd/ssbi.c b/drivers/mfd/ssbi.c
index 102a22844297..b78942ed4c67 100644
--- a/drivers/mfd/ssbi.c
+++ b/drivers/mfd/ssbi.c
@@ -65,13 +65,19 @@
#define SSBI_TIMEOUT_US 100
+enum ssbi_controller_type {
+ MSM_SBI_CTRL_SSBI = 0,
+ MSM_SBI_CTRL_SSBI2,
+ MSM_SBI_CTRL_PMIC_ARBITER,
+};
+
struct ssbi {
struct device *slave;
void __iomem *base;
spinlock_t lock;
enum ssbi_controller_type controller_type;
int (*read)(struct ssbi *, u16 addr, u8 *buf, int len);
- int (*write)(struct ssbi *, u16 addr, u8 *buf, int len);
+ int (*write)(struct ssbi *, u16 addr, const u8 *buf, int len);
};
#define to_ssbi(dev) platform_get_drvdata(to_platform_device(dev))
@@ -140,7 +146,7 @@ err:
}
static int
-ssbi_write_bytes(struct ssbi *ssbi, u16 addr, u8 *buf, int len)
+ssbi_write_bytes(struct ssbi *ssbi, u16 addr, const u8 *buf, int len)
{
int ret = 0;
@@ -217,7 +223,7 @@ err:
}
static int
-ssbi_pa_write_bytes(struct ssbi *ssbi, u16 addr, u8 *buf, int len)
+ssbi_pa_write_bytes(struct ssbi *ssbi, u16 addr, const u8 *buf, int len)
{
u32 cmd;
int ret = 0;
@@ -249,7 +255,7 @@ int ssbi_read(struct device *dev, u16 addr, u8 *buf, int len)
}
EXPORT_SYMBOL_GPL(ssbi_read);
-int ssbi_write(struct device *dev, u16 addr, u8 *buf, int len)
+int ssbi_write(struct device *dev, u16 addr, const u8 *buf, int len)
{
struct ssbi *ssbi = to_ssbi(dev);
unsigned long flags;
@@ -311,7 +317,7 @@ static int ssbi_probe(struct platform_device *pdev)
return of_platform_populate(np, NULL, NULL, &pdev->dev);
}
-static struct of_device_id ssbi_match_table[] = {
+static const struct of_device_id ssbi_match_table[] = {
{ .compatible = "qcom,ssbi" },
{}
};
diff --git a/drivers/mfd/sta2x11-mfd.c b/drivers/mfd/sta2x11-mfd.c
index 65c6fa671acb..5b72db07d9de 100644
--- a/drivers/mfd/sta2x11-mfd.c
+++ b/drivers/mfd/sta2x11-mfd.c
@@ -339,7 +339,7 @@ static int sta2x11_mfd_platform_probe(struct platform_device *dev,
regmap_config->cache_type = REGCACHE_NONE;
mfd->regmap[index] = devm_regmap_init_mmio(&dev->dev, mfd->regs[index],
regmap_config);
- WARN_ON(!mfd->regmap[index]);
+ WARN_ON(IS_ERR(mfd->regmap[index]));
return 0;
}
@@ -529,7 +529,7 @@ static int sta2x11_mfd_resume(struct pci_dev *pdev)
{
int err;
- pci_set_power_state(pdev, 0);
+ pci_set_power_state(pdev, PCI_D0);
err = pci_enable_device(pdev);
if (err)
return err;
@@ -642,7 +642,7 @@ err_disable:
return err;
}
-static DEFINE_PCI_DEVICE_TABLE(sta2x11_mfd_tbl) = {
+static const struct pci_device_id sta2x11_mfd_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_GPIO)},
{PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_VIC)},
{0,},
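
The WARN_ON change in sta2x11-mfd.c reflects that devm_regmap_init_mmio() reports failure through an encoded error pointer rather than NULL, so !ptr can never catch it. A minimal sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() convention, re-implemented in plain C for illustration (not the kernel headers):

#include <stdio.h>

/* Simplified versions of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *init_regmap(int fail)
{
	static int real_map;

	/* Failure comes back as an encoded errno, never as NULL. */
	return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)&real_map;
}

int main(void)
{
	void *map = init_regmap(1);

	if (IS_ERR(map)) {	/* a !map check would never trigger here */
		printf("init failed: %ld\n", PTR_ERR(map));
		return 1;
	}
	return 0;
}
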
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index fff63a41862c..42ccd0544513 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -297,14 +297,14 @@ static struct resource stmpe_gpio_resources[] = {
},
};
-static struct mfd_cell stmpe_gpio_cell = {
+static const struct mfd_cell stmpe_gpio_cell = {
.name = "stmpe-gpio",
.of_compatible = "st,stmpe-gpio",
.resources = stmpe_gpio_resources,
.num_resources = ARRAY_SIZE(stmpe_gpio_resources),
};
-static struct mfd_cell stmpe_gpio_cell_noirq = {
+static const struct mfd_cell stmpe_gpio_cell_noirq = {
.name = "stmpe-gpio",
.of_compatible = "st,stmpe-gpio",
/* gpio cell resources consist of an irq only so no resources here */
@@ -325,7 +325,7 @@ static struct resource stmpe_keypad_resources[] = {
},
};
-static struct mfd_cell stmpe_keypad_cell = {
+static const struct mfd_cell stmpe_keypad_cell = {
.name = "stmpe-keypad",
.of_compatible = "st,stmpe-keypad",
.resources = stmpe_keypad_resources,
@@ -409,7 +409,7 @@ static struct resource stmpe_ts_resources[] = {
},
};
-static struct mfd_cell stmpe_ts_cell = {
+static const struct mfd_cell stmpe_ts_cell = {
.name = "stmpe-ts",
.of_compatible = "st,stmpe-ts",
.resources = stmpe_ts_resources,
@@ -1064,7 +1064,7 @@ static int stmpe_chip_init(struct stmpe *stmpe)
return stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_ICR_LSB], icr);
}
-static int stmpe_add_device(struct stmpe *stmpe, struct mfd_cell *cell)
+static int stmpe_add_device(struct stmpe *stmpe, const struct mfd_cell *cell)
{
return mfd_add_devices(stmpe->dev, stmpe->pdata->id, cell, 1,
NULL, stmpe->irq_base, stmpe->domain);
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index ff2b09ba8797..6639f1b0fef5 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -38,7 +38,7 @@ static inline void stmpe_dump_bytes(const char *str, const void *buf,
* enable and altfunc callbacks
*/
struct stmpe_variant_block {
- struct mfd_cell *cell;
+ const struct mfd_cell *cell;
int irq;
enum stmpe_block block;
};
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 87ea51dc6234..2cf636c267d9 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -155,7 +155,7 @@ static struct resource keypad_resources[] = {
},
};
-static struct mfd_cell tc3589x_dev_gpio[] = {
+static const struct mfd_cell tc3589x_dev_gpio[] = {
{
.name = "tc3589x-gpio",
.num_resources = ARRAY_SIZE(gpio_resources),
@@ -164,7 +164,7 @@ static struct mfd_cell tc3589x_dev_gpio[] = {
},
};
-static struct mfd_cell tc3589x_dev_keypad[] = {
+static const struct mfd_cell tc3589x_dev_keypad[] = {
{
.name = "tc3589x-keypad",
.num_resources = ARRAY_SIZE(keypad_resources),
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index acd0f3a41044..591a331d8d83 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -126,7 +126,7 @@ static struct tmio_mmc_data tc6387xb_mmc_data = {
/*--------------------------------------------------------------------------*/
-static struct mfd_cell tc6387xb_cells[] = {
+static const struct mfd_cell tc6387xb_cells[] = {
[TC6387XB_CELL_MMC] = {
.name = "tmio-mmc",
.enable = tc6387xb_mmc_enable,
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 88718abfb9ba..d4e860413bb5 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -24,6 +24,7 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/sched.h>
#include <linux/mfd/ti_am335x_tscadc.h>
@@ -48,32 +49,79 @@ static const struct regmap_config tscadc_regmap_config = {
.val_bits = 32,
};
-void am335x_tsc_se_update(struct ti_tscadc_dev *tsadc)
+void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val)
{
- tscadc_writel(tsadc, REG_SE, tsadc->reg_se_cache);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tsadc->reg_lock, flags);
+ tsadc->reg_se_cache = val;
+ if (tsadc->adc_waiting)
+ wake_up(&tsadc->reg_se_wait);
+ else if (!tsadc->adc_in_use)
+ tscadc_writel(tsadc, REG_SE, val);
+
+ spin_unlock_irqrestore(&tsadc->reg_lock, flags);
+}
+EXPORT_SYMBOL_GPL(am335x_tsc_se_set_cache);
+
+static void am335x_tscadc_need_adc(struct ti_tscadc_dev *tsadc)
+{
+ DEFINE_WAIT(wait);
+ u32 reg;
+
+ /*
+	 * Disable the TSC steps so they do not run while the ADC is using the
+	 * sequencer. If 0 is written while it is running (it just started or
+	 * was already running), it completes all enabled steps and then stops.
+ */
+ tscadc_writel(tsadc, REG_SE, 0);
+ reg = tscadc_readl(tsadc, REG_ADCFSM);
+ if (reg & SEQ_STATUS) {
+ tsadc->adc_waiting = true;
+ prepare_to_wait(&tsadc->reg_se_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&tsadc->reg_lock);
+
+ schedule();
+
+ spin_lock_irq(&tsadc->reg_lock);
+ finish_wait(&tsadc->reg_se_wait, &wait);
+
+ reg = tscadc_readl(tsadc, REG_ADCFSM);
+ WARN_ON(reg & SEQ_STATUS);
+ tsadc->adc_waiting = false;
+ }
+ tsadc->adc_in_use = true;
+}
+
+void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val)
+{
+ spin_lock_irq(&tsadc->reg_lock);
+ am335x_tscadc_need_adc(tsadc);
+
+ tscadc_writel(tsadc, REG_SE, val);
+ spin_unlock_irq(&tsadc->reg_lock);
}
-EXPORT_SYMBOL_GPL(am335x_tsc_se_update);
+EXPORT_SYMBOL_GPL(am335x_tsc_se_set_once);
-void am335x_tsc_se_set(struct ti_tscadc_dev *tsadc, u32 val)
+void am335x_tsc_se_adc_done(struct ti_tscadc_dev *tsadc)
{
unsigned long flags;
spin_lock_irqsave(&tsadc->reg_lock, flags);
- tsadc->reg_se_cache = tscadc_readl(tsadc, REG_SE);
- tsadc->reg_se_cache |= val;
- am335x_tsc_se_update(tsadc);
+ tsadc->adc_in_use = false;
+ tscadc_writel(tsadc, REG_SE, tsadc->reg_se_cache);
spin_unlock_irqrestore(&tsadc->reg_lock, flags);
}
-EXPORT_SYMBOL_GPL(am335x_tsc_se_set);
+EXPORT_SYMBOL_GPL(am335x_tsc_se_adc_done);
void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val)
{
unsigned long flags;
spin_lock_irqsave(&tsadc->reg_lock, flags);
- tsadc->reg_se_cache = tscadc_readl(tsadc, REG_SE);
tsadc->reg_se_cache &= ~val;
- am335x_tsc_se_update(tsadc);
+ tscadc_writel(tsadc, REG_SE, tsadc->reg_se_cache);
spin_unlock_irqrestore(&tsadc->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(am335x_tsc_se_clr);
@@ -181,6 +229,8 @@ static int ti_tscadc_probe(struct platform_device *pdev)
}
spin_lock_init(&tscadc->reg_lock);
+ init_waitqueue_head(&tscadc->reg_se_wait);
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -302,7 +352,6 @@ static int tscadc_resume(struct device *dev)
if (tscadc_dev->tsc_cell != -1)
tscadc_idle_config(tscadc_dev);
- am335x_tsc_se_update(tscadc_dev);
restore = tscadc_readl(tscadc_dev, REG_CTRL);
tscadc_writel(tscadc_dev, REG_CTRL,
(restore | CNTRLREG_TSCSSENB));
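
The ti_am335x_tscadc hunks turn the shared step-enable register into a handshake: an ADC user clears REG_SE, waits until the sequencer reports idle, runs its one-shot steps, and only am335x_tsc_se_adc_done() restores the cached TSC step mask, while am335x_tsc_se_set_cache() wakes a waiting ADC user instead of writing the register directly. A rough user-space analogue of that flow, using a pthread mutex/condvar instead of the kernel spinlock and waitqueue, and with the simplifying assumption that the waker also clears the busy flag:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reg_se_wait = PTHREAD_COND_INITIALIZER;
static uint32_t reg_se_cache;	/* TSC step mask to restore later */
static uint32_t reg_se;		/* stands in for the REG_SE register */
static bool adc_in_use, adc_waiting, seq_busy;

/* TSC side: remember the steps, program them only if the ADC is not busy. */
static void tsc_se_set_cache(uint32_t val)
{
	pthread_mutex_lock(&reg_lock);
	reg_se_cache = val;
	if (adc_waiting) {
		seq_busy = false;		/* simplification: TSC pass done */
		pthread_cond_signal(&reg_se_wait);
	} else if (!adc_in_use) {
		reg_se = val;			/* safe to program directly */
	}
	pthread_mutex_unlock(&reg_lock);
}

/* ADC side: take the sequencer for a one-shot conversion. */
static void tsc_se_set_once(uint32_t val)
{
	pthread_mutex_lock(&reg_lock);
	reg_se = 0;				/* stop feeding new steps */
	while (seq_busy) {			/* wait until the sequencer idles */
		adc_waiting = true;
		pthread_cond_wait(&reg_se_wait, &reg_lock);
	}
	adc_waiting = false;
	adc_in_use = true;
	reg_se = val;				/* program the one-shot ADC steps */
	pthread_mutex_unlock(&reg_lock);
}

/* ADC conversion finished: hand the sequencer back to the TSC. */
static void tsc_se_adc_done(void)
{
	pthread_mutex_lock(&reg_lock);
	adc_in_use = false;
	reg_se = reg_se_cache;			/* restore the cached TSC steps */
	pthread_mutex_unlock(&reg_lock);
}

int main(void)
{
	tsc_se_set_cache(0x0000000f);	/* TSC wants steps 0-3 */
	tsc_se_set_once(0x00010000);	/* ADC borrows the sequencer */
	tsc_se_adc_done();
	printf("REG_SE restored to 0x%08x\n", (unsigned)reg_se);
	return 0;
}
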
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index dbb34f94e5e3..2bc5cfb85204 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -374,7 +374,7 @@ static const struct resource timberdale_dma_resources[] = {
},
};
-static struct mfd_cell timberdale_cells_bar0_cfg0[] = {
+static const struct mfd_cell timberdale_cells_bar0_cfg0[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
@@ -431,7 +431,7 @@ static struct mfd_cell timberdale_cells_bar0_cfg0[] = {
},
};
-static struct mfd_cell timberdale_cells_bar0_cfg1[] = {
+static const struct mfd_cell timberdale_cells_bar0_cfg1[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
@@ -498,7 +498,7 @@ static struct mfd_cell timberdale_cells_bar0_cfg1[] = {
},
};
-static struct mfd_cell timberdale_cells_bar0_cfg2[] = {
+static const struct mfd_cell timberdale_cells_bar0_cfg2[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
@@ -548,7 +548,7 @@ static struct mfd_cell timberdale_cells_bar0_cfg2[] = {
},
};
-static struct mfd_cell timberdale_cells_bar0_cfg3[] = {
+static const struct mfd_cell timberdale_cells_bar0_cfg3[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
@@ -619,7 +619,7 @@ static const struct resource timberdale_sdhc_resources[] = {
},
};
-static struct mfd_cell timberdale_cells_bar1[] = {
+static const struct mfd_cell timberdale_cells_bar1[] = {
{
.name = "sdhci",
.num_resources = ARRAY_SIZE(timberdale_sdhc_resources),
@@ -627,7 +627,7 @@ static struct mfd_cell timberdale_cells_bar1[] = {
},
};
-static struct mfd_cell timberdale_cells_bar2[] = {
+static const struct mfd_cell timberdale_cells_bar2[] = {
{
.name = "sdhci",
.num_resources = ARRAY_SIZE(timberdale_sdhc_resources),
@@ -851,7 +851,7 @@ static void timb_remove(struct pci_dev *dev)
kfree(priv);
}
-static DEFINE_PCI_DEVICE_TABLE(timberdale_pci_tbl) = {
+static const struct pci_device_id timberdale_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TIMB, PCI_DEVICE_ID_TIMB) },
{ 0 }
};
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index a081b925d10b..3b27482a174f 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -24,7 +24,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tps6507x.h>
-static struct mfd_cell tps6507x_devs[] = {
+static const struct mfd_cell tps6507x_devs[] = {
{
.name = "tps6507x-pmic",
},
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index e6f03a733879..ba1a25d758c1 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -64,7 +64,7 @@ static struct resource charger_resources[] = {
}
};
-static struct mfd_cell tps65090s[] = {
+static const struct mfd_cell tps65090s[] = {
{
.name = "tps65090-pmic",
},
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index b7be0b295575..966cf65c5c36 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -30,7 +30,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tps65217.h>
-static struct mfd_cell tps65217s[] = {
+static const struct mfd_cell tps65217s[] = {
{
.name = "tps65217-pmic",
},
@@ -170,7 +170,7 @@ static int tps65217_probe(struct i2c_client *client,
"Failed to find matching dt id\n");
return -EINVAL;
}
- chip_id = (unsigned int)match->data;
+ chip_id = (unsigned int)(unsigned long)match->data;
status_off = of_property_read_bool(client->dev.of_node,
"ti,pmic-shutdown-controller");
}
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index ee61fd7c198d..bbd54414a75d 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -103,7 +103,7 @@ static struct resource tps6586x_rtc_resources[] = {
},
};
-static struct mfd_cell tps6586x_cell[] = {
+static const struct mfd_cell tps6586x_cell[] = {
{
.name = "tps6586x-gpio",
},
@@ -124,6 +124,7 @@ struct tps6586x {
struct device *dev;
struct i2c_client *client;
struct regmap *regmap;
+ int version;
int irq;
struct irq_chip irq_chip;
@@ -208,6 +209,14 @@ int tps6586x_irq_get_virq(struct device *dev, int irq)
}
EXPORT_SYMBOL_GPL(tps6586x_irq_get_virq);
+int tps6586x_get_version(struct device *dev)
+{
+ struct tps6586x *tps6586x = dev_get_drvdata(dev);
+
+ return tps6586x->version;
+}
+EXPORT_SYMBOL_GPL(tps6586x_get_version);
+
static int __remove_subdev(struct device *dev, void *unused)
{
platform_device_unregister(to_platform_device(dev));
@@ -472,12 +481,38 @@ static void tps6586x_power_off(void)
tps6586x_set_bits(tps6586x_dev, TPS6586X_SUPPLYENE, SLEEP_MODE_BIT);
}
+static void tps6586x_print_version(struct i2c_client *client, int version)
+{
+ const char *name;
+
+ switch (version) {
+ case TPS658621A:
+ name = "TPS658621A";
+ break;
+ case TPS658621CD:
+ name = "TPS658621C/D";
+ break;
+ case TPS658623:
+ name = "TPS658623";
+ break;
+ case TPS658643:
+ name = "TPS658643";
+ break;
+ default:
+ name = "TPS6586X";
+ break;
+ }
+
+ dev_info(&client->dev, "Found %s, VERSIONCRC is %02x\n", name, version);
+}
+
static int tps6586x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tps6586x_platform_data *pdata = dev_get_platdata(&client->dev);
struct tps6586x *tps6586x;
int ret;
+ int version;
if (!pdata && client->dev.of_node)
pdata = tps6586x_parse_dt(client);
@@ -487,19 +522,18 @@ static int tps6586x_i2c_probe(struct i2c_client *client,
return -ENOTSUPP;
}
- ret = i2c_smbus_read_byte_data(client, TPS6586X_VERSIONCRC);
- if (ret < 0) {
- dev_err(&client->dev, "Chip ID read failed: %d\n", ret);
+ version = i2c_smbus_read_byte_data(client, TPS6586X_VERSIONCRC);
+ if (version < 0) {
+ dev_err(&client->dev, "Chip ID read failed: %d\n", version);
return -EIO;
}
- dev_info(&client->dev, "VERSIONCRC is %02x\n", ret);
-
tps6586x = devm_kzalloc(&client->dev, sizeof(*tps6586x), GFP_KERNEL);
- if (tps6586x == NULL) {
- dev_err(&client->dev, "memory for tps6586x alloc failed\n");
+ if (!tps6586x)
return -ENOMEM;
- }
+
+ tps6586x->version = version;
+ tps6586x_print_version(client, tps6586x->version);
tps6586x->client = client;
tps6586x->dev = &client->dev;
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index c0f608e3ca9e..1f142d76cbbc 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -36,7 +36,7 @@ static struct resource rtc_resources[] = {
}
};
-static struct mfd_cell tps65910s[] = {
+static const struct mfd_cell tps65910s[] = {
{
.name = "tps65910-gpio",
},
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
index 925a044cbdf6..27a518e0eec6 100644
--- a/drivers/mfd/tps65912-core.c
+++ b/drivers/mfd/tps65912-core.c
@@ -21,7 +21,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tps65912.h>
-static struct mfd_cell tps65912s[] = {
+static const struct mfd_cell tps65912s[] = {
{
.name = "tps65912-pmic",
},
diff --git a/drivers/mfd/tps80031.c b/drivers/mfd/tps80031.c
index f15ee6d5cfbf..ed6c5b0956e2 100644
--- a/drivers/mfd/tps80031.c
+++ b/drivers/mfd/tps80031.c
@@ -44,7 +44,7 @@ static struct resource tps80031_rtc_resources[] = {
};
/* TPS80031 sub mfd devices */
-static struct mfd_cell tps80031_cell[] = {
+static const struct mfd_cell tps80031_cell[] = {
{
.name = "tps80031-pmic",
},
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 29473c2c95ae..ed718328eff1 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -47,6 +47,9 @@
#include <linux/i2c.h>
#include <linux/i2c/twl.h>
+/* Register descriptions for audio */
+#include <linux/mfd/twl4030-audio.h>
+
#include "twl-core.h"
/*
@@ -200,6 +203,105 @@ static struct twl_mapping twl4030_map[] = {
{ 2, TWL5031_BASEADD_INTERRUPTS },
};
+static struct reg_default twl4030_49_defaults[] = {
+ /* Audio Registers */
+ { 0x01, 0x00}, /* CODEC_MODE */
+ { 0x02, 0x00}, /* OPTION */
+ /* 0x03 Unused */
+ { 0x04, 0x00}, /* MICBIAS_CTL */
+ { 0x05, 0x00}, /* ANAMICL */
+ { 0x06, 0x00}, /* ANAMICR */
+ { 0x07, 0x00}, /* AVADC_CTL */
+ { 0x08, 0x00}, /* ADCMICSEL */
+ { 0x09, 0x00}, /* DIGMIXING */
+ { 0x0a, 0x0f}, /* ATXL1PGA */
+ { 0x0b, 0x0f}, /* ATXR1PGA */
+ { 0x0c, 0x0f}, /* AVTXL2PGA */
+ { 0x0d, 0x0f}, /* AVTXR2PGA */
+ { 0x0e, 0x00}, /* AUDIO_IF */
+ { 0x0f, 0x00}, /* VOICE_IF */
+ { 0x10, 0x3f}, /* ARXR1PGA */
+ { 0x11, 0x3f}, /* ARXL1PGA */
+ { 0x12, 0x3f}, /* ARXR2PGA */
+ { 0x13, 0x3f}, /* ARXL2PGA */
+ { 0x14, 0x25}, /* VRXPGA */
+ { 0x15, 0x00}, /* VSTPGA */
+ { 0x16, 0x00}, /* VRX2ARXPGA */
+ { 0x17, 0x00}, /* AVDAC_CTL */
+ { 0x18, 0x00}, /* ARX2VTXPGA */
+ { 0x19, 0x32}, /* ARXL1_APGA_CTL*/
+ { 0x1a, 0x32}, /* ARXR1_APGA_CTL*/
+ { 0x1b, 0x32}, /* ARXL2_APGA_CTL*/
+ { 0x1c, 0x32}, /* ARXR2_APGA_CTL*/
+ { 0x1d, 0x00}, /* ATX2ARXPGA */
+ { 0x1e, 0x00}, /* BT_IF */
+ { 0x1f, 0x55}, /* BTPGA */
+ { 0x20, 0x00}, /* BTSTPGA */
+ { 0x21, 0x00}, /* EAR_CTL */
+ { 0x22, 0x00}, /* HS_SEL */
+ { 0x23, 0x00}, /* HS_GAIN_SET */
+ { 0x24, 0x00}, /* HS_POPN_SET */
+ { 0x25, 0x00}, /* PREDL_CTL */
+ { 0x26, 0x00}, /* PREDR_CTL */
+ { 0x27, 0x00}, /* PRECKL_CTL */
+ { 0x28, 0x00}, /* PRECKR_CTL */
+ { 0x29, 0x00}, /* HFL_CTL */
+ { 0x2a, 0x00}, /* HFR_CTL */
+ { 0x2b, 0x05}, /* ALC_CTL */
+ { 0x2c, 0x00}, /* ALC_SET1 */
+ { 0x2d, 0x00}, /* ALC_SET2 */
+ { 0x2e, 0x00}, /* BOOST_CTL */
+ { 0x2f, 0x00}, /* SOFTVOL_CTL */
+ { 0x30, 0x13}, /* DTMF_FREQSEL */
+ { 0x31, 0x00}, /* DTMF_TONEXT1H */
+ { 0x32, 0x00}, /* DTMF_TONEXT1L */
+ { 0x33, 0x00}, /* DTMF_TONEXT2H */
+ { 0x34, 0x00}, /* DTMF_TONEXT2L */
+ { 0x35, 0x79}, /* DTMF_TONOFF */
+ { 0x36, 0x11}, /* DTMF_WANONOFF */
+ { 0x37, 0x00}, /* I2S_RX_SCRAMBLE_H */
+ { 0x38, 0x00}, /* I2S_RX_SCRAMBLE_M */
+ { 0x39, 0x00}, /* I2S_RX_SCRAMBLE_L */
+ { 0x3a, 0x06}, /* APLL_CTL */
+ { 0x3b, 0x00}, /* DTMF_CTL */
+ { 0x3c, 0x44}, /* DTMF_PGA_CTL2 (0x3C) */
+ { 0x3d, 0x69}, /* DTMF_PGA_CTL1 (0x3D) */
+ { 0x3e, 0x00}, /* MISC_SET_1 */
+ { 0x3f, 0x00}, /* PCMBTMUX */
+ /* 0x40 - 0x42 Unused */
+ { 0x43, 0x00}, /* RX_PATH_SEL */
+ { 0x44, 0x32}, /* VDL_APGA_CTL */
+ { 0x45, 0x00}, /* VIBRA_CTL */
+ { 0x46, 0x00}, /* VIBRA_SET */
+ { 0x47, 0x00}, /* VIBRA_PWM_SET */
+ { 0x48, 0x00}, /* ANAMIC_GAIN */
+ { 0x49, 0x00}, /* MISC_SET_2 */
+ /* End of Audio Registers */
+};
+
+static bool twl4030_49_nop_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0:
+ case 3:
+ case 40:
+ case 41:
+ case 42:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static const struct regmap_range twl4030_49_volatile_ranges[] = {
+ regmap_reg_range(TWL4030_BASEADD_TEST, 0xff),
+};
+
+static const struct regmap_access_table twl4030_49_volatile_table = {
+ .yes_ranges = twl4030_49_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(twl4030_49_volatile_ranges),
+};
+
static struct regmap_config twl4030_regmap_config[4] = {
{
/* Address 0x48 */
@@ -212,6 +314,15 @@ static struct regmap_config twl4030_regmap_config[4] = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0xff,
+
+ .readable_reg = twl4030_49_nop_reg,
+ .writeable_reg = twl4030_49_nop_reg,
+
+ .volatile_table = &twl4030_49_volatile_table,
+
+ .reg_defaults = twl4030_49_defaults,
+ .num_reg_defaults = ARRAY_SIZE(twl4030_49_defaults),
+ .cache_type = REGCACHE_RBTREE,
},
{
/* Address 0x4a */
@@ -302,35 +413,50 @@ unsigned int twl_rev(void)
EXPORT_SYMBOL(twl_rev);
/**
- * twl_i2c_write - Writes a n bit register in TWL4030/TWL5030/TWL60X0
+ * twl_get_regmap - Get the regmap associated with the given module
* @mod_no: module number
- * @value: an array of num_bytes+1 containing data to write
- * @reg: register address (just offset will do)
- * @num_bytes: number of bytes to transfer
*
- * Returns the result of operation - 0 is success
+ * Returns the regmap pointer or NULL in case of failure.
*/
-int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
+static struct regmap *twl_get_regmap(u8 mod_no)
{
- int ret;
int sid;
struct twl_client *twl;
if (unlikely(!twl_priv || !twl_priv->ready)) {
pr_err("%s: not initialized\n", DRIVER_NAME);
- return -EPERM;
+ return NULL;
}
if (unlikely(mod_no >= twl_get_last_module())) {
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
- return -EPERM;
+ return NULL;
}
sid = twl_priv->twl_map[mod_no].sid;
twl = &twl_priv->twl_modules[sid];
- ret = regmap_bulk_write(twl->regmap,
- twl_priv->twl_map[mod_no].base + reg, value,
- num_bytes);
+ return twl->regmap;
+}
+
+/**
+ * twl_i2c_write - Writes a n bit register in TWL4030/TWL5030/TWL60X0
+ * @mod_no: module number
+ * @value: an array of num_bytes+1 containing data to write
+ * @reg: register address (just offset will do)
+ * @num_bytes: number of bytes to transfer
+ *
+ * Returns the result of operation - 0 is success
+ */
+int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
+{
+ struct regmap *regmap = twl_get_regmap(mod_no);
+ int ret;
+
+ if (!regmap)
+ return -EPERM;
+
+ ret = regmap_bulk_write(regmap, twl_priv->twl_map[mod_no].base + reg,
+ value, num_bytes);
if (ret)
pr_err("%s: Write failed (mod %d, reg 0x%02x count %d)\n",
@@ -351,25 +477,14 @@ EXPORT_SYMBOL(twl_i2c_write);
*/
int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
{
+ struct regmap *regmap = twl_get_regmap(mod_no);
int ret;
- int sid;
- struct twl_client *twl;
- if (unlikely(!twl_priv || !twl_priv->ready)) {
- pr_err("%s: not initialized\n", DRIVER_NAME);
- return -EPERM;
- }
- if (unlikely(mod_no >= twl_get_last_module())) {
- pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
+ if (!regmap)
return -EPERM;
- }
- sid = twl_priv->twl_map[mod_no].sid;
- twl = &twl_priv->twl_modules[sid];
-
- ret = regmap_bulk_read(twl->regmap,
- twl_priv->twl_map[mod_no].base + reg, value,
- num_bytes);
+ ret = regmap_bulk_read(regmap, twl_priv->twl_map[mod_no].base + reg,
+ value, num_bytes);
if (ret)
pr_err("%s: Read failed (mod %d, reg 0x%02x count %d)\n",
@@ -379,6 +494,27 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
}
EXPORT_SYMBOL(twl_i2c_read);
+/**
+ * twl_set_regcache_bypass - Configure regcache bypass for the regmap
+ *			     associated with the module
+ * @mod_no: module number
+ * @enable: Regcache bypass state
+ *
+ * Returns 0 on success, or -EPERM if the module's regmap is unavailable.
+ */
+int twl_set_regcache_bypass(u8 mod_no, bool enable)
+{
+ struct regmap *regmap = twl_get_regmap(mod_no);
+
+ if (!regmap)
+ return -EPERM;
+
+ regcache_cache_bypass(regmap, enable);
+
+ return 0;
+}
+EXPORT_SYMBOL(twl_set_regcache_bypass);
+
/*----------------------------------------------------------------------*/
/**
@@ -701,62 +837,6 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
usb3v1[0].dev_name = dev_name(child);
}
}
- if (IS_ENABLED(CONFIG_TWL6030_USB) && pdata->usb &&
- twl_class_is_6030()) {
-
- static struct regulator_consumer_supply usb3v3;
- int regulator;
-
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030)) {
- /* this is a template that gets copied */
- struct regulator_init_data usb_fixed = {
- .constraints.valid_modes_mask =
- REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .constraints.valid_ops_mask =
- REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- };
-
- if (features & TWL6032_SUBCLASS) {
- usb3v3.supply = "ldousb";
- regulator = TWL6032_REG_LDOUSB;
- } else {
- usb3v3.supply = "vusb";
- regulator = TWL6030_REG_VUSB;
- }
- child = add_regulator_linked(regulator, &usb_fixed,
- &usb3v3, 1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- pdata->usb->features = features;
-
- child = add_child(TWL_MODULE_USB, "twl6030_usb",
- pdata->usb, sizeof(*pdata->usb), true,
- /* irq1 = VBUS_PRES, irq0 = USB ID */
- irq_base + USBOTG_INTR_OFFSET,
- irq_base + USB_PRES_INTR_OFFSET);
-
- if (IS_ERR(child))
- return PTR_ERR(child);
- /* we need to connect regulators to this transceiver */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && child)
- usb3v3.dev_name = dev_name(child);
- } else if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) &&
- twl_class_is_6030()) {
- if (features & TWL6032_SUBCLASS)
- child = add_regulator(TWL6032_REG_LDOUSB,
- pdata->ldousb, features);
- else
- child = add_regulator(TWL6030_REG_VUSB,
- pdata->vusb, features);
-
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
if (IS_ENABLED(CONFIG_TWL4030_WATCHDOG) && twl_class_is_4030()) {
child = add_child(TWL_MODULE_PM_RECEIVER, "twl4030_wdt", NULL,
@@ -870,148 +950,6 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
return PTR_ERR(child);
}
- /* twl6030 regulators */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && twl_class_is_6030() &&
- !(features & TWL6032_SUBCLASS)) {
- child = add_regulator(TWL6030_REG_VDD1, pdata->vdd1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6030_REG_VDD2, pdata->vdd2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6030_REG_VDD3, pdata->vdd3,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6030_REG_V1V8, pdata->v1v8,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6030_REG_V2V1, pdata->v2v1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6030_REG_VMMC, pdata->vmmc,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6030_REG_VPP, pdata->vpp,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6030_REG_VUSIM, pdata->vusim,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6030_REG_VCXIO, pdata->vcxio,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6030_REG_VDAC, pdata->vdac,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6030_REG_VAUX1_6030, pdata->vaux1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6030_REG_VAUX2_6030, pdata->vaux2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6030_REG_VAUX3_6030, pdata->vaux3,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6030_REG_CLK32KG, pdata->clk32kg,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- /* 6030 and 6025 share this regulator */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && twl_class_is_6030()) {
- child = add_regulator(TWL6030_REG_VANA, pdata->vana,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- /* twl6032 regulators */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && twl_class_is_6030() &&
- (features & TWL6032_SUBCLASS)) {
- child = add_regulator(TWL6032_REG_LDO5, pdata->ldo5,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6032_REG_LDO1, pdata->ldo1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6032_REG_LDO7, pdata->ldo7,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6032_REG_LDO6, pdata->ldo6,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6032_REG_LDOLN, pdata->ldoln,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6032_REG_LDO2, pdata->ldo2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6032_REG_LDO4, pdata->ldo4,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6032_REG_LDO3, pdata->ldo3,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6032_REG_SMPS3, pdata->smps3,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6032_REG_SMPS4, pdata->smps4,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL6032_REG_VIO, pdata->vio6025,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- }
-
if (IS_ENABLED(CONFIG_CHARGER_TWL4030) && pdata->bci &&
!(features & (TPS_SUBSET | TWL5031))) {
child = add_child(TWL_MODULE_MAIN_CHARGE, "twl4030_bci",
@@ -1133,6 +1071,11 @@ static int twl_remove(struct i2c_client *client)
return 0;
}
+static struct of_dev_auxdata twl_auxdata_lookup[] = {
+ OF_DEV_AUXDATA("ti,twl4030-gpio", 0, "twl4030-gpio", NULL),
+ { /* sentinel */ },
+};
+
/* NOTE: This driver only handles a single twl4030/tps659x0 chip */
static int
twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
@@ -1271,10 +1214,14 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
twl_i2c_write_u8(TWL4030_MODULE_INTBR, temp, REG_GPPUPDCTR1);
}
- if (node)
- status = of_platform_populate(node, NULL, NULL, &client->dev);
- else
+ if (node) {
+ if (pdata)
+ twl_auxdata_lookup[0].platform_data = pdata->gpio;
+ status = of_platform_populate(node, NULL, twl_auxdata_lookup,
+ &client->dev);
+ } else {
status = add_children(pdata, irq_base, id->driver_data);
+ }
fail:
if (status < 0)
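
In twl-core.c the duplicated validation in twl_i2c_write()/twl_i2c_read() moves into a single twl_get_regmap() helper that returns NULL on any failure, so each caller is reduced to one NULL check before the regmap call. A compact sketch of that refactor shape, with hypothetical names and a trivial regmap stand-in:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct regmap { int dummy; };

static struct regmap module_maps[4];
static int num_modules = 4;
static int initialized = 1;

/* Single place that validates driver state and the module number. */
static struct regmap *get_regmap(unsigned mod_no)
{
	if (!initialized)
		return NULL;
	if (mod_no >= (unsigned)num_modules)
		return NULL;
	return &module_maps[mod_no];
}

static int bus_write(unsigned mod_no, unsigned reg, unsigned val)
{
	struct regmap *map = get_regmap(mod_no);

	if (!map)
		return -EPERM;
	printf("write map %p reg 0x%02x = 0x%02x\n", (void *)map, reg, val);
	return 0;
}

static int bus_read(unsigned mod_no, unsigned reg, unsigned *val)
{
	struct regmap *map = get_regmap(mod_no);

	if (!map)
		return -EPERM;
	*val = 0;	/* a real driver would do a bulk regmap read here */
	return 0;
}

int main(void)
{
	unsigned v;

	bus_write(1, 0x10, 0xab);
	return bus_read(9, 0x10, &v) ? 1 : 0;	/* out of range -> failure */
}
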
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 517eda832f79..18a607e2ca06 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -176,8 +176,9 @@ static irqreturn_t twl6030_irq_thread(int irq, void *data)
int i, ret;
union {
u8 bytes[4];
- u32 int_sts;
+ __le32 int_sts;
} sts;
+ u32 int_sts; /* sts.int_sts converted to CPU endianness */
struct twl6030_irq *pdata = data;
/* read INT_STS_A, B and C in one shot using a burst read */
@@ -196,8 +197,9 @@ static irqreturn_t twl6030_irq_thread(int irq, void *data)
if (sts.bytes[2] & 0x10)
sts.bytes[2] |= 0x08;
- for (i = 0; sts.int_sts; sts.int_sts >>= 1, i++)
- if (sts.int_sts & 0x1) {
+ int_sts = le32_to_cpu(sts.int_sts);
+ for (i = 0; int_sts; int_sts >>= 1, i++)
+ if (int_sts & 0x1) {
int module_irq =
irq_find_mapping(pdata->irq_domain,
pdata->irq_mapping_tbl[i]);
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index 0779d5ab9ab1..75316fb33448 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -44,6 +44,54 @@
#define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 0 : 1)
#define TWL6040_NUM_SUPPLIES (2)
+static struct reg_default twl6040_defaults[] = {
+ { 0x01, 0x4B }, /* REG_ASICID (ro) */
+ { 0x02, 0x00 }, /* REG_ASICREV (ro) */
+ { 0x03, 0x00 }, /* REG_INTID */
+ { 0x04, 0x00 }, /* REG_INTMR */
+ { 0x05, 0x00 }, /* REG_NCPCTRL */
+ { 0x06, 0x00 }, /* REG_LDOCTL */
+ { 0x07, 0x60 }, /* REG_HPPLLCTL */
+ { 0x08, 0x00 }, /* REG_LPPLLCTL */
+ { 0x09, 0x4A }, /* REG_LPPLLDIV */
+ { 0x0A, 0x00 }, /* REG_AMICBCTL */
+ { 0x0B, 0x00 }, /* REG_DMICBCTL */
+ { 0x0C, 0x00 }, /* REG_MICLCTL */
+ { 0x0D, 0x00 }, /* REG_MICRCTL */
+ { 0x0E, 0x00 }, /* REG_MICGAIN */
+ { 0x0F, 0x1B }, /* REG_LINEGAIN */
+ { 0x10, 0x00 }, /* REG_HSLCTL */
+ { 0x11, 0x00 }, /* REG_HSRCTL */
+ { 0x12, 0x00 }, /* REG_HSGAIN */
+ { 0x13, 0x00 }, /* REG_EARCTL */
+ { 0x14, 0x00 }, /* REG_HFLCTL */
+ { 0x15, 0x00 }, /* REG_HFLGAIN */
+ { 0x16, 0x00 }, /* REG_HFRCTL */
+ { 0x17, 0x00 }, /* REG_HFRGAIN */
+ { 0x18, 0x00 }, /* REG_VIBCTLL */
+ { 0x19, 0x00 }, /* REG_VIBDATL */
+ { 0x1A, 0x00 }, /* REG_VIBCTLR */
+ { 0x1B, 0x00 }, /* REG_VIBDATR */
+ { 0x1C, 0x00 }, /* REG_HKCTL1 */
+ { 0x1D, 0x00 }, /* REG_HKCTL2 */
+ { 0x1E, 0x00 }, /* REG_GPOCTL */
+ { 0x1F, 0x00 }, /* REG_ALB */
+ { 0x20, 0x00 }, /* REG_DLB */
+ /* 0x28, REG_TRIM1 */
+ /* 0x29, REG_TRIM2 */
+ /* 0x2A, REG_TRIM3 */
+ /* 0x2B, REG_HSOTRIM */
+ /* 0x2C, REG_HFOTRIM */
+ { 0x2D, 0x08 }, /* REG_ACCCTL */
+ { 0x2E, 0x00 }, /* REG_STATUS (ro) */
+};
+
+static struct reg_default twl6040_patch[] = {
+ /* Select I2C bus access to dual access registers */
+ { TWL6040_REG_ACCCTL, 0x09 },
+};
+
+
static bool twl6040_has_vibra(struct device_node *node)
{
#ifdef CONFIG_OF
@@ -238,6 +286,9 @@ int twl6040_power(struct twl6040 *twl6040, int on)
if (twl6040->power_count++)
goto out;
+ /* Allow writes to the chip */
+ regcache_cache_only(twl6040->regmap, false);
+
if (gpio_is_valid(twl6040->audpwron)) {
/* use automatic power-up sequence */
ret = twl6040_power_up_automatic(twl6040);
@@ -253,6 +304,10 @@ int twl6040_power(struct twl6040 *twl6040, int on)
goto out;
}
}
+
+ /* Sync with the HW */
+ regcache_sync(twl6040->regmap);
+
/* Default PLL configuration after power up */
twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL;
twl6040->sysclk = 19200000;
@@ -279,6 +334,11 @@ int twl6040_power(struct twl6040 *twl6040, int on)
/* use manual power-down sequence */
twl6040_power_down_manual(twl6040);
}
+
+ /* Set regmap to cache only and mark it as dirty */
+ regcache_cache_only(twl6040->regmap, true);
+ regcache_mark_dirty(twl6040->regmap);
+
twl6040->sysclk = 0;
twl6040->mclk = 0;
}
@@ -490,9 +550,24 @@ static bool twl6040_readable_reg(struct device *dev, unsigned int reg)
static bool twl6040_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case TWL6040_REG_VIBCTLL:
- case TWL6040_REG_VIBCTLR:
- case TWL6040_REG_INTMR:
+ case TWL6040_REG_ASICID:
+ case TWL6040_REG_ASICREV:
+ case TWL6040_REG_INTID:
+ case TWL6040_REG_LPPLLCTL:
+ case TWL6040_REG_HPPLLCTL:
+ case TWL6040_REG_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool twl6040_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TWL6040_REG_ASICID:
+ case TWL6040_REG_ASICREV:
+ case TWL6040_REG_STATUS:
return false;
default:
return true;
@@ -502,10 +577,15 @@ static bool twl6040_volatile_reg(struct device *dev, unsigned int reg)
static struct regmap_config twl6040_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
+
+ .reg_defaults = twl6040_defaults,
+ .num_reg_defaults = ARRAY_SIZE(twl6040_defaults),
+
.max_register = TWL6040_REG_STATUS, /* 0x2e */
.readable_reg = twl6040_readable_reg,
.volatile_reg = twl6040_volatile_reg,
+ .writeable_reg = twl6040_writeable_reg,
.cache_type = REGCACHE_RBTREE,
};
@@ -624,6 +704,8 @@ static int twl6040_probe(struct i2c_client *client,
/* dual-access registers controlled by I2C only */
twl6040_set_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_I2CSEL);
+ regmap_register_patch(twl6040->regmap, twl6040_patch,
+ ARRAY_SIZE(twl6040_patch));
/*
 * The main functionality of twl6040 is to provide audio on OMAP4+ systems.
@@ -656,6 +738,10 @@ static int twl6040_probe(struct i2c_client *client,
cell->name = "twl6040-gpo";
children++;
+ /* The chip is powered down so mark regmap to cache only and dirty */
+ regcache_cache_only(twl6040->regmap, true);
+ regcache_mark_dirty(twl6040->regmap);
+
ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children,
NULL, 0, NULL);
if (ret)
diff --git a/drivers/mfd/viperboard.c b/drivers/mfd/viperboard.c
index af2a6703f34f..e00f5340ed87 100644
--- a/drivers/mfd/viperboard.c
+++ b/drivers/mfd/viperboard.c
@@ -37,7 +37,7 @@ static const struct usb_device_id vprbrd_table[] = {
MODULE_DEVICE_TABLE(usb, vprbrd_table);
-static struct mfd_cell vprbrd_devs[] = {
+static const struct mfd_cell vprbrd_devs[] = {
{
.name = "viperboard-gpio",
},
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
index 757ecc63338c..84f01da4875e 100644
--- a/drivers/mfd/vx855.c
+++ b/drivers/mfd/vx855.c
@@ -60,7 +60,7 @@ static struct resource vx855_gpio_resources[] = {
},
};
-static struct mfd_cell vx855_cells[] = {
+static const struct mfd_cell vx855_cells[] = {
{
.name = "vx855_gpio",
.num_resources = ARRAY_SIZE(vx855_gpio_resources),
@@ -118,7 +118,7 @@ static void vx855_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static DEFINE_PCI_DEVICE_TABLE(vx855_pci_tbl) = {
+static const struct pci_device_id vx855_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) },
{ 0, }
};
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index bf8b3b5ad1fe..11632f135e8c 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -14,6 +14,7 @@
#include <linux/mfd/arizona/core.h>
#include <linux/mfd/arizona/registers.h>
+#include <linux/device.h>
#include "arizona.h"
@@ -223,6 +224,31 @@ static const struct reg_default wm5110_revb_patch[] = {
{ 0x80, 0x0 },
};
+static const struct reg_default wm5110_revd_patch[] = {
+ { 0x80, 0x3 },
+ { 0x80, 0x3 },
+ { 0x393, 0x27 },
+ { 0x394, 0x27 },
+ { 0x395, 0x27 },
+ { 0x396, 0x27 },
+ { 0x397, 0x27 },
+ { 0x398, 0x26 },
+ { 0x221, 0x90 },
+ { 0x211, 0x8 },
+ { 0x36c, 0x1fb },
+ { 0x26e, 0x64 },
+ { 0x26f, 0xea },
+ { 0x270, 0x1f16 },
+ { 0x51b, 0x1 },
+ { 0x55b, 0x1 },
+ { 0x59b, 0x1 },
+ { 0x4f0, 0x633 },
+ { 0x441, 0xc059 },
+ { 0x209, 0x27 },
+ { 0x80, 0x0 },
+ { 0x80, 0x0 },
+};
+
/* We use a function so we can use ARRAY_SIZE() */
int wm5110_patch(struct arizona *arizona)
{
@@ -235,7 +261,10 @@ int wm5110_patch(struct arizona *arizona)
return regmap_register_patch(arizona->regmap,
wm5110_revb_patch,
ARRAY_SIZE(wm5110_revb_patch));
-
+ case 3:
+ return regmap_register_patch(arizona->regmap,
+ wm5110_revd_patch,
+ ARRAY_SIZE(wm5110_revd_patch));
default:
return 0;
}
@@ -504,7 +533,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */
{ 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */
{ 0x00000210, 0x0184 }, /* R528 - LDO1 Control 1 */
- { 0x00000213, 0x0344 }, /* R531 - LDO2 Control 1 */
+ { 0x00000213, 0x03E4 }, /* R531 - LDO2 Control 1 */
{ 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */
{ 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */
{ 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
@@ -524,6 +553,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000300, 0x0000 }, /* R768 - Input Enables */
{ 0x00000308, 0x0000 }, /* R776 - Input Rate */
{ 0x00000309, 0x0022 }, /* R777 - Input Volume Ramp */
+ { 0x0000030C, 0x0002 }, /* R780 - HPF Control */
{ 0x00000310, 0x2080 }, /* R784 - IN1L Control */
{ 0x00000311, 0x0180 }, /* R785 - ADC Digital Volume 1L */
{ 0x00000312, 0x0000 }, /* R786 - DMIC1L Control */
@@ -545,6 +575,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000328, 0x2000 }, /* R808 - IN4L Control */
{ 0x00000329, 0x0180 }, /* R809 - ADC Digital Volume 4L */
{ 0x0000032A, 0x0000 }, /* R810 - DMIC4L Control */
+ { 0x0000032C, 0x0000 }, /* R812 - IN4R Control */
{ 0x0000032D, 0x0180 }, /* R813 - ADC Digital Volume 4R */
{ 0x0000032E, 0x0000 }, /* R814 - DMIC4R Control */
{ 0x00000400, 0x0000 }, /* R1024 - Output Enables 1 */
@@ -598,6 +629,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x0000043D, 0x0180 }, /* R1085 - DAC Digital Volume 6R */
{ 0x0000043E, 0x0080 }, /* R1086 - DAC Volume Limit 6R */
{ 0x0000043F, 0x0800 }, /* R1087 - Noise Gate Select 6R */
+ { 0x00000440, 0x8FFF }, /* R1088 - DRE Enable */
{ 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
{ 0x00000458, 0x0000 }, /* R1112 - Noise Gate Control */
{ 0x00000480, 0x0040 }, /* R1152 - Class W ANC Threshold 1 */
@@ -606,6 +638,9 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000491, 0x0000 }, /* R1169 - PDM SPK1 CTRL 2 */
{ 0x00000492, 0x0069 }, /* R1170 - PDM SPK2 CTRL 1 */
{ 0x00000493, 0x0000 }, /* R1171 - PDM SPK2 CTRL 2 */
+ { 0x000004A0, 0x3480 }, /* R1184 - HP1 Short Circuit Ctrl */
+ { 0x000004A1, 0x3480 }, /* R1185 - HP2 Short Circuit Ctrl */
+ { 0x000004A2, 0x3480 }, /* R1186 - HP3 Short Circuit Ctrl */
{ 0x00000500, 0x000C }, /* R1280 - AIF1 BCLK Ctrl */
{ 0x00000501, 0x0008 }, /* R1281 - AIF1 Tx Pin Ctrl */
{ 0x00000502, 0x0000 }, /* R1282 - AIF1 Rx Pin Ctrl */
@@ -882,6 +917,38 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x0000074D, 0x0080 }, /* R1869 - AIF2TX2MIX Input 3 Volume */
{ 0x0000074E, 0x0000 }, /* R1870 - AIF2TX2MIX Input 4 Source */
{ 0x0000074F, 0x0080 }, /* R1871 - AIF2TX2MIX Input 4 Volume */
+ { 0x00000750, 0x0000 }, /* R1872 - AIF2TX3MIX Input 1 Source */
+ { 0x00000751, 0x0080 }, /* R1873 - AIF2TX3MIX Input 1 Volume */
+ { 0x00000752, 0x0000 }, /* R1874 - AIF2TX3MIX Input 2 Source */
+ { 0x00000753, 0x0080 }, /* R1875 - AIF2TX3MIX Input 2 Volume */
+ { 0x00000754, 0x0000 }, /* R1876 - AIF2TX3MIX Input 3 Source */
+ { 0x00000755, 0x0080 }, /* R1877 - AIF2TX3MIX Input 3 Volume */
+ { 0x00000756, 0x0000 }, /* R1878 - AIF2TX3MIX Input 4 Source */
+ { 0x00000757, 0x0080 }, /* R1879 - AIF2TX3MIX Input 4 Volume */
+ { 0x00000758, 0x0000 }, /* R1880 - AIF2TX4MIX Input 1 Source */
+ { 0x00000759, 0x0080 }, /* R1881 - AIF2TX4MIX Input 1 Volume */
+ { 0x0000075A, 0x0000 }, /* R1882 - AIF2TX4MIX Input 2 Source */
+ { 0x0000075B, 0x0080 }, /* R1883 - AIF2TX4MIX Input 2 Volume */
+ { 0x0000075C, 0x0000 }, /* R1884 - AIF2TX4MIX Input 3 Source */
+ { 0x0000075D, 0x0080 }, /* R1885 - AIF2TX4MIX Input 3 Volume */
+ { 0x0000075E, 0x0000 }, /* R1886 - AIF2TX4MIX Input 4 Source */
+ { 0x0000075F, 0x0080 }, /* R1887 - AIF2TX4MIX Input 4 Volume */
+ { 0x00000760, 0x0000 }, /* R1888 - AIF2TX5MIX Input 1 Source */
+ { 0x00000761, 0x0080 }, /* R1889 - AIF2TX5MIX Input 1 Volume */
+ { 0x00000762, 0x0000 }, /* R1890 - AIF2TX5MIX Input 2 Source */
+ { 0x00000763, 0x0080 }, /* R1891 - AIF2TX5MIX Input 2 Volume */
+ { 0x00000764, 0x0000 }, /* R1892 - AIF2TX5MIX Input 3 Source */
+ { 0x00000765, 0x0080 }, /* R1893 - AIF2TX5MIX Input 3 Volume */
+ { 0x00000766, 0x0000 }, /* R1894 - AIF2TX5MIX Input 4 Source */
+ { 0x00000767, 0x0080 }, /* R1895 - AIF2TX5MIX Input 4 Volume */
+ { 0x00000768, 0x0000 }, /* R1896 - AIF2TX6MIX Input 1 Source */
+ { 0x00000769, 0x0080 }, /* R1897 - AIF2TX6MIX Input 1 Volume */
+ { 0x0000076A, 0x0000 }, /* R1898 - AIF2TX6MIX Input 2 Source */
+ { 0x0000076B, 0x0080 }, /* R1899 - AIF2TX6MIX Input 2 Volume */
+ { 0x0000076C, 0x0000 }, /* R1900 - AIF2TX6MIX Input 3 Source */
+ { 0x0000076D, 0x0080 }, /* R1901 - AIF2TX6MIX Input 3 Volume */
+ { 0x0000076E, 0x0000 }, /* R1902 - AIF2TX6MIX Input 4 Source */
+ { 0x0000076F, 0x0080 }, /* R1903 - AIF2TX6MIX Input 4 Volume */
{ 0x00000780, 0x0000 }, /* R1920 - AIF3TX1MIX Input 1 Source */
{ 0x00000781, 0x0080 }, /* R1921 - AIF3TX1MIX Input 1 Volume */
{ 0x00000782, 0x0000 }, /* R1922 - AIF3TX1MIX Input 2 Source */
@@ -1342,6 +1409,64 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00001404, 0x0000 }, /* R5124 - DSP4 Status 1 */
};
+static bool wm5110_is_rev_b_adsp_memory(unsigned int reg)
+{
+ if ((reg >= 0x100000 && reg < 0x103000) ||
+ (reg >= 0x180000 && reg < 0x181000) ||
+ (reg >= 0x190000 && reg < 0x192000) ||
+ (reg >= 0x1a8000 && reg < 0x1a9000) ||
+ (reg >= 0x200000 && reg < 0x209000) ||
+ (reg >= 0x280000 && reg < 0x281000) ||
+ (reg >= 0x290000 && reg < 0x29a000) ||
+ (reg >= 0x2a8000 && reg < 0x2aa000) ||
+ (reg >= 0x300000 && reg < 0x30f000) ||
+ (reg >= 0x380000 && reg < 0x382000) ||
+ (reg >= 0x390000 && reg < 0x39e000) ||
+ (reg >= 0x3a8000 && reg < 0x3b6000) ||
+ (reg >= 0x400000 && reg < 0x403000) ||
+ (reg >= 0x480000 && reg < 0x481000) ||
+ (reg >= 0x490000 && reg < 0x492000) ||
+ (reg >= 0x4a8000 && reg < 0x4a9000))
+ return true;
+ else
+ return false;
+}
+
+static bool wm5110_is_rev_d_adsp_memory(unsigned int reg)
+{
+ if ((reg >= 0x100000 && reg < 0x106000) ||
+ (reg >= 0x180000 && reg < 0x182000) ||
+ (reg >= 0x190000 && reg < 0x198000) ||
+ (reg >= 0x1a8000 && reg < 0x1aa000) ||
+ (reg >= 0x200000 && reg < 0x20f000) ||
+ (reg >= 0x280000 && reg < 0x282000) ||
+ (reg >= 0x290000 && reg < 0x29c000) ||
+ (reg >= 0x2a6000 && reg < 0x2b4000) ||
+ (reg >= 0x300000 && reg < 0x30f000) ||
+ (reg >= 0x380000 && reg < 0x382000) ||
+ (reg >= 0x390000 && reg < 0x3a2000) ||
+ (reg >= 0x3a6000 && reg < 0x3b4000) ||
+ (reg >= 0x400000 && reg < 0x406000) ||
+ (reg >= 0x480000 && reg < 0x482000) ||
+ (reg >= 0x490000 && reg < 0x498000) ||
+ (reg >= 0x4a8000 && reg < 0x4aa000))
+ return true;
+ else
+ return false;
+}
+
+static bool wm5110_is_adsp_memory(struct device *dev, unsigned int reg)
+{
+ struct arizona *arizona = dev_get_drvdata(dev);
+
+ switch (arizona->rev) {
+ case 0 ... 2:
+ return wm5110_is_rev_b_adsp_memory(reg);
+ default:
+ return wm5110_is_rev_d_adsp_memory(reg);
+ }
+}
+
static bool wm5110_readable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -1460,6 +1585,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_INPUT_ENABLES_STATUS:
case ARIZONA_INPUT_RATE:
case ARIZONA_INPUT_VOLUME_RAMP:
+ case ARIZONA_HPF_CONTROL:
case ARIZONA_IN1L_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_1L:
case ARIZONA_DMIC1L_CONTROL:
@@ -1481,6 +1607,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_IN4L_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_4L:
case ARIZONA_DMIC4L_CONTROL:
+ case ARIZONA_IN4R_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_4R:
case ARIZONA_DMIC4R_CONTROL:
case ARIZONA_OUTPUT_ENABLES_1:
@@ -1536,12 +1663,16 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DAC_DIGITAL_VOLUME_6R:
case ARIZONA_DAC_VOLUME_LIMIT_6R:
case ARIZONA_NOISE_GATE_SELECT_6R:
+ case ARIZONA_DRE_ENABLE:
case ARIZONA_DAC_AEC_CONTROL_1:
case ARIZONA_NOISE_GATE_CONTROL:
case ARIZONA_PDM_SPK1_CTRL_1:
case ARIZONA_PDM_SPK1_CTRL_2:
case ARIZONA_PDM_SPK2_CTRL_1:
case ARIZONA_PDM_SPK2_CTRL_2:
+ case ARIZONA_HP1_SHORT_CIRCUIT_CTRL:
+ case ARIZONA_HP2_SHORT_CIRCUIT_CTRL:
+ case ARIZONA_HP3_SHORT_CIRCUIT_CTRL:
case ARIZONA_AIF1_BCLK_CTRL:
case ARIZONA_AIF1_TX_PIN_CTRL:
case ARIZONA_AIF1_RX_PIN_CTRL:
@@ -1820,6 +1951,38 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_AIF2TX2MIX_INPUT_3_VOLUME:
case ARIZONA_AIF2TX2MIX_INPUT_4_SOURCE:
case ARIZONA_AIF2TX2MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF2TX3MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF2TX3MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF2TX3MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF2TX3MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF2TX3MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF2TX3MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF2TX3MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF2TX3MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF2TX4MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF2TX4MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF2TX4MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF2TX4MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF2TX4MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF2TX4MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF2TX4MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF2TX4MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF2TX5MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF2TX5MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF2TX5MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF2TX5MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF2TX5MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF2TX5MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF2TX5MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF2TX5MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF2TX6MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF2TX6MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF2TX6MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF2TX6MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF2TX6MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF2TX6MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF2TX6MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF2TX6MIX_INPUT_4_VOLUME:
case ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE:
case ARIZONA_AIF3TX1MIX_INPUT_1_VOLUME:
case ARIZONA_AIF3TX1MIX_INPUT_2_SOURCE:
@@ -2331,7 +2494,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP4_SCRATCH_3:
return true;
default:
- return false;
+ return wm5110_is_adsp_memory(dev, reg);
}
}
@@ -2407,16 +2570,18 @@ static bool wm5110_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP4_SCRATCH_3:
return true;
default:
- return false;
+ return wm5110_is_adsp_memory(dev, reg);
}
}
+#define WM5110_MAX_REGISTER 0x4a9fff
+
const struct regmap_config wm5110_spi_regmap = {
.reg_bits = 32,
.pad_bits = 16,
.val_bits = 16,
- .max_register = ARIZONA_DSP1_STATUS_2,
+ .max_register = WM5110_MAX_REGISTER,
.readable_reg = wm5110_readable_register,
.volatile_reg = wm5110_volatile_register,
@@ -2430,7 +2595,7 @@ const struct regmap_config wm5110_i2c_regmap = {
.reg_bits = 32,
.val_bits = 16,
- .max_register = ARIZONA_DSP1_STATUS_2,
+ .max_register = WM5110_MAX_REGISTER,
.readable_reg = wm5110_readable_register,
.volatile_reg = wm5110_volatile_register,
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 5c459f469224..28366a90e1ad 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -1011,7 +1011,7 @@ static struct resource wm831x_wdt_resources[] = {
},
};
-static struct mfd_cell wm8310_devs[] = {
+static const struct mfd_cell wm8310_devs[] = {
{
.name = "wm831x-backup",
},
@@ -1165,7 +1165,7 @@ static struct mfd_cell wm8310_devs[] = {
},
};
-static struct mfd_cell wm8311_devs[] = {
+static const struct mfd_cell wm8311_devs[] = {
{
.name = "wm831x-backup",
},
@@ -1295,7 +1295,7 @@ static struct mfd_cell wm8311_devs[] = {
},
};
-static struct mfd_cell wm8312_devs[] = {
+static const struct mfd_cell wm8312_devs[] = {
{
.name = "wm831x-backup",
},
@@ -1449,7 +1449,7 @@ static struct mfd_cell wm8312_devs[] = {
},
};
-static struct mfd_cell wm8320_devs[] = {
+static const struct mfd_cell wm8320_devs[] = {
{
.name = "wm831x-backup",
},
@@ -1578,7 +1578,7 @@ static struct mfd_cell wm8320_devs[] = {
},
};
-static struct mfd_cell touch_devs[] = {
+static const struct mfd_cell touch_devs[] = {
{
.name = "wm831x-touch",
.num_resources = ARRAY_SIZE(wm831x_touch_resources),
@@ -1586,7 +1586,7 @@ static struct mfd_cell touch_devs[] = {
},
};
-static struct mfd_cell rtc_devs[] = {
+static const struct mfd_cell rtc_devs[] = {
{
.name = "wm831x-rtc",
.num_resources = ARRAY_SIZE(wm831x_rtc_resources),
@@ -1594,7 +1594,7 @@ static struct mfd_cell rtc_devs[] = {
},
};
-static struct mfd_cell backlight_devs[] = {
+static const struct mfd_cell backlight_devs[] = {
{
.name = "wm831x-backlight",
},
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index 2b29caebc9cf..a4cbefe5430f 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -64,11 +64,13 @@ static int wm831x_i2c_suspend(struct device *dev)
return wm831x_device_suspend(wm831x);
}
-static void wm831x_i2c_shutdown(struct i2c_client *i2c)
+static int wm831x_i2c_poweroff(struct device *dev)
{
- struct wm831x *wm831x = i2c_get_clientdata(i2c);
+ struct wm831x *wm831x = dev_get_drvdata(dev);
wm831x_device_shutdown(wm831x);
+
+ return 0;
}
static const struct i2c_device_id wm831x_i2c_id[] = {
@@ -85,6 +87,7 @@ MODULE_DEVICE_TABLE(i2c, wm831x_i2c_id);
static const struct dev_pm_ops wm831x_pm_ops = {
.suspend = wm831x_i2c_suspend,
+ .poweroff = wm831x_i2c_poweroff,
};
static struct i2c_driver wm831x_i2c_driver = {
@@ -95,7 +98,6 @@ static struct i2c_driver wm831x_i2c_driver = {
},
.probe = wm831x_i2c_probe,
.remove = wm831x_i2c_remove,
- .shutdown = wm831x_i2c_shutdown,
.id_table = wm831x_i2c_id,
};
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index 07de3cc5a0d9..b8a5e3b34ec7 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -66,16 +66,19 @@ static int wm831x_spi_suspend(struct device *dev)
return wm831x_device_suspend(wm831x);
}
-static void wm831x_spi_shutdown(struct spi_device *spi)
+static int wm831x_spi_poweroff(struct device *dev)
{
- struct wm831x *wm831x = spi_get_drvdata(spi);
+ struct wm831x *wm831x = dev_get_drvdata(dev);
wm831x_device_shutdown(wm831x);
+
+ return 0;
}
static const struct dev_pm_ops wm831x_spi_pm = {
.freeze = wm831x_spi_suspend,
.suspend = wm831x_spi_suspend,
+ .poweroff = wm831x_spi_poweroff,
};
static const struct spi_device_id wm831x_spi_ids[] = {
@@ -99,7 +102,6 @@ static struct spi_driver wm831x_spi_driver = {
.id_table = wm831x_spi_ids,
.probe = wm831x_spi_probe,
.remove = wm831x_spi_remove,
- .shutdown = wm831x_spi_shutdown,
};
static int __init wm831x_spi_init(void)
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 030827511667..ba04f1bc70eb 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -33,7 +33,7 @@
#include "wm8994.h"
-static struct mfd_cell wm8994_regulator_devs[] = {
+static const struct mfd_cell wm8994_regulator_devs[] = {
{
.name = "wm8994-ldo",
.id = 1,
@@ -62,7 +62,7 @@ static struct resource wm8994_gpio_resources[] = {
},
};
-static struct mfd_cell wm8994_devs[] = {
+static const struct mfd_cell wm8994_devs[] = {
{
.name = "wm8994-codec",
.num_resources = ARRAY_SIZE(wm8994_codec_resources),
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a3e291d0df9a..6cb388e8fb7d 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -525,4 +525,5 @@ source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
source "drivers/misc/vmw_vmci/Kconfig"
source "drivers/misc/mic/Kconfig"
+source "drivers/misc/genwqe/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f45473e68bf7..99b9424ce31d 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -53,3 +53,4 @@ obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
obj-y += mic/
+obj-$(CONFIG_GENWQE) += genwqe/
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 0daadcf1ed7a..d3eee113baeb 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -641,7 +641,7 @@ static const struct attribute_group ad525x_group_commands = {
.attrs = ad525x_attributes_commands,
};
-int ad_dpot_add_files(struct device *dev,
+static int ad_dpot_add_files(struct device *dev,
unsigned features, unsigned rdac)
{
int err = sysfs_create_file(&dev->kobj,
@@ -666,7 +666,7 @@ int ad_dpot_add_files(struct device *dev,
return err;
}
-inline void ad_dpot_remove_files(struct device *dev,
+static inline void ad_dpot_remove_files(struct device *dev,
unsigned features, unsigned rdac)
{
sysfs_remove_file(&dev->kobj,
diff --git a/drivers/misc/bmp085-i2c.c b/drivers/misc/bmp085-i2c.c
index 3abfcecf8424..a7c16295b816 100644
--- a/drivers/misc/bmp085-i2c.c
+++ b/drivers/misc/bmp085-i2c.c
@@ -49,7 +49,7 @@ static int bmp085_i2c_probe(struct i2c_client *client,
return err;
}
- return bmp085_probe(&client->dev, regmap);
+ return bmp085_probe(&client->dev, regmap, client->irq);
}
static int bmp085_i2c_remove(struct i2c_client *client)
diff --git a/drivers/misc/bmp085-spi.c b/drivers/misc/bmp085-spi.c
index d6a52659cf24..864ecac32373 100644
--- a/drivers/misc/bmp085-spi.c
+++ b/drivers/misc/bmp085-spi.c
@@ -41,7 +41,7 @@ static int bmp085_spi_probe(struct spi_device *client)
return err;
}
- return bmp085_probe(&client->dev, regmap);
+ return bmp085_probe(&client->dev, regmap, client->irq);
}
static int bmp085_spi_remove(struct spi_device *client)
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 2704d885a9b3..820e53d0048f 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -49,9 +49,11 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/delay.h>
#include <linux/of.h>
#include "bmp085.h"
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/gpio.h>
#define BMP085_CHIP_ID 0x55
#define BMP085_CALIBRATION_DATA_START 0xAA
@@ -84,8 +86,19 @@ struct bmp085_data {
unsigned long last_temp_measurement;
u8 chip_id;
s32 b6; /* calculated temperature correction coefficient */
+ int irq;
+ struct completion done;
};
+static irqreturn_t bmp085_eoc_isr(int irq, void *devid)
+{
+ struct bmp085_data *data = devid;
+
+ complete(&data->done);
+
+ return IRQ_HANDLED;
+}
+
static s32 bmp085_read_calibration_data(struct bmp085_data *data)
{
u16 tmp[BMP085_CALIBRATION_DATA_LENGTH];
@@ -116,6 +129,9 @@ static s32 bmp085_update_raw_temperature(struct bmp085_data *data)
s32 status;
mutex_lock(&data->lock);
+
+ init_completion(&data->done);
+
status = regmap_write(data->regmap, BMP085_CTRL_REG,
BMP085_TEMP_MEASUREMENT);
if (status < 0) {
@@ -123,7 +139,8 @@ static s32 bmp085_update_raw_temperature(struct bmp085_data *data)
"Error while requesting temperature measurement.\n");
goto exit;
}
- msleep(BMP085_TEMP_CONVERSION_TIME);
+ wait_for_completion_timeout(&data->done, 1 + msecs_to_jiffies(
+ BMP085_TEMP_CONVERSION_TIME));
status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB,
&tmp, sizeof(tmp));
@@ -147,6 +164,9 @@ static s32 bmp085_update_raw_pressure(struct bmp085_data *data)
s32 status;
mutex_lock(&data->lock);
+
+ init_completion(&data->done);
+
status = regmap_write(data->regmap, BMP085_CTRL_REG,
BMP085_PRESSURE_MEASUREMENT +
(data->oversampling_setting << 6));
@@ -157,8 +177,8 @@ static s32 bmp085_update_raw_pressure(struct bmp085_data *data)
}
/* wait for the end of conversion */
- msleep(2+(3 << data->oversampling_setting));
-
+ wait_for_completion_timeout(&data->done, 1 + msecs_to_jiffies(
+ 2+(3 << data->oversampling_setting)));
/* copy data into a u32 (4 bytes), but skip the first byte. */
status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB,
((u8 *)&tmp)+1, 3);
@@ -420,7 +440,7 @@ struct regmap_config bmp085_regmap_config = {
};
EXPORT_SYMBOL_GPL(bmp085_regmap_config);
-int bmp085_probe(struct device *dev, struct regmap *regmap)
+int bmp085_probe(struct device *dev, struct regmap *regmap, int irq)
{
struct bmp085_data *data;
int err = 0;
@@ -434,6 +454,15 @@ int bmp085_probe(struct device *dev, struct regmap *regmap)
dev_set_drvdata(dev, data);
data->dev = dev;
data->regmap = regmap;
+ data->irq = irq;
+
+ if (data->irq > 0) {
+ err = devm_request_irq(dev, data->irq, bmp085_eoc_isr,
+ IRQF_TRIGGER_RISING, "bmp085",
+ data);
+ if (err < 0)
+ goto exit_free;
+ }
/* Initialize the BMP085 chip */
err = bmp085_init_client(data);
diff --git a/drivers/misc/bmp085.h b/drivers/misc/bmp085.h
index 2b8f615bca92..8b8e3b1f5ca5 100644
--- a/drivers/misc/bmp085.h
+++ b/drivers/misc/bmp085.h
@@ -26,7 +26,7 @@
extern struct regmap_config bmp085_regmap_config;
-int bmp085_probe(struct device *dev, struct regmap *regmap);
+int bmp085_probe(struct device *dev, struct regmap *regmap, int irq);
int bmp085_remove(struct device *dev);
int bmp085_detect(struct device *dev);
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index c169e07654cb..f0fa4e8ca124 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -3,7 +3,7 @@
* Philip Edelbrock <phil@netroedge.com>
* Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2003 IBM Corp.
- * Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 3a015abb444a..78e55b501c94 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -378,7 +378,6 @@ static int eeprom_93xx46_remove(struct spi_device *spi)
device_remove_file(&spi->dev, &dev_attr_erase);
sysfs_remove_bin_file(&spi->dev.kobj, &edev->bin);
- spi_set_drvdata(spi, NULL);
kfree(edev);
return 0;
}
diff --git a/drivers/misc/genwqe/Kconfig b/drivers/misc/genwqe/Kconfig
new file mode 100644
index 000000000000..6069d8cd79d7
--- /dev/null
+++ b/drivers/misc/genwqe/Kconfig
@@ -0,0 +1,13 @@
+#
+# IBM Accelerator Family 'GenWQE'
+#
+
+menuconfig GENWQE
+ tristate "GenWQE PCIe Accelerator"
+ depends on PCI && 64BIT
+ select CRC_ITU_T
+ default n
+ help
+ Enables PCIe card driver for IBM GenWQE accelerators.
+ The user-space interface is described in
+ include/linux/genwqe/genwqe_card.h.
diff --git a/drivers/misc/genwqe/Makefile b/drivers/misc/genwqe/Makefile
new file mode 100644
index 000000000000..98a2b4f0b18b
--- /dev/null
+++ b/drivers/misc/genwqe/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for GenWQE driver
+#
+
+obj-$(CONFIG_GENWQE) := genwqe_card.o
+genwqe_card-objs := card_base.o card_dev.o card_ddcb.o card_sysfs.o \
+ card_debugfs.o card_utils.o
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
new file mode 100644
index 000000000000..74d51c9bb858
--- /dev/null
+++ b/drivers/misc/genwqe/card_base.c
@@ -0,0 +1,1205 @@
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Module initialization and PCIe setup. Card health monitoring and
+ * recovery functionality. Character device creation and deletion are
+ * controlled from here.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/err.h>
+#include <linux/aer.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/device.h>
+#include <linux/log2.h>
+#include <linux/genwqe/genwqe_card.h>
+
+#include "card_base.h"
+#include "card_ddcb.h"
+
+MODULE_AUTHOR("Frank Haverkamp <haver@linux.vnet.ibm.com>");
+MODULE_AUTHOR("Michael Ruettger <michael@ibmra.de>");
+MODULE_AUTHOR("Joerg-Stephan Vogt <jsvogt@de.ibm.com>");
+MODULE_AUTHOR("Michal Jung <mijung@de.ibm.com>");
+
+MODULE_DESCRIPTION("GenWQE Card");
+MODULE_VERSION(DRV_VERS_STRING);
+MODULE_LICENSE("GPL");
+
+static char genwqe_driver_name[] = GENWQE_DEVNAME;
+static struct class *class_genwqe;
+static struct dentry *debugfs_genwqe;
+static struct genwqe_dev *genwqe_devices[GENWQE_CARD_NO_MAX];
+
+/* PCI structure for identifying device by PCI vendor and device ID */
+static DEFINE_PCI_DEVICE_TABLE(genwqe_device_table) = {
+ { .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_GENWQE,
+ .subvendor = PCI_SUBVENDOR_ID_IBM,
+ .subdevice = PCI_SUBSYSTEM_ID_GENWQE5,
+ .class = (PCI_CLASSCODE_GENWQE5 << 8),
+ .class_mask = ~0,
+ .driver_data = 0 },
+
+ /* Initial SR-IOV bring-up image */
+ { .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_GENWQE,
+ .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
+ .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV,
+ .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
+ .class_mask = ~0,
+ .driver_data = 0 },
+
+ { .vendor = PCI_VENDOR_ID_IBM, /* VF Vendor ID */
+ .device = 0x0000, /* VF Device ID */
+ .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
+ .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV,
+ .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
+ .class_mask = ~0,
+ .driver_data = 0 },
+
+ /* Fixed up image */
+ { .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_GENWQE,
+ .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
+ .subdevice = PCI_SUBSYSTEM_ID_GENWQE5,
+ .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
+ .class_mask = ~0,
+ .driver_data = 0 },
+
+ { .vendor = PCI_VENDOR_ID_IBM, /* VF Vendor ID */
+ .device = 0x0000, /* VF Device ID */
+ .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
+ .subdevice = PCI_SUBSYSTEM_ID_GENWQE5,
+ .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
+ .class_mask = ~0,
+ .driver_data = 0 },
+
+ /* Even one more ... */
+ { .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_GENWQE,
+ .subvendor = PCI_SUBVENDOR_ID_IBM,
+ .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_NEW,
+ .class = (PCI_CLASSCODE_GENWQE5 << 8),
+ .class_mask = ~0,
+ .driver_data = 0 },
+
+ { 0, } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, genwqe_device_table);
+
+/**
+ * genwqe_dev_alloc() - Create and prepare a new card descriptor
+ *
+ * Return: Pointer to card descriptor, or ERR_PTR(err) on error
+ */
+static struct genwqe_dev *genwqe_dev_alloc(void)
+{
+ unsigned int i = 0, j;
+ struct genwqe_dev *cd;
+
+ for (i = 0; i < GENWQE_CARD_NO_MAX; i++) {
+ if (genwqe_devices[i] == NULL)
+ break;
+ }
+ if (i >= GENWQE_CARD_NO_MAX)
+ return ERR_PTR(-ENODEV);
+
+ cd = kzalloc(sizeof(struct genwqe_dev), GFP_KERNEL);
+ if (!cd)
+ return ERR_PTR(-ENOMEM);
+
+ cd->card_idx = i;
+ cd->class_genwqe = class_genwqe;
+ cd->debugfs_genwqe = debugfs_genwqe;
+
+ init_waitqueue_head(&cd->queue_waitq);
+
+ spin_lock_init(&cd->file_lock);
+ INIT_LIST_HEAD(&cd->file_list);
+
+ cd->card_state = GENWQE_CARD_UNUSED;
+ spin_lock_init(&cd->print_lock);
+
+ cd->ddcb_software_timeout = genwqe_ddcb_software_timeout;
+ cd->kill_timeout = genwqe_kill_timeout;
+
+ for (j = 0; j < GENWQE_MAX_VFS; j++)
+ cd->vf_jobtimeout_msec[j] = genwqe_vf_jobtimeout_msec;
+
+ genwqe_devices[i] = cd;
+ return cd;
+}
+
+static void genwqe_dev_free(struct genwqe_dev *cd)
+{
+ if (!cd)
+ return;
+
+ genwqe_devices[cd->card_idx] = NULL;
+ kfree(cd);
+}
+
+/**
+ * genwqe_bus_reset() - Card recovery
+ *
+ * pci_reset_function() will recover the device and ensure that the
+ * registers are accessible again when it completes with success. If
+ * not, the card will stay dead and its registers will remain
+ * inaccessible.
+ */
+static int genwqe_bus_reset(struct genwqe_dev *cd)
+{
+ int bars, rc = 0;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ void __iomem *mmio;
+
+ if (cd->err_inject & GENWQE_INJECT_BUS_RESET_FAILURE)
+ return -EIO;
+
+ mmio = cd->mmio;
+ cd->mmio = NULL;
+ pci_iounmap(pci_dev, mmio);
+
+ bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
+ pci_release_selected_regions(pci_dev, bars);
+
+ /*
+ * Firmware/BIOS might change memory mapping during bus reset.
+ * Settings like the bus-mastering enable, ... are backed up and
+ * restored by the pci_reset_function().
+ */
+ dev_dbg(&pci_dev->dev, "[%s] pci_reset function ...\n", __func__);
+ rc = pci_reset_function(pci_dev);
+ if (rc) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: failed reset func (rc %d)\n", __func__, rc);
+ return rc;
+ }
+ dev_dbg(&pci_dev->dev, "[%s] done with rc=%d\n", __func__, rc);
+
+ /*
+ * Here is the right spot to clear the register read
+ * failure. pci_bus_reset() does this job in real systems.
+ */
+ cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE |
+ GENWQE_INJECT_GFIR_FATAL |
+ GENWQE_INJECT_GFIR_INFO);
+
+ rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
+ if (rc) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: request bars failed (%d)\n", __func__, rc);
+ return -EIO;
+ }
+
+ cd->mmio = pci_iomap(pci_dev, 0, 0);
+ if (cd->mmio == NULL) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: mapping BAR0 failed\n", __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/*
+ * Hardware circumvention section. Certain bitstreams in our test-lab
+ * had different kinds of problems. Here is where we adjust those
+ * bitstreams to function well with this version of our device driver.
+ *
+ * These circumventions are applied to the physical function only.
+ * The magic numbers below identify development/manufacturing
+ * versions of the bitstream used on the card.
+ *
+ * Turn off error reporting for old/manufacturing images.
+ */
+
+bool genwqe_need_err_masking(struct genwqe_dev *cd)
+{
+ return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
+}
+
+static void genwqe_tweak_hardware(struct genwqe_dev *cd)
+{
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ /* Mask FIRs for development images */
+ if (((cd->slu_unitcfg & 0xFFFF0ull) >= 0x32000ull) &&
+ ((cd->slu_unitcfg & 0xFFFF0ull) <= 0x33250ull)) {
+ dev_warn(&pci_dev->dev,
+ "FIRs masked due to bitstream %016llx.%016llx\n",
+ cd->slu_unitcfg, cd->app_unitcfg);
+
+ __genwqe_writeq(cd, IO_APP_SEC_LEM_DEBUG_OVR,
+ 0xFFFFFFFFFFFFFFFFull);
+
+ __genwqe_writeq(cd, IO_APP_ERR_ACT_MASK,
+ 0x0000000000000000ull);
+ }
+}
+
+/**
+ * genwqe_recovery_on_fatal_gfir_required() - Version dependent actions
+ *
+ * Bitstreams older than 2013-02-17 have a bug where fatal GFIRs must
+ * be ignored. This is e.g. true for the bitstream we gave to the card
+ * manufacturer, but also for some old bitstreams we released to our
+ * test-lab.
+ */
+int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd)
+{
+ return (cd->slu_unitcfg & 0xFFFF0ull) >= 0x32170ull;
+}
+
+int genwqe_flash_readback_fails(struct genwqe_dev *cd)
+{
+ return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
+}
+
+/**
+ * genwqe_T_psec() - Calculate PF/VF timeout register content
+ *
+ * Note: From a design perspective it turned out to be a bad idea to
+ * use codes here to specify the frequency/speed values. An old
+ * driver cannot understand new codes and is therefore always a
+ * problem. It is better to measure out the value, or to put the
+ * speed/frequency directly into a register, which is always a valid
+ * value for old as well as for new software.
+ */
+/* T = 1/f */
+static int genwqe_T_psec(struct genwqe_dev *cd)
+{
+ u16 speed; /* 1/f -> 250, 200, 166, 175 */
+ static const int T[] = { 4000, 5000, 6000, 5714 };
+
+ speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
+ if (speed >= ARRAY_SIZE(T))
+ return -1; /* illegal value */
+
+ return T[speed];
+}
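+
+/*
+ * Worked example (illustrative): a speed code of 0 selects
+ * T[0] = 4000 ps, i.e. a 250 MHz clock (1 / 250 MHz = 4 ns); code 2
+ * selects 6000 ps for the 166 MHz case referred to below.
+ */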
+
+/**
+ * genwqe_setup_pf_jtimer() - Setup PF hardware timeouts for DDCB execution
+ *
+ * Do this _after_ card_reset() is called. Otherwise the values will
+ * vanish. The settings need to be done when the queues are inactive.
+ *
+ * The max. timeout value is 2^(10+x) * T (6ns for 166MHz) * 15/16.
+ * The min. timeout value is 2^(10+x) * T (6ns for 166MHz) * 14/16.
+ */
+static bool genwqe_setup_pf_jtimer(struct genwqe_dev *cd)
+{
+ u32 T = genwqe_T_psec(cd);
+ u64 x;
+
+ if (genwqe_pf_jobtimeout_msec == 0)
+ return false;
+
+ /* PF: large value needed, flash update 2sec per block */
+ x = ilog2(genwqe_pf_jobtimeout_msec *
+ 16000000000uL/(T * 15)) - 10;
+
+ genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
+ 0xff00 | (x & 0xff), 0);
+ return true;
+}
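+
+/*
+ * Worked example (illustrative): solving
+ *   2^(10+x) * T[ps] * 15/16 >= timeout[ms] * 10^9
+ * for x gives x = log2(timeout * 16 * 10^9 / (15 * T)) - 10, which is
+ * what the ilog2() expression above computes (rounded down by ilog2()).
+ */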
+
+/**
+ * genwqe_setup_vf_jtimer() - Setup VF hardware timeouts for DDCB execution
+ */
+static bool genwqe_setup_vf_jtimer(struct genwqe_dev *cd)
+{
+ struct pci_dev *pci_dev = cd->pci_dev;
+ unsigned int vf;
+ u32 T = genwqe_T_psec(cd);
+ u64 x;
+
+ for (vf = 0; vf < pci_sriov_get_totalvfs(pci_dev); vf++) {
+
+ if (cd->vf_jobtimeout_msec[vf] == 0)
+ continue;
+
+ x = ilog2(cd->vf_jobtimeout_msec[vf] *
+ 16000000000uL/(T * 15)) - 10;
+
+ genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
+ 0xff00 | (x & 0xff), vf + 1);
+ }
+ return true;
+}
+
+static int genwqe_ffdc_buffs_alloc(struct genwqe_dev *cd)
+{
+ unsigned int type, e = 0;
+
+ for (type = 0; type < GENWQE_DBG_UNITS; type++) {
+ switch (type) {
+ case GENWQE_DBG_UNIT0:
+ e = genwqe_ffdc_buff_size(cd, 0);
+ break;
+ case GENWQE_DBG_UNIT1:
+ e = genwqe_ffdc_buff_size(cd, 1);
+ break;
+ case GENWQE_DBG_UNIT2:
+ e = genwqe_ffdc_buff_size(cd, 2);
+ break;
+ case GENWQE_DBG_REGS:
+ e = GENWQE_FFDC_REGS;
+ break;
+ }
+
+ /* currently support only the debug units mentioned here */
+ cd->ffdc[type].entries = e;
+ cd->ffdc[type].regs = kmalloc(e * sizeof(struct genwqe_reg),
+ GFP_KERNEL);
+ /*
+ * regs == NULL is OK; the code using it treats this as no regs.
+ * Printing a warning is OK in this case.
+ */
+ }
+ return 0;
+}
+
+static void genwqe_ffdc_buffs_free(struct genwqe_dev *cd)
+{
+ unsigned int type;
+
+ for (type = 0; type < GENWQE_DBG_UNITS; type++) {
+ kfree(cd->ffdc[type].regs);
+ cd->ffdc[type].regs = NULL;
+ }
+}
+
+static int genwqe_read_ids(struct genwqe_dev *cd)
+{
+ int err = 0;
+ int slu_id;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ cd->slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG);
+ if (cd->slu_unitcfg == IO_ILLEGAL_VALUE) {
+ dev_err(&pci_dev->dev,
+ "err: SLUID=%016llx\n", cd->slu_unitcfg);
+ err = -EIO;
+ goto out_err;
+ }
+
+ slu_id = genwqe_get_slu_id(cd);
+ if (slu_id < GENWQE_SLU_ARCH_REQ || slu_id == 0xff) {
+ dev_err(&pci_dev->dev,
+ "err: incompatible SLU Architecture %u\n", slu_id);
+ err = -ENOENT;
+ goto out_err;
+ }
+
+ cd->app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG);
+ if (cd->app_unitcfg == IO_ILLEGAL_VALUE) {
+ dev_err(&pci_dev->dev,
+ "err: APPID=%016llx\n", cd->app_unitcfg);
+ err = -EIO;
+ goto out_err;
+ }
+ genwqe_read_app_id(cd, cd->app_name, sizeof(cd->app_name));
+
+ /*
+ * Is access to all registers possible? If we are a VF the
+ * answer is obvious. If we run fully virtualized, we need to
+ * check if we can access all registers. If we do not have
+ * full access we will cause an UR and some informational FIRs
+ * in the PF, but that should not harm.
+ */
+ if (pci_dev->is_virtfn)
+ cd->is_privileged = 0;
+ else
+ cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
+ != IO_ILLEGAL_VALUE);
+
+ out_err:
+ return err;
+}
+
+static int genwqe_start(struct genwqe_dev *cd)
+{
+ int err;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ err = genwqe_read_ids(cd);
+ if (err)
+ return err;
+
+ if (genwqe_is_privileged(cd)) {
+ /* do this after the tweaks. alloc fail is acceptable */
+ genwqe_ffdc_buffs_alloc(cd);
+ genwqe_stop_traps(cd);
+
+ /* Collect registers e.g. FIRs, UNITIDs, traces ... */
+ genwqe_read_ffdc_regs(cd, cd->ffdc[GENWQE_DBG_REGS].regs,
+ cd->ffdc[GENWQE_DBG_REGS].entries, 0);
+
+ genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT0,
+ cd->ffdc[GENWQE_DBG_UNIT0].regs,
+ cd->ffdc[GENWQE_DBG_UNIT0].entries);
+
+ genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT1,
+ cd->ffdc[GENWQE_DBG_UNIT1].regs,
+ cd->ffdc[GENWQE_DBG_UNIT1].entries);
+
+ genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT2,
+ cd->ffdc[GENWQE_DBG_UNIT2].regs,
+ cd->ffdc[GENWQE_DBG_UNIT2].entries);
+
+ genwqe_start_traps(cd);
+
+ if (cd->card_state == GENWQE_CARD_FATAL_ERROR) {
+ dev_warn(&pci_dev->dev,
+ "[%s] chip reload/recovery!\n", __func__);
+
+ /*
+ * Stealth Mode: Reload chip on either hot
+ * reset or PERST.
+ */
+ cd->softreset = 0x7Cull;
+ __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET,
+ cd->softreset);
+
+ err = genwqe_bus_reset(cd);
+ if (err != 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: bus reset failed!\n",
+ __func__);
+ goto out;
+ }
+
+ /*
+ * Re-read the IDs because
+ * it could happen that the bitstream load
+ * failed!
+ */
+ err = genwqe_read_ids(cd);
+ if (err)
+ goto out;
+ }
+ }
+
+ err = genwqe_setup_service_layer(cd); /* does a reset to the card */
+ if (err != 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: could not setup servicelayer!\n", __func__);
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (genwqe_is_privileged(cd)) { /* code is running _after_ reset */
+ genwqe_tweak_hardware(cd);
+
+ genwqe_setup_pf_jtimer(cd);
+ genwqe_setup_vf_jtimer(cd);
+ }
+
+ err = genwqe_device_create(cd);
+ if (err < 0) {
+ dev_err(&pci_dev->dev,
+ "err: chdev init failed! (err=%d)\n", err);
+ goto out_release_service_layer;
+ }
+ return 0;
+
+ out_release_service_layer:
+ genwqe_release_service_layer(cd);
+ out:
+ if (genwqe_is_privileged(cd))
+ genwqe_ffdc_buffs_free(cd);
+ return -EIO;
+}
+
+/**
+ * genwqe_stop() - Stop card operation
+ *
+ * Recovery notes:
+ * As long as genwqe_thread runs we might access registers during
+ * error data capture. The same applies to the genwqe_health_thread.
+ * When genwqe_bus_reset() fails this function might be called twice:
+ * first by the genwqe_health_thread() and later by genwqe_remove() to
+ * unbind the device. We must be able to survive that.
+ *
+ * This function must be robust enough to be called twice.
+ */
+static int genwqe_stop(struct genwqe_dev *cd)
+{
+ genwqe_finish_queue(cd); /* no register access */
+ genwqe_device_remove(cd); /* device removed, procs killed */
+ genwqe_release_service_layer(cd); /* here genwqe_thread is stopped */
+
+ if (genwqe_is_privileged(cd)) {
+ pci_disable_sriov(cd->pci_dev); /* access pci config space */
+ genwqe_ffdc_buffs_free(cd);
+ }
+
+ return 0;
+}
+
+/**
+ * genwqe_recover_card() - Try to recover the card if it is possible
+ *
+ * If fatal_err is set no register access is possible anymore. It is
+ * likely that genwqe_start fails in that situation. Proper error
+ * handling is required in this case.
+ *
+ * genwqe_bus_reset() will cause the pci code to call genwqe_remove()
+ * and later genwqe_probe() for all virtual functions.
+ */
+static int genwqe_recover_card(struct genwqe_dev *cd, int fatal_err)
+{
+ int rc;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ genwqe_stop(cd);
+
+ /*
+ * Make sure chip is not reloaded to maintain FFDC. Write SLU
+ * Reset Register, CPLDReset field to 0.
+ */
+ if (!fatal_err) {
+ cd->softreset = 0x70ull;
+ __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset);
+ }
+
+ rc = genwqe_bus_reset(cd);
+ if (rc != 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: card recovery impossible!\n", __func__);
+ return rc;
+ }
+
+ rc = genwqe_start(cd);
+ if (rc < 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: failed to launch device!\n", __func__);
+ return rc;
+ }
+ return 0;
+}
+
+static int genwqe_health_check_cond(struct genwqe_dev *cd, u64 *gfir)
+{
+ *gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ return (*gfir & GFIR_ERR_TRIGGER) &&
+ genwqe_recovery_on_fatal_gfir_required(cd);
+}
+
+/**
+ * genwqe_fir_checking() - Check the fault isolation registers of the card
+ *
+ * If this code works OK, it can be tried out with the help of the genwqe_poke tool:
+ * sudo ./tools/genwqe_poke 0x8 0xfefefefefef
+ *
+ * Now the relevant FIRs/sFIRs should be printed out and the driver should
+ * invoke recovery (devices are removed and re-added).
+ */
+static u64 genwqe_fir_checking(struct genwqe_dev *cd)
+{
+ int j, iterations = 0;
+ u64 mask, fir, fec, uid, gfir, gfir_masked, sfir, sfec;
+ u32 fir_addr, fir_clr_addr, fec_addr, sfir_addr, sfec_addr;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ healthMonitor:
+ iterations++;
+ if (iterations > 16) {
+ dev_err(&pci_dev->dev, "* exit looping after %d times\n",
+ iterations);
+ goto fatal_error;
+ }
+
+ gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ if (gfir != 0x0)
+ dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n",
+ IO_SLC_CFGREG_GFIR, gfir);
+ if (gfir == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+
+ /*
+ * Avoid printing when the GFIR bit is on; this prevents continuous
+ * printout e.g. for the following bug:
+ * FIR set without a 2ndary FIR/FIR cannot be cleared
+ * Comment out the following if to get the prints:
+ */
+ if (gfir == 0)
+ return 0;
+
+ gfir_masked = gfir & GFIR_ERR_TRIGGER; /* fatal errors */
+
+ for (uid = 0; uid < GENWQE_MAX_UNITS; uid++) { /* 0..2 in zEDC */
+
+ /* read the primary FIR (pfir) */
+ fir_addr = (uid << 24) + 0x08;
+ fir = __genwqe_readq(cd, fir_addr);
+ if (fir == 0x0)
+ continue; /* no error in this unit */
+
+ dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fir_addr, fir);
+ if (fir == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+
+ /* read primary FEC */
+ fec_addr = (uid << 24) + 0x18;
+ fec = __genwqe_readq(cd, fec_addr);
+
+ dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fec_addr, fec);
+ if (fec == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+
+ for (j = 0, mask = 1ULL; j < 64; j++, mask <<= 1) {
+
+ /* secondary fir empty, skip it */
+ if ((fir & mask) == 0x0)
+ continue;
+
+ sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
+ sfir = __genwqe_readq(cd, sfir_addr);
+
+ if (sfir == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+ dev_err(&pci_dev->dev,
+ "* 0x%08x 0x%016llx\n", sfir_addr, sfir);
+
+ sfec_addr = (uid << 24) + 0x300 + 0x08 * j;
+ sfec = __genwqe_readq(cd, sfec_addr);
+
+ if (sfec == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+ dev_err(&pci_dev->dev,
+ "* 0x%08x 0x%016llx\n", sfec_addr, sfec);
+
+ gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ if (gfir == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+
+ /* gfir turned on during routine! get out and
+ start over. */
+ if ((gfir_masked == 0x0) &&
+ (gfir & GFIR_ERR_TRIGGER)) {
+ goto healthMonitor;
+ }
+
+ /* do not clear if we entered with a fatal gfir */
+ if (gfir_masked == 0x0) {
+
+ /* NEW clear by mask the logged bits */
+ sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
+ __genwqe_writeq(cd, sfir_addr, sfir);
+
+ dev_dbg(&pci_dev->dev,
+ "[HM] Clearing 2ndary FIR 0x%08x "
+ "with 0x%016llx\n", sfir_addr, sfir);
+
+ /*
+ * Note: these cannot be error FIRs
+ * since gfir_masked is 0 after sfir
+ * was read. Also, it is safe to do
+ * this write if sfir=0. Still need to
+ * clear the primary. This just means
+ * there is no secondary FIR.
+ */
+
+ /* clear by mask the logged bit. */
+ fir_clr_addr = (uid << 24) + 0x10;
+ __genwqe_writeq(cd, fir_clr_addr, mask);
+
+ dev_dbg(&pci_dev->dev,
+ "[HM] Clearing primary FIR 0x%08x "
+ "with 0x%016llx\n", fir_clr_addr,
+ mask);
+ }
+ }
+ }
+ gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ if (gfir == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+
+ if ((gfir_masked == 0x0) && (gfir & GFIR_ERR_TRIGGER)) {
+ /*
+ * Check once more that it didn't go on after all the
+ * FIRS were cleared.
+ */
+ dev_dbg(&pci_dev->dev, "ACK! Another FIR! Recursing %d!\n",
+ iterations);
+ goto healthMonitor;
+ }
+ return gfir_masked;
+
+ fatal_error:
+ return IO_ILLEGAL_VALUE;
+}
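+
+/*
+ * Worked example (illustrative) of the address layout used above: for
+ * unit id 1 and secondary FIR bit j = 3, the primary FIR is read at
+ * 0x01000008, its clear register is 0x01000010, the secondary FIR is
+ * at 0x01000118 and the secondary FEC at 0x01000318.
+ */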
+
+/**
+ * genwqe_health_thread() - Health checking thread
+ *
+ * This thread is only started for the PF of the card.
+ *
+ * This thread monitors the health of the card. A critical situation
+ * is when we read registers which contain -1 (IO_ILLEGAL_VALUE). In
+ * this case we need to be recovered from outside. Writing to
+ * registers will very likely not work either.
+ *
+ * This thread must only exit if kthread_should_stop() becomes true.
+ *
+ * Condition for the health-thread to trigger:
+ * a) when a kthread_stop() request comes in or
+ * b) a critical GFIR occurred
+ *
+ * Informational GFIRs are checked and potentially printed every
+ * health_check_interval seconds.
+ */
+static int genwqe_health_thread(void *data)
+{
+ int rc, should_stop = 0;
+ struct genwqe_dev *cd = data;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ u64 gfir, gfir_masked, slu_unitcfg, app_unitcfg;
+
+ while (!kthread_should_stop()) {
+ rc = wait_event_interruptible_timeout(cd->health_waitq,
+ (genwqe_health_check_cond(cd, &gfir) ||
+ (should_stop = kthread_should_stop())),
+ genwqe_health_check_interval * HZ);
+
+ if (should_stop)
+ break;
+
+ if (gfir == IO_ILLEGAL_VALUE) {
+ dev_err(&pci_dev->dev,
+ "[%s] GFIR=%016llx\n", __func__, gfir);
+ goto fatal_error;
+ }
+
+ slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG);
+ if (slu_unitcfg == IO_ILLEGAL_VALUE) {
+ dev_err(&pci_dev->dev,
+ "[%s] SLU_UNITCFG=%016llx\n",
+ __func__, slu_unitcfg);
+ goto fatal_error;
+ }
+
+ app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG);
+ if (app_unitcfg == IO_ILLEGAL_VALUE) {
+ dev_err(&pci_dev->dev,
+ "[%s] APP_UNITCFG=%016llx\n",
+ __func__, app_unitcfg);
+ goto fatal_error;
+ }
+
+ gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ if (gfir == IO_ILLEGAL_VALUE) {
+ dev_err(&pci_dev->dev,
+ "[%s] %s: GFIR=%016llx\n", __func__,
+ (gfir & GFIR_ERR_TRIGGER) ? "err" : "info",
+ gfir);
+ goto fatal_error;
+ }
+
+ gfir_masked = genwqe_fir_checking(cd);
+ if (gfir_masked == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+
+ /*
+ * GFIR ErrorTrigger bits set => reset the card!
+ * Never do this for old/manufacturing images!
+ */
+ if ((gfir_masked) && !cd->skip_recovery &&
+ genwqe_recovery_on_fatal_gfir_required(cd)) {
+
+ cd->card_state = GENWQE_CARD_FATAL_ERROR;
+
+ rc = genwqe_recover_card(cd, 0);
+ if (rc < 0) {
+ /* FIXME Card is unusable and needs unbind! */
+ goto fatal_error;
+ }
+ }
+
+ cd->last_gfir = gfir;
+ cond_resched();
+ }
+
+ return 0;
+
+ fatal_error:
+ dev_err(&pci_dev->dev,
+ "[%s] card unusable. Please trigger unbind!\n", __func__);
+
+ /* Bring down logical devices to inform user space via udev remove. */
+ cd->card_state = GENWQE_CARD_FATAL_ERROR;
+ genwqe_stop(cd);
+
+ /* genwqe_bus_reset() failed. Now wait for genwqe_remove(). */
+ while (!kthread_should_stop())
+ cond_resched();
+
+ return -EIO;
+}
+
+static int genwqe_health_check_start(struct genwqe_dev *cd)
+{
+ int rc;
+
+ if (genwqe_health_check_interval <= 0)
+ return 0; /* valid for disabling the service */
+
+ /* moved before request_irq() */
+ /* init_waitqueue_head(&cd->health_waitq); */
+
+ cd->health_thread = kthread_run(genwqe_health_thread, cd,
+ GENWQE_DEVNAME "%d_health",
+ cd->card_idx);
+ if (IS_ERR(cd->health_thread)) {
+ rc = PTR_ERR(cd->health_thread);
+ cd->health_thread = NULL;
+ return rc;
+ }
+ return 0;
+}
+
+static int genwqe_health_thread_running(struct genwqe_dev *cd)
+{
+ return cd->health_thread != NULL;
+}
+
+static int genwqe_health_check_stop(struct genwqe_dev *cd)
+{
+ int rc;
+
+ if (!genwqe_health_thread_running(cd))
+ return -EIO;
+
+ rc = kthread_stop(cd->health_thread);
+ cd->health_thread = NULL;
+ return 0;
+}
+
+/**
+ * genwqe_pci_setup() - Allocate PCIe related resources for our card
+ */
+static int genwqe_pci_setup(struct genwqe_dev *cd)
+{
+ int err, bars;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
+ err = pci_enable_device_mem(pci_dev);
+ if (err) {
+ dev_err(&pci_dev->dev,
+ "err: failed to enable pci memory (err=%d)\n", err);
+ goto err_out;
+ }
+
+ /* Reserve PCI I/O and memory resources */
+ err = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
+ if (err) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: request bars failed (%d)\n", __func__, err);
+ err = -EIO;
+ goto err_disable_device;
+ }
+
+ /* check for 64-bit DMA address supported (DAC) */
+ if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
+ err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pci_dev->dev,
+ "err: DMA64 consistent mask error\n");
+ err = -EIO;
+ goto out_release_resources;
+ }
+ /* check for 32-bit DMA address supported (SAC) */
+ } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+ err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pci_dev->dev,
+ "err: DMA32 consistent mask error\n");
+ err = -EIO;
+ goto out_release_resources;
+ }
+ } else {
+ dev_err(&pci_dev->dev,
+ "err: neither DMA32 nor DMA64 supported\n");
+ err = -EIO;
+ goto out_release_resources;
+ }
+
+ pci_set_master(pci_dev);
+ pci_enable_pcie_error_reporting(pci_dev);
+
+ /* request complete BAR-0 space (length = 0) */
+ cd->mmio_len = pci_resource_len(pci_dev, 0);
+ cd->mmio = pci_iomap(pci_dev, 0, 0);
+ if (cd->mmio == NULL) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: mapping BAR0 failed\n", __func__);
+ err = -ENOMEM;
+ goto out_release_resources;
+ }
+
+ cd->num_vfs = pci_sriov_get_totalvfs(pci_dev);
+
+ err = genwqe_read_ids(cd);
+ if (err)
+ goto out_iounmap;
+
+ return 0;
+
+ out_iounmap:
+ pci_iounmap(pci_dev, cd->mmio);
+ out_release_resources:
+ pci_release_selected_regions(pci_dev, bars);
+ err_disable_device:
+ pci_disable_device(pci_dev);
+ err_out:
+ return err;
+}
+
+/**
+ * genwqe_pci_remove() - Free PCIe related resources for our card
+ */
+static void genwqe_pci_remove(struct genwqe_dev *cd)
+{
+ int bars;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (cd->mmio)
+ pci_iounmap(pci_dev, cd->mmio);
+
+ bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
+ pci_release_selected_regions(pci_dev, bars);
+ pci_disable_device(pci_dev);
+}
+
+/**
+ * genwqe_probe() - Device initialization
+ * @pci_dev: PCI device information struct
+ * @id: matched entry of the PCI device ID table
+ *
+ * Callable for multiple cards. This function is called on bind.
+ *
+ * Return: 0 if succeeded, < 0 when failed
+ */
+static int genwqe_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+{
+ int err;
+ struct genwqe_dev *cd;
+
+ genwqe_init_crc32();
+
+ cd = genwqe_dev_alloc();
+ if (IS_ERR(cd)) {
+ dev_err(&pci_dev->dev, "err: could not alloc mem (err=%d)!\n",
+ (int)PTR_ERR(cd));
+ return PTR_ERR(cd);
+ }
+
+ dev_set_drvdata(&pci_dev->dev, cd);
+ cd->pci_dev = pci_dev;
+
+ err = genwqe_pci_setup(cd);
+ if (err < 0) {
+ dev_err(&pci_dev->dev,
+ "err: problems with PCI setup (err=%d)\n", err);
+ goto out_free_dev;
+ }
+
+ err = genwqe_start(cd);
+ if (err < 0) {
+ dev_err(&pci_dev->dev,
+ "err: cannot start card services! (err=%d)\n", err);
+ goto out_pci_remove;
+ }
+
+ if (genwqe_is_privileged(cd)) {
+ err = genwqe_health_check_start(cd);
+ if (err < 0) {
+ dev_err(&pci_dev->dev,
+ "err: cannot start health checking! "
+ "(err=%d)\n", err);
+ goto out_stop_services;
+ }
+ }
+ return 0;
+
+ out_stop_services:
+ genwqe_stop(cd);
+ out_pci_remove:
+ genwqe_pci_remove(cd);
+ out_free_dev:
+ genwqe_dev_free(cd);
+ return err;
+}
+
+/**
+ * genwqe_remove() - Called when device is removed (hot-pluggable)
+ *
+ * Also called when the driver is unloaded or when an unbind is done.
+ */
+static void genwqe_remove(struct pci_dev *pci_dev)
+{
+ struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);
+
+ genwqe_health_check_stop(cd);
+
+ /*
+ * genwqe_stop() must survive if it is called twice
+ * sequentially. This happens when the health thread calls it
+ * and fails on genwqe_bus_reset().
+ */
+ genwqe_stop(cd);
+ genwqe_pci_remove(cd);
+ genwqe_dev_free(cd);
+}
+
+/*
+ * genwqe_err_error_detected() - Error detection callback
+ *
+ * This callback is called by the PCI subsystem whenever a PCI bus
+ * error is detected.
+ */
+static pci_ers_result_t genwqe_err_error_detected(struct pci_dev *pci_dev,
+ enum pci_channel_state state)
+{
+ struct genwqe_dev *cd;
+
+ if (pci_dev == NULL)
+ return PCI_ERS_RESULT_NEED_RESET;
+
+ dev_err(&pci_dev->dev, "[%s] state=%d\n", __func__, state);
+
+ cd = dev_get_drvdata(&pci_dev->dev);
+ if (cd == NULL)
+ return PCI_ERS_RESULT_NEED_RESET;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t genwqe_err_result_none(struct pci_dev *dev)
+{
+ return PCI_ERS_RESULT_NONE;
+}
+
+static void genwqe_err_resume(struct pci_dev *dev)
+{
+}
+
+static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs)
+{
+ struct genwqe_dev *cd = dev_get_drvdata(&dev->dev);
+
+ if (numvfs > 0) {
+ genwqe_setup_vf_jtimer(cd);
+ pci_enable_sriov(dev, numvfs);
+ return numvfs;
+ }
+ if (numvfs == 0) {
+ pci_disable_sriov(dev);
+ return 0;
+ }
+ return 0;
+}
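+
+/*
+ * Note: genwqe_sriov_configure() is not called directly from within
+ * this driver. The PCI core invokes it when the number of VFs is
+ * changed through the standard sriov_numvfs sysfs attribute, e.g.
+ * (the device address below is only an example):
+ *
+ *   echo 15 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
+ *   echo 0  > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
+ */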
+
+static struct pci_error_handlers genwqe_err_handler = {
+ .error_detected = genwqe_err_error_detected,
+ .mmio_enabled = genwqe_err_result_none,
+ .link_reset = genwqe_err_result_none,
+ .slot_reset = genwqe_err_result_none,
+ .resume = genwqe_err_resume,
+};
+
+static struct pci_driver genwqe_driver = {
+ .name = genwqe_driver_name,
+ .id_table = genwqe_device_table,
+ .probe = genwqe_probe,
+ .remove = genwqe_remove,
+ .sriov_configure = genwqe_sriov_configure,
+ .err_handler = &genwqe_err_handler,
+};
+
+/**
+ * genwqe_init_module() - Driver registration and initialization
+ */
+static int __init genwqe_init_module(void)
+{
+ int rc;
+
+ class_genwqe = class_create(THIS_MODULE, GENWQE_DEVNAME);
+ if (IS_ERR(class_genwqe)) {
+ pr_err("[%s] create class failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ debugfs_genwqe = debugfs_create_dir(GENWQE_DEVNAME, NULL);
+ if (!debugfs_genwqe) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ rc = pci_register_driver(&genwqe_driver);
+ if (rc != 0) {
+ pr_err("[%s] pci_reg_driver (rc=%d)\n", __func__, rc);
+ goto err_out0;
+ }
+
+ return rc;
+
+ err_out0:
+ debugfs_remove(debugfs_genwqe);
+ err_out:
+ class_destroy(class_genwqe);
+ return rc;
+}
+
+/**
+ * genwqe_exit_module() - Driver exit
+ */
+static void __exit genwqe_exit_module(void)
+{
+ pci_unregister_driver(&genwqe_driver);
+ debugfs_remove(debugfs_genwqe);
+ class_destroy(class_genwqe);
+}
+
+module_init(genwqe_init_module);
+module_exit(genwqe_exit_module);
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
new file mode 100644
index 000000000000..5e4dbd21f89a
--- /dev/null
+++ b/drivers/misc/genwqe/card_base.h
@@ -0,0 +1,557 @@
+#ifndef __CARD_BASE_H__
+#define __CARD_BASE_H__
+
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Interfaces within the GenWQE module. Defines genwqe_card and
+ * ddcb_queue as well as ddcb_requ.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/stringify.h>
+#include <linux/pci.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/version.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+#include <linux/genwqe/genwqe_card.h>
+#include "genwqe_driver.h"
+
+#define GENWQE_MSI_IRQS 4 /* Just one supported, no MSIx */
+#define GENWQE_FLAG_MSI_ENABLED (1 << 0)
+
+#define GENWQE_MAX_VFS 15 /* maximum 15 VFs are possible */
+#define GENWQE_MAX_FUNCS 16 /* 1 PF and 15 VFs */
+#define GENWQE_CARD_NO_MAX (16 * GENWQE_MAX_FUNCS)
+
+/* Compile parameters, some of them appear in debugfs for later adjustment */
+#define genwqe_ddcb_max 32 /* DDCBs on the work-queue */
+#define genwqe_polling_enabled 0 /* in case of irqs not working */
+#define genwqe_ddcb_software_timeout 10 /* timeout per DDCB in seconds */
+#define genwqe_kill_timeout 8 /* time until process gets killed */
+#define genwqe_vf_jobtimeout_msec 250 /* 250 msec */
+#define genwqe_pf_jobtimeout_msec 8000 /* 8 sec should be ok */
+#define genwqe_health_check_interval 4 /* <= 0: disabled */
+
+/* Sysfs attribute groups used when we create the genwqe device */
+extern const struct attribute_group *genwqe_attribute_groups[];
+
+/*
+ * Config space for Genwqe5 A7:
+ * 00:[14 10 4b 04]40 00 10 00[00 00 00 12]00 00 00 00
+ * 10: 0c 00 00 f0 07 3c 00 00 00 00 00 00 00 00 00 00
+ * 20: 00 00 00 00 00 00 00 00 00 00 00 00[14 10 4b 04]
+ * 30: 00 00 00 00 50 00 00 00 00 00 00 00 00 00 00 00
+ */
+#define PCI_DEVICE_GENWQE 0x044b /* Genwqe DeviceID */
+
+#define PCI_SUBSYSTEM_ID_GENWQE5 0x035f /* Genwqe A5 Subsystem-ID */
+#define PCI_SUBSYSTEM_ID_GENWQE5_NEW 0x044b /* Genwqe A5 Subsystem-ID */
+#define PCI_CLASSCODE_GENWQE5 0x1200 /* UNKNOWN */
+
+#define PCI_SUBVENDOR_ID_IBM_SRIOV 0x0000
+#define PCI_SUBSYSTEM_ID_GENWQE5_SRIOV 0x0000 /* Genwqe A5 Subsystem-ID */
+#define PCI_CLASSCODE_GENWQE5_SRIOV 0x1200 /* UNKNOWN */
+
+#define GENWQE_SLU_ARCH_REQ 2 /* Required SLU architecture level */
+
+/**
+ * struct genwqe_reg - Genwqe data dump functionality
+ */
+struct genwqe_reg {
+ u32 addr;
+ u32 idx;
+ u64 val;
+};
+
+/*
+ * enum genwqe_dbg_type - Specify chip unit to dump/debug
+ */
+enum genwqe_dbg_type {
+ GENWQE_DBG_UNIT0 = 0, /* captured before prev errs cleared */
+ GENWQE_DBG_UNIT1 = 1,
+ GENWQE_DBG_UNIT2 = 2,
+ GENWQE_DBG_UNIT3 = 3,
+ GENWQE_DBG_UNIT4 = 4,
+ GENWQE_DBG_UNIT5 = 5,
+ GENWQE_DBG_UNIT6 = 6,
+ GENWQE_DBG_UNIT7 = 7,
+ GENWQE_DBG_REGS = 8,
+ GENWQE_DBG_DMA = 9,
+ GENWQE_DBG_UNITS = 10, /* max number of possible debug units */
+};
+
+/* Software error injection to simulate card failures */
+#define GENWQE_INJECT_HARDWARE_FAILURE 0x00000001 /* injects -1 reg reads */
+#define GENWQE_INJECT_BUS_RESET_FAILURE 0x00000002 /* pci_bus_reset fail */
+#define GENWQE_INJECT_GFIR_FATAL 0x00000004 /* GFIR = 0x0000ffff */
+#define GENWQE_INJECT_GFIR_INFO 0x00000008 /* GFIR = 0xffff0000 */
+
+/*
+ * Genwqe card description and management data.
+ *
+ * Error-handling in case of card malfunction
+ * ------------------------------------------
+ *
+ * If the card is detected to be defective, the outside environment
+ * will cause the PCI layer to call deinit (the cleanup function for
+ * probe). This has the same effect as doing an unbind/bind operation
+ * on the card.
+ *
+ * The genwqe card driver implements a health checking thread which
+ * verifies the card function. If it detects a problem, the card's
+ * device is shut down and restarted again, along with a reset of the
+ * card and queue.
+ *
+ * All functions accessing the card device return either -EIO or -ENODEV
+ * to indicate the malfunction to the user. The user has to close
+ * the file descriptor and open a new one, once the card becomes
+ * available again.
+ *
+ * If the open file descriptor is set up to receive SIGIO, the signal is
+ * generated for the application, which has to provide a handler to
+ * react to it. If the application does not close the open
+ * file descriptor, a SIGKILL is sent to enforce freeing the card's
+ * resources.
+ *
+ * I did not find a different way to prevent kernel problems due to
+ * reference counters for the card's character devices getting out of
+ * sync. The character device deallocation does not block, even if
+ * there is still an open file descriptor pending. If this pending
+ * descriptor is closed, the data structures used by the character
+ * device are reinstantiated, which will lead to the reference counter
+ * dropping below the allowed values.
+ *
+ * Card recovery
+ * -------------
+ *
+ * To test the internal driver recovery the following command can be used:
+ * sudo sh -c 'echo 0xfffff > /sys/class/genwqe/genwqe0_card/err_inject'
+ */
+
+
+/**
+ * enum dma_mapping_type - Mapping type definition
+ *
+ * To avoid memcpying data around we use user memory directly. To do
+ * this we need to pin/swap-in the memory and request a DMA address
+ * for it.
+ */
+enum dma_mapping_type {
+ GENWQE_MAPPING_RAW = 0, /* contiguous memory buffer */
+ GENWQE_MAPPING_SGL_TEMP, /* sglist dynamically used */
+ GENWQE_MAPPING_SGL_PINNED, /* sglist used with pinning */
+};
+
+/**
+ * struct dma_mapping - Information about memory mappings done by the driver
+ */
+struct dma_mapping {
+ enum dma_mapping_type type;
+
+ void *u_vaddr; /* user-space vaddr/non-aligned */
+ void *k_vaddr; /* kernel-space vaddr/non-aligned */
+ dma_addr_t dma_addr; /* physical DMA address */
+
+ struct page **page_list; /* list of pages used by user buff */
+ dma_addr_t *dma_list; /* list of dma addresses per page */
+ unsigned int nr_pages; /* number of pages */
+ unsigned int size; /* size in bytes */
+
+ struct list_head card_list; /* list of usr_maps for card */
+ struct list_head pin_list; /* list of pinned memory for dev */
+};
+
+static inline void genwqe_mapping_init(struct dma_mapping *m,
+ enum dma_mapping_type type)
+{
+ memset(m, 0, sizeof(*m));
+ m->type = type;
+}
+
+/**
+ * struct ddcb_queue - DDCB queue data
+ * @ddcb_max: Number of DDCBs on the queue
+ * @ddcb_next: Next free DDCB
+ * @ddcb_act: Next DDCB supposed to finish
+ * @ddcb_seq: Sequence number of last DDCB
+ * @ddcbs_in_flight: Currently enqueued DDCBs
+ * @ddcbs_completed: Number of already completed DDCBs
+ * @busy: Number of -EBUSY returns
+ * @ddcb_daddr: DMA address of first DDCB in the queue
+ * @ddcb_vaddr: Kernel virtual address of first DDCB in the queue
+ * @ddcb_req: Associated requests (one per DDCB)
+ * @ddcb_waitqs: Associated wait queues (one per DDCB)
+ * @ddcb_lock: Lock to protect queuing operations
+ * @ddcb_waitq: Wait on next DDCB finishing
+ */
+
+struct ddcb_queue {
+ int ddcb_max; /* amount of DDCBs */
+ int ddcb_next; /* next available DDCB num */
+ int ddcb_act; /* DDCB to be processed */
+ u16 ddcb_seq; /* slc seq num */
+ unsigned int ddcbs_in_flight; /* number of ddcbs in processing */
+ unsigned int ddcbs_completed;
+ unsigned int ddcbs_max_in_flight;
+ unsigned int busy; /* how many times -EBUSY? */
+
+ dma_addr_t ddcb_daddr; /* DMA address */
+ struct ddcb *ddcb_vaddr; /* kernel virtual addr for DDCBs */
+ struct ddcb_requ **ddcb_req; /* ddcb processing parameter */
+ wait_queue_head_t *ddcb_waitqs; /* waitqueue per ddcb */
+
+ spinlock_t ddcb_lock; /* exclusive access to queue */
+ wait_queue_head_t ddcb_waitq; /* wait for ddcb processing */
+
+ /* registers of the respective queue to be used */
+ u32 IO_QUEUE_CONFIG;
+ u32 IO_QUEUE_STATUS;
+ u32 IO_QUEUE_SEGMENT;
+ u32 IO_QUEUE_INITSQN;
+ u32 IO_QUEUE_WRAP;
+ u32 IO_QUEUE_OFFSET;
+ u32 IO_QUEUE_WTIME;
+ u32 IO_QUEUE_ERRCNTS;
+ u32 IO_QUEUE_LRW;
+};
+
+/*
+ * GFIR, SLU_UNITCFG, APP_UNITCFG
+ * 8 Units with FIR/FEC + 64 * 2ndary FIRS/FEC.
+ */
+#define GENWQE_FFDC_REGS (3 + (8 * (2 + 2 * 64)))
+
+struct genwqe_ffdc {
+ unsigned int entries;
+ struct genwqe_reg *regs;
+};
+
+/**
+ * struct genwqe_dev - GenWQE device information
+ * @card_state: Card operation state, see above
+ * @ffdc: First Failure Data Capture buffers for each unit
+ * @card_thread: Working thread to operate the DDCB queue
+ * @card_waitq: Wait queue used in card_thread
+ * @queue: DDCB queue
+ * @health_thread: Card monitoring thread (only for PFs)
+ * @health_waitq: Wait queue used in health_thread
+ * @pci_dev: Associated PCI device (function)
+ * @mmio: Base address of 64-bit register space
+ * @mmio_len: Length of register area
+ * @file_lock: Lock to protect access to file_list
+ * @file_list: List of all processes with open GenWQE file descriptors
+ *
+ * This struct contains all information needed to communicate with a
+ * GenWQE card. It is initialized when a GenWQE device is found and
+ * destroyed when it goes away. It holds data to maintain the queue as
+ * well as data needed to feed the user interfaces.
+ */
+struct genwqe_dev {
+ enum genwqe_card_state card_state;
+ spinlock_t print_lock;
+
+ int card_idx; /* card index 0..CARD_NO_MAX-1 */
+ u64 flags; /* general flags */
+
+ /* FFDC data gathering */
+ struct genwqe_ffdc ffdc[GENWQE_DBG_UNITS];
+
+ /* DDCB workqueue */
+ struct task_struct *card_thread;
+ wait_queue_head_t queue_waitq;
+ struct ddcb_queue queue; /* genwqe DDCB queue */
+ unsigned int irqs_processed;
+
+ /* Card health checking thread */
+ struct task_struct *health_thread;
+ wait_queue_head_t health_waitq;
+
+ /* char device */
+ dev_t devnum_genwqe; /* major/minor num card */
+ struct class *class_genwqe; /* reference to class object */
+ struct device *dev; /* for device creation */
+ struct cdev cdev_genwqe; /* char device for card */
+
+ struct dentry *debugfs_root; /* debugfs card root directory */
+ struct dentry *debugfs_genwqe; /* debugfs driver root directory */
+
+ /* pci resources */
+ struct pci_dev *pci_dev; /* PCI device */
+ void __iomem *mmio; /* BAR-0 MMIO start */
+ unsigned long mmio_len;
+ u16 num_vfs;
+ u32 vf_jobtimeout_msec[GENWQE_MAX_VFS];
+ int is_privileged; /* access to all regs possible */
+
+ /* config regs which we need often */
+ u64 slu_unitcfg;
+ u64 app_unitcfg;
+ u64 softreset;
+ u64 err_inject;
+ u64 last_gfir;
+ char app_name[5];
+
+ spinlock_t file_lock; /* lock for open files */
+ struct list_head file_list; /* list of open files */
+
+ /* debugfs parameters */
+ int ddcb_software_timeout; /* wait until DDCB times out */
+ int skip_recovery; /* circumvention if recovery fails */
+ int kill_timeout; /* wait after sending SIGKILL */
+};
+
+/**
+ * enum genwqe_requ_state - State of a DDCB execution request
+ */
+enum genwqe_requ_state {
+ GENWQE_REQU_NEW = 0,
+ GENWQE_REQU_ENQUEUED = 1,
+ GENWQE_REQU_TAPPED = 2,
+ GENWQE_REQU_FINISHED = 3,
+ GENWQE_REQU_STATE_MAX,
+};
+
+/**
+ * struct ddcb_requ - Kernel internal representation of the DDCB request
+ * @cmd: User space representation of the DDCB execution request
+ */
+struct ddcb_requ {
+ /* kernel specific content */
+ enum genwqe_requ_state req_state; /* request status */
+ int num; /* ddcb_no for this request */
+ struct ddcb_queue *queue; /* associated queue */
+
+ struct dma_mapping dma_mappings[DDCB_FIXUPS];
+ struct sg_entry *sgl[DDCB_FIXUPS];
+ dma_addr_t sgl_dma_addr[DDCB_FIXUPS];
+ size_t sgl_size[DDCB_FIXUPS];
+
+ /* kernel/user shared content */
+ struct genwqe_ddcb_cmd cmd; /* user space representation of this request */
+ struct genwqe_debug_data debug_data;
+};
+
+/**
+ * struct genwqe_file - Information for open GenWQE devices
+ */
+struct genwqe_file {
+ struct genwqe_dev *cd;
+ struct genwqe_driver *client;
+ struct file *filp;
+
+ struct fasync_struct *async_queue;
+ struct task_struct *owner;
+ struct list_head list; /* entry in list of open files */
+
+ spinlock_t map_lock; /* lock for dma_mappings */
+ struct list_head map_list; /* list of dma_mappings */
+
+ spinlock_t pin_lock; /* lock for pinned memory */
+ struct list_head pin_list; /* list of pinned memory */
+};
+
+int genwqe_setup_service_layer(struct genwqe_dev *cd); /* for PF only */
+int genwqe_finish_queue(struct genwqe_dev *cd);
+int genwqe_release_service_layer(struct genwqe_dev *cd);
+
+/**
+ * genwqe_get_slu_id() - Read Service Layer Unit Id
+ * Return: 0x00: Development code
+ * 0x01: SLC1 (old)
+ * 0x02: SLC2 (sept2012)
+ * 0x03: SLC2 (feb2013, generic driver)
+ */
+static inline int genwqe_get_slu_id(struct genwqe_dev *cd)
+{
+ return (int)((cd->slu_unitcfg >> 32) & 0xff);
+}
+
+int genwqe_ddcbs_in_flight(struct genwqe_dev *cd);
+
+u8 genwqe_card_type(struct genwqe_dev *cd);
+int genwqe_card_reset(struct genwqe_dev *cd);
+int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count);
+void genwqe_reset_interrupt_capability(struct genwqe_dev *cd);
+
+int genwqe_device_create(struct genwqe_dev *cd);
+int genwqe_device_remove(struct genwqe_dev *cd);
+
+/* debugfs */
+int genwqe_init_debugfs(struct genwqe_dev *cd);
+void genqwe_exit_debugfs(struct genwqe_dev *cd);
+
+int genwqe_read_softreset(struct genwqe_dev *cd);
+
+/* Hardware Circumventions */
+int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd);
+int genwqe_flash_readback_fails(struct genwqe_dev *cd);
+
+/**
+ * genwqe_write_vreg() - Write register in VF window
+ * @cd: genwqe device
+ * @reg: register address
+ * @val: value to write
+ * @func: 0: PF, 1: VF0, ..., 15: VF14
+ */
+int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func);
+
+/**
+ * genwqe_read_vreg() - Read register in VF window
+ * @cd: genwqe device
+ * @reg: register address
+ * @func: 0: PF, 1: VF0, ..., 15: VF14
+ *
+ * Return: content of the register
+ */
+u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func);
+
+/* FFDC Buffer Management */
+int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int unit_id);
+int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int unit_id,
+ struct genwqe_reg *regs, unsigned int max_regs);
+int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
+ unsigned int max_regs, int all);
+int genwqe_ffdc_dump_dma(struct genwqe_dev *cd,
+ struct genwqe_reg *regs, unsigned int max_regs);
+
+int genwqe_init_debug_data(struct genwqe_dev *cd,
+ struct genwqe_debug_data *d);
+
+void genwqe_init_crc32(void);
+int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len);
+
+/* Memory allocation/deallocation; dma address handling */
+int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m,
+ void *uaddr, unsigned long size,
+ struct ddcb_requ *req);
+
+int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
+ struct ddcb_requ *req);
+
+struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages,
+ dma_addr_t *dma_addr, size_t *sgl_size);
+
+void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
+ dma_addr_t dma_addr, size_t size);
+
+int genwqe_setup_sgl(struct genwqe_dev *cd,
+ unsigned long offs,
+ unsigned long size,
+ struct sg_entry *sgl, /* genwqe sgl */
+ dma_addr_t dma_addr, size_t sgl_size,
+ dma_addr_t *dma_list, int page_offs, int num_pages);
+
+int genwqe_check_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
+ int size);
+
+static inline bool dma_mapping_used(struct dma_mapping *m)
+{
+ if (!m)
+ return 0;
+ return m->size != 0;
+}
+
+/**
+ * __genwqe_execute_ddcb() - Execute DDCB request with addr translation
+ *
+ * This function will do the address translation changes to the DDCBs
+ * according to the definitions required by the ATS field. It looks up
+ * the memory allocation buffer or does vmap/vunmap for the respective
+ * user-space buffers, including page pinning and scatter-gather list
+ * buildup and teardown.
+ */
+int __genwqe_execute_ddcb(struct genwqe_dev *cd,
+ struct genwqe_ddcb_cmd *cmd);
+
+/**
+ * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation
+ *
+ * This version will not do address translation or any modification of
+ * the DDCB data. It is used e.g. for the MoveFlash DDCB which is
+ * entirely prepared by the driver itself. That means the appropriate
+ * DMA addresses are already in the DDCB and do not need any
+ * modification.
+ */
+int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
+ struct genwqe_ddcb_cmd *cmd);
+
+int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
+int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
+int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
+
+/* register access */
+int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val);
+u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs);
+int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val);
+u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs);
+
+void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
+ dma_addr_t *dma_handle);
+void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+/* Base clock frequency in MHz */
+int genwqe_base_clock_frequency(struct genwqe_dev *cd);
+
+/* Before FFDC is captured the traps should be stopped. */
+void genwqe_stop_traps(struct genwqe_dev *cd);
+void genwqe_start_traps(struct genwqe_dev *cd);
+
+/* Hardware circumvention */
+bool genwqe_need_err_masking(struct genwqe_dev *cd);
+
+/**
+ * genwqe_is_privileged() - Determine operation mode for PCI function
+ *
+ * On Intel with SRIOV support we see:
+ * PF: is_physfn = 1 is_virtfn = 0
+ * VF: is_physfn = 0 is_virtfn = 1
+ *
+ * On Systems with no SRIOV support _and_ virtualized systems we get:
+ * is_physfn = 0 is_virtfn = 0
+ *
+ * Other vendors have individual pci device ids to distinguish between
+ * virtual function drivers and physical function drivers. GenWQE
+ * unfortunately has just one pci device id for both VFs and PF.
+ *
+ * The following code is used to distinguish if the card is running in
+ * privileged mode, either as true PF or in a virtualized system with
+ * full register access e.g. currently on PowerPC.
+ *
+ * if (pci_dev->is_virtfn)
+ * cd->is_privileged = 0;
+ * else
+ * cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
+ * != IO_ILLEGAL_VALUE);
+ */
+static inline int genwqe_is_privileged(struct genwqe_dev *cd)
+{
+ return cd->is_privileged;
+}
+
+#endif /* __CARD_BASE_H__ */
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
new file mode 100644
index 000000000000..6f1acc0ccf88
--- /dev/null
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -0,0 +1,1376 @@
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Device Driver Control Block (DDCB) queue support. Definition of
+ * interrupt handlers for queue support as well as triggering the
+ * health monitor code in case of problems. The current hardware uses
+ * an MSI interrupt which is shared between error handling and
+ * functional code.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/crc-itu-t.h>
+
+#include "card_base.h"
+#include "card_ddcb.h"
+
+/*
+ * N: next DDCB, this is where the next DDCB will be put.
+ * A: active DDCB, this is where the code will look for the next completion.
+ * x: DDCB is enqueued, we are waiting for its completion.
+
+ * Situation (1): Empty queue
+ * +---+---+---+---+---+---+---+---+
+ * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | | | | | | | | |
+ * +---+---+---+---+---+---+---+---+
+ * A/N
+ * enqueued_ddcbs = N - A = 2 - 2 = 0
+ *
+ * Situation (2): Not wrapped, N > A
+ * +---+---+---+---+---+---+---+---+
+ * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | | | x | x | | | | |
+ * +---+---+---+---+---+---+---+---+
+ * A N
+ * enqueued_ddcbs = N - A = 4 - 2 = 2
+ *
+ * Situation (3): Queue wrapped, A > N
+ * +---+---+---+---+---+---+---+---+
+ * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | x | x | | | x | x | x | x |
+ * +---+---+---+---+---+---+---+---+
+ * N A
+ * enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 2) = 6
+ *
+ * Situation (4a): Queue full N > A
+ * +---+---+---+---+---+---+---+---+
+ * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | x | x | x | x | x | x | x | |
+ * +---+---+---+---+---+---+---+---+
+ * A N
+ *
+ * enqueued_ddcbs = N - A = 7 - 0 = 7
+ *
+ * Situation (4b): Queue full A > N
+ * +---+---+---+---+---+---+---+---+
+ * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | x | x | x | | x | x | x | x |
+ * +---+---+---+---+---+---+---+---+
+ * N A
+ * enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 3) = 7
+ */
+
+static int queue_empty(struct ddcb_queue *queue)
+{
+ return queue->ddcb_next == queue->ddcb_act;
+}
+
+static int queue_enqueued_ddcbs(struct ddcb_queue *queue)
+{
+ if (queue->ddcb_next >= queue->ddcb_act)
+ return queue->ddcb_next - queue->ddcb_act;
+
+ return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next);
+}
+
+static int queue_free_ddcbs(struct ddcb_queue *queue)
+{
+ int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1;
+
+ if (WARN_ON_ONCE(free_ddcbs < 0)) { /* must never ever happen! */
+ return 0;
+ }
+ return free_ddcbs;
+}
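+
+/*
+ * Illustration only, not part of the driver: a minimal, hypothetical
+ * self-check of the fill-level arithmetic documented in the diagrams
+ * above, using the example values from situations (1) to (3). The
+ * function name is made up for this sketch.
+ */
+static void __maybe_unused queue_fill_level_example(void)
+{
+ struct ddcb_queue q = { .ddcb_max = 8 };
+
+ q.ddcb_act = 2; q.ddcb_next = 2; /* situation (1): empty */
+ WARN_ON(queue_enqueued_ddcbs(&q) != 0);
+
+ q.ddcb_act = 2; q.ddcb_next = 4; /* situation (2): N > A */
+ WARN_ON(queue_enqueued_ddcbs(&q) != 2);
+
+ q.ddcb_act = 4; q.ddcb_next = 2; /* situation (3): wrapped */
+ WARN_ON(queue_enqueued_ddcbs(&q) != 6);
+}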
+
+/*
+ * Use of the PRIV field in the DDCB for queue debugging:
+ *
+ * (1) Trying to get rid of a DDCB which saw a timeout:
+ * pddcb->priv[6] = 0xcc; # cleared
+ *
+ * (2) Append a DDCB via NEXT bit:
+ * pddcb->priv[7] = 0xaa; # appended
+ *
+ * (3) DDCB needed tapping:
+ * pddcb->priv[7] = 0xbb; # tapped
+ *
+ * (4) DDCB marked as correctly finished:
+ * pddcb->priv[6] = 0xff; # finished
+ */
+
+static inline void ddcb_mark_tapped(struct ddcb *pddcb)
+{
+ pddcb->priv[7] = 0xbb; /* tapped */
+}
+
+static inline void ddcb_mark_appended(struct ddcb *pddcb)
+{
+ pddcb->priv[7] = 0xaa; /* appended */
+}
+
+static inline void ddcb_mark_cleared(struct ddcb *pddcb)
+{
+ pddcb->priv[6] = 0xcc; /* cleared */
+}
+
+static inline void ddcb_mark_finished(struct ddcb *pddcb)
+{
+ pddcb->priv[6] = 0xff; /* finished */
+}
+
+static inline void ddcb_mark_unused(struct ddcb *pddcb)
+{
+ pddcb->priv_64 = cpu_to_be64(0); /* not tapped */
+}
+
+/**
+ * genwqe_crc16() - Generate 16-bit crc as required for DDCBs
+ * @buff: pointer to data buffer
+ * @len: length of data for calculation
+ * @init: initial crc (0xffff at start)
+ *
+ * Polynomial = x^16 + x^12 + x^5 + 1 (0x1021)
+ * Example: 4 bytes 0x01 0x02 0x03 0x04 with init = 0xffff
+ * should result in a crc16 of 0x89c3
+ *
+ * Return: crc16 checksum in big endian format !
+ */
+static inline u16 genwqe_crc16(const u8 *buff, size_t len, u16 init)
+{
+ return crc_itu_t(init, buff, len);
+}
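+
+/*
+ * Illustration only, not part of the driver: a minimal, hypothetical
+ * self-check using the test vector from the comment above. The
+ * function name is made up for this sketch.
+ */
+static void __maybe_unused genwqe_crc16_selftest(void)
+{
+ static const u8 data[4] = { 0x01, 0x02, 0x03, 0x04 };
+
+ WARN_ON(genwqe_crc16(data, sizeof(data), 0xffff) != 0x89c3);
+}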
+
+static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue)
+{
+ int i;
+ struct ddcb *pddcb;
+ unsigned long flags;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ spin_lock_irqsave(&cd->print_lock, flags);
+
+ dev_info(&pci_dev->dev,
+ "DDCB list for card #%d (ddcb_act=%d / ddcb_next=%d):\n",
+ cd->card_idx, queue->ddcb_act, queue->ddcb_next);
+
+ pddcb = queue->ddcb_vaddr;
+ for (i = 0; i < queue->ddcb_max; i++) {
+ dev_err(&pci_dev->dev,
+ " %c %-3d: RETC=%03x SEQ=%04x "
+ "HSI=%02X SHI=%02x PRIV=%06llx CMD=%03x\n",
+ i == queue->ddcb_act ? '>' : ' ',
+ i,
+ be16_to_cpu(pddcb->retc_16),
+ be16_to_cpu(pddcb->seqnum_16),
+ pddcb->hsi,
+ pddcb->shi,
+ be64_to_cpu(pddcb->priv_64),
+ pddcb->cmd);
+ pddcb++;
+ }
+ spin_unlock_irqrestore(&cd->print_lock, flags);
+}
+
+struct genwqe_ddcb_cmd *ddcb_requ_alloc(void)
+{
+ struct ddcb_requ *req;
+
+ req = kzalloc(sizeof(*req), GFP_ATOMIC);
+ if (!req)
+ return NULL;
+
+ return &req->cmd;
+}
+
+void ddcb_requ_free(struct genwqe_ddcb_cmd *cmd)
+{
+ struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
+ kfree(req);
+}
+
+static inline enum genwqe_requ_state ddcb_requ_get_state(struct ddcb_requ *req)
+{
+ return req->req_state;
+}
+
+static inline void ddcb_requ_set_state(struct ddcb_requ *req,
+ enum genwqe_requ_state new_state)
+{
+ req->req_state = new_state;
+}
+
+static inline int ddcb_requ_collect_debug_data(struct ddcb_requ *req)
+{
+ return req->cmd.ddata_addr != 0x0;
+}
+
+/**
+ * ddcb_requ_finished() - Returns the hardware state of the associated DDCB
+ * @cd: pointer to genwqe device descriptor
+ * @req: DDCB work request
+ *
+ * The status of the ddcb_requ mirrors this hardware state, but is
+ * copied into the ddcb_requ by the interrupt/polling function. The
+ * lowlevel code should check the hardware state directly, while the
+ * higher level code should check the copy.
+ *
+ * This function will also return true if the state of the queue is
+ * not GENWQE_CARD_USED. This enables us to purge all DDCBs in the
+ * shutdown case.
+ */
+static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req)
+{
+ return (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) ||
+ (cd->card_state != GENWQE_CARD_USED);
+}
+
+/**
+ * enqueue_ddcb() - Enqueue a DDCB
+ * @cd: pointer to genwqe device descriptor
+ * @queue: queue this operation should be done on
+ * @pddcb: pointer to the DDCB being enqueued
+ * @ddcb_no: number of the DDCB being tapped/appended
+ *
+ * Start execution of DDCB by tapping or append to queue via NEXT
+ * bit. This is done by an atomic 'compare and swap' instruction and
+ * checking SHI and HSI of the previous DDCB.
+ *
+ * This function must only be called with ddcb_lock held.
+ *
+ * Return: 1 if new DDCB is appended to previous
+ * 2 if DDCB queue is tapped via register/simulation
+ */
+#define RET_DDCB_APPENDED 1
+#define RET_DDCB_TAPPED 2
+
+static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
+ struct ddcb *pddcb, int ddcb_no)
+{
+ unsigned int try;
+ int prev_no;
+ struct ddcb *prev_ddcb;
+ __be32 old, new, icrc_hsi_shi;
+ u64 num;
+
+ /*
+ * For performance checks a Dispatch Timestamp can be put into the
+ * DDCB. It is supposed to use the SLU's free running counter,
+ * but this requires PCIe cycles.
+ */
+ ddcb_mark_unused(pddcb);
+
+ /* check previous DDCB if already fetched */
+ prev_no = (ddcb_no == 0) ? queue->ddcb_max - 1 : ddcb_no - 1;
+ prev_ddcb = &queue->ddcb_vaddr[prev_no];
+
+ /*
+ * It might have happened that the HSI.FETCHED bit is
+ * set. Retry in this case; a maximum of 2 attempts is
+ * expected.
+ */
+ ddcb_mark_appended(pddcb);
+ for (try = 0; try < 2; try++) {
+ old = prev_ddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */
+
+ /* try to append via NEXT bit if prev DDCB is not completed */
+ if ((old & DDCB_COMPLETED_BE32) != 0x00000000)
+ break;
+
+ new = (old | DDCB_NEXT_BE32);
+ icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new);
+
+ if (icrc_hsi_shi == old)
+ return RET_DDCB_APPENDED; /* appended to queue */
+ }
+
+ /* Queue must be re-started by updating QUEUE_OFFSET */
+ ddcb_mark_tapped(pddcb);
+ num = (u64)ddcb_no << 8;
+ __genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */
+
+ return RET_DDCB_TAPPED;
+}
+
+/**
+ * copy_ddcb_results() - Copy output state from real DDCB to request
+ *
+ * Copy DDCB ASV to request struct. There is no endian
+ * conversion made, since data structure in ASV is still
+ * unknown here.
+ *
+ * This is needed by:
+ * - genwqe_purge_ddcb()
+ * - genwqe_check_ddcb_queue()
+ */
+static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no)
+{
+ struct ddcb_queue *queue = req->queue;
+ struct ddcb *pddcb = &queue->ddcb_vaddr[req->num];
+
+ memcpy(&req->cmd.asv[0], &pddcb->asv[0], DDCB_ASV_LENGTH);
+
+ /* copy status flags of the variant part */
+ req->cmd.vcrc = be16_to_cpu(pddcb->vcrc_16);
+ req->cmd.deque_ts = be64_to_cpu(pddcb->deque_ts_64);
+ req->cmd.cmplt_ts = be64_to_cpu(pddcb->cmplt_ts_64);
+
+ req->cmd.attn = be16_to_cpu(pddcb->attn_16);
+ req->cmd.progress = be32_to_cpu(pddcb->progress_32);
+ req->cmd.retc = be16_to_cpu(pddcb->retc_16);
+
+ if (ddcb_requ_collect_debug_data(req)) {
+ int prev_no = (ddcb_no == 0) ?
+ queue->ddcb_max - 1 : ddcb_no - 1;
+ struct ddcb *prev_pddcb = &queue->ddcb_vaddr[prev_no];
+
+ memcpy(&req->debug_data.ddcb_finished, pddcb,
+ sizeof(req->debug_data.ddcb_finished));
+ memcpy(&req->debug_data.ddcb_prev, prev_pddcb,
+ sizeof(req->debug_data.ddcb_prev));
+ }
+}
+
+/**
+ * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work requests.
+ * @cd: pointer to genwqe device descriptor
+ * @queue: queue to be checked
+ *
+ * Return: Number of DDCBs which were finished
+ */
+static int genwqe_check_ddcb_queue(struct genwqe_dev *cd,
+ struct ddcb_queue *queue)
+{
+ unsigned long flags;
+ int ddcbs_finished = 0;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ spin_lock_irqsave(&queue->ddcb_lock, flags);
+
+ /* FIXME avoid soft locking CPU */
+ while (!queue_empty(queue) && (ddcbs_finished < queue->ddcb_max)) {
+
+ struct ddcb *pddcb;
+ struct ddcb_requ *req;
+ u16 vcrc, vcrc_16, retc_16;
+
+ pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
+
+ if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) ==
+ 0x00000000)
+ goto go_home; /* not completed, continue waiting */
+
+ /* Note: DDCB could be purged */
+
+ req = queue->ddcb_req[queue->ddcb_act];
+ if (req == NULL) {
+ /* this occurs if DDCB is purged, not an error */
+ /* Move active DDCB further; Nothing to do anymore. */
+ goto pick_next_one;
+ }
+
+ /*
+ * HSI=0x44 (fetched and completed), but RETC is
+ * 0x101, or even worse 0x000.
+ *
+ * If we see the queue in an inconsistent state, we
+ * read the errcnts and the queue status to provide
+ * a trigger for our PCIe analyzer to stop capturing.
+ */
+ retc_16 = be16_to_cpu(pddcb->retc_16);
+ if ((pddcb->hsi == 0x44) && (retc_16 <= 0x101)) {
+ u64 errcnts, status;
+ u64 ddcb_offs = (u64)pddcb - (u64)queue->ddcb_vaddr;
+
+ errcnts = __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS);
+ status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);
+
+ dev_err(&pci_dev->dev,
+ "[%s] SEQN=%04x HSI=%02x RETC=%03x "
+ " Q_ERRCNTS=%016llx Q_STATUS=%016llx\n"
+ " DDCB_DMA_ADDR=%016llx\n",
+ __func__, be16_to_cpu(pddcb->seqnum_16),
+ pddcb->hsi, retc_16, errcnts, status,
+ queue->ddcb_daddr + ddcb_offs);
+ }
+
+ copy_ddcb_results(req, queue->ddcb_act);
+ queue->ddcb_req[queue->ddcb_act] = NULL; /* take from queue */
+
+ dev_dbg(&pci_dev->dev, "FINISHED DDCB#%d\n", req->num);
+ genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
+
+ ddcb_mark_finished(pddcb);
+
+ /* calculate CRC_16 to see if VCRC is correct */
+ vcrc = genwqe_crc16(pddcb->asv,
+ VCRC_LENGTH(req->cmd.asv_length),
+ 0xffff);
+ vcrc_16 = be16_to_cpu(pddcb->vcrc_16);
+ if (vcrc != vcrc_16) {
+ printk_ratelimited(KERN_ERR
+ "%s %s: err: wrong VCRC pre=%02x vcrc_len=%d "
+ "bytes vcrc_data=%04x is not vcrc_card=%04x\n",
+ GENWQE_DEVNAME, dev_name(&pci_dev->dev),
+ pddcb->pre, VCRC_LENGTH(req->cmd.asv_length),
+ vcrc, vcrc_16);
+ }
+
+ ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
+ queue->ddcbs_completed++;
+ queue->ddcbs_in_flight--;
+
+ /* wake up process waiting for this DDCB */
+ wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);
+
+pick_next_one:
+ queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max;
+ ddcbs_finished++;
+ }
+
+ go_home:
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+ return ddcbs_finished;
+}
+
+/**
+ * __genwqe_wait_ddcb(): Waits until DDCB is completed
+ * @cd: pointer to genwqe device descriptor
+ * @req: pointer to requested DDCB parameters
+ *
+ * The Service Layer will update the RETC in DDCB when processing is
+ * pending or done.
+ *
+ * Return: > 0 remaining jiffies, DDCB completed
+ * -ETIMEDOUT when timeout
+ * -ERESTARTSYS when ^C
+ * -EINVAL when unknown error condition
+ *
+ * When an error is returned, the caller needs to ensure that
+ * purge_ddcb() is called to get the &req removed from the
+ * queue.
+ */
+int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
+{
+ int rc;
+ unsigned int ddcb_no;
+ struct ddcb_queue *queue;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (req == NULL)
+ return -EINVAL;
+
+ queue = req->queue;
+ if (queue == NULL)
+ return -EINVAL;
+
+ ddcb_no = req->num;
+ if (ddcb_no >= queue->ddcb_max)
+ return -EINVAL;
+
+ rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no],
+ ddcb_requ_finished(cd, req),
+ genwqe_ddcb_software_timeout * HZ);
+
+ /*
+ * We need to distinguish 3 cases here:
+ * 1. rc == 0 timeout occurred
+ * 2. rc == -ERESTARTSYS signal received
+ * 3. rc > 0 remaining jiffies condition is true
+ */
+ if (rc == 0) {
+ struct ddcb_queue *queue = req->queue;
+ struct ddcb *pddcb;
+
+ /*
+ * Timeout may be caused by long task switching time.
+ * When timeout happens, check if the request has
+ * meanwhile completed.
+ */
+ genwqe_check_ddcb_queue(cd, req->queue);
+ if (ddcb_requ_finished(cd, req))
+ return rc;
+
+ dev_err(&pci_dev->dev,
+ "[%s] err: DDCB#%d timeout rc=%d state=%d req @ %p\n",
+ __func__, req->num, rc, ddcb_requ_get_state(req),
+ req);
+ dev_err(&pci_dev->dev,
+ "[%s] IO_QUEUE_STATUS=0x%016llx\n", __func__,
+ __genwqe_readq(cd, queue->IO_QUEUE_STATUS));
+
+ pddcb = &queue->ddcb_vaddr[req->num];
+ genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
+
+ print_ddcb_info(cd, req->queue);
+ return -ETIMEDOUT;
+
+ } else if (rc == -ERESTARTSYS) {
+ return rc;
+ /*
+ * EINTR: Stops the application
+ * ERESTARTSYS: Restartable systemcall; called again
+ */
+
+ } else if (rc < 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: DDCB#%d unknown result (rc=%d) %d!\n",
+ __func__, req->num, rc, ddcb_requ_get_state(req));
+ return -EINVAL;
+ }
+
+ /* Severe error occurred. Driver is forced to stop operation */
+ if (cd->card_state != GENWQE_CARD_USED) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: DDCB#%d forced to stop (rc=%d)\n",
+ __func__, req->num, rc);
+ return -EIO;
+ }
+ return rc;
+}
+
+/**
+ * get_next_ddcb() - Get next available DDCB
+ * @cd: pointer to genwqe device descriptor
+ * @queue: DDCB queue to get the next free DDCB from
+ * @num: internal DDCB number of the returned DDCB (output parameter)
+ *
+ * The DDCB's content is completely cleared except for the presets of
+ * PRE and SEQNUM. This function must only be called when ddcb_lock is held.
+ *
+ * Return: NULL if no empty DDCB available otherwise ptr to next DDCB.
+ */
+static struct ddcb *get_next_ddcb(struct genwqe_dev *cd,
+ struct ddcb_queue *queue,
+ int *num)
+{
+ u64 *pu64;
+ struct ddcb *pddcb;
+
+ if (queue_free_ddcbs(queue) == 0) /* queue is full */
+ return NULL;
+
+ /* find new ddcb */
+ pddcb = &queue->ddcb_vaddr[queue->ddcb_next];
+
+ /* if it is not completed, we are not allowed to use it */
+ /* barrier(); */
+ if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) == 0x00000000)
+ return NULL;
+
+ *num = queue->ddcb_next; /* internal DDCB number */
+ queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max;
+
+ /* clear important DDCB fields */
+ pu64 = (u64 *)pddcb;
+ pu64[0] = 0ULL; /* offs 0x00 (ICRC,HSI,SHI,...) */
+ pu64[1] = 0ULL; /* offs 0x01 (ACFUNC,CMD...) */
+
+ /* destroy previous results in ASV */
+ pu64[0x80/8] = 0ULL; /* offs 0x80 (ASV + 0) */
+ pu64[0x88/8] = 0ULL; /* offs 0x88 (ASV + 0x08) */
+ pu64[0x90/8] = 0ULL; /* offs 0x90 (ASV + 0x10) */
+ pu64[0x98/8] = 0ULL; /* offs 0x98 (ASV + 0x18) */
+ pu64[0xd0/8] = 0ULL; /* offs 0xd0 (RETC,ATTN...) */
+
+ pddcb->pre = DDCB_PRESET_PRE; /* 128 */
+ pddcb->seqnum_16 = cpu_to_be16(queue->ddcb_seq++);
+ return pddcb;
+}
+
+/**
+ * __genwqe_purge_ddcb() - Remove a DDCB from the workqueue
+ * @cd: genwqe device descriptor
+ * @req: DDCB request
+ *
+ * This will fail when the request was already FETCHED. In this case
+ * we need to wait until it is finished. Else the DDCB can be
+ * reused. This function also ensures that the request data structure
+ * is removed from ddcb_req[].
+ *
+ * Do not forget to call this function when genwqe_wait_ddcb() fails,
+ * such that the request gets really removed from ddcb_req[].
+ *
+ * Return: 0 success
+ */
+int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
+{
+ struct ddcb *pddcb = NULL;
+ unsigned int t;
+ unsigned long flags;
+ struct ddcb_queue *queue = req->queue;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ u64 queue_status;
+ __be32 icrc_hsi_shi = 0x0000;
+ __be32 old, new;
+
+ /* unsigned long flags; */
+ if (genwqe_ddcb_software_timeout <= 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: software timeout is not set!\n", __func__);
+ return -EFAULT;
+ }
+
+ pddcb = &queue->ddcb_vaddr[req->num];
+
+ for (t = 0; t < genwqe_ddcb_software_timeout * 10; t++) {
+
+ spin_lock_irqsave(&queue->ddcb_lock, flags);
+
+ /* Check if req was meanwhile finished */
+ if (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED)
+ goto go_home;
+
+ /* try to set PURGE bit if FETCHED/COMPLETED are not set */
+ old = pddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */
+ if ((old & DDCB_FETCHED_BE32) == 0x00000000) {
+
+ new = (old | DDCB_PURGE_BE32);
+ icrc_hsi_shi = cmpxchg(&pddcb->icrc_hsi_shi_32,
+ old, new);
+ if (icrc_hsi_shi == old)
+ goto finish_ddcb;
+ }
+
+ /* normal finish with HSI bit */
+ barrier();
+ icrc_hsi_shi = pddcb->icrc_hsi_shi_32;
+ if (icrc_hsi_shi & DDCB_COMPLETED_BE32)
+ goto finish_ddcb;
+
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+
+ /*
+ * Here the check_ddcb() function will most likely
+ * discover this DDCB to be finished some point in
+ * time. It will mark the req finished and free it up
+ * in the list.
+ */
+
+ copy_ddcb_results(req, req->num); /* for the failing case */
+ msleep(100); /* sleep for 1/10 second and try again */
+ continue;
+
+finish_ddcb:
+ copy_ddcb_results(req, req->num);
+ ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
+ queue->ddcbs_in_flight--;
+ queue->ddcb_req[req->num] = NULL; /* delete from array */
+ ddcb_mark_cleared(pddcb);
+
+ /* Move active DDCB further; Nothing to do here anymore. */
+
+ /*
+ * We need to ensure that there is at least one free
+ * DDCB in the queue. To do that, we must update
+ * ddcb_act only if the COMPLETED bit is set for the
+ * DDCB we are working on; otherwise we treat that DDCB,
+ * even though we PURGED it, as occupied (the hardware
+ * still has to set the COMPLETED bit).
+ */
+ icrc_hsi_shi = pddcb->icrc_hsi_shi_32;
+ if ((icrc_hsi_shi & DDCB_COMPLETED_BE32) &&
+ (queue->ddcb_act == req->num)) {
+ queue->ddcb_act = ((queue->ddcb_act + 1) %
+ queue->ddcb_max);
+ }
+go_home:
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+ return 0;
+ }
+
+ /*
+ * If the card is dead and the queue is forced to stop, we
+ * might see this in the queue status register.
+ */
+ queue_status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);
+
+ dev_dbg(&pci_dev->dev, "UN/FINISHED DDCB#%d\n", req->num);
+ genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
+
+ dev_err(&pci_dev->dev,
+ "[%s] err: DDCB#%d not purged and not completed "
+ "after %d seconds QSTAT=%016llx!!\n",
+ __func__, req->num, genwqe_ddcb_software_timeout,
+ queue_status);
+
+ print_ddcb_info(cd, req->queue);
+
+ return -EFAULT;
+}
+
+int genwqe_init_debug_data(struct genwqe_dev *cd, struct genwqe_debug_data *d)
+{
+ int len;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (d == NULL) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: invalid memory for debug data!\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ len = sizeof(d->driver_version);
+ snprintf(d->driver_version, len, "%s", DRV_VERS_STRING);
+ d->slu_unitcfg = cd->slu_unitcfg;
+ d->app_unitcfg = cd->app_unitcfg;
+ return 0;
+}
+
+/**
+ * __genwqe_enqueue_ddcb() - Enqueue a DDCB
+ * @cd: pointer to genwqe device descriptor
+ * @req: pointer to DDCB execution request
+ *
+ * Return: 0 if enqueuing succeeded
+ * -EIO if card is unusable/PCIe problems
+ * -EBUSY if enqueuing failed
+ */
+int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
+{
+ struct ddcb *pddcb;
+ unsigned long flags;
+ struct ddcb_queue *queue;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ u16 icrc;
+
+ if (cd->card_state != GENWQE_CARD_USED) {
+ printk_ratelimited(KERN_ERR
+ "%s %s: [%s] Card is unusable/PCIe problem Req#%d\n",
+ GENWQE_DEVNAME, dev_name(&pci_dev->dev),
+ __func__, req->num);
+ return -EIO;
+ }
+
+ queue = req->queue = &cd->queue;
+
+ /* FIXME circumvention to improve performance when no irq is
+ * there.
+ */
+ if (genwqe_polling_enabled)
+ genwqe_check_ddcb_queue(cd, queue);
+
+ /*
+ * It must be ensured to process all DDCBs in successive
+ * order. Use a lock here in order to prevent nested DDCB
+ * enqueuing.
+ */
+ spin_lock_irqsave(&queue->ddcb_lock, flags);
+
+ pddcb = get_next_ddcb(cd, queue, &req->num); /* get ptr and num */
+ if (pddcb == NULL) {
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+ queue->busy++;
+ return -EBUSY;
+ }
+
+ if (queue->ddcb_req[req->num] != NULL) {
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+
+ dev_err(&pci_dev->dev,
+ "[%s] picked DDCB %d with req=%p still in use!!\n",
+ __func__, req->num, req);
+ return -EFAULT;
+ }
+ ddcb_requ_set_state(req, GENWQE_REQU_ENQUEUED);
+ queue->ddcb_req[req->num] = req;
+
+ pddcb->cmdopts_16 = cpu_to_be16(req->cmd.cmdopts);
+ pddcb->cmd = req->cmd.cmd;
+ pddcb->acfunc = req->cmd.acfunc; /* functional unit */
+
+ /*
+ * We know that we can get retc 0x104 with CRC error, do not
+ * stop the queue in those cases for this command. XDIR = 1
+ * does not work for old SLU versions.
+ *
+ * Last bitstream with the old XDIR behavior had SLU_ID
+ * 0x34199.
+ */
+ if ((cd->slu_unitcfg & 0xFFFF0ull) > 0x34199ull)
+ pddcb->xdir = 0x1;
+ else
+ pddcb->xdir = 0x0;
+
+
+ pddcb->psp = (((req->cmd.asiv_length / 8) << 4) |
+ ((req->cmd.asv_length / 8)));
+ pddcb->disp_ts_64 = cpu_to_be64(req->cmd.disp_ts);
+
+ /*
+ * If copying the whole DDCB_ASIV_LENGTH is impacting
+ * performance we need to change it to
+ * req->cmd.asiv_length. But simulation benefits from some
+ * non-architectured bits behind the architectured content.
+ *
+ * How much data is copied depends on the availability of the
+ * ATS field, which was introduced late. If the ATS field is
+ * supported ASIV is 8 bytes shorter than it used to be. Since
+ * the ATS field is copied too, the code should do exactly
+ * what it did before, but I wanted to make copying of the ATS
+ * field very explicit.
+ */
+ if (genwqe_get_slu_id(cd) <= 0x2) {
+ memcpy(&pddcb->__asiv[0], /* destination */
+ &req->cmd.__asiv[0], /* source */
+ DDCB_ASIV_LENGTH); /* req->cmd.asiv_length */
+ } else {
+ pddcb->n.ats_64 = cpu_to_be64(req->cmd.ats);
+ memcpy(&pddcb->n.asiv[0], /* destination */
+ &req->cmd.asiv[0], /* source */
+ DDCB_ASIV_LENGTH_ATS); /* req->cmd.asiv_length */
+ }
+
+ pddcb->icrc_hsi_shi_32 = cpu_to_be32(0x00000000); /* for crc */
+
+ /*
+ * Calculate CRC_16 for corresponding range PSP(7:4). Include
+ * empty 4 bytes prior to the data.
+ */
+ icrc = genwqe_crc16((const u8 *)pddcb,
+ ICRC_LENGTH(req->cmd.asiv_length), 0xffff);
+ pddcb->icrc_hsi_shi_32 = cpu_to_be32((u32)icrc << 16);
+
+ /* enable DDCB completion irq */
+ if (!genwqe_polling_enabled)
+ pddcb->icrc_hsi_shi_32 |= DDCB_INTR_BE32;
+
+ dev_dbg(&pci_dev->dev, "INPUT DDCB#%d\n", req->num);
+ genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
+
+ if (ddcb_requ_collect_debug_data(req)) {
+ /* use the kernel copy of debug data. copying back to
+ user buffer happens later */
+
+ genwqe_init_debug_data(cd, &req->debug_data);
+ memcpy(&req->debug_data.ddcb_before, pddcb,
+ sizeof(req->debug_data.ddcb_before));
+ }
+
+ enqueue_ddcb(cd, queue, pddcb, req->num);
+ queue->ddcbs_in_flight++;
+
+ if (queue->ddcbs_in_flight > queue->ddcbs_max_in_flight)
+ queue->ddcbs_max_in_flight = queue->ddcbs_in_flight;
+
+ ddcb_requ_set_state(req, GENWQE_REQU_TAPPED);
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+ wake_up_interruptible(&cd->queue_waitq);
+
+ return 0;
+}
+
+/**
+ * __genwqe_execute_raw_ddcb() - Setup and execute DDCB
+ * @cd: pointer to genwqe device descriptor
+ * @cmd: user provided DDCB command
+ */
+int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
+ struct genwqe_ddcb_cmd *cmd)
+{
+ int rc = 0;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
+
+ if (cmd->asiv_length > DDCB_ASIV_LENGTH) {
+ dev_err(&pci_dev->dev, "[%s] err: wrong asiv_length of %d\n",
+ __func__, cmd->asiv_length);
+ return -EINVAL;
+ }
+ if (cmd->asv_length > DDCB_ASV_LENGTH) {
+ dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n",
+ __func__, cmd->asv_length);
+ return -EINVAL;
+ }
+ rc = __genwqe_enqueue_ddcb(cd, req);
+ if (rc != 0)
+ return rc;
+
+ rc = __genwqe_wait_ddcb(cd, req);
+ if (rc < 0) /* error or signal interrupt */
+ goto err_exit;
+
+ if (ddcb_requ_collect_debug_data(req)) {
+ if (copy_to_user((struct genwqe_debug_data __user *)
+ (unsigned long)cmd->ddata_addr,
+ &req->debug_data,
+ sizeof(struct genwqe_debug_data)))
+ return -EFAULT;
+ }
+
+ /*
+ * Higher values than 0x102 indicate completion with faults,
+ * lower values than 0x102 indicate processing faults. Note
+ * that DDCB might have been purged. E.g. Cntl+C.
+ */
+ if (cmd->retc != DDCB_RETC_COMPLETE) {
+ /* This might happen e.g. flash read, and needs to be
+ handled by the upper layer code. */
+ rc = -EBADMSG; /* not processed/error retc */
+ }
+
+ return rc;
+
+ err_exit:
+ __genwqe_purge_ddcb(cd, req);
+
+ if (ddcb_requ_collect_debug_data(req)) {
+ if (copy_to_user((struct genwqe_debug_data __user *)
+ (unsigned long)cmd->ddata_addr,
+ &req->debug_data,
+ sizeof(struct genwqe_debug_data)))
+ return -EFAULT;
+ }
+ return rc;
+}
+
+/**
+ * genwqe_next_ddcb_ready() - Figure out if the next DDCB is already finished
+ *
+ * We use this as condition for our wait-queue code.
+ */
+static int genwqe_next_ddcb_ready(struct genwqe_dev *cd)
+{
+ unsigned long flags;
+ struct ddcb *pddcb;
+ struct ddcb_queue *queue = &cd->queue;
+
+ spin_lock_irqsave(&queue->ddcb_lock, flags);
+
+ if (queue_empty(queue)) { /* empty queue */
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+ return 0;
+ }
+
+ pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
+ if (pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) { /* ddcb ready */
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+ return 1;
+ }
+
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+ return 0;
+}
+
+/**
+ * genwqe_ddcbs_in_flight() - Check how many DDCBs are in flight
+ *
+ * Keep track of the number of DDCBs which are currently in the
+ * queue. This is needed for statistics as well as for deciding
+ * whether to wait or rather poll when no interrupts are available.
+ */
+int genwqe_ddcbs_in_flight(struct genwqe_dev *cd)
+{
+ unsigned long flags;
+ int ddcbs_in_flight = 0;
+ struct ddcb_queue *queue = &cd->queue;
+
+ spin_lock_irqsave(&queue->ddcb_lock, flags);
+ ddcbs_in_flight += queue->ddcbs_in_flight;
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+
+ return ddcbs_in_flight;
+}
+
+static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
+{
+ int rc, i;
+ struct ddcb *pddcb;
+ u64 val64;
+ unsigned int queue_size;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (genwqe_ddcb_max < 2)
+ return -EINVAL;
+
+ queue_size = roundup(genwqe_ddcb_max * sizeof(struct ddcb), PAGE_SIZE);
+
+ queue->ddcbs_in_flight = 0; /* statistics */
+ queue->ddcbs_max_in_flight = 0;
+ queue->ddcbs_completed = 0;
+ queue->busy = 0;
+
+ queue->ddcb_seq = 0x100; /* start sequence number */
+ queue->ddcb_max = genwqe_ddcb_max; /* module parameter */
+ queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size,
+ &queue->ddcb_daddr);
+ if (queue->ddcb_vaddr == NULL) {
+ dev_err(&pci_dev->dev,
+ "[%s] **err: could not allocate DDCB **\n", __func__);
+ return -ENOMEM;
+ }
+ memset(queue->ddcb_vaddr, 0, queue_size);
+
+ queue->ddcb_req = kzalloc(sizeof(struct ddcb_requ *) *
+ queue->ddcb_max, GFP_KERNEL);
+ if (!queue->ddcb_req) {
+ rc = -ENOMEM;
+ goto free_ddcbs;
+ }
+
+ queue->ddcb_waitqs = kzalloc(sizeof(wait_queue_head_t) *
+ queue->ddcb_max, GFP_KERNEL);
+ if (!queue->ddcb_waitqs) {
+ rc = -ENOMEM;
+ goto free_requs;
+ }
+
+ for (i = 0; i < queue->ddcb_max; i++) {
+ pddcb = &queue->ddcb_vaddr[i]; /* DDCBs */
+ pddcb->icrc_hsi_shi_32 = DDCB_COMPLETED_BE32;
+ pddcb->retc_16 = cpu_to_be16(0xfff);
+
+ queue->ddcb_req[i] = NULL; /* requests */
+ init_waitqueue_head(&queue->ddcb_waitqs[i]); /* waitqueues */
+ }
+
+ queue->ddcb_act = 0;
+ queue->ddcb_next = 0; /* queue is empty */
+
+ spin_lock_init(&queue->ddcb_lock);
+ init_waitqueue_head(&queue->ddcb_waitq);
+
+ val64 = ((u64)(queue->ddcb_max - 1) << 8); /* lastptr */
+ __genwqe_writeq(cd, queue->IO_QUEUE_CONFIG, 0x07); /* iCRC/vCRC */
+ __genwqe_writeq(cd, queue->IO_QUEUE_SEGMENT, queue->ddcb_daddr);
+ __genwqe_writeq(cd, queue->IO_QUEUE_INITSQN, queue->ddcb_seq);
+ __genwqe_writeq(cd, queue->IO_QUEUE_WRAP, val64);
+ return 0;
+
+ free_requs:
+ kfree(queue->ddcb_req);
+ queue->ddcb_req = NULL;
+ free_ddcbs:
+ __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
+ queue->ddcb_daddr);
+ queue->ddcb_vaddr = NULL;
+ queue->ddcb_daddr = 0ull;
+ return -ENODEV;
+
+}
+
+static int ddcb_queue_initialized(struct ddcb_queue *queue)
+{
+ return queue->ddcb_vaddr != NULL;
+}
+
+static void free_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
+{
+ unsigned int queue_size;
+
+ queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE);
+
+ kfree(queue->ddcb_req);
+ queue->ddcb_req = NULL;
+
+ if (queue->ddcb_vaddr) {
+ __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
+ queue->ddcb_daddr);
+ queue->ddcb_vaddr = NULL;
+ queue->ddcb_daddr = 0ull;
+ }
+}
+
+static irqreturn_t genwqe_pf_isr(int irq, void *dev_id)
+{
+ u64 gfir;
+ struct genwqe_dev *cd = (struct genwqe_dev *)dev_id;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ /*
+ * In case of fatal FIR error the queue is stopped, such that
+ * we can safely check it without risking anything.
+ */
+ cd->irqs_processed++;
+ wake_up_interruptible(&cd->queue_waitq);
+
+ /*
+ * Checking for errors before kicking the queue might be
+ * safer, but slower for the good-case ... See above.
+ */
+ gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ if ((gfir & GFIR_ERR_TRIGGER) != 0x0) {
+
+ wake_up_interruptible(&cd->health_waitq);
+
+ /*
+ * By default GFIRs cause recovery actions. This
+ * count is just for debug when recovery is masked.
+ */
+ printk_ratelimited(KERN_ERR
+ "%s %s: [%s] GFIR=%016llx\n",
+ GENWQE_DEVNAME, dev_name(&pci_dev->dev),
+ __func__, gfir);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t genwqe_vf_isr(int irq, void *dev_id)
+{
+ struct genwqe_dev *cd = (struct genwqe_dev *)dev_id;
+
+ cd->irqs_processed++;
+ wake_up_interruptible(&cd->queue_waitq);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * genwqe_card_thread() - Work thread for the DDCB queue
+ *
+ * The idea is to check if there are DDCBs in processing. If there are
+ * some finished DDCBs, we process them and wake up the
+ * requestors. Otherwise we give other processes time using
+ * cond_resched().
+ */
+static int genwqe_card_thread(void *data)
+{
+ int should_stop = 0, rc = 0;
+ struct genwqe_dev *cd = (struct genwqe_dev *)data;
+
+ while (!kthread_should_stop()) {
+
+ genwqe_check_ddcb_queue(cd, &cd->queue);
+
+ if (genwqe_polling_enabled) {
+ rc = wait_event_interruptible_timeout(
+ cd->queue_waitq,
+ genwqe_ddcbs_in_flight(cd) ||
+ (should_stop = kthread_should_stop()), 1);
+ } else {
+ rc = wait_event_interruptible_timeout(
+ cd->queue_waitq,
+ genwqe_next_ddcb_ready(cd) ||
+ (should_stop = kthread_should_stop()), HZ);
+ }
+ if (should_stop)
+ break;
+
+ /*
+ * Avoid soft lockups on heavy loads; we do not want
+ * to disable our interrupts.
+ */
+ cond_resched();
+ }
+ return 0;
+}
+
+/**
+ * genwqe_setup_service_layer() - Setup DDCB queue
+ * @cd: pointer to genwqe device descriptor
+ *
+ * Allocate DDCBs. Configure Service Layer Controller (SLC).
+ *
+ * Return: 0 success
+ */
+int genwqe_setup_service_layer(struct genwqe_dev *cd)
+{
+ int rc;
+ struct ddcb_queue *queue;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (genwqe_is_privileged(cd)) {
+ rc = genwqe_card_reset(cd);
+ if (rc < 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: reset failed.\n", __func__);
+ return rc;
+ }
+ genwqe_read_softreset(cd);
+ }
+
+ queue = &cd->queue;
+ queue->IO_QUEUE_CONFIG = IO_SLC_QUEUE_CONFIG;
+ queue->IO_QUEUE_STATUS = IO_SLC_QUEUE_STATUS;
+ queue->IO_QUEUE_SEGMENT = IO_SLC_QUEUE_SEGMENT;
+ queue->IO_QUEUE_INITSQN = IO_SLC_QUEUE_INITSQN;
+ queue->IO_QUEUE_OFFSET = IO_SLC_QUEUE_OFFSET;
+ queue->IO_QUEUE_WRAP = IO_SLC_QUEUE_WRAP;
+ queue->IO_QUEUE_WTIME = IO_SLC_QUEUE_WTIME;
+ queue->IO_QUEUE_ERRCNTS = IO_SLC_QUEUE_ERRCNTS;
+ queue->IO_QUEUE_LRW = IO_SLC_QUEUE_LRW;
+
+ rc = setup_ddcb_queue(cd, queue);
+ if (rc != 0) {
+ rc = -ENODEV;
+ goto err_out;
+ }
+
+ init_waitqueue_head(&cd->queue_waitq);
+ cd->card_thread = kthread_run(genwqe_card_thread, cd,
+ GENWQE_DEVNAME "%d_thread",
+ cd->card_idx);
+ if (IS_ERR(cd->card_thread)) {
+ rc = PTR_ERR(cd->card_thread);
+ cd->card_thread = NULL;
+ goto stop_free_queue;
+ }
+
+ rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS);
+ if (rc > 0)
+ rc = genwqe_set_interrupt_capability(cd, rc);
+ if (rc != 0) {
+ rc = -ENODEV;
+ goto stop_kthread;
+ }
+
+ /*
+ * We must have all wait-queues initialized when we enable the
+ * interrupts. Otherwise we might crash if we get an early
+ * irq.
+ */
+ init_waitqueue_head(&cd->health_waitq);
+
+ if (genwqe_is_privileged(cd)) {
+ rc = request_irq(pci_dev->irq, genwqe_pf_isr, IRQF_SHARED,
+ GENWQE_DEVNAME, cd);
+ } else {
+ rc = request_irq(pci_dev->irq, genwqe_vf_isr, IRQF_SHARED,
+ GENWQE_DEVNAME, cd);
+ }
+ if (rc < 0) {
+ dev_err(&pci_dev->dev, "irq %d not free.\n", pci_dev->irq);
+ goto stop_irq_cap;
+ }
+
+ cd->card_state = GENWQE_CARD_USED;
+ return 0;
+
+ stop_irq_cap:
+ genwqe_reset_interrupt_capability(cd);
+ stop_kthread:
+ kthread_stop(cd->card_thread);
+ cd->card_thread = NULL;
+ stop_free_queue:
+ free_ddcb_queue(cd, queue);
+ err_out:
+ return rc;
+}
+
+/**
+ * queue_wake_up_all() - Handles fatal error case
+ *
+ * The PCI device got unusable and we have to stop all pending
+ * requests as fast as we can. The code after this must purge the
+ * DDCBs in question and ensure that all mappings are freed.
+ */
+static int queue_wake_up_all(struct genwqe_dev *cd)
+{
+ unsigned int i;
+ unsigned long flags;
+ struct ddcb_queue *queue = &cd->queue;
+
+ spin_lock_irqsave(&queue->ddcb_lock, flags);
+
+ for (i = 0; i < queue->ddcb_max; i++)
+ wake_up_interruptible(&queue->ddcb_waitqs[i]);
+
+ spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+
+ return 0;
+}
+
+/**
+ * genwqe_finish_queue() - Remove any genwqe devices and user-interfaces
+ *
+ * Relies on the pre-condition that there are no users of the card
+ * device anymore e.g. with open file-descriptors.
+ *
+ * This function must be robust enough to be called twice.
+ */
+int genwqe_finish_queue(struct genwqe_dev *cd)
+{
+ int i, rc = 0, in_flight;
+ int waitmax = genwqe_ddcb_software_timeout;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ struct ddcb_queue *queue = &cd->queue;
+
+ if (!ddcb_queue_initialized(queue))
+ return 0;
+
+ /* Do not wipe out the error state. */
+ if (cd->card_state == GENWQE_CARD_USED)
+ cd->card_state = GENWQE_CARD_UNUSED;
+
+ /* Wake up all requests in the DDCB queue such that they
+ should be removed nicely. */
+ queue_wake_up_all(cd);
+
+ /* We must wait to get rid of the DDCBs in flight */
+ for (i = 0; i < waitmax; i++) {
+ in_flight = genwqe_ddcbs_in_flight(cd);
+
+ if (in_flight == 0)
+ break;
+
+ dev_dbg(&pci_dev->dev,
+ " DEBUG [%d/%d] waiting for queue to get empty: "
+ "%d requests!\n", i, waitmax, in_flight);
+
+ /*
+ * Severe error situation: The card itself has
+ * 16 DDCB queues, each queue has e.g. 32 entries,
+ * each DDCB has a hardware timeout of currently 250
+ * msec but the PFs have a hardware timeout of 8 sec
+ * ... so I take something large.
+ */
+ msleep(1000);
+ }
+ if (i == waitmax) {
+ dev_err(&pci_dev->dev, " [%s] err: queue is not empty!!\n",
+ __func__);
+ rc = -EIO;
+ }
+ return rc;
+}
+
+/**
+ * genwqe_release_service_layer() - Shutdown DDCB queue
+ * @cd: genwqe device descriptor
+ *
+ * This function must be robust enough to be called twice.
+ */
+int genwqe_release_service_layer(struct genwqe_dev *cd)
+{
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (!ddcb_queue_initialized(&cd->queue))
+ return 1;
+
+ free_irq(pci_dev->irq, cd);
+ genwqe_reset_interrupt_capability(cd);
+
+ if (cd->card_thread != NULL) {
+ kthread_stop(cd->card_thread);
+ cd->card_thread = NULL;
+ }
+
+ free_ddcb_queue(cd, &cd->queue);
+ return 0;
+}
diff --git a/drivers/misc/genwqe/card_ddcb.h b/drivers/misc/genwqe/card_ddcb.h
new file mode 100644
index 000000000000..c4f26720753e
--- /dev/null
+++ b/drivers/misc/genwqe/card_ddcb.h
@@ -0,0 +1,188 @@
+#ifndef __CARD_DDCB_H__
+#define __CARD_DDCB_H__
+
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+#include "genwqe_driver.h"
+#include "card_base.h"
+
+/**
+ * struct ddcb - Device Driver Control Block DDCB
+ * @hsi: Hardware software interlock
+ * @shi: Software hardware interlock. Hsi and shi are used to interlock
+ * software and hardware activities. We are using a compare and
+ * swap operation to ensure that there are no races when
+ * activating new DDCBs on the queue, or when we need to
+ * purge a DDCB from a running queue.
+ * @acfunc: Accelerator function addresses a unit within the chip
+ * @cmd: Command to work on
+ * @cmdopts_16: Options for the command
+ * @asiv: Input data
+ * @asv: Output data
+ *
+ * The DDCB data format is big endian. Multiple consecutive DDCBs form
+ * a DDCB queue.
+ */
+#define ASIV_LENGTH 104 /* Old specification without ATS field */
+#define ASIV_LENGTH_ATS 96 /* New specification with ATS field */
+#define ASV_LENGTH 64
+
+struct ddcb {
+ union {
+ __be32 icrc_hsi_shi_32; /* iCRC, Hardware/SW interlock */
+ struct {
+ __be16 icrc_16;
+ u8 hsi;
+ u8 shi;
+ };
+ };
+ u8 pre; /* Preamble */
+ u8 xdir; /* Execution Directives */
+ __be16 seqnum_16; /* Sequence Number */
+
+ u8 acfunc; /* Accelerator Function.. */
+ u8 cmd; /* Command. */
+ __be16 cmdopts_16; /* Command Options */
+ u8 sur; /* Status Update Rate */
+ u8 psp; /* Protection Section Pointer */
+ __be16 rsvd_0e_16; /* Reserved invariant */
+
+ __be64 fwiv_64; /* Firmware Invariant. */
+
+ union {
+ struct {
+ __be64 ats_64; /* Address Translation Spec */
+ u8 asiv[ASIV_LENGTH_ATS]; /* New ASIV */
+ } n;
+ u8 __asiv[ASIV_LENGTH]; /* obsolete */
+ };
+ u8 asv[ASV_LENGTH]; /* Appl Spec Variant */
+
+ __be16 rsvd_c0_16; /* Reserved Variant */
+ __be16 vcrc_16; /* Variant CRC */
+ __be32 rsvd_32; /* Reserved unprotected */
+
+ __be64 deque_ts_64; /* Deque Time Stamp. */
+
+ __be16 retc_16; /* Return Code */
+ __be16 attn_16; /* Attention/Extended Error Codes */
+ __be32 progress_32; /* Progress indicator. */
+
+ __be64 cmplt_ts_64; /* Completion Time Stamp. */
+
+ /* The following layout matches the new service layer format */
+ __be32 ibdc_32; /* Inbound Data Count (* 256) */
+ __be32 obdc_32; /* Outbound Data Count (* 256) */
+
+ __be64 rsvd_SLH_64; /* Reserved for hardware */
+ union { /* private data for driver */
+ u8 priv[8];
+ __be64 priv_64;
+ };
+ __be64 disp_ts_64; /* Dispatch TimeStamp */
+} __attribute__((__packed__));
+
+/* CRC polynomials for DDCB */
+#define CRC16_POLYNOMIAL 0x1021
+
+/*
+ * SHI: Software to Hardware Interlock
+ * This 1 byte field is written by software to interlock the
+ * movement of one queue entry to another with the hardware in the
+ * chip.
+ */
+#define DDCB_SHI_INTR 0x04 /* Bit 2 */
+#define DDCB_SHI_PURGE 0x02 /* Bit 1 */
+#define DDCB_SHI_NEXT 0x01 /* Bit 0 */
+
+/*
+ * HSI: Hardware to Software interlock
+ * This 1 byte field is written by hardware to interlock the movement
+ * of one queue entry to another with the software in the chip.
+ */
+#define DDCB_HSI_COMPLETED 0x40 /* Bit 6 */
+#define DDCB_HSI_FETCHED 0x04 /* Bit 2 */
+
+/*
+ * Accessing HSI/SHI is done 32-bit wide
+ * Normally 16-bit access would work too, but on some platforms the
+ * 16-bit compare and swap operation is not supported. Therefore
+ * switching to 32-bit such that those platforms will work too.
+ *
+ * iCRC HSI/SHI
+ */
+#define DDCB_INTR_BE32 cpu_to_be32(0x00000004)
+#define DDCB_PURGE_BE32 cpu_to_be32(0x00000002)
+#define DDCB_NEXT_BE32 cpu_to_be32(0x00000001)
+#define DDCB_COMPLETED_BE32 cpu_to_be32(0x00004000)
+#define DDCB_FETCHED_BE32 cpu_to_be32(0x00000400)
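+
+/*
+ * Illustrative sketch (not part of the driver logic itself): handing a
+ * freshly prepared DDCB over to hardware by setting the NEXT bit of the
+ * previous DDCB with a 32-bit compare-and-swap on icrc_hsi_shi_32, as
+ * explained above. The queue/prev_no variables are assumptions made
+ * only for this example:
+ *
+ *	struct ddcb *prev = &queue->ddcb_vaddr[prev_no];
+ *	__be32 old, new, icrc_hsi_shi;
+ *
+ *	do {
+ *		old = prev->icrc_hsi_shi_32;
+ *		if (old & DDCB_COMPLETED_BE32)
+ *			break;			/* hardware already done */
+ *		new = old | DDCB_NEXT_BE32;
+ *		icrc_hsi_shi = cmpxchg(&prev->icrc_hsi_shi_32, old, new);
+ *	} while (icrc_hsi_shi != old);		/* retry if hardware raced */
+ */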
+
+/* Definitions of DDCB presets */
+#define DDCB_PRESET_PRE 0x80
+#define ICRC_LENGTH(n) ((n) + 8 + 8 + 8) /* used ASIV + hdr fields */
+#define VCRC_LENGTH(n) ((n)) /* used ASV */
+
+/*
+ * Genwqe Scatter Gather list
+ * Each element has up to 8 entries.
+ * The chaining element is element 0 because of prefetching needs.
+ */
+
+/*
+ * 0b0110 Chained descriptor. The descriptor is describing the next
+ * descriptor list.
+ */
+#define SG_CHAINED (0x6)
+
+/*
+ * 0b0010 First entry of a descriptor list. Start from a Buffer-Empty
+ * condition.
+ */
+#define SG_DATA (0x2)
+
+/*
+ * 0b0000 Early terminator. This is the last entry on the list
+ * regardless of the length indicated.
+ */
+#define SG_END_LIST (0x0)
+
+/**
+ * struct sg_entry - Scatter gather list entry
+ * @target_addr: Either a dma addr of memory to work on or a
+ * dma addr of a subsequent sglist block.
+ * @len: Length of the data block.
+ * @flags: See above.
+ *
+ * Depending on the command the GenWQE card can use a scatter gather
+ * list to describe the memory it works on. Always 8 sg_entry's form
+ * a block.
+ */
+struct sg_entry {
+ __be64 target_addr;
+ __be32 len;
+ __be32 flags;
+};
+
+#endif /* __CARD_DDCB_H__ */
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
new file mode 100644
index 000000000000..3bfdc07a7248
--- /dev/null
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -0,0 +1,500 @@
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Debugfs interfaces for the GenWQE card. Help to debug potential
+ * problems. Dump internal chip state for debugging and failure
+ * determination.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include "card_base.h"
+#include "card_ddcb.h"
+
+#define GENWQE_DEBUGFS_RO(_name, _showfn) \
+ static int genwqe_debugfs_##_name##_open(struct inode *inode, \
+ struct file *file) \
+ { \
+ return single_open(file, _showfn, inode->i_private); \
+ } \
+ static const struct file_operations genwqe_##_name##_fops = { \
+ .open = genwqe_debugfs_##_name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ }
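+
+/*
+ * For example, GENWQE_DEBUGFS_RO(info, genwqe_info_show) further below
+ * expands roughly to the following (sketch for illustration):
+ *
+ *	static int genwqe_debugfs_info_open(struct inode *inode,
+ *					    struct file *file)
+ *	{
+ *		return single_open(file, genwqe_info_show, inode->i_private);
+ *	}
+ *	static const struct file_operations genwqe_info_fops = {
+ *		.open    = genwqe_debugfs_info_open,
+ *		.read    = seq_read,
+ *		.llseek  = seq_lseek,
+ *		.release = single_release,
+ *	};
+ */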
+
+static void dbg_uidn_show(struct seq_file *s, struct genwqe_reg *regs,
+ int entries)
+{
+ unsigned int i;
+ u32 v_hi, v_lo;
+
+ for (i = 0; i < entries; i++) {
+ v_hi = (regs[i].val >> 32) & 0xffffffff;
+ v_lo = (regs[i].val) & 0xffffffff;
+
+ seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x EXT_ERR_REC\n",
+ regs[i].addr, regs[i].idx, v_hi, v_lo);
+ }
+}
+
+static int curr_dbg_uidn_show(struct seq_file *s, void *unused, int uid)
+{
+ struct genwqe_dev *cd = s->private;
+ int entries;
+ struct genwqe_reg *regs;
+
+ entries = genwqe_ffdc_buff_size(cd, uid);
+ if (entries < 0)
+ return -EINVAL;
+
+ if (entries == 0)
+ return 0;
+
+ regs = kcalloc(entries, sizeof(*regs), GFP_KERNEL);
+ if (regs == NULL)
+ return -ENOMEM;
+
+ genwqe_stop_traps(cd); /* halt the traps while dumping data */
+ genwqe_ffdc_buff_read(cd, uid, regs, entries);
+ genwqe_start_traps(cd);
+
+ dbg_uidn_show(s, regs, entries);
+ kfree(regs);
+ return 0;
+}
+
+static int genwqe_curr_dbg_uid0_show(struct seq_file *s, void *unused)
+{
+ return curr_dbg_uidn_show(s, unused, 0);
+}
+
+GENWQE_DEBUGFS_RO(curr_dbg_uid0, genwqe_curr_dbg_uid0_show);
+
+static int genwqe_curr_dbg_uid1_show(struct seq_file *s, void *unused)
+{
+ return curr_dbg_uidn_show(s, unused, 1);
+}
+
+GENWQE_DEBUGFS_RO(curr_dbg_uid1, genwqe_curr_dbg_uid1_show);
+
+static int genwqe_curr_dbg_uid2_show(struct seq_file *s, void *unused)
+{
+ return curr_dbg_uidn_show(s, unused, 2);
+}
+
+GENWQE_DEBUGFS_RO(curr_dbg_uid2, genwqe_curr_dbg_uid2_show);
+
+static int prev_dbg_uidn_show(struct seq_file *s, void *unused, int uid)
+{
+ struct genwqe_dev *cd = s->private;
+
+ dbg_uidn_show(s, cd->ffdc[uid].regs, cd->ffdc[uid].entries);
+ return 0;
+}
+
+static int genwqe_prev_dbg_uid0_show(struct seq_file *s, void *unused)
+{
+ return prev_dbg_uidn_show(s, unused, 0);
+}
+
+GENWQE_DEBUGFS_RO(prev_dbg_uid0, genwqe_prev_dbg_uid0_show);
+
+static int genwqe_prev_dbg_uid1_show(struct seq_file *s, void *unused)
+{
+ return prev_dbg_uidn_show(s, unused, 1);
+}
+
+GENWQE_DEBUGFS_RO(prev_dbg_uid1, genwqe_prev_dbg_uid1_show);
+
+static int genwqe_prev_dbg_uid2_show(struct seq_file *s, void *unused)
+{
+ return prev_dbg_uidn_show(s, unused, 2);
+}
+
+GENWQE_DEBUGFS_RO(prev_dbg_uid2, genwqe_prev_dbg_uid2_show);
+
+static int genwqe_curr_regs_show(struct seq_file *s, void *unused)
+{
+ struct genwqe_dev *cd = s->private;
+ unsigned int i;
+ struct genwqe_reg *regs;
+
+ regs = kcalloc(GENWQE_FFDC_REGS, sizeof(*regs), GFP_KERNEL);
+ if (regs == NULL)
+ return -ENOMEM;
+
+ genwqe_stop_traps(cd);
+ genwqe_read_ffdc_regs(cd, regs, GENWQE_FFDC_REGS, 1);
+ genwqe_start_traps(cd);
+
+ for (i = 0; i < GENWQE_FFDC_REGS; i++) {
+ if (regs[i].addr == 0xffffffff)
+ break; /* invalid entries */
+
+ if (regs[i].val == 0x0ull)
+ continue; /* do not print 0x0 FIRs */
+
+ seq_printf(s, " 0x%08x 0x%016llx\n",
+ regs[i].addr, regs[i].val);
+ }
+ return 0;
+}
+
+GENWQE_DEBUGFS_RO(curr_regs, genwqe_curr_regs_show);
+
+static int genwqe_prev_regs_show(struct seq_file *s, void *unused)
+{
+ struct genwqe_dev *cd = s->private;
+ unsigned int i;
+ struct genwqe_reg *regs = cd->ffdc[GENWQE_DBG_REGS].regs;
+
+ if (regs == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < GENWQE_FFDC_REGS; i++) {
+ if (regs[i].addr == 0xffffffff)
+ break; /* invalid entries */
+
+ if (regs[i].val == 0x0ull)
+ continue; /* do not print 0x0 FIRs */
+
+ seq_printf(s, " 0x%08x 0x%016llx\n",
+ regs[i].addr, regs[i].val);
+ }
+ return 0;
+}
+
+GENWQE_DEBUGFS_RO(prev_regs, genwqe_prev_regs_show);
+
+static int genwqe_jtimer_show(struct seq_file *s, void *unused)
+{
+ struct genwqe_dev *cd = s->private;
+ unsigned int vf_num;
+ u64 jtimer;
+
+ jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, 0);
+ seq_printf(s, " PF 0x%016llx %d msec\n", jtimer,
+ genwqe_pf_jobtimeout_msec);
+
+ for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) {
+ jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
+ vf_num + 1);
+ seq_printf(s, " VF%-2d 0x%016llx %d msec\n", vf_num, jtimer,
+ cd->vf_jobtimeout_msec[vf_num]);
+ }
+ return 0;
+}
+
+GENWQE_DEBUGFS_RO(jtimer, genwqe_jtimer_show);
+
+static int genwqe_queue_working_time_show(struct seq_file *s, void *unused)
+{
+ struct genwqe_dev *cd = s->private;
+ unsigned int vf_num;
+ u64 t;
+
+ t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, 0);
+ seq_printf(s, " PF 0x%016llx\n", t);
+
+ for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) {
+ t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, vf_num + 1);
+ seq_printf(s, " VF%-2d 0x%016llx\n", vf_num, t);
+ }
+ return 0;
+}
+
+GENWQE_DEBUGFS_RO(queue_working_time, genwqe_queue_working_time_show);
+
+static int genwqe_ddcb_info_show(struct seq_file *s, void *unused)
+{
+ struct genwqe_dev *cd = s->private;
+ unsigned int i;
+ struct ddcb_queue *queue;
+ struct ddcb *pddcb;
+
+ queue = &cd->queue;
+ seq_puts(s, "DDCB QUEUE:\n");
+ seq_printf(s, " ddcb_max: %d\n"
+ " ddcb_daddr: %016llx - %016llx\n"
+ " ddcb_vaddr: %016llx\n"
+ " ddcbs_in_flight: %u\n"
+ " ddcbs_max_in_flight: %u\n"
+ " ddcbs_completed: %u\n"
+ " busy: %u\n"
+ " irqs_processed: %u\n",
+ queue->ddcb_max, (long long)queue->ddcb_daddr,
+ (long long)queue->ddcb_daddr +
+ (queue->ddcb_max * DDCB_LENGTH),
+ (long long)queue->ddcb_vaddr, queue->ddcbs_in_flight,
+ queue->ddcbs_max_in_flight, queue->ddcbs_completed,
+ queue->busy, cd->irqs_processed);
+
+ /* Hardware State */
+ seq_printf(s, " 0x%08x 0x%016llx IO_QUEUE_CONFIG\n"
+ " 0x%08x 0x%016llx IO_QUEUE_STATUS\n"
+ " 0x%08x 0x%016llx IO_QUEUE_SEGMENT\n"
+ " 0x%08x 0x%016llx IO_QUEUE_INITSQN\n"
+ " 0x%08x 0x%016llx IO_QUEUE_WRAP\n"
+ " 0x%08x 0x%016llx IO_QUEUE_OFFSET\n"
+ " 0x%08x 0x%016llx IO_QUEUE_WTIME\n"
+ " 0x%08x 0x%016llx IO_QUEUE_ERRCNTS\n"
+ " 0x%08x 0x%016llx IO_QUEUE_LRW\n",
+ queue->IO_QUEUE_CONFIG,
+ __genwqe_readq(cd, queue->IO_QUEUE_CONFIG),
+ queue->IO_QUEUE_STATUS,
+ __genwqe_readq(cd, queue->IO_QUEUE_STATUS),
+ queue->IO_QUEUE_SEGMENT,
+ __genwqe_readq(cd, queue->IO_QUEUE_SEGMENT),
+ queue->IO_QUEUE_INITSQN,
+ __genwqe_readq(cd, queue->IO_QUEUE_INITSQN),
+ queue->IO_QUEUE_WRAP,
+ __genwqe_readq(cd, queue->IO_QUEUE_WRAP),
+ queue->IO_QUEUE_OFFSET,
+ __genwqe_readq(cd, queue->IO_QUEUE_OFFSET),
+ queue->IO_QUEUE_WTIME,
+ __genwqe_readq(cd, queue->IO_QUEUE_WTIME),
+ queue->IO_QUEUE_ERRCNTS,
+ __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS),
+ queue->IO_QUEUE_LRW,
+ __genwqe_readq(cd, queue->IO_QUEUE_LRW));
+
+ seq_printf(s, "DDCB list (ddcb_act=%d/ddcb_next=%d):\n",
+ queue->ddcb_act, queue->ddcb_next);
+
+ pddcb = queue->ddcb_vaddr;
+ for (i = 0; i < queue->ddcb_max; i++) {
+ seq_printf(s, " %-3d: RETC=%03x SEQ=%04x HSI/SHI=%02x/%02x ",
+ i, be16_to_cpu(pddcb->retc_16),
+ be16_to_cpu(pddcb->seqnum_16),
+ pddcb->hsi, pddcb->shi);
+ seq_printf(s, "PRIV=%06llx CMD=%02x\n",
+ be64_to_cpu(pddcb->priv_64), pddcb->cmd);
+ pddcb++;
+ }
+ return 0;
+}
+
+GENWQE_DEBUGFS_RO(ddcb_info, genwqe_ddcb_info_show);
+
+static int genwqe_info_show(struct seq_file *s, void *unused)
+{
+ struct genwqe_dev *cd = s->private;
+ u16 val16, type;
+ u64 app_id, slu_id, bitstream = -1;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG);
+ app_id = __genwqe_readq(cd, IO_APP_UNITCFG);
+
+ if (genwqe_is_privileged(cd))
+ bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM);
+
+ val16 = (u16)(slu_id & 0x0fLLU);
+ type = (u16)((slu_id >> 20) & 0xffLLU);
+
+ seq_printf(s, "%s driver version: %s\n"
+ " Device Name/Type: %s %s CardIdx: %d\n"
+ " SLU/APP Config : 0x%016llx/0x%016llx\n"
+ " Build Date : %u/%x/%u\n"
+ " Base Clock : %u MHz\n"
+ " Arch/SVN Release: %u/%llx\n"
+ " Bitstream : %llx\n",
+ GENWQE_DEVNAME, DRV_VERS_STRING, dev_name(&pci_dev->dev),
+ genwqe_is_privileged(cd) ?
+ "Physical" : "Virtual or no SR-IOV",
+ cd->card_idx, slu_id, app_id,
+ (u16)((slu_id >> 12) & 0x0fLLU), /* month */
+ (u16)((slu_id >> 4) & 0xffLLU), /* day */
+ (u16)((slu_id >> 16) & 0x0fLLU) + 2010, /* year */
+ genwqe_base_clock_frequency(cd),
+ (u16)((slu_id >> 32) & 0xffLLU), slu_id >> 40,
+ bitstream);
+
+ return 0;
+}
+
+GENWQE_DEBUGFS_RO(info, genwqe_info_show);
+
+int genwqe_init_debugfs(struct genwqe_dev *cd)
+{
+ struct dentry *root;
+ struct dentry *file;
+ int ret;
+ char card_name[64];
+ char name[64];
+ unsigned int i;
+
+ sprintf(card_name, "%s%u_card", GENWQE_DEVNAME, cd->card_idx);
+
+ root = debugfs_create_dir(card_name, cd->debugfs_genwqe);
+ if (!root) {
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ /* non privileged interfaces are done here */
+ file = debugfs_create_file("ddcb_info", S_IRUGO, root, cd,
+ &genwqe_ddcb_info_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("info", S_IRUGO, root, cd,
+ &genwqe_info_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_x64("err_inject", 0666, root, &cd->err_inject);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_u32("ddcb_software_timeout", 0666, root,
+ &cd->ddcb_software_timeout);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_u32("kill_timeout", 0666, root,
+ &cd->kill_timeout);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ /* privileged interfaces follow here */
+ if (!genwqe_is_privileged(cd)) {
+ cd->debugfs_root = root;
+ return 0;
+ }
+
+ file = debugfs_create_file("curr_regs", S_IRUGO, root, cd,
+ &genwqe_curr_regs_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("curr_dbg_uid0", S_IRUGO, root, cd,
+ &genwqe_curr_dbg_uid0_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("curr_dbg_uid1", S_IRUGO, root, cd,
+ &genwqe_curr_dbg_uid1_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("curr_dbg_uid2", S_IRUGO, root, cd,
+ &genwqe_curr_dbg_uid2_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("prev_regs", S_IRUGO, root, cd,
+ &genwqe_prev_regs_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("prev_dbg_uid0", S_IRUGO, root, cd,
+ &genwqe_prev_dbg_uid0_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("prev_dbg_uid1", S_IRUGO, root, cd,
+ &genwqe_prev_dbg_uid1_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("prev_dbg_uid2", S_IRUGO, root, cd,
+ &genwqe_prev_dbg_uid2_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ for (i = 0; i < GENWQE_MAX_VFS; i++) {
+ sprintf(name, "vf%d_jobtimeout_msec", i);
+
+ file = debugfs_create_u32(name, 0666, root,
+ &cd->vf_jobtimeout_msec[i]);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+ }
+
+ file = debugfs_create_file("jobtimer", S_IRUGO, root, cd,
+ &genwqe_jtimer_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("queue_working_time", S_IRUGO, root, cd,
+ &genwqe_queue_working_time_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_u32("skip_recovery", 0666, root,
+ &cd->skip_recovery);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ cd->debugfs_root = root;
+ return 0;
+err1:
+ debugfs_remove_recursive(root);
+err0:
+ return ret;
+}
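+
+/*
+ * Resulting debugfs layout for card 0 (sketch; it assumes debugfs is
+ * mounted at /sys/kernel/debug and that cd->debugfs_genwqe is the
+ * driver's top-level "genwqe" directory created elsewhere):
+ *
+ *	/sys/kernel/debug/genwqe/genwqe0_card/
+ *		ddcb_info info err_inject ddcb_software_timeout kill_timeout
+ *		curr_regs curr_dbg_uid[0-2] prev_regs prev_dbg_uid[0-2]
+ *		vf<n>_jobtimeout_msec jobtimer queue_working_time skip_recovery
+ *
+ * The second group is only created on the privileged (PF) instance.
+ */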
+
+void genqwe_exit_debugfs(struct genwqe_dev *cd)
+{
+ debugfs_remove_recursive(cd->debugfs_root);
+}
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
new file mode 100644
index 000000000000..2c2c9cc75231
--- /dev/null
+++ b/drivers/misc/genwqe/card_dev.c
@@ -0,0 +1,1415 @@
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Character device representation of the GenWQE device. This allows
+ * user-space applications to communicate with the card.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+
+#include "card_base.h"
+#include "card_ddcb.h"
+
+static int genwqe_open_files(struct genwqe_dev *cd)
+{
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cd->file_lock, flags);
+ rc = list_empty(&cd->file_list);
+ spin_unlock_irqrestore(&cd->file_lock, flags);
+ return !rc;
+}
+
+static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
+{
+ unsigned long flags;
+
+ cfile->owner = current;
+ spin_lock_irqsave(&cd->file_lock, flags);
+ list_add(&cfile->list, &cd->file_list);
+ spin_unlock_irqrestore(&cd->file_lock, flags);
+}
+
+static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cd->file_lock, flags);
+ list_del(&cfile->list);
+ spin_unlock_irqrestore(&cd->file_lock, flags);
+
+ return 0;
+}
+
+static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfile->pin_lock, flags);
+ list_add(&m->pin_list, &cfile->pin_list);
+ spin_unlock_irqrestore(&cfile->pin_lock, flags);
+}
+
+static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfile->pin_lock, flags);
+ list_del(&m->pin_list);
+ spin_unlock_irqrestore(&cfile->pin_lock, flags);
+
+ return 0;
+}
+
+/**
+ * genwqe_search_pin() - Search for the mapping for a userspace address
+ * @cfile: Descriptor of opened file
+ * @u_addr: User virtual address
+ * @size: Size of buffer
+ * @virt_addr: Kernel virtual address to be updated
+ *
+ * Return: Pointer to the corresponding mapping, or NULL if not found
+ */
+static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile,
+ unsigned long u_addr,
+ unsigned int size,
+ void **virt_addr)
+{
+ unsigned long flags;
+ struct dma_mapping *m;
+
+ spin_lock_irqsave(&cfile->pin_lock, flags);
+
+ list_for_each_entry(m, &cfile->pin_list, pin_list) {
+ if ((((u64)m->u_vaddr) <= (u_addr)) &&
+ (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {
+
+ if (virt_addr)
+ *virt_addr = m->k_vaddr +
+ (u_addr - (u64)m->u_vaddr);
+
+ spin_unlock_irqrestore(&cfile->pin_lock, flags);
+ return m;
+ }
+ }
+ spin_unlock_irqrestore(&cfile->pin_lock, flags);
+ return NULL;
+}
+
+static void __genwqe_add_mapping(struct genwqe_file *cfile,
+ struct dma_mapping *dma_map)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfile->map_lock, flags);
+ list_add(&dma_map->card_list, &cfile->map_list);
+ spin_unlock_irqrestore(&cfile->map_lock, flags);
+}
+
+static void __genwqe_del_mapping(struct genwqe_file *cfile,
+ struct dma_mapping *dma_map)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfile->map_lock, flags);
+ list_del(&dma_map->card_list);
+ spin_unlock_irqrestore(&cfile->map_lock, flags);
+}
+
+
+/**
+ * __genwqe_search_mapping() - Search for the mapping for a userspace address
+ * @cfile: descriptor of opened file
+ * @u_addr: user virtual address
+ * @size: size of buffer
+ * @dma_addr: DMA address to be updated
+ * @virt_addr: Kernel virtual address to be updated
+ *
+ * Return: Pointer to the corresponding mapping, or NULL if not found
+ */
+static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
+ unsigned long u_addr,
+ unsigned int size,
+ dma_addr_t *dma_addr,
+ void **virt_addr)
+{
+ unsigned long flags;
+ struct dma_mapping *m;
+ struct pci_dev *pci_dev = cfile->cd->pci_dev;
+
+ spin_lock_irqsave(&cfile->map_lock, flags);
+ list_for_each_entry(m, &cfile->map_list, card_list) {
+
+ if ((((u64)m->u_vaddr) <= (u_addr)) &&
+ (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {
+
+ /* match found: current is as expected and
+ addr is in range */
+ if (dma_addr)
+ *dma_addr = m->dma_addr +
+ (u_addr - (u64)m->u_vaddr);
+
+ if (virt_addr)
+ *virt_addr = m->k_vaddr +
+ (u_addr - (u64)m->u_vaddr);
+
+ spin_unlock_irqrestore(&cfile->map_lock, flags);
+ return m;
+ }
+ }
+ spin_unlock_irqrestore(&cfile->map_lock, flags);
+
+ dev_err(&pci_dev->dev,
+ "[%s] Entry not found: u_addr=%lx, size=%x\n",
+ __func__, u_addr, size);
+
+ return NULL;
+}
+
+static void genwqe_remove_mappings(struct genwqe_file *cfile)
+{
+ int i = 0;
+ struct list_head *node, *next;
+ struct dma_mapping *dma_map;
+ struct genwqe_dev *cd = cfile->cd;
+ struct pci_dev *pci_dev = cfile->cd->pci_dev;
+
+ list_for_each_safe(node, next, &cfile->map_list) {
+ dma_map = list_entry(node, struct dma_mapping, card_list);
+
+ list_del_init(&dma_map->card_list);
+
+ /*
+ * This is really a bug, because those things should
+ * have been already tidied up.
+ *
+ * GENWQE_MAPPING_RAW should have been removed via munmap().
+ * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code.
+ */
+ dev_err(&pci_dev->dev,
+ "[%s] %d. cleanup mapping: u_vaddr=%p "
+ "u_kaddr=%016lx dma_addr=%lx\n", __func__, i++,
+ dma_map->u_vaddr, (unsigned long)dma_map->k_vaddr,
+ (unsigned long)dma_map->dma_addr);
+
+ if (dma_map->type == GENWQE_MAPPING_RAW) {
+ /* we allocated this dynamically */
+ __genwqe_free_consistent(cd, dma_map->size,
+ dma_map->k_vaddr,
+ dma_map->dma_addr);
+ kfree(dma_map);
+ } else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
+ /* we use dma_map statically from the request */
+ genwqe_user_vunmap(cd, dma_map, NULL);
+ }
+ }
+}
+
+static void genwqe_remove_pinnings(struct genwqe_file *cfile)
+{
+ struct list_head *node, *next;
+ struct dma_mapping *dma_map;
+ struct genwqe_dev *cd = cfile->cd;
+
+ list_for_each_safe(node, next, &cfile->pin_list) {
+ dma_map = list_entry(node, struct dma_mapping, pin_list);
+
+ /*
+ * This is not a bug, because a killed process might
+ * not call the unpin ioctl, which is supposed to free
+ * the resources.
+ *
+ * Pinnings are dynamically allocated and need to be
+ * deleted.
+ */
+ list_del_init(&dma_map->pin_list);
+ genwqe_user_vunmap(cd, dma_map, NULL);
+ kfree(dma_map);
+ }
+}
+
+/**
+ * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
+ *
+ * E.g. genwqe_send_signal(cd, SIGIO);
+ */
+static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
+{
+ unsigned int files = 0;
+ unsigned long flags;
+ struct genwqe_file *cfile;
+
+ spin_lock_irqsave(&cd->file_lock, flags);
+ list_for_each_entry(cfile, &cd->file_list, list) {
+ if (cfile->async_queue)
+ kill_fasync(&cfile->async_queue, sig, POLL_HUP);
+ files++;
+ }
+ spin_unlock_irqrestore(&cd->file_lock, flags);
+ return files;
+}
+
+static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
+{
+ unsigned int files = 0;
+ unsigned long flags;
+ struct genwqe_file *cfile;
+
+ spin_lock_irqsave(&cd->file_lock, flags);
+ list_for_each_entry(cfile, &cd->file_list, list) {
+ force_sig(sig, cfile->owner);
+ files++;
+ }
+ spin_unlock_irqrestore(&cd->file_lock, flags);
+ return files;
+}
+
+/**
+ * genwqe_open() - file open
+ * @inode: file system information
+ * @filp: file handle
+ *
+ * This function is executed whenever an application calls
+ * open("/dev/genwqe",..).
+ *
+ * Return: 0 if successful or <0 if errors
+ */
+static int genwqe_open(struct inode *inode, struct file *filp)
+{
+ struct genwqe_dev *cd;
+ struct genwqe_file *cfile;
+ struct pci_dev *pci_dev;
+
+ cfile = kzalloc(sizeof(*cfile), GFP_KERNEL);
+ if (cfile == NULL)
+ return -ENOMEM;
+
+ cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe);
+ pci_dev = cd->pci_dev;
+ cfile->cd = cd;
+ cfile->filp = filp;
+ cfile->client = NULL;
+
+ spin_lock_init(&cfile->map_lock); /* list of raw memory allocations */
+ INIT_LIST_HEAD(&cfile->map_list);
+
+ spin_lock_init(&cfile->pin_lock); /* list of user pinned memory */
+ INIT_LIST_HEAD(&cfile->pin_list);
+
+ filp->private_data = cfile;
+
+ genwqe_add_file(cd, cfile);
+ return 0;
+}
+
+/**
+ * genwqe_fasync() - Setup process to receive SIGIO.
+ * @fd: file descriptor
+ * @filp: file handle
+ * @mode: file mode
+ *
+ * Sending a signal is working as following:
+ *
+ * if (cdev->async_queue)
+ * kill_fasync(&cdev->async_queue, SIGIO, POLL_IN);
+ *
+ * Some devices also implement asynchronous notification to indicate
+ * when the device can be written; in this case, of course,
+ * kill_fasync must be called with a mode of POLL_OUT.
+ */
+static int genwqe_fasync(int fd, struct file *filp, int mode)
+{
+ struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data;
+ return fasync_helper(fd, filp, mode, &cdev->async_queue);
+}
+
+
+/**
+ * genwqe_release() - file close
+ * @inode: file system information
+ * @filp: file handle
+ *
+ * This function is executed whenever an application calls 'close(fd_genwqe)'
+ *
+ * Return: always 0
+ */
+static int genwqe_release(struct inode *inode, struct file *filp)
+{
+ struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
+ struct genwqe_dev *cd = cfile->cd;
+
+ /* there must be no entries in these lists! */
+ genwqe_remove_mappings(cfile);
+ genwqe_remove_pinnings(cfile);
+
+ /* remove this filp from the asynchronously notified filp's */
+ genwqe_fasync(-1, filp, 0);
+
+ /*
+ * For this to work we must not release cd when this cfile is
+ * not yet released, otherwise the list entry is invalid,
+ * because the list itself gets reinstantiated!
+ */
+ genwqe_del_file(cd, cfile);
+ kfree(cfile);
+ return 0;
+}
+
+static void genwqe_vma_open(struct vm_area_struct *vma)
+{
+ /* nothing ... */
+}
+
+/**
+ * genwqe_vma_close() - Called each time when vma is unmapped
+ *
+ * Free memory which got allocated by GenWQE mmap().
+ */
+static void genwqe_vma_close(struct vm_area_struct *vma)
+{
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ struct inode *inode = vma->vm_file->f_dentry->d_inode;
+ struct dma_mapping *dma_map;
+ struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
+ cdev_genwqe);
+ struct pci_dev *pci_dev = cd->pci_dev;
+ dma_addr_t d_addr = 0;
+ struct genwqe_file *cfile = vma->vm_private_data;
+
+ dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
+ &d_addr, NULL);
+ if (dma_map == NULL) {
+ dev_err(&pci_dev->dev,
+ " [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n",
+ __func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
+ vsize);
+ return;
+ }
+ __genwqe_del_mapping(cfile, dma_map);
+ __genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr,
+ dma_map->dma_addr);
+ kfree(dma_map);
+}
+
+static struct vm_operations_struct genwqe_vma_ops = {
+ .open = genwqe_vma_open,
+ .close = genwqe_vma_close,
+};
+
+/**
+ * genwqe_mmap() - Provide contiguous buffers to userspace
+ *
+ * We use mmap() to allocate contiguous buffers used for DMA
+ * transfers. After the buffer is allocated we remap it to user-space
+ * and remember a reference to our dma_mapping data structure, where
+ * we store the associated DMA address and allocated size.
+ *
+ * When we receive a DDCB execution request with the ATS bits set to
+ * plain buffer, we lookup our dma_mapping list to find the
+ * corresponding DMA address for the associated user-space address.
+ */
+static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int rc;
+ unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
+ struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
+ struct genwqe_dev *cd = cfile->cd;
+ struct dma_mapping *dma_map;
+
+ if (vsize == 0)
+ return -EINVAL;
+
+ if (get_order(vsize) > MAX_ORDER)
+ return -ENOMEM;
+
+ dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC);
+ if (dma_map == NULL)
+ return -ENOMEM;
+
+ genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW);
+ dma_map->u_vaddr = (void *)vma->vm_start;
+ dma_map->size = vsize;
+ dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
+ dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize,
+ &dma_map->dma_addr);
+ if (dma_map->k_vaddr == NULL) {
+ rc = -ENOMEM;
+ goto free_dma_map;
+ }
+
+ if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t)))
+ *(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr;
+
+ pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
+ rc = remap_pfn_range(vma,
+ vma->vm_start,
+ pfn,
+ vsize,
+ vma->vm_page_prot);
+ if (rc != 0) {
+ rc = -EFAULT;
+ goto free_dma_mem;
+ }
+
+ vma->vm_private_data = cfile;
+ vma->vm_ops = &genwqe_vma_ops;
+ __genwqe_add_mapping(cfile, dma_map);
+
+ return 0;
+
+ free_dma_mem:
+ __genwqe_free_consistent(cd, dma_map->size,
+ dma_map->k_vaddr,
+ dma_map->dma_addr);
+ free_dma_map:
+ kfree(dma_map);
+ return rc;
+}
+
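+/*
+ * Hypothetical user-space usage of the mmap() path above (not part of
+ * this patch; device path and buffer size are assumptions made only
+ * for illustration):
+ *
+ *	int fd = open("/dev/genwqe0_card", O_RDWR);
+ *	void *buf = mmap(NULL, 1024 * 1024, PROT_READ | PROT_WRITE,
+ *			 MAP_SHARED, fd, 0);
+ *	...				/* use buf as DMA-able memory */
+ *	munmap(buf, 1024 * 1024);	/* ends up in genwqe_vma_close() */
+ *	close(fd);
+ */
+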
+/**
+ * do_flash_update() - Execute flash update (write image or CVPD)
+ * @cfile: descriptor of opened file
+ * @load: details about image load
+ *
+ * Return: 0 if successful
+ */
+
+#define FLASH_BLOCK 0x40000 /* we use 256k blocks */
+
+static int do_flash_update(struct genwqe_file *cfile,
+ struct genwqe_bitstream *load)
+{
+ int rc = 0;
+ int blocks_to_flash;
+ dma_addr_t dma_addr;
+ u64 flash = 0;
+ size_t tocopy = 0;
+ u8 __user *buf;
+ u8 *xbuf;
+ u32 crc;
+ u8 cmdopts;
+ struct genwqe_dev *cd = cfile->cd;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if ((load->size & 0x3) != 0)
+ return -EINVAL;
+
+ if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
+ return -EINVAL;
+
+ /* FIXME Bits have changed for new service layer! */
+ switch ((char)load->partition) {
+ case '0':
+ cmdopts = 0x14;
+ break; /* download/erase_first/part_0 */
+ case '1':
+ cmdopts = 0x1C;
+ break; /* download/erase_first/part_1 */
+ case 'v': /* cmdopts = 0x0c (VPD) */
+ default:
+ return -EINVAL;
+ }
+
+ buf = (u8 __user *)load->data_addr;
+ xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
+ if (xbuf == NULL)
+ return -ENOMEM;
+
+ blocks_to_flash = load->size / FLASH_BLOCK;
+ while (load->size) {
+ struct genwqe_ddcb_cmd *req;
+
+ /*
+ * We must be 4 byte aligned. Buffer must be zero padded
+ * to have defined values when calculating the CRC.
+ */
+ tocopy = min_t(size_t, load->size, FLASH_BLOCK);
+
+ rc = copy_from_user(xbuf, buf, tocopy);
+ if (rc) {
+ rc = -EFAULT;
+ goto free_buffer;
+ }
+ crc = genwqe_crc32(xbuf, tocopy, 0xffffffff);
+
+ dev_dbg(&pci_dev->dev,
+ "[%s] DMA: %lx CRC: %08x SZ: %ld %d\n",
+ __func__, (unsigned long)dma_addr, crc, tocopy,
+ blocks_to_flash);
+
+ /* prepare DDCB for SLU process */
+ req = ddcb_requ_alloc();
+ if (req == NULL) {
+ rc = -ENOMEM;
+ goto free_buffer;
+ }
+
+ req->cmd = SLCMD_MOVE_FLASH;
+ req->cmdopts = cmdopts;
+
+ /* prepare invariant values */
+ if (genwqe_get_slu_id(cd) <= 0x2) {
+ *(__be64 *)&req->__asiv[0] = cpu_to_be64(dma_addr);
+ *(__be64 *)&req->__asiv[8] = cpu_to_be64(tocopy);
+ *(__be64 *)&req->__asiv[16] = cpu_to_be64(flash);
+ *(__be32 *)&req->__asiv[24] = cpu_to_be32(0);
+ req->__asiv[24] = load->uid;
+ *(__be32 *)&req->__asiv[28] = cpu_to_be32(crc);
+
+ /* for simulation only */
+ *(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id);
+ *(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id);
+ req->asiv_length = 32; /* bytes included in crc calc */
+ } else { /* setup DDCB for ATS architecture */
+ *(__be64 *)&req->asiv[0] = cpu_to_be64(dma_addr);
+ *(__be32 *)&req->asiv[8] = cpu_to_be32(tocopy);
+ *(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */
+ *(__be64 *)&req->asiv[16] = cpu_to_be64(flash);
+ *(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24);
+ *(__be32 *)&req->asiv[28] = cpu_to_be32(crc);
+
+ /* for simulation only */
+ *(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id);
+ *(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id);
+
+ /* Rd only */
+ req->ats = 0x4ULL << 44;
+ req->asiv_length = 40; /* bytes included in crc calc */
+ }
+ req->asv_length = 8;
+
+ /* For Genwqe5 we get back the calculated CRC */
+ *(u64 *)&req->asv[0] = 0ULL; /* 0x80 */
+
+ rc = __genwqe_execute_raw_ddcb(cd, req);
+
+ load->retc = req->retc;
+ load->attn = req->attn;
+ load->progress = req->progress;
+
+ if (rc < 0) {
+ ddcb_requ_free(req);
+ goto free_buffer;
+ }
+
+ if (req->retc != DDCB_RETC_COMPLETE) {
+ rc = -EIO;
+ ddcb_requ_free(req);
+ goto free_buffer;
+ }
+
+ load->size -= tocopy;
+ flash += tocopy;
+ buf += tocopy;
+ blocks_to_flash--;
+ ddcb_requ_free(req);
+ }
+
+ free_buffer:
+ __genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
+ return rc;
+}
+
+static int do_flash_read(struct genwqe_file *cfile,
+ struct genwqe_bitstream *load)
+{
+ int rc, blocks_to_flash;
+ dma_addr_t dma_addr;
+ u64 flash = 0;
+ size_t tocopy = 0;
+ u8 __user *buf;
+ u8 *xbuf;
+ u8 cmdopts;
+ struct genwqe_dev *cd = cfile->cd;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ struct genwqe_ddcb_cmd *cmd;
+
+ if ((load->size & 0x3) != 0)
+ return -EINVAL;
+
+ if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
+ return -EINVAL;
+
+ /* FIXME Bits have changed for new service layer! */
+ switch ((char)load->partition) {
+ case '0':
+ cmdopts = 0x12;
+ break; /* upload/part_0 */
+ case '1':
+ cmdopts = 0x1A;
+ break; /* upload/part_1 */
+ case 'v':
+ default:
+ return -EINVAL;
+ }
+
+ buf = (u8 __user *)load->data_addr;
+ xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
+ if (xbuf == NULL)
+ return -ENOMEM;
+
+ blocks_to_flash = load->size / FLASH_BLOCK;
+ while (load->size) {
+ /*
+ * We must be 4 byte aligned. Buffer must be zero padded
+ * to have defined values when calculating the CRC.
+ */
+ tocopy = min_t(size_t, load->size, FLASH_BLOCK);
+
+ dev_dbg(&pci_dev->dev,
+ "[%s] DMA: %lx SZ: %ld %d\n",
+ __func__, (unsigned long)dma_addr, tocopy,
+ blocks_to_flash);
+
+ /* prepare DDCB for SLU process */
+ cmd = ddcb_requ_alloc();
+ if (cmd == NULL) {
+ rc = -ENOMEM;
+ goto free_buffer;
+ }
+ cmd->cmd = SLCMD_MOVE_FLASH;
+ cmd->cmdopts = cmdopts;
+
+ /* prepare invariant values */
+ if (genwqe_get_slu_id(cd) <= 0x2) {
+ *(__be64 *)&cmd->__asiv[0] = cpu_to_be64(dma_addr);
+ *(__be64 *)&cmd->__asiv[8] = cpu_to_be64(tocopy);
+ *(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash);
+ *(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0);
+ cmd->__asiv[24] = load->uid;
+ *(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0) /* CRC */;
+ cmd->asiv_length = 32; /* bytes included in crc calc */
+ } else { /* setup DDCB for ATS architecture */
+ *(__be64 *)&cmd->asiv[0] = cpu_to_be64(dma_addr);
+ *(__be32 *)&cmd->asiv[8] = cpu_to_be32(tocopy);
+ *(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */
+ *(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash);
+ *(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24);
+ *(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */
+
+ /* rd/wr */
+ cmd->ats = 0x5ULL << 44;
+ cmd->asiv_length = 40; /* bytes included in crc calc */
+ }
+ cmd->asv_length = 8;
+
+ /* we only get back the calculated CRC */
+ *(u64 *)&cmd->asv[0] = 0ULL; /* 0x80 */
+
+ rc = __genwqe_execute_raw_ddcb(cd, cmd);
+
+ load->retc = cmd->retc;
+ load->attn = cmd->attn;
+ load->progress = cmd->progress;
+
+ if ((rc < 0) && (rc != -EBADMSG)) {
+ ddcb_requ_free(cmd);
+ goto free_buffer;
+ }
+
+ rc = copy_to_user(buf, xbuf, tocopy);
+ if (rc) {
+ rc = -EFAULT;
+ ddcb_requ_free(cmd);
+ goto free_buffer;
+ }
+
+ /* We know that we can get retc 0x104 with CRC err */
+ if (((cmd->retc == DDCB_RETC_FAULT) &&
+ (cmd->attn != 0x02)) || /* Normally ignore CRC error */
+ ((cmd->retc == DDCB_RETC_COMPLETE) &&
+ (cmd->attn != 0x00))) { /* Everything was fine */
+ rc = -EIO;
+ ddcb_requ_free(cmd);
+ goto free_buffer;
+ }
+
+ load->size -= tocopy;
+ flash += tocopy;
+ buf += tocopy;
+ blocks_to_flash--;
+ ddcb_requ_free(cmd);
+ }
+ rc = 0;
+
+ free_buffer:
+ __genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
+ return rc;
+}
+
+static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
+{
+ int rc;
+ struct genwqe_dev *cd = cfile->cd;
+ struct pci_dev *pci_dev = cfile->cd->pci_dev;
+ struct dma_mapping *dma_map;
+ unsigned long map_addr;
+ unsigned long map_size;
+
+ if ((m->addr == 0x0) || (m->size == 0))
+ return -EINVAL;
+
+ map_addr = (m->addr & PAGE_MASK);
+ map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
+
+ dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC);
+ if (dma_map == NULL)
+ return -ENOMEM;
+
+ genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED);
+ rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size, NULL);
+ if (rc != 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
+ kfree(dma_map);
+ return rc;
+ }
+
+ genwqe_add_pin(cfile, dma_map);
+ return 0;
+}
+
+static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
+{
+ struct genwqe_dev *cd = cfile->cd;
+ struct dma_mapping *dma_map;
+ unsigned long map_addr;
+ unsigned long map_size;
+
+ if (m->addr == 0x0)
+ return -EINVAL;
+
+ map_addr = (m->addr & PAGE_MASK);
+ map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
+
+ dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL);
+ if (dma_map == NULL)
+ return -ENOENT;
+
+ genwqe_del_pin(cfile, dma_map);
+ genwqe_user_vunmap(cd, dma_map, NULL);
+ kfree(dma_map);
+ return 0;
+}
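+
+/*
+ * Hypothetical user-space counterpart of the pin/unpin ioctls above
+ * (illustration only; it assumes struct genwqe_mem carries the addr and
+ * size members used in this file, and that buf/buf_size describe a
+ * buffer owned by the caller):
+ *
+ *	struct genwqe_mem m = {
+ *		.addr = (unsigned long)buf,
+ *		.size = buf_size,
+ *	};
+ *	ioctl(fd, GENWQE_PIN_MEM, &m);		/* pin the pages up front */
+ *	...					/* run DDCBs referencing buf */
+ *	ioctl(fd, GENWQE_UNPIN_MEM, &m);	/* drop the pinning again */
+ */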
+
+/**
+ * ddcb_cmd_cleanup() - Remove dynamically created fixup entries
+ *
+ * Only if there are any. Pinnings are not removed.
+ */
+static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
+{
+ unsigned int i;
+ struct dma_mapping *dma_map;
+ struct genwqe_dev *cd = cfile->cd;
+
+ for (i = 0; i < DDCB_FIXUPS; i++) {
+ dma_map = &req->dma_mappings[i];
+
+ if (dma_mapping_used(dma_map)) {
+ __genwqe_del_mapping(cfile, dma_map);
+ genwqe_user_vunmap(cd, dma_map, req);
+ }
+ if (req->sgl[i] != NULL) {
+ genwqe_free_sgl(cd, req->sgl[i],
+ req->sgl_dma_addr[i],
+ req->sgl_size[i]);
+ req->sgl[i] = NULL;
+ req->sgl_dma_addr[i] = 0x0;
+ req->sgl_size[i] = 0;
+ }
+
+ }
+ return 0;
+}
+
+/**
+ * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
+ *
+ * Before the DDCB gets executed we need to handle the fixups. We
+ * replace the user-space addresses with DMA addresses or do
+ * additional setup work e.g. generating a scatter-gather list which
+ * is used to describe the memory referred to in the fixup.
+ */
+static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
+{
+ int rc;
+ unsigned int asiv_offs, i;
+ struct genwqe_dev *cd = cfile->cd;
+ struct genwqe_ddcb_cmd *cmd = &req->cmd;
+ struct dma_mapping *m;
+ const char *type = "UNKNOWN";
+
+ for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
+ i++, asiv_offs += 0x08) {
+
+ u64 u_addr;
+ dma_addr_t d_addr;
+ u32 u_size = 0;
+ u64 ats_flags;
+
+ ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs);
+
+ switch (ats_flags) {
+
+ case ATS_TYPE_DATA:
+ break; /* nothing to do here */
+
+ case ATS_TYPE_FLAT_RDWR:
+ case ATS_TYPE_FLAT_RD: {
+ u_addr = be64_to_cpu(*((__be64 *)&cmd->
+ asiv[asiv_offs]));
+ u_size = be32_to_cpu(*((__be32 *)&cmd->
+ asiv[asiv_offs + 0x08]));
+
+ /*
+ * No data available. Ignore u_addr in this
+ * case and set addr to 0. Hardware must not
+ * fetch the buffer.
+ */
+ if (u_size == 0x0) {
+ *((__be64 *)&cmd->asiv[asiv_offs]) =
+ cpu_to_be64(0x0);
+ break;
+ }
+
+ m = __genwqe_search_mapping(cfile, u_addr, u_size,
+ &d_addr, NULL);
+ if (m == NULL) {
+ rc = -EFAULT;
+ goto err_out;
+ }
+
+ *((__be64 *)&cmd->asiv[asiv_offs]) =
+ cpu_to_be64(d_addr);
+ break;
+ }
+
+ case ATS_TYPE_SGL_RDWR:
+ case ATS_TYPE_SGL_RD: {
+ int page_offs, nr_pages, offs;
+
+ u_addr = be64_to_cpu(*((__be64 *)
+ &cmd->asiv[asiv_offs]));
+ u_size = be32_to_cpu(*((__be32 *)
+ &cmd->asiv[asiv_offs + 0x08]));
+
+ /*
+ * No data available. Ignore u_addr in this
+ * case and set addr to 0. Hardware must not
+ * fetch the empty sgl.
+ */
+ if (u_size == 0x0) {
+ *((__be64 *)&cmd->asiv[asiv_offs]) =
+ cpu_to_be64(0x0);
+ break;
+ }
+
+ m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
+ if (m != NULL) {
+ type = "PINNING";
+ page_offs = (u_addr -
+ (u64)m->u_vaddr)/PAGE_SIZE;
+ } else {
+ type = "MAPPING";
+ m = &req->dma_mappings[i];
+
+ genwqe_mapping_init(m,
+ GENWQE_MAPPING_SGL_TEMP);
+ rc = genwqe_user_vmap(cd, m, (void *)u_addr,
+ u_size, req);
+ if (rc != 0)
+ goto err_out;
+
+ __genwqe_add_mapping(cfile, m);
+ page_offs = 0;
+ }
+
+ offs = offset_in_page(u_addr);
+ nr_pages = DIV_ROUND_UP(offs + u_size, PAGE_SIZE);
+
+ /* create genwqe style scatter gather list */
+ req->sgl[i] = genwqe_alloc_sgl(cd, m->nr_pages,
+ &req->sgl_dma_addr[i],
+ &req->sgl_size[i]);
+ if (req->sgl[i] == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ genwqe_setup_sgl(cd, offs, u_size,
+ req->sgl[i],
+ req->sgl_dma_addr[i],
+ req->sgl_size[i],
+ m->dma_list,
+ page_offs,
+ nr_pages);
+
+ *((__be64 *)&cmd->asiv[asiv_offs]) =
+ cpu_to_be64(req->sgl_dma_addr[i]);
+
+ break;
+ }
+ default:
+ rc = -EINVAL;
+ goto err_out;
+ }
+ }
+ return 0;
+
+ err_out:
+ ddcb_cmd_cleanup(cfile, req);
+ return rc;
+}
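+
+/*
+ * Note on the ATS word used above: each 8-byte ASIV slot appears to be
+ * described by a 4-bit type nibble in cmd->ats, with slot 0 in the
+ * topmost used nibble (bits 47:44), matching do_flash_update()'s
+ * "req->ats = 0x4ULL << 44" for its first slot. A hedged sketch of how
+ * a caller could describe one read/write SGL buffer in slot 0
+ * (user_addr/user_size are assumptions for the example):
+ *
+ *	cmd->ats = (u64)ATS_TYPE_SGL_RDWR << 44;
+ *	*(__be64 *)&cmd->asiv[0x00] = cpu_to_be64(user_addr);
+ *	*(__be32 *)&cmd->asiv[0x08] = cpu_to_be32(user_size);
+ */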
+
+/**
+ * genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
+ *
+ * The code will build up the translation tables or lookup the
+ * contiguous memory allocation table to find the right translations
+ * and DMA addresses.
+ */
+static int genwqe_execute_ddcb(struct genwqe_file *cfile,
+ struct genwqe_ddcb_cmd *cmd)
+{
+ int rc;
+ struct genwqe_dev *cd = cfile->cd;
+ struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
+
+ rc = ddcb_cmd_fixups(cfile, req);
+ if (rc != 0)
+ return rc;
+
+ rc = __genwqe_execute_raw_ddcb(cd, cmd);
+ ddcb_cmd_cleanup(cfile, req);
+ return rc;
+}
+
+static int do_execute_ddcb(struct genwqe_file *cfile,
+ unsigned long arg, int raw)
+{
+ int rc;
+ struct genwqe_ddcb_cmd *cmd;
+ struct ddcb_requ *req;
+ struct genwqe_dev *cd = cfile->cd;
+
+ cmd = ddcb_requ_alloc();
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ req = container_of(cmd, struct ddcb_requ, cmd);
+
+ if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
+ ddcb_requ_free(cmd);
+ return -EFAULT;
+ }
+
+ if (!raw)
+ rc = genwqe_execute_ddcb(cfile, cmd);
+ else
+ rc = __genwqe_execute_raw_ddcb(cd, cmd);
+
+ /* Copy back only the modified fields. Do not copy the ASIV
+ back since it got modified by the driver. */
+ if (copy_to_user((void __user *)arg, cmd,
+ sizeof(*cmd) - DDCB_ASIV_LENGTH)) {
+ ddcb_requ_free(cmd);
+ return -EFAULT;
+ }
+
+ ddcb_requ_free(cmd);
+ return rc;
+}
+
+/**
+ * genwqe_ioctl() - IO control
+ * @filp: file handle
+ * @cmd: command identifier (passed from user)
+ * @arg: argument (passed from user)
+ *
+ * Return: 0 success
+ */
+static long genwqe_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc = 0;
+ struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
+ struct genwqe_dev *cd = cfile->cd;
+ struct genwqe_reg_io __user *io;
+ u64 val;
+ u32 reg_offs;
+
+ if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE)
+ return -EINVAL;
+
+ switch (cmd) {
+
+ case GENWQE_GET_CARD_STATE:
+ put_user(cd->card_state, (enum genwqe_card_state __user *)arg);
+ return 0;
+
+ /* Register access */
+ case GENWQE_READ_REG64: {
+ io = (struct genwqe_reg_io __user *)arg;
+
+ if (get_user(reg_offs, &io->num))
+ return -EFAULT;
+
+ if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
+ return -EINVAL;
+
+ val = __genwqe_readq(cd, reg_offs);
+ put_user(val, &io->val64);
+ return 0;
+ }
+
+ case GENWQE_WRITE_REG64: {
+ io = (struct genwqe_reg_io __user *)arg;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
+ return -EPERM;
+
+ if (get_user(reg_offs, &io->num))
+ return -EFAULT;
+
+ if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
+ return -EINVAL;
+
+ if (get_user(val, &io->val64))
+ return -EFAULT;
+
+ __genwqe_writeq(cd, reg_offs, val);
+ return 0;
+ }
+
+ case GENWQE_READ_REG32: {
+ io = (struct genwqe_reg_io __user *)arg;
+
+ if (get_user(reg_offs, &io->num))
+ return -EFAULT;
+
+ if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
+ return -EINVAL;
+
+ val = __genwqe_readl(cd, reg_offs);
+ put_user(val, &io->val64);
+ return 0;
+ }
+
+ case GENWQE_WRITE_REG32: {
+ io = (struct genwqe_reg_io __user *)arg;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
+ return -EPERM;
+
+ if (get_user(reg_offs, &io->num))
+ return -EFAULT;
+
+ if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
+ return -EINVAL;
+
+ if (get_user(val, &io->val64))
+ return -EFAULT;
+
+ __genwqe_writel(cd, reg_offs, val);
+ return 0;
+ }
+
+ /* Flash update/reading */
+ case GENWQE_SLU_UPDATE: {
+ struct genwqe_bitstream load;
+
+ if (!genwqe_is_privileged(cd))
+ return -EPERM;
+
+ if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
+ return -EPERM;
+
+ if (copy_from_user(&load, (void __user *)arg,
+ sizeof(load)))
+ return -EFAULT;
+
+ rc = do_flash_update(cfile, &load);
+
+ if (copy_to_user((void __user *)arg, &load, sizeof(load)))
+ return -EFAULT;
+
+ return rc;
+ }
+
+ case GENWQE_SLU_READ: {
+ struct genwqe_bitstream load;
+
+ if (!genwqe_is_privileged(cd))
+ return -EPERM;
+
+ if (genwqe_flash_readback_fails(cd))
+ return -ENOSPC; /* known to fail for old versions */
+
+ if (copy_from_user(&load, (void __user *)arg, sizeof(load)))
+ return -EFAULT;
+
+ rc = do_flash_read(cfile, &load);
+
+ if (copy_to_user((void __user *)arg, &load, sizeof(load)))
+ return -EFAULT;
+
+ return rc;
+ }
+
+ /* memory pinning and unpinning */
+ case GENWQE_PIN_MEM: {
+ struct genwqe_mem m;
+
+ if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
+ return -EFAULT;
+
+ return genwqe_pin_mem(cfile, &m);
+ }
+
+ case GENWQE_UNPIN_MEM: {
+ struct genwqe_mem m;
+
+ if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
+ return -EFAULT;
+
+ return genwqe_unpin_mem(cfile, &m);
+ }
+
+ /* launch a DDCB and wait for completion */
+ case GENWQE_EXECUTE_DDCB:
+ return do_execute_ddcb(cfile, arg, 0);
+
+ case GENWQE_EXECUTE_RAW_DDCB: {
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return do_execute_ddcb(cfile, arg, 1);
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
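+/*
+ * Hypothetical user-space counterpart of the register-access ioctls
+ * handled above (illustration only; field names follow the way struct
+ * genwqe_reg_io is used here, and 0x0 is just an example of an 8-byte
+ * aligned MMIO offset):
+ *
+ *	struct genwqe_reg_io io = { .num = 0x0 };
+ *	if (ioctl(fd, GENWQE_READ_REG64, &io) == 0)
+ *		printf("reg %llx: %016llx\n",
+ *		       (unsigned long long)io.num,
+ *		       (unsigned long long)io.val64);
+ */
+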
+#if defined(CONFIG_COMPAT)
+/**
+ * genwqe_compat_ioctl() - Compatibility ioctl
+ *
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/genwqe<n>_card.
+ *
+ * @filp: file pointer.
+ * @cmd: command.
+ * @arg: user argument.
+ * Return: zero on success or negative number on failure.
+ */
+static long genwqe_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return genwqe_ioctl(filp, cmd, arg);
+}
+#endif /* defined(CONFIG_COMPAT) */
+
+static const struct file_operations genwqe_fops = {
+ .owner = THIS_MODULE,
+ .open = genwqe_open,
+ .fasync = genwqe_fasync,
+ .mmap = genwqe_mmap,
+ .unlocked_ioctl = genwqe_ioctl,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = genwqe_compat_ioctl,
+#endif
+ .release = genwqe_release,
+};
+
+static int genwqe_device_initialized(struct genwqe_dev *cd)
+{
+ return cd->dev != NULL;
+}
+
+/**
+ * genwqe_device_create() - Create and configure genwqe char device
+ * @cd: genwqe device descriptor
+ *
+ * This function must be called before we create any more genwqe
+ * character devices, because it is allocating the major and minor
+ * number which are supposed to be used by the client drivers.
+ */
+int genwqe_device_create(struct genwqe_dev *cd)
+{
+ int rc;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ /*
+ * Here starts the individual setup per client. It must
+ * initialize its own cdev data structure with its own fops.
+ * The appropriate devnum needs to be created. The ranges must
+ * not overlap.
+ */
+ rc = alloc_chrdev_region(&cd->devnum_genwqe, 0,
+ GENWQE_MAX_MINOR, GENWQE_DEVNAME);
+ if (rc < 0) {
+ dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n");
+ goto err_dev;
+ }
+
+ cdev_init(&cd->cdev_genwqe, &genwqe_fops);
+ cd->cdev_genwqe.owner = THIS_MODULE;
+
+ rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1);
+ if (rc < 0) {
+ dev_err(&pci_dev->dev, "err: cdev_add failed\n");
+ goto err_add;
+ }
+
+ /*
+ * Finally the device in /dev/... must be created. The rule is
+ * to use card%d_clientname for each created device.
+ */
+ cd->dev = device_create_with_groups(cd->class_genwqe,
+ &cd->pci_dev->dev,
+ cd->devnum_genwqe, cd,
+ genwqe_attribute_groups,
+ GENWQE_DEVNAME "%u_card",
+ cd->card_idx);
+ if (IS_ERR(cd->dev)) {
+ rc = PTR_ERR(cd->dev);
+ goto err_cdev;
+ }
+
+ rc = genwqe_init_debugfs(cd);
+ if (rc != 0)
+ goto err_debugfs;
+
+ return 0;
+
+ err_debugfs:
+ device_destroy(cd->class_genwqe, cd->devnum_genwqe);
+ err_cdev:
+ cdev_del(&cd->cdev_genwqe);
+ err_add:
+ unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
+ err_dev:
+ cd->dev = NULL;
+ return rc;
+}
+
+static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
+{
+ int rc;
+ unsigned int i;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (!genwqe_open_files(cd))
+ return 0;
+
+ dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__);
+
+ rc = genwqe_kill_fasync(cd, SIGIO);
+ if (rc > 0) {
+ /* give kill_timeout seconds to close file descriptors ... */
+ for (i = 0; (i < genwqe_kill_timeout) &&
+ genwqe_open_files(cd); i++) {
+ dev_info(&pci_dev->dev, " %d sec ...", i);
+
+ cond_resched();
+ msleep(1000);
+ }
+
+ /* if no open files we can safely continue, else ... */
+ if (!genwqe_open_files(cd))
+ return 0;
+
+ dev_warn(&pci_dev->dev,
+ "[%s] send SIGKILL and wait ...\n", __func__);
+
+ rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */
+ if (rc) {
+ /* Give kill_timeout more seconds to end processes */
+ for (i = 0; (i < genwqe_kill_timeout) &&
+ genwqe_open_files(cd); i++) {
+ dev_warn(&pci_dev->dev, " %d sec ...", i);
+
+ cond_resched();
+ msleep(1000);
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * genwqe_device_remove() - Remove genwqe's char device
+ *
+ * This function must be called after the client devices are removed
+ * because it will free the major/minor number range for the genwqe
+ * drivers.
+ *
+ * This function must be robust enough to be called twice.
+ */
+int genwqe_device_remove(struct genwqe_dev *cd)
+{
+ int rc;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (!genwqe_device_initialized(cd))
+ return 1;
+
+ genwqe_inform_and_stop_processes(cd);
+
+ /*
+ * We currently do wait until all file descriptors are
+ * closed. This leads to a problem when we abort the
+ * application, which will decrease this reference from
+ * 1/unused to 0/illegal and not from 2/used to 1/empty.
+ */
+ rc = atomic_read(&cd->cdev_genwqe.kobj.kref.refcount);
+ if (rc != 1) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
+ panic("Fatal err: cannot free resources with pending references!");
+ }
+
+ genqwe_exit_debugfs(cd);
+ device_destroy(cd->class_genwqe, cd->devnum_genwqe);
+ cdev_del(&cd->cdev_genwqe);
+ unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
+ cd->dev = NULL;
+
+ return 0;
+}
diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c
new file mode 100644
index 000000000000..a72a99266c3c
--- /dev/null
+++ b/drivers/misc/genwqe/card_sysfs.c
@@ -0,0 +1,288 @@
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Sysfs interfaces for the GenWQE card. There are attributes to query
+ * the version of the bitstream as well as some for the driver. For
+ * debugging, please also see the debugfs interfaces of this driver.
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/sysfs.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+
+#include "card_base.h"
+#include "card_ddcb.h"
+
+static const char * const genwqe_types[] = {
+ [GENWQE_TYPE_ALTERA_230] = "GenWQE4-230",
+ [GENWQE_TYPE_ALTERA_530] = "GenWQE4-530",
+ [GENWQE_TYPE_ALTERA_A4] = "GenWQE5-A4",
+ [GENWQE_TYPE_ALTERA_A7] = "GenWQE5-A7",
+};
+
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+ const char *cs[GENWQE_CARD_STATE_MAX] = { "unused", "used", "error" };
+
+ return sprintf(buf, "%s\n", cs[cd->card_state]);
+}
+static DEVICE_ATTR_RO(status);
+
+static ssize_t appid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ char app_name[5];
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ genwqe_read_app_id(cd, app_name, sizeof(app_name));
+ return sprintf(buf, "%s\n", app_name);
+}
+static DEVICE_ATTR_RO(appid);
+
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u64 slu_id, app_id;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG);
+ app_id = __genwqe_readq(cd, IO_APP_UNITCFG);
+
+ return sprintf(buf, "%016llx.%016llx\n", slu_id, app_id);
+}
+static DEVICE_ATTR_RO(version);
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u8 card_type;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ card_type = genwqe_card_type(cd);
+ return sprintf(buf, "%s\n", (card_type >= ARRAY_SIZE(genwqe_types)) ?
+ "invalid" : genwqe_types[card_type]);
+}
+static DEVICE_ATTR_RO(type);
+
+static ssize_t driver_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", DRV_VERS_STRING);
+}
+static DEVICE_ATTR_RO(driver);
+
+static ssize_t tempsens_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u64 tempsens;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ tempsens = __genwqe_readq(cd, IO_SLU_TEMPERATURE_SENSOR);
+ return sprintf(buf, "%016llx\n", tempsens);
+}
+static DEVICE_ATTR_RO(tempsens);
+
+static ssize_t freerunning_timer_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 t;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ t = __genwqe_readq(cd, IO_SLC_FREE_RUNNING_TIMER);
+ return sprintf(buf, "%016llx\n", t);
+}
+static DEVICE_ATTR_RO(freerunning_timer);
+
+static ssize_t queue_working_time_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 t;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ t = __genwqe_readq(cd, IO_SLC_QUEUE_WTIME);
+ return sprintf(buf, "%016llx\n", t);
+}
+static DEVICE_ATTR_RO(queue_working_time);
+
+static ssize_t base_clock_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 base_clock;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ base_clock = genwqe_base_clock_frequency(cd);
+ return sprintf(buf, "%lld\n", base_clock);
+}
+static DEVICE_ATTR_RO(base_clock);
+
+/**
+ * curr_bitstream_show() - Show the current bitstream id
+ *
+ * There is a bug in some old versions of the CPLD which selects the
+ * bitstream, causing the IO_SLU_BITSTREAM register to report
+ * unreliable data in very rare cases. This makes this sysfs
+ * entry unreliable until a new CPLD version is used.
+ *
+ * Unfortunately there is no automatic way yet to query the CPLD
+ * version, such that you need to manually ensure via programming
+ * tools that you have a recent version of the CPLD software.
+ *
+ * The proposed circumvention is to use a special recovery bitstream
+ * on the backup partition (0) to identify problems while loading the
+ * image.
+ */
+static ssize_t curr_bitstream_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int curr_bitstream;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ curr_bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
+ return sprintf(buf, "%d\n", curr_bitstream);
+}
+static DEVICE_ATTR_RO(curr_bitstream);
+
+/**
+ * next_bitstream_show() - Show the next activated bitstream
+ *
+ * IO_SLC_CFGREG_SOFTRESET: This register can only be accessed by the PF.
+ */
+static ssize_t next_bitstream_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int next_bitstream;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ switch ((cd->softreset & 0xc) >> 2) {
+ case 0x2:
+ next_bitstream = 0;
+ break;
+ case 0x3:
+ next_bitstream = 1;
+ break;
+ default:
+ next_bitstream = -1;
+ break; /* error */
+ }
+ return sprintf(buf, "%d\n", next_bitstream);
+}
+
+static ssize_t next_bitstream_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int partition;
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+ if (kstrtoint(buf, 0, &partition) < 0)
+ return -EINVAL;
+
+ switch (partition) {
+ case 0x0:
+ cd->softreset = 0x78;
+ break;
+ case 0x1:
+ cd->softreset = 0x7c;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset);
+ return count;
+}
+static DEVICE_ATTR_RW(next_bitstream);
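+
+/*
+ * Usage sketch (editorial, not part of the driver): the store routine
+ * above accepts only 0 or 1 and translates the partition into the
+ * softreset codes 0x78/0x7c, while the show routine decodes bits 3:2
+ * of that value back into 0, 1 or -1. The sysfs path below assumes
+ * card 0 and a class named "genwqe":
+ *
+ *   echo 1 > /sys/class/genwqe/genwqe0_card/next_bitstream
+ *   cat /sys/class/genwqe/genwqe0_card/next_bitstream   (prints 1)
+ */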
+
+/*
+ * Create device_attribute structures / params: name, mode, show, store,
+ * plus an additional flag for whether the attribute is valid in a VF
+ */
+static struct attribute *genwqe_attributes[] = {
+ &dev_attr_tempsens.attr,
+ &dev_attr_next_bitstream.attr,
+ &dev_attr_curr_bitstream.attr,
+ &dev_attr_base_clock.attr,
+ &dev_attr_driver.attr,
+ &dev_attr_type.attr,
+ &dev_attr_version.attr,
+ &dev_attr_appid.attr,
+ &dev_attr_status.attr,
+ &dev_attr_freerunning_timer.attr,
+ &dev_attr_queue_working_time.attr,
+ NULL,
+};
+
+static struct attribute *genwqe_normal_attributes[] = {
+ &dev_attr_driver.attr,
+ &dev_attr_type.attr,
+ &dev_attr_version.attr,
+ &dev_attr_appid.attr,
+ &dev_attr_status.attr,
+ &dev_attr_freerunning_timer.attr,
+ &dev_attr_queue_working_time.attr,
+ NULL,
+};
+
+/**
+ * genwqe_is_visible() - Determine if sysfs attribute should be visible or not
+ *
+ * VFs have restricted mmio capabilities, so not all sysfs entries
+ * are allowed in VFs.
+ */
+static umode_t genwqe_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ unsigned int j;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct genwqe_dev *cd = dev_get_drvdata(dev);
+ umode_t mode = attr->mode;
+
+ if (genwqe_is_privileged(cd))
+ return mode;
+
+ for (j = 0; genwqe_normal_attributes[j] != NULL; j++)
+ if (genwqe_normal_attributes[j] == attr)
+ return mode;
+
+ return 0;
+}
+
+static struct attribute_group genwqe_attribute_group = {
+ .is_visible = genwqe_is_visible,
+ .attrs = genwqe_attributes,
+};
+
+const struct attribute_group *genwqe_attribute_groups[] = {
+ &genwqe_attribute_group,
+ NULL,
+};
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
new file mode 100644
index 000000000000..6b1a6ef9f1a8
--- /dev/null
+++ b/drivers/misc/genwqe/card_utils.c
@@ -0,0 +1,944 @@
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Miscellaneous functionality used by the other GenWQE driver parts.
+ */
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/page-flags.h>
+#include <linux/scatterlist.h>
+#include <linux/hugetlb.h>
+#include <linux/iommu.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <asm/pgtable.h>
+
+#include "genwqe_driver.h"
+#include "card_base.h"
+#include "card_ddcb.h"
+
+/**
+ * __genwqe_writeq() - Write 64-bit register
+ * @cd: genwqe device descriptor
+ * @byte_offs: byte offset within BAR
+ * @val: 64-bit value
+ *
+ * Return: 0 if success; < 0 if error
+ */
+int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val)
+{
+ if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
+ return -EIO;
+
+ if (cd->mmio == NULL)
+ return -EIO;
+
+ __raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs);
+ return 0;
+}
+
+/**
+ * __genwqe_readq() - Read 64-bit register
+ * @cd: genwqe device descriptor
+ * @byte_offs: offset within BAR
+ *
+ * Return: value from register
+ */
+u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs)
+{
+ if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
+ return 0xffffffffffffffffull;
+
+ if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) &&
+ (byte_offs == IO_SLC_CFGREG_GFIR))
+ return 0x000000000000ffffull;
+
+ if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) &&
+ (byte_offs == IO_SLC_CFGREG_GFIR))
+ return 0x00000000ffff0000ull;
+
+ if (cd->mmio == NULL)
+ return 0xffffffffffffffffull;
+
+ return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs));
+}
+
+/**
+ * __genwqe_writel() - Write 32-bit register
+ * @cd: genwqe device descriptor
+ * @byte_offs: byte offset within BAR
+ * @val: 32-bit value
+ *
+ * Return: 0 if success; < 0 if error
+ */
+int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val)
+{
+ if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
+ return -EIO;
+
+ if (cd->mmio == NULL)
+ return -EIO;
+
+ __raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs);
+ return 0;
+}
+
+/**
+ * __genwqe_readl() - Read 32-bit register
+ * @cd: genwqe device descriptor
+ * @byte_offs: offset within BAR
+ *
+ * Return: Value from register
+ */
+u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
+{
+ if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
+ return 0xffffffff;
+
+ if (cd->mmio == NULL)
+ return 0xffffffff;
+
+ return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs));
+}
+
+/**
+ * genwqe_read_app_id() - Extract app_id
+ *
+ * app_unitcfg needs to be filled with valid data first
+ */
+int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
+{
+ int i, j;
+ u32 app_id = (u32)cd->app_unitcfg;
+
+ memset(app_name, 0, len);
+ for (i = 0, j = 0; j < min(len, 4); j++) {
+ char ch = (char)((app_id >> (24 - j*8)) & 0xff);
+ if (ch == ' ')
+ continue;
+ app_name[i++] = isprint(ch) ? ch : 'X';
+ }
+ return i;
+}
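+
+/*
+ * Worked example (editorial): the application id is packed as four
+ * ASCII bytes in the low 32 bits of app_unitcfg, most significant
+ * byte first. A hypothetical value of 0x475a4950 would read back as
+ * "GZIP"; blanks are skipped and non-printable bytes become 'X'.
+ */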
+
+/**
+ * genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations
+ *
+ * Existing kernel functions seem to use a different polynomial,
+ * therefore we could not use them here.
+ *
+ * Genwqe's Polynomial = 0x20044009
+ */
+#define CRC32_POLYNOMIAL 0x20044009
+static u32 crc32_tab[256]; /* crc32 lookup table */
+
+void genwqe_init_crc32(void)
+{
+ int i, j;
+ u32 crc;
+
+ for (i = 0; i < 256; i++) {
+ crc = i << 24;
+ for (j = 0; j < 8; j++) {
+ if (crc & 0x80000000)
+ crc = (crc << 1) ^ CRC32_POLYNOMIAL;
+ else
+ crc = (crc << 1);
+ }
+ crc32_tab[i] = crc;
+ }
+}
+
+/**
+ * genwqe_crc32() - Generate 32-bit crc as required for DDCBs
+ * @buff: pointer to data buffer
+ * @len: length of data for calculation
+ * @init: initial crc (0xffffffff at start)
+ *
+ * polynomial = x^32 + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
+ *
+ * Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should
+ * result in a crc32 of 0xf33cb7d3.
+ *
+ * The existing kernel crc functions did not cover this polynomial yet.
+ *
+ * Return: crc32 checksum.
+ */
+u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
+{
+ int i;
+ u32 crc;
+
+ crc = init;
+ while (len--) {
+ i = ((crc >> 24) ^ *buff++) & 0xFF;
+ crc = (crc << 8) ^ crc32_tab[i];
+ }
+ return crc;
+}
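+
+/*
+ * Illustrative sketch (editorial, not part of the driver): how the two
+ * CRC helpers above fit together. The expected value 0xf33cb7d3 is the
+ * one documented in the genwqe_crc32() comment; the function name is
+ * made up for this example.
+ */
+static void __maybe_unused genwqe_crc32_example(void)
+{
+ u8 data[4] = { 0x01, 0x02, 0x03, 0x04 };
+ u32 crc;
+
+ genwqe_init_crc32(); /* fill crc32_tab[] once */
+ crc = genwqe_crc32(data, sizeof(data), 0xffffffff);
+ WARN_ON(crc != 0xf33cb7d3);
+}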
+
+void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
+ dma_addr_t *dma_handle)
+{
+ if (get_order(size) > MAX_ORDER)
+ return NULL;
+
+ return pci_alloc_consistent(cd->pci_dev, size, dma_handle);
+}
+
+void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ if (vaddr == NULL)
+ return;
+
+ pci_free_consistent(cd->pci_dev, size, vaddr, dma_handle);
+}
+
+static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,
+ int num_pages)
+{
+ int i;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {
+ pci_unmap_page(pci_dev, dma_list[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dma_list[i] = 0x0;
+ }
+}
+
+static int genwqe_map_pages(struct genwqe_dev *cd,
+ struct page **page_list, int num_pages,
+ dma_addr_t *dma_list)
+{
+ int i;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ /* establish DMA mapping for requested pages */
+ for (i = 0; i < num_pages; i++) {
+ dma_addr_t daddr;
+
+ dma_list[i] = 0x0;
+ daddr = pci_map_page(pci_dev, page_list[i],
+ 0, /* map_offs */
+ PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL); /* FIXME rd/rw */
+
+ if (pci_dma_mapping_error(pci_dev, daddr)) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: no dma addr daddr=%016llx!\n",
+ __func__, (long long)daddr);
+ goto err;
+ }
+
+ dma_list[i] = daddr;
+ }
+ return 0;
+
+ err:
+ genwqe_unmap_pages(cd, dma_list, num_pages);
+ return -EIO;
+}
+
+static int genwqe_sgl_size(int num_pages)
+{
+ int len, num_tlb = num_pages / 7;
+
+ len = sizeof(struct sg_entry) * (num_pages + num_tlb + 1);
+ return roundup(len, PAGE_SIZE);
+}
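+
+/*
+ * Worked example (editorial): every block of 8 sg_entries spends one
+ * entry on chaining, hence the num_pages / 7 extra entries. Assuming
+ * a 16-byte struct sg_entry and 4 KiB pages, num_pages = 256 gives
+ * num_tlb = 36 and len = 16 * (256 + 36 + 1) = 4688 bytes, which
+ * roundup() turns into 8192 bytes, i.e. two pages.
+ */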
+
+struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages,
+ dma_addr_t *dma_addr, size_t *sgl_size)
+{
+ struct pci_dev *pci_dev = cd->pci_dev;
+ struct sg_entry *sgl;
+
+ *sgl_size = genwqe_sgl_size(num_pages);
+ if (get_order(*sgl_size) > MAX_ORDER) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: too much memory requested!\n", __func__);
+ return NULL;
+ }
+
+ sgl = __genwqe_alloc_consistent(cd, *sgl_size, dma_addr);
+ if (sgl == NULL) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: no memory available!\n", __func__);
+ return NULL;
+ }
+
+ return sgl;
+}
+
+int genwqe_setup_sgl(struct genwqe_dev *cd,
+ unsigned long offs,
+ unsigned long size,
+ struct sg_entry *sgl,
+ dma_addr_t dma_addr, size_t sgl_size,
+ dma_addr_t *dma_list, int page_offs, int num_pages)
+{
+ int i = 0, j = 0, p;
+ unsigned long dma_offs, map_offs;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ dma_addr_t prev_daddr = 0;
+ struct sg_entry *s, *last_s = NULL;
+
+ /* sanity checks */
+ if (offs > PAGE_SIZE) {
+ dev_err(&pci_dev->dev,
+ "[%s] too large start offs %08lx\n", __func__, offs);
+ return -EFAULT;
+ }
+ if (sgl_size < genwqe_sgl_size(num_pages)) {
+ dev_err(&pci_dev->dev,
+ "[%s] sgl_size too small %08lx for %d pages\n",
+ __func__, sgl_size, num_pages);
+ return -EFAULT;
+ }
+
+ dma_offs = 128; /* next block if needed/dma_offset */
+ map_offs = offs; /* offset in first page */
+
+ s = &sgl[0]; /* first set of 8 entries */
+ p = 0; /* page */
+ while (p < num_pages) {
+ dma_addr_t daddr;
+ unsigned int size_to_map;
+
+ /* always write the chaining entry, cleanup is done later */
+ j = 0;
+ s[j].target_addr = cpu_to_be64(dma_addr + dma_offs);
+ s[j].len = cpu_to_be32(128);
+ s[j].flags = cpu_to_be32(SG_CHAINED);
+ j++;
+
+ while (j < 8) {
+ /* DMA mapping for requested page, offs, size */
+ size_to_map = min(size, PAGE_SIZE - map_offs);
+ daddr = dma_list[page_offs + p] + map_offs;
+ size -= size_to_map;
+ map_offs = 0;
+
+ if (prev_daddr == daddr) {
+ u32 prev_len = be32_to_cpu(last_s->len);
+
+ /* pr_info("daddr combining: "
+ "%016llx/%08x -> %016llx\n",
+ prev_daddr, prev_len, daddr); */
+
+ last_s->len = cpu_to_be32(prev_len +
+ size_to_map);
+
+ p++; /* process next page */
+ if (p == num_pages)
+ goto fixup; /* nothing to do */
+
+ prev_daddr = daddr + size_to_map;
+ continue;
+ }
+
+ /* start new entry */
+ s[j].target_addr = cpu_to_be64(daddr);
+ s[j].len = cpu_to_be32(size_to_map);
+ s[j].flags = cpu_to_be32(SG_DATA);
+ prev_daddr = daddr + size_to_map;
+ last_s = &s[j];
+ j++;
+
+ p++; /* process next page */
+ if (p == num_pages)
+ goto fixup; /* nothing to do */
+ }
+ dma_offs += 128;
+ s += 8; /* continue 8 elements further */
+ }
+ fixup:
+ if (j == 1) { /* combining happened on last entry! */
+ s -= 8; /* full shift needed on previous sgl block */
+ j = 7; /* shift all elements */
+ }
+
+ for (i = 0; i < j; i++) /* move elements 1 up */
+ s[i] = s[i + 1];
+
+ s[i].target_addr = cpu_to_be64(0);
+ s[i].len = cpu_to_be32(0);
+ s[i].flags = cpu_to_be32(SG_END_LIST);
+ return 0;
+}
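+
+/*
+ * Layout note (editorial): the list built above is organised in blocks
+ * of 8 entries (128 bytes per block, hence the dma_offs steps of 128,
+ * assuming 16-byte entries). Entry 0 of each block is an SG_CHAINED
+ * pointer to the next block, entries 1..7 carry SG_DATA, pages with
+ * contiguous DMA addresses are merged into one entry, and the fixup
+ * step rewrites the final entry as SG_END_LIST.
+ */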
+
+void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
+ dma_addr_t dma_addr, size_t size)
+{
+ __genwqe_free_consistent(cd, size, sg_list, dma_addr);
+}
+
+/**
+ * free_user_pages() - Give pinned pages back
+ *
+ * Documentation of get_user_pages is in mm/memory.c:
+ *
+ * If the page is written to, set_page_dirty (or set_page_dirty_lock,
+ * as appropriate) must be called after the page is finished with, and
+ * before put_page is called.
+ *
+ * FIXME Could be of use to others and might belong in the generic
+ * code, if others agree. E.g.
+ * ll_free_user_pages in drivers/staging/lustre/lustre/llite/rw26.c
+ * ceph_put_page_vector in net/ceph/pagevec.c
+ * maybe more?
+ */
+static int free_user_pages(struct page **page_list, unsigned int nr_pages,
+ int dirty)
+{
+ unsigned int i;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (page_list[i] != NULL) {
+ if (dirty)
+ set_page_dirty_lock(page_list[i]);
+ put_page(page_list[i]);
+ }
+ }
+ return 0;
+}
+
+/**
+ * genwqe_user_vmap() - Map user-space memory to virtual kernel memory
+ * @cd: pointer to genwqe device
+ * @m: mapping params
+ * @uaddr: user virtual address
+ * @size: size of memory to be mapped
+ *
+ * We need to think about how we could speed this up. Of course it is
+ * not a good idea to do this over and over again, as we are
+ * currently doing. Nevertheless, I am curious where on the path
+ * the performance is spent. Most probably within the memory
+ * allocation functions, but maybe also in the DMA mapping code.
+ *
+ * Restrictions: The maximum size of the possible mapping currently depends
+ * on the amount of memory we can get using kzalloc() for the
+ * page_list and pci_alloc_consistent for the sg_list.
+ * The sg_list is currently itself not scattered, which could
+ * be fixed with some effort. The page_list must be split into
+ * PAGE_SIZE chunks too. All that will make the complicated
+ * code more complicated.
+ *
+ * Return: 0 if success
+ */
+int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
+ unsigned long size, struct ddcb_requ *req)
+{
+ int rc = -EINVAL;
+ unsigned long data, offs;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if ((uaddr == NULL) || (size == 0)) {
+ m->size = 0; /* mark unused and not added */
+ return -EINVAL;
+ }
+ m->u_vaddr = uaddr;
+ m->size = size;
+
+ /* determine space needed for page_list. */
+ data = (unsigned long)uaddr;
+ offs = offset_in_page(data);
+ m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);
+
+ m->page_list = kcalloc(m->nr_pages,
+ sizeof(struct page *) + sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!m->page_list) {
+ dev_err(&pci_dev->dev, "err: alloc page_list failed\n");
+ m->nr_pages = 0;
+ m->u_vaddr = NULL;
+ m->size = 0; /* mark unused and not added */
+ return -ENOMEM;
+ }
+ m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);
+
+ /* pin user pages in memory */
+ rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
+ m->nr_pages,
+ 1, /* write by caller */
+ m->page_list); /* ptrs to pages */
+
+ /* assumption: get_user_pages can be killed by signals. */
+ if (rc < m->nr_pages) {
+ free_user_pages(m->page_list, rc, 0);
+ rc = -EFAULT;
+ goto fail_get_user_pages;
+ }
+
+ rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
+ if (rc != 0)
+ goto fail_free_user_pages;
+
+ return 0;
+
+ fail_free_user_pages:
+ free_user_pages(m->page_list, m->nr_pages, 0);
+
+ fail_get_user_pages:
+ kfree(m->page_list);
+ m->page_list = NULL;
+ m->dma_list = NULL;
+ m->nr_pages = 0;
+ m->u_vaddr = NULL;
+ m->size = 0; /* mark unused and not added */
+ return rc;
+}
+
+/**
+ * genwqe_user_vunmap() - Undo mapping of user-space mem to virtual kernel
+ * memory
+ * @cd: pointer to genwqe device
+ * @m: mapping params
+ */
+int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
+ struct ddcb_requ *req)
+{
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (!dma_mapping_used(m)) {
+ dev_err(&pci_dev->dev, "[%s] err: mapping %p not used!\n",
+ __func__, m);
+ return -EINVAL;
+ }
+
+ if (m->dma_list)
+ genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
+
+ if (m->page_list) {
+ free_user_pages(m->page_list, m->nr_pages, 1);
+
+ kfree(m->page_list);
+ m->page_list = NULL;
+ m->dma_list = NULL;
+ m->nr_pages = 0;
+ }
+
+ m->u_vaddr = NULL;
+ m->size = 0; /* mark as unused and not added */
+ return 0;
+}
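+
+/*
+ * Illustrative sketch (editorial, not part of the driver): the intended
+ * pin/map/unmap life cycle of the two helpers above. The function name
+ * is made up; NULL is passed for the ddcb_requ as the helpers do not
+ * require one here.
+ */
+static int __maybe_unused genwqe_vmap_example(struct genwqe_dev *cd,
+ void *uaddr, unsigned long size)
+{
+ struct dma_mapping m = {};
+ int rc;
+
+ rc = genwqe_user_vmap(cd, &m, uaddr, size, NULL);
+ if (rc)
+ return rc;
+
+ /* m.dma_list[] now holds one DMA address per pinned page */
+
+ return genwqe_user_vunmap(cd, &m, NULL);
+}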
+
+/**
+ * genwqe_card_type() - Get chip type from the SLU Configuration Register
+ * @cd: pointer to the genwqe device descriptor
+ * Return: 0: Altera Stratix-IV 230
+ * 1: Altera Stratix-IV 530
+ * 2: Altera Stratix-V A4
+ * 3: Altera Stratix-V A7
+ */
+u8 genwqe_card_type(struct genwqe_dev *cd)
+{
+ u64 card_type = cd->slu_unitcfg;
+ return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20);
+}
+
+/**
+ * genwqe_card_reset() - Reset the card
+ * @cd: pointer to the genwqe device descriptor
+ */
+int genwqe_card_reset(struct genwqe_dev *cd)
+{
+ u64 softrst;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (!genwqe_is_privileged(cd))
+ return -ENODEV;
+
+ /* new SL */
+ __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull);
+ msleep(1000);
+ __genwqe_readq(cd, IO_HSU_FIR_CLR);
+ __genwqe_readq(cd, IO_APP_FIR_CLR);
+ __genwqe_readq(cd, IO_SLU_FIR_CLR);
+
+ /*
+ * Read-modify-write to preserve the stealth bits
+ *
+ * For SL >= 039, Stealth WE bit allows removing
+ * the read-modify-write.
+ * r-m-w may require a mask 0x3C to avoid hitting hard
+ * reset again for error reset (should be 0, chicken).
+ */
+ softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull;
+ __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull);
+
+ /* give ERRORRESET some time to finish */
+ msleep(50);
+
+ if (genwqe_need_err_masking(cd)) {
+ dev_info(&pci_dev->dev,
+ "[%s] masking errors for old bitstreams\n", __func__);
+ __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
+ }
+ return 0;
+}
+
+int genwqe_read_softreset(struct genwqe_dev *cd)
+{
+ u64 bitstream;
+
+ if (!genwqe_is_privileged(cd))
+ return -ENODEV;
+
+ bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
+ cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull;
+ return 0;
+}
+
+/**
+ * genwqe_set_interrupt_capability() - Configure MSI capability structure
+ * @cd: pointer to the device
+ * Return: 0 if no error
+ */
+int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
+{
+ int rc;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ rc = pci_enable_msi_block(pci_dev, count);
+ if (rc == 0)
+ cd->flags |= GENWQE_FLAG_MSI_ENABLED;
+ return rc;
+}
+
+/**
+ * genwqe_reset_interrupt_capability() - Undo genwqe_set_interrupt_capability()
+ * @cd: pointer to the device
+ */
+void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
+{
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (cd->flags & GENWQE_FLAG_MSI_ENABLED) {
+ pci_disable_msi(pci_dev);
+ cd->flags &= ~GENWQE_FLAG_MSI_ENABLED;
+ }
+}
+
+/**
+ * set_reg_idx() - Fill array with data. Ignore illegal offsets.
+ * @cd: card device
+ * @r: debug register array
+ * @i: index to desired entry
+ * @m: maximum possible entries
+ * @addr: addr which is read
+ * @idx: index in debug array
+ * @val: read value
+ */
+static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
+ unsigned int *i, unsigned int m, u32 addr, u32 idx,
+ u64 val)
+{
+ if (WARN_ON_ONCE(*i >= m))
+ return -EFAULT;
+
+ r[*i].addr = addr;
+ r[*i].idx = idx;
+ r[*i].val = val;
+ ++*i;
+ return 0;
+}
+
+static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r,
+ unsigned int *i, unsigned int m, u32 addr, u64 val)
+{
+ return set_reg_idx(cd, r, i, m, addr, 0, val);
+}
+
+int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
+ unsigned int max_regs, int all)
+{
+ unsigned int i, j, idx = 0;
+ u32 ufir_addr, ufec_addr, sfir_addr, sfec_addr;
+ u64 gfir, sluid, appid, ufir, ufec, sfir, sfec;
+
+ /* Global FIR */
+ gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir);
+
+ /* UnitCfg for SLU */
+ sluid = __genwqe_readq(cd, IO_SLU_UNITCFG); /* 0x00000000 */
+ set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid);
+
+ /* UnitCfg for APP */
+ appid = __genwqe_readq(cd, IO_APP_UNITCFG); /* 0x02000000 */
+ set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid);
+
+ /* Check all chip Units */
+ for (i = 0; i < GENWQE_MAX_UNITS; i++) {
+
+ /* Unit FIR */
+ ufir_addr = (i << 24) | 0x008;
+ ufir = __genwqe_readq(cd, ufir_addr);
+ set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir);
+
+ /* Unit FEC */
+ ufec_addr = (i << 24) | 0x018;
+ ufec = __genwqe_readq(cd, ufec_addr);
+ set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec);
+
+ for (j = 0; j < 64; j++) {
+ /* wherever there is a primary 1, read the secondary */
+ if (!all && (!(ufir & (1ull << j))))
+ continue;
+
+ sfir_addr = (i << 24) | (0x100 + 8 * j);
+ sfir = __genwqe_readq(cd, sfir_addr);
+ set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir);
+
+ sfec_addr = (i << 24) | (0x300 + 8 * j);
+ sfec = __genwqe_readq(cd, sfec_addr);
+ set_reg(cd, regs, &idx, max_regs, sfec_addr, sfec);
+ }
+ }
+
+ /* fill with invalid data until end */
+ for (i = idx; i < max_regs; i++) {
+ regs[i].addr = 0xffffffff;
+ regs[i].val = 0xffffffffffffffffull;
+ }
+ return idx;
+}
+
+/**
+ * genwqe_ffdc_buff_size() - Calculates the number of dump registers
+ */
+int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
+{
+ int entries = 0, ring, traps, traces, trace_entries;
+ u32 eevptr_addr, l_addr, d_len, d_type;
+ u64 eevptr, val, addr;
+
+ eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
+ eevptr = __genwqe_readq(cd, eevptr_addr);
+
+ if ((eevptr != 0x0) && (eevptr != -1ull)) {
+ l_addr = GENWQE_UID_OFFS(uid) | eevptr;
+
+ while (1) {
+ val = __genwqe_readq(cd, l_addr);
+
+ if ((val == 0x0) || (val == -1ull))
+ break;
+
+ /* 38:24 */
+ d_len = (val & 0x0000007fff000000ull) >> 24;
+
+ /* 39 */
+ d_type = (val & 0x0000008000000000ull) >> 36;
+
+ if (d_type) { /* repeat */
+ entries += d_len;
+ } else { /* size in bytes! */
+ entries += d_len >> 3;
+ }
+
+ l_addr += 8;
+ }
+ }
+
+ for (ring = 0; ring < 8; ring++) {
+ addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
+ val = __genwqe_readq(cd, addr);
+
+ if ((val == 0x0ull) || (val == -1ull))
+ continue;
+
+ traps = (val >> 24) & 0xff;
+ traces = (val >> 16) & 0xff;
+ trace_entries = val & 0xffff;
+
+ entries += traps + (traces * trace_entries);
+ }
+ return entries;
+}
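+
+/*
+ * Format note (editorial): each 64-bit word of the extended error list
+ * walked above is a dump descriptor: bits 23:0 hold the address, bits
+ * 38:24 the length and bit 39 the type. With the type bit set, the
+ * length counts repeated reads of a single address; with it cleared,
+ * the length is in bytes and is divided by 8 to get the number of
+ * 64-bit registers.
+ */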
+
+/**
+ * genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure
+ */
+int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
+ struct genwqe_reg *regs, unsigned int max_regs)
+{
+ int i, traps, traces, trace, trace_entries, trace_entry, ring;
+ unsigned int idx = 0;
+ u32 eevptr_addr, l_addr, d_addr, d_len, d_type;
+ u64 eevptr, e, val, addr;
+
+ eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
+ eevptr = __genwqe_readq(cd, eevptr_addr);
+
+ if ((eevptr != 0x0) && (eevptr != 0xffffffffffffffffull)) {
+ l_addr = GENWQE_UID_OFFS(uid) | eevptr;
+ while (1) {
+ e = __genwqe_readq(cd, l_addr);
+ if ((e == 0x0) || (e == 0xffffffffffffffffull))
+ break;
+
+ d_addr = (e & 0x0000000000ffffffull); /* 23:0 */
+ d_len = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */
+ d_type = (e & 0x0000008000000000ull) >> 36; /* 39 */
+ d_addr |= GENWQE_UID_OFFS(uid);
+
+ if (d_type) {
+ for (i = 0; i < (int)d_len; i++) {
+ val = __genwqe_readq(cd, d_addr);
+ set_reg_idx(cd, regs, &idx, max_regs,
+ d_addr, i, val);
+ }
+ } else {
+ d_len >>= 3; /* Size in bytes! */
+ for (i = 0; i < (int)d_len; i++, d_addr += 8) {
+ val = __genwqe_readq(cd, d_addr);
+ set_reg_idx(cd, regs, &idx, max_regs,
+ d_addr, 0, val);
+ }
+ }
+ l_addr += 8;
+ }
+ }
+
+ /*
+ * To save time, there are only 6 traces populated on Uid=2,
+ * Ring=1, each with iters=512.
+ */
+ for (ring = 0; ring < 8; ring++) { /* 0 is fls, 1 is fds,
+ 2...7 are ASI rings */
+ addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
+ val = __genwqe_readq(cd, addr);
+
+ if ((val == 0x0ull) || (val == -1ull))
+ continue;
+
+ traps = (val >> 24) & 0xff; /* Number of Traps */
+ traces = (val >> 16) & 0xff; /* Number of Traces */
+ trace_entries = val & 0xffff; /* Entries per trace */
+
+ /* Note: This is a combined loop that dumps both the traps */
+ /* (for the trace == 0 case) as well as the traces 1 to */
+ /* 'traces'. */
+ for (trace = 0; trace <= traces; trace++) {
+ u32 diag_sel =
+ GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace);
+
+ addr = (GENWQE_UID_OFFS(uid) |
+ IO_EXTENDED_DIAG_SELECTOR);
+ __genwqe_writeq(cd, addr, diag_sel);
+
+ for (trace_entry = 0;
+ trace_entry < (trace ? trace_entries : traps);
+ trace_entry++) {
+ addr = (GENWQE_UID_OFFS(uid) |
+ IO_EXTENDED_DIAG_READ_MBX);
+ val = __genwqe_readq(cd, addr);
+ set_reg_idx(cd, regs, &idx, max_regs, addr,
+ (diag_sel<<16) | trace_entry, val);
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * genwqe_write_vreg() - Write register in virtual window
+ *
+ * Note, these registers are only accessible to the PF through the
+ * VF-window. They are not intended to be accessed by the VF.
+ */
+int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
+{
+ __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
+ __genwqe_writeq(cd, reg, val);
+ return 0;
+}
+
+/**
+ * genwqe_read_vreg() - Read register in virtual window
+ *
+ * Note, these registers are only accessible to the PF through the
+ * VF-window. They are not intended to be accessed by the VF.
+ */
+u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
+{
+ __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
+ return __genwqe_readq(cd, reg);
+}
+
+/**
+ * genwqe_base_clock_frequency() - Determine base clock frequency of the card
+ *
+ * Note: From a design perspective it turned out to be a bad idea to
+ * use codes here to specify the frequency/speed values. An old
+ * driver cannot understand new codes and is therefore always a
+ * problem. It is better to measure out the value or put the
+ * speed/frequency directly into a register which is always a valid
+ * value for old as well as for new software.
+ *
+ * Return: Card clock in MHz
+ */
+int genwqe_base_clock_frequency(struct genwqe_dev *cd)
+{
+ u16 speed; /* MHz MHz MHz MHz */
+ static const int speed_grade[] = { 250, 200, 166, 175 };
+
+ speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
+ if (speed >= ARRAY_SIZE(speed_grade))
+ return 0; /* illegal value */
+
+ return speed_grade[speed];
+}
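+
+/*
+ * Worked example (editorial): bits 31:28 of slu_unitcfg select the
+ * speed grade, so a card whose slu_unitcfg has 0x1 in those bits
+ * reports 200 MHz; values of 4 and above fall outside speed_grade[]
+ * and are reported as 0.
+ */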
+
+/**
+ * genwqe_stop_traps() - Stop traps
+ *
+ * Before reading out the analysis data, we need to stop the traps.
+ */
+void genwqe_stop_traps(struct genwqe_dev *cd)
+{
+ __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull);
+}
+
+/**
+ * genwqe_start_traps() - Start traps
+ *
+ * After having read the data, we can/must enable the traps again.
+ */
+void genwqe_start_traps(struct genwqe_dev *cd)
+{
+ __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull);
+
+ if (genwqe_need_err_masking(cd))
+ __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
+}
diff --git a/drivers/misc/genwqe/genwqe_driver.h b/drivers/misc/genwqe/genwqe_driver.h
new file mode 100644
index 000000000000..46e916b36c70
--- /dev/null
+++ b/drivers/misc/genwqe/genwqe_driver.h
@@ -0,0 +1,77 @@
+#ifndef __GENWQE_DRIVER_H__
+#define __GENWQE_DRIVER_H__
+
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/scatterlist.h>
+#include <linux/iommu.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+
+#include <asm/byteorder.h>
+#include <linux/genwqe/genwqe_card.h>
+
+#define DRV_VERS_STRING "2.0.0"
+
+/*
+ * Static minor number assignment, until we decide/implement
+ * something dynamic.
+ */
+#define GENWQE_MAX_MINOR 128 /* up to 128 possible genwqe devices */
+
+/**
+ * genwqe_requ_alloc() - Allocate a new DDCB execution request
+ *
+ * This data structure contains the user-visible fields of the DDCB
+ * to be executed.
+ *
+ * Return: ptr to genwqe_ddcb_cmd data structure
+ */
+struct genwqe_ddcb_cmd *ddcb_requ_alloc(void);
+
+/**
+ * ddcb_requ_free() - Free DDCB execution request.
+ * @req: ptr to genwqe_ddcb_cmd data structure.
+ */
+void ddcb_requ_free(struct genwqe_ddcb_cmd *req);
+
+u32 genwqe_crc32(u8 *buff, size_t len, u32 init);
+
+static inline void genwqe_hexdump(struct pci_dev *pci_dev,
+ const void *buff, unsigned int size)
+{
+ char prefix[32];
+
+ scnprintf(prefix, sizeof(prefix), "%s %s: ",
+ GENWQE_DEVNAME, pci_name(pci_dev));
+
+ print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET, 16, 1, buff,
+ size, true);
+}
+
+#endif /* __GENWQE_DRIVER_H__ */
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index a2edb2ee0921..49c7a23f02fc 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -224,7 +224,7 @@ static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
#ifdef CONFIG_IDE
-int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
+static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
struct block_device *bdev, unsigned int cmd,
unsigned long arg)
{
@@ -334,9 +334,10 @@ static void execute_location(void *dst)
static void execute_user_location(void *dst)
{
+ /* Intentionally crossing kernel/user memory boundary. */
void (*func)(void) = dst;
- if (copy_to_user(dst, do_nothing, EXEC_SIZE))
+ if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
return;
func();
}
@@ -408,6 +409,8 @@ static void lkdtm_do_action(enum ctype which)
case CT_SPINLOCKUP:
/* Must be called twice to trigger. */
spin_lock(&lock_me_up);
+ /* Let sparse know we intended to exit holding the lock. */
+ __release(&lock_me_up);
break;
case CT_HUNG_TASK:
set_current_state(TASK_UNINTERRUPTIBLE);
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index d22c6864508b..2fad84432829 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -177,7 +177,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
unsigned long timeout;
int i;
- /* Only Posible if we are in timeout */
+ /* Only possible if we are in timeout */
if (!cl || cl != &dev->iamthif_cl) {
dev_dbg(&dev->pdev->dev, "bad file ext.\n");
return -ETIMEDOUT;
@@ -249,7 +249,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
cb->response_buffer.size);
dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
- /* length is being turncated to PAGE_SIZE, however,
+ /* length is being truncated to PAGE_SIZE, however,
* the buf_idx may point beyond */
length = min_t(size_t, length, (cb->buf_idx - *offset));
@@ -316,6 +316,7 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
mei_hdr.host_addr = dev->iamthif_cl.host_client_id;
mei_hdr.me_addr = dev->iamthif_cl.me_client_id;
mei_hdr.reserved = 0;
+ mei_hdr.internal = 0;
dev->iamthif_msg_buf_index += mei_hdr.length;
ret = mei_write_message(dev, &mei_hdr, dev->iamthif_msg_buf);
if (ret)
@@ -477,6 +478,7 @@ int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
mei_hdr.host_addr = cl->host_client_id;
mei_hdr.me_addr = cl->me_client_id;
mei_hdr.reserved = 0;
+ mei_hdr.internal = 0;
if (*slots >= msg_slots) {
mei_hdr.length = len;
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 87c96e4669e2..9b809cfc2899 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -154,7 +154,7 @@ int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
return 0;
}
/**
- * mei_io_cb_alloc_resp_buf - allocate respose buffer
+ * mei_io_cb_alloc_resp_buf - allocate response buffer
*
* @cb: io callback structure
* @length: size of the buffer
@@ -207,7 +207,7 @@ int mei_cl_flush_queues(struct mei_cl *cl)
/**
- * mei_cl_init - initializes intialize cl.
+ * mei_cl_init - initializes cl.
*
* @cl: host client to be initialized
* @dev: mei device
@@ -263,10 +263,10 @@ struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
return NULL;
}
-/** mei_cl_link: allocte host id in the host map
+/** mei_cl_link: allocate host id in the host map
*
* @cl - host client
- * @id - fixed host id or -1 for genereting one
+ * @id - fixed host id or -1 for generic one
*
* returns 0 on success
* -EINVAL on incorrect values
@@ -282,19 +282,19 @@ int mei_cl_link(struct mei_cl *cl, int id)
dev = cl->dev;
- /* If Id is not asigned get one*/
+ /* If Id is not assigned get one*/
if (id == MEI_HOST_CLIENT_ID_ANY)
id = find_first_zero_bit(dev->host_clients_map,
MEI_CLIENTS_MAX);
if (id >= MEI_CLIENTS_MAX) {
- dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ;
+ dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
return -EMFILE;
}
open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
- dev_err(&dev->pdev->dev, "open_handle_count exceded %d",
+ dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
MEI_MAX_OPEN_HANDLE_COUNT);
return -EMFILE;
}
@@ -344,8 +344,6 @@ int mei_cl_unlink(struct mei_cl *cl)
cl->state = MEI_FILE_INITIALIZING;
- list_del_init(&cl->link);
-
return 0;
}
@@ -372,13 +370,14 @@ void mei_host_client_init(struct work_struct *work)
}
dev->dev_state = MEI_DEV_ENABLED;
+ dev->reset_count = 0;
mutex_unlock(&dev->device_lock);
}
/**
- * mei_cl_disconnect - disconnect host clinet form the me one
+ * mei_cl_disconnect - disconnect host client from the me one
*
* @cl: host client
*
@@ -457,7 +456,7 @@ free:
*
* @cl: private data of the file object
*
- * returns ture if other client is connected, 0 - otherwise.
+ * returns true if other client is connected, false - otherwise.
*/
bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
@@ -481,7 +480,7 @@ bool mei_cl_is_other_connecting(struct mei_cl *cl)
}
/**
- * mei_cl_connect - connect host clinet to the me one
+ * mei_cl_connect - connect host client to the me one
*
* @cl: host client
*
@@ -729,6 +728,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
mei_hdr.host_addr = cl->host_client_id;
mei_hdr.me_addr = cl->me_client_id;
mei_hdr.reserved = 0;
+ mei_hdr.internal = cb->internal;
if (*slots >= msg_slots) {
mei_hdr.length = len;
@@ -775,7 +775,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
* @cl: host client
* @cl: write callback with filled data
*
- * returns numbe of bytes sent on success, <0 on failure.
+ * returns number of bytes sent on success, <0 on failure.
*/
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
@@ -828,6 +828,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
mei_hdr.host_addr = cl->host_client_id;
mei_hdr.me_addr = cl->me_client_id;
mei_hdr.reserved = 0;
+ mei_hdr.internal = cb->internal;
rets = mei_write_message(dev, &mei_hdr, buf->data);
@@ -907,7 +908,6 @@ void mei_cl_all_disconnect(struct mei_device *dev)
list_for_each_entry_safe(cl, next, &dev->file_list, link) {
cl->state = MEI_FILE_DISCONNECTED;
cl->mei_flow_ctrl_creds = 0;
- cl->read_cb = NULL;
cl->timer_count = 0;
}
}
@@ -941,8 +941,16 @@ void mei_cl_all_wakeup(struct mei_device *dev)
void mei_cl_all_write_clear(struct mei_device *dev)
{
struct mei_cl_cb *cb, *next;
+ struct list_head *list;
+
+ list = &dev->write_list.list;
+ list_for_each_entry_safe(cb, next, list, list) {
+ list_del(&cb->list);
+ mei_io_cb_free(cb);
+ }
- list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
+ list = &dev->write_waiting_list.list;
+ list_for_each_entry_safe(cb, next, list, list) {
list_del(&cb->list);
mei_io_cb_free(cb);
}
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index e3870f22d238..a3ae154444b2 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -43,7 +43,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
mutex_lock(&dev->device_lock);
- /* if the driver is not enabled the list won't b consitent */
+ /* if the driver is not enabled the list won't be consistent */
if (dev->dev_state != MEI_DEV_ENABLED)
goto out;
@@ -101,7 +101,7 @@ static const struct file_operations mei_dbgfs_fops_devstate = {
/**
* mei_dbgfs_deregister - Remove the debugfs files and directories
- * @mei - pointer to mei device private dat
+ * @mei - pointer to mei device private data
*/
void mei_dbgfs_deregister(struct mei_device *dev)
{
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 9b3a0fb7f265..28cd74c073b9 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -28,9 +28,9 @@
*
* @dev: the device structure
*
- * returns none.
+ * returns 0 on success -ENOMEM on allocation failure
*/
-static void mei_hbm_me_cl_allocate(struct mei_device *dev)
+static int mei_hbm_me_cl_allocate(struct mei_device *dev)
{
struct mei_me_client *clients;
int b;
@@ -44,7 +44,7 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev)
dev->me_clients_num++;
if (dev->me_clients_num == 0)
- return;
+ return 0;
kfree(dev->me_clients);
dev->me_clients = NULL;
@@ -56,12 +56,10 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev)
sizeof(struct mei_me_client), GFP_KERNEL);
if (!clients) {
dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
- dev->dev_state = MEI_DEV_RESETTING;
- mei_reset(dev, 1);
- return;
+ return -ENOMEM;
}
dev->me_clients = clients;
- return;
+ return 0;
}
/**
@@ -85,12 +83,12 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
}
/**
- * same_disconn_addr - tells if they have the same address
+ * mei_hbm_cl_addr_equal - tells if they have the same address
*
- * @file: private data of the file object.
- * @disconn: disconnection request.
+ * @cl: - client
+ * @buf: buffer with cl header
*
- * returns true if addres are same
+ * returns true if addresses are the same
*/
static inline
bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf)
@@ -128,6 +126,17 @@ static bool is_treat_specially_client(struct mei_cl *cl,
return false;
}
+/**
+ * mei_hbm_idle - set hbm to idle state
+ *
+ * @dev: the device structure
+ */
+void mei_hbm_idle(struct mei_device *dev)
+{
+ dev->init_clients_timer = 0;
+ dev->hbm_state = MEI_HBM_IDLE;
+}
+
int mei_hbm_start_wait(struct mei_device *dev)
{
int ret;
@@ -137,7 +146,7 @@ int mei_hbm_start_wait(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
ret = wait_event_interruptible_timeout(dev->wait_recvd_msg,
dev->hbm_state == MEI_HBM_IDLE ||
- dev->hbm_state > MEI_HBM_START,
+ dev->hbm_state >= MEI_HBM_STARTED,
mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
mutex_lock(&dev->device_lock);
@@ -153,12 +162,15 @@ int mei_hbm_start_wait(struct mei_device *dev)
* mei_hbm_start_req - sends start request message.
*
* @dev: the device structure
+ *
+ * returns 0 on success and < 0 on failure
*/
int mei_hbm_start_req(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_host_version_request *start_req;
const size_t len = sizeof(struct hbm_host_version_request);
+ int ret;
mei_hbm_hdr(mei_hdr, len);
@@ -170,12 +182,13 @@ int mei_hbm_start_req(struct mei_device *dev)
start_req->host_version.minor_version = HBM_MINOR_VERSION;
dev->hbm_state = MEI_HBM_IDLE;
- if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
- dev_err(&dev->pdev->dev, "version message write failed\n");
- dev->dev_state = MEI_DEV_RESETTING;
- mei_reset(dev, 1);
- return -EIO;
+ ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "version message write failed: ret = %d\n",
+ ret);
+ return ret;
}
+
dev->hbm_state = MEI_HBM_START;
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
return 0;
@@ -186,13 +199,15 @@ int mei_hbm_start_req(struct mei_device *dev)
*
* @dev: the device structure
*
- * returns none.
+ * returns 0 on success and < 0 on failure
*/
-static void mei_hbm_enum_clients_req(struct mei_device *dev)
+static int mei_hbm_enum_clients_req(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_host_enum_request *enum_req;
const size_t len = sizeof(struct hbm_host_enum_request);
+ int ret;
+
/* enumerate clients */
mei_hbm_hdr(mei_hdr, len);
@@ -200,14 +215,15 @@ static void mei_hbm_enum_clients_req(struct mei_device *dev)
memset(enum_req, 0, len);
enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
- if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
- dev->dev_state = MEI_DEV_RESETTING;
- dev_err(&dev->pdev->dev, "enumeration request write failed.\n");
- mei_reset(dev, 1);
+ ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "enumeration request write failed: ret = %d.\n",
+ ret);
+ return ret;
}
dev->hbm_state = MEI_HBM_ENUM_CLIENTS;
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
- return;
+ return 0;
}
/**
@@ -215,7 +231,7 @@ static void mei_hbm_enum_clients_req(struct mei_device *dev)
*
* @dev: the device structure
*
- * returns none.
+ * returns 0 on success and < 0 on failure
*/
static int mei_hbm_prop_req(struct mei_device *dev)
@@ -226,7 +242,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
const size_t len = sizeof(struct hbm_props_request);
unsigned long next_client_index;
unsigned long client_num;
-
+ int ret;
client_num = dev->me_client_presentation_num;
@@ -253,12 +269,11 @@ static int mei_hbm_prop_req(struct mei_device *dev)
prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
prop_req->address = next_client_index;
- if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
- dev->dev_state = MEI_DEV_RESETTING;
- dev_err(&dev->pdev->dev, "properties request write failed\n");
- mei_reset(dev, 1);
-
- return -EIO;
+ ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "properties request write failed: ret = %d\n",
+ ret);
+ return ret;
}
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
@@ -268,7 +283,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
}
/**
- * mei_hbm_stop_req_prepare - perpare stop request message
+ * mei_hbm_stop_req_prepare - prepare stop request message
*
* @dev - mei device
* @mei_hdr - mei message header
@@ -289,7 +304,7 @@ static void mei_hbm_stop_req_prepare(struct mei_device *dev,
}
/**
- * mei_hbm_cl_flow_control_req - sends flow control requst.
+ * mei_hbm_cl_flow_control_req - sends flow control request.
*
* @dev: the device structure
* @cl: client info
@@ -451,7 +466,7 @@ int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
}
/**
- * mei_hbm_cl_connect_res - connect resposne from the ME
+ * mei_hbm_cl_connect_res - connect response from the ME
*
* @dev: the device structure
* @rs: connect response bus message
@@ -505,8 +520,8 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev,
/**
- * mei_hbm_fw_disconnect_req - disconnect request initiated by me
- * host sends disoconnect response
+ * mei_hbm_fw_disconnect_req - disconnect request initiated by ME firmware
+ * host sends disconnect response
*
* @dev: the device structure.
* @disconnect_req: disconnect request bus message from the me
@@ -559,8 +574,10 @@ bool mei_hbm_version_is_supported(struct mei_device *dev)
*
* @dev: the device structure
* @mei_hdr: header of bus message
+ *
+ * returns 0 on success and < 0 on failure
*/
-void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
+int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
{
struct mei_bus_message *mei_msg;
struct mei_me_client *me_client;
@@ -577,8 +594,20 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
+ /* ignore spurious message and prevent reset nesting
+ * hbm is put to idle during system reset
+ */
+ if (dev->hbm_state == MEI_HBM_IDLE) {
+ dev_dbg(&dev->pdev->dev, "hbm: state is idle ignore spurious messages\n");
+ return 0;
+ }
+
switch (mei_msg->hbm_cmd) {
case HOST_START_RES_CMD:
+ dev_dbg(&dev->pdev->dev, "hbm: start: response message received.\n");
+
+ dev->init_clients_timer = 0;
+
version_res = (struct hbm_host_version_response *)mei_msg;
dev_dbg(&dev->pdev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n",
@@ -597,73 +626,89 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
}
if (!mei_hbm_version_is_supported(dev)) {
- dev_warn(&dev->pdev->dev, "hbm version mismatch: stopping the driver.\n");
+ dev_warn(&dev->pdev->dev, "hbm: start: version mismatch - stopping the driver.\n");
- dev->hbm_state = MEI_HBM_STOP;
+ dev->hbm_state = MEI_HBM_STOPPED;
mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr,
dev->wr_msg.data);
- mei_write_message(dev, &dev->wr_msg.hdr,
- dev->wr_msg.data);
+ if (mei_write_message(dev, &dev->wr_msg.hdr,
+ dev->wr_msg.data)) {
+ dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n");
+ return -EIO;
+ }
+ break;
+ }
- return;
+ if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
+ dev->hbm_state != MEI_HBM_START) {
+ dev_err(&dev->pdev->dev, "hbm: start: state mismatch, [%d, %d]\n",
+ dev->dev_state, dev->hbm_state);
+ return -EPROTO;
}
- if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
- dev->hbm_state == MEI_HBM_START) {
- dev->init_clients_timer = 0;
- mei_hbm_enum_clients_req(dev);
- } else {
- dev_err(&dev->pdev->dev, "reset: wrong host start response\n");
- mei_reset(dev, 1);
- return;
+ dev->hbm_state = MEI_HBM_STARTED;
+
+ if (mei_hbm_enum_clients_req(dev)) {
+ dev_err(&dev->pdev->dev, "hbm: start: failed to send enumeration request\n");
+ return -EIO;
}
wake_up_interruptible(&dev->wait_recvd_msg);
- dev_dbg(&dev->pdev->dev, "host start response message received.\n");
break;
case CLIENT_CONNECT_RES_CMD:
+ dev_dbg(&dev->pdev->dev, "hbm: client connect response: message received.\n");
+
connect_res = (struct hbm_client_connect_response *) mei_msg;
mei_hbm_cl_connect_res(dev, connect_res);
- dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
wake_up(&dev->wait_recvd_msg);
break;
case CLIENT_DISCONNECT_RES_CMD:
+ dev_dbg(&dev->pdev->dev, "hbm: client disconnect response: message received.\n");
+
disconnect_res = (struct hbm_client_connect_response *) mei_msg;
mei_hbm_cl_disconnect_res(dev, disconnect_res);
- dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
wake_up(&dev->wait_recvd_msg);
break;
case MEI_FLOW_CONTROL_CMD:
+ dev_dbg(&dev->pdev->dev, "hbm: client flow control response: message received.\n");
+
flow_control = (struct hbm_flow_control *) mei_msg;
mei_hbm_cl_flow_control_res(dev, flow_control);
- dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
break;
case HOST_CLIENT_PROPERTIES_RES_CMD:
+ dev_dbg(&dev->pdev->dev, "hbm: properties response: message received.\n");
+
+ dev->init_clients_timer = 0;
+
+ if (dev->me_clients == NULL) {
+ dev_err(&dev->pdev->dev, "hbm: properties response: mei_clients not allocated\n");
+ return -EPROTO;
+ }
+
props_res = (struct hbm_props_response *)mei_msg;
me_client = &dev->me_clients[dev->me_client_presentation_num];
- if (props_res->status || !dev->me_clients) {
- dev_err(&dev->pdev->dev, "reset: properties response hbm wrong status.\n");
- mei_reset(dev, 1);
- return;
+ if (props_res->status) {
+ dev_err(&dev->pdev->dev, "hbm: properties response: wrong status = %d\n",
+ props_res->status);
+ return -EPROTO;
}
if (me_client->client_id != props_res->address) {
- dev_err(&dev->pdev->dev, "reset: host properties response address mismatch\n");
- mei_reset(dev, 1);
- return;
+ dev_err(&dev->pdev->dev, "hbm: properties response: address mismatch %d ?= %d\n",
+ me_client->client_id, props_res->address);
+ return -EPROTO;
}
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) {
- dev_err(&dev->pdev->dev, "reset: unexpected properties response\n");
- mei_reset(dev, 1);
-
- return;
+ dev_err(&dev->pdev->dev, "hbm: properties response: state mismatch, [%d, %d]\n",
+ dev->dev_state, dev->hbm_state);
+ return -EPROTO;
}
me_client->props = props_res->client_properties;
@@ -671,49 +716,70 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->me_client_presentation_num++;
/* request property for the next client */
- mei_hbm_prop_req(dev);
+ if (mei_hbm_prop_req(dev))
+ return -EIO;
break;
case HOST_ENUM_RES_CMD:
+ dev_dbg(&dev->pdev->dev, "hbm: enumeration response: message received\n");
+
+ dev->init_clients_timer = 0;
+
enum_res = (struct hbm_host_enum_response *) mei_msg;
BUILD_BUG_ON(sizeof(dev->me_clients_map)
< sizeof(enum_res->valid_addresses));
memcpy(dev->me_clients_map, enum_res->valid_addresses,
sizeof(enum_res->valid_addresses));
- if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
- dev->hbm_state == MEI_HBM_ENUM_CLIENTS) {
- dev->init_clients_timer = 0;
- mei_hbm_me_cl_allocate(dev);
- dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES;
-
- /* first property reqeust */
- mei_hbm_prop_req(dev);
- } else {
- dev_err(&dev->pdev->dev, "reset: unexpected enumeration response hbm.\n");
- mei_reset(dev, 1);
- return;
+
+ if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
+ dev->hbm_state != MEI_HBM_ENUM_CLIENTS) {
+ dev_err(&dev->pdev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n",
+ dev->dev_state, dev->hbm_state);
+ return -EPROTO;
+ }
+
+ if (mei_hbm_me_cl_allocate(dev)) {
+ dev_err(&dev->pdev->dev, "hbm: enumeration response: cannot allocate clients array\n");
+ return -ENOMEM;
}
+
+ dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES;
+
+ /* first property request */
+ if (mei_hbm_prop_req(dev))
+ return -EIO;
+
break;
case HOST_STOP_RES_CMD:
+ dev_dbg(&dev->pdev->dev, "hbm: stop response: message received\n");
+
+ dev->init_clients_timer = 0;
- if (dev->hbm_state != MEI_HBM_STOP)
- dev_err(&dev->pdev->dev, "unexpected stop response hbm.\n");
- dev->dev_state = MEI_DEV_DISABLED;
- dev_info(&dev->pdev->dev, "reset: FW stop response.\n");
- mei_reset(dev, 1);
+ if (dev->hbm_state != MEI_HBM_STOPPED) {
+ dev_err(&dev->pdev->dev, "hbm: stop response: state mismatch, [%d, %d]\n",
+ dev->dev_state, dev->hbm_state);
+ return -EPROTO;
+ }
+
+ dev->dev_state = MEI_DEV_POWER_DOWN;
+ dev_info(&dev->pdev->dev, "hbm: stop response: resetting.\n");
+ /* force the reset */
+ return -EPROTO;
break;
case CLIENT_DISCONNECT_REQ_CMD:
- /* search for client */
+ dev_dbg(&dev->pdev->dev, "hbm: disconnect request: message received\n");
+
disconnect_req = (struct hbm_client_connect_request *)mei_msg;
mei_hbm_fw_disconnect_req(dev, disconnect_req);
break;
case ME_STOP_REQ_CMD:
+ dev_dbg(&dev->pdev->dev, "hbm: stop request: message received\n");
- dev->hbm_state = MEI_HBM_STOP;
+ dev->hbm_state = MEI_HBM_STOPPED;
mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr,
dev->wr_ext_msg.data);
break;
@@ -722,5 +788,6 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
break;
}
+ return 0;
}
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index 4ae2e56e404f..5f92188a5cd7 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -32,13 +32,13 @@ struct mei_cl;
enum mei_hbm_state {
MEI_HBM_IDLE = 0,
MEI_HBM_START,
+ MEI_HBM_STARTED,
MEI_HBM_ENUM_CLIENTS,
MEI_HBM_CLIENT_PROPERTIES,
- MEI_HBM_STARTED,
- MEI_HBM_STOP,
+ MEI_HBM_STOPPED,
};
-void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr);
+int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr);
static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
{
@@ -49,6 +49,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
hdr->reserved = 0;
}
+void mei_hbm_idle(struct mei_device *dev);
int mei_hbm_start_req(struct mei_device *dev);
int mei_hbm_start_wait(struct mei_device *dev);
int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 3412adcdaeb0..6f656c053b14 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -185,7 +185,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
mei_me_reg_write(hw, H_CSR, hcsr);
- if (dev->dev_state == MEI_DEV_POWER_DOWN)
+ if (intr_enable == false)
mei_me_hw_reset_release(dev);
return 0;
@@ -469,7 +469,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
struct mei_device *dev = (struct mei_device *) dev_id;
struct mei_cl_cb complete_list;
s32 slots;
- int rets;
+ int rets = 0;
dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
/* initialize our complete list */
@@ -482,15 +482,10 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
mei_clear_interrupts(dev);
/* check if ME wants a reset */
- if (!mei_hw_is_ready(dev) &&
- dev->dev_state != MEI_DEV_RESETTING &&
- dev->dev_state != MEI_DEV_INITIALIZING &&
- dev->dev_state != MEI_DEV_POWER_DOWN &&
- dev->dev_state != MEI_DEV_POWER_UP) {
- dev_dbg(&dev->pdev->dev, "FW not ready.\n");
- mei_reset(dev, 1);
- mutex_unlock(&dev->device_lock);
- return IRQ_HANDLED;
+ if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
+ dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n");
+ schedule_work(&dev->reset_work);
+ goto end;
}
/* check if we need to start the dev */
@@ -500,15 +495,12 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
dev->recvd_hw_ready = true;
wake_up_interruptible(&dev->wait_hw_ready);
-
- mutex_unlock(&dev->device_lock);
- return IRQ_HANDLED;
} else {
+
dev_dbg(&dev->pdev->dev, "Reset Completed.\n");
mei_me_hw_reset_release(dev);
- mutex_unlock(&dev->device_lock);
- return IRQ_HANDLED;
}
+ goto end;
}
/* check slots available for reading */
slots = mei_count_full_read_slots(dev);
@@ -516,21 +508,23 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* we have urgent data to send so break the read */
if (dev->wr_ext_msg.hdr.length)
break;
- dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
- dev_dbg(&dev->pdev->dev, "call mei_irq_read_handler.\n");
+ dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots);
rets = mei_irq_read_handler(dev, &complete_list, &slots);
- if (rets)
+ if (rets && dev->dev_state != MEI_DEV_RESETTING) {
+ schedule_work(&dev->reset_work);
goto end;
+ }
}
+
rets = mei_irq_write_handler(dev, &complete_list);
-end:
- dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
- dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
- mutex_unlock(&dev->device_lock);
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
mei_irq_compl_handler(dev, &complete_list);
+end:
+ dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets);
+ mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
}
static const struct mei_hw_ops mei_me_hw_ops = {
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index cb2f556b4252..dd44e33ad2b6 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -111,7 +111,8 @@ struct mei_msg_hdr {
u32 me_addr:8;
u32 host_addr:8;
u32 length:9;
- u32 reserved:6;
+ u32 reserved:5;
+ u32 internal:1;
u32 msg_complete:1;
} __packed;
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index f7f3abbe12b6..cdd31c2a2a2b 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -43,41 +43,119 @@ const char *mei_dev_state_str(int state)
#undef MEI_DEV_STATE
}
-void mei_device_init(struct mei_device *dev)
-{
- /* setup our list array */
- INIT_LIST_HEAD(&dev->file_list);
- INIT_LIST_HEAD(&dev->device_list);
- mutex_init(&dev->device_lock);
- init_waitqueue_head(&dev->wait_hw_ready);
- init_waitqueue_head(&dev->wait_recvd_msg);
- init_waitqueue_head(&dev->wait_stop_wd);
- dev->dev_state = MEI_DEV_INITIALIZING;
- mei_io_list_init(&dev->read_list);
- mei_io_list_init(&dev->write_list);
- mei_io_list_init(&dev->write_waiting_list);
- mei_io_list_init(&dev->ctrl_wr_list);
- mei_io_list_init(&dev->ctrl_rd_list);
+/**
+ * mei_cancel_work - cancel mei background jobs
+ *
+ * @dev: the device structure
+ */
+void mei_cancel_work(struct mei_device *dev)
+{
+ cancel_work_sync(&dev->init_work);
+ cancel_work_sync(&dev->reset_work);
- INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
- INIT_WORK(&dev->init_work, mei_host_client_init);
+ cancel_delayed_work(&dev->timer_work);
+}
+EXPORT_SYMBOL_GPL(mei_cancel_work);
- INIT_LIST_HEAD(&dev->wd_cl.link);
- INIT_LIST_HEAD(&dev->iamthif_cl.link);
- mei_io_list_init(&dev->amthif_cmd_list);
- mei_io_list_init(&dev->amthif_rd_complete_list);
+/**
+ * mei_reset - resets host and fw.
+ *
+ * @dev: the device structure
+ */
+int mei_reset(struct mei_device *dev)
+{
+ enum mei_dev_state state = dev->dev_state;
+ bool interrupts_enabled;
+ int ret;
- bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
- dev->open_handle_count = 0;
+ if (state != MEI_DEV_INITIALIZING &&
+ state != MEI_DEV_DISABLED &&
+ state != MEI_DEV_POWER_DOWN &&
+ state != MEI_DEV_POWER_UP)
+ dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
+ mei_dev_state_str(state));
- /*
- * Reserving the first client ID
- * 0: Reserved for MEI Bus Message communications
+ /* we're already in reset, cancel the init timer
+ * if the reset was called due to an hbm protocol error
+ * we need to call it before hw start
+ * so the hbm watchdog won't kick in
*/
- bitmap_set(dev->host_clients_map, 0, 1);
+ mei_hbm_idle(dev);
+
+ /* enter reset flow */
+ interrupts_enabled = state != MEI_DEV_POWER_DOWN;
+ dev->dev_state = MEI_DEV_RESETTING;
+
+ dev->reset_count++;
+ if (dev->reset_count > MEI_MAX_CONSEC_RESET) {
+ dev_err(&dev->pdev->dev, "reset: reached maximal consecutive resets: disabling the device\n");
+ dev->dev_state = MEI_DEV_DISABLED;
+ return -ENODEV;
+ }
+
+ ret = mei_hw_reset(dev, interrupts_enabled);
+ /* fall through and remove the sw state even if hw reset has failed */
+
+ /* no need to clean up software state in case of power up */
+ if (state != MEI_DEV_INITIALIZING &&
+ state != MEI_DEV_POWER_UP) {
+
+ /* remove all waiting requests */
+ mei_cl_all_write_clear(dev);
+
+ mei_cl_all_disconnect(dev);
+
+ /* wake up all readers and writers so they can be interrupted */
+ mei_cl_all_wakeup(dev);
+
+ /* remove entry if already in list */
+ dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
+ mei_cl_unlink(&dev->wd_cl);
+ mei_cl_unlink(&dev->iamthif_cl);
+ mei_amthif_reset_params(dev);
+ memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
+ }
+
+
+ dev->me_clients_num = 0;
+ dev->rd_msg_hdr = 0;
+ dev->wd_pending = false;
+
+ if (ret) {
+ dev_err(&dev->pdev->dev, "hw_reset failed ret = %d\n", ret);
+ dev->dev_state = MEI_DEV_DISABLED;
+ return ret;
+ }
+
+ if (state == MEI_DEV_POWER_DOWN) {
+ dev_dbg(&dev->pdev->dev, "powering down: end of reset\n");
+ dev->dev_state = MEI_DEV_DISABLED;
+ return 0;
+ }
+
+ ret = mei_hw_start(dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "hw_start failed ret = %d\n", ret);
+ dev->dev_state = MEI_DEV_DISABLED;
+ return ret;
+ }
+
+ dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
+
+ dev->dev_state = MEI_DEV_INIT_CLIENTS;
+ ret = mei_hbm_start_req(dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "hbm_start failed ret = %d\n", ret);
+ dev->dev_state = MEI_DEV_DISABLED;
+ return ret;
+ }
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(mei_device_init);
+EXPORT_SYMBOL_GPL(mei_reset);
/**
* mei_start - initializes host and fw to start work.
@@ -90,14 +168,21 @@ int mei_start(struct mei_device *dev)
{
mutex_lock(&dev->device_lock);
- /* acknowledge interrupt and stop interupts */
+ /* acknowledge interrupt and stop interrupts */
mei_clear_interrupts(dev);
mei_hw_config(dev);
dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n");
- mei_reset(dev, 1);
+ dev->dev_state = MEI_DEV_INITIALIZING;
+ dev->reset_count = 0;
+ mei_reset(dev);
+
+ if (dev->dev_state == MEI_DEV_DISABLED) {
+ dev_err(&dev->pdev->dev, "reset failed");
+ goto err;
+ }
if (mei_hbm_start_wait(dev)) {
dev_err(&dev->pdev->dev, "HBM haven't started");
@@ -132,101 +217,64 @@ err:
EXPORT_SYMBOL_GPL(mei_start);
/**
- * mei_reset - resets host and fw.
+ * mei_restart - restart device after suspend
*
* @dev: the device structure
- * @interrupts_enabled: if interrupt should be enabled after reset.
+ *
+ * returns 0 on success or -ENODEV if the restart hasn't succeeded
*/
-void mei_reset(struct mei_device *dev, int interrupts_enabled)
+int mei_restart(struct mei_device *dev)
{
- bool unexpected;
- int ret;
-
- unexpected = (dev->dev_state != MEI_DEV_INITIALIZING &&
- dev->dev_state != MEI_DEV_DISABLED &&
- dev->dev_state != MEI_DEV_POWER_DOWN &&
- dev->dev_state != MEI_DEV_POWER_UP);
+ int err;
- if (unexpected)
- dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
- mei_dev_state_str(dev->dev_state));
-
- ret = mei_hw_reset(dev, interrupts_enabled);
- if (ret) {
- dev_err(&dev->pdev->dev, "hw reset failed disabling the device\n");
- interrupts_enabled = false;
- dev->dev_state = MEI_DEV_DISABLED;
- }
-
- dev->hbm_state = MEI_HBM_IDLE;
+ mutex_lock(&dev->device_lock);
- if (dev->dev_state != MEI_DEV_INITIALIZING &&
- dev->dev_state != MEI_DEV_POWER_UP) {
- if (dev->dev_state != MEI_DEV_DISABLED &&
- dev->dev_state != MEI_DEV_POWER_DOWN)
- dev->dev_state = MEI_DEV_RESETTING;
+ mei_clear_interrupts(dev);
- /* remove all waiting requests */
- mei_cl_all_write_clear(dev);
+ dev->dev_state = MEI_DEV_POWER_UP;
+ dev->reset_count = 0;
- mei_cl_all_disconnect(dev);
+ err = mei_reset(dev);
- /* wake up all readings so they can be interrupted */
- mei_cl_all_wakeup(dev);
-
- /* remove entry if already in list */
- dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
- mei_cl_unlink(&dev->wd_cl);
- mei_cl_unlink(&dev->iamthif_cl);
- mei_amthif_reset_params(dev);
- memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
- }
+ mutex_unlock(&dev->device_lock);
- /* we're already in reset, cancel the init timer */
- dev->init_clients_timer = 0;
+ if (err || dev->dev_state == MEI_DEV_DISABLED)
+ return -ENODEV;
- dev->me_clients_num = 0;
- dev->rd_msg_hdr = 0;
- dev->wd_pending = false;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mei_restart);
- if (!interrupts_enabled) {
- dev_dbg(&dev->pdev->dev, "intr not enabled end of reset\n");
- return;
- }
- ret = mei_hw_start(dev);
- if (ret) {
- dev_err(&dev->pdev->dev, "hw_start failed disabling the device\n");
- dev->dev_state = MEI_DEV_DISABLED;
- return;
- }
+static void mei_reset_work(struct work_struct *work)
+{
+ struct mei_device *dev =
+ container_of(work, struct mei_device, reset_work);
- dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
- /* link is established * start sending messages. */
+ mutex_lock(&dev->device_lock);
- dev->dev_state = MEI_DEV_INIT_CLIENTS;
+ mei_reset(dev);
- mei_hbm_start_req(dev);
+ mutex_unlock(&dev->device_lock);
+ if (dev->dev_state == MEI_DEV_DISABLED)
+ dev_err(&dev->pdev->dev, "reset failed");
}
-EXPORT_SYMBOL_GPL(mei_reset);
void mei_stop(struct mei_device *dev)
{
dev_dbg(&dev->pdev->dev, "stopping the device.\n");
- flush_scheduled_work();
+ mei_cancel_work(dev);
- mutex_lock(&dev->device_lock);
+ mei_nfc_host_exit(dev);
- cancel_delayed_work(&dev->timer_work);
+ mutex_lock(&dev->device_lock);
mei_wd_stop(dev);
- mei_nfc_host_exit();
-
dev->dev_state = MEI_DEV_POWER_DOWN;
- mei_reset(dev, 0);
+ mei_reset(dev);
mutex_unlock(&dev->device_lock);
@@ -236,3 +284,41 @@ EXPORT_SYMBOL_GPL(mei_stop);
+void mei_device_init(struct mei_device *dev)
+{
+ /* setup our list array */
+ INIT_LIST_HEAD(&dev->file_list);
+ INIT_LIST_HEAD(&dev->device_list);
+ mutex_init(&dev->device_lock);
+ init_waitqueue_head(&dev->wait_hw_ready);
+ init_waitqueue_head(&dev->wait_recvd_msg);
+ init_waitqueue_head(&dev->wait_stop_wd);
+ dev->dev_state = MEI_DEV_INITIALIZING;
+ dev->reset_count = 0;
+
+ mei_io_list_init(&dev->read_list);
+ mei_io_list_init(&dev->write_list);
+ mei_io_list_init(&dev->write_waiting_list);
+ mei_io_list_init(&dev->ctrl_wr_list);
+ mei_io_list_init(&dev->ctrl_rd_list);
+
+ INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
+ INIT_WORK(&dev->init_work, mei_host_client_init);
+ INIT_WORK(&dev->reset_work, mei_reset_work);
+
+ INIT_LIST_HEAD(&dev->wd_cl.link);
+ INIT_LIST_HEAD(&dev->iamthif_cl.link);
+ mei_io_list_init(&dev->amthif_cmd_list);
+ mei_io_list_init(&dev->amthif_rd_complete_list);
+
+ bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
+ dev->open_handle_count = 0;
+
+ /*
+ * Reserving the first client ID
+ * 0: Reserved for MEI Bus Message communications
+ */
+ bitmap_set(dev->host_clients_map, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mei_device_init);
+
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 7a95c07e59a6..f0fbb5179f80 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -31,7 +31,7 @@
/**
- * mei_irq_compl_handler - dispatch complete handelers
+ * mei_irq_compl_handler - dispatch complete handlers
* for the completed callbacks
*
* @dev - mei device
@@ -301,13 +301,11 @@ int mei_irq_read_handler(struct mei_device *dev,
struct mei_cl_cb *cmpl_list, s32 *slots)
{
struct mei_msg_hdr *mei_hdr;
- struct mei_cl *cl_pos = NULL;
- struct mei_cl *cl_next = NULL;
- int ret = 0;
+ struct mei_cl *cl;
+ int ret;
if (!dev->rd_msg_hdr) {
dev->rd_msg_hdr = mei_read_hdr(dev);
- dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
(*slots)--;
dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
}
@@ -315,61 +313,67 @@ int mei_irq_read_handler(struct mei_device *dev,
dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
if (mei_hdr->reserved || !dev->rd_msg_hdr) {
- dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
+ dev_err(&dev->pdev->dev, "corrupted message header 0x%08X\n",
+ dev->rd_msg_hdr);
ret = -EBADMSG;
goto end;
}
- if (mei_hdr->host_addr || mei_hdr->me_addr) {
- list_for_each_entry_safe(cl_pos, cl_next,
- &dev->file_list, link) {
- dev_dbg(&dev->pdev->dev,
- "list_for_each_entry_safe read host"
- " client = %d, ME client = %d\n",
- cl_pos->host_client_id,
- cl_pos->me_client_id);
- if (mei_cl_hbm_equal(cl_pos, mei_hdr))
- break;
- }
-
- if (&cl_pos->link == &dev->file_list) {
- dev_dbg(&dev->pdev->dev, "corrupted message header\n");
- ret = -EBADMSG;
- goto end;
- }
- }
- if (((*slots) * sizeof(u32)) < mei_hdr->length) {
- dev_err(&dev->pdev->dev,
- "we can't read the message slots =%08x.\n",
+ if (mei_slots2data(*slots) < mei_hdr->length) {
+ dev_err(&dev->pdev->dev, "less data available than length=%08x.\n",
*slots);
/* we can't read the message */
ret = -ERANGE;
goto end;
}
- /* decide where to read the message too */
- if (!mei_hdr->host_addr) {
- dev_dbg(&dev->pdev->dev, "call mei_hbm_dispatch.\n");
- mei_hbm_dispatch(dev, mei_hdr);
- dev_dbg(&dev->pdev->dev, "end mei_hbm_dispatch.\n");
- } else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
- (MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
- (dev->iamthif_state == MEI_IAMTHIF_READING)) {
+ /* HBM message */
+ if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) {
+ ret = mei_hbm_dispatch(dev, mei_hdr);
+ if (ret) {
+ dev_dbg(&dev->pdev->dev, "mei_hbm_dispatch failed ret = %d\n",
+ ret);
+ goto end;
+ }
+ goto reset_slots;
+ }
+
+ /* find recipient cl */
+ list_for_each_entry(cl, &dev->file_list, link) {
+ if (mei_cl_hbm_equal(cl, mei_hdr)) {
+ cl_dbg(dev, cl, "got a message\n");
+ break;
+ }
+ }
+
+ /* if no recipient cl was found we assume corrupted header */
+ if (&cl->link == &dev->file_list) {
+ dev_err(&dev->pdev->dev, "no destination client found 0x%08X\n",
+ dev->rd_msg_hdr);
+ ret = -EBADMSG;
+ goto end;
+ }
- dev_dbg(&dev->pdev->dev, "call mei_amthif_irq_read_msg.\n");
- dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
+ if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
+ MEI_FILE_CONNECTED == dev->iamthif_cl.state &&
+ dev->iamthif_state == MEI_IAMTHIF_READING) {
ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list);
- if (ret)
+ if (ret) {
+ dev_err(&dev->pdev->dev, "mei_amthif_irq_read_msg failed = %d\n",
+ ret);
goto end;
+ }
} else {
- dev_dbg(&dev->pdev->dev, "call mei_cl_irq_read_msg.\n");
- dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list);
- if (ret)
+ if (ret) {
+ dev_err(&dev->pdev->dev, "mei_cl_irq_read_msg failed = %d\n",
+ ret);
goto end;
+ }
}
+reset_slots:
/* reset the number of slots and header */
*slots = mei_count_full_read_slots(dev);
dev->rd_msg_hdr = 0;
@@ -533,7 +537,6 @@ EXPORT_SYMBOL_GPL(mei_irq_write_handler);
*
* @work: pointer to the work_struct structure
*
- * NOTE: This function is called by timer interrupt work
*/
void mei_timer(struct work_struct *work)
{
@@ -548,24 +551,30 @@ void mei_timer(struct work_struct *work)
mutex_lock(&dev->device_lock);
- if (dev->dev_state != MEI_DEV_ENABLED) {
- if (dev->dev_state == MEI_DEV_INIT_CLIENTS) {
- if (dev->init_clients_timer) {
- if (--dev->init_clients_timer == 0) {
- dev_err(&dev->pdev->dev, "reset: init clients timeout hbm_state = %d.\n",
- dev->hbm_state);
- mei_reset(dev, 1);
- }
+
+ /* Catch interrupt stalls during HBM init handshake */
+ if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
+ dev->hbm_state != MEI_HBM_IDLE) {
+
+ if (dev->init_clients_timer) {
+ if (--dev->init_clients_timer == 0) {
+ dev_err(&dev->pdev->dev, "timer: init clients timeout hbm_state = %d.\n",
+ dev->hbm_state);
+ mei_reset(dev);
+ goto out;
}
}
- goto out;
}
+
+ if (dev->dev_state != MEI_DEV_ENABLED)
+ goto out;
+
/*** connect/disconnect timeouts ***/
list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
if (cl_pos->timer_count) {
if (--cl_pos->timer_count == 0) {
- dev_err(&dev->pdev->dev, "reset: connect/disconnect timeout.\n");
- mei_reset(dev, 1);
+ dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n");
+ mei_reset(dev);
goto out;
}
}
@@ -573,8 +582,8 @@ void mei_timer(struct work_struct *work)
if (dev->iamthif_stall_timer) {
if (--dev->iamthif_stall_timer == 0) {
- dev_err(&dev->pdev->dev, "reset: amthif hanged.\n");
- mei_reset(dev, 1);
+ dev_err(&dev->pdev->dev, "timer: amthif hanged.\n");
+ mei_reset(dev);
dev->iamthif_msg_buf_size = 0;
dev->iamthif_msg_buf_index = 0;
dev->iamthif_canceled = false;
@@ -627,7 +636,8 @@ void mei_timer(struct work_struct *work)
}
}
out:
- schedule_delayed_work(&dev->timer_work, 2 * HZ);
+ if (dev->dev_state != MEI_DEV_DISABLED)
+ schedule_delayed_work(&dev->timer_work, 2 * HZ);
mutex_unlock(&dev->device_lock);
}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 9661a812f550..5424f8ff3f7f 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -48,7 +48,7 @@
*
* @inode: pointer to inode structure
* @file: pointer to file structure
- e
+ *
* returns 0 on success, <0 on error
*/
static int mei_open(struct inode *inode, struct file *file)
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 406f68e05b4e..f7de95b4cdd9 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -61,11 +61,16 @@ extern const uuid_le mei_wd_guid;
#define MEI_CLIENTS_MAX 256
/*
+ * maximum number of consecutive resets
+ */
+#define MEI_MAX_CONSEC_RESET 3
+
+/*
* Number of File descriptors/handles
* that can be opened to the driver.
*
* Limit to 255: 256 Total Clients
- * minus internal client for MEI Bus Messags
+ * minus internal client for MEI Bus Messages
*/
#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1)
@@ -178,9 +183,10 @@ struct mei_cl_cb {
unsigned long buf_idx;
unsigned long read_time;
struct file *file_object;
+ u32 internal:1;
};
-/* MEI client instance carried as file->pirvate_data*/
+/* MEI client instance carried as file->private_data*/
struct mei_cl {
struct list_head link;
struct mei_device *dev;
@@ -326,6 +332,7 @@ struct mei_cl_device {
/**
* struct mei_device - MEI private device struct
+ * @reset_count - limits the number of consecutive resets
* @hbm_state - state of host bus message protocol
* @mem_addr - mem mapped base register address
@@ -369,6 +376,7 @@ struct mei_device {
/*
* mei device states
*/
+ unsigned long reset_count;
enum mei_dev_state dev_state;
enum mei_hbm_state hbm_state;
u16 init_clients_timer;
@@ -427,6 +435,7 @@ struct mei_device {
bool iamthif_canceled;
struct work_struct init_work;
+ struct work_struct reset_work;
/* List of bus devices */
struct list_head device_list;
@@ -456,13 +465,25 @@ static inline u32 mei_data2slots(size_t length)
return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
}
+/**
+ * mei_slots2data - convert slots to data size in bytes
+ * @slots - number of available slots
+ * returns - number of bytes in slots
+ */
+static inline u32 mei_slots2data(int slots)
+{
+ return slots * 4;
+}
+
/*
* mei init function prototypes
*/
void mei_device_init(struct mei_device *dev);
-void mei_reset(struct mei_device *dev, int interrupts);
+int mei_reset(struct mei_device *dev);
int mei_start(struct mei_device *dev);
+int mei_restart(struct mei_device *dev);
void mei_stop(struct mei_device *dev);
+void mei_cancel_work(struct mei_device *dev);
/*
* MEI interrupt functions prototype
@@ -510,7 +531,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
* NFC functions
*/
int mei_nfc_host_init(struct mei_device *dev);
-void mei_nfc_host_exit(void);
+void mei_nfc_host_exit(struct mei_device *dev);
/*
* NFC Client UUID
@@ -626,9 +647,9 @@ static inline void mei_dbgfs_deregister(struct mei_device *dev) {}
int mei_register(struct mei_device *dev);
void mei_deregister(struct mei_device *dev);
-#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d comp=%1d"
+#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d"
#define MEI_HDR_PRM(hdr) \
(hdr)->host_addr, (hdr)->me_addr, \
- (hdr)->length, (hdr)->msg_complete
+ (hdr)->length, (hdr)->internal, (hdr)->msg_complete
#endif
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index 04087d1369d9..a58320c0c049 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -92,7 +92,7 @@ struct mei_nfc_hci_hdr {
* @cl: NFC host client
* @cl_info: NFC info host client
* @init_work: perform connection to the info client
- * @fw_ivn: NFC Intervace Version Number
+ * @fw_ivn: NFC Interface Version Number
* @vendor_id: NFC manufacturer ID
* @radio_type: NFC radio type
*/
@@ -163,7 +163,7 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
return 0;
default:
- dev_err(&dev->pdev->dev, "Unknow radio type 0x%x\n",
+ dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n",
ndev->radio_type);
return -EINVAL;
@@ -175,14 +175,14 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
ndev->bus_name = "pn544";
return 0;
default:
- dev_err(&dev->pdev->dev, "Unknow radio type 0x%x\n",
+ dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n",
ndev->radio_type);
return -EINVAL;
}
default:
- dev_err(&dev->pdev->dev, "Unknow vendor ID 0x%x\n",
+ dev_err(&dev->pdev->dev, "Unknown vendor ID 0x%x\n",
ndev->vendor_id);
return -EINVAL;
@@ -469,7 +469,9 @@ static void mei_nfc_init(struct work_struct *work)
return;
err:
+ mutex_lock(&dev->device_lock);
mei_nfc_free(ndev);
+ mutex_unlock(&dev->device_lock);
return;
}
@@ -481,7 +483,7 @@ int mei_nfc_host_init(struct mei_device *dev)
struct mei_cl *cl_info, *cl = NULL;
int i, ret;
- /* already initialzed */
+ /* already initialized */
if (ndev->cl_info)
return 0;
@@ -547,12 +549,16 @@ err:
return ret;
}
-void mei_nfc_host_exit(void)
+void mei_nfc_host_exit(struct mei_device *dev)
{
struct mei_nfc_dev *ndev = &nfc_dev;
+ cancel_work_sync(&ndev->init_work);
+
+ mutex_lock(&dev->device_lock);
if (ndev->cl && ndev->cl->device)
mei_cl_remove_device(ndev->cl->device);
mei_nfc_free(ndev);
+ mutex_unlock(&dev->device_lock);
}
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 2cab3c0a6805..ddadd08956f4 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -144,6 +144,21 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "failed to get pci regions.\n");
goto disable_device;
}
+
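+ /* prefer a 64-bit DMA mask and fall back to 32-bit if it is not supported */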
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ }
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+ goto release_regions;
+ }
+
+
/* allocates and initializes the mei dev structure */
dev = mei_me_dev_init(pdev);
if (!dev) {
@@ -197,8 +212,8 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
release_irq:
+ mei_cancel_work(dev);
mei_disable_interrupts(dev);
- flush_scheduled_work();
free_irq(pdev->irq, dev);
disable_msi:
pci_disable_msi(pdev);
@@ -306,16 +321,14 @@ static int mei_me_pci_resume(struct device *device)
return err;
}
- mutex_lock(&dev->device_lock);
- dev->dev_state = MEI_DEV_POWER_UP;
- mei_clear_interrupts(dev);
- mei_reset(dev, 1);
- mutex_unlock(&dev->device_lock);
+ err = mei_restart(dev);
+ if (err)
+ return err;
/* Start timer if stopped in suspend */
schedule_delayed_work(&dev->timer_work, HZ);
- return err;
+ return 0;
}
static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume);
#define MEI_ME_PM_OPS (&mei_me_pm_ops)
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 9e354216c163..f70945ed96f6 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -115,6 +115,7 @@ int mei_wd_send(struct mei_device *dev)
hdr.me_addr = dev->wd_cl.me_client_id;
hdr.msg_complete = 1;
hdr.reserved = 0;
+ hdr.internal = 0;
if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE))
hdr.length = MEI_WD_START_MSG_SIZE;
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h
index 3574cc375bb9..1a6edce2ecde 100644
--- a/drivers/misc/mic/host/mic_device.h
+++ b/drivers/misc/mic/host/mic_device.h
@@ -112,7 +112,7 @@ struct mic_device {
struct work_struct shutdown_work;
u8 state;
u8 shutdown_status;
- struct sysfs_dirent *state_sysfs;
+ struct kernfs_node *state_sysfs;
struct completion reset_wait;
void *log_buf_addr;
int *log_buf_len;
@@ -134,6 +134,8 @@ struct mic_device {
* @send_intr: Send an interrupt for a particular doorbell on the card.
* @ack_interrupt: Hardware specific operations to ack the h/w on
* receipt of an interrupt.
+ * @intr_workarounds: Hardware specific workarounds needed after
+ * handling an interrupt.
* @reset: Reset the remote processor.
* @reset_fw_ready: Reset firmware ready field.
* @is_fw_ready: Check if firmware is ready for OS download.
@@ -149,6 +151,7 @@ struct mic_hw_ops {
void (*write_spad)(struct mic_device *mdev, unsigned int idx, u32 val);
void (*send_intr)(struct mic_device *mdev, int doorbell);
u32 (*ack_interrupt)(struct mic_device *mdev);
+ void (*intr_workarounds)(struct mic_device *mdev);
void (*reset)(struct mic_device *mdev);
void (*reset_fw_ready)(struct mic_device *mdev);
bool (*is_fw_ready)(struct mic_device *mdev);
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
index ad838c7651c4..c04a021e20c7 100644
--- a/drivers/misc/mic/host/mic_main.c
+++ b/drivers/misc/mic/host/mic_main.c
@@ -115,7 +115,7 @@ static irqreturn_t mic_shutdown_db(int irq, void *data)
struct mic_device *mdev = data;
struct mic_bootparam *bootparam = mdev->dp;
- mdev->ops->ack_interrupt(mdev);
+ mdev->ops->intr_workarounds(mdev);
switch (bootparam->shutdown_status) {
case MIC_HALTED:
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index e04bb4fe6823..7e1ef0ebbb80 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -156,7 +156,8 @@ static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
static int _mic_virtio_copy(struct mic_vdev *mvdev,
struct mic_copy_desc *copy)
{
- int ret = 0, iovcnt = copy->iovcnt;
+ int ret = 0;
+ u32 iovcnt = copy->iovcnt;
struct iovec iov;
struct iovec __user *u_iov = copy->iov;
void __user *ubuf = NULL;
@@ -369,7 +370,7 @@ static irqreturn_t mic_virtio_intr_handler(int irq, void *data)
struct mic_vdev *mvdev = data;
struct mic_device *mdev = mvdev->mdev;
- mdev->ops->ack_interrupt(mdev);
+ mdev->ops->intr_workarounds(mdev);
schedule_work(&mvdev->virtio_bh_work);
return IRQ_HANDLED;
}
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index 0dfa8a81436e..5562fdd3ef4e 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -174,35 +174,38 @@ static void mic_x100_send_intr(struct mic_device *mdev, int doorbell)
}
/**
- * mic_ack_interrupt - Device specific interrupt handling.
- * @mdev: pointer to mic_device instance
+ * mic_x100_ack_interrupt - Read the interrupt sources register and
+ * clear it. This function will be called in the MSI/INTx case.
+ * @mdev: Pointer to mic_device instance.
*
- * Returns: bitmask of doorbell events triggered.
+ * Returns: bitmask of interrupt sources triggered.
*/
static u32 mic_x100_ack_interrupt(struct mic_device *mdev)
{
- u32 reg = 0;
- struct mic_mw *mw = &mdev->mmio;
u32 sicr0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICR0;
+ u32 reg = mic_mmio_read(&mdev->mmio, sicr0);
+ mic_mmio_write(&mdev->mmio, reg, sicr0);
+ return reg;
+}
+
+/**
+ * mic_x100_intr_workarounds - These hardware specific workarounds are
+ * to be invoked every time an interrupt is handled.
+ * @mdev: Pointer to mic_device instance.
+ *
+ * Returns: none
+ */
+static void mic_x100_intr_workarounds(struct mic_device *mdev)
+{
+ struct mic_mw *mw = &mdev->mmio;
/* Clear pending bit array. */
if (MIC_A0_STEP == mdev->stepping)
mic_mmio_write(mw, 1, MIC_X100_SBOX_BASE_ADDRESS +
MIC_X100_SBOX_MSIXPBACR);
- if (mdev->irq_info.num_vectors <= 1) {
- reg = mic_mmio_read(mw, sicr0);
-
- if (unlikely(!reg))
- goto done;
-
- mic_mmio_write(mw, reg, sicr0);
- }
-
if (mdev->stepping >= MIC_B0_STEP)
mdev->intr_ops->enable_interrupts(mdev);
-done:
- return reg;
}
/**
@@ -553,6 +556,7 @@ struct mic_hw_ops mic_x100_ops = {
.write_spad = mic_x100_write_spad,
.send_intr = mic_x100_send_intr,
.ack_interrupt = mic_x100_ack_interrupt,
+ .intr_workarounds = mic_x100_intr_workarounds,
.reset = mic_x100_hw_reset,
.reset_fw_ready = mic_x100_reset_fw_ready,
.is_fw_ready = mic_x100_is_fw_ready,
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 9b2062d17327..2bef3f76032a 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -139,8 +139,11 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
ubuf += sizeof(hdr);
ubufcch = ubuf;
- if (gru_user_copy_handle(&ubuf, cch))
- goto fail;
+ if (gru_user_copy_handle(&ubuf, cch)) {
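+ /* copy to user space failed: release the handle lock if held and bail out */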
+ if (cch_locked)
+ unlock_cch_handle(cch);
+ return -EFAULT;
+ }
if (cch_locked)
ubufcch->delresp = 0;
bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;
@@ -179,10 +182,6 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
ret = -EFAULT;
return ret ? ret : bytes;
-
-fail:
- unlock_cch_handle(cch);
- return -EFAULT;
}
int gru_dump_chiplet_request(unsigned long arg)
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 652593fc486d..128d5615c804 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -828,6 +828,7 @@ enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
enum xp_retval ret;
+ DEFINE_WAIT(wait);
if (ch->flags & XPC_C_DISCONNECTING) {
DBUG_ON(ch->reason == xpInterrupted);
@@ -835,7 +836,9 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
}
atomic_inc(&ch->n_on_msg_allocate_wq);
- ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
+ prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
+ ret = schedule_timeout(1);
+ finish_wait(&ch->msg_allocate_wq, &wait);
atomic_dec(&ch->n_on_msg_allocate_wq);
if (ch->flags & XPC_C_DISCONNECTING) {
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 8d64b681dd93..3aed525e55b4 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -812,7 +812,7 @@ static void st_tty_flush_buffer(struct tty_struct *tty)
kfree_skb(st_gdata->tx_skb);
st_gdata->tx_skb = NULL;
- tty->ops->flush_buffer(tty);
+ tty_driver_flush_buffer(tty);
return;
}
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 96853a09788a..9d3dbb28734b 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -531,7 +531,6 @@ long st_kim_stop(void *kim_data)
/* Flush any pending characters in the driver and discipline. */
tty_ldisc_flush(tty);
tty_driver_flush_buffer(tty);
- tty->ops->flush_buffer(tty);
}
/* send uninstall notification to UIM */
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index c98b03b99353..d35cda06b5e8 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -165,7 +165,7 @@ static void vmci_guest_cid_update(u32 sub_id,
* true if required hypercalls (or fallback hypercalls) are
* supported by the host, false otherwise.
*/
-static bool vmci_check_host_caps(struct pci_dev *pdev)
+static int vmci_check_host_caps(struct pci_dev *pdev)
{
bool result;
struct vmci_resource_query_msg *msg;
@@ -176,7 +176,7 @@ static bool vmci_check_host_caps(struct pci_dev *pdev)
check_msg = kmalloc(msg_size, GFP_KERNEL);
if (!check_msg) {
dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
- return false;
+ return -ENOMEM;
}
check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
@@ -196,7 +196,7 @@ static bool vmci_check_host_caps(struct pci_dev *pdev)
__func__, result ? "PASSED" : "FAILED");
/* We need the vector. There are no fallbacks. */
- return result;
+ return result ? 0 : -ENXIO;
}
/*
@@ -564,12 +564,14 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
dev_warn(&pdev->dev,
"VMCI device unable to register notification bitmap with PPN 0x%x\n",
(u32) bitmap_ppn);
+ error = -ENXIO;
goto err_remove_vmci_dev_g;
}
}
/* Check host capabilities. */
- if (!vmci_check_host_caps(pdev))
+ error = vmci_check_host_caps(pdev);
+ if (error)
goto err_remove_bitmap;
/* Enable device. */
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 29d5d988a51c..7b5424f398ac 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1959,6 +1959,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = md->queue.card;
struct mmc_host *host = card->host;
unsigned long flags;
+ unsigned int cmd_flags = req ? req->cmd_flags : 0;
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
@@ -1974,7 +1975,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
}
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
- if (req && req->cmd_flags & REQ_DISCARD) {
+ if (cmd_flags & REQ_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
@@ -1983,7 +1984,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
ret = mmc_blk_issue_secdiscard_rq(mq, req);
else
ret = mmc_blk_issue_discard_rq(mq, req);
- } else if (req && req->cmd_flags & REQ_FLUSH) {
+ } else if (cmd_flags & REQ_FLUSH) {
/* complete ongoing async transfer before issuing flush */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
@@ -1999,7 +2000,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
out:
if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
- (req && (req->cmd_flags & MMC_REQ_SPECIAL_MASK)))
+ (cmd_flags & MMC_REQ_SPECIAL_MASK))
/*
* Release host when there are no more requests
* and after special request(discard, flush) is done.
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 57a2b403bf8e..098374b1ab2b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2460,7 +2460,8 @@ void mmc_rescan(struct work_struct *work)
*/
mmc_bus_put(host);
- if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
+ if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
+ host->ops->get_cd(host) == 0) {
mmc_claim_host(host);
mmc_power_off(host);
mmc_release_host(host);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index f631f5a9bf79..98e9eb0f6643 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1119,14 +1119,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
if (mmc_card_highspeed(card)) {
if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
- && ((host->caps & (MMC_CAP_1_8V_DDR |
- MMC_CAP_UHS_DDR50))
- == (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
+ && (host->caps & MMC_CAP_1_8V_DDR))
ddr = MMC_1_8V_DDR_MODE;
else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
- && ((host->caps & (MMC_CAP_1_2V_DDR |
- MMC_CAP_UHS_DDR50))
- == (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
+ && (host->caps & MMC_CAP_1_2V_DDR))
ddr = MMC_1_2V_DDR_MODE;
}
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index 06ee1aeaacec..6c36fccaa1ec 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_ids.h>
#ifndef SDIO_VENDOR_ID_TI
#define SDIO_VENDOR_ID_TI 0x0097
@@ -30,6 +31,10 @@
#define SDIO_DEVICE_ID_STE_CW1200 0x2280
#endif
+#ifndef SDIO_DEVICE_ID_MARVELL_8797_F0
+#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
+#endif
+
/*
* This hook just adds a quirk for all sdio devices
*/
@@ -58,6 +63,9 @@ static const struct mmc_fixup mmc_fixup_methods[] = {
SDIO_FIXUP(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200,
add_quirk, MMC_QUIRK_BROKEN_BYTE_MODE_512),
+ SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_F0,
+ add_quirk, MMC_QUIRK_BROKEN_IRQ_POLLING),
+
END_FIXUP
};
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 6f42050b7ccc..692fdb177294 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -11,6 +11,7 @@
*/
#include <linux/err.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>
@@ -45,6 +46,13 @@ static const unsigned int tacc_mant[] = {
35, 40, 45, 50, 55, 60, 70, 80,
};
+static const unsigned int sd_au_size[] = {
+ 0, SZ_16K / 512, SZ_32K / 512, SZ_64K / 512,
+ SZ_128K / 512, SZ_256K / 512, SZ_512K / 512, SZ_1M / 512,
+ SZ_2M / 512, SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
+ SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
+};
+
#define UNSTUFF_BITS(resp,start,size) \
({ \
const int __size = size; \
@@ -216,7 +224,7 @@ static int mmc_decode_scr(struct mmc_card *card)
static int mmc_read_ssr(struct mmc_card *card)
{
unsigned int au, es, et, eo;
- int err, i, max_au;
+ int err, i;
u32 *ssr;
if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -240,26 +248,25 @@ static int mmc_read_ssr(struct mmc_card *card)
for (i = 0; i < 16; i++)
ssr[i] = be32_to_cpu(ssr[i]);
- /* SD3.0 increases max AU size to 64MB (0xF) from 4MB (0x9) */
- max_au = card->scr.sda_spec3 ? 0xF : 0x9;
-
/*
* UNSTUFF_BITS only works with four u32s so we have to offset the
* bitfield positions accordingly.
*/
au = UNSTUFF_BITS(ssr, 428 - 384, 4);
- if (au > 0 && au <= max_au) {
- card->ssr.au = 1 << (au + 4);
- es = UNSTUFF_BITS(ssr, 408 - 384, 16);
- et = UNSTUFF_BITS(ssr, 402 - 384, 6);
- eo = UNSTUFF_BITS(ssr, 400 - 384, 2);
- if (es && et) {
- card->ssr.erase_timeout = (et * 1000) / es;
- card->ssr.erase_offset = eo * 1000;
+ if (au) {
+ if (au <= 9 || card->scr.sda_spec3) {
+ card->ssr.au = sd_au_size[au];
+ es = UNSTUFF_BITS(ssr, 408 - 384, 16);
+ et = UNSTUFF_BITS(ssr, 402 - 384, 6);
+ if (es && et) {
+ eo = UNSTUFF_BITS(ssr, 400 - 384, 2);
+ card->ssr.erase_timeout = (et * 1000) / es;
+ card->ssr.erase_offset = eo * 1000;
+ }
+ } else {
+ pr_warning("%s: SD Status: Invalid Allocation Unit size.\n",
+ mmc_hostname(card->host));
}
- } else {
- pr_warning("%s: SD Status: Invalid Allocation Unit "
- "size.\n", mmc_hostname(card->host));
}
out:
kfree(ssr);
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 157b570ba343..92d1ba8e8153 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -308,7 +308,7 @@ static void sdio_acpi_set_handle(struct sdio_func *func)
struct mmc_host *host = func->card->host;
u64 addr = (host->slotno << 16) | func->num;
- acpi_preset_companion(&func->dev, ACPI_HANDLE(host->parent), addr);
+ acpi_preset_companion(&func->dev, ACPI_COMPANION(host->parent), addr);
}
#else
static inline void sdio_acpi_set_handle(struct sdio_func *func) {}
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 3d8ceb4084de..aaa90460ed23 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -53,6 +53,17 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
return ret;
}
+ if (pending && mmc_card_broken_irq_polling(card) &&
+ !(host->caps & MMC_CAP_SDIO_IRQ)) {
+ unsigned char dummy;
+
+ /* A fake interrupt could be created when we poll SDIO_CCCR_INTx
+ * register with a Marvell SD8797 card. A dummy CMD52 read to
+ * function 0 register 0xff can avoid this.
+ */
+ mmc_io_rw_direct(card, 0, 0, 0xff, 0, &dummy);
+ }
+
count = 0;
for (i = 1; i <= 7; i++) {
if (pending & (1 << i)) {
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 7fc5099e44b2..1384f67abe21 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -104,6 +104,18 @@ config MMC_SDHCI_PLTFM
If unsure, say N.
+config MMC_SDHCI_OF_ARASAN
+ tristate "SDHCI OF support for the Arasan SDHCI controllers"
+ depends on MMC_SDHCI_PLTFM
+ depends on OF
+ help
+ This selects the Arasan Secure Digital Host Controller Interface
+ (SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_OF_ESDHC
tristate "SDHCI OF support for the Freescale eSDHC controller"
depends on MMC_SDHCI_PLTFM
@@ -324,7 +336,7 @@ config MMC_ATMELMCI
config MMC_MSM
tristate "Qualcomm SDCC Controller Support"
- depends on MMC && ARCH_MSM
+ depends on MMC && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
help
This provides support for the SD/MMC cell found in the
MSM and QSD SOCs from Qualcomm. The controller also has
@@ -479,7 +491,8 @@ config MMC_TMIO
config MMC_SDHI
tristate "SH-Mobile SDHI SD/SDIO controller support"
- depends on SUPERH || ARCH_SHMOBILE
+ depends on SUPERH || ARM
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
select MMC_TMIO_CORE
help
This provides support for the SDHI SD/SDIO controller found in
@@ -575,6 +588,16 @@ config MMC_DW_SOCFPGA
This selects support for Altera SoCFPGA specific extensions to the
Synopsys DesignWare Memory Card Interface driver.
+config MMC_DW_K3
+ tristate "K3 specific extensions for Synopsys DW Memory Card Interface"
+ depends on MMC_DW
+ select MMC_DW_PLTFM
+ select MMC_DW_IDMAC
+ help
+ This selects support for Hisilicon K3 SoC specific extensions to the
+ Synopsys DesignWare Memory Card Interface driver. Select this option
+ for platforms based on Hisilicon K3 SoCs.
+
config MMC_DW_PCI
tristate "Synopsys Designware MCI support on PCI bus"
depends on MMC_DW && PCI
@@ -588,7 +611,8 @@ config MMC_DW_PCI
config MMC_SH_MMCIF
tristate "SuperH Internal MMCIF support"
- depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
+ depends on MMC_BLOCK
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
help
This selects the MMC Host Interface controller (MMCIF).
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index c41d0c364509..3483b6b6b880 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
+obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-o2micro.o
obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
@@ -43,6 +44,7 @@ obj-$(CONFIG_MMC_DW) += dw_mmc.o
obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o
obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o
obj-$(CONFIG_MMC_DW_SOCFPGA) += dw_mmc-socfpga.o
+obj-$(CONFIG_MMC_DW_K3) += dw_mmc-k3.o
obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
@@ -57,6 +59,7 @@ obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
+obj-$(CONFIG_MMC_SDHCI_OF_ARASAN) += sdhci-of-arasan.o
obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 2cbb4516d353..42706ea0ba85 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1192,11 +1192,22 @@ static void atmci_start_request(struct atmel_mci *host,
iflags |= ATMCI_CMDRDY;
cmd = mrq->cmd;
cmdflags = atmci_prepare_command(slot->mmc, cmd);
- atmci_send_command(host, cmd, cmdflags);
+
+ /*
+ * The DMA transfer should be started before sending the command to avoid
+ * unexpected errors, especially for read operations in SDIO mode.
+ * Unfortunately, in PDC mode, the command has to be sent before starting
+ * the transfer.
+ */
+ if (host->submit_data != &atmci_submit_data_dma)
+ atmci_send_command(host, cmd, cmdflags);
if (data)
host->submit_data(host, data);
+ if (host->submit_data == &atmci_submit_data_dma)
+ atmci_send_command(host, cmd, cmdflags);
+
if (mrq->stop) {
host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
@@ -1391,8 +1402,14 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
clk_unprepare(host->mck);
switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ break;
case MMC_POWER_UP:
set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
break;
default:
/*
@@ -2204,6 +2221,7 @@ static int __init atmci_init_slot(struct atmel_mci *host,
}
host->slot[id] = slot;
+ mmc_regulator_get_supply(mmc);
mmc_add_host(mmc);
if (gpio_is_valid(slot->detect_pin)) {
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
new file mode 100644
index 000000000000..f567c219cff4
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2013 Linaro Ltd.
+ * Copyright (c) 2013 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/of_address.h>
+
+#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
+
+static void dw_mci_k3_set_ios(struct dw_mci *host, struct mmc_ios *ios)
+{
+ int ret;
+
+ ret = clk_set_rate(host->ciu_clk, ios->clock);
+ if (ret)
+ dev_warn(host->dev, "failed to set rate %uHz\n", ios->clock);
+
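+ /* record the rate actually set, which may differ from the requested one */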
+ host->bus_hz = clk_get_rate(host->ciu_clk);
+}
+
+static const struct dw_mci_drv_data k3_drv_data = {
+ .set_ios = dw_mci_k3_set_ios,
+};
+
+static const struct of_device_id dw_mci_k3_match[] = {
+ { .compatible = "hisilicon,hi4511-dw-mshc", .data = &k3_drv_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_mci_k3_match);
+
+static int dw_mci_k3_probe(struct platform_device *pdev)
+{
+ const struct dw_mci_drv_data *drv_data;
+ const struct of_device_id *match;
+
+ match = of_match_node(dw_mci_k3_match, pdev->dev.of_node);
+ drv_data = match->data;
+
+ return dw_mci_pltfm_register(pdev, drv_data);
+}
+
+static int dw_mci_k3_suspend(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dw_mci_suspend(host);
+ if (!ret)
+ clk_disable_unprepare(host->ciu_clk);
+
+ return ret;
+}
+
+static int dw_mci_k3_resume(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(host->ciu_clk);
+ if (ret) {
+ dev_err(host->dev, "failed to enable ciu clock\n");
+ return ret;
+ }
+
+ return dw_mci_resume(host);
+}
+
+static SIMPLE_DEV_PM_OPS(dw_mci_k3_pmops, dw_mci_k3_suspend, dw_mci_k3_resume);
+
+static struct platform_driver dw_mci_k3_pltfm_driver = {
+ .probe = dw_mci_k3_probe,
+ .remove = dw_mci_pltfm_remove,
+ .driver = {
+ .name = "dwmmc_k3",
+ .of_match_table = dw_mci_k3_match,
+ .pm = &dw_mci_k3_pmops,
+ },
+};
+
+module_platform_driver(dw_mci_k3_pltfm_driver);
+
+MODULE_DESCRIPTION("K3 Specific DW-MSHC Driver Extension");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dwmmc-k3");
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 4bce0deec362..55cd110a49c4 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -36,6 +36,7 @@
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/mmc/slot-gpio.h>
#include "dw_mmc.h"
@@ -1032,20 +1033,29 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
int present;
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci_board *brd = slot->host->pdata;
+ struct dw_mci *host = slot->host;
+ int gpio_cd = mmc_gpio_get_cd(mmc);
/* Use platform get_cd function, else try onboard card detect */
if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
present = 1;
else if (brd->get_cd)
present = !brd->get_cd(slot->id);
+ else if (!IS_ERR_VALUE(gpio_cd))
+ present = gpio_cd;
else
present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
== 0 ? 1 : 0;
- if (present)
+ spin_lock_bh(&host->lock);
+ if (present) {
+ set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
dev_dbg(&mmc->class_dev, "card is present\n");
- else
+ } else {
+ clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
dev_dbg(&mmc->class_dev, "card is not present\n");
+ }
+ spin_unlock_bh(&host->lock);
return present;
}
@@ -1926,10 +1936,6 @@ static void dw_mci_work_routine_card(struct work_struct *work)
/* Card change detected */
slot->last_detect_state = present;
- /* Mark card as present if applicable */
- if (present != 0)
- set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
-
/* Clean up queue if present */
mrq = slot->mrq;
if (mrq) {
@@ -1977,8 +1983,6 @@ static void dw_mci_work_routine_card(struct work_struct *work)
/* Power down slot */
if (present == 0) {
- clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
-
/* Clear down the FIFO */
dw_mci_fifo_reset(host);
#ifdef CONFIG_MMC_DW_IDMAC
@@ -2079,6 +2083,26 @@ static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
return gpio;
}
+
+/* find the cd gpio for a given slot */
+static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
+ struct mmc_host *mmc)
+{
+ struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
+ int gpio;
+
+ if (!np)
+ return;
+
+ gpio = of_get_named_gpio(np, "cd-gpios", 0);
+
+ /* Having a missing entry is valid; return silently */
+ if (!gpio_is_valid(gpio))
+ return;
+
+ if (mmc_gpio_request_cd(mmc, gpio, 0))
+ dev_warn(dev, "gpio [%d] request failed\n", gpio);
+}
#else /* CONFIG_OF */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
@@ -2096,6 +2120,11 @@ static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
return -EINVAL;
}
+static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
+ struct mmc_host *mmc)
+{
+ return;
+}
#endif /* CONFIG_OF */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
@@ -2197,12 +2226,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
#endif /* CONFIG_MMC_DW_IDMAC */
}
- if (dw_mci_get_cd(mmc))
- set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
- else
- clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
-
slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
+ dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
ret = mmc_add_host(mmc);
if (ret)
@@ -2389,6 +2414,9 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+ if (of_get_property(np, "cd-inverted", NULL))
+ pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+
return pdata;
}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index f32057972dd7..b93122636531 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1683,8 +1683,6 @@ static int mmci_remove(struct amba_device *dev)
{
struct mmc_host *mmc = amba_get_drvdata(dev);
- amba_set_drvdata(dev, NULL);
-
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 50fc9df791b2..073e871a0fc8 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -38,6 +38,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
@@ -69,37 +70,25 @@ struct mxs_mmc_host {
unsigned char bus_width;
spinlock_t lock;
int sdio_irq_en;
- int wp_gpio;
- bool wp_inverted;
- bool cd_inverted;
- bool broken_cd;
- bool non_removable;
};
-static int mxs_mmc_get_ro(struct mmc_host *mmc)
+static int mxs_mmc_get_cd(struct mmc_host *mmc)
{
struct mxs_mmc_host *host = mmc_priv(mmc);
- int ret;
-
- if (!gpio_is_valid(host->wp_gpio))
- return -EINVAL;
-
- ret = gpio_get_value(host->wp_gpio);
+ struct mxs_ssp *ssp = &host->ssp;
+ int present, ret;
- if (host->wp_inverted)
- ret = !ret;
+ ret = mmc_gpio_get_cd(mmc);
+ if (ret >= 0)
+ return ret;
- return ret;
-}
+ present = !(readl(ssp->base + HW_SSP_STATUS(ssp)) &
+ BM_SSP_STATUS_CARD_DETECT);
-static int mxs_mmc_get_cd(struct mmc_host *mmc)
-{
- struct mxs_mmc_host *host = mmc_priv(mmc);
- struct mxs_ssp *ssp = &host->ssp;
+ if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
+ present = !present;
- return host->non_removable || host->broken_cd ||
- !(readl(ssp->base + HW_SSP_STATUS(ssp)) &
- BM_SSP_STATUS_CARD_DETECT) ^ host->cd_inverted;
+ return present;
}
static int mxs_mmc_reset(struct mxs_mmc_host *host)
@@ -549,7 +538,7 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
static const struct mmc_host_ops mxs_mmc_ops = {
.request = mxs_mmc_request,
- .get_ro = mxs_mmc_get_ro,
+ .get_ro = mmc_gpio_get_ro,
.get_cd = mxs_mmc_get_cd,
.set_ios = mxs_mmc_set_ios,
.enable_sdio_irq = mxs_mmc_enable_sdio_irq,
@@ -579,15 +568,12 @@ static int mxs_mmc_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(mxs_mmc_dt_ids, &pdev->dev);
- struct device_node *np = pdev->dev.of_node;
struct mxs_mmc_host *host;
struct mmc_host *mmc;
struct resource *iores;
int ret = 0, irq_err;
struct regulator *reg_vmmc;
- enum of_gpio_flags flags;
struct mxs_ssp *ssp;
- u32 bus_width = 0;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq_err = platform_get_irq(pdev, 0);
@@ -648,23 +634,13 @@ static int mxs_mmc_probe(struct platform_device *pdev)
mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
- of_property_read_u32(np, "bus-width", &bus_width);
- if (bus_width == 4)
- mmc->caps |= MMC_CAP_4_BIT_DATA;
- else if (bus_width == 8)
- mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
- host->broken_cd = of_property_read_bool(np, "broken-cd");
- host->non_removable = of_property_read_bool(np, "non-removable");
- if (host->non_removable)
- mmc->caps |= MMC_CAP_NONREMOVABLE;
- host->wp_gpio = of_get_named_gpio_flags(np, "wp-gpios", 0, &flags);
- if (flags & OF_GPIO_ACTIVE_LOW)
- host->wp_inverted = 1;
-
- host->cd_inverted = of_property_read_bool(np, "cd-inverted");
-
mmc->f_min = 400000;
mmc->f_max = 288000000;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto out_clk_disable;
+
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
mmc->max_segs = 52;
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2fce5ea5eb39..f23782683a7c 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -23,7 +23,9 @@
#include <linux/irq.h>
#include <linux/io.h>
+#include <plat/gpio-cfg.h>
#include <mach/dma.h>
+#include <mach/gpio-samsung.h>
#include <linux/platform_data/mmc-s3cmci.h>
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index ef19874fcd1f..9ce17f6e4014 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -31,10 +31,9 @@
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/acpi.h>
-#include <linux/acpi_gpio.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
@@ -144,6 +143,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "80860F14" , "3" , &sdhci_acpi_slot_int_sd },
{ "INT33BB" , "2" , &sdhci_acpi_slot_int_sdio },
{ "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio },
+ { "INT3436" , NULL, &sdhci_acpi_slot_int_sdio },
{ "PNP0D40" },
{ },
};
@@ -152,6 +152,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
{ "80860F14" },
{ "INT33BB" },
{ "INT33C6" },
+ { "INT3436" },
{ "PNP0D40" },
{ },
};
@@ -199,22 +200,23 @@ static irqreturn_t sdhci_acpi_sd_cd(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int sdhci_acpi_add_own_cd(struct device *dev, int gpio,
- struct mmc_host *mmc)
+static int sdhci_acpi_add_own_cd(struct device *dev, struct mmc_host *mmc)
{
+ struct gpio_desc *desc;
unsigned long flags;
int err, irq;
- if (gpio < 0) {
- err = gpio;
+ desc = devm_gpiod_get_index(dev, "sd_cd", 0);
+ if (IS_ERR(desc)) {
+ err = PTR_ERR(desc);
goto out;
}
- err = devm_gpio_request_one(dev, gpio, GPIOF_DIR_IN, "sd_cd");
+ err = gpiod_direction_input(desc);
if (err)
- goto out;
+ goto out_free;
- irq = gpio_to_irq(gpio);
+ irq = gpiod_to_irq(desc);
if (irq < 0) {
err = irq;
goto out_free;
@@ -228,7 +230,7 @@ static int sdhci_acpi_add_own_cd(struct device *dev, int gpio,
return 0;
out_free:
- devm_gpio_free(dev, gpio);
+ devm_gpiod_put(dev, desc);
out:
dev_warn(dev, "failed to setup card detect wake up\n");
return err;
@@ -236,8 +238,7 @@ out:
#else
-static int sdhci_acpi_add_own_cd(struct device *dev, int gpio,
- struct mmc_host *mmc)
+static int sdhci_acpi_add_own_cd(struct device *dev, struct mmc_host *mmc)
{
return 0;
}
@@ -254,7 +255,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
struct resource *iomem;
resource_size_t len;
const char *hid;
- int err, gpio;
+ int err;
if (acpi_bus_get_device(handle, &device))
return -ENODEV;
@@ -279,8 +280,6 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (IS_ERR(host))
return PTR_ERR(host);
- gpio = acpi_get_gpio_by_index(dev, 0, NULL);
-
c = sdhci_priv(host);
c->host = host;
c->slot = sdhci_acpi_get_slot(handle, hid);
@@ -338,7 +337,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
goto err_free;
if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
- if (sdhci_acpi_add_own_cd(dev, gpio, host->mmc))
+ if (sdhci_acpi_add_own_cd(dev, host->mmc))
c->use_runtime_pm = false;
}
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 461a4c3f4ef7..b841bb7cd371 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -27,6 +27,7 @@
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/mmc-esdhc-imx.h>
+#include <linux/pm_runtime.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
@@ -45,6 +46,8 @@
#define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25)
/* Bits 3 and 6 are not SDHCI standard definitions */
#define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7
+/* Tuning bits */
+#define ESDHC_MIX_CTRL_TUNING_MASK 0x03c00000
/* dll control register */
#define ESDHC_DLL_CTRL 0x60
@@ -385,6 +388,22 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
return ret;
}
+ if (unlikely(reg == SDHCI_TRANSFER_MODE)) {
+ if (esdhc_is_usdhc(imx_data)) {
+ u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ ret = m & ESDHC_MIX_CTRL_SDHCI_MASK;
+ /* Swap AC23 bit */
+ if (m & ESDHC_MIX_CTRL_AC23EN) {
+ ret &= ~ESDHC_MIX_CTRL_AC23EN;
+ ret |= SDHCI_TRNS_AUTO_CMD23;
+ }
+ } else {
+ ret = readw(host->ioaddr + SDHCI_TRANSFER_MODE);
+ }
+
+ return ret;
+ }
+
return readw(host->ioaddr + reg);
}
@@ -421,24 +440,20 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR);
u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
- new_val = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+ if (val & SDHCI_CTRL_TUNED_CLK) {
+ v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
+ } else {
+ v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+ m &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
+ }
+
if (val & SDHCI_CTRL_EXEC_TUNING) {
- new_val |= ESDHC_STD_TUNING_EN |
- ESDHC_TUNING_START_TAP;
v |= ESDHC_MIX_CTRL_EXE_TUNE;
m |= ESDHC_MIX_CTRL_FBCLK_SEL;
} else {
- new_val &= ~ESDHC_STD_TUNING_EN;
v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
- m &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
}
- if (val & SDHCI_CTRL_TUNED_CLK)
- v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
- else
- v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
-
- writel(new_val, host->ioaddr + ESDHC_TUNING_CTRL);
writel(v, host->ioaddr + SDHCI_ACMD12_ERR);
writel(m, host->ioaddr + ESDHC_MIX_CTRL);
}
@@ -546,7 +561,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
* Do it manually here.
*/
if (esdhc_is_usdhc(imx_data)) {
- writel(0, host->ioaddr + ESDHC_MIX_CTRL);
+ /* the tuning bits should be kept during reset */
+ new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ writel(new_val & ESDHC_MIX_CTRL_TUNING_MASK,
+ host->ioaddr + ESDHC_MIX_CTRL);
imx_data->is_ddr = 0;
}
}
@@ -558,19 +576,17 @@ static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
struct pltfm_imx_data *imx_data = pltfm_host->priv;
struct esdhc_platform_data *boarddata = &imx_data->boarddata;
- u32 f_host = clk_get_rate(pltfm_host->clk);
-
- if (boarddata->f_max && (boarddata->f_max < f_host))
+ if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock))
return boarddata->f_max;
else
- return f_host;
+ return pltfm_host->clock;
}
static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- return clk_get_rate(pltfm_host->clk) / 256 / 16;
+ return pltfm_host->clock / 256 / 16;
}
static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
@@ -578,7 +594,7 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
- unsigned int host_clock = clk_get_rate(pltfm_host->clk);
+ unsigned int host_clock = pltfm_host->clock;
int pre_div = 2;
int div = 1;
u32 temp, val;
@@ -681,6 +697,7 @@ static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
/* FIXME: delay a bit for card to be ready for next tuning due to errors */
mdelay(1);
+ pm_runtime_get_sync(host->mmc->parent);
reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
ESDHC_MIX_CTRL_FBCLK_SEL;
@@ -699,7 +716,7 @@ static void esdhc_request_done(struct mmc_request *mrq)
static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode)
{
struct mmc_command cmd = {0};
- struct mmc_request mrq = {0};
+ struct mmc_request mrq = {NULL};
struct mmc_data data = {0};
struct scatterlist sg;
char tuning_pattern[ESDHC_TUNING_BLOCK_PATTERN_LEN];
@@ -809,6 +826,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
pinctrl = imx_data->pins_100mhz;
break;
case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
pinctrl = imx_data->pins_200mhz;
break;
default:
@@ -836,6 +854,7 @@ static int esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR50;
break;
case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR104;
break;
case MMC_TIMING_UHS_DDR50:
@@ -976,7 +995,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
}
pltfm_host->clk = imx_data->clk_per;
-
+ pltfm_host->clock = clk_get_rate(pltfm_host->clk);
clk_prepare_enable(imx_data->clk_per);
clk_prepare_enable(imx_data->clk_ipg);
clk_prepare_enable(imx_data->clk_ahb);
@@ -1009,11 +1028,18 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (esdhc_is_usdhc(imx_data)) {
writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL);
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+ host->mmc->caps |= MMC_CAP_1_8V_DDR;
}
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
sdhci_esdhc_ops.platform_execute_tuning =
esdhc_executing_tuning;
+
+ if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
+ writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
+ ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP,
+ host->ioaddr + ESDHC_TUNING_CTRL);
+
boarddata = &imx_data->boarddata;
if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
if (!host->mmc->parent->platform_data) {
@@ -1053,7 +1079,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
break;
case ESDHC_CD_PERMANENT:
- host->mmc->caps = MMC_CAP_NONREMOVABLE;
+ host->mmc->caps |= MMC_CAP_NONREMOVABLE;
break;
case ESDHC_CD_NONE:
@@ -1094,6 +1120,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (err)
goto disable_clk;
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_suspend_ignore_children(&pdev->dev, 1);
+
return 0;
disable_clk:
@@ -1114,21 +1146,63 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
sdhci_remove_host(host, dead);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ if (!IS_ENABLED(CONFIG_PM_RUNTIME)) {
+ clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_ipg);
+ clk_disable_unprepare(imx_data->clk_ahb);
+ }
+
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int sdhci_esdhc_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ int ret;
+
+ ret = sdhci_runtime_suspend_host(host);
+
clk_disable_unprepare(imx_data->clk_per);
clk_disable_unprepare(imx_data->clk_ipg);
clk_disable_unprepare(imx_data->clk_ahb);
- sdhci_pltfm_free(pdev);
+ return ret;
+}
- return 0;
+static int sdhci_esdhc_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+
+ clk_prepare_enable(imx_data->clk_per);
+ clk_prepare_enable(imx_data->clk_ipg);
+ clk_prepare_enable(imx_data->clk_ahb);
+
+ return sdhci_runtime_resume_host(host);
}
+#endif
+
+static const struct dev_pm_ops sdhci_esdhc_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_pltfm_resume)
+ SET_RUNTIME_PM_OPS(sdhci_esdhc_runtime_suspend,
+ sdhci_esdhc_runtime_resume, NULL)
+};
static struct platform_driver sdhci_esdhc_imx_driver = {
.driver = {
.name = "sdhci-esdhc-imx",
.owner = THIS_MODULE,
.of_match_table = imx_esdhc_dt_ids,
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_esdhc_pmops,
},
.id_table = imx_esdhc_devtype,
.probe = sdhci_esdhc_imx_probe,
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
new file mode 100644
index 000000000000..f7c7cf62437d
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -0,0 +1,224 @@
+/*
+ * Arasan Secure Digital Host Controller Interface.
+ * Copyright (C) 2011 - 2012 Michal Simek <monstr@monstr.eu>
+ * Copyright (c) 2012 Wind River Systems, Inc.
+ * Copyright (C) 2013 Pengutronix e.K.
+ * Copyright (C) 2013 Xilinx Inc.
+ *
+ * Based on sdhci-of-esdhc.c
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie <X.Xie@freescale.com>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/module.h>
+#include "sdhci-pltfm.h"
+
+#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c
+
+#define CLK_CTRL_TIMEOUT_SHIFT 16
+#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT)
+#define CLK_CTRL_TIMEOUT_MIN_EXP 13
+
+/**
+ * struct sdhci_arasan_data
+ * @clk_ahb: Pointer to the AHB clock
+ */
+struct sdhci_arasan_data {
+ struct clk *clk_ahb;
+};
+
+static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
+{
+ u32 div;
+ unsigned long freq;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET);
+ div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT;
+
+ freq = clk_get_rate(pltfm_host->clk);
+ freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div);
+
+ return freq;
+}
+
+static struct sdhci_ops sdhci_arasan_ops = {
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_timeout_clock = sdhci_arasan_get_timeout_clock,
+};
+
+static struct sdhci_pltfm_data sdhci_arasan_pdata = {
+ .ops = &sdhci_arasan_ops,
+};
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * sdhci_arasan_suspend - Suspend method for the driver
+ * @dev: Address of the device structure
+ * Returns 0 on success and error value on error
+ *
+ * Put the device in a low power state.
+ */
+static int sdhci_arasan_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
+ int ret;
+
+ ret = sdhci_suspend_host(host);
+ if (ret)
+ return ret;
+
+ clk_disable(pltfm_host->clk);
+ clk_disable(sdhci_arasan->clk_ahb);
+
+ return 0;
+}
+
+/**
+ * sdhci_arasan_resume - Resume method for the driver
+ * @dev: Address of the device structure
+ * Returns 0 on success and error value on error
+ *
+ * Resume operation after suspend
+ */
+static int sdhci_arasan_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
+ int ret;
+
+ ret = clk_enable(sdhci_arasan->clk_ahb);
+ if (ret) {
+ dev_err(dev, "Cannot enable AHB clock.\n");
+ return ret;
+ }
+
+ ret = clk_enable(pltfm_host->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable SD clock.\n");
+ clk_disable(sdhci_arasan->clk_ahb);
+ return ret;
+ }
+
+ return sdhci_resume_host(host);
+}
+#endif /* ! CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend,
+ sdhci_arasan_resume);
+
+static int sdhci_arasan_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct clk *clk_xin;
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_arasan_data *sdhci_arasan;
+
+ sdhci_arasan = devm_kzalloc(&pdev->dev, sizeof(*sdhci_arasan),
+ GFP_KERNEL);
+ if (!sdhci_arasan)
+ return -ENOMEM;
+
+ sdhci_arasan->clk_ahb = devm_clk_get(&pdev->dev, "clk_ahb");
+ if (IS_ERR(sdhci_arasan->clk_ahb)) {
+ dev_err(&pdev->dev, "clk_ahb clock not found.\n");
+ return PTR_ERR(sdhci_arasan->clk_ahb);
+ }
+
+ clk_xin = devm_clk_get(&pdev->dev, "clk_xin");
+ if (IS_ERR(clk_xin)) {
+ dev_err(&pdev->dev, "clk_xin clock not found.\n");
+ return PTR_ERR(clk_xin);
+ }
+
+ ret = clk_prepare_enable(sdhci_arasan->clk_ahb);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable AHB clock.\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(clk_xin);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable SD clock.\n");
+ goto clk_dis_ahb;
+ }
+
+ host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata, 0);
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
+ dev_err(&pdev->dev, "platform init failed (%u)\n", ret);
+ goto clk_disable_all;
+ }
+
+ sdhci_get_of_property(pdev);
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = sdhci_arasan;
+ pltfm_host->clk = clk_xin;
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(&pdev->dev, "platform register failed (%u)\n", ret);
+ goto err_pltfm_free;
+ }
+
+ return 0;
+
+err_pltfm_free:
+ sdhci_pltfm_free(pdev);
+clk_disable_all:
+ clk_disable_unprepare(clk_xin);
+clk_dis_ahb:
+ clk_disable_unprepare(sdhci_arasan->clk_ahb);
+
+ return ret;
+}
+
+static int sdhci_arasan_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
+
+ clk_disable_unprepare(pltfm_host->clk);
+ clk_disable_unprepare(sdhci_arasan->clk_ahb);
+
+ return sdhci_pltfm_unregister(pdev);
+}
+
+static const struct of_device_id sdhci_arasan_of_match[] = {
+ { .compatible = "arasan,sdhci-8.9a" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
+
+static struct platform_driver sdhci_arasan_driver = {
+ .driver = {
+ .name = "sdhci-arasan",
+ .owner = THIS_MODULE,
+ .of_match_table = sdhci_arasan_of_match,
+ .pm = &sdhci_arasan_dev_pm_ops,
+ },
+ .probe = sdhci_arasan_probe,
+ .remove = sdhci_arasan_remove,
+};
+
+module_platform_driver(sdhci_arasan_driver);
+
+MODULE_DESCRIPTION("Driver for the Arasan SDHCI Controller");
+MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
new file mode 100644
index 000000000000..f49666bcc52a
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (C) 2013 BayHub Technology Ltd.
+ *
+ * Authors: Peter Guo <peter.guo@bayhubtech.com>
+ * Adam Lee <adam.lee@canonical.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pci.h>
+
+#include "sdhci.h"
+#include "sdhci-pci.h"
+#include "sdhci-pci-o2micro.h"
+
+void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
+{
+ u32 scratch_32;
+ int ret;
+ /* Improve write performance for SD3.0 */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_DEV_CTRL, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~((1 << 12) | (1 << 13) | (1 << 14));
+ pci_write_config_dword(chip->pdev, O2_SD_DEV_CTRL, scratch_32);
+
+ /* Enable Link abnormal reset generating Reset */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_MISC_REG5, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~((1 << 19) | (1 << 11));
+ scratch_32 |= (1 << 10);
+ pci_write_config_dword(chip->pdev, O2_SD_MISC_REG5, scratch_32);
+
+ /* set card power over current protection */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_TEST_REG, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 |= (1 << 4);
+ pci_write_config_dword(chip->pdev, O2_SD_TEST_REG, scratch_32);
+
+ /* adjust the output delay for SD mode */
+ pci_write_config_dword(chip->pdev, O2_SD_DELAY_CTRL, 0x00002492);
+
+ /* Set the output voltage setting of Aux 1.2v LDO */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_LD0_CTRL, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(3 << 12);
+ pci_write_config_dword(chip->pdev, O2_SD_LD0_CTRL, scratch_32);
+
+ /* Set Max power supply capability of SD host */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_CAP_REG0, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(0x01FE);
+ scratch_32 |= 0x00CC;
+ pci_write_config_dword(chip->pdev, O2_SD_CAP_REG0, scratch_32);
+ /* Set DLL Tuning Window */
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_TUNING_CTRL, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(0x000000FF);
+ scratch_32 |= 0x00000066;
+ pci_write_config_dword(chip->pdev, O2_SD_TUNING_CTRL, scratch_32);
+
+ /* Set UHS2 T_EIDLE */
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_UHS2_L1_CTRL, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(0x000000FC);
+ scratch_32 |= 0x00000084;
+ pci_write_config_dword(chip->pdev, O2_SD_UHS2_L1_CTRL, scratch_32);
+
+ /* Set UHS2 Termination */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_FUNC_REG3, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~((1 << 21) | (1 << 30));
+
+ /* Set RTD3 function disabled */
+ scratch_32 |= ((1 << 29) | (1 << 28));
+ pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG3, scratch_32);
+
+ /* Set L1 Entrance Timer */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_CAPS, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(0xf0000000);
+ scratch_32 |= 0x30000000;
+ pci_write_config_dword(chip->pdev, O2_SD_CAPS, scratch_32);
+
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_MISC_CTRL4, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(0x000f0000);
+ scratch_32 |= 0x00080000;
+ pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL4, scratch_32);
+}
+EXPORT_SYMBOL_GPL(sdhci_pci_o2_fujin2_pci_init);
+
+int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
+{
+ struct sdhci_pci_chip *chip;
+ struct sdhci_host *host;
+ u32 reg;
+
+ chip = slot->chip;
+ host = slot->host;
+ switch (chip->pdev->device) {
+ case PCI_DEVICE_ID_O2_SDS0:
+ case PCI_DEVICE_ID_O2_SEABIRD0:
+ case PCI_DEVICE_ID_O2_SEABIRD1:
+ case PCI_DEVICE_ID_O2_SDS1:
+ case PCI_DEVICE_ID_O2_FUJIN2:
+ reg = sdhci_readl(host, O2_SD_VENDOR_SETTING);
+ if (reg & 0x1)
+ host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+
+ if (chip->pdev->device != PCI_DEVICE_ID_O2_FUJIN2)
+ break;
+ /* set DLL watchdog timer */
+ reg = sdhci_readl(host, O2_SD_VENDOR_SETTING2);
+ reg |= (1 << 12);
+ sdhci_writel(host, reg, O2_SD_VENDOR_SETTING2);
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe_slot);
+
+int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+{
+ int ret;
+ u8 scratch;
+ u32 scratch_32;
+
+ switch (chip->pdev->device) {
+ case PCI_DEVICE_ID_O2_8220:
+ case PCI_DEVICE_ID_O2_8221:
+ case PCI_DEVICE_ID_O2_8320:
+ case PCI_DEVICE_ID_O2_8321:
+ /* This extra setup is required due to broken ADMA. */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+ /* Set Multi 3 to VCC3V# */
+ pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
+
+ /* Disable CLK_REQ# support after media DET */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_CLKREQ, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x20;
+ pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
+
+ /* Choose capabilities, enable SDMA. We have to write 0x01
+ * to the capabilities register first to unlock it.
+ */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x01;
+ pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
+ pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
+
+ /* Disable ADMA1/2 */
+ pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
+ pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
+
+ /* Disable the infinite transfer mode */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_INF_MOD, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x08;
+ pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
+
+ /* Lock WP */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ break;
+ case PCI_DEVICE_ID_O2_SDS0:
+ case PCI_DEVICE_ID_O2_SDS1:
+ case PCI_DEVICE_ID_O2_FUJIN2:
+ /* UnLock WP */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+ /* Set timeout CLK */
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_CLK_SETTING, &scratch_32);
+ if (ret)
+ return ret;
+
+ scratch_32 &= ~(0xFF00);
+ scratch_32 |= 0x07E0C800;
+ pci_write_config_dword(chip->pdev,
+ O2_SD_CLK_SETTING, scratch_32);
+
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_CLKREQ, &scratch_32);
+ if (ret)
+ return ret;
+ scratch_32 |= 0x3;
+ pci_write_config_dword(chip->pdev, O2_SD_CLKREQ, scratch_32);
+
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, &scratch_32);
+ if (ret)
+ return ret;
+
+ scratch_32 &= ~(0x1F3F070E);
+ scratch_32 |= 0x18270106;
+ pci_write_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, scratch_32);
+
+ /* Disable UHS1 function */
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_CAP_REG2, &scratch_32);
+ if (ret)
+ return ret;
+ scratch_32 &= ~(0xE0);
+ pci_write_config_dword(chip->pdev,
+ O2_SD_CAP_REG2, scratch_32);
+
+ if (chip->pdev->device == PCI_DEVICE_ID_O2_FUJIN2)
+ sdhci_pci_o2_fujin2_pci_init(chip);
+
+ /* Lock WP */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ break;
+ case PCI_DEVICE_ID_O2_SEABIRD0:
+ case PCI_DEVICE_ID_O2_SEABIRD1:
+ /* UnLock WP */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_FUNC_REG0, &scratch_32);
+
+ if ((scratch_32 & 0xff000000) == 0x01000000) {
+ scratch_32 &= 0x0000FFFF;
+ scratch_32 |= 0x1F340000;
+
+ pci_write_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, scratch_32);
+ } else {
+ scratch_32 &= 0x0000FFFF;
+ scratch_32 |= 0x2c280000;
+
+ pci_write_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, scratch_32);
+
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_FUNC_REG4,
+ &scratch_32);
+ scratch_32 |= (1 << 22);
+ pci_write_config_dword(chip->pdev,
+ O2_SD_FUNC_REG4, scratch_32);
+ }
+
+ /* Lock WP */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe);
+
+int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip)
+{
+ sdhci_pci_o2_probe(chip);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdhci_pci_o2_resume);
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.h b/drivers/mmc/host/sdhci-pci-o2micro.h
new file mode 100644
index 000000000000..dbec4c933488
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci-o2micro.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2013 BayHub Technology Ltd.
+ *
+ * Authors: Peter Guo <peter.guo@bayhubtech.com>
+ * Adam Lee <adam.lee@canonical.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDHCI_PCI_O2MICRO_H
+#define __SDHCI_PCI_O2MICRO_H
+
+#include "sdhci-pci.h"
+
+/*
+ * O2Micro device IDs
+ */
+
+#define PCI_DEVICE_ID_O2_SDS0 0x8420
+#define PCI_DEVICE_ID_O2_SDS1 0x8421
+#define PCI_DEVICE_ID_O2_FUJIN2 0x8520
+#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620
+#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621
+
+/*
+ * O2Micro device registers
+ */
+
+#define O2_SD_MISC_REG5 0x64
+#define O2_SD_LD0_CTRL 0x68
+#define O2_SD_DEV_CTRL 0x88
+#define O2_SD_LOCK_WP 0xD3
+#define O2_SD_TEST_REG 0xD4
+#define O2_SD_FUNC_REG0 0xDC
+#define O2_SD_MULTI_VCC3V 0xEE
+#define O2_SD_CLKREQ 0xEC
+#define O2_SD_CAPS 0xE0
+#define O2_SD_ADMA1 0xE2
+#define O2_SD_ADMA2 0xE7
+#define O2_SD_INF_MOD 0xF1
+#define O2_SD_MISC_CTRL4 0xFC
+#define O2_SD_TUNING_CTRL 0x300
+#define O2_SD_PLL_SETTING 0x304
+#define O2_SD_CLK_SETTING 0x328
+#define O2_SD_CAP_REG2 0x330
+#define O2_SD_CAP_REG0 0x334
+#define O2_SD_UHS1_CAP_SETTING 0x33C
+#define O2_SD_DELAY_CTRL 0x350
+#define O2_SD_UHS2_L1_CTRL 0x35C
+#define O2_SD_FUNC_REG3 0x3E0
+#define O2_SD_FUNC_REG4 0x3E4
+
+#define O2_SD_VENDOR_SETTING 0x110
+#define O2_SD_VENDOR_SETTING2 0x1C8
+
+extern void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip);
+
+extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
+
+extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
+
+extern int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
+
+#endif /* __SDHCI_PCI_O2MICRO_H */
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 8f753811fc7a..0955777b6c7e 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -27,79 +27,8 @@
#include <linux/mmc/sdhci-pci-data.h>
#include "sdhci.h"
-
-/*
- * PCI device IDs
- */
-#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809
-#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a
-#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14
-#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15
-#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16
-#define PCI_DEVICE_ID_INTEL_BYT_EMMC2 0x0f50
-#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
-#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9
-#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa
-#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb
-#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5
-#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6
-
-/*
- * PCI registers
- */
-
-#define PCI_SDHCI_IFPIO 0x00
-#define PCI_SDHCI_IFDMA 0x01
-#define PCI_SDHCI_IFVENDOR 0x02
-
-#define PCI_SLOT_INFO 0x40 /* 8 bits */
-#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7)
-#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07
-
-#define MAX_SLOTS 8
-
-struct sdhci_pci_chip;
-struct sdhci_pci_slot;
-
-struct sdhci_pci_fixes {
- unsigned int quirks;
- unsigned int quirks2;
- bool allow_runtime_pm;
-
- int (*probe) (struct sdhci_pci_chip *);
-
- int (*probe_slot) (struct sdhci_pci_slot *);
- void (*remove_slot) (struct sdhci_pci_slot *, int);
-
- int (*suspend) (struct sdhci_pci_chip *);
- int (*resume) (struct sdhci_pci_chip *);
-};
-
-struct sdhci_pci_slot {
- struct sdhci_pci_chip *chip;
- struct sdhci_host *host;
- struct sdhci_pci_data *data;
-
- int pci_bar;
- int rst_n_gpio;
- int cd_gpio;
- int cd_irq;
-
- void (*hw_reset)(struct sdhci_host *host);
-};
-
-struct sdhci_pci_chip {
- struct pci_dev *pdev;
-
- unsigned int quirks;
- unsigned int quirks2;
- bool allow_runtime_pm;
- const struct sdhci_pci_fixes *fixes;
-
- int num_slots; /* Slots on controller */
- struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */
-};
-
+#include "sdhci-pci.h"
+#include "sdhci-pci-o2micro.h"
/*****************************************************************************\
* *
@@ -296,6 +225,7 @@ static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.allow_runtime_pm = true,
+ .own_cd_for_runtime_pm = true,
};
static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
@@ -360,6 +290,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON,
.allow_runtime_pm = true,
+ .own_cd_for_runtime_pm = true,
};
/* Define Host controllers for Intel Merrifield platform */
@@ -381,6 +312,7 @@ static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot)
static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_HS200,
.probe_slot = intel_mrfl_mmc_probe_slot,
};
@@ -393,65 +325,6 @@ static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = {
#define O2_SD_ADMA2 0xE7
#define O2_SD_INF_MOD 0xF1
-static int o2_probe(struct sdhci_pci_chip *chip)
-{
- int ret;
- u8 scratch;
-
- switch (chip->pdev->device) {
- case PCI_DEVICE_ID_O2_8220:
- case PCI_DEVICE_ID_O2_8221:
- case PCI_DEVICE_ID_O2_8320:
- case PCI_DEVICE_ID_O2_8321:
- /* This extra setup is required due to broken ADMA. */
- ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
- if (ret)
- return ret;
- scratch &= 0x7f;
- pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
-
- /* Set Multi 3 to VCC3V# */
- pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
-
- /* Disable CLK_REQ# support after media DET */
- ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch);
- if (ret)
- return ret;
- scratch |= 0x20;
- pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
-
- /* Choose capabilities, enable SDMA. We have to write 0x01
- * to the capabilities register first to unlock it.
- */
- ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
- if (ret)
- return ret;
- scratch |= 0x01;
- pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
- pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
-
- /* Disable ADMA1/2 */
- pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
- pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
-
- /* Disable the infinite transfer mode */
- ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch);
- if (ret)
- return ret;
- scratch |= 0x08;
- pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
-
- /* Lock WP */
- ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
- if (ret)
- return ret;
- scratch |= 0x80;
- pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
- }
-
- return 0;
-}
-
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{
u8 scratch;
@@ -642,7 +515,10 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
}
static const struct sdhci_pci_fixes sdhci_o2 = {
- .probe = o2_probe,
+ .probe = sdhci_pci_o2_probe,
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .probe_slot = sdhci_pci_o2_probe_slot,
+ .resume = sdhci_pci_o2_resume,
};
static const struct sdhci_pci_fixes sdhci_jmicron = {
@@ -1055,6 +931,46 @@ static const struct pci_device_id pci_ids[] = {
.driver_data = (kernel_ulong_t)&sdhci_o2,
},
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_FUJIN2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_SDS0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_SDS1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_SEABIRD0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_SEABIRD1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
{ /* Generic SD host controller */
PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
},
@@ -1457,6 +1373,15 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
sdhci_pci_add_own_cd(slot);
+ /*
+ * Check if the chip needs a separate GPIO for card detect to wake up
+ * from runtime suspend. If it is not there, don't allow runtime PM.
+ * Note sdhci_pci_add_own_cd() sets slot->cd_gpio to -EINVAL on failure.
+ */
+ if (chip->fixes && chip->fixes->own_cd_for_runtime_pm &&
+ !gpio_is_valid(slot->cd_gpio))
+ chip->allow_runtime_pm = false;
+
return slot;
remove:
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
new file mode 100644
index 000000000000..6d718719659e
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -0,0 +1,78 @@
+#ifndef __SDHCI_PCI_H
+#define __SDHCI_PCI_H
+
+/*
+ * PCI device IDs
+ */
+
+#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809
+#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a
+#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14
+#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15
+#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16
+#define PCI_DEVICE_ID_INTEL_BYT_EMMC2 0x0f50
+#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb
+#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5
+#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6
+
+/*
+ * PCI registers
+ */
+
+#define PCI_SDHCI_IFPIO 0x00
+#define PCI_SDHCI_IFDMA 0x01
+#define PCI_SDHCI_IFVENDOR 0x02
+
+#define PCI_SLOT_INFO 0x40 /* 8 bits */
+#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7)
+#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07
+
+#define MAX_SLOTS 8
+
+struct sdhci_pci_chip;
+struct sdhci_pci_slot;
+
+struct sdhci_pci_fixes {
+ unsigned int quirks;
+ unsigned int quirks2;
+ bool allow_runtime_pm;
+ bool own_cd_for_runtime_pm;
+
+ int (*probe) (struct sdhci_pci_chip *);
+
+ int (*probe_slot) (struct sdhci_pci_slot *);
+ void (*remove_slot) (struct sdhci_pci_slot *, int);
+
+ int (*suspend) (struct sdhci_pci_chip *);
+ int (*resume) (struct sdhci_pci_chip *);
+};
+
+struct sdhci_pci_slot {
+ struct sdhci_pci_chip *chip;
+ struct sdhci_host *host;
+ struct sdhci_pci_data *data;
+
+ int pci_bar;
+ int rst_n_gpio;
+ int cd_gpio;
+ int cd_irq;
+
+ void (*hw_reset)(struct sdhci_host *host);
+};
+
+struct sdhci_pci_chip {
+ struct pci_dev *pdev;
+
+ unsigned int quirks;
+ unsigned int quirks2;
+ bool allow_runtime_pm;
+ const struct sdhci_pci_fixes *fixes;
+
+ int num_slots; /* Slots on controller */
+ struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */
+};
+
+#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index e2065a44dffc..bef250e95418 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -237,19 +237,21 @@ int sdhci_pltfm_unregister(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
#ifdef CONFIG_PM
-static int sdhci_pltfm_suspend(struct device *dev)
+int sdhci_pltfm_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_suspend_host(host);
}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
-static int sdhci_pltfm_resume(struct device *dev)
+int sdhci_pltfm_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_resume_host(host);
}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
const struct dev_pm_ops sdhci_pltfm_pmops = {
.suspend = sdhci_pltfm_suspend,
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index e15ced79f7ed..04bc2481e5c3 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -111,6 +111,8 @@ static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host)
}
#ifdef CONFIG_PM
+extern int sdhci_pltfm_suspend(struct device *dev);
+extern int sdhci_pltfm_resume(struct device *dev);
extern const struct dev_pm_ops sdhci_pltfm_pmops;
#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
#else
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 5b7b2eba8a54..a835898a68dd 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -198,6 +198,7 @@ static struct sdhci_tegra_soc_data soc_data_tegra114 = {
};
static const struct of_device_id sdhci_tegra_dt_match[] = {
+ { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index bd8a0982aec3..9ddef4763541 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -898,8 +898,13 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
u16 mode;
struct mmc_data *data = cmd->data;
- if (data == NULL)
+ if (data == NULL) {
+ /* clear Auto CMD settings for no data CMDs */
+ mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
+ sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
+ SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
return;
+ }
WARN_ON(!host->data);
@@ -1013,7 +1018,12 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
mdelay(1);
}
- mod_timer(&host->timer, jiffies + 10 * HZ);
+ timeout = jiffies;
+ if (!cmd->data && cmd->cmd_timeout_ms > 9000)
+ timeout += DIV_ROUND_UP(cmd->cmd_timeout_ms, 1000) * HZ + HZ;
+ else
+ timeout += 10 * HZ;
+ mod_timer(&host->timer, timeout);
host->cmd = cmd;
@@ -1391,6 +1401,13 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
mmc->card->type == MMC_TYPE_MMC ?
MMC_SEND_TUNING_BLOCK_HS200 :
MMC_SEND_TUNING_BLOCK;
+
+ /* Set host->mrq to NULL here, in case the pending
+ * finish_tasklet finishes it incorrectly.
+ */
+ host->mrq = NULL;
+
spin_unlock_irqrestore(&host->lock, flags);
sdhci_execute_tuning(mmc, tuning_opcode);
spin_lock_irqsave(&host->lock, flags);
@@ -1845,12 +1862,12 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
unsigned long timeout;
int err = 0;
bool requires_tuning_nonuhs = false;
+ unsigned long flags;
host = mmc_priv(mmc);
sdhci_runtime_pm_get(host);
- disable_irq(host->irq);
- spin_lock(&host->lock);
+ spin_lock_irqsave(&host->lock, flags);
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
@@ -1870,15 +1887,13 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
requires_tuning_nonuhs)
ctrl |= SDHCI_CTRL_EXEC_TUNING;
else {
- spin_unlock(&host->lock);
- enable_irq(host->irq);
+ spin_unlock_irqrestore(&host->lock, flags);
sdhci_runtime_pm_put(host);
return 0;
}
if (host->ops->platform_execute_tuning) {
- spin_unlock(&host->lock);
- enable_irq(host->irq);
+ spin_unlock_irqrestore(&host->lock, flags);
err = host->ops->platform_execute_tuning(host, opcode);
sdhci_runtime_pm_put(host);
return err;
@@ -1951,15 +1966,12 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
host->cmd = NULL;
host->mrq = NULL;
- spin_unlock(&host->lock);
- enable_irq(host->irq);
-
+ spin_unlock_irqrestore(&host->lock, flags);
/* Wait for Buffer Read Ready interrupt */
wait_event_interruptible_timeout(host->buf_ready_int,
(host->tuning_done == 1),
msecs_to_jiffies(50));
- disable_irq(host->irq);
- spin_lock(&host->lock);
+ spin_lock_irqsave(&host->lock, flags);
if (!host->tuning_done) {
pr_info(DRIVER_NAME ": Timeout waiting for "
@@ -2034,8 +2046,7 @@ out:
err = 0;
sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
- spin_unlock(&host->lock);
- enable_irq(host->irq);
+ spin_unlock_irqrestore(&host->lock, flags);
sdhci_runtime_pm_put(host);
return err;
@@ -3004,7 +3015,8 @@ int sdhci_add_host(struct sdhci_host *host)
/* SD3.0: SDR104 is supported so (for eMMC) the caps2
* field can be promoted to support HS200.
*/
- mmc->caps2 |= MMC_CAP2_HS200;
+ if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
+ mmc->caps2 |= MMC_CAP2_HS200;
} else if (caps[1] & SDHCI_SUPPORT_SDR50)
mmc->caps |= MMC_CAP_UHS_SDR50;
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index d032b080ac4d..54730f4aac87 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -381,73 +381,75 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
desc, cookie);
}
-static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
- struct sh_mmcif_plat_data *pdata)
+static struct dma_chan *
+sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
+ struct sh_mmcif_plat_data *pdata,
+ enum dma_transfer_direction direction)
{
- struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
struct dma_slave_config cfg;
+ struct dma_chan *chan;
+ unsigned int slave_id;
+ struct resource *res;
dma_cap_mask_t mask;
int ret;
- host->dma_active = false;
-
- if (pdata) {
- if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
- return;
- } else if (!host->pd->dev.of_node) {
- return;
- }
-
- /* We can only either use DMA for both Tx and Rx or not use it at all */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- host->chan_tx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
- pdata ? (void *)pdata->slave_id_tx : NULL,
- &host->pd->dev, "tx");
- dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
- host->chan_tx);
+ if (pdata)
+ slave_id = direction == DMA_MEM_TO_DEV
+ ? pdata->slave_id_tx : pdata->slave_id_rx;
+ else
+ slave_id = 0;
- if (!host->chan_tx)
- return;
+ chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ (void *)(unsigned long)slave_id, &host->pd->dev,
+ direction == DMA_MEM_TO_DEV ? "tx" : "rx");
+
+ dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__,
+ direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan);
+
+ if (!chan)
+ return NULL;
+
+ res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
/* In the OF case the driver will get the slave ID from the DT */
- if (pdata)
- cfg.slave_id = pdata->slave_id_tx;
- cfg.direction = DMA_MEM_TO_DEV;
+ cfg.slave_id = slave_id;
+ cfg.direction = direction;
cfg.dst_addr = res->start + MMCIF_CE_DATA;
cfg.src_addr = 0;
- ret = dmaengine_slave_config(host->chan_tx, &cfg);
- if (ret < 0)
- goto ecfgtx;
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret < 0) {
+ dma_release_channel(chan);
+ return NULL;
+ }
- host->chan_rx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
- pdata ? (void *)pdata->slave_id_rx : NULL,
- &host->pd->dev, "rx");
- dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
- host->chan_rx);
+ return chan;
+}
- if (!host->chan_rx)
- goto erqrx;
+static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
+ struct sh_mmcif_plat_data *pdata)
+{
+ host->dma_active = false;
- if (pdata)
- cfg.slave_id = pdata->slave_id_rx;
- cfg.direction = DMA_DEV_TO_MEM;
- cfg.dst_addr = 0;
- cfg.src_addr = res->start + MMCIF_CE_DATA;
- ret = dmaengine_slave_config(host->chan_rx, &cfg);
- if (ret < 0)
- goto ecfgrx;
+ if (pdata) {
+ if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
+ return;
+ } else if (!host->pd->dev.of_node) {
+ return;
+ }
- return;
+ /* We can only either use DMA for both Tx and Rx or not use it at all */
+ host->chan_tx = sh_mmcif_request_dma_one(host, pdata, DMA_MEM_TO_DEV);
+ if (!host->chan_tx)
+ return;
-ecfgrx:
- dma_release_channel(host->chan_rx);
- host->chan_rx = NULL;
-erqrx:
-ecfgtx:
- dma_release_channel(host->chan_tx);
- host->chan_tx = NULL;
+ host->chan_rx = sh_mmcif_request_dma_one(host, pdata, DMA_DEV_TO_MEM);
+ if (!host->chan_rx) {
+ dma_release_channel(host->chan_tx);
+ host->chan_tx = NULL;
+ }
}
static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index f344659dceac..2d6ce257a273 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -33,6 +33,8 @@
#include "tmio_mmc.h"
+#define EXT_ACC 0xe4
+
struct sh_mobile_sdhi_of_data {
unsigned long tmio_flags;
};
@@ -54,7 +56,7 @@ static int sh_mobile_sdhi_clk_enable(struct platform_device *pdev, unsigned int
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct tmio_mmc_host *host = mmc_priv(mmc);
struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
- int ret = clk_enable(priv->clk);
+ int ret = clk_prepare_enable(priv->clk);
if (ret < 0)
return ret;
@@ -67,7 +69,7 @@ static void sh_mobile_sdhi_clk_disable(struct platform_device *pdev)
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct tmio_mmc_host *host = mmc_priv(mmc);
struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
}
static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
@@ -133,9 +135,15 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
struct tmio_mmc_data *mmc_data;
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
struct tmio_mmc_host *host;
+ struct resource *res;
int irq, ret, i = 0;
bool multiplexed_isr = true;
struct tmio_mmc_dma *dma_priv;
+ u16 ver;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
if (priv == NULL) {
@@ -206,11 +214,22 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->flags |= of_data->tmio_flags;
}
+ /* SD control register space size is 0x100, 0x200 for bus_shift=1 */
+ mmc_data->bus_shift = resource_size(res) >> 9;
+
ret = tmio_mmc_host_probe(&host, pdev, mmc_data);
if (ret < 0)
goto eprobe;
/*
+ * FIXME:
+ * this workaround could be implemented more cleverly
+ */
+ ver = sd_ctrl_read16(host, CTL_VERSION);
+ if (ver == 0xCB0D)
+ sd_ctrl_write16(host, EXT_ACC, 1);
+
+ /*
* Allow one or more specific (named) ISRs or
* one or more multiplexed (un-named) ISRs.
*/
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 8860d4d2bc22..1900abb04236 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -62,6 +62,7 @@ static int tmio_mmc_probe(struct platform_device *pdev)
const struct mfd_cell *cell = mfd_get_cell(pdev);
struct tmio_mmc_data *pdata;
struct tmio_mmc_host *host;
+ struct resource *res;
int ret = -EINVAL, irq;
if (pdev->num_resources != 2)
@@ -84,6 +85,14 @@ static int tmio_mmc_probe(struct platform_device *pdev)
goto out;
}
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
+ pdata->bus_shift = resource_size(res) >> 10;
+ pdata->flags |= TMIO_MMC_HAVE_HIGH_REG;
+
ret = tmio_mmc_host_probe(&host, pdev, pdata);
if (ret)
goto cell_disable;
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 86fd21e00099..aaa9c7e9e730 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -58,7 +58,6 @@ enum tmio_mmc_power {
struct tmio_mmc_host {
void __iomem *ctl;
- unsigned long bus_shift;
struct mmc_command *cmd;
struct mmc_request *mrq;
struct mmc_data *data;
@@ -176,19 +175,19 @@ int tmio_mmc_host_runtime_resume(struct device *dev);
static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
- return readw(host->ctl + (addr << host->bus_shift));
+ return readw(host->ctl + (addr << host->pdata->bus_shift));
}
static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
- readsw(host->ctl + (addr << host->bus_shift), buf, count);
+ readsw(host->ctl + (addr << host->pdata->bus_shift), buf, count);
}
static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
{
- return readw(host->ctl + (addr << host->bus_shift)) |
- readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+ return readw(host->ctl + (addr << host->pdata->bus_shift)) |
+ readw(host->ctl + ((addr + 2) << host->pdata->bus_shift)) << 16;
}
static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
@@ -198,19 +197,19 @@ static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val
*/
if (host->pdata->write16_hook && host->pdata->write16_hook(host, addr))
return;
- writew(val, host->ctl + (addr << host->bus_shift));
+ writew(val, host->ctl + (addr << host->pdata->bus_shift));
}
static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
- writesw(host->ctl + (addr << host->bus_shift), buf, count);
+ writesw(host->ctl + (addr << host->pdata->bus_shift), buf, count);
}
static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
{
- writew(val, host->ctl + (addr << host->bus_shift));
- writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+ writew(val, host->ctl + (addr << host->pdata->bus_shift));
+ writew(val >> 16, host->ctl + ((addr + 2) << host->pdata->bus_shift));
}
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 65edb4a62452..03e7b280cb4c 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -293,7 +293,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
if (pdata->dma->chan_priv_tx)
cfg.slave_id = pdata->dma->slave_id_tx;
cfg.direction = DMA_MEM_TO_DEV;
- cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
+ cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->pdata->bus_shift);
cfg.src_addr = 0;
ret = dmaengine_slave_config(host->chan_tx, &cfg);
if (ret < 0)
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index f3b2d8ca1eca..8d8abf23a611 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -161,10 +161,8 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
- struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);
-
/* implicit BUG_ON(!res) */
- if (resource_size(res) > 0x100) {
+ if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
msleep(10);
}
@@ -176,14 +174,12 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
- struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);
-
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
msleep(10);
/* implicit BUG_ON(!res) */
- if (resource_size(res) > 0x100) {
+ if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
msleep(10);
}
@@ -191,16 +187,14 @@ static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
- struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);
-
/* FIXME - should we set stop clock reg here */
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
/* implicit BUG_ON(!res) */
- if (resource_size(res) > 0x100)
+ if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
msleep(10);
sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
- if (resource_size(res) > 0x100)
+ if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
msleep(10);
}
@@ -944,17 +938,25 @@ static const struct mmc_host_ops tmio_mmc_ops = {
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
-static void tmio_mmc_init_ocr(struct tmio_mmc_host *host)
+static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
struct tmio_mmc_data *pdata = host->pdata;
struct mmc_host *mmc = host->mmc;
mmc_regulator_get_supply(mmc);
+ /* use ocr_mask if no regulator */
if (!mmc->ocr_avail)
- mmc->ocr_avail = pdata->ocr_mask ? : MMC_VDD_32_33 | MMC_VDD_33_34;
- else if (pdata->ocr_mask)
- dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
+ mmc->ocr_avail = pdata->ocr_mask;
+
+ /*
+ * Try again; there is a possibility that the regulator
+ * has not been probed yet.
+ */
+ if (!mmc->ocr_avail)
+ return -EPROBE_DEFER;
+
+ return 0;
}
static void tmio_mmc_of_parse(struct platform_device *pdev,
@@ -1005,8 +1007,9 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
_host->set_pwr = pdata->set_pwr;
_host->set_clk_div = pdata->set_clk_div;
- /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
- _host->bus_shift = resource_size(res_ctl) >> 10;
+ ret = tmio_mmc_init_ocr(_host);
+ if (ret < 0)
+ goto host_free;
_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
if (!_host->ctl) {
@@ -1016,14 +1019,13 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
mmc->ops = &tmio_mmc_ops;
mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
- mmc->caps2 = pdata->capabilities2;
+ mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
mmc->max_segs;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
- tmio_mmc_init_ocr(_host);
_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5fab4e6e8301..5ebcda39f554 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -157,10 +157,11 @@ config MTD_BCM47XX_PARTS
comment "User Modules And Translation Layers"
+#
+# MTD block device support is select'ed if needed
+#
config MTD_BLKDEVS
- tristate "Common interface to block layer for MTD 'translation layers'"
- depends on BLOCK
- default n
+ tristate
config MTD_BLOCK
tristate "Caching block device access to MTD devices"
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
index 5a3942bf109c..96a33e3f7b00 100644
--- a/drivers/mtd/afs.c
+++ b/drivers/mtd/afs.c
@@ -264,7 +264,8 @@ static struct mtd_part_parser afs_parser = {
static int __init afs_parser_init(void)
{
- return register_mtd_parser(&afs_parser);
+ register_mtd_parser(&afs_parser);
+ return 0;
}
static void __exit afs_parser_exit(void)
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index ddc0a4287a4b..7c9172ad2621 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -139,7 +139,8 @@ static struct mtd_part_parser ar7_parser = {
static int __init ar7_parser_init(void)
{
- return register_mtd_parser(&ar7_parser);
+ register_mtd_parser(&ar7_parser);
+ return 0;
}
static void __exit ar7_parser_exit(void)
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 7a6384b0962a..de1eb92e42f5 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -23,10 +23,12 @@
* Amount of bytes we read when analyzing each block of flash memory.
* Set it big enough to allow detecting partition and reading important data.
*/
-#define BCM47XXPART_BYTES_TO_READ 0x404
+#define BCM47XXPART_BYTES_TO_READ 0x4e8
/* Magics */
#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
+#define BOARD_DATA_MAGIC2 0xBD0D0BBD
+#define CFE_MAGIC 0x43464531 /* 1EFC */
#define FACTORY_MAGIC 0x59544346 /* FCTY */
#define POT_MAGIC1 0x54544f50 /* POTT */
#define POT_MAGIC2 0x504f /* OP */
@@ -102,8 +104,9 @@ static int bcm47xxpart_parse(struct mtd_info *master,
continue;
}
- /* CFE has small NVRAM at 0x400 */
- if (buf[0x400 / 4] == NVRAM_HEADER) {
+ /* Magic or small NVRAM at 0x400 */
+ if ((buf[0x4e0 / 4] == CFE_MAGIC && buf[0x4e4 / 4] == CFE_MAGIC) ||
+ (buf[0x400 / 4] == NVRAM_HEADER)) {
bcm47xxpart_add_part(&parts[curr_part++], "boot",
offset, MTD_WRITEABLE);
continue;
@@ -190,6 +193,21 @@ static int bcm47xxpart_parse(struct mtd_info *master,
offset, 0);
continue;
}
+
+ /* Read middle of the block */
+ if (mtd_read(master, offset + 0x8000, 0x4,
+ &bytes_read, (uint8_t *)buf) < 0) {
+ pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
+ offset);
+ continue;
+ }
+
+ /* Some devices (ex. WNDR3700v3) don't have a standard 'MPFR' */
+ if (buf[0x000 / 4] == BOARD_DATA_MAGIC2) {
+ bcm47xxpart_add_part(&parts[curr_part++], "board_data",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
}
/* Look for NVRAM at the end of the last block. */
@@ -243,7 +261,8 @@ static struct mtd_part_parser bcm47xxpart_mtd_parser = {
static int __init bcm47xxpart_init(void)
{
- return register_mtd_parser(&bcm47xxpart_mtd_parser);
+ register_mtd_parser(&bcm47xxpart_mtd_parser);
+ return 0;
}
static void __exit bcm47xxpart_exit(void)
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index 5c813907661c..b2443f7031c9 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -221,7 +221,8 @@ static struct mtd_part_parser bcm63xx_cfe_parser = {
static int __init bcm63xx_cfe_parser_init(void)
{
- return register_mtd_parser(&bcm63xx_cfe_parser);
+ register_mtd_parser(&bcm63xx_cfe_parser);
+ return 0;
}
static void __exit bcm63xx_cfe_parser_exit(void)
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 721caebbc5cc..3e829b37af8d 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -395,7 +395,8 @@ static int __init cmdline_parser_init(void)
{
if (mtdparts)
mtdpart_setup(mtdparts);
- return register_mtd_parser(&cmdline_parser);
+ register_mtd_parser(&cmdline_parser);
+ return 0;
}
static void __exit cmdline_parser_exit(void)
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 4f091c1a9981..dd5e1018d37b 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -2047,21 +2047,21 @@ static int __init docg3_probe(struct platform_device *pdev)
ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!ress) {
dev_err(dev, "No I/O memory resource defined\n");
- goto noress;
+ return ret;
}
- base = ioremap(ress->start, DOC_IOSPACE_SIZE);
+ base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE);
ret = -ENOMEM;
- cascade = kzalloc(sizeof(*cascade) * DOC_MAX_NBFLOORS,
- GFP_KERNEL);
+ cascade = devm_kzalloc(dev, sizeof(*cascade) * DOC_MAX_NBFLOORS,
+ GFP_KERNEL);
if (!cascade)
- goto nomem1;
+ return ret;
cascade->base = base;
mutex_init(&cascade->lock);
cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
DOC_ECC_BCH_PRIMPOLY);
if (!cascade->bch)
- goto nomem2;
+ return ret;
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
mtd = doc_probe_device(cascade, floor, dev);
@@ -2101,11 +2101,6 @@ err_probe:
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
if (cascade->floors[floor])
doc_release_device(cascade->floors[floor]);
-nomem2:
- kfree(cascade);
-nomem1:
- iounmap(base);
-noress:
return ret;
}
@@ -2119,7 +2114,6 @@ static int __exit docg3_release(struct platform_device *pdev)
{
struct docg3_cascade *cascade = platform_get_drvdata(pdev);
struct docg3 *docg3 = cascade->floors[0]->priv;
- void __iomem *base = cascade->base;
int floor;
doc_unregister_sysfs(pdev, cascade);
@@ -2129,8 +2123,6 @@ static int __exit docg3_release(struct platform_device *pdev)
doc_release_device(cascade->floors[floor]);
free_bch(docg3->cascade->bch);
- kfree(cascade);
- iounmap(base);
return 0;
}
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 7eda71dbc183..ad1913909702 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -41,6 +41,7 @@
#define OPCODE_WRSR 0x01 /* Write status register 1 byte */
#define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */
#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */
+#define OPCODE_QUAD_READ 0x6b /* Read data bytes */
#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */
#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
#define OPCODE_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */
@@ -48,10 +49,12 @@
#define OPCODE_CHIP_ERASE 0xc7 /* Erase whole flash chip */
#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
#define OPCODE_RDID 0x9f /* Read JEDEC ID */
+#define OPCODE_RDCR 0x35 /* Read configuration register */
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define OPCODE_NORM_READ_4B 0x13 /* Read data bytes (low frequency) */
#define OPCODE_FAST_READ_4B 0x0c /* Read data bytes (high frequency) */
+#define OPCODE_QUAD_READ_4B 0x6c /* Read data bytes */
#define OPCODE_PP_4B 0x12 /* Page program (up to 256 bytes) */
#define OPCODE_SE_4B 0xdc /* Sector erase (usually 64KiB) */
@@ -76,6 +79,11 @@
#define SR_BP2 0x10 /* Block protect 2 */
#define SR_SRWD 0x80 /* SR write protect */
+#define SR_QUAD_EN_MX 0x40 /* Macronix Quad I/O */
+
+/* Configuration Register bits. */
+#define CR_QUAD_EN_SPAN 0x2 /* Spansion Quad I/O */
+
/* Define max times to check status register before we give up. */
#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
#define MAX_CMD_SIZE 6
@@ -84,6 +92,12 @@
/****************************************************************************/
+enum read_type {
+ M25P80_NORMAL = 0,
+ M25P80_FAST,
+ M25P80_QUAD,
+};
+
struct m25p {
struct spi_device *spi;
struct mutex lock;
@@ -94,7 +108,7 @@ struct m25p {
u8 read_opcode;
u8 program_opcode;
u8 *command;
- bool fast_read;
+ enum read_type flash_read;
};
static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
@@ -131,6 +145,26 @@ static int read_sr(struct m25p *flash)
}
/*
+ * Read the configuration register and return its value.
+ * Returns negative if an error occurred.
+ */
+static int read_cr(struct m25p *flash)
+{
+ u8 code = OPCODE_RDCR;
+ int ret;
+ u8 val;
+
+ ret = spi_write_then_read(flash->spi, &code, 1, &val, 1);
+ if (ret < 0) {
+ dev_err(&flash->spi->dev, "error %d reading CR\n", ret);
+ return ret;
+ }
+
+ return val;
+}
+
+/*
* Write status register 1 byte
* Returns negative if error occurred.
*/
@@ -220,6 +254,93 @@ static int wait_till_ready(struct m25p *flash)
}
/*
+ * Write the status register and configuration register with 2 bytes.
+ * The first byte will be written to the status register, while the
+ * second byte will be written to the configuration register.
+ * Returns negative if an error occurred.
+ */
+static int write_sr_cr(struct m25p *flash, u16 val)
+{
+ flash->command[0] = OPCODE_WRSR;
+ flash->command[1] = val & 0xff;
+ flash->command[2] = (val >> 8);
+
+ return spi_write(flash->spi, flash->command, 3);
+}
+
+static int macronix_quad_enable(struct m25p *flash)
+{
+ int ret, val;
+ u8 cmd[2];
+ cmd[0] = OPCODE_WRSR;
+
+ val = read_sr(flash);
+ cmd[1] = val | SR_QUAD_EN_MX;
+ write_enable(flash);
+
+ spi_write(flash->spi, &cmd, 2);
+
+ if (wait_till_ready(flash))
+ return 1;
+
+ ret = read_sr(flash);
+ if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
+ dev_err(&flash->spi->dev, "Macronix Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int spansion_quad_enable(struct m25p *flash)
+{
+ int ret;
+ int quad_en = CR_QUAD_EN_SPAN << 8;
+
+ write_enable(flash);
+
+ ret = write_sr_cr(flash, quad_en);
+ if (ret < 0) {
+ dev_err(&flash->spi->dev,
+ "error while writing configuration register\n");
+ return -EINVAL;
+ }
+
+ /* read back and check it */
+ ret = read_cr(flash);
+ if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
+ dev_err(&flash->spi->dev, "Spansion Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int set_quad_mode(struct m25p *flash, u32 jedec_id)
+{
+ int status;
+
+ switch (JEDEC_MFR(jedec_id)) {
+ case CFI_MFR_MACRONIX:
+ status = macronix_quad_enable(flash);
+ if (status) {
+ dev_err(&flash->spi->dev,
+ "Macronix quad-read not enabled\n");
+ return -EINVAL;
+ }
+ return status;
+ default:
+ status = spansion_quad_enable(flash);
+ if (status) {
+ dev_err(&flash->spi->dev,
+ "Spansion quad-read not enabled\n");
+ return -EINVAL;
+ }
+ return status;
+ }
+}
+
+/*
* Erase the whole flash memory
*
* Returns 0 if successful, non-zero otherwise.
@@ -350,6 +471,35 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
}
/*
+ * Dummy cycle calculation for different types of read.
+ * It can be used to support more commands with
+ * different dummy cycle requirements.
+ */
+static inline int m25p80_dummy_cycles_read(struct m25p *flash)
+{
+ switch (flash->flash_read) {
+ case M25P80_FAST:
+ case M25P80_QUAD:
+ return 1;
+ case M25P80_NORMAL:
+ return 0;
+ default:
+ dev_err(&flash->spi->dev, "No valid read type supported\n");
+ return -1;
+ }
+}
+
+static inline unsigned int m25p80_rx_nbits(const struct m25p *flash)
+{
+ switch (flash->flash_read) {
+ case M25P80_QUAD:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+/*
* Read an address range from the flash chip. The address range
* may be any size provided it is within the physical boundaries.
*/
@@ -360,6 +510,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
struct spi_transfer t[2];
struct spi_message m;
uint8_t opcode;
+ int dummy;
pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
__func__, (u32)from, len);
@@ -367,11 +518,18 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
spi_message_init(&m);
memset(t, 0, (sizeof t));
+ dummy = m25p80_dummy_cycles_read(flash);
+ if (dummy < 0) {
+ dev_err(&flash->spi->dev, "No valid read command supported\n");
+ return -EINVAL;
+ }
+
t[0].tx_buf = flash->command;
- t[0].len = m25p_cmdsz(flash) + (flash->fast_read ? 1 : 0);
+ t[0].len = m25p_cmdsz(flash) + dummy;
spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
+ t[1].rx_nbits = m25p80_rx_nbits(flash);
t[1].len = len;
spi_message_add_tail(&t[1], &m);
@@ -391,8 +549,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
spi_sync(flash->spi, &m);
- *retlen = m.actual_length - m25p_cmdsz(flash) -
- (flash->fast_read ? 1 : 0);
+ *retlen = m.actual_length - m25p_cmdsz(flash) - dummy;
mutex_unlock(&flash->lock);
@@ -698,6 +855,7 @@ struct flash_info {
#define SST_WRITE 0x04 /* use SST byte programming */
#define M25P_NO_FR 0x08 /* Can't do fastread */
#define SECT_4K_PMC 0x10 /* OPCODE_BE_4K_PMC works uniformly */
+#define M25P80_QUAD_READ 0x20 /* Flash supports Quad Read */
};
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
@@ -775,7 +933,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
- { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, 0) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, M25P80_QUAD_READ) },
/* Micron */
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
@@ -795,8 +953,8 @@ static const struct spi_device_id m25p_ids[] = {
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, 0) },
{ "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
- { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) },
- { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) },
+ { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, M25P80_QUAD_READ) },
+ { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, M25P80_QUAD_READ) },
{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
@@ -851,6 +1009,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
+ { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
{ "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
{ "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
{ "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
@@ -937,6 +1096,7 @@ static int m25p_probe(struct spi_device *spi)
unsigned i;
struct mtd_part_parser_data ppdata;
struct device_node *np = spi->dev.of_node;
+ int ret;
/* Platform data helps sort out which chip type we have, as
* well as how this board partitions it. If we don't have
@@ -1051,22 +1211,46 @@ static int m25p_probe(struct spi_device *spi)
flash->page_size = info->page_size;
flash->mtd.writebufsize = flash->page_size;
- if (np)
+ if (np) {
/* If we were instantiated by DT, use it */
- flash->fast_read = of_property_read_bool(np, "m25p,fast-read");
- else
+ if (of_property_read_bool(np, "m25p,fast-read"))
+ flash->flash_read = M25P80_FAST;
+ else
+ flash->flash_read = M25P80_NORMAL;
+ } else {
/* If we weren't instantiated by DT, default to fast-read */
- flash->fast_read = true;
+ flash->flash_read = M25P80_FAST;
+ }
/* Some devices cannot do fast-read, no matter what DT tells us */
if (info->flags & M25P_NO_FR)
- flash->fast_read = false;
+ flash->flash_read = M25P80_NORMAL;
+
+ /* Quad-read mode takes precedence over fast/normal */
+ if (spi->mode & SPI_RX_QUAD && info->flags & M25P80_QUAD_READ) {
+ ret = set_quad_mode(flash, info->jedec_id);
+ if (ret) {
+ dev_err(&flash->spi->dev, "quad mode not supported\n");
+ return ret;
+ }
+ flash->flash_read = M25P80_QUAD;
+ }
/* Default commands */
- if (flash->fast_read)
+ switch (flash->flash_read) {
+ case M25P80_QUAD:
+ flash->read_opcode = OPCODE_QUAD_READ;
+ break;
+ case M25P80_FAST:
flash->read_opcode = OPCODE_FAST_READ;
- else
+ break;
+ case M25P80_NORMAL:
flash->read_opcode = OPCODE_NORM_READ;
+ break;
+ default:
+ dev_err(&flash->spi->dev, "No Read opcode defined\n");
+ return -EINVAL;
+ }
flash->program_opcode = OPCODE_PP;
@@ -1077,9 +1261,17 @@ static int m25p_probe(struct spi_device *spi)
flash->addr_width = 4;
if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) {
/* Dedicated 4-byte command set */
- flash->read_opcode = flash->fast_read ?
- OPCODE_FAST_READ_4B :
- OPCODE_NORM_READ_4B;
+ switch (flash->flash_read) {
+ case M25P80_QUAD:
+ flash->read_opcode = OPCODE_QUAD_READ_4B;
+ break;
+ case M25P80_FAST:
+ flash->read_opcode = OPCODE_FAST_READ_4B;
+ break;
+ case M25P80_NORMAL:
+ flash->read_opcode = OPCODE_NORM_READ_4B;
+ break;
+ }
flash->program_opcode = OPCODE_PP_4B;
/* No small sector erase for 4-byte command set */
flash->erase_opcode = OPCODE_SE_4B;
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 182849d39c61..5c8b322ba904 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -205,7 +205,7 @@ static int __init ms02nv_init_one(ulong addr)
mtd->type = MTD_RAM;
mtd->flags = MTD_CAP_RAM;
mtd->size = fixsize;
- mtd->name = (char *)ms02nv_name;
+ mtd->name = ms02nv_name;
mtd->owner = THIS_MODULE;
mtd->_read = ms02nv_read;
mtd->_write = ms02nv_write;
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 4a47b0266d4e..624069de4f28 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -669,7 +669,6 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
if (!err)
return 0;
- spi_set_drvdata(spi, NULL);
kfree(priv);
return err;
}
@@ -899,10 +898,8 @@ static int dataflash_remove(struct spi_device *spi)
pr_debug("%s: remove\n", dev_name(&spi->dev));
status = mtd_device_unregister(&flash->mtd);
- if (status == 0) {
- spi_set_drvdata(spi, NULL);
+ if (status == 0)
kfree(flash);
- }
return status;
}
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index ec59d65897fb..8e285089229c 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -92,7 +92,7 @@ static void __exit cleanup_mtdram(void)
}
int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
- unsigned long size, char *name)
+ unsigned long size, const char *name)
{
memset(mtd, 0, sizeof(*mtd));
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 2ef19aa0086b..d38b6460d505 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -388,7 +388,7 @@ static void put_chip(struct map_info *map, struct flchip *chip)
wake_up(&chip->wq);
}
-int do_write_buffer(struct map_info *map, struct flchip *chip,
+static int do_write_buffer(struct map_info *map, struct flchip *chip,
unsigned long adr, const struct kvec **pvec,
unsigned long *pvec_seek, int len)
{
@@ -469,7 +469,7 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,
return ret;
}
-int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
+static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
{
struct map_info *map = mtd->priv;
struct lpddr_private *lpddr = map->fldrv_priv;
@@ -748,34 +748,6 @@ static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}
-int word_program(struct map_info *map, loff_t adr, uint32_t curval)
-{
- int ret;
- struct lpddr_private *lpddr = map->fldrv_priv;
- int chipnum = adr >> lpddr->chipshift;
- struct flchip *chip = &lpddr->chips[chipnum];
-
- mutex_lock(&chip->mutex);
- ret = get_chip(map, chip, FL_WRITING);
- if (ret) {
- mutex_unlock(&chip->mutex);
- return ret;
- }
-
- send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 0x00, (map_word *)&curval);
-
- ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->SingleWordProgTime));
- if (ret) {
- printk(KERN_WARNING"%s word_program error at: %llx; val: %x\n",
- map->name, adr, curval);
- goto out;
- }
-
-out: put_chip(map, chip);
- mutex_unlock(&chip->mutex);
- return ret;
-}
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 10debfea81e7..d6b2451eab1d 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -13,6 +13,7 @@
*
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -162,13 +163,6 @@ static int ixp4xx_flash_remove(struct platform_device *dev)
mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
- if (info->map.virt)
- iounmap(info->map.virt);
-
- if (info->res) {
- release_resource(info->res);
- kfree(info->res);
- }
if (plat->exit)
plat->exit();
@@ -194,7 +188,8 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
return err;
}
- info = kzalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL);
+ info = devm_kzalloc(&dev->dev, sizeof(struct ixp4xx_flash_info),
+ GFP_KERNEL);
if(!info) {
err = -ENOMEM;
goto Error;
@@ -220,20 +215,9 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
info->map.write = ixp4xx_probe_write16;
info->map.copy_from = ixp4xx_copy_from;
- info->res = request_mem_region(dev->resource->start,
- resource_size(dev->resource),
- "IXP4XXFlash");
- if (!info->res) {
- printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n");
- err = -ENOMEM;
- goto Error;
- }
-
- info->map.virt = ioremap(dev->resource->start,
- resource_size(dev->resource));
- if (!info->map.virt) {
- printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n");
- err = -EIO;
+ info->map.virt = devm_ioremap_resource(&dev->dev, dev->resource);
+ if (IS_ERR(info->map.virt)) {
+ err = PTR_ERR(info->map.virt);
goto Error;
}
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index d7ac65d1d569..93c507a6f862 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -123,24 +123,28 @@ ltq_mtd_probe(struct platform_device *pdev)
return -ENODEV;
}
- ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL);
+ ltq_mtd = devm_kzalloc(&pdev->dev, sizeof(struct ltq_mtd), GFP_KERNEL);
+ if (!ltq_mtd)
+ return -ENOMEM;
+
platform_set_drvdata(pdev, ltq_mtd);
ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!ltq_mtd->res) {
dev_err(&pdev->dev, "failed to get memory resource\n");
- err = -ENOENT;
- goto err_out;
+ return -ENOENT;
}
- ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
+ ltq_mtd->map = devm_kzalloc(&pdev->dev, sizeof(struct map_info),
+ GFP_KERNEL);
+ if (!ltq_mtd->map)
+ return -ENOMEM;
+
ltq_mtd->map->phys = ltq_mtd->res->start;
ltq_mtd->map->size = resource_size(ltq_mtd->res);
ltq_mtd->map->virt = devm_ioremap_resource(&pdev->dev, ltq_mtd->res);
- if (IS_ERR(ltq_mtd->map->virt)) {
- err = PTR_ERR(ltq_mtd->map->virt);
- goto err_out;
- }
+ if (IS_ERR(ltq_mtd->map->virt))
+ return PTR_ERR(ltq_mtd->map->virt);
ltq_mtd->map->name = ltq_map_name;
ltq_mtd->map->bankwidth = 2;
@@ -155,8 +159,7 @@ ltq_mtd_probe(struct platform_device *pdev)
if (!ltq_mtd->mtd) {
dev_err(&pdev->dev, "probing failed\n");
- err = -ENXIO;
- goto err_free;
+ return -ENXIO;
}
ltq_mtd->mtd->owner = THIS_MODULE;
@@ -177,10 +180,6 @@ ltq_mtd_probe(struct platform_device *pdev)
err_destroy:
map_destroy(ltq_mtd->mtd);
-err_free:
- kfree(ltq_mtd->map);
-err_out:
- kfree(ltq_mtd);
return err;
}
@@ -189,13 +188,9 @@ ltq_mtd_remove(struct platform_device *pdev)
{
struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
- if (ltq_mtd) {
- if (ltq_mtd->mtd) {
- mtd_device_unregister(ltq_mtd->mtd);
- map_destroy(ltq_mtd->mtd);
- }
- kfree(ltq_mtd->map);
- kfree(ltq_mtd);
+ if (ltq_mtd && ltq_mtd->mtd) {
+ mtd_device_unregister(ltq_mtd->mtd);
+ map_destroy(ltq_mtd->mtd);
}
return 0;
}
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index d210d131fef2..9aad854fe912 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -61,7 +61,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- info->map.name = (char *) flash->name;
+ info->map.name = flash->name;
info->map.bankwidth = flash->width;
info->map.phys = res->start;
info->map.size = resource_size(res);
@@ -73,7 +73,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
return -ENOMEM;
}
info->map.cached =
- ioremap_cached(info->map.phys, info->map.size);
+ ioremap_cache(info->map.phys, info->map.size);
if (!info->map.cached)
printk(KERN_WARNING "Failed to ioremap cached %s\n",
info->map.name);
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index d467f3b11c96..39cc4181f025 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -75,7 +75,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
up->name = of_get_property(dp, "model", NULL);
if (up->name && 0 < strlen(up->name))
- up->map.name = (char *)up->name;
+ up->map.name = up->name;
up->map.phys = op->resource[0].start;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 92311a56939f..34c0b16aed5c 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -313,15 +313,7 @@ static struct attribute *mtd_attrs[] = {
&dev_attr_bitflip_threshold.attr,
NULL,
};
-
-static struct attribute_group mtd_group = {
- .attrs = mtd_attrs,
-};
-
-static const struct attribute_group *mtd_groups[] = {
- &mtd_group,
- NULL,
-};
+ATTRIBUTE_GROUPS(mtd);
static struct device_type mtd_devtype = {
.name = "mtd",
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 6e732c3820c1..3c7d6d7623c1 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -534,7 +534,7 @@ out_register:
return slave;
}
-int mtd_add_partition(struct mtd_info *master, char *name,
+int mtd_add_partition(struct mtd_info *master, const char *name,
long long offset, long long length)
{
struct mtd_partition part;
@@ -672,22 +672,19 @@ static struct mtd_part_parser *get_partition_parser(const char *name)
#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
-int register_mtd_parser(struct mtd_part_parser *p)
+void register_mtd_parser(struct mtd_part_parser *p)
{
spin_lock(&part_parser_lock);
list_add(&p->list, &part_parsers);
spin_unlock(&part_parser_lock);
-
- return 0;
}
EXPORT_SYMBOL_GPL(register_mtd_parser);
-int deregister_mtd_parser(struct mtd_part_parser *p)
+void deregister_mtd_parser(struct mtd_part_parser *p)
{
spin_lock(&part_parser_lock);
list_del(&p->list);
spin_unlock(&part_parser_lock);
- return 0;
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 93ae6a6d94f7..90ff447bf043 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -95,7 +95,7 @@ config MTD_NAND_OMAP2
platforms.
config MTD_NAND_OMAP_BCH
- depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3
+ depends on MTD_NAND_OMAP2
tristate "Support hardware based BCH error correction"
default n
select BCH
@@ -326,11 +326,11 @@ config MTD_NAND_ATMEL
on Atmel AT91 and AVR32 processors.
config MTD_NAND_PXA3xx
- tristate "Support for NAND flash devices on PXA3xx"
+ tristate "NAND support on PXA3xx and Armada 370/XP"
depends on PXA3xx || ARCH_MMP || PLAT_ORION
help
This enables the driver for the NAND flash device found on
- PXA3xx processors
+ PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC Controller"
@@ -458,17 +458,17 @@ config MTD_NAND_MXC
config MTD_NAND_SH_FLCTL
tristate "Support for NAND on Renesas SuperH FLCTL"
- depends on SUPERH || ARCH_SHMOBILE
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
help
Several Renesas SuperH CPU has FLCTL. This option enables support
for NAND Flash using FLCTL.
config MTD_NAND_DAVINCI
- tristate "Support NAND on DaVinci SoC"
- depends on ARCH_DAVINCI
+ tristate "Support NAND on DaVinci/Keystone SoC"
+ depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF)
help
Enable the driver for NAND flash chips on Texas Instruments
- DaVinci processors.
+ DaVinci/Keystone processors.
config MTD_NAND_TXX9NDFMC
tristate "NAND Flash support for TXx9 SoC"
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 59f08c44abdb..c36e9b84487c 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -1961,10 +1961,8 @@ static int atmel_nand_probe(struct platform_device *pdev)
/* Allocate memory for the device structure (and zero it) */
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- printk(KERN_ERR "atmel_nand: failed to allocate device structure.\n");
+ if (!host)
return -ENOMEM;
- }
res = platform_driver_register(&atmel_nand_nfc_driver);
if (res)
@@ -2062,14 +2060,14 @@ static int atmel_nand_probe(struct platform_device *pdev)
}
if (gpio_get_value(host->board.det_pin)) {
- printk(KERN_INFO "No SmartMedia card inserted.\n");
+ dev_info(&pdev->dev, "No SmartMedia card inserted.\n");
res = -ENXIO;
goto err_no_card;
}
}
if (host->board.on_flash_bbt || on_flash_bbt) {
- printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
+ dev_info(&pdev->dev, "Use On Flash BBT\n");
nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
}
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index ae8dd7c41039..2880d888cfc5 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -418,10 +418,8 @@ static int au1550nd_probe(struct platform_device *pdev)
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- dev_err(&pdev->dev, "no memory for NAND context\n");
+ if (!ctx)
return -ENOMEM;
- }
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -480,6 +478,8 @@ static int au1550nd_probe(struct platform_device *pdev)
mtd_device_register(&ctx->info, pd->parts, pd->num_parts);
+ platform_set_drvdata(pdev, ctx);
+
return 0;
out3:
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 2c42e125720f..94f55dbde995 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -745,7 +745,6 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL) {
- dev_err(&pdev->dev, "no memory for flash info\n");
err = -ENOMEM;
goto out_err_kzalloc;
}
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index c34985a55101..f2f64addb5e8 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -640,10 +640,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
pci_set_master(pdev);
mtd = kzalloc(sizeof(*mtd) + sizeof(struct cafe_priv), GFP_KERNEL);
- if (!mtd) {
- dev_warn(&pdev->dev, "failed to alloc mtd_info\n");
+ if (!mtd)
return -ENOMEM;
- }
cafe = (void *)(&mtd[1]);
mtd->dev.parent = &pdev->dev;
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 39b2ef848811..66ec95e6ca6c 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -164,7 +164,6 @@ static int __init cmx270_init(void)
sizeof(struct nand_chip),
GFP_KERNEL);
if (!cmx270_nand_mtd) {
- pr_debug("Unable to allocate CM-X270 NAND MTD device structure.\n");
ret = -ENOMEM;
goto err_kzalloc;
}
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index d469a9a1dea0..88109d375ae7 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -199,7 +199,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
/* Allocate memory for MTD device structure and private data */
new_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
if (!new_mtd) {
- printk(KERN_WARNING "Unable to allocate CS553X NAND MTD device structure.\n");
err = -ENOMEM;
goto out;
}
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index b77a01efb483..a4989ec6292e 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of.h>
+#include <linux/of_mtd.h>
#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>
@@ -487,7 +488,7 @@ static int nand_davinci_dev_ready(struct mtd_info *mtd)
* ten ECC bytes plus the manufacturer's bad block marker byte, and
* and not overlapping the default BBT markers.
*/
-static struct nand_ecclayout hwecc4_small __initconst = {
+static struct nand_ecclayout hwecc4_small = {
.eccbytes = 10,
.eccpos = { 0, 1, 2, 3, 4,
/* offset 5 holds the badblock marker */
@@ -503,7 +504,7 @@ static struct nand_ecclayout hwecc4_small __initconst = {
* storing ten ECC bytes plus the manufacturer's bad block marker byte,
* and not overlapping the default BBT markers.
*/
-static struct nand_ecclayout hwecc4_2048 __initconst = {
+static struct nand_ecclayout hwecc4_2048 = {
.eccbytes = 40,
.eccpos = {
/* at the end of spare sector */
@@ -534,17 +535,19 @@ static struct davinci_nand_pdata
struct davinci_nand_pdata *pdata;
const char *mode;
u32 prop;
- int len;
pdata = devm_kzalloc(&pdev->dev,
sizeof(struct davinci_nand_pdata),
GFP_KERNEL);
pdev->dev.platform_data = pdata;
if (!pdata)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (!of_property_read_u32(pdev->dev.of_node,
"ti,davinci-chipselect", &prop))
pdev->id = prop;
+ else
+ return ERR_PTR(-EINVAL);
+
if (!of_property_read_u32(pdev->dev.of_node,
"ti,davinci-mask-ale", &prop))
pdata->mask_ale = prop;
@@ -555,6 +558,8 @@ static struct davinci_nand_pdata
"ti,davinci-mask-chipsel", &prop))
pdata->mask_chipsel = prop;
if (!of_property_read_string(pdev->dev.of_node,
+ "nand-ecc-mode", &mode) ||
+ !of_property_read_string(pdev->dev.of_node,
"ti,davinci-ecc-mode", &mode)) {
if (!strncmp("none", mode, 4))
pdata->ecc_mode = NAND_ECC_NONE;
@@ -566,12 +571,16 @@ static struct davinci_nand_pdata
if (!of_property_read_u32(pdev->dev.of_node,
"ti,davinci-ecc-bits", &prop))
pdata->ecc_bits = prop;
- if (!of_property_read_u32(pdev->dev.of_node,
+
+ prop = of_get_nand_bus_width(pdev->dev.of_node);
+ if (0 < prop || !of_property_read_u32(pdev->dev.of_node,
"ti,davinci-nand-buswidth", &prop))
if (prop == 16)
pdata->options |= NAND_BUSWIDTH_16;
- if (of_find_property(pdev->dev.of_node,
- "ti,davinci-nand-use-bbt", &len))
+ if (of_property_read_bool(pdev->dev.of_node,
+ "nand-on-flash-bbt") ||
+ of_property_read_bool(pdev->dev.of_node,
+ "ti,davinci-nand-use-bbt"))
pdata->bbt_options = NAND_BBT_USE_FLASH;
}
@@ -585,7 +594,7 @@ static struct davinci_nand_pdata
}
#endif
-static int __init nand_davinci_probe(struct platform_device *pdev)
+static int nand_davinci_probe(struct platform_device *pdev)
{
struct davinci_nand_pdata *pdata;
struct davinci_nand_info *info;
@@ -598,6 +607,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
nand_ecc_modes_t ecc_mode;
pdata = nand_davinci_get_pdata(pdev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
/* insist on board-specific configuration */
if (!pdata)
return -ENODEV;
@@ -607,11 +619,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
return -ENODEV;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
- if (!info) {
- dev_err(&pdev->dev, "unable to allocate memory\n");
- ret = -ENOMEM;
- goto err_nomem;
- }
+ if (!info)
+ return -ENOMEM;
platform_set_drvdata(pdev, info);
@@ -619,19 +628,23 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res1 || !res2) {
dev_err(&pdev->dev, "resource missing\n");
- ret = -EINVAL;
- goto err_nomem;
+ return -EINVAL;
}
vaddr = devm_ioremap_resource(&pdev->dev, res1);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto err_ioremap;
- }
- base = devm_ioremap_resource(&pdev->dev, res2);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- goto err_ioremap;
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ /*
+ * This register range is used to set up NAND settings. When the
+ * TI AEMIF driver is used, the same memory address range has already
+ * been requested by AEMIF, so we cannot request it twice; just ioremap it.
+ * The AEMIF and NAND drivers do not use the same registers in this range.
+ */
+ base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
+ if (!base) {
+ dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
+ return -EADDRNOTAVAIL;
}
info->dev = &pdev->dev;
@@ -699,7 +712,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
if (ret == -EBUSY)
- goto err_ecc;
+ return ret;
info->chip.ecc.calculate = nand_davinci_calculate_4bit;
info->chip.ecc.correct = nand_davinci_correct_4bit;
@@ -715,8 +728,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.ecc.strength = pdata->ecc_bits;
break;
default:
- ret = -EINVAL;
- goto err_ecc;
+ return -EINVAL;
}
info->chip.ecc.mode = ecc_mode;
@@ -724,7 +736,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
if (IS_ERR(info->clk)) {
ret = PTR_ERR(info->clk);
dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
- goto err_clk;
+ return ret;
}
ret = clk_prepare_enable(info->clk);
@@ -753,7 +765,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->core_chipsel);
if (ret < 0) {
dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
- goto err_timing;
+ goto err;
}
spin_lock_irq(&davinci_nand_lock);
@@ -769,7 +781,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
- goto err_scan;
+ goto err;
}
/* Update ECC layout if needed ... for 1-bit HW ECC, the default
@@ -783,7 +795,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
if (!chunks || info->mtd.oobsize < 16) {
dev_dbg(&pdev->dev, "too small\n");
ret = -EINVAL;
- goto err_scan;
+ goto err;
}
/* For small page chips, preserve the manufacturer's
@@ -814,7 +826,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "no 4-bit ECC support yet "
"for 4KiB-page NAND\n");
ret = -EIO;
- goto err_scan;
+ goto err;
syndrome_done:
info->chip.ecc.layout = &info->ecclayout;
@@ -822,7 +834,7 @@ syndrome_done:
ret = nand_scan_tail(&info->mtd);
if (ret < 0)
- goto err_scan;
+ goto err;
if (pdata->parts)
ret = mtd_device_parse_register(&info->mtd, NULL, NULL,
@@ -835,7 +847,7 @@ syndrome_done:
NULL, 0);
}
if (ret < 0)
- goto err_scan;
+ goto err;
val = davinci_nand_readl(info, NRCSR_OFFSET);
dev_info(&pdev->dev, "controller rev. %d.%d\n",
@@ -843,8 +855,7 @@ syndrome_done:
return 0;
-err_scan:
-err_timing:
+err:
clk_disable_unprepare(info->clk);
err_clk_enable:
@@ -852,15 +863,10 @@ err_clk_enable:
if (ecc_mode == NAND_ECC_HW_SYNDROME)
ecc4_busy = false;
spin_unlock_irq(&davinci_nand_lock);
-
-err_ecc:
-err_clk:
-err_ioremap:
-err_nomem:
return ret;
}
-static int __exit nand_davinci_remove(struct platform_device *pdev)
+static int nand_davinci_remove(struct platform_device *pdev)
{
struct davinci_nand_info *info = platform_get_drvdata(pdev);
@@ -877,7 +883,8 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
}
static struct platform_driver nand_davinci_driver = {
- .remove = __exit_p(nand_davinci_remove),
+ .probe = nand_davinci_probe,
+ .remove = nand_davinci_remove,
.driver = {
.name = "davinci_nand",
.owner = THIS_MODULE,
@@ -886,7 +893,7 @@ static struct platform_driver nand_davinci_driver = {
};
MODULE_ALIAS("platform:davinci_nand");
-module_platform_driver_probe(nand_davinci_driver, nand_davinci_probe);
+module_platform_driver(nand_davinci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 370b9dd7a278..c07cd573ad3a 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -125,7 +125,6 @@ static void reset_buf(struct denali_nand_info *denali)
static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
{
- BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf));
denali->buf.buf[denali->buf.tail++] = byte;
}
@@ -897,7 +896,7 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
/* this function examines buffers to see if they contain data that
* indicate that the buffer is part of an erased region of flash.
*/
-bool is_erased(uint8_t *buf, int len)
+static bool is_erased(uint8_t *buf, int len)
{
int i = 0;
for (i = 0; i < len; i++)
@@ -1429,20 +1428,12 @@ int denali_init(struct denali_nand_info *denali)
}
}
- /* Is 32-bit DMA supported? */
- ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
- if (ret) {
- pr_err("Spectra: no usable DMA configuration\n");
- return ret;
- }
- denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
- DENALI_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ /* allocate a temporary buffer for nand_scan_ident() */
+ denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ if (!denali->buf.buf)
+ return -ENOMEM;
- if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
- dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
- return -EIO;
- }
denali->mtd.dev.parent = denali->dev;
denali_hw_init(denali);
denali_drv_init(denali);
@@ -1475,12 +1466,29 @@ int denali_init(struct denali_nand_info *denali)
goto failed_req_irq;
}
- /* MTD supported page sizes vary by kernel. We validate our
- * kernel supports the device here.
- */
- if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
- ret = -ENODEV;
- pr_err("Spectra: device size not supported by this version of MTD.");
+ /* allocate the right size buffer now */
+ devm_kfree(denali->dev, denali->buf.buf);
+ denali->buf.buf = devm_kzalloc(denali->dev,
+ denali->mtd.writesize + denali->mtd.oobsize,
+ GFP_KERNEL);
+ if (!denali->buf.buf) {
+ ret = -ENOMEM;
+ goto failed_req_irq;
+ }
+
+ /* Is 32-bit DMA supported? */
+ ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ pr_err("Spectra: no usable DMA configuration\n");
+ goto failed_req_irq;
+ }
+
+ denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
+ denali->mtd.writesize + denali->mtd.oobsize,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
+ dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
+ ret = -EIO;
goto failed_req_irq;
}
@@ -1602,7 +1610,8 @@ EXPORT_SYMBOL(denali_init);
void denali_remove(struct denali_nand_info *denali)
{
denali_irq_cleanup(denali->irq, denali);
- dma_unmap_single(denali->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ dma_unmap_single(denali->dev, denali->buf.dma_buf,
+ denali->mtd.writesize + denali->mtd.oobsize,
DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(denali_remove);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index cec5712862c9..966817462421 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -455,12 +455,10 @@
#define ECC_SECTOR_SIZE 512
-#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
-
struct nand_buf {
int head;
int tail;
- uint8_t buf[DENALI_BUF_SIZE];
+ uint8_t *buf;
dma_addr_t dma_buf;
};
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
index 92530244e2cb..babb02c4b220 100644
--- a/drivers/mtd/nand/denali_dt.c
+++ b/drivers/mtd/nand/denali_dt.c
@@ -108,7 +108,7 @@ static int denali_dt_probe(struct platform_device *ofdev)
denali->dev->dma_mask = NULL;
}
- dt->clk = clk_get(&ofdev->dev, NULL);
+ dt->clk = devm_clk_get(&ofdev->dev, NULL);
if (IS_ERR(dt->clk)) {
dev_err(&ofdev->dev, "no clk available\n");
return PTR_ERR(dt->clk);
@@ -124,7 +124,6 @@ static int denali_dt_probe(struct platform_device *ofdev)
out_disable_clk:
clk_disable_unprepare(dt->clk);
- clk_put(dt->clk);
return ret;
}
@@ -135,7 +134,6 @@ static int denali_dt_remove(struct platform_device *ofdev)
denali_remove(&dt->denali);
clk_disable(dt->clk);
- clk_put(dt->clk);
return 0;
}
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index 033f177a6369..6e2f387b823f 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -21,7 +21,7 @@
#define DENALI_NAND_NAME "denali-nand-pci"
/* List of platforms this NAND controller has be integrated into */
-static DEFINE_PCI_DEVICE_TABLE(denali_pci_ids) = {
+static const struct pci_device_id denali_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
{ PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
{ /* end: all zeroes */ }
@@ -131,7 +131,6 @@ static struct pci_driver denali_pci_driver = {
static int denali_init_pci(void)
{
- pr_info("Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__);
return pci_register_driver(&denali_pci_driver);
}
module_init(denali_init_pci);
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index b68a4959f700..fec31d71b84e 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -1058,7 +1058,6 @@ static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partitio
buf = kmalloc(mtd->writesize, GFP_KERNEL);
if (!buf) {
- printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
return 0;
}
if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1)))
@@ -1166,7 +1165,6 @@ static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partiti
buf = kmalloc(mtd->writesize, GFP_KERNEL);
if (!buf) {
- printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
return 0;
}
@@ -1440,10 +1438,13 @@ static int __init doc_probe(unsigned long physadr)
int reg, len, numchips;
int ret = 0;
+ if (!request_mem_region(physadr, DOC_IOREMAP_LEN, NULL))
+ return -EBUSY;
virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
if (!virtadr) {
printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr);
- return -EIO;
+ ret = -EIO;
+ goto error_ioremap;
}
/* It's not possible to cleanly detect the DiskOnChip - the
@@ -1561,7 +1562,6 @@ static int __init doc_probe(unsigned long physadr)
sizeof(struct nand_chip) + sizeof(struct doc_priv) + (2 * sizeof(struct nand_bbt_descr));
mtd = kzalloc(len, GFP_KERNEL);
if (!mtd) {
- printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len);
ret = -ENOMEM;
goto fail;
}
@@ -1629,6 +1629,10 @@ static int __init doc_probe(unsigned long physadr)
WriteDOC(save_control, virtadr, DOCControl);
fail:
iounmap(virtadr);
+
+error_ioremap:
+ release_mem_region(physadr, DOC_IOREMAP_LEN);
+
return ret;
}
@@ -1645,6 +1649,7 @@ static void release_nanddoc(void)
nextmtd = doc->nextdoc;
nand_release(mtd);
iounmap(doc->virtadr);
+ release_mem_region(doc->physadr, DOC_IOREMAP_LEN);
kfree(mtd);
}
}
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index c966fc7474ce..bcf60800c3ce 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -847,7 +847,6 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
if (!fsl_lbc_ctrl_dev->nand) {
elbc_fcm_ctrl = kzalloc(sizeof(*elbc_fcm_ctrl), GFP_KERNEL);
if (!elbc_fcm_ctrl) {
- dev_err(dev, "failed to allocate memory\n");
mutex_unlock(&fsl_elbc_nand_mutex);
ret = -ENOMEM;
goto err;
@@ -875,7 +874,7 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
goto err;
}
- priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
+ priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
if (!priv->mtd.name) {
ret = -ENOMEM;
goto err;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 43355779cff5..90ca7e75d6f0 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -1060,7 +1060,6 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
if (!fsl_ifc_ctrl_dev->nand) {
ifc_nand_ctrl = kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);
if (!ifc_nand_ctrl) {
- dev_err(&dev->dev, "failed to allocate memory\n");
mutex_unlock(&fsl_ifc_nand_mutex);
return -ENOMEM;
}
@@ -1101,7 +1100,7 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
IFC_NAND_EVTER_INTR_FTOERIR_EN |
IFC_NAND_EVTER_INTR_WPERIR_EN,
&ifc->ifc_nand.nand_evter_intr_en);
- priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
+ priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
if (!priv->mtd.name) {
ret = -ENOMEM;
goto err;
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 8b2752263db9..1550692973dc 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -889,10 +889,8 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
pdata->nand_timings = devm_kzalloc(&pdev->dev,
sizeof(*pdata->nand_timings), GFP_KERNEL);
- if (!pdata->nand_timings) {
- dev_err(&pdev->dev, "no memory for nand_timing\n");
+ if (!pdata->nand_timings)
return -ENOMEM;
- }
of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings,
sizeof(*pdata->nand_timings));
@@ -950,10 +948,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
/* Allocate memory for the device structure (and zero it) */
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- dev_err(&pdev->dev, "failed to allocate device structure\n");
+ if (!host)
return -ENOMEM;
- }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
host->data_va = devm_ioremap_resource(&pdev->dev, res);
@@ -1108,8 +1104,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
host->ecc_place = &fsmc_ecc4_lp_place;
break;
default:
- printk(KERN_WARNING "No oob scheme defined for "
- "oobsize %d\n", mtd->oobsize);
+ dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
BUG();
}
} else {
@@ -1124,8 +1120,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
nand->ecc.layout = &fsmc_ecc1_128_layout;
break;
default:
- printk(KERN_WARNING "No oob scheme defined for "
- "oobsize %d\n", mtd->oobsize);
+ dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
BUG();
}
}
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index e826f898241f..8e6148aa4539 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -132,13 +132,17 @@ static int gpio_nand_get_config_of(const struct device *dev,
static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
{
- struct resource *r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+ struct resource *r;
u64 addr;
- if (!r || of_property_read_u64(pdev->dev.of_node,
+ if (of_property_read_u64(pdev->dev.of_node,
"gpio-control-nand,io-sync-reg", &addr))
return NULL;
+ r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return NULL;
+
r->start = addr;
r->end = r->start + 0x3;
r->flags = IORESOURCE_MEM;
@@ -211,10 +215,8 @@ static int gpio_nand_probe(struct platform_device *pdev)
return -EINVAL;
gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL);
- if (!gpiomtd) {
- dev_err(&pdev->dev, "failed to create NAND MTD\n");
+ if (!gpiomtd)
return -ENOMEM;
- }
chip = &gpiomtd->nand_chip;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index aaced29727fb..dd1df605a1d6 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -20,6 +20,7 @@
*/
#include <linux/delay.h>
#include <linux/clk.h>
+#include <linux/slab.h>
#include "gpmi-nand.h"
#include "gpmi-regs.h"
@@ -207,30 +208,41 @@ void gpmi_dump_info(struct gpmi_nand_data *this)
u32 reg;
int i;
- pr_err("Show GPMI registers :\n");
+ dev_err(this->dev, "Show GPMI registers :\n");
for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
reg = readl(r->gpmi_regs + i * 0x10);
- pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
}
/* start to print out the BCH info */
- pr_err("Show BCH registers :\n");
+ dev_err(this->dev, "Show BCH registers :\n");
for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
reg = readl(r->bch_regs + i * 0x10);
- pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
}
- pr_err("BCH Geometry :\n");
- pr_err("GF length : %u\n", geo->gf_len);
- pr_err("ECC Strength : %u\n", geo->ecc_strength);
- pr_err("Page Size in Bytes : %u\n", geo->page_size);
- pr_err("Metadata Size in Bytes : %u\n", geo->metadata_size);
- pr_err("ECC Chunk Size in Bytes: %u\n", geo->ecc_chunk_size);
- pr_err("ECC Chunk Count : %u\n", geo->ecc_chunk_count);
- pr_err("Payload Size in Bytes : %u\n", geo->payload_size);
- pr_err("Auxiliary Size in Bytes: %u\n", geo->auxiliary_size);
- pr_err("Auxiliary Status Offset: %u\n", geo->auxiliary_status_offset);
- pr_err("Block Mark Byte Offset : %u\n", geo->block_mark_byte_offset);
- pr_err("Block Mark Bit Offset : %u\n", geo->block_mark_bit_offset);
+ dev_err(this->dev, "BCH Geometry :\n"
+ "GF length : %u\n"
+ "ECC Strength : %u\n"
+ "Page Size in Bytes : %u\n"
+ "Metadata Size in Bytes : %u\n"
+ "ECC Chunk Size in Bytes: %u\n"
+ "ECC Chunk Count : %u\n"
+ "Payload Size in Bytes : %u\n"
+ "Auxiliary Size in Bytes: %u\n"
+ "Auxiliary Status Offset: %u\n"
+ "Block Mark Byte Offset : %u\n"
+ "Block Mark Bit Offset : %u\n",
+ geo->gf_len,
+ geo->ecc_strength,
+ geo->page_size,
+ geo->metadata_size,
+ geo->ecc_chunk_size,
+ geo->ecc_chunk_count,
+ geo->payload_size,
+ geo->auxiliary_size,
+ geo->auxiliary_status_offset,
+ geo->block_mark_byte_offset,
+ geo->block_mark_bit_offset);
}
/* Configures the geometry for BCH. */
@@ -265,8 +277,8 @@ int bch_set_geometry(struct gpmi_nand_data *this)
* chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
* On the other hand, the MX28 needs the reset, because one case has been
* seen where the BCH produced ECC errors constantly after 10000
- * consecutive reboots. The latter case has not been seen on the MX23 yet,
- * still we don't know if it could happen there as well.
+ * consecutive reboots. The latter case has not been seen on the MX23
+ * yet, still we don't know if it could happen there as well.
*/
ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
if (ret)
@@ -353,7 +365,7 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
improved_timing_is_available =
(target.tREA_in_ns >= 0) &&
(target.tRLOH_in_ns >= 0) &&
- (target.tRHOH_in_ns >= 0) ;
+ (target.tRHOH_in_ns >= 0);
/* Inspect the clock. */
nfc->clock_frequency_in_hz = clk_get_rate(r->clock[0]);
@@ -911,10 +923,14 @@ static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
struct resources *r = &this->resources;
struct nand_chip *nand = &this->nand;
struct mtd_info *mtd = &this->mtd;
- uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {};
+ uint8_t *feature;
unsigned long rate;
int ret;
+ feature = kzalloc(ONFI_SUBFEATURE_PARAM_LEN, GFP_KERNEL);
+ if (!feature)
+ return -ENOMEM;
+
nand->select_chip(mtd, 0);
/* [1] send SET FEATURE commond to NAND */
@@ -942,11 +958,13 @@ static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
this->flags |= GPMI_ASYNC_EDO_ENABLED;
this->timing_mode = mode;
+ kfree(feature);
dev_info(this->dev, "enable the asynchronous EDO mode %d\n", mode);
return 0;
err_out:
nand->select_chip(mtd, -1);
+ kfree(feature);
dev_err(this->dev, "mode:%d ,failed in set feature.\n", mode);
return -EINVAL;
}
@@ -986,7 +1004,7 @@ void gpmi_begin(struct gpmi_nand_data *this)
/* Enable the clock. */
ret = gpmi_enable_clk(this);
if (ret) {
- pr_err("We failed in enable the clk\n");
+ dev_err(this->dev, "We failed in enable the clk\n");
goto err_out;
}
@@ -1003,7 +1021,7 @@ void gpmi_begin(struct gpmi_nand_data *this)
/* [1] Set HW_GPMI_TIMING0 */
reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles) |
- BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles) ;
+ BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles);
writel(reg, gpmi_regs + HW_GPMI_TIMING0);
@@ -1090,7 +1108,7 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
reg = readl(r->gpmi_regs + HW_GPMI_STAT);
} else
- pr_err("unknow arch.\n");
+ dev_err(this->dev, "unknow arch.\n");
return reg & mask;
}
@@ -1121,10 +1139,8 @@ int gpmi_send_command(struct gpmi_nand_data *this)
desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
- if (!desc) {
- pr_err("step 1 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
/* [2] send out the COMMAND + ADDRESS string stored in @buffer */
sgl = &this->cmd_sgl;
@@ -1134,11 +1150,8 @@ int gpmi_send_command(struct gpmi_nand_data *this)
desc = dmaengine_prep_slave_sg(channel,
sgl, 1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
- if (!desc) {
- pr_err("step 2 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
/* [3] submit the DMA */
set_dma_type(this, DMA_FOR_COMMAND);
@@ -1167,20 +1180,17 @@ int gpmi_send_data(struct gpmi_nand_data *this)
pio[1] = 0;
desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
- if (!desc) {
- pr_err("step 1 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
/* [2] send DMA request */
prepare_data_dma(this, DMA_TO_DEVICE);
desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc) {
- pr_err("step 2 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
+
/* [3] submit the DMA */
set_dma_type(this, DMA_FOR_WRITE_DATA);
return start_dma_without_bch_irq(this, desc);
@@ -1204,20 +1214,16 @@ int gpmi_read_data(struct gpmi_nand_data *this)
desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
- if (!desc) {
- pr_err("step 1 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
/* [2] : send DMA request */
prepare_data_dma(this, DMA_FROM_DEVICE);
desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc) {
- pr_err("step 2 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
/* [3] : submit the DMA */
set_dma_type(this, DMA_FOR_READ_DATA);
@@ -1262,10 +1268,9 @@ int gpmi_send_page(struct gpmi_nand_data *this,
(struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE,
DMA_CTRL_ACK);
- if (!desc) {
- pr_err("step 2 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
+
set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
return start_dma_with_bch_irq(this, desc);
}
@@ -1297,10 +1302,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio, 2,
DMA_TRANS_NONE, 0);
- if (!desc) {
- pr_err("step 1 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
/* [2] Enable the BCH block and read. */
command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
@@ -1327,10 +1330,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
(struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc) {
- pr_err("step 2 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
/* [3] Disable the BCH block */
command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
@@ -1348,10 +1349,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
(struct scatterlist *)pio, 3,
DMA_TRANS_NONE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc) {
- pr_err("step 3 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
/* [4] submit the DMA */
set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
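
The gpmi-lib.c hunks above repeatedly replace the "return -1 plus pr_err()" pattern after a failed dmaengine_prep_slave_sg() with a plain "return -EINVAL". A hedged sketch of that convention (the wrapper name example_prep_pio() is illustrative, not from the driver):

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Illustrative only: propagate an errno, leave the logging to the caller. */
static int example_prep_pio(struct dma_chan *channel, struct scatterlist *sgl,
			    unsigned int nents,
			    struct dma_async_tx_descriptor **out)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_sg(channel, sgl, nents, DMA_TRANS_NONE, 0);
	if (!desc)
		return -EINVAL;

	*out = desc;
	return 0;
}
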
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index dabbc14db563..ca6369fe91ff 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -18,9 +18,6 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
@@ -352,6 +349,9 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
+ if (of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc")
+ && set_geometry_by_ecc_info(this))
+ return 0;
return legacy_set_geometry(this);
}
@@ -367,25 +367,28 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
struct scatterlist *sgl = &this->data_sgl;
int ret;
- this->direct_dma_map_ok = true;
-
/* first try to map the upper buffer directly */
- sg_init_one(sgl, this->upper_buf, this->upper_len);
- ret = dma_map_sg(this->dev, sgl, 1, dr);
- if (ret == 0) {
- /* We have to use our own DMA buffer. */
- sg_init_one(sgl, this->data_buffer_dma, PAGE_SIZE);
-
- if (dr == DMA_TO_DEVICE)
- memcpy(this->data_buffer_dma, this->upper_buf,
- this->upper_len);
-
+ if (virt_addr_valid(this->upper_buf) &&
+ !object_is_on_stack(this->upper_buf)) {
+ sg_init_one(sgl, this->upper_buf, this->upper_len);
ret = dma_map_sg(this->dev, sgl, 1, dr);
if (ret == 0)
- pr_err("DMA mapping failed.\n");
+ goto map_fail;
- this->direct_dma_map_ok = false;
+ this->direct_dma_map_ok = true;
+ return;
}
+
+map_fail:
+ /* We have to use our own DMA buffer. */
+ sg_init_one(sgl, this->data_buffer_dma, this->upper_len);
+
+ if (dr == DMA_TO_DEVICE)
+ memcpy(this->data_buffer_dma, this->upper_buf, this->upper_len);
+
+ dma_map_sg(this->dev, sgl, 1, dr);
+
+ this->direct_dma_map_ok = false;
}
/* This will be called after the DMA operation is finished. */
@@ -416,7 +419,7 @@ static void dma_irq_callback(void *param)
break;
default:
- pr_err("in wrong DMA operation.\n");
+ dev_err(this->dev, "in wrong DMA operation.\n");
}
complete(dma_c);
@@ -438,7 +441,8 @@ int start_dma_without_bch_irq(struct gpmi_nand_data *this,
/* Wait for the interrupt from the DMA block. */
err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
if (!err) {
- pr_err("DMA timeout, last DMA :%d\n", this->last_dma_type);
+ dev_err(this->dev, "DMA timeout, last DMA :%d\n",
+ this->last_dma_type);
gpmi_dump_info(this);
return -ETIMEDOUT;
}
@@ -467,7 +471,8 @@ int start_dma_with_bch_irq(struct gpmi_nand_data *this,
/* Wait for the interrupt from the BCH block. */
err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
if (!err) {
- pr_err("BCH timeout, last DMA :%d\n", this->last_dma_type);
+ dev_err(this->dev, "BCH timeout, last DMA :%d\n",
+ this->last_dma_type);
gpmi_dump_info(this);
return -ETIMEDOUT;
}
@@ -483,70 +488,38 @@ static int acquire_register_block(struct gpmi_nand_data *this,
void __iomem *p;
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
- if (!r) {
- pr_err("Can't get resource for %s\n", res_name);
- return -ENODEV;
- }
-
- p = ioremap(r->start, resource_size(r));
- if (!p) {
- pr_err("Can't remap %s\n", res_name);
- return -ENOMEM;
- }
+ p = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
res->gpmi_regs = p;
else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
res->bch_regs = p;
else
- pr_err("unknown resource name : %s\n", res_name);
+ dev_err(this->dev, "unknown resource name : %s\n", res_name);
return 0;
}
-static void release_register_block(struct gpmi_nand_data *this)
-{
- struct resources *res = &this->resources;
- if (res->gpmi_regs)
- iounmap(res->gpmi_regs);
- if (res->bch_regs)
- iounmap(res->bch_regs);
- res->gpmi_regs = NULL;
- res->bch_regs = NULL;
-}
-
static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
struct platform_device *pdev = this->pdev;
- struct resources *res = &this->resources;
const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
struct resource *r;
int err;
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
if (!r) {
- pr_err("Can't get resource for %s\n", res_name);
+ dev_err(this->dev, "Can't get resource for %s\n", res_name);
return -ENODEV;
}
- err = request_irq(r->start, irq_h, 0, res_name, this);
- if (err) {
- pr_err("Can't own %s\n", res_name);
- return err;
- }
-
- res->bch_low_interrupt = r->start;
- res->bch_high_interrupt = r->end;
- return 0;
-}
-
-static void release_bch_irq(struct gpmi_nand_data *this)
-{
- struct resources *res = &this->resources;
- int i = res->bch_low_interrupt;
+ err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
+ if (err)
+ dev_err(this->dev, "error requesting BCH IRQ\n");
- for (; i <= res->bch_high_interrupt; i++)
- free_irq(i, this);
+ return err;
}
static void release_dma_channels(struct gpmi_nand_data *this)
@@ -567,7 +540,7 @@ static int acquire_dma_channels(struct gpmi_nand_data *this)
/* request dma channel */
dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
if (!dma_chan) {
- pr_err("Failed to request DMA channel.\n");
+ dev_err(this->dev, "Failed to request DMA channel.\n");
goto acquire_err;
}
@@ -579,21 +552,6 @@ acquire_err:
return -EINVAL;
}
-static void gpmi_put_clks(struct gpmi_nand_data *this)
-{
- struct resources *r = &this->resources;
- struct clk *clk;
- int i;
-
- for (i = 0; i < GPMI_CLK_MAX; i++) {
- clk = r->clock[i];
- if (clk) {
- clk_put(clk);
- r->clock[i] = NULL;
- }
- }
-}
-
static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
"gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};
@@ -606,7 +564,7 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
int err, i;
/* The main clock is stored in the first. */
- r->clock[0] = clk_get(this->dev, "gpmi_io");
+ r->clock[0] = devm_clk_get(this->dev, "gpmi_io");
if (IS_ERR(r->clock[0])) {
err = PTR_ERR(r->clock[0]);
goto err_clock;
@@ -622,7 +580,7 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
if (extra_clks[i - 1] == NULL)
break;
- clk = clk_get(this->dev, extra_clks[i - 1]);
+ clk = devm_clk_get(this->dev, extra_clks[i - 1]);
if (IS_ERR(clk)) {
err = PTR_ERR(clk);
goto err_clock;
@@ -644,7 +602,6 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
err_clock:
dev_dbg(this->dev, "failed in finding the clocks.\n");
- gpmi_put_clks(this);
return err;
}
@@ -666,7 +623,7 @@ static int acquire_resources(struct gpmi_nand_data *this)
ret = acquire_dma_channels(this);
if (ret)
- goto exit_dma_channels;
+ goto exit_regs;
ret = gpmi_get_clks(this);
if (ret)
@@ -675,18 +632,12 @@ static int acquire_resources(struct gpmi_nand_data *this)
exit_clock:
release_dma_channels(this);
-exit_dma_channels:
- release_bch_irq(this);
exit_regs:
- release_register_block(this);
return ret;
}
static void release_resources(struct gpmi_nand_data *this)
{
- gpmi_put_clks(this);
- release_register_block(this);
- release_bch_irq(this);
release_dma_channels(this);
}
@@ -732,8 +683,7 @@ static int read_page_prepare(struct gpmi_nand_data *this,
length, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dest_phys)) {
if (alt_size < length) {
- pr_err("%s, Alternate buffer is too small\n",
- __func__);
+ dev_err(dev, "Alternate buffer is too small\n");
return -ENOMEM;
}
goto map_failed;
@@ -783,8 +733,7 @@ static int send_page_prepare(struct gpmi_nand_data *this,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, source_phys)) {
if (alt_size < length) {
- pr_err("%s, Alternate buffer is too small\n",
- __func__);
+ dev_err(dev, "Alternate buffer is too small\n");
return -ENOMEM;
}
goto map_failed;
@@ -837,14 +786,23 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
{
struct bch_geometry *geo = &this->bch_geometry;
struct device *dev = this->dev;
+ struct mtd_info *mtd = &this->mtd;
/* [1] Allocate a command buffer. PAGE_SIZE is enough. */
this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
if (this->cmd_buffer == NULL)
goto error_alloc;
- /* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */
- this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
+ /*
+ * [2] Allocate a read/write data buffer.
+ * gpmi_alloc_dma_buffer() can be called twice: a PAGE_SIZE buffer
+ * is allocated when it runs before nand_scan_ident(), and a buffer
+ * of the real NAND page size once it runs after nand_scan_ident().
+ */
+ this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
+ GFP_DMA | GFP_KERNEL);
if (this->data_buffer_dma == NULL)
goto error_alloc;
@@ -872,7 +830,6 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
error_alloc:
gpmi_free_dma_buffer(this);
- pr_err("Error allocating DMA buffers!\n");
return -ENOMEM;
}
@@ -904,7 +861,8 @@ static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
ret = gpmi_send_command(this);
if (ret)
- pr_err("Chip: %u, Error %d\n", this->current_chip, ret);
+ dev_err(this->dev, "Chip: %u, Error %d\n",
+ this->current_chip, ret);
this->command_length = 0;
}
@@ -935,7 +893,7 @@ static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
struct nand_chip *chip = mtd->priv;
struct gpmi_nand_data *this = chip->priv;
- pr_debug("len is %d\n", len);
+ dev_dbg(this->dev, "len is %d\n", len);
this->upper_buf = buf;
this->upper_len = len;
@@ -947,7 +905,7 @@ static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
struct nand_chip *chip = mtd->priv;
struct gpmi_nand_data *this = chip->priv;
- pr_debug("len is %d\n", len);
+ dev_dbg(this->dev, "len is %d\n", len);
this->upper_buf = (uint8_t *)buf;
this->upper_len = len;
@@ -1026,13 +984,13 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
unsigned int max_bitflips = 0;
int ret;
- pr_debug("page number is : %d\n", page);
+ dev_dbg(this->dev, "page number is : %d\n", page);
ret = read_page_prepare(this, buf, mtd->writesize,
this->payload_virt, this->payload_phys,
nfc_geo->payload_size,
&payload_virt, &payload_phys);
if (ret) {
- pr_err("Inadequate DMA buffer\n");
+ dev_err(this->dev, "Inadequate DMA buffer\n");
ret = -ENOMEM;
return ret;
}
@@ -1046,7 +1004,7 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
nfc_geo->payload_size,
payload_virt, payload_phys);
if (ret) {
- pr_err("Error in ECC-based read: %d\n", ret);
+ dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
return ret;
}
@@ -1102,7 +1060,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
dma_addr_t auxiliary_phys;
int ret;
- pr_debug("ecc write page.\n");
+ dev_dbg(this->dev, "ecc write page.\n");
if (this->swap_block_mark) {
/*
* If control arrives here, we're doing block mark swapping.
@@ -1132,7 +1090,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
nfc_geo->payload_size,
&payload_virt, &payload_phys);
if (ret) {
- pr_err("Inadequate payload DMA buffer\n");
+ dev_err(this->dev, "Inadequate payload DMA buffer\n");
return 0;
}
@@ -1142,7 +1100,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
nfc_geo->auxiliary_size,
&auxiliary_virt, &auxiliary_phys);
if (ret) {
- pr_err("Inadequate auxiliary DMA buffer\n");
+ dev_err(this->dev, "Inadequate auxiliary DMA buffer\n");
goto exit_auxiliary;
}
}
@@ -1150,7 +1108,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
/* Ask the NFC. */
ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
if (ret)
- pr_err("Error in ECC-based write: %d\n", ret);
+ dev_err(this->dev, "Error in ECC-based write: %d\n", ret);
if (!this->swap_block_mark) {
send_page_end(this, chip->oob_poi, mtd->oobsize,
@@ -1240,7 +1198,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
{
struct gpmi_nand_data *this = chip->priv;
- pr_debug("page number is %d\n", page);
+ dev_dbg(this->dev, "page number is %d\n", page);
/* clear the OOB buffer */
memset(chip->oob_poi, ~0, mtd->oobsize);
@@ -1453,7 +1411,6 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
/* Write the NCB fingerprint into the page buffer. */
memset(buffer, ~0, mtd->writesize);
- memset(chip->oob_poi, ~0, mtd->oobsize);
memcpy(buffer + 12, fingerprint, strlen(fingerprint));
/* Loop through the first search area, writing NCB fingerprints. */
@@ -1568,7 +1525,7 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this)
/* Set up the NFC geometry which is used by BCH. */
ret = bch_set_geometry(this);
if (ret) {
- pr_err("Error setting BCH geometry : %d\n", ret);
+ dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
return ret;
}
@@ -1576,20 +1533,7 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this)
return gpmi_alloc_dma_buffer(this);
}
-static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
-{
- /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
- if (GPMI_IS_MX23(this))
- this->swap_block_mark = false;
- else
- this->swap_block_mark = true;
-
- /* Set up the medium geometry */
- return gpmi_set_geometry(this);
-
-}
-
-static void gpmi_nfc_exit(struct gpmi_nand_data *this)
+static void gpmi_nand_exit(struct gpmi_nand_data *this)
{
nand_release(&this->mtd);
gpmi_free_dma_buffer(this);
@@ -1603,8 +1547,11 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
struct bch_geometry *bch_geo = &this->bch_geometry;
int ret;
- /* Prepare for the BBT scan. */
- ret = gpmi_pre_bbt_scan(this);
+ /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
+ this->swap_block_mark = !GPMI_IS_MX23(this);
+
+ /* Set up the medium geometry */
+ ret = gpmi_set_geometry(this);
if (ret)
return ret;
@@ -1629,7 +1576,7 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
return 0;
}
-static int gpmi_nfc_init(struct gpmi_nand_data *this)
+static int gpmi_nand_init(struct gpmi_nand_data *this)
{
struct mtd_info *mtd = &this->mtd;
struct nand_chip *chip = &this->nand;
@@ -1693,7 +1640,7 @@ static int gpmi_nfc_init(struct gpmi_nand_data *this)
return 0;
err_out:
- gpmi_nfc_exit(this);
+ gpmi_nand_exit(this);
return ret;
}
@@ -1728,15 +1675,13 @@ static int gpmi_nand_probe(struct platform_device *pdev)
if (of_id) {
pdev->id_entry = of_id->data;
} else {
- pr_err("Failed to find the right device id.\n");
+ dev_err(&pdev->dev, "Failed to find the right device id.\n");
return -ENODEV;
}
this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
- if (!this) {
- pr_err("Failed to allocate per-device memory\n");
+ if (!this)
return -ENOMEM;
- }
platform_set_drvdata(pdev, this);
this->pdev = pdev;
@@ -1750,7 +1695,7 @@ static int gpmi_nand_probe(struct platform_device *pdev)
if (ret)
goto exit_nfc_init;
- ret = gpmi_nfc_init(this);
+ ret = gpmi_nand_init(this);
if (ret)
goto exit_nfc_init;
@@ -1770,7 +1715,7 @@ static int gpmi_nand_remove(struct platform_device *pdev)
{
struct gpmi_nand_data *this = platform_get_drvdata(pdev);
- gpmi_nfc_exit(this);
+ gpmi_nand_exit(this);
release_resources(this);
return 0;
}
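
The gpmi-nand.c changes above lean on devm-managed resources (devm_ioremap_resource(), devm_request_irq(), devm_clk_get()), which is what lets release_register_block(), release_bch_irq() and gpmi_put_clks() be deleted outright. A minimal, hedged sketch of that pattern; example_probe() and example_irq() are placeholder names, not driver functions:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

/* Illustrative only: every resource below is released automatically. */
static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;
	struct clk *clk;
	int irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* No matching free_irq()/iounmap()/clk_put() needed on the error path. */
	return devm_request_irq(&pdev->dev, irq, example_irq, 0,
				dev_name(&pdev->dev), NULL);
}
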
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index a7685e3a8748..4c801fa18725 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -26,8 +26,6 @@
struct resources {
void __iomem *gpmi_regs;
void __iomem *bch_regs;
- unsigned int bch_low_interrupt;
- unsigned int bch_high_interrupt;
unsigned int dma_low_channel;
unsigned int dma_high_channel;
struct clk *clock[GPMI_CLK_MAX];
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index a264b888c66c..a2c804de156b 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -416,10 +416,8 @@ static int jz_nand_probe(struct platform_device *pdev)
uint8_t nand_maf_id = 0, nand_dev_id = 0;
nand = kzalloc(sizeof(*nand), GFP_KERNEL);
- if (!nand) {
- dev_err(&pdev->dev, "Failed to allocate device structure.\n");
+ if (!nand)
return -ENOMEM;
- }
ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base);
if (ret)
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 327d96c03505..687478c9f09c 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -539,20 +539,6 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
return 0;
}
-static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offset, int data_len, const uint8_t *buf,
- int oob_required, int page, int cached, int raw)
-{
- int res;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- res = lpc32xx_write_page_lowlevel(mtd, chip, buf, oob_required);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- lpc32xx_waitfunc(mtd, chip);
-
- return res;
-}
-
static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
@@ -627,10 +613,8 @@ static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
struct device_node *np = dev->of_node;
ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
- if (!ncfg) {
- dev_err(dev, "could not allocate memory for platform data\n");
+ if (!ncfg)
return NULL;
- }
of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
@@ -666,10 +650,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
/* Allocate memory for the device structure (and zero it) */
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- dev_err(&pdev->dev, "failed to allocate device structure.\n");
+ if (!host)
return -ENOMEM;
- }
rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->io_base = devm_ioremap_resource(&pdev->dev, rc);
@@ -732,9 +714,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
nand_chip->ecc.write_oob = lpc32xx_write_oob;
nand_chip->ecc.read_oob = lpc32xx_read_oob;
nand_chip->ecc.strength = 4;
- nand_chip->write_page = lpc32xx_write_page;
nand_chip->waitfunc = lpc32xx_waitfunc;
+ nand_chip->options = NAND_NO_SUBPAGE_WRITE;
nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
nand_chip->bbt_td = &lpc32xx_nand_bbt;
nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;
@@ -764,14 +746,12 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
if (!host->dma_buf) {
- dev_err(&pdev->dev, "Error allocating dma_buf memory\n");
res = -ENOMEM;
goto err_exit3;
}
host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
if (!host->dummy_buf) {
- dev_err(&pdev->dev, "Error allocating dummy_buf memory\n");
res = -ENOMEM;
goto err_exit3;
}
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index 23e6974ccd20..53a6742e3da3 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -725,10 +725,8 @@ static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
struct device_node *np = dev->of_node;
ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
- if (!ncfg) {
- dev_err(dev, "could not allocate memory for NAND config\n");
+ if (!ncfg)
return NULL;
- }
of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
@@ -772,10 +770,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
/* Allocate memory for the device structure (and zero it) */
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- dev_err(&pdev->dev, "failed to allocate device structure\n");
+ if (!host)
return -ENOMEM;
- }
host->io_base_dma = rc->start;
host->io_base = devm_ioremap_resource(&pdev->dev, rc);
@@ -791,8 +787,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
if (host->ncfg->wp_gpio == -EPROBE_DEFER)
return -EPROBE_DEFER;
- if (gpio_is_valid(host->ncfg->wp_gpio) &&
- gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
+ if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev,
+ host->ncfg->wp_gpio, "NAND WP")) {
dev_err(&pdev->dev, "GPIO not available\n");
return -EBUSY;
}
@@ -808,7 +804,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
/* Get NAND clock */
- host->clk = clk_get(&pdev->dev, NULL);
+ host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "Clock failure\n");
res = -ENOENT;
@@ -858,7 +854,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
GFP_KERNEL);
if (host->data_buf == NULL) {
- dev_err(&pdev->dev, "Error allocating memory\n");
res = -ENOMEM;
goto err_exit2;
}
@@ -927,10 +922,8 @@ err_exit3:
dma_release_channel(host->dma_chan);
err_exit2:
clk_disable(host->clk);
- clk_put(host->clk);
err_exit1:
lpc32xx_wp_enable(host);
- gpio_free(host->ncfg->wp_gpio);
return res;
}
@@ -953,9 +946,7 @@ static int lpc32xx_nand_remove(struct platform_device *pdev)
writel(tmp, SLC_CTRL(host->io_base));
clk_disable(host->clk);
- clk_put(host->clk);
lpc32xx_wp_enable(host);
- gpio_free(host->ncfg->wp_gpio);
return 0;
}
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 439bc3896418..31ee7cfbc12b 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -653,10 +653,8 @@ static int mpc5121_nfc_probe(struct platform_device *op)
}
prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
- if (!prv) {
- dev_err(dev, "Memory exhausted!\n");
+ if (!prv)
return -ENOMEM;
- }
mtd = &prv->mtd;
chip = &prv->chip;
@@ -731,7 +729,7 @@ static int mpc5121_nfc_probe(struct platform_device *op)
of_node_put(rootnode);
/* Enable NFC clock */
- clk = devm_clk_get(dev, "nfc_clk");
+ clk = devm_clk_get(dev, "ipg");
if (IS_ERR(clk)) {
dev_err(dev, "Unable to acquire NFC clock!\n");
retval = PTR_ERR(clk);
@@ -786,7 +784,6 @@ static int mpc5121_nfc_probe(struct platform_device *op)
/* Detect NAND chips */
if (nand_scan(mtd, be32_to_cpup(chips_no))) {
dev_err(dev, "NAND Flash not found !\n");
- devm_free_irq(dev, prv->irq, mtd);
retval = -ENXIO;
goto error;
}
@@ -811,7 +808,6 @@ static int mpc5121_nfc_probe(struct platform_device *op)
default:
dev_err(dev, "Unsupported NAND flash!\n");
- devm_free_irq(dev, prv->irq, mtd);
retval = -ENXIO;
goto error;
}
@@ -822,7 +818,6 @@ static int mpc5121_nfc_probe(struct platform_device *op)
retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
if (retval) {
dev_err(dev, "Error adding MTD device!\n");
- devm_free_irq(dev, prv->irq, mtd);
goto error;
}
@@ -836,11 +831,8 @@ static int mpc5121_nfc_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct mtd_info *mtd = dev_get_drvdata(dev);
- struct nand_chip *chip = mtd->priv;
- struct mpc5121_nfc_prv *prv = chip->priv;
nand_release(mtd);
- devm_free_irq(dev, prv->irq, mtd);
mpc5121_nfc_free(dev, mtd);
return 0;
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 9dfdb06c508b..e9a4835c4dd9 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -677,7 +677,6 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
ecc_stat >>= 4;
} while (--no_subpages);
- mtd->ecc_stats.corrected += ret;
pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
return ret;
@@ -1400,12 +1399,15 @@ static int mxcnd_probe(struct platform_device *pdev)
int err = 0;
/* Allocate memory for MTD device structure and private data */
- host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host) +
- NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE, GFP_KERNEL);
+ host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host),
+ GFP_KERNEL);
if (!host)
return -ENOMEM;
- host->data_buf = (uint8_t *)(host + 1);
+ /* allocate a temporary buffer for the nand_scan_ident() */
+ host->data_buf = devm_kzalloc(&pdev->dev, PAGE_SIZE, GFP_KERNEL);
+ if (!host->data_buf)
+ return -ENOMEM;
host->dev = &pdev->dev;
/* structures must be linked */
@@ -1512,7 +1514,9 @@ static int mxcnd_probe(struct platform_device *pdev)
if (err)
return err;
- clk_prepare_enable(host->clk);
+ err = clk_prepare_enable(host->clk);
+ if (err)
+ return err;
host->clk_act = 1;
/*
@@ -1531,6 +1535,15 @@ static int mxcnd_probe(struct platform_device *pdev)
goto escan;
}
+ /* allocate the right size buffer now */
+ devm_kfree(&pdev->dev, (void *)host->data_buf);
+ host->data_buf = devm_kzalloc(&pdev->dev, mtd->writesize + mtd->oobsize,
+ GFP_KERNEL);
+ if (!host->data_buf) {
+ err = -ENOMEM;
+ goto escan;
+ }
+
/* Call preset again, with correct writesize this time */
host->devtype_data->preset(mtd);
@@ -1576,6 +1589,8 @@ static int mxcnd_remove(struct platform_device *pdev)
struct mxc_nand_host *host = platform_get_drvdata(pdev);
nand_release(&host->mtd);
+ if (host->clk_act)
+ clk_disable_unprepare(host->clk);
return 0;
}
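
Both the gpmi and mxc_nand hunks above use the same two-stage buffer scheme: allocate a PAGE_SIZE scratch buffer so nand_scan_ident() can run, then replace it with a buffer sized for the detected page. A hedged sketch of the second stage (example_resize_buf() is a made-up name, not driver code):

#include <linux/device.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>

/* Illustrative only: swap the probe-time scratch buffer for the real size. */
static int example_resize_buf(struct device *dev, struct mtd_info *mtd,
			      uint8_t **buf)
{
	devm_kfree(dev, *buf);	/* drop the PAGE_SIZE scratch buffer */

	*buf = devm_kzalloc(dev, mtd->writesize + mtd->oobsize, GFP_KERNEL);
	return *buf ? 0 : -ENOMEM;
}
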
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index bd39f7b67906..59eba5d2c685 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -29,6 +29,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -202,6 +204,51 @@ static void nand_select_chip(struct mtd_info *mtd, int chipnr)
}
/**
+ * nand_write_byte - [DEFAULT] write single byte to chip
+ * @mtd: MTD device structure
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0]
+ */
+static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ chip->write_buf(mtd, &byte, 1);
+}
+
+/**
+ * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
+ * @mtd: MTD device structure
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
+ */
+static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
+{
+ struct nand_chip *chip = mtd->priv;
+ uint16_t word = byte;
+
+ /*
+ * It's not entirely clear what should happen to I/O[15:8] when writing
+ * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
+ *
+ * When the host supports a 16-bit bus width, only data is
+ * transferred at the 16-bit width. All address and command line
+ * transfers shall use only the lower 8-bits of the data bus. During
+ * command transfers, the host may place any value on the upper
+ * 8-bits of the data bus. During address transfers, the host shall
+ * set the upper 8-bits of the data bus to 00h.
+ *
+ * One user of the write_byte callback is nand_onfi_set_features. The
+ * four parameters are specified to be written to I/O[7:0], but this is
+ * neither an address nor a command transfer. Let's assume a 0 on the
+ * upper I/O lines is OK.
+ */
+ chip->write_buf(mtd, (uint8_t *)&word, 2);
+}
+
+/**
* nand_write_buf - [DEFAULT] write buffer to chip
* @mtd: MTD device structure
* @buf: data buffer
@@ -1408,6 +1455,30 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
}
/**
+ * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
+ * @mtd: MTD device structure
+ * @retry_mode: the retry mode to use
+ *
+ * Some vendors supply a special command to shift the Vt threshold, to be used
+ * when there are too many bitflips in a page (i.e., ECC error). After setting
+ * a new threshold, the host should retry reading the page.
+ */
+static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ pr_debug("setting READ RETRY mode %d\n", retry_mode);
+
+ if (retry_mode >= chip->read_retries)
+ return -EINVAL;
+
+ if (!chip->setup_read_retry)
+ return -EOPNOTSUPP;
+
+ return chip->setup_read_retry(mtd, retry_mode);
+}
+
+/**
* nand_do_read_ops - [INTERN] Read data with ECC
* @mtd: MTD device structure
* @from: offset to read from
@@ -1420,7 +1491,6 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
{
int chipnr, page, realpage, col, bytes, aligned, oob_required;
struct nand_chip *chip = mtd->priv;
- struct mtd_ecc_stats stats;
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
@@ -1429,8 +1499,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
uint8_t *bufpoi, *oob, *buf;
unsigned int max_bitflips = 0;
-
- stats = mtd->ecc_stats;
+ int retry_mode = 0;
+ bool ecc_fail = false;
chipnr = (int)(from >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
@@ -1445,6 +1515,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
oob_required = oob ? 1 : 0;
while (1) {
+ unsigned int ecc_failures = mtd->ecc_stats.failed;
+
bytes = min(mtd->writesize - col, readlen);
aligned = (bytes == mtd->writesize);
@@ -1452,6 +1524,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
if (realpage != chip->pagebuf || oob) {
bufpoi = aligned ? buf : chip->buffers->databuf;
+read_retry:
chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
/*
@@ -1481,7 +1554,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
/* Transfer not aligned data */
if (!aligned) {
if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
- !(mtd->ecc_stats.failed - stats.failed) &&
+ !(mtd->ecc_stats.failed - ecc_failures) &&
(ops->mode != MTD_OPS_RAW)) {
chip->pagebuf = realpage;
chip->pagebuf_bitflips = ret;
@@ -1492,8 +1565,6 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
memcpy(buf, chip->buffers->databuf + col, bytes);
}
- buf += bytes;
-
if (unlikely(oob)) {
int toread = min(oobreadlen, max_oobsize);
@@ -1511,6 +1582,25 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
else
nand_wait_ready(mtd);
}
+
+ if (mtd->ecc_stats.failed - ecc_failures) {
+ if (retry_mode + 1 <= chip->read_retries) {
+ retry_mode++;
+ ret = nand_setup_read_retry(mtd,
+ retry_mode);
+ if (ret < 0)
+ break;
+
+ /* Reset failures; retry */
+ mtd->ecc_stats.failed = ecc_failures;
+ goto read_retry;
+ } else {
+ /* No more retry modes; real failure */
+ ecc_fail = true;
+ }
+ }
+
+ buf += bytes;
} else {
memcpy(buf, chip->buffers->databuf + col, bytes);
buf += bytes;
@@ -1520,6 +1610,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
readlen -= bytes;
+ /* Reset to retry mode 0 */
+ if (retry_mode) {
+ ret = nand_setup_read_retry(mtd, 0);
+ if (ret < 0)
+ break;
+ retry_mode = 0;
+ }
+
if (!readlen)
break;
@@ -1545,7 +1643,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
if (ret < 0)
return ret;
- if (mtd->ecc_stats.failed - stats.failed)
+ if (ecc_fail)
return -EBADMSG;
return max_bitflips;
@@ -2716,6 +2814,7 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
int addr, uint8_t *subfeature_param)
{
int status;
+ int i;
if (!chip->onfi_version ||
!(le16_to_cpu(chip->onfi_params.opt_cmd)
@@ -2723,7 +2822,9 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
return -EINVAL;
chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
- chip->write_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->write_byte(mtd, subfeature_param[i]);
+
status = chip->waitfunc(mtd, chip);
if (status & NAND_STATUS_FAIL)
return -EIO;
@@ -2740,6 +2841,8 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
int addr, uint8_t *subfeature_param)
{
+ int i;
+
if (!chip->onfi_version ||
!(le16_to_cpu(chip->onfi_params.opt_cmd)
& ONFI_OPT_CMD_SET_GET_FEATURES))
@@ -2749,7 +2852,8 @@ static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
- chip->read_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ *subfeature_param++ = chip->read_byte(mtd);
return 0;
}
@@ -2812,6 +2916,8 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
chip->block_markbad = nand_default_block_markbad;
if (!chip->write_buf || chip->write_buf == nand_write_buf)
chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
+ if (!chip->write_byte || chip->write_byte == nand_write_byte)
+ chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
if (!chip->read_buf || chip->read_buf == nand_read_buf)
chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
if (!chip->scan_bbt)
@@ -2926,6 +3032,30 @@ ext_out:
return ret;
}
+static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
+{
+ struct nand_chip *chip = mtd->priv;
+ uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
+
+ return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
+ feature);
+}
+
+/*
+ * Configure chip properties from Micron vendor-specific ONFI table
+ */
+static void nand_onfi_detect_micron(struct nand_chip *chip,
+ struct nand_onfi_params *p)
+{
+ struct nand_onfi_vendor_micron *micron = (void *)p->vendor;
+
+ if (le16_to_cpu(p->vendor_revision) < 1)
+ return;
+
+ chip->read_retries = micron->read_retry_options;
+ chip->setup_read_retry = nand_setup_read_retry_micron;
+}
+
/*
* Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
*/
@@ -2979,7 +3109,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
chip->onfi_version = 10;
if (!chip->onfi_version) {
- pr_info("%s: unsupported ONFI version: %d\n", __func__, val);
+ pr_info("unsupported ONFI version: %d\n", val);
return 0;
}
@@ -3032,6 +3162,9 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
pr_warn("Could not retrieve ONFI ECC requirements\n");
}
+ if (p->jedec_id == NAND_MFR_MICRON)
+ nand_onfi_detect_micron(chip, p);
+
return 1;
}
@@ -3152,9 +3285,12 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
mtd->oobsize = 512;
break;
case 6:
- default: /* Other cases are "reserved" (unknown) */
mtd->oobsize = 640;
break;
+ case 7:
+ default: /* Other cases are "reserved" (unknown) */
+ mtd->oobsize = 1024;
+ break;
}
extid >>= 2;
/* Calc blocksize */
@@ -3325,6 +3461,9 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
*busw = type->options & NAND_BUSWIDTH_16;
+ if (!mtd->name)
+ mtd->name = type->name;
+
return true;
}
return false;
@@ -3372,8 +3511,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
id_data[i] = chip->read_byte(mtd);
if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
- pr_info("%s: second ID read did not match "
- "%02x,%02x against %02x,%02x\n", __func__,
+ pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
*maf_id, *dev_id, id_data[0], id_data[1]);
return ERR_PTR(-ENODEV);
}
@@ -3440,10 +3578,10 @@ ident_done:
* Check, if buswidth is correct. Hardware drivers should set
* chip correct!
*/
- pr_info("NAND device: Manufacturer ID:"
- " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
- *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
- pr_warn("NAND bus width %d instead %d bit\n",
+ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ *maf_id, *dev_id);
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name);
+ pr_warn("bus width %d instead %d bit\n",
(chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
busw ? 16 : 8);
return ERR_PTR(-EINVAL);
@@ -3472,14 +3610,13 @@ ident_done:
if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
chip->cmdfunc = nand_command_lp;
- pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s)\n",
- *maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
+ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ *maf_id, *dev_id);
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
chip->onfi_version ? chip->onfi_params.model : type->name);
-
- pr_info("NAND device: %dMiB, %s, page size: %d, OOB size: %d\n",
+ pr_info("%dMiB, %s, page size: %d, OOB size: %d\n",
(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
mtd->writesize, mtd->oobsize);
-
return type;
}
@@ -3535,7 +3672,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
chip->select_chip(mtd, -1);
}
if (i > 1)
- pr_info("%d NAND chips detected\n", i);
+ pr_info("%d chips detected\n", i);
/* Store the number of chips and calc total size for mtd */
chip->numchips = i;
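
The nand_base.c hunk above introduces per-page read retries: on an uncorrectable ECC error the driver shifts the Vt threshold to the next retry mode and re-reads the page, then restores mode 0. The control flow, reduced to a hedged sketch (read_one_page() and set_retry_mode() are placeholders for chip operations, not kernel API; max_modes mirrors chip->read_retries and is assumed to be at least 1):

#include <linux/errno.h>

/* Placeholder chip operations; read_one_page() returns -EBADMSG on ECC failure. */
extern int read_one_page(int page);
extern int set_retry_mode(int mode);

/* Illustrative only: retry a page read across the available retry modes. */
static int read_with_retries(int page, int max_modes)
{
	int mode, ret = -EINVAL;

	for (mode = 0; mode < max_modes; mode++) {
		if (mode) {
			ret = set_retry_mode(mode);	/* shift Vt threshold */
			if (ret < 0)
				break;
		}
		ret = read_one_page(page);
		if (ret != -EBADMSG)			/* success or hard error */
			break;
	}

	if (mode)					/* always restore mode 0 */
		set_retry_mode(0);

	return ret;
}
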
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index a87b0a3afa35..daa2faacd7d0 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -169,6 +169,8 @@ struct nand_manufacturers nand_manuf_ids[] = {
{NAND_MFR_AMD, "AMD/Spansion"},
{NAND_MFR_MACRONIX, "Macronix"},
{NAND_MFR_EON, "Eon"},
+ {NAND_MFR_SANDISK, "SanDisk"},
+ {NAND_MFR_INTEL, "Intel"},
{0x0, "Unknown"}
};
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 52115151e4a7..9ee09a8177c6 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -241,12 +241,10 @@ static int nuc900_nand_probe(struct platform_device *pdev)
{
struct nuc900_nand *nuc900_nand;
struct nand_chip *chip;
- int retval;
struct resource *res;
- retval = 0;
-
- nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL);
+ nuc900_nand = devm_kzalloc(&pdev->dev, sizeof(struct nuc900_nand),
+ GFP_KERNEL);
if (!nuc900_nand)
return -ENOMEM;
chip = &(nuc900_nand->chip);
@@ -255,11 +253,9 @@ static int nuc900_nand_probe(struct platform_device *pdev)
nuc900_nand->mtd.owner = THIS_MODULE;
spin_lock_init(&nuc900_nand->lock);
- nuc900_nand->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(nuc900_nand->clk)) {
- retval = -ENOENT;
- goto fail1;
- }
+ nuc900_nand->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nuc900_nand->clk))
+ return -ENOENT;
clk_enable(nuc900_nand->clk);
chip->cmdfunc = nuc900_nand_command_lp;
@@ -272,57 +268,29 @@ static int nuc900_nand_probe(struct platform_device *pdev)
chip->ecc.mode = NAND_ECC_SOFT;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- retval = -ENXIO;
- goto fail1;
- }
-
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- retval = -EBUSY;
- goto fail1;
- }
-
- nuc900_nand->reg = ioremap(res->start, resource_size(res));
- if (!nuc900_nand->reg) {
- retval = -ENOMEM;
- goto fail2;
- }
+ nuc900_nand->reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nuc900_nand->reg))
+ return PTR_ERR(nuc900_nand->reg);
nuc900_nand_enable(nuc900_nand);
- if (nand_scan(&(nuc900_nand->mtd), 1)) {
- retval = -ENXIO;
- goto fail3;
- }
+ if (nand_scan(&(nuc900_nand->mtd), 1))
+ return -ENXIO;
mtd_device_register(&(nuc900_nand->mtd), partitions,
ARRAY_SIZE(partitions));
platform_set_drvdata(pdev, nuc900_nand);
- return retval;
-
-fail3: iounmap(nuc900_nand->reg);
-fail2: release_mem_region(res->start, resource_size(res));
-fail1: kfree(nuc900_nand);
- return retval;
+ return 0;
}
static int nuc900_nand_remove(struct platform_device *pdev)
{
struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
- struct resource *res;
nand_release(&nuc900_nand->mtd);
- iounmap(nuc900_nand->reg);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
clk_disable(nuc900_nand->clk);
- clk_put(nuc900_nand->clk);
-
- kfree(nuc900_nand);
return 0;
}
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index f77725009907..ef4190a02b7b 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1730,13 +1730,7 @@ static int omap_nand_probe(struct platform_device *pdev)
break;
case NAND_OMAP_POLLED:
- if (nand_chip->options & NAND_BUSWIDTH_16) {
- nand_chip->read_buf = omap_read_buf16;
- nand_chip->write_buf = omap_write_buf16;
- } else {
- nand_chip->read_buf = omap_read_buf8;
- nand_chip->write_buf = omap_write_buf8;
- }
+ /* Use nand_base defaults for {read,write}_buf */
break;
case NAND_OMAP_PREFETCH_DMA:
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index a393a5b6ce1e..dd7fe817eafb 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -87,7 +87,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
if (!nc) {
- printk(KERN_ERR "orion_nand: failed to allocate device structure.\n");
ret = -ENOMEM;
goto no_res;
}
@@ -101,7 +100,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
io_base = ioremap(res->start, resource_size(res));
if (!io_base) {
- printk(KERN_ERR "orion_nand: ioremap failed\n");
+ dev_err(&pdev->dev, "ioremap failed\n");
ret = -EIO;
goto no_res;
}
@@ -110,7 +109,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
board = devm_kzalloc(&pdev->dev, sizeof(struct orion_nand_data),
GFP_KERNEL);
if (!board) {
- printk(KERN_ERR "orion_nand: failed to allocate board structure.\n");
ret = -ENOMEM;
goto no_res;
}
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 4d174366a0f0..90f871acb0ef 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -223,7 +223,7 @@ MODULE_DEVICE_TABLE(of, pasemi_nand_match);
static struct platform_driver pasemi_nand_driver =
{
.driver = {
- .name = (char*)driver_name,
+ .name = driver_name,
.owner = THIS_MODULE,
.of_match_table = pasemi_nand_match,
},
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index cad4cdc9df39..0b068a5c0bff 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -47,30 +48,16 @@ static int plat_nand_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
-
/* Allocate memory for the device structure (and zero it) */
- data = kzalloc(sizeof(struct plat_nand_data), GFP_KERNEL);
- if (!data) {
- dev_err(&pdev->dev, "failed to allocate device structure.\n");
+ data = devm_kzalloc(&pdev->dev, sizeof(struct plat_nand_data),
+ GFP_KERNEL);
+ if (!data)
return -ENOMEM;
- }
-
- if (!request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev))) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- err = -EBUSY;
- goto out_free;
- }
- data->io_base = ioremap(res->start, resource_size(res));
- if (data->io_base == NULL) {
- dev_err(&pdev->dev, "ioremap failed\n");
- err = -EIO;
- goto out_release_io;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->io_base))
+ return PTR_ERR(data->io_base);
data->chip.priv = &data;
data->mtd.priv = &data->chip;
@@ -122,11 +109,6 @@ static int plat_nand_probe(struct platform_device *pdev)
out:
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
- iounmap(data->io_base);
-out_release_io:
- release_mem_region(res->start, resource_size(res));
-out_free:
- kfree(data);
return err;
}
@@ -137,16 +119,10 @@ static int plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
nand_release(&data->mtd);
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
- iounmap(data->io_base);
- release_mem_region(res->start, resource_size(res));
- kfree(data);
return 0;
}
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 4b3aaa898a8b..2a7a0b27ac38 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -7,6 +7,8 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
+ *
+ * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
*/
#include <linux/kernel.h>
@@ -24,6 +26,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_mtd.h>
#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
#define ARCH_HAS_DMA
@@ -35,6 +38,7 @@
#include <linux/platform_data/mtd-nand-pxa3xx.h>
+#define NAND_DEV_READY_TIMEOUT 50
#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
#define NAND_STOP_DELAY (2 * HZ/50)
#define PAGE_CHUNK_SIZE (2048)
@@ -54,6 +58,7 @@
#define NDPCR (0x18) /* Page Count Register */
#define NDBDR0 (0x1C) /* Bad Block Register 0 */
#define NDBDR1 (0x20) /* Bad Block Register 1 */
+#define NDECCCTRL (0x28) /* ECC control */
#define NDDB (0x40) /* Data Buffer */
#define NDCB0 (0x48) /* Command Buffer0 */
#define NDCB1 (0x4C) /* Command Buffer1 */
@@ -80,6 +85,9 @@
#define NDCR_INT_MASK (0xFFF)
#define NDSR_MASK (0xfff)
+#define NDSR_ERR_CNT_OFF (16)
+#define NDSR_ERR_CNT_MASK (0x1f)
+#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY (0x1 << 12)
#define NDSR_FLASH_RDY (0x1 << 11)
#define NDSR_CS0_PAGED (0x1 << 10)
@@ -88,8 +96,8 @@
#define NDSR_CS1_CMDD (0x1 << 7)
#define NDSR_CS0_BBD (0x1 << 6)
#define NDSR_CS1_BBD (0x1 << 5)
-#define NDSR_DBERR (0x1 << 4)
-#define NDSR_SBERR (0x1 << 3)
+#define NDSR_UNCORERR (0x1 << 4)
+#define NDSR_CORERR (0x1 << 3)
#define NDSR_WRDREQ (0x1 << 2)
#define NDSR_RDDREQ (0x1 << 1)
#define NDSR_WRCMDREQ (0x1)
@@ -98,6 +106,8 @@
#define NDCB0_ST_ROW_EN (0x1 << 26)
#define NDCB0_AUTO_RS (0x1 << 25)
#define NDCB0_CSEL (0x1 << 24)
+#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
+#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC (0x1 << 20)
@@ -108,6 +118,14 @@
#define NDCB0_CMD1_MASK (0xff)
#define NDCB0_ADDR_CYC_SHIFT (16)
+#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
+#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
+#define EXT_CMD_TYPE_READ 4 /* Read */
+#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
+#define EXT_CMD_TYPE_FINAL 3 /* Final command */
+#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
+#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
+
/* macros for registers read/write */
#define nand_writel(info, off, val) \
__raw_writel((val), (info)->mmio_base + (off))
@@ -120,9 +138,9 @@ enum {
ERR_NONE = 0,
ERR_DMABUSERR = -1,
ERR_SENDCMD = -2,
- ERR_DBERR = -3,
+ ERR_UNCORERR = -3,
ERR_BBERR = -4,
- ERR_SBERR = -5,
+ ERR_CORERR = -5,
};
enum {
@@ -149,7 +167,6 @@ struct pxa3xx_nand_host {
void *info_data;
/* page size of attached chip */
- unsigned int page_size;
int use_ecc;
int cs;
@@ -167,11 +184,13 @@ struct pxa3xx_nand_info {
struct clk *clk;
void __iomem *mmio_base;
unsigned long mmio_phys;
- struct completion cmd_complete;
+ struct completion cmd_complete, dev_ready;
unsigned int buf_start;
unsigned int buf_count;
unsigned int buf_size;
+ unsigned int data_buff_pos;
+ unsigned int oob_buff_pos;
/* DMA information */
int drcmr_dat;
@@ -195,13 +214,18 @@ struct pxa3xx_nand_info {
int cs;
int use_ecc; /* use HW ECC ? */
+ int ecc_bch; /* using BCH ECC? */
int use_dma; /* use DMA ? */
int use_spare; /* use spare ? */
- int is_ready;
+ int need_wait;
- unsigned int page_size; /* page size of attached chip */
- unsigned int data_size; /* data size in FIFO */
+ unsigned int data_size; /* data to be read from FIFO */
+ unsigned int chunk_size; /* split commands chunk size */
unsigned int oob_size;
+ unsigned int spare_size;
+ unsigned int ecc_size;
+ unsigned int ecc_err_cnt;
+ unsigned int max_bitflips;
int retcode;
/* cached register value */
@@ -239,6 +263,64 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
{ "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
};
+static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
+static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_mirror_pattern
+};
+
+static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
+ .eccbytes = 32,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = { {2, 30} }
+};
+
+static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
+ .eccbytes = 64,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127},
+ /* Bootrom looks in bytes 0 & 5 for bad blocks */
+ .oobfree = { {6, 26}, { 64, 32} }
+};
+
+static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
+ .eccbytes = 128,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = { }
+};
+
/* Define a default flash type setting serve as flash detecting only */
#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
@@ -256,6 +338,29 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
+static struct of_device_id pxa3xx_nand_dt_ids[] = {
+ {
+ .compatible = "marvell,pxa3xx-nand",
+ .data = (void *)PXA3XX_NAND_VARIANT_PXA,
+ },
+ {
+ .compatible = "marvell,armada370-nand",
+ .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
+
+static enum pxa3xx_nand_variant
+pxa3xx_nand_get_variant(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
+ if (!of_id)
+ return PXA3XX_NAND_VARIANT_PXA;
+ return (enum pxa3xx_nand_variant)of_id->data;
+}
+
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
const struct pxa3xx_nand_timing *t)
{
@@ -280,25 +385,23 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
nand_writel(info, NDTR1CS0, ndtr1);
}
-static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
+/*
+ * Set the data and OOB size, depending on the selected
+ * spare and ECC configuration.
+ * Only applicable to READ0, READOOB and PAGEPROG commands.
+ */
+static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
+ struct mtd_info *mtd)
{
- struct pxa3xx_nand_host *host = info->host[info->cs];
int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
- info->data_size = host->page_size;
- if (!oob_enable) {
- info->oob_size = 0;
+ info->data_size = mtd->writesize;
+ if (!oob_enable)
return;
- }
- switch (host->page_size) {
- case 2048:
- info->oob_size = (info->use_ecc) ? 40 : 64;
- break;
- case 512:
- info->oob_size = (info->use_ecc) ? 8 : 16;
- break;
- }
+ info->oob_size = info->spare_size;
+ if (!info->use_ecc)
+ info->oob_size += info->ecc_size;
}
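To give a concrete feel for the sizes, a sketch using the 2 KB BCH4 configuration added later in this patch (spare_size = 32, ecc_size = 32); the numbers are read off those fields, not measured:

/*
 * data_size = mtd->writesize        = 2048
 * oob_size  = spare_size            =   32  (ECC engine enabled)
 * oob_size  = spare_size + ecc_size =   64  (ECC disabled, raw access)
 */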
/**
@@ -313,10 +416,15 @@ static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
ndcr = info->reg_ndcr;
- if (info->use_ecc)
+ if (info->use_ecc) {
ndcr |= NDCR_ECC_EN;
- else
+ if (info->ecc_bch)
+ nand_writel(info, NDECCCTRL, 0x1);
+ } else {
ndcr &= ~NDCR_ECC_EN;
+ if (info->ecc_bch)
+ nand_writel(info, NDECCCTRL, 0x0);
+ }
if (info->use_dma)
ndcr |= NDCR_DMA_EN;
@@ -375,26 +483,39 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
+ unsigned int do_bytes = min(info->data_size, info->chunk_size);
+
switch (info->state) {
case STATE_PIO_WRITING:
- __raw_writesl(info->mmio_base + NDDB, info->data_buff,
- DIV_ROUND_UP(info->data_size, 4));
+ __raw_writesl(info->mmio_base + NDDB,
+ info->data_buff + info->data_buff_pos,
+ DIV_ROUND_UP(do_bytes, 4));
+
if (info->oob_size > 0)
- __raw_writesl(info->mmio_base + NDDB, info->oob_buff,
- DIV_ROUND_UP(info->oob_size, 4));
+ __raw_writesl(info->mmio_base + NDDB,
+ info->oob_buff + info->oob_buff_pos,
+ DIV_ROUND_UP(info->oob_size, 4));
break;
case STATE_PIO_READING:
- __raw_readsl(info->mmio_base + NDDB, info->data_buff,
- DIV_ROUND_UP(info->data_size, 4));
+ __raw_readsl(info->mmio_base + NDDB,
+ info->data_buff + info->data_buff_pos,
+ DIV_ROUND_UP(do_bytes, 4));
+
if (info->oob_size > 0)
- __raw_readsl(info->mmio_base + NDDB, info->oob_buff,
- DIV_ROUND_UP(info->oob_size, 4));
+ __raw_readsl(info->mmio_base + NDDB,
+ info->oob_buff + info->oob_buff_pos,
+ DIV_ROUND_UP(info->oob_size, 4));
break;
default:
dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
info->state);
BUG();
}
+
+ /* Update buffer pointers for multi-page read/write */
+ info->data_buff_pos += do_bytes;
+ info->oob_buff_pos += info->oob_size;
+ info->data_size -= do_bytes;
}
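A short walkthrough of the chunked PIO transfer above, assuming a 4 KB page and the controller's 2 KB FIFO (chunk_size = 2048):

/*
 * pass 1: do_bytes = min(4096, 2048) = 2048; data_size 4096 -> 2048
 * pass 2: do_bytes = min(2048, 2048) = 2048; data_size 2048 -> 0
 * data_buff_pos advances by do_bytes and oob_buff_pos by oob_size on
 * each pass, so consecutive chunks land back to back in the buffers.
 */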
#ifdef ARCH_HAS_DMA
@@ -452,7 +573,7 @@ static void start_data_dma(struct pxa3xx_nand_info *info)
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
struct pxa3xx_nand_info *info = devid;
- unsigned int status, is_completed = 0;
+ unsigned int status, is_completed = 0, is_ready = 0;
unsigned int ready, cmd_done;
if (info->cs == 0) {
@@ -465,10 +586,25 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
status = nand_readl(info, NDSR);
- if (status & NDSR_DBERR)
- info->retcode = ERR_DBERR;
- if (status & NDSR_SBERR)
- info->retcode = ERR_SBERR;
+ if (status & NDSR_UNCORERR)
+ info->retcode = ERR_UNCORERR;
+ if (status & NDSR_CORERR) {
+ info->retcode = ERR_CORERR;
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
+ info->ecc_bch)
+ info->ecc_err_cnt = NDSR_ERR_CNT(status);
+ else
+ info->ecc_err_cnt = 1;
+
+ /*
+ * Each chunk composing a page is corrected independently,
+ * and we need to store the maximum number of corrected bitflips
+ * to return it to the MTD layer in ecc.read_page().
+ */
+ info->max_bitflips = max_t(unsigned int,
+ info->max_bitflips,
+ info->ecc_err_cnt);
+ }
if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
/* whether use dma to transfer data */
if (info->use_dma) {
@@ -488,8 +624,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
is_completed = 1;
}
if (status & ready) {
- info->is_ready = 1;
info->state = STATE_READY;
+ is_ready = 1;
}
if (status & NDSR_WRCMDREQ) {
@@ -518,6 +654,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
nand_writel(info, NDSR, status);
if (is_completed)
complete(&info->cmd_complete);
+ if (is_ready)
+ complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
return IRQ_HANDLED;
}
@@ -530,51 +668,94 @@ static inline int is_buf_blank(uint8_t *buf, size_t len)
return 1;
}
-static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
- uint16_t column, int page_addr)
+static void set_command_address(struct pxa3xx_nand_info *info,
+ unsigned int page_size, uint16_t column, int page_addr)
{
- int addr_cycle, exec_cmd;
- struct pxa3xx_nand_host *host;
- struct mtd_info *mtd;
+ /* small page addr setting */
+ if (page_size < PAGE_CHUNK_SIZE) {
+ info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
+ | (column & 0xFF);
- host = info->host[info->cs];
- mtd = host->mtd;
- addr_cycle = 0;
- exec_cmd = 1;
+ info->ndcb2 = 0;
+ } else {
+ info->ndcb1 = ((page_addr & 0xFFFF) << 16)
+ | (column & 0xFFFF);
+
+ if (page_addr & 0xFF0000)
+ info->ndcb2 = (page_addr & 0xFF0000) >> 16;
+ else
+ info->ndcb2 = 0;
+ }
+}
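Two worked examples of the address packing above (the page_addr and column values are arbitrary, chosen only to show the bit placement):

/*
 * Small page (page_size < PAGE_CHUNK_SIZE), page_addr = 0x1234, column = 0x10:
 *   ndcb1 = (0x1234 << 8) | 0x10 = 0x00123410
 *   ndcb2 = 0
 *
 * Large page, page_addr = 0x12345, column = 0:
 *   ndcb1 = ((0x12345 & 0xFFFF) << 16) | 0 = 0x23450000
 *   ndcb2 = (0x12345 & 0xFF0000) >> 16    = 0x01
 */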
+
+static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
+{
+ struct pxa3xx_nand_host *host = info->host[info->cs];
+ struct mtd_info *mtd = host->mtd;
/* reset data and oob column point to handle data */
info->buf_start = 0;
info->buf_count = 0;
info->oob_size = 0;
+ info->data_buff_pos = 0;
+ info->oob_buff_pos = 0;
info->use_ecc = 0;
info->use_spare = 1;
- info->is_ready = 0;
info->retcode = ERR_NONE;
- if (info->cs != 0)
- info->ndcb0 = NDCB0_CSEL;
- else
- info->ndcb0 = 0;
+ info->ecc_err_cnt = 0;
+ info->ndcb3 = 0;
+ info->need_wait = 0;
switch (command) {
case NAND_CMD_READ0:
case NAND_CMD_PAGEPROG:
info->use_ecc = 1;
case NAND_CMD_READOOB:
- pxa3xx_set_datasize(info);
+ pxa3xx_set_datasize(info, mtd);
break;
case NAND_CMD_PARAM:
info->use_spare = 0;
break;
- case NAND_CMD_SEQIN:
- exec_cmd = 0;
- break;
default:
info->ndcb1 = 0;
info->ndcb2 = 0;
- info->ndcb3 = 0;
break;
}
+ /*
+ * If we are about to issue a read command, or about to set
+ * the write address, then clean the data buffer.
+ */
+ if (command == NAND_CMD_READ0 ||
+ command == NAND_CMD_READOOB ||
+ command == NAND_CMD_SEQIN) {
+
+ info->buf_count = mtd->writesize + mtd->oobsize;
+ memset(info->data_buff, 0xFF, info->buf_count);
+ }
+
+}
+
+static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
+ int ext_cmd_type, uint16_t column, int page_addr)
+{
+ int addr_cycle, exec_cmd;
+ struct pxa3xx_nand_host *host;
+ struct mtd_info *mtd;
+
+ host = info->host[info->cs];
+ mtd = host->mtd;
+ addr_cycle = 0;
+ exec_cmd = 1;
+
+ if (info->cs != 0)
+ info->ndcb0 = NDCB0_CSEL;
+ else
+ info->ndcb0 = 0;
+
+ if (command == NAND_CMD_SEQIN)
+ exec_cmd = 0;
+
addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
+ host->col_addr_cycles);
@@ -589,30 +770,42 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
if (command == NAND_CMD_READOOB)
info->buf_start += mtd->writesize;
- /* Second command setting for large pages */
- if (host->page_size >= PAGE_CHUNK_SIZE)
+ /*
+ * Multiple page read needs an 'extended command type' field,
+ * which is either naked-read or last-read according to the
+ * state.
+ */
+ if (mtd->writesize == PAGE_CHUNK_SIZE) {
info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
+ } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
+ | NDCB0_LEN_OVRD
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+ info->ndcb3 = info->chunk_size +
+ info->oob_size;
+ }
+
+ set_command_address(info, mtd->writesize, column, page_addr);
+ break;
case NAND_CMD_SEQIN:
- /* small page addr setting */
- if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) {
- info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
- | (column & 0xFF);
- info->ndcb2 = 0;
- } else {
- info->ndcb1 = ((page_addr & 0xFFFF) << 16)
- | (column & 0xFFFF);
+ info->buf_start = column;
+ set_command_address(info, mtd->writesize, 0, page_addr);
- if (page_addr & 0xFF0000)
- info->ndcb2 = (page_addr & 0xFF0000) >> 16;
- else
- info->ndcb2 = 0;
+ /*
+ * Multiple page programming needs to execute the initial
+ * SEQIN command that sets the page address.
+ */
+ if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+ | addr_cycle
+ | command;
+ /* No data transfer in this case */
+ info->data_size = 0;
+ exec_cmd = 1;
}
-
- info->buf_count = mtd->writesize + mtd->oobsize;
- memset(info->data_buff, 0xFF, info->buf_count);
-
break;
case NAND_CMD_PAGEPROG:
@@ -622,13 +815,40 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
break;
}
- info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
- | NDCB0_AUTO_RS
- | NDCB0_ST_ROW_EN
- | NDCB0_DBC
- | (NAND_CMD_PAGEPROG << 8)
- | NAND_CMD_SEQIN
- | addr_cycle;
+ /* Second command setting for large pages */
+ if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ /*
+ * Multiple page write uses the 'extended command'
+ * field. This can be used to issue a command dispatch
+ * or a naked-write depending on the current stage.
+ */
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_LEN_OVRD
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+ info->ndcb3 = info->chunk_size +
+ info->oob_size;
+
+ /*
+ * This is the command dispatch that completes a chunked
+ * page program operation.
+ */
+ if (info->data_size == 0) {
+ info->ndcb0 = NDCB0_CMD_TYPE(0x1)
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+ | command;
+ info->ndcb1 = 0;
+ info->ndcb2 = 0;
+ info->ndcb3 = 0;
+ }
+ } else {
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_AUTO_RS
+ | NDCB0_ST_ROW_EN
+ | NDCB0_DBC
+ | (NAND_CMD_PAGEPROG << 8)
+ | NAND_CMD_SEQIN
+ | addr_cycle;
+ }
break;
case NAND_CMD_PARAM:
@@ -691,8 +911,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
return exec_cmd;
}
-static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
- int column, int page_addr)
+static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
+ int column, int page_addr)
{
struct pxa3xx_nand_host *host = mtd->priv;
struct pxa3xx_nand_info *info = host->info_data;
@@ -717,10 +937,15 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
nand_writel(info, NDTR1CS0, info->ndtr1cs0);
}
+ prepare_start_command(info, command);
+
info->state = STATE_PREPARED;
- exec_cmd = prepare_command_pool(info, command, column, page_addr);
+ exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
+
if (exec_cmd) {
init_completion(&info->cmd_complete);
+ init_completion(&info->dev_ready);
+ info->need_wait = 1;
pxa3xx_nand_start(info);
ret = wait_for_completion_timeout(&info->cmd_complete,
@@ -734,6 +959,117 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
info->state = STATE_IDLE;
}
+static void nand_cmdfunc_extended(struct mtd_info *mtd,
+ const unsigned command,
+ int column, int page_addr)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ int ret, exec_cmd, ext_cmd_type;
+
+ /*
+ * if this is an x16 device then convert the input
+ * "byte" address into a "word" address appropriate
+ * for indexing a word-oriented device
+ */
+ if (info->reg_ndcr & NDCR_DWIDTH_M)
+ column /= 2;
+
+ /*
+ * There may be different NAND chip hooked to
+ * different chip select, so check whether
+ * chip select has been changed, if yes, reset the timing
+ */
+ if (info->cs != host->cs) {
+ info->cs = host->cs;
+ nand_writel(info, NDTR0CS0, info->ndtr0cs0);
+ nand_writel(info, NDTR1CS0, info->ndtr1cs0);
+ }
+
+ /* Select the extended command for the first command */
+ switch (command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ ext_cmd_type = EXT_CMD_TYPE_MONO;
+ break;
+ case NAND_CMD_SEQIN:
+ ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+ break;
+ case NAND_CMD_PAGEPROG:
+ ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+ break;
+ default:
+ ext_cmd_type = 0;
+ break;
+ }
+
+ prepare_start_command(info, command);
+
+ /*
+ * Prepare the "is ready" completion before starting a command
+ * transaction sequence. If the command is not executed, the
+ * completion will be completed, see below.
+ *
+ * We can do that inside the loop because the command variable
+ * is invariant and thus so is the exec_cmd.
+ */
+ info->need_wait = 1;
+ init_completion(&info->dev_ready);
+ do {
+ info->state = STATE_PREPARED;
+ exec_cmd = prepare_set_command(info, command, ext_cmd_type,
+ column, page_addr);
+ if (!exec_cmd) {
+ info->need_wait = 0;
+ complete(&info->dev_ready);
+ break;
+ }
+
+ init_completion(&info->cmd_complete);
+ pxa3xx_nand_start(info);
+
+ ret = wait_for_completion_timeout(&info->cmd_complete,
+ CHIP_DELAY_TIMEOUT);
+ if (!ret) {
+ dev_err(&info->pdev->dev, "Wait time out!!!\n");
+ /* Stop State Machine for next command cycle */
+ pxa3xx_nand_stop(info);
+ break;
+ }
+
+ /* Check if the sequence is complete */
+ if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
+ break;
+
+ /*
+ * After a split program command sequence has issued
+ * the command dispatch, the command sequence is complete.
+ */
+ if (info->data_size == 0 &&
+ command == NAND_CMD_PAGEPROG &&
+ ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
+ break;
+
+ if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
+ /* Last read: issue a 'last naked read' */
+ if (info->data_size == info->chunk_size)
+ ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
+ else
+ ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+
+ /*
+ * If a split program command has no more data to transfer,
+ * the command dispatch must be issued to complete.
+ */
+ } else if (command == NAND_CMD_PAGEPROG &&
+ info->data_size == 0) {
+ ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+ }
+ } while (1);
+
+ info->state = STATE_IDLE;
+}
+
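To make the loop above more concrete, a sketch of the ext_cmd_type progression for a READ0 on a 4 KB page with a 2 KB chunk_size (two chunks per page):

/*
 * iteration 1: ext_cmd_type = EXT_CMD_TYPE_MONO, first naked chunk read,
 *              data_size 4096 -> 2048; data_size == chunk_size, so
 *              ext_cmd_type becomes EXT_CMD_TYPE_LAST_RW
 * iteration 2: last naked read, data_size 2048 -> 0, loop terminates
 *
 * PAGEPROG mirrors this: naked writes are issued while data_size > 0,
 * then a final EXT_CMD_TYPE_DISPATCH command completes the program.
 */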
static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
@@ -753,20 +1089,14 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
chip->read_buf(mtd, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- if (info->retcode == ERR_SBERR) {
- switch (info->use_ecc) {
- case 1:
- mtd->ecc_stats.corrected++;
- break;
- case 0:
- default:
- break;
- }
- } else if (info->retcode == ERR_DBERR) {
+ if (info->retcode == ERR_CORERR && info->use_ecc) {
+ mtd->ecc_stats.corrected += info->ecc_err_cnt;
+
+ } else if (info->retcode == ERR_UNCORERR) {
/*
* for blank page (all 0xff), HW will calculate its ECC as
* 0, which is different from the ECC information within
- * OOB, ignore such double bit errors
+ * OOB, ignore such uncorrectable errors
*/
if (is_buf_blank(buf, mtd->writesize))
info->retcode = ERR_NONE;
@@ -774,7 +1104,7 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
mtd->ecc_stats.failed++;
}
- return 0;
+ return info->max_bitflips;
}
static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
@@ -833,21 +1163,27 @@ static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
struct pxa3xx_nand_host *host = mtd->priv;
struct pxa3xx_nand_info *info = host->info_data;
+ int ret;
+
+ if (info->need_wait) {
+ ret = wait_for_completion_timeout(&info->dev_ready,
+ CHIP_DELAY_TIMEOUT);
+ info->need_wait = 0;
+ if (!ret) {
+ dev_err(&info->pdev->dev, "Ready time out!!!\n");
+ return NAND_STATUS_FAIL;
+ }
+ }
/* pxa3xx_nand_send_command has waited for command complete */
if (this->state == FL_WRITING || this->state == FL_ERASING) {
if (info->retcode == ERR_NONE)
return 0;
- else {
- /*
- * any error make it return 0x01 which will tell
- * the caller the erase and write fail
- */
- return 0x01;
- }
+ else
+ return NAND_STATUS_FAIL;
}
- return 0;
+ return NAND_STATUS_READY;
}
static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
@@ -869,7 +1205,6 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
}
/* calculate flash information */
- host->page_size = f->page_size;
host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
/* calculate addressing information */
@@ -906,13 +1241,15 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
uint32_t ndcr = nand_readl(info, NDCR);
if (ndcr & NDCR_PAGE_SZ) {
- host->page_size = 2048;
+ /* Controller's FIFO size */
+ info->chunk_size = 2048;
host->read_id_bytes = 4;
} else {
- host->page_size = 512;
+ info->chunk_size = 512;
host->read_id_bytes = 2;
}
+ /* Set an initial chunk size */
info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
@@ -988,18 +1325,89 @@ static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
{
struct mtd_info *mtd;
+ struct nand_chip *chip;
int ret;
+
mtd = info->host[info->cs]->mtd;
+ chip = mtd->priv;
+
/* use the common timing to make a try */
ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
if (ret)
return ret;
- pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
- if (info->is_ready)
- return 0;
+ chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
+ ret = chip->waitfunc(mtd, chip);
+ if (ret & NAND_STATUS_FAIL)
+ return -ENODEV;
+
+ return 0;
+}
- return -ENODEV;
+static int pxa_ecc_init(struct pxa3xx_nand_info *info,
+ struct nand_ecc_ctrl *ecc,
+ int strength, int ecc_stepsize, int page_size)
+{
+ if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
+ info->chunk_size = 2048;
+ info->spare_size = 40;
+ info->ecc_size = 24;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = 512;
+ ecc->strength = 1;
+ return 1;
+
+ } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
+ info->chunk_size = 512;
+ info->spare_size = 8;
+ info->ecc_size = 8;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = 512;
+ ecc->strength = 1;
+ return 1;
+
+ /*
+ * Required ECC: 4-bit correction per 512 bytes
+ * Select: 16-bit correction per 2048 bytes
+ */
+ } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
+ info->ecc_bch = 1;
+ info->chunk_size = 2048;
+ info->spare_size = 32;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_2KB_bch4bit;
+ ecc->strength = 16;
+ return 1;
+
+ } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
+ info->ecc_bch = 1;
+ info->chunk_size = 2048;
+ info->spare_size = 32;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_4KB_bch4bit;
+ ecc->strength = 16;
+ return 1;
+
+ /*
+ * Required ECC: 8-bit correction per 512 bytes
+ * Select: 16-bit correction per 1024 bytes
+ */
+ } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
+ info->ecc_bch = 1;
+ info->chunk_size = 1024;
+ info->spare_size = 0;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_4KB_bch8bit;
+ ecc->strength = 16;
+ return 1;
+ }
+ return 0;
}
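For reference, the combinations accepted by the function above can be summarised as follows (read directly from its branches; any other combination returns 0 and makes the caller bail out):

/*
 * strength/step/page   chunk  spare  ecc  ecc_bch  ecc->strength
 *   1 / 512 / 2048      2048    40    24     0           1
 *   1 / 512 /  512       512     8     8     0           1
 *   4 / 512 / 2048      2048    32    32     1          16
 *   4 / 512 / 4096      2048    32    32     1          16
 *   8 / 512 / 4096      1024     0    32     1          16
 */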
static int pxa3xx_nand_scan(struct mtd_info *mtd)
@@ -1014,6 +1422,7 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
uint32_t id = -1;
uint64_t chipsize;
int i, ret, num;
+ uint16_t ecc_strength, ecc_step;
if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
goto KEEP_CONFIG;
@@ -1072,15 +1481,60 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
pxa3xx_flash_ids[1].name = NULL;
def = pxa3xx_flash_ids;
KEEP_CONFIG:
- chip->ecc.mode = NAND_ECC_HW;
- chip->ecc.size = host->page_size;
- chip->ecc.strength = 1;
-
if (info->reg_ndcr & NDCR_DWIDTH_M)
chip->options |= NAND_BUSWIDTH_16;
+ /* Device detection must be done with ECC disabled */
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ nand_writel(info, NDECCCTRL, 0x0);
+
if (nand_scan_ident(mtd, 1, def))
return -ENODEV;
+
+ if (pdata->flash_bbt) {
+ /*
+ * We'll use a bad block table stored in-flash and don't
+ * allow writing the bad block marker to the flash.
+ */
+ chip->bbt_options |= NAND_BBT_USE_FLASH |
+ NAND_BBT_NO_OOB_BBM;
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /*
+ * If the page size is bigger than the FIFO size, check that
+ * we are given the right variant and then switch to the extended
+ * (aka split) command handling.
+ */
+ if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
+ chip->cmdfunc = nand_cmdfunc_extended;
+ } else {
+ dev_err(&info->pdev->dev,
+ "unsupported page size on this variant\n");
+ return -ENODEV;
+ }
+ }
+
+ ecc_strength = chip->ecc_strength_ds;
+ ecc_step = chip->ecc_step_ds;
+
+ /* Set default ECC strength requirements on non-ONFI devices */
+ if (ecc_strength < 1 && ecc_step < 1) {
+ ecc_strength = 1;
+ ecc_step = 512;
+ }
+
+ ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
+ ecc_step, mtd->writesize);
+ if (!ret) {
+ dev_err(&info->pdev->dev,
+ "ECC strength %d at page size %d is not supported\n",
+ chip->ecc_strength_ds, mtd->writesize);
+ return -ENODEV;
+ }
+
/* calculate addressing information */
if (mtd->writesize >= 2048)
host->col_addr_cycles = 2;
@@ -1121,6 +1575,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
return -ENOMEM;
info->pdev = pdev;
+ info->variant = pxa3xx_nand_get_variant(pdev);
for (cs = 0; cs < pdata->num_cs; cs++) {
mtd = (struct mtd_info *)((unsigned int)&info[1] +
(sizeof(*mtd) + sizeof(*host)) * cs);
@@ -1138,11 +1593,12 @@ static int alloc_nand_resource(struct platform_device *pdev)
chip->controller = &info->controller;
chip->waitfunc = pxa3xx_nand_waitfunc;
chip->select_chip = pxa3xx_nand_select_chip;
- chip->cmdfunc = pxa3xx_nand_cmdfunc;
chip->read_word = pxa3xx_nand_read_word;
chip->read_byte = pxa3xx_nand_read_byte;
chip->read_buf = pxa3xx_nand_read_buf;
chip->write_buf = pxa3xx_nand_write_buf;
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ chip->cmdfunc = nand_cmdfunc;
}
spin_lock_init(&chip->controller->lock);
@@ -1254,25 +1710,6 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id pxa3xx_nand_dt_ids[] = {
- {
- .compatible = "marvell,pxa3xx-nand",
- .data = (void *)PXA3XX_NAND_VARIANT_PXA,
- },
- {}
-};
-MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
-
-static enum pxa3xx_nand_variant
-pxa3xx_nand_get_variant(struct platform_device *pdev)
-{
- const struct of_device_id *of_id =
- of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
- if (!of_id)
- return PXA3XX_NAND_VARIANT_PXA;
- return (enum pxa3xx_nand_variant)of_id->data;
-}
-
static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
{
struct pxa3xx_nand_platform_data *pdata;
@@ -1292,6 +1729,7 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
if (of_get_property(np, "marvell,nand-keep-config", NULL))
pdata->keep_config = 1;
of_property_read_u32(np, "num-cs", &pdata->num_cs);
+ pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
pdev->dev.platform_data = pdata;
@@ -1329,7 +1767,6 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
}
info = platform_get_drvdata(pdev);
- info->variant = pxa3xx_nand_get_variant(pdev);
probe_success = 0;
for (cs = 0; cs < pdata->num_cs; cs++) {
struct mtd_info *mtd = info->host[cs]->mtd;
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index d65cbe903d40..f0918e7411d9 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -46,9 +46,43 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
-#include <plat/regs-nand.h>
#include <linux/platform_data/mtd-nand-s3c2410.h>
+#define S3C2410_NFREG(x) (x)
+
+#define S3C2410_NFCONF S3C2410_NFREG(0x00)
+#define S3C2410_NFCMD S3C2410_NFREG(0x04)
+#define S3C2410_NFADDR S3C2410_NFREG(0x08)
+#define S3C2410_NFDATA S3C2410_NFREG(0x0C)
+#define S3C2410_NFSTAT S3C2410_NFREG(0x10)
+#define S3C2410_NFECC S3C2410_NFREG(0x14)
+#define S3C2440_NFCONT S3C2410_NFREG(0x04)
+#define S3C2440_NFCMD S3C2410_NFREG(0x08)
+#define S3C2440_NFADDR S3C2410_NFREG(0x0C)
+#define S3C2440_NFDATA S3C2410_NFREG(0x10)
+#define S3C2440_NFSTAT S3C2410_NFREG(0x20)
+#define S3C2440_NFMECC0 S3C2410_NFREG(0x2C)
+#define S3C2412_NFSTAT S3C2410_NFREG(0x28)
+#define S3C2412_NFMECC0 S3C2410_NFREG(0x34)
+#define S3C2410_NFCONF_EN (1<<15)
+#define S3C2410_NFCONF_INITECC (1<<12)
+#define S3C2410_NFCONF_nFCE (1<<11)
+#define S3C2410_NFCONF_TACLS(x) ((x)<<8)
+#define S3C2410_NFCONF_TWRPH0(x) ((x)<<4)
+#define S3C2410_NFCONF_TWRPH1(x) ((x)<<0)
+#define S3C2410_NFSTAT_BUSY (1<<0)
+#define S3C2440_NFCONF_TACLS(x) ((x)<<12)
+#define S3C2440_NFCONF_TWRPH0(x) ((x)<<8)
+#define S3C2440_NFCONF_TWRPH1(x) ((x)<<4)
+#define S3C2440_NFCONT_INITECC (1<<4)
+#define S3C2440_NFCONT_nFCE (1<<1)
+#define S3C2440_NFCONT_ENABLE (1<<0)
+#define S3C2440_NFSTAT_READY (1<<0)
+#define S3C2412_NFCONF_NANDBOOT (1<<31)
+#define S3C2412_NFCONT_INIT_MAIN_ECC (1<<5)
+#define S3C2412_NFCONT_nFCE0 (1<<1)
+#define S3C2412_NFSTAT_READY (1<<0)
+
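A minimal sketch of how the timing macros above compose an NFCONF value; this is illustrative only, not part of the patch, and the timing numbers and the 'regs' base are assumptions made for the example:

static void example_s3c2410_nfconf(void __iomem *regs)
{
	/* assumed timings: TACLS=3, TWRPH0=7, TWRPH1=7 */
	unsigned long cfg = S3C2410_NFCONF_EN |
			    S3C2410_NFCONF_TACLS(3) |
			    S3C2410_NFCONF_TWRPH0(7) |
			    S3C2410_NFCONF_TWRPH1(7);

	writel(cfg, regs + S3C2410_NFCONF);
}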
/* new oob placement block for use with hardware ecc generation
*/
@@ -919,7 +953,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (info == NULL) {
- dev_err(&pdev->dev, "no memory for flash info\n");
err = -ENOMEM;
goto exit_error;
}
@@ -974,7 +1007,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
size = nr_sets * sizeof(*info->mtds);
info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (info->mtds == NULL) {
- dev_err(&pdev->dev, "failed to allocate mtd storage\n");
err = -ENOMEM;
goto exit_error;
}
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index a3c84ebbe392..d72783dd7b96 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -151,7 +151,7 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
dma_cap_set(DMA_SLAVE, mask);
flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
- (void *)pdata->slave_id_fifo0_tx);
+ (void *)(uintptr_t)pdata->slave_id_fifo0_tx);
dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
flctl->chan_fifo0_tx);
@@ -168,7 +168,7 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
goto err;
flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
- (void *)pdata->slave_id_fifo0_rx);
+ (void *)(uintptr_t)pdata->slave_id_fifo0_rx);
dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
flctl->chan_fifo0_rx);
@@ -1021,7 +1021,6 @@ static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_OF
struct flctl_soc_config {
unsigned long flcmncr_val;
unsigned has_hwecc:1;
@@ -1059,10 +1058,8 @@ static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
GFP_KERNEL);
- if (!pdata) {
- dev_err(dev, "%s: failed to allocate config data\n", __func__);
+ if (!pdata)
return NULL;
- }
/* set SoC specific options */
pdata->flcmncr_val = config->flcmncr_val;
@@ -1080,12 +1077,6 @@ static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
return pdata;
}
-#else /* CONFIG_OF */
-static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
-{
- return NULL;
-}
-#endif /* CONFIG_OF */
static int flctl_probe(struct platform_device *pdev)
{
@@ -1094,38 +1085,30 @@ static int flctl_probe(struct platform_device *pdev)
struct mtd_info *flctl_mtd;
struct nand_chip *nand;
struct sh_flctl_platform_data *pdata;
- int ret = -ENXIO;
+ int ret;
int irq;
struct mtd_part_parser_data ppdata = {};
- flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
- if (!flctl) {
- dev_err(&pdev->dev, "failed to allocate driver data\n");
+ flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
+ if (!flctl)
return -ENOMEM;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get I/O memory\n");
- goto err_iomap;
- }
-
- flctl->reg = ioremap(res->start, resource_size(res));
- if (flctl->reg == NULL) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- goto err_iomap;
- }
+ flctl->reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(flctl->reg))
+ return PTR_ERR(flctl->reg);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "failed to get flste irq data\n");
- goto err_flste;
+ return -ENXIO;
}
- ret = request_irq(irq, flctl_handle_flste, IRQF_SHARED, "flste", flctl);
+ ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
+ "flste", flctl);
if (ret) {
dev_err(&pdev->dev, "request interrupt failed.\n");
- goto err_flste;
+ return ret;
}
if (pdev->dev.of_node)
@@ -1135,8 +1118,7 @@ static int flctl_probe(struct platform_device *pdev)
if (!pdata) {
dev_err(&pdev->dev, "no setup data defined\n");
- ret = -EINVAL;
- goto err_pdata;
+ return -EINVAL;
}
platform_set_drvdata(pdev, flctl);
@@ -1190,12 +1172,6 @@ static int flctl_probe(struct platform_device *pdev)
err_chip:
flctl_release_dma(flctl);
pm_runtime_disable(&pdev->dev);
-err_pdata:
- free_irq(irq, flctl);
-err_flste:
- iounmap(flctl->reg);
-err_iomap:
- kfree(flctl);
return ret;
}
@@ -1206,9 +1182,6 @@ static int flctl_remove(struct platform_device *pdev)
flctl_release_dma(flctl);
nand_release(&flctl->mtd);
pm_runtime_disable(&pdev->dev);
- free_irq(platform_get_irq(pdev, 0), flctl);
- iounmap(flctl->reg);
- kfree(flctl);
return 0;
}
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 87908d760feb..e81059b58382 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -121,10 +121,8 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
/* Allocate memory for MTD device structure and private data */
sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL);
- if (!sharpsl) {
- printk("Unable to allocate SharpSL NAND MTD device structure.\n");
+ if (!sharpsl)
return -ENOMEM;
- }
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -136,7 +134,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
/* map physical address */
sharpsl->io = ioremap(r->start, resource_size(r));
if (!sharpsl->io) {
- printk("ioremap to access Sharp SL NAND chip failed\n");
+ dev_err(&pdev->dev, "ioremap to access Sharp SL NAND chip failed\n");
err = -EIO;
goto err_ioremap;
}
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index a3747c914d57..fb8fd35fa668 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -371,11 +371,9 @@ static int tmio_probe(struct platform_device *dev)
if (data == NULL)
dev_warn(&dev->dev, "NULL platform data!\n");
- tmio = kzalloc(sizeof *tmio, GFP_KERNEL);
- if (!tmio) {
- retval = -ENOMEM;
- goto err_kzalloc;
- }
+ tmio = devm_kzalloc(&dev->dev, sizeof(*tmio), GFP_KERNEL);
+ if (!tmio)
+ return -ENOMEM;
tmio->dev = dev;
@@ -385,22 +383,18 @@ static int tmio_probe(struct platform_device *dev)
mtd->priv = nand_chip;
mtd->name = "tmio-nand";
- tmio->ccr = ioremap(ccr->start, resource_size(ccr));
- if (!tmio->ccr) {
- retval = -EIO;
- goto err_iomap_ccr;
- }
+ tmio->ccr = devm_ioremap(&dev->dev, ccr->start, resource_size(ccr));
+ if (!tmio->ccr)
+ return -EIO;
tmio->fcr_base = fcr->start & 0xfffff;
- tmio->fcr = ioremap(fcr->start, resource_size(fcr));
- if (!tmio->fcr) {
- retval = -EIO;
- goto err_iomap_fcr;
- }
+ tmio->fcr = devm_ioremap(&dev->dev, fcr->start, resource_size(fcr));
+ if (!tmio->fcr)
+ return -EIO;
retval = tmio_hw_init(dev, tmio);
if (retval)
- goto err_hwinit;
+ return retval;
/* Set address of NAND IO lines */
nand_chip->IO_ADDR_R = tmio->fcr;
@@ -428,7 +422,8 @@ static int tmio_probe(struct platform_device *dev)
/* 15 us command delay time */
nand_chip->chip_delay = 15;
- retval = request_irq(irq, &tmio_irq, 0, dev_name(&dev->dev), tmio);
+ retval = devm_request_irq(&dev->dev, irq, &tmio_irq, 0,
+ dev_name(&dev->dev), tmio);
if (retval) {
dev_err(&dev->dev, "request_irq error %d\n", retval);
goto err_irq;
@@ -440,7 +435,7 @@ static int tmio_probe(struct platform_device *dev)
/* Scan to find existence of the device */
if (nand_scan(mtd, 1)) {
retval = -ENODEV;
- goto err_scan;
+ goto err_irq;
}
/* Register the partitions */
retval = mtd_device_parse_register(mtd, NULL, NULL,
@@ -451,18 +446,8 @@ static int tmio_probe(struct platform_device *dev)
nand_release(mtd);
-err_scan:
- if (tmio->irq)
- free_irq(tmio->irq, tmio);
err_irq:
tmio_hw_stop(dev, tmio);
-err_hwinit:
- iounmap(tmio->fcr);
-err_iomap_fcr:
- iounmap(tmio->ccr);
-err_iomap_ccr:
- kfree(tmio);
-err_kzalloc:
return retval;
}
@@ -471,12 +456,7 @@ static int tmio_remove(struct platform_device *dev)
struct tmio_nand *tmio = platform_get_drvdata(dev);
nand_release(&tmio->mtd);
- if (tmio->irq)
- free_irq(tmio->irq, tmio);
tmio_hw_stop(dev, tmio);
- iounmap(tmio->fcr);
- iounmap(tmio->ccr);
- kfree(tmio);
return 0;
}
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 235714a421dd..c1622a5ba814 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -319,11 +319,8 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
continue;
txx9_priv = kzalloc(sizeof(struct txx9ndfmc_priv),
GFP_KERNEL);
- if (!txx9_priv) {
- dev_err(&dev->dev, "Unable to allocate "
- "TXx9 NDFMC MTD device structure.\n");
+ if (!txx9_priv)
continue;
- }
chip = &txx9_priv->chip;
mtd = &txx9_priv->mtd;
mtd->owner = THIS_MODULE;
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index d64f8c30945f..aa26c32e1bc2 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -81,7 +81,7 @@ static int parse_ofpart_partitions(struct mtd_info *master,
partname = of_get_property(pp, "label", &len);
if (!partname)
partname = of_get_property(pp, "name", &len);
- (*pparts)[i].name = (char *)partname;
+ (*pparts)[i].name = partname;
if (of_get_property(pp, "read-only", &len))
(*pparts)[i].mask_flags |= MTD_WRITEABLE;
@@ -152,7 +152,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master,
if (names && (plen > 0)) {
int len = strlen(names) + 1;
- (*pparts)[i].name = (char *)names;
+ (*pparts)[i].name = names;
plen -= len;
names += len;
} else {
@@ -173,18 +173,9 @@ static struct mtd_part_parser ofoldpart_parser = {
static int __init ofpart_parser_init(void)
{
- int rc;
- rc = register_mtd_parser(&ofpart_parser);
- if (rc)
- goto out;
-
- rc = register_mtd_parser(&ofoldpart_parser);
- if (!rc)
- return 0;
-
- deregister_mtd_parser(&ofoldpart_parser);
-out:
- return rc;
+ register_mtd_parser(&ofpart_parser);
+ register_mtd_parser(&ofoldpart_parser);
+ return 0;
}
static void __exit ofpart_parser_exit(void)
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 63699fffc96d..8e1919b6f074 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -58,7 +58,7 @@ static int generic_onenand_probe(struct platform_device *pdev)
goto out_release_mem_region;
}
- info->onenand.mmcontrol = pdata ? pdata->mmcontrol : 0;
+ info->onenand.mmcontrol = pdata ? pdata->mmcontrol : NULL;
info->onenand.irq = platform_get_irq(pdev, 0);
info->mtd.name = dev_name(&pdev->dev);
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index 580035c803d6..5da911ebdf49 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -300,7 +300,8 @@ MODULE_ALIAS("RedBoot");
static int __init redboot_parser_init(void)
{
- return register_mtd_parser(&redboot_parser);
+ register_mtd_parser(&redboot_parser);
+ return 0;
}
static void __exit redboot_parser_exit(void)
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index 70106607c247..e579f9027c47 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -19,7 +19,7 @@
* or detected.
*/
-#if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE)
+#if IS_ENABLED(CONFIG_MTD_NAND)
struct nand_ecc_test {
const char *name;
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 33bb1f2b63e4..6f27d9a1be3b 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -1453,8 +1453,10 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
struct ubi_attach_info *scan_ai;
scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
- if (!scan_ai)
+ if (!scan_ai) {
+ err = -ENOMEM;
goto out_wl;
+ }
err = scan_all(ubi, scan_ai, 0);
if (err) {
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index e05dc6298c1d..57deae961429 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1245,8 +1245,10 @@ static int __init ubi_init(void)
ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
sizeof(struct ubi_wl_entry),
0, 0, NULL);
- if (!ubi_wl_entry_slab)
+ if (!ubi_wl_entry_slab) {
+ err = -ENOMEM;
goto out_dev_unreg;
+ }
err = ubi_debugfs_init();
if (err)
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index bf79def40126..d36134925d31 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -495,10 +495,12 @@ out:
*/
static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
{
- int err, err1;
+ int err;
size_t written;
loff_t addr;
uint32_t data = 0;
+ struct ubi_ec_hdr ec_hdr;
+
/*
* Note, we cannot generally define VID header buffers on stack,
* because of the way we deal with these buffers (see the header
@@ -509,50 +511,38 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
struct ubi_vid_hdr vid_hdr;
/*
+ * If VID or EC is valid, we have to corrupt them before erasing.
* It is important to first invalidate the EC header, and then the VID
* header. Otherwise a power cut may lead to valid EC header and
* invalid VID header, in which case UBI will treat this PEB as
* corrupted and will try to preserve it, and print scary warnings.
*/
addr = (loff_t)pnum * ubi->peb_size;
- err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
- if (!err) {
- addr += ubi->vid_hdr_aloffset;
+ err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
+ if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
+ err != UBI_IO_FF) {
err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
- if (!err)
- return 0;
+ if (err)
+ goto error;
}
- /*
- * We failed to write to the media. This was observed with Spansion
- * S29GL512N NOR flash. Most probably the previously eraseblock erasure
- * was interrupted at a very inappropriate moment, so it became
- * unwritable. In this case we probably anyway have garbage in this
- * PEB.
- */
- err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
- if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
- err1 == UBI_IO_FF) {
- struct ubi_ec_hdr ec_hdr;
-
- err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
- if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
- err1 == UBI_IO_FF)
- /*
- * Both VID and EC headers are corrupted, so we can
- * safely erase this PEB and not afraid that it will be
- * treated as a valid PEB in case of an unclean reboot.
- */
- return 0;
+ err = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
+ if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
+ err != UBI_IO_FF) {
+ addr += ubi->vid_hdr_aloffset;
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
+ if (err)
+ goto error;
}
+ return 0;
+error:
/*
- * The PEB contains a valid VID header, but we cannot invalidate it.
- * Supposedly the flash media or the driver is screwed up, so return an
- * error.
+ * The PEB contains a valid VID or EC header, but we cannot invalidate
+ * it. Supposedly the flash media or the driver is screwed up, so
+ * return an error.
*/
- ubi_err("cannot invalidate PEB %d, write returned %d read returned %d",
- pnum, err, err1);
+ ubi_err("cannot invalidate PEB %d, write returned %d", pnum, err);
ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
return -EIO;
}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b45b240889f5..f342278539d5 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -236,6 +236,7 @@ config VETH
config VIRTIO_NET
tristate "Virtio network driver"
depends on VIRTIO
+ select AVERAGE
---help---
This is the virtual network driver for virtio. It can be used with
lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index a7271e093845..67977f15af25 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -32,39 +32,12 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/netlink.h>
+#include <net/Space.h>
/* A unified ethernet device probe. This is the easiest way to have every
ethernet adaptor have the name "eth[0123...]".
*/
-extern struct net_device *hp100_probe(int unit);
-extern struct net_device *ultra_probe(int unit);
-extern struct net_device *wd_probe(int unit);
-extern struct net_device *ne_probe(int unit);
-extern struct net_device *fmv18x_probe(int unit);
-extern struct net_device *i82596_probe(int unit);
-extern struct net_device *ni65_probe(int unit);
-extern struct net_device *sonic_probe(int unit);
-extern struct net_device *smc_init(int unit);
-extern struct net_device *atarilance_probe(int unit);
-extern struct net_device *sun3lance_probe(int unit);
-extern struct net_device *sun3_82586_probe(int unit);
-extern struct net_device *apne_probe(int unit);
-extern struct net_device *cs89x0_probe(int unit);
-extern struct net_device *mvme147lance_probe(int unit);
-extern struct net_device *tc515_probe(int unit);
-extern struct net_device *lance_probe(int unit);
-extern struct net_device *mac8390_probe(int unit);
-extern struct net_device *mac89x0_probe(int unit);
-extern struct net_device *cops_probe(int unit);
-extern struct net_device *ltpc_probe(void);
-
-/* Fibre Channel adapters */
-extern int iph5526_probe(struct net_device *dev);
-
-/* SBNI adapters */
-extern int sbni_probe(int unit);
-
struct devprobe2 {
struct net_device *(*probe)(int unit);
int status; /* non-zero if autoprobe has failed */
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index 74dc1875f9cd..326a612a2730 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -32,7 +32,6 @@
* **********************
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 5a5d720da929..6f4e80853ed4 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_BONDING) += bonding.o
-bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o bond_netlink.o bond_options.o
+bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_sysfs_slave.o bond_debugfs.o bond_netlink.o bond_options.o
proc-$(CONFIG_PROC_FS) += bond_procfs.o
bonding-objs += $(proc-y)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 187b1b7772ef..cce1f1bf90b4 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -34,14 +34,14 @@
#include "bonding.h"
#include "bond_3ad.h"
-// General definitions
+/* General definitions */
#define AD_SHORT_TIMEOUT 1
#define AD_LONG_TIMEOUT 0
#define AD_STANDBY 0x2
#define AD_MAX_TX_IN_SECOND 3
#define AD_COLLECTOR_MAX_DELAY 0
-// Timer definitions(43.4.4 in the 802.3ad standard)
+/* Timer definitions (43.4.4 in the 802.3ad standard) */
#define AD_FAST_PERIODIC_TIME 1
#define AD_SLOW_PERIODIC_TIME 30
#define AD_SHORT_TIMEOUT_TIME (3*AD_FAST_PERIODIC_TIME)
@@ -49,7 +49,7 @@
#define AD_CHURN_DETECTION_TIME 60
#define AD_AGGREGATE_WAIT_TIME 2
-// Port state definitions(43.4.2.2 in the 802.3ad standard)
+/* Port state definitions (43.4.2.2 in the 802.3ad standard) */
#define AD_STATE_LACP_ACTIVITY 0x1
#define AD_STATE_LACP_TIMEOUT 0x2
#define AD_STATE_AGGREGATION 0x4
@@ -59,7 +59,9 @@
#define AD_STATE_DEFAULTED 0x40
#define AD_STATE_EXPIRED 0x80
-// Port Variables definitions used by the State Machines(43.4.7 in the 802.3ad standard)
+/* Port Variables definitions used by the State Machines (43.4.7 in the
+ * 802.3ad standard)
+ */
#define AD_PORT_BEGIN 0x1
#define AD_PORT_LACP_ENABLED 0x2
#define AD_PORT_ACTOR_CHURN 0x4
@@ -71,27 +73,27 @@
#define AD_PORT_SELECTED 0x100
#define AD_PORT_MOVED 0x200
-// Port Key definitions
-// key is determined according to the link speed, duplex and
-// user key(which is yet not supported)
-// ------------------------------------------------------------
-// Port key : | User key | Speed |Duplex|
-// ------------------------------------------------------------
-// 16 6 1 0
+/* Port Key definitions
+ * key is determined according to the link speed, duplex and
+ * user key (which is yet not supported)
+ * --------------------------------------------------------------
+ * Port key : | User key | Speed | Duplex |
+ * --------------------------------------------------------------
+ * 16 6 1 0
+ */
#define AD_DUPLEX_KEY_BITS 0x1
#define AD_SPEED_KEY_BITS 0x3E
#define AD_USER_KEY_BITS 0xFFC0
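A small illustration of the key layout described above (the port speed is an arbitrary example; user keys are not yet supported, so that field is 0):

/*
 * Full-duplex port at 1000 Mb/s:
 *   key = (AD_LINK_SPEED_BITMASK_1000MBPS << 1) | AD_DUPLEX_KEY_BITS
 *       = (0x8 << 1) | 0x1
 *       = 0x11
 */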
-//dalloun
#define AD_LINK_SPEED_BITMASK_1MBPS 0x1
#define AD_LINK_SPEED_BITMASK_10MBPS 0x2
#define AD_LINK_SPEED_BITMASK_100MBPS 0x4
#define AD_LINK_SPEED_BITMASK_1000MBPS 0x8
#define AD_LINK_SPEED_BITMASK_10000MBPS 0x10
-//endalloun
-// compare MAC addresses
-#define MAC_ADDRESS_COMPARE(A, B) memcmp(A, B, ETH_ALEN)
+/* compare MAC addresses */
+#define MAC_ADDRESS_EQUAL(A, B) \
+ ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
static u16 ad_ticks_per_sec;
@@ -99,7 +101,7 @@ static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-// ================= main 802.3ad protocol functions ==================
+/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
static int ad_marker_send(struct port *port, struct bond_marker *marker);
static void ad_mux_machine(struct port *port);
@@ -113,13 +115,13 @@ static void ad_initialize_agg(struct aggregator *aggregator);
static void ad_initialize_port(struct port *port, int lacp_fast);
static void ad_enable_collecting_distributing(struct port *port);
static void ad_disable_collecting_distributing(struct port *port);
-static void ad_marker_info_received(struct bond_marker *marker_info, struct port *port);
-static void ad_marker_response_received(struct bond_marker *marker, struct port *port);
+static void ad_marker_info_received(struct bond_marker *marker_info,
+ struct port *port);
+static void ad_marker_response_received(struct bond_marker *marker,
+ struct port *port);
-/////////////////////////////////////////////////////////////////////////////////
-// ================= api to bonding and kernel code ==================
-/////////////////////////////////////////////////////////////////////////////////
+/* ================= api to bonding and kernel code ================== */
/**
* __get_bond_by_port - get the port's bonding struct
@@ -141,25 +143,32 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
*
* Return the aggregator of the first slave in @bond, or %NULL if it can't be
* found.
+ * The caller must hold RCU or RTNL lock.
*/
static inline struct aggregator *__get_first_agg(struct port *port)
{
struct bonding *bond = __get_bond_by_port(port);
struct slave *first_slave;
+ struct aggregator *agg;
- // If there's no bond for this port, or bond has no slaves
+ /* If there's no bond for this port, or bond has no slaves */
if (bond == NULL)
return NULL;
- first_slave = bond_first_slave(bond);
- return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
+ rcu_read_lock();
+ first_slave = bond_first_slave_rcu(bond);
+ agg = first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
+ rcu_read_unlock();
+
+ return agg;
}
-/*
- * __agg_has_partner
+/**
+ * __agg_has_partner - see if we have a partner
+ * @agg: the aggregator we're looking at
*
* Return nonzero if aggregator has a partner (denoted by a non-zero ether
- * address for the partner). Return 0 if not.
+ * address for the partner). Return 0 if not.
*/
static inline int __agg_has_partner(struct aggregator *agg)
{
@@ -169,7 +178,6 @@ static inline int __agg_has_partner(struct aggregator *agg)
/**
* __disable_port - disable the port's slave
* @port: the port we're looking at
- *
*/
static inline void __disable_port(struct port *port)
{
@@ -179,7 +187,6 @@ static inline void __disable_port(struct port *port)
/**
* __enable_port - enable the port's slave, if it's up
* @port: the port we're looking at
- *
*/
static inline void __enable_port(struct port *port)
{
@@ -192,7 +199,6 @@ static inline void __enable_port(struct port *port)
/**
* __port_is_enabled - check if the port's slave is in active state
* @port: the port we're looking at
- *
*/
static inline int __port_is_enabled(struct port *port)
{
@@ -218,7 +224,6 @@ static inline u32 __get_agg_selection_mode(struct port *port)
/**
* __check_agg_selection_timer - check if the selection timer has expired
* @port: the port we're looking at
- *
*/
static inline int __check_agg_selection_timer(struct port *port)
{
@@ -233,7 +238,6 @@ static inline int __check_agg_selection_timer(struct port *port)
/**
* __get_state_machine_lock - lock the port's state machines
* @port: the port we're looking at
- *
*/
static inline void __get_state_machine_lock(struct port *port)
{
@@ -243,7 +247,6 @@ static inline void __get_state_machine_lock(struct port *port)
/**
* __release_state_machine_lock - unlock the port's state machines
* @port: the port we're looking at
- *
*/
static inline void __release_state_machine_lock(struct port *port)
{
@@ -266,10 +269,11 @@ static u16 __get_link_speed(struct port *port)
struct slave *slave = port->slave;
u16 speed;
- /* this if covers only a special case: when the configuration starts with
- * link down, it sets the speed to 0.
- * This is done in spite of the fact that the e100 driver reports 0 to be
- * compatible with MVT in the future.*/
+ /* this if covers only a special case: when the configuration starts
+ * with link down, it sets the speed to 0.
+ * This is done in spite of the fact that the e100 driver reports 0
+ * to be compatible with MVT in the future.
+ */
if (slave->link != BOND_LINK_UP)
speed = 0;
else {
@@ -291,7 +295,8 @@ static u16 __get_link_speed(struct port *port)
break;
default:
- speed = 0; // unknown speed value from ethtool. shouldn't happen
+ /* unknown speed value from ethtool. shouldn't happen */
+ speed = 0;
break;
}
}
@@ -315,8 +320,9 @@ static u8 __get_duplex(struct port *port)
u8 retval;
- // handling a special case: when the configuration starts with
- // link down, it sets the duplex to 0.
+ /* handling a special case: when the configuration starts with
+ * link down, it sets the duplex to 0.
+ */
if (slave->link != BOND_LINK_UP)
retval = 0x0;
else {
@@ -340,15 +346,14 @@ static u8 __get_duplex(struct port *port)
/**
* __initialize_port_locks - initialize a port's STATE machine spinlock
* @port: the slave of the port we're looking at
- *
*/
static inline void __initialize_port_locks(struct slave *slave)
{
- // make sure it isn't called twice
+ /* make sure it isn't called twice */
spin_lock_init(&(SLAVE_AD_INFO(slave).state_machine_lock));
}
-//conversions
+/* Conversions */
/**
* __ad_timer_to_ticks - convert a given timer type to AD module ticks
@@ -357,39 +362,38 @@ static inline void __initialize_port_locks(struct slave *slave)
*
* If @timer_type is %current_while_timer, @par indicates long/short timer.
* If @timer_type is %periodic_timer, @par is one of %FAST_PERIODIC_TIME,
- * %SLOW_PERIODIC_TIME.
+ * %SLOW_PERIODIC_TIME.
*/
static u16 __ad_timer_to_ticks(u16 timer_type, u16 par)
{
u16 retval = 0; /* to silence the compiler */
switch (timer_type) {
- case AD_CURRENT_WHILE_TIMER: // for rx machine usage
+ case AD_CURRENT_WHILE_TIMER: /* for rx machine usage */
if (par)
- retval = (AD_SHORT_TIMEOUT_TIME*ad_ticks_per_sec); // short timeout
+ retval = (AD_SHORT_TIMEOUT_TIME*ad_ticks_per_sec);
else
- retval = (AD_LONG_TIMEOUT_TIME*ad_ticks_per_sec); // long timeout
+ retval = (AD_LONG_TIMEOUT_TIME*ad_ticks_per_sec);
break;
- case AD_ACTOR_CHURN_TIMER: // for local churn machine
+ case AD_ACTOR_CHURN_TIMER: /* for local churn machine */
retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec);
break;
- case AD_PERIODIC_TIMER: // for periodic machine
- retval = (par*ad_ticks_per_sec); // long timeout
+ case AD_PERIODIC_TIMER: /* for periodic machine */
+ retval = (par*ad_ticks_per_sec); /* long timeout */
break;
- case AD_PARTNER_CHURN_TIMER: // for remote churn machine
+ case AD_PARTNER_CHURN_TIMER: /* for remote churn machine */
retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec);
break;
- case AD_WAIT_WHILE_TIMER: // for selection machine
+ case AD_WAIT_WHILE_TIMER: /* for selection machine */
retval = (AD_AGGREGATE_WAIT_TIME*ad_ticks_per_sec);
break;
}
+
return retval;
}
-/////////////////////////////////////////////////////////////////////////////////
-// ================= ad_rx_machine helper functions ==================
-/////////////////////////////////////////////////////////////////////////////////
+/* ================= ad_rx_machine helper functions ================== */
/**
* __choose_matched - update a port's matched variable from a received lacpdu
@@ -416,17 +420,18 @@ static u16 __ad_timer_to_ticks(u16 timer_type, u16 par)
*/
static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
{
- // check if all parameters are alike
+ /* check if all parameters are alike,
+ * or this is an individual link (aggregation == FALSE);
+ * if so, update the state machine Matched variable.
+ */
if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
(ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
- !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
+ MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) &&
(ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
(ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
- // or this is individual link(aggregation == FALSE)
((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
) {
- // update the state machine Matched variable
port->sm_vars |= AD_PORT_MATCHED;
} else {
port->sm_vars &= ~AD_PORT_MATCHED;
@@ -448,7 +453,9 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
struct port_params *partner = &port->partner_oper;
__choose_matched(lacpdu, port);
- // record the new parameter values for the partner operational
+ /* record the new parameter values for the partner
+ * operational
+ */
partner->port_number = ntohs(lacpdu->actor_port);
partner->port_priority = ntohs(lacpdu->actor_port_priority);
partner->system = lacpdu->actor_system;
@@ -456,10 +463,12 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
partner->key = ntohs(lacpdu->actor_key);
partner->port_state = lacpdu->actor_state;
- // set actor_oper_port_state.defaulted to FALSE
+ /* set actor_oper_port_state.defaulted to FALSE */
port->actor_oper_port_state &= ~AD_STATE_DEFAULTED;
- // set the partner sync. to on if the partner is sync. and the port is matched
+ /* set the partner sync. to on if the partner is in sync
+ * and the port is matched
+ */
if ((port->sm_vars & AD_PORT_MATCHED)
&& (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION))
partner->port_state |= AD_STATE_SYNCHRONIZATION;
@@ -479,11 +488,11 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
static void __record_default(struct port *port)
{
if (port) {
- // record the partner admin parameters
+ /* record the partner admin parameters */
memcpy(&port->partner_oper, &port->partner_admin,
sizeof(struct port_params));
- // set actor_oper_port_state.defaulted to true
+ /* set actor_oper_port_state.defaulted to true */
port->actor_oper_port_state |= AD_STATE_DEFAULTED;
}
}
@@ -506,14 +515,15 @@ static void __update_selected(struct lacpdu *lacpdu, struct port *port)
if (lacpdu && port) {
const struct port_params *partner = &port->partner_oper;
- // check if any parameter is different
+ /* check if any parameter is different; if so,
+ * update the state machine Selected variable.
+ */
if (ntohs(lacpdu->actor_port) != partner->port_number ||
ntohs(lacpdu->actor_port_priority) != partner->port_priority ||
- MAC_ADDRESS_COMPARE(&lacpdu->actor_system, &partner->system) ||
+ !MAC_ADDRESS_EQUAL(&lacpdu->actor_system, &partner->system) ||
ntohs(lacpdu->actor_system_priority) != partner->system_priority ||
ntohs(lacpdu->actor_key) != partner->key ||
(lacpdu->actor_state & AD_STATE_AGGREGATION) != (partner->port_state & AD_STATE_AGGREGATION)) {
- // update the state machine Selected variable
port->sm_vars &= ~AD_PORT_SELECTED;
}
}
@@ -537,15 +547,16 @@ static void __update_default_selected(struct port *port)
const struct port_params *admin = &port->partner_admin;
const struct port_params *oper = &port->partner_oper;
- // check if any parameter is different
+ /* check if any parameter is different; if so,
+ * update the state machine Selected variable.
+ */
if (admin->port_number != oper->port_number ||
admin->port_priority != oper->port_priority ||
- MAC_ADDRESS_COMPARE(&admin->system, &oper->system) ||
+ !MAC_ADDRESS_EQUAL(&admin->system, &oper->system) ||
admin->system_priority != oper->system_priority ||
admin->key != oper->key ||
(admin->port_state & AD_STATE_AGGREGATION)
!= (oper->port_state & AD_STATE_AGGREGATION)) {
- // update the state machine Selected variable
port->sm_vars &= ~AD_PORT_SELECTED;
}
}
@@ -565,12 +576,14 @@ static void __update_default_selected(struct port *port)
*/
static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
{
- // validate lacpdu and port
+ /* validate lacpdu and port */
if (lacpdu && port) {
- // check if any parameter is different
+ /* check if any parameter is different; if so,
+ * update port->ntt.
+ */
if ((ntohs(lacpdu->partner_port) != port->actor_port_number) ||
(ntohs(lacpdu->partner_port_priority) != port->actor_port_priority) ||
- MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) ||
+ !MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) ||
(ntohs(lacpdu->partner_system_priority) != port->actor_system_priority) ||
(ntohs(lacpdu->partner_key) != port->actor_oper_port_key) ||
((lacpdu->partner_state & AD_STATE_LACP_ACTIVITY) != (port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY)) ||
@@ -578,43 +591,12 @@ static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
((lacpdu->partner_state & AD_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) ||
((lacpdu->partner_state & AD_STATE_AGGREGATION) != (port->actor_oper_port_state & AD_STATE_AGGREGATION))
) {
-
port->ntt = true;
}
}
}
/**
- * __attach_bond_to_agg
- * @port: the port we're looking at
- *
- * Handle the attaching of the port's control parser/multiplexer and the
- * aggregator. This function does nothing since the parser/multiplexer of the
- * receive and the parser/multiplexer of the aggregator are already combined.
- */
-static void __attach_bond_to_agg(struct port *port)
-{
- port = NULL; /* just to satisfy the compiler */
- // This function does nothing since the parser/multiplexer of the receive
- // and the parser/multiplexer of the aggregator are already combined
-}
-
-/**
- * __detach_bond_from_agg
- * @port: the port we're looking at
- *
- * Handle the detaching of the port's control parser/multiplexer from the
- * aggregator. This function does nothing since the parser/multiplexer of the
- * receive and the parser/multiplexer of the aggregator are already combined.
- */
-static void __detach_bond_from_agg(struct port *port)
-{
- port = NULL; /* just to satisfy the compiler */
- // This function does nothing since the parser/multiplexer of the receive
- // and the parser/multiplexer of the aggregator are already combined
-}
-
-/**
* __agg_ports_are_ready - check if all ports in an aggregator are ready
* @aggregator: the aggregator we're looking at
*
@@ -625,7 +607,9 @@ static int __agg_ports_are_ready(struct aggregator *aggregator)
int retval = 1;
if (aggregator) {
- // scan all ports in this aggregator to verfy if they are all ready
+ /* scan all ports in this aggregator to verify if they are
+ * all ready.
+ */
for (port = aggregator->lag_ports;
port;
port = port->next_port_in_aggregator) {
@@ -685,7 +669,7 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
bandwidth = aggregator->num_of_ports * 10000;
break;
default:
- bandwidth = 0; /*to silence the compiler ....*/
+ bandwidth = 0; /* to silence the compiler */
}
}
return bandwidth;
@@ -695,6 +679,7 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
* __get_active_agg - get the current active aggregator
* @aggregator: the aggregator we're looking at
*
+ * Caller must hold RCU lock.
*/
static struct aggregator *__get_active_agg(struct aggregator *aggregator)
{
@@ -702,7 +687,7 @@ static struct aggregator *__get_active_agg(struct aggregator *aggregator)
struct list_head *iter;
struct slave *slave;
- bond_for_each_slave(bond, slave, iter)
+ bond_for_each_slave_rcu(bond, slave, iter)
if (SLAVE_AD_INFO(slave).aggregator.is_active)
return &(SLAVE_AD_INFO(slave).aggregator);
@@ -712,15 +697,14 @@ static struct aggregator *__get_active_agg(struct aggregator *aggregator)
/**
* __update_lacpdu_from_port - update a port's lacpdu fields
* @port: the port we're looking at
- *
*/
static inline void __update_lacpdu_from_port(struct port *port)
{
struct lacpdu *lacpdu = &port->lacpdu;
const struct port_params *partner = &port->partner_oper;
- /* update current actual Actor parameters */
- /* lacpdu->subtype initialized
+ /* update current actual Actor parameters
+ * lacpdu->subtype initialized
* lacpdu->version_number initialized
* lacpdu->tlv_type_actor_info initialized
* lacpdu->actor_information_length initialized
@@ -756,9 +740,7 @@ static inline void __update_lacpdu_from_port(struct port *port)
*/
}
-//////////////////////////////////////////////////////////////////////////////////////
-// ================= main 802.3ad protocol code ======================================
-//////////////////////////////////////////////////////////////////////////////////////
+/* ================= main 802.3ad protocol code ========================= */
/**
* ad_lacpdu_send - send out a lacpdu packet on a given port
@@ -788,11 +770,12 @@ static int ad_lacpdu_send(struct port *port)
memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
/* Note: source address is set to be the member's PERMANENT address,
- because we use it to identify loopback lacpdus in receive. */
+ * because we use it to identify loopback lacpdus in receive.
+ */
memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
- lacpdu_header->lacpdu = port->lacpdu; // struct copy
+ lacpdu_header->lacpdu = port->lacpdu;
dev_queue_xmit(skb);
@@ -829,11 +812,12 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
/* Note: source address is set to be the member's PERMANENT address,
- because we use it to identify loopback MARKERs in receive. */
+ * because we use it to identify loopback MARKERs in receive.
+ */
memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
- marker_header->marker = *marker; // struct copy
+ marker_header->marker = *marker;
dev_queue_xmit(skb);
@@ -843,72 +827,90 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
/**
* ad_mux_machine - handle a port's mux state machine
* @port: the port we're looking at
- *
*/
static void ad_mux_machine(struct port *port)
{
mux_states_t last_state;
- // keep current State Machine state to compare later if it was changed
+ /* keep current State Machine state to compare later if it was
+ * changed
+ */
last_state = port->sm_mux_state;
if (port->sm_vars & AD_PORT_BEGIN) {
- port->sm_mux_state = AD_MUX_DETACHED; // next state
+ port->sm_mux_state = AD_MUX_DETACHED;
} else {
switch (port->sm_mux_state) {
case AD_MUX_DETACHED:
if ((port->sm_vars & AD_PORT_SELECTED)
|| (port->sm_vars & AD_PORT_STANDBY))
/* if SELECTED or STANDBY */
- port->sm_mux_state = AD_MUX_WAITING; // next state
+ port->sm_mux_state = AD_MUX_WAITING;
break;
case AD_MUX_WAITING:
- // if SELECTED == FALSE return to DETACH state
- if (!(port->sm_vars & AD_PORT_SELECTED)) { // if UNSELECTED
+ /* if SELECTED == FALSE return to DETACH state */
+ if (!(port->sm_vars & AD_PORT_SELECTED)) {
port->sm_vars &= ~AD_PORT_READY_N;
- // in order to withhold the Selection Logic to check all ports READY_N value
- // every callback cycle to update ready variable, we check READY_N and update READY here
+ /* so that the Selection Logic does not have to
+ * check all ports' READY_N values every callback
+ * cycle just to update the ready variable, we
+ * check READY_N and update READY here
+ */
__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
- port->sm_mux_state = AD_MUX_DETACHED; // next state
+ port->sm_mux_state = AD_MUX_DETACHED;
break;
}
- // check if the wait_while_timer expired
+ /* check if the wait_while_timer expired */
if (port->sm_mux_timer_counter
&& !(--port->sm_mux_timer_counter))
port->sm_vars |= AD_PORT_READY_N;
- // in order to withhold the selection logic to check all ports READY_N value
- // every callback cycle to update ready variable, we check READY_N and update READY here
+ /* so that the selection logic does not have to check
+ * all ports' READY_N values every callback cycle just
+ * to update the ready variable, we check READY_N and
+ * update READY here
+ */
__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
- // if the wait_while_timer expired, and the port is in READY state, move to ATTACHED state
+ /* if the wait_while_timer expired, and the port is
+ * in READY state, move to ATTACHED state
+ */
if ((port->sm_vars & AD_PORT_READY)
&& !port->sm_mux_timer_counter)
- port->sm_mux_state = AD_MUX_ATTACHED; // next state
+ port->sm_mux_state = AD_MUX_ATTACHED;
break;
case AD_MUX_ATTACHED:
- // check also if agg_select_timer expired(so the edable port will take place only after this timer)
- if ((port->sm_vars & AD_PORT_SELECTED) && (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) && !__check_agg_selection_timer(port)) {
- port->sm_mux_state = AD_MUX_COLLECTING_DISTRIBUTING;// next state
- } else if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY)) { // if UNSELECTED or STANDBY
+ /* check also if agg_select_timer expired (so the port
+ * is enabled only after this timer expires)
+ */
+ if ((port->sm_vars & AD_PORT_SELECTED) &&
+ (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) &&
+ !__check_agg_selection_timer(port)) {
+ port->sm_mux_state = AD_MUX_COLLECTING_DISTRIBUTING;
+ } else if (!(port->sm_vars & AD_PORT_SELECTED) ||
+ (port->sm_vars & AD_PORT_STANDBY)) {
+ /* if UNSELECTED or STANDBY */
port->sm_vars &= ~AD_PORT_READY_N;
- // in order to withhold the selection logic to check all ports READY_N value
- // every callback cycle to update ready variable, we check READY_N and update READY here
+ /* so that the selection logic does not have to
+ * check all ports' READY_N values every callback
+ * cycle just to update the ready variable, we
+ * check READY_N and update READY here
+ */
__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
- port->sm_mux_state = AD_MUX_DETACHED;// next state
+ port->sm_mux_state = AD_MUX_DETACHED;
}
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
- if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY) ||
- !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION)
- ) {
- port->sm_mux_state = AD_MUX_ATTACHED;// next state
-
+ if (!(port->sm_vars & AD_PORT_SELECTED) ||
+ (port->sm_vars & AD_PORT_STANDBY) ||
+ !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION)) {
+ port->sm_mux_state = AD_MUX_ATTACHED;
} else {
- // if port state hasn't changed make
- // sure that a collecting distributing
- // port in an active aggregator is enabled
+ /* if port state hasn't changed make
+ * sure that a collecting distributing
+ * port in an active aggregator is enabled
+ */
if (port->aggregator &&
port->aggregator->is_active &&
!__port_is_enabled(port)) {
@@ -917,19 +919,18 @@ static void ad_mux_machine(struct port *port)
}
}
break;
- default: //to silence the compiler
+ default:
break;
}
}
- // check if the state machine was changed
+ /* check if the state machine was changed */
if (port->sm_mux_state != last_state) {
pr_debug("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n",
port->actor_port_number, last_state,
port->sm_mux_state);
switch (port->sm_mux_state) {
case AD_MUX_DETACHED:
- __detach_bond_from_agg(port);
port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
ad_disable_collecting_distributing(port);
port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
@@ -940,7 +941,6 @@ static void ad_mux_machine(struct port *port)
port->sm_mux_timer_counter = __ad_timer_to_ticks(AD_WAIT_WHILE_TIMER, 0);
break;
case AD_MUX_ATTACHED:
- __attach_bond_to_agg(port);
port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION;
port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
@@ -953,7 +953,7 @@ static void ad_mux_machine(struct port *port)
ad_enable_collecting_distributing(port);
port->ntt = true;
break;
- default: //to silence the compiler
+ default:
break;
}
}
@@ -972,59 +972,63 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
{
rx_states_t last_state;
- // keep current State Machine state to compare later if it was changed
+ /* keep current State Machine state to compare later if it was
+ * changed
+ */
last_state = port->sm_rx_state;
- // check if state machine should change state
- // first, check if port was reinitialized
+ /* check if state machine should change state */
+
+ /* first, check if port was reinitialized */
if (port->sm_vars & AD_PORT_BEGIN)
- /* next state */
port->sm_rx_state = AD_RX_INITIALIZE;
- // check if port is not enabled
+ /* check if port is not enabled */
else if (!(port->sm_vars & AD_PORT_BEGIN)
&& !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED))
- /* next state */
port->sm_rx_state = AD_RX_PORT_DISABLED;
- // check if new lacpdu arrived
- else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) || (port->sm_rx_state == AD_RX_DEFAULTED) || (port->sm_rx_state == AD_RX_CURRENT))) {
- port->sm_rx_timer_counter = 0; // zero timer
+ /* check if new lacpdu arrived */
+ else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) ||
+ (port->sm_rx_state == AD_RX_DEFAULTED) ||
+ (port->sm_rx_state == AD_RX_CURRENT))) {
+ port->sm_rx_timer_counter = 0;
port->sm_rx_state = AD_RX_CURRENT;
} else {
- // if timer is on, and if it is expired
- if (port->sm_rx_timer_counter && !(--port->sm_rx_timer_counter)) {
+ /* if timer is on, and if it is expired */
+ if (port->sm_rx_timer_counter &&
+ !(--port->sm_rx_timer_counter)) {
switch (port->sm_rx_state) {
case AD_RX_EXPIRED:
- port->sm_rx_state = AD_RX_DEFAULTED; // next state
+ port->sm_rx_state = AD_RX_DEFAULTED;
break;
case AD_RX_CURRENT:
- port->sm_rx_state = AD_RX_EXPIRED; // next state
+ port->sm_rx_state = AD_RX_EXPIRED;
break;
- default: //to silence the compiler
+ default:
break;
}
} else {
- // if no lacpdu arrived and no timer is on
+ /* if no lacpdu arrived and no timer is on */
switch (port->sm_rx_state) {
case AD_RX_PORT_DISABLED:
if (port->sm_vars & AD_PORT_MOVED)
- port->sm_rx_state = AD_RX_INITIALIZE; // next state
+ port->sm_rx_state = AD_RX_INITIALIZE;
else if (port->is_enabled
&& (port->sm_vars
& AD_PORT_LACP_ENABLED))
- port->sm_rx_state = AD_RX_EXPIRED; // next state
+ port->sm_rx_state = AD_RX_EXPIRED;
else if (port->is_enabled
&& ((port->sm_vars
& AD_PORT_LACP_ENABLED) == 0))
- port->sm_rx_state = AD_RX_LACP_DISABLED; // next state
+ port->sm_rx_state = AD_RX_LACP_DISABLED;
break;
- default: //to silence the compiler
+ default:
break;
}
}
}
- // check if the State machine was changed or new lacpdu arrived
+ /* check if the State machine was changed or new lacpdu arrived */
if ((port->sm_rx_state != last_state) || (lacpdu)) {
pr_debug("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n",
port->actor_port_number, last_state,
@@ -1039,10 +1043,9 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
__record_default(port);
port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
port->sm_vars &= ~AD_PORT_MOVED;
- port->sm_rx_state = AD_RX_PORT_DISABLED; // next state
-
- /*- Fall Through -*/
+ port->sm_rx_state = AD_RX_PORT_DISABLED;
+ /* Fall Through */
case AD_RX_PORT_DISABLED:
port->sm_vars &= ~AD_PORT_MATCHED;
break;
@@ -1054,13 +1057,15 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
break;
case AD_RX_EXPIRED:
- //Reset of the Synchronization flag. (Standard 43.4.12)
- //This reset cause to disable this port in the COLLECTING_DISTRIBUTING state of the
- //mux machine in case of EXPIRED even if LINK_DOWN didn't arrive for the port.
+ /* Reset of the Synchronization flag (Standard 43.4.12)
+ * This reset causes the port to be disabled in the
+ * COLLECTING_DISTRIBUTING state of the mux machine in
+ * case of EXPIRED even if LINK_DOWN didn't arrive for
+ * the port.
+ */
port->partner_oper.port_state &= ~AD_STATE_SYNCHRONIZATION;
port->sm_vars &= ~AD_PORT_MATCHED;
- port->partner_oper.port_state |=
- AD_STATE_LACP_ACTIVITY;
+ port->partner_oper.port_state |= AD_STATE_LACP_ACTIVITY;
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
port->actor_oper_port_state |= AD_STATE_EXPIRED;
break;
@@ -1071,12 +1076,12 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
break;
case AD_RX_CURRENT:
- // detect loopback situation
- if (!MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->actor_system))) {
- // INFO_RECEIVED_LOOPBACK_FRAMES
- pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
- "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
- port->slave->bond->dev->name, port->slave->dev->name);
+ /* detect loopback situation */
+ if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system),
+ &(port->actor_system))) {
+ pr_err("%s: An illegal loopback occurred on adapter (%s).\nCheck the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
+ port->slave->bond->dev->name,
+ port->slave->dev->name);
return;
}
__update_selected(lacpdu, port);
@@ -1085,7 +1090,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
break;
- default: //to silence the compiler
+ default:
break;
}
}
@@ -1094,13 +1099,14 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
/**
* ad_tx_machine - handle a port's tx state machine
* @port: the port we're looking at
- *
*/
static void ad_tx_machine(struct port *port)
{
- // check if tx timer expired, to verify that we do not send more than 3 packets per second
+ /* check if tx timer expired, to verify that we do not send more than
+ * 3 packets per second
+ */
if (port->sm_tx_timer_counter && !(--port->sm_tx_timer_counter)) {
- // check if there is something to send
+ /* check if there is something to send */
if (port->ntt && (port->sm_vars & AD_PORT_LACP_ENABLED)) {
__update_lacpdu_from_port(port);
@@ -1108,14 +1114,16 @@ static void ad_tx_machine(struct port *port)
pr_debug("Sent LACPDU on port %d\n",
port->actor_port_number);
- /* mark ntt as false, so it will not be sent again until
- demanded */
+ /* mark ntt as false, so it will not be sent
+ * again until demanded
+ */
port->ntt = false;
}
}
- // restart tx timer(to verify that we will not exceed AD_MAX_TX_IN_SECOND
- port->sm_tx_timer_counter =
- ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
+ /* restart tx timer(to verify that we will not exceed
+ * AD_MAX_TX_IN_SECOND
+ */
+ port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
}
}
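To put numbers on the reload above: ad_ticks_per_sec is the tick_resolution handed to bond_3ad_initialize() and AD_MAX_TX_IN_SECOND caps LACPDU transmission. The figures below (a 100 ms tick and a cap of 3 frames per second) are assumptions for illustration, not values shown in this hunk; the snippet is a self-contained user-space sketch of the same arithmetic.

	#include <stdio.h>

	#define AD_MAX_TX_IN_SECOND	3	/* assumed cap */

	int main(void)
	{
		int ad_ticks_per_sec = 10;	/* assumed 100 ms state-machine tick */
		int reload = ad_ticks_per_sec / AD_MAX_TX_IN_SECOND;

		/* the tx machine sends at most one LACPDU per 'reload' ticks */
		printf("tx timer reload = %d ticks (~%d ms between LACPDUs)\n",
		       reload, reload * 1000 / ad_ticks_per_sec);
		return 0;
	}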
@@ -1129,76 +1137,79 @@ static void ad_periodic_machine(struct port *port)
{
periodic_states_t last_state;
- // keep current state machine state to compare later if it was changed
+ /* keep current state machine state to compare later if it was changed */
last_state = port->sm_periodic_state;
- // check if port was reinitialized
+ /* check if port was reinitialized */
if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
(!(port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & AD_STATE_LACP_ACTIVITY))
) {
- port->sm_periodic_state = AD_NO_PERIODIC; // next state
+ port->sm_periodic_state = AD_NO_PERIODIC;
}
- // check if state machine should change state
+ /* check if state machine should change state */
else if (port->sm_periodic_timer_counter) {
- // check if periodic state machine expired
+ /* check if periodic state machine expired */
if (!(--port->sm_periodic_timer_counter)) {
- // if expired then do tx
- port->sm_periodic_state = AD_PERIODIC_TX; // next state
+ /* if expired then do tx */
+ port->sm_periodic_state = AD_PERIODIC_TX;
} else {
- // If not expired, check if there is some new timeout parameter from the partner state
+ /* If not expired, check if there is some new timeout
+ * parameter from the partner state
+ */
switch (port->sm_periodic_state) {
case AD_FAST_PERIODIC:
if (!(port->partner_oper.port_state
& AD_STATE_LACP_TIMEOUT))
- port->sm_periodic_state = AD_SLOW_PERIODIC; // next state
+ port->sm_periodic_state = AD_SLOW_PERIODIC;
break;
case AD_SLOW_PERIODIC:
if ((port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) {
- // stop current timer
port->sm_periodic_timer_counter = 0;
- port->sm_periodic_state = AD_PERIODIC_TX; // next state
+ port->sm_periodic_state = AD_PERIODIC_TX;
}
break;
- default: //to silence the compiler
+ default:
break;
}
}
} else {
switch (port->sm_periodic_state) {
case AD_NO_PERIODIC:
- port->sm_periodic_state = AD_FAST_PERIODIC; // next state
+ port->sm_periodic_state = AD_FAST_PERIODIC;
break;
case AD_PERIODIC_TX:
- if (!(port->partner_oper.port_state
- & AD_STATE_LACP_TIMEOUT))
- port->sm_periodic_state = AD_SLOW_PERIODIC; // next state
+ if (!(port->partner_oper.port_state &
+ AD_STATE_LACP_TIMEOUT))
+ port->sm_periodic_state = AD_SLOW_PERIODIC;
else
- port->sm_periodic_state = AD_FAST_PERIODIC; // next state
+ port->sm_periodic_state = AD_FAST_PERIODIC;
break;
- default: //to silence the compiler
+ default:
break;
}
}
- // check if the state machine was changed
+ /* check if the state machine was changed */
if (port->sm_periodic_state != last_state) {
pr_debug("Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n",
port->actor_port_number, last_state,
port->sm_periodic_state);
switch (port->sm_periodic_state) {
case AD_NO_PERIODIC:
- port->sm_periodic_timer_counter = 0; // zero timer
+ port->sm_periodic_timer_counter = 0;
break;
case AD_FAST_PERIODIC:
- port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_FAST_PERIODIC_TIME))-1; // decrement 1 tick we lost in the PERIODIC_TX cycle
+ /* decrement 1 tick we lost in the PERIODIC_TX cycle */
+ port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_FAST_PERIODIC_TIME))-1;
break;
case AD_SLOW_PERIODIC:
- port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_SLOW_PERIODIC_TIME))-1; // decrement 1 tick we lost in the PERIODIC_TX cycle
+ /* decrement 1 tick we lost in the PERIODIC_TX cycle */
+ port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_SLOW_PERIODIC_TIME))-1;
break;
case AD_PERIODIC_TX:
port->ntt = true;
break;
- default: //to silence the compiler
+ default:
break;
}
}
@@ -1221,30 +1232,38 @@ static void ad_port_selection_logic(struct port *port)
struct slave *slave;
int found = 0;
- // if the port is already Selected, do nothing
+ /* if the port is already Selected, do nothing */
if (port->sm_vars & AD_PORT_SELECTED)
return;
bond = __get_bond_by_port(port);
- // if the port is connected to other aggregator, detach it
+ /* if the port is connected to other aggregator, detach it */
if (port->aggregator) {
- // detach the port from its former aggregator
+ /* detach the port from its former aggregator */
temp_aggregator = port->aggregator;
for (curr_port = temp_aggregator->lag_ports; curr_port;
last_port = curr_port,
- curr_port = curr_port->next_port_in_aggregator) {
+ curr_port = curr_port->next_port_in_aggregator) {
if (curr_port == port) {
temp_aggregator->num_of_ports--;
- if (!last_port) {// if it is the first port attached to the aggregator
+ /* if it is the first port attached to the
+ * aggregator
+ */
+ if (!last_port) {
temp_aggregator->lag_ports =
port->next_port_in_aggregator;
- } else {// not the first port attached to the aggregator
+ } else {
+ /* not the first port attached to the
+ * aggregator
+ */
last_port->next_port_in_aggregator =
port->next_port_in_aggregator;
}
- // clear the port's relations to this aggregator
+ /* clear the port's relations to this
+ * aggregator
+ */
port->aggregator = NULL;
port->next_port_in_aggregator = NULL;
port->actor_port_aggregator_identifier = 0;
@@ -1252,41 +1271,46 @@ static void ad_port_selection_logic(struct port *port)
pr_debug("Port %d left LAG %d\n",
port->actor_port_number,
temp_aggregator->aggregator_identifier);
- // if the aggregator is empty, clear its parameters, and set it ready to be attached
+ /* if the aggregator is empty, clear its
+ * parameters, and set it ready to be attached
+ */
if (!temp_aggregator->lag_ports)
ad_clear_agg(temp_aggregator);
break;
}
}
- if (!curr_port) { // meaning: the port was related to an aggregator but was not on the aggregator port list
- pr_warning("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
- port->slave->bond->dev->name,
- port->actor_port_number,
- port->slave->dev->name,
- port->aggregator->aggregator_identifier);
+ if (!curr_port) {
+ /* meaning: the port was related to an aggregator
+ * but was not on the aggregator port list
+ */
+ pr_warn("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
+ port->slave->bond->dev->name,
+ port->actor_port_number,
+ port->slave->dev->name,
+ port->aggregator->aggregator_identifier);
}
}
- // search on all aggregators for a suitable aggregator for this port
+ /* search on all aggregators for a suitable aggregator for this port */
bond_for_each_slave(bond, slave, iter) {
aggregator = &(SLAVE_AD_INFO(slave).aggregator);
- // keep a free aggregator for later use(if needed)
+ /* keep a free aggregator for later use(if needed) */
if (!aggregator->lag_ports) {
if (!free_aggregator)
free_aggregator = aggregator;
continue;
}
- // check if current aggregator suits us
- if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && // if all parameters match AND
- !MAC_ADDRESS_COMPARE(&(aggregator->partner_system), &(port->partner_oper.system)) &&
+ /* check if current aggregator suits us */
+ if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && /* if all parameters match AND */
+ MAC_ADDRESS_EQUAL(&(aggregator->partner_system), &(port->partner_oper.system)) &&
(aggregator->partner_system_priority == port->partner_oper.system_priority) &&
(aggregator->partner_oper_aggregator_key == port->partner_oper.key)
) &&
- ((MAC_ADDRESS_COMPARE(&(port->partner_oper.system), &(null_mac_addr)) && // partner answers
- !aggregator->is_individual) // but is not individual OR
+ ((!MAC_ADDRESS_EQUAL(&(port->partner_oper.system), &(null_mac_addr)) && /* partner answers */
+ !aggregator->is_individual) /* but is not individual OR */
)
) {
- // attach to the founded aggregator
+ /* attach to the aggregator we found */
port->aggregator = aggregator;
port->actor_port_aggregator_identifier =
port->aggregator->aggregator_identifier;
@@ -1297,23 +1321,26 @@ static void ad_port_selection_logic(struct port *port)
port->actor_port_number,
port->aggregator->aggregator_identifier);
- // mark this port as selected
+ /* mark this port as selected */
port->sm_vars |= AD_PORT_SELECTED;
found = 1;
break;
}
}
- // the port couldn't find an aggregator - attach it to a new aggregator
+ /* the port couldn't find an aggregator - attach it to a new
+ * aggregator
+ */
if (!found) {
if (free_aggregator) {
- // assign port a new aggregator
+ /* assign port a new aggregator */
port->aggregator = free_aggregator;
port->actor_port_aggregator_identifier =
port->aggregator->aggregator_identifier;
- // update the new aggregator's parameters
- // if port was responsed from the end-user
+ /* update the new aggregator's parameters
+ * if the port was requested by the end-user
+ */
if (port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)
/* if port is full duplex */
port->aggregator->is_individual = false;
@@ -1332,7 +1359,7 @@ static void ad_port_selection_logic(struct port *port)
port->aggregator->lag_ports = port;
port->aggregator->num_of_ports++;
- // mark this port as selected
+ /* mark this port as selected */
port->sm_vars |= AD_PORT_SELECTED;
pr_debug("Port %d joined LAG %d(new LAG)\n",
@@ -1344,23 +1371,24 @@ static void ad_port_selection_logic(struct port *port)
port->actor_port_number, port->slave->dev->name);
}
}
- // if all aggregator's ports are READY_N == TRUE, set ready=TRUE in all aggregator's ports
- // else set ready=FALSE in all aggregator's ports
- __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
+ /* if all aggregator's ports are READY_N == TRUE, set ready=TRUE
+ * in all aggregator's ports, else set ready=FALSE in all
+ * aggregator's ports
+ */
+ __set_agg_ports_ready(port->aggregator,
+ __agg_ports_are_ready(port->aggregator));
aggregator = __get_first_agg(port);
ad_agg_selection_logic(aggregator);
}
-/*
- * Decide if "agg" is a better choice for the new active aggregator that
+/* Decide if "agg" is a better choice for the new active aggregator that
* the current best, according to the ad_select policy.
*/
static struct aggregator *ad_agg_selection_test(struct aggregator *best,
struct aggregator *curr)
{
- /*
- * 0. If no best, select current.
+ /* 0. If no best, select current.
*
* 1. If the current agg is not individual, and the best is
* individual, select current.
@@ -1416,9 +1444,9 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
break;
default:
- pr_warning("%s: Impossible agg select mode %d\n",
- curr->slave->bond->dev->name,
- __get_agg_selection_mode(curr->lag_ports));
+ pr_warn("%s: Impossible agg select mode %d\n",
+ curr->slave->bond->dev->name,
+ __get_agg_selection_mode(curr->lag_ports));
break;
}
@@ -1428,10 +1456,12 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
static int agg_device_up(const struct aggregator *agg)
{
struct port *port = agg->lag_ports;
+
if (!port)
return 0;
- return (netif_running(port->slave->dev) &&
- netif_carrier_ok(port->slave->dev));
+
+ return netif_running(port->slave->dev) &&
+ netif_carrier_ok(port->slave->dev);
}
/**
@@ -1467,11 +1497,12 @@ static void ad_agg_selection_logic(struct aggregator *agg)
struct slave *slave;
struct port *port;
+ rcu_read_lock();
origin = agg;
active = __get_active_agg(agg);
best = (active && agg_device_up(active)) ? active : NULL;
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
agg = &(SLAVE_AD_INFO(slave).aggregator);
agg->is_active = 0;
@@ -1482,8 +1513,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
if (best &&
__get_agg_selection_mode(best->lag_ports) == BOND_AD_STABLE) {
- /*
- * For the STABLE policy, don't replace the old active
+ /* For the STABLE policy, don't replace the old active
* aggregator if it's still active (it has an answering
* partner) or if both the best and active don't have an
* answering partner.
@@ -1491,7 +1521,8 @@ static void ad_agg_selection_logic(struct aggregator *agg)
if (active && active->lag_ports &&
active->lag_ports->is_enabled &&
(__agg_has_partner(active) ||
- (!__agg_has_partner(active) && !__agg_has_partner(best)))) {
+ (!__agg_has_partner(active) &&
+ !__agg_has_partner(best)))) {
if (!(!active->actor_oper_aggregator_key &&
best->actor_oper_aggregator_key)) {
best = NULL;
@@ -1505,7 +1536,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
active->is_active = 1;
}
- // if there is new best aggregator, activate it
+ /* if there is new best aggregator, activate it */
if (best) {
pr_debug("best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
best->aggregator_identifier, best->num_of_ports,
@@ -1516,7 +1547,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
best->lag_ports, best->slave,
best->slave ? best->slave->dev->name : "NULL");
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
agg = &(SLAVE_AD_INFO(slave).aggregator);
pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
@@ -1526,10 +1557,11 @@ static void ad_agg_selection_logic(struct aggregator *agg)
agg->is_individual, agg->is_active);
}
- // check if any partner replys
+ /* check if any partner replies */
if (best->is_individual) {
- pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
- best->slave ? best->slave->bond->dev->name : "NULL");
+ pr_warn("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
+ best->slave ?
+ best->slave->bond->dev->name : "NULL");
}
best->is_active = 1;
@@ -1541,7 +1573,9 @@ static void ad_agg_selection_logic(struct aggregator *agg)
best->partner_oper_aggregator_key,
best->is_individual, best->is_active);
- // disable the ports that were related to the former active_aggregator
+ /* disable the ports that were related to the former
+ * active_aggregator
+ */
if (active) {
for (port = active->lag_ports; port;
port = port->next_port_in_aggregator) {
@@ -1550,8 +1584,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
}
}
- /*
- * if the selected aggregator is of join individuals
+ /* if the selected aggregator is of join individuals
* (partner_system is NULL), enable their ports
*/
active = __get_active_agg(origin);
@@ -1565,13 +1598,14 @@ static void ad_agg_selection_logic(struct aggregator *agg)
}
}
+ rcu_read_unlock();
+
bond_3ad_set_carrier(bond);
}
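The rcu_read_lock()/bond_for_each_slave_rcu() conversion above (together with the "Caller must hold RCU lock" note added to __get_active_agg()) moves the slave walks on this path under RCU. Below is a minimal sketch of the caller-side pattern this establishes; the wrapper function is hypothetical, and the helpers are the ones used in the hunks above, assuming the usual bonding.h context.

	static void example_walk_aggregators(struct bonding *bond)
	{
		struct list_head *iter;
		struct slave *slave;

		rcu_read_lock();
		bond_for_each_slave_rcu(bond, slave, iter) {
			struct aggregator *agg = &(SLAVE_AD_INFO(slave).aggregator);

			if (agg->is_active)
				pr_debug("active agg %d\n",
					 agg->aggregator_identifier);
		}
		rcu_read_unlock();
	}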
/**
* ad_clear_agg - clear a given aggregator's parameters
* @aggregator: the aggregator we're looking at
- *
*/
static void ad_clear_agg(struct aggregator *aggregator)
{
@@ -1595,7 +1629,6 @@ static void ad_clear_agg(struct aggregator *aggregator)
/**
* ad_initialize_agg - initialize a given aggregator's parameters
* @aggregator: the aggregator we're looking at
- *
*/
static void ad_initialize_agg(struct aggregator *aggregator)
{
@@ -1612,7 +1645,6 @@ static void ad_initialize_agg(struct aggregator *aggregator)
* ad_initialize_port - initialize a given port's parameters
* @aggregator: the aggregator we're looking at
* @lacp_fast: boolean. whether fast periodic should be used
- *
*/
static void ad_initialize_port(struct port *port, int lacp_fast)
{
@@ -1644,8 +1676,10 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
port->ntt = false;
port->actor_admin_port_key = 1;
port->actor_oper_port_key = 1;
- port->actor_admin_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY;
- port->actor_oper_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY;
+ port->actor_admin_port_state = AD_STATE_AGGREGATION |
+ AD_STATE_LACP_ACTIVITY;
+ port->actor_oper_port_state = AD_STATE_AGGREGATION |
+ AD_STATE_LACP_ACTIVITY;
if (lacp_fast)
port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
@@ -1654,7 +1688,7 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
memcpy(&port->partner_oper, &tmpl, sizeof(tmpl));
port->is_enabled = true;
- // ****** private parameters ******
+ /* private parameters */
port->sm_vars = 0x3;
port->sm_rx_state = 0;
port->sm_rx_timer_counter = 0;
@@ -1692,11 +1726,12 @@ static void ad_enable_collecting_distributing(struct port *port)
/**
* ad_disable_collecting_distributing - disable a port's transmit/receive
* @port: the port we're looking at
- *
*/
static void ad_disable_collecting_distributing(struct port *port)
{
- if (port->aggregator && MAC_ADDRESS_COMPARE(&(port->aggregator->partner_system), &(null_mac_addr))) {
+ if (port->aggregator &&
+ !MAC_ADDRESS_EQUAL(&(port->aggregator->partner_system),
+ &(null_mac_addr))) {
pr_debug("Disabling port %d(LAG %d)\n",
port->actor_port_number,
port->aggregator->aggregator_identifier);
@@ -1704,66 +1739,22 @@ static void ad_disable_collecting_distributing(struct port *port)
}
}
-#if 0
-/**
- * ad_marker_info_send - send a marker information frame
- * @port: the port we're looking at
- *
- * This function does nothing since we decided not to implement send and handle
- * response for marker PDU's, in this stage, but only to respond to marker
- * information.
- */
-static void ad_marker_info_send(struct port *port)
-{
- struct bond_marker marker;
- u16 index;
-
- // fill the marker PDU with the appropriate values
- marker.subtype = 0x02;
- marker.version_number = 0x01;
- marker.tlv_type = AD_MARKER_INFORMATION_SUBTYPE;
- marker.marker_length = 0x16;
- // convert requester_port to Big Endian
- marker.requester_port = (((port->actor_port_number & 0xFF) << 8) |((u16)(port->actor_port_number & 0xFF00) >> 8));
- marker.requester_system = port->actor_system;
- // convert requester_port(u32) to Big Endian
- marker.requester_transaction_id =
- (((++port->transaction_id & 0xFF) << 24)
- | ((port->transaction_id & 0xFF00) << 8)
- | ((port->transaction_id & 0xFF0000) >> 8)
- | ((port->transaction_id & 0xFF000000) >> 24));
- marker.pad = 0;
- marker.tlv_type_terminator = 0x00;
- marker.terminator_length = 0x00;
- for (index = 0; index < 90; index++)
- marker.reserved_90[index] = 0;
-
- // send the marker information
- if (ad_marker_send(port, &marker) >= 0) {
- pr_debug("Sent Marker Information on port %d\n",
- port->actor_port_number);
- }
-}
-#endif
-
/**
* ad_marker_info_received - handle receive of a Marker information frame
* @marker_info: Marker info received
* @port: the port we're looking at
- *
*/
static void ad_marker_info_received(struct bond_marker *marker_info,
struct port *port)
{
struct bond_marker marker;
- // copy the received marker data to the response marker
- //marker = *marker_info;
+ /* copy the received marker data to the response marker */
memcpy(&marker, marker_info, sizeof(struct bond_marker));
- // change the marker subtype to marker response
+ /* change the marker subtype to marker response */
marker.tlv_type = AD_MARKER_RESPONSE_SUBTYPE;
- // send the marker response
+ /* send the marker response */
if (ad_marker_send(port, &marker) >= 0) {
pr_debug("Sent Marker Response on port %d\n",
port->actor_port_number);
@@ -1780,22 +1771,21 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
* information.
*/
static void ad_marker_response_received(struct bond_marker *marker,
- struct port *port)
+ struct port *port)
{
- marker = NULL; /* just to satisfy the compiler */
- port = NULL; /* just to satisfy the compiler */
- // DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW
+ marker = NULL;
+ port = NULL;
+ /* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */
}
-//////////////////////////////////////////////////////////////////////////////////////
-// ================= AD exported functions to the main bonding code ==================
-//////////////////////////////////////////////////////////////////////////////////////
+/* ========= AD exported functions to the main bonding code ========= */
-// Check aggregators status in team every T seconds
+/* Check aggregators status in team every T seconds */
#define AD_AGGREGATOR_SELECTION_TIMER 8
-/*
- * bond_3ad_initiate_agg_selection(struct bonding *bond)
+/**
+ * bond_3ad_initiate_agg_selection - initiate aggregator selection
+ * @bond: bonding struct
*
* Set the aggregation selection timer, to initiate an agg selection in
* the very near future. Called during first initialization, and during
@@ -1817,8 +1807,8 @@ static u16 aggregator_identifier;
*/
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
{
- // check that the bond is not initialized yet
- if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr),
+ /* check that the bond is not initialized yet */
+ if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
bond->dev->dev_addr)) {
aggregator_identifier = 0;
@@ -1826,7 +1816,9 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
- // initialize how many times this module is called in one second(should be about every 100ms)
+ /* initialize how many times this module is called in one
+ * second (should be about every 100ms)
+ */
ad_ticks_per_sec = tick_resolution;
bond_3ad_initiate_agg_selection(bond,
@@ -1842,22 +1834,16 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
* Returns: 0 on success
* < 0 on error
*/
-int bond_3ad_bind_slave(struct slave *slave)
+void bond_3ad_bind_slave(struct slave *slave)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
struct port *port;
struct aggregator *aggregator;
- if (bond == NULL) {
- pr_err("%s: The slave %s is not attached to its bond\n",
- slave->bond->dev->name, slave->dev->name);
- return -1;
- }
-
- //check that the slave has not been initialized yet.
+ /* check that the slave has not been initialized yet. */
if (SLAVE_AD_INFO(slave).port.slave != slave) {
- // port initialization
+ /* port initialization */
port = &(SLAVE_AD_INFO(slave).port);
ad_initialize_port(port, bond->params.lacp_fast);
@@ -1865,28 +1851,30 @@ int bond_3ad_bind_slave(struct slave *slave)
__initialize_port_locks(slave);
port->slave = slave;
port->actor_port_number = SLAVE_AD_INFO(slave).id;
- // key is determined according to the link speed, duplex and user key(which is yet not supported)
- // ------------------------------------------------------------
- // Port key : | User key | Speed |Duplex|
- // ------------------------------------------------------------
- // 16 6 1 0
- port->actor_admin_port_key = 0; // initialize this parameter
+ /* key is determined according to the link speed, duplex and
+ * user key (which is not yet supported)
+ */
+ port->actor_admin_port_key = 0;
port->actor_admin_port_key |= __get_duplex(port);
port->actor_admin_port_key |= (__get_link_speed(port) << 1);
port->actor_oper_port_key = port->actor_admin_port_key;
- // if the port is not full duplex, then the port should be not lacp Enabled
+ /* if the port is not full duplex, then the port should not be
+ * LACP enabled
+ */
if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS))
port->sm_vars &= ~AD_PORT_LACP_ENABLED;
- // actor system is the bond's system
+ /* actor system is the bond's system */
port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
- // tx timer(to verify that no more than MAX_TX_IN_SECOND lacpdu's are sent in one second)
+ /* tx timer (to verify that no more than MAX_TX_IN_SECOND
+ * lacpdus are sent in one second)
+ */
port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
port->aggregator = NULL;
port->next_port_in_aggregator = NULL;
__disable_port(port);
- // aggregator initialization
+ /* aggregator initialization */
aggregator = &(SLAVE_AD_INFO(slave).aggregator);
ad_initialize_agg(aggregator);
@@ -1897,8 +1885,6 @@ int bond_3ad_bind_slave(struct slave *slave)
aggregator->is_active = 0;
aggregator->num_of_ports = 0;
}
-
- return 0;
}
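The port key built above follows the layout of the diagram removed in this hunk: bit 0 carries the duplex bit, bits 1..5 the speed encoding, and bits 6..15 the (still unsupported) user key. Here is a standalone sketch of that composition; the helper name is hypothetical and the bit widths come from the removed comment, not from code shown here.

	#include <stdint.h>

	static uint16_t ad_compose_port_key(uint16_t duplex, uint16_t speed)
	{
		uint16_t key = 0;		/* user key portion: not supported */

		key |= duplex & 0x1;		/* what __get_duplex() contributes   */
		key |= (speed & 0x1f) << 1;	/* what __get_link_speed() << 1 adds */
		return key;
	}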
/**
@@ -1918,16 +1904,13 @@ void bond_3ad_unbind_slave(struct slave *slave)
struct slave *slave_iter;
struct list_head *iter;
- // find the aggregator related to this slave
aggregator = &(SLAVE_AD_INFO(slave).aggregator);
-
- // find the port related to this slave
port = &(SLAVE_AD_INFO(slave).port);
- // if slave is null, the whole port is not initialized
+ /* if slave is null, the whole port is not initialized */
if (!port->slave) {
- pr_warning("Warning: %s: Trying to unbind an uninitialized port on %s\n",
- slave->bond->dev->name, slave->dev->name);
+ pr_warn("Warning: %s: Trying to unbind an uninitialized port on %s\n",
+ slave->bond->dev->name, slave->dev->name);
return;
}
@@ -1939,34 +1922,42 @@ void bond_3ad_unbind_slave(struct slave *slave)
__update_lacpdu_from_port(port);
ad_lacpdu_send(port);
- // check if this aggregator is occupied
+ /* check if this aggregator is occupied */
if (aggregator->lag_ports) {
- // check if there are other ports related to this aggregator except
- // the port related to this slave(thats ensure us that there is a
- // reason to search for new aggregator, and that we will find one
- if ((aggregator->lag_ports != port) || (aggregator->lag_ports->next_port_in_aggregator)) {
- // find new aggregator for the related port(s)
+ /* check if there are other ports related to this aggregator
+ * except the port related to this slave (that ensures there
+ * is a reason to search for a new aggregator, and that we
+ * will find one)
+ */
+ if ((aggregator->lag_ports != port) ||
+ (aggregator->lag_ports->next_port_in_aggregator)) {
+ /* find new aggregator for the related port(s) */
bond_for_each_slave(bond, slave_iter, iter) {
new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
- // if the new aggregator is empty, or it is connected to our port only
- if (!new_aggregator->lag_ports
- || ((new_aggregator->lag_ports == port)
- && !new_aggregator->lag_ports->next_port_in_aggregator))
+ /* if the new aggregator is empty, or it is
+ * connected to our port only
+ */
+ if (!new_aggregator->lag_ports ||
+ ((new_aggregator->lag_ports == port) &&
+ !new_aggregator->lag_ports->next_port_in_aggregator))
break;
}
if (!slave_iter)
new_aggregator = NULL;
- // if new aggregator found, copy the aggregator's parameters
- // and connect the related lag_ports to the new aggregator
+
+ /* if new aggregator found, copy the aggregator's
+ * parameters and connect the related lag_ports to the
+ * new aggregator
+ */
if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n",
aggregator->aggregator_identifier,
new_aggregator->aggregator_identifier);
- if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) {
+ if ((new_aggregator->lag_ports == port) &&
+ new_aggregator->is_active) {
pr_info("%s: Removing an active aggregator\n",
aggregator->slave->bond->dev->name);
- // select new active aggregator
select_new_active_agg = 1;
}
@@ -1982,30 +1973,33 @@ void bond_3ad_unbind_slave(struct slave *slave)
new_aggregator->is_active = aggregator->is_active;
new_aggregator->num_of_ports = aggregator->num_of_ports;
- // update the information that is written on the ports about the aggregator
+ /* update the information that is written on
+ * the ports about the aggregator
+ */
for (temp_port = aggregator->lag_ports; temp_port;
temp_port = temp_port->next_port_in_aggregator) {
temp_port->aggregator = new_aggregator;
temp_port->actor_port_aggregator_identifier = new_aggregator->aggregator_identifier;
}
- // clear the aggregator
ad_clear_agg(aggregator);
if (select_new_active_agg)
ad_agg_selection_logic(__get_first_agg(port));
} else {
- pr_warning("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n",
- slave->bond->dev->name);
+ pr_warn("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n",
+ slave->bond->dev->name);
}
- } else { // in case that the only port related to this aggregator is the one we want to remove
+ } else {
+ /* in case that the only port related to this
+ * aggregator is the one we want to remove
+ */
select_new_active_agg = aggregator->is_active;
- // clear the aggregator
ad_clear_agg(aggregator);
if (select_new_active_agg) {
pr_info("%s: Removing an active aggregator\n",
slave->bond->dev->name);
- // select new active aggregator
+ /* select new active aggregator */
temp_aggregator = __get_first_agg(port);
if (temp_aggregator)
ad_agg_selection_logic(temp_aggregator);
@@ -2014,15 +2008,19 @@ void bond_3ad_unbind_slave(struct slave *slave)
}
pr_debug("Unbinding port %d\n", port->actor_port_number);
- // find the aggregator that this port is connected to
+
+ /* find the aggregator that this port is connected to */
bond_for_each_slave(bond, slave_iter, iter) {
temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
prev_port = NULL;
- // search the port in the aggregator's related ports
+ /* search the port in the aggregator's related ports */
for (temp_port = temp_aggregator->lag_ports; temp_port;
prev_port = temp_port,
- temp_port = temp_port->next_port_in_aggregator) {
- if (temp_port == port) { // the aggregator found - detach the port from this aggregator
+ temp_port = temp_port->next_port_in_aggregator) {
+ if (temp_port == port) {
+ /* the aggregator found - detach the port from
+ * this aggregator
+ */
if (prev_port)
prev_port->next_port_in_aggregator = temp_port->next_port_in_aggregator;
else
@@ -2030,12 +2028,11 @@ void bond_3ad_unbind_slave(struct slave *slave)
temp_aggregator->num_of_ports--;
if (temp_aggregator->num_of_ports == 0) {
select_new_active_agg = temp_aggregator->is_active;
- // clear the aggregator
ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
pr_info("%s: Removing an active aggregator\n",
slave->bond->dev->name);
- // select new active aggregator
+ /* select new active aggregator */
ad_agg_selection_logic(__get_first_agg(port));
}
}
@@ -2069,21 +2066,23 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
struct port *port;
read_lock(&bond->lock);
+ rcu_read_lock();
- //check if there are any slaves
+ /* check if there are any slaves */
if (!bond_has_slaves(bond))
goto re_arm;
- // check if agg_select_timer timer after initialize is timed out
- if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
- slave = bond_first_slave(bond);
+ /* check if agg_select_timer timer after initialize is timed out */
+ if (BOND_AD_INFO(bond).agg_select_timer &&
+ !(--BOND_AD_INFO(bond).agg_select_timer)) {
+ slave = bond_first_slave_rcu(bond);
port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
- // select the active aggregator for the bond
+ /* select the active aggregator for the bond */
if (port) {
if (!port->slave) {
- pr_warning("%s: Warning: bond's first port is uninitialized\n",
- bond->dev->name);
+ pr_warn("%s: Warning: bond's first port is uninitialized\n",
+ bond->dev->name);
goto re_arm;
}
@@ -2093,12 +2092,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
bond_3ad_set_carrier(bond);
}
- // for each port run the state machines
- bond_for_each_slave(bond, slave, iter) {
+ /* for each port run the state machines */
+ bond_for_each_slave_rcu(bond, slave, iter) {
port = &(SLAVE_AD_INFO(slave).port);
if (!port->slave) {
- pr_warning("%s: Warning: Found an uninitialized port\n",
- bond->dev->name);
+ pr_warn("%s: Warning: Found an uninitialized port\n",
+ bond->dev->name);
goto re_arm;
}
@@ -2114,7 +2113,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
ad_mux_machine(port);
ad_tx_machine(port);
- // turn off the BEGIN bit, since we already handled it
+ /* turn off the BEGIN bit, since we already handled it */
if (port->sm_vars & AD_PORT_BEGIN)
port->sm_vars &= ~AD_PORT_BEGIN;
@@ -2122,9 +2121,9 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
re_arm:
- queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
-
+ rcu_read_unlock();
read_unlock(&bond->lock);
+ queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
}
/**
@@ -2137,7 +2136,8 @@ re_arm:
* received frames (loopback). Since only the payload is given to this
* function, it check for loopback.
*/
-static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length)
+static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
+ u16 length)
{
struct port *port;
int ret = RX_HANDLER_ANOTHER;
@@ -2147,8 +2147,8 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u1
port = &(SLAVE_AD_INFO(slave).port);
if (!port->slave) {
- pr_warning("%s: Warning: port of slave %s is uninitialized\n",
- slave->dev->name, slave->bond->dev->name);
+ pr_warn("%s: Warning: port of slave %s is uninitialized\n",
+ slave->dev->name, slave->bond->dev->name);
return ret;
}
@@ -2165,7 +2165,9 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u1
case AD_TYPE_MARKER:
ret = RX_HANDLER_CONSUMED;
- // No need to convert fields to Little Endian since we don't use the marker's fields.
+ /* No need to convert fields to Little Endian since we
+ * don't use the marker's fields.
+ */
switch (((struct bond_marker *)lacpdu)->tlv_type) {
case AD_MARKER_INFORMATION_SUBTYPE:
@@ -2201,20 +2203,25 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
port = &(SLAVE_AD_INFO(slave).port);
- // if slave is null, the whole port is not initialized
+ /* if slave is null, the whole port is not initialized */
if (!port->slave) {
- pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
- slave->bond->dev->name, slave->dev->name);
+ pr_warn("Warning: %s: speed changed for uninitialized port on %s\n",
+ slave->bond->dev->name, slave->dev->name);
return;
}
+ __get_state_machine_lock(port);
+
port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
port->actor_oper_port_key = port->actor_admin_port_key |=
(__get_link_speed(port) << 1);
pr_debug("Port %d changed speed\n", port->actor_port_number);
- // there is no need to reselect a new aggregator, just signal the
- // state machines to reinitialize
+ /* there is no need to reselect a new aggregator, just signal the
+ * state machines to reinitialize
+ */
port->sm_vars |= AD_PORT_BEGIN;
+
+ __release_state_machine_lock(port);
}
/**
@@ -2229,20 +2236,25 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
port = &(SLAVE_AD_INFO(slave).port);
- // if slave is null, the whole port is not initialized
+ /* if slave is null, the whole port is not initialized */
if (!port->slave) {
- pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
- slave->bond->dev->name, slave->dev->name);
+ pr_warn("%s: Warning: duplex changed for uninitialized port on %s\n",
+ slave->bond->dev->name, slave->dev->name);
return;
}
+ __get_state_machine_lock(port);
+
port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
port->actor_oper_port_key = port->actor_admin_port_key |=
__get_duplex(port);
pr_debug("Port %d changed duplex\n", port->actor_port_number);
- // there is no need to reselect a new aggregator, just signal the
- // state machines to reinitialize
+ /* there is no need to reselect a new aggregator, just signal the
+ * state machines to reinitialize
+ */
port->sm_vars |= AD_PORT_BEGIN;
+
+ __release_state_machine_lock(port);
}
/**
@@ -2258,15 +2270,21 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port = &(SLAVE_AD_INFO(slave).port);
- // if slave is null, the whole port is not initialized
+ /* if slave is null, the whole port is not initialized */
if (!port->slave) {
- pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
- slave->bond->dev->name, slave->dev->name);
+ pr_warn("Warning: %s: link status changed for uninitialized port on %s\n",
+ slave->bond->dev->name, slave->dev->name);
return;
}
- // on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed)
- // on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report
+ __get_state_machine_lock(port);
+ /* on link down we are zeroing duplex and speed since
+ * some of the adaptors (ce1000.lan) report full duplex/speed
+ * instead of N/A (duplex) / 0 (speed).
+ *
+ * on link up we are forcing recheck on the duplex and speed since
+ * some of the adaptors (ce1000.lan) report.
+ */
if (link == BOND_LINK_UP) {
port->is_enabled = true;
port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
@@ -2282,16 +2300,24 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port->actor_oper_port_key = (port->actor_admin_port_key &=
~AD_SPEED_KEY_BITS);
}
- //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN")));
- // there is no need to reselect a new aggregator, just signal the
- // state machines to reinitialize
+ pr_debug("Port %d changed link status to %s",
+ port->actor_port_number,
+ (link == BOND_LINK_UP) ? "UP" : "DOWN");
+ /* there is no need to reselect a new aggregator, just signal the
+ * state machines to reinitialize
+ */
port->sm_vars |= AD_PORT_BEGIN;
+
+ __release_state_machine_lock(port);
}
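The speed, duplex and link-change handlers above now share one shape: take the per-port state-machine lock, update the admin/oper key bits, set AD_PORT_BEGIN, release the lock. A condensed sketch of that pattern follows; the function name is hypothetical, while the lock helpers and flags are the ones used in the hunks above.

	static void example_port_event(struct port *port, u16 new_key_bits)
	{
		__get_state_machine_lock(port);

		port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
		port->actor_oper_port_key = port->actor_admin_port_key |= new_key_bits;

		/* no reselection needed, just ask the machines to reinitialize */
		port->sm_vars |= AD_PORT_BEGIN;

		__release_state_machine_lock(port);
	}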
-/*
- * set link state for bonding master: if we have an active
- * aggregator, we're up, if not, we're down. Presumes that we cannot
- * have an active aggregator if there are no slaves with link up.
+/**
+ * bond_3ad_set_carrier - set link state for bonding master
+ * @bond: bonding structure
+ *
+ * If we have an active aggregator, we're up; if not, we're down.
+ * Presumes that we cannot have an active aggregator if there are
+ * no slaves with link up.
*
* This behavior complies with IEEE 802.3 section 43.3.9.
*
@@ -2302,30 +2328,32 @@ int bond_3ad_set_carrier(struct bonding *bond)
{
struct aggregator *active;
struct slave *first_slave;
+ int ret = 1;
- first_slave = bond_first_slave(bond);
- if (!first_slave)
- return 0;
+ rcu_read_lock();
+ first_slave = bond_first_slave_rcu(bond);
+ if (!first_slave) {
+ ret = 0;
+ goto out;
+ }
active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
if (active) {
/* are enough slaves available to consider link up? */
if (active->num_of_ports < bond->params.min_links) {
if (netif_carrier_ok(bond->dev)) {
netif_carrier_off(bond->dev);
- return 1;
+ goto out;
}
} else if (!netif_carrier_ok(bond->dev)) {
netif_carrier_on(bond->dev);
- return 1;
+ goto out;
}
- return 0;
- }
-
- if (netif_carrier_ok(bond->dev)) {
+ } else if (netif_carrier_ok(bond->dev)) {
netif_carrier_off(bond->dev);
- return 1;
}
- return 0;
+out:
+ rcu_read_unlock();
+ return ret;
}
/**
@@ -2357,7 +2385,8 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
ad_info->ports = aggregator->num_of_ports;
ad_info->actor_key = aggregator->actor_oper_aggregator_key;
ad_info->partner_key = aggregator->partner_oper_aggregator_key;
- memcpy(ad_info->partner_system, aggregator->partner_system.mac_addr_value, ETH_ALEN);
+ memcpy(ad_info->partner_system,
+ aggregator->partner_system.mac_addr_value, ETH_ALEN);
return 0;
}
@@ -2385,13 +2414,12 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
struct list_head *iter;
int slaves_in_agg;
int slave_agg_no;
- int res = 1;
int agg_id;
if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
dev->name);
- goto out;
+ goto err_free;
}
slaves_in_agg = ad_info.ports;
@@ -2399,7 +2427,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
if (slaves_in_agg == 0) {
pr_debug("%s: Error: active aggregator is empty\n", dev->name);
- goto out;
+ goto err_free;
}
slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
@@ -2418,7 +2446,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
}
if (SLAVE_IS_OK(slave)) {
- res = bond_dev_queue_xmit(bond, skb, slave->dev);
+ bond_dev_queue_xmit(bond, skb, slave->dev);
goto out;
}
}
@@ -2426,21 +2454,23 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
if (slave_agg_no >= 0) {
pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n",
dev->name, agg_id);
- goto out;
+ goto err_free;
}
/* we couldn't find any suitable slave after the agg_no, so use the
- * first suitable found, if found. */
+ * first suitable found, if found.
+ */
if (first_ok_slave)
- res = bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
+ bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
+ else
+ goto err_free;
out:
- if (res) {
- /* no suitable interface, frame not sent */
- kfree_skb(skb);
- }
-
return NETDEV_TX_OK;
+err_free:
+ /* no suitable interface, frame not sent */
+ kfree_skb(skb);
+ goto out;
}
int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
@@ -2462,7 +2492,10 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
return ret;
}
-/*
+/**
+ * bond_3ad_update_lacp_rate - change the lacp rate
+ * @bond: bonding struct
+ *
* When modify lacp_rate parameter via sysfs,
* update actor_oper_port_state of each port.
*
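The bond_3ad_xmit_xor() rework above hinges on bond_dev_queue_xmit() now being void and always consuming the skb: the caller either hands the buffer to a chosen slave or jumps to err_free, which frees it, and in both cases the frame is reported as handled. A minimal user-space sketch of that single-owner buffer pattern, using made-up stand-ins (pick_slave(), fake_queue_xmit()) rather than the driver's own helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { char data[64]; };
struct slave { const char *name; };

/* consumes the buffer unconditionally, like the void bond_dev_queue_xmit() */
static void fake_queue_xmit(struct slave *s, struct buf *b)
{
	printf("tx on %s: %s\n", s->name, b->data);
	free(b);
}

/* returns NULL to simulate "no suitable slave found" */
static struct slave *pick_slave(int have_slave)
{
	static struct slave s = { "eth0" };
	return have_slave ? &s : NULL;
}

/* mirrors the out/err_free layout: the frame is freed on the error path,
 * and the function always reports success ("NETDEV_TX_OK") to the caller */
static int xmit(struct buf *b, int have_slave)
{
	struct slave *s = pick_slave(have_slave);

	if (!s)
		goto err_free;
	fake_queue_xmit(s, b);		/* buffer ownership passed on */
out:
	return 0;
err_free:
	free(b);			/* no suitable interface, frame not sent */
	goto out;
}

int main(void)
{
	struct buf *sent = calloc(1, sizeof(*sent));
	struct buf *dropped = calloc(1, sizeof(*dropped));

	if (!sent || !dropped)
		return 1;
	strcpy(sent->data, "hello");
	strcpy(dropped->data, "unused");
	xmit(sent, 1);
	xmit(dropped, 0);
	return 0;
}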
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 5d91ad0cc041..13dc9d3c5e34 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -265,7 +265,7 @@ struct ad_slave_info {
// ================= AD Exported functions to the main bonding code ==================
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
-int bond_3ad_bind_slave(struct slave *slave);
+void bond_3ad_bind_slave(struct slave *slave);
void bond_3ad_unbind_slave(struct slave *slave);
void bond_3ad_state_machine_handler(struct work_struct *);
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 02872405d35d..a2c47476804d 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -12,8 +12,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
@@ -470,7 +469,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
/* slave being removed should not be active at this point
*
- * Caller must hold bond lock for read
+ * Caller must hold rtnl.
*/
static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
{
@@ -816,7 +815,7 @@ static void rlb_rebalance(struct bonding *bond)
for (; hash_index != RLB_NULL_INDEX;
hash_index = client_info->used_next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
- assigned_slave = rlb_next_rx_slave(bond);
+ assigned_slave = __rlb_next_rx_slave(bond);
if (assigned_slave && (client_info->slave != assigned_slave)) {
client_info->slave = assigned_slave;
client_info->ntt = 1;
@@ -1372,7 +1371,6 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
int do_tx_balance = 1;
u32 hash_index = 0;
const u8 *hash_start = NULL;
- int res = 1;
struct ipv6hdr *ip6hdr;
skb_reset_mac_header(skb);
@@ -1470,7 +1468,8 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
ETH_ALEN);
}
- res = bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+ bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+ goto out;
} else {
if (tx_slave) {
_lock_tx_hashtbl(bond);
@@ -1479,11 +1478,9 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
}
}
- if (res) {
- /* no suitable interface, frame not sent */
- kfree_skb(skb);
- }
-
+ /* no suitable interface, frame not sent */
+ kfree_skb(skb);
+out:
return NETDEV_TX_OK;
}
@@ -1495,14 +1492,14 @@ void bond_alb_monitor(struct work_struct *work)
struct list_head *iter;
struct slave *slave;
- read_lock(&bond->lock);
-
if (!bond_has_slaves(bond)) {
bond_info->tx_rebalance_counter = 0;
bond_info->lp_counter = 0;
goto re_arm;
}
+ rcu_read_lock();
+
bond_info->tx_rebalance_counter++;
bond_info->lp_counter++;
@@ -1515,7 +1512,7 @@ void bond_alb_monitor(struct work_struct *work)
*/
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave, iter)
+ bond_for_each_slave_rcu(bond, slave, iter)
alb_send_learning_packets(slave, slave->dev->dev_addr);
read_unlock(&bond->curr_slave_lock);
@@ -1528,7 +1525,7 @@ void bond_alb_monitor(struct work_struct *work)
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
tlb_clear_slave(bond, slave, 1);
if (slave == bond->curr_active_slave) {
SLAVE_TLB_INFO(slave).load =
@@ -1552,11 +1549,9 @@ void bond_alb_monitor(struct work_struct *work)
* dev_set_promiscuity requires rtnl and
* nothing else. Avoid race with bond_close.
*/
- read_unlock(&bond->lock);
- if (!rtnl_trylock()) {
- read_lock(&bond->lock);
+ rcu_read_unlock();
+ if (!rtnl_trylock())
goto re_arm;
- }
bond_info->rlb_promisc_timeout_counter = 0;
@@ -1568,7 +1563,7 @@ void bond_alb_monitor(struct work_struct *work)
bond_info->primary_is_promisc = 0;
rtnl_unlock();
- read_lock(&bond->lock);
+ rcu_read_lock();
}
if (bond_info->rlb_rebalance) {
@@ -1590,11 +1585,9 @@ void bond_alb_monitor(struct work_struct *work)
}
}
}
-
+ rcu_read_unlock();
re_arm:
queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
-
- read_unlock(&bond->lock);
}
/* assumption: called before the slave is attached to the bond
@@ -1680,14 +1673,11 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
* If new_slave is NULL, caller must hold curr_slave_lock or
* bond->lock for write.
*
- * If new_slave is not NULL, caller must hold RTNL, bond->lock for
- * read and curr_slave_lock for write. Processing here may sleep, so
- * no other locks may be held.
+ * If new_slave is not NULL, caller must hold RTNL and curr_slave_lock
+ * for write. Processing here may sleep, so no other locks may be held.
*/
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
__releases(&bond->curr_slave_lock)
- __releases(&bond->lock)
- __acquires(&bond->lock)
__acquires(&bond->curr_slave_lock)
{
struct slave *swap_slave;
@@ -1723,7 +1713,6 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
tlb_clear_slave(bond, new_slave, 1);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
ASSERT_RTNL();
@@ -1749,11 +1738,9 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
/* swap mac address */
alb_swap_mac_addr(swap_slave, new_slave);
alb_fasten_mac_swap(bond, swap_slave, new_slave);
- read_lock(&bond->lock);
} else {
/* set the new_slave to the bond mac address */
alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
- read_lock(&bond->lock);
alb_send_learning_packets(new_slave, bond->dev->dev_addr);
}
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 4226044efd08..e09dd4bfafff 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -12,8 +12,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 398e299ee1bd..71ba18efa15b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -79,7 +79,6 @@
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_keys.h>
-#include <linux/reciprocal_div.h>
#include "bonding.h"
#include "bond_3ad.h"
#include "bond_alb.h"
@@ -87,13 +86,11 @@
/*---------------------------- Module parameters ----------------------------*/
/* monitor all links that often (in milliseconds). <=0 disables monitoring */
-#define BOND_LINK_MON_INTERV 0
-#define BOND_LINK_ARP_INTERV 0
static int max_bonds = BOND_DEFAULT_MAX_BONDS;
static int tx_queues = BOND_DEFAULT_TX_QUEUES;
static int num_peer_notif = 1;
-static int miimon = BOND_LINK_MON_INTERV;
+static int miimon;
static int updelay;
static int downdelay;
static int use_carrier = 1;
@@ -104,7 +101,7 @@ static char *lacp_rate;
static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
-static int arp_interval = BOND_LINK_ARP_INTERV;
+static int arp_interval;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
@@ -113,6 +110,7 @@ static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
+static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
@@ -189,6 +187,10 @@ module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
"mode; 0 for a random slave, 1 packet per "
"slave (default), >1 packets per slave.");
+module_param(lp_interval, uint, 0);
+MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
+ "the bonding driver sends learning packets to "
+ "each slaves peer switch. The default is 1.");
/*----------------------------- Global variables ----------------------------*/
@@ -204,67 +206,6 @@ static int bond_mode = BOND_MODE_ROUNDROBIN;
static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
static int lacp_fast;
-const struct bond_parm_tbl bond_lacp_tbl[] = {
-{ "slow", AD_LACP_SLOW},
-{ "fast", AD_LACP_FAST},
-{ NULL, -1},
-};
-
-const struct bond_parm_tbl bond_mode_tbl[] = {
-{ "balance-rr", BOND_MODE_ROUNDROBIN},
-{ "active-backup", BOND_MODE_ACTIVEBACKUP},
-{ "balance-xor", BOND_MODE_XOR},
-{ "broadcast", BOND_MODE_BROADCAST},
-{ "802.3ad", BOND_MODE_8023AD},
-{ "balance-tlb", BOND_MODE_TLB},
-{ "balance-alb", BOND_MODE_ALB},
-{ NULL, -1},
-};
-
-const struct bond_parm_tbl xmit_hashtype_tbl[] = {
-{ "layer2", BOND_XMIT_POLICY_LAYER2},
-{ "layer3+4", BOND_XMIT_POLICY_LAYER34},
-{ "layer2+3", BOND_XMIT_POLICY_LAYER23},
-{ "encap2+3", BOND_XMIT_POLICY_ENCAP23},
-{ "encap3+4", BOND_XMIT_POLICY_ENCAP34},
-{ NULL, -1},
-};
-
-const struct bond_parm_tbl arp_all_targets_tbl[] = {
-{ "any", BOND_ARP_TARGETS_ANY},
-{ "all", BOND_ARP_TARGETS_ALL},
-{ NULL, -1},
-};
-
-const struct bond_parm_tbl arp_validate_tbl[] = {
-{ "none", BOND_ARP_VALIDATE_NONE},
-{ "active", BOND_ARP_VALIDATE_ACTIVE},
-{ "backup", BOND_ARP_VALIDATE_BACKUP},
-{ "all", BOND_ARP_VALIDATE_ALL},
-{ NULL, -1},
-};
-
-const struct bond_parm_tbl fail_over_mac_tbl[] = {
-{ "none", BOND_FOM_NONE},
-{ "active", BOND_FOM_ACTIVE},
-{ "follow", BOND_FOM_FOLLOW},
-{ NULL, -1},
-};
-
-const struct bond_parm_tbl pri_reselect_tbl[] = {
-{ "always", BOND_PRI_RESELECT_ALWAYS},
-{ "better", BOND_PRI_RESELECT_BETTER},
-{ "failure", BOND_PRI_RESELECT_FAILURE},
-{ NULL, -1},
-};
-
-struct bond_parm_tbl ad_select_tbl[] = {
-{ "stable", BOND_AD_STABLE},
-{ "bandwidth", BOND_AD_BANDWIDTH},
-{ "count", BOND_AD_COUNT},
-{ NULL, -1},
-};
-
/*-------------------------- Forward declarations ---------------------------*/
static int bond_init(struct net_device *bond_dev);
@@ -299,7 +240,7 @@ const char *bond_mode_name(int mode)
* @skb: hw accel VLAN tagged skb to transmit
* @slave_dev: slave that is supposed to xmit this skbuff
*/
-int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
+void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *slave_dev)
{
skb->dev = slave_dev;
@@ -312,8 +253,6 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
else
dev_queue_xmit(skb);
-
- return 0;
}
/*
@@ -463,6 +402,22 @@ static void bond_update_speed_duplex(struct slave *slave)
return;
}
+const char *bond_slave_link_status(s8 link)
+{
+ switch (link) {
+ case BOND_LINK_UP:
+ return "up";
+ case BOND_LINK_FAIL:
+ return "going down";
+ case BOND_LINK_DOWN:
+ return "down";
+ case BOND_LINK_BACK:
+ return "going back";
+ default:
+ return "unknown";
+ }
+}
+
/*
* if <dev> supports MII link status reporting, check its link status.
*
@@ -591,33 +546,22 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
* device and retransmit an IGMP JOIN request to the current active
* slave.
*/
-static void bond_resend_igmp_join_requests(struct bonding *bond)
+static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
+ struct bonding *bond = container_of(work, struct bonding,
+ mcast_work.work);
+
if (!rtnl_trylock()) {
queue_delayed_work(bond->wq, &bond->mcast_work, 1);
return;
}
call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
- rtnl_unlock();
- /* We use curr_slave_lock to protect against concurrent access to
- * igmp_retrans from multiple running instances of this function and
- * bond_change_active_slave
- */
- write_lock_bh(&bond->curr_slave_lock);
if (bond->igmp_retrans > 1) {
bond->igmp_retrans--;
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
}
- write_unlock_bh(&bond->curr_slave_lock);
-}
-
-static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
-{
- struct bonding *bond = container_of(work, struct bonding,
- mcast_work.work);
-
- bond_resend_igmp_join_requests(bond);
+ rtnl_unlock();
}
/* Flush bond's hardware addresses from slave
@@ -697,14 +641,12 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
*
* Perform special MAC address swapping for fail_over_mac settings
*
- * Called with RTNL, bond->lock for read, curr_slave_lock for write_bh.
+ * Called with RTNL, curr_slave_lock for write_bh.
*/
static void bond_do_fail_over_mac(struct bonding *bond,
struct slave *new_active,
struct slave *old_active)
__releases(&bond->curr_slave_lock)
- __releases(&bond->lock)
- __acquires(&bond->lock)
__acquires(&bond->curr_slave_lock)
{
u8 tmp_mac[ETH_ALEN];
@@ -715,9 +657,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
case BOND_FOM_ACTIVE:
if (new_active) {
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
bond_set_dev_addr(bond->dev, new_active->dev);
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
}
break;
@@ -731,7 +671,6 @@ static void bond_do_fail_over_mac(struct bonding *bond,
return;
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
if (old_active) {
memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
@@ -761,7 +700,6 @@ static void bond_do_fail_over_mac(struct bonding *bond,
pr_err("%s: Error %d setting MAC of slave %s\n",
bond->dev->name, -rv, new_active->dev->name);
out:
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
break;
default:
@@ -821,7 +759,11 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
static bool bond_should_notify_peers(struct bonding *bond)
{
- struct slave *slave = bond->curr_active_slave;
+ struct slave *slave;
+
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ rcu_read_unlock();
pr_debug("bond_should_notify_peers: bond %s slave %s\n",
bond->dev->name, slave ? slave->dev->name : "NULL");
@@ -846,8 +788,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
* because it is apparently the best available slave we have, even though its
* updelay hasn't timed out yet.
*
- * If new_active is not NULL, caller must hold bond->lock for read and
- * curr_slave_lock for write_bh.
+ * If new_active is not NULL, caller must hold curr_slave_lock for write_bh.
*/
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
@@ -916,14 +857,12 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
}
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
if (should_notify_peers)
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
bond->dev);
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
}
}
@@ -949,7 +888,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
* - The primary_slave has got its link back.
* - A slave has got its link back and there's no old curr_active_slave.
*
- * Caller must hold bond->lock for read and curr_slave_lock for write_bh.
+ * Caller must hold curr_slave_lock for write_bh.
*/
void bond_select_active_slave(struct bonding *bond)
{
@@ -1331,9 +1270,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (slave_ops->ndo_set_mac_address == NULL) {
if (!bond_has_slaves(bond)) {
- pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
- bond_dev->name);
- bond->params.fail_over_mac = BOND_FOM_ACTIVE;
+ pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address.\n",
+ bond_dev->name);
+ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+ bond->params.fail_over_mac = BOND_FOM_ACTIVE;
+ pr_warn("%s: Setting fail_over_mac to active for active-backup mode.\n",
+ bond_dev->name);
+ }
} else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
bond_dev->name);
@@ -1376,7 +1319,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
*/
memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
- if (!bond->params.fail_over_mac) {
+ if (!bond->params.fail_over_mac ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/*
* Set slave to master's mac address. The application already
* set the master's mac address to that of the first slave
@@ -1566,7 +1510,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
slave_dev->npinfo = bond->dev->npinfo;
if (slave_dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
- read_unlock(&bond->lock);
pr_info("Error, %s: master_dev is using netpoll, "
"but new slave device does not support netpoll.\n",
bond_dev->name);
@@ -1589,16 +1532,20 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_unregister;
}
+ res = bond_sysfs_slave_add(new_slave);
+ if (res) {
+ pr_debug("Error %d calling bond_sysfs_slave_add\n", res);
+ goto err_upper_unlink;
+ }
+
bond->slave_cnt++;
bond_compute_features(bond);
bond_set_carrier(bond);
if (USES_PRIMARY(bond->params.mode)) {
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
}
pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1610,6 +1557,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return 0;
/* Undo stages on error */
+err_upper_unlink:
+ bond_upper_dev_unlink(bond_dev, slave_dev);
+
err_unregister:
netdev_rx_handler_unregister(slave_dev);
@@ -1618,19 +1568,13 @@ err_detach:
bond_hw_addr_flush(bond_dev, slave_dev);
vlan_vids_del_by_dev(slave_dev, bond_dev);
- write_lock_bh(&bond->lock);
if (bond->primary_slave == new_slave)
bond->primary_slave = NULL;
if (bond->curr_active_slave == new_slave) {
- bond_change_active_slave(bond, NULL);
- write_unlock_bh(&bond->lock);
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
+ bond_change_active_slave(bond, NULL);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- } else {
- write_unlock_bh(&bond->lock);
}
slave_disable_netpoll(new_slave);
@@ -1639,7 +1583,8 @@ err_close:
dev_close(slave_dev);
err_restore_mac:
- if (!bond->params.fail_over_mac) {
+ if (!bond->params.fail_over_mac ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/* XXX TODO - fom follow mode needs to change master's
* MAC if this slave's MAC is in use by the bond, or at
* least print a warning.
@@ -1658,7 +1603,7 @@ err_free:
err_undo_flags:
/* Enslave of first slave has failed and we need to fix master's mac */
if (!bond_has_slaves(bond) &&
- ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
+ ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr))
eth_hw_addr_random(bond_dev);
return res;
@@ -1695,23 +1640,21 @@ static int __bond_release_one(struct net_device *bond_dev,
}
block_netpoll_tx();
- write_lock_bh(&bond->lock);
slave = bond_get_slave_by_dev(bond, slave_dev);
if (!slave) {
/* not a slave of this bond */
pr_info("%s: %s not enslaved\n",
bond_dev->name, slave_dev->name);
- write_unlock_bh(&bond->lock);
unblock_netpoll_tx();
return -EINVAL;
}
- write_unlock_bh(&bond->lock);
-
/* release the slave from its bond */
bond->slave_cnt--;
+ bond_sysfs_slave_del(slave);
+
bond_upper_dev_unlink(bond_dev, slave_dev);
/* unregister rx_handler early so bond_handle_frame wouldn't be called
* for this slave anymore.
@@ -1720,12 +1663,10 @@ static int __bond_release_one(struct net_device *bond_dev,
write_lock_bh(&bond->lock);
/* Inform AD package of unbinding of slave. */
- if (bond->params.mode == BOND_MODE_8023AD) {
- /* must be called before the slave is
- * detached from the list
- */
+ if (bond->params.mode == BOND_MODE_8023AD)
bond_3ad_unbind_slave(slave);
- }
+
+ write_unlock_bh(&bond->lock);
pr_info("%s: releasing %s interface %s\n",
bond_dev->name,
@@ -1736,8 +1677,9 @@ static int __bond_release_one(struct net_device *bond_dev,
bond->current_arp_slave = NULL;
- if (!all && !bond->params.fail_over_mac) {
- if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
+ if (!all && (!bond->params.fail_over_mac ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
+ if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
bond_dev->name, slave_dev->name,
@@ -1748,8 +1690,11 @@ static int __bond_release_one(struct net_device *bond_dev,
if (bond->primary_slave == slave)
bond->primary_slave = NULL;
- if (oldcurrent == slave)
+ if (oldcurrent == slave) {
+ write_lock_bh(&bond->curr_slave_lock);
bond_change_active_slave(bond, NULL);
+ write_unlock_bh(&bond->curr_slave_lock);
+ }
if (bond_is_lb(bond)) {
/* Must be called only after the slave has been
@@ -1757,28 +1702,22 @@ static int __bond_release_one(struct net_device *bond_dev,
* has been cleared (if our_slave == old_current),
* but before a new active slave is selected.
*/
- write_unlock_bh(&bond->lock);
bond_alb_deinit_slave(bond, slave);
- write_lock_bh(&bond->lock);
}
if (all) {
- rcu_assign_pointer(bond->curr_active_slave, NULL);
+ RCU_INIT_POINTER(bond->curr_active_slave, NULL);
} else if (oldcurrent == slave) {
/*
* Note that we hold RTNL over this sequence, so there
* is no concern that another slave add/remove event
* will interfere.
*/
- write_unlock_bh(&bond->lock);
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- write_lock_bh(&bond->lock);
}
if (!bond_has_slaves(bond)) {
@@ -1793,7 +1732,6 @@ static int __bond_release_one(struct net_device *bond_dev,
}
}
- write_unlock_bh(&bond->lock);
unblock_netpoll_tx();
synchronize_rcu();
@@ -1837,7 +1775,8 @@ static int __bond_release_one(struct net_device *bond_dev,
/* close slave before restoring its mac address */
dev_close(slave_dev);
- if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
+ if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/* restore original ("permanent") mac address */
memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
addr.sa_family = slave_dev->type;
@@ -1928,7 +1867,7 @@ static int bond_miimon_inspect(struct bonding *bond)
ignore_updelay = !bond->curr_active_slave ? true : false;
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2119,48 +2058,42 @@ do_failover:
* an acquisition of appropriate locks followed by a commit phase to
* implement whatever link state changes are indicated.
*/
-void bond_mii_monitor(struct work_struct *work)
+static void bond_mii_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mii_work.work);
bool should_notify_peers = false;
unsigned long delay;
- read_lock(&bond->lock);
-
delay = msecs_to_jiffies(bond->params.miimon);
if (!bond_has_slaves(bond))
goto re_arm;
+ rcu_read_lock();
+
should_notify_peers = bond_should_notify_peers(bond);
if (bond_miimon_inspect(bond)) {
- read_unlock(&bond->lock);
+ rcu_read_unlock();
/* Race avoidance with bond_close cancel of workqueue */
if (!rtnl_trylock()) {
- read_lock(&bond->lock);
delay = 1;
should_notify_peers = false;
goto re_arm;
}
- read_lock(&bond->lock);
-
bond_miimon_commit(bond);
- read_unlock(&bond->lock);
rtnl_unlock(); /* might sleep, hold no other locks */
- read_lock(&bond->lock);
- }
+ } else
+ rcu_read_unlock();
re_arm:
if (bond->params.miimon)
queue_delayed_work(bond->wq, &bond->mii_work, delay);
- read_unlock(&bond->lock);
-
if (should_notify_peers) {
if (!rtnl_trylock())
return;
@@ -2414,20 +2347,20 @@ static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
* arp is transmitted to generate traffic. see activebackup_arp_monitor for
* arp monitoring in active backup mode.
*/
-void bond_loadbalance_arp_mon(struct work_struct *work)
+static void bond_loadbalance_arp_mon(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
arp_work.work);
struct slave *slave, *oldcurrent;
struct list_head *iter;
- int do_failover = 0;
-
- read_lock(&bond->lock);
+ int do_failover = 0, slave_state_changed = 0;
if (!bond_has_slaves(bond))
goto re_arm;
- oldcurrent = bond->curr_active_slave;
+ rcu_read_lock();
+
+ oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
/* see if any of the previous devices are up now (i.e. they have
* xmt and rcv traffic). the curr_active_slave does not come into
* the picture unless it is null. also, slave->jiffies is not needed
@@ -2436,7 +2369,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* TODO: what about up/down delay in arp mode? it wasn't here before
* so it can wait
*/
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
unsigned long trans_start = dev_trans_start(slave->dev);
if (slave->link != BOND_LINK_UP) {
@@ -2444,7 +2377,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
slave->link = BOND_LINK_UP;
- bond_set_active_slave(slave);
+ slave_state_changed = 1;
/* primary_slave has no meaning in round-robin
* mode. the window of a slave being up and
@@ -2473,7 +2406,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
!bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
slave->link = BOND_LINK_DOWN;
- bond_set_backup_slave(slave);
+ slave_state_changed = 1;
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
@@ -2498,22 +2431,33 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
bond_arp_send_all(bond, slave);
}
- if (do_failover) {
- block_netpoll_tx();
- write_lock_bh(&bond->curr_slave_lock);
+ rcu_read_unlock();
- bond_select_active_slave(bond);
+ if (do_failover || slave_state_changed) {
+ if (!rtnl_trylock())
+ goto re_arm;
- write_unlock_bh(&bond->curr_slave_lock);
- unblock_netpoll_tx();
+ if (slave_state_changed) {
+ bond_slave_state_change(bond);
+ } else if (do_failover) {
+ /* bond_select_active_slave() must be called with RTNL
+ * and curr_slave_lock held for write.
+ */
+ block_netpoll_tx();
+ write_lock_bh(&bond->curr_slave_lock);
+
+ bond_select_active_slave(bond);
+
+ write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
+ }
+ rtnl_unlock();
}
re_arm:
if (bond->params.arp_interval)
queue_delayed_work(bond->wq, &bond->arp_work,
msecs_to_jiffies(bond->params.arp_interval));
-
- read_unlock(&bond->lock);
}
/*
@@ -2522,7 +2466,7 @@ re_arm:
* place for the slave. Returns 0 if no changes are found, >0 if changes
* to link states must be committed.
*
- * Called with bond->lock held for read.
+ * Called with rcu_read_lock held.
*/
static int bond_ab_arp_inspect(struct bonding *bond)
{
@@ -2531,7 +2475,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
struct slave *slave;
int commit = 0;
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
last_rx = slave_last_rx(bond, slave);
@@ -2593,7 +2537,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
* Called to commit link state changes noted by inspection step of
* active-backup mode ARP monitor.
*
- * Called with RTNL and bond->lock for read.
+ * Called with RTNL held.
*/
static void bond_ab_arp_commit(struct bonding *bond)
{
@@ -2667,42 +2611,49 @@ do_failover:
/*
* Send ARP probes for active-backup mode ARP monitor.
- *
- * Called with bond->lock held for read.
*/
-static void bond_ab_arp_probe(struct bonding *bond)
+static bool bond_ab_arp_probe(struct bonding *bond)
{
- struct slave *slave, *before = NULL, *new_slave = NULL;
+ struct slave *slave, *before = NULL, *new_slave = NULL,
+ *curr_arp_slave, *curr_active_slave;
struct list_head *iter;
bool found = false;
- read_lock(&bond->curr_slave_lock);
+ rcu_read_lock();
+ curr_arp_slave = rcu_dereference(bond->current_arp_slave);
+ curr_active_slave = rcu_dereference(bond->curr_active_slave);
- if (bond->current_arp_slave && bond->curr_active_slave)
+ if (curr_arp_slave && curr_active_slave)
pr_info("PROBE: c_arp %s && cas %s BAD\n",
- bond->current_arp_slave->dev->name,
- bond->curr_active_slave->dev->name);
+ curr_arp_slave->dev->name,
+ curr_active_slave->dev->name);
- if (bond->curr_active_slave) {
- bond_arp_send_all(bond, bond->curr_active_slave);
- read_unlock(&bond->curr_slave_lock);
- return;
+ if (curr_active_slave) {
+ bond_arp_send_all(bond, curr_active_slave);
+ rcu_read_unlock();
+ return true;
}
-
- read_unlock(&bond->curr_slave_lock);
+ rcu_read_unlock();
/* if we don't have a curr_active_slave, search for the next available
* backup slave from the current_arp_slave and make it the candidate
* for becoming the curr_active_slave
*/
- if (!bond->current_arp_slave) {
- bond->current_arp_slave = bond_first_slave(bond);
- if (!bond->current_arp_slave)
- return;
+ if (!rtnl_trylock())
+ return false;
+ /* curr_arp_slave might have gone away */
+ curr_arp_slave = ACCESS_ONCE(bond->current_arp_slave);
+
+ if (!curr_arp_slave) {
+ curr_arp_slave = bond_first_slave(bond);
+ if (!curr_arp_slave) {
+ rtnl_unlock();
+ return true;
+ }
}
- bond_set_slave_inactive_flags(bond->current_arp_slave);
+ bond_set_slave_inactive_flags(curr_arp_slave);
bond_for_each_slave(bond, slave, iter) {
if (!found && !before && IS_UP(slave->dev))
@@ -2727,68 +2678,67 @@ static void bond_ab_arp_probe(struct bonding *bond)
pr_info("%s: backup interface %s is now down.\n",
bond->dev->name, slave->dev->name);
}
- if (slave == bond->current_arp_slave)
+ if (slave == curr_arp_slave)
found = true;
}
if (!new_slave && before)
new_slave = before;
- if (!new_slave)
- return;
+ if (!new_slave) {
+ rtnl_unlock();
+ return true;
+ }
new_slave->link = BOND_LINK_BACK;
bond_set_slave_active_flags(new_slave);
bond_arp_send_all(bond, new_slave);
new_slave->jiffies = jiffies;
- bond->current_arp_slave = new_slave;
+ rcu_assign_pointer(bond->current_arp_slave, new_slave);
+ rtnl_unlock();
+ return true;
}
-void bond_activebackup_arp_mon(struct work_struct *work)
+static void bond_activebackup_arp_mon(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
arp_work.work);
- bool should_notify_peers = false;
+ bool should_notify_peers = false, should_commit = false;
int delta_in_ticks;
- read_lock(&bond->lock);
-
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
if (!bond_has_slaves(bond))
goto re_arm;
+ rcu_read_lock();
should_notify_peers = bond_should_notify_peers(bond);
+ should_commit = bond_ab_arp_inspect(bond);
+ rcu_read_unlock();
- if (bond_ab_arp_inspect(bond)) {
- read_unlock(&bond->lock);
-
+ if (should_commit) {
/* Race avoidance with bond_close flush of workqueue */
if (!rtnl_trylock()) {
- read_lock(&bond->lock);
delta_in_ticks = 1;
should_notify_peers = false;
goto re_arm;
}
- read_lock(&bond->lock);
-
bond_ab_arp_commit(bond);
-
- read_unlock(&bond->lock);
rtnl_unlock();
- read_lock(&bond->lock);
}
- bond_ab_arp_probe(bond);
+ if (!bond_ab_arp_probe(bond)) {
+ /* rtnl locking failed, re-arm */
+ delta_in_ticks = 1;
+ should_notify_peers = false;
+ }
re_arm:
if (bond->params.arp_interval)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
- read_unlock(&bond->lock);
-
if (should_notify_peers) {
if (!rtnl_trylock())
return;
@@ -2896,9 +2846,27 @@ static int bond_slave_netdev_event(unsigned long event,
*/
break;
case NETDEV_CHANGENAME:
- /*
- * TODO: handle changing the primary's name
- */
+ /* we don't care if we don't have primary set */
+ if (!USES_PRIMARY(bond->params.mode) ||
+ !bond->params.primary[0])
+ break;
+
+ if (slave == bond->primary_slave) {
+ /* slave's name changed - it's no longer the primary */
+ bond->primary_slave = NULL;
+ } else if (!strcmp(slave_dev->name, bond->params.primary)) {
+ /* we have a new primary slave */
+ bond->primary_slave = slave;
+ } else { /* we didn't change primary - exit */
+ break;
+ }
+
+ pr_info("%s: Primary slave changed to %s, reselecting active slave.\n",
+ bond->dev->name, bond->primary_slave ? slave_dev->name :
+ "none");
+ write_lock_bh(&bond->curr_slave_lock);
+ bond_select_active_slave(bond);
+ write_unlock_bh(&bond->curr_slave_lock);
break;
case NETDEV_FEAT_CHANGE:
bond_compute_features(bond);
@@ -3178,6 +3146,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
struct ifslave k_sinfo;
struct ifslave __user *u_sinfo = NULL;
struct mii_ioctl_data *mii = NULL;
+ struct bond_opt_value newval;
struct net *net;
int res = 0;
@@ -3249,37 +3218,35 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- slave_dev = dev_get_by_name(net, ifr->ifr_slave);
+ slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
pr_debug("slave_dev=%p:\n", slave_dev);
if (!slave_dev)
- res = -ENODEV;
- else {
- pr_debug("slave_dev->name=%s:\n", slave_dev->name);
- switch (cmd) {
- case BOND_ENSLAVE_OLD:
- case SIOCBONDENSLAVE:
- res = bond_enslave(bond_dev, slave_dev);
- break;
- case BOND_RELEASE_OLD:
- case SIOCBONDRELEASE:
- res = bond_release(bond_dev, slave_dev);
- break;
- case BOND_SETHWADDR_OLD:
- case SIOCBONDSETHWADDR:
- bond_set_dev_addr(bond_dev, slave_dev);
- res = 0;
- break;
- case BOND_CHANGE_ACTIVE_OLD:
- case SIOCBONDCHANGEACTIVE:
- res = bond_option_active_slave_set(bond, slave_dev);
- break;
- default:
- res = -EOPNOTSUPP;
- }
+ return -ENODEV;
- dev_put(slave_dev);
+ pr_debug("slave_dev->name=%s:\n", slave_dev->name);
+ switch (cmd) {
+ case BOND_ENSLAVE_OLD:
+ case SIOCBONDENSLAVE:
+ res = bond_enslave(bond_dev, slave_dev);
+ break;
+ case BOND_RELEASE_OLD:
+ case SIOCBONDRELEASE:
+ res = bond_release(bond_dev, slave_dev);
+ break;
+ case BOND_SETHWADDR_OLD:
+ case SIOCBONDSETHWADDR:
+ bond_set_dev_addr(bond_dev, slave_dev);
+ res = 0;
+ break;
+ case BOND_CHANGE_ACTIVE_OLD:
+ case SIOCBONDCHANGEACTIVE:
+ bond_opt_initstr(&newval, slave_dev->name);
+ res = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
+ break;
+ default:
+ res = -EOPNOTSUPP;
}
return res;
@@ -3471,7 +3438,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
/* If fail_over_mac is enabled, do nothing and return success.
* Returning an error causes ifenslave to fail.
*/
- if (bond->params.fail_over_mac)
+ if (bond->params.fail_over_mac &&
+ bond->params.mode == BOND_MODE_ACTIVEBACKUP)
return 0;
if (!is_valid_ether_addr(sa->sa_data))
@@ -3550,7 +3518,7 @@ unwind:
* it fails, it tries to find the first available slave for transmission.
* The skb is consumed in all cases, thus the function is void.
*/
-void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
+static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
{
struct list_head *iter;
struct slave *slave;
@@ -3590,8 +3558,9 @@ void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
*/
static u32 bond_rr_gen_slave_id(struct bonding *bond)
{
- int packets_per_slave = bond->params.packets_per_slave;
u32 slave_id;
+ struct reciprocal_value reciprocal_packets_per_slave;
+ int packets_per_slave = bond->params.packets_per_slave;
switch (packets_per_slave) {
case 0:
@@ -3601,8 +3570,10 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
slave_id = bond->rr_tx_counter;
break;
default:
+ reciprocal_packets_per_slave =
+ bond->params.reciprocal_packets_per_slave;
slave_id = reciprocal_divide(bond->rr_tx_counter,
- packets_per_slave);
+ reciprocal_packets_per_slave);
break;
}
bond->rr_tx_counter++;
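bond_rr_gen_slave_id() above dispatches on packets_per_slave: 0 picks a pseudo-random id, 1 follows the raw counter, and anything larger keeps the same slave for packets_per_slave consecutive packets, with the divide done through the precomputed reciprocal_value() so the hot path avoids a hardware division. A plain-division sketch of the same selection logic, assuming an illustrative fake_random_u32() and slave count rather than the driver's helpers:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t rr_tx_counter;

/* stand-in for the kernel's pseudo-random source */
static uint32_t fake_random_u32(void)
{
	return (uint32_t)rand();
}

static uint32_t rr_gen_slave_id(int packets_per_slave)
{
	uint32_t slave_id;

	switch (packets_per_slave) {
	case 0:		/* spread packets at random */
		slave_id = fake_random_u32();
		break;
	case 1:		/* strict per-packet round robin */
		slave_id = rr_tx_counter;
		break;
	default:	/* N consecutive packets per slave; the driver
			 * replaces this divide with reciprocal_divide()
			 * on a precomputed reciprocal_value */
		slave_id = rr_tx_counter / packets_per_slave;
		break;
	}
	rr_tx_counter++;
	return slave_id;
}

int main(void)
{
	int i, slave_cnt = 3;

	for (i = 0; i < 8; i++)
		printf("packet %d -> slave %u\n", i,
		       rr_gen_slave_id(2) % slave_cnt);
	return 0;
}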
@@ -3707,32 +3678,29 @@ static inline int bond_slave_override(struct bonding *bond,
struct sk_buff *skb)
{
struct slave *slave = NULL;
- struct slave *check_slave;
struct list_head *iter;
- int res = 1;
if (!skb->queue_mapping)
return 1;
/* Find out if any slaves have the same mapping as this skb. */
- bond_for_each_slave_rcu(bond, check_slave, iter) {
- if (check_slave->queue_id == skb->queue_mapping) {
- slave = check_slave;
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (slave->queue_id == skb->queue_mapping) {
+ if (slave_can_tx(slave)) {
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ return 0;
+ }
+ /* If the slave isn't UP, use default transmit policy. */
break;
}
}
- /* If the slave isn't UP, use default transmit policy. */
- if (slave && slave->queue_id && IS_UP(slave->dev) &&
- (slave->link == BOND_LINK_UP)) {
- res = bond_dev_queue_xmit(bond, skb, slave->dev);
- }
-
- return res;
+ return 1;
}
-static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
/*
* This helper function exists to help dev_pick_tx get the correct
@@ -3940,6 +3908,9 @@ void bond_setup(struct net_device *bond_dev)
* capable
*/
+ /* Don't allow bond devices to change network namespaces. */
+ bond_dev->features |= NETIF_F_NETNS_LOCAL;
+
bond_dev->hw_features = BOND_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
@@ -3973,6 +3944,29 @@ static void bond_uninit(struct net_device *bond_dev)
/*------------------------- Module initialization ---------------------------*/
+int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl)
+{
+ int i;
+
+ for (i = 0; tbl[i].modename; i++)
+ if (mode == tbl[i].mode)
+ return tbl[i].mode;
+
+ return -1;
+}
+
+static int bond_parm_tbl_lookup_name(const char *modename,
+ const struct bond_parm_tbl *tbl)
+{
+ int i;
+
+ for (i = 0; tbl[i].modename; i++)
+ if (strcmp(modename, tbl[i].modename) == 0)
+ return tbl[i].mode;
+
+ return -1;
+}
+
/*
* Convert string input module parms. Accept either the
* number of the mode or its string name. A bit complicated because
@@ -3981,27 +3975,17 @@ static void bond_uninit(struct net_device *bond_dev)
*/
int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
{
- int modeint = -1, i, rv;
- char *p, modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, };
+ int modeint;
+ char *p, modestr[BOND_MAX_MODENAME_LEN + 1];
for (p = (char *)buf; *p; p++)
if (!(isdigit(*p) || isspace(*p)))
break;
- if (*p)
- rv = sscanf(buf, "%20s", modestr);
- else
- rv = sscanf(buf, "%d", &modeint);
-
- if (!rv)
- return -1;
-
- for (i = 0; tbl[i].modename; i++) {
- if (modeint == tbl[i].mode)
- return tbl[i].mode;
- if (strcmp(modestr, tbl[i].modename) == 0)
- return tbl[i].mode;
- }
+ if (*p && sscanf(buf, "%20s", modestr) != 0)
+ return bond_parm_tbl_lookup_name(modestr, tbl);
+ else if (sscanf(buf, "%d", &modeint) != 0)
+ return bond_parm_tbl_lookup(modeint, tbl);
return -1;
}
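The rewritten bond_parse_parm() above splits the work into two helpers that walk a { modename, mode } table terminated by a NULL name and accept either the numeric mode or its string name. A self-contained sketch of that lookup-and-parse pattern, with an illustrative table rather than the bonding driver's real ones:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct parm_tbl {
	const char *modename;
	int mode;
};

/* example table, NULL-name terminated like the bonding parameter tables */
static const struct parm_tbl lacp_tbl[] = {
	{ "slow", 0 },
	{ "fast", 1 },
	{ NULL,  -1 },
};

static int tbl_lookup(int mode, const struct parm_tbl *tbl)
{
	int i;

	for (i = 0; tbl[i].modename; i++)
		if (mode == tbl[i].mode)
			return tbl[i].mode;
	return -1;
}

static int tbl_lookup_name(const char *name, const struct parm_tbl *tbl)
{
	int i;

	for (i = 0; tbl[i].modename; i++)
		if (strcmp(name, tbl[i].modename) == 0)
			return tbl[i].mode;
	return -1;
}

/* accept either the number of the mode or its string name (simplified:
 * no leading-whitespace handling, unlike the sysfs parser) */
static int parse_parm(const char *buf, const struct parm_tbl *tbl)
{
	char *end;
	long v = strtol(buf, &end, 10);

	if (end != buf && *end == '\0')
		return tbl_lookup((int)v, tbl);
	return tbl_lookup_name(buf, tbl);
}

int main(void)
{
	printf("%d %d %d\n",
	       parse_parm("fast", lacp_tbl),	/* 1 */
	       parse_parm("0", lacp_tbl),	/* 0 */
	       parse_parm("bogus", lacp_tbl));	/* -1 */
	return 0;
}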
@@ -4009,18 +3993,20 @@ int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
static int bond_check_params(struct bond_params *params)
{
int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
+ struct bond_opt_value newval, *valptr;
int arp_all_targets_value;
/*
* Convert string parameters.
*/
if (mode) {
- bond_mode = bond_parse_parm(mode, bond_mode_tbl);
- if (bond_mode == -1) {
- pr_err("Error: Invalid bonding mode \"%s\"\n",
- mode == NULL ? "NULL" : mode);
+ bond_opt_initstr(&newval, mode);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
+ if (!valptr) {
+ pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
return -EINVAL;
}
+ bond_mode = valptr->value;
}
if (xmit_hash_policy) {
@@ -4029,14 +4015,15 @@ static int bond_check_params(struct bond_params *params)
pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
bond_mode_name(bond_mode));
} else {
- xmit_hashtype = bond_parse_parm(xmit_hash_policy,
- xmit_hashtype_tbl);
- if (xmit_hashtype == -1) {
+ bond_opt_initstr(&newval, xmit_hash_policy);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
+ &newval);
+ if (!valptr) {
pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
- xmit_hash_policy == NULL ? "NULL" :
xmit_hash_policy);
return -EINVAL;
}
+ xmit_hashtype = valptr->value;
}
}
@@ -4045,26 +4032,29 @@ static int bond_check_params(struct bond_params *params)
pr_info("lacp_rate param is irrelevant in mode %s\n",
bond_mode_name(bond_mode));
} else {
- lacp_fast = bond_parse_parm(lacp_rate, bond_lacp_tbl);
- if (lacp_fast == -1) {
+ bond_opt_initstr(&newval, lacp_rate);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
+ &newval);
+ if (!valptr) {
pr_err("Error: Invalid lacp rate \"%s\"\n",
- lacp_rate == NULL ? "NULL" : lacp_rate);
+ lacp_rate);
return -EINVAL;
}
+ lacp_fast = valptr->value;
}
}
if (ad_select) {
- params->ad_select = bond_parse_parm(ad_select, ad_select_tbl);
- if (params->ad_select == -1) {
- pr_err("Error: Invalid ad_select \"%s\"\n",
- ad_select == NULL ? "NULL" : ad_select);
+ bond_opt_initstr(&newval, lacp_rate);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
+ &newval);
+ if (!valptr) {
+ pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
return -EINVAL;
}
-
- if (bond_mode != BOND_MODE_8023AD) {
+ params->ad_select = valptr->value;
+ if (bond_mode != BOND_MODE_8023AD)
pr_warning("ad_select param only affects 802.3ad mode\n");
- }
} else {
params->ad_select = BOND_AD_STABLE;
}
@@ -4076,9 +4066,9 @@ static int bond_check_params(struct bond_params *params)
}
if (miimon < 0) {
- pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to %d\n",
- miimon, INT_MAX, BOND_LINK_MON_INTERV);
- miimon = BOND_LINK_MON_INTERV;
+ pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ miimon, INT_MAX);
+ miimon = 0;
}
if (updelay < 0) {
@@ -4105,8 +4095,8 @@ static int bond_check_params(struct bond_params *params)
num_peer_notif = 1;
}
- /* reset values for 802.3ad */
- if (bond_mode == BOND_MODE_8023AD) {
+ /* reset values for 802.3ad/TLB/ALB */
+ if (BOND_NO_USES_ARP(bond_mode)) {
if (!miimon) {
pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
pr_warning("Forcing miimon to 100msec\n");
@@ -4135,22 +4125,13 @@ static int bond_check_params(struct bond_params *params)
resend_igmp = BOND_DEFAULT_RESEND_IGMP;
}
- if (packets_per_slave < 0 || packets_per_slave > USHRT_MAX) {
+ bond_opt_initval(&newval, packets_per_slave);
+ if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
packets_per_slave, USHRT_MAX);
packets_per_slave = 1;
}
- /* reset values for TLB/ALB */
- if ((bond_mode == BOND_MODE_TLB) ||
- (bond_mode == BOND_MODE_ALB)) {
- if (!miimon) {
- pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n");
- pr_warning("Forcing miimon to 100msec\n");
- miimon = BOND_DEFAULT_MIIMON;
- }
- }
-
if (bond_mode == BOND_MODE_ALB) {
pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
updelay);
@@ -4190,9 +4171,9 @@ static int bond_check_params(struct bond_params *params)
}
if (arp_interval < 0) {
- pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to %d\n",
- arp_interval, INT_MAX, BOND_LINK_ARP_INTERV);
- arp_interval = BOND_LINK_ARP_INTERV;
+ pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to 0\n",
+ arp_interval, INT_MAX);
+ arp_interval = 0;
}
for (arp_ip_count = 0, i = 0;
@@ -4231,35 +4212,40 @@ static int bond_check_params(struct bond_params *params)
return -EINVAL;
}
- arp_validate_value = bond_parse_parm(arp_validate,
- arp_validate_tbl);
- if (arp_validate_value == -1) {
+ bond_opt_initstr(&newval, arp_validate);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
+ &newval);
+ if (!valptr) {
pr_err("Error: invalid arp_validate \"%s\"\n",
- arp_validate == NULL ? "NULL" : arp_validate);
+ arp_validate);
return -EINVAL;
}
- } else
+ arp_validate_value = valptr->value;
+ } else {
arp_validate_value = 0;
+ }
arp_all_targets_value = 0;
if (arp_all_targets) {
- arp_all_targets_value = bond_parse_parm(arp_all_targets,
- arp_all_targets_tbl);
-
- if (arp_all_targets_value == -1) {
+ bond_opt_initstr(&newval, arp_all_targets);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
+ &newval);
+ if (!valptr) {
pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
arp_all_targets);
arp_all_targets_value = 0;
+ } else {
+ arp_all_targets_value = valptr->value;
}
}
if (miimon) {
pr_info("MII link monitoring set to %d ms\n", miimon);
} else if (arp_interval) {
+ valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
+ arp_validate_value);
pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
- arp_interval,
- arp_validate_tbl[arp_validate_value].modename,
- arp_ip_count);
+ arp_interval, valptr->string, arp_ip_count);
for (i = 0; i < arp_ip_count; i++)
pr_info(" %s", arp_ip_target[i]);
@@ -4283,33 +4269,41 @@ static int bond_check_params(struct bond_params *params)
}
if (primary && primary_reselect) {
- primary_reselect_value = bond_parse_parm(primary_reselect,
- pri_reselect_tbl);
- if (primary_reselect_value == -1) {
+ bond_opt_initstr(&newval, primary_reselect);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
+ &newval);
+ if (!valptr) {
pr_err("Error: Invalid primary_reselect \"%s\"\n",
- primary_reselect ==
- NULL ? "NULL" : primary_reselect);
+ primary_reselect);
return -EINVAL;
}
+ primary_reselect_value = valptr->value;
} else {
primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
}
if (fail_over_mac) {
- fail_over_mac_value = bond_parse_parm(fail_over_mac,
- fail_over_mac_tbl);
- if (fail_over_mac_value == -1) {
+ bond_opt_initstr(&newval, fail_over_mac);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
+ &newval);
+ if (!valptr) {
pr_err("Error: invalid fail_over_mac \"%s\"\n",
- arp_validate == NULL ? "NULL" : arp_validate);
+ fail_over_mac);
return -EINVAL;
}
-
+ fail_over_mac_value = valptr->value;
if (bond_mode != BOND_MODE_ACTIVEBACKUP)
pr_warning("Warning: fail_over_mac only affects active-backup mode.\n");
} else {
fail_over_mac_value = BOND_FOM_NONE;
}
+ if (lp_interval == 0) {
+ pr_warning("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
+ INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
+ lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
+ }
+
/* fill params struct with the proper values */
params->mode = bond_mode;
params->xmit_policy = xmit_hashtype;
@@ -4329,11 +4323,19 @@ static int bond_check_params(struct bond_params *params)
params->all_slaves_active = all_slaves_active;
params->resend_igmp = resend_igmp;
params->min_links = min_links;
- params->lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
- if (packets_per_slave > 1)
- params->packets_per_slave = reciprocal_value(packets_per_slave);
- else
- params->packets_per_slave = packets_per_slave;
+ params->lp_interval = lp_interval;
+ params->packets_per_slave = packets_per_slave;
+ if (packets_per_slave > 0) {
+ params->reciprocal_packets_per_slave =
+ reciprocal_value(packets_per_slave);
+ } else {
+ /* reciprocal_packets_per_slave is unused if
+ * packets_per_slave is 0 or 1, just initialize it
+ */
+ params->reciprocal_packets_per_slave =
+ (struct reciprocal_value) { 0 };
+ }
+
if (primary) {
strncpy(params->primary, primary, IFNAMSIZ);
params->primary[IFNAMSIZ - 1] = 0;
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 40e7b1cb4aea..70651f8e8e3b 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -1,6 +1,7 @@
/*
* drivers/net/bond/bond_netlink.c - Netlink interface for bonding
* Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -20,9 +21,81 @@
#include <net/rtnetlink.h>
#include "bonding.h"
+static size_t bond_get_slave_size(const struct net_device *bond_dev,
+ const struct net_device *slave_dev)
+{
+ return nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_STATE */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_MII_STATUS */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_SLAVE_LINK_FAILURE_COUNT */
+ nla_total_size(MAX_ADDR_LEN) + /* IFLA_BOND_SLAVE_PERM_HWADDR */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_QUEUE_ID */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
+ 0;
+}
+
+static int bond_fill_slave_info(struct sk_buff *skb,
+ const struct net_device *bond_dev,
+ const struct net_device *slave_dev)
+{
+ struct slave *slave = bond_slave_get_rtnl(slave_dev);
+
+ if (nla_put_u8(skb, IFLA_BOND_SLAVE_STATE, bond_slave_state(slave)))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_SLAVE_MII_STATUS, slave->link))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT,
+ slave->link_failure_count))
+ goto nla_put_failure;
+
+ if (nla_put(skb, IFLA_BOND_SLAVE_PERM_HWADDR,
+ slave_dev->addr_len, slave->perm_hwaddr))
+ goto nla_put_failure;
+
+ if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
+ goto nla_put_failure;
+
+ if (slave->bond->params.mode == BOND_MODE_8023AD) {
+ const struct aggregator *agg;
+
+ agg = SLAVE_AD_INFO(slave).port.aggregator;
+ if (agg)
+ if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
+ agg->aggregator_identifier))
+ goto nla_put_failure;
+ }
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_MODE] = { .type = NLA_U8 },
[IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
+ [IFLA_BOND_MIIMON] = { .type = NLA_U32 },
+ [IFLA_BOND_UPDELAY] = { .type = NLA_U32 },
+ [IFLA_BOND_DOWNDELAY] = { .type = NLA_U32 },
+ [IFLA_BOND_USE_CARRIER] = { .type = NLA_U8 },
+ [IFLA_BOND_ARP_INTERVAL] = { .type = NLA_U32 },
+ [IFLA_BOND_ARP_IP_TARGET] = { .type = NLA_NESTED },
+ [IFLA_BOND_ARP_VALIDATE] = { .type = NLA_U32 },
+ [IFLA_BOND_ARP_ALL_TARGETS] = { .type = NLA_U32 },
+ [IFLA_BOND_PRIMARY] = { .type = NLA_U32 },
+ [IFLA_BOND_PRIMARY_RESELECT] = { .type = NLA_U8 },
+ [IFLA_BOND_FAIL_OVER_MAC] = { .type = NLA_U8 },
+ [IFLA_BOND_XMIT_HASH_POLICY] = { .type = NLA_U8 },
+ [IFLA_BOND_RESEND_IGMP] = { .type = NLA_U32 },
+ [IFLA_BOND_NUM_PEER_NOTIF] = { .type = NLA_U8 },
+ [IFLA_BOND_ALL_SLAVES_ACTIVE] = { .type = NLA_U8 },
+ [IFLA_BOND_MIN_LINKS] = { .type = NLA_U32 },
+ [IFLA_BOND_LP_INTERVAL] = { .type = NLA_U32 },
+ [IFLA_BOND_PACKETS_PER_SLAVE] = { .type = NLA_U32 },
+ [IFLA_BOND_AD_LACP_RATE] = { .type = NLA_U8 },
+ [IFLA_BOND_AD_SELECT] = { .type = NLA_U8 },
+ [IFLA_BOND_AD_INFO] = { .type = NLA_NESTED },
};
static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
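bond_fill_slave_info() above follows the usual netlink fill convention: each nla_put_*() either appends a typed attribute to the message or fails once it no longer fits, and the whole function bails through a single nla_put_failure label returning -EMSGSIZE. A stripped-down sketch of that append-or-fail pattern over a plain byte buffer; the tlv_put_* helpers and attribute numbers are invented for illustration and are not the netlink API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msg {
	uint8_t buf[16];	/* deliberately tiny so the last put fails */
	size_t len;
};

/* append a type/length/value triple, or fail when the buffer is full */
static int tlv_put(struct msg *m, uint8_t type, const void *data, uint8_t len)
{
	if (m->len + 2 + len > sizeof(m->buf))
		return -1;
	m->buf[m->len++] = type;
	m->buf[m->len++] = len;
	memcpy(m->buf + m->len, data, len);
	m->len += len;
	return 0;
}

static int tlv_put_u8(struct msg *m, uint8_t type, uint8_t v)
{
	return tlv_put(m, type, &v, sizeof(v));
}

static int tlv_put_u32(struct msg *m, uint8_t type, uint32_t v)
{
	return tlv_put(m, type, &v, sizeof(v));
}

/* mirrors the goto nla_put_failure structure of the fill routine */
static int fill_slave_info(struct msg *m)
{
	uint8_t hwaddr[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	if (tlv_put_u8(m, 1, 0))			/* state */
		goto put_failure;
	if (tlv_put_u8(m, 2, 1))			/* MII status */
		goto put_failure;
	if (tlv_put_u32(m, 3, 0))			/* link failures */
		goto put_failure;
	if (tlv_put(m, 4, hwaddr, sizeof(hwaddr)))	/* permanent hwaddr */
		goto put_failure;
	return 0;

put_failure:
	return -90;	/* stands in for -EMSGSIZE */
}

int main(void)
{
	struct msg m = { .len = 0 };

	printf("fill: %d, used %zu bytes\n", fill_slave_info(&m), m.len);
	return 0;
}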
@@ -40,28 +113,238 @@ static int bond_changelink(struct net_device *bond_dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct bond_opt_value newval;
+ int miimon = 0;
int err;
- if (data && data[IFLA_BOND_MODE]) {
+ if (!data)
+ return 0;
+
+ if (data[IFLA_BOND_MODE]) {
int mode = nla_get_u8(data[IFLA_BOND_MODE]);
- err = bond_option_mode_set(bond, mode);
+ bond_opt_initval(&newval, mode);
+ err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
if (err)
return err;
}
- if (data && data[IFLA_BOND_ACTIVE_SLAVE]) {
+ if (data[IFLA_BOND_ACTIVE_SLAVE]) {
int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
struct net_device *slave_dev;
+ char *active_slave = "";
- if (ifindex == 0) {
- slave_dev = NULL;
- } else {
+ if (ifindex != 0) {
slave_dev = __dev_get_by_index(dev_net(bond_dev),
ifindex);
if (!slave_dev)
return -ENODEV;
+ active_slave = slave_dev->name;
+ }
+ bond_opt_initstr(&newval, active_slave);
+ err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_MIIMON]) {
+ miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
+
+ bond_opt_initval(&newval, miimon);
+ err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_UPDELAY]) {
+ int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
+
+ bond_opt_initval(&newval, updelay);
+ err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_DOWNDELAY]) {
+ int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
+
+ bond_opt_initval(&newval, downdelay);
+ err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_USE_CARRIER]) {
+ int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
+
+ bond_opt_initval(&newval, use_carrier);
+ err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ARP_INTERVAL]) {
+ int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
+
+ if (arp_interval && miimon) {
+ pr_err("%s: ARP monitoring cannot be used with MII monitoring.\n",
+ bond->dev->name);
+ return -EINVAL;
}
- err = bond_option_active_slave_set(bond, slave_dev);
+
+ bond_opt_initval(&newval, arp_interval);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ARP_IP_TARGET]) {
+ struct nlattr *attr;
+ int i = 0, rem;
+
+ bond_option_arp_ip_targets_clear(bond);
+ nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
+ __be32 target = nla_get_be32(attr);
+
+ bond_opt_initval(&newval, target);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
+ &newval);
+ if (err)
+ break;
+ i++;
+ }
+ if (i == 0 && bond->params.arp_interval)
+ pr_warn("%s: removing last arp target with arp_interval on\n",
+ bond->dev->name);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ARP_VALIDATE]) {
+ int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
+
+ if (arp_validate && miimon) {
+ pr_err("%s: ARP validating cannot be used with MII monitoring.\n",
+ bond->dev->name);
+ return -EINVAL;
+ }
+
+ bond_opt_initval(&newval, arp_validate);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
+ int arp_all_targets =
+ nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
+
+ bond_opt_initval(&newval, arp_all_targets);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_PRIMARY]) {
+ int ifindex = nla_get_u32(data[IFLA_BOND_PRIMARY]);
+ struct net_device *dev;
+ char *primary = "";
+
+ dev = __dev_get_by_index(dev_net(bond_dev), ifindex);
+ if (dev)
+ primary = dev->name;
+
+ bond_opt_initstr(&newval, primary);
+ err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_PRIMARY_RESELECT]) {
+ int primary_reselect =
+ nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
+
+ bond_opt_initval(&newval, primary_reselect);
+ err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_FAIL_OVER_MAC]) {
+ int fail_over_mac =
+ nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
+
+ bond_opt_initval(&newval, fail_over_mac);
+ err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_XMIT_HASH_POLICY]) {
+ int xmit_hash_policy =
+ nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
+
+ bond_opt_initval(&newval, xmit_hash_policy);
+ err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_RESEND_IGMP]) {
+ int resend_igmp =
+ nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
+
+ bond_opt_initval(&newval, resend_igmp);
+ err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_NUM_PEER_NOTIF]) {
+ int num_peer_notif =
+ nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
+
+ bond_opt_initval(&newval, num_peer_notif);
+ err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ALL_SLAVES_ACTIVE]) {
+ int all_slaves_active =
+ nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
+
+ bond_opt_initval(&newval, all_slaves_active);
+ err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_MIN_LINKS]) {
+ int min_links =
+ nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
+
+ bond_opt_initval(&newval, min_links);
+ err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_LP_INTERVAL]) {
+ int lp_interval =
+ nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
+
+ bond_opt_initval(&newval, lp_interval);
+ err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_PACKETS_PER_SLAVE]) {
+ int packets_per_slave =
+ nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
+
+ bond_opt_initval(&newval, packets_per_slave);
+ err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_AD_LACP_RATE]) {
+ int lacp_rate =
+ nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
+
+ bond_opt_initval(&newval, lacp_rate);
+ err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_AD_SELECT]) {
+ int ad_select =
+ nla_get_u8(data[IFLA_BOND_AD_SELECT]);
+
+ bond_opt_initval(&newval, ad_select);
+ err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval);
if (err)
return err;
}
@@ -83,7 +366,36 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
static size_t bond_get_size(const struct net_device *bond_dev)
{
return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
- nla_total_size(sizeof(u32)); /* IFLA_BOND_ACTIVE_SLAVE */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_ACTIVE_SLAVE */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIIMON */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_UPDELAY */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_DOWNDELAY */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_USE_CARRIER */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_INTERVAL */
+ /* IFLA_BOND_ARP_IP_TARGET */
+ nla_total_size(sizeof(struct nlattr)) +
+ nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_VALIDATE */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_ALL_TARGETS */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_PRIMARY */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_PRIMARY_RESELECT */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_FAIL_OVER_MAC */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_XMIT_HASH_POLICY */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_RESEND_IGMP */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_NUM_PEER_NOTIF */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_ALL_SLAVES_ACTIVE */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIN_LINKS */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_LP_INTERVAL */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_PACKETS_PER_SLAVE */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_RATE */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_SELECT */
+ nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_AGGREGATOR */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_NUM_PORTS */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
+ nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
+ 0;
}
static int bond_fill_info(struct sk_buff *skb,
@@ -91,11 +403,139 @@ static int bond_fill_info(struct sk_buff *skb,
{
struct bonding *bond = netdev_priv(bond_dev);
struct net_device *slave_dev = bond_option_active_slave_get(bond);
+ struct nlattr *targets;
+ unsigned int packets_per_slave;
+ int i, targets_added;
+
+ if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode))
+ goto nla_put_failure;
+
+ if (slave_dev &&
+ nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
+ bond->params.updelay * bond->params.miimon))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
+ bond->params.downdelay * bond->params.miimon))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
+ goto nla_put_failure;
- if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode) ||
- (slave_dev &&
- nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex)))
+ if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
goto nla_put_failure;
+
+ targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
+ if (!targets)
+ goto nla_put_failure;
+
+ targets_added = 0;
+ for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
+ if (bond->params.arp_targets[i]) {
+ nla_put_be32(skb, i, bond->params.arp_targets[i]);
+ targets_added = 1;
+ }
+ }
+
+ if (targets_added)
+ nla_nest_end(skb, targets);
+ else
+ nla_nest_cancel(skb, targets);
+
+ if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
+ bond->params.arp_all_targets))
+ goto nla_put_failure;
+
+ if (bond->primary_slave &&
+ nla_put_u32(skb, IFLA_BOND_PRIMARY,
+ bond->primary_slave->dev->ifindex))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
+ bond->params.primary_reselect))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
+ bond->params.fail_over_mac))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
+ bond->params.xmit_policy))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
+ bond->params.resend_igmp))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
+ bond->params.num_peer_notif))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
+ bond->params.all_slaves_active))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
+ bond->params.min_links))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
+ bond->params.lp_interval))
+ goto nla_put_failure;
+
+ packets_per_slave = bond->params.packets_per_slave;
+ if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
+ packets_per_slave))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
+ bond->params.lacp_fast))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
+ bond->params.ad_select))
+ goto nla_put_failure;
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ struct ad_info info;
+
+ if (!bond_3ad_get_active_agg_info(bond, &info)) {
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
+ if (!nest)
+ goto nla_put_failure;
+
+ if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
+ info.aggregator_id))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
+ info.ports))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
+ info.actor_key))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
+ info.partner_key))
+ goto nla_put_failure;
+ if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
+ sizeof(info.partner_system),
+ &info.partner_system))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+ }
+ }
+
return 0;
nla_put_failure:
@@ -116,6 +556,8 @@ struct rtnl_link_ops bond_link_ops __read_mostly = {
.get_num_tx_queues = bond_get_num_tx_queues,
.get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
as for TX queues */
+ .get_slave_size = bond_get_slave_size,
+ .fill_slave_info = bond_fill_slave_info,
};
int __init bond_netlink_init(void)
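
Every attribute handled in bond_changelink() above follows the same three-step pattern: read the netlink value, wrap it in a bond_opt_value via bond_opt_initval()/bond_opt_initstr(), and forward it to __bond_opt_set(), which performs the dependency checks and the actual set. A condensed, hedged sketch of that recurring pattern is shown below; IFLA_BOND_EXAMPLE and BOND_OPT_EXAMPLE are hypothetical placeholders, not attributes added by this patch, and the fragment assumes the surrounding bond_changelink() context (the bond, data, newval and err variables seen in the hunk above).

	/* Hedged sketch of the recurring bond_changelink() pattern; the
	 * attribute and option ids are placeholders for illustration only.
	 */
	if (data[IFLA_BOND_EXAMPLE]) {
		int example = nla_get_u32(data[IFLA_BOND_EXAMPLE]);

		bond_opt_initval(&newval, example);
		err = __bond_opt_set(bond, BOND_OPT_EXAMPLE, &newval);
		if (err)
			return err;
	}
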
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index ea6f640782b7..11cb943222d5 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1,6 +1,7 @@
/*
* drivers/net/bond/bond_options.c - bonding options
* Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -15,39 +16,575 @@
#include <linux/netdevice.h>
#include <linux/rwlock.h>
#include <linux/rcupdate.h>
+#include <linux/ctype.h>
+#include <linux/inet.h>
#include "bonding.h"
-static bool bond_mode_is_valid(int mode)
+static struct bond_opt_value bond_mode_tbl[] = {
+ { "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT},
+ { "active-backup", BOND_MODE_ACTIVEBACKUP, 0},
+ { "balance-xor", BOND_MODE_XOR, 0},
+ { "broadcast", BOND_MODE_BROADCAST, 0},
+ { "802.3ad", BOND_MODE_8023AD, 0},
+ { "balance-tlb", BOND_MODE_TLB, 0},
+ { "balance-alb", BOND_MODE_ALB, 0},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_pps_tbl[] = {
+ { "default", 1, BOND_VALFLAG_DEFAULT},
+ { "maxval", USHRT_MAX, BOND_VALFLAG_MAX},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_xmit_hashtype_tbl[] = {
+ { "layer2", BOND_XMIT_POLICY_LAYER2, BOND_VALFLAG_DEFAULT},
+ { "layer3+4", BOND_XMIT_POLICY_LAYER34, 0},
+ { "layer2+3", BOND_XMIT_POLICY_LAYER23, 0},
+ { "encap2+3", BOND_XMIT_POLICY_ENCAP23, 0},
+ { "encap3+4", BOND_XMIT_POLICY_ENCAP34, 0},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_arp_validate_tbl[] = {
+ { "none", BOND_ARP_VALIDATE_NONE, BOND_VALFLAG_DEFAULT},
+ { "active", BOND_ARP_VALIDATE_ACTIVE, 0},
+ { "backup", BOND_ARP_VALIDATE_BACKUP, 0},
+ { "all", BOND_ARP_VALIDATE_ALL, 0},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_arp_all_targets_tbl[] = {
+ { "any", BOND_ARP_TARGETS_ANY, BOND_VALFLAG_DEFAULT},
+ { "all", BOND_ARP_TARGETS_ALL, 0},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_fail_over_mac_tbl[] = {
+ { "none", BOND_FOM_NONE, BOND_VALFLAG_DEFAULT},
+ { "active", BOND_FOM_ACTIVE, 0},
+ { "follow", BOND_FOM_FOLLOW, 0},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_intmax_tbl[] = {
+ { "off", 0, BOND_VALFLAG_DEFAULT},
+ { "maxval", INT_MAX, BOND_VALFLAG_MAX},
+};
+
+static struct bond_opt_value bond_lacp_rate_tbl[] = {
+ { "slow", AD_LACP_SLOW, 0},
+ { "fast", AD_LACP_FAST, 0},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_ad_select_tbl[] = {
+ { "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT},
+ { "bandwidth", BOND_AD_BANDWIDTH, 0},
+ { "count", BOND_AD_COUNT, 0},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_num_peer_notif_tbl[] = {
+ { "off", 0, 0},
+ { "maxval", 255, BOND_VALFLAG_MAX},
+ { "default", 1, BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0}
+};
+
+static struct bond_opt_value bond_primary_reselect_tbl[] = {
+ { "always", BOND_PRI_RESELECT_ALWAYS, BOND_VALFLAG_DEFAULT},
+ { "better", BOND_PRI_RESELECT_BETTER, 0},
+ { "failure", BOND_PRI_RESELECT_FAILURE, 0},
+ { NULL, -1},
+};
+
+static struct bond_opt_value bond_use_carrier_tbl[] = {
+ { "off", 0, 0},
+ { "on", 1, BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0}
+};
+
+static struct bond_opt_value bond_all_slaves_active_tbl[] = {
+ { "off", 0, BOND_VALFLAG_DEFAULT},
+ { "on", 1, 0},
+ { NULL, -1, 0}
+};
+
+static struct bond_opt_value bond_resend_igmp_tbl[] = {
+ { "off", 0, 0},
+ { "maxval", 255, BOND_VALFLAG_MAX},
+ { "default", 1, BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0}
+};
+
+static struct bond_opt_value bond_lp_interval_tbl[] = {
+ { "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
+ { "maxval", INT_MAX, BOND_VALFLAG_MAX},
+};
+
+static struct bond_option bond_opts[] = {
+ [BOND_OPT_MODE] = {
+ .id = BOND_OPT_MODE,
+ .name = "mode",
+ .desc = "bond device mode",
+ .flags = BOND_OPTFLAG_NOSLAVES | BOND_OPTFLAG_IFDOWN,
+ .values = bond_mode_tbl,
+ .set = bond_option_mode_set
+ },
+ [BOND_OPT_PACKETS_PER_SLAVE] = {
+ .id = BOND_OPT_PACKETS_PER_SLAVE,
+ .name = "packets_per_slave",
+ .desc = "Packets to send per slave in RR mode",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ROUNDROBIN)),
+ .values = bond_pps_tbl,
+ .set = bond_option_pps_set
+ },
+ [BOND_OPT_XMIT_HASH] = {
+ .id = BOND_OPT_XMIT_HASH,
+ .name = "xmit_hash_policy",
+ .desc = "balance-xor and 802.3ad hashing method",
+ .values = bond_xmit_hashtype_tbl,
+ .set = bond_option_xmit_hash_policy_set
+ },
+ [BOND_OPT_ARP_VALIDATE] = {
+ .id = BOND_OPT_ARP_VALIDATE,
+ .name = "arp_validate",
+ .desc = "validate src/dst of ARP probes",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP)),
+ .values = bond_arp_validate_tbl,
+ .set = bond_option_arp_validate_set
+ },
+ [BOND_OPT_ARP_ALL_TARGETS] = {
+ .id = BOND_OPT_ARP_ALL_TARGETS,
+ .name = "arp_all_targets",
+ .desc = "fail on any/all arp targets timeout",
+ .values = bond_arp_all_targets_tbl,
+ .set = bond_option_arp_all_targets_set
+ },
+ [BOND_OPT_FAIL_OVER_MAC] = {
+ .id = BOND_OPT_FAIL_OVER_MAC,
+ .name = "fail_over_mac",
+ .desc = "For active-backup, do not set all slaves to the same MAC",
+ .flags = BOND_OPTFLAG_NOSLAVES,
+ .values = bond_fail_over_mac_tbl,
+ .set = bond_option_fail_over_mac_set
+ },
+ [BOND_OPT_ARP_INTERVAL] = {
+ .id = BOND_OPT_ARP_INTERVAL,
+ .name = "arp_interval",
+ .desc = "arp interval in milliseconds",
+ .unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB),
+ .values = bond_intmax_tbl,
+ .set = bond_option_arp_interval_set
+ },
+ [BOND_OPT_ARP_TARGETS] = {
+ .id = BOND_OPT_ARP_TARGETS,
+ .name = "arp_ip_target",
+ .desc = "arp targets in n.n.n.n form",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .set = bond_option_arp_ip_targets_set
+ },
+ [BOND_OPT_DOWNDELAY] = {
+ .id = BOND_OPT_DOWNDELAY,
+ .name = "downdelay",
+ .desc = "Delay before considering link down, in milliseconds",
+ .values = bond_intmax_tbl,
+ .set = bond_option_downdelay_set
+ },
+ [BOND_OPT_UPDELAY] = {
+ .id = BOND_OPT_UPDELAY,
+ .name = "updelay",
+ .desc = "Delay before considering link up, in milliseconds",
+ .values = bond_intmax_tbl,
+ .set = bond_option_updelay_set
+ },
+ [BOND_OPT_LACP_RATE] = {
+ .id = BOND_OPT_LACP_RATE,
+ .name = "lacp_rate",
+ .desc = "LACPDU tx rate to request from 802.3ad partner",
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .values = bond_lacp_rate_tbl,
+ .set = bond_option_lacp_rate_set
+ },
+ [BOND_OPT_MINLINKS] = {
+ .id = BOND_OPT_MINLINKS,
+ .name = "min_links",
+ .desc = "Minimum number of available links before turning on carrier",
+ .values = bond_intmax_tbl,
+ .set = bond_option_min_links_set
+ },
+ [BOND_OPT_AD_SELECT] = {
+ .id = BOND_OPT_AD_SELECT,
+ .name = "ad_select",
+ .desc = "802.3ad aggregation selection logic",
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .values = bond_ad_select_tbl,
+ .set = bond_option_ad_select_set
+ },
+ [BOND_OPT_NUM_PEER_NOTIF] = {
+ .id = BOND_OPT_NUM_PEER_NOTIF,
+ .name = "num_unsol_na",
+ .desc = "Number of peer notifications to send on failover event",
+ .values = bond_num_peer_notif_tbl,
+ .set = bond_option_num_peer_notif_set
+ },
+ [BOND_OPT_MIIMON] = {
+ .id = BOND_OPT_MIIMON,
+ .name = "miimon",
+ .desc = "Link check interval in milliseconds",
+ .values = bond_intmax_tbl,
+ .set = bond_option_miimon_set
+ },
+ [BOND_OPT_PRIMARY] = {
+ .id = BOND_OPT_PRIMARY,
+ .name = "primary",
+ .desc = "Primary network device to use",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP) |
+ BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB)),
+ .set = bond_option_primary_set
+ },
+ [BOND_OPT_PRIMARY_RESELECT] = {
+ .id = BOND_OPT_PRIMARY_RESELECT,
+ .name = "primary_reselect",
+ .desc = "Reselect primary slave once it comes up",
+ .values = bond_primary_reselect_tbl,
+ .set = bond_option_primary_reselect_set
+ },
+ [BOND_OPT_USE_CARRIER] = {
+ .id = BOND_OPT_USE_CARRIER,
+ .name = "use_carrier",
+ .desc = "Use netif_carrier_ok (vs MII ioctls) in miimon",
+ .values = bond_use_carrier_tbl,
+ .set = bond_option_use_carrier_set
+ },
+ [BOND_OPT_ACTIVE_SLAVE] = {
+ .id = BOND_OPT_ACTIVE_SLAVE,
+ .name = "active_slave",
+ .desc = "Currently active slave",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP) |
+ BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB)),
+ .set = bond_option_active_slave_set
+ },
+ [BOND_OPT_QUEUE_ID] = {
+ .id = BOND_OPT_QUEUE_ID,
+ .name = "queue_id",
+ .desc = "Set queue id of a slave",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .set = bond_option_queue_id_set
+ },
+ [BOND_OPT_ALL_SLAVES_ACTIVE] = {
+ .id = BOND_OPT_ALL_SLAVES_ACTIVE,
+ .name = "all_slaves_active",
+ .desc = "Keep all frames received on an interface by setting active flag for all slaves",
+ .values = bond_all_slaves_active_tbl,
+ .set = bond_option_all_slaves_active_set
+ },
+ [BOND_OPT_RESEND_IGMP] = {
+ .id = BOND_OPT_RESEND_IGMP,
+ .name = "resend_igmp",
+ .desc = "Number of IGMP membership reports to send on link failure",
+ .values = bond_resend_igmp_tbl,
+ .set = bond_option_resend_igmp_set
+ },
+ [BOND_OPT_LP_INTERVAL] = {
+ .id = BOND_OPT_LP_INTERVAL,
+ .name = "lp_interval",
+ .desc = "The number of seconds between instances where the bonding driver sends learning packets to each slave's peer switch",
+ .values = bond_lp_interval_tbl,
+ .set = bond_option_lp_interval_set
+ },
+ [BOND_OPT_SLAVES] = {
+ .id = BOND_OPT_SLAVES,
+ .name = "slaves",
+ .desc = "Slave membership management",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .set = bond_option_slaves_set
+ },
+ { }
+};
+
+/* Searches for a value in opt's values[] table */
+struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
{
+ struct bond_option *opt;
int i;
- for (i = 0; bond_mode_tbl[i].modename; i++);
+ opt = bond_opt_get(option);
+ if (WARN_ON(!opt))
+ return NULL;
+ for (i = 0; opt->values && opt->values[i].string; i++)
+ if (opt->values[i].value == val)
+ return &opt->values[i];
- return mode >= 0 && mode < i;
+ return NULL;
}
-int bond_option_mode_set(struct bonding *bond, int mode)
+/* Searches for a value in opt's values[] table which matches the flagmask */
+static struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt,
+ u32 flagmask)
{
- if (!bond_mode_is_valid(mode)) {
- pr_err("invalid mode value %d.\n", mode);
- return -EINVAL;
+ int i;
+
+ for (i = 0; opt->values && opt->values[i].string; i++)
+ if (opt->values[i].flags & flagmask)
+ return &opt->values[i];
+
+ return NULL;
+}
+
+/* If maxval is missing then there's no range to check. In case minval is
+ * missing then it's considered to be 0.
+ */
+static bool bond_opt_check_range(const struct bond_option *opt, u64 val)
+{
+ struct bond_opt_value *minval, *maxval;
+
+ minval = bond_opt_get_flags(opt, BOND_VALFLAG_MIN);
+ maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
+ if (!maxval || (minval && val < minval->value) || val > maxval->value)
+ return false;
+
+ return true;
+}
+
+/**
+ * bond_opt_parse - parse option value
+ * @opt: the option to parse against
+ * @val: value to parse
+ *
+ * This function tries to extract the value from @val and check if it's
+ * a possible match for the option and returns NULL if a match isn't found,
+ * or the struct_opt_value that matched. It also strips the new line from
+ * @val->string if it's present.
+ */
+struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
+ struct bond_opt_value *val)
+{
+ char *p, valstr[BOND_OPT_MAX_NAMELEN + 1] = { 0, };
+ struct bond_opt_value *tbl, *ret = NULL;
+ bool checkval;
+ int i, rv;
+
+ /* No parsing if the option wants a raw val */
+ if (opt->flags & BOND_OPTFLAG_RAWVAL)
+ return val;
+
+ tbl = opt->values;
+ if (!tbl)
+ goto out;
+
+ /* ULLONG_MAX is used to bypass string processing */
+ checkval = val->value != ULLONG_MAX;
+ if (!checkval) {
+ if (!val->string)
+ goto out;
+ p = strchr(val->string, '\n');
+ if (p)
+ *p = '\0';
+ for (p = val->string; *p; p++)
+ if (!(isdigit(*p) || isspace(*p)))
+ break;
+ /* The following code extracts the string to match or the value
+ * and sets checkval appropriately
+ */
+ if (*p) {
+ rv = sscanf(val->string, "%32s", valstr);
+ } else {
+ rv = sscanf(val->string, "%llu", &val->value);
+ checkval = true;
+ }
+ if (!rv)
+ goto out;
}
- if (bond->dev->flags & IFF_UP) {
- pr_err("%s: unable to update mode because interface is up.\n",
- bond->dev->name);
- return -EPERM;
+ for (i = 0; tbl[i].string; i++) {
+ /* Check for exact match */
+ if (checkval) {
+ if (val->value == tbl[i].value)
+ ret = &tbl[i];
+ } else {
+ if (!strcmp(valstr, "default") &&
+ (tbl[i].flags & BOND_VALFLAG_DEFAULT))
+ ret = &tbl[i];
+
+ if (!strcmp(valstr, tbl[i].string))
+ ret = &tbl[i];
+ }
+ /* Found an exact match */
+ if (ret)
+ goto out;
}
+ /* Possible range match */
+ if (checkval && bond_opt_check_range(opt, val->value))
+ ret = val;
+out:
+ return ret;
+}
- if (bond_has_slaves(bond)) {
- pr_err("%s: unable to update mode because bond has slaves.\n",
- bond->dev->name);
- return -EPERM;
+/* Check opt's dependencies against bond mode and currently set options */
+static int bond_opt_check_deps(struct bonding *bond,
+ const struct bond_option *opt)
+{
+ struct bond_params *params = &bond->params;
+
+ if (test_bit(params->mode, &opt->unsuppmodes))
+ return -EACCES;
+ if ((opt->flags & BOND_OPTFLAG_NOSLAVES) && bond_has_slaves(bond))
+ return -ENOTEMPTY;
+ if ((opt->flags & BOND_OPTFLAG_IFDOWN) && (bond->dev->flags & IFF_UP))
+ return -EBUSY;
+
+ return 0;
+}
+
+static void bond_opt_dep_print(struct bonding *bond,
+ const struct bond_option *opt)
+{
+ struct bond_opt_value *modeval;
+ struct bond_params *params;
+
+ params = &bond->params;
+ modeval = bond_opt_get_val(BOND_OPT_MODE, params->mode);
+ if (test_bit(params->mode, &opt->unsuppmodes))
+ pr_err("%s: option %s: mode dependency failed, not supported in mode %s(%llu)\n",
+ bond->dev->name, opt->name,
+ modeval->string, modeval->value);
+}
+
+static void bond_opt_error_interpret(struct bonding *bond,
+ const struct bond_option *opt,
+ int error, struct bond_opt_value *val)
+{
+ struct bond_opt_value *minval, *maxval;
+ char *p;
+
+ switch (error) {
+ case -EINVAL:
+ if (val) {
+ if (val->string) {
+ /* sometimes RAWVAL opts may have new lines */
+ p = strchr(val->string, '\n');
+ if (p)
+ *p = '\0';
+ pr_err("%s: option %s: invalid value (%s).\n",
+ bond->dev->name, opt->name, val->string);
+ } else {
+ pr_err("%s: option %s: invalid value (%llu).\n",
+ bond->dev->name, opt->name, val->value);
+ }
+ }
+ minval = bond_opt_get_flags(opt, BOND_VALFLAG_MIN);
+ maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
+ if (!maxval)
+ break;
+ pr_err("%s: option %s: allowed values %llu - %llu.\n",
+ bond->dev->name, opt->name, minval ? minval->value : 0,
+ maxval->value);
+ break;
+ case -EACCES:
+ bond_opt_dep_print(bond, opt);
+ break;
+ case -ENOTEMPTY:
+ pr_err("%s: option %s: unable to set because the bond device has slaves.\n",
+ bond->dev->name, opt->name);
+ break;
+ case -EBUSY:
+ pr_err("%s: option %s: unable to set because the bond device is up.\n",
+ bond->dev->name, opt->name);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * __bond_opt_set - set a bonding option
+ * @bond: target bond device
+ * @option: option to set
+ * @val: value to set it to
+ *
+ * This function is used to change the bond's option value, it can be
+ * used for both enabling/changing an option and for disabling it. RTNL lock
+ * must be obtained before calling this function.
+ */
+int __bond_opt_set(struct bonding *bond,
+ unsigned int option, struct bond_opt_value *val)
+{
+ struct bond_opt_value *retval = NULL;
+ const struct bond_option *opt;
+ int ret = -ENOENT;
+
+ ASSERT_RTNL();
+
+ opt = bond_opt_get(option);
+ if (WARN_ON(!val) || WARN_ON(!opt))
+ goto out;
+ ret = bond_opt_check_deps(bond, opt);
+ if (ret)
+ goto out;
+ retval = bond_opt_parse(opt, val);
+ if (!retval) {
+ ret = -EINVAL;
+ goto out;
}
+ ret = opt->set(bond, retval);
+out:
+ if (ret)
+ bond_opt_error_interpret(bond, opt, ret, val);
- if (BOND_NO_USES_ARP(mode) && bond->params.arp_interval) {
+ return ret;
+}
+
+/**
+ * bond_opt_tryset_rtnl - try to acquire rtnl and call __bond_opt_set
+ * @bond: target bond device
+ * @option: option to set
+ * @buf: value to set it to
+ *
+ * This function tries to acquire RTNL without blocking and if successful
+ * calls __bond_opt_set. It is mainly used for sysfs option manipulation.
+ */
+int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf)
+{
+ struct bond_opt_value optval;
+ int ret;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+ bond_opt_initstr(&optval, buf);
+ ret = __bond_opt_set(bond, option, &optval);
+ rtnl_unlock();
+
+ return ret;
+}
+
+/**
+ * bond_opt_get - get a pointer to an option
+ * @option: option for which to return a pointer
+ *
+ * This function checks if option is valid and if so returns a pointer
+ * to its entry in the bond_opts[] option array.
+ */
+struct bond_option *bond_opt_get(unsigned int option)
+{
+ if (!BOND_OPT_VALID(option))
+ return NULL;
+
+ return &bond_opts[option];
+}
+
+int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+ if (BOND_NO_USES_ARP(newval->value) && bond->params.arp_interval) {
pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
- bond->dev->name, bond_mode_tbl[mode].modename);
+ bond->dev->name, newval->string);
/* disable arp monitoring */
bond->params.arp_interval = 0;
/* set miimon to default value */
@@ -58,7 +595,8 @@ int bond_option_mode_set(struct bonding *bond, int mode)
/* don't cache arp_validate between modes */
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
- bond->params.mode = mode;
+ bond->params.mode = newval->value;
+
return 0;
}
@@ -81,10 +619,21 @@ struct net_device *bond_option_active_slave_get(struct bonding *bond)
}
int bond_option_active_slave_set(struct bonding *bond,
- struct net_device *slave_dev)
+ struct bond_opt_value *newval)
{
+ char ifname[IFNAMSIZ] = { 0, };
+ struct net_device *slave_dev;
int ret = 0;
+ sscanf(newval->string, "%15s", ifname); /* IFNAMSIZ */
+ if (!strlen(ifname) || newval->string[0] == '\n') {
+ slave_dev = NULL;
+ } else {
+ slave_dev = __dev_get_by_name(dev_net(bond->dev), ifname);
+ if (!slave_dev)
+ return -ENODEV;
+ }
+
if (slave_dev) {
if (!netif_is_bond_slave(slave_dev)) {
pr_err("Device %s is not bonding slave.\n",
@@ -99,14 +648,7 @@ int bond_option_active_slave_set(struct bonding *bond,
}
}
- if (!USES_PRIMARY(bond->params.mode)) {
- pr_err("%s: Unable to change active slave; %s is in mode %d\n",
- bond->dev->name, bond->dev->name, bond->params.mode);
- return -EINVAL;
- }
-
block_netpoll_tx();
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
/* check to see if we are clearing active */
@@ -141,7 +683,604 @@ int bond_option_active_slave_set(struct bonding *bond,
}
write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
+
+ return ret;
+}
+
+int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting MII monitoring interval to %llu.\n",
+ bond->dev->name, newval->value);
+ bond->params.miimon = newval->value;
+ if (bond->params.updelay)
+ pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+ bond->dev->name,
+ bond->params.updelay * bond->params.miimon);
+ if (bond->params.downdelay)
+ pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+ bond->dev->name,
+ bond->params.downdelay * bond->params.miimon);
+ if (newval->value && bond->params.arp_interval) {
+ pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+ bond->dev->name);
+ bond->params.arp_interval = 0;
+ if (bond->params.arp_validate)
+ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+ }
+ if (bond->dev->flags & IFF_UP) {
+ /* If the interface is up, we may need to fire off
+ * the MII timer. If the interface is down, the
+ * timer will get fired off when the open function
+ * is called.
+ */
+ if (!newval->value) {
+ cancel_delayed_work_sync(&bond->mii_work);
+ } else {
+ cancel_delayed_work_sync(&bond->arp_work);
+ queue_delayed_work(bond->wq, &bond->mii_work, 0);
+ }
+ }
+
+ return 0;
+}
+
+int bond_option_updelay_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+ int value = newval->value;
+
+ if (!bond->params.miimon) {
+ pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+ if ((value % bond->params.miimon) != 0) {
+ pr_warn("%s: Warning: up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
+ bond->dev->name, value,
+ bond->params.miimon,
+ (value / bond->params.miimon) *
+ bond->params.miimon);
+ }
+ bond->params.updelay = value / bond->params.miimon;
+ pr_info("%s: Setting up delay to %d.\n",
+ bond->dev->name,
+ bond->params.updelay * bond->params.miimon);
+
+ return 0;
+}
+
+int bond_option_downdelay_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ int value = newval->value;
+
+ if (!bond->params.miimon) {
+ pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+ if ((value % bond->params.miimon) != 0) {
+ pr_warn("%s: Warning: down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
+ bond->dev->name, value,
+ bond->params.miimon,
+ (value / bond->params.miimon) *
+ bond->params.miimon);
+ }
+ bond->params.downdelay = value / bond->params.miimon;
+ pr_info("%s: Setting down delay to %d.\n",
+ bond->dev->name,
+ bond->params.downdelay * bond->params.miimon);
+
+ return 0;
+}
+
+int bond_option_use_carrier_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting use_carrier to %llu.\n",
+ bond->dev->name, newval->value);
+ bond->params.use_carrier = newval->value;
+
+ return 0;
+}
+
+int bond_option_arp_interval_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting ARP monitoring interval to %llu.\n",
+ bond->dev->name, newval->value);
+ bond->params.arp_interval = newval->value;
+ if (newval->value) {
+ if (bond->params.miimon) {
+ pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+ bond->dev->name, bond->dev->name);
+ bond->params.miimon = 0;
+ }
+ if (!bond->params.arp_targets[0])
+ pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+ bond->dev->name);
+ }
+ if (bond->dev->flags & IFF_UP) {
+ /* If the interface is up, we may need to fire off
+ * the ARP timer. If the interface is down, the
+ * timer will get fired off when the open function
+ * is called.
+ */
+ if (!newval->value) {
+ if (bond->params.arp_validate)
+ bond->recv_probe = NULL;
+ cancel_delayed_work_sync(&bond->arp_work);
+ } else {
+ /* arp_validate can be set only in active-backup mode */
+ if (bond->params.arp_validate)
+ bond->recv_probe = bond_arp_rcv;
+ cancel_delayed_work_sync(&bond->mii_work);
+ queue_delayed_work(bond->wq, &bond->arp_work, 0);
+ }
+ }
+
+ return 0;
+}
+
+static void _bond_options_arp_ip_target_set(struct bonding *bond, int slot,
+ __be32 target,
+ unsigned long last_rx)
+{
+ __be32 *targets = bond->params.arp_targets;
+ struct list_head *iter;
+ struct slave *slave;
+
+ if (slot >= 0 && slot < BOND_MAX_ARP_TARGETS) {
+ bond_for_each_slave(bond, slave, iter)
+ slave->target_last_arp_rx[slot] = last_rx;
+ targets[slot] = target;
+ }
+}
+
+static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
+{
+ __be32 *targets = bond->params.arp_targets;
+ int ind;
+
+ if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+ pr_err("%s: invalid ARP target %pI4 specified for addition\n",
+ bond->dev->name, &target);
+ return -EINVAL;
+ }
+
+ if (bond_get_targets_ip(targets, target) != -1) { /* dup */
+ pr_err("%s: ARP target %pI4 is already present\n",
+ bond->dev->name, &target);
+ return -EINVAL;
+ }
+
+ ind = bond_get_targets_ip(targets, 0); /* first free slot */
+ if (ind == -1) {
+ pr_err("%s: ARP target table is full!\n",
+ bond->dev->name);
+ return -EINVAL;
+ }
+
+ pr_info("%s: adding ARP target %pI4.\n", bond->dev->name, &target);
+
+ _bond_options_arp_ip_target_set(bond, ind, target, jiffies);
+
+ return 0;
+}
+
+int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
+{
+ int ret;
+
+ /* not to race with bond_arp_rcv */
+ write_lock_bh(&bond->lock);
+ ret = _bond_option_arp_ip_target_add(bond, target);
+ write_unlock_bh(&bond->lock);
+
+ return ret;
+}
+
+int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
+{
+ __be32 *targets = bond->params.arp_targets;
+ struct list_head *iter;
+ struct slave *slave;
+ unsigned long *targets_rx;
+ int ind, i;
+
+ if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+ pr_err("%s: invalid ARP target %pI4 specified for removal\n",
+ bond->dev->name, &target);
+ return -EINVAL;
+ }
+
+ ind = bond_get_targets_ip(targets, target);
+ if (ind == -1) {
+ pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
+ bond->dev->name, &target);
+ return -EINVAL;
+ }
+
+ if (ind == 0 && !targets[1] && bond->params.arp_interval)
+ pr_warn("%s: removing last arp target with arp_interval on\n",
+ bond->dev->name);
+
+ pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
+ &target);
+
+ /* not to race with bond_arp_rcv */
+ write_lock_bh(&bond->lock);
+
+ bond_for_each_slave(bond, slave, iter) {
+ targets_rx = slave->target_last_arp_rx;
+ for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
+ targets_rx[i] = targets_rx[i+1];
+ targets_rx[i] = 0;
+ }
+ for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
+ targets[i] = targets[i+1];
+ targets[i] = 0;
+
+ write_unlock_bh(&bond->lock);
+
+ return 0;
+}
+
+void bond_option_arp_ip_targets_clear(struct bonding *bond)
+{
+ int i;
+
+ /* not to race with bond_arp_rcv */
+ write_lock_bh(&bond->lock);
+ for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
+ _bond_options_arp_ip_target_set(bond, i, 0, 0);
+ write_unlock_bh(&bond->lock);
+}
+
+int bond_option_arp_ip_targets_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ int ret = -EPERM;
+ __be32 target;
+
+ if (newval->string) {
+ if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) {
+ pr_err("%s: invalid ARP target %pI4 specified\n",
+ bond->dev->name, &target);
+ return ret;
+ }
+ if (newval->string[0] == '+')
+ ret = bond_option_arp_ip_target_add(bond, target);
+ else if (newval->string[0] == '-')
+ ret = bond_option_arp_ip_target_rem(bond, target);
+ else
+ pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
+ bond->dev->name);
+ } else {
+ target = newval->value;
+ ret = bond_option_arp_ip_target_add(bond, target);
+ }
+
+ return ret;
+}
+
+int bond_option_arp_validate_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: setting arp_validate to %s (%llu).\n",
+ bond->dev->name, newval->string, newval->value);
+
+ if (bond->dev->flags & IFF_UP) {
+ if (!newval->value)
+ bond->recv_probe = NULL;
+ else if (bond->params.arp_interval)
+ bond->recv_probe = bond_arp_rcv;
+ }
+ bond->params.arp_validate = newval->value;
+
+ return 0;
+}
+
+int bond_option_arp_all_targets_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: setting arp_all_targets to %s (%llu).\n",
+ bond->dev->name, newval->string, newval->value);
+ bond->params.arp_all_targets = newval->value;
+
+ return 0;
+}
+
+int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+ char *p, *primary = newval->string;
+ struct list_head *iter;
+ struct slave *slave;
+
+ block_netpoll_tx();
+ read_lock(&bond->lock);
+ write_lock_bh(&bond->curr_slave_lock);
+
+ p = strchr(primary, '\n');
+ if (p)
+ *p = '\0';
+ /* check to see if we are clearing primary */
+ if (!strlen(primary)) {
+ pr_info("%s: Setting primary slave to None.\n",
+ bond->dev->name);
+ bond->primary_slave = NULL;
+ memset(bond->params.primary, 0, sizeof(bond->params.primary));
+ bond_select_active_slave(bond);
+ goto out;
+ }
+
+ bond_for_each_slave(bond, slave, iter) {
+ if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) {
+ pr_info("%s: Setting %s as primary slave.\n",
+ bond->dev->name, slave->dev->name);
+ bond->primary_slave = slave;
+ strcpy(bond->params.primary, slave->dev->name);
+ bond_select_active_slave(bond);
+ goto out;
+ }
+ }
+
+ if (bond->primary_slave) {
+ pr_info("%s: Setting primary slave to None.\n",
+ bond->dev->name);
+ bond->primary_slave = NULL;
+ bond_select_active_slave(bond);
+ }
+ strncpy(bond->params.primary, primary, IFNAMSIZ);
+ bond->params.primary[IFNAMSIZ - 1] = 0;
+
+ pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet.\n",
+ bond->dev->name, primary, bond->dev->name);
+
+out:
+ write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
unblock_netpoll_tx();
+
+ return 0;
+}
+
+int bond_option_primary_reselect_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: setting primary_reselect to %s (%llu).\n",
+ bond->dev->name, newval->string, newval->value);
+ bond->params.primary_reselect = newval->value;
+
+ block_netpoll_tx();
+ write_lock_bh(&bond->curr_slave_lock);
+ bond_select_active_slave(bond);
+ write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
+
+ return 0;
+}
+
+int bond_option_fail_over_mac_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting fail_over_mac to %s (%llu).\n",
+ bond->dev->name, newval->string, newval->value);
+ bond->params.fail_over_mac = newval->value;
+
+ return 0;
+}
+
+int bond_option_xmit_hash_policy_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: setting xmit hash policy to %s (%llu).\n",
+ bond->dev->name, newval->string, newval->value);
+ bond->params.xmit_policy = newval->value;
+
+ return 0;
+}
+
+int bond_option_resend_igmp_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting resend_igmp to %llu.\n",
+ bond->dev->name, newval->value);
+ bond->params.resend_igmp = newval->value;
+
+ return 0;
+}
+
+int bond_option_num_peer_notif_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ bond->params.num_peer_notif = newval->value;
+
+ return 0;
+}
+
+int bond_option_all_slaves_active_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ struct list_head *iter;
+ struct slave *slave;
+
+ if (newval->value == bond->params.all_slaves_active)
+ return 0;
+ bond->params.all_slaves_active = newval->value;
+ bond_for_each_slave(bond, slave, iter) {
+ if (!bond_is_active_slave(slave)) {
+ if (newval->value)
+ slave->inactive = 0;
+ else
+ slave->inactive = 1;
+ }
+ }
+
+ return 0;
+}
+
+int bond_option_min_links_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting min links value to %llu\n",
+ bond->dev->name, newval->value);
+ bond->params.min_links = newval->value;
+
+ return 0;
+}
+
+int bond_option_lp_interval_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ bond->params.lp_interval = newval->value;
+
+ return 0;
+}
+
+int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+ bond->params.packets_per_slave = newval->value;
+ if (newval->value > 0) {
+ bond->params.reciprocal_packets_per_slave =
+ reciprocal_value(newval->value);
+ } else {
+ /* reciprocal_packets_per_slave is unused if
+ * packets_per_slave is 0 or 1, just initialize it
+ */
+ bond->params.reciprocal_packets_per_slave =
+ (struct reciprocal_value) { 0 };
+ }
+
+ return 0;
+}
+
+int bond_option_lacp_rate_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting LACP rate to %s (%llu).\n",
+ bond->dev->name, newval->string, newval->value);
+ bond->params.lacp_fast = newval->value;
+ bond_3ad_update_lacp_rate(bond);
+
+ return 0;
+}
+
+int bond_option_ad_select_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting ad_select to %s (%llu).\n",
+ bond->dev->name, newval->string, newval->value);
+ bond->params.ad_select = newval->value;
+
+ return 0;
+}
+
+int bond_option_queue_id_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ struct slave *slave, *update_slave;
+ struct net_device *sdev;
+ struct list_head *iter;
+ char *delim;
+ int ret = 0;
+ u16 qid;
+
+ /* delim will point to queue id if successful */
+ delim = strchr(newval->string, ':');
+ if (!delim)
+ goto err_no_cmd;
+
+ /* Terminate string that points to device name and bump it
+ * up one, so we can read the queue id there.
+ */
+ *delim = '\0';
+ if (sscanf(++delim, "%hd\n", &qid) != 1)
+ goto err_no_cmd;
+
+ /* Check buffer length, valid ifname and queue id */
+ if (strlen(newval->string) > IFNAMSIZ ||
+ !dev_valid_name(newval->string) ||
+ qid > bond->dev->real_num_tx_queues)
+ goto err_no_cmd;
+
+ /* Get the pointer to that interface if it exists */
+ sdev = __dev_get_by_name(dev_net(bond->dev), newval->string);
+ if (!sdev)
+ goto err_no_cmd;
+
+ /* Search for the slave and check for duplicate qids */
+ update_slave = NULL;
+ bond_for_each_slave(bond, slave, iter) {
+ if (sdev == slave->dev)
+ /* We don't need to check the matching
+ * slave for dups, since we're overwriting it
+ */
+ update_slave = slave;
+ else if (qid && qid == slave->queue_id) {
+ goto err_no_cmd;
+ }
+ }
+
+ if (!update_slave)
+ goto err_no_cmd;
+
+ /* Actually set the qids for the slave */
+ update_slave->queue_id = qid;
+
+out:
return ret;
+
+err_no_cmd:
+ pr_info("invalid input for queue_id set for %s.\n",
+ bond->dev->name);
+ ret = -EPERM;
+ goto out;
+
+}
+
+int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+ char command[IFNAMSIZ + 1] = { 0, };
+ struct net_device *dev;
+ char *ifname;
+ int ret;
+
+ sscanf(newval->string, "%16s", command); /* IFNAMSIZ*/
+ ifname = command + 1;
+ if ((strlen(command) <= 1) ||
+ !dev_valid_name(ifname))
+ goto err_no_cmd;
+
+ dev = __dev_get_by_name(dev_net(bond->dev), ifname);
+ if (!dev) {
+ pr_info("%s: Interface %s does not exist!\n",
+ bond->dev->name, ifname);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ switch (command[0]) {
+ case '+':
+ pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name);
+ ret = bond_enslave(bond->dev, dev);
+ break;
+
+ case '-':
+ pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name);
+ ret = bond_release(bond->dev, dev);
+ break;
+
+ default:
+ goto err_no_cmd;
+ }
+
+out:
+ return ret;
+
+err_no_cmd:
+ pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
+ bond->dev->name);
+ ret = -EPERM;
+ goto out;
}
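
The value tables above (bond_mode_tbl, bond_intmax_tbl, bond_lacp_rate_tbl, ...) drive all of the parsing done by bond_opt_parse() and bond_opt_check_range(): an input is accepted if it matches an entry's string, the literal "default" when an entry carries BOND_VALFLAG_DEFAULT, an entry's numeric value, or the numeric range bounded by the BOND_VALFLAG_MIN/BOND_VALFLAG_MAX entries. The following is a hedged, self-contained userspace sketch of that lookup; the names mirror the kernel tables for readability, but none of this code is part of the patch and it simplifies the real functions.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define VALFLAG_DEFAULT (1u << 0)
	#define VALFLAG_MIN     (1u << 1)
	#define VALFLAG_MAX     (1u << 2)

	struct opt_value {
		const char *string;
		long long value;
		unsigned int flags;
	};

	/* Shaped like bond_intmax_tbl above: "off" is the default, INT_MAX-style cap. */
	static const struct opt_value miimon_tbl[] = {
		{ "off",    0,          VALFLAG_DEFAULT },
		{ "maxval", 0x7fffffff, VALFLAG_MAX },
		{ NULL,     -1,         0 },
	};

	/* Return the accepted entry (string, "default" or value match) or, for an
	 * in-range number, the table's first entry as an "accepted" marker; NULL
	 * means the input is rejected.  This mirrors bond_opt_parse() plus
	 * bond_opt_check_range() in spirit, not line for line.
	 */
	static const struct opt_value *opt_parse(const struct opt_value *tbl,
						 const char *str)
	{
		char *end;
		long long num = strtoll(str, &end, 10);
		int is_num = (end != str && *end == '\0');
		long long min = 0, max = -1;
		int i;

		for (i = 0; tbl[i].string; i++) {
			if (tbl[i].flags & VALFLAG_MIN)
				min = tbl[i].value;
			if (tbl[i].flags & VALFLAG_MAX)
				max = tbl[i].value;
			if (is_num && num == tbl[i].value)
				return &tbl[i];
			if (!is_num && (strcmp(str, tbl[i].string) == 0 ||
					(strcmp(str, "default") == 0 &&
					 (tbl[i].flags & VALFLAG_DEFAULT))))
				return &tbl[i];
		}
		/* Range match only when a MAX entry exists, as in the kernel code. */
		if (is_num && max >= 0 && num >= min && num <= max)
			return &tbl[0];
		return NULL;
	}

	int main(void)
	{
		printf("\"off\"  -> %s\n", opt_parse(miimon_tbl, "off")  ? "accepted" : "rejected");
		printf("\"100\"  -> %s\n", opt_parse(miimon_tbl, "100")  ? "accepted" : "rejected");
		printf("\"-5\"   -> %s\n", opt_parse(miimon_tbl, "-5")   ? "accepted" : "rejected");
		printf("\"junk\" -> %s\n", opt_parse(miimon_tbl, "junk") ? "accepted" : "rejected");
		return 0;
	}
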
diff --git a/drivers/net/bonding/bond_options.h b/drivers/net/bonding/bond_options.h
new file mode 100644
index 000000000000..433d37f6940b
--- /dev/null
+++ b/drivers/net/bonding/bond_options.h
@@ -0,0 +1,170 @@
+/*
+ * drivers/net/bond/bond_options.h - bonding options
+ * Copyright (c) 2013 Nikolay Aleksandrov <nikolay@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _BOND_OPTIONS_H
+#define _BOND_OPTIONS_H
+
+#define BOND_OPT_MAX_NAMELEN 32
+#define BOND_OPT_VALID(opt) ((opt) < BOND_OPT_LAST)
+#define BOND_MODE_ALL_EX(x) (~(x))
+
+/* Option flags:
+ * BOND_OPTFLAG_NOSLAVES - check if the bond device is empty before setting
+ * BOND_OPTFLAG_IFDOWN - check if the bond device is down before setting
+ * BOND_OPTFLAG_RAWVAL - the option parses the value itself
+ */
+enum {
+ BOND_OPTFLAG_NOSLAVES = BIT(0),
+ BOND_OPTFLAG_IFDOWN = BIT(1),
+ BOND_OPTFLAG_RAWVAL = BIT(2)
+};
+
+/* Value type flags:
+ * BOND_VALFLAG_DEFAULT - mark the value as default
+ * BOND_VALFLAG_(MIN|MAX) - mark the value as min/max
+ */
+enum {
+ BOND_VALFLAG_DEFAULT = BIT(0),
+ BOND_VALFLAG_MIN = BIT(1),
+ BOND_VALFLAG_MAX = BIT(2)
+};
+
+/* Option IDs, their bit positions correspond to their IDs */
+enum {
+ BOND_OPT_MODE,
+ BOND_OPT_PACKETS_PER_SLAVE,
+ BOND_OPT_XMIT_HASH,
+ BOND_OPT_ARP_VALIDATE,
+ BOND_OPT_ARP_ALL_TARGETS,
+ BOND_OPT_FAIL_OVER_MAC,
+ BOND_OPT_ARP_INTERVAL,
+ BOND_OPT_ARP_TARGETS,
+ BOND_OPT_DOWNDELAY,
+ BOND_OPT_UPDELAY,
+ BOND_OPT_LACP_RATE,
+ BOND_OPT_MINLINKS,
+ BOND_OPT_AD_SELECT,
+ BOND_OPT_NUM_PEER_NOTIF,
+ BOND_OPT_MIIMON,
+ BOND_OPT_PRIMARY,
+ BOND_OPT_PRIMARY_RESELECT,
+ BOND_OPT_USE_CARRIER,
+ BOND_OPT_ACTIVE_SLAVE,
+ BOND_OPT_QUEUE_ID,
+ BOND_OPT_ALL_SLAVES_ACTIVE,
+ BOND_OPT_RESEND_IGMP,
+ BOND_OPT_LP_INTERVAL,
+ BOND_OPT_SLAVES,
+ BOND_OPT_LAST
+};
+
+/* This structure is used for storing option values and for passing option
+ * values when changing an option. The logic when used as an arg is as follows:
+ * - if string != NULL -> parse it, if the opt is RAW type then return it, else
+ * return the parse result
+ * - if string == NULL -> parse value
+ */
+struct bond_opt_value {
+ char *string;
+ u64 value;
+ u32 flags;
+};
+
+struct bonding;
+
+struct bond_option {
+ int id;
+ char *name;
+ char *desc;
+ u32 flags;
+
+ /* unsuppmodes is used to denote modes in which the option isn't
+ * supported.
+ */
+ unsigned long unsuppmodes;
+ /* supported values which this option can have, can be a subset of
+ * BOND_OPTVAL_RANGE's value range
+ */
+ struct bond_opt_value *values;
+
+ int (*set)(struct bonding *bond, struct bond_opt_value *val);
+};
+
+int __bond_opt_set(struct bonding *bond, unsigned int option,
+ struct bond_opt_value *val);
+int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
+struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
+ struct bond_opt_value *val);
+struct bond_option *bond_opt_get(unsigned int option);
+struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
+
+/* This helper is used to initialize a bond_opt_value structure for parameter
+ * passing. There should be either a valid string or value, but not both.
+ * When value is ULLONG_MAX then string will be used.
+ */
+static inline void __bond_opt_init(struct bond_opt_value *optval,
+ char *string, u64 value)
+{
+ memset(optval, 0, sizeof(*optval));
+ optval->value = ULLONG_MAX;
+ if (value == ULLONG_MAX)
+ optval->string = string;
+ else
+ optval->value = value;
+}
+#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value)
+#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX)
+
+int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval);
+int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval);
+int bond_option_xmit_hash_policy_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_arp_validate_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_arp_all_targets_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_fail_over_mac_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_arp_interval_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_arp_ip_targets_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+void bond_option_arp_ip_targets_clear(struct bonding *bond);
+int bond_option_downdelay_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_updelay_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_lacp_rate_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_min_links_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_ad_select_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_num_peer_notif_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval);
+int bond_option_primary_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_primary_reselect_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_use_carrier_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_active_slave_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_queue_id_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_all_slaves_active_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_resend_igmp_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_lp_interval_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval);
+#endif /* _BOND_OPTIONS_H */
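
The header's __bond_opt_init() helper encodes which bond_opt_value field carries the payload: a .value of ULLONG_MAX means "the caller passed a string to be parsed", any other .value is used directly, which is exactly how bond_opt_initstr() and bond_opt_initval() differ. The hedged userspace rendering below copies the struct and helper from the header, with kernel u64/u32 replaced by stdint types; it is an illustration, not part of the patch.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Userspace copy of bond_opt_value; u64/u32 become stdint types. */
	struct bond_opt_value {
		char *string;
		uint64_t value;
		uint32_t flags;
	};

	static inline void __bond_opt_init(struct bond_opt_value *optval,
					   char *string, uint64_t value)
	{
		memset(optval, 0, sizeof(*optval));
		optval->value = UINT64_MAX;	/* stands in for ULLONG_MAX */
		if (value == UINT64_MAX)
			optval->string = string;
		else
			optval->value = value;
	}
	#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value)
	#define bond_opt_initstr(optval, str)	__bond_opt_init(optval, str, UINT64_MAX)

	int main(void)
	{
		struct bond_opt_value byval, bystr;
		char mode[] = "active-backup";

		bond_opt_initval(&byval, 100);	/* numeric payload: .value = 100 */
		bond_opt_initstr(&bystr, mode);	/* string payload: .value = UINT64_MAX */

		printf("byval: value=%llu string=%s\n",
		       (unsigned long long)byval.value,
		       byval.string ? byval.string : "(null)");
		printf("bystr: value=%llu string=%s\n",
		       (unsigned long long)bystr.value, bystr.string);
		return 0;
	}
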
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index fb868d6c22da..3ac20e78eafc 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -65,6 +65,7 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v)
static void bond_info_show_master(struct seq_file *seq)
{
struct bonding *bond = seq->private;
+ struct bond_opt_value *optval;
struct slave *curr;
int i;
@@ -76,26 +77,32 @@ static void bond_info_show_master(struct seq_file *seq)
bond_mode_name(bond->params.mode));
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
- bond->params.fail_over_mac)
- seq_printf(seq, " (fail_over_mac %s)",
- fail_over_mac_tbl[bond->params.fail_over_mac].modename);
+ bond->params.fail_over_mac) {
+ optval = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
+ bond->params.fail_over_mac);
+ seq_printf(seq, " (fail_over_mac %s)", optval->string);
+ }
seq_printf(seq, "\n");
if (bond->params.mode == BOND_MODE_XOR ||
bond->params.mode == BOND_MODE_8023AD) {
+ optval = bond_opt_get_val(BOND_OPT_XMIT_HASH,
+ bond->params.xmit_policy);
seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
- xmit_hashtype_tbl[bond->params.xmit_policy].modename,
- bond->params.xmit_policy);
+ optval->string, bond->params.xmit_policy);
}
if (USES_PRIMARY(bond->params.mode)) {
seq_printf(seq, "Primary Slave: %s",
(bond->primary_slave) ?
bond->primary_slave->dev->name : "None");
- if (bond->primary_slave)
+ if (bond->primary_slave) {
+ optval = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
+ bond->params.primary_reselect);
seq_printf(seq, " (primary_reselect %s)",
- pri_reselect_tbl[bond->params.primary_reselect].modename);
+ optval->string);
+ }
seq_printf(seq, "\nCurrently Active Slave: %s\n",
(curr) ? curr->dev->name : "None");
@@ -136,8 +143,10 @@ static void bond_info_show_master(struct seq_file *seq)
seq_printf(seq, "LACP rate: %s\n",
(bond->params.lacp_fast) ? "fast" : "slow");
seq_printf(seq, "Min links: %d\n", bond->params.min_links);
+ optval = bond_opt_get_val(BOND_OPT_AD_SELECT,
+ bond->params.ad_select);
seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
- ad_select_tbl[bond->params.ad_select].modename);
+ optval->string);
if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
seq_printf(seq, "bond %s has no active aggregator\n",
@@ -159,18 +168,6 @@ static void bond_info_show_master(struct seq_file *seq)
}
}
-static const char *bond_slave_link_status(s8 link)
-{
- static const char * const status[] = {
- [BOND_LINK_UP] = "up",
- [BOND_LINK_FAIL] = "going down",
- [BOND_LINK_DOWN] = "down",
- [BOND_LINK_BACK] = "going back",
- };
-
- return status[link];
-}
-
static void bond_info_show_slave(struct seq_file *seq,
const struct slave *slave)
{
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 0ae580bbc5db..643fcc110299 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -12,8 +12,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
@@ -40,7 +39,6 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/nsproxy.h>
-#include <linux/reciprocal_div.h>
#include "bonding.h"
@@ -202,58 +200,15 @@ static ssize_t bonding_store_slaves(struct device *d,
struct device_attribute *attr,
const char *buffer, size_t count)
{
- char command[IFNAMSIZ + 1] = { 0, };
- char *ifname;
- int res, ret = count;
- struct net_device *dev;
struct bonding *bond = to_bond(d);
+ int ret;
- if (!rtnl_trylock())
- return restart_syscall();
-
- sscanf(buffer, "%16s", command); /* IFNAMSIZ*/
- ifname = command + 1;
- if ((strlen(command) <= 1) ||
- !dev_valid_name(ifname))
- goto err_no_cmd;
-
- dev = __dev_get_by_name(dev_net(bond->dev), ifname);
- if (!dev) {
- pr_info("%s: Interface %s does not exist!\n",
- bond->dev->name, ifname);
- ret = -ENODEV;
- goto out;
- }
-
- switch (command[0]) {
- case '+':
- pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name);
- res = bond_enslave(bond->dev, dev);
- break;
-
- case '-':
- pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name);
- res = bond_release(bond->dev, dev);
- break;
-
- default:
- goto err_no_cmd;
- }
-
- if (res)
- ret = res;
- goto out;
-
-err_no_cmd:
- pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
- bond->dev->name);
- ret = -EPERM;
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_SLAVES, (char *)buffer);
+ if (!ret)
+ ret = count;
-out:
- rtnl_unlock();
return ret;
}
-
static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves,
bonding_store_slaves);
@@ -265,37 +220,24 @@ static ssize_t bonding_show_mode(struct device *d,
struct device_attribute *attr, char *buf)
{
struct bonding *bond = to_bond(d);
+ struct bond_opt_value *val;
- return sprintf(buf, "%s %d\n",
- bond_mode_tbl[bond->params.mode].modename,
- bond->params.mode);
+ val = bond_opt_get_val(BOND_OPT_MODE, bond->params.mode);
+
+ return sprintf(buf, "%s %d\n", val->string, bond->params.mode);
}
static ssize_t bonding_store_mode(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret;
struct bonding *bond = to_bond(d);
+ int ret;
- new_value = bond_parse_parm(buf, bond_mode_tbl);
- if (new_value < 0) {
- pr_err("%s: Ignoring invalid mode value %.*s.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
- return -EINVAL;
- }
- if (!rtnl_trylock())
- return restart_syscall();
-
- ret = bond_option_mode_set(bond, new_value);
- if (!ret) {
- pr_info("%s: setting mode to %s (%d).\n",
- bond->dev->name, bond_mode_tbl[new_value].modename,
- new_value);
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MODE, (char *)buf);
+ if (!ret)
ret = count;
- }
- rtnl_unlock();
return ret;
}
static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
@@ -309,31 +251,23 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+ struct bond_opt_value *val;
- return sprintf(buf, "%s %d\n",
- xmit_hashtype_tbl[bond->params.xmit_policy].modename,
- bond->params.xmit_policy);
+ val = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy);
+
+ return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
}
static ssize_t bonding_store_xmit_hash(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
- new_value = bond_parse_parm(buf, xmit_hashtype_tbl);
- if (new_value < 0) {
- pr_err("%s: Ignoring invalid xmit hash policy value %.*s.\n",
- bond->dev->name,
- (int)strlen(buf) - 1, buf);
- ret = -EINVAL;
- } else {
- bond->params.xmit_policy = new_value;
- pr_info("%s: setting xmit hash policy to %s (%d).\n",
- bond->dev->name,
- xmit_hashtype_tbl[new_value].modename, new_value);
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_XMIT_HASH, (char *)buf);
+ if (!ret)
+ ret = count;
return ret;
}
@@ -348,10 +282,12 @@ static ssize_t bonding_show_arp_validate(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+ struct bond_opt_value *val;
- return sprintf(buf, "%s %d\n",
- arp_validate_tbl[bond->params.arp_validate].modename,
- bond->params.arp_validate);
+ val = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
+ bond->params.arp_validate);
+
+ return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
}
static ssize_t bonding_store_arp_validate(struct device *d,
@@ -359,36 +295,11 @@ static ssize_t bonding_store_arp_validate(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
+ int ret;
- if (!rtnl_trylock())
- return restart_syscall();
- new_value = bond_parse_parm(buf, arp_validate_tbl);
- if (new_value < 0) {
- pr_err("%s: Ignoring invalid arp_validate value %s\n",
- bond->dev->name, buf);
- ret = -EINVAL;
- goto out;
- }
- if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
- pr_err("%s: arp_validate only supported in active-backup mode.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- pr_info("%s: setting arp_validate to %s (%d).\n",
- bond->dev->name, arp_validate_tbl[new_value].modename,
- new_value);
-
- if (bond->dev->flags & IFF_UP) {
- if (!new_value)
- bond->recv_probe = NULL;
- else if (bond->params.arp_interval)
- bond->recv_probe = bond_arp_rcv;
- }
- bond->params.arp_validate = new_value;
-out:
- rtnl_unlock();
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_VALIDATE, (char *)buf);
+ if (!ret)
+ ret = count;
return ret;
}
@@ -403,10 +314,12 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- int value = bond->params.arp_all_targets;
+ struct bond_opt_value *val;
- return sprintf(buf, "%s %d\n", arp_all_targets_tbl[value].modename,
- value);
+ val = bond_opt_get_val(BOND_OPT_ARP_ALL_TARGETS,
+ bond->params.arp_all_targets);
+ return sprintf(buf, "%s %d\n",
+ val->string, bond->params.arp_all_targets);
}
static ssize_t bonding_store_arp_all_targets(struct device *d,
@@ -414,21 +327,13 @@ static ssize_t bonding_store_arp_all_targets(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value;
-
- new_value = bond_parse_parm(buf, arp_all_targets_tbl);
- if (new_value < 0) {
- pr_err("%s: Ignoring invalid arp_all_targets value %s\n",
- bond->dev->name, buf);
- return -EINVAL;
- }
- pr_info("%s: setting arp_all_targets to %s (%d).\n",
- bond->dev->name, arp_all_targets_tbl[new_value].modename,
- new_value);
+ int ret;
- bond->params.arp_all_targets = new_value;
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_ALL_TARGETS, (char *)buf);
+ if (!ret)
+ ret = count;
- return count;
+ return ret;
}
static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
@@ -443,44 +348,25 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+ struct bond_opt_value *val;
- return sprintf(buf, "%s %d\n",
- fail_over_mac_tbl[bond->params.fail_over_mac].modename,
- bond->params.fail_over_mac);
+ val = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
+ bond->params.fail_over_mac);
+
+ return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
}
static ssize_t bonding_store_fail_over_mac(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
- if (!rtnl_trylock())
- return restart_syscall();
-
- if (bond_has_slaves(bond)) {
- pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
- new_value = bond_parse_parm(buf, fail_over_mac_tbl);
- if (new_value < 0) {
- pr_err("%s: Ignoring invalid fail_over_mac value %s.\n",
- bond->dev->name, buf);
- ret = -EINVAL;
- goto out;
- }
-
- bond->params.fail_over_mac = new_value;
- pr_info("%s: Setting fail_over_mac to %s (%d).\n",
- bond->dev->name, fail_over_mac_tbl[new_value].modename,
- new_value);
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_FAIL_OVER_MAC, (char *)buf);
+ if (!ret)
+ ret = count;
-out:
- rtnl_unlock();
return ret;
}
@@ -507,61 +393,12 @@ static ssize_t bonding_store_arp_interval(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
+ int ret;
+
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_INTERVAL, (char *)buf);
+ if (!ret)
+ ret = count;
- if (!rtnl_trylock())
- return restart_syscall();
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no arp_interval value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0) {
- pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
- bond->dev->name, new_value, INT_MAX);
- ret = -EINVAL;
- goto out;
- }
- if (BOND_NO_USES_ARP(bond->params.mode)) {
- pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
- bond->dev->name, bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- pr_info("%s: Setting ARP monitoring interval to %d.\n",
- bond->dev->name, new_value);
- bond->params.arp_interval = new_value;
- if (new_value) {
- if (bond->params.miimon) {
- pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
- bond->dev->name, bond->dev->name);
- bond->params.miimon = 0;
- }
- if (!bond->params.arp_targets[0])
- pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
- bond->dev->name);
- }
- if (bond->dev->flags & IFF_UP) {
- /* If the interface is up, we may need to fire off
- * the ARP timer. If the interface is down, the
- * timer will get fired off when the open function
- * is called.
- */
- if (!new_value) {
- if (bond->params.arp_validate)
- bond->recv_probe = NULL;
- cancel_delayed_work_sync(&bond->arp_work);
- } else {
- /* arp_validate can be set only in active-backup mode */
- if (bond->params.arp_validate)
- bond->recv_probe = bond_arp_rcv;
- cancel_delayed_work_sync(&bond->mii_work);
- queue_delayed_work(bond->wq, &bond->arp_work, 0);
- }
- }
-out:
- rtnl_unlock();
return ret;
}
static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
@@ -574,8 +411,8 @@ static ssize_t bonding_show_arp_targets(struct device *d,
struct device_attribute *attr,
char *buf)
{
- int i, res = 0;
struct bonding *bond = to_bond(d);
+ int i, res = 0;
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
if (bond->params.arp_targets[i])
@@ -584,6 +421,7 @@ static ssize_t bonding_show_arp_targets(struct device *d,
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
+
return res;
}
@@ -592,82 +430,12 @@ static ssize_t bonding_store_arp_targets(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- struct list_head *iter;
- struct slave *slave;
- __be32 newtarget, *targets;
- unsigned long *targets_rx;
- int ind, i, j, ret = -EINVAL;
-
- if (!rtnl_trylock())
- return restart_syscall();
-
- targets = bond->params.arp_targets;
- if (!in4_pton(buf + 1, -1, (u8 *)&newtarget, -1, NULL) ||
- IS_IP_TARGET_UNUSABLE_ADDRESS(newtarget)) {
- pr_err("%s: invalid ARP target %pI4 specified for addition\n",
- bond->dev->name, &newtarget);
- goto out;
- }
- /* look for adds */
- if (buf[0] == '+') {
- if (bond_get_targets_ip(targets, newtarget) != -1) { /* dup */
- pr_err("%s: ARP target %pI4 is already present\n",
- bond->dev->name, &newtarget);
- goto out;
- }
-
- ind = bond_get_targets_ip(targets, 0); /* first free slot */
- if (ind == -1) {
- pr_err("%s: ARP target table is full!\n",
- bond->dev->name);
- goto out;
- }
-
- pr_info("%s: adding ARP target %pI4.\n", bond->dev->name,
- &newtarget);
- /* not to race with bond_arp_rcv */
- write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave, iter)
- slave->target_last_arp_rx[ind] = jiffies;
- targets[ind] = newtarget;
- write_unlock_bh(&bond->lock);
- } else if (buf[0] == '-') {
- ind = bond_get_targets_ip(targets, newtarget);
- if (ind == -1) {
- pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
- bond->dev->name, &newtarget);
- goto out;
- }
-
- if (ind == 0 && !targets[1] && bond->params.arp_interval)
- pr_warn("%s: removing last arp target with arp_interval on\n",
- bond->dev->name);
-
- pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
- &newtarget);
+ int ret;
- write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave, iter) {
- targets_rx = slave->target_last_arp_rx;
- j = ind;
- for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
- targets_rx[j] = targets_rx[j+1];
- targets_rx[j] = 0;
- }
- for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
- targets[i] = targets[i+1];
- targets[i] = 0;
- write_unlock_bh(&bond->lock);
- } else {
- pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_TARGETS, (char *)buf);
+ if (!ret)
+ ret = count;
- ret = count;
-out:
- rtnl_unlock();
return ret;
}
static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
@@ -690,45 +458,13 @@ static ssize_t bonding_store_downdelay(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
- if (!rtnl_trylock())
- return restart_syscall();
- if (!(bond->params.miimon)) {
- pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no down delay value specified.\n", bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0) {
- pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 0, INT_MAX);
- ret = -EINVAL;
- goto out;
- } else {
- if ((new_value % bond->params.miimon) != 0) {
- pr_warning("%s: Warning: down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
- bond->dev->name, new_value,
- bond->params.miimon,
- (new_value / bond->params.miimon) *
- bond->params.miimon);
- }
- bond->params.downdelay = new_value / bond->params.miimon;
- pr_info("%s: Setting down delay to %d.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
-
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_DOWNDELAY, (char *)buf);
+ if (!ret)
+ ret = count;
-out:
- rtnl_unlock();
return ret;
}
static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
@@ -748,45 +484,13 @@ static ssize_t bonding_store_updelay(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
- if (!rtnl_trylock())
- return restart_syscall();
- if (!(bond->params.miimon)) {
- pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no up delay value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0) {
- pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 0, INT_MAX);
- ret = -EINVAL;
- goto out;
- } else {
- if ((new_value % bond->params.miimon) != 0) {
- pr_warning("%s: Warning: up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
- bond->dev->name, new_value,
- bond->params.miimon,
- (new_value / bond->params.miimon) *
- bond->params.miimon);
- }
- bond->params.updelay = new_value / bond->params.miimon;
- pr_info("%s: Setting up delay to %d.\n",
- bond->dev->name,
- bond->params.updelay * bond->params.miimon);
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_UPDELAY, (char *)buf);
+ if (!ret)
+ ret = count;
-out:
- rtnl_unlock();
return ret;
}
static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
@@ -801,10 +505,11 @@ static ssize_t bonding_show_lacp(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+ struct bond_opt_value *val;
- return sprintf(buf, "%s %d\n",
- bond_lacp_tbl[bond->params.lacp_fast].modename,
- bond->params.lacp_fast);
+ val = bond_opt_get_val(BOND_OPT_LACP_RATE, bond->params.lacp_fast);
+
+ return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
}
static ssize_t bonding_store_lacp(struct device *d,
@@ -812,40 +517,11 @@ static ssize_t bonding_store_lacp(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
-
- if (!rtnl_trylock())
- return restart_syscall();
-
- if (bond->dev->flags & IFF_UP) {
- pr_err("%s: Unable to update LACP rate because interface is up.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
- if (bond->params.mode != BOND_MODE_8023AD) {
- pr_err("%s: Unable to update LACP rate because bond is not in 802.3ad mode.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
+ int ret;
- new_value = bond_parse_parm(buf, bond_lacp_tbl);
-
- if ((new_value == 1) || (new_value == 0)) {
- bond->params.lacp_fast = new_value;
- bond_3ad_update_lacp_rate(bond);
- pr_info("%s: Setting LACP rate to %s (%d).\n",
- bond->dev->name, bond_lacp_tbl[new_value].modename,
- new_value);
- } else {
- pr_err("%s: Ignoring invalid LACP rate value %.*s.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
- ret = -EINVAL;
- }
-out:
- rtnl_unlock();
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LACP_RATE, (char *)buf);
+ if (!ret)
+ ret = count;
return ret;
}
@@ -867,19 +543,12 @@ static ssize_t bonding_store_min_links(struct device *d,
{
struct bonding *bond = to_bond(d);
int ret;
- unsigned int new_value;
- ret = kstrtouint(buf, 0, &new_value);
- if (ret < 0) {
- pr_err("%s: Ignoring invalid min links value %s.\n",
- bond->dev->name, buf);
- return ret;
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MINLINKS, (char *)buf);
+ if (!ret)
+ ret = count;
- pr_info("%s: Setting min links value to %u\n",
- bond->dev->name, new_value);
- bond->params.min_links = new_value;
- return count;
+ return ret;
}
static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
bonding_show_min_links, bonding_store_min_links);
@@ -889,10 +558,11 @@ static ssize_t bonding_show_ad_select(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+ struct bond_opt_value *val;
- return sprintf(buf, "%s %d\n",
- ad_select_tbl[bond->params.ad_select].modename,
- bond->params.ad_select);
+ val = bond_opt_get_val(BOND_OPT_AD_SELECT, bond->params.ad_select);
+
+ return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
}
@@ -900,29 +570,13 @@ static ssize_t bonding_store_ad_select(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
- if (bond->dev->flags & IFF_UP) {
- pr_err("%s: Unable to update ad_select because interface is up.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_AD_SELECT, (char *)buf);
+ if (!ret)
+ ret = count;
- new_value = bond_parse_parm(buf, ad_select_tbl);
-
- if (new_value != -1) {
- bond->params.ad_select = new_value;
- pr_info("%s: Setting ad_select to %s (%d).\n",
- bond->dev->name, ad_select_tbl[new_value].modename,
- new_value);
- } else {
- pr_err("%s: Ignoring invalid ad_select value %.*s.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
- ret = -EINVAL;
- }
-out:
return ret;
}
static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
@@ -944,8 +598,13 @@ static ssize_t bonding_store_num_peer_notif(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int err = kstrtou8(buf, 10, &bond->params.num_peer_notif);
- return err ? err : count;
+ int ret;
+
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_NUM_PEER_NOTIF, (char *)buf);
+ if (!ret)
+ ret = count;
+
+ return ret;
}
static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
bonding_show_num_peer_notif, bonding_store_num_peer_notif);
@@ -971,56 +630,13 @@ static ssize_t bonding_store_miimon(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
+
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MIIMON, (char *)buf);
+ if (!ret)
+ ret = count;
- if (!rtnl_trylock())
- return restart_syscall();
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no miimon value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0) {
- pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 0, INT_MAX);
- ret = -EINVAL;
- goto out;
- }
- pr_info("%s: Setting MII monitoring interval to %d.\n",
- bond->dev->name, new_value);
- bond->params.miimon = new_value;
- if (bond->params.updelay)
- pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
- bond->dev->name,
- bond->params.updelay * bond->params.miimon);
- if (bond->params.downdelay)
- pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
- if (new_value && bond->params.arp_interval) {
- pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
- bond->dev->name);
- bond->params.arp_interval = 0;
- if (bond->params.arp_validate)
- bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
- }
- if (bond->dev->flags & IFF_UP) {
- /* If the interface is up, we may need to fire off
- * the MII timer. If the interface is down, the
- * timer will get fired off when the open function
- * is called.
- */
- if (!new_value) {
- cancel_delayed_work_sync(&bond->mii_work);
- } else {
- cancel_delayed_work_sync(&bond->arp_work);
- queue_delayed_work(bond->wq, &bond->mii_work, 0);
- }
- }
-out:
- rtnl_unlock();
return ret;
}
static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
@@ -1051,58 +667,13 @@ static ssize_t bonding_store_primary(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- struct list_head *iter;
- char ifname[IFNAMSIZ];
- struct slave *slave;
-
- if (!rtnl_trylock())
- return restart_syscall();
- block_netpoll_tx();
- read_lock(&bond->lock);
- write_lock_bh(&bond->curr_slave_lock);
-
- if (!USES_PRIMARY(bond->params.mode)) {
- pr_info("%s: Unable to set primary slave; %s is in mode %d\n",
- bond->dev->name, bond->dev->name, bond->params.mode);
- goto out;
- }
-
- sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
-
- /* check to see if we are clearing primary */
- if (!strlen(ifname) || buf[0] == '\n') {
- pr_info("%s: Setting primary slave to None.\n",
- bond->dev->name);
- bond->primary_slave = NULL;
- memset(bond->params.primary, 0, sizeof(bond->params.primary));
- bond_select_active_slave(bond);
- goto out;
- }
-
- bond_for_each_slave(bond, slave, iter) {
- if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
- pr_info("%s: Setting %s as primary slave.\n",
- bond->dev->name, slave->dev->name);
- bond->primary_slave = slave;
- strcpy(bond->params.primary, slave->dev->name);
- bond_select_active_slave(bond);
- goto out;
- }
- }
-
- strncpy(bond->params.primary, ifname, IFNAMSIZ);
- bond->params.primary[IFNAMSIZ - 1] = 0;
+ int ret;
- pr_info("%s: Recording %s as primary, "
- "but it has not been enslaved to %s yet.\n",
- bond->dev->name, ifname, bond->dev->name);
-out:
- write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- unblock_netpoll_tx();
- rtnl_unlock();
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY, (char *)buf);
+ if (!ret)
+ ret = count;
- return count;
+ return ret;
}
static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
bonding_show_primary, bonding_store_primary);
@@ -1115,45 +686,27 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+ struct bond_opt_value *val;
+
+ val = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
+ bond->params.primary_reselect);
return sprintf(buf, "%s %d\n",
- pri_reselect_tbl[bond->params.primary_reselect].modename,
- bond->params.primary_reselect);
+ val->string, bond->params.primary_reselect);
}
static ssize_t bonding_store_primary_reselect(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
- if (!rtnl_trylock())
- return restart_syscall();
-
- new_value = bond_parse_parm(buf, pri_reselect_tbl);
- if (new_value < 0) {
- pr_err("%s: Ignoring invalid primary_reselect value %.*s.\n",
- bond->dev->name,
- (int) strlen(buf) - 1, buf);
- ret = -EINVAL;
- goto out;
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY_RESELECT,
+ (char *)buf);
+ if (!ret)
+ ret = count;
- bond->params.primary_reselect = new_value;
- pr_info("%s: setting primary_reselect to %s (%d).\n",
- bond->dev->name, pri_reselect_tbl[new_value].modename,
- new_value);
-
- block_netpoll_tx();
- read_lock(&bond->lock);
- write_lock_bh(&bond->curr_slave_lock);
- bond_select_active_slave(bond);
- write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- unblock_netpoll_tx();
-out:
- rtnl_unlock();
return ret;
}
static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR,
@@ -1176,25 +729,13 @@ static ssize_t bonding_store_carrier(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_USE_CARRIER, (char *)buf);
+ if (!ret)
+ ret = count;
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no use_carrier value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if ((new_value == 0) || (new_value == 1)) {
- bond->params.use_carrier = new_value;
- pr_info("%s: Setting use_carrier to %d.\n",
- bond->dev->name, new_value);
- } else {
- pr_info("%s: Ignoring invalid use_carrier value %d.\n",
- bond->dev->name, new_value);
- }
-out:
return ret;
}
static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
@@ -1225,34 +766,14 @@ static ssize_t bonding_store_active_slave(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int ret;
struct bonding *bond = to_bond(d);
- char ifname[IFNAMSIZ];
- struct net_device *dev;
-
- if (!rtnl_trylock())
- return restart_syscall();
-
- sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
- if (!strlen(ifname) || buf[0] == '\n') {
- dev = NULL;
- } else {
- dev = __dev_get_by_name(dev_net(bond->dev), ifname);
- if (!dev) {
- ret = -ENODEV;
- goto out;
- }
- }
+ int ret;
- ret = bond_option_active_slave_set(bond, dev);
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ACTIVE_SLAVE, (char *)buf);
if (!ret)
ret = count;
- out:
- rtnl_unlock();
-
return ret;
-
}
static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
bonding_show_active_slave, bonding_store_active_slave);
@@ -1421,72 +942,15 @@ static ssize_t bonding_store_queue_id(struct device *d,
struct device_attribute *attr,
const char *buffer, size_t count)
{
- struct slave *slave, *update_slave;
struct bonding *bond = to_bond(d);
- struct list_head *iter;
- u16 qid;
- int ret = count;
- char *delim;
- struct net_device *sdev = NULL;
-
- if (!rtnl_trylock())
- return restart_syscall();
-
- /* delim will point to queue id if successful */
- delim = strchr(buffer, ':');
- if (!delim)
- goto err_no_cmd;
-
- /*
- * Terminate string that points to device name and bump it
- * up one, so we can read the queue id there.
- */
- *delim = '\0';
- if (sscanf(++delim, "%hd\n", &qid) != 1)
- goto err_no_cmd;
-
- /* Check buffer length, valid ifname and queue id */
- if (strlen(buffer) > IFNAMSIZ ||
- !dev_valid_name(buffer) ||
- qid > bond->dev->real_num_tx_queues)
- goto err_no_cmd;
-
- /* Get the pointer to that interface if it exists */
- sdev = __dev_get_by_name(dev_net(bond->dev), buffer);
- if (!sdev)
- goto err_no_cmd;
-
- /* Search for thes slave and check for duplicate qids */
- update_slave = NULL;
- bond_for_each_slave(bond, slave, iter) {
- if (sdev == slave->dev)
- /*
- * We don't need to check the matching
- * slave for dups, since we're overwriting it
- */
- update_slave = slave;
- else if (qid && qid == slave->queue_id) {
- goto err_no_cmd;
- }
- }
-
- if (!update_slave)
- goto err_no_cmd;
+ int ret;
- /* Actually set the qids for the slave */
- update_slave->queue_id = qid;
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_QUEUE_ID, (char *)buffer);
+ if (!ret)
+ ret = count;
-out:
- rtnl_unlock();
return ret;
-
-err_no_cmd:
- pr_info("invalid input for queue_id set for %s.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
}
-
static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
bonding_store_queue_id);
@@ -1508,42 +972,13 @@ static ssize_t bonding_store_slaves_active(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
- struct list_head *iter;
- struct slave *slave;
-
- if (!rtnl_trylock())
- return restart_syscall();
-
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no all_slaves_active value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
-
- if (new_value == bond->params.all_slaves_active)
- goto out;
+ int ret;
- if ((new_value == 0) || (new_value == 1)) {
- bond->params.all_slaves_active = new_value;
- } else {
- pr_info("%s: Ignoring invalid all_slaves_active value %d.\n",
- bond->dev->name, new_value);
- ret = -EINVAL;
- goto out;
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ALL_SLAVES_ACTIVE,
+ (char *)buf);
+ if (!ret)
+ ret = count;
- bond_for_each_slave(bond, slave, iter) {
- if (!bond_is_active_slave(slave)) {
- if (new_value)
- slave->inactive = 0;
- else
- slave->inactive = 1;
- }
- }
-out:
- rtnl_unlock();
return ret;
}
static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
@@ -1565,27 +1000,13 @@ static ssize_t bonding_store_resend_igmp(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no resend_igmp value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
-
- if (new_value < 0 || new_value > 255) {
- pr_err("%s: Invalid resend_igmp value %d not in range 0-255; rejected.\n",
- bond->dev->name, new_value);
- ret = -EINVAL;
- goto out;
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_RESEND_IGMP, (char *)buf);
+ if (!ret)
+ ret = count;
- pr_info("%s: Setting resend_igmp to %d.\n",
- bond->dev->name, new_value);
- bond->params.resend_igmp = new_value;
-out:
return ret;
}
@@ -1606,24 +1027,12 @@ static ssize_t bonding_store_lp_interval(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
-
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no lp interval value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
+ int ret;
- if (new_value <= 0) {
- pr_err ("%s: lp_interval must be between 1 and %d\n",
- bond->dev->name, INT_MAX);
- ret = -EINVAL;
- goto out;
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LP_INTERVAL, (char *)buf);
+ if (!ret)
+ ret = count;
- bond->params.lp_interval = new_value;
-out:
return ret;
}
@@ -1636,10 +1045,6 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
{
struct bonding *bond = to_bond(d);
unsigned int packets_per_slave = bond->params.packets_per_slave;
-
- if (packets_per_slave > 1)
- packets_per_slave = reciprocal_value(packets_per_slave);
-
return sprintf(buf, "%u\n", packets_per_slave);
}
@@ -1648,28 +1053,13 @@ static ssize_t bonding_store_packets_per_slave(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
+ int ret;
+
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PACKETS_PER_SLAVE,
+ (char *)buf);
+ if (!ret)
+ ret = count;
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no packets_per_slave value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0 || new_value > USHRT_MAX) {
- pr_err("%s: packets_per_slave must be between 0 and %u\n",
- bond->dev->name, USHRT_MAX);
- ret = -EINVAL;
- goto out;
- }
- if (bond->params.mode != BOND_MODE_ROUNDROBIN)
- pr_warn("%s: Warning: packets_per_slave has effect only in balance-rr mode\n",
- bond->dev->name);
- if (new_value > 1)
- bond->params.packets_per_slave = reciprocal_value(new_value);
- else
- bond->params.packets_per_slave = new_value;
-out:
return ret;
}
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
new file mode 100644
index 000000000000..2e4eec5450c8
--- /dev/null
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -0,0 +1,144 @@
+/* Sysfs attributes of bond slaves
+ *
+ * Copyright (c) 2014 Scott Feldman <sfeldma@cumulusnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include "bonding.h"
+
+struct slave_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct slave *, char *);
+};
+
+#define SLAVE_ATTR(_name, _mode, _show) \
+const struct slave_attribute slave_attr_##_name = { \
+ .attr = {.name = __stringify(_name), \
+ .mode = _mode }, \
+ .show = _show, \
+};
+#define SLAVE_ATTR_RO(_name) \
+ SLAVE_ATTR(_name, S_IRUGO, _name##_show)
+
+static ssize_t state_show(struct slave *slave, char *buf)
+{
+ switch (bond_slave_state(slave)) {
+ case BOND_STATE_ACTIVE:
+ return sprintf(buf, "active\n");
+ case BOND_STATE_BACKUP:
+ return sprintf(buf, "backup\n");
+ default:
+ return sprintf(buf, "UNKONWN\n");
+ }
+}
+static SLAVE_ATTR_RO(state);
+
+static ssize_t mii_status_show(struct slave *slave, char *buf)
+{
+ return sprintf(buf, "%s\n", bond_slave_link_status(slave->link));
+}
+static SLAVE_ATTR_RO(mii_status);
+
+static ssize_t link_failure_count_show(struct slave *slave, char *buf)
+{
+ return sprintf(buf, "%d\n", slave->link_failure_count);
+}
+static SLAVE_ATTR_RO(link_failure_count);
+
+static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
+{
+ return sprintf(buf, "%pM\n", slave->perm_hwaddr);
+}
+static SLAVE_ATTR_RO(perm_hwaddr);
+
+static ssize_t queue_id_show(struct slave *slave, char *buf)
+{
+ return sprintf(buf, "%d\n", slave->queue_id);
+}
+static SLAVE_ATTR_RO(queue_id);
+
+static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
+{
+ const struct aggregator *agg;
+
+ if (slave->bond->params.mode == BOND_MODE_8023AD) {
+ agg = SLAVE_AD_INFO(slave).port.aggregator;
+ if (agg)
+ return sprintf(buf, "%d\n",
+ agg->aggregator_identifier);
+ }
+
+ return sprintf(buf, "N/A\n");
+}
+static SLAVE_ATTR_RO(ad_aggregator_id);
+
+static const struct slave_attribute *slave_attrs[] = {
+ &slave_attr_state,
+ &slave_attr_mii_status,
+ &slave_attr_link_failure_count,
+ &slave_attr_perm_hwaddr,
+ &slave_attr_queue_id,
+ &slave_attr_ad_aggregator_id,
+ NULL
+};
+
+#define to_slave_attr(_at) container_of(_at, struct slave_attribute, attr)
+#define to_slave(obj) container_of(obj, struct slave, kobj)
+
+static ssize_t slave_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct slave_attribute *slave_attr = to_slave_attr(attr);
+ struct slave *slave = to_slave(kobj);
+
+ return slave_attr->show(slave, buf);
+}
+
+static const struct sysfs_ops slave_sysfs_ops = {
+ .show = slave_show,
+};
+
+static struct kobj_type slave_ktype = {
+#ifdef CONFIG_SYSFS
+ .sysfs_ops = &slave_sysfs_ops,
+#endif
+};
+
+int bond_sysfs_slave_add(struct slave *slave)
+{
+ const struct slave_attribute **a;
+ int err;
+
+ err = kobject_init_and_add(&slave->kobj, &slave_ktype,
+ &(slave->dev->dev.kobj), "bonding_slave");
+ if (err)
+ return err;
+
+ for (a = slave_attrs; *a; ++a) {
+ err = sysfs_create_file(&slave->kobj, &((*a)->attr));
+ if (err) {
+ kobject_del(&slave->kobj);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void bond_sysfs_slave_del(struct slave *slave)
+{
+ const struct slave_attribute **a;
+
+ for (a = slave_attrs; *a; ++a)
+ sysfs_remove_file(&slave->kobj, &((*a)->attr));
+
+ kobject_del(&slave->kobj);
+}
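
The new per-slave kobject is registered and torn down from the enslave/release paths in bond_main.c rather than from this file; roughly, and with the exact call sites and error label being assumptions:

	/* in bond_enslave(), once the slave is linked into the bond */
	res = bond_sysfs_slave_add(new_slave);
	if (res)
		goto err_detach;	/* hypothetical unwind label */

	/* in the release path, before the slave is freed */
	bond_sysfs_slave_del(slave);

Each slave then exposes a read-only bonding_slave/ directory under its own net device with the attributes defined above (state, mii_status, queue_id, and so on).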
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index a9f4f9f4d8ce..86ccfb9f71cc 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -23,8 +23,11 @@
#include <linux/netpoll.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
+#include <linux/reciprocal_div.h>
+
#include "bond_3ad.h"
#include "bond_alb.h"
+#include "bond_options.h"
#define DRV_VERSION "3.7.1"
#define DRV_RELDATE "April 27, 2011"
@@ -101,6 +104,10 @@
netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
NULL)
+/* Caller must have rcu_read_lock */
+#define bond_first_slave_rcu(bond) \
+ netdev_lower_get_first_private_rcu(bond->dev)
+
#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
@@ -167,6 +174,7 @@ struct bond_params {
int resend_igmp;
int lp_interval;
int packets_per_slave;
+ struct reciprocal_value reciprocal_packets_per_slave;
};
struct bond_parm_tbl {
@@ -199,6 +207,7 @@ struct slave {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *np;
#endif
+ struct kobject kobj;
};
/*
@@ -280,12 +289,31 @@ static inline bool bond_is_lb(const struct bonding *bond)
static inline void bond_set_active_slave(struct slave *slave)
{
- slave->backup = 0;
+ if (slave->backup) {
+ slave->backup = 0;
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+ }
}
static inline void bond_set_backup_slave(struct slave *slave)
{
- slave->backup = 1;
+ if (!slave->backup) {
+ slave->backup = 1;
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+ }
+}
+
+static inline void bond_slave_state_change(struct bonding *bond)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave(bond, tmp, iter) {
+ if (tmp->link == BOND_LINK_UP)
+ bond_set_active_slave(tmp);
+ else if (tmp->link == BOND_LINK_DOWN)
+ bond_set_backup_slave(tmp);
+ }
}
static inline int bond_slave_state(struct slave *slave)
@@ -394,8 +422,8 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3
in_dev = __in_dev_get_rcu(dev);
if (in_dev)
- addr = inet_confirm_addr(in_dev, dst, local, RT_SCOPE_HOST);
-
+ addr = inet_confirm_addr(dev_net(dev), in_dev, dst, local,
+ RT_SCOPE_HOST);
rcu_read_unlock();
return addr;
}
@@ -412,19 +440,18 @@ static inline bool slave_can_tx(struct slave *slave)
struct bond_net;
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
-int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
-void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
+void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
int bond_create(struct net *net, const char *name);
int bond_create_sysfs(struct bond_net *net);
void bond_destroy_sysfs(struct bond_net *net);
void bond_prepare_sysfs_group(struct bonding *bond);
+int bond_sysfs_slave_add(struct slave *slave);
+void bond_sysfs_slave_del(struct slave *slave);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
-void bond_mii_monitor(struct work_struct *);
-void bond_loadbalance_arp_mon(struct work_struct *);
-void bond_activebackup_arp_mon(struct work_struct *);
int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
+int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl);
void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
void bond_create_debugfs(void);
@@ -437,10 +464,11 @@ void bond_setup(struct net_device *bond_dev);
unsigned int bond_get_num_tx_queues(void);
int bond_netlink_init(void);
void bond_netlink_fini(void);
-int bond_option_mode_set(struct bonding *bond, int mode);
-int bond_option_active_slave_set(struct bonding *bond, struct net_device *slave_dev);
+int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
+int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
struct net_device *bond_option_active_slave_get(struct bonding *bond);
+const char *bond_slave_link_status(s8 link);
struct bond_net {
struct net * net; /* Associated network namespace */
@@ -520,7 +548,6 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
/* exported from bond_main.c */
extern int bond_net_id;
extern const struct bond_parm_tbl bond_lacp_tbl[];
-extern const struct bond_parm_tbl bond_mode_tbl[];
extern const struct bond_parm_tbl xmit_hashtype_tbl[];
extern const struct bond_parm_tbl arp_validate_tbl[];
extern const struct bond_parm_tbl arp_all_targets_tbl[];
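
bond_params now keeps a precomputed struct reciprocal_value next to packets_per_slave, which is why the reciprocal_div.h include moves here from bond_sysfs.c and why the show handler above can print the stored value directly. A minimal sketch of the intended split, assuming the <linux/reciprocal_div.h> helpers and that the balance-rr transmit path consumes the precomputed form roughly like this:

	/* when the option is set: keep both the plain and the reciprocal form */
	bond->params.packets_per_slave = new_value;
	if (new_value > 1)
		bond->params.reciprocal_packets_per_slave = reciprocal_value(new_value);

	/* in the round-robin transmit path: spread the tx counter without a divide */
	slave_id = reciprocal_divide(bond->rr_tx_counter,
				     bond->params.reciprocal_packets_per_slave);

The sysfs path only ever deals with the human-readable value; the reciprocal form exists purely to avoid a division per transmitted packet.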
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index ee92ad5a6cf8..39ba2f892ad6 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -3,7 +3,6 @@
* Author: Daniel Martensson
* License terms: GNU General Public License (GPL) version 2.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 3c069472eb8b..9e7d95dae2c7 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -71,7 +71,7 @@ config CAN_AT91
and AT91SAM9X5 processors.
config CAN_TI_HECC
- depends on ARCH_OMAP3
+ depends on ARM
tristate "TI High End CAN Controller"
---help---
Driver for TI HECC (High End CAN Controller) module found on many
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index cf0f63e14e53..6efe27458116 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -22,7 +22,6 @@
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 8a0b515b33ea..8d2b89a12e09 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -9,7 +9,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 77061eebb034..951bfede8f3d 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -808,17 +808,19 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
u32 num_rx_pkts = 0;
unsigned int msg_obj, msg_ctrl_save;
struct c_can_priv *priv = netdev_priv(dev);
- u32 val = c_can_read_reg32(priv, C_CAN_INTPND1_REG);
+ u16 val;
+
+ /*
+ * It is faster to read only one 16-bit register. This is only possible
+ * for a maximum of 16 message objects.
+ */
+ BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
+ "Implementation does not support more message objects than 16");
+
+ while (quota > 0 && (val = priv->read_reg(priv, C_CAN_INTPND1_REG))) {
+ while ((msg_obj = ffs(val)) && quota > 0) {
+ val &= ~BIT(msg_obj - 1);
- for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
- msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
- val = c_can_read_reg32(priv, C_CAN_INTPND1_REG),
- msg_obj++) {
- /*
- * as interrupt pending register's bit n-1 corresponds to
- * message object n, we need to handle the same properly.
- */
- if (val & (1 << (msg_obj - 1))) {
c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
~IF_COMM_TXRQST);
msg_ctrl_save = priv->read_reg(priv,
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index bda1888cae9a..fc59bc6f040b 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
@@ -324,19 +323,10 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
}
if (!priv->echo_skb[idx]) {
- struct sock *srcsk = skb->sk;
- if (atomic_read(&skb->users) != 1) {
- struct sk_buff *old_skb = skb;
-
- skb = skb_clone(old_skb, GFP_ATOMIC);
- kfree_skb(old_skb);
- if (!skb)
- return;
- } else
- skb_orphan(skb);
-
- skb->sk = srcsk;
+ skb = can_create_echo_skb(skb);
+ if (!skb)
+ return;
/* make settings for echo to reduce code in irq context */
skb->protocol = htons(ETH_P_CAN);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index aaed97bee471..320bef2dba42 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -235,9 +235,12 @@ static const struct can_bittiming_const flexcan_bittiming_const = {
};
/*
- * Abstract off the read/write for arm versus ppc.
+ * Abstract off the read/write for arm versus ppc. This
+ * assumes that PPC uses big-endian registers and everything
+ * else uses little-endian registers, independent of CPU
+ * endianness.
*/
-#if defined(__BIG_ENDIAN)
+#if defined(CONFIG_PPC)
static inline u32 flexcan_read(void __iomem *addr)
{
return in_be32(addr);
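
The #else branch is outside this hunk, but the switch from __BIG_ENDIAN to CONFIG_PPC only makes sense together with it: non-PPC builds presumably keep little-endian MMIO accessors, so a big-endian ARM kernel still picks the correct ones. A sketch of the assumed counterpart:

	#else
	static inline u32 flexcan_read(void __iomem *addr)
	{
		return readl(addr);	/* little-endian register access, regardless of CPU endianness */
	}

	static inline void flexcan_write(u32 val, void __iomem *addr)
	{
		writel(val, addr);
	}
	#endif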
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index ab5909a7bae9..71594e5676fd 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
@@ -19,6 +18,7 @@
#include <linux/netdevice.h>
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/can/skb.h>
#include <linux/can/error.h>
#include <linux/mfd/janz.h>
@@ -1134,20 +1134,9 @@ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
*/
static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
{
- struct sock *srcsk = skb->sk;
-
- if (atomic_read(&skb->users) != 1) {
- struct sk_buff *old_skb = skb;
-
- skb = skb_clone(old_skb, GFP_ATOMIC);
- kfree_skb(old_skb);
- if (!skb)
- return;
- } else {
- skb_orphan(skb);
- }
-
- skb->sk = srcsk;
+ skb = can_create_echo_skb(skb);
+ if (!skb)
+ return;
/* save this skb for tx interrupt echo handling */
skb_queue_tail(&mod->echoq, skb);
@@ -1323,7 +1312,7 @@ static int ican3_napi(struct napi_struct *napi, int budget)
/* process all communication messages */
while (true) {
- struct ican3_msg msg;
+ struct ican3_msg uninitialized_var(msg);
ret = ican3_recv_msg(mod, &msg);
if (ret)
break;
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 08ac401e0214..cdb9808d12db 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -28,8 +28,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
*
@@ -59,6 +58,7 @@
#include <linux/can/dev.h>
#include <linux/can/led.h>
#include <linux/can/platform/mcp251x.h>
+#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -69,6 +69,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
@@ -264,6 +266,7 @@ struct mcp251x_priv {
int restart_tx;
struct regulator *power;
struct regulator *transceiver;
+ struct clk *clk;
};
#define MCP251X_IS(_model) \
@@ -995,22 +998,65 @@ static const struct net_device_ops mcp251x_netdev_ops = {
.ndo_start_xmit = mcp251x_hard_start_xmit,
};
+static const struct of_device_id mcp251x_of_match[] = {
+ {
+ .compatible = "microchip,mcp2510",
+ .data = (void *)CAN_MCP251X_MCP2510,
+ },
+ {
+ .compatible = "microchip,mcp2515",
+ .data = (void *)CAN_MCP251X_MCP2515,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mcp251x_of_match);
+
+static const struct spi_device_id mcp251x_id_table[] = {
+ {
+ .name = "mcp2510",
+ .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP2510,
+ },
+ {
+ .name = "mcp2515",
+ .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP2515,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
+
static int mcp251x_can_probe(struct spi_device *spi)
{
+ const struct of_device_id *of_id = of_match_device(mcp251x_of_match,
+ &spi->dev);
+ struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
struct net_device *net;
struct mcp251x_priv *priv;
- struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
- int ret = -ENODEV;
+ int freq, ret = -ENODEV;
+ struct clk *clk;
+
+ clk = devm_clk_get(&spi->dev, NULL);
+ if (IS_ERR(clk)) {
+ if (pdata)
+ freq = pdata->oscillator_frequency;
+ else
+ return PTR_ERR(clk);
+ } else {
+ freq = clk_get_rate(clk);
+ }
- if (!pdata)
- /* Platform data is required for osc freq */
- goto error_out;
+ /* Sanity check */
+ if (freq < 1000000 || freq > 25000000)
+ return -ERANGE;
/* Allocate can/net device */
net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
- if (!net) {
- ret = -ENOMEM;
- goto error_alloc;
+ if (!net)
+ return -ENOMEM;
+
+ if (!IS_ERR(clk)) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto out_free;
}
net->netdev_ops = &mcp251x_netdev_ops;
@@ -1019,23 +1065,27 @@ static int mcp251x_can_probe(struct spi_device *spi)
priv = netdev_priv(net);
priv->can.bittiming_const = &mcp251x_bittiming_const;
priv->can.do_set_mode = mcp251x_do_set_mode;
- priv->can.clock.freq = pdata->oscillator_frequency / 2;
+ priv->can.clock.freq = freq / 2;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
- priv->model = spi_get_device_id(spi)->driver_data;
+ if (of_id)
+ priv->model = (enum mcp251x_model)of_id->data;
+ else
+ priv->model = spi_get_device_id(spi)->driver_data;
priv->net = net;
+ priv->clk = clk;
priv->power = devm_regulator_get(&spi->dev, "vdd");
priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
(PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
ret = -EPROBE_DEFER;
- goto error_power;
+ goto out_clk;
}
ret = mcp251x_power_enable(priv->power, 1);
if (ret)
- goto error_power;
+ goto out_clk;
spi_set_drvdata(spi, priv);
@@ -1067,15 +1117,17 @@ static int mcp251x_can_probe(struct spi_device *spi)
/* Allocate non-DMA buffers */
if (!mcp251x_enable_dma) {
- priv->spi_tx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
+ priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
+ GFP_KERNEL);
if (!priv->spi_tx_buf) {
ret = -ENOMEM;
- goto error_tx_buf;
+ goto error_probe;
}
- priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
+ priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
+ GFP_KERNEL);
if (!priv->spi_rx_buf) {
ret = -ENOMEM;
- goto error_rx_buf;
+ goto error_probe;
}
}
@@ -1108,21 +1160,18 @@ static int mcp251x_can_probe(struct spi_device *spi)
return ret;
error_probe:
- if (!mcp251x_enable_dma)
- kfree(priv->spi_rx_buf);
-error_rx_buf:
- if (!mcp251x_enable_dma)
- kfree(priv->spi_tx_buf);
-error_tx_buf:
if (mcp251x_enable_dma)
dma_free_coherent(&spi->dev, PAGE_SIZE,
priv->spi_tx_buf, priv->spi_tx_dma);
mcp251x_power_enable(priv->power, 0);
-error_power:
+
+out_clk:
+ if (!IS_ERR(clk))
+ clk_disable_unprepare(clk);
+
+out_free:
free_candev(net);
-error_alloc:
- dev_err(&spi->dev, "probe failed\n");
-error_out:
+
return ret;
}
@@ -1136,13 +1185,13 @@ static int mcp251x_can_remove(struct spi_device *spi)
if (mcp251x_enable_dma) {
dma_free_coherent(&spi->dev, PAGE_SIZE,
priv->spi_tx_buf, priv->spi_tx_dma);
- } else {
- kfree(priv->spi_tx_buf);
- kfree(priv->spi_rx_buf);
}
mcp251x_power_enable(priv->power, 0);
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
free_candev(net);
return 0;
@@ -1205,21 +1254,13 @@ static int mcp251x_can_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
mcp251x_can_resume);
-static const struct spi_device_id mcp251x_id_table[] = {
- { "mcp2510", CAN_MCP251X_MCP2510 },
- { "mcp2515", CAN_MCP251X_MCP2515 },
- { },
-};
-
-MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
-
static struct spi_driver mcp251x_can_driver = {
.driver = {
.name = DEVICE_NAME,
.owner = THIS_MODULE,
+ .of_match_table = mcp251x_of_match,
.pm = &mcp251x_can_pm_ops,
},
-
.id_table = mcp251x_id_table,
.probe = mcp251x_can_probe,
.remove = mcp251x_can_remove,
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index e59b3a392af6..44725296f72a 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
@@ -109,135 +108,170 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
#endif /* CONFIG_PPC_MPC52xx */
#ifdef CONFIG_PPC_MPC512x
-struct mpc512x_clockctl {
- u32 spmr; /* System PLL Mode Reg */
- u32 sccr[2]; /* System Clk Ctrl Reg 1 & 2 */
- u32 scfr1; /* System Clk Freq Reg 1 */
- u32 scfr2; /* System Clk Freq Reg 2 */
- u32 reserved;
- u32 bcr; /* Bread Crumb Reg */
- u32 pccr[12]; /* PSC Clk Ctrl Reg 0-11 */
- u32 spccr; /* SPDIF Clk Ctrl Reg */
- u32 cccr; /* CFM Clk Ctrl Reg */
- u32 dccr; /* DIU Clk Cnfg Reg */
- u32 mccr[4]; /* MSCAN Clk Ctrl Reg 1-3 */
-};
-
-static struct of_device_id mpc512x_clock_ids[] = {
- { .compatible = "fsl,mpc5121-clock", },
- {}
-};
-
static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
- const char *clock_name, int *mscan_clksrc)
+ const char *clock_source, int *mscan_clksrc)
{
- struct mpc512x_clockctl __iomem *clockctl;
- struct device_node *np_clock;
- struct clk *sys_clk, *ref_clk;
- int plen, clockidx, clocksrc = -1;
- u32 sys_freq, val, clockdiv = 1, freq = 0;
- const u32 *pval;
-
- np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
- if (!np_clock) {
- dev_err(&ofdev->dev, "couldn't find clock node\n");
- return 0;
- }
- clockctl = of_iomap(np_clock, 0);
- if (!clockctl) {
- dev_err(&ofdev->dev, "couldn't map clock registers\n");
- goto exit_put;
- }
+ struct device_node *np;
+ u32 clockdiv;
+ enum {
+ CLK_FROM_AUTO,
+ CLK_FROM_IPS,
+ CLK_FROM_SYS,
+ CLK_FROM_REF,
+ } clk_from;
+ struct clk *clk_in, *clk_can;
+ unsigned long freq_calc;
+ struct mscan_priv *priv;
+ struct clk *clk_ipg;
- /* Determine the MSCAN device index from the peripheral's
- * physical address. Register address offsets against the
- * IMMR base are: 0x1300, 0x1380, 0x2300, 0x2380
+ /* the caller passed in the clock source spec that was read from
+ * the device tree, get the optional clock divider as well
*/
- pval = of_get_property(ofdev->dev.of_node, "reg", &plen);
- BUG_ON(!pval || plen < sizeof(*pval));
- clockidx = (*pval & 0x80) ? 1 : 0;
- if (*pval & 0x2000)
- clockidx += 2;
+ np = ofdev->dev.of_node;
+ clockdiv = 1;
+ of_property_read_u32(np, "fsl,mscan-clock-divider", &clockdiv);
+ dev_dbg(&ofdev->dev, "device tree specs: clk src[%s] div[%d]\n",
+ clock_source ? clock_source : "<NULL>", clockdiv);
+
+ /* when clock-source is 'ip', the CANCTL1[CLKSRC] bit needs to
+ * get set, and the 'ips' clock is the input to the MSCAN
+ * component
+ *
+ * for clock-source values of 'ref' or 'sys' the CANCTL1[CLKSRC]
+ * bit needs to get cleared, an optional clock-divider may have
+ * been specified (the default value is 1), the appropriate
+ * MSCAN related MCLK is the input to the MSCAN component
+ *
+ * in the absence of a clock-source spec, first an optimal clock
+ * gets determined based on the 'sys' clock; if that fails, the
+ * 'ref' clock is used
+ */
+ clk_from = CLK_FROM_AUTO;
+ if (clock_source) {
+ /* interpret the device tree's spec for the clock source */
+ if (!strcmp(clock_source, "ip"))
+ clk_from = CLK_FROM_IPS;
+ else if (!strcmp(clock_source, "sys"))
+ clk_from = CLK_FROM_SYS;
+ else if (!strcmp(clock_source, "ref"))
+ clk_from = CLK_FROM_REF;
+ else
+ goto err_invalid;
+ dev_dbg(&ofdev->dev, "got a clk source spec[%d]\n", clk_from);
+ }
+ if (clk_from == CLK_FROM_AUTO) {
+ /* no spec so far, try the 'sys' clock; round to the
+ * next MHz and see if we can get a multiple of 16MHz
+ */
+ dev_dbg(&ofdev->dev, "no clk source spec, trying SYS\n");
+ clk_in = devm_clk_get(&ofdev->dev, "sys");
+ if (IS_ERR(clk_in))
+ goto err_notavail;
+ freq_calc = clk_get_rate(clk_in);
+ freq_calc += 499999;
+ freq_calc /= 1000000;
+ freq_calc *= 1000000;
+ if ((freq_calc % 16000000) == 0) {
+ clk_from = CLK_FROM_SYS;
+ clockdiv = freq_calc / 16000000;
+ dev_dbg(&ofdev->dev,
+ "clk fit, sys[%lu] div[%d] freq[%lu]\n",
+ freq_calc, clockdiv, freq_calc / clockdiv);
+ }
+ }
+ if (clk_from == CLK_FROM_AUTO) {
+ /* no spec so far, use the 'ref' clock */
+ dev_dbg(&ofdev->dev, "no clk source spec, trying REF\n");
+ clk_in = devm_clk_get(&ofdev->dev, "ref");
+ if (IS_ERR(clk_in))
+ goto err_notavail;
+ clk_from = CLK_FROM_REF;
+ freq_calc = clk_get_rate(clk_in);
+ dev_dbg(&ofdev->dev,
+ "clk fit, ref[%lu] (no div) freq[%lu]\n",
+ freq_calc, freq_calc);
+ }
- /*
- * Clock source and divider selection: 3 different clock sources
- * can be selected: "ip", "ref" or "sys". For the latter two, a
- * clock divider can be defined as well. If the clock source is
- * not specified by the device tree, we first try to find an
- * optimal CAN source clock based on the system clock. If that
- * is not posslible, the reference clock will be used.
+ /* select IPS or MCLK as the MSCAN input (returned to the caller),
+ * setup the MCLK mux source and rate if applicable, apply the
+ * optionally specified or derived above divider, and determine
+ * the actual resulting clock rate to return to the caller
*/
- if (clock_name && !strcmp(clock_name, "ip")) {
+ switch (clk_from) {
+ case CLK_FROM_IPS:
+ clk_can = devm_clk_get(&ofdev->dev, "ips");
+ if (IS_ERR(clk_can))
+ goto err_notavail;
+ priv = netdev_priv(dev_get_drvdata(&ofdev->dev));
+ priv->clk_can = clk_can;
+ freq_calc = clk_get_rate(clk_can);
*mscan_clksrc = MSCAN_CLKSRC_IPS;
- freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
- } else {
+ dev_dbg(&ofdev->dev, "clk from IPS, clksrc[%d] freq[%lu]\n",
+ *mscan_clksrc, freq_calc);
+ break;
+ case CLK_FROM_SYS:
+ case CLK_FROM_REF:
+ clk_can = devm_clk_get(&ofdev->dev, "mclk");
+ if (IS_ERR(clk_can))
+ goto err_notavail;
+ priv = netdev_priv(dev_get_drvdata(&ofdev->dev));
+ priv->clk_can = clk_can;
+ if (clk_from == CLK_FROM_SYS)
+ clk_in = devm_clk_get(&ofdev->dev, "sys");
+ if (clk_from == CLK_FROM_REF)
+ clk_in = devm_clk_get(&ofdev->dev, "ref");
+ if (IS_ERR(clk_in))
+ goto err_notavail;
+ clk_set_parent(clk_can, clk_in);
+ freq_calc = clk_get_rate(clk_in);
+ freq_calc /= clockdiv;
+ clk_set_rate(clk_can, freq_calc);
+ freq_calc = clk_get_rate(clk_can);
*mscan_clksrc = MSCAN_CLKSRC_BUS;
-
- pval = of_get_property(ofdev->dev.of_node,
- "fsl,mscan-clock-divider", &plen);
- if (pval && plen == sizeof(*pval))
- clockdiv = *pval;
- if (!clockdiv)
- clockdiv = 1;
-
- if (!clock_name || !strcmp(clock_name, "sys")) {
- sys_clk = devm_clk_get(&ofdev->dev, "sys_clk");
- if (IS_ERR(sys_clk)) {
- dev_err(&ofdev->dev, "couldn't get sys_clk\n");
- goto exit_unmap;
- }
- /* Get and round up/down sys clock rate */
- sys_freq = 1000000 *
- ((clk_get_rate(sys_clk) + 499999) / 1000000);
-
- if (!clock_name) {
- /* A multiple of 16 MHz would be optimal */
- if ((sys_freq % 16000000) == 0) {
- clocksrc = 0;
- clockdiv = sys_freq / 16000000;
- freq = sys_freq / clockdiv;
- }
- } else {
- clocksrc = 0;
- freq = sys_freq / clockdiv;
- }
- }
-
- if (clocksrc < 0) {
- ref_clk = devm_clk_get(&ofdev->dev, "ref_clk");
- if (IS_ERR(ref_clk)) {
- dev_err(&ofdev->dev, "couldn't get ref_clk\n");
- goto exit_unmap;
- }
- clocksrc = 1;
- freq = clk_get_rate(ref_clk) / clockdiv;
- }
+ dev_dbg(&ofdev->dev, "clk from MCLK, clksrc[%d] freq[%lu]\n",
+ *mscan_clksrc, freq_calc);
+ break;
+ default:
+ goto err_invalid;
}
- /* Disable clock */
- out_be32(&clockctl->mccr[clockidx], 0x0);
- if (clocksrc >= 0) {
- /* Set source and divider */
- val = (clocksrc << 14) | ((clockdiv - 1) << 17);
- out_be32(&clockctl->mccr[clockidx], val);
- /* Enable clock */
- out_be32(&clockctl->mccr[clockidx], val | 0x10000);
- }
+ /* the above clk_can item is used for the bitrate, access to
+ * the peripheral's register set needs the clk_ipg item
+ */
+ clk_ipg = devm_clk_get(&ofdev->dev, "ipg");
+ if (IS_ERR(clk_ipg))
+ goto err_notavail_ipg;
+ if (clk_prepare_enable(clk_ipg))
+ goto err_notavail_ipg;
+ priv = netdev_priv(dev_get_drvdata(&ofdev->dev));
+ priv->clk_ipg = clk_ipg;
+
+ /* return the determined clock source rate */
+ return freq_calc;
+
+err_invalid:
+ dev_err(&ofdev->dev, "invalid clock source specification\n");
+ /* clock source rate could not get determined */
+ return 0;
- /* Enable MSCAN clock domain */
- val = in_be32(&clockctl->sccr[1]);
- if (!(val & (1 << 25)))
- out_be32(&clockctl->sccr[1], val | (1 << 25));
+err_notavail:
+ dev_err(&ofdev->dev, "cannot acquire or setup bitrate clock source\n");
+ /* clock source rate could not get determined */
+ return 0;
- dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n",
- *mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" :
- clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
+err_notavail_ipg:
+ dev_err(&ofdev->dev, "cannot acquire or setup register clock\n");
+ /* clock source rate could not get determined */
+ return 0;
+}
-exit_unmap:
- iounmap(clockctl);
-exit_put:
- of_node_put(np_clock);
- return freq;
+static void mpc512x_can_put_clock(struct platform_device *ofdev)
+{
+ struct mscan_priv *priv;
+
+ priv = netdev_priv(dev_get_drvdata(&ofdev->dev));
+ if (priv->clk_ipg)
+ clk_disable_unprepare(priv->clk_ipg);
}
#else /* !CONFIG_PPC_MPC512x */
static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
@@ -245,6 +279,7 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
{
return 0;
}
+#define mpc512x_can_put_clock NULL
#endif /* CONFIG_PPC_MPC512x */
static const struct of_device_id mpc5xxx_can_table[];
@@ -386,11 +421,13 @@ static int mpc5xxx_can_resume(struct platform_device *ofdev)
static const struct mpc5xxx_can_data mpc5200_can_data = {
.type = MSCAN_TYPE_MPC5200,
.get_clock = mpc52xx_can_get_clock,
+ /* .put_clock not applicable */
};
static const struct mpc5xxx_can_data mpc5121_can_data = {
.type = MSCAN_TYPE_MPC5121,
.get_clock = mpc512x_can_get_clock,
+ .put_clock = mpc512x_can_put_clock,
};
static const struct of_device_id mpc5xxx_can_table[] = {
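
Editor's note: the MPC512x clock setup above first rounds the SYS clock rate to a whole number of MHz and only keeps it when that rounded rate is an integer multiple of 16 MHz (deriving the divider from it); otherwise the driver falls back to the REF clock. A minimal stand-alone sketch of that divider arithmetic, with a hypothetical SYS rate, plain user-space C rather than driver code:

#include <stdio.h>

/* mirrors the rounding and 16 MHz check done in mpc512x_can_get_clock() above */
static unsigned long pick_rate(unsigned long sys_hz, int *div)
{
	unsigned long rounded = (sys_hz + 499999) / 1000000 * 1000000;

	if (rounded % 16000000)
		return 0;		/* caller would fall back to the "ref" clock */
	*div = rounded / 16000000;
	return rounded / *div;		/* 16 MHz whenever a divider fits */
}

int main(void)
{
	int div;
	unsigned long rate = pick_rate(127999999UL, &div);	/* assumed SYS rate */

	printf("div=%d rate=%lu\n", div, rate);	/* prints div=8 rate=16000000 */
	return 0;
}
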
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index a955ec8c4b97..b9f3faabb0f3 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index e98abb97a050..ad8e08f9c496 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSCAN_H__
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 5f0e9b3bfa7b..6c077eb87b5e 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/interrupt.h>
@@ -22,7 +21,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 835921388e7b..d790b874ca79 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 087b13bd300e..c96eb14699d5 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -26,8 +26,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index f9b4f81cd86a..fbb61a0d901f 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
@@ -45,7 +44,8 @@ MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
"esd CAN-PCI/PMC/266, "
"esd CAN-PCIe/2000, "
"Connect Tech Inc. CANpro/104-Plus Opto (CRG001), "
- "IXXAT PC-I 04/PCI")
+ "IXXAT PC-I 04/PCI, "
+ "ELCUS CAN-200-PCI")
MODULE_LICENSE("GPL v2");
#define PLX_PCI_MAX_CHAN 2
@@ -123,6 +123,11 @@ struct plx_pci_card {
#define ESD_PCI_SUB_SYS_ID_PCIE2000 0x0200
#define ESD_PCI_SUB_SYS_ID_PCI104200 0x0501
+#define CAN200PCI_DEVICE_ID 0x9030
+#define CAN200PCI_VENDOR_ID 0x10b5
+#define CAN200PCI_SUB_DEVICE_ID 0x0301
+#define CAN200PCI_SUB_VENDOR_ID 0xe1c5
+
#define IXXAT_PCI_VENDOR_ID 0x10b5
#define IXXAT_PCI_DEVICE_ID 0x9050
#define IXXAT_PCI_SUB_SYS_ID 0x2540
@@ -234,6 +239,14 @@ static struct plx_pci_card_info plx_pci_card_info_cti = {
/* based on PLX9030 */
};
+static struct plx_pci_card_info plx_pci_card_info_elcus = {
+ "Eclus CAN-200-PCI", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {1, 0x00, 0x00}, { {2, 0x00, 0x80}, {3, 0x00, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9030 */
+};
+
static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
{
/* Adlink PCI-7841/cPCI-7841 */
@@ -319,6 +332,13 @@ static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
0, 0,
(kernel_ulong_t)&plx_pci_card_info_cti
},
+ {
+ /* Elcus CAN-200-PCI */
+ CAN200PCI_VENDOR_ID, CAN200PCI_DEVICE_ID,
+ CAN200PCI_SUB_VENDOR_ID, CAN200PCI_SUB_DEVICE_ID,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_elcus
+ },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 06a282397fff..df136a2516c4 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -11,8 +11,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 047accd4ede5..2f6e24534231 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/* This is a generic driver for SJA1000 chips on the OpenFirmware platform
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 29f9b6321187..943df645b459 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 25377e547f9b..3fcdae266377 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -18,9 +18,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307. You can also get it
- * at http://www.gnu.org/licenses/gpl.html
+ * with this program; if not, see http://www.gnu.org/licenses/gpl.html
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
index 498605f833dd..cdc0c7433a4b 100644
--- a/drivers/net/can/softing/softing_cs.c
+++ b/drivers/net/can/softing/softing_cs.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index b595d3422b9f..52fe50725d74 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/firmware.h>
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 6cd5c01b624d..9ea0dcde94ce 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -13,12 +13,10 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/io.h>
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 60d95b44d0f7..2c62fe6c8fa9 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -37,7 +37,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
@@ -518,10 +517,10 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
data = (cf->can_id & CAN_SFF_MASK) << 18;
hecc_write_mbx(priv, mbxno, HECC_CANMID, data);
hecc_write_mbx(priv, mbxno, HECC_CANMDL,
- be32_to_cpu(*(u32 *)(cf->data)));
+ be32_to_cpu(*(__be32 *)(cf->data)));
if (cf->can_dlc > 4)
hecc_write_mbx(priv, mbxno, HECC_CANMDH,
- be32_to_cpu(*(u32 *)(cf->data + 4)));
+ be32_to_cpu(*(__be32 *)(cf->data + 4)));
else
*(u32 *)(cf->data + 4) = 0;
can_put_echo_skb(skb, ndev, mbxno);
@@ -569,12 +568,10 @@ static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
cf->can_id |= CAN_RTR_FLAG;
cf->can_dlc = get_can_dlc(data & 0xF);
data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
- *(u32 *)(cf->data) = cpu_to_be32(data);
+ *(__be32 *)(cf->data) = cpu_to_be32(data);
if (cf->can_dlc > 4) {
data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
- *(u32 *)(cf->data + 4) = cpu_to_be32(data);
- } else {
- *(u32 *)(cf->data + 4) = 0;
+ *(__be32 *)(cf->data + 4) = cpu_to_be32(data);
}
spin_lock_irqsave(&priv->mbx_lock, flags);
hecc_clear_bit(priv, HECC_CANME, mbx_mask);
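
Editor's note: the (__be32 *) casts in the ti_hecc hunk above only change the type seen by sparse; the payload bytes are still treated as one big-endian 32-bit quantity, so cf->data[0] lands in the most significant byte of the mailbox word. A small user-space sketch of that interpretation, with ntohl() standing in for be32_to_cpu() and made-up data bytes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	uint8_t data[4] = { 0x11, 0x22, 0x33, 0x44 };	/* first four CAN data bytes */
	uint32_t raw;

	memcpy(&raw, data, sizeof(raw));		/* raw big-endian payload */
	printf("0x%08x\n", (unsigned)ntohl(raw));	/* 0x11223344 on any host */
	return 0;
}
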
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 5f9a7ad9b964..52c42fd49510 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -16,7 +16,6 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#include <linux/init.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -625,6 +624,7 @@ static int ems_usb_start(struct ems_usb *dev)
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
urb->transfer_dma);
+ usb_free_urb(urb);
break;
}
@@ -798,8 +798,8 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
* allowed (MAX_TX_URBS).
*/
if (!context) {
- usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
+ usb_free_urb(urb);
netdev_warn(netdev, "couldn't find free context\n");
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index ac6177d3befc..7fbe85935f1d 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -16,7 +16,6 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#include <linux/init.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 4b2d5ed62b11..6c859bba8b65 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -12,7 +12,6 @@
* Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
*/
-#include <linux/init.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 8ee9d1556e6e..263dd921edc4 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
/* set LED in default state (end of init phase) */
pcan_usb_pro_set_led(dev, 0, 1);
+ kfree(bi);
+ kfree(fi);
+
return 0;
err_out:
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 8becd3d838b5..a0fa1fd5092b 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -23,7 +23,6 @@
* who were very cooperative and answered my questions.
*/
-#include <linux/init.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 0a2a5ee79a17..4e94057ef5cf 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -46,6 +46,7 @@
#include <linux/if_ether.h>
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/can/skb.h>
#include <linux/slab.h>
#include <net/rtnetlink.h>
@@ -109,25 +110,23 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
stats->rx_packets++;
stats->rx_bytes += cfd->len;
}
- kfree_skb(skb);
+ consume_skb(skb);
return NETDEV_TX_OK;
}
/* perform standard echo handling for CAN network interfaces */
if (loop) {
- struct sock *srcsk = skb->sk;
- skb = skb_share_check(skb, GFP_ATOMIC);
+ skb = can_create_echo_skb(skb);
if (!skb)
return NETDEV_TX_OK;
/* receive with packet counting */
- skb->sk = srcsk;
vcan_rx(skb, dev);
} else {
/* no looped packets => no counting */
- kfree_skb(skb);
+ consume_skb(skb);
}
return NETDEV_TX_OK;
}
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index f219d38acf58..7a79b6046879 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -395,6 +395,7 @@ static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
if (duplicate_slave)
eql_kill_one_slave(queue, duplicate_slave);
+ dev_hold(slave->dev);
list_add(&slave->list, &queue->all_slaves);
queue->num_slaves++;
slave->dev->flags |= IFF_SLAVE;
@@ -413,39 +414,35 @@ static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *
if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
return -EFAULT;
- slave_dev = dev_get_by_name(&init_net, srq.slave_name);
- if (slave_dev) {
- if ((master_dev->flags & IFF_UP) == IFF_UP) {
- /* slave is not a master & not already a slave: */
- if (!eql_is_master(slave_dev) &&
- !eql_is_slave(slave_dev)) {
- slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
- equalizer_t *eql = netdev_priv(master_dev);
- int ret;
-
- if (!s) {
- dev_put(slave_dev);
- return -ENOMEM;
- }
-
- memset(s, 0, sizeof(*s));
- s->dev = slave_dev;
- s->priority = srq.priority;
- s->priority_bps = srq.priority;
- s->priority_Bps = srq.priority / 8;
-
- spin_lock_bh(&eql->queue.lock);
- ret = __eql_insert_slave(&eql->queue, s);
- if (ret) {
- dev_put(slave_dev);
- kfree(s);
- }
- spin_unlock_bh(&eql->queue.lock);
-
- return ret;
- }
+ slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
+ if (!slave_dev)
+ return -ENODEV;
+
+ if ((master_dev->flags & IFF_UP) == IFF_UP) {
+ /* slave is not a master & not already a slave: */
+ if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) {
+ slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ equalizer_t *eql = netdev_priv(master_dev);
+ int ret;
+
+ if (!s)
+ return -ENOMEM;
+
+ memset(s, 0, sizeof(*s));
+ s->dev = slave_dev;
+ s->priority = srq.priority;
+ s->priority_bps = srq.priority;
+ s->priority_Bps = srq.priority / 8;
+
+ spin_lock_bh(&eql->queue.lock);
+ ret = __eql_insert_slave(&eql->queue, s);
+ if (ret)
+ kfree(s);
+
+ spin_unlock_bh(&eql->queue.lock);
+
+ return ret;
}
- dev_put(slave_dev);
}
return -EINVAL;
@@ -461,24 +458,20 @@ static int eql_emancipate(struct net_device *master_dev, slaving_request_t __use
if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
return -EFAULT;
- slave_dev = dev_get_by_name(&init_net, srq.slave_name);
- ret = -EINVAL;
- if (slave_dev) {
- spin_lock_bh(&eql->queue.lock);
-
- if (eql_is_slave(slave_dev)) {
- slave_t *slave = __eql_find_slave_dev(&eql->queue,
- slave_dev);
+ slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
+ if (!slave_dev)
+ return -ENODEV;
- if (slave) {
- eql_kill_one_slave(&eql->queue, slave);
- ret = 0;
- }
+ ret = -EINVAL;
+ spin_lock_bh(&eql->queue.lock);
+ if (eql_is_slave(slave_dev)) {
+ slave_t *slave = __eql_find_slave_dev(&eql->queue, slave_dev);
+ if (slave) {
+ eql_kill_one_slave(&eql->queue, slave);
+ ret = 0;
}
- dev_put(slave_dev);
-
- spin_unlock_bh(&eql->queue.lock);
}
+ spin_unlock_bh(&eql->queue.lock);
return ret;
}
@@ -494,7 +487,7 @@ static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
return -EFAULT;
- slave_dev = dev_get_by_name(&init_net, sc.slave_name);
+ slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
if (!slave_dev)
return -ENODEV;
@@ -510,8 +503,6 @@ static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
}
spin_unlock_bh(&eql->queue.lock);
- dev_put(slave_dev);
-
if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t)))
ret = -EFAULT;
@@ -529,7 +520,7 @@ static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
return -EFAULT;
- slave_dev = dev_get_by_name(&init_net, sc.slave_name);
+ slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
if (!slave_dev)
return -ENODEV;
@@ -548,8 +539,6 @@ static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
}
spin_unlock_bh(&eql->queue.lock);
- dev_put(slave_dev);
-
return ret;
}
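
Editor's note: the eql rework above relies on these ioctl paths running under RTNL, so __dev_get_by_name() is used without taking a temporary reference; the only long-lived reference is the one dev_hold() now takes in __eql_insert_slave(), which eql_kill_one_slave() is expected to drop again. A toy refcount sketch of that ownership pattern (not kernel code, names invented for illustration):

#include <stdio.h>

struct toy_dev { int refcnt; };

static void toy_hold(struct toy_dev *d) { d->refcnt++; }	/* like dev_hold() */
static void toy_put(struct toy_dev *d)  { d->refcnt--; }	/* like dev_put() */

/* insertion owns one reference for as long as the device stays enslaved */
static void insert_slave(struct toy_dev *d) { toy_hold(d); }
static void kill_slave(struct toy_dev *d)   { toy_put(d); }

int main(void)
{
	struct toy_dev dev = { .refcnt = 1 };	/* reference already held elsewhere */

	insert_slave(&dev);
	kill_slave(&dev);
	printf("refcnt=%d\n", dev.refcnt);	/* back to 1: hold/put balanced */
	return 0;
}
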
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index ede8daa68275..c53384d41c96 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -252,8 +252,7 @@ static int el3_isa_id_sequence(__be16 *phys_addr)
for (i = 0; i < el3_cards; i++) {
struct el3_private *lp = netdev_priv(el3_devs[i]);
if (lp->type == EL3_PNP &&
- !memcmp(phys_addr, el3_devs[i]->dev_addr,
- ETH_ALEN)) {
+ ether_addr_equal((u8 *)phys_addr, el3_devs[i]->dev_addr)) {
if (el3_debug > 3)
pr_debug("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n",
phys_addr[0] & 0xff, phys_addr[0] >> 8,
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index 6fc994fa4abe..b9948f00c5e9 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -73,7 +73,6 @@ earlier 3Com products.
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 078480aaa168..5992860a39c9 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -25,7 +25,6 @@
#define DRV_VERSION "1.162-ac"
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index ad5272b348f0..238ccea965c8 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -693,7 +693,7 @@ DEFINE_WINDOW_IO(16)
DEFINE_WINDOW_IO(32)
#ifdef CONFIG_PCI
-#define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL)
+#define DEVICE_PCI(dev) ((dev_is_pci(dev)) ? to_pci_dev((dev)) : NULL)
#else
#define DEVICE_PCI(dev) NULL
#endif
@@ -2079,10 +2079,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
iowrite16(len, ioaddr + Wn7_MasterLen);
spin_unlock_irq(&vp->window_lock);
vp->tx_skb = skb;
+ skb_tx_timestamp(skb);
iowrite16(StartDMADown, ioaddr + EL3_CMD);
/* netif_wake_queue() will be called at the DMADone interrupt. */
} else {
/* ... and the packet rounded to a doubleword. */
+ skb_tx_timestamp(skb);
iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
dev_kfree_skb (skb);
if (ioread16(ioaddr + TxFree) > 1536) {
@@ -2212,6 +2214,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
#endif
}
+ skb_tx_timestamp(skb);
iowrite16(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
return NETDEV_TX_OK;
@@ -2986,6 +2989,7 @@ static const struct ethtool_ops vortex_ethtool_ops = {
.nway_reset = vortex_nway_reset,
.get_wol = vortex_get_wol,
.set_wol = vortex_set_wol,
+ .get_ts_info = ethtool_op_get_ts_info,
};
#ifdef CONFIG_PCI
@@ -3290,7 +3294,6 @@ static int __init vortex_init(void)
static void __exit vortex_eisa_cleanup(void)
{
- struct vortex_private *vp;
void __iomem *ioaddr;
#ifdef CONFIG_EISA
@@ -3299,7 +3302,6 @@ static void __exit vortex_eisa_cleanup(void)
#endif
if (compaq_net_device) {
- vp = netdev_priv(compaq_net_device);
ioaddr = ioport_map(compaq_net_device->base_addr,
VORTEX_TOTAL_SIZE);
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index 2923c51bb351..3e2f2c2e7b58 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -21,12 +21,6 @@ struct e8390_pkt_hdr {
unsigned short count; /* header + packet length in bytes */
};
-#ifdef notdef
-extern int ei_debug;
-#else
-#define ei_debug 1
-#endif
-
#ifdef CONFIG_NET_POLL_CONTROLLER
void ei_poll(struct net_device *dev);
void eip_poll(struct net_device *dev);
@@ -99,6 +93,7 @@ struct ei_device {
u32 *reg_offset; /* Register mapping table */
spinlock_t page_lock; /* Page register locks */
unsigned long priv; /* Private field to store bus IDs etc. */
+ u32 msg_enable; /* debug message level */
#ifdef AX88796_PLATFORM
unsigned char rxcr_base; /* default value for RXCR */
#endif
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 912ed7a5f33a..30104b60da85 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -116,9 +116,15 @@ static const char version[] =
static int apne_owned; /* signal if card already owned */
+static u32 apne_msg_enable;
+module_param_named(msg_enable, apne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
+
struct net_device * __init apne_probe(int unit)
{
struct net_device *dev;
+ struct ei_device *ei_local;
+
#ifndef MANUAL_CONFIG
char tuple[8];
#endif
@@ -133,11 +139,11 @@ struct net_device * __init apne_probe(int unit)
if ( !(AMIGAHW_PRESENT(PCMCIA)) )
return ERR_PTR(-ENODEV);
- printk("Looking for PCMCIA ethernet card : ");
+ pr_info("Looking for PCMCIA ethernet card : ");
/* check if a card is inserted */
if (!(PCMCIA_INSERTED)) {
- printk("NO PCMCIA card inserted\n");
+ pr_cont("NO PCMCIA card inserted\n");
return ERR_PTR(-ENODEV);
}
@@ -148,6 +154,8 @@ struct net_device * __init apne_probe(int unit)
sprintf(dev->name, "eth%d", unit);
netdev_boot_setup_check(dev);
}
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = apne_msg_enable;
/* disable pcmcia irq for readtuple */
pcmcia_disable_irq();
@@ -155,14 +163,14 @@ struct net_device * __init apne_probe(int unit)
#ifndef MANUAL_CONFIG
if ((pcmcia_copy_tuple(CISTPL_FUNCID, tuple, 8) < 3) ||
(tuple[2] != CISTPL_FUNCID_NETWORK)) {
- printk("not an ethernet card\n");
+ pr_cont("not an ethernet card\n");
/* XXX: shouldn't we re-enable irq here? */
free_netdev(dev);
return ERR_PTR(-ENODEV);
}
#endif
- printk("ethernet PCMCIA card inserted\n");
+ pr_cont("ethernet PCMCIA card inserted\n");
if (!init_pcmcia()) {
/* XXX: shouldn't we re-enable irq here? */
@@ -205,10 +213,10 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
#endif
static unsigned version_printed;
- if (ei_debug && version_printed++ == 0)
- printk(version);
+ if ((apne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ netdev_info(dev, version);
- printk("PCMCIA NE*000 ethercard probe");
+ netdev_info(dev, "PCMCIA NE*000 ethercard probe");
/* Reset card. Who knows what dain-bramaged state it was left in. */
{ unsigned long reset_start_time = jiffies;
@@ -217,7 +225,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
- printk(" not found (no reset ack).\n");
+ pr_cont(" not found (no reset ack).\n");
return -ENODEV;
}
@@ -288,7 +296,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
start_page = 0x01;
stop_page = (wordlength == 2) ? 0x40 : 0x20;
} else {
- printk(" not found.\n");
+ pr_cont(" not found.\n");
return -ENXIO;
}
@@ -320,9 +328,9 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
- printk(" %pM\n", dev->dev_addr);
+ pr_cont(" %pM\n", dev->dev_addr);
- printk("%s: %s found.\n", dev->name, name);
+ netdev_info(dev, "%s found.\n", name);
ei_status.name = name;
ei_status.tx_start_page = start_page;
@@ -352,10 +360,11 @@ static void
apne_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
init_pcmcia();
- if (ei_debug > 1) printk("resetting the 8390 t=%ld...", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -365,8 +374,8 @@ apne_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
- printk("%s: ne_reset_8390() did not complete.\n", dev->name);
- break;
+ netdev_err(dev, "ne_reset_8390() did not complete.\n");
+ break;
}
outb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr. */
}
@@ -386,9 +395,9 @@ apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_pa
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne_get_8390_hdr "
- "[DMAstat:%d][irqlock:%d][intr:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ netdev_err(dev, "DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ ei_status.dmaing, ei_status.irqlock, dev->irq);
return;
}
@@ -433,9 +442,9 @@ apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int rin
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne_block_input "
- "[DMAstat:%d][irqlock:%d][intr:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ netdev_err(dev, "DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ ei_status.dmaing, ei_status.irqlock, dev->irq);
return;
}
ei_status.dmaing |= 0x01;
@@ -481,9 +490,9 @@ apne_block_output(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne_block_output."
- "[DMAstat:%d][irqlock:%d][intr:%d]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ netdev_err(dev, "DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d][intr:%d]\n",
+ ei_status.dmaing, ei_status.irqlock, dev->irq);
return;
}
ei_status.dmaing |= 0x01;
@@ -513,7 +522,7 @@ apne_block_output(struct net_device *dev, int count,
while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
- printk("%s: timeout waiting for Tx RDC.\n", dev->name);
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
apne_reset_8390(dev);
NS8390_init(dev,1);
break;
@@ -536,8 +545,8 @@ static irqreturn_t apne_interrupt(int irq, void *dev_id)
pcmcia_ack_int(pcmcia_intreq);
return IRQ_NONE;
}
- if (ei_debug > 3)
- printk("pcmcia intreq = %x\n", pcmcia_intreq);
+ if (apne_msg_enable & NETIF_MSG_INTR)
+ pr_debug("pcmcia intreq = %x\n", pcmcia_intreq);
pcmcia_disable_irq(); /* to get rid of the sti() within ei_interrupt */
ei_interrupt(irq, dev_id);
pcmcia_ack_int(pcmcia_get_intreq());
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 36fa577970bb..455d4c399b52 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/isapnp.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
@@ -78,6 +77,8 @@ static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electron
#define AX_GPOC_PPDSET BIT(6)
+static u32 ax_msg_enable;
+
/* device private data */
struct ax_device {
@@ -147,8 +148,7 @@ static void ax_reset_8390(struct net_device *dev)
unsigned long reset_start_time = jiffies;
void __iomem *addr = (void __iomem *)dev->base_addr;
- if (ei_debug > 1)
- netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
@@ -496,12 +496,28 @@ static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return phy_ethtool_sset(phy_dev, cmd);
}
+static u32 ax_get_msglevel(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ return ei_local->msg_enable;
+}
+
+static void ax_set_msglevel(struct net_device *dev, u32 v)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ ei_local->msg_enable = v;
+}
+
static const struct ethtool_ops ax_ethtool_ops = {
.get_drvinfo = ax_get_drvinfo,
.get_settings = ax_get_settings,
.set_settings = ax_set_settings,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_msglevel = ax_get_msglevel,
+ .set_msglevel = ax_set_msglevel,
};
#ifdef CONFIG_AX88796_93CX6
@@ -763,6 +779,7 @@ static int ax_init_dev(struct net_device *dev)
ei_local->block_output = &ax_block_output;
ei_local->get_8390_hdr = &ax_get_8390_hdr;
ei_local->priv = 0;
+ ei_local->msg_enable = ax_msg_enable;
dev->netdev_ops = &ax_netdev_ops;
dev->ethtool_ops = &ax_ethtool_ops;
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index d801c1410fb0..73c57a4a7b9e 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/timer.h>
@@ -105,6 +104,7 @@ static void AX88190_init(struct net_device *dev, int startp);
static int ax_open(struct net_device *dev);
static int ax_close(struct net_device *dev);
static irqreturn_t ax_interrupt(int irq, void *dev_id);
+static u32 axnet_msg_enable;
/*====================================================================*/
@@ -152,6 +152,7 @@ static int axnet_probe(struct pcmcia_device *link)
return -ENOMEM;
ei_local = netdev_priv(dev);
+ ei_local->msg_enable = axnet_msg_enable;
spin_lock_init(&ei_local->page_lock);
info = PRIV(dev);
@@ -650,11 +651,12 @@ static void block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset)
{
unsigned int nic_base = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
int xfer_count = count;
char *buf = skb->data;
- if ((ei_debug > 4) && (count != 4))
- pr_debug("%s: [bi=%d]\n", dev->name, count+4);
+ if ((netif_msg_rx_status(ei_local)) && (count != 4))
+ netdev_dbg(dev, "[bi=%d]\n", count+4);
outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
@@ -810,11 +812,6 @@ module_pcmcia_driver(axnet_cs_driver);
#define ei_block_input (ei_local->block_input)
#define ei_get_8390_hdr (ei_local->get_8390_hdr)
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef ei_debug
-int ei_debug = 1;
-#endif
-
/* Index to functions. */
static void ei_tx_intr(struct net_device *dev);
static void ei_tx_err(struct net_device *dev);
@@ -925,11 +922,10 @@ static void axnet_tx_timeout(struct net_device *dev)
isr = inb(e8390_base+EN0_ISR);
spin_unlock_irqrestore(&ei_local->page_lock, flags);
- netdev_printk(KERN_DEBUG, dev,
- "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
- (txsr & ENTSR_ABT) ? "excess collisions." :
- (isr) ? "lost interrupt?" : "cable problem?",
- txsr, isr, tickssofar);
+ netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+ (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?",
+ txsr, isr, tickssofar);
if (!isr && !dev->stats.tx_packets)
{
@@ -998,29 +994,30 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
{
output_page = ei_local->tx_start_page;
ei_local->tx1 = send_length;
- if (ei_debug && ei_local->tx2 > 0)
- netdev_printk(KERN_DEBUG, dev,
- "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
- ei_local->tx2, ei_local->lasttx,
- ei_local->txing);
+ if ((netif_msg_tx_queued(ei_local)) &&
+ ei_local->tx2 > 0)
+ netdev_dbg(dev,
+ "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx2, ei_local->lasttx,
+ ei_local->txing);
}
else if (ei_local->tx2 == 0)
{
output_page = ei_local->tx_start_page + TX_PAGES/2;
ei_local->tx2 = send_length;
- if (ei_debug && ei_local->tx1 > 0)
- netdev_printk(KERN_DEBUG, dev,
- "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
- ei_local->tx1, ei_local->lasttx,
- ei_local->txing);
+ if ((netif_msg_tx_queued(ei_local)) &&
+ ei_local->tx1 > 0)
+ netdev_dbg(dev,
+ "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx1, ei_local->lasttx,
+ ei_local->txing);
}
else
{ /* We should never get here. */
- if (ei_debug)
- netdev_printk(KERN_DEBUG, dev,
- "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
- ei_local->tx1, ei_local->tx2,
- ei_local->lasttx);
+ netif_dbg(ei_local, tx_err, dev,
+ "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ ei_local->tx1, ei_local->tx2,
+ ei_local->lasttx);
ei_local->irqlock = 0;
netif_stop_queue(dev);
outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -1124,10 +1121,9 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&ei_local->page_lock, flags);
return IRQ_NONE;
}
-
- if (ei_debug > 3)
- netdev_printk(KERN_DEBUG, dev, "interrupt(isr=%#2.2x)\n",
- inb_p(e8390_base + EN0_ISR));
+
+ netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
+ inb_p(e8390_base + EN0_ISR));
outb_p(0x00, e8390_base + EN0_ISR);
ei_local->irqlock = 1;
@@ -1137,9 +1133,8 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
++nr_serviced < MAX_SERVICE)
{
if (!netif_running(dev) || (interrupts == 0xff)) {
- if (ei_debug > 1)
- netdev_warn(dev,
- "interrupt from stopped card\n");
+ netif_warn(ei_local, intr, dev,
+ "interrupt from stopped card\n");
outb_p(interrupts, e8390_base + EN0_ISR);
interrupts = 0;
break;
@@ -1175,14 +1170,15 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
}
}
- if (interrupts && ei_debug > 3)
+ if (interrupts && (netif_msg_intr(ei_local)))
{
handled = 1;
if (nr_serviced >= MAX_SERVICE)
{
/* 0xFF is valid for a card removal */
- if(interrupts!=0xFF)
- netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
+ if (interrupts != 0xFF)
+ netdev_warn(dev,
+ "Too much work at interrupt, status %#2.2x\n",
interrupts);
outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
} else {
@@ -1221,8 +1217,7 @@ static void ei_tx_err(struct net_device *dev)
unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
#ifdef VERBOSE_ERROR_DUMP
- netdev_printk(KERN_DEBUG, dev,
- "transmitter error (%#2x):", txsr);
+ netdev_dbg(dev, "transmitter error (%#2x):", txsr);
if (txsr & ENTSR_ABT)
pr_cont(" excess-collisions");
if (txsr & ENTSR_ND)
@@ -1287,9 +1282,9 @@ static void ei_tx_intr(struct net_device *dev)
else if (ei_local->tx2 < 0)
{
if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
- netdev_info(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n",
- ei_local->name, ei_local->lasttx,
- ei_local->tx2);
+ netdev_err(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n",
+ ei_local->name, ei_local->lasttx,
+ ei_local->tx2);
ei_local->tx2 = 0;
if (ei_local->tx1 > 0)
{
@@ -1366,9 +1361,11 @@ static void ei_receive(struct net_device *dev)
Keep quiet if it looks like a card removal. One problem here
is that some clones crash in roughly the same way.
*/
- if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
- netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
- this_frame, ei_local->current_page);
+ if ((netif_msg_rx_err(ei_local)) &&
+ this_frame != ei_local->current_page &&
+ (this_frame != 0x0 || rxing_page != 0xFF))
+ netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
+ this_frame, ei_local->current_page);
if (this_frame == rxing_page) /* Read all the frames? */
break; /* Done for now */
@@ -1383,11 +1380,10 @@ static void ei_receive(struct net_device *dev)
if (pkt_len < 60 || pkt_len > 1518)
{
- if (ei_debug)
- netdev_printk(KERN_DEBUG, dev,
- "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
- rx_frame.count, rx_frame.status,
- rx_frame.next);
+ netif_err(ei_local, rx_err, dev,
+ "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
+ rx_frame.count, rx_frame.status,
+ rx_frame.next);
dev->stats.rx_errors++;
dev->stats.rx_length_errors++;
}
@@ -1398,10 +1394,9 @@ static void ei_receive(struct net_device *dev)
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL)
{
- if (ei_debug > 1)
- netdev_printk(KERN_DEBUG, dev,
- "Couldn't allocate a sk_buff of size %d\n",
- pkt_len);
+ netif_err(ei_local, rx_err, dev,
+ "Couldn't allocate a sk_buff of size %d\n",
+ pkt_len);
dev->stats.rx_dropped++;
break;
}
@@ -1420,11 +1415,10 @@ static void ei_receive(struct net_device *dev)
}
else
{
- if (ei_debug)
- netdev_printk(KERN_DEBUG, dev,
- "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
- rx_frame.status, rx_frame.next,
- rx_frame.count);
+ netif_err(ei_local, rx_err, dev,
+ "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ rx_frame.status, rx_frame.next,
+ rx_frame.count);
dev->stats.rx_errors++;
/* NB: The NIC counts CRC, frame and missed errors. */
if (pkt_stat & ENRSR_FO)
@@ -1461,6 +1455,7 @@ static void ei_rx_overrun(struct net_device *dev)
axnet_dev_t *info = PRIV(dev);
long e8390_base = dev->base_addr;
unsigned char was_txing, must_resend = 0;
+ struct ei_device *ei_local = netdev_priv(dev);
/*
* Record whether a Tx was in progress and then issue the
@@ -1468,9 +1463,8 @@ static void ei_rx_overrun(struct net_device *dev)
*/
was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
-
- if (ei_debug > 1)
- netdev_printk(KERN_DEBUG, dev, "Receiver overrun\n");
+
+ netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
dev->stats.rx_over_errors++;
/*
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index 78c6fb4b1143..b36ee9e0d220 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -56,18 +56,15 @@
#define ei_inb_p(_p) readb((void __iomem *)_p)
#define ei_outb_p(_v,_p) writeb(_v,(void __iomem *)_p)
-#define NET_DEBUG 0
-#define DEBUG_INIT 2
-
#define DRV_NAME "etherh"
#define DRV_VERSION "1.11"
-static char version[] __initdata =
+static char version[] =
"EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n";
#include "lib8390.c"
-static unsigned int net_debug = NET_DEBUG;
+static u32 etherh_msg_enable;
struct etherh_priv {
void __iomem *ioc_fast;
@@ -317,9 +314,9 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf
void __iomem *dma_base, *addr;
if (ei_local->dmaing) {
- printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
- " DMAstat %d irqlock %d\n", dev->name,
- ei_local->dmaing, ei_local->irqlock);
+ netdev_err(dev, "DMAing conflict in etherh_block_input: "
+ " DMAstat %d irqlock %d\n",
+ ei_local->dmaing, ei_local->irqlock);
return;
}
@@ -361,8 +358,7 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf
while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
- printk(KERN_ERR "%s: timeout waiting for TX RDC\n",
- dev->name);
+ netdev_warn(dev, "timeout waiting for TX RDC\n");
etherh_reset (dev);
__NS8390_init (dev, 1);
break;
@@ -383,9 +379,9 @@ etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int
void __iomem *dma_base, *addr;
if (ei_local->dmaing) {
- printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
- " DMAstat %d irqlock %d\n", dev->name,
- ei_local->dmaing, ei_local->irqlock);
+ netdev_err(dev, "DMAing conflict in etherh_block_input: "
+ " DMAstat %d irqlock %d\n",
+ ei_local->dmaing, ei_local->irqlock);
return;
}
@@ -423,9 +419,9 @@ etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_p
void __iomem *dma_base, *addr;
if (ei_local->dmaing) {
- printk(KERN_ERR "%s: DMAing conflict in etherh_get_header: "
- " DMAstat %d irqlock %d\n", dev->name,
- ei_local->dmaing, ei_local->irqlock);
+ netdev_err(dev, "DMAing conflict in etherh_get_header: "
+ " DMAstat %d irqlock %d\n",
+ ei_local->dmaing, ei_local->irqlock);
return;
}
@@ -513,8 +509,8 @@ static void __init etherh_banner(void)
{
static int version_printed;
- if (net_debug && version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ if ((etherh_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ pr_info("%s", version);
}
/*
@@ -625,11 +621,27 @@ static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
}
+static u32 etherh_get_msglevel(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ return ei_local->msg_enable;
+}
+
+static void etherh_set_msglevel(struct net_device *dev, u32 v)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ ei_local->msg_enable = v;
+}
+
static const struct ethtool_ops etherh_ethtool_ops = {
.get_settings = etherh_get_settings,
.set_settings = etherh_set_settings,
.get_drvinfo = etherh_get_drvinfo,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_msglevel = etherh_get_msglevel,
+ .set_msglevel = etherh_set_msglevel,
};
static const struct net_device_ops etherh_netdev_ops = {
@@ -746,6 +758,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
ei_local->block_output = etherh_block_output;
ei_local->get_8390_hdr = etherh_get_header;
ei_local->interface_num = 0;
+ ei_local->msg_enable = etherh_msg_enable;
etherh_reset(dev);
__NS8390_init(dev, 0);
@@ -754,8 +767,8 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
if (ret)
goto free;
- printk(KERN_INFO "%s: %s in slot %d, %pM\n",
- dev->name, data->name, ec->slot_no, dev->dev_addr);
+ netdev_info(dev, "%s in slot %d, %pM\n",
+ data->name, ec->slot_no, dev->dev_addr);
ecard_set_drvdata(ec, dev);
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index fb3dd4399cf3..0fe19d609c2e 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -66,6 +66,7 @@ static void hydra_block_input(struct net_device *dev, int count,
static void hydra_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static void hydra_remove_one(struct zorro_dev *z);
+static u32 hydra_msg_enable;
static struct zorro_device_id hydra_zorro_tbl[] = {
{ ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
@@ -113,12 +114,13 @@ static const struct net_device_ops hydra_netdev_ops = {
static int hydra_init(struct zorro_dev *z)
{
struct net_device *dev;
- unsigned long board = ZTWO_VADDR(z->resource.start);
+ unsigned long board = (unsigned long)ZTWO_VADDR(z->resource.start);
unsigned long ioaddr = board+HYDRA_NIC_BASE;
const char name[] = "NE2000";
int start_page, stop_page;
int j;
int err;
+ struct ei_device *ei_local;
static u32 hydra_offsets[16] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
@@ -137,6 +139,8 @@ static int hydra_init(struct zorro_dev *z)
start_page = NESM_START_PG;
stop_page = NESM_STOP_PG;
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = hydra_msg_enable;
dev->base_addr = ioaddr;
dev->irq = IRQ_AMIGA_PORTS;
@@ -187,15 +191,16 @@ static int hydra_open(struct net_device *dev)
static int hydra_close(struct net_device *dev)
{
- if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n");
__ei_close(dev);
return 0;
}
static void hydra_reset_8390(struct net_device *dev)
{
- printk(KERN_INFO "Hydra hw reset not there\n");
+ netdev_info(dev, "Hydra hw reset not there\n");
}
static void hydra_get_8390_hdr(struct net_device *dev,
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index b329f5c0d62b..d2cd80444ade 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -99,11 +99,6 @@
#define ei_block_input (ei_local->block_input)
#define ei_get_8390_hdr (ei_local->get_8390_hdr)
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef ei_debug
-int ei_debug = 1;
-#endif
-
/* Index to functions. */
static void ei_tx_intr(struct net_device *dev);
static void ei_tx_err(struct net_device *dev);
@@ -116,6 +111,11 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
static void do_set_multicast_list(struct net_device *dev);
static void __NS8390_init(struct net_device *dev, int startp);
+static unsigned version_printed;
+static u32 msg_enable;
+module_param(msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
+
/*
* SMP and the 8390 setup.
*
@@ -345,19 +345,23 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
if (ei_local->tx1 == 0) {
output_page = ei_local->tx_start_page;
ei_local->tx1 = send_length;
- if (ei_debug && ei_local->tx2 > 0)
- netdev_dbg(dev, "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
+ if ((netif_msg_tx_queued(ei_local)) &&
+ ei_local->tx2 > 0)
+ netdev_dbg(dev,
+ "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
ei_local->tx2, ei_local->lasttx, ei_local->txing);
} else if (ei_local->tx2 == 0) {
output_page = ei_local->tx_start_page + TX_PAGES/2;
ei_local->tx2 = send_length;
- if (ei_debug && ei_local->tx1 > 0)
- netdev_dbg(dev, "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
+ if ((netif_msg_tx_queued(ei_local)) &&
+ ei_local->tx1 > 0)
+ netdev_dbg(dev,
+ "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
ei_local->tx1, ei_local->lasttx, ei_local->txing);
} else { /* We should never get here. */
- if (ei_debug)
- netdev_dbg(dev, "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
- ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+ netif_dbg(ei_local, tx_err, dev,
+ "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ ei_local->tx1, ei_local->tx2, ei_local->lasttx);
ei_local->irqlock = 0;
netif_stop_queue(dev);
ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -388,7 +392,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
} else
ei_local->txqueue++;
- if (ei_local->tx1 && ei_local->tx2)
+ if (ei_local->tx1 && ei_local->tx2)
netif_stop_queue(dev);
else
netif_start_queue(dev);
@@ -445,9 +449,8 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
/* Change to page 0 and read the intr status reg. */
ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
- if (ei_debug > 3)
- netdev_dbg(dev, "interrupt(isr=%#2.2x)\n",
- ei_inb_p(e8390_base + EN0_ISR));
+ netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
+ ei_inb_p(e8390_base + EN0_ISR));
/* !!Assumption!! -- we stay in page 0. Don't break this. */
while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
@@ -485,7 +488,7 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
}
- if (interrupts && ei_debug) {
+ if (interrupts && (netif_msg_intr(ei_local))) {
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
if (nr_serviced >= MAX_SERVICE) {
/* 0xFF is valid for a card removal */
@@ -676,10 +679,11 @@ static void ei_receive(struct net_device *dev)
Keep quiet if it looks like a card removal. One problem here
is that some clones crash in roughly the same way.
*/
- if (ei_debug > 0 &&
+ if ((netif_msg_rx_status(ei_local)) &&
this_frame != ei_local->current_page &&
(this_frame != 0x0 || rxing_page != 0xFF))
- netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
+ netdev_err(dev,
+ "mismatched read page pointers %2x vs %2x\n",
this_frame, ei_local->current_page);
if (this_frame == rxing_page) /* Read all the frames? */
@@ -707,10 +711,10 @@ static void ei_receive(struct net_device *dev)
}
if (pkt_len < 60 || pkt_len > 1518) {
- if (ei_debug)
- netdev_dbg(dev, "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
- rx_frame.count, rx_frame.status,
- rx_frame.next);
+ netif_dbg(ei_local, rx_status, dev,
+ "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
+ rx_frame.count, rx_frame.status,
+ rx_frame.next);
dev->stats.rx_errors++;
dev->stats.rx_length_errors++;
} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
@@ -718,9 +722,9 @@ static void ei_receive(struct net_device *dev)
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- if (ei_debug > 1)
- netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n",
- pkt_len);
+ netif_err(ei_local, rx_err, dev,
+ "Couldn't allocate a sk_buff of size %d\n",
+ pkt_len);
dev->stats.rx_dropped++;
break;
} else {
@@ -736,10 +740,10 @@ static void ei_receive(struct net_device *dev)
dev->stats.multicast++;
}
} else {
- if (ei_debug)
- netdev_dbg(dev, "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
- rx_frame.status, rx_frame.next,
- rx_frame.count);
+ netif_err(ei_local, rx_err, dev,
+ "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ rx_frame.status, rx_frame.next,
+ rx_frame.count);
dev->stats.rx_errors++;
/* NB: The NIC counts CRC, frame and missed errors. */
if (pkt_stat & ENRSR_FO)
@@ -789,8 +793,7 @@ static void ei_rx_overrun(struct net_device *dev)
was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
- if (ei_debug > 1)
- netdev_dbg(dev, "Receiver overrun\n");
+ netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
dev->stats.rx_over_errors++;
/*
@@ -965,8 +968,9 @@ static void __ei_set_multicast_list(struct net_device *dev)
static void ethdev_setup(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- printk(version);
+
+ if ((msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ pr_info("%s", version);
ether_setup(dev);
@@ -1035,9 +1039,10 @@ static void __NS8390_init(struct net_device *dev, int startp)
ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
for (i = 0; i < 6; i++) {
ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
- if (ei_debug > 1 &&
+ if ((netif_msg_probe(ei_local)) &&
ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
- netdev_err(dev, "Hw. address read/write mismap %d\n", i);
+ netdev_err(dev,
+ "Hw. address read/write mismap %d\n", i);
}
ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 88ccc8b14f0a..90e825e8abfe 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -167,6 +167,7 @@ static void slow_sane_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);
+static u32 mac8390_msg_enable;
static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
{
@@ -402,6 +403,7 @@ struct net_device * __init mac8390_probe(int unit)
struct net_device *dev;
struct nubus_dev *ndev = NULL;
int err = -ENODEV;
+ struct ei_device *ei_local;
static unsigned int slots;
@@ -440,6 +442,10 @@ struct net_device * __init mac8390_probe(int unit)
if (!ndev)
goto out;
+
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = mac8390_msg_enable;
+
err = register_netdev(dev);
if (err)
goto out;
@@ -660,19 +666,22 @@ static int mac8390_close(struct net_device *dev)
static void mac8390_no_reset(struct net_device *dev)
{
+ struct ei_device *ei_local = netdev_priv(dev);
+
ei_status.txing = 0;
- if (ei_debug > 1)
- pr_info("reset not supported\n");
+ netif_info(ei_local, hw, dev, "reset not supported\n");
}
static void interlan_reset(struct net_device *dev)
{
unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq));
- if (ei_debug > 1)
- pr_info("Need to reset the NS8390 t=%lu...", jiffies);
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ netif_info(ei_local, hw, dev, "Need to reset the NS8390 t=%lu...",
+ jiffies);
ei_status.txing = 0;
target[0xC0000] = 0;
- if (ei_debug > 1)
+ if (netif_msg_hw(ei_local))
pr_cont("reset complete\n");
}
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 230efd6fa5d5..38fcdcf7c4c7 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -39,6 +38,7 @@ static const char version[] =
#define NESM_START_PG 0x40 /* First page of TX buffer */
#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+static u32 mcf8390_msg_enable;
#ifdef NE2000_ODDOFFSET
/*
@@ -153,9 +153,9 @@ static void mcf8390_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
u32 addr = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- netdev_dbg(dev, "resetting the 8390 t=%ld...\n", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
@@ -288,7 +288,7 @@ static void mcf8390_block_output(struct net_device *dev, int count,
dma_start = jiffies;
while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RDC) == 0) {
if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
- netdev_err(dev, "timeout waiting for Tx RDC\n");
+ netdev_warn(dev, "timeout waiting for Tx RDC\n");
mcf8390_reset_8390(dev);
__NS8390_init(dev, 1);
break;
@@ -437,6 +437,7 @@ static int mcf8390_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
platform_set_drvdata(pdev, dev);
ei_local = netdev_priv(dev);
+ ei_local->msg_enable = mcf8390_msg_enable;
dev->irq = irq->start;
dev->base_addr = mem->start;
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index b2e840513735..58eaa8f34942 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -71,14 +71,17 @@ static struct platform_device *pdev_ne[MAX_NE_CARDS];
static int io[MAX_NE_CARDS];
static int irq[MAX_NE_CARDS];
static int bad[MAX_NE_CARDS];
+static u32 ne_msg_enable;
#ifdef MODULE
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(bad, int, NULL, 0);
+module_param_named(msg_enable, ne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
MODULE_PARM_DESC(io, "I/O base address(es),required");
MODULE_PARM_DESC(irq, "IRQ number(s)");
MODULE_PARM_DESC(bad, "Accept card(s) with bad signatures");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
MODULE_DESCRIPTION("NE1000/NE2000 ISA/PnP Ethernet driver");
MODULE_LICENSE("GPL");
#endif /* MODULE */
@@ -214,8 +217,8 @@ static int __init do_ne_probe(struct net_device *dev)
if (base_addr > 0x1ff) { /* Check a single specified location. */
int ret = ne_probe1(dev, base_addr);
if (ret)
- printk(KERN_WARNING "ne.c: No NE*000 card found at "
- "i/o = %#lx\n", base_addr);
+ netdev_warn(dev, "ne.c: No NE*000 card found at "
+ "i/o = %#lx\n", base_addr);
return ret;
}
else if (base_addr != 0) /* Don't probe at all. */
@@ -264,11 +267,14 @@ static int __init ne_probe_isapnp(struct net_device *dev)
/* found it */
dev->base_addr = pnp_port_start(idev, 0);
dev->irq = pnp_irq(idev, 0);
- printk(KERN_INFO "ne.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
- (char *) isapnp_clone_list[i].driver_data,
- dev->base_addr, dev->irq);
+ netdev_info(dev,
+ "ne.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
+ (char *) isapnp_clone_list[i].driver_data,
+ dev->base_addr, dev->irq);
if (ne_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */
- printk(KERN_ERR "ne.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr);
+ netdev_err(dev,
+ "ne.c: Probe of ISAPnP card at %#lx failed.\n",
+ dev->base_addr);
pnp_device_detach(idev);
return -ENXIO;
}
@@ -293,6 +299,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
int neX000, ctron, copam, bad_card;
int reg0, ret;
static unsigned version_printed;
+ struct ei_device *ei_local = netdev_priv(dev);
if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
return -EBUSY;
@@ -319,10 +326,10 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
}
}
- if (ei_debug && version_printed++ == 0)
- printk(KERN_INFO "%s%s", version1, version2);
+ if ((ne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ netdev_info(dev, "%s%s", version1, version2);
- printk(KERN_INFO "NE*000 ethercard probe at %#3lx:", ioaddr);
+ netdev_info(dev, "NE*000 ethercard probe at %#3lx:", ioaddr);
/* A user with a poor card that fails to ack the reset, or that
does not have a valid 0x57,0x57 signature can still use this
@@ -343,10 +350,10 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
if (bad_card) {
- printk(" (warning: no reset ack)");
+ pr_cont(" (warning: no reset ack)");
break;
} else {
- printk(" not found (no reset ack).\n");
+ pr_cont(" not found (no reset ack).\n");
ret = -ENODEV;
goto err_out;
}
@@ -454,13 +461,13 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
}
if (bad_clone_list[i].name8 == NULL)
{
- printk(" not found (invalid signature %2.2x %2.2x).\n",
+ pr_cont(" not found (invalid signature %2.2x %2.2x).\n",
SA_prom[14], SA_prom[15]);
ret = -ENXIO;
goto err_out;
}
#else
- printk(" not found.\n");
+ pr_cont(" not found.\n");
ret = -ENXIO;
goto err_out;
#endif
@@ -476,15 +483,15 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
mdelay(10); /* wait 10ms for interrupt to propagate */
outb_p(0x00, ioaddr + EN0_IMR); /* Mask it again. */
dev->irq = probe_irq_off(cookie);
- if (ei_debug > 2)
- printk(" autoirq is %d\n", dev->irq);
+ if (netif_msg_probe(ei_local))
+ pr_cont(" autoirq is %d", dev->irq);
} else if (dev->irq == 2)
/* Fixup for users that don't know that IRQ 2 is really IRQ 9,
or don't know which one to set. */
dev->irq = 9;
if (! dev->irq) {
- printk(" failed to detect IRQ line.\n");
+ pr_cont(" failed to detect IRQ line.\n");
ret = -EAGAIN;
goto err_out;
}
@@ -493,7 +500,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
share and the board will usually be enabled. */
ret = request_irq(dev->irq, eip_interrupt, 0, name, dev);
if (ret) {
- printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
+ pr_cont(" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
goto err_out;
}
@@ -512,7 +519,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
}
#endif
- printk("%pM\n", dev->dev_addr);
+ pr_cont("%pM\n", dev->dev_addr);
ei_status.name = name;
ei_status.tx_start_page = start_page;
@@ -536,11 +543,12 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
dev->netdev_ops = &eip_netdev_ops;
NS8390p_init(dev, 0);
+ ei_local->msg_enable = ne_msg_enable;
ret = register_netdev(dev);
if (ret)
goto out_irq;
- printk(KERN_INFO "%s: %s found at %#lx, using IRQ %d.\n",
- dev->name, name, ioaddr, dev->irq);
+ netdev_info(dev, "%s found at %#lx, using IRQ %d.\n",
+ name, ioaddr, dev->irq);
return 0;
out_irq:
@@ -556,9 +564,9 @@ err_out:
static void ne_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
/* DON'T change these to inb_p/outb_p or reset will fail on clones. */
outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -569,7 +577,7 @@ static void ne_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
- printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
+ netdev_err(dev, "ne_reset_8390() did not complete.\n");
break;
}
outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
@@ -587,9 +595,9 @@ static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, i
if (ei_status.dmaing)
{
- printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
- "[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d].\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -621,6 +629,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
{
#ifdef NE_SANITY_CHECK
int xfer_count = count;
+ struct ei_device *ei_local = netdev_priv(dev);
#endif
int nic_base = dev->base_addr;
char *buf = skb->data;
@@ -628,9 +637,9 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing)
{
- printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
- "[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d].\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -660,7 +669,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
this message you either 1) have a slightly incompatible clone
or 2) have noise/speed problems with your bus. */
- if (ei_debug > 1)
+ if (netif_msg_rx_status(ei_local))
{
/* DMA termination address check... */
int addr, tries = 20;
@@ -674,9 +683,9 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
break;
} while (--tries > 0);
if (tries <= 0)
- printk(KERN_WARNING "%s: RX transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, ring_offset + xfer_count, addr);
+ netdev_warn(dev, "RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ ring_offset + xfer_count, addr);
}
#endif
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
@@ -690,6 +699,7 @@ static void ne_block_output(struct net_device *dev, int count,
unsigned long dma_start;
#ifdef NE_SANITY_CHECK
int retries = 0;
+ struct ei_device *ei_local = netdev_priv(dev);
#endif
/* Round the count up for word writes. Do we need to do this?
@@ -702,9 +712,9 @@ static void ne_block_output(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing)
{
- printk(KERN_EMERG "%s: DMAing conflict in ne_block_output."
- "[DMAstat:%d][irqlock:%d]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d]\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -751,7 +761,7 @@ retry:
/* This was for the ALPHA version only, but enough people have
been encountering problems so it is still here. */
- if (ei_debug > 1)
+ if (netif_msg_tx_queued(ei_local))
{
/* DMA termination address check... */
int addr, tries = 20;
@@ -765,9 +775,9 @@ retry:
if (tries <= 0)
{
- printk(KERN_WARNING "%s: Tx packet transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, (start_page << 8) + count, addr);
+ netdev_warn(dev, "Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ (start_page << 8) + count, addr);
if (retries++ == 0)
goto retry;
}
@@ -776,7 +786,7 @@ retry:
while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
- printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
ne_reset_8390(dev);
NS8390p_init(dev, 1);
break;
@@ -936,8 +946,8 @@ int __init init_module(void)
retval = platform_driver_probe(&ne_driver, ne_drv_probe);
if (retval) {
if (io[0] == 0)
- printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\""
- " value(s) for ISA cards.\n");
+ pr_notice("ne.c: You must supply \"io=0xNNN\""
+ " value(s) for ISA cards.\n");
ne_loop_rm_unreg(1);
return retval;
}
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index fc14a85e4d5f..f395c967262e 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -33,8 +33,6 @@
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
-static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
-
#define MAX_UNITS 8 /* More are supported, limit only on options */
/* Used to pass the full-duplex flag, etc. */
static int full_duplex[MAX_UNITS];
@@ -60,6 +58,8 @@ static int options[MAX_UNITS];
#include "8390.h"
+static u32 ne2k_msg_enable;
+
/* These identify the driver base version and may not be removed. */
static const char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
@@ -76,10 +76,10 @@ MODULE_AUTHOR("Donald Becker / Paul Gortmaker");
MODULE_DESCRIPTION("PCI NE2000 clone driver");
MODULE_LICENSE("GPL");
-module_param(debug, int, 0);
+module_param_named(msg_enable, ne2k_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
-MODULE_PARM_DESC(debug, "debug level (1-2)");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
MODULE_PARM_DESC(options, "Bit 5: full duplex");
MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
@@ -226,6 +226,7 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
static unsigned int fnd_cnt;
long ioaddr;
int flags = pci_clone_list[chip_idx].flags;
+ struct ei_device *ei_local;
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
@@ -280,6 +281,8 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
goto err_out_free_res;
}
dev->netdev_ops = &ne2k_netdev_ops;
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = ne2k_msg_enable;
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -379,9 +382,9 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
if (i)
goto err_out_free_netdev;
- printk("%s: %s found at %#lx, IRQ %d, %pM.\n",
- dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq,
- dev->dev_addr);
+ netdev_info(dev, "%s found at %#lx, IRQ %d, %pM.\n",
+ pci_clone_list[chip_idx].name, ioaddr, dev->irq,
+ dev->dev_addr);
return 0;
@@ -450,9 +453,10 @@ static int ne2k_pci_close(struct net_device *dev)
static void ne2k_pci_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (debug > 1) printk("%s: Resetting the 8390 t=%ld...",
- dev->name, jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n",
+ jiffies);
outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -462,7 +466,7 @@ static void ne2k_pci_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((inb(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
if (jiffies - reset_start_time > 2) {
- printk("%s: ne2k_pci_reset_8390() did not complete.\n", dev->name);
+ netdev_err(dev, "ne2k_pci_reset_8390() did not complete.\n");
break;
}
outb(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
@@ -479,9 +483,9 @@ static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne2k_pci_get_8390_hdr "
+ netdev_err(dev, "DMAing conflict in ne2k_pci_get_8390_hdr "
"[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -517,9 +521,9 @@ static void ne2k_pci_block_input(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne2k_pci_block_input "
+ netdev_err(dev, "DMAing conflict in ne2k_pci_block_input "
"[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -572,9 +576,9 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne2k_pci_block_output."
+ netdev_err(dev, "DMAing conflict in ne2k_pci_block_output."
"[DMAstat:%d][irqlock:%d]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -619,7 +623,7 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
while ((inb(nic_base + EN0_ISR) & ENISR_RDC) == 0)
if (jiffies - dma_start > 2) { /* Avoid clock roll-over. */
- printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
ne2k_pci_reset_8390(dev);
NS8390_init(dev,1);
break;
@@ -640,8 +644,24 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
+static u32 ne2k_pci_get_msglevel(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ return ei_local->msg_enable;
+}
+
+static void ne2k_pci_set_msglevel(struct net_device *dev, u32 v)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ ei_local->msg_enable = v;
+}
+
static const struct ethtool_ops ne2k_pci_ethtool_ops = {
.get_drvinfo = ne2k_pci_get_drvinfo,
+ .get_msglevel = ne2k_pci_get_msglevel,
+ .set_msglevel = ne2k_pci_set_msglevel,
};
static void ne2k_pci_remove_one(struct pci_dev *pdev)
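
The ne2k-pci.c hunk wires .get_msglevel/.set_msglevel into the driver's ethtool_ops, so the new msg_enable bitmap can be read and changed at runtime. The sketch below (an illustration, not part of the patch) drives those hooks from userspace through the generic SIOCETHTOOL ioctl with the standard ETHTOOL_GMSGLVL / ETHTOOL_SMSGLVL commands; in practice "ethtool -s <dev> msglvl <value>" does the same thing, and the default interface name here is only an example.

/* Sketch: exercise the new get/set_msglevel hooks from userspace.
 * SIOCETHTOOL, ETHTOOL_GMSGLVL and ETHTOOL_SMSGLVL are standard; the
 * interface name and the value written are examples only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0";
	struct ethtool_value eval = { .cmd = ETHTOOL_GMSGLVL };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&eval;

	/* For this driver the request lands in ne2k_pci_get_msglevel(). */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GMSGLVL");
		close(fd);
		return 1;
	}
	printf("%s: msg_enable = %#x\n", ifname, eval.data);

	if (argc > 2) {
		/* Lands in ne2k_pci_set_msglevel(); needs CAP_NET_ADMIN. */
		eval.cmd = ETHTOOL_SMSGLVL;
		eval.data = (unsigned int)strtoul(argv[2], NULL, 0);
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			perror("ETHTOOL_SMSGLVL");
	}

	close(fd);
	return 0;
}
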
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 46c5aadaca8e..ca3c2b921cf6 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -32,7 +32,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/timer.h>
@@ -67,7 +66,7 @@
#define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */
static const char *if_names[] = { "auto", "10baseT", "10base2"};
-
+static u32 pcnet_msg_enable;
/*====================================================================*/
@@ -558,6 +557,7 @@ static int pcnet_config(struct pcmcia_device *link)
int start_pg, stop_pg, cm_offset;
int has_shmem = 0;
hw_info_t *local_hw_info;
+ struct ei_device *ei_local;
dev_dbg(&link->dev, "pcnet_config\n");
@@ -607,6 +607,8 @@ static int pcnet_config(struct pcmcia_device *link)
mii_phy_probe(dev);
SET_NETDEV_DEV(dev, &link->dev);
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = pcnet_msg_enable;
if (register_netdev(dev) != 0) {
pr_notice("register_netdev() failed\n");
@@ -616,7 +618,7 @@ static int pcnet_config(struct pcmcia_device *link)
if (info->flags & (IS_DL10019|IS_DL10022)) {
u_char id = inb(dev->base_addr + 0x1a);
netdev_info(dev, "NE2000 (DL100%d rev %02x): ",
- (info->flags & IS_DL10022) ? 22 : 19, id);
+ (info->flags & IS_DL10022) ? 22 : 19, id);
if (info->pna_phy)
pr_cont("PNA, ");
} else {
@@ -1063,9 +1065,9 @@ static void ei_watchdog(u_long arg)
if (info->phy_id == info->eth_phy) {
if (p)
netdev_info(dev, "autonegotiation complete: "
- "%sbaseT-%cD selected\n",
- ((p & 0x0180) ? "100" : "10"),
- ((p & 0x0140) ? 'F' : 'H'));
+ "%sbaseT-%cD selected\n",
+ ((p & 0x0180) ? "100" : "10"),
+ ((p & 0x0140) ? 'F' : 'H'));
else
netdev_info(dev, "link partner did not autonegotiate\n");
}
@@ -1081,7 +1083,7 @@ static void ei_watchdog(u_long arg)
mdio_write(mii_addr, info->phy_id, 0, 0x0400);
info->phy_id ^= info->pna_phy ^ info->eth_phy;
netdev_info(dev, "switched to %s transceiver\n",
- (info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
+ (info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
mdio_write(mii_addr, info->phy_id, 0,
(info->phy_id == info->eth_phy) ? 0x1000 : 0);
info->link_status = 0;
@@ -1128,9 +1130,9 @@ static void dma_get_8390_hdr(struct net_device *dev,
unsigned int nic_base = dev->base_addr;
if (ei_status.dmaing) {
- netdev_notice(dev, "DMAing conflict in dma_block_input."
- "[DMAstat:%1x][irqlock:%1x]\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in dma_block_input."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -1159,13 +1161,14 @@ static void dma_block_input(struct net_device *dev, int count,
unsigned int nic_base = dev->base_addr;
int xfer_count = count;
char *buf = skb->data;
+ struct ei_device *ei_local = netdev_priv(dev);
- if ((ei_debug > 4) && (count != 4))
+ if ((netif_msg_rx_status(ei_local)) && (count != 4))
netdev_dbg(dev, "[bi=%d]\n", count+4);
if (ei_status.dmaing) {
- netdev_notice(dev, "DMAing conflict in dma_block_input."
- "[DMAstat:%1x][irqlock:%1x]\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in dma_block_input."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -1183,7 +1186,8 @@ static void dma_block_input(struct net_device *dev, int count,
/* This was for the ALPHA version only, but enough people have been
encountering problems that it is still here. */
#ifdef PCMCIA_DEBUG
- if (ei_debug > 4) { /* DMA termination address check... */
+ /* DMA termination address check... */
+ if (netif_msg_rx_status(ei_local)) {
int addr, tries = 20;
do {
/* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
@@ -1196,8 +1200,8 @@ static void dma_block_input(struct net_device *dev, int count,
} while (--tries > 0);
if (tries <= 0)
netdev_notice(dev, "RX transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- ring_offset + xfer_count, addr);
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ ring_offset + xfer_count, addr);
}
#endif
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
@@ -1213,12 +1217,12 @@ static void dma_block_output(struct net_device *dev, int count,
pcnet_dev_t *info = PRIV(dev);
#ifdef PCMCIA_DEBUG
int retries = 0;
+ struct ei_device *ei_local = netdev_priv(dev);
#endif
u_long dma_start;
#ifdef PCMCIA_DEBUG
- if (ei_debug > 4)
- netdev_dbg(dev, "[bo=%d]\n", count);
+ netif_dbg(ei_local, tx_queued, dev, "[bo=%d]\n", count);
#endif
/* Round the count up for word writes. Do we need to do this?
@@ -1227,9 +1231,9 @@ static void dma_block_output(struct net_device *dev, int count,
if (count & 0x01)
count++;
if (ei_status.dmaing) {
- netdev_notice(dev, "DMAing conflict in dma_block_output."
- "[DMAstat:%1x][irqlock:%1x]\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in dma_block_output."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -1256,7 +1260,8 @@ static void dma_block_output(struct net_device *dev, int count,
#ifdef PCMCIA_DEBUG
/* This was for the ALPHA version only, but enough people have been
encountering problems that it is still here. */
- if (ei_debug > 4) { /* DMA termination address check... */
+ /* DMA termination address check... */
+ if (netif_msg_tx_queued(ei_local)) {
int addr, tries = 20;
do {
int high = inb_p(nic_base + EN0_RSARHI);
@@ -1267,8 +1272,8 @@ static void dma_block_output(struct net_device *dev, int count,
} while (--tries > 0);
if (tries <= 0) {
netdev_notice(dev, "Tx packet transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- (start_page << 8) + count, addr);
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ (start_page << 8) + count, addr);
if (retries++ == 0)
goto retry;
}
@@ -1277,10 +1282,10 @@ static void dma_block_output(struct net_device *dev, int count,
while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) {
- netdev_notice(dev, "timeout waiting for Tx RDC.\n");
- pcnet_reset_8390(dev);
- NS8390_init(dev, 1);
- break;
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
+ pcnet_reset_8390(dev);
+ NS8390_init(dev, 1);
+ break;
}
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index b0fbce39661a..139385dcdaa7 100644
--- a/drivers/net/ethernet/8390/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -111,6 +111,7 @@ static struct isapnp_device_id ultra_device_ids[] __initdata = {
MODULE_DEVICE_TABLE(isapnp, ultra_device_ids);
#endif
+static u32 ultra_msg_enable;
#define START_PG 0x00 /* First page of TX buffer */
@@ -211,6 +212,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
unsigned char num_pages, irqreg, addr, piomode;
unsigned char idreg = inb(ioaddr + 7);
unsigned char reg4 = inb(ioaddr + 4) & 0x7f;
+ struct ei_device *ei_local = netdev_priv(dev);
if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME))
return -EBUSY;
@@ -232,16 +234,16 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
goto out;
}
- if (ei_debug && version_printed++ == 0)
- printk(version);
+ if ((ultra_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ netdev_info(dev, version);
model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ";
for (i = 0; i < 6; i++)
dev->dev_addr[i] = inb(ioaddr + 8 + i);
- printk("%s: %s at %#3x, %pM", dev->name, model_name,
- ioaddr, dev->dev_addr);
+ netdev_info(dev, "%s at %#3x, %pM", model_name,
+ ioaddr, dev->dev_addr);
/* Switch from the station address to the alternate register set and
read the useful registers there. */
@@ -265,7 +267,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
irq = irqmap[((irqreg & 0x40) >> 4) + ((irqreg & 0x0c) >> 2)];
if (irq == 0) {
- printk(", failed to detect IRQ line.\n");
+ pr_cont(", failed to detect IRQ line.\n");
retval = -EAGAIN;
goto out;
}
@@ -296,7 +298,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG)*256);
if (!ei_status.mem) {
- printk(", failed to ioremap.\n");
+ pr_cont(", failed to ioremap.\n");
retval = -ENOMEM;
goto out;
}
@@ -304,14 +306,15 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG)*256;
if (piomode) {
- printk(",%s IRQ %d programmed-I/O mode.\n",
- eeprom_irq ? "EEPROM" : "assigned ", dev->irq);
+ pr_cont(", %s IRQ %d programmed-I/O mode.\n",
+ eeprom_irq ? "EEPROM" : "assigned ", dev->irq);
ei_status.block_input = &ultra_pio_input;
ei_status.block_output = &ultra_pio_output;
ei_status.get_8390_hdr = &ultra_pio_get_hdr;
} else {
- printk(",%s IRQ %d memory %#lx-%#lx.\n", eeprom_irq ? "" : "assigned ",
- dev->irq, dev->mem_start, dev->mem_end-1);
+ pr_cont(", %s IRQ %d memory %#lx-%#lx.\n",
+ eeprom_irq ? "" : "assigned ", dev->irq, dev->mem_start,
+ dev->mem_end-1);
ei_status.block_input = &ultra_block_input;
ei_status.block_output = &ultra_block_output;
ei_status.get_8390_hdr = &ultra_get_8390_hdr;
@@ -320,6 +323,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
dev->netdev_ops = &ultra_netdev_ops;
NS8390_init(dev, 0);
+ ei_local->msg_enable = ultra_msg_enable;
retval = register_netdev(dev);
if (retval)
@@ -356,12 +360,15 @@ static int __init ultra_probe_isapnp(struct net_device *dev)
/* found it */
dev->base_addr = pnp_port_start(idev, 0);
dev->irq = pnp_irq(idev, 0);
- printk(KERN_INFO "smc-ultra.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
- (char *) ultra_device_ids[i].driver_data,
- dev->base_addr, dev->irq);
+ netdev_info(dev,
+ "smc-ultra.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
+ (char *) ultra_device_ids[i].driver_data,
+ dev->base_addr, dev->irq);
if (ultra_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */
- printk(KERN_ERR "smc-ultra.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr);
- pnp_device_detach(idev);
+ netdev_err(dev,
+ "smc-ultra.c: Probe of ISAPnP card at %#lx failed.\n",
+ dev->base_addr);
+ pnp_device_detach(idev);
return -ENXIO;
}
ei_status.priv = (unsigned long)idev;
@@ -412,9 +419,10 @@ static void
ultra_reset_8390(struct net_device *dev)
{
int cmd_port = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC base addr */
+ struct ei_device *ei_local = netdev_priv(dev);
outb(ULTRA_RESET, cmd_port);
- if (ei_debug > 1) printk("resetting Ultra, t=%ld...", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting Ultra, t=%ld...\n", jiffies);
ei_status.txing = 0;
outb(0x00, cmd_port); /* Disable shared memory for safety. */
@@ -424,7 +432,7 @@ ultra_reset_8390(struct net_device *dev)
else
outb(0x01, cmd_port + 6); /* Enable interrupts and memory. */
- if (ei_debug > 1) printk("reset done\n");
+ netif_dbg(ei_local, hw, dev, "reset done\n");
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
@@ -530,11 +538,11 @@ static int
ultra_close_card(struct net_device *dev)
{
int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* CMDREG */
+ struct ei_device *ei_local = netdev_priv(dev);
netif_stop_queue(dev);
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
+ netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n");
outb(0x00, ioaddr + 6); /* Disable interrupts. */
free_irq(dev->irq, dev);
@@ -556,8 +564,10 @@ static int irq[MAX_ULTRA_CARDS];
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
+module_param_named(msg_enable, ultra_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
MODULE_PARM_DESC(io, "I/O base address(es)");
MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
MODULE_DESCRIPTION("SMC Ultra/EtherEZ ISA/PnP Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index 8df4c4157230..aca957d4e121 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -69,6 +69,11 @@ static void stnic_block_output (struct net_device *dev, int count,
static void stnic_init (struct net_device *dev);
+static u32 stnic_msg_enable;
+
+module_param_named(msg_enable, stnic_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
+
/* SH7750 specific read/write io. */
static inline void
STNIC_DELAY (void)
@@ -100,6 +105,7 @@ static int __init stnic_probe(void)
{
struct net_device *dev;
int i, err;
+ struct ei_device *ei_local;
/* If we are not running on a SolutionEngine, give up now */
if (! MACH_SE)
@@ -125,10 +131,10 @@ static int __init stnic_probe(void)
share and the board will usually be enabled. */
err = request_irq (dev->irq, ei_interrupt, 0, DRV_NAME, dev);
if (err) {
- printk (KERN_EMERG " unable to get IRQ %d.\n", dev->irq);
- free_netdev(dev);
- return err;
- }
+ netdev_emerg(dev, " unable to get IRQ %d.\n", dev->irq);
+ free_netdev(dev);
+ return err;
+ }
ei_status.name = dev->name;
ei_status.word16 = 1;
@@ -147,6 +153,8 @@ static int __init stnic_probe(void)
ei_status.block_output = &stnic_block_output;
stnic_init (dev);
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = stnic_msg_enable;
err = register_netdev(dev);
if (err) {
@@ -156,7 +164,7 @@ static int __init stnic_probe(void)
}
stnic_dev = dev;
- printk (KERN_INFO "NS ST-NIC 83902A\n");
+ netdev_info(dev, "NS ST-NIC 83902A\n");
return 0;
}
@@ -164,10 +172,11 @@ static int __init stnic_probe(void)
static void
stnic_reset (struct net_device *dev)
{
+ struct ei_device *ei_local = netdev_priv(dev);
+
*(vhalf *) PA_83902_RST = 0;
udelay (5);
- if (ei_debug > 1)
- printk (KERN_WARNING "8390 reset done (%ld).\n", jiffies);
+ netif_warn(ei_local, hw, dev, "8390 reset done (%ld).\n", jiffies);
*(vhalf *) PA_83902_RST = ~0;
udelay (5);
}
@@ -176,6 +185,8 @@ static void
stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr,
int ring_page)
{
+ struct ei_device *ei_local = netdev_priv(dev);
+
half buf[2];
STNIC_WRITE (PG0_RSAR0, 0);
@@ -196,8 +207,7 @@ stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr,
hdr->count = ((buf[1] >> 8) & 0xff) | (buf[1] << 8);
#endif
- if (ei_debug > 1)
- printk (KERN_DEBUG "ring %x status %02x next %02x count %04x.\n",
+ netif_dbg(ei_local, probe, dev, "ring %x status %02x next %02x count %04x.\n",
ring_page, hdr->status, hdr->next, hdr->count);
STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA);
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
index 03eb3eed49fa..dd7d816bde52 100644
--- a/drivers/net/ethernet/8390/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -60,6 +60,7 @@ static void wd_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static int wd_close(struct net_device *dev);
+static u32 wd_msg_enable;
#define WD_START_PG 0x00 /* First page of TX buffer */
#define WD03_STOP_PG 0x20 /* Last page +1 of RX ring */
@@ -170,6 +171,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */
const char *model_name;
static unsigned version_printed;
+ struct ei_device *ei_local = netdev_priv(dev);
for (i = 0; i < 8; i++)
checksum += inb(ioaddr + 8 + i);
@@ -180,19 +182,19 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
/* Check for semi-valid mem_start/end values if supplied. */
if ((dev->mem_start % 0x2000) || (dev->mem_end % 0x2000)) {
- printk(KERN_WARNING "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n");
+ netdev_warn(dev,
+ "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n");
dev->mem_start = 0;
dev->mem_end = 0;
}
- if (ei_debug && version_printed++ == 0)
- printk(version);
+ if ((wd_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ netdev_info(dev, version);
for (i = 0; i < 6; i++)
dev->dev_addr[i] = inb(ioaddr + 8 + i);
- printk("%s: WD80x3 at %#3x, %pM",
- dev->name, ioaddr, dev->dev_addr);
+ netdev_info(dev, "WD80x3 at %#3x, %pM", ioaddr, dev->dev_addr);
/* The following PureData probe code was contributed by
Mike Jagdis <jaggy@purplet.demon.co.uk>. Puredata does software
@@ -244,8 +246,9 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
}
#ifndef final_version
if ( !ancient && (inb(ioaddr+1) & 0x01) != (word16 & 0x01))
- printk("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).",
- word16 ? 16 : 8, (inb(ioaddr+1) & 0x01) ? 16 : 8);
+ pr_cont("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).",
+ word16 ? 16 : 8,
+ (inb(ioaddr+1) & 0x01) ? 16 : 8);
#endif
}
@@ -259,7 +262,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
if (reg0 == 0xff || reg0 == 0) {
/* Future plan: this could check a few likely locations first. */
dev->mem_start = 0xd0000;
- printk(" assigning address %#lx", dev->mem_start);
+ pr_cont(" assigning address %#lx", dev->mem_start);
} else {
int high_addr_bits = inb(ioaddr+WD_CMDREG5) & 0x1f;
/* Some boards don't have the register 5 -- it returns 0xff. */
@@ -297,8 +300,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
outb_p(0x00, nic_addr+EN0_IMR); /* Mask all intrs. again. */
- if (ei_debug > 2)
- printk(" autoirq is %d", dev->irq);
+ if (netif_msg_drv(ei_local))
+ pr_cont(" autoirq is %d", dev->irq);
if (dev->irq < 2)
dev->irq = word16 ? 10 : 5;
} else
@@ -310,7 +313,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
share and the board will usually be enabled. */
i = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
if (i) {
- printk (" unable to get IRQ %d.\n", dev->irq);
+ pr_cont(" unable to get IRQ %d.\n", dev->irq);
return i;
}
@@ -338,8 +341,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
return -ENOMEM;
}
- printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
- model_name, dev->irq, dev->mem_start, dev->mem_end-1);
+ pr_cont(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
+ model_name, dev->irq, dev->mem_start, dev->mem_end-1);
ei_status.reset_8390 = wd_reset_8390;
ei_status.block_input = wd_block_input;
@@ -348,6 +351,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
dev->netdev_ops = &wd_netdev_ops;
NS8390_init(dev, 0);
+ ei_local->msg_enable = wd_msg_enable;
#if 1
/* Enable interrupt generation on softconfig cards -- M.U */
@@ -385,9 +389,11 @@ static void
wd_reset_8390(struct net_device *dev)
{
int wd_cmd_port = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ struct ei_device *ei_local = netdev_priv(dev);
outb(WD_RESET, wd_cmd_port);
- if (ei_debug > 1) printk("resetting the WD80x3 t=%lu...", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the WD80x3 t=%lu...\n",
+ jiffies);
ei_status.txing = 0;
/* Set up the ASIC registers, just in case something changed them. */
@@ -395,7 +401,7 @@ wd_reset_8390(struct net_device *dev)
if (ei_status.word16)
outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5);
- if (ei_debug > 1) printk("reset done\n");
+ netif_dbg(ei_local, hw, dev, "reset done\n");
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
@@ -474,9 +480,9 @@ static int
wd_close(struct net_device *dev)
{
int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
+ netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n");
ei_close(dev);
/* Change from 16-bit to 8-bit shared memory so reboot works. */
@@ -502,10 +508,12 @@ module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(mem, int, NULL, 0);
module_param_array(mem_end, int, NULL, 0);
+module_param_named(msg_enable, wd_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
MODULE_PARM_DESC(io, "I/O base address(es)");
MODULE_PARM_DESC(irq, "IRQ number(s) (ignored for PureData boards)");
MODULE_PARM_DESC(mem, "memory base address(es)(ignored for PureData boards)");
MODULE_PARM_DESC(mem_end, "memory end address(es)");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
MODULE_DESCRIPTION("ISA Western Digital wd8003/wd8013 ; SMC Elite, Elite16 ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 85ec4c2d2645..8308728fad05 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -44,6 +44,8 @@
static const char version[] =
"8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+static u32 zorro8390_msg_enable;
+
#include "lib8390.c"
#define DRV_NAME "zorro8390"
@@ -86,9 +88,9 @@ static struct card_info {
static void zorro8390_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- netdev_dbg(dev, "resetting - t=%ld...\n", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting - t=%ld...\n", jiffies);
z_writeb(z_readb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -119,8 +121,9 @@ static void zorro8390_get_8390_hdr(struct net_device *dev,
* If it does, it's the last thing you'll see
*/
if (ei_status.dmaing) {
- netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
- __func__, ei_status.dmaing, ei_status.irqlock);
+ netdev_warn(dev,
+ "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
+ __func__, ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -230,7 +233,7 @@ static void zorro8390_block_output(struct net_device *dev, int count,
while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2 * HZ / 100)) {
/* 20ms */
- netdev_err(dev, "timeout waiting for Tx RDC\n");
+ netdev_warn(dev, "timeout waiting for Tx RDC\n");
zorro8390_reset_8390(dev);
__NS8390_init(dev, 1);
break;
@@ -248,8 +251,9 @@ static int zorro8390_open(struct net_device *dev)
static int zorro8390_close(struct net_device *dev)
{
- if (ei_debug > 1)
- netdev_dbg(dev, "Shutting down ethercard\n");
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard\n");
__ei_close(dev);
return 0;
}
@@ -287,12 +291,13 @@ static const struct net_device_ops zorro8390_netdev_ops = {
};
static int zorro8390_init(struct net_device *dev, unsigned long board,
- const char *name, unsigned long ioaddr)
+ const char *name, void __iomem *ioaddr)
{
int i;
int err;
unsigned char SA_prom[32];
int start_page, stop_page;
+ struct ei_device *ei_local = netdev_priv(dev);
static u32 zorro8390_offsets[16] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
@@ -354,7 +359,7 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
start_page = NESM_START_PG;
stop_page = NESM_STOP_PG;
- dev->base_addr = ioaddr;
+ dev->base_addr = (unsigned long)ioaddr;
dev->irq = IRQ_AMIGA_PORTS;
/* Install the Interrupt handler */
@@ -383,6 +388,9 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
dev->netdev_ops = &zorro8390_netdev_ops;
__NS8390_init(dev, 0);
+
+ ei_local->msg_enable = zorro8390_msg_enable;
+
err = register_netdev(dev);
if (err) {
free_irq(IRQ_AMIGA_PORTS, dev);
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 75fb1d20d6fd..c0f68dcd1dc1 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -667,8 +667,8 @@ static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result)
return 1000000000UL / ppn;
}
-static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+static int bfin_mac_hwtstamp_set(struct net_device *netdev,
+ struct ifreq *ifr)
{
struct hwtstamp_config config;
struct bfin_mac_local *lp = netdev_priv(netdev);
@@ -824,6 +824,16 @@ static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
-EFAULT : 0;
}
+static int bfin_mac_hwtstamp_get(struct net_device *netdev,
+ struct ifreq *ifr)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+
+ return copy_to_user(ifr->ifr_data, &lp->stamp_cfg,
+ sizeof(lp->stamp_cfg)) ?
+ -EFAULT : 0;
+}
+
static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
struct bfin_mac_local *lp = netdev_priv(netdev);
@@ -1062,7 +1072,8 @@ static void bfin_phc_release(struct bfin_mac_local *lp)
#else
# define bfin_mac_hwtstamp_is_none(cfg) 0
# define bfin_mac_hwtstamp_init(dev)
-# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
+# define bfin_mac_hwtstamp_set(dev, ifr) (-EOPNOTSUPP)
+# define bfin_mac_hwtstamp_get(dev, ifr) (-EOPNOTSUPP)
# define bfin_rx_hwtstamp(dev, skb)
# define bfin_tx_hwtstamp(dev, skb)
# define bfin_phc_init(netdev, dev) 0
@@ -1496,7 +1507,9 @@ static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCSHWTSTAMP:
- return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
+ return bfin_mac_hwtstamp_set(netdev, ifr);
+ case SIOCGHWTSTAMP:
+ return bfin_mac_hwtstamp_get(netdev, ifr);
default:
if (lp->phydev)
return phy_mii_ioctl(lp->phydev, ifr, cmd);
@@ -1544,7 +1557,6 @@ static int bfin_mac_open(struct net_device *dev)
return ret;
phy_start(lp->phydev);
- phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
setup_system_regs(dev);
setup_mac_addr(dev->dev_addr);
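
The bfin_mac.c hunk splits the old hwtstamp ioctl handler into separate set and get paths and hooks up SIOCGHWTSTAMP, which lets userspace read back the current hardware timestamping configuration without changing it. Below is a hedged read-back sketch, not part of the patch: it assumes kernel headers recent enough to define SIOCGHWTSTAMP in linux/sockios.h, uses struct hwtstamp_config from linux/net_tstamp.h, and "eth0" is a placeholder interface name.

/* Sketch: read back the hardware timestamping config via SIOCGHWTSTAMP,
 * the "get" path added above. Assumes headers that define SIOCGHWTSTAMP. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) < 0) {
		perror("SIOCGHWTSTAMP");	/* drivers without the get path return an error */
		close(fd);
		return 1;
	}

	printf("flags=%d tx_type=%d rx_filter=%d\n",
	       cfg.flags, cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}
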
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index e06694555144..c5d75e7aeeb6 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -25,7 +25,6 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/uaccess.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -1361,7 +1360,7 @@ static int greth_mdio_init(struct greth_private *greth)
timeout = jiffies + 6*HZ;
while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
}
- genphy_read_status(greth->phy);
+ phy_read_status(greth->phy);
greth_link_change(greth->netdev);
}
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 46dfb1378c17..511f6eecd58b 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -16,7 +16,6 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mii.h>
@@ -930,6 +929,9 @@ static int emac_resume(struct platform_device *dev)
}
static const struct of_device_id emac_of_match[] = {
+ {.compatible = "allwinner,sun4i-a10-emac",},
+
+ /* Deprecated */
{.compatible = "allwinner,sun4i-emac",},
{},
};
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 219be1bf3cfc..1517e9df5ba1 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -61,7 +61,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/highmem.h>
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 65926a956575..18e542f7853d 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -17,7 +17,6 @@
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -42,9 +41,9 @@
#include "7990.h"
-#define WRITERAP(lp,x) out_be16(lp->base + LANCE_RAP, (x))
-#define WRITERDP(lp,x) out_be16(lp->base + LANCE_RDP, (x))
-#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
+#define WRITERAP(lp, x) out_be16(lp->base + LANCE_RAP, (x))
+#define WRITERDP(lp, x) out_be16(lp->base + LANCE_RDP, (x))
+#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE)
#include "hplance.h"
@@ -56,9 +55,9 @@
#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE)
/* Lossage Factor Nine, Mr Sulu. */
-#define WRITERAP(lp,x) (lp->writerap(lp,x))
-#define WRITERDP(lp,x) (lp->writerdp(lp,x))
-#define READRDP(lp) (lp->readrdp(lp))
+#define WRITERAP(lp, x) (lp->writerap(lp, x))
+#define WRITERDP(lp, x) (lp->writerdp(lp, x))
+#define READRDP(lp) (lp->readrdp(lp))
#else
@@ -94,428 +93,436 @@ static inline __u16 READRDP(struct lance_private *lp)
#ifdef UNDEF
#define PRINT_RINGS() \
do { \
- int t; \
- for (t=0; t < RX_RING_SIZE; t++) { \
- printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n",\
- t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0,\
- ib->brx_ring[t].length,\
- ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits);\
- }\
- for (t=0; t < TX_RING_SIZE; t++) { \
- printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n",\
- t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0,\
- ib->btx_ring[t].length,\
- ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits);\
- }\
+ int t; \
+ for (t = 0; t < RX_RING_SIZE; t++) { \
+ printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n", \
+ t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \
+ ib->brx_ring[t].length, \
+ ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \
+ } \
+ for (t = 0; t < TX_RING_SIZE; t++) { \
+ printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n", \
+ t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \
+ ib->btx_ring[t].length, \
+ ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits); \
+ } \
} while (0)
#else
#define PRINT_RINGS()
#endif
/* Load the CSR registers. The LANCE has to be STOPped when we do this! */
-static void load_csrs (struct lance_private *lp)
+static void load_csrs(struct lance_private *lp)
{
- volatile struct lance_init_block *aib = lp->lance_init_block;
- int leptr;
+ volatile struct lance_init_block *aib = lp->lance_init_block;
+ int leptr;
- leptr = LANCE_ADDR (aib);
+ leptr = LANCE_ADDR(aib);
- WRITERAP(lp, LE_CSR1); /* load address of init block */
- WRITERDP(lp, leptr & 0xFFFF);
- WRITERAP(lp, LE_CSR2);
- WRITERDP(lp, leptr >> 16);
- WRITERAP(lp, LE_CSR3);
- WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */
+ WRITERAP(lp, LE_CSR1); /* load address of init block */
+ WRITERDP(lp, leptr & 0xFFFF);
+ WRITERAP(lp, LE_CSR2);
+ WRITERDP(lp, leptr >> 16);
+ WRITERAP(lp, LE_CSR3);
+ WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */
- /* Point back to csr0 */
- WRITERAP(lp, LE_CSR0);
+ /* Point back to csr0 */
+ WRITERAP(lp, LE_CSR0);
}
/* #define to 0 or 1 appropriately */
#define DEBUG_IRING 0
/* Set up the Lance Rx and Tx rings and the init block */
-static void lance_init_ring (struct net_device *dev)
+static void lance_init_ring(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
- int leptr;
- int i;
-
- aib = lp->lance_init_block;
-
- lp->rx_new = lp->tx_new = 0;
- lp->rx_old = lp->tx_old = 0;
-
- ib->mode = LE_MO_PROM; /* normal, enable Tx & Rx */
-
- /* Copy the ethernet address to the lance init block
- * Notice that we do a byteswap if we're big endian.
- * [I think this is the right criterion; at least, sunlance,
- * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
- * However, the datasheet says that the BSWAP bit doesn't affect
- * the init block, so surely it should be low byte first for
- * everybody? Um.]
- * We could define the ib->physaddr as three 16bit values and
- * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
- */
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
+ int leptr;
+ int i;
+
+ aib = lp->lance_init_block;
+
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ ib->mode = LE_MO_PROM; /* normal, enable Tx & Rx */
+
+ /* Copy the ethernet address to the lance init block
+ * Notice that we do a byteswap if we're big endian.
+ * [I think this is the right criterion; at least, sunlance,
+ * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
+ * However, the datasheet says that the BSWAP bit doesn't affect
+ * the init block, so surely it should be low byte first for
+ * everybody? Um.]
+ * We could define the ib->physaddr as three 16bit values and
+ * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
+ */
#ifdef __BIG_ENDIAN
- ib->phys_addr [0] = dev->dev_addr [1];
- ib->phys_addr [1] = dev->dev_addr [0];
- ib->phys_addr [2] = dev->dev_addr [3];
- ib->phys_addr [3] = dev->dev_addr [2];
- ib->phys_addr [4] = dev->dev_addr [5];
- ib->phys_addr [5] = dev->dev_addr [4];
+ ib->phys_addr[0] = dev->dev_addr[1];
+ ib->phys_addr[1] = dev->dev_addr[0];
+ ib->phys_addr[2] = dev->dev_addr[3];
+ ib->phys_addr[3] = dev->dev_addr[2];
+ ib->phys_addr[4] = dev->dev_addr[5];
+ ib->phys_addr[5] = dev->dev_addr[4];
#else
- for (i=0; i<6; i++)
- ib->phys_addr[i] = dev->dev_addr[i];
+ for (i = 0; i < 6; i++)
+ ib->phys_addr[i] = dev->dev_addr[i];
#endif
- if (DEBUG_IRING)
- printk ("TX rings:\n");
+ if (DEBUG_IRING)
+ printk("TX rings:\n");
lp->tx_full = 0;
- /* Setup the Tx ring entries */
- for (i = 0; i < (1<<lp->lance_log_tx_bufs); i++) {
- leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
- ib->btx_ring [i].tmd0 = leptr;
- ib->btx_ring [i].tmd1_hadr = leptr >> 16;
- ib->btx_ring [i].tmd1_bits = 0;
- ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
- ib->btx_ring [i].misc = 0;
- if (DEBUG_IRING)
- printk ("%d: 0x%8.8x\n", i, leptr);
- }
-
- /* Setup the Rx ring entries */
- if (DEBUG_IRING)
- printk ("RX rings:\n");
- for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) {
- leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
-
- ib->brx_ring [i].rmd0 = leptr;
- ib->brx_ring [i].rmd1_hadr = leptr >> 16;
- ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
- /* 0xf000 == bits that must be one (reserved, presumably) */
- ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
- ib->brx_ring [i].mblength = 0;
- if (DEBUG_IRING)
- printk ("%d: 0x%8.8x\n", i, leptr);
- }
-
- /* Setup the initialization block */
-
- /* Setup rx descriptor pointer */
- leptr = LANCE_ADDR(&aib->brx_ring);
- ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
- ib->rx_ptr = leptr;
- if (DEBUG_IRING)
- printk ("RX ptr: %8.8x\n", leptr);
-
- /* Setup tx descriptor pointer */
- leptr = LANCE_ADDR(&aib->btx_ring);
- ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
- ib->tx_ptr = leptr;
- if (DEBUG_IRING)
- printk ("TX ptr: %8.8x\n", leptr);
-
- /* Clear the multicast filter */
- ib->filter [0] = 0;
- ib->filter [1] = 0;
- PRINT_RINGS();
+ /* Setup the Tx ring entries */
+ for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
+ ib->btx_ring[i].tmd0 = leptr;
+ ib->btx_ring[i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring[i].tmd1_bits = 0;
+ ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring[i].misc = 0;
+ if (DEBUG_IRING)
+ printk("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the Rx ring entries */
+ if (DEBUG_IRING)
+ printk("RX rings:\n");
+ for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
+
+ ib->brx_ring[i].rmd0 = leptr;
+ ib->brx_ring[i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
+ /* 0xf000 == bits that must be one (reserved, presumably) */
+ ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring[i].mblength = 0;
+ if (DEBUG_IRING)
+ printk("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->brx_ring);
+ ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+ if (DEBUG_IRING)
+ printk("RX ptr: %8.8x\n", leptr);
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->btx_ring);
+ ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+ if (DEBUG_IRING)
+ printk("TX ptr: %8.8x\n", leptr);
+
+ /* Clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+ PRINT_RINGS();
}
/* LANCE must be STOPped before we do this, too... */
-static int init_restart_lance (struct lance_private *lp)
+static int init_restart_lance(struct lance_private *lp)
{
- int i;
+ int i;
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_INIT);
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_INIT);
- /* Need a hook here for sunlance ledma stuff */
+ /* Need a hook here for sunlance ledma stuff */
- /* Wait for the lance to complete initialization */
- for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
- barrier();
- if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
- printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
- return -1;
- }
+ /* Wait for the lance to complete initialization */
+ for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
+ barrier();
+ if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
+ printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
+ return -1;
+ }
- /* Clear IDON by writing a "1", enable interrupts and start lance */
- WRITERDP(lp, LE_C0_IDON);
- WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ WRITERDP(lp, LE_C0_IDON);
+ WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);
- return 0;
+ return 0;
}
-static int lance_reset (struct net_device *dev)
+static int lance_reset(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- int status;
+ struct lance_private *lp = netdev_priv(dev);
+ int status;
- /* Stop the lance */
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
- load_csrs (lp);
- lance_init_ring (dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
- status = init_restart_lance (lp);
+ load_csrs(lp);
+ lance_init_ring(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ status = init_restart_lance(lp);
#ifdef DEBUG_DRIVER
- printk ("Lance restart=%d\n", status);
+ printk("Lance restart=%d\n", status);
#endif
- return status;
+ return status;
}
-static int lance_rx (struct net_device *dev)
+static int lance_rx(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- volatile struct lance_rx_desc *rd;
- unsigned char bits;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_rx_desc *rd;
+ unsigned char bits;
#ifdef TEST_HITS
- int i;
+ int i;
#endif
#ifdef TEST_HITS
- printk ("[");
- for (i = 0; i < RX_RING_SIZE; i++) {
- if (i == lp->rx_new)
- printk ("%s",
- ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
- else
- printk ("%s",
- ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
- }
- printk ("]");
+ printk("[");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (i == lp->rx_new)
+ printk("%s",
+ ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "_" : "X");
+ else
+ printk("%s",
+ ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "." : "1");
+ }
+ printk("]");
#endif
#ifdef CONFIG_HP300
blinken_leds(0x40, 0);
#endif
- WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */
- for (rd = &ib->brx_ring [lp->rx_new]; /* For each Rx ring we own... */
- !((bits = rd->rmd1_bits) & LE_R1_OWN);
- rd = &ib->brx_ring [lp->rx_new]) {
-
- /* We got an incomplete frame? */
- if ((bits & LE_R1_POK) != LE_R1_POK) {
- dev->stats.rx_over_errors++;
- dev->stats.rx_errors++;
- continue;
- } else if (bits & LE_R1_ERR) {
- /* Count only the end frame as a rx error,
- * not the beginning
- */
- if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
- if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
- if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
- if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
- if (bits & LE_R1_EOP) dev->stats.rx_errors++;
- } else {
+ WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */
+ for (rd = &ib->brx_ring[lp->rx_new]; /* For each Rx ring we own... */
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring[lp->rx_new]) {
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ continue;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF)
+ dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC)
+ dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL)
+ dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA)
+ dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP)
+ dev->stats.rx_errors++;
+ } else {
int len = (rd->mblength & 0xfff) - 4;
struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
- if (!skb) {
- dev->stats.rx_dropped++;
- rd->mblength = 0;
- rd->rmd1_bits = LE_R1_OWN;
- lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
- return 0;
- }
-
- skb_reserve (skb, 2); /* 16 byte align */
- skb_put (skb, len); /* make room */
- skb_copy_to_linear_data(skb,
- (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
- len);
- skb->protocol = eth_type_trans (skb, dev);
- netif_rx (skb);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ return 0;
+ }
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)&(ib->rx_buf[lp->rx_new][0]),
+ len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
- }
-
- /* Return the packet to the pool */
- rd->mblength = 0;
- rd->rmd1_bits = LE_R1_OWN;
- lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
- }
- return 0;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ }
+ return 0;
}
-static int lance_tx (struct net_device *dev)
+static int lance_tx(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- volatile struct lance_tx_desc *td;
- int i, j;
- int status;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_tx_desc *td;
+ int i, j;
+ int status;
#ifdef CONFIG_HP300
blinken_leds(0x80, 0);
#endif
- /* csr0 is 2f3 */
- WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
- /* csr0 is 73 */
-
- j = lp->tx_old;
- for (i = j; i != lp->tx_new; i = j) {
- td = &ib->btx_ring [i];
-
- /* If we hit a packet not owned by us, stop */
- if (td->tmd1_bits & LE_T1_OWN)
- break;
-
- if (td->tmd1_bits & LE_T1_ERR) {
- status = td->misc;
-
- dev->stats.tx_errors++;
- if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
- if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
-
- if (status & LE_T3_CLOS) {
- dev->stats.tx_carrier_errors++;
- if (lp->auto_select) {
- lp->tpe = 1 - lp->tpe;
- printk("%s: Carrier Lost, trying %s\n",
- dev->name, lp->tpe?"TPE":"AUI");
- /* Stop the lance */
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
- lance_init_ring (dev);
- load_csrs (lp);
- init_restart_lance (lp);
- return 0;
- }
- }
-
- /* buffer errors and underflows turn off the transmitter */
- /* Restart the adapter */
- if (status & (LE_T3_BUF|LE_T3_UFL)) {
- dev->stats.tx_fifo_errors++;
-
- printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
- dev->name);
- /* Stop the lance */
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
- lance_init_ring (dev);
- load_csrs (lp);
- init_restart_lance (lp);
- return 0;
- }
- } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
- /*
- * So we don't count the packet more than once.
- */
- td->tmd1_bits &= ~(LE_T1_POK);
-
- /* One collision before packet was sent. */
- if (td->tmd1_bits & LE_T1_EONE)
- dev->stats.collisions++;
-
- /* More than one collision, be optimistic. */
- if (td->tmd1_bits & LE_T1_EMORE)
- dev->stats.collisions += 2;
-
- dev->stats.tx_packets++;
- }
-
- j = (j + 1) & lp->tx_ring_mod_mask;
- }
- lp->tx_old = j;
- WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
- return 0;
+ /* csr0 is 2f3 */
+ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+ /* csr0 is 73 */
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ td = &ib->btx_ring[i];
+
+ /* If we hit a packet not owned by us, stop */
+ if (td->tmd1_bits & LE_T1_OWN)
+ break;
+
+ if (td->tmd1_bits & LE_T1_ERR) {
+ status = td->misc;
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY)
+ dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL)
+ dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk("%s: Carrier Lost, trying %s\n",
+ dev->name,
+ lp->tpe ? "TPE" : "AUI");
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ }
+
+ /* buffer errors and underflows turn off the transmitter */
+ /* Restart the adapter */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ td->tmd1_bits &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (td->tmd1_bits & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (td->tmd1_bits & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+
+ j = (j + 1) & lp->tx_ring_mod_mask;
+ }
+ lp->tx_old = j;
+ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+ return 0;
}
static irqreturn_t
-lance_interrupt (int irq, void *dev_id)
+lance_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = (struct net_device *)dev_id;
- struct lance_private *lp = netdev_priv(dev);
- int csr0;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ int csr0;
- spin_lock (&lp->devlock);
+ spin_lock(&lp->devlock);
- WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */
- csr0 = READRDP(lp);
+ WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */
+ csr0 = READRDP(lp);
- PRINT_RINGS();
+ PRINT_RINGS();
- if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */
- spin_unlock (&lp->devlock);
- return IRQ_NONE; /* been generated by the Lance. */
+ if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */
+ spin_unlock(&lp->devlock);
+ return IRQ_NONE; /* been generated by the Lance. */
}
- /* Acknowledge all the interrupt sources ASAP */
- WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));
-
- if ((csr0 & LE_C0_ERR)) {
- /* Clear the error condition */
- WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
- }
-
- if (csr0 & LE_C0_RINT)
- lance_rx (dev);
-
- if (csr0 & LE_C0_TINT)
- lance_tx (dev);
-
- /* Log misc errors. */
- if (csr0 & LE_C0_BABL)
- dev->stats.tx_errors++; /* Tx babble. */
- if (csr0 & LE_C0_MISS)
- dev->stats.rx_errors++; /* Missed a Rx frame. */
- if (csr0 & LE_C0_MERR) {
- printk("%s: Bus master arbitration failure, status %4.4x.\n",
- dev->name, csr0);
- /* Restart the chip. */
- WRITERDP(lp, LE_C0_STRT);
- }
-
- if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
+ /* Acknowledge all the interrupt sources ASAP */
+ WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));
+
+ if ((csr0 & LE_C0_ERR)) {
+ /* Clear the error condition */
+ WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
+ }
+
+ if (csr0 & LE_C0_RINT)
+ lance_rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lance_tx(dev);
+
+ /* Log misc errors. */
+ if (csr0 & LE_C0_BABL)
+ dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & LE_C0_MISS)
+ dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & LE_C0_MERR) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ WRITERDP(lp, LE_C0_STRT);
+ }
+
+ if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
lp->tx_full = 0;
- netif_wake_queue (dev);
- }
+ netif_wake_queue(dev);
+ }
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);
- spin_unlock (&lp->devlock);
+ spin_unlock(&lp->devlock);
return IRQ_HANDLED;
}
-int lance_open (struct net_device *dev)
+int lance_open(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
+ struct lance_private *lp = netdev_priv(dev);
int res;
- /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
- if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
- return -EAGAIN;
+ /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
+ if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
+ return -EAGAIN;
- res = lance_reset(dev);
+ res = lance_reset(dev);
spin_lock_init(&lp->devlock);
- netif_start_queue (dev);
+ netif_start_queue(dev);
return res;
}
EXPORT_SYMBOL_GPL(lance_open);
-int lance_close (struct net_device *dev)
+int lance_close(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
+ struct lance_private *lp = netdev_priv(dev);
- netif_stop_queue (dev);
+ netif_stop_queue(dev);
- /* Stop the LANCE */
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
+ /* Stop the LANCE */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
- free_irq(lp->irq, dev);
+ free_irq(lp->irq, dev);
- return 0;
+ return 0;
}
EXPORT_SYMBOL_GPL(lance_close);
@@ -524,122 +531,122 @@ void lance_tx_timeout(struct net_device *dev)
printk("lance_tx_timeout\n");
lance_reset(dev);
dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue (dev);
+ netif_wake_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
-int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
+int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- int entry, skblen, len;
- static int outs;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ int entry, skblen, len;
+ static int outs;
unsigned long flags;
- if (!TX_BUFFS_AVAIL)
- return NETDEV_TX_LOCKED;
+ if (!TX_BUFFS_AVAIL)
+ return NETDEV_TX_LOCKED;
- netif_stop_queue (dev);
+ netif_stop_queue(dev);
- skblen = skb->len;
+ skblen = skb->len;
#ifdef DEBUG_DRIVER
- /* dump the packet */
- {
- int i;
-
- for (i = 0; i < 64; i++) {
- if ((i % 16) == 0)
- printk ("\n");
- printk ("%2.2x ", skb->data [i]);
- }
- }
+ /* dump the packet */
+ {
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if ((i % 16) == 0)
+ printk("\n");
+ printk("%2.2x ", skb->data[i]);
+ }
+ }
#endif
- len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
- entry = lp->tx_new & lp->tx_ring_mod_mask;
- ib->btx_ring [entry].length = (-len) | 0xf000;
- ib->btx_ring [entry].misc = 0;
+ len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+ entry = lp->tx_new & lp->tx_ring_mod_mask;
+ ib->btx_ring[entry].length = (-len) | 0xf000;
+ ib->btx_ring[entry].misc = 0;
if (skb->len < ETH_ZLEN)
memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
- skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
+ skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
- /* Now, give the packet to the lance */
- ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
- lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
+ /* Now, give the packet to the lance */
+ ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+ lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;
- outs++;
- /* Kick the lance: transmit now */
- WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
- dev_kfree_skb (skb);
+ outs++;
+ /* Kick the lance: transmit now */
+ WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
+ dev_kfree_skb(skb);
- spin_lock_irqsave (&lp->devlock, flags);
- if (TX_BUFFS_AVAIL)
- netif_start_queue (dev);
+ spin_lock_irqsave(&lp->devlock, flags);
+ if (TX_BUFFS_AVAIL)
+ netif_start_queue(dev);
else
lp->tx_full = 1;
- spin_unlock_irqrestore (&lp->devlock, flags);
+ spin_unlock_irqrestore(&lp->devlock, flags);
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(lance_start_xmit);
/* taken from the depca driver via a2065.c */
-static void lance_load_multicast (struct net_device *dev)
+static void lance_load_multicast(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- volatile u16 *mcast_table = (u16 *)&ib->filter;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile u16 *mcast_table = (u16 *)&ib->filter;
struct netdev_hw_addr *ha;
- u32 crc;
-
- /* set all multicast bits */
- if (dev->flags & IFF_ALLMULTI){
- ib->filter [0] = 0xffffffff;
- ib->filter [1] = 0xffffffff;
- return;
- }
- /* clear the multicast filter */
- ib->filter [0] = 0;
- ib->filter [1] = 0;
-
- /* Add addresses */
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = 0xffffffff;
+ ib->filter[1] = 0xffffffff;
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+
+ /* Add addresses */
netdev_for_each_mc_addr(ha, dev) {
crc = ether_crc_le(6, ha->addr);
- crc = crc >> 26;
- mcast_table [crc >> 4] |= 1 << (crc & 0xf);
- }
+ crc = crc >> 26;
+ mcast_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
}
-void lance_set_multicast (struct net_device *dev)
+void lance_set_multicast(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
int stopped;
stopped = netif_queue_stopped(dev);
if (!stopped)
- netif_stop_queue (dev);
-
- while (lp->tx_old != lp->tx_new)
- schedule();
+ netif_stop_queue(dev);
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
- lance_init_ring (dev);
+ while (lp->tx_old != lp->tx_new)
+ schedule();
- if (dev->flags & IFF_PROMISC) {
- ib->mode |= LE_MO_PROM;
- } else {
- ib->mode &= ~LE_MO_PROM;
- lance_load_multicast (dev);
- }
- load_csrs (lp);
- init_restart_lance (lp);
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ ib->mode |= LE_MO_PROM;
+ } else {
+ ib->mode &= ~LE_MO_PROM;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
if (!stopped)
- netif_start_queue (dev);
+ netif_start_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_set_multicast);
@@ -648,10 +655,10 @@ void lance_poll(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
- spin_lock (&lp->devlock);
+ spin_lock(&lp->devlock);
WRITERAP(lp, LE_CSR0);
WRITERDP(lp, LE_C0_STRT);
- spin_unlock (&lp->devlock);
+ spin_unlock(&lp->devlock);
lance_interrupt(dev->irq, dev);
}
#endif
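The reindented lance_load_multicast() above keeps the original hashing scheme: the top six bits of the little-endian CRC-32 of each multicast address select one of 64 bits in the logical-address filter, spread across four 16-bit words. Below is a minimal user-space sketch of that mapping, assuming a bitwise CRC helper that mirrors the kernel's ether_crc_le() (initial value ~0, reflected polynomial 0xedb88320, no final inversion); the sample address is arbitrary and crc32_le_bitwise() is a local stand-in, not a kernel API.

#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_le_bitwise(const uint8_t *p, int len)
{
	uint32_t crc = ~0u;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;	/* no final inversion, matching ether_crc_le() */
}

int main(void)
{
	uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }; /* example group */
	uint16_t filter[4] = { 0, 0, 0, 0 };
	uint32_t crc = crc32_le_bitwise(addr, 6) >> 26;	/* top 6 bits -> 0..63 */

	/* same bit selection as the driver: word = crc >> 4, bit = crc & 0xf */
	filter[crc >> 4] |= 1u << (crc & 0xf);
	printf("hash bit %u -> filter word %u = 0x%04x\n",
	       (unsigned)crc, (unsigned)(crc >> 4), (unsigned)filter[crc >> 4]);
	return 0;
}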
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index ae33a99bf476..e9e0be313804 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -35,33 +35,32 @@
#define LANCE_LOG_RX_BUFFERS 3
#endif
-#define TX_RING_SIZE (1<<LANCE_LOG_TX_BUFFERS)
-#define RX_RING_SIZE (1<<LANCE_LOG_RX_BUFFERS)
-#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
-#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
-#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
-#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
-#define PKT_BUFF_SIZE (1544)
-#define RX_BUFF_SIZE PKT_BUFF_SIZE
-#define TX_BUFF_SIZE PKT_BUFF_SIZE
+#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS)
+#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
+#define PKT_BUFF_SIZE (1544)
+#define RX_BUFF_SIZE PKT_BUFF_SIZE
+#define TX_BUFF_SIZE PKT_BUFF_SIZE
/* Each receive buffer is described by a receive message descriptor (RMD) */
struct lance_rx_desc {
- volatile unsigned short rmd0; /* low address of packet */
- volatile unsigned char rmd1_bits; /* descriptor bits */
- volatile unsigned char rmd1_hadr; /* high address of packet */
- volatile short length; /* This length is 2s complement (negative)!
- * Buffer length
- */
- volatile unsigned short mblength; /* Actual number of bytes received */
+ volatile unsigned short rmd0; /* low address of packet */
+ volatile unsigned char rmd1_bits; /* descriptor bits */
+ volatile unsigned char rmd1_hadr; /* high address of packet */
+ volatile short length; /* This length is 2s complement (negative)!
+ * Buffer length */
+ volatile unsigned short mblength; /* Actual number of bytes received */
};
/* Ditto for TMD: */
struct lance_tx_desc {
- volatile unsigned short tmd0; /* low address of packet */
- volatile unsigned char tmd1_bits; /* descriptor bits */
- volatile unsigned char tmd1_hadr; /* high address of packet */
- volatile short length; /* Length is 2s complement (negative)! */
+ volatile unsigned short tmd0; /* low address of packet */
+ volatile unsigned char tmd1_bits; /* descriptor bits */
+ volatile unsigned char tmd1_hadr; /* high address of packet */
+ volatile short length; /* Length is 2s complement (negative)! */
volatile unsigned short misc;
};
@@ -71,181 +70,178 @@ struct lance_tx_desc {
* init block,the Tx and Rx rings and the buffers together in memory:
*/
struct lance_init_block {
- volatile unsigned short mode; /* Pre-set mode (reg. 15) */
- volatile unsigned char phys_addr[6]; /* Physical ethernet address */
- volatile unsigned filter[2]; /* Multicast filter (64 bits) */
-
- /* Receive and transmit ring base, along with extra bits. */
- volatile unsigned short rx_ptr; /* receive descriptor addr */
- volatile unsigned short rx_len; /* receive len and high addr */
- volatile unsigned short tx_ptr; /* transmit descriptor addr */
- volatile unsigned short tx_len; /* transmit len and high addr */
-
- /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
- * This will be true if this whole struct is 8-byte aligned.
- */
- volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
- volatile struct lance_rx_desc brx_ring[RX_RING_SIZE];
-
- volatile char tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
- volatile char rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
- /* we use this just to make the struct big enough that we can move its startaddr
- * in order to force alignment to an eight byte boundary.
- */
+ volatile unsigned short mode; /* Pre-set mode (reg. 15) */
+ volatile unsigned char phys_addr[6]; /* Physical ethernet address */
+ volatile unsigned filter[2]; /* Multicast filter (64 bits) */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ volatile unsigned short rx_ptr; /* receive descriptor addr */
+ volatile unsigned short rx_len; /* receive len and high addr */
+ volatile unsigned short tx_ptr; /* transmit descriptor addr */
+ volatile unsigned short tx_len; /* transmit len and high addr */
+
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
+ * This will be true if this whole struct is 8-byte aligned.
+ */
+ volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
+ volatile struct lance_rx_desc brx_ring[RX_RING_SIZE];
+
+ volatile char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
+ volatile char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
+ /* we use this just to make the struct big enough that we can move its startaddr
+ * in order to force alignment to an eight byte boundary.
+ */
};
/* This is where we keep all the stuff the driver needs to know about.
* I'm definitely unhappy about the mechanism for allowing specific
* drivers to add things...
*/
-struct lance_private
-{
- char *name;
+struct lance_private {
+ const char *name;
unsigned long base;
- volatile struct lance_init_block *init_block; /* CPU address of RAM */
- volatile struct lance_init_block *lance_init_block; /* LANCE address of RAM */
+ volatile struct lance_init_block *init_block; /* CPU address of RAM */
+ volatile struct lance_init_block *lance_init_block; /* LANCE address of RAM */
- int rx_new, tx_new;
- int rx_old, tx_old;
+ int rx_new, tx_new;
+ int rx_old, tx_old;
- int lance_log_rx_bufs, lance_log_tx_bufs;
- int rx_ring_mod_mask, tx_ring_mod_mask;
+ int lance_log_rx_bufs, lance_log_tx_bufs;
+ int rx_ring_mod_mask, tx_ring_mod_mask;
- int tpe; /* TPE is selected */
- int auto_select; /* cable-selection is by carrier */
- unsigned short busmaster_regval;
+ int tpe; /* TPE is selected */
+ int auto_select; /* cable-selection is by carrier */
+ unsigned short busmaster_regval;
- unsigned int irq; /* IRQ to register */
+ unsigned int irq; /* IRQ to register */
- /* This is because the HP LANCE is disgusting and you have to check
- * a DIO-specific register every time you read/write the LANCE regs :-<
- * [could we get away with making these some sort of macro?]
- */
- void (*writerap)(void *, unsigned short);
- void (*writerdp)(void *, unsigned short);
- unsigned short (*readrdp)(void *);
+ /* This is because the HP LANCE is disgusting and you have to check
+ * a DIO-specific register every time you read/write the LANCE regs :-<
+ * [could we get away with making these some sort of macro?]
+ */
+ void (*writerap)(void *, unsigned short);
+ void (*writerdp)(void *, unsigned short);
+ unsigned short (*readrdp)(void *);
spinlock_t devlock;
char tx_full;
};
/*
- * Am7990 Control and Status Registers
+ * Am7990 Control and Status Registers
*/
-#define LE_CSR0 0x0000 /* LANCE Controller Status */
-#define LE_CSR1 0x0001 /* IADR[15:0] (bit0==0 ie word aligned) */
-#define LE_CSR2 0x0002 /* IADR[23:16] (high bits reserved) */
-#define LE_CSR3 0x0003 /* Misc */
+#define LE_CSR0 0x0000 /* LANCE Controller Status */
+#define LE_CSR1 0x0001 /* IADR[15:0] (bit0==0 ie word aligned) */
+#define LE_CSR2 0x0002 /* IADR[23:16] (high bits reserved) */
+#define LE_CSR3 0x0003 /* Misc */
/*
* Bit definitions for CSR0 (LANCE Controller Status)
*/
-#define LE_C0_ERR 0x8000 /* Error = BABL | CERR | MISS | MERR */
-#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
-#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
-#define LE_C0_MISS 0x1000 /* Missed Frame (no rx buffer to put it in) */
-#define LE_C0_MERR 0x0800 /* Memory Error */
-#define LE_C0_RINT 0x0400 /* Receive Interrupt */
-#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
-#define LE_C0_IDON 0x0100 /* Initialization Done */
-#define LE_C0_INTR 0x0080 /* Interrupt Flag
- = BABL | MISS | MERR | RINT | TINT | IDON */
-#define LE_C0_INEA 0x0040 /* Interrupt Enable */
-#define LE_C0_RXON 0x0020 /* Receive On */
-#define LE_C0_TXON 0x0010 /* Transmit On */
-#define LE_C0_TDMD 0x0008 /* Transmit Demand */
-#define LE_C0_STOP 0x0004 /* Stop */
-#define LE_C0_STRT 0x0002 /* Start */
-#define LE_C0_INIT 0x0001 /* Initialize */
+#define LE_C0_ERR 0x8000 /* Error = BABL | CERR | MISS | MERR */
+#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
+#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
+#define LE_C0_MISS 0x1000 /* Missed Frame (no rx buffer to put it in) */
+#define LE_C0_MERR 0x0800 /* Memory Error */
+#define LE_C0_RINT 0x0400 /* Receive Interrupt */
+#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
+#define LE_C0_IDON 0x0100 /* Initialization Done */
+#define LE_C0_INTR 0x0080 /* Interrupt Flag
+ = BABL | MISS | MERR | RINT | TINT | IDON */
+#define LE_C0_INEA 0x0040 /* Interrupt Enable */
+#define LE_C0_RXON 0x0020 /* Receive On */
+#define LE_C0_TXON 0x0010 /* Transmit On */
+#define LE_C0_TDMD 0x0008 /* Transmit Demand */
+#define LE_C0_STOP 0x0004 /* Stop */
+#define LE_C0_STRT 0x0002 /* Start */
+#define LE_C0_INIT 0x0001 /* Initialize */
/*
* Bit definitions for CSR3
*/
-#define LE_C3_BSWP 0x0004 /* Byte Swap
- (on for big endian byte order) */
-#define LE_C3_ACON 0x0002 /* ALE Control
- (on for active low ALE) */
-#define LE_C3_BCON 0x0001 /* Byte Control */
+#define LE_C3_BSWP 0x0004 /* Byte Swap (on for big endian byte order) */
+#define LE_C3_ACON 0x0002 /* ALE Control (on for active low ALE) */
+#define LE_C3_BCON 0x0001 /* Byte Control */
/*
* Mode Flags
*/
-#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
+#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
/* these next ones 0x4000 -- 0x0080 are not available on the LANCE 7990,
* but they are in NetBSD's am7990.h, presumably for backwards-compatible chips
*/
-#define LE_MO_DRCVBC 0x4000 /* disable receive broadcast */
-#define LE_MO_DRCVPA 0x2000 /* disable physical address detection */
-#define LE_MO_DLNKTST 0x1000 /* disable link status */
-#define LE_MO_DAPC 0x0800 /* disable automatic polarity correction */
-#define LE_MO_MENDECL 0x0400 /* MENDEC loopback mode */
-#define LE_MO_LRTTSEL 0x0200 /* lower RX threshold / TX mode selection */
-#define LE_MO_PSEL1 0x0100 /* port selection bit1 */
-#define LE_MO_PSEL0 0x0080 /* port selection bit0 */
+#define LE_MO_DRCVBC 0x4000 /* disable receive broadcast */
+#define LE_MO_DRCVPA 0x2000 /* disable physical address detection */
+#define LE_MO_DLNKTST 0x1000 /* disable link status */
+#define LE_MO_DAPC 0x0800 /* disable automatic polarity correction */
+#define LE_MO_MENDECL 0x0400 /* MENDEC loopback mode */
+#define LE_MO_LRTTSEL 0x0200 /* lower RX threshold / TX mode selection */
+#define LE_MO_PSEL1 0x0100 /* port selection bit1 */
+#define LE_MO_PSEL0 0x0080 /* port selection bit0 */
/* and this one is from the C-LANCE data sheet... */
-#define LE_MO_EMBA 0x0080 /* Enable Modified Backoff Algorithm
- (C-LANCE, not original LANCE) */
-#define LE_MO_INTL 0x0040 /* Internal Loopback */
-#define LE_MO_DRTY 0x0020 /* Disable Retry */
-#define LE_MO_FCOLL 0x0010 /* Force Collision */
-#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
-#define LE_MO_LOOP 0x0004 /* Loopback Enable */
-#define LE_MO_DTX 0x0002 /* Disable Transmitter */
-#define LE_MO_DRX 0x0001 /* Disable Receiver */
+#define LE_MO_EMBA 0x0080 /* Enable Modified Backoff Algorithm
+ (C-LANCE, not original LANCE) */
+#define LE_MO_INTL 0x0040 /* Internal Loopback */
+#define LE_MO_DRTY 0x0020 /* Disable Retry */
+#define LE_MO_FCOLL 0x0010 /* Force Collision */
+#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
+#define LE_MO_LOOP 0x0004 /* Loopback Enable */
+#define LE_MO_DTX 0x0002 /* Disable Transmitter */
+#define LE_MO_DRX 0x0001 /* Disable Receiver */
/*
* Receive Flags
*/
-#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
-#define LE_R1_ERR 0x40 /* Error */
-#define LE_R1_FRA 0x20 /* Framing Error */
-#define LE_R1_OFL 0x10 /* Overflow Error */
-#define LE_R1_CRC 0x08 /* CRC Error */
-#define LE_R1_BUF 0x04 /* Buffer Error */
-#define LE_R1_SOP 0x02 /* Start of Packet */
-#define LE_R1_EOP 0x01 /* End of Packet */
-#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_R1_ERR 0x40 /* Error */
+#define LE_R1_FRA 0x20 /* Framing Error */
+#define LE_R1_OFL 0x10 /* Overflow Error */
+#define LE_R1_CRC 0x08 /* CRC Error */
+#define LE_R1_BUF 0x04 /* Buffer Error */
+#define LE_R1_SOP 0x02 /* Start of Packet */
+#define LE_R1_EOP 0x01 /* End of Packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
/*
* Transmit Flags
*/
-#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
-#define LE_T1_ERR 0x40 /* Error */
-#define LE_T1_RES 0x20 /* Reserved, LANCE writes this with a zero */
-#define LE_T1_EMORE 0x10 /* More than one retry needed */
-#define LE_T1_EONE 0x08 /* One retry needed */
-#define LE_T1_EDEF 0x04 /* Deferred */
-#define LE_T1_SOP 0x02 /* Start of Packet */
-#define LE_T1_EOP 0x01 /* End of Packet */
-#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_T1_ERR 0x40 /* Error */
+#define LE_T1_RES 0x20 /* Reserved, LANCE writes this with a zero */
+#define LE_T1_EMORE 0x10 /* More than one retry needed */
+#define LE_T1_EONE 0x08 /* One retry needed */
+#define LE_T1_EDEF 0x04 /* Deferred */
+#define LE_T1_SOP 0x02 /* Start of Packet */
+#define LE_T1_EOP 0x01 /* End of Packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
/*
* Error Flags
*/
-#define LE_T3_BUF 0x8000 /* Buffer Error */
-#define LE_T3_UFL 0x4000 /* Underflow Error */
-#define LE_T3_LCOL 0x1000 /* Late Collision */
-#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
-#define LE_T3_RTY 0x0400 /* Retry Error */
-#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
+#define LE_T3_BUF 0x8000 /* Buffer Error */
+#define LE_T3_UFL 0x4000 /* Underflow Error */
+#define LE_T3_LCOL 0x1000 /* Late Collision */
+#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
+#define LE_T3_RTY 0x0400 /* Retry Error */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
/* Miscellaneous useful macros */
-#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
- lp->tx_old+lp->tx_ring_mod_mask-lp->tx_new:\
- lp->tx_old - lp->tx_new-1)
+#define TX_BUFFS_AVAIL ((lp->tx_old <= lp->tx_new) ? \
+ lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new : \
+ lp->tx_old - lp->tx_new - 1)
/* The LANCE only uses 24 bit addresses. This does the obvious thing. */
#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
/* Now the prototypes we export */
int lance_open(struct net_device *dev);
-int lance_close (struct net_device *dev);
-int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
-void lance_set_multicast (struct net_device *dev);
+int lance_close(struct net_device *dev);
+int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void lance_set_multicast(struct net_device *dev);
void lance_tx_timeout(struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
void lance_poll(struct net_device *dev);
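The reformatted TX_BUFFS_AVAIL macro above counts free transmit descriptors while always keeping one slot unused, so tx_old == tx_new can only mean an empty ring. A small stand-alone sketch of the same arithmetic, with the ring size and index values chosen purely for illustration:

#include <stdio.h>

/* mirrors TX_BUFFS_AVAIL: tx_old = oldest unreclaimed slot, tx_new = next
 * slot to fill, ring_mod_mask = ring size - 1, one slot always kept free */
static int tx_buffs_avail(int tx_old, int tx_new, int ring_mod_mask)
{
	return (tx_old <= tx_new) ?
		tx_old + ring_mod_mask - tx_new :
		tx_old - tx_new - 1;
}

int main(void)
{
	int mask = (1 << 6) - 1;	/* e.g. a 64-entry ring, mask = size - 1 */

	printf("empty ring:  %d free\n", tx_buffs_avail(0, 0, mask));	/* 63 */
	printf("one queued:  %d free\n", tx_buffs_avail(0, 1, mask));	/* 62 */
	printf("wrapped new: %d free\n", tx_buffs_avail(5, 3, mask));	/*  1 */
	return 0;
}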
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 0866e7627433..56139184b801 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -57,6 +57,7 @@
#include <linux/zorro.h>
#include <linux/bitops.h>
+#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
@@ -678,6 +679,7 @@ static int a2065_init_one(struct zorro_dev *z,
unsigned long base_addr = board + A2065_LANCE;
unsigned long mem_start = board + A2065_RAM;
struct resource *r1, *r2;
+ u32 serial;
int err;
r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
@@ -702,6 +704,7 @@ static int a2065_init_one(struct zorro_dev *z,
r1->name = dev->name;
r2->name = dev->name;
+ serial = be32_to_cpu(z->rom.er_SerialNumber);
dev->dev_addr[0] = 0x00;
if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */
dev->dev_addr[1] = 0x80;
@@ -710,11 +713,11 @@ static int a2065_init_one(struct zorro_dev *z,
dev->dev_addr[1] = 0x00;
dev->dev_addr[2] = 0x9f;
}
- dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff;
- dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff;
- dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
- dev->base_addr = ZTWO_VADDR(base_addr);
- dev->mem_start = ZTWO_VADDR(mem_start);
+ dev->dev_addr[3] = (serial >> 16) & 0xff;
+ dev->dev_addr[4] = (serial >> 8) & 0xff;
+ dev->dev_addr[5] = serial & 0xff;
+ dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
+ dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
dev->mem_end = dev->mem_start + A2065_RAM_SIZE;
priv->ll = (volatile struct lance_regs *)dev->base_addr;
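The a2065 hunk (and the similar ariadne hunk below) converts the big-endian Zorro serial number to host order once and derives the low three MAC bytes from the converted value. A hedged user-space sketch of that derivation follows; ntohl() stands in for be32_to_cpu(), and both the serial number and the OUI prefix bytes are made-up placeholders, not real board data.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl()/htonl() stand in for be32_to_cpu() */

int main(void)
{
	uint32_t er_serial_be = htonl(0x00123456);	/* as stored in the ROM */
	uint32_t serial = ntohl(er_serial_be);		/* convert once, then split */
	uint8_t dev_addr[6] = { 0x00, 0x80, 0x00 };	/* placeholder OUI prefix */

	dev_addr[3] = (serial >> 16) & 0xff;
	dev_addr[4] = (serial >> 8) & 0xff;
	dev_addr[5] = serial & 0xff;
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       dev_addr[0], dev_addr[1], dev_addr[2],
	       dev_addr[3], dev_addr[4], dev_addr[5]);
	return 0;
}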
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index d042511bdc13..2061b471fd16 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -24,9 +24,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
Module Name:
@@ -74,7 +72,6 @@ Revision History:
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 8baa3527ba74..a75092d584cc 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
Module Name:
@@ -753,7 +751,7 @@ struct amd8111e_priv{
const char *name;
struct pci_dev *pci_dev; /* Ptr to the associated pci_dev */
struct net_device* amd8111e_net_dev; /* ptr to associated net_device */
- /* Transmit and recive skbs */
+ /* Transmit and receive skbs */
struct sk_buff *tx_skbuff[NUM_TX_BUFFERS];
struct sk_buff *rx_skbuff[NUM_RX_BUFFERS];
/* Transmit and receive dma mapped addr */
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index c178eb4c8166..b08101b31b8b 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -51,6 +51,7 @@
#include <linux/zorro.h>
#include <linux/bitops.h>
+#include <asm/byteorder.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/irq.h>
@@ -718,6 +719,7 @@ static int ariadne_init_one(struct zorro_dev *z,
struct resource *r1, *r2;
struct net_device *dev;
struct ariadne_private *priv;
+ u32 serial;
int err;
r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960");
@@ -741,14 +743,15 @@ static int ariadne_init_one(struct zorro_dev *z,
r1->name = dev->name;
r2->name = dev->name;
+ serial = be32_to_cpu(z->rom.er_SerialNumber);
dev->dev_addr[0] = 0x00;
dev->dev_addr[1] = 0x60;
dev->dev_addr[2] = 0x30;
- dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff;
- dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff;
- dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
- dev->base_addr = ZTWO_VADDR(base_addr);
- dev->mem_start = ZTWO_VADDR(mem_start);
+ dev->dev_addr[3] = (serial >> 16) & 0xff;
+ dev->dev_addr[4] = (serial >> 8) & 0xff;
+ dev->dev_addr[5] = serial & 0xff;
+ dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
+ dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE;
dev->netdev_ops = &ariadne_netdev_ops;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 427c148bb643..a2bd91e3d302 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -27,8 +27,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* ########################################################################
*
@@ -48,7 +47,6 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
index 4b7f7ad62bb8..ca53024f017f 100644
--- a/drivers/net/ethernet/amd/au1000_eth.h
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -18,8 +18,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* ########################################################################
*
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 0c61fd50d882..47ce57c2c893 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -127,41 +127,41 @@ static void hplance_remove_one(struct dio_dev *d)
/* Initialise a single lance board at the given DIO device */
static void hplance_init(struct net_device *dev, struct dio_dev *d)
{
- unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
- struct hplance_private *lp;
- int i;
-
- /* reset the board */
- out_8(va+DIO_IDOFF, 0xff);
- udelay(100); /* ariba! ariba! udelay! udelay! */
-
- /* Fill the dev fields */
- dev->base_addr = va;
- dev->netdev_ops = &hplance_netdev_ops;
- dev->dma = 0;
-
- for (i=0; i<6; i++) {
- /* The NVRAM holds our ethernet address, one nibble per byte,
- * at bytes NVRAMOFF+1,3,5,7,9...
- */
- dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
- | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
- }
-
- lp = netdev_priv(dev);
- lp->lance.name = (char*)d->name; /* discards const, shut up gcc */
- lp->lance.base = va;
- lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
- lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */
- lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
- lp->lance.irq = d->ipl;
- lp->lance.writerap = hplance_writerap;
- lp->lance.writerdp = hplance_writerdp;
- lp->lance.readrdp = hplance_readrdp;
- lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
- lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
- lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
- lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
+ unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
+ struct hplance_private *lp;
+ int i;
+
+ /* reset the board */
+ out_8(va + DIO_IDOFF, 0xff);
+ udelay(100); /* ariba! ariba! udelay! udelay! */
+
+ /* Fill the dev fields */
+ dev->base_addr = va;
+ dev->netdev_ops = &hplance_netdev_ops;
+ dev->dma = 0;
+
+ for (i = 0; i < 6; i++) {
+ /* The NVRAM holds our ethernet address, one nibble per byte,
+ * at bytes NVRAMOFF+1,3,5,7,9...
+ */
+ dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
+ | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
+ }
+
+ lp = netdev_priv(dev);
+ lp->lance.name = d->name;
+ lp->lance.base = va;
+ lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
+ lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */
+ lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
+ lp->lance.irq = d->ipl;
+ lp->lance.writerap = hplance_writerap;
+ lp->lance.writerdp = hplance_writerdp;
+ lp->lance.readrdp = hplance_readrdp;
+ lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
+ lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
}
/* This is disgusting. We have to check the DIO status register for ack every
@@ -195,25 +195,25 @@ static unsigned short hplance_readrdp(void *priv)
static int hplance_open(struct net_device *dev)
{
- int status;
- struct lance_private *lp = netdev_priv(dev);
+ int status;
+ struct lance_private *lp = netdev_priv(dev);
- status = lance_open(dev); /* call generic lance open code */
- if (status)
- return status;
- /* enable interrupts at board level. */
- out_8(lp->base + HPLANCE_STATUS, LE_IE);
+ status = lance_open(dev); /* call generic lance open code */
+ if (status)
+ return status;
+ /* enable interrupts at board level. */
+ out_8(lp->base + HPLANCE_STATUS, LE_IE);
- return 0;
+ return 0;
}
static int hplance_close(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
+ struct lance_private *lp = netdev_priv(dev);
- out_8(lp->base + HPLANCE_STATUS, 0); /* disable interrupts at boardlevel */
- lance_close(dev);
- return 0;
+ out_8(lp->base + HPLANCE_STATUS, 0); /* disable interrupts at boardlevel */
+ lance_close(dev);
+ return 0;
}
static int __init hplance_init_module(void)
@@ -223,7 +223,7 @@ static int __init hplance_init_module(void)
static void __exit hplance_cleanup_module(void)
{
- dio_unregister_driver(&hplance_driver);
+ dio_unregister_driver(&hplance_driver);
}
module_init(hplance_init_module);
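The reindented hplance_init() above still reads the station address one nibble per NVRAM byte, taking the high nibble from offset 1 and the low nibble from offset 3 of each four-byte group. A small sketch of that decoding with fabricated NVRAM contents; nothing here is real board data.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* MAC byte i: high nibble at i*4 + 1, low nibble at i*4 + 3 */
	uint8_t nvram[24] = {
		0x00, 0x00, 0x00, 0x08,  0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x09,  0x00, 0x02, 0x00, 0x06,
		0x00, 0x0a, 0x00, 0x0b,  0x00, 0x0c, 0x00, 0x0d,
	};
	uint8_t mac[6];
	int i;

	for (i = 0; i < 6; i++)
		mac[i] = ((nvram[i * 4 + 1] & 0xF) << 4) |
			  (nvram[i * 4 + 3] & 0xF);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",	/* -> 08:00:09:26:ab:cd */
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}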
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index e108e911da05..0e8399dec054 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -94,33 +94,31 @@ struct net_device * __init mvme147lance_probe(int unit)
dev->netdev_ops = &lance_netdev_ops;
dev->dma = 0;
- addr=(u_long *)ETHERNET_ADDRESS;
+ addr = (u_long *)ETHERNET_ADDRESS;
address = *addr;
- dev->dev_addr[0]=0x08;
- dev->dev_addr[1]=0x00;
- dev->dev_addr[2]=0x3e;
- address=address>>8;
- dev->dev_addr[5]=address&0xff;
- address=address>>8;
- dev->dev_addr[4]=address&0xff;
- address=address>>8;
- dev->dev_addr[3]=address&0xff;
-
- printk("%s: MVME147 at 0x%08lx, irq %d, "
- "Hardware Address %pM\n",
+ dev->dev_addr[0] = 0x08;
+ dev->dev_addr[1] = 0x00;
+ dev->dev_addr[2] = 0x3e;
+ address = address >> 8;
+ dev->dev_addr[5] = address&0xff;
+ address = address >> 8;
+ dev->dev_addr[4] = address&0xff;
+ address = address >> 8;
+ dev->dev_addr[3] = address&0xff;
+
+ printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
dev->name, dev->base_addr, MVME147_LANCE_IRQ,
dev->dev_addr);
lp = netdev_priv(dev);
lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */
- if (!lp->ram)
- {
+ if (!lp->ram) {
printk("%s: No memory for LANCE buffers\n", dev->name);
free_netdev(dev);
return ERR_PTR(-ENOMEM);
}
- lp->lance.name = (char*)name; /* discards const, shut up gcc */
+ lp->lance.name = name;
lp->lance.base = dev->base_addr;
lp->lance.init_block = (struct lance_init_block *)(lp->ram); /* CPU addr */
lp->lance.lance_init_block = (struct lance_init_block *)(lp->ram); /* LANCE addr of same RAM */
@@ -167,8 +165,8 @@ static int m147lance_open(struct net_device *dev)
if (status)
return status;
/* enable interrupts at board level. */
- m147_pcc->lan_cntrl=0; /* clear the interrupts (if any) */
- m147_pcc->lan_cntrl=0x08 | 0x04; /* Enable irq 4 */
+ m147_pcc->lan_cntrl = 0; /* clear the interrupts (if any) */
+ m147_pcc->lan_cntrl = 0x08 | 0x04; /* Enable irq 4 */
return 0;
}
@@ -176,7 +174,7 @@ static int m147lance_open(struct net_device *dev)
static int m147lance_close(struct net_device *dev)
{
/* disable interrupts at boardlevel */
- m147_pcc->lan_cntrl=0x0; /* disable interrupts */
+ m147_pcc->lan_cntrl = 0x0; /* disable interrupts */
lance_close(dev);
return 0;
}
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index d4ed89130c52..08569fe2b182 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -132,7 +132,6 @@ Include Files
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 38492e0b704e..9339cccfe05a 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1668,7 +1668,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
for (i = 0; i < ETH_ALEN; i++)
promaddr[i] = inb(ioaddr + i);
- if (memcmp(promaddr, dev->dev_addr, ETH_ALEN) ||
+ if (!ether_addr_equal(promaddr, dev->dev_addr) ||
!is_valid_ether_addr(dev->dev_addr)) {
if (is_valid_ether_addr(promaddr)) {
if (pcnet32_debug & NETIF_MSG_PROBE) {
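The pcnet32 change swaps an open-coded memcmp() for ether_addr_equal(); the sense of the test is preserved because memcmp() is non-zero exactly when !ether_addr_equal() is true. A tiny sketch of that equivalence, using a local stand-in for the kernel helper (the real inline additionally uses word-sized compares on most architectures):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static bool demo_ether_addr_equal(const uint8_t *a, const uint8_t *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}

int main(void)
{
	uint8_t prom[ETH_ALEN] = { 0x00, 0x0c, 0x29, 0x11, 0x22, 0x33 };
	uint8_t dev[ETH_ALEN]  = { 0x00, 0x0c, 0x29, 0x11, 0x22, 0x34 };

	/* both expressions are true when the addresses differ */
	printf("memcmp() != 0:        %d\n", memcmp(prom, dev, ETH_ALEN) != 0);
	printf("!ether_addr_equal():  %d\n", !demo_ether_addr_equal(prom, dev));
	return 0;
}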
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index ece56831a647..5e4273b7aa27 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -80,7 +80,6 @@ static char lancestr[] = "LANCE";
#include <linux/in.h>
#include <linux/string.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/socket.h> /* Used for the temporal inet entries and routing */
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index dc08678bf9a4..928fac6dd10a 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -122,7 +122,6 @@ struct buffer_state {
* @link: PHY's last seen link state.
* @duplex: PHY's last set duplex mode.
* @speed: PHY's last set speed.
- * @max_speed: Maximum supported by current system network data-rate.
*/
struct arc_emac_priv {
/* Devices */
@@ -152,7 +151,6 @@ struct arc_emac_priv {
unsigned int link;
unsigned int duplex;
unsigned int speed;
- unsigned int max_speed;
};
/**
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index b2ffad1304d2..eeecc29cf5b7 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -381,17 +381,7 @@ static int arc_emac_open(struct net_device *ndev)
phy_dev->autoneg = AUTONEG_ENABLE;
phy_dev->speed = 0;
phy_dev->duplex = 0;
- phy_dev->advertising = phy_dev->supported;
-
- if (priv->max_speed > 100) {
- phy_dev->advertising &= PHY_GBIT_FEATURES;
- } else if (priv->max_speed <= 100) {
- phy_dev->advertising &= PHY_BASIC_FEATURES;
- if (priv->max_speed <= 10) {
- phy_dev->advertising &= ~SUPPORTED_100baseT_Half;
- phy_dev->advertising &= ~SUPPORTED_100baseT_Full;
- }
- }
+ phy_dev->advertising &= phy_dev->supported;
priv->last_rx_bd = 0;
@@ -565,6 +555,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
/* Make sure pointer to data buffer is set */
wmb();
+ skb_tx_timestamp(skb);
+
*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
/* Increment index to point to the next BD */
@@ -579,8 +571,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
arc_reg_set(priv, R_STATUS, TXPL_MASK);
- skb_tx_timestamp(skb);
-
return NETDEV_TX_OK;
}
@@ -704,14 +694,6 @@ static int arc_emac_probe(struct platform_device *pdev)
/* Set poll rate so that it polls every 1 ms */
arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);
- /* Get max speed of operation from device tree */
- if (of_property_read_u32(pdev->dev.of_node, "max-speed",
- &priv->max_speed)) {
- dev_err(&pdev->dev, "failed to retrieve <max-speed> from device tree\n");
- err = -EINVAL;
- goto out;
- }
-
ndev->irq = irq;
dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq);
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index d71103dbf2cd..8fc93c5f6abc 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -106,6 +106,9 @@ struct alx_priv {
u16 msg_enable;
bool msi;
+
+ /* protects hw.stats */
+ spinlock_t stats_lock;
};
extern const struct ethtool_ops alx_ethtool_ops;
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
index 45b36507abc1..08e22df2a300 100644
--- a/drivers/net/ethernet/atheros/alx/ethtool.c
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -46,6 +46,66 @@
#include "reg.h"
#include "hw.h"
+/* The order of these strings must match the order of the fields in
+ * struct alx_hw_stats
+ * See hw.h
+ */
+static const char alx_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "rx_bcast_packets",
+ "rx_mcast_packets",
+ "rx_pause_packets",
+ "rx_ctrl_packets",
+ "rx_fcs_errors",
+ "rx_length_errors",
+ "rx_bytes",
+ "rx_runt_packets",
+ "rx_fragments",
+ "rx_64B_or_less_packets",
+ "rx_65B_to_127B_packets",
+ "rx_128B_to_255B_packets",
+ "rx_256B_to_511B_packets",
+ "rx_512B_to_1023B_packets",
+ "rx_1024B_to_1518B_packets",
+ "rx_1519B_to_mtu_packets",
+ "rx_oversize_packets",
+ "rx_rxf_ov_drop_packets",
+ "rx_rrd_ov_drop_packets",
+ "rx_align_errors",
+ "rx_bcast_bytes",
+ "rx_mcast_bytes",
+ "rx_address_errors",
+ "tx_packets",
+ "tx_bcast_packets",
+ "tx_mcast_packets",
+ "tx_pause_packets",
+ "tx_exc_defer_packets",
+ "tx_ctrl_packets",
+ "tx_defer_packets",
+ "tx_bytes",
+ "tx_64B_or_less_packets",
+ "tx_65B_to_127B_packets",
+ "tx_128B_to_255B_packets",
+ "tx_256B_to_511B_packets",
+ "tx_512B_to_1023B_packets",
+ "tx_1024B_to_1518B_packets",
+ "tx_1519B_to_mtu_packets",
+ "tx_single_collision",
+ "tx_multiple_collisions",
+ "tx_late_collision",
+ "tx_abort_collision",
+ "tx_underrun",
+ "tx_trd_eop",
+ "tx_length_errors",
+ "tx_trunc_packets",
+ "tx_bcast_bytes",
+ "tx_mcast_bytes",
+ "tx_update",
+};
+
+#define ALX_NUM_STATS ARRAY_SIZE(alx_gstrings_stats)
+
+
static u32 alx_get_supported_speeds(struct alx_hw *hw)
{
u32 supported = SUPPORTED_10baseT_Half |
@@ -201,6 +261,44 @@ static void alx_set_msglevel(struct net_device *netdev, u32 data)
alx->msg_enable = data;
}
+static void alx_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *estats, u64 *data)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ spin_lock(&alx->stats_lock);
+
+ alx_update_hw_stats(hw);
+ BUILD_BUG_ON(sizeof(hw->stats) - offsetof(struct alx_hw_stats, rx_ok) <
+ ALX_NUM_STATS * sizeof(u64));
+ memcpy(data, &hw->stats.rx_ok, ALX_NUM_STATS * sizeof(u64));
+
+ spin_unlock(&alx->stats_lock);
+}
+
+static void alx_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &alx_gstrings_stats, sizeof(alx_gstrings_stats));
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static int alx_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ALX_NUM_STATS;
+ default:
+ return -EINVAL;
+ }
+}
+
const struct ethtool_ops alx_ethtool_ops = {
.get_settings = alx_get_settings,
.set_settings = alx_set_settings,
@@ -209,4 +307,7 @@ const struct ethtool_ops alx_ethtool_ops = {
.get_msglevel = alx_get_msglevel,
.set_msglevel = alx_set_msglevel,
.get_link = ethtool_op_get_link,
+ .get_strings = alx_get_strings,
+ .get_sset_count = alx_get_sset_count,
+ .get_ethtool_stats = alx_get_ethtool_stats,
};
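The new alx ethtool hooks export the hardware counters by memcpy()ing a run of consecutive u64 fields, so the string table, the struct layout and ALX_NUM_STATS must stay in lockstep; the BUILD_BUG_ON() in alx_get_ethtool_stats() enforces that at compile time. Below is a reduced sketch of the same pattern, assuming a C11 compiler for static_assert(); all names are invented for illustration and are not part of the driver.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_hw_stats {		/* must match demo_strings[] order */
	uint64_t rx_ok;
	uint64_t rx_fcs_err;
	uint64_t tx_ok;
	uint64_t tx_underrun;
};

static const char *demo_strings[] = {
	"rx_packets", "rx_fcs_errors", "tx_packets", "tx_underrun",
};
#define DEMO_NUM_STATS (sizeof(demo_strings) / sizeof(demo_strings[0]))

/* plays the role of the driver's BUILD_BUG_ON() */
static_assert(sizeof(struct demo_hw_stats) == DEMO_NUM_STATS * sizeof(uint64_t),
	      "string table and stats struct out of sync");

int main(void)
{
	struct demo_hw_stats hw = { 10, 1, 20, 0 };
	uint64_t data[DEMO_NUM_STATS];
	size_t i;

	memcpy(data, &hw, sizeof(data));	/* what get_ethtool_stats does */
	for (i = 0; i < DEMO_NUM_STATS; i++)
		printf("%-16s %llu\n", demo_strings[i],
		       (unsigned long long)data[i]);
	return 0;
}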
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
index 1e8c24a3cb4e..7712f068f6d4 100644
--- a/drivers/net/ethernet/atheros/alx/hw.c
+++ b/drivers/net/ethernet/atheros/alx/hw.c
@@ -1050,3 +1050,61 @@ bool alx_get_phy_info(struct alx_hw *hw)
return true;
}
+
+void alx_update_hw_stats(struct alx_hw *hw)
+{
+ /* RX stats */
+ hw->stats.rx_ok += alx_read_mem32(hw, ALX_MIB_RX_OK);
+ hw->stats.rx_bcast += alx_read_mem32(hw, ALX_MIB_RX_BCAST);
+ hw->stats.rx_mcast += alx_read_mem32(hw, ALX_MIB_RX_MCAST);
+ hw->stats.rx_pause += alx_read_mem32(hw, ALX_MIB_RX_PAUSE);
+ hw->stats.rx_ctrl += alx_read_mem32(hw, ALX_MIB_RX_CTRL);
+ hw->stats.rx_fcs_err += alx_read_mem32(hw, ALX_MIB_RX_FCS_ERR);
+ hw->stats.rx_len_err += alx_read_mem32(hw, ALX_MIB_RX_LEN_ERR);
+ hw->stats.rx_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_BYTE_CNT);
+ hw->stats.rx_runt += alx_read_mem32(hw, ALX_MIB_RX_RUNT);
+ hw->stats.rx_frag += alx_read_mem32(hw, ALX_MIB_RX_FRAG);
+ hw->stats.rx_sz_64B += alx_read_mem32(hw, ALX_MIB_RX_SZ_64B);
+ hw->stats.rx_sz_127B += alx_read_mem32(hw, ALX_MIB_RX_SZ_127B);
+ hw->stats.rx_sz_255B += alx_read_mem32(hw, ALX_MIB_RX_SZ_255B);
+ hw->stats.rx_sz_511B += alx_read_mem32(hw, ALX_MIB_RX_SZ_511B);
+ hw->stats.rx_sz_1023B += alx_read_mem32(hw, ALX_MIB_RX_SZ_1023B);
+ hw->stats.rx_sz_1518B += alx_read_mem32(hw, ALX_MIB_RX_SZ_1518B);
+ hw->stats.rx_sz_max += alx_read_mem32(hw, ALX_MIB_RX_SZ_MAX);
+ hw->stats.rx_ov_sz += alx_read_mem32(hw, ALX_MIB_RX_OV_SZ);
+ hw->stats.rx_ov_rxf += alx_read_mem32(hw, ALX_MIB_RX_OV_RXF);
+ hw->stats.rx_ov_rrd += alx_read_mem32(hw, ALX_MIB_RX_OV_RRD);
+ hw->stats.rx_align_err += alx_read_mem32(hw, ALX_MIB_RX_ALIGN_ERR);
+ hw->stats.rx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_BCCNT);
+ hw->stats.rx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_MCCNT);
+ hw->stats.rx_err_addr += alx_read_mem32(hw, ALX_MIB_RX_ERRADDR);
+
+ /* TX stats */
+ hw->stats.tx_ok += alx_read_mem32(hw, ALX_MIB_TX_OK);
+ hw->stats.tx_bcast += alx_read_mem32(hw, ALX_MIB_TX_BCAST);
+ hw->stats.tx_mcast += alx_read_mem32(hw, ALX_MIB_TX_MCAST);
+ hw->stats.tx_pause += alx_read_mem32(hw, ALX_MIB_TX_PAUSE);
+ hw->stats.tx_exc_defer += alx_read_mem32(hw, ALX_MIB_TX_EXC_DEFER);
+ hw->stats.tx_ctrl += alx_read_mem32(hw, ALX_MIB_TX_CTRL);
+ hw->stats.tx_defer += alx_read_mem32(hw, ALX_MIB_TX_DEFER);
+ hw->stats.tx_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_BYTE_CNT);
+ hw->stats.tx_sz_64B += alx_read_mem32(hw, ALX_MIB_TX_SZ_64B);
+ hw->stats.tx_sz_127B += alx_read_mem32(hw, ALX_MIB_TX_SZ_127B);
+ hw->stats.tx_sz_255B += alx_read_mem32(hw, ALX_MIB_TX_SZ_255B);
+ hw->stats.tx_sz_511B += alx_read_mem32(hw, ALX_MIB_TX_SZ_511B);
+ hw->stats.tx_sz_1023B += alx_read_mem32(hw, ALX_MIB_TX_SZ_1023B);
+ hw->stats.tx_sz_1518B += alx_read_mem32(hw, ALX_MIB_TX_SZ_1518B);
+ hw->stats.tx_sz_max += alx_read_mem32(hw, ALX_MIB_TX_SZ_MAX);
+ hw->stats.tx_single_col += alx_read_mem32(hw, ALX_MIB_TX_SINGLE_COL);
+ hw->stats.tx_multi_col += alx_read_mem32(hw, ALX_MIB_TX_MULTI_COL);
+ hw->stats.tx_late_col += alx_read_mem32(hw, ALX_MIB_TX_LATE_COL);
+ hw->stats.tx_abort_col += alx_read_mem32(hw, ALX_MIB_TX_ABORT_COL);
+ hw->stats.tx_underrun += alx_read_mem32(hw, ALX_MIB_TX_UNDERRUN);
+ hw->stats.tx_trd_eop += alx_read_mem32(hw, ALX_MIB_TX_TRD_EOP);
+ hw->stats.tx_len_err += alx_read_mem32(hw, ALX_MIB_TX_LEN_ERR);
+ hw->stats.tx_trunc += alx_read_mem32(hw, ALX_MIB_TX_TRUNC);
+ hw->stats.tx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_BCCNT);
+ hw->stats.tx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_MCCNT);
+
+ hw->stats.update += alx_read_mem32(hw, ALX_MIB_UPDATE);
+}
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
index 96f3b4381e17..15548802d6f8 100644
--- a/drivers/net/ethernet/atheros/alx/hw.h
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -381,6 +381,73 @@ struct alx_rrd {
ALX_ISR_RX_Q6 | \
ALX_ISR_RX_Q7)
+/* Statistics counters collected by the MAC
+ *
+ * The order of the fields must match the strings in alx_gstrings_stats
+ * All stats fields should be u64
+ * See ethtool.c
+ */
+struct alx_hw_stats {
+ /* rx */
+ u64 rx_ok; /* good RX packets */
+ u64 rx_bcast; /* good RX broadcast packets */
+ u64 rx_mcast; /* good RX multicast packets */
+ u64 rx_pause; /* RX pause frames */
+ u64 rx_ctrl; /* RX control packets other than pause frames */
+ u64 rx_fcs_err; /* RX packets with bad FCS */
+ u64 rx_len_err; /* RX packets with length != actual size */
+ u64 rx_byte_cnt; /* good bytes received. FCS is NOT included */
+ u64 rx_runt; /* RX packets < 64 bytes with good FCS */
+ u64 rx_frag; /* RX packets < 64 bytes with bad FCS */
+ u64 rx_sz_64B; /* 64 byte RX packets */
+ u64 rx_sz_127B; /* 65-127 byte RX packets */
+ u64 rx_sz_255B; /* 128-255 byte RX packets */
+ u64 rx_sz_511B; /* 256-511 byte RX packets */
+ u64 rx_sz_1023B; /* 512-1023 byte RX packets */
+ u64 rx_sz_1518B; /* 1024-1518 byte RX packets */
+ u64 rx_sz_max; /* 1519 byte to MTU RX packets */
+ u64 rx_ov_sz; /* truncated RX packets, size > MTU */
+ u64 rx_ov_rxf; /* frames dropped due to RX FIFO overflow */
+ u64 rx_ov_rrd; /* frames dropped due to RRD overflow */
+ u64 rx_align_err; /* alignment errors */
+ u64 rx_bc_byte_cnt; /* RX broadcast bytes, excluding FCS */
+ u64 rx_mc_byte_cnt; /* RX multicast bytes, excluding FCS */
+ u64 rx_err_addr; /* packets dropped due to address filtering */
+
+ /* tx */
+ u64 tx_ok; /* good TX packets */
+ u64 tx_bcast; /* good TX broadcast packets */
+ u64 tx_mcast; /* good TX multicast packets */
+ u64 tx_pause; /* TX pause frames */
+ u64 tx_exc_defer; /* TX packets deferred excessively */
+ u64 tx_ctrl; /* TX control frames, excluding pause frames */
+ u64 tx_defer; /* TX packets deferred */
+ u64 tx_byte_cnt; /* bytes transmitted, FCS is NOT included */
+ u64 tx_sz_64B; /* 64 byte TX packets */
+ u64 tx_sz_127B; /* 65-127 byte TX packets */
+ u64 tx_sz_255B; /* 128-255 byte TX packets */
+ u64 tx_sz_511B; /* 256-511 byte TX packets */
+ u64 tx_sz_1023B; /* 512-1023 byte TX packets */
+ u64 tx_sz_1518B; /* 1024-1518 byte TX packets */
+ u64 tx_sz_max; /* 1519 byte to MTU TX packets */
+ u64 tx_single_col; /* packets TX after a single collision */
+ u64 tx_multi_col; /* packets TX after multiple collisions */
+ u64 tx_late_col; /* TX packets with late collisions */
+ u64 tx_abort_col; /* TX packets aborted w/excessive collisions */
+ u64 tx_underrun; /* TX packets aborted due to TX FIFO underrun
+ * or TRD FIFO underrun
+ */
+ u64 tx_trd_eop; /* reads beyond the EOP into the next frame
+			 * when TRD was not written in time
+ */
+ u64 tx_len_err; /* TX packets where length != actual size */
+ u64 tx_trunc; /* TX packets truncated due to size > MTU */
+ u64 tx_bc_byte_cnt; /* broadcast bytes transmitted, excluding FCS */
+ u64 tx_mc_byte_cnt; /* multicast bytes transmitted, excluding FCS */
+ u64 update;
+};
+
+
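As a hedged aside on the ordering constraint stated in the comment above the struct: the names below are hypothetical and not the ethtool.c code this series adds, but the usual pattern that depends on keeping the strings table and the struct fields in lockstep looks roughly like this sketch.

/* Sketch only: assumes a hypothetical alx_gstrings_stats_sketch[] kept in the
 * same order as struct alx_hw_stats, so the counters can be exported with one
 * loop that treats the struct as a flat array of u64.
 */
static const char alx_gstrings_stats_sketch[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"rx_bcast_packets",
	/* ... one string per struct alx_hw_stats field, in declaration order ... */
};

static void alx_get_ethtool_stats_sketch(struct alx_hw *hw, u64 *data)
{
	const u64 *src = (const u64 *)&hw->stats;
	unsigned int i;

	/* valid only while every field is u64 and the orders match */
	for (i = 0; i < ARRAY_SIZE(alx_gstrings_stats_sketch); i++)
		data[i] = src[i];
}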
/* maximum interrupt vectors for msix */
#define ALX_MAX_MSIX_INTRS 16
@@ -424,6 +491,9 @@ struct alx_hw {
/* PHY link patch flag */
bool lnk_patch;
+
+	/* accumulated stats from the hardware (registers are cleared on read) */
+ struct alx_hw_stats stats;
};
static inline int alx_hw_revision(struct alx_hw *hw)
@@ -491,6 +561,7 @@ bool alx_phy_configured(struct alx_hw *hw);
void alx_configure_basic(struct alx_hw *hw);
void alx_disable_rss(struct alx_hw *hw);
bool alx_get_phy_info(struct alx_hw *hw);
+void alx_update_hw_stats(struct alx_hw *hw);
static inline u32 alx_speed_to_ethadv(int speed, u8 duplex)
{
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c3c4c266b846..2e45f6ec1bf0 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1166,10 +1166,60 @@ static void alx_poll_controller(struct net_device *netdev)
}
#endif
+static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *net_stats)
+{
+ struct alx_priv *alx = netdev_priv(dev);
+ struct alx_hw_stats *hw_stats = &alx->hw.stats;
+
+ spin_lock(&alx->stats_lock);
+
+ alx_update_hw_stats(&alx->hw);
+
+ net_stats->tx_bytes = hw_stats->tx_byte_cnt;
+ net_stats->rx_bytes = hw_stats->rx_byte_cnt;
+ net_stats->multicast = hw_stats->rx_mcast;
+ net_stats->collisions = hw_stats->tx_single_col +
+ hw_stats->tx_multi_col +
+ hw_stats->tx_late_col +
+ hw_stats->tx_abort_col;
+
+ net_stats->rx_errors = hw_stats->rx_frag +
+ hw_stats->rx_fcs_err +
+ hw_stats->rx_len_err +
+ hw_stats->rx_ov_sz +
+ hw_stats->rx_ov_rrd +
+ hw_stats->rx_align_err +
+ hw_stats->rx_ov_rxf;
+
+ net_stats->rx_fifo_errors = hw_stats->rx_ov_rxf;
+ net_stats->rx_length_errors = hw_stats->rx_len_err;
+ net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
+ net_stats->rx_frame_errors = hw_stats->rx_align_err;
+ net_stats->rx_dropped = hw_stats->rx_ov_rrd;
+
+ net_stats->tx_errors = hw_stats->tx_late_col +
+ hw_stats->tx_abort_col +
+ hw_stats->tx_underrun +
+ hw_stats->tx_trunc;
+
+ net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
+ net_stats->tx_fifo_errors = hw_stats->tx_underrun;
+ net_stats->tx_window_errors = hw_stats->tx_late_col;
+
+ net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
+ net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
+
+ spin_unlock(&alx->stats_lock);
+
+ return net_stats;
+}
+
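Because the MIB counters are clear-on-read, alx_update_hw_stats() is effectively the only safe sampling point, and every caller must serialize on stats_lock or a concurrent reader would silently consume the delta. A minimal sketch of what an additional polling path would look like follows; the helper name is hypothetical and not part of this patch.

/* Hypothetical periodic poll (e.g. from a watchdog or timer) to drain the
 * 32-bit clear-on-read MIB counters before they can wrap. It must take the
 * same lock as alx_get_stats64() so two readers never race for the delta.
 */
static void alx_poll_stats_sketch(struct alx_priv *alx)
{
	spin_lock(&alx->stats_lock);
	alx_update_hw_stats(&alx->hw);
	spin_unlock(&alx->stats_lock);
}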
static const struct net_device_ops alx_netdev_ops = {
.ndo_open = alx_open,
.ndo_stop = alx_stop,
.ndo_start_xmit = alx_start_xmit,
+ .ndo_get_stats64 = alx_get_stats64,
.ndo_set_rx_mode = alx_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = alx_set_mac_address,
@@ -1242,6 +1292,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
alx = netdev_priv(netdev);
spin_lock_init(&alx->hw.mdio_lock);
spin_lock_init(&alx->irq_lock);
+ spin_lock_init(&alx->stats_lock);
alx->dev = netdev;
alx->hw.pdev = pdev;
alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
index e4358c98bc4e..af006b44b2a6 100644
--- a/drivers/net/ethernet/atheros/alx/reg.h
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -404,15 +404,59 @@
/* MIB */
#define ALX_MIB_BASE 0x1700
+
#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0)
+#define ALX_MIB_RX_BCAST (ALX_MIB_BASE + 4)
+#define ALX_MIB_RX_MCAST (ALX_MIB_BASE + 8)
+#define ALX_MIB_RX_PAUSE (ALX_MIB_BASE + 12)
+#define ALX_MIB_RX_CTRL (ALX_MIB_BASE + 16)
+#define ALX_MIB_RX_FCS_ERR (ALX_MIB_BASE + 20)
+#define ALX_MIB_RX_LEN_ERR (ALX_MIB_BASE + 24)
+#define ALX_MIB_RX_BYTE_CNT (ALX_MIB_BASE + 28)
+#define ALX_MIB_RX_RUNT (ALX_MIB_BASE + 32)
+#define ALX_MIB_RX_FRAG (ALX_MIB_BASE + 36)
+#define ALX_MIB_RX_SZ_64B (ALX_MIB_BASE + 40)
+#define ALX_MIB_RX_SZ_127B (ALX_MIB_BASE + 44)
+#define ALX_MIB_RX_SZ_255B (ALX_MIB_BASE + 48)
+#define ALX_MIB_RX_SZ_511B (ALX_MIB_BASE + 52)
+#define ALX_MIB_RX_SZ_1023B (ALX_MIB_BASE + 56)
+#define ALX_MIB_RX_SZ_1518B (ALX_MIB_BASE + 60)
+#define ALX_MIB_RX_SZ_MAX (ALX_MIB_BASE + 64)
+#define ALX_MIB_RX_OV_SZ (ALX_MIB_BASE + 68)
+#define ALX_MIB_RX_OV_RXF (ALX_MIB_BASE + 72)
+#define ALX_MIB_RX_OV_RRD (ALX_MIB_BASE + 76)
+#define ALX_MIB_RX_ALIGN_ERR (ALX_MIB_BASE + 80)
+#define ALX_MIB_RX_BCCNT (ALX_MIB_BASE + 84)
+#define ALX_MIB_RX_MCCNT (ALX_MIB_BASE + 88)
#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92)
+
#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96)
+#define ALX_MIB_TX_BCAST (ALX_MIB_BASE + 100)
+#define ALX_MIB_TX_MCAST (ALX_MIB_BASE + 104)
+#define ALX_MIB_TX_PAUSE (ALX_MIB_BASE + 108)
+#define ALX_MIB_TX_EXC_DEFER (ALX_MIB_BASE + 112)
+#define ALX_MIB_TX_CTRL (ALX_MIB_BASE + 116)
+#define ALX_MIB_TX_DEFER (ALX_MIB_BASE + 120)
+#define ALX_MIB_TX_BYTE_CNT (ALX_MIB_BASE + 124)
+#define ALX_MIB_TX_SZ_64B (ALX_MIB_BASE + 128)
+#define ALX_MIB_TX_SZ_127B (ALX_MIB_BASE + 132)
+#define ALX_MIB_TX_SZ_255B (ALX_MIB_BASE + 136)
+#define ALX_MIB_TX_SZ_511B (ALX_MIB_BASE + 140)
+#define ALX_MIB_TX_SZ_1023B (ALX_MIB_BASE + 144)
+#define ALX_MIB_TX_SZ_1518B (ALX_MIB_BASE + 148)
+#define ALX_MIB_TX_SZ_MAX (ALX_MIB_BASE + 152)
+#define ALX_MIB_TX_SINGLE_COL (ALX_MIB_BASE + 156)
+#define ALX_MIB_TX_MULTI_COL (ALX_MIB_BASE + 160)
+#define ALX_MIB_TX_LATE_COL (ALX_MIB_BASE + 164)
+#define ALX_MIB_TX_ABORT_COL (ALX_MIB_BASE + 168)
+#define ALX_MIB_TX_UNDERRUN (ALX_MIB_BASE + 172)
+#define ALX_MIB_TX_TRD_EOP (ALX_MIB_BASE + 176)
+#define ALX_MIB_TX_LEN_ERR (ALX_MIB_BASE + 180)
+#define ALX_MIB_TX_TRUNC (ALX_MIB_BASE + 184)
+#define ALX_MIB_TX_BCCNT (ALX_MIB_BASE + 188)
#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192)
+#define ALX_MIB_UPDATE (ALX_MIB_BASE + 196)
-#define ALX_RX_STATS_BIN ALX_MIB_RX_OK
-#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR
-#define ALX_TX_STATS_BIN ALX_MIB_TX_OK
-#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT
#define ALX_ISR 0x1600
#define ALX_ISR_DIS BIT(31)
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 7f9369a3b378..b9203d928938 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -22,7 +22,6 @@
#ifndef _ATL1C_H_
#define _ATL1C_H_
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index a36a760ada28..4d3258dd0a88 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
* Mask some pcie error bits
*/
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
- data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
- pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+ if (pos) {
+ pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
+ data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
+ pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+ }
/* clear error status */
pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
PCI_EXP_DEVSTA_NFED |
@@ -1498,31 +1500,40 @@ static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
struct net_device_stats *net_stats = &netdev->stats;
atl1c_update_hw_stats(adapter);
- net_stats->rx_packets = hw_stats->rx_ok;
- net_stats->tx_packets = hw_stats->tx_ok;
net_stats->rx_bytes = hw_stats->rx_byte_cnt;
net_stats->tx_bytes = hw_stats->tx_byte_cnt;
net_stats->multicast = hw_stats->rx_mcast;
net_stats->collisions = hw_stats->tx_1_col +
- hw_stats->tx_2_col * 2 +
- hw_stats->tx_late_col + hw_stats->tx_abort_col;
- net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err +
- hw_stats->rx_len_err + hw_stats->rx_sz_ov +
- hw_stats->rx_rrd_ov + hw_stats->rx_align_err;
+ hw_stats->tx_2_col +
+ hw_stats->tx_late_col +
+ hw_stats->tx_abort_col;
+
+ net_stats->rx_errors = hw_stats->rx_frag +
+ hw_stats->rx_fcs_err +
+ hw_stats->rx_len_err +
+ hw_stats->rx_sz_ov +
+ hw_stats->rx_rrd_ov +
+ hw_stats->rx_align_err +
+ hw_stats->rx_rxf_ov;
+
net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov;
net_stats->rx_length_errors = hw_stats->rx_len_err;
net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
net_stats->rx_frame_errors = hw_stats->rx_align_err;
- net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+ net_stats->rx_dropped = hw_stats->rx_rrd_ov;
- net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+ net_stats->tx_errors = hw_stats->tx_late_col +
+ hw_stats->tx_abort_col +
+ hw_stats->tx_underrun +
+ hw_stats->tx_trunc;
- net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col +
- hw_stats->tx_underrun + hw_stats->tx_trunc;
net_stats->tx_fifo_errors = hw_stats->tx_underrun;
net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
net_stats->tx_window_errors = hw_stats->tx_late_col;
+ net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
+ net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
+
return net_stats;
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index 1b0fe2d04a0e..0212dac7e23a 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -23,7 +23,6 @@
#ifndef _ATL1E_H_
#define _ATL1E_H_
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 7a73f3a9fcb5..d5c2d3e912e5 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1177,32 +1177,40 @@ static struct net_device_stats *atl1e_get_stats(struct net_device *netdev)
struct atl1e_hw_stats *hw_stats = &adapter->hw_stats;
struct net_device_stats *net_stats = &netdev->stats;
- net_stats->rx_packets = hw_stats->rx_ok;
- net_stats->tx_packets = hw_stats->tx_ok;
net_stats->rx_bytes = hw_stats->rx_byte_cnt;
net_stats->tx_bytes = hw_stats->tx_byte_cnt;
net_stats->multicast = hw_stats->rx_mcast;
net_stats->collisions = hw_stats->tx_1_col +
- hw_stats->tx_2_col * 2 +
- hw_stats->tx_late_col + hw_stats->tx_abort_col;
+ hw_stats->tx_2_col +
+ hw_stats->tx_late_col +
+ hw_stats->tx_abort_col;
+
+ net_stats->rx_errors = hw_stats->rx_frag +
+ hw_stats->rx_fcs_err +
+ hw_stats->rx_len_err +
+ hw_stats->rx_sz_ov +
+ hw_stats->rx_rrd_ov +
+ hw_stats->rx_align_err +
+ hw_stats->rx_rxf_ov;
- net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err +
- hw_stats->rx_len_err + hw_stats->rx_sz_ov +
- hw_stats->rx_rrd_ov + hw_stats->rx_align_err;
net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov;
net_stats->rx_length_errors = hw_stats->rx_len_err;
net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
net_stats->rx_frame_errors = hw_stats->rx_align_err;
- net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+ net_stats->rx_dropped = hw_stats->rx_rrd_ov;
- net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+ net_stats->tx_errors = hw_stats->tx_late_col +
+ hw_stats->tx_abort_col +
+ hw_stats->tx_underrun +
+ hw_stats->tx_trunc;
- net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col +
- hw_stats->tx_underrun + hw_stats->tx_trunc;
net_stats->tx_fifo_errors = hw_stats->tx_underrun;
net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
net_stats->tx_window_errors = hw_stats->tx_late_col;
+ net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
+ net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
+
return net_stats;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 538211d6f7d9..287272dd69da 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1678,33 +1678,42 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct stats_msg_block *smb = adapter->smb.smb;
+ u64 new_rx_errors = smb->rx_frag +
+ smb->rx_fcs_err +
+ smb->rx_len_err +
+ smb->rx_sz_ov +
+ smb->rx_rxf_ov +
+ smb->rx_rrd_ov +
+ smb->rx_align_err;
+ u64 new_tx_errors = smb->tx_late_col +
+ smb->tx_abort_col +
+ smb->tx_underrun +
+ smb->tx_trunc;
+
/* Fill out the OS statistics structure */
- adapter->soft_stats.rx_packets += smb->rx_ok;
- adapter->soft_stats.tx_packets += smb->tx_ok;
+ adapter->soft_stats.rx_packets += smb->rx_ok + new_rx_errors;
+ adapter->soft_stats.tx_packets += smb->tx_ok + new_tx_errors;
adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
adapter->soft_stats.multicast += smb->rx_mcast;
- adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
- smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);
+ adapter->soft_stats.collisions += smb->tx_1_col +
+ smb->tx_2_col +
+ smb->tx_late_col +
+ smb->tx_abort_col;
/* Rx Errors */
- adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
- smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
- smb->rx_rrd_ov + smb->rx_align_err);
+ adapter->soft_stats.rx_errors += new_rx_errors;
adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
adapter->soft_stats.rx_length_errors += smb->rx_len_err;
adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
- adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
- smb->rx_rxf_ov);
adapter->soft_stats.rx_pause += smb->rx_pause;
adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
adapter->soft_stats.rx_trunc += smb->rx_sz_ov;
/* Tx Errors */
- adapter->soft_stats.tx_errors += (smb->tx_late_col +
- smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
+ adapter->soft_stats.tx_errors += new_tx_errors;
adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
adapter->soft_stats.tx_window_errors += smb->tx_late_col;
@@ -1718,23 +1727,18 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
adapter->soft_stats.tx_trunc += smb->tx_trunc;
adapter->soft_stats.tx_pause += smb->tx_pause;
- netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
- netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes;
netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes;
netdev->stats.multicast = adapter->soft_stats.multicast;
netdev->stats.collisions = adapter->soft_stats.collisions;
netdev->stats.rx_errors = adapter->soft_stats.rx_errors;
- netdev->stats.rx_over_errors =
- adapter->soft_stats.rx_missed_errors;
netdev->stats.rx_length_errors =
adapter->soft_stats.rx_length_errors;
netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
netdev->stats.rx_frame_errors =
adapter->soft_stats.rx_frame_errors;
netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
- netdev->stats.rx_missed_errors =
- adapter->soft_stats.rx_missed_errors;
+ netdev->stats.rx_dropped = adapter->soft_stats.rx_rrd_ov;
netdev->stats.tx_errors = adapter->soft_stats.tx_errors;
netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
netdev->stats.tx_aborted_errors =
@@ -1743,6 +1747,9 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
adapter->soft_stats.tx_window_errors;
netdev->stats.tx_carrier_errors =
adapter->soft_stats.tx_carrier_errors;
+
+ netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
+ netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
}
static void atl1_update_mailbox(struct atl1_adapter *adapter)
@@ -1872,7 +1879,7 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
adapter->rx_buffer_len);
if (unlikely(!skb)) {
/* Better luck next round */
- adapter->netdev->stats.rx_dropped++;
+ adapter->soft_stats.rx_dropped++;
break;
}
@@ -3122,7 +3129,8 @@ static void atl1_remove(struct pci_dev *pdev)
* from the BIOS during POST. If we've been messing with the MAC
* address, we need to save the permanent one.
*/
- if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
+ if (!ether_addr_equal_unaligned(adapter->hw.mac_addr,
+ adapter->hw.perm_mac_addr)) {
memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
ETH_ALEN);
atl1_set_mac_addr(&adapter->hw);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
index 3bf79a56220d..34a58cd846a0 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.h
+++ b/drivers/net/ethernet/atheros/atlx/atl1.h
@@ -666,6 +666,7 @@ struct atl1_sft_stats {
u64 rx_errors;
u64 rx_length_errors;
u64 rx_crc_errors;
+ u64 rx_dropped;
u64 rx_frame_errors;
u64 rx_fifo_errors;
u64 rx_missed_errors;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 2fa5b86f139d..3f97d9fd0a71 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -23,6 +23,7 @@ config B44
depends on SSB_POSSIBLE && HAS_DMA
select SSB
select MII
+ select PHYLIB
---help---
If you have a network (Ethernet) controller of this type, say Y
or M and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 90e54d5488dc..1f7b5aa114fa 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -6,6 +6,7 @@
* Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
* Copyright (C) 2006 Broadcom Corporation.
* Copyright (C) 2007 Michael Buesch <m@bues.ch>
+ * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
*
* Distribute under GPL.
*/
@@ -29,6 +30,7 @@
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
+#include <linux/phy.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -284,7 +286,7 @@ static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
return 0;
return __b44_readphy(bp, bp->phy_addr, reg, val);
@@ -292,14 +294,14 @@ static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
return 0;
return __b44_writephy(bp, bp->phy_addr, reg, val);
}
/* miilib interface */
-static int b44_mii_read(struct net_device *dev, int phy_id, int location)
+static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
{
u32 val;
struct b44 *bp = netdev_priv(dev);
@@ -309,19 +311,36 @@ static int b44_mii_read(struct net_device *dev, int phy_id, int location)
return val;
}
-static void b44_mii_write(struct net_device *dev, int phy_id, int location,
- int val)
+static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
+ int val)
{
struct b44 *bp = netdev_priv(dev);
__b44_writephy(bp, phy_id, location, val);
}
+static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
+{
+ u32 val;
+ struct b44 *bp = bus->priv;
+ int rc = __b44_readphy(bp, phy_id, location, &val);
+ if (rc)
+ return 0xffffffff;
+ return val;
+}
+
+static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
+ u16 val)
+{
+ struct b44 *bp = bus->priv;
+ return __b44_writephy(bp, phy_id, location, val);
+}
+
static int b44_phy_reset(struct b44 *bp)
{
u32 val;
int err;
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
return 0;
err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
if (err)
@@ -423,7 +442,7 @@ static int b44_setup_phy(struct b44 *bp)
b44_wap54g10_workaround(bp);
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
return 0;
if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
goto out;
@@ -521,12 +540,14 @@ static void b44_check_phy(struct b44 *bp)
{
u32 bmsr, aux;
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
bp->flags |= B44_FLAG_100_BASE_T;
- bp->flags |= B44_FLAG_FULL_DUPLEX;
if (!netif_carrier_ok(bp->dev)) {
u32 val = br32(bp, B44_TX_CTRL);
- val |= TX_CTRL_DUPLEX;
+ if (bp->flags & B44_FLAG_FULL_DUPLEX)
+ val |= TX_CTRL_DUPLEX;
+ else
+ val &= ~TX_CTRL_DUPLEX;
bw32(bp, B44_TX_CTRL, val);
netif_carrier_on(bp->dev);
b44_link_report(bp);
@@ -1315,7 +1336,7 @@ static void b44_chip_reset(struct b44 *bp, int reset_kind)
if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
br32(bp, B44_ENET_CTRL);
- bp->flags &= ~B44_FLAG_INTERNAL_PHY;
+ bp->flags |= B44_FLAG_EXTERNAL_PHY;
} else {
u32 val = br32(bp, B44_DEVCTRL);
@@ -1324,7 +1345,7 @@ static void b44_chip_reset(struct b44 *bp, int reset_kind)
br32(bp, B44_DEVCTRL);
udelay(100);
}
- bp->flags |= B44_FLAG_INTERNAL_PHY;
+ bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
}
}
@@ -1339,7 +1360,10 @@ static void b44_halt(struct b44 *bp)
bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
/* now reset the chip, but without enabling the MAC&PHY
* part of it. This has to be done _after_ we shut down the PHY */
- b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ b44_chip_reset(bp, B44_CHIP_RESET_FULL);
+ else
+ b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}
/* bp->lock is held. */
@@ -1805,6 +1829,11 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct b44 *bp = netdev_priv(dev);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+ BUG_ON(!bp->phydev);
+ return phy_ethtool_gset(bp->phydev, cmd);
+ }
+
cmd->supported = (SUPPORTED_Autoneg);
cmd->supported |= (SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
@@ -1828,8 +1857,8 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
DUPLEX_FULL : DUPLEX_HALF;
cmd->port = 0;
cmd->phy_address = bp->phy_addr;
- cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
- XCVR_INTERNAL : XCVR_EXTERNAL;
+ cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ?
+ XCVR_EXTERNAL : XCVR_INTERNAL;
cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
AUTONEG_DISABLE : AUTONEG_ENABLE;
if (cmd->autoneg == AUTONEG_ENABLE)
@@ -1846,7 +1875,23 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct b44 *bp = netdev_priv(dev);
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed;
+ int ret;
+
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+ BUG_ON(!bp->phydev);
+ spin_lock_irq(&bp->lock);
+ if (netif_running(dev))
+ b44_setup_phy(bp);
+
+ ret = phy_ethtool_sset(bp->phydev, cmd);
+
+ spin_unlock_irq(&bp->lock);
+
+ return ret;
+ }
+
+ speed = ethtool_cmd_speed(cmd);
/* We do not support gigabit. */
if (cmd->autoneg == AUTONEG_ENABLE) {
@@ -2076,7 +2121,6 @@ static const struct ethtool_ops b44_ethtool_ops = {
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct mii_ioctl_data *data = if_mii(ifr);
struct b44 *bp = netdev_priv(dev);
int err = -EINVAL;
@@ -2084,7 +2128,12 @@ static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
goto out;
spin_lock_irq(&bp->lock);
- err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+ BUG_ON(!bp->phydev);
+ err = phy_mii_ioctl(bp->phydev, ifr, cmd);
+ } else {
+ err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
+ }
spin_unlock_irq(&bp->lock);
out:
return err;
@@ -2146,6 +2195,141 @@ static const struct net_device_ops b44_netdev_ops = {
#endif
};
+static void b44_adjust_link(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phydev;
+ bool status_changed = 0;
+
+ BUG_ON(!phydev);
+
+ if (bp->old_link != phydev->link) {
+ status_changed = 1;
+ bp->old_link = phydev->link;
+ }
+
+ /* reflect duplex change */
+ if (phydev->link) {
+ if ((phydev->duplex == DUPLEX_HALF) &&
+ (bp->flags & B44_FLAG_FULL_DUPLEX)) {
+ status_changed = 1;
+ bp->flags &= ~B44_FLAG_FULL_DUPLEX;
+ } else if ((phydev->duplex == DUPLEX_FULL) &&
+ !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
+ status_changed = 1;
+ bp->flags |= B44_FLAG_FULL_DUPLEX;
+ }
+ }
+
+ if (status_changed) {
+ b44_check_phy(bp);
+ phy_print_status(phydev);
+ }
+}
+
+static int b44_register_phy_one(struct b44 *bp)
+{
+ struct mii_bus *mii_bus;
+ struct ssb_device *sdev = bp->sdev;
+ struct phy_device *phydev;
+ char bus_id[MII_BUS_ID_SIZE + 3];
+ struct ssb_sprom *sprom = &sdev->bus->sprom;
+ int err;
+
+ mii_bus = mdiobus_alloc();
+ if (!mii_bus) {
+ dev_err(sdev->dev, "mdiobus_alloc() failed\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ mii_bus->priv = bp;
+ mii_bus->read = b44_mdio_read_phylib;
+ mii_bus->write = b44_mdio_write_phylib;
+ mii_bus->name = "b44_eth_mii";
+ mii_bus->parent = sdev->dev;
+ mii_bus->phy_mask = ~(1 << bp->phy_addr);
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);
+ mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!mii_bus->irq) {
+ dev_err(sdev->dev, "mii_bus irq allocation failed\n");
+ err = -ENOMEM;
+ goto err_out_mdiobus;
+ }
+
+ memset(mii_bus->irq, PHY_POLL, sizeof(int) * PHY_MAX_ADDR);
+
+ bp->mii_bus = mii_bus;
+
+ err = mdiobus_register(mii_bus);
+ if (err) {
+ dev_err(sdev->dev, "failed to register MII bus\n");
+ goto err_out_mdiobus_irq;
+ }
+
+ if (!bp->mii_bus->phy_map[bp->phy_addr] &&
+ (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
+
+ dev_info(sdev->dev,
+			 "could not find PHY at %i, using fixed PHY instead\n",
+ bp->phy_addr);
+
+ bp->phy_addr = 0;
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
+ bp->phy_addr);
+ } else {
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
+ bp->phy_addr);
+ }
+
+ phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phydev)) {
+ dev_err(sdev->dev, "could not attach PHY at %i\n",
+ bp->phy_addr);
+ err = PTR_ERR(phydev);
+ goto err_out_mdiobus_unregister;
+ }
+
+ /* mask with MAC supported features */
+ phydev->supported &= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_MII);
+ phydev->advertising = phydev->supported;
+
+ bp->phydev = phydev;
+ bp->old_link = 0;
+ bp->phy_addr = phydev->addr;
+
+ dev_info(sdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+ phydev->drv->name, dev_name(&phydev->dev));
+
+ return 0;
+
+err_out_mdiobus_unregister:
+ mdiobus_unregister(mii_bus);
+
+err_out_mdiobus_irq:
+ kfree(mii_bus->irq);
+
+err_out_mdiobus:
+ mdiobus_free(mii_bus);
+
+err_out:
+ return err;
+}
+
+static void b44_unregister_phy_one(struct b44 *bp)
+{
+ struct mii_bus *mii_bus = bp->mii_bus;
+
+ phy_disconnect(bp->phydev);
+ mdiobus_unregister(mii_bus);
+ kfree(mii_bus->irq);
+ mdiobus_free(mii_bus);
+}
+
static int b44_init_one(struct ssb_device *sdev,
const struct ssb_device_id *ent)
{
@@ -2206,9 +2390,15 @@ static int b44_init_one(struct ssb_device *sdev,
goto err_out_powerdown;
}
+ if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
+ dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
+ err = -ENODEV;
+ goto err_out_powerdown;
+ }
+
bp->mii_if.dev = dev;
- bp->mii_if.mdio_read = b44_mii_read;
- bp->mii_if.mdio_write = b44_mii_write;
+ bp->mii_if.mdio_read = b44_mdio_read_mii;
+ bp->mii_if.mdio_write = b44_mdio_write_mii;
bp->mii_if.phy_id = bp->phy_addr;
bp->mii_if.phy_id_mask = 0x1f;
bp->mii_if.reg_num_mask = 0x1f;
@@ -2236,13 +2426,26 @@ static int b44_init_one(struct ssb_device *sdev,
b44_chip_reset(bp, B44_CHIP_RESET_FULL);
/* do a phy reset to test if there is an active phy */
- if (b44_phy_reset(bp) < 0)
- bp->phy_addr = B44_PHY_ADDR_NO_PHY;
+ err = b44_phy_reset(bp);
+ if (err < 0) {
+ dev_err(sdev->dev, "phy reset failed\n");
+ goto err_out_unregister_netdev;
+ }
+
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+ err = b44_register_phy_one(bp);
+ if (err) {
+ dev_err(sdev->dev, "Cannot register PHY, aborting\n");
+ goto err_out_unregister_netdev;
+ }
+ }
netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
return 0;
+err_out_unregister_netdev:
+ unregister_netdev(dev);
err_out_powerdown:
ssb_bus_may_powerdown(sdev->bus);
@@ -2256,8 +2459,11 @@ out:
static void b44_remove_one(struct ssb_device *sdev)
{
struct net_device *dev = ssb_get_drvdata(sdev);
+ struct b44 *bp = netdev_priv(dev);
unregister_netdev(dev);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ b44_unregister_phy_one(bp);
ssb_device_disable(sdev, 0);
ssb_bus_may_powerdown(sdev->bus);
free_netdev(dev);
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index 8993d72f0420..3e9c3fc7591b 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -280,9 +280,10 @@ struct ring_info {
dma_addr_t mapping;
};
-#define B44_MCAST_TABLE_SIZE 32
-#define B44_PHY_ADDR_NO_PHY 30
-#define B44_MDC_RATIO 5000000
+#define B44_MCAST_TABLE_SIZE 32
+#define B44_PHY_ADDR_NO_LOCAL_PHY 30 /* no local phy regs */
+#define B44_PHY_ADDR_NO_PHY 31 /* no phy present at all */
+#define B44_MDC_RATIO 5000000
#define B44_STAT_REG_DECLARE \
_B44(tx_good_octets) \
@@ -344,6 +345,9 @@ B44_STAT_REG_DECLARE
struct u64_stats_sync syncp;
};
+#define B44_BOARDFLAG_ROBO 0x0010 /* Board has robo switch */
+#define B44_BOARDFLAG_ADM 0x0080 /* Board has ADMtek switch */
+
struct ssb_device;
struct b44 {
@@ -376,7 +380,7 @@ struct b44 {
#define B44_FLAG_ADV_10FULL 0x02000000
#define B44_FLAG_ADV_100HALF 0x04000000
#define B44_FLAG_ADV_100FULL 0x08000000
-#define B44_FLAG_INTERNAL_PHY 0x10000000
+#define B44_FLAG_EXTERNAL_PHY 0x10000000
#define B44_FLAG_RX_RING_HACK 0x20000000
#define B44_FLAG_TX_RING_HACK 0x40000000
#define B44_FLAG_WOL_ENABLE 0x80000000
@@ -396,6 +400,9 @@ struct b44 {
u32 tx_pending;
u8 phy_addr;
u8 force_copybreak;
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+ int old_link;
struct mii_if_info mii_if;
};
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index e2aa09ce6af7..0297a79a38e1 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -96,6 +96,19 @@ static void bgmac_dma_tx_enable(struct bgmac *bgmac,
u32 ctl;
ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
+ if (bgmac->core->id.rev >= 4) {
+ ctl &= ~BGMAC_DMA_TX_BL_MASK;
+ ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;
+
+ ctl &= ~BGMAC_DMA_TX_MR_MASK;
+ ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;
+
+ ctl &= ~BGMAC_DMA_TX_PC_MASK;
+ ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;
+
+ ctl &= ~BGMAC_DMA_TX_PT_MASK;
+ ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
+ }
ctl |= BGMAC_DMA_TX_ENABLE;
ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
@@ -240,6 +253,16 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
u32 ctl;
ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
+ if (bgmac->core->id.rev >= 4) {
+ ctl &= ~BGMAC_DMA_RX_BL_MASK;
+ ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
+
+ ctl &= ~BGMAC_DMA_RX_PC_MASK;
+ ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;
+
+ ctl &= ~BGMAC_DMA_RX_PT_MASK;
+ ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
+ }
ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
ctl |= BGMAC_DMA_RX_ENABLE;
ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
@@ -682,70 +705,6 @@ static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
return 0;
}
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
-static void bgmac_phy_force(struct bgmac *bgmac)
-{
- u16 ctl;
- u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
- BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);
-
- if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
- return;
-
- if (bgmac->autoneg)
- return;
-
- ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
- ctl &= mask;
- if (bgmac->full_duplex)
- ctl |= BGMAC_PHY_CTL_DUPLEX;
- if (bgmac->speed == BGMAC_SPEED_100)
- ctl |= BGMAC_PHY_CTL_SPEED_100;
- else if (bgmac->speed == BGMAC_SPEED_1000)
- ctl |= BGMAC_PHY_CTL_SPEED_1000;
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
-}
-
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
-static void bgmac_phy_advertise(struct bgmac *bgmac)
-{
- u16 adv;
-
- if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
- return;
-
- if (!bgmac->autoneg)
- return;
-
- /* Adv selected 10/100 speeds */
- adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
- adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
- BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
- if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
- adv |= BGMAC_PHY_ADV_10HALF;
- if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
- adv |= BGMAC_PHY_ADV_100HALF;
- if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
- adv |= BGMAC_PHY_ADV_10FULL;
- if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
- adv |= BGMAC_PHY_ADV_100FULL;
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);
-
- /* Adv selected 1000 speeds */
- adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
- adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
- if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
- adv |= BGMAC_PHY_ADV2_1000HALF;
- if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
- adv |= BGMAC_PHY_ADV2_1000FULL;
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);
-
- /* Restart */
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
- bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
- BGMAC_PHY_CTL_RESTART);
-}
-
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
static void bgmac_phy_init(struct bgmac *bgmac)
{
@@ -789,11 +748,9 @@ static void bgmac_phy_reset(struct bgmac *bgmac)
if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
return;
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
- BGMAC_PHY_CTL_RESET);
+ bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET);
udelay(100);
- if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
- BGMAC_PHY_CTL_RESET)
+ if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET)
bgmac_err(bgmac, "PHY reset failed\n");
bgmac_phy_init(bgmac);
}
@@ -811,13 +768,13 @@ static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
u32 new_val = (cmdcfg & mask) | set;
- bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
+ bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev));
udelay(2);
if (new_val != cmdcfg || force)
bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
- bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
+ bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev));
udelay(2);
}
@@ -876,31 +833,56 @@ static void bgmac_clear_mib(struct bgmac *bgmac)
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
-static void bgmac_speed(struct bgmac *bgmac, int speed)
+static void bgmac_mac_speed(struct bgmac *bgmac)
{
u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
u32 set = 0;
- if (speed & BGMAC_SPEED_10)
+ switch (bgmac->mac_speed) {
+ case SPEED_10:
set |= BGMAC_CMDCFG_ES_10;
- if (speed & BGMAC_SPEED_100)
+ break;
+ case SPEED_100:
set |= BGMAC_CMDCFG_ES_100;
- if (speed & BGMAC_SPEED_1000)
+ break;
+ case SPEED_1000:
set |= BGMAC_CMDCFG_ES_1000;
- if (!bgmac->full_duplex)
+ break;
+ case SPEED_2500:
+ set |= BGMAC_CMDCFG_ES_2500;
+ break;
+ default:
+ bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed);
+ }
+
+ if (bgmac->mac_duplex == DUPLEX_HALF)
set |= BGMAC_CMDCFG_HD;
+
bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}
static void bgmac_miiconfig(struct bgmac *bgmac)
{
- u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
- BGMAC_DS_MM_SHIFT;
- if (imode == 0 || imode == 1) {
- if (bgmac->autoneg)
- bgmac_speed(bgmac, BGMAC_SPEED_100);
- else
- bgmac_speed(bgmac, bgmac->speed);
+ struct bcma_device *core = bgmac->core;
+ struct bcma_chipinfo *ci = &core->bus->chipinfo;
+ u8 imode;
+
+ if (ci->id == BCMA_CHIP_ID_BCM4707 ||
+ ci->id == BCMA_CHIP_ID_BCM53018) {
+ bcma_awrite32(core, BCMA_IOCTL,
+ bcma_aread32(core, BCMA_IOCTL) | 0x40 |
+ BGMAC_BCMA_IOCTL_SW_CLKEN);
+ bgmac->mac_speed = SPEED_2500;
+ bgmac->mac_duplex = DUPLEX_FULL;
+ bgmac_mac_speed(bgmac);
+ } else {
+ imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
+ BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
+ if (imode == 0 || imode == 1) {
+ bgmac->mac_speed = SPEED_100;
+ bgmac->mac_duplex = DUPLEX_FULL;
+ bgmac_mac_speed(bgmac);
+ }
}
}
@@ -910,7 +892,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
struct bcma_device *core = bgmac->core;
struct bcma_bus *bus = core->bus;
struct bcma_chipinfo *ci = &bus->chipinfo;
- u32 flags = 0;
+ u32 flags;
u32 iost;
int i;
@@ -933,26 +915,36 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
}
iost = bcma_aread32(core, BCMA_IOST);
- if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
(ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
- (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
iost &= ~BGMAC_BCMA_IOST_ATTACHED;
- if (iost & BGMAC_BCMA_IOST_ATTACHED) {
- flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
- if (!bgmac->has_robosw)
- flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+ /* 3GMAC: for BCM4707, only do core reset at bgmac_probe() */
+ if (ci->id != BCMA_CHIP_ID_BCM4707) {
+ flags = 0;
+ if (iost & BGMAC_BCMA_IOST_ATTACHED) {
+ flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
+ if (!bgmac->has_robosw)
+ flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+ }
+ bcma_core_enable(core, flags);
}
- bcma_core_enable(core, flags);
-
- if (core->id.rev > 2) {
- bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
- bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
+ /* Request Misc PLL for corerev > 2 */
+ if (core->id.rev > 2 &&
+ ci->id != BCMA_CHIP_ID_BCM4707 &&
+ ci->id != BCMA_CHIP_ID_BCM53018) {
+ bgmac_set(bgmac, BCMA_CLKCTLST,
+ BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
+ bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
+ BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
+ BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
1000);
}
- if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
+ if (ci->id == BCMA_CHIP_ID_BCM5357 ||
+ ci->id == BCMA_CHIP_ID_BCM4749 ||
ci->id == BCMA_CHIP_ID_BCM53572) {
struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
u8 et_swtype = 0;
@@ -967,10 +959,11 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
et_swtype &= 0x0f;
et_swtype <<= 4;
sw_type = et_swtype;
- } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
+ } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) {
sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
- } else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
- (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
+ } else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
+ (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
}
@@ -1007,8 +1000,10 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
BGMAC_CMDCFG_PROM |
BGMAC_CMDCFG_NLC |
BGMAC_CMDCFG_CFE |
- BGMAC_CMDCFG_SR,
+ BGMAC_CMDCFG_SR(core->id.rev),
false);
+ bgmac->mac_speed = SPEED_UNKNOWN;
+ bgmac->mac_duplex = DUPLEX_UNKNOWN;
bgmac_clear_mib(bgmac);
if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
@@ -1048,7 +1043,7 @@ static void bgmac_enable(struct bgmac *bgmac)
cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
- BGMAC_CMDCFG_SR, true);
+ BGMAC_CMDCFG_SR(bgmac->core->id.rev), true);
udelay(2);
cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
@@ -1077,12 +1072,16 @@ static void bgmac_enable(struct bgmac *bgmac)
break;
}
- rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
- rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
- bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
- mdp = (bp_clk * 128 / 1000) - 3;
- rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
- bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
+ if (ci->id != BCMA_CHIP_ID_BCM4707 &&
+ ci->id != BCMA_CHIP_ID_BCM53018) {
+ rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
+ rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
+ bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
+ 1000000;
+ mdp = (bp_clk * 128 / 1000) - 3;
+ rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
+ bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
+ }
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
@@ -1108,13 +1107,6 @@ static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
- if (!bgmac->autoneg) {
- bgmac_speed(bgmac, bgmac->speed);
- bgmac_phy_force(bgmac);
- } else if (bgmac->speed) { /* if there is anything to adv */
- bgmac_phy_advertise(bgmac);
- }
-
if (full_init) {
bgmac_dma_init(bgmac);
if (1) /* FIXME: is there any case we don't want IRQs? */
@@ -1204,6 +1196,8 @@ static int bgmac_open(struct net_device *net_dev)
}
napi_enable(&bgmac->napi);
+ phy_start(bgmac->phy_dev);
+
netif_carrier_on(net_dev);
err_out:
@@ -1216,6 +1210,8 @@ static int bgmac_stop(struct net_device *net_dev)
netif_carrier_off(net_dev);
+ phy_stop(bgmac->phy_dev);
+
napi_disable(&bgmac->napi);
bgmac_chip_intrs_off(bgmac);
free_irq(bgmac->core->irq, net_dev);
@@ -1252,27 +1248,11 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
struct bgmac *bgmac = netdev_priv(net_dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- switch (cmd) {
- case SIOCGMIIPHY:
- data->phy_id = bgmac->phyaddr;
- /* fallthru */
- case SIOCGMIIREG:
- if (!netif_running(net_dev))
- return -EAGAIN;
- data->val_out = bgmac_phy_read(bgmac, data->phy_id,
- data->reg_num & 0x1f);
- return 0;
- case SIOCSMIIREG:
- if (!netif_running(net_dev))
- return -EAGAIN;
- bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
- data->val_in);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
+
+ if (!netif_running(net_dev))
+ return -EINVAL;
+
+ return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
}
static const struct net_device_ops bgmac_netdev_ops = {
@@ -1294,61 +1274,16 @@ static int bgmac_get_settings(struct net_device *net_dev,
{
struct bgmac *bgmac = netdev_priv(net_dev);
- cmd->supported = SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg;
-
- if (bgmac->autoneg) {
- WARN_ON(cmd->advertising);
- if (bgmac->full_duplex) {
- if (bgmac->speed & BGMAC_SPEED_10)
- cmd->advertising |= ADVERTISED_10baseT_Full;
- if (bgmac->speed & BGMAC_SPEED_100)
- cmd->advertising |= ADVERTISED_100baseT_Full;
- if (bgmac->speed & BGMAC_SPEED_1000)
- cmd->advertising |= ADVERTISED_1000baseT_Full;
- } else {
- if (bgmac->speed & BGMAC_SPEED_10)
- cmd->advertising |= ADVERTISED_10baseT_Half;
- if (bgmac->speed & BGMAC_SPEED_100)
- cmd->advertising |= ADVERTISED_100baseT_Half;
- if (bgmac->speed & BGMAC_SPEED_1000)
- cmd->advertising |= ADVERTISED_1000baseT_Half;
- }
- } else {
- switch (bgmac->speed) {
- case BGMAC_SPEED_10:
- ethtool_cmd_speed_set(cmd, SPEED_10);
- break;
- case BGMAC_SPEED_100:
- ethtool_cmd_speed_set(cmd, SPEED_100);
- break;
- case BGMAC_SPEED_1000:
- ethtool_cmd_speed_set(cmd, SPEED_1000);
- break;
- }
- }
-
- cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
-
- cmd->autoneg = bgmac->autoneg;
-
- return 0;
+ return phy_ethtool_gset(bgmac->phy_dev, cmd);
}
-#if 0
static int bgmac_set_settings(struct net_device *net_dev,
struct ethtool_cmd *cmd)
{
struct bgmac *bgmac = netdev_priv(net_dev);
- return -1;
+ return phy_ethtool_sset(bgmac->phy_dev, cmd);
}
-#endif
static void bgmac_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
@@ -1359,6 +1294,7 @@ static void bgmac_get_drvinfo(struct net_device *net_dev,
static const struct ethtool_ops bgmac_ethtool_ops = {
.get_settings = bgmac_get_settings,
+ .set_settings = bgmac_set_settings,
.get_drvinfo = bgmac_get_drvinfo,
};
@@ -1377,9 +1313,35 @@ static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
return bgmac_phy_write(bus->priv, mii_id, regnum, value);
}
+static void bgmac_adjust_link(struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ struct phy_device *phy_dev = bgmac->phy_dev;
+ bool update = false;
+
+ if (phy_dev->link) {
+ if (phy_dev->speed != bgmac->mac_speed) {
+ bgmac->mac_speed = phy_dev->speed;
+ update = true;
+ }
+
+ if (phy_dev->duplex != bgmac->mac_duplex) {
+ bgmac->mac_duplex = phy_dev->duplex;
+ update = true;
+ }
+ }
+
+ if (update) {
+ bgmac_mac_speed(bgmac);
+ phy_print_status(phy_dev);
+ }
+}
+
static int bgmac_mii_register(struct bgmac *bgmac)
{
struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ char bus_id[MII_BUS_ID_SIZE + 3];
int i, err = 0;
mii_bus = mdiobus_alloc();
@@ -1411,8 +1373,22 @@ static int bgmac_mii_register(struct bgmac *bgmac)
bgmac->mii_bus = mii_bus;
+ /* Connect to the PHY */
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
+ bgmac->phyaddr);
+ phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phy_dev)) {
+		bgmac_err(bgmac, "PHY connection failed\n");
+ err = PTR_ERR(phy_dev);
+ goto err_unregister_bus;
+ }
+ bgmac->phy_dev = phy_dev;
+
return err;
+err_unregister_bus:
+ mdiobus_unregister(mii_bus);
err_free_irq:
kfree(mii_bus->irq);
err_free_bus:
@@ -1467,9 +1443,6 @@ static int bgmac_probe(struct bcma_device *core)
bcma_set_drvdata(core, bgmac);
/* Defaults */
- bgmac->autoneg = true;
- bgmac->full_duplex = true;
- bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
/* On BCM4706 we need common core to access PHY */
@@ -1500,6 +1473,27 @@ static int bgmac_probe(struct bcma_device *core)
bgmac_chip_reset(bgmac);
+	/* For Northstar, we have to take all GMAC cores out of reset */
+ if (core->id.id == BCMA_CHIP_ID_BCM4707 ||
+ core->id.id == BCMA_CHIP_ID_BCM53018) {
+ struct bcma_device *ns_core;
+ int ns_gmac;
+
+ /* Northstar has 4 GMAC cores */
+ for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) {
+			/* As a Northstar requirement, we have to reset all GMACs
+			 * before accessing any of them. bgmac_chip_reset() calls
+			 * bcma_core_enable() only for this core, so the other
+			 * three GMACs are not reset there. We do it here.
+ */
+ ns_core = bcma_find_core_unit(core->bus,
+ BCMA_CORE_MAC_GBIT,
+ ns_gmac);
+ if (ns_core && !bcma_core_is_enabled(ns_core))
+ bcma_core_enable(ns_core, 0);
+ }
+ }
+
err = bgmac_dma_alloc(bgmac);
if (err) {
bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
@@ -1524,14 +1518,12 @@ static int bgmac_probe(struct bcma_device *core)
err = bgmac_mii_register(bgmac);
if (err) {
bgmac_err(bgmac, "Cannot register MDIO\n");
- err = -ENOTSUPP;
goto err_dma_free;
}
err = register_netdev(bgmac->net_dev);
if (err) {
bgmac_err(bgmac, "Cannot register net device\n");
- err = -ENOTSUPP;
goto err_mii_unregister;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 66c8afbdc8c7..89fa5bc69c51 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -95,7 +95,11 @@
#define BGMAC_RXQ_CTL_MDP_SHIFT 24
#define BGMAC_GPIO_SELECT 0x194
#define BGMAC_GPIO_OUTPUT_EN 0x198
-/* For 0x1e0 see BCMA_CLKCTLST */
+
+/* For 0x1e0 see BCMA_CLKCTLST. Below are BGMAC specific bits */
+#define BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ 0x00000100
+#define BGMAC_BCMA_CLKCTLST_MISC_PLL_ST 0x01000000
+
#define BGMAC_HW_WAR 0x1e4
#define BGMAC_PWR_CTL 0x1e8
#define BGMAC_DMA_BASE0 0x200 /* Tx and Rx controller */
@@ -185,6 +189,7 @@
#define BGMAC_CMDCFG_ES_10 0x00000000
#define BGMAC_CMDCFG_ES_100 0x00000004
#define BGMAC_CMDCFG_ES_1000 0x00000008
+#define BGMAC_CMDCFG_ES_2500 0x0000000C
#define BGMAC_CMDCFG_PROM 0x00000010 /* Set to activate promiscuous mode */
#define BGMAC_CMDCFG_PAD_EN 0x00000020
#define BGMAC_CMDCFG_CF 0x00000040
@@ -193,7 +198,9 @@
#define BGMAC_CMDCFG_TAI 0x00000200
#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
#define BGMAC_CMDCFG_HD_SHIFT 10
-#define BGMAC_CMDCFG_SR 0x00000800 /* Set to reset mode */
+#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */
+#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */
+#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
#define BGMAC_CMDCFG_AE 0x00400000
#define BGMAC_CMDCFG_CFE 0x00800000
@@ -216,27 +223,6 @@
#define BGMAC_RX_STATUS 0xb38
#define BGMAC_TX_STATUS 0xb3c
-#define BGMAC_PHY_CTL 0x00
-#define BGMAC_PHY_CTL_SPEED_MSB 0x0040
-#define BGMAC_PHY_CTL_DUPLEX 0x0100 /* duplex mode */
-#define BGMAC_PHY_CTL_RESTART 0x0200 /* restart autonegotiation */
-#define BGMAC_PHY_CTL_ANENAB 0x1000 /* enable autonegotiation */
-#define BGMAC_PHY_CTL_SPEED 0x2000
-#define BGMAC_PHY_CTL_LOOP 0x4000 /* loopback */
-#define BGMAC_PHY_CTL_RESET 0x8000 /* reset */
-/* Helpers */
-#define BGMAC_PHY_CTL_SPEED_10 0
-#define BGMAC_PHY_CTL_SPEED_100 BGMAC_PHY_CTL_SPEED
-#define BGMAC_PHY_CTL_SPEED_1000 BGMAC_PHY_CTL_SPEED_MSB
-#define BGMAC_PHY_ADV 0x04
-#define BGMAC_PHY_ADV_10HALF 0x0020 /* advertise 10MBits/s half duplex */
-#define BGMAC_PHY_ADV_10FULL 0x0040 /* advertise 10MBits/s full duplex */
-#define BGMAC_PHY_ADV_100HALF 0x0080 /* advertise 100MBits/s half duplex */
-#define BGMAC_PHY_ADV_100FULL 0x0100 /* advertise 100MBits/s full duplex */
-#define BGMAC_PHY_ADV2 0x09
-#define BGMAC_PHY_ADV2_1000HALF 0x0100 /* advertise 1000MBits/s half duplex */
-#define BGMAC_PHY_ADV2_1000FULL 0x0200 /* advertise 1000MBits/s full duplex */
-
/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
#define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */
@@ -254,9 +240,34 @@
#define BGMAC_DMA_TX_SUSPEND 0x00000002
#define BGMAC_DMA_TX_LOOPBACK 0x00000004
#define BGMAC_DMA_TX_FLUSH 0x00000010
+#define BGMAC_DMA_TX_MR_MASK 0x000000C0 /* Multiple outstanding reads */
+#define BGMAC_DMA_TX_MR_SHIFT 6
+#define BGMAC_DMA_TX_MR_1 0
+#define BGMAC_DMA_TX_MR_2 1
#define BGMAC_DMA_TX_PARITY_DISABLE 0x00000800
#define BGMAC_DMA_TX_ADDREXT_MASK 0x00030000
#define BGMAC_DMA_TX_ADDREXT_SHIFT 16
+#define BGMAC_DMA_TX_BL_MASK 0x001C0000 /* BurstLen bits */
+#define BGMAC_DMA_TX_BL_SHIFT 18
+#define BGMAC_DMA_TX_BL_16 0
+#define BGMAC_DMA_TX_BL_32 1
+#define BGMAC_DMA_TX_BL_64 2
+#define BGMAC_DMA_TX_BL_128 3
+#define BGMAC_DMA_TX_BL_256 4
+#define BGMAC_DMA_TX_BL_512 5
+#define BGMAC_DMA_TX_BL_1024 6
+#define BGMAC_DMA_TX_PC_MASK 0x00E00000 /* Prefetch control */
+#define BGMAC_DMA_TX_PC_SHIFT 21
+#define BGMAC_DMA_TX_PC_0 0
+#define BGMAC_DMA_TX_PC_4 1
+#define BGMAC_DMA_TX_PC_8 2
+#define BGMAC_DMA_TX_PC_16 3
+#define BGMAC_DMA_TX_PT_MASK 0x03000000 /* Prefetch threshold */
+#define BGMAC_DMA_TX_PT_SHIFT 24
+#define BGMAC_DMA_TX_PT_1 0
+#define BGMAC_DMA_TX_PT_2 1
+#define BGMAC_DMA_TX_PT_4 2
+#define BGMAC_DMA_TX_PT_8 3
#define BGMAC_DMA_TX_INDEX 0x04
#define BGMAC_DMA_TX_RINGLO 0x08
#define BGMAC_DMA_TX_RINGHI 0x0C
@@ -284,8 +295,33 @@
#define BGMAC_DMA_RX_DIRECT_FIFO 0x00000100
#define BGMAC_DMA_RX_OVERFLOW_CONT 0x00000400
#define BGMAC_DMA_RX_PARITY_DISABLE 0x00000800
+#define BGMAC_DMA_RX_MR_MASK 0x000000C0 /* Multiple outstanding reads */
+#define BGMAC_DMA_RX_MR_SHIFT 6
+#define BGMAC_DMA_TX_MR_1 0
+#define BGMAC_DMA_TX_MR_2 1
#define BGMAC_DMA_RX_ADDREXT_MASK 0x00030000
#define BGMAC_DMA_RX_ADDREXT_SHIFT 16
+#define BGMAC_DMA_RX_BL_MASK 0x001C0000 /* BurstLen bits */
+#define BGMAC_DMA_RX_BL_SHIFT 18
+#define BGMAC_DMA_RX_BL_16 0
+#define BGMAC_DMA_RX_BL_32 1
+#define BGMAC_DMA_RX_BL_64 2
+#define BGMAC_DMA_RX_BL_128 3
+#define BGMAC_DMA_RX_BL_256 4
+#define BGMAC_DMA_RX_BL_512 5
+#define BGMAC_DMA_RX_BL_1024 6
+#define BGMAC_DMA_RX_PC_MASK 0x00E00000 /* Prefetch control */
+#define BGMAC_DMA_RX_PC_SHIFT 21
+#define BGMAC_DMA_RX_PC_0 0
+#define BGMAC_DMA_RX_PC_4 1
+#define BGMAC_DMA_RX_PC_8 2
+#define BGMAC_DMA_RX_PC_16 3
+#define BGMAC_DMA_RX_PT_MASK 0x03000000 /* Prefetch threshold */
+#define BGMAC_DMA_RX_PT_SHIFT 24
+#define BGMAC_DMA_RX_PT_1 0
+#define BGMAC_DMA_RX_PT_2 1
+#define BGMAC_DMA_RX_PT_4 2
+#define BGMAC_DMA_RX_PT_8 3
#define BGMAC_DMA_RX_INDEX 0x24
#define BGMAC_DMA_RX_RINGLO 0x28
#define BGMAC_DMA_RX_RINGHI 0x2C
@@ -342,10 +378,6 @@
#define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0
#define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000
-#define BGMAC_SPEED_10 0x0001
-#define BGMAC_SPEED_100 0x0002
-#define BGMAC_SPEED_1000 0x0004
-
#define BGMAC_WEIGHT 64
#define ETHER_MAX_LEN 1518
@@ -402,6 +434,7 @@ struct bgmac {
struct net_device *net_dev;
struct napi_struct napi;
struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
/* DMA */
struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
@@ -416,10 +449,9 @@ struct bgmac {
u32 int_mask;
u32 int_status;
- /* Speed-related */
- int speed;
- bool autoneg;
- bool full_duplex;
+ /* Current MAC state */
+ int mac_speed;
+ int mac_duplex;
u8 phyaddr;
bool has_robosw;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index d9980ad00b4b..cda25ac45b47 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -23,7 +23,6 @@
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -58,8 +57,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.2.4"
-#define DRV_MODULE_RELDATE "Aug 05, 2013"
+#define DRV_MODULE_VERSION "2.2.5"
+#define DRV_MODULE_RELDATE "December 20, 2013"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -86,7 +85,7 @@ MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
static int disable_msi = 0;
-module_param(disable_msi, int, 0);
+module_param(disable_msi, int, S_IRUGO);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
@@ -1197,6 +1196,8 @@ bnx2_copper_linkup(struct bnx2 *bp)
{
u32 bmcr;
+ bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
+
bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
if (bmcr & BMCR_ANENABLE) {
u32 local_adv, remote_adv, common;
@@ -1255,6 +1256,14 @@ bnx2_copper_linkup(struct bnx2 *bp)
}
}
+ if (bp->link_up) {
+ u32 ext_status;
+
+ bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
+ if (ext_status & EXT_STATUS_MDIX)
+ bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
+ }
+
return 0;
}
@@ -2048,29 +2057,27 @@ bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
- u32 bmcr;
+ u32 bmcr, adv_reg, new_adv = 0;
u32 new_bmcr;
bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
+ adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
+ ADVERTISE_PAUSE_ASYM);
+
+ new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
+
if (bp->autoneg & AUTONEG_SPEED) {
- u32 adv_reg, adv1000_reg;
- u32 new_adv = 0;
+ u32 adv1000_reg;
u32 new_adv1000 = 0;
- bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
- adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
- ADVERTISE_PAUSE_ASYM);
+ new_adv |= bnx2_phy_get_pause_adv(bp);
bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
adv1000_reg &= PHY_ALL_1000_SPEED;
- new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
- new_adv |= ADVERTISE_CSMA;
- new_adv |= bnx2_phy_get_pause_adv(bp);
-
new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
-
if ((adv1000_reg != new_adv1000) ||
(adv_reg != new_adv) ||
((bmcr & BMCR_ANENABLE) == 0)) {
@@ -2090,6 +2097,10 @@ __acquires(&bp->phy_lock)
return 0;
}
+ /* advertise nothing when forcing speed */
+ if (adv_reg != new_adv)
+ bnx2_write_phy(bp, bp->mii_adv, new_adv);
+
new_bmcr = 0;
if (bp->req_line_speed == SPEED_100) {
new_bmcr |= BMCR_SPEED100;
@@ -2341,9 +2352,15 @@ bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
}
/* ethernet@wirespeed */
- bnx2_write_phy(bp, 0x18, 0x7007);
- bnx2_read_phy(bp, 0x18, &val);
- bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
+ bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
+ bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
+ val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
+
+ /* auto-mdix */
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+ val |= AUX_CTL_MISC_CTL_AUTOMDIX;
+
+ bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
return 0;
}
@@ -3234,7 +3251,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
if ((bp->dev->features & NETIF_F_RXHASH) &&
((status & L2_FHDR_STATUS_USE_RXHASH) ==
L2_FHDR_STATUS_USE_RXHASH))
- skb->rxhash = rx_hdr->l2_fhdr_hash;
+ skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
+ PKT_HASH_TYPE_L3);
skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
napi_gro_receive(&bnapi->napi, skb);
@@ -6865,6 +6883,12 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (netif_carrier_ok(dev)) {
ethtool_cmd_speed_set(cmd, bp->line_speed);
cmd->duplex = bp->duplex;
+ if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
+ if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
+ cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ else
+ cmd->eth_tp_mdix = ETH_TP_MDI;
+ }
}
else {
ethtool_cmd_speed_set(cmd, -1);
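The get_settings hunk reports the resolved cable pairing through ethtool_cmd::eth_tp_mdix when the copper link is up. A hedged sketch of that mapping; my_priv and MY_FLAG_MDIX are illustrative stand-ins for the driver's bp->phy_flags bookkeeping:

#include <linux/ethtool.h>

struct my_priv {
	u32 flags;
#define MY_FLAG_MDIX	0x1	/* set when the PHY resolved to crossover */
};

/* Translate a driver flag into the ethtool MDI/MDI-X report, as done in
 * the bnx2_get_settings() change above. */
static void my_report_mdix(struct my_priv *priv, struct ethtool_cmd *cmd)
{
	if (priv->flags & MY_FLAG_MDIX)
		cmd->eth_tp_mdix = ETH_TP_MDI_X;
	else
		cmd->eth_tp_mdix = ETH_TP_MDI;
}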
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 18cb2d23e56b..f1cf2c44e7ed 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6471,6 +6471,15 @@ struct l2_fhdr {
#define BCM5708S_TX_ACTL3 0x17
+#define MII_BNX2_EXT_STATUS 0x11
+#define EXT_STATUS_MDIX (1 << 13)
+
+#define MII_BNX2_AUX_CTL 0x18
+#define AUX_CTL_MISC_CTL 0x7007
+#define AUX_CTL_MISC_CTL_WIRESPEED (1 << 4)
+#define AUX_CTL_MISC_CTL_AUTOMDIX (1 << 9)
+#define AUX_CTL_MISC_CTL_WR (1 << 15)
+
#define MII_BNX2_DSP_RW_PORT 0x15
#define MII_BNX2_DSP_ADDRESS 0x17
#define MII_BNX2_DSP_EXPAND_REG 0x0f00
@@ -6844,6 +6853,7 @@ struct bnx2 {
#define BNX2_PHY_FLAG_REMOTE_PHY_CAP 0x00000800
#define BNX2_PHY_FLAG_FORCED_DOWN 0x00001000
#define BNX2_PHY_FLAG_NO_PARALLEL 0x00002000
+#define BNX2_PHY_FLAG_MDIX 0x00004000
u32 mii_bmcr;
u32 mii_bmsr;
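The MII_BNX2_AUX_CTL constants give names to what the wirespeed code previously did with the magic numbers 0x18 and 0x7007: select the miscellaneous-control shadow page, read it back, then write it with the write-enable bit plus the wanted feature bits. A sketch of that sequence, assuming the driver's existing bnx2_read_phy()/bnx2_write_phy() helpers and struct bnx2:

/* Enable ethernet@wirespeed and, where supported, automatic MDI crossover
 * through the Broadcom auxiliary-control shadow register. */
static void bnx2_enable_wirespeed(struct bnx2 *bp, bool automdix)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
	val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
	if (automdix)
		val |= AUX_CTL_MISC_CTL_AUTOMDIX;
	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
}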
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a1f66e2c9a86..391f29ef6d2e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -472,7 +472,7 @@ struct bnx2x_agg_info {
u16 vlan_tag;
u16 len_on_bd;
u32 rxhash;
- bool l4_rxhash;
+ enum pkt_hash_types rxhash_type;
u16 gro_size;
u16 full_page;
};
@@ -520,10 +520,12 @@ struct bnx2x_fastpath {
#define BNX2X_FP_STATE_IDLE 0
#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
-#define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */
-#define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */
+#define BNX2X_FP_STATE_DISABLED (1 << 2)
+#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
+#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
+#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
-#define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
/* protect state */
spinlock_t lock;
@@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
{
bool rc = true;
- spin_lock(&fp->lock);
+ spin_lock_bh(&fp->lock);
if (fp->state & BNX2X_FP_LOCKED) {
WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
@@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
/* we don't care if someone yielded */
fp->state = BNX2X_FP_STATE_NAPI;
}
- spin_unlock(&fp->lock);
+ spin_unlock_bh(&fp->lock);
return rc;
}
@@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
{
bool rc = false;
- spin_lock(&fp->lock);
+ spin_lock_bh(&fp->lock);
WARN_ON(fp->state &
(BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
rc = true;
- fp->state = BNX2X_FP_STATE_IDLE;
- spin_unlock(&fp->lock);
+
+ /* state ==> idle, unless currently disabled */
+ fp->state &= BNX2X_FP_STATE_DISABLED;
+ spin_unlock_bh(&fp->lock);
return rc;
}
@@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
rc = true;
- fp->state = BNX2X_FP_STATE_IDLE;
+
+ /* state ==> idle, unless currently disabled */
+ fp->state &= BNX2X_FP_STATE_DISABLED;
spin_unlock_bh(&fp->lock);
return rc;
}
@@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
/* true if a socket is polling, even if it did not get the lock */
static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
{
- WARN_ON(!(fp->state & BNX2X_FP_LOCKED));
+ WARN_ON(!(fp->state & BNX2X_FP_OWNED));
return fp->state & BNX2X_FP_USER_PEND;
}
+
+/* false if fp is currently owned */
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+ int rc = true;
+
+ spin_lock_bh(&fp->lock);
+ if (fp->state & BNX2X_FP_OWNED)
+ rc = false;
+ fp->state |= BNX2X_FP_STATE_DISABLED;
+ spin_unlock_bh(&fp->lock);
+
+ return rc;
+}
#else
static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
{
@@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
{
return false;
}
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+ return true;
+}
#endif /* CONFIG_NET_RX_BUSY_POLL */
/* Use 2500 as a mini-jumbo MTU for FCoE */
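The new BNX2X_FP_STATE_DISABLED bit turns the per-queue busy-poll lock into a small state machine: once set it survives unlock, so neither NAPI nor a polling socket can re-acquire the queue while it is being torn down, and bnx2x_fp_ll_disable() simply reports whether an owner is still inside. A userspace model of those rules, with the spinlock and the yield bookkeeping left out:

#include <stdbool.h>
#include <stdio.h>

#define FP_STATE_IDLE		0
#define FP_STATE_NAPI		(1 << 0)
#define FP_STATE_POLL		(1 << 1)
#define FP_STATE_DISABLED	(1 << 2)
#define FP_OWNED		(FP_STATE_NAPI | FP_STATE_POLL)
#define FP_LOCKED		(FP_OWNED | FP_STATE_DISABLED)

static unsigned int state = FP_STATE_IDLE;

static bool fp_lock_napi(void)
{
	if (state & FP_LOCKED)
		return false;		/* owned elsewhere or disabled */
	state = FP_STATE_NAPI;
	return true;
}

static void fp_unlock_napi(void)
{
	state &= FP_STATE_DISABLED;	/* back to idle unless disabled */
}

static bool fp_disable(void)
{
	state |= FP_STATE_DISABLED;	/* sticky: blocks future lockers */
	return !(state & FP_OWNED);	/* caller re-polls while owned */
}

int main(void)
{
	printf("napi lock:          %d\n", fp_lock_napi());
	printf("disable while held: %d\n", fp_disable());
	fp_unlock_napi();
	printf("disable after free: %d\n", fp_disable());
	printf("lock when disabled: %d\n", fp_lock_napi());
	return 0;
}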
@@ -1250,7 +1274,10 @@ struct bnx2x_slowpath {
* Therefore, if they would have been defined in the same union,
* data can get corrupted.
*/
- struct afex_vif_list_ramrod_data func_afex_rdata;
+ union {
+ struct afex_vif_list_ramrod_data viflist_data;
+ struct function_update_data func_update;
+ } func_afex_rdata;
/* used by dmae command executer */
struct dmae_command dmae[MAX_DMAE_C];
@@ -1539,6 +1566,7 @@ struct bnx2x {
#define NO_ISCSI_FLAG (1 << 14)
#define NO_FCOE_FLAG (1 << 15)
#define BC_SUPPORTS_PFC_STATS (1 << 17)
+#define TX_SWITCHING (1 << 18)
#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
#define USING_SINGLE_MSIX_FLAG (1 << 20)
#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
@@ -1546,6 +1574,7 @@ struct bnx2x {
#define INTERRUPTS_ENABLED_FLAG (1 << 23)
#define BC_SUPPORTS_RMMOD_CMD (1 << 24)
#define HAS_PHYS_PORT_ID (1 << 25)
+#define AER_ENABLED (1 << 26)
#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
@@ -2053,7 +2082,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id);
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp);
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
@@ -2436,7 +2464,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
(!((me_reg) & ME_REG_VF_ERR)))
-int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code);
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err);
+
/* Congestion management fairness mode */
#define CMNG_FNS_NONE 0
#define CMNG_FNS_MINMAX 1
@@ -2499,4 +2528,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
#define MCPR_SCRATCH_BASE(bp) \
(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
+
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ec96130533cc..9d7419e0390b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -30,6 +30,43 @@
#include "bnx2x_init.h"
#include "bnx2x_sp.h"
+static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
+static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
+static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
+static int bnx2x_poll(struct napi_struct *napi, int budget);
+
+static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_rx_queue_cnic(bp, i) {
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+ bnx2x_poll, NAPI_POLL_WEIGHT);
+ napi_hash_add(&bnx2x_fp(bp, i, napi));
+ }
+}
+
+static void bnx2x_add_all_napi(struct bnx2x *bp)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_eth_queue(bp, i) {
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+ bnx2x_poll, NAPI_POLL_WEIGHT);
+ napi_hash_add(&bnx2x_fp(bp, i, napi));
+ }
+}
+
+static int bnx2x_calc_num_queues(struct bnx2x *bp)
+{
+ return bnx2x_num_queues ?
+ min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) :
+ min_t(int, netif_get_num_default_rss_queues(),
+ BNX2X_MAX_QUEUES(bp));
+}
+
/**
* bnx2x_move_fp - move content of the fastpath structure.
*
@@ -145,7 +182,7 @@ static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
}
}
-int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
+int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* free skb in the packet ring at pos idx
* return idx of last bd freed
@@ -160,6 +197,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
struct sk_buff *skb = tx_buf->skb;
u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
int nbd;
+ u16 split_bd_len = 0;
/* prefetch skb end pointer to speedup dev_kfree_skb() */
prefetch(&skb->end);
@@ -167,10 +205,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
txdata->txq_index, idx, tx_buf, skb);
- /* unmap first bd */
tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
- dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
- BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
@@ -188,12 +223,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
--nbd;
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
- /* ...and the TSO split header bd since they have no mapping */
+ /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+ tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+ split_bd_len = BD_UNMAP_LEN(tx_data_bd);
--nbd;
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
}
+ /* unmap first bd */
+ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+ BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
+ DMA_TO_DEVICE);
+
/* now free frags */
while (nbd > 0) {
@@ -354,7 +396,7 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
*/
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
const struct eth_fast_path_rx_cqe *cqe,
- bool *l4_rxhash)
+ enum pkt_hash_types *rxhash_type)
{
/* Get Toeplitz hash from CQE */
if ((bp->dev->features & NETIF_F_RXHASH) &&
@@ -362,11 +404,13 @@ static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
enum eth_rss_hash_type htype;
htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
- *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
- (htype == TCP_IPV6_HASH_TYPE);
+ *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
+ (htype == TCP_IPV6_HASH_TYPE)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
+
return le32_to_cpu(cqe->rss_hash_result);
}
- *l4_rxhash = false;
+ *rxhash_type = PKT_HASH_TYPE_NONE;
return 0;
}
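skb_set_hash() replaces the open-coded skb->rxhash/l4_rxhash pair throughout this series; the driver now states whether the hardware hash also covered the layer-4 ports. A minimal sketch of that mapping; hw_hash_is_l4 is an illustrative parameter rather than a field from the patch:

#include <linux/skbuff.h>

/* Record a hardware RSS hash on the skb, tagging it as an L4 hash only
 * when the NIC computed it over the TCP/UDP ports as well. */
static void my_set_rx_hash(struct sk_buff *skb, u32 hw_hash, bool hw_hash_is_l4)
{
	skb_set_hash(skb, hw_hash,
		     hw_hash_is_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}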
@@ -420,7 +464,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->tpa_state = BNX2X_TPA_START;
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
- tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
+ tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
if (fp->mode == TPA_MODE_GRO) {
u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
@@ -728,8 +772,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb_reserve(skb, pad + NET_SKB_PAD);
skb_put(skb, len);
- skb->rxhash = tpa_info->rxhash;
- skb->l4_rxhash = tpa_info->l4_rxhash;
+ skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -812,7 +855,7 @@ void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
-int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
struct bnx2x *bp = fp->bp;
u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
@@ -846,7 +889,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad, queue;
u8 *data;
- bool l4_rxhash;
+ u32 rxhash;
+ enum pkt_hash_types rxhash_type;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -987,8 +1031,8 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
/* Set Toeplitz hash for a none-LRO skb */
- skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
- skb->l4_rxhash = l4_rxhash;
+ rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
+ skb_set_hash(skb, rxhash, rxhash_type);
skb_checksum_none_assert(skb);
@@ -1481,7 +1525,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
}
}
-void bnx2x_free_skbs_cnic(struct bnx2x *bp)
+static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
bnx2x_free_tx_skbs_cnic(bp);
bnx2x_free_rx_skbs_cnic(bp);
@@ -1790,26 +1834,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
{
int i;
- local_bh_disable();
for_each_rx_queue_cnic(bp, i) {
napi_disable(&bnx2x_fp(bp, i, napi));
- while (!bnx2x_fp_lock_napi(&bp->fp[i]))
- mdelay(1);
+ while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+ usleep_range(1000, 2000);
}
- local_bh_enable();
}
static void bnx2x_napi_disable(struct bnx2x *bp)
{
int i;
- local_bh_disable();
for_each_eth_queue(bp, i) {
napi_disable(&bnx2x_fp(bp, i, napi));
- while (!bnx2x_fp_lock_napi(&bp->fp[i]))
- mdelay(1);
+ while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+ usleep_range(1000, 2000);
}
- local_bh_enable();
}
void bnx2x_netif_start(struct bnx2x *bp)
@@ -1832,7 +1872,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
bnx2x_napi_disable_cnic(bp);
}
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -2263,7 +2304,7 @@ static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
* virtualized environments a pf from another VM may have already
* initialized the device including loading FW
*/
-int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
{
/* is another pf loaded on this engine? */
if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
@@ -2282,8 +2323,12 @@ int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
/* abort nic load if version mismatch */
if (my_fw != loaded_fw) {
- BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
- loaded_fw, my_fw);
+ if (print_err)
+ BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
+ loaded_fw, my_fw);
+ else
+ BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
+ loaded_fw, my_fw);
return -EBUSY;
}
}
@@ -2296,16 +2341,16 @@ static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
int path = BP_PATH(bp);
DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- load_count[path][0]++;
- load_count[path][1 + port]++;
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ bnx2x_load_count[path][0]++;
+ bnx2x_load_count[path][1 + port]++;
DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- if (load_count[path][0] == 1)
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ if (bnx2x_load_count[path][0] == 1)
return FW_MSG_CODE_DRV_LOAD_COMMON;
- else if (load_count[path][1 + port] == 1)
+ else if (bnx2x_load_count[path][1 + port] == 1)
return FW_MSG_CODE_DRV_LOAD_PORT;
else
return FW_MSG_CODE_DRV_LOAD_FUNCTION;
@@ -2598,7 +2643,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
LOAD_ERROR_EXIT(bp, load_error1);
/* what did mcp say? */
- rc = bnx2x_nic_load_analyze_req(bp, load_code);
+ rc = bnx2x_compare_fw_ver(bp, load_code, true);
if (rc) {
bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
LOAD_ERROR_EXIT(bp, load_error2);
@@ -3063,7 +3108,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
/*
* net_device service functions
*/
-int bnx2x_poll(struct napi_struct *napi, int budget)
+static int bnx2x_poll(struct napi_struct *napi, int budget)
{
int work_done = 0;
u8 cos;
@@ -4190,7 +4235,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
/* end of fastpath */
}
-void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
+static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
int i;
for_each_cnic_queue(bp, i)
@@ -4404,7 +4449,7 @@ alloc_mem_err:
return 0;
}
-int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
+static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
if (!NO_FCOE(bp))
/* FCoE */
@@ -4417,7 +4462,7 @@ int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
return 0;
}
-int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
int i;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index da8fcaa74495..bfc58d488bb5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -26,10 +26,8 @@
#include "bnx2x_sriov.h"
/* This is used as a replacement for an MCP if it's not present */
-extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
-
-extern int num_queues;
-extern int int_mode;
+extern int bnx2x_load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
+extern int bnx2x_num_queues;
/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
@@ -417,35 +415,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
* If bp->state is OPEN, should be called with
* netif_addr_lock_bh()
*/
-void bnx2x_set_rx_mode(struct net_device *dev);
void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
-/**
- * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
- *
- * @bp: driver handle
- *
- * If bp->state is OPEN, should be called with
- * netif_addr_lock_bh().
- */
-int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
-
-/**
- * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
- *
- * @bp: driver handle
- * @cl_id: client id
- * @rx_mode_flags: rx mode configuration
- * @rx_accept_flags: rx accept configuration
- * @tx_accept_flags: tx accept configuration (tx switch)
- * @ramrod_flags: ramrod configuration
- */
-int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags);
-
/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
bool bnx2x_clear_pf_load(struct bnx2x *bp);
@@ -524,7 +495,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
/* select_queue callback */
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
@@ -564,9 +536,6 @@ int bnx2x_reload_if_running(struct net_device *dev);
int bnx2x_change_mac_addr(struct net_device *dev, void *p);
-/* NAPI poll Rx part */
-int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
-
/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
@@ -577,13 +546,9 @@ int bnx2x_resume(struct pci_dev *pdev);
/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);
-void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
void bnx2x_free_fp_mem(struct bnx2x *bp);
-int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
-int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
-void bnx2x_free_skbs_cnic(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
@@ -607,15 +572,6 @@ int bnx2x_enable_msix(struct bnx2x *bp);
int bnx2x_enable_msi(struct bnx2x *bp);
/**
- * bnx2x_poll - NAPI callback
- *
- * @napi: napi structure
- * @budget:
- *
- */
-int bnx2x_poll(struct napi_struct *napi, int budget);
-
-/**
* bnx2x_low_latency_recv - LL callback
*
* @napi: napi structure
@@ -861,30 +817,6 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
sge->addr_lo = 0;
}
-static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
-{
- int i;
-
- /* Add NAPI objects */
- for_each_rx_queue_cnic(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
- napi_hash_add(&bnx2x_fp(bp, i, napi));
- }
-}
-
-static inline void bnx2x_add_all_napi(struct bnx2x *bp)
-{
- int i;
-
- /* Add NAPI objects */
- for_each_eth_queue(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
- napi_hash_add(&bnx2x_fp(bp, i, napi));
- }
-}
-
static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
{
int i;
@@ -918,14 +850,6 @@ static inline void bnx2x_disable_msi(struct bnx2x *bp)
}
}
-static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
-{
- return num_queues ?
- min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
- min_t(int, netif_get_num_default_rss_queues(),
- BNX2X_MAX_QUEUES(bp));
-}
-
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
int i, j;
@@ -1012,7 +936,7 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
- start_params->gre_tunnel_mode = IPGRE_TUNNEL;
+ start_params->gre_tunnel_mode = L2GRE_TUNNEL;
start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
return bnx2x_func_state_change(bp, &func_params);
@@ -1172,8 +1096,6 @@ static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
return fp->cl_id;
}
-u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
-
static inline void bnx2x_init_txdata(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata, u32 cid,
int txq_index, __le16 *tx_cons_sb,
@@ -1206,47 +1128,6 @@ static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
return bp->igu_base_sb;
}
-static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
-{
- struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
- unsigned long q_type = 0;
-
- bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
- bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
- BNX2X_FCOE_ETH_CL_ID_IDX);
- bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
- bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
- bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
- bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
- bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
- fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
- fp);
-
- DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
-
- /* qZone id equals to FW (per path) client id */
- bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
- /* init shortcut */
- bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
- bnx2x_rx_ustorm_prods_offset(fp);
-
- /* Configure Queue State object */
- __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
- __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
-
- /* No multi-CoS for FCoE L2 client */
- BUG_ON(fp->max_cos != 1);
-
- bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
- &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
- bnx2x_sp_mapping(bp, q_rdata), q_type);
-
- DP(NETIF_MSG_IFUP,
- "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
- fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
- fp->igu_sb_id);
-}
-
static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata)
{
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 32d0f1435fb4..38fc794c1655 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -358,49 +358,47 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cfg_idx = bnx2x_get_link_cfg_idx(bp);
old_multi_phy_config = bp->link_params.multi_phy_config;
- switch (cmd->port) {
- case PORT_TP:
- if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
- break; /* no port change */
-
- if (!(bp->port.supported[0] & SUPPORTED_TP ||
- bp->port.supported[1] & SUPPORTED_TP)) {
- DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
- return -EINVAL;
- }
- bp->link_params.multi_phy_config &=
- ~PORT_HW_CFG_PHY_SELECTION_MASK;
- if (bp->link_params.multi_phy_config &
- PORT_HW_CFG_PHY_SWAPPED_ENABLED)
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
- else
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
- break;
- case PORT_FIBRE:
- case PORT_DA:
- if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
- break; /* no port change */
-
- if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
- bp->port.supported[1] & SUPPORTED_FIBRE)) {
+ if (cmd->port != bnx2x_get_port_type(bp)) {
+ switch (cmd->port) {
+ case PORT_TP:
+ if (!(bp->port.supported[0] & SUPPORTED_TP ||
+ bp->port.supported[1] & SUPPORTED_TP)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ break;
+ case PORT_FIBRE:
+ case PORT_DA:
+ if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
+ bp->port.supported[1] & SUPPORTED_FIBRE)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ break;
+ default:
DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
return -EINVAL;
}
- bp->link_params.multi_phy_config &=
- ~PORT_HW_CFG_PHY_SELECTION_MASK;
- if (bp->link_params.multi_phy_config &
- PORT_HW_CFG_PHY_SWAPPED_ENABLED)
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
- else
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
- break;
- default:
- DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
- return -EINVAL;
}
/* Save new config in case command complete successfully */
new_multi_phy_config = bp->link_params.multi_phy_config;
@@ -1639,6 +1637,12 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
memcpy(&val, data_buf, 4);
+ /* Notice unlike bnx2x_nvram_read_dword() this will not
+ * change val using be32_to_cpu(), which causes data to flip
+ * if the eeprom is read and then written back. This is due
+ * to tools utilizing this functionality that would break
+ * if this would be resolved.
+ */
rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
/* advance to the next dword */
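The comment above is easy to verify in isolation: the read path converts each dword from big-endian while the write path pushes it back unconverted, so making the write path symmetric would byte-swap data on a read-modify-write cycle and break tools that rely on the current behaviour. A stand-alone illustration on a little-endian host, with made-up values:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t on_flash = 0x11223344;		/* dword as stored in NVRAM */
	uint32_t as_read  = ntohl(on_flash);	/* read path: be32 -> cpu */
	uint32_t written  = as_read;		/* write path: no conversion */

	/* On little-endian this prints 0x11223344 vs 0x44332211: converting
	 * on read but not on write flips the stored bytes when a word is
	 * read and written straight back. */
	printf("before: 0x%08x  after round trip: 0x%08x\n",
	       on_flash, written);
	return 0;
}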
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 20dcc02431ca..9b6b3d7304b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -205,6 +205,11 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
(_bank + (_addr & 0xf)), \
_val)
+static int bnx2x_check_half_open_conn(struct link_params *params,
+ struct link_vars *vars, u8 notify);
+static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params);
+
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
@@ -1399,57 +1404,6 @@ static void bnx2x_update_pfc_xmac(struct link_params *params,
udelay(30);
}
-
-static void bnx2x_emac_get_pfc_stat(struct link_params *params,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2])
-{
- /* Read pfc statistic */
- struct bnx2x *bp = params->bp;
- u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
- u32 val_xon = 0;
- u32 val_xoff = 0;
-
- DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
-
- /* PFC received frames */
- val_xoff = REG_RD(bp, emac_base +
- EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
- val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
- val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
- val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
-
- pfc_frames_received[0] = val_xon + val_xoff;
-
- /* PFC received sent */
- val_xoff = REG_RD(bp, emac_base +
- EMAC_REG_RX_PFC_STATS_XOFF_SENT);
- val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
- val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
- val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
-
- pfc_frames_sent[0] = val_xon + val_xoff;
-}
-
-/* Read pfc statistic*/
-void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2])
-{
- /* Read pfc statistic */
- struct bnx2x *bp = params->bp;
-
- DP(NETIF_MSG_LINK, "pfc statistic\n");
-
- if (!vars->link_up)
- return;
-
- if (vars->mac_type == MAC_TYPE_EMAC) {
- DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
- bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
- pfc_frames_received);
- }
-}
/******************************************************************/
/* MAC/PBF section */
/******************************************************************/
@@ -3865,6 +3819,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
} else {
+ /* Enable Auto-Detect to support 1G over CL37 as well */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
+
+ /* Force cl48 sync_status LOW to avoid getting stuck in CL73
+ * parallel-detect loop when CL73 and CL37 are enabled.
+ */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
+ bnx2x_set_aer_mmd(params, phy);
+
bnx2x_disable_kr2(params, vars, phy);
}
@@ -8120,17 +8087,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
*edc_mode = EDC_MODE_ACTIVE_DAC;
else
check_limiting_mode = 1;
- } else if (copper_module_type &
- SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+ } else {
+ *edc_mode = EDC_MODE_PASSIVE_DAC;
+ /* Even in case PASSIVE_DAC indication is not set,
+ * treat it as a passive DAC cable, since some cables
+ * don't have this indication.
+ */
+ if (copper_module_type &
+ SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
DP(NETIF_MSG_LINK,
"Passive Copper cable detected\n");
- *edc_mode =
- EDC_MODE_PASSIVE_DAC;
- } else {
- DP(NETIF_MSG_LINK,
- "Unknown copper-cable-type 0x%x !!!\n",
- copper_module_type);
- return -EINVAL;
+ } else {
+ DP(NETIF_MSG_LINK,
+ "Unknown copper-cable-type\n");
+ }
}
break;
}
@@ -8632,8 +8602,8 @@ static void bnx2x_set_limiting_mode(struct link_params *params,
}
}
-int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
- struct link_params *params)
+static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 edc_mode;
@@ -10825,9 +10795,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
(1<<11));
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
- (phy->req_line_speed == SPEED_1000)) {
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == SPEED_1000)) {
an_1000_val |= (1<<8);
autoneg_val |= (1<<9 | 1<<12);
if (phy->req_duplex == DUPLEX_FULL)
@@ -10843,30 +10813,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
0x09,
&an_1000_val);
- /* Set 100 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
- an_10_100_val |= (1<<7);
- /* Enable autoneg and restart autoneg for legacy speeds */
- autoneg_val |= (1<<9 | 1<<12);
-
- if (phy->req_duplex == DUPLEX_FULL)
- an_10_100_val |= (1<<8);
- DP(NETIF_MSG_LINK, "Advertising 100M\n");
- }
-
- /* Set 10 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
- an_10_100_val |= (1<<5);
- autoneg_val |= (1<<9 | 1<<12);
- if (phy->req_duplex == DUPLEX_FULL)
+ /* Advertise 10/100 link speed */
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
an_10_100_val |= (1<<6);
- DP(NETIF_MSG_LINK, "Advertising 10M\n");
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+ an_10_100_val |= (1<<7);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+ an_10_100_val |= (1<<8);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+ }
}
/* Only 10/100 are allowed to work in FORCE mode */
@@ -13342,6 +13314,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
old_status, status);
+ /* Do not touch the link in case physical link down */
+ if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+ return 1;
+
/* a. Update shmem->link_status accordingly
* b. Update link_vars->link_up
*/
@@ -13391,9 +13367,9 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
* a fault, for example, due to break in the TX side of fiber.
*
******************************************************************************/
-int bnx2x_check_half_open_conn(struct link_params *params,
- struct link_vars *vars,
- u8 notify)
+static int bnx2x_check_half_open_conn(struct link_params *params,
+ struct link_vars *vars,
+ u8 notify)
{
struct bnx2x *bp = params->bp;
u32 lss_status = 0;
@@ -13550,7 +13526,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
*/
not_kr2_device = (((base_page & 0x8000) == 0) ||
(((base_page & 0x8000) &&
- ((next_page & 0xe0) == 0x2))));
+ ((next_page & 0xe0) == 0x20))));
/* In case KR2 is already disabled, check if we need to re-enable it */
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
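The one-character change in the base-page/next-page test above matters because masking with 0xe0 keeps only bits 5-7, so comparing the result against 0x2 can never succeed; 0x20 is the corrected constant sitting inside the masked bit range. A small demonstration with an illustrative next-page word:

#include <stdio.h>

int main(void)
{
	unsigned int next_page = 0x25;	/* example word; masked bits = 0x20 */

	printf("old test: %d\n", (next_page & 0xe0) == 0x2);	/* always 0 */
	printf("new test: %d\n", (next_page & 0xe0) == 0x20);	/* matches  */
	return 0;
}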
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 4df45234fdc0..389f5f8cb0a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -533,19 +533,11 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
int bnx2x_ets_e3b0_config(const struct link_params *params,
const struct link_vars *vars,
struct bnx2x_ets_params *ets_params);
-/* Read pfc statistic*/
-void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2]);
+
void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
u32 chip_id, u32 shmem_base, u32 shmem2_base,
u8 port);
-int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
- struct link_params *params);
-
void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
-int bnx2x_check_half_open_conn(struct link_params *params,
- struct link_vars *vars, u8 notify);
#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 814d0eca9b33..7d4382286457 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -93,30 +94,30 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
-int num_queues;
-module_param(num_queues, int, 0);
+int bnx2x_num_queues;
+module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
MODULE_PARM_DESC(num_queues,
" Set number of queues (default is as a number of CPUs)");
static int disable_tpa;
-module_param(disable_tpa, int, 0);
+module_param(disable_tpa, int, S_IRUGO);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
-int int_mode;
-module_param(int_mode, int, 0);
+static int int_mode;
+module_param(int_mode, int, S_IRUGO);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
static int dropless_fc;
-module_param(dropless_fc, int, 0);
+module_param(dropless_fc, int, S_IRUGO);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
static int mrrs = -1;
-module_param(mrrs, int, 0);
+module_param(mrrs, int, S_IRUGO);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
static int debug;
-module_param(debug, int, 0);
+module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, " Default debug msglevel");
struct workqueue_struct *bnx2x_wq;
@@ -278,6 +279,12 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);
+
+/* Forward declaration */
+static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
+static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
+static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+
/****************************************************************************
* General service functions
****************************************************************************/
@@ -3000,6 +3007,9 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
if (zero_stats)
__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+ if (bp->flags & TX_SWITCHING)
+ __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
+
__set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
__set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
@@ -3297,6 +3307,10 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
ether_stat->txq_size = bp->tx_ring_size;
ether_stat->rxq_size = bp->rx_ring_size;
+
+#ifdef CONFIG_BNX2X_SRIOV
+ ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
+#endif
}
static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
@@ -5852,11 +5866,11 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
}
/* called with netif_addr_lock_bh() */
-int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags)
+static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags)
{
struct bnx2x_rx_mode_ramrod_params ramrod_param;
int rc;
@@ -5964,7 +5978,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
}
/* called with netif_addr_lock_bh() */
-int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
unsigned long rx_mode_flags = 0, ramrod_flags = 0;
unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
@@ -6160,6 +6174,47 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp)
bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}
+static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
+{
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+ unsigned long q_type = 0;
+
+ bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
+ bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
+ BNX2X_FCOE_ETH_CL_ID_IDX);
+ bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
+ bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
+ bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
+ bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
+ bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
+ fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
+ fp);
+
+ DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
+
+ /* qZone id equals to FW (per path) client id */
+ bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
+ /* init shortcut */
+ bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
+ bnx2x_rx_ustorm_prods_offset(fp);
+
+ /* Configure Queue State object */
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ /* No multi-CoS for FCoE L2 client */
+ BUG_ON(fp->max_cos != 1);
+
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
+ &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+ DP(NETIF_MSG_IFUP,
+ "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
+ fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
+}
+
void bnx2x_nic_init_cnic(struct bnx2x *bp)
{
if (!NO_FCOE(bp))
@@ -8732,16 +8787,16 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
int path = BP_PATH(bp);
DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- load_count[path][0]--;
- load_count[path][1 + port]--;
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ bnx2x_load_count[path][0]--;
+ bnx2x_load_count[path][1 + port]--;
DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- if (load_count[path][0] == 0)
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ if (bnx2x_load_count[path][0] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
- else if (load_count[path][1 + port] == 0)
+ else if (bnx2x_load_count[path][1 + port] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
else
reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -9767,7 +9822,7 @@ period_task_exit:
* Init service functions
*/
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
@@ -9854,6 +9909,64 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
+#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
+#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
+#define BCM_5710_UNDI_FW_MF_VERS (0x05)
+#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4))
+static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
+{
+ u8 major, minor, version;
+ u32 fw;
+
+ /* Must check that FW is loaded */
+ if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
+ MISC_REGISTERS_RESET_REG_1_RST_XSEM)) {
+ BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n");
+ return false;
+ }
+
+ /* Read Currently loaded FW version */
+ fw = REG_RD(bp, XSEM_REG_PRAM);
+ major = fw & 0xff;
+ minor = (fw >> 0x8) & 0xff;
+ version = (fw >> 0x10) & 0xff;
+ BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n",
+ fw, major, minor, version);
+
+ if (major > BCM_5710_UNDI_FW_MF_MAJOR)
+ return true;
+
+ if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
+ (minor > BCM_5710_UNDI_FW_MF_MINOR))
+ return true;
+
+ if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
+ (minor == BCM_5710_UNDI_FW_MF_MINOR) &&
+ (version >= BCM_5710_UNDI_FW_MF_VERS))
+ return true;
+
+ return false;
+}
+
+static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp)
+{
+ int i;
+
+ /* Due to legacy (FW) code, the first function on each engine has a
+ * different offset macro from the rest of the functions.
+ * Setting this for all 8 functions is harmless regardless of whether
+ * this is actually a multi-function device.
+ */
+ for (i = 0; i < 2; i++)
+ REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1);
+
+ for (i = 2; i < 8; i++)
+ REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1);
+
+ BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n");
+}
+
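bnx2x_prev_unload_undi_fw_supports_mf() is a plain major/minor/patch threshold test against firmware 7.8.5. The same comparison as a stand-alone helper over an illustrative packed version word, useful for sanity-checking the boundary cases:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FW_MF_MAJOR	0x07
#define FW_MF_MINOR	0x08
#define FW_MF_VERS	0x05

/* Packed as in the hunk above: major in bits 0-7, minor in bits 8-15,
 * version in bits 16-23 of the word read from the firmware RAM. */
static bool fw_supports_mf(uint32_t fw)
{
	uint8_t major = fw & 0xff;
	uint8_t minor = (fw >> 8) & 0xff;
	uint8_t vers  = (fw >> 16) & 0xff;

	if (major != FW_MF_MAJOR)
		return major > FW_MF_MAJOR;
	if (minor != FW_MF_MINOR)
		return minor > FW_MF_MINOR;
	return vers >= FW_MF_VERS;
}

int main(void)
{
	printf("7.8.5 -> %d  7.8.4 -> %d  7.9.0 -> %d  8.0.0 -> %d\n",
	       fw_supports_mf(0x050807), fw_supports_mf(0x040807),
	       fw_supports_mf(0x000907), fw_supports_mf(0x000008));
	return 0;
}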
static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
{
u16 rcq, bd;
@@ -10054,7 +10167,7 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
*/
- rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
+ rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
if (!rc) {
/* fw version is good */
@@ -10142,10 +10255,17 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
else
timer_count--;
- /* If UNDI resides in memory, manually increment it */
- if (prev_undi)
+ /* New UNDI FW supports MF and contains better
+ * cleaning methods - might be redundant but harmless.
+ */
+ if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
+ bnx2x_prev_unload_undi_mf(bp);
+ } else if (prev_undi) {
+ /* If UNDI resides in memory,
+ * manually increment it
+ */
bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
-
+ }
udelay(10);
}
@@ -10265,8 +10385,8 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
} while (--time_counter);
if (!time_counter || rc) {
- BNX2X_ERR("Failed unloading previous driver, aborting\n");
- rc = -EBUSY;
+ BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n");
+ rc = -EPROBE_DEFER;
}
/* Mark function if its port was used to boot from SAN */
@@ -11447,9 +11567,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
}
}
- /* adjust igu_sb_cnt to MF for E1x */
- if (CHIP_IS_E1x(bp) && IS_MF(bp))
- bp->igu_sb_cnt /= E1HVN_MAX;
+ /* adjust igu_sb_cnt to MF for E1H */
+ if (CHIP_IS_E1H(bp) && IS_MF(bp))
+ bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
/* port info */
bnx2x_get_port_hwinfo(bp);
@@ -11636,7 +11756,11 @@ static int bnx2x_init_bp(struct bnx2x *bp)
DRV_MSG_SEQ_NUMBER_MASK;
BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
- bnx2x_prev_unload(bp);
+ rc = bnx2x_prev_unload(bp);
+ if (rc) {
+ bnx2x_free_mem_bp(bp);
+ return rc;
+ }
}
if (CHIP_REV_IS_FPGA(bp))
@@ -11931,7 +12055,7 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
}
/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
-void bnx2x_set_rx_mode(struct net_device *dev)
+static void bnx2x_set_rx_mode(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -12156,6 +12280,14 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
return 0;
}
+static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
+{
+ if (bp->flags & AER_ENABLED) {
+ pci_disable_pcie_error_reporting(bp->pdev);
+ bp->flags &= ~AER_ENABLED;
+ }
+}
+
static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
struct net_device *dev, unsigned long board_type)
{
@@ -12262,6 +12394,14 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
/* clean indirect addresses */
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
PCICFG_VENDOR_ID_OFFSET);
+
+ /* AER (Advanced Error reporting) configuration */
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (!rc)
+ bp->flags |= AER_ENABLED;
+ else
+ BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc);
+
/*
* Clean the following indirect addresses for all functions since it
* is not used by the driver.
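Together with bnx2x_disable_pcie_error_reporting() above, the probe path now follows the usual AER pattern: enable reporting once config space is accessible, remember success in a driver flag, and tear it down symmetrically on error and remove paths. A hedged sketch with illustrative names (my_dev, MY_FLAG_AER):

#include <linux/aer.h>
#include <linux/pci.h>

struct my_dev {
	struct pci_dev *pdev;
	u32 flags;
#define MY_FLAG_AER	0x1
};

/* Enable PCIe Advanced Error Reporting and remember that it worked, so
 * the teardown path only disables what was actually enabled. */
static void my_enable_aer(struct my_dev *dev)
{
	if (!pci_enable_pcie_error_reporting(dev->pdev))
		dev->flags |= MY_FLAG_AER;
}

static void my_disable_aer(struct my_dev *dev)
{
	if (dev->flags & MY_FLAG_AER) {
		pci_disable_pcie_error_reporting(dev->pdev);
		dev->flags &= ~MY_FLAG_AER;
	}
}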
@@ -12693,8 +12833,6 @@ static int set_is_vf(int chip_id)
}
}
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
-
static int bnx2x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -12869,6 +13007,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
return 0;
init_one_exit:
+ bnx2x_disable_pcie_error_reporting(bp);
+
if (bp->regview)
iounmap(bp->regview);
@@ -12942,27 +13082,29 @@ static void __bnx2x_remove(struct pci_dev *pdev,
pci_set_power_state(pdev, PCI_D3hot);
}
- if (bp->regview)
- iounmap(bp->regview);
+ bnx2x_disable_pcie_error_reporting(bp);
+ if (remove_netdev) {
+ if (bp->regview)
+ iounmap(bp->regview);
- /* for vf doorbells are part of the regview and were unmapped along with
- * it. FW is only loaded by PF.
- */
- if (IS_PF(bp)) {
- if (bp->doorbells)
- iounmap(bp->doorbells);
+ /* For vfs, doorbells are part of the regview and were unmapped
+ * along with it. FW is only loaded by PF.
+ */
+ if (IS_PF(bp)) {
+ if (bp->doorbells)
+ iounmap(bp->doorbells);
- bnx2x_release_firmware(bp);
- }
- bnx2x_free_mem_bp(bp);
+ bnx2x_release_firmware(bp);
+ }
+ bnx2x_free_mem_bp(bp);
- if (remove_netdev)
free_netdev(dev);
- if (atomic_read(&pdev->enable_cnt) == 1)
- pci_release_regions(pdev);
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
- pci_disable_device(pdev);
+ pci_disable_device(pdev);
+ }
}
static void bnx2x_remove_one(struct pci_dev *pdev)
@@ -13119,6 +13261,14 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
rtnl_unlock();
+ /* If AER, perform cleanup of the PCIe registers */
+ if (bp->flags & AER_ENABLED) {
+ if (pci_cleanup_aer_uncorrect_error_status(pdev))
+ BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
+ else
+ DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
+ }
+
return PCI_ERS_RESULT_RECOVERED;
}
@@ -13757,7 +13907,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
return 0;
}
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
+static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
@@ -13807,7 +13957,7 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
return cp;
}
-u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
struct bnx2x *bp = fp->bp;
u32 offset = BAR_USTRORM_INTMEM;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 3efbb35267c8..2beb5430b876 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -5932,6 +5932,7 @@
#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
+#define MISC_REGISTERS_RESET_REG_1_RST_XSEM (0x1<<22)
#define MISC_REGISTERS_RESET_REG_1_SET 0x584
#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24)
@@ -7179,6 +7180,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
+#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa
#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
#define MDIO_WC_REG_XGXS_STATUS3 0x8129
#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 32c92abf5094..0fb6ff2ac8e3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -355,23 +355,6 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->get(vp, 1);
}
-
-static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
-{
- struct bnx2x_credit_pool_obj *mp = o->macs_pool;
- struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
-
- if (!mp->get(mp, 1))
- return false;
-
- if (!vp->get(vp, 1)) {
- mp->put(mp, 1);
- return false;
- }
-
- return true;
-}
-
static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
struct bnx2x_credit_pool_obj *mp = o->macs_pool;
@@ -400,22 +383,6 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->put(vp, 1);
}
-static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
-{
- struct bnx2x_credit_pool_obj *mp = o->macs_pool;
- struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
-
- if (!mp->put(mp, 1))
- return false;
-
- if (!vp->put(vp, 1)) {
- mp->get(mp, 1);
- return false;
- }
-
- return true;
-}
-
/**
* __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
*
@@ -507,22 +474,6 @@ static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
}
}
-/**
- * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
- *
- * @bp: device handle
- * @o: vlan_mac object
- *
- * @details Notice if a pending execution exists, it would perform it -
- * possibly releasing and reclaiming the execution queue lock.
- */
-void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o)
-{
- spin_lock_bh(&o->exe_queue.lock);
- __bnx2x_vlan_mac_h_write_unlock(bp, o);
- spin_unlock_bh(&o->exe_queue.lock);
-}
/**
* __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
@@ -663,7 +614,7 @@ static int bnx2x_check_mac_add(struct bnx2x *bp,
/* Check if a requested MAC already exists */
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
+ if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
(data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return -EEXIST;
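ether_addr_equal() is the preferred spelling of memcmp(a, b, ETH_ALEN) == 0 for 6-byte MAC addresses; note the inverted sense of the return value and that both addresses must be 16-bit aligned per the helper's contract. A one-line sketch:

#include <linux/etherdevice.h>

/* Returns true when the two MAC addresses match. */
static bool my_mac_matches(const u8 *want, const u8 *have)
{
	return ether_addr_equal(want, have);
}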
@@ -685,26 +636,6 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp,
return 0;
}
-static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- union bnx2x_classification_ramrod_data *data)
-{
- struct bnx2x_vlan_mac_registry_elem *pos;
-
- DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
- data->vlan_mac.mac, data->vlan_mac.vlan);
-
- list_for_each_entry(pos, &o->head, link)
- if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
- (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)) &&
- (data->vlan_mac.is_inner_mac ==
- pos->u.vlan_mac.is_inner_mac))
- return -EEXIST;
-
- return 0;
-}
-
/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
bnx2x_check_mac_del(struct bnx2x *bp,
@@ -716,7 +647,7 @@ static struct bnx2x_vlan_mac_registry_elem *
DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
list_for_each_entry(pos, &o->head, link)
- if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
+ if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
(data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return pos;
@@ -739,27 +670,6 @@ static struct bnx2x_vlan_mac_registry_elem *
return NULL;
}
-static struct bnx2x_vlan_mac_registry_elem *
- bnx2x_check_vlan_mac_del(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- union bnx2x_classification_ramrod_data *data)
-{
- struct bnx2x_vlan_mac_registry_elem *pos;
-
- DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
- data->vlan_mac.mac, data->vlan_mac.vlan);
-
- list_for_each_entry(pos, &o->head, link)
- if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
- (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)) &&
- (data->vlan_mac.is_inner_mac ==
- pos->u.vlan_mac.is_inner_mac))
- return pos;
-
- return NULL;
-}
-
/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *src_o,
@@ -811,8 +721,8 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
return rx_tx_flag;
}
-void bnx2x_set_mac_in_nig(struct bnx2x *bp,
- bool add, unsigned char *dev_addr, int index)
+static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+ bool add, unsigned char *dev_addr, int index)
{
u32 wb_data[2];
u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
@@ -1126,97 +1036,6 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
rule_cnt);
}
-static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- struct bnx2x_exeq_elem *elem,
- int rule_idx, int cam_offset)
-{
- struct bnx2x_raw_obj *raw = &o->raw;
- struct eth_classify_rules_ramrod_data *data =
- (struct eth_classify_rules_ramrod_data *)(raw->rdata);
- int rule_cnt = rule_idx + 1;
- union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
- enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
- bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
- u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
- u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
-
- /* Reset the ramrod data buffer for the first rule */
- if (rule_idx == 0)
- memset(data, 0, sizeof(*data));
-
- /* Set a rule header */
- bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
- &rule_entry->pair.header);
-
- /* Set VLAN and MAC themselves */
- rule_entry->pair.vlan = cpu_to_le16(vlan);
- bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
- &rule_entry->pair.mac_mid,
- &rule_entry->pair.mac_lsb, mac);
- rule_entry->pair.inner_mac =
- cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
- /* MOVE: Add a rule that will add this MAC to the target Queue */
- if (cmd == BNX2X_VLAN_MAC_MOVE) {
- rule_entry++;
- rule_cnt++;
-
- /* Setup ramrod data */
- bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
- elem->cmd_data.vlan_mac.target_obj,
- true, CLASSIFY_RULE_OPCODE_PAIR,
- &rule_entry->pair.header);
-
- /* Set a VLAN itself */
- rule_entry->pair.vlan = cpu_to_le16(vlan);
- bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
- &rule_entry->pair.mac_mid,
- &rule_entry->pair.mac_lsb, mac);
- rule_entry->pair.inner_mac =
- cpu_to_le16(elem->cmd_data.vlan_mac.u.
- vlan_mac.is_inner_mac);
- }
-
- /* Set the ramrod data header */
- /* TODO: take this to the higher level in order to prevent multiple
- writing */
- bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
- rule_cnt);
-}
-
-/**
- * bnx2x_set_one_vlan_mac_e1h -
- *
- * @bp: device handle
- * @o: bnx2x_vlan_mac_obj
- * @elem: bnx2x_exeq_elem
- * @rule_idx: rule_idx
- * @cam_offset: cam_offset
- */
-static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- struct bnx2x_exeq_elem *elem,
- int rule_idx, int cam_offset)
-{
- struct bnx2x_raw_obj *raw = &o->raw;
- struct mac_configuration_cmd *config =
- (struct mac_configuration_cmd *)(raw->rdata);
- /* 57710 and 57711 do not support MOVE command,
- * so it's either ADD or DEL
- */
- bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
- true : false;
-
- /* Reset the ramrod data buffer */
- memset(config, 0, sizeof(*config));
-
- bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
- cam_offset, add,
- elem->cmd_data.vlan_mac.u.vlan_mac.mac,
- elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
- ETH_VLAN_FILTER_CLASSIFY, config);
-}
-
/**
* bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
*
@@ -1316,24 +1135,6 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
return NULL;
}
-static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
- struct bnx2x_exe_queue_obj *o,
- struct bnx2x_exeq_elem *elem)
-{
- struct bnx2x_exeq_elem *pos;
- struct bnx2x_vlan_mac_ramrod_data *data =
- &elem->cmd_data.vlan_mac.u.vlan_mac;
-
- /* Check pending for execution commands */
- list_for_each_entry(pos, &o->exe_queue, link)
- if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
- sizeof(*data)) &&
- (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
- return pos;
-
- return NULL;
-}
-
/**
* bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
*
@@ -2038,6 +1839,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
struct bnx2x_vlan_mac_ramrod_params p;
struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+ unsigned long flags;
int read_lock;
int rc = 0;
@@ -2046,8 +1848,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
spin_lock_bh(&exeq->lock);
list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
- if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
- *vlan_mac_flags) {
+ flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
+ if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+ BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
rc = exeq->remove(bp, exeq->owner, exeq_pos);
if (rc) {
BNX2X_ERR("Failed to remove command\n");
@@ -2080,7 +1883,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
return read_lock;
list_for_each_entry(pos, &o->head, link) {
- if (pos->vlan_mac_flags == *vlan_mac_flags) {
+ flags = pos->vlan_mac_flags;
+ if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+ BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
rc = bnx2x_config_vlan_mac(bp, &p);
@@ -2237,69 +2042,6 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
}
}
-void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac_obj,
- u8 cl_id, u32 cid, u8 func_id, void *rdata,
- dma_addr_t rdata_mapping, int state,
- unsigned long *pstate, bnx2x_obj_type type,
- struct bnx2x_credit_pool_obj *macs_pool,
- struct bnx2x_credit_pool_obj *vlans_pool)
-{
- union bnx2x_qable_obj *qable_obj =
- (union bnx2x_qable_obj *)vlan_mac_obj;
-
- bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
- rdata_mapping, state, pstate, type,
- macs_pool, vlans_pool);
-
- /* CAM pool handling */
- vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
- vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
- /* CAM offset is relevant for 57710 and 57711 chips only which have a
- * single CAM for both MACs and VLAN-MAC pairs. So the offset
- * will be taken from MACs' pool object only.
- */
- vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
- vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
-
- if (CHIP_IS_E1(bp)) {
- BNX2X_ERR("Do not support chips others than E2\n");
- BUG();
- } else if (CHIP_IS_E1H(bp)) {
- vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
- vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
- vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
- vlan_mac_obj->check_move = bnx2x_check_move_always_err;
- vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
-
- /* Exe Queue */
- bnx2x_exe_queue_init(bp,
- &vlan_mac_obj->exe_queue, 1, qable_obj,
- bnx2x_validate_vlan_mac,
- bnx2x_remove_vlan_mac,
- bnx2x_optimize_vlan_mac,
- bnx2x_execute_vlan_mac,
- bnx2x_exeq_get_vlan_mac);
- } else {
- vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
- vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
- vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
- vlan_mac_obj->check_move = bnx2x_check_move;
- vlan_mac_obj->ramrod_cmd =
- RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
-
- /* Exe Queue */
- bnx2x_exe_queue_init(bp,
- &vlan_mac_obj->exe_queue,
- CLASSIFY_RULES_COUNT,
- qable_obj, bnx2x_validate_vlan_mac,
- bnx2x_remove_vlan_mac,
- bnx2x_optimize_vlan_mac,
- bnx2x_execute_vlan_mac,
- bnx2x_exeq_get_vlan_mac);
- }
-}
-
/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static inline void __storm_memset_mac_filters(struct bnx2x *bp,
struct tstorm_eth_mac_filter_config *mac_filters,
@@ -4382,8 +4124,11 @@ int bnx2x_config_rss(struct bnx2x *bp,
struct bnx2x_raw_obj *r = &o->raw;
/* Do nothing if only driver cleanup was requested */
- if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
+ p->ramrod_flags);
return 0;
+ }
r->set_pending(r);
@@ -4983,6 +4728,13 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp,
test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
+
+ /* tx switching */
+ data->tx_switching_flg =
+ test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, &params->update_flags);
+ data->tx_switching_change_flg =
+ test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ &params->update_flags);
}
static inline int bnx2x_q_send_update(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 658f4e33abf9..00d7f214a40a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -266,6 +266,13 @@ enum {
BNX2X_DONT_CONSUME_CAM_CREDIT,
BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
};
+/* When looking for matching filters, some flags are not interesting */
+#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
+ 1 << BNX2X_ETH_MAC | \
+ 1 << BNX2X_ISCSI_ETH_MAC | \
+ 1 << BNX2X_NETQ_ETH_MAC)
+#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
+ ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
struct bnx2x_vlan_mac_ramrod_params {
/* Object to run the command from */
@@ -441,9 +448,6 @@ enum {
BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
};
-void bnx2x_set_mac_in_nig(struct bnx2x *bp,
- bool add, unsigned char *dev_addr, int index);
-
/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
/* RX_MODE ramrod special flags: set in rx_mode_flags field in
@@ -763,7 +767,9 @@ enum {
BNX2X_Q_UPDATE_DEF_VLAN_EN,
BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
- BNX2X_Q_UPDATE_SILENT_VLAN_REM
+ BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ BNX2X_Q_UPDATE_TX_SWITCHING
};
/* Allowed Queue states */
@@ -1300,22 +1306,12 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
unsigned long *pstate, bnx2x_obj_type type,
struct bnx2x_credit_pool_obj *vlans_pool);
-void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac_obj,
- u8 cl_id, u32 cid, u8 func_id, void *rdata,
- dma_addr_t rdata_mapping, int state,
- unsigned long *pstate, bnx2x_obj_type type,
- struct bnx2x_credit_pool_obj *macs_pool,
- struct bnx2x_credit_pool_obj *vlans_pool);
-
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o);
void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o);
int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o);
-void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o);
int bnx2x_config_vlan_mac(struct bnx2x *bp,
struct bnx2x_vlan_mac_ramrod_params *p);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 2e46c28fc601..e42f48df6e94 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -166,6 +166,7 @@ enum bnx2x_vfop_qteardown_state {
BNX2X_VFOP_QTEARDOWN_RXMODE,
BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
BNX2X_VFOP_QTEARDOWN_CLR_MAC,
+ BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
BNX2X_VFOP_QTEARDOWN_QDTOR,
BNX2X_VFOP_QTEARDOWN_DONE
};
@@ -617,7 +618,7 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
&vlan_mac->user_req.vlan_mac_flags,
&vlan_mac->ramrod_flags);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
/* next state */
@@ -628,7 +629,7 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (vfop->rc == -EEXIST)
vfop->rc = 0;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
vfop->rc = !!obj->raw.check_pending(&obj->raw);
@@ -645,7 +646,7 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_VLAN_CONFIG_LIST:
/* next state */
@@ -657,7 +658,7 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
}
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
default:
bnx2x_vfop_default(state);
@@ -798,10 +799,10 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
return -ENOMEM;
}
-int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, u16 vid, bool add)
+static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, u16 vid, bool add)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
int rc;
@@ -1023,25 +1024,35 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
case BNX2X_VFOP_QFLR_CLR_VLAN:
/* vlan-clear-all: driver-only, don't consume credit */
vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
- if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
- true);
- if (vfop->rc)
- goto op_err;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) {
+ /* the vlan_mac vfop will re-schedule us */
+ vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd,
+ qid, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ } else {
+ /* need to reschedule ourselves */
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ }
case BNX2X_VFOP_QFLR_CLR_MAC:
/* mac-clear-all: driver only consume credit */
vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
- if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
- true);
- DP(BNX2X_MSG_IOV,
- "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
- vf->abs_vfid, vfop->rc);
- if (vfop->rc)
- goto op_err;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) {
+ /* the vlan_mac vfop will re-schedule us */
+ vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd,
+ qid, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ } else {
+ /* need to reschedule ourselves */
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ }
case BNX2X_VFOP_QFLR_TERMINATE:
qstate = &vfop->op_p->qctor.qstate;
@@ -1112,7 +1123,10 @@ static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
switch (state) {
case BNX2X_VFOP_MCAST_DEL:
/* clear existing mcasts */
- vfop->state = BNX2X_VFOP_MCAST_ADD;
+ vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
+ : BNX2X_VFOP_MCAST_CHK_DONE;
+ mcast->mcast_list_len = vf->mcast_list_len;
+ vf->mcast_list_len = args->mc_num;
vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
@@ -1120,17 +1134,17 @@ static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (raw->check_pending(raw))
goto op_pending;
- if (args->mc_num) {
- /* update mcast list on the ramrod params */
- INIT_LIST_HEAD(&mcast->mcast_list);
- for (i = 0; i < args->mc_num; i++)
- list_add_tail(&(args->mc[i].link),
- &mcast->mcast_list);
- /* add new mcasts */
- vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
- vfop->rc = bnx2x_config_mcast(bp, mcast,
- BNX2X_MCAST_CMD_ADD);
- }
+ /* update mcast list on the ramrod params */
+ INIT_LIST_HEAD(&mcast->mcast_list);
+ for (i = 0; i < args->mc_num; i++)
+ list_add_tail(&(args->mc[i].link),
+ &mcast->mcast_list);
+ mcast->mcast_list_len = args->mc_num;
+
+ /* add new mcasts */
+ vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
+ vfop->rc = bnx2x_config_mcast(bp, mcast,
+ BNX2X_MCAST_CMD_ADD);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
case BNX2X_VFOP_MCAST_CHK_DONE:
@@ -1209,6 +1223,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
/* next state */
vfop->state = BNX2X_VFOP_RXMODE_DONE;
+ /* record the accept flags in vfdb so hypervisor can modify them
+ * if necessary
+ */
+ bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
+ ramrod->rx_accept_flags;
vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
@@ -1224,39 +1243,43 @@ op_pending:
return;
}
+static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
+ struct bnx2x_rx_mode_ramrod_params *ramrod,
+ struct bnx2x_virtf *vf,
+ unsigned long accept_flags)
+{
+ struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+
+ memset(ramrod, 0, sizeof(*ramrod));
+ ramrod->cid = vfq->cid;
+ ramrod->cl_id = vfq_cl_id(vf, vfq);
+ ramrod->rx_mode_obj = &bp->rx_mode_obj;
+ ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+ ramrod->rx_accept_flags = accept_flags;
+ ramrod->tx_accept_flags = accept_flags;
+ ramrod->pstate = &vf->filter_state;
+ ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+
+ set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+ set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+ set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+
+ ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+ ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+}
+
int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid, unsigned long accept_flags)
{
- struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_rx_mode_ramrod_params *ramrod =
&vf->op_params.rx_mode;
- memset(ramrod, 0, sizeof(*ramrod));
-
- /* Prepare ramrod parameters */
- ramrod->cid = vfq->cid;
- ramrod->cl_id = vfq_cl_id(vf, vfq);
- ramrod->rx_mode_obj = &bp->rx_mode_obj;
- ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
-
- ramrod->rx_accept_flags = accept_flags;
- ramrod->tx_accept_flags = accept_flags;
- ramrod->pstate = &vf->filter_state;
- ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
-
- set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
- set_bit(RAMROD_RX, &ramrod->ramrod_flags);
- set_bit(RAMROD_TX, &ramrod->ramrod_flags);
-
- ramrod->rdata =
- bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
- ramrod->rdata_mapping =
- bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+ bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
bnx2x_vfop_rxmode, cmd->done);
@@ -1303,12 +1326,19 @@ static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
/* mac-clear-all: consume credit */
- vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
+ vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
if (vfop->rc)
goto op_err;
return;
+ case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
+ vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
+ vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
case BNX2X_VFOP_QTEARDOWN_QDTOR:
/* run the queue destruction flow */
DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
@@ -1416,12 +1446,12 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (vf->cfg_flags & VF_CFG_INT_SIMD)
val |= IGU_VF_CONF_SINGLE_ISR_EN;
val &= ~IGU_VF_CONF_PARENT_MASK;
- val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT; /* parent PF */
+ val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
DP(BNX2X_MSG_IOV,
- "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
- vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));
+ "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
+ vf->abs_vfid, val);
bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
@@ -2188,6 +2218,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
* It needs to be initialized here so that it can be safely
* handled by a subsequent FLR flow.
*/
+ vf->mcast_list_len = 0;
bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
0xFF, 0xFF, 0xFF,
bnx2x_vf_sp(bp, vf, mcast_rdata),
@@ -2363,8 +2394,9 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
goto get_vf;
case EVENT_RING_OPCODE_MALICIOUS_VF:
abs_vfid = elem->message.data.malicious_vf_event.vf_id;
- DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
- abs_vfid, elem->message.data.malicious_vf_event.err_id);
+ BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
+ abs_vfid,
+ elem->message.data.malicious_vf_event.err_id);
goto get_vf;
default:
return 1;
@@ -2416,15 +2448,9 @@ get_vf:
bnx2x_vf_handle_filters_eqe(bp, vf);
break;
case EVENT_RING_OPCODE_VF_FLR:
- DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
- vf->abs_vfid);
- /* Do nothing for now */
- break;
case EVENT_RING_OPCODE_MALICIOUS_VF:
- DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
- abs_vfid, elem->message.data.malicious_vf_event.err_id);
/* Do nothing for now */
- break;
+ return 0;
}
/* SRIOV: reschedule any 'in_progress' operations */
bnx2x_iov_sp_event(bp, cid, false);
@@ -2848,13 +2874,9 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
goto op_err;
return;
}
-
- /* remove multicasts */
vfop->state = BNX2X_VFOP_CLOSE_HW;
- vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
- if (vfop->rc)
- goto op_err;
- return;
+ vfop->rc = 0;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_CLOSE_HW:
@@ -2888,6 +2910,9 @@ op_done:
DP(BNX2X_MSG_IOV, "set state to acquired\n");
bnx2x_vfop_end(bp, vf, vfop);
+op_pending:
+ /* Not supported at the moment; Exists for macros only */
+ return;
}
int bnx2x_vfop_close_cmd(struct bnx2x *bp,
@@ -3110,6 +3135,60 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->abs_vfid, vf->op_current);
}
+static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
+{
+ struct bnx2x_queue_state_params q_params;
+ u32 prev_flags;
+ int i, rc;
+
+ /* Verify changes are needed and record current Tx switching state */
+ prev_flags = bp->flags;
+ if (enable)
+ bp->flags |= TX_SWITCHING;
+ else
+ bp->flags &= ~TX_SWITCHING;
+ if (prev_flags == bp->flags)
+ return 0;
+
+ /* Verify state enables the sending of queue ramrods */
+ if ((bp->state != BNX2X_STATE_OPEN) ||
+ (bnx2x_get_q_logical_state(bp,
+ &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE))
+ return 0;
+
+ /* send q. update ramrod to configure Tx switching */
+ memset(&q_params, 0, sizeof(q_params));
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ &q_params.params.update.update_flags);
+ if (enable)
+ __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+ &q_params.params.update.update_flags);
+ else
+ __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+ &q_params.params.update.update_flags);
+
+ /* send the ramrod on all the queues of the PF */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ /* Set the appropriate Queue object */
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure Tx switching\n");
+ return rc;
+ }
+ }
+
+ DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
+ return 0;
+}
+
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
@@ -3137,12 +3216,14 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
bp->requested_nr_virtfn = num_vfs_param;
if (num_vfs_param == 0) {
+ bnx2x_set_pf_tx_switching(bp, false);
pci_disable_sriov(dev);
return 0;
} else {
return bnx2x_enable_sriov(bp);
}
}
+
#define IGU_ENTRY_SIZE 4
int bnx2x_enable_sriov(struct bnx2x *bp)
@@ -3202,13 +3283,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
bnx2x_iov_static_resc(bp, vf);
}
- /* prepare msix vectors in VF configuration space */
+ /* prepare msix vectors in VF configuration space - the value in the
+ * PCI configuration space should be the index of the last entry,
+ * namely one less than the actual size of the table
+ */
for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
- num_vf_queues);
+ num_vf_queues - 1);
DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
- vf_idx, num_vf_queues);
+ vf_idx, num_vf_queues - 1);
}
bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
@@ -3217,6 +3301,11 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
*/
DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
bnx2x_disable_sriov(bp);
+
+ rc = bnx2x_set_pf_tx_switching(bp, true);
+ if (rc)
+ return rc;
+
rc = pci_enable_sriov(bp->pdev, req_vfs);
if (rc) {
BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
@@ -3436,10 +3525,18 @@ out:
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+ struct bnx2x_queue_update_params *update_params;
+ struct pf_vf_bulletin_content *bulletin = NULL;
+ struct bnx2x_rx_mode_ramrod_params rx_ramrod;
struct bnx2x *bp = netdev_priv(dev);
- int rc, q_logical_state;
+ struct bnx2x_vlan_mac_obj *vlan_obj;
+ unsigned long vlan_mac_flags = 0;
+ unsigned long ramrod_flags = 0;
struct bnx2x_virtf *vf = NULL;
- struct pf_vf_bulletin_content *bulletin = NULL;
+ unsigned long accept_flags;
+ int rc;
/* sanity and init */
rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
@@ -3457,104 +3554,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
/* update PF's copy of the VF's bulletin. No point in posting the vlan
* to the VF since it doesn't have anything to do with it. But it is useful
* to store it here in case the VF is not up yet and we can only
- * configure the vlan later when it does.
+ * configure the vlan later when it does. Treat vlan id 0 as remove the
+ * Host tag.
*/
- bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ if (vlan > 0)
+ bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ else
+ bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
bulletin->vlan = vlan;
/* is vf initialized and queue set up? */
- q_logical_state =
- bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
- if (vf->state == VF_ENABLED &&
- q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
- /* configure the vlan in device on this vf's queue */
- unsigned long ramrod_flags = 0;
- unsigned long vlan_mac_flags = 0;
- struct bnx2x_vlan_mac_obj *vlan_obj =
- &bnx2x_leading_vfq(vf, vlan_obj);
- struct bnx2x_vlan_mac_ramrod_params ramrod_param;
- struct bnx2x_queue_state_params q_params = {NULL};
- struct bnx2x_queue_update_params *update_params;
+ if (vf->state != VF_ENABLED ||
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE)
+ return rc;
- rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
- if (rc)
- return rc;
- memset(&ramrod_param, 0, sizeof(ramrod_param));
+ /* configure the vlan in device on this vf's queue */
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+ rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+ if (rc)
+ return rc;
- /* must lock vfpf channel to protect against vf flows */
- bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
- /* remove existing vlans */
- __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
- &ramrod_flags);
- if (rc) {
- BNX2X_ERR("failed to delete vlans\n");
- rc = -EINVAL;
- goto out;
- }
+ /* remove existing vlans */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("failed to delete vlans\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* need to remove/add the VF's accept_any_vlan bit */
+ accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+ if (vlan)
+ clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+ else
+ set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+ bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+ accept_flags);
+ bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+ bnx2x_config_rx_mode(bp, &rx_ramrod);
+
+ /* configure the new vlan to device */
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ ramrod_param.vlan_mac_obj = vlan_obj;
+ ramrod_param.ramrod_flags = ramrod_flags;
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &ramrod_param.user_req.vlan_mac_flags);
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc) {
+ BNX2X_ERR("failed to configure vlan\n");
+ rc = -EINVAL;
+ goto out;
+ }
- /* send queue update ramrod to configure default vlan and silent
- * vlan removal
+ /* send queue update ramrod to configure default vlan and silent
+ * vlan removal
+ */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &update_params->update_flags);
+ if (vlan == 0) {
+ /* if vlan is 0 then we want to leave the VF traffic
+ * untagged, and leave the incoming traffic untouched
+ * (i.e. do not remove any vlan tags).
*/
- __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
- q_params.cmd = BNX2X_Q_CMD_UPDATE;
- q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
- update_params = &q_params.params.update;
- __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ } else {
+ /* configure default vlan to vf queue and set silent
+ * vlan removal (the vf remains unaware of this vlan).
+ */
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
&update_params->update_flags);
- __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
&update_params->update_flags);
+ update_params->def_vlan = vlan;
+ update_params->silent_removal_value =
+ vlan & VLAN_VID_MASK;
+ update_params->silent_removal_mask = VLAN_VID_MASK;
+ }
- if (vlan == 0) {
- /* if vlan is 0 then we want to leave the VF traffic
- * untagged, and leave the incoming traffic untouched
- * (i.e. do not remove any vlan tags).
- */
- __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
- &update_params->update_flags);
- __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
- &update_params->update_flags);
- } else {
- /* configure the new vlan to device */
- __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- ramrod_param.vlan_mac_obj = vlan_obj;
- ramrod_param.ramrod_flags = ramrod_flags;
- ramrod_param.user_req.u.vlan.vlan = vlan;
- ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
- rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
- if (rc) {
- BNX2X_ERR("failed to configure vlan\n");
- rc = -EINVAL;
- goto out;
- }
-
- /* configure default vlan to vf queue and set silent
- * vlan removal (the vf remains unaware of this vlan).
- */
- update_params = &q_params.params.update;
- __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
- &update_params->update_flags);
- __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
- &update_params->update_flags);
- update_params->def_vlan = vlan;
- }
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure default VLAN\n");
+ goto out;
+ }
- /* Update the Queue state */
- rc = bnx2x_queue_state_change(bp, &q_params);
- if (rc) {
- BNX2X_ERR("Failed to configure default VLAN\n");
- goto out;
- }
- /* clear the flag indicating that this VF needs its vlan
- * (will only be set if the HV configured the Vlan before vf was
- * up and we were called because the VF came up later
- */
+ /* clear the flag indicating that this VF needs its vlan
+ * (will only be set if the HV configured the Vlan before vf was
+ * up and we were called because the VF came up later
+ */
out:
- vf->cfg_flags &= ~VF_CFG_VLAN;
- bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
- }
+ vf->cfg_flags &= ~VF_CFG_VLAN;
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
return rc;
}
@@ -3605,7 +3716,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
/* the mac address in bulletin board is valid and is new */
if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
- memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
+ !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) {
/* update new mac to net device */
memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 1ff6a9366629..d9fcca1b5a9d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -74,6 +74,7 @@ struct bnx2x_vf_queue {
/* VLANs object */
struct bnx2x_vlan_mac_obj vlan_obj;
atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
+ unsigned long accept_flags; /* last accept flags configured */
/* Queue Slow-path State object */
struct bnx2x_queue_sp_obj sp_obj;
@@ -268,6 +269,7 @@ struct bnx2x_virtf {
int leading_rss;
/* MCAST object */
+ int mcast_list_len;
struct bnx2x_mcast_obj mcast_obj;
/* RSS configuration object */
@@ -663,11 +665,6 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
struct bnx2x_vfop_filters *macs,
int qid, bool drv_only);
-int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, u16 vid, bool add);
-
int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
@@ -725,13 +722,6 @@ void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid);
/* Handles an FLR (or VF_DISABLE) notification from the MCP */
void bnx2x_vf_handle_flr_event(struct bnx2x *bp);
-void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
- u16 length);
-void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
- u16 type, u16 length);
-void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv);
-void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list);
-
bool bnx2x_tlv_supported(u16 tlvtype);
u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
@@ -748,7 +738,6 @@ int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bool is_leading);
-int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *params);
@@ -812,7 +801,6 @@ static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
-static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
u8 vf_qid, bool set) {return 0; }
static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index efa8a151d789..3fa6c2a2a5a9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -21,9 +21,11 @@
#include "bnx2x_cmn.h"
#include <linux/crc32.h>
+static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
+
/* place a given tlv on the tlv buffer at a given offset */
-void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
- u16 length)
+static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
+ u16 offset, u16 type, u16 length)
{
struct channel_tlv *tl =
(struct channel_tlv *)(tlvs_list + offset);
@@ -33,8 +35,8 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
}
/* Clear the mailbox and init the header of the first tlv */
-void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
- u16 type, u16 length)
+static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
+ u16 type, u16 length)
{
mutex_lock(&bp->vf2pf_mutex);
@@ -52,7 +54,8 @@ void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
}
/* releases the mailbox */
-void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
+static void bnx2x_vfpf_finalize(struct bnx2x *bp,
+ struct vfpf_first_tlv *first_tlv)
{
DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
first_tlv->tl.type);
@@ -85,7 +88,7 @@ static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
}
/* list the types and lengths of the tlvs on the buffer */
-void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
+static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
int i = 1;
struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
@@ -208,7 +211,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
return -EINVAL;
}
- BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
+ DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
@@ -633,7 +636,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return rc;
}
-int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
+static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
{
struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
@@ -800,14 +803,18 @@ int bnx2x_vfpf_config_rss(struct bnx2x *bp,
}
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
- BNX2X_ERR("failed to send rss message to PF over Vf PF channel %d\n",
- resp->hdr.status);
- rc = -EINVAL;
+ /* Since older drivers don't support this feature (and VF has
+ * no way of knowing other than failing this), don't propagate
+ * an error in this case.
+ */
+ DP(BNX2X_MSG_IOV,
+ "Failed to send rss message to PF over VF-PF channel [%d]\n",
+ resp->hdr.status);
}
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
- return 0;
+ return rc;
}
int bnx2x_vfpf_set_mcast(struct net_device *dev)
@@ -1416,6 +1423,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
setup_q->rxq.cache_line_log;
rxq_params->sb_cq_index = setup_q->rxq.sb_index;
+ /* rx setup - multicast engine */
+ if (bnx2x_vfq_is_leading(q)) {
+ u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ rxq_params->mcast_engine_id = mcast_id;
+ __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
+ }
+
bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
q->index, q->sb_idx);
}
@@ -1598,6 +1613,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
unsigned long accept = 0;
+ struct pf_vf_bulletin_content *bulletin =
+ BP_VF_BULLETIN(bp, vf->index);
/* convert VF-PF if mask to bnx2x accept flags */
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
@@ -1617,9 +1634,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
/* A packet arriving the vf's mac should be accepted
- * with any vlan
+ * with any vlan, unless a vlan has already been
+ * configured.
*/
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+ if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
/* set rx-mode */
rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
@@ -1702,7 +1721,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
/* ...and only the mac set by the ndo */
if (filters->n_mac_vlan_filters == 1 &&
- memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
+ !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
vf->abs_vfid);
@@ -1710,6 +1729,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
goto response;
}
}
+ /* if vlan was set by hypervisor we don't allow guest to config vlan */
+ if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
+ int i;
+
+ /* search for vlan filters */
+ for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+ if (filters->filters[i].flags &
+ VFPF_Q_FILTER_VLAN_TAG_VALID) {
+ BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+ vf->abs_vfid);
+ vf->op_rc = -EPERM;
+ goto response;
+ }
+ }
+ }
/* verify vf_qid */
if (filters->vf_qid > vf_rxq_count(vf))
@@ -1805,6 +1839,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
/* flags handled individually for backward/forward compatibility */
+ vf_op_params->rss_flags = 0;
+ vf_op_params->ramrod_flags = 0;
+
if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
__set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index f58a8b80302d..fcf9105a5476 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5220,6 +5220,7 @@ static void cnic_init_rings(struct cnic_dev *dev)
cnic_ring_ctl(dev, cid, cli, 1);
*cid_ptr = cid >> 4;
*(cid_ptr + 1) = cid * bp->db_size;
+ *(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
}
}
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 0121a5d55192..0d6b13f854d9 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -186,6 +186,8 @@ struct kcq_info {
u16 (*hw_idx)(u16);
};
+#define UIO_USE_TX_DOORBELL 0x017855DB
+
struct cnic_uio_dev {
struct uio_info cnic_uinfo;
u32 uio_dev;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index ebbfe25acaa6..8cf6b1926069 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -14,8 +14,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.18"
-#define CNIC_MODULE_RELDATE "Sept 01, 2013"
+#define CNIC_MODULE_VERSION "2.5.19"
+#define CNIC_MODULE_RELDATE "December 19, 2013"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index c2777712da99..b61c14ed9b8d 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* This driver is designed for the Broadcom SiByte SOC built-in
@@ -36,7 +35,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index f3dd93b4aeaa..3167ed6593b0 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -25,7 +25,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
@@ -37,6 +36,7 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
+#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 134
+#define TG3_MIN_NUM 136
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "Sep 16, 2013"
+#define DRV_MODULE_RELDATE "Jan 03, 2014"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -208,6 +208,9 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define TG3_RAW_IP_ALIGN 2
+#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
+#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
+
#define TG3_FW_UPDATE_TIMEOUT_SEC 5
#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
@@ -2606,13 +2609,14 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
tg3_writephy(tp, MII_CTRL1000, phy9_orig);
- if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
- reg32 &= ~0x3000;
- tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
- } else if (!err)
- err = -EBUSY;
+ err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
+ if (err)
+ return err;
- return err;
+ reg32 &= ~0x3000;
+ tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+
+ return 0;
}
static void tg3_carrier_off(struct tg3 *tp)
@@ -3948,32 +3952,41 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
return 0;
}
+/* tp->lock is held. */
+static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
+{
+ u32 addr_high, addr_low;
+
+ addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
+ addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5]);
+
+ if (index < 4) {
+ tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
+ tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
+ } else {
+ index -= 4;
+ tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
+ tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
+ }
+}
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
- u32 addr_high, addr_low;
+ u32 addr_high;
int i;
- addr_high = ((tp->dev->dev_addr[0] << 8) |
- tp->dev->dev_addr[1]);
- addr_low = ((tp->dev->dev_addr[2] << 24) |
- (tp->dev->dev_addr[3] << 16) |
- (tp->dev->dev_addr[4] << 8) |
- (tp->dev->dev_addr[5] << 0));
for (i = 0; i < 4; i++) {
if (i == 1 && skip_mac_1)
continue;
- tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
- tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
+ __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
}
if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
tg3_asic_rev(tp) == ASIC_REV_5704) {
- for (i = 0; i < 12; i++) {
- tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
- tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
- }
+ for (i = 4; i < 16; i++)
+ __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
}
addr_high = (tp->dev->dev_addr[0] +
@@ -4403,9 +4416,12 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
if (tg3_flag(tp, WOL_SPEED_100MB))
adv |= ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
- if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
- adv |= ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full;
+ if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
+ if (!(tp->phy_flags &
+ TG3_PHYFLG_DISABLE_1G_HD_ADV))
+ adv |= ADVERTISED_1000baseT_Half;
+ adv |= ADVERTISED_1000baseT_Full;
+ }
fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
} else {
@@ -7622,7 +7638,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
u32 base = (u32) mapping & 0xffffffff;
- return (base > 0xffffdcc0) && (base + len + 8 < base);
+ return base + len + 8 < base;
}
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
@@ -8925,6 +8941,49 @@ static void tg3_restore_pci_state(struct tg3 *tp)
}
}
+static void tg3_override_clk(struct tg3 *tp)
+{
+ u32 val;
+
+ switch (tg3_asic_rev(tp)) {
+ case ASIC_REV_5717:
+ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
+ TG3_CPMU_MAC_ORIDE_ENABLE);
+ break;
+
+ case ASIC_REV_5719:
+ case ASIC_REV_5720:
+ tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+ break;
+
+ default:
+ return;
+ }
+}
+
+static void tg3_restore_clk(struct tg3 *tp)
+{
+ u32 val;
+
+ switch (tg3_asic_rev(tp)) {
+ case ASIC_REV_5717:
+ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
+ val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
+ break;
+
+ case ASIC_REV_5719:
+ case ASIC_REV_5720:
+ val = tr32(TG3_CPMU_CLCK_ORIDE);
+ tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+ break;
+
+ default:
+ return;
+ }
+}
+
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
@@ -9013,6 +9072,13 @@ static int tg3_chip_reset(struct tg3 *tp)
tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
}
+ /* Set the clock to the highest frequency to avoid timeouts. With link
+ * aware mode, the clock speed could be slow and bootcode does not
+ * complete within the expected time. Override the clock to allow the
+ * bootcode to finish sooner and then restore it.
+ */
+ tg3_override_clk(tp);
+
/* Manage gphy power for all CPMU absent PCIe devices. */
if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
@@ -9151,10 +9217,7 @@ static int tg3_chip_reset(struct tg3 *tp)
tw32(0x7c00, val | (1 << 25));
}
- if (tg3_asic_rev(tp) == ASIC_REV_5720) {
- val = tr32(TG3_CPMU_CLCK_ORIDE);
- tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
- }
+ tg3_restore_clk(tp);
/* Reprobe ASF enable state. */
tg3_flag_clear(tp, ENABLE_ASF);
@@ -9186,6 +9249,7 @@ static int tg3_chip_reset(struct tg3 *tp)
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
+static void __tg3_set_rx_mode(struct net_device *);
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
@@ -9246,6 +9310,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
}
spin_lock_bh(&tp->lock);
__tg3_set_mac_addr(tp, skip_mac_1);
+ __tg3_set_rx_mode(dev);
spin_unlock_bh(&tp->lock);
return err;
@@ -9634,6 +9699,20 @@ static void __tg3_set_rx_mode(struct net_device *dev)
tw32(MAC_HASH_REG_3, mc_filter[3]);
}
+ if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
+ rx_mode |= RX_MODE_PROMISC;
+ } else if (!(dev->flags & IFF_PROMISC)) {
+ /* Add all entries into the mac addr filter list */
+ int i = 0;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ __tg3_set_one_mac_addr(tp, ha->addr,
+ i + TG3_UCAST_ADDR_IDX(tp));
+ i++;
+ }
+ }
+
if (rx_mode != tp->rx_mode) {
tp->rx_mode = rx_mode;
tw32_f(MAC_RX_MODE, rx_mode);
@@ -9966,6 +10045,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
if (tg3_asic_rev(tp) == ASIC_REV_5719)
val |= BUFMGR_MODE_NO_TX_UNDERRUN;
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762 ||
tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
@@ -10751,6 +10831,7 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
+ tg3_asic_rev(tp) != ASIC_REV_5762 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
@@ -10879,6 +10960,13 @@ static void tg3_timer(unsigned long __opaque)
} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
tg3_flag(tp, 5780_CLASS)) {
tg3_serdes_parallel_detect(tp);
+ } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
+ u32 cpmu = tr32(TG3_CPMU_STATUS);
+ bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
+ TG3_CPMU_STATUS_LINK_MASK);
+
+ if (link_up != tp->link_up)
+ tg3_setup_phy(tp, false);
}
tp->timer_counter = tp->timer_multiplier;
@@ -11746,8 +11834,6 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
get_stat64(&hw_stats->rx_frame_too_long_errors) +
get_stat64(&hw_stats->rx_undersize_packets);
- stats->rx_over_errors = old_stats->rx_over_errors +
- get_stat64(&hw_stats->rxbds_empty);
stats->rx_frame_errors = old_stats->rx_frame_errors +
get_stat64(&hw_stats->rx_align_errors);
stats->tx_aborted_errors = old_stats->tx_aborted_errors +
@@ -13594,14 +13680,13 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
-static int tg3_hwtstamp_ioctl(struct net_device *dev,
- struct ifreq *ifr, int cmd)
+static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct tg3 *tp = netdev_priv(dev);
struct hwtstamp_config stmpconf;
if (!tg3_flag(tp, PTP_CAPABLE))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
return -EFAULT;
@@ -13682,6 +13767,67 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev,
-EFAULT : 0;
}
+static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ struct hwtstamp_config stmpconf;
+
+ if (!tg3_flag(tp, PTP_CAPABLE))
+ return -EOPNOTSUPP;
+
+ stmpconf.flags = 0;
+ stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
+
+ switch (tp->rxptpctl) {
+ case 0:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -ERANGE;
+ }
+
+ return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+ -EFAULT : 0;
+}
+
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *data = if_mii(ifr);
@@ -13735,7 +13881,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
case SIOCSHWTSTAMP:
- return tg3_hwtstamp_ioctl(dev, ifr, cmd);
+ return tg3_hwtstamp_set(dev, ifr);
+
+ case SIOCGHWTSTAMP:
+ return tg3_hwtstamp_get(dev, ifr);
default:
/* do nothing */
@@ -13965,12 +14114,12 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_netif_stop(tp);
+ tg3_set_mtu(dev, tp, new_mtu);
+
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_set_mtu(dev, tp, new_mtu);
-
/* Reset PHY, otherwise the read DMA engine will be in a mode that
* breaks all requests to 256 bytes.
*/
@@ -14856,7 +15005,8 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
if (val == NIC_SRAM_DATA_SIG_MAGIC) {
u32 nic_cfg, led_cfg;
- u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
+ u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
+ u32 nic_phy_id, ver, eeprom_phy_id;
int eeprom_phy_serdes = 0;
tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
@@ -14873,6 +15023,11 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
if (tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
+
if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
eeprom_phy_serdes = 1;
@@ -15025,6 +15180,9 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
+
+ if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
+ tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
}
done:
if (tg3_flag(tp, WOL_CAP))
@@ -15120,9 +15278,11 @@ static void tg3_phy_init_link_config(struct tg3 *tp)
{
u32 adv = ADVERTISED_Autoneg;
- if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
- adv |= ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full;
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
+ adv |= ADVERTISED_1000baseT_Half;
+ adv |= ADVERTISED_1000baseT_Full;
+ }
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
adv |= ADVERTISED_100baseT_Half |
@@ -16470,6 +16630,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Set these bits to enable statistics workaround. */
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762 ||
tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
tp->coalesce_mode |= HOSTCC_MODE_ATTN;
@@ -16612,6 +16773,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
else
tg3_flag_clear(tp, POLL_SERDES);
+ if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
+ tg3_flag_set(tp, POLL_CPMU_LINK);
+
tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
@@ -17533,6 +17697,7 @@ static int tg3_init_one(struct pci_dev *pdev,
features |= NETIF_F_LOOPBACK;
dev->hw_features |= features;
+ dev->priv_flags |= IFF_UNICAST_FLT;
if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
!tg3_flag(tp, TSO_CAPABLE) &&
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 5c3835aa1e1b..ef472385bce4 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1146,10 +1146,14 @@
#define TG3_CPMU_CLCK_ORIDE 0x00003624
#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000
+#define TG3_CPMU_CLCK_ORIDE_ENABLE 0x00003628
+#define TG3_CPMU_MAC_ORIDE_ENABLE (1 << 13)
+
#define TG3_CPMU_STATUS 0x0000362c
#define TG3_CPMU_STATUS_FMSK_5717 0x20000000
#define TG3_CPMU_STATUS_FMSK_5719 0xc0000000
#define TG3_CPMU_STATUS_FSHFT_5719 30
+#define TG3_CPMU_STATUS_LINK_MASK 0x180000
#define TG3_CPMU_CLCK_STAT 0x00003630
#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000
@@ -2204,7 +2208,7 @@
#define NIC_SRAM_DATA_CFG_2 0x00000d38
-#define NIC_SRAM_DATA_CFG_2_APD_EN 0x00000400
+#define NIC_SRAM_DATA_CFG_2_APD_EN 0x00004000
#define SHASTA_EXT_LED_MODE_MASK 0x00018000
#define SHASTA_EXT_LED_LEGACY 0x00000000
#define SHASTA_EXT_LED_SHARED 0x00008000
@@ -2226,6 +2230,9 @@
#define NIC_SRAM_CPMUSTAT_SIG 0x0000362c
#define NIC_SRAM_CPMUSTAT_SIG_MSK 0x0000ffff
+#define NIC_SRAM_DATA_CFG_5 0x00000e0c
+#define NIC_SRAM_DISABLE_1G_HALF_ADV 0x00000002
+
#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000
#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000
@@ -3014,6 +3021,7 @@ enum TG3_FLAGS {
TG3_FLAG_ENABLE_ASF,
TG3_FLAG_ASPM_WORKAROUND,
TG3_FLAG_POLL_SERDES,
+ TG3_FLAG_POLL_CPMU_LINK,
TG3_FLAG_MBOX_WRITE_REORDER,
TG3_FLAG_PCIX_TARGET_HWBUG,
TG3_FLAG_WOL_SPEED_100MB,
@@ -3325,6 +3333,7 @@ struct tg3 {
#define TG3_PHYFLG_1G_ON_VAUX_OK 0x00080000
#define TG3_PHYFLG_KEEP_LINK_ON_PWRDN 0x00100000
#define TG3_PHYFLG_MDIX_STATE 0x00200000
+#define TG3_PHYFLG_DISABLE_1G_HD_ADV 0x00400000
u32 led_ctrl;
u32 phy_otp;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 6f3cac060f29..1803c3959044 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -22,6 +22,14 @@
/* IOC local definitions */
+#define bfa_ioc_state_disabled(__sm) \
+ (((__sm) == BFI_IOC_UNINIT) || \
+ ((__sm) == BFI_IOC_INITING) || \
+ ((__sm) == BFI_IOC_HWINIT) || \
+ ((__sm) == BFI_IOC_DISABLED) || \
+ ((__sm) == BFI_IOC_FAIL) || \
+ ((__sm) == BFI_IOC_CFG_DISABLED))
+
/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
#define bfa_ioc_firmware_lock(__ioc) \
@@ -42,6 +50,14 @@
((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
+#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \
+ ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_cur_ioc_fwstate(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
+#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \
+ ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_alt_ioc_fwstate(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
#define bfa_ioc_mbox_cmd_pending(__ioc) \
(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
@@ -76,8 +92,8 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
-static void bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
- u32 boot_param);
+static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
+ enum bfi_fwboot_type boot_type, u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
char *serial_num);
@@ -860,7 +876,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
*/
case IOCPF_E_TIMEOUT:
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
@@ -949,7 +965,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
case IOCPF_E_SEMLOCKED:
bfa_ioc_notify_fail(ioc);
bfa_ioc_sync_leave(ioc);
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
break;
@@ -1031,7 +1047,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
bfa_ioc_notify_fail(ioc);
if (!iocpf->auto_recover) {
bfa_ioc_sync_leave(ioc);
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
} else {
@@ -1162,7 +1178,7 @@ bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
}
- fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
if (fwstate == BFI_IOC_UNINIT) {
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
return;
@@ -1176,8 +1192,8 @@ bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
}
bfa_ioc_fwver_clear(ioc);
- writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
/*
* Try to lock and then unlock the semaphore.
@@ -1309,22 +1325,504 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
}
}
-/* Returns TRUE if same. */
+static bool
+bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
+ struct bfi_ioc_image_hdr *fwhdr_2)
+{
+ int i;
+
+ for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
+ if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
+ return false;
+ }
+
+ return true;
+}
+
+/* Returns TRUE if the signature, major, minor, and maintenance versions match.
+ * If the patch, phase, and build are also identical, the MD5 checksums must
+ * match as well.
+ */
+static bool
+bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
+ struct bfi_ioc_image_hdr *fwhdr_to_cmp)
+{
+ if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
+ return false;
+ if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
+ return false;
+ if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
+ return false;
+ if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
+ return false;
+ if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
+ drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
+ drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
+ return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
+
+ return true;
+}
+
+static bool
+bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
+{
+ if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
+ return false;
+
+ return true;
+}
+
+static bool
+fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
+{
+ if (fwhdr->fwver.phase == 0 &&
+ fwhdr->fwver.build == 0)
+ return false;
+
+ return true;
+}
+
+/* Compares fwhdr_to_cmp against base_fwhdr and returns whether it is
+ * incompatible, older, the same, or better at the patch level.
+ */
+static enum bfi_ioc_img_ver_cmp
+bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
+ struct bfi_ioc_image_hdr *fwhdr_to_cmp)
+{
+ if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == false)
+ return BFI_IOC_IMG_VER_INCOMP;
+
+ if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
+ return BFI_IOC_IMG_VER_OLD;
+
+ /* GA takes priority over internal builds of the same patch stream.
+ * At this point the major, minor, maint, and patch numbers are the same.
+ */
+ if (fwhdr_is_ga(base_fwhdr) == true)
+ if (fwhdr_is_ga(fwhdr_to_cmp))
+ return BFI_IOC_IMG_VER_SAME;
+ else
+ return BFI_IOC_IMG_VER_OLD;
+ else
+ if (fwhdr_is_ga(fwhdr_to_cmp))
+ return BFI_IOC_IMG_VER_BETTER;
+
+ if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
+ return BFI_IOC_IMG_VER_OLD;
+
+ if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
+ return BFI_IOC_IMG_VER_OLD;
+
+ /* All version numbers are equal.
+ * The MD5 comparison is handled as part of the compatibility check above.
+ */
+ return BFI_IOC_IMG_VER_SAME;
+}
+
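The helper above layers its tie-breakers: patch number first, then the GA/internal distinction (per fwhdr_is_ga() exactly as written in this hunk), then phase, then build, with the MD5-backed compatibility check covering the fully-equal case. The following is an illustrative stand-alone sketch of that ordering using simplified stand-in types, not the driver's bfi structures.

/* Illustrative sketch of the tie-break order in bfa_ioc_fw_ver_patch_cmp();
 * simplified types, behaviour mirrored verbatim from this hunk.
 */
#include <stdbool.h>
#include <stdio.h>

struct fwver { unsigned char patch, phase, build; };

enum ver_cmp { VER_OLD = -1, VER_SAME = 0, VER_BETTER = 1 };

/* Mirrors fwhdr_is_ga() as written above: an image with a nonzero phase or
 * build is the one treated as "GA" by the comparison below.
 */
static bool is_ga(const struct fwver *v)
{
	return !(v->phase == 0 && v->build == 0);
}

static enum ver_cmp patch_cmp(const struct fwver *base, const struct fwver *cand)
{
	if (cand->patch != base->patch)
		return cand->patch > base->patch ? VER_BETTER : VER_OLD;

	if (is_ga(base))
		return is_ga(cand) ? VER_SAME : VER_OLD;
	if (is_ga(cand))
		return VER_BETTER;

	if (cand->phase != base->phase)
		return cand->phase > base->phase ? VER_BETTER : VER_OLD;
	if (cand->build != base->build)
		return cand->build > base->build ? VER_BETTER : VER_OLD;

	return VER_SAME;
}

int main(void)
{
	struct fwver base = { .patch = 2, .phase = 0, .build = 0 };
	struct fwver cand = { .patch = 3, .phase = 0, .build = 0 };

	printf("%d\n", patch_cmp(&base, &cand)); /* 1: higher patch is better */
	return 0;
}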
+/* register definitions */
+#define FLI_CMD_REG 0x0001d000
+#define FLI_WRDATA_REG 0x0001d00c
+#define FLI_RDDATA_REG 0x0001d010
+#define FLI_ADDR_REG 0x0001d004
+#define FLI_DEV_STATUS_REG 0x0001d014
+
+#define BFA_FLASH_FIFO_SIZE 128 /* fifo size */
+#define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */
+#define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */
+#define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */
+
+#define NFC_STATE_RUNNING 0x20000001
+#define NFC_STATE_PAUSED 0x00004560
+#define NFC_VER_VALID 0x147
+
+enum bfa_flash_cmd {
+ BFA_FLASH_FAST_READ = 0x0b, /* fast read */
+ BFA_FLASH_WRITE_ENABLE = 0x06, /* write enable */
+ BFA_FLASH_SECTOR_ERASE = 0xd8, /* sector erase */
+ BFA_FLASH_WRITE = 0x02, /* write */
+ BFA_FLASH_READ_STATUS = 0x05, /* read status */
+};
+
+/* hardware error definition */
+enum bfa_flash_err {
+ BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */
+ BFA_FLASH_UNINIT = -2, /*!< flash not initialized */
+ BFA_FLASH_BAD = -3, /*!< flash bad */
+ BFA_FLASH_BUSY = -4, /*!< flash busy */
+ BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */
+ BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */
+ BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */
+ BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */
+ BFA_FLASH_ERR_LEN = -9, /*!< invalid length */
+};
+
+/* flash command register data structure */
+union bfa_flash_cmd_reg {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 act:1;
+ u32 rsv:1;
+ u32 write_cnt:9;
+ u32 read_cnt:9;
+ u32 addr_cnt:4;
+ u32 cmd:8;
+#else
+ u32 cmd:8;
+ u32 addr_cnt:4;
+ u32 read_cnt:9;
+ u32 write_cnt:9;
+ u32 rsv:1;
+ u32 act:1;
+#endif
+ } r;
+ u32 i;
+};
+
+/* flash device status register data structure */
+union bfa_flash_dev_status_reg {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 rsv:21;
+ u32 fifo_cnt:6;
+ u32 busy:1;
+ u32 init_status:1;
+ u32 present:1;
+ u32 bad:1;
+ u32 good:1;
+#else
+ u32 good:1;
+ u32 bad:1;
+ u32 present:1;
+ u32 init_status:1;
+ u32 busy:1;
+ u32 fifo_cnt:6;
+ u32 rsv:21;
+#endif
+ } r;
+ u32 i;
+};
+
+/* flash address register data structure */
+union bfa_flash_addr_reg {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 addr:24;
+ u32 dummy:8;
+#else
+ u32 dummy:8;
+ u32 addr:24;
+#endif
+ } r;
+ u32 i;
+};
+
+/* Flash raw private functions */
+static void
+bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
+ u8 rd_cnt, u8 ad_cnt, u8 op)
+{
+ union bfa_flash_cmd_reg cmd;
+
+ cmd.i = 0;
+ cmd.r.act = 1;
+ cmd.r.write_cnt = wr_cnt;
+ cmd.r.read_cnt = rd_cnt;
+ cmd.r.addr_cnt = ad_cnt;
+ cmd.r.cmd = op;
+ writel(cmd.i, (pci_bar + FLI_CMD_REG));
+}
+
+static void
+bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
+{
+ union bfa_flash_addr_reg addr;
+
+ addr.r.addr = address & 0x00ffffff;
+ addr.r.dummy = 0;
+ writel(addr.i, (pci_bar + FLI_ADDR_REG));
+}
+
+static int
+bfa_flash_cmd_act_check(void __iomem *pci_bar)
+{
+ union bfa_flash_cmd_reg cmd;
+
+ cmd.i = readl(pci_bar + FLI_CMD_REG);
+
+ if (cmd.r.act)
+ return BFA_FLASH_ERR_CMD_ACT;
+
+ return 0;
+}
+
+/* Flush FLI data fifo. */
+static u32
+bfa_flash_fifo_flush(void __iomem *pci_bar)
+{
+ u32 i;
+ u32 t;
+ union bfa_flash_dev_status_reg dev_status;
+
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+
+ if (!dev_status.r.fifo_cnt)
+ return 0;
+
+ /* fifo counter in terms of words */
+ for (i = 0; i < dev_status.r.fifo_cnt; i++)
+ t = readl(pci_bar + FLI_RDDATA_REG);
+
+ /* Check the device status. It may take some time. */
+ for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+ if (!dev_status.r.fifo_cnt)
+ break;
+ }
+
+ if (dev_status.r.fifo_cnt)
+ return BFA_FLASH_ERR_FIFO_CNT;
+
+ return 0;
+}
+
+/* Read flash status. */
+static u32
+bfa_flash_status_read(void __iomem *pci_bar)
+{
+ union bfa_flash_dev_status_reg dev_status;
+ u32 status;
+ u32 ret_status;
+ int i;
+
+ status = bfa_flash_fifo_flush(pci_bar);
+ if (status < 0)
+ return status;
+
+ bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
+
+ for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+ status = bfa_flash_cmd_act_check(pci_bar);
+ if (!status)
+ break;
+ }
+
+ if (status)
+ return status;
+
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+ if (!dev_status.r.fifo_cnt)
+ return BFA_FLASH_BUSY;
+
+ ret_status = readl(pci_bar + FLI_RDDATA_REG);
+ ret_status >>= 24;
+
+ status = bfa_flash_fifo_flush(pci_bar);
+ if (status < 0)
+ return status;
+
+ return ret_status;
+}
+
+/* Start flash read operation. */
+static u32
+bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
+ char *buf)
+{
+ u32 status;
+
+ /* len must be a multiple of 4 and must not exceed the fifo size */
+ if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
+ return BFA_FLASH_ERR_LEN;
+
+ /* check status */
+ status = bfa_flash_status_read(pci_bar);
+ if (status == BFA_FLASH_BUSY)
+ status = bfa_flash_status_read(pci_bar);
+
+ if (status < 0)
+ return status;
+
+ /* check if write-in-progress bit is cleared */
+ if (status & BFA_FLASH_WIP_MASK)
+ return BFA_FLASH_ERR_WIP;
+
+ bfa_flash_set_addr(pci_bar, offset);
+
+ bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
+
+ return 0;
+}
+
+/* Check flash read operation. */
+static u32
+bfa_flash_read_check(void __iomem *pci_bar)
+{
+ if (bfa_flash_cmd_act_check(pci_bar))
+ return 1;
+
+ return 0;
+}
+
+/* End flash read operation. */
+static void
+bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
+{
+ u32 i;
+
+ /* read data fifo up to 32 words */
+ for (i = 0; i < len; i += 4) {
+ u32 w = readl(pci_bar + FLI_RDDATA_REG);
+ *((u32 *)(buf + i)) = swab32(w);
+ }
+
+ bfa_flash_fifo_flush(pci_bar);
+}
+
+/* Perform flash raw read. */
+
+#define FLASH_BLOCKING_OP_MAX 500
+#define FLASH_SEM_LOCK_REG 0x18820
+
+static int
+bfa_raw_sem_get(void __iomem *bar)
+{
+ int locked;
+
+ locked = readl((bar + FLASH_SEM_LOCK_REG));
+
+ return !locked;
+}
+
+static enum bfa_status
+bfa_flash_sem_get(void __iomem *bar)
+{
+ u32 n = FLASH_BLOCKING_OP_MAX;
+
+ while (!bfa_raw_sem_get(bar)) {
+ if (--n <= 0)
+ return BFA_STATUS_BADFLASH;
+ udelay(10000);
+ }
+ return BFA_STATUS_OK;
+}
+
+static void
+bfa_flash_sem_put(void __iomem *bar)
+{
+ writel(0, (bar + FLASH_SEM_LOCK_REG));
+}
+
+static enum bfa_status
+bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
+ u32 len)
+{
+ u32 n, status;
+ u32 off, l, s, residue, fifo_sz;
+
+ residue = len;
+ off = 0;
+ fifo_sz = BFA_FLASH_FIFO_SIZE;
+ status = bfa_flash_sem_get(pci_bar);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ while (residue) {
+ s = offset + off;
+ n = s / fifo_sz;
+ l = (n + 1) * fifo_sz - s;
+ if (l > residue)
+ l = residue;
+
+ status = bfa_flash_read_start(pci_bar, offset + off, l,
+ &buf[off]);
+ if (status < 0) {
+ bfa_flash_sem_put(pci_bar);
+ return BFA_STATUS_FAILED;
+ }
+
+ n = BFA_FLASH_BLOCKING_OP_MAX;
+ while (bfa_flash_read_check(pci_bar)) {
+ if (--n <= 0) {
+ bfa_flash_sem_put(pci_bar);
+ return BFA_STATUS_FAILED;
+ }
+ }
+
+ bfa_flash_read_end(pci_bar, l, &buf[off]);
+
+ residue -= l;
+ off += l;
+ }
+ bfa_flash_sem_put(pci_bar);
+
+ return BFA_STATUS_OK;
+}
+
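bfa_flash_raw_read() above splits the request so that no single bfa_flash_read_start() call crosses a BFA_FLASH_FIFO_SIZE boundary: each chunk length is the distance from the current absolute offset to the next FIFO boundary, capped by the remaining residue. A small host-only sketch of just that arithmetic (hypothetical helper, no hardware access) is shown below.

/* Host-side sketch of the chunking arithmetic in bfa_flash_raw_read():
 * each chunk ends on the next BFA_FLASH_FIFO_SIZE boundary, or sooner if
 * the residue runs out. Purely illustrative.
 */
#include <stdio.h>

#define FIFO_SZ 128u  /* mirrors BFA_FLASH_FIFO_SIZE */

static void show_chunks(unsigned int offset, unsigned int len)
{
	unsigned int off = 0, residue = len;

	while (residue) {
		unsigned int s = offset + off;          /* absolute address */
		unsigned int n = s / FIFO_SZ;           /* current fifo window */
		unsigned int l = (n + 1) * FIFO_SZ - s; /* bytes to window end */

		if (l > residue)
			l = residue;

		printf("read %u bytes at 0x%x\n", l, s);
		residue -= l;
		off += l;
	}
}

int main(void)
{
	/* A 300-byte read starting 20 bytes into a fifo window splits into
	 * 108 + 128 + 64 byte chunks.
	 */
	show_chunks(0x100014, 300);
	return 0;
}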
+#define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */
+
+static enum bfa_status
+bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
+ u32 *fwimg)
+{
+ return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
+ BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
+ (char *)fwimg, BFI_FLASH_CHUNK_SZ);
+}
+
+static enum bfi_ioc_img_ver_cmp
+bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
+ struct bfi_ioc_image_hdr *base_fwhdr)
+{
+ struct bfi_ioc_image_hdr *flash_fwhdr;
+ enum bfa_status status;
+ u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
+
+ status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
+ if (status != BFA_STATUS_OK)
+ return BFI_IOC_IMG_VER_INCOMP;
+
+ flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
+ if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
+ return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
+ else
+ return BFI_IOC_IMG_VER_INCOMP;
+}
+
+/**
+ * Returns TRUE if the driver is willing to work with the current smem f/w.
+ */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
struct bfi_ioc_image_hdr *drv_fwhdr;
- int i;
+ enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;
drv_fwhdr = (struct bfi_ioc_image_hdr *)
bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
- for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
- if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
- return false;
+ /* If smem is incompatible or old, driver should not work with it. */
+ drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
+ if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
+ drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
+ return false;
}
- return true;
+ /* If flash has better firmware than smem, do not work with smem.
+ * If the smem and flash firmware match, work with smem (it is already
+ * known to be neither old nor incompatible from the check above).
+ * If flash is old or incompatible, work with smem only if the smem
+ * firmware matches the driver firmware.
+ */
+ smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);
+
+ if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
+ return false;
+ else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
+ return true;
+ else
+ return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
+ true : false;
}
/* Return true if current running version is valid. Firmware signature and
@@ -1333,15 +1831,9 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
- struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
+ struct bfi_ioc_image_hdr fwhdr;
bfa_nw_ioc_fwver_get(ioc, &fwhdr);
- drv_fwhdr = (struct bfi_ioc_image_hdr *)
- bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
-
- if (fwhdr.signature != drv_fwhdr->signature)
- return false;
-
if (swab32(fwhdr.bootenv) != boot_env)
return false;
@@ -1366,7 +1858,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
bool fwvalid;
u32 boot_env;
- ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
if (force)
ioc_fwstate = BFI_IOC_UNINIT;
@@ -1380,8 +1872,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
false : bfa_ioc_fwver_valid(ioc, boot_env);
if (!fwvalid) {
- bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
- bfa_ioc_poll_fwinit(ioc);
+ if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
+ BFA_STATUS_OK)
+ bfa_ioc_poll_fwinit(ioc);
+
return;
}
@@ -1411,8 +1905,9 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
/**
* Initialize the h/w for any other states.
*/
- bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
- bfa_ioc_poll_fwinit(ioc);
+ if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
+ BFA_STATUS_OK)
+ bfa_ioc_poll_fwinit(ioc);
}
void
@@ -1517,7 +2012,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
}
/* Initiate a full firmware download. */
-static void
+static enum bfa_status
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
u32 boot_env)
{
@@ -1527,18 +2022,47 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
u32 chunkno = 0;
u32 i;
u32 asicmode;
+ u32 fwimg_size;
+ u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
+ enum bfa_status status;
- fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
+
+ status = bfa_nw_ioc_flash_img_get_chnk(ioc,
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ fwimg = fwimg_buf;
+ } else {
+ fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
+ fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
pgnum = bfa_ioc_smem_pgnum(ioc, loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
- for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
+ for (i = 0; i < fwimg_size; i++) {
if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
- fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ status = bfa_nw_ioc_flash_img_get_chnk(ioc,
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
+ fwimg_buf);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ fwimg = fwimg_buf;
+ } else {
+ fwimg = bfa_cb_image_get_chunk(
+ bfa_ioc_asic_gen(ioc),
BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
}
/**
@@ -1566,6 +2090,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
/*
* Set boot type, env and device mode at the end.
*/
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ boot_type = BFI_FWBOOT_TYPE_NORMAL;
+ }
asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
ioc->port0_mode, ioc->port1_mode);
writel(asicmode, ((ioc->ioc_regs.smem_page_start)
@@ -1574,6 +2102,7 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
+ (BFI_FWBOOT_TYPE_OFF)));
writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+ (BFI_FWBOOT_ENV_OFF)));
+ return BFA_STATUS_OK;
}
static void
@@ -1846,29 +2375,47 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
/* Interface used by diag module to do firmware boot with memory test
* as the entry vector.
*/
-static void
+static enum bfa_status
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
u32 boot_env)
{
+ struct bfi_ioc_image_hdr *drv_fwhdr;
+ enum bfa_status status;
bfa_ioc_stats(ioc, ioc_boots);
if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
- return;
+ return BFA_STATUS_FAILED;
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_NORMAL) {
+ drv_fwhdr = (struct bfi_ioc_image_hdr *)
+ bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
+ /* Work with Flash iff flash f/w is better than driver f/w.
+ * Otherwise push drivers firmware.
+ */
+ if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
+ BFI_IOC_IMG_VER_BETTER)
+ boot_type = BFI_FWBOOT_TYPE_FLASH;
+ }
/**
* Initialize IOC state of all functions on a chip reset.
*/
if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
- writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
} else {
- writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
}
bfa_ioc_msgflush(ioc);
- bfa_ioc_download_fw(ioc, boot_type, boot_env);
- bfa_ioc_lpu_start(ioc);
+ status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
+ if (status == BFA_STATUS_OK)
+ bfa_ioc_lpu_start(ioc);
+ else
+ bfa_nw_iocpf_timeout(ioc);
+
+ return status;
}
/* Enable/disable IOC failure auto recovery. */
@@ -2473,7 +3020,7 @@ bfa_nw_iocpf_sem_timeout(void *ioc_arg)
static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
- u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
if (fwstate == BFI_IOC_DISABLED) {
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index f04e0aab25b4..20cff7df4b55 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -215,6 +215,13 @@ struct bfa_ioc_hwif {
void (*ioc_sync_ack) (struct bfa_ioc *ioc);
bool (*ioc_sync_complete) (struct bfa_ioc *ioc);
bool (*ioc_lpu_read_stat) (struct bfa_ioc *ioc);
+ void (*ioc_set_fwstate) (struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate);
+ enum bfi_ioc_state (*ioc_get_fwstate) (struct bfa_ioc *ioc);
+ void (*ioc_set_alt_fwstate) (struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate);
+ enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc *ioc);
+
};
#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@@ -291,6 +298,7 @@ void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
+enum bfa_status bfa_nw_ioc_fwsig_invalidate(struct bfa_ioc *ioc);
void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
struct bfa_ioc_notify *notify);
bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 5df0b0c68c5a..d639558455cb 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -48,6 +48,12 @@ static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_set_cur_ioc_fwstate(
+ struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_set_alt_ioc_fwstate(
+ struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
@@ -68,6 +74,10 @@ static const struct bfa_ioc_hwif nw_hwif_ct = {
.ioc_sync_leave = bfa_ioc_ct_sync_leave,
.ioc_sync_ack = bfa_ioc_ct_sync_ack,
.ioc_sync_complete = bfa_ioc_ct_sync_complete,
+ .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
+ .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
+ .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
+ .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
static const struct bfa_ioc_hwif nw_hwif_ct2 = {
@@ -85,6 +95,10 @@ static const struct bfa_ioc_hwif nw_hwif_ct2 = {
.ioc_sync_leave = bfa_ioc_ct_sync_leave,
.ioc_sync_ack = bfa_ioc_ct_sync_ack,
.ioc_sync_complete = bfa_ioc_ct_sync_complete,
+ .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
+ .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
+ .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
+ .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
/* Called from bfa_ioc_attach() to map asic specific calls. */
@@ -565,6 +579,32 @@ bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
return false;
}
+static void
+bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate)
+{
+ writel(fwstate, ioc->ioc_regs.ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc)
+{
+ return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
+}
+
+static void
+bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate)
+{
+ writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc)
+{
+ return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
+}
+
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 1f24c23dc786..8c563a77cdf6 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -25,6 +25,7 @@
/* BFI FW image type */
#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
+#define BFI_FLASH_IMAGE_SZ 0x100000
/* Msg header common to all msgs */
struct bfi_mhdr {
@@ -233,7 +234,29 @@ struct bfi_ioc_getattr_reply {
#define BFI_IOC_TRC_HDR_SZ 32
#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
+#define BFI_IOC_FW_INV_SIGN (0xdeaddead)
#define BFI_IOC_MD5SUM_SZ 4
+
+struct bfi_ioc_fwver {
+#ifdef __BIG_ENDIAN
+ u8 patch;
+ u8 maint;
+ u8 minor;
+ u8 major;
+ u8 rsvd[2];
+ u8 build;
+ u8 phase;
+#else
+ u8 major;
+ u8 minor;
+ u8 maint;
+ u8 patch;
+ u8 phase;
+ u8 build;
+ u8 rsvd[2];
+#endif
+};
+
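Note that the new 8-byte bfi_ioc_fwver replaces exactly two of the four reserved words in the image header (see the bfi_ioc_image_hdr hunk just below), so the overall header size and the offset of md5sum[] are unchanged. A host-side sanity sketch, using assumed stand-in types rather than the real header, could check that invariant at compile time.

/* Host-side sanity sketch (assumed stand-in layout, not the real header):
 * the 8-byte fwver occupies exactly two 32-bit reserved words, keeping the
 * header size and the md5sum[] offset stable across this change.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fwver8 {
	uint8_t major, minor, maint, patch;
	uint8_t phase, build, rsvd[2];
};

struct img_hdr_new {
	uint32_t signature;
	uint8_t  asic_gen, asic_mode, port0_mode, port1_mode;
	uint32_t exec;
	uint32_t bootenv;
	uint32_t rsvd_b[2];
	struct fwver8 fwver;
	uint32_t md5sum[4];
};

int main(void)
{
	static_assert(sizeof(struct fwver8) == 2 * sizeof(uint32_t),
		      "fwver must occupy exactly two 32-bit words");
	printf("md5sum offset: %zu\n", offsetof(struct img_hdr_new, md5sum));
	return 0;
}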
struct bfi_ioc_image_hdr {
u32 signature; /*!< constant signature */
u8 asic_gen; /*!< asic generation */
@@ -242,10 +265,18 @@ struct bfi_ioc_image_hdr {
u8 port1_mode; /*!< device mode for port 1 */
u32 exec; /*!< exec vector */
u32 bootenv; /*!< firmware boot env */
- u32 rsvd_b[4];
+ u32 rsvd_b[2];
+ struct bfi_ioc_fwver fwver;
u32 md5sum[BFI_IOC_MD5SUM_SZ];
};
+enum bfi_ioc_img_ver_cmp {
+ BFI_IOC_IMG_VER_INCOMP,
+ BFI_IOC_IMG_VER_OLD,
+ BFI_IOC_IMG_VER_SAME,
+ BFI_IOC_IMG_VER_BETTER
+};
+
#define BFI_FWBOOT_DEVMODE_OFF 4
#define BFI_FWBOOT_TYPE_OFF 8
#define BFI_FWBOOT_ENV_OFF 12
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index 7d10e335c27d..ae072dc5d238 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -472,7 +472,8 @@ enum bfi_enet_hds_type {
struct bfi_enet_rx_cfg {
u8 rxq_type;
- u8 rsvd[3];
+ u8 rsvd[1];
+ u16 frame_size;
struct {
u8 max_header_size;
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index f1eafc409bbd..1f512190d696 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -354,6 +354,14 @@ do { \
} \
} while (0)
+#define bna_mcam_mod_free_q(_bna) (&(_bna)->mcam_mod.free_q)
+
+#define bna_mcam_mod_del_q(_bna) (&(_bna)->mcam_mod.del_q)
+
+#define bna_ucam_mod_free_q(_bna) (&(_bna)->ucam_mod.free_q)
+
+#define bna_ucam_mod_del_q(_bna) (&(_bna)->ucam_mod.del_q)
+
/* Inline functions */
static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
@@ -391,12 +399,8 @@ int bna_num_rxp_set(struct bna *bna, int num_rxp);
void bna_hw_stats_get(struct bna *bna);
/* APIs for RxF */
-struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
-void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
- struct bna_mac *mac);
-struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
-void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
- struct bna_mac *mac);
+struct bna_mac *bna_cam_mod_mac_get(struct list_head *head);
+void bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac);
struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
struct bna_mcam_handle *handle);
@@ -493,11 +497,17 @@ enum bna_cb_status
bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
+void
+bna_rx_mcast_delall(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
enum bna_rxmode bitmask,
@@ -505,6 +515,8 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
+void bna_rx_vlan_strip_enable(struct bna_rx *rx);
+void bna_rx_vlan_strip_disable(struct bna_rx *rx);
/* ENET */
/* API for RX */
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 3ca77fad4851..13f9636cdba7 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1811,6 +1811,13 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
}
+ /* A separate queue to allow synchronous setting of a list of MACs */
+ INIT_LIST_HEAD(&ucam_mod->del_q);
+ for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
+ bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+ list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
+ }
+
ucam_mod->bna = bna;
}
@@ -1818,11 +1825,16 @@ static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
struct list_head *qe;
- int i = 0;
+ int i;
+ i = 0;
list_for_each(qe, &ucam_mod->free_q)
i++;
+ i = 0;
+ list_for_each(qe, &ucam_mod->del_q)
+ i++;
+
ucam_mod->bna = NULL;
}
@@ -1851,6 +1863,13 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
&mcam_mod->free_handle_q);
}
+ /* A separate queue to allow synchronous setting of a list of MACs */
+ INIT_LIST_HEAD(&mcam_mod->del_q);
+ for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
+ bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+ list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
+ }
+
mcam_mod->bna = bna;
}
@@ -1864,6 +1883,9 @@ bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
list_for_each(qe, &mcam_mod->free_q) i++;
i = 0;
+ list_for_each(qe, &mcam_mod->del_q) i++;
+
+ i = 0;
list_for_each(qe, &mcam_mod->free_handle_q) i++;
mcam_mod->bna = NULL;
@@ -1976,7 +1998,7 @@ bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
BNA_MEM_T_KVA;
res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
- attr->num_ucmac * sizeof(struct bna_mac);
+ (attr->num_ucmac * 2) * sizeof(struct bna_mac);
/* Virtual memory for Multicast MAC address - stored by mcam module */
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
@@ -1984,7 +2006,7 @@ bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
BNA_MEM_T_KVA;
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
- attr->num_mcmac * sizeof(struct bna_mac);
+ (attr->num_mcmac * 2) * sizeof(struct bna_mac);
/* Virtual memory for Multicast handle - stored by mcam module */
res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
@@ -2080,41 +2102,21 @@ bna_num_rxp_set(struct bna *bna, int num_rxp)
}
struct bna_mac *
-bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
-{
- struct list_head *qe;
-
- if (list_empty(&ucam_mod->free_q))
- return NULL;
-
- bfa_q_deq(&ucam_mod->free_q, &qe);
-
- return (struct bna_mac *)qe;
-}
-
-void
-bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
-{
- list_add_tail(&mac->qe, &ucam_mod->free_q);
-}
-
-struct bna_mac *
-bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
+bna_cam_mod_mac_get(struct list_head *head)
{
struct list_head *qe;
- if (list_empty(&mcam_mod->free_q))
+ if (list_empty(head))
return NULL;
- bfa_q_deq(&mcam_mod->free_q, &qe);
-
+ bfa_q_deq(head, &qe);
return (struct bna_mac *)qe;
}
void
-bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
+bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
{
- list_add_tail(&mac->qe, &mcam_mod->free_q);
+ list_add_tail(&mac->qe, tail);
}
struct bna_mcam_handle *
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index af3f7bb0b3b8..2702d02e98d9 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -322,6 +322,10 @@ do { \
#define BNA_CQ_EF_REMOTE (1 << 19)
#define BNA_CQ_EF_LOCAL (1 << 20)
+/* CAT2 ASIC does not use bit 21, as per the spec.
+ * Bit 31 is set in every end-of-frame completion.
+ */
+#define BNA_CQ_EF_EOP (1 << 31)
/* Data structures */
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 3c07064b2bc4..85e63546abe3 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -529,13 +529,13 @@ bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
struct list_head *qe;
int ret;
- /* Delete multicast entries previousely added */
+ /* First delete multicast entries to maintain the count */
while (!list_empty(&rxf->mcast_pending_del_q)) {
bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
if (ret)
return ret;
}
@@ -586,7 +586,7 @@ bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
ret = bna_rxf_mcast_del(rxf, mac, cleanup);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
if (ret)
return ret;
}
@@ -796,20 +796,20 @@ bna_rxf_uninit(struct bna_rxf *rxf)
while (!list_empty(&rxf->ucast_pending_add_q)) {
bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
bfa_q_qe_init(&mac->qe);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
}
if (rxf->ucast_pending_mac) {
bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
- rxf->ucast_pending_mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
+ rxf->ucast_pending_mac);
rxf->ucast_pending_mac = NULL;
}
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
bfa_q_qe_init(&mac->qe);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
}
rxf->rxmode_pending = 0;
@@ -869,7 +869,7 @@ bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
if (rxf->ucast_pending_mac == NULL) {
rxf->ucast_pending_mac =
- bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
+ bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
if (rxf->ucast_pending_mac == NULL)
return BNA_CB_UCAST_CAM_FULL;
bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
@@ -900,7 +900,7 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
return BNA_CB_SUCCESS;
}
- mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
if (mac == NULL)
return BNA_CB_MCAST_LIST_FULL;
bfa_q_qe_init(&mac->qe);
@@ -916,35 +916,92 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
}
enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
void (*cbfn)(struct bnad *, struct bna_rx *))
{
+ struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head;
struct list_head *qe;
u8 *mcaddr;
- struct bna_mac *mac;
+ struct bna_mac *mac, *del_mac;
int i;
+ /* Purge the pending_add_q */
+ while (!list_empty(&rxf->ucast_pending_add_q)) {
+ bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ }
+
+ /* Schedule active_q entries for deletion */
+ while (!list_empty(&rxf->ucast_active_q)) {
+ bfa_q_deq(&rxf->ucast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+
+ del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
+ memcpy(del_mac, mac, sizeof(*del_mac));
+ list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
+ bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ }
+
/* Allocate nodes */
INIT_LIST_HEAD(&list_head);
- for (i = 0, mcaddr = mclist; i < count; i++) {
- mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ for (i = 0, mcaddr = uclist; i < count; i++) {
+ mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
if (mac == NULL)
goto err_return;
bfa_q_qe_init(&mac->qe);
memcpy(mac->addr, mcaddr, ETH_ALEN);
list_add_tail(&mac->qe, &list_head);
-
mcaddr += ETH_ALEN;
}
+ /* Add the new entries */
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+ }
+
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+
+ return BNA_CB_SUCCESS;
+
+err_return:
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ }
+
+ return BNA_CB_UCAST_CAM_FULL;
+}
+
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head list_head;
+ struct list_head *qe;
+ u8 *mcaddr;
+ struct bna_mac *mac, *del_mac;
+ int i;
+
/* Purge the pending_add_q */
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
}
/* Schedule active_q entries for deletion */
@@ -952,7 +1009,26 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
bfa_q_deq(&rxf->mcast_active_q, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
- list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
+
+ del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
+
+ memcpy(del_mac, mac, sizeof(*del_mac));
+ list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
+ mac->handle = NULL;
+ bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+ }
+
+ /* Allocate nodes */
+ INIT_LIST_HEAD(&list_head);
+ for (i = 0, mcaddr = mclist; i < count; i++) {
+ mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
+ if (mac == NULL)
+ goto err_return;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, mcaddr, ETH_ALEN);
+ list_add_tail(&mac->qe, &list_head);
+
+ mcaddr += ETH_ALEN;
}
/* Add the new entries */
@@ -974,13 +1050,56 @@ err_return:
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
}
return BNA_CB_MCAST_LIST_FULL;
}
void
+bna_rx_mcast_delall(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head *qe;
+ struct bna_mac *mac, *del_mac;
+ int need_hw_config = 0;
+
+ /* Purge all entries from pending_add_q */
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ }
+
+ /* Schedule all entries in active_q for deletion */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+
+ del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
+
+ memcpy(del_mac, mac, sizeof(*del_mac));
+ list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
+ mac->handle = NULL;
+ bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ need_hw_config = 1;
+ }
+
+ if (need_hw_config) {
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ return;
+ }
+
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx);
+}
+
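The del_q introduced in this series exists so an active MAC entry can be returned to the free pool immediately while a copy is parked on the pending-deletion list until the firmware acknowledges the delete; bna_rx_mcast_delall() above and the listset paths rely on this to keep the free/delete accounting balanced when a whole list is replaced at once. A generic, driver-independent sketch of that copy-then-park pattern (plain singly linked lists, not the bfa queue primitives) follows.

/* Generic sketch of the copy-then-park deletion pattern: entries leave the
 * active list and return to the free pool right away, while a copy goes on a
 * separate deletion list until cleanup completes. Illustrative only.
 */
#include <stdio.h>
#include <string.h>

#define POOL_SZ 8

struct mac_ent {
	unsigned char addr[6];
	struct mac_ent *next;
};

static struct mac_ent *pop(struct mac_ent **head)
{
	struct mac_ent *e = *head;

	if (e)
		*head = e->next;
	return e;
}

static void push(struct mac_ent **head, struct mac_ent *e)
{
	e->next = *head;
	*head = e;
}

int main(void)
{
	static struct mac_ent pool[POOL_SZ], del_pool[POOL_SZ];
	struct mac_ent *free_q = NULL, *del_q = NULL;
	struct mac_ent *active_q = NULL, *pending_del_q = NULL;
	int i;

	for (i = 0; i < POOL_SZ; i++) {
		push(&free_q, &pool[i]);
		push(&del_q, &del_pool[i]);
	}

	/* pretend one MAC is currently programmed */
	push(&active_q, pop(&free_q));
	memcpy(active_q->addr, "\x00\x11\x22\x33\x44\x55", 6);

	/* replace the list: park a copy for deletion, recycle the original */
	while (active_q) {
		struct mac_ent *mac = pop(&active_q);
		struct mac_ent *del = pop(&del_q);

		memcpy(del->addr, mac->addr, sizeof(del->addr));
		push(&pending_del_q, del);
		push(&free_q, mac);
	}

	printf("pending deletes: %s\n", pending_del_q ? "yes" : "no");
	return 0;
}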
+void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
@@ -1022,7 +1141,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
return 1;
}
@@ -1062,11 +1181,13 @@ bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
if (cleanup == BNA_SOFT_CLEANUP)
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
+ mac);
else {
bna_bfi_ucast_req(rxf, mac,
BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
+ mac);
return 1;
}
}
@@ -1690,6 +1811,7 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
cfg_req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
+ cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
cfg_req->num_queue_sets = rx->num_paths;
for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
i < rx->num_paths;
@@ -1711,8 +1833,17 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
/* Large/Single RxQ */
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
&q0->qpt);
- q0->buffer_size =
- bna_enet_mtu_get(&rx->bna->enet);
+ if (q0->multi_buffer)
+ /* multi-buffer is enabled by allocating
+ * a new rx with a new set of resources.
+ * q0->buffer_size should be initialized to
+ * the fragment size.
+ */
+ cfg_req->rx_cfg.multi_buffer =
+ BNA_STATUS_T_ENABLED;
+ else
+ q0->buffer_size =
+ bna_enet_mtu_get(&rx->bna->enet);
cfg_req->q_cfg[i].ql.rx_buffer_size =
htons((u16)q0->buffer_size);
break;
@@ -2262,8 +2393,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
u32 hq_depth;
u32 dq_depth;
- dq_depth = q_cfg->q_depth;
- hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
+ dq_depth = q_cfg->q0_depth;
+ hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
cq_depth = dq_depth + hq_depth;
BNA_TO_POWER_OF_2_HIGH(cq_depth);
@@ -2380,10 +2511,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_rxq *q0;
struct bna_rxq *q1;
struct bna_intr_info *intr_info;
- u32 page_count;
+ struct bna_mem_descr *hqunmap_mem;
+ struct bna_mem_descr *dqunmap_mem;
struct bna_mem_descr *ccb_mem;
struct bna_mem_descr *rcb_mem;
- struct bna_mem_descr *unmapq_mem;
struct bna_mem_descr *cqpt_mem;
struct bna_mem_descr *cswqpt_mem;
struct bna_mem_descr *cpage_mem;
@@ -2393,8 +2524,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_mem_descr *dsqpt_mem;
struct bna_mem_descr *hpage_mem;
struct bna_mem_descr *dpage_mem;
- int i;
- int dpage_count, hpage_count, rcb_idx;
+ u32 dpage_count, hpage_count;
+ u32 hq_idx, dq_idx, rcb_idx;
+ u32 cq_depth, i;
+ u32 page_count;
if (!bna_rx_res_check(rx_mod, rx_cfg))
return NULL;
@@ -2402,7 +2535,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
- unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
+ dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
+ hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
@@ -2454,7 +2588,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
}
rx->num_paths = rx_cfg->num_paths;
- for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
+ for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
+ i < rx->num_paths; i++) {
rxp = bna_rxp_get(rx_mod);
list_add_tail(&rxp->qe, &rx->rxp_q);
rxp->type = rx_cfg->rxp_type;
@@ -2497,9 +2632,13 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
q0->rxp = rxp;
q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
- q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
- rcb_idx++;
- q0->rcb->q_depth = rx_cfg->q_depth;
+ q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
+ rcb_idx++; dq_idx++;
+ q0->rcb->q_depth = rx_cfg->q0_depth;
+ q0->q_depth = rx_cfg->q0_depth;
+ q0->multi_buffer = rx_cfg->q0_multi_buf;
+ q0->buffer_size = rx_cfg->q0_buf_size;
+ q0->num_vecs = rx_cfg->q0_num_vecs;
q0->rcb->rxq = q0;
q0->rcb->bnad = bna->bnad;
q0->rcb->id = 0;
@@ -2519,15 +2658,18 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
q1->rxp = rxp;
q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
- q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
- rcb_idx++;
- q1->rcb->q_depth = rx_cfg->q_depth;
+ q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
+ rcb_idx++; hq_idx++;
+ q1->rcb->q_depth = rx_cfg->q1_depth;
+ q1->q_depth = rx_cfg->q1_depth;
+ q1->multi_buffer = BNA_STATUS_T_DISABLED;
+ q1->num_vecs = 1;
q1->rcb->rxq = q1;
q1->rcb->bnad = bna->bnad;
q1->rcb->id = 1;
q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
rx_cfg->hds_config.forced_offset
- : rx_cfg->small_buff_size;
+ : rx_cfg->q1_buf_size;
q1->rx_packets = q1->rx_bytes = 0;
q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
@@ -2542,9 +2684,14 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
/* Setup CQ */
rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
- rxp->cq.ccb->q_depth = rx_cfg->q_depth +
- ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
- 0 : rx_cfg->q_depth);
+ cq_depth = rx_cfg->q0_depth +
+ ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
+ 0 : rx_cfg->q1_depth);
+ /* if multi-buffer is enabled, the sum of q0_depth
+ * and q1_depth need not be a power of 2
+ */
+ BNA_TO_POWER_OF_2_HIGH(cq_depth);
+ rxp->cq.ccb->q_depth = cq_depth;
rxp->cq.ccb->cq = &rxp->cq;
rxp->cq.ccb->rcb[0] = q0->rcb;
q0->rcb->ccb = rxp->cq.ccb;
@@ -2670,6 +2817,30 @@ bna_rx_cleanup_complete(struct bna_rx *rx)
bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}
+void
+bna_rx_vlan_strip_enable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
+ rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
+ rxf->vlan_strip_pending = true;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ }
+}
+
+void
+bna_rx_vlan_strip_disable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
+ rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
+ rxf->vlan_strip_pending = true;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ }
+}
+
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
enum bna_rxmode bitmask,
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index dc50f7836b6d..621547cd3504 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -109,20 +109,21 @@ enum bna_tx_res_req_type {
enum bna_rx_mem_type {
BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */
BNA_RX_RES_MEM_T_RCB = 1, /* CQ context */
- BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */
- BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */
- BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */
- BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */
- BNA_RX_RES_MEM_T_HQPT = 6, /* RX QPT */
- BNA_RX_RES_MEM_T_DQPT = 7, /* RX QPT */
- BNA_RX_RES_MEM_T_HSWQPT = 8, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_DSWQPT = 9, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_DPAGE = 10, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_HPAGE = 11, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_IBIDX = 12,
- BNA_RX_RES_MEM_T_RIT = 13,
- BNA_RX_RES_T_INTR = 14, /* Rx interrupts */
- BNA_RX_RES_T_MAX = 15
+ BNA_RX_RES_MEM_T_UNMAPHQ = 2,
+ BNA_RX_RES_MEM_T_UNMAPDQ = 3,
+ BNA_RX_RES_MEM_T_CQPT = 4,
+ BNA_RX_RES_MEM_T_CSWQPT = 5,
+ BNA_RX_RES_MEM_T_CQPT_PAGE = 6,
+ BNA_RX_RES_MEM_T_HQPT = 7,
+ BNA_RX_RES_MEM_T_DQPT = 8,
+ BNA_RX_RES_MEM_T_HSWQPT = 9,
+ BNA_RX_RES_MEM_T_DSWQPT = 10,
+ BNA_RX_RES_MEM_T_DPAGE = 11,
+ BNA_RX_RES_MEM_T_HPAGE = 12,
+ BNA_RX_RES_MEM_T_IBIDX = 13,
+ BNA_RX_RES_MEM_T_RIT = 14,
+ BNA_RX_RES_T_INTR = 15,
+ BNA_RX_RES_T_MAX = 16
};
enum bna_tx_type {
@@ -583,6 +584,8 @@ struct bna_rxq {
int buffer_size;
int q_depth;
+ u32 num_vecs;
+ enum bna_status multi_buffer;
struct bna_qpt qpt;
struct bna_rcb *rcb;
@@ -632,6 +635,8 @@ struct bna_ccb {
struct bna_rcb *rcb[2];
void *ctrl; /* For bnad */
struct bna_pkt_rate pkt_rate;
+ u32 pkts_una;
+ u32 bytes_per_intr;
/* Control path */
struct bna_cq *cq;
@@ -671,14 +676,22 @@ struct bna_rx_config {
int num_paths;
enum bna_rxp_type rxp_type;
int paused;
- int q_depth;
int coalescing_timeo;
/*
* Small/Large (or Header/Data) buffer size to be configured
- * for SLR and HDS queue type. Large buffer size comes from
- * enet->mtu.
+ * for SLR and HDS queue type.
*/
- int small_buff_size;
+ u32 frame_size;
+
+ /* header or small queue */
+ u32 q1_depth;
+ u32 q1_buf_size;
+
+ /* data or large queue */
+ u32 q0_depth;
+ u32 q0_buf_size;
+ u32 q0_num_vecs;
+ enum bna_status q0_multi_buf;
enum bna_status rss_status;
struct bna_rss_config rss_config;
@@ -866,8 +879,9 @@ struct bna_rx_mod {
/* CAM */
struct bna_ucam_mod {
- struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
+ struct bna_mac *ucmac; /* num_ucmac * 2 entries */
struct list_head free_q;
+ struct list_head del_q;
struct bna *bna;
};
@@ -880,9 +894,10 @@ struct bna_mcam_handle {
};
struct bna_mcam_mod {
- struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */
- struct bna_mcam_handle *mchandle; /* BFI_MAX_MCMAC entries */
+ struct bna_mac *mcmac; /* num_mcmac * 2 entries */
+ struct bna_mcam_handle *mchandle; /* num_mcmac entries */
struct list_head free_q;
+ struct list_head del_q;
struct list_head free_handle_q;
struct bna *bna;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 248bc37cb41b..cf64f3d0b60d 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
/*
* Global variables
*/
-u32 bnad_rxqs_per_cq = 2;
+static u32 bnad_rxqs_per_cq = 2;
static u32 bna_id;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
@@ -142,7 +142,8 @@ bnad_tx_buff_unmap(struct bnad *bnad,
dma_unmap_page(&bnad->pcidev->dev,
dma_unmap_addr(&unmap->vectors[vector], dma_addr),
- skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE);
+ dma_unmap_len(&unmap->vectors[vector], dma_len),
+ DMA_TO_DEVICE);
dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
nvecs--;
}
@@ -282,27 +283,32 @@ static int
bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
{
struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
- int mtu, order;
+ int order;
bnad_rxq_alloc_uninit(bnad, rcb);
- mtu = bna_enet_mtu_get(&bnad->bna.enet);
- order = get_order(mtu);
+ order = get_order(rcb->rxq->buffer_size);
+
+ unmap_q->type = BNAD_RXBUF_PAGE;
if (bna_is_small_rxq(rcb->id)) {
unmap_q->alloc_order = 0;
unmap_q->map_size = rcb->rxq->buffer_size;
} else {
- unmap_q->alloc_order = order;
- unmap_q->map_size =
- (rcb->rxq->buffer_size > 2048) ?
- PAGE_SIZE << order : 2048;
+ if (rcb->rxq->multi_buffer) {
+ unmap_q->alloc_order = 0;
+ unmap_q->map_size = rcb->rxq->buffer_size;
+ unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
+ } else {
+ unmap_q->alloc_order = order;
+ unmap_q->map_size =
+ (rcb->rxq->buffer_size > 2048) ?
+ PAGE_SIZE << order : 2048;
+ }
}
BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
- unmap_q->type = BNAD_RXBUF_PAGE;
-
return 0;
}
@@ -345,10 +351,10 @@ bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
for (i = 0; i < rcb->q_depth; i++) {
struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- bnad_rxq_cleanup_page(bnad, unmap);
- else
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
bnad_rxq_cleanup_skb(bnad, unmap);
+ else
+ bnad_rxq_cleanup_page(bnad, unmap);
}
bnad_rxq_alloc_uninit(bnad, rcb);
}
@@ -480,10 +486,10 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
return;
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- bnad_rxq_refill_page(bnad, rcb, to_alloc);
- else
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
bnad_rxq_refill_skb(bnad, rcb, to_alloc);
+ else
+ bnad_rxq_refill_page(bnad, rcb, to_alloc);
}
#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
@@ -500,72 +506,114 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
#define flags_udp6 (BNA_CQ_EF_IPV6 | \
BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
-static inline struct sk_buff *
-bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl,
- struct bnad_rx_unmap_q *unmap_q,
- struct bnad_rx_unmap *unmap,
- u32 length, u32 flags)
+static void
+bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
+ u32 sop_ci, u32 nvecs)
{
- struct bnad *bnad = rx_ctrl->bnad;
- struct sk_buff *skb;
+ struct bnad_rx_unmap_q *unmap_q;
+ struct bnad_rx_unmap *unmap;
+ u32 ci, vec;
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) {
- skb = napi_get_frags(&rx_ctrl->napi);
- if (unlikely(!skb))
- return NULL;
+ unmap_q = rcb->unmap_q;
+ for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
+ unmap = &unmap_q->unmap[ci];
+ BNA_QE_INDX_INC(ci, rcb->q_depth);
+
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+ bnad_rxq_cleanup_skb(bnad, unmap);
+ else
+ bnad_rxq_cleanup_page(bnad, unmap);
+ }
+}
+
+static void
+bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
+ u32 sop_ci, u32 nvecs, u32 last_fraglen)
+{
+ struct bnad *bnad;
+ u32 ci, vec, len, totlen = 0;
+ struct bnad_rx_unmap_q *unmap_q;
+ struct bnad_rx_unmap *unmap;
+
+ unmap_q = rcb->unmap_q;
+ bnad = rcb->bnad;
+
+ /* prefetch header */
+ prefetch(page_address(unmap_q->unmap[sop_ci].page) +
+ unmap_q->unmap[sop_ci].page_offset);
+
+ for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
+ unmap = &unmap_q->unmap[ci];
+ BNA_QE_INDX_INC(ci, rcb->q_depth);
dma_unmap_page(&bnad->pcidev->dev,
dma_unmap_addr(&unmap->vector, dma_addr),
unmap->vector.len, DMA_FROM_DEVICE);
+
+ len = (vec == nvecs) ?
+ last_fraglen : unmap->vector.len;
+ totlen += len;
+
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- unmap->page, unmap->page_offset, length);
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
+ unmap->page, unmap->page_offset, len);
unmap->page = NULL;
unmap->vector.len = 0;
-
- return skb;
}
- skb = unmap->skb;
- BUG_ON(!skb);
+ skb->len += totlen;
+ skb->data_len += totlen;
+ skb->truesize += totlen;
+}
+
+static inline void
+bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
+ struct bnad_rx_unmap *unmap, u32 len)
+{
+ prefetch(skb->data);
dma_unmap_single(&bnad->pcidev->dev,
dma_unmap_addr(&unmap->vector, dma_addr),
unmap->vector.len, DMA_FROM_DEVICE);
- skb_put(skb, length);
-
+ skb_put(skb, len);
skb->protocol = eth_type_trans(skb, bnad->netdev);
unmap->skb = NULL;
unmap->vector.len = 0;
- return skb;
}
static u32
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
- struct bna_cq_entry *cq, *cmpl;
+ struct bna_cq_entry *cq, *cmpl, *next_cmpl;
struct bna_rcb *rcb = NULL;
struct bnad_rx_unmap_q *unmap_q;
- struct bnad_rx_unmap *unmap;
- struct sk_buff *skb;
+ struct bnad_rx_unmap *unmap = NULL;
+ struct sk_buff *skb = NULL;
struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
- u32 packets = 0, length = 0, flags, masked_flags;
+ u32 packets = 0, len = 0, totlen = 0;
+ u32 pi, vec, sop_ci = 0, nvecs = 0;
+ u32 flags, masked_flags;
prefetch(bnad->netdev);
cq = ccb->sw_q;
cmpl = &cq[ccb->producer_index];
- while (cmpl->valid && (packets < budget)) {
- packets++;
- flags = ntohl(cmpl->flags);
- length = ntohs(cmpl->length);
+ while (packets < budget) {
+ if (!cmpl->valid)
+ break;
+ /* The 'valid' field is set by the adapter, only after writing
+ * the other fields of completion entry. Hence, do not load
+ * other fields of completion entry *before* the 'valid' is
+ * loaded. Adding the rmb() here prevents the compiler and/or
+ * CPU from reordering the reads which would potentially result
+ * in reading stale values in completion entry.
+ */
+ rmb();
+
BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
if (bna_is_small_rxq(cmpl->rxq_id))
@@ -574,25 +622,78 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
rcb = ccb->rcb[0];
unmap_q = rcb->unmap_q;
- unmap = &unmap_q->unmap[rcb->consumer_index];
- if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
- BNA_CQ_EF_FCS_ERROR |
- BNA_CQ_EF_TOO_LONG))) {
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- bnad_rxq_cleanup_page(bnad, unmap);
- else
- bnad_rxq_cleanup_skb(bnad, unmap);
+ /* start of packet ci */
+ sop_ci = rcb->consumer_index;
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
+ unmap = &unmap_q->unmap[sop_ci];
+ skb = unmap->skb;
+ } else {
+ skb = napi_get_frags(&rx_ctrl->napi);
+ if (unlikely(!skb))
+ break;
+ }
+ prefetch(skb);
+
+ flags = ntohl(cmpl->flags);
+ len = ntohs(cmpl->length);
+ totlen = len;
+ nvecs = 1;
+
+		/* Check all the completions for this frame.
+		 * If the next one is not yet valid, busy-waiting
+		 * doesn't help much; break out here.
+		 */
+ if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
+ (flags & BNA_CQ_EF_EOP) == 0) {
+ pi = ccb->producer_index;
+ do {
+ BNA_QE_INDX_INC(pi, ccb->q_depth);
+ next_cmpl = &cq[pi];
+
+ if (!next_cmpl->valid)
+ break;
+ /* The 'valid' field is set by the adapter, only
+ * after writing the other fields of completion
+ * entry. Hence, do not load other fields of
+ * completion entry *before* the 'valid' is
+ * loaded. Adding the rmb() here prevents the
+ * compiler and/or CPU from reordering the reads
+ * which would potentially result in reading
+ * stale values in completion entry.
+ */
+ rmb();
+
+ len = ntohs(next_cmpl->length);
+ flags = ntohl(next_cmpl->flags);
+
+ nvecs++;
+ totlen += len;
+ } while ((flags & BNA_CQ_EF_EOP) == 0);
+
+ if (!next_cmpl->valid)
+ break;
+ }
+
+ /* TODO: BNA_CQ_EF_LOCAL ? */
+ if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
+ BNA_CQ_EF_FCS_ERROR |
+ BNA_CQ_EF_TOO_LONG))) {
+ bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
rcb->rxq->rx_packets_with_error++;
+
goto next;
}
- skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap,
- length, flags);
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+ bnad_cq_setup_skb(bnad, skb, unmap, len);
+ else
+ bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
- if (unlikely(!skb))
- break;
+ packets++;
+ rcb->rxq->rx_packets++;
+ rcb->rxq->rx_bytes += totlen;
+ ccb->bytes_per_intr += totlen;
masked_flags = flags & flags_cksum_prot_mask;
@@ -606,21 +707,21 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
else
skb_checksum_none_assert(skb);
- rcb->rxq->rx_packets++;
- rcb->rxq->rx_bytes += length;
-
if (flags & BNA_CQ_EF_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- napi_gro_frags(&rx_ctrl->napi);
- else
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
netif_receive_skb(skb);
+ else
+ napi_gro_frags(&rx_ctrl->napi);
next:
- cmpl->valid = 0;
- BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth);
- BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+ BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
+ for (vec = 0; vec < nvecs; vec++) {
+ cmpl = &cq[ccb->producer_index];
+ cmpl->valid = 0;
+ BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+ }
cmpl = &cq[ccb->producer_index];
}
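
Both rmb() comments above describe the same publication protocol: the adapter fills a completion entry and only then sets 'valid', so the consumer must not let reads of the other fields be hoisted above the read of 'valid'. A minimal userspace sketch of that consumer side, using a C11 acquire load where the driver uses the valid check plus rmb() (the two-field entry layout and the names are illustrative only):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct cmpl_entry {
	uint32_t length;	/* written by the producer first */
	atomic_uint valid;	/* published last; cleared after consumption */
};

/* Consumer side: return 1 and the frame length if the entry has been
 * published, 0 otherwise.  The acquire load plays the role of the driver's
 * "check cmpl->valid, then rmb(), then read the other fields" sequence.
 */
static int consume_one(struct cmpl_entry *e, uint32_t *len)
{
	if (!atomic_load_explicit(&e->valid, memory_order_acquire))
		return 0;

	*len = e->length;	/* guaranteed not to be a stale value */
	atomic_store_explicit(&e->valid, 0, memory_order_release);
	return 1;
}

int main(void)
{
	static struct cmpl_entry e;
	uint32_t len;

	/* Producer side: fill the entry, then publish it. */
	e.length = 1514;
	atomic_store_explicit(&e.valid, 1, memory_order_release);

	if (consume_one(&e, &len))
		printf("completion length %u\n", len);
	return 0;
}
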
@@ -1899,8 +2000,10 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
tx_info);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- if (!tx)
+ if (!tx) {
+ err = -ENOMEM;
goto err_return;
+ }
tx_info->tx = tx;
INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
@@ -1911,7 +2014,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
err = bnad_tx_msix_register(bnad, tx_info,
tx_id, bnad->num_txq_per_tx);
if (err)
- goto err_return;
+ goto cleanup_tx;
}
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1920,6 +2023,12 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
return 0;
+cleanup_tx:
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_destroy(tx_info->tx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ tx_info->tx = NULL;
+ tx_info->tx_id = 0;
err_return:
bnad_tx_res_free(bnad, res_info);
return err;
@@ -1930,6 +2039,7 @@ err_return:
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
+ memset(rx_config, 0, sizeof(*rx_config));
rx_config->rx_type = BNA_RX_T_REGULAR;
rx_config->num_paths = bnad->num_rxp_per_rx;
rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
@@ -1950,10 +2060,39 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
memset(&rx_config->rss_config, 0,
sizeof(rx_config->rss_config));
}
+
+ rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
+ rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
+
+ /* BNA_RXP_SINGLE - one data-buffer queue
+	 * BNA_RXP_SLR - one small-buffer queue and one large-buffer queue
+	 * BNA_RXP_HDS - one header-buffer queue and one data-buffer queue
+ */
+ /* TODO: configurable param for queue type */
rx_config->rxp_type = BNA_RXP_SLR;
- rx_config->q_depth = bnad->rxq_depth;
- rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
+ if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+ rx_config->frame_size > 4096) {
+		/* though size_routing_enable is set in SLR,
+		 * small packets may get routed to the same rxq.
+		 * so set buf_size to 2048 instead of PAGE_SIZE.
+ */
+ rx_config->q0_buf_size = 2048;
+ /* this should be in multiples of 2 */
+ rx_config->q0_num_vecs = 4;
+ rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
+ rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
+ } else {
+ rx_config->q0_buf_size = rx_config->frame_size;
+ rx_config->q0_num_vecs = 1;
+ rx_config->q0_depth = bnad->rxq_depth;
+ }
+
+ /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
+ if (rx_config->rxp_type == BNA_RXP_SLR) {
+ rx_config->q1_depth = bnad->rxq_depth;
+ rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
+ }
rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}
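
bnad_init_rx_config() above derives the large-queue layout from the frame size: once the frame exceeds 4096 bytes on a CT2 device, it drops to 2048-byte buffers, four vectors per packet, and a correspondingly deeper queue. A standalone sketch of that sizing decision (BNAD_FRAME_SIZE is expanded inline, 'is_ct2' stands in for BNAD_PCI_DEV_IS_CAT2(), and the rxq_depth of 2048 is just an example value):

#include <stdio.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4
#define ETH_FCS_LEN	4
#define BNAD_FRAME_SIZE(_mtu)	(ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN)

struct rx_sizing {
	unsigned int q0_buf_size;
	unsigned int q0_num_vecs;
	unsigned int q0_depth;
	int multi_buf;
};

/* Mirrors the large-queue branch of bnad_init_rx_config() shown above. */
static struct rx_sizing size_q0(unsigned int mtu, unsigned int rxq_depth,
				int is_ct2)
{
	unsigned int frame = BNAD_FRAME_SIZE(mtu);
	struct rx_sizing s;

	if (is_ct2 && frame > 4096) {
		s.q0_buf_size = 2048;
		s.q0_num_vecs = 4;	/* kept a multiple of 2 */
		s.q0_depth = rxq_depth * s.q0_num_vecs;
		s.multi_buf = 1;
	} else {
		s.q0_buf_size = frame;
		s.q0_num_vecs = 1;
		s.q0_depth = rxq_depth;
		s.multi_buf = 0;
	}
	return s;
}

int main(void)
{
	unsigned int mtus[] = { 1500, 9000 };

	for (unsigned int i = 0; i < 2; i++) {
		struct rx_sizing s = size_q0(mtus[i], 2048, 1);

		printf("mtu %4u: buf %4u, vecs %u, depth %5u, multi-buffer %s\n",
		       mtus[i], s.q0_buf_size, s.q0_num_vecs, s.q0_depth,
		       s.multi_buf ? "on" : "off");
	}
	return 0;
}
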
@@ -1969,6 +2108,49 @@ bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
+static u32
+bnad_reinit_rx(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+ u32 err = 0, current_err = 0;
+ u32 rx_id = 0, count = 0;
+ unsigned long flags;
+
+ /* destroy and create new rx objects */
+ for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+ if (!bnad->rx_info[rx_id].rx)
+ continue;
+ bnad_destroy_rx(bnad, rx_id);
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_enet_mtu_set(&bnad->bna.enet,
+ BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+ count++;
+ current_err = bnad_setup_rx(bnad, rx_id);
+ if (current_err && !err) {
+ err = current_err;
+ pr_err("RXQ:%u setup failed\n", rx_id);
+ }
+ }
+
+ /* restore rx configuration */
+ if (bnad->rx_info[0].rx && !err) {
+ bnad_restore_vlans(bnad, 0);
+ bnad_enable_default_bcast(bnad);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ bnad_set_rx_mode(netdev);
+ }
+
+ return count;
+}
+
+/* Called with bnad_conf_lock() held */
void
bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
{
@@ -2047,13 +2229,19 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
spin_unlock_irqrestore(&bnad->bna_lock, flags);
/* Fill Unmap Q memory requirements */
- BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
- rx_config->num_paths +
- ((rx_config->rxp_type == BNA_RXP_SINGLE) ?
- 0 : rx_config->num_paths),
- ((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) +
- sizeof(struct bnad_rx_unmap_q)));
-
+ BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
+ rx_config->num_paths,
+ (rx_config->q0_depth *
+ sizeof(struct bnad_rx_unmap)) +
+ sizeof(struct bnad_rx_unmap_q));
+
+ if (rx_config->rxp_type != BNA_RXP_SINGLE) {
+ BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
+ rx_config->num_paths,
+ (rx_config->q1_depth *
+ sizeof(struct bnad_rx_unmap) +
+ sizeof(struct bnad_rx_unmap_q)));
+ }
/* Allocate resource */
err = bnad_rx_res_alloc(bnad, res_info, rx_id);
if (err)
@@ -2548,7 +2736,6 @@ bnad_open(struct net_device *netdev)
int err;
struct bnad *bnad = netdev_priv(netdev);
struct bna_pause_config pause_config;
- int mtu;
unsigned long flags;
mutex_lock(&bnad->conf_mutex);
@@ -2567,10 +2754,9 @@ bnad_open(struct net_device *netdev)
pause_config.tx_pause = 0;
pause_config.rx_pause = 0;
- mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
-
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
+ bna_enet_mtu_set(&bnad->bna.enet,
+ BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
bna_enet_enable(&bnad->bna.enet);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -2624,9 +2810,6 @@ bnad_stop(struct net_device *netdev)
bnad_destroy_tx(bnad, 0);
bnad_destroy_rx(bnad, 0);
- /* These config flags are cleared in the hardware */
- bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | BNAD_CF_PROMISC);
-
/* Synchronize mailbox IRQ */
bnad_mbox_irq_sync(bnad);
@@ -2784,21 +2967,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
}
tcb = bnad->tx_info[0].tcb[txq_id];
- q_depth = tcb->q_depth;
- prod = tcb->producer_index;
-
- unmap_q = tcb->unmap_q;
/*
* Takes care of the Tx that is scheduled between clearing the flag
* and the netif_tx_stop_all_queues() call.
*/
- if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
+ if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
dev_kfree_skb(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
return NETDEV_TX_OK;
}
+ q_depth = tcb->q_depth;
+ prod = tcb->producer_index;
+ unmap_q = tcb->unmap_q;
+
vectors = 1 + skb_shinfo(skb)->nr_frags;
wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
@@ -2863,7 +3046,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
for (i = 0, vect_id = 0; i < vectors - 1; i++) {
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- u16 size = skb_frag_size(frag);
+ u32 size = skb_frag_size(frag);
if (unlikely(size == 0)) {
/* Undo the changes starting at tcb->producer_index */
@@ -2888,10 +3071,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
0, size, DMA_TO_DEVICE);
+ dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
txqent->vector[vect_id].length = htons(size);
dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
- dma_addr);
+ dma_addr);
head_unmap->nvecs++;
}
@@ -2911,6 +3095,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
return NETDEV_TX_OK;
+ skb_tx_timestamp(skb);
+
bna_txq_prod_indx_doorbell(tcb);
smp_mb();
@@ -2937,73 +3123,133 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
return stats;
}
+static void
+bnad_set_rx_ucast_fltr(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+ int uc_count = netdev_uc_count(netdev);
+ enum bna_cb_status ret;
+ u8 *mac_list;
+ struct netdev_hw_addr *ha;
+ int entry;
+
+ if (netdev_uc_empty(bnad->netdev)) {
+ bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+ return;
+ }
+
+ if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
+ goto mode_default;
+
+ mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
+ if (mac_list == NULL)
+ goto mode_default;
+
+ entry = 0;
+ netdev_for_each_uc_addr(ha, netdev) {
+ memcpy(&mac_list[entry * ETH_ALEN],
+ &ha->addr[0], ETH_ALEN);
+ entry++;
+ }
+
+ ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
+ mac_list, NULL);
+ kfree(mac_list);
+
+ if (ret != BNA_CB_SUCCESS)
+ goto mode_default;
+
+ return;
+
+ /* ucast packets not in UCAM are routed to default function */
+mode_default:
+ bnad->cfg_flags |= BNAD_CF_DEFAULT;
+ bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+}
+
+static void
+bnad_set_rx_mcast_fltr(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+ int mc_count = netdev_mc_count(netdev);
+ enum bna_cb_status ret;
+ u8 *mac_list;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ goto mode_allmulti;
+
+ if (netdev_mc_empty(netdev))
+ return;
+
+ if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
+ goto mode_allmulti;
+
+ mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
+
+ if (mac_list == NULL)
+ goto mode_allmulti;
+
+ memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+
+ /* copy rest of the MCAST addresses */
+ bnad_netdev_mc_list_get(netdev, mac_list);
+ ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
+ mac_list, NULL);
+ kfree(mac_list);
+
+ if (ret != BNA_CB_SUCCESS)
+ goto mode_allmulti;
+
+ return;
+
+mode_allmulti:
+ bnad->cfg_flags |= BNAD_CF_ALLMULTI;
+ bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
+}
+
void
bnad_set_rx_mode(struct net_device *netdev)
{
struct bnad *bnad = netdev_priv(netdev);
- u32 new_mask, valid_mask;
+ enum bna_rxmode new_mode, mode_mask;
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- new_mask = valid_mask = 0;
-
- if (netdev->flags & IFF_PROMISC) {
- if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
- new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
- valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
- bnad->cfg_flags |= BNAD_CF_PROMISC;
- }
- } else {
- if (bnad->cfg_flags & BNAD_CF_PROMISC) {
- new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
- valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
- bnad->cfg_flags &= ~BNAD_CF_PROMISC;
- }
- }
-
- if (netdev->flags & IFF_ALLMULTI) {
- if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
- new_mask |= BNA_RXMODE_ALLMULTI;
- valid_mask |= BNA_RXMODE_ALLMULTI;
- bnad->cfg_flags |= BNAD_CF_ALLMULTI;
- }
- } else {
- if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
- new_mask &= ~BNA_RXMODE_ALLMULTI;
- valid_mask |= BNA_RXMODE_ALLMULTI;
- bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
- }
+ if (bnad->rx_info[0].rx == NULL) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return;
}
- if (bnad->rx_info[0].rx == NULL)
- goto unlock;
+ /* clear bnad flags to update it with new settings */
+ bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
+ BNAD_CF_ALLMULTI);
- bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
+ new_mode = 0;
+ if (netdev->flags & IFF_PROMISC) {
+ new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
+ bnad->cfg_flags |= BNAD_CF_PROMISC;
+ } else {
+ bnad_set_rx_mcast_fltr(bnad);
- if (!netdev_mc_empty(netdev)) {
- u8 *mcaddr_list;
- int mc_count = netdev_mc_count(netdev);
+ if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
+ new_mode |= BNA_RXMODE_ALLMULTI;
- /* Index 0 holds the broadcast address */
- mcaddr_list =
- kzalloc((mc_count + 1) * ETH_ALEN,
- GFP_ATOMIC);
- if (!mcaddr_list)
- goto unlock;
+ bnad_set_rx_ucast_fltr(bnad);
- memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+ if (bnad->cfg_flags & BNAD_CF_DEFAULT)
+ new_mode |= BNA_RXMODE_DEFAULT;
+ }
- /* Copy rest of the MC addresses */
- bnad_netdev_mc_list_get(netdev, mcaddr_list);
+ mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
+ BNA_RXMODE_ALLMULTI;
+ bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
- bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
- mcaddr_list, NULL);
+ if (bnad->cfg_flags & BNAD_CF_PROMISC)
+ bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
+ else
+ bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
- /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
- kfree(mcaddr_list);
- }
-unlock:
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
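
Both new filter helpers above flatten the netdev address list into one contiguous buffer of 6-byte entries before handing it to the BNA layer. A standalone sketch of that packing, with two arbitrary addresses:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
	const unsigned char addrs[][ETH_ALEN] = {
		{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
		{ 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb },
	};
	unsigned int count = sizeof(addrs) / sizeof(addrs[0]);

	/* One flat buffer of count * ETH_ALEN bytes, entry i at offset
	 * i * ETH_ALEN - the same layout bnad_set_rx_ucast_fltr() builds
	 * before calling bna_rx_ucast_listset().
	 */
	unsigned char *mac_list = calloc(count, ETH_ALEN);
	if (!mac_list)
		return 1;

	for (unsigned int i = 0; i < count; i++)
		memcpy(&mac_list[i * ETH_ALEN], addrs[i], ETH_ALEN);

	for (unsigned int i = 0; i < count; i++)
		printf("entry %u: %02x:%02x:%02x:%02x:%02x:%02x\n", i,
		       mac_list[i * ETH_ALEN + 0], mac_list[i * ETH_ALEN + 1],
		       mac_list[i * ETH_ALEN + 2], mac_list[i * ETH_ALEN + 3],
		       mac_list[i * ETH_ALEN + 4], mac_list[i * ETH_ALEN + 5]);
	free(mac_list);
	return 0;
}
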
@@ -3033,14 +3279,14 @@ bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
}
static int
-bnad_mtu_set(struct bnad *bnad, int mtu)
+bnad_mtu_set(struct bnad *bnad, int frame_size)
{
unsigned long flags;
init_completion(&bnad->bnad_completions.mtu_comp);
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
+ bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
wait_for_completion(&bnad->bnad_completions.mtu_comp);
@@ -3051,18 +3297,34 @@ bnad_mtu_set(struct bnad *bnad, int mtu)
static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
- int err, mtu = netdev->mtu;
+ int err, mtu;
struct bnad *bnad = netdev_priv(netdev);
+ u32 rx_count = 0, frame, new_frame;
if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
return -EINVAL;
mutex_lock(&bnad->conf_mutex);
+ mtu = netdev->mtu;
netdev->mtu = new_mtu;
- mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
- err = bnad_mtu_set(bnad, mtu);
+ frame = BNAD_FRAME_SIZE(mtu);
+ new_frame = BNAD_FRAME_SIZE(new_mtu);
+
+ /* check if multi-buffer needs to be enabled */
+ if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+ netif_running(bnad->netdev)) {
+ /* only when transition is over 4K */
+ if ((frame <= 4096 && new_frame > 4096) ||
+ (frame > 4096 && new_frame <= 4096))
+ rx_count = bnad_reinit_rx(bnad);
+ }
+
+ /* rx_count > 0 - new rx created
+	 *	- Linux sets err = 0 and returns
+ */
+ err = bnad_mtu_set(bnad, new_frame);
if (err)
err = -EBUSY;
@@ -3262,7 +3524,6 @@ bnad_uninit(struct bnad *bnad)
if (bnad->bar0)
iounmap(bnad->bar0);
- pci_set_drvdata(bnad->pcidev, NULL);
}
/*
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index f7e033f8a00e..2842c188e0da 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -71,7 +71,7 @@ struct bnad_rx_ctrl {
#define BNAD_NAME "bna"
#define BNAD_NAME_LEN 64
-#define BNAD_VERSION "3.2.21.1"
+#define BNAD_VERSION "3.2.23.0"
#define BNAD_MAILBOX_MSIX_INDEX 0
#define BNAD_MAILBOX_MSIX_VECTORS 1
@@ -84,7 +84,7 @@ struct bnad_rx_ctrl {
#define BNAD_IOCETH_TIMEOUT 10000
#define BNAD_MIN_Q_DEPTH 512
-#define BNAD_MAX_RXQ_DEPTH 2048
+#define BNAD_MAX_RXQ_DEPTH 16384
#define BNAD_MAX_TXQ_DEPTH 2048
#define BNAD_JUMBO_MTU 9000
@@ -105,6 +105,9 @@ struct bnad_rx_ctrl {
#define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx)
#define BNAD_NUM_RXP (bnad->num_rx * bnad->num_rxp_per_rx)
+#define BNAD_FRAME_SIZE(_mtu) \
+ (ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN)
+
/*
* DATA STRUCTURES
*/
@@ -219,6 +222,7 @@ struct bnad_rx_info {
struct bnad_tx_vector {
DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
};
struct bnad_tx_unmap {
@@ -234,33 +238,38 @@ struct bnad_rx_vector {
struct bnad_rx_unmap {
struct page *page;
- u32 page_offset;
struct sk_buff *skb;
struct bnad_rx_vector vector;
+ u32 page_offset;
};
enum bnad_rxbuf_type {
BNAD_RXBUF_NONE = 0,
- BNAD_RXBUF_SKB = 1,
+ BNAD_RXBUF_SK_BUFF = 1,
BNAD_RXBUF_PAGE = 2,
- BNAD_RXBUF_MULTI = 3
+ BNAD_RXBUF_MULTI_BUFF = 3
};
-#define BNAD_RXBUF_IS_PAGE(_type) ((_type) == BNAD_RXBUF_PAGE)
+#define BNAD_RXBUF_IS_SK_BUFF(_type) ((_type) == BNAD_RXBUF_SK_BUFF)
+#define BNAD_RXBUF_IS_MULTI_BUFF(_type) ((_type) == BNAD_RXBUF_MULTI_BUFF)
struct bnad_rx_unmap_q {
int reuse_pi;
int alloc_order;
u32 map_size;
enum bnad_rxbuf_type type;
- struct bnad_rx_unmap unmap[0];
+ struct bnad_rx_unmap unmap[0] ____cacheline_aligned;
};
+#define BNAD_PCI_DEV_IS_CAT2(_bnad) \
+ ((_bnad)->pcidev->device == BFA_PCI_DEVICE_ID_CT2)
+
/* Bit mask values for bnad->cfg_flags */
#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */
#define BNAD_CF_PROMISC 0x02
#define BNAD_CF_ALLMULTI 0x04
-#define BNAD_CF_MSIX 0x08 /* If in MSIx mode */
+#define BNAD_CF_DEFAULT 0x08
+#define BNAD_CF_MSIX 0x10 /* If in MSIx mode */
/* Defines for run_flags bit-mask */
/* Set, tested & cleared using xxx_bit() functions */
@@ -367,7 +376,6 @@ struct bnad_drvinfo {
* EXTERN VARIABLES
*/
extern const struct firmware *bfi_fw;
-extern u32 bnad_rxqs_per_cq;
/*
* EXTERN PROTOTYPES
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 455b5a2e59d4..f9e150825bb5 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -1131,6 +1131,7 @@ static const struct ethtool_ops bnad_ethtool_ops = {
.get_eeprom = bnad_get_eeprom,
.set_eeprom = bnad_set_eeprom,
.flash_device = bnad_flash_device,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 43405f654b4a..b3ff6d507951 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -37,8 +37,8 @@
extern char bfa_version[];
-#define CNA_FW_FILE_CT "ctfw-3.2.1.1.bin"
-#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.1.bin"
+#define CNA_FW_FILE_CT "ctfw-3.2.3.0.bin"
+#define CNA_FW_FILE_CT2 "ct2fw-3.2.3.0.bin"
#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
#pragma pack(1)
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 92578690f6de..3190d38e16fb 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -17,6 +17,7 @@
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
@@ -203,6 +204,47 @@ static int macb_mdio_reset(struct mii_bus *bus)
return 0;
}
+/**
+ * macb_set_tx_clk() - Set a clock to a new frequency
+ * @clk: Pointer to the clock to change
+ * @speed: New link speed (SPEED_10, SPEED_100 or SPEED_1000)
+ * @dev: Pointer to the struct net_device
+ */
+static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
+{
+ long ferr, rate, rate_rounded;
+
+ switch (speed) {
+ case SPEED_10:
+ rate = 2500000;
+ break;
+ case SPEED_100:
+ rate = 25000000;
+ break;
+ case SPEED_1000:
+ rate = 125000000;
+ break;
+ default:
+ return;
+ }
+
+ rate_rounded = clk_round_rate(clk, rate);
+ if (rate_rounded < 0)
+ return;
+
+ /* RGMII allows 50 ppm frequency error. Test and warn if this limit
+ * is not satisfied.
+ */
+ ferr = abs(rate_rounded - rate);
+ ferr = DIV_ROUND_UP(ferr, rate / 100000);
+ if (ferr > 5)
+ netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
+ rate);
+
+ if (clk_set_rate(clk, rate_rounded))
+ netdev_err(dev, "adjusting tx_clk failed.\n");
+}
+
static void macb_handle_link_change(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
@@ -250,6 +292,9 @@ static void macb_handle_link_change(struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
+ if (!IS_ERR(bp->tx_clk))
+ macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
+
if (status_change) {
if (phydev->link) {
netif_carrier_on(dev);
@@ -1790,21 +1835,44 @@ static int __init macb_probe(struct platform_device *pdev)
spin_lock_init(&bp->lock);
INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
- bp->pclk = clk_get(&pdev->dev, "pclk");
+ bp->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(bp->pclk)) {
- dev_err(&pdev->dev, "failed to get macb_clk\n");
+ err = PTR_ERR(bp->pclk);
+ dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
goto err_out_free_dev;
}
- clk_prepare_enable(bp->pclk);
- bp->hclk = clk_get(&pdev->dev, "hclk");
+ bp->hclk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR(bp->hclk)) {
- dev_err(&pdev->dev, "failed to get hclk\n");
- goto err_out_put_pclk;
+ err = PTR_ERR(bp->hclk);
+ dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
+ goto err_out_free_dev;
}
- clk_prepare_enable(bp->hclk);
- bp->regs = ioremap(regs->start, resource_size(regs));
+ bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+
+ err = clk_prepare_enable(bp->pclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
+ goto err_out_free_dev;
+ }
+
+ err = clk_prepare_enable(bp->hclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
+ goto err_out_disable_pclk;
+ }
+
+ if (!IS_ERR(bp->tx_clk)) {
+ err = clk_prepare_enable(bp->tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n",
+ err);
+ goto err_out_disable_hclk;
+ }
+ }
+
+ bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
if (!bp->regs) {
dev_err(&pdev->dev, "failed to map registers, aborting.\n");
err = -ENOMEM;
@@ -1812,11 +1880,12 @@ static int __init macb_probe(struct platform_device *pdev)
}
dev->irq = platform_get_irq(pdev, 0);
- err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
+ err = devm_request_irq(&pdev->dev, dev->irq, macb_interrupt, 0,
+ dev->name, dev);
if (err) {
dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
dev->irq, err);
- goto err_out_iounmap;
+ goto err_out_disable_clocks;
}
dev->netdev_ops = &macb_netdev_ops;
@@ -1879,7 +1948,7 @@ static int __init macb_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_free_irq;
+ goto err_out_disable_clocks;
}
err = macb_mii_init(bp);
@@ -1902,16 +1971,13 @@ static int __init macb_probe(struct platform_device *pdev)
err_out_unregister_netdev:
unregister_netdev(dev);
-err_out_free_irq:
- free_irq(dev->irq, dev);
-err_out_iounmap:
- iounmap(bp->regs);
err_out_disable_clocks:
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
+err_out_disable_hclk:
clk_disable_unprepare(bp->hclk);
- clk_put(bp->hclk);
+err_out_disable_pclk:
clk_disable_unprepare(bp->pclk);
-err_out_put_pclk:
- clk_put(bp->pclk);
err_out_free_dev:
free_netdev(dev);
err_out:
@@ -1933,12 +1999,10 @@ static int __exit macb_remove(struct platform_device *pdev)
kfree(bp->mii_bus->irq);
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
- free_irq(dev->irq, dev);
- iounmap(bp->regs);
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
- clk_put(bp->hclk);
clk_disable_unprepare(bp->pclk);
- clk_put(bp->pclk);
free_netdev(dev);
}
@@ -1946,45 +2010,49 @@ static int __exit macb_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int macb_suspend(struct platform_device *pdev, pm_message_t state)
+static int macb_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
netif_carrier_off(netdev);
netif_device_detach(netdev);
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
return 0;
}
-static int macb_resume(struct platform_device *pdev)
+static int macb_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
clk_prepare_enable(bp->pclk);
clk_prepare_enable(bp->hclk);
+ if (!IS_ERR(bp->tx_clk))
+ clk_prepare_enable(bp->tx_clk);
netif_device_attach(netdev);
return 0;
}
-#else
-#define macb_suspend NULL
-#define macb_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
+
static struct platform_driver macb_driver = {
.remove = __exit_p(macb_remove),
- .suspend = macb_suspend,
- .resume = macb_resume,
.driver = {
.name = "macb",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(macb_dt_ids),
+ .pm = &macb_pm_ops,
},
};
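
The macb_set_tx_clk() addition above rounds the requested RGMII rate to what the clock can deliver and warns when the error exceeds roughly 50 ppm. A standalone sketch of that tolerance arithmetic (the speed-to-rate mapping is copied from the hunk; the rounded rate is a made-up stand-in for a clk_round_rate() result):

#include <stdio.h>
#include <stdlib.h>

/* Tx clock rate in Hz for a given link speed in Mbit/s, matching the
 * switch in macb_set_tx_clk(); 0 for unknown speeds.
 */
static long speed_to_rate(int speed_mbps)
{
	switch (speed_mbps) {
	case 10:   return 2500000;
	case 100:  return 25000000;
	case 1000: return 125000000;
	default:   return 0;
	}
}

int main(void)
{
	long rate = speed_to_rate(1000);
	long rate_rounded = 124993750;	/* pretend clk_round_rate() result */

	/* Same arithmetic as the driver: ferr ends up in units of 10 ppm,
	 * so ferr > 5 means the error exceeds the 50 ppm RGMII allows.
	 */
	long ferr = labs(rate_rounded - rate);
	ferr = (ferr + rate / 100000 - 1) / (rate / 100000);	/* DIV_ROUND_UP */

	printf("target %ld Hz, rounded %ld Hz, error <= %ld ppm -> %s\n",
	       rate, rate_rounded, ferr * 10,
	       ferr > 5 ? "warn" : "within RGMII tolerance");
	return 0;
}
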
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index f4076155bed7..51c02442160a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -572,6 +572,7 @@ struct macb {
struct platform_device *pdev;
struct clk *pclk;
struct clk *hclk;
+ struct clk *tx_clk;
struct net_device *dev;
struct napi_struct napi;
struct work_struct tx_error_task;
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 4fc5c8ef5121..d2a183c3a6ce 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -14,7 +14,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 8abb46b39032..53b1f9478383 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
@@ -50,7 +49,6 @@
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/crc32.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/pci_ids.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h
index 1f095a9fc739..a4d2a4c08d3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cphy.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
index e36d45b78cc7..5249686afe71 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 1d021059f097..0fe7ff750d77 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
@@ -38,7 +37,6 @@
#include "common.h"
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb/elmer0.h b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
index eef655c827d9..81526ad36339 100644
--- a/drivers/net/ethernet/chelsio/cxgb/elmer0.h
+++ b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.c b/drivers/net/ethernet/chelsio/cxgb/espi.c
index 639ff1955739..3e182eee799e 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.c
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.h b/drivers/net/ethernet/chelsio/cxgb/espi.h
index 5694aad4fbc0..162de5259df9 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.h
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h
index d42337457cf7..dfa77491a910 100644
--- a/drivers/net/ethernet/chelsio/cxgb/gmac.h
+++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
index f7136b2fd1e5..d0cf611551a1 100644
--- a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index eb33a31b08a0..ec5e05052d99 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/regs.h b/drivers/net/ethernet/chelsio/cxgb/regs.h
index c80bf4d6d0a6..964ce59ee169 100644
--- a/drivers/net/ethernet/chelsio/cxgb/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/regs.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 8061fb0ef7ed..4c5879389003 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
@@ -47,7 +46,6 @@
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/tcp.h>
#include <linux/ip.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index b9bf16b385f7..a1ba591b3431 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index e0a03a31e7c4..816719314cc8 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
index d0f87d82566a..7f79cc7ceb75 100644
--- a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index 8c82248ce416..442480982d3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -36,7 +36,6 @@
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 76ae09999b5b..c0a9dd55f4e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -182,7 +182,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
for_each_port(adapter, i) {
struct net_device *dev = adapter->port[i];
- if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
+ if (ether_addr_equal(dev->dev_addr, mac)) {
rcu_read_lock();
if (vlan && vlan != VLAN_VID_MASK) {
dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 8d53438638b2..5f226eda8cd6 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -429,7 +429,7 @@ found:
} else {
e->state = neigh->nud_state & NUD_CONNECTED ?
L2T_STATE_VALID : L2T_STATE_STALE;
- if (memcmp(e->dmac, neigh->ha, 6))
+ if (!ether_addr_equal(e->dmac, neigh->ha))
setup_l2e_send_pending(dev, NULL, e);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6c9308850453..1f4b9b30b9ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -50,13 +50,13 @@
#include "cxgb4_uld.h"
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x06
-#define T4FW_VERSION_MICRO 0x18
+#define T4FW_VERSION_MINOR 0x09
+#define T4FW_VERSION_MICRO 0x17
#define T4FW_VERSION_BUILD 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x08
-#define T5FW_VERSION_MICRO 0x1C
+#define T5FW_VERSION_MINOR 0x09
+#define T5FW_VERSION_MICRO 0x17
#define T5FW_VERSION_BUILD 0x00
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
@@ -228,6 +228,25 @@ struct tp_params {
uint32_t dack_re; /* DACK timer resolution */
unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
+
+ u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
+ u32 ingress_config; /* cached TP_INGRESS_CONFIG */
+
+ /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
+ * subset of the set of fields which may be present in the Compressed
+ * Filter Tuple portion of filters and TCP TCB connections. The
+ * fields which are present are controlled by the TP_VLAN_PRI_MAP.
+ * Since a variable number of fields may or may not be present, their
+ * shifted field positions within the Compressed Filter Tuple may
+ * vary, or not even be present if the field isn't selected in
+ * TP_VLAN_PRI_MAP. Since some of these fields are needed in various
+ * places we store their offsets here, or a -1 if the field isn't
+ * present.
+ */
+ int vlan_shift;
+ int vnic_shift;
+ int port_shift;
+ int protocol_shift;
};
struct vpd_params {
@@ -368,8 +387,9 @@ struct work_struct;
enum { /* adapter flags */
FULL_INIT_DONE = (1 << 0),
- USING_MSI = (1 << 1),
- USING_MSIX = (1 << 2),
+ DEV_ENABLED = (1 << 1),
+ USING_MSI = (1 << 2),
+ USING_MSIX = (1 << 3),
FW_OK = (1 << 4),
RSS_TNLALLLOOKUP = (1 << 5),
USING_SOFT_PARAMS = (1 << 6),
@@ -919,13 +939,14 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable);
int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
-int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_tp_version(struct adapter *adapter, u32 *vers);
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
const u8 *fw_data, unsigned int fw_size,
struct fw_hdr *card_fw, enum dev_state state, int *reset);
int t4_prep_adapter(struct adapter *adapter);
+int t4_init_tp_params(struct adapter *adap);
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
@@ -958,13 +979,6 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_early_init(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
-int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force);
-int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
-int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
- const u8 *fw_data, unsigned int size, int force);
-int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
- unsigned int mtype, unsigned int maddr,
- u32 *finiver, u32 *finicsum, u32 *cfcsum);
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int cache_line_size);
int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
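
The tp_params additions above cache per-field shifts for the Compressed Filter Tuple, and the newly declared t4_filter_field_shift() computes them from TP_VLAN_PRI_MAP. A generic sketch of the idea only - selected fields occupy consecutive bit ranges, unselected fields get shift -1 - with invented field names, widths and packing order (the real values come from the T4 register definitions, not from this diff):

#include <stdio.h>

enum { FIELD_VLAN, FIELD_VNIC, FIELD_PORT, FIELD_PROTO, NFIELDS };

/* Purely illustrative widths; the hardware-defined ones differ. */
static const int field_width[NFIELDS] = { 17, 17, 3, 8 };

/* Walk the fields from bit 0 upward: a selected field occupies the next
 * field_width[] bits, an unselected field is absent (shift = -1), so each
 * field's position depends on which other fields are selected.
 */
static void compute_shifts(unsigned int selected, int shift[NFIELDS])
{
	int pos = 0;

	for (int f = 0; f < NFIELDS; f++) {
		if (selected & (1u << f)) {
			shift[f] = pos;
			pos += field_width[f];
		} else {
			shift[f] = -1;
		}
	}
}

int main(void)
{
	int shift[NFIELDS];

	compute_shifts((1u << FIELD_VLAN) | (1u << FIELD_PROTO), shift);
	for (int f = 0; f < NFIELDS; f++)
		printf("field %d shift %d\n", f, shift[f]);
	return 0;
}
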
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d6b12e035a7d..43ab35fea48d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2986,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
if (stid >= 0) {
t->stid_tab[stid].data = data;
stid += t->stid_base;
- t->stids_in_use++;
+ /* IPv6 requires max of 520 bits or 16 cells in TCAM
+ * This is equivalent to 4 TIDs. With CLIP enabled it
+ * needs 2 TIDs.
+ */
+ if (family == PF_INET)
+ t->stids_in_use++;
+ else
+ t->stids_in_use += 4;
}
spin_unlock_bh(&t->stid_lock);
return stid;
@@ -3012,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
}
if (stid >= 0) {
t->stid_tab[stid].data = data;
- stid += t->stid_base;
+ stid -= t->nstids;
+ stid += t->sftid_base;
t->stids_in_use++;
}
spin_unlock_bh(&t->stid_lock);
@@ -3024,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid);
*/
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
- stid -= t->stid_base;
+ /* Is it a server filter TID? */
+ if (t->nsftids && (stid >= t->sftid_base)) {
+ stid -= t->sftid_base;
+ stid += t->nstids;
+ } else {
+ stid -= t->stid_base;
+ }
+
spin_lock_bh(&t->stid_lock);
if (family == PF_INET)
__clear_bit(stid, t->stid_bmap);
else
bitmap_release_region(t->stid_bmap, stid, 2);
t->stid_tab[stid].data = NULL;
- t->stids_in_use--;
+ if (family == PF_INET)
+ t->stids_in_use--;
+ else
+ t->stids_in_use -= 4;
spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
@@ -3134,6 +3152,7 @@ static int tid_init(struct tid_info *t)
size_t size;
unsigned int stid_bmap_size;
unsigned int natids = t->natids;
+ struct adapter *adap = container_of(t, struct adapter, tids);
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
size = t->ntids * sizeof(*t->tid_tab) +
@@ -3167,6 +3186,11 @@ static int tid_init(struct tid_info *t)
t->afree = t->atid_tab;
}
bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
+ /* Reserve stid 0 for T4/T5 adapters */
+ if (!t->stid_base &&
+ (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
+ __set_bit(0, t->stid_bmap);
+
return 0;
}
@@ -3731,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
(adap->fn * 4));
- lli.filt_mode = adap->filter_mode;
+ lli.filt_mode = adap->params.tp.vlan_pri_map;
/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
for (i = 0; i < NCHAN; i++)
lli.tx_modq[i] = i;
@@ -4179,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
adap = netdev2adap(dev);
/* Adjust stid to correct filter index */
- stid -= adap->tids.nstids;
+ stid -= adap->tids.sftid_base;
stid += adap->tids.nftids;
/* Check to make sure the filter requested is writable ...
@@ -4205,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
f->fs.val.lip[i] = val[i];
f->fs.mask.lip[i] = ~0;
}
- if (adap->filter_mode & F_PORT) {
+ if (adap->params.tp.vlan_pri_map & F_PORT) {
f->fs.val.iport = port;
f->fs.mask.iport = mask;
}
}
+ if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
+ f->fs.val.proto = IPPROTO_TCP;
+ f->fs.mask.proto = ~0;
+ }
+
f->fs.dirsteer = 1;
f->fs.iq = queue;
/* Mark filter as locked */
@@ -4237,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
adap = netdev2adap(dev);
/* Adjust stid to correct filter index */
- stid -= adap->tids.nstids;
+ stid -= adap->tids.sftid_base;
stid += adap->tids.nftids;
f = &adap->tids.ftid_tab[stid];
@@ -4259,7 +4288,15 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
struct port_info *p = netdev_priv(dev);
struct adapter *adapter = p->adapter;
+ /* Block retrieving statistics during EEH error
+ * recovery. Otherwise, the recovery might fail
+ * and the PCI device will be removed permanently
+ */
spin_lock(&adapter->stats_lock);
+ if (!netif_device_present(dev)) {
+ spin_unlock(&adapter->stats_lock);
+ return ns;
+ }
t4_get_port_stats(adapter, p->tx_chan, &stats);
spin_unlock(&adapter->stats_lock);
@@ -5092,7 +5129,7 @@ static int adap_init0(struct adapter *adap)
enum dev_state state;
u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
- int reset = 1, j;
+ int reset = 1;
/*
* Contact FW, advertising Master capability (and potentially forcing
@@ -5434,21 +5471,11 @@ static int adap_init0(struct adapter *adap)
/*
* These are finalized by FW initialization, load their values now.
*/
- v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
- adap->params.tp.tre = TIMERRESOLUTION_GET(v);
- adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
adap->params.b_wnd);
- /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
- for (j = 0; j < NCHAN; j++)
- adap->params.tp.tx_modq[j] = j;
-
- t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
- &adap->filter_mode, 1,
- TP_VLAN_PRI_MAP);
-
+ t4_init_tp_params(adap);
adap->flags |= FW_OK;
return 0;
@@ -5477,16 +5504,21 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
rtnl_lock();
adap->flags &= ~FW_OK;
notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
+ spin_lock(&adap->stats_lock);
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
netif_device_detach(dev);
netif_carrier_off(dev);
}
+ spin_unlock(&adap->stats_lock);
if (adap->flags & FULL_INIT_DONE)
cxgb_down(adap);
rtnl_unlock();
- pci_disable_device(pdev);
+ if ((adap->flags & DEV_ENABLED)) {
+ pci_disable_device(pdev);
+ adap->flags &= ~DEV_ENABLED;
+ }
out: return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
@@ -5503,9 +5535,13 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
- if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
- return PCI_ERS_RESULT_DISCONNECT;
+ if (!(adap->flags & DEV_ENABLED)) {
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Cannot reenable PCI "
+ "device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ adap->flags |= DEV_ENABLED;
}
pci_set_master(pdev);
@@ -5891,6 +5927,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_device;
}
+ /* PCI device has been enabled */
+ adapter->flags |= DEV_ENABLED;
+
adapter->regs = pci_ioremap_bar(pdev, 0);
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
@@ -6124,10 +6163,13 @@ static void remove_one(struct pci_dev *pdev)
iounmap(adapter->regs);
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
- kfree(adapter);
pci_disable_pcie_error_reporting(pdev);
- pci_disable_device(pdev);
+ if ((adapter->flags & DEV_ENABLED)) {
+ pci_disable_device(pdev);
+ adapter->flags &= ~DEV_ENABLED;
+ }
pci_release_regions(pdev);
+ kfree(adapter);
} else
pci_release_regions(pdev);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 6f21f2451c30..4dd0a82533e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
{
- stid -= t->stid_base;
+ /* Is it a server filter TID? */
+ if (t->nsftids && (stid >= t->sftid_base)) {
+ stid -= t->sftid_base;
+ stid += t->nstids;
+ } else {
+ stid -= t->stid_base;
+ }
+
return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
}
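
The lookup_stid() and cxgb4_free_stid() changes both translate a server-filter TID into the index space that follows the regular server TIDs in the one shared table. A standalone sketch of that translation with made-up table sizes (nstids, stid_base and sftid_base are illustrative values, not real hardware parameters):

#include <stdio.h>

/* Illustrative layout: 2048 regular server TIDs starting at 128,
 * followed by 64 server-filter TIDs starting at 4096.
 */
#define STID_BASE	128
#define NSTIDS		2048
#define SFTID_BASE	4096
#define NSFTIDS		64

/* Map an externally visible stid onto an index into one table of
 * NSTIDS + NSFTIDS entries, the way lookup_stid() now does.
 */
static int stid_to_index(unsigned int stid)
{
	if (NSFTIDS && stid >= SFTID_BASE)
		return stid - SFTID_BASE + NSTIDS;	/* server filter TID */
	return stid - STID_BASE;			/* regular server TID */
}

int main(void)
{
	printf("stid %u -> index %d\n", 130u, stid_to_index(130));	/* 2 */
	printf("stid %u -> index %d\n", 4097u, stid_to_index(4097));	/* 2049 */
	return 0;
}
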
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 29878098101e..81e8402a74b4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -45,6 +45,7 @@
#include "l2t.h"
#include "t4_msg.h"
#include "t4fw_api.h"
+#include "t4_regs.h"
#define VLAN_NONE 0xfff
@@ -411,6 +412,40 @@ done:
}
EXPORT_SYMBOL(cxgb4_l2t_get);
+u64 cxgb4_select_ntuple(struct net_device *dev,
+ const struct l2t_entry *l2t)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct tp_params *tp = &adap->params.tp;
+ u64 ntuple = 0;
+
+ /* Initialize each of the fields we care about that are present
+ * in the Compressed Filter Tuple.
+ */
+ if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
+ ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
+
+ if (tp->port_shift >= 0)
+ ntuple |= (u64)l2t->lport << tp->port_shift;
+
+ if (tp->protocol_shift >= 0)
+ ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
+
+ if (tp->vnic_shift >= 0) {
+ u32 viid = cxgb4_port_viid(dev);
+ u32 vf = FW_VIID_VIN_GET(viid);
+ u32 pf = FW_VIID_PFN_GET(viid);
+ u32 vld = FW_VIID_VIVLD_GET(viid);
+
+ ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
+ V_FT_VNID_ID_PF(pf) |
+ V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
+ }
+
+ return ntuple;
+}
+EXPORT_SYMBOL(cxgb4_select_ntuple);
+
/*
* Called when address resolution fails for an L2T entry to handle packets
* on the arpq head. If a packet specifies a failure handler it is invoked,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 108c0f1fce1c..85eb5c71358d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
const struct net_device *physdev,
unsigned int priority);
-
+u64 cxgb4_select_ntuple(struct net_device *dev,
+ const struct l2t_entry *l2t);
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index cc380c36e1a8..47ffa64fcf19 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1630,7 +1630,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
- skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+ skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
+ PKT_HASH_TYPE_L3);
if (unlikely(pkt->vlan_ex)) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
@@ -1686,7 +1687,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
skb->protocol = eth_type_trans(skb, q->netdev);
skb_record_rx_queue(skb, q->idx);
if (skb->dev->features & NETIF_F_RXHASH)
- skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+ skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
+ PKT_HASH_TYPE_L3);
rxq->stats.pkts++;
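Both receive paths now hand the hardware RSS hash to the stack through skb_set_hash() rather than writing skb->rxhash directly, which also records what the hash covers (here PKT_HASH_TYPE_L3). A minimal sketch of how the call looks when the hash type depends on the hardware report (the l4_hash flag is an assumption for illustration; this driver always reports L3):

	/* Record the hardware RSS hash together with its coverage so RPS
	 * and flow-steering consumers know how much to trust it.
	 */
	static inline void record_rx_hash(struct sk_buff *skb, u32 hw_hash,
					  bool l4_hash)
	{
		skb_set_hash(skb, hw_hash,
			     l4_hash ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
	}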
@@ -2581,7 +2583,7 @@ static int t4_sge_init_soft(struct adapter *adap)
#undef READ_FL_BUF
if (fl_small_pg != PAGE_SIZE ||
- (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg ||
+ (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
(fl_large_pg & (fl_large_pg-1)) != 0))) {
dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
fl_small_pg, fl_large_pg);
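The relaxed check now accepts fl_large_pg == fl_small_pg and only rejects a large free-list buffer that is smaller than the small one or not a power of two. The power-of-two test is the usual (x & (x - 1)) == 0 idiom for non-zero x: 65536 & 65535 == 0 passes, while 24576 & 24575 != 0 fails.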
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 74a6fce5a15a..2c109343d570 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -32,12 +32,13 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"
+static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+ const u8 *fw_data, unsigned int size, int force);
/**
* t4_wait_op_done_val - wait until an operation is completed
* @adapter: the adapter performing the operation
@@ -1070,62 +1071,6 @@ unsigned int t4_flash_cfg_addr(struct adapter *adapter)
}
/**
- * t4_load_cfg - download config file
- * @adap: the adapter
- * @cfg_data: the cfg text file to write
- * @size: text file size
- *
- * Write the supplied config text file to the card's serial flash.
- */
-int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
-{
- int ret, i, n;
- unsigned int addr;
- unsigned int flash_cfg_start_sec;
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
-
- addr = t4_flash_cfg_addr(adap);
- flash_cfg_start_sec = addr / SF_SEC_SIZE;
-
- if (size > FLASH_CFG_MAX_SIZE) {
- dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
- FLASH_CFG_MAX_SIZE);
- return -EFBIG;
- }
-
- i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
- sf_sec_size);
- ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
- flash_cfg_start_sec + i - 1);
- /*
- * If size == 0 then we're simply erasing the FLASH sectors associated
- * with the on-adapter Firmware Configuration File.
- */
- if (ret || size == 0)
- goto out;
-
- /* this will write to the flash up to SF_PAGE_SIZE at a time */
- for (i = 0; i < size; i += SF_PAGE_SIZE) {
- if ((size - i) < SF_PAGE_SIZE)
- n = size - i;
- else
- n = SF_PAGE_SIZE;
- ret = t4_write_flash(adap, addr, n, cfg_data);
- if (ret)
- goto out;
-
- addr += SF_PAGE_SIZE;
- cfg_data += SF_PAGE_SIZE;
- }
-
-out:
- if (ret)
- dev_err(adap->pdev_dev, "config file %s failed %d\n",
- (size == 0 ? "clear" : "download"), ret);
- return ret;
-}
-
-/**
* t4_load_fw - download firmware
* @adap: the adapter
* @fw_data: the firmware image to write
@@ -2810,7 +2755,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
* be doing. The only way out of this state is to RESTART the firmware
* ...
*/
-int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
+static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
int ret = 0;
@@ -2875,7 +2820,7 @@ int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
* the chip since older firmware won't recognize the PCIE_FW.HALT
* flag and automatically RESET itself on startup.
*/
-int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
+static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
if (reset) {
/*
@@ -2938,8 +2883,8 @@ int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
* positive errno indicates that the adapter is ~probably~ intact, a
* negative errno indicates that things are looking bad ...
*/
-int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
- const u8 *fw_data, unsigned int size, int force)
+static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+ const u8 *fw_data, unsigned int size, int force)
{
const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
int reset, ret;
@@ -2964,78 +2909,6 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
return t4_fw_restart(adap, mbox, reset);
}
-
-/**
- * t4_fw_config_file - setup an adapter via a Configuration File
- * @adap: the adapter
- * @mbox: mailbox to use for the FW command
- * @mtype: the memory type where the Configuration File is located
- * @maddr: the memory address where the Configuration File is located
- * @finiver: return value for CF [fini] version
- * @finicsum: return value for CF [fini] checksum
- * @cfcsum: return value for CF computed checksum
- *
- * Issue a command to get the firmware to process the Configuration
- * File located at the specified mtype/maddress. If the Configuration
- * File is processed successfully and return value pointers are
- * provided, the Configuration File "[fini] section version and
- * checksum values will be returned along with the computed checksum.
- * It's up to the caller to decide how it wants to respond to the
- * checksums not matching but it recommended that a prominant warning
- * be emitted in order to help people rapidly identify changed or
- * corrupted Configuration Files.
- *
- * Also note that it's possible to modify things like "niccaps",
- * "toecaps",etc. between processing the Configuration File and telling
- * the firmware to use the new configuration. Callers which want to
- * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
- * Configuration Files if they want to do this.
- */
-int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
- unsigned int mtype, unsigned int maddr,
- u32 *finiver, u32 *finicsum, u32 *cfcsum)
-{
- struct fw_caps_config_cmd caps_cmd;
- int ret;
-
- /*
- * Tell the firmware to process the indicated Configuration File.
- * If there are no errors and the caller has provided return value
- * pointers for the [fini] section version, checksum and computed
- * checksum, pass those back to the caller.
- */
- memset(&caps_cmd, 0, sizeof(caps_cmd));
- caps_cmd.op_to_write =
- htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
- caps_cmd.cfvalid_to_len16 =
- htonl(FW_CAPS_CONFIG_CMD_CFVALID |
- FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
- FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
- FW_LEN16(caps_cmd));
- ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
- if (ret < 0)
- return ret;
-
- if (finiver)
- *finiver = ntohl(caps_cmd.finiver);
- if (finicsum)
- *finicsum = ntohl(caps_cmd.finicsum);
- if (cfcsum)
- *cfcsum = ntohl(caps_cmd.cfcsum);
-
- /*
- * And now tell the firmware to use the configuration we just loaded.
- */
- caps_cmd.op_to_write =
- htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE);
- caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
- return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
-}
-
/**
* t4_fixup_host_params - fix up host-dependent parameters
* @adap: the adapter
@@ -3808,6 +3681,109 @@ int t4_prep_adapter(struct adapter *adapter)
return 0;
}
+/**
+ * t4_init_tp_params - initialize adap->params.tp
+ * @adap: the adapter
+ *
+ * Initialize various fields of the adapter's TP Parameters structure.
+ */
+int t4_init_tp_params(struct adapter *adap)
+{
+ int chan;
+ u32 v;
+
+ v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
+ adap->params.tp.tre = TIMERRESOLUTION_GET(v);
+ adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
+
+ /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+ for (chan = 0; chan < NCHAN; chan++)
+ adap->params.tp.tx_modq[chan] = chan;
+
+ /* Cache the adapter's Compressed Filter Mode and global Ingress
+ * Configuration.
+ */
+ t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &adap->params.tp.vlan_pri_map, 1,
+ TP_VLAN_PRI_MAP);
+ t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &adap->params.tp.ingress_config, 1,
+ TP_INGRESS_CONFIG);
+
+ /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+ * shift positions of several elements of the Compressed Filter Tuple
+ * for this adapter which we need frequently ...
+ */
+ adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+ adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+ adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+ adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
+ F_PROTOCOL);
+
+ /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
+ * represents the presence of an Outer VLAN instead of a VNIC ID.
+ */
+ if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+ adap->params.tp.vnic_shift = -1;
+
+ return 0;
+}
+
+/**
+ * t4_filter_field_shift - calculate filter field shift
+ * @adap: the adapter
+ * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
+ *
+ * Return the shift position of a filter field within the Compressed
+ * Filter Tuple. The filter field is specified via its selection bit
+ * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
+ */
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
+{
+ unsigned int filter_mode = adap->params.tp.vlan_pri_map;
+ unsigned int sel;
+ int field_shift;
+
+ if ((filter_mode & filter_sel) == 0)
+ return -1;
+
+ for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+ switch (filter_mode & sel) {
+ case F_FCOE:
+ field_shift += W_FT_FCOE;
+ break;
+ case F_PORT:
+ field_shift += W_FT_PORT;
+ break;
+ case F_VNIC_ID:
+ field_shift += W_FT_VNIC_ID;
+ break;
+ case F_VLAN:
+ field_shift += W_FT_VLAN;
+ break;
+ case F_TOS:
+ field_shift += W_FT_TOS;
+ break;
+ case F_PROTOCOL:
+ field_shift += W_FT_PROTOCOL;
+ break;
+ case F_ETHERTYPE:
+ field_shift += W_FT_ETHERTYPE;
+ break;
+ case F_MACMATCH:
+ field_shift += W_FT_MACMATCH;
+ break;
+ case F_MPSHITTYPE:
+ field_shift += W_FT_MPSHITTYPE;
+ break;
+ case F_FRAGMENTATION:
+ field_shift += W_FT_FRAGMENTATION;
+ break;
+ }
+ }
+ return field_shift;
+}
+
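The shift of a field is simply the sum of the widths of every lower-numbered field that is also enabled in the filter mode, so the layout of the Compressed Filter Tuple follows directly from TP_VLAN_PRI_MAP and the W_FT_* widths. A standalone userspace sketch with an assumed filter mode of PORT | VLAN | PROTOCOL reproduces the arithmetic (PORT ends up at shift 0, VLAN at 3, PROTOCOL at 3 + 17 = 20):

	#include <stdio.h>

	int main(void)
	{
		/* field width per selector bit 0..9:
		 * FCOE, PORT, VNIC_ID, VLAN, TOS, PROTOCOL, ETHERTYPE,
		 * MACMATCH, MPSHITTYPE, FRAGMENTATION
		 */
		static const int width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
		unsigned int filter_mode = (1 << 1) | (1 << 3) | (1 << 5);
		int sel, lower, shift;

		for (sel = 0; sel < 10; sel++) {
			if (!(filter_mode & (1u << sel)))
				continue;
			for (shift = 0, lower = 0; lower < sel; lower++)
				if (filter_mode & (1u << lower))
					shift += width[lower];
			printf("selector bit %d -> shift %d\n", sel, shift);
		}
		return 0;	/* prints shifts 0, 3 and 20 */
	}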
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
u8 addr[6];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 0a8205d69d2c..4082522d8140 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1171,10 +1171,50 @@
#define A_TP_TX_SCHED_PCMD 0x25
+#define S_VNIC 11
+#define V_VNIC(x) ((x) << S_VNIC)
+#define F_VNIC V_VNIC(1U)
+
+#define S_FRAGMENTATION 9
+#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
+#define F_FRAGMENTATION V_FRAGMENTATION(1U)
+
+#define S_MPSHITTYPE 8
+#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
+#define F_MPSHITTYPE V_MPSHITTYPE(1U)
+
+#define S_MACMATCH 7
+#define V_MACMATCH(x) ((x) << S_MACMATCH)
+#define F_MACMATCH V_MACMATCH(1U)
+
+#define S_ETHERTYPE 6
+#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
+#define F_ETHERTYPE V_ETHERTYPE(1U)
+
+#define S_PROTOCOL 5
+#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
+#define F_PROTOCOL V_PROTOCOL(1U)
+
+#define S_TOS 4
+#define V_TOS(x) ((x) << S_TOS)
+#define F_TOS V_TOS(1U)
+
+#define S_VLAN 3
+#define V_VLAN(x) ((x) << S_VLAN)
+#define F_VLAN V_VLAN(1U)
+
+#define S_VNIC_ID 2
+#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
+#define F_VNIC_ID V_VNIC_ID(1U)
+
#define S_PORT 1
#define V_PORT(x) ((x) << S_PORT)
#define F_PORT V_PORT(1U)
+#define S_FCOE 0
+#define V_FCOE(x) ((x) << S_FCOE)
+#define F_FCOE V_FCOE(1U)
+
#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
@@ -1213,4 +1253,37 @@
#define V_CHIPID(x) ((x) << S_CHIPID)
#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
+/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present. These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define W_FT_FCOE 1
+#define W_FT_PORT 3
+#define W_FT_VNIC_ID 17
+#define W_FT_VLAN 17
+#define W_FT_TOS 8
+#define W_FT_PROTOCOL 8
+#define W_FT_ETHERTYPE 16
+#define W_FT_MACMATCH 9
+#define W_FT_MPSHITTYPE 3
+#define W_FT_FRAGMENTATION 1
+
+/* Some of the Compressed Filter Tuple fields have internal structure. These
+ * bit shifts/masks describe those structures. All shifts are relative to the
+ * base position of the fields within the Compressed Filter Tuple
+ */
+#define S_FT_VLAN_VLD 16
+#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD)
+#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U)
+
+#define S_FT_VNID_ID_VF 0
+#define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF)
+
+#define S_FT_VNID_ID_PF 7
+#define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF)
+
+#define S_FT_VNID_ID_VLD 16
+#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD)
+
#endif /* __T4_REGS_H */
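The VNIC_ID field of the tuple is itself structured: the VF number sits at shift 0, the PF number at shift 7 and the valid bit at shift 16, which is exactly what cxgb4_select_ntuple() packs via V_FT_VNID_ID_VF/PF/VLD. For example, vf = 3, pf = 2, vld = 1 packs to (3 << 0) | (2 << 7) | (1 << 16) = 0x10103 before being moved to the field's position by tp->vnic_shift.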
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 61362450d05b..f412d0fa0850 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -268,7 +268,6 @@ int t4vf_wait_dev_ready(struct adapter *);
int t4vf_port_init(struct adapter *, int);
int t4vf_fw_reset(struct adapter *);
-int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
int t4vf_get_sge_params(struct adapter *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index d958c44341b5..25dfeb8f28ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -363,8 +363,8 @@ int t4vf_fw_reset(struct adapter *adapter)
* Reads the values of firmware or device parameters. Up to 7 parameters
* can be queried at once.
*/
-int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
- const u32 *params, u32 *vals)
+static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, u32 *vals)
{
int i, ret;
struct fw_params_cmd cmd, rpl;
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index ec88de4ac162..2be2a99c5ea3 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -18,7 +18,6 @@
#include <linux/mii.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ff78dfaec508..b740bfce72ef 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1036,11 +1036,12 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, q_number);
if (netdev->features & NETIF_F_RXHASH) {
- skb->rxhash = rss_hash;
- if (rss_type & (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV4))
- skb->l4_rxhash = true;
+ skb_set_hash(skb, rss_hash,
+ (rss_type &
+ (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index 43464f0a4f99..e6a83198c3dd 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -162,7 +162,7 @@ static int enic_are_pp_different(struct enic_port_profile *pp1,
return strcmp(pp1->name, pp2->name) | !!memcmp(pp1->instance_uuid,
pp2->instance_uuid, PORT_UUID_MAX) |
!!memcmp(pp1->host_uuid, pp2->host_uuid, PORT_UUID_MAX) |
- !!memcmp(pp1->mac_addr, pp2->mac_addr, ETH_ALEN);
+ !ether_addr_equal(pp1->mac_addr, pp2->mac_addr);
}
static int enic_pp_preassociate(struct enic *enic, int vf,
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 7080ad6c4014..a1a2b4028a5c 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -23,7 +23,6 @@
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
@@ -110,8 +109,8 @@ typedef struct board_info {
u8 imr_all;
unsigned int flags;
- unsigned int in_suspend :1;
- unsigned int wake_supported :1;
+ unsigned int in_suspend:1;
+ unsigned int wake_supported:1;
enum dm9000_type type;
@@ -162,7 +161,7 @@ static inline board_info_t *to_dm9000_board(struct net_device *dev)
* Read a byte from I/O port
*/
static u8
-ior(board_info_t * db, int reg)
+ior(board_info_t *db, int reg)
{
writeb(reg, db->io_addr);
return readb(db->io_data);
@@ -173,7 +172,7 @@ ior(board_info_t * db, int reg)
*/
static void
-iow(board_info_t * db, int reg, int value)
+iow(board_info_t *db, int reg, int value)
{
writeb(reg, db->io_addr);
writeb(value, db->io_data);
@@ -745,9 +744,9 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
.get_link = dm9000_get_link,
.get_wol = dm9000_get_wol,
.set_wol = dm9000_set_wol,
- .get_eeprom_len = dm9000_get_eeprom_len,
- .get_eeprom = dm9000_get_eeprom,
- .set_eeprom = dm9000_set_eeprom,
+ .get_eeprom_len = dm9000_get_eeprom_len,
+ .get_eeprom = dm9000_get_eeprom,
+ .set_eeprom = dm9000_set_eeprom,
};
static void dm9000_show_carrier(board_info_t *db,
@@ -795,7 +794,7 @@ dm9000_poll_work(struct work_struct *w)
}
} else
mii_check_media(&db->mii, netif_msg_link(db), 0);
-
+
if (netif_running(ndev))
dm9000_schedule_poll(db);
}
@@ -1252,12 +1251,11 @@ static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
dev_info(db->dev, "wake by link status change\n");
if (wcr & WCR_SAMPLEST)
dev_info(db->dev, "wake by sample packet\n");
- if (wcr & WCR_MAGICST )
+ if (wcr & WCR_MAGICST)
dev_info(db->dev, "wake by magic packet\n");
if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
dev_err(db->dev, "wake signalled with no reason? "
"NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
-
}
spin_unlock_irqrestore(&db->lock, flags);
@@ -1314,7 +1312,7 @@ dm9000_open(struct net_device *dev)
mii_check_media(&db->mii, netif_msg_link(db), 1);
netif_start_queue(dev);
-
+
dm9000_schedule_poll(db);
return 0;
@@ -1628,7 +1626,7 @@ dm9000_probe(struct platform_device *pdev)
if (!is_valid_ether_addr(ndev->dev_addr)) {
/* try reading from mac */
-
+
mac_src = "chip";
for (i = 0; i < 6; i++)
ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index df5a892fb49c..1812f4916917 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -13,7 +13,6 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include "tulip.h"
-#include <linux/init.h>
#include <asm/unaligned.h>
diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c
index 93a4afaa09f1..dcf21a36a9cf 100644
--- a/drivers/net/ethernet/dec/tulip/media.c
+++ b/drivers/net/ethernet/dec/tulip/media.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/mii.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include "tulip.h"
@@ -458,7 +457,7 @@ void tulip_find_mii(struct net_device *dev, int board_idx)
/* Find the connected MII xcvrs.
Doing this in open() would allow detecting external xcvrs later,
but takes much time. */
- for (phyn = 1; phyn <= 32 && phy_idx < sizeof (tp->phys); phyn++) {
+ for (phyn = 1; phyn <= 32 && phy_idx < ARRAY_SIZE(tp->phys); phyn++) {
int phy = phyn & 0x1f;
int mii_status = tulip_mdio_read (dev, phy, MII_BMSR);
if ((mii_status & 0x8301) == 0x8001 ||
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index a5397b130724..aa4ee385091f 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1192,9 +1192,6 @@ static int uli526x_suspend(struct pci_dev *pdev, pm_message_t state)
ULI526X_DBUG(0, "uli526x_suspend", 0);
- if (!netdev_priv(dev))
- return 0;
-
pci_save_state(pdev);
if (!netif_running(dev))
@@ -1228,9 +1225,6 @@ static int uli526x_resume(struct pci_dev *pdev)
ULI526X_DBUG(0, "uli526x_resume", 0);
- if (!netdev_priv(dev))
- return 0;
-
pci_restore_state(pdev);
if (!netif_running(dev))
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index ab7ebac6fbea..6204cdfe43a6 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -28,7 +28,6 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 3699565704c7..7d07a0f5320d 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -25,7 +25,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index f3d60eb13c3a..8a79a32a5674 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -15,7 +15,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 5878df619b53..8d09615da585 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.9.224.0u"
+#define DRV_VER "10.0.600.0u"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -42,7 +42,7 @@
#define OC_NAME_BE OC_NAME "(be3)"
#define OC_NAME_LANCER OC_NAME "(Lancer)"
#define OC_NAME_SH OC_NAME "(Skyhawk)"
-#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver"
+#define DRV_DESC "Emulex OneConnect NIC Driver"
#define BE_VENDOR_ID 0x19a2
#define EMULEX_VENDOR_ID 0x10df
@@ -104,6 +104,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define BE3_MAX_RSS_QS 16
#define BE3_MAX_TX_QS 16
#define BE3_MAX_EVT_QS 16
+#define BE3_SRIOV_MAX_EVT_QS 8
#define MAX_RX_QS 32
#define MAX_EVT_QS 32
@@ -282,7 +283,6 @@ struct be_rx_compl_info {
u32 rss_hash;
u16 vlan_tag;
u16 pkt_size;
- u16 rxq_idx;
u16 port;
u8 vlanf;
u8 num_rcvd;
@@ -480,7 +480,7 @@ struct be_adapter {
struct list_head entry;
u32 flash_status;
- struct completion flash_compl;
+ struct completion et_cmd_compl;
struct be_resources res; /* resources available for the func */
u16 num_vfs; /* Number of VFs provisioned by PF */
@@ -492,7 +492,7 @@ struct be_adapter {
u16 pvid;
struct phy_info phy;
u8 wol_cap;
- bool wol;
+ bool wol_en;
u32 uc_macs; /* Count of secondary UC MAC programmed */
u16 asic_rev;
u16 qnq_vid;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index e0e8bc1ef14c..48076a6370c3 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -141,11 +141,17 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
subsystem = resp_hdr->subsystem;
}
+ if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
+ subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
+ complete(&adapter->et_cmd_compl);
+ return 0;
+ }
+
if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
(opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
(subsystem == CMD_SUBSYSTEM_COMMON)) {
adapter->flash_status = compl_status;
- complete(&adapter->flash_compl);
+ complete(&adapter->et_cmd_compl);
}
if (compl_status == MCC_STATUS_SUCCESS) {
@@ -1095,23 +1101,22 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
- if (lancer_chip(adapter)) {
- req->hdr.version = 1;
- req->cq_id = cpu_to_le16(cq->id);
-
- AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
- AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
- ctxt, cq->id);
- AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
- ctxt, 1);
-
- } else {
+ if (BEx_chip(adapter)) {
AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
be_encoded_q_len(mccq->len));
AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+ } else {
+ req->hdr.version = 1;
+ req->cq_id = cpu_to_le16(cq->id);
+
+ AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
+ ctxt, cq->id);
+ AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
+ ctxt, 1);
}
/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
@@ -1181,7 +1186,7 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
int status;
status = be_cmd_mccq_ext_create(adapter, mccq, cq);
- if (status && !lancer_chip(adapter)) {
+ if (status && BEx_chip(adapter)) {
dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
"or newer to avoid conflicting priorities between NIC "
"and FCoE traffic");
@@ -2017,6 +2022,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
0x3ea83c02, 0x4a110304};
int status;
+ if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
+ return 0;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -2160,7 +2168,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
be_mcc_notify(adapter);
spin_unlock_bh(&adapter->mcc_lock);
- if (!wait_for_completion_timeout(&adapter->flash_compl,
+ if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(60000)))
status = -1;
else
@@ -2255,8 +2263,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
be_mcc_notify(adapter);
spin_unlock_bh(&adapter->mcc_lock);
- if (!wait_for_completion_timeout(&adapter->flash_compl,
- msecs_to_jiffies(40000)))
+ if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+ msecs_to_jiffies(40000)))
status = -1;
else
status = adapter->flash_status;
@@ -2367,6 +2375,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_loopback_test *req;
+ struct be_cmd_resp_loopback_test *resp;
int status;
spin_lock_bh(&adapter->mcc_lock);
@@ -2381,8 +2390,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
- req->hdr.timeout = cpu_to_le32(4);
+ req->hdr.timeout = cpu_to_le32(15);
req->pattern = cpu_to_le64(pattern);
req->src_port = cpu_to_le32(port_num);
req->dest_port = cpu_to_le32(port_num);
@@ -2390,12 +2399,15 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
req->num_pkts = cpu_to_le32(num_pkts);
req->loopback_type = cpu_to_le32(loopback_type);
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
- status = le32_to_cpu(resp->status);
- }
+ be_mcc_notify(adapter);
+
+ spin_unlock_bh(&adapter->mcc_lock);
+ wait_for_completion(&adapter->et_cmd_compl);
+ resp = embedded_payload(wrb);
+ status = le32_to_cpu(resp->status);
+
+ return status;
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
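The loopback command's firmware timeout is raised from 4 to 15 seconds, which makes be_mcc_notify_wait()'s polling under mcc_lock a poor fit; instead the command is posted with be_mcc_notify(), the lock is dropped, and the caller sleeps on et_cmd_compl, which the MCC completion handler earlier in this patch now signals for LOOPBACK_TEST responses.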
@@ -2679,6 +2691,13 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
struct be_cmd_resp_get_fn_privileges *resp =
embedded_payload(wrb);
*privilege = le32_to_cpu(resp->privilege_mask);
+
+ /* In UMC mode FW does not return right privileges.
+ * Override with correct privilege equivalent to PF.
+ */
+ if (BEx_chip(adapter) && be_is_mc(adapter) &&
+ be_physfn(adapter))
+ *privilege = MAX_PRIVILEGES;
}
err:
@@ -2723,7 +2742,8 @@ err:
* If pmac_id is returned, pmac_id_valid is returned as true
*/
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
- bool *pmac_id_valid, u32 *pmac_id, u8 domain)
+ bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
+ u8 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_mac_list *req;
@@ -2761,7 +2781,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
if (*pmac_id_valid) {
req->mac_id = cpu_to_le32(*pmac_id);
- req->iface_id = cpu_to_le16(adapter->if_handle);
+ req->iface_id = cpu_to_le16(if_handle);
req->perm_override = 0;
} else {
req->perm_override = 1;
@@ -2814,17 +2834,21 @@ out:
return status;
}
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac)
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac,
+ u32 if_handle, bool active, u32 domain)
{
- bool active = true;
+ if (!active)
+ be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
+ if_handle, domain);
if (BEx_chip(adapter))
return be_cmd_mac_addr_query(adapter, mac, false,
- adapter->if_handle, curr_pmac_id);
+ if_handle, curr_pmac_id);
else
/* Fetch the MAC address using pmac_id */
return be_cmd_get_mac_from_list(adapter, mac, &active,
- &curr_pmac_id, 0);
+ &curr_pmac_id,
+ if_handle, domain);
}
int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
@@ -2843,7 +2867,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
adapter->if_handle, 0);
} else {
status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
- NULL, 0);
+ NULL, adapter->if_handle, 0);
}
return status;
@@ -2904,7 +2928,8 @@ int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
int status;
status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
- &pmac_id, dom);
+ &pmac_id, if_id, dom);
+
if (!status && active_mac)
be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
@@ -2984,7 +3009,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
ctxt, intf_id);
AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
- if (!BEx_chip(adapter)) {
+ if (!BEx_chip(adapter) && mode) {
AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
ctxt, adapter->hba_port_num);
AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
@@ -3015,14 +3040,16 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_acpi_wol_magic_config_v1 *req;
- int status;
- int payload_len = sizeof(*req);
+ int status = 0;
struct be_dma_mem cmd;
if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
CMD_SUBSYSTEM_ETH))
return -EPERM;
+ if (be_is_wol_excluded(adapter))
+ return status;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -3047,7 +3074,7 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
- payload_len, wrb, &cmd);
+ sizeof(*req), wrb, &cmd);
req->hdr.version = 1;
req->query_options = BE_GET_WOL_CAP;
@@ -3057,13 +3084,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
- /* the command could succeed misleadingly on old f/w
- * which is not aware of the V1 version. fake an error. */
- if (resp->hdr.response_length < payload_len) {
- status = -1;
- goto err;
- }
adapter->wol_cap = resp->wol_settings;
+ if (adapter->wol_cap & BE_WOL_CAP)
+ adapter->wol_en = true;
}
err:
mutex_unlock(&adapter->mbox_lock);
@@ -3072,6 +3095,76 @@ err:
return status;
}
+
+int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
+{
+ struct be_dma_mem extfat_cmd;
+ struct be_fat_conf_params *cfgs;
+ int status;
+ int i, j;
+
+ memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
+ extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
+ extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
+ &extfat_cmd.dma);
+ if (!extfat_cmd.va)
+ return -ENOMEM;
+
+ status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
+ if (status)
+ goto err;
+
+ cfgs = (struct be_fat_conf_params *)
+ (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
+ for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
+ u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
+ for (j = 0; j < num_modes; j++) {
+ if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
+ cfgs->module[i].trace_lvl[j].dbg_lvl =
+ cpu_to_le32(level);
+ }
+ }
+
+ status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
+err:
+ pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
+ extfat_cmd.dma);
+ return status;
+}
+
+int be_cmd_get_fw_log_level(struct be_adapter *adapter)
+{
+ struct be_dma_mem extfat_cmd;
+ struct be_fat_conf_params *cfgs;
+ int status, j;
+ int level = 0;
+
+ memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
+ extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
+ extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
+ &extfat_cmd.dma);
+
+ if (!extfat_cmd.va) {
+ dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
+ __func__);
+ goto err;
+ }
+
+ status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
+ if (!status) {
+ cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
+ sizeof(struct be_cmd_resp_hdr));
+ for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
+ if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
+ level = cfgs->module[0].trace_lvl[j].dbg_lvl;
+ }
+ }
+ pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
+ extfat_cmd.dma);
+err:
+ return level;
+}
+
int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
struct be_dma_mem *cmd)
{
@@ -3596,6 +3689,40 @@ int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
return status;
}
+/* Uses MBOX */
+int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
+{
+ struct be_cmd_req_get_active_profile *req;
+ struct be_mcc_wrb *wrb;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
+ wrb, NULL);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_active_profile *resp =
+ embedded_payload(wrb);
+ *profile_id = le16_to_cpu(resp->active_profile_id);
+ }
+
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 0075686276aa..fc4e076dc202 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -216,6 +216,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_FUNC_CONFIG 160
#define OPCODE_COMMON_GET_PROFILE_CONFIG 164
#define OPCODE_COMMON_SET_PROFILE_CONFIG 165
+#define OPCODE_COMMON_GET_ACTIVE_PROFILE 167
#define OPCODE_COMMON_SET_HSW_CONFIG 153
#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
#define OPCODE_COMMON_READ_OBJECT 171
@@ -452,7 +453,7 @@ struct amap_mcc_context_be {
u8 rsvd2[32];
} __packed;
-struct amap_mcc_context_lancer {
+struct amap_mcc_context_v1 {
u8 async_cq_id[16];
u8 ring_size[4];
u8 rsvd0[12];
@@ -476,7 +477,7 @@ struct be_cmd_req_mcc_ext_create {
u16 num_pages;
u16 cq_id;
u32 async_event_bitmap[1];
- u8 context[sizeof(struct amap_mcc_context_be) / 8];
+ u8 context[sizeof(struct amap_mcc_context_v1) / 8];
struct phys_addr pages[8];
} __packed;
@@ -1097,6 +1098,14 @@ struct be_cmd_resp_query_fw_cfg {
u32 function_caps;
};
+/* Is BE in a multi-channel mode */
+static inline bool be_is_mc(struct be_adapter *adapter)
+{
+ return adapter->function_mode & FLEX10_MODE ||
+ adapter->function_mode & VNIC_MODE ||
+ adapter->function_mode & UMC_ENABLED;
+}
+
/******************** RSS Config ****************************************/
/* RSS type Input parameters used to compute RX hash
* RSS_ENABLE_IPV4 SRC IPv4, DST IPv4
@@ -1917,6 +1926,17 @@ struct be_cmd_resp_set_profile_config {
struct be_cmd_resp_hdr hdr;
};
+struct be_cmd_req_get_active_profile {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+} __packed;
+
+struct be_cmd_resp_get_active_profile {
+ struct be_cmd_resp_hdr hdr;
+ u16 active_profile_id;
+ u16 next_profile_id;
+} __packed;
+
struct be_cmd_enable_disable_vf {
struct be_cmd_req_hdr hdr;
u8 enable;
@@ -2037,8 +2057,10 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
u32 vf_num);
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
- bool *pmac_id_active, u32 *pmac_id, u8 domain);
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac);
+ bool *pmac_id_active, u32 *pmac_id,
+ u32 if_handle, u8 domain);
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac,
+ u32 if_handle, bool active, u32 domain);
int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count,
u32 domain);
@@ -2048,6 +2070,8 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
u16 intf_id, u8 *mode);
int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
+int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level);
+int be_cmd_get_fw_log_level(struct be_adapter *adapter);
int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
struct be_dma_mem *cmd);
int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
@@ -2063,6 +2087,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter,
int be_cmd_get_profile_config(struct be_adapter *adapter,
struct be_resources *res, u8 domain);
int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
int vf_num);
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 08330034d9ef..05be0070f55f 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -713,12 +713,13 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (be_is_wol_supported(adapter)) {
+ if (adapter->wol_cap & BE_WOL_CAP) {
wol->supported |= WAKE_MAGIC;
- if (adapter->wol)
+ if (adapter->wol_en)
wol->wolopts |= WAKE_MAGIC;
- } else
+ } else {
wol->wolopts = 0;
+ }
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
@@ -730,15 +731,15 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
- if (!be_is_wol_supported(adapter)) {
+ if (!(adapter->wol_cap & BE_WOL_CAP)) {
dev_warn(&adapter->pdev->dev, "WOL not supported\n");
return -EOPNOTSUPP;
}
if (wol->wolopts & WAKE_MAGIC)
- adapter->wol = true;
+ adapter->wol_en = true;
else
- adapter->wol = false;
+ adapter->wol_en = false;
return 0;
}
@@ -904,73 +905,21 @@ static u32 be_get_msg_level(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (lancer_chip(adapter)) {
- dev_err(&adapter->pdev->dev, "Operation not supported\n");
- return -EOPNOTSUPP;
- }
-
return adapter->msg_enable;
}
-static void be_set_fw_log_level(struct be_adapter *adapter, u32 level)
-{
- struct be_dma_mem extfat_cmd;
- struct be_fat_conf_params *cfgs;
- int status;
- int i, j;
-
- memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
- extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
- extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
- &extfat_cmd.dma);
- if (!extfat_cmd.va) {
- dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
- __func__);
- goto err;
- }
- status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
- if (!status) {
- cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
- sizeof(struct be_cmd_resp_hdr));
- for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
- u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
- for (j = 0; j < num_modes; j++) {
- if (cfgs->module[i].trace_lvl[j].mode ==
- MODE_UART)
- cfgs->module[i].trace_lvl[j].dbg_lvl =
- cpu_to_le32(level);
- }
- }
- status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd,
- cfgs);
- if (status)
- dev_err(&adapter->pdev->dev,
- "Message level set failed\n");
- } else {
- dev_err(&adapter->pdev->dev, "Message level get failed\n");
- }
-
- pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
- extfat_cmd.dma);
-err:
- return;
-}
-
static void be_set_msg_level(struct net_device *netdev, u32 level)
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (lancer_chip(adapter)) {
- dev_err(&adapter->pdev->dev, "Operation not supported\n");
- return;
- }
-
if (adapter->msg_enable == level)
return;
if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW))
- be_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
- FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL);
+ if (BEx_chip(adapter))
+ be_cmd_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
+ FW_LOG_LEVEL_DEFAULT :
+ FW_LOG_LEVEL_FATAL);
adapter->msg_enable = level;
return;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0fde69d5cb6a..04ac9c6a0d39 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -121,12 +121,6 @@ static const char * const ue_status_hi_desc[] = {
"Unknown"
};
-/* Is BE in a multi-channel mode */
-static inline bool be_is_mc(struct be_adapter *adapter) {
- return (adapter->function_mode & FLEX10_MODE ||
- adapter->function_mode & VNIC_MODE ||
- adapter->function_mode & UMC_ENABLED);
-}
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
@@ -258,6 +252,12 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ /* Proceed further only if the user-provided MAC is different
+ * from the active MAC
+ */
+ if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
+ return 0;
+
/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
* privilege or if PF did not provision the new MAC address.
* On BE3, this cmd will always fail if the VF doesn't have the
@@ -280,14 +280,15 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
/* Decide if the new MAC is successfully activated only after
* querying the FW
*/
- status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
+ status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
+ adapter->if_handle, true, 0);
if (status)
goto err;
/* The MAC change did not happen, either due to lack of privilege
* or PF didn't pre-provision.
*/
- if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
+ if (!ether_addr_equal(addr->sa_data, mac)) {
status = -EPERM;
goto err;
}
@@ -1096,8 +1097,6 @@ static int be_vid_config(struct be_adapter *adapter)
dev_info(&adapter->pdev->dev,
"Disabling VLAN Promiscuous mode.\n");
adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
- dev_info(&adapter->pdev->dev,
- "Re-Enabling HW VLAN filtering\n");
}
}
}
@@ -1105,12 +1104,12 @@ static int be_vid_config(struct be_adapter *adapter)
return status;
set_vlan_promisc:
- dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
+ if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
+ return 0;
status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
if (!status) {
dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
- dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
adapter->flags |= BE_FLAGS_VLAN_PROMISC;
} else
dev_err(&adapter->pdev->dev,
@@ -1123,19 +1122,18 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
-
/* Packets with VID 0 are always received by Lancer by default */
if (lancer_chip(adapter) && vid == 0)
goto ret;
adapter->vlan_tag[vid] = 1;
- if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
- status = be_vid_config(adapter);
+ adapter->vlans_added++;
- if (!status)
- adapter->vlans_added++;
- else
+ status = be_vid_config(adapter);
+ if (status) {
+ adapter->vlans_added--;
adapter->vlan_tag[vid] = 0;
+ }
ret:
return status;
}
@@ -1150,9 +1148,7 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
goto ret;
adapter->vlan_tag[vid] = 0;
- if (adapter->vlans_added <= be_max_vlans(adapter))
- status = be_vid_config(adapter);
-
+ status = be_vid_config(adapter);
if (!status)
adapter->vlans_added--;
else
@@ -1442,12 +1438,12 @@ static inline bool csum_passed(struct be_rx_compl_info *rxcp)
(rxcp->ip_csum || rxcp->ipv6);
}
-static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
- u16 frag_idx)
+static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *rx_page_info;
struct be_queue_info *rxq = &rxo->q;
+ u16 frag_idx = rxq->tail;
rx_page_info = &rxo->page_info_tbl[frag_idx];
BUG_ON(!rx_page_info->page);
@@ -1459,6 +1455,7 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
rx_page_info->last_page_user = false;
}
+ queue_tail_inc(rxq);
atomic_dec(&rxq->used);
return rx_page_info;
}
@@ -1467,15 +1464,13 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
static void be_rx_compl_discard(struct be_rx_obj *rxo,
struct be_rx_compl_info *rxcp)
{
- struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info;
u16 i, num_rcvd = rxcp->num_rcvd;
for (i = 0; i < num_rcvd; i++) {
- page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
- index_inc(&rxcp->rxq_idx, rxq->len);
}
}
@@ -1486,13 +1481,12 @@ static void be_rx_compl_discard(struct be_rx_obj *rxo,
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
struct be_rx_compl_info *rxcp)
{
- struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info;
u16 i, j;
u16 hdr_len, curr_frag_len, remaining;
u8 *start;
- page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo);
start = page_address(page_info->page) + page_info->page_offset;
prefetch(start);
@@ -1526,10 +1520,9 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
}
/* More frags present for this completion */
- index_inc(&rxcp->rxq_idx, rxq->len);
remaining = rxcp->pkt_size - curr_frag_len;
for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
- page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo);
curr_frag_len = min(remaining, rx_frag_size);
/* Coalesce all frags from the same physical page in one slot */
@@ -1550,7 +1543,6 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
skb->data_len += curr_frag_len;
skb->truesize += rx_frag_size;
remaining -= curr_frag_len;
- index_inc(&rxcp->rxq_idx, rxq->len);
page_info->page = NULL;
}
BUG_ON(j > MAX_SKB_FRAGS);
@@ -1581,7 +1573,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (netdev->features & NETIF_F_RXHASH)
- skb->rxhash = rxcp->rss_hash;
+ skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
@@ -1598,7 +1590,6 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *page_info;
struct sk_buff *skb = NULL;
- struct be_queue_info *rxq = &rxo->q;
u16 remaining, curr_frag_len;
u16 i, j;
@@ -1610,7 +1601,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
remaining = rxcp->pkt_size;
for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
- page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo);
curr_frag_len = min(remaining, rx_frag_size);
@@ -1628,7 +1619,6 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
skb->truesize += rx_frag_size;
remaining -= curr_frag_len;
- index_inc(&rxcp->rxq_idx, rxq->len);
memset(page_info, 0, sizeof(*page_info));
}
BUG_ON(j > MAX_SKB_FRAGS);
@@ -1639,7 +1629,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (adapter->netdev->features & NETIF_F_RXHASH)
- skb->rxhash = rxcp->rss_hash;
+ skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
@@ -1663,8 +1653,6 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
rxcp->ipv6 =
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
- rxcp->rxq_idx =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
rxcp->num_rcvd =
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
rxcp->pkt_type =
@@ -1695,8 +1683,6 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
rxcp->ipv6 =
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
- rxcp->rxq_idx =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
rxcp->num_rcvd =
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
rxcp->pkt_type =
@@ -1776,6 +1762,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
struct be_queue_info *rxq = &rxo->q;
struct page *pagep = NULL;
+ struct device *dev = &adapter->pdev->dev;
struct be_eth_rx_d *rxd;
u64 page_dmaaddr = 0, frag_dmaaddr;
u32 posted, page_offset = 0;
@@ -1788,9 +1775,15 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
rx_stats(rxo)->rx_post_fail++;
break;
}
- page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
- 0, adapter->big_page_size,
+ page_dmaaddr = dma_map_page(dev, pagep, 0,
+ adapter->big_page_size,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, page_dmaaddr)) {
+ put_page(pagep);
+ pagep = NULL;
+ rx_stats(rxo)->rx_post_fail++;
+ break;
+ }
page_info->page_offset = 0;
} else {
get_page(pagep);
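The refill path now checks dma_map_page() for failure instead of trusting the returned address; a mapping can legitimately fail (IOMMU or bounce-buffer exhaustion) and must be detected with dma_mapping_error(). A generic sketch of the map-and-check idiom the hunk follows (names are illustrative):

	/* Map one RX page and verify the mapping before handing the
	 * address to hardware; drop the page reference on failure.
	 */
	static int map_rx_page(struct device *dev, struct page *page,
			       size_t len, dma_addr_t *out)
	{
		dma_addr_t addr = dma_map_page(dev, page, 0, len,
					       DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, addr)) {
			put_page(page);
			return -ENOMEM;
		}
		*out = addr;
		return 0;
	}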
@@ -1914,7 +1907,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
struct be_rx_compl_info *rxcp;
struct be_adapter *adapter = rxo->adapter;
int flush_wait = 0;
- u16 tail;
/* Consume pending rx completions.
* Wait for the flush completion (identified by zero num_rcvd)
@@ -1947,9 +1939,8 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
be_cq_notify(adapter, rx_cq->id, false, 0);
/* Then free posted rx buffers that were not used */
- tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
- for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
- page_info = get_rx_page_info(rxo, tail);
+ while (atomic_read(&rxq->used) > 0) {
+ page_info = get_rx_page_info(rxo);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
}
@@ -2744,13 +2735,16 @@ static int be_rx_qs_create(struct be_adapter *adapter)
if (!BEx_chip(adapter))
adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
RSS_ENABLE_UDP_IPV6;
+ } else {
+ /* Disable RSS, if only default RX Q is created */
+ adapter->rss_flags = RSS_ENABLE_NONE;
+ }
- rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
- 128);
- if (rc) {
- adapter->rss_flags = 0;
- return rc;
- }
+ rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
+ 128);
+ if (rc) {
+ adapter->rss_flags = RSS_ENABLE_NONE;
+ return rc;
}
/* First time posting */
@@ -2881,14 +2875,11 @@ static int be_vfs_mac_query(struct be_adapter *adapter)
int status, vf;
u8 mac[ETH_ALEN];
struct be_vf_cfg *vf_cfg;
- bool active = false;
for_all_vfs(adapter, vf_cfg, vf) {
- be_cmd_get_mac_from_list(adapter, mac, &active,
- &vf_cfg->pmac_id, 0);
-
- status = be_cmd_mac_addr_query(adapter, mac, false,
- vf_cfg->if_handle, 0);
+ status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
+ mac, vf_cfg->if_handle,
+ false, vf+1);
if (status)
return status;
memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
@@ -3124,11 +3115,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
{
struct pci_dev *pdev = adapter->pdev;
bool use_sriov = false;
+ int max_vfs;
- if (BE3_chip(adapter) && sriov_want(adapter)) {
- int max_vfs;
+ max_vfs = pci_sriov_get_totalvfs(pdev);
- max_vfs = pci_sriov_get_totalvfs(pdev);
+ if (BE3_chip(adapter) && sriov_want(adapter)) {
res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
use_sriov = res->max_vfs;
}
@@ -3159,7 +3150,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
res->max_rx_qs = res->max_rss_qs + 1;
- res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
+ if (be_physfn(adapter))
+ res->max_evt_qs = (max_vfs > 0) ?
+ BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
+ else
+ res->max_evt_qs = 1;
res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
@@ -3226,6 +3221,7 @@ static int be_get_resources(struct be_adapter *adapter)
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
+ u16 profile_id;
int status;
status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
@@ -3235,6 +3231,13 @@ static int be_get_config(struct be_adapter *adapter)
if (status)
return status;
+ if (be_physfn(adapter)) {
+ status = be_cmd_get_active_profile(adapter, &profile_id);
+ if (!status)
+ dev_info(&adapter->pdev->dev,
+ "Using profile 0x%x\n", profile_id);
+ }
+
status = be_get_resources(adapter);
if (status)
return status;
@@ -3389,11 +3392,6 @@ static int be_setup(struct be_adapter *adapter)
goto err;
be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
- /* In UMC mode FW does not return right privileges.
- * Override with correct privilege equivalent to PF.
- */
- if (be_is_mc(adapter))
- adapter->cmd_privileges = MAX_PRIVILEGES;
status = be_mac_setup(adapter);
if (status)
@@ -3412,6 +3410,8 @@ static int be_setup(struct be_adapter *adapter)
be_set_rx_mode(adapter->netdev);
+ be_cmd_get_acpi_wol_cap(adapter);
+
be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
@@ -4205,7 +4205,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
- init_completion(&adapter->flash_compl);
+ init_completion(&adapter->et_cmd_compl);
pci_save_state(adapter->pdev);
return 0;
@@ -4281,74 +4281,22 @@ static void be_remove(struct pci_dev *pdev)
free_netdev(adapter->netdev);
}
-bool be_is_wol_supported(struct be_adapter *adapter)
-{
- return ((adapter->wol_cap & BE_WOL_CAP) &&
- !be_is_wol_excluded(adapter)) ? true : false;
-}
-
-u32 be_get_fw_log_level(struct be_adapter *adapter)
-{
- struct be_dma_mem extfat_cmd;
- struct be_fat_conf_params *cfgs;
- int status;
- u32 level = 0;
- int j;
-
- if (lancer_chip(adapter))
- return 0;
-
- memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
- extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
- extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
- &extfat_cmd.dma);
-
- if (!extfat_cmd.va) {
- dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
- __func__);
- goto err;
- }
-
- status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
- if (!status) {
- cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
- sizeof(struct be_cmd_resp_hdr));
- for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
- if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
- level = cfgs->module[0].trace_lvl[j].dbg_lvl;
- }
- }
- pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
- extfat_cmd.dma);
-err:
- return level;
-}
-
static int be_get_initial_config(struct be_adapter *adapter)
{
- int status;
- u32 level;
+ int status, level;
status = be_cmd_get_cntl_attributes(adapter);
if (status)
return status;
- status = be_cmd_get_acpi_wol_cap(adapter);
- if (status) {
- /* in case of a failure to get wol capabillities
- * check the exclusion list to determine WOL capability */
- if (!be_is_wol_excluded(adapter))
- adapter->wol_cap |= BE_WOL_CAP;
- }
-
- if (be_is_wol_supported(adapter))
- adapter->wol = true;
-
/* Must be a power of 2 or else MODULO will BUG_ON */
adapter->be_get_temp_freq = 64;
- level = be_get_fw_log_level(adapter);
- adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
+ if (BEx_chip(adapter)) {
+ level = be_cmd_get_fw_log_level(adapter);
+ adapter->msg_enable =
+ level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
+ }
adapter->cfg_num_qs = netif_get_num_default_rss_queues();
return 0;
@@ -4611,7 +4559,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- if (adapter->wol)
+ if (adapter->wol_en)
be_setup_wol(adapter, true);
be_intr_set(adapter, false);
@@ -4667,7 +4615,7 @@ static int be_resume(struct pci_dev *pdev)
msecs_to_jiffies(1000));
netif_device_attach(netdev);
- if (adapter->wol)
+ if (adapter->wol_en)
be_setup_wol(adapter, false);
return 0;
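
The RX-refill hunk above adds the standard check for a failed streaming DMA mapping: dma_map_page() can hand back an address that only dma_mapping_error() can recognise as invalid, and such an address must never be posted to the hardware. A minimal sketch of that pattern, not part of the patch and using made-up names (my_map_rx_page() does not exist in the driver):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

/* Map one page for device-to-CPU DMA; on failure drop the page reference. */
static int my_map_rx_page(struct device *dev, struct page *pagep,
			  size_t size, dma_addr_t *busaddr)
{
	*busaddr = dma_map_page(dev, pagep, 0, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *busaddr)) {
		put_page(pagep);	/* nothing was posted, release the page */
		return -ENOMEM;
	}
	return 0;
}

A caller would treat a non-zero return the way be_post_rx_frags() does: count it as a post failure and stop refilling for now.
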
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 4de8cfd149cf..55e0fa03dc90 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -13,6 +13,7 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
+#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -51,6 +52,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
#define ETH_HASH0 0x48
#define ETH_HASH1 0x4c
#define ETH_TXCTRL 0x50
+#define ETH_END 0x54
/* mode register */
#define MODER_RXEN (1 << 0) /* receive enable */
@@ -179,6 +181,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* @membase: pointer to buffer memory region
* @dma_alloc: dma allocated buffer size
* @io_region_size: I/O memory region size
+ * @num_bd: number of buffer descriptors
* @num_tx: number of send buffers
* @cur_tx: last send buffer written
* @dty_tx: last buffer actually sent
@@ -199,6 +202,7 @@ struct ethoc {
int dma_alloc;
resource_size_t io_region_size;
+ unsigned int num_bd;
unsigned int num_tx;
unsigned int cur_tx;
unsigned int dty_tx;
@@ -216,6 +220,7 @@ struct ethoc {
struct phy_device *phy;
struct mii_bus *mdio;
+ struct clk *clk;
s8 phy_id;
};
@@ -688,6 +693,11 @@ static int ethoc_mdio_probe(struct net_device *dev)
}
priv->phy = phy;
+ phy->advertising &= ~(ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half);
+ phy->supported &= ~(SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseT_Half);
+
return 0;
}
@@ -890,6 +900,102 @@ out:
return NETDEV_TX_OK;
}
+static int ethoc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phy;
+
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+static int ethoc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phy;
+
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+static int ethoc_get_regs_len(struct net_device *netdev)
+{
+ return ETH_END;
+}
+
+static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ u32 *regs_buff = p;
+ unsigned i;
+
+ regs->version = 0;
+ for (i = 0; i < ETH_END / sizeof(u32); ++i)
+ regs_buff[i] = ethoc_read(priv, i * sizeof(u32));
+}
+
+static void ethoc_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct ethoc *priv = netdev_priv(dev);
+
+ ring->rx_max_pending = priv->num_bd - 1;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->tx_max_pending = priv->num_bd - 1;
+
+ ring->rx_pending = priv->num_rx;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+ ring->tx_pending = priv->num_tx;
+}
+
+static int ethoc_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct ethoc *priv = netdev_priv(dev);
+
+ if (ring->tx_pending < 1 || ring->rx_pending < 1 ||
+ ring->tx_pending + ring->rx_pending > priv->num_bd)
+ return -EINVAL;
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(dev)) {
+ netif_tx_disable(dev);
+ ethoc_disable_rx_and_tx(priv);
+ ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+ synchronize_irq(dev->irq);
+ }
+
+ priv->num_tx = rounddown_pow_of_two(ring->tx_pending);
+ priv->num_rx = ring->rx_pending;
+ ethoc_init_ring(priv, dev->mem_start);
+
+ if (netif_running(dev)) {
+ ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+ ethoc_enable_rx_and_tx(priv);
+ netif_wake_queue(dev);
+ }
+ return 0;
+}
+
+const struct ethtool_ops ethoc_ethtool_ops = {
+ .get_settings = ethoc_get_settings,
+ .set_settings = ethoc_set_settings,
+ .get_regs_len = ethoc_get_regs_len,
+ .get_regs = ethoc_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = ethoc_get_ringparam,
+ .set_ringparam = ethoc_set_ringparam,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct net_device_ops ethoc_netdev_ops = {
.ndo_open = ethoc_open,
.ndo_stop = ethoc_stop,
@@ -917,6 +1023,8 @@ static int ethoc_probe(struct platform_device *pdev)
int num_bd;
int ret = 0;
bool random_mac = false;
+ struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0;
/* allocate networking device */
netdev = alloc_etherdev(sizeof(struct ethoc));
@@ -1016,6 +1124,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = -ENODEV;
goto error;
}
+ priv->num_bd = num_bd;
/* num_tx must be a power of two */
priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
priv->num_rx = num_bd - priv->num_tx;
@@ -1030,8 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
}
/* Allow the platform setup code to pass in a MAC address. */
- if (dev_get_platdata(&pdev->dev)) {
- struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
} else {
@@ -1069,6 +1177,27 @@ static int ethoc_probe(struct platform_device *pdev)
if (random_mac)
netdev->addr_assign_type = NET_ADDR_RANDOM;
+ /* Allow the platform setup code to adjust MII management bus clock. */
+ if (!eth_clkfreq) {
+ struct clk *clk = devm_clk_get(&pdev->dev, NULL);
+
+ if (!IS_ERR(clk)) {
+ priv->clk = clk;
+ clk_prepare_enable(clk);
+ eth_clkfreq = clk_get_rate(clk);
+ }
+ }
+ if (eth_clkfreq) {
+ u32 clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1);
+
+ if (!clkdiv)
+ clkdiv = 2;
+ dev_dbg(&pdev->dev, "setting MII clkdiv to %u\n", clkdiv);
+ ethoc_write(priv, MIIMODER,
+ (ethoc_read(priv, MIIMODER) & MIIMODER_NOPRE) |
+ clkdiv);
+ }
+
/* register MII bus */
priv->mdio = mdiobus_alloc();
if (!priv->mdio) {
@@ -1111,6 +1240,7 @@ static int ethoc_probe(struct platform_device *pdev)
netdev->netdev_ops = &ethoc_netdev_ops;
netdev->watchdog_timeo = ETHOC_TIMEOUT;
netdev->features |= 0;
+ netdev->ethtool_ops = &ethoc_ethtool_ops;
/* setup NAPI */
netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
@@ -1133,6 +1263,8 @@ free_mdio:
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
free:
+ if (priv->clk)
+ clk_disable_unprepare(priv->clk);
free_netdev(netdev);
out:
return ret;
@@ -1157,6 +1289,8 @@ static int ethoc_remove(struct platform_device *pdev)
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
}
+ if (priv->clk)
+ clk_disable_unprepare(priv->clk);
unregister_netdev(netdev);
free_netdev(netdev);
}
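
The MDIO clock setup added to ethoc_probe() above sizes the divider from the bus clock so that the derived management clock stays at or below the 2.5 MHz MII limit: the host clock frequency is divided by 2 500 000 and one is added. For a 100 MHz clock that yields a divider of 41. A stand-alone sketch of the same arithmetic (mii_clkdiv() is an invented helper and ignores the register-field masking that MIIMODER_CLKDIV() performs):

/* Smallest divider that aims to keep the derived MDC at or below 2.5 MHz. */
static inline unsigned int mii_clkdiv(unsigned long eth_clkfreq)
{
	return eth_clkfreq / 2500000 + 1;
}

The driver additionally masks the computed value into the MIIMODER clock-divider field and falls back to 2 if the masked result would be zero.
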
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 212f44b3a773..c11ecbc98149 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -24,7 +24,6 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -767,7 +766,7 @@ static void ftgmac100_free_buffers(struct ftgmac100 *priv)
continue;
dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
- dev_kfree_skb(skb);
+ kfree_skb(skb);
}
dma_free_coherent(priv->dev, sizeof(struct ftgmac100_descs),
@@ -1149,7 +1148,7 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
netdev_dbg(netdev, "tx packet too big\n");
netdev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1160,7 +1159,7 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
netdev_err(netdev, "map socket buffer failed\n");
netdev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 0120217a16dd..3b8d6d19ff05 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -339,7 +339,8 @@ struct fec_enet_private {
void fec_ptp_init(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
-int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
+int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
+int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
/****************************************************************************/
#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e7c8b749c5a5..d4782b42401b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -29,7 +29,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -428,6 +427,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* If this was the last BD in the ring, start at the beginning again. */
bdp = fec_enet_get_nextdesc(bdp, fep);
+ skb_tx_timestamp(skb);
+
fep->cur_tx = bdp;
if (fep->cur_tx == fep->dirty_tx)
@@ -436,8 +437,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
- skb_tx_timestamp(skb);
-
return NETDEV_TX_OK;
}
@@ -1679,8 +1678,12 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
if (!phydev)
return -ENODEV;
- if (cmd == SIOCSHWTSTAMP && fep->bufdesc_ex)
- return fec_ptp_ioctl(ndev, rq, cmd);
+ if (fep->bufdesc_ex) {
+ if (cmd == SIOCSHWTSTAMP)
+ return fec_ptp_set(ndev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return fec_ptp_get(ndev, rq);
+ }
return phy_mii_ioctl(phydev, rq, cmd);
}
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 5007e4f9fff9..89ccb5b08708 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -274,7 +273,7 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
* @ifreq: ioctl data
* @cmd: particular ioctl requested
*/
-int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -321,6 +320,20 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
-EFAULT : 0;
}
+int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = (fep->hwts_rx_en ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
/**
* fec_time_keep - call timecounter_read every second to avoid timer overrun
* because ENET just support 32bit counter, will timeout in 4s
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 56f2f608a9f4..62f042d4aaa9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -24,7 +24,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index f8b92864fc52..f5383abbf399 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -20,7 +20,6 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index a9a00f39521a..fc5413488496 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -20,7 +20,6 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index d37cd4ebac65..b4bf02f57d43 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -20,7 +20,6 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 67caaacd19ec..3d3fde66c2cc 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index ac5d447ff8c4..7e69c983d12a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -21,7 +21,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index c4f65067cf7c..583e71ab7f51 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -20,7 +20,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mii.h>
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index b14d7904a075..ad5a5aadc7e1 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -70,7 +70,6 @@
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -795,8 +794,7 @@ err_grp_init:
return err;
}
-static int gfar_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
@@ -845,7 +843,20 @@ static int gfar_hwtstamp_ioctl(struct net_device *netdev,
-EFAULT : 0;
}
-/* Ioctl MII Interface */
+static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ struct gfar_private *priv = netdev_priv(netdev);
+
+ config.flags = 0;
+ config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = (priv->hwts_rx_en ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -854,7 +865,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -EINVAL;
if (cmd == SIOCSHWTSTAMP)
- return gfar_hwtstamp_ioctl(dev, rq, cmd);
+ return gfar_hwtstamp_set(dev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return gfar_hwtstamp_get(dev, rq);
if (!priv->phydev)
return -ENODEV;
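
Both the fec and gianfar hunks above wire up SIOCGHWTSTAMP the same way: the new handler rebuilds a struct hwtstamp_config from the driver's current state and copies it to user space, mirroring what the existing SIOCSHWTSTAMP path accepted. A sketch of that shape with an invented private structure (struct my_priv and its hwts_* fields are placeholders, not driver code):

#include <linux/errno.h>
#include <linux/if.h>
#include <linux/net_tstamp.h>
#include <linux/uaccess.h>

struct my_priv {
	bool hwts_tx_en;	/* TX hardware timestamping currently enabled */
	bool hwts_rx_en;	/* RX hardware timestamping currently enabled */
};

static int my_hwtstamp_get(struct my_priv *priv, struct ifreq *ifr)
{
	struct hwtstamp_config config = {
		.flags = 0,
		.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF,
		.rx_filter = priv->hwts_rx_en ? HWTSTAMP_FILTER_ALL
					      : HWTSTAMP_FILTER_NONE,
	};

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}
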
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 114c58f9d8d2..52bb2b0195cc 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -29,7 +29,6 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index d3d7ede27ef1..63d234419cc1 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -22,7 +22,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -889,11 +888,9 @@ static int gfar_set_hash_opts(struct gfar_private *priv,
static int gfar_check_filer_hardware(struct gfar_private *priv)
{
- struct gfar __iomem *regs = NULL;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 i;
- regs = priv->gfargrp[0].regs;
-
/* Check if we are in FIFO mode */
i = gfar_read(&regs->ecntrl);
i &= ECNTRL_FIFM;
@@ -927,7 +924,7 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
/* Sets the properties for arbitrary filer rule
* to the first 4 Layer 4 Bytes
*/
- regs->rbifx = 0xC0C1C2C3;
+ gfar_write(&regs->rbifx, 0xC0C1C2C3);
return 0;
}
@@ -1055,10 +1052,18 @@ static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
struct ethtool_tcpip4_spec *mask,
struct filer_table *tab)
{
- gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
- gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
- gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab);
- gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab);
+ gfar_set_attribute(be32_to_cpu(value->ip4src),
+ be32_to_cpu(mask->ip4src),
+ RQFCR_PID_SIA, tab);
+ gfar_set_attribute(be32_to_cpu(value->ip4dst),
+ be32_to_cpu(mask->ip4dst),
+ RQFCR_PID_DIA, tab);
+ gfar_set_attribute(be16_to_cpu(value->pdst),
+ be16_to_cpu(mask->pdst),
+ RQFCR_PID_DPT, tab);
+ gfar_set_attribute(be16_to_cpu(value->psrc),
+ be16_to_cpu(mask->psrc),
+ RQFCR_PID_SPT, tab);
gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
}
@@ -1067,12 +1072,17 @@ static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
struct ethtool_usrip4_spec *mask,
struct filer_table *tab)
{
- gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
- gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
+ gfar_set_attribute(be32_to_cpu(value->ip4src),
+ be32_to_cpu(mask->ip4src),
+ RQFCR_PID_SIA, tab);
+ gfar_set_attribute(be32_to_cpu(value->ip4dst),
+ be32_to_cpu(mask->ip4dst),
+ RQFCR_PID_DIA, tab);
gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
- gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
- tab);
+ gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
+ be32_to_cpu(mask->l4_4_bytes),
+ RQFCR_PID_ARB, tab);
}
@@ -1139,7 +1149,41 @@ static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
}
}
- gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
+ gfar_set_attribute(be16_to_cpu(value->h_proto),
+ be16_to_cpu(mask->h_proto),
+ RQFCR_PID_ETY, tab);
+}
+
+static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
+}
+
+static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
+}
+
+static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
+}
+
+static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
+}
+
+static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
+{
+ return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+}
+
+static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
+{
+ return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
}
/* Convert a rule to binary filter format of gianfar */
@@ -1153,22 +1197,21 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
u32 old_index = tab->index;
/* Check if vlan is wanted */
- if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
+ if ((rule->flow_type & FLOW_EXT) &&
+ (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
if (!rule->m_ext.vlan_tci)
- rule->m_ext.vlan_tci = 0xFFFF;
+ rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);
vlan = RQFPR_VLN;
vlan_mask = RQFPR_VLN;
/* Separate the fields */
- id = rule->h_ext.vlan_tci & VLAN_VID_MASK;
- id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
- cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
- cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
- prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >>
- VLAN_PRIO_SHIFT;
- prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >>
- VLAN_PRIO_SHIFT;
+ id = vlan_tci_vid(rule);
+ id_mask = vlan_tci_vidm(rule);
+ cfi = vlan_tci_cfi(rule);
+ cfi_mask = vlan_tci_cfim(rule);
+ prio = vlan_tci_prio(rule);
+ prio_mask = vlan_tci_priom(rule);
if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
vlan |= RQFPR_CFI;
@@ -1666,10 +1709,10 @@ static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
for (i = 0; i < sizeof(flow->m_u); i++)
flow->m_u.hdata[i] ^= 0xFF;
- flow->m_ext.vlan_etype ^= 0xFFFF;
- flow->m_ext.vlan_tci ^= 0xFFFF;
- flow->m_ext.data[0] ^= ~0;
- flow->m_ext.data[1] ^= ~0;
+ flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
+ flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
+ flow->m_ext.data[0] ^= cpu_to_be32(~0);
+ flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
static int gfar_add_cls(struct gfar_private *priv,
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index e006a09ba899..abc28da27042 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -22,7 +22,6 @@
#include <linux/device.h>
#include <linux/hrtimer.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -134,7 +133,7 @@ struct gianfar_ptp_registers {
#define REG_SIZE sizeof(struct gianfar_ptp_registers)
struct etsects {
- struct gianfar_ptp_registers *regs;
+ struct gianfar_ptp_registers __iomem *regs;
spinlock_t lock; /* protects regs */
struct ptp_clock *clock;
struct ptp_clock_info caps;
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
index acb55af7e3f3..e02dd1378751 100644
--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c
+++ b/drivers/net/ethernet/freescale/gianfar_sysfs.c
@@ -24,7 +24,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 5548b6d00c31..72291a8904a9 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -435,11 +435,6 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
QE_CR_PROTOCOL_ETHERNET, 0);
}
-static inline int compare_addr(u8 **addr1, u8 **addr2)
-{
- return memcmp(addr1, addr2, ETH_ALEN);
-}
-
#ifdef DEBUG
static void get_statistics(struct ucc_geth_private *ugeth,
struct ucc_geth_tx_firmware_statistics *
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index e79aaf9ae52a..413329eff2ff 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -16,7 +16,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index ef46b58cb4e9..7becab1aa3e4 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -35,7 +35,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index f42f1b707733..d787fdd5db7b 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -79,7 +79,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 861fa15e1e81..17fca323c143 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -78,7 +78,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 4ceae9a30274..372fa8d1fda1 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -13,7 +13,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 70074792bdef..67f342a9f65e 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -26,7 +26,6 @@
#define __IBM_NEWEMAC_CORE_H
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 952d795230a4..4be971590461 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) IBM Corporation, 2003, 2010
*
@@ -1276,18 +1275,21 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
struct net_device *netdev = dev_get_drvdata(&vdev->dev);
struct ibmveth_adapter *adapter;
+ struct iommu_table *tbl;
unsigned long ret;
int i;
int rxqentries = 1;
+ tbl = get_iommu_table_base(&vdev->dev);
+
/* netdev inits at probe time along with the structures we need below*/
if (netdev == NULL)
- return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+ return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
adapter = netdev_priv(netdev);
ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
- ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+ ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
/* add the size of the active receive buffers */
@@ -1295,11 +1297,12 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
ret +=
adapter->rx_buff_pool[i].size *
IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
- buff_size);
+ buff_size, tbl);
rxqentries += adapter->rx_buff_pool[i].size;
}
/* add the size of the receive queue entries */
- ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+ ret += IOMMU_PAGE_ALIGN(
+ rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
return ret;
}
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 84066bafe057..451ba7949e15 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) IBM Corporation, 2003, 2010
*
diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h
index abb300a31912..a21e4f5702b5 100644
--- a/drivers/net/ethernet/icplus/ipg.h
+++ b/drivers/net/ethernet/icplus/ipg.h
@@ -18,7 +18,6 @@
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/skbuff.h>
#include <asm/bitops.h>
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 149ac85b5f9e..bb9f0ba9d164 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -220,12 +220,12 @@ config IXGBE_DCB
If unsure, say N.
config IXGBEVF
- tristate "Intel(R) 82599 Virtual Function Ethernet support"
+ tristate "Intel(R) 10GbE PCI Express Virtual Function Ethernet support"
depends on PCI_MSI
---help---
- This driver supports Intel(R) 82599 virtual functions. For more
- information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
+ This driver supports Intel(R) PCI Express virtual functions for the
+ Intel(R) ixgbe driver. For more information on how to identify your
+ adapter, go to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/sb/CS-008441.htm>
@@ -243,6 +243,7 @@ config IXGBEVF
config I40E
tristate "Intel(R) Ethernet Controller XL710 Family support"
+ select PTP_1588_CLOCK
depends on PCI
---help---
This driver supports Intel(R) Ethernet Controller XL710 Family of
@@ -259,4 +260,44 @@ config I40E
To compile this driver as a module, choose M here. The module
will be called i40e.
+config I40E_VXLAN
+ bool "Virtual eXtensible Local Area Network Support"
+ default n
+ depends on I40E && VXLAN && !(I40E=y && VXLAN=m)
+ ---help---
+ This allows one to create VXLAN virtual interfaces that provide
+ Layer 2 Networks over Layer 3 Networks. VXLAN is often used
+ to tunnel virtual network infrastructure in virtualized environments.
+ Say Y here if you want to use Virtual eXtensible Local Area Network
+ (VXLAN) in the driver.
+
+config I40E_DCB
+ bool "Data Center Bridging (DCB) Support"
+ default n
+ depends on I40E && DCB
+ ---help---
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver.
+
+ If unsure, say N.
+
+config I40EVF
+ tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
+ depends on PCI_MSI
+ ---help---
+ This driver supports Intel(R) XL710 and X710 virtual functions.
+ For more information on how to identify your adapter, go to the
+ Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/sb/CS-008441.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ To compile this driver as a module, choose M here. The module
+ will be called i40evf. MSI-X interrupt support is required
+ for this driver to work correctly.
+
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index 5bae933efc7c..cdbbca8a3755 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IXGBE) += ixgbe/
obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_I40E) += i40e/
obj-$(CONFIG_IXGB) += ixgb/
+obj-$(CONFIG_I40EVF) += i40evf/
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index cbaba4442d4b..bf7a01ef9a57 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -3034,7 +3034,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
*enable_wake = false;
}
- pci_disable_device(pdev);
+ pci_clear_master(pdev);
}
static int __e100_power_off(struct pci_dev *pdev, bool wake)
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index f9313b36c887..10a0f221b183 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -36,7 +36,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <asm/byteorder.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 895450e9bb3c..ff2d806eaef7 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
e1000_release_phy_80003es2lan(hw);
/* Disable IBIST slave mode (far-end loopback) */
- e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- &kum_reg_data);
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &kum_reg_data);
+ if (ret_val)
+ return ret_val;
kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
kum_reg_data);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 8d3945ab7334..6d91933c4cdd 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5790,7 +5790,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
* specified. Matching the kind of event packet is not supported, with the
* exception of "all V2 events regardless of level 2 or 4".
**/
-static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct hwtstamp_config config;
@@ -5825,6 +5825,14 @@ static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
+static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
+ sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
+}
+
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -5833,7 +5841,9 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCSMIIREG:
return e1000_mii_ioctl(netdev, ifr, cmd);
case SIOCSHWTSTAMP:
- return e1000e_hwtstamp_ioctl(netdev, ifr);
+ return e1000e_hwtstamp_set(netdev, ifr);
+ case SIOCGHWTSTAMP:
+ return e1000e_hwtstamp_get(netdev, ifr);
default:
return -EOPNOTSUPP;
}
@@ -7015,13 +7025,11 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
-#ifdef CONFIG_PM
static const struct dev_pm_ops e1000_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
e1000_idle)
};
-#endif
/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
@@ -7029,11 +7037,9 @@ static struct pci_driver e1000_driver = {
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = e1000_remove,
-#ifdef CONFIG_PM
.driver = {
.pm = &e1000_pm_ops,
},
-#endif
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
};
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index da2be59505c0..20e71f4ca426 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
* it across the board.
*/
ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
- if (ret_val)
+ if (ret_val) {
/* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
- udelay(usec_interval);
+ if (usec_interval >= 1000)
+ msleep(usec_interval / 1000);
+ else
+ udelay(usec_interval);
+ }
ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
if (phy_status & BMSR_LSTATUS)
break;
if (usec_interval >= 1000)
- mdelay(usec_interval / 1000);
+ msleep(usec_interval / 1000);
else
udelay(usec_interval);
}
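
The phy.c change above swaps mdelay()/udelay() for msleep() whenever the poll interval is a millisecond or longer, both in the main loop and before retrying a failed first BMSR read, since the busy-wait variants spin the CPU while msleep() lets the scheduler run. The general wait-loop shape, reduced to a sketch under invented names (the ready callback and my_wait_ready() are illustrative only):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int my_wait_ready(bool (*ready)(void *ctx), void *ctx,
			 u32 usec_interval, u32 iterations)
{
	u32 i;

	for (i = 0; i < iterations; i++) {
		if (ready(ctx))
			return 0;
		if (usec_interval >= 1000)
			msleep(usec_interval / 1000);	/* long wait: yield the CPU */
		else
			udelay(usec_interval);		/* too short to sleep */
	}
	return -ETIMEDOUT;
}
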
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 479b2c4e552d..d9eb80acac4f 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel Ethernet Controller XL710 Family Linux Driver
-# Copyright(c) 2013 Intel Corporation.
+# Copyright(c) 2013 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
@@ -41,4 +40,7 @@ i40e-objs := i40e_main.o \
i40e_debugfs.o \
i40e_diag.o \
i40e_txrx.o \
+ i40e_ptp.o \
i40e_virtchnl_pf.o
+
+i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1ca9834cdfda..72dae4d97b43 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -29,7 +28,7 @@
#define _I40E_H_
#include <net/tcp.h>
-#include <linux/init.h>
+#include <net/udp.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/module.h>
@@ -50,11 +49,15 @@
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#include "i40e_virtchnl.h"
#include "i40e_virtchnl_pf.h"
#include "i40e_txrx.h"
+#include "i40e_dcb.h"
/* Useful i40e defaults */
#define I40E_BASE_PF_SEID 16
@@ -63,7 +66,7 @@
#define I40E_MAX_VEB 16
#define I40E_MAX_NUM_DESCRIPTORS 4096
-#define I40E_MAX_REGISTER 0x0038FFFF
+#define I40E_MAX_REGISTER 0x800000
#define I40E_DEFAULT_NUM_DESCRIPTORS 512
#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
#define I40E_MIN_NUM_DESCRIPTORS 64
@@ -72,6 +75,7 @@
#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
#define I40E_DEFAULT_QUEUES_PER_VF 4
#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
+#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */
#define I40E_FDIR_RING 0
#define I40E_FDIR_RING_COUNT 32
#define I40E_MAX_AQ_BUF_SIZE 4096
@@ -81,11 +85,13 @@
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_NVM_VERSION_LO_SHIFT 0
-#define I40E_NVM_VERSION_LO_MASK (0xf << I40E_NVM_VERSION_LO_SHIFT)
-#define I40E_NVM_VERSION_MID_SHIFT 4
-#define I40E_NVM_VERSION_MID_MASK (0xff << I40E_NVM_VERSION_MID_SHIFT)
-#define I40E_NVM_VERSION_HI_SHIFT 12
-#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT 8
+#define I40E_NVM_VERSION_HI_MASK (0xff << I40E_NVM_VERSION_HI_SHIFT)
+
+/* The values in here are decimal coded as hex as is the case in the NVM map*/
+#define I40E_CURRENT_NVM_VERSION_HI 0x2
+#define I40E_CURRENT_NVM_VERSION_LO 0x30
/* magic for getting defines into strings */
#define STRINGIFY(foo) #foo
@@ -127,7 +133,9 @@ enum i40e_state_t {
__I40E_PF_RESET_REQUESTED,
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
+ __I40E_EMP_RESET_REQUESTED,
__I40E_FILTER_OVERFLOW_PROMISC,
+ __I40E_SUSPENDED,
};
enum i40e_interrupt_policy {
@@ -157,6 +165,8 @@ struct i40e_fdir_data {
u8 *raw_packet;
};
+#define I40E_ETH_P_LLDP 0x88cc
+
#define I40E_DCB_PRIO_TYPE_STRICT 0
#define I40E_DCB_PRIO_TYPE_ETS 1
#define I40E_DCB_STRICT_PRIO_CREDITS 127
@@ -191,14 +201,20 @@ struct i40e_pf {
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
u16 num_req_vfs; /* num vfs requested for this vf */
u16 num_vf_qps; /* num queue pairs per vf */
- u16 num_tc_qps; /* num queue pairs per TC */
u16 num_lan_qps; /* num lan queues this pf has set up */
u16 num_lan_msix; /* num queue vectors for the base pf vsi */
+ int queues_left; /* queues left unclaimed */
u16 rss_size; /* num queues in the RSS array */
u16 rss_size_max; /* HW defined max RSS queues */
u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
u8 atr_sample_rate;
+ bool wol_en;
+#ifdef CONFIG_I40E_VXLAN
+ __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
+ u16 pending_vxlan_bitmap;
+
+#endif
enum i40e_interrupt_policy int_policy;
u16 rx_itr_default;
u16 tx_itr_default;
@@ -216,24 +232,24 @@ struct i40e_pf {
#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4)
#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5)
#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6)
-#define I40E_FLAG_MQ_ENABLED (u64)(1 << 7)
-#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 8)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 9)
-#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 10)
-#define I40E_FLAG_IN_NETPOLL (u64)(1 << 13)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 14)
-#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 15)
-#define I40E_FLAG_FILTER_SYNC (u64)(1 << 16)
-#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 18)
-#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 19)
-#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 20)
-#define I40E_FLAG_DCB_ENABLED (u64)(1 << 21)
-#define I40E_FLAG_FDIR_ENABLED (u64)(1 << 22)
-#define I40E_FLAG_FDIR_ATR_ENABLED (u64)(1 << 23)
-#define I40E_FLAG_MFP_ENABLED (u64)(1 << 27)
-
- u16 num_tx_queues;
- u16 num_rx_queues;
+#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8)
+#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9)
+#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13)
+#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14)
+#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15)
+#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18)
+#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19)
+#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20)
+#define I40E_FLAG_FD_SB_ENABLED (u64)(1 << 21)
+#define I40E_FLAG_FD_ATR_ENABLED (u64)(1 << 22)
+#define I40E_FLAG_PTP (u64)(1 << 25)
+#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26)
+#ifdef CONFIG_I40E_VXLAN
+#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
+#endif
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
@@ -247,6 +263,7 @@ struct i40e_pf {
u16 globr_count; /* Global reset count */
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
+ u16 sw_int_count; /* SW interrupt count */
struct mutex switch_mutex;
u16 lan_vsi; /* our default LAN VSI */
@@ -270,6 +287,8 @@ struct i40e_pf {
struct dentry *i40e_dbg_pf;
#endif /* CONFIG_DEBUG_FS */
+ u16 instance; /* A unique number per i40e_pf instance in the system */
+
/* sr-iov config info */
struct i40e_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
@@ -287,6 +306,20 @@ struct i40e_pf {
u32 fcoe_hmc_filt_num;
u32 fcoe_hmc_cntx_num;
struct i40e_filter_control_settings filter_settings;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct sk_buff *ptp_tx_skb;
+ struct work_struct ptp_tx_work;
+ struct hwtstamp_config tstamp_config;
+ unsigned long ptp_tx_start;
+ unsigned long last_rx_ptp_check;
+ spinlock_t tmreg_lock; /* Used to protect the device time registers. */
+ u64 ptp_base_adj;
+ u32 tx_hwtstamp_timeouts;
+ u32 rx_hwtstamp_cleared;
+ bool ptp_tx;
+ bool ptp_rx;
};
struct i40e_mac_filter {
@@ -441,13 +474,11 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)
static char buf[32];
snprintf(buf, sizeof(buf),
- "f%d.%d a%d.%d n%02d.%02d.%02d e%08x",
+ "f%d.%d a%d.%d n%02x.%02x e%08x",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
(hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
>> I40E_NVM_VERSION_HI_SHIFT,
- (hw->nvm.version & I40E_NVM_VERSION_MID_MASK)
- >> I40E_NVM_VERSION_MID_SHIFT,
(hw->nvm.version & I40E_NVM_VERSION_LO_MASK)
>> I40E_NVM_VERSION_LO_SHIFT,
hw->nvm.eetrack);
@@ -495,6 +526,7 @@ int i40e_up(struct i40e_vsi *vsi);
void i40e_down(struct i40e_vsi *vsi);
extern const char i40e_driver_name[];
extern const char i40e_driver_version_str[];
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
@@ -502,13 +534,6 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
bool printconfig);
-/* needed by i40e_main.c */
-void i40e_add_fdir_filter(struct i40e_fdir_data fdir_data,
- struct i40e_ring *tx_ring);
-void i40e_add_remove_filter(struct i40e_fdir_data fdir_data,
- struct i40e_ring *tx_ring);
-void i40e_update_fdir_filter(struct i40e_fdir_data fdir_data,
- struct i40e_ring *tx_ring);
int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
struct i40e_pf *pf, bool add);
@@ -524,10 +549,13 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
struct i40e_vsi *start_vsi);
+int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc);
void i40e_veb_release(struct i40e_veb *veb);
+int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc);
i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
@@ -544,6 +572,7 @@ static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
@@ -555,5 +584,21 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
-
+#ifdef CONFIG_I40E_DCB
+void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+ struct i40e_dcbx_config *new_cfg);
+void i40e_dcbnl_set_all(struct i40e_vsi *vsi);
+void i40e_dcbnl_setup(struct i40e_vsi *vsi);
+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
+ struct i40e_dcbx_config *new_cfg);
+#endif /* CONFIG_I40E_DCB */
+void i40e_ptp_rx_hang(struct i40e_vsi *vsi);
+void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
+void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
+void i40e_ptp_set_increment(struct i40e_pf *pf);
+int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+void i40e_ptp_init(struct i40e_pf *pf);
+void i40e_ptp_stop(struct i40e_pf *pf);
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index cfef7fc32cdd..a50e6b3479ae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -31,6 +30,8 @@
#include "i40e_adminq.h"
#include "i40e_prototype.h"
+static void i40e_resume_aq(struct i40e_hw *hw);
+
/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
@@ -43,13 +44,17 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
if (hw->mac.type == I40E_MAC_VF) {
hw->aq.asq.tail = I40E_VF_ATQT1;
hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.asq.len = I40E_VF_ATQLEN1;
hw->aq.arq.tail = I40E_VF_ARQT1;
hw->aq.arq.head = I40E_VF_ARQH1;
+ hw->aq.arq.len = I40E_VF_ARQLEN1;
} else {
hw->aq.asq.tail = I40E_PF_ATQT;
hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.asq.len = I40E_PF_ATQLEN;
hw->aq.arq.tail = I40E_PF_ARQT;
hw->aq.arq.head = I40E_PF_ARQH;
+ hw->aq.arq.len = I40E_PF_ARQLEN;
}
}
@@ -60,9 +65,8 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
- struct i40e_virt_mem mem;
- ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
i40e_mem_atq_ring,
(hw->aq.num_asq_entries *
sizeof(struct i40e_aq_desc)),
@@ -70,21 +74,14 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
if (ret_code)
return ret_code;
- hw->aq.asq.desc = hw->aq.asq_mem.va;
- hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;
-
- ret_code = i40e_allocate_virt_mem(hw, &mem,
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
(hw->aq.num_asq_entries *
sizeof(struct i40e_asq_cmd_details)));
if (ret_code) {
- i40e_free_dma_mem(hw, &hw->aq.asq_mem);
- hw->aq.asq_mem.va = NULL;
- hw->aq.asq_mem.pa = 0;
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
return ret_code;
}
- hw->aq.asq.details = mem.va;
-
return ret_code;
}
@@ -96,16 +93,11 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
- ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
i40e_mem_arq_ring,
(hw->aq.num_arq_entries *
sizeof(struct i40e_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
- if (ret_code)
- return ret_code;
-
- hw->aq.arq.desc = hw->aq.arq_mem.va;
- hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;
return ret_code;
}
@@ -119,14 +111,7 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
**/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
- struct i40e_virt_mem mem;
-
- i40e_free_dma_mem(hw, &hw->aq.asq_mem);
- hw->aq.asq_mem.va = NULL;
- hw->aq.asq_mem.pa = 0;
- mem.va = hw->aq.asq.details;
- i40e_free_virt_mem(hw, &mem);
- hw->aq.asq.details = NULL;
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
/**
@@ -138,20 +123,17 @@ static void i40e_free_adminq_asq(struct i40e_hw *hw)
**/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
- i40e_free_dma_mem(hw, &hw->aq.arq_mem);
- hw->aq.arq_mem.va = NULL;
- hw->aq.arq_mem.pa = 0;
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
/**
* i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
i40e_status ret_code;
struct i40e_aq_desc *desc;
- struct i40e_virt_mem mem;
struct i40e_dma_mem *bi;
int i;
@@ -160,11 +142,11 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
*/
/* buffer_info structures do not need alignment */
- ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
- sizeof(struct i40e_dma_mem)));
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_arq_bufs;
- hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_arq_entries; i++) {
@@ -206,29 +188,27 @@ unwind_alloc_arq_bufs:
i--;
for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
- mem.va = hw->aq.arq.r.arq_bi;
- i40e_free_virt_mem(hw, &mem);
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
return ret_code;
}
/**
* i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
i40e_status ret_code;
- struct i40e_virt_mem mem;
struct i40e_dma_mem *bi;
int i;
/* No mapped memory needed yet, just the buffer info structures */
- ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
- sizeof(struct i40e_dma_mem)));
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_asq_bufs;
- hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_asq_entries; i++) {
@@ -248,35 +228,36 @@ unwind_alloc_asq_bufs:
i--;
for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
- mem.va = hw->aq.asq.r.asq_bi;
- i40e_free_virt_mem(hw, &mem);
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
return ret_code;
}
/**
* i40e_free_arq_bufs - Free receive queue buffer info elements
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
- struct i40e_virt_mem mem;
int i;
+ /* free descriptors */
for (i = 0; i < hw->aq.num_arq_entries; i++)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
- mem.va = hw->aq.arq.r.arq_bi;
- i40e_free_virt_mem(hw, &mem);
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
/**
* i40e_free_asq_bufs - Free send queue buffer info elements
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
- struct i40e_virt_mem mem;
int i;
/* only unmap if the address is non-NULL */
@@ -284,14 +265,19 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
if (hw->aq.asq.r.asq_bi[i].pa)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
- /* now free the buffer info list */
- mem.va = hw->aq.asq.r.asq_bi;
- i40e_free_virt_mem(hw, &mem);
+ /* free the buffer info list */
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
/**
* i40e_config_asq_regs - configure ASQ registers
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* Configure base address and length registers for the transmit queue
**/
@@ -299,14 +285,18 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
{
if (hw->mac.type == I40E_MAC_VF) {
/* configure the transmit queue */
- wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
- wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_VF_ATQBAH1,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_VF_ATQBAL1,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
I40E_VF_ATQLEN1_ATQENABLE_MASK));
} else {
/* configure the transmit queue */
- wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
- wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_PF_ATQBAH,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAL,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
}
@@ -314,7 +304,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
/**
* i40e_config_arq_regs - ARQ register configuration
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* Configure base address and length registers for the receive (event queue)
**/
@@ -322,14 +312,18 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
{
if (hw->mac.type == I40E_MAC_VF) {
/* configure the receive queue */
- wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
- wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_VF_ARQBAH1,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_VF_ARQBAL1,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
I40E_VF_ARQLEN1_ARQENABLE_MASK));
} else {
/* configure the receive queue */
- wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
- wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_PF_ARQBAH,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAL,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
}
@@ -340,7 +334,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
/**
* i40e_init_asq - main initialization routine for ASQ
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* This is the main initialization routine for the Admin Send Queue
* Prior to calling this function, drivers *MUST* set the following fields
@@ -397,7 +391,7 @@ init_adminq_exit:
/**
* i40e_init_arq - initialize ARQ
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* The main initialization routine for the Admin Receive (Event) Queue.
* Prior to calling this function, drivers *MUST* set the following fields
@@ -454,7 +448,7 @@ init_adminq_exit:
/**
* i40e_shutdown_asq - shutdown the ASQ
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Send Queue
**/
@@ -466,10 +460,9 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
return I40E_ERR_NOT_READY;
/* Stop firmware AdminQ processing */
- if (hw->mac.type == I40E_MAC_VF)
- wr32(hw, I40E_VF_ATQLEN1, 0);
- else
- wr32(hw, I40E_PF_ATQLEN, 0);
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
/* make sure lock is available */
mutex_lock(&hw->aq.asq_mutex);
@@ -478,8 +471,6 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
/* free ring buffers */
i40e_free_asq_bufs(hw);
- /* free the ring descriptors */
- i40e_free_adminq_asq(hw);
mutex_unlock(&hw->aq.asq_mutex);
@@ -488,7 +479,7 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
/**
* i40e_shutdown_arq - shutdown ARQ
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Receive Queue
**/
@@ -500,10 +491,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
return I40E_ERR_NOT_READY;
/* Stop firmware AdminQ processing */
- if (hw->mac.type == I40E_MAC_VF)
- wr32(hw, I40E_VF_ARQLEN1, 0);
- else
- wr32(hw, I40E_PF_ARQLEN, 0);
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
/* make sure lock is available */
mutex_lock(&hw->aq.arq_mutex);
@@ -512,8 +502,6 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
/* free ring buffers */
i40e_free_arq_bufs(hw);
- /* free the ring descriptors */
- i40e_free_adminq_arq(hw);
mutex_unlock(&hw->aq.arq_mutex);
@@ -522,7 +510,7 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
/**
* i40e_init_adminq - main initialization routine for Admin Queue
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* Prior to calling this function, drivers *MUST* set the following fields
* in the hw->aq structure:
@@ -533,8 +521,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
**/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
- u16 eetrack_lo, eetrack_hi;
i40e_status ret_code;
+ u16 eetrack_lo, eetrack_hi;
+ int retry = 0;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
@@ -562,23 +551,41 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
if (ret_code)
goto init_adminq_free_asq;
- ret_code = i40e_aq_get_firmware_version(hw,
- &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver,
- &hw->aq.api_maj_ver, &hw->aq.api_min_ver,
- NULL);
- if (ret_code)
+ /* There are some cases where the firmware may not be quite ready
+ * for AdminQ operations, so we retry the AdminQ setup a few times
+ * if we see timeouts in this first AQ call.
+ */
+ do {
+ ret_code = i40e_aq_get_firmware_version(hw,
+ &hw->aq.fw_maj_ver,
+ &hw->aq.fw_min_ver,
+ &hw->aq.api_maj_ver,
+ &hw->aq.api_min_ver,
+ NULL);
+ if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ break;
+ retry++;
+ msleep(100);
+ i40e_resume_aq(hw);
+ } while (retry < 10);
+ if (ret_code != I40E_SUCCESS)
goto init_adminq_free_arq;
- if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
- hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) {
- ret_code = I40E_ERR_FIRMWARE_API_VERSION;
- goto init_adminq_free_arq;
- }
+ /* get the NVM version info */
i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+ if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
+ hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
+ ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+ goto init_adminq_free_arq;
+ }
+
+ /* pre-emptive resource lock release */
+ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+
ret_code = i40e_aq_set_hmc_resource_profile(hw,
I40E_HMC_PROFILE_DEFAULT,
0,
@@ -600,12 +607,15 @@ init_adminq_exit:
/**
* i40e_shutdown_adminq - shutdown routine for the Admin Queue
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_queue_shutdown(hw, true);
+
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
@@ -616,7 +626,7 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
/**
* i40e_clean_asq - cleans Admin send queue
- * @asq: pointer to the adminq send ring
+ * @hw: pointer to the hardware structure
*
* returns the number of free desc
**/
@@ -659,12 +669,12 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
* Returns true if the firmware has processed all descriptors on the
* admin send queue. Returns false if there are still requests pending.
**/
-bool i40e_asq_done(struct i40e_hw *hw)
+static bool i40e_asq_done(struct i40e_hw *hw)
{
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
- return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use);
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}
@@ -674,7 +684,7 @@ bool i40e_asq_done(struct i40e_hw *hw)
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
* @buff_size: size of buffer for indirect commands
- * @opaque: pointer to info to be used in async cleanup
+ * @cmd_details: pointer to command details structure
*
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
@@ -854,7 +864,7 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
/* zero out the desc */
memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}
/**
@@ -912,7 +922,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
"AQRX: Event received with error 0x%X.\n",
hw->aq.arq_last_status);
} else {
- memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc));
+ e->desc = *desc;
datalen = le16_to_cpu(desc->datalen);
e->msg_size = min(datalen, e->msg_size);
if (e->msg_buf != NULL && (e->msg_size != 0))
@@ -925,6 +935,11 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
@@ -947,36 +962,16 @@ clean_arq_element_out:
return ret_code;
}
-void i40e_resume_aq(struct i40e_hw *hw)
+static void i40e_resume_aq(struct i40e_hw *hw)
{
- u32 reg = 0;
-
/* Registers are reset after PF reset */
hw->aq.asq.next_to_use = 0;
hw->aq.asq.next_to_clean = 0;
i40e_config_asq_regs(hw);
- reg = hw->aq.num_asq_entries;
-
- if (hw->mac.type == I40E_MAC_VF) {
- reg |= I40E_VF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_VF_ATQLEN1, reg);
- } else {
- reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_PF_ATQLEN, reg);
- }
hw->aq.arq.next_to_use = 0;
hw->aq.arq.next_to_clean = 0;
i40e_config_arq_regs(hw);
- reg = hw->aq.num_arq_entries;
-
- if (hw->mac.type == I40E_MAC_VF) {
- reg |= I40E_VF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_VF_ARQLEN1, reg);
- } else {
- reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_PF_ARQLEN, reg);
- }
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 22e5ed683e47..993f7685a911 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -32,20 +31,20 @@
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
- (&(((struct i40e_aq_desc *)((R).desc))[i]))
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
#define I40E_ADMINQ_DESC_ALIGNMENT 4096
struct i40e_adminq_ring {
- void *desc; /* Descriptor ring memory */
- void *details; /* ASQ details */
+ struct i40e_virt_mem dma_head; /* space for dma structures */
+ struct i40e_dma_mem desc_buf; /* descriptor ring memory */
+ struct i40e_virt_mem cmd_buf; /* command buffer memory */
union {
struct i40e_dma_mem *asq_bi;
struct i40e_dma_mem *arq_bi;
} r;
- u64 dma_addr; /* Physical address of the ring */
u16 count; /* Number of descriptors */
u16 rx_buf_len; /* Admin Receive Queue buffer length */
@@ -56,6 +55,7 @@ struct i40e_adminq_ring {
/* used for queue tracking */
u32 head;
u32 tail;
+ u32 len;
};
/* ASQ transaction details */
@@ -69,7 +69,7 @@ struct i40e_asq_cmd_details {
};
#define I40E_ADMINQ_DETAILS(R, i) \
- (&(((struct i40e_asq_cmd_details *)((R).details))[i]))
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
/* ARQ event information */
struct i40e_arq_event_info {
@@ -94,9 +94,6 @@ struct i40e_adminq_info {
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
- struct i40e_dma_mem asq_mem; /* send queue dynamic memory */
- struct i40e_dma_mem arq_mem; /* receive queue dynamic memory */
-
/* last status values on send and receive queues */
enum i40e_admin_queue_err asq_last_status;
enum i40e_admin_queue_err arq_last_status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index e61ebdd5a5f9..7b6374a8f8da 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -35,7 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0000
+#define I40E_FW_API_VERSION_MINOR 0x0001
struct i40e_aq_desc {
__le16 flags;
@@ -137,10 +136,13 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106,
+ i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -317,13 +319,15 @@ struct i40e_aqc_get_version {
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
-/* Send driver version (direct 0x0002) */
+/* Send driver version (indirect 0x0002) */
struct i40e_aqc_driver_version {
u8 driver_major_ver;
u8 driver_minor_ver;
u8 driver_build_ver;
u8 driver_subbuild_ver;
- u8 reserved[12];
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
@@ -479,7 +483,7 @@ struct i40e_aqc_mng_laa {
u8 reserved2[6];
};
-/* Manage MAC Address Read Command (0x0107) */
+/* Manage MAC Address Read Command (indirect 0x0107) */
struct i40e_aqc_mac_address_read {
__le16 command_flags;
#define I40E_AQC_LAN_ADDR_VALID 0x10
@@ -517,6 +521,16 @@ struct i40e_aqc_mac_address_write {
I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -639,13 +653,15 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
u8 reserved2[6];
};
-/* Add VSI (indirect 0x210)
+/* Add VSI (indirect 0x0210)
* this indirect command uses struct i40e_aqc_vsi_properties_data
* as the indirect buffer (128 bytes)
*
- * Update VSI (indirect 0x211) Get VSI (indirect 0x0212)
- * use the generic i40e_aqc_switch_seid descriptor format
- * use the same completion and data structure as Add VSI
+ * Update VSI (indirect 0x211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
*/
struct i40e_aqc_add_get_update_vsi {
__le16 uplink_seid;
@@ -664,7 +680,6 @@ struct i40e_aqc_add_get_update_vsi {
#define I40E_AQ_VSI_TYPE_PF 0x2
#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
-#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
__le32 addr_high;
__le32 addr_low;
};
@@ -1026,7 +1041,9 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
- u8 reserved[10];
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
@@ -1179,33 +1196,46 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
} v4;
struct {
u8 data[16];
- } v6;
- } ipaddr;
+ } v6;
+ } ipaddr;
__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+/* 0x0000 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002
+/* 0x0002 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007
+/* 0x0007 reserved */
/* 0x0008 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
- __le32 key_low;
- __le32 key_high;
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
__le16 queue_number;
#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
- u8 reserved[14];
+ u8 reserved2[14];
/* response section */
u8 allocation_result;
#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
@@ -1548,7 +1578,7 @@ struct i40e_aqc_module_desc {
struct i40e_aq_get_phy_abilities_resp {
__le32 phy_type; /* bitmap using the above enum for offsets */
- u8 link_speed; /* bitmap using the above enum */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
u8 abilities;
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
@@ -1582,6 +1612,10 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le32 phy_type;
u8 link_speed;
u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK 0x08
+#define I40E_AQ_PHY_ENABLE_AN 0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
@@ -1914,22 +1948,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
__le16 udp_port;
- u8 header_len; /* in DWords, 1 to 15 */
- u8 protocol_index;
-#define I40E_AQC_TUNNEL_TYPE_MAC 0x0
-#define I40E_AQC_TUNNEL_TYPE_UDP 0x1
- u8 reserved[12];
+ u8 reserved0[3];
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
+ u8 reserved1[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
/* remove UDP Tunnel command (0x0B01) */
struct i40e_aqc_remove_udp_tunnel {
u8 reserved[2];
u8 index; /* 0 to 15 */
- u8 pf_filters;
- u8 total_filters;
- u8 reserved2[11];
+ u8 reserved2[13];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
@@ -1937,28 +1982,32 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
struct i40e_aqc_del_udp_tunnel_completion {
__le16 udp_port;
u8 index; /* 0 to 15 */
- u8 multiple_entries;
- u8 tunnels_used;
- u8 reserved;
- u8 tunnels_free;
- u8 reserved1[9];
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
/* tunnel key structure 0x0B10 */
+
struct i40e_aqc_tunnel_key_structure {
- __le16 key1_off;
- __le16 key1_len;
- __le16 key2_off;
- __le16 key2_len;
- __le16 flags;
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
/* response flags */
#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
- u8 resreved[6];
+ u8 network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
@@ -2052,6 +2101,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
#define I40E_AQ_CLUSTER_ID_DCB 8
#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+#define I40E_AQ_CLUSTER_ID_ALTRAM 11
struct i40e_aqc_debug_dump_internals {
u8 cluster_id;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index 3b1cc214f9dc..926811ad44ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 1e4ea134975a..e7f38b57834d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -43,20 +42,20 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
- case I40E_SFP_XL710_DEVICE_ID:
- case I40E_SFP_X710_DEVICE_ID:
- case I40E_QEMU_DEVICE_ID:
- case I40E_KX_A_DEVICE_ID:
- case I40E_KX_B_DEVICE_ID:
- case I40E_KX_C_DEVICE_ID:
- case I40E_KX_D_DEVICE_ID:
- case I40E_QSFP_A_DEVICE_ID:
- case I40E_QSFP_B_DEVICE_ID:
- case I40E_QSFP_C_DEVICE_ID:
+ case I40E_DEV_ID_SFP_XL710:
+ case I40E_DEV_ID_SFP_X710:
+ case I40E_DEV_ID_QEMU:
+ case I40E_DEV_ID_KX_A:
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_KX_C:
+ case I40E_DEV_ID_KX_D:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
hw->mac.type = I40E_MAC_XL710;
break;
- case I40E_VF_DEVICE_ID:
- case I40E_VF_HV_DEVICE_ID:
+ case I40E_DEV_ID_VF:
+ case I40E_DEV_ID_VF_HV:
hw->mac.type = I40E_MAC_VF;
break;
default:
@@ -75,7 +74,8 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
- * @cap: pointer to adminq command descriptor
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
* @buffer: pointer to command buffer
*
* Dumps debug log about adminq command with descriptor contents.
@@ -126,6 +126,43 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
}
/**
+ * i40e_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if Queue is enabled else false.
+ **/
+bool i40e_check_asq_alive(struct i40e_hw *hw)
+{
+ return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+}
+
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
* i40e_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
*
@@ -142,14 +179,6 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
i40e_status status = 0;
u32 reg;
- hw->phy.get_link_info = true;
-
- /* Determine port number */
- reg = rd32(hw, I40E_PFGEN_PORTNUM);
- reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
- I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
- hw->port = (u8)reg;
-
i40e_set_mac_type(hw);
switch (hw->mac.type) {
@@ -160,6 +189,21 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
break;
}
+ hw->phy.get_link_info = true;
+
+ /* Determine port number */
+ reg = rd32(hw, I40E_PFGEN_PORTNUM);
+ reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
+ I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
+ hw->port = (u8)reg;
+
+ /* Determine the PF number based on the PCI fn */
+ reg = rd32(hw, I40E_GLPCI_CAPSUP);
+ if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK)
+ hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func);
+ else
+ hw->pf_id = (u8)hw->bus.func;
+
status = i40e_init_nvm(hw);
return status;
}
@@ -210,8 +254,11 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_mac_address_write);
cmd_data->command_flags = cpu_to_le16(flags);
- memcpy(&cmd_data->mac_sal, &mac_addr[0], 4);
- memcpy(&cmd_data->mac_sah, &mac_addr[4], 2);
+ cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
+ cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
+ ((u32)mac_addr[3] << 16) |
+ ((u32)mac_addr[4] << 8) |
+ mac_addr[5]);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -240,32 +287,53 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
}
/**
- * i40e_validate_mac_addr - Validate MAC address
- * @mac_addr: pointer to MAC address
- *
- * Tests a MAC address to ensure it is a valid Individual Address
+ * i40e_get_media_type - Gets media type
+ * @hw: pointer to the hardware structure
**/
-i40e_status i40e_validate_mac_addr(u8 *mac_addr)
+static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
{
- i40e_status status = 0;
-
- /* Make sure it is not a multicast address */
- if (I40E_IS_MULTICAST(mac_addr)) {
- hw_dbg(hw, "MAC address is multicast\n");
- status = I40E_ERR_INVALID_MAC_ADDR;
- /* Not a broadcast address */
- } else if (I40E_IS_BROADCAST(mac_addr)) {
- hw_dbg(hw, "MAC address is broadcast\n");
- status = I40E_ERR_INVALID_MAC_ADDR;
- /* Reject the zero address */
- } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
- mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
- hw_dbg(hw, "MAC address is all zeros\n");
- status = I40E_ERR_INVALID_MAC_ADDR;
+ enum i40e_media_type media;
+
+ switch (hw->phy.link_info.phy_type) {
+ case I40E_PHY_TYPE_10GBASE_SR:
+ case I40E_PHY_TYPE_10GBASE_LR:
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ media = I40E_MEDIA_TYPE_FIBER;
+ break;
+ case I40E_PHY_TYPE_100BASE_TX:
+ case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_10GBASE_T:
+ media = I40E_MEDIA_TYPE_BASET;
+ break;
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ media = I40E_MEDIA_TYPE_DA;
+ break;
+ case I40E_PHY_TYPE_1000BASE_KX:
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ case I40E_PHY_TYPE_10GBASE_KR:
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ media = I40E_MEDIA_TYPE_BACKPLANE;
+ break;
+ case I40E_PHY_TYPE_SGMII:
+ case I40E_PHY_TYPE_XAUI:
+ case I40E_PHY_TYPE_XFI:
+ case I40E_PHY_TYPE_XLAUI:
+ case I40E_PHY_TYPE_XLPPI:
+ default:
+ media = I40E_MEDIA_TYPE_UNKNOWN;
+ break;
}
- return status;
+
+ return media;
}
+#define I40E_PF_RESET_WAIT_COUNT_A0 200
+#define I40E_PF_RESET_WAIT_COUNT 10
/**
* i40e_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
@@ -275,7 +343,8 @@ i40e_status i40e_validate_mac_addr(u8 *mac_addr)
**/
i40e_status i40e_pf_reset(struct i40e_hw *hw)
{
- u32 wait_cnt = 0;
+ u32 cnt = 0;
+ u32 cnt1 = 0;
u32 reg = 0;
u32 grst_del;
@@ -285,7 +354,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
*/
grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
>> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- for (wait_cnt = 0; wait_cnt < grst_del + 2; wait_cnt++) {
+ for (cnt = 0; cnt < grst_del + 2; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
break;
@@ -296,17 +365,37 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
return I40E_ERR_RESET_FAILED;
}
- /* Determine the PF number based on the PCI fn */
- hw->pf_id = (u8)hw->bus.func;
+ /* Now Wait for the FW to be ready */
+ for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
+ reg = rd32(hw, I40E_GLNVM_ULD);
+ reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
+ if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
+ hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
+ break;
+ }
+ usleep_range(10000, 20000);
+ }
+ if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
+ hw_dbg(hw, "wait for FW Reset complete timedout\n");
+ hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
+ return I40E_ERR_RESET_FAILED;
+ }
/* If there was a Global Reset in progress when we got here,
* we don't need to do the PF Reset
*/
- if (!wait_cnt) {
+ if (!cnt) {
+ if (hw->revision_id == 0)
+ cnt = I40E_PF_RESET_WAIT_COUNT_A0;
+ else
+ cnt = I40E_PF_RESET_WAIT_COUNT;
reg = rd32(hw, I40E_PFGEN_CTRL);
wr32(hw, I40E_PFGEN_CTRL,
(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
- for (wait_cnt = 0; wait_cnt < 10; wait_cnt++) {
+ for (; cnt; cnt--) {
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
@@ -319,6 +408,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
}
i40e_clear_pxe_mode(hw);
+
return 0;
}
@@ -335,9 +425,47 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
/* Clear single descriptor fetch/write-back mode */
reg = rd32(hw, I40E_GLLAN_RCTL_0);
- wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+
+ if (hw->revision_id == 0) {
+ /* As a work around clear PXE_MODE instead of setting it */
+ wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
+ } else {
+ wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+ }
+}
+
+/**
+ * i40e_led_is_mine - helper to find matching led
+ * @hw: pointer to the hw struct
+ * @idx: index into GPIO registers
+ *
+ * returns: 0 if no match, otherwise the value of the GPIO_CTL register
+ */
+static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
+{
+ u32 gpio_val = 0;
+ u32 port;
+
+ if (!hw->func_caps.led[idx])
+ return 0;
+
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
+ I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+ /* if PRT_NUM_NA is 1 then this LED is not port specific, OR
+ * if it is not our port then ignore
+ */
+ if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
+ (port != hw->port))
+ return 0;
+
+ return gpio_val;
}
+#define I40E_LED0 22
+#define I40E_LINK_ACTIVITY 0xC
+
/**
* i40e_led_get - return current on/off mode
* @hw: pointer to the hw struct
@@ -349,24 +477,20 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
**/
u32 i40e_led_get(struct i40e_hw *hw)
{
- u32 gpio_val = 0;
u32 mode = 0;
- u32 port;
int i;
- for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
- if (!hw->func_caps.led[i])
- continue;
-
- gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
- port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
- >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
- if (port != hw->port)
+ if (!gpio_val)
continue;
- mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
- >> I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT;
+ mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
break;
}
@@ -376,60 +500,45 @@ u32 i40e_led_get(struct i40e_hw *hw)
/**
* i40e_led_set - set new on/off mode
* @hw: pointer to the hw struct
- * @mode: 0=off, else on (see EAS for mode details)
+ * @mode: 0=off, 0xf=on (else see manual for mode details)
+ * @blink: true if the LED should blink when on, false if steady
+ *
+ * if this function is used to turn on the blink it should
+ * be used to disable the blink when restoring the original state.
**/
-void i40e_led_set(struct i40e_hw *hw, u32 mode)
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
- u32 gpio_val = 0;
- u32 led_mode = 0;
- u32 port;
int i;
- for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
- if (!hw->func_caps.led[i])
- continue;
+ if (mode & 0xfffffff0)
+ hw_dbg(hw, "invalid mode passed in %X\n", mode);
- gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
- port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
- >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
- if (port != hw->port)
+ if (!gpio_val)
continue;
- led_mode = (mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
- I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
- gpio_val |= led_mode;
+ /* this & is a bit of paranoia, but serves as a range check */
+ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+
+ if (mode == I40E_LINK_ACTIVITY)
+ blink = false;
+
+ gpio_val |= (blink ? 1 : 0) <<
+ I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT;
+
wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+ break;
}
}
/* Admin command wrappers */
-/**
- * i40e_aq_queue_shutdown
- * @hw: pointer to the hw struct
- * @unloading: is the driver unloading itself
- *
- * Tell the Firmware that we're shutting down the AdminQ and whether
- * or not the driver is unloading as well.
- **/
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_queue_shutdown *cmd =
- (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_queue_shutdown);
-
- if (unloading)
- cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
-
- return status;
-}
/**
* i40e_aq_set_link_restart_an
@@ -490,15 +599,16 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
goto aq_get_link_info_exit;
/* save off old link status information */
- memcpy(&hw->phy.link_info_old, hw_link_info,
- sizeof(struct i40e_link_status));
+ hw->phy.link_info_old = *hw_link_info;
/* update link status */
hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+ hw->phy.media_type = i40e_get_media_type(hw);
hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
hw_link_info->link_info = resp->link_info;
hw_link_info->an_info = resp->an_info;
hw_link_info->ext_info = resp->ext_info;
+ hw_link_info->loopback = resp->loopback;
if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
hw_link_info->lse_enable = true;
@@ -519,7 +629,7 @@ aq_get_link_info_exit:
/**
* i40e_aq_add_vsi
* @hw: pointer to the hw struct
- * @vsi: pointer to a vsi context struct
+ * @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
*
* Add a VSI context to the hardware.
@@ -571,7 +681,8 @@ aq_add_vsi_exit:
* @cmd_details: pointer to command details structure or NULL
**/
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -665,7 +776,7 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
/**
* i40e_get_vsi_params - get VSI configuration info
* @hw: pointer to the hw struct
- * @vsi: pointer to a vsi context struct
+ * @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
**/
i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
@@ -673,8 +784,8 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *cmd =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
@@ -683,7 +794,7 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_vsi_parameters);
- cmd->seid = cpu_to_le16(vsi_ctx->seid);
+ cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
@@ -707,7 +818,7 @@ aq_get_vsi_params_exit:
/**
* i40e_aq_update_vsi_params
* @hw: pointer to the hw struct
- * @vsi: pointer to a vsi context struct
+ * @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
*
* Update a VSI context.
@@ -717,13 +828,13 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *cmd =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_update_vsi_parameters);
- cmd->seid = cpu_to_le16(vsi_ctx->seid);
+ cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
@@ -810,7 +921,6 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
/**
* i40e_aq_send_driver_version
* @hw: pointer to the hw struct
- * @event: driver event: driver ok, start or stop
* @dv: driver's major, minor version
* @cmd_details: pointer to command details structure or NULL
*
@@ -873,6 +983,7 @@ i40e_get_link_status_exit:
* @downlink_seid: the VSI SEID
* @enabled_tc: bitmap of TCs to be enabled
* @default_port: true for default port VSI, false for control port
+ * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support
* @veb_seid: pointer to where to put the resulting VEB SEID
* @cmd_details: pointer to command details structure or NULL
*
@@ -881,7 +992,8 @@ i40e_get_link_status_exit:
**/
i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *veb_seid,
+ bool default_port, bool enable_l2_filtering,
+ u16 *veb_seid,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -907,6 +1019,10 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
else
veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+
+ if (enable_l2_filtering)
+ veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER;
+
cmd->veb_flags = cpu_to_le16(veb_flags);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -922,10 +1038,10 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
* @hw: pointer to the hw struct
* @veb_seid: the SEID of the VEB to query
* @switch_id: the uplink switch id
- * @floating_veb: set to true if the VEB is floating
+ * @floating: set to true if the VEB is floating
* @statistic_index: index of the stats counter block for this VEB
* @vebs_used: number of VEB's used by function
- * @vebs_unallocated: total VEB's not reserved by any function
+ * @vebs_free: total VEB's not reserved by any function
* @cmd_details: pointer to command details structure or NULL
*
* This retrieves the parameters for a particular VEB, specified by
@@ -1059,89 +1175,11 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
}
/**
- * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
- * @hw: pointer to the hw struct
- * @seid: VSI for the vlan filters
- * @v_list: list of vlan filters to be added
- * @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
- u16 buf_size;
-
- if (count == 0 || !v_list || !hw)
- return I40E_ERR_PARAM;
-
- buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
- cmd->num_addresses = cpu_to_le16(count);
- cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
- cmd->seid[1] = 0;
- cmd->seid[2] = 0;
-
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-
- status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
- cmd_details);
-
- return status;
-}
-
-/**
- * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
- * @hw: pointer to the hw struct
- * @seid: VSI for the vlan filters
- * @v_list: list of macvlans to be removed
- * @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
- u16 buf_size;
-
- if (count == 0 || !v_list || !hw)
- return I40E_ERR_PARAM;
-
- buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
- cmd->num_addresses = cpu_to_le16(count);
- cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
- cmd->seid[1] = 0;
- cmd->seid[2] = 0;
-
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-
- status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
- cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_send_msg_to_vf
* @hw: pointer to the hardware structure
* @vfid: vf id to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
* @msg: pointer to the msg buffer
* @msglen: msg length
* @cmd_details: pointer to command details
@@ -1519,8 +1557,8 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_list_capabilites *cmd;
- i40e_status status = 0;
struct i40e_aq_desc desc;
+ i40e_status status = 0;
cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
@@ -1681,6 +1719,63 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
}
/**
+ * i40e_aq_add_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @udp_port: the UDP port to add
+ * @header_len: length of the tunneling header length in DWords
+ * @protocol_index: protocol index type
+ * @filter_index: pointer to filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 header_len,
+ u8 protocol_index, u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_udp_tunnel *cmd =
+ (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
+ struct i40e_aqc_del_udp_tunnel_completion *resp =
+ (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+
+ cmd->udp_port = cpu_to_le16(udp_port);
+ cmd->protocol_type = protocol_index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status)
+ *filter_index = resp->index;
+
+ return status;
+}
+
+/**
+ * i40e_aq_del_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @index: filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_remove_udp_tunnel *cmd =
+ (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+
+ cmd->index = index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_delete_element - Delete switch element
* @hw: pointer to the hw struct
* @seid: the SEID to delete from the switch
@@ -1709,6 +1804,28 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
}
/**
+ * i40e_aq_dcb_updated - DCB Updated Command
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * EMP will return when the shared RPB settings have been
+ * recomputed and modified. The retval field in the descriptor
+ * will be set to 0 when RPB is modified.
+ **/
+i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
* @hw: pointer to the hw struct
* @seid: seid for the physical port/switching component/vsi
@@ -1787,6 +1904,40 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
}
/**
+ * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component connected to Physical Port
+ * @ets_data: Buffer holding ETS parameters
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
+ sizeof(*ets_data), opcode, cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_switching_comp_bw_config,
+ cmd_details);
+}
+
+/**
* i40e_aq_query_vsi_bw_config - Query VSI BW configuration
* @hw: pointer to the hw struct
* @seid: seid of the VSI
@@ -2039,3 +2190,110 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
return 0;
}
+
+/**
+ * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
+ * @hw: pointer to the hw struct
+ * @mac_addr: MAC address to use in the filter
+ * @ethtype: Ethertype to use in the filter
+ * @flags: Flags that needs to be applied to the filter
+ * @vsi_seid: seid of the control VSI
+ * @queue: VSI queue number to send the packet to
+ * @is_add: Add control packet filter if True else remove
+ * @stats: Structure to hold information on control filter counts
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This command will Add or Remove control packet filter for a control VSI.
+ * In return it will update the total number of perfect filter count in
+ * the stats member.
+ **/
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_control_packet_filter *cmd =
+ (struct i40e_aqc_add_remove_control_packet_filter *)
+ &desc.params.raw;
+ struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
+ (struct i40e_aqc_add_remove_control_packet_filter_completion *)
+ &desc.params.raw;
+ i40e_status status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ if (is_add) {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_control_packet_filter);
+ cmd->queue = cpu_to_le16(queue);
+ } else {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_control_packet_filter);
+ }
+
+ if (mac_addr)
+ memcpy(cmd->mac, mac_addr, ETH_ALEN);
+
+ cmd->etype = cpu_to_le16(ethtype);
+ cmd->flags = cpu_to_le16(flags);
+ cmd->seid = cpu_to_le16(vsi_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && stats) {
+ stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
+ stats->etype_used = le16_to_cpu(resp->etype_used);
+ stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
+ stats->etype_free = le16_to_cpu(resp->etype_free);
+ }
+
+ return status;
+}
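+
+/* Illustrative sketch: the debugfs "lldp stop" handler later in this diff
+ * uses this helper to add a perfect filter so LLDP frames reach software
+ * once firmware LLDP is stopped; vsi_seid below is a placeholder:
+ *
+ *	i40e_aq_add_rem_control_packet_filter(hw, hw->mac.addr,
+ *			I40E_ETH_P_LLDP, 0, vsi_seid, 0, true, NULL, NULL);
+ */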
+
+/**
+ * i40e_set_pci_config_data - store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status word from PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
+ **/
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+{
+ hw->bus.type = i40e_bus_type_pci_express;
+
+ switch (link_status & PCI_EXP_LNKSTA_NLW) {
+ case PCI_EXP_LNKSTA_NLW_X1:
+ hw->bus.width = i40e_bus_width_pcie_x1;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X2:
+ hw->bus.width = i40e_bus_width_pcie_x2;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X4:
+ hw->bus.width = i40e_bus_width_pcie_x4;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X8:
+ hw->bus.width = i40e_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = i40e_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & PCI_EXP_LNKSTA_CLS) {
+ case PCI_EXP_LNKSTA_CLS_2_5GB:
+ hw->bus.speed = i40e_bus_speed_2500;
+ break;
+ case PCI_EXP_LNKSTA_CLS_5_0GB:
+ hw->bus.speed = i40e_bus_speed_5000;
+ break;
+ case PCI_EXP_LNKSTA_CLS_8_0GB:
+ hw->bus.speed = i40e_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = i40e_bus_speed_unknown;
+ break;
+ }
+}
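+
+/* Illustrative sketch: the caller is expected to read the PCIe Link Status
+ * register and pass it in; a minimal example using the generic capability
+ * accessor (pdev is a placeholder for the caller's struct pci_dev):
+ *
+ *	u16 link_status;
+ *
+ *	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
+ *	i40e_set_pci_config_data(hw, link_status);
+ */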
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
new file mode 100644
index 000000000000..50730141bb7b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -0,0 +1,469 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_dcb.h"
+
+/**
+ * i40e_get_dcbx_status
+ * @hw: pointer to the hw struct
+ * @status: Embedded DCBX Engine Status
+ *
+ * Get the DCBX status from the Firmware
+ **/
+i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+{
+ u32 reg;
+
+ if (!status)
+ return I40E_ERR_PARAM;
+
+ reg = rd32(hw, I40E_PRTDCB_GENS);
+ *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
+ I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
+
+ return 0;
+}
+
+/**
+ * i40e_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_ieee_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
+ I40E_IEEE_ETS_WILLING_SHIFT);
+ etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
+ I40E_IEEE_ETS_CBS_SHIFT);
+ etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
+ I40E_IEEE_ETS_MAXTC_SHIFT);
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tsatable[i] = buf[offset++];
+}
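+
+/* Worked example (hypothetical octet): a priority-table byte of 0x23 gives
+ * (0x23 & I40E_IEEE_ETS_PRIO_1_MASK) >> 4 = 2 for the even-indexed entry
+ * and 0x23 & I40E_IEEE_ETS_PRIO_0_MASK = 3 for the odd-indexed entry,
+ * matching the nibble layout shown in the table above.
+ */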
+
+/**
+ * i40e_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* Move offset to priority table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
+ I40E_IEEE_PFC_WILLING_SHIFT);
+ dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
+ I40E_IEEE_PFC_MBC_SHIFT);
+ dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
+ I40E_IEEE_PFC_CAP_SHIFT);
+ dcbcfg->pfc.pfcenable = buf[1];
+}
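+
+/* Worked example (hypothetical octets): a first octet of 0x88 decodes as
+ * willing = 1, mbc = 0 and pfccap = 8, and a PFC enable octet of 0x18
+ * means PFC is enabled for priorities 3 and 4.
+ */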
+
+/**
+ * i40e_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ **/
+static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength;
+ u16 offset = 0;
+ u16 length;
+ int i = 0;
+ u8 *buf;
+
+ typelength = ntohs(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ buf = tlv->tlvinfo;
+
+ /* The App priority table starts 5 octets after TLV header */
+ length -= (sizeof(tlv->ouisubtype) + 1);
+
+ /* Move offset to App Priority Table */
+ offset++;
+
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (offset < length) {
+ dcbcfg->app[i].priority = (u8)((buf[offset] &
+ I40E_IEEE_APP_PRIO_MASK) >>
+ I40E_IEEE_APP_PRIO_SHIFT);
+ dcbcfg->app[i].selector = (u8)((buf[offset] &
+ I40E_IEEE_APP_SEL_MASK) >>
+ I40E_IEEE_APP_SEL_SHIFT);
+ dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
+ buf[offset + 2];
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+
+ dcbcfg->numapps = i;
+}
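+
+/* Worked example (hypothetical APP entry): the octets 0x61 0x89 0x06
+ * decode as priority = (0x61 & 0xE0) >> 5 = 3, selector = 0x61 & 0x7 = 1
+ * (Ethertype) and protocolid = (0x89 << 8) | 0x06 = 0x8906, i.e. an FCoE
+ * application mapped to priority 3.
+ */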
+
+/**
+ * i40e_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u8 subtype;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ switch (subtype) {
+ case I40E_IEEE_SUBTYPE_ETS_CFG:
+ i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_ETS_REC:
+ i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_PFC_CFG:
+ i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_APP_PRI:
+ i40e_parse_ieee_app_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update DCB configuration data
+ *
+ * Currently only the IEEE 802.1Qaz TLV is supported; all other
+ * organizationally specific TLVs are ignored
+ **/
+static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u32 oui;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
+ I40E_LLDP_TLV_OUI_SHIFT);
+ switch (oui) {
+ case I40E_IEEE_8021QAZ_OUI:
+ i40e_parse_ieee_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_lldp_to_dcb_config
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ **/
+i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ i40e_status ret = 0;
+ struct i40e_lldp_org_tlv *tlv;
+ u16 type;
+ u16 length;
+ u16 typelength;
+
+ if (!lldpmib || !dcbcfg)
+ return I40E_ERR_PARAM;
+
+ /* set to the start of LLDPDU */
+ lldpmib += ETH_HLEN;
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ while (tlv) {
+ typelength = ntohs(tlv->typelength);
+ type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+
+ if (type == I40E_TLV_TYPE_END)
+ break;/* END TLV break out */
+
+ switch (type) {
+ case I40E_TLV_TYPE_ORG:
+ i40e_parse_org_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+
+ /* Move to next TLV */
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) +
+ length);
+ }
+
+ return ret;
+}
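+
+/* Worked example (hypothetical TLV header): a typelength word of 0xFE19
+ * (after ntohs) decodes as type = (0xFE19 & 0xFE00) >> 9 = 127, an
+ * organizationally specific TLV, and length = 0xFE19 & 0x01FF = 25 octets,
+ * so the walker above advances by 2 + 25 bytes to the next TLV.
+ */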
+
+/**
+ * i40e_aq_get_dcb_config
+ * @hw: pointer to the hw struct
+ * @mib_type: mib type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the Firmware
+ **/
+i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ i40e_status ret = 0;
+ struct i40e_virt_mem mem;
+ u8 *lldpmib;
+
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type,
+ (void *)lldpmib, I40E_LLDPDU_SIZE,
+ NULL, NULL, NULL);
+ if (ret)
+ goto free_mem;
+
+ /* Parse LLDP MIB to get dcb configuration */
+ ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg);
+
+free_mem:
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_get_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get DCB configuration from the Firmware
+ **/
+i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
+{
+ i40e_status ret = 0;
+
+ /* Get Local DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->local_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+out:
+ return ret;
+}
+
+/**
+ * i40e_init_dcb
+ * @hw: pointer to the hw struct
+ *
+ * Update DCB configuration from the Firmware
+ **/
+i40e_status i40e_init_dcb(struct i40e_hw *hw)
+{
+ i40e_status ret = 0;
+
+ if (!hw->func_caps.dcb)
+ return ret;
+
+ /* Get DCBX status */
+ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
+ if (ret)
+ return ret;
+
+ /* Check the DCBX Status */
+ switch (hw->dcbx_status) {
+ case I40E_DCBX_STATUS_DONE:
+ case I40E_DCBX_STATUS_IN_PROGRESS:
+ /* Get current DCBX configuration */
+ ret = i40e_get_dcb_config(hw);
+ break;
+ case I40E_DCBX_STATUS_DISABLED:
+ return ret;
+ case I40E_DCBX_STATUS_NOT_STARTED:
+ case I40E_DCBX_STATUS_MULTIPLE_PEERS:
+ default:
+ break;
+ }
+
+ /* Configure the LLDP MIB change event */
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
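+
+/* Illustrative sketch, not part of this patch: PF bring-up is expected to
+ * call this once after device capabilities are known, roughly as below
+ * (error handling and the vsi argument are placeholders):
+ *
+ *	err = i40e_init_dcb(&pf->hw);
+ *	if (!err)
+ *		i40e_dcbnl_setup(vsi);
+ */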
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
new file mode 100644
index 000000000000..34cf1c30c7ff
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -0,0 +1,107 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DCB_H_
+#define _I40E_DCB_H_
+
+#include "i40e_type.h"
+
+#define I40E_DCBX_STATUS_NOT_STARTED 0
+#define I40E_DCBX_STATUS_IN_PROGRESS 1
+#define I40E_DCBX_STATUS_DONE 2
+#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3
+#define I40E_DCBX_STATUS_DISABLED 7
+
+#define I40E_TLV_TYPE_END 0
+#define I40E_TLV_TYPE_ORG 127
+
+#define I40E_IEEE_8021QAZ_OUI 0x0080C2
+#define I40E_IEEE_SUBTYPE_ETS_CFG 9
+#define I40E_IEEE_SUBTYPE_ETS_REC 10
+#define I40E_IEEE_SUBTYPE_PFC_CFG 11
+#define I40E_IEEE_SUBTYPE_APP_PRI 12
+
+/* Defines for LLDP TLV header */
+#define I40E_LLDP_TLV_LEN_SHIFT 0
+#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
+#define I40E_LLDP_TLV_TYPE_SHIFT 9
+#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT)
+#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
+#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
+#define I40E_LLDP_TLV_OUI_SHIFT 8
+#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
+
+/* Defines for IEEE ETS TLV */
+#define I40E_IEEE_ETS_MAXTC_SHIFT 0
+#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
+#define I40E_IEEE_ETS_CBS_SHIFT 6
+#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_WILLING_SHIFT 7
+#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_PRIO_0_SHIFT 0
+#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
+#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
+#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
+
+/* Defines for IEEE TSA types */
+#define I40E_IEEE_TSA_STRICT 0
+#define I40E_IEEE_TSA_ETS 2
+
+/* Defines for IEEE PFC TLV */
+#define I40E_IEEE_PFC_CAP_SHIFT 0
+#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
+#define I40E_IEEE_PFC_MBC_SHIFT 6
+#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_WILLING_SHIFT 7
+#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+
+/* Defines for IEEE APP TLV */
+#define I40E_IEEE_APP_SEL_SHIFT 0
+#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT)
+#define I40E_IEEE_APP_PRIO_SHIFT 5
+#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
+
+
+#pragma pack(1)
+
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct i40e_lldp_org_tlv {
+ __be16 typelength;
+ __be32 ouisubtype;
+ u8 tlvinfo[1];
+};
+#pragma pack()
+
+i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
+ u16 *status);
+i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg);
+i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg);
+i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
+i40e_status i40e_init_dcb(struct i40e_hw *hw);
+#endif /* _I40E_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
new file mode 100644
index 000000000000..6e8103abfd0d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -0,0 +1,316 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifdef CONFIG_I40E_DCB
+#include "i40e.h"
+#include <net/dcbnl.h>
+
+/**
+ * i40e_get_pfc_delay - retrieve PFC Link Delay
+ * @hw: pointer to hardware struct
+ * @delay: holds the PFC Link delay value
+ *
+ * Returns PFC Link Delay from the PRTDCB_GENC.PFCLDA
+ **/
+static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
+{
+ u32 val;
+
+ val = rd32(hw, I40E_PRTDCB_GENC);
+ *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
+ I40E_PRTDCB_GENC_PFCLDA_SHIFT);
+}
+
+/**
+ * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration
+ * @dev: the corresponding netdev
+ * @ets: structure to hold the ETS information
+ *
+ * Returns local IEEE ETS configuration
+ **/
+static int i40e_dcbnl_ieee_getets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ dcbxcfg = &hw->local_dcbx_config;
+ ets->willing = dcbxcfg->etscfg.willing;
+ ets->ets_cap = dcbxcfg->etscfg.maxtcs;
+ ets->cbs = dcbxcfg->etscfg.cbs;
+ memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable,
+ sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable,
+ sizeof(ets->tc_rx_bw));
+ memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable,
+ sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, dcbxcfg->etscfg.prioritytable,
+ sizeof(ets->prio_tc));
+ memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable,
+ sizeof(ets->tc_reco_bw));
+ memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable,
+ sizeof(ets->tc_reco_tsa));
+ memcpy(ets->reco_prio_tc, dcbxcfg->etsrec.prioritytable,
+ sizeof(ets->reco_prio_tc));
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration
+ * @dev: the corresponding netdev
+ * @pfc: structure to hold the PFC information
+ *
+ * Returns local IEEE PFC configuration
+ **/
+static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ dcbxcfg = &hw->local_dcbx_config;
+ pfc->pfc_cap = dcbxcfg->pfc.pfccap;
+ pfc->pfc_en = dcbxcfg->pfc.pfcenable;
+ pfc->mbc = dcbxcfg->pfc.mbc;
+ i40e_get_pfc_delay(hw, &pfc->delay);
+
+ /* Get Requests/Indications */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ pfc->requests[i] = pf->stats.priority_xoff_tx[i];
+ pfc->indications[i] = pf->stats.priority_xoff_rx[i];
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_getdcbx - retrieve current DCBx capability
+ * @dev: the corresponding netdev
+ *
+ * Returns DCBx capability features
+ **/
+static u8 i40e_dcbnl_getdcbx(struct net_device *dev)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+ return pf->dcbx_cap;
+}
+
+/**
+ * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx
+ * @dev: the corresponding netdev
+ * @perm_addr: buffer to hold the LAN and SAN MAC addresses used by DCBx
+ *
+ * Returns the SAN MAC address used for LLDP exchange
+ **/
+static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev,
+ u8 *perm_addr)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ int i, j;
+
+ memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
+ for (i = 0; i < dev->addr_len; i++)
+ perm_addr[i] = pf->hw.mac.perm_addr[i];
+
+ for (j = 0; j < dev->addr_len; j++, i++)
+ perm_addr[i] = pf->hw.mac.san_addr[j];
+}
+
+static const struct dcbnl_rtnl_ops dcbnl_ops = {
+ .ieee_getets = i40e_dcbnl_ieee_getets,
+ .ieee_getpfc = i40e_dcbnl_ieee_getpfc,
+ .getdcbx = i40e_dcbnl_getdcbx,
+ .getpermhwaddr = i40e_dcbnl_get_perm_hw_addr,
+};
+
+/**
+ * i40e_dcbnl_set_all - set all the apps and ieee data from DCBx config
+ * @vsi: the corresponding vsi
+ *
+ * Set up all the IEEE APPs in the DCBNL App Table and generate an event
+ * for the other settings
+ **/
+void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
+{
+ struct net_device *dev = vsi->netdev;
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+ struct dcb_app sapp;
+ u8 prio, tc_map;
+ int i;
+
+ /* DCB not enabled */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return;
+
+ dcbxcfg = &hw->local_dcbx_config;
+
+ /* Set up all the App TLVs if DCBx is negotiated */
+ for (i = 0; i < dcbxcfg->numapps; i++) {
+ prio = dcbxcfg->app[i].priority;
+ tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]);
+
+ /* Add APP only if the TC is enabled for this VSI */
+ if (tc_map & vsi->tc_config.enabled_tc) {
+ sapp.selector = dcbxcfg->app[i].selector;
+ sapp.protocol = dcbxcfg->app[i].protocolid;
+ sapp.priority = prio;
+ dcb_ieee_setapp(dev, &sapp);
+ }
+ }
+
+ /* Notify user-space of the changes */
+ dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
+}
+
+/**
+ * i40e_dcbnl_vsi_del_app - Delete APP for given VSI
+ * @vsi: the corresponding vsi
+ * @app: APP to delete
+ *
+ * Delete given APP from the DCBNL APP table for given
+ * VSI
+ **/
+static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
+ struct i40e_ieee_app_priority_table *app)
+{
+ struct net_device *dev = vsi->netdev;
+ struct dcb_app sapp;
+
+ if (!dev)
+ return -EINVAL;
+
+ sapp.selector = app->selector;
+ sapp.protocol = app->protocolid;
+ sapp.priority = app->priority;
+ return dcb_ieee_delapp(dev, &sapp);
+}
+
+/**
+ * i40e_dcbnl_del_app - Delete APP on all VSIs
+ * @pf: the corresponding pf
+ * @app: APP to delete
+ *
+ * Delete given APP from all the VSIs for given PF
+ **/
+static void i40e_dcbnl_del_app(struct i40e_pf *pf,
+ struct i40e_ieee_app_priority_table *app)
+{
+ int v, err;
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (pf->vsi[v] && pf->vsi[v]->netdev) {
+ err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
+ if (err)
+ dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
+ __func__, pf->vsi[v]->seid,
+ err, app->selector,
+ app->protocolid, app->priority);
+ }
+ }
+}
+
+/**
+ * i40e_dcbnl_find_app - Search APP in given DCB config
+ * @cfg: DCBX configuration data
+ * @app: APP to search for
+ *
+ * Find given APP in the DCB configuration
+ **/
+static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
+ struct i40e_ieee_app_priority_table *app)
+{
+ int i;
+
+ for (i = 0; i < cfg->numapps; i++) {
+ if (app->selector == cfg->app[i].selector &&
+ app->protocolid == cfg->app[i].protocolid &&
+ app->priority == cfg->app[i].priority)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * i40e_dcbnl_flush_apps - Delete all removed APPs
+ * @pf: the corresponding pf
+ * @new_cfg: new DCBX configuration data
+ *
+ * Find and delete all APPs that are not present in the passed
+ * DCB configuration
+ **/
+void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+ struct i40e_dcbx_config *new_cfg)
+{
+ struct i40e_ieee_app_priority_table app;
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ dcbxcfg = &hw->local_dcbx_config;
+ for (i = 0; i < dcbxcfg->numapps; i++) {
+ app = dcbxcfg->app[i];
+ /* The APP is not available anymore; delete it */
+ if (!i40e_dcbnl_find_app(new_cfg, &app))
+ i40e_dcbnl_del_app(pf, &app);
+ }
+}
+
+/**
+ * i40e_dcbnl_setup - DCBNL setup
+ * @vsi: the corresponding vsi
+ *
+ * Set up DCBNL ops and initial APP TLVs
+ **/
+void i40e_dcbnl_setup(struct i40e_vsi *vsi)
+{
+ struct net_device *dev = vsi->netdev;
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+ /* DCB not enabled */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return;
+
+ /* Do not setup DCB NL ops for MFP mode */
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ dev->dcbnl_ops = &dcbnl_ops;
+
+ /* Set initial IEEE DCB settings */
+ i40e_dcbnl_set_all(vsi);
+}
+#endif /* CONFIG_I40E_DCB */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index ef4cb1cf31f2..da22c3fa2c00 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -192,12 +191,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
len = (sizeof(struct i40e_aq_desc)
* pf->hw.aq.num_asq_entries);
- memcpy(p, pf->hw.aq.asq.desc, len);
+ memcpy(p, pf->hw.aq.asq.desc_buf.va, len);
p += len;
len = (sizeof(struct i40e_aq_desc)
* pf->hw.aq.num_arq_entries);
- memcpy(p, pf->hw.aq.arq.desc, len);
+ memcpy(p, pf->hw.aq.arq.desc_buf.va, len);
p += len;
i40e_dbg_dump_data_len = buflen;
@@ -362,7 +361,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
}
/**
- * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into pokem datum
+ * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
* @pf: the i40e_pf created in command write
* @seid: the seid the user put in
**/
@@ -516,10 +515,10 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->stats.bytes,
rx_ring->rx_stats.non_eop_descs);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+ " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
i,
- rx_ring->rx_stats.alloc_rx_page_failed,
- rx_ring->rx_stats.alloc_rx_buff_failed);
+ rx_ring->rx_stats.alloc_page_failed,
+ rx_ring->rx_stats.alloc_buff_failed);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: size = %i, dma = 0x%08lx\n",
i, rx_ring->size,
@@ -533,6 +532,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
+
dev_info(&pf->pdev->dev,
" tx_rings[%i]: desc = %p\n",
i, tx_ring->desc);
@@ -707,8 +707,13 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
{
struct i40e_adminq_ring *ring;
struct i40e_hw *hw = &pf->hw;
+ char hdr[32];
int i;
+ snprintf(hdr, sizeof(hdr), "%s %s: ",
+ dev_driver_string(&pf->pdev->dev),
+ dev_name(&pf->pdev->dev));
+
/* first the send (command) ring, then the receive (event) ring */
dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
ring = &(hw->aq.asq);
@@ -718,14 +723,8 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
" at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
i, d->flags, d->opcode, d->datalen, d->retval,
d->cookie_high, d->cookie_low);
- dev_info(&pf->pdev->dev,
- " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- d->params.raw[0], d->params.raw[1], d->params.raw[2],
- d->params.raw[3], d->params.raw[4], d->params.raw[5],
- d->params.raw[6], d->params.raw[7], d->params.raw[8],
- d->params.raw[9], d->params.raw[10], d->params.raw[11],
- d->params.raw[12], d->params.raw[13],
- d->params.raw[14], d->params.raw[15]);
+ print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
+ 16, 1, d->params.raw, 16, 0);
}
dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
@@ -736,14 +735,8 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
" ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
i, d->flags, d->opcode, d->datalen, d->retval,
d->cookie_high, d->cookie_low);
- dev_info(&pf->pdev->dev,
- " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- d->params.raw[0], d->params.raw[1], d->params.raw[2],
- d->params.raw[3], d->params.raw[4], d->params.raw[5],
- d->params.raw[6], d->params.raw[7], d->params.raw[8],
- d->params.raw[9], d->params.raw[10], d->params.raw[11],
- d->params.raw[12], d->params.raw[13],
- d->params.raw[14], d->params.raw[15]);
+ print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
+ 16, 1, d->params.raw, 16, 0);
}
}
@@ -759,27 +752,25 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
struct i40e_pf *pf, bool is_rx_ring)
{
- union i40e_rx_desc *ds;
+ struct i40e_tx_desc *txd;
+ union i40e_rx_desc *rxd;
struct i40e_ring ring;
struct i40e_vsi *vsi;
int i;
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
- dev_info(&pf->pdev->dev,
- "vsi %d not found\n", vsi_seid);
- if (is_rx_ring)
- dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
- else
- dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
return;
}
if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
- if (is_rx_ring)
- dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
- else
- dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ return;
+ }
+ if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
+ dev_info(&pf->pdev->dev,
+ "descriptor rings have not been allocated for vsi %d\n",
+ vsi_seid);
return;
}
if (is_rx_ring)
@@ -790,22 +781,27 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
for (i = 0; i < ring.count; i++) {
- if (is_rx_ring)
- ds = I40E_RX_DESC(&ring, i);
- else
- ds = (union i40e_rx_desc *)
- I40E_TX_DESC(&ring, i);
- if ((sizeof(union i40e_rx_desc) ==
- sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+ if (!is_rx_ring) {
+ txd = I40E_TX_DESC(&ring, i);
dev_info(&pf->pdev->dev,
- " d[%03i] = 0x%016llx 0x%016llx\n", i,
- ds->read.pkt_addr, ds->read.hdr_addr);
- else
+ " d[%03i] = 0x%016llx 0x%016llx\n",
+ i, txd->buffer_addr,
+ txd->cmd_type_offset_bsz);
+ } else if (sizeof(union i40e_rx_desc) ==
+ sizeof(union i40e_16byte_rx_desc)) {
+ rxd = I40E_RX_DESC(&ring, i);
+ dev_info(&pf->pdev->dev,
+ " d[%03i] = 0x%016llx 0x%016llx\n",
+ i, rxd->read.pkt_addr,
+ rxd->read.hdr_addr);
+ } else {
+ rxd = I40E_RX_DESC(&ring, i);
dev_info(&pf->pdev->dev,
" d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
- i, ds->read.pkt_addr,
- ds->read.hdr_addr,
- ds->read.rsvd1, ds->read.rsvd2);
+ i, rxd->read.pkt_addr,
+ rxd->read.hdr_addr,
+ rxd->read.rsvd1, rxd->read.rsvd2);
+ }
}
} else if (cnt == 3) {
if (desc_n >= ring.count || desc_n < 0) {
@@ -813,27 +809,29 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
"descriptor %d not found\n", desc_n);
return;
}
- if (is_rx_ring)
- ds = I40E_RX_DESC(&ring, desc_n);
- else
- ds = (union i40e_rx_desc *)I40E_TX_DESC(&ring, desc_n);
- if ((sizeof(union i40e_rx_desc) ==
- sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+ if (!is_rx_ring) {
+ txd = I40E_TX_DESC(&ring, desc_n);
dev_info(&pf->pdev->dev,
- "vsi = %02i %s ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
- vsi_seid, is_rx_ring ? "rx" : "tx", ring_id,
- desc_n, ds->read.pkt_addr, ds->read.hdr_addr);
- else
+ "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+ vsi_seid, ring_id, desc_n,
+ txd->buffer_addr, txd->cmd_type_offset_bsz);
+ } else if (sizeof(union i40e_rx_desc) ==
+ sizeof(union i40e_16byte_rx_desc)) {
+ rxd = I40E_RX_DESC(&ring, desc_n);
+ dev_info(&pf->pdev->dev,
+ "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+ vsi_seid, ring_id, desc_n,
+ rxd->read.pkt_addr, rxd->read.hdr_addr);
+ } else {
+ rxd = I40E_RX_DESC(&ring, desc_n);
dev_info(&pf->pdev->dev,
"vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
- vsi_seid, ring_id,
- desc_n, ds->read.pkt_addr, ds->read.hdr_addr,
- ds->read.rsvd1, ds->read.rsvd2);
+ vsi_seid, ring_id, desc_n,
+ rxd->read.pkt_addr, rxd->read.hdr_addr,
+ rxd->read.rsvd1, rxd->read.rsvd2);
+ }
} else {
- if (is_rx_ring)
- dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
- else
- dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
}
}
@@ -979,8 +977,7 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
veb = i40e_dbg_find_veb(pf, seid);
if (!veb) {
- dev_info(&pf->pdev->dev,
- "%d: can't find veb\n", seid);
+ dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
return;
}
dev_info(&pf->pdev->dev,
@@ -1006,6 +1003,22 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
}
}
+/**
+ * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR
+ * @pf: the pf that would be altered
+ * @flag: flag that needs enabling or disabling
+ * @enable: Enable/disable FD SD/ATR
+ **/
+static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
+{
+ if (enable)
+ pf->flags |= flag;
+ else
+ pf->flags &= ~flag;
+ dev_info(&pf->pdev->dev, "requesting a pf reset\n");
+ i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+}
+
#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
/**
* i40e_dbg_command_write - write into command datum
@@ -1022,8 +1035,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
char *cmd_buf, *cmd_buf_tmp;
int bytes_not_copied;
struct i40e_vsi *vsi;
- u8 *print_buf_start;
- u8 *print_buf;
int vsi_seid;
int veb_seid;
int cnt;
@@ -1048,11 +1059,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
count = cmd_buf_tmp - cmd_buf + 1;
}
- print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
- if (!print_buf_start)
- goto command_write_done;
- print_buf = print_buf_start;
-
if (strncmp(cmd_buf, "add vsi", 7) == 0) {
vsi_seid = -1;
cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
@@ -1104,7 +1110,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
dev_info(&pf->pdev->dev,
- "add relay: vsi VSI %d not found\n", vsi_seid);
+ "add relay: VSI %d not found\n", vsi_seid);
goto command_write_done;
}
@@ -1462,20 +1468,24 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
dev_info(&pf->pdev->dev, "forcing PFR\n");
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "corer", 5) == 0) {
dev_info(&pf->pdev->dev, "forcing CoreR\n");
- i40e_do_reset(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "globr", 5) == 0) {
dev_info(&pf->pdev->dev, "forcing GlobR\n");
- i40e_do_reset(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "empr", 4) == 0) {
+ dev_info(&pf->pdev->dev, "forcing EMPR\n");
+ i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "read", 4) == 0) {
u32 address;
u32 value;
- cnt = sscanf(&cmd_buf[4], "%x", &address);
+ cnt = sscanf(&cmd_buf[4], "%i", &address);
if (cnt != 1) {
dev_info(&pf->pdev->dev, "read <reg>\n");
goto command_write_done;
@@ -1494,7 +1504,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(cmd_buf, "write", 5) == 0) {
u32 address, value;
- cnt = sscanf(&cmd_buf[5], "%x %x", &address, &value);
+ cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
if (cnt != 2) {
dev_info(&pf->pdev->dev, "write <reg> <value>\n");
goto command_write_done;
@@ -1512,7 +1522,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
address, value);
} else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
- cnt = sscanf(&cmd_buf[15], "%d", &vsi_seid);
+ cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
if (cnt == 0) {
int i;
for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
@@ -1539,6 +1549,118 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else {
dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n");
}
+ } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
+ struct i40e_aq_desc *desc;
+ i40e_status ret;
+
+ desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ if (!desc)
+ goto command_write_done;
+ cnt = sscanf(&cmd_buf[11],
+ "%hx %hx %hx %hx %x %x %x %x %x %x",
+ &desc->flags,
+ &desc->opcode, &desc->datalen, &desc->retval,
+ &desc->cookie_high, &desc->cookie_low,
+ &desc->params.internal.param0,
+ &desc->params.internal.param1,
+ &desc->params.internal.param2,
+ &desc->params.internal.param3);
+ if (cnt != 10) {
+ dev_info(&pf->pdev->dev,
+ "send aq_cmd: bad command string, cnt=%d\n",
+ cnt);
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
+ if (!ret) {
+ dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
+ } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x AQ Error: %d\n",
+ desc->opcode, pf->hw.aq.asq_last_status);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x Status: %d\n",
+ desc->opcode, ret);
+ }
+ dev_info(&pf->pdev->dev,
+ "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ desc->flags, desc->opcode, desc->datalen, desc->retval,
+ desc->cookie_high, desc->cookie_low,
+ desc->params.internal.param0,
+ desc->params.internal.param1,
+ desc->params.internal.param2,
+ desc->params.internal.param3);
+ kfree(desc);
+ desc = NULL;
+ } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
+ struct i40e_aq_desc *desc;
+ i40e_status ret;
+ u16 buffer_len;
+ u8 *buff;
+
+ desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ if (!desc)
+ goto command_write_done;
+ cnt = sscanf(&cmd_buf[20],
+ "%hx %hx %hx %hx %x %x %x %x %x %x %hd",
+ &desc->flags,
+ &desc->opcode, &desc->datalen, &desc->retval,
+ &desc->cookie_high, &desc->cookie_low,
+ &desc->params.internal.param0,
+ &desc->params.internal.param1,
+ &desc->params.internal.param2,
+ &desc->params.internal.param3,
+ &buffer_len);
+ if (cnt != 11) {
+ dev_info(&pf->pdev->dev,
+ "send indirect aq_cmd: bad command string, cnt=%d\n",
+ cnt);
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ /* Just stub a buffer big enough in case user messed up */
+ if (buffer_len == 0)
+ buffer_len = 1280;
+
+ buff = kzalloc(buffer_len, GFP_KERNEL);
+ if (!buff) {
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ ret = i40e_asq_send_command(&pf->hw, desc, buff,
+ buffer_len, NULL);
+ if (!ret) {
+ dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
+ } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x AQ Error: %d\n",
+ desc->opcode, pf->hw.aq.asq_last_status);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x Status: %d\n",
+ desc->opcode, ret);
+ }
+ dev_info(&pf->pdev->dev,
+ "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ desc->flags, desc->opcode, desc->datalen, desc->retval,
+ desc->cookie_high, desc->cookie_low,
+ desc->params.internal.param0,
+ desc->params.internal.param1,
+ desc->params.internal.param2,
+ desc->params.internal.param3);
+ print_hex_dump(KERN_INFO, "AQ buffer WB: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, buffer_len, true);
+ kfree(buff);
+ buff = NULL;
+ kfree(desc);
+ desc = NULL;
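+ /* Usage sketch for the two commands above; all numeric values are
+ * placeholders and the field order matches the sscanf format strings:
+ * echo "send aq_cmd 0x0 0x0001 0 0 0 0 0 0 0 0" > command
+ * echo "send indirect aq_cmd 0x0 0x0001 0 0 0 0 0 0 0 0 128" > command
+ */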
} else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
(strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
struct i40e_fdir_data fd_data;
@@ -1564,7 +1686,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (strncmp(cmd_buf, "add", 3) == 0)
add = true;
cnt = sscanf(&cmd_buf[13],
- "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %512s",
+ "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
&fd_data.q_index,
&fd_data.flex_off, &fd_data.pctype,
&fd_data.dest_vsi, &fd_data.dest_ctl,
@@ -1588,19 +1710,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
packet_len = min_t(u16,
packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
- dev_info(&pf->pdev->dev, "FD raw packet:\n");
for (i = 0; i < packet_len; i++) {
sscanf(&asc_packet[j], "%2hhx ",
&fd_data.raw_packet[i]);
j += 3;
- snprintf(print_buf, 3, "%02x ", fd_data.raw_packet[i]);
- print_buf += 3;
- if ((i % 16) == 15) {
- snprintf(print_buf, 1, "\n");
- print_buf++;
- }
}
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ dev_info(&pf->pdev->dev, "FD raw packet dump\n");
+ print_hex_dump(KERN_INFO, "FD raw packet: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ fd_data.raw_packet, packet_len, true);
ret = i40e_program_fdir_filter(&fd_data, pf, add);
if (!ret) {
dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
@@ -1612,6 +1730,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
fd_data.raw_packet = NULL;
kfree(asc_packet);
asc_packet = NULL;
+ } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) {
+ i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
+ } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
+ i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
+ } else if (strncmp(cmd_buf, "fd-sb off", 9) == 0) {
+ i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, false);
+ } else if (strncmp(cmd_buf, "fd-sb on", 8) == 0) {
+ i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, true);
} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
@@ -1622,8 +1748,35 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
pf->hw.aq.asq_last_status);
goto command_write_done;
}
+ ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+ pf->hw.mac.addr,
+ I40E_ETH_P_LLDP, 0,
+ pf->vsi[pf->lan_vsi]->seid,
+ 0, true, NULL, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: Add Control Packet Filter AQ command failed =0x%x\n",
+ __func__, pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+#ifdef CONFIG_I40E_DCB
+ pf->dcbx_cap = DCB_CAP_DCBX_HOST |
+ DCB_CAP_DCBX_VER_IEEE;
+#endif /* CONFIG_I40E_DCB */
} else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
int ret;
+ ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+ pf->hw.mac.addr,
+ I40E_ETH_P_LLDP, 0,
+ pf->vsi[pf->lan_vsi]->seid,
+ 0, false, NULL, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
+ __func__, pf->hw.aq.asq_last_status);
+ /* Continue and start FW LLDP anyways */
+ }
+
ret = i40e_aq_start_lldp(&pf->hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -1631,10 +1784,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
pf->hw.aq.asq_last_status);
goto command_write_done;
}
+#ifdef CONFIG_I40E_DCB
+ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
+ DCB_CAP_DCBX_VER_IEEE;
+#endif /* CONFIG_I40E_DCB */
} else if (strncmp(&cmd_buf[5],
"get local", 9) == 0) {
u16 llen, rlen;
- int ret, i;
+ int ret;
u8 *buff;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
@@ -1652,22 +1809,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
goto command_write_done;
}
- dev_info(&pf->pdev->dev,
- "Get LLDP MIB (local) AQ buffer written back:\n");
- for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
- snprintf(print_buf, 3, "%02x ", buff[i]);
- print_buf += 3;
- if ((i % 16) == 15) {
- snprintf(print_buf, 1, "\n");
- print_buf++;
- }
- }
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
+ print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, I40E_LLDPDU_SIZE, true);
kfree(buff);
buff = NULL;
} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
u16 llen, rlen;
- int ret, i;
+ int ret;
u8 *buff;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
@@ -1686,17 +1836,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
goto command_write_done;
}
- dev_info(&pf->pdev->dev,
- "Get LLDP MIB (remote) AQ buffer written back:\n");
- for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
- snprintf(print_buf, 3, "%02x ", buff[i]);
- print_buf += 3;
- if ((i % 16) == 15) {
- snprintf(print_buf, 1, "\n");
- print_buf++;
- }
- }
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
+ print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, I40E_LLDPDU_SIZE, true);
kfree(buff);
buff = NULL;
} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
@@ -1721,7 +1864,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
}
} else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
- u16 buffer_len, i, bytes;
+ u16 buffer_len, bytes;
u16 module;
u32 offset;
u16 *buff;
@@ -1775,16 +1918,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev,
"Read NVM module=0x%x offset=0x%x words=%d\n",
module, offset, buffer_len);
- for (i = 0; i < buffer_len; i++) {
- if ((i % 16) == 0) {
- snprintf(print_buf, 11, "\n0x%08x: ",
- offset + i);
- print_buf += 11;
- }
- snprintf(print_buf, 5, "%04x ", buff[i]);
- print_buf += 5;
- }
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ if (bytes)
+ print_hex_dump(KERN_INFO, "NVM Dump: ",
+ DUMP_PREFIX_OFFSET, 16, 2,
+ buff, bytes, true);
}
kfree(buff);
buff = NULL;
@@ -1814,8 +1951,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " pfr\n");
dev_info(&pf->pdev->dev, " corer\n");
dev_info(&pf->pdev->dev, " globr\n");
+ dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
+ dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
+ dev_info(&pf->pdev->dev, " fd-atr off\n");
+ dev_info(&pf->pdev->dev, " fd-atr on\n");
+ dev_info(&pf->pdev->dev, " fd-sb off\n");
+ dev_info(&pf->pdev->dev, " fd-sb on\n");
dev_info(&pf->pdev->dev, " lldp start\n");
dev_info(&pf->pdev->dev, " lldp stop\n");
dev_info(&pf->pdev->dev, " lldp get local\n");
@@ -1828,9 +1971,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
command_write_done:
kfree(cmd_buf);
cmd_buf = NULL;
- kfree(print_buf_start);
- print_buf = NULL;
- print_buf_start = NULL;
return count;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index de255143bde6..b2380daef8c1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -68,16 +67,16 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
struct i40e_diag_reg_test_info i40e_reg_list[] = {
/* offset mask elements stride */
- {I40E_QTX_CTL(0), 0x0000FFBF, 64, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+ {I40E_QTX_CTL(0), 0x0000FFBF, 4, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
{I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
- {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
- {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
- {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+ {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+ {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+ {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
{I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
{I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
- {I40E_PFINT_LNKLSTN(0), 0x000007FF, 511, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
- {I40E_QINT_TQCTL(0), 0x000000FF, I40E_QINT_TQCTL_MAX_INDEX + 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
- {I40E_QINT_RQCTL(0), 0x000000FF, I40E_QINT_RQCTL_MAX_INDEX + 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+ {I40E_PFINT_LNKLSTN(0), 0x000007FF, 64, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+ {I40E_QINT_TQCTL(0), 0x000000FF, 64, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+ {I40E_QINT_RQCTL(0), 0x000000FF, 64, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
{I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
{ 0 }
};
@@ -119,7 +118,7 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
/* read NVM control word and if NVM valid, validate EEPROM checksum*/
ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
- if ((!ret_code) &&
+ if (!ret_code &&
((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
(0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
ret_code = i40e_validate_nvm_checksum(hw, NULL);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index 3d98277f4526..0b5911652084 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -31,10 +30,10 @@
#include "i40e_type.h"
enum i40e_lb_mode {
- I40E_LB_MODE_NONE = 0,
- I40E_LB_MODE_PHY_LOCAL,
- I40E_LB_MODE_PHY_REMOTE,
- I40E_LB_MODE_MAC_LOCAL,
+ I40E_LB_MODE_NONE = 0x0,
+ I40E_LB_MODE_PHY_LOCAL = I40E_AQ_LB_PHY_LOCAL,
+ I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE,
+ I40E_LB_MODE_MAC_LOCAL = I40E_AQ_LB_MAC_LOCAL,
};
struct i40e_diag_reg_test_info {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 1b86138fa9e1..b1d7d8c5cb9b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -109,6 +108,8 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_oversize", stats.rx_oversize),
I40E_PF_STAT("rx_jabber", stats.rx_jabber),
I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
+ I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};
#define I40E_QUEUE_STATS_LEN(n) \
@@ -193,28 +194,48 @@ static int i40e_get_settings(struct net_device *netdev,
ecmd->supported = SUPPORTED_10000baseKR_Full;
ecmd->advertising = ADVERTISED_10000baseKR_Full;
break;
- case I40E_PHY_TYPE_10GBASE_T:
default:
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->advertising = ADVERTISED_10000baseT_Full;
+ if (i40e_is_40G_device(hw->device_id)) {
+ ecmd->supported = SUPPORTED_40000baseSR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseSR4_Full;
+ } else {
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
break;
}
- /* for now just say autoneg all the time */
ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
- if (hw->phy.media_type == I40E_MEDIA_TYPE_BACKPLANE) {
+ switch (hw->phy.media_type) {
+ case I40E_MEDIA_TYPE_BACKPLANE:
ecmd->supported |= SUPPORTED_Backplane;
ecmd->advertising |= ADVERTISED_Backplane;
ecmd->port = PORT_NONE;
- } else if (hw->phy.media_type == I40E_MEDIA_TYPE_BASET) {
+ break;
+ case I40E_MEDIA_TYPE_BASET:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
- } else {
+ break;
+ case I40E_MEDIA_TYPE_DA:
+ case I40E_MEDIA_TYPE_CX4:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_DA;
+ break;
+ case I40E_MEDIA_TYPE_FIBER:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
+ break;
+ case I40E_MEDIA_TYPE_UNKNOWN:
+ default:
+ ecmd->port = PORT_OTHER;
+ break;
}
ecmd->transceiver = XCVR_EXTERNAL;
@@ -256,12 +277,14 @@ static void i40e_get_pauseparam(struct net_device *netdev,
((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
- pause->rx_pause = 0;
- pause->tx_pause = 0;
- if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_RX)
+ if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
+ pause->rx_pause = 1;
+ } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
+ pause->tx_pause = 1;
+ } else if (hw->fc.current_mode == I40E_FC_FULL) {
pause->rx_pause = 1;
- if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_TX)
pause->tx_pause = 1;
+ }
}
static u32 i40e_get_msglevel(struct net_device *netdev)
@@ -329,38 +352,56 @@ static int i40e_get_eeprom(struct net_device *netdev,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
- int first_word, last_word;
- u16 i, eeprom_len;
- u16 *eeprom_buff;
- int ret_val = 0;
-
+ struct i40e_pf *pf = np->vsi->back;
+ int ret_val = 0, len;
+ u8 *eeprom_buff;
+ u16 i, sectors;
+ bool last;
+#define I40E_NVM_SECTOR_SIZE 4096
if (eeprom->len == 0)
return -EINVAL;
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_len = last_word - first_word + 1;
-
- eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
+ eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
- ret_val = i40e_read_nvm_buffer(hw, first_word, &eeprom_len,
- eeprom_buff);
- if (eeprom_len == 0) {
- kfree(eeprom_buff);
- return -EACCES;
+ ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret_val) {
+ dev_info(&pf->pdev->dev,
+ "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
+ ret_val, hw->aq.asq_last_status);
+ goto free_buff;
}
- /* Device's eeprom is always little-endian, word addressable */
- for (i = 0; i < eeprom_len; i++)
- le16_to_cpus(&eeprom_buff[i]);
+ sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
+ sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
+ len = I40E_NVM_SECTOR_SIZE;
+ last = false;
+ for (i = 0; i < sectors; i++) {
+ if (i == (sectors - 1)) {
+ len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
+ last = true;
+ }
+ ret_val = i40e_aq_read_nvm(hw, 0x0,
+ eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
+ len,
+ (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
+ last, NULL);
+ if (ret_val) {
+ dev_info(&pf->pdev->dev,
+ "read NVM failed err=%d status=0x%x\n",
+ ret_val, hw->aq.asq_last_status);
+ goto release_nvm;
+ }
+ }
- memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+release_nvm:
+ i40e_release_nvm(hw);
+ memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
+free_buff:
kfree(eeprom_buff);
-
return ret_val;
}
@@ -368,8 +409,14 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
-
- return hw->nvm.sr_size * 2;
+ u32 val;
+
+ val = (rd32(hw, I40E_GLPCI_LBARCTRL)
+ & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
+ >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
+ /* register returns value in power of 2, 64Kbyte chunks. */
+ val = (64 * 1024) * (1 << val);
+ return val;
}
static void i40e_get_drvinfo(struct net_device *netdev,
@@ -418,15 +465,19 @@ static int i40e_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_tx_count = clamp_t(u32, ring->tx_pending,
- I40E_MIN_NUM_DESCRIPTORS,
- I40E_MAX_NUM_DESCRIPTORS);
- new_tx_count = ALIGN(new_tx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
+ ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
+ netdev_info(netdev,
+ "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
+ ring->tx_pending, ring->rx_pending,
+ I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
+ return -EINVAL;
+ }
- new_rx_count = clamp_t(u32, ring->rx_pending,
- I40E_MIN_NUM_DESCRIPTORS,
- I40E_MAX_NUM_DESCRIPTORS);
- new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+ new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
+ new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
/* if nothing to do return success */
if ((new_tx_count == vsi->tx_rings[0]->count) &&
@@ -699,11 +750,44 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
static int i40e_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
- return ethtool_op_get_ts_info(dev, info);
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (pf->ptp_clock)
+ info->phc_index = ptp_clock_index(pf->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+
+ return 0;
}
-static int i40e_link_test(struct i40e_pf *pf, u64 *data)
+static int i40e_link_test(struct net_device *netdev, u64 *data)
{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ netif_info(pf, hw, netdev, "link test\n");
if (i40e_get_link_status(&pf->hw))
*data = 0;
else
@@ -712,36 +796,51 @@ static int i40e_link_test(struct i40e_pf *pf, u64 *data)
return *data;
}
-static int i40e_reg_test(struct i40e_pf *pf, u64 *data)
+static int i40e_reg_test(struct net_device *netdev, u64 *data)
{
- i40e_status ret;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
- ret = i40e_diag_reg_test(&pf->hw);
- *data = ret;
+ netif_info(pf, hw, netdev, "register test\n");
+ *data = i40e_diag_reg_test(&pf->hw);
- return ret;
+ return *data;
}
-static int i40e_eeprom_test(struct i40e_pf *pf, u64 *data)
+static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
{
- i40e_status ret;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
- ret = i40e_diag_eeprom_test(&pf->hw);
- *data = ret;
+ netif_info(pf, hw, netdev, "eeprom test\n");
+ *data = i40e_diag_eeprom_test(&pf->hw);
- return ret;
+ return *data;
}
-static int i40e_intr_test(struct i40e_pf *pf, u64 *data)
+static int i40e_intr_test(struct net_device *netdev, u64 *data)
{
- *data = -ENOSYS;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ u16 swc_old = pf->sw_int_count;
+
+ netif_info(pf, hw, netdev, "interrupt test\n");
+ wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
+ (I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
+ usleep_range(1000, 2000);
+ *data = (swc_old == pf->sw_int_count);
return *data;
}
-static int i40e_loopback_test(struct i40e_pf *pf, u64 *data)
+static int i40e_loopback_test(struct net_device *netdev, u64 *data)
{
- *data = -ENOSYS;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ netif_info(pf, hw, netdev, "loopback test not implemented\n");
+ *data = 0;
return *data;
}
@@ -752,42 +851,38 @@ static void i40e_diag_test(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- set_bit(__I40E_TESTING, &pf->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
+ netif_info(pf, drv, netdev, "offline testing starting\n");
- netdev_info(netdev, "offline testing starting\n");
+ set_bit(__I40E_TESTING, &pf->state);
/* Link test performed before hardware reset
* so autoneg doesn't interfere with test result
*/
- netdev_info(netdev, "link test starting\n");
- if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+ if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- netdev_info(netdev, "register test starting\n");
- if (i40e_reg_test(pf, &data[I40E_ETH_TEST_REG]))
+ if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
- netdev_info(netdev, "eeprom test starting\n");
- if (i40e_eeprom_test(pf, &data[I40E_ETH_TEST_EEPROM]))
+ if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
- netdev_info(netdev, "interrupt test starting\n");
- if (i40e_intr_test(pf, &data[I40E_ETH_TEST_INTR]))
+ if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
- netdev_info(netdev, "loopback test starting\n");
- if (i40e_loopback_test(pf, &data[I40E_ETH_TEST_LOOPBACK]))
+ /* run reg test last, a reset is required after it */
+ if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__I40E_TESTING, &pf->state);
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
} else {
- netdev_info(netdev, "online test starting\n");
/* Online tests */
- if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+ netif_info(pf, drv, netdev, "online testing starting\n");
+
+ if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
/* Offline only tests, not run in online; pass by default */
@@ -795,16 +890,53 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_EEPROM] = 0;
data[I40E_ETH_TEST_INTR] = 0;
data[I40E_ETH_TEST_LOOPBACK] = 0;
-
- clear_bit(__I40E_TESTING, &pf->state);
}
+
+ netif_info(pf, drv, netdev, "testing finished\n");
}
static void i40e_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
- wol->supported = 0;
- wol->wolopts = 0;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 wol_nvm_bits;
+
+ /* NVM bit on means WoL disabled for the port */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+ if ((1 << hw->port) & wol_nvm_bits) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ } else {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
+ }
+}
+
+static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 wol_nvm_bits;
+
+ /* NVM bit on means WoL disabled for the port */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+ if (((1 << hw->port) & wol_nvm_bits))
+ return -EOPNOTSUPP;
+
+ /* only magic packet is supported */
+ if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
+ return -EOPNOTSUPP;
+
+ /* is this a new value? */
+ if (pf->wol_en != !!wol->wolopts) {
+ pf->wol_en = !!wol->wolopts;
+ device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+ }
+
+ return 0;
}
static int i40e_nway_reset(struct net_device *netdev)
@@ -838,13 +970,13 @@ static int i40e_set_phys_id(struct net_device *netdev,
pf->led_status = i40e_led_get(hw);
return blink_freq;
case ETHTOOL_ID_ON:
- i40e_led_set(hw, 0xF);
+ i40e_led_set(hw, 0xF, false);
break;
case ETHTOOL_ID_OFF:
- i40e_led_set(hw, 0x0);
+ i40e_led_set(hw, 0x0, false);
break;
case ETHTOOL_ID_INACTIVE:
- i40e_led_set(hw, pf->led_status);
+ i40e_led_set(hw, pf->led_status, false);
break;
}
@@ -1003,6 +1135,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
ret = i40e_get_rss_hash_opts(pf, cmd);
break;
case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = 10;
ret = 0;
break;
case ETHTOOL_GRXCLSRULE:
@@ -1142,6 +1275,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
}
#define IP_HEADER_OFFSET 14
+#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
* i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
* a specific flow spec
@@ -1162,6 +1296,12 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
bool err = false;
int ret;
int i;
+ char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+
+ memcpy(fd_data->raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
@@ -1192,6 +1332,7 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
return err ? -EOPNOTSUPP : 0;
}
+#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
* i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
* a specific flow spec
@@ -1211,6 +1352,14 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
struct iphdr *ip;
bool err = false;
int ret;
+ /* Dummy packet */
+ char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x80, 0x11, 0x0, 0x72, 0, 0, 0, 0};
+
+ memcpy(fd_data->raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
@@ -1218,6 +1367,15 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
+ ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+ tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
+
+ if (add) {
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ }
+ }
fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
ret = i40e_program_fdir_filter(fd_data, pf, add);
@@ -1232,9 +1390,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
fd_data->pctype, ret);
}
- ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
- tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
-
fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
ret = i40e_program_fdir_filter(fd_data, pf, add);
@@ -1268,6 +1423,7 @@ static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
}
+#define I40E_IP_DUMMY_PACKET_LEN 34
/**
* i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
* a specific flow spec
@@ -1287,7 +1443,11 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
bool err = false;
int ret;
int i;
+ char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ memcpy(fd_data->raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
@@ -1356,8 +1516,8 @@ static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
fd_data.flex_off = 0;
fd_data.pctype = 0;
fd_data.dest_vsi = vsi->id;
- fd_data.dest_ctl = 0;
- fd_data.fd_status = 0;
+ fd_data.dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+ fd_data.fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
fd_data.cnt_index = 0;
fd_data.fd_id = 0;
@@ -1400,6 +1560,7 @@ static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
return ret;
}
+
/**
* i40e_set_rxnfc - command to set RX flow classification rules
* @netdev: network interface device structure
@@ -1431,6 +1592,94 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return ret;
}
+/**
+ * i40e_max_channels - get Max number of combined channels supported
+ * @vsi: vsi pointer
+ **/
+static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
+{
+ /* TODO: This code assumes DCB and FD are disabled for now. */
+ return vsi->alloc_queue_pairs;
+}
+
+/**
+ * i40e_get_channels - Get the current channels enabled and max supported etc.
+ * @netdev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate tx and rx queues as channels. The other count
+ * represents how many queues are being used for control. max_combined counts
+ * how many queue pairs we can support. They may not be mapped 1 to 1 with
+ * q_vectors since we support a lot more queue pairs than q_vectors.
+ **/
+static void i40e_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ /* report maximum channels */
+ ch->max_combined = i40e_max_channels(vsi);
+
+ /* report info for other vector */
+ ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
+ ch->max_other = ch->other_count;
+
+ /* Note: This code assumes DCB is disabled for now. */
+ ch->combined_count = vsi->num_queue_pairs;
+}
+
+/**
+ * i40e_set_channels - Set the new channels count.
+ * @netdev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * The new channels count may not be the same as requested by the user
+ * since it gets rounded down to a power of 2 value.
+ **/
+static int i40e_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ unsigned int count = ch->combined_count;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int new_count;
+
+ /* We do not support setting channels for any other VSI at present */
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+
+ /* verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* verify other_count has not changed */
+ if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
+ return -EINVAL;
+
+ /* verify the number of channels does not exceed hardware limits */
+ if (count > i40e_max_channels(vsi))
+ return -EINVAL;
+
+ /* update feature limits from largest to smallest supported values */
+ /* TODO: Flow director limit, DCB etc */
+
+ /* cap RSS limit */
+ if (count > pf->rss_size_max)
+ count = pf->rss_size_max;
+
+ /* use rss_reconfig to rebuild with new queue count and update traffic
+ * class queue mapping
+ */
+ new_count = i40e_reconfig_rss_queues(pf, count);
+ if (new_count > 0)
+ return 0;
+ else
+ return -EINVAL;
+}
+
static const struct ethtool_ops i40e_ethtool_ops = {
.get_settings = i40e_get_settings,
.get_drvinfo = i40e_get_drvinfo,
@@ -1439,6 +1688,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.nway_reset = i40e_nway_reset,
.get_link = ethtool_op_get_link,
.get_wol = i40e_get_wol,
+ .set_wol = i40e_set_wol,
.get_eeprom_len = i40e_get_eeprom_len,
.get_eeprom = i40e_get_eeprom,
.get_ringparam = i40e_get_ringparam,
@@ -1455,6 +1705,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_ethtool_stats = i40e_get_ethtool_stats,
.get_coalesce = i40e_get_coalesce,
.set_coalesce = i40e_set_coalesce,
+ .get_channels = i40e_get_channels,
+ .set_channels = i40e_set_channels,
.get_ts_info = i40e_get_ts_info,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 901804af8b0e..bf2d4cc5b569 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -47,10 +46,10 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
u64 direct_mode_sz)
{
enum i40e_memory_type mem_type __attribute__((unused));
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
struct i40e_dma_mem mem;
+ i40e_status ret_code;
u64 alloc_len;
if (NULL == hmc_info->sd_table.sd_entry) {
@@ -90,11 +89,9 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
sd_entry->u.pd_table.pd_entry =
(struct i40e_hmc_pd_entry *)
sd_entry->u.pd_table.pd_entry_virt_mem.va;
- memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem,
- sizeof(struct i40e_dma_mem));
+ sd_entry->u.pd_table.pd_page_addr = mem;
} else {
- memcpy(&sd_entry->u.bp.addr, &mem,
- sizeof(struct i40e_dma_mem));
+ sd_entry->u.bp.addr = mem;
sd_entry->u.bp.sd_pd_index = sd_index;
}
/* initialize the sd entry */
@@ -165,7 +162,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
if (ret_code)
goto exit;
- memcpy(&pd_entry->bp.addr, &mem, sizeof(struct i40e_dma_mem));
+ pd_entry->bp.addr = mem;
pd_entry->bp.sd_pd_index = pd_index;
pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
/* Set page address and valid bit */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index aacd42a261e9..0cd4701234f8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -117,7 +116,6 @@ struct i40e_hmc_info {
* @hw: pointer to our hw struct
* @pa: pointer to physical address
* @sd_index: segment descriptor index
- * @hmc_fn_id: hmc function id
* @type: if sd entry is direct or paged
**/
#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
@@ -139,7 +137,6 @@ struct i40e_hmc_info {
* I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
* @hw: pointer to our hw struct
* @sd_index: segment descriptor index
- * @hmc_fn_id: hmc function id
* @type: if sd entry is direct or paged
**/
#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
@@ -160,7 +157,6 @@ struct i40e_hmc_info {
* @hw: pointer to our hw struct
* @sd_idx: segment descriptor index
* @pd_idx: page descriptor index
- * @hmc_fn_id: hmc function id
**/
#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
wr32((hw), I40E_PFHMC_PDINV, \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index a695b91c9c79..d5d98fe2691d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -486,8 +485,7 @@ i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
/* Make one big object, a single SD */
info.count = 1;
ret_code = i40e_create_lan_hmc_object(hw, &info);
- if ((ret_code) &&
- (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+ if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
goto try_type_paged;
else if (ret_code)
goto configure_lan_hmc_out;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 00ff35006077..341de925a298 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -113,8 +112,8 @@ enum i40e_hmc_lan_object_size {
#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
#define I40E_HMC_OBJ_SIZE_TXQ 128
#define I40E_HMC_OBJ_SIZE_RXQ 32
-#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
-#define I40E_HMC_OBJ_SIZE_FCOE_FILT 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
enum i40e_hmc_lan_rsrc_type {
I40E_HMC_LAN_FULL = 0,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 12b0932204ba..b901371ca361 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -27,6 +26,9 @@
/* Local includes */
#include "i40e.h"
+#ifdef CONFIG_I40E_VXLAN
+#include <net/vxlan.h>
+#endif
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
@@ -36,22 +38,24 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 11
+#define DRV_VERSION_BUILD 30
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
-static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";
+static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
-static int i40e_setup_pf_switch(struct i40e_pf *pf);
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_fdir_sb_setup(struct i40e_pf *pf);
+static int i40e_veb_get_bw_info(struct i40e_veb *veb);
/* i40e_pci_tbl - PCI Device ID Table
*
@@ -61,16 +65,16 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
* Class, Class Mask, private data (not used) }
*/
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
- {PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
/* required last entry */
{0, }
};
@@ -354,6 +358,9 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
int i;
+ if (test_bit(__I40E_DOWN, &vsi->state))
+ return stats;
+
if (!vsi->tx_rings)
return stats;
@@ -416,7 +423,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
- if (vsi->rx_rings)
+ if (vsi->rx_rings && vsi->rx_rings[0]) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
memset(&vsi->rx_rings[i]->stats, 0 ,
sizeof(vsi->rx_rings[i]->stats));
@@ -427,6 +434,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->tx_rings[i]->tx_stats, 0,
sizeof(vsi->tx_rings[i]->tx_stats));
}
+ }
vsi->stat_offsets_loaded = false;
}
@@ -461,7 +469,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
{
u64 new_data;
- if (hw->device_id == I40E_QEMU_DEVICE_ID) {
+ if (hw->device_id == I40E_DEV_ID_QEMU) {
new_data = rd32(hw, loreg);
new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
} else {
@@ -577,10 +585,11 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
veb->stat_offsets_loaded,
&oes->tx_discards, &es->tx_discards);
- i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
- veb->stat_offsets_loaded,
- &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
-
+ if (hw->revision_id > 0)
+ i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_unknown_protocol,
+ &es->rx_unknown_protocol);
i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
veb->stat_offsets_loaded,
&oes->rx_bytes, &es->rx_bytes);
@@ -778,8 +787,8 @@ void i40e_update_stats(struct i40e_vsi *vsi)
} while (u64_stats_fetch_retry_bh(&p->syncp, start));
rx_b += bytes;
rx_p += packets;
- rx_buf += p->rx_stats.alloc_rx_buff_failed;
- rx_page += p->rx_stats.alloc_rx_page_failed;
+ rx_buf += p->rx_stats.alloc_buff_failed;
+ rx_page += p->rx_stats.alloc_page_failed;
}
rcu_read_unlock();
vsi->tx_restart = tx_restart;
@@ -1065,7 +1074,7 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
if (!i40e_find_filter(vsi, macaddr, f->vlan,
is_vf, is_netdev)) {
if (!i40e_add_filter(vsi, macaddr, f->vlan,
- is_vf, is_netdev))
+ is_vf, is_netdev))
return NULL;
}
}
@@ -1207,6 +1216,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
return 0;
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return -EADDRNOTAVAIL;
+
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
ret = i40e_aq_mac_address_write(&vsi->back->hw,
@@ -1260,6 +1273,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
u8 offset;
u16 qmap;
int i;
+ u16 num_tc_qps = 0;
sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
offset = 0;
@@ -1281,6 +1295,9 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
+ /* Number of queues per enabled TC */
+ num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
+ num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
/* Setup queue offset/count for all TCs for given VSI */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -1288,30 +1305,25 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
int pow, num_qps;
- vsi->tc_config.tc_info[i].qoffset = offset;
switch (vsi->type) {
case I40E_VSI_MAIN:
- if (i == 0)
- qcount = pf->rss_size;
- else
- qcount = pf->num_tc_qps;
- vsi->tc_config.tc_info[i].qcount = qcount;
+ qcount = min_t(int, pf->rss_size, num_tc_qps);
break;
case I40E_VSI_FDIR:
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
default:
- qcount = vsi->alloc_queue_pairs;
- vsi->tc_config.tc_info[i].qcount = qcount;
+ qcount = num_tc_qps;
WARN_ON(i != 0);
break;
}
+ vsi->tc_config.tc_info[i].qoffset = offset;
+ vsi->tc_config.tc_info[i].qcount = qcount;
/* find the power-of-2 of the number of queue pairs */
- num_qps = vsi->tc_config.tc_info[i].qcount;
+ num_qps = qcount;
pow = 0;
- while (num_qps &&
- ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
+ while (num_qps && ((1 << pow) < qcount)) {
pow++;
num_qps >>= 1;
}
@@ -1321,7 +1333,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
(offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
- offset += vsi->tc_config.tc_info[i].qcount;
+ offset += qcount;
} else {
/* TC is not enabled so set the offset to
* default queue and allocate one queue
@@ -1497,11 +1509,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cpu_to_le16((u16)(f->vlan ==
I40E_VLAN_ANY ? 0 : f->vlan));
- /* vlan0 as wild card to allow packets from all vlans */
- if (f->vlan == I40E_VLAN_ANY ||
- (vsi->netdev && !(vsi->netdev->features &
- NETIF_F_HW_VLAN_CTAG_FILTER)))
- cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
del_list[num_del].flags = cmd_flags;
num_del++;
@@ -1567,12 +1574,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
add_list[num_add].queue_number = 0;
cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
-
- /* vlan0 as wild card to allow packets from all vlans */
- if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
- !(vsi->netdev->features &
- NETIF_F_HW_VLAN_CTAG_FILTER)))
- cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
add_list[num_add].flags = cpu_to_le16(cmd_flags);
num_add++;
@@ -1638,6 +1639,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"set uni promisc failed, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
+ aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (aq_ret)
+ dev_info(&pf->pdev->dev,
+ "set brdcast promisc failed, err %d, aq_err %d\n",
+ aq_ret, pf->hw.aq.asq_last_status);
}
clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1690,6 +1698,27 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * i40e_ioctl - Access the hwtstamp interface
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ * @cmd: ioctl command
+ **/
+int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return i40e_ptp_get_ts_config(pf, ifr);
+ case SIOCSHWTSTAMP:
+ return i40e_ptp_set_ts_config(pf, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
* i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
* @vsi: the vsi being adjusted
**/
@@ -1771,7 +1800,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
struct i40e_mac_filter *f, *add_f;
bool is_netdev, is_vf;
- int ret;
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(vsi->netdev);
@@ -1797,13 +1825,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
- ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
- dev_info(&vsi->back->pdev->dev,
- "Could not sync filters for vid %d\n", vid);
- return ret;
- }
-
/* Now if we add a vlan tag, make sure to check if it is the first
* tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
* with 0, so we now accept untagged and specified tagged traffic
@@ -1824,7 +1845,10 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
return -ENOMEM;
}
}
+ }
+ /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
+ if (vid > 0 && !vsi->info.pvid) {
list_for_each_entry(f, &vsi->mac_filter_list, list) {
if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
is_vf, is_netdev)) {
@@ -1840,10 +1864,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
}
- ret = i40e_sync_vsi_filters(vsi);
}
- return ret;
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return 0;
+
+ return i40e_sync_vsi_filters(vsi);
}
/**
@@ -1859,7 +1886,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
struct i40e_mac_filter *f, *add_f;
bool is_vf, is_netdev;
int filter_count = 0;
- int ret;
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(netdev);
@@ -1870,12 +1896,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
list_for_each_entry(f, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
- ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
- dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
- return ret;
- }
-
/* go through all the filters for this VSI and if there is only
* vid == 0 it means there are no other filters, so vid 0 must
* be replaced with -1. This signifies that we should from now
@@ -1918,6 +1938,10 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return 0;
+
return i40e_sync_vsi_filters(vsi);
}
@@ -2008,8 +2032,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi->info.pvid = cpu_to_le16(vid);
- vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
- vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
+ I40E_AQ_VSI_PVLAN_INSERT_PVID |
+ I40E_AQ_VSI_PVLAN_EMOD_STR;
ctxt.seid = vsi->seid;
memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
@@ -2032,8 +2057,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
**/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
+ i40e_vlan_stripping_disable(vsi);
+
vsi->info.pvid = 0;
- i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
}
/**
@@ -2066,8 +2092,11 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
int i;
+ if (!vsi->tx_rings)
+ return;
+
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->tx_rings[i]->desc)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
i40e_free_tx_resources(vsi->tx_rings[i]);
}
@@ -2100,8 +2129,11 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
int i;
+ if (!vsi->rx_rings)
+ return;
+
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->rx_rings[i]->desc)
+ if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
i40e_free_rx_resources(vsi->rx_rings[i]);
}
@@ -2121,7 +2153,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
u32 qtx_ctl = 0;
/* some ATR related tx ring init */
- if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
+ if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
ring->atr_sample_rate = vsi->back->atr_sample_rate;
ring->atr_count = 0;
} else {
@@ -2130,6 +2162,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
/* initialize XPS */
if (ring->q_vector && ring->netdev &&
+ vsi->tc_config.numtc <= 1 &&
!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
netif_set_xps_queue(ring->netdev,
&ring->q_vector->affinity_mask,
@@ -2141,8 +2174,9 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
tx_ctx.new_context = 1;
tx_ctx.base = (ring->dma / 128);
tx_ctx.qlen = ring->count;
- tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED));
+ tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED));
+ tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
/* As part of VSI creation/update, FW allocates certain
* Tx arbitration queue sets for each TC enabled for
@@ -2176,7 +2210,10 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
}
/* Now associate this queue with this PCI function */
- qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+ if (vsi->type == I40E_VSI_VMDQ2)
+ qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
+ else
+ qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
I40E_QTX_CTL_PF_INDX_MASK);
wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
@@ -2243,7 +2280,10 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.tphwdesc_ena = 1;
rx_ctx.tphdata_ena = 1;
rx_ctx.tphhead_ena = 1;
- rx_ctx.lrxqthresh = 2;
+ if (hw->revision_id == 0)
+ rx_ctx.lrxqthresh = 0;
+ else
+ rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
rx_ctx.showiv = 1;
@@ -2477,6 +2517,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
I40E_PFINT_ICR0_ENA_GRST_MASK |
I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
I40E_PFINT_ICR0_ENA_GPIO_MASK |
+ I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
I40E_PFINT_ICR0_ENA_VFLR_MASK |
@@ -2485,8 +2526,8 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
wr32(hw, I40E_PFINT_ICR0_ENA, val);
/* SW_ITR_IDX = 0, but don't change INTENA */
- wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
- I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
+ wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
/* OTHER_ITR_IDX = 0 */
wr32(hw, I40E_PFINT_STAT_CTL0, 0);
@@ -2532,6 +2573,19 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
}
/**
+ * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
+ * @pf: board private structure
+ **/
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ i40e_flush(hw);
+}
+
+/**
* i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
* @pf: board private structure
**/
@@ -2584,23 +2638,6 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
}
/**
- * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings
- * @irq: interrupt number
- * @data: pointer to a q_vector
- **/
-static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
-{
- struct i40e_q_vector *q_vector = data;
-
- if (!q_vector->tx.ring && !q_vector->rx.ring)
- return IRQ_HANDLED;
-
- pr_info("fdir ring cleaning needed\n");
-
- return IRQ_HANDLED;
-}
-
-/**
* i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
* @vsi: the VSI being configured
* @basename: name for the vector
@@ -2740,20 +2777,21 @@ static irqreturn_t i40e_intr(int irq, void *data)
{
struct i40e_pf *pf = (struct i40e_pf *)data;
struct i40e_hw *hw = &pf->hw;
+ irqreturn_t ret = IRQ_NONE;
u32 icr0, icr0_remaining;
u32 val, ena_mask;
icr0 = rd32(hw, I40E_PFINT_ICR0);
-
- val = rd32(hw, I40E_PFINT_DYN_CTL0);
- val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
- wr32(hw, I40E_PFINT_DYN_CTL0, val);
+ ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
/* if sharing a legacy IRQ, we might get called w/o an intr pending */
if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
- return IRQ_NONE;
+ goto enable_intr;
- ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+ /* if interrupt but no bits showing, must be SWINT */
+ if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
+ (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
+ pf->sw_int_count++;
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
@@ -2793,14 +2831,31 @@ static irqreturn_t i40e_intr(int irq, void *data)
val = rd32(hw, I40E_GLGEN_RSTAT);
val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
>> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
- if (val & I40E_RESET_CORER)
+ if (val == I40E_RESET_CORER)
pf->corer_count++;
- else if (val & I40E_RESET_GLOBR)
+ else if (val == I40E_RESET_GLOBR)
pf->globr_count++;
- else if (val & I40E_RESET_EMPR)
+ else if (val == I40E_RESET_EMPR)
pf->empr_count++;
}
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
+ icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
+ dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
+ u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
+
+ if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ i40e_ptp_tx_hwtstamp(pf);
+ prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK;
+ }
+
+ wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat);
+ }
+
/* If a critical error is pending we have no choice but to reset the
* device.
* Report and mask out any remaining unexpected interrupts.
@@ -2809,22 +2864,19 @@ static irqreturn_t i40e_intr(int irq, void *data)
if (icr0_remaining) {
dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
icr0_remaining);
- if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
- (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
+ if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
- if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
- dev_info(&pf->pdev->dev, "HMC error interrupt\n");
- } else {
- dev_info(&pf->pdev->dev, "device will be reset\n");
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
- i40e_service_event_schedule(pf);
- }
+ dev_info(&pf->pdev->dev, "device will be reset\n");
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
}
ena_mask &= ~icr0_remaining;
}
+ ret = IRQ_HANDLED;
+enable_intr:
/* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
if (!test_bit(__I40E_DOWN, &pf->state)) {
@@ -2832,6 +2884,94 @@ static irqreturn_t i40e_intr(int irq, void *data)
i40e_irq_dynamic_enable_icr0(pf);
}
+ return ret;
+}
+
+/**
+ * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring: tx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+ struct i40e_vsi *vsi = tx_ring->vsi;
+ u16 i = tx_ring->next_to_clean;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_desc;
+
+ tx_buf = &tx_ring->tx_bi[i];
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+
+ dma_unmap_len_set(tx_buf, len, 0);
+
+
+ /* move to the next desc and buffer to clean */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ i40e_irq_dynamic_enable(vsi,
+ tx_ring->q_vector->v_idx + vsi->base_vector);
+ }
+ return budget > 0;
+}
+
+/**
+ * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+ struct i40e_vsi *vsi;
+
+ if (!q_vector->tx.ring)
+ return IRQ_HANDLED;
+
+ vsi = q_vector->tx.ring->vsi;
+ i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
+
return IRQ_HANDLED;
}
@@ -2974,28 +3114,20 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
} while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
- if (enable) {
- /* is STAT set ? */
- if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
- dev_info(&pf->pdev->dev,
- "Tx %d already enabled\n", i);
- continue;
- }
- } else {
- /* is !STAT set ? */
- if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
- dev_info(&pf->pdev->dev,
- "Tx %d already disabled\n", i);
- continue;
- }
- }
+ /* Skip if the queue is already in the requested state */
+ if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ continue;
+ if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ continue;
/* turn on/off the queue */
- if (enable)
+ if (enable) {
+ wr32(hw, I40E_QTX_HEAD(pf_q), 0);
tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
I40E_QTX_ENA_QENA_STAT_MASK;
- else
+ } else {
tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ }
wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
@@ -3019,6 +3151,9 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
}
}
+ if (hw->revision_id == 0)
+ mdelay(50);
+
return 0;
}
@@ -3091,9 +3226,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
* @vsi: the VSI being configured
* @enable: start or stop the rings
**/
-static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
+int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
{
- int ret;
+ int ret = 0;
/* do rx first for enable and last for disable */
if (request) {
@@ -3102,10 +3237,9 @@ static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
return ret;
ret = i40e_vsi_control_tx(vsi, request);
} else {
- ret = i40e_vsi_control_tx(vsi, request);
- if (ret)
- return ret;
- ret = i40e_vsi_control_rx(vsi, request);
+ /* Ignore return value, we need to shutdown whatever we can */
+ i40e_vsi_control_tx(vsi, request);
+ i40e_vsi_control_rx(vsi, request);
}
return ret;
@@ -3131,7 +3265,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
u16 vector = i + base;
/* free only the irqs that were actually requested */
- if (vsi->q_vectors[i]->num_ringpairs == 0)
+ if (!vsi->q_vectors[i] ||
+ !vsi->q_vectors[i]->num_ringpairs)
continue;
/* clear the affinity_mask in the IRQ descriptor */
@@ -3543,7 +3678,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
/* Get the VSI level BW configuration per TC */
aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
- NULL);
+ NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
"couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
@@ -3754,6 +3889,149 @@ out:
}
/**
+ * i40e_veb_config_tc - Configure TCs for given VEB
+ * @veb: given VEB
+ * @enabled_tc: TC bitmap
+ *
+ * Configures given TC bitmap for VEB (switching) element
+ **/
+int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
+{
+ struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
+ struct i40e_pf *pf = veb->pf;
+ int ret = 0;
+ int i;
+
+ /* No TCs or already enabled TCs just return */
+ if (!enabled_tc || veb->enabled_tc == enabled_tc)
+ return ret;
+
+ bw_data.tc_valid_bits = enabled_tc;
+ /* bw_data.absolute_credits is not set (relative) */
+
+ /* Enable ETS TCs with equal BW Share for now */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i))
+ bw_data.tc_bw_share_credits[i] = 1;
+ }
+
+ ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
+ &bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "veb bw config failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ goto out;
+ }
+
+ /* Update the BW information */
+ ret = i40e_veb_get_bw_info(veb);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed getting veb bw config, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ }
+
+out:
+ return ret;
+}
+
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
+ * @pf: PF struct
+ *
+ * Reconfigure VEB/VSIs on a given PF; it is assumed that
+ * the caller would've quiesce all the VSIs before calling
+ * this function
+ **/
+static void i40e_dcb_reconfigure(struct i40e_pf *pf)
+{
+ u8 tc_map = 0;
+ int ret;
+ u8 v;
+
+ /* Enable the TCs available on PF to all VEBs */
+ tc_map = i40e_pf_get_tc_map(pf);
+ for (v = 0; v < I40E_MAX_VEB; v++) {
+ if (!pf->veb[v])
+ continue;
+ ret = i40e_veb_config_tc(pf->veb[v], tc_map);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed configuring TC for VEB seid=%d\n",
+ pf->veb[v]->seid);
+ /* Will try to configure as many components as possible */
+ }
+ }
+
+ /* Update each VSI */
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (!pf->vsi[v])
+ continue;
+
+ /* - Enable all TCs for the LAN VSI
+ * - For all others keep them at TC0 for now
+ */
+ if (v == pf->lan_vsi)
+ tc_map = i40e_pf_get_tc_map(pf);
+ else
+ tc_map = i40e_pf_get_default_tc(pf);
+
+ ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed configuring TC for VSI seid=%d\n",
+ pf->vsi[v]->seid);
+ /* Will still try to configure the remaining components */
+ } else {
+ if (pf->vsi[v]->netdev)
+ i40e_dcbnl_set_all(pf->vsi[v]);
+ }
+ }
+}
+
+/**
+ * i40e_init_pf_dcb - Initialize DCB configuration
+ * @pf: PF being configured
+ *
+ * Query the current DCB configuration and cache it
+ * in the hardware structure
+ **/
+static int i40e_init_pf_dcb(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int err = 0;
+
+ if (pf->hw.func_caps.npar_enable)
+ goto out;
+
+ /* Get the initial DCB configuration */
+ err = i40e_init_dcb(hw);
+ if (!err) {
+ /* Device/Function is not DCBX capable */
+ if ((!hw->func_caps.dcb) ||
+ (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
+ dev_info(&pf->pdev->dev,
+ "DCBX offload is not supported or is disabled for this PF.\n");
+
+ if (pf->flags & I40E_FLAG_MFP_ENABLED)
+ goto out;
+
+ } else {
+ /* When status is not DISABLED then DCBX is in FW */
+ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
+ DCB_CAP_DCBX_VER_IEEE;
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
+ }
+ }
+
+out:
+ return err;
+}
+#endif /* CONFIG_I40E_DCB */
+
+/**
* i40e_up_complete - Finish the last steps of bringing up a connection
* @vsi: the VSI being configured
**/
@@ -3957,22 +4235,28 @@ static int i40e_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
err = i40e_up_complete(vsi);
if (err)
goto err_up_complete;
- if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
- err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
- if (err)
- netdev_info(netdev,
- "couldn't set broadcast err %d aq_err %d\n",
- err, pf->hw.aq.asq_last_status);
- }
+#ifdef CONFIG_I40E_VXLAN
+ vxlan_get_rx_port(netdev);
+#endif
return 0;
err_up_complete:
i40e_down(vsi);
+err_set_queues:
i40e_vsi_free_irq(vsi);
err_setup_rx:
i40e_vsi_free_rx_resources(vsi);
@@ -4054,6 +4338,24 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
i40e_flush(&pf->hw);
+ } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
+
+ /* Request a Firmware Reset
+ *
+ * Same as Global reset, plus restarting the
+ * embedded firmware engine.
+ */
+ /* enable EMP Reset */
+ val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
+ val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
+
+ /* force the reset */
+ val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+ i40e_flush(&pf->hw);
+
} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
/* Request a PF Reset
@@ -4091,6 +4393,144 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
}
}
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_dcb_need_reconfig - Check if DCB needs reconfig
+ * @pf: board private structure
+ * @old_cfg: current DCB config
+ * @new_cfg: new DCB config
+ **/
+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
+ struct i40e_dcbx_config *new_cfg)
+{
+ bool need_reconfig = false;
+
+ /* Check if ETS configuration has changed */
+ if (memcmp(&new_cfg->etscfg,
+ &old_cfg->etscfg,
+ sizeof(new_cfg->etscfg))) {
+ /* If Priority Table has changed reconfig is needed */
+ if (memcmp(&new_cfg->etscfg.prioritytable,
+ &old_cfg->etscfg.prioritytable,
+ sizeof(new_cfg->etscfg.prioritytable))) {
+ need_reconfig = true;
+ dev_info(&pf->pdev->dev, "ETS UP2TC changed.\n");
+ }
+
+ if (memcmp(&new_cfg->etscfg.tcbwtable,
+ &old_cfg->etscfg.tcbwtable,
+ sizeof(new_cfg->etscfg.tcbwtable)))
+ dev_info(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+
+ if (memcmp(&new_cfg->etscfg.tsatable,
+ &old_cfg->etscfg.tsatable,
+ sizeof(new_cfg->etscfg.tsatable)))
+ dev_info(&pf->pdev->dev, "ETS TSA Table changed.\n");
+ }
+
+ /* Check if PFC configuration has changed */
+ if (memcmp(&new_cfg->pfc,
+ &old_cfg->pfc,
+ sizeof(new_cfg->pfc))) {
+ need_reconfig = true;
+ dev_info(&pf->pdev->dev, "PFC config change detected.\n");
+ }
+
+ /* Check if APP Table has changed */
+ if (memcmp(&new_cfg->app,
+ &old_cfg->app,
+ sizeof(new_cfg->app))) {
+ need_reconfig = true;
+ dev_info(&pf->pdev->dev, "APP Table change detected.\n");
+ }
+
+ return need_reconfig;
+}
+
+/**
+ * i40e_handle_lldp_event - Handle LLDP Change MIB event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ **/
+static int i40e_handle_lldp_event(struct i40e_pf *pf,
+ struct i40e_arq_event_info *e)
+{
+ struct i40e_aqc_lldp_get_mib *mib =
+ (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
+ struct i40e_dcbx_config tmp_dcbx_cfg;
+ bool need_reconfig = false;
+ int ret = 0;
+ u8 type;
+
+ /* Ignore if event is not for Nearest Bridge */
+ type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
+ & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
+ return ret;
+
+ /* Check MIB Type and return if event for Remote MIB update */
+ type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ if (type == I40E_AQ_LLDP_MIB_REMOTE) {
+ /* Update the remote cached instance and return */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ goto exit;
+ }
+
+ /* Convert/store the DCBX data from LLDPDU temporarily */
+ memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
+ ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg);
+ if (ret) {
+ /* Error parsing LLDPDU; return */
+ dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n");
+ goto exit;
+ }
+
+ /* No change detected in DCBX configs */
+ if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
+ dev_info(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
+ goto exit;
+ }
+
+ need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg);
+
+ i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg);
+
+ /* Overwrite the new configuration */
+ *dcbx_cfg = tmp_dcbx_cfg;
+
+ if (!need_reconfig)
+ goto exit;
+
+ /* Reconfiguration needed; quiesce all VSIs */
+ i40e_pf_quiesce_all_vsi(pf);
+
+ /* Changes in configuration update VEB/VSI */
+ i40e_dcb_reconfigure(pf);
+
+ i40e_pf_unquiesce_all_vsi(pf);
+exit:
+ return ret;
+}
+#endif /* CONFIG_I40E_DCB */
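The LLDP handler above pulls two separate fields out of the single mib->type byte. A minimal standalone sketch of that shift-and-mask extraction (plain userspace C; the shift and mask values are placeholders, not the real register layout):

/* Sketch: extract two packed fields from one type byte, as the LLDP
 * handler does for bridge type and MIB type. Values are placeholders.
 */
#include <stdint.h>
#include <stdio.h>

#define BRIDGE_TYPE_SHIFT 4		/* assumed layout for illustration */
#define BRIDGE_TYPE_MASK  0x0f
#define MIB_TYPE_MASK     0x0f

int main(void)
{
	uint8_t type = 0x12;	/* example event type byte */
	uint8_t bridge = (type >> BRIDGE_TYPE_SHIFT) & BRIDGE_TYPE_MASK;
	uint8_t mib = type & MIB_TYPE_MASK;

	printf("bridge type %u, MIB type %u\n", bridge, mib);
	return 0;
}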
+
+/**
+ * i40e_do_reset_safe - Protected reset path for userland calls.
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ *
+ **/
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
+{
+ rtnl_lock();
+ i40e_do_reset(pf, reset_flags);
+ rtnl_unlock();
+}
+
/**
* i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
* @pf: board private structure
@@ -4245,6 +4685,9 @@ static void i40e_link_event(struct i40e_pf *pf)
if (pf->vf)
i40e_vc_notify_link_state(pf);
+
+ if (pf->flags & I40E_FLAG_PTP)
+ i40e_ptp_set_increment(pf);
}
/**
@@ -4326,6 +4769,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
for (i = 0; i < I40E_MAX_VEB; i++)
if (pf->veb[i])
i40e_update_veb_stats(pf->veb[i]);
+
+ i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
}
/**
@@ -4336,6 +4781,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
{
u32 reset_flags = 0;
+ rtnl_lock();
if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
reset_flags |= (1 << __I40E_REINIT_REQUESTED);
clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
@@ -4358,7 +4804,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
*/
if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
i40e_handle_reset_warning(pf);
- return;
+ goto unlock;
}
/* If we're already down or resetting, just bail */
@@ -4366,6 +4812,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
!test_bit(__I40E_DOWN, &pf->state) &&
!test_bit(__I40E_CONFIG_BUSY, &pf->state))
i40e_do_reset(pf, reset_flags);
+
+unlock:
+ rtnl_unlock();
}
/**
@@ -4429,6 +4878,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
return;
do {
+ event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
ret = i40e_clean_arq_element(hw, &event, &pending);
if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
dev_info(&pf->pdev->dev, "No ARQ event found\n");
@@ -4454,15 +4904,23 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
break;
case i40e_aqc_opc_lldp_update_mib:
dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+#ifdef CONFIG_I40E_DCB
+ rtnl_lock();
+ ret = i40e_handle_lldp_event(pf, &event);
+ rtnl_unlock();
+#endif /* CONFIG_I40E_DCB */
break;
case i40e_aqc_opc_event_lan_overflow:
dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
i40e_handle_lan_overflow_event(pf, &event);
break;
+ case i40e_aqc_opc_send_msg_to_peer:
+ dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
+ break;
default:
dev_info(&pf->pdev->dev,
- "ARQ Error: Unknown event %d received\n",
- event.desc.opcode);
+ "ARQ Error: Unknown event 0x%04x received\n",
+ opcode);
break;
}
} while (pending && (i++ < pf->adminq_work_limit));
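The admin-queue drain above is a bounded loop: reset the receive buffer size each pass (the clean call overwrites it with the real message size), handle one event, and stop either when the queue is empty or after adminq_work_limit iterations. A standalone sketch of that bounded drain pattern (plain userspace C; the event source is a fake counter):

/* Sketch: bounded event-drain loop that reinitializes per-iteration
 * state, as in the admin-queue subtask. All names are placeholders.
 */
#include <stdio.h>

#define BUF_SIZE   512
#define WORK_LIMIT 16

static int fake_pending = 5;	/* pretend 5 events are queued */

static int clean_one_event(int *msg_size, int *pending)
{
	if (fake_pending == 0)
		return -1;		/* "no work" */
	*msg_size = 37;			/* the clean call shrinks it to the real size */
	fake_pending--;
	*pending = fake_pending;
	return 0;
}

int main(void)
{
	int pending, msg_size, i = 0;

	do {
		msg_size = BUF_SIZE;	/* reinit each time */
		if (clean_one_event(&msg_size, &pending))
			break;
		printf("handled event of %d bytes, %d left\n", msg_size, pending);
	} while (pending && (i++ < WORK_LIMIT));
	return 0;
}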
@@ -4592,6 +5050,9 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
}
} while (err);
+ /* increment MSI-X count because current FW skips one */
+ pf->hw.func_caps.num_msix_vectors++;
+
if (pf->hw.debug_mask & I40E_DEBUG_USER)
dev_info(&pf->pdev->dev,
"pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -4603,57 +5064,89 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
pf->hw.func_caps.num_tx_qp,
pf->hw.func_caps.num_vsis);
+#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
+ + pf->hw.func_caps.num_vfs)
+ if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
+ dev_info(&pf->pdev->dev,
+ "got num_vsis %d, setting num_vsis to %d\n",
+ pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
+ pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
+ }
+
return 0;
}
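The DEF_NUM_VSI macro above is simply a count of the VSIs the driver expects on a rev-0 part: one LAN VSI, plus one for FCoE if that capability is set, plus one per VF; if firmware reports fewer, the driver bumps the number up. A trivial standalone sketch of that check (plain userspace C, assumed example values):

/* Sketch: compute the expected minimum VSI count and raise the reported
 * capability if it is lower, as the rev-0 workaround above does.
 */
#include <stdio.h>

int main(void)
{
	int fcoe = 0;		/* assumed: no FCoE capability */
	int num_vfs = 32;	/* assumed VF count */
	int reported_vsis = 20;	/* what firmware reported */
	int def_num_vsi = 1 + (fcoe ? 1 : 0) + num_vfs;

	if (def_num_vsi > reported_vsis) {
		printf("got num_vsis %d, setting num_vsis to %d\n",
		       reported_vsis, def_num_vsi);
		reported_vsis = def_num_vsi;
	}
	printf("using %d VSIs\n", reported_vsis);
	return 0;
}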
+static int i40e_vsi_clear(struct i40e_vsi *vsi);
+
/**
- * i40e_fdir_setup - initialize the Flow Director resources
+ * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
* @pf: board private structure
**/
-static void i40e_fdir_setup(struct i40e_pf *pf)
+static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
struct i40e_vsi *vsi;
bool new_vsi = false;
int err, i;
- if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED)))
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
- pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-
- /* find existing or make new FDIR VSI */
+ /* find existing VSI and see if it needs configuring */
vsi = NULL;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
vsi = pf->vsi[i];
+ break;
+ }
+ }
+
+ /* create a new VSI if none exists */
if (!vsi) {
- vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
+ pf->vsi[pf->lan_vsi]->seid, 0);
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
- pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
- return;
+ goto err_vsi;
}
new_vsi = true;
}
- WARN_ON(vsi->base_queue != I40E_FDIR_RING);
- i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);
+ i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
err = i40e_vsi_setup_tx_resources(vsi);
- if (!err)
- err = i40e_vsi_setup_rx_resources(vsi);
- if (!err)
- err = i40e_vsi_configure(vsi);
- if (!err && new_vsi) {
+ if (err)
+ goto err_setup_tx;
+ err = i40e_vsi_setup_rx_resources(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ if (new_vsi) {
char int_name[IFNAMSIZ + 9];
+ err = i40e_vsi_configure(vsi);
+ if (err)
+ goto err_setup_rx;
snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
dev_driver_string(&pf->pdev->dev));
err = i40e_vsi_request_irq(vsi, int_name);
- }
- if (!err)
+ if (err)
+ goto err_setup_rx;
err = i40e_up_complete(vsi);
+ if (err)
+ goto err_up_complete;
+ }
clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
+ return;
+
+err_up_complete:
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+err_setup_rx:
+ i40e_vsi_free_rx_resources(vsi);
+err_setup_tx:
+ i40e_vsi_free_tx_resources(vsi);
+err_vsi:
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ i40e_vsi_clear(vsi);
}
/**
@@ -4673,26 +5166,25 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
}
/**
- * i40e_handle_reset_warning - prep for the core to reset
+ * i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
*
- * Close up the VFs and other things in prep for a Core Reset,
- * then get ready to rebuild the world.
- **/
-static void i40e_handle_reset_warning(struct i40e_pf *pf)
+ * Close up the VFs and other things in prep for pf Reset.
+ **/
+static int i40e_prep_for_reset(struct i40e_pf *pf)
{
- struct i40e_driver_version dv;
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
u32 v;
clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
- return;
+ return 0;
dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
- i40e_vc_notify_reset(pf);
+ if (i40e_check_asq_alive(hw))
+ i40e_vc_notify_reset(pf);
/* quiesce the VSIs and their queues that are not already DOWN */
i40e_pf_quiesce_all_vsi(pf);
@@ -4704,6 +5196,27 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
i40e_shutdown_adminq(&pf->hw);
+ /* call shutdown HMC */
+ ret = i40e_shutdown_lan_hmc(hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+ }
+ return ret;
+}
+
+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
+{
+ struct i40e_driver_version dv;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u32 v;
+
/* Now we wait for GRST to settle out.
* We don't have to delete the VEBs or VSIs from the hw switch
* because the reset will make them disappear.
@@ -4731,13 +5244,6 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
goto end_core_reset;
}
- /* call shutdown HMC */
- ret = i40e_shutdown_lan_hmc(hw);
- if (ret) {
- dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
- goto end_core_reset;
- }
-
ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp,
pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
@@ -4751,8 +5257,16 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
goto end_core_reset;
}
+#ifdef CONFIG_I40E_DCB
+ ret = i40e_init_pf_dcb(pf);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret);
+ goto end_core_reset;
+ }
+#endif /* CONFIG_I40E_DCB */
+
/* do basic switch setup */
- ret = i40e_setup_pf_switch(pf);
+ ret = i40e_setup_pf_switch(pf, reinit);
if (ret)
goto end_core_reset;
@@ -4831,6 +5345,22 @@ end_core_reset:
}
/**
+ * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
+ * @pf: board private structure
+ *
+ * Close up the VFs and other things in prep for a Core Reset,
+ * then get ready to rebuild the world.
+ **/
+static void i40e_handle_reset_warning(struct i40e_pf *pf)
+{
+ i40e_status ret;
+
+ ret = i40e_prep_for_reset(pf);
+ if (!ret)
+ i40e_reset_and_rebuild(pf, false);
+}
+
+/**
* i40e_handle_mdd_event
* @pf: pointer to the pf structure
*
@@ -4911,6 +5441,52 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
i40e_flush(hw);
}
+#ifdef CONFIG_I40E_VXLAN
+/**
+ * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
+ * @pf: board private structure
+ **/
+static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
+{
+ const int vxlan_hdr_qwords = 4;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u8 filter_index;
+ __be16 port;
+ int i;
+
+ if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
+ return;
+
+ pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
+
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->pending_vxlan_bitmap & (1 << i)) {
+ pf->pending_vxlan_bitmap &= ~(1 << i);
+ port = pf->vxlan_ports[i];
+ ret = port ?
+ i40e_aq_add_udp_tunnel(hw, ntohs(port),
+ vxlan_hdr_qwords,
+ I40E_AQC_TUNNEL_TYPE_VXLAN,
+ &filter_index, NULL)
+ : i40e_aq_del_udp_tunnel(hw, i, NULL);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
+ port ? "adding" : "deleting",
+ ntohs(port), i);
+
+ pf->vxlan_ports[i] = 0;
+ } else {
+ dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
+ port ? "Added" : "Deleted",
+ ntohs(port), port ? i : filter_index);
+ }
+ }
+ }
+}
+
+#endif
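The VXLAN port table above relies on a small pending bitmap: the ndo callbacks only mark slots, and the service task later walks the bitmap and issues the corresponding add/delete admin-queue commands. A standalone sketch of that mark-then-sync pattern (plain userspace C; all names and sizes are placeholders, printf stands in for the AQ calls):

/* Sketch: mark-pending / deferred-sync pattern used for the VXLAN
 * UDP port table. All names and sizes are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_PORTS 16

static uint16_t ports[MAX_PORTS];
static uint32_t pending_bitmap;

static void mark_port(int idx, uint16_t port)
{
	ports[idx] = port;		/* 0 means "delete this slot" */
	pending_bitmap |= 1u << idx;
}

static void sync_ports(void)
{
	int i;

	for (i = 0; i < MAX_PORTS; i++) {
		if (!(pending_bitmap & (1u << i)))
			continue;
		pending_bitmap &= ~(1u << i);
		if (ports[i])
			printf("add offload for port %u at index %d\n", ports[i], i);
		else
			printf("delete offload at index %d\n", i);
	}
}

int main(void)
{
	mark_port(0, 4789);	/* VXLAN default port, queued for add */
	mark_port(1, 0);	/* slot 1 queued for delete */
	sync_ports();
	return 0;
}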
/**
* i40e_service_task - Run the driver's async subtasks
* @work: pointer to work_struct containing our data
@@ -4929,6 +5505,9 @@ static void i40e_service_task(struct work_struct *work)
i40e_fdir_reinit_subtask(pf);
i40e_check_hang_subtask(pf);
i40e_sync_filters_subtask(pf);
+#ifdef CONFIG_I40E_VXLAN
+ i40e_sync_vxlan_filters_subtask(pf);
+#endif
i40e_clean_adminq_subtask(pf);
i40e_service_event_complete(pf);
@@ -5006,6 +5585,42 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
}
/**
+ * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
+ * @vsi: pointer to the VSI
+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
+ *
+ * On error: returns error code (negative)
+ * On success: returns 0
+ **/
+static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
+{
+ int size;
+ int ret = 0;
+
+ /* allocate memory for both Tx and Rx ring pointers */
+ size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+ vsi->tx_rings = kzalloc(size, GFP_KERNEL);
+ if (!vsi->tx_rings)
+ return -ENOMEM;
+ vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+
+ if (alloc_qvectors) {
+ /* allocate memory for q_vector pointers */
+ size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+ vsi->q_vectors = kzalloc(size, GFP_KERNEL);
+ if (!vsi->q_vectors) {
+ ret = -ENOMEM;
+ goto err_vectors;
+ }
+ }
+ return ret;
+
+err_vectors:
+ kfree(vsi->tx_rings);
+ return ret;
+}
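i40e_vsi_alloc_arrays allocates one block for both ring-pointer arrays and points rx_rings into the second half of it, so a single kfree() of tx_rings releases both. A standalone sketch of that layout (plain userspace C; calloc stands in for kzalloc, names are placeholders):

/* Sketch: one allocation backing two pointer arrays, with the second
 * array aliased into the tail of the first, as in i40e_vsi_alloc_arrays.
 */
#include <stdio.h>
#include <stdlib.h>

struct ring;	/* opaque for the sketch */

int main(void)
{
	int nqueues = 4;
	struct ring **tx_rings, **rx_rings;

	tx_rings = calloc(2 * nqueues, sizeof(*tx_rings));
	if (!tx_rings)
		return 1;
	rx_rings = &tx_rings[nqueues];	/* second half of the same block */

	printf("tx at %p, rx at %p\n", (void *)tx_rings, (void *)rx_rings);

	free(tx_rings);			/* frees both arrays at once */
	return 0;
}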
+
+/**
* i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
* @pf: board private structure
* @type: type of VSI
@@ -5017,8 +5632,6 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
int ret = -ENODEV;
struct i40e_vsi *vsi;
- int sz_vectors;
- int sz_rings;
int vsi_idx;
int i;
@@ -5068,22 +5681,9 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
if (ret)
goto err_rings;
- /* allocate memory for ring pointers */
- sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
- vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
- if (!vsi->tx_rings) {
- ret = -ENOMEM;
+ ret = i40e_vsi_alloc_arrays(vsi, true);
+ if (ret)
goto err_rings;
- }
- vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
-
- /* allocate memory for q_vector pointers */
- sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
- vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
- if (!vsi->q_vectors) {
- ret = -ENOMEM;
- goto err_vectors;
- }
/* Setup default MSIX irq handler for VSI */
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
@@ -5092,8 +5692,6 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
ret = vsi_idx;
goto unlock_pf;
-err_vectors:
- kfree(vsi->tx_rings);
err_rings:
pf->next_vsi = i - 1;
kfree(vsi);
@@ -5103,6 +5701,26 @@ unlock_pf:
}
/**
+ * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
+ * @vsi: pointer to the VSI
+ * @free_qvectors: a bool to specify if q_vectors need to be freed.
+ **/
+static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
+{
+ /* free the ring and vector containers */
+ if (free_qvectors) {
+ kfree(vsi->q_vectors);
+ vsi->q_vectors = NULL;
+ }
+ kfree(vsi->tx_rings);
+ vsi->tx_rings = NULL;
+ vsi->rx_rings = NULL;
+}
+
+/**
* i40e_vsi_clear - Deallocate the VSI provided
* @vsi: the VSI being un-configured
**/
@@ -5138,9 +5756,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
- /* free the ring and vector containers */
- kfree(vsi->q_vectors);
- kfree(vsi->tx_rings);
+ i40e_vsi_free_arrays(vsi, true);
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi)
@@ -5158,18 +5774,17 @@ free_vsi:
* i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
* @vsi: the VSI being cleaned
**/
-static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
int i;
- if (vsi->tx_rings[0])
+ if (vsi->tx_rings && vsi->tx_rings[0]) {
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
kfree_rcu(vsi->tx_rings[i], rcu);
vsi->tx_rings[i] = NULL;
vsi->rx_rings[i] = NULL;
}
-
- return 0;
+ }
}
/**
@@ -5186,6 +5801,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
struct i40e_ring *tx_ring;
struct i40e_ring *rx_ring;
+ /* allocate space for both Tx and Rx in one shot */
tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
if (!tx_ring)
goto err_out;
@@ -5289,19 +5905,22 @@ static int i40e_init_msix(struct i40e_pf *pf)
/* The number of vectors we'll request will be comprised of:
* - Add 1 for "other" cause for Admin Queue events, etc.
* - The number of LAN queue pairs
- * already adjusted for the NUMA node
- * assumes symmetric Tx/Rx pairing
+ * - Queues being used for RSS.
+ * We don't need as many as max_rss_size vectors;
+ * use rss_size instead in the calculation since that
+ * is governed by the number of CPUs in the system.
+ * - assumes symmetric Tx/Rx pairing
* - The number of VMDq pairs
* Once we count this up, try the request.
*
* If we can't get what we want, we'll simplify to nearly nothing
* and try again. If that still fails, we punt.
*/
- pf->num_lan_msix = pf->num_lan_qps;
+ pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
pf->num_vmdq_msix = pf->num_vmdq_qps;
v_budget = 1 + pf->num_lan_msix;
v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
- if (pf->flags & I40E_FLAG_FDIR_ENABLED)
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
v_budget++;
/* Scale down if necessary, and the rings will share vectors */
@@ -5437,14 +6056,13 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
err = i40e_init_msix(pf);
if (err) {
- pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
- I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_SRIOV_ENABLED |
- I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
+ pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
+ I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
/* rework the queue expectations without MSIX */
i40e_determine_queue_usage(pf);
@@ -5513,15 +6131,15 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
**/
static int i40e_config_rss(struct i40e_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
- u32 lut = 0;
- int i, j;
- u64 hena;
/* Set of random keys generated using kernel random number generator */
static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
+ struct i40e_hw *hw = &pf->hw;
+ u32 lut = 0;
+ int i, j;
+ u64 hena;
/* Fill out hash function seed */
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
@@ -5530,16 +6148,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)|
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= I40E_DEFAULT_RSS_HENA;
wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
@@ -5568,6 +6177,34 @@ static int i40e_config_rss(struct i40e_pf *pf)
}
/**
+ * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
+ * @pf: board private structure
+ * @queue_count: the requested queue count for rss.
+ *
+ * Returns 0 if RSS is not enabled; otherwise returns the final RSS queue
+ * count, which may differ from the requested queue count.
+ **/
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
+{
+ if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
+ return 0;
+
+ queue_count = min_t(int, queue_count, pf->rss_size_max);
+ queue_count = rounddown_pow_of_two(queue_count);
+
+ if (queue_count != pf->rss_size) {
+ i40e_prep_for_reset(pf);
+
+ pf->rss_size = queue_count;
+
+ i40e_reset_and_rebuild(pf, true);
+ i40e_config_rss(pf);
+ }
+ dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
+ return pf->rss_size;
+}
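i40e_reconfig_rss_queues clamps the requested count to rss_size_max and then rounds it down to a power of two before triggering the rebuild. A quick standalone sketch of that clamping (plain userspace C, placeholder values; the helper mirrors the kernel's rounddown_pow_of_two):

/* Sketch: clamp a requested RSS queue count to a maximum and round it
 * down to a power of two, as i40e_reconfig_rss_queues does.
 */
#include <stdio.h>

static int rounddown_pow_of_two(int n)
{
	int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	int rss_size_max = 64;		/* assumed hardware limit */
	int requested = 24;
	int count = requested;

	if (count > rss_size_max)
		count = rss_size_max;
	count = rounddown_pow_of_two(count);

	printf("requested %d -> using %d queues\n", requested, count);
	return 0;
}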
+
+/**
* i40e_sw_init - Initialize general software structures (struct i40e_pf)
* @pf: board private structure to initialize
*
@@ -5582,6 +6219,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
+ pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
if (I40E_DEBUG_USER & debug)
pf->hw.debug_mask = debug;
@@ -5593,39 +6231,47 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
I40E_FLAG_MSIX_ENABLED |
- I40E_FLAG_RX_PS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
I40E_FLAG_RX_1BUF_ENABLED;
+ /* Depending on PF configurations, it is possible that the RSS
+ * maximum might end up larger than the available queues
+ */
pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+ pf->rss_size_max = min_t(int, pf->rss_size_max,
+ pf->hw.func_caps.num_tx_qp);
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
- pf->rss_size = min_t(int, pf->rss_size_max,
- nr_cpus_node(numa_node_id()));
+ pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
+ pf->rss_size = rounddown_pow_of_two(pf->rss_size);
} else {
pf->rss_size = 1;
}
- if (pf->hw.func_caps.dcb)
- pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
- else
- pf->num_tc_qps = 0;
+ /* MFP mode enabled */
+ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+ pf->flags |= I40E_FLAG_MFP_ENABLED;
+ dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+ }
- if (pf->hw.func_caps.fd) {
- /* FW/NVM is not yet fixed in this regard */
- if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
- (pf->hw.func_caps.fd_filters_best_effort > 0)) {
- pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
- dev_info(&pf->pdev->dev,
- "Flow Director ATR mode Enabled\n");
- pf->flags |= I40E_FLAG_FDIR_ENABLED;
+ /* FW/NVM is not yet fixed in this regard */
+ if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
+ (pf->hw.func_caps.fd_filters_best_effort > 0)) {
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+ dev_info(&pf->pdev->dev,
+ "Flow Director ATR mode Enabled\n");
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
dev_info(&pf->pdev->dev,
"Flow Director Side Band mode Enabled\n");
- pf->fdir_pf_filter_count =
- pf->hw.func_caps.fd_filters_guaranteed;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Flow Director Side Band mode Disabled in MFP mode\n");
}
- } else {
- pf->fdir_pf_filter_count = 0;
+ pf->fdir_pf_filter_count =
+ pf->hw.func_caps.fd_filters_guaranteed;
+ pf->hw.fdir_shared_filter_count =
+ pf->hw.func_caps.fd_filters_best_effort;
}
if (pf->hw.func_caps.vmdq) {
@@ -5634,12 +6280,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
}
- /* MFP mode enabled */
- if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
- pf->flags |= I40E_FLAG_MFP_ENABLED;
- dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
- }
-
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs) {
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
@@ -5647,6 +6287,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_req_vfs = min_t(int,
pf->hw.func_caps.num_vfs,
I40E_MAX_VF_COUNT);
+ dev_info(&pf->pdev->dev,
+ "Number of VFs being requested for PF[%d] = %d\n",
+ pf->hw.pf_id, pf->num_req_vfs);
}
#endif /* CONFIG_PCI_IOV */
pf->eeprom_version = 0xDEAD;
@@ -5701,6 +6344,104 @@ static int i40e_set_features(struct net_device *netdev,
return 0;
}
+#ifdef CONFIG_I40E_VXLAN
+/**
+ * i40e_get_vxlan_port_idx - Look up a possibly offloaded Rx UDP port
+ * @pf: board private structure
+ * @port: The UDP port to look up
+ *
+ * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
+ **/
+static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
+{
+ u8 i;
+
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->vxlan_ports[i] == port)
+ return i;
+ }
+
+ return i;
+}
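The lookup above returns the table size itself as a "not found" sentinel, so callers simply test idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS. A standalone sketch of that idiom (plain userspace C, placeholder names and sizes):

/* Sketch: table lookup that returns the table size as a "not found"
 * sentinel, as i40e_get_vxlan_port_idx does.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_PORTS 16

static uint8_t find_port(const uint16_t *table, uint16_t port)
{
	uint8_t i;

	for (i = 0; i < MAX_PORTS; i++)
		if (table[i] == port)
			return i;
	return i;	/* == MAX_PORTS means not found */
}

int main(void)
{
	uint16_t table[MAX_PORTS] = { [0] = 4789 };
	uint8_t idx = find_port(table, 4789);

	if (idx < MAX_PORTS)
		printf("port found at index %u\n", idx);
	else
		printf("port not offloaded\n");
	return 0;
}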
+
+/**
+ * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: New UDP port number that VXLAN started listening to
+ **/
+static void i40e_add_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 next_idx;
+ u8 idx;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
+ return;
+ }
+
+ /* Now check if there is space to add the new port */
+ next_idx = i40e_get_vxlan_port_idx(pf, 0);
+
+ if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
+ ntohs(port));
+ return;
+ }
+
+ /* New port: add it and mark its index in the bitmap */
+ pf->vxlan_ports[next_idx] = port;
+ pf->pending_vxlan_bitmap |= (1 << next_idx);
+
+ pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+}
+
+/**
+ * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: UDP port number that VXLAN stopped listening to
+ **/
+static void i40e_del_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 idx;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ /* if port exists, set it to 0 (mark for deletion)
+ * and make it pending
+ */
+ pf->vxlan_ports[idx] = 0;
+
+ pf->pending_vxlan_bitmap |= (1 << idx);
+
+ pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+ } else {
+ netdev_warn(netdev, "Port %d was not found, not deleting\n",
+ ntohs(port));
+ }
+}
+
+#endif
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -5710,6 +6451,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = i40e_set_mac,
.ndo_change_mtu = i40e_change_mtu,
+ .ndo_do_ioctl = i40e_ioctl,
.ndo_tx_timeout = i40e_tx_timeout,
.ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
@@ -5722,6 +6464,10 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
.ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
+#ifdef CONFIG_I40E_VXLAN
+ .ndo_add_vxlan_port = i40e_add_vxlan_port,
+ .ndo_del_vxlan_port = i40e_del_vxlan_port,
+#endif
};
/**
@@ -5732,6 +6478,7 @@ static const struct net_device_ops i40e_netdev_ops = {
**/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
+ u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_netdev_priv *np;
@@ -5781,6 +6528,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
random_ether_addr(mac_addr);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
}
+ i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
@@ -5814,10 +6562,6 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
return;
- /* there is no HW VSI for FDIR */
- if (vsi->type == I40E_VSI_FDIR)
- return;
-
i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
return;
}
@@ -5901,12 +6645,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
break;
case I40E_VSI_FDIR:
- /* no queue mapping or actual HW VSI needed */
- vsi->info.valid_sections = 0;
- vsi->seid = 0;
- vsi->id = 0;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
- return 0;
break;
case I40E_VSI_VMDQ2:
@@ -6133,6 +6877,69 @@ vector_setup_out:
}
/**
+ * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
+ * @vsi: pointer to the vsi.
+ *
+ * This re-allocates a vsi's queue resources.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct
+ * on success, otherwise returns NULL on failure.
+ **/
+static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 enabled_tc;
+ int ret;
+
+ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+ i40e_vsi_clear_rings(vsi);
+
+ i40e_vsi_free_arrays(vsi, false);
+ i40e_set_num_rings_in_vsi(vsi);
+ ret = i40e_vsi_alloc_arrays(vsi, false);
+ if (ret)
+ goto err_vsi;
+
+ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ if (ret < 0) {
+ dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
+ vsi->seid, ret);
+ goto err_vsi;
+ }
+ vsi->base_queue = ret;
+
+ /* Update the FW view of the VSI. Force a reset of TC and queue
+ * layout configurations.
+ */
+ enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+ pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
+ pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
+ i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+
+ /* assign it some queues */
+ ret = i40e_alloc_rings(vsi);
+ if (ret)
+ goto err_rings;
+
+ /* map all of the rings to the q_vectors */
+ i40e_vsi_map_rings_to_vectors(vsi);
+ return vsi;
+
+err_rings:
+ i40e_vsi_free_q_vectors(vsi);
+ if (vsi->netdev_registered) {
+ vsi->netdev_registered = false;
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+ i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
+err_vsi:
+ i40e_vsi_clear(vsi);
+ return NULL;
+}
+
+/**
* i40e_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @type: VSI type
@@ -6212,6 +7019,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
if (v_idx < 0)
goto err_alloc;
vsi = pf->vsi[v_idx];
+ if (!vsi)
+ goto err_alloc;
vsi->type = type;
vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
@@ -6220,7 +7029,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
else if (type == I40E_VSI_SRIOV)
vsi->vf_id = param1;
/* assign it some queues */
- ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
+ vsi->idx);
if (ret < 0) {
dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
vsi->seid, ret);
@@ -6246,6 +7056,10 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
goto err_netdev;
vsi->netdev_registered = true;
netif_carrier_off(vsi->netdev);
+#ifdef CONFIG_I40E_DCB
+ /* Setup DCB netlink interface */
+ i40e_dcbnl_setup(vsi);
+#endif /* CONFIG_I40E_DCB */
/* fall through */
case I40E_VSI_FDIR:
@@ -6503,12 +7317,14 @@ void i40e_veb_release(struct i40e_veb *veb)
**/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
- bool is_default = (vsi->idx == vsi->back->lan_vsi);
+ bool is_default = false;
+ bool is_cloud = false;
int ret;
/* get a VEB from the hardware */
ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
- veb->enabled_tc, is_default, &veb->seid, NULL);
+ veb->enabled_tc, is_default,
+ is_cloud, &veb->seid, NULL);
if (ret) {
dev_info(&veb->pf->pdev->dev,
"couldn't add VEB, err %d, aq_err %d\n",
@@ -6773,11 +7589,13 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
/**
* i40e_setup_pf_switch - Setup the HW switch on startup or after reset
* @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
*
* Returns 0 on success, negative value on failure
**/
-static int i40e_setup_pf_switch(struct i40e_pf *pf)
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
+ u32 rxfc = 0, txfc = 0, rxfc_reg;
int ret;
/* find out what's out there already */
@@ -6790,14 +7608,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
}
i40e_pf_reset_stats(pf);
- /* fdir VSI must happen first to be sure it gets queue 0, but only
- * if there is enough room for the fdir VSI
- */
- if (pf->num_lan_qps > 1)
- i40e_fdir_setup(pf);
-
/* first time setup */
- if (pf->lan_vsi == I40E_NO_VSI) {
+ if (pf->lan_vsi == I40E_NO_VSI || reinit) {
struct i40e_vsi *vsi = NULL;
u16 uplink_seid;
@@ -6808,19 +7620,15 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
uplink_seid = pf->veb[pf->lan_veb]->seid;
else
uplink_seid = pf->mac_seid;
-
- vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ if (pf->lan_vsi == I40E_NO_VSI)
+ vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ else if (reinit)
+ vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
if (!vsi) {
dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
i40e_fdir_teardown(pf);
return -EAGAIN;
}
- /* accommodate kcompat by copying the main VSI queue count
- * into the pf, since this newer code pushes the pf queue
- * info down a level into a VSI
- */
- pf->num_rx_queues = vsi->alloc_queue_pairs;
- pf->num_tx_queues = vsi->alloc_queue_pairs;
} else {
/* force a reset of TC and queue layout configurations */
u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
@@ -6830,6 +7638,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
}
i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
+ i40e_fdir_sb_setup(pf);
+
/* Setup static PF queue filter control settings */
ret = i40e_setup_pf_filter_control(pf);
if (ret) {
@@ -6848,37 +7658,68 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
i40e_link_event(pf);
- /* Initialize user-specifics link properties */
+ /* Initialize user-specific link properties */
pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
I40E_AQ_AN_COMPLETED) ? true : false);
- pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
- if (pf->hw.phy.link_info.an_info &
- (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
+ /* requested_mode is set in probe or by ethtool */
+ if (!pf->fc_autoneg_status)
+ goto no_autoneg;
+
+ if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
+ (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
pf->hw.fc.current_mode = I40E_FC_FULL;
else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
else
- pf->hw.fc.current_mode = I40E_FC_DEFAULT;
+ pf->hw.fc.current_mode = I40E_FC_NONE;
- return ret;
-}
+ /* sync the flow control settings with the auto-neg values */
+ switch (pf->hw.fc.current_mode) {
+ case I40E_FC_FULL:
+ txfc = 1;
+ rxfc = 1;
+ break;
+ case I40E_FC_TX_PAUSE:
+ txfc = 1;
+ rxfc = 0;
+ break;
+ case I40E_FC_RX_PAUSE:
+ txfc = 0;
+ rxfc = 1;
+ break;
+ case I40E_FC_NONE:
+ case I40E_FC_DEFAULT:
+ txfc = 0;
+ rxfc = 0;
+ break;
+ case I40E_FC_PFC:
+ /* TBD */
+ break;
+ /* no default case, we have to handle all possibilities here */
+ }
-/**
- * i40e_set_rss_size - helper to set rss_size
- * @pf: board private structure
- * @queues_left: how many queues
- */
-static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
-{
- int num_tc0;
+ wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
+
+ rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
+ ~I40E_PRTDCB_MFLCN_RFCE_MASK;
+ rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT);
+
+ wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg);
- num_tc0 = min_t(int, queues_left, pf->rss_size_max);
- num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
- num_tc0 = rounddown_pow_of_two(num_tc0);
+ goto fc_complete;
- return num_tc0;
+no_autoneg:
+ /* disable L2 flow control, user can turn it on if they wish */
+ wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0);
+ wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
+ ~I40E_PRTDCB_MFLCN_RFCE_MASK);
+
+fc_complete:
+ i40e_ptp_init(pf);
+
+ return ret;
}
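The flow-control block above updates a single field inside PRTDCB_MFLCN with a read-modify-write: read the register, clear the field with its mask, then OR in the new value shifted into place. A standalone sketch of that pattern (plain userspace C; the register, mask, shift, and shadow variable are placeholders):

/* Sketch: read-modify-write of one field in a 32-bit register, as done
 * for the RFCE bit in PRTDCB_MFLCN. All names and values are placeholders.
 */
#include <stdint.h>
#include <stdio.h>

#define RFCE_SHIFT 0
#define RFCE_MASK  (0x1u << RFCE_SHIFT)

static uint32_t reg_shadow = 0xabcd0001;	/* pretend hardware register */

static uint32_t rd32(void) { return reg_shadow; }
static void wr32(uint32_t v) { reg_shadow = v; }

int main(void)
{
	uint32_t rxfc = 0;	/* e.g. autoneg resolved to no RX pause */
	uint32_t val;

	val = rd32() & ~RFCE_MASK;			/* clear the old field */
	val |= (rxfc << RFCE_SHIFT) & RFCE_MASK;	/* set the new value */
	wr32(val);

	printf("register now 0x%08x\n", reg_shadow);
	return 0;
}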
/**
@@ -6887,12 +7728,9 @@ static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
**/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
- int accum_tc_size;
int queues_left;
pf->num_lan_qps = 0;
- pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
- accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;
/* Find the max queues to be put into basic use. We'll always be
* using TC0, whether or not DCB is running, and TC0 will get the
@@ -6900,99 +7738,45 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
*/
queues_left = pf->hw.func_caps.num_tx_qp;
- if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
- (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
- !(pf->flags & (I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
- (queues_left == 1)) {
-
+ if ((queues_left == 1) ||
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
+ !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_DCB_ENABLED))) {
/* one qp for PF, no queues for anything else */
queues_left = 0;
pf->rss_size = pf->num_lan_qps = 1;
/* make sure all the fancies are disabled */
- pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
- I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_SRIOV_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
- queues_left -= pf->rss_size;
- pf->num_lan_qps = pf->rss_size;
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- (pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- /* save num_tc_qps queues for TCs 1 thru 7 and the rest
- * are set up for RSS in TC0
- */
- queues_left -= accum_tc_size;
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
- queues_left -= pf->rss_size;
- if (queues_left < 0) {
- dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
- return;
- }
-
- pf->num_lan_qps = pf->rss_size + accum_tc_size;
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- queues_left -= 1; /* save 1 queue for FD */
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
- queues_left -= pf->rss_size;
- if (queues_left < 0) {
- dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
- return;
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
+ } else {
+ /* Not enough queues for all TCs */
+ if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
+ (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
+ pf->num_lan_qps = pf->rss_size_max;
+ queues_left -= pf->num_lan_qps;
+ }
- pf->num_lan_qps = pf->rss_size;
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- (pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- /* save 1 queue for TCs 1 thru 7,
- * 1 queue for flow director,
- * and the rest are set up for RSS in TC0
- */
- queues_left -= 1;
- queues_left -= accum_tc_size;
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
- queues_left -= pf->rss_size;
- if (queues_left < 0) {
- dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
- return;
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (queues_left > 1) {
+ queues_left -= 1; /* save 1 queue for FD */
+ } else {
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
}
-
- pf->num_lan_qps = pf->rss_size + accum_tc_size;
-
- } else {
- dev_info(&pf->pdev->dev,
- "Invalid configuration, flags=0x%08llx\n", pf->flags);
- return;
}
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
pf->num_vf_qps && pf->num_req_vfs && queues_left) {
- pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
- pf->num_vf_qps));
+ pf->num_req_vfs = min_t(int, pf->num_req_vfs,
+ (queues_left / pf->num_vf_qps));
queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
}
@@ -7003,6 +7787,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
}
+ pf->queues_left = queues_left;
return;
}
@@ -7024,7 +7809,7 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
/* Flow Director is enabled */
- if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
+ if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
settings->enable_fdir = true;
/* Ethtype and MACVLAN filters enabled for PF */
@@ -7053,6 +7838,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct i40e_driver_version dv;
struct i40e_pf *pf;
struct i40e_hw *hw;
+ static u16 pfs_found;
+ u16 link_status;
int err = 0;
u32 len;
@@ -7118,6 +7905,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->subsystem_device_id = pdev->subsystem_device;
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
+ pf->instance = pfs_found;
+
+ /* do a special CORER for clearing PXE mode once at init */
+ if (hw->revision_id == 0 &&
+ (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
+ wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
+ i40e_flush(hw);
+ msleep(200);
+ pf->corer_count++;
+
+ i40e_clear_pxe_mode(hw);
+ }
/* Reset here to make sure all is clean and to define PF 'n' */
err = i40e_pf_reset(hw);
@@ -7142,8 +7941,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ /* set up a default setting for link flow control */
+ pf->hw.fc.requested_mode = I40E_FC_NONE;
+
err = i40e_init_adminq(hw);
dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
+ if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
+ >> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) {
+ dev_info(&pdev->dev,
+ "warning: NVM version not supported, supported version: %02x.%02x\n",
+ I40E_CURRENT_NVM_VERSION_HI,
+ I40E_CURRENT_NVM_VERSION_LO);
+ }
if (err) {
dev_info(&pdev->dev,
"init_adminq failed: %d expecting API %02x.%02x\n",
@@ -7152,6 +7961,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ i40e_clear_pxe_mode(hw);
err = i40e_get_capabilities(pf);
if (err)
goto err_adminq_setup;
@@ -7178,7 +7988,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
i40e_get_mac_addr(hw, hw->mac.addr);
- if (i40e_validate_mac_addr(hw->mac.addr)) {
+ if (!is_valid_ether_addr(hw->mac.addr)) {
dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
err = -EIO;
goto err_mac_addr;
@@ -7188,6 +7998,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
+#ifdef CONFIG_I40E_DCB
+ err = i40e_init_pf_dcb(pf);
+ if (err) {
+ dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ goto err_init_dcb;
+ }
+#endif /* CONFIG_I40E_DCB */
/* set up periodic task facility */
setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
@@ -7198,6 +8016,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
pf->link_check_timeout = jiffies;
+ /* WoL defaults to disabled */
+ pf->wol_en = false;
+ device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+
/* set up the main switch operations */
i40e_determine_queue_usage(pf);
i40e_init_interrupt_scheme(pf);
@@ -7212,7 +8034,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_switch_setup;
}
- err = i40e_setup_pf_switch(pf);
+ err = i40e_setup_pf_switch(pf, false);
if (err) {
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
@@ -7250,6 +8072,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_flush(hw);
}
+ pfs_found++;
+
i40e_dbg_pf_init(pf);
/* tell the firmware that we're starting */
@@ -7263,15 +8087,41 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
+ /* Get the negotiated link width and speed from PCI config space */
+ pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
+
+ i40e_set_pci_config_data(hw, link_status);
+
+ dev_info(&pdev->dev, "PCI Express: %s %s\n",
+ (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
+ hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
+ hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
+ "Unknown"),
+ (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
+ hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
+ hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
+ hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
+ "Unknown"));
+
+ if (hw->bus.width < i40e_bus_width_pcie_x8 ||
+ hw->bus.speed < i40e_bus_speed_8000) {
+ dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
+ dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+ }
+
return 0;
/* Unwind what we've done if something failed in the setup */
err_vsis:
set_bit(__I40E_DOWN, &pf->state);
-err_switch_setup:
i40e_clear_interrupt_scheme(pf);
kfree(pf->vsi);
+err_switch_setup:
+ i40e_reset_interrupt_capability(pf);
del_timer_sync(&pf->service_timer);
+#ifdef CONFIG_I40E_DCB
+err_init_dcb:
+#endif /* CONFIG_I40E_DCB */
err_mac_addr:
err_configure_lan_hmc:
(void)i40e_shutdown_lan_hmc(hw);
@@ -7313,6 +8163,8 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_dbg_pf_exit(pf);
+ i40e_ptp_stop(pf);
+
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
@@ -7356,7 +8208,6 @@ static void i40e_remove(struct pci_dev *pdev)
"Failed to destroy the HMC resources: %d\n", ret_code);
/* shutdown the adminq */
- i40e_aq_queue_shutdown(&pf->hw, true);
ret_code = i40e_shutdown_adminq(&pf->hw);
if (ret_code)
dev_warn(&pdev->dev,
@@ -7413,7 +8264,11 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
/* shutdown all operations */
- i40e_pf_quiesce_all_vsi(pf);
+ if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+ }
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
@@ -7476,9 +8331,103 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
struct i40e_pf *pf = pci_get_drvdata(pdev);
dev_info(&pdev->dev, "%s\n", __func__);
+ if (test_bit(__I40E_SUSPENDED, &pf->state))
+ return;
+
+ rtnl_lock();
i40e_handle_reset_warning(pf);
+ rtnl_unlock();
}
+/**
+ * i40e_shutdown - PCI callback for shutting down
+ * @pdev: PCI device information struct
+ **/
+static void i40e_shutdown(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_SUSPENDED, &pf->state);
+ set_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, pf->wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40e_suspend - PCI callback for moving to D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_SUSPENDED, &pf->state);
+ set_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ pci_wake_from_d3(pdev, pf->wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+/**
+ * i40e_resume - PCI callback for waking up from D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_resume(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ u32 err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ /* pci_restore_state() clears dev->state_saved, so
+ * call pci_save_state() again to restore it.
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "%s: Cannot enable PCI device from suspend\n",
+ __func__);
+ return err;
+ }
+ pci_set_master(pdev);
+
+ /* no wakeup events while running */
+ pci_wake_from_d3(pdev, false);
+
+ /* handling the reset will rebuild the device state */
+ if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
+ clear_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_reset_and_rebuild(pf, false);
+ rtnl_unlock();
+ }
+
+ return 0;
+}
+
+#endif
static const struct pci_error_handlers i40e_err_handler = {
.error_detected = i40e_pci_error_detected,
.slot_reset = i40e_pci_error_slot_reset,
@@ -7490,6 +8439,11 @@ static struct pci_driver i40e_driver = {
.id_table = i40e_pci_tbl,
.probe = i40e_probe,
.remove = i40e_remove,
+#ifdef CONFIG_PM
+ .suspend = i40e_suspend,
+ .resume = i40e_resume,
+#endif
+ .shutdown = i40e_shutdown,
.err_handler = &i40e_err_handler,
.sriov_configure = i40e_pci_sriov_configure,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 97e1bb30ef8a..73f95b081927 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -166,15 +165,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
}
/**
- * i40e_read_nvm_srctl - Reads Shadow RAM.
+ * i40e_read_nvm_word - Reads Shadow RAM
* @hw: pointer to the HW structure.
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
* @data: word read from the Shadow RAM.
*
* Reads 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
-static i40e_status i40e_read_nvm_srctl(struct i40e_hw *hw, u16 offset,
- u16 *data)
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
i40e_status ret_code = I40E_ERR_TIMEOUT;
u32 sr_reg;
@@ -211,29 +210,6 @@ read_nvm_exit:
}
/**
- * i40e_read_nvm_word - Reads Shadow RAM word.
- * @hw: pointer to the HW structure.
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @data: word read from the Shadow RAM.
- *
- * Reads 16 bit word from the Shadow RAM. Each read is preceded
- * with the NVM ownership taking and followed by the release.
- **/
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data)
-{
- i40e_status ret_code = 0;
-
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- ret_code = i40e_read_nvm_srctl(hw, offset, data);
- i40e_release_nvm(hw);
- }
-
- return ret_code;
-}
-
-/**
* i40e_read_nvm_buffer - Reads Shadow RAM buffer.
* @hw: pointer to the HW structure.
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
@@ -250,36 +226,25 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
{
i40e_status ret_code = 0;
u16 index, word;
- u32 time;
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- /* Loop thru the selected region. */
- for (word = 0; word < *words; word++) {
- index = offset + word;
- ret_code = i40e_read_nvm_srctl(hw, index, &data[word]);
- if (ret_code)
- break;
- /* Check if we didn't exceeded the semaphore timeout. */
- time = rd32(hw, I40E_GLVFGEN_TIMER);
- if (time >= hw->nvm.hw_semaphore_timeout) {
- ret_code = I40E_ERR_TIMEOUT;
- hw_dbg(hw, "NVM read error: timeout.\n");
- break;
- }
- }
- /* Update the number of words read from the Shadow RAM. */
- *words = word;
- /* Release the NVM ownership. */
- i40e_release_nvm(hw);
+ /* Loop thru the selected region. */
+ for (word = 0; word < *words; word++) {
+ index = offset + word;
+ ret_code = i40e_read_nvm_word(hw, index, &data[word]);
+ if (ret_code)
+ break;
}
+ /* Update the number of words read from the Shadow RAM. */
+ *words = word;
+
return ret_code;
}
/**
* i40e_calc_nvm_checksum - Calculates and returns the checksum
* @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum
*
 * This function calculates the SW Checksum that covers the whole 64kB shadow RAM
* except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
@@ -297,14 +262,14 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
u32 i = 0;
/* read pointer to VPD area */
- ret_code = i40e_read_nvm_srctl(hw, I40E_SR_VPD_PTR, &vpd_module);
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
}
/* read pointer to PCIe Alt Auto-load module */
- ret_code = i40e_read_nvm_srctl(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
&pcie_alt_module);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
@@ -331,7 +296,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
break;
}
- ret_code = i40e_read_nvm_srctl(hw, (u16)i, &word);
+ ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
@@ -358,7 +323,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
{
i40e_status ret_code = 0;
u16 checksum_sr = 0;
- u16 checksum_local;
+ u16 checksum_local = 0;
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret_code)
@@ -371,7 +336,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
/* Do not use i40e_read_nvm_word() because we do not want to take
* the synchronization semaphores twice here.
*/
- i40e_read_nvm_srctl(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+ i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
/* Verify read checksum from EEPROM is the same as
* calculated checksum
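With this refactoring, i40e_read_nvm_word() is the raw GLNVM_SRCTL read and no
longer takes the NVM semaphore itself, so ownership moves to the caller (as
i40e_validate_nvm_checksum() does above). A hedged sketch of what an outside
caller now looks like; the helper name is illustrative only and the driver
headers ("i40e_prototype.h") are assumed:

/* Illustrative only: acquire the NVM for reading, pull a buffer of words,
 * then release ownership, mirroring the new calling convention.
 */
static i40e_status example_read_sr_words(struct i40e_hw *hw, u16 offset,
					 u16 *words, u16 *data)
{
	i40e_status ret;

	ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret)
		return ret;

	ret = i40e_read_nvm_buffer(hw, offset, words, data);

	i40e_release_nvm(hw);
	return ret;
}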
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 702c81ba86e3..ecd0f0b663c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index f75bb9ccc900..ed91f93ede2b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -51,7 +50,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
-bool i40e_asq_done(struct i40e_hw *hw);
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw,
@@ -60,10 +58,11 @@ void i40e_debug_aq(struct i40e_hw *hw,
void *buffer);
void i40e_idle_aq(struct i40e_hw *hw);
-void i40e_resume_aq(struct i40e_hw *hw);
+bool i40e_check_asq_alive(struct i40e_hw *hw);
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
u32 i40e_led_get(struct i40e_hw *hw);
-void i40e_led_set(struct i40e_hw *hw, u32 mode);
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
/* admin send queue commands */
@@ -71,8 +70,6 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading);
i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
@@ -95,9 +92,9 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 vsi_id, bool set_filter,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
@@ -106,7 +103,8 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *pveb_seid,
+ bool default_port, bool enable_l2_filtering,
+ u16 *pveb_seid,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
u16 veb_seid, u16 *switch_id, bool *floating,
@@ -119,12 +117,6 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
@@ -164,11 +156,19 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 header_len,
+ u8 protocol_index, u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
u16 flags, u8 *mac_addr,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
enum i40e_aq_hmc_profile profile,
u8 pe_vf_enabled_count,
@@ -179,6 +179,15 @@ i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
@@ -207,8 +216,6 @@ bool i40e_get_link_status(struct i40e_hw *hw);
i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
u8 *mac_addr);
i40e_status i40e_validate_mac_addr(u8 *mac_addr);
-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
- struct i40e_lldp_variables *lldp_cfg);
/* prototype for functions used for NVM access */
i40e_status i40e_init_nvm(struct i40e_hw *hw);
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
@@ -222,6 +229,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum);
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
/* prototype for functions used for SW locks */
@@ -236,4 +244,9 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_set_filter_control(struct i40e_hw *hw,
struct i40e_filter_control_settings *settings);
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
new file mode 100644
index 000000000000..e33ec6c842b7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -0,0 +1,662 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e.h"
+#include <linux/export.h>
+#include <linux/ptp_classify.h>
+
+/* The XL710 timesync is very much like Intel's 82599 design when it comes to
+ * the fundamental clock design. However, the clock operations are much simpler
+ * in the XL710 because the device supports a full 64 bits of nanoseconds.
+ * Because the field is so wide, we can forgo the cycle counter and just
+ * operate with the nanosecond field directly without fear of overflow.
+ *
+ * Much like the 82599, the update period is dependent upon the link speed:
+ * At 40Gb link or no link, the period is 1.6ns.
+ * At 10Gb link, the period is multiplied by 2. (3.2ns)
+ * At 1Gb link, the period is multiplied by 20. (32ns)
+ * 1588 functionality is not supported at 100Mbps.
+ */
+#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
+#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
+#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
+
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \
+ I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \
+ I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PTP_TX_TIMEOUT (HZ * 15)
+
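The three INCVAL constants above are consistent with the increment register
holding the per-tick step as 32.32 fixed-point nanoseconds (an inference from
the values and the periods quoted in the comment, not something the patch
states explicitly):

	0x0199999999 / 2^32 =   6871947673 / 4294967296 ~= 1.6 ns  (40Gb or no link)
	0x0333333333 / 2^32 =  13743895347 / 4294967296 ~= 3.2 ns  (10Gb)
	0x2000000000 / 2^32 = 137438953472 / 4294967296  = 32 ns   (1Gb)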
+/**
+ * i40e_ptp_read - Read the PHC time from the device
+ * @pf: Board private structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * This function reads the PRTTSYN_TIME registers and stores them in a
+ * timespec. However, since the registers are 64 bits of nanoseconds, we must
+ * convert the result to a timespec before we can return.
+ **/
+static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 hi, lo;
+ u64 ns;
+
+ /* The timer latches on the lowest register read. */
+ lo = rd32(hw, I40E_PRTTSYN_TIME_L);
+ hi = rd32(hw, I40E_PRTTSYN_TIME_H);
+
+ ns = (((u64)hi) << 32) | lo;
+
+ *ts = ns_to_timespec(ns);
+}
+
+/**
+ * i40e_ptp_write - Write the PHC time to the device
+ * @pf: Board private structure
+ * @ts: timespec structure that holds the new time value
+ *
+ * This function writes the PRTTSYN_TIME registers with the user value. Since
+ * we receive a timespec from the stack, we must convert that timespec into
+ * nanoseconds before programming the registers.
+ **/
+static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec *ts)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u64 ns = timespec_to_ns(ts);
+
+ /* The timer will not update until the high register is written, so
+ * write the low register first.
+ */
+ wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF);
+ wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32);
+}
+
+/**
+ * i40e_ptp_convert_to_hwtstamp - Convert device clock to system time
+ * @hwtstamps: Timestamp structure to update
+ * @timestamp: Timestamp from the hardware
+ *
+ * We need to convert the NIC clock value into a hwtstamp which can be used by
+ * the upper level timestamping functions. Since the timestamp is simply a 64-
+ * bit nanosecond value, we can call ns_to_ktime directly to handle this.
+ **/
+static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps,
+ u64 timestamp)
+{
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+
+ hwtstamps->hwtstamp = ns_to_ktime(timestamp);
+}
+
+/**
+ * i40e_ptp_adjfreq - Adjust the PHC frequency
+ * @ptp: The PTP clock structure
+ * @ppb: Parts per billion adjustment from the base
+ *
+ * Adjust the frequency of the PHC by the indicated parts per billion from the
+ * base frequency.
+ **/
+static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ struct i40e_hw *hw = &pf->hw;
+ u64 adj, freq, diff;
+ int neg_adj = 0;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ smp_mb(); /* Force any pending update before accessing. */
+ adj = ACCESS_ONCE(pf->ptp_base_adj);
+
+ freq = adj;
+ freq *= ppb;
+ diff = div_u64(freq, 1000000000ULL);
+
+ if (neg_adj)
+ adj -= diff;
+ else
+ adj += diff;
+
+ wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
+ wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
+
+ return 0;
+}
+
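A worked example of the scaling in i40e_ptp_adjfreq() above, using the 40Gb
base increment and ppb = 100 (a requested change of 100 parts per billion):

	diff = 6871947673 * 100 / 1000000000 ~= 687
	adj  = 6871947673 + 687 = 6871948360

so the per-tick step grows by roughly 1e-7 of its base value, which is the
requested +100 ppb.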
+/**
+ * i40e_ptp_adjtime - Adjust the PHC time
+ * @ptp: The PTP clock structure
+ * @delta: Offset in nanoseconds to adjust the PHC time by
+ *
+ * Adjust the current PHC time by the indicated delta, specified in
+ * nanoseconds.
+ **/
+static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ struct timespec now, then = ns_to_timespec(delta);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pf->tmreg_lock, flags);
+
+ i40e_ptp_read(pf, &now);
+ now = timespec_add(now, then);
+ i40e_ptp_write(pf, (const struct timespec *)&now);
+
+ spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_gettime - Get the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the device clock and return its value in ns, after converting it into
+ * a timespec struct.
+ **/
+static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pf->tmreg_lock, flags);
+ i40e_ptp_read(pf, ts);
+ spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_settime - Set the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec structure that holds the new time value
+ *
+ * Set the device clock to the user input value. The conversion from timespec
+ * to ns happens in the write function.
+ **/
+static int i40e_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pf->tmreg_lock, flags);
+ i40e_ptp_write(pf, ts);
+ spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_tx_work
+ * @work: pointer to work struct
+ *
+ * This work function polls the PRTTSYN_STAT_0.TXTIME bit to determine when a
+ * Tx timestamp event has occurred, in order to pass the Tx timestamp value up
+ * the stack in the skb.
+ */
+static void i40e_ptp_tx_work(struct work_struct *work)
+{
+ struct i40e_pf *pf = container_of(work, struct i40e_pf,
+ ptp_tx_work);
+ struct i40e_hw *hw = &pf->hw;
+ u32 prttsyn_stat_0;
+
+ if (!pf->ptp_tx_skb)
+ return;
+
+ if (time_is_before_jiffies(pf->ptp_tx_start +
+ I40E_PTP_TX_TIMEOUT)) {
+ dev_kfree_skb_any(pf->ptp_tx_skb);
+ pf->ptp_tx_skb = NULL;
+ pf->tx_hwtstamp_timeouts++;
+ dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang");
+ return;
+ }
+
+ prttsyn_stat_0 = rd32(hw, I40E_PRTTSYN_STAT_0);
+ if (prttsyn_stat_0 & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
+ i40e_ptp_tx_hwtstamp(pf);
+ else
+ schedule_work(&pf->ptp_tx_work);
+}
+
+/**
+ * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
+ * @ptp: The PTP clock structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ *
+ * The XL710 does not support any of the ancillary features of the PHC
+ * subsystem, so this function simply returns -EOPNOTSUPP for any request.
+ **/
+static int i40e_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
+ * @vsi: The VSI with the rings relevant to 1588
+ *
+ * This watchdog task is scheduled to detect the error case where hardware has
+ * dropped an Rx packet that was timestamped when the ring is full. The
+ * particular error is rare but leaves the device in a state unable to
+ * timestamp any future packets.
+ **/
+void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_ring *rx_ring;
+ unsigned long rx_event;
+ u32 prttsyn_stat;
+ int n;
+
+ if (pf->flags & I40E_FLAG_PTP)
+ return;
+
+ prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+
+ /* Unless all four receive timestamp registers are latched, we are not
+ * concerned about a possible PTP Rx hang, so just update the timeout
+ * counter and exit.
+ */
+ if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK <<
+ I40E_PRTTSYN_STAT_1_RXT0_SHIFT) |
+ (I40E_PRTTSYN_STAT_1_RXT1_MASK <<
+ I40E_PRTTSYN_STAT_1_RXT1_SHIFT) |
+ (I40E_PRTTSYN_STAT_1_RXT2_MASK <<
+ I40E_PRTTSYN_STAT_1_RXT2_SHIFT) |
+ (I40E_PRTTSYN_STAT_1_RXT3_MASK <<
+ I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) {
+ pf->last_rx_ptp_check = jiffies;
+ return;
+ }
+
+ /* Determine the most recent watchdog or rx_timestamp event. */
+ rx_event = pf->last_rx_ptp_check;
+ for (n = 0; n < vsi->num_queue_pairs; n++) {
+ rx_ring = vsi->rx_rings[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
+
+ /* Only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
+ pf->last_rx_ptp_check = jiffies;
+ pf->rx_hwtstamp_cleared++;
+ dev_warn(&vsi->back->pdev->dev,
+ "%s: clearing Rx timestamp hang",
+ __func__);
+ }
+}
+
+/**
+ * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp
+ * @pf: Board private structure
+ *
+ * Read the value of the Tx timestamp from the registers, convert it into a
+ * value consumable by the stack, and store that result into the shhwtstamps
+ * struct before returning it up the stack.
+ **/
+void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct i40e_hw *hw = &pf->hw;
+ u32 hi, lo;
+ u64 ns;
+
+ lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
+ hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);
+
+ ns = (((u64)hi) << 32) | lo;
+
+ i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);
+ skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
+ dev_kfree_skb_any(pf->ptp_tx_skb);
+ pf->ptp_tx_skb = NULL;
+}
+
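For context, a minimal userspace sketch (not part of this patch) of how the Tx
timestamp delivered by i40e_ptp_tx_hwtstamp() is consumed: the socket requests
hardware Tx timestamps with SO_TIMESTAMPING and reads them back from the error
queue. The send itself, the SIOCSHWTSTAMP setup and error handling are omitted.

#include <stdio.h>
#include <time.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/net_tstamp.h>
#include <linux/errqueue.h>

static void read_tx_tstamp(int fd)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
	char data[256], ctrl[256];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm;

	/* ask the stack for raw hardware Tx timestamps on this socket */
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));

	/* ... send a packet, then poll POLLERR and drain the error queue ... */
	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_SOCKET &&
		    cm->cmsg_type == SCM_TIMESTAMPING) {
			struct scm_timestamping *ts = (void *)CMSG_DATA(cm);

			/* ts->ts[2] holds the raw hardware timestamp */
			printf("hw tx stamp: %ld.%09ld\n",
			       (long)ts->ts[2].tv_sec, ts->ts[2].tv_nsec);
		}
	}
}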
+/**
+ * i40e_ptp_rx_hwtstamp - Utility function which checks for an Rx timestamp
+ * @pf: Board private structure
+ * @skb: Particular skb to send timestamp with
+ * @index: Index into the receive timestamp registers for the timestamp
+ *
+ * The XL710 receives a notification in the receive descriptor with an offset
+ * into the set of RXTIME registers where the timestamp is for that skb. This
+ * function goes and fetches the receive timestamp from that offset, if a valid
+ * one exists. The RXTIME registers are in ns, so we must convert the result
+ * first.
+ **/
+void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
+{
+ u32 prttsyn_stat, hi, lo;
+ struct i40e_hw *hw;
+ u64 ns;
+
+ /* Since we cannot turn off the Rx timestamp logic if the device is
+ * doing Tx timestamping, check if Rx timestamping is configured.
+ */
+ if (!pf->ptp_rx)
+ return;
+
+ hw = &pf->hw;
+
+ prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+
+ if (!(prttsyn_stat & (1 << index)))
+ return;
+
+ lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
+ hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index));
+
+ ns = (((u64)hi) << 32) | lo;
+
+ i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns);
+}
+
+/**
+ * i40e_ptp_set_increment - Utility function to update clock increment rate
+ * @pf: Board private structure
+ *
+ * During a link change, the DMA frequency that drives the 1588 logic will
+ * change. In order to keep the PRTTSYN_TIME registers in units of nanoseconds,
+ * we must update the increment value per clock tick.
+ **/
+void i40e_ptp_set_increment(struct i40e_pf *pf)
+{
+ struct i40e_link_status *hw_link_info;
+ struct i40e_hw *hw = &pf->hw;
+ u64 incval;
+
+ hw_link_info = &hw->phy.link_info;
+
+ i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+
+ switch (hw_link_info->link_speed) {
+ case I40E_LINK_SPEED_10GB:
+ incval = I40E_PTP_10GB_INCVAL;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ incval = I40E_PTP_1GB_INCVAL;
+ break;
+ case I40E_LINK_SPEED_100MB:
+ dev_warn(&pf->pdev->dev,
+ "%s: 1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n",
+ __func__);
+ incval = 0;
+ break;
+ case I40E_LINK_SPEED_40GB:
+ default:
+ incval = I40E_PTP_40GB_INCVAL;
+ break;
+ }
+
+ /* Write the new increment value into the increment register. The
+ * hardware will not update the clock until both registers have been
+ * written.
+ */
+ wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF);
+ wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
+
+ /* Update the base adjustment value. */
+ ACCESS_ONCE(pf->ptp_base_adj) = incval;
+ smp_mb(); /* Force the above update. */
+}
+
+/**
+ * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping
+ * @pf: Board private structure
+ * @ifreq: ioctl data
+ *
+ * Obtain the current hardware timestamping settings as requested. To do this,
+ * keep a shadow copy of the timestamp settings rather than attempting to
+ * deconstruct it from the registers.
+ **/
+int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+{
+ struct hwtstamp_config *config = &pf->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
+ * @pf: Board private structure
+ * @ifreq: ioctl data
+ *
+ * Respond to the user filter requests and make the appropriate hardware
+ * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
+ * logic, so keep track in software of whether to indicate these timestamps
+ * or not.
+ *
+ * It is permissible to "upgrade" the user request to a broader filter, as long
+ * as the user receives the timestamps they care about and the user is notified
+ * the filter has been broadened.
+ **/
+int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct hwtstamp_config *config = &pf->tstamp_config;
+ u32 pf_id, tsyntype, regval;
+
+ if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
+ return -EFAULT;
+
+ /* Reserved for future extensions. */
+ if (config->flags)
+ return -EINVAL;
+
+ /* Confirm that 1588 is supported on this PF. */
+ pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
+ I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
+ if (hw->pf_id != pf_id)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ pf->ptp_tx = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ pf->ptp_tx = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ pf->ptp_rx = false;
+ tsyntype = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ pf->ptp_rx = true;
+ tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK |
+ I40E_PRTTSYN_CTL1_TSYNTYPE_V1 |
+ I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ pf->ptp_rx = true;
+ tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK |
+ I40E_PRTTSYN_CTL1_TSYNTYPE_V2 |
+ I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ default:
+ return -ERANGE;
+ }
+
+ /* Clear out all 1588-related registers to clear and unlatch them. */
+ rd32(hw, I40E_PRTTSYN_STAT_0);
+ rd32(hw, I40E_PRTTSYN_TXTIME_H);
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
+
+ /* Enable/disable the Tx timestamp interrupt based on user input. */
+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
+ if (pf->ptp_tx)
+ regval |= I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
+ else
+ regval &= ~I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
+
+ regval = rd32(hw, I40E_PFINT_ICR0_ENA);
+ if (pf->ptp_tx)
+ regval |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ else
+ regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, regval);
+
+ /* There is no simple on/off switch for Rx. To "disable" Rx support,
+ * ignore any received timestamps, rather than turn off the clock.
+ */
+ if (pf->ptp_rx) {
+ regval = rd32(hw, I40E_PRTTSYN_CTL1);
+ /* clear everything but the enable bit */
+ regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
+ /* now enable bits for desired Rx timestamps */
+ regval |= tsyntype;
+ wr32(hw, I40E_PRTTSYN_CTL1, regval);
+ }
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
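A minimal userspace sketch (not part of this patch) of the filter "upgrade"
behaviour described above: request one of the narrower V2 filters and the
driver hands back HWTSTAMP_FILTER_PTP_V2_EVENT in the returned config. The
interface name is a placeholder, fd is any open socket, and error handling is
omitted.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int enable_hw_tstamp(int fd)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		return -1;

	/* the driver broadens all V2 requests to the V2 event filter */
	printf("granted rx_filter: %d\n", cfg.rx_filter);
	return 0;
}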
+/**
+ * i40e_ptp_init - Initialize the 1588 support and register the PHC
+ * @pf: Board private structure
+ *
+ * This function registers the device clock as a PHC. If it is successful, it
+ * starts the clock in the hardware.
+ **/
+void i40e_ptp_init(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+
+ strncpy(pf->ptp_caps.name, "i40e", sizeof(pf->ptp_caps.name));
+ pf->ptp_caps.owner = THIS_MODULE;
+ pf->ptp_caps.max_adj = 999999999;
+ pf->ptp_caps.n_ext_ts = 0;
+ pf->ptp_caps.pps = 0;
+ pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
+ pf->ptp_caps.adjtime = i40e_ptp_adjtime;
+ pf->ptp_caps.gettime = i40e_ptp_gettime;
+ pf->ptp_caps.settime = i40e_ptp_settime;
+ pf->ptp_caps.enable = i40e_ptp_enable;
+
+ /* Attempt to register the clock before enabling the hardware. */
+ pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
+ if (IS_ERR(pf->ptp_clock)) {
+ pf->ptp_clock = NULL;
+ dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
+ __func__);
+ } else {
+ struct timespec ts;
+ u32 regval;
+
+ spin_lock_init(&pf->tmreg_lock);
+ INIT_WORK(&pf->ptp_tx_work, i40e_ptp_tx_work);
+
+ dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
+ netdev->name);
+ pf->flags |= I40E_FLAG_PTP;
+
+ /* Ensure the clocks are running. */
+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
+ regval |= I40E_PRTTSYN_CTL0_TSYNENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
+ regval = rd32(hw, I40E_PRTTSYN_CTL1);
+ regval |= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL1, regval);
+
+ /* Set the increment value per clock tick. */
+ i40e_ptp_set_increment(pf);
+
+ /* reset the tstamp_config */
+ memset(&pf->tstamp_config, 0, sizeof(pf->tstamp_config));
+
+ /* Set the clock value. */
+ ts = ktime_to_timespec(ktime_get_real());
+ i40e_ptp_settime(&pf->ptp_caps, &ts);
+ }
+}
+
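A minimal userspace sketch (not part of this patch) for reading the PHC that
i40e_ptp_init() registers: open the posix clock character device and turn its
fd into a dynamic clockid. "/dev/ptp0" is a placeholder; the index belonging
to a given netdev is reported by "ethtool -T <iface>".

#include <stdio.h>
#include <fcntl.h>
#include <time.h>

/* standard dynamic posix-clock encoding, as used by the kernel's testptp.c */
#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	int fd = open("/dev/ptp0", O_RDWR);	/* placeholder device node */
	struct timespec ts;

	if (fd < 0)
		return 1;

	clock_gettime(FD_TO_CLOCKID(fd), &ts);
	printf("phc time: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}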
+/**
+ * i40e_ptp_stop - Disable the driver/hardware support and unregister the PHC
+ * @pf: Board private structure
+ *
+ * This function handles the cleanup work required after initialization by
+ * clearing out the PTP state and unregistering the PHC.
+ **/
+void i40e_ptp_stop(struct i40e_pf *pf)
+{
+ pf->flags &= ~I40E_FLAG_PTP;
+ pf->ptp_tx = false;
+ pf->ptp_rx = false;
+
+ cancel_work_sync(&pf->ptp_tx_work);
+ if (pf->ptp_tx_skb) {
+ dev_kfree_skb_any(pf->ptp_tx_skb);
+ pf->ptp_tx_skb = NULL;
+ }
+
+ if (pf->ptp_clock) {
+ ptp_clock_unregister(pf->ptp_clock);
+ pf->ptp_clock = NULL;
+ dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__,
+ pf->vsi[pf->lan_vsi]->netdev->name);
+ }
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 6bd333cde28b..1d40f425acf1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -28,6 +27,10 @@
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
@@ -38,6 +41,11 @@
#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
@@ -50,9 +58,14 @@
#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+
#define I40E_PF_ARQBAH 0x00080180
#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
@@ -837,7 +850,7 @@
#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
#define I40E_GLHMC_PEQ1MAX 0x000C2054
#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
@@ -903,7 +916,7 @@
#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
#define I40E_GLHMC_PEXFFLMAX 0x000C204c
#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
#define I40E_GLHMC_PEXFMAX 0x000C2048
#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
@@ -1636,7 +1649,7 @@
#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4))
-#define I40E_VSILAN_QTABLE_MAX_INDEX 15
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
@@ -1773,16 +1786,20 @@
#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
#define I40E_GL_MNG_FWSM 0x000B6134
-#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
-#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x3FF << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6
#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25
+#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)
#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
@@ -2035,6 +2052,28 @@
#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+
#define I40E_GLPCI_BYTCTH 0x0009C484
#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
@@ -2170,6 +2209,12 @@
#define I40E_GLPCI_PCIERR 0x000BE4FC
#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PCITEST2 0x000BE4BC
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
+
#define I40E_GLPCI_PKTCT 0x0009C4BC
#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
@@ -2380,8 +2425,7 @@
#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
-#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
+
#define I40E_PFPE_MRTEIDXMASK 0x00008600
#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
@@ -2460,8 +2504,6 @@
#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
-#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
@@ -3141,30 +3183,6 @@
#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_GLPM_DMACR 0x000881F4
-#define I40E_GLPM_DMACR_DMACWT_SHIFT 0
-#define I40E_GLPM_DMACR_DMACWT_MASK (0xFFFF << I40E_GLPM_DMACR_DMACWT_SHIFT)
-#define I40E_GLPM_DMACR_EXIT_DC_SHIFT 29
-#define I40E_GLPM_DMACR_EXIT_DC_MASK (0x1 << I40E_GLPM_DMACR_EXIT_DC_SHIFT)
-#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT 30
-#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_MASK (0x1 << I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT)
-#define I40E_GLPM_DMACR_DMAC_EN_SHIFT 31
-#define I40E_GLPM_DMACR_DMAC_EN_MASK (0x1 << I40E_GLPM_DMACR_DMAC_EN_SHIFT)
-#define I40E_GLPM_LTRC 0x000BE500
-#define I40E_GLPM_LTRC_SLTRV_SHIFT 0
-#define I40E_GLPM_LTRC_SLTRV_MASK (0x3FF << I40E_GLPM_LTRC_SLTRV_SHIFT)
-#define I40E_GLPM_LTRC_SSCALE_SHIFT 10
-#define I40E_GLPM_LTRC_SSCALE_MASK (0x7 << I40E_GLPM_LTRC_SSCALE_SHIFT)
-#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT 15
-#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT)
-#define I40E_GLPM_LTRC_NSLTRV_SHIFT 16
-#define I40E_GLPM_LTRC_NSLTRV_MASK (0x3FF << I40E_GLPM_LTRC_NSLTRV_SHIFT)
-#define I40E_GLPM_LTRC_NSSCALE_SHIFT 26
-#define I40E_GLPM_LTRC_NSSCALE_MASK (0x7 << I40E_GLPM_LTRC_NSSCALE_SHIFT)
-#define I40E_GLPM_LTRC_LTR_SEND_SHIFT 30
-#define I40E_GLPM_LTRC_LTR_SEND_MASK (0x1 << I40E_GLPM_LTRC_LTR_SEND_SHIFT)
-#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT 31
-#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT)
#define I40E_PRTPM_EEE_STAT 0x001E4320
#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
@@ -3201,9 +3219,6 @@
#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
-#define I40E_PRTPM_HPTC 0x000AC800
-#define I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT 0
-#define I40E_PRTPM_HPTC_HIGH_PRI_TC_MASK (0xFF << I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT)
#define I40E_PRTPM_RLPIC 0x001E43A0
#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
@@ -3265,8 +3280,8 @@
#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
-#define I40E_GLQF_CTL_DDPLPEN_SHIFT 7
-#define I40E_GLQF_CTL_DDPLPEN_MASK (0x1 << I40E_GLQF_CTL_DDPLPEN_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)
#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
@@ -3416,9 +3431,9 @@
#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
-#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 6
-#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0xF << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4))
@@ -3504,7 +3519,7 @@
#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4))
-#define I40E_VSIQF_TCREGION_MAX_INDEX 7
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
@@ -3521,10 +3536,7 @@
#define I40E_GL_FCOEDDPC_MAX_INDEX 143
#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-#define I40E_GL_FCOEDDPEC(_i) (0x00314900 + ((_i) * 8)) /* _i=0...143 */
-#define I40E_GL_FCOEDDPEC_MAX_INDEX 143
-#define I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT 0
-#define I40E_GL_FCOEDDPEC_CFOEDDPEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT)
+/* _i=0...143 */
#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
@@ -4276,46 +4288,10 @@
#define I40E_PFPM_APM 0x000B8080
#define I40E_PFPM_APM_APME_SHIFT 0
#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
-#define I40E_PFPM_FHFT_DATA(_i, _j) (0x00060000 + ((_i) * 4096 + (_j) * 128))
-#define I40E_PFPM_FHFT_DATA_MAX_INDEX 7
-#define I40E_PFPM_FHFT_DATA_DWORD_SHIFT 0
-#define I40E_PFPM_FHFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PFPM_FHFT_DATA_DWORD_SHIFT)
#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PFPM_FHFT_MASK(_i, _j) (0x00068000 + ((_i) * 1024 + (_j) * 128))
-#define I40E_PFPM_FHFT_MASK_MAX_INDEX 7
-#define I40E_PFPM_FHFT_MASK_MASK_SHIFT 0
-#define I40E_PFPM_FHFT_MASK_MASK_MASK (0xFFFF << I40E_PFPM_FHFT_MASK_MASK_SHIFT)
-#define I40E_PFPM_PROXYFC 0x00245A80
-#define I40E_PFPM_PROXYFC_PPROXYE_SHIFT 0
-#define I40E_PFPM_PROXYFC_PPROXYE_MASK (0x1 << I40E_PFPM_PROXYFC_PPROXYE_SHIFT)
-#define I40E_PFPM_PROXYFC_EX_SHIFT 1
-#define I40E_PFPM_PROXYFC_EX_MASK (0x1 << I40E_PFPM_PROXYFC_EX_SHIFT)
-#define I40E_PFPM_PROXYFC_ARP_SHIFT 4
-#define I40E_PFPM_PROXYFC_ARP_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_SHIFT)
-#define I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT 5
-#define I40E_PFPM_PROXYFC_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYFC_NS_SHIFT 9
-#define I40E_PFPM_PROXYFC_NS_MASK (0x1 << I40E_PFPM_PROXYFC_NS_SHIFT)
-#define I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT 10
-#define I40E_PFPM_PROXYFC_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYFC_MLD_SHIFT 12
-#define I40E_PFPM_PROXYFC_MLD_MASK (0x1 << I40E_PFPM_PROXYFC_MLD_SHIFT)
-#define I40E_PFPM_PROXYS 0x00245B80
-#define I40E_PFPM_PROXYS_EX_SHIFT 1
-#define I40E_PFPM_PROXYS_EX_MASK (0x1 << I40E_PFPM_PROXYS_EX_SHIFT)
-#define I40E_PFPM_PROXYS_ARP_SHIFT 4
-#define I40E_PFPM_PROXYS_ARP_MASK (0x1 << I40E_PFPM_PROXYS_ARP_SHIFT)
-#define I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT 5
-#define I40E_PFPM_PROXYS_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYS_NS_SHIFT 9
-#define I40E_PFPM_PROXYS_NS_MASK (0x1 << I40E_PFPM_PROXYS_NS_SHIFT)
-#define I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT 10
-#define I40E_PFPM_PROXYS_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYS_MLD_SHIFT 12
-#define I40E_PFPM_PROXYS_MLD_MASK (0x1 << I40E_PFPM_PROXYS_MLD_SHIFT)
#define I40E_PFPM_WUC 0x0006B200
#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
@@ -4536,21 +4512,21 @@
#define I40E_VFMSIX_PBA 0x00002000
#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TADD_MAX_INDEX 16
#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TMSG_MAX_INDEX 16
#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TUADD_MAX_INDEX 16
#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
@@ -4610,8 +4586,6 @@
#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT 17
-#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT)
#define I40E_VFPE_MRTEIDXMASK1 0x00009000
#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
@@ -4684,5 +4658,13 @@
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
-
+#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
#endif
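All of the register definitions in this header follow the same _SHIFT/_MASK
convention, with the mask already shifted into position. A small illustrative
helper (not part of the patch, driver headers assumed) showing the intended
read pattern for one of the fields touched above:

/* Illustrative only: extract the FW_MODES field from GL_MNG_FWSM by masking
 * first, then shifting the field down to bit 0.
 */
static u32 example_read_fw_modes(struct i40e_hw *hw)
{
	u32 val = rd32(hw, I40E_GL_MNG_FWSM);

	return (val & I40E_GL_MNG_FWSM_FW_MODES_MASK) >>
	       I40E_GL_MNG_FWSM_FW_MODES_SHIFT;
}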
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 5e5bcddac573..5f9cac55aa55 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f1f03bc5c729..d4bb482b1a7f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -77,7 +76,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
/* grab the next descriptor */
i = tx_ring->next_to_use;
fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
- tx_buf = &tx_ring->tx_bi[i];
tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
@@ -129,15 +127,23 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
/* Now program a dummy descriptor */
i = tx_ring->next_to_use;
tx_desc = I40E_TX_DESC(tx_ring, i);
+ tx_buf = &tx_ring->tx_bi[i];
tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
tx_desc->buffer_addr = cpu_to_le64(dma);
td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
+ /* set the timestamp */
+ tx_buf->time_stamp = jiffies;
+
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -768,7 +774,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
rx_ring->rx_buf_len);
if (!skb) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_buff_failed++;
goto no_buffers;
}
/* initialize queue mapping */
@@ -782,7 +788,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_buff_failed++;
bi->dma = 0;
goto no_buffers;
}
@@ -792,7 +798,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
if (!bi->page) {
bi->page = alloc_page(GFP_ATOMIC);
if (!bi->page) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
+ rx_ring->rx_stats.alloc_page_failed++;
goto no_buffers;
}
}
@@ -807,7 +813,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_ring->dev,
bi->page_dma)) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
+ rx_ring->rx_stats.alloc_page_failed++;
bi->page_dma = 0;
goto no_buffers;
}
@@ -860,12 +866,25 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
* @skb: skb currently being received and modified
* @rx_status: status value of last descriptor in packet
* @rx_error: error value of last descriptor in packet
+ * @rx_ptype: ptype value of last descriptor in packet
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
u32 rx_status,
- u32 rx_error)
+ u32 rx_error,
+ u16 rx_ptype)
{
+ bool ipv4_tunnel, ipv6_tunnel;
+ __wsum rx_udp_csum;
+ __sum16 csum;
+ struct iphdr *iph;
+
+ ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+ ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+
+ skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
skb->ip_summed = CHECKSUM_NONE;
/* Rx csum enabled and ip headers found? */
@@ -873,13 +892,47 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* IP or L4 checksum error */
+ /* likely incorrect csum if alternate IP extension headers found */
+ if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ return;
+
+ /* IP or L4 or outermost IP checksum error */
if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
+ (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
vsi->back->hw_csum_rx_error++;
return;
}
+ if (ipv4_tunnel &&
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+ /* If VXLAN traffic has an outer UDPv4 checksum we need to check
+ * it in the driver, hardware does not do it for us.
+ * Since L3L4P bit was set we assume a valid IHL value (>=5)
+ * so the total length of IPv4 header is IHL*4 bytes
+ */
+ skb->transport_header = skb->mac_header +
+ sizeof(struct ethhdr) +
+ (ip_hdr(skb)->ihl * 4);
+
+ /* Add 4 bytes for VLAN tagged packets */
+ skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD))
+ ? VLAN_HLEN : 0;
+
+ rx_udp_csum = udp_csum(skb);
+ iph = ip_hdr(skb);
+ csum = csum_tcpudp_magic(
+ iph->saddr, iph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, rx_udp_csum);
+
+ if (udp_hdr(skb)->check != csum) {
+ vsi->back->hw_csum_rx_error++;
+ return;
+ }
+ }
+
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
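The VXLAN branch above has to verify the outer UDPv4 checksum in software because the hardware only reports on the inner headers. A standalone sketch of the same check, assuming skb->transport_header has already been pointed at the outer UDP header as in the hunk above; i40e_validate_outer_udp_csum() is a hypothetical helper name, while the checksum helpers and skb accessors are standard kernel APIs:

static bool i40e_validate_outer_udp_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	/* checksum over the outer UDP header and payload */
	__wsum partial = udp_csum(skb);
	/* fold in the IPv4 pseudo-header: src, dst, length, protocol */
	__sum16 want = csum_tcpudp_magic(iph->saddr, iph->daddr,
					 skb->len - skb_transport_offset(skb),
					 IPPROTO_UDP, partial);

	/* a good packet carries exactly this value in its UDP header */
	return udp_hdr(skb)->check == want;
}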
@@ -891,13 +944,15 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
static inline u32 i40e_rx_hash(struct i40e_ring *ring,
union i40e_rx_desc *rx_desc)
{
- if (ring->netdev->features & NETIF_F_RXHASH) {
- if ((le64_to_cpu(rx_desc->wb.qword1.status_error_len) >>
- I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
- I40E_RX_DESC_FLTSTAT_RSS_HASH)
- return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- }
- return 0;
+ const __le64 rss_mask =
+ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+ if ((ring->netdev->features & NETIF_F_RXHASH) &&
+ (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
+ return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ else
+ return 0;
}
/**
@@ -918,11 +973,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status;
u64 qword;
+ u16 rx_ptype;
rx_desc = I40E_RX_DESC(rx_ring, i);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
- >> I40E_RXD_QW1_STATUS_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
union i40e_rx_desc *next_rxd;
@@ -938,18 +994,20 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
skb = rx_bi->skb;
prefetch(skb->data);
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
- >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
- >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
- rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK)
- >> I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+ rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+ rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT;
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK)
- >> I40E_RXD_QW1_ERROR_SHIFT;
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
rx_bi->skb = NULL;
/* This memory barrier is needed to keep us from reading
@@ -1030,13 +1088,21 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
}
skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
- i40e_rx_checksum(vsi, skb, rx_status, rx_error);
+ if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
+ i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
+ I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
+ rx_ring->last_rx_timestamp = jiffies;
+ }
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
total_rx_packets++;
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
@@ -1059,8 +1125,8 @@ next_desc:
/* use prefetched values */
rx_desc = next_rxd;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
- >> I40E_RXD_QW1_STATUS_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
}
rx_ring->next_to_clean = i;
@@ -1173,7 +1239,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
u16 i;
/* make sure ATR is enabled */
- if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
+ if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
return;
/* if sampling is disabled do nothing */
@@ -1268,7 +1334,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
tx_flags |= I40E_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN, check the next protocol and store the tag */
- } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+ } else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, _vhdr;
vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
if (!vhdr)
@@ -1333,7 +1399,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
return err;
}
- if (protocol == __constant_htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
iph->tot_len = 0;
@@ -1359,10 +1425,50 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
cd_cmd = I40E_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len;
cd_mss = skb_shinfo(skb)->gso_size;
- *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT)
- | ((u64)cd_tso_len
- << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
- | ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ ((u64)cd_tso_len <<
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ return 1;
+}
+
+/**
+ * i40e_tsyn - set up the tsyn context descriptor
+ * @tx_ring: ptr to the ring to send
+ * @skb: ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ *
+ * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
+ **/
+static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, u64 *cd_type_cmd_tso_mss)
+{
+ struct i40e_pf *pf;
+
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ return 0;
+
+ /* Tx timestamps cannot be sampled when doing TSO */
+ if (tx_flags & I40E_TX_FLAGS_TSO)
+ return 0;
+
+ /* only timestamp the outbound packet if the user has requested it and
+ * we are not already transmitting a packet to be timestamped
+ */
+ pf = i40e_netdev_to_pf(tx_ring->netdev);
+ if (pf->ptp_tx && !pf->ptp_tx_skb) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ pf->ptp_tx_skb = skb_get(skb);
+ } else {
+ return 0;
+ }
+
+ *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
+ I40E_TXD_CTX_QW1_CMD_SHIFT;
+
+ pf->ptp_tx_start = jiffies;
+ schedule_work(&pf->ptp_tx_work);
+
return 1;
}
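i40e_tsyn() only claims the single timestamp slot (pf->ptp_tx_skb) and defers the register readout to the ptp_tx_work item it schedules. A hedged sketch of what that deferred side is expected to do; skb_tstamp_tx(), ns_to_ktime() and dev_kfree_skb_any() are standard kernel APIs, while i40e_read_tx_tstamp() is a hypothetical helper and not part of this patch:

static void i40e_ptp_tx_work_sketch(struct i40e_pf *pf)
{
	struct skb_shared_hwtstamps shhwtstamps = {};
	u64 ns = i40e_read_tx_tstamp(pf);	/* hypothetical: read the latched Tx timestamp */

	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);	/* deliver to the socket error queue */
	dev_kfree_skb_any(pf->ptp_tx_skb);		/* drop the reference taken by skb_get() */
	pf->ptp_tx_skb = NULL;				/* allow the next timestamp request */
}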
@@ -1660,6 +1766,7 @@ dma_error:
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Memory barrier before checking head and tail */
smp_mb();
/* Check again in a case another CPU has just made room available. */
@@ -1741,6 +1848,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
__be16 protocol;
u32 td_cmd = 0;
u8 hdr_len = 0;
+ int tsyn;
int tso;
if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
return NETDEV_TX_BUSY;
@@ -1756,9 +1864,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
first = &tx_ring->tx_bi[tx_ring->next_to_use];
/* setup IPv4/IPv6 offloads */
- if (protocol == __constant_htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= I40E_TX_FLAGS_IPV4;
- else if (protocol == __constant_htons(ETH_P_IPV6))
+ else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
@@ -1771,6 +1879,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
skb_tx_timestamp(skb);
+ tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
+
+ if (tsyn)
+ tx_flags |= I40E_TX_FLAGS_TSYN;
+
/* always enable CRC insertion offload */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index db55d9947f15..d5349698e513 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -25,11 +24,13 @@
*
******************************************************************************/
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
+
/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
-#define I40E_MAX_ITR 0x07FF
-#define I40E_MIN_ITR 0x0001
-#define I40E_ITR_USEC_RESOLUTION 2
+#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
+#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
#define I40E_MAX_IRATE 0x03F
#define I40E_MIN_IRATE 0x001
#define I40E_IRATE_USEC_RESOLUTION 4
@@ -49,10 +50,43 @@
#define I40E_QUEUE_END_OF_LIST 0x7FF
-#define I40E_ITR_NONE 3
-#define I40E_RX_ITR 0
-#define I40E_TX_ITR 1
-#define I40E_PE_ITR 2
+/* this enum matches hardware bits and is meant to be used by DYN_CTLN
+ * registers and QINT registers or more generally anywhere in the manual
+ * mentioning ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
+ * register but instead is a special value meaning "don't update" ITR0/1/2.
+ */
+enum i40e_dyn_idx_t {
+ I40E_IDX_ITR0 = 0,
+ I40E_IDX_ITR1 = 1,
+ I40E_IDX_ITR2 = 2,
+ I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
+};
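These index values map directly onto the ITR_INDX field of the dynamic interrupt-control registers; writing I40E_ITR_NONE there enables a vector without updating any ITR. A short sketch using the same register and field macros that appear in the old i40e_free_vfs() re-enable path further down in this patch:

	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_PFINT_DYN_CTL0_INTENA_MASK |
	     I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	     (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
	i40e_flush(hw);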
+
+/* these are indexes into ITRN registers */
+#define I40E_RX_ITR I40E_IDX_ITR0
+#define I40E_TX_ITR I40E_IDX_ITR1
+#define I40E_PE_ITR I40E_IDX_ITR2
+
+/* Supported RSS offloads */
+#define I40E_DEFAULT_RSS_HENA ( \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
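I40E_DEFAULT_RSS_HENA is a 64-bit enable mask that gets split across two 32-bit hash-enable registers when programmed. A minimal sketch, assuming the PF hash-enable registers are I40E_PFQF_HENA(0)/(1) (an assumption; they are not shown in this patch):

	u64 hena = I40E_DEFAULT_RSS_HENA;

	/* low and high halves go to consecutive hash-enable registers */
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
	i40e_flush(hw);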
/* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */
#define I40E_RXBUFFER_2048 2048
@@ -102,6 +136,7 @@
#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
+#define I40E_TX_FLAGS_TSYN (u32)(1 << 8)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -139,8 +174,8 @@ struct i40e_tx_queue_stats {
struct i40e_rx_queue_stats {
u64 non_eop_descs;
- u64 alloc_rx_page_failed;
- u64 alloc_rx_buff_failed;
+ u64 alloc_page_failed;
+ u64 alloc_buff_failed;
};
enum i40e_ring_state_t {
@@ -214,6 +249,8 @@ struct i40e_ring {
u8 atr_sample_rate;
u8 atr_count;
+ unsigned long last_rx_timestamp;
+
bool ring_active; /* is ring online or not */
/* stats structs */
@@ -262,3 +299,4 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
+#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index f3f22b20f02f..181a825d3160 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -36,38 +35,31 @@
#include "i40e_lan_hmc.h"
/* Device IDs */
-#define I40E_SFP_XL710_DEVICE_ID 0x1572
-#define I40E_SFP_X710_DEVICE_ID 0x1573
-#define I40E_QEMU_DEVICE_ID 0x1574
-#define I40E_KX_A_DEVICE_ID 0x157F
-#define I40E_KX_B_DEVICE_ID 0x1580
-#define I40E_KX_C_DEVICE_ID 0x1581
-#define I40E_KX_D_DEVICE_ID 0x1582
-#define I40E_QSFP_A_DEVICE_ID 0x1583
-#define I40E_QSFP_B_DEVICE_ID 0x1584
-#define I40E_QSFP_C_DEVICE_ID 0x1585
-#define I40E_VF_DEVICE_ID 0x154C
-#define I40E_VF_HV_DEVICE_ID 0x1571
-
-#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0000
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_SFP_X710 0x1573
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_KX_D 0x1582
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
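i40e_is_40G_device() is a plain device-ID predicate covering the three QSFP parts. Typical use, where hw->device_id is the PCI device ID cached by the driver and the callee name is hypothetical:

	if (i40e_is_40G_device(hw->device_id))
		i40e_setup_40g_link(hw);	/* hypothetical 40G-only path */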
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
#define I40E_MAX_CHAINED_RX_BUFFERS 5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
-/* Check whether address is multicast. This is little-endian specific check.*/
-#define I40E_IS_MULTICAST(address) \
- (bool)(((u8 *)(address))[0] & ((u8)0x01))
-
-/* Check whether an address is broadcast. */
-#define I40E_IS_BROADCAST(address) \
- ((((u8 *)(address))[0] == ((u8)0xff)) && \
- (((u8 *)(address))[1] == ((u8)0xff)))
-
/* Switch from mc to the 2usec global time (this is the GTIME resolution) */
#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
@@ -75,8 +67,6 @@
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
-#define I40E_ETH_LENGTH_OF_ADDRESS 6
-
/* Data type manipulation macros. */
#define I40E_DESC_UNUSED(R) \
@@ -85,9 +75,10 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
/* bitfields for Tx queue mapping in QTX_CTL */
#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_VM_QUEUE 0x1
#define I40E_QTX_CTL_PF_QUEUE 0x2
-/* debug masks */
+/* debug masks - set these bits in hw->debug_mask to control output */
enum i40e_debug_mask {
I40E_DEBUG_INIT = 0x00000001,
I40E_DEBUG_RELEASE = 0x00000002,
@@ -101,10 +92,10 @@ enum i40e_debug_mask {
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
- I40E_DEBUG_AQ_MESSAGE = 0x01000000, /* for i40e_debug() */
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
- I40E_DEBUG_AQ_COMMAND = 0x06000000, /* for i40e_debug_aq() */
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
I40E_DEBUG_AQ = 0x0F000000,
I40E_DEBUG_USER = 0xF0000000,
@@ -134,6 +125,7 @@ enum i40e_media_type {
I40E_MEDIA_TYPE_BASET,
I40E_MEDIA_TYPE_BACKPLANE,
I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_DA,
I40E_MEDIA_TYPE_VIRTUAL
};
@@ -171,6 +163,7 @@ struct i40e_link_status {
u8 link_info;
u8 an_info;
u8 ext_info;
+ u8 loopback;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
};
@@ -236,9 +229,9 @@ struct i40e_hw_capabilities {
struct i40e_mac_info {
enum i40e_mac_type type;
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
u16 max_fcoeq;
};
@@ -500,18 +493,26 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
- I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 3 BITS */
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
- I40E_RX_DESC_STATUS_LPBK_SHIFT = 14
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18
};
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x7UL << \
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
+ I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
enum i40e_rx_desc_fltstat_values {
I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
@@ -547,28 +548,32 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks {
/* Packet type non-ip values */
enum i40e_rx_l2_ptype {
- I40E_RX_PTYPE_L2_RESERVED = 0,
- I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
- I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
- I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
- I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
- I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
- I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
- I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
- I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- I40E_RX_PTYPE_L2_ARP = 11,
- I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
- I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
- I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
- I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
- I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
- I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
};
struct i40e_rx_ptype_decoded {
@@ -852,10 +857,7 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Value 0-25 are reserved for future use */
- I40E_FILTER_PCTYPE_IPV4_TEREDO_UDP = 26,
- I40E_FILTER_PCTYPE_IPV6_TEREDO_UDP = 27,
- I40E_FILTER_PCTYPE_NONF_IPV4_1588_UDP = 28,
+ /* Note: Values 0-28 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
@@ -864,8 +866,7 @@ enum i40e_filter_pctype {
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Value 37 is reserved for future use */
- I40E_FILTER_PCTYPE_NONF_IPV6_1588_UDP = 38,
+ /* Note: Values 37-38 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
@@ -877,7 +878,8 @@ enum i40e_filter_pctype {
/* Note: Value 47 is reserved for future use */
I40E_FILTER_PCTYPE_FCOE_OX = 48,
I40E_FILTER_PCTYPE_FCOE_RX = 49,
- /* Note: Value 50-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
};
@@ -1014,6 +1016,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_NVM_CONTROL_WORD 0x00
#define I40E_SR_EMP_MODULE_PTR 0x0F
#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
#define I40E_SR_NVM_EETRACK_LO 0x2D
#define I40E_SR_NVM_EETRACK_HI 0x2E
@@ -1138,17 +1141,4 @@ enum i40e_reset_type {
I40E_RESET_GLOBR = 2,
I40E_RESET_EMPR = 3,
};
-
-/* IEEE 802.1AB LLDP Agent Variables from NVM */
-#define I40E_NVM_LLDP_CFG_PTR 0xF
-struct i40e_lldp_variables {
- u16 length;
- u16 adminstatus;
- u16 msgfasttx;
- u16 msgtxinterval;
- u16 txparams;
- u16 timers;
- u16 crc8;
-};
-
#endif /* _I40E_TYPE_H_ */
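The I40E_DEV_ID_* rename keeps the device-ID macros in the shape consumed by the driver's PCI ID table. An illustrative (not verbatim) sketch of such a table entry list:

static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	/* required last entry */
	{0, }
};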
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index cc6654f1dac7..22a1b69cd646 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -142,7 +141,7 @@ struct i40e_virtchnl_vsi_resource {
u16 num_queue_pairs;
enum i40e_vsi_type vsi_type;
u16 qset_handle;
- u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 default_mac_addr[ETH_ALEN];
};
/* VF offload flags */
#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
@@ -265,7 +264,7 @@ struct i40e_virtchnl_queue_select {
*/
struct i40e_virtchnl_ether_addr {
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
u8 pad[2];
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 07596982a477..b9d1c1c8ca5a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -70,7 +69,7 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
struct i40e_pf *pf = vf->pf;
- return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
+ return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;
}
/***********************vf resource mgmt routines*****************/
@@ -102,130 +101,6 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
}
/**
- * i40e_ctrl_vsi_tx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
- * @vsi_queue_id: vsi relative queue index
- * @ctrl: control flags
- *
- * enable/disable/enable check/disable check
- **/
-static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
- u16 vsi_queue_id,
- enum i40e_queue_ctrl ctrl)
-{
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- bool writeback = false;
- u16 pf_queue_id;
- int ret = 0;
- u32 reg;
-
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
- reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));
-
- switch (ctrl) {
- case I40E_QUEUE_CTRL_ENABLE:
- reg |= I40E_QTX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_ENABLECHECK:
- ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
- break;
- case I40E_QUEUE_CTRL_DISABLE:
- reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_DISABLECHECK:
- ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLE:
- reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLECHECK:
- ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- if (!ret) {
- reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
- writeback = true;
- }
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- if (writeback) {
- wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
- i40e_flush(hw);
- }
-
- return ret;
-}
-
-/**
- * i40e_ctrl_vsi_rx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
- * @vsi_queue_id: vsi relative queue index
- * @ctrl: control flags
- *
- * enable/disable/enable check/disable check
- **/
-static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
- u16 vsi_queue_id,
- enum i40e_queue_ctrl ctrl)
-{
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- bool writeback = false;
- u16 pf_queue_id;
- int ret = 0;
- u32 reg;
-
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
- reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));
-
- switch (ctrl) {
- case I40E_QUEUE_CTRL_ENABLE:
- reg |= I40E_QRX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_ENABLECHECK:
- ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
- break;
- case I40E_QUEUE_CTRL_DISABLE:
- reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_DISABLECHECK:
- ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLE:
- reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLECHECK:
- ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- if (!ret) {
- reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
- writeback = true;
- }
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- if (writeback) {
- wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
- i40e_flush(hw);
- }
-
- return ret;
-}
-
-/**
* i40e_config_irq_link_list
* @vf: pointer to the vf info
* @vsi_idx: index of VSI in PF struct
@@ -260,23 +135,17 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
goto irq_list_done;
}
tempmap = vecmap->rxq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
linklistmap |= (1 <<
(I40E_VIRTCHNL_SUPPORTED_QTYPES *
vsi_queue_id));
- vsi_queue_id =
- find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1);
}
tempmap = vecmap->txq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
linklistmap |= (1 <<
(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
+ 1));
- vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- vsi_queue_id + 1);
}
next_q = find_first_bit(&linklistmap,
@@ -307,7 +176,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
(I40E_MAX_VSI_QP *
I40E_VIRTCHNL_SUPPORTED_QTYPES),
next_q + 1);
- if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ if (next_q <
+ (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
@@ -499,7 +369,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
struct i40e_mac_filter *f = NULL;
struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
struct i40e_vsi *vsi;
int ret = 0;
@@ -513,14 +382,32 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
goto error_alloc_vsi_res;
}
if (type == I40E_VSI_SRIOV) {
+ u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
vf->lan_vsi_index = vsi->idx;
vf->lan_vsi_id = vsi->id;
dev_info(&pf->pdev->dev,
- "LAN VSI index %d, VSI id %d\n",
- vsi->idx, vsi->id);
+ "VF %d assigned LAN VSI index %d, VSI id %d\n",
+ vf->vf_id, vsi->idx, vsi->id);
+ /* If the port VLAN has been configured and then the
+ * VF driver was removed then the VSI port VLAN
+ * configuration was destroyed. Check if there is
+ * a port VLAN and restore the VSI configuration if
+ * needed.
+ */
+ if (vf->port_vlan_id)
+ i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
- 0, true, false);
+ vf->port_vlan_id, true, false);
+ if (!f)
+ dev_info(&pf->pdev->dev,
+ "Could not allocate VF MAC addr\n");
+ f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
+ true, false);
+ if (!f)
+ dev_info(&pf->pdev->dev,
+ "Could not allocate VF broadcast filter\n");
}
+
if (!f) {
dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
ret = -ENOMEM;
@@ -534,150 +421,11 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
goto error_alloc_vsi_res;
}
- /* accept bcast pkts. by default */
- ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
- vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
- ret = -EINVAL;
- }
-
error_alloc_vsi_res:
return ret;
}
/**
- * i40e_reset_vf
- * @vf: pointer to the vf structure
- * @flr: VFLR was issued or not
- *
- * reset the vf
- **/
-int i40e_reset_vf(struct i40e_vf *vf, bool flr)
-{
- int ret = -ENOENT;
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- u32 reg, reg_idx, msix_vf;
- bool rsd = false;
- u16 pf_queue_id;
- int i, j;
-
- /* warn the VF */
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);
-
- clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
-
- /* PF triggers VFR only when VF requests, in case of
- * VFLR, HW triggers VFR
- */
- if (!flr) {
- /* reset vf using VPGEN_VFRTRIG reg */
- reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
- i40e_flush(hw);
- }
-
- /* poll VPGEN_VFRSTAT reg to make sure
- * that reset is complete
- */
- for (i = 0; i < 4; i++) {
- /* vf reset requires driver to first reset the
- * vf & than poll the status register to make sure
- * that the requested op was completed
- * successfully
- */
- udelay(10);
- reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
- if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
- rsd = true;
- break;
- }
- }
-
- if (!rsd)
- dev_err(&pf->pdev->dev, "VF reset check timeout %d\n",
- vf->vf_id);
-
- /* fast disable qps */
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
- ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLE);
- ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLE);
- }
-
- /* Queue enable/disable requires driver to
- * first reset the vf & than poll the status register
- * to make sure that the requested op was completed
- * successfully
- */
- udelay(10);
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
- ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLECHECK);
- if (ret)
- dev_info(&pf->pdev->dev,
- "Queue control check failed on Tx queue %d of VSI %d VF %d\n",
- vf->lan_vsi_index, j, vf->vf_id);
- ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLECHECK);
- if (ret)
- dev_info(&pf->pdev->dev,
- "Queue control check failed on Rx queue %d of VSI %d VF %d\n",
- vf->lan_vsi_index, j, vf->vf_id);
- }
-
- /* clear the irq settings */
- msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
- for (i = 0; i < msix_vf; i++) {
- /* format is same for both registers */
- if (0 == i)
- reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
- else
- reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
- (vf->vf_id))
- + (i - 1));
- reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
- I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
- wr32(hw, reg_idx, reg);
- i40e_flush(hw);
- }
- /* disable interrupts so the VF starts in a known state */
- for (i = 0; i < msix_vf; i++) {
- /* format is same for both registers */
- if (0 == i)
- reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
- else
- reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
- (vf->vf_id))
- + (i - 1));
- wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
- i40e_flush(hw);
- }
-
- /* set the defaults for the rqctl & tqctl registers */
- reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
- I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
- wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
- wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
- }
-
- /* clear the reset bit in the VPGEN_VFRTRIG reg */
- reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
- reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
- /* tell the VF the reset is done */
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
- i40e_flush(hw);
-
- return ret;
-}
-
-/**
* i40e_enable_vf_mappings
* @vf: pointer to the vf info
*
@@ -756,6 +504,9 @@ static void i40e_disable_vf_mappings(struct i40e_vf *vf)
static void i40e_free_vf_res(struct i40e_vf *vf)
{
struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg_idx, reg;
+ int i, msix_vf;
/* free vsi & disconnect it from the parent uplink */
if (vf->lan_vsi_index) {
@@ -763,6 +514,34 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
vf->lan_vsi_index = 0;
vf->lan_vsi_id = 0;
}
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1;
+ /* disable interrupts so the VF starts in a known state */
+ for (i = 0; i < msix_vf; i++) {
+ /* format is same for both registers */
+ if (0 == i)
+ reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
+ else
+ reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ i40e_flush(hw);
+ }
+
+ /* clear the irq settings */
+ for (i = 0; i < msix_vf; i++) {
+ /* format is same for both registers */
+ if (0 == i)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ wr32(hw, reg_idx, reg);
+ i40e_flush(hw);
+ }
/* reset some of the state varibles keeping
* track of the resources
*/
@@ -804,6 +583,111 @@ error_alloc:
return ret;
}
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_MASK 0x20
+/**
+ * i40e_quiesce_vf_pci
+ * @vf: pointer to the vf structure
+ *
+ * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
+ * if the transactions never clear.
+ **/
+static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ int vf_abs_id, i;
+ u32 reg;
+
+ vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ wr32(hw, I40E_PF_PCI_CIAA,
+ VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
+ for (i = 0; i < 100; i++) {
+ reg = rd32(hw, I40E_PF_PCI_CIAD);
+ if ((reg & VF_TRANS_PENDING_MASK) == 0)
+ return 0;
+ udelay(1);
+ }
+ return -EIO;
+}
+
+/**
+ * i40e_reset_vf
+ * @vf: pointer to the vf structure
+ * @flr: VFLR was issued or not
+ *
+ * reset the vf
+ **/
+void i40e_reset_vf(struct i40e_vf *vf, bool flr)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool rsd = false;
+ int i;
+ u32 reg;
+
+ /* warn the VF */
+ clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+ /* In the case of a VFLR, the HW has already reset the VF and we
+ * just need to clean up, so don't hit the VFRTRIG register.
+ */
+ if (!flr) {
+ /* reset vf using VPGEN_VFRTRIG reg */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ i40e_flush(hw);
+ }
+
+ if (i40e_quiesce_vf_pci(vf))
+ dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
+ vf->vf_id);
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ for (i = 0; i < 100; i++) {
+ /* vf reset requires driver to first reset the
+ * vf & then poll the status register to make sure
+ * that the requested op was completed
+ * successfully
+ */
+ udelay(10);
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
+ rsd = true;
+ break;
+ }
+ }
+
+ if (!rsd)
+ dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+ vf->vf_id);
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
+ /* clear the reset bit in the VPGEN_VFRTRIG reg */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+
+ /* On initial reset, we won't have any queues */
+ if (vf->lan_vsi_index == 0)
+ goto complete_reset;
+
+ i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
+complete_reset:
+ /* reallocate vf resources to reset the VSI state */
+ i40e_free_vf_res(vf);
+ mdelay(10);
+ i40e_alloc_vf_res(vf);
+ i40e_enable_vf_mappings(vf);
+
+ /* tell the VF the reset is done */
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+ i40e_flush(hw);
+}
+
/**
* i40e_vfs_are_assigned
* @pf: pointer to the pf structure
@@ -816,7 +700,7 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
struct pci_dev *vfdev;
/* loop through all the VFs to see if we own any that are assigned */
- vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID , NULL);
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF , NULL);
while (vfdev) {
/* if we don't own it we don't care */
if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
@@ -826,12 +710,82 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
}
vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- I40E_VF_DEVICE_ID,
+ I40E_DEV_ID_VF,
vfdev);
}
return false;
}
+#ifdef CONFIG_PCI_IOV
+
+/**
+ * i40e_enable_pf_switch_lb
+ * @pf: pointer to the pf structure
+ *
+ * enable switch loop back or die - no point in a return value
+ **/
+static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int aq_ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s couldn't get pf vsi config, err %d, aq_err %d\n",
+ __func__, aq_ret, pf->hw.aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: update vsi switch failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+#endif
+
+/**
+ * i40e_disable_pf_switch_lb
+ * @pf: pointer to the pf structure
+ *
+ * disable switch loop back or die - no point in a return value
+ **/
+static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int aq_ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s couldn't get pf vsi config, err %d, aq_err %d\n",
+ __func__, aq_ret, pf->hw.aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: update vsi switch failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
/**
* i40e_free_vfs
@@ -842,17 +796,20 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
void i40e_free_vfs(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- int i;
+ u32 reg_idx, bit_idx;
+ int i, tmp, vf_id;
if (!pf->vf)
return;
/* Disable interrupt 0 so we don't try to handle the VFLR. */
- wr32(hw, I40E_PFINT_DYN_CTL0, 0);
- i40e_flush(hw);
+ i40e_irq_dynamic_disable_icr0(pf);
+ mdelay(10); /* let any messages in transit get finished up */
/* free up vf resources */
- for (i = 0; i < pf->num_alloc_vfs; i++) {
+ tmp = pf->num_alloc_vfs;
+ pf->num_alloc_vfs = 0;
+ for (i = 0; i < tmp; i++) {
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
i40e_free_vf_res(&pf->vf[i]);
/* disable qp mappings */
@@ -861,20 +818,25 @@ void i40e_free_vfs(struct i40e_pf *pf)
kfree(pf->vf);
pf->vf = NULL;
- pf->num_alloc_vfs = 0;
- if (!i40e_vfs_are_assigned(pf))
+ if (!i40e_vfs_are_assigned(pf)) {
pci_disable_sriov(pf->pdev);
- else
+ /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
+ * work correctly when SR-IOV gets re-enabled.
+ */
+ for (vf_id = 0; vf_id < tmp; vf_id++) {
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+ }
+ i40e_disable_pf_switch_lb(pf);
+ } else {
dev_warn(&pf->pdev->dev,
"unable to disable SR-IOV because VFs are assigned.\n");
+ }
/* Re-enable interrupt 0. */
- wr32(hw, I40E_PFINT_DYN_CTL0,
- I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
- (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
- i40e_flush(hw);
+ i40e_irq_dynamic_enable_icr0(pf);
}
#ifdef CONFIG_PCI_IOV
@@ -890,6 +852,9 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
struct i40e_vf *vfs;
int i, ret = 0;
+ /* Disable interrupt 0 so we don't try to handle the VFLR. */
+ i40e_irq_dynamic_disable_icr0(pf);
+
ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
if (ret) {
dev_err(&pf->pdev->dev,
@@ -913,11 +878,8 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */
set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
-
- ret = i40e_alloc_vf_res(&vfs[i]);
- i40e_reset_vf(&vfs[i], true);
- if (ret)
- break;
+ /* vf resources get allocated during reset */
+ i40e_reset_vf(&vfs[i], false);
/* enable vf vplan_qtable mappings */
i40e_enable_vf_mappings(&vfs[i]);
@@ -925,10 +887,13 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
pf->vf = vfs;
pf->num_alloc_vfs = num_alloc_vfs;
+ i40e_enable_pf_switch_lb(pf);
err_alloc:
if (ret)
i40e_free_vfs(pf);
err_iov:
+ /* Re-enable interrupt 0. */
+ i40e_irq_dynamic_enable_icr0(pf);
return ret;
}
@@ -1009,6 +974,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
+ int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
i40e_status aq_ret;
/* single place to detect unsuccessful return values */
@@ -1028,8 +994,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
vf->num_valid_msgs++;
}
- aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
- msg, msglen, NULL);
+ aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
if (aq_ret) {
dev_err(&pf->pdev->dev,
"Unable to send the message to VF %d aq_err %d\n",
@@ -1144,12 +1110,10 @@ err:
* unlike other virtchnl messages, pf driver
* doesn't send the response back to the vf
**/
-static int i40e_vc_reset_vf_msg(struct i40e_vf *vf)
+static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
- return -ENOENT;
-
- return i40e_reset_vf(vf, false);
+ if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ i40e_reset_vf(vf, false);
}
/**
@@ -1291,27 +1255,21 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
/* lookout for the invalid queue index */
tempmap = map->rxq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
vsi_queue_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- vsi_queue_id + 1);
}
tempmap = map->txq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
vsi_queue_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- vsi_queue_id + 1);
}
i40e_config_irq_link_list(vf, vsi_id, map);
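The loop conversion above is purely an idiom change: for_each_set_bit() visits exactly the bits the old find_first_bit()/find_next_bit() pair walked. A side-by-side sketch (use_queue() is a hypothetical consumer):

	unsigned long tempmap = map->rxq_map;
	u16 q;

	/* old style */
	q = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (q < I40E_MAX_VSI_QP) {
		use_queue(q);
		q = find_next_bit(&tempmap, I40E_MAX_VSI_QP, q + 1);
	}

	/* new style: same bits, less boilerplate */
	for_each_set_bit(q, &tempmap, I40E_MAX_VSI_QP)
		use_queue(q);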
@@ -1337,8 +1295,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
- unsigned long tempmap;
- u16 queue_id;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
@@ -1354,66 +1310,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- /* Poll the status register to make sure that the
- * requested op was completed successfully
- */
- udelay(10);
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on RX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on TX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
+ if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
+ aq_ret = I40E_ERR_TIMEOUT;
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
@@ -1436,8 +1334,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
- unsigned long tempmap;
- u16 queue_id;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
@@ -1453,65 +1349,8 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- /* Poll the status register to make sure that the
- * requested op was completed successfully
- */
- udelay(10);
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on RX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on TX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
+ if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
+ aq_ret = I40E_ERR_TIMEOUT;
error_param:
/* send the response to the vf */
@@ -1554,7 +1393,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
i40e_update_eth_stats(vsi);
- memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));
+ stats = vsi->eth_stats;
error_param:
/* send the response back to the vf */
@@ -1563,6 +1402,40 @@ error_param:
}
/**
+ * i40e_check_vf_permission
+ * @vf: pointer to the vf info
+ * @macaddr: pointer to the MAC Address being checked
+ *
+ * Check if the VF has permission to add or delete unicast MAC address
+ * filters and return error code -EPERM if not. Then check if the
+ * address filter requested is broadcast or zero and if so return
+ * an invalid MAC address error code.
+ **/
+static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
+{
+ struct i40e_pf *pf = vf->pf;
+ int ret = 0;
+
+ if (is_broadcast_ether_addr(macaddr) ||
+ is_zero_ether_addr(macaddr)) {
+ dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
+ !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
+ /* If the host VMM administrator has set the VF MAC address
+ * administratively via the ndo_set_vf_mac command then deny
+ * permission to the VF to add or delete unicast MAC addresses.
+ * The VF may request to set the MAC address filter already
+ * assigned to it so do not return an error in that case.
+ */
+ dev_err(&pf->pdev->dev,
+ "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
+ ret = -EPERM;
+ }
+ return ret;
+}
+
+/**
* i40e_vc_add_mac_addr_msg
* @vf: pointer to the vf info
* @msg: pointer to the msg buffer
@@ -1577,24 +1450,20 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = al->vsi_id;
- i40e_status aq_ret = 0;
+ i40e_status ret = 0;
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ ret = I40E_ERR_PARAM;
goto error_param;
}
for (i = 0; i < al->num_elements; i++) {
- if (is_broadcast_ether_addr(al->list[i].addr) ||
- is_zero_ether_addr(al->list[i].addr)) {
- dev_err(&pf->pdev->dev, "invalid VF MAC addr %pMAC\n",
- al->list[i].addr);
- aq_ret = I40E_ERR_PARAM;
+ ret = i40e_check_vf_permission(vf, al->list[i].addr);
+ if (ret)
goto error_param;
- }
}
vsi = pf->vsi[vsi_id];
@@ -1603,7 +1472,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_mac_filter *f;
f = i40e_find_mac(vsi, al->list[i].addr, true, false);
- if (f) {
+ if (!f) {
if (i40e_is_vsi_in_vlan(vsi))
f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
true, false);
@@ -1615,7 +1484,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
if (!f) {
dev_err(&pf->pdev->dev,
"Unable to add VF MAC filter\n");
- aq_ret = I40E_ERR_PARAM;
+ ret = I40E_ERR_PARAM;
goto error_param;
}
}
@@ -1627,7 +1496,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
- aq_ret);
+ ret);
}
/**
@@ -1645,15 +1514,25 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = al->vsi_id;
- i40e_status aq_ret = 0;
+ i40e_status ret = 0;
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ ret = I40E_ERR_PARAM;
goto error_param;
}
+
+ for (i = 0; i < al->num_elements; i++) {
+ if (is_broadcast_ether_addr(al->list[i].addr) ||
+ is_zero_ether_addr(al->list[i].addr)) {
+ dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
+ al->list[i].addr);
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ goto error_param;
+ }
+ }
vsi = pf->vsi[vsi_id];
/* delete addresses from the list */
@@ -1668,7 +1547,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
- aq_ret);
+ ret);
}
/**
@@ -1777,30 +1656,6 @@ error_param:
}
/**
- * i40e_vc_fcoe_msg
- * @vf: pointer to the vf info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * called from the vf for the fcoe msgs
- **/
-static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
-{
- i40e_status aq_ret = 0;
-
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- aq_ret = I40E_ERR_NOT_IMPLEMENTED;
-
-error_param:
- /* send the response to the vf */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret);
-}
-
-/**
* i40e_vc_validate_vf_msg
* @vf: pointer to the vf info
* @msg: pointer to the msg buffer
@@ -1920,19 +1775,24 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
- struct i40e_vf *vf = &(pf->vf[vf_id]);
struct i40e_hw *hw = &pf->hw;
+ int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+ struct i40e_vf *vf;
int ret;
pf->vf_aq_requests++;
+ if (local_vf_id >= pf->num_alloc_vfs)
+ return -EINVAL;
+ vf = &(pf->vf[local_vf_id]);
/* perform basic checks on the msg */
ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
if (ret) {
- dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
+ dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
+ local_vf_id, v_opcode, msglen);
return ret;
}
- wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+
switch (v_opcode) {
case I40E_VIRTCHNL_OP_VERSION:
ret = i40e_vc_get_version_msg(vf);
@@ -1941,7 +1801,8 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
ret = i40e_vc_get_vf_resources_msg(vf);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
- ret = i40e_vc_reset_vf_msg(vf);
+ i40e_vc_reset_vf_msg(vf);
+ ret = 0;
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
@@ -1973,13 +1834,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
case I40E_VIRTCHNL_OP_GET_STATS:
ret = i40e_vc_get_stats_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_FCOE:
- ret = i40e_vc_fcoe_msg(vf, msg, msglen);
- break;
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
- dev_err(&pf->pdev->dev,
- "Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
+ dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n",
+ v_opcode, local_vf_id);
ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
I40E_ERR_NOT_IMPLEMENTED);
break;
@@ -2015,19 +1873,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* clear the bit in GLGEN_VFLRSTAT */
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
- if (i40e_reset_vf(vf, true))
- dev_err(&pf->pdev->dev,
- "Unable to reset the VF %d\n", vf_id);
- /* free up vf resources to destroy vsi state */
- i40e_free_vf_res(vf);
-
- /* allocate new vf resources with the default state */
- if (i40e_alloc_vf_res(vf))
- dev_err(&pf->pdev->dev,
- "Unable to allocate VF resources %d\n",
- vf_id);
-
- i40e_enable_vf_mappings(vf);
+ i40e_reset_vf(vf, true);
}
}
@@ -2183,6 +2029,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
}
memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
+ vf->pf_set_mac = true;
dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
ret = 0;
@@ -2229,6 +2076,20 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
+ if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi))
+ dev_err(&pf->pdev->dev,
+ "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
+ vf_id);
+
+ /* Check for condition where there was already a port VLAN ID
+ * filter set and now it is being deleted by setting it to zero.
+ * Before deleting all the old VLAN filters we must add new ones
+ * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
+ * MAC addresses deleted.
+ */
+ if (!(vlan_id || qos) && vsi->info.pvid)
+ ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
+
if (vsi->info.pvid) {
/* kill old VLAN */
ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
@@ -2243,7 +2104,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
ret = i40e_vsi_add_pvid(vsi,
vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
else
- i40e_vlan_stripping_disable(vsi);
+ i40e_vsi_remove_pvid(vsi);
if (vlan_id) {
dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
@@ -2257,12 +2118,20 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
vsi->back->hw.aq.asq_last_status);
goto error_pvid;
}
+ /* Kill non-vlan MAC filters - ignore error return since
+ * there might not be any non-vlan MAC filters.
+ */
+ i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
}
if (ret) {
dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
goto error_pvid;
}
+ /* The Port VLAN needs to be saved across resets the same as the
+ * default LAN MAC address.
+ */
+ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
ret = 0;
error_pvid:
@@ -2294,7 +2163,6 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_mac_filter *f, *ftmp;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_vf *vf;
@@ -2318,11 +2186,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
ivi->vf = vf_id;
- /* first entry of the list is the default ethernet address */
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
- break;
- }
+ memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
ivi->tx_rate = 0;
ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 360382cf3040..cc1feee36e12 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -82,6 +81,8 @@ struct i40e_vf {
struct i40e_virtchnl_ether_addr default_lan_addr;
struct i40e_virtchnl_ether_addr default_fcoe_addr;
+ u16 port_vlan_id;
+ bool pf_set_mac; /* The VMM admin set the VF MAC address */
/* VSI indices - actual VSI pointers are maintained in the PF structure
* When assigned, these will be non-zero, because VSI 0 is always
@@ -104,7 +105,7 @@ int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
-int i40e_reset_vf(struct i40e_vf *vf, bool flr);
+void i40e_reset_vf(struct i40e_vf *vf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
/* vf configuration related iplink handlers */
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
new file mode 100644
index 000000000000..e09be37a07a8
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -0,0 +1,33 @@
+################################################################################
+#
+# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+# Copyright(c) 2013 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+## Makefile for the Intel(R) 40GbE VF driver
+#
+#
+
+obj-$(CONFIG_I40EVF) += i40evf.o
+
+i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
+ i40e_txrx.o i40e_common.o i40e_adminq.o
+
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
new file mode 100644
index 000000000000..5470ce95936e
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -0,0 +1,927 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+static void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ if (hw->mac.type == I40E_MAC_VF) {
+ hw->aq.asq.tail = I40E_VF_ATQT1;
+ hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.asq.len = I40E_VF_ATQLEN1;
+ hw->aq.arq.tail = I40E_VF_ARQT1;
+ hw->aq.arq.head = I40E_VF_ARQH1;
+ hw->aq.arq.len = I40E_VF_ARQLEN1;
+ } else {
+ hw->aq.asq.tail = I40E_PF_ATQT;
+ hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.asq.len = I40E_PF_ATQLEN;
+ hw->aq.arq.tail = I40E_PF_ARQT;
+ hw->aq.arq.head = I40E_PF_ARQH;
+ hw->aq.arq.len = I40E_PF_ARQLEN;
+ }
+}
+
+/**
+ * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ i40e_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_asq_cmd_details)));
+ if (ret_code) {
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ i40e_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* This is in accordance with the Admin queue design; there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * i40e_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * i40e_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+static void i40e_config_asq_regs(struct i40e_hw *hw)
+{
+ if (hw->mac.type == I40E_MAC_VF) {
+ /* configure the transmit queue */
+ wr32(hw, I40E_VF_ATQBAH1,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_VF_ATQBAL1,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ } else {
+ /* configure the transmit queue */
+ wr32(hw, I40E_PF_ATQBAH,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAL,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+ }
+}
+
+/**
+ * i40e_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event queue)
+ **/
+static void i40e_config_arq_regs(struct i40e_hw *hw)
+{
+ if (hw->mac.type == I40E_MAC_VF) {
+ /* configure the receive queue */
+ wr32(hw, I40E_VF_ARQBAH1,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_VF_ARQBAL1,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ } else {
+ /* configure the receive queue */
+ wr32(hw, I40E_PF_ARQBAH,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAL,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+ }
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+}
+
+/**
+ * i40e_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_asq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_asq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_asq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ i40e_config_asq_regs(hw);
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_arq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_arq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_arq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ i40e_config_arq_regs(hw);
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.asq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+
+ /* make sure lock is available */
+ mutex_lock(&hw->aq.asq_mutex);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_asq_bufs(hw);
+
+ mutex_unlock(&hw->aq.asq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.arq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+
+ /* make sure lock is available */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_arq_bufs(hw);
+
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40evf_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+i40e_status i40evf_init_adminq(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ /* initialize locks */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
+ /* Set up register offsets */
+ i40e_adminq_init_regs(hw);
+
+ /* allocate the ASQ */
+ ret_code = i40e_init_asq(hw);
+ if (ret_code)
+ goto init_adminq_destroy_locks;
+
+ /* allocate the ARQ */
+ ret_code = i40e_init_arq(hw);
+ if (ret_code)
+ goto init_adminq_free_asq;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_asq:
+ i40e_shutdown_asq(hw);
+init_adminq_destroy_locks:
+
+init_adminq_exit:
+ return ret_code;
+}
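A minimal caller-side sketch, not part of this patch, of the setup the comment above requires before i40evf_init_adminq(); the queue depths and buffer sizes are illustrative placeholders, not values taken from this driver:

	i40e_status err;

	hw->aq.num_asq_entries = 32;	/* send queue depth (illustrative) */
	hw->aq.num_arq_entries = 32;	/* receive queue depth (illustrative) */
	hw->aq.asq_buf_size = 512;	/* indirect command buffer size */
	hw->aq.arq_buf_size = 512;	/* event buffer size */
	err = i40evf_init_adminq(hw);
	if (err)
		return err;		/* I40E_ERR_CONFIG or an allocation failure */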
+
+/**
+ * i40evf_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (i40evf_check_asq_alive(hw))
+ i40evf_aq_queue_shutdown(hw, true);
+
+ i40e_shutdown_asq(hw);
+ i40e_shutdown_arq(hw);
+
+ /* destroy the locks */
+
+ return ret_code;
+}
+
+/**
+ * i40e_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * returns the number of free desc
+ **/
+static u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+ struct i40e_adminq_ring *asq = &(hw->aq.asq);
+ struct i40e_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct i40e_aq_desc desc_cb;
+ struct i40e_aq_desc *desc;
+
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ if (details->callback) {
+ I40E_ADMINQ_CALLBACK cb_func =
+ (I40E_ADMINQ_CALLBACK)details->callback;
+ desc_cb = *desc;
+ cb_func(hw, &desc_cb);
+ }
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)details, 0,
+ sizeof(struct i40e_asq_cmd_details));
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return I40E_DESC_UNUSED(asq);
+}
+
+/**
+ * i40evf_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+bool i40evf_asq_done(struct i40e_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+
+}
+
+/**
+ * i40evf_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It runs the queue, cleans the queue, etc
+ **/
+i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ i40e_status status = 0;
+ struct i40e_dma_mem *dma_buff = NULL;
+ struct i40e_asq_cmd_details *details;
+ struct i40e_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+
+ if (hw->aq.asq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_exit;
+ }
+
+ details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ *details = *cmd_details;
+
+ /* If the cmd_details are defined copy the cookie. The
+ * cpu_to_le32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ cpu_to_le32(upper_32_bits(details->cookie));
+ desc->cookie_low =
+ cpu_to_le32(lower_32_bits(details->cookie));
+ }
+ } else {
+ memset(details, 0, sizeof(struct i40e_asq_cmd_details));
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~cpu_to_le16(details->flags_dis);
+ desc->flags |= cpu_to_le16(details->flags_ena);
+
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = I40E_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = I40E_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW; the function returns the
+ * number of desc available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (i40e_clean_asq(hw) == 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ *desc_on_ring = *desc;
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ memcpy(dma_buff->va, buff, buff_size);
+ desc_on_ring->datalen = cpu_to_le16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+ u32 delay_len = 10;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (i40evf_asq_done(hw))
+ break;
+ /* ugh! delay while spin_lock */
+ udelay(delay_len);
+ total_delay += delay_len;
+ } while (total_delay < I40E_ASQ_CMD_TIMEOUT);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (i40evf_asq_done(hw)) {
+ *desc = *desc_on_ring;
+ if (buff != NULL)
+ memcpy(buff, dma_buff->va, buff_size);
+ retval = le16_to_cpu(desc->retval);
+ if (retval != 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ status = 0;
+ else
+ status = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ }
+
+ /* update the error if time out occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+
+asq_send_command_error:
+ mutex_unlock(&hw->aq.asq_mutex);
+asq_send_command_exit:
+ return status;
+}
+
+/**
+ * i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
+}
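A sketch, under the assumption that the caller already holds a valid hw struct, of a direct (buffer-less) command built with this helper and posted through i40evf_asq_send_command() above; the get_version opcode and response layout come from i40e_adminq_cmd.h, while the error handling shown is illustrative:

	struct i40e_aqc_get_version *resp;
	struct i40e_aq_desc desc;
	i40e_status status;

	i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
	status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
	if (status)
		return status;
	/* completion data overlays the descriptor's params area */
	resp = (struct i40e_aqc_get_version *)&desc.params.raw;
	hw->aq.fw_maj_ver = le16_to_cpu(resp->fw_major);
	hw->aq.api_maj_ver = le16_to_cpu(resp->api_major);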
+
+/**
+ * i40evf_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
+{
+ i40e_status ret_code = 0;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* take the lock before we start messing with the ring */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ /* set next_to_use to head */
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Queue is empty.\n");
+ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+ i40evf_debug_aq(hw,
+ I40E_DEBUG_AQ_COMMAND,
+ (void *)desc,
+ hw->aq.arq.r.arq_bi[desc_idx].va);
+
+ flags = le16_to_cpu(desc->flags);
+ if (flags & I40E_AQ_FLAG_ERR) {
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ } else {
+ e->desc = *desc;
+ datalen = le16_to_cpu(desc->datalen);
+ e->msg_size = min(datalen, e->msg_size);
+ if (e->msg_buf != NULL && (e->msg_size != 0))
+ memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_size);
+ }
+
+ /* Restore the original datalen and buffer address in the desc;
+ * FW updates datalen to indicate the event message
+ * size
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
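A sketch of how a caller might drain the receive queue with this routine, looping on the returned 'pending' count; the buffer size and allocation flags are illustrative choices, not taken from this patch:

	struct i40e_arq_event_info event;
	u16 pending = 0;

	event.msg_buf = kzalloc(512, GFP_KERNEL);	/* illustrative size */
	if (!event.msg_buf)
		return;
	do {
		event.msg_size = 512;	/* reset each pass; the call trims it to the event length */
		if (i40evf_clean_arq_element(hw, &event, &pending))
			break;		/* queue empty or descriptor error */
		/* act on event.desc and event.msg_buf here */
	} while (pending);
	kfree(event.msg_buf);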
+
+void i40evf_resume_aq(struct i40e_hw *hw)
+{
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ i40e_config_asq_regs(hw);
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
new file mode 100644
index 000000000000..8f72c31d95cc
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -0,0 +1,106 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i) \
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+ struct i40e_virt_mem dma_head; /* space for dma structures */
+ struct i40e_dma_mem desc_buf; /* descriptor ring memory */
+ struct i40e_virt_mem cmd_buf; /* command buffer memory */
+
+ union {
+ struct i40e_dma_mem *asq_bi;
+ struct i40e_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+ void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+};
+
+#define I40E_ADMINQ_DETAILS(R, i) \
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+ struct i40e_aq_desc desc;
+ u16 msg_size;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+ struct i40e_adminq_ring arq; /* receive queue */
+ struct i40e_adminq_ring asq; /* send queue */
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+
+ struct mutex asq_mutex; /* Send queue lock */
+ struct mutex arq_mutex; /* Receive queue lock */
+
+ /* last status values on send and receive queues */
+ enum i40e_admin_queue_err asq_last_status;
+ enum i40e_admin_queue_err arq_last_status;
+};
+
+/* general information */
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
+
+void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
new file mode 100644
index 000000000000..f7cea1bca38d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -0,0 +1,2153 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0001
+#define I40E_FW_API_VERSION_A0_MINOR 0x0000
+
+struct i40e_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+
+/* error codes */
+enum i40e_admin_queue_err {
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+ /* aq commands */
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+
+ /* resource ownership */
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
+
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
+
+ i40e_aqc_opc_set_cppm_configuration = 0x0103,
+ i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
+
+ /* LAA */
+ i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* internal switch commands */
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+ i40e_aqc_opc_set_storm_control_config = 0x0280,
+ i40e_aqc_opc_get_storm_control_config = 0x0281,
+
+ /* DCB commands */
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
+
+ /* TX scheduler */
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands*/
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_reset = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+
+ /* NVM commands */
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+
+ /* virtualization commands */
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+
+ /* Tunnel commands */
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+
+ /* Async Events */
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
+
+ /* debug commands */
+ i40e_aqc_opc_debug_get_deviceid = 0xFF00,
+ i40e_aqc_opc_debug_set_mode = 0xFF01,
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
+ i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+ i40e_aqc_opc_debug_modify_internals = 0xFF09,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size; otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
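For illustration, with a hypothetical 16-byte command structure the check expands to a harmless enum; if the size were wrong the divisor would be zero and the build would fail:

	struct i40e_demo_cmd { u8 raw[16]; };	/* hypothetical structure */
	I40E_CHECK_CMD_LENGTH(i40e_demo_cmd);
	/* expands to:
	 * enum i40e_static_assert_enum_i40e_demo_cmd {
	 *	i40e_static_assert_i40e_demo_cmd =
	 *		16 / ((sizeof(struct i40e_demo_cmd) == 16) ? 1 : 0)
	 * };
	 */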
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+ __le32 rom_ver;
+ __le32 fw_build;
+ __le16 fw_major;
+ __le16 fw_minor;
+ __le16 api_major;
+ __le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+
+/* Send driver version (indirect 0x0002) */
+struct i40e_aqc_driver_version {
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct i40e_aqc_request_resource {
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+ u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0008
+#define I40E_AQ_ARP_UNSUP_CTL 0x0010
+#define I40E_AQ_ARP_ENA 0x0020
+#define I40E_AQ_ARP_ADD_IPV4 0x0040
+#define I40E_AQ_ARP_DEL_IPV4 0x0080
+ __le16 table_id;
+ __le32 pfpm_proxyfc;
+ __le32 ip_addr;
+ u8 mac_addr[6];
+};
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0100
+#define I40E_AQ_NS_PROXY_DEL_0 0x0200
+#define I40E_AQ_NS_PROXY_ADD_1 0x0400
+#define I40E_AQ_NS_PROXY_DEL_1 0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
+};
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+ __le16 command_flags;
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
+};
+
+/* Manage MAC Address Read Command (indirect 0x0107) */
+struct i40e_aqc_mac_address_read {
+ __le16 command_flags;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_ADDR_VALID_MASK 0xf0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+ u8 pf_lan_mac[6];
+ u8 pf_san_mac[6];
+ u8 port_mac[6];
+ u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+ __le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an SEID and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
+};
+
+struct i40e_aqc_switch_config_element_resp {
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
+};
+
+/* Get Switch Configuration (indirect 0x0200)
+ * an array of elements is returned in the response buffer;
+ * the first entry in the array is the header, the remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
+};
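A hedged sketch of walking that buffer; element[1] is the usual flexible-array idiom, so the real count comes from header.num_reported, not the declared array size:

/* Sketch only: iterate the elements returned by Get Switch Configuration. */
static void example_walk_switch_config(struct i40e_aqc_get_switch_config_resp *buf)
{
	u16 i, num = le16_to_cpu(buf->header.num_reported);

	for (i = 0; i < num; i++) {
		struct i40e_aqc_switch_config_element_resp *ele = &buf->element[i];

		if (ele->element_type == I40E_AQ_SW_ELEM_TYPE_VSI)
			; /* e.g. remember le16_to_cpu(ele->seid) */
	}
}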
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
+};
+
+/* Add VSI (indirect 0x0210)
+ * this indirect command uses struct i40e_aqc_vsi_properties_data
+ * as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x0211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+ __le16 seid;
+ __le16 vsi_number;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+ /* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress table */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
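A sketch of how the 128-byte buffer above pairs with the Add/Update/Get VSI descriptor: the caller sets only the valid_sections bits for the sections it actually filled and passes the buffer as the indirect payload (the flag values below are illustrative assumptions, not recommendations):

/* Sketch only: update just the VLAN section of a VSI, leaving the rest alone. */
static void example_fill_vsi_vlan_section(struct i40e_aqc_vsi_properties_data *ctxt)
{
	memset(ctxt, 0, sizeof(*ctxt));
	ctxt->valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	ctxt->port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
}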
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+ /* reserved for update; for add also encodes error if rc == ENOSPC */
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+ u8 enable_tcs;
+ u8 reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+ u8 reserved[6];
+ __le16 switch_seid;
+ /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used for the command for most vlan commands */
+struct i40e_aqc_macvlan {
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT)
+ /* response section */
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_macvlan_completion {
+ __le16 perfect_mac_used;
+ __le16 perfect_mac_free;
+ __le16 unicast_hash_free;
+ __le16 multicast_hash_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
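Another hedged sketch of the descriptor/buffer split: the i40e_aqc_macvlan descriptor carries the count and SEID, while the indirect buffer carries one element per address, for example:

/* Sketch only: one perfect-match filter that ignores the VLAN tag. */
static void example_fill_macvlan_element(struct i40e_aqc_add_macvlan_element_data *e,
					 const u8 mac[6])
{
	memset(e, 0, sizeof(*e));
	memcpy(e->mac_addr, mac, sizeof(e->mac_addr));
	e->flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
			       I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
}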
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
+ /* reply section */
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+ __le16 vlan_tag;
+ u8 vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
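valid_flags and promiscuous_flags act as a mask/value pair; a sketch, assuming firmware leaves modes whose valid bit is clear untouched, of toggling only unicast promiscuity:

/* Sketch only: change the unicast promiscuous mode and nothing else. */
static void example_set_unicast_promisc(struct i40e_aqc_set_vsi_promiscuous_modes *cmd,
					u16 seid, bool enable)
{
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
	cmd->promiscuous_flags =
		cpu_to_le16(enable ? I40E_AQC_SET_VSI_PROMISC_UNICAST : 0);
	cmd->seid = cpu_to_le16(seid & I40E_AQC_VSI_PROM_CMD_SEID_MASK);
}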
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ u8 reserved[12];
+};
+
+/* Add multicast E-Tag (direct 0x0257)
+ * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
+ * and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_oveb_cloud,
+ * and the generic direct completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+ __le16 queue;
+ u8 reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ } ipaddr;
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007
+/* 0x0000 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+/* 0x0002 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+/* 0x0007 reserved */
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
+ /* response section */
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
+};
+
+struct i40e_aqc_remove_cloud_filters_completion {
+ __le16 perfect_ovlan_used;
+ __le16 perfect_ovlan_free;
+ __le16 vlan_used;
+ __le16 vlan_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4,5) do not use an external buffer.
+ * take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+ __le16 seid;
+ __le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+ __le16 num_entries;
+ __le16 destination; /* VSI for add, rule id for delete */
+ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
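The note above about rule types 4 and 5 is easy to trip over; a small helper sketch, drawn only from that comment, makes the buffer rule explicit:

/* Sketch only: VPORT and VLAN mirror rules pass a buffer of 2-byte ids,
 * the ALL_INGRESS/ALL_EGRESS types (4, 5) must not.
 */
static bool example_mirror_rule_needs_buffer(u16 rule_type)
{
	return rule_type != I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS &&
	       rule_type != I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
}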
+
+/* Set Storm Control Configuration (direct 0x0280)
+ * Get Storm Control Configuration (direct 0x0281)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_set_get_storm_control_config {
+ __le32 broadcast_threshold;
+ __le32 multicast_threshold;
+ __le32 control_flags;
+#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
+#define I40E_AQC_STORM_CONTROL_MDICW 0x02
+#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
+#define I40E_AQC_STORM_CONTROL_BDICW 0x08
+#define I40E_AQC_STORM_CONTROL_BIDU 0x10
+#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
+#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
+ I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
+
+/* DCB 0x03xx */
+
+/* PFC Ignore (direct 0x0301)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+ __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writes back QS handles here */
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
+};
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
+};
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable Physical Port ETS (indirect 0x0413)
+ * Modify Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_flags;
+ u8 reserved2[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved3[96];
+};
+
+/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure Switching Component Bandwidth Allocation per Tc
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
+};
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
+};
+
+/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
+};
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+
+enum i40e_aq_phy_type {
+ I40E_PHY_TYPE_SGMII = 0x0,
+ I40E_PHY_TYPE_1000BASE_KX = 0x1,
+ I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
+ I40E_PHY_TYPE_10GBASE_KR = 0x3,
+ I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
+ I40E_PHY_TYPE_XAUI = 0x5,
+ I40E_PHY_TYPE_XFI = 0x6,
+ I40E_PHY_TYPE_SFI = 0x7,
+ I40E_PHY_TYPE_XLAUI = 0x8,
+ I40E_PHY_TYPE_XLPPI = 0x9,
+ I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
+ I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_100BASE_TX = 0x11,
+ I40E_PHY_TYPE_1000BASE_T = 0x12,
+ I40E_PHY_TYPE_10GBASE_T = 0x13,
+ I40E_PHY_TYPE_10GBASE_SR = 0x14,
+ I40E_PHY_TYPE_10GBASE_LR = 0x15,
+ I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
+ I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
+ I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
+ I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
+ I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
+ I40E_PHY_TYPE_MAX
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT 0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
+#define I40E_LINK_SPEED_10GB_SHIFT 0x3
+#define I40E_LINK_SPEED_40GB_SHIFT 0x4
+#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+
+enum i40e_aq_link_speed {
+ I40E_LINK_SPEED_UNKNOWN = 0,
+ I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+};
+
+struct i40e_aqc_module_desc {
+ u8 oui[3];
+ u8 reserved1;
+ u8 part_number[16];
+ u8 revision[4];
+ u8 reserved2[8];
+};
+
+struct i40e_aq_get_phy_abilities_resp {
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
+#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
+#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
+#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
+#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as above in all */
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK 0x08
+#define I40E_AQ_PHY_ENABLE_AN 0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x605) */
+struct i40e_aqc_set_link_restart_an {
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 reserved[5];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
+
+/* Set event mask command (direct 0x613) */
+struct i40e_aqc_set_phy_int_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Reset command (0x0622) */
+struct i40e_aqc_set_phy_reset {
+ u8 reset_flags;
+#define I40E_AQ_PHY_RESET_REQUEST 0x02
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+
+enum i40e_aq_phy_reg_type {
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+ __le16 cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+ __le32 mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE 0
+#define I40E_AQ_ALTERNATE_MODE_OEM 1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Apply MIB changes (0x0A07)
+ * uses the generic struct as it contains no data
+ */
+
+/* Add Udp Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 header_len; /* in DWords, 1 to 15 */
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x0
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x2
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x3
+ u8 variable_udp_length;
+#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH 0x0
+#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH 0x1
+ u8 udp_key_index;
+#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_TUNNEL_KEY_INDEX_NGE 0x1
+#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP 0x2
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
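A final sketch for the tunnel command; 4789 is the IANA-assigned VXLAN port and the header length is a placeholder, both assumptions rather than values taken from this patch:

/* Sketch only: request a VXLAN filter on the conventional UDP port. */
static void example_fill_vxlan_tunnel(struct i40e_aqc_add_udp_tunnel *cmd)
{
	cmd->udp_port = cpu_to_le16(4789);		/* assumed IANA default */
	cmd->header_len = 14;				/* in DWords, placeholder */
	cmd->protocol_type = I40E_AQC_TUNNEL_TYPE_VXLAN;
	cmd->variable_udp_length = I40E_AQC_TUNNEL_FIXED_UDP_LENGTH;
	cmd->udp_key_index = I40E_AQC_TUNNEL_KEY_INDEX_VXLAN;
}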
+
+/* remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 pf_filters;
+ u8 total_filters;
+ u8 reserved2[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved;
+ u8 tunnels_free;
+ u8 reserved1[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+
+/* tunnel key structure 0x0B10 */
+
+struct i40e_aqc_tunnel_key_structure_A0 {
+ __le16 key1_off;
+ __le16 key1_len;
+ __le16 key2_off;
+ __le16 key2_len;
+ __le16 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 resreved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0);
+
+struct i40e_aqc_tunnel_key_structure {
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ u8 param_value2[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+ __le32 reserved;
+ __le32 address;
+ __le32 value_high;
+ __le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+ __le32 address;
+ __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+ __le32 address;
+ __le32 value;
+ __le32 clear_mask;
+ __le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX 0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
+#define I40E_AQ_CLUSTER_ID_TXSCHED 2
+#define I40E_AQ_CLUSTER_ID_HMC 3
+#define I40E_AQ_CLUSTER_ID_MAC0 4
+#define I40E_AQ_CLUSTER_ID_MAC1 5
+#define I40E_AQ_CLUSTER_ID_MAC2 6
+#define I40E_AQ_CLUSTER_ID_MAC3 7
+#define I40E_AQ_CLUSTER_ID_DCB 8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+#define I40E_AQ_CLUSTER_ID_ALTRAM 11
+
+struct i40e_aqc_debug_dump_internals {
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
new file mode 100644
index 000000000000..d8654fb9e525
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
@@ -0,0 +1,55 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+ i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ i40e_mem_asq_buf = 1,
+ i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
+ i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
+ i40e_mem_pd = 5, /* Page Descriptor */
+ i40e_mem_bp = 6, /* Backing Page - 4KB */
+ i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
+
+#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
new file mode 100644
index 000000000000..7b13953b28c4
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -0,0 +1,254 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+{
+ i40e_status status = 0;
+
+ if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (hw->device_id) {
+ case I40E_DEV_ID_SFP_XL710:
+ case I40E_DEV_ID_SFP_X710:
+ case I40E_DEV_ID_QEMU:
+ case I40E_DEV_ID_KX_A:
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_KX_C:
+ case I40E_DEV_ID_KX_D:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
+ hw->mac.type = I40E_MAC_XL710;
+ break;
+ case I40E_DEV_ID_VF:
+ case I40E_DEV_ID_VF_HV:
+ hw->mac.type = I40E_MAC_VF;
+ break;
+ default:
+ hw->mac.type = I40E_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * i40evf_debug_aq
+ * @hw: debug mask related to admin queue
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+ void *buffer)
+{
+ struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u8 *aq_buffer = (u8 *)buffer;
+ u32 data[4];
+ u32 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ i40e_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
+ aq_desc->retval);
+ i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ aq_desc->cookie_high, aq_desc->cookie_low);
+ i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ aq_desc->params.internal.param0,
+ aq_desc->params.internal.param1);
+ i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ aq_desc->params.external.addr_high,
+ aq_desc->params.external.addr_low);
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ memset(data, 0, sizeof(data));
+ i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+ for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
+ data[((i % 16) / 4)] |=
+ ((u32)aq_buffer[i]) << (8 * (i % 4));
+ if ((i % 16) == 15) {
+ i40e_debug(hw, mask,
+ "\t0x%04X %08X %08X %08X %08X\n",
+ i - 15, data[0], data[1], data[2],
+ data[3]);
+ memset(data, 0, sizeof(data));
+ }
+ }
+ if ((i % 16) != 0)
+ i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
+ i - (i % 16), data[0], data[1], data[2],
+ data[3]);
+ }
+}
+
+/**
+ * i40evf_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the admin send queue is enabled, else false.
+ **/
+bool i40evf_check_asq_alive(struct i40e_hw *hw)
+{
+ return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+}
+
+/**
+ * i40evf_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+ if (msglen) {
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF
+ | I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(msglen);
+ }
+ if (!cmd_details) {
+ struct i40e_asq_cmd_details details;
+ memset(&details, 0, sizeof(details));
+ details.async = true;
+ cmd_details = &details;
+ }
+ status = i40evf_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg,
+ msglen, cmd_details);
+ return status;
+}
+
+/**
+ * i40e_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg)
+{
+ struct i40e_virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ vsi_res = &msg->vsi_res[0];
+
+ hw->dev_caps.num_vsis = msg->num_vsis;
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+ hw->dev_caps.dcb = msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.fcoe = (msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+ for (i = 0; i < msg->num_vsis; i++) {
+ if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+ memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr,
+ ETH_ALEN);
+ memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+ ETH_ALEN);
+ }
+ vsi_res++;
+ }
+}
+
+/**
+ * i40e_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+i40e_status i40e_vf_reset(struct i40e_hw *hw)
+{
+ return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+ 0, NULL, 0, NULL);
+}
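/*
 * Editorial note, not part of the patch: the three routines above form the
 * VF side of the PF/VF channel -- i40e_aq_send_msg_to_pf() carries virtchnl
 * messages over the admin queue, i40e_vf_parse_hw_config() digests the PF's
 * resource reply, and i40e_vf_reset() requests a fresh start.  A hedged
 * sketch of how a caller might tie the reset path together follows;
 * vf_request_reset() is a hypothetical helper name, and the adminq routines
 * are the ones declared in i40e_prototype.h further down.
 */
static i40e_status vf_request_reset(struct i40e_hw *hw)
{
	i40e_status status;

	/* fire-and-forget: the PF never answers a VF_RESET message */
	status = i40e_vf_reset(hw);
	if (status)
		return status;

	/* the admin queue must be torn down right after the request ... */
	i40evf_shutdown_adminq(hw);

	/* ... and may be brought back up once the VF is ready again */
	return i40evf_init_adminq(hw);
}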
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
new file mode 100644
index 000000000000..cb97b3eed440
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -0,0 +1,238 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD 512
+#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE 4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define I40E_FIRST_VF_FPM_ID 16
+
+struct i40e_hmc_obj_info {
+ u64 base; /* base addr in FPM */
+ u32 max_cnt; /* max count available for this hmc func */
+ u32 cnt; /* count of objects driver actually wants to create */
+ u64 size; /* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+ I40E_SD_TYPE_INVALID = 0,
+ I40E_SD_TYPE_PAGED = 1,
+ I40E_SD_TYPE_DIRECT = 2
+};
+
+struct i40e_hmc_bp {
+ enum i40e_sd_entry_type entry_type;
+ struct i40e_dma_mem addr; /* populate to be used by hw */
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+ struct i40e_hmc_bp bp;
+ u32 sd_index;
+ bool valid;
+};
+
+struct i40e_hmc_pd_table {
+ struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+ struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw bookkeeping */
+ struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+ enum i40e_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct i40e_hmc_pd_table pd_table;
+ struct i40e_hmc_bp bp;
+ } u;
+};
+
+struct i40e_hmc_sd_table {
+ struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+ u32 signature;
+ /* equal to the PCI function number for the PF; dynamically allocated for VFs */
+ u8 hmc_fn_id;
+ u16 first_sd_index; /* index of the first available SD */
+
+ /* hmc objects */
+ struct i40e_hmc_obj_info *hmc_obj;
+ struct i40e_virt_mem hmc_obj_virt_mem;
+ struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: physical address of the backing page
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
+{ \
+ u32 val1, val2, val3; \
+ val1 = (u32)(upper_32_bits(pa)); \
+ val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
+ (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
+{ \
+ u32 val2, val3; \
+ val2 = (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
+ val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ wr32((hw), I40E_PFHMC_PDINV, \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
+ wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the SD index limit (one past the last SD used)
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{ \
+ u64 fpm_addr, fpm_limit; \
+ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (index); \
+ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+ *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
+ *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(sd_limit) += 1; \
+}
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{ \
+ u64 fpm_adr, fpm_limit; \
+ fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (idx); \
+ fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+ *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
+ *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(pd_limit) += 1; \
+}
+i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+
+i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index);
+i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
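/*
 * Editorial illustration, not part of the patch: the *_INDEX_LIMIT macros
 * above are plain integer arithmetic -- an object's FPM byte range is
 * divided by the 2 MB direct backing-page size (or the 4 KB paged size) and
 * the limit is bumped by one so that [idx, limit) covers the range.  The
 * stand-alone program below reproduces the SD math with made-up numbers;
 * none of the values come from the patch.
 */
#include <stdio.h>
#include <stdint.h>

#define BP_SIZE 0x200000ULL	/* 2 MB, same as I40E_HMC_DIRECT_BP_SIZE */

int main(void)
{
	uint64_t base = 0x100000, obj_size = 128;	/* made-up FPM layout */
	uint64_t index = 30000, cnt = 5000;		/* made-up object range */
	uint64_t fpm_addr = base + obj_size * index;
	uint64_t fpm_limit = fpm_addr + obj_size * cnt;
	uint32_t sd_idx = (uint32_t)(fpm_addr / BP_SIZE);
	uint32_t sd_limit = (uint32_t)((fpm_limit - 1) / BP_SIZE) + 1;

	/* prints the half-open SD range [sd_idx, sd_limit) */
	printf("sd_idx=%u sd_limit=%u\n", sd_idx, sd_limit);
	return 0;
}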
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
new file mode 100644
index 000000000000..17e42ca26d0b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -0,0 +1,165 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data */
+struct i40e_hmc_obj_rxq {
+ u16 head;
+ u8 cpuid;
+ u64 base;
+ u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+ u8 dbuff;
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+ u8 hbuff;
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 fc_ena;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u16 rxmax;
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u8 lrxqthresh;
+};
+
+/* Tx queue context data */
+struct i40e_hmc_obj_txq {
+ u16 head;
+ u8 new_context;
+ u64 base;
+ u8 fc_ena;
+ u8 timesync_ena;
+ u8 fd_ena;
+ u8 alt_vlan_ena;
+ u16 thead_wb;
+ u16 cpuid;
+ u8 head_wb_ena;
+ u16 qlen;
+ u8 tphrdesc_ena;
+ u8 tphrpacket_ena;
+ u8 tphwdesc_ena;
+ u64 head_wb_addr;
+ u32 crc;
+ u16 rdylist;
+ u8 rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+ I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purposes only */
+struct i40e_hmc_obj_fcoe_cntx {
+ u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+ u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+ I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
+ I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
+ I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
+ I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
+ I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+ I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+ I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ 128
+#define I40E_HMC_OBJ_SIZE_RXQ 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
+
+enum i40e_hmc_lan_rsrc_type {
+ I40E_HMC_LAN_FULL = 0,
+ I40E_HMC_LAN_TX = 1,
+ I40E_HMC_LAN_RX = 2,
+ I40E_HMC_FCOE_CTX = 3,
+ I40E_HMC_FCOE_FILT = 4,
+ I40E_HMC_LAN_MAX = 5
+};
+
+enum i40e_hmc_model {
+ I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+ I40E_HMC_MODEL_DIRECT_ONLY = 1,
+ I40E_HMC_MODEL_PAGED_ONLY = 2,
+ I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ enum i40e_sd_entry_type entry_type;
+ u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+};
+
+i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+
+#endif /* _I40E_LAN_HMC_H_ */
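/*
 * Editorial outline, not part of the patch: the prototypes above imply a
 * LAN HMC lifecycle of reserve -> configure -> program queue contexts ->
 * shutdown.  The sketch below shows one plausible ordering; the queue
 * counts and context fields are placeholders, and
 * example_bring_up_lan_hmc() is a hypothetical helper name.
 */
static i40e_status example_bring_up_lan_hmc(struct i40e_hw *hw)
{
	struct i40e_hmc_obj_txq txq_ctx = { .qlen = 512, .new_context = 1 };
	i40e_status status;

	/* reserve HMC objects: 4 Tx queues, 4 Rx queues, no FCoE */
	status = i40e_init_lan_hmc(hw, 4, 4, 0, 0);
	if (status)
		return status;

	/* let the backing-store code choose direct vs. paged mode */
	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_PREFERRED);
	if (status)
		return status;

	/* program the context of Tx queue 0 (field values are placeholders) */
	return i40e_set_lan_tx_queue_context(hw, 0, &txq_ctx);
}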
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
new file mode 100644
index 000000000000..622f373b745d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -0,0 +1,72 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/tcp.h>
+#include <linux/pci.h>
+
+/* get readq/writeq support for 32 bit kernels, use the low-first version */
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
+/* This file is the glue between the shared code and the
+ * actual OS primitives.
+ */
+
+#define hw_dbg(hw, S, A...) do {} while (0)
+
+#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg) readl((a)->hw_addr + (reg))
+
+#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define i40e_flush(a) readl((a)->hw_addr + I40E_VFGEN_RSTAT)
+
+/* memory allocation tracking */
+struct i40e_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+} __packed;
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+ i40evf_allocate_dma_mem_d(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m)
+
+struct i40e_virt_mem {
+ void *va;
+ u32 size;
+} __packed;
+#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s)
+#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m)
+
+#define i40e_debug(h, m, s, ...) i40evf_debug_d(h, m, s, ##__VA_ARGS__)
+extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+ __attribute__ ((format(gnu_printf, 3, 4)));
+
+typedef enum i40e_status_code i40e_status;
+#endif /* _I40E_OSDEP_H_ */
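/*
 * Editorial illustration, not part of the patch: the accessors above are
 * thin wrappers around MMIO on the BAR mapped at hw->hw_addr.  A write
 * followed by i40e_flush() is posted by reading back I40E_VFGEN_RSTAT, the
 * same register the flush macro uses.  example_posted_write() is a
 * hypothetical helper name.
 */
static inline void example_posted_write(struct i40e_hw *hw, u32 reg, u32 val)
{
	wr32(hw, reg, val);	/* writel() to hw->hw_addr + reg */
	i40e_flush(hw);		/* readl() of I40E_VFGEN_RSTAT posts the write */
}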
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
new file mode 100644
index 000000000000..7841573a58c9
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -0,0 +1,84 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures.  They live here mostly
+ * because they are needed even before init has happened, and they
+ * assist in the early SW and FW setup.
+ */
+
+/* adminq functions */
+i40e_status i40evf_init_adminq(struct i40e_hw *hw);
+i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+bool i40evf_asq_done(struct i40e_hw *hw);
+
+/* debug function for adminq */
+void i40evf_debug_aq(struct i40e_hw *hw,
+ enum i40e_debug_mask mask,
+ void *desc,
+ void *buffer);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+void i40evf_resume_aq(struct i40e_hw *hw);
+bool i40evf_check_asq_alive(struct i40e_hw *hw);
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading);
+
+i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+
+/* prototype for functions used for SW locks */
+
+/* i40e_common for VF drivers */
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg);
+i40e_status i40e_vf_reset(struct i40e_hw *hw);
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
+#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
new file mode 100644
index 000000000000..30af953cf106
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -0,0 +1,4667 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+
+#define I40E_PF_ARQBAH 0x00080180
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK (0x3FF << I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_PF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK (0x1 << I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK (0x3FF << I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK (0x3FF << I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_PF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK (0x1 << I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK (0x3FF << I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAH_MAX_INDEX 127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAL_MAX_INDEX 127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQH_MAX_INDEX 127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK (0x3FF << I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQLEN_MAX_INDEX 127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQT_MAX_INDEX 127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK (0x3FF << I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAH_MAX_INDEX 127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAL_MAX_INDEX 127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQH_MAX_INDEX 127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK (0x3FF << I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQLEN_MAX_INDEX 127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQT_MAX_INDEX 127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK (0x3FF << I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK (0xFF << I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK (0xFFF << I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL(_pf) (0x0010C300 + ((_pf) * 4))/* _pf=0..15 */
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK (0xFFF << I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK (0x7 << I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK (0x3 << I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK (0x3 << I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i, _pf) (0x0010C100 + ((_i) * 4) + ((_pf) * 16))/* _i=0...3 _pf=0..15 */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK (0xFFFFFFFF << I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT(_pf) (0x0010C380 + ((_pf) * 4))/* _pf=0..15 */
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK (0xFFFF << I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK (0xFFFFFFFF << I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK (0x3 << I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK (0xFFFF << I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK (0x3 << I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK (0xF << I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK (0x7 << I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK (0x1 << I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK (0xFFFF << I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK (0x7 << I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK (0x1 << I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK (0x1 << I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK (0x1 << I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK (0x1 << I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK (0xFF << I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK (0xF << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK (0xFF << I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK (0x7F << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK (0x1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK (0x1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK (0xFF << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK (0x7 << I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK (0xFF << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK (0x1FFF << I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK (0xFF << I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK (0xFF << I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TDPUC 0x00044100
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK (0xFFFF << I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TFWSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TFWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TFWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK (0x3FFF << I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK (0xF << I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK (0x1 << I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
+#define I40E_GLFCOE_RCTL_ICRC_MASK (0x1 << I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK (0x3FFF << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK (0xFF << I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT 9
+#define I40E_GL_FWSTS_FWRI_MASK (0x1 << I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK (0xFF << I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK (0x1 << I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK (0x3 << I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK (0x3 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK (0x7 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK (0xF << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK (0x3 << I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK (0x1F << I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK (0x1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK (0x1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
+#define I40E_GLGEN_I2CCMD_DATA_MASK (0xFFFF << I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK (0xFF << I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK (0x7 << I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
+#define I40E_GLGEN_I2CCMD_OP_MASK (0x1 << I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
+#define I40E_GLGEN_I2CCMD_RESET_MASK (0x1 << I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT 29
+#define I40E_GLGEN_I2CCMD_R_MASK (0x1 << I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT 31
+#define I40E_GLGEN_I2CCMD_E_MASK (0x1 << I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK (0x1F << I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK (0x7 << I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK (0x1 << I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK (0x1FFFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK (0x1 << I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK (0x3FFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSCA_MAX_INDEX 3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_MDIADD_MASK (0xFFFF << I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_DEVADD_MASK (0x1F << I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_PHYADD_MASK (0x1F << I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK (0x3 << I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK (0x3 << I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK (0x1 << I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK (0x1 << I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSRWD_MAX_INDEX 3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK (0x1F << I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK (0xFF << I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_PE_ENA 0x000B81A0
+#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0
+#define I40E_GLGEN_PE_ENA_PE_ENA_MASK (0x1 << I40E_GLGEN_PE_ENA_PE_ENA_SHIFT)
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK (0x3 << I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK (0x3 << I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK (0x3 << I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK (0x3 << I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK (0x3F << I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK (0x3F << I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RSTENA_EMP 0x000B818C
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK (0x1 << I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK (0x1 << I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK (0x1 << I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
+#define I40E_GLGEN_STAT_DCBEN_MASK (0x1 << I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT 3
+#define I40E_GLGEN_STAT_VTEN_MASK (0x1 << I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
+#define I40E_GLGEN_STAT_FCOEN_MASK (0x1 << I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
+#define I40E_GLGEN_STAT_EVBEN_MASK (0x1 << I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK (0xFFFFFFFF << I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK (0xFFFFFFFF << I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK (0x1 << I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK (0x1 << I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000
+#define I40E_PFGEN_STATE_PFPEEN_SHIFT 0
+#define I40E_PFGEN_STATE_PFPEEN_MASK (0x1 << I40E_PFGEN_STATE_PFPEEN_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
+#define I40E_PFGEN_STATE_PFFCEN_MASK (0x1 << I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK (0x1 << I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
+#define I40E_PFGEN_STATE_PFSCEN_MASK (0x1 << I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK (0x1 << I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK (0x1 << I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK (0x1 << I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK (0x1 << I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK (0x1 << I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4))
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK (0xFFFFF << I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK (0xF << I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK (0x7FFFFF << I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK (0xFFFF << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK (0xF << I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK (0x1FFF << I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK (0x1FFFF << I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK (0xF << I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK (0x1FFFFFFF << I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK (0x3FFF << I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK (0xF << I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK (0x7FF << I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK (0x7FF << I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200c
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK (0xF << I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK (0xFF << I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK (0x7FF << I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK (0xF << I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK (0x1FFFF << I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK (0x7 << I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK (0xF << I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK (0xF << I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK (0x1FFFFF << I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK (0x7FFFFF << I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK (0xF << I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLCNT(_i) (0x000C5500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK (0xF << I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK (0xF << I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK (0xFFFF << I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK (0xF << I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK (0xF << I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLCNT(_i) (0x000C5100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK (0xF << I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK (0xF << I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_SDPART_MAX_INDEX 15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4))
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK (0xFFF << I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK (0x1FF << I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLCNT(_i) (0x000Cd500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFFLCNT(_i) (0x000Cd100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK (0x3FFFFFFF << I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK (0x1F << I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK (0x1 << I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK (0xF << I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK (0x1F << I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK (0x1 << I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK (0xFFF << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF << I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK (0xFFFFF << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
+#define I40E_GL_UFUSE 0x00094008
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK (0x1 << I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_NIC_ID_MASK (0x1 << I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK (0x1 << I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_PFINT_GPIO_ENA 0x00088080
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK (0x1 << I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK (0x1 << I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK (0x1 << I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK (0x1 << I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK (0x1 << I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK (0x1 << I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */
+#define I40E_PFINT_ITR0_MAX_INDEX 2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4))
+#define I40E_PFINT_ITRN_MAX_INDEX 2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATE0_INTERVAL_MASK (0x3F << I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_RATEN_MAX_INDEX 511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATEN_INTERVAL_MASK (0x3F << I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_RQCTL_MAX_INDEX 1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK (0x1 << I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_TQCTL_MAX_INDEX 1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK (0x1 << I40E_QINT_TQCTL_INTEVENT_SHIFT)
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_MAX_INDEX 127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR0_INTEVENT_MASK (0x1 << I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT 31
+#define I40E_VFINT_ICR0_SWINT_MASK (0x1 << I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */
+#define I40E_VFINT_ITR0_MAX_INDEX 2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */
+#define I40E_VFINT_ITRN_MAX_INDEX 2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_AEQCTL_MAX_INDEX 127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_CEQCTL_MAX_INDEX 511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_LNKLST0_MAX_INDEX 127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_RATE0_MAX_INDEX 127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATE0_INTERVAL_MASK (0x3F << I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_RATEN_MAX_INDEX 511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATEN_INTERVAL_MASK (0x3F << I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK (0x1 << I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK (0x7FFFFFFF << I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK (0x1 << I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK (0xFFF << I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK (0xFFF << I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK (0x1 << I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_ENA_MAX_INDEX 1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK (0x1 << I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK (0x1 << I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK (0x1 << I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_TAIL_MAX_INDEX 1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK (0x1FFF << I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_CTL_MAX_INDEX 1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK (0x3 << I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK (0xF << I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK (0x1FF << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_ENA_MAX_INDEX 1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK (0x1 << I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK (0x1 << I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK (0x1 << I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_HEAD_MAX_INDEX 1535
+#define I40E_QTX_HEAD_HEAD_SHIFT 0
+#define I40E_QTX_HEAD_HEAD_MASK (0x1FFF << I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK (0x1 << I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_TAIL_MAX_INDEX 1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK (0x1FFF << I40E_QTX_TAIL_TAIL_SHIFT)
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPLAN_MAPENA_MAX_INDEX 127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK (0x1 << I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VPLAN_QTABLE_MAX_INDEX 15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK (0x7FF << I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSILAN_QBASE_MAX_INDEX 383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK (0x7FF << I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK (0xFFFF << I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK (0xFFFF << I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK (0xFFFFFFFF << I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HLCTLA 0x001E4760
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK (0x1 << I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT)
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT)
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSECTL1 0x001E3560
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25
+#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK (0x1 << I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK (0xFF << I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK (0xFFFF << I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK (0xFFF << I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK (0xFF << I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK (0xFFFF << I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_METF_MAX_INDEX 3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
+#define I40E_PRT_MNG_METF_ETYPE_MASK (0xFFFF << I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK (0x1 << I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK (0xFFFF << I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK (0x1 << I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK (0x1 << I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK (0x1 << I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK (0xFFFF << I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK (0xFFFFFFFF << I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK (0xFF << I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00004900 + ((_i) * 4)) /* _i=0...5 */
+#define I40E_MSIX_PBA_MAX_INDEX 5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TADD_MAX_INDEX 128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_MSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TMSG_MAX_INDEX 128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TUADD_MAX_INDEX 128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TVCTRL_MAX_INDEX 128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK (0x1 << I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i) (0x00004944 + ((_i) * 4)) /* _i=0...19 */
+#define I40E_VFMSIX_PBA1_MAX_INDEX 19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
+#define I40E_GLNVM_FLA_FL_SCK_MASK (0x1 << I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
+#define I40E_GLNVM_FLA_FL_CE_MASK (0x1 << I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
+#define I40E_GLNVM_FLA_FL_SI_MASK (0x1 << I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
+#define I40E_GLNVM_FLA_FL_SO_MASK (0x1 << I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
+#define I40E_GLNVM_FLA_FL_REQ_MASK (0x1 << I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
+#define I40E_GLNVM_FLA_FL_GNT_MASK (0x1 << I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK (0x1 << I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK (0x7FF << I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK (0x1 << I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
+#define I40E_GLNVM_FLA_FL_DER_MASK (0x1 << I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK (0xFFFFFF << I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK (0x1 << I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK (0x7 << I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK (0x1 << I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK (0x1 << I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK (0x1 << I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK (0xFFFFFF << I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK (0x1 << I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_ADDR_MASK (0x7FFF << I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
+#define I40E_GLNVM_SRCTL_WRITE_MASK (0x1 << I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_START_MASK (0x1 << I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK (0x1 << I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+
+#define I40E_GLPCI_BYTCTH 0x0009C484
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK (0x1 << I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK (0x1 << I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK (0x1 << I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
+#define I40E_GLPCI_CNF_FLEX10_MASK (0x1 << I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK (0x1 << I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK (0x1 << I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK (0x1 << I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK (0xFF << I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK (0xFFFFFFFF << I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LATCT 0x0009C4B4
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK (0xFFFFFFFF << I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK (0x1 << I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK (0x1 << I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK (0x1 << I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK (0x1 << I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK (0x3F << I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK (0x7 << I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK (0xF << I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PCITEST2 0x000BE4BC
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
+
+#define I40E_GLPCI_PKTCT 0x0009C4BC
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK (0x1 << I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK (0x3 << I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK (0xFF << I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK (0xFFFF << I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK (0xFFFFFFFF << I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SUBSYSID 0x000BE48C
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT 16
+#define I40E_GLPCI_SUBSYSID_SUB_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK (0x7FFFFFFF << I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK (0x1 << I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK (0x1 << I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK (0x7 << I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK (0x1F << I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK (0xFF << I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK (0xFFF << I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK (0x7F << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK (0xFFFFFFFF << I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK (0x1 << I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
+#define I40E_PFPCI_CNF_MSI_EN_MASK (0x1 << I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK (0x1 << I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
+#define I40E_PFPCI_CNF_IO_BAR_MASK (0x1 << I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
+#define I40E_PFPCI_CNF_INT_PIN_MASK (0x3 << I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK (0x3 << I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK (0x1 << I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK (0x1 << I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK (0xFFFFFFFF << I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK (0xFFFFFFFF << I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PFDEVID 0x000BE080
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK (0x1 << I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK (0x1 << I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_VFDEVID 0x000BE100
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK (0x1FF << I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK (0x1 << I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL(_i) (0x0000D480 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPE_PFFLMOBJCTRL_MAX_INDEX 15
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+
+#define I40E_PFPE_MRTEIDXMASK 0x00008600
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK (0x1 << I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4))
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4))
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXUNEXPERR 0x0001E008
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT 0
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_MASK (0xFFFFFF << I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK (0x3F << I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK (0x3 << I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK (0x3F << I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK (0x1 << I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK (0xFFFF << I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK (0x1 << I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK (0xFFFF << I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK (0x1 << I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
+#define I40E_PRTPM_GC_MNG_VETO_MASK (0x1 << I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT 2
+#define I40E_PRTPM_GC_RATD_MASK (0x1 << I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT 3
+#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK (0xFFFFF << I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK (0xFFFFF << I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK (0xFFFFF << I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK (0xFFFFF << I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK (0xFFFFF << I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DHW_MAX_INDEX 7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DLW_MAX_INDEX 7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DPS_MAX_INDEX 7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK (0xFFFFF << I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SHT_MAX_INDEX 7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK (0xFFFFF << I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SLT_MAX_INDEX 7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK (0xFFFFF << I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK (0xFFFFF << I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK (0xFFFFFFFF << I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4
+#define I40E_GLQF_CTL_HTOEP_SHIFT 1
+#define I40E_GLQF_CTL_HTOEP_MASK (0x1 << I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT 17
+#define I40E_GLQF_CTL_FDBEST_MASK (0xFF << I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
+#define I40E_GLQF_CTL_PROGPRIO_MASK (0x1 << I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
+#define I40E_GLQF_CTL_INVALPRIO_MASK (0x1 << I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK (0x1 << I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */
+#define I40E_GLQF_HSYM_MAX_INDEX 63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK (0x1 << I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */
+#define I40E_GLQF_PCNT_MAX_INDEX 511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK (0xFFFFFFFF << I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */
+#define I40E_GLQF_SWAP_MAX_INDEX 1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
+#define I40E_GLQF_SWAP_FLEN0_MASK (0xF << I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
+#define I40E_GLQF_SWAP_FLEN1_MASK (0xF << I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK (0x1 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK (0x1 << I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK (0x1 << I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK (0x1 << I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK (0x1 << I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK (0xFF << I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK (0xFF << I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */
+#define I40E_PFQF_HENA_MAX_INDEX 1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK (0xFF << I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK (0xFF << I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK (0xFF << I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK (0xFF << I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK (0x3F << I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK (0x3F << I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK (0x3F << I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK (0x3F << I40E_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK (0x7 << I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK (0x7 << I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK (0x7 << I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK (0x7 << I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK (0x7 << I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK (0x7 << I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK (0x7 << I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK (0x7 << I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_PRTQF_CTL_0 0x00256E60
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK (0x1 << I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK (0xFF << I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
+#define I40E_PRTQF_FD_MSK_MASK_MASK (0xFFFF << I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK (0x3F << I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */
+#define I40E_VFQF_HENA1_MAX_INDEX 1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK (0xFF << I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK (0xFF << I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK (0xFF << I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK (0xFF << I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK (0xF << I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK (0xF << I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK (0xF << I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK (0xF << I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */
+#define I40E_VFQF_HREGION1_MAX_INDEX 7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION1_REGION_0_MASK (0x7 << I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION1_REGION_1_MASK (0x7 << I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION1_REGION_2_MASK (0x7 << I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION1_REGION_3_MASK (0x7 << I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION1_REGION_4_MASK (0x7 << I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION1_REGION_5_MASK (0x7 << I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION1_REGION_6_MASK (0x7 << I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION1_REGION_7_MASK (0x7 << I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPQF_CTL_MAX_INDEX 127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK (0x1F << I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK (0x1F << I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK (0xF << I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK (0x3 << I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSIQF_CTL_MAX_INDEX 383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK (0x1 << I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOECRC_MAX_INDEX 143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK (0xFFFFFFFF << I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFRC(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFRC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXAC(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXAC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT 0
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK (0xFFFF << I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK (0xFFFF << I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOELAST_MAX_INDEX 143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK (0xFFFFFFFF << I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPRC_MAX_INDEX 143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK (0xFFFFFFFF << I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPTC_MAX_INDEX 143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK (0xFFFFFFFF << I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOERPDC_MAX_INDEX 143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK (0xFFFFFFFF << I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCH_MAX_INDEX 3
+#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCL_MAX_INDEX 3
+#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCH_MAX_INDEX 3
+#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCL_MAX_INDEX 3
+#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK (0xFFFFFFFF << I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCH_MAX_INDEX 3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK (0xFFFF << I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCL_MAX_INDEX 3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCH_MAX_INDEX 3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCL_MAX_INDEX 3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLPRT_GOTCL_GOTCL_SHIFT)
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK (0xFFFFFFFF << I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LDPC_MAX_INDEX 3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK (0xFFFFFFFF << I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MLFC_MAX_INDEX 3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK (0xFFFFFFFF << I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCH_MAX_INDEX 3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCL_MAX_INDEX 3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCH_MAX_INDEX 3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCL_MAX_INDEX 3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MRFC_MAX_INDEX 3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK (0xFFFFFFFF << I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK (0xFFFF << I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127H_MAX_INDEX 3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK (0xFFFF << I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127L_MAX_INDEX 3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255H_MAX_INDEX 3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK (0xFFFF << I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255L_MAX_INDEX 3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511H_MAX_INDEX 3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK (0xFFFF << I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511L_MAX_INDEX 3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64H_MAX_INDEX 3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK (0xFFFF << I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64L_MAX_INDEX 3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK (0xFFFF << I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127H_MAX_INDEX 3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK (0xFFFF << I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127L_MAX_INDEX 3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK (0xFFFF << I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255H_MAX_INDEX 3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK (0xFFFF << I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255L_MAX_INDEX 3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511H_MAX_INDEX 3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK (0xFFFF << I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511L_MAX_INDEX 3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64H_MAX_INDEX 3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK (0xFFFF << I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64L_MAX_INDEX 3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK (0xFFFF << I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RDPC_MAX_INDEX 3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK (0xFFFFFFFF << I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK (0xFFFFFFFF << I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RLEC_MAX_INDEX 3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK (0xFFFFFFFF << I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK (0xFFFFFFFF << I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK (0xFFFFFFFF << I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUPP_MAX_INDEX 3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK (0xFFFFFFFF << I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_STDC_MAX_INDEX 3
+#define I40E_GLPRT_STDC_STDC_SHIFT 0
+#define I40E_GLPRT_STDC_STDC_MASK (0xFFFFFFFF << I40E_GLPRT_STDC_STDC_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDOLD_MAX_INDEX 3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK (0xFFFFFFFF << I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDPC_MAX_INDEX 3
+#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
+#define I40E_GLPRT_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLPRT_TDPC_TDPC_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCH_MAX_INDEX 3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCL_MAX_INDEX 3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCH_MAX_INDEX 3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCL_MAX_INDEX 3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK (0xFFFFFFFF << I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCH_MAX_INDEX 15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCL_MAX_INDEX 15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCH_MAX_INDEX 15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCL_MAX_INDEX 15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCH_MAX_INDEX 15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK (0xFFFF << I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCL_MAX_INDEX 15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCH_MAX_INDEX 15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCL_MAX_INDEX 15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCH_MAX_INDEX 15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCL_MAX_INDEX 15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCH_MAX_INDEX 15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCL_MAX_INDEX 15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_RUPP_MAX_INDEX 15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_TDPC_MAX_INDEX 15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCH_MAX_INDEX 15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCL_MAX_INDEX 15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCH_MAX_INDEX 15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCL_MAX_INDEX 15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCH_MAX_INDEX 383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCL_MAX_INDEX 383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCH_MAX_INDEX 383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCL_MAX_INDEX 383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCH_MAX_INDEX 383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK (0xFFFF << I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCL_MAX_INDEX 383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCH_MAX_INDEX 383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCL_MAX_INDEX 383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCH_MAX_INDEX 383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCL_MAX_INDEX 383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCH_MAX_INDEX 383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCL_MAX_INDEX 383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RDPC_MAX_INDEX 383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RUPP_MAX_INDEX 383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _VSI=0...383 */
+#define I40E_GLV_TEPC_MAX_INDEX 383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK (0xFFFFFFFF << I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCH_MAX_INDEX 383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCL_MAX_INDEX 383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCH_MAX_INDEX 383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK (0xFFFF << I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCL_MAX_INDEX 383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK (0xFFFF << I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK (0xFFFF << I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK (0xFFFF << I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK (0xFFFF << I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_L 0x00269F44
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK (0xFFFFFFFF << I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...25 */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 25
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN 0x0026CF84
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRT_MSCCNT 0x00256BA0
+#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_MSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_MSCCNT_CCOUNT_SHIFT)
+#define I40E_PRT_SCSTS 0x00256C20
+#define I40E_PRT_SCSTS_BSCA_SHIFT 0
+#define I40E_PRT_SCSTS_BSCA_MASK (0x1 << I40E_PRT_SCSTS_BSCA_SHIFT)
+#define I40E_PRT_SCSTS_BSCAP_SHIFT 1
+#define I40E_PRT_SCSTS_BSCAP_MASK (0x1 << I40E_PRT_SCSTS_BSCAP_SHIFT)
+#define I40E_PRT_SCSTS_MSCA_SHIFT 2
+#define I40E_PRT_SCSTS_MSCA_MASK (0x1 << I40E_PRT_SCSTS_MSCA_SHIFT)
+#define I40E_PRT_SCSTS_MSCAP_SHIFT 3
+#define I40E_PRT_SCSTS_MSCAP_MASK (0x1 << I40E_PRT_SCSTS_MSCAP_SHIFT)
+#define I40E_PRT_SWT_BSCCNT 0x00256C60
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK (0x7FFFFFFF << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK (0x1 << I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK (0x3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK (0xF << I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK (0x3 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK (0x1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK (0x1 << I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK (0xFFFFFFFF << I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK (0x1 << I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK (0xF << I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK (0x3 << I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK (0x3 << I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK (0x3 << I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK (0x3F << I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK (0x1 << I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
+#define I40E_GLSCD_QUANTA 0x000B2080
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK (0x7 << I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK (0xFF << I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK (0x1FF << I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK (0x1 << I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480
+#define I40E_GL_MDET_TX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_TX_FUNCTION_MASK (0xFF << I40E_GL_MDET_TX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 8
+#define I40E_GL_MDET_TX_EVENT_MASK (0x1FF << I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_TX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK (0x1 << I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX 0x0012A400
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK (0x1 << I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK (0x1 << I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK (0x1 << I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VP_MDET_RX_MAX_INDEX 127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK (0x1 << I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VP_MDET_TX_MAX_INDEX 127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK (0x1 << I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800
+#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
+#define I40E_GLPM_WUMC_NOTCO_MASK (0x1 << I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK (0x1 << I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK (0x1 << I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK (0x1FFF << I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK (0xFFFF << I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400
+#define I40E_PFPM_WUFC_LNKC_SHIFT 0
+#define I40E_PFPM_WUFC_LNKC_MASK (0x1 << I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK (0x1 << I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT 3
+#define I40E_PFPM_WUFC_MNG_MASK (0x1 << I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT 16
+#define I40E_PFPM_WUFC_FLX0_MASK (0x1 << I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT 17
+#define I40E_PFPM_WUFC_FLX1_MASK (0x1 << I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT 18
+#define I40E_PFPM_WUFC_FLX2_MASK (0x1 << I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT 19
+#define I40E_PFPM_WUFC_FLX3_MASK (0x1 << I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT 20
+#define I40E_PFPM_WUFC_FLX4_MASK (0x1 << I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT 21
+#define I40E_PFPM_WUFC_FLX5_MASK (0x1 << I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT 22
+#define I40E_PFPM_WUFC_FLX6_MASK (0x1 << I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT 23
+#define I40E_PFPM_WUFC_FLX7_MASK (0x1 << I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600
+#define I40E_PFPM_WUS_LNKC_SHIFT 0
+#define I40E_PFPM_WUS_LNKC_MASK (0x1 << I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT 1
+#define I40E_PFPM_WUS_MAG_MASK (0x1 << I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK (0x1 << I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT 3
+#define I40E_PFPM_WUS_MNG_MASK (0x1 << I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT 16
+#define I40E_PFPM_WUS_FLX0_MASK (0x1 << I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT 17
+#define I40E_PFPM_WUS_FLX1_MASK (0x1 << I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT 18
+#define I40E_PFPM_WUS_FLX2_MASK (0x1 << I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT 19
+#define I40E_PFPM_WUS_FLX3_MASK (0x1 << I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT 20
+#define I40E_PFPM_WUS_FLX4_MASK (0x1 << I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT 21
+#define I40E_PFPM_WUS_FLX5_MASK (0x1 << I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT 22
+#define I40E_PFPM_WUS_FLX6_MASK (0x1 << I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT 23
+#define I40E_PFPM_WUS_FLX7_MASK (0x1 << I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK (0x1 << I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK (0x1 << I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAH_MAX_INDEX 3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK (0xFFFF << I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
+#define I40E_PRTPM_SAH_PF_NUM_MASK (0xF << I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK (0x1 << I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT 31
+#define I40E_PRTPM_SAH_AV_MASK (0x1 << I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAL_MAX_INDEX 3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK (0xFFFFFFFF << I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK (0x3FF << I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK (0x3FF << I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK (0x3FF << I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK (0x3FF << I40E_VF_ATQT1_ATQT_SHIFT)
+#define I40E_VFGEN_RSTAT 0x00008800
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4))
+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR01_INTEVENT_MASK (0x1 << I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR01_ADMINQ_MASK (0x1 << I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT 31
+#define I40E_VFINT_ICR01_SWINT_MASK (0x1 << I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */
+#define I40E_VFINT_ITR01_MAX_INDEX 2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4))
+#define I40E_VFINT_ITRN1_MAX_INDEX 2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QRX_TAIL1_MAX_INDEX 15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK (0x1FFF << I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QTX_TAIL1_MAX_INDEX 15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK (0x1FFF << I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD_MAX_INDEX 16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG_MAX_INDEX 16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD_MAX_INDEX 16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */
+#define I40E_VFQF_HENA_MAX_INDEX 1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK (0xFF << I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK (0xFF << I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK (0xFF << I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK (0xFF << I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK (0xF << I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK (0xF << I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK (0xF << I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK (0xF << I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */
+#define I40E_VFQF_HREGION_MAX_INDEX 7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION_REGION_0_MASK (0x7 << I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION_REGION_1_MASK (0x7 << I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION_REGION_2_MASK (0x7 << I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION_REGION_3_MASK (0x7 << I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION_REGION_4_MASK (0x7 << I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION_REGION_5_MASK (0x7 << I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION_REGION_6_MASK (0x7 << I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
+#endif
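Each register in the hunk above follows one pattern: an absolute offset (or an indexed address macro whose valid range is given in the trailing comment), an optional MAX_INDEX bound, and a _SHIFT/_MASK pair per field. A minimal sketch of how one such pair is typically consumed, assuming the driver's rd32() register accessor; the helper name is invented for illustration:

/* Editorial illustration, not part of the patch: read the queue number
 * field out of the I40E_GL_MDET_RX register defined above.
 */
static u32 example_mdet_rx_queue(struct i40e_hw *hw)
{
	u32 reg = rd32(hw, I40E_GL_MDET_RX);

	return (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> I40E_GL_MDET_RX_QUEUE_SHIFT;
}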
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
new file mode 100644
index 000000000000..7c08cc2e339b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -0,0 +1,97 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+ I40E_SUCCESS = 0,
+ I40E_ERR_NVM = -1,
+ I40E_ERR_NVM_CHECKSUM = -2,
+ I40E_ERR_PHY = -3,
+ I40E_ERR_CONFIG = -4,
+ I40E_ERR_PARAM = -5,
+ I40E_ERR_MAC_TYPE = -6,
+ I40E_ERR_UNKNOWN_PHY = -7,
+ I40E_ERR_LINK_SETUP = -8,
+ I40E_ERR_ADAPTER_STOPPED = -9,
+ I40E_ERR_INVALID_MAC_ADDR = -10,
+ I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
+ I40E_ERR_MASTER_REQUESTS_PENDING = -12,
+ I40E_ERR_INVALID_LINK_SETTINGS = -13,
+ I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
+ I40E_ERR_RESET_FAILED = -15,
+ I40E_ERR_SWFW_SYNC = -16,
+ I40E_ERR_NO_AVAILABLE_VSI = -17,
+ I40E_ERR_NO_MEMORY = -18,
+ I40E_ERR_BAD_PTR = -19,
+ I40E_ERR_RING_FULL = -20,
+ I40E_ERR_INVALID_PD_ID = -21,
+ I40E_ERR_INVALID_QP_ID = -22,
+ I40E_ERR_INVALID_CQ_ID = -23,
+ I40E_ERR_INVALID_CEQ_ID = -24,
+ I40E_ERR_INVALID_AEQ_ID = -25,
+ I40E_ERR_INVALID_SIZE = -26,
+ I40E_ERR_INVALID_ARP_INDEX = -27,
+ I40E_ERR_INVALID_FPM_FUNC_ID = -28,
+ I40E_ERR_QP_INVALID_MSG_SIZE = -29,
+ I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ I40E_ERR_INVALID_FRAG_COUNT = -31,
+ I40E_ERR_QUEUE_EMPTY = -32,
+ I40E_ERR_INVALID_ALIGNMENT = -33,
+ I40E_ERR_FLUSHED_QUEUE = -34,
+ I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
+ I40E_ERR_TIMEOUT = -37,
+ I40E_ERR_OPCODE_MISMATCH = -38,
+ I40E_ERR_CQP_COMPL_ERROR = -39,
+ I40E_ERR_INVALID_VF_ID = -40,
+ I40E_ERR_INVALID_HMCFN_ID = -41,
+ I40E_ERR_BACKING_PAGE_ERROR = -42,
+ I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ I40E_ERR_INVALID_PBLE_INDEX = -44,
+ I40E_ERR_INVALID_SD_INDEX = -45,
+ I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ I40E_ERR_INVALID_SD_TYPE = -47,
+ I40E_ERR_MEMCPY_FAILED = -48,
+ I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ I40E_ERR_SRQ_ENABLED = -52,
+ I40E_ERR_ADMIN_QUEUE_ERROR = -53,
+ I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ I40E_ERR_BUF_TOO_SHORT = -55,
+ I40E_ERR_ADMIN_QUEUE_FULL = -56,
+ I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ I40E_ERR_BAD_IWARP_CQE = -58,
+ I40E_ERR_NVM_BLANK_MODE = -59,
+ I40E_ERR_NOT_IMPLEMENTED = -60,
+ I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ I40E_ERR_DIAG_TEST_FAILED = -62,
+ I40E_ERR_NOT_READY = -63,
+ I40E_NOT_SUPPORTED = -64,
+ I40E_ERR_FIRMWARE_API_VERSION = -65,
+};
+
+#endif /* _I40E_STATUS_H_ */
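These status codes are what the lower-level shared and admin-queue routines return instead of standard errno values, so callers normally translate them at the driver boundary. A hedged sketch of such a translation; the helper name and the specific errno mapping are illustrative only:

/* Editorial illustration, not part of the patch. */
static int example_status_to_errno(enum i40e_status_code ret)
{
	if (ret == I40E_SUCCESS)
		return 0;
	if (ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
		return -EAGAIN;	/* firmware did not answer in time */
	if (ret == I40E_ERR_NO_MEMORY)
		return -ENOMEM;
	return -EIO;		/* any other hardware/firmware failure */
}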
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
new file mode 100644
index 000000000000..ffdb01d853db
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -0,0 +1,1575 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include <linux/prefetch.h>
+
+#include "i40evf.h"
+
+static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
+ u32 td_tag)
+{
+ return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+ ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
+
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+
+/**
+ * i40e_unmap_and_free_tx_resource - Release a Tx buffer
+ * @ring: the ring that owns the buffer
+ * @tx_buffer: the buffer to free
+ **/
+static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
+ struct i40e_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* tx_buffer must be completely set up in the transmit path */
+}
+
+/**
+ * i40evf_clean_tx_ring - Free all Tx buffers in a ring
+ * @tx_ring: ring to be cleaned
+ **/
+void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+ unsigned long bi_size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!tx_ring->tx_bi)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++)
+ i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ if (!tx_ring->netdev)
+ return;
+
+ /* cleanup Tx queue statistics */
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index));
+}
+
+/**
+ * i40evf_free_tx_resources - Free Tx resources per queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
+{
+ i40evf_clean_tx_ring(tx_ring);
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+
+ if (tx_ring->desc) {
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40e_get_tx_pending - how many Tx descriptors are not yet processed
+ * @ring: the ring of descriptors
+ *
+ * Since there is no access to the ring head register
+ * in XL710, we need to use our local copies
+ **/
+static u32 i40e_get_tx_pending(struct i40e_ring *ring)
+{
+ u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
+ ? ring->next_to_use
+ : ring->next_to_use + ring->count);
+ return ntu - ring->next_to_clean;
+}
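/* Editorial worked example, not part of the original patch: with a ring of
 * count = 512, next_to_clean = 500 and next_to_use = 10, the use index has
 * wrapped, so ntu above becomes 10 + 512 = 522 and 522 - 500 = 22
 * descriptors are still awaiting completion.
 */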
+
+/**
+ * i40e_check_tx_hang - Is there a hang in the Tx queue
+ * @tx_ring: the ring of descriptors
+ **/
+static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
+{
+ u32 tx_pending = i40e_get_tx_pending(tx_ring);
+ bool ret = false;
+
+ clear_check_for_tx_hang(tx_ring);
+
+ /* Check for a hung queue, but be thorough. This verifies
+ * that a transmit has been completed since the previous
+ * check AND there is at least one packet pending. The
+ * ARMED bit is set to indicate a potential hang. The
+ * bit is cleared if a pause frame is received to remove
+ * false hang detection due to PFC or 802.3x frames. By
+ * requiring this to fail twice we avoid races with
+ * PFC clearing the ARMED bit and conditions where we
+ * run the check_tx_hang logic with a transmit completion
+ * pending but without time to complete it yet.
+ */
+ if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
+ tx_pending) {
+ /* make sure it is true for two checks in a row */
+ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
+ &tx_ring->state);
+ } else {
+ /* update completed stats and disarm the hang check */
+ tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_clean_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring: tx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+ u16 i = tx_ring->next_to_clean;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_desc;
+ unsigned int total_packets = 0;
+ unsigned int total_bytes = 0;
+
+ tx_buf = &tx_ring->tx_bi[i];
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buf->bytecount;
+ total_packets += tx_buf->gso_segs;
+
+ /* free the skb */
+ dev_kfree_skb_any(tx_buf->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+
+ /* clear tx_buffer data */
+ tx_buf->skb = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ tx_ring->q_vector->tx.total_bytes += total_bytes;
+ tx_ring->q_vector->tx.total_packets += total_packets;
+
+ if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
+ /* schedule immediate reset if we believe we hung */
+ dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
+ " VSI <%d>\n"
+ " Tx Queue <%d>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n",
+ tx_ring->vsi->seid,
+ tx_ring->queue_index,
+ tx_ring->next_to_use, i);
+ dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->tx_bi[i].time_stamp, jiffies);
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ dev_info(tx_ring->dev,
+ "tx hang detected on queue %d, resetting adapter\n",
+ tx_ring->queue_index);
+
+ tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+
+ /* the adapter is about to reset, no point in enabling stuff */
+ return true;
+ }
+
+ netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ total_packets, total_bytes);
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+ (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ }
+ }
+
+ return budget > 0;
+}
+
+/**
+ * i40e_set_new_dynamic_itr - Find new ITR level
+ * @rc: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte counts during
+ * the last interrupt. The advantage of per interrupt computation
+ * is faster updates and more accurate ITR for the current traffic
+ * pattern. Constants in this function were computed based on
+ * theoretical maximum wire speed and thresholds were set based on
+ * testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+{
+ enum i40e_latency_range new_latency_range = rc->latency_range;
+ u32 new_itr = rc->itr;
+ int bytes_per_int;
+
+ if (rc->total_packets == 0 || !rc->itr)
+ return;
+
+ /* simple throttle rate management
+ * 0-10MB/s lowest (100000 ints/s)
+ * 10-20MB/s low (20000 ints/s)
+ * 20-1249MB/s bulk (8000 ints/s)
+ */
+ bytes_per_int = rc->total_bytes / rc->itr;
+ switch (rc->latency_range) {
+ case I40E_LOWEST_LATENCY:
+ if (bytes_per_int > 10)
+ new_latency_range = I40E_LOW_LATENCY;
+ break;
+ case I40E_LOW_LATENCY:
+ if (bytes_per_int > 20)
+ new_latency_range = I40E_BULK_LATENCY;
+ else if (bytes_per_int <= 10)
+ new_latency_range = I40E_LOWEST_LATENCY;
+ break;
+ case I40E_BULK_LATENCY:
+ if (bytes_per_int <= 20)
+ new_latency_range = I40E_LOW_LATENCY;
+ break;
+ }
+ rc->latency_range = new_latency_range;
+
+ switch (new_latency_range) {
+ case I40E_LOWEST_LATENCY:
+ new_itr = I40E_ITR_100K;
+ break;
+ case I40E_LOW_LATENCY:
+ new_itr = I40E_ITR_20K;
+ break;
+ case I40E_BULK_LATENCY:
+ new_itr = I40E_ITR_8K;
+ break;
+ default:
+ break;
+ }
+
+ if (new_itr != rc->itr) {
+ /* do an exponential smoothing */
+ new_itr = (10 * new_itr * rc->itr) /
+ ((9 * new_itr) + rc->itr);
+ rc->itr = new_itr & I40E_MAX_ITR;
+ }
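/* Editorial worked example, not part of the original patch: if rc->itr were
 * 200 and the new target 100, the smoothing above yields
 * (10 * 100 * 200) / (9 * 100 + 200) = 200000 / 1100 = 181, i.e. each update
 * moves the ITR only part of the way toward the new target rather than
 * jumping straight to it.
 */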
+
+ rc->total_bytes = 0;
+ rc->total_packets = 0;
+}
+
+/**
+ * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
+ * @q_vector: the vector to adjust
+ **/
+static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
+{
+ u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
+ struct i40e_hw *hw = &q_vector->vsi->back->hw;
+ u32 reg_addr;
+ u16 old_itr;
+
+ reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1);
+ old_itr = q_vector->rx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->rx);
+ if (old_itr != q_vector->rx.itr)
+ wr32(hw, reg_addr, q_vector->rx.itr);
+
+ reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1);
+ old_itr = q_vector->tx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->tx);
+ if (old_itr != q_vector->tx.itr)
+ wr32(hw, reg_addr, q_vector->tx.itr);
+}
+
+/**
+ * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
+ * @tx_ring: the tx ring to set up
+ *
+ * Return 0 on success, negative on error
+ **/
+int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int bi_size;
+
+ if (!dev)
+ return -ENOMEM;
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!tx_ring->tx_bi)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
+ tx_ring->size);
+ goto err;
+ }
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ return 0;
+
+err:
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+ return -ENOMEM;
+}
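/* Editorial sketch, not part of the patch: one way the per-queue setup and
 * free helpers above might be driven for an array of Tx rings. The helper
 * name and the flat ring array are assumptions made for illustration; the
 * real driver walks its own adapter structures instead.
 */
static int example_setup_all_tx(struct i40e_ring *tx_rings, int num_queues)
{
	int i, err = 0;

	for (i = 0; i < num_queues; i++) {
		err = i40evf_setup_tx_descriptors(&tx_rings[i]);
		if (err)
			break;
	}
	if (err) {
		/* unwind only the rings that were set up successfully */
		while (i--)
			i40evf_free_tx_resources(&tx_rings[i]);
	}
	return err;
}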
+
+/**
+ * i40evf_clean_rx_ring - Free Rx buffers
+ * @rx_ring: ring to be cleaned
+ **/
+void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ struct i40e_rx_buffer *rx_bi;
+ unsigned long bi_size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_bi)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_bi = &rx_ring->rx_bi[i];
+ if (rx_bi->dma) {
+ dma_unmap_single(dev,
+ rx_bi->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+ }
+ if (rx_bi->skb) {
+ dev_kfree_skb(rx_bi->skb);
+ rx_bi->skb = NULL;
+ }
+ if (rx_bi->page) {
+ if (rx_bi->page_dma) {
+ dma_unmap_page(dev,
+ rx_bi->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_bi->page_dma = 0;
+ }
+ __free_page(rx_bi->page);
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
+ }
+ }
+
+ bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * i40evf_free_rx_resources - Free Rx resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
+{
+ i40evf_clean_rx_ring(rx_ring);
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+
+ if (rx_ring->desc) {
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+ rx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40evf_setup_rx_descriptors - Allocate Rx descriptors
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int bi_size;
+
+ bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!rx_ring->rx_bi)
+ goto err;
+
+ /* Round up to nearest 4K */
+ rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
+ ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
+ : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+ rx_ring->size);
+ goto err;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+err:
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * i40e_release_rx_desc - Store the new tail and head values
+ * @rx_ring: ring to bump
+ * @val: new next_to_use value, written to the ring tail register
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+ rx_ring->next_to_use = val;
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+}
+
+/**
+ * i40evf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+ u16 i = rx_ring->next_to_use;
+ union i40e_rx_desc *rx_desc;
+ struct i40e_rx_buffer *bi;
+ struct sk_buff *skb;
+
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev || !cleaned_count)
+ return;
+
+ while (cleaned_count--) {
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_bi[i];
+ skb = bi->skb;
+
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ goto no_buffers;
+ }
+ /* initialize queue mapping */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ bi->skb = skb;
+ }
+
+ if (!bi->dma) {
+ bi->dma = dma_map_single(rx_ring->dev,
+ skb->data,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ bi->dma = 0;
+ goto no_buffers;
+ }
+ }
+
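+ /* packet split: headers go to the skb buffer above, payload to a half page mapped below */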
+ if (ring_is_ps_enabled(rx_ring)) {
+ if (!bi->page) {
+ bi->page = alloc_page(GFP_ATOMIC);
+ if (!bi->page) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use the other half of the page if we're re-using it */
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(rx_ring->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ bi->page_dma)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.hdr_addr = 0;
+ }
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i)
+ i40e_release_rx_desc(rx_ring, i);
+}
+
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ struct i40e_q_vector *q_vector = rx_ring->q_vector;
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ u64 flags = vsi->back->flags;
+
+ if (vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
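+ /* under netpoll, bypass GRO and hand the skb straight to the stack */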
+ if (flags & I40E_FLAG_IN_NETPOLL)
+ netif_rx(skb);
+ else
+ napi_gro_receive(&q_vector->napi, skb);
+}
+
+/**
+ * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * @vsi: the VSI we care about
+ * @skb: skb currently being received and modified
+ * @rx_status: status value of last descriptor in packet
+ * @rx_error: error value of last descriptor in packet
+ * @rx_ptype: ptype value of last descriptor in packet
+ **/
+static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
+ struct sk_buff *skb,
+ u32 rx_status,
+ u32 rx_error,
+ u16 rx_ptype)
+{
+ bool ipv4_tunnel, ipv6_tunnel;
+ __wsum rx_udp_csum;
+ __sum16 csum;
+ struct iphdr *iph;
+
+ ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+ ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+
+ skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Rx csum enabled and ip headers found? */
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+ /* likely incorrect csum if alternate IP extension headers found */
+ if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ return;
+
+ /* IP or L4 or outermost IP checksum error */
+ if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
+ vsi->back->hw_csum_rx_error++;
+ return;
+ }
+
+ if (ipv4_tunnel &&
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+ /* If VXLAN traffic has an outer UDPv4 checksum we need to check
+ * it in the driver; the hardware does not do it for us.
+ * Since the L3L4P bit was set we assume a valid IHL value (>= 5),
+ * so the total length of the IPv4 header is IHL*4 bytes.
+ */
+ skb->transport_header = skb->mac_header +
+ sizeof(struct ethhdr) +
+ (ip_hdr(skb)->ihl * 4);
+
+ /* Add 4 bytes for VLAN tagged packets */
+ skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD))
+ ? VLAN_HLEN : 0;
+
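+ /* compute the expected outer UDP checksum (including the pseudo-header)
+ * and compare it against the one carried in the packet
+ */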
+ rx_udp_csum = udp_csum(skb);
+ iph = ip_hdr(skb);
+ csum = csum_tcpudp_magic(
+ iph->saddr, iph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, rx_udp_csum);
+
+ if (udp_hdr(skb)->check != csum) {
+ vsi->back->hw_csum_rx_error++;
+ return;
+ }
+ }
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/**
+ * i40e_rx_hash - returns the hash value from the Rx descriptor
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline u32 i40e_rx_hash(struct i40e_ring *ring,
+ union i40e_rx_desc *rx_desc)
+{
+ const __le64 rss_mask =
+ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+ if ((ring->netdev->features & NETIF_F_RXHASH) &&
+ (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
+ return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ else
+ return 0;
+}
+
+/**
+ * i40e_clean_rx_irq - Reclaim resources after receive completes
+ * @rx_ring: rx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
+ u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ const int current_node = numa_node_id();
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ u16 i = rx_ring->next_to_clean;
+ union i40e_rx_desc *rx_desc;
+ u32 rx_error, rx_status;
+ u64 qword;
+ u16 rx_ptype;
+
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+
+ while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
+ union i40e_rx_desc *next_rxd;
+ struct i40e_rx_buffer *rx_bi;
+ struct sk_buff *skb;
+ u16 vlan_tag;
+ rx_bi = &rx_ring->rx_bi[i];
+ skb = rx_bi->skb;
+ prefetch(skb->data);
+
+ rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+ rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_bi->skb = NULL;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * STATUS_DD bit is set
+ */
+ rmb();
+
+ /* Get the header and possibly the whole packet.
+ * If this is an skb from a previous receive, dma will be 0.
+ */
+ if (rx_bi->dma) {
+ u16 len;
+
+ if (rx_hbo)
+ len = I40E_RX_HDR_SIZE;
+ else if (rx_sph)
+ len = rx_header_len;
+ else if (rx_packet_len)
+ len = rx_packet_len; /* 1buf/no split found */
+ else
+ len = rx_header_len; /* split always mode */
+
+ skb_put(skb, len);
+ dma_unmap_single(rx_ring->dev,
+ rx_bi->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+ }
+
+ /* Get the rest of the data if this was a header split */
+ if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
+
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_bi->page,
+ rx_bi->page_offset,
+ rx_packet_len);
+
+ skb->len += rx_packet_len;
+ skb->data_len += rx_packet_len;
+ skb->truesize += rx_packet_len;
+
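+ /* keep the page for reuse only if we hold the sole reference and it
+ * is local to this NUMA node; otherwise let the stack own it
+ */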
+ if ((page_count(rx_bi->page) == 1) &&
+ (page_to_nid(rx_bi->page) == current_node))
+ get_page(rx_bi->page);
+ else
+ rx_bi->page = NULL;
+
+ dma_unmap_page(rx_ring->dev,
+ rx_bi->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_bi->page_dma = 0;
+ }
+ I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+
+ if (unlikely(
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ struct i40e_rx_buffer *next_buffer;
+
+ next_buffer = &rx_ring->rx_bi[i];
+
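+ /* the packet continues in the next descriptor; in packet split
+ * mode carry the in-progress skb over to the next buffer
+ */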
+ if (ring_is_ps_enabled(rx_ring)) {
+ rx_bi->skb = next_buffer->skb;
+ rx_bi->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ }
+ rx_ring->rx_stats.non_eop_descs++;
+ goto next_desc;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ dev_kfree_skb_any(skb);
+ goto next_desc;
+ }
+
+ skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
+ vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+ : 0;
+ i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+ rx_ring->netdev->last_rx = jiffies;
+ budget--;
+next_desc:
+ rx_desc->wb.qword1.status_error_len = 0;
+ if (!budget)
+ break;
+
+ cleaned_count++;
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+ i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ }
+
+ rx_ring->next_to_clean = i;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ rx_ring->q_vector->rx.total_packets += total_rx_packets;
+ rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+ if (cleaned_count)
+ i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ return budget > 0;
+}
+
+/**
+ * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ *
+ * Returns the amount of work done
+ **/
+int i40evf_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct i40e_q_vector *q_vector =
+ container_of(napi, struct i40e_q_vector, napi);
+ struct i40e_vsi *vsi = q_vector->vsi;
+ struct i40e_ring *ring;
+ bool clean_complete = true;
+ int budget_per_ring;
+
+ if (test_bit(__I40E_DOWN, &vsi->state)) {
+ napi_complete(napi);
+ return 0;
+ }
+
+ /* Since the actual Tx work is minimal, we can give the Tx a larger
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
+ i40e_for_each_ring(ring, q_vector->tx)
+ clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+
+ /* We attempt to distribute budget to each Rx queue fairly, but don't
+ * allow the budget to go below 1 because that would exit polling early.
+ */
+ budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
+
+ i40e_for_each_ring(ring, q_vector->rx)
+ clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
+
+ /* If work not completed, return budget and polling will return */
+ if (!clean_complete)
+ return budget;
+
+ /* Work is done so exit the polling mode and re-enable the interrupt */
+ napi_complete(napi);
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
+ ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ i40e_update_dynamic_itr(q_vector);
+
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx);
+
+ return 0;
+}
+
+/**
+ * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ * @flags: the tx flags to be set
+ *
+ * Checks the skb and sets up correspondingly several generic transmit flags
+ * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
+ *
+ * Returns an error code to indicate the frame should be dropped upon error,
+ * otherwise returns 0 to indicate the flags have been set properly.
+ **/
+static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
+{
+ __be16 protocol = skb->protocol;
+ u32 tx_flags = 0;
+
+ /* if we have a HW VLAN tag being added, default to the HW one */
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+ /* else if it is a SW VLAN, check the next protocol and store the tag */
+ } else if (protocol == htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vhdr, _vhdr;
+ vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+ if (!vhdr)
+ return -EINVAL;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_SW_VLAN;
+ }
+
+ *flags = tx_flags;
+ return 0;
+}
+
+/**
+ * i40e_tso - set up the tso context descriptor
+ * @tx_ring: ptr to the ring to send
+ * @skb: ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @protocol: the send protocol
+ * @hdr_len: ptr to the size of the packet header
+ * @cd_tunneling: ptr to context descriptor bits
+ *
+ * Returns 0 if no TSO can happen, 1 if TSO is in use, or a negative error code
+ **/
+static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol, u8 *hdr_len,
+ u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+{
+ u32 cd_cmd, cd_tso_len, cd_mss;
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ u32 l4len;
+ int err;
+ struct ipv6hdr *ipv6h;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
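+ /* zero the L3 length fields and seed the TCP checksum with the
+ * pseudo-header so the hardware can finish it for each segment
+ */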
+ if (protocol == htons(ETH_P_IP)) {
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ } else if (skb_is_gso_v6(skb)) {
+
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
+ : ipv6_hdr(skb);
+ tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+ ipv6h->payload_len = 0;
+ tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
+ *hdr_len = (skb->encapsulation
+ ? (skb_inner_transport_header(skb) - skb->data)
+ : skb_transport_offset(skb)) + l4len;
+
+ /* find the field values */
+ cd_cmd = I40E_TX_CTX_DESC_TSO;
+ cd_tso_len = skb->len - *hdr_len;
+ cd_mss = skb_shinfo(skb)->gso_size;
+ *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ ((u64)cd_tso_len <<
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ return 1;
+}
+
+/**
+ * i40e_tx_enable_csum - Enable Tx checksum offloads
+ * @skb: send buffer
+ * @tx_flags: Tx flags currently set
+ * @td_cmd: Tx descriptor command bits to set
+ * @td_offset: Tx descriptor header offsets to set
+ * @cd_tunneling: ptr to context desc bits
+ **/
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+ u32 *td_cmd, u32 *td_offset,
+ struct i40e_ring *tx_ring,
+ u32 *cd_tunneling)
+{
+ struct ipv6hdr *this_ipv6_hdr;
+ unsigned int this_tcp_hdrlen;
+ struct iphdr *this_ip_hdr;
+ u32 network_hdr_len;
+ u8 l4_hdr = 0;
+
+ if (skb->encapsulation) {
+ network_hdr_len = skb_inner_network_header_len(skb);
+ this_ip_hdr = inner_ip_hdr(skb);
+ this_ipv6_hdr = inner_ipv6_hdr(skb);
+ this_tcp_hdrlen = inner_tcp_hdrlen(skb);
+
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+ ip_hdr(skb)->check = 0;
+ } else {
+ *cd_tunneling |=
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ }
+ } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ ip_hdr(skb)->check = 0;
+ } else {
+ *cd_tunneling |=
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ }
+ }
+
+ /* Now set the ctx descriptor fields */
+ *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+ I40E_TXD_CTX_UDP_TUNNELING |
+ ((skb_inner_network_offset(skb) -
+ skb_transport_offset(skb)) >> 1) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+ } else {
+ network_hdr_len = skb_network_header_len(skb);
+ this_ip_hdr = ip_hdr(skb);
+ this_ipv6_hdr = ipv6_hdr(skb);
+ this_tcp_hdrlen = tcp_hdrlen(skb);
+ }
+
+ /* Enable IP checksum offloads */
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ l4_hdr = this_ip_hdr->protocol;
+ /* the stack computes the IP header already; the only time we
+ * need the hardware to recompute it is in the case of TSO.
+ */
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ this_ip_hdr->check = 0;
+ } else {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+ }
+ /* Now set the td_offset for IP header length */
+ *td_offset = (network_hdr_len >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ l4_hdr = this_ipv6_hdr->nexthdr;
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+ /* Now set the td_offset for IP header length */
+ *td_offset = (network_hdr_len >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+ /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
+ *td_offset |= (skb_network_offset(skb) >> 1) <<
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ /* Enable L4 checksum offloads */
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ /* enable checksum offloads */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (this_tcp_hdrlen >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ /* enable SCTP checksum offload */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *td_offset |= (sizeof(struct sctphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ /* enable UDP checksum offload */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ *td_offset |= (sizeof(struct udphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_create_tx_ctx - Build the Tx context descriptor
+ * @tx_ring: ring to create the descriptor on
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ * @cd_tunneling: Quad Word 0 - bits 0-31
+ * @cd_l2tag2: Quad Word 0 - bits 32-63
+ **/
+static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
+ const u64 cd_type_cmd_tso_mss,
+ const u32 cd_tunneling, const u32 cd_l2tag2)
+{
+ struct i40e_tx_context_desc *context_desc;
+ int i = tx_ring->next_to_use;
+
+ if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
+ return;
+
+ /* grab the next descriptor */
+ context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ /* cpu_to_le32 and assign to struct fields */
+ context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
+ context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+ context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
+}
+
+/**
+ * i40e_tx_map - Build the Tx descriptor
+ * @tx_ring: ring to send buffer on
+ * @skb: send buffer
+ * @first: first buffer info buffer to use
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ * @td_cmd: the command field in the descriptor
+ * @td_offset: offset for checksum or crc
+ **/
+static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
+{
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ struct skb_frag_struct *frag;
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_tx_desc *tx_desc;
+ u16 i = tx_ring->next_to_use;
+ u32 td_tag = 0;
+ dma_addr_t dma;
+ u16 gso_segs;
+
+ if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
+ td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+ td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
+ I40E_TX_FLAGS_VLAN_SHIFT;
+ }
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+ gso_segs = skb_shinfo(skb)->gso_segs;
+ else
+ gso_segs = 1;
+
+ /* multiply data chunks by size of headers */
+ first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
+ first->gso_segs = gso_segs;
+ first->skb = skb;
+ first->tx_flags = tx_flags;
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ tx_bi = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+
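+ /* split buffers larger than the per-descriptor limit across
+ * multiple data descriptors
+ */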
+ while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset,
+ I40E_MAX_DATA_PER_TXD, td_tag);
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ dma += I40E_MAX_DATA_PER_TXD;
+ size -= I40E_MAX_DATA_PER_TXD;
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ }
+
+ if (likely(!data_len))
+ break;
+
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+ size, td_tag);
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+
+ tx_bi = &tx_ring->tx_bi[i];
+ }
+
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+
+ netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ first->bytecount);
+
+ /* set the timestamp */
+ first->time_stamp = jiffies;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ /* notify HW of packet */
+ writel(i, tx_ring->tail);
+
+ return;
+
+dma_error:
+ dev_info(tx_ring->dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_bi map */
+ for (;;) {
+ tx_bi = &tx_ring->tx_bi[i];
+ i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
+ if (tx_bi == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
+ tx_ring->next_to_use = i;
+}
+
+/**
+ * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the number of descriptors we want to make sure are available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Memory barrier before checking head and tail */
+ smp_mb();
+
+ /* Check again in case another CPU has just made room available. */
+ if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ return 0;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the number of descriptors we want to make sure are available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40e_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns the number of data descriptors needed for this skb. Returns 0 to
+ * indicate there are not enough descriptors available in this ring, since we
+ * need at least one descriptor.
+ **/
+static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+ unsigned int f;
+#endif
+ int count = 0;
+
+ /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ count += TXD_USE_COUNT(skb_headlen(skb));
+ if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+ tx_ring->tx_stats.tx_busy++;
+ return 0;
+ }
+ return count;
+}
+
+/**
+ * i40e_xmit_frame_ring - Sends buffer on Tx ring
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+ u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
+ u32 cd_tunneling = 0, cd_l2tag2 = 0;
+ struct i40e_tx_buffer *first;
+ u32 td_offset = 0;
+ u32 tx_flags = 0;
+ __be16 protocol;
+ u32 td_cmd = 0;
+ u8 hdr_len = 0;
+ int tso;
+
+ if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+ return NETDEV_TX_BUSY;
+
+ /* prepare the xmit flags */
+ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+ goto out_drop;
+
+ /* obtain protocol of skb */
+ protocol = skb->protocol;
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_bi[tx_ring->next_to_use];
+
+ /* setup IPv4/IPv6 offloads */
+ if (protocol == htons(ETH_P_IP))
+ tx_flags |= I40E_TX_FLAGS_IPV4;
+ else if (protocol == htons(ETH_P_IPV6))
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+
+ tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+ &cd_type_cmd_tso_mss, &cd_tunneling);
+
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
+ tx_flags |= I40E_TX_FLAGS_TSO;
+
+ skb_tx_timestamp(skb);
+
+ /* always enable CRC insertion offload */
+ td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
+ /* Always offload the checksum, since it's in the data descriptor */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ tx_flags |= I40E_TX_FLAGS_CSUM;
+
+ i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+ tx_ring, &cd_tunneling);
+ }
+
+ i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
+ cd_tunneling, cd_l2tag2);
+
+ i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+ td_cmd, td_offset);
+
+ i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping];
+
+ /* hardware can't handle really short frames; hardware padding works
+ * beyond this point
+ */
+ if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
+ if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
+ return NETDEV_TX_OK;
+ skb->len = I40E_MIN_TX_LEN;
+ skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
+ }
+
+ return i40e_xmit_frame_ring(skb, tx_ring);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
new file mode 100644
index 000000000000..10bf49e18d7f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -0,0 +1,296 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
+
+/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+
+#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
+#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
+#define I40E_MAX_IRATE 0x03F
+#define I40E_MIN_IRATE 0x001
+#define I40E_IRATE_USEC_RESOLUTION 4
+#define I40E_ITR_100K 0x0005
+#define I40E_ITR_20K 0x0019
+#define I40E_ITR_8K 0x003E
+#define I40E_ITR_4K 0x007A
+#define I40E_ITR_RX_DEF I40E_ITR_8K
+#define I40E_ITR_TX_DEF I40E_ITR_4K
+#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
+#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
+#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
+#define I40E_DEFAULT_IRQ_WORK 256
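+/* ITR registers count in 2 usec units, hence the shift by one below */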
+#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
+#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
+#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
+
+#define I40E_QUEUE_END_OF_LIST 0x7FF
+
+/* this enum matches hardware bits and is meant to be used by DYN_CTLN
+ * registers and QINT registers, or more generally anywhere the manual
+ * mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
+ * register; instead it is a special value meaning "don't update" ITR0/1/2.
+ */
+enum i40e_dyn_idx_t {
+ I40E_IDX_ITR0 = 0,
+ I40E_IDX_ITR1 = 1,
+ I40E_IDX_ITR2 = 2,
+ I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
+};
+
+/* these are indexes into ITRN registers */
+#define I40E_RX_ITR I40E_IDX_ITR0
+#define I40E_TX_ITR I40E_IDX_ITR1
+#define I40E_PE_ITR I40E_IDX_ITR2
+
+/* Supported RSS offloads */
+#define I40E_DEFAULT_RSS_HENA ( \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+/* Supported Rx Buffer Sizes */
+#define I40E_RXBUFFER_512 512 /* Used for packet split */
+#define I40E_RXBUFFER_2048 2048
+#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
+#define I40E_RXBUFFER_4096 4096
+#define I40E_RXBUFFER_8192 8192
+#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */
+
+/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
+ * this adds up to 512 bytes of extra data, meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40E_RX_NEXT_DESC(r, i, n) \
+ do { \
+ (i)++; \
+ if ((i) == (r)->count) \
+ i = 0; \
+ (n) = I40E_RX_DESC((r), (i)); \
+ } while (0)
+
+#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \
+ do { \
+ I40E_RX_NEXT_DESC((r), (i), (n)); \
+ prefetch((n)); \
+ } while (0)
+
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+#define I40E_MIN_TX_LEN 17
+#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
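+/* the extra 4 leaves room for the skb head, a context descriptor, and the
+ * two-descriptor tail/head gap (cf. i40e_xmit_descriptor_count)
+ */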
+
+#define I40E_TX_FLAGS_CSUM (u32)(1)
+#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
+#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
+#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
+#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
+#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
+#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
+#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
+#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
+#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
+#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
+#define I40E_TX_FLAGS_VLAN_SHIFT 16
+
+struct i40e_tx_buffer {
+ struct i40e_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
+};
+
+struct i40e_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ unsigned int page_offset;
+};
+
+struct i40e_queue_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 tx_done_old;
+};
+
+struct i40e_rx_queue_stats {
+ u64 non_eop_descs;
+ u64 alloc_page_failed;
+ u64 alloc_buff_failed;
+};
+
+enum i40e_ring_state_t {
+ __I40E_TX_FDIR_INIT_DONE,
+ __I40E_TX_XPS_INIT_DONE,
+ __I40E_TX_DETECT_HANG,
+ __I40E_HANG_CHECK_ARMED,
+ __I40E_RX_PS_ENABLED,
+ __I40E_RX_LRO_ENABLED,
+ __I40E_RX_16BYTE_DESC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+ test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+ set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+ clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+ test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+ set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+ clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_lro_enabled(ring) \
+ test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define set_ring_lro_enabled(ring) \
+ set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define clear_ring_lro_enabled(ring) \
+ clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define ring_is_16byte_desc_enabled(ring) \
+ test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define set_ring_16byte_desc_enabled(ring) \
+ set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define clear_ring_16byte_desc_enabled(ring) \
+ clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+
+/* struct that defines a descriptor ring, associated with a VSI */
+struct i40e_ring {
+ struct i40e_ring *next; /* pointer to next ring in q_vector */
+ void *desc; /* Descriptor ring memory */
+ struct device *dev; /* Used for DMA mapping */
+ struct net_device *netdev; /* netdev ring maps to */
+ union {
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_rx_buffer *rx_bi;
+ };
+ unsigned long state;
+ u16 queue_index; /* Queue number of ring */
+ u8 dcb_tc; /* Traffic class of ring */
+ u8 __iomem *tail;
+
+ u16 count; /* Number of descriptors */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 rx_hdr_len;
+ u16 rx_buf_len;
+ u8 dtype;
+#define I40E_RX_DTYPE_NO_SPLIT 0
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 1
+#define I40E_RX_DTYPE_HEADER_SPLIT 2
+ u8 hsplit;
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
+
+ /* used in interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ u8 atr_sample_rate;
+ u8 atr_count;
+
+ bool ring_active; /* is ring online or not */
+
+ /* stats structs */
+ struct i40e_queue_stats stats;
+ struct u64_stats_sync syncp;
+ union {
+ struct i40e_tx_queue_stats tx_stats;
+ struct i40e_rx_queue_stats rx_stats;
+ };
+
+ unsigned int size; /* length of descriptor ring in bytes */
+ dma_addr_t dma; /* physical address of ring */
+
+ struct i40e_vsi *vsi; /* Backreference to associated VSI */
+ struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+
+ struct rcu_head rcu; /* to avoid race on free */
+} ____cacheline_internodealigned_in_smp;
+
+enum i40e_latency_range {
+ I40E_LOWEST_LATENCY = 0,
+ I40E_LOW_LATENCY = 1,
+ I40E_BULK_LATENCY = 2,
+};
+
+struct i40e_ring_container {
+ /* array of pointers to rings */
+ struct i40e_ring *ring;
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 count;
+ enum i40e_latency_range latency_range;
+ u16 itr;
+};
+
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
+
+void i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
+void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
+int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring);
+int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
+void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
+void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
+int i40evf_napi_poll(struct napi_struct *napi, int budget);
+#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
new file mode 100644
index 000000000000..3bffac06592f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -0,0 +1,1152 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_SFP_X710 0x1573
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_KX_D 0x1582
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
+
+#define I40E_MAX_VSI_QP 16
+#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_CHAINED_RX_BUFFERS 5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT 18000
+
+/* Switch from ms to the 2usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+#define ETH_ALEN 6
+
+/* Data type manipulation macros. */
+
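+/* number of free descriptors in the ring, accounting for wrap-around;
+ * one slot is always left unused so a full ring is not mistaken for empty
+ */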
+#define I40E_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_VM_QUEUE 0x1
+#define I40E_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum i40e_debug_mask {
+ I40E_DEBUG_INIT = 0x00000001,
+ I40E_DEBUG_RELEASE = 0x00000002,
+
+ I40E_DEBUG_LINK = 0x00000010,
+ I40E_DEBUG_PHY = 0x00000020,
+ I40E_DEBUG_HMC = 0x00000040,
+ I40E_DEBUG_NVM = 0x00000080,
+ I40E_DEBUG_LAN = 0x00000100,
+ I40E_DEBUG_FLOW = 0x00000200,
+ I40E_DEBUG_DCB = 0x00000400,
+ I40E_DEBUG_DIAG = 0x00000800,
+
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
+ I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
+ I40E_DEBUG_AQ = 0x0F000000,
+
+ I40E_DEBUG_USER = 0xF0000000,
+
+ I40E_DEBUG_ALL = 0xFFFFFFFF
+};
+
+/* PCI Bus Info */
+#define I40E_PCI_LINK_WIDTH_1 0x10
+#define I40E_PCI_LINK_WIDTH_2 0x20
+#define I40E_PCI_LINK_WIDTH_4 0x40
+#define I40E_PCI_LINK_WIDTH_8 0x80
+#define I40E_PCI_LINK_SPEED_2500 0x1
+#define I40E_PCI_LINK_SPEED_5000 0x2
+#define I40E_PCI_LINK_SPEED_8000 0x3
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+ I40E_MAC_UNKNOWN = 0,
+ I40E_MAC_X710,
+ I40E_MAC_XL710,
+ I40E_MAC_VF,
+ I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+ I40E_MEDIA_TYPE_UNKNOWN = 0,
+ I40E_MEDIA_TYPE_FIBER,
+ I40E_MEDIA_TYPE_BASET,
+ I40E_MEDIA_TYPE_BACKPLANE,
+ I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_DA,
+ I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+ I40E_FC_NONE = 0,
+ I40E_FC_RX_PAUSE,
+ I40E_FC_TX_PAUSE,
+ I40E_FC_FULL,
+ I40E_FC_PFC,
+ I40E_FC_DEFAULT
+};
+
+enum i40e_vsi_type {
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1,
+ I40E_VSI_VMDQ2,
+ I40E_VSI_CTRL,
+ I40E_VSI_FCOE,
+ I40E_VSI_MIRROR,
+ I40E_VSI_SRIOV,
+ I40E_VSI_FDIR,
+ I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+ I40E_QUEUE_TYPE_RX = 0,
+ I40E_QUEUE_TYPE_TX,
+ I40E_QUEUE_TYPE_PE_CEQ,
+ I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+ enum i40e_aq_phy_type phy_type;
+ enum i40e_aq_link_speed link_speed;
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 loopback;
+ /* is Link Status Event notification to SW enabled */
+ bool lse_enable;
+};
+
+struct i40e_phy_info {
+ struct i40e_link_status link_info;
+ struct i40e_link_status link_info_old;
+ u32 autoneg_advertised;
+ u32 phy_id;
+ u32 module_type;
+ bool get_link_info;
+ enum i40e_media_type media_type;
+};
+
+#define I40E_HW_CAP_MAX_GPIO 30
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+ u32 switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB 0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+
+ u32 management_mode;
+ u32 npar_enable;
+ u32 os2bmc;
+ u32 valid_functions;
+ bool sr_iov_1_1;
+ bool vmdq;
+ bool evb_802_1_qbg; /* Edge Virtual Bridging */
+ bool evb_802_1_qbh; /* Bridge Port Extension */
+ bool dcb;
+ bool fcoe;
+ bool mfp_mode_1;
+ bool mgmt_cem;
+ bool ieee_1588;
+ bool iwarp;
+ bool fd;
+ u32 fd_filters_guaranteed;
+ u32 fd_filters_best_effort;
+ bool rss;
+ u32 rss_table_size;
+ u32 rss_table_entry_width;
+ bool led[I40E_HW_CAP_MAX_GPIO];
+ bool sdp[I40E_HW_CAP_MAX_GPIO];
+ u32 nvm_image_type;
+ u32 num_flow_director_filters;
+ u32 num_vfs;
+ u32 vf_base_id;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors;
+ u32 num_msix_vectors_vf;
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+ u32 mdio_port_num;
+ u32 mdio_port_mode;
+ u8 rx_buf_chain_len;
+ u32 enabled_tcmap;
+ u32 maxtc;
+};
+
+struct i40e_mac_info {
+ enum i40e_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+ I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+ I40E_RESOURCE_READ = 1,
+ I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+ u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
+ u64 hw_semaphore_wait; /* - || - */
+ u32 timeout; /* [ms] */
+ u16 sr_size; /* Shadow RAM size in words */
+ bool blank_nvm_mode; /* is NVM empty (no FW present) */
+ u16 version; /* NVM package version */
+ u32 eetrack; /* NVM data version */
+};
+
+/* PCI bus types */
+enum i40e_bus_type {
+ i40e_bus_type_unknown = 0,
+ i40e_bus_type_pci,
+ i40e_bus_type_pcix,
+ i40e_bus_type_pci_express,
+ i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+ i40e_bus_speed_unknown = 0,
+ i40e_bus_speed_33 = 33,
+ i40e_bus_speed_66 = 66,
+ i40e_bus_speed_100 = 100,
+ i40e_bus_speed_120 = 120,
+ i40e_bus_speed_133 = 133,
+ i40e_bus_speed_2500 = 2500,
+ i40e_bus_speed_5000 = 5000,
+ i40e_bus_speed_8000 = 8000,
+ i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+ i40e_bus_width_unknown = 0,
+ i40e_bus_width_pcie_x1 = 1,
+ i40e_bus_width_pcie_x2 = 2,
+ i40e_bus_width_pcie_x4 = 4,
+ i40e_bus_width_pcie_x8 = 8,
+ i40e_bus_width_32 = 32,
+ i40e_bus_width_64 = 64,
+ i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+ enum i40e_bus_speed speed;
+ enum i40e_bus_width width;
+ enum i40e_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+ enum i40e_fc_mode current_mode; /* FC mode in effect */
+ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS 8
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DCBX_MAX_APPS 32
+#define I40E_LLDPDU_SIZE 1500
+
+/* IEEE 802.1Qaz ETS Configuration data */
+struct i40e_ieee_ets_config {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz ETS Recommendation data */
+struct i40e_ieee_ets_recommend {
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz PFC Configuration data */
+struct i40e_ieee_pfc_config {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcenable;
+};
+
+/* IEEE 802.1Qaz Application Priority data */
+struct i40e_ieee_app_priority_table {
+ u8 priority;
+ u8 selector;
+ u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+ u32 numapps;
+ struct i40e_ieee_ets_config etscfg;
+ struct i40e_ieee_ets_recommend etsrec;
+ struct i40e_ieee_pfc_config pfc;
+ struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+ u8 __iomem *hw_addr;
+ void *back;
+
+ /* function pointer structs */
+ struct i40e_phy_info phy;
+ struct i40e_mac_info mac;
+ struct i40e_bus_info bus;
+ struct i40e_nvm_info nvm;
+ struct i40e_fc_info fc;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 port;
+ bool adapter_stopped;
+
+ /* capabilities for entire device and PCI func */
+ struct i40e_hw_capabilities dev_caps;
+ struct i40e_hw_capabilities func_caps;
+
+ /* Flow Director shared filter space */
+ u16 fdir_shared_filter_count;
+
+ /* device profile info */
+ u8 pf_id;
+ u16 main_vsi_seid;
+
+ /* Closest numa node to the device */
+ u16 numa_node;
+
+ /* Admin Queue info */
+ struct i40e_adminq_info aq;
+
+ /* HMC info */
+ struct i40e_hmc_info hmc; /* HMC info struct */
+
+ /* LLDP/DCBX Status */
+ u16 dcbx_status;
+
+ /* DCBX info */
+ struct i40e_dcbx_config local_dcbx_config;
+ struct i40e_dcbx_config remote_dcbx_config;
+
+ /* debug mask */
+ u32 debug_mask;
+};
+
+struct i40e_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
+
+enum i40e_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_STATUS_DD_SHIFT = 0,
+ I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
+ I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+ I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+ I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18
+};
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
+ I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+ I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
+ I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ I40E_RX_DESC_FLTSTAT_RSV = 2,
+ I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define I40E_RXD_QW1_ERROR_SHIFT 19
+#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
+ I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
+ I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
+ I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
+ I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+ I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
+ I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
+ I40E_RX_DESC_ERROR_L3L4E_FC = 2,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT 30
+#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
+};
+
+struct i40e_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+ I40E_RX_PTYPE_OUTER_L2 = 0,
+ I40E_RX_PTYPE_OUTER_IP = 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+ I40E_RX_PTYPE_OUTER_NONE = 0,
+ I40E_RX_PTYPE_OUTER_IPV4 = 0,
+ I40E_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+ I40E_RX_PTYPE_NOT_FRAG = 0,
+ I40E_RX_PTYPE_FRAG = 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+ I40E_RX_PTYPE_TUNNEL_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+ I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+ I40E_RX_PTYPE_INNER_PROT_NONE = 0,
+ I40E_RX_PTYPE_INNER_PROT_UDP = 1,
+ I40E_RX_PTYPE_INNER_PROT_TCP = 2,
+ I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
+ I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
+ I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+ I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT)
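+
+/* Field extraction sketch (illustrative, not part of this header): given the
+ * Rx descriptor writeback quad word, e.g.
+ *
+ *	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ *	u32 plen  = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ *		    I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ *
+ * the same shift/mask pattern applies to the STATUS, ERROR and PTYPE fields
+ * defined above. The rx_desc->wb.qword1.status_error_len layout is assumed
+ * here for illustration only.
+ */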
+
+enum i40e_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
+ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+enum i40e_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+ I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+ I40E_TX_DESC_DTYPE_DATA = 0x0,
+ I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
+ I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT 4
+#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+ I40E_TX_DESC_CMD_EOP = 0x0001,
+ I40E_TX_DESC_CMD_RS = 0x0002,
+ I40E_TX_DESC_CMD_ICRC = 0x0004,
+ I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ I40E_TX_DESC_CMD_DUMMY = 0x0010,
+ I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ I40E_TX_DESC_CMD_FCOET = 0x0080,
+ I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT 16
+#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT 48
+#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
+#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+ I40E_TX_CTX_DESC_TSO = 0x01,
+ I40E_TX_CTX_DESC_TSYN = 0x02,
+ I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
+ I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ I40E_TX_CTX_DESC_SWPE = 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
+#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
+#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+ I40E_TX_CTX_EXT_IP_NONE = 0x0,
+ I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ I40E_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
+#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
+ I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ I40E_TXD_CTX_QW0_DECTTL_SHIFT)
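+
+/* Composition sketch (illustrative, not part of this header): a TSO context
+ * descriptor's type_cmd_tso_mss quad word can be assembled from the fields
+ * above roughly as
+ *
+ *	u64 qw1 = I40E_TX_DESC_DTYPE_CONTEXT |
+ *		  ((u64)I40E_TX_CTX_DESC_TSO << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ *		  ((u64)tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ *		  ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ *	context_desc->type_cmd_tso_mss = cpu_to_le64(qw1);
+ *
+ * where tso_len and mss are caller-supplied values; the exact flow in the
+ * driver may differ.
+ */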
+
+struct i40e_filter_program_desc {
+ __le32 qindex_flex_ptype_vsi;
+ __le32 rsvd;
+ __le32 dtype_cmd_cntindex;
+ __le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+ /* Note: Values 0-28 are reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Values 37-38 are reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OX = 48,
+ I40E_FILTER_PCTYPE_FCOE_RX = 49,
+ I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+enum i40e_filter_program_desc_dest {
+ I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
+#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
+ I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
+enum i40e_filter_type {
+ I40E_FLOW_DIRECTOR_FLTR = 0,
+ I40E_PE_QUAD_HASH_FLTR = 1,
+ I40E_ETHERTYPE_FLTR,
+ I40E_FCOE_CTX_FLTR,
+ I40E_MAC_VLAN_FLTR,
+ I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 vsi_number;
+ u16 vsis_allocated;
+ u16 vsis_unallocated;
+ u16 flags;
+ u8 pf_num;
+ u8 vf_num;
+ u8 connection_type;
+ struct i40e_aqc_vsi_properties_data info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_errors; /* repc */
+ u64 rx_missed; /* rmpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+ /* eth stats collected by the port */
+ struct i40e_eth_stats eth;
+
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_length_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_packet_dropped; /* mspdc */
+ u64 checksum_error; /* xec */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD 0x00
+#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_NVM_WAKE_ON_LAN 0x19
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
+#define I40E_SR_NVM_EETRACK_LO 0x2D
+#define I40E_SR_NVM_EETRACK_HI 0x2E
+#define I40E_SR_VPD_PTR 0x2F
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
+#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define I40E_SR_WORDS_IN_1KB 512
+/* The checksum is calculated such that, after adding all the words
+ * (including the checksum word itself), the sum equals 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
+
+#define I40E_SRRD_SRCTL_ATTEMPTS 100000
+
+enum i40e_switch_element_types {
+ I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
+ I40E_SWITCH_ELEMENT_TYPE_PF = 2,
+ I40E_SWITCH_ELEMENT_TYPE_VF = 3,
+ I40E_SWITCH_ELEMENT_TYPE_EMP = 4,
+ I40E_SWITCH_ELEMENT_TYPE_BMC = 6,
+ I40E_SWITCH_ELEMENT_TYPE_PE = 16,
+ I40E_SWITCH_ELEMENT_TYPE_VEB = 17,
+ I40E_SWITCH_ELEMENT_TYPE_PA = 18,
+ I40E_SWITCH_ELEMENT_TYPE_VSI = 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+ I40E_ETHER_TYPE_1588 = 0,
+ I40E_ETHER_TYPE_FIP = 1,
+ I40E_ETHER_TYPE_OUI_EXTENDED = 2,
+ I40E_ETHER_TYPE_MAC_CONTROL = 3,
+ I40E_ETHER_TYPE_LLDP = 4,
+ I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5,
+ I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6,
+ I40E_ETHER_TYPE_QCN_CNM = 7,
+ I40E_ETHER_TYPE_8021X = 8,
+ I40E_ETHER_TYPE_ARP = 9,
+ I40E_ETHER_TYPE_RSV1 = 10,
+ I40E_ETHER_TYPE_RSV2 = 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE 1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+ I40E_HASH_FILTER_SIZE_1K = 0,
+ I40E_HASH_FILTER_SIZE_2K = 1,
+ I40E_HASH_FILTER_SIZE_4K = 2,
+ I40E_HASH_FILTER_SIZE_8K = 3,
+ I40E_HASH_FILTER_SIZE_16K = 4,
+ I40E_HASH_FILTER_SIZE_32K = 5,
+ I40E_HASH_FILTER_SIZE_64K = 6,
+ I40E_HASH_FILTER_SIZE_128K = 7,
+ I40E_HASH_FILTER_SIZE_256K = 8,
+ I40E_HASH_FILTER_SIZE_512K = 9,
+ I40E_HASH_FILTER_SIZE_1M = 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE 512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+ I40E_DMA_CNTX_SIZE_512 = 0,
+ I40E_DMA_CNTX_SIZE_1K = 1,
+ I40E_DMA_CNTX_SIZE_2K = 2,
+ I40E_DMA_CNTX_SIZE_4K = 3,
+ I40E_DMA_CNTX_SIZE_8K = 4,
+ I40E_DMA_CNTX_SIZE_16K = 5,
+ I40E_DMA_CNTX_SIZE_32K = 6,
+ I40E_DMA_CNTX_SIZE_64K = 7,
+ I40E_DMA_CNTX_SIZE_128K = 8,
+ I40E_DMA_CNTX_SIZE_256K = 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+ I40E_HASH_LUT_SIZE_128 = 0,
+ I40E_HASH_LUT_SIZE_512 = 1,
+};
+
+/* Structure to hold a per PF filter control settings */
+struct i40e_filter_control_settings {
+ /* number of PE Quad Hash filter buckets */
+ enum i40e_hash_filter_size pe_filt_num;
+ /* number of PE Quad Hash contexts */
+ enum i40e_dma_cntx_size pe_cntx_num;
+ /* number of FCoE filter buckets */
+ enum i40e_hash_filter_size fcoe_filt_num;
+ /* number of FCoE DDP contexts */
+ enum i40e_dma_cntx_size fcoe_cntx_num;
+ /* size of the Hash LUT */
+ enum i40e_hash_lut_size hash_lut_size;
+ /* enable FDIR filters for PF and its VFs */
+ bool enable_fdir;
+ /* enable Ethertype filters for PF and its VFs */
+ bool enable_ethtype;
+ /* enable MAC/VLAN filters for PF and its VFs */
+ bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+ u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
+ u16 etype_used; /* Used perfect EtherType filters */
+ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */
+ u16 etype_free; /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+ I40E_RESET_POR = 0,
+ I40E_RESET_CORER = 1,
+ I40E_RESET_GLOBR = 2,
+ I40E_RESET_EMPR = 3,
+};
+#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
new file mode 100644
index 000000000000..ccf45d04b7ef
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -0,0 +1,364 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues across all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value is
+ * of type i40e_status_code, defined in i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* The VF sends requests to the PF for the
+ * following ops.
+ */
+ I40E_VIRTCHNL_OP_UNKNOWN = 0,
+ I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_VIRTCHNL_OP_FCOE,
+/* The PF sends status change events to VFs using
+ * the following op.
+ */
+ I40E_VIRTCHNL_OP_EVENT,
+};
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ i40e_status v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+/* Message descriptions and data structures. */
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR 0
+struct i40e_virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * VF sends this request to PF with no parameters
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum i40e_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[ETH_ALEN];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+
+struct i40e_virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_offload_flags;
+ u32 max_fcoe_contexts;
+ u32 max_fcoe_filters;
+
+ struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled;
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled;
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u64 dma_ring_addr;
+ enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct i40e_virtchnl_txq_info txq;
+ struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ struct i40e_virtchnl_queue_pair_info qpair[1];
+};
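+
+/* Illustrative sizing note: because qpair[1] is a variable-length trailing
+ * array, the message buffer carrying this structure is typically sized as
+ * sizeof(struct i40e_virtchnl_vsi_queue_config_info) plus
+ * (num_queue_pairs - 1) * sizeof(struct i40e_virtchnl_queue_pair_info).
+ */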
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct i40e_virtchnl_vector_map vecmap[1];
+};
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * The VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+ u8 addr[ETH_ALEN];
+ u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct i40e_virtchnl_ether_addr list[1];
+};
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+ I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+ I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+ I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+ I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO 0
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct i40e_virtchnl_pf_event {
+ enum i40e_virtchnl_event_codes event;
+ union {
+ struct {
+ enum i40e_aq_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+/* The following are TBD, not necessary for LAN functionality.
+ * I40E_VIRTCHNL_OP_FCOE
+ */
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware is in a
+ * reset state, it will return DEADBEEF, which, when masked, results in 3.
+ */
+enum i40e_vfr_states {
+ I40E_VFR_INPROGRESS = 0,
+ I40E_VFR_COMPLETED,
+ I40E_VFR_VFACTIVE,
+ I40E_VFR_UNKNOWN,
+};
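+
+/* Polling sketch (illustrative): a VF driver typically reads the state as
+ *
+ *	rstat = rd32(hw, I40E_VFGEN_RSTAT) & I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ *
+ * and waits until rstat reports I40E_VFR_COMPLETED or I40E_VFR_VFACTIVE
+ * before reinitializing the admin queue. The register field mask name is
+ * assumed from the register definitions.
+ */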
+
+#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
new file mode 100644
index 000000000000..ff6529b288a1
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -0,0 +1,321 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40EVF_H_
+#define _I40EVF_H_
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <net/udp.h>
+#include <linux/sctp.h>
+
+
+#include "i40e_type.h"
+#include "i40e_virtchnl.h"
+#include "i40e_txrx.h"
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+#define PFX "i40evf: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+ ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+ __func__ , ## args)))
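+
+/* Example use (illustrative): DPRINTK(PROBE, INFO, "link is up\n") prints
+ * only when the NETIF_MSG_PROBE bit is set in adapter->msg_enable.
+ */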
+
+/* dummy struct to make common code less painful */
+struct i40e_vsi {
+ struct i40evf_adapter *back;
+ struct net_device *netdev;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u16 seid;
+ u16 id;
+ unsigned long state;
+ int base_vector;
+ u16 work_limit;
+	/* High bit set means dynamic; use the accessor routines to read/write.
+	 * Hardware only supports 2us resolution for the ITR registers.
+	 * These values always store the USER setting and must be converted
+	 * before being programmed into a register.
+	 */
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+};
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define I40EVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40EVF_DEFAULT_TXD 512
+#define I40EVF_DEFAULT_RXD 512
+#define I40EVF_MAX_TXD 4096
+#define I40EVF_MIN_TXD 64
+#define I40EVF_MAX_RXD 4096
+#define I40EVF_MIN_RXD 64
+#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 8
+
+/* Supported Rx Buffer Sizes */
+#define I40EVF_RXBUFFER_64 64 /* Used for packet split */
+#define I40EVF_RXBUFFER_128 128 /* Used for packet split */
+#define I40EVF_RXBUFFER_256 256 /* Used for packet split */
+#define I40EVF_RXBUFFER_2048 2048
+#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+#define I40EVF_MAX_AQ_BUF_SIZE 4096
+#define I40EVF_AQ_LEN 32
+#define I40EVF_AQ_MAX_ERR 10 /* times to try before resetting AQ */
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define I40E_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
+#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i]))
+#define I40E_TX_CTXTDESC(R, i) \
+ (&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
+#define MAX_RX_QUEUES 8
+#define MAX_TX_QUEUES MAX_RX_QUEUES
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct i40e_q_vector {
+ struct i40evf_adapter *adapter;
+ struct i40e_vsi *vsi;
+ struct napi_struct napi;
+ unsigned long reg_idx;
+ struct i40e_ring_container rx;
+ struct i40e_ring_container tx;
+ u32 ring_mask;
+ u8 num_ringpairs; /* total number of ring pairs in vector */
+ int v_idx; /* vector index in list */
+ char name[IFNAMSIZ + 9];
+ cpumask_var_t affinity_mask;
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways. The lowest value
+ * supported by all of the i40e hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+ ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
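+
+/* For example, EITR_INTS_PER_SEC_TO_REG(8000) evaluates to 488, and applying
+ * the same macro to 488 gives back roughly 8004 interrupts/sec.
+ */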
+
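+/* Number of unused descriptors in ring R, accounting for ring wrap-around. */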
+#define I40EVF_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
+
+#define I40EVF_RX_DESC_ADV(R, i) \
+ (&(((union i40e_adv_rx_desc *)((R).desc))[i]))
+#define I40EVF_TX_DESC_ADV(R, i) \
+ (&(((union i40e_adv_tx_desc *)((R).desc))[i]))
+#define I40EVF_TX_CTXTDESC_ADV(R, i) \
+ (&(((struct i40e_adv_tx_context_desc *)((R).desc))[i]))
+
+#define OTHER_VECTOR 1
+#define NONQ_VECS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 4
+#define MAX_MSIX_COUNT 5
+
+#define MIN_MSIX_Q_VECTORS 1
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS)
+
+#define I40EVF_QUEUE_END_OF_LIST 0x7FF
+#define I40EVF_FREE_VECTOR 0x7FFF
+struct i40evf_mac_filter {
+ struct list_head list;
+ u8 macaddr[ETH_ALEN];
+ bool remove; /* filter needs to be removed */
+ bool add; /* filter needs to be added */
+};
+
+struct i40evf_vlan_filter {
+ struct list_head list;
+ u16 vlan;
+ bool remove; /* filter needs to be removed */
+ bool add; /* filter needs to be added */
+};
+
+/* Driver state. The order of these is important! */
+enum i40evf_state_t {
+ __I40EVF_STARTUP, /* driver loaded, probe complete */
+ __I40EVF_FAILED, /* PF communication failed. Fatal. */
+ __I40EVF_REMOVE, /* driver is being unloaded */
+ __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
+ __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
+ __I40EVF_INIT_SW, /* got resources, setting up structs */
+ /* Below here, watchdog is running */
+ __I40EVF_DOWN, /* ready, can be opened */
+ __I40EVF_TESTING, /* in ethtool self-test */
+ __I40EVF_RESETTING, /* in reset */
+ __I40EVF_RUNNING, /* opened, working */
+};
+
+enum i40evf_critical_section_t {
+ __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
+};
+/* make common code happy */
+#define __I40E_DOWN __I40EVF_DOWN
+
+/* board specific private data structure */
+struct i40evf_adapter {
+ struct timer_list watchdog_timer;
+ struct vlan_group *vlgrp;
+ struct work_struct reset_task;
+ struct work_struct adminq_task;
+ struct delayed_work init_task;
+ struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+ struct list_head vlan_filter_list;
+ char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
+
+ /* Interrupt Throttle Rate */
+ u32 itr_setting;
+ u16 eitr_low;
+ u16 eitr_high;
+
+ /* TX */
+ struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
+ u64 restart_queue;
+ u64 hw_csum_tx_good;
+ u64 lsc_int;
+ u64 hw_tso_ctxt;
+ u64 hw_tso6_ctxt;
+ u32 tx_timeout_count;
+ struct list_head mac_filter_list;
+#ifdef DEBUG
+ bool detect_tx_hung;
+#endif /* DEBUG */
+
+ /* RX */
+ struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
+ int txd_count;
+ int rxd_count;
+ u64 hw_csum_rx_error;
+ u64 hw_rx_no_dma_resources;
+ u64 hw_csum_rx_good;
+ u64 non_eop_descs;
+ int num_msix_vectors;
+ struct msix_entry *msix_entries;
+
+ u64 rx_hdr_split;
+
+ u32 init_state;
+ volatile unsigned long flags;
+#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1)
+#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
+#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
+#define I40EVF_FLAG_RX_PS_ENABLED (u32)(1 << 3)
+#define I40EVF_FLAG_IN_NETPOLL (u32)(1 << 4)
+#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5)
+#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6)
+#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
+/* duplicates for common code */
+#define I40E_FLAG_FDIR_ATR_ENABLED 0
+#define I40E_FLAG_DCB_ENABLED 0
+#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL
+#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
+ /* flags for admin queue service task */
+ u32 aq_required;
+ u32 aq_pending;
+#define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1)
+#define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
+#define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
+#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
+#define I40EVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4)
+#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5)
+#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
+#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
+#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+
+ /* structs defined in i40e_vf.h */
+ struct i40e_hw hw;
+
+ enum i40evf_state_t state;
+ volatile unsigned long crit_section;
+ u64 tx_busy;
+
+ struct work_struct watchdog_task;
+ bool netdev_registered;
+ bool dev_closed;
+ bool link_up;
+ enum i40e_virtchnl_ops current_op;
+ struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
+ struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+ u16 msg_enable;
+ struct i40e_eth_stats current_stats;
+ struct i40e_vsi vsi;
+ u32 aq_wait_count;
+};
+
+struct i40evf_info {
+ enum i40e_mac_type mac;
+ unsigned int flags;
+};
+
+
+/* needed by i40evf_ethtool.c */
+extern char i40evf_driver_name[];
+extern const char i40evf_driver_version[];
+
+int i40evf_up(struct i40evf_adapter *adapter);
+void i40evf_down(struct i40evf_adapter *adapter);
+void i40evf_reinit_locked(struct i40evf_adapter *adapter);
+void i40evf_reset(struct i40evf_adapter *adapter);
+void i40evf_set_ethtool_ops(struct net_device *netdev);
+void i40evf_update_stats(struct i40evf_adapter *adapter);
+void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter);
+int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter);
+void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask);
+
+void i40e_napi_add_all(struct i40evf_adapter *adapter);
+void i40e_napi_del_all(struct i40evf_adapter *adapter);
+
+int i40evf_send_api_ver(struct i40evf_adapter *adapter);
+int i40evf_verify_api_ver(struct i40evf_adapter *adapter);
+int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter);
+int i40evf_get_vf_config(struct i40evf_adapter *adapter);
+void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush);
+void i40evf_configure_queues(struct i40evf_adapter *adapter);
+void i40evf_deconfigure_queues(struct i40evf_adapter *adapter);
+void i40evf_enable_queues(struct i40evf_adapter *adapter);
+void i40evf_disable_queues(struct i40evf_adapter *adapter);
+void i40evf_map_queues(struct i40evf_adapter *adapter);
+void i40evf_add_ether_addrs(struct i40evf_adapter *adapter);
+void i40evf_del_ether_addrs(struct i40evf_adapter *adapter);
+void i40evf_add_vlans(struct i40evf_adapter *adapter);
+void i40evf_del_vlans(struct i40evf_adapter *adapter);
+void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
+void i40evf_request_stats(struct i40evf_adapter *adapter);
+void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval, u8 *msg, u16 msglen);
+#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
new file mode 100644
index 000000000000..b0b1f4bf5ac0
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -0,0 +1,390 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* ethtool support for i40evf */
+#include "i40evf.h"
+
+#include <linux/uaccess.h>
+
+
+struct i40evf_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_offset;
+};
+
+#define I40EVF_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .stat_offset = offsetof(struct i40evf_adapter, _stat) \
+}
+
+/* All stats are u64, so we don't need to track the size of the field. */
+static const struct i40evf_stats i40evf_gstrings_stats[] = {
+ I40EVF_STAT("rx_bytes", current_stats.rx_bytes),
+ I40EVF_STAT("rx_unicast", current_stats.rx_unicast),
+ I40EVF_STAT("rx_multicast", current_stats.rx_multicast),
+ I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast),
+ I40EVF_STAT("rx_discards", current_stats.rx_discards),
+ I40EVF_STAT("rx_errors", current_stats.rx_errors),
+ I40EVF_STAT("rx_missed", current_stats.rx_missed),
+ I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
+ I40EVF_STAT("tx_bytes", current_stats.tx_bytes),
+ I40EVF_STAT("tx_unicast", current_stats.tx_unicast),
+ I40EVF_STAT("tx_multicast", current_stats.tx_multicast),
+ I40EVF_STAT("tx_broadcast", current_stats.tx_broadcast),
+ I40EVF_STAT("tx_discards", current_stats.tx_discards),
+ I40EVF_STAT("tx_errors", current_stats.tx_errors),
+};
+
+#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
+#define I40EVF_QUEUE_STATS_LEN \
+ (((struct i40evf_adapter *) \
+ netdev_priv(netdev))->vsi_res->num_queue_pairs * 4)
+#define I40EVF_STATS_LEN (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN)
+
+/**
+ * i40evf_get_settings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Reports speed/duplex settings. Because this is a VF, we don't know what
+ * kind of link we really have, so we fake it.
+ **/
+static int i40evf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ /* In the future the VF will be able to query the PF for
+ * some information - for now use a dummy value
+ */
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_DUMMY1;
+ ecmd->port = PORT_NONE;
+
+ return 0;
+}
+
+/**
+ * i40evf_get_sset_count - Get length of string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ *
+ * Reports size of string table. This driver only supports
+ * strings for statistics.
+ **/
+static int i40evf_get_sset_count(struct net_device *netdev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return I40EVF_STATS_LEN;
+ else
+ return -ENOTSUPP;
+}
+
+/**
+ * i40evf_get_ethtool_stats - report device statistics
+ * @netdev: network interface device structure
+ * @stats: ethtool statistics structure
+ * @data: pointer to data buffer
+ *
+ * All statistics are added to the data buffer as an array of u64.
+ **/
+static void i40evf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int i, j;
+ char *p;
+
+ for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
+ p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset;
+ data[i] = *(u64 *)p;
+ }
+ for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+ data[i++] = adapter->tx_rings[j]->stats.packets;
+ data[i++] = adapter->tx_rings[j]->stats.bytes;
+ }
+ for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+ data[i++] = adapter->rx_rings[j]->stats.packets;
+ data[i++] = adapter->rx_rings[j]->stats.bytes;
+ }
+}
+
+/**
+ * i40evf_get_strings - Get string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ * @data: buffer for string data
+ *
+ * Builds stats string table.
+ **/
+static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ u8 *p = data;
+ int i;
+
+ if (sset == ETH_SS_STATS) {
+ for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, i40evf_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+/**
+ * i40evf_get_msglevel - Get debug message level
+ * @netdev: network interface device structure
+ *
+ * Returns current debug message level.
+ **/
+static u32 i40evf_get_msglevel(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+/**
+ * i40evf_set_msglevel - Set debug message level
+ * @netdev: network interface device structure
+ * @data: message level
+ *
+ * Set current debug message level. Higher values cause the driver to
+ * be noisier.
+ **/
+static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+/**
+ * i40evf_get_drvinfo - Get driver info
+ * @netdev: network interface device structure
+ * @drvinfo: ethtool driver info structure
+ *
+ * Returns information about the driver and device for display to the user.
+ **/
+static void i40evf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, i40evf_driver_name, 32);
+ strlcpy(drvinfo->version, i40evf_driver_version, 32);
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+}
+
+/**
+ * i40evf_get_ringparam - Get ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Returns current ring parameters. TX and RX rings are reported separately,
+ * but the number of rings is not reported.
+ **/
+static void i40evf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_ring *tx_ring = adapter->tx_rings[0];
+ struct i40e_ring *rx_ring = adapter->rx_rings[0];
+
+ ring->rx_max_pending = I40EVF_MAX_RXD;
+ ring->tx_max_pending = I40EVF_MAX_TXD;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+}
+
+/**
+ * i40evf_set_ringparam - Set ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Sets ring parameters. TX and RX rings are controlled separately, but the
+ * number of rings is not specified, so all rings get the same settings.
+ **/
+static int i40evf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ u32 new_rx_count, new_tx_count;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_tx_count = clamp_t(u32, ring->tx_pending,
+ I40EVF_MIN_TXD,
+ I40EVF_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
+
+ new_rx_count = clamp_t(u32, ring->rx_pending,
+ I40EVF_MIN_RXD,
+ I40EVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
+
+ /* if nothing to do return success */
+ if ((new_tx_count == adapter->txd_count) &&
+ (new_rx_count == adapter->rxd_count))
+ return 0;
+
+ adapter->txd_count = new_tx_count;
+ adapter->rxd_count = new_rx_count;
+
+ if (netif_running(netdev))
+ i40evf_reinit_locked(adapter);
+ return 0;
+}
+
+/**
+ * i40evf_get_coalesce - Get interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Returns current coalescing settings. This is referred to elsewhere in the
+ * driver as Interrupt Throttle Rate, as this is how the hardware describes
+ * this functionality.
+ **/
+static int i40evf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_vsi *vsi = &adapter->vsi;
+
+ ec->tx_max_coalesced_frames = vsi->work_limit;
+ ec->rx_max_coalesced_frames = vsi->work_limit;
+
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+ ec->rx_coalesce_usecs = 1;
+ else
+ ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+
+ if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ ec->tx_coalesce_usecs = 1;
+ else
+ ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+
+ return 0;
+}
+
+/**
+ * i40evf_set_coalesce - Set interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Change current coalescing settings.
+ **/
+static int i40evf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_vsi *vsi = &adapter->vsi;
+ struct i40e_q_vector *q_vector;
+ int i;
+
+ if (ec->tx_max_coalesced_frames || ec->rx_max_coalesced_frames)
+ vsi->work_limit = ec->tx_max_coalesced_frames;
+
+ switch (ec->rx_coalesce_usecs) {
+ case 0:
+ vsi->rx_itr_setting = 0;
+ break;
+ case 1:
+ vsi->rx_itr_setting = (I40E_ITR_DYNAMIC
+ | ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ break;
+ default:
+ if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ return -EINVAL;
+ vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+ break;
+ }
+
+ switch (ec->tx_coalesce_usecs) {
+ case 0:
+ vsi->tx_itr_setting = 0;
+ break;
+ case 1:
+ vsi->tx_itr_setting = (I40E_ITR_DYNAMIC
+ | ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ break;
+ default:
+ if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ return -EINVAL;
+ vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+ break;
+ }
+
+ for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
+ q_vector = adapter->q_vector[i];
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
+ i40e_flush(hw);
+ }
+
+ return 0;
+}
+
+static struct ethtool_ops i40evf_ethtool_ops = {
+ .get_settings = i40evf_get_settings,
+ .get_drvinfo = i40evf_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = i40evf_get_ringparam,
+ .set_ringparam = i40evf_set_ringparam,
+ .get_strings = i40evf_get_strings,
+ .get_ethtool_stats = i40evf_get_ethtool_stats,
+ .get_sset_count = i40evf_get_sset_count,
+ .get_msglevel = i40evf_get_msglevel,
+ .set_msglevel = i40evf_set_msglevel,
+ .get_coalesce = i40evf_get_coalesce,
+ .set_coalesce = i40evf_set_coalesce,
+};
+
+/**
+ * i40evf_set_ethtool_ops - Initialize ethtool ops struct
+ * @netdev: network interface device structure
+ *
+ * Sets ethtool ops struct in our netdev so that ethtool can call
+ * our functions.
+ **/
+void i40evf_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &i40evf_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
new file mode 100644
index 000000000000..f5caf4419243
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -0,0 +1,2353 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
+static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
+static int i40evf_close(struct net_device *netdev);
+
+char i40evf_driver_name[] = "i40evf";
+static const char i40evf_driver_string[] =
+ "Intel(R) XL710 X710 Virtual Function Network Driver";
+
+#define DRV_VERSION "0.9.11"
+const char i40evf_driver_version[] = DRV_VERSION;
+static const char i40evf_copyright[] =
+ "Copyright (c) 2013 Intel Corporation.";
+
+/* i40evf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static DEFINE_PCI_DEVICE_TABLE(i40evf_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
+ /* required last entry */
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ u64 size, u32 alignment)
+{
+ struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
+
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ mem->size = ALIGN(size, alignment);
+ mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
+ (dma_addr_t *)&mem->pa, GFP_KERNEL);
+ if (mem->va)
+ return 0;
+ else
+ return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40evf_free_dma_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+ struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
+
+ if (!mem || !mem->va)
+ return I40E_ERR_PARAM;
+ dma_free_coherent(&adapter->pdev->dev, mem->size,
+ mem->va, (dma_addr_t)mem->pa);
+ return 0;
+}
+
+/**
+ * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ **/
+i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem, u32 size)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ mem->size = size;
+ mem->va = kzalloc(size, GFP_KERNEL);
+
+ if (mem->va)
+ return 0;
+ else
+ return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40evf_free_virt_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ /* it's ok to kfree a NULL pointer */
+ kfree(mem->va);
+
+ return 0;
+}
+
+/**
+ * i40evf_debug_d - OS dependent version of debug printing
+ * @hw: pointer to the HW structure
+ * @mask: debug level mask
+ * @fmt_str: printf-type format description
+ **/
+void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+{
+ char buf[512];
+ va_list argptr;
+
+ if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
+ return;
+
+ va_start(argptr, fmt_str);
+ vsnprintf(buf, sizeof(buf), fmt_str, argptr);
+ va_end(argptr);
+
+ /* the debug string is already formatted with a newline */
+ pr_info("%s", buf);
+}
+
+/**
+ * i40evf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void i40evf_tx_timeout(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ adapter->tx_timeout_count++;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+}
+
+/**
+ * i40evf_misc_irq_disable - Mask off the miscellaneous (admin queue) interrupt
+ * @adapter: board private structure
+ **/
+static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+
+	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
+
+ /* read flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+
+ synchronize_irq(adapter->msix_entries[0].vector);
+}
+
+/**
+ * i40evf_misc_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
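+
+	/* enable the misc (admin queue) vector and unmask the admin queue
+	 * interrupt cause so AQ events generate interrupts again
+	 */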
+ wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
+ I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+ wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
+
+ /* read flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void i40evf_irq_disable(struct i40evf_adapter *adapter)
+{
+ int i;
+ struct i40e_hw *hw = &adapter->hw;
+
+ for (i = 1; i < adapter->num_msix_vectors; i++) {
+ wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
+ synchronize_irq(adapter->msix_entries[i].vector);
+ }
+ /* read flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_irq_enable_queues - Enable interrupt for specified queues
+ * @adapter: board private structure
+ * @mask: bitmap of queues to enable
+ **/
+void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ int i;
+
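+	/* vector 0 is reserved for the admin queue, so queue vectors (and
+	 * their DYN_CTLN1 registers) are indexed starting from 1
+	 */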
+ for (i = 1; i < adapter->num_msix_vectors; i++) {
+ if (mask & (1 << (i - 1))) {
+ wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
+ I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ }
+ }
+}
+
+/**
+ * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
+ * @adapter: board private structure
+ * @mask: bitmap of vectors to trigger
+ **/
+static void i40evf_fire_sw_int(struct i40evf_adapter *adapter,
+ u32 mask)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ int i;
+	u32 dyn_ctl;
+
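+	/* set the software interrupt trigger bit in each selected vector's
+	 * dynamic control register
+	 */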
+ for (i = 1; i < adapter->num_msix_vectors; i++) {
+ if (mask & (1 << i)) {
+ dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
+ dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+ wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
+ }
+ }
+}
+
+/**
+ * i40evf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ * @flush: boolean, whether to flush the write with a register read
+ **/
+void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
+{
+ struct i40e_hw *hw = &adapter->hw;
+
+ i40evf_irq_enable_queues(adapter, ~0);
+
+ if (flush)
+ rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_msix_aq - Interrupt handler for vector 0
+ * @irq: interrupt number
+ * @data: pointer to netdev
+ **/
+static irqreturn_t i40evf_msix_aq(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 val;
+ u32 ena_mask;
+
+ /* handle non-queue interrupts */
+ val = rd32(hw, I40E_VFINT_ICR01);
+ ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
+
+ val = rd32(hw, I40E_VFINT_DYN_CTL01);
+ val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ wr32(hw, I40E_VFINT_DYN_CTL01, val);
+
+ /* re-enable interrupt causes */
+ wr32(hw, I40E_VFINT_ICR0_ENA1, ena_mask);
+ wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
+
+ /* schedule work on the private workqueue */
+ schedule_work(&adapter->adminq_task);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+
+ if (!q_vector->tx.ring && !q_vector->rx.ring)
+ return IRQ_HANDLED;
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40evf_map_vector_to_rxq - associate irqs with rx queues
+ * @adapter: board private structure
+ * @v_idx: interrupt number
+ * @r_idx: queue number
+ **/
+static void
+i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
+{
+ struct i40e_q_vector *q_vector = adapter->q_vector[v_idx];
+ struct i40e_ring *rx_ring = adapter->rx_rings[r_idx];
+
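+	/* link this ring at the head of the vector's Rx ring list */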
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ rx_ring->vsi = &adapter->vsi;
+ q_vector->rx.ring = rx_ring;
+ q_vector->rx.count++;
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+}
+
+/**
+ * i40evf_map_vector_to_txq - associate irqs with tx queues
+ * @adapter: board private structure
+ * @v_idx: interrupt number
+ * @t_idx: queue number
+ **/
+static void
+i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
+{
+ struct i40e_q_vector *q_vector = adapter->q_vector[v_idx];
+ struct i40e_ring *tx_ring = adapter->tx_rings[t_idx];
+
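+	/* link this ring at the head of the vector's Tx ring list and
+	 * account for it in the vector's ring mask
+	 */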
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = q_vector->tx.ring;
+ tx_ring->vsi = &adapter->vsi;
+ q_vector->tx.ring = tx_ring;
+ q_vector->tx.count++;
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ q_vector->num_ringpairs++;
+ q_vector->ring_mask |= (1 << t_idx);
+}
+
+/**
+ * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible. You would add new
+ * mapping configurations in here.
+ **/
+static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
+{
+ int q_vectors;
+ int v_start = 0;
+ int rxr_idx = 0, txr_idx = 0;
+ int rxr_remaining = adapter->vsi_res->num_queue_pairs;
+ int txr_remaining = adapter->vsi_res->num_queue_pairs;
+ int i, j;
+ int rqpv, tqpv;
+ int err = 0;
+
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ /* The ideal configuration...
+ * We have enough vectors to map one per queue.
+ */
+ if (q_vectors == (rxr_remaining * 2)) {
+ for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+ i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
+
+ for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+ i40evf_map_vector_to_txq(adapter, v_start, txr_idx);
+ goto out;
+ }
+
+ /* If we don't have enough vectors for a 1-to-1
+ * mapping, we'll have to group them so there are
+ * multiple queues per vector.
+ * Re-adjusting *qpv takes care of the remainder.
+ */
+ for (i = v_start; i < q_vectors; i++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
+ for (j = 0; j < rqpv; j++) {
+ i40evf_map_vector_to_rxq(adapter, i, rxr_idx);
+ rxr_idx++;
+ rxr_remaining--;
+ }
+ }
+ for (i = v_start; i < q_vectors; i++) {
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
+ for (j = 0; j < tqpv; j++) {
+ i40evf_map_vector_to_txq(adapter, i, txr_idx);
+ txr_idx++;
+ txr_remaining--;
+ }
+ }
+
+out:
+ adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
+	u32 rstat_val;
+ return err;
+}
+
+/**
+ * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ * @basename: device name prefix used when naming the per-queue interrupts
+ *
+ * Allocates MSI-X vectors for tx and rx handling, and requests
+ * interrupts from the kernel.
+ **/
+static int
+i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
+{
+ int vector, err, q_vectors;
+ int rx_int_idx = 0, tx_int_idx = 0;
+
+ i40evf_irq_disable(adapter);
+ /* Decrement for Other and TCP Timer vectors */
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (vector = 0; vector < q_vectors; vector++) {
+ struct i40e_q_vector *q_vector = adapter->q_vector[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "i40evf-%s-%s-%d", basename,
+ "TxRx", rx_int_idx++);
+ tx_int_idx++;
+ } else if (q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "i40evf-%s-%s-%d", basename,
+ "rx", rx_int_idx++);
+ } else if (q_vector->tx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "i40evf-%s-%s-%d", basename,
+ "tx", tx_int_idx++);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(
+ adapter->msix_entries[vector + NONQ_VECS].vector,
+ i40evf_msix_clean_rings,
+ 0,
+ q_vector->name,
+ q_vector);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "%s: request_irq failed, error: %d\n",
+ __func__, err);
+ goto free_queue_irqs;
+ }
+ /* assign the mask for this irq */
+ irq_set_affinity_hint(
+ adapter->msix_entries[vector + NONQ_VECS].vector,
+ q_vector->affinity_mask);
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ irq_set_affinity_hint(
+ adapter->msix_entries[vector + NONQ_VECS].vector,
+ NULL);
+ free_irq(adapter->msix_entries[vector + NONQ_VECS].vector,
+ adapter->q_vector[vector]);
+ }
+ return err;
+}
+
+/**
+ * i40evf_request_misc_irq - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
+ * vector is only for the admin queue, and stays active even when the netdev
+ * is closed.
+ **/
+static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ sprintf(adapter->name[0], "i40evf:mbx");
+ err = request_irq(adapter->msix_entries[0].vector,
+ &i40evf_msix_aq, 0, adapter->name[0], netdev);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "request_irq for msix_aq failed: %d\n", err);
+ }
+ return err;
+}
+
+/**
+ * i40evf_free_traffic_irqs - Free MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * Frees all MSI-X vectors other than 0.
+ **/
+static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
+{
+ int i;
+	int q_vectors;
+
+	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (i = 0; i < q_vectors; i++) {
+ irq_set_affinity_hint(adapter->msix_entries[i+1].vector,
+ NULL);
+ free_irq(adapter->msix_entries[i+1].vector,
+ adapter->q_vector[i]);
+ }
+}
+
+/**
+ * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
+ * @adapter: board private structure
+ *
+ * Frees MSI-X vector 0.
+ **/
+static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->msix_entries[0].vector, netdev);
+}
+
+/**
+ * i40evf_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void i40evf_configure_tx(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+	int i;
+
+	for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+}
+
+/**
+ * i40evf_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void i40evf_configure_rx(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int i;
+ int rx_buf_len;
+
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_CAPABLE;
+ adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
+
+ /* Decide whether to use packet split mode or not */
+ if (netdev->mtu > ETH_DATA_LEN) {
+ if (adapter->flags & I40EVF_FLAG_RX_PS_CAPABLE)
+ adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
+ } else {
+ if (adapter->flags & I40EVF_FLAG_RX_1BUF_CAPABLE)
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
+ }
+
+ /* Set the RX buffer length according to the mode */
+ if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
+ rx_buf_len = I40E_RX_HDR_SIZE;
+ } else {
+ if (netdev->mtu <= ETH_DATA_LEN)
+ rx_buf_len = I40EVF_RXBUFFER_2048;
+ else
+ rx_buf_len = ALIGN(max_frame, 1024);
+ }
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i);
+ adapter->rx_rings[i]->rx_buf_len = rx_buf_len;
+ }
+}
+
+/**
+ * i40evf_find_vlan - Search filter list for specific vlan filter
+ * @adapter: board private structure
+ * @vlan: vlan tag
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct
+i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+ struct i40evf_vlan_filter *f;
+
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (vlan == f->vlan)
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40evf_add_vlan - Add a vlan filter to the list
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+static struct
+i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+ struct i40evf_vlan_filter *f;
+
+ f = i40evf_find_vlan(adapter, vlan);
+ if (NULL == f) {
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (NULL == f) {
+ dev_info(&adapter->pdev->dev,
+ "%s: no memory for new VLAN filter\n",
+ __func__);
+ return NULL;
+ }
+ f->vlan = vlan;
+
+ INIT_LIST_HEAD(&f->list);
+ list_add(&f->list, &adapter->vlan_filter_list);
+ f->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ }
+
+ return f;
+}
+
+/**
+ * i40evf_del_vlan - Remove a vlan filter from the list
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ **/
+static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+ struct i40evf_vlan_filter *f;
+
+ f = i40evf_find_vlan(adapter, vlan);
+ if (f) {
+ f->remove = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ }
+ return;
+}
+
+/**
+ * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
+ * @netdev: network device struct
+ * @proto: unused protocol value
+ * @vid: VLAN tag
+ **/
+static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ if (i40evf_add_vlan(adapter, vid) == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
+ * @netdev: network device struct
+ * @proto: unused protocol value
+ * @vid: VLAN tag
+ **/
+static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ i40evf_del_vlan(adapter, vid);
+ return 0;
+}
+
+/**
+ * i40evf_find_filter - Search filter list for specific mac filter
+ * @adapter: board private structure
+ * @macaddr: the MAC address
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct
+i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
+ u8 *macaddr)
+{
+ struct i40evf_mac_filter *f;
+
+ if (!macaddr)
+ return NULL;
+
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (ether_addr_equal(macaddr, f->macaddr))
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40evf_add_filter - Add a mac filter to the filter list
+ * @adapter: board private structure
+ * @macaddr: the MAC address
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+static struct
+i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
+ u8 *macaddr)
+{
+ struct i40evf_mac_filter *f;
+
+ if (!macaddr)
+ return NULL;
+
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ mdelay(1);
+
+ f = i40evf_find_filter(adapter, macaddr);
+ if (NULL == f) {
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (NULL == f) {
+ dev_info(&adapter->pdev->dev,
+ "%s: no memory for new filter\n", __func__);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section);
+ return NULL;
+ }
+
+ memcpy(f->macaddr, macaddr, ETH_ALEN);
+
+ list_add(&f->list, &adapter->mac_filter_list);
+ f->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ }
+
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ return f;
+}
+
+/**
+ * i40evf_set_mac - NDO callback to set port mac address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_set_mac(struct net_device *netdev, void *p)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40evf_mac_filter *f;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
+ return 0;
+
+ f = i40evf_add_filter(adapter, addr->sa_data);
+ if (f) {
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ }
+
+ return (f == NULL) ? -ENOMEM : 0;
+}
+
+/**
+ * i40evf_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ **/
+static void i40evf_set_rx_mode(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40evf_mac_filter *f, *ftmp;
+ struct netdev_hw_addr *uca;
+ struct netdev_hw_addr *mca;
+
+ /* add addr if not already in the filter list */
+ netdev_for_each_uc_addr(uca, netdev) {
+ i40evf_add_filter(adapter, uca->addr);
+ }
+ netdev_for_each_mc_addr(mca, netdev) {
+ i40evf_add_filter(adapter, mca->addr);
+ }
+
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ mdelay(1);
+ /* remove filter if not in netdev list */
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ bool found = false;
+
+ if (f->macaddr[0] & 0x01) {
+ netdev_for_each_mc_addr(mca, netdev) {
+ if (ether_addr_equal(mca->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+ } else {
+ netdev_for_each_uc_addr(uca, netdev) {
+ if (ether_addr_equal(uca->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+ }
+		if (!found) {
+ f->remove = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ }
+ }
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+}
+
+/**
+ * i40evf_napi_enable_all - enable NAPI on all queue vectors
+ * @adapter: board private structure
+ **/
+static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
+{
+ int q_idx;
+ struct i40e_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ struct napi_struct *napi;
+ q_vector = adapter->q_vector[q_idx];
+ napi = &q_vector->napi;
+ napi_enable(napi);
+ }
+}
+
+/**
+ * i40evf_napi_disable_all - disable NAPI on all queue vectors
+ * @adapter: board private structure
+ **/
+static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
+{
+ int q_idx;
+ struct i40e_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ q_vector = adapter->q_vector[q_idx];
+ napi_disable(&q_vector->napi);
+ }
+}
+
+/**
+ * i40evf_configure - set up transmit and receive data structures
+ * @adapter: board private structure
+ **/
+static void i40evf_configure(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ i40evf_set_rx_mode(netdev);
+
+ i40evf_configure_tx(adapter);
+ i40evf_configure_rx(adapter);
+ adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+
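+	/* fill each Rx ring with buffers and bump the tail pointer so the
+	 * hardware can start using them
+	 */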
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ struct i40e_ring *ring = adapter->rx_rings[i];
+ i40evf_alloc_rx_buffers(ring, ring->count);
+ ring->next_to_use = ring->count - 1;
+ writel(ring->next_to_use, ring->tail);
+ }
+}
+
+/**
+ * i40evf_up_complete - Finish the last steps of bringing up a connection
+ * @adapter: board private structure
+ **/
+static int i40evf_up_complete(struct i40evf_adapter *adapter)
+{
+ adapter->state = __I40EVF_RUNNING;
+ clear_bit(__I40E_DOWN, &adapter->vsi.state);
+
+ i40evf_napi_enable_all(adapter);
+
+ adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ return 0;
+}
+
+/**
+ * i40evf_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ i40evf_clean_rx_ring(adapter->rx_rings[i]);
+}
+
+/**
+ * i40evf_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ i40evf_clean_tx_ring(adapter->tx_rings[i]);
+}
+
+/**
+ * i40evf_down - Shutdown the connection processing
+ * @adapter: board private structure
+ **/
+void i40evf_down(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct i40evf_mac_filter *f;
+
+ /* remove all MAC filters from the VSI */
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ f->remove = true;
+ }
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ /* disable receives */
+ adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ msleep(20);
+
+ netif_tx_disable(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+
+ i40evf_irq_disable(adapter);
+
+ i40evf_napi_disable_all(adapter);
+
+ netif_carrier_off(netdev);
+
+ i40evf_clean_all_tx_rings(adapter);
+ i40evf_clean_all_rx_rings(adapter);
+}
+
+/**
+ * i40evf_acquire_msix_vectors - Setup the MSIX capability
+ * @adapter: board private structure
+ * @vectors: number of vectors to request
+ *
+ * Work with the OS to set up the MSIX vectors needed.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int
+i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
+{
+ int err, vector_threshold;
+
+ /* We'll want at least 3 (vector_threshold):
+ * 0) Other (Admin Queue and link, mostly)
+ * 1) TxQ[0] Cleanup
+ * 2) RxQ[0] Cleanup
+ */
+ vector_threshold = MIN_MSIX_COUNT;
+
+ /* The more we get, the more we will assign to Tx/Rx Cleanup
+ * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+ * Right now, we simply care about how many we'll get; we'll
+ * set them up later while requesting irq's.
+ */
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ vectors);
+ if (!err) /* Success in acquiring all requested vectors. */
+ break;
+ else if (err < 0)
+ vectors = 0; /* Nasty failure, quit now */
+ else /* err == number of vectors we should try again with */
+ vectors = err;
+ }
+
+ if (vectors < vector_threshold) {
+ dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts.\n");
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ err = -EIO;
+ } else {
+ /* Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NONQ_VECS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = vectors;
+ }
+ return err;
+}
+
+/**
+ * i40evf_free_queues - Free memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * Free all of the memory associated with queue pairs.
+ **/
+static void i40evf_free_queues(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ if (!adapter->vsi_res)
+ return;
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ if (adapter->tx_rings[i])
+ kfree_rcu(adapter->tx_rings[i], rcu);
+ adapter->tx_rings[i] = NULL;
+ adapter->rx_rings[i] = NULL;
+ }
+}
+
+/**
+ * i40evf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time. Tx and Rx rings for each queue
+ * pair are allocated together as a single block.
+ **/
+static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring;
+ struct i40e_ring *rx_ring;
+
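+		/* the Tx and Rx rings for a queue pair are allocated as one
+		 * block; the Rx ring immediately follows the Tx ring
+		 */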
+ tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+ if (!tx_ring)
+ goto err_out;
+
+ tx_ring->queue_index = i;
+ tx_ring->netdev = adapter->netdev;
+ tx_ring->dev = &adapter->pdev->dev;
+ tx_ring->count = I40EVF_DEFAULT_TXD;
+ adapter->tx_rings[i] = tx_ring;
+
+ rx_ring = &tx_ring[1];
+ rx_ring->queue_index = i;
+ rx_ring->netdev = adapter->netdev;
+ rx_ring->dev = &adapter->pdev->dev;
+ rx_ring->count = I40EVF_DEFAULT_RXD;
+ adapter->rx_rings[i] = rx_ring;
+ }
+
+ return 0;
+
+err_out:
+ i40evf_free_queues(adapter);
+ return -ENOMEM;
+}
+
+/**
+ * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
+{
+ int vector, v_budget;
+ int pairs = 0;
+ int err = 0;
+
+ if (!adapter->vsi_res) {
+ err = -EIO;
+ goto out;
+ }
+ pairs = adapter->vsi_res->num_queue_pairs;
+
+ /* It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+	 * than CPUs. So let's be conservative and only ask for
+	 * (roughly) twice the number of vectors as there are CPUs.
+ */
+ v_budget = min(pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
+ v_budget = min(v_budget, (int)adapter->vf_res->max_vectors + 1);
+
+ /* A failure in MSI-X entry allocation isn't fatal, but it does
+ * mean we disable MSI-X capabilities of the adapter.
+ */
+ adapter->msix_entries = kcalloc(v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!adapter->msix_entries) {
+ err = -ENOMEM;
+ goto out;
+ }
+
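+	/* record the vector indices we want; pci_enable_msix() (called from
+	 * i40evf_acquire_msix_vectors) fills in the actual vector numbers
+	 */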
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+	err = i40evf_acquire_msix_vectors(adapter, v_budget);
+
+out:
+ adapter->netdev->real_num_tx_queues = pairs;
+ return err;
+}
+
+/**
+ * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ struct i40e_q_vector *q_vector;
+
+ num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ goto err_out;
+ q_vector->adapter = adapter;
+ q_vector->vsi = &adapter->vsi;
+ q_vector->v_idx = q_idx;
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ i40evf_napi_poll, 64);
+ adapter->q_vector[q_idx] = q_vector;
+ }
+
+ return 0;
+
+err_out:
+ while (q_idx) {
+ q_idx--;
+ q_vector = adapter->q_vector[q_idx];
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ adapter->q_vector[q_idx] = NULL;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ int napi_vectors;
+
+ num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+ napi_vectors = adapter->vsi_res->num_queue_pairs;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ struct i40e_q_vector *q_vector = adapter->q_vector[q_idx];
+
+ adapter->q_vector[q_idx] = NULL;
+ if (q_idx < napi_vectors)
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ }
+}
+
+/**
+ * i40evf_reset_interrupt_capability - Reset MSIX setup
+ * @adapter: board private structure
+ *
+ **/
+void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
+{
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ return;
+}
+
+/**
+ * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ *
+ **/
+int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
+{
+ int err;
+
+ err = i40evf_set_interrupt_capability(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to setup interrupt capabilities\n");
+ goto err_set_interrupt;
+ }
+
+ err = i40evf_alloc_q_vectors(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for queue vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ err = i40evf_alloc_queues(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
+ dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
+ (adapter->vsi_res->num_queue_pairs > 1) ? "Enabled" :
+ "Disabled", adapter->vsi_res->num_queue_pairs);
+
+ return 0;
+err_alloc_queues:
+ i40evf_free_q_vectors(adapter);
+err_alloc_q_vectors:
+ i40evf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+ return err;
+}
+
+/**
+ * i40evf_watchdog_timer - Periodic call-back timer
+ * @data: pointer to adapter disguised as unsigned long
+ **/
+static void i40evf_watchdog_timer(unsigned long data)
+{
+ struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;
+ schedule_work(&adapter->watchdog_task);
+ /* timer will be rescheduled in watchdog task */
+}
+
+/**
+ * i40evf_watchdog_task - Periodic call-back task
+ * @work: pointer to work_struct
+ **/
+static void i40evf_watchdog_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter = container_of(work,
+ struct i40evf_adapter,
+ watchdog_task);
+ struct i40e_hw *hw = &adapter->hw;
+
+ if (adapter->state < __I40EVF_DOWN)
+ goto watchdog_done;
+
+ if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+ goto watchdog_done;
+
+ /* check for unannounced reset */
+ if ((adapter->state != __I40EVF_RESETTING) &&
+ (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
+ adapter->state = __I40EVF_RESETTING;
+ schedule_work(&adapter->reset_task);
+ dev_info(&adapter->pdev->dev, "%s: hardware reset detected\n",
+ __func__);
+ goto watchdog_done;
+ }
+
+ /* Process admin queue tasks. After init, everything gets done
+ * here so we don't race on the admin queue.
+ */
+ if (adapter->aq_pending)
+ goto watchdog_done;
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
+ i40evf_map_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
+ i40evf_add_ether_addrs(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
+ i40evf_add_vlans(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
+ i40evf_del_ether_addrs(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
+ i40evf_del_vlans(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
+ i40evf_disable_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
+ i40evf_configure_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
+ i40evf_enable_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->state == __I40EVF_RUNNING)
+ i40evf_request_stats(adapter);
+
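+	/* re-enable interrupts and trigger a software interrupt on all
+	 * queue vectors
+	 */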
+ i40evf_irq_enable(adapter, true);
+ i40evf_fire_sw_int(adapter, 0xFF);
+watchdog_done:
+ if (adapter->aq_required)
+ mod_timer(&adapter->watchdog_timer,
+ jiffies + msecs_to_jiffies(20));
+ else
+ mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ schedule_work(&adapter->adminq_task);
+}
+
+/**
+ * i40evf_configure_rss - Prepare for RSS if used
+ * @adapter: board private structure
+ **/
+static void i40evf_configure_rss(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u32 lut = 0;
+ int i, j;
+ u64 hena;
+
+ /* Set of random keys generated using kernel random number generator */
+ static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = {
+ 0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127,
+ 0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0,
+ 0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e,
+ 0x4954b126 };
+
+ /* Hash type is configured by the PF - we just supply the key */
+
+ /* Fill out hash function seed */
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ wr32(hw, I40E_VFQF_HKEY(i), seed[i]);
+
+ /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+ hena = I40E_DEFAULT_RSS_HENA;
+ wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+
+ /* Populate the LUT with max no. of queues in round robin fashion */
+ for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
+ if (j == adapter->vsi_res->num_queue_pairs)
+ j = 0;
+ /* lut = 4-byte sliding window of 4 lut entries */
+		lut = (lut << 8) | (j & ((0x1 << 8) - 1));
+ /* On i = 3, we have 4 entries in lut; write to the register */
+ if ((i & 3) == 3)
+ wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
+ }
+ i40e_flush(hw);
+}
+
+/**
+ * i40evf_reset_task - Call-back task to handle hardware reset
+ * @work: pointer to work_struct
+ *
+ * During reset we need to shut down and reinitialize the admin queue
+ * before we can use it to communicate with the PF again. We also clear
+ * and reinit the rings because that context is lost as well.
+ **/
+static void i40evf_reset_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter =
+ container_of(work, struct i40evf_adapter, reset_task);
+ struct i40e_hw *hw = &adapter->hw;
+ int i = 0, err;
+ uint32_t rstat_val;
+
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ udelay(500);
+
+ /* wait until the reset is complete */
+ for (i = 0; i < 20; i++) {
+ rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ if (rstat_val == I40E_VFR_COMPLETED)
+ break;
+ else
+ mdelay(100);
+ }
+ if (i == 20) {
+ /* reset never finished */
+ dev_info(&adapter->pdev->dev, "%s: reset never finished: %x\n",
+ __func__, rstat_val);
+ /* carry on anyway */
+ }
+ i40evf_down(adapter);
+ adapter->state = __I40EVF_RESETTING;
+
+ /* kill and reinit the admin queue */
+ if (i40evf_shutdown_adminq(hw))
+ dev_warn(&adapter->pdev->dev,
+ "%s: Failed to destroy the Admin Queue resources\n",
+ __func__);
+ err = i40evf_init_adminq(hw);
+ if (err)
+ dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n",
+ __func__, err);
+
+ adapter->aq_pending = 0;
+ adapter->aq_required = 0;
+ i40evf_map_queues(adapter);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+ mod_timer(&adapter->watchdog_timer, jiffies + 2);
+
+ if (netif_running(adapter->netdev)) {
+ /* allocate transmit descriptors */
+ err = i40evf_setup_all_tx_resources(adapter);
+ if (err)
+ goto reset_err;
+
+ /* allocate receive descriptors */
+ err = i40evf_setup_all_rx_resources(adapter);
+ if (err)
+ goto reset_err;
+
+ i40evf_configure(adapter);
+
+ err = i40evf_up_complete(adapter);
+ if (err)
+ goto reset_err;
+
+ i40evf_irq_enable(adapter, true);
+ }
+ return;
+reset_err:
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+ i40evf_close(adapter->netdev);
+}
+
+/**
+ * i40evf_adminq_task - worker thread to clean the admin queue
+ * @work: pointer to work_struct containing our data
+ **/
+static void i40evf_adminq_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter =
+ container_of(work, struct i40evf_adapter, adminq_task);
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_arq_event_info event;
+ struct i40e_virtchnl_msg *v_msg;
+ i40e_status ret;
+ u16 pending;
+
+ event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf) {
+ dev_info(&adapter->pdev->dev, "%s: no memory for ARQ clean\n",
+ __func__);
+ return;
+ }
+ v_msg = (struct i40e_virtchnl_msg *)&event.desc;
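+	/* drain the admin receive queue, handing each message to the
+	 * virtchnl completion handler
+	 */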
+ do {
+ ret = i40evf_clean_arq_element(hw, &event, &pending);
+ if (ret)
+ break; /* No event to process or error cleaning ARQ */
+
+ i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
+ v_msg->v_retval, event.msg_buf,
+ event.msg_size);
+ if (pending != 0) {
+ dev_info(&adapter->pdev->dev,
+ "%s: ARQ: Pending events %d\n",
+ __func__, pending);
+ memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
+ }
+ } while (pending);
+
+ /* re-enable Admin queue interrupt cause */
+ i40evf_misc_irq_enable(adapter);
+
+ kfree(event.msg_buf);
+}
+
+/**
+ * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ if (adapter->tx_rings[i]->desc)
+ i40evf_free_tx_resources(adapter->tx_rings[i]);
+}
+
+/**
+ * i40evf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
+ if (!err)
+ continue;
+ dev_err(&adapter->pdev->dev,
+ "%s: Allocation for Tx Queue %u failed\n",
+ __func__, i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * i40evf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
+ if (!err)
+ continue;
+ dev_err(&adapter->pdev->dev,
+ "%s: Allocation for Rx Queue %u failed\n",
+ __func__, i);
+ break;
+ }
+ return err;
+}
+
+/**
+ * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ if (adapter->rx_rings[i]->desc)
+ i40evf_free_rx_resources(adapter->rx_rings[i]);
+}
+
+/**
+ * i40evf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int i40evf_open(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (adapter->state != __I40EVF_DOWN)
+ return -EBUSY;
+
+ /* allocate transmit descriptors */
+ err = i40evf_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = i40evf_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ /* clear any pending interrupts, may auto mask */
+ err = i40evf_request_traffic_irqs(adapter, netdev->name);
+ if (err)
+ goto err_req_irq;
+
+ i40evf_configure(adapter);
+
+ err = i40evf_up_complete(adapter);
+ if (err)
+ goto err_req_irq;
+
+ i40evf_irq_enable(adapter, true);
+
+ return 0;
+
+err_req_irq:
+ i40evf_down(adapter);
+ i40evf_free_traffic_irqs(adapter);
+err_setup_rx:
+ i40evf_free_all_rx_resources(adapter);
+err_setup_tx:
+ i40evf_free_all_tx_resources(adapter);
+
+ return err;
+}
+
+/**
+ * i40evf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
+ * are freed, along with all transmit and receive resources.
+ **/
+static int i40evf_close(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ /* signal that we are down to the interrupt handler */
+ adapter->state = __I40EVF_DOWN;
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+
+ i40evf_down(adapter);
+ i40evf_free_traffic_irqs(adapter);
+
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
+
+ return 0;
+}
+
+/**
+ * i40evf_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ /* only return the current stats */
+ return &adapter->net_stats;
+}
+
+/**
+ * i40evf_reinit_locked - Software reinit
+ * @adapter: board private structure
+ *
+ * Reinitializes the ring structures in response to a software configuration
+ * change. Roughly the same as close followed by open, but skips releasing
+ * and reallocating the interrupts.
+ **/
+void i40evf_reinit_locked(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ WARN_ON(in_interrupt());
+
+ adapter->state = __I40EVF_RESETTING;
+
+ i40evf_down(adapter);
+
+ /* allocate transmit descriptors */
+ err = i40evf_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_reinit;
+
+ /* allocate receive descriptors */
+ err = i40evf_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_reinit;
+
+ i40evf_configure(adapter);
+
+ err = i40evf_up_complete(adapter);
+ if (err)
+ goto err_reinit;
+
+ i40evf_irq_enable(adapter, true);
+ return;
+
+err_reinit:
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+ i40evf_close(netdev);
+}
+
+/**
+ * i40evf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
+ return -EINVAL;
+
+ /* must set new MTU before calling down or up */
+ netdev->mtu = new_mtu;
+ i40evf_reinit_locked(adapter);
+ return 0;
+}
+
+static const struct net_device_ops i40evf_netdev_ops = {
+ .ndo_open = i40evf_open,
+ .ndo_stop = i40evf_close,
+ .ndo_start_xmit = i40evf_xmit_frame,
+ .ndo_get_stats = i40evf_get_stats,
+ .ndo_set_rx_mode = i40evf_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = i40evf_set_mac,
+ .ndo_change_mtu = i40evf_change_mtu,
+ .ndo_tx_timeout = i40evf_tx_timeout,
+ .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
+};
+
+/**
+ * i40evf_check_reset_complete - check that VF reset is complete
+ * @hw: pointer to hw struct
+ *
+ * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
+ **/
+static int i40evf_check_reset_complete(struct i40e_hw *hw)
+{
+ u32 rstat;
+ int i;
+
+ for (i = 0; i < 100; i++) {
+		rstat = rd32(hw, I40E_VFGEN_RSTAT) &
+			I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+		if (rstat == I40E_VFR_VFACTIVE)
+ return 0;
+ udelay(10);
+ }
+ return -EBUSY;
+}
+
+/**
+ * i40evf_init_task - worker thread to perform delayed initialization
+ * @work: pointer to work_struct containing our data
+ *
+ * This task completes the work that was begun in probe. Due to the nature
+ * of VF-PF communications, we may need to wait tens of milliseconds to get
+ * responses back from the PF. Rather than busy-wait in probe and bog down the
+ * whole system, we'll do it in a task so we can sleep.
+ * This task only runs during driver init. Once we've established
+ * communications with the PF driver and set up our netdev, the watchdog
+ * takes over.
+ **/
+static void i40evf_init_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter = container_of(work,
+ struct i40evf_adapter,
+ init_task.work);
+ struct net_device *netdev = adapter->netdev;
+ struct i40evf_mac_filter *f;
+ struct i40e_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int i, err, bufsz;
+
+ switch (adapter->state) {
+ case __I40EVF_STARTUP:
+ /* driver loaded, probe complete */
+ err = i40e_set_mac_type(hw);
+ if (err) {
+ dev_info(&pdev->dev, "%s: set_mac_type failed: %d\n",
+ __func__, err);
+ goto err;
+ }
+ err = i40evf_check_reset_complete(hw);
+ if (err) {
+ dev_info(&pdev->dev, "%s: device is still in reset (%d).\n",
+ __func__, err);
+ goto err;
+ }
+ hw->aq.num_arq_entries = I40EVF_AQ_LEN;
+ hw->aq.num_asq_entries = I40EVF_AQ_LEN;
+ hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+ hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+
+ err = i40evf_init_adminq(hw);
+ if (err) {
+ dev_info(&pdev->dev, "%s: init_adminq failed: %d\n",
+ __func__, err);
+ goto err;
+ }
+ err = i40evf_send_api_ver(adapter);
+ if (err) {
+ dev_info(&pdev->dev, "%s: unable to send to PF (%d)\n",
+ __func__, err);
+ i40evf_shutdown_adminq(hw);
+ goto err;
+ }
+ adapter->state = __I40EVF_INIT_VERSION_CHECK;
+ goto restart;
+ break;
+ case __I40EVF_INIT_VERSION_CHECK:
+ if (!i40evf_asq_done(hw))
+ goto err;
+
+ /* aq msg sent, awaiting reply */
+ err = i40evf_verify_api_ver(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to verify API version, error %d\n",
+ err);
+ goto err;
+ }
+ err = i40evf_send_vf_config_msg(adapter);
+ if (err) {
+			dev_err(&pdev->dev, "Unable to send config request, error %d\n",
+ err);
+ goto err;
+ }
+ adapter->state = __I40EVF_INIT_GET_RESOURCES;
+ goto restart;
+ break;
+ case __I40EVF_INIT_GET_RESOURCES:
+ /* aq msg sent, awaiting reply */
+ if (!adapter->vf_res) {
+ bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
+ (I40E_MAX_VF_VSI *
+ sizeof(struct i40e_virtchnl_vsi_resource));
+ adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
+ if (!adapter->vf_res) {
+ dev_err(&pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ goto err;
+ }
+ }
+ err = i40evf_get_vf_config(adapter);
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ goto restart;
+ if (err) {
+ dev_info(&pdev->dev, "%s: unable to get VF config (%d)\n",
+ __func__, err);
+ goto err_alloc;
+ }
+ adapter->state = __I40EVF_INIT_SW;
+ break;
+ default:
+ goto err_alloc;
+ }
+ /* got VF config message back from PF, now we can parse it */
+ for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+ if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+ }
+ if (!adapter->vsi_res) {
+ dev_info(&pdev->dev, "%s: no LAN VSI found\n", __func__);
+ goto err_alloc;
+ }
+
+ adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
+
+ adapter->txd_count = I40EVF_DEFAULT_TXD;
+ adapter->rxd_count = I40EVF_DEFAULT_RXD;
+
+ netdev->netdev_ops = &i40evf_netdev_ops;
+ i40evf_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+
+ netdev->features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_SCTP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GRO;
+
+ if (adapter->vf_res->vf_offload_flags
+ & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
+ netdev->vlan_features = netdev->features;
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+
+ /* The HW MAC address was set and/or determined in sw_init */
+ if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
+ dev_info(&pdev->dev,
+			 "Invalid MAC address %pM, using random\n",
+ adapter->hw.mac.addr);
+ random_ether_addr(adapter->hw.mac.addr);
+ }
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+ INIT_LIST_HEAD(&adapter->mac_filter_list);
+ INIT_LIST_HEAD(&adapter->vlan_filter_list);
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (NULL == f)
+ goto err_sw_init;
+
+ memcpy(f->macaddr, adapter->hw.mac.addr, ETH_ALEN);
+ f->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+ list_add(&f->list, &adapter->mac_filter_list);
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = &i40evf_watchdog_timer;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+
+ err = i40evf_init_interrupt_scheme(adapter);
+ if (err)
+ goto err_sw_init;
+ i40evf_map_rings_to_vectors(adapter);
+ i40evf_configure_rss(adapter);
+ err = i40evf_request_misc_irq(adapter);
+ if (err)
+ goto err_sw_init;
+
+ netif_carrier_off(netdev);
+
+ strcpy(netdev->name, "eth%d");
+
+ adapter->vsi.id = adapter->vsi_res->vsi_id;
+ adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
+ adapter->vsi.back = adapter;
+ adapter->vsi.base_vector = 1;
+ adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
+ adapter->vsi.rx_itr_setting = I40E_ITR_DYNAMIC;
+ adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
+ adapter->vsi.netdev = adapter->netdev;
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ adapter->netdev_registered = true;
+
+ netif_tx_stop_all_queues(netdev);
+
+	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
+ if (netdev->features & NETIF_F_GRO)
+ dev_info(&pdev->dev, "GRO is enabled\n");
+
+ dev_info(&pdev->dev, "%s\n", i40evf_driver_string);
+ adapter->state = __I40EVF_DOWN;
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+ i40evf_misc_irq_enable(adapter);
+ return;
+restart:
+ schedule_delayed_work(&adapter->init_task,
+ msecs_to_jiffies(50));
+ return;
+
+err_register:
+ i40evf_free_misc_irq(adapter);
+err_sw_init:
+ i40evf_reset_interrupt_capability(adapter);
+ adapter->state = __I40EVF_FAILED;
+err_alloc:
+ kfree(adapter->vf_res);
+ adapter->vf_res = NULL;
+err:
+ /* Things went into the weeds, so try again later */
+ if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
+ dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
+ if (hw->aq.asq.count)
+ i40evf_shutdown_adminq(hw); /* ignore error */
+ adapter->state = __I40EVF_FAILED;
+ return; /* do not reschedule */
+ }
+ schedule_delayed_work(&adapter->init_task, HZ * 3);
+ return;
+}
+
+/**
+ * i40evf_shutdown - Shutdown the device in preparation for a reboot
+ * @pdev: pci device structure
+ **/
+static void i40evf_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ i40evf_close(netdev);
+
+#ifdef CONFIG_PM
+ pci_save_state(pdev);
+
+#endif
+ pci_disable_device(pdev);
+}
+
+/**
+ * i40evf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in i40evf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * i40evf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct i40evf_adapter *adapter = NULL;
+ struct i40e_hw *hw = NULL;
+ int err, pci_using_dac;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ pci_using_dac = true;
+ /* coherent mask for the same size will always succeed if
+ * dma_set_mask does
+ */
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ pci_using_dac = false;
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ } else {
+		dev_err(&pdev->dev, "%s: DMA configuration failed\n",
+			__func__);
+ err = -EIO;
+ goto err_dma;
+ }
+
+ err = pci_request_regions(pdev, i40evf_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_regions failed 0x%x\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter),
+ MAX_TX_QUEUES);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ hw = &adapter->hw;
+ hw->back = adapter;
+
+ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+ adapter->state = __I40EVF_STARTUP;
+
+ /* Call save state here because it relies on the adapter struct. */
+ pci_save_state(pdev);
+
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+ hw->bus.device = PCI_SLOT(pdev->devfn);
+ hw->bus.func = PCI_FUNC(pdev->devfn);
+
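+	/* the rest of initialization is deferred to the init task, which
+	 * talks to the PF over the admin queue and may need to sleep
+	 */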
+ INIT_WORK(&adapter->reset_task, i40evf_reset_task);
+ INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
+ INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
+ INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
+ schedule_delayed_work(&adapter->init_task, 10);
+
+ return 0;
+
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40evf_suspend - Power management suspend routine
+ * @pdev: PCI device information struct
+ * @state: unused
+ *
+ * Called when the system (VM) is entering sleep/suspend.
+ **/
+static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int retval = 0;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ i40evf_down(adapter);
+ rtnl_unlock();
+ }
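+	/* release the admin queue interrupt and MSI-X vectors before the
+	 * device is powered down
+	 */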
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+/**
+ * i40evf_resume - Power management resume routine
+ * @pdev: PCI device information struct
+ *
+ * Called when the system (VM) is resumed from sleep/suspend.
+ **/
+static int i40evf_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ /* pci_restore_state clears dev->state_saved so call
+ * pci_save_state to restore it.
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ rtnl_lock();
+ err = i40evf_set_interrupt_capability(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
+ return err;
+ }
+ err = i40evf_request_misc_irq(adapter);
+ rtnl_unlock();
+ if (err) {
+ dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
+ return err;
+ }
+
+ schedule_work(&adapter->reset_task);
+
+ netif_device_attach(netdev);
+
+ return err;
+}
+
+#endif /* CONFIG_PM */
+/**
+ * i40evf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * i40evf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void i40evf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+
+ cancel_delayed_work_sync(&adapter->init_task);
+
+ if (adapter->netdev_registered) {
+ unregister_netdev(netdev);
+ adapter->netdev_registered = false;
+ }
+ adapter->state = __I40EVF_REMOVE;
+
+ if (adapter->num_msix_vectors) {
+ i40evf_misc_irq_disable(adapter);
+ del_timer_sync(&adapter->watchdog_timer);
+
+ flush_scheduled_work();
+
+ i40evf_free_misc_irq(adapter);
+
+ i40evf_reset_interrupt_capability(adapter);
+ }
+
+ if (hw->aq.asq.count)
+ i40evf_shutdown_adminq(hw);
+
+ iounmap(hw->hw_addr);
+ pci_release_regions(pdev);
+
+ i40evf_free_queues(adapter);
+ kfree(adapter->vf_res);
+
+ free_netdev(netdev);
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver i40evf_driver = {
+ .name = i40evf_driver_name,
+ .id_table = i40evf_pci_tbl,
+ .probe = i40evf_probe,
+ .remove = i40evf_remove,
+#ifdef CONFIG_PM
+ .suspend = i40evf_suspend,
+ .resume = i40evf_resume,
+#endif
+ .shutdown = i40evf_shutdown,
+};
+
+/**
+ * i40evf_init_module - Driver Registration Routine
+ *
+ * i40evf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init i40evf_init_module(void)
+{
+ int ret;
+ pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
+ i40evf_driver_version);
+
+ pr_info("%s\n", i40evf_copyright);
+
+ ret = pci_register_driver(&i40evf_driver);
+ return ret;
+}
+
+module_init(i40evf_init_module);
+
+/**
+ * i40evf_exit_module - Driver Exit Cleanup Routine
+ *
+ * i40evf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit i40evf_exit_module(void)
+{
+ pci_unregister_driver(&i40evf_driver);
+}
+
+module_exit(i40evf_exit_module);
+
+/* i40evf_main.c */
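
The probe routine above uses the standard PCI DMA-mask fallback: try a 64-bit streaming mask first, drop back to 32-bit, and fail only if neither is accepted; once the streaming mask succeeds, a coherent mask of the same width cannot fail, which is why its return value is not checked. A minimal, self-contained sketch of that idiom follows (the demo_set_dma_masks() name is illustrative, not part of the driver):

/* Hypothetical sketch of the 64-bit/32-bit DMA mask fallback used in
 * i40evf_probe(); demo_set_dma_masks() is an illustrative name only.
 */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int demo_set_dma_masks(struct pci_dev *pdev, bool *using_dac)
{
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
		/* coherent mask for the same width always succeeds here */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		return 0;
	}
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		*using_dac = false;
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		return 0;
	}
	dev_err(&pdev->dev, "no usable DMA configuration\n");
	return -EIO;
}
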
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
new file mode 100644
index 000000000000..e6978d79e62b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -0,0 +1,772 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+
+/* busy wait delay in msec */
+#define I40EVF_BUSY_WAIT_DELAY 10
+#define I40EVF_BUSY_WAIT_COUNT 50
+
+/**
+ * i40evf_send_pf_msg
+ * @adapter: adapter structure
+ * @op: virtual channel opcode
+ * @msg: pointer to message buffer
+ * @len: message length
+ *
+ * Send message to PF and print status if failure.
+ **/
+static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
+ enum i40e_virtchnl_ops op, u8 *msg, u16 len)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ i40e_status err;
+
+ err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
+ if (err)
+ dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
+ op, err, hw->aq.asq_last_status);
+ return err;
+}
+
+/**
+ * i40evf_send_api_ver
+ * @adapter: adapter structure
+ *
+ * Send API version admin queue message to the PF. The reply is not checked
+ * in this function. Returns 0 if the message was successfully
+ * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ **/
+int i40evf_send_api_ver(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_version_info vvi;
+
+ vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
+ vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
+
+ return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi,
+ sizeof(vvi));
+}
+
+/**
+ * i40evf_verify_api_ver
+ * @adapter: adapter structure
+ *
+ * Compare API versions with the PF. Must be called after admin queue is
+ * initialized. Returns 0 if API versions match, -EIO if
+ * they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
+ **/
+int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_version_info *pf_vvi;
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_arq_event_info event;
+ i40e_status err;
+
+ event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = i40evf_clean_arq_element(hw, &event, NULL);
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ goto out_alloc;
+
+ err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ if (err) {
+ err = -EIO;
+ goto out_alloc;
+ }
+
+ if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
+ I40E_VIRTCHNL_OP_VERSION) {
+ err = -EIO;
+ goto out_alloc;
+ }
+
+ pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
+ if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
+ (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
+ err = -EIO;
+
+out_alloc:
+ kfree(event.msg_buf);
+out:
+ return err;
+}
+
+/**
+ * i40evf_send_vf_config_msg
+ * @adapter: adapter structure
+ *
+ * Send VF configuration request admin queue message to the PF. The reply
+ * is not checked in this function. Returns 0 if the message was
+ * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ **/
+int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
+{
+ return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ NULL, 0);
+}
+
+/**
+ * i40evf_get_vf_config
+ * @adapter: adapter structure
+ *
+ * Get VF configuration from PF and populate hw structure. Must be called after
+ * admin queue is initialized. Busy waits until response is received from PF,
+ * with maximum timeout. Response from PF is returned in the buffer for further
+ * processing by the caller.
+ **/
+int i40evf_get_vf_config(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_arq_event_info event;
+ u16 len;
+ i40e_status err;
+
+ len = sizeof(struct i40e_virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
+ event.msg_size = len;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = i40evf_clean_arq_element(hw, &event, NULL);
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ goto out_alloc;
+
+ err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Error returned from PF, %d, %d\n", __func__,
+ le32_to_cpu(event.desc.cookie_high),
+ le32_to_cpu(event.desc.cookie_low));
+ err = -EIO;
+ goto out_alloc;
+ }
+
+ if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid response from PF, %d, %d\n", __func__,
+ le32_to_cpu(event.desc.cookie_high),
+ le32_to_cpu(event.desc.cookie_low));
+ err = -EIO;
+ goto out_alloc;
+ }
+ memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len));
+
+ i40e_vf_parse_hw_config(hw, adapter->vf_res);
+out_alloc:
+ kfree(event.msg_buf);
+out:
+ return err;
+}
+
+/**
+ * i40evf_configure_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF set up our (previously allocated) queues.
+ **/
+void i40evf_configure_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_vsi_queue_config_info *vqci;
+ struct i40e_virtchnl_queue_pair_info *vqpi;
+ int pairs = adapter->vsi_res->num_queue_pairs;
+ int i, len;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
+ (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
+ vqci = kzalloc(len, GFP_ATOMIC);
+ if (!vqci) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ vqci->vsi_id = adapter->vsi_res->vsi_id;
+ vqci->num_queue_pairs = pairs;
+ vqpi = vqci->qpair;
+ /* Size check is not needed here - HW max is 16 queue pairs, and we
+ * can fit info for 31 of them into the AQ buffer before it overflows.
+ */
+ for (i = 0; i < pairs; i++) {
+ vqpi->txq.vsi_id = vqci->vsi_id;
+ vqpi->txq.queue_id = i;
+ vqpi->txq.ring_len = adapter->tx_rings[i]->count;
+ vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
+
+ vqpi->rxq.vsi_id = vqci->vsi_id;
+ vqpi->rxq.queue_id = i;
+ vqpi->rxq.ring_len = adapter->rx_rings[i]->count;
+ vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma;
+ vqpi->rxq.max_pkt_size = adapter->netdev->mtu
+ + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
+ vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len;
+ vqpi++;
+ }
+
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ (u8 *)vqci, len);
+ kfree(vqci);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+}
+
+/**
+ * i40evf_enable_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF enable all of our queues.
+ **/
+void i40evf_enable_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_queue_select vqs;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+ vqs.vsi_id = adapter->vsi_res->vsi_id;
+ vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+ vqs.rx_queues = vqs.tx_queues;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ (u8 *)&vqs, sizeof(vqs));
+ adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
+}
+
+/**
+ * i40evf_disable_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF disable all of our queues.
+ **/
+void i40evf_disable_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_queue_select vqs;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
+ vqs.vsi_id = adapter->vsi_res->vsi_id;
+ vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+ vqs.rx_queues = vqs.tx_queues;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ (u8 *)&vqs, sizeof(vqs));
+ adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
+}
+
+/**
+ * i40evf_map_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF map queues to interrupt vectors. Misc causes, including
+ * admin queue, are always mapped to vector 0.
+ **/
+void i40evf_map_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_irq_map_info *vimi;
+ int v_idx, q_vectors, len;
+ struct i40e_q_vector *q_vector;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
+
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ len = sizeof(struct i40e_virtchnl_irq_map_info) +
+ (adapter->num_msix_vectors *
+ sizeof(struct i40e_virtchnl_vector_map));
+ vimi = kzalloc(len, GFP_ATOMIC);
+ if (!vimi) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+
+ vimi->num_vectors = adapter->num_msix_vectors;
+ /* Queue vectors first */
+ for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ q_vector = adapter->q_vector[v_idx];
+ vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
+ vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS;
+ vimi->vecmap[v_idx].txq_map = q_vector->ring_mask;
+ vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask;
+ }
+ /* Misc vector last - this is only for AdminQ messages */
+ vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
+ vimi->vecmap[v_idx].vector_id = 0;
+ vimi->vecmap[v_idx].txq_map = 0;
+ vimi->vecmap[v_idx].rxq_map = 0;
+
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ (u8 *)vimi, len);
+ kfree(vimi);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
+}
+
+/**
+ * i40evf_add_ether_addrs
+ * @adapter: adapter structure
+ *
+ * Request that the PF add one or more addresses to our filters.
+ **/
+void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_ether_addr_list *veal;
+ int len, i = 0, count = 0;
+ struct i40evf_mac_filter *f;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (f->add)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
+
+ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ (count * sizeof(struct i40e_virtchnl_ether_addr));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_ether_addr_list)) /
+ sizeof(struct i40e_virtchnl_ether_addr);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+
+ veal = kzalloc(len, GFP_ATOMIC);
+ if (!veal) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ veal->vsi_id = adapter->vsi_res->vsi_id;
+ veal->num_elements = count;
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (f->add) {
+ memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+ i++;
+ f->add = false;
+ }
+ }
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ (u8 *)veal, len);
+ kfree(veal);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+}
+
+/**
+ * i40evf_del_ether_addrs
+ * @adapter: adapter structure
+ *
+ * Request that the PF remove one or more addresses from our filters.
+ **/
+void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_ether_addr_list *veal;
+ struct i40evf_mac_filter *f, *ftmp;
+ int len, i = 0, count = 0;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (f->remove)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+
+ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ (count * sizeof(struct i40e_virtchnl_ether_addr));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_ether_addr_list)) /
+ sizeof(struct i40e_virtchnl_ether_addr);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+ veal = kzalloc(len, GFP_ATOMIC);
+ if (!veal) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ veal->vsi_id = adapter->vsi_res->vsi_id;
+ veal->num_elements = count;
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ if (f->remove) {
+ memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+ i++;
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ (u8 *)veal, len);
+ kfree(veal);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+}
+
+/**
+ * i40evf_add_vlans
+ * @adapter: adapter structure
+ *
+ * Request that the PF add one or more VLAN filters to our VSI.
+ **/
+void i40evf_add_vlans(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_vlan_filter_list *vvfl;
+ int len, i = 0, count = 0;
+ struct i40evf_vlan_filter *f;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->add)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;
+
+ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ (count * sizeof(u16));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(u16);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ vvfl->num_elements = count;
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->add) {
+ vvfl->vlan_id[i] = f->vlan;
+ i++;
+ f->add = false;
+ }
+ }
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+}
+
+/**
+ * i40evf_del_vlans
+ * @adapter: adapter structure
+ *
+ * Request that the PF remove one or more VLAN filters from our VSI.
+ **/
+void i40evf_del_vlans(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_vlan_filter_list *vvfl;
+ struct i40evf_vlan_filter *f, *ftmp;
+ int len, i = 0, count = 0;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->remove)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;
+
+ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ (count * sizeof(u16));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(u16);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ vvfl->num_elements = count;
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ if (f->remove) {
+ vvfl->vlan_id[i] = f->vlan;
+ i++;
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+}
+
+/**
+ * i40evf_set_promiscuous
+ * @adapter: adapter structure
+ * @flags: bitmask to control unicast/multicast promiscuous.
+ *
+ * Request that the PF enable promiscuous mode for our VSI.
+ **/
+void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
+{
+ struct i40e_virtchnl_promisc_info vpi;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ vpi.vsi_id = adapter->vsi_res->vsi_id;
+ vpi.flags = flags;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ (u8 *)&vpi, sizeof(vpi));
+}
+
+/**
+ * i40evf_request_stats
+ * @adapter: adapter structure
+ *
+ * Request VSI statistics from PF.
+ **/
+void i40evf_request_stats(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_queue_select vqs;
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* no error message, this isn't crucial */
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS;
+ vqs.vsi_id = adapter->vsi_res->vsi_id;
+ /* queue maps are ignored for this message - only the vsi is used */
+ if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS,
+ (u8 *)&vqs, sizeof(vqs)))
+ /* if the request failed, don't lock out others */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
+
+/**
+ * i40evf_virtchnl_completion
+ * @adapter: adapter structure
+ * @v_opcode: opcode sent by PF
+ * @v_retval: retval sent by PF
+ * @msg: message sent by PF
+ * @msglen: message length
+ *
+ * Asynchronous completion function for admin queue messages. Rather than busy
+ * wait, we fire off our requests and assume that no errors will be returned.
+ * This function handles the reply messages.
+ **/
+void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
+ struct i40e_virtchnl_pf_event *vpe =
+ (struct i40e_virtchnl_pf_event *)msg;
+ switch (vpe->event) {
+ case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ adapter->link_up =
+ vpe->event_data.link_event.link_status;
+ if (adapter->link_up && !netif_carrier_ok(netdev)) {
+ dev_info(&adapter->pdev->dev, "NIC Link is Up\n");
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+ } else if (!adapter->link_up) {
+ dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+ break;
+ case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ adapter->state = __I40EVF_RESETTING;
+ schedule_work(&adapter->reset_task);
+ dev_info(&adapter->pdev->dev,
+ "%s: hardware reset pending\n", __func__);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "%s: Unknown event %d from pf\n",
+ __func__, vpe->event);
+ break;
+
+ }
+ return;
+ }
+ if (v_opcode != adapter->current_op) {
+ dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d.\n",
+ __func__, adapter->current_op, v_opcode);
+ /* We're probably completely screwed at this point, but clear
+ * the current op and try to carry on....
+ */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ return;
+ }
+ if (v_retval) {
+ dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d!\n",
+ __func__, v_retval, v_opcode);
+ }
+ switch (v_opcode) {
+ case I40E_VIRTCHNL_OP_GET_STATS: {
+ struct i40e_eth_stats *stats =
+ (struct i40e_eth_stats *)msg;
+ adapter->net_stats.rx_packets = stats->rx_unicast +
+ stats->rx_multicast +
+ stats->rx_broadcast;
+ adapter->net_stats.tx_packets = stats->tx_unicast +
+ stats->tx_multicast +
+ stats->tx_broadcast;
+ adapter->net_stats.rx_bytes = stats->rx_bytes;
+ adapter->net_stats.tx_bytes = stats->tx_bytes;
+ adapter->net_stats.rx_errors = stats->rx_errors;
+ adapter->net_stats.tx_errors = stats->tx_errors;
+ adapter->net_stats.rx_dropped = stats->rx_missed;
+ adapter->net_stats.tx_dropped = stats->tx_discards;
+ adapter->current_stats = *stats;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_MAC_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_MAC_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_VLAN_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_VLAN_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ENABLE_QUEUES);
+ /* enable transmits */
+ i40evf_irq_enable(adapter, true);
+ netif_tx_start_all_queues(adapter->netdev);
+ netif_carrier_on(adapter->netdev);
+ break;
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF.\n",
+ __func__, v_opcode);
+ break;
+ } /* switch v_opcode */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
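
Every request routine in the new i40evf_virtchnl.c follows the same single-outstanding-command protocol: bail out if current_op is already set, record the opcode, send the admin queue message, set the matching bit in aq_pending and clear it in aq_required; i40evf_virtchnl_completion() later clears the pending bit and resets current_op. A condensed sketch of that protocol, using the driver's own types but hypothetical demo_ function names, looks like this:

/* Hypothetical, condensed sketch of the one-command-at-a-time virtchnl
 * protocol used above; demo_request()/demo_complete() are illustrative.
 */
#include "i40evf.h"

static void demo_request(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_queue_select vqs = {};

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
		return;		/* a previous command is still outstanding */

	adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
	adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
}

static void demo_complete(struct i40evf_adapter *adapter,
			  enum i40e_virtchnl_ops v_opcode)
{
	/* reply arrived: drop the pending bit and allow the next command */
	if (v_opcode == I40E_VIRTCHNL_OP_ENABLE_QUEUES)
		adapter->aq_pending &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
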
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 47c2d10df826..06df6928f44c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -113,6 +113,59 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
}
/**
+ * igb_check_for_link_media_swap - Check which M88E1112 interface is linked
+ * @hw: pointer to the HW structure
+ *
+ * Poll the M88E1112 interfaces to see which interface achieved link.
+ */
+static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ u8 port = 0;
+
+ /* Check the copper medium. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_COPPER;
+
+ /* Check the other medium. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_OTHER;
+
+ /* Determine if a swap needs to happen. */
+ if (port && (hw->dev_spec._82575.media_port != port)) {
+ hw->dev_spec._82575.media_port = port;
+ hw->dev_spec._82575.media_changed = true;
+ } else {
+ ret_val = igb_check_for_link_82575(hw);
+ }
+
+ return ret_val;
+}
+
+/**
* igb_init_phy_params_82575 - Init PHY func ptrs.
* @hw: pointer to the HW structure
**/
@@ -189,6 +242,29 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
else
phy->ops.get_cable_length = igb_get_cable_length_m88;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+ /* Check if this PHY is configured for media swap. */
+ if (phy->id == M88E1112_E_PHY_ID) {
+ u16 data;
+
+ ret_val = phy->ops.write_reg(hw,
+ E1000_M88E1112_PAGE_ADDR,
+ 2);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw,
+ E1000_M88E1112_MAC_CTRL_1,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
+ E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
+ if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
+ data == E1000_M88E1112_AUTO_COPPER_BASEX)
+ hw->mac.ops.check_for_link =
+ igb_check_for_link_media_swap;
+ }
break;
case IGP03E1000_E_PHY_ID:
phy->type = e1000_phy_igp_3;
@@ -365,6 +441,19 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
? igb_setup_copper_link_82575
: igb_setup_serdes_link_82575;
+ if (mac->type == e1000_82580) {
+ switch (hw->device_id) {
+ /* feature not supported on these id's */
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ case E1000_DEV_ID_DH89XXCC_SERDES:
+ case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+ case E1000_DEV_ID_DH89XXCC_SFP:
+ break;
+ default:
+ hw->dev_spec._82575.mas_capable = true;
+ break;
+ }
+ }
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 978eca31ceda..0571b973be80 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -205,6 +205,11 @@
*/
#define E1000_CONNSW_ENRGSRC 0x4
+#define E1000_CONNSW_PHYSD 0x400
+#define E1000_CONNSW_PHY_PDN 0x800
+#define E1000_CONNSW_SERDESD 0x200
+#define E1000_CONNSW_AUTOSENSE_CONF 0x2
+#define E1000_CONNSW_AUTOSENSE_EN 0x1
#define E1000_PCS_CFG_PCS_EN 8
#define E1000_PCS_LCTL_FLV_LINK_UP 1
#define E1000_PCS_LCTL_FSV_100 2
@@ -532,6 +537,17 @@
#define E1000_MDICNFG_PHY_MASK 0x03E00000
#define E1000_MDICNFG_PHY_SHIFT 21
+#define E1000_MEDIA_PORT_COPPER 1
+#define E1000_MEDIA_PORT_OTHER 2
+#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2
+#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3
+#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */
+#define E1000_M88E1112_MAC_CTRL_1 0x10
+#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
+#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7
+#define E1000_M88E1112_PAGE_ADDR 0x16
+#define E1000_M88E1112_STATUS 0x01
+
/* PCI Express Control */
#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 2e166b22d52b..ab99e2b582a8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -533,6 +533,9 @@ struct e1000_dev_spec_82575 {
bool clear_semaphore_once;
struct e1000_sfp_flags eth_flags;
bool module_plugged;
+ u8 media_port;
+ bool media_changed;
+ bool mas_capable;
};
struct e1000_hw {
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 5e9ed89403aa..ccf472f073dd 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -41,6 +41,7 @@
#include <linux/if_vlan.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/pci.h>
struct igb_adapter;
@@ -67,6 +68,7 @@ struct igb_adapter;
#define IGB_MIN_ITR_USECS 10
#define NON_Q_VECTORS 1
#define MAX_Q_VECTORS 8
+#define MAX_MSIX_ENTRIES 10
/* Transmit and receive queues */
#define IGB_MAX_RX_QUEUES 8
@@ -127,9 +129,9 @@ struct vf_data_storage {
#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_TX_HTHRESH 1
#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 4)
+ (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4)
#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 16)
+ (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16)
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
@@ -337,8 +339,10 @@ struct hwmon_attr {
};
struct hwmon_buff {
- struct device *device;
- struct hwmon_attr *hwmon_list;
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1];
+ struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4];
unsigned int n_hwmon;
};
#endif
@@ -355,7 +359,7 @@ struct igb_adapter {
unsigned int flags;
unsigned int num_q_vectors;
- struct msix_entry *msix_entries;
+ struct msix_entry msix_entries[MAX_MSIX_ENTRIES];
/* Interrupt Throttle Rate */
u32 rx_itr_setting;
@@ -440,7 +444,7 @@ struct igb_adapter {
char fw_version[32];
#ifdef CONFIG_IGB_HWMON
- struct hwmon_buff igb_hwmon_buff;
+ struct hwmon_buff *igb_hwmon_buff;
bool ets;
#endif
struct i2c_algo_bit_data i2c_algo;
@@ -450,6 +454,8 @@ struct igb_adapter {
u8 rss_indir_tbl[IGB_RETA_SIZE];
unsigned long link_check_timeout;
+ int copper_tries;
+ struct e1000_info ei;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -462,6 +468,16 @@ struct igb_adapter {
#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9)
+#define IGB_FLAG_MEDIA_RESET (1 << 10)
+#define IGB_FLAG_MAS_CAPABLE (1 << 11)
+#define IGB_FLAG_MAS_ENABLE (1 << 12)
+#define IGB_FLAG_HAS_MSIX (1 << 13)
+
+/* Media Auto Sense */
+#define IGB_MAS_ENABLE_0 0X0001
+#define IGB_MAS_ENABLE_1 0X0002
+#define IGB_MAS_ENABLE_2 0X0004
+#define IGB_MAS_ENABLE_3 0X0008
/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index c3143da497c8..1df02378de69 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1386,7 +1386,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
*data = 0;
/* Hook up test interrupt handler just for this test */
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
if (request_irq(adapter->msix_entries[0].vector,
igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
@@ -1519,7 +1519,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
msleep(10);
/* Unhook test interrupt handler */
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
free_irq(adapter->msix_entries[0].vector, adapter);
else
free_irq(irq, adapter);
@@ -1983,6 +1983,10 @@ static void igb_diag_test(struct net_device *netdev,
bool if_running = netif_running(netdev);
set_bit(__IGB_TESTING, &adapter->state);
+
+ /* can't do offline tests on media switching devices */
+ if (adapter->hw.dev_spec._82575.mas_capable)
+ eth_test->flags &= ~ETH_TEST_FL_OFFLINE;
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
@@ -2929,7 +2933,7 @@ static void igb_get_channels(struct net_device *netdev,
ch->max_combined = igb_max_channels(adapter);
/* Report info for other vector */
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
ch->max_other = NON_Q_VECTORS;
ch->other_count = NON_Q_VECTORS;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 58f1ce967aeb..e0af5bc61613 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -117,29 +117,29 @@ static int igb_add_hwmon_attr(struct igb_adapter *adapter,
unsigned int n_attr;
struct hwmon_attr *igb_attr;
- n_attr = adapter->igb_hwmon_buff.n_hwmon;
- igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr];
+ n_attr = adapter->igb_hwmon_buff->n_hwmon;
+ igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr];
switch (type) {
case IGB_HWMON_TYPE_LOC:
igb_attr->dev_attr.show = igb_hwmon_show_location;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_label", offset);
+ "temp%u_label", offset + 1);
break;
case IGB_HWMON_TYPE_TEMP:
igb_attr->dev_attr.show = igb_hwmon_show_temp;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_input", offset);
+ "temp%u_input", offset + 1);
break;
case IGB_HWMON_TYPE_CAUTION:
igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_max", offset);
+ "temp%u_max", offset + 1);
break;
case IGB_HWMON_TYPE_MAX:
igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_crit", offset);
+ "temp%u_crit", offset + 1);
break;
default:
rc = -EPERM;
@@ -154,30 +154,16 @@ static int igb_add_hwmon_attr(struct igb_adapter *adapter,
igb_attr->dev_attr.attr.mode = S_IRUGO;
igb_attr->dev_attr.attr.name = igb_attr->name;
sysfs_attr_init(&igb_attr->dev_attr.attr);
- rc = device_create_file(&adapter->pdev->dev,
- &igb_attr->dev_attr);
- if (rc == 0)
- ++adapter->igb_hwmon_buff.n_hwmon;
- return rc;
+ adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr;
+
+ ++adapter->igb_hwmon_buff->n_hwmon;
+
+ return 0;
}
static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
{
- int i;
-
- if (adapter == NULL)
- return;
-
- for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) {
- device_remove_file(&adapter->pdev->dev,
- &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr);
- }
-
- kfree(adapter->igb_hwmon_buff.hwmon_list);
-
- if (adapter->igb_hwmon_buff.device)
- hwmon_device_unregister(adapter->igb_hwmon_buff.device);
}
/* called from igb_main.c */
@@ -189,11 +175,11 @@ void igb_sysfs_exit(struct igb_adapter *adapter)
/* called from igb_main.c */
int igb_sysfs_init(struct igb_adapter *adapter)
{
- struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff;
+ struct hwmon_buff *igb_hwmon;
+ struct i2c_client *client;
+ struct device *hwmon_dev;
unsigned int i;
- int n_attrs;
int rc = 0;
- struct i2c_client *client = NULL;
/* If this method isn't defined we don't support thermals */
if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
@@ -201,34 +187,16 @@ int igb_sysfs_init(struct igb_adapter *adapter)
/* Don't create thermal hwmon interface if no sensors present */
rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
- if (rc)
- goto exit;
-
- /* init i2c_client */
- client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
- if (client == NULL) {
- dev_info(&adapter->pdev->dev,
- "Failed to create new i2c device..\n");
+ if (rc)
goto exit;
- }
- adapter->i2c_client = client;
- /* Allocation space for max attributes
- * max num sensors * values (loc, temp, max, caution)
- */
- n_attrs = E1000_MAX_SENSORS * 4;
- igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
- GFP_KERNEL);
- if (!igb_hwmon->hwmon_list) {
+ igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon),
+ GFP_KERNEL);
+ if (!igb_hwmon) {
rc = -ENOMEM;
- goto err;
- }
-
- igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
- if (IS_ERR(igb_hwmon->device)) {
- rc = PTR_ERR(igb_hwmon->device);
- goto err;
+ goto exit;
}
+ adapter->igb_hwmon_buff = igb_hwmon;
for (i = 0; i < E1000_MAX_SENSORS; i++) {
@@ -240,11 +208,39 @@ int igb_sysfs_init(struct igb_adapter *adapter)
/* Bail if any hwmon attr struct fails to initialize */
rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
if (rc)
- goto err;
+ goto exit;
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
+ if (rc)
+ goto exit;
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
+ if (rc)
+ goto exit;
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
+ if (rc)
+ goto exit;
+ }
+
+ /* init i2c_client */
+ client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
+ if (client == NULL) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to create new i2c device.\n");
+ rc = -ENODEV;
+ goto exit;
+ }
+ adapter->i2c_client = client;
+
+ igb_hwmon->groups[0] = &igb_hwmon->group;
+ igb_hwmon->group.attrs = igb_hwmon->attrs;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
+ client->name,
+ igb_hwmon,
+ igb_hwmon->groups);
+ if (IS_ERR(hwmon_dev)) {
+ rc = PTR_ERR(hwmon_dev);
+ goto err;
}
goto exit;
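
The igb_hwmon.c hunk above replaces the per-attribute device_create_file()/hwmon_device_register() sequence with a single device-managed registration that hands the whole attribute array to the hwmon core, so no explicit cleanup is needed in igb_sysfs_del_adapter(). A minimal sketch of that grouped-registration pattern (hypothetical demo_ names and a made-up constant reading) might look like:

/* Hypothetical sketch of devm_hwmon_device_register_with_groups(); the
 * demo_ names and the constant temperature are illustrative only.
 */
#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>

static ssize_t temp1_input_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42000);	/* millidegrees Celsius */
}
static DEVICE_ATTR_RO(temp1_input);

static struct attribute *demo_attrs[] = {
	&dev_attr_temp1_input.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);

static int demo_hwmon_init(struct device *parent, void *drvdata)
{
	struct device *hwmon_dev;

	/* One call registers the device with all attribute groups; being
	 * devm-managed, no explicit unregister or device_remove_file()
	 * is needed on teardown.
	 */
	hwmon_dev = devm_hwmon_device_register_with_groups(parent, "demo",
							   drvdata,
							   demo_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}
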
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 025e5f4b7481..46d31a49f5ea 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -803,7 +803,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
if (tx_queue > IGB_N0_QUEUE)
msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
- if (!adapter->msix_entries && msix_vector == 0)
+ if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
msixbm |= E1000_EIMS_OTHER;
array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
q_vector->eims_value = msixbm;
@@ -983,43 +983,58 @@ err_out:
return err;
}
-static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
-{
- if (adapter->msix_entries) {
- pci_disable_msix(adapter->pdev);
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
- } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
- pci_disable_msi(adapter->pdev);
- }
-}
-
/**
* igb_free_q_vector - Free memory allocated for specific interrupt vector
* @adapter: board private structure to initialize
* @v_idx: Index of vector to be freed
*
- * This function frees the memory allocated to the q_vector. In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
+ * This function frees the memory allocated to the q_vector.
**/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ adapter->q_vector[v_idx] = NULL;
+
+ /* igb_get_stats64() might access the rings on this vector,
+ * we must wait a grace period before freeing it.
+ */
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * igb_reset_q_vector - Reset config for interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be reset
+ *
+ * If NAPI is enabled it will delete any references to the
+ * NAPI struct. This is preparation for igb_free_q_vector.
+ **/
+static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
if (q_vector->tx.ring)
adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
if (q_vector->rx.ring)
adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL;
- adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
- /* igb_get_stats64() might access the rings on this vector,
- * we must wait a grace period before freeing it.
- */
- kfree_rcu(q_vector, rcu);
+}
+
+static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
+ pci_disable_msix(adapter->pdev);
+ else if (adapter->flags & IGB_FLAG_HAS_MSI)
+ pci_disable_msi(adapter->pdev);
+
+ while (v_idx--)
+ igb_reset_q_vector(adapter, v_idx);
}
/**
@@ -1038,8 +1053,10 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0;
- while (v_idx--)
+ while (v_idx--) {
+ igb_reset_q_vector(adapter, v_idx);
igb_free_q_vector(adapter, v_idx);
+ }
}
/**
@@ -1070,6 +1087,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
if (!msix)
goto msi_only;
+ adapter->flags |= IGB_FLAG_HAS_MSIX;
/* Number of supported queues. */
adapter->num_rx_queues = adapter->rss_queues;
@@ -1090,12 +1108,6 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
/* add 1 vector for link status interrupts */
numvecs++;
- adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
- GFP_KERNEL);
-
- if (!adapter->msix_entries)
- goto msi_only;
-
for (i = 0; i < numvecs; i++)
adapter->msix_entries[i].entry = i;
@@ -1172,7 +1184,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
(sizeof(struct igb_ring) * ring_count);
/* allocate q_vector and rings */
- q_vector = kzalloc(size, GFP_KERNEL);
+ q_vector = adapter->q_vector[v_idx];
+ if (!q_vector)
+ q_vector = kzalloc(size, GFP_KERNEL);
if (!q_vector)
return -ENOMEM;
@@ -1370,7 +1384,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
int err = 0;
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
err = igb_request_msix(adapter);
if (!err)
goto request_done;
@@ -1414,7 +1428,7 @@ request_done:
static void igb_free_irq(struct igb_adapter *adapter)
{
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
int vector = 0, i;
free_irq(adapter->msix_entries[vector++].vector, adapter);
@@ -1439,7 +1453,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
* mapped into these registers and so clearing the bits can cause
* issues on the VF drivers so we only need to clear what we set
*/
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 regval = rd32(E1000_EIAM);
wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
wr32(E1000_EIMC, adapter->eims_enable_mask);
@@ -1450,7 +1464,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
wr32(E1000_IAM, 0);
wr32(E1000_IMC, ~0);
wrfl();
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
int i;
for (i = 0; i < adapter->num_q_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
@@ -1467,7 +1481,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
u32 regval = rd32(E1000_EIAC);
wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
@@ -1607,6 +1621,73 @@ static void igb_power_down_link(struct igb_adapter *adapter)
}
/**
+ * igb_check_swap_media - Detect and switch function for Media Auto Sense
+ * @adapter: address of the board private structure
+ **/
+static void igb_check_swap_media(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext, connsw;
+ bool swap_now = false;
+
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ connsw = rd32(E1000_CONNSW);
+
+ /* need to live swap if current media is copper and we have fiber/serdes
+ * to go to.
+ */
+
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
+ swap_now = true;
+ } else if (!(connsw & E1000_CONNSW_SERDESD)) {
+ /* copper signal takes time to appear */
+ if (adapter->copper_tries < 4) {
+ adapter->copper_tries++;
+ connsw |= E1000_CONNSW_AUTOSENSE_CONF;
+ wr32(E1000_CONNSW, connsw);
+ return;
+ } else {
+ adapter->copper_tries = 0;
+ if ((connsw & E1000_CONNSW_PHYSD) &&
+ (!(connsw & E1000_CONNSW_PHY_PDN))) {
+ swap_now = true;
+ connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
+ wr32(E1000_CONNSW, connsw);
+ }
+ }
+ }
+
+ if (!swap_now)
+ return;
+
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ netdev_info(adapter->netdev,
+ "MAS: changing media to fiber/serdes\n");
+ ctrl_ext |=
+ E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ adapter->copper_tries = 0;
+ break;
+ case e1000_media_type_internal_serdes:
+ case e1000_media_type_fiber:
+ netdev_info(adapter->netdev,
+ "MAS: changing media to copper\n");
+ ctrl_ext &=
+ ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ break;
+ default:
+ /* shouldn't get here during regular operation */
+ netdev_err(adapter->netdev,
+ "AMS: Invalid media type found, returning\n");
+ break;
+ }
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+}
+
+/**
* igb_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
**/
@@ -1623,7 +1704,7 @@ int igb_up(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++)
napi_enable(&(adapter->q_vector[i]->napi));
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
igb_configure_msix(adapter);
else
igb_assign_vector(adapter->q_vector[0], 0);
@@ -1719,6 +1800,37 @@ void igb_reinit_locked(struct igb_adapter *adapter)
clear_bit(__IGB_RESETTING, &adapter->state);
}
+/**
+ * igb_enable_mas - Media Autosense re-enable after swap
+ *
+ * @adapter: adapter struct
+ **/
+static s32 igb_enable_mas(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 connsw;
+ s32 ret_val = 0;
+
+ connsw = rd32(E1000_CONNSW);
+ if (!(hw->phy.media_type == e1000_media_type_copper))
+ return ret_val;
+
+ /* configure for SerDes media detect */
+ if (!(connsw & E1000_CONNSW_SERDESD)) {
+ connsw |= E1000_CONNSW_ENRGSRC;
+ connsw |= E1000_CONNSW_AUTOSENSE_EN;
+ wr32(E1000_CONNSW, connsw);
+ wrfl();
+ } else if (connsw & E1000_CONNSW_SERDESD) {
+ /* already SerDes, no need to enable anything */
+ return ret_val;
+ } else {
+ netdev_info(adapter->netdev,
+ "MAS: Unable to configure feature, disabling..\n");
+ adapter->flags &= ~IGB_FLAG_MAS_ENABLE;
+ }
+ return ret_val;
+}
+
void igb_reset(struct igb_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
@@ -1830,6 +1942,16 @@ void igb_reset(struct igb_adapter *adapter)
hw->mac.ops.reset_hw(hw);
wr32(E1000_WUC, 0);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ /* need to resetup here after media swap */
+ adapter->ei.get_invariants(hw);
+ adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
+ }
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ if (igb_enable_mas(adapter))
+ dev_err(&pdev->dev,
+ "Error enabling Media Auto Sense\n");
+ }
if (hw->mac.ops.init_hw(hw))
dev_err(&pdev->dev, "Hardware Error\n");
@@ -1976,6 +2098,58 @@ void igb_set_fw_version(struct igb_adapter *adapter)
}
/**
+ * igb_init_mas - init Media Autosense feature if enabled in the NVM
+ *
+ * @adapter: adapter struct
+ **/
+static void igb_init_mas(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 eeprom_data;
+
+ hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
+ switch (hw->bus.func) {
+ case E1000_FUNC_0:
+ if (eeprom_data & IGB_MAS_ENABLE_0) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ case E1000_FUNC_1:
+ if (eeprom_data & IGB_MAS_ENABLE_1) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ case E1000_FUNC_2:
+ if (eeprom_data & IGB_MAS_ENABLE_2) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ case E1000_FUNC_3:
+ if (eeprom_data & IGB_MAS_ENABLE_3) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ default:
+ /* Shouldn't get here */
+ netdev_err(adapter->netdev,
+ "MAS: Invalid port configuration, returning\n");
+ break;
+ }
+}
+
+/**
* igb_init_i2c - Init I2C interface
* @adapter: pointer to adapter structure
**/
@@ -2022,7 +2196,6 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
s32 ret_val;
static int global_quad_port_a; /* global quad port a indication */
const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
- unsigned long mmio_start, mmio_len;
int err, pci_using_dac;
u8 part_str[E1000_PBANUM_LENGTH];
@@ -2079,11 +2252,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->back = adapter;
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
- mmio_start = pci_resource_start(pdev, 0);
- mmio_len = pci_resource_len(pdev, 0);
-
err = -EIO;
- hw->hw_addr = ioremap(mmio_start, mmio_len);
+ hw->hw_addr = pci_iomap(pdev, 0, 0);
if (!hw->hw_addr)
goto err_ioremap;
@@ -2093,8 +2263,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
- netdev->mem_start = mmio_start;
- netdev->mem_end = mmio_start + mmio_len;
+ netdev->mem_start = pci_resource_start(pdev, 0);
+ netdev->mem_end = pci_resource_end(pdev, 0);
/* PCI config space info */
hw->vendor_id = pdev->vendor;
@@ -2350,6 +2520,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->ets = false;
}
#endif
+ /* Check if Media Autosense is enabled */
+ adapter->ei = *ei;
+ if (hw->dev_spec._82575.mas_capable)
+ igb_init_mas(adapter);
+
/* do hw tstamp init after resetting */
igb_ptp_init(adapter);
@@ -2382,7 +2557,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
dev_info(&pdev->dev,
"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
- adapter->msix_entries ? "MSI-X" :
+ (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
adapter->num_rx_queues, adapter->num_tx_queues);
switch (hw->mac.type) {
@@ -2470,7 +2645,7 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
int err = 0;
int i;
- if (!adapter->msix_entries || num_vfs > 7) {
+ if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
err = -EPERM;
goto out;
}
@@ -3935,6 +4110,7 @@ static void igb_watchdog_task(struct work_struct *work)
struct net_device *netdev = adapter->netdev;
u32 link;
int i;
+ u32 connsw;
link = igb_has_link(adapter);
@@ -3945,7 +4121,21 @@ static void igb_watchdog_task(struct work_struct *work)
link = false;
}
+ /* Force link down if we have fiber to swap to */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ connsw = rd32(E1000_CONNSW);
+ if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
+ link = 0;
+ }
+ }
if (link) {
+ /* Perform a reset if the media type changed. */
+ if (hw->dev_spec._82575.media_changed) {
+ hw->dev_spec._82575.media_changed = false;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ igb_reset(adapter);
+ }
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
@@ -4026,8 +4216,27 @@ static void igb_watchdog_task(struct work_struct *work)
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
+ /* link is down, time to check for alternate media */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
+ }
pm_schedule_suspend(netdev->dev.parent,
MSEC_PER_SEC * 5);
+
+ /* also check for alternate media here */
+ } else if (!netif_carrier_ok(netdev) &&
+ (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
}
}
@@ -4056,7 +4265,7 @@ static void igb_watchdog_task(struct work_struct *work)
}
/* Cause software interrupt to ensure Rx ring is cleaned */
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
for (i = 0; i < adapter->num_q_vectors; i++)
eics |= adapter->q_vector[i]->eims_value;
@@ -5977,7 +6186,7 @@ static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
}
if (!test_bit(__IGB_DOWN, &adapter->state)) {
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
wr32(E1000_EIMS, q_vector->eims_value);
else
igb_irq_enable(adapter);
@@ -7344,7 +7553,7 @@ static void igb_netpoll(struct net_device *netdev)
for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
wr32(E1000_EIMC, q_vector->eims_value);
else
igb_irq_disable(adapter);
@@ -7842,7 +8051,7 @@ int igb_reinit_queues(struct igb_adapter *adapter)
if (netif_running(netdev))
igb_close(netdev);
- igb_clear_interrupt_scheme(adapter);
+ igb_reset_interrupt_capability(adapter);
if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 04bf22e5ee31..675435fc2e53 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1745,7 +1745,7 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
- if (memcmp(addr->sa_data, hw->mac.addr, 6))
+ if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index 2224cc2edf13..1180cd59b570 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -33,7 +33,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <asm/byteorder.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index f38fc0a343a2..0186ea2969fe 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -424,9 +424,10 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
#ifdef BP_EXTENDED_STATS
q_vector->tx.ring->stats.yields++;
#endif
- } else
+ } else {
/* we don't care if someone yielded */
q_vector->state = IXGBE_QV_STATE_NAPI;
+ }
spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -458,9 +459,10 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
#ifdef BP_EXTENDED_STATS
q_vector->rx.ring->stats.yields++;
#endif
- } else
+ } else {
/* preserve yield marks */
q_vector->state |= IXGBE_QV_STATE_POLL;
+ }
spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -552,8 +554,10 @@ struct hwmon_attr {
};
struct hwmon_buff {
- struct device *device;
- struct hwmon_attr *hwmon_list;
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
+ struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
unsigned int n_hwmon;
};
#endif /* CONFIG_IXGBE_HWMON */
@@ -583,6 +587,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
+static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
+{
+ writel(value, ring->tail);
+}
+
#define IXGBE_RX_DESC(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i) \
@@ -740,6 +749,7 @@ struct ixgbe_adapter {
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
+ u8 __iomem *io_addr; /* Mainly for iounmap use */
u32 wol;
u16 bd_number;
@@ -775,7 +785,7 @@ struct ixgbe_adapter {
u32 vferr_refcount;
struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
- struct hwmon_buff ixgbe_hwmon_buff;
+ struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
#ifdef CONFIG_DEBUG_FS
struct dentry *ixgbe_dbg_adapter;
@@ -796,6 +806,7 @@ enum ixgbe_state_t {
__IXGBE_TESTING,
__IXGBE_RESETTING,
__IXGBE_DOWN,
+ __IXGBE_REMOVING,
__IXGBE_SERVICE_SCHED,
__IXGBE_IN_SFP_INIT,
__IXGBE_PTP_RUNNING,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 007a0083a636..edda6814108c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -626,7 +626,7 @@ static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
goto out;
}
- eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
IXGBE_I2C_EEPROM_DEV_ADDR2,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index d259dc76604e..f2e3919750ec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -124,24 +124,65 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
-#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define IXGBE_FAILED_READ_REG 0xffffffffU
-#ifndef writeq
-#define writeq(val, addr) writel((u32) (val), addr); \
- writel((u32) (val >> 32), (addr + 4));
-#endif
+static inline bool ixgbe_removed(void __iomem *addr)
+{
+ return unlikely(!addr);
+}
-#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg);
-#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
-#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\
- writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+ if (ixgbe_removed(reg_addr))
+ return;
+ writel(value, reg_addr + reg);
+}
+#define IXGBE_WRITE_REG(a, reg, value) ixgbe_write_reg((a), (reg), (value))
-#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\
- readl((a)->hw_addr + (reg) + ((offset) << 2)))
+#ifndef writeq
+#define writeq writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel((u32)val, addr);
+ writel((u32)(val >> 32), addr + 4);
+}
+#endif
-#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+
+ if (ixgbe_removed(reg_addr))
+ return;
+ writeq(value, reg_addr + reg);
+}
+#define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value))
+
+static inline u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 value;
+
+ if (ixgbe_removed(reg_addr))
+ return IXGBE_FAILED_READ_REG;
+ value = readl(reg_addr + reg);
+ if (unlikely(value == IXGBE_FAILED_READ_REG))
+ ixgbe_check_remove(hw, reg);
+ return value;
+}
+#define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
+ ixgbe_write_reg((a), (reg) + ((offset) << 2), (value))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) \
+ ixgbe_read_reg((a), (reg) + ((offset) << 2))
+
+#define IXGBE_WRITE_FLUSH(a) ixgbe_read_reg((a), IXGBE_STATUS)
#define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 4e7c9b098b58..043307024c4a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1342,61 +1342,61 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
static const u32 test_pattern[] = {
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ *data = 1;
+ return 1;
+ }
for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
- before = readl(adapter->hw.hw_addr + reg);
- writel((test_pattern[pat] & write),
- (adapter->hw.hw_addr + reg));
- val = readl(adapter->hw.hw_addr + reg);
+ before = ixgbe_read_reg(&adapter->hw, reg);
+ ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
+ val = ixgbe_read_reg(&adapter->hw, reg);
if (val != (test_pattern[pat] & write & mask)) {
e_err(drv, "pattern test reg %04X failed: got "
"0x%08X expected 0x%08X\n",
reg, val, (test_pattern[pat] & write & mask));
*data = reg;
- writel(before, adapter->hw.hw_addr + reg);
- return 1;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return true;
}
- writel(before, adapter->hw.hw_addr + reg);
+ ixgbe_write_reg(&adapter->hw, reg, before);
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
u32 mask, u32 write)
{
u32 val, before;
- before = readl(adapter->hw.hw_addr + reg);
- writel((write & mask), (adapter->hw.hw_addr + reg));
- val = readl(adapter->hw.hw_addr + reg);
+
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ *data = 1;
+ return 1;
+ }
+ before = ixgbe_read_reg(&adapter->hw, reg);
+ ixgbe_write_reg(&adapter->hw, reg, write & mask);
+ val = ixgbe_read_reg(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
e_err(drv, "set/check reg %04X test failed: got 0x%08X "
"expected 0x%08X\n", reg, (val & mask), (write & mask));
*data = reg;
- writel(before, (adapter->hw.hw_addr + reg));
- return 1;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return true;
}
- writel(before, (adapter->hw.hw_addr + reg));
- return 0;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return false;
}
-#define REG_PATTERN_TEST(reg, mask, write) \
- do { \
- if (reg_pattern_test(adapter, data, reg, mask, write)) \
- return 1; \
- } while (0) \
-
-
-#define REG_SET_AND_CHECK(reg, mask, write) \
- do { \
- if (reg_set_and_check(adapter, data, reg, mask, write)) \
- return 1; \
- } while (0) \
-
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
const struct ixgbe_reg_test *test;
u32 value, before, after;
u32 i, toggle;
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ e_err(drv, "Adapter removed - register test blocked\n");
+ *data = 1;
+ return 1;
+ }
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
toggle = 0x7FFFF3FF;
@@ -1419,10 +1419,10 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
* tests. Some bits are read-only, some toggle, and some
* are writeable on newer MACs.
*/
- before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
- value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
- after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
+ before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
+ value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
+ ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
+ after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
if (value != after) {
e_err(drv, "failed STATUS register test got: 0x%08X "
"expected: 0x%08X\n", after, value);
@@ -1430,7 +1430,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
return 1;
}
/* restore previous status */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
+ ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);
/*
* Perform the remainder of the register test, looping through
@@ -1438,38 +1438,47 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
*/
while (test->reg) {
for (i = 0; i < test->array_len; i++) {
+ bool b = false;
+
switch (test->test_type) {
case PATTERN_TEST:
- REG_PATTERN_TEST(test->reg + (i * 0x40),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
break;
case SET_READ_TEST:
- REG_SET_AND_CHECK(test->reg + (i * 0x40),
- test->mask,
- test->write);
+ b = reg_set_and_check(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
break;
case WRITE_NO_TEST:
- writel(test->write,
- (adapter->hw.hw_addr + test->reg)
- + (i * 0x40));
+ ixgbe_write_reg(&adapter->hw,
+ test->reg + (i * 0x40),
+ test->write);
break;
case TABLE32_TEST:
- REG_PATTERN_TEST(test->reg + (i * 4),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 4),
+ test->mask,
+ test->write);
break;
case TABLE64_TEST_LO:
- REG_PATTERN_TEST(test->reg + (i * 8),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 8),
+ test->mask,
+ test->write);
break;
case TABLE64_TEST_HI:
- REG_PATTERN_TEST((test->reg + 4) + (i * 8),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ (test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
break;
}
+ if (b)
+ return 1;
}
test++;
}
@@ -1954,6 +1963,15 @@ static void ixgbe_diag_test(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
bool if_running = netif_running(netdev);
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ e_err(hw, "Adapter removed - test blocked\n");
+ data[0] = 1;
+ data[1] = 1;
+ data[2] = 1;
+ data[3] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
set_bit(__IXGBE_TESTING, &adapter->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
struct ixgbe_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cc06854296a3..6d4ada72dfd0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -64,7 +64,7 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "3.15.1-k"
+#define DRV_VERSION "3.19.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2013 Intel Corporation.";
@@ -278,10 +278,41 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
+ !test_bit(__IXGBE_REMOVING, &adapter->state) &&
!test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
schedule_work(&adapter->service_task);
}
+static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+
+ if (!hw->hw_addr)
+ return;
+ hw->hw_addr = NULL;
+ e_dev_err("Adapter removed\n");
+ ixgbe_service_event_schedule(adapter);
+}
+
+void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
+{
+ u32 value;
+
+ /* The following check avoids a redundant status register read
+ * when the failed read was itself of the status register, and it
+ * also blocks any potential recursion.
+ */
+ if (reg == IXGBE_STATUS) {
+ ixgbe_remove_adapter(hw);
+ return;
+ }
+ value = ixgbe_read_reg(hw, IXGBE_STATUS);
+ if (value == IXGBE_FAILED_READ_REG)
+ ixgbe_remove_adapter(hw);
+}
+
static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
@@ -1314,7 +1345,7 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
* such as IA-64).
*/
wmb();
- writel(val, rx_ring->tail);
+ ixgbe_write_tail(rx_ring, val);
}
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
@@ -2969,7 +3000,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
ring->count * sizeof(union ixgbe_adv_tx_desc));
IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
- ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
+ ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
/*
* set WTHRESH to encourage burst writeback, it should not be set
@@ -3308,6 +3339,8 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
+ if (ixgbe_removed(hw->hw_addr))
+ return;
/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
if (hw->mac.type == ixgbe_mac_82598EB &&
!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
@@ -3332,6 +3365,8 @@ void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
+ if (ixgbe_removed(hw->hw_addr))
+ return;
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
@@ -3372,7 +3407,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
ring->count * sizeof(union ixgbe_adv_rx_desc));
IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
- ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
+ ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
ixgbe_configure_srrctl(adapter, ring);
ixgbe_configure_rscctl(adapter, ring);
@@ -4572,6 +4607,7 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
+ smp_mb__before_clear_bit();
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
@@ -4656,6 +4692,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
int err;
+ if (ixgbe_removed(hw->hw_addr))
+ return;
/* lock SFP init bit to prevent race conditions with the watchdog */
while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
usleep_range(1000, 2000);
@@ -4783,7 +4821,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
int i;
/* signal that we are down to the interrupt handler */
- set_bit(__IXGBE_DOWN, &adapter->state);
+ if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
+ return; /* do nothing if already down */
/* disable receives */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -5028,7 +5067,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* assign number of SR-IOV VFs */
if (hw->mac.type != ixgbe_mac_82598EB) {
- if (max_vfs > 63) {
+ if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
adapter->num_vfs = 0;
e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
} else {
@@ -5874,8 +5913,9 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
u64 eics = 0;
int i;
- /* If we're down or resetting, just bail */
+ /* If we're down, removing or resetting, just bail */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_REMOVING, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
@@ -6122,8 +6162,9 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
**/
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
{
- /* if interface is down do nothing */
+ /* if interface is down, removing or resetting, do nothing */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_REMOVING, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
@@ -6341,8 +6382,9 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
- /* If we're already down or resetting, just bail */
+ /* If we're already down, removing or resetting, just bail */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_REMOVING, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
@@ -6350,7 +6392,9 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
netdev_err(adapter->netdev, "Reset adapter\n");
adapter->tx_timeout_count++;
+ rtnl_lock();
ixgbe_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -6362,6 +6406,15 @@ static void ixgbe_service_task(struct work_struct *work)
struct ixgbe_adapter *adapter = container_of(work,
struct ixgbe_adapter,
service_task);
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ rtnl_lock();
+ ixgbe_down(adapter);
+ rtnl_unlock();
+ }
+ ixgbe_service_event_complete(adapter);
+ return;
+ }
ixgbe_reset_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
ixgbe_sfp_link_config_subtask(adapter);
@@ -6693,7 +6746,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
tx_ring->next_to_use = i;
/* notify HW of packet */
- writel(i, tx_ring->tail);
+ ixgbe_write_tail(tx_ring, i);
return;
dma_error:
@@ -6827,12 +6880,20 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
return __ixgbe_maybe_stop_tx(tx_ring, size);
}
-#ifdef IXGBE_FCOE
-static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
+ struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
+#ifdef IXGBE_FCOE
struct ixgbe_adapter *adapter;
struct ixgbe_ring_feature *f;
int txq;
+#endif
+
+ if (fwd_adapter)
+ return skb->queue_mapping + fwd_adapter->tx_base_queue;
+
+#ifdef IXGBE_FCOE
/*
* only execute the code below if protocol is FCoE
@@ -6858,9 +6919,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
txq -= f->indices;
return txq + f->offset;
+#else
+ return __netdev_pick_tx(dev, skb);
+#endif
}
-#endif
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
@@ -7629,27 +7692,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
kfree(fwd_adapter);
}
-static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
- struct net_device *dev,
- void *priv)
-{
- struct ixgbe_fwd_adapter *fwd_adapter = priv;
- unsigned int queue;
- struct ixgbe_ring *tx_ring;
-
- queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
- tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
-
- return __ixgbe_xmit_frame(skb, dev, tx_ring);
-}
-
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
-#ifdef IXGBE_FCOE
.ndo_select_queue = ixgbe_select_queue,
-#endif
.ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac,
@@ -7689,7 +7736,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
.ndo_dfwd_add_station = ixgbe_fwd_add,
.ndo_dfwd_del_station = ixgbe_fwd_del,
- .ndo_dfwd_start_xmit = ixgbe_fwd_xmit,
};
/**
@@ -7881,6 +7927,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
+ adapter->io_addr = hw->hw_addr;
if (!hw->hw_addr) {
err = -EIO;
goto err_ioremap;
@@ -7972,8 +8019,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Mailbox */
ixgbe_init_mbx_params_pf(hw);
memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+ pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
ixgbe_enable_sriov(adapter);
- pci_sriov_set_totalvfs(pdev, 63);
skip_sriov:
#endif
@@ -8189,7 +8236,7 @@ err_register:
err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
- iounmap(hw->hw_addr);
+ iounmap(adapter->io_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
@@ -8217,7 +8264,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
ixgbe_dbg_adapter_exit(adapter);
- set_bit(__IXGBE_DOWN, &adapter->state);
+ set_bit(__IXGBE_REMOVING, &adapter->state);
cancel_work_sync(&adapter->service_task);
@@ -8256,7 +8303,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
kfree(adapter->ixgbe_ieee_ets);
#endif
- iounmap(adapter->hw.hw_addr);
+ iounmap(adapter->io_addr);
pci_release_selected_regions(pdev, pci_select_bars(pdev,
IORESOURCE_MEM));
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index d4a64e665398..cc3101afd29f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -27,8 +27,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
-#include "ixgbe_type.h"
-#include "ixgbe_common.h"
+#include "ixgbe.h"
#include "ixgbe_mbx.h"
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 39217e5ff7dc..132557c318f8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -29,7 +29,7 @@
#include <linux/delay.h>
#include <linux/sched.h>
-#include "ixgbe_common.h"
+#include "ixgbe.h"
#include "ixgbe_phy.h"
static void ixgbe_i2c_start(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d6f0c0d8cf11..dff0977876f7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -148,7 +148,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
 * physical function. If the user requests greater than
* 63 VFs then it is an error - reset to default of zero.
*/
- adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
+ adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
if (err) {
@@ -257,7 +257,7 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
* PF. The PCI bus driver already checks for other values out of
* range.
*/
- if (num_vfs > 63) {
+ if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
err = -EPERM;
goto err_out;
}
@@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
int err;
+#ifdef CONFIG_PCI_IOV
u32 current_flags = adapter->flags;
+#endif
err = ixgbe_disable_sriov(adapter);
@@ -629,11 +631,14 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
struct ixgbe_hw *hw = &adapter->hw;
unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
u32 reg, reg_offset, vf_shift;
u32 msgbuf[4] = {0, 0, 0, 0};
u8 *addr = (u8 *)(&msgbuf[1]);
+ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+ int i;
e_info(probe, "VF Reset msg received from vf %d\n", vf);
@@ -652,6 +657,17 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
reg |= 1 << vf_shift;
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+ /* force drop enable for all VF Rx queues */
+ for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
+ /* flush previous write */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* indicate to hardware that we want to set drop enable */
+ reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE;
+ reg |= i << IXGBE_QDE_IDX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
+ }
+
/* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
reg |= 1 << vf_shift;
@@ -682,6 +698,15 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
reg |= (1 << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
+ /*
+ * Reset the VF's TDWBAL and TDWBAH registers,
+ * which are not cleared by an FLR
+ */
+ for (i = 0; i < q_per_pool; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
+ }
+
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET;
if (!is_zero_ether_addr(vf_mac)) {
@@ -715,8 +740,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
}
if (adapter->vfinfo[vf].pf_set_mac &&
- memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
- ETH_ALEN)) {
+ !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
e_warn(drv,
"VF %d attempted to override administratively set MAC address\n"
"Reload the VF driver to resume operations\n",
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 4713f9fc7f46..8bd29190514e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -28,6 +28,11 @@
#ifndef _IXGBE_SRIOV_H_
#define _IXGBE_SRIOV_H_
+/* The ixgbe driver limits the max number of VFs that can be enabled to
+ * 63 (IXGBE_MAX_VF_FUNCTIONS - 1)
+ */
+#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1)
+
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index d118def16f35..e74ae3682733 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -111,29 +111,29 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
unsigned int n_attr;
struct hwmon_attr *ixgbe_attr;
- n_attr = adapter->ixgbe_hwmon_buff.n_hwmon;
- ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr];
+ n_attr = adapter->ixgbe_hwmon_buff->n_hwmon;
+ ixgbe_attr = &adapter->ixgbe_hwmon_buff->hwmon_list[n_attr];
switch (type) {
case IXGBE_HWMON_TYPE_LOC:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_label", offset);
+ "temp%u_label", offset + 1);
break;
case IXGBE_HWMON_TYPE_TEMP:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_input", offset);
+ "temp%u_input", offset + 1);
break;
case IXGBE_HWMON_TYPE_CAUTION:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_max", offset);
+ "temp%u_max", offset + 1);
break;
case IXGBE_HWMON_TYPE_MAX:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_crit", offset);
+ "temp%u_crit", offset + 1);
break;
default:
rc = -EPERM;
@@ -147,32 +147,17 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
ixgbe_attr->dev_attr.store = NULL;
ixgbe_attr->dev_attr.attr.mode = S_IRUGO;
ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name;
+ sysfs_attr_init(&ixgbe_attr->dev_attr.attr);
- rc = device_create_file(&adapter->pdev->dev,
- &ixgbe_attr->dev_attr);
+ adapter->ixgbe_hwmon_buff->attrs[n_attr] = &ixgbe_attr->dev_attr.attr;
- if (rc == 0)
- ++adapter->ixgbe_hwmon_buff.n_hwmon;
+ ++adapter->ixgbe_hwmon_buff->n_hwmon;
- return rc;
+ return 0;
}
static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter)
{
- int i;
-
- if (adapter == NULL)
- return;
-
- for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) {
- device_remove_file(&adapter->pdev->dev,
- &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr);
- }
-
- kfree(adapter->ixgbe_hwmon_buff.hwmon_list);
-
- if (adapter->ixgbe_hwmon_buff.device)
- hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device);
}
/* called from ixgbe_main.c */
@@ -184,9 +169,9 @@ void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter)
/* called from ixgbe_main.c */
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
{
- struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff;
+ struct hwmon_buff *ixgbe_hwmon;
+ struct device *hwmon_dev;
unsigned int i;
- int n_attrs;
int rc = 0;
/* If this method isn't defined we don't support thermals */
@@ -198,23 +183,13 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw))
goto exit;
- /*
- * Allocation space for max attributs
- * max num sensors * values (loc, temp, max, caution)
- */
- n_attrs = IXGBE_MAX_SENSORS * 4;
- ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
- GFP_KERNEL);
- if (!ixgbe_hwmon->hwmon_list) {
+ ixgbe_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*ixgbe_hwmon),
+ GFP_KERNEL);
+ if (ixgbe_hwmon == NULL) {
rc = -ENOMEM;
- goto err;
- }
-
- ixgbe_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
- if (IS_ERR(ixgbe_hwmon->device)) {
- rc = PTR_ERR(ixgbe_hwmon->device);
- goto err;
+ goto exit;
}
+ adapter->ixgbe_hwmon_buff = ixgbe_hwmon;
for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
/*
@@ -226,17 +201,28 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
/* Bail if any hwmon attr struct fails to initialize */
rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION);
- rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
- rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
- rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
if (rc)
- goto err;
+ goto exit;
+ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
+ if (rc)
+ goto exit;
+ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
+ if (rc)
+ goto exit;
+ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
+ if (rc)
+ goto exit;
}
- goto exit;
+ ixgbe_hwmon->groups[0] = &ixgbe_hwmon->group;
+ ixgbe_hwmon->group.attrs = ixgbe_hwmon->attrs;
-err:
- ixgbe_sysfs_del_adapter(adapter);
+ hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
+ "ixgbe",
+ ixgbe_hwmon,
+ ixgbe_hwmon->groups);
+ if (IS_ERR(hwmon_dev))
+ rc = PTR_ERR(hwmon_dev);
exit:
return rc;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 7c19e969576f..0d39cfc4a3bf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1980,9 +1980,10 @@ enum {
#define IXGBE_FWSM_TS_ENABLED 0x1
/* Queue Drop Enable */
-#define IXGBE_QDE_ENABLE 0x00000001
-#define IXGBE_QDE_IDX_MASK 0x00007F00
-#define IXGBE_QDE_IDX_SHIFT 8
+#define IXGBE_QDE_ENABLE 0x00000001
+#define IXGBE_QDE_IDX_MASK 0x00007F00
+#define IXGBE_QDE_IDX_SHIFT 8
+#define IXGBE_QDE_WRITE 0x00010000
#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
@@ -2173,6 +2174,14 @@ enum {
#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600))
#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
+/* Translated register #defines */
+#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
+
+#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
enum ixgbe_fdir_pballoc_type {
IXGBE_FDIR_PBALLOC_NONE = 0,
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 3147795bd135..05e4f32d84f7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -183,6 +183,7 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
@@ -277,4 +278,21 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ERR_RESET_FAILED -2
#define IXGBE_ERR_INVALID_ARGUMENT -3
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
+
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+
#endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 54d9acef9c4e..f68b78c732a8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -77,11 +77,11 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
stats.saved_reset_vfgotc)},
{"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
+ {"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
+ {"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
stats.saved_reset_vfmprc)},
- {"rx_csum_offload_good", IXGBEVF_ZSTAT(hw_csum_rx_good)},
{"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
- {"tx_csum_offload_ctxt", IXGBEVF_ZSTAT(hw_csum_tx_good)},
#ifdef BP_EXTENDED_STATS
{"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
{"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
@@ -286,9 +286,9 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].count = new_tx_count;
+ adapter->tx_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].count = new_rx_count;
+ adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
goto clear_reset;
@@ -303,20 +303,20 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
for (i = 0; i < adapter->num_tx_queues; i++) {
/* clone ring and setup updated count */
- tx_ring[i] = adapter->tx_ring[i];
+ tx_ring[i] = *adapter->tx_ring[i];
tx_ring[i].count = new_tx_count;
- err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
- if (!err)
- continue;
- while (i) {
- i--;
- ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
- }
+ err = ixgbevf_setup_tx_resources(&tx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_tx_resources(&tx_ring[i]);
+ }
- vfree(tx_ring);
- tx_ring = NULL;
+ vfree(tx_ring);
+ tx_ring = NULL;
- goto clear_reset;
+ goto clear_reset;
+ }
}
}
@@ -329,20 +329,20 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
for (i = 0; i < adapter->num_rx_queues; i++) {
/* clone ring and setup updated count */
- rx_ring[i] = adapter->rx_ring[i];
+ rx_ring[i] = *adapter->rx_ring[i];
rx_ring[i].count = new_rx_count;
- err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
- if (!err)
- continue;
- while (i) {
- i--;
- ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
- }
+ err = ixgbevf_setup_rx_resources(&rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_rx_resources(&rx_ring[i]);
+ }
- vfree(rx_ring);
- rx_ring = NULL;
+ vfree(rx_ring);
+ rx_ring = NULL;
- goto clear_reset;
+ goto clear_reset;
+ }
}
}
@@ -352,9 +352,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
/* Tx */
if (tx_ring) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- ixgbevf_free_tx_resources(adapter,
- &adapter->tx_ring[i]);
- adapter->tx_ring[i] = tx_ring[i];
+ ixgbevf_free_tx_resources(adapter->tx_ring[i]);
+ *adapter->tx_ring[i] = tx_ring[i];
}
adapter->tx_ring_count = new_tx_count;
@@ -365,9 +364,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
/* Rx */
if (rx_ring) {
for (i = 0; i < adapter->num_rx_queues; i++) {
- ixgbevf_free_rx_resources(adapter,
- &adapter->rx_ring[i]);
- adapter->rx_ring[i] = rx_ring[i];
+ ixgbevf_free_rx_resources(adapter->rx_ring[i]);
+ *adapter->rx_ring[i] = rx_ring[i];
}
adapter->rx_ring_count = new_rx_count;
@@ -382,7 +380,7 @@ clear_reset:
/* free Tx resources if Rx error is encountered */
if (tx_ring) {
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+ ixgbevf_free_tx_resources(&tx_ring[i]);
vfree(tx_ring);
}
@@ -413,15 +411,15 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_yields += adapter->rx_ring[i].bp_yields;
- rx_cleaned += adapter->rx_ring[i].bp_cleaned;
- rx_yields += adapter->rx_ring[i].bp_yields;
+ rx_yields += adapter->rx_ring[i]->stats.yields;
+ rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
+ rx_yields += adapter->rx_ring[i]->stats.yields;
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_yields += adapter->tx_ring[i].bp_yields;
- tx_cleaned += adapter->tx_ring[i].bp_cleaned;
- tx_yields += adapter->tx_ring[i].bp_yields;
+ tx_yields += adapter->tx_ring[i]->stats.yields;
+ tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
+ tx_yields += adapter->tx_ring[i]->stats.yields;
}
adapter->bp_rx_yields = rx_yields;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 8971e2d0a984..54829326bb09 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -46,12 +46,15 @@
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbevf_tx_buffer {
- struct sk_buff *skb;
- dma_addr_t dma;
- unsigned long time_stamp;
union ixgbe_adv_tx_desc *next_to_watch;
- u16 length;
- u16 mapped_as_page;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ __be16 protocol;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
};
struct ixgbevf_rx_buffer {
@@ -59,6 +62,29 @@ struct ixgbevf_rx_buffer {
dma_addr_t dma;
};
+struct ixgbevf_stats {
+ u64 packets;
+ u64 bytes;
+#ifdef BP_EXTENDED_STATS
+ u64 yields;
+ u64 misses;
+ u64 cleaned;
+#endif
+};
+
+struct ixgbevf_tx_queue_stats {
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 tx_done_old;
+};
+
+struct ixgbevf_rx_queue_stats {
+ u64 non_eop_descs;
+ u64 alloc_rx_page_failed;
+ u64 alloc_rx_buff_failed;
+ u64 csum_err;
+};
+
struct ixgbevf_ring {
struct ixgbevf_ring *next;
struct net_device *netdev;
@@ -70,31 +96,27 @@ struct ixgbevf_ring {
unsigned int next_to_use;
unsigned int next_to_clean;
- int queue_index; /* needed for multiqueue queue management */
union {
struct ixgbevf_tx_buffer *tx_buffer_info;
struct ixgbevf_rx_buffer *rx_buffer_info;
};
- u64 total_bytes;
- u64 total_packets;
- struct u64_stats_sync syncp;
- u64 hw_csum_rx_error;
- u64 hw_csum_rx_good;
-#ifdef BP_EXTENDED_STATS
- u64 bp_yields;
- u64 bp_misses;
- u64 bp_cleaned;
-#endif
+ struct ixgbevf_stats stats;
+ struct u64_stats_sync syncp;
+ union {
+ struct ixgbevf_tx_queue_stats tx_stats;
+ struct ixgbevf_rx_queue_stats rx_stats;
+ };
- u16 head;
- u16 tail;
+ u64 hw_csum_rx_error;
+ u8 __iomem *tail;
u16 reg_idx; /* holds the special value that gets the hardware register
* offset associated with this ring, which is different
* for DCB and RSS modes */
u16 rx_buf_len;
+ int queue_index; /* needed for multiqueue queue management */
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -125,8 +147,6 @@ struct ixgbevf_ring {
#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
-#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
-#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
@@ -188,7 +208,7 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
rc = false;
#ifdef BP_EXTENDED_STATS
- q_vector->tx.ring->bp_yields++;
+ q_vector->tx.ring->stats.yields++;
#endif
} else {
/* we don't care if someone yielded */
@@ -223,7 +243,7 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
rc = false;
#ifdef BP_EXTENDED_STATS
- q_vector->rx.ring->bp_yields++;
+ q_vector->rx.ring->stats.yields++;
#endif
} else {
/* preserve yield marks */
@@ -262,6 +282,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
spin_lock_bh(&q_vector->lock);
if (q_vector->state & IXGBEVF_QV_OWNED)
rc = false;
+ q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -315,7 +336,6 @@ static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
struct ixgbevf_adapter {
struct timer_list watchdog_timer;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u16 bd_number;
struct work_struct reset_task;
struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
@@ -328,25 +348,18 @@ struct ixgbevf_adapter {
u32 eims_other;
/* TX */
- struct ixgbevf_ring *tx_ring; /* One per active queue */
int num_tx_queues;
+ struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 restart_queue;
- u64 hw_csum_tx_good;
- u64 lsc_int;
- u64 hw_tso_ctxt;
- u64 hw_tso6_ctxt;
u32 tx_timeout_count;
/* RX */
- struct ixgbevf_ring *rx_ring; /* One per active queue */
int num_rx_queues;
+ struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
- u64 hw_csum_rx_good;
u64 non_eop_descs;
int num_msix_vectors;
- struct msix_entry *msix_entries;
-
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
@@ -356,6 +369,9 @@ struct ixgbevf_adapter {
u32 flags;
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1)
+#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
+
+ struct msix_entry *msix_entries;
/* OS defined structs */
struct net_device *netdev;
@@ -364,10 +380,12 @@ struct ixgbevf_adapter {
/* structs defined in ixgbe_vf.h */
struct ixgbe_hw hw;
u16 msg_enable;
- struct ixgbevf_hw_stats stats;
+ u16 bd_number;
/* Interrupt Throttle Rate */
u32 eitr_param;
+ struct ixgbevf_hw_stats stats;
+
unsigned long state;
u64 tx_busy;
unsigned int tx_ring_count;
@@ -386,9 +404,9 @@ struct ixgbevf_adapter {
u32 link_speed;
bool link_up;
- struct work_struct watchdog_task;
-
spinlock_t mbx_lock;
+
+ struct work_struct watchdog_task;
};
enum ixbgevf_state_t {
@@ -420,10 +438,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter);
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
void ixgbevf_reset(struct ixgbevf_adapter *adapter);
void ixgbevf_set_ethtool_ops(struct net_device *netdev);
-int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+int ixgbevf_setup_rx_resources(struct ixgbevf_ring *);
+int ixgbevf_setup_tx_resources(struct ixgbevf_ring *);
+void ixgbevf_free_rx_resources(struct ixgbevf_ring *);
+void ixgbevf_free_tx_resources(struct ixgbevf_ring *);
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
int ethtool_ioctl(struct ifreq *ifr);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 92ef4cb5a8e8..9df28985eba7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "2.11.3-k"
+#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -95,13 +95,15 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* forward decls */
+static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
-static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
- struct ixgbevf_ring *rx_ring,
+static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
u32 val)
{
+ rx_ring->next_to_use = val;
+
/*
* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -109,7 +111,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
* such as IA-64).
*/
wmb();
- IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
+ writel(val, rx_ring->tail);
}
/**
@@ -143,28 +145,25 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
}
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
- struct ixgbevf_tx_buffer
- *tx_buffer_info)
-{
- if (tx_buffer_info->dma) {
- if (tx_buffer_info->mapped_as_page)
- dma_unmap_page(tx_ring->dev,
- tx_buffer_info->dma,
- tx_buffer_info->length,
- DMA_TO_DEVICE);
- else
+ struct ixgbevf_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(tx_ring->dev,
- tx_buffer_info->dma,
- tx_buffer_info->length,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
- tx_buffer_info->dma = 0;
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
}
- if (tx_buffer_info->skb) {
- dev_kfree_skb_any(tx_buffer_info->skb);
- tx_buffer_info->skb = NULL;
- }
- tx_buffer_info->time_stamp = 0;
- /* tx_buffer_info must be completely set up in the transmit path */
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* tx_buffer must be completely set up in the transmit path */
}
#define IXGBE_MAX_TXD_PWR 14
@@ -185,20 +184,21 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *tx_ring)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
- union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
- struct ixgbevf_tx_buffer *tx_buffer_info;
- unsigned int i, count = 0;
+ struct ixgbevf_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = tx_ring->count / 2;
+ unsigned int i = tx_ring->next_to_clean;
if (test_bit(__IXGBEVF_DOWN, &adapter->state))
return true;
- i = tx_ring->next_to_clean;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- eop_desc = tx_buffer_info->next_to_watch;
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
do {
- bool cleaned = false;
+ union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
/* if next_to_watch is not set then there is no work pending */
if (!eop_desc)
@@ -212,67 +212,90 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
break;
/* clear next_to_watch to prevent false hangs */
- tx_buffer_info->next_to_watch = NULL;
+ tx_buffer->next_to_watch = NULL;
- for ( ; !cleaned; count++) {
- struct sk_buff *skb;
- tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- cleaned = (tx_desc == eop_desc);
- skb = tx_buffer_info->skb;
-
- if (cleaned && skb) {
- unsigned int segs, bytecount;
-
- /* gso_segs is currently only valid for tcp */
- segs = skb_shinfo(skb)->gso_segs ?: 1;
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
- total_packets += segs;
- total_bytes += bytecount;
- }
+ /* update the statistics for this packet */
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
- ixgbevf_unmap_and_free_tx_resource(tx_ring,
- tx_buffer_info);
+ /* free the skb */
+ dev_kfree_skb_any(tx_buffer->skb);
- tx_desc->wb.status = 0;
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ /* clear tx_buffer data */
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
i++;
- if (i == tx_ring->count)
- i = 0;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+ }
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
}
- eop_desc = tx_buffer_info->next_to_watch;
- } while (count < tx_ring->count);
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
- if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
smp_mb();
+
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
- ++adapter->restart_queue;
+ ++tx_ring->tx_stats.restart_queue;
}
}
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->total_bytes += total_bytes;
- tx_ring->total_packets += total_packets;
- u64_stats_update_end(&tx_ring->syncp);
- q_vector->tx.total_bytes += total_bytes;
- q_vector->tx.total_packets += total_packets;
-
- return count < tx_ring->count;
+ return !!budget;
}
/**
@@ -341,7 +364,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
/* if IP and error */
if ((status_err & IXGBE_RXD_STAT_IPCS) &&
(status_err & IXGBE_RXDADV_ERR_IPE)) {
- ring->hw_csum_rx_error++;
+ ring->rx_stats.csum_err++;
return;
}
@@ -349,51 +372,46 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
return;
if (status_err & IXGBE_RXDADV_ERR_TCPE) {
- ring->hw_csum_rx_error++;
+ ring->rx_stats.csum_err++;
return;
}
/* It must be a TCP or UDP packet with a valid checksum */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- ring->hw_csum_rx_good++;
}
/**
* ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
**/
-static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring,
+static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
int cleaned_count)
{
- struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbevf_rx_buffer *bi;
unsigned int i = rx_ring->next_to_use;
- bi = &rx_ring->rx_buffer_info[i];
-
while (cleaned_count--) {
rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
if (!bi->skb) {
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
rx_ring->rx_buf_len);
- if (!skb) {
- adapter->alloc_rx_buff_failed++;
+ if (!skb)
goto no_buffers;
- }
+
bi->skb = skb;
- bi->dma = dma_map_single(&pdev->dev, skb->data,
+ bi->dma = dma_map_single(rx_ring->dev, skb->data,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&pdev->dev, bi->dma)) {
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
dev_kfree_skb(skb);
bi->skb = NULL;
- dev_err(&pdev->dev, "RX DMA map failed\n");
+ dev_err(rx_ring->dev, "Rx DMA map failed\n");
break;
}
}
@@ -402,14 +420,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
i++;
if (i == rx_ring->count)
i = 0;
- bi = &rx_ring->rx_buffer_info[i];
}
no_buffers:
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
- ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
- }
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ if (rx_ring->next_to_use != i)
+ ixgbevf_release_rx_desc(rx_ring, i);
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
@@ -424,8 +440,6 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring,
int budget)
{
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
struct sk_buff *skb;
@@ -451,7 +465,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
rx_buffer_info->skb = NULL;
if (rx_buffer_info->dma) {
- dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+ dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
@@ -471,7 +485,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
if (!(staterr & IXGBE_RXD_STAT_EOP)) {
skb->next = next_buffer->skb;
IXGBE_CB(skb->next)->prev = skb;
- adapter->non_eop_descs++;
+ rx_ring->rx_stats.non_eop_descs++;
goto next_desc;
}
@@ -503,7 +517,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
* source pruning.
*/
if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
- ether_addr_equal(adapter->netdev->dev_addr,
+ ether_addr_equal(rx_ring->netdev->dev_addr,
eth_hdr(skb)->h_source)) {
dev_kfree_skb_irq(skb);
goto next_desc;
@@ -516,8 +530,7 @@ next_desc:
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
- ixgbevf_alloc_rx_buffers(adapter, rx_ring,
- cleaned_count);
+ ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
@@ -532,11 +545,11 @@ next_desc:
cleaned_count = ixgbevf_desc_unused(rx_ring);
if (cleaned_count)
- ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->total_packets += total_rx_packets;
- rx_ring->total_bytes += total_rx_bytes;
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
u64_stats_update_end(&rx_ring->syncp);
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
@@ -641,9 +654,9 @@ static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
if (found)
- ring->bp_cleaned += found;
+ ring->stats.cleaned += found;
else
- ring->bp_misses++;
+ ring->stats.misses++;
#endif
if (found)
break;
@@ -848,8 +861,8 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
- a->rx_ring[r_idx].next = q_vector->rx.ring;
- q_vector->rx.ring = &a->rx_ring[r_idx];
+ a->rx_ring[r_idx]->next = q_vector->rx.ring;
+ q_vector->rx.ring = a->rx_ring[r_idx];
q_vector->rx.count++;
}
@@ -858,8 +871,8 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
- a->tx_ring[t_idx].next = q_vector->tx.ring;
- q_vector->tx.ring = &a->tx_ring[t_idx];
+ a->tx_ring[t_idx]->next = q_vector->tx.ring;
+ q_vector->tx.ring = a->tx_ring[t_idx];
q_vector->tx.count++;
}
@@ -1087,6 +1100,70 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
}
/**
+ * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 tdba = ring->dma;
+ int wait_loop = 10;
+ u32 txdctl = IXGBE_TXDCTL_ENABLE;
+ u8 reg_idx = ring->reg_idx;
+
+ /* disable queue to avoid issues while updating state */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ IXGBE_WRITE_FLUSH(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
+ ring->count * sizeof(union ixgbe_adv_tx_desc));
+
+ /* disable head writeback */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
+
+ /* enable relaxed ordering */
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
+ (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_TXCTRL_DATA_RRO_EN));
+
+ /* reset head and tail pointers */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
+ ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);
+
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ /* In order to avoid issues WTHRESH + PTHRESH should always be equal
+ * to or less than the number of on chip descriptors, which is
+ * currently 40.
+ */
+ txdctl |= (8 << 16); /* WTHRESH = 8 */
+
+ /* Setting PTHRESH to 32 improves performance */
+ txdctl |= (1 << 8) | /* HTHRESH = 1 */
+ 32; /* PTHRESH = 32 */
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
+
+ /* poll to verify queue is enabled */
+ do {
+ usleep_range(1000, 2000);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
+ } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!wait_loop)
+ pr_err("Could not enable Tx Queue %d\n", reg_idx);
+}
+
+/**
* ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
* @adapter: board private structure
*
@@ -1094,31 +1171,11 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
- u64 tdba;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 i, j, tdlen, txctrl;
+ u32 i;
/* Setup the HW Tx Head and Tail descriptor pointers */
- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbevf_ring *ring = &adapter->tx_ring[i];
- j = ring->reg_idx;
- tdba = ring->dma;
- tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
- IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
- (tdba & DMA_BIT_MASK(32)));
- IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
- IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
- adapter->tx_ring[i].head = IXGBE_VFTDH(j);
- adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
- /* Disable Tx Head Writeback RO bit, since this hoses
- * bookkeeping if things aren't delivered in order.
- */
- txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
- }
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
@@ -1129,7 +1186,7 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
struct ixgbe_hw *hw = &adapter->hw;
u32 srrctl;
- rx_ring = &adapter->rx_ring[index];
+ rx_ring = adapter->rx_ring[index];
srrctl = IXGBE_SRRCTL_DROP_EN;
@@ -1187,7 +1244,93 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
rx_buf_len = IXGBEVF_RXBUFFER_10K;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+ adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
+}
+
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+ /* the hardware may take up to 100us to really disable the rx queue */
+ do {
+ udelay(10);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop)
+ pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
+ reg_idx);
+}
+
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ do {
+ usleep_range(1000, 2000);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop)
+ pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
+ reg_idx);
+}
+
+static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 rdba = ring->dma;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ /* disable queue to avoid issues while updating state */
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ ixgbevf_disable_rx_queue(adapter, ring);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
+ ring->count * sizeof(union ixgbe_adv_rx_desc));
+
+ /* enable relaxed ordering */
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+ IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+
+ /* reset head and tail pointers */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
+ ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx);
+
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ ixgbevf_configure_srrctl(adapter, reg_idx);
+
+ /* prevent DMA from exceeding buffer space available */
+ rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+ rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
+ rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+ ixgbevf_rx_desc_queue_enable(adapter, ring);
+ ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}
/**
@@ -1198,33 +1341,17 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
- u64 rdba;
- struct ixgbe_hw *hw = &adapter->hw;
- int i, j;
- u32 rdlen;
+ int i;
ixgbevf_setup_psrtype(adapter);
/* set_rx_buffer_len must be called before ring initialization */
ixgbevf_set_rx_buffer_len(adapter);
- rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rdba = adapter->rx_ring[i].dma;
- j = adapter->rx_ring[i].reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
- (rdba & DMA_BIT_MASK(32)));
- IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
- IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
- adapter->rx_ring[i].head = IXGBE_VFRDH(j);
- adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
-
- ixgbevf_configure_srrctl(adapter, j);
- }
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
@@ -1366,69 +1493,54 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
}
}
-static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
+static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int def_q = 0;
+ unsigned int num_tcs = 0;
+ unsigned int num_rx_queues = 1;
+ int err;
- ixgbevf_set_rx_mode(netdev);
+ spin_lock_bh(&adapter->mbx_lock);
- ixgbevf_restore_vlan(adapter);
+ /* fetch queue configuration from the PF */
+ err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
- ixgbevf_configure_tx(adapter);
- ixgbevf_configure_rx(adapter);
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbevf_ring *ring = &adapter->rx_ring[i];
- ixgbevf_alloc_rx_buffers(adapter, ring,
- ixgbevf_desc_unused(ring));
- }
-}
+ spin_unlock_bh(&adapter->mbx_lock);
-#define IXGBEVF_MAX_RX_DESC_POLL 10
-static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
- int rxr)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
- u32 rxdctl;
- int j = adapter->rx_ring[rxr].reg_idx;
+ if (err)
+ return err;
- do {
- usleep_range(1000, 2000);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
- } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (num_tcs > 1) {
+ /* update default Tx ring register index */
+ adapter->tx_ring[0]->reg_idx = def_q;
- if (!wait_loop)
- hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
- rxr);
+ /* we need as many queues as traffic classes */
+ num_rx_queues = num_tcs;
+ }
+
+ /* if we have a bad config abort request queue reset */
+ if (adapter->num_rx_queues != num_rx_queues) {
+ /* force mailbox timeout to prevent further messages */
+ hw->mbx.timeout = 0;
+
+ /* wait for watchdog to come around and bail us out */
+ adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+ }
- ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
- (adapter->rx_ring[rxr].count - 1));
+ return 0;
}
-static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *ring)
+static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
- struct ixgbe_hw *hw = &adapter->hw;
- int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
- u32 rxdctl;
- u8 reg_idx = ring->reg_idx;
-
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
- rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ ixgbevf_configure_dcb(adapter);
- /* write value back with RXDCTL.ENABLE bit cleared */
- IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+ ixgbevf_set_rx_mode(adapter->netdev);
- /* the hardware may take up to 100us to really disable the rx queue */
- do {
- udelay(10);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
- } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+ ixgbevf_restore_vlan(adapter);
- if (!wait_loop)
- hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
- reg_idx);
+ ixgbevf_configure_tx(adapter);
+ ixgbevf_configure_rx(adapter);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1493,37 +1605,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- int i, j = 0;
- int num_rx_rings = adapter->num_rx_queues;
- u32 txdctl, rxdctl;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
- /* enable WTHRESH=8 descriptors, to encourage burst writeback */
- txdctl |= (8 << 16);
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
- }
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
- txdctl |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
- }
-
- for (i = 0; i < num_rx_rings; i++) {
- j = adapter->rx_ring[i].reg_idx;
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
- rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
- if (hw->mac.type == ixgbe_mac_X540_vf) {
- rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
- rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
- IXGBE_RXDCTL_RLPML_EN);
- }
- IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
- ixgbevf_rx_desc_queue_enable(adapter, i);
- }
ixgbevf_configure_msix(adapter);
@@ -1549,85 +1630,10 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
mod_timer(&adapter->watchdog_timer, jiffies);
}
-static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbevf_ring *rx_ring;
- unsigned int def_q = 0;
- unsigned int num_tcs = 0;
- unsigned int num_rx_queues = 1;
- int err, i;
-
- spin_lock_bh(&adapter->mbx_lock);
-
- /* fetch queue configuration from the PF */
- err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
-
- spin_unlock_bh(&adapter->mbx_lock);
-
- if (err)
- return err;
-
- if (num_tcs > 1) {
- /* update default Tx ring register index */
- adapter->tx_ring[0].reg_idx = def_q;
-
- /* we need as many queues as traffic classes */
- num_rx_queues = num_tcs;
- }
-
- /* nothing to do if we have the correct number of queues */
- if (adapter->num_rx_queues == num_rx_queues)
- return 0;
-
- /* allocate new rings */
- rx_ring = kcalloc(num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!rx_ring)
- return -ENOMEM;
-
- /* setup ring fields */
- for (i = 0; i < num_rx_queues; i++) {
- rx_ring[i].count = adapter->rx_ring_count;
- rx_ring[i].queue_index = i;
- rx_ring[i].reg_idx = i;
- rx_ring[i].dev = &adapter->pdev->dev;
- rx_ring[i].netdev = adapter->netdev;
-
- /* allocate resources on the ring */
- err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
- if (err) {
- while (i) {
- i--;
- ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
- }
- kfree(rx_ring);
- return err;
- }
- }
-
- /* free the existing rings and queues */
- ixgbevf_free_all_rx_resources(adapter);
- adapter->num_rx_queues = 0;
- kfree(adapter->rx_ring);
-
- /* move new rings into position on the adapter struct */
- adapter->rx_ring = rx_ring;
- adapter->num_rx_queues = num_rx_queues;
-
- /* reset ring to vector mapping */
- ixgbevf_reset_q_vectors(adapter);
- ixgbevf_map_rings_to_vectors(adapter);
-
- return 0;
-}
-
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- ixgbevf_reset_queues(adapter);
-
ixgbevf_configure(adapter);
ixgbevf_up_complete(adapter);
@@ -1640,13 +1646,10 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
* @rx_ring: ring to free buffers from
**/
-static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring)
+static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
unsigned long size;
unsigned int i;
@@ -1659,7 +1662,7 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+ dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
@@ -1680,23 +1683,13 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
-
- if (rx_ring->head)
- writel(0, adapter->hw.hw_addr + rx_ring->head);
- if (rx_ring->tail)
- writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
* ixgbevf_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
* @tx_ring: ring to be cleaned
**/
-static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring)
+static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned long size;
@@ -1715,14 +1708,6 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
memset(tx_ring->tx_buffer_info, 0, size);
memset(tx_ring->desc, 0, tx_ring->size);
-
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
-
- if (tx_ring->head)
- writel(0, adapter->hw.hw_addr + tx_ring->head);
- if (tx_ring->tail)
- writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
@@ -1734,7 +1719,7 @@ static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+ ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}
/**
@@ -1746,22 +1731,21 @@ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+ ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 txdctl;
- int i, j;
+ int i;
/* signal that we are down to the interrupt handler */
set_bit(__IXGBEVF_DOWN, &adapter->state);
/* disable all enabled rx queues */
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
+ ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
netif_tx_disable(netdev);
@@ -1782,10 +1766,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
- (txdctl & ~IXGBE_TXDCTL_ENABLE));
+ u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
+ IXGBE_TXDCTL_SWFLSH);
}
netif_carrier_off(netdev);
@@ -1889,9 +1873,28 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
**/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int def_q = 0;
+ unsigned int num_tcs = 0;
+ int err;
+
/* Start with base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
+
+ spin_lock_bh(&adapter->mbx_lock);
+
+ /* fetch queue configuration from the PF */
+ err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+
+ if (err)
+ return;
+
+ /* we need as many queues as traffic classes */
+ if (num_tcs > 1)
+ adapter->num_rx_queues = num_tcs;
}
/**
@@ -1904,40 +1907,50 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
**/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
- int i;
+ struct ixgbevf_ring *ring;
+ int rx = 0, tx = 0;
- adapter->tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!adapter->tx_ring)
- goto err_tx_ring_allocation;
+ for (; tx < adapter->num_tx_queues; tx++) {
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ goto err_allocation;
- adapter->rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!adapter->rx_ring)
- goto err_rx_ring_allocation;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = tx;
+ ring->reg_idx = tx;
- for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i].count = adapter->tx_ring_count;
- adapter->tx_ring[i].queue_index = i;
- /* reg_idx may be remapped later by DCB config */
- adapter->tx_ring[i].reg_idx = i;
- adapter->tx_ring[i].dev = &adapter->pdev->dev;
- adapter->tx_ring[i].netdev = adapter->netdev;
+ adapter->tx_ring[tx] = ring;
}
- for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i].count = adapter->rx_ring_count;
- adapter->rx_ring[i].queue_index = i;
- adapter->rx_ring[i].reg_idx = i;
- adapter->rx_ring[i].dev = &adapter->pdev->dev;
- adapter->rx_ring[i].netdev = adapter->netdev;
+ for (; rx < adapter->num_rx_queues; rx++) {
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ goto err_allocation;
+
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rx;
+ ring->reg_idx = rx;
+
+ adapter->rx_ring[rx] = ring;
}
return 0;
-err_rx_ring_allocation:
- kfree(adapter->tx_ring);
-err_tx_ring_allocation:
+err_allocation:
+ while (tx) {
+ kfree(adapter->tx_ring[--tx]);
+ adapter->tx_ring[tx] = NULL;
+ }
+
+ while (rx) {
+ kfree(adapter->rx_ring[--rx]);
+ adapter->rx_ring[rx] = NULL;
+ }
return -ENOMEM;
}
@@ -2128,6 +2141,17 @@ err_set_interrupt:
**/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ kfree(adapter->tx_ring[i]);
+ adapter->tx_ring[i] = NULL;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ kfree(adapter->rx_ring[i]);
+ adapter->rx_ring[i] = NULL;
+ }
+
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
@@ -2258,11 +2282,8 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->hw_csum_rx_error +=
- adapter->rx_ring[i].hw_csum_rx_error;
- adapter->hw_csum_rx_good +=
- adapter->rx_ring[i].hw_csum_rx_good;
- adapter->rx_ring[i].hw_csum_rx_error = 0;
- adapter->rx_ring[i].hw_csum_rx_good = 0;
+ adapter->rx_ring[i]->hw_csum_rx_error;
+ adapter->rx_ring[i]->hw_csum_rx_error = 0;
}
}
@@ -2340,6 +2361,8 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
bool link_up = adapter->link_up;
s32 need_reset;
+ ixgbevf_queue_reset_subtask(adapter);
+
adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
/*
@@ -2408,22 +2431,22 @@ pf_has_reset:
/**
* ixgbevf_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
* @tx_ring: Tx descriptor ring for a specific queue
*
* Free all transmit software resources
**/
-void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring)
+void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbevf_clean_tx_ring(adapter, tx_ring);
+ ixgbevf_clean_tx_ring(tx_ring);
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
tx_ring->dma);
tx_ring->desc = NULL;
@@ -2440,23 +2463,18 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- if (adapter->tx_ring[i].desc)
- ixgbevf_free_tx_resources(adapter,
- &adapter->tx_ring[i]);
-
+ if (adapter->tx_ring[i]->desc)
+ ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}
/**
* ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
* @tx_ring: tx descriptor ring (for a specific queue) to setup
*
* Return 0 on success, negative on failure
**/
-int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring)
+int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
int size;
size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@ -2468,13 +2486,11 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
return 0;
err:
@@ -2500,7 +2516,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
if (!err)
continue;
hw_dbg(&adapter->hw,
@@ -2513,40 +2529,34 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
* @rx_ring: rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
**/
-int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring)
+int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
int size;
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
rx_ring->rx_buffer_info = vzalloc(size);
if (!rx_ring->rx_buffer_info)
- goto alloc_failed;
+ goto err;
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
- if (!rx_ring->desc) {
- vfree(rx_ring->rx_buffer_info);
- rx_ring->rx_buffer_info = NULL;
- goto alloc_failed;
- }
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
+ if (!rx_ring->desc)
+ goto err;
return 0;
-alloc_failed:
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
return -ENOMEM;
}
@@ -2565,7 +2575,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
if (!err)
continue;
hw_dbg(&adapter->hw,
@@ -2577,22 +2587,18 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
* @rx_ring: ring to clean the resources from
*
* Free all receive software resources
**/
-void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring)
+void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbevf_clean_rx_ring(adapter, rx_ring);
+ ixgbevf_clean_rx_ring(rx_ring);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
rx_ring->dma);
rx_ring->desc = NULL;
@@ -2609,66 +2615,8 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- if (adapter->rx_ring[i].desc)
- ixgbevf_free_rx_resources(adapter,
- &adapter->rx_ring[i]);
-}
-
-static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbevf_ring *rx_ring;
- unsigned int def_q = 0;
- unsigned int num_tcs = 0;
- unsigned int num_rx_queues = 1;
- int err, i;
-
- spin_lock_bh(&adapter->mbx_lock);
-
- /* fetch queue configuration from the PF */
- err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
-
- spin_unlock_bh(&adapter->mbx_lock);
-
- if (err)
- return err;
-
- if (num_tcs > 1) {
- /* update default Tx ring register index */
- adapter->tx_ring[0].reg_idx = def_q;
-
- /* we need as many queues as traffic classes */
- num_rx_queues = num_tcs;
- }
-
- /* nothing to do if we have the correct number of queues */
- if (adapter->num_rx_queues == num_rx_queues)
- return 0;
-
- /* allocate new rings */
- rx_ring = kcalloc(num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!rx_ring)
- return -ENOMEM;
-
- /* setup ring fields */
- for (i = 0; i < num_rx_queues; i++) {
- rx_ring[i].count = adapter->rx_ring_count;
- rx_ring[i].queue_index = i;
- rx_ring[i].reg_idx = i;
- rx_ring[i].dev = &adapter->pdev->dev;
- rx_ring[i].netdev = adapter->netdev;
- }
-
- /* free the existing ring and queues */
- adapter->num_rx_queues = 0;
- kfree(adapter->rx_ring);
-
- /* move new rings into position on the adapter struct */
- adapter->rx_ring = rx_ring;
- adapter->num_rx_queues = num_rx_queues;
-
- return 0;
+ if (adapter->rx_ring[i]->desc)
+ ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}
/**
@@ -2714,11 +2662,6 @@ static int ixgbevf_open(struct net_device *netdev)
}
}
- /* setup queue reg_idx and Rx queue count */
- err = ixgbevf_setup_queues(adapter);
- if (err)
- goto err_setup_queues;
-
/* allocate transmit descriptors */
err = ixgbevf_setup_all_tx_resources(adapter);
if (err)
@@ -2756,7 +2699,6 @@ err_setup_rx:
ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
ixgbevf_free_all_tx_resources(adapter);
-err_setup_queues:
ixgbevf_reset(adapter);
err_setup_reset:
@@ -2788,6 +2730,34 @@ static int ixgbevf_close(struct net_device *netdev)
return 0;
}
+static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *dev = adapter->netdev;
+
+ if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
+ return;
+
+ adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_RESETTING, &adapter->state))
+ return;
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match packet buffer alignment. Unfortunately, the
+ * hardware is not flexible enough to do this dynamically.
+ */
+ if (netif_running(dev))
+ ixgbevf_close(dev);
+
+ ixgbevf_clear_interrupt_scheme(adapter);
+ ixgbevf_init_interrupt_scheme(adapter);
+
+ if (netif_running(dev))
+ ixgbevf_open(dev);
+}
+
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
u32 vlan_macip_lens, u32 type_tucmd,
u32 mss_l4len_idx)
@@ -2810,8 +2780,10 @@ static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
}
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+ struct ixgbevf_tx_buffer *first,
+ u8 *hdr_len)
{
+ struct sk_buff *skb = first->skb;
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
@@ -2836,12 +2808,17 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
IPPROTO_TCP,
0);
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ first->tx_flags |= IXGBE_TX_FLAGS_TSO |
+ IXGBE_TX_FLAGS_CSUM |
+ IXGBE_TX_FLAGS_IPV4;
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
+ first->tx_flags |= IXGBE_TX_FLAGS_TSO |
+ IXGBE_TX_FLAGS_CSUM;
}
/* compute header lengths */
@@ -2849,6 +2826,10 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
*hdr_len += l4len;
*hdr_len = skb_transport_offset(skb) + l4len;
+ /* update gso size and bytecount with header size */
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
/* mss_l4len_id: use 1 as index for TSO */
mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
@@ -2857,7 +2838,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = skb_network_header_len(skb);
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
type_tucmd, mss_l4len_idx);
@@ -2865,9 +2846,10 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
return 1;
}
-static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
+ struct ixgbevf_tx_buffer *first)
{
+ struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
@@ -2888,7 +2870,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
"partial checksum but proto=%x!\n",
- skb->protocol);
+ first->protocol);
}
break;
}
@@ -2916,184 +2898,190 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
}
break;
}
+
+ /* update TX checksum flag */
+ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
}
/* vlan_macip_lens: MACLEN, VLAN tag */
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
type_tucmd, mss_l4len_idx);
-
- return (skb->ip_summed == CHECKSUM_PARTIAL);
}
-static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
- struct ixgbevf_tx_buffer *tx_buffer_info;
- unsigned int len;
- unsigned int total = skb->len;
- unsigned int offset = 0, size;
- int count = 0;
- unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned int f;
- int i;
+ /* set type for advanced descriptor with frame checksum insertion */
+ __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_IFCS |
+ IXGBE_ADVTXD_DCMD_DEXT);
- i = tx_ring->next_to_use;
+ /* set HW vlan bit if vlan is present */
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
- len = min(skb_headlen(skb), total);
- while (len) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
-
- tx_buffer_info->length = size;
- tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = dma_map_single(tx_ring->dev,
- skb->data + offset,
- size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
- goto dma_error;
+ /* set segmentation enable bits for TSO/FSO */
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
- len -= size;
- total -= size;
- offset += size;
- count++;
- i++;
- if (i == tx_ring->count)
- i = 0;
- }
+ return cmd_type;
+}
- for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
+ u32 tx_flags, unsigned int paylen)
+{
+ __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
- frag = &skb_shinfo(skb)->frags[f];
- len = min((unsigned int)skb_frag_size(frag), total);
- offset = 0;
+ /* enable L4 checksum for TSO and TX checksum offload */
+ if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
- while (len) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+ /* enable IPv4 checksum for TSO */
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
- tx_buffer_info->length = size;
- tx_buffer_info->dma =
- skb_frag_dma_map(tx_ring->dev, frag,
- offset, size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev,
- tx_buffer_info->dma))
- goto dma_error;
- tx_buffer_info->mapped_as_page = true;
+ /* use index 1 context for TSO/FSO/FCOE */
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
- len -= size;
- total -= size;
- offset += size;
- count++;
- i++;
- if (i == tx_ring->count)
- i = 0;
- }
- if (total == 0)
- break;
- }
+ /* Check Context must be set if Tx switch is enabled, which it
+ * always is for the case where virtual functions are running
+ */
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
- if (i == 0)
- i = tx_ring->count - 1;
- else
- i = i - 1;
- tx_ring->tx_buffer_info[i].skb = skb;
+ tx_desc->read.olinfo_status = olinfo_status;
+}
- return count;
+static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
+ struct ixgbevf_tx_buffer *first,
+ const u8 hdr_len)
+{
+ dma_addr_t dma;
+ struct sk_buff *skb = first->skb;
+ struct ixgbevf_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ unsigned int paylen = skb->len - hdr_len;
+ u32 tx_flags = first->tx_flags;
+ __le32 cmd_type;
+ u16 i = tx_ring->next_to_use;
-dma_error:
- dev_err(tx_ring->dev, "TX DMA map failed\n");
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- /* clear timestamp and dma mappings for failed tx_buffer_info map */
- tx_buffer_info->dma = 0;
- count--;
+ ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
+ cmd_type = ixgbevf_tx_cmd_type(tx_flags);
- /* clear timestamp and dma mappings for remaining portion of packet */
- while (count >= 0) {
- count--;
- i--;
- if (i < 0)
- i += tx_ring->count;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
- }
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
- return count;
-}
+ /* record length, and DMA address */
+ dma_unmap_len_set(first, len, size);
+ dma_unmap_addr_set(first, dma, dma);
-static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
- int count, unsigned int first, u32 paylen,
- u8 hdr_len)
-{
- union ixgbe_adv_tx_desc *tx_desc = NULL;
- struct ixgbevf_tx_buffer *tx_buffer_info;
- u32 olinfo_status = 0, cmd_type_len = 0;
- unsigned int i;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+ for (;;) {
+ while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
+ tx_desc->read.cmd_type_len =
+ cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
- cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
- cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+ dma += IXGBE_MAX_DATA_PER_TXD;
+ size -= IXGBE_MAX_DATA_PER_TXD;
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_desc->read.olinfo_status = 0;
+ }
- if (tx_flags & IXGBE_TX_FLAGS_CSUM)
- olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
+ if (likely(!data_len))
+ break;
- if (tx_flags & IXGBE_TX_FLAGS_TSO) {
- cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+ tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
- /* use index 1 context for tso */
- olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
- if (tx_flags & IXGBE_TX_FLAGS_IPV4)
- olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
- }
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
- /*
- * Check Context must be set if Tx switch is enabled, which it
- * always is for case where virtual functions are running
- */
- olinfo_status |= IXGBE_ADVTXD_CC;
+ size = skb_frag_size(frag);
+ data_len -= size;
- olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
- i = tx_ring->next_to_use;
- while (count--) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
- tx_desc->read.cmd_type_len =
- cpu_to_le32(cmd_type_len | tx_buffer_info->length);
- tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
- i++;
- if (i == tx_ring->count)
- i = 0;
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_desc->read.olinfo_status = 0;
+
+ frag++;
}
- tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+ /* write last descriptor with RS and EOP bits */
+ cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
+ tx_desc->read.cmd_type_len = cmd_type;
- tx_ring->tx_buffer_info[first].time_stamp = jiffies;
+ /* set the timestamp */
+ first->time_stamp = jiffies;
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch. (Only applicable for weak-ordered
+ * memory model archs, such as IA-64).
+ *
+ * We also need this memory barrier (wmb) to make certain all of the
+ * status bits have been updated before next_to_watch is written.
*/
wmb();
- tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ /* notify HW of packet */
+ writel(i, tx_ring->tail);
+
+ return;
+dma_error:
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_buffer_info map */
+ for (;;) {
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+ if (tx_buffer == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
tx_ring->next_to_use = i;
}
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
- struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
-
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
@@ -3107,7 +3095,8 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
- ++adapter->restart_queue;
+ ++tx_ring->tx_stats.restart_queue;
+
return 0;
}
@@ -3121,22 +3110,23 @@ static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_tx_buffer *first;
struct ixgbevf_ring *tx_ring;
- unsigned int first;
- unsigned int tx_flags = 0;
- u8 hdr_len = 0;
- int r_idx = 0, tso;
+ int tso;
+ u32 tx_flags = 0;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
unsigned short f;
#endif
+ u8 hdr_len = 0;
u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
+
if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
- tx_ring = &adapter->tx_ring[r_idx];
+ tx_ring = adapter->tx_ring[skb->queue_mapping];
/*
* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
@@ -3152,38 +3142,41 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
count += skb_shinfo(skb)->nr_frags;
#endif
if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
- adapter->tx_busy++;
+ tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
if (vlan_tx_tag_present(skb)) {
tx_flags |= vlan_tx_tag_get(skb);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
- first = tx_ring->next_to_use;
+ /* record initial flags and protocol */
+ first->tx_flags = tx_flags;
+ first->protocol = vlan_get_protocol(skb);
- if (skb->protocol == htons(ETH_P_IP))
- tx_flags |= IXGBE_TX_FLAGS_IPV4;
- tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ tso = ixgbevf_tso(tx_ring, first, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else
+ ixgbevf_tx_csum(tx_ring, first);
- if (tso)
- tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
- else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
- tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ ixgbevf_tx_map(tx_ring, first, hdr_len);
- ixgbevf_tx_queue(tx_ring, tx_flags,
- ixgbevf_tx_map(tx_ring, skb, tx_flags),
- first, skb->len, hdr_len);
+ ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
- writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
+ return NETDEV_TX_OK;
- ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+out_drop:
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
return NETDEV_TX_OK;
}
@@ -3289,8 +3282,8 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
- struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
u32 err;
pci_set_power_state(pdev, PCI_D0);
@@ -3349,22 +3342,22 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
for (i = 0; i < adapter->num_rx_queues; i++) {
- ring = &adapter->rx_ring[i];
+ ring = adapter->rx_ring[i];
do {
start = u64_stats_fetch_begin_bh(&ring->syncp);
- bytes = ring->total_bytes;
- packets = ring->total_packets;
+ bytes = ring->stats.bytes;
+ packets = ring->stats.packets;
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
stats->rx_bytes += bytes;
stats->rx_packets += packets;
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- ring = &adapter->tx_ring[i];
+ ring = adapter->tx_ring[i];
do {
start = u64_stats_fetch_begin_bh(&ring->syncp);
- bytes = ring->total_bytes;
- packets = ring->total_packets;
+ bytes = ring->stats.bytes;
+ packets = ring->stats.packets;
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
stats->tx_bytes += bytes;
stats->tx_packets += packets;
@@ -3595,9 +3588,6 @@ static void ixgbevf_remove(struct pci_dev *pdev)
hw_dbg(&adapter->hw, "Remove complete\n");
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
-
free_netdev(netdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 4a5e3b0f712e..d74f5f4e5782 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -39,7 +39,6 @@
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6a6c1f76d8e0..8f9266c64c75 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -9,8 +9,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) 2011 John Crispin <blogic@openwrt.org>
*/
@@ -619,7 +618,8 @@ ltq_etop_set_multicast_list(struct net_device *dev)
}
static u16
-ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb)
+ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
/* we are currently only using the first queue */
return 0;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index a49e81bdf8e8..6300fd27f2db 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -33,6 +33,7 @@ config MV643XX_ETH
config MVMDIO
tristate "Marvell MDIO interface support"
+ depends on HAS_IOMEM
select PHYLIB
---help---
This driver supports the MDIO interface found in the network
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 61088a6a9424..a2565ce22b7c 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -33,8 +33,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -2067,23 +2066,6 @@ static inline void oom_timer_wrapper(unsigned long data)
napi_schedule(&mp->napi);
}
-static void phy_reset(struct mv643xx_eth_private *mp)
-{
- int data;
-
- data = phy_read(mp->phy, MII_BMCR);
- if (data < 0)
- return;
-
- data |= BMCR_RESET;
- if (phy_write(mp->phy, MII_BMCR, data) < 0)
- return;
-
- do {
- data = phy_read(mp->phy, MII_BMCR);
- } while (data >= 0 && data & BMCR_RESET);
-}
-
static void port_start(struct mv643xx_eth_private *mp)
{
u32 pscr;
@@ -2096,8 +2078,9 @@ static void port_start(struct mv643xx_eth_private *mp)
struct ethtool_cmd cmd;
mv643xx_eth_get_settings(mp->dev, &cmd);
- phy_reset(mp);
+ phy_init_hw(mp->phy);
mv643xx_eth_set_settings(mp->dev, &cmd);
+ phy_start(mp->phy);
}
/*
@@ -2293,7 +2276,8 @@ static int mv643xx_eth_stop(struct net_device *dev)
del_timer_sync(&mp->rx_oom);
netif_carrier_off(dev);
-
+ if (mp->phy)
+ phy_stop(mp->phy);
free_irq(dev->irq, dev);
port_reset(mp);
@@ -2764,8 +2748,6 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
struct phy_device *phy = mp->phy;
- phy_reset(mp);
-
if (speed == 0) {
phy->autoneg = AUTONEG_ENABLE;
phy->speed = 0;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 7354960b583b..fd409d76b811 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -17,7 +17,6 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -92,6 +91,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus)
if (time_is_before_jiffies(end))
++timedout;
} else {
+ /* wait_event_timeout does not guarantee a delay of at
+ * least one whole jiffie, so timeout must be no less
+ * than two.
+ */
+ if (timeout < 2)
+ timeout = 2;
wait_event_timeout(dev->smi_busy_wait,
orion_mdio_smi_is_done(dev),
timeout);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d5f0d72e5e33..f418f4f20f94 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -101,16 +101,56 @@
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
+
+/* Exception Interrupt Port/Queue Cause register */
+
#define MVNETA_INTR_NEW_CAUSE 0x25a0
-#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
#define MVNETA_INTR_NEW_MASK 0x25a4
+
+/* bits 0..7 = TXQ SENT, one bit per queue.
+ * bits 8..15 = RXQ OCCUP, one bit per queue.
+ * bits 16..23 = RXQ FREE, one bit per queue.
+ * bit 29 = OLD_REG_SUM, see old reg ?
+ * bit 30 = TX_ERR_SUM, one bit for 4 ports
+ * bit 31 = MISC_SUM, one bit for 4 ports
+ */
+#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
+#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
+#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
+#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
+
#define MVNETA_INTR_OLD_CAUSE 0x25a8
#define MVNETA_INTR_OLD_MASK 0x25ac
+
+/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE 0x25b0
#define MVNETA_INTR_MISC_MASK 0x25b4
+
+#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
+#define MVNETA_CAUSE_LINK_CHANGE BIT(1)
+#define MVNETA_CAUSE_PTP BIT(4)
+
+#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
+#define MVNETA_CAUSE_RX_OVERRUN BIT(8)
+#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
+#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
+#define MVNETA_CAUSE_TX_UNDERUN BIT(11)
+#define MVNETA_CAUSE_PRBS_ERR BIT(12)
+#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
+#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)
+
+#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
+#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
+#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
+
+#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
+#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
+#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
+
#define MVNETA_INTR_ENABLE 0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
-#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000
+#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF
+
#define MVNETA_RXQ_CMD 0x2680
#define MVNETA_RXQ_DISABLE_SHIFT 8
#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
@@ -176,9 +216,6 @@
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
-/* Timer */
-#define MVNETA_TX_DONE_TIMER_PERIOD 10
-
/* Napi polling weight */
#define MVNETA_RX_POLL_WEIGHT 64
@@ -221,27 +258,25 @@
#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
-struct mvneta_stats {
+struct mvneta_pcpu_stats {
struct u64_stats_sync syncp;
- u64 packets;
- u64 bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
};
struct mvneta_port {
int pkt_size;
+ unsigned int frag_size;
void __iomem *base;
struct mvneta_rx_queue *rxqs;
struct mvneta_tx_queue *txqs;
- struct timer_list tx_done_timer;
struct net_device *dev;
u32 cause_rx_tx;
struct napi_struct napi;
- /* Flags */
- unsigned long flags;
-#define MVNETA_F_TX_DONE_TIMER_BIT 0
-
/* Napi weight */
int weight;
@@ -250,8 +285,7 @@ struct mvneta_port {
u8 mcast_count[256];
u16 tx_ring_size;
u16 rx_ring_size;
- struct mvneta_stats tx_stats;
- struct mvneta_stats rx_stats;
+ struct mvneta_pcpu_stats *stats;
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
@@ -410,6 +444,8 @@ static int txq_number = 8;
static int rxq_def;
+static int rx_copybreak __read_mostly = 256;
+
#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
@@ -461,21 +497,29 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
{
struct mvneta_port *pp = netdev_priv(dev);
unsigned int start;
+ int cpu;
- memset(stats, 0, sizeof(struct rtnl_link_stats64));
-
- do {
- start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
- stats->rx_packets = pp->rx_stats.packets;
- stats->rx_bytes = pp->rx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));
+ for_each_possible_cpu(cpu) {
+ struct mvneta_pcpu_stats *cpu_stats;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ cpu_stats = per_cpu_ptr(pp->stats, cpu);
+ do {
+ start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+ rx_packets = cpu_stats->rx_packets;
+ rx_bytes = cpu_stats->rx_bytes;
+ tx_packets = cpu_stats->tx_packets;
+ tx_bytes = cpu_stats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
- do {
- start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
- stats->tx_packets = pp->tx_stats.packets;
- stats->tx_bytes = pp->tx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ }
stats->rx_errors = dev->stats.rx_errors;
stats->rx_dropped = dev->stats.rx_dropped;
@@ -487,14 +531,14 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
/* Rx descriptors helper methods */
-/* Checks whether the given RX descriptor is both the first and the
- * last descriptor for the RX packet. Each RX packet is currently
+/* Checks whether the RX descriptor having this status is both the first
+ * and the last descriptor for the RX packet. Each RX packet is currently
* received through a single RX descriptor, so not having each RX
* descriptor with its first and last bits set is an error
*/
-static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
+static int mvneta_rxq_desc_is_first_last(u32 status)
{
- return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
+ return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
MVNETA_RXD_FIRST_LAST_DESC;
}
@@ -570,6 +614,7 @@ mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
int rx_desc = rxq->next_desc_to_proc;
rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
+ prefetch(rxq->descs + rxq->next_desc_to_proc);
return rxq->descs + rx_desc;
}
@@ -1100,17 +1145,6 @@ static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
txq->done_pkts_coal = value;
}
-/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
-static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
-{
- if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
- pp->tx_done_timer.expires = jiffies +
- msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
- add_timer(&pp->tx_done_timer);
- }
-}
-
-
/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
u32 phys_addr, u32 cookie)
@@ -1204,10 +1238,10 @@ static void mvneta_rx_error(struct mvneta_port *pp,
{
u32 status = rx_desc->status;
- if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
+ if (!mvneta_rxq_desc_is_first_last(status)) {
netdev_err(pp->dev,
"bad rx status %08x (buffer oversize), size=%d\n",
- rx_desc->status, rx_desc->data_size);
+ status, rx_desc->data_size);
return;
}
@@ -1231,13 +1265,12 @@ static void mvneta_rx_error(struct mvneta_port *pp,
}
}
-/* Handle RX checksum offload */
-static void mvneta_rx_csum(struct mvneta_port *pp,
- struct mvneta_rx_desc *rx_desc,
+/* Handle RX checksum offload based on the descriptor's status */
+static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
struct sk_buff *skb)
{
- if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
- (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
+ if ((status & MVNETA_RXD_L3_IP4) &&
+ (status & MVNETA_RXD_L4_CSUM_OK)) {
skb->csum = 0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
@@ -1246,13 +1279,16 @@ static void mvneta_rx_csum(struct mvneta_port *pp,
skb->ip_summed = CHECKSUM_NONE;
}
-/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
+/* Return tx queue pointer (find last set bit) according to <cause> returned
+ * from tx_done reg. <cause> must not be null. The return value is always a
+ * valid queue for matching the first one found in <cause>.
+ */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
u32 cause)
{
int queue = fls(cause) - 1;
- return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
+ return &pp->txqs[queue];
}
/* Free tx queue skbuffs */
@@ -1278,15 +1314,16 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
}
/* Handle end of transmission */
-static int mvneta_txq_done(struct mvneta_port *pp,
+static void mvneta_txq_done(struct mvneta_port *pp,
struct mvneta_tx_queue *txq)
{
struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
int tx_done;
tx_done = mvneta_txq_sent_desc_proc(pp, txq);
- if (tx_done == 0)
- return tx_done;
+ if (!tx_done)
+ return;
+
mvneta_txq_bufs_free(pp, txq, tx_done);
txq->count -= tx_done;
@@ -1295,8 +1332,22 @@ static int mvneta_txq_done(struct mvneta_port *pp,
if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
netif_tx_wake_queue(nq);
}
+}
- return tx_done;
+static void *mvneta_frag_alloc(const struct mvneta_port *pp)
+{
+ if (likely(pp->frag_size <= PAGE_SIZE))
+ return netdev_alloc_frag(pp->frag_size);
+ else
+ return kmalloc(pp->frag_size, GFP_ATOMIC);
+}
+
+static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
+{
+ if (likely(pp->frag_size <= PAGE_SIZE))
+ put_page(virt_to_head_page(data));
+ else
+ kfree(data);
}
/* Refill processing */
@@ -1305,22 +1356,21 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
{
dma_addr_t phys_addr;
- struct sk_buff *skb;
+ void *data;
- skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
- if (!skb)
+ data = mvneta_frag_alloc(pp);
+ if (!data)
return -ENOMEM;
- phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
+ phys_addr = dma_map_single(pp->dev->dev.parent, data,
MVNETA_RX_BUF_SIZE(pp->pkt_size),
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
- dev_kfree_skb(skb);
+ mvneta_frag_free(pp, data);
return -ENOMEM;
}
- mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
-
+ mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
return 0;
}
@@ -1374,9 +1424,9 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
for (i = 0; i < rxq->size; i++) {
struct mvneta_rx_desc *rx_desc = rxq->descs + i;
- struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;
+ void *data = (void *)rx_desc->buf_cookie;
- dev_kfree_skb_any(skb);
+ mvneta_frag_free(pp, data);
dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
}
@@ -1391,6 +1441,8 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
{
struct net_device *dev = pp->dev;
int rx_done, rx_filled;
+ u32 rcvd_pkts = 0;
+ u32 rcvd_bytes = 0;
/* Get number of received packets */
rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -1405,53 +1457,89 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
while (rx_done < rx_todo) {
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
struct sk_buff *skb;
+ unsigned char *data;
u32 rx_status;
int rx_bytes, err;
- prefetch(rx_desc);
rx_done++;
rx_filled++;
rx_status = rx_desc->status;
- skb = (struct sk_buff *)rx_desc->buf_cookie;
+ rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
+ data = (unsigned char *)rx_desc->buf_cookie;
- if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
+ if (!mvneta_rxq_desc_is_first_last(rx_status) ||
(rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+ err_drop_frame:
dev->stats.rx_errors++;
mvneta_rx_error(pp, rx_desc);
- mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
- (u32)skb);
+ /* leave the descriptor untouched */
continue;
}
- dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+ if (rx_bytes <= rx_copybreak) {
+ /* better copy a small frame and not unmap the DMA region */
+ skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
+ if (unlikely(!skb))
+ goto err_drop_frame;
+
+ dma_sync_single_range_for_cpu(dev->dev.parent,
+ rx_desc->buf_phys_addr,
+ MVNETA_MH_SIZE + NET_SKB_PAD,
+ rx_bytes,
+ DMA_FROM_DEVICE);
+ memcpy(skb_put(skb, rx_bytes),
+ data + MVNETA_MH_SIZE + NET_SKB_PAD,
+ rx_bytes);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ mvneta_rx_csum(pp, rx_status, skb);
+ napi_gro_receive(&pp->napi, skb);
+
+ rcvd_pkts++;
+ rcvd_bytes += rx_bytes;
+
+ /* leave the descriptor and buffer untouched */
+ continue;
+ }
+
+ skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
+ if (!skb)
+ goto err_drop_frame;
+
+ dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr,
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
- rx_bytes = rx_desc->data_size -
- (ETH_FCS_LEN + MVNETA_MH_SIZE);
- u64_stats_update_begin(&pp->rx_stats.syncp);
- pp->rx_stats.packets++;
- pp->rx_stats.bytes += rx_bytes;
- u64_stats_update_end(&pp->rx_stats.syncp);
+ rcvd_pkts++;
+ rcvd_bytes += rx_bytes;
/* Linux processing */
- skb_reserve(skb, MVNETA_MH_SIZE);
+ skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
skb_put(skb, rx_bytes);
skb->protocol = eth_type_trans(skb, dev);
- mvneta_rx_csum(pp, rx_desc, skb);
+ mvneta_rx_csum(pp, rx_status, skb);
napi_gro_receive(&pp->napi, skb);
/* Refill processing */
err = mvneta_rx_refill(pp, rx_desc);
if (err) {
- netdev_err(pp->dev, "Linux processing - Can't refill\n");
+ netdev_err(dev, "Linux processing - Can't refill\n");
rxq->missed++;
rx_filled--;
}
}
+ if (rcvd_pkts) {
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets += rcvd_pkts;
+ stats->rx_bytes += rcvd_bytes;
+ u64_stats_update_end(&stats->syncp);
+ }
+
/* Update rxq management counters */
mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
@@ -1582,25 +1670,17 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
out:
if (frags > 0) {
- u64_stats_update_begin(&pp->tx_stats.syncp);
- pp->tx_stats.packets++;
- pp->tx_stats.bytes += skb->len;
- u64_stats_update_end(&pp->tx_stats.syncp);
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
} else {
dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
}
- if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
- mvneta_txq_done(pp, txq);
-
- /* If after calling mvneta_txq_done, count equals
- * frags, we need to set the timer
- */
- if (txq->count == frags && frags > 0)
- mvneta_add_tx_done_timer(pp);
-
return NETDEV_TX_OK;
}
@@ -1620,33 +1700,26 @@ static void mvneta_txq_done_force(struct mvneta_port *pp,
txq->txq_get_index = 0;
}
-/* handle tx done - called from tx done timer callback */
-static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
- int *tx_todo)
+/* Handle tx done - called in softirq context. The <cause_tx_done> argument
+ * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
+ */
+static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
struct mvneta_tx_queue *txq;
- u32 tx_done = 0;
struct netdev_queue *nq;
- *tx_todo = 0;
- while (cause_tx_done != 0) {
+ while (cause_tx_done) {
txq = mvneta_tx_done_policy(pp, cause_tx_done);
- if (!txq)
- break;
nq = netdev_get_tx_queue(pp->dev, txq->id);
__netif_tx_lock(nq, smp_processor_id());
- if (txq->count) {
- tx_done += mvneta_txq_done(pp, txq);
- *tx_todo += txq->count;
- }
+ if (txq->count)
+ mvneta_txq_done(pp, txq);
__netif_tx_unlock(nq);
cause_tx_done &= ~((1 << txq->id));
}
-
- return tx_done;
}
/* Compute crc8 of the specified address, using a unique algorithm ,
@@ -1876,14 +1949,20 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
/* Read cause register */
cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
- MVNETA_RX_INTR_MASK(rxq_number);
+ (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+
+ /* Release Tx descriptors */
+ if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
+ mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
+ cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
+ }
/* For the case where the last mvneta_poll did not process all
* RX packets
*/
cause_rx_tx |= pp->cause_rx_tx;
if (rxq_number > 1) {
- while ((cause_rx_tx != 0) && (budget > 0)) {
+ while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
int count;
struct mvneta_rx_queue *rxq;
/* get rx queue number from cause_rx_tx */
@@ -1915,7 +1994,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
local_irq_save(flags);
mvreg_write(pp, MVNETA_INTR_NEW_MASK,
- MVNETA_RX_INTR_MASK(rxq_number));
+ MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
local_irq_restore(flags);
}
@@ -1923,56 +2002,19 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
return rx_done;
}
-/* tx done timer callback */
-static void mvneta_tx_done_timer_callback(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct mvneta_port *pp = netdev_priv(dev);
- int tx_done = 0, tx_todo = 0;
-
- if (!netif_running(dev))
- return ;
-
- clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
-
- tx_done = mvneta_tx_done_gbe(pp,
- (((1 << txq_number) - 1) &
- MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
- &tx_todo);
- if (tx_todo > 0)
- mvneta_add_tx_done_timer(pp);
-}
-
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
int num)
{
- struct net_device *dev = pp->dev;
int i;
for (i = 0; i < num; i++) {
- struct sk_buff *skb;
- struct mvneta_rx_desc *rx_desc;
- unsigned long phys_addr;
-
- skb = dev_alloc_skb(pp->pkt_size);
- if (!skb) {
- netdev_err(dev, "%s:rxq %d, %d of %d buffs filled\n",
+ memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
+ if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
+ netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
__func__, rxq->id, i, num);
break;
}
-
- rx_desc = rxq->descs + i;
- memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
- phys_addr = dma_map_single(dev->dev.parent, skb->head,
- MVNETA_RX_BUF_SIZE(pp->pkt_size),
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
- dev_kfree_skb(skb);
- break;
- }
-
- mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
}
/* Add this number of RX descriptors as non occupied (ready to
@@ -2192,7 +2234,7 @@ static void mvneta_start_dev(struct mvneta_port *pp)
/* Unmask interrupts */
mvreg_write(pp, MVNETA_INTR_NEW_MASK,
- MVNETA_RX_INTR_MASK(rxq_number));
+ MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
phy_start(pp->phy_dev);
netif_tx_start_all_queues(pp->dev);
@@ -2225,16 +2267,6 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
mvneta_rx_reset(pp);
}
-/* tx timeout callback - display a message and stop/start the network device */
-static void mvneta_tx_timeout(struct net_device *dev)
-{
- struct mvneta_port *pp = netdev_priv(dev);
-
- netdev_info(dev, "tx timeout\n");
- mvneta_stop_dev(pp);
- mvneta_start_dev(pp);
-}
-
/* Return positive if MTU is valid */
static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
{
@@ -2282,6 +2314,8 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
mvneta_cleanup_rxqs(pp);
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+ pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
ret = mvneta_setup_rxqs(pp);
if (ret) {
@@ -2429,6 +2463,8 @@ static int mvneta_open(struct net_device *dev)
mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+ pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
ret = mvneta_setup_rxqs(pp);
if (ret)
@@ -2478,8 +2514,6 @@ static int mvneta_stop(struct net_device *dev)
free_irq(dev->irq, pp);
mvneta_cleanup_rxqs(pp);
mvneta_cleanup_txqs(pp);
- del_timer(&pp->tx_done_timer);
- clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
return 0;
}
@@ -2615,7 +2649,6 @@ static const struct net_device_ops mvneta_netdev_ops = {
.ndo_set_rx_mode = mvneta_set_rx_mode,
.ndo_set_mac_address = mvneta_set_mac_addr,
.ndo_change_mtu = mvneta_change_mtu,
- .ndo_tx_timeout = mvneta_tx_timeout,
.ndo_get_stats64 = mvneta_get_stats64,
.ndo_do_ioctl = mvneta_ioctl,
};
@@ -2751,6 +2784,7 @@ static int mvneta_probe(struct platform_device *pdev)
const char *mac_from;
int phy_mode;
int err;
+ int cpu;
/* Our multiqueue support is not complete, so for now, only
* allow the usage of the first RX queue
@@ -2792,9 +2826,6 @@ static int mvneta_probe(struct platform_device *pdev)
pp = netdev_priv(dev);
- u64_stats_init(&pp->tx_stats.syncp);
- u64_stats_init(&pp->rx_stats.syncp);
-
pp->weight = MVNETA_RX_POLL_WEIGHT;
pp->phy_node = phy_node;
pp->phy_interface = phy_mode;
@@ -2813,6 +2844,19 @@ static int mvneta_probe(struct platform_device *pdev)
goto err_clk;
}
+ /* Alloc per-cpu stats */
+ pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
+ if (!pp->stats) {
+ err = -ENOMEM;
+ goto err_unmap;
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct mvneta_pcpu_stats *stats;
+ stats = per_cpu_ptr(pp->stats, cpu);
+ u64_stats_init(&stats->syncp);
+ }
+
dt_mac_addr = of_get_mac_address(dn);
if (dt_mac_addr) {
mac_from = "device tree";
@@ -2828,11 +2872,6 @@ static int mvneta_probe(struct platform_device *pdev)
}
}
- pp->tx_done_timer.data = (unsigned long)dev;
- pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
- init_timer(&pp->tx_done_timer);
- clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
-
pp->tx_ring_size = MVNETA_MAX_TXD;
pp->rx_ring_size = MVNETA_MAX_RXD;
@@ -2842,7 +2881,7 @@ static int mvneta_probe(struct platform_device *pdev)
err = mvneta_init(pp, phy_addr);
if (err < 0) {
dev_err(&pdev->dev, "can't init eth hal\n");
- goto err_unmap;
+ goto err_free_stats;
}
mvneta_port_power_up(pp, phy_mode);
@@ -2872,6 +2911,8 @@ static int mvneta_probe(struct platform_device *pdev)
err_deinit:
mvneta_deinit(pp);
+err_free_stats:
+ free_percpu(pp->stats);
err_unmap:
iounmap(pp->base);
err_clk:
@@ -2892,6 +2933,7 @@ static int mvneta_remove(struct platform_device *pdev)
unregister_netdev(dev);
mvneta_deinit(pp);
clk_disable_unprepare(pp->clk);
+ free_percpu(pp->stats);
iounmap(pp->base);
irq_dispose_mapping(dev->irq);
free_netdev(dev);
@@ -2924,3 +2966,4 @@ module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);
module_param(rxq_def, int, S_IRUGO);
+module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
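
As a side note on the mvneta tx-done rework above: mvneta_tx_done_policy() now assumes a non-zero <cause> and simply returns the highest pending queue via fls(), and mvneta_tx_done_gbe() clears that queue's bit and loops. A minimal standalone sketch of that walk (userspace C, fls open-coded with __builtin_clz(), illustrative cause value - not driver code) might look like this:

/* Sketch of the fls()-based cause walk done by mvneta_tx_done_gbe():
 * service the highest-numbered pending queue, clear its bit, repeat
 * until the cause bitmap is empty.
 */
#include <stdio.h>
#include <stdint.h>

static int fls32(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	uint32_t cause = 0x15;		/* queues 0, 2 and 4 pending */

	while (cause) {
		int queue = fls32(cause) - 1;

		printf("servicing txq %d\n", queue);
		cause &= ~(1u << queue);
	}
	return 0;
}

Because each iteration clears the bit it just handled, the loop is bounded by the number of set bits, which is why the old NULL-queue check could be dropped once <cause> is guaranteed non-zero.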
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index fff62460185c..b358c2f6f4bd 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -19,11 +19,9 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
@@ -321,23 +319,6 @@ static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
wrl(pep, PHY_ADDRESS, reg_data);
}
-static void ethernet_phy_reset(struct pxa168_eth_private *pep)
-{
- int data;
-
- data = phy_read(pep->phy, MII_BMCR);
- if (data < 0)
- return;
-
- data |= BMCR_RESET;
- if (phy_write(pep->phy, MII_BMCR, data) < 0)
- return;
-
- do {
- data = phy_read(pep->phy, MII_BMCR);
- } while (data >= 0 && data & BMCR_RESET);
-}
-
static void rxq_refill(struct net_device *dev)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -646,7 +627,7 @@ static void eth_port_start(struct net_device *dev)
struct ethtool_cmd cmd;
pxa168_get_settings(pep->dev, &cmd);
- ethernet_phy_reset(pep);
+ phy_init_hw(pep->phy);
pxa168_set_settings(pep->dev, &cmd);
}
@@ -1383,7 +1364,6 @@ static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
{
struct phy_device *phy = pep->phy;
- ethernet_phy_reset(pep);
phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 43aa7acd84a6..55a37ae11440 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2495,7 +2495,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
skb_copy_from_linear_data(re->skb, skb->data, length);
skb->ip_summed = re->skb->ip_summed;
skb->csum = re->skb->csum;
- skb->rxhash = re->skb->rxhash;
+ skb_copy_hash(skb, re->skb);
skb->vlan_proto = re->skb->vlan_proto;
skb->vlan_tci = re->skb->vlan_tci;
@@ -2503,7 +2503,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
length, PCI_DMA_FROMDEVICE);
re->skb->vlan_proto = 0;
re->skb->vlan_tci = 0;
- re->skb->rxhash = 0;
+ skb_clear_hash(re->skb);
re->skb->ip_summed = CHECKSUM_NONE;
skb_put(skb, length);
}
@@ -2723,7 +2723,7 @@ static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
struct sk_buff *skb;
skb = sky2->rx_ring[sky2->rx_next].skb;
- skb->rxhash = le32_to_cpu(status);
+ skb_set_hash(skb, le32_to_cpu(status), PKT_HASH_TYPE_L3);
}
/* Process status response ring */
@@ -5020,6 +5020,8 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+ netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
+
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "cannot register net device\n");
@@ -5028,8 +5030,6 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_carrier_off(dev);
- netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
-
sky2_show_addr(dev);
if (hw->ports > 1) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index eb520ab64014..563495d8975a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -6,6 +6,7 @@ config MLX4_EN
tristate "Mellanox Technologies 10Gbit Ethernet support"
depends on PCI
select MLX4_CORE
+ select PTP_1588_CLOCK
---help---
This driver supports Mellanox Technologies ConnectX Ethernet
devices.
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 06fef5b44f77..c3ad464d0627 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -71,9 +71,9 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
return obj;
}
-void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
+void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
{
- mlx4_bitmap_free_range(bitmap, obj, 1);
+ mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
}
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
@@ -118,11 +118,17 @@ u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
return bitmap->avail;
}
-void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
+ int use_rr)
{
obj &= bitmap->max + bitmap->reserved_top - 1;
spin_lock(&bitmap->lock);
+ if (!use_rr) {
+ bitmap->last = min(bitmap->last, obj);
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
+ }
bitmap_clear(bitmap->table, obj, cnt);
bitmap->avail += cnt;
spin_unlock(&bitmap->lock);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 1e9970d2f0f3..0d02fba94536 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1371,6 +1371,15 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL,
.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
},
+ {
+ .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper
+ },
};
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 22fcbe78311c..0487121e4a0f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -34,7 +34,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/export.h>
@@ -187,7 +186,7 @@ err_put:
mlx4_table_put(dev, &cq_table->table, *cqn);
err_out:
- mlx4_bitmap_free(&cq_table->bitmap, *cqn);
+ mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
return err;
}
@@ -217,7 +216,7 @@ void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
mlx4_table_put(dev, &cq_table->table, cqn);
- mlx4_bitmap_free(&cq_table->bitmap, cqn);
+ mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
}
static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index fd6441071319..abaf6bb22416 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -42,6 +42,10 @@ int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter)
int port_up = 0;
int err = 0;
+ if (priv->hwtstamp_config.tx_type == tx_type &&
+ priv->hwtstamp_config.rx_filter == rx_filter)
+ return 0;
+
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
@@ -103,19 +107,191 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
struct skb_shared_hwtstamps *hwts,
u64 timestamp)
{
+ unsigned long flags;
u64 nsec;
+ read_lock_irqsave(&mdev->clock_lock, flags);
nsec = timecounter_cyc2time(&mdev->clock, timestamp);
+ read_unlock_irqrestore(&mdev->clock_lock, flags);
memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
hwts->hwtstamp = ns_to_ktime(nsec);
}
+/**
+ * mlx4_en_remove_timestamp - disable PTP device
+ * @mdev: board private structure
+ *
+ * Stop the PTP support.
+ **/
+void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
+{
+ if (mdev->ptp_clock) {
+ ptp_clock_unregister(mdev->ptp_clock);
+ mdev->ptp_clock = NULL;
+ mlx4_info(mdev, "removed PHC\n");
+ }
+}
+
+void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
+{
+ bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
+ mdev->overflow_period);
+ unsigned long flags;
+
+ if (timeout) {
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ timecounter_read(&mdev->clock);
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+ mdev->last_overflow_check = jiffies;
+ }
+}
+
+/**
+ * mlx4_en_phc_adjfreq - adjust the frequency of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired frequency change in parts per billion
+ *
+ * Adjust the frequency of the PHC cycle counter by the indicated delta from
+ * the base frequency.
+ **/
+static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+ u64 adj;
+ u32 diff, mult;
+ int neg_adj = 0;
+ unsigned long flags;
+ struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+ ptp_clock_info);
+
+ if (delta < 0) {
+ neg_adj = 1;
+ delta = -delta;
+ }
+ mult = mdev->nominal_c_mult;
+ adj = mult;
+ adj *= delta;
+ diff = div_u64(adj, 1000000000ULL);
+
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ timecounter_read(&mdev->clock);
+ mdev->cycles.mult = neg_adj ? mult - diff : mult + diff;
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+ return 0;
+}
+
+/**
+ * mlx4_en_phc_adjtime - Shift the time of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired change in nanoseconds
+ *
+ * Adjust the timer by resetting the timecounter structure.
+ **/
+static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+ ptp_clock_info);
+ unsigned long flags;
+ s64 now;
+
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ now = timecounter_read(&mdev->clock);
+ now += delta;
+ timecounter_init(&mdev->clock, &mdev->cycles, now);
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+ return 0;
+}
+
+/**
+ * mlx4_en_phc_gettime - Reads the current time from the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the timecounter and return the correct value in ns after converting
+ * it into a struct timespec.
+ **/
+static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+ ptp_clock_info);
+ unsigned long flags;
+ u32 remainder;
+ u64 ns;
+
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ ns = timecounter_read(&mdev->clock);
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+/**
+ * mlx4_en_phc_settime - Set the current time on the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec containing the new time for the cycle counter
+ *
+ * Reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ **/
+static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+ ptp_clock_info);
+ u64 ns = timespec_to_ns(ts);
+ unsigned long flags;
+
+ /* reset the timecounter */
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ timecounter_init(&mdev->clock, &mdev->cycles, ns);
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+ return 0;
+}
+
+/**
+ * mlx4_en_phc_enable - enable or disable an ancillary feature
+ * @ptp: ptp clock structure
+ * @request: Desired resource to enable or disable
+ * @on: Caller passes one to enable or zero to disable
+ *
+ * Enable (or disable) ancillary features of the PHC subsystem.
+ * Currently, no ancillary features are supported.
+ **/
+static int mlx4_en_phc_enable(struct ptp_clock_info __always_unused *ptp,
+ struct ptp_clock_request __always_unused *request,
+ int __always_unused on)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .max_adj = 100000000,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjfreq = mlx4_en_phc_adjfreq,
+ .adjtime = mlx4_en_phc_adjtime,
+ .gettime = mlx4_en_phc_gettime,
+ .settime = mlx4_en_phc_settime,
+ .enable = mlx4_en_phc_enable,
+};
+
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
struct mlx4_dev *dev = mdev->dev;
+ unsigned long flags;
u64 ns;
+ rwlock_init(&mdev->clock_lock);
+
memset(&mdev->cycles, 0, sizeof(mdev->cycles));
mdev->cycles.read = mlx4_en_read_clock;
mdev->cycles.mask = CLOCKSOURCE_MASK(48);
@@ -127,9 +303,12 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
mdev->cycles.shift = 14;
mdev->cycles.mult =
clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
+ mdev->nominal_c_mult = mdev->cycles.mult;
+ write_lock_irqsave(&mdev->clock_lock, flags);
timecounter_init(&mdev->clock, &mdev->cycles,
ktime_to_ns(ktime_get_real()));
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least once every wrap around.
@@ -137,15 +316,18 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
do_div(ns, NSEC_PER_SEC / 2 / HZ);
mdev->overflow_period = ns;
-}
-void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
-{
- bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
- mdev->overflow_period);
+ /* Configure the PHC */
+ mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
+ snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");
- if (timeout) {
- timecounter_read(&mdev->clock);
- mdev->last_overflow_check = jiffies;
+ mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
+ &mdev->pdev->dev);
+ if (IS_ERR(mdev->ptp_clock)) {
+ mdev->ptp_clock = NULL;
+ mlx4_err(mdev, "ptp_clock_register failed\n");
+ } else {
+ mlx4_info(mdev, "registered PHC clock\n");
}
+
}
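
The adjfreq callback above scales the nominal cyclecounter multiplier by the requested parts-per-billion delta. A standalone sketch of that arithmetic (userspace C, illustrative nominal multiplier, plain 64-bit division standing in for div_u64()) is:

#include <stdio.h>
#include <stdint.h>

/* Mirror of the mult adjustment in mlx4_en_phc_adjfreq():
 * new_mult = nominal_mult +/- nominal_mult * |delta_ppb| / 1e9
 */
static uint32_t adjust_mult(uint32_t nominal_mult, int32_t delta_ppb)
{
	int neg_adj = 0;
	uint64_t adj;
	uint32_t diff;

	if (delta_ppb < 0) {
		neg_adj = 1;
		delta_ppb = -delta_ppb;
	}
	adj = (uint64_t)nominal_mult * (uint32_t)delta_ppb;
	diff = (uint32_t)(adj / 1000000000ULL);

	return neg_adj ? nominal_mult - diff : nominal_mult + diff;
}

int main(void)
{
	uint32_t mult = 0x01000000;	/* example only; the real value comes
					 * from clocksource_khz2mult() */

	printf("+100 ppm -> %#x\n", adjust_mult(mult, 100000));
	printf("-100 ppm -> %#x\n", adjust_mult(mult, -100000));
	return 0;
}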
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 3a098cc4d349..70e95324a97d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -161,12 +161,16 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event;
- if (!cq->is_tx) {
+ if (cq->is_tx) {
+ netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
+ NAPI_POLL_WEIGHT);
+ } else {
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
napi_hash_add(&cq->napi);
- napi_enable(&cq->napi);
}
+ napi_enable(&cq->napi);
+
return 0;
}
@@ -188,12 +192,12 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
+ napi_disable(&cq->napi);
if (!cq->is_tx) {
- napi_disable(&cq->napi);
napi_hash_del(&cq->napi);
synchronize_rcu();
- netif_napi_del(&cq->napi);
}
+ netif_napi_del(&cq->napi);
mlx4_cq_free(priv->mdev->dev, &cq->mcq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 0596f9f85a0e..3e8d33605fe7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1193,6 +1193,9 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
+
+ if (mdev->ptp_clock)
+ info->phc_index = ptp_clock_index(mdev->ptp_clock);
}
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 0d087b03a7b0..d357bf5a4686 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -174,6 +174,9 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
mlx4_err(mdev, "Internal error detected, restarting device\n");
break;
+ case MLX4_DEV_EVENT_SLAVE_INIT:
+ case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
+ break;
default:
if (port < 1 || port > dev->caps.num_ports ||
!mdev->pndev[port])
@@ -196,6 +199,9 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
if (mdev->pndev[i])
mlx4_en_destroy_netdev(mdev->pndev[i]);
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ mlx4_en_remove_timestamp(mdev);
+
flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
(void) mlx4_mr_free(dev, &mdev->mr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e72d8a112a6b..fad45316200a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -468,6 +468,53 @@ static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
memset(&dst_mac[ETH_ALEN], 0, 2);
}
+
+static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
+ int qpn, u64 *reg_id)
+{
+ int err;
+ struct mlx4_spec_list spec_eth_outer = { {NULL} };
+ struct mlx4_spec_list spec_vxlan = { {NULL} };
+ struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_REGULAR,
+ .priority = MLX4_DOMAIN_NIC,
+ };
+
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ return 0; /* do nothing */
+
+ rule.port = priv->port;
+ rule.qpn = qpn;
+ INIT_LIST_HEAD(&rule.list);
+
+ spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+ memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+ spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
+ spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */
+
+ list_add_tail(&spec_eth_outer.list, &rule.list);
+ list_add_tail(&spec_vxlan.list, &rule.list);
+ list_add_tail(&spec_eth_inner.list, &rule.list);
+
+ err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+ if (err) {
+ en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
+ return err;
+ }
+ en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
+ return 0;
+}
+
+
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
unsigned char *mac, int *qpn, u64 *reg_id)
{
@@ -585,6 +632,11 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
if (err)
goto steer_err;
+ err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
+ &priv->tunnel_reg_id);
+ if (err)
+ goto tunnel_err;
+
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
err = -ENOMEM;
@@ -599,6 +651,9 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
return 0;
alloc_err:
+ if (priv->tunnel_reg_id)
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+tunnel_err:
mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
steer_err:
@@ -642,6 +697,11 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
}
}
+ if (priv->tunnel_reg_id) {
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+ priv->tunnel_reg_id = 0;
+ }
+
en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
priv->port, qpn);
mlx4_qp_release_range(dev, qpn, 1);
@@ -782,7 +842,7 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
list_for_each_entry(dst_tmp, dst, list) {
found = false;
list_for_each_entry(src_tmp, src, list) {
- if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+ if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
found = true;
break;
}
@@ -797,7 +857,7 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
list_for_each_entry(src_tmp, src, list) {
found = false;
list_for_each_entry(dst_tmp, dst, list) {
- if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+ if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
dst_tmp->action = MCLIST_NONE;
found = true;
break;
@@ -1044,6 +1104,12 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
if (err)
en_err(priv, "Fail to detach multicast address\n");
+ if (mclist->tunnel_reg_id) {
+ err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
+ if (err)
+ en_err(priv, "Failed to detach multicast address\n");
+ }
+
/* remove from list */
list_del(&mclist->list);
kfree(mclist);
@@ -1061,6 +1127,10 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
if (err)
en_err(priv, "Fail to attach multicast address\n");
+ err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
+ &mclist->tunnel_reg_id);
+ if (err)
+ en_err(priv, "Failed to attach multicast address\n");
}
}
}
@@ -1598,6 +1668,15 @@ int mlx4_en_start_port(struct net_device *dev)
goto tx_err;
}
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+ if (err) {
+ en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
+ err);
+ goto tx_err;
+ }
+ }
+
/* Init port */
en_dbg(HW, priv, "Initializing port\n");
err = mlx4_INIT_PORT(mdev->dev, priv->port);
@@ -1910,8 +1989,10 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
prof->tx_ring_size, i, TX, node))
goto err;
- if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
- prof->tx_ring_size, TXBB_SIZE, node))
+ if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+ priv->base_tx_qpn + i,
+ prof->tx_ring_size, TXBB_SIZE,
+ node, i))
goto err;
}
@@ -2025,7 +2106,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -2084,11 +2165,21 @@ static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
+static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
+ sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
+}
+
static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCSHWTSTAMP:
- return mlx4_en_hwtstamp_ioctl(dev, ifr);
+ return mlx4_en_hwtstamp_set(dev, ifr);
+ case SIOCGHWTSTAMP:
+ return mlx4_en_hwtstamp_get(dev, ifr);
default:
return -EOPNOTSUPP;
}
@@ -2154,6 +2245,27 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}
+
+#define PORT_ID_BYTE_LEN 8
+static int mlx4_en_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_port_id *ppid)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+ int i;
+ u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
+
+ if (!phys_port_id)
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(phys_port_id);
+ for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
+ ppid->id[i] = phys_port_id & 0xff;
+ phys_port_id >>= 8;
+ }
+ return 0;
+}
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
@@ -2179,6 +2291,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = mlx4_en_low_latency_recv,
#endif
+ .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2207,6 +2320,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
+ .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -2365,6 +2479,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
dev->priv_flags |= IFF_UNICAST_FLT;
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ dev->features |= NETIF_F_GSO_UDP_TUNNEL;
+ }
+
mdev->pndev[port] = dev;
netif_carrier_off(dev);
@@ -2394,6 +2515,15 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
goto out;
}
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+ if (err) {
+ en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
+ err);
+ goto out;
+ }
+ }
+
/* Init port */
en_warn(priv, "Initializing port\n");
err = mlx4_INIT_PORT(mdev->dev, priv->port);
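
The new .ndo_get_phys_port_id callback added above serializes the 64-bit port GUID most-significant byte first, so the exported id[] array reads as a big-endian identifier. A standalone sketch of that packing (userspace C, example GUID value) is:

#include <stdio.h>
#include <stdint.h>

#define PORT_ID_BYTE_LEN 8

int main(void)
{
	uint64_t phys_port_id = 0x0002c90300fed950ULL;	/* example GUID */
	uint8_t id[PORT_ID_BYTE_LEN];
	int i;

	/* same byte order as mlx4_en_get_phys_port_id(): MSB goes to id[0] */
	for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
		id[i] = phys_port_id & 0xff;
		phys_port_id >>= 8;
	}

	for (i = 0; i < PORT_ID_BYTE_LEN; i++)
		printf("%02x", id[i]);
	printf("\n");
	return 0;
}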
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index d3f508697a3d..f1a5500ff72d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -68,6 +68,12 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
context->param3 |= cpu_to_be32(1 << 30);
+
+ if (!is_tx && !rss &&
+ (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)) {
+ en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn);
+ context->srqn = cpu_to_be32(7 << 28); /* this fills bits 30:28 */
+ }
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 07a1d0fbae47..890922c1c8ee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -631,6 +631,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
int ip_summed;
int factor = priv->cqe_factor;
u64 timestamp;
+ bool l2_tunnel;
if (!priv->port_up)
return 0;
@@ -709,6 +710,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
length -= ring->fcs_del;
ring->bytes += length;
ring->packets++;
+ l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+ (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
if (likely(dev->features & NETIF_F_RXCSUM)) {
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
@@ -721,7 +724,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* - not an IP fragment
* - no LLS polling in progress
*/
- if (!mlx4_en_cq_ll_polling(cq) &&
+ if (!mlx4_en_cq_busy_polling(cq) &&
(dev->features & NETIF_F_GRO)) {
struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
if (!gro_skb)
@@ -738,6 +741,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
gro_skb->data_len = length;
gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (l2_tunnel)
+ gro_skb->encapsulation = 1;
if ((cqe->vlan_my_qpn &
cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
@@ -747,7 +752,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
}
if (dev->features & NETIF_F_RXHASH)
- gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
+ skb_set_hash(gro_skb,
+ be32_to_cpu(cqe->immed_rss_invalid),
+ PKT_HASH_TYPE_L3);
skb_record_rx_queue(gro_skb, cq->ring);
@@ -788,8 +795,13 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, cq->ring);
+ if (l2_tunnel)
+ skb->encapsulation = 1;
+
if (dev->features & NETIF_F_RXHASH)
- skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
+ skb_set_hash(skb,
+ be32_to_cpu(cqe->immed_rss_invalid),
+ PKT_HASH_TYPE_L3);
if ((be32_to_cpu(cqe->vlan_my_qpn) &
MLX4_CQE_VLAN_PRESENT_MASK) &&
@@ -804,8 +816,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
skb_mark_napi_id(skb, &cq->napi);
- /* Push it up the stack */
- netif_receive_skb(skb);
+ if (!mlx4_en_cq_busy_polling(cq))
+ napi_gro_receive(&cq->napi, skb);
+ else
+ netif_receive_skb(skb);
next:
for (nr = 0; nr < priv->num_frags; nr++)
@@ -1053,6 +1067,12 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
rss_context->base_qpn_udp = rss_context->default_qpn;
}
+
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
+ rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
+ }
+
rss_context->flags = rss_mask;
rss_context->hash_fn = MLX4_RSS_HASH_TOP;
for (i = 0; i < 10; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f54ebd5a1702..8e8a7eb43a2c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -39,6 +39,7 @@
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/tcp.h>
+#include <linux/ip.h>
#include <linux/moduleparam.h>
#include "mlx4_en.h"
@@ -55,7 +56,7 @@ MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring, int qpn, u32 size,
- u16 stride, int node)
+ u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_ring *ring;
@@ -140,6 +141,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->bf_enabled = true;
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+ ring->queue_index = queue_index;
+
+ if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
+ cpumask_set_cpu(queue_index, &ring->affinity_mask);
*pring = ring;
return 0;
@@ -206,6 +211,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
&ring->qp, &ring->qp_state);
+ if (!user_prio && cpu_online(ring->queue_index))
+ netif_set_xps_queue(priv->dev, &ring->affinity_mask,
+ ring->queue_index);
return err;
}
@@ -317,7 +325,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
}
}
}
- dev_kfree_skb_any(skb);
+ dev_kfree_skb(skb);
return tx_info->nr_txbb;
}
@@ -354,7 +362,9 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
-static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+static int mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq,
+ int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +382,10 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
u32 bytes = 0;
int factor = priv->cqe_factor;
u64 timestamp = 0;
+ int done = 0;
if (!priv->port_up)
- return;
+ return 0;
index = cons_index & size_mask;
cqe = &buf[(index << factor) + factor];
@@ -383,7 +394,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
- cons_index & size)) {
+ cons_index & size) && (done < budget)) {
/*
* make sure we read the CQE after we read the
* ownership bit
@@ -421,7 +432,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
txbbs_stamp = txbbs_skipped;
packets++;
bytes += ring->tx_info[ring_index].nr_bytes;
- } while (ring_index != new_index);
+ } while ((++done < budget) && (ring_index != new_index));
++cons_index;
index = cons_index & size_mask;
@@ -447,6 +458,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
netif_tx_wake_queue(ring->tx_queue);
priv->port_stats.wake_queue++;
}
+ return done;
}
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -454,10 +466,31 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
- mlx4_en_process_tx_cq(cq->dev, cq);
- mlx4_en_arm_cq(priv, cq);
+ if (priv->port_up)
+ napi_schedule(&cq->napi);
+ else
+ mlx4_en_arm_cq(priv, cq);
}
+/* TX CQ polling - called by NAPI */
+int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
+{
+ struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
+ struct net_device *dev = cq->dev;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int done;
+
+ done = mlx4_en_process_tx_cq(dev, cq, budget);
+
+ /* If we used up all the quota - we're probably not done yet... */
+ if (done < budget) {
+ /* Done for now */
+ napi_complete(napi);
+ mlx4_en_arm_cq(priv, cq);
+ return done;
+ }
+ return budget;
+}
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
@@ -528,7 +561,10 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
int real_size;
if (skb_is_gso(skb)) {
- *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (skb->encapsulation)
+ *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
+ else
+ *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
ALIGN(*lso_header_size + 4, DS_SIZE);
if (unlikely(*lso_header_size != skb_headlen(skb))) {
@@ -592,7 +628,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
}
}
-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
@@ -827,6 +864,14 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->inl = 1;
}
+ if (skb->encapsulation) {
+ struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb);
+ if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP)
+ op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP);
+ else
+ op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
+ }
+
ring->prod += nr_txbb;
/* If we used a bounce buffer then copy descriptor back into place */
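
For the encapsulated-TSO path added above, the LSO header length spans everything from the outer Ethernet header through the inner TCP header. A standalone arithmetic sketch (userspace C, assuming the common no-options VXLAN framing - the sizes are illustrative, the real values come from the skb offsets) is:

#include <stdio.h>

int main(void)
{
	int outer_eth = 14, outer_ip = 20, udp = 8, vxlan = 8;
	int inner_eth = 14, inner_ip = 20, inner_tcp = 20;
	int lso_header_size = outer_eth + outer_ip + udp + vxlan +
			      inner_eth + inner_ip + inner_tcp;

	printf("lso_header_size = %d bytes\n", lso_header_size);	/* 104 */
	return 0;
}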
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index c9cdb2a2c596..8992b38578d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -963,7 +962,7 @@ err_out_free_mtt:
mlx4_mtt_cleanup(dev, &eq->mtt);
err_out_free_eq:
- mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
+ mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
err_out_free_pages:
for (i = 0; i < npages; ++i)
@@ -1018,7 +1017,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
eq->page_list[i].map);
kfree(eq->page_list);
- mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
+ mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
mlx4_free_cmd_mailbox(dev, mailbox);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 194928214606..91b69ff4b4a2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -134,7 +134,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[5] = "Time stamping support",
[6] = "VST (control vlan insertion/stripping) support",
[7] = "FSM (MAC anti-spoofing) support",
- [8] = "Dynamic QP updates support"
+ [8] = "Dynamic QP updates support",
+ [9] = "TCP/IP offloads/flow-steering for VXLAN support"
};
int i;
@@ -207,25 +208,25 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
-#define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET 0x8
-#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc
+#define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8
+#define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc
#define QUERY_FUNC_CAP_QP0_TUNNEL 0x10
#define QUERY_FUNC_CAP_QP0_PROXY 0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL 0x18
#define QUERY_FUNC_CAP_QP1_PROXY 0x1c
+#define QUERY_FUNC_CAP_PHYS_PORT_ID 0x28
-#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40
-#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80
+#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40
+#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80
+#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10
-#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80
+#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
if (vhcr->op_modifier == 1) {
- field = 0;
- /* ensure force vlan and force mac bits are not set */
- MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
- /* ensure that phy_wqe_gid bit is not set */
- MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
+ /* Set nic_info bit to mark new fields support */
+ field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
field = vhcr->in_modifier; /* phys-port = logical-port */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
@@ -243,6 +244,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size += 2;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
+ MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
+ QUERY_FUNC_CAP_PHYS_PORT_ID);
+
} else if (vhcr->op_modifier == 0) {
/* enable rdma and ethernet interfaces, and new quota locations */
field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
@@ -391,22 +395,22 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
goto out;
}
+ MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
- if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
+ if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_OFFSET) {
mlx4_err(dev, "VLAN is enforced on this port\n");
err = -EPROTONOSUPPORT;
goto out;
}
- if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
+ if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
mlx4_err(dev, "Force mac is enabled on this port\n");
err = -EPROTONOSUPPORT;
goto out;
}
} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
- if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+ if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
mlx4_err(dev, "phy_wqe_gid is "
"enforced on this ib port\n");
err = -EPROTONOSUPPORT;
@@ -433,6 +437,10 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
func_cap->qp1_proxy_qpn = size & 0xFFFFFF;
+ if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
+ MLX4_GET(func_cap->phys_port_id, outbox,
+ QUERY_FUNC_CAP_PHYS_PORT_ID);
+
/* All other resources are allocated by the master, but we still report
* 'num' and 'reserved' capabilities as follows:
* - num remains the maximum resource index
@@ -513,6 +521,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70
+#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
@@ -529,6 +538,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
+#define QUERY_DEV_CAP_VXLAN 0x9e
dev_cap->flags2 = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -603,6 +613,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if (field & 0x80)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
+ if (field & 0x80)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
dev_cap->fs_max_num_qp_per_entry = field;
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
@@ -694,6 +707,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
if (field & 1<<6)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
+ if (field & 1<<3)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
MLX4_GET(dev_cap->max_icm_sz, outbox,
QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -842,6 +858,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
field &= 0x7f;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
+ /* For guests, disable vxlan tunneling */
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
+ field &= 0xf7;
+ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
+
/* For guests, report Blueflame disabled */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
field &= 0x7f;
@@ -860,6 +881,12 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, field,
QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
}
+
+ /* turn off ipoib managed steering for guests */
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
+ field &= ~0x80;
+ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
+
return 0;
}
@@ -1267,6 +1294,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_IN_SIZE 0x200
#define INIT_HCA_VERSION_OFFSET 0x000
#define INIT_HCA_VERSION 2
+#define INIT_HCA_VXLAN_OFFSET 0x0c
#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
#define INIT_HCA_FLAGS_OFFSET 0x014
#define INIT_HCA_QPC_OFFSET 0x020
@@ -1425,6 +1453,12 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ /* set parser VXLAN attributes */
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
+ u8 parser_params = 0;
+ MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
+ }
+
err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
MLX4_CMD_NATIVE);
@@ -1713,6 +1747,43 @@ int mlx4_NOP(struct mlx4_dev *dev)
return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
}
+int mlx4_get_phys_port_id(struct mlx4_dev *dev)
+{
+ u8 port;
+ u32 *outbox;
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ u32 guid_hi, guid_lo;
+ int err, ret = 0;
+#define MOD_STAT_CFG_PORT_OFFSET 8
+#define MOD_STAT_CFG_GUID_H 0x14
+#define MOD_STAT_CFG_GUID_L 0x1c
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
+ MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_err(dev, "Fail to get port %d uplink guid\n",
+ port);
+ ret = err;
+ } else {
+ MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
+ MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
+ dev->caps.phys_port_id[port] = (u64)guid_lo |
+ (u64)guid_hi << 32;
+ }
+ }
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return ret;
+}
+
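The per-port GUIDs cached above feed the ndo_get_phys_port_id support this series adds to the Ethernet driver. A minimal consumer could look like the sketch below; it is illustrative only, assuming the 3.14-era struct netdev_phys_port_id and that the exposed ID is simply the cached 64-bit GUID in big-endian byte order, neither of which is shown in this hunk.

static int mlx4_en_get_phys_port_id(struct net_device *dev,
				    struct netdev_phys_port_id *ppid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	/* GUID cached by mlx4_get_phys_port_id() during device init */
	u64 phys_port_id = priv->mdev->dev->caps.phys_port_id[priv->port];
	int i;

	if (!phys_port_id)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(phys_port_id);
	/* expose the 64-bit GUID as a big-endian byte array */
	for (i = ppid->id_len - 1; i >= 0; --i) {
		ppid->id[i] = phys_port_id & 0xff;
		phys_port_id >>= 8;
	}
	return 0;
}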
#define MLX4_WOL_SETUP_MODE (5 << 28)
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index a0a368b7c939..6811ee00ba7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -140,6 +140,8 @@ struct mlx4_func_cap {
u32 qp1_proxy_qpn;
u8 physical_port;
u8 port_flags;
+ u8 flags1;
+ u64 phys_port_id;
};
struct mlx4_adapter {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 01fc6515384d..d711158b0d4b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -96,10 +96,10 @@ MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
" To activate device managed"
" flow steering when available, set to -1");
-static bool enable_64b_cqe_eqe;
+static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
- "Enable 64 byte CQEs/EQEs when the FW supports this");
+ "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
#define HCA_GLOBAL_CAP_MASK 0
@@ -388,6 +388,84 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
return 0;
}
+
+static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ u32 lnkcap1, lnkcap2;
+ int err1, err2;
+
+#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
+
+ *speed = PCI_SPEED_UNKNOWN;
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+ err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1);
+ err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2);
+ if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+ *speed = PCIE_SPEED_8_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+ *speed = PCIE_SPEED_5_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+ *speed = PCIE_SPEED_2_5GT;
+ }
+ if (!err1) {
+ *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
+ if (!lnkcap2) { /* pre-r3.0 */
+ if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
+ *speed = PCIE_SPEED_5_0GT;
+ else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
+ *speed = PCIE_SPEED_2_5GT;
+ }
+ }
+
+ if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
+ return err1 ? err1 :
+ err2 ? err2 : -EINVAL;
+ }
+ return 0;
+}
+
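For reference, the MLW field extracted with PCIE_MLW_CAP_SHIFT above occupies bits 9:4 of the Link Capabilities register. The standalone sketch below decodes a hypothetical LNKCAP value the same way the helper does; the register value is made up for illustration and is not read from any device.

#include <stdio.h>
#include <stdint.h>
#include <linux/pci_regs.h>	/* PCI_EXP_LNKCAP_MLW, PCI_EXP_LNKCAP_SLS_5_0GB */

#define PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

int main(void)
{
	uint32_t lnkcap1 = 0x00000082;	/* hypothetical pre-r3.0 device: x8, 5.0GT/s */
	unsigned int width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;

	printf("max link width: x%u\n", width);				/* x8 */
	printf("supports 5.0GT/s: %s\n",
	       (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB) ? "yes" : "no");	/* yes */
	return 0;
}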
+static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
+{
+ enum pcie_link_width width, width_cap;
+ enum pci_bus_speed speed, speed_cap;
+ int err;
+
+#define PCIE_SPEED_STR(speed) \
+ (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
+ speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
+ speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
+ "Unknown")
+
+ err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
+ if (err) {
+ mlx4_warn(dev,
+ "Unable to determine PCIe device BW capabilities\n");
+ return;
+ }
+
+ err = pcie_get_minimum_link(dev->pdev, &speed, &width);
+ if (err || speed == PCI_SPEED_UNKNOWN ||
+ width == PCIE_LNK_WIDTH_UNKNOWN) {
+ mlx4_warn(dev,
+ "Unable to determine PCI device chain minimum BW\n");
+ return;
+ }
+
+ if (width != width_cap || speed != speed_cap)
+ mlx4_warn(dev,
+ "PCIe BW is different than device's capability\n");
+
+ mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
+ PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
+ mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
+ width, width_cap);
+ return;
+}
+
/* The function checks for live VFs and returns how many there are */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
@@ -606,6 +684,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
dev->caps.port_mask[i] = dev->caps.port_type[i];
+ dev->caps.phys_port_id[i] = func_cap.phys_port_id;
if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
&dev->caps.gid_table_len[i],
&dev->caps.pkey_table_len[i]))
@@ -1443,6 +1522,19 @@ static void choose_steering_mode(struct mlx4_dev *dev,
mlx4_log_num_mgm_entry_size);
}
+static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
+ struct mlx4_dev_cap *dev_cap)
+{
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+ dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
+ dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
+ else
+ dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
+
+ mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
+ == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
+}
+
static int mlx4_init_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1483,6 +1575,11 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
}
choose_steering_mode(dev, &dev_cap);
+ choose_tunnel_offload_mode(dev, &dev_cap);
+
+ err = mlx4_get_phys_port_id(dev);
+ if (err)
+ mlx4_err(dev, "Fail to get physical port id\n");
if (mlx4_is_master(dev))
mlx4_parav_master_pf_caps(dev);
@@ -1654,7 +1751,7 @@ EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
- mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
+ mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
return;
}
@@ -2287,6 +2384,12 @@ slave_start:
goto err_mfunc;
}
+ /* Check whether the device is functioning at its maximum possible speed.
+ * No return code for this call; just warn the user if the PCI Express
+ * device capabilities are under-satisfied by the bus.
+ */
+ mlx4_check_pcie_caps(dev);
+
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
if (mlx4_is_master(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index acf9d5f1f922..db7dc0b6667d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -125,9 +125,14 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
u32 qpn)
{
- struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1];
+ struct mlx4_steer *s_steer;
struct mlx4_promisc_qp *pqp;
+ if (port < 1 || port > dev->caps.num_ports)
+ return NULL;
+
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
+
list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
if (pqp->qpn == qpn)
return pqp;
@@ -154,6 +159,9 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port,
u32 prot;
int err;
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
if (!new_entry)
@@ -238,6 +246,9 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp;
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
pqp = get_promisc_qp(dev, port, steer, qpn);
@@ -283,6 +294,9 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *dqp, *tmp_dqp;
+ if (port < 1 || port > dev->caps.num_ports)
+ return false;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
/* if qp is not promisc, it cannot be duplicated */
@@ -324,6 +338,9 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
bool ret = false;
int i;
+ if (port < 1 || port > dev->caps.num_ports)
+ return false;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -378,6 +395,9 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
int err;
struct mlx4_priv *priv = mlx4_priv(dev);
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
@@ -484,6 +504,9 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
int loc, i;
int err;
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
@@ -674,7 +697,8 @@ const u16 __sw_id_hw[] = {
[MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003,
[MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002,
[MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004,
- [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006
+ [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006,
+ [MLX4_NET_TRANS_RULE_ID_VXLAN] = 0xE008
};
int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
@@ -699,7 +723,9 @@ static const int __rule_hw_sz[] = {
[MLX4_NET_TRANS_RULE_ID_TCP] =
sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
[MLX4_NET_TRANS_RULE_ID_UDP] =
- sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+ sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+ [MLX4_NET_TRANS_RULE_ID_VXLAN] =
+ sizeof(struct mlx4_net_trans_rule_hw_vxlan)
};
int mlx4_hw_rule_sz(struct mlx4_dev *dev,
@@ -764,6 +790,13 @@ static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
break;
+ case MLX4_NET_TRANS_RULE_ID_VXLAN:
+ rule_hw->vxlan.vni =
+ cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8);
+ rule_hw->vxlan.vni_mask =
+ cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8);
+ break;
+
default:
return -EINVAL;
}
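On the VNI handling added to parse_trans_rule() above: a VXLAN VNI is 24 bits wide and, in this rule layout, it evidently sits in the upper three bytes of a 32-bit big-endian word, so the value held in the spec is shifted left by 8 before being re-encoded. A small user-space sketch of the same transformation, with htonl/ntohl standing in for cpu_to_be32/be32_to_cpu:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl/ntohl stand in for cpu_to_be32/be32_to_cpu */

int main(void)
{
	uint32_t vni_be = htonl(0x123456);		/* VNI as held in the spec */
	uint32_t rule_be = htonl(ntohl(vni_be) << 8);	/* value placed in the rule */

	printf("spec vni:   0x%06x\n", (unsigned int)ntohl(vni_be));	/* 0x123456 */
	printf("rule field: 0x%08x\n", (unsigned int)ntohl(rule_be));	/* 0x12345600 */
	return 0;
}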
@@ -895,6 +928,23 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
+int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
+ u32 max_range_qpn)
+{
+ int err;
+ u64 in_param;
+
+ in_param = ((u64) min_range_qpn) << 32;
+ in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF;
+
+ err = mlx4_cmd(dev, in_param, 0, 0,
+ MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE);
+
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type steer)
@@ -996,7 +1046,7 @@ out:
index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
- index - dev->caps.num_mgms);
+ index - dev->caps.num_mgms, MLX4_USE_RR);
}
mutex_unlock(&priv->mcg_table.mutex);
@@ -1087,7 +1137,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
index, amgm_index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
- amgm_index - dev->caps.num_mgms);
+ amgm_index - dev->caps.num_mgms, MLX4_USE_RR);
}
} else {
/* Remove entry from AMGM */
@@ -1107,7 +1157,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
prev, index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
- index - dev->caps.num_mgms);
+ index - dev->caps.num_mgms, MLX4_USE_RR);
}
out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e582a41a802b..6b65f7795215 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -783,6 +783,11 @@ enum {
MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1,
};
+enum {
+ MLX4_NO_RR = 0,
+ MLX4_USE_RR = 1,
+};
+
struct mlx4_priv {
struct mlx4_dev dev;
@@ -844,9 +849,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
extern struct workqueue_struct *mlx4_wq;
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
-void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
+void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr);
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
-void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
+ int use_rr);
u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 reserved_top);
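The MLX4_NO_RR/MLX4_USE_RR values introduced above select how the use_rr argument added to mlx4_bitmap_free()/mlx4_bitmap_free_range() treats the allocator's search cursor; the implementation lives in alloc.c and is not part of this diff. Roughly: with MLX4_USE_RR a freed index is not handed out again until the allocator has cycled through the rest of the range, while MLX4_NO_RR rewinds the cursor so the index can be reused immediately. The toy model below only illustrates that behaviour and is not the mlx4 code.

#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 8

static bool used[POOL_SIZE];
static unsigned int last;	/* search cursor, loosely like mlx4_bitmap.last */

static int toy_alloc(void)
{
	for (unsigned int i = 0; i < POOL_SIZE; i++) {
		unsigned int obj = (last + i) % POOL_SIZE;
		if (!used[obj]) {
			used[obj] = true;
			last = (obj + 1) % POOL_SIZE;
			return obj;
		}
	}
	return -1;
}

static void toy_free(int obj, bool use_rr)
{
	used[obj] = false;
	if (!use_rr && (unsigned int)obj < last)
		last = obj;	/* MLX4_NO_RR: rewind so the id is reused at once */
	/* MLX4_USE_RR: leave the cursor alone; the id only comes back after
	 * the rest of the pool has been cycled through. */
}

int main(void)
{
	int a = toy_alloc(), b = toy_alloc();

	toy_free(a, true);				/* round-robin free */
	printf("%d %d %d\n", a, b, toy_alloc());	/* prints 0 1 2, not 0 1 0 */
	return 0;
}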
@@ -1236,6 +1242,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f3758de59c05..3af04c3f42ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -45,6 +45,7 @@
#include <linux/dcbnl.h>
#endif
#include <linux/cpu_rmap.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
@@ -255,6 +256,8 @@ struct mlx4_en_tx_ring {
u16 poll_cnt;
struct mlx4_en_tx_info *tx_info;
u8 *bounce_buf;
+ u8 queue_index;
+ cpumask_t affinity_mask;
u32 last_nr_txbb;
struct mlx4_qp qp;
struct mlx4_qp_context context;
@@ -373,10 +376,14 @@ struct mlx4_en_dev {
u32 priv_pdn;
spinlock_t uar_lock;
u8 mac_removed[MLX4_MAX_PORTS + 1];
+ rwlock_t clock_lock;
+ u32 nominal_c_mult;
struct cyclecounter cycles;
struct timecounter clock;
unsigned long last_overflow_check;
unsigned long overflow_period;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
};
@@ -434,6 +441,7 @@ struct mlx4_en_mc_list {
enum mlx4_en_mclist_act action;
u8 addr[ETH_ALEN];
u64 reg_id;
+ u64 tunnel_reg_id;
};
struct mlx4_en_frag_info {
@@ -565,7 +573,7 @@ struct mlx4_en_priv {
struct list_head filters;
struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
#endif
-
+ u64 tunnel_reg_id;
};
enum mlx4_en_wol {
@@ -653,7 +661,7 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
}
/* true if a socket is polling, even if it did not get the lock */
-static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
+static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
return cq->state & CQ_USER_PEND;
@@ -683,7 +691,7 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
return false;
}
-static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
+static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
return false;
}
@@ -714,12 +722,14 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring,
- int qpn, u32 size, u16 stride, int node);
+ int qpn, u32 size, u16 stride,
+ int node, int queue_index);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@ -741,6 +751,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
struct mlx4_en_cq *cq,
int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
+int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int user_prio,
struct mlx4_qp_context *context);
@@ -786,6 +797,7 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
struct skb_shared_hwtstamps *hwts,
u64 timestamp);
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
+void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
int mlx4_en_timestamp_config(struct net_device *dev,
int tx_type,
int rx_filter);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index b3ee9bafff5e..24835853b753 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -32,7 +32,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
@@ -346,7 +345,7 @@ void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+ mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}
static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 84cfb40bf451..74216071201f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io-mapping.h>
@@ -59,7 +58,7 @@ EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
{
- mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn);
+ mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn, MLX4_USE_RR);
}
EXPORT_SYMBOL_GPL(mlx4_pd_free);
@@ -96,7 +95,7 @@ EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc);
void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
{
- mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn);
+ mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn, MLX4_USE_RR);
}
void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
@@ -164,7 +163,7 @@ EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
- mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index);
+ mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index, MLX4_USE_RR);
}
EXPORT_SYMBOL_GPL(mlx4_uar_free);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 97d342fa5032..a58bcbf1b806 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -123,6 +123,26 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
return err;
}
+int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_table *table = &info->mac_table;
+ int i;
+
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ if (!table->refs[i])
+ continue;
+
+ if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
+
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
@@ -800,6 +820,47 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
}
EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
+enum {
+ VXLAN_ENABLE_MODIFY = 1 << 7,
+ VXLAN_STEERING_MODIFY = 1 << 6,
+
+ VXLAN_ENABLE = 1 << 7,
+};
+
+struct mlx4_set_port_vxlan_context {
+ u32 reserved1;
+ u8 modify_flags;
+ u8 reserved2;
+ u8 enable_flags;
+ u8 steering;
+};
+
+int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering)
+{
+ int err;
+ u32 in_mod;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_vxlan_context *context;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof(*context));
+
+ context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
+ context->enable_flags = VXLAN_ENABLE;
+ context->steering = steering;
+
+ in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
+
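A caller is expected to issue this command once per port when the tunnel offload mode chosen at init time is VXLAN. The sketch below is hypothetical (the real mlx4_en call site is not in this hunk) and uses 0 only as a placeholder steering value:

/* Hypothetical call site: enable VXLAN parsing on a port once the tunnel
 * offload mode is MLX4_TUNNEL_OFFLOAD_MODE_VXLAN. The steering value is
 * whatever scheme the Ethernet driver selects; 0 is just a placeholder. */
static int example_enable_vxlan(struct mlx4_dev *dev, u8 port)
{
	int err;

	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return 0;	/* firmware has no VXLAN parser; nothing to do */

	err = mlx4_SET_PORT_VXLAN(dev, port, 0 /* placeholder steering mode */);
	if (err)
		mlx4_err(dev, "Failed to enable VXLAN on port %d\n", port);
	return err;
}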
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2715e61dbb74..61d64ebffd56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -35,7 +35,6 @@
#include <linux/gfp.h>
#include <linux/export.h>
-#include <linux/init.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
@@ -250,7 +249,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
return;
- mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+ mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, MLX4_USE_RR);
}
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 2f3f2bc7f283..57428a0cb9dd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1340,43 +1340,29 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
- if (!r)
+ if (!r) {
err = -ENOENT;
- else if (r->com.owner != slave)
+ } else if (r->com.owner != slave) {
err = -EPERM;
- else {
- switch (state) {
- case RES_CQ_BUSY:
- err = -EBUSY;
- break;
-
- case RES_CQ_ALLOCATED:
- if (r->com.state != RES_CQ_HW)
- err = -EINVAL;
- else if (atomic_read(&r->ref_count))
- err = -EBUSY;
- else
- err = 0;
- break;
-
- case RES_CQ_HW:
- if (r->com.state != RES_CQ_ALLOCATED)
- err = -EINVAL;
- else
- err = 0;
- break;
-
- default:
+ } else if (state == RES_CQ_ALLOCATED) {
+ if (r->com.state != RES_CQ_HW)
err = -EINVAL;
- }
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ else
+ err = 0;
+ } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
+ err = -EINVAL;
+ } else {
+ err = 0;
+ }
- if (!err) {
- r->com.from_state = r->com.state;
- r->com.to_state = state;
- r->com.state = RES_CQ_BUSY;
- if (cq)
- *cq = r;
- }
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_CQ_BUSY;
+ if (cq)
+ *cq = r;
}
spin_unlock_irq(mlx4_tlock(dev));
@@ -1385,7 +1371,7 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
- enum res_cq_states state, struct res_srq **srq)
+ enum res_srq_states state, struct res_srq **srq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
@@ -1394,39 +1380,25 @@ static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
- if (!r)
+ if (!r) {
err = -ENOENT;
- else if (r->com.owner != slave)
+ } else if (r->com.owner != slave) {
err = -EPERM;
- else {
- switch (state) {
- case RES_SRQ_BUSY:
+ } else if (state == RES_SRQ_ALLOCATED) {
+ if (r->com.state != RES_SRQ_HW)
err = -EINVAL;
- break;
-
- case RES_SRQ_ALLOCATED:
- if (r->com.state != RES_SRQ_HW)
- err = -EINVAL;
- else if (atomic_read(&r->ref_count))
- err = -EBUSY;
- break;
-
- case RES_SRQ_HW:
- if (r->com.state != RES_SRQ_ALLOCATED)
- err = -EINVAL;
- break;
-
- default:
- err = -EINVAL;
- }
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
+ err = -EINVAL;
+ }
- if (!err) {
- r->com.from_state = r->com.state;
- r->com.to_state = state;
- r->com.state = RES_SRQ_BUSY;
- if (srq)
- *srq = r;
- }
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_SRQ_BUSY;
+ if (srq)
+ *srq = r;
}
spin_unlock_irq(mlx4_tlock(dev));
@@ -3634,7 +3606,7 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
!is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
list_for_each_entry_safe(res, tmp, rlist, list) {
be_mac = cpu_to_be64(res->mac << 16);
- if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
+ if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
return 0;
}
pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
@@ -3844,6 +3816,16 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
+int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ return -EPERM;
+}
+
+
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
struct res_gid *rgid;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 8fdf23753779..98faf870b0b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/srq.h>
@@ -117,7 +116,7 @@ err_put:
mlx4_table_put(dev, &srq_table->table, *srqn);
err_out:
- mlx4_bitmap_free(&srq_table->bitmap, *srqn);
+ mlx4_bitmap_free(&srq_table->bitmap, *srqn, MLX4_NO_RR);
return err;
}
@@ -145,7 +144,7 @@ void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
mlx4_table_put(dev, &srq_table->table, srqn);
- mlx4_bitmap_free(&srq_table->bitmap, srqn);
+ mlx4_bitmap_free(&srq_table->bitmap, srqn, MLX4_NO_RR);
}
static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 157fe8df2c3e..8ff57e8e3e91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -4,5 +4,5 @@
config MLX5_CORE
tristate
- depends on PCI && X86
+ depends on PCI
default n
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 8675d26a678b..405c4fbcd0ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -32,7 +32,6 @@
#include <asm-generic/kmap_types.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index c2d660be6f76..43c5f4809526 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -201,10 +201,23 @@ EXPORT_SYMBOL(mlx5_core_query_cq);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- int type, struct mlx5_cq_modify_params *params)
+ struct mlx5_modify_cq_mbox_in *in, int in_sz)
{
- return -ENOSYS;
+ struct mlx5_modify_cq_mbox_out out;
+ int err;
+
+ memset(&out, 0, sizeof(out));
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
+ err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ return 0;
}
+EXPORT_SYMBOL(mlx5_core_modify_cq);
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 80f6d127257a..10e1f1a18255 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -275,7 +275,7 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
}
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
- int index)
+ int index, int *is_str)
{
struct mlx5_query_qp_mbox_out *out;
struct mlx5_qp_context *ctx;
@@ -293,19 +293,40 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
goto out;
}
+ *is_str = 0;
ctx = &out->ctx;
switch (index) {
case QP_PID:
param = qp->pid;
break;
case QP_STATE:
- param = be32_to_cpu(ctx->flags) >> 28;
+ param = (u64)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
+ *is_str = 1;
break;
case QP_XPORT:
- param = (be32_to_cpu(ctx->flags) >> 16) & 0xff;
+ param = (u64)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
+ *is_str = 1;
break;
case QP_MTU:
- param = ctx->mtu_msgmax >> 5;
+ switch (ctx->mtu_msgmax >> 5) {
+ case IB_MTU_256:
+ param = 256;
+ break;
+ case IB_MTU_512:
+ param = 512;
+ break;
+ case IB_MTU_1024:
+ param = 1024;
+ break;
+ case IB_MTU_2048:
+ param = 2048;
+ break;
+ case IB_MTU_4096:
+ param = 4096;
+ break;
+ default:
+ param = 0;
+ }
break;
case QP_N_RECV:
param = 1 << ((ctx->rq_size_stride >> 3) & 0xf);
@@ -414,6 +435,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
struct mlx5_field_desc *desc;
struct mlx5_rsc_debug *d;
char tbuf[18];
+ int is_str = 0;
u64 field;
int ret;
@@ -424,7 +446,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
d = (void *)(desc - desc->i) - sizeof(*d);
switch (d->type) {
case MLX5_DBG_RSC_QP:
- field = qp_read_field(d->dev, d->object, desc->i);
+ field = qp_read_field(d->dev, d->object, desc->i, &is_str);
break;
case MLX5_DBG_RSC_EQ:
@@ -440,7 +462,12 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
return -EINVAL;
}
- ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
+
+ if (is_str)
+ ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)field);
+ else
+ ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
+
if (ret > 0) {
if (copy_to_user(buf, tbuf, ret))
return -EFAULT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 40a9f5ed814d..a064f06e0cb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -460,7 +460,10 @@ disable_msix:
err_stop_poll:
mlx5_stop_health_poll(dev);
- mlx5_cmd_teardown_hca(dev);
+ if (mlx5_cmd_teardown_hca(dev)) {
+ dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
+ return err;
+ }
err_pagealloc_stop:
mlx5_pagealloc_stop(dev);
@@ -503,7 +506,10 @@ void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_eq_cleanup(dev);
mlx5_disable_msix(dev);
mlx5_stop_health_poll(dev);
- mlx5_cmd_teardown_hca(dev);
+ if (mlx5_cmd_teardown_hca(dev)) {
+ dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
+ return;
+ }
mlx5_pagealloc_stop(dev);
mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 37b6ad1f9a1b..d59790a82bc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -99,7 +99,7 @@ enum {
enum {
MLX5_MAX_RECLAIM_TIME_MILI = 5000,
- MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / 4096,
+ MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
@@ -192,10 +192,8 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
struct fw_page *fp;
unsigned n;
- if (list_empty(&dev->priv.free_list)) {
+ if (list_empty(&dev->priv.free_list))
return -ENOMEM;
- mlx5_core_warn(dev, "\n");
- }
fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
@@ -208,7 +206,7 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
if (!fp->free_count)
list_del(&fp->list);
- *addr = fp->addr + n * 4096;
+ *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;
return 0;
}
@@ -224,14 +222,15 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
return;
}
- n = (addr & ~PAGE_MASK) % 4096;
+ n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
fwp->free_count++;
set_bit(n, &fwp->bitmask);
if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
rb_erase(&fwp->rb_node, &dev->priv.page_root);
if (fwp->free_count != 1)
list_del(&fwp->list);
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
__free_page(fwp->page);
kfree(fwp);
} else if (fwp->free_count == 1) {
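The index fix above matters when the host page size is larger than the 4 KiB firmware page unit (MLX5_ADAPTER_PAGE_SIZE): the old expression took a byte remainder modulo 4096 rather than a 4 KiB-chunk index, so it was only correct by accident when PAGE_SIZE happened to be 4096. A standalone check of the arithmetic, assuming a 64 KiB host page:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE		65536UL		/* e.g. a 64 KiB-page host */
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define MLX5_ADAPTER_PAGE_SHIFT	12		/* 4 KiB firmware pages */

int main(void)
{
	uint64_t addr = 0x100000 + 2 * 4096;	/* third 4K chunk of a 64K page */
	unsigned int idx_old = (addr & ~PAGE_MASK) % 4096;			/* 0: wrong */
	unsigned int idx_new = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;	/* 2: right */

	printf("old index %u, new index %u\n", idx_old, idx_new);
	return 0;
}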
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index f6afe7b5a675..8c9ac870ecb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -57,7 +57,7 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
in->arg = cpu_to_be32(arg);
in->register_id = cpu_to_be16(reg_num);
err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out,
- sizeof(out) + size_out);
+ sizeof(*out) + size_out);
if (err)
goto ex2;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 54faf8bfcaf4..510576213dd0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -74,7 +74,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_destroy_qp_mbox_out dout;
int err;
- memset(&dout, 0, sizeof(dout));
+ memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
@@ -84,7 +84,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
}
if (out.hdr.status) {
- pr_warn("current num of QPs 0x%x\n", atomic_read(&dev->num_qps));
+ mlx5_core_warn(dev, "current num of QPs 0x%x\n",
+ atomic_read(&dev->num_qps));
return mlx5_cmd_status_to_err(&out.hdr);
}
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 106eb972f2ac..16435b3cfa9f 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -21,7 +21,6 @@
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ddd252a3da9c..ce84dc289c8f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4128,10 +4128,10 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
int i;
int j = ADDITIONAL_ENTRIES;
- if (!memcmp(hw->override_addr, mac_addr, ETH_ALEN))
+ if (ether_addr_equal(hw->override_addr, mac_addr))
return 0;
for (i = 0; i < hw->addr_list_size; i++) {
- if (!memcmp(hw->address[i], mac_addr, ETH_ALEN))
+ if (ether_addr_equal(hw->address[i], mac_addr))
return 0;
if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
j = i;
@@ -4149,7 +4149,7 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
int i;
for (i = 0; i < hw->addr_list_size; i++) {
- if (!memcmp(hw->address[i], mac_addr, ETH_ALEN)) {
+ if (ether_addr_equal(hw->address[i], mac_addr)) {
memset(hw->address[i], 0, ETH_ALEN);
writel(0, hw->io + ADD_ADDR_INCR * i +
KS_ADD_ADDR_0_HI);
@@ -5853,15 +5853,12 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
struct ksz_port *port = &priv->port;
- int rc;
int result = 0;
struct mii_ioctl_data *data = if_mii(ifr);
if (down_interruptible(&priv->proc_sem))
return -ERESTARTSYS;
- /* assume success */
- rc = 0;
switch (cmd) {
/* Get address of MII PHY in use. */
case SIOCGMIIPHY:
@@ -7104,8 +7101,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
ETH_ALEN);
else {
memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
- if (!memcmp(sw->other_addr, hw->override_addr,
- ETH_ALEN))
+ if (ether_addr_equal(sw->other_addr, hw->override_addr))
dev->dev_addr[5] += port->first_port;
}
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index cbd013379252..5020fd47825d 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -13,7 +13,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index 79257f71c5d9..a5512a97cc4d 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -24,7 +24,6 @@
#include <linux/fcntl.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 346a4e025c34..9e4ddbba7036 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -37,7 +37,6 @@
#include <linux/fcntl.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
@@ -52,7 +51,6 @@
#include <linux/bitrev.h>
#include <linux/slab.h>
-#include <asm/bootinfo.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/hwtest.h>
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index d3b47003a575..dbccf1de49ec 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -22,8 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* ChangeLog
@@ -2236,7 +2235,6 @@ out_disable:
pci_disable_device(pci_dev);
out_free:
free_netdev(ndev);
- pci_set_drvdata(pci_dev, NULL);
out:
return err;
}
@@ -2260,7 +2258,6 @@ static void ns83820_remove_one(struct pci_dev *pci_dev)
dev->rx_info.descs, dev->rx_info.phy_descs);
pci_disable_device(dev->pci_dev);
free_netdev(ndev);
- pci_set_drvdata(pci_dev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(ns83820_pci_tbl) = {
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index fbe5363cb89c..089b713b9f7b 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2148,7 +2148,7 @@ __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
* __vxge_hw_ring_replenish - Initial replenish of RxDs
* This function replenishes the RxDs from reserve array to work array
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
void *rxd;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index f9876ea8c8bf..e46e8698e630 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -87,6 +87,7 @@ static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
module_param_array(bw_percentage, uint, NULL, 0);
static struct vxge_drv_config *driver_config;
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
@@ -507,7 +508,8 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
* if rss is disabled/enabled, so key off of that.
*/
if (ext_info.rth_value)
- skb->rxhash = ext_info.rth_value;
+ skb_set_hash(skb, ext_info.rth_value,
+ PKT_HASH_TYPE_L3);
vxge_rx_complete(ring, skb, ext_info.vlan,
pkt_length, &ext_info);
@@ -724,9 +726,6 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
int vpath_idx = 0;
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath = NULL;
- struct __vxge_hw_device *hldev;
-
- hldev = pci_get_drvdata(vdev->pdev);
mac_address = (u8 *)&mac_addr;
memcpy(mac_address, mac_header, ETH_ALEN);
@@ -1429,7 +1428,7 @@ vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
return status;
}
- while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
+ while (!ether_addr_equal(mac->macaddr, macaddr)) {
status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
macaddr, macmask);
if (status != VXGE_HW_OK)
@@ -1970,7 +1969,7 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
}
/* reset vpaths */
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -2441,9 +2440,6 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
static void vxge_rem_isr(struct vxgedev *vdev)
{
- struct __vxge_hw_device *hldev;
- hldev = pci_get_drvdata(vdev->pdev);
-
#ifdef CONFIG_PCI_MSI
if (vdev->config.intr_type == MSI_X) {
vxge_rem_msix_isr(vdev);
@@ -3189,7 +3185,7 @@ static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
return status;
}
-static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
+static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
{
struct hwtstamp_config config;
int i;
@@ -3250,6 +3246,21 @@ static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
return 0;
}
+static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
+{
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = HWTSTAMP_TX_OFF;
+ config.rx_filter = (vdev->rx_hwts ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+ if (copy_to_user(data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
/**
* vxge_ioctl
* @dev: Device pointer.
@@ -3263,19 +3274,15 @@ static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct vxgedev *vdev = netdev_priv(dev);
- int ret;
switch (cmd) {
case SIOCSHWTSTAMP:
- ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
- if (ret)
- return ret;
- break;
+ return vxge_hwtstamp_set(vdev, rq->ifr_data);
+ case SIOCGHWTSTAMP:
+ return vxge_hwtstamp_get(vdev, rq->ifr_data);
default:
return -EOPNOTSUPP;
}
-
- return 0;
}
/**
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index 36ca40f8f249..3a79d93b8445 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -427,7 +427,6 @@ void vxge_os_timer(struct timer_list *timer, void (*func)(unsigned long data),
}
void vxge_initialize_ethtool_ops(struct net_device *ndev);
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
/* #define VXGE_DEBUG_INIT: debug for initialization functions
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 99749bd07d72..9e1aaa7f36bb 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -1956,8 +1956,7 @@ exit:
* @vid: vlan id to be added for this vpath into the list
*
* Adds the given vlan id into the list for this vpath.
- * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
- * vxge_hw_vpath_vid_get_next
+ * see also: vxge_hw_vpath_vid_delete
*
*/
enum vxge_hw_status
@@ -1979,45 +1978,13 @@ exit:
}
/**
- * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
- * from vlan id table.
- * @vp: Vpath handle.
- * @vid: Buffer to return vlan id
- *
- * Returns the first vlan id in the list for this vpath.
- * see also: vxge_hw_vpath_vid_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
-{
- u64 data;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
- 0, vid, &data);
-
- *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
-exit:
- return status;
-}
-
-/**
* vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
* to vlan id table.
* @vp: Vpath handle.
* @vid: vlan id to be added for this vpath into the list
*
* Adds the given vlan id into the list for this vpath.
- * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
- * vxge_hw_vpath_vid_get_next
+ * see also: vxge_hw_vpath_vid_add
*
*/
enum vxge_hw_status
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
index 4a518a3b131c..ba6f833bb059 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
@@ -1918,9 +1918,6 @@ vxge_hw_ring_rxd_post_post(
struct __vxge_hw_ring *ring_handle,
void *rxdh);
-enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle);
-
void
vxge_hw_ring_rxd_post_post_wmb(
struct __vxge_hw_ring *ring_handle,
@@ -2186,11 +2183,6 @@ vxge_hw_vpath_vid_add(
u64 vid);
enum vxge_hw_status
-vxge_hw_vpath_vid_get(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 *vid);
-
-enum vxge_hw_status
vxge_hw_vpath_vid_delete(
struct __vxge_hw_vpath_handle *vpath_handle,
u64 vid);
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index e6f0a4366f90..31eb911e4763 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/init.h>
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1e8b9514718b..70cf97fe67f2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -26,8 +26,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -59,7 +58,6 @@
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
-#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -6020,7 +6018,6 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
out_error:
if (phystate_orig)
writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
- pci_set_drvdata(pci_dev, NULL);
out_freering:
free_rings(dev);
out_unmap:
@@ -6091,7 +6088,6 @@ static void nv_remove(struct pci_dev *pci_dev)
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
free_netdev(dev);
- pci_set_drvdata(pci_dev, NULL);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index ba3ca18611f7..422d9b51ac24 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -19,7 +19,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 2a9003071d51..2a55d6d53ee6 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _PCH_GBE_H_
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
index ff3ad70935a6..51250363566b 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
#include "pch_gbe_phy.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
index 94aaac5b057b..91ce07c8306c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _PCH_GBE_API_H_
#define _PCH_GBE_API_H_
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index f0ceb89af931..826f0ccdc23c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
#include "pch_gbe_api.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 27ffe0ebf0a6..464e91058c81 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index cf7c9b3a255b..08d4be616064 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index 8b7ff75fc8e0..a5cad5ea9436 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 0cbe69206e04..95ad0151ad02 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _PCH_GBE_PHY_H_
#define _PCH_GBE_PHY_H_
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 07a890eb72ad..9a6cb482dcd0 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -1053,7 +1053,7 @@ static int yellowfin_rx(struct net_device *dev)
struct sk_buff *rx_skb = yp->rx_skbuff[entry];
s16 frame_status;
u16 desc_status;
- int data_size;
+ int data_size, yf_size;
u8 *buf_addr;
if(!desc->result_status)
@@ -1070,6 +1070,9 @@ static int yellowfin_rx(struct net_device *dev)
__func__, frame_status);
if (--boguscnt < 0)
break;
+
+ yf_size = sizeof(struct yellowfin_desc);
+
if ( ! (desc_status & RX_EOP)) {
if (data_size != 0)
netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
@@ -1096,12 +1099,12 @@ static int yellowfin_rx(struct net_device *dev)
if (status2 & 0x80) dev->stats.rx_dropped++;
#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
} else if ((yp->flags & HasMACAddrBug) &&
- memcmp(le32_to_cpu(yp->rx_ring_dma +
- entry*sizeof(struct yellowfin_desc)),
- dev->dev_addr, 6) != 0 &&
- memcmp(le32_to_cpu(yp->rx_ring_dma +
- entry*sizeof(struct yellowfin_desc)),
- "\377\377\377\377\377\377", 6) != 0) {
+ !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
+ entry * yf_size),
+ dev->dev_addr) &&
+ !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
+ entry * yf_size),
+ "\377\377\377\377\377\377")) {
if (bogus_rx++ == 0)
netdev_warn(dev, "Bad frame to %pM\n",
buf_addr);
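
The yellowfin hunk above swaps two open-coded memcmp() calls against the device address and the all-ones address for ether_addr_equal(). A minimal userspace sketch of the comparison being expressed, assuming nothing beyond libc (the kernel helper is an optimized word-wise compare, this only models the semantics):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static bool mac_equal(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;	/* kernel version uses u16/u32 loads */
}

static bool mac_is_broadcast(const unsigned char *a)
{
	static const unsigned char bc[ETH_ALEN] =
		{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	return mac_equal(a, bc);
}

int main(void)
{
	unsigned char dev_addr[ETH_ALEN]  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char frame_dst[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	/* Flag the frame only if it is neither for us nor broadcast. */
	if (!mac_equal(frame_dst, dev_addr) && !mac_is_broadcast(frame_dst))
		printf("bad frame\n");
	else
		printf("accept frame\n");
	return 0;
}
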
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index dbaa49e58b0c..9abf70d74b31 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -13,11 +13,9 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h
index f2749d46c125..a5807703ab96 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.h
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef PASEMI_MAC_H
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
index 4825959a0efe..25fae568261f 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
diff --git a/drivers/net/ethernet/qlogic/netxen/Makefile b/drivers/net/ethernet/qlogic/netxen/Makefile
index 861a0590b1f4..e14e60c88381 100644
--- a/drivers/net/ethernet/qlogic/netxen/Makefile
+++ b/drivers/net/ethernet/qlogic/netxen/Makefile
@@ -13,9 +13,7 @@
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston,
-# MA 02111-1307, USA.
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution
# in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 9adcdbb49476..6e426ae94692 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 1bcaf45aa864..6f6be57f4690 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 4ca2c196c98a..87e073c6e291 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index 0c64c82b9acf..a310c2f6502a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 67efe754367d..db4280ce9c09 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
@@ -663,7 +661,7 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
list_for_each(head, del_list) {
cur = list_entry(head, nx_mac_list_t, list);
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(addr, cur->mac_addr)) {
list_move_tail(head, &adapter->mac_list);
return 0;
}
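
The nx_p3_nic_add_mac() hunk keeps the same "look up the address in the driver's MAC cache before touching hardware" flow, only the comparison helper changes. A behavioral sketch of that lookup-or-add pattern, in plain userspace C rather than the kernel's list.h machinery (names and sizes here are illustrative only):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN     6
#define MAC_CACHE_SZ 16

struct mac_cache {
	unsigned char addr[MAC_CACHE_SZ][ETH_ALEN];
	int count;
};

/* Return true if addr was already cached, i.e. no hardware update is needed. */
static bool mac_cache_add(struct mac_cache *c, const unsigned char *addr)
{
	int i;

	for (i = 0; i < c->count; i++)
		if (memcmp(c->addr[i], addr, ETH_ALEN) == 0)
			return true;		/* already present */

	if (c->count < MAC_CACHE_SZ)
		memcpy(c->addr[c->count++], addr, ETH_ALEN);
	return false;				/* caller programs the NIC filter */
}

int main(void)
{
	struct mac_cache cache = { .count = 0 };
	unsigned char mac[ETH_ALEN] = { 0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01 };

	if (!mac_cache_add(&cache, mac))
		puts("new address: program the NIC filter");
	if (mac_cache_add(&cache, mac))
		puts("duplicate: nothing to do");
	return 0;
}
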
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
index e2c5b6f2df03..7433c4d21601 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 7692dfd4f262..32058614151a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
@@ -1604,13 +1602,13 @@ netxen_process_lro(struct netxen_adapter *adapter,
u32 seq_number;
u8 vhdr_len = 0;
- if (unlikely(ring > adapter->max_rds_rings))
+ if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = netxen_get_lro_sts_refhandle(sts_data0);
- if (unlikely(index > rds_ring->num_desc))
+ if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
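
The two netxen_process_lro() changes above are off-by-one fixes: for a table of num_desc entries the valid indexes are 0 .. num_desc - 1, so index == num_desc is already out of range and must be rejected. A minimal sketch of why '>=' is the correct guard:

#include <stdio.h>

#define NUM_DESC 4

static int lookup(const int *table, unsigned int index)
{
	if (index >= NUM_DESC)		/* 'index > NUM_DESC' would let
					   index == NUM_DESC read one past
					   the end of the array */
		return -1;
	return table[index];
}

int main(void)
{
	int table[NUM_DESC] = { 10, 20, 30, 40 };

	printf("%d\n", lookup(table, 3));	/* last valid entry */
	printf("%d\n", lookup(table, 4));	/* rejected: -1 */
	return 0;
}
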
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 3bec8cfebf99..70849dea32b1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 0758b9435358..2eabd44f8914 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -8,7 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 631ea0ac1cd8..f19f81cde134 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 52
-#define QLCNIC_LINUX_VERSIONID "5.3.52"
+#define _QLCNIC_LINUX_SUBVERSION 55
+#define QLCNIC_LINUX_VERSIONID "5.3.55"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -105,6 +105,8 @@
#define QLCNIC_DEF_TX_RINGS 4
#define QLCNIC_MAX_VNIC_TX_RINGS 4
#define QLCNIC_MAX_VNIC_SDS_RINGS 4
+#define QLCNIC_83XX_MINIMUM_VECTOR 3
+#define QLCNIC_82XX_MINIMUM_VECTOR 2
enum qlcnic_queue_type {
QLCNIC_TX_QUEUE = 1,
@@ -115,6 +117,10 @@ enum qlcnic_queue_type {
#define QLCNIC_VNIC_MODE 0xFF
#define QLCNIC_DEFAULT_MODE 0x0
+/* Virtual NIC function count */
+#define QLC_DEFAULT_VNIC_COUNT 8
+#define QLC_84XX_VNIC_COUNT 16
+
/*
* Following are the states of the Phantom. Phantom will set them and
* Host will read to check if the fields are correct.
@@ -365,6 +371,7 @@ struct qlcnic_rx_buffer {
*/
#define QLCNIC_INTR_COAL_TYPE_RX 1
#define QLCNIC_INTR_COAL_TYPE_TX 2
+#define QLCNIC_INTR_COAL_TYPE_RX_TX 3
#define QLCNIC_DEF_INTR_COALESCE_RX_TIME_US 3
#define QLCNIC_DEF_INTR_COALESCE_RX_PACKETS 256
@@ -374,7 +381,7 @@ struct qlcnic_rx_buffer {
#define QLCNIC_INTR_DEFAULT 0x04
#define QLCNIC_CONFIG_INTR_COALESCE 3
-#define QLCNIC_DEV_INFO_SIZE 1
+#define QLCNIC_DEV_INFO_SIZE 2
struct qlcnic_nic_intr_coalesce {
u8 type;
@@ -462,8 +469,10 @@ struct qlcnic_hardware_context {
u16 max_rx_ques;
u16 max_mtu;
u32 msg_enable;
- u16 act_pci_func;
+ u16 total_nic_func;
u16 max_pci_func;
+ u32 max_vnic_func;
+ u32 total_pci_func;
u32 capabilities;
u32 extra_capability[3];
@@ -487,6 +496,7 @@ struct qlcnic_hardware_context {
struct qlcnic_mailbox *mailbox;
u8 extend_lb_time;
u8 phys_port_id[ETH_ALEN];
+ u8 lb_mode;
};
struct qlcnic_adapter_stats {
@@ -578,6 +588,8 @@ struct qlcnic_host_tx_ring {
dma_addr_t phys_addr;
dma_addr_t hw_cons_phys_addr;
struct netdev_queue *txq;
+ /* Lock to protect Tx descriptors cleanup */
+ spinlock_t tx_clean_lock;
} ____cacheline_internodealigned_in_smp;
/*
@@ -788,9 +800,10 @@ struct qlcnic_cardrsp_tx_ctx {
#define QLCNIC_MAC_VLAN_ADD 3
#define QLCNIC_MAC_VLAN_DEL 4
-struct qlcnic_mac_list_s {
+struct qlcnic_mac_vlan_list {
struct list_head list;
uint8_t mac_addr[ETH_ALEN+2];
+ u16 vlan_id;
};
/* MAC Learn */
@@ -808,6 +821,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_ILB_MODE 0x1
#define QLCNIC_ELB_MODE 0x2
+#define QLCNIC_LB_MODE_MASK 0x3
#define QLCNIC_LINKEVENT 0x1
#define QLCNIC_LB_RESPONSE 0x2
@@ -856,7 +870,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3
#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
-#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_8
+#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -950,6 +964,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_TX_INTR_SHARED 0x10000
#define QLCNIC_APP_CHANGED_FLAGS 0x20000
#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
+#define QLCNIC_TSS_RSS 0x80000
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
@@ -959,6 +974,9 @@ struct qlcnic_ipaddr {
#define QLCNIC_BEACON_EANBLE 0xC
#define QLCNIC_BEACON_DISABLE 0xD
+#define QLCNIC_BEACON_ON 2
+#define QLCNIC_BEACON_OFF 0
+
#define QLCNIC_MSIX_TBL_SPACE 8192
#define QLCNIC_PCI_REG_MSIX_TBL 0x44
#define QLCNIC_MSIX_TBL_PGSIZE 4096
@@ -1043,6 +1061,9 @@ struct qlcnic_adapter {
u8 drv_tx_rings; /* max tx rings supported by driver */
u8 drv_sds_rings; /* max sds rings supported by driver */
+ u8 drv_tss_rings; /* tss ring input */
+ u8 drv_rss_rings; /* rss ring input */
+
u8 rx_csum;
u8 portnum;
@@ -1068,6 +1089,7 @@ struct qlcnic_adapter {
u64 dev_rst_time;
bool drv_mac_learn;
bool fdb_mac_learn;
+ bool rx_mac_learn;
unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
u8 flash_mfg_id;
struct qlcnic_npar_info *npars;
@@ -1093,7 +1115,6 @@ struct qlcnic_adapter {
struct qlcnic_filter_hash rx_fhash;
struct list_head vf_mc_list;
- spinlock_t tx_clean_lock;
spinlock_t mac_learn_lock;
/* spinlock for catching rcv filters for eswitch traffic */
spinlock_t rx_mac_learn_lock;
@@ -1257,7 +1278,7 @@ struct qlcnic_pci_func_cfg {
u16 port_num;
u8 pci_func;
u8 func_state;
- u8 def_mac_addr[6];
+ u8 def_mac_addr[ETH_ALEN];
};
struct qlcnic_npar_func_cfg {
@@ -1459,8 +1480,6 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
-void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
-void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
#define ADDR_IN_RANGE(addr, low, high) \
(((addr) < (high)) && ((addr) >= (low)))
@@ -1496,16 +1515,11 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
#define MAX_CTL_CHECK 1000
-int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
int qlcnic_dump_fw(struct qlcnic_adapter *);
int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *);
bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *);
-pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
- pci_channel_state_t);
-pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
-void qlcnic_82xx_io_resume(struct pci_dev *);
/* Functions from qlcnic_init.c */
void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int);
@@ -1540,9 +1554,7 @@ int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
void qlcnic_watchdog_task(struct work_struct *work);
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
-int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
void qlcnic_set_multi(struct net_device *netdev);
-void __qlcnic_set_multi(struct net_device *, u16);
int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16);
int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
@@ -1555,13 +1567,11 @@ netdev_features_t qlcnic_fix_features(struct net_device *netdev,
netdev_features_t features);
int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
-int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
/* Functions from qlcnic_ethtool.c */
int qlcnic_check_loopback_buff(unsigned char *, u8 []);
int qlcnic_do_lb_test(struct qlcnic_adapter *, u8);
-int qlcnic_loopback_test(struct net_device *, u8);
/* Functions from qlcnic_main.c */
int qlcnic_reset_context(struct qlcnic_adapter *);
@@ -1570,10 +1580,9 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int);
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *, struct net_device *);
void qlcnic_set_tx_ring_count(struct qlcnic_adapter *, u8);
void qlcnic_set_sds_ring_count(struct qlcnic_adapter *, u8);
-int qlcnic_setup_rings(struct qlcnic_adapter *, u8, u8);
+int qlcnic_setup_rings(struct qlcnic_adapter *);
int qlcnic_validate_rings(struct qlcnic_adapter *, __u32, int);
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
-void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
void qlcnic_set_drv_version(struct qlcnic_adapter *);
@@ -1602,11 +1611,8 @@ void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
-void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
-void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
-int qlcnic_82xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
@@ -1614,7 +1620,7 @@ void qlcnic_set_vlan_config(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
-
+int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *);
void qlcnic_down(struct qlcnic_adapter *, struct net_device *);
int qlcnic_up(struct qlcnic_adapter *, struct net_device *);
void __qlcnic_down(struct qlcnic_adapter *, struct net_device *);
@@ -1629,15 +1635,15 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *);
int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
int qlcnic_reset_npar_config(struct qlcnic_adapter *);
int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
-void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16);
-int qlcnic_get_beacon_state(struct qlcnic_adapter *, u8 *);
int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
int qlcnic_read_mac_addr(struct qlcnic_adapter *);
int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
void qlcnic_sriov_vf_schedule_multi(struct net_device *);
-void qlcnic_vf_add_mc_list(struct net_device *, u16);
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
+ u16 *);
/*
* QLOGIC Board information
@@ -1671,11 +1677,8 @@ static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
if (err)
- dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n",
- adapter->drv_tx_rings);
- else
- dev_info(&adapter->pdev->dev, "Set %d Tx queues\n",
- adapter->drv_tx_rings);
+ netdev_err(netdev, "failed to set %d Tx queues\n",
+ adapter->drv_tx_rings);
return err;
}
@@ -1708,6 +1711,7 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
+void qlcnic_update_stats(struct qlcnic_adapter *);
/* Adapter hardware abstraction */
struct qlcnic_hardware_ops {
@@ -1740,7 +1744,8 @@ struct qlcnic_hardware_ops {
int (*change_macvlan) (struct qlcnic_adapter *, u8*, u16, u8);
void (*napi_enable) (struct qlcnic_adapter *);
void (*napi_disable) (struct qlcnic_adapter *);
- void (*config_intr_coal) (struct qlcnic_adapter *);
+ int (*config_intr_coal) (struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
int (*config_rss) (struct qlcnic_adapter *, int);
int (*config_hw_lro) (struct qlcnic_adapter *, int);
int (*config_loopback) (struct qlcnic_adapter *, u8);
@@ -1755,6 +1760,15 @@ struct qlcnic_hardware_ops {
pci_channel_state_t);
pci_ers_result_t (*io_slot_reset) (struct pci_dev *);
void (*io_resume) (struct pci_dev *);
+ void (*get_beacon_state)(struct qlcnic_adapter *);
+ void (*enable_sds_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+ void (*disable_sds_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+ void (*enable_tx_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+ void (*disable_tx_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
};
extern struct qlcnic_nic_template qlcnic_vf_ops;
@@ -1927,9 +1941,10 @@ static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
adapter->ahw->hw_ops->napi_disable(adapter);
}
-static inline void qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+static inline int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
{
- adapter->ahw->hw_ops->config_intr_coal(adapter);
+ return adapter->ahw->hw_ops->config_intr_coal(adapter, ethcoal);
}
static inline int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
@@ -1981,6 +1996,11 @@ static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
adapter->ahw->hw_ops->set_mac_filter_count(adapter);
}
+static inline void qlcnic_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->get_beacon_state(adapter);
+}
+
static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
{
if (adapter->ahw->hw_ops->read_phys_port_id)
@@ -2023,6 +2043,54 @@ static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
}
+static inline void
+qlcnic_82xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(0x0, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_82xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(1, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(0, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(1, tx_ring->crb_intr_mask);
+}
+
+/* Enable MSI-x and INT-x interrupts */
+static inline void
+qlcnic_83xx_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+/* Disable MSI-x and INT-x interrupts */
+static inline void
+qlcnic_83xx_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(1, sds_ring->crb_intr_mask);
+}
+
static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
{
test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
@@ -2032,10 +2100,10 @@ static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
/* When operating in a multi tx mode, driver needs to write 0x1
* to src register, instead of 0x0 to disable receiving interrupt.
*/
-static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
+static inline void
+qlcnic_82xx_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
{
- struct qlcnic_adapter *adapter = sds_ring->adapter;
-
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
(adapter->flags & QLCNIC_MSIX_ENABLED))
@@ -2044,13 +2112,42 @@ static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
writel(0, sds_ring->crb_intr_mask);
}
+static inline void qlcnic_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (adapter->ahw->hw_ops->enable_sds_intr)
+ adapter->ahw->hw_ops->enable_sds_intr(adapter, sds_ring);
+}
+
+static inline void
+qlcnic_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (adapter->ahw->hw_ops->disable_sds_intr)
+ adapter->ahw->hw_ops->disable_sds_intr(adapter, sds_ring);
+}
+
+static inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (adapter->ahw->hw_ops->enable_tx_intr)
+ adapter->ahw->hw_ops->enable_tx_intr(adapter, tx_ring);
+}
+
+static inline void qlcnic_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (adapter->ahw->hw_ops->disable_tx_intr)
+ adapter->ahw->hw_ops->disable_tx_intr(adapter, tx_ring);
+}
+
/* When operating in a multi tx mode, driver needs to write 0x0
* to src register, instead of 0x1 to enable receiving interrupts.
*/
-static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
+static inline void
+qlcnic_82xx_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
{
- struct qlcnic_adapter *adapter = sds_ring->adapter;
-
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
(adapter->flags & QLCNIC_MSIX_ENABLED))
@@ -2136,4 +2233,26 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
return status;
}
+
+static inline bool qlcnic_83xx_pf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false;
+}
+
+static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+}
+
+static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_84xx_check(adapter))
+ return QLC_84XX_VNIC_COUNT;
+ else
+ return QLC_DEFAULT_VNIC_COUNT;
+}
#endif /* __QLCNIC_H_ */
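
The qlcnic.h changes above route the per-chip interrupt enable/disable helpers through the adapter's hw_ops table, and the generic wrappers only call a hook when the backend provides one. A sketch of that NULL-checked function-pointer indirection, with illustrative types rather than the driver's real structures:

#include <stdio.h>

struct ring { int id; };

struct hw_ops {
	void (*enable_tx_intr)(struct ring *r);
	void (*disable_tx_intr)(struct ring *r);
};

static void chip_a_enable_tx_intr(struct ring *r)
{
	printf("chip A: unmask tx ring %d\n", r->id);
}

static const struct hw_ops chip_a_ops = {
	.enable_tx_intr  = chip_a_enable_tx_intr,
	/* .disable_tx_intr left NULL: the wrapper becomes a no-op */
};

static void enable_tx_intr(const struct hw_ops *ops, struct ring *r)
{
	if (ops->enable_tx_intr)
		ops->enable_tx_intr(r);
}

static void disable_tx_intr(const struct hw_ops *ops, struct ring *r)
{
	if (ops->disable_tx_intr)
		ops->disable_tx_intr(r);
}

int main(void)
{
	struct ring r = { .id = 0 };

	enable_tx_intr(&chip_a_ops, &r);
	disable_tx_intr(&chip_a_ops, &r);	/* silently skipped */
	return 0;
}
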
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b1cb0ffb15c7..4146664d4d6a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -13,8 +13,26 @@
#include <linux/interrupt.h>
#include <linux/aer.h>
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
+ struct qlcnic_cmd_args *);
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
+static irqreturn_t qlcnic_83xx_handle_aen(int, void *);
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
+static void qlcnic_83xx_io_resume(struct pci_dev *);
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
+static int qlcnic_83xx_resume(struct qlcnic_adapter *);
+static int qlcnic_83xx_shutdown(struct pci_dev *);
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
+
#define RSS_HASHTYPE_IP_TCP 0x3
#define QLC_83XX_FW_MBX_CMD 0
+#define QLC_SKIP_INACTIVE_PCI_REGS 7
static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -34,7 +52,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_READ_MAX_MTU, 4, 2},
{QLCNIC_CMD_READ_MAX_LRO, 4, 2},
{QLCNIC_CMD_MAC_ADDRESS, 4, 3},
- {QLCNIC_CMD_GET_PCI_INFO, 1, 66},
+ {QLCNIC_CMD_GET_PCI_INFO, 1, 129},
{QLCNIC_CMD_GET_NIC_INFO, 2, 19},
{QLCNIC_CMD_SET_NIC_INFO, 32, 1},
{QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
@@ -68,7 +86,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_CONFIG_VPORT, 4, 4},
{QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
- {QLCNIC_CMD_DCB_QUERY_PARAM, 2, 50},
+ {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
};
const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -180,6 +198,11 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
.io_error_detected = qlcnic_83xx_io_error_detected,
.io_slot_reset = qlcnic_83xx_io_slot_reset,
.io_resume = qlcnic_83xx_io_resume,
+ .get_beacon_state = qlcnic_83xx_get_beacon_state,
+ .enable_sds_intr = qlcnic_83xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
+ .enable_tx_intr = qlcnic_83xx_enable_tx_intr,
+ .disable_tx_intr = qlcnic_83xx_disable_tx_intr,
};
@@ -267,11 +290,22 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
}
}
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_enable_legacy(struct qlcnic_adapter *adapter)
{
- int err, i, num_msix;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ /* MSI-X enablement failed, use legacy interrupt */
+ adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR;
+ adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK;
+ adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR;
+ adapter->msix_entries[0].vector = adapter->pdev->irq;
+ dev_info(&adapter->pdev->dev, "using legacy interrupt\n");
+}
+
+static int qlcnic_83xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
+{
+ int num_msix;
+
num_msix = adapter->drv_sds_rings;
/* account for AEN interrupt MSI-X based interrupts */
@@ -280,29 +314,44 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
num_msix += adapter->drv_tx_rings;
- err = qlcnic_enable_msix(adapter, num_msix);
- if (err == -ENOMEM)
- return err;
- if (adapter->flags & QLCNIC_MSIX_ENABLED)
- num_msix = adapter->ahw->num_msix;
- else {
- if (qlcnic_sriov_vf_check(adapter))
- return -EINVAL;
- num_msix = 1;
+ return num_msix;
+}
+
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, i, num_msix;
+
+ if (adapter->flags & QLCNIC_TSS_RSS) {
+ err = qlcnic_setup_tss_rss_intr(adapter);
+ if (err < 0)
+ return err;
+ num_msix = ahw->num_msix;
+ } else {
+ num_msix = qlcnic_83xx_calculate_msix_vector(adapter);
+
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM)
+ return err;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ num_msix = ahw->num_msix;
+ } else {
+ if (qlcnic_sriov_vf_check(adapter))
+ return -EINVAL;
+ num_msix = 1;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+ }
}
+
/* setup interrupt mapping table for fw */
ahw->intr_tbl = vzalloc(num_msix *
sizeof(struct qlcnic_intrpt_config));
if (!ahw->intr_tbl)
return -ENOMEM;
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
- /* MSI-X enablement failed, use legacy interrupt */
- adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR;
- adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK;
- adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR;
- adapter->msix_entries[0].vector = adapter->pdev->irq;
- dev_info(&adapter->pdev->dev, "using legacy interrupt\n");
- }
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_enable_legacy(adapter);
for (i = 0; i < num_msix; i++) {
if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -312,35 +361,22 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
ahw->intr_tbl[i].id = i;
ahw->intr_tbl[i].src = 0;
}
+
return 0;
}
-inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
+static inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
{
writel(0, adapter->tgt_mask_reg);
}
-inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
+static inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
{
if (adapter->tgt_mask_reg)
writel(1, adapter->tgt_mask_reg);
}
-/* Enable MSI-x and INT-x interrupts */
-void qlcnic_83xx_enable_intr(struct qlcnic_adapter *adapter,
- struct qlcnic_host_sds_ring *sds_ring)
-{
- writel(0, sds_ring->crb_intr_mask);
-}
-
-/* Disable MSI-x and INT-x interrupts */
-void qlcnic_83xx_disable_intr(struct qlcnic_adapter *adapter,
- struct qlcnic_host_sds_ring *sds_ring)
-{
- writel(1, sds_ring->crb_intr_mask);
-}
-
-inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
+static inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
*adapter)
{
u32 mask;
@@ -447,8 +483,9 @@ irqreturn_t qlcnic_83xx_intr(int irq, void *data)
qlcnic_83xx_poll_process_aen(adapter);
- if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- ahw->diag_cnt++;
+ if (ahw->diag_test) {
+ if (ahw->diag_test == QLCNIC_INTERRUPT_TEST)
+ ahw->diag_cnt++;
qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
return IRQ_HANDLED;
}
@@ -476,7 +513,7 @@ irqreturn_t qlcnic_83xx_tmp_intr(int irq, void *data)
done:
adapter->ahw->diag_cnt++;
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
return IRQ_HANDLED;
}
@@ -633,10 +670,10 @@ int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
return status;
}
-void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u16 act_pci_fn = ahw->act_pci_func;
+ u16 act_pci_fn = ahw->total_nic_func;
u16 count;
ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT;
@@ -868,7 +905,7 @@ static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
return;
}
-void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 event[QLC_83XX_MBX_AEN_CNT];
@@ -1275,8 +1312,8 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
/* send the mailbox command*/
err = qlcnic_issue_cmd(adapter, &cmd);
if (err) {
- dev_err(&adapter->pdev->dev,
- "Failed to create Tx ctx in firmware 0x%x\n", err);
+ netdev_err(adapter->netdev,
+ "Failed to create Tx ctx in firmware 0x%x\n", err);
goto out;
}
mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2];
@@ -1287,8 +1324,9 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src;
tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
- dev_info(&adapter->pdev->dev, "Tx Context[0x%x] Created, state:0x%x\n",
- tx->ctx_id, mbx_out->state);
+ netdev_info(adapter->netdev,
+ "Tx Context[0x%x] Created, state:0x%x\n",
+ tx->ctx_id, mbx_out->state);
out:
qlcnic_free_mbx_args(&cmd);
return err;
@@ -1340,16 +1378,11 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
}
if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
- /* disable and free mailbox interrupt */
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
- qlcnic_83xx_enable_mbx_poll(adapter);
- qlcnic_83xx_free_mbx_intr(adapter);
- }
adapter->ahw->loopback_state = 0;
adapter->ahw->hw_ops->setup_link_event(adapter, 1);
}
@@ -1363,33 +1396,20 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
- int ring, err;
+ int ring;
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
- qlcnic_83xx_disable_intr(adapter, sds_ring);
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_enable_mbx_poll(adapter);
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_disable_sds_intr(adapter, sds_ring);
}
}
qlcnic_fw_destroy_ctx(adapter);
qlcnic_detach(adapter);
- if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
- err = qlcnic_83xx_setup_mbx_intr(adapter);
- qlcnic_83xx_disable_mbx_poll(adapter);
- if (err) {
- dev_err(&adapter->pdev->dev,
- "%s: failed to setup mbx interrupt\n",
- __func__);
- goto out;
- }
- }
- }
adapter->ahw->diag_test = 0;
adapter->drv_sds_rings = drv_sds_rings;
@@ -1399,13 +1419,37 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
if (netif_running(netdev))
__qlcnic_up(adapter, netdev);
- if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
- !(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_disable_mbx_poll(adapter);
out:
netif_device_attach(netdev);
}
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ u8 beacon_state;
+ int err = 0;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_CONFIG);
+ if (!err) {
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (!err) {
+ beacon_state = cmd.rsp.arg[4];
+ if (beacon_state == QLCNIC_BEACON_DISABLE)
+ ahw->beacon_state = QLC_83XX_BEACON_OFF;
+ else if (beacon_state == QLC_83XX_ENABLE_BEACON)
+ ahw->beacon_state = QLC_83XX_BEACON_ON;
+ }
+ } else {
+ netdev_err(adapter->netdev, "Get beacon state failed, err=%d\n",
+ err);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return;
+}
+
int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
u32 beacon)
{
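
The new qlcnic_83xx_get_beacon_state() above follows the driver's usual mailbox shape: allocate the command arguments, issue the command, decode one response word, and free the arguments on every path. A rough sketch of that shape with placeholder types and stubbed mailbox calls (not the real qlcnic API):

#include <stdio.h>

struct mbx_cmd { unsigned int rsp[8]; };

static int  mbx_alloc(struct mbx_cmd *cmd) { (void)cmd; return 0; }
static int  mbx_issue(struct mbx_cmd *cmd) { cmd->rsp[4] = 0xD; return 0; }
static void mbx_free(struct mbx_cmd *cmd)  { (void)cmd; }

#define BEACON_DISABLE 0xD
#define BEACON_ENABLE  0xE

static int get_beacon_state(int *state)
{
	struct mbx_cmd cmd;
	int err;

	err = mbx_alloc(&cmd);
	if (err)
		return err;

	err = mbx_issue(&cmd);
	if (!err) {
		if (cmd.rsp[4] == BEACON_DISABLE)
			*state = 0;
		else if (cmd.rsp[4] == BEACON_ENABLE)
			*state = 1;
	}

	mbx_free(&cmd);		/* freed whether the command succeeded or not */
	return err;
}

int main(void)
{
	int state = -1;

	if (!get_beacon_state(&state))
		printf("beacon %s\n", state ? "on" : "off");
	return 0;
}
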
@@ -1518,8 +1562,7 @@ int qlcnic_83xx_set_led(struct net_device *netdev,
return err;
}
-void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
- int enable)
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *adapter, int enable)
{
struct qlcnic_cmd_args cmd;
int status;
@@ -1527,21 +1570,21 @@ void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
if (qlcnic_sriov_vf_check(adapter))
return;
- if (enable) {
+ if (enable)
status = qlcnic_alloc_mbx_args(&cmd, adapter,
QLCNIC_CMD_INIT_NIC_FUNC);
- if (status)
- return;
-
- cmd.req.arg[1] = BIT_0 | BIT_31;
- } else {
+ else
status = qlcnic_alloc_mbx_args(&cmd, adapter,
QLCNIC_CMD_STOP_NIC_FUNC);
- if (status)
- return;
- cmd.req.arg[1] = BIT_0 | BIT_31;
- }
+ if (status)
+ return;
+
+ cmd.req.arg[1] = QLC_REGISTER_LB_IDC | QLC_INIT_FW_RESOURCES;
+
+ if (adapter->dcb)
+ cmd.req.arg[1] |= QLC_REGISTER_DCB_AEN;
+
status = qlcnic_issue_cmd(adapter, &cmd);
if (status)
dev_err(&adapter->pdev->dev,
@@ -1551,7 +1594,7 @@ void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
qlcnic_free_mbx_args(&cmd);
}
-int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
{
struct qlcnic_cmd_args cmd;
int err;
@@ -1568,7 +1611,7 @@ int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
{
struct qlcnic_cmd_args cmd;
int err;
@@ -1610,7 +1653,9 @@ static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
u32 *interface_id)
{
if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_alloc_lb_filters_mem(adapter);
qlcnic_pf_set_interface_id_promisc(adapter, interface_id);
+ adapter->rx_mac_learn = true;
} else {
if (!qlcnic_sriov_vf_check(adapter))
*interface_id = adapter->recv_ctx->context_id << 16;
@@ -1637,7 +1682,11 @@ int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
- cmd->req.arg[1] = (mode ? 1 : 0) | temp;
+
+ if (qlcnic_84xx_check(adapter) && qlcnic_sriov_pf_check(adapter))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+
+ cmd->req.arg[1] = mode | temp;
err = qlcnic_issue_cmd(adapter, cmd);
if (!err)
return err;
@@ -1704,12 +1753,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
}
} while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
- /* Make sure carrier is off and queue is stopped during loopback */
- if (netif_running(netdev)) {
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
- }
-
ret = qlcnic_do_lb_test(adapter, mode);
qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -1737,7 +1780,7 @@ static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
ahw->extend_lb_time = 0;
}
-int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct net_device *netdev = adapter->netdev;
@@ -1806,7 +1849,7 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
return status;
}
-int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 config = ahw->port_config, max_wait_count;
@@ -2041,8 +2084,8 @@ void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
qlcnic_83xx_sre_macaddr_change(adapter, mac, vlan_id, QLCNIC_MAC_ADD);
}
-void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
- u8 type, struct qlcnic_cmd_args *cmd)
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 type, struct qlcnic_cmd_args *cmd)
{
switch (type) {
case QLCNIC_SET_STATION_MAC:
@@ -2086,37 +2129,130 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
return err;
}
-void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_set_rx_intr_coal(struct qlcnic_adapter *adapter)
{
- int err;
- u16 temp;
- struct qlcnic_cmd_args cmd;
struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ struct qlcnic_cmd_args cmd;
+ u16 temp;
+ int err;
- if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
- return;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+ if (err)
+ return err;
+
+ temp = adapter->recv_ctx->context_id;
+ cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
+ temp = coal->rx_time_us;
+ cmd.req.arg[2] = coal->rx_packets | temp << 16;
+ cmd.req.arg[3] = coal->flag;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ netdev_err(adapter->netdev,
+ "failed to set interrupt coalescing parameters\n");
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int qlcnic_83xx_set_tx_intr_coal(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ struct qlcnic_cmd_args cmd;
+ u16 temp;
+ int err;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
if (err)
- return;
+ return err;
- if (coal->type == QLCNIC_INTR_COAL_TYPE_RX) {
- temp = adapter->recv_ctx->context_id;
- cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
- temp = coal->rx_time_us;
- cmd.req.arg[2] = coal->rx_packets | temp << 16;
- } else if (coal->type == QLCNIC_INTR_COAL_TYPE_TX) {
- temp = adapter->tx_ring->ctx_id;
- cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16;
- temp = coal->tx_time_us;
- cmd.req.arg[2] = coal->tx_packets | temp << 16;
- }
+ temp = adapter->tx_ring->ctx_id;
+ cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16;
+ temp = coal->tx_time_us;
+ cmd.req.arg[2] = coal->tx_packets | temp << 16;
cmd.req.arg[3] = coal->flag;
+
err = qlcnic_issue_cmd(adapter, &cmd);
if (err != QLCNIC_RCODE_SUCCESS)
- dev_info(&adapter->pdev->dev,
- "Failed to send interrupt coalescence parameters\n");
+ netdev_err(adapter->netdev,
+ "failed to set interrupt coalescing parameters\n");
+
qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+
+ err = qlcnic_83xx_set_rx_intr_coal(adapter);
+ if (err)
+ netdev_err(adapter->netdev,
+ "failed to set Rx coalescing parameters\n");
+
+ err = qlcnic_83xx_set_tx_intr_coal(adapter);
+ if (err)
+ netdev_err(adapter->netdev,
+ "failed to set Tx coalescing parameters\n");
+
+ return err;
+}
+
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ u32 rx_coalesce_usecs, rx_max_frames;
+ u32 tx_coalesce_usecs, tx_max_frames;
+ int err;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ tx_coalesce_usecs = ethcoal->tx_coalesce_usecs;
+ tx_max_frames = ethcoal->tx_max_coalesced_frames;
+ rx_coalesce_usecs = ethcoal->rx_coalesce_usecs;
+ rx_max_frames = ethcoal->rx_max_coalesced_frames;
+ coal->flag = QLCNIC_INTR_DEFAULT;
+
+ if ((coal->rx_time_us == rx_coalesce_usecs) &&
+ (coal->rx_packets == rx_max_frames)) {
+ coal->type = QLCNIC_INTR_COAL_TYPE_TX;
+ coal->tx_time_us = tx_coalesce_usecs;
+ coal->tx_packets = tx_max_frames;
+ } else if ((coal->tx_time_us == tx_coalesce_usecs) &&
+ (coal->tx_packets == tx_max_frames)) {
+ coal->type = QLCNIC_INTR_COAL_TYPE_RX;
+ coal->rx_time_us = rx_coalesce_usecs;
+ coal->rx_packets = rx_max_frames;
+ } else {
+ coal->type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ coal->rx_time_us = rx_coalesce_usecs;
+ coal->rx_packets = rx_max_frames;
+ coal->tx_time_us = tx_coalesce_usecs;
+ coal->tx_packets = tx_max_frames;
+ }
+
+ switch (coal->type) {
+ case QLCNIC_INTR_COAL_TYPE_RX:
+ err = qlcnic_83xx_set_rx_intr_coal(adapter);
+ break;
+ case QLCNIC_INTR_COAL_TYPE_TX:
+ err = qlcnic_83xx_set_tx_intr_coal(adapter);
+ break;
+ case QLCNIC_INTR_COAL_TYPE_RX_TX:
+ err = qlcnic_83xx_set_rx_tx_intr_coal(adapter);
+ break;
+ default:
+ err = -EINVAL;
+ netdev_err(adapter->netdev,
+ "Invalid Interrupt coalescing type\n");
+ break;
+ }
+
+ return err;
}
static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
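
The reworked config_intr_coal() above decides which mailbox command(s) to send by checking which side of the coalescing settings the caller actually changed: only Tx changed sends the Tx command, only Rx changed sends the Rx command, and both changed sends both. A minimal sketch of that decision with illustrative names:

#include <stdio.h>

enum coal_type { COAL_RX, COAL_TX, COAL_RX_TX };

static enum coal_type pick_type(unsigned int cur_rx_us, unsigned int cur_rx_pkts,
				unsigned int cur_tx_us, unsigned int cur_tx_pkts,
				unsigned int new_rx_us, unsigned int new_rx_pkts,
				unsigned int new_tx_us, unsigned int new_tx_pkts)
{
	if (cur_rx_us == new_rx_us && cur_rx_pkts == new_rx_pkts)
		return COAL_TX;		/* only the Tx side changed */
	if (cur_tx_us == new_tx_us && cur_tx_pkts == new_tx_pkts)
		return COAL_RX;		/* only the Rx side changed */
	return COAL_RX_TX;		/* both sides changed */
}

int main(void)
{
	/* Rx left at 3 us / 256 pkts, Tx raised from 64 us to 128 us. */
	enum coal_type t = pick_type(3, 256, 64, 64,
				     3, 256, 128, 64);

	printf("%s\n", t == COAL_TX ? "tx only" :
		       t == COAL_RX ? "rx only" : "rx+tx");
	return 0;
}
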
@@ -2141,10 +2277,11 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
ahw->link_autoneg = MSB(MSW(data[3]));
ahw->module_type = MSB(LSW(data[3]));
ahw->has_link_events = 1;
+ ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
qlcnic_advert_link_change(adapter, link_status);
}
-irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
{
struct qlcnic_adapter *adapter = data;
struct qlcnic_mailbox *mbx;
@@ -2170,36 +2307,6 @@ out:
return IRQ_HANDLED;
}
-int qlcnic_enable_eswitch(struct qlcnic_adapter *adapter, u8 port, u8 enable)
-{
- int err = -EIO;
- struct qlcnic_cmd_args cmd;
-
- if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
- dev_err(&adapter->pdev->dev,
- "%s: Error, invoked by non management func\n",
- __func__);
- return err;
- }
-
- err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH);
- if (err)
- return err;
-
- cmd.req.arg[1] = (port & 0xf) | BIT_4;
- err = qlcnic_issue_cmd(adapter, &cmd);
-
- if (err != QLCNIC_RCODE_SUCCESS) {
- dev_err(&adapter->pdev->dev, "Failed to enable eswitch%d\n",
- err);
- err = -EIO;
- }
- qlcnic_free_mbx_args(&cmd);
-
- return err;
-
-}
-
int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter,
struct qlcnic_info *nic)
{
@@ -2293,11 +2400,37 @@ out:
return err;
}
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *adapter, u16 type,
+ u16 *nic, u16 *fcoe, u16 *iscsi)
+{
+ struct device *dev = &adapter->pdev->dev;
+ int err = 0;
+
+ switch (type) {
+ case QLCNIC_TYPE_NIC:
+ (*nic)++;
+ break;
+ case QLCNIC_TYPE_FCOE:
+ (*fcoe)++;
+ break;
+ case QLCNIC_TYPE_ISCSI:
+ (*iscsi)++;
+ break;
+ default:
+ dev_err(dev, "%s: Unknown PCI type[%x]\n",
+ __func__, type);
+ err = -EIO;
+ }
+
+ return err;
+}
+
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
struct qlcnic_pci_info *pci_info)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct device *dev = &adapter->pdev->dev;
+ u16 nic = 0, fcoe = 0, iscsi = 0;
struct qlcnic_cmd_args cmd;
int i, err = 0, j = 0;
u32 temp;
@@ -2308,16 +2441,20 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
err = qlcnic_issue_cmd(adapter, &cmd);
- ahw->act_pci_func = 0;
+ ahw->total_nic_func = 0;
if (err == QLCNIC_RCODE_SUCCESS) {
ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF;
- for (i = 2, j = 0; j < QLCNIC_MAX_PCI_FUNC; j++, pci_info++) {
+ for (i = 2, j = 0; j < ahw->max_vnic_func; j++, pci_info++) {
pci_info->id = cmd.rsp.arg[i] & 0xFFFF;
pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
i++;
+ if (!pci_info->active) {
+ i += QLC_SKIP_INACTIVE_PCI_REGS;
+ continue;
+ }
pci_info->type = cmd.rsp.arg[i] & 0xFFFF;
- if (pci_info->type == QLCNIC_TYPE_NIC)
- ahw->act_pci_func++;
+ err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+ &nic, &fcoe, &iscsi);
temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
pci_info->default_port = temp;
i++;
@@ -2335,6 +2472,13 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
err = -EIO;
}
+ ahw->total_nic_func = nic;
+ ahw->total_pci_func = nic + fcoe + iscsi;
+ if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+ dev_err(dev, "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+ __func__, ahw->total_nic_func, ahw->total_pci_func);
+ err = -EIO;
+ }
qlcnic_free_mbx_args(&cmd);
return err;
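
The get_pci_info() rework above classifies each active PCI function by type through qlcnic_get_pci_func_type() and then sanity-checks the totals before trusting them. An illustrative userspace version of that counting and validation step (types and values here are made up for the sketch):

#include <stdio.h>

enum func_type { TYPE_NIC = 1, TYPE_FCOE = 2, TYPE_ISCSI = 3 };

int main(void)
{
	enum func_type funcs[] = { TYPE_NIC, TYPE_FCOE, TYPE_NIC, TYPE_ISCSI };
	unsigned int nic = 0, fcoe = 0, iscsi = 0, i;

	for (i = 0; i < sizeof(funcs) / sizeof(funcs[0]); i++) {
		switch (funcs[i]) {
		case TYPE_NIC:   nic++;   break;
		case TYPE_FCOE:  fcoe++;  break;
		case TYPE_ISCSI: iscsi++; break;
		}
	}

	if (nic == 0 || nic + fcoe + iscsi == 0) {
		fprintf(stderr, "invalid function count\n");
		return 1;
	}

	printf("nic=%u total=%u\n", nic, nic + fcoe + iscsi);
	return 0;
}
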
@@ -3484,7 +3628,7 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
return 0;
}
-int qlcnic_83xx_shutdown(struct pci_dev *pdev)
+static int qlcnic_83xx_shutdown(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
@@ -3506,7 +3650,7 @@ int qlcnic_83xx_shutdown(struct pci_dev *pdev)
return 0;
}
-int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlc_83xx_idc *idc = &ahw->idc;
@@ -3754,6 +3898,19 @@ static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
return;
}
+static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 offset;
+
+ offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+ dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x",
+ readl(ahw->pci_base0 + offset),
+ QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL),
+ QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL),
+ QLCRDX(ahw, QLCNIC_FW_MBX_CTRL));
+}
+
static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
{
struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
@@ -3798,6 +3955,8 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
__func__, cmd->cmd_op, cmd->type, ahw->pci_func,
ahw->op_mode);
clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_dump_mailbox_registers(adapter);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
qlcnic_dump_mbx(adapter, cmd);
qlcnic_83xx_idc_request_reset(adapter,
QLCNIC_FORCE_FW_DUMP_KEY);
@@ -3844,8 +4003,8 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
return 0;
}
-pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
@@ -3866,7 +4025,7 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
-pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
int err = 0;
@@ -3889,7 +4048,7 @@ disconnect:
return PCI_ERS_RESULT_DISCONNECT;
}
-void qlcnic_83xx_io_resume(struct pci_dev *pdev)
+static void qlcnic_83xx_io_resume(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 4cae6caa6bfa..f92485ca21d1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -324,6 +324,11 @@ struct qlc_83xx_idc {
char **name;
};
+enum qlcnic_vlan_operations {
+ QLC_VLAN_ADD = 0,
+ QLC_VLAN_DELETE
+};
+
/* Device States */
enum qlcnic_83xx_states {
QLC_83XX_IDC_DEV_UNKNOWN,
@@ -376,6 +381,8 @@ enum qlcnic_83xx_states {
/* LED configuration settings */
#define QLC_83XX_ENABLE_BEACON 0xe
+#define QLC_83XX_BEACON_ON 1
+#define QLC_83XX_BEACON_OFF 0
#define QLC_83XX_LED_RATE 0xff
#define QLC_83XX_LED_ACT (1 << 10)
#define QLC_83XX_LED_MOD (0 << 13)
@@ -518,6 +525,11 @@ enum qlc_83xx_ext_regs {
QLC_83XX_ASIC_TEMP,
};
+/* Initialize/Stop NIC command bit definitions */
+#define QLC_REGISTER_DCB_AEN BIT_1
+#define QLC_REGISTER_LB_IDC BIT_0
+#define QLC_INIT_FW_RESOURCES BIT_31
+
/* 83xx functions */
int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
@@ -532,17 +544,13 @@ void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong, int *);
int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
-void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []);
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
-int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8);
-int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-int qlcnic_83xx_config_intr_coalesce(struct qlcnic_adapter *);
void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
-void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *, int);
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
int qlcnic_83xx_napi_add(struct qlcnic_adapter *, struct net_device *);
void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
@@ -563,32 +571,22 @@ void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
-void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
- struct qlcnic_cmd_args *);
int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
void qlcnic_free_mbx_args(struct qlcnic_cmd_args *);
void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
struct qlcnic_info *);
-void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *);
-irqreturn_t qlcnic_83xx_handle_aen(int, void *);
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *);
int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *);
void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_intr(int, void *);
irqreturn_t qlcnic_83xx_tmp_intr(int, void *);
-void qlcnic_83xx_enable_intr(struct qlcnic_adapter *,
- struct qlcnic_host_sds_ring *);
-void qlcnic_83xx_disable_intr(struct qlcnic_adapter *,
- struct qlcnic_host_sds_ring *);
void qlcnic_83xx_check_vf(struct qlcnic_adapter *,
const struct pci_device_id *);
-void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
-int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
-int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
-int qlcnic_enable_eswitch(struct qlcnic_adapter *, u8, u8);
-int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *);
int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *);
void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *);
@@ -610,9 +608,7 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
u32, u8 *, int);
int qlcnic_83xx_init(struct qlcnic_adapter *, int);
int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
-int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
-int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *);
void qlcnic_83xx_idc_exit(struct qlcnic_adapter *);
void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
@@ -620,7 +616,6 @@ void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
-int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *, int);
int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
@@ -648,9 +643,6 @@ int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
-void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
-int qlcnic_83xx_shutdown(struct pci_dev *);
-int qlcnic_83xx_resume(struct qlcnic_adapter *);
int qlcnic_83xx_idc_init(struct qlcnic_adapter *);
int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *);
int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *);
@@ -658,8 +650,4 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
-pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
- pci_channel_state_t);
-pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
-void qlcnic_83xx_io_resume(struct pci_dev *);
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 89208e5b25d6..90a2dda351ec 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -39,6 +39,9 @@
static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter);
static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev);
static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
+static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *);
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
/* Template header */
struct qlc_83xx_reset_hdr {
@@ -380,7 +383,7 @@ static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter)
qlcnic_up(adapter, netdev);
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- dev_err(&adapter->pdev->dev, "%s:\n", __func__);
+ netdev_info(adapter->netdev, "%s: soft reset complete.\n", __func__);
return 0;
}
@@ -614,8 +617,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
qlcnic_83xx_enable_mbx_interrupt(adapter);
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ qlcnic_83xx_initialize_nic(adapter, 1);
err = qlcnic_sriov_pf_reinit(adapter);
if (err)
@@ -740,6 +742,7 @@ static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
adapter->ahw->idc.err_code = -EIO;
dev_err(&adapter->pdev->dev,
"%s: Device in unknown state\n", __func__);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
return 0;
}
@@ -818,7 +821,6 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_mailbox *mbx = ahw->mailbox;
int ret = 0;
- u32 owner;
u32 val;
/* Perform NIC configuration based ready state entry actions */
@@ -848,9 +850,9 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
set_bit(__QLCNIC_RESETTING, &adapter->state);
qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
} else {
- owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
- if (ahw->pci_func == owner)
- qlcnic_dump_fw(adapter);
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
}
return -EIO;
}
@@ -948,13 +950,26 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
return 0;
}
-static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
{
- dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 val, owner;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner) {
+ qlcnic_83xx_stop_hw(adapter);
+ qlcnic_dump_fw(adapter);
+ }
+ }
+
+ netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n",
+ __func__);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- adapter->ahw->idc.err_code = -EIO;
+ ahw->idc.err_code = -EIO;
- return 0;
+ return;
}
static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter)
@@ -1063,12 +1078,6 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
qlcnic_83xx_periodic_tasks(adapter);
- /* Do not reschedule if firmaware is in hanged state and auto
- * recovery is disabled
- */
- if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset)
- return;
-
/* Re-schedule the function */
if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
@@ -1219,10 +1228,10 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
}
val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
- if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) ||
- !qlcnic_auto_fw_reset) {
- dev_err(&adapter->pdev->dev,
- "%s:failed, device in non reset mode\n", __func__);
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 0);
qlcnic_83xx_unlock_driver(adapter);
return;
}
@@ -1254,24 +1263,24 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
if (size & 0xF)
size = (size + 16) & ~0xF;
- p_cache = kzalloc(size, GFP_KERNEL);
+ p_cache = vzalloc(size);
if (p_cache == NULL)
return -ENOMEM;
ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache,
size / sizeof(u32));
if (ret) {
- kfree(p_cache);
+ vfree(p_cache);
return ret;
}
/* 16 byte write to MS memory */
ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
size / 16);
if (ret) {
- kfree(p_cache);
+ vfree(p_cache);
return ret;
}
- kfree(p_cache);
+ vfree(p_cache);
return ret;
}
@@ -1522,7 +1531,7 @@ static int qlcnic_83xx_check_cmd_peg_status(struct qlcnic_adapter *p_dev)
return -EIO;
}
-int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
+static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
{
int err;
@@ -1595,7 +1604,7 @@ static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev)
}
}
-int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
+static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
{
struct qlcnic_hardware_context *ahw = p_dev->ahw;
u32 addr, count, prev_ver, curr_ver;
@@ -1994,6 +2003,14 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
if (!(val & QLC_83XX_IDC_GRACEFULL_RESET))
qlcnic_dump_fw(adapter);
+
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return err;
+ }
+
qlcnic_83xx_init_hw(adapter);
if (qlcnic_83xx_copy_bootloader(adapter))
@@ -2014,7 +2031,7 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
return 0;
}
-int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
{
int err;
struct qlcnic_info nic_info;
@@ -2073,8 +2090,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
ahw->nic_mode = QLCNIC_DEFAULT_MODE;
adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
- adapter->max_sds_rings = ahw->max_rx_ques;
- adapter->max_tx_rings = ahw->max_tx_ques;
+ adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+ adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
} else {
return -EIO;
}
@@ -2198,9 +2215,9 @@ static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter)
int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- struct qlcnic_dcb *dcb;
int err = 0;
+ adapter->rx_mac_learn = false;
ahw->msix_supported = !!qlcnic_use_msi_x;
qlcnic_83xx_init_rings(adapter);
@@ -2250,8 +2267,7 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ qlcnic_83xx_initialize_nic(adapter, 1);
/* Configure default, SR-IOV or Virtual NIC mode of operation */
err = qlcnic_83xx_configure_opmode(adapter);
@@ -2264,11 +2280,6 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
if (err)
goto disable_mbx_intr;
- dcb = adapter->dcb;
-
- if (dcb && qlcnic_dcb_attach(dcb))
- qlcnic_clear_dcb_ops(dcb);
-
/* Periodically monitor device status */
qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
return 0;
@@ -2299,7 +2310,7 @@ void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
qlcnic_83xx_disable_vnic_mode(adapter, 1);
qlcnic_83xx_idc_detach_driver(adapter);
- qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ qlcnic_83xx_initialize_nic(adapter, 0);
cancel_delayed_work_sync(&adapter->idc_aen_work);
}
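
The qlcnic_83xx_copy_bootloader() hunks above stage the bootloader image in a host buffer before the 128-bit writes to MS memory, and that staging buffer is now obtained with vzalloc() rather than kzalloc(): the image can be large, so a physically contiguous allocation may fail, and the buffer is only touched by the CPU. A minimal sketch of that allocation pattern, assuming only the standard vzalloc()/vfree() API (the qlc_stage_buffer() helper is hypothetical, not part of the driver):

#include <linux/types.h>
#include <linux/vmalloc.h>

/* Hypothetical helper: stage a large, CPU-only buffer.  vzalloc() does not
 * require physically contiguous pages, so big allocations are less likely
 * to fail than with kzalloc(); it may sleep, so call it in process context.
 */
static void *qlc_stage_buffer(size_t size)
{
	/* Round the size up to a 16-byte multiple, as the patch above does
	 * before the 16-byte MS memory writes.
	 */
	if (size & 0xF)
		size = (size + 16) & ~0xF;

	return vzalloc(size);	/* caller releases the buffer with vfree() */
}
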
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index 734d28602ac3..be7d7a62cc0d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -8,7 +8,7 @@
#include "qlcnic.h"
#include "qlcnic_hw.h"
-int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+static int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
{
if (lock) {
if (qlcnic_83xx_lock_driver(adapter))
@@ -107,7 +107,7 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
npar = adapter->npars;
- for (i = 0; i < ahw->act_pci_func; i++, npar++) {
+ for (i = 0; i < ahw->total_nic_func; i++, npar++) {
dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n",
npar->pci_func, npar->active, npar->type,
npar->phy_port, npar->min_bw, npar->max_bw,
@@ -115,7 +115,7 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
}
dev_info(dev, "Max functions = %d, active functions = %d\n",
- ahw->max_pci_func, ahw->act_pci_func);
+ ahw->max_pci_func, ahw->total_nic_func);
if (qlcnic_83xx_set_vnic_opmode(adapter))
return err;
@@ -224,10 +224,14 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
return -EIO;
}
- if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) {
adapter->flags |= QLCNIC_ESWITCH_ENABLED;
- else
+ if (adapter->drv_mac_learn)
+ adapter->rx_mac_learn = true;
+ } else {
adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+ adapter->rx_mac_learn = false;
+ }
ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 859cb161fc63..64dcbf33d8f0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -91,18 +91,6 @@ void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
cmd->rsp.arg = NULL;
}
-static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
-{
- int i;
-
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
- if (adapter->npars[i].pci_func == pci_func)
- return i;
- }
-
- return -1;
-}
-
static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
@@ -966,13 +954,15 @@ out_free_dma:
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
struct qlcnic_pci_info *pci_info)
{
- int err = 0, i;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ size_t npar_size = sizeof(struct qlcnic_pci_info_le);
+ size_t pci_size = npar_size * ahw->max_vnic_func;
+ u16 nic = 0, fcoe = 0, iscsi = 0;
+ struct qlcnic_pci_info_le *npar;
struct qlcnic_cmd_args cmd;
dma_addr_t pci_info_dma_t;
- struct qlcnic_pci_info_le *npar;
void *pci_info_addr;
- size_t npar_size = sizeof(struct qlcnic_pci_info_le);
- size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
+ int err = 0, i;
pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
&pci_info_dma_t, GFP_KERNEL);
@@ -989,14 +979,16 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
cmd.req.arg[3] = pci_size;
err = qlcnic_issue_cmd(adapter, &cmd);
- adapter->ahw->act_pci_func = 0;
+ ahw->total_nic_func = 0;
if (err == QLCNIC_RCODE_SUCCESS) {
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
+ for (i = 0; i < ahw->max_vnic_func; i++, npar++, pci_info++) {
pci_info->id = le16_to_cpu(npar->id);
pci_info->active = le16_to_cpu(npar->active);
+ if (!pci_info->active)
+ continue;
pci_info->type = le16_to_cpu(npar->type);
- if (pci_info->type == QLCNIC_TYPE_NIC)
- adapter->ahw->act_pci_func++;
+ err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+ &nic, &fcoe, &iscsi);
pci_info->default_port =
le16_to_cpu(npar->default_port);
pci_info->tx_min_bw =
@@ -1011,6 +1003,14 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
err = -EIO;
}
+ ahw->total_nic_func = nic;
+ ahw->total_pci_func = nic + fcoe + iscsi;
+ if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+ __func__, ahw->total_nic_func, ahw->total_pci_func);
+ err = -EIO;
+ }
qlcnic_free_mbx_args(&cmd);
out_free_dma:
dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
@@ -1203,7 +1203,7 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
esw_stats->context_id = eswitch;
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
if (adapter->npars[i].phy_port != eswitch)
continue;
@@ -1236,15 +1236,16 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
const u8 port, const u8 rx_tx)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
int err;
u32 arg1;
- struct qlcnic_cmd_args cmd;
- if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ if (ahw->op_mode != QLCNIC_MGMT_FUNC)
return -EIO;
if (func_esw == QLCNIC_STATS_PORT) {
- if (port >= QLCNIC_MAX_PCI_FUNC)
+ if (port >= ahw->max_vnic_func)
goto err_ret;
} else if (func_esw == QLCNIC_STATS_ESWITCH) {
if (port >= QLCNIC_NIU_MAX_XG_PORTS)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 86bca7c14f99..77f1bce432d2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -15,7 +15,6 @@
#define QLC_DCB_GET_MAP(V) (1 << V)
-#define QLC_DCB_AEN_BIT 0x2
#define QLC_DCB_FW_VER 0x2
#define QLC_DCB_MAX_TC 0x8
#define QLC_DCB_MAX_APP 0x8
@@ -71,7 +70,6 @@ static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *);
static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *, bool);
static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
struct qlcnic_dcb_capability {
@@ -179,7 +177,6 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
.get_hw_capability = qlcnic_83xx_dcb_get_hw_capability,
.query_cee_param = qlcnic_83xx_dcb_query_cee_param,
.get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg,
- .register_aen = qlcnic_83xx_dcb_register_aen,
.aen_handler = qlcnic_83xx_dcb_aen_handler,
};
@@ -260,6 +257,9 @@ int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
{
struct qlcnic_dcb *dcb;
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC);
if (!dcb)
return -ENOMEM;
@@ -280,7 +280,6 @@ static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb)
return;
adapter = dcb->adapter;
- qlcnic_dcb_register_aen(dcb, 0);
while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
usleep_range(10000, 11000);
@@ -304,7 +303,6 @@ static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
{
qlcnic_dcb_get_hw_capability(dcb);
qlcnic_dcb_get_cee_cfg(dcb);
- qlcnic_dcb_register_aen(dcb, 1);
}
static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
@@ -642,29 +640,6 @@ static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
return err;
}
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *dcb, bool flag)
-{
- u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
- struct qlcnic_adapter *adapter = dcb->adapter;
- struct qlcnic_cmd_args cmd;
- int err;
-
- err = qlcnic_alloc_mbx_args(&cmd, adapter, val);
- if (err)
- return err;
-
- cmd.req.arg[1] = QLC_DCB_AEN_BIT;
-
- err = qlcnic_issue_cmd(adapter, &cmd);
- if (err)
- dev_err(&adapter->pdev->dev, "Failed to %s DCBX AEN, err %d\n",
- (flag ? "register" : "unregister"), err);
-
- qlcnic_free_mbx_args(&cmd);
-
- return err;
-}
-
static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
{
u32 *val = data;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index c04ae0cdc108..3cf4a10fbe1e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -25,7 +25,6 @@ struct qlcnic_dcb_ops {
int (*get_hw_capability) (struct qlcnic_dcb *);
int (*query_cee_param) (struct qlcnic_dcb *, char *, u8);
void (*init_dcbnl_ops) (struct qlcnic_dcb *);
- int (*register_aen) (struct qlcnic_dcb *, bool);
void (*aen_handler) (struct qlcnic_dcb *, void *);
int (*get_cee_cfg) (struct qlcnic_dcb *);
void (*get_info) (struct qlcnic_dcb *);
@@ -103,13 +102,6 @@ static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
return 0;
}
-static inline void
-qlcnic_dcb_register_aen(struct qlcnic_dcb *dcb, u8 flag)
-{
- if (dcb && dcb->ops->register_aen)
- dcb->ops->register_aen(dcb, flag);
-}
-
static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
{
if (dcb && dcb->ops->aen_handler)
@@ -121,4 +113,10 @@ static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
if (dcb && dcb->ops->init_dcbnl_ops)
dcb->ops->init_dcbnl_ops(dcb);
}
+
+static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
+{
+ if (dcb && qlcnic_dcb_attach(dcb))
+ qlcnic_clear_dcb_ops(dcb);
+}
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index b36c02fafcfd..acee1a5d80c6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -167,27 +167,35 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
-static inline int qlcnic_82xx_statistics(void)
+static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter)
{
- return ARRAY_SIZE(qlcnic_device_gstrings_stats) +
- ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ return ARRAY_SIZE(qlcnic_gstrings_stats) +
+ ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+ QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
}
-static inline int qlcnic_83xx_statistics(void)
+static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter)
{
- return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
+ return ARRAY_SIZE(qlcnic_gstrings_stats) +
+ ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
- ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+ ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) +
+ QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
}
static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
{
- if (qlcnic_82xx_check(adapter))
- return qlcnic_82xx_statistics();
- else if (qlcnic_83xx_check(adapter))
- return qlcnic_83xx_statistics();
- else
- return -1;
+ int len = -1;
+
+ if (qlcnic_82xx_check(adapter)) {
+ len = qlcnic_82xx_statistics(adapter);
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ len += ARRAY_SIZE(qlcnic_device_gstrings_stats);
+ } else if (qlcnic_83xx_check(adapter)) {
+ len = qlcnic_83xx_statistics(adapter);
+ }
+
+ return len;
}
#define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412
@@ -221,7 +229,7 @@ static const u32 ext_diag_registers[] = {
-1
};
-#define QLCNIC_MGMT_API_VERSION 2
+#define QLCNIC_MGMT_API_VERSION 3
#define QLCNIC_ETHTOOL_REGS_VER 4
static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
@@ -270,21 +278,8 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
sizeof(drvinfo->version));
}
-static int
-qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
-{
- struct qlcnic_adapter *adapter = netdev_priv(dev);
-
- if (qlcnic_82xx_check(adapter))
- return qlcnic_82xx_get_settings(adapter, ecmd);
- else if (qlcnic_83xx_check(adapter))
- return qlcnic_83xx_get_settings(adapter, ecmd);
-
- return -EIO;
-}
-
-int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
- struct ethtool_cmd *ecmd)
+static int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 speed, reg;
@@ -425,6 +420,20 @@ skip:
return 0;
}
+static int qlcnic_get_settings(struct net_device *dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (qlcnic_82xx_check(adapter))
+ return qlcnic_82xx_get_settings(adapter, ecmd);
+ else if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_get_settings(adapter, ecmd);
+
+ return -EIO;
+}
+
+
static int qlcnic_set_port_config(struct qlcnic_adapter *adapter,
struct ethtool_cmd *ecmd)
{
@@ -519,6 +528,9 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
regs_buff[1] = QLCNIC_MGMT_API_VERSION;
+ if (adapter->ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ regs_buff[2] = adapter->ahw->max_vnic_func;
+
if (qlcnic_82xx_check(adapter))
i = qlcnic_82xx_get_registers(adapter, regs_buff);
else
@@ -667,30 +679,25 @@ qlcnic_set_ringparam(struct net_device *dev,
static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
u8 rx_ring, u8 tx_ring)
{
+ if (rx_ring == 0 || tx_ring == 0)
+ return -EINVAL;
+
if (rx_ring != 0) {
if (rx_ring > adapter->max_sds_rings) {
- netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
+ netdev_err(adapter->netdev,
+ "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
rx_ring, adapter->max_sds_rings);
return -EINVAL;
}
}
if (tx_ring != 0) {
- if (qlcnic_82xx_check(adapter) &&
- (tx_ring > adapter->max_tx_rings)) {
+ if (tx_ring > adapter->max_tx_rings) {
netdev_err(adapter->netdev,
"Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n",
tx_ring, adapter->max_tx_rings);
return -EINVAL;
}
-
- if (qlcnic_83xx_check(adapter) &&
- (tx_ring > QLCNIC_SINGLE_RING)) {
- netdev_err(adapter->netdev,
- "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n",
- tx_ring, QLCNIC_SINGLE_RING);
- return -EINVAL;
- }
}
return 0;
@@ -729,6 +736,7 @@ static int qlcnic_set_channels(struct net_device *dev,
channel->rx_count);
return err;
}
+ adapter->drv_rss_rings = channel->rx_count;
}
if (channel->tx_count) {
@@ -739,10 +747,12 @@ static int qlcnic_set_channels(struct net_device *dev,
channel->tx_count);
return err;
}
+ adapter->drv_tss_rings = channel->tx_count;
}
- err = qlcnic_setup_rings(adapter, channel->rx_count,
- channel->tx_count);
+ adapter->flags |= QLCNIC_TSS_RSS;
+
+ err = qlcnic_setup_rings(adapter);
netdev_info(dev, "Allocated %d SDS rings and %d Tx rings\n",
adapter->drv_sds_rings, adapter->drv_tx_rings);
@@ -925,18 +935,13 @@ static int qlcnic_eeprom_test(struct net_device *dev)
static int qlcnic_get_sset_count(struct net_device *dev, int sset)
{
- int len;
struct qlcnic_adapter *adapter = netdev_priv(dev);
switch (sset) {
case ETH_SS_TEST:
return QLCNIC_TEST_LEN;
case ETH_SS_STATS:
- len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN;
- if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
- qlcnic_83xx_check(adapter))
- return len;
- return qlcnic_82xx_statistics();
+ return qlcnic_dev_statistics_len(adapter);
default:
return -EOPNOTSUPP;
}
@@ -948,6 +953,7 @@ static int qlcnic_irq_test(struct net_device *netdev)
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
int ret, drv_sds_rings = adapter->drv_sds_rings;
+ int drv_tx_rings = adapter->drv_tx_rings;
if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_interrupt_test(netdev);
@@ -980,6 +986,7 @@ free_diag_res:
clear_diag_irq:
adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
@@ -1052,7 +1059,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
return 0;
}
-int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
+static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int drv_tx_rings = adapter->drv_tx_rings;
@@ -1270,7 +1277,7 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
return data;
}
-static void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+void qlcnic_update_stats(struct qlcnic_adapter *adapter)
{
struct qlcnic_host_tx_ring *tx_ring;
int ring;
@@ -1491,9 +1498,7 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ethcoal)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_nic_intr_coalesce *coal;
- u32 rx_coalesce_usecs, rx_max_frames;
- u32 tx_coalesce_usecs, tx_max_frames;
+ int err;
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return -EINVAL;
@@ -1503,82 +1508,31 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
* unsupported parameters are set.
*/
if (ethcoal->rx_coalesce_usecs > 0xffff ||
- ethcoal->rx_max_coalesced_frames > 0xffff ||
- ethcoal->tx_coalesce_usecs > 0xffff ||
- ethcoal->tx_max_coalesced_frames > 0xffff ||
- ethcoal->rx_coalesce_usecs_irq ||
- ethcoal->rx_max_coalesced_frames_irq ||
- ethcoal->tx_coalesce_usecs_irq ||
- ethcoal->tx_max_coalesced_frames_irq ||
- ethcoal->stats_block_coalesce_usecs ||
- ethcoal->use_adaptive_rx_coalesce ||
- ethcoal->use_adaptive_tx_coalesce ||
- ethcoal->pkt_rate_low ||
- ethcoal->rx_coalesce_usecs_low ||
- ethcoal->rx_max_coalesced_frames_low ||
- ethcoal->tx_coalesce_usecs_low ||
- ethcoal->tx_max_coalesced_frames_low ||
- ethcoal->pkt_rate_high ||
- ethcoal->rx_coalesce_usecs_high ||
- ethcoal->rx_max_coalesced_frames_high ||
- ethcoal->tx_coalesce_usecs_high ||
- ethcoal->tx_max_coalesced_frames_high)
+ ethcoal->rx_max_coalesced_frames > 0xffff ||
+ ethcoal->tx_coalesce_usecs > 0xffff ||
+ ethcoal->tx_max_coalesced_frames > 0xffff ||
+ ethcoal->rx_coalesce_usecs_irq ||
+ ethcoal->rx_max_coalesced_frames_irq ||
+ ethcoal->tx_coalesce_usecs_irq ||
+ ethcoal->tx_max_coalesced_frames_irq ||
+ ethcoal->stats_block_coalesce_usecs ||
+ ethcoal->use_adaptive_rx_coalesce ||
+ ethcoal->use_adaptive_tx_coalesce ||
+ ethcoal->pkt_rate_low ||
+ ethcoal->rx_coalesce_usecs_low ||
+ ethcoal->rx_max_coalesced_frames_low ||
+ ethcoal->tx_coalesce_usecs_low ||
+ ethcoal->tx_max_coalesced_frames_low ||
+ ethcoal->pkt_rate_high ||
+ ethcoal->rx_coalesce_usecs_high ||
+ ethcoal->rx_max_coalesced_frames_high ||
+ ethcoal->tx_coalesce_usecs_high ||
+ ethcoal->tx_max_coalesced_frames_high)
return -EINVAL;
- coal = &adapter->ahw->coal;
+ err = qlcnic_config_intr_coalesce(adapter, ethcoal);
- if (qlcnic_83xx_check(adapter)) {
- if (!ethcoal->tx_coalesce_usecs ||
- !ethcoal->tx_max_coalesced_frames ||
- !ethcoal->rx_coalesce_usecs ||
- !ethcoal->rx_max_coalesced_frames) {
- coal->flag = QLCNIC_INTR_DEFAULT;
- coal->type = QLCNIC_INTR_COAL_TYPE_RX;
- coal->rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
- coal->rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
- coal->tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
- coal->tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
- } else {
- tx_coalesce_usecs = ethcoal->tx_coalesce_usecs;
- tx_max_frames = ethcoal->tx_max_coalesced_frames;
- rx_coalesce_usecs = ethcoal->rx_coalesce_usecs;
- rx_max_frames = ethcoal->rx_max_coalesced_frames;
- coal->flag = 0;
-
- if ((coal->rx_time_us == rx_coalesce_usecs) &&
- (coal->rx_packets == rx_max_frames)) {
- coal->type = QLCNIC_INTR_COAL_TYPE_TX;
- coal->tx_time_us = tx_coalesce_usecs;
- coal->tx_packets = tx_max_frames;
- } else if ((coal->tx_time_us == tx_coalesce_usecs) &&
- (coal->tx_packets == tx_max_frames)) {
- coal->type = QLCNIC_INTR_COAL_TYPE_RX;
- coal->rx_time_us = rx_coalesce_usecs;
- coal->rx_packets = rx_max_frames;
- } else {
- coal->type = QLCNIC_INTR_COAL_TYPE_RX;
- coal->rx_time_us = rx_coalesce_usecs;
- coal->rx_packets = rx_max_frames;
- coal->tx_time_us = tx_coalesce_usecs;
- coal->tx_packets = tx_max_frames;
- }
- }
- } else {
- if (!ethcoal->rx_coalesce_usecs ||
- !ethcoal->rx_max_coalesced_frames) {
- coal->flag = QLCNIC_INTR_DEFAULT;
- coal->rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
- coal->rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
- } else {
- coal->flag = 0;
- coal->rx_time_us = ethcoal->rx_coalesce_usecs;
- coal->rx_packets = ethcoal->rx_max_coalesced_frames;
- }
- }
-
- qlcnic_config_intr_coalesce(adapter);
-
- return 0;
+ return err;
}
static int qlcnic_get_intr_coalesce(struct net_device *netdev,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index d262211b03b3..34e467b239a1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -698,7 +698,6 @@ struct qlcnic_legacy_intr_set {
};
#define QLCNIC_MSIX_BASE 0x132110
-#define QLCNIC_MAX_PCI_FUNC 8
#define QLCNIC_MAX_VLAN_FILTERS 64
#define FLASH_ROM_WINDOW 0x42110030
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 6f7f60c09f07..03d18a0be6ce 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -455,14 +455,14 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
{
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head;
- struct qlcnic_mac_list_s *cur;
int err = -EINVAL;
/* Delete MAC from the existing list */
list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_list_s, list);
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal(addr, cur->mac_addr)) {
err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
0, QLCNIC_MAC_DEL);
if (err)
@@ -477,17 +477,18 @@ int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
{
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head;
- struct qlcnic_mac_list_s *cur;
/* look up if already exists */
list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_list_s, list);
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal(addr, cur->mac_addr) &&
+ cur->vlan_id == vlan)
return 0;
}
- cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
+ cur = kzalloc(sizeof(*cur), GFP_ATOMIC);
if (cur == NULL)
return -ENOMEM;
@@ -499,11 +500,12 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
return -EIO;
}
+ cur->vlan_id = vlan;
list_add_tail(&cur->list, &adapter->mac_list);
return 0;
}
-void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
+static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -516,8 +518,7 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
- if (!qlcnic_sriov_vf_check(adapter))
- qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan);
+ qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan);
qlcnic_nic_add_mac(adapter, bcast_addr, vlan);
if (netdev->flags & IFF_PROMISC) {
@@ -526,15 +527,11 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
} else if ((netdev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(netdev) > ahw->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
- } else if (!netdev_mc_empty(netdev) &&
- !qlcnic_sriov_vf_check(adapter)) {
+ } else if (!netdev_mc_empty(netdev)) {
netdev_for_each_mc_addr(ha, netdev)
qlcnic_nic_add_mac(adapter, ha->addr, vlan);
}
- if (qlcnic_sriov_vf_check(adapter))
- qlcnic_vf_add_mc_list(netdev, vlan);
-
/* configure unicast MAC address, if there is not sufficient space
* to store all the unicast addresses then enable promiscuous mode
*/
@@ -545,14 +542,15 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
qlcnic_nic_add_mac(adapter, ha->addr, vlan);
}
- if (!qlcnic_sriov_vf_check(adapter)) {
- if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
- !adapter->fdb_mac_learn) {
- qlcnic_alloc_lb_filters_mem(adapter);
- adapter->drv_mac_learn = true;
- } else {
- adapter->drv_mac_learn = false;
- }
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+ !adapter->fdb_mac_learn) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->drv_mac_learn = 1;
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ adapter->rx_mac_learn = true;
+ } else {
+ adapter->drv_mac_learn = 0;
+ adapter->rx_mac_learn = false;
}
qlcnic_nic_set_promisc(adapter, mode);
@@ -561,16 +559,17 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
void qlcnic_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_mac_vlan_list *cur;
struct netdev_hw_addr *ha;
- struct qlcnic_mac_list_s *cur;
+ size_t temp;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
if (qlcnic_sriov_vf_check(adapter)) {
if (!netdev_mc_empty(netdev)) {
netdev_for_each_mc_addr(ha, netdev) {
- cur = kzalloc(sizeof(struct qlcnic_mac_list_s),
- GFP_ATOMIC);
+ temp = sizeof(struct qlcnic_mac_vlan_list);
+ cur = kzalloc(temp, GFP_ATOMIC);
if (cur == NULL)
break;
memcpy(cur->mac_addr,
@@ -605,11 +604,11 @@ int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter)
{
- struct qlcnic_mac_list_s *cur;
struct list_head *head = &adapter->mac_list;
+ struct qlcnic_mac_vlan_list *cur;
while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
qlcnic_sre_macaddr_change(adapter,
cur->mac_addr, 0, QLCNIC_MAC_DEL);
list_del(&cur->list);
@@ -756,10 +755,7 @@ int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter)
return 0;
}
-/*
- * Send the interrupt coalescing parameter set by ethtool to the card.
- */
-void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *adapter)
{
struct qlcnic_nic_req req;
int rv;
@@ -781,10 +777,32 @@ void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter)
if (rv != 0)
dev_err(&adapter->netdev->dev,
"Could not send interrupt coalescing parameters\n");
+
+ return rv;
+}
+
+/* Send the interrupt coalescing parameter set by ethtool to the card. */
+int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ int rv;
+
+ coal->flag = QLCNIC_INTR_DEFAULT;
+ coal->rx_time_us = ethcoal->rx_coalesce_usecs;
+ coal->rx_packets = ethcoal->rx_max_coalesced_frames;
+
+ rv = qlcnic_82xx_set_rx_coalesce(adapter);
+
+ if (rv)
+ netdev_err(adapter->netdev,
+ "Failed to set Rx coalescing parametrs\n");
+
+ return rv;
}
-#define QLCNIC_ENABLE_IPV4_LRO 1
-#define QLCNIC_ENABLE_IPV6_LRO 2
+#define QLCNIC_ENABLE_IPV4_LRO BIT_0
+#define QLCNIC_ENABLE_IPV6_LRO (BIT_1 | BIT_9)
int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
{
@@ -948,7 +966,7 @@ int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int enable)
return rv;
}
-int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
+static int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
{
struct qlcnic_nic_req req;
u64 word;
@@ -1247,7 +1265,7 @@ static int qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter,
return 0;
}
-void
+static void
qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
{
void __iomem *addr = adapter->ahw->pci_base0 +
@@ -1258,7 +1276,7 @@ qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
mutex_unlock(&adapter->ahw->mem_lock);
}
-void
+static void
qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
{
void __iomem *addr = adapter->ahw->pci_base0 +
@@ -1494,7 +1512,7 @@ int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
return 0;
}
-int
+static int
qlcnic_wol_supported(struct qlcnic_adapter *adapter)
{
u32 wol_cfg;
@@ -1534,19 +1552,34 @@ int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
return rv;
}
-int qlcnic_get_beacon_state(struct qlcnic_adapter *adapter, u8 *h_state)
+void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
- int err;
+ u8 beacon_state;
+ int err = 0;
- err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_STATUS);
- if (!err) {
- err = qlcnic_issue_cmd(adapter, &cmd);
- if (!err)
- *h_state = cmd.rsp.arg[1];
+ if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_LED_STATUS);
+ if (!err) {
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Failed to get current beacon state, err=%d\n",
+ err);
+ } else {
+ beacon_state = cmd.rsp.arg[1];
+ if (beacon_state == QLCNIC_BEACON_DISABLE)
+ ahw->beacon_state = QLCNIC_BEACON_OFF;
+ else if (beacon_state == QLCNIC_BEACON_EANBLE)
+ ahw->beacon_state = QLCNIC_BEACON_ON;
+ }
+ }
+ qlcnic_free_mbx_args(&cmd);
}
- qlcnic_free_mbx_args(&cmd);
- return err;
+
+ return;
}
void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
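
In the qlcnic_hw.c hunks above, the driver's MAC list entries become struct qlcnic_mac_vlan_list and the duplicate check now matches on both the address (using ether_addr_equal() instead of memcmp()) and the VLAN id. A short sketch of that lookup, assuming the field layout implied by the usage above (the structure definition itself is not part of this diff, and qlc_mac_vlan_on_list() is a hypothetical name):

#include <linux/types.h>
#include <linux/list.h>
#include <linux/etherdevice.h>

/* Field layout assumed from the usage in the hunks above; illustrative only. */
struct qlcnic_mac_vlan_list {
	struct list_head list;
	u8  mac_addr[ETH_ALEN];
	u16 vlan_id;
};

/* Return true if an identical (MAC, VLAN) pair is already on the list,
 * mirroring the duplicate check in qlcnic_nic_add_mac() above.
 */
static bool qlc_mac_vlan_on_list(struct list_head *head,
				 const u8 *addr, u16 vlan)
{
	struct qlcnic_mac_vlan_list *cur;

	list_for_each_entry(cur, head, list) {
		if (ether_addr_equal(addr, cur->mac_addr) &&
		    cur->vlan_id == vlan)
			return true;
	}

	return false;
}
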
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 13303e7d1ed7..63d75617d445 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -162,16 +162,18 @@ struct qlcnic_host_tx_ring;
struct qlcnic_hardware_context;
struct qlcnic_adapter;
-int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int);
int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev);
+void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
u64 *uaddr, u16 vlan_id);
-void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
+int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int);
void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
__be32, int);
@@ -181,9 +183,6 @@ int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8);
int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
-void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
-irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *);
int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index e9c21e5d0ca9..c4262c23ed7c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
struct qlcnic_skb_frag *buffrag;
int i, j;
+ spin_lock(&tx_ring->tx_clean_lock);
+
cmd_buf = tx_ring->cmd_buf_arr;
for (i = 0; i < tx_ring->num_desc; i++) {
buffrag = cmd_buf->frag_array;
@@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
}
cmd_buf++;
}
+
+ spin_unlock(&tx_ring->tx_clean_lock);
}
void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 0149c9495347..54ebf300332a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -124,41 +124,16 @@
#define qlcnic_83xx_is_ip_align(sts) (((sts) >> 46) & 1)
#define qlcnic_83xx_has_vlan_tag(sts) (((sts) >> 47) & 1)
-struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
- struct qlcnic_host_rds_ring *, u16, u16);
+static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
+ int max);
-inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring)
-{
- if (qlcnic_check_multi_tx(adapter) &&
- !adapter->ahw->diag_test)
- writel(0x0, tx_ring->crb_intr_mask);
-}
-
-
-static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring)
-{
- if (qlcnic_check_multi_tx(adapter) &&
- !adapter->ahw->diag_test)
- writel(1, tx_ring->crb_intr_mask);
-}
-
-inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring)
-{
- writel(0, tx_ring->crb_intr_mask);
-}
-
-inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring)
-{
- writel(1, tx_ring->crb_intr_mask);
-}
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
+ struct qlcnic_host_rds_ring *,
+ u16, u16);
-static inline u8 qlcnic_mac_hash(u64 mac)
+static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan)
{
- return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff));
+ return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff));
}
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
@@ -202,7 +177,7 @@ static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
struct hlist_node *n;
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
- if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
+ if (ether_addr_equal(tmp_fil->faddr, addr) &&
tmp_fil->vlan_id == vlan_id)
return tmp_fil;
}
@@ -210,8 +185,8 @@ static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
return NULL;
}
-void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
- int loopback_pkt, u16 vlan_id)
+static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
+ struct sk_buff *skb, int loopback_pkt, u16 vlan_id)
{
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
struct qlcnic_filter *fil, *tmp_fil;
@@ -221,8 +196,11 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
u8 hindex, op;
int ret;
+ if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff))
+ vlan_id = 0;
+
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
- hindex = qlcnic_mac_hash(src_addr) &
+ hindex = qlcnic_mac_hash(src_addr, vlan_id) &
(adapter->fhash.fbucket_size - 1);
if (loopback_pkt) {
@@ -322,31 +300,47 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc,
struct sk_buff *skb)
{
+ struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ struct net_device *netdev = adapter->netdev;
+ u16 protocol = ntohs(skb->protocol);
struct qlcnic_filter *fil, *tmp_fil;
- struct hlist_node *n;
struct hlist_head *head;
- struct net_device *netdev = adapter->netdev;
- struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ struct hlist_node *n;
u64 src_addr = 0;
u16 vlan_id = 0;
- u8 hindex;
+ u8 hindex, hval;
- if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
- return;
+ if (!qlcnic_sriov_pf_check(adapter)) {
+ if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+ return;
+ } else {
+ if (protocol == ETH_P_8021Q) {
+ vh = (struct vlan_ethhdr *)skb->data;
+ vlan_id = ntohs(vh->h_vlan_TCI);
+ } else if (vlan_tx_tag_present(skb)) {
+ vlan_id = vlan_tx_tag_get(skb);
+ }
+
+ if (ether_addr_equal(phdr->h_source, adapter->mac_addr) &&
+ !vlan_id)
+ return;
+ }
if (adapter->fhash.fnum >= adapter->fhash.fmax) {
adapter->stats.mac_filter_limit_overrun++;
- netdev_info(netdev, "Can not add more than %d mac addresses\n",
- adapter->fhash.fmax);
+ netdev_info(netdev, "Can not add more than %d mac-vlan filters, configured %d\n",
+ adapter->fhash.fmax, adapter->fhash.fnum);
return;
}
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
- hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
+ hval = qlcnic_mac_hash(src_addr, vlan_id);
+ hindex = hval & (adapter->fhash.fbucket_size - 1);
head = &(adapter->fhash.fhead[hindex]);
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
- if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+ if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, &src_addr,
@@ -687,17 +681,20 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
if (adapter->ahw->linkup && !linkup) {
netdev_info(netdev, "NIC Link is down\n");
adapter->ahw->linkup = 0;
- if (netif_running(netdev)) {
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
- }
+ netif_carrier_off(netdev);
} else if (!adapter->ahw->linkup && linkup) {
- netdev_info(netdev, "NIC Link is up\n");
adapter->ahw->linkup = 1;
- if (netif_running(netdev)) {
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
+
+ /* Do not advertise Link up to the stack if device
+ * is in loopback mode
+ */
+ if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
+ netdev_info(netdev, "NIC Link is up for loopback test\n");
+ return;
}
+
+ netdev_info(netdev, "NIC Link is up\n");
+ netif_carrier_on(netdev);
}
}
@@ -784,7 +781,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
- if (!spin_trylock(&adapter->tx_clean_lock))
+ if (!spin_trylock(&tx_ring->tx_clean_lock))
return 1;
sw_consumer = tx_ring->sw_consumer;
@@ -813,8 +810,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
break;
}
+ tx_ring->sw_consumer = sw_consumer;
+
if (count && netif_running(netdev)) {
- tx_ring->sw_consumer = sw_consumer;
smp_mb();
if (netif_tx_queue_stopped(tx_ring->txq) &&
netif_carrier_ok(netdev)) {
@@ -840,7 +838,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
- spin_unlock(&adapter->tx_clean_lock);
+
+ spin_unlock(&tx_ring->tx_clean_lock);
return done;
}
@@ -862,7 +861,7 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
qlcnic_enable_tx_intr(adapter, tx_ring);
}
}
@@ -903,7 +902,7 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
return work_done;
@@ -1015,9 +1014,9 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
}
}
-struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *ring,
- u16 index, u16 cksum)
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *ring,
+ u16 index, u16 cksum)
{
struct qlcnic_rx_buffer *buffer;
struct sk_buff *skb;
@@ -1156,13 +1155,13 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
u32 seq_number;
- if (unlikely(ring > adapter->max_rds_rings))
+ if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = qlcnic_get_lro_sts_refhandle(sts_data0);
- if (unlikely(index > rds_ring->num_desc))
+ if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
@@ -1236,7 +1235,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
return buffer;
}
-int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_adapter *adapter = sds_ring->adapter;
@@ -1466,8 +1465,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_check_multi_tx(adapter) &&
- !adapter->ahw->diag_test &&
- (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+ !adapter->ahw->diag_test) {
netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
NAPI_POLL_WEIGHT);
} else {
@@ -1535,13 +1533,12 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
if (qlcnic_check_multi_tx(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED) &&
- !adapter->ahw->diag_test &&
- (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+ !adapter->ahw->diag_test) {
for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
@@ -1562,7 +1559,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- qlcnic_disable_int(sds_ring);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
napi_synchronize(&sds_ring->napi);
napi_disable(&sds_ring->napi);
}
@@ -1572,7 +1569,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
qlcnic_check_multi_tx(adapter)) {
for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
- qlcnic_disable_tx_int(adapter, tx_ring);
+ qlcnic_disable_tx_intr(adapter, tx_ring);
napi_synchronize(&tx_ring->napi);
napi_disable(&tx_ring->napi);
}
@@ -1601,7 +1598,8 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
struct sk_buff *skb;
struct qlcnic_host_rds_ring *rds_ring;
int index, length, cksum, is_lb_pkt;
- u16 vid = 0xffff, t_vid;
+ u16 vid = 0xffff;
+ int err;
if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
@@ -1619,19 +1617,19 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
- if (adapter->drv_mac_learn &&
- (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- t_vid = 0;
- is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
- qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
- }
-
if (length > rds_ring->skb_size)
skb_put(skb, rds_ring->skb_size);
else
skb_put(skb, length);
- if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ err = qlcnic_check_rx_tagging(adapter, skb, &vid);
+
+ if (adapter->rx_mac_learn) {
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
+ }
+
+ if (unlikely(err)) {
adapter->stats.rxdropped++;
dev_kfree_skb(skb);
return buffer;
@@ -1666,15 +1664,16 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
int l2_hdr_offset, l4_hdr_offset;
int index, is_lb_pkt;
u16 lro_length, length, data_offset, gso_size;
- u16 vid = 0xffff, t_vid;
+ u16 vid = 0xffff;
+ int err;
- if (unlikely(ring > adapter->max_rds_rings))
+ if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = qlcnic_83xx_hndl(sts_data[0]);
- if (unlikely(index > rds_ring->num_desc))
+ if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
@@ -1688,12 +1687,6 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
- if (adapter->drv_mac_learn &&
- (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- t_vid = 0;
- is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
- qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
- }
if (qlcnic_83xx_is_tstamp(sts_data[1]))
data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
else
@@ -1702,7 +1695,14 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
skb_put(skb, lro_length + data_offset);
skb_pull(skb, l2_hdr_offset);
- if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ err = qlcnic_check_rx_tagging(adapter, skb, &vid);
+
+ if (adapter->rx_mac_learn) {
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
+ }
+
+ if (unlikely(err)) {
adapter->stats.rxdropped++;
dev_kfree_skb(skb);
return buffer;
@@ -1832,7 +1832,7 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
return work_done;
@@ -1855,7 +1855,7 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
return work_done;
@@ -1874,7 +1874,7 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
if (work_done) {
napi_complete(&tx_ring->napi);
if (test_bit(__QLCNIC_DEV_UP , &adapter->state))
- qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
}
return work_done;
@@ -1892,7 +1892,7 @@ static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
return work_done;
@@ -1912,7 +1912,7 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
if (adapter->flags & QLCNIC_MSIX_ENABLED)
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
@@ -1920,7 +1920,7 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
- qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
}
}
}
@@ -1938,7 +1938,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (adapter->flags & QLCNIC_MSIX_ENABLED)
- qlcnic_83xx_disable_intr(adapter, sds_ring);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
napi_synchronize(&sds_ring->napi);
napi_disable(&sds_ring->napi);
}
@@ -1947,7 +1947,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
- qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
+ qlcnic_disable_tx_intr(adapter, tx_ring);
napi_synchronize(&tx_ring->napi);
napi_disable(&tx_ring->napi);
}
@@ -2027,8 +2027,8 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
qlcnic_free_tx_rings(adapter);
}
-void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
- int ring, u64 sts_data[])
+static void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
+ int ring, u64 sts_data[])
{
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct sk_buff *skb;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 05c1eef8df13..ba78c7481fa3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -81,6 +81,16 @@ static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
+static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
+static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
+static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
+static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
+static void qlcnic_82xx_io_resume(struct pci_dev *);
+static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+
static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -308,12 +318,12 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
{
- struct qlcnic_mac_list_s *cur;
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head;
list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_list_s, list);
- if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) {
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) {
qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
0, QLCNIC_MAC_DEL);
list_del(&cur->list);
@@ -337,7 +347,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
- if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN))
+ if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data))
return 0;
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
@@ -546,6 +556,11 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
.io_error_detected = qlcnic_82xx_io_error_detected,
.io_slot_reset = qlcnic_82xx_io_slot_reset,
.io_resume = qlcnic_82xx_io_resume,
+ .get_beacon_state = qlcnic_82xx_get_beacon_state,
+ .enable_sds_intr = qlcnic_82xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_82xx_disable_sds_intr,
+ .enable_tx_intr = qlcnic_82xx_enable_tx_intr,
+ .disable_tx_intr = qlcnic_82xx_disable_tx_intr,
};
static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
@@ -588,9 +603,6 @@ void qlcnic_set_tx_ring_count(struct qlcnic_adapter *adapter, u8 tx_cnt)
QLCNIC_TX_QUEUE);
else
adapter->drv_tx_rings = tx_cnt;
-
- dev_info(&adapter->pdev->dev, "Set %d Tx rings\n",
- adapter->drv_tx_rings);
}
void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
@@ -601,25 +613,79 @@ void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
QLCNIC_RX_QUEUE);
else
adapter->drv_sds_rings = rx_cnt;
-
- dev_info(&adapter->pdev->dev, "Set %d SDS rings\n",
- adapter->drv_sds_rings);
}
-int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
+int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- int drv_tx_rings, drv_sds_rings, tx_vector;
- int err = -1, i;
+ int num_msix = 0, err = 0, vector;
+
+ adapter->flags &= ~QLCNIC_TSS_RSS;
+
+ if (adapter->drv_tss_rings > 0)
+ num_msix += adapter->drv_tss_rings;
+ else
+ num_msix += adapter->drv_tx_rings;
+
+ if (adapter->drv_rss_rings > 0)
+ num_msix += adapter->drv_rss_rings;
+ else
+ num_msix += adapter->drv_sds_rings;
+
+ if (qlcnic_83xx_check(adapter))
+ num_msix += 1;
+
+ if (!adapter->msix_entries) {
+ adapter->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+ }
+
+restore:
+ for (vector = 0; vector < num_msix; vector++)
+ adapter->msix_entries[vector].entry = vector;
- if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
- drv_tx_rings = 0;
- tx_vector = 0;
+ err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
+ if (err == 0) {
+ adapter->ahw->num_msix = num_msix;
+ if (adapter->drv_tss_rings > 0)
+ adapter->drv_tx_rings = adapter->drv_tss_rings;
+
+ if (adapter->drv_rss_rings > 0)
+ adapter->drv_sds_rings = adapter->drv_rss_rings;
} else {
- drv_tx_rings = adapter->drv_tx_rings;
- tx_vector = 1;
+ netdev_info(adapter->netdev,
+ "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
+ num_msix, err);
+
+ num_msix = adapter->drv_tx_rings + adapter->drv_sds_rings;
+
+ /* Set rings to 0 so we can restore original TSS/RSS count */
+ adapter->drv_tss_rings = 0;
+ adapter->drv_rss_rings = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ num_msix += 1;
+
+ netdev_info(adapter->netdev,
+ "Restoring %d Tx, %d SDS rings for total %d vectors.\n",
+ adapter->drv_tx_rings, adapter->drv_sds_rings,
+ num_msix);
+ goto restore;
+
+ err = -EIO;
}
+ return err;
+}
+
+int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err = -1, vector;
+
if (!adapter->msix_entries) {
adapter->msix_entries = kcalloc(num_msix,
sizeof(struct msix_entry),
@@ -628,48 +694,43 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
return -ENOMEM;
}
- adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
if (adapter->ahw->msix_supported) {
- enable_msix:
- for (i = 0; i < num_msix; i++)
- adapter->msix_entries[i].entry = i;
+enable_msix:
+ for (vector = 0; vector < num_msix; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
if (err == 0) {
adapter->flags |= QLCNIC_MSIX_ENABLED;
- if (qlcnic_83xx_check(adapter)) {
- adapter->ahw->num_msix = num_msix;
- /* subtract mail box and tx ring vectors */
- adapter->drv_sds_rings = num_msix -
- drv_tx_rings - 1;
- } else {
- adapter->ahw->num_msix = num_msix;
- if (qlcnic_check_multi_tx(adapter) &&
- !adapter->ahw->diag_test &&
- (adapter->drv_tx_rings > 1))
- drv_sds_rings = num_msix - drv_tx_rings;
- else
- drv_sds_rings = num_msix;
-
- adapter->drv_sds_rings = drv_sds_rings;
- }
+ adapter->ahw->num_msix = num_msix;
dev_info(&pdev->dev, "using msi-x interrupts\n");
return err;
} else if (err > 0) {
dev_info(&pdev->dev,
- "Unable to allocate %d MSI-X interrupt vectors\n",
- num_msix);
- if (qlcnic_83xx_check(adapter)) {
- if (err < (QLC_83XX_MINIMUM_VECTOR - tx_vector))
- return err;
- err -= drv_tx_rings + 1;
+ "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
+ num_msix, err);
+
+ if (qlcnic_82xx_check(adapter)) {
num_msix = rounddown_pow_of_two(err);
- num_msix += drv_tx_rings + 1;
+ if (err < QLCNIC_82XX_MINIMUM_VECTOR)
+ return -EIO;
} else {
- num_msix = rounddown_pow_of_two(err);
- if (qlcnic_check_multi_tx(adapter))
- num_msix += drv_tx_rings;
+ num_msix = rounddown_pow_of_two(err - 1);
+ num_msix += 1;
+ if (err < QLCNIC_83XX_MINIMUM_VECTOR)
+ return -EIO;
+ }
+
+ if (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter)) {
+ adapter->drv_sds_rings = num_msix;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+ } else {
+ /* Distribute vectors equally */
+ adapter->drv_tx_rings = num_msix / 2;
+ adapter->drv_sds_rings = adapter->drv_tx_rings;
}
if (num_msix) {
@@ -680,14 +741,29 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
}
} else {
dev_info(&pdev->dev,
- "Unable to allocate %d MSI-X interrupt vectors\n",
- num_msix);
+ "Unable to allocate %d MSI-X vectors, err=%d\n",
+ num_msix, err);
+ return err;
}
}
return err;
}
+static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
+{
+ int num_msix;
+
+ num_msix = adapter->drv_sds_rings;
+
+ if (qlcnic_check_multi_tx(adapter))
+ num_msix += adapter->drv_tx_rings;
+ else
+ num_msix += QLCNIC_SINGLE_RING;
+
+ return num_msix;
+}
+
static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
{
int err = 0;
@@ -722,25 +798,29 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
{
int num_msix, err = 0;
- num_msix = adapter->drv_sds_rings;
-
- if (qlcnic_check_multi_tx(adapter))
- num_msix += adapter->drv_tx_rings;
+ if (adapter->flags & QLCNIC_TSS_RSS) {
+ err = qlcnic_setup_tss_rss_intr(adapter);
+ if (err < 0)
+ return err;
+ num_msix = adapter->ahw->num_msix;
+ } else {
+ num_msix = qlcnic_82xx_calculate_msix_vector(adapter);
- err = qlcnic_enable_msix(adapter, num_msix);
- if (err == -ENOMEM)
- return err;
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM)
+ return err;
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
- qlcnic_disable_multi_tx(adapter);
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ qlcnic_disable_multi_tx(adapter);
- err = qlcnic_enable_msi_legacy(adapter);
- if (!err)
- return err;
+ err = qlcnic_enable_msi_legacy(adapter);
+ if (!err)
+ return err;
+ }
}
return 0;
@@ -800,25 +880,26 @@ static void qlcnic_cleanup_pci_map(struct qlcnic_hardware_context *ahw)
static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_pci_info *pci_info;
int ret;
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- switch (adapter->ahw->port_type) {
+ switch (ahw->port_type) {
case QLCNIC_GBE:
- adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_GBE_PORTS;
+ ahw->total_nic_func = QLCNIC_NIU_MAX_GBE_PORTS;
break;
case QLCNIC_XGBE:
- adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_XG_PORTS;
+ ahw->total_nic_func = QLCNIC_NIU_MAX_XG_PORTS;
break;
}
return 0;
}
- if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ if (ahw->op_mode == QLCNIC_MGMT_FUNC)
return 0;
- pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
@@ -846,12 +927,13 @@ static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_pci_info *pci_info;
int i, id = 0, ret = 0, j = 0;
u16 act_pci_func;
u8 pfn;
- pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
@@ -859,7 +941,7 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
if (ret)
goto err_pci_info;
- act_pci_func = adapter->ahw->act_pci_func;
+ act_pci_func = ahw->total_nic_func;
adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
act_pci_func, GFP_KERNEL);
@@ -875,10 +957,10 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
goto err_npars;
}
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < ahw->max_vnic_func; i++) {
pfn = pci_info[i].id;
- if (pfn >= QLCNIC_MAX_PCI_FUNC) {
+ if (pfn >= ahw->max_vnic_func) {
ret = QL_STATUS_INVALID_PARAM;
goto err_eswitch;
}
@@ -1178,6 +1260,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
} else {
adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
}
@@ -1345,7 +1428,7 @@ int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
if (adapter->need_fw_reset)
return 0;
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
if (!adapter->npars[i].eswitch_status)
continue;
@@ -1408,7 +1491,7 @@ int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
return 0;
/* Set the NPAR config data after FW reset */
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
npar = &adapter->npars[i];
pci_func = npar->pci_func;
if (!adapter->npars[i].eswitch_status)
@@ -1483,7 +1566,7 @@ qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter)
{
int err;
@@ -1684,6 +1767,33 @@ static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter)
}
}
+static int qlcnic_config_def_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err;
+
+ /* Initialize interrupt coalesce parameters */
+ ahw->coal.flag = QLCNIC_INTR_DEFAULT;
+
+ if (qlcnic_83xx_check(adapter)) {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+ ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+
+ err = qlcnic_83xx_set_rx_tx_intr_coal(adapter);
+ } else {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+
+ err = qlcnic_82xx_set_rx_coalesce(adapter);
+ }
+
+ return err;
+}
+
int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
int ring;
@@ -1716,7 +1826,7 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (adapter->drv_sds_rings > 1)
qlcnic_config_rss(adapter, 1);
- qlcnic_config_intr_coalesce(adapter);
+ qlcnic_config_def_intr_coalesce(adapter);
if (netdev->features & NETIF_F_LRO)
qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
@@ -1727,6 +1837,7 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_linkevent_request(adapter, 1);
adapter->ahw->reset_context = 0;
+ netif_tx_start_all_queues(netdev);
return 0;
}
@@ -1755,7 +1866,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (qlcnic_sriov_vf_check(adapter))
qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
- spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
adapter->ahw->linkup = 0;
netif_tx_disable(netdev);
@@ -1776,7 +1886,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
for (ring = 0; ring < adapter->drv_tx_rings; ring++)
qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
- spin_unlock(&adapter->tx_clean_lock);
}
/* Usage: During suspend and firmware recovery module */
@@ -1863,7 +1972,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int drv_sds_rings)
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
- qlcnic_disable_int(sds_ring);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
}
}
@@ -1886,7 +1995,6 @@ out:
static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
{
- struct qlcnic_hardware_context *ahw = adapter->ahw;
int err = 0;
adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
@@ -1895,15 +2003,7 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
err = -ENOMEM;
goto err_out;
}
- /* Initialize interrupt coalesce parameters */
- ahw->coal.flag = QLCNIC_INTR_DEFAULT;
- ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
- ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
- ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
- if (qlcnic_83xx_check(adapter)) {
- ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
- ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
- }
+
/* clear stats */
memset(&adapter->stats, 0, sizeof(adapter->stats));
err_out:
@@ -1940,7 +2040,6 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
qlcnic_detach(adapter);
adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
- adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
adapter->ahw->diag_test = test;
adapter->ahw->linkup = 0;
@@ -1965,7 +2064,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
}
@@ -1997,7 +2096,7 @@ qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- dev_err(&adapter->pdev->dev, "%s:\n", __func__);
+ netdev_info(adapter->netdev, "%s: soft reset complete\n", __func__);
return 0;
}
@@ -2034,10 +2133,10 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
return err;
}
-void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u16 act_pci_fn = ahw->act_pci_func;
+ u16 act_pci_fn = ahw->total_nic_func;
u16 count;
ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
@@ -2172,6 +2271,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
}
memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
tx_ring->cmd_buf_arr = cmd_buf_arr;
+ spin_lock_init(&tx_ring->tx_clean_lock);
}
if (qlcnic_83xx_check(adapter) ||
@@ -2212,7 +2312,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct qlcnic_hardware_context *ahw;
int err, pci_using_dac = -1;
char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
- struct qlcnic_dcb *dcb;
if (pdev->is_virtfn)
return -ENODEV;
@@ -2290,7 +2389,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_wq;
adapter->dev_rst_time = jiffies;
- adapter->ahw->revision_id = pdev->revision;
+ ahw->revision_id = pdev->revision;
+ ahw->max_vnic_func = qlcnic_get_vnic_func_count(adapter);
if (qlcnic_mac_learn == FDB_MAC_LEARN)
adapter->fdb_mac_learn = true;
else if (qlcnic_mac_learn == DRV_MAC_LEARN)
@@ -2299,7 +2399,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
- spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
qlcnic_register_dcb(adapter);
@@ -2335,10 +2434,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->flags |= QLCNIC_NEED_FLR;
- dcb = adapter->dcb;
-
- if (dcb && qlcnic_dcb_attach(dcb))
- qlcnic_clear_dcb_ops(dcb);
} else if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
@@ -2367,6 +2462,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_hw;
}
+ qlcnic_dcb_enable(adapter->dcb);
+
if (qlcnic_read_mac_addr(adapter))
dev_warn(&pdev->dev, "failed to read mac addr\n");
@@ -2500,13 +2597,11 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_cancel_idc_work(adapter);
ahw = adapter->ahw;
- qlcnic_dcb_free(adapter->dcb);
-
unregister_netdev(netdev);
qlcnic_sriov_cleanup(adapter);
if (qlcnic_83xx_check(adapter)) {
- qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ qlcnic_83xx_initialize_nic(adapter, 0);
cancel_delayed_work_sync(&adapter->idc_aen_work);
qlcnic_83xx_free_mbx_intr(adapter);
qlcnic_83xx_detach_mailbox_work(adapter);
@@ -2514,6 +2609,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
kfree(ahw->fw_info);
}
+ qlcnic_dcb_free(adapter->dcb);
+
qlcnic_detach(adapter);
if (adapter->npars != NULL)
@@ -2608,14 +2705,8 @@ static int qlcnic_open(struct net_device *netdev)
err = __qlcnic_up(adapter, netdev);
if (err)
- goto err_out;
-
- netif_tx_start_all_queues(netdev);
-
- return 0;
+ qlcnic_detach(adapter);
-err_out:
- qlcnic_detach(adapter);
return err;
}
@@ -2642,7 +2733,7 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
if (adapter->fhash.fmax && adapter->fhash.fhead)
return;
- act_pci_func = adapter->ahw->act_pci_func;
+ act_pci_func = adapter->ahw->total_nic_func;
spin_lock_init(&adapter->mac_learn_lock);
spin_lock_init(&adapter->rx_mac_learn_lock);
@@ -2739,12 +2830,58 @@ int qlcnic_check_temp(struct qlcnic_adapter *adapter)
return rv;
}
-static void qlcnic_tx_timeout(struct net_device *netdev)
+static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring)
{
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int i;
+ struct cmd_desc_type0 *tx_desc_info;
+
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ tx_desc_info = &tx_ring->desc_head[i];
+ pr_info("TX Desc: %d\n", i);
+ print_hex_dump(KERN_INFO, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ &tx_ring->desc_head[i],
+ sizeof(struct cmd_desc_type0), true);
+ }
+}
+
+static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
struct qlcnic_host_tx_ring *tx_ring;
int ring;
+ if (!netdev || !netif_running(netdev))
+ return;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netdev_info(netdev, "Tx ring=%d Context Id=0x%x\n",
+ ring, tx_ring->ctx_id);
+ netdev_info(netdev,
+ "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n",
+ tx_ring->tx_stats.xmit_finished,
+ tx_ring->tx_stats.xmit_called,
+ tx_ring->tx_stats.xmit_on,
+ tx_ring->tx_stats.xmit_off);
+ netdev_info(netdev,
+ "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
+ readl(tx_ring->crb_intr_mask),
+ readl(tx_ring->crb_cmd_producer),
+ tx_ring->producer, tx_ring->sw_consumer,
+ le32_to_cpu(*(tx_ring->hw_consumer)));
+
+ netdev_info(netdev, "Total desc=%d, Available desc=%d\n",
+ tx_ring->num_desc, qlcnic_tx_avail(tx_ring));
+
+ if (netif_msg_tx_done(adapter->ahw))
+ dump_tx_ring_desc(tx_ring);
+ }
+}
+
+static void qlcnic_tx_timeout(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return;
@@ -2757,22 +2894,7 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
QLCNIC_FORCE_FW_DUMP_KEY);
} else {
netdev_info(netdev, "Tx timeout, reset adapter context.\n");
- for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
- tx_ring = &adapter->tx_ring[ring];
- netdev_info(netdev, "Tx ring=%d\n", ring);
- netdev_info(netdev,
- "crb_intr_mask=%d, producer=%d, sw_consumer=%d, hw_consumer=%d\n",
- readl(tx_ring->crb_intr_mask),
- readl(tx_ring->crb_cmd_producer),
- tx_ring->sw_consumer,
- le32_to_cpu(*(tx_ring->hw_consumer)));
- netdev_info(netdev,
- "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n",
- tx_ring->tx_stats.xmit_finished,
- tx_ring->tx_stats.xmit_called,
- tx_ring->tx_stats.xmit_on,
- tx_ring->tx_stats.xmit_off);
- }
+ qlcnic_dump_tx_rings(adapter);
adapter->ahw->reset_context = 1;
}
}
@@ -2782,6 +2904,9 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct net_device_stats *stats = &netdev->stats;
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_update_stats(adapter);
+
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
stats->tx_packets = adapter->stats.xmitfinished;
stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
@@ -2792,7 +2917,7 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
return stats;
}
-irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
{
u32 status;
@@ -2831,7 +2956,7 @@ static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
done:
adapter->ahw->diag_cnt++;
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
return IRQ_HANDLED;
}
@@ -2879,17 +3004,39 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev)
{
- int ring;
- struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return;
+
+ recv_ctx = adapter->recv_ctx;
- disable_irq(adapter->irq);
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- qlcnic_intr(adapter->irq, sds_ring);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
+ napi_schedule(&sds_ring->napi);
+ }
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ /* Only Multi-Tx queue capable devices need to
+ * schedule NAPI for TX rings
+ */
+ if ((qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+ (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter)))
+ return;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_disable_tx_intr(adapter, tx_ring);
+ napi_schedule(&tx_ring->napi);
+ }
}
- enable_irq(adapter->irq);
}
#endif
@@ -3285,7 +3432,8 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
qlcnic_api_unlock(adapter);
}
-void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
+static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter,
+ u32 key)
{
u32 state, xg_val = 0, gb_val = 0;
@@ -3580,8 +3728,8 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
return err;
}
-pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
@@ -3611,13 +3759,13 @@ pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
-pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
+static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
{
return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
PCI_ERS_RESULT_RECOVERED;
}
-void qlcnic_82xx_io_resume(struct pci_dev *pdev)
+static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
{
u32 state;
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
@@ -3725,12 +3873,6 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
return -EINVAL;
}
- if (ring_cnt < 2) {
- netdev_err(netdev,
- "%s rings value should not be lower than 2\n", buf);
- return -EINVAL;
- }
-
if (!is_power_of_2(ring_cnt)) {
netdev_err(netdev, "%s rings value should be a power of 2\n",
buf);
@@ -3753,7 +3895,7 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
return 0;
}
-int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
+int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int err;
@@ -3774,12 +3916,6 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
qlcnic_teardown_intr(adapter);
- /* compute and set default and max tx/sds rings */
- qlcnic_set_tx_ring_count(adapter, tx_cnt);
- qlcnic_set_sds_ring_count(adapter, rx_cnt);
-
- netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
-
err = qlcnic_setup_intr(adapter);
if (err) {
kfree(adapter->msix_entries);
@@ -3787,9 +3923,10 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
return err;
}
+ netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+
if (qlcnic_83xx_check(adapter)) {
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ qlcnic_83xx_initialize_nic(adapter, 1);
err = qlcnic_83xx_setup_mbx_intr(adapter);
qlcnic_83xx_disable_mbx_poll(adapter);
if (err) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 0daf660e12a1..396bd1fd1d27 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -126,8 +126,8 @@ struct qlcnic_vport {
u16 handle;
u16 max_tx_bw;
u16 min_tx_bw;
+ u16 pvid;
u8 vlan_mode;
- u16 vlan;
u8 qos;
bool spoofchk;
u8 mac[6];
@@ -137,6 +137,8 @@ struct qlcnic_vf_info {
u8 pci_func;
u16 rx_ctx_id;
u16 tx_ctx_id;
+ u16 *sriov_vlans;
+ int num_vlan;
unsigned long state;
struct completion ch_free_cmpl;
struct work_struct trans_work;
@@ -149,6 +151,7 @@ struct qlcnic_vf_info {
struct qlcnic_trans_list rcv_pend;
struct qlcnic_adapter *adapter;
struct qlcnic_vport *vp;
+ struct mutex vlan_list_lock; /* Lock for VLAN list */
};
struct qlcnic_async_work_list {
@@ -185,7 +188,6 @@ void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *);
int qlcnic_sriov_vf_init(struct qlcnic_adapter *, int);
void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *);
int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8);
-int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32);
int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *, u8);
void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *);
@@ -195,8 +197,13 @@ int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *, struct qlcnic_vf_info *,
int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
struct qlcnic_info *, u16);
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
-int qlcnic_sriov_vf_shutdown(struct pci_dev *);
-int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
+void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
+ struct qlcnic_vf_info *, u16);
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *,
+ struct qlcnic_vf_info *, u16);
static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter)
{
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 21a4b274d2e4..0638c1810d54 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -35,7 +35,10 @@ static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
struct qlcnic_cmd_args *);
+static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
+static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
+static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
.read_crb = qlcnic_83xx_read_crb,
@@ -68,6 +71,8 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
.change_l2_filter = qlcnic_83xx_change_l2_filter,
.get_board_info = qlcnic_83xx_get_port_info,
.free_mac_list = qlcnic_sriov_vf_free_mac_list,
+ .enable_sds_intr = qlcnic_83xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
};
static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
@@ -176,6 +181,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
vf->adapter = adapter;
vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
mutex_init(&vf->send_cmd_lock);
+ mutex_init(&vf->vlan_list_lock);
INIT_LIST_HEAD(&vf->rcv_act.wait_list);
INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
spin_lock_init(&vf->rcv_act.lock);
@@ -276,6 +282,11 @@ static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
+ if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
+ return;
+
+ qlcnic_sriov_free_vlans(adapter);
+
if (qlcnic_sriov_pf_check(adapter))
qlcnic_sriov_pf_cleanup(adapter);
@@ -416,10 +427,15 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
return 0;
sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
+ sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
+ dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
+ sriov->num_allowed_vlans);
+
+ qlcnic_sriov_alloc_vlans(adapter);
+
if (!sriov->any_vlan)
return 0;
- sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
num_vlans = sriov->num_allowed_vlans;
sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
if (!sriov->allowed_vlans)
@@ -432,8 +448,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
return 0;
}
-static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter,
- struct qlcnic_info *info)
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_cmd_args cmd;
@@ -473,14 +488,12 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
if (err)
return err;
+ ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;
+
err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
if (err)
return -EIO;
- err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
- if (err)
- return err;
-
if (qlcnic_83xx_get_port_info(adapter))
return -EIO;
@@ -500,7 +513,6 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
int pci_using_dac)
{
- struct qlcnic_dcb *dcb;
int err;
INIT_LIST_HEAD(&adapter->vf_mc_list);
@@ -538,10 +550,9 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
if (err)
goto err_out_send_channel_term;
- dcb = adapter->dcb;
-
- if (dcb && qlcnic_dcb_attach(dcb))
- qlcnic_clear_dcb_ops(dcb);
+ err = qlcnic_sriov_get_vf_acl(adapter);
+ if (err)
+ goto err_out_send_channel_term;
err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
if (err)
@@ -1417,7 +1428,7 @@ cleanup_transaction:
return rsp;
}
-int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
+static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
{
struct qlcnic_cmd_args cmd;
struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
@@ -1447,18 +1458,27 @@ out:
return ret;
}
-void qlcnic_vf_add_mc_list(struct net_device *netdev, u16 vlan)
+static void qlcnic_vf_add_mc_list(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_mac_list_s *cur;
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head, tmp_list;
+ struct qlcnic_vf_info *vf;
+ u16 vlan_id;
+ int i;
+
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ vf = &adapter->ahw->sriov->vf_info[0];
INIT_LIST_HEAD(&tmp_list);
head = &adapter->vf_mc_list;
netif_addr_lock_bh(netdev);
while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
list_move(&cur->list, &tmp_list);
}
@@ -1466,8 +1486,28 @@ void qlcnic_vf_add_mc_list(struct net_device *netdev, u16 vlan)
while (!list_empty(&tmp_list)) {
cur = list_entry((&tmp_list)->next,
- struct qlcnic_mac_list_s, list);
- qlcnic_nic_add_mac(adapter, cur->mac_addr, vlan);
+ struct qlcnic_mac_vlan_list, list);
+ if (!qlcnic_sriov_check_any_vlan(vf)) {
+ qlcnic_nic_add_mac(adapter, bcast_addr, 0);
+ qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
+ } else {
+ mutex_lock(&vf->vlan_list_lock);
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan_id = vf->sriov_vlans[i];
+ if (vlan_id) {
+ qlcnic_nic_add_mac(adapter, bcast_addr,
+ vlan_id);
+ qlcnic_nic_add_mac(adapter,
+ cur->mac_addr,
+ vlan_id);
+ }
+ }
+ mutex_unlock(&vf->vlan_list_lock);
+ if (qlcnic_84xx_check(adapter)) {
+ qlcnic_nic_add_mac(adapter, bcast_addr, 0);
+ qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
+ }
+ }
list_del(&cur->list);
kfree(cur);
}
@@ -1490,13 +1530,24 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- u16 vlan;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 mode = VPORT_MISS_MODE_DROP;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
- vlan = adapter->ahw->sriov->vlan;
- __qlcnic_set_multi(netdev, vlan);
+ if (netdev->flags & IFF_PROMISC) {
+ if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > ahw->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ }
+
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_vf_add_mc_list(netdev);
+
+ qlcnic_nic_set_promisc(adapter, mode);
}
static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
@@ -1584,8 +1635,6 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
if (err)
goto err_out_term_channel;
- qlcnic_dcb_get_info(adapter->dcb);
-
return 0;
err_out_term_channel:
@@ -1833,18 +1882,60 @@ static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
cancel_delayed_work_sync(&adapter->fw_work);
}
-static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov *sriov,
+static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i, err = -EINVAL;
+
+ if (!vf->sriov_vlans)
+ return err;
+
+ mutex_lock(&vf->vlan_list_lock);
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (vf->sriov_vlans[i] == vlan_id) {
+ err = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&vf->vlan_list_lock);
+ return err;
+}
+
+static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ int err = 0;
+
+ mutex_lock(&vf->vlan_list_lock);
+
+ if (vf->num_vlan >= sriov->num_allowed_vlans)
+ err = -EINVAL;
+
+ mutex_unlock(&vf->vlan_list_lock);
+ return err;
+}
+
+static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
u16 vid, u8 enable)
{
- u16 vlan = sriov->vlan;
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ bool vlan_exist;
u8 allowed = 0;
int i;
+ vf = &adapter->ahw->sriov->vf_info[0];
+ vlan_exist = qlcnic_sriov_check_any_vlan(vf);
if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
return -EINVAL;
if (enable) {
- if (vlan)
+ if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
+ return -EINVAL;
+
+ if (qlcnic_sriov_validate_num_vlans(sriov, vf))
return -EINVAL;
if (sriov->any_vlan) {
@@ -1857,24 +1948,54 @@ static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov *sriov,
return -EINVAL;
}
} else {
- if (!vlan || vlan != vid)
+ if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
return -EINVAL;
}
return 0;
}
+static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
+ enum qlcnic_vlan_operations opcode)
+{
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_sriov *sriov;
+
+ sriov = adapter->ahw->sriov;
+
+ if (!vf->sriov_vlans)
+ return;
+
+ mutex_lock(&vf->vlan_list_lock);
+
+ switch (opcode) {
+ case QLC_VLAN_ADD:
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
+ break;
+ case QLC_VLAN_DELETE:
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
+ break;
+ default:
+ netdev_err(adapter->netdev, "Invalid VLAN operation\n");
+ }
+
+ mutex_unlock(&vf->vlan_list_lock);
+ return;
+}
+
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
u16 vid, u8 enable)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
struct qlcnic_cmd_args cmd;
int ret;
if (vid == 0)
return 0;
- ret = qlcnic_sriov_validate_vlan_cfg(sriov, vid, enable);
+ vf = &adapter->ahw->sriov->vf_info[0];
+ ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
if (ret)
return ret;
@@ -1894,11 +2015,11 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
qlcnic_free_mac_list(adapter);
if (enable)
- sriov->vlan = vid;
+ qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
else
- sriov->vlan = 0;
+ qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
- qlcnic_sriov_vf_set_multi(adapter->netdev);
+ qlcnic_set_multi(adapter->netdev);
}
qlcnic_free_mbx_args(&cmd);
@@ -1908,21 +2029,19 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
{
struct list_head *head = &adapter->mac_list;
- struct qlcnic_mac_list_s *cur;
- u16 vlan;
-
- vlan = adapter->ahw->sriov->vlan;
+ struct qlcnic_mac_vlan_list *cur;
while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
- qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
- vlan, QLCNIC_MAC_DEL);
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
+ qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
+ QLCNIC_MAC_DEL);
list_del(&cur->list);
kfree(cur);
}
}
-int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
+
+static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
@@ -1946,7 +2065,7 @@ int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
return 0;
}
-int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
+static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
{
struct qlc_83xx_idc *idc = &adapter->ahw->idc;
struct net_device *netdev = adapter->netdev;
@@ -1972,3 +2091,70 @@ int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
idc->delay);
return err;
}
+
+void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
+ sizeof(*vf->sriov_vlans), GFP_KERNEL);
+ }
+}
+
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ kfree(vf->sriov_vlans);
+ vf->sriov_vlans = NULL;
+ }
+}
+
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i;
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (!vf->sriov_vlans[i]) {
+ vf->sriov_vlans[i] = vlan_id;
+ vf->num_vlan++;
+ return;
+ }
+ }
+}
+
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i;
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (vf->sriov_vlans[i] == vlan_id) {
+ vf->sriov_vlans[i] = 0;
+ vf->num_vlan--;
+ return;
+ }
+ }
+}
+
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
+{
+ bool err = false;
+
+ mutex_lock(&vf->vlan_list_lock);
+
+ if (vf->num_vlan)
+ err = true;
+
+ mutex_unlock(&vf->vlan_list_lock);
+ return err;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 686f460b1502..09acf15c3a56 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -9,9 +9,14 @@
#include "qlcnic.h"
#include <linux/types.h>
-#define QLCNIC_SRIOV_VF_MAX_MAC 1
+#define QLCNIC_SRIOV_VF_MAX_MAC 7
#define QLC_VF_MIN_TX_RATE 100
#define QLC_VF_MAX_TX_RATE 9999
+#define QLC_MAC_OPCODE_MASK 0x7
+#define QLC_MAC_STAR_ADD 6
+#define QLC_MAC_STAR_DEL 7
+#define QLC_VF_FLOOD_BIT BIT_16
+#define QLC_FLOOD_MODE 0x5
static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
@@ -64,9 +69,10 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_resources *res = &sriov->ff_max;
- u32 temp, num_vf_macs, num_vfs, max;
+ u16 num_macs = sriov->num_allowed_vlans + 1;
int ret = -EIO, vpid, id;
struct qlcnic_vport *vp;
+ u32 num_vfs, max, temp;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
if (vpid < 0)
@@ -76,16 +82,25 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
max = num_vfs + 1;
info->bit_offsets = 0xffff;
info->max_tx_ques = res->num_tx_queues / max;
+
+ if (qlcnic_83xx_pf_check(adapter))
+ num_macs = 1;
+
info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
- num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
if (adapter->ahw->pci_func == func) {
- temp = res->num_rx_mcast_mac_filters - (num_vfs * num_vf_macs);
- info->max_rx_ucast_mac_filters = temp;
- temp = res->num_tx_mac_filters - (num_vfs * num_vf_macs);
- info->max_tx_mac_filters = temp;
info->min_tx_bw = 0;
info->max_tx_bw = MAX_BW;
+
+ temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs;
+ info->max_rx_ucast_mac_filters = temp;
+ temp = res->num_tx_mac_filters - num_macs * num_vfs;
+ info->max_tx_mac_filters = temp;
+ temp = num_macs * num_vfs * QLCNIC_SRIOV_VF_MAX_MAC;
+ temp = res->num_rx_mcast_mac_filters - temp;
+ info->max_rx_mcast_mac_filters = temp;
+
+ info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
} else {
id = qlcnic_sriov_func_to_index(adapter, func);
if (id < 0)
@@ -93,8 +108,13 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
vp = sriov->vf_info[id].vp;
info->min_tx_bw = vp->min_tx_bw;
info->max_tx_bw = vp->max_tx_bw;
- info->max_rx_ucast_mac_filters = num_vf_macs;
- info->max_tx_mac_filters = num_vf_macs;
+
+ info->max_rx_ucast_mac_filters = num_macs;
+ info->max_tx_mac_filters = num_macs;
+ temp = num_macs * QLCNIC_SRIOV_VF_MAX_MAC;
+ info->max_rx_mcast_mac_filters = temp;
+
+ info->max_tx_ques = QLCNIC_SINGLE_RING;
}
info->max_rx_ip_addr = res->num_destip / max;
@@ -132,6 +152,25 @@ static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter,
ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs;
}
+static void qlcnic_sriov_set_vf_max_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int temp, total_fn;
+
+ temp = npar_info->max_rx_mcast_mac_filters;
+ total_fn = sriov->num_vfs + 1;
+
+ temp = temp / (QLCNIC_SRIOV_VF_MAX_MAC * total_fn);
+ sriov->num_allowed_vlans = temp - 1;
+
+ if (qlcnic_83xx_pf_check(adapter))
+ sriov->num_allowed_vlans = 1;
+
+ netdev_info(adapter->netdev, "Max Guest VLANs supported per VF = %d\n",
+ sriov->num_allowed_vlans);
+}
+
static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
struct qlcnic_info *npar_info)
{
@@ -165,6 +204,7 @@ static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]);
npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]);
+ qlcnic_sriov_set_vf_max_vlan(adapter, npar_info);
qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info);
dev_info(&adapter->pdev->dev,
"\n\ttotal_pf: %d,\n"
@@ -309,6 +349,28 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
return err;
}
+/* On configuring VF flood bit, PF will receive traffic from all VFs */
+static int qlcnic_sriov_pf_cfg_flood(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = QLC_FLOOD_MODE | QLC_VF_FLOOD_BIT;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure VF Flood bit on PF, err=%d\n",
+ err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter,
u8 func, u8 enable)
{
@@ -403,6 +465,8 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
qlcnic_sriov_pf_disable(adapter);
+ qlcnic_sriov_free_vlans(adapter);
+
qlcnic_sriov_pf_cleanup(adapter);
/* After disabling SRIOV re-init the driver in default mode
@@ -434,6 +498,12 @@ static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter)
if (err)
return err;
+ if (qlcnic_84xx_check(adapter)) {
+ err = qlcnic_sriov_pf_cfg_flood(adapter);
+ if (err)
+ goto disable_vlan_filtering;
+ }
+
err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1);
if (err)
goto disable_vlan_filtering;
@@ -511,6 +581,8 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
if (err)
goto del_flr_queue;
+ qlcnic_sriov_alloc_vlans(adapter);
+
err = qlcnic_sriov_pf_enable(adapter, num_vfs);
return err;
@@ -608,7 +680,7 @@ static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func)
if (vp->vlan_mode == QLC_PVID_MODE) {
cmd.req.arg[2] |= BIT_6;
- cmd.req.arg[3] |= vp->vlan << 8;
+ cmd.req.arg[3] |= vp->pvid << 8;
}
err = qlcnic_issue_cmd(adapter, &cmd);
@@ -643,10 +715,13 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_vport *vp = vf->vp;
struct qlcnic_adapter *adapter;
+ struct qlcnic_sriov *sriov;
u16 func = vf->pci_func;
+ size_t size;
int err;
adapter = vf->adapter;
+ sriov = adapter->ahw->sriov;
if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
@@ -656,8 +731,12 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
qlcnic_sriov_pf_config_vport(adapter, 0, func);
}
} else {
- if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
- vp->vlan = 0;
+ if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) {
+ size = sizeof(*vf->sriov_vlans);
+ size = size * sriov->num_allowed_vlans;
+ memset(vf->sriov_vlans, 0, size);
+ }
+
err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
}
@@ -679,20 +758,23 @@ err_out:
}
static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
- struct qlcnic_vport *vp,
- u16 func, u16 vlan, u8 op)
+ struct qlcnic_vf_info *vf,
+ u16 vlan, u8 op)
{
struct qlcnic_cmd_args cmd;
struct qlcnic_macvlan_mbx mv;
+ struct qlcnic_vport *vp;
u8 *addr;
int err;
u32 *buf;
int vpid;
+ vp = vf->vp;
+
if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
return -ENOMEM;
- vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
if (vpid < 0) {
err = -EINVAL;
goto out;
@@ -736,6 +818,35 @@ static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd)
return 0;
}
+static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ int opcode)
+{
+ struct qlcnic_sriov *sriov;
+ u16 vlan;
+ int i;
+
+ sriov = adapter->ahw->sriov;
+
+ mutex_lock(&vf->vlan_list_lock);
+ if (vf->num_vlan) {
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan = vf->sriov_vlans[i];
+ if (vlan)
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan,
+ opcode);
+ }
+ }
+ mutex_unlock(&vf->vlan_list_lock);
+
+ if (vf->vp->vlan_mode != QLC_PVID_MODE) {
+ if (qlcnic_83xx_pf_check(adapter) &&
+ qlcnic_sriov_check_any_vlan(vf))
+ return;
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, opcode);
+ }
+}
+
static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
struct qlcnic_cmd_args *cmd)
{
@@ -743,7 +854,6 @@ static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
struct qlcnic_adapter *adapter = vf->adapter;
struct qlcnic_rcv_mbx_out *mbx_out;
int err;
- u16 vlan;
err = qlcnic_sriov_validate_create_rx_ctx(cmd);
if (err) {
@@ -754,12 +864,10 @@ static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
cmd->req.arg[6] = vf->vp->handle;
err = qlcnic_issue_cmd(adapter, cmd);
- vlan = vf->vp->vlan;
if (!err) {
mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1];
vf->rx_ctx_id = mbx_out->ctx_id;
- qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
- vlan, QLCNIC_MAC_ADD);
+ qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_ADD);
} else {
vf->rx_ctx_id = 0;
}
@@ -843,7 +951,6 @@ static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err;
- u16 vlan;
err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd);
if (err) {
@@ -851,9 +958,7 @@ static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
return err;
}
- vlan = vf->vp->vlan;
- qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
- vlan, QLCNIC_MAC_DEL);
+ qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_DEL);
cmd->req.arg[1] |= vf->vp->handle << 16;
err = qlcnic_issue_cmd(adapter, cmd);
@@ -1101,6 +1206,13 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
struct qlcnic_vport *vp = vf->vp;
u8 op, new_op;
+ if (((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_ADD) ||
+ ((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_DEL)) {
+ netdev_err(adapter->netdev, "MAC + any VLAN filter not allowed from VF %d\n",
+ vf->pci_func);
+ return -EINVAL;
+ }
+
if (!(cmd->req.arg[1] & BIT_8))
return -EINVAL;
@@ -1120,7 +1232,7 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
cmd->req.arg[1] &= ~0x7;
new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
- cmd->req.arg[3] |= vp->vlan << 16;
+ cmd->req.arg[3] |= vp->pvid << 16;
cmd->req.arg[1] |= new_op;
}
@@ -1190,8 +1302,10 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vport *vp = vf->vp;
u8 cmd_op, mode = vp->vlan_mode;
struct qlcnic_adapter *adapter;
+ struct qlcnic_sriov *sriov;
adapter = vf->adapter;
+ sriov = adapter->ahw->sriov;
cmd_op = trans->req_hdr->cmd_op;
cmd->rsp.arg[0] |= 1 << 25;
@@ -1205,10 +1319,10 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
switch (mode) {
case QLC_GUEST_VLAN_MODE:
cmd->rsp.arg[1] = mode | 1 << 8;
- cmd->rsp.arg[2] = 1 << 16;
+ cmd->rsp.arg[2] = sriov->num_allowed_vlans << 16;
break;
case QLC_PVID_MODE:
- cmd->rsp.arg[1] = mode | 1 << 8 | vp->vlan << 16;
+ cmd->rsp.arg[1] = mode | 1 << 8 | vp->pvid << 16;
break;
}
@@ -1216,24 +1330,27 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
}
static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter,
- struct qlcnic_vf_info *vf)
-
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_vport *vp = vf->vp;
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ u16 vlan;
- if (!vp->vlan)
+ if (!qlcnic_sriov_check_any_vlan(vf))
return -EINVAL;
+ vlan = cmd->req.arg[1] >> 16;
if (!vf->rx_ctx_id) {
- vp->vlan = 0;
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
return 0;
}
- qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- vp->vlan, QLCNIC_MAC_DEL);
- vp->vlan = 0;
- qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 0, QLCNIC_MAC_ADD);
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_DEL);
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
+
+ if (qlcnic_83xx_pf_check(adapter))
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf,
+ 0, QLCNIC_MAC_ADD);
return 0;
}
@@ -1241,32 +1358,37 @@ static int qlcnic_sriov_pf_add_guest_vlan(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_vport *vp = vf->vp;
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
int err = -EIO;
+ u16 vlan;
- if (vp->vlan)
+ if (qlcnic_83xx_pf_check(adapter) && qlcnic_sriov_check_any_vlan(vf))
return err;
+ vlan = cmd->req.arg[1] >> 16;
+
if (!vf->rx_ctx_id) {
- vp->vlan = cmd->req.arg[1] >> 16;
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
return 0;
}
- err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 0, QLCNIC_MAC_DEL);
- if (err)
- return err;
+ if (qlcnic_83xx_pf_check(adapter)) {
+ err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+ QLCNIC_MAC_DEL);
+ if (err)
+ return err;
+ }
- vp->vlan = cmd->req.arg[1] >> 16;
- err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- vp->vlan, QLCNIC_MAC_ADD);
+ err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_ADD);
if (err) {
- qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 0, QLCNIC_MAC_ADD);
- vp->vlan = 0;
+ if (qlcnic_83xx_pf_check(adapter))
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+ QLCNIC_MAC_ADD);
+ return err;
}
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
return err;
}
@@ -1289,7 +1411,7 @@ static int qlcnic_sriov_pf_cfg_guest_vlan_cmd(struct qlcnic_bc_trans *tran,
if (op)
err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd);
else
- err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf);
+ err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf, cmd);
cmd->rsp.arg[0] |= err ? 2 << 25 : 1 << 25;
return err;
@@ -1299,8 +1421,6 @@ static const int qlcnic_pf_passthru_supp_cmds[] = {
QLCNIC_CMD_GET_STATISTICS,
QLCNIC_CMD_GET_PORT_CONFIG,
QLCNIC_CMD_GET_LINK_STATUS,
- QLCNIC_CMD_DCB_QUERY_CAP,
- QLCNIC_CMD_DCB_QUERY_PARAM,
QLCNIC_CMD_INIT_NIC_FUNC,
QLCNIC_CMD_STOP_NIC_FUNC,
};
@@ -1596,7 +1716,8 @@ void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
}
if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
- vp->vlan = 0;
+ memset(vf->sriov_vlans, 0,
+ sizeof(*vf->sriov_vlans) * sriov->num_allowed_vlans);
qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
@@ -1766,20 +1887,22 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
return -EOPNOTSUPP;
}
+ memset(vf_info->sriov_vlans, 0,
+ sizeof(*vf_info->sriov_vlans) * sriov->num_allowed_vlans);
+
switch (vlan) {
case 4095:
- vp->vlan = 0;
vp->vlan_mode = QLC_GUEST_VLAN_MODE;
break;
case 0:
vp->vlan_mode = QLC_NO_VLAN_MODE;
- vp->vlan = 0;
vp->qos = 0;
break;
default:
vp->vlan_mode = QLC_PVID_MODE;
- vp->vlan = vlan;
+ qlcnic_sriov_add_vlan_id(sriov, vf_info, vlan);
vp->qos = qos;
+ vp->pvid = vlan;
}
netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n",
@@ -1794,7 +1917,7 @@ static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
switch (vp->vlan_mode) {
case QLC_PVID_MODE:
- vlan = vp->vlan;
+ vlan = vp->pvid;
break;
case QLC_GUEST_VLAN_MODE:
vlan = MAX_VLAN_ID;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 1a9f8a400e50..3d64113a35af 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -6,7 +6,6 @@
*/
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include "qlcnic.h"
@@ -127,6 +126,8 @@ static int qlcnic_83xx_store_beacon(struct qlcnic_adapter *adapter,
if (kstrtoul(buf, 2, &h_beacon))
return -EINVAL;
+ qlcnic_get_beacon_state(adapter);
+
if (ahw->beacon_state == h_beacon)
return len;
@@ -158,7 +159,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
struct qlcnic_hardware_context *ahw = adapter->ahw;
int err, drv_sds_rings = adapter->drv_sds_rings;
u16 beacon;
- u8 h_beacon_state, b_state, b_rate;
+ u8 b_state, b_rate;
if (len != sizeof(u16))
return QL_STATUS_INVALID_PARAM;
@@ -168,18 +169,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
if (err)
return err;
- if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
- err = qlcnic_get_beacon_state(adapter, &h_beacon_state);
- if (err) {
- netdev_err(adapter->netdev,
- "Failed to get current beacon state\n");
- } else {
- if (h_beacon_state == QLCNIC_BEACON_DISABLE)
- ahw->beacon_state = 0;
- else if (h_beacon_state == QLCNIC_BEACON_EANBLE)
- ahw->beacon_state = 2;
- }
- }
+ qlcnic_get_beacon_state(adapter);
if (ahw->beacon_state == b_state)
return len;
@@ -360,10 +350,28 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
return size;
}
-static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+static u32 qlcnic_get_pci_func_count(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 count = 0;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return ahw->total_nic_func;
+
+ if (ahw->total_pci_func <= QLC_DEFAULT_VNIC_COUNT)
+ count = QLC_DEFAULT_VNIC_COUNT;
+ else
+ count = ahw->max_vnic_func;
+
+ return count;
+}
+
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
int i;
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+
+ for (i = 0; i < pci_func_count; i++) {
if (adapter->npars[i].pci_func == pci_func)
return i;
}
@@ -382,7 +390,6 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
src_pci_func = pm_cfg[i].pci_func;
dest_pci_func = pm_cfg[i].dest_npar;
src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
-
if (src_index < 0)
return QL_STATUS_INVALID_PARAM;
@@ -439,6 +446,8 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
id = adapter->npars[index].phy_port;
adapter->npars[index].enable_pm = !!pm_cfg[i].action;
adapter->npars[index].dest_npar = id;
@@ -455,17 +464,19 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
- int i;
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_pm_func_cfg *pm_cfg;
+ int i, pm_cfg_size;
u8 pci_func;
- if (size != sizeof(pm_cfg))
+ pm_cfg_size = pci_func_count * sizeof(*pm_cfg);
+ if (size != pm_cfg_size)
return QL_STATUS_INVALID_PARAM;
- memset(&pm_cfg, 0,
- sizeof(struct qlcnic_pm_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+ memset(buf, 0, pm_cfg_size);
+ pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < pci_func_count; i++) {
pci_func = adapter->npars[i].pci_func;
if (!adapter->npars[i].active)
continue;
@@ -477,26 +488,26 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
pm_cfg[pci_func].dest_npar = 0;
pm_cfg[pci_func].pci_func = i;
}
- memcpy(buf, &pm_cfg, size);
-
return size;
}
static int validate_esw_config(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg, int count)
{
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int i, ret;
u32 op_mode;
u8 pci_func;
- int i, ret;
if (qlcnic_82xx_check(adapter))
- op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+ op_mode = readl(ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
else
- op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+ op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
- if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+ if (pci_func >= pci_func_count)
return QL_STATUS_INVALID_PARAM;
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
@@ -600,6 +611,8 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
npar = &adapter->npars[index];
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
@@ -629,16 +642,19 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_esw_func_cfg *esw_cfg;
+ size_t esw_cfg_size;
u8 i, pci_func;
- if (size != sizeof(esw_cfg))
+ esw_cfg_size = pci_func_count * sizeof(*esw_cfg);
+ if (size != esw_cfg_size)
return QL_STATUS_INVALID_PARAM;
- memset(&esw_cfg, 0,
- sizeof(struct qlcnic_esw_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+ memset(buf, 0, esw_cfg_size);
+ esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < pci_func_count; i++) {
pci_func = adapter->npars[i].pci_func;
if (!adapter->npars[i].active)
continue;
@@ -650,9 +666,6 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
return QL_STATUS_INVALID_PARAM;
}
-
- memcpy(buf, &esw_cfg, size);
-
return size;
}
@@ -711,6 +724,8 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
if (ret)
return ret;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
adapter->npars[index].min_bw = nic_info.min_tx_bw;
adapter->npars[index].max_bw = nic_info.max_tx_bw;
}
@@ -726,27 +741,28 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_npar_func_cfg *np_cfg;
struct qlcnic_info nic_info;
- struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
+ size_t np_cfg_size;
int i, ret;
- if (size != sizeof(np_cfg))
+ np_cfg_size = pci_func_count * sizeof(*np_cfg);
+ if (size != np_cfg_size)
return QL_STATUS_INVALID_PARAM;
memset(&nic_info, 0, sizeof(struct qlcnic_info));
- memset(&np_cfg, 0,
- sizeof(struct qlcnic_npar_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+ memset(buf, 0, np_cfg_size);
+ np_cfg = (struct qlcnic_npar_func_cfg *)buf;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < pci_func_count; i++) {
if (qlcnic_is_valid_nic_func(adapter, i) < 0)
continue;
ret = qlcnic_get_nic_info(adapter, &nic_info, i);
if (ret)
return ret;
-
if (!adapter->npars[i].eswitch_status)
continue;
-
np_cfg[i].pci_func = i;
np_cfg[i].op_mode = (u8)nic_info.op_mode;
np_cfg[i].port_num = nic_info.phys_port;
@@ -756,8 +772,6 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
}
-
- memcpy(buf, &np_cfg, size);
return size;
}
@@ -769,6 +783,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_esw_statistics port_stats;
int ret;
@@ -778,7 +793,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
- if (offset >= QLCNIC_MAX_PCI_FUNC)
+ if (offset >= pci_func_count)
return QL_STATUS_INVALID_PARAM;
memset(&port_stats, 0, size);
@@ -869,12 +884,13 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
int ret;
if (qlcnic_83xx_check(adapter))
return QLC_STATUS_UNSUPPORTED_CMD;
- if (offset >= QLCNIC_MAX_PCI_FUNC)
+ if (offset >= pci_func_count)
return QL_STATUS_INVALID_PARAM;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
@@ -898,14 +914,17 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_pci_func_cfg *pci_cfg;
struct qlcnic_pci_info *pci_info;
+ size_t pci_cfg_sz;
int i, ret;
- if (size != sizeof(pci_cfg))
+ pci_cfg_sz = pci_func_count * sizeof(*pci_cfg);
+ if (size != pci_cfg_sz)
return QL_STATUS_INVALID_PARAM;
- pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ pci_info = kcalloc(pci_func_count, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
@@ -915,19 +934,17 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
return ret;
}
- memset(&pci_cfg, 0,
- sizeof(struct qlcnic_pci_func_cfg) * QLCNIC_MAX_PCI_FUNC);
-
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
+ for (i = 0; i < pci_func_count; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
pci_cfg[i].func_type = pci_info[i].type;
+ pci_cfg[i].func_state = 0;
pci_cfg[i].port_num = pci_info[i].default_port;
pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
}
- memcpy(buf, &pci_cfg, size);
kfree(pci_info);
return size;
}
@@ -1269,7 +1286,7 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
device_remove_file(dev, &dev_attr_bridged_mode);
}
-void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
@@ -1308,7 +1325,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
dev_info(dev, "failed to create eswitch stats sysfs entry");
}
-void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 03517478e589..ef332708e5f2 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -2248,7 +2248,6 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev);
int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *qdev);
void ql_get_dump(struct ql_adapter *qdev, void *buff);
-void ql_gen_reg_dump(struct ql_adapter *qdev, struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
int ql_own_firmware(struct ql_adapter *qdev);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 6bc5db703920..829be21f97b2 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -1242,8 +1242,8 @@ static void ql_get_core_dump(struct ql_adapter *qdev)
ql_queue_fw_error(qdev);
}
-void ql_gen_reg_dump(struct ql_adapter *qdev,
- struct ql_reg_dump *mpi_coredump)
+static void ql_gen_reg_dump(struct ql_adapter *qdev,
+ struct ql_reg_dump *mpi_coredump)
{
int i, status;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 8dee1beb9854..c3c514e332b5 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -1,5 +1,4 @@
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 449f506d2e8f..ce2cfddbed50 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -6,7 +6,6 @@
* Ron Mercer <ron.mercer@qlogic.com>
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
@@ -4765,6 +4764,8 @@ static int qlge_probe(struct pci_dev *pdev,
NETIF_F_RXCSUM;
ndev->features = ndev->hw_features;
ndev->vlan_features = ndev->hw_features;
+ /* vlan gets same features (except vlan filter) */
+ ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
if (test_bit(QL_DMA64, &qdev->flags))
ndev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 1e49ec5b2232..819b74cefd64 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -34,7 +34,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
@@ -222,6 +221,7 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
cmd = ioread16(ioaddr + MMDIO);
if (!(cmd & MDIO_READ))
break;
+ udelay(1);
}
if (limit < 0)
@@ -245,6 +245,7 @@ static int r6040_phy_write(void __iomem *ioaddr,
cmd = ioread16(ioaddr + MMDIO);
if (!(cmd & MDIO_WRITE))
break;
+ udelay(1);
}
return (limit < 0) ? -ETIMEDOUT : 0;
@@ -834,8 +835,8 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
/* Set TX descriptor & Transmit it */
lp->tx_free_desc--;
descptr = lp->tx_insert_ptr;
- if (skb->len < MISR)
- descptr->len = MISR;
+ if (skb->len < ETH_ZLEN)
+ descptr->len = ETH_ZLEN;
else
descptr->len = skb->len;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index c737f0ea5de7..91a67ae8f17b 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -21,7 +21,6 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index a30c4395b232..9e757c792d84 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -13,4 +13,4 @@ config SH_ETH
Renesas SuperH Ethernet device driver.
This driver supporting CPUs are:
- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
- R8A7740, R8A777x and R8A7790.
+ R8A7740, R8A777x and R8A779x.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d256ce19d4de..040cb94e8219 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,5 +1,4 @@
-/*
- * SuperH Ethernet device driver
+/* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
* Copyright (C) 2008-2013 Renesas Solutions Corp.
@@ -13,15 +12,11 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
@@ -148,6 +143,65 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWALCR1] = 0x00b4,
};
+static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [EDSR] = 0x0000,
+ [EDMR] = 0x0400,
+ [EDTRR] = 0x0408,
+ [EDRRR] = 0x0410,
+ [EESR] = 0x0428,
+ [EESIPR] = 0x0430,
+ [TDLAR] = 0x0010,
+ [TDFAR] = 0x0014,
+ [TDFXR] = 0x0018,
+ [TDFFR] = 0x001c,
+ [RDLAR] = 0x0030,
+ [RDFAR] = 0x0034,
+ [RDFXR] = 0x0038,
+ [RDFFR] = 0x003c,
+ [TRSCER] = 0x0438,
+ [RMFCR] = 0x0440,
+ [TFTR] = 0x0448,
+ [FDR] = 0x0450,
+ [RMCR] = 0x0458,
+ [RPADIR] = 0x0460,
+ [FCFTR] = 0x0468,
+ [CSMR] = 0x04E4,
+
+ [ECMR] = 0x0500,
+ [RFLR] = 0x0508,
+ [ECSR] = 0x0510,
+ [ECSIPR] = 0x0518,
+ [PIR] = 0x0520,
+ [APR] = 0x0554,
+ [MPR] = 0x0558,
+ [PFTCR] = 0x055c,
+ [PFRCR] = 0x0560,
+ [TPAUSER] = 0x0564,
+ [MAHR] = 0x05c0,
+ [MALR] = 0x05c8,
+ [CEFCR] = 0x0740,
+ [FRECR] = 0x0748,
+ [TSFRCR] = 0x0750,
+ [TLFRCR] = 0x0758,
+ [RFCR] = 0x0760,
+ [MAFCR] = 0x0778,
+
+ [ARSTR] = 0x0000,
+ [TSU_CTRST] = 0x0004,
+ [TSU_VTAG0] = 0x0058,
+ [TSU_ADSBSY] = 0x0060,
+ [TSU_TEN] = 0x0064,
+ [TSU_ADRH0] = 0x0100,
+ [TSU_ADRL0] = 0x0104,
+ [TSU_ADRH31] = 0x01f8,
+ [TSU_ADRL31] = 0x01fc,
+
+ [TXNLCR0] = 0x0080,
+ [TXALCR0] = 0x0084,
+ [RXNLCR0] = 0x0088,
+ [RXALCR0] = 0x008C,
+};
+
static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
[ECMR] = 0x0300,
[RFLR] = 0x0308,
@@ -314,12 +368,14 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[TSU_ADRL31] = 0x01fc,
};
-static int sh_eth_is_gether(struct sh_eth_private *mdp)
+static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
- if (mdp->reg_offset == sh_eth_offset_gigabit)
- return 1;
- else
- return 0;
+ return mdp->reg_offset == sh_eth_offset_gigabit;
+}
+
+static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
+{
+ return mdp->reg_offset == sh_eth_offset_fast_rz;
}
static void sh_eth_select_mii(struct net_device *ndev)
@@ -395,8 +451,8 @@ static struct sh_eth_cpu_data r8a777x_data = {
.hw_swap = 1,
};
-/* R8A7790 */
-static struct sh_eth_cpu_data r8a7790_data = {
+/* R8A7790/1 */
+static struct sh_eth_cpu_data r8a779x_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_r8a777x,
@@ -646,8 +702,8 @@ static struct sh_eth_cpu_data sh7763_data = {
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tx_check = EESR_TC1 | EESR_FTC,
- .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI,
.apr = 1,
@@ -705,6 +761,38 @@ static struct sh_eth_cpu_data r8a7740_data = {
.shift_rd0 = 1,
};
+/* R7S72100 */
+static struct sh_eth_cpu_data r7s72100_data = {
+ .chip_reset = sh_eth_chip_reset,
+ .set_duplex = sh_eth_set_duplex,
+
+ .register_type = SH_ETH_REG_FAST_RZ,
+
+ .ecsr_value = ECSR_ICD,
+ .ecsipr_value = ECSIPR_ICDIP,
+ .eesipr_value = 0xff7f009f,
+
+ .tx_check = EESR_TC1 | EESR_FTC,
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+ EESR_TDE | EESR_ECI,
+ .fdr_value = 0x0000070f,
+ .rmcr_value = RMCR_RNC,
+
+ .no_psr = 1,
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
+ .no_trimd = 1,
+ .no_ade = 1,
+ .hw_crc = 1,
+ .tsu = 1,
+ .shift_rd0 = 1,
+};
+
static struct sh_eth_cpu_data sh7619_data = {
.register_type = SH_ETH_REG_FAST_SH3_SH2,
@@ -732,7 +820,7 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
if (!cd->fcftr_value)
- cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
+ cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
DEFAULT_FIFO_F_D_RFD;
if (!cd->fdr_value)
@@ -771,7 +859,7 @@ static int sh_eth_reset(struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
int ret = 0;
- if (sh_eth_is_gether(mdp)) {
+ if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
sh_eth_write(ndev, EDSR_ENALL, EDSR);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
EDMR);
@@ -849,20 +937,17 @@ static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
return x;
}
-/*
- * Program the hardware MAC address from dev->dev_addr.
- */
+/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
sh_eth_write(ndev,
- (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
- (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+ (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
sh_eth_write(ndev,
- (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
-/*
- * Get MAC address from SuperH MAC address register
+/* Get MAC address from SuperH MAC address register
*
* SuperH's Ethernet device doesn't have 'ROM' to MAC address.
* This driver get MAC address that use by bootloader(U-boot or sh-ipl+g).
@@ -885,7 +970,7 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
- if (sh_eth_is_gether(mdp))
+ if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
return EDTRR_TRNS_GETHER;
else
return EDTRR_TRNS_ETHER;
@@ -1019,8 +1104,10 @@ static void sh_eth_ring_format(struct net_device *ndev)
int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
- mdp->cur_rx = mdp->cur_tx = 0;
- mdp->dirty_rx = mdp->dirty_tx = 0;
+ mdp->cur_rx = 0;
+ mdp->cur_tx = 0;
+ mdp->dirty_rx = 0;
+ mdp->dirty_tx = 0;
memset(mdp->rx_ring, 0, rx_ringsize);
@@ -1033,7 +1120,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
if (skb == NULL)
break;
dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
- DMA_FROM_DEVICE);
+ DMA_FROM_DEVICE);
sh_eth_set_receive_align(skb);
/* RX descriptor */
@@ -1046,7 +1133,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
/* Rx descriptor address set */
if (i == 0) {
sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
- if (sh_eth_is_gether(mdp))
+ if (sh_eth_is_gether(mdp) ||
+ sh_eth_is_rz_fast_ether(mdp))
sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
}
}
@@ -1067,7 +1155,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
if (i == 0) {
/* Tx descriptor address set */
sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
- if (sh_eth_is_gether(mdp))
+ if (sh_eth_is_gether(mdp) ||
+ sh_eth_is_rz_fast_ether(mdp))
sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
}
}
@@ -1081,8 +1170,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
int rx_ringsize, tx_ringsize, ret = 0;
- /*
- * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
+ /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
* card needs room to do 8 byte alignment, +2 so we can reserve
* the first 2 bytes, and +16 gets room for the status word from the
* card.
@@ -1257,7 +1345,7 @@ static int sh_eth_txfree(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
- int freeNum = 0;
+ int free_num = 0;
int entry = 0;
for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
@@ -1271,7 +1359,7 @@ static int sh_eth_txfree(struct net_device *ndev)
txdesc->buffer_length, DMA_TO_DEVICE);
dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
mdp->tx_skbuff[entry] = NULL;
- freeNum++;
+ free_num++;
}
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
if (entry >= mdp->num_tx_ring - 1)
@@ -1280,7 +1368,7 @@ static int sh_eth_txfree(struct net_device *ndev)
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += txdesc->buffer_length;
}
- return freeNum;
+ return free_num;
}
/* Packet receive function */
@@ -1313,12 +1401,11 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (!(desc_status & RDFEND))
ndev->stats.rx_length_errors++;
- /*
- * In case of almost all GETHER/ETHERs, the Receive Frame State
+ /* In case of almost all GETHER/ETHERs, the Receive Frame State
* (RFS) bits in the Receive Descriptor 0 are from bit 9 to
- * bit 0. However, in case of the R8A7740's GETHER, the RFS
- * bits are from bit 25 to bit 16. So, the driver needs right
- * shifting by 16.
+ * bit 0. However, in case of the R8A7740, R8A779x, and
+ * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
+ * driver needs right shifting by 16.
*/
if (mdp->cd->shift_rd0)
desc_status >>= 16;
@@ -1374,7 +1461,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (skb == NULL)
break; /* Better luck next round. */
dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
- DMA_FROM_DEVICE);
+ DMA_FROM_DEVICE);
sh_eth_set_receive_align(skb);
skb_checksum_none_assert(skb);
@@ -1392,10 +1479,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
/* If we don't need to check status, don't. -KDU */
if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
/* fix the values for the next receiving if RDE is set */
- if (intr_status & EESR_RDE)
- mdp->cur_rx = mdp->dirty_rx =
- (sh_eth_read(ndev, RDFAR) -
- sh_eth_read(ndev, RDLAR)) >> 4;
+ if (intr_status & EESR_RDE) {
+ u32 count = (sh_eth_read(ndev, RDFAR) -
+ sh_eth_read(ndev, RDLAR)) >> 4;
+
+ mdp->cur_rx = count;
+ mdp->dirty_rx = count;
+ }
sh_eth_write(ndev, EDRRR_R, EDRRR);
}
@@ -1438,17 +1528,17 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (mdp->ether_link_active_low)
link_stat = ~link_stat;
}
- if (!(link_stat & PHY_ST_LINK))
+ if (!(link_stat & PHY_ST_LINK)) {
sh_eth_rcv_snd_disable(ndev);
- else {
+ } else {
/* Link Up */
sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
- ~DMAC_M_ECI, EESIPR);
- /*clear int */
+ ~DMAC_M_ECI, EESIPR);
+ /* clear int */
sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
- ECSR);
+ ECSR);
sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
- DMAC_M_ECI, EESIPR);
+ DMAC_M_ECI, EESIPR);
/* enable tx and rx */
sh_eth_rcv_snd_enable(ndev);
}
@@ -1517,11 +1607,11 @@ ignore_link:
if (intr_status & mask) {
/* Tx error */
u32 edtrr = sh_eth_read(ndev, EDTRR);
+
/* dmesg */
- dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
- intr_status, mdp->cur_tx);
- dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
- mdp->dirty_tx, (u32) ndev->state, edtrr);
+ dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
+ intr_status, mdp->cur_tx, mdp->dirty_tx,
+ (u32)ndev->state, edtrr);
/* dirty buffer free */
sh_eth_txfree(ndev);
@@ -1644,7 +1734,8 @@ static void sh_eth_adjust_link(struct net_device *ndev)
}
if (!mdp->link) {
sh_eth_write(ndev,
- (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
+ sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
+ ECMR);
new_state = 1;
mdp->link = phydev->link;
if (mdp->cd->no_psr || mdp->no_ether_link)
@@ -1671,7 +1762,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
struct phy_device *phydev = NULL;
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
- mdp->mii_bus->id , mdp->phy_id);
+ mdp->mii_bus->id, mdp->phy_id);
mdp->link = 0;
mdp->speed = 0;
@@ -1685,8 +1776,8 @@ static int sh_eth_phy_init(struct net_device *ndev)
return PTR_ERR(phydev);
}
- dev_info(&ndev->dev, "attached phy %i to driver %s\n",
- phydev->addr, phydev->drv->name);
+ dev_info(&ndev->dev, "attached PHY %d (IRQ %d) to driver %s\n",
+ phydev->addr, phydev->irq, phydev->drv->name);
mdp->phydev = phydev;
@@ -1703,15 +1794,13 @@ static int sh_eth_phy_start(struct net_device *ndev)
if (ret)
return ret;
- /* reset phy - this also wakes it from PDOWN */
- phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
phy_start(mdp->phydev);
return 0;
}
static int sh_eth_get_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
@@ -1725,7 +1814,7 @@ static int sh_eth_get_settings(struct net_device *ndev,
}
static int sh_eth_set_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
@@ -1801,7 +1890,7 @@ static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
}
static void sh_eth_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *stats, u64 *data)
+ struct ethtool_stats *stats, u64 *data)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int i = 0;
@@ -1818,7 +1907,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, *sh_eth_gstrings_stats,
- sizeof(sh_eth_gstrings_stats));
+ sizeof(sh_eth_gstrings_stats));
break;
}
}
@@ -1953,9 +2042,10 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
netif_stop_queue(ndev);
- if (netif_msg_timer(mdp))
- dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
- " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
+ if (netif_msg_timer(mdp)) {
+ dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n",
+ ndev->name, (int)sh_eth_read(ndev, EESR));
+ }
/* tx_errors count up */
ndev->stats.tx_errors++;
@@ -2065,6 +2155,9 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ if (sh_eth_is_rz_fast_ether(mdp))
+ return &ndev->stats;
+
pm_runtime_get_sync(&mdp->pdev->dev);
ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
@@ -2088,8 +2181,7 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
}
/* ioctl to device function */
-static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
- int cmd)
+static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct phy_device *phydev = mdp->phydev;
@@ -2209,7 +2301,7 @@ static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
sh_eth_tsu_read_entry(reg_offset, c_addr);
- if (memcmp(addr, c_addr, ETH_ALEN) == 0)
+ if (ether_addr_equal(addr, c_addr))
return i;
}
@@ -2344,8 +2436,7 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
unsigned long flags;
spin_lock_irqsave(&mdp->lock, flags);
- /*
- * Initial condition is MCT = 1, PRM = 0.
+ /* Initial condition is MCT = 1, PRM = 0.
* Depending on ndev->flags, set PRM or clear MCT
*/
ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
@@ -2411,8 +2502,7 @@ static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
mdp->vlan_num_ids++;
- /*
- * The controller has one VLAN tag HW filter. So, if the filter is
+ /* The controller has one VLAN tag HW filter. So, if the filter is
* already enabled, the driver disables it and the filte
*/
if (mdp->vlan_num_ids > 1) {
@@ -2449,6 +2539,11 @@ static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
+ if (sh_eth_is_rz_fast_ether(mdp)) {
+ sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
+ return;
+ }
+
sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
@@ -2528,7 +2623,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = &ndev->dev;
snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- mdp->pdev->name, id);
+ mdp->pdev->name, id);
/* PHY IRQ */
mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
@@ -2541,6 +2636,8 @@ static int sh_mdio_init(struct net_device *ndev, int id,
for (i = 0; i < PHY_MAX_ADDR; i++)
mdp->mii_bus->irq[i] = PHY_POLL;
+ if (pd->phy_irq > 0)
+ mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
/* register mdio bus */
ret = mdiobus_register(mdp->mii_bus);
@@ -2566,6 +2663,9 @@ static const u16 *sh_eth_get_register_offset(int register_type)
case SH_ETH_REG_GIGABIT:
reg_offset = sh_eth_offset_gigabit;
break;
+ case SH_ETH_REG_FAST_RZ:
+ reg_offset = sh_eth_offset_fast_rz;
+ break;
case SH_ETH_REG_FAST_RCAR:
reg_offset = sh_eth_offset_fast_rcar;
break;
@@ -2739,7 +2839,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* print device information */
pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
- (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+ (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
platform_set_drvdata(pdev, ndev);
@@ -2777,8 +2877,7 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int sh_eth_runtime_nop(struct device *dev)
{
- /*
- * Runtime PM callback shared between ->runtime_suspend()
+ /* Runtime PM callback shared between ->runtime_suspend()
* and ->runtime_resume(). Simply returns success.
*
* This driver re-initializes all registers after
@@ -2805,9 +2904,11 @@ static struct platform_device_id sh_eth_id_table[] = {
{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
+ { "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
- { "r8a7790-ether", (kernel_ulong_t)&r8a7790_data },
+ { "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
+ { "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index f32c1692d310..6075915b88ec 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -1,5 +1,4 @@
-/*
- * SuperH Ethernet device driver
+/* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
* Copyright (C) 2008-2012 Renesas Solutions Corp.
@@ -12,9 +11,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -159,6 +155,7 @@ enum {
enum {
SH_ETH_REG_GIGABIT,
+ SH_ETH_REG_FAST_RZ,
SH_ETH_REG_FAST_RCAR,
SH_ETH_REG_FAST_SH4,
SH_ETH_REG_FAST_SH3_SH2
@@ -171,10 +168,9 @@ enum {
#define SH2_SH3_SKB_RX_ALIGN 2
#endif
-/*
- * Register's bits
+/* Register's bits
*/
-/* EDSR : sh7734, sh7757, sh7763, and r8a7740 only */
+/* EDSR : sh7734, sh7757, sh7763, r8a7740, and r7s72100 only */
enum EDSR_BIT {
EDSR_ENT = 0x01, EDSR_ENR = 0x02,
};
@@ -199,7 +195,7 @@ enum DMAC_T_BIT {
EDTRR_TRNS_ETHER = 0x01,
};
-/* EDRRR*/
+/* EDRRR */
enum EDRRR_R_BIT {
EDRRR_R = 0x01,
};
@@ -422,8 +418,7 @@ enum TSU_FWSLC_BIT {
#define TSU_VTAG_ENABLE 0x80000000
#define TSU_VTAG_VID_MASK 0x00000fff
-/*
- * The sh ether Tx buffer descriptors.
+/* The sh ether Tx buffer descriptors.
* This structure should be 20 bytes.
*/
struct sh_eth_txdesc {
@@ -437,10 +432,9 @@ struct sh_eth_txdesc {
#endif
u32 addr; /* TD2 */
u32 pad1; /* padding data */
-} __attribute__((aligned(2), packed));
+} __aligned(2) __packed;
-/*
- * The sh ether Rx buffer descriptors.
+/* The sh ether Rx buffer descriptors.
* This structure should be 20 bytes.
*/
struct sh_eth_rxdesc {
@@ -454,7 +448,7 @@ struct sh_eth_rxdesc {
#endif
u32 addr; /* RD2 */
u32 pad0; /* padding data */
-} __attribute__((aligned(2), packed));
+} __aligned(2) __packed;
/* This structure is used by each CPU dependency handling. */
struct sh_eth_cpu_data {
@@ -480,16 +474,16 @@ struct sh_eth_cpu_data {
unsigned long eesr_err_check;
/* hardware features */
- unsigned long irq_flags; /* IRQ configuration flags */
- unsigned no_psr:1; /* EtherC DO NOT have PSR */
- unsigned apr:1; /* EtherC have APR */
- unsigned mpr:1; /* EtherC have MPR */
- unsigned tpauser:1; /* EtherC have TPAUSER */
- unsigned bculr:1; /* EtherC have BCULR */
- unsigned tsu:1; /* EtherC have TSU */
- unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */
- unsigned rpadir:1; /* E-DMAC have RPADIR */
- unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
+ unsigned long irq_flags; /* IRQ configuration flags */
+ unsigned no_psr:1; /* EtherC DO NOT have PSR */
+ unsigned apr:1; /* EtherC have APR */
+ unsigned mpr:1; /* EtherC have MPR */
+ unsigned tpauser:1; /* EtherC have TPAUSER */
+ unsigned bculr:1; /* EtherC have BCULR */
+ unsigned tsu:1; /* EtherC have TSU */
+ unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */
+ unsigned rpadir:1; /* E-DMAC have RPADIR */
+ unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
unsigned hw_crc:1; /* E-DMAC have CSMR */
unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
@@ -511,14 +505,14 @@ struct sh_eth_private {
struct sh_eth_txdesc *tx_ring;
struct sk_buff **rx_skbuff;
struct sk_buff **tx_skbuff;
- spinlock_t lock;
- u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ spinlock_t lock; /* Register access lock */
+ u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
u32 cur_tx, dirty_tx;
- u32 rx_buf_sz; /* Based on MTU+slack. */
+ u32 rx_buf_sz; /* Based on MTU+slack. */
int edmac_endian;
struct napi_struct napi;
/* MII transceiver section. */
- u32 phy_id; /* PHY ID */
+ u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */
struct phy_device *phydev; /* PHY device control */
int link;
@@ -526,8 +520,8 @@ struct sh_eth_private {
int msg_enable;
int speed;
int duplex;
- int port; /* for TSU */
- int vlan_num_ids; /* for VLAN tag filter */
+ int port; /* for TSU */
+ int vlan_num_ids; /* for VLAN tag filter */
unsigned no_ether_link:1;
unsigned ether_link_active_low:1;
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index a99739c5142c..1f4449ad8900 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index c76571886011..69e4fd21adb4 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
@@ -356,7 +355,7 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
if (pkt_status & SEEQ_RSTAT_FIG) {
/* Packet is OK. */
/* We don't want to receive our own packets */
- if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(rd->skb->data + 6, dev->dev_addr)) {
if (len > rx_copybreak) {
skb = rd->skb;
newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 676c3c057bfb..174a92f5fe51 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -14,6 +14,7 @@
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
+#include "selftest.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
@@ -52,31 +53,31 @@ struct efx_ef10_filter_table {
struct {
unsigned long spec; /* pointer to spec plus flag bits */
-/* BUSY flag indicates that an update is in progress. STACK_OLD is
- * used to mark and sweep stack-owned MAC filters.
+/* BUSY flag indicates that an update is in progress. AUTO_OLD is
+ * used to mark and sweep MAC filters for the device address lists.
*/
#define EFX_EF10_FILTER_FLAG_BUSY 1UL
-#define EFX_EF10_FILTER_FLAG_STACK_OLD 2UL
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL
#define EFX_EF10_FILTER_FLAGS 3UL
u64 handle; /* firmware handle */
} *entry;
wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
-#define EFX_EF10_FILTER_STACK_UC_MAX 32
-#define EFX_EF10_FILTER_STACK_MC_MAX 256
+#define EFX_EF10_FILTER_DEV_UC_MAX 32
+#define EFX_EF10_FILTER_DEV_MC_MAX 256
struct {
u8 addr[ETH_ALEN];
u16 id;
- } stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
- stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
- int stack_uc_count; /* negative for PROMISC */
- int stack_mc_count; /* negative for PROMISC/ALLMULTI */
+ } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
+ dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+ int dev_uc_count; /* negative for PROMISC */
+ int dev_mc_count; /* negative for PROMISC/ALLMULTI */
};
/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200
-static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
+static void efx_ef10_rx_push_rss_config(struct efx_nic *efx);
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
@@ -263,6 +264,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail3;
+ efx_ptp_probe(efx, NULL);
+
return 0;
fail3:
@@ -277,11 +280,17 @@ fail1:
static int efx_ef10_free_vis(struct efx_nic *efx)
{
- int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ size_t outlen;
+ int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
/* -EALREADY means nothing to free, so ignore */
if (rc == -EALREADY)
rc = 0;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
+ rc);
return rc;
}
@@ -465,9 +474,10 @@ static void efx_ef10_remove(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
+ efx_ptp_remove(efx);
+
efx_mcdi_mon_remove(efx);
- /* This needs to be after efx_ptp_remove_channel() with no filters */
efx_ef10_rx_free_indir_table(efx);
if (nic_data->wc_membase)
@@ -669,10 +679,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_restore_piobufs = false;
}
- efx_ef10_rx_push_indir_table(efx);
+ efx_ef10_rx_push_rss_config(efx);
return 0;
}
+static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ /* All our allocations have been reset */
+ nic_data->must_realloc_vis = true;
+ nic_data->must_restore_filters = true;
+ nic_data->must_restore_piobufs = true;
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+}
+
static int efx_ef10_map_reset_flags(u32 *flags)
{
enum {
@@ -703,6 +724,19 @@ static int efx_ef10_map_reset_flags(u32 *flags)
return -EINVAL;
}
+static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
+{
+ int rc = efx_mcdi_reset(efx, reset_type);
+
+ /* If it was a port reset, trigger reallocation of MC resources.
+ * Note that on an MC reset nothing needs to be done now because we'll
+ * detect the MC reset later and handle it then.
+ */
+ if (reset_type == RESET_TYPE_ALL && !rc)
+ efx_ef10_reset_mc_allocations(efx);
+ return rc;
+}
+
#define EF10_DMA_STAT(ext_name, mcdi_name) \
[EF10_STAT_ ## ext_name] = \
{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
@@ -764,8 +798,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
- EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
- EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
+ EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
+ EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
};
#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
@@ -834,8 +868,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
(1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
(1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
(1ULL << EF10_STAT_rx_dp_streaming_packets) | \
- (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \
- (1ULL << EF10_STAT_rx_dp_emerg_wait))
+ (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \
+ (1ULL << EF10_STAT_rx_dp_hlb_wait))
static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
@@ -901,6 +935,7 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
return -EAGAIN;
/* Update derived statistics */
+ efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]);
stats[EF10_STAT_rx_good_bytes] =
stats[EF10_STAT_rx_bytes] -
stats[EF10_STAT_rx_bytes_minus_good_bytes];
@@ -1067,10 +1102,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
nic_data->warm_boot_count = rc;
/* All our allocations have been reset */
- nic_data->must_realloc_vis = true;
- nic_data->must_restore_filters = true;
- nic_data->must_restore_piobufs = true;
- nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+ efx_ef10_reset_mc_allocations(efx);
/* The datapath firmware might have been changed */
nic_data->must_check_datapath_caps = true;
@@ -1241,8 +1273,8 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
return;
fail:
- WARN_ON(true);
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
+ tx_queue->queue);
}
static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
@@ -1256,7 +1288,7 @@ static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
tx_queue->queue);
- rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
@@ -1265,7 +1297,8 @@ static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
return;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
+ outbuf, outlen, rc);
}
static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
@@ -1408,12 +1441,12 @@ static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
-static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
+static void efx_ef10_rx_push_rss_config(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
- netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");
+ netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n");
if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
@@ -1461,8 +1494,9 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
efx_rx_queue_index(rx_queue));
- MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
- INIT_RXQ_IN_FLAG_PREFIX, 1);
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
+ INIT_RXQ_IN_FLAG_PREFIX, 1,
+ INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
@@ -1481,13 +1515,8 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
-
- return;
-
-fail:
- WARN_ON(true);
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
+ efx_rx_queue_index(rx_queue));
}
static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
@@ -1501,7 +1530,7 @@ static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
efx_rx_queue_index(rx_queue));
- rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
@@ -1510,7 +1539,8 @@ static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
return;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
+ outbuf, outlen, rc);
}
static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
@@ -1647,15 +1677,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
/* IRQ return is ignored */
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1669,7 +1691,7 @@ static void efx_ef10_ev_fini(struct efx_channel *channel)
MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
- rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
@@ -1678,7 +1700,8 @@ static void efx_ef10_ev_fini(struct efx_channel *channel)
return;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+ outbuf, outlen, rc);
}
static void efx_ef10_ev_remove(struct efx_channel *channel)
@@ -1717,8 +1740,6 @@ static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
{
unsigned int rx_desc_ptr;
- WARN_ON(rx_queue->scatter_n == 0);
-
netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
"scattered RX aborted (dropping %u buffers)\n",
rx_queue->scatter_n);
@@ -1754,7 +1775,10 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
- WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));
+ if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
+ netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
+ EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*event));
rx_queue = efx_channel_get_rx_queue(channel);
@@ -1765,17 +1789,27 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
if (n_descs != rx_queue->scatter_n + 1) {
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
/* detect rx abort */
if (unlikely(n_descs == rx_queue->scatter_n)) {
- WARN_ON(rx_bytes != 0);
+ if (rx_queue->scatter_n == 0 || rx_bytes != 0)
+ netdev_WARN(efx->net_dev,
+ "invalid RX abort: scatter_n=%u event="
+ EFX_QWORD_FMT "\n",
+ rx_queue->scatter_n,
+ EFX_QWORD_VAL(*event));
efx_ef10_handle_rx_abort(rx_queue);
return 0;
}
- if (unlikely(rx_queue->scatter_n != 0)) {
- /* Scattered packet completions cannot be
- * merged, so something has gone wrong.
- */
+ /* Check that RX completion merging is valid, i.e.
+ * the current firmware supports it and this is a
+ * non-scattered packet.
+ */
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
+ rx_queue->scatter_n != 0 || rx_cont) {
efx_ef10_handle_rx_bad_lbits(
rx_queue, next_ptr_lbits,
(rx_queue->removed_count +
@@ -1901,7 +1935,7 @@ static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
* events, so efx_process_channel() won't refill the
* queue. Refill it here
*/
- efx_fast_push_rx_descriptors(&channel->rx_queue);
+ efx_fast_push_rx_descriptors(&channel->rx_queue, true);
break;
default:
netif_err(efx, hw, efx->net_dev,
@@ -2232,7 +2266,9 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+ 0 : spec->dmaq_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
(spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
@@ -2257,6 +2293,8 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
outbuf, sizeof(outbuf), NULL);
if (rc == 0)
*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ if (rc == -ENOSPC)
+ rc = -EBUSY; /* to match efx_farch_filter_insert() */
return rc;
}
@@ -2326,10 +2364,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
EFX_EF10_FILTER_FLAG_BUSY)
break;
if (spec->priority < saved_spec->priority &&
- !(saved_spec->priority ==
- EFX_FILTER_PRI_REQUIRED &&
- saved_spec->flags &
- EFX_FILTER_FLAG_RX_STACK)) {
+ spec->priority != EFX_FILTER_PRI_AUTO) {
rc = -EPERM;
goto out_unlock;
}
@@ -2383,11 +2418,13 @@ found:
*/
saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
if (saved_spec) {
- if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
+ if (spec->priority == EFX_FILTER_PRI_AUTO &&
+ saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
/* Just make sure it won't be removed */
- saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
+ if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
+ saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
table->entry[ins_index].spec &=
- ~EFX_EF10_FILTER_FLAG_STACK_OLD;
+ ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
rc = ins_index;
goto out_unlock;
}
@@ -2427,8 +2464,11 @@ found:
if (rc == 0) {
if (replacing) {
/* Update the fields that may differ */
+ if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
+ saved_spec->flags |=
+ EFX_FILTER_FLAG_RX_OVER_AUTO;
saved_spec->priority = spec->priority;
- saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
+ saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
saved_spec->flags |= spec->flags;
saved_spec->rss_context = spec->rss_context;
saved_spec->dmaq_id = spec->dmaq_id;
@@ -2497,13 +2537,13 @@ static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
}
/* Remove a filter.
- * If !stack_requested, remove by ID
- * If stack_requested, remove by index
+ * If !by_index, remove by ID
+ * If by_index, remove by index
* Filter ID may come from userland and must be range-checked.
*/
static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, bool stack_requested)
+ unsigned int priority_mask,
+ u32 filter_id, bool by_index)
{
unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -2527,26 +2567,41 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
spin_unlock_bh(&efx->filter_lock);
schedule();
}
+
spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (!spec || spec->priority > priority ||
- (!stack_requested &&
+ if (!spec ||
+ (!by_index &&
efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
filter_id / HUNT_FILTER_TBL_ROWS)) {
rc = -ENOENT;
goto out_unlock;
}
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
+ priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
+ /* Just remove flags */
+ spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ rc = 0;
+ goto out_unlock;
+ }
+
+ if (!(priority_mask & (1U << spec->priority))) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+
table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
spin_unlock_bh(&efx->filter_lock);
- if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
- /* Reset steering of a stack-owned filter */
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+ /* Reset to an automatic filter */
struct efx_filter_spec new_spec = *spec;
- new_spec.priority = EFX_FILTER_PRI_REQUIRED;
+ new_spec.priority = EFX_FILTER_PRI_AUTO;
new_spec.flags = (EFX_FILTER_FLAG_RX |
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK);
+ EFX_FILTER_FLAG_RX_RSS);
new_spec.dmaq_id = 0;
new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
rc = efx_ef10_filter_push(efx, &new_spec,
@@ -2574,6 +2629,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
}
}
+
table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
wake_up_all(&table->waitq);
out_unlock:
@@ -2586,7 +2642,8 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id)
{
- return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
+ return efx_ef10_filter_remove_internal(efx, 1U << priority,
+ filter_id, false);
}
static int efx_ef10_filter_get_safe(struct efx_nic *efx,
@@ -2612,10 +2669,24 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
return rc;
}
-static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
+static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority)
{
- /* TODO */
+ unsigned int priority_mask;
+ unsigned int i;
+ int rc;
+
+ priority_mask = (((1U << (priority + 1)) - 1) &
+ ~(1U << EFX_FILTER_PRI_AUTO));
+
+ for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
+ rc = efx_ef10_filter_remove_internal(efx, priority_mask,
+ i, true);
+ if (rc && rc != -ENOENT)
+ return rc;
+ }
+
+ return 0;
}
static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
@@ -2716,8 +2787,6 @@ static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
rc = -EBUSY;
goto fail_unlock;
}
- EFX_WARN_ON_PARANOID(saved_spec->flags &
- EFX_FILTER_FLAG_RX_STACK);
if (spec->priority < saved_spec->priority) {
rc = -EPERM;
goto fail_unlock;
@@ -3027,8 +3096,11 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
table->entry[filter_idx].handle);
rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
NULL, 0, NULL);
-
- WARN_ON(rc != 0);
+ if (rc)
+ netdev_WARN(efx->net_dev,
+ "filter_idx=%#x handle=%#llx\n",
+ filter_idx,
+ table->entry[filter_idx].handle);
kfree(spec);
}
@@ -3052,15 +3124,15 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
/* Mark old filters that may need to be removed */
spin_lock_bh(&efx->filter_lock);
- n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
+ n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
for (i = 0; i < n; i++) {
- filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+ filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
}
- n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
+ n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
for (i = 0; i < n; i++) {
- filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+ filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
}
spin_unlock_bh(&efx->filter_lock);
@@ -3069,28 +3141,28 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
*/
netif_addr_lock_bh(net_dev);
if (net_dev->flags & IFF_PROMISC ||
- netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
- table->stack_uc_count = -1;
+ netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
+ table->dev_uc_count = -1;
} else {
- table->stack_uc_count = 1 + netdev_uc_count(net_dev);
- memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
+ table->dev_uc_count = 1 + netdev_uc_count(net_dev);
+ memcpy(table->dev_uc_list[0].addr, net_dev->dev_addr,
ETH_ALEN);
i = 1;
netdev_for_each_uc_addr(uc, net_dev) {
- memcpy(table->stack_uc_list[i].addr,
+ memcpy(table->dev_uc_list[i].addr,
uc->addr, ETH_ALEN);
i++;
}
}
if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
- netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
- table->stack_mc_count = -1;
+ netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
+ table->dev_mc_count = -1;
} else {
- table->stack_mc_count = 1 + netdev_mc_count(net_dev);
- eth_broadcast_addr(table->stack_mc_list[0].addr);
+ table->dev_mc_count = 1 + netdev_mc_count(net_dev);
+ eth_broadcast_addr(table->dev_mc_list[0].addr);
i = 1;
netdev_for_each_mc_addr(mc, net_dev) {
- memcpy(table->stack_mc_list[i].addr,
+ memcpy(table->dev_mc_list[i].addr,
mc->addr, ETH_ALEN);
i++;
}
@@ -3098,90 +3170,86 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
netif_addr_unlock_bh(net_dev);
/* Insert/renew unicast filters */
- if (table->stack_uc_count >= 0) {
- for (i = 0; i < table->stack_uc_count; i++) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK,
+ if (table->dev_uc_count >= 0) {
+ for (i = 0; i < table->dev_uc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
0);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- table->stack_uc_list[i].addr);
+ table->dev_uc_list[i].addr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
/* Fall back to unicast-promisc */
while (i--)
efx_ef10_filter_remove_safe(
- efx, EFX_FILTER_PRI_REQUIRED,
- table->stack_uc_list[i].id);
- table->stack_uc_count = -1;
+ efx, EFX_FILTER_PRI_AUTO,
+ table->dev_uc_list[i].id);
+ table->dev_uc_count = -1;
break;
}
- table->stack_uc_list[i].id = rc;
+ table->dev_uc_list[i].id = rc;
}
}
- if (table->stack_uc_count < 0) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK,
+ if (table->dev_uc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
0);
efx_filter_set_uc_def(&spec);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
WARN_ON(1);
- table->stack_uc_count = 0;
+ table->dev_uc_count = 0;
} else {
- table->stack_uc_list[0].id = rc;
+ table->dev_uc_list[0].id = rc;
}
}
/* Insert/renew multicast filters */
- if (table->stack_mc_count >= 0) {
- for (i = 0; i < table->stack_mc_count; i++) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK,
+ if (table->dev_mc_count >= 0) {
+ for (i = 0; i < table->dev_mc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
0);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- table->stack_mc_list[i].addr);
+ table->dev_mc_list[i].addr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
/* Fall back to multicast-promisc */
while (i--)
efx_ef10_filter_remove_safe(
- efx, EFX_FILTER_PRI_REQUIRED,
- table->stack_mc_list[i].id);
- table->stack_mc_count = -1;
+ efx, EFX_FILTER_PRI_AUTO,
+ table->dev_mc_list[i].id);
+ table->dev_mc_count = -1;
break;
}
- table->stack_mc_list[i].id = rc;
+ table->dev_mc_list[i].id = rc;
}
}
- if (table->stack_mc_count < 0) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK,
+ if (table->dev_mc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
0);
efx_filter_set_mc_def(&spec);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
WARN_ON(1);
- table->stack_mc_count = 0;
+ table->dev_mc_count = 0;
} else {
- table->stack_mc_list[0].id = rc;
+ table->dev_mc_list[0].id = rc;
}
}
/* Remove filters that weren't renewed. Since nothing else
- * changes the STACK_OLD flag or removes these filters, we
+ * changes the AUTO_OLD flag or removes these filters, we
* don't need to hold the filter_lock while scanning for
* these filters.
*/
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
if (ACCESS_ONCE(table->entry[i].spec) &
- EFX_EF10_FILTER_FLAG_STACK_OLD) {
- if (efx_ef10_filter_remove_internal(efx,
- EFX_FILTER_PRI_REQUIRED,
- i, true) < 0)
+ EFX_EF10_FILTER_FLAG_AUTO_OLD) {
+ if (efx_ef10_filter_remove_internal(
+ efx, 1U << EFX_FILTER_PRI_AUTO,
+ i, true) < 0)
remove_failed = true;
}
}
@@ -3195,6 +3263,87 @@ static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
return efx_mcdi_set_mac(efx);
}
+static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
+ return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+/* MC BISTs follow a different poll mechanism to phy BISTs.
+ * The BIST is done in the poll handler on the MC, and the MCDI command
+ * will block until the BIST is done.
+ */
+static int efx_ef10_poll_bist(struct efx_nic *efx)
+{
+ int rc;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
+ size_t outlen;
+ u32 result;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
+ return -EIO;
+
+ result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
+ switch (result) {
+ case MC_CMD_POLL_BIST_PASSED:
+ netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
+ return 0;
+ case MC_CMD_POLL_BIST_TIMEOUT:
+ netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
+ return -EIO;
+ case MC_CMD_POLL_BIST_FAILED:
+ netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
+ return -EIO;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "BIST returned unknown result %u", result);
+ return -EIO;
+ }
+}
+
+static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
+{
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
+
+ rc = efx_ef10_start_bist(efx, bist_type);
+ if (rc != 0)
+ return rc;
+
+ return efx_ef10_poll_bist(efx);
+}
+
+static int
+efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ int rc, rc2;
+
+ efx_reset_down(efx, RESET_TYPE_WORLD);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
+ NULL, 0, NULL, 0, NULL);
+ if (rc != 0)
+ goto out;
+
+ tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
+ tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
+
+ rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
+
+out:
+ rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
+ return rc ? rc : rc2;
+}
+
#ifdef CONFIG_SFC_MTD
struct efx_ef10_nvram_type_info {
@@ -3213,6 +3362,7 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
{ NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
};
@@ -3320,6 +3470,119 @@ static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}
+static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
+ bool temp)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
+ int rc;
+
+ if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
+ channel->sync_events_state == SYNC_EVENTS_VALID ||
+ (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
+ return 0;
+ channel->sync_events_state = SYNC_EVENTS_REQUESTED;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
+ channel->channel);
+
+ rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ if (rc != 0)
+ channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
+ SYNC_EVENTS_DISABLED;
+
+ return rc;
+}
+
+static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
+ bool temp)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
+ int rc;
+
+ if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
+ (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
+ return 0;
+ if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
+ channel->sync_events_state = SYNC_EVENTS_DISABLED;
+ return 0;
+ }
+ channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
+ SYNC_EVENTS_DISABLED;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
+ MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
+ channel->channel);
+
+ rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ return rc;
+}
+
+static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
+ bool temp)
+{
+ int (*set)(struct efx_channel *channel, bool temp);
+ struct efx_channel *channel;
+
+ set = en ?
+ efx_ef10_rx_enable_timestamping :
+ efx_ef10_rx_disable_timestamping;
+
+ efx_for_each_channel(channel, efx) {
+ int rc = set(channel, temp);
+ if (en && rc != 0) {
+ efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ int rc;
+
+ switch (init->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ efx_ef10_ptp_set_ts_sync_events(efx, false, false);
+ /* if TX timestamping is still requested then leave PTP on */
+ return efx_ptp_change_mode(efx,
+ init->tx_type != HWTSTAMP_TX_OFF, 0);
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_ALL;
+ rc = efx_ptp_change_mode(efx, true, 0);
+ if (!rc)
+ rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
+ if (rc)
+ efx_ptp_change_mode(efx, false, 0);
+ return rc;
+ default:
+ return -ERANGE;
+ }
+}
+
const struct efx_nic_type efx_hunt_a0_nic_type = {
.mem_map_size = efx_ef10_mem_map_size,
.probe = efx_ef10_probe,
@@ -3329,13 +3592,14 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.fini = efx_port_dummy_op_void,
.map_reset_reason = efx_mcdi_map_reset_reason,
.map_reset_flags = efx_ef10_map_reset_flags,
- .reset = efx_mcdi_reset,
+ .reset = efx_ef10_reset,
.probe_port = efx_mcdi_port_probe,
.remove_port = efx_mcdi_port_remove,
.fini_dmaq = efx_ef10_fini_dmaq,
.describe_stats = efx_ef10_describe_stats,
.update_stats = efx_ef10_update_stats,
.start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = efx_ef10_push_irq_moderation,
@@ -3345,7 +3609,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.get_wol = efx_ef10_get_wol,
.set_wol = efx_ef10_set_wol,
.resume_wol = efx_port_dummy_op_void,
- /* TODO: test_chip */
+ .test_chip = efx_ef10_test_chip,
.test_nvram = efx_mcdi_nvram_test_all,
.mcdi_request = efx_ef10_mcdi_request,
.mcdi_poll_response = efx_ef10_mcdi_poll_response,
@@ -3360,7 +3624,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.tx_init = efx_ef10_tx_init,
.tx_remove = efx_ef10_tx_remove,
.tx_write = efx_ef10_tx_write,
- .rx_push_indir_table = efx_ef10_rx_push_indir_table,
+ .rx_push_rss_config = efx_ef10_rx_push_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
.rx_remove = efx_ef10_rx_remove,
@@ -3397,11 +3661,14 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.mtd_sync = efx_mcdi_mtd_sync,
#endif
.ptp_write_host_time = efx_ef10_ptp_write_host_time,
+ .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
+ .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
.can_rx_scatter = true,
.always_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
@@ -3410,4 +3677,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
NETIF_F_RXHASH | NETIF_F_NTUPLE),
.mcdi_max_ver = 2,
.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+ .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_ALL,
};
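/*
 * A minimal standalone sketch (illustrative, not part of this patch) of how
 * a capability bitmask like the new .hwtstamp_filters field above can be
 * checked against a requested rx_filter value. The numbering and helper
 * below are assumptions for the example; the real constants live in
 * include/uapi/linux/net_tstamp.h.
 */
#include <stdio.h>

/* Illustrative rx_filter values; the real ones are in net_tstamp.h */
enum { FILTER_NONE = 0, FILTER_ALL = 1, FILTER_SOME = 2 };

/* Capability mask in the style of ".hwtstamp_filters" above */
static const unsigned int supported =
	(1u << FILTER_NONE) | (1u << FILTER_ALL);

/* Return 0 if the requested filter mode is advertised, -1 otherwise */
static int check_rx_filter(unsigned int rx_filter)
{
	if (rx_filter >= 8 * sizeof(supported) ||
	    !(supported & (1u << rx_filter)))
		return -1;
	return 0;
}

int main(void)
{
	printf("FILTER_ALL accepted:  %d\n", check_rx_filter(FILTER_ALL) == 0);
	printf("FILTER_SOME accepted: %d\n", check_rx_filter(FILTER_SOME) == 0);
	return 0;
}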
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index fd844b53e385..83d464347021 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -83,6 +83,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+ [RESET_TYPE_MC_BIST] = "MC_BIST",
};
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
@@ -91,6 +92,12 @@ const char *const efx_reset_type_names[] = {
*/
static struct workqueue_struct *reset_workqueue;
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST that another function started to complete.
+ */
+#define BIST_WAIT_DELAY_MS 100
+#define BIST_WAIT_DELAY_COUNT 100
+
/**************************************************************************
*
* Configurable values
@@ -246,7 +253,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_channel_get_rx_queue(channel);
efx_rx_flush_packet(channel);
- efx_fast_push_rx_descriptors(rx_queue);
+ efx_fast_push_rx_descriptors(rx_queue, true);
}
return spent;
@@ -639,7 +646,9 @@ static void efx_start_datapath(struct efx_nic *efx)
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
atomic_inc(&efx->active_queues);
- efx_nic_generate_fill_event(rx_queue);
+ efx_stop_eventq(channel);
+ efx_fast_push_rx_descriptors(rx_queue, false);
+ efx_start_eventq(channel);
}
WARN_ON(channel->rx_pkt_n_frags);
@@ -1051,18 +1060,23 @@ static void efx_start_port(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
efx->port_enabled = true;
- /* efx_mac_work() might have been scheduled after efx_stop_port(),
- * and then cancelled by efx_flush_all() */
+ /* Ensure MAC ingress/egress is enabled */
efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
}
-/* Prevent efx_mac_work() and efx_monitor() from working */
+/* Cancel work for MAC reconfiguration, periodic hardware monitoring
+ * and the async self-test, wait for them to finish and prevent them
+ * being scheduled again. This doesn't cover online resets, which
+ * should only be cancelled when removing the device.
+ */
static void efx_stop_port(struct efx_nic *efx)
{
netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
mutex_lock(&efx->mac_lock);
efx->port_enabled = false;
mutex_unlock(&efx->mac_lock);
@@ -1070,6 +1084,10 @@ static void efx_stop_port(struct efx_nic *efx)
/* Serialise against efx_set_multicast_list() */
netif_addr_lock_bh(efx->net_dev);
netif_addr_unlock_bh(efx->net_dev);
+
+ cancel_delayed_work_sync(&efx->monitor_work);
+ efx_selftest_async_cancel(efx);
+ cancel_work_sync(&efx->mac_work);
}
static void efx_fini_port(struct efx_nic *efx)
@@ -1099,6 +1117,77 @@ static void efx_remove_port(struct efx_nic *efx)
*
**************************************************************************/
+static LIST_HEAD(efx_primary_list);
+static LIST_HEAD(efx_unassociated_list);
+
+static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
+{
+ return left->type == right->type &&
+ left->vpd_sn && right->vpd_sn &&
+ !strcmp(left->vpd_sn, right->vpd_sn);
+}
+
+static void efx_associate(struct efx_nic *efx)
+{
+ struct efx_nic *other, *next;
+
+ if (efx->primary == efx) {
+ /* Adding primary function; look for secondaries */
+
+ netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
+ list_add_tail(&efx->node, &efx_primary_list);
+
+ list_for_each_entry_safe(other, next, &efx_unassociated_list,
+ node) {
+ if (efx_same_controller(efx, other)) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to secondary list of %s %s\n",
+ pci_name(efx->pci_dev),
+ efx->net_dev->name);
+ list_add_tail(&other->node,
+ &efx->secondary_list);
+ other->primary = efx;
+ }
+ }
+ } else {
+ /* Adding secondary function; look for primary */
+
+ list_for_each_entry(other, &efx_primary_list, node) {
+ if (efx_same_controller(efx, other)) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to secondary list of %s %s\n",
+ pci_name(other->pci_dev),
+ other->net_dev->name);
+ list_add_tail(&efx->node,
+ &other->secondary_list);
+ efx->primary = other;
+ return;
+ }
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to unassociated list\n");
+ list_add_tail(&efx->node, &efx_unassociated_list);
+ }
+}
+
+static void efx_dissociate(struct efx_nic *efx)
+{
+ struct efx_nic *other, *next;
+
+ list_del(&efx->node);
+ efx->primary = NULL;
+
+ list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to unassociated list\n");
+ list_add_tail(&other->node, &efx_unassociated_list);
+ other->primary = NULL;
+ }
+}
+
/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
@@ -1675,18 +1764,10 @@ static void efx_start_all(struct efx_nic *efx)
}
efx->type->start_stats(efx);
-}
-
-/* Flush all delayed work. Should only be called when no more delayed work
- * will be scheduled. This doesn't flush pending online resets (efx_reset),
- * since we're holding the rtnl_lock at this point. */
-static void efx_flush_all(struct efx_nic *efx)
-{
- /* Make sure the hardware monitor and event self-test are stopped */
- cancel_delayed_work_sync(&efx->monitor_work);
- efx_selftest_async_cancel(efx);
- /* Stop scheduled port reconfigurations */
- cancel_work_sync(&efx->mac_work);
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
}
/* Quiesce the hardware and software data path, and regular activity
@@ -1702,12 +1783,16 @@ static void efx_stop_all(struct efx_nic *efx)
if (!efx->port_enabled)
return;
+ /* update stats before we go down so we can accurately count
+ * rx_nodesc_drops
+ */
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
efx->type->stop_stats(efx);
efx_stop_port(efx);
- /* Flush efx_mac_work(), refill_workqueue, monitor_work */
- efx_flush_all(efx);
-
/* Stop the kernel transmit interface. This is only valid if
* the device is stopped or detached; otherwise the watchdog
* may fire immediately.
@@ -1851,7 +1936,9 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
struct mii_ioctl_data *data = if_mii(ifr);
if (cmd == SIOCSHWTSTAMP)
- return efx_ptp_ioctl(efx, ifr, cmd);
+ return efx_ptp_set_ts_config(efx, ifr);
+ if (cmd == SIOCGHWTSTAMP)
+ return efx_ptp_get_ts_config(efx, ifr);
/* Convert phy_id from older PRTAD/DEVAD format */
if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
@@ -2064,7 +2151,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
/* If disabling RX n-tuple filtering, clear existing filters */
if (net_dev->features & ~data & NETIF_F_NTUPLE)
- efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+ return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
return 0;
}
@@ -2198,6 +2285,8 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_init_tx_queue_core_txq(tx_queue);
}
+ efx_associate(efx);
+
rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2211,6 +2300,7 @@ static int efx_register_netdev(struct efx_nic *efx)
fail_registered:
rtnl_lock();
+ efx_dissociate(efx);
unregister_netdevice(net_dev);
fail_locked:
efx->state = STATE_UNINIT;
@@ -2387,6 +2477,24 @@ int efx_try_recovery(struct efx_nic *efx)
return 0;
}
+static void efx_wait_for_bist_end(struct efx_nic *efx)
+{
+ int i;
+
+ for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
+ if (efx_mcdi_poll_reboot(efx))
+ goto out;
+ msleep(BIST_WAIT_DELAY_MS);
+ }
+
+ netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
+out:
+ /* Either way unset the BIST flag. If we found no reboot we probably
+ * won't recover, but we should try.
+ */
+ efx->mc_bist_for_other_fn = false;
+}
+
/* The worker thread exists so that code that cannot sleep can
* schedule a reset for later.
*/
@@ -2399,6 +2507,9 @@ static void efx_reset_work(struct work_struct *data)
pending = ACCESS_ONCE(efx->reset_pending);
method = fls(pending) - 1;
+ if (method == RESET_TYPE_MC_BIST)
+ efx_wait_for_bist_end(efx);
+
if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
method == RESET_TYPE_RECOVER_OR_ALL) &&
efx_try_recovery(efx))
@@ -2437,6 +2548,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_MC_BIST:
method = type;
netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
RESET_TYPE(method));
@@ -2530,6 +2642,8 @@ static int efx_init_struct(struct efx_nic *efx,
int i;
/* Initialise common structures */
+ INIT_LIST_HEAD(&efx->node);
+ INIT_LIST_HEAD(&efx->secondary_list);
spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
INIT_LIST_HEAD(&efx->mtd_list);
@@ -2548,6 +2662,8 @@ static int efx_init_struct(struct efx_nic *efx,
NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
efx->rx_packet_hash_offset =
efx->type->rx_hash_offset - efx->type->rx_prefix_size;
+ efx->rx_packet_ts_offset =
+ efx->type->rx_ts_offset - efx->type->rx_prefix_size;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->phy_op = &efx_dummy_phy_operations;
@@ -2588,6 +2704,8 @@ static void efx_fini_struct(struct efx_nic *efx)
for (i = 0; i < EFX_MAX_CHANNELS; i++)
kfree(efx->channel[i]);
+ kfree(efx->vpd_sn);
+
if (efx->workqueue) {
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
@@ -2632,6 +2750,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* Mark the NIC as fini, then stop the interface */
rtnl_lock();
+ efx_dissociate(efx);
dev_close(efx->net_dev);
efx_disable_interrupts(efx);
rtnl_unlock();
@@ -2647,7 +2766,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
efx_fini_struct(efx);
- pci_set_drvdata(pci_dev, NULL);
free_netdev(efx->net_dev);
pci_disable_pcie_error_reporting(pci_dev);
@@ -2659,12 +2777,12 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
* always appear within the first 512 bytes.
*/
#define SFC_VPD_LEN 512
-static void efx_print_product_vpd(struct efx_nic *efx)
+static void efx_probe_vpd_strings(struct efx_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
char vpd_data[SFC_VPD_LEN];
ssize_t vpd_size;
- int i, j;
+ int ro_start, ro_size, i, j;
/* Get the vpd data from the device */
vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
@@ -2674,14 +2792,15 @@ static void efx_print_product_vpd(struct efx_nic *efx)
}
/* Get the Read only section */
- i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
+ ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ if (ro_start < 0) {
netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
return;
}
- j = pci_vpd_lrdt_size(&vpd_data[i]);
- i += PCI_VPD_LRDT_TAG_SIZE;
+ ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
+ j = ro_size;
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
if (i + j > vpd_size)
j = vpd_size - i;
@@ -2701,6 +2820,27 @@ static void efx_print_product_vpd(struct efx_nic *efx)
netif_info(efx, drv, efx->net_dev,
"Part Number : %.*s\n", j, &vpd_data[i]);
+
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
+ j = ro_size;
+ i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
+ if (i < 0) {
+ netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
+ return;
+ }
+
+ j = pci_vpd_info_field_size(&vpd_data[i]);
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (i + j > vpd_size) {
+ netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
+ return;
+ }
+
+ efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
+ if (!efx->vpd_sn)
+ return;
+
+ snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
}
@@ -2797,7 +2937,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_info(efx, probe, efx->net_dev,
"Solarflare NIC detected\n");
- efx_print_product_vpd(efx);
+ efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
@@ -2841,7 +2981,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
fail2:
efx_fini_struct(efx);
fail1:
- pci_set_drvdata(pci_dev, NULL);
WARN_ON(rc > 0);
netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
free_netdev(net_dev);
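/*
 * A standalone sketch (illustrative, not part of this patch) of the reset
 * selection used in efx_reset_work() above: efx->reset_pending is a bitmask
 * of scheduled reset types, and the worker services the highest-numbered
 * pending type via "method = fls(pending) - 1", which is how the new
 * RESET_TYPE_MC_BIST case is reached. The enum values and the fls()
 * replacement below are assumptions for the example.
 */
#include <stdio.h>

/* Illustrative reset-type ordinals; the kernel's enum reset_type differs */
enum { R_ALL = 1, R_WORLD = 2, R_MC_FAILURE = 3, R_MC_BIST = 4 };

/* Minimal find-last-set, standing in for the kernel's fls() */
static int sketch_fls(unsigned long x)
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;
}

int main(void)
{
	unsigned long pending = 0;

	pending |= 1UL << R_ALL;	/* e.g. scheduled by a TX watchdog */
	pending |= 1UL << R_MC_BIST;	/* e.g. another function started a BIST */

	/* The worker picks R_MC_BIST (4), the highest-numbered pending bit */
	printf("servicing reset type %d\n", sketch_fls(pending) - 1);
	return 0;
}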
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index b8235ee5d7d7..dbd7b78fe01c 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -37,7 +37,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
void efx_rx_slow_fill(unsigned long context);
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
@@ -66,6 +66,9 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_RXQ_MIN_ENT 128U
#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
+#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \
+ EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
+
/* Filters */
/**
@@ -134,17 +137,6 @@ efx_filter_get_filter_safe(struct efx_nic *efx,
return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}
-/**
- * efx_farch_filter_clear_rx - remove RX filters by priority
- * @efx: NIC from which to remove the filters
- * @priority: Maximum priority to remove
- */
-static inline void efx_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority)
-{
- return efx->type->filter_clear_rx(efx, priority);
-}
-
static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority)
{
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 7fdfee019092..75ef7ef6450b 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -165,6 +165,7 @@ enum reset_type {
RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MC_FAILURE,
+ RESET_TYPE_MC_BIST,
RESET_TYPE_MAX,
};
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 1f529fa2edb1..229428915aa8 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -318,6 +318,8 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
"eventq.int", NULL);
}
+ efx_fill_test(n++, strings, data, &tests->memory,
+ "core", 0, "memory", NULL);
efx_fill_test(n++, strings, data, &tests->registers,
"core", 0, "registers", NULL);
@@ -357,7 +359,8 @@ static int efx_ethtool_get_sset_count(struct net_device *net_dev,
switch (string_set) {
case ETH_SS_STATS:
return efx->type->describe_stats(efx, NULL) +
- EFX_ETHTOOL_SW_STAT_COUNT;
+ EFX_ETHTOOL_SW_STAT_COUNT +
+ efx_ptp_describe_stats(efx, NULL);
case ETH_SS_TEST:
return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
default:
@@ -378,6 +381,8 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
strlcpy(strings + i * ETH_GSTRING_LEN,
efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
+ strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
+ efx_ptp_describe_stats(efx, strings);
break;
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
@@ -427,8 +432,11 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
break;
}
}
+ data += EFX_ETHTOOL_SW_STAT_COUNT;
spin_unlock_bh(&efx->stats_lock);
+
+ efx_ptp_update_stats(efx, data);
}
static void efx_ethtool_self_test(struct net_device *net_dev,
@@ -583,7 +591,7 @@ static void efx_ethtool_get_ringparam(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
- ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
+ ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
ring->rx_pending = efx->rxq_entries;
ring->tx_pending = efx->txq_entries;
}
@@ -596,7 +604,7 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
- ring->tx_pending > EFX_MAX_DMAQ_SIZE)
+ ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
return -EINVAL;
if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
@@ -1032,7 +1040,7 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
- efx_nic_push_rx_indir_table(efx);
+ efx->type->rx_push_rss_config(efx);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index ff5d322b9b49..18d6f761f4d0 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -469,6 +469,24 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
}
/**************************************************************************
*
+ * RSS
+ *
+ **************************************************************************
+ */
+
+static void falcon_b0_rx_push_rss_config(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+ efx_farch_rx_push_indir_table(efx);
+}
+
+/**************************************************************************
+ *
* EEPROM/flash
*
**************************************************************************
@@ -2247,6 +2265,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
struct falcon_board *board;
int rc;
+ efx->primary = efx; /* only one usable function per controller */
+
/* Allocate storage for hardware specific data */
nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
if (!nic_data)
@@ -2482,9 +2502,7 @@ static int falcon_init_nic(struct efx_nic *efx)
falcon_init_rx_cfg(efx);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- /* Set hash key for IPv4 */
- memcpy(&temp, efx->rx_hash_key, sizeof(temp));
- efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+ falcon_b0_rx_push_rss_config(efx);
/* Set destination of both TX and RX Flush events */
EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
@@ -2593,6 +2611,14 @@ void falcon_start_nic_stats(struct efx_nic *efx)
spin_unlock_bh(&efx->stats_lock);
}
+/* We don't actually pull stats on falcon. Wait 10ms so that
+ * they arrive when we call this just after start_stats
+ */
+static void falcon_pull_nic_stats(struct efx_nic *efx)
+{
+ msleep(10);
+}
+
void falcon_stop_nic_stats(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
@@ -2672,6 +2698,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
+ .pull_stats = falcon_pull_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
@@ -2692,7 +2719,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
- .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_push_rss_config = efx_port_dummy_op_void,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
@@ -2765,6 +2792,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
+ .pull_stats = falcon_pull_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
@@ -2786,7 +2814,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
- .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_push_rss_config = falcon_b0_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index c0907d884d75..f72489a105ca 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -1147,7 +1147,7 @@ static void efx_farch_handle_generated_event(struct efx_channel *channel,
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
* queue. Refill it here */
- efx_fast_push_rx_descriptors(rx_queue);
+ efx_fast_push_rx_descriptors(rx_queue, true);
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
efx_farch_handle_drain_event(channel);
} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
@@ -1618,8 +1618,7 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx)
size_t i = 0;
efx_dword_t dword;
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
- return;
+ BUG_ON(efx_nic_rev(efx) < EFX_REV_FALCON_B0);
BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
FR_BZ_RX_INDIRECTION_TBL_ROWS);
@@ -1745,8 +1744,6 @@ void efx_farch_init_common(struct efx_nic *efx)
EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
- efx_farch_rx_push_indir_table(efx);
-
/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
* controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
*/
@@ -2187,14 +2184,14 @@ efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
}
static void
-efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
- struct efx_farch_filter_spec *spec)
+efx_farch_filter_init_rx_auto(struct efx_nic *efx,
+ struct efx_farch_filter_spec *spec)
{
/* If there's only one channel then disable RSS for non VF
* traffic, thereby allowing VFs to use RSS when the PF can't.
*/
- spec->priority = EFX_FILTER_PRI_REQUIRED;
- spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
+ spec->priority = EFX_FILTER_PRI_AUTO;
+ spec->flags = (EFX_FILTER_FLAG_RX |
(efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
(efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
spec->dmaq_id = 0;
@@ -2459,20 +2456,13 @@ s32 efx_farch_filter_insert(struct efx_nic *efx,
rc = -EEXIST;
goto out;
}
- if (spec.priority < saved_spec->priority &&
- !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED &&
- saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) {
+ if (spec.priority < saved_spec->priority) {
rc = -EPERM;
goto out;
}
- if (spec.flags & EFX_FILTER_FLAG_RX_STACK) {
- /* Just make sure it won't be removed */
- saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
- rc = 0;
- goto out;
- }
- /* Retain the RX_STACK flag */
- spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK;
+ if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
+ saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
+ spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
}
/* Insert the filter */
@@ -2553,11 +2543,11 @@ static int efx_farch_filter_remove(struct efx_nic *efx,
struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
if (!test_bit(filter_idx, table->used_bitmap) ||
- spec->priority > priority)
+ spec->priority != priority)
return -ENOENT;
- if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
- efx_farch_filter_init_rx_for_stack(efx, spec);
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+ efx_farch_filter_init_rx_auto(efx, spec);
efx_farch_filter_push_rx_config(efx);
} else {
efx_farch_filter_table_clear_entry(efx, table, filter_idx);
@@ -2640,12 +2630,15 @@ efx_farch_filter_table_clear(struct efx_nic *efx,
unsigned int filter_idx;
spin_lock_bh(&efx->filter_lock);
- for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
- efx_farch_filter_remove(efx, table, filter_idx, priority);
+ for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
+ if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
+ efx_farch_filter_remove(efx, table,
+ filter_idx, priority);
+ }
spin_unlock_bh(&efx->filter_lock);
}
-void efx_farch_filter_clear_rx(struct efx_nic *efx,
+int efx_farch_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority)
{
efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
@@ -2654,6 +2647,7 @@ void efx_farch_filter_clear_rx(struct efx_nic *efx,
priority);
efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
priority);
+ return 0;
}
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
@@ -2822,7 +2816,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
spec = &table->spec[i];
spec->type = EFX_FARCH_FILTER_UC_DEF + i;
- efx_farch_filter_init_rx_for_stack(efx, spec);
+ efx_farch_filter_init_rx_auto(efx, spec);
__set_bit(i, table->used_bitmap);
}
}
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 63c77a557178..3ef298d3c47e 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -59,12 +59,16 @@ enum efx_filter_match_flags {
/**
* enum efx_filter_priority - priority of a hardware filter specification
* @EFX_FILTER_PRI_HINT: Performance hint
+ * @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list
+ * or hardware requirements. This may only be used by the filter
+ * implementation for each NIC type.
* @EFX_FILTER_PRI_MANUAL: Manually configured filter
* @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
* networking and SR-IOV)
*/
enum efx_filter_priority {
EFX_FILTER_PRI_HINT = 0,
+ EFX_FILTER_PRI_AUTO,
EFX_FILTER_PRI_MANUAL,
EFX_FILTER_PRI_REQUIRED,
};
@@ -78,19 +82,18 @@ enum efx_filter_priority {
* according to the indirection table.
* @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
* queue.
- * @EFX_FILTER_FLAG_RX_STACK: Indicates a filter inserted for the
- * network stack. The filter must have a priority of
- * %EFX_FILTER_PRI_REQUIRED. It can be steered by a replacement
- * request with priority %EFX_FILTER_PRI_MANUAL, and a removal
- * request with priority %EFX_FILTER_PRI_MANUAL will reset the
- * steering (but not remove the filter).
+ * @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
+ * overriding an automatic filter (priority
+ * %EFX_FILTER_PRI_AUTO). This may only be set by the filter
+ * implementation for each type. A removal request will restore
+ * the automatic filter in its place.
* @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
- EFX_FILTER_FLAG_RX_STACK = 0x04,
+ EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10,
};
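/*
 * A standalone sketch (illustrative, not part of this patch) of the priority
 * mask arithmetic introduced for efx_ef10_filter_clear_rx() earlier in this
 * patch: all priorities up to and including the requested one are selected,
 * and the new EFX_FILTER_PRI_AUTO level is excluded so that automatic
 * filters survive a clear. The enum values mirror filter.h above.
 */
#include <stdio.h>

enum efx_filter_priority {
	EFX_FILTER_PRI_HINT = 0,
	EFX_FILTER_PRI_AUTO,
	EFX_FILTER_PRI_MANUAL,
	EFX_FILTER_PRI_REQUIRED,
};

/* Build the mask of priorities removed by a clear at 'priority' */
static unsigned int clear_rx_mask(enum efx_filter_priority priority)
{
	return ((1u << (priority + 1)) - 1) & ~(1u << EFX_FILTER_PRI_AUTO);
}

int main(void)
{
	/* Clearing at MANUAL removes HINT and MANUAL filters only: 0x5 */
	printf("mask(MANUAL)   = 0x%x\n", clear_rx_mask(EFX_FILTER_PRI_MANUAL));
	/* Clearing at REQUIRED removes everything except AUTO: 0xd */
	printf("mask(REQUIRED) = 0x%x\n", clear_rx_mask(EFX_FILTER_PRI_REQUIRED));
	return 0;
}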
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 4b0bd8a1514d..eb59abb57e85 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -42,6 +42,7 @@ struct efx_mcdi_async_param {
unsigned int cmd;
size_t inlen;
size_t outlen;
+ bool quiet;
efx_mcdi_async_completer *complete;
unsigned long cookie;
/* followed by request/response buffer */
@@ -101,6 +102,10 @@ int efx_mcdi_init(struct efx_nic *efx)
netif_err(efx, probe, efx->net_dev,
"Host already registered with MCPU\n");
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
+ efx->primary = efx;
+
return 0;
}
@@ -191,6 +196,8 @@ static int efx_mcdi_errno(unsigned int mcdi_err)
TRANSLATE_ERROR(EALREADY);
TRANSLATE_ERROR(ENOSPC);
#undef TRANSLATE_ERROR
+ case MC_CMD_ERR_ENOTSUP:
+ return -EOPNOTSUPP;
case MC_CMD_ERR_ALLOC_FAIL:
return -ENOBUFS;
case MC_CMD_ERR_MAC_EXIST:
@@ -402,8 +409,9 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
{
struct efx_nic *efx = mcdi->efx;
struct efx_mcdi_async_param *async;
- size_t hdr_len, data_len;
+ size_t hdr_len, data_len, err_len;
efx_dword_t *outbuf;
+ MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
int rc;
if (cmpxchg(&mcdi->state,
@@ -444,6 +452,13 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
outbuf = (efx_dword_t *)(async + 1);
efx->type->mcdi_read_response(efx, outbuf, hdr_len,
min(async->outlen, data_len));
+ if (!timeout && rc && !async->quiet) {
+ err_len = min(sizeof(errbuf), data_len);
+ efx->type->mcdi_read_response(efx, errbuf, hdr_len,
+ sizeof(errbuf));
+ efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
+ err_len, rc);
+ }
async->complete(efx, async->cookie, rc, outbuf, data_len);
kfree(async);
@@ -519,18 +534,129 @@ efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
return 0;
}
+static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+ int rc;
+
+ if (mcdi->mode == MCDI_MODE_POLL)
+ rc = efx_mcdi_poll(efx);
+ else
+ rc = efx_mcdi_await_completion(efx);
+
+ if (rc != 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d mode %d timed out\n",
+ cmd, (int)inlen, mcdi->mode);
+
+ if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
+ netif_err(efx, hw, efx->net_dev,
+ "MCDI request was completed without an event\n");
+ rc = 0;
+ }
+
+ /* Close the race with efx_mcdi_ev_cpl() executing just too late
+ * and completing a request we've just cancelled, by ensuring
+ * that the seqno check therein fails.
+ */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ ++mcdi->credits;
+ spin_unlock_bh(&mcdi->iface_lock);
+ }
+
+ if (rc != 0) {
+ if (outlen_actual)
+ *outlen_actual = 0;
+ } else {
+ size_t hdr_len, data_len, err_len;
+
+ /* At the very least we need a memory barrier here to ensure
+ * we pick up changes from efx_mcdi_ev_cpl(). Protect against
+ * a spurious efx_mcdi_ev_cpl() running concurrently by
+ * acquiring the iface_lock. */
+ spin_lock_bh(&mcdi->iface_lock);
+ rc = mcdi->resprc;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
+ err_len = min(sizeof(errbuf), data_len);
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ BUG_ON(rc > 0);
+
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(outlen, data_len));
+ if (outlen_actual)
+ *outlen_actual = data_len;
+
+ efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);
+
+ if (cmd == MC_CMD_REBOOT && rc == -EIO) {
+ /* Don't reset if MC_CMD_REBOOT returns EIO */
+ } else if (rc == -EIO || rc == -EINTR) {
+ netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
+ -rc);
+ efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ } else if (rc && !quiet) {
+ efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
+ rc);
+ }
+
+ if (rc == -EIO || rc == -EINTR) {
+ msleep(MCDI_STATUS_SLEEP_MS);
+ efx_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
+ }
+ }
+
+ efx_mcdi_release(mcdi);
+ return rc;
+}
+
+static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ int rc;
+
+ rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+ if (rc) {
+ if (outlen_actual)
+ *outlen_actual = 0;
+ return rc;
+ }
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, quiet);
+}
+
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- int rc;
+ return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, false);
+}
- rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
- if (rc)
- return rc;
- return efx_mcdi_rpc_finish(efx, cmd, inlen,
- outbuf, outlen, outlen_actual);
+/* Normally, on receiving an error code in the MCDI response,
+ * efx_mcdi_rpc will log an error message containing (among other
+ * things) the raw error code, by means of efx_mcdi_display_error.
+ * This _quiet version suppresses that; if the caller wishes to log
+ * the error conditionally on the return code, it should call this
+ * function and is then responsible for calling efx_mcdi_display_error
+ * as needed.
+ */
+int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, true);
}
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
@@ -543,35 +669,19 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
if (rc)
return rc;
+ if (efx->mc_bist_for_other_fn)
+ return -ENETDOWN;
+
efx_mcdi_acquire_sync(mcdi);
efx_mcdi_send_request(efx, cmd, inbuf, inlen);
return 0;
}
-/**
- * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
- * @efx: NIC through which to issue the command
- * @cmd: Command type number
- * @inbuf: Command parameters
- * @inlen: Length of command parameters, in bytes
- * @outlen: Length to allocate for response buffer, in bytes
- * @complete: Function to be called on completion or cancellation.
- * @cookie: Arbitrary value to be passed to @complete.
- *
- * This function does not sleep and therefore may be called in atomic
- * context. It will fail if event queues are disabled or if MCDI
- * event completions have been disabled due to an error.
- *
- * If it succeeds, the @complete function will be called exactly once
- * in atomic context, when one of the following occurs:
- * (a) the completion event is received (in NAPI context)
- * (b) event queues are disabled (in the process that disables them)
- * (c) the request times-out (in timer context)
- */
-int
-efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
- const efx_dword_t *inbuf, size_t inlen, size_t outlen,
- efx_mcdi_async_completer *complete, unsigned long cookie)
+static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie, bool quiet)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
struct efx_mcdi_async_param *async;
@@ -581,6 +691,9 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
if (rc)
return rc;
+ if (efx->mc_bist_for_other_fn)
+ return -ENETDOWN;
+
async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
GFP_ATOMIC);
if (!async)
@@ -589,6 +702,7 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
async->cmd = cmd;
async->inlen = inlen;
async->outlen = outlen;
+ async->quiet = quiet;
async->complete = complete;
async->cookie = cookie;
memcpy(async + 1, inbuf, inlen);
@@ -617,79 +731,73 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
return rc;
}
+/**
+ * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @outlen: Length to allocate for response buffer, in bytes
+ * @complete: Function to be called on completion or cancellation.
+ * @cookie: Arbitrary value to be passed to @complete.
+ *
+ * This function does not sleep and therefore may be called in atomic
+ * context. It will fail if event queues are disabled or if MCDI
+ * event completions have been disabled due to an error.
+ *
+ * If it succeeds, the @complete function will be called exactly once
+ * in atomic context, when one of the following occurs:
+ * (a) the completion event is received (in NAPI context)
+ * (b) event queues are disabled (in the process that disables them)
+ * (c) the request times-out (in timer context)
+ */
+int
+efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+ efx_mcdi_async_completer *complete, unsigned long cookie)
+{
+ return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+ cookie, false);
+}
+
+int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen, efx_mcdi_async_completer *complete,
+ unsigned long cookie)
+{
+ return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+ cookie, true);
+}
+
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- int rc;
-
- if (mcdi->mode == MCDI_MODE_POLL)
- rc = efx_mcdi_poll(efx);
- else
- rc = efx_mcdi_await_completion(efx);
-
- if (rc != 0) {
- netif_err(efx, hw, efx->net_dev,
- "MC command 0x%x inlen %d mode %d timed out\n",
- cmd, (int)inlen, mcdi->mode);
-
- if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
- netif_err(efx, hw, efx->net_dev,
- "MCDI request was completed without an event\n");
- rc = 0;
- }
-
- /* Close the race with efx_mcdi_ev_cpl() executing just too late
- * and completing a request we've just cancelled, by ensuring
- * that the seqno check therein fails.
- */
- spin_lock_bh(&mcdi->iface_lock);
- ++mcdi->seqno;
- ++mcdi->credits;
- spin_unlock_bh(&mcdi->iface_lock);
- }
-
- if (rc == 0) {
- size_t hdr_len, data_len;
-
- /* At the very least we need a memory barrier here to ensure
- * we pick up changes from efx_mcdi_ev_cpl(). Protect against
- * a spurious efx_mcdi_ev_cpl() running concurrently by
- * acquiring the iface_lock. */
- spin_lock_bh(&mcdi->iface_lock);
- rc = mcdi->resprc;
- hdr_len = mcdi->resp_hdr_len;
- data_len = mcdi->resp_data_len;
- spin_unlock_bh(&mcdi->iface_lock);
-
- BUG_ON(rc > 0);
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, false);
+}
- if (rc == 0) {
- efx->type->mcdi_read_response(efx, outbuf, hdr_len,
- min(outlen, data_len));
- if (outlen_actual != NULL)
- *outlen_actual = data_len;
- } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
- ; /* Don't reset if MC_CMD_REBOOT returns EIO */
- else if (rc == -EIO || rc == -EINTR) {
- netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
- -rc);
- efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
- } else
- netif_dbg(efx, hw, efx->net_dev,
- "MC command 0x%x inlen %d failed rc=%d\n",
- cmd, (int)inlen, -rc);
+int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, true);
+}
- if (rc == -EIO || rc == -EINTR) {
- msleep(MCDI_STATUS_SLEEP_MS);
- efx_mcdi_poll_reboot(efx);
- mcdi->new_epoch = true;
- }
- }
+void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, int rc)
+{
+ int code = 0, err_arg = 0;
- efx_mcdi_release(mcdi);
- return rc;
+ if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
+ code = MCDI_DWORD(outbuf, ERR_CODE);
+ if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
+ err_arg = MCDI_DWORD(outbuf, ERR_ARG);
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n",
+ cmd, (int)inlen, rc, code, err_arg);
}
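
A hedged sketch of the pattern the quiet helpers enable, with a hypothetical caller: issue the command via efx_mcdi_rpc_quiet() and report only the failures the caller does not expect through efx_mcdi_display_error() (the GET_ASSERTS conversion later in this patch follows the same shape):

static int example_quiet_call(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 0);
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	if (rc)		/* log only when it actually failed */
		efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS, sizeof(inbuf),
				       outbuf, outlen, rc);
	return rc;
}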
/* Switch to polled MCDI completions. This can be called in various
@@ -834,6 +942,30 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
spin_unlock(&mcdi->iface_lock);
}
+/* The MC is going down into BIST mode. Set the BIST flag to block
+ * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
+ * (which doesn't actually execute a reset, it waits for the controlling
+ * function to reset it).
+ */
+static void efx_mcdi_ev_bist(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ spin_lock(&mcdi->iface_lock);
+ efx->mc_bist_for_other_fn = true;
+ if (efx_mcdi_complete_sync(mcdi)) {
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ mcdi->resprc = -EIO;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
+ ++mcdi->credits;
+ }
+ }
+ mcdi->new_epoch = true;
+ efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
+ spin_unlock(&mcdi->iface_lock);
+}
+
/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event)
@@ -867,14 +999,18 @@ void efx_mcdi_process_event(struct efx_channel *channel,
efx_mcdi_sensor_event(efx, event);
break;
case MCDI_EVENT_CODE_SCHEDERR:
- netif_info(efx, hw, efx->net_dev,
- "MC Scheduler error address=0x%x\n", data);
+ netif_dbg(efx, hw, efx->net_dev,
+ "MC Scheduler alert (0x%x)\n", data);
break;
case MCDI_EVENT_CODE_REBOOT:
case MCDI_EVENT_CODE_MC_REBOOT:
netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
efx_mcdi_ev_death(efx, -EIO);
break;
+ case MCDI_EVENT_CODE_MC_BIST:
+ netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
+ efx_mcdi_ev_bist(efx);
+ break;
case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily. We can ignore this. */
break;
@@ -886,6 +1022,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_PTP_PPS:
efx_ptp_event(efx, event);
break;
+ case MCDI_EVENT_CODE_PTP_TIME:
+ efx_time_sync_event(channel, event);
+ break;
case MCDI_EVENT_CODE_TX_FLUSH:
case MCDI_EVENT_CODE_RX_FLUSH:
/* Two flush events will be sent: one to the same event
@@ -1000,13 +1139,27 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
goto fail;
}
+ if (driver_operating) {
+ if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
+ efx->mcdi->fn_flags =
+ MCDI_DWORD(outbuf,
+ DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
+ } else {
+ /* Synthesise flags for Siena */
+ efx->mcdi->fn_flags =
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
+ (efx_port_num(efx) == 0) <<
+ MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
+ }
+ }
+
/* We currently assume we have control of the external link
* and are completely trusted by firmware. Abort probing
* if that's not true for this function.
*/
if (driver_operating &&
- outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
- (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
+ (efx->mcdi->fn_flags &
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
@@ -1097,13 +1250,6 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1220,7 +1366,7 @@ fail1:
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
unsigned int flags, index;
const char *reason;
size_t outlen;
@@ -1235,13 +1381,17 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
retry = 2;
do {
MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
- inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
- outbuf, sizeof(outbuf), &outlen);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
+ inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
+ outbuf, sizeof(outbuf), &outlen);
} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
- if (rc)
+ if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
+ MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
+ outlen, rc);
return rc;
+ }
if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
return -EIO;
@@ -1319,17 +1469,18 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
}
-static int efx_mcdi_reset_port(struct efx_nic *efx)
+static int efx_mcdi_reset_func(struct efx_nic *efx)
{
- int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
- if (rc)
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0);
+ MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
+ ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+ rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
return rc;
}
@@ -1347,7 +1498,6 @@ static int efx_mcdi_reset_mc(struct efx_nic *efx)
return 0;
if (rc == 0)
rc = -EIO;
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1368,7 +1518,7 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
if (method == RESET_TYPE_WORLD)
return efx_mcdi_reset_mc(efx);
else
- return efx_mcdi_reset_port(efx);
+ return efx_mcdi_reset_func(efx);
}
static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
@@ -1449,13 +1599,6 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1496,13 +1639,6 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
int rc;
rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1532,13 +1668,6 @@ static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1558,14 +1687,10 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
+ return rc;
memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
}
static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
@@ -1585,13 +1710,6 @@ static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1609,13 +1727,6 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1630,13 +1741,6 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 15816cacb548..52931aebf3c3 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -94,12 +94,14 @@ struct efx_mcdi_mtd_partition {
* struct efx_mcdi_data - extra state for NICs that implement MCDI
* @iface: Interface/protocol state
* @hwmon: Hardware monitor state
+ * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
*/
struct efx_mcdi_data {
struct efx_mcdi_iface iface;
#ifdef CONFIG_SFC_MCDI_MON
struct efx_mcdi_mon hwmon;
#endif
+ u32 fn_flags;
};
#ifdef CONFIG_SFC_MCDI_MON
@@ -116,12 +118,19 @@ void efx_mcdi_fini(struct efx_nic *efx);
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
size_t inlen, efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
+int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual);
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen);
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
+int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, size_t *outlen_actual);
typedef void efx_mcdi_async_completer(struct efx_nic *efx,
unsigned long cookie, int rc,
@@ -131,6 +140,15 @@ int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
const efx_dword_t *inbuf, size_t inlen, size_t outlen,
efx_mcdi_async_completer *complete,
unsigned long cookie);
+int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie);
+
+void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, int rc);
int efx_mcdi_poll_reboot(struct efx_nic *efx);
void efx_mcdi_mode_poll(struct efx_nic *efx);
@@ -147,6 +165,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
*/
#define MCDI_DECLARE_BUF(_name, _len) \
efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
+#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len) \
+ MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8))
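
The max_t() here guarantees the buffer is at least 8 bytes, so even a command whose nominal response is shorter (or empty) can still hold the ERR_CODE/ERR_ARG words read by efx_mcdi_display_error(); a minimal illustration, assuming a hypothetical zero-length response:

/* Expands to an efx_dword_t[2] (8 bytes) rather than a zero-length array,
 * leaving room for ERR_CODE at offset 0 and ERR_ARG at offset 4.
 */
MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);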
#define _MCDI_PTR(_buf, _offset) \
((u8 *)(_buf) + (_offset))
#define MCDI_PTR(_buf, _field) \
@@ -301,6 +321,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index d72ad4fc3617..bc27d5b580f5 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -24,6 +24,15 @@ enum efx_hwmon_type {
EFX_HWMON_IN, /* voltage */
EFX_HWMON_CURR, /* current */
EFX_HWMON_POWER, /* power */
+ EFX_HWMON_TYPES_COUNT
+};
+
+static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = {
+ [EFX_HWMON_TEMP] = " degC",
+ [EFX_HWMON_COOL] = " rpm", /* though nonsense for a heatsink */
+ [EFX_HWMON_IN] = " mV",
+ [EFX_HWMON_CURR] = " mA",
+ [EFX_HWMON_POWER] = " W",
};
static const struct {
@@ -33,13 +42,13 @@ static const struct {
} efx_mcdi_sensor_type[] = {
#define SENSOR(name, label, hwmon_type, port) \
[MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
- SENSOR(CONTROLLER_TEMP, "Controller ext. temp.", TEMP, -1),
+ SENSOR(CONTROLLER_TEMP, "Controller board temp.", TEMP, -1),
SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
- SENSOR(CONTROLLER_COOLING, "Controller cooling", COOL, -1),
+ SENSOR(CONTROLLER_COOLING, "Controller heat sink", COOL, -1),
SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
- SENSOR(PHY0_COOLING, "PHY cooling", COOL, 0),
+ SENSOR(PHY0_COOLING, "PHY heat sink", COOL, 0),
SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
- SENSOR(PHY1_COOLING, "PHY cooling", COOL, 1),
+ SENSOR(PHY1_COOLING, "PHY heat sink", COOL, 1),
SENSOR(IN_1V0, "1.0V supply", IN, -1),
SENSOR(IN_1V2, "1.2V supply", IN, -1),
SENSOR(IN_1V8, "1.8V supply", IN, -1),
@@ -47,36 +56,42 @@ static const struct {
SENSOR(IN_3V3, "3.3V supply", IN, -1),
SENSOR(IN_12V0, "12.0V supply", IN, -1),
SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
- SENSOR(IN_VREF, "ref. voltage", IN, -1),
- SENSOR(OUT_VAOE, "AOE power supply", IN, -1),
- SENSOR(AOE_TEMP, "AOE temp.", TEMP, -1),
- SENSOR(PSU_AOE_TEMP, "AOE PSU temp.", TEMP, -1),
- SENSOR(PSU_TEMP, "Controller PSU temp.", TEMP, -1),
- SENSOR(FAN_0, NULL, COOL, -1),
- SENSOR(FAN_1, NULL, COOL, -1),
- SENSOR(FAN_2, NULL, COOL, -1),
- SENSOR(FAN_3, NULL, COOL, -1),
- SENSOR(FAN_4, NULL, COOL, -1),
+ SENSOR(IN_VREF, "Ref. voltage", IN, -1),
+ SENSOR(OUT_VAOE, "AOE FPGA supply", IN, -1),
+ SENSOR(AOE_TEMP, "AOE FPGA temp.", TEMP, -1),
+ SENSOR(PSU_AOE_TEMP, "AOE regulator temp.", TEMP, -1),
+ SENSOR(PSU_TEMP, "Controller regulator temp.",
+ TEMP, -1),
+ SENSOR(FAN_0, "Fan 0", COOL, -1),
+ SENSOR(FAN_1, "Fan 1", COOL, -1),
+ SENSOR(FAN_2, "Fan 2", COOL, -1),
+ SENSOR(FAN_3, "Fan 3", COOL, -1),
+ SENSOR(FAN_4, "Fan 4", COOL, -1),
SENSOR(IN_VAOE, "AOE input supply", IN, -1),
SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
SENSOR(IN_IAOE, "AOE input current", CURR, -1),
SENSOR(NIC_POWER, "Board power use", POWER, -1),
SENSOR(IN_0V9, "0.9V supply", IN, -1),
- SENSOR(IN_I0V9, "0.9V input current", CURR, -1),
- SENSOR(IN_I1V2, "1.2V input current", CURR, -1),
- SENSOR(IN_0V9_ADC, "0.9V supply (at ADC)", IN, -1),
- SENSOR(CONTROLLER_2_TEMP, "Controller ext. temp. 2", TEMP, -1),
- SENSOR(VREG_INTERNAL_TEMP, "Voltage regulator temp.", TEMP, -1),
+ SENSOR(IN_I0V9, "0.9V supply current", CURR, -1),
+ SENSOR(IN_I1V2, "1.2V supply current", CURR, -1),
+ SENSOR(IN_0V9_ADC, "0.9V supply (ext. ADC)", IN, -1),
+ SENSOR(CONTROLLER_2_TEMP, "Controller board temp. 2", TEMP, -1),
+ SENSOR(VREG_INTERNAL_TEMP, "Regulator die temp.", TEMP, -1),
SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
- SENSOR(CONTROLLER_VPTAT, "Controller int. temp. raw", IN, -1),
- SENSOR(CONTROLLER_INTERNAL_TEMP, "Controller int. temp.", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT,
+ "Controller PTAT voltage (int. ADC)", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP,
+ "Controller die temp. (int. ADC)", TEMP, -1),
SENSOR(CONTROLLER_VPTAT_EXTADC,
- "Controller int. temp. raw (at ADC)", IN, -1),
+ "Controller PTAT voltage (ext. ADC)", IN, -1),
SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
- "Controller int. temp. (via ADC)", TEMP, -1),
+ "Controller die temp. (ext. ADC)", TEMP, -1),
SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
SENSOR(AIRFLOW, "Air flow raw", IN, -1),
+ SENSOR(VDD08D_VSS08D_CSR, "0.9V die (int. ADC)", IN, -1),
+ SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)", IN, -1),
+ SENSOR(HOTPOINT_TEMP, "Controller board temp. (hotpoint)", TEMP, -1),
#undef SENSOR
};
@@ -91,7 +106,8 @@ static const char *const sensor_status_names[] = {
void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
{
unsigned int type, state, value;
- const char *name = NULL, *state_txt;
+ enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN;
+ const char *name = NULL, *state_txt, *unit;
type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
@@ -99,16 +115,22 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
	/* Deal gracefully with the board having more sensors than we
* know about, but do not expect new sensor states. */
- if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
name = efx_mcdi_sensor_type[type].label;
+ hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+ }
if (!name)
name = "No sensor name available";
EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
state_txt = sensor_status_names[state];
+ EFX_BUG_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT);
+ unit = efx_hwmon_unit[hwmon_type];
+ if (!unit)
+ unit = "";
netif_err(efx, hw, efx->net_dev,
- "Sensor %d (%s) reports condition '%s' for raw value %d\n",
- type, name, state_txt, value);
+ "Sensor %d (%s) reports condition '%s' for value %d%s\n",
+ type, name, state_txt, value, unit);
}
#ifdef CONFIG_SFC_MCDI_MON
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index e0a63ddb7a6c..a707fb5ef14c 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -224,6 +224,8 @@
#define MC_CMD_ERR_MAC_EXIST 0x1009
/* Slave core not present */
#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
#define MC_CMD_ERR_CODE_OFST 0
@@ -390,6 +392,8 @@
* AOE_ERR_DATA)
*/
#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+/* enum: DDR ECC status update */
+#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
@@ -462,6 +466,10 @@
#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
/* enum: the MC has detected an uncorrectable error */
#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: The MC has entered offline BIST mode */
+#define MCDI_EVENT_CODE_MC_BIST 0x19
+/* enum: PTP tick event providing current NIC time */
+#define MCDI_EVENT_CODE_PTP_TIME 0x1a
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -481,15 +489,32 @@
#define MCDI_EVENT_TX_ERR_DATA_OFST 0
#define MCDI_EVENT_TX_ERR_DATA_LBN 0
#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
-/* Seconds field of timestamp */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
+ * timestamp
+ */
#define MCDI_EVENT_PTP_SECONDS_OFST 0
#define MCDI_EVENT_PTP_SECONDS_LBN 0
#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
-/* Nanoseconds field of timestamp */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
+ * of timestamp
+ */
#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
-/* Lowest four bytes of sourceUUID from PTP packet */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LBN 0
+#define MCDI_EVENT_PTP_MINOR_WIDTH 32
+/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
+ */
#define MCDI_EVENT_PTP_UUID_OFST 0
#define MCDI_EVENT_PTP_UUID_LBN 0
#define MCDI_EVENT_PTP_UUID_WIDTH 32
@@ -505,6 +530,13 @@
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+/* For CODE_PTP_TIME events, the major value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
+/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
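
A hedged sketch of how an event consumer might pull the time out of a CODE_PTP_TIME event using these fields; EFX_QWORD_FIELD and the <<19 re-scaling of the truncated minor bits are assumptions based only on the widths defined above, not the driver's actual handler:

static void example_ptp_time_event(struct efx_channel *channel,
				   efx_qword_t *event)
{
	/* Only bits 26:19 of the minor value are carried in the event,
	 * so the reconstructed minor time is deliberately coarse.
	 */
	u32 major = EFX_QWORD_FIELD(*event, MCDI_EVENT_PTP_TIME_MAJOR);
	u32 minor = EFX_QWORD_FIELD(*event,
				    MCDI_EVENT_PTP_TIME_MINOR_26_19) << 19;

	/* units of major/minor come from MC_CMD_PTP_OP_GET_ATTRIBUTES */
	(void)channel;
	(void)major;
	(void)minor;
}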
/* FCDI_EVENT structuredef */
#define FCDI_EVENT_LEN 8
@@ -545,8 +577,10 @@
#define FCDI_EVENT_CODE_TIMED_READ 0x5
/* enum: One or more PPS IN events */
#define FCDI_EVENT_CODE_PPS_IN 0x6
-/* enum: One or more PPS OUT events */
-#define FCDI_EVENT_CODE_PPS_OUT 0x7
+/* enum: Tick event from PTP clock */
+#define FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -560,14 +594,21 @@
#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
-#define FCDI_EVENT_PPS_COUNT_OFST 0
-#define FCDI_EVENT_PPS_COUNT_LBN 0
-#define FCDI_EVENT_PPS_COUNT_WIDTH 32
-
-/* FCDI_EXTENDED_EVENT structuredef */
-#define FCDI_EXTENDED_EVENT_LENMIN 16
-#define FCDI_EXTENDED_EVENT_LENMAX 248
-#define FCDI_EXTENDED_EVENT_LEN(num) (8+8*(num))
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure is overlaid on a normal FCDI event
+ * such that bits 32-63 containing event code, level, source etc. remain the
+ * same. In this case the data field of the header is defined to be the
+ * number of timestamps.
+ */
+#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
/* Number of timestamps following */
#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
@@ -581,14 +622,14 @@
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
/* Timestamp records comprising the event */
-#define FCDI_EXTENDED_EVENT_PPS_TIME_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIME_LEN 8
-#define FCDI_EXTENDED_EVENT_PPS_TIME_LO_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIME_HI_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_TIME_MINNUM 1
-#define FCDI_EXTENDED_EVENT_PPS_TIME_MAXNUM 30
-#define FCDI_EXTENDED_EVENT_PPS_TIME_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIME_WIDTH 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
/***********************************/
@@ -642,6 +683,10 @@
#define MC_CMD_COPYCODE_IN_LEN 16
/* Source address */
#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: The main image should be entered via a copy of a single word from and
+ * to this address when none of the other magic behaviours are required.
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
/* enum: Entering the main image via a copy of a single word from and to this
* address indicates that it should not attempt to start the datapath CPUs.
* This is useful for certain soft rebooting scenarios. (Huntington only)
@@ -872,8 +917,28 @@
#define MC_CMD_PTP_OP_RST_CLK 0x14
/* enum: Enable the forwarding of PPS events to the host */
#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Get the time format used by this NIC for PTP operations */
+#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16
+/* enum: Get the clock attributes. NOTE- extended version of
+ * MC_CMD_PTP_OP_GET_TIME_FORMAT
+ */
+#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16
+/* enum: Get corrections that should be applied to the various different
+ * timestamps
+ */
+#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17
+/* enum: Subscribe to receive periodic time events indicating the current NIC
+ * time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18
+/* enum: Unsubscribe to stop receiving time events */
+#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
+/* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS
+ * input on the same NIC.
+ */
+#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
/* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x16
+#define MC_CMD_PTP_OP_MAX 0x1b
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -938,8 +1003,12 @@
#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
@@ -1005,8 +1074,12 @@
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
@@ -1078,9 +1151,51 @@
#define MC_CMD_PTP_ENABLE_PPS 0x0
/* enum: Disable */
#define MC_CMD_PTP_DISABLE_PPS 0x1
-/* Queueid to send events back */
+/* Queue id to send events back */
#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
+#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
+#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
+#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Event queue to send PTP time events to */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+
+/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Unsubscribe options */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+/* enum: Unsubscribe a single queue */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
+/* enum: Unsubscribe all queues */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+
+/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable PPS test mode, 0 to disable and return result. */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -1088,15 +1203,29 @@
#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0
/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
/* MC_CMD_PTP_OUT_STATUS msgresponse */
#define MC_CMD_PTP_OUT_STATUS_LEN 64
@@ -1116,21 +1245,21 @@
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
/* Number of PPS bad periods */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
-/* Minimum period of PPS pulse */
+/* Minimum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
-/* Maximum period of PPS pulse */
+/* Maximum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
-/* Last period of PPS pulse */
+/* Last period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
-/* Mean period of PPS pulse */
+/* Mean period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
-/* Minimum offset of PPS pulse (signed) */
+/* Minimum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
-/* Maximum offset of PPS pulse (signed) */
+/* Maximum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
-/* Last offset of PPS pulse (signed) */
+/* Last offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
-/* Mean offset of PPS pulse (signed) */
+/* Mean offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
@@ -1146,8 +1275,12 @@
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
/* Host time immediately after NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
/* Number of nanoseconds waited after reading NIC's hardware clock */
@@ -1177,6 +1310,16 @@
#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
/* enum: Timestamp trigger GPIO not working */
#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* enum: Insufficient PPS events to perform checks */
+#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa
+/* enum: PPS time event period not sufficiently close to 1s. */
+#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb
+/* enum: PPS time event ns reading not sufficiently close to zero. */
+#define MC_CMD_PTP_MANF_PPS_NS 0xc
+/* enum: PTP peripheral registers incorrect */
+#define MC_CMD_PTP_MANF_REGISTERS 0xd
+/* enum: Failed to read time from PTP peripheral */
+#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
/* Presence of external oscillator */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
@@ -1198,6 +1341,62 @@
#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
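
For the 2^-27 format, converting a minor value to nanoseconds is straightforward fixed-point arithmetic; a small sketch with a hypothetical helper name (NSEC_PER_SEC from <linux/time.h>):

/* ns = minor * 10^9 / 2^27, computed in 64 bits to avoid overflow */
static inline u32 example_s27_minor_to_ns(u32 minor)
{
	return (u32)(((u64)minor * NSEC_PER_SEC) >> 27);
}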
+
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+
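
The corrected sync window described above is simple arithmetic over one synchronize sample; a hedged sketch, assuming the host times have already been reduced to nanoseconds:

/* (host end - host start) minus the time the MC waited for host end */
static inline u32 example_corrected_sync_window(u32 host_start_ns,
						u32 host_end_ns,
						u32 mc_wait_ns)
{
	return host_end_ns - host_start_ns - mc_wait_ns;
}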
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
+/* Uncorrected error on transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+/* Uncorrected error on receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+
+/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+
/***********************************/
/* MC_CMD_CSR_READ32
@@ -1923,6 +2122,8 @@
#define MC_CMD_MEDIA_SFP_PLUS 0x5
/* enum: 10GBaseT. */
#define MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define MC_CMD_MEDIA_QSFP_PLUS 0x7
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
/* enum: Native clause 22 */
#define MC_CMD_MMD_CLAUSE22 0x0
@@ -2223,6 +2424,8 @@
#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
/* enum: KR Serdes Serial Wireside. */
#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* enum: Near side of AOE Siena side port */
+#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
@@ -2286,6 +2489,10 @@
#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
/* enum: Flow control is off. */
@@ -3175,7 +3382,7 @@
#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
/* MC_CMD_SENSOR_INFO_OUT msgresponse */
-#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12
+#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
@@ -3269,16 +3476,18 @@
#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
/* enum: voltage between VSS08D and VSS08D at CSR (external ADC): mV */
#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* enum: Hotpoint temperature: degC */
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
-#define MC_CMD_SENSOR_ENTRY_MINNUM 1
+#define MC_CMD_SENSOR_ENTRY_MINNUM 0
#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
-#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 12
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
@@ -3291,7 +3500,7 @@
/* MC_CMD_SENSOR_ENTRY_LEN 8 */
/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
-/* MC_CMD_SENSOR_ENTRY_MINNUM 1 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
@@ -3864,6 +4073,18 @@
#define NVRAM_PARTITION_TYPE_ID_LBN 0
#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+/* LICENSED_APP_ID structuredef */
+#define LICENSED_APP_ID_LEN 4
+#define LICENSED_APP_ID_ID_OFST 0
+/* enum: OpenOnload */
+#define LICENSED_APP_ID_ONLOAD 0x1
+/* enum: PTP timestamping */
+#define LICENSED_APP_ID_PTP 0x2
+/* enum: SolarCapture Pro */
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+#define LICENSED_APP_ID_ID_LBN 0
+#define LICENSED_APP_ID_ID_WIDTH 32
+
/***********************************/
/* MC_CMD_READ_REGS
@@ -4021,6 +4242,8 @@
#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -4179,6 +4402,9 @@
#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+/* MC_CMD_PROXY_CMD_OUT msgresponse */
+#define MC_CMD_PROXY_CMD_OUT_LEN 0
+
/***********************************/
/* MC_CMD_ALLOC_BUFTBL_CHUNK
@@ -4213,7 +4439,7 @@
/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 252
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
/* ID */
@@ -4226,7 +4452,7 @@
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 30
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
@@ -6800,6 +7026,30 @@
/***********************************/
+/* MC_CMD_CAP_BLK_READ
+ * Read multiple 64bit words from capture block memory
+ */
+#define MC_CMD_CAP_BLK_READ 0xe7
+
+/* MC_CMD_CAP_BLK_READ_IN msgrequest */
+#define MC_CMD_CAP_BLK_READ_IN_LEN 12
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+
+/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
+#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
+#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
+#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
+
+
+/***********************************/
/* MC_CMD_DUMP_DO
* Take a dump of the DUT state
*/
@@ -6826,6 +7076,10 @@
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+/* enum: The uart port this command was received over (if using a uart
+ * transport)
+ */
+#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
@@ -6942,39 +7196,68 @@
/***********************************/
-/* MC_CMD_START_KR_EYE_PLOT
- * Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
- * signal.
- */
-#define MC_CMD_START_KR_EYE_PLOT 0xee
-
-/* MC_CMD_START_KR_EYE_PLOT_IN msgrequest */
-#define MC_CMD_START_KR_EYE_PLOT_IN_LEN 4
-#define MC_CMD_START_KR_EYE_PLOT_IN_LANE_OFST 0
-
-/* MC_CMD_START_KR_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_START_KR_EYE_PLOT_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_POLL_KR_EYE_PLOT
- * Poll KR Serdes Eye diagram plot. Returns one row of BER data. The caller
- * should call this command repeatedly after starting eye plot, until no more
- * data is returned.
- */
-#define MC_CMD_POLL_KR_EYE_PLOT 0xef
-
-/* MC_CMD_POLL_KR_EYE_PLOT_IN msgrequest */
-#define MC_CMD_POLL_KR_EYE_PLOT_IN_LEN 0
-
-/* MC_CMD_POLL_KR_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMIN 0
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX 252
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_OFST 0
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_LEN 2
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MINNUM 0
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+/* MC_CMD_UART_SEND_DATA
+ * Send checksummed[sic] block of data over the uart. Response is a placeholder
+ * should we wish to make this reliable; currently requests are fire-and-
+ * forget.
+ */
+#define MC_CMD_UART_SEND_DATA 0xee
+
+/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
+#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
+#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
+#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
+/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+/* Offset at which to write the data */
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
+
+/* MC_CMD_UART_SEND_DATA_IN msgresponse */
+#define MC_CMD_UART_SEND_DATA_IN_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UART_RECV_DATA
+ * Request checksummed[sic] block of data over the uart. Only a placeholder,
+ * subject to change and not currently implemented.
+ */
+#define MC_CMD_UART_RECV_DATA 0xef
+
+/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
+#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
+/* CRC32 over OFFSET, LENGTH, RESERVED */
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+/* Offset from which to read the data */
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+
+/* MC_CMD_UART_RECV_DATA_IN msgresponse */
+#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
+#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
+#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
+/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+/* Offset at which to write the data */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
+#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
/***********************************/
@@ -7026,6 +7309,15 @@
#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
/* enum: Force KR Serdes reset / recalibration */
#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
+ * signal.
+ */
+#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
/* Align the arguments to 32 bits */
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
@@ -7123,6 +7415,91 @@
/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
+/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TX Amplitude */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
+/* enum: De-Emphasis Tap1 Magnitude (0-7) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
+/* enum: De-Emphasis Tap1 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
+/* enum: De-Emphasis Tap2 Magnitude (0-6) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
+/* enum: De-Emphasis Tap2 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
+/* enum: Pre-Emphasis Magnitude */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
+/* enum: Pre-Emphasis Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
+/* enum: TX Slew Rate Coarse control */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
+/* enum: TX Slew Rate Fine control */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0
+
/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
/* Requested operation */
@@ -7135,6 +7512,37 @@
/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
/***********************************/
/* MC_CMD_PCIE_TUNE
@@ -7157,6 +7565,13 @@
#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
/* enum: Override TX Driver settings */
#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
+#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
/* Align the arguments to 32 bits */
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
@@ -7258,6 +7673,37 @@
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
/***********************************/
/* MC_CMD_LICENSING
@@ -7310,5 +7756,152 @@
*/
#define MC_CMD_MC2MC_PROXY 0xf4
+/* MC_CMD_MC2MC_PROXY_IN msgrequest */
+#define MC_CMD_MC2MC_PROXY_IN_LEN 0
+
+/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
+#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.)
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
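
A hedged sketch of querying this command for the PTP application, using the LICENSED_APP_ID enum added earlier in this header; the helper name is hypothetical:

/* Returns 1 if licensed, 0 if not, negative errno on failure */
static int example_ptp_licensed(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_LICENSED_APP_STATE_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, GET_LICENSED_APP_STATE_IN_APP_ID,
		       LICENSED_APP_ID_PTP);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LICENSED_APP_STATE,
			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
			  &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN)
		return -EIO;
	return MCDI_DWORD(outbuf, GET_LICENSED_APP_STATE_OUT_STATE) ==
	       MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED;
}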
+
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+/* enum: validate application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* arguments specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+/* result specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+/* validation challenge */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+/* validation response */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_SNIFF_CONFIG
+ * Configure port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic delivered to the host (non-promiscuous
+ * mode) or all traffic arriving at the port (promiscuous mode) may be
+ * delivered to a specific queue, or a set of queues with RSS.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
+
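A hedged sketch of building this request with the helpers used earlier in this patch; it assumes a two-field MCDI_POPULATE_DWORD_2 variant of the MCDI_POPULATE_DWORD_* family, and the function itself is illustrative rather than part of the patch:

	/* Illustrative only: enable simple (single-queue) port sniffing */
	static int efx_set_port_sniff(struct efx_nic *efx, bool enable,
				      bool promiscuous, u32 rx_queue)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN);

		MCDI_POPULATE_DWORD_2(inbuf, SET_PORT_SNIFF_CONFIG_IN_FLAGS,
				      SET_PORT_SNIFF_CONFIG_IN_ENABLE, enable,
				      SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS,
				      promiscuous);
		MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE, rx_queue);
		MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_MODE,
			       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
		/* 0xFFFFFFFF is documented above as never being a valid RSS handle */
		MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT, 0xffffffff);

		return efx_mcdi_rpc(efx, MC_CMD_SET_PORT_SNIFF_CONFIG,
				    inbuf, sizeof(inbuf), NULL, 0, NULL);
	}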
+
+/***********************************/
+/* MC_CMD_GET_PORT_SNIFF_CONFIG
+ * Obtain the current port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 7b6be61d549f..91d23252f8fa 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -90,13 +90,6 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -143,17 +136,13 @@ static int efx_mcdi_mdio_read(struct net_device *net_dev,
rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
+ return rc;
if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
MC_CMD_MDIO_STATUS_GOOD)
return -EIO;
return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
}
static int efx_mcdi_mdio_write(struct net_device *net_dev,
@@ -174,17 +163,13 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
+ return rc;
if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
MC_CMD_MDIO_STATUS_GOOD)
return -EIO;
return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
}
static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
@@ -487,17 +472,14 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ if (rc)
efx->link_state.up = false;
- } else {
+ else
efx_mcdi_phy_decode_link(
efx, &efx->link_state,
MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
- }
return !efx_link_state_equal(&efx->link_state, &old_state);
}
@@ -531,11 +513,8 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ if (rc)
return;
- }
ecmd->lp_advertising =
mcdi_to_ethtool_cap(phy_cfg->media,
MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
@@ -918,21 +897,29 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), &outlength);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ if (rc)
return true;
- }
return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
}
-static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
- u32 dma_len, int enable, int clear)
+enum efx_stats_action {
+ EFX_STATS_ENABLE,
+ EFX_STATS_DISABLE,
+ EFX_STATS_PULL,
+};
+
+static int efx_mcdi_mac_stats(struct efx_nic *efx,
+ enum efx_stats_action action, int clear)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
int rc;
- int period = enable ? 1000 : 0;
+ int change = action == EFX_STATS_PULL ? 0 : 1;
+ int enable = action == EFX_STATS_ENABLE ? 1 : 0;
+ int period = action == EFX_STATS_ENABLE ? 1000 : 0;
+ dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
+ u32 dma_len = action != EFX_STATS_DISABLE ?
+ MC_CMD_MAC_NSTATS * sizeof(u64) : 0;
BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
@@ -940,8 +927,8 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
MAC_STATS_IN_DMA, !!enable,
MAC_STATS_IN_CLEAR, clear,
- MAC_STATS_IN_PERIODIC_CHANGE, 1,
- MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
+ MAC_STATS_IN_PERIODIC_CHANGE, change,
+ MAC_STATS_IN_PERIODIC_ENABLE, enable,
MAC_STATS_IN_PERIODIC_CLEAR, 0,
MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MAC_STATS_IN_PERIOD_MS, period);
@@ -949,14 +936,6 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
- __func__, enable ? "enable" : "disable", rc);
return rc;
}
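For reference, the mapping implemented above is: EFX_STATS_ENABLE sets PERIODIC_CHANGE and PERIODIC_ENABLE with a 1000 ms period and the full-length DMA buffer; EFX_STATS_DISABLE sets PERIODIC_CHANGE with enable, period and DMA length all zero; EFX_STATS_PULL leaves the periodic setting untouched (change = 0) while still passing the full buffer length, as used by efx_mcdi_mac_pull_stats() below.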
@@ -966,13 +945,29 @@ void efx_mcdi_mac_start_stats(struct efx_nic *efx)
dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
- MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
+ efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
}
void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
{
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+ efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0);
+}
+
+#define EFX_MAC_STATS_WAIT_US 100
+#define EFX_MAC_STATS_WAIT_ATTEMPTS 10
+
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats = efx->stats_buffer.addr;
+ int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
+
+ dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+ efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
+
+ while (dma_stats[MC_CMD_MAC_GENERATION_END] ==
+ EFX_MC_STATS_GENERATION_INVALID &&
+ attempts-- != 0)
+ udelay(EFX_MAC_STATS_WAIT_US);
}
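With EFX_MAC_STATS_WAIT_US = 100 and EFX_MAC_STATS_WAIT_ATTEMPTS = 10, this loop busy-waits for at most about 1 ms (10 x 100 us) for the firmware to overwrite the invalid generation marker before giving up silently.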
int efx_mcdi_port_probe(struct efx_nic *efx)
@@ -1003,7 +998,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
efx->stats_buffer.addr,
(u64)virt_to_phys(efx->stats_buffer.addr));
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
+ efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 1);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 542a0d252ae0..af2b8c59a903 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -91,6 +91,7 @@
/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;
+struct hwtstamp_config;
struct efx_self_tests;
@@ -287,12 +288,9 @@ struct efx_rx_buffer {
* Used to facilitate sharing dma mappings between recycled rx buffers
* and those passed up to the kernel.
*
- * @refcnt: Number of struct efx_rx_buffer's referencing this page.
- * When refcnt falls to zero, the page is unmapped for dma
* @dma_addr: The dma address of this page.
*/
struct efx_rx_page_state {
- unsigned refcnt;
dma_addr_t dma_addr;
unsigned int __pad[0] ____cacheline_aligned;
@@ -362,10 +360,11 @@ struct efx_rx_queue {
unsigned int slow_fill_count;
};
-enum efx_rx_alloc_method {
- RX_ALLOC_METHOD_AUTO = 0,
- RX_ALLOC_METHOD_SKB = 1,
- RX_ALLOC_METHOD_PAGE = 2,
+enum efx_sync_events_state {
+ SYNC_EVENTS_DISABLED = 0,
+ SYNC_EVENTS_QUIESCENT,
+ SYNC_EVENTS_REQUESTED,
+ SYNC_EVENTS_VALID,
};
/**
@@ -407,6 +406,9 @@ enum efx_rx_alloc_method {
* by __efx_rx_packet(), if @rx_pkt_n_frags != 0
* @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel
+ * @sync_events_state: Current state of sync events on this channel
+ * @sync_timestamp_major: Major part of the last ptp sync event
+ * @sync_timestamp_minor: Minor part of the last ptp sync event
*/
struct efx_channel {
struct efx_nic *efx;
@@ -445,6 +447,10 @@ struct efx_channel {
struct efx_rx_queue rx_queue;
struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
+
+ enum efx_sync_events_state sync_events_state;
+ u32 sync_timestamp_major;
+ u32 sync_timestamp_minor;
};
/**
@@ -520,15 +526,6 @@ enum nic_state {
STATE_RECOVERY = 3, /* device recovering from PCI error */
};
-/*
- * Alignment of the skb->head which wraps a page-allocated RX buffer
- *
- * The skb allocated to wrap an rx_buffer can have this alignment. Since
- * the data is memcpy'd from the rx_buf, it does not need to be equal to
- * NET_IP_ALIGN.
- */
-#define EFX_PAGE_SKB_ALIGN 2
-
/* Forward declaration */
struct efx_nic;
@@ -651,6 +648,13 @@ struct vfdi_status;
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
* @pci_dev: The PCI device
+ * @node: List node for maintaining primary/secondary function lists
+ * @primary: &struct efx_nic instance for the primary function of this
+ * controller. May be the same structure, and may be %NULL if no
+ * primary function is bound. Serialised by rtnl_lock.
+ * @secondary_list: List of &struct efx_nic instances for the secondary PCI
+ * functions of the controller, if this is for the primary function.
+ * Serialised by rtnl_lock.
* @type: Controller type attributes
* @legacy_irq: IRQ number
* @workqueue: Workqueue for port reconfigures and the HW monitor.
@@ -694,6 +698,8 @@ struct vfdi_status;
* (valid only if @rx_prefix_size != 0; always negative)
* @rx_packet_len_offset: Offset of RX packet length from start of packet data
* (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
+ * @rx_packet_ts_offset: Offset of timestamp from start of packet data
+ * (valid only if channel->sync_timestamps_enabled; always negative)
* @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
* @rx_scatter: Scatter mode enabled for receives
@@ -763,6 +769,7 @@ struct vfdi_status;
* @local_lock: Mutex protecting %local_addr_list and %local_page_list.
* @peer_work: Work item to broadcast peer addresses to VMs.
* @ptp_data: PTP state data
+ * @vpd_sn: Serial number read from VPD
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -777,6 +784,9 @@ struct efx_nic {
/* The following fields should be written very rarely */
char name[IFNAMSIZ];
+ struct list_head node;
+ struct efx_nic *primary;
+ struct list_head secondary_list;
struct pci_dev *pci_dev;
unsigned int port_num;
const struct efx_nic_type *type;
@@ -828,6 +838,7 @@ struct efx_nic {
unsigned int rx_prefix_size;
int rx_packet_hash_offset;
int rx_packet_len_offset;
+ int rx_packet_ts_offset;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
bool rx_scatter;
@@ -852,10 +863,14 @@ struct efx_nic {
struct work_struct mac_work;
bool port_enabled;
+ bool mc_bist_for_other_fn;
bool port_initialized;
struct net_device *net_dev;
struct efx_buffer stats_buffer;
+ u64 rx_nodesc_drops_total;
+ u64 rx_nodesc_drops_while_down;
+ bool rx_nodesc_drops_prev_state;
unsigned int phy_type;
const struct efx_phy_operations *phy_op;
@@ -907,6 +922,8 @@ struct efx_nic {
struct efx_ptp_data *ptp_data;
+ char *vpd_sn;
+
/* The following fields may be written more often */
struct delayed_work monitor_work ____cacheline_aligned_in_smp;
@@ -959,6 +976,7 @@ struct efx_mtd_partition {
* @update_stats: Update statistics not provided by event handling.
* Either argument may be %NULL.
* @start_stats: Start the regular fetching of statistics
+ * @pull_stats: Pull stats from the NIC and wait until they arrive.
* @stop_stats: Stop the regular fetching of statistics
* @set_id_led: Set state of identifying LED or revert to automatic function
* @push_irq_moderation: Apply interrupt moderation value
@@ -997,7 +1015,7 @@ struct efx_mtd_partition {
* @tx_init: Initialise TX queue on the NIC
* @tx_remove: Free resources for TX queue
* @tx_write: Write TX descriptors and doorbell
- * @rx_push_indir_table: Write RSS indirection table to the NIC
+ * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
* @rx_probe: Allocate resources for RX queue
* @rx_init: Initialise RX queue on the NIC
* @rx_remove: Free resources for RX queue
@@ -1017,7 +1035,8 @@ struct efx_mtd_partition {
* @filter_insert: add or replace a filter
* @filter_remove_safe: remove a filter by ID, carefully
* @filter_get_safe: retrieve a filter by ID, carefully
- * @filter_clear_rx: remove RX filters by priority
+ * @filter_clear_rx: Remove all RX filters whose priority is less than or
+ * equal to the given priority and is not %EFX_FILTER_PRI_AUTO
* @filter_count_rx_used: Get the number of filters in use at a given priority
* @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
* @filter_get_rx_ids: Get list of RX filters at a given priority
@@ -1037,6 +1056,12 @@ struct efx_mtd_partition {
* @mtd_sync: Wait for write-back to complete on MTD partition. This
* also notifies the driver that a writer has finished using this
* partition.
+ * @ptp_write_host_time: Send host time to MC as part of sync protocol
+ * @ptp_set_ts_sync_events: Enable or disable sync events for inline RX
+ * timestamping, possibly only temporarily for the purposes of a reset.
+ * @ptp_set_ts_config: Set hardware timestamp configuration. The flags
+ * and tx_type will already have been validated but this operation
+ * must validate and update rx_filter.
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1046,6 +1071,7 @@ struct efx_mtd_partition {
* @max_dma_mask: Maximum possible DMA mask
* @rx_prefix_size: Size of RX prefix before packet data
* @rx_hash_offset: Offset of RX flow hash within prefix
+ * @rx_ts_offset: Offset of timestamp within prefix
* @rx_buffer_padding: Size of padding at end of RX packet
* @can_rx_scatter: NIC is able to scatter packets to multiple buffers
* @always_rx_scatter: NIC will always scatter packets to multiple buffers
@@ -1055,6 +1081,7 @@ struct efx_mtd_partition {
* @offload_features: net_device feature flags for protocol offload
* features implemented in hardware
* @mcdi_max_ver: Maximum MCDI version supported
+ * @hwtstamp_filters: Mask of hardware timestamp filter types supported
*/
struct efx_nic_type {
unsigned int (*mem_map_size)(struct efx_nic *efx);
@@ -1077,6 +1104,7 @@ struct efx_nic_type {
size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
void (*start_stats)(struct efx_nic *efx);
+ void (*pull_stats)(struct efx_nic *efx);
void (*stop_stats)(struct efx_nic *efx);
void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
void (*push_irq_moderation)(struct efx_channel *channel);
@@ -1105,7 +1133,7 @@ struct efx_nic_type {
void (*tx_init)(struct efx_tx_queue *tx_queue);
void (*tx_remove)(struct efx_tx_queue *tx_queue);
void (*tx_write)(struct efx_tx_queue *tx_queue);
- void (*rx_push_indir_table)(struct efx_nic *efx);
+ void (*rx_push_rss_config)(struct efx_nic *efx);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
void (*rx_init)(struct efx_rx_queue *rx_queue);
void (*rx_remove)(struct efx_rx_queue *rx_queue);
@@ -1130,8 +1158,8 @@ struct efx_nic_type {
int (*filter_get_safe)(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *);
- void (*filter_clear_rx)(struct efx_nic *efx,
- enum efx_filter_priority priority);
+ int (*filter_clear_rx)(struct efx_nic *efx,
+ enum efx_filter_priority priority);
u32 (*filter_count_rx_used)(struct efx_nic *efx,
enum efx_filter_priority priority);
u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
@@ -1155,6 +1183,9 @@ struct efx_nic_type {
int (*mtd_sync)(struct mtd_info *mtd);
#endif
void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
+ int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
+ int (*ptp_set_ts_config)(struct efx_nic *efx,
+ struct hwtstamp_config *init);
int revision;
unsigned int txd_ptr_tbl_base;
@@ -1165,6 +1196,7 @@ struct efx_nic_type {
u64 max_dma_mask;
unsigned int rx_prefix_size;
unsigned int rx_hash_offset;
+ unsigned int rx_ts_offset;
unsigned int rx_buffer_padding;
bool can_rx_scatter;
bool always_rx_scatter;
@@ -1173,6 +1205,7 @@ struct efx_nic_type {
netdev_features_t offload_features;
int mcdi_max_ver;
unsigned int max_rx_ip_filters;
+ u32 hwtstamp_filters;
};
/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 9c90bf56090f..79226b19e3c4 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -519,3 +519,15 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
}
}
}
+
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
+{
+ /* if down, or this is the first update after coming up */
+ if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
+ efx->rx_nodesc_drops_while_down +=
+ *rx_nodesc_drops - efx->rx_nodesc_drops_total;
+ efx->rx_nodesc_drops_total = *rx_nodesc_drops;
+ efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
+ *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
+}
+
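As an illustrative example (numbers not from the patch): if the hardware counter has reached 100 drops and 40 of them accumulated while the interface was down, rx_nodesc_drops_while_down ends up at 40 and the value reported upwards is 60, so only drops seen while the interface was up are attributed to it.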
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 91c63ec79c5f..a001fae1a8d7 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -412,8 +412,8 @@ enum {
EF10_STAT_rx_dp_q_disabled_packets,
EF10_STAT_rx_dp_di_dropped_packets,
EF10_STAT_rx_dp_streaming_packets,
- EF10_STAT_rx_dp_emerg_fetch,
- EF10_STAT_rx_dp_emerg_wait,
+ EF10_STAT_rx_dp_hlb_fetch,
+ EF10_STAT_rx_dp_hlb_wait,
EF10_STAT_COUNT
};
@@ -554,12 +554,29 @@ int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
bool spoofchk);
struct ethtool_ts_info;
-void efx_ptp_probe(struct efx_nic *efx);
-int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
+void efx_ptp_remove(struct efx_nic *efx);
+int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_get_mode(struct efx_nic *efx);
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ unsigned int new_mode);
int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb);
+static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb)
+{
+ if (channel->sync_events_state == SYNC_EVENTS_VALID)
+ __efx_rx_skb_attach_timestamp(channel, skb);
+}
void efx_ptp_start_datapath(struct efx_nic *efx);
void efx_ptp_stop_datapath(struct efx_nic *efx);
@@ -678,8 +695,8 @@ int efx_farch_filter_remove_safe(struct efx_nic *efx,
int efx_farch_filter_get_safe(struct efx_nic *efx,
enum efx_filter_priority priority, u32 filter_id,
struct efx_filter_spec *);
-void efx_farch_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority);
+int efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority);
u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
@@ -747,10 +764,6 @@ int falcon_reset_xaui(struct efx_nic *efx);
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
void efx_farch_init_common(struct efx_nic *efx);
void efx_ef10_handle_drain_event(struct efx_nic *efx);
-static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
-{
- efx->type->rx_push_indir_table(efx);
-}
void efx_farch_rx_push_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
@@ -774,6 +787,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
const void *dma_buf, bool accumulate);
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
#define EFX_MAX_FLUSH_TIME 5000
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 3dd39dcfe36b..eb75fbd11a01 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -62,7 +62,7 @@
#define SYNCHRONISATION_GRANULARITY_NS 200
/* Minimum permitted length of a (corrected) synchronisation time */
-#define MIN_SYNCHRONISATION_NS 120
+#define DEFAULT_MIN_SYNCHRONISATION_NS 120
/* Maximum permitted length of a (corrected) synchronisation time */
#define MAX_SYNCHRONISATION_NS 1000
@@ -195,26 +195,29 @@ struct efx_ptp_event_rx {
/**
* struct efx_ptp_timeset - Synchronisation between host and MC
* @host_start: Host time immediately before hardware timestamp taken
- * @seconds: Hardware timestamp, seconds
- * @nanoseconds: Hardware timestamp, nanoseconds
+ * @major: Hardware timestamp, major
+ * @minor: Hardware timestamp, minor
* @host_end: Host time immediately after hardware timestamp taken
- * @waitns: Number of nanoseconds between hardware timestamp being read and
+ * @wait: Number of NIC clock ticks between hardware timestamp being read and
* host end time being seen
* @window: Difference of host_end and host_start
* @valid: Whether this timeset is valid
*/
struct efx_ptp_timeset {
u32 host_start;
- u32 seconds;
- u32 nanoseconds;
+ u32 major;
+ u32 minor;
u32 host_end;
- u32 waitns;
+ u32 wait;
u32 window; /* Derived: end - start, allowing for wrap */
};
/**
* struct efx_ptp_data - Precision Time Protocol (PTP) state
- * @channel: The PTP channel
+ * @efx: The NIC context
+ * @channel: The PTP channel (Siena only)
+ * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are
+ * separate events)
* @rxq: Receive queue (awaiting timestamps)
* @txq: Transmit queue
* @evt_list: List of MC receive events awaiting packets
@@ -231,41 +234,42 @@ struct efx_ptp_timeset {
* @config: Current timestamp configuration
* @enabled: PTP operation enabled
* @mode: Mode in which PTP operating (PTP version)
+ * @time_format: Time format supported by this NIC
+ * @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time
+ * @nic_to_kernel_time: Function to convert from NIC to kernel time
+ * @min_synchronisation_ns: Minimum acceptable corrected sync window
+ * @ts_corrections.tx: Required driver correction of transmit timestamps
+ * @ts_corrections.rx: Required driver correction of receive timestamps
+ * @ts_corrections.pps_out: PPS output error (information only)
+ * @ts_corrections.pps_in: Required driver correction of PPS input timestamps
* @evt_frags: Partly assembled PTP events
* @evt_frag_idx: Current fragment number
* @evt_code: Last event code
* @start: Address at which MC indicates ready for synchronisation
* @host_time_pps: Host time at last PPS
- * @last_sync_ns: Last number of nanoseconds between readings when synchronising
- * @base_sync_ns: Number of nanoseconds for last synchronisation.
- * @base_sync_valid: Whether base_sync_time is valid.
* @current_adjfreq: Current ppb adjustment.
- * @phc_clock: Pointer to registered phc device
+ * @phc_clock: Pointer to registered phc device (if primary function)
* @phc_clock_info: Registration structure for phc device
* @pps_work: pps work task for handling pps events
* @pps_workwq: pps work queue
* @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
* @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
* allocations in main data path).
- * @debug_ptp_dir: PTP debugfs directory
- * @missed_rx_sync: Number of packets received without syncrhonisation.
* @good_syncs: Number of successful synchronisations.
- * @no_time_syncs: Number of synchronisations with no good times.
- * @bad_sync_durations: Number of synchronisations with bad durations.
+ * @fast_syncs: Number of synchronisations requiring short delay
* @bad_syncs: Number of failed synchronisations.
- * @last_sync_time: Number of nanoseconds for last synchronisation.
* @sync_timeouts: Number of synchronisation timeouts
- * @fast_syncs: Number of synchronisations requiring short delay
- * @min_sync_delta: Minimum time between event and synchronisation
- * @max_sync_delta: Maximum time between event and synchronisation
- * @average_sync_delta: Average time between event and synchronisation.
- * Modified moving average.
- * @last_sync_delta: Last time between event and synchronisation
- * @mc_stats: Context value for MC statistics
+ * @no_time_syncs: Number of synchronisations with no good times.
+ * @invalid_sync_windows: Number of sync windows with bad durations.
+ * @undersize_sync_windows: Number of corrected sync windows that are too small
+ * @oversize_sync_windows: Number of corrected sync windows that are too large
+ * @rx_no_timestamp: Number of packets received without a timestamp.
* @timeset: Last set of synchronisation statistics.
*/
struct efx_ptp_data {
+ struct efx_nic *efx;
struct efx_channel *channel;
+ bool rx_ts_inline;
struct sk_buff_head rxq;
struct sk_buff_head txq;
struct list_head evt_list;
@@ -282,14 +286,22 @@ struct efx_ptp_data {
struct hwtstamp_config config;
bool enabled;
unsigned int mode;
+ unsigned int time_format;
+ void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
+ ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor,
+ s32 correction);
+ unsigned int min_synchronisation_ns;
+ struct {
+ s32 tx;
+ s32 rx;
+ s32 pps_out;
+ s32 pps_in;
+ } ts_corrections;
efx_qword_t evt_frags[MAX_EVENT_FRAGS];
int evt_frag_idx;
int evt_code;
struct efx_buffer start;
struct pps_event_time host_time_pps;
- unsigned last_sync_ns;
- unsigned base_sync_ns;
- bool base_sync_valid;
s64 current_adjfreq;
struct ptp_clock *phc_clock;
struct ptp_clock_info phc_clock_info;
@@ -297,6 +309,16 @@ struct efx_ptp_data {
struct workqueue_struct *pps_workwq;
bool nic_ts_enabled;
MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
+
+ unsigned int good_syncs;
+ unsigned int fast_syncs;
+ unsigned int bad_syncs;
+ unsigned int sync_timeouts;
+ unsigned int no_time_syncs;
+ unsigned int invalid_sync_windows;
+ unsigned int undersize_sync_windows;
+ unsigned int oversize_sync_windows;
+ unsigned int rx_no_timestamp;
struct efx_ptp_timeset
timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
};
@@ -309,19 +331,263 @@ static int efx_phc_settime(struct ptp_clock_info *ptp,
static int efx_phc_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
+#define PTP_SW_STAT(ext_name, field_name) \
+ { #ext_name, 0, offsetof(struct efx_ptp_data, field_name) }
+#define PTP_MC_STAT(ext_name, mcdi_name) \
+ { #ext_name, 32, MC_CMD_PTP_OUT_STATUS_STATS_ ## mcdi_name ## _OFST }
+static const struct efx_hw_stat_desc efx_ptp_stat_desc[] = {
+ PTP_SW_STAT(ptp_good_syncs, good_syncs),
+ PTP_SW_STAT(ptp_fast_syncs, fast_syncs),
+ PTP_SW_STAT(ptp_bad_syncs, bad_syncs),
+ PTP_SW_STAT(ptp_sync_timeouts, sync_timeouts),
+ PTP_SW_STAT(ptp_no_time_syncs, no_time_syncs),
+ PTP_SW_STAT(ptp_invalid_sync_windows, invalid_sync_windows),
+ PTP_SW_STAT(ptp_undersize_sync_windows, undersize_sync_windows),
+ PTP_SW_STAT(ptp_oversize_sync_windows, oversize_sync_windows),
+ PTP_SW_STAT(ptp_rx_no_timestamp, rx_no_timestamp),
+ PTP_MC_STAT(ptp_tx_timestamp_packets, TX),
+ PTP_MC_STAT(ptp_rx_timestamp_packets, RX),
+ PTP_MC_STAT(ptp_timestamp_packets, TS),
+ PTP_MC_STAT(ptp_filter_matches, FM),
+ PTP_MC_STAT(ptp_non_filter_matches, NFM),
+};
+#define PTP_STAT_COUNT ARRAY_SIZE(efx_ptp_stat_desc)
+static const unsigned long efx_ptp_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL,
+};
+
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings)
+{
+ if (!efx->ptp_data)
+ return 0;
+
+ return efx_nic_describe_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+ efx_ptp_stat_mask, strings);
+}
+
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_STATUS_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_STATUS_LEN);
+ size_t i;
+ int rc;
+
+ if (!efx->ptp_data)
+ return 0;
+
+ /* Copy software statistics */
+ for (i = 0; i < PTP_STAT_COUNT; i++) {
+ if (efx_ptp_stat_desc[i].dma_width)
+ continue;
+ stats[i] = *(unsigned int *)((char *)efx->ptp_data +
+ efx_ptp_stat_desc[i].offset);
+ }
+
+ /* Fetch MC statistics. We *must* fill in all statistics or
+ * risk leaking kernel memory to userland, so if the MCDI
+ * request fails we pretend we got zeroes.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_STATUS);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC_CMD_PTP_OP_STATUS failed (%d)\n", rc);
+ memset(outbuf, 0, sizeof(outbuf));
+ }
+ efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+ efx_ptp_stat_mask,
+ stats, _MCDI_PTR(outbuf, 0), false);
+
+ return PTP_STAT_COUNT;
+}
+
+/* For Siena platforms NIC time is in seconds and nanoseconds */
+static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec ts = ns_to_timespec(ns);
+ *nic_major = ts.tv_sec;
+ *nic_minor = ts.tv_nsec;
+}
+
+static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ ktime_t kt = ktime_set(nic_major, nic_minor);
+ if (correction >= 0)
+ kt = ktime_add_ns(kt, (u64)correction);
+ else
+ kt = ktime_sub_ns(kt, (u64)-correction);
+ return kt;
+}
+
+/* To convert from s27 format to ns we multiply then divide by a power of 2.
+ * For the conversion from ns to s27, the operation is also converted to a
+ * multiply and shift.
+ */
+#define S27_TO_NS_SHIFT (27)
+#define NS_TO_S27_MULT (((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC)
+#define NS_TO_S27_SHIFT (63 - S27_TO_NS_SHIFT)
+#define S27_MINOR_MAX (1 << S27_TO_NS_SHIFT)
+
+/* For Huntington platforms NIC time is in seconds and fractions of a second
+ * where the minor register only uses 27 bits in units of 2^-27s.
+ */
+static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec ts = ns_to_timespec(ns);
+ u32 maj = ts.tv_sec;
+ u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT +
+ (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
+
+ /* The conversion can result in the minor value exceeding the maximum.
+ * In this case, round up to the next second.
+ */
+ if (min >= S27_MINOR_MAX) {
+ min -= S27_MINOR_MAX;
+ maj++;
+ }
+
+ *nic_major = maj;
+ *nic_minor = min;
+}
+
+static inline ktime_t efx_ptp_s27_to_ktime(u32 nic_major, u32 nic_minor)
+{
+ u32 ns = (u32)(((u64)nic_minor * NSEC_PER_SEC +
+ (1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT);
+ return ktime_set(nic_major, ns);
+}
+
+static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ /* Apply the correction and deal with carry */
+ nic_minor += correction;
+ if ((s32)nic_minor < 0) {
+ nic_minor += S27_MINOR_MAX;
+ nic_major--;
+ } else if (nic_minor >= S27_MINOR_MAX) {
+ nic_minor -= S27_MINOR_MAX;
+ nic_major++;
+ }
+
+ return efx_ptp_s27_to_ktime(nic_major, nic_minor);
+}
+
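A worked check of the constants above: NS_TO_S27_SHIFT is 63 - 27 = 36, so the ns-to-s27 path computes approximately ns * 2^63 / 10^9 / 2^36 = ns * 2^27 / 10^9, with the added half-units giving round-to-nearest. For ns = 500,000,000 this yields 0x4000000 (half of S27_MINOR_MAX), and efx_ptp_s27_to_ktime() maps 0x4000000 back to 500,000,000 ns, so the two rounded conversions are mutually consistent.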
+/* Get PTP attributes and set up time conversions */
+static int efx_ptp_get_attributes(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN);
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int rc;
+ u32 fmt;
+ size_t out_len;
+
+ /* Get the PTP attributes. If the NIC doesn't support the operation we
+ * use the default format for compatibility with older NICs i.e.
+ * seconds and nanoseconds.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &out_len);
+ if (rc == 0)
+ fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
+ else if (rc == -EINVAL)
+ fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
+ else
+ return rc;
+
+ if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
+ ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction;
+ } else if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS) {
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns;
+ ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction;
+ } else {
+ return -ERANGE;
+ }
+
+ ptp->time_format = fmt;
+
+ /* MC_CMD_PTP_OP_GET_ATTRIBUTES is an extended version of an older
+ * operation MC_CMD_PTP_OP_GET_TIME_FORMAT that also returns a value
+ * to use for the minimum acceptable corrected synchronization window.
+ * If we have the extra information store it. For older firmware that
+ * does not implement the extended command use the default value.
+ */
+ if (rc == 0 && out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+ ptp->min_synchronisation_ns =
+ MCDI_DWORD(outbuf,
+ PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN);
+ else
+ ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS;
+
+ return 0;
+}
+
+/* Get PTP timestamp corrections */
+static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN);
+ int rc;
+
+ /* Get the timestamp corrections from the NIC. If this operation is
+ * not supported (older NICs) then no correction is required.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP,
+ MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc == 0) {
+ efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
+ efx->ptp_data->ts_corrections.rx = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE);
+ efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT);
+ efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN);
+ } else if (rc == -EINVAL) {
+ efx->ptp_data->ts_corrections.tx = 0;
+ efx->ptp_data->ts_corrections.rx = 0;
+ efx->ptp_data->ts_corrections.pps_out = 0;
+ efx->ptp_data->ts_corrections.pps_in = 0;
+ } else {
+ return rc;
+ }
+
+ return 0;
+}
+
/* Enable MCDI PTP support. */
static int efx_ptp_enable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
- efx->ptp_data->channel->channel);
+ efx->ptp_data->channel ?
+ efx->ptp_data->channel->channel : 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
- return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ rc = (rc == -EALREADY) ? 0 : rc;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_ENABLE_LEN,
+ outbuf, sizeof(outbuf), rc);
+ return rc;
}
/* Disable MCDI PTP support.
@@ -332,11 +598,19 @@ static int efx_ptp_enable(struct efx_nic *efx)
static int efx_ptp_disable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
- return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ rc = (rc == -EALREADY) ? 0 : rc;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_DISABLE_LEN,
+ outbuf, sizeof(outbuf), rc);
+ return rc;
}
static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
@@ -404,11 +678,10 @@ static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
unsigned start_ns, end_ns;
timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
- timeset->seconds = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_SECONDS);
- timeset->nanoseconds = MCDI_DWORD(data,
- PTP_OUT_SYNCHRONIZE_NANOSECONDS);
+ timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
+ timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND),
- timeset->waitns = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
+ timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
/* Ignore seconds */
start_ns = timeset->host_start & MC_NANOSECOND_MASK;
@@ -437,62 +710,73 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
MCDI_VAR_ARRAY_LEN(response_length,
PTP_OUT_SYNCHRONIZE_TIMESET);
unsigned i;
- unsigned total;
unsigned ngood = 0;
unsigned last_good = 0;
struct efx_ptp_data *ptp = efx->ptp_data;
u32 last_sec;
u32 start_sec;
struct timespec delta;
+ ktime_t mc_time;
if (number_readings == 0)
return -EAGAIN;
- /* Read the set of results and increment stats for any results that
- * appera to be erroneous.
+ /* Read the set of results and find the last good host-MC
+ * synchronization result. The MC times when it finishes reading the
+ * host time so the corrected window time should be fairly constant
+ * for a given platform. Increment stats for any results that appear
+ * to be erroneous.
*/
for (i = 0; i < number_readings; i++) {
+ s32 window, corrected;
+ struct timespec wait;
+
efx_ptp_read_timeset(
MCDI_ARRAY_STRUCT_PTR(synch_buf,
PTP_OUT_SYNCHRONIZE_TIMESET, i),
&ptp->timeset[i]);
- }
- /* Find the last good host-MC synchronization result. The MC times
- * when it finishes reading the host time so the corrected window time
- * should be fairly constant for a given platform.
- */
- total = 0;
- for (i = 0; i < number_readings; i++)
- if (ptp->timeset[i].window > ptp->timeset[i].waitns) {
- unsigned win;
-
- win = ptp->timeset[i].window - ptp->timeset[i].waitns;
- if (win >= MIN_SYNCHRONISATION_NS &&
- win < MAX_SYNCHRONISATION_NS) {
- total += ptp->timeset[i].window;
- ngood++;
- last_good = i;
- }
+ wait = ktime_to_timespec(
+ ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0));
+ window = ptp->timeset[i].window;
+ corrected = window - wait.tv_nsec;
+
+ /* We expect the uncorrected synchronization window to be at
+ * least as large as the interval between host start and end
+	 * times. If it is smaller than this then it is most likely
+ * to be a consequence of the host's time being adjusted.
+ * Check that the corrected sync window is in a reasonable
+ * range. If it is out of range it is likely to be because an
+ * interrupt or other delay occurred between reading the system
+ * time and writing it to MC memory.
+ */
+ if (window < SYNCHRONISATION_GRANULARITY_NS) {
+ ++ptp->invalid_sync_windows;
+ } else if (corrected >= MAX_SYNCHRONISATION_NS) {
+ ++ptp->oversize_sync_windows;
+ } else if (corrected < ptp->min_synchronisation_ns) {
+ ++ptp->undersize_sync_windows;
+ } else {
+ ngood++;
+ last_good = i;
}
+ }
if (ngood == 0) {
netif_warn(efx, drv, efx->net_dev,
- "PTP no suitable synchronisations %dns\n",
- ptp->base_sync_ns);
+ "PTP no suitable synchronisations\n");
return -EAGAIN;
}
- /* Average minimum this synchronisation */
- ptp->last_sync_ns = DIV_ROUND_UP(total, ngood);
- if (!ptp->base_sync_valid || (ptp->last_sync_ns < ptp->base_sync_ns)) {
- ptp->base_sync_valid = true;
- ptp->base_sync_ns = ptp->last_sync_ns;
- }
+	/* Convert the NIC time into kernel time. No correction is required -
+ * this time is the output of a firmware process.
+ */
+ mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
+ ptp->timeset[last_good].minor, 0);
/* Calculate delay from actual PPS to last_time */
- delta.tv_nsec =
- ptp->timeset[last_good].nanoseconds +
+ delta = ktime_to_timespec(mc_time);
+ delta.tv_nsec +=
last_time->ts_real.tv_nsec -
(ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
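To illustrate the classification above with the default limits (numbers are an example, not from the patch): a reading whose host start-to-end window is 900 ns and whose NIC wait converts to 300 ns gives a corrected window of 600 ns, which lies within [120, 1000) ns and is counted as good; a 150 ns host window is rejected outright as an invalid sync window because it is below the 200 ns granularity; and a corrected window of 1100 ns is counted as oversize.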
@@ -553,6 +837,11 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
loops++;
}
+ if (loops <= 1)
+ ++ptp->fast_syncs;
+ if (!time_before(jiffies, timeout))
+ ++ptp->sync_timeouts;
+
if (ACCESS_ONCE(*start))
efx_ptp_send_times(efx, &last_time);
@@ -561,9 +850,20 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
synch_buf, sizeof(synch_buf),
&response_length);
- if (rc == 0)
+ if (rc == 0) {
rc = efx_ptp_process_times(efx, synch_buf, response_length,
&last_time);
+ if (rc == 0)
+ ++ptp->good_syncs;
+ else
+ ++ptp->no_time_syncs;
+ }
+
+ /* Increment the bad syncs counter if the synchronize fails, whatever
+ * the reason.
+ */
+ if (rc != 0)
+ ++ptp->bad_syncs;
return rc;
}
@@ -602,9 +902,10 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
goto fail;
memset(&timestamps, 0, sizeof(timestamps));
- timestamps.hwtstamp = ktime_set(
- MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_SECONDS),
- MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_NANOSECONDS));
+ timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
+ MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
+ MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
+ ptp_data->ts_corrections.tx);
skb_tstamp_tx(skb, &timestamps);
@@ -622,6 +923,9 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
struct list_head *cursor;
struct list_head *next;
+ if (ptp->rx_ts_inline)
+ return;
+
/* Drop time-expired events */
spin_lock_bh(&ptp->evt_lock);
if (!list_empty(&ptp->evt_list)) {
@@ -655,6 +959,8 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
struct efx_ptp_match *match;
enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
+ WARN_ON_ONCE(ptp->rx_ts_inline);
+
spin_lock_bh(&ptp->evt_lock);
evts_waiting = !list_empty(&ptp->evt_list);
spin_unlock_bh(&ptp->evt_lock);
@@ -696,13 +1002,10 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
/* Process any queued receive events and corresponding packets
*
* q is returned with all the packets that are ready for delivery.
- * true is returned if at least one of those packets requires
- * synchronisation.
*/
-static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
+static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- bool rc = false;
struct sk_buff *skb;
while ((skb = skb_dequeue(&ptp->rxq))) {
@@ -713,13 +1016,10 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
__skb_queue_tail(q, skb);
} else if (efx_ptp_match_rx(efx, skb) ==
PTP_PACKET_STATE_MATCHED) {
- rc = true;
__skb_queue_tail(q, skb);
} else if (time_after(jiffies, match->expiry)) {
match->state = PTP_PACKET_STATE_TIMED_OUT;
- if (net_ratelimit())
- netif_warn(efx, rx_err, efx->net_dev,
- "PTP packet - no timestamp seen\n");
+ ++ptp->rx_no_timestamp;
__skb_queue_tail(q, skb);
} else {
/* Replace unprocessed entry and stop */
@@ -727,8 +1027,6 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
break;
}
}
-
- return rc;
}
/* Complete processing of a received packet */
@@ -739,13 +1037,27 @@ static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
local_bh_enable();
}
-static int efx_ptp_start(struct efx_nic *efx)
+static void efx_ptp_remove_multicast_filters(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ if (ptp->rxfilter_installed) {
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_general);
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_event);
+ ptp->rxfilter_installed = false;
+ }
+}
+
+static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
{
struct efx_ptp_data *ptp = efx->ptp_data;
struct efx_filter_spec rxfilter;
int rc;
- ptp->reset_required = false;
+ if (!ptp->channel || ptp->rxfilter_installed)
+ return 0;
/* Must filter on both event and general ports to ensure
* that there is no packet re-ordering.
@@ -778,23 +1090,37 @@ static int efx_ptp_start(struct efx_nic *efx)
goto fail;
ptp->rxfilter_general = rc;
+ ptp->rxfilter_installed = true;
+ return 0;
+
+fail:
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_event);
+ return rc;
+}
+
+static int efx_ptp_start(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int rc;
+
+ ptp->reset_required = false;
+
+ rc = efx_ptp_insert_multicast_filters(efx);
+ if (rc)
+ return rc;
+
rc = efx_ptp_enable(efx);
if (rc != 0)
- goto fail2;
+ goto fail;
ptp->evt_frag_idx = 0;
ptp->current_adjfreq = 0;
- ptp->rxfilter_installed = true;
return 0;
-fail2:
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_general);
fail:
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_event);
-
+ efx_ptp_remove_multicast_filters(efx);
return rc;
}
@@ -810,13 +1136,7 @@ static int efx_ptp_stop(struct efx_nic *efx)
rc = efx_ptp_disable(efx);
- if (ptp->rxfilter_installed) {
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_general);
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_event);
- ptp->rxfilter_installed = false;
- }
+ efx_ptp_remove_multicast_filters(efx);
/* Make sure RX packets are really delivered */
efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
@@ -844,7 +1164,7 @@ static void efx_ptp_pps_worker(struct work_struct *work)
{
struct efx_ptp_data *ptp =
container_of(work, struct efx_ptp_data, pps_work);
- struct efx_nic *efx = ptp->channel->efx;
+ struct efx_nic *efx = ptp->efx;
struct ptp_clock_event ptp_evt;
if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS))
@@ -855,13 +1175,11 @@ static void efx_ptp_pps_worker(struct work_struct *work)
ptp_clock_event(ptp->phc_clock, &ptp_evt);
}
-/* Process any pending transmissions and timestamp any received packets.
- */
static void efx_ptp_worker(struct work_struct *work)
{
struct efx_ptp_data *ptp_data =
container_of(work, struct efx_ptp_data, work);
- struct efx_nic *efx = ptp_data->channel->efx;
+ struct efx_nic *efx = ptp_data->efx;
struct sk_buff *skb;
struct sk_buff_head tempq;
@@ -874,42 +1192,50 @@ static void efx_ptp_worker(struct work_struct *work)
efx_ptp_drop_time_expired_events(efx);
__skb_queue_head_init(&tempq);
- if (efx_ptp_process_events(efx, &tempq) ||
- !skb_queue_empty(&ptp_data->txq)) {
+ efx_ptp_process_events(efx, &tempq);
- while ((skb = skb_dequeue(&ptp_data->txq)))
- efx_ptp_xmit_skb(efx, skb);
- }
+ while ((skb = skb_dequeue(&ptp_data->txq)))
+ efx_ptp_xmit_skb(efx, skb);
while ((skb = __skb_dequeue(&tempq)))
efx_ptp_process_rx(efx, skb);
}
-/* Initialise PTP channel and state.
- *
- * Setting core_index to zero causes the queue to be initialised and doesn't
- * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
- */
-static int efx_ptp_probe_channel(struct efx_channel *channel)
+static const struct ptp_clock_info efx_phc_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "sfc",
+ .max_adj = MAX_PPB,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 1,
+ .adjfreq = efx_phc_adjfreq,
+ .adjtime = efx_phc_adjtime,
+ .gettime = efx_phc_gettime,
+ .settime = efx_phc_settime,
+ .enable = efx_phc_enable,
+};
+
+/* Initialise PTP state. */
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
{
- struct efx_nic *efx = channel->efx;
struct efx_ptp_data *ptp;
int rc = 0;
unsigned int pos;
- channel->irq_moderation = 0;
- channel->rx_queue.core_index = 0;
-
ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
efx->ptp_data = ptp;
if (!efx->ptp_data)
return -ENOMEM;
+ ptp->efx = efx;
+ ptp->channel = channel;
+ ptp->rx_ts_inline = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+
rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
if (rc != 0)
goto fail1;
- ptp->channel = channel;
skb_queue_head_init(&ptp->rxq);
skb_queue_head_init(&ptp->txq);
ptp->workwq = create_singlethread_workqueue("sfc_ptp");
@@ -929,33 +1255,32 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
ptp->evt_overflow = false;
- ptp->phc_clock_info.owner = THIS_MODULE;
- snprintf(ptp->phc_clock_info.name,
- sizeof(ptp->phc_clock_info.name),
- "%pm", efx->net_dev->perm_addr);
- ptp->phc_clock_info.max_adj = MAX_PPB;
- ptp->phc_clock_info.n_alarm = 0;
- ptp->phc_clock_info.n_ext_ts = 0;
- ptp->phc_clock_info.n_per_out = 0;
- ptp->phc_clock_info.pps = 1;
- ptp->phc_clock_info.adjfreq = efx_phc_adjfreq;
- ptp->phc_clock_info.adjtime = efx_phc_adjtime;
- ptp->phc_clock_info.gettime = efx_phc_gettime;
- ptp->phc_clock_info.settime = efx_phc_settime;
- ptp->phc_clock_info.enable = efx_phc_enable;
-
- ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
- &efx->pci_dev->dev);
- if (IS_ERR(ptp->phc_clock)) {
- rc = PTR_ERR(ptp->phc_clock);
+ /* Get the NIC PTP attributes and set up time conversions */
+ rc = efx_ptp_get_attributes(efx);
+ if (rc < 0)
goto fail3;
- }
- INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
- ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
- if (!ptp->pps_workwq) {
- rc = -ENOMEM;
- goto fail4;
+ /* Get the timestamp corrections */
+ rc = efx_ptp_get_timestamp_corrections(efx);
+ if (rc < 0)
+ goto fail3;
+
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) {
+ ptp->phc_clock_info = efx_phc_clock_info;
+ ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
+ &efx->pci_dev->dev);
+ if (IS_ERR(ptp->phc_clock)) {
+ rc = PTR_ERR(ptp->phc_clock);
+ goto fail3;
+ }
+
+ INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
+ ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
+ if (!ptp->pps_workwq) {
+ rc = -ENOMEM;
+ goto fail4;
+ }
}
ptp->nic_ts_enabled = false;
@@ -976,14 +1301,27 @@ fail1:
return rc;
}
-static void efx_ptp_remove_channel(struct efx_channel *channel)
+/* Initialise PTP channel.
+ *
+ * Setting core_index to zero causes the queue to be initialised and doesn't
+ * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
+ */
+static int efx_ptp_probe_channel(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
+ channel->irq_moderation = 0;
+ channel->rx_queue.core_index = 0;
+
+ return efx_ptp_probe(efx, channel);
+}
+
+void efx_ptp_remove(struct efx_nic *efx)
+{
if (!efx->ptp_data)
return;
- (void)efx_ptp_disable(channel->efx);
+ (void)efx_ptp_disable(efx);
cancel_work_sync(&efx->ptp_data->work);
cancel_work_sync(&efx->ptp_data->pps_work);
@@ -991,15 +1329,22 @@ static void efx_ptp_remove_channel(struct efx_channel *channel)
skb_queue_purge(&efx->ptp_data->rxq);
skb_queue_purge(&efx->ptp_data->txq);
- ptp_clock_unregister(efx->ptp_data->phc_clock);
+ if (efx->ptp_data->phc_clock) {
+ destroy_workqueue(efx->ptp_data->pps_workwq);
+ ptp_clock_unregister(efx->ptp_data->phc_clock);
+ }
destroy_workqueue(efx->ptp_data->workwq);
- destroy_workqueue(efx->ptp_data->pps_workwq);
efx_nic_free_buffer(efx, &efx->ptp_data->start);
kfree(efx->ptp_data);
}
+static void efx_ptp_remove_channel(struct efx_channel *channel)
+{
+ efx_ptp_remove(channel->efx);
+}
+
static void efx_ptp_get_channel_name(struct efx_channel *channel,
char *buf, size_t len)
{
@@ -1080,14 +1425,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
/* Does this packet require timestamping? */
if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
- struct skb_shared_hwtstamps *timestamps;
-
match->state = PTP_PACKET_STATE_UNMATCHED;
- /* Clear all timestamps held: filled in later */
- timestamps = skb_hwtstamps(skb);
- memset(timestamps, 0, sizeof(*timestamps));
-
/* We expect the sequence number to be in the same position in
* the packet for PTP V1 and V2
*/
@@ -1132,8 +1471,13 @@ int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
return NETDEV_TX_OK;
}
-static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
- unsigned int new_mode)
+int efx_ptp_get_mode(struct efx_nic *efx)
+{
+ return efx->ptp_data->mode;
+}
+
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ unsigned int new_mode)
{
if ((enable_wanted != efx->ptp_data->enabled) ||
(enable_wanted && (efx->ptp_data->mode != new_mode))) {
@@ -1177,8 +1521,6 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
{
- bool enable_wanted = false;
- unsigned int new_mode;
int rc;
if (init->flags)
@@ -1188,63 +1530,20 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
(init->tx_type != HWTSTAMP_TX_ON))
return -ERANGE;
- new_mode = efx->ptp_data->mode;
- /* Determine whether any PTP HW operations are required */
- switch (init->rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- new_mode = MC_CMD_PTP_MODE_V1;
- enable_wanted = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- /* Although these three are accepted only IPV4 packets will be
- * timestamped
- */
- init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
- new_mode = MC_CMD_PTP_MODE_V2_ENHANCED;
- enable_wanted = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- /* Non-IP + IPv6 timestamping not supported */
- return -ERANGE;
- break;
- default:
- return -ERANGE;
- }
-
- if (init->tx_type != HWTSTAMP_TX_OFF)
- enable_wanted = true;
-
- /* Old versions of the firmware do not support the improved
- * UUID filtering option (SF bug 33070). If the firmware does
- * not accept the enhanced mode, fall back to the standard PTP
- * v2 UUID filtering.
- */
- rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
- if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED))
- rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2);
- if (rc != 0)
+ rc = efx->type->ptp_set_ts_config(efx, init);
+ if (rc)
return rc;
efx->ptp_data->config = *init;
-
return 0;
}
void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
{
struct efx_ptp_data *ptp = efx->ptp_data;
+ struct efx_nic *primary = efx->primary;
+
+ ASSERT_RTNL();
if (!ptp)
return;
@@ -1252,18 +1551,14 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE);
- ts_info->phc_index = ptp_clock_index(ptp->phc_clock);
+ if (primary && primary->ptp_data && primary->ptp_data->phc_clock)
+ ts_info->phc_index =
+ ptp_clock_index(primary->ptp_data->phc_clock);
ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
- ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+ ts_info->rx_filters = ptp->efx->type->hwtstamp_filters;
}
-int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
+int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr)
{
struct hwtstamp_config config;
int rc;
@@ -1283,6 +1578,15 @@ int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
? -EFAULT : 0;
}
+int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+{
+ if (!efx->ptp_data)
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, &efx->ptp_data->config,
+ sizeof(efx->ptp_data->config)) ? -EFAULT : 0;
+}
+
static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
{
struct efx_ptp_data *ptp = efx->ptp_data;
@@ -1302,6 +1606,9 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
{
struct efx_ptp_event_rx *evt = NULL;
+ if (WARN_ON_ONCE(ptp->rx_ts_inline))
+ return;
+
if (ptp->evt_frag_idx != 3) {
ptp_event_failure(efx, 3);
return;
@@ -1320,9 +1627,10 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
MCDI_EVENT_SRC) << 8) |
(EFX_QWORD_FIELD(ptp->evt_frags[0],
MCDI_EVENT_SRC) << 16));
- evt->hwtimestamp = ktime_set(
+ evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time(
EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
- EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA));
+ EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA),
+ ptp->ts_corrections.rx);
evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
list_add_tail(&evt->link, &ptp->evt_list);
@@ -1397,12 +1705,99 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
}
}
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
+{
+ channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR);
+ channel->sync_timestamp_minor =
+ MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_26_19) << 19;
+ /* if sync events have been disabled then we want to silently ignore
+ * this event, so throw away the result.
+ */
+ (void) cmpxchg(&channel->sync_events_state, SYNC_EVENTS_REQUESTED,
+ SYNC_EVENTS_VALID);
+}
+
+/* make some assumptions about the time representation rather than abstract it,
+ * since we currently only support one type of inline timestamping and only on
+ * EF10.
+ */
+#define MINOR_TICKS_PER_SECOND 0x8000000
+/* Fuzz factor for sync events to be out of order with RX events */
+#define FUZZ (MINOR_TICKS_PER_SECOND / 10)
+#define EXPECTED_SYNC_EVENTS_PER_SECOND 4
+
+static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_ts_offset));
+#else
+ const u8 *data = eh + efx->rx_packet_ts_offset;
+ return (u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24;
+#endif
+}
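As a rough standalone sketch (not part of this patch; the sample bytes are made up), the #else branch above assembles the 32-bit minor timestamp byte by byte for architectures without efficient unaligned loads, producing the same little-endian value an aligned __le32 read would:

#include <stdio.h>
#include <stdint.h>

/* Same byte-by-byte little-endian assembly as the fallback path above. */
static uint32_t read_le32(const uint8_t *data)
{
    return (uint32_t)data[0] |
           (uint32_t)data[1] << 8 |
           (uint32_t)data[2] << 16 |
           (uint32_t)data[3] << 24;
}

int main(void)
{
    const uint8_t buf[] = { 0x01, 0x02, 0x03, 0x04 };   /* 0x04030201 stored LE */

    printf("0x%08x\n", read_le32(buf));                  /* prints 0x04030201 */
    return 0;
}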
+
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb)
+{
+ struct efx_nic *efx = channel->efx;
+ u32 pkt_timestamp_major, pkt_timestamp_minor;
+ u32 diff, carry;
+ struct skb_shared_hwtstamps *timestamps;
+
+ pkt_timestamp_minor = (efx_rx_buf_timestamp_minor(efx,
+ skb_mac_header(skb)) +
+ (u32) efx->ptp_data->ts_corrections.rx) &
+ (MINOR_TICKS_PER_SECOND - 1);
+
+ /* get the difference between the packet and sync timestamps,
+ * modulo one second
+ */
+ diff = (pkt_timestamp_minor - channel->sync_timestamp_minor) &
+ (MINOR_TICKS_PER_SECOND - 1);
+ /* do we roll over a second boundary and need to carry the one? */
+ carry = channel->sync_timestamp_minor + diff > MINOR_TICKS_PER_SECOND ?
+ 1 : 0;
+
+ if (diff <= MINOR_TICKS_PER_SECOND / EXPECTED_SYNC_EVENTS_PER_SECOND +
+ FUZZ) {
+ /* packet is ahead of the sync event by a quarter of a second or
+ * less (allowing for fuzz)
+ */
+ pkt_timestamp_major = channel->sync_timestamp_major + carry;
+ } else if (diff >= MINOR_TICKS_PER_SECOND - FUZZ) {
+ /* packet is behind the sync event but within the fuzz factor.
+ * This means the RX packet and sync event crossed as they were
+ * placed on the event queue, which can sometimes happen.
+ */
+ pkt_timestamp_major = channel->sync_timestamp_major - 1 + carry;
+ } else {
+ /* It's outside tolerance in both directions. This might be
+ * indicative of us missing sync events for some reason, so
+ * we'll call it an error rather than risk giving a bogus
+ * timestamp.
+ */
+ netif_vdbg(efx, drv, efx->net_dev,
+ "packet timestamp %x too far from sync event %x:%x\n",
+ pkt_timestamp_minor, channel->sync_timestamp_major,
+ channel->sync_timestamp_minor);
+ return;
+ }
+
+ /* attach the timestamps to the skb */
+ timestamps = skb_hwtstamps(skb);
+ timestamps->hwtstamp =
+ efx_ptp_s27_to_ktime(pkt_timestamp_major, pkt_timestamp_minor);
+}
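The function above reconstructs a full timestamp by combining the packet's 27-bit sub-second ("minor") value from the RX prefix with the whole-seconds ("major") value carried by the most recent time-sync event: it takes the minor-tick difference modulo one second, adds a carry if the packet crossed a second boundary, and rejects anything outside roughly a quarter of a second plus the fuzz factor. A standalone sketch of the same decision logic, with made-up sample values and not part of this patch:

#include <stdio.h>
#include <stdint.h>

#define MINOR_TICKS_PER_SECOND 0x8000000    /* 2^27 ticks per second */
#define FUZZ (MINOR_TICKS_PER_SECOND / 10)
#define EXPECTED_SYNC_EVENTS_PER_SECOND 4

int main(void)
{
    uint32_t sync_major = 1000;          /* seconds from the last sync event */
    uint32_t sync_minor = 0x7F00000;     /* sync event just before a second boundary */
    uint32_t pkt_minor  = 0x0100000;     /* packet arrived after the rollover */
    uint32_t diff, carry, pkt_major;

    /* difference between packet and sync timestamps, modulo one second */
    diff = (pkt_minor - sync_minor) & (MINOR_TICKS_PER_SECOND - 1);
    /* did we roll over a second boundary and need to carry the one? */
    carry = (sync_minor + diff > MINOR_TICKS_PER_SECOND) ? 1 : 0;

    if (diff <= MINOR_TICKS_PER_SECOND / EXPECTED_SYNC_EVENTS_PER_SECOND + FUZZ)
        pkt_major = sync_major + carry;      /* packet is ahead of the sync event */
    else if (diff >= MINOR_TICKS_PER_SECOND - FUZZ)
        pkt_major = sync_major - 1 + carry;  /* packet and sync event crossed on the queue */
    else
        pkt_major = 0;                       /* out of tolerance: reject the timestamp */

    printf("major=%u minor=0x%x\n", pkt_major, pkt_minor);  /* prints major=1001 */
    return 0;
}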
+
static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
- struct efx_nic *efx = ptp_data->channel->efx;
+ struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
s64 adjustment_ns;
int rc;
@@ -1432,18 +1827,20 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
+ u32 nic_major, nic_minor;
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
- struct efx_nic *efx = ptp_data->channel->efx;
- struct timespec delta_ts = ns_to_timespec(delta);
+ struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
+ efx->ptp_data->ns_to_nic_time(delta, &nic_major, &nic_minor);
+
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MAJOR, nic_major);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MINOR, nic_minor);
return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
@@ -1453,10 +1850,11 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
- struct efx_nic *efx = ptp_data->channel->efx;
+ struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
int rc;
+ ktime_t kt;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
@@ -1466,8 +1864,10 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
if (rc != 0)
return rc;
- ts->tv_sec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_SECONDS);
- ts->tv_nsec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_NANOSECONDS);
+ kt = ptp_data->nic_to_kernel_time(
+ MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR),
+ MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0);
+ *ts = ktime_to_timespec(kt);
return 0;
}
@@ -1519,7 +1919,7 @@ static const struct efx_channel_type efx_ptp_channel_type = {
.keep_eventq = false,
};
-void efx_ptp_probe(struct efx_nic *efx)
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx)
{
/* Check whether PTP is implemented on this NIC. The DISABLE
* operation will succeed if and only if it is implemented.
@@ -1533,9 +1933,15 @@ void efx_ptp_start_datapath(struct efx_nic *efx)
{
if (efx_ptp_restart(efx))
netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
+ /* re-enable timestamping if it was previously enabled */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, true, true);
}
void efx_ptp_stop_datapath(struct efx_nic *efx)
{
+ /* temporarily disable timestamping */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, false, true);
efx_ptp_stop(efx);
}
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 42488df1f4ec..48588ddf81b0 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -149,7 +149,7 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
* 0 on success. If a single page can be used for multiple buffers,
* then the page will either be inserted fully, or not at all.
*/
-static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
@@ -163,7 +163,8 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
do {
page = efx_reuse_page(rx_queue);
if (page == NULL) {
- page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+ page = alloc_pages(__GFP_COLD | __GFP_COMP |
+ (atomic ? GFP_ATOMIC : GFP_KERNEL),
efx->rx_buffer_order);
if (unlikely(page == NULL))
return -ENOMEM;
@@ -321,7 +322,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
* this means this function must run from the NAPI handler, or be called
* when NAPI is disabled.
*/
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
struct efx_nic *efx = rx_queue->efx;
unsigned int fill_level, batch_size;
@@ -354,7 +355,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
do {
- rc = efx_init_rx_buffers(rx_queue);
+ rc = efx_init_rx_buffers(rx_queue, atomic);
if (unlikely(rc)) {
/* Ensure that we don't leave the rx queue empty */
if (rx_queue->added_count == rx_queue->removed_count)
@@ -439,7 +440,8 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
}
if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(efx, eh);
+ skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
+ PKT_HASH_TYPE_L3);
skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
@@ -475,14 +477,18 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
struct sk_buff *skb;
/* Allocate an SKB to store the headers */
- skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
+ skb = netdev_alloc_skb(efx->net_dev,
+ efx->rx_ip_align + efx->rx_prefix_size +
+ hdr_len);
if (unlikely(skb == NULL))
return NULL;
EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
- skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
- memcpy(__skb_put(skb, hdr_len), eh, hdr_len);
+ memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
+ efx->rx_prefix_size + hdr_len);
+ skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
+ __skb_put(skb, hdr_len);
/* Append the remaining page(s) onto the frag list */
if (rx_buf->len > hdr_len) {
@@ -619,6 +625,8 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ efx_rx_skb_attach_timestamp(channel, skb);
+
if (channel->type->receive_skb)
if (channel->type->receive_skb(channel, skb))
return;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 144bbff5a4ae..26641817a9c7 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -722,7 +722,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
return rc_reset;
}
- if ((tests->registers < 0) && !rc_test)
+ if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
rc_test = -EIO;
}
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index a2f4a06ffa4e..009dbe88f3be 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -38,6 +38,7 @@ struct efx_self_tests {
int eventq_dma[EFX_MAX_CHANNELS];
int eventq_int[EFX_MAX_CHANNELS];
/* offline tests */
+ int memory;
int registers;
int phy_ext[EFX_MAX_PHY_TESTS];
struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index d034bcd124ef..23f3a6f7737a 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -118,6 +118,54 @@ out:
/**************************************************************************
*
+ * PTP
+ *
+ **************************************************************************
+ */
+
+static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
+{
+ _efx_writed(efx, cpu_to_le32(host_time),
+ FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+}
+
+static int siena_ptp_set_ts_config(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ int rc;
+
+ switch (init->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ /* if TX timestamping is still requested then leave PTP on */
+ return efx_ptp_change_mode(efx,
+ init->tx_type != HWTSTAMP_TX_OFF,
+ efx_ptp_get_mode(efx));
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ return efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1);
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ rc = efx_ptp_change_mode(efx, true,
+ MC_CMD_PTP_MODE_V2_ENHANCED);
+ /* bug 33070 - old versions of the firmware do not support the
+ * improved UUID filtering option. Similarly, old versions of the
+ * application do not expect it to be enabled. If the firmware
+ * does not accept the enhanced mode, fall back to the standard
+ * PTP v2 UUID filtering. */
+ if (rc != 0)
+ rc = efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V2);
+ return rc;
+ default:
+ return -ERANGE;
+ }
+}
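siena_ptp_set_ts_config() is reached through the SIOCSHWTSTAMP path: efx_ptp_set_ts_config() copies the user's hwtstamp_config in, efx_ptp_ts_init() calls the per-NIC-type hook, and the (possibly downgraded) config is copied back to the caller. A minimal userspace sketch of such a request follows; the interface name, socket type and error handling are illustrative assumptions, not part of this patch:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
    /* Request TX timestamping and PTPv2-over-UDP event filtering */
    struct hwtstamp_config cfg = {
        .tx_type   = HWTSTAMP_TX_ON,
        .rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
    };
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* illustrative interface name */
    ifr.ifr_data = (void *)&cfg;

    /* On an sfc Siena NIC this ends up in siena_ptp_set_ts_config() */
    if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
        perror("SIOCSHWTSTAMP");

    /* The driver may downgrade rx_filter; the granted config is copied back */
    printf("rx_filter granted: %d\n", cfg.rx_filter);
    close(fd);
    return 0;
}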
+
+/**************************************************************************
+ *
* Device reset
*
**************************************************************************
@@ -259,7 +307,7 @@ static int siena_probe_nic(struct efx_nic *efx)
goto fail5;
efx_sriov_probe(efx);
- efx_ptp_probe(efx);
+ efx_ptp_defer_probe_with_channel(efx);
return 0;
@@ -273,6 +321,31 @@ fail1:
return rc;
}
+static void siena_rx_push_rss_config(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+ /* Enable IPv6 RSS */
+ BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
+ 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
+ memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
+ EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
+ memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+
+ efx_farch_rx_push_indir_table(efx);
+}
+
/* This call performs hardware-specific global initialisation, such as
* defining the descriptor cache sizes and number of RSS channels.
* It does not set up any buffers, descriptor rings or event queues.
@@ -313,23 +386,7 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_RX_USR_BUF_SIZE >> 5);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
- /* Set hash key for IPv4 */
- memcpy(&temp, efx->rx_hash_key, sizeof(temp));
- efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
-
- /* Enable IPv6 RSS */
- BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
- 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
- FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
- memcpy(&temp, efx->rx_hash_key, sizeof(temp));
- efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
- memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
- efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
- EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
- FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
- memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
- FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
- efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+ siena_rx_push_rss_config(efx);
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -458,6 +515,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
return -EAGAIN;
/* Update derived statistics */
+ efx_nic_fix_nodesc_drop_stat(efx,
+ &stats[SIENA_STAT_rx_nodesc_drop_cnt]);
efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
stats[SIENA_STAT_tx_bytes] -
stats[SIENA_STAT_tx_bad_bytes]);
@@ -837,19 +896,6 @@ fail:
/**************************************************************************
*
- * PTP
- *
- **************************************************************************
- */
-
-static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
-{
- _efx_writed(efx, cpu_to_le32(host_time),
- FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
-}
-
-/**************************************************************************
- *
* Revision-dependent attributes used by efx.c and nic.c
*
**************************************************************************
@@ -878,6 +924,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.describe_stats = siena_describe_nic_stats,
.update_stats = siena_update_nic_stats,
.start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = siena_push_irq_moderation,
@@ -902,7 +949,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
- .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_push_rss_config = siena_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
@@ -939,6 +986,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.mtd_sync = efx_mcdi_mtd_sync,
#endif
.ptp_write_host_time = siena_ptp_write_host_time,
+ .ptp_set_ts_config = siena_ptp_set_ts_config,
.revision = EFX_REV_SIENA_A0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
@@ -957,4 +1005,11 @@ const struct efx_nic_type siena_a0_nic_type = {
NETIF_F_RXHASH | NETIF_F_NTUPLE),
.mcdi_max_ver = 1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
+ .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ),
};
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index c49d1fb16965..75d11fa4eb0a 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -429,7 +429,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
}
/* Transfer ownership of the skb to the final buffer */
+#ifdef EFX_USE_PIO
finish_packet:
+#endif
buffer->skb = skb;
buffer->flags = EFX_TX_BUF_SKB | dma_flags;
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index ffa78432164d..7984ad05357d 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -30,7 +30,6 @@
#define IOC3_NAME "ioc3-eth"
#define IOC3_VERSION "2.6.3-4"
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mm.h>
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 513ed8b1ba58..5564a5fa3385 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -10,7 +10,6 @@
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 975dc2d8e548..ff57a46388ee 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -576,7 +576,6 @@ err_unmap_tx:
err_out_unmap:
pci_iounmap(pci_dev, ioaddr);
err_out_cleardev:
- pci_set_drvdata(pci_dev, NULL);
pci_release_regions(pci_dev);
err_out:
free_netdev(net_dev);
@@ -2427,7 +2426,6 @@ static void sis900_remove(struct pci_dev *pci_dev)
pci_iounmap(pci_dev, sis_priv->ioaddr);
free_netdev(net_dev);
pci_release_regions(pci_dev);
- pci_set_drvdata(pci_dev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 0f096a890059..c50fb08c9905 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -17,8 +17,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Arguments:
* watchdog = TX watchdog timeout
@@ -55,7 +54,6 @@ static const char version[] =
)
#endif
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h
index 9965da39281b..04b35f55df97 100644
--- a/drivers/net/ethernet/smsc/smc911x.h
+++ b/drivers/net/ethernet/smsc/smc911x.h
@@ -15,8 +15,7 @@
. GNU General Public License for more details.
.
. You should have received a copy of the GNU General Public License
- . along with this program; if not, write to the Free Software
- . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ . along with this program; if not, see <http://www.gnu.org/licenses/>.
.
. Information contained in this file was obtained from the LAN9118
. manual from SMC. To get a copy, if you really want one, you can find
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 8ef70d9c20c1..c7a4868571f9 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -29,7 +29,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 8bf29eb4a5a0..839c0e6cca01 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -19,8 +19,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Arguments:
* io = for the base address
@@ -66,7 +65,6 @@ static const char version[] =
#endif
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -1895,7 +1893,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
SMC_SELECT_BANK(lp, 1);
val = SMC_GET_BASE(lp);
val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
- if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
+ if (((unsigned long)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
netdev_warn(dev, "%s: IOADDR %p doesn't match configuration (%x).\n",
CARDNAME, ioaddr, val);
}
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 749654b976bc..47dce918eb0f 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -18,8 +18,7 @@
. GNU General Public License for more details.
.
. You should have received a copy of the GNU General Public License
- . along with this program; if not, write to the Free Software
- . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ . along with this program; if not, see <http://www.gnu.org/licenses/>.
.
. Information contained in this file was obtained from the LAN91C111
. manual from SMC. To get a copy, if you really want one, you can find
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8564f23a6796..6382b7c416f4 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
***************************************************************************
* Rewritten, heavily based on smsc911x simple driver by SMSC.
diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h
index 9ad5e5d39a03..23953957fed8 100644
--- a/drivers/net/ethernet/smsc/smsc911x.h
+++ b/drivers/net/ethernet/smsc/smsc911x.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
***************************************************************************/
#ifndef __SMSC911X_H__
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index f433d97aa097..d3b967aff9e0 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
***************************************************************************
*/
@@ -1541,7 +1540,7 @@ static int smsc9420_resume(struct pci_dev *pdev)
pci_set_master(pdev);
- err = pci_enable_wake(pdev, 0, 0);
+ err = pci_enable_wake(pdev, PCI_D0, 0);
if (err)
netif_warn(pd, ifup, pd->dev, "pci_enable_wake failed: %d\n",
err);
diff --git a/drivers/net/ethernet/smsc/smsc9420.h b/drivers/net/ethernet/smsc/smsc9420.h
index e441402f77a2..c63c76381af6 100644
--- a/drivers/net/ethernet/smsc/smsc9420.h
+++ b/drivers/net/ethernet/smsc/smsc9420.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
***************************************************************************
*/
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 6e52c0f74cd9..e2f202e3932f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -5,6 +5,7 @@ config STMMAC_ETH
select PHYLIB
select CRC32
select PTP_1588_CLOCK
+ select RESET_CONTROLLER
---help---
This is the driver for the Ethernet IPs built around a
Synopsys IP Core and only tested on the STMicroelectronics
@@ -25,6 +26,17 @@ config STMMAC_PLATFORM
If unsure, say N.
+config DWMAC_SUNXI
+ bool "Allwinner GMAC support"
+ depends on STMMAC_PLATFORM && ARCH_SUNXI
+ default y
+ ---help---
+ Support for Allwinner A20/A31 GMAC Ethernet controllers.
+
+ This selects Allwinner SoC glue layer support for the
+ stmmac device driver. This driver is used for the A20/A31
+ GMAC Ethernet controller.
+
config STMMAC_PCI
bool "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 356a9dd32be7..ecadecea79b2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
+stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index d234ab540b29..72d282bf33a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -51,6 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
while (len != 0) {
+ priv->tx_skbuff[entry] = NULL;
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
@@ -62,7 +63,6 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
- priv->tx_skbuff[entry] = NULL;
len -= bmax;
i++;
} else {
@@ -73,7 +73,6 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
- priv->tx_skbuff[entry] = NULL;
len = 0;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index fc94f202a43e..7834a3993946 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -29,7 +29,6 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/module.h>
-#include <linux/init.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define STMMAC_VLAN_TAG_USED
#include <linux/if_vlan.h>
@@ -293,6 +292,8 @@ struct dma_features {
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
+#define JUMBO_LEN 9000
+
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode,
@@ -369,7 +370,7 @@ struct stmmac_dma_ops {
struct stmmac_ops {
/* MAC core initialization */
- void (*core_init) (void __iomem *ioaddr);
+ void (*core_init) (void __iomem *ioaddr, int mtu);
/* Enable and verify that the IPC module is supported */
int (*rx_ipc) (void __iomem *ioaddr);
/* Dump MAC registers */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
new file mode 100644
index 000000000000..771cd15fca18
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -0,0 +1,140 @@
+/**
+ * dwmac-sunxi.c - Allwinner sunxi DWMAC specific glue layer
+ *
+ * Copyright (C) 2013 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/stmmac.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/of_net.h>
+#include <linux/regulator/consumer.h>
+
+struct sunxi_priv_data {
+ int interface;
+ int clk_enabled;
+ struct clk *tx_clk;
+ struct regulator *regulator;
+};
+
+static void *sun7i_gmac_setup(struct platform_device *pdev)
+{
+ struct sunxi_priv_data *gmac;
+ struct device *dev = &pdev->dev;
+
+ gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return ERR_PTR(-ENOMEM);
+
+ gmac->interface = of_get_phy_mode(dev->of_node);
+
+ gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
+ if (IS_ERR(gmac->tx_clk)) {
+ dev_err(dev, "could not get tx clock\n");
+ return gmac->tx_clk;
+ }
+
+ /* Optional regulator for PHY */
+ gmac->regulator = devm_regulator_get_optional(dev, "phy");
+ if (IS_ERR(gmac->regulator)) {
+ if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+ return ERR_PTR(-EPROBE_DEFER);
+ dev_info(dev, "no regulator found\n");
+ gmac->regulator = NULL;
+ }
+
+ return gmac;
+}
+
+#define SUN7I_GMAC_GMII_RGMII_RATE 125000000
+#define SUN7I_GMAC_MII_RATE 25000000
+
+static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
+{
+ struct sunxi_priv_data *gmac = priv;
+ int ret;
+
+ if (gmac->regulator) {
+ ret = regulator_enable(gmac->regulator);
+ if (ret)
+ return ret;
+ }
+
+ /* Set GMAC interface port mode
+ *
+ * The GMAC TX clock lines are configured by setting the clock
+ * rate, which then uses the auto-reparenting feature of the
+ * clock driver, and enabling/disabling the clock.
+ */
+ if (gmac->interface == PHY_INTERFACE_MODE_RGMII) {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
+ clk_prepare_enable(gmac->tx_clk);
+ gmac->clk_enabled = 1;
+ } else {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
+ clk_prepare(gmac->tx_clk);
+ }
+
+ return 0;
+}
+
+static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct sunxi_priv_data *gmac = priv;
+
+ if (gmac->clk_enabled) {
+ clk_disable(gmac->tx_clk);
+ gmac->clk_enabled = 0;
+ }
+ clk_unprepare(gmac->tx_clk);
+
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+}
+
+static void sun7i_fix_speed(void *priv, unsigned int speed)
+{
+ struct sunxi_priv_data *gmac = priv;
+
+ /* only GMII mode requires us to reconfigure the clock lines */
+ if (gmac->interface != PHY_INTERFACE_MODE_GMII)
+ return;
+
+ if (gmac->clk_enabled) {
+ clk_disable(gmac->tx_clk);
+ gmac->clk_enabled = 0;
+ }
+ clk_unprepare(gmac->tx_clk);
+
+ if (speed == 1000) {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
+ clk_prepare_enable(gmac->tx_clk);
+ gmac->clk_enabled = 1;
+ } else {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
+ clk_prepare(gmac->tx_clk);
+ }
+}
+
+/* of_data specifying hardware features and callbacks.
+ * hardware features were copied from Allwinner drivers. */
+const struct stmmac_of_data sun7i_gmac_data = {
+ .has_gmac = 1,
+ .tx_coe = 1,
+ .fix_mac_speed = sun7i_fix_speed,
+ .setup = sun7i_gmac_setup,
+ .init = sun7i_gmac_init,
+ .exit = sun7i_gmac_exit,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index c12aabb8cf93..f37d90f114f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -126,11 +126,8 @@ enum power_event {
#define GMAC_ANE_PSE (3 << 7)
#define GMAC_ANE_PSE_SHIFT 7
- /* GMAC Configuration defines */
-#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
-#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
-
/* GMAC Configuration defines */
+#define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */
#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */
@@ -156,7 +153,7 @@ enum inter_frame_gap {
#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
- GMAC_CONTROL_JE | GMAC_CONTROL_BE)
+ GMAC_CONTROL_BE)
/* GMAC Frame Filter defines */
#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index cdd926832e27..b3e148ef5683 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -32,10 +32,15 @@
#include <asm/io.h>
#include "dwmac1000.h"
-static void dwmac1000_core_init(void __iomem *ioaddr)
+static void dwmac1000_core_init(void __iomem *ioaddr, int mtu)
{
u32 value = readl(ioaddr + GMAC_CONTROL);
value |= GMAC_CORE_INIT;
+ if (mtu > 1500)
+ value |= GMAC_CONTROL_2K;
+ if (mtu > 2000)
+ value |= GMAC_CONTROL_JE;
+
writel(value, ioaddr + GMAC_CONTROL);
/* Mask GMAC interrupts */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 5857d677dac1..2ff767bcfdd0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -32,7 +32,7 @@
#include <asm/io.h>
#include "dwmac100.h"
-static void dwmac100_core_init(void __iomem *ioaddr)
+static void dwmac100_core_init(void __iomem *ioaddr, int mtu)
{
u32 value = readl(ioaddr + MAC_CONTROL);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 1ef9d8a555aa..a96c7c2f5f3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -58,6 +58,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
STMMAC_RING_MODE);
wmb();
+ priv->tx_skbuff[entry] = NULL;
entry = (++priv->cur_tx) % txsize;
if (priv->extend_desc)
@@ -73,7 +74,6 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
STMMAC_RING_MODE);
wmb();
priv->hw->desc->set_tx_owner(desc);
- priv->tx_skbuff[entry] = NULL;
} else {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 22f89ffdfd95..d9af26ed58ee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -32,6 +32,7 @@
#include <linux/pci.h>
#include "common.h"
#include <linux/ptp_clock_kernel.h>
+#include <linux/reset.h>
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
@@ -91,6 +92,7 @@ struct stmmac_priv {
int wolopts;
int wol_irq;
struct clk *stmmac_clk;
+ struct reset_control *stmmac_rst;
int clk_csr;
struct timer_list eee_ctrl_timer;
int lpi_irq;
@@ -105,21 +107,19 @@ struct stmmac_priv {
unsigned int default_addend;
u32 adv_ts;
int use_riwt;
+ int irq_wake;
spinlock_t ptp_lock;
};
-extern int phyaddr;
-
int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
+int stmmac_mdio_reset(struct mii_bus *mii);
void stmmac_set_ethtool_ops(struct net_device *netdev);
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
extern const struct stmmac_hwtimestamp stmmac_ptp;
int stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
-int stmmac_freeze(struct net_device *ndev);
-int stmmac_restore(struct net_device *ndev);
int stmmac_resume(struct net_device *ndev);
int stmmac_suspend(struct net_device *ndev);
int stmmac_dvr_remove(struct net_device *ndev);
@@ -130,6 +130,9 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv);
bool stmmac_eee_init(struct stmmac_priv *priv);
#ifdef CONFIG_STMMAC_PLATFORM
+#ifdef CONFIG_DWMAC_SUNXI
+extern const struct stmmac_of_data sun7i_gmac_data;
+#endif
extern struct platform_driver stmmac_pltfr_driver;
static inline int stmmac_register_platform(void)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8a7a23a84ac5..a2e7d2c96e36 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -43,6 +43,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
+#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_STMMAC_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -50,9 +51,9 @@
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
+#include <linux/reset.h>
#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
-#define JUMBO_LEN 9000
/* Module parameters */
#define TX_TIMEO 5000
@@ -64,7 +65,7 @@ static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
-int phyaddr = -1;
+static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");
@@ -91,7 +92,7 @@ static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");
-#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
+#define DMA_BUFFER_SIZE BUF_SIZE_4KiB
static int buf_sz = DMA_BUFFER_SIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
@@ -332,7 +333,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
return;
/* exit if skb doesn't support hw tstamp */
- if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
+ if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
return;
if (priv->adv_ts)
@@ -622,17 +623,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
- if (netif_msg_hw(priv)) {
- if (priv->dma_cap.time_stamp) {
- pr_debug("IEEE 1588-2002 Time Stamp supported\n");
- priv->adv_ts = 0;
- }
- if (priv->dma_cap.atime_stamp && priv->extend_desc) {
- pr_debug
- ("IEEE 1588-2008 Advanced Time Stamp supported\n");
- priv->adv_ts = 1;
- }
- }
+ priv->adv_ts = 0;
+ if (priv->dma_cap.atime_stamp && priv->extend_desc)
+ priv->adv_ts = 1;
+
+ if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
+ pr_debug("IEEE 1588-2002 Time Stamp supported\n");
+
+ if (netif_msg_hw(priv) && priv->adv_ts)
+ pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
priv->hw->ptp = &stmmac_ptp;
priv->hwts_tx_en = 0;
@@ -778,6 +777,7 @@ static int stmmac_init_phy(struct net_device *dev)
char phy_id_fmt[MII_BUS_ID_SIZE + 3];
char bus_id[MII_BUS_ID_SIZE];
int interface = priv->plat->interface;
+ int max_speed = priv->plat->max_speed;
priv->oldlink = 0;
priv->speed = 0;
priv->oldduplex = -1;
@@ -802,7 +802,8 @@ static int stmmac_init_phy(struct net_device *dev)
/* Stop Advertising 1000BASE Capability if interface is not GMII */
if ((interface == PHY_INTERFACE_MODE_MII) ||
- (interface == PHY_INTERFACE_MODE_RMII))
+ (interface == PHY_INTERFACE_MODE_RMII) ||
+ (max_speed < 1000 && max_speed > 0))
phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full);
@@ -992,70 +993,12 @@ static int init_dma_desc_rings(struct net_device *dev)
if (bfsize < BUF_SIZE_16KiB)
bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
+ priv->dma_buf_sz = bfsize;
+
if (netif_msg_probe(priv))
pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__,
txsize, rxsize, bfsize);
- if (priv->extend_desc) {
- priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
- if (!priv->dma_erx)
- goto err_dma;
-
- priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
- if (!priv->dma_etx) {
- dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
- goto err_dma;
- }
- } else {
- priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
- sizeof(struct dma_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
- if (!priv->dma_rx)
- goto err_dma;
-
- priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
- sizeof(struct dma_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
- if (!priv->dma_tx) {
- dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
- goto err_dma;
- }
- }
-
- priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!priv->rx_skbuff_dma)
- goto err_rx_skbuff_dma;
-
- priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->rx_skbuff)
- goto err_rx_skbuff;
-
- priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!priv->tx_skbuff_dma)
- goto err_tx_skbuff_dma;
-
- priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->tx_skbuff)
- goto err_tx_skbuff;
-
if (netif_msg_probe(priv)) {
pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
(u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1081,7 +1024,6 @@ static int init_dma_desc_rings(struct net_device *dev)
}
priv->cur_rx = 0;
priv->dirty_rx = (unsigned int)(i - rxsize);
- priv->dma_buf_sz = bfsize;
buf_sz = bfsize;
/* Setup the chained descriptor addresses */
@@ -1123,30 +1065,6 @@ static int init_dma_desc_rings(struct net_device *dev)
err_init_rx_buffers:
while (--i >= 0)
stmmac_free_rx_buffers(priv, i);
- kfree(priv->tx_skbuff);
-err_tx_skbuff:
- kfree(priv->tx_skbuff_dma);
-err_tx_skbuff_dma:
- kfree(priv->rx_skbuff);
-err_rx_skbuff:
- kfree(priv->rx_skbuff_dma);
-err_rx_skbuff_dma:
- if (priv->extend_desc) {
- dma_free_coherent(priv->device, priv->dma_tx_size *
- sizeof(struct dma_extended_desc),
- priv->dma_etx, priv->dma_tx_phy);
- dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
- } else {
- dma_free_coherent(priv->device,
- priv->dma_tx_size * sizeof(struct dma_desc),
- priv->dma_tx, priv->dma_tx_phy);
- dma_free_coherent(priv->device,
- priv->dma_rx_size * sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
- }
-err_dma:
return ret;
}
@@ -1163,25 +1081,107 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
int i;
for (i = 0; i < priv->dma_tx_size; i++) {
- if (priv->tx_skbuff[i] != NULL) {
- struct dma_desc *p;
- if (priv->extend_desc)
- p = &((priv->dma_etx + i)->basic);
- else
- p = priv->dma_tx + i;
+ struct dma_desc *p;
- if (priv->tx_skbuff_dma[i])
- dma_unmap_single(priv->device,
- priv->tx_skbuff_dma[i],
- priv->hw->desc->get_tx_len(p),
- DMA_TO_DEVICE);
+ if (priv->extend_desc)
+ p = &((priv->dma_etx + i)->basic);
+ else
+ p = priv->dma_tx + i;
+
+ if (priv->tx_skbuff_dma[i]) {
+ dma_unmap_single(priv->device,
+ priv->tx_skbuff_dma[i],
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[i] = 0;
+ }
+
+ if (priv->tx_skbuff[i] != NULL) {
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
- priv->tx_skbuff_dma[i] = 0;
}
}
}
+static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+{
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+ int ret = -ENOMEM;
+
+ priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!priv->rx_skbuff_dma)
+ return -ENOMEM;
+
+ priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!priv->rx_skbuff)
+ goto err_rx_skbuff;
+
+ priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!priv->tx_skbuff_dma)
+ goto err_tx_skbuff_dma;
+
+ priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!priv->tx_skbuff)
+ goto err_tx_skbuff;
+
+ if (priv->extend_desc) {
+ priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
+ sizeof(struct
+ dma_extended_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_erx)
+ goto err_dma;
+
+ priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
+ sizeof(struct
+ dma_extended_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_etx) {
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_erx, priv->dma_rx_phy);
+ goto err_dma;
+ }
+ } else {
+ priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
+ sizeof(struct dma_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_rx)
+ goto err_dma;
+
+ priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
+ sizeof(struct dma_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_tx) {
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ goto err_dma;
+ }
+ }
+
+ return 0;
+
+err_dma:
+ kfree(priv->tx_skbuff);
+err_tx_skbuff:
+ kfree(priv->tx_skbuff_dma);
+err_tx_skbuff_dma:
+ kfree(priv->rx_skbuff);
+err_rx_skbuff:
+ kfree(priv->rx_skbuff_dma);
+ return ret;
+}
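The new alloc_dma_desc_resources() follows the usual staged-allocation pattern: each allocation that fails jumps to a label that unwinds, in reverse order, only what was already allocated. A generic illustration of that goto-unwind idiom, with plain malloc/free standing in for the kernel allocators and placeholder names:

#include <stdlib.h>

struct rings {
    void *rx_map;
    void *rx_ring;
    void *tx_ring;
};

static int rings_alloc(struct rings *r, size_t n)
{
    r->rx_map = malloc(n);
    if (!r->rx_map)
        return -1;

    r->rx_ring = malloc(n);
    if (!r->rx_ring)
        goto err_rx_ring;

    r->tx_ring = malloc(n);
    if (!r->tx_ring)
        goto err_tx_ring;

    return 0;               /* every stage succeeded */

err_tx_ring:
    free(r->rx_ring);       /* undo stage 2 */
err_rx_ring:
    free(r->rx_map);        /* undo stage 1 */
    return -1;
}

int main(void)
{
    struct rings r;

    if (rings_alloc(&r, 64))
        return 1;
    free(r.tx_ring);
    free(r.rx_ring);
    free(r.rx_map);
    return 0;
}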
+
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
/* Release the DMA TX/RX socket buffers */
@@ -1524,9 +1524,9 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
priv->dev->dev_addr, 0);
if (!is_valid_ether_addr(priv->dev->dev_addr))
eth_hw_addr_random(priv->dev);
+ pr_info("%s: device MAC address %pM\n", priv->dev->name,
+ priv->dev->dev_addr);
}
- pr_warn("%s: device MAC address %pM\n", priv->dev->name,
- priv->dev->dev_addr);
}
/**
@@ -1591,6 +1591,86 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
}
/**
+ * stmmac_hw_setup: set up the MAC in a usable state.
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function sets up the IP in a usable state.
+ * Return value:
+ * 0 on success and an appropriate negative error code (as defined in
+ * errno.h) on failure.
+ */
+static int stmmac_hw_setup(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = init_dma_desc_rings(dev);
+ if (ret < 0) {
+ pr_err("%s: DMA descriptors initialization failed\n", __func__);
+ return ret;
+ }
+ /* DMA initialization and SW reset */
+ ret = stmmac_init_dma_engine(priv);
+ if (ret < 0) {
+ pr_err("%s: DMA engine initialization failed\n", __func__);
+ return ret;
+ }
+
+ /* Copy the MAC addr into the HW */
+ priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
+
+ /* If required, perform hw setup of the bus. */
+ if (priv->plat->bus_setup)
+ priv->plat->bus_setup(priv->ioaddr);
+
+ /* Initialize the MAC Core */
+ priv->hw->mac->core_init(priv->ioaddr, dev->mtu);
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_set_mac(priv->ioaddr, true);
+
+ /* Set the HW DMA mode and the COE */
+ stmmac_dma_operation_mode(priv);
+
+ stmmac_mmc_setup(priv);
+
+ ret = stmmac_init_ptp(priv);
+ if (ret && ret != -EOPNOTSUPP)
+ pr_warn("%s: failed PTP initialisation\n", __func__);
+
+#ifdef CONFIG_STMMAC_DEBUG_FS
+ ret = stmmac_init_fs(dev);
+ if (ret < 0)
+ pr_warn("%s: failed debugFS registration\n", __func__);
+#endif
+ /* Start the ball rolling... */
+ pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
+ priv->hw->dma->start_tx(priv->ioaddr);
+ priv->hw->dma->start_rx(priv->ioaddr);
+
+ /* Dump DMA/MAC registers */
+ if (netif_msg_hw(priv)) {
+ priv->hw->mac->dump_regs(priv->ioaddr);
+ priv->hw->dma->dump_regs(priv->ioaddr);
+ }
+ priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
+
+ priv->eee_enabled = stmmac_eee_init(priv);
+
+ stmmac_init_tx_coalesce(priv);
+
+ if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
+ priv->rx_riwt = MAX_DMA_RIWT;
+ priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
+ }
+
+ if (priv->pcs && priv->hw->mac->ctrl_ane)
+ priv->hw->mac->ctrl_ane(priv->ioaddr, 0);
+
+ return 0;
+}
+
+/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
* Description:
@@ -1604,8 +1684,6 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- clk_prepare_enable(priv->stmmac_clk);
-
stmmac_check_ether_addr(priv);
if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
@@ -1618,33 +1696,29 @@ static int stmmac_open(struct net_device *dev)
}
}
+ /* Extra statistics */
+ memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
+ priv->xstats.threshold = tc;
+
/* Create and initialize the TX/RX descriptors chains. */
priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
- ret = init_dma_desc_rings(dev);
+ ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
- pr_err("%s: DMA descriptors initialization failed\n", __func__);
+ pr_err("%s: DMA descriptors allocation failed\n", __func__);
goto dma_desc_error;
}
- /* DMA initialization and SW reset */
- ret = stmmac_init_dma_engine(priv);
+ ret = stmmac_hw_setup(dev);
if (ret < 0) {
- pr_err("%s: DMA engine initialization failed\n", __func__);
+ pr_err("%s: Hw setup failed\n", __func__);
goto init_error;
}
- /* Copy the MAC addr into the HW */
- priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
-
- /* If required, perform hw setup of the bus. */
- if (priv->plat->bus_setup)
- priv->plat->bus_setup(priv->ioaddr);
-
- /* Initialize the MAC Core */
- priv->hw->mac->core_init(priv->ioaddr);
+ if (priv->phydev)
+ phy_start(priv->phydev);
/* Request the IRQ lines */
ret = request_irq(dev->irq, stmmac_interrupt,
@@ -1677,55 +1751,6 @@ static int stmmac_open(struct net_device *dev)
}
}
- /* Enable the MAC Rx/Tx */
- stmmac_set_mac(priv->ioaddr, true);
-
- /* Set the HW DMA mode and the COE */
- stmmac_dma_operation_mode(priv);
-
- /* Extra statistics */
- memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
- priv->xstats.threshold = tc;
-
- stmmac_mmc_setup(priv);
-
- ret = stmmac_init_ptp(priv);
- if (ret)
- pr_warn("%s: failed PTP initialisation\n", __func__);
-
-#ifdef CONFIG_STMMAC_DEBUG_FS
- ret = stmmac_init_fs(dev);
- if (ret < 0)
- pr_warn("%s: failed debugFS registration\n", __func__);
-#endif
- /* Start the ball rolling... */
- pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
- priv->hw->dma->start_tx(priv->ioaddr);
- priv->hw->dma->start_rx(priv->ioaddr);
-
- /* Dump DMA/MAC registers */
- if (netif_msg_hw(priv)) {
- priv->hw->mac->dump_regs(priv->ioaddr);
- priv->hw->dma->dump_regs(priv->ioaddr);
- }
-
- if (priv->phydev)
- phy_start(priv->phydev);
-
- priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
-
- priv->eee_enabled = stmmac_eee_init(priv);
-
- stmmac_init_tx_coalesce(priv);
-
- if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
- priv->rx_riwt = MAX_DMA_RIWT;
- priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
- }
-
- if (priv->pcs && priv->hw->mac->ctrl_ane)
- priv->hw->mac->ctrl_ane(priv->ioaddr, 0);
-
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -1796,7 +1821,6 @@ static int stmmac_release(struct net_device *dev)
#ifdef CONFIG_STMMAC_DEBUG_FS
stmmac_exit_fs();
#endif
- clk_disable_unprepare(priv->stmmac_clk);
stmmac_release_ptp(priv);
@@ -1846,8 +1870,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
first = desc;
- priv->tx_skbuff[entry] = skb;
-
/* To program the descriptors according to the size of the frame */
if (priv->mode == STMMAC_RING_MODE) {
is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
@@ -1875,6 +1897,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int len = skb_frag_size(frag);
+ priv->tx_skbuff[entry] = NULL;
entry = (++priv->cur_tx) % txsize;
if (priv->extend_desc)
desc = (struct dma_desc *)(priv->dma_etx + entry);
@@ -1884,7 +1907,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = desc->des2;
- priv->tx_skbuff[entry] = NULL;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
priv->mode);
wmb();
@@ -1892,6 +1914,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
}
+ priv->tx_skbuff[entry] = skb;
+
/* Finalize the latest segment. */
priv->hw->desc->close_tx_desc(desc);
@@ -1953,6 +1977,23 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ethhdr *ehdr;
+ u16 vlanid;
+
+ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
+ NETIF_F_HW_VLAN_CTAG_RX &&
+ !__vlan_get_tag(skb, &vlanid)) {
+ /* pop the vlan tag */
+ ehdr = (struct ethhdr *)skb->data;
+ memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
+ }
+}
+
+
/**
* stmmac_rx_refill: refill used skb preallocated buffers
* @priv: driver private structure
@@ -2104,6 +2145,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
print_pkt(skb->data, frame_len);
}
+ stmmac_rx_vlan(priv->dev, skb);
+
skb->protocol = eth_type_trans(skb, priv->dev);
if (unlikely(!coe))
@@ -2231,6 +2274,9 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
else
max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
+ if (priv->plat->maxmtu < max_mtu)
+ max_mtu = priv->plat->maxmtu;
+
if ((new_mtu < 46) || (new_mtu > max_mtu)) {
pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
return -EINVAL;
@@ -2278,6 +2324,9 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
+ if (priv->irq_wake)
+ pm_wakeup_event(priv->device, 0);
+
if (unlikely(!dev)) {
pr_err("%s: invalid dev pointer\n", __func__);
return IRQ_NONE;
@@ -2682,10 +2731,32 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
if ((phyaddr >= 0) && (phyaddr <= 31))
priv->plat->phy_addr = phyaddr;
+ priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
+ if (IS_ERR(priv->stmmac_clk)) {
+ dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
+ __func__);
+ ret = PTR_ERR(priv->stmmac_clk);
+ goto error_clk_get;
+ }
+ clk_prepare_enable(priv->stmmac_clk);
+
+ priv->stmmac_rst = devm_reset_control_get(priv->device,
+ STMMAC_RESOURCE_NAME);
+ if (IS_ERR(priv->stmmac_rst)) {
+ if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto error_hw_init;
+ }
+ dev_info(priv->device, "no reset control found\n");
+ priv->stmmac_rst = NULL;
+ }
+ if (priv->stmmac_rst)
+ reset_control_deassert(priv->stmmac_rst);
+
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
if (ret)
- goto error_free_netdev;
+ goto error_hw_init;
ndev->netdev_ops = &stmmac_netdev_ops;
@@ -2723,12 +2794,6 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
goto error_netdev_register;
}
- priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
- if (IS_ERR(priv->stmmac_clk)) {
- pr_warn("%s: warning: cannot get CSR clock\n", __func__);
- goto error_clk_get;
- }
-
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
* changed at run-time and it is fixed. Viceversa the driver'll try to
@@ -2756,15 +2821,15 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
return priv;
error_mdio_register:
- clk_put(priv->stmmac_clk);
-error_clk_get:
unregister_netdev(ndev);
error_netdev_register:
netif_napi_del(&priv->napi);
-error_free_netdev:
+error_hw_init:
+ clk_disable_unprepare(priv->stmmac_clk);
+error_clk_get:
free_netdev(ndev);
- return NULL;
+ return ERR_PTR(ret);
}
/**
@@ -2788,6 +2853,9 @@ int stmmac_dvr_remove(struct net_device *ndev)
stmmac_mdio_unregister(ndev);
netif_carrier_off(ndev);
unregister_netdev(ndev);
+ if (priv->stmmac_rst)
+ reset_control_assert(priv->stmmac_rst);
+ clk_disable_unprepare(priv->stmmac_clk);
free_netdev(ndev);
return 0;
@@ -2819,10 +2887,12 @@ int stmmac_suspend(struct net_device *ndev)
stmmac_clear_descriptors(priv);
/* Enable Power down mode by programming the PMT regs */
- if (device_may_wakeup(priv->device))
+ if (device_may_wakeup(priv->device)) {
priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
- else {
+ priv->irq_wake = 1;
+ } else {
stmmac_set_mac(priv->ioaddr, false);
+ pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
clk_disable_unprepare(priv->stmmac_clk);
}
@@ -2846,18 +2916,21 @@ int stmmac_resume(struct net_device *ndev)
* this bit because it can generate problems while resuming
* from another devices (e.g. serial console).
*/
- if (device_may_wakeup(priv->device))
+ if (device_may_wakeup(priv->device)) {
priv->hw->mac->pmt(priv->ioaddr, 0);
- else
+ priv->irq_wake = 0;
+ } else {
+ pinctrl_pm_select_default_state(priv->device);
/* enable the clk prevously disabled */
clk_prepare_enable(priv->stmmac_clk);
+ /* reset the phy so that it's ready */
+ if (priv->mii)
+ stmmac_mdio_reset(priv->mii);
+ }
netif_device_attach(ndev);
- /* Enable the MAC and DMA */
- stmmac_set_mac(priv->ioaddr, true);
- priv->hw->dma->start_tx(priv->ioaddr);
- priv->hw->dma->start_rx(priv->ioaddr);
+ stmmac_hw_setup(ndev);
napi_enable(&priv->napi);
@@ -2870,22 +2943,6 @@ int stmmac_resume(struct net_device *ndev)
return 0;
}
-
-int stmmac_freeze(struct net_device *ndev)
-{
- if (!ndev || !netif_running(ndev))
- return 0;
-
- return stmmac_release(ndev);
-}
-
-int stmmac_restore(struct net_device *ndev)
-{
- if (!ndev || !netif_running(ndev))
- return 0;
-
- return stmmac_open(ndev);
-}
#endif /* CONFIG_PM */
/* Driver can be configured w/ and w/ both PCI and Platf drivers
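The probe rework above switches stmmac_dvr_probe() from returning NULL on failure to returning ERR_PTR(ret), so bus glue can propagate the real error code (including -EPROBE_DEFER from the reset controller). A minimal sketch of the caller side of that convention, matching the PCI and platform hunks further down (names are illustrative only, not taken from this patch):

	struct stmmac_priv *priv;

	priv = stmmac_dvr_probe(&pdev->dev, plat_dat, addr);
	if (IS_ERR(priv))		/* pointer encodes a negative errno */
		return PTR_ERR(priv);	/* e.g. -EPROBE_DEFER or -ENOMEM */
	/* priv is a valid pointer from here on */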
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index fe7bc9903867..a468eb107823 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -128,7 +128,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
* @bus: points to the mii_bus structure
* Description: reset the MII bus
*/
-static int stmmac_mdio_reset(struct mii_bus *bus)
+int stmmac_mdio_reset(struct mii_bus *bus)
{
#if defined(CONFIG_STMMAC_PLATFORM)
struct net_device *ndev = bus->priv;
@@ -166,7 +166,6 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
udelay(data->delays[1]);
gpio_set_value(reset_gpio, active_low ? 1 : 0);
udelay(data->delays[2]);
- gpio_free(reset_gpio);
}
}
#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 644d80ece067..291608924849 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -26,9 +26,9 @@
#include <linux/pci.h>
#include "stmmac.h"
-struct plat_stmmacenet_data plat_dat;
-struct stmmac_mdio_bus_data mdio_data;
-struct stmmac_dma_cfg dma_cfg;
+static struct plat_stmmacenet_data plat_dat;
+static struct stmmac_mdio_bus_data mdio_data;
+static struct stmmac_dma_cfg dma_cfg;
static void stmmac_default_data(void)
{
@@ -100,9 +100,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
stmmac_default_data();
priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr);
- if (!priv) {
+ if (IS_ERR(priv)) {
pr_err("%s: main driver probe failed", __func__);
- ret = -ENODEV;
+ ret = PTR_ERR(priv);
goto err_out;
}
priv->dev->irq = pdev->irq;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 51c9069ef405..5884a7d2063b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -26,8 +26,23 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/of_device.h>
#include "stmmac.h"
+static const struct of_device_id stmmac_dt_ids[] = {
+#ifdef CONFIG_DWMAC_SUNXI
+ { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+#endif
+ /* SoC specific glue layers should come before generic bindings */
+ { .compatible = "st,spear600-gmac"},
+ { .compatible = "snps,dwmac-3.610"},
+ { .compatible = "snps,dwmac-3.70a"},
+ { .compatible = "snps,dwmac-3.710"},
+ { .compatible = "snps,dwmac"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
+
#ifdef CONFIG_OF
static int stmmac_probe_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat,
@@ -35,23 +50,63 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
{
struct device_node *np = pdev->dev.of_node;
struct stmmac_dma_cfg *dma_cfg;
+ const struct of_device_id *device;
if (!np)
return -ENODEV;
+ device = of_match_device(stmmac_dt_ids, &pdev->dev);
+ if (!device)
+ return -ENODEV;
+
+ if (device->data) {
+ const struct stmmac_of_data *data = device->data;
+ plat->has_gmac = data->has_gmac;
+ plat->enh_desc = data->enh_desc;
+ plat->tx_coe = data->tx_coe;
+ plat->rx_coe = data->rx_coe;
+ plat->bugged_jumbo = data->bugged_jumbo;
+ plat->pmt = data->pmt;
+ plat->riwt_off = data->riwt_off;
+ plat->fix_mac_speed = data->fix_mac_speed;
+ plat->bus_setup = data->bus_setup;
+ plat->setup = data->setup;
+ plat->free = data->free;
+ plat->init = data->init;
+ plat->exit = data->exit;
+ }
+
*mac = of_get_mac_address(np);
plat->interface = of_get_phy_mode(np);
+ /* Get max speed of operation from device tree */
+ if (of_property_read_u32(np, "max-speed", &plat->max_speed))
+ plat->max_speed = -1;
+
plat->bus_id = of_alias_get_id(np, "ethernet");
if (plat->bus_id < 0)
plat->bus_id = 0;
- of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr);
+ /* Default to phy auto-detection */
+ plat->phy_addr = -1;
+
+ /* "snps,phy-addr" is not a standard property. Mark it as deprecated
+ * and warn of its use. Remove this when phy node support is added.
+ */
+ if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
+ dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
sizeof(struct stmmac_mdio_bus_data),
GFP_KERNEL);
+ plat->force_sf_dma_mode = of_property_read_bool(np, "snps,force_sf_dma_mode");
+
+ /* Set the maxmtu to a default of JUMBO_LEN in case the
+ * parameter is not present in the device tree.
+ */
+ plat->maxmtu = JUMBO_LEN;
+
/*
* Currently only the properties needed on SPEAr600
* are provided. All other properties should be added
@@ -60,6 +115,14 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
if (of_device_is_compatible(np, "st,spear600-gmac") ||
of_device_is_compatible(np, "snps,dwmac-3.70a") ||
of_device_is_compatible(np, "snps,dwmac")) {
+ /* Note that although the parameter is named max-frame-size in
+ * the ePAPR v1.1 spec, it is actually used as the IEEE notion of
+ * MAC client data, i.e. the MTU. The ePAPR specification is
+ * confusing: the definition says frame size, but the usage
+ * examples are clearly MTUs.
+ */
+ of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
plat->has_gmac = 1;
plat->pmt = 1;
}
@@ -140,17 +203,24 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
}
}
+ /* Custom setup (if needed) */
+ if (plat_dat->setup) {
+ plat_dat->bsp_priv = plat_dat->setup(pdev);
+ if (IS_ERR(plat_dat->bsp_priv))
+ return PTR_ERR(plat_dat->bsp_priv);
+ }
+
/* Custom initialisation (if needed)*/
if (plat_dat->init) {
- ret = plat_dat->init(pdev);
+ ret = plat_dat->init(pdev, plat_dat->bsp_priv);
if (unlikely(ret))
return ret;
}
priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
- if (!priv) {
+ if (IS_ERR(priv)) {
pr_err("%s: main driver probe failed", __func__);
- return -ENODEV;
+ return PTR_ERR(priv);
}
/* Get MAC address if available (DT) */
@@ -199,7 +269,10 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
int ret = stmmac_dvr_remove(ndev);
if (priv->plat->exit)
- priv->plat->exit(pdev);
+ priv->plat->exit(pdev, priv->plat->bsp_priv);
+
+ if (priv->plat->free)
+ priv->plat->free(pdev, priv->plat->bsp_priv);
return ret;
}
@@ -207,64 +280,34 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int stmmac_pltfr_suspend(struct device *dev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
-
- return stmmac_suspend(ndev);
-}
-
-static int stmmac_pltfr_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
-
- return stmmac_resume(ndev);
-}
-
-int stmmac_pltfr_freeze(struct device *dev)
-{
int ret;
- struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
struct platform_device *pdev = to_platform_device(dev);
- ret = stmmac_freeze(ndev);
- if (plat_dat->exit)
- plat_dat->exit(pdev);
+ ret = stmmac_suspend(ndev);
+ if (priv->plat->exit)
+ priv->plat->exit(pdev, priv->plat->bsp_priv);
return ret;
}
-int stmmac_pltfr_restore(struct device *dev)
+static int stmmac_pltfr_resume(struct device *dev)
{
- struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
struct platform_device *pdev = to_platform_device(dev);
- if (plat_dat->init)
- plat_dat->init(pdev);
+ if (priv->plat->init)
+ priv->plat->init(pdev, priv->plat->bsp_priv);
- return stmmac_restore(ndev);
+ return stmmac_resume(ndev);
}
-static const struct dev_pm_ops stmmac_pltfr_pm_ops = {
- .suspend = stmmac_pltfr_suspend,
- .resume = stmmac_pltfr_resume,
- .freeze = stmmac_pltfr_freeze,
- .thaw = stmmac_pltfr_restore,
- .restore = stmmac_pltfr_restore,
-};
-#else
-static const struct dev_pm_ops stmmac_pltfr_pm_ops;
#endif /* CONFIG_PM */
-static const struct of_device_id stmmac_dt_ids[] = {
- { .compatible = "st,spear600-gmac"},
- { .compatible = "snps,dwmac-3.610"},
- { .compatible = "snps,dwmac-3.70a"},
- { .compatible = "snps,dwmac-3.710"},
- { .compatible = "snps,dwmac"},
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
+static SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops,
+ stmmac_pltfr_suspend, stmmac_pltfr_resume);
struct platform_driver stmmac_pltfr_driver = {
.probe = stmmac_pltfr_probe,
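The platform glue now pulls per-SoC parameters out of the matched of_device_id's .data pointer (see the copy loop added to stmmac_probe_config_dt above). A hypothetical glue entry, using only stmmac_of_data fields named in that loop; the compatible string and data values are invented for illustration, not part of this patch:

	/* Hypothetical per-SoC glue data; fields mirror the DT copy loop. */
	static const struct stmmac_of_data example_gmac_data = {
		.has_gmac = 1,
		.tx_coe   = 1,
		.pmt      = 1,
	};

	/* ...and its match entry in stmmac_dt_ids, next to the sun7i one: */
	/* { .compatible = "vendor,example-gmac", .data = &example_gmac_data }, */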
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index b8b0eeed0f92..7680581ebe12 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
priv->hw->ptp->config_addend(priv->ioaddr, addend);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
}
@@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
}
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index b4d50d74ba18..df8d383acf48 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- * 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* This driver uses the sungem driver (c) David Miller
* (davem@redhat.com) as its basis.
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
index b361424d5f57..882ce168a799 100644
--- a/drivers/net/ethernet/sun/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -15,9 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- * 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* vendor id: 0x108E (Sun Microsystems, Inc.)
* device id: 0xabba (Cassini)
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 388540fcb977..8e2266e1f260 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3493,10 +3493,12 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
rh = (struct rx_pkt_hdr1 *) skb->data;
if (np->dev->features & NETIF_F_RXHASH)
- skb->rxhash = ((u32)rh->hashval2_0 << 24 |
- (u32)rh->hashval2_1 << 16 |
- (u32)rh->hashval1_1 << 8 |
- (u32)rh->hashval1_2 << 0);
+ skb_set_hash(skb,
+ ((u32)rh->hashval2_0 << 24 |
+ (u32)rh->hashval2_1 << 16 |
+ (u32)rh->hashval1_1 << 8 |
+ (u32)rh->hashval1_2 << 0),
+ PKT_HASH_TYPE_L3);
skb_pull(skb, sizeof(*rh));
rp->rx_packets++;
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 7217ee5d6273..206c1063815a 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -13,7 +13,6 @@
#include <linux/in.h>
#include <linux/string.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index b5655b79bd3b..c2799dc46325 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -24,7 +24,6 @@
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 3df56840a3b9..1c24a8f368bd 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -751,7 +751,7 @@ static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
struct vnet_mcast_entry *m;
for (m = vp->mcast_list; m; m = m->next) {
- if (!memcmp(m->addr, addr, ETH_ALEN))
+ if (ether_addr_equal(m->addr, addr))
return m;
}
return NULL;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 4f1d2549130e..2ead87759ab4 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1764,7 +1764,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
/* We reclaimed resources, so in case the Q is stopped by xmit callback,
- * we resume the transmition and use tx_lock to synchronize with xmit.*/
+ * we resume the transmission and use tx_lock to synchronize with xmit.*/
spin_lock(&priv->tx_lock);
priv->tx_level += tx_level;
BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 2dc16b6efaf0..73f74f369437 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -17,7 +17,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 5120d9ce1dd4..1d860ce914ed 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -541,14 +541,93 @@ static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
return slave_num;
}
+static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_ale *ale = priv->ale;
+ int i;
+
+ if (priv->data.dual_emac) {
+ bool flag = false;
+
+ /* Enabling promiscuous mode for one interface is
+ * common to both interfaces, as they share the same
+ * hardware resource.
+ */
+ for (i = 0; i < priv->data.slaves; i++)
+ if (priv->slaves[i].ndev->flags & IFF_PROMISC)
+ flag = true;
+
+ if (!enable && flag) {
+ enable = true;
+ dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
+ }
+
+ if (enable) {
+ /* Enable Bypass */
+ cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);
+
+ dev_dbg(&ndev->dev, "promiscuity enabled\n");
+ } else {
+ /* Disable Bypass */
+ cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
+ dev_dbg(&ndev->dev, "promiscuity disabled\n");
+ }
+ } else {
+ if (enable) {
+ unsigned long timeout = jiffies + HZ;
+
+ /* Disable Learn for all ports */
+ for (i = 0; i <= priv->data.slaves; i++) {
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NOLEARN, 1);
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NO_SA_UPDATE, 1);
+ }
+
+ /* Clear All Untouched entries */
+ cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+ do {
+ cpu_relax();
+ if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
+ break;
+ } while (time_after(timeout, jiffies));
+ cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
+ priv->host_port);
+
+ /* Flood All Unicast Packets to Host port */
+ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
+ dev_dbg(&ndev->dev, "promiscuity enabled\n");
+ } else {
+ /* Flood All Unicast Packets to Host port */
+ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
+
+ /* Enable Learn for all ports */
+ for (i = 0; i <= priv->data.slaves; i++) {
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NOLEARN, 0);
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NO_SA_UPDATE, 0);
+ }
+ dev_dbg(&ndev->dev, "promiscuity disabled\n");
+ }
+ }
+}
+
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
- dev_err(priv->dev, "Ignoring Promiscuous mode\n");
+ cpsw_set_promiscious(ndev, true);
return;
+ } else {
+ /* Disable promiscuous mode */
+ cpsw_set_promiscious(ndev, false);
}
/* Clear all mcast from ALE */
@@ -582,7 +661,7 @@ static void cpsw_intr_disable(struct cpsw_priv *priv)
return;
}
-void cpsw_tx_handler(void *token, int len, int status)
+static void cpsw_tx_handler(void *token, int len, int status)
{
struct sk_buff *skb = token;
struct net_device *ndev = skb->dev;
@@ -599,7 +678,7 @@ void cpsw_tx_handler(void *token, int len, int status)
dev_kfree_skb_any(skb);
}
-void cpsw_rx_handler(void *token, int len, int status)
+static void cpsw_rx_handler(void *token, int len, int status)
{
struct sk_buff *skb = token;
struct sk_buff *new_skb;
@@ -740,6 +819,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
/* set speed_in input in case RMII mode is used in 100Mbps */
if (phy->speed == 100)
mac_control |= BIT(15);
+ else if (phy->speed == 10)
+ mac_control |= BIT(18); /* In Band mode */
*link = true;
} else {
@@ -1255,29 +1336,6 @@ fail:
return NETDEV_TX_BUSY;
}
-static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
-{
- /*
- * The switch cannot operate in promiscuous mode without substantial
- * headache. For promiscuous mode to work, we would need to put the
- * ALE in bypass mode and route all traffic to the host port.
- * Subsequently, the host will need to operate as a "bridge", learn,
- * and flood as needed. For now, we simply complain here and
- * do nothing about it :-)
- */
- if ((flags & IFF_PROMISC) && (ndev->flags & IFF_PROMISC))
- dev_err(&ndev->dev, "promiscuity ignored!\n");
-
- /*
- * The switch cannot filter multicast traffic unless it is configured
- * in "VLAN Aware" mode. Unfortunately, VLAN awareness requires a
- * whole bunch of additional logic that this driver does not implement
- * at present.
- */
- if ((flags & IFF_ALLMULTI) && !(ndev->flags & IFF_ALLMULTI))
- dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
-}
-
#ifdef CONFIG_TI_CPTS
static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
@@ -1329,7 +1387,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
__raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
}
-static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_priv *priv = netdev_priv(dev);
struct cpts *cpts = priv->cpts;
@@ -1390,6 +1448,24 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpts *cpts = priv->cpts;
+ struct hwtstamp_config cfg;
+
+ if (priv->version != CPSW_VERSION_1 &&
+ priv->version != CPSW_VERSION_2)
+ return -EOPNOTSUPP;
+
+ cfg.flags = 0;
+ cfg.tx_type = cpts->tx_enable ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = (cpts->rx_enable ?
+ HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
#endif /*CONFIG_TI_CPTS*/
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
@@ -1404,7 +1480,9 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
switch (cmd) {
#ifdef CONFIG_TI_CPTS
case SIOCSHWTSTAMP:
- return cpsw_hwtstamp_ioctl(dev, req);
+ return cpsw_hwtstamp_set(dev, req);
+ case SIOCGHWTSTAMP:
+ return cpsw_hwtstamp_get(dev, req);
#endif
case SIOCGMIIPHY:
data->phy_id = priv->slaves[slave_no].phy->addr;
@@ -1553,7 +1631,6 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
- .ndo_change_rx_flags = cpsw_ndo_change_rx_flags,
.ndo_set_mac_address = cpsw_ndo_set_mac_address,
.ndo_do_ioctl = cpsw_ndo_ioctl,
.ndo_validate_addr = eth_validate_addr,
@@ -1801,8 +1878,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
phyid = be32_to_cpup(parp+1);
mdio = of_find_device_by_node(mdio_node);
- snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
- PHY_ID_FMT, mdio->name, phyid);
+
+ if (strncmp(mdio->name, "gpio", 4) == 0) {
+ /* GPIO bitbang MDIO driver attached */
+ struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
+
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, bus->id, phyid);
+ } else {
+ /* davinci MDIO driver attached */
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, mdio->name, phyid);
+ }
mac_addr = of_get_mac_address(slave_node);
if (mac_addr)
@@ -2106,7 +2193,7 @@ static int cpsw_probe(struct platform_device *pdev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
- dev_name(priv->dev), priv)) {
+ dev_name(&pdev->dev), priv)) {
dev_err(priv->dev, "error attaching irq\n");
goto clean_ale_ret;
}
@@ -2135,8 +2222,8 @@ static int cpsw_probe(struct platform_device *pdev)
data->cpts_clock_mult, data->cpts_clock_shift))
dev_err(priv->dev, "error registering cpts device\n");
- cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
- ss_res->start, ndev->irq);
+ cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
+ &ss_res->start, ndev->irq);
if (priv->data.dual_emac) {
ret = cpsw_probe_dual_emac(pdev, priv);
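cpsw now answers SIOCGHWTSTAMP in addition to SIOCSHWTSTAMP, as do the tile and ixp4xx drivers further down. A hedged user-space sketch of how such a read-only query might look, assuming kernel headers new enough to define SIOCGHWTSTAMP; the interface name and error handling are placeholders, not part of this patch:

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	/* Read back the current hardware timestamping configuration. */
	static int query_hwtstamp(const char *ifname, struct hwtstamp_config *cfg)
	{
		struct ifreq ifr;
		int fd, ret;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return -1;

		memset(&ifr, 0, sizeof(ifr));
		memset(cfg, 0, sizeof(*cfg));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)cfg;

		/* Older kernels without this ioctl fail with ENOTTY/EOPNOTSUPP. */
		ret = ioctl(fd, SIOCGHWTSTAMP, &ifr);
		close(fd);
		/* On success, cfg->tx_type and cfg->rx_filter mirror driver state. */
		return ret;
	}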
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 7fa60d6092ed..7f893069c418 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -163,7 +163,7 @@ int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
if (cpsw_ale_get_vlan_id(ale_entry) != vid)
continue;
cpsw_ale_get_addr(ale_entry, entry_addr);
- if (memcmp(entry_addr, addr, 6) == 0)
+ if (ether_addr_equal(entry_addr, addr))
return idx;
}
return -ENOENT;
@@ -477,6 +477,14 @@ static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
.port_shift = 0,
.bits = 1,
},
+ [ALE_P0_UNI_FLOOD] = {
+ .name = "port0_unicast_flood",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 8,
+ .port_shift = 0,
+ .bits = 1,
+ },
[ALE_VLAN_NOLEARN] = {
.name = "vlan_nolearn",
.offset = ALE_CONTROL,
@@ -573,6 +581,14 @@ static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
.port_shift = 0,
.bits = 1,
},
+ [ALE_PORT_NO_SA_UPDATE] = {
+ .name = "no_source_update",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 5,
+ .port_shift = 0,
+ .bits = 1,
+ },
[ALE_PORT_MCAST_LIMIT] = {
.name = "mcast_limit",
.offset = ALE_PORTCTL,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 30daa1265f0c..de409c33b250 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -34,6 +34,7 @@ enum cpsw_ale_control {
ALE_ENABLE,
ALE_CLEAR,
ALE_AGEOUT,
+ ALE_P0_UNI_FLOOD,
ALE_VLAN_NOLEARN,
ALE_NO_PORT_VLAN,
ALE_OUI_DENY,
@@ -47,6 +48,7 @@ enum cpsw_ale_control {
ALE_PORT_DROP_UNTAGGED,
ALE_PORT_DROP_UNKNOWN_VLAN,
ALE_PORT_NOLEARN,
+ ALE_PORT_NO_SA_UPDATE,
ALE_PORT_UNKNOWN_VLAN_MEMBER,
ALE_PORT_UNKNOWN_MCAST_FLOOD,
ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 90a79462c869..364d0c7952c0 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -81,7 +81,7 @@ struct cpdma_desc {
};
struct cpdma_desc_pool {
- u32 phys;
+ phys_addr_t phys;
u32 hw_addr;
void __iomem *iomap; /* ioremap map */
void *cpumap; /* dma_alloc map */
@@ -219,8 +219,7 @@ static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
{
if (!desc)
return 0;
- return pool->hw_addr + (__force dma_addr_t)desc -
- (__force dma_addr_t)pool->iomap;
+ return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}
static inline struct cpdma_desc __iomem *
@@ -972,7 +971,7 @@ struct cpdma_control_info {
#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
};
-struct cpdma_control_info controls[] = {
+static struct cpdma_control_info controls[] = {
[CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 4ec92659a100..0cca9dec5d82 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -82,7 +82,7 @@ struct davinci_mdio_regs {
} user[0];
};
-struct mdio_platform_data default_pdata = {
+static const struct mdio_platform_data default_pdata = {
.bus_freq = DEF_OUT_FREQ,
};
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 4083ba8839e1..f59a6c265331 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -9,20 +9,10 @@ config TILE_NET
select CRC32
select TILE_GXIO_MPIPE if TILEGX
select HIGH_RES_TIMERS if TILEGX
+ select PTP_1588_CLOCK if TILEGX
---help---
This is a standard Linux network device driver for the
on-chip Tilera Gigabit Ethernet and XAUI interfaces.
To compile this driver as a module, choose M here: the module
will be called tile_net.
-
-config PTP_1588_CLOCK_TILEGX
- tristate "Tilera TILE-Gx mPIPE as PTP clock"
- select PTP_1588_CLOCK
- depends on TILE_NET
- depends on TILEGX
- ---help---
- This driver adds support for using the mPIPE as a PTP
- clock. This clock is only useful if your PTP programs are
- getting hardware time stamps on the PTP Ethernet packets
- using the SO_TIMESTAMPING API.
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 628b736e5ae7..023237a65720 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -187,10 +187,8 @@ struct tile_net_priv {
int echannel;
/* mPIPE instance, 0 or 1. */
int instance;
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
/* The timestamp config. */
struct hwtstamp_config stamp_cfg;
-#endif
};
static struct mpipe_data {
@@ -229,14 +227,12 @@ static struct mpipe_data {
int first_bucket;
int num_buckets;
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
/* PTP-specific data. */
struct ptp_clock *ptp_clock;
struct ptp_clock_info caps;
/* Lock for ptp accessors. */
struct mutex ptp_lock;
-#endif
} mpipe_data[NR_MPIPE_MAX] = {
[0 ... (NR_MPIPE_MAX - 1)] {
@@ -451,20 +447,17 @@ static void tile_net_provide_needed_buffers(void)
static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb,
gxio_mpipe_idesc_t *idesc)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) {
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec,
idesc->time_stamp_ns);
}
-#endif
}
/* Get TX timestamp, and store it in the skb. */
static void tile_tx_timestamp(struct sk_buff *skb, int instance)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
struct skb_shared_info *shtx = skb_shinfo(skb);
if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
struct mpipe_data *md = &mpipe_data[instance];
@@ -477,14 +470,11 @@ static void tile_tx_timestamp(struct sk_buff *skb, int instance)
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
skb_tstamp_tx(skb, &shhwtstamps);
}
-#endif
}
/* Use ioctl() to enable or disable TX or RX timestamping. */
-static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
- int cmd)
+static int tile_hwtstamp_set(struct net_device *dev, struct ifreq *rq)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
struct hwtstamp_config config;
struct tile_net_priv *priv = netdev_priv(dev);
@@ -530,9 +520,17 @@ static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
priv->stamp_cfg = config;
return 0;
-#else
- return -EOPNOTSUPP;
-#endif
+}
+
+static int tile_hwtstamp_get(struct net_device *dev, struct ifreq *rq)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ if (copy_to_user(rq->ifr_data, &priv->stamp_cfg,
+ sizeof(priv->stamp_cfg)))
+ return -EFAULT;
+
+ return 0;
}
static inline bool filter_packet(struct net_device *dev, void *buf)
@@ -814,8 +812,6 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
return HRTIMER_NORESTART;
}
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
-
/* PTP clock operations. */
static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
@@ -882,12 +878,9 @@ static struct ptp_clock_info ptp_mpipe_caps = {
.enable = ptp_mpipe_enable,
};
-#endif /* CONFIG_PTP_1588_CLOCK_TILEGX */
-
/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
struct timespec ts;
getnstimeofday(&ts);
@@ -899,16 +892,13 @@ static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
if (IS_ERR(md->ptp_clock))
netdev_err(dev, "ptp_clock_register failed %ld\n",
PTR_ERR(md->ptp_clock));
-#endif
}
/* Initialize PTP fields in a new device. */
static void init_ptp_dev(struct tile_net_priv *priv)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
-#endif
}
/* Helper functions for "tile_net_update()". */
@@ -2080,7 +2070,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
}
/* Return subqueue id on this core (one per core). */
-static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
return smp_processor_id();
}
@@ -2098,7 +2089,9 @@ static void tile_net_tx_timeout(struct net_device *dev)
static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
if (cmd == SIOCSHWTSTAMP)
- return tile_hwtstamp_ioctl(dev, rq, cmd);
+ return tile_hwtstamp_set(dev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return tile_hwtstamp_get(dev, rq);
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index f7f2ef49c0c1..d899d0072ae0 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1739,12 +1739,14 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
GELIC_CARD_PORT_STATUS_CHANGED;
- if (gelic_card_init_chain(card, &card->tx_chain,
- card->descr, GELIC_NET_TX_DESCRIPTORS))
+ result = gelic_card_init_chain(card, &card->tx_chain,
+ card->descr, GELIC_NET_TX_DESCRIPTORS);
+ if (result)
goto fail_alloc_tx;
- if (gelic_card_init_chain(card, &card->rx_chain,
- card->descr + GELIC_NET_TX_DESCRIPTORS,
- GELIC_NET_RX_DESCRIPTORS))
+ result = gelic_card_init_chain(card, &card->rx_chain,
+ card->descr + GELIC_NET_TX_DESCRIPTORS,
+ GELIC_NET_RX_DESCRIPTORS);
+ if (result)
goto fail_alloc_rx;
/* head of chain */
@@ -1754,7 +1756,8 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
card->rx_top, card->tx_top, sizeof(struct gelic_descr),
GELIC_NET_RX_DESCRIPTORS);
/* allocate rx skbs */
- if (gelic_card_alloc_rx_skbs(card))
+ result = gelic_card_alloc_rx_skbs(card);
+ if (result)
goto fail_alloc_skbs;
spin_lock_init(&card->tx_lock);
@@ -1772,7 +1775,8 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
}
#ifdef CONFIG_GELIC_WIRELESS
- if (gelic_wl_driver_probe(card)) {
+ result = gelic_wl_driver_probe(card);
+ if (result) {
dev_dbg(&dev->core, "%s: WL init failed\n", __func__);
goto fail_setup_netdev;
}
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 1322546d92ac..88e9c73cebc0 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -38,7 +38,6 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n";
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -1170,19 +1169,12 @@ static int tc35815_tx_full(struct net_device *dev)
static void tc35815_restart(struct net_device *dev)
{
struct tc35815_local *lp = netdev_priv(dev);
+ int ret;
if (lp->phy_dev) {
- int timeout;
-
- phy_write(lp->phy_dev, MII_BMCR, BMCR_RESET);
- timeout = 100;
- while (--timeout) {
- if (!(phy_read(lp->phy_dev, MII_BMCR) & BMCR_RESET))
- break;
- udelay(1);
- }
- if (!timeout)
- printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name);
+ ret = phy_init_hw(lp->phy_dev);
+ if (ret)
+ printk(KERN_ERR "%s: PHY init failed.\n", dev->name);
}
spin_lock_bh(&lp->rx_lock);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index c4dbf981804b..47eeb3abf7f7 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -32,7 +32,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/net.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.h b/drivers/net/ethernet/tundra/tsi108_eth.h
index 5fee7d78dc6d..4a03c594b2b1 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.h
+++ b/drivers/net/ethernet/tundra/tsi108_eth.h
@@ -16,9 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index cce6c4bc556a..ef312bc6b865 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1618,6 +1618,7 @@ static void rhine_reset_task(struct work_struct *work)
goto out_unlock;
napi_disable(&rp->napi);
+ netif_tx_disable(dev);
spin_lock_bh(&rp->lock);
/* clear all descriptors */
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 2166e879a096..a4347508031c 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -29,7 +29,6 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index f9293da19e26..1ec65feebb9e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -22,7 +22,6 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index fefb8cd5eb65..36052b98b3fc 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/uaccess.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index bdd20b888cf6..7c81ffb861e8 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -27,8 +27,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* ALTERNATIVELY, this driver may be distributed under the terms of
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index bcc224a83734..25283f17d82f 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -373,7 +373,7 @@ static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}
-static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config cfg;
struct ixp46x_ts_regs *regs;
@@ -417,6 +417,32 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
+static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config cfg;
+ struct port *port = netdev_priv(netdev);
+
+ cfg.flags = 0;
+ cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+
+ switch (port->hwts_rx_en) {
+ case 0:
+ cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ case PTP_SLAVE_MODE:
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ break;
+ case PTP_MASTER_MODE:
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -ERANGE;
+ }
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
int write, u16 cmd)
{
@@ -959,8 +985,12 @@ static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
if (!netif_running(dev))
return -EINVAL;
- if (cpu_is_ixp46x() && cmd == SIOCSHWTSTAMP)
- return hwtstamp_ioctl(dev, req, cmd);
+ if (cpu_is_ixp46x()) {
+ if (cmd == SIOCSHWTSTAMP)
+ return hwtstamp_set(dev, req);
+ if (cmd == SIOCGHWTSTAMP)
+ return hwtstamp_get(dev, req);
+ }
return phy_mii_ioctl(port->phydev, req, cmd);
}
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 0b40e1c46f07..eb78203cd58e 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -206,7 +206,6 @@
#include <linux/eisa.h>
#include <linux/errno.h>
#include <linux/fddidevice.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
@@ -241,12 +240,6 @@ static char version[] =
*/
#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
-#ifdef CONFIG_PCI
-#define DFX_BUS_PCI(dev) (dev->bus == &pci_bus_type)
-#else
-#define DFX_BUS_PCI(dev) 0
-#endif
-
#ifdef CONFIG_EISA
#define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
#else
@@ -436,7 +429,7 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
static void dfx_get_bars(struct device *bdev,
resource_size_t *bar_start, resource_size_t *bar_len)
{
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
@@ -518,7 +511,7 @@ static const struct net_device_ops dfx_netdev_ops = {
static int dfx_register(struct device *bdev)
{
static int version_disp;
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
const char *print_name = dev_name(bdev);
@@ -667,7 +660,7 @@ static void dfx_bus_init(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
@@ -813,7 +806,7 @@ static void dfx_bus_uninit(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
u8 val;
@@ -967,7 +960,7 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
{
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
@@ -1877,7 +1870,7 @@ static irqreturn_t dfx_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
@@ -3579,7 +3572,7 @@ static void dfx_unregister(struct device *bdev)
{
struct net_device *dev = dev_get_drvdata(bdev);
DFX_board_t *bp = netdev_priv(dev);
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
resource_size_t bar_start = 0; /* pointer to port */
diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c
index f83993590174..7d3779ae7377 100644
--- a/drivers/net/fddi/skfp/fplustm.c
+++ b/drivers/net/fddi/skfp/fplustm.c
@@ -23,6 +23,7 @@
#include "h/smc.h"
#include "h/supern_2.h"
#include <linux/bitrev.h>
+#include <linux/etherdevice.h>
#ifndef lint
static const char ID_sccs[] = "@(#)fplustm.c 1.32 99/02/23 (C) SK " ;
@@ -55,14 +56,14 @@ static char cam_warning [] = "E_SMT_004: CAM still busy\n";
#define DUMMY_READ() smc->hw.mc_dummy = (u_short) inp(ADDR(B0_RAP))
-#define CHECK_NPP() { unsigned k = 10000 ;\
+#define CHECK_NPP() { unsigned int k = 10000 ;\
while ((inpw(FM_A(FM_STMCHN)) & FM_SNPPND) && k) k--;\
if (!k) { \
SMT_PANIC(smc,SMT_E0130, SMT_E0130_MSG) ; \
} \
}
-#define CHECK_CAM() { unsigned k = 10 ;\
+#define CHECK_CAM() { unsigned int k = 10 ;\
while (!(inpw(FM_A(FM_AFSTAT)) & FM_DONE) && k) k--;\
if (!k) { \
SMT_PANIC(smc,SMT_E0131, SMT_E0131_MSG) ; \
@@ -356,25 +357,25 @@ static void set_formac_addr(struct s_smc *smc)
long t_requ = smc->mib.m[MAC0].fddiMACT_Req ;
outpw(FM_A(FM_SAID),my_said) ; /* set short address */
- outpw(FM_A(FM_LAIL),(unsigned)((smc->hw.fddi_home_addr.a[4]<<8) +
+ outpw(FM_A(FM_LAIL),(unsigned short)((smc->hw.fddi_home_addr.a[4]<<8) +
smc->hw.fddi_home_addr.a[5])) ;
- outpw(FM_A(FM_LAIC),(unsigned)((smc->hw.fddi_home_addr.a[2]<<8) +
+ outpw(FM_A(FM_LAIC),(unsigned short)((smc->hw.fddi_home_addr.a[2]<<8) +
smc->hw.fddi_home_addr.a[3])) ;
- outpw(FM_A(FM_LAIM),(unsigned)((smc->hw.fddi_home_addr.a[0]<<8) +
+ outpw(FM_A(FM_LAIM),(unsigned short)((smc->hw.fddi_home_addr.a[0]<<8) +
smc->hw.fddi_home_addr.a[1])) ;
outpw(FM_A(FM_SAGP),my_sagp) ; /* set short group address */
- outpw(FM_A(FM_LAGL),(unsigned)((smc->hw.fp.group_addr.a[4]<<8) +
+ outpw(FM_A(FM_LAGL),(unsigned short)((smc->hw.fp.group_addr.a[4]<<8) +
smc->hw.fp.group_addr.a[5])) ;
- outpw(FM_A(FM_LAGC),(unsigned)((smc->hw.fp.group_addr.a[2]<<8) +
+ outpw(FM_A(FM_LAGC),(unsigned short)((smc->hw.fp.group_addr.a[2]<<8) +
smc->hw.fp.group_addr.a[3])) ;
- outpw(FM_A(FM_LAGM),(unsigned)((smc->hw.fp.group_addr.a[0]<<8) +
+ outpw(FM_A(FM_LAGM),(unsigned short)((smc->hw.fp.group_addr.a[0]<<8) +
smc->hw.fp.group_addr.a[1])) ;
/* set r_request regs. (MSW & LSW of TRT ) */
- outpw(FM_A(FM_TREQ1),(unsigned)(t_requ>>16)) ;
- outpw(FM_A(FM_TREQ0),(unsigned)t_requ) ;
+ outpw(FM_A(FM_TREQ1),(unsigned short)(t_requ>>16)) ;
+ outpw(FM_A(FM_TREQ0),(unsigned short)t_requ) ;
}
static void set_int(char *p, int l)
@@ -394,10 +395,10 @@ static void set_int(char *p, int l)
* append 'end of chain' pointer
*/
static void copy_tx_mac(struct s_smc *smc, u_long td, struct fddi_mac *mac,
- unsigned off, int len)
+ unsigned int off, int len)
/* u_long td; transmit descriptor */
/* struct fddi_mac *mac; mac frame pointer */
-/* unsigned off; start address within buffer memory */
+/* unsigned int off; start address within buffer memory */
/* int len ; length of the frame including the FC */
{
int i ;
@@ -1082,7 +1083,7 @@ static struct s_fpmc* mac_get_mc_table(struct s_smc *smc,
slot = tb ;
continue ;
}
- if (memcmp((char *)&tb->a,(char *)own,6))
+ if (!ether_addr_equal((char *)&tb->a, (char *)own))
continue ;
return tb;
}
diff --git a/drivers/net/fddi/skfp/h/supern_2.h b/drivers/net/fddi/skfp/h/supern_2.h
index 0b73690280f6..4ee360d2dc62 100644
--- a/drivers/net/fddi/skfp/h/supern_2.h
+++ b/drivers/net/fddi/skfp/h/supern_2.h
@@ -92,33 +92,33 @@
union rx_descr {
struct {
#ifdef LITTLE_ENDIAN
- unsigned rx_length :16 ; /* frame length lower/upper byte */
- unsigned rx_erfbb :2 ; /* received frame byte boundary */
- unsigned rx_reserv2:2 ; /* reserved */
- unsigned rx_sfrmty :3 ; /* frame type bits */
- unsigned rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
- unsigned rx_sfrmerr:1 ; /* received frame not valid */
- unsigned rx_seac0 :1 ; /* frame-copied C-indicator */
- unsigned rx_seac1 :1 ; /* address-match A-indicator */
- unsigned rx_seac2 :1 ; /* frame-error E-indicator */
- unsigned rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
- unsigned rx_reserv1:1 ; /* reserved */
- unsigned rx_msrabt :1 ; /* memory status receive abort */
- unsigned rx_msvalid:1 ; /* memory status valid */
+ unsigned int rx_length :16 ; /* frame length lower/upper byte */
+ unsigned int rx_erfbb :2 ; /* received frame byte boundary */
+ unsigned int rx_reserv2:2 ; /* reserved */
+ unsigned int rx_sfrmty :3 ; /* frame type bits */
+ unsigned int rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
+ unsigned int rx_sfrmerr:1 ; /* received frame not valid */
+ unsigned int rx_seac0 :1 ; /* frame-copied C-indicator */
+ unsigned int rx_seac1 :1 ; /* address-match A-indicator */
+ unsigned int rx_seac2 :1 ; /* frame-error E-indicator */
+ unsigned int rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
+ unsigned int rx_reserv1:1 ; /* reserved */
+ unsigned int rx_msrabt :1 ; /* memory status receive abort */
+ unsigned int rx_msvalid:1 ; /* memory status valid */
#else
- unsigned rx_msvalid:1 ; /* memory status valid */
- unsigned rx_msrabt :1 ; /* memory status receive abort */
- unsigned rx_reserv1:1 ; /* reserved */
- unsigned rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
- unsigned rx_seac2 :1 ; /* frame-error E-indicator */
- unsigned rx_seac1 :1 ; /* address-match A-indicator */
- unsigned rx_seac0 :1 ; /* frame-copied C-indicator */
- unsigned rx_sfrmerr:1 ; /* received frame not valid */
- unsigned rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
- unsigned rx_sfrmty :3 ; /* frame type bits */
- unsigned rx_erfbb :2 ; /* received frame byte boundary */
- unsigned rx_reserv2:2 ; /* reserved */
- unsigned rx_length :16 ; /* frame length lower/upper byte */
+ unsigned int rx_msvalid:1 ; /* memory status valid */
+ unsigned int rx_msrabt :1 ; /* memory status receive abort */
+ unsigned int rx_reserv1:1 ; /* reserved */
+ unsigned int rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
+ unsigned int rx_seac2 :1 ; /* frame-error E-indicator */
+ unsigned int rx_seac1 :1 ; /* address-match A-indicator */
+ unsigned int rx_seac0 :1 ; /* frame-copied C-indicator */
+ unsigned int rx_sfrmerr:1 ; /* received frame not valid */
+ unsigned int rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
+ unsigned int rx_sfrmty :3 ; /* frame type bits */
+ unsigned int rx_erfbb :2 ; /* received frame byte boundary */
+ unsigned int rx_reserv2:2 ; /* reserved */
+ unsigned int rx_length :16 ; /* frame length lower/upper byte */
#endif
} r ;
long i ;
@@ -162,23 +162,23 @@ union rx_descr {
union tx_descr {
struct {
#ifdef LITTLE_ENDIAN
- unsigned tx_length:16 ; /* frame length lower/upper byte */
- unsigned tx_res :8 ; /* reserved (bit 16..23) */
- unsigned tx_xmtabt:1 ; /* transmit abort */
- unsigned tx_nfcs :1 ; /* no frame check sequence */
- unsigned tx_xdone :1 ; /* give up token */
- unsigned tx_rpxm :2 ; /* byte offset */
- unsigned tx_pat1 :2 ; /* must be TXP1 */
- unsigned tx_more :1 ; /* more frame in chain */
+ unsigned int tx_length:16 ; /* frame length lower/upper byte */
+ unsigned int tx_res :8 ; /* reserved (bit 16..23) */
+ unsigned int tx_xmtabt:1 ; /* transmit abort */
+ unsigned int tx_nfcs :1 ; /* no frame check sequence */
+ unsigned int tx_xdone :1 ; /* give up token */
+ unsigned int tx_rpxm :2 ; /* byte offset */
+ unsigned int tx_pat1 :2 ; /* must be TXP1 */
+ unsigned int tx_more :1 ; /* more frame in chain */
#else
- unsigned tx_more :1 ; /* more frame in chain */
- unsigned tx_pat1 :2 ; /* must be TXP1 */
- unsigned tx_rpxm :2 ; /* byte offset */
- unsigned tx_xdone :1 ; /* give up token */
- unsigned tx_nfcs :1 ; /* no frame check sequence */
- unsigned tx_xmtabt:1 ; /* transmit abort */
- unsigned tx_res :8 ; /* reserved (bit 16..23) */
- unsigned tx_length:16 ; /* frame length lower/upper byte */
+ unsigned int tx_more :1 ; /* more frame in chain */
+ unsigned int tx_pat1 :2 ; /* must be TXP1 */
+ unsigned int tx_rpxm :2 ; /* byte offset */
+ unsigned int tx_xdone :1 ; /* give up token */
+ unsigned int tx_nfcs :1 ; /* no frame check sequence */
+ unsigned int tx_xmtabt:1 ; /* transmit abort */
+ unsigned int tx_res :8 ; /* reserved (bit 16..23) */
+ unsigned int tx_length:16 ; /* frame length lower/upper byte */
#endif
} t ;
long i ;
@@ -202,13 +202,13 @@ union tx_descr {
union tx_pointer {
struct t {
#ifdef LITTLE_ENDIAN
- unsigned tp_pointer:16 ; /* pointer to tx_descr (low/high) */
- unsigned tp_res :8 ; /* reserved (bit 16..23) */
- unsigned tp_pattern:8 ; /* fixed pattern (bit 24..31) */
+ unsigned int tp_pointer:16 ; /* pointer to tx_descr (low/high) */
+ unsigned int tp_res :8 ; /* reserved (bit 16..23) */
+ unsigned int tp_pattern:8 ; /* fixed pattern (bit 24..31) */
#else
- unsigned tp_pattern:8 ; /* fixed pattern (bit 24..31) */
- unsigned tp_res :8 ; /* reserved (bit 16..23) */
- unsigned tp_pointer:16 ; /* pointer to tx_descr (low/high) */
+ unsigned int tp_pattern:8 ; /* fixed pattern (bit 24..31) */
+ unsigned int tp_res :8 ; /* reserved (bit 16..23) */
+ unsigned int tp_pointer:16 ; /* pointer to tx_descr (low/high) */
#endif
} t ;
long i ;
diff --git a/drivers/net/fddi/skfp/h/targetos.h b/drivers/net/fddi/skfp/h/targetos.h
index 53bacc107160..355194251ff8 100644
--- a/drivers/net/fddi/skfp/h/targetos.h
+++ b/drivers/net/fddi/skfp/h/targetos.h
@@ -48,7 +48,6 @@
#include <linux/fddidevice.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
-#include <linux/init.h>
// is redefined by linux, but we need our definition
#undef ADDR
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 713d303a06a9..d5f58121b2e2 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -351,7 +351,6 @@ static void skfp_remove_one(struct pci_dev *pdev)
free_netdev(p);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
/*
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index 08d94329c12f..9edada85ed02 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -900,7 +900,7 @@ static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason,
rdf->version.v_pad2 = 0 ;
/* set P13 */
- if ((unsigned) frame_len <= SMT_MAX_INFO_LEN - sizeof(*rdf) +
+ if ((unsigned int) frame_len <= SMT_MAX_INFO_LEN - sizeof(*rdf) +
2*sizeof(struct smt_header))
len = frame_len ;
else
diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c
index f6f7baf9f27a..cc27dea3414e 100644
--- a/drivers/net/fddi/skfp/srf.c
+++ b/drivers/net/fddi/skfp/srf.c
@@ -73,7 +73,7 @@ void smt_init_evc(struct s_smc *smc)
{
struct s_srf_evc *evc ;
const struct evc_init *init ;
- int i ;
+ unsigned int i ;
int index ;
int offset ;
@@ -84,7 +84,7 @@ void smt_init_evc(struct s_smc *smc)
evc = smc->evcs ;
init = evc_inits ;
- for (i = 0 ; (unsigned) i < MAX_INIT_EVC ; i++) {
+ for (i = 0 ; i < MAX_INIT_EVC ; i++) {
for (index = 0 ; index < init->n ; index++) {
evc->evc_code = init->code ;
evc->evc_para = init->para ;
@@ -98,7 +98,7 @@ void smt_init_evc(struct s_smc *smc)
init++ ;
}
- if ((unsigned) (evc - smc->evcs) > MAX_EVCS) {
+ if ((unsigned int) (evc - smc->evcs) > MAX_EVCS) {
SMT_PANIC(smc,SMT_E0127, SMT_E0127_MSG) ;
}
@@ -139,7 +139,7 @@ void smt_init_evc(struct s_smc *smc)
offset++ ;
}
#ifdef DEBUG
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (SMT_IS_CONDITION(evc->evc_code)) {
if (!evc->evc_cond_state) {
SMT_PANIC(smc,SMT_E0128, SMT_E0128_MSG) ;
@@ -160,10 +160,10 @@ void smt_init_evc(struct s_smc *smc)
static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index)
{
- int i ;
+ unsigned int i ;
struct s_srf_evc *evc ;
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (evc->evc_code == code && evc->evc_index == index)
return evc;
}
@@ -335,9 +335,9 @@ void smt_srf_event(struct s_smc *smc, int code, int index, int cond)
static void clear_all_rep(struct s_smc *smc)
{
struct s_srf_evc *evc ;
- int i ;
+ unsigned int i ;
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
evc->evc_rep_required = FALSE ;
if (SMT_IS_CONDITION(evc->evc_code))
*evc->evc_cond_state = FALSE ;
@@ -348,10 +348,10 @@ static void clear_all_rep(struct s_smc *smc)
static void clear_reported(struct s_smc *smc)
{
struct s_srf_evc *evc ;
- int i ;
+ unsigned int i ;
smc->srf.any_report = FALSE ;
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (SMT_IS_CONDITION(evc->evc_code)) {
if (*evc->evc_cond_state == FALSE)
evc->evc_rep_required = FALSE ;
@@ -375,7 +375,7 @@ static void smt_send_srf(struct s_smc *smc)
struct s_srf_evc *evc ;
SK_LOC_DECL(struct s_pcon,pcon) ;
SMbuf *mb ;
- int i ;
+ unsigned int i ;
static const struct fddi_addr SMT_SRF_DA = {
{ 0x80, 0x01, 0x43, 0x00, 0x80, 0x08 }
@@ -405,7 +405,7 @@ static void smt_send_srf(struct s_smc *smc)
smt_add_para(smc,&pcon,(u_short) SMT_P1033,0,0) ;
smt_add_para(smc,&pcon,(u_short) SMT_P1034,0,0) ;
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (evc->evc_rep_required) {
smt_add_para(smc,&pcon,evc->evc_para,
(int)evc->evc_index,0) ;
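The srf.c hunks above switch the loop counters to unsigned int so the comparisons against MAX_INIT_EVC and MAX_EVCS no longer need a (unsigned) cast on every iteration. A standalone sketch of the same pattern, with a made-up bound and array rather than the driver's:

#include <stdio.h>

#define MAX_SLOTS 32u                           /* unsigned bound, like MAX_EVCS */

int main(void)
{
        int slot[MAX_SLOTS];
        unsigned int i;                         /* unsigned counter: no cast in the test */

        for (i = 0; i < MAX_SLOTS; i++)
                slot[i] = (int)i;

        printf("filled %u slots\n", i);
        return 0;
}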
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 1450e33fc250..66e2b19ef709 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -662,7 +662,8 @@ static int sixpack_open(struct tty_struct *tty)
tty->receive_room = 65536;
/* Now we're ready to register. */
- if (register_netdev(dev))
+ err = register_netdev(dev);
+ if (err)
goto out_free;
tnc_init(sp);
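The 6pack.c hunk stores the register_netdev() return value instead of discarding it, so the open path can hand the real error code back to the caller. A minimal sketch of the pattern (the surrounding setup is omitted and the function name is a placeholder, not the driver's code):

#include <linux/netdevice.h>

static int example_attach(struct net_device *dev)
{
        int err;

        err = register_netdev(dev);     /* keep the real errno */
        if (err)
                return err;             /* caller sees -ENOMEM, -EEXIST, ... not a generic failure */

        return 0;
}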
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index f91bf0ddf031..d50b23cf9ea9 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -208,7 +208,7 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
eth = eth_hdr(skb);
if (!(bpq->acpt_addr[0] & 0x01) &&
- memcmp(eth->h_source, bpq->acpt_addr, ETH_ALEN))
+ !ether_addr_equal(eth->h_source, bpq->acpt_addr))
goto drop_unlock;
if (skb_cow(skb, sizeof(struct ethhdr)))
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 3169252613fa..5d78c1d08abd 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case HDLCDRVCTL_CALIBRATE:
if(!capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (bi.data.calibrate > INT_MAX / s->par.bitrate)
+ return -EINVAL;
s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
return 0;
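The hdlcdrv.c hunk rejects calibrate values whose product with the bitrate would overflow an int, checking the bound before the multiplication happens. A standalone sketch of that pre-multiplication check, with made-up numbers:

#include <limits.h>
#include <stdio.h>

/* Returns -1 if a * b would overflow int, otherwise stores the product. */
static int checked_mul(int a, int b, int *out)
{
        if (a < 0 || b <= 0 || a > INT_MAX / b)
                return -1;
        *out = a * b;
        return 0;
}

int main(void)
{
        int product;

        if (checked_mul(300000, 9600, &product))        /* 300000 * 9600 > INT_MAX */
                printf("rejected: would overflow\n");
        else
                printf("product = %d\n", product);
        return 0;
}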
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 8e01c457015b..8a6c720a4cc9 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -9,8 +9,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) Hans Alblas PE1AYX <hans@esrac.ele.tue.nl>
* Copyright (C) 2004, 05 Ralf Baechle DL5RB <ralf@linux-mips.org>
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 1971411574db..61dd2447e1bb 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
case SIOCYAMGCFG:
+ memset(&yi, 0, sizeof(yi));
yi.cfg.mask = 0xffffffff;
yi.cfg.iobase = yp->iobase;
yi.cfg.irq = yp->irq;
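The yam.c hunk zeroes the whole ioctl reply structure before filling in individual fields, so the copy back to user space cannot leak uninitialized kernel stack bytes through padding or members that are never set. A minimal sketch of the pattern (struct and field names here are placeholders, not the driver's):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct example_cfg {
        unsigned int mask;
        unsigned int iobase;
        unsigned int irq;
        /* padding / fields added later are covered by the memset too */
};

static int example_get_cfg(void __user *arg, unsigned int iobase, unsigned int irq)
{
        struct example_cfg cfg;

        memset(&cfg, 0, sizeof(cfg));   /* clear everything first */
        cfg.mask   = 0xffffffff;
        cfg.iobase = iobase;
        cfg.irq    = irq;

        return copy_to_user(arg, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}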
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 00ed75155ce8..e580583f196d 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -37,7 +37,6 @@
#include <linux/netdevice.h>
#include <linux/hippidevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/slab.h>
@@ -213,10 +212,8 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rrpriv->tx_ring_dma);
if (rrpriv->regs)
pci_iounmap(pdev, rrpriv->regs);
- if (pdev) {
+ if (pdev)
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
- }
out2:
free_netdev(dev);
out3:
@@ -244,7 +241,6 @@ static void rr_remove_one(struct pci_dev *pdev)
pci_iounmap(pdev, rr->regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e6fe0d80d612..7b594ce3f21d 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -12,8 +12,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
@@ -463,7 +462,7 @@ struct nvsp_message {
#define NETVSC_MTU 65536
-#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*2) /* 2MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 2b0480416b31..03a2c6e17158 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
@@ -137,8 +136,7 @@ static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
if (net_device->recv_buf) {
/* Free up the receive buffer */
- free_pages((unsigned long)net_device->recv_buf,
- get_order(net_device->recv_buf_size));
+ vfree(net_device->recv_buf);
net_device->recv_buf = NULL;
}
@@ -164,9 +162,7 @@ static int netvsc_init_recv_buf(struct hv_device *device)
return -ENODEV;
ndev = net_device->ndev;
- net_device->recv_buf =
- (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
- get_order(net_device->recv_buf_size));
+ net_device->recv_buf = vzalloc(net_device->recv_buf_size);
if (!net_device->recv_buf) {
netdev_err(ndev, "unable to allocate receive "
"buffer of size %d\n", net_device->recv_buf_size);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 524f713f6017..7756118c2f0a 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
@@ -261,9 +260,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
struct sk_buff *skb;
net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
- if (!net) {
- netdev_err(net, "got receive callback but net device"
- " not initialized yet\n");
+ if (!net || net->reg_state != NETREG_REGISTERED) {
packet->status = NVSP_STAT_FAIL;
return 0;
}
@@ -327,7 +324,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
return -EINVAL;
nvdev->start_remove = true;
- cancel_delayed_work_sync(&ndevctx->dwork);
cancel_work_sync(&ndevctx->work);
netif_tx_disable(ndev);
rndis_filter_device_remove(hdev);
@@ -436,19 +432,11 @@ static int netvsc_probe(struct hv_device *dev,
SET_ETHTOOL_OPS(net, &ethtool_ops);
SET_NETDEV_DEV(net, &dev->device);
- ret = register_netdev(net);
- if (ret != 0) {
- pr_err("Unable to register netdev.\n");
- free_netdev(net);
- goto out;
- }
-
/* Notify the netvsc driver of the new device */
device_info.ring_size = ring_size;
ret = rndis_filter_device_add(dev, &device_info);
if (ret != 0) {
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
- unregister_netdev(net);
free_netdev(net);
hv_set_drvdata(dev, NULL);
return ret;
@@ -457,7 +445,13 @@ static int netvsc_probe(struct hv_device *dev,
netif_carrier_on(net);
-out:
+ ret = register_netdev(net);
+ if (ret != 0) {
+ pr_err("Unable to register netdev.\n");
+ rndis_filter_device_remove(dev);
+ free_netdev(net);
+ }
+
return ret;
}
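The netvsc_drv.c hunks move register_netdev() to the end of probe, after the RNDIS device has been added, so the interface is only exposed to user space once the backend actually works; if registration then fails, the backend is removed again. A sketch of that ordering, where backend_add()/backend_remove() are stand-ins for rndis_filter_device_add()/_remove():

#include <linux/netdevice.h>

static int backend_add(struct net_device *net)      { return 0; }   /* placeholder */
static void backend_remove(struct net_device *net)  { }             /* placeholder */

static int example_probe(struct net_device *net)
{
        int ret;

        ret = backend_add(net);         /* bring the device up first */
        if (ret) {
                free_netdev(net);
                return ret;
        }

        netif_carrier_on(net);

        ret = register_netdev(net);     /* expose it to user space last */
        if (ret) {
                backend_remove(net);    /* unwind in reverse order */
                free_netdev(net);
        }
        return ret;
}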
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 0775f0aefd1e..1084e5de3ceb 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 2cbe1c249996..ab31544bc254 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -987,7 +987,6 @@ err_gpio_dir:
err_slp_tr:
gpio_free(lp->rstn);
err_rstn:
- spi_set_drvdata(spi, NULL);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
return rc;
@@ -1006,7 +1005,6 @@ static int at86rf230_remove(struct spi_device *spi)
gpio_free(lp->slp_tr);
gpio_free(lp->rstn);
- spi_set_drvdata(spi, NULL);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index c6e46d6e9f75..246befa4ba05 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -715,7 +715,6 @@ static int mrf24j40_remove(struct spi_device *spi)
* complete? */
/* Clean up the SPI stuff. */
- spi_set_drvdata(spi, NULL);
kfree(devrec->buf);
kfree(devrec);
return 0;
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 2a30193d0d50..3da44d5d9149 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -62,8 +62,6 @@ config SIR_BFIN_PIO
bool "PIO mode"
endchoice
-comment "Dongle support"
-
config SH_SIR
tristate "SuperH SIR on UART"
depends on IRDA && SUPERH && \
@@ -74,6 +72,8 @@ config SH_SIR
Say Y here if your want to enable SIR function on SuperH UART
devices.
+comment "Dongle support"
+
config DONGLE
bool "Serial dongle support"
depends on IRTTY_SIR
@@ -210,13 +210,6 @@ config KINGSUN_DONGLE
To compile it as a module, choose M here: the module will be called
kingsun-sir.
-config EP7211_DONGLE
- tristate "Cirrus Logic clps711x I/R support"
- depends on IRTTY_SIR && ARCH_CLPS711X && IRDA
- help
- Say Y here if you want to build support for the Cirrus logic
- EP7211 chipset's infrared module.
-
config KSDAZZLE_DONGLE
tristate "KingSun Dazzle IrDA-USB dongle"
depends on IRDA && USB
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index dfc64537f62f..be8ab5b9a4a2 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -35,7 +35,6 @@ obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o
obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
-obj-$(CONFIG_EP7211_DONGLE) += ep7211-sir.o
obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o
obj-$(CONFIG_KSDAZZLE_DONGLE) += ksdazzle-sir.o
obj-$(CONFIG_KS959_DONGLE) += ks959-sir.o
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 7a1f684edcb5..5f91e3e01c04 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -15,11 +15,9 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c
deleted file mode 100644
index 5fe1f4dd3369..000000000000
--- a/drivers/net/irda/ep7211-sir.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * IR port driver for the Cirrus Logic CLPS711X processors
- *
- * Copyright 2001, Blue Mug Inc. All rights reserved.
- * Copyright 2007, Samuel Ortiz <samuel@sortiz.org>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include <mach/hardware.h>
-
-#include "sir-dev.h"
-
-static int clps711x_dongle_open(struct sir_dev *dev)
-{
- unsigned int syscon;
-
- /* Turn on the SIR encoder. */
- syscon = clps_readl(SYSCON1);
- syscon |= SYSCON1_SIREN;
- clps_writel(syscon, SYSCON1);
-
- return 0;
-}
-
-static int clps711x_dongle_close(struct sir_dev *dev)
-{
- unsigned int syscon;
-
- /* Turn off the SIR encoder. */
- syscon = clps_readl(SYSCON1);
- syscon &= ~SYSCON1_SIREN;
- clps_writel(syscon, SYSCON1);
-
- return 0;
-}
-
-static struct dongle_driver clps711x_dongle = {
- .owner = THIS_MODULE,
- .driver_name = "EP7211 IR driver",
- .type = IRDA_EP7211_DONGLE,
- .open = clps711x_dongle_open,
- .close = clps711x_dongle_close,
-};
-
-static int clps711x_sir_probe(struct platform_device *pdev)
-{
- return irda_register_dongle(&clps711x_dongle);
-}
-
-static int clps711x_sir_remove(struct platform_device *pdev)
-{
- return irda_unregister_dongle(&clps711x_dongle);
-}
-
-static struct platform_driver clps711x_sir_driver = {
- .driver = {
- .name = "sir-clps711x",
- .owner = THIS_MODULE,
- },
- .probe = clps711x_sir_probe,
- .remove = clps711x_sir_remove,
-};
-module_platform_driver(clps711x_sir_driver);
-
-MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>");
-MODULE_DESCRIPTION("EP7211 IR dongle driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */
diff --git a/drivers/net/irda/esi-sir.c b/drivers/net/irda/esi-sir.c
index a908df7c4b9d..019a3e848bcb 100644
--- a/drivers/net/irda/esi-sir.c
+++ b/drivers/net/irda/esi-sir.c
@@ -25,9 +25,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index f9a86bdb12fa..925b78cc9797 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -58,7 +58,6 @@
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 7b4833874ef5..96fe3659012d 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -64,7 +64,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/device.h>
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 5f3aeac3f86d..e6b3804edacd 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -116,7 +116,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/device.h>
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 2d4b6a1ab202..37f23a189b35 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -80,7 +80,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/device.h>
diff --git a/drivers/net/irda/litelink-sir.c b/drivers/net/irda/litelink-sir.c
index d6d9d2e5ad49..6827777cbeea 100644
--- a/drivers/net/irda/litelink-sir.c
+++ b/drivers/net/irda/litelink-sir.c
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/ma600-sir.c b/drivers/net/irda/ma600-sir.c
index e91216452379..a9a81358477b 100644
--- a/drivers/net/irda/ma600-sir.c
+++ b/drivers/net/irda/ma600-sir.c
@@ -25,9 +25,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 3f138ca88670..16f8ffb50e04 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -48,7 +48,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/device.h>
diff --git a/drivers/net/irda/old_belkin-sir.c b/drivers/net/irda/old_belkin-sir.c
index 75714bc71030..f237136f3827 100644
--- a/drivers/net/irda/old_belkin-sir.c
+++ b/drivers/net/irda/old_belkin-sir.c
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index ff45cd0d60e8..c96b46b2c3a8 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -804,7 +804,7 @@ static int sh_irda_probe(struct platform_device *pdev)
goto err_mem_4;
platform_set_drvdata(pdev, ndev);
- err = request_irq(irq, sh_irda_irq, 0, "sh_irda", self);
+ err = devm_request_irq(&pdev->dev, irq, sh_irda_irq, 0, "sh_irda", self);
if (err) {
dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
goto err_mem_4;
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 8d9ae5a086d5..cadf52e22464 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -761,7 +761,7 @@ static int sh_sir_probe(struct platform_device *pdev)
goto err_mem_4;
platform_set_drvdata(pdev, ndev);
- err = request_irq(irq, sh_sir_irq, 0, "sh_sir", self);
+ err = devm_request_irq(&pdev->dev, irq, sh_sir_irq, 0, "sh_sir", self);
if (err) {
dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
goto err_mem_4;
diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c
index 2a9930e6e2af..cfbabb63f5cc 100644
--- a/drivers/net/irda/sir_dongle.c
+++ b/drivers/net/irda/sir_dongle.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/mutex.h>
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 0dcdf1592f6b..282120430f12 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -34,9 +34,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/smsc-ircc2.h b/drivers/net/irda/smsc-ircc2.h
index 317b7fd69bb3..4829fa22cb29 100644
--- a/drivers/net/irda/smsc-ircc2.h
+++ b/drivers/net/irda/smsc-ircc2.h
@@ -25,9 +25,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 876e709b65ba..dd1bd1060ec9 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -41,7 +41,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/time.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 9abaec27f962..2900af091c2d 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -17,8 +17,7 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+this program; if not, see <http://www.gnu.org/licenses/>.
F01 Oct/02/02: Modify code for V0.11(move out back to back transfer)
F02 Oct/28/02: Add SB device ID for 3147 and 3177.
@@ -408,7 +407,6 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
err_out2:
release_region(self->io.fir_base, self->io.fir_ext);
err_out1:
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
return err;
}
@@ -442,7 +440,6 @@ static void via_remove_one(struct pci_dev *pdev)
if (self->rx_buff.head)
dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
- pci_set_drvdata(pdev, NULL);
free_netdev(self->netdev);
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index f903a6a2dcb7..7ce820ecc361 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -18,8 +18,7 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+this program; if not, see <http://www.gnu.org/licenses/>.
* Comment:
* jul/08/2002 : Rx buffer length should use Rx ring ptr.
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index c5bd58b4d8a8..485006604bbc 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -15,9 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
@@ -1695,7 +1693,6 @@ out_freedev:
out_disable:
pci_disable_device(pdev);
out:
- pci_set_drvdata(pdev, NULL);
return -ENODEV;
}
@@ -1721,8 +1718,6 @@ static void vlsi_irda_remove(struct pci_dev *pdev)
free_netdev(ndev);
- pci_set_drvdata(pdev, NULL);
-
IRDA_MESSAGE("%s: %s removed\n", drivername, pci_name(pdev));
}
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index a076eb125349..56399204e68c 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -18,9 +18,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index ac24c27b4b2d..c5011e078e1b 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -39,7 +39,6 @@
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/in.h>
-#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/io.h>
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index acf93798dc67..8433de4509c7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -120,7 +120,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
struct net_device *dev = vlan->dev;
if (local)
- return vlan->forward(dev, skb);
+ return dev_forward_skb(dev, skb);
skb->dev = dev;
if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
@@ -128,7 +128,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
else
skb->pkt_type = PACKET_MULTICAST;
- return vlan->receive(skb);
+ return netif_rx(skb);
}
static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
@@ -251,7 +251,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
skb->dev = dev;
skb->pkt_type = PACKET_HOST;
- ret = vlan->receive(skb);
+ ret = netif_rx(skb);
out:
macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
@@ -290,8 +290,8 @@ xmit_world:
return dev_queue_xmit(skb);
}
-netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
unsigned int len = skb->len;
int ret;
@@ -299,13 +299,13 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
if (vlan->fwd_priv) {
skb->dev = vlan->lowerdev;
- ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv);
+ ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
} else {
ret = macvlan_queue_xmit(skb, dev);
}
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- struct macvlan_pcpu_stats *pcpu_stats;
+ struct vlan_pcpu_stats *pcpu_stats;
pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
@@ -317,7 +317,6 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
}
return ret;
}
-EXPORT_SYMBOL_GPL(macvlan_start_xmit);
static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
@@ -338,6 +337,8 @@ static const struct header_ops macvlan_hard_header_ops = {
.cache_update = eth_header_cache_update,
};
+static struct rtnl_link_ops macvlan_link_ops;
+
static int macvlan_open(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
@@ -353,7 +354,8 @@ static int macvlan_open(struct net_device *dev)
goto hash_add;
}
- if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
+ if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD &&
+ dev->rtnl_link_ops == &macvlan_link_ops) {
vlan->fwd_priv =
lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
@@ -362,10 +364,8 @@ static int macvlan_open(struct net_device *dev)
*/
if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
vlan->fwd_priv = NULL;
- } else {
- dev->features &= ~NETIF_F_LLTX;
+ } else
return 0;
- }
}
err = -EBUSY;
@@ -546,12 +546,12 @@ static int macvlan_init(struct net_device *dev)
macvlan_set_lockdep_class(dev);
- vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
+ vlan->pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
if (!vlan->pcpu_stats)
return -ENOMEM;
for_each_possible_cpu(i) {
- struct macvlan_pcpu_stats *mvlstats;
+ struct vlan_pcpu_stats *mvlstats;
mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
u64_stats_init(&mvlstats->syncp);
}
@@ -577,7 +577,7 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
struct macvlan_dev *vlan = netdev_priv(dev);
if (vlan->pcpu_stats) {
- struct macvlan_pcpu_stats *p;
+ struct vlan_pcpu_stats *p;
u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
u32 rx_errors = 0, tx_dropped = 0;
unsigned int start;
@@ -690,8 +690,18 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ netdev_features_t mask;
+
+ features |= NETIF_F_ALL_FOR_ALL;
+ features &= (vlan->set_features | ~MACVLAN_FEATURES);
+ mask = features;
- return features & (vlan->set_features | ~MACVLAN_FEATURES);
+ features = netdev_increment_features(vlan->lowerdev->features,
+ features,
+ mask);
+ features |= NETIF_F_LLTX;
+
+ return features;
}
static const struct ethtool_ops macvlan_ethtool_ops = {
@@ -803,10 +813,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
}
int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
- int (*receive)(struct sk_buff *skb),
- int (*forward)(struct net_device *dev,
- struct sk_buff *skb))
+ struct nlattr *tb[], struct nlattr *data[])
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvlan_port *port;
@@ -820,13 +827,11 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
if (lowerdev == NULL)
return -ENODEV;
- /* When creating macvlans on top of other macvlans - use
+ /* When creating macvlans or macvtaps on top of other macvlans - use
* the real device as the lowerdev.
*/
- if (lowerdev->rtnl_link_ops == dev->rtnl_link_ops) {
- struct macvlan_dev *lowervlan = netdev_priv(lowerdev);
- lowerdev = lowervlan->lowerdev;
- }
+ if (netif_is_macvlan(lowerdev))
+ lowerdev = macvlan_dev_real_dev(lowerdev);
if (!tb[IFLA_MTU])
dev->mtu = lowerdev->mtu;
@@ -850,8 +855,6 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
vlan->lowerdev = lowerdev;
vlan->dev = dev;
vlan->port = port;
- vlan->receive = receive;
- vlan->forward = forward;
vlan->set_features = MACVLAN_FEATURES;
vlan->mode = MACVLAN_MODE_VEPA;
@@ -896,9 +899,7 @@ EXPORT_SYMBOL_GPL(macvlan_common_newlink);
static int macvlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
- return macvlan_common_newlink(src_net, dev, tb, data,
- netif_rx,
- dev_forward_skb);
+ return macvlan_common_newlink(src_net, dev, tb, data);
}
void macvlan_dellink(struct net_device *dev, struct list_head *head)
@@ -1019,9 +1020,8 @@ static int macvlan_device_event(struct notifier_block *unused,
break;
case NETDEV_FEAT_CHANGE:
list_for_each_entry(vlan, &port->vlans, list) {
- vlan->dev->features = dev->features & MACVLAN_FEATURES;
vlan->dev->gso_max_size = dev->gso_max_size;
- netdev_features_change(vlan->dev);
+ netdev_update_features(vlan->dev);
}
break;
case NETDEV_UNREGISTER:
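The macvlan.c hunks switch the driver onto the shared struct vlan_pcpu_stats and keep updating the counters under the u64_stats seqcount on the transmit path. A trimmed sketch of that per-CPU statistics update (the struct here is a simplified stand-in, not the real vlan_pcpu_stats):

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_pcpu_stats {
        u64                     tx_packets;
        u64                     tx_bytes;
        struct u64_stats_sync   syncp;
};

static struct example_pcpu_stats __percpu *example_stats;

static void example_count_tx(unsigned int len)
{
        struct example_pcpu_stats *stats = this_cpu_ptr(example_stats);

        u64_stats_update_begin(&stats->syncp);  /* writer side of the seqcount */
        stats->tx_packets++;
        stats->tx_bytes += len;
        u64_stats_update_end(&stats->syncp);
}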
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 2a89da080317..ff111a89e17f 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -11,7 +11,6 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
@@ -70,6 +69,11 @@ static const struct proto_ops macvtap_socket_ops;
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
+{
+ return rcu_dereference(dev->rx_handler_data);
+}
+
/*
* RCU usage:
* The macvtap_queue and the macvlan_dev are loosely coupled, the
@@ -219,7 +223,7 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
goto out;
/* Check if we can use flow to select a queue */
- rxq = skb_get_rxhash(skb);
+ rxq = skb_get_hash(skb);
if (rxq) {
tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
goto out;
@@ -271,24 +275,27 @@ static void macvtap_del_queues(struct net_device *dev)
sock_put(&qlist[j]->sk);
}
-/*
- * Forward happens for data that gets sent from one macvlan
- * endpoint to another one in bridge mode. We just take
- * the skb and put it into the receive queue.
- */
-static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
+static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
- struct macvlan_dev *vlan = netdev_priv(dev);
- struct macvtap_queue *q = macvtap_get_queue(dev, skb);
+ struct sk_buff *skb = *pskb;
+ struct net_device *dev = skb->dev;
+ struct macvlan_dev *vlan;
+ struct macvtap_queue *q;
netdev_features_t features = TAP_FEATURES;
+ vlan = macvtap_get_vlan_rcu(dev);
+ if (!vlan)
+ return RX_HANDLER_PASS;
+
+ q = macvtap_get_queue(dev, skb);
if (!q)
- goto drop;
+ return RX_HANDLER_PASS;
if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
goto drop;
- skb->dev = dev;
+ skb_push(skb, ETH_HLEN);
+
/* Apply the forward feature mask so that we perform segmentation
* according to users wishes. This only works if VNET_HDR is
* enabled.
@@ -320,22 +327,13 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
wake_up:
wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
- return NET_RX_SUCCESS;
+ return RX_HANDLER_CONSUMED;
drop:
+ /* Count errors/drops only here, thus don't care about args. */
+ macvlan_count_rx(vlan, 0, 0, 0);
kfree_skb(skb);
- return NET_RX_DROP;
-}
-
-/*
- * Receive is for data from the external interface (lowerdev),
- * in case of macvtap, we can treat that the same way as
- * forward, which macvlan cannot.
- */
-static int macvtap_receive(struct sk_buff *skb)
-{
- skb_push(skb, ETH_HLEN);
- return macvtap_forward(skb->dev, skb);
+ return RX_HANDLER_CONSUMED;
}
static int macvtap_get_minor(struct macvlan_dev *vlan)
@@ -385,6 +383,8 @@ static int macvtap_newlink(struct net *src_net,
struct nlattr *data[])
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ int err;
+
INIT_LIST_HEAD(&vlan->queue_list);
/* Since macvlan supports all offloads by default, make
@@ -392,16 +392,20 @@ static int macvtap_newlink(struct net *src_net,
*/
vlan->tap_features = TUN_OFFLOADS;
+ err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
+ if (err)
+ return err;
+
/* Don't put anything that may fail after macvlan_common_newlink
* because we can't undo what it does.
*/
- return macvlan_common_newlink(src_net, dev, tb, data,
- macvtap_receive, macvtap_forward);
+ return macvlan_common_newlink(src_net, dev, tb, data);
}
static void macvtap_dellink(struct net_device *dev,
struct list_head *head)
{
+ netdev_rx_handler_unregister(dev);
macvtap_del_queues(dev);
macvlan_dellink(dev, head);
}
@@ -588,7 +592,7 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
return 0;
}
-static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
struct virtio_net_hdr *vnet_hdr)
{
memset(vnet_hdr, 0, sizeof(*vnet_hdr));
@@ -619,8 +623,6 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
-
- return 0;
}
/* Get packet from user space buffer */
@@ -727,9 +729,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
}
if (vlan) {
- local_bh_disable();
- macvlan_start_xmit(skb, vlan->dev);
- local_bh_enable();
+ skb->dev = vlan->dev;
+ dev_queue_xmit(skb);
} else {
kfree_skb(skb);
}
@@ -778,9 +779,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if ((len -= vnet_hdr_len) < 0)
return -EINVAL;
- ret = macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
- if (ret)
- return ret;
+ macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
return -EFAULT;
@@ -824,7 +823,7 @@ done:
return ret ? ret : total;
}
-static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
+static ssize_t macvtap_do_read(struct macvtap_queue *q,
const struct iovec *iv, unsigned long len,
int noblock)
{
@@ -875,7 +874,7 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
goto out;
}
- ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
+ ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK);
ret = min_t(ssize_t, ret, len);
if (ret > 0)
iocb->ki_pos = ret;
@@ -1109,7 +1108,7 @@ static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
int ret;
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
return -EINVAL;
- ret = macvtap_do_read(q, iocb, m->msg_iov, total_len,
+ ret = macvtap_do_read(q, m->msg_iov, total_len,
flags & MSG_DONTWAIT);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
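The macvtap.c changes drop the old receive/forward callbacks and hook into the device with a netdev rx_handler instead: the handler is registered together with its private data and returns RX_HANDLER_CONSUMED when it queues the skb, or RX_HANDLER_PASS to let the stack continue. A hedged sketch of that shape, with the actual queueing logic omitted and placeholder names:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        void *priv = rcu_dereference(skb->dev->rx_handler_data);

        if (!priv)
                return RX_HANDLER_PASS;         /* not ours, keep normal processing */

        /* ... deliver skb to the right queue/socket here ... */
        return RX_HANDLER_CONSUMED;             /* we own the skb from here on */
}

static int example_attach(struct net_device *dev, void *priv)
{
        return netdev_rx_handler_register(dev, example_handle_frame, priv);
}

static void example_detach(struct net_device *dev)
{
        netdev_rx_handler_unregister(dev);
}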
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index 8403316eb02b..3e027ed0b3bb 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -342,34 +342,6 @@ void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
EXPORT_SYMBOL(mdio45_ethtool_gset_npage);
/**
- * mdio45_ethtool_spauseparam_an - set auto-negotiated pause parameters
- * @mdio: MDIO interface
- * @ecmd: Ethtool request structure
- *
- * This function assumes that the PHY has an auto-negotiation MMD. It
- * will enable and disable advertising of flow control as appropriate.
- */
-void mdio45_ethtool_spauseparam_an(const struct mdio_if_info *mdio,
- const struct ethtool_pauseparam *ecmd)
-{
- int adv, old_adv;
-
- WARN_ON(!(mdio->mmds & MDIO_DEVS_AN));
-
- old_adv = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN,
- MDIO_AN_ADVERTISE);
- adv = ((old_adv & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) |
- mii_advertise_flowctrl((ecmd->rx_pause ? FLOW_CTRL_RX : 0) |
- (ecmd->tx_pause ? FLOW_CTRL_TX : 0)));
- if (adv != old_adv) {
- mdio->mdio_write(mdio->dev, mdio->prtad, MDIO_MMD_AN,
- MDIO_AN_ADVERTISE, adv);
- mdio45_nway_restart(mdio);
- }
-}
-EXPORT_SYMBOL(mdio45_ethtool_spauseparam_an);
-
-/**
* mdio_mii_ioctl - MII ioctl interface for MDIO (clause 22 or 45) PHYs
* @mdio: MDIO interface
* @mii_data: MII ioctl data structure
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 313a0377f68f..b57ce0cc9657 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -92,8 +92,8 @@ static int cis820x_config_intr(struct phy_device *phydev)
{
int err;
- if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_write(phydev, MII_CIS8201_IMASK,
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, MII_CIS8201_IMASK,
MII_CIS8201_IMASK_MASK);
else
err = phy_write(phydev, MII_CIS8201_IMASK, 0);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 383e8338ad86..d2c08f625a41 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -72,7 +72,7 @@ static int dm9161_config_intr(struct phy_device *phydev)
if (temp < 0)
return temp;
- if(PHY_INTERRUPT_ENABLED == phydev->interrupts )
+ if (PHY_INTERRUPT_ENABLED == phydev->interrupts)
temp &= ~(MII_DM9161_INTR_STOP);
else
temp |= MII_DM9161_INTR_STOP;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 7490b6c866e6..9414fa272160 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -437,7 +437,10 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
if (on) {
gpio_num = gpio_tab[EXTTS0_GPIO + index];
evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
- evnt |= EVNT_RISE;
+ if (rq->extts.flags & PTP_FALLING_EDGE)
+ evnt |= EVNT_FALL;
+ else
+ evnt |= EVNT_RISE;
}
ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
return 0;
@@ -851,8 +854,8 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
- return (rxts->msgtype == (*msgtype & 0xf) &&
- rxts->seqid == ntohs(*seqid));
+ return rxts->msgtype == (*msgtype & 0xf) &&
+ rxts->seqid == ntohs(*seqid);
}
static void dp83640_free_clocks(void)
@@ -1058,6 +1061,13 @@ static void dp83640_remove(struct phy_device *phydev)
kfree(dp83640);
}
+static int dp83640_config_init(struct phy_device *phydev)
+{
+ enable_status_frames(phydev, true);
+ ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
+ return 0;
+}
+
static int dp83640_ack_interrupt(struct phy_device *phydev)
{
int err = phy_read(phydev, MII_DP83640_MISR);
@@ -1195,11 +1205,6 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
mutex_lock(&dp83640->clock->extreg_lock);
- if (dp83640->hwts_tx_en || dp83640->hwts_rx_en) {
- enable_status_frames(phydev, true);
- ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
- }
-
ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);
@@ -1281,6 +1286,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
}
/* fall through */
case HWTSTAMP_TX_ON:
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_queue_tail(&dp83640->tx_queue, skb);
schedule_work(&dp83640->ts_work);
break;
@@ -1330,6 +1336,7 @@ static struct phy_driver dp83640_driver = {
.flags = PHY_HAS_INTERRUPT,
.probe = dp83640_probe,
.remove = dp83640_remove,
+ .config_init = dp83640_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = dp83640_ack_interrupt,
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index b5ddd5077a80..97bf58bf4939 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -48,7 +48,7 @@ MODULE_LICENSE("GPL");
static int ip175c_config_init(struct phy_device *phydev)
{
int err, i;
- static int full_reset_performed = 0;
+ static int full_reset_performed;
if (full_reset_performed == 0) {
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index ff2e45e9cb54..9108f3191701 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -82,7 +82,7 @@ static int lxt970_config_intr(struct phy_device *phydev)
{
int err;
- if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
else
err = phy_write(phydev, MII_LXT970_IER, 0);
@@ -114,7 +114,7 @@ static int lxt971_config_intr(struct phy_device *phydev)
{
int err;
- if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
else
err = phy_write(phydev, MII_LXT971_IER, 0);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 2e3c778ea9bf..bd37e45c89c0 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -894,6 +894,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -907,6 +909,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -920,6 +924,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -933,6 +939,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = {.owner = THIS_MODULE,},
},
{
@@ -946,6 +954,8 @@ static struct phy_driver marvell_drivers[] = {
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.did_interrupt = &m88e1121_did_interrupt,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -961,6 +971,8 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.get_wol = &m88e1318_get_wol,
.set_wol = &m88e1318_set_wol,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -974,6 +986,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -987,6 +1001,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1000,6 +1016,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1013,6 +1031,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1026,6 +1046,8 @@ static struct phy_driver marvell_drivers[] = {
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.did_interrupt = &m88e1121_did_interrupt,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
};
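The marvell.c hunks add the generic genphy_suspend/genphy_resume helpers to every driver entry so the PHYs are powered down and back up across system suspend. A trimmed sketch of one struct phy_driver entry of that era (ID, mask and name are placeholders):

#include <linux/module.h>
#include <linux/phy.h>

static struct phy_driver example_phy_driver = {
        .phy_id         = 0x01410c00,           /* placeholder ID/mask */
        .phy_id_mask    = 0xfffffff0,
        .name           = "Example GigE PHY",
        .features       = PHY_GBIT_FEATURES,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .suspend        = genphy_suspend,       /* sets BMCR power-down */
        .resume         = genphy_resume,        /* clears it again */
        .driver         = { .owner = THIS_MODULE },
};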
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 8004acbef2c9..e701433bf52f 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/phy/mdio-moxart.c
index a5741cb0304e..f1fc51f655d9 100644
--- a/drivers/net/phy/mdio-moxart.c
+++ b/drivers/net/phy/mdio-moxart.c
@@ -8,7 +8,6 @@
*/
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index d2dd9e473e2c..096695163491 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -10,7 +10,6 @@
#include <linux/device.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/phy.h>
#include <linux/mdio-mux.h>
#include <linux/of_gpio.h>
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index f8e305d8da76..1656785ff339 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -15,7 +15,6 @@
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/phy.h>
#include <linux/mdio-mux.h>
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index 6aee02ed97ac..a51ed92fbada 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -10,7 +10,6 @@
#include <linux/of_mdio.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/phy.h>
#include <linux/io.h>
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 18969b3ad8bb..9367acc84fbb 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -13,7 +13,6 @@
*/
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -171,6 +170,9 @@ static int sun4i_mdio_remove(struct platform_device *pdev)
}
static const struct of_device_id sun4i_mdio_dt_ids[] = {
+ { .compatible = "allwinner,sun4i-a10-mdio" },
+
+ /* Deprecated */
{ .compatible = "allwinner,sun4i-mdio" },
{ }
};
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 56178761ce93..71e49000fbf3 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -1,7 +1,4 @@
-/*
- * drivers/net/phy/mdio_bus.c
- *
- * MDIO Bus interface
+/* MDIO Bus interface
*
* Author: Andy Fleming
*
@@ -36,10 +33,10 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
-#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
/**
* mdiobus_alloc_size - allocate a mii_bus structure
@@ -139,8 +136,7 @@ int mdiobus_register(struct mii_bus *bus)
int i, err;
if (NULL == bus || NULL == bus->name ||
- NULL == bus->read ||
- NULL == bus->write)
+ NULL == bus->read || NULL == bus->write)
return -EINVAL;
BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
@@ -154,6 +150,7 @@ int mdiobus_register(struct mii_bus *bus)
err = device_register(&bus->dev);
if (err) {
pr_err("mii_bus %s failed to register\n", bus->id);
+ put_device(&bus->dev);
return -EINVAL;
}
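The mdio_bus.c hunk adds put_device() on the device_register() failure path: once device_register() has been called, the embedded struct device owns a kobject reference, and it must be dropped with put_device() (which invokes the release callback) rather than by freeing the structure directly. A small sketch of the pattern, outside the MDIO context:

#include <linux/device.h>

static int example_register(struct device *dev)
{
        int err;

        err = device_register(dev);     /* takes its own reference on dev */
        if (err) {
                put_device(dev);        /* drop it; release() does the freeing */
                return err;
        }
        return 0;
}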
@@ -214,9 +211,7 @@ EXPORT_SYMBOL(mdiobus_unregister);
*/
void mdiobus_free(struct mii_bus *bus)
{
- /*
- * For compatibility with error handling in drivers.
- */
+ /* For compatibility with error handling in drivers. */
if (bus->state == MDIOBUS_ALLOCATED) {
kfree(bus);
return;
@@ -316,8 +311,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
if (phydrv->match_phy_device)
return phydrv->match_phy_device(phydev);
- return ((phydrv->phy_id & phydrv->phy_id_mask) ==
- (phydev->phy_id & phydrv->phy_id_mask));
+ return (phydrv->phy_id & phydrv->phy_id_mask) ==
+ (phydev->phy_id & phydrv->phy_id_mask);
}
#ifdef CONFIG_PM
@@ -335,15 +330,13 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
if (!netdev)
return true;
- /*
- * Don't suspend PHY if the attched netdev parent may wakeup.
+ /* Don't suspend PHY if the attched netdev parent may wakeup.
* The parent may point to a PCI device, as in tg3 driver.
*/
if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
return false;
- /*
- * Also don't suspend PHY if the netdev itself may wakeup. This
+ /* Also don't suspend PHY if the netdev itself may wakeup. This
* is the case for devices w/o underlaying pwr. mgmt. aware bus,
* e.g. SoC devices.
*/
@@ -358,8 +351,7 @@ static int mdio_bus_suspend(struct device *dev)
struct phy_driver *phydrv = to_phy_driver(dev->driver);
struct phy_device *phydev = to_phy_device(dev);
- /*
- * We must stop the state machine manually, otherwise it stops out of
+ /* We must stop the state machine manually, otherwise it stops out of
* control, possibly with the phydev->lock held. Upon resume, netdev
* may call phy routines that try to grab the same lock, and that may
* lead to a deadlock.
@@ -388,7 +380,7 @@ static int mdio_bus_resume(struct device *dev)
no_resume:
if (phydev->attached_dev && phydev->adjust_link)
- phy_start_machine(phydev, NULL);
+ phy_start_machine(phydev);
return 0;
}
@@ -410,12 +402,12 @@ static int mdio_bus_restore(struct device *dev)
phydev->link = 0;
phydev->state = PHY_UP;
- phy_start_machine(phydev, NULL);
+ phy_start_machine(phydev);
return 0;
}
-static struct dev_pm_ops mdio_bus_pm_ops = {
+static const struct dev_pm_ops mdio_bus_pm_ops = {
.suspend = mdio_bus_suspend,
.resume = mdio_bus_resume,
.freeze = mdio_bus_suspend,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 26fa05a472b4..5a8993b0cafc 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -81,14 +81,14 @@ static int ksz_config_flags(struct phy_device *phydev)
}
static int kszphy_extended_write(struct phy_device *phydev,
- u32 regnum, u16 val)
+ u32 regnum, u16 val)
{
phy_write(phydev, MII_KSZPHY_EXTREG, KSZPHY_EXTREG_WRITE | regnum);
return phy_write(phydev, MII_KSZPHY_EXTREG_WRITE, val);
}
static int kszphy_extended_read(struct phy_device *phydev,
- u32 regnum)
+ u32 regnum)
{
phy_write(phydev, MII_KSZPHY_EXTREG, regnum);
return phy_read(phydev, MII_KSZPHY_EXTREG_READ);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 36c6994436b7..19c9eca0ef26 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1,7 +1,4 @@
-/*
- * drivers/net/phy/phy.c
- *
- * Framework for configuring and reading PHY devices
+/* Framework for configuring and reading PHY devices
* Based on code in sungem_phy.c and gianfar_phy.c
*
* Author: Andy Fleming
@@ -23,7 +20,6 @@
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -36,11 +32,11 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
-
+#include <linux/io.h>
+#include <linux/uaccess.h>
#include <linux/atomic.h>
-#include <asm/io.h>
+
#include <asm/irq.h>
-#include <asm/uaccess.h>
/**
* phy_print_status - Convenience function to print out the current phy status
@@ -48,13 +44,14 @@
*/
void phy_print_status(struct phy_device *phydev)
{
- if (phydev->link)
+ if (phydev->link) {
pr_info("%s - Link is Up - %d/%s\n",
dev_name(&phydev->dev),
phydev->speed,
DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
- else
+ } else {
pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
+ }
}
EXPORT_SYMBOL(phy_print_status);
@@ -69,12 +66,10 @@ EXPORT_SYMBOL(phy_print_status);
*/
static int phy_clear_interrupt(struct phy_device *phydev)
{
- int err = 0;
-
if (phydev->drv->ack_interrupt)
- err = phydev->drv->ack_interrupt(phydev);
+ return phydev->drv->ack_interrupt(phydev);
- return err;
+ return 0;
}
/**
@@ -86,13 +81,11 @@ static int phy_clear_interrupt(struct phy_device *phydev)
*/
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
- int err = 0;
-
phydev->interrupts = interrupts;
if (phydev->drv->config_intr)
- err = phydev->drv->config_intr(phydev);
+ return phydev->drv->config_intr(phydev);
- return err;
+ return 0;
}
@@ -106,15 +99,14 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
*/
static inline int phy_aneg_done(struct phy_device *phydev)
{
- int retval;
-
- retval = phy_read(phydev, MII_BMSR);
+ int retval = phy_read(phydev, MII_BMSR);
return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}
/* A structure for mapping a particular speed and duplex
- * combination to a particular SUPPORTED and ADVERTISED value */
+ * combination to a particular SUPPORTED and ADVERTISED value
+ */
struct phy_setting {
int speed;
int duplex;
@@ -177,8 +169,7 @@ static inline int phy_find_setting(int speed, int duplex)
int idx = 0;
while (idx < ARRAY_SIZE(settings) &&
- (settings[idx].speed != speed ||
- settings[idx].duplex != duplex))
+ (settings[idx].speed != speed || settings[idx].duplex != duplex))
idx++;
return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
@@ -245,8 +236,7 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
if (cmd->phy_address != phydev->addr)
return -EINVAL;
- /* We make sure that we don't pass unsupported
- * values in to the PHY */
+ /* We make sure that we don't pass unsupported values in to the PHY */
cmd->advertising &= phydev->supported;
/* Verify the settings we care about. */
@@ -289,6 +279,7 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
cmd->supported = phydev->supported;
cmd->advertising = phydev->advertising;
+ cmd->lp_advertising = phydev->lp_advertising;
ethtool_cmd_speed_set(cmd, phydev->speed);
cmd->duplex = phydev->duplex;
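phy_ethtool_gset() now also reports the link-partner advertisement that genphy_read_status() starts collecting later in this series (phydev->lp_advertising). A rough userspace check of the new field, assuming an interface named eth0 and using the legacy ETHTOOL_GSET ioctl for brevity, might look like:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ecmd;

	/* lp_advertising is a mask of ADVERTISED_* bits */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("lp_advertising: 0x%08x\n", ecmd.lp_advertising);
	close(fd);
	return 0;
}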
@@ -312,8 +303,7 @@ EXPORT_SYMBOL(phy_ethtool_gset);
* PHYCONTROL layer. It changes registers without regard to
* current state. Use at own risk.
*/
-int phy_mii_ioctl(struct phy_device *phydev,
- struct ifreq *ifr, int cmd)
+int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *mii_data = if_mii(ifr);
u16 val = mii_data->val_in;
@@ -326,25 +316,24 @@ int phy_mii_ioctl(struct phy_device *phydev,
case SIOCGMIIREG:
mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
mii_data->reg_num);
- break;
+ return 0;
case SIOCSMIIREG:
if (mii_data->phy_id == phydev->addr) {
- switch(mii_data->reg_num) {
+ switch (mii_data->reg_num) {
case MII_BMCR:
- if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
+ if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0)
phydev->autoneg = AUTONEG_DISABLE;
else
phydev->autoneg = AUTONEG_ENABLE;
- if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
+ if (!phydev->autoneg && (val & BMCR_FULLDPLX))
phydev->duplex = DUPLEX_FULL;
else
phydev->duplex = DUPLEX_HALF;
- if ((!phydev->autoneg) &&
- (val & BMCR_SPEED1000))
+ if (!phydev->autoneg && (val & BMCR_SPEED1000))
phydev->speed = SPEED_1000;
- else if ((!phydev->autoneg) &&
- (val & BMCR_SPEED100))
+ else if (!phydev->autoneg &&
+ (val & BMCR_SPEED100))
phydev->speed = SPEED_100;
break;
case MII_ADVERTISE:
@@ -360,12 +349,9 @@ int phy_mii_ioctl(struct phy_device *phydev,
mii_data->reg_num, val);
if (mii_data->reg_num == MII_BMCR &&
- val & BMCR_RESET &&
- phydev->drv->config_init) {
- phy_scan_fixups(phydev);
- phydev->drv->config_init(phydev);
- }
- break;
+ val & BMCR_RESET)
+ return phy_init_hw(phydev);
+ return 0;
case SIOCSHWTSTAMP:
if (phydev->drv->hwtstamp)
@@ -375,8 +361,6 @@ int phy_mii_ioctl(struct phy_device *phydev,
default:
return -EOPNOTSUPP;
}
-
- return 0;
}
EXPORT_SYMBOL(phy_mii_ioctl);
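A write of BMCR_RESET through SIOCSMIIREG now funnels into phy_init_hw() (reworked later in this series to issue the reset and poll for completion) instead of the open-coded phy_scan_fixups() + config_init() pair. For network drivers that forward these ioctls to phy_mii_ioctl(), a hypothetical userspace trigger, assuming an interface named eth0 and CAP_NET_ADMIN, would be roughly:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return 1;

	mii->reg_num = MII_BMCR;
	mii->val_in = BMCR_RESET;	/* kernel side ends up in phy_init_hw() */
	if (ioctl(fd, SIOCSMIIREG, &ifr) < 0)
		return 1;

	close(fd);
	return 0;
}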
@@ -399,7 +383,6 @@ int phy_start_aneg(struct phy_device *phydev)
phy_sanitize_settings(phydev);
err = phydev->drv->config_aneg(phydev);
-
if (err < 0)
goto out_unlock;
@@ -419,25 +402,18 @@ out_unlock:
}
EXPORT_SYMBOL(phy_start_aneg);
-
/**
* phy_start_machine - start PHY state machine tracking
* @phydev: the phy_device struct
- * @handler: callback function for state change notifications
*
* Description: The PHY infrastructure can run a state machine
* which tracks whether the PHY is starting up, negotiating,
* etc. This function starts the timer which tracks the state
- * of the PHY. If you want to be notified when the state changes,
- * pass in the callback @handler, otherwise, pass NULL. If you
- * want to maintain your own state machine, do not call this
- * function.
+ * of the PHY. If you want to maintain your own state machine,
+ * do not call this function.
*/
-void phy_start_machine(struct phy_device *phydev,
- void (*handler)(struct net_device *))
+void phy_start_machine(struct phy_device *phydev)
{
- phydev->adjust_state = handler;
-
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}
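The @handler argument disappears along with the adjust_state callback it used to install; link notifications now come only through the adjust_link callback supplied at connect time. An affected call site changes roughly as follows (illustrative fragment, not taken from this patch):

	/* before: optional per-state callback, usually NULL */
	phy_start_machine(phydev, NULL);

	/* after: state machine started without a callback */
	phy_start_machine(phydev);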
@@ -457,8 +433,6 @@ void phy_stop_machine(struct phy_device *phydev)
if (phydev->state > PHY_UP)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
-
- phydev->adjust_state = NULL;
}
/**
@@ -495,7 +469,8 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
/* The MDIO bus is not allowed to be written in interrupt
* context, so we need to disable the irq here. A work
* queue will write the PHY to disable and clear the
- * interrupt, and then reenable the irq line. */
+ * interrupt, and then reenable the irq line.
+ */
disable_irq_nosync(irq);
atomic_inc(&phydev->irq_disable);
@@ -510,16 +485,12 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
*/
static int phy_enable_interrupts(struct phy_device *phydev)
{
- int err;
-
- err = phy_clear_interrupt(phydev);
+ int err = phy_clear_interrupt(phydev);
if (err < 0)
return err;
- err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
-
- return err;
+ return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
}
/**
@@ -532,13 +503,11 @@ static int phy_disable_interrupts(struct phy_device *phydev)
/* Disable PHY interrupts */
err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
-
if (err)
goto phy_err;
/* Clear the interrupt */
err = phy_clear_interrupt(phydev);
-
if (err)
goto phy_err;
@@ -562,22 +531,16 @@ phy_err:
*/
int phy_start_interrupts(struct phy_device *phydev)
{
- int err = 0;
-
atomic_set(&phydev->irq_disable, 0);
- if (request_irq(phydev->irq, phy_interrupt,
- IRQF_SHARED,
- "phy_interrupt",
- phydev) < 0) {
+ if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
+ phydev) < 0) {
pr_warn("%s: Can't get IRQ %d (PHY)\n",
phydev->bus->name, phydev->irq);
phydev->irq = PHY_POLL;
return 0;
}
- err = phy_enable_interrupts(phydev);
-
- return err;
+ return phy_enable_interrupts(phydev);
}
EXPORT_SYMBOL(phy_start_interrupts);
@@ -587,24 +550,20 @@ EXPORT_SYMBOL(phy_start_interrupts);
*/
int phy_stop_interrupts(struct phy_device *phydev)
{
- int err;
-
- err = phy_disable_interrupts(phydev);
+ int err = phy_disable_interrupts(phydev);
if (err)
phy_error(phydev);
free_irq(phydev->irq, phydev);
- /*
- * Cannot call flush_scheduled_work() here as desired because
+ /* Cannot call flush_scheduled_work() here as desired because
* of rtnl_lock(), but we do not really care about what would
* be done, except from enable_irq(), so cancel any work
* possibly pending and take care of the matter below.
*/
cancel_work_sync(&phydev->phy_queue);
- /*
- * If work indeed has been cancelled, disable_irq() will have
+ /* If work indeed has been cancelled, disable_irq() will have
* been left unbalanced from phy_interrupt() and enable_irq()
* has to be called so that other devices on the line work.
*/
@@ -615,14 +574,12 @@ int phy_stop_interrupts(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_stop_interrupts);
-
/**
* phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
* @work: work_struct that describes the work to be done
*/
void phy_change(struct work_struct *work)
{
- int err;
struct phy_device *phydev =
container_of(work, struct phy_device, phy_queue);
@@ -630,9 +587,7 @@ void phy_change(struct work_struct *work)
!phydev->drv->did_interrupt(phydev))
goto ignore;
- err = phy_disable_interrupts(phydev);
-
- if (err)
+ if (phy_disable_interrupts(phydev))
goto phy_err;
mutex_lock(&phydev->lock);
@@ -644,16 +599,13 @@ void phy_change(struct work_struct *work)
enable_irq(phydev->irq);
/* Reenable interrupts */
- if (PHY_HALTED != phydev->state)
- err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
-
- if (err)
+ if (PHY_HALTED != phydev->state &&
+ phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
goto irq_enable_err;
/* reschedule state queue work to run as soon as possible */
cancel_delayed_work_sync(&phydev->state_queue);
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
-
return;
ignore:
@@ -692,13 +644,12 @@ void phy_stop(struct phy_device *phydev)
out_unlock:
mutex_unlock(&phydev->lock);
- /*
- * Cannot call flush_scheduled_work() here as desired because
+ /* Cannot call flush_scheduled_work() here as desired because
* of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
* will not reenable interrupts.
*/
}
-
+EXPORT_SYMBOL(phy_stop);
/**
* phy_start - start or restart a PHY device
@@ -715,20 +666,19 @@ void phy_start(struct phy_device *phydev)
mutex_lock(&phydev->lock);
switch (phydev->state) {
- case PHY_STARTING:
- phydev->state = PHY_PENDING;
- break;
- case PHY_READY:
- phydev->state = PHY_UP;
- break;
- case PHY_HALTED:
- phydev->state = PHY_RESUMING;
- default:
- break;
+ case PHY_STARTING:
+ phydev->state = PHY_PENDING;
+ break;
+ case PHY_READY:
+ phydev->state = PHY_UP;
+ break;
+ case PHY_HALTED:
+ phydev->state = PHY_RESUMING;
+ default:
+ break;
}
mutex_unlock(&phydev->lock);
}
-EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);
/**
@@ -740,160 +690,132 @@ void phy_state_machine(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct phy_device *phydev =
container_of(dwork, struct phy_device, state_queue);
- int needs_aneg = 0;
+ int needs_aneg = 0, do_suspend = 0;
int err = 0;
mutex_lock(&phydev->lock);
- if (phydev->adjust_state)
- phydev->adjust_state(phydev->attached_dev);
+ switch (phydev->state) {
+ case PHY_DOWN:
+ case PHY_STARTING:
+ case PHY_READY:
+ case PHY_PENDING:
+ break;
+ case PHY_UP:
+ needs_aneg = 1;
- switch(phydev->state) {
- case PHY_DOWN:
- case PHY_STARTING:
- case PHY_READY:
- case PHY_PENDING:
- break;
- case PHY_UP:
- needs_aneg = 1;
+ phydev->link_timeout = PHY_AN_TIMEOUT;
- phydev->link_timeout = PHY_AN_TIMEOUT;
+ break;
+ case PHY_AN:
+ err = phy_read_status(phydev);
+ if (err < 0)
+ break;
+ /* If the link is down, give up on negotiation for now */
+ if (!phydev->link) {
+ phydev->state = PHY_NOLINK;
+ netif_carrier_off(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
break;
- case PHY_AN:
- err = phy_read_status(phydev);
+ }
- if (err < 0)
- break;
+ /* Check if negotiation is done. Break if there's an error */
+ err = phy_aneg_done(phydev);
+ if (err < 0)
+ break;
- /* If the link is down, give up on
- * negotiation for now */
- if (!phydev->link) {
- phydev->state = PHY_NOLINK;
- netif_carrier_off(phydev->attached_dev);
- phydev->adjust_link(phydev->attached_dev);
- break;
- }
+ /* If AN is done, we're running */
+ if (err > 0) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
- /* Check if negotiation is done. Break
- * if there's an error */
- err = phy_aneg_done(phydev);
- if (err < 0)
+ } else if (0 == phydev->link_timeout--) {
+ needs_aneg = 1;
+ /* If we have the magic_aneg bit, we try again */
+ if (phydev->drv->flags & PHY_HAS_MAGICANEG)
break;
-
- /* If AN is done, we're running */
- if (err > 0) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- phydev->adjust_link(phydev->attached_dev);
-
- } else if (0 == phydev->link_timeout--) {
- needs_aneg = 1;
- /* If we have the magic_aneg bit,
- * we try again */
- if (phydev->drv->flags & PHY_HAS_MAGICANEG)
- break;
- }
+ }
+ break;
+ case PHY_NOLINK:
+ err = phy_read_status(phydev);
+ if (err)
break;
- case PHY_NOLINK:
- err = phy_read_status(phydev);
-
- if (err)
- break;
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- phydev->adjust_link(phydev->attached_dev);
- }
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
+ }
+ break;
+ case PHY_FORCING:
+ err = genphy_update_link(phydev);
+ if (err)
break;
- case PHY_FORCING:
- err = genphy_update_link(phydev);
-
- if (err)
- break;
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- } else {
- if (0 == phydev->link_timeout--)
- needs_aneg = 1;
- }
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ } else {
+ if (0 == phydev->link_timeout--)
+ needs_aneg = 1;
+ }
- phydev->adjust_link(phydev->attached_dev);
- break;
- case PHY_RUNNING:
- /* Only register a CHANGE if we are
- * polling or ignoring interrupts
- */
- if (!phy_interrupt_is_valid(phydev))
- phydev->state = PHY_CHANGELINK;
+ phydev->adjust_link(phydev->attached_dev);
+ break;
+ case PHY_RUNNING:
+ /* Only register a CHANGE if we are
+ * polling or ignoring interrupts
+ */
+ if (!phy_interrupt_is_valid(phydev))
+ phydev->state = PHY_CHANGELINK;
+ break;
+ case PHY_CHANGELINK:
+ err = phy_read_status(phydev);
+ if (err)
break;
- case PHY_CHANGELINK:
- err = phy_read_status(phydev);
- if (err)
- break;
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ } else {
+ phydev->state = PHY_NOLINK;
+ netif_carrier_off(phydev->attached_dev);
+ }
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- } else {
- phydev->state = PHY_NOLINK;
- netif_carrier_off(phydev->attached_dev);
- }
+ phydev->adjust_link(phydev->attached_dev);
+ if (phy_interrupt_is_valid(phydev))
+ err = phy_config_interrupt(phydev,
+ PHY_INTERRUPT_ENABLED);
+ break;
+ case PHY_HALTED:
+ if (phydev->link) {
+ phydev->link = 0;
+ netif_carrier_off(phydev->attached_dev);
phydev->adjust_link(phydev->attached_dev);
-
- if (phy_interrupt_is_valid(phydev))
- err = phy_config_interrupt(phydev,
- PHY_INTERRUPT_ENABLED);
- break;
- case PHY_HALTED:
- if (phydev->link) {
- phydev->link = 0;
- netif_carrier_off(phydev->attached_dev);
- phydev->adjust_link(phydev->attached_dev);
- }
+ do_suspend = 1;
+ }
+ break;
+ case PHY_RESUMING:
+ err = phy_clear_interrupt(phydev);
+ if (err)
break;
- case PHY_RESUMING:
-
- err = phy_clear_interrupt(phydev);
- if (err)
- break;
-
- err = phy_config_interrupt(phydev,
- PHY_INTERRUPT_ENABLED);
+ err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
+ if (err)
+ break;
- if (err)
+ if (AUTONEG_ENABLE == phydev->autoneg) {
+ err = phy_aneg_done(phydev);
+ if (err < 0)
break;
- if (AUTONEG_ENABLE == phydev->autoneg) {
- err = phy_aneg_done(phydev);
- if (err < 0)
- break;
-
- /* err > 0 if AN is done.
- * Otherwise, it's 0, and we're
- * still waiting for AN */
- if (err > 0) {
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- } else
- phydev->state = PHY_NOLINK;
- phydev->adjust_link(phydev->attached_dev);
- } else {
- phydev->state = PHY_AN;
- phydev->link_timeout = PHY_AN_TIMEOUT;
- }
- } else {
+ /* err > 0 if AN is done.
+ * Otherwise, it's 0, and we're still waiting for AN
+ */
+ if (err > 0) {
err = phy_read_status(phydev);
if (err)
break;
@@ -901,11 +823,28 @@ void phy_state_machine(struct work_struct *work)
if (phydev->link) {
phydev->state = PHY_RUNNING;
netif_carrier_on(phydev->attached_dev);
- } else
+ } else {
phydev->state = PHY_NOLINK;
+ }
phydev->adjust_link(phydev->attached_dev);
+ } else {
+ phydev->state = PHY_AN;
+ phydev->link_timeout = PHY_AN_TIMEOUT;
}
- break;
+ } else {
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ } else {
+ phydev->state = PHY_NOLINK;
+ }
+ phydev->adjust_link(phydev->attached_dev);
+ }
+ break;
}
mutex_unlock(&phydev->lock);
@@ -913,11 +852,14 @@ void phy_state_machine(struct work_struct *work)
if (needs_aneg)
err = phy_start_aneg(phydev);
+ if (do_suspend)
+ phy_suspend(phydev);
+
if (err < 0)
phy_error(phydev);
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
- PHY_STATE_TIME * HZ);
+ PHY_STATE_TIME * HZ);
}
void phy_mac_interrupt(struct phy_device *phydev, int new_link)
@@ -959,14 +901,10 @@ static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
int addr)
{
- u32 ret;
-
mmd_phy_indirect(bus, prtad, devad, addr);
/* Read the content of the MMD's selected register */
- ret = bus->read(bus, addr, MII_MMD_DATA);
-
- return ret;
+ return bus->read(bus, addr, MII_MMD_DATA);
}
/**
@@ -1006,8 +944,6 @@ static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
*/
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
- int ret = -EPROTONOSUPPORT;
-
/* According to 802.3az,the EEE is supported only in full duplex-mode.
* Also EEE feature is active when core is operating with MII, GMII
* or RGMII.
@@ -1033,7 +969,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
if (!cap)
- goto eee_exit;
+ return -EPROTONOSUPPORT;
/* Check which link settings negotiated and verify it in
* the EEE advertising registers.
@@ -1052,7 +988,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
idx = phy_find_setting(phydev->speed, phydev->duplex);
if (!(lp & adv & settings[idx].setting))
- goto eee_exit;
+ return -EPROTONOSUPPORT;
if (clk_stop_enable) {
/* Configure the PHY to stop receiving xMII
@@ -1069,11 +1005,10 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
MDIO_MMD_PCS, phydev->addr, val);
}
- ret = 0; /* EEE supported */
+ return 0; /* EEE supported */
}
-eee_exit:
- return ret;
+ return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL(phy_init_eee);
@@ -1088,7 +1023,6 @@ int phy_get_eee_err(struct phy_device *phydev)
{
return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
MDIO_MMD_PCS, phydev->addr);
-
}
EXPORT_SYMBOL(phy_get_eee_err);
@@ -1138,9 +1072,8 @@ EXPORT_SYMBOL(phy_ethtool_get_eee);
*/
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
- int val;
+ int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
- val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
phydev->addr, val);
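Taken together, the phy_state_machine() hunks above flatten the switch indentation by one level, add a do_suspend flag next to needs_aneg, and defer both actions until phydev->lock has been dropped, so the new phy_suspend() (added in phy_device.c below) is never called under the lock. The tail of the function after this change reads roughly:

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg(phydev);

	if (do_suspend)
		phy_suspend(phydev);

	if (err < 0)
		phy_error(phydev);

	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
			   PHY_STATE_TIME * HZ);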
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index d6447b3f7409..82514e72b3d8 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1,7 +1,4 @@
-/*
- * drivers/net/phy/phy_device.c
- *
- * Framework for finding and configuring PHYs.
+/* Framework for finding and configuring PHYs.
* Also contains generic PHY driver
*
* Author: Andy Fleming
@@ -33,10 +30,11 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
-#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
MODULE_DESCRIPTION("PHY library");
MODULE_AUTHOR("Andy Fleming");
@@ -53,31 +51,31 @@ static void phy_device_release(struct device *dev)
kfree(to_phy_device(dev));
}
-static struct phy_driver genphy_driver;
-extern int mdio_bus_init(void);
-extern void mdio_bus_exit(void);
+enum genphy_driver {
+ GENPHY_DRV_1G,
+ GENPHY_DRV_10G,
+ GENPHY_DRV_MAX
+};
+
+static struct phy_driver genphy_driver[GENPHY_DRV_MAX];
static LIST_HEAD(phy_fixup_list);
static DEFINE_MUTEX(phy_fixup_lock);
-static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
- u32 flags, phy_interface_t interface);
-
-/*
- * Creates a new phy_fixup and adds it to the list
+/**
+ * phy_register_fixup - creates a new phy_fixup and adds it to the list
* @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID)
* @phy_uid: Used to match against phydev->phy_id (the UID of the PHY)
- * It can also be PHY_ANY_UID
+ * It can also be PHY_ANY_UID
* @phy_uid_mask: Applied to phydev->phy_id and fixup->phy_uid before
- * comparison
+ * comparison
* @run: The actual code to be run when a matching PHY is found
*/
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *))
+ int (*run)(struct phy_device *))
{
- struct phy_fixup *fixup;
+ struct phy_fixup *fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
- fixup = kzalloc(sizeof(struct phy_fixup), GFP_KERNEL);
if (!fixup)
return -ENOMEM;
@@ -96,7 +94,7 @@ EXPORT_SYMBOL(phy_register_fixup);
/* Registers a fixup to be run on any PHY with the UID in phy_uid */
int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *))
+ int (*run)(struct phy_device *))
{
return phy_register_fixup(PHY_ANY_ID, phy_uid, phy_uid_mask, run);
}
@@ -104,14 +102,13 @@ EXPORT_SYMBOL(phy_register_fixup_for_uid);
/* Registers a fixup to be run on the PHY with id string bus_id */
int phy_register_fixup_for_id(const char *bus_id,
- int (*run)(struct phy_device *))
+ int (*run)(struct phy_device *))
{
return phy_register_fixup(bus_id, PHY_ANY_UID, 0xffffffff, run);
}
EXPORT_SYMBOL(phy_register_fixup_for_id);
-/*
- * Returns 1 if fixup matches phydev in bus_id and phy_uid.
+/* Returns 1 if fixup matches phydev in bus_id and phy_uid.
* Fixups can be set to match any in one or more fields.
*/
static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
@@ -121,7 +118,7 @@ static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
return 0;
if ((fixup->phy_uid & fixup->phy_uid_mask) !=
- (phydev->phy_id & fixup->phy_uid_mask))
+ (phydev->phy_id & fixup->phy_uid_mask))
if (fixup->phy_uid != PHY_ANY_UID)
return 0;
@@ -129,16 +126,14 @@ static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
}
/* Runs any matching fixups for this phydev */
-int phy_scan_fixups(struct phy_device *phydev)
+static int phy_scan_fixups(struct phy_device *phydev)
{
struct phy_fixup *fixup;
mutex_lock(&phy_fixup_lock);
list_for_each_entry(fixup, &phy_fixup_list, list) {
if (phy_needs_fixup(phydev, fixup)) {
- int err;
-
- err = fixup->run(phydev);
+ int err = fixup->run(phydev);
if (err < 0) {
mutex_unlock(&phy_fixup_lock);
@@ -150,25 +145,24 @@ int phy_scan_fixups(struct phy_device *phydev)
return 0;
}
-EXPORT_SYMBOL(phy_scan_fixups);
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
- bool is_c45, struct phy_c45_device_ids *c45_ids)
+ bool is_c45,
+ struct phy_c45_device_ids *c45_ids)
{
struct phy_device *dev;
- /* We allocate the device, and initialize the
- * default values */
+ /* We allocate the device, and initialize the default values */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-
if (NULL == dev)
- return (struct phy_device*) PTR_ERR((void*)-ENOMEM);
+ return (struct phy_device *)PTR_ERR((void *)-ENOMEM);
dev->dev.release = phy_device_release;
dev->speed = 0;
dev->duplex = -1;
- dev->pause = dev->asym_pause = 0;
+ dev->pause = 0;
+ dev->asym_pause = 0;
dev->link = 1;
dev->interface = PHY_INTERFACE_MODE_GMII;
@@ -192,14 +186,15 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
INIT_WORK(&dev->phy_queue, phy_change);
/* Request the appropriate module unconditionally; don't
- bother trying to do so only if it isn't already loaded,
- because that gets complicated. A hotplug event would have
- done an unconditional modprobe anyway.
- We don't do normal hotplug because it won't work for MDIO
- -- because it relies on the device staying around for long
- enough for the driver to get loaded. With MDIO, the NIC
- driver will get bored and give up as soon as it finds that
- there's no driver _already_ loaded. */
+ * bother trying to do so only if it isn't already loaded,
+ * because that gets complicated. A hotplug event would have
+ * done an unconditional modprobe anyway.
+ * We don't do normal hotplug because it won't work for MDIO
+ * -- because it relies on the device staying around for long
+ * enough for the driver to get loaded. With MDIO, the NIC
+ * driver will get bored and give up as soon as it finds that
+ * there's no driver _already_ loaded.
+ */
request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
device_initialize(&dev->dev);
@@ -299,10 +294,8 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
if (is_c45)
return get_phy_c45_ids(bus, addr, phy_id, c45_ids);
- /* Grab the bits from PHYIR1, and put them
- * in the upper half */
+ /* Grab the bits from PHYIR1, and put them in the upper half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID1);
-
if (phy_reg < 0)
return -EIO;
@@ -310,7 +303,6 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
/* Grab the bits from PHYIR2, and put them in the lower half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);
-
if (phy_reg < 0)
return -EIO;
@@ -320,7 +312,8 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
}
/**
- * get_phy_device - reads the specified PHY device and returns its @phy_device struct
+ * get_phy_device - reads the specified PHY device and returns its @phy_device
+ * struct
* @bus: the target MII bus
* @addr: PHY address on the MII bus
* @is_c45: If true the PHY uses the 802.3 clause 45 protocol
@@ -331,7 +324,6 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
{
struct phy_c45_device_ids c45_ids = {0};
- struct phy_device *dev = NULL;
u32 phy_id = 0;
int r;
@@ -343,9 +335,7 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
if ((phy_id & 0x1fffffff) == 0x1fffffff)
return NULL;
- dev = phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
-
- return dev;
+ return phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
}
EXPORT_SYMBOL(get_phy_device);
@@ -357,14 +347,17 @@ int phy_device_register(struct phy_device *phydev)
{
int err;
- /* Don't register a phy if one is already registered at this
- * address */
+ /* Don't register a phy if one is already registered at this address */
if (phydev->bus->phy_map[phydev->addr])
return -EINVAL;
phydev->bus->phy_map[phydev->addr] = phydev;
/* Run all of the fixups for this PHY */
- phy_scan_fixups(phydev);
+ err = phy_init_hw(phydev);
+ if (err) {
+ pr_err("PHY %d failed to initialize\n", phydev->addr);
+ goto out;
+ }
err = device_add(&phydev->dev);
if (err) {
@@ -409,7 +402,7 @@ EXPORT_SYMBOL(phy_find_first);
* this function.
*/
static void phy_prepare_link(struct phy_device *phydev,
- void (*handler)(struct net_device *))
+ void (*handler)(struct net_device *))
{
phydev->adjust_link = handler;
}
@@ -432,7 +425,7 @@ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
return rc;
phy_prepare_link(phydev, handler);
- phy_start_machine(phydev, NULL);
+ phy_start_machine(phydev);
if (phydev->irq > 0)
phy_start_interrupts(phydev);
@@ -455,16 +448,17 @@ EXPORT_SYMBOL(phy_connect_direct);
* choose to call only the subset of functions which provide
* the desired functionality.
*/
-struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
- void (*handler)(struct net_device *),
- phy_interface_t interface)
+struct phy_device *phy_connect(struct net_device *dev, const char *bus_id,
+ void (*handler)(struct net_device *),
+ phy_interface_t interface)
{
struct phy_device *phydev;
struct device *d;
int rc;
/* Search the list of PHY devices on the mdio bus for the
- * PHY with the requested name */
+ * PHY with the requested name
+ */
d = bus_find_device_by_name(&mdio_bus_type, NULL, bus_id);
if (!d) {
pr_err("PHY %s not found\n", bus_id);
@@ -481,7 +475,8 @@ struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
EXPORT_SYMBOL(phy_connect);
/**
- * phy_disconnect - disable interrupts, stop state machine, and detach a PHY device
+ * phy_disconnect - disable interrupts, stop state machine, and detach a PHY
+ * device
* @phydev: target phy_device struct
*/
void phy_disconnect(struct phy_device *phydev)
@@ -490,13 +485,53 @@ void phy_disconnect(struct phy_device *phydev)
phy_stop_interrupts(phydev);
phy_stop_machine(phydev);
-
+
phydev->adjust_link = NULL;
phy_detach(phydev);
}
EXPORT_SYMBOL(phy_disconnect);
+/**
+ * phy_poll_reset - Safely wait until a PHY reset has properly completed
+ * @phydev: The PHY device to poll
+ *
+ * Description: According to IEEE 802.3, Section 2, Subsection 22.2.4.1.1, as
+ * published in 2008, a PHY reset may take up to 0.5 seconds. The MII BMCR
+ * register must be polled until the BMCR_RESET bit clears.
+ *
+ * Furthermore, any attempts to write to PHY registers may have no effect
+ * or even generate MDIO bus errors until this is complete.
+ *
+ * Some PHYs (such as the Marvell 88E1111) don't entirely conform to the
+ * standard and do not fully reset after the BMCR_RESET bit is set, and may
+ * even *REQUIRE* a soft-reset to properly restart autonegotiation. In an
+ * effort to support such broken PHYs, this function is separate from the
+ * standard phy_init_hw() which will zero all the other bits in the BMCR
+ * and reapply all driver-specific and board-specific fixups.
+ */
+static int phy_poll_reset(struct phy_device *phydev)
+{
+ /* Poll until the reset bit clears (50ms per retry == 0.6 sec) */
+ unsigned int retries = 12;
+ int ret;
+
+ do {
+ msleep(50);
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+ } while (ret & BMCR_RESET && --retries);
+ if (ret & BMCR_RESET)
+ return -ETIMEDOUT;
+
+ /* Some chips (smsc911x) may still need up to another 1ms after the
+ * BMCR_RESET bit is cleared before they are usable.
+ */
+ msleep(1);
+ return 0;
+}
+
int phy_init_hw(struct phy_device *phydev)
{
int ret;
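With the hunk below, phy_init_hw() now performs a BMCR soft reset, waits for completion via the new phy_poll_reset() above, and only then reapplies fixups and the driver's config_init. Stitched together from the two hunks, the resulting function reads roughly:

int phy_init_hw(struct phy_device *phydev)
{
	int ret;

	if (!phydev->drv || !phydev->drv->config_init)
		return 0;

	ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
	if (ret < 0)
		return ret;

	/* polls MII_BMCR every 50 ms, up to ~0.6 s, then -ETIMEDOUT */
	ret = phy_poll_reset(phydev);
	if (ret < 0)
		return ret;

	ret = phy_scan_fixups(phydev);
	if (ret < 0)
		return ret;

	return phydev->drv->config_init(phydev);
}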
@@ -504,12 +539,21 @@ int phy_init_hw(struct phy_device *phydev)
if (!phydev->drv || !phydev->drv->config_init)
return 0;
+ ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_poll_reset(phydev);
+ if (ret < 0)
+ return ret;
+
ret = phy_scan_fixups(phydev);
if (ret < 0)
return ret;
return phydev->drv->config_init(phydev);
}
+EXPORT_SYMBOL(phy_init_hw);
/**
* phy_attach_direct - attach a network device to a given PHY device pointer
@@ -520,26 +564,25 @@ int phy_init_hw(struct phy_device *phydev)
*
* Description: Called by drivers to attach to a particular PHY
* device. The phy_device is found, and properly hooked up
- * to the phy_driver. If no driver is attached, then the
- * genphy_driver is used. The phy_device is given a ptr to
+ * to the phy_driver. If no driver is attached, then a
+ * generic driver is used. The phy_device is given a ptr to
* the attaching device, and given a callback for link status
* change. The phy_device is returned to the attaching driver.
*/
-static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
- u32 flags, phy_interface_t interface)
+int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ u32 flags, phy_interface_t interface)
{
struct device *d = &phydev->dev;
int err;
/* Assume that if there is no driver, that it doesn't
- * exist, and we should use the genphy driver. */
+ * exist, and we should use the genphy driver.
+ */
if (NULL == d->driver) {
- if (phydev->is_c45) {
- pr_err("No driver for phy %x\n", phydev->phy_id);
- return -ENODEV;
- }
-
- d->driver = &genphy_driver.driver;
+ if (phydev->is_c45)
+ d->driver = &genphy_driver[GENPHY_DRV_10G].driver;
+ else
+ d->driver = &genphy_driver[GENPHY_DRV_1G].driver;
err = d->driver->probe(d);
if (err >= 0)
@@ -565,13 +608,17 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
/* Do initial configuration here, now that
* we have certain key parameters
- * (dev_flags and interface) */
+ * (dev_flags and interface)
+ */
err = phy_init_hw(phydev);
if (err)
phy_detach(phydev);
+ phy_resume(phydev);
+
return err;
}
+EXPORT_SYMBOL(phy_attach_direct);
/**
* phy_attach - attach a network device to a particular PHY device
@@ -582,8 +629,8 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* Description: Same as phy_attach_direct() except that a PHY bus_id
* string is passed instead of a pointer to a struct phy_device.
*/
-struct phy_device *phy_attach(struct net_device *dev,
- const char *bus_id, phy_interface_t interface)
+struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
+ phy_interface_t interface)
{
struct bus_type *bus = &mdio_bus_type;
struct phy_device *phydev;
@@ -591,7 +638,8 @@ struct phy_device *phy_attach(struct net_device *dev,
int rc;
/* Search the list of PHY devices on the mdio bus for the
- * PHY with the requested name */
+ * PHY with the requested name
+ */
d = bus_find_device_by_name(bus, NULL, bus_id);
if (!d) {
pr_err("PHY %s not found\n", bus_id);
@@ -613,18 +661,49 @@ EXPORT_SYMBOL(phy_attach);
*/
void phy_detach(struct phy_device *phydev)
{
+ int i;
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
+ phy_suspend(phydev);
/* If the device had no specific driver before (i.e. - it
* was using the generic driver), we unbind the device
* from the generic driver so that there's a chance a
- * real driver could be loaded */
- if (phydev->dev.driver == &genphy_driver.driver)
- device_release_driver(&phydev->dev);
+ * real driver could be loaded
+ */
+ for (i = 0; i < ARRAY_SIZE(genphy_driver); i++) {
+ if (phydev->dev.driver == &genphy_driver[i].driver) {
+ device_release_driver(&phydev->dev);
+ break;
+ }
+ }
}
EXPORT_SYMBOL(phy_detach);
+int phy_suspend(struct phy_device *phydev)
+{
+ struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
+ struct ethtool_wolinfo wol;
+
+ /* If the device has WOL enabled, we cannot suspend the PHY */
+ wol.cmd = ETHTOOL_GWOL;
+ phy_ethtool_get_wol(phydev, &wol);
+ if (wol.wolopts)
+ return -EBUSY;
+
+ if (phydrv->suspend)
+ return phydrv->suspend(phydev);
+ return 0;
+}
+
+int phy_resume(struct phy_device *phydev)
+{
+ struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
+
+ if (phydrv->resume)
+ return phydrv->resume(phydev);
+ return 0;
+}
/* Generic PHY support and helper functions */
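The new phy_suspend() refuses to power the PHY down while Wake-on-LAN is armed (it queries the driver through phy_ethtool_get_wol() and returns -EBUSY) and otherwise defers to the driver's suspend hook; phy_resume() mirrors it. They are called from phy_detach() and phy_attach_direct() above and from the PHY_HALTED handling in phy.c. A PHY driver opts in simply by providing the hooks, for example by reusing the generic helpers (placeholder IDs, illustrative only):

static struct phy_driver my_phy_driver = {
	.phy_id		= 0x01234560,		/* placeholder */
	.phy_id_mask	= 0xfffffff0,
	.name		= "Example PHY",
	.features	= PHY_BASIC_FEATURES,
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.suspend	= genphy_suspend,	/* invoked via phy_suspend() */
	.resume		= genphy_resume,	/* invoked via phy_resume() */
	.driver		= { .owner = THIS_MODULE, },
};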
@@ -640,20 +719,19 @@ EXPORT_SYMBOL(phy_detach);
static int genphy_config_advert(struct phy_device *phydev)
{
u32 advertise;
- int oldadv, adv;
+ int oldadv, adv, bmsr;
int err, changed = 0;
- /* Only allow advertising what
- * this PHY supports */
+ /* Only allow advertising what this PHY supports */
phydev->advertising &= phydev->supported;
advertise = phydev->advertising;
/* Setup standard advertisement */
- oldadv = adv = phy_read(phydev, MII_ADVERTISE);
-
+ adv = phy_read(phydev, MII_ADVERTISE);
if (adv < 0)
return adv;
+ oldadv = adv;
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
ADVERTISE_PAUSE_ASYM);
adv |= ethtool_adv_to_mii_adv_t(advertise);
@@ -666,26 +744,36 @@ static int genphy_config_advert(struct phy_device *phydev)
changed = 1;
}
- /* Configure gigabit if it's supported */
- if (phydev->supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full)) {
- oldadv = adv = phy_read(phydev, MII_CTRL1000);
+ bmsr = phy_read(phydev, MII_BMSR);
+ if (bmsr < 0)
+ return bmsr;
- if (adv < 0)
- return adv;
+ /* Per 802.3-2008, Section 22.2.4.2.16 Extended status all
+ * 1000Mbits/sec capable PHYs shall have the BMSR_ESTATEN bit set to a
+ * logical 1.
+ */
+ if (!(bmsr & BMSR_ESTATEN))
+ return changed;
- adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
- adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
+ /* Configure gigabit if it's supported */
+ adv = phy_read(phydev, MII_CTRL1000);
+ if (adv < 0)
+ return adv;
- if (adv != oldadv) {
- err = phy_write(phydev, MII_CTRL1000, adv);
+ oldadv = adv;
+ adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
- if (err < 0)
- return err;
+ if (phydev->supported & (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full)) {
+ adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
+ if (adv != oldadv)
changed = 1;
- }
}
+ err = phy_write(phydev, MII_CTRL1000, adv);
+ if (err < 0)
+ return err;
+
return changed;
}
@@ -699,10 +787,10 @@ static int genphy_config_advert(struct phy_device *phydev)
*/
int genphy_setup_forced(struct phy_device *phydev)
{
- int err;
int ctl = 0;
- phydev->pause = phydev->asym_pause = 0;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
if (SPEED_1000 == phydev->speed)
ctl |= BMCR_SPEED1000;
@@ -711,10 +799,8 @@ int genphy_setup_forced(struct phy_device *phydev)
if (DUPLEX_FULL == phydev->duplex)
ctl |= BMCR_FULLDPLX;
-
- err = phy_write(phydev, MII_BMCR, ctl);
- return err;
+ return phy_write(phydev, MII_BMCR, ctl);
}
EXPORT_SYMBOL(genphy_setup_forced);
@@ -724,25 +810,20 @@ EXPORT_SYMBOL(genphy_setup_forced);
*/
int genphy_restart_aneg(struct phy_device *phydev)
{
- int ctl;
-
- ctl = phy_read(phydev, MII_BMCR);
+ int ctl = phy_read(phydev, MII_BMCR);
if (ctl < 0)
return ctl;
- ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
/* Don't isolate the PHY if we're negotiating */
- ctl &= ~(BMCR_ISOLATE);
+ ctl &= ~BMCR_ISOLATE;
- ctl = phy_write(phydev, MII_BMCR, ctl);
-
- return ctl;
+ return phy_write(phydev, MII_BMCR, ctl);
}
EXPORT_SYMBOL(genphy_restart_aneg);
-
/**
* genphy_config_aneg - restart auto-negotiation or write BMCR
* @phydev: target phy_device struct
@@ -759,13 +840,12 @@ int genphy_config_aneg(struct phy_device *phydev)
return genphy_setup_forced(phydev);
result = genphy_config_advert(phydev);
-
if (result < 0) /* error */
return result;
-
if (result == 0) {
/* Advertisement hasn't changed, but maybe aneg was never on to
- * begin with? Or maybe phy was isolated? */
+ * begin with? Or maybe phy was isolated?
+ */
int ctl = phy_read(phydev, MII_BMCR);
if (ctl < 0)
@@ -776,7 +856,8 @@ int genphy_config_aneg(struct phy_device *phydev)
}
/* Only restart aneg if we are advertising something different
- * than we were before. */
+ * than we were before.
+ */
if (result > 0)
result = genphy_restart_aneg(phydev);
@@ -784,6 +865,11 @@ int genphy_config_aneg(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_config_aneg);
+static int gen10g_config_aneg(struct phy_device *phydev)
+{
+ return 0;
+}
+
/**
* genphy_update_link - update link status in @phydev
* @phydev: target phy_device struct
@@ -798,13 +884,11 @@ int genphy_update_link(struct phy_device *phydev)
/* Do a fake read */
status = phy_read(phydev, MII_BMSR);
-
if (status < 0)
return status;
/* Read link and autonegotiation status */
status = phy_read(phydev, MII_BMSR);
-
if (status < 0)
return status;
@@ -833,35 +917,36 @@ int genphy_read_status(struct phy_device *phydev)
int lpa;
int lpagb = 0;
- /* Update the link, but return if there
- * was an error */
+ /* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
if (err)
return err;
+ phydev->lp_advertising = 0;
+
if (AUTONEG_ENABLE == phydev->autoneg) {
if (phydev->supported & (SUPPORTED_1000baseT_Half
| SUPPORTED_1000baseT_Full)) {
lpagb = phy_read(phydev, MII_STAT1000);
-
if (lpagb < 0)
return lpagb;
adv = phy_read(phydev, MII_CTRL1000);
-
if (adv < 0)
return adv;
+ phydev->lp_advertising =
+ mii_stat1000_to_ethtool_lpa_t(lpagb);
lpagb &= adv << 2;
}
lpa = phy_read(phydev, MII_LPA);
-
if (lpa < 0)
return lpa;
- adv = phy_read(phydev, MII_ADVERTISE);
+ phydev->lp_advertising |= mii_lpa_to_ethtool_lpa_t(lpa);
+ adv = phy_read(phydev, MII_ADVERTISE);
if (adv < 0)
return adv;
@@ -869,7 +954,8 @@ int genphy_read_status(struct phy_device *phydev)
phydev->speed = SPEED_10;
phydev->duplex = DUPLEX_HALF;
- phydev->pause = phydev->asym_pause = 0;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
phydev->speed = SPEED_1000;
@@ -878,19 +964,20 @@ int genphy_read_status(struct phy_device *phydev)
phydev->duplex = DUPLEX_FULL;
} else if (lpa & (LPA_100FULL | LPA_100HALF)) {
phydev->speed = SPEED_100;
-
+
if (lpa & LPA_100FULL)
phydev->duplex = DUPLEX_FULL;
} else
if (lpa & LPA_10FULL)
phydev->duplex = DUPLEX_FULL;
- if (phydev->duplex == DUPLEX_FULL){
+ if (phydev->duplex == DUPLEX_FULL) {
phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
}
} else {
int bmcr = phy_read(phydev, MII_BMCR);
+
if (bmcr < 0)
return bmcr;
@@ -906,27 +993,55 @@ int genphy_read_status(struct phy_device *phydev)
else
phydev->speed = SPEED_10;
- phydev->pause = phydev->asym_pause = 0;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
}
return 0;
}
EXPORT_SYMBOL(genphy_read_status);
+static int gen10g_read_status(struct phy_device *phydev)
+{
+ int devad, reg;
+ u32 mmd_mask = phydev->c45_ids.devices_in_package;
+
+ phydev->link = 1;
+
+ /* For now just lie and say it's 10G all the time */
+ phydev->speed = SPEED_10000;
+ phydev->duplex = DUPLEX_FULL;
+
+ for (devad = 0; mmd_mask; devad++, mmd_mask = mmd_mask >> 1) {
+ if (!(mmd_mask & 1))
+ continue;
+
+ /* Read twice because link state is latched and a
+ * read moves the current state into the register
+ */
+ phy_read_mmd(phydev, devad, MDIO_STAT1);
+ reg = phy_read_mmd(phydev, devad, MDIO_STAT1);
+ if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS))
+ phydev->link = 0;
+ }
+
+ return 0;
+}
+
static int genphy_config_init(struct phy_device *phydev)
{
int val;
u32 features;
/* For now, I'll claim that the generic driver supports
- * all possible port types */
+ * all possible port types
+ */
features = (SUPPORTED_TP | SUPPORTED_MII
| SUPPORTED_AUI | SUPPORTED_FIBRE |
SUPPORTED_BNC);
/* Do we support autonegotiation? */
val = phy_read(phydev, MII_BMSR);
-
if (val < 0)
return val;
@@ -944,7 +1059,6 @@ static int genphy_config_init(struct phy_device *phydev)
if (val & BMSR_ESTATEN) {
val = phy_read(phydev, MII_ESTATUS);
-
if (val < 0)
return val;
@@ -959,6 +1073,16 @@ static int genphy_config_init(struct phy_device *phydev)
return 0;
}
+
+static int gen10g_config_init(struct phy_device *phydev)
+{
+ /* Temporarily just say we support everything */
+ phydev->supported = SUPPORTED_10000baseT_Full;
+ phydev->advertising = SUPPORTED_10000baseT_Full;
+
+ return 0;
+}
+
int genphy_suspend(struct phy_device *phydev)
{
int value;
@@ -966,7 +1090,7 @@ int genphy_suspend(struct phy_device *phydev)
mutex_lock(&phydev->lock);
value = phy_read(phydev, MII_BMCR);
- phy_write(phydev, MII_BMCR, (value | BMCR_PDOWN));
+ phy_write(phydev, MII_BMCR, value | BMCR_PDOWN);
mutex_unlock(&phydev->lock);
@@ -974,6 +1098,11 @@ int genphy_suspend(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_suspend);
+static int gen10g_suspend(struct phy_device *phydev)
+{
+ return 0;
+}
+
int genphy_resume(struct phy_device *phydev)
{
int value;
@@ -981,7 +1110,7 @@ int genphy_resume(struct phy_device *phydev)
mutex_lock(&phydev->lock);
value = phy_read(phydev, MII_BMCR);
- phy_write(phydev, MII_BMCR, (value & ~BMCR_PDOWN));
+ phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
mutex_unlock(&phydev->lock);
@@ -989,6 +1118,11 @@ int genphy_resume(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_resume);
+static int gen10g_resume(struct phy_device *phydev)
+{
+ return 0;
+}
+
/**
* phy_probe - probe and init a PHY device
* @dev: device to probe and init
@@ -999,22 +1133,18 @@ EXPORT_SYMBOL(genphy_resume);
*/
static int phy_probe(struct device *dev)
{
- struct phy_device *phydev;
- struct phy_driver *phydrv;
- struct device_driver *drv;
+ struct phy_device *phydev = to_phy_device(dev);
+ struct device_driver *drv = phydev->dev.driver;
+ struct phy_driver *phydrv = to_phy_driver(drv);
int err = 0;
- phydev = to_phy_device(dev);
-
- drv = phydev->dev.driver;
- phydrv = to_phy_driver(drv);
phydev->drv = phydrv;
/* Disable the interrupt if the PHY doesn't support it
* but the interrupt is still a valid one
*/
if (!(phydrv->flags & PHY_HAS_INTERRUPT) &&
- phy_interrupt_is_valid(phydev))
+ phy_interrupt_is_valid(phydev))
phydev->irq = PHY_POLL;
if (phydrv->flags & PHY_IS_INTERNAL)
@@ -1024,7 +1154,8 @@ static int phy_probe(struct device *dev)
/* Start out supporting everything. Eventually,
* a controller will attach, and may modify one
- * or both of these values */
+ * or both of these values
+ */
phydev->supported = phydrv->features;
phydev->advertising = phydrv->features;
@@ -1037,14 +1168,11 @@ static int phy_probe(struct device *dev)
mutex_unlock(&phydev->lock);
return err;
-
}
static int phy_remove(struct device *dev)
{
- struct phy_device *phydev;
-
- phydev = to_phy_device(dev);
+ struct phy_device *phydev = to_phy_device(dev);
mutex_lock(&phydev->lock);
phydev->state = PHY_DOWN;
@@ -1071,7 +1199,6 @@ int phy_driver_register(struct phy_driver *new_driver)
new_driver->driver.remove = phy_remove;
retval = driver_register(&new_driver->driver);
-
if (retval) {
pr_err("%s: Error %d in registering driver\n",
new_driver->name, retval);
@@ -1110,13 +1237,14 @@ EXPORT_SYMBOL(phy_driver_unregister);
void phy_drivers_unregister(struct phy_driver *drv, int n)
{
int i;
- for (i = 0; i < n; i++) {
+
+ for (i = 0; i < n; i++)
phy_driver_unregister(drv + i);
- }
}
EXPORT_SYMBOL(phy_drivers_unregister);
-static struct phy_driver genphy_driver = {
+static struct phy_driver genphy_driver[] = {
+{
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
@@ -1126,8 +1254,19 @@ static struct phy_driver genphy_driver = {
.read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
- .driver = {.owner= THIS_MODULE, },
-};
+ .driver = { .owner = THIS_MODULE, },
+}, {
+ .phy_id = 0xffffffff,
+ .phy_id_mask = 0xffffffff,
+ .name = "Generic 10G PHY",
+ .config_init = gen10g_config_init,
+ .features = 0,
+ .config_aneg = gen10g_config_aneg,
+ .read_status = gen10g_read_status,
+ .suspend = gen10g_suspend,
+ .resume = gen10g_resume,
+ .driver = {.owner = THIS_MODULE, },
+} };
static int __init phy_init(void)
{
@@ -1137,7 +1276,8 @@ static int __init phy_init(void)
if (rc)
return rc;
- rc = phy_driver_register(&genphy_driver);
+ rc = phy_drivers_register(genphy_driver,
+ ARRAY_SIZE(genphy_driver));
if (rc)
mdio_bus_exit();
@@ -1146,7 +1286,8 @@ static int __init phy_init(void)
static void __exit phy_exit(void)
{
- phy_driver_unregister(&genphy_driver);
+ phy_drivers_unregister(genphy_driver,
+ ARRAY_SIZE(genphy_driver));
mdio_bus_exit();
}
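In summary for phy_device.c: the single genphy_driver becomes a two-entry array indexed by GENPHY_DRV_1G/GENPHY_DRV_10G, so clause-45 PHYs without a dedicated driver now bind to a minimal generic 10G driver instead of failing with -ENODEV, phy_attach_direct() is exported, and module init/exit switch to the plural registration helpers. A hypothetical out-of-tree driver table would register the same way (my_phy_drivers[] is an assumed array of struct phy_driver entries):

static int __init my_phy_init(void)
{
	return phy_drivers_register(my_phy_drivers,
				    ARRAY_SIZE(my_phy_drivers));
}
module_init(my_phy_init);

static void __exit my_phy_exit(void)
{
	phy_drivers_unregister(my_phy_drivers,
			       ARRAY_SIZE(my_phy_drivers));
}
module_exit(my_phy_exit);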
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index f3bea1346021..4cf5fb922e59 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -15,7 +15,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -171,14 +170,14 @@ static int ks8995_write(struct ks8995_switch *ks, char *buf,
static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf)
{
- return (ks8995_read(ks, buf, addr, 1) != 1);
+ return ks8995_read(ks, buf, addr, 1) != 1;
}
static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val)
{
char buf = val;
- return (ks8995_write(ks, &buf, addr, 1) != 1);
+ return ks8995_write(ks, &buf, addr, 1) != 1;
}
/* ------------------------------------------------------------------------ */
@@ -325,7 +324,6 @@ static int ks8995_probe(struct spi_device *spi)
return 0;
err_drvdata:
- spi_set_drvdata(spi, NULL);
kfree(ks);
return err;
}
@@ -337,7 +335,6 @@ static int ks8995_remove(struct spi_device *spi)
ks8995 = spi_get_drvdata(spi);
sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
- spi_set_drvdata(spi, NULL);
kfree(ks8995);
return 0;
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 7b4ff35c8bf7..040b8978d6ca 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -547,9 +547,9 @@ static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
skb_pull(skb,dev->hard_header_len);
eth = eth_hdr(skb);
- if(*eth->h_dest&1)
+ if(is_multicast_ether_addr(eth->h_dest))
{
- if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
+ if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
skb->pkt_type=PACKET_BROADCAST;
else
skb->pkt_type=PACKET_MULTICAST;
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 9a1849a83e2a..911b21602ff2 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -27,8 +27,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* Changelog:
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 82ee6ed954cb..2ea7efd11857 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -131,12 +131,12 @@ static inline struct pppoe_net *pppoe_pernet(struct net *net)
static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b)
{
- return a->sid == b->sid && !memcmp(a->remote, b->remote, ETH_ALEN);
+ return a->sid == b->sid && ether_addr_equal(a->remote, b->remote);
}
static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr)
{
- return a->sid == sid && !memcmp(a->remote, addr, ETH_ALEN);
+ return a->sid == sid && ether_addr_equal(a->remote, addr);
}
#if 8 % PPPOE_HASH_BITS
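The plip and pppoe hunks replace open-coded MAC address tests with the <linux/etherdevice.h> helpers: is_multicast_ether_addr() for the multicast bit, ether_addr_equal() for a plain six-byte compare, and ether_addr_equal_64bits() where both addresses are known to be followed by at least two bytes of accessible structure, as is the case for eth->h_dest and dev->broadcast. A minimal illustration of the equivalences (hypothetical helper, not from this patch):

#include <linux/etherdevice.h>

static bool example_addr_checks(const u8 *dst, const u8 *bcast)
{
	bool mcast = is_multicast_ether_addr(dst);	/* was: *dst & 1 */
	bool bc = ether_addr_equal(dst, bcast);		/* was: !memcmp(dst, bcast, ETH_ALEN) */

	return mcast && !bc;	/* multicast but not broadcast */
}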
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 736050d6b451..28407426fd6f 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1647,7 +1647,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
/*
* This helper function exists to help dev_pick_tx get the correct
@@ -2033,6 +2034,10 @@ static void team_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GRO;
+
+ /* Don't allow team devices to change network namespaces. */
+ dev->features |= NETIF_F_NETNS_LOCAL;
+
dev->hw_features = TEAM_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
@@ -2850,7 +2855,7 @@ static int team_device_event(struct notifier_block *unused,
case NETDEV_FEAT_CHANGE:
team_compute_features(port->team);
break;
- case NETDEV_CHANGEMTU:
+ case NETDEV_PRECHANGEMTU:
/* Forbid to change mtu of underlaying device */
return NOTIFY_BAD;
case NETDEV_PRE_TYPE_CHANGE:
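team_select_queue() here, and tun_select_queue() further down, pick up the third parameter that the core now passes to ndo_select_queue; drivers with no L2-forwarding (macvlan) offload can simply ignore it. The updated shape of such a callback, as a sketch with assumed names:

static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
			   void *accel_priv)
{
	/* accel_priv is only set on the macvlan offload path; ignored here */
	return (u16)(skb_get_hash(skb) % dev->real_num_tx_queues);
}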
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
index 7f032e211343..cd2f692b8074 100644
--- a/drivers/net/team/team_mode_random.c
+++ b/drivers/net/team/team_mode_random.c
@@ -13,20 +13,14 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/skbuff.h>
-#include <linux/reciprocal_div.h>
#include <linux/if_team.h>
-static u32 random_N(unsigned int N)
-{
- return reciprocal_divide(prandom_u32(), N);
-}
-
static bool rnd_transmit(struct team *team, struct sk_buff *skb)
{
struct team_port *port;
int port_index;
- port_index = random_N(team->en_port_count);
+ port_index = prandom_u32_max(team->en_port_count);
port = team_get_port_by_index_rcu(team, port_index);
if (unlikely(!port))
goto drop;
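random_N(), which scaled prandom_u32() with reciprocal_divide(), is dropped in favour of prandom_u32_max(), which performs the same multiply-and-shift reduction into [0, N). A minimal sketch of the replacement (assumed helper name):

#include <linux/random.h>

static unsigned int pick_port_index(unsigned int en_port_count)
{
	/* equivalent to the removed random_N(); no divide or modulo */
	return prandom_u32_max(en_port_count);
	/* roughly ((u64)prandom_u32() * en_port_count) >> 32 */
}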
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7c8343a4f918..44c4db8450f0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -69,6 +69,7 @@
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
+#include <linux/seq_file.h>
#include <asm/uaccess.h>
@@ -110,7 +111,7 @@ struct tap_filter {
unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
};
-/* DEFAULT_MAX_NUM_RSS_QUEUES were choosed to let the rx/tx queues allocated for
+/* DEFAULT_MAX_NUM_RSS_QUEUES were chosen to let the rx/tx queues allocated for
* the netdevice to be fit in one page. So we can make sure the success of
* memory allocation. TODO: increase the limit. */
#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
@@ -119,7 +120,7 @@ struct tap_filter {
#define TUN_FLOW_EXPIRE (3 * HZ)
/* A tun_file connects an open character device to a tuntap netdevice. It
- * also contains all socket related strctures (except sock_fprog and tap_filter)
+ * also contains all socket related structures (except sock_fprog and tap_filter)
* to serve as one transmit queue for tuntap device. The sock_fprog and
* tap_filter were kept in tun_struct since they were used for filtering for the
* netdevice not for a specific queue (at least I didn't see the requirement for
@@ -152,6 +153,7 @@ struct tun_flow_entry {
struct tun_struct *tun;
u32 rxhash;
+ u32 rps_rxhash;
int queue_index;
unsigned long updated;
};
@@ -220,6 +222,7 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
rxhash, queue_index);
e->updated = jiffies;
e->rxhash = rxhash;
+ e->rps_rxhash = 0;
e->queue_index = queue_index;
e->tun = tun;
hlist_add_head_rcu(&e->hash_link, head);
@@ -232,6 +235,7 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
e->rxhash, e->queue_index);
+ sock_rps_reset_flow_hash(e->rps_rxhash);
hlist_del_rcu(&e->hash_link);
kfree_rcu(e, rcu);
--tun->flow_count;
@@ -325,6 +329,7 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
/* TODO: keep queueing to old queue until it's empty? */
e->queue_index = queue_index;
e->updated = jiffies;
+ sock_rps_record_flow_hash(e->rps_rxhash);
} else {
spin_lock_bh(&tun->lock);
if (!tun_flow_find(head, rxhash) &&
@@ -341,14 +346,27 @@ unlock:
rcu_read_unlock();
}
+/**
+ * Save the hash received in the stack receive path and update the
+ * flow_hash table accordingly.
+ */
+static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
+{
+ if (unlikely(e->rps_rxhash != hash)) {
+ sock_rps_reset_flow_hash(e->rps_rxhash);
+ e->rps_rxhash = hash;
+ }
+}
+
/* We try to identify a flow through its rxhash first. The reason that
- * we do not check rxq no. is becuase some cards(e.g 82599), chooses
+ * we do not check rxq no. is because some cards(e.g 82599), chooses
* the rxq based on the txq where the last packet of the flow comes. As
* the userspace application move between processors, we may get a
* different rxq no. here. If we could not get rxhash, then we would
* hope the rxq no. may help here.
*/
-static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
struct tun_struct *tun = netdev_priv(dev);
struct tun_flow_entry *e;
@@ -358,12 +376,13 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
rcu_read_lock();
numqueues = ACCESS_ONCE(tun->numqueues);
- txq = skb_get_rxhash(skb);
+ txq = skb_get_hash(skb);
if (txq) {
e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
- if (e)
+ if (e) {
+ tun_flow_save_rps_rxhash(e, txq);
txq = e->queue_index;
- else
+ } else
/* use multiply and shift instead of expensive divide */
txq = ((u64)txq * numqueues) >> 32;
} else if (likely(skb_rx_queue_recorded(skb))) {
@@ -531,7 +550,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
err = 0;
- /* Re-attach the filter to presist device */
+ /* Re-attach the filter to persist device */
if (!skip_filter && (tun->filter_attached == true)) {
err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
if (!err)
@@ -720,14 +739,32 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
struct tun_struct *tun = netdev_priv(dev);
int txq = skb->queue_mapping;
struct tun_file *tfile;
+ u32 numqueues = 0;
rcu_read_lock();
tfile = rcu_dereference(tun->tfiles[txq]);
+ numqueues = ACCESS_ONCE(tun->numqueues);
/* Drop packet if interface is not attached */
- if (txq >= tun->numqueues)
+ if (txq >= numqueues)
goto drop;
+ if (numqueues == 1) {
+ /* Select queue was not called for the skbuff, so we extract the
+ * RPS hash and save it into the flow_table here.
+ */
+ __u32 rxhash;
+
+ rxhash = skb_get_hash(skb);
+ if (rxhash) {
+ struct tun_flow_entry *e;
+ e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
+ rxhash);
+ if (e)
+ tun_flow_save_rps_rxhash(e, rxhash);
+ }
+ }
+
tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
BUG_ON(!tfile);
@@ -745,8 +782,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Limit the number of packets queued by dividing txq length with the
* number of queues.
*/
- if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
- >= dev->tx_queue_len / tun->numqueues)
+ if (skb_queue_len(&tfile->socket.sk->sk_receive_queue) * numqueues
+ >= dev->tx_queue_len)
goto drop;
if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
@@ -819,9 +856,9 @@ static void tun_poll_controller(struct net_device *dev)
* Tun only receives frames when:
* 1) the char device endpoint gets data from user space
* 2) the tun socket gets a sendmsg call from user space
- * Since both of those are syncronous operations, we are guaranteed
+ * Since both of those are synchronous operations, we are guaranteed
* never to have pending data when we poll for it
- * so theres nothing to do here but return.
+ * so there is nothing to do here but return.
* We need this though so netpoll recognizes us as an interface that
* supports polling, which enables bridge devices in virt setups to
* still use netconsole
@@ -1146,7 +1183,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_reset_network_header(skb);
skb_probe_transport_header(skb, 0);
- rxhash = skb_get_rxhash(skb);
+ rxhash = skb_get_hash(skb);
netif_rx_ni(skb);
tun->dev->stats.rx_packets++;
@@ -1291,8 +1328,7 @@ done:
}
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
- struct kiocb *iocb, const struct iovec *iv,
- ssize_t len, int noblock)
+ const struct iovec *iv, ssize_t len, int noblock)
{
DECLARE_WAITQUEUE(wait, current);
struct sk_buff *skb;
@@ -1355,7 +1391,7 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
goto out;
}
- ret = tun_do_read(tun, tfile, iocb, iv, len,
+ ret = tun_do_read(tun, tfile, iv, len,
file->f_flags & O_NONBLOCK);
ret = min_t(ssize_t, ret, len);
if (ret > 0)
@@ -1456,7 +1492,7 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
SOL_PACKET, TUN_TX_TIMESTAMP);
goto out;
}
- ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
+ ret = tun_do_read(tun, tfile, m->msg_iov, total_len,
flags & MSG_DONTWAIT);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
@@ -2193,6 +2229,27 @@ static int tun_chr_close(struct inode *inode, struct file *file)
return 0;
}
+#ifdef CONFIG_PROC_FS
+static int tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
+{
+ struct tun_struct *tun;
+ struct ifreq ifr;
+
+ memset(&ifr, 0, sizeof(ifr));
+
+ rtnl_lock();
+ tun = tun_get(f);
+ if (tun)
+ tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
+ rtnl_unlock();
+
+ if (tun)
+ tun_put(tun);
+
+ return seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
+}
+#endif
+
static const struct file_operations tun_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -2207,7 +2264,10 @@ static const struct file_operations tun_fops = {
#endif
.open = tun_chr_open,
.release = tun_chr_close,
- .fasync = tun_chr_fasync
+ .fasync = tun_chr_fasync,
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = tun_chr_show_fdinfo,
+#endif
};
static struct miscdevice tun_miscdev = {
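The tun.c hunks above switch the flow lookup to skb_get_hash() and, on a flow-table miss, scale the 32-bit hash to a queue index with a multiply-and-shift rather than a divide. A minimal standalone sketch of that scaling (hash_to_queue() is a hypothetical helper for illustration, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit flow hash onto [0, numqueues) without a divide: multiply by
 * the queue count and keep the high 32 bits, as tun_select_queue() does.
 */
static uint16_t hash_to_queue(uint32_t hash, uint32_t numqueues)
{
	return (uint16_t)(((uint64_t)hash * numqueues) >> 32);
}

int main(void)
{
	/* 0x9e3779b9 scaled onto 8 queues lands in queue 4 */
	printf("queue = %u\n", hash_to_queue(0x9e3779b9u, 8));
	return 0;
}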
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 85e4a01670f0..409499fdb157 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -92,11 +92,12 @@ config USB_RTL8150
module will be called rtl8150.
config USB_RTL8152
- tristate "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
+ tristate "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
select MII
help
This option adds support for Realtek RTL8152 based USB 2.0
- 10/100 Ethernet adapters.
+ 10/100 Ethernet adapters and RTL8153 based USB 3.0 10/100/1000
+ Ethernet adapters.
To compile this driver as a module, choose M here: the
module will be called r8152.
@@ -276,12 +277,12 @@ config USB_NET_CDC_MBIM
module will be called cdc_mbim.
config USB_NET_DM9601
- tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
+ tristate "Davicom DM96xx based USB 10/100 ethernet devices"
depends on USB_USBNET
select CRC32
help
- This option adds support for Davicom DM9601 based USB 1.1
- 10/100 Ethernet adapters.
+ This option adds support for Davicom DM9601/DM9620/DM9621A
+ based USB 10/100 Ethernet adapters.
config USB_NET_SR9700
tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices"
@@ -291,6 +292,22 @@ config USB_NET_SR9700
This option adds support for CoreChip-sz SR9700 based USB 1.1
10/100 Ethernet adapters.
+config USB_NET_SR9800
+ tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices"
+ depends on USB_USBNET
+ select CRC32
+ default y
+ ---help---
+ Say Y if you want to use one of the 100Mbps USB Ethernet devices
+ based on the CoreChip-sz SR9800 chip.
+
+ This driver makes the adapter appear as a normal Ethernet interface,
+ typically on eth0, if it is the only ethernet device, or perhaps on
+ eth1, if you have a PCI or ISA ethernet card installed.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sr9800.
+
config USB_NET_SMSC75XX
tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b17b5e88bbaf..433f0a00c683 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
obj-$(CONFIG_USB_NET_SR9700) += sr9700.o
+obj-$(CONFIG_USB_NET_SR9800) += sr9800.o
obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o
obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o
obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index bdaa12d07a12..5d049d00c2d7 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ASIX_H
@@ -28,7 +27,6 @@
#include <linux/module.h>
#include <linux/kmod.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 577c72d5f369..5c55f11572ba 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "asix.h"
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 386a3df53678..9765a7d4766d 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "asix.h"
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 723b3879ecc2..5f18fcb8dcc7 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -21,8 +21,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "asix.h"
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 8e8d0fcd4979..d6f64dad05bc 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index df507e6dbb9c..630caf48f63a 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -24,15 +24,13 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 08d55b6bf272..f7180f8db39e 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -14,12 +14,10 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 2023f3ea891e..42e176912c8e 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -14,15 +14,13 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
// #define DEBUG // error path messages, extra info
// #define VERBOSE // more; success messages
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -487,6 +485,7 @@ static const struct driver_info wwan_info = {
#define ZTE_VENDOR_ID 0x19D2
#define DELL_VENDOR_ID 0x413C
#define REALTEK_VENDOR_ID 0x0bda
+#define SAMSUNG_VENDOR_ID 0x04e8
static const struct usb_device_id products[] = {
/* BLACKLIST !!
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index e15ec2b12035..dbff290ed0e4 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -39,7 +39,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ctype.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index 0d1fe89ae0bd..91f0919fe278 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -13,13 +13,11 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
#include <linux/kmod.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 1e207f086b75..3eed708a6182 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -14,12 +14,10 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index c6867f926cff..6e9c344c7a20 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -1,5 +1,5 @@
/*
- * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices
+ * Davicom DM96xx USB 10/100Mbps ethernet devices
*
* Peter Korsgaard <jacmet@sunsite.dk>
*
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stddef.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -364,7 +363,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->ethtool_ops = &dm9601_ethtool_ops;
dev->net->hard_header_len += DM_TX_OVERHEAD;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
- dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
+
+ /* dm9620/21a require room for 4 byte padding, even in dm9601
+ * mode, so we need +1 to be able to receive full size
+ * ethernet frames.
+ */
+ dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
dev->mii.dev = dev->net;
dev->mii.mdio_read = dm9601_mdio_read;
@@ -468,7 +472,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags)
{
- int len;
+ int len, pad;
/* format:
b1: packet length low
@@ -476,12 +480,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
b3..n: packet data
*/
- len = skb->len;
+ len = skb->len + DM_TX_OVERHEAD;
- if (skb_headroom(skb) < DM_TX_OVERHEAD) {
+ /* workaround for dm962x errata with tx fifo getting out of
+ * sync if a USB bulk transfer retry happens right after a
+ * packet with odd / maxpacket length by adding up to 3 bytes
+ * padding.
+ */
+ while ((len & 1) || !(len % dev->maxpacket))
+ len++;
+
+ len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
+ pad = len - skb->len;
+
+ if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
struct sk_buff *skb2;
- skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags);
+ skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
dev_kfree_skb_any(skb);
skb = skb2;
if (!skb)
@@ -490,10 +505,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
__skb_push(skb, DM_TX_OVERHEAD);
- /* usbnet adds padding if length is a multiple of packet size
- if so, adjust length value in header */
- if ((skb->len % dev->maxpacket) == 0)
- len++;
+ if (pad) {
+ memset(skb->data + skb->len, 0, pad);
+ __skb_put(skb, pad);
+ }
skb->data[0] = len;
skb->data[1] = len >> 8;
@@ -543,7 +558,7 @@ static int dm9601_link_reset(struct usbnet *dev)
}
static const struct driver_info dm9601_info = {
- .description = "Davicom DM9601 USB Ethernet",
+ .description = "Davicom DM96xx USB 10/100 Ethernet",
.flags = FLAG_ETHER | FLAG_LINK_INTR,
.bind = dm9601_bind,
.rx_fixup = dm9601_rx_fixup,
@@ -594,6 +609,22 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */
.driver_info = (unsigned long)&dm9601_info,
},
+ {
+ USB_DEVICE(0x0a46, 0x9621), /* DM9621A USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
+ {
+ USB_DEVICE(0x0a46, 0x9622), /* DM9622 USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
+ {
+ USB_DEVICE(0x0a46, 0x0269), /* DM962OA USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
+ {
+ USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
{}, // END
};
@@ -612,5 +643,5 @@ static struct usb_driver dm9601_driver = {
module_usb_driver(dm9601_driver);
MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
-MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices");
+MODULE_DESCRIPTION("Davicom DM96xx USB 10/100 ethernet devices");
MODULE_LICENSE("GPL");
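The dm9601.c hunks above work around a dm962x tx-fifo errata by padding each bulk-out transfer until its total length (2-byte hardware header included) is even and not an exact multiple of the endpoint's max packet size. A minimal standalone sketch of that length rule (padded_len() is a hypothetical helper; DM_TX_OVERHEAD is the driver's 2-byte header):

#include <stdio.h>

#define DM_TX_OVERHEAD 2	/* 2-byte length header, as in dm9601.c */

/* Return the padded payload length for a given skb length and USB bulk
 * max packet size: grow the total until it is even and not a multiple of
 * maxpacket, then report it without the hardware header, following the
 * logic of the reworked dm9601_tx_fixup().
 */
static int padded_len(int skb_len, int maxpacket)
{
	int len = skb_len + DM_TX_OVERHEAD;

	while ((len & 1) || !(len % maxpacket))
		len++;

	return len - DM_TX_OVERHEAD;
}

int main(void)
{
	int skb_len = 62, maxpacket = 64;
	int len = padded_len(skb_len, maxpacket);

	/* a 62-byte frame becomes a 64-byte payload, i.e. 2 bytes of padding */
	printf("len = %d, pad = %d\n", len, len - skb_len);
	return 0;
}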
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index a7e3f4e55bf3..e4a8a93fbaf7 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -14,15 +14,13 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
// #define DEBUG // error path messages, extra info
// #define VERBOSE // more; success messages
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 86292e6aaf49..660bd5ea9fc0 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -185,7 +185,6 @@ enum rx_ctrl_state{
#define BM_REQUEST_TYPE (0xa1)
#define B_NOTIFICATION (0x20)
#define W_VALUE (0x0)
-#define W_INDEX (0x2)
#define W_LENGTH (0x2)
#define B_OVERRUN (0x1<<6)
@@ -1202,16 +1201,18 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
struct hso_serial *serial = urb->context;
int status = urb->status;
+ D4("\n--- Got serial_read_bulk callback %02x ---", status);
+
/* sanity check */
if (!serial) {
D1("serial == NULL");
return;
- } else if (status) {
+ }
+ if (status) {
handle_usb_error(status, __func__, serial->parent);
return;
}
- D4("\n--- Got serial_read_bulk callback %02x ---", status);
D1("Actual length = %d\n", urb->actual_length);
DUMP1(urb->transfer_buffer, urb->actual_length);
@@ -1219,25 +1220,13 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
if (serial->port.count == 0)
return;
- if (status == 0) {
- if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
- fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
- /* Valid data, handle RX data */
- spin_lock(&serial->serial_lock);
- serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
- put_rxbuf_data_and_resubmit_bulk_urb(serial);
- spin_unlock(&serial->serial_lock);
- } else if (status == -ENOENT || status == -ECONNRESET) {
- /* Unlinked - check for throttled port. */
- D2("Port %d, successfully unlinked urb", serial->minor);
- spin_lock(&serial->serial_lock);
- serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
- hso_resubmit_rx_bulk_urb(serial, urb);
- spin_unlock(&serial->serial_lock);
- } else {
- D2("Port %d, status = %d for read urb", serial->minor, status);
- return;
- }
+ if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
+ fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
+ /* Valid data, handle RX data */
+ spin_lock(&serial->serial_lock);
+ serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
+ put_rxbuf_data_and_resubmit_bulk_urb(serial);
+ spin_unlock(&serial->serial_lock);
}
/*
@@ -1487,6 +1476,7 @@ static void tiocmget_intr_callback(struct urb *urb)
struct uart_icount *icount;
struct hso_serial_state_notification *serial_state_notification;
struct usb_device *usb;
+ int if_num;
/* Sanity checks */
if (!serial)
@@ -1495,15 +1485,24 @@ static void tiocmget_intr_callback(struct urb *urb)
handle_usb_error(status, __func__, serial->parent);
return;
}
+
+ /* tiocmget is only supported on HSO_PORT_MODEM */
tiocmget = serial->tiocmget;
if (!tiocmget)
return;
+ BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM);
+
usb = serial->parent->usb;
+ if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
+
+ /* wIndex should be the USB interface number of the port to which the
+ * notification applies, which should always be the Modem port.
+ */
serial_state_notification = &tiocmget->serial_state_notification;
if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
serial_state_notification->bNotification != B_NOTIFICATION ||
le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
- le16_to_cpu(serial_state_notification->wIndex) != W_INDEX ||
+ le16_to_cpu(serial_state_notification->wIndex) != if_num ||
le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) {
dev_warn(&usb->dev,
"hso received invalid serial state notification\n");
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index ace9e74ffbdd..4ff70b22c6ee 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -20,8 +20,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index ff8594d8dd2d..421934c83f1c 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -45,7 +45,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 6866eae3e388..5662babf0583 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -15,7 +15,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index afb117c16d2d..a359d3bb7c5b 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -25,8 +25,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
@@ -46,7 +45,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index 808d6506da41..acfcc32b323d 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -15,8 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 03832d3780aa..a305a7b2dae6 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -36,14 +36,12 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/init.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -117,7 +115,6 @@ enum {
struct mcs7830_data {
u8 multi_filter[8];
u8 config;
- u8 link_counter;
};
static const char driver_name[] = "MOSCHIP usb-ethernet driver";
@@ -561,26 +558,16 @@ static void mcs7830_status(struct usbnet *dev, struct urb *urb)
{
u8 *buf = urb->transfer_buffer;
bool link, link_changed;
- struct mcs7830_data *data = mcs7830_get_data(dev);
if (urb->actual_length < 16)
return;
- link = !(buf[1] & 0x20);
+ link = !(buf[1] == 0x20);
link_changed = netif_carrier_ok(dev->net) != link;
if (link_changed) {
- data->link_counter++;
- /*
- track link state 20 times to guard against erroneous
- link state changes reported sometimes by the chip
- */
- if (data->link_counter > 20) {
- data->link_counter = 0;
- usbnet_link_change(dev, link, 0);
- netdev_dbg(dev->net, "Link Status is: %d\n", link);
- }
- } else
- data->link_counter = 0;
+ usbnet_link_change(dev, link, 0);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
+ }
}
static const struct driver_info moschip_info = {
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 93e0716a118c..0a85d9227775 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -13,15 +13,13 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
// #define DEBUG // error path messages, extra info
// #define VERBOSE // more; success messages
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 0fcc8e65a068..3d18bb0eee85 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -13,15 +13,13 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
// #define DEBUG // error path messages, extra info
// #define VERBOSE // more; success messages
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 23bdd5b9274d..ff5c87128ffe 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -712,6 +712,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1255, 3)},
{QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */
{QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
@@ -723,6 +724,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
+ {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 51073721e224..d89dbe395ad2 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright (c) 2014 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -7,7 +7,6 @@
*
*/
-#include <linux/init.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -24,9 +23,9 @@
#include <linux/ipv6.h>
/* Version Information */
-#define DRIVER_VERSION "v1.02.0 (2013/10/28)"
+#define DRIVER_VERSION "v1.04.0 (2014/01/15)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
-#define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
+#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
#define MODULENAME "r8152"
#define R8152_PHY_ID 32
@@ -39,15 +38,24 @@
#define PLA_RXFIFO_CTRL2 0xc0a8
#define PLA_FMC 0xc0b4
#define PLA_CFG_WOL 0xc0b6
+#define PLA_TEREDO_CFG 0xc0bc
#define PLA_MAR 0xcd00
+#define PLA_BACKUP 0xd000
#define PAL_BDC_CR 0xd1a0
+#define PLA_TEREDO_TIMER 0xd2cc
+#define PLA_REALWOW_TIMER 0xd2e8
#define PLA_LEDSEL 0xdd90
#define PLA_LED_FEATURE 0xdd92
#define PLA_PHYAR 0xde00
+#define PLA_BOOT_CTRL 0xe004
#define PLA_GPHY_INTR_IMR 0xe022
#define PLA_EEE_CR 0xe040
#define PLA_EEEP_CR 0xe080
#define PLA_MAC_PWR_CTRL 0xe0c0
+#define PLA_MAC_PWR_CTRL2 0xe0ca
+#define PLA_MAC_PWR_CTRL3 0xe0cc
+#define PLA_MAC_PWR_CTRL4 0xe0ce
+#define PLA_WDT6_CTRL 0xe428
#define PLA_TCR0 0xe610
#define PLA_TCR1 0xe612
#define PLA_TXFIFO_CTRL 0xe618
@@ -73,16 +81,25 @@
#define PLA_BP_5 0xfc32
#define PLA_BP_6 0xfc34
#define PLA_BP_7 0xfc36
+#define PLA_BP_EN 0xfc38
+#define USB_U2P3_CTRL 0xb460
#define USB_DEV_STAT 0xb808
#define USB_USB_CTRL 0xd406
#define USB_PHY_CTRL 0xd408
#define USB_TX_AGG 0xd40a
#define USB_RX_BUF_TH 0xd40c
#define USB_USB_TIMER 0xd428
+#define USB_RX_EARLY_AGG 0xd42c
#define USB_PM_CTRL_STATUS 0xd432
#define USB_TX_DMA 0xd434
+#define USB_TOLERANCE 0xd490
+#define USB_LPM_CTRL 0xd41a
#define USB_UPS_CTRL 0xd800
+#define USB_MISC_0 0xd81a
+#define USB_POWER_CUT 0xd80a
+#define USB_AFE_CTRL2 0xd824
+#define USB_WDT11_CTRL 0xe43c
#define USB_BP_BA 0xfc26
#define USB_BP_0 0xfc28
#define USB_BP_1 0xfc2a
@@ -92,14 +109,30 @@
#define USB_BP_5 0xfc32
#define USB_BP_6 0xfc34
#define USB_BP_7 0xfc36
+#define USB_BP_EN 0xfc38
/* OCP Registers */
#define OCP_ALDPS_CONFIG 0x2010
#define OCP_EEE_CONFIG1 0x2080
#define OCP_EEE_CONFIG2 0x2092
#define OCP_EEE_CONFIG3 0x2094
+#define OCP_BASE_MII 0xa400
#define OCP_EEE_AR 0xa41a
#define OCP_EEE_DATA 0xa41c
+#define OCP_PHY_STATUS 0xa420
+#define OCP_POWER_CFG 0xa430
+#define OCP_EEE_CFG 0xa432
+#define OCP_SRAM_ADDR 0xa436
+#define OCP_SRAM_DATA 0xa438
+#define OCP_DOWN_SPEED 0xa442
+#define OCP_EEE_CFG2 0xa5d0
+#define OCP_ADC_CFG 0xbc06
+
+/* SRAM Register */
+#define SRAM_LPF_CFG 0x8012
+#define SRAM_10M_AMP1 0x8080
+#define SRAM_10M_AMP2 0x8082
+#define SRAM_IMPEDANCE 0x8084
/* PLA_RCR */
#define RCR_AAP 0x00000001
@@ -116,14 +149,17 @@
#define RXFIFO_THR2_FULL 0x00000060
#define RXFIFO_THR2_HIGH 0x00000038
#define RXFIFO_THR2_OOB 0x0000004a
+#define RXFIFO_THR2_NORMAL 0x00a0
/* PLA_RXFIFO_CTRL2 */
#define RXFIFO_THR3_FULL 0x00000078
#define RXFIFO_THR3_HIGH 0x00000048
#define RXFIFO_THR3_OOB 0x0000005a
+#define RXFIFO_THR3_NORMAL 0x0110
/* PLA_TXFIFO_CTRL */
#define TXFIFO_THR_NORMAL 0x00400008
+#define TXFIFO_THR_NORMAL2 0x01000008
/* PLA_FMC */
#define FMC_FCR_MCU_EN 0x0001
@@ -131,6 +167,9 @@
/* PLA_EEEP_CR */
#define EEEP_CR_EEEP_TX 0x0002
+/* PLA_WDT6_CTRL */
+#define WDT6_SET_MODE 0x0010
+
/* PLA_TCR0 */
#define TCR0_TX_EMPTY 0x0800
#define TCR0_AUTO_FIFO 0x0080
@@ -168,6 +207,12 @@
/* PLA_CFG_WOL */
#define MAGIC_EN 0x0001
+/* PLA_TEREDO_CFG */
+#define TEREDO_SEL 0x8000
+#define TEREDO_WAKE_MASK 0x7f00
+#define TEREDO_RS_EVENT_MASK 0x00fe
+#define OOB_TEREDO_EN 0x0001
+
/* PAL_BDC_CR */
#define ALDPS_PROXY_MODE 0x0001
@@ -185,6 +230,25 @@
#define D3_CLK_GATED_EN 0x00004000
#define MCU_CLK_RATIO 0x07010f07
#define MCU_CLK_RATIO_MASK 0x0f0f0f0f
+#define ALDPS_SPDWN_RATIO 0x0f87
+
+/* PLA_MAC_PWR_CTRL2 */
+#define EEE_SPDWN_RATIO 0x8007
+
+/* PLA_MAC_PWR_CTRL3 */
+#define PKT_AVAIL_SPDWN_EN 0x0100
+#define SUSPEND_SPDWN_EN 0x0004
+#define U1U2_SPDWN_EN 0x0002
+#define L1_SPDWN_EN 0x0001
+
+/* PLA_MAC_PWR_CTRL4 */
+#define PWRSAVE_SPDWN_EN 0x1000
+#define RXDV_SPDWN_EN 0x0800
+#define TX10MIDLE_EN 0x0100
+#define TP100_SPDWN_EN 0x0020
+#define TP500_SPDWN_EN 0x0010
+#define TP1000_SPDWN_EN 0x0008
+#define EEE_SPDWN_EN 0x0001
/* PLA_GPHY_INTR_IMR */
#define GPHY_STS_MSK 0x0001
@@ -199,6 +263,9 @@
#define EEE_RX_EN 0x0001
#define EEE_TX_EN 0x0002
+/* PLA_BOOT_CTRL */
+#define AUTOLOAD_DONE 0x0002
+
/* USB_DEV_STAT */
#define STAT_SPEED_MASK 0x0006
#define STAT_SPEED_HIGH 0x0000
@@ -208,7 +275,9 @@
#define TX_AGG_MAX_THRESHOLD 0x03
/* USB_RX_BUF_TH */
-#define RX_BUF_THR 0x7a120180
+#define RX_THR_SUPPER 0x0c350180
+#define RX_THR_HIGH 0x7a120180
+#define RX_THR_SLOW 0xffff0180
/* USB_TX_DMA */
#define TEST_MODE_DISABLE 0x00000001
@@ -218,17 +287,55 @@
#define POWER_CUT 0x0100
/* USB_PM_CTRL_STATUS */
-#define RWSUME_INDICATE 0x0001
+#define RESUME_INDICATE 0x0001
/* USB_USB_CTRL */
#define RX_AGG_DISABLE 0x0010
+/* USB_U2P3_CTRL */
+#define U2P3_ENABLE 0x0001
+
+/* USB_POWER_CUT */
+#define PWR_EN 0x0001
+#define PHASE2_EN 0x0008
+
+/* USB_MISC_0 */
+#define PCUT_STATUS 0x0001
+
+/* USB_RX_EARLY_AGG */
+#define EARLY_AGG_SUPPER 0x0e832981
+#define EARLY_AGG_HIGH 0x0e837a12
+#define EARLY_AGG_SLOW 0x0e83ffff
+
+/* USB_WDT11_CTRL */
+#define TIMER11_EN 0x0001
+
+/* USB_LPM_CTRL */
+#define LPM_TIMER_MASK 0x0c
+#define LPM_TIMER_500MS 0x04 /* 500 ms */
+#define LPM_TIMER_500US 0x0c /* 500 us */
+
+/* USB_AFE_CTRL2 */
+#define SEN_VAL_MASK 0xf800
+#define SEN_VAL_NORMAL 0xa000
+#define SEL_RXIDLE 0x0100
+
/* OCP_ALDPS_CONFIG */
#define ENPWRSAVE 0x8000
#define ENPDNPS 0x0200
#define LINKENA 0x0100
#define DIS_SDSAVE 0x0010
+/* OCP_PHY_STATUS */
+#define PHY_STAT_MASK 0x0007
+#define PHY_STAT_LAN_ON 3
+#define PHY_STAT_PWRDN 5
+
+/* OCP_POWER_CFG */
+#define EEE_CLKDIV_EN 0x8000
+#define EN_ALDPS 0x0004
+#define EN_10M_PLLOFF 0x0001
+
/* OCP_EEE_CONFIG1 */
#define RG_TXLPI_MSK_HFDUP 0x8000
#define RG_MATCLR_EN 0x4000
@@ -263,7 +370,36 @@
#define EEE_ADDR 0x003C
#define EEE_DATA 0x0002
+/* OCP_EEE_CFG */
+#define CTAP_SHORT_EN 0x0040
+#define EEE10_EN 0x0010
+
+/* OCP_DOWN_SPEED */
+#define EN_10M_BGOFF 0x0080
+
+/* OCP_EEE_CFG2 */
+#define MY1000_EEE 0x0004
+#define MY100_EEE 0x0002
+
+/* OCP_ADC_CFG */
+#define CKADSEL_L 0x0100
+#define ADC_EN 0x0080
+#define EN_EMI_L 0x0040
+
+/* SRAM_LPF_CFG */
+#define LPF_AUTO_TUNE 0x8000
+
+/* SRAM_10M_AMP1 */
+#define GDAC_IB_UPALL 0x0008
+
+/* SRAM_10M_AMP2 */
+#define AMP_DN 0x0200
+
+/* SRAM_IMPEDANCE */
+#define RX_DRIVING_MASK 0x6000
+
enum rtl_register_content {
+ _1000bps = 0x10,
_100bps = 0x08,
_10bps = 0x04,
LINK_STATUS = 0x02,
@@ -273,6 +409,9 @@ enum rtl_register_content {
#define RTL8152_MAX_TX 10
#define RTL8152_MAX_RX 10
#define INTBUFSIZE 2
+#define CRC_SIZE 4
+#define TX_ALIGN 4
+#define RX_ALIGN 8
#define INTR_LINK 0x0004
@@ -302,10 +441,17 @@ enum rtl8152_flags {
/* Define these values to match your device */
#define VENDOR_ID_REALTEK 0x0bda
#define PRODUCT_ID_RTL8152 0x8152
+#define PRODUCT_ID_RTL8153 0x8153
+
+#define VENDOR_ID_SAMSUNG 0x04e8
+#define PRODUCT_ID_SAMSUNG 0xa101
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
+#define REALTEK_USB_DEVICE(vend, prod) \
+ USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC)
+
struct rx_desc {
__le32 opts1;
#define RX_LEN_MASK 0x7fff
@@ -363,6 +509,15 @@ struct r8152 {
spinlock_t rx_lock, tx_lock;
struct delayed_work schedule;
struct mii_if_info mii;
+
+ struct rtl_ops {
+ void (*init)(struct r8152 *);
+ int (*enable)(struct r8152 *);
+ void (*disable)(struct r8152 *);
+ void (*down)(struct r8152 *);
+ void (*unload)(struct r8152 *);
+ } rtl_ops;
+
int intr_interval;
u32 msg_enable;
u32 tx_qlen;
@@ -375,7 +530,11 @@ struct r8152 {
enum rtl_version {
RTL_VER_UNKNOWN = 0,
RTL_VER_01,
- RTL_VER_02
+ RTL_VER_02,
+ RTL_VER_03,
+ RTL_VER_04,
+ RTL_VER_05,
+ RTL_VER_MAX
};
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
@@ -427,8 +586,8 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
void *data, u16 type)
{
- u16 limit = 64;
- int ret = 0;
+ u16 limit = 64;
+ int ret = 0;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return -ENODEV;
@@ -467,9 +626,9 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
u16 size, void *data, u16 type)
{
- int ret;
- u16 byteen_start, byteen_end, byen;
- u16 limit = 512;
+ int ret;
+ u16 byteen_start, byteen_end, byen;
+ u16 limit = 512;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return -ENODEV;
@@ -653,45 +812,54 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
}
-static void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value)
+static u16 ocp_reg_read(struct r8152 *tp, u16 addr)
{
- u32 ocp_data;
- int i;
+ u16 ocp_base, ocp_index;
- ocp_data = PHYAR_FLAG | ((reg_addr & 0x1f) << 16) |
- (value & 0xffff);
+ ocp_base = addr & 0xf000;
+ if (ocp_base != tp->ocp_base) {
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base);
+ tp->ocp_base = ocp_base;
+ }
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_PHYAR, ocp_data);
+ ocp_index = (addr & 0x0fff) | 0xb000;
+ return ocp_read_word(tp, MCU_TYPE_PLA, ocp_index);
+}
- for (i = 20; i > 0; i--) {
- udelay(25);
- ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_PHYAR);
- if (!(ocp_data & PHYAR_FLAG))
- break;
+static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
+{
+ u16 ocp_base, ocp_index;
+
+ ocp_base = addr & 0xf000;
+ if (ocp_base != tp->ocp_base) {
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base);
+ tp->ocp_base = ocp_base;
}
- udelay(20);
+
+ ocp_index = (addr & 0x0fff) | 0xb000;
+ ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
}
-static int r8152_mdio_read(struct r8152 *tp, u32 reg_addr)
+static inline void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value)
{
- u32 ocp_data;
- int i;
-
- ocp_data = (reg_addr & 0x1f) << 16;
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_PHYAR, ocp_data);
+ ocp_reg_write(tp, OCP_BASE_MII + reg_addr * 2, value);
+}
- for (i = 20; i > 0; i--) {
- udelay(25);
- ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_PHYAR);
- if (ocp_data & PHYAR_FLAG)
- break;
- }
- udelay(20);
+static inline int r8152_mdio_read(struct r8152 *tp, u32 reg_addr)
+{
+ return ocp_reg_read(tp, OCP_BASE_MII + reg_addr * 2);
+}
- if (!(ocp_data & PHYAR_FLAG))
- return -EAGAIN;
+static void sram_write(struct r8152 *tp, u16 addr, u16 data)
+{
+ ocp_reg_write(tp, OCP_SRAM_ADDR, addr);
+ ocp_reg_write(tp, OCP_SRAM_DATA, data);
+}
- return (u16)(ocp_data & 0xffff);
+static u16 sram_read(struct r8152 *tp, u16 addr)
+{
+ ocp_reg_write(tp, OCP_SRAM_ADDR, addr);
+ return ocp_reg_read(tp, OCP_SRAM_DATA);
}
static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
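The hunk above drops the PLA_PHYAR polling loop and routes all PHY access through ocp_reg_read()/ocp_reg_write(), which page the GPHY OCP space via PLA_OCP_GPHY_BASE: the top nibble of the 16-bit address selects the page and the low 12 bits become an index in the 0xb000 window, with MII register N at OCP_BASE_MII + 2 * N. A minimal standalone sketch of that address split (split_ocp_addr() is a hypothetical helper; the constants come from the patch):

#include <stdint.h>
#include <stdio.h>

#define OCP_BASE_MII 0xa400	/* as defined in the patch */

/* Split a GPHY OCP address into the page written to PLA_OCP_GPHY_BASE and
 * the word index used for the actual access, mirroring ocp_reg_read().
 */
static void split_ocp_addr(uint16_t addr, uint16_t *base, uint16_t *index)
{
	*base  = addr & 0xf000;
	*index = (addr & 0x0fff) | 0xb000;
}

int main(void)
{
	uint16_t base, index;

	/* MII register 0 (BMCR) maps to page 0xa000, index 0xb400 */
	split_ocp_addr(OCP_BASE_MII + 2 * 0, &base, &index);
	printf("base = 0x%04x, index = 0x%04x\n", base, index);
	return 0;
}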
@@ -715,20 +883,6 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
r8152_mdio_write(tp, reg, val);
}
-static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
-{
- u16 ocp_base, ocp_index;
-
- ocp_base = addr & 0xf000;
- if (ocp_base != tp->ocp_base) {
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base);
- tp->ocp_base = ocp_base;
- }
-
- ocp_index = (addr & 0x0fff) | 0xb000;
- ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
-}
-
static
int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
@@ -814,10 +968,12 @@ static void read_bulk_callback(struct urb *urb)
case -ENOENT:
return; /* the urb is in unlink state */
case -ETIME:
- pr_warn_ratelimited("may be reset is needed?..\n");
+ if (net_ratelimit())
+ netdev_warn(netdev, "maybe reset is needed?\n");
break;
default:
- pr_warn_ratelimited("Rx status %d\n", status);
+ if (net_ratelimit())
+ netdev_warn(netdev, "Rx status %d\n", status);
break;
}
@@ -850,7 +1006,8 @@ static void write_bulk_callback(struct urb *urb)
stats = rtl8152_get_stats(tp->netdev);
if (status) {
- pr_warn_ratelimited("Tx status %d\n", status);
+ if (net_ratelimit())
+ netdev_warn(tp->netdev, "Tx status %d\n", status);
stats->tx_errors += agg->skb_num;
} else {
stats->tx_packets += agg->skb_num;
@@ -927,17 +1084,17 @@ resubmit:
netif_device_detach(tp->netdev);
else if (res)
netif_err(tp, intr, tp->netdev,
- "can't resubmit intr, status %d\n", res);
+ "can't resubmit intr, status %d\n", res);
}
static inline void *rx_agg_align(void *data)
{
- return (void *)ALIGN((uintptr_t)data, 8);
+ return (void *)ALIGN((uintptr_t)data, RX_ALIGN);
}
static inline void *tx_agg_align(void *data)
{
- return (void *)ALIGN((uintptr_t)data, 4);
+ return (void *)ALIGN((uintptr_t)data, TX_ALIGN);
}
static void free_all_mem(struct r8152 *tp)
@@ -945,40 +1102,28 @@ static void free_all_mem(struct r8152 *tp)
int i;
for (i = 0; i < RTL8152_MAX_RX; i++) {
- if (tp->rx_info[i].urb) {
- usb_free_urb(tp->rx_info[i].urb);
- tp->rx_info[i].urb = NULL;
- }
+ usb_free_urb(tp->rx_info[i].urb);
+ tp->rx_info[i].urb = NULL;
- if (tp->rx_info[i].buffer) {
- kfree(tp->rx_info[i].buffer);
- tp->rx_info[i].buffer = NULL;
- tp->rx_info[i].head = NULL;
- }
+ kfree(tp->rx_info[i].buffer);
+ tp->rx_info[i].buffer = NULL;
+ tp->rx_info[i].head = NULL;
}
for (i = 0; i < RTL8152_MAX_TX; i++) {
- if (tp->tx_info[i].urb) {
- usb_free_urb(tp->tx_info[i].urb);
- tp->tx_info[i].urb = NULL;
- }
+ usb_free_urb(tp->tx_info[i].urb);
+ tp->tx_info[i].urb = NULL;
- if (tp->tx_info[i].buffer) {
- kfree(tp->tx_info[i].buffer);
- tp->tx_info[i].buffer = NULL;
- tp->tx_info[i].head = NULL;
- }
+ kfree(tp->tx_info[i].buffer);
+ tp->tx_info[i].buffer = NULL;
+ tp->tx_info[i].head = NULL;
}
- if (tp->intr_urb) {
- usb_free_urb(tp->intr_urb);
- tp->intr_urb = NULL;
- }
+ usb_free_urb(tp->intr_urb);
+ tp->intr_urb = NULL;
- if (tp->intr_buff) {
- kfree(tp->intr_buff);
- tp->intr_buff = NULL;
- }
+ kfree(tp->intr_buff);
+ tp->intr_buff = NULL;
}
static int alloc_all_mem(struct r8152 *tp)
@@ -1006,7 +1151,8 @@ static int alloc_all_mem(struct r8152 *tp)
if (buf != rx_agg_align(buf)) {
kfree(buf);
- buf = kmalloc_node(rx_buf_sz + 8, GFP_KERNEL, node);
+ buf = kmalloc_node(rx_buf_sz + RX_ALIGN, GFP_KERNEL,
+ node);
if (!buf)
goto err1;
}
@@ -1031,7 +1177,8 @@ static int alloc_all_mem(struct r8152 *tp)
if (buf != tx_agg_align(buf)) {
kfree(buf);
- buf = kmalloc_node(rx_buf_sz + 4, GFP_KERNEL, node);
+ buf = kmalloc_node(rx_buf_sz + TX_ALIGN, GFP_KERNEL,
+ node);
if (!buf)
goto err1;
}
@@ -1231,7 +1378,7 @@ static void rx_bottom(struct r8152 *tp)
stats = rtl8152_get_stats(netdev);
- pkt_len -= 4; /* CRC */
+ pkt_len -= CRC_SIZE;
rx_data += sizeof(struct rx_desc);
skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
@@ -1246,7 +1393,7 @@ static void rx_bottom(struct r8152 *tp)
stats->rx_packets++;
stats->rx_bytes += pkt_len;
- rx_data = rx_agg_align(rx_data + pkt_len + 4);
+ rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE);
rx_desc = (struct rx_desc *)rx_data;
len_used = (int)(rx_data - (u8 *)agg->head);
len_used += sizeof(struct rx_desc);
@@ -1336,7 +1483,7 @@ static void rtl8152_tx_timeout(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int i;
- netif_warn(tp, tx_err, netdev, "Tx timeout.\n");
+ netif_warn(tp, tx_err, netdev, "Tx timeout\n");
for (i = 0; i < RTL8152_MAX_TX; i++)
usb_unlink_urb(tp->tx_info[i].urb);
}
@@ -1449,13 +1596,11 @@ static inline u8 rtl8152_get_speed(struct r8152 *tp)
return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS);
}
-static int rtl8152_enable(struct r8152 *tp)
+static void rtl_set_eee_plus(struct r8152 *tp)
{
u32 ocp_data;
- int i, ret;
u8 speed;
- set_tx_qlen(tp);
speed = rtl8152_get_speed(tp);
if (speed & _10bps) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
@@ -1466,6 +1611,12 @@ static int rtl8152_enable(struct r8152 *tp)
ocp_data &= ~EEEP_CR_EEEP_TX;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
}
+}
+
+static int rtl_enable(struct r8152 *tp)
+{
+ u32 ocp_data;
+ int i, ret;
r8152b_reset_packet_filter(tp);
@@ -1487,6 +1638,47 @@ static int rtl8152_enable(struct r8152 *tp)
return ret;
}
+static int rtl8152_enable(struct r8152 *tp)
+{
+ set_tx_qlen(tp);
+ rtl_set_eee_plus(tp);
+
+ return rtl_enable(tp);
+}
+
+static void r8153_set_rx_agg(struct r8152 *tp)
+{
+ u8 speed;
+
+ speed = rtl8152_get_speed(tp);
+ if (speed & _1000bps) {
+ if (tp->udev->speed == USB_SPEED_SUPER) {
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH,
+ RX_THR_SUPPER);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
+ EARLY_AGG_SUPPER);
+ } else {
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH,
+ RX_THR_HIGH);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
+ EARLY_AGG_HIGH);
+ }
+ } else {
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_SLOW);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
+ EARLY_AGG_SLOW);
+ }
+}
+
+static int rtl8153_enable(struct r8152 *tp)
+{
+ set_tx_qlen(tp);
+ rtl_set_eee_plus(tp);
+ r8153_set_rx_agg(tp);
+
+ return rtl_enable(tp);
+}
+
static void rtl8152_disable(struct r8152 *tp)
{
struct net_device_stats *stats = rtl8152_get_stats(tp->netdev);
@@ -1596,7 +1788,7 @@ static void r8152b_exit_oob(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL);
ocp_write_byte(tp, MCU_TYPE_USB, USB_TX_AGG, TX_AGG_MAX_THRESHOLD);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_BUF_THR);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_HIGH);
ocp_write_dword(tp, MCU_TYPE_USB, USB_TX_DMA,
TEST_MODE_DISABLE | TX_SIZE_ADJUST1);
@@ -1613,8 +1805,8 @@ static void r8152b_exit_oob(struct r8152 *tp)
static void r8152b_enter_oob(struct r8152 *tp)
{
- u32 ocp_data;
- int i;
+ u32 ocp_data;
+ int i;
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data &= ~NOW_IS_OOB;
@@ -1685,15 +1877,269 @@ static inline void r8152b_enable_aldps(struct r8152 *tp)
LINKENA | DIS_SDSAVE);
}
+static void r8153_hw_phy_cfg(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u16 data;
+
+ ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+ r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE);
+
+ if (tp->version == RTL_VER_03) {
+ data = ocp_reg_read(tp, OCP_EEE_CFG);
+ data &= ~CTAP_SHORT_EN;
+ ocp_reg_write(tp, OCP_EEE_CFG, data);
+ }
+
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ data |= EEE_CLKDIV_EN;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+
+ data = ocp_reg_read(tp, OCP_DOWN_SPEED);
+ data |= EN_10M_BGOFF;
+ ocp_reg_write(tp, OCP_DOWN_SPEED, data);
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ data |= EN_10M_PLLOFF;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+ data = sram_read(tp, SRAM_IMPEDANCE);
+ data &= ~RX_DRIVING_MASK;
+ sram_write(tp, SRAM_IMPEDANCE, data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
+ ocp_data |= PFM_PWM_SWITCH;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
+
+ data = sram_read(tp, SRAM_LPF_CFG);
+ data |= LPF_AUTO_TUNE;
+ sram_write(tp, SRAM_LPF_CFG, data);
+
+ data = sram_read(tp, SRAM_10M_AMP1);
+ data |= GDAC_IB_UPALL;
+ sram_write(tp, SRAM_10M_AMP1, data);
+ data = sram_read(tp, SRAM_10M_AMP2);
+ data |= AMP_DN;
+ sram_write(tp, SRAM_10M_AMP2, data);
+}
+
+static void r8153_u1u2en(struct r8152 *tp, int enable)
+{
+ u8 u1u2[8];
+
+ if (enable)
+ memset(u1u2, 0xff, sizeof(u1u2));
+ else
+ memset(u1u2, 0x00, sizeof(u1u2));
+
+ usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
+}
+
+static void r8153_u2p3en(struct r8152 *tp, int enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
+ if (enable)
+ ocp_data |= U2P3_ENABLE;
+ else
+ ocp_data &= ~U2P3_ENABLE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
+}
+
+static void r8153_power_cut_en(struct r8152 *tp, int enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
+ if (enable)
+ ocp_data |= PWR_EN | PHASE2_EN;
+ else
+ ocp_data &= ~(PWR_EN | PHASE2_EN);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ ocp_data &= ~PCUT_STATUS;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
+}
+
+static void r8153_teredo_off(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
+ ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
+}
+
+static void r8153_first_init(struct r8152 *tp)
+{
+ u32 ocp_data;
+ int i;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+ ocp_data |= RXDY_GATED_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+
+ r8153_teredo_off(tp);
+
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data &= ~RCR_ACPT_ALL;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+
+ r8153_hw_phy_cfg(tp);
+
+ rtl8152_nic_reset(tp);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ ocp_data &= ~NOW_IS_OOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data &= ~MCU_BORW_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ mdelay(1);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data |= RE_INIT_LL;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ mdelay(1);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
+ ocp_data &= ~CPCR_RX_VLAN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0);
+ ocp_data |= TCR0_AUTO_FIFO;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR0, ocp_data);
+
+ rtl8152_nic_reset(tp);
+
+ /* rx share fifo credit full threshold */
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_NORMAL);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL);
+ /* TX share fifo free credit full threshold */
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2);
+
+ /* rx aggregation */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
+ ocp_data &= ~RX_AGG_DISABLE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
+}
+
+static void r8153_enter_oob(struct r8152 *tp)
+{
+ u32 ocp_data;
+ int i;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ ocp_data &= ~NOW_IS_OOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+ rtl8152_disable(tp);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ mdelay(1);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data |= RE_INIT_LL;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ mdelay(1);
+ }
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
+ ocp_data |= MAGIC_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
+ ocp_data &= ~TEREDO_WAKE_MASK;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
+ ocp_data |= CPCR_RX_VLAN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR);
+ ocp_data |= ALDPS_PROXY_MODE;
+ ocp_write_word(tp, MCU_TYPE_PLA, PAL_BDC_CR, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+ ocp_data &= ~RXDY_GATED_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data |= RCR_APM | RCR_AM | RCR_AB;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+}
+
+static void r8153_disable_aldps(struct r8152 *tp)
+{
+ u16 data;
+
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ data &= ~EN_ALDPS;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+ msleep(20);
+}
+
+static void r8153_enable_aldps(struct r8152 *tp)
+{
+ u16 data;
+
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ data |= EN_ALDPS;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+}
+
static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
{
- u16 bmcr, anar;
+ u16 bmcr, anar, gbcr;
int ret = 0;
cancel_delayed_work_sync(&tp->schedule);
anar = r8152_mdio_read(tp, MII_ADVERTISE);
anar &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL);
+ if (tp->mii.supports_gmii) {
+ gbcr = r8152_mdio_read(tp, MII_CTRL1000);
+ gbcr &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+ } else {
+ gbcr = 0;
+ }
if (autoneg == AUTONEG_DISABLE) {
if (speed == SPEED_10) {
@@ -1702,6 +2148,9 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
} else if (speed == SPEED_100) {
bmcr = BMCR_SPEED100;
anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
+ } else if (speed == SPEED_1000 && tp->mii.supports_gmii) {
+ bmcr = BMCR_SPEED1000;
+ gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
} else {
ret = -EINVAL;
goto out;
@@ -1723,6 +2172,16 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
anar |= ADVERTISE_10HALF;
anar |= ADVERTISE_100HALF;
}
+ } else if (speed == SPEED_1000 && tp->mii.supports_gmii) {
+ if (duplex == DUPLEX_FULL) {
+ anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
+ anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
+ gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
+ } else {
+ anar |= ADVERTISE_10HALF;
+ anar |= ADVERTISE_100HALF;
+ gbcr |= ADVERTISE_1000HALF;
+ }
} else {
ret = -EINVAL;
goto out;
@@ -1731,6 +2190,9 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
}
+ if (tp->mii.supports_gmii)
+ r8152_mdio_write(tp, MII_CTRL1000, gbcr);
+
r8152_mdio_write(tp, MII_ADVERTISE, anar);
r8152_mdio_write(tp, MII_BMCR, bmcr);
@@ -1752,6 +2214,15 @@ static void rtl8152_down(struct r8152 *tp)
r8152b_enable_aldps(tp);
}
+static void rtl8153_down(struct r8152 *tp)
+{
+ r8153_u1u2en(tp, 0);
+ r8153_power_cut_en(tp, 0);
+ r8153_disable_aldps(tp);
+ r8153_enter_oob(tp);
+ r8153_enable_aldps(tp);
+}
+
static void set_carrier(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
@@ -1762,7 +2233,7 @@ static void set_carrier(struct r8152 *tp)
if (speed & LINK_STATUS) {
if (!(tp->speed & LINK_STATUS)) {
- rtl8152_enable(tp);
+ tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
netif_carrier_on(netdev);
}
@@ -1770,7 +2241,7 @@ static void set_carrier(struct r8152 *tp)
if (tp->speed & LINK_STATUS) {
netif_carrier_off(netdev);
tasklet_disable(&tp->tl);
- rtl8152_disable(tp);
+ tp->rtl_ops.disable(tp);
tasklet_enable(&tp->tl);
}
}
@@ -1802,20 +2273,21 @@ static int rtl8152_open(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
+ rtl8152_set_speed(tp, AUTONEG_ENABLE,
+ tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
+ DUPLEX_FULL);
+ tp->speed = 0;
+ netif_carrier_off(netdev);
+ netif_start_queue(netdev);
+ set_bit(WORK_ENABLE, &tp->flags);
res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
if (res) {
if (res == -ENODEV)
netif_device_detach(tp->netdev);
- netif_warn(tp, ifup, netdev,
- "intr_urb submit failed: %d\n", res);
- return res;
+ netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
+ res);
}
- rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
- tp->speed = 0;
- netif_carrier_off(netdev);
- netif_start_queue(netdev);
- set_bit(WORK_ENABLE, &tp->flags);
return res;
}
@@ -1825,12 +2297,12 @@ static int rtl8152_close(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
- usb_kill_urb(tp->intr_urb);
clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
tasklet_disable(&tp->tl);
- rtl8152_disable(tp);
+ tp->rtl_ops.disable(tp);
tasklet_enable(&tp->tl);
return res;
@@ -1851,9 +2323,16 @@ static void rtl_clear_bp(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
}
+static void r8153_clear_bp(struct r8152 *tp)
+{
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
+ rtl_clear_bp(tp);
+}
+
static void r8152b_enable_eee(struct r8152 *tp)
{
- u32 ocp_data;
+ u32 ocp_data;
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
ocp_data |= EEE_RX_EN | EEE_TX_EN;
@@ -1874,6 +2353,22 @@ static void r8152b_enable_eee(struct r8152 *tp)
ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
}
+static void r8153_enable_eee(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u16 data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+ ocp_data |= EEE_RX_EN | EEE_TX_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+ data = ocp_reg_read(tp, OCP_EEE_CFG);
+ data |= EEE10_EN;
+ ocp_reg_write(tp, OCP_EEE_CFG, data);
+ data = ocp_reg_read(tp, OCP_EEE_CFG2);
+ data |= MY1000_EEE | MY100_EEE;
+ ocp_reg_write(tp, OCP_EEE_CFG2, data);
+}
+
static void r8152b_enable_fc(struct r8152 *tp)
{
u16 anar;
@@ -1909,7 +2404,7 @@ static void r8152b_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
- ocp_data &= ~RWSUME_INDICATE;
+ ocp_data &= ~RESUME_INDICATE;
ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
r8152b_exit_oob(tp);
@@ -1943,6 +2438,75 @@ static void r8152b_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
}
+static void r8153_init(struct r8152 *tp)
+{
+ u32 ocp_data;
+ int i;
+
+ r8153_u1u2en(tp, 0);
+
+ for (i = 0; i < 500; i++) {
+ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ AUTOLOAD_DONE)
+ break;
+ msleep(20);
+ }
+
+ for (i = 0; i < 500; i++) {
+ ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
+ if (ocp_data == PHY_STAT_LAN_ON || ocp_data == PHY_STAT_PWRDN)
+ break;
+ msleep(20);
+ }
+
+ r8153_u2p3en(tp, 0);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL);
+ ocp_data &= ~TIMER11_EN;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL, ocp_data);
+
+ r8153_clear_bp(tp);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
+ ocp_data &= ~LED_MODE_MASK;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL);
+ ocp_data &= ~LPM_TIMER_MASK;
+ if (tp->udev->speed == USB_SPEED_SUPER)
+ ocp_data |= LPM_TIMER_500US;
+ else
+ ocp_data |= LPM_TIMER_500MS;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2);
+ ocp_data &= ~SEN_VAL_MASK;
+ ocp_data |= SEN_VAL_NORMAL | SEL_RXIDLE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2, ocp_data);
+
+ r8153_power_cut_en(tp, 0);
+ r8153_u1u2en(tp, 1);
+
+ r8153_first_init(tp);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
+ PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
+ U1U2_SPDWN_EN | L1_SPDWN_EN);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
+ PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
+ TP100_SPDWN_EN | TP500_SPDWN_EN | TP1000_SPDWN_EN |
+ EEE_SPDWN_EN);
+
+ r8153_enable_eee(tp);
+ r8153_enable_aldps(tp);
+ r8152b_enable_fc(tp);
+
+ r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
+ BMCR_ANRESTART);
+}
+
static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
{
struct r8152 *tp = usb_get_intfdata(intf);
@@ -1956,7 +2520,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
tasklet_disable(&tp->tl);
}
- rtl8152_down(tp);
+ tp->rtl_ops.down(tp);
return 0;
}
@@ -1965,10 +2529,12 @@ static int rtl8152_resume(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
- r8152b_init(tp);
+ tp->rtl_ops.init(tp);
netif_device_attach(tp->netdev);
if (netif_running(tp->netdev)) {
- rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+ rtl8152_set_speed(tp, AUTONEG_ENABLE,
+ tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
+ DUPLEX_FULL);
tp->speed = 0;
netif_carrier_off(tp->netdev);
set_bit(WORK_ENABLE, &tp->flags);
@@ -2072,6 +2638,18 @@ static void r8152b_get_version(struct r8152 *tp)
case 0x4c10:
tp->version = RTL_VER_02;
break;
+ case 0x5c00:
+ tp->version = RTL_VER_03;
+ tp->mii.supports_gmii = 1;
+ break;
+ case 0x5c10:
+ tp->version = RTL_VER_04;
+ tp->mii.supports_gmii = 1;
+ break;
+ case 0x5c20:
+ tp->version = RTL_VER_05;
+ tp->mii.supports_gmii = 1;
+ break;
default:
netif_info(tp, probe, tp->netdev,
"Unknown version 0x%04x\n", version);
@@ -2079,6 +2657,80 @@ static void r8152b_get_version(struct r8152 *tp)
}
}
+static void rtl8152_unload(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ if (tp->version != RTL_VER_01) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
+ ocp_data |= POWER_CUT;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
+ ocp_data &= ~RESUME_INDICATE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
+}
+
+static void rtl8153_unload(struct r8152 *tp)
+{
+ r8153_power_cut_en(tp, 1);
+}
+
+static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
+{
+ struct rtl_ops *ops = &tp->rtl_ops;
+ int ret = -ENODEV;
+
+ switch (id->idVendor) {
+ case VENDOR_ID_REALTEK:
+ switch (id->idProduct) {
+ case PRODUCT_ID_RTL8152:
+ ops->init = r8152b_init;
+ ops->enable = rtl8152_enable;
+ ops->disable = rtl8152_disable;
+ ops->down = rtl8152_down;
+ ops->unload = rtl8152_unload;
+ ret = 0;
+ break;
+ case PRODUCT_ID_RTL8153:
+ ops->init = r8153_init;
+ ops->enable = rtl8153_enable;
+ ops->disable = rtl8152_disable;
+ ops->down = rtl8153_down;
+ ops->unload = rtl8153_unload;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case VENDOR_ID_SAMSUNG:
+ switch (id->idProduct) {
+ case PRODUCT_ID_SAMSUNG:
+ ops->init = r8153_init;
+ ops->enable = rtl8153_enable;
+ ops->disable = rtl8152_disable;
+ ops->down = rtl8153_down;
+ ops->unload = rtl8153_unload;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (ret)
+ netif_err(tp, probe, tp->netdev, "Unknown Device\n");
+
+ return ret;
+}
+
static int rtl8152_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -2087,14 +2739,9 @@ static int rtl8152_probe(struct usb_interface *intf,
struct net_device *netdev;
int ret;
- if (udev->actconfig->desc.bConfigurationValue != 1) {
- usb_driver_set_configuration(udev, 1);
- return -ENODEV;
- }
-
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
- dev_err(&intf->dev, "Out of memory");
+ dev_err(&intf->dev, "Out of memory\n");
return -ENOMEM;
}
@@ -2102,12 +2749,17 @@ static int rtl8152_probe(struct usb_interface *intf,
tp = netdev_priv(netdev);
tp->msg_enable = 0x7FFF;
- tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
- INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
-
tp->udev = udev;
tp->netdev = netdev;
tp->intf = intf;
+
+ ret = rtl_ops_init(tp, id);
+ if (ret)
+ goto out;
+
+ tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
+ INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
+
netdev->netdev_ops = &rtl8152_netdev_ops;
netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
@@ -2124,7 +2776,7 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->mii.supports_gmii = 0;
r8152b_get_version(tp);
- r8152b_init(tp);
+ tp->rtl_ops.init(tp);
set_ethernet_addr(tp);
ret = alloc_all_mem(tp);
@@ -2135,11 +2787,11 @@ static int rtl8152_probe(struct usb_interface *intf,
ret = register_netdev(netdev);
if (ret != 0) {
- netif_err(tp, probe, netdev, "couldn't register the device");
+ netif_err(tp, probe, netdev, "couldn't register the device\n");
goto out1;
}
- netif_info(tp, probe, netdev, "%s", DRIVER_VERSION);
+ netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
@@ -2150,21 +2802,6 @@ out:
return ret;
}
-static void rtl8152_unload(struct r8152 *tp)
-{
- u32 ocp_data;
-
- if (tp->version != RTL_VER_01) {
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
- ocp_data |= POWER_CUT;
- ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
- }
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
- ocp_data &= ~RWSUME_INDICATE;
- ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
-}
-
static void rtl8152_disconnect(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
@@ -2174,7 +2811,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
set_bit(RTL8152_UNPLUG, &tp->flags);
tasklet_kill(&tp->tl);
unregister_netdev(tp->netdev);
- rtl8152_unload(tp);
+ tp->rtl_ops.unload(tp);
free_all_mem(tp);
free_netdev(tp->netdev);
}
@@ -2182,7 +2819,9 @@ static void rtl8152_disconnect(struct usb_interface *intf)
/* table of devices that work with this driver */
static struct usb_device_id rtl8152_table[] = {
- {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)},
{}
};
diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c
index 2df2f4fb42a7..f0a8791b7636 100644
--- a/drivers/net/usb/r815x.c
+++ b/drivers/net/usb/r815x.c
@@ -216,21 +216,13 @@ static const struct usb_device_id products[] = {
{
USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
-#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
- .driver_info = 0,
-#else
.driver_info = (unsigned long) &r8152_info,
-#endif
},
{
USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
-#if defined(CONFIG_USB_RTL8153) || defined(CONFIG_USB_RTL8153_MODULE)
- .driver_info = 0,
-#else
.driver_info = (unsigned long) &r8153_info,
-#endif
},
{ }, /* END */
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index cc49aac70224..a48bc0f20c1a 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -13,11 +13,9 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 6cbdac67f3a0..da2c4583bd2d 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -6,7 +6,6 @@
* version 2 as published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index a79e9d334928..a251588762ec 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -21,8 +21,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define DRIVER_VERSION "v.2.0"
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 66ebbacf066f..f17b9e02dd34 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -13,14 +13,12 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*****************************************************************************/
#include <linux/module.h>
#include <linux/kmod.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/smsc75xx.h b/drivers/net/usb/smsc75xx.h
index 67eba39e6ee2..2c7ea8fd184f 100644
--- a/drivers/net/usb/smsc75xx.h
+++ b/drivers/net/usb/smsc75xx.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*****************************************************************************/
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 3f38ba868f61..8dd54a0f7b29 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -13,14 +13,12 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*****************************************************************************/
#include <linux/module.h>
#include <linux/kmod.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/smsc95xx.h b/drivers/net/usb/smsc95xx.h
index f360ee372554..526faa0c44e6 100644
--- a/drivers/net/usb/smsc95xx.h
+++ b/drivers/net/usb/smsc95xx.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*****************************************************************************/
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 7ec3e0ee0783..99b69af14274 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stddef.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
new file mode 100644
index 000000000000..4175eb9fdeca
--- /dev/null
+++ b/drivers/net/usb/sr9800.c
@@ -0,0 +1,870 @@
+/* CoreChip-sz SR9800 one chip USB 2.0 Ethernet Devices
+ *
+ * Author : Liu Junliang <liujunliang_ljl@163.com>
+ *
+ * Based on asix_common.c, asix_devices.c
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+
+#include "sr9800.h"
+
+static int sr_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ int err;
+
+ err = usbnet_read_cmd(dev, cmd, SR_REQ_RD_REG, value, index,
+ data, size);
+ if ((err != size) && (err >= 0))
+ err = -EINVAL;
+
+ return err;
+}
+
+static int sr_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ int err;
+
+ err = usbnet_write_cmd(dev, cmd, SR_REQ_WR_REG, value, index,
+ data, size);
+ if ((err != size) && (err >= 0))
+ err = -EINVAL;
+
+ return err;
+}
+
+static void
+sr_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ usbnet_write_cmd_async(dev, cmd, SR_REQ_WR_REG, value, index, data,
+ size);
+}
+
+static int sr_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ int offset = 0;
+
+ while (offset + sizeof(u32) < skb->len) {
+ struct sk_buff *sr_skb;
+ u16 size;
+ u32 header = get_unaligned_le32(skb->data + offset);
+
+ offset += sizeof(u32);
+ /* get the packet length */
+ size = (u16) (header & 0x7ff);
+ if (size != ((~header >> 16) & 0x07ff)) {
+ netdev_err(dev->net, "%s : Bad Header Length\n",
+ __func__);
+ return 0;
+ }
+
+ if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
+ (size + offset > skb->len)) {
+ netdev_err(dev->net, "%s : Bad RX Length %d\n",
+ __func__, size);
+ return 0;
+ }
+ sr_skb = netdev_alloc_skb_ip_align(dev->net, size);
+ if (!sr_skb)
+ return 0;
+
+ skb_put(sr_skb, size);
+ memcpy(sr_skb->data, skb->data + offset, size);
+ usbnet_skb_return(dev, sr_skb);
+
+ offset += (size + 1) & 0xfffe;
+ }
+
+ if (skb->len != offset) {
+ netdev_err(dev->net, "%s : Bad SKB Length %d\n", __func__,
+ skb->len);
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+ u32 padbytes = 0xffff0000;
+ u32 packet_len;
+ int padlen;
+
+ padlen = ((skb->len + 4) % (dev->maxpacket - 1)) ? 0 : 4;
+
+ if ((!skb_cloned(skb)) && ((headroom + tailroom) >= (4 + padlen))) {
+ if ((headroom < 4) || (tailroom < padlen)) {
+ skb->data = memmove(skb->head + 4, skb->data,
+ skb->len);
+ skb_set_tail_pointer(skb, skb->len);
+ }
+ } else {
+ struct sk_buff *skb2;
+ skb2 = skb_copy_expand(skb, 4, padlen, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return NULL;
+ }
+
+ skb_push(skb, 4);
+ packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
+ cpu_to_le32s(&packet_len);
+ skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
+
+ if (padlen) {
+ cpu_to_le32s(&padbytes);
+ memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
+ skb_put(skb, sizeof(padbytes));
+ }
+
+ return skb;
+}
+
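The TX and RX fixups above frame every packet with a 32-bit header: the payload length in the low 16 bits and its bitwise complement in the high 16 bits, which sr_rx_fixup() cross-checks (masked to 11 bits) before accepting a frame. A minimal standalone sketch of that packing and check, not part of the driver; the 1514-byte length is an arbitrary example:

#include <stdint.h>
#include <stdio.h>

/* Pack a payload length the way sr_tx_fixup() does: length in the low
 * 16 bits, complemented length in the high 16 bits.
 */
static uint32_t sr_pack_header(uint16_t len)
{
        return ((uint32_t)(len ^ 0xffff) << 16) | len;
}

/* Validate a header the way sr_rx_fixup() does: the low 11 bits must match
 * the complemented copy held in bits 16..26.
 */
static int sr_header_ok(uint32_t header)
{
        uint16_t size = header & 0x7ff;

        return size == ((~header >> 16) & 0x07ff);
}

int main(void)
{
        uint32_t hdr = sr_pack_header(1514);    /* arbitrary frame size */

        printf("header=0x%08x valid=%d\n", (unsigned int)hdr, sr_header_ok(hdr));
        return 0;
}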
+static void sr_status(struct usbnet *dev, struct urb *urb)
+{
+ struct sr9800_int_data *event;
+ int link;
+
+ if (urb->actual_length < 8)
+ return;
+
+ event = urb->transfer_buffer;
+ link = event->link & 0x01;
+ if (netif_carrier_ok(dev->net) != link) {
+ usbnet_link_change(dev, link, 1);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
+ }
+
+ return;
+}
+
+static inline int sr_set_sw_mii(struct usbnet *dev)
+{
+ int ret;
+
+ ret = sr_write_cmd(dev, SR_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable software MII access\n");
+ return ret;
+}
+
+static inline int sr_set_hw_mii(struct usbnet *dev)
+{
+ int ret;
+
+ ret = sr_write_cmd(dev, SR_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable hardware MII access\n");
+ return ret;
+}
+
+static inline int sr_get_phy_addr(struct usbnet *dev)
+{
+ u8 buf[2];
+ int ret;
+
+ ret = sr_read_cmd(dev, SR_CMD_READ_PHY_ID, 0, 0, 2, buf);
+ if (ret < 0) {
+ netdev_err(dev->net, "%s : Error reading PHYID register:%02x\n",
+ __func__, ret);
+ goto out;
+ }
+ netdev_dbg(dev->net, "%s : returning 0x%04x\n", __func__,
+ *((__le16 *)buf));
+
+ ret = buf[1];
+
+out:
+ return ret;
+}
+
+static int sr_sw_reset(struct usbnet *dev, u8 flags)
+{
+ int ret;
+
+ ret = sr_write_cmd(dev, SR_CMD_SW_RESET, flags, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to send software reset:%02x\n",
+ ret);
+
+ return ret;
+}
+
+static u16 sr_read_rx_ctl(struct usbnet *dev)
+{
+ __le16 v;
+ int ret;
+
+ ret = sr_read_cmd(dev, SR_CMD_READ_RX_CTL, 0, 0, 2, &v);
+ if (ret < 0) {
+ netdev_err(dev->net, "Error reading RX_CTL register:%02x\n",
+ ret);
+ goto out;
+ }
+
+ ret = le16_to_cpu(v);
+out:
+ return ret;
+}
+
+static int sr_write_rx_ctl(struct usbnet *dev, u16 mode)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "%s : mode = 0x%04x\n", __func__, mode);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net,
+ "Failed to write RX_CTL mode to 0x%04x:%02x\n",
+ mode, ret);
+
+ return ret;
+}
+
+static u16 sr_read_medium_status(struct usbnet *dev)
+{
+ __le16 v;
+ int ret;
+
+ ret = sr_read_cmd(dev, SR_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
+ if (ret < 0) {
+ netdev_err(dev->net,
+ "Error reading Medium Status register:%02x\n", ret);
+ return ret; /* TODO: callers not checking for error ret */
+ }
+
+ return le16_to_cpu(v);
+}
+
+static int sr_write_medium_mode(struct usbnet *dev, u16 mode)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "%s : mode = 0x%04x\n", __func__, mode);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net,
+ "Failed to write Medium Mode mode to 0x%04x:%02x\n",
+ mode, ret);
+ return ret;
+}
+
+static int sr_write_gpio(struct usbnet *dev, u16 value, int sleep)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "%s : value = 0x%04x\n", __func__, value);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_GPIOS, value, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to write GPIO value 0x%04x:%02x\n",
+ value, ret);
+ if (sleep)
+ msleep(sleep);
+
+ return ret;
+}
+
+/* SR9800 have a 16-bit RX_CTL value */
+static void sr_set_multicast(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ u16 rx_ctl = SR_DEFAULT_RX_CTL;
+
+ if (net->flags & IFF_PROMISC) {
+ rx_ctl |= SR_RX_CTL_PRO;
+ } else if (net->flags & IFF_ALLMULTI ||
+ netdev_mc_count(net) > SR_MAX_MCAST) {
+ rx_ctl |= SR_RX_CTL_AMALL;
+ } else if (netdev_mc_empty(net)) {
+ /* just broadcast and directed */
+ } else {
+ /* We use the 20 byte dev->data
+ * for our 8 byte filter buffer
+ * to avoid allocating memory that
+ * is tricky to free later
+ */
+ struct netdev_hw_addr *ha;
+ u32 crc_bits;
+
+ memset(data->multi_filter, 0, SR_MCAST_FILTER_SIZE);
+
+ /* Build the multicast hash filter. */
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ data->multi_filter[crc_bits >> 3] |=
+ 1 << (crc_bits & 7);
+ }
+
+ sr_write_cmd_async(dev, SR_CMD_WRITE_MULTI_FILTER, 0, 0,
+ SR_MCAST_FILTER_SIZE, data->multi_filter);
+
+ rx_ctl |= SR_RX_CTL_AM;
+ }
+
+ sr_write_cmd_async(dev, SR_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
+}
+
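sr_set_multicast() above hashes each multicast address with the Ethernet CRC and uses the top six bits as an index into a 64-bit (8-byte) hash filter. A standalone sketch of just the bit-indexing step, using an arbitrary placeholder CRC value instead of the kernel's ether_crc():

#include <stdint.h>
#include <stdio.h>

#define SR_MCAST_FILTER_SIZE    8       /* 8 bytes == 64 hash buckets */

int main(void)
{
        uint8_t multi_filter[SR_MCAST_FILTER_SIZE] = { 0 };
        uint32_t crc = 0x9ad3c2f1;      /* hypothetical CRC of one address */
        uint32_t crc_bits = crc >> 26;  /* bucket index, 0..63 */

        /* Same byte/bit selection as the loop in sr_set_multicast(). */
        multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);

        printf("bucket=%u -> byte %u, bit %u, filter byte=0x%02x\n",
               (unsigned int)crc_bits, (unsigned int)(crc_bits >> 3),
               (unsigned int)(crc_bits & 7), multi_filter[crc_bits >> 3]);
        return 0;
}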
+static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
+{
+ struct usbnet *dev = netdev_priv(net);
+ __le16 res;
+
+ mutex_lock(&dev->phy_mutex);
+ sr_set_sw_mii(dev);
+ sr_read_cmd(dev, SR_CMD_READ_MII_REG, phy_id, (__u16)loc, 2, &res);
+ sr_set_hw_mii(dev);
+ mutex_unlock(&dev->phy_mutex);
+
+ netdev_dbg(dev->net,
+ "%s : phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n", __func__,
+ phy_id, loc, le16_to_cpu(res));
+
+ return le16_to_cpu(res);
+}
+
+static void
+sr_mdio_write(struct net_device *net, int phy_id, int loc, int val)
+{
+ struct usbnet *dev = netdev_priv(net);
+ __le16 res = cpu_to_le16(val);
+
+ netdev_dbg(dev->net,
+ "%s : phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", __func__,
+ phy_id, loc, val);
+ mutex_lock(&dev->phy_mutex);
+ sr_set_sw_mii(dev);
+ sr_write_cmd(dev, SR_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
+ sr_set_hw_mii(dev);
+ mutex_unlock(&dev->phy_mutex);
+}
+
+/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
+static u32 sr_get_phyid(struct usbnet *dev)
+{
+ int phy_reg;
+ u32 phy_id;
+ int i;
+
+ /* Poll for the rare case the FW or phy isn't ready yet. */
+ for (i = 0; i < 100; i++) {
+ phy_reg = sr_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
+ if (phy_reg != 0 && phy_reg != 0xFFFF)
+ break;
+ mdelay(1);
+ }
+
+ if (phy_reg <= 0 || phy_reg == 0xFFFF)
+ return 0;
+
+ phy_id = (phy_reg & 0xffff) << 16;
+
+ phy_reg = sr_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID2);
+ if (phy_reg < 0)
+ return 0;
+
+ phy_id |= (phy_reg & 0xffff);
+
+ return phy_id;
+}
+
+static void
+sr_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 opt;
+
+ if (sr_read_cmd(dev, SR_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0) {
+ wolinfo->supported = 0;
+ wolinfo->wolopts = 0;
+ return;
+ }
+ wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
+ wolinfo->wolopts = 0;
+ if (opt & SR_MONITOR_LINK)
+ wolinfo->wolopts |= WAKE_PHY;
+ if (opt & SR_MONITOR_MAGIC)
+ wolinfo->wolopts |= WAKE_MAGIC;
+}
+
+static int
+sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 opt = 0;
+
+ if (wolinfo->wolopts & WAKE_PHY)
+ opt |= SR_MONITOR_LINK;
+ if (wolinfo->wolopts & WAKE_MAGIC)
+ opt |= SR_MONITOR_MAGIC;
+
+ if (sr_write_cmd(dev, SR_CMD_WRITE_MONITOR_MODE,
+ opt, 0, 0, NULL) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sr_get_eeprom_len(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+
+ return data->eeprom_len;
+}
+
+static int sr_get_eeprom(struct net_device *net,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct usbnet *dev = netdev_priv(net);
+ __le16 *ebuf = (__le16 *)data;
+ int ret;
+ int i;
+
+ /* Crude hack to ensure that we don't overwrite memory
+ * if an odd length is supplied
+ */
+ if (eeprom->len % 2)
+ return -EINVAL;
+
+ eeprom->magic = SR_EEPROM_MAGIC;
+
+ /* sr9800 returns 2 bytes from eeprom on read */
+ for (i = 0; i < eeprom->len / 2; i++) {
+ ret = sr_read_cmd(dev, SR_CMD_READ_EEPROM, eeprom->offset + i,
+ 0, 2, &ebuf[i]);
+ if (ret < 0)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void sr_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+
+ /* Inherit standard device info */
+ usbnet_get_drvinfo(net, info);
+ strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ info->eedump_len = data->eeprom_len;
+}
+
+static u32 sr_get_link(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ return mii_link_ok(&dev->mii);
+}
+
+static int sr_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
+static int sr_set_mac_address(struct net_device *net, void *p)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ struct sockaddr *addr = p;
+
+ if (netif_running(net))
+ return -EBUSY;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+
+ /* We use the 20 byte dev->data
+ * for our 6 byte mac buffer
+ * to avoid allocating memory that
+ * is tricky to free later
+ */
+ memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
+ sr_write_cmd_async(dev, SR_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+
+ return 0;
+}
+
+static const struct ethtool_ops sr9800_ethtool_ops = {
+ .get_drvinfo = sr_get_drvinfo,
+ .get_link = sr_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_wol = sr_get_wol,
+ .set_wol = sr_set_wol,
+ .get_eeprom_len = sr_get_eeprom_len,
+ .get_eeprom = sr_get_eeprom,
+ .get_settings = usbnet_get_settings,
+ .set_settings = usbnet_set_settings,
+ .nway_reset = usbnet_nway_reset,
+};
+
+static int sr9800_link_reset(struct usbnet *dev)
+{
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+ u16 mode;
+
+ mii_check_media(&dev->mii, 1, 1);
+ mii_ethtool_gset(&dev->mii, &ecmd);
+ mode = SR9800_MEDIUM_DEFAULT;
+
+ if (ethtool_cmd_speed(&ecmd) != SPEED_100)
+ mode &= ~SR_MEDIUM_PS;
+
+ if (ecmd.duplex != DUPLEX_FULL)
+ mode &= ~SR_MEDIUM_FD;
+
+ netdev_dbg(dev->net, "%s : speed: %u duplex: %d mode: 0x%04x\n",
+ __func__, ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
+
+ sr_write_medium_mode(dev, mode);
+
+ return 0;
+}
+
+
+static int sr9800_set_default_mode(struct usbnet *dev)
+{
+ u16 rx_ctl;
+ int ret;
+
+ sr_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
+ sr_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
+ ADVERTISE_ALL | ADVERTISE_CSMA);
+ mii_nway_restart(&dev->mii);
+
+ ret = sr_write_medium_mode(dev, SR9800_MEDIUM_DEFAULT);
+ if (ret < 0)
+ goto out;
+
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_IPG012,
+ SR9800_IPG0_DEFAULT | SR9800_IPG1_DEFAULT,
+ SR9800_IPG2_DEFAULT, 0, NULL);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
+ goto out;
+ }
+
+ /* Set RX_CTL to default values with 2k buffer, and enable cactus */
+ ret = sr_write_rx_ctl(dev, SR_DEFAULT_RX_CTL);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
+ rx_ctl);
+
+ rx_ctl = sr_read_medium_status(dev);
+ netdev_dbg(dev->net, "Medium Status:0x%04x after all initializations\n",
+ rx_ctl);
+
+ return 0;
+out:
+ return ret;
+}
+
+static int sr9800_reset(struct usbnet *dev)
+{
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ int ret, embd_phy;
+ u16 rx_ctl;
+
+ ret = sr_write_gpio(dev,
+ SR_GPIO_RSE | SR_GPIO_GPO_2 | SR_GPIO_GPO2EN, 5);
+ if (ret < 0)
+ goto out;
+
+ embd_phy = ((sr_get_phy_addr(dev) & 0x1f) == 0x10 ? 1 : 0);
+
+ ret = sr_write_cmd(dev, SR_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
+ goto out;
+ }
+
+ ret = sr_sw_reset(dev, SR_SWRESET_IPPD | SR_SWRESET_PRL);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ ret = sr_sw_reset(dev, SR_SWRESET_CLEAR);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ if (embd_phy) {
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = sr_sw_reset(dev, SR_SWRESET_PRTE);
+ if (ret < 0)
+ goto out;
+ }
+
+ msleep(150);
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
+ ret = sr_write_rx_ctl(dev, 0x0000);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
+
+ ret = sr_sw_reset(dev, SR_SWRESET_PRL);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL | SR_SWRESET_PRL);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ ret = sr9800_set_default_mode(dev);
+ if (ret < 0)
+ goto out;
+
+ /* Rewrite MAC address */
+ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+ if (ret < 0)
+ goto out;
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static const struct net_device_ops sr9800_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = usbnet_change_mtu,
+ .ndo_set_mac_address = sr_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = sr_ioctl,
+ .ndo_set_rx_mode = sr_set_multicast,
+};
+
+static int sr9800_phy_powerup(struct usbnet *dev)
+{
+ int ret;
+
+ /* set the embedded Ethernet PHY in power-down state */
+ ret = sr_sw_reset(dev, SR_SWRESET_IPPD | SR_SWRESET_IPRL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to power down PHY : %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ /* set the embedded Ethernet PHY in power-up state */
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to reset PHY: %d\n", ret);
+ return ret;
+ }
+ msleep(600);
+
+ /* set the embedded Ethernet PHY in reset state */
+ ret = sr_sw_reset(dev, SR_SWRESET_CLEAR);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to power up PHY: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ /* set the embedded Ethernet PHY in power-up state */
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to reset PHY: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ u16 led01_mux, led23_mux;
+ int ret, embd_phy;
+ u32 phyid;
+ u16 rx_ctl;
+
+ data->eeprom_len = SR9800_EEPROM_LEN;
+
+ usbnet_get_endpoints(dev, intf);
+
+ /* LED Setting Rule :
+ * AABB:CCDD
+ * AA : MFA0(LED0)
+ * BB : MFA1(LED1)
+ * CC : MFA2(LED2), Reserved for SR9800
+ * DD : MFA3(LED3), Reserved for SR9800
+ */
+ led01_mux = (SR_LED_MUX_LINK_ACTIVE << 8) | SR_LED_MUX_LINK;
+ led23_mux = (SR_LED_MUX_LINK_ACTIVE << 8) | SR_LED_MUX_TX_ACTIVE;
+ ret = sr_write_cmd(dev, SR_CMD_LED_MUX, led01_mux, led23_mux, 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "set LINK LED failed : %d\n", ret);
+ goto out;
+ }
+
+ /* Get the MAC address */
+ ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN,
+ dev->net->dev_addr);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
+ return ret;
+ }
+ netdev_dbg(dev->net, "mac addr : %pM\n", dev->net->dev_addr);
+
+ /* Initialize MII structure */
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = sr_mdio_read;
+ dev->mii.mdio_write = sr_mdio_write;
+ dev->mii.phy_id_mask = 0x1f;
+ dev->mii.reg_num_mask = 0x1f;
+ dev->mii.phy_id = sr_get_phy_addr(dev);
+
+ dev->net->netdev_ops = &sr9800_netdev_ops;
+ dev->net->ethtool_ops = &sr9800_ethtool_ops;
+
+ embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
+ /* Reset the PHY to normal operation mode */
+ ret = sr_write_cmd(dev, SR_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Init PHY routine */
+ ret = sr9800_phy_powerup(dev);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
+ ret = sr_write_rx_ctl(dev, 0x0000);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
+
+ /* Read PHYID register *AFTER* the PHY was reset properly */
+ phyid = sr_get_phyid(dev);
+ netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
+
+ /* medium mode setting */
+ ret = sr9800_set_default_mode(dev);
+ if (ret < 0)
+ goto out;
+
+ if (dev->udev->speed == USB_SPEED_HIGH) {
+ ret = sr_write_cmd(dev, SR_CMD_BULKIN_SIZE,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].byte_cnt,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].threshold,
+ 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Reset RX_CTL failed: %d\n", ret);
+ goto out;
+ }
+ dev->rx_urb_size =
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].size;
+ } else {
+ ret = sr_write_cmd(dev, SR_CMD_BULKIN_SIZE,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].byte_cnt,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].threshold,
+ 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Reset RX_CTL failed: %d\n", ret);
+ goto out;
+ }
+ dev->rx_urb_size =
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size;
+ }
+ netdev_dbg(dev->net, "%s : setting rx_urb_size with : %ld\n", __func__,
+ dev->rx_urb_size);
+ return 0;
+
+out:
+ return ret;
+}
+
+static const struct driver_info sr9800_driver_info = {
+ .description = "CoreChip SR9800 USB 2.0 Ethernet",
+ .bind = sr9800_bind,
+ .status = sr_status,
+ .link_reset = sr9800_link_reset,
+ .reset = sr9800_reset,
+ .flags = DRIVER_FLAG,
+ .rx_fixup = sr_rx_fixup,
+ .tx_fixup = sr_tx_fixup,
+};
+
+static const struct usb_device_id products[] = {
+ {
+ USB_DEVICE(0x0fe6, 0x9800), /* SR9800 Device */
+ .driver_info = (unsigned long) &sr9800_driver_info,
+ },
+ {}, /* END */
+};
+
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver sr_driver = {
+ .name = DRIVER_NAME,
+ .id_table = products,
+ .probe = usbnet_probe,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .disconnect = usbnet_disconnect,
+ .supports_autosuspend = 1,
+};
+
+module_usb_driver(sr_driver);
+
+MODULE_AUTHOR("Liu Junliang <liujunliang_ljl@163.com");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_DESCRIPTION("SR9800 USB 2.0 USB2NET Dev : http://www.corechip-sz.com");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/sr9800.h b/drivers/net/usb/sr9800.h
new file mode 100644
index 000000000000..18f670251275
--- /dev/null
+++ b/drivers/net/usb/sr9800.h
@@ -0,0 +1,202 @@
+/* CoreChip-sz SR9800 one chip USB 2.0 Ethernet Devices
+ *
+ * Author : Liu Junliang <liujunliang_ljl@163.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef _SR9800_H
+#define _SR9800_H
+
+/* SR9800 spec. command table on Linux Platform */
+
+/* command : Software Station Management Control Reg */
+#define SR_CMD_SET_SW_MII 0x06
+/* command : PHY Read Reg */
+#define SR_CMD_READ_MII_REG 0x07
+/* command : PHY Write Reg */
+#define SR_CMD_WRITE_MII_REG 0x08
+/* command : Hardware Station Management Control Reg */
+#define SR_CMD_SET_HW_MII 0x0a
+/* command : SROM Read Reg */
+#define SR_CMD_READ_EEPROM 0x0b
+/* command : SROM Write Reg */
+#define SR_CMD_WRITE_EEPROM 0x0c
+/* command : SROM Write Enable Reg */
+#define SR_CMD_WRITE_ENABLE 0x0d
+/* command : SROM Write Disable Reg */
+#define SR_CMD_WRITE_DISABLE 0x0e
+/* command : RX Control Read Reg */
+#define SR_CMD_READ_RX_CTL 0x0f
+#define SR_RX_CTL_PRO (1 << 0)
+#define SR_RX_CTL_AMALL (1 << 1)
+#define SR_RX_CTL_SEP (1 << 2)
+#define SR_RX_CTL_AB (1 << 3)
+#define SR_RX_CTL_AM (1 << 4)
+#define SR_RX_CTL_AP (1 << 5)
+#define SR_RX_CTL_ARP (1 << 6)
+#define SR_RX_CTL_SO (1 << 7)
+#define SR_RX_CTL_RH1M (1 << 8)
+#define SR_RX_CTL_RH2M (1 << 9)
+#define SR_RX_CTL_RH3M (1 << 10)
+/* command : RX Control Write Reg */
+#define SR_CMD_WRITE_RX_CTL 0x10
+/* command : IPG0/IPG1/IPG2 Control Read Reg */
+#define SR_CMD_READ_IPG012 0x11
+/* command : IPG0/IPG1/IPG2 Control Write Reg */
+#define SR_CMD_WRITE_IPG012 0x12
+/* command : Node ID Read Reg */
+#define SR_CMD_READ_NODE_ID 0x13
+/* command : Node ID Write Reg */
+#define SR_CMD_WRITE_NODE_ID 0x14
+/* command : Multicast Filter Array Read Reg */
+#define SR_CMD_READ_MULTI_FILTER 0x15
+/* command : Multicast Filter Array Write Reg */
+#define SR_CMD_WRITE_MULTI_FILTER 0x16
+/* command : Eth/HomePNA PHY Address Reg */
+#define SR_CMD_READ_PHY_ID 0x19
+/* command : Medium Status Read Reg */
+#define SR_CMD_READ_MEDIUM_STATUS 0x1a
+#define SR_MONITOR_LINK (1 << 1)
+#define SR_MONITOR_MAGIC (1 << 2)
+#define SR_MONITOR_HSFS (1 << 4)
+/* command : Medium Status Write Reg */
+#define SR_CMD_WRITE_MEDIUM_MODE 0x1b
+#define SR_MEDIUM_GM (1 << 0)
+#define SR_MEDIUM_FD (1 << 1)
+#define SR_MEDIUM_AC (1 << 2)
+#define SR_MEDIUM_ENCK (1 << 3)
+#define SR_MEDIUM_RFC (1 << 4)
+#define SR_MEDIUM_TFC (1 << 5)
+#define SR_MEDIUM_JFE (1 << 6)
+#define SR_MEDIUM_PF (1 << 7)
+#define SR_MEDIUM_RE (1 << 8)
+#define SR_MEDIUM_PS (1 << 9)
+#define SR_MEDIUM_RSV (1 << 10)
+#define SR_MEDIUM_SBP (1 << 11)
+#define SR_MEDIUM_SM (1 << 12)
+/* command : Monitor Mode Status Read Reg */
+#define SR_CMD_READ_MONITOR_MODE 0x1c
+/* command : Monitor Mode Status Write Reg */
+#define SR_CMD_WRITE_MONITOR_MODE 0x1d
+/* command : GPIO Status Read Reg */
+#define SR_CMD_READ_GPIOS 0x1e
+#define SR_GPIO_GPO0EN (1 << 0) /* GPIO0 Output enable */
+#define SR_GPIO_GPO_0 (1 << 1) /* GPIO0 Output value */
+#define SR_GPIO_GPO1EN (1 << 2) /* GPIO1 Output enable */
+#define SR_GPIO_GPO_1 (1 << 3) /* GPIO1 Output value */
+#define SR_GPIO_GPO2EN (1 << 4) /* GPIO2 Output enable */
+#define SR_GPIO_GPO_2 (1 << 5) /* GPIO2 Output value */
+#define SR_GPIO_RESERVED (1 << 6) /* Reserved */
+#define SR_GPIO_RSE (1 << 7) /* Reload serial EEPROM */
+/* command : GPIO Status Write Reg */
+#define SR_CMD_WRITE_GPIOS 0x1f
+/* command : Eth PHY Power and Reset Control Reg */
+#define SR_CMD_SW_RESET 0x20
+#define SR_SWRESET_CLEAR 0x00
+#define SR_SWRESET_RR (1 << 0)
+#define SR_SWRESET_RT (1 << 1)
+#define SR_SWRESET_PRTE (1 << 2)
+#define SR_SWRESET_PRL (1 << 3)
+#define SR_SWRESET_BZ (1 << 4)
+#define SR_SWRESET_IPRL (1 << 5)
+#define SR_SWRESET_IPPD (1 << 6)
+/* command : Software Interface Selection Status Read Reg */
+#define SR_CMD_SW_PHY_STATUS 0x21
+/* command : Software Interface Selection Status Write Reg */
+#define SR_CMD_SW_PHY_SELECT 0x22
+/* command : BULK in Buffer Size Reg */
+#define SR_CMD_BULKIN_SIZE 0x2A
+/* command : LED_MUX Control Reg */
+#define SR_CMD_LED_MUX 0x70
+#define SR_LED_MUX_TX_ACTIVE (1 << 0)
+#define SR_LED_MUX_RX_ACTIVE (1 << 1)
+#define SR_LED_MUX_COLLISION (1 << 2)
+#define SR_LED_MUX_DUP_COL (1 << 3)
+#define SR_LED_MUX_DUP (1 << 4)
+#define SR_LED_MUX_SPEED (1 << 5)
+#define SR_LED_MUX_LINK_ACTIVE (1 << 6)
+#define SR_LED_MUX_LINK (1 << 7)
+
+/* Register Access Flags */
+#define SR_REQ_RD_REG (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+#define SR_REQ_WR_REG (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+
+/* Multicast Filter Array size & Max Number */
+#define SR_MCAST_FILTER_SIZE 8
+#define SR_MAX_MCAST 64
+
+/* IPG0/1/2 Default Value */
+#define SR9800_IPG0_DEFAULT 0x15
+#define SR9800_IPG1_DEFAULT 0x0c
+#define SR9800_IPG2_DEFAULT 0x12
+
+/* Medium Status Default Mode */
+#define SR9800_MEDIUM_DEFAULT \
+ (SR_MEDIUM_FD | SR_MEDIUM_RFC | \
+ SR_MEDIUM_TFC | SR_MEDIUM_PS | \
+ SR_MEDIUM_AC | SR_MEDIUM_RE)
+
+/* RX Control Default Setting */
+#define SR_DEFAULT_RX_CTL \
+ (SR_RX_CTL_SO | SR_RX_CTL_AB | SR_RX_CTL_RH1M)
+
+/* EEPROM Magic Number & EEPROM Size */
+#define SR_EEPROM_MAGIC 0xdeadbeef
+#define SR9800_EEPROM_LEN 0xff
+
+/* SR9800 Driver Version and Driver Name */
+#define DRIVER_VERSION "11-Nov-2013"
+#define DRIVER_NAME "CoreChips"
+#define DRIVER_FLAG \
+ (FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET)
+
+/* SR9800 BULKIN Buffer Size */
+#define SR9800_MAX_BULKIN_2K 0
+#define SR9800_MAX_BULKIN_4K 1
+#define SR9800_MAX_BULKIN_6K 2
+#define SR9800_MAX_BULKIN_8K 3
+#define SR9800_MAX_BULKIN_16K 4
+#define SR9800_MAX_BULKIN_20K 5
+#define SR9800_MAX_BULKIN_24K 6
+#define SR9800_MAX_BULKIN_32K 7
+
+struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
+ /* 2k */
+ {2048, 0x8000, 0x8001},
+ /* 4k */
+ {4096, 0x8100, 0x8147},
+ /* 6k */
+ {6144, 0x8200, 0x81EB},
+ /* 8k */
+ {8192, 0x8300, 0x83D7},
+ /* 16k */
+ {16384, 0x8400, 0x851E},
+ /* 20k */
+ {20480, 0x8500, 0x8666},
+ /* 24k */
+ {24576, 0x8600, 0x87AE},
+ /* 32k */
+ {32768, 0x8700, 0x8A3D},
+};
+
+/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
+struct sr_data {
+ u8 multi_filter[SR_MCAST_FILTER_SIZE];
+ u8 mac_addr[ETH_ALEN];
+ u8 phymode;
+ u8 ledmode;
+ u8 eeprom_len;
+};
+
+struct sr9800_int_data {
+ __le16 res1;
+ u8 link;
+ __le16 res2;
+ u8 status;
+ __le16 res3;
+} __packed;
+
+#endif /* _SR9800_H */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 8494bb53ebdc..4671da755e7b 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -1245,7 +1244,7 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
return -ENOMEM;
urb->num_sgs = num_sgs;
- sg_init_table(urb->sg, urb->num_sgs);
+ sg_init_table(urb->sg, urb->num_sgs + 1);
sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
total_len += skb_headlen(skb);
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 35c90307d473..6aaa6eb9df72 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -13,15 +13,13 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
// #define DEBUG // error path messages, extra info
// #define VERBOSE // more; success messages
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d208f8604981..d75f8edf4fb3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
//#define DEBUG
#include <linux/netdevice.h>
@@ -27,6 +26,7 @@
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
+#include <linux/average.h>
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
@@ -37,11 +37,18 @@ module_param(gso, bool, 0444);
/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
-#define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \
- sizeof(struct virtio_net_hdr_mrg_rxbuf), \
- L1_CACHE_BYTES))
#define GOOD_COPY_LEN 128
+/* Weight used for the RX packet size EWMA. The average packet size is used to
+ * determine the packet buffer size when refilling RX rings. As the entire RX
+ * ring may be refilled at once, the weight is chosen so that the EWMA will be
+ * insensitive to short-term, transient changes in packet size.
+ */
+#define RECEIVE_AVG_WEIGHT 64
+
+/* Minimum alignment for mergeable packet buffers. */
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+
#define VIRTNET_DRIVER_VERSION "1.0.0"
struct virtnet_stats {
@@ -73,12 +80,15 @@ struct receive_queue {
struct napi_struct napi;
- /* Number of input buffers, and max we've ever had. */
- unsigned int num, max;
-
/* Chain pages by the private ptr. */
struct page *pages;
+ /* Average packet length for mergeable receive buffers. */
+ struct ewma mrg_avg_pkt_len;
+
+ /* Page frag for packet buffer allocation. */
+ struct page_frag alloc_frag;
+
/* RX: fragments + linear part + virtio header */
struct scatterlist sg[MAX_SKB_FRAGS + 2];
@@ -127,11 +137,6 @@ struct virtnet_info {
/* Lock for config space updates */
struct mutex config_lock;
- /* Page_frag for GFP_KERNEL packet buffer allocation when we run
- * low on memory.
- */
- struct page_frag alloc_frag;
-
/* Is the affinity hint set for virtqueues? */
bool affinity_hint_set;
@@ -222,6 +227,24 @@ static void skb_xmit_done(struct virtqueue *vq)
netif_wake_subqueue(vi->dev, vq2txq(vq));
}
+static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
+{
+ unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1);
+ return (truesize + 1) * MERGEABLE_BUFFER_ALIGN;
+}
+
+static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx)
+{
+ return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN);
+
+}
+
+static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
+{
+ unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN;
+ return (unsigned long)buf | (size - 1);
+}
+
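The three helpers above let the driver pass a single unsigned long through the virtqueue by hiding the buffer's truesize in the low bits of its address: the buffer is aligned to MERGEABLE_BUFFER_ALIGN, so those bits are free to hold (truesize / align) - 1. A standalone sketch of the round trip, assuming 256-byte alignment, an arbitrary aligned address, and a truesize that is a multiple of the alignment:

#include <stdint.h>
#include <stdio.h>

#define BUF_ALIGN       256UL   /* stands in for MERGEABLE_BUFFER_ALIGN */

static unsigned long buf_to_ctx(uintptr_t buf, unsigned int truesize)
{
        /* low bits of the aligned address hold (truesize / align) - 1 */
        return (unsigned long)buf | (truesize / BUF_ALIGN - 1);
}

static uintptr_t ctx_to_buf(unsigned long ctx)
{
        return (uintptr_t)(ctx & ~(BUF_ALIGN - 1));
}

static unsigned int ctx_to_truesize(unsigned long ctx)
{
        return ((ctx & (BUF_ALIGN - 1)) + 1) * BUF_ALIGN;
}

int main(void)
{
        uintptr_t buf = 0x12340000UL;   /* hypothetical 256-byte-aligned address */
        unsigned long ctx = buf_to_ctx(buf, 1536);      /* truesize = 6 * 256 */

        printf("buf=%#lx truesize=%u\n",
               (unsigned long)ctx_to_buf(ctx), ctx_to_truesize(ctx));
        return 0;
}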
/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct receive_queue *rq,
struct page *page, unsigned int offset,
@@ -330,38 +353,34 @@ err:
static struct sk_buff *receive_mergeable(struct net_device *dev,
struct receive_queue *rq,
- void *buf,
+ unsigned long ctx,
unsigned int len)
{
+ void *buf = mergeable_ctx_to_buf_address(ctx);
struct skb_vnet_hdr *hdr = buf;
int num_buf = hdr->mhdr.num_buffers;
struct page *page = virt_to_head_page(buf);
int offset = buf - page_address(page);
- struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
- MERGE_BUFFER_LEN);
+ unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
+
+ struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, truesize);
struct sk_buff *curr_skb = head_skb;
if (unlikely(!curr_skb))
goto err_skb;
-
while (--num_buf) {
int num_skb_frags;
- buf = virtqueue_get_buf(rq->vq, &len);
- if (unlikely(!buf)) {
+ ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
+ if (unlikely(!ctx)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n",
dev->name, num_buf, hdr->mhdr.num_buffers);
dev->stats.rx_length_errors++;
goto err_buf;
}
- if (unlikely(len > MERGE_BUFFER_LEN)) {
- pr_debug("%s: rx error: merge buffer too long\n",
- dev->name);
- len = MERGE_BUFFER_LEN;
- }
+ buf = mergeable_ctx_to_buf_address(ctx);
page = virt_to_head_page(buf);
- --rq->num;
num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
@@ -377,37 +396,38 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
head_skb->truesize += nskb->truesize;
num_skb_frags = 0;
}
+ truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
if (curr_skb != head_skb) {
head_skb->data_len += len;
head_skb->len += len;
- head_skb->truesize += MERGE_BUFFER_LEN;
+ head_skb->truesize += truesize;
}
offset = buf - page_address(page);
if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
put_page(page);
skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
- len, MERGE_BUFFER_LEN);
+ len, truesize);
} else {
skb_add_rx_frag(curr_skb, num_skb_frags, page,
- offset, len, MERGE_BUFFER_LEN);
+ offset, len, truesize);
}
}
+ ewma_add(&rq->mrg_avg_pkt_len, head_skb->len);
return head_skb;
err_skb:
put_page(page);
while (--num_buf) {
- buf = virtqueue_get_buf(rq->vq, &len);
- if (unlikely(!buf)) {
+ ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
+ if (unlikely(!ctx)) {
pr_debug("%s: rx error: %d buffers missing\n",
dev->name, num_buf);
dev->stats.rx_length_errors++;
break;
}
- page = virt_to_head_page(buf);
+ page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx));
put_page(page);
- --rq->num;
}
err_buf:
dev->stats.rx_dropped++;
@@ -426,17 +446,20 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
dev->stats.rx_length_errors++;
- if (vi->mergeable_rx_bufs)
- put_page(virt_to_head_page(buf));
- else if (vi->big_packets)
+ if (vi->mergeable_rx_bufs) {
+ unsigned long ctx = (unsigned long)buf;
+ void *base = mergeable_ctx_to_buf_address(ctx);
+ put_page(virt_to_head_page(base));
+ } else if (vi->big_packets) {
give_pages(rq, buf);
- else
+ } else {
dev_kfree_skb(buf);
+ }
return;
}
if (vi->mergeable_rx_bufs)
- skb = receive_mergeable(dev, rq, buf, len);
+ skb = receive_mergeable(dev, rq, (unsigned long)buf, len);
else if (vi->big_packets)
skb = receive_big(dev, rq, buf, len);
else
@@ -577,28 +600,45 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
return err;
}
+static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len)
+{
+ const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ unsigned int len;
+
+ len = hdr_len + clamp_t(unsigned int, ewma_read(avg_pkt_len),
+ GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
+ return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
+}
+
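get_mergeable_buf_len() above sizes refill buffers from a weight-64 EWMA of recent packet lengths: clamp the average between the minimum good-packet length and what still fits in a page after the mergeable header, then round up to the buffer alignment. A standalone arithmetic sketch with assumed constants (4096-byte page, 12-byte virtio_net_hdr_mrg_rxbuf, 1518-byte GOOD_PACKET_LEN):

#include <stdio.h>

#define PAGE_SZ         4096u   /* assumed page size */
#define HDR_LEN         12u     /* assumed sizeof(struct virtio_net_hdr_mrg_rxbuf) */
#define GOOD_PACKET_LEN 1518u   /* ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN */
#define BUF_ALIGN       256u    /* MERGEABLE_BUFFER_ALIGN */

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

static unsigned int align_up(unsigned int v, unsigned int a)
{
        return (v + a - 1) / a * a;
}

/* Mirrors get_mergeable_buf_len(): header plus clamped average, aligned up. */
static unsigned int mergeable_buf_len(unsigned int avg_pkt_len)
{
        unsigned int len = HDR_LEN + clamp_uint(avg_pkt_len, GOOD_PACKET_LEN,
                                                PAGE_SZ - HDR_LEN);

        return align_up(len, BUF_ALIGN);
}

int main(void)
{
        /* A 620-byte average still refills with 1536-byte buffers, while a
         * 9000-byte average is capped at one page.
         */
        printf("%u %u\n", mergeable_buf_len(620), mergeable_buf_len(9000));
        return 0;
}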
static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
- struct virtnet_info *vi = rq->vq->vdev->priv;
- char *buf = NULL;
+ struct page_frag *alloc_frag = &rq->alloc_frag;
+ char *buf;
+ unsigned long ctx;
int err;
+ unsigned int len, hole;
- if (gfp & __GFP_WAIT) {
- if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag,
- gfp)) {
- buf = (char *)page_address(vi->alloc_frag.page) +
- vi->alloc_frag.offset;
- get_page(vi->alloc_frag.page);
- vi->alloc_frag.offset += MERGE_BUFFER_LEN;
- }
- } else {
- buf = netdev_alloc_frag(MERGE_BUFFER_LEN);
- }
- if (!buf)
+ len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
+ if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
return -ENOMEM;
- sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN);
- err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
+ buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
+ ctx = mergeable_buf_to_ctx(buf, len);
+ get_page(alloc_frag->page);
+ alloc_frag->offset += len;
+ hole = alloc_frag->size - alloc_frag->offset;
+ if (hole < len) {
+ /* To avoid internal fragmentation, if there is very likely not
+ * enough space for another buffer, add the remaining space to
+ * the current buffer. This extra space is not included in
+ * the truesize stored in ctx.
+ */
+ len += hole;
+ alloc_frag->offset += hole;
+ }
+
+ sg_init_one(rq->sg, buf, len);
+ err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp);
if (err < 0)
put_page(virt_to_head_page(buf));
@@ -618,6 +658,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
int err;
bool oom;
+ gfp |= __GFP_COLD;
do {
if (vi->mergeable_rx_bufs)
err = add_recvbuf_mergeable(rq, gfp);
@@ -629,10 +670,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
oom = err == -ENOMEM;
if (err)
break;
- ++rq->num;
} while (rq->vq->num_free);
- if (unlikely(rq->num > rq->max))
- rq->max = rq->num;
if (unlikely(!virtqueue_kick(rq->vq)))
return false;
return !oom;
@@ -700,11 +738,10 @@ again:
while (received < budget &&
(buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
receive_buf(rq, buf, len);
- --rq->num;
received++;
}
- if (rq->num < rq->max / 2) {
+ if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
if (!try_fill_recv(rq, GFP_ATOMIC))
schedule_delayed_work(&vi->refill, 0);
}
@@ -874,16 +911,15 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
/*
* Send command via the control virtqueue and check status. Commands
* supported by the hypervisor, as indicated by feature bits, should
- * never fail unless improperly formated.
+ * never fail unless improperly formatted.
*/
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
- struct scatterlist *out,
- struct scatterlist *in)
+ struct scatterlist *out)
{
struct scatterlist *sgs[4], hdr, stat;
struct virtio_net_ctrl_hdr ctrl;
virtio_net_ctrl_ack status = ~0;
- unsigned out_num = 0, in_num = 0, tmp;
+ unsigned out_num = 0, tmp;
/* Caller should know better */
BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
@@ -896,16 +932,13 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
if (out)
sgs[out_num++] = out;
- if (in)
- sgs[out_num + in_num++] = in;
/* Add return status. */
sg_init_one(&stat, &status, sizeof(status));
- sgs[out_num + in_num++] = &stat;
+ sgs[out_num] = &stat;
- BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
- BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
- < 0);
+ BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
+ BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC) < 0);
if (unlikely(!virtqueue_kick(vi->cvq)))
return status == VIRTIO_NET_OK;
@@ -935,8 +968,7 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
sg_init_one(&sg, addr->sa_data, dev->addr_len);
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
- VIRTIO_NET_CTRL_MAC_ADDR_SET,
- &sg, NULL)) {
+ VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
dev_warn(&vdev->dev,
"Failed to set mac address by vq command.\n");
return -EINVAL;
@@ -1009,7 +1041,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
rtnl_lock();
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
- VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
+ VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
rtnl_unlock();
}
@@ -1027,7 +1059,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
sg_init_one(&sg, &s, sizeof(s));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
- VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
+ VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
queue_pairs);
return -EINVAL;
@@ -1067,7 +1099,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
void *buf;
int i;
- /* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */
+ /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
return;
@@ -1077,16 +1109,14 @@ static void virtnet_set_rx_mode(struct net_device *dev)
sg_init_one(sg, &promisc, sizeof(promisc));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
- VIRTIO_NET_CTRL_RX_PROMISC,
- sg, NULL))
+ VIRTIO_NET_CTRL_RX_PROMISC, sg))
dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
promisc ? "en" : "dis");
sg_init_one(sg, &allmulti, sizeof(allmulti));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
- VIRTIO_NET_CTRL_RX_ALLMULTI,
- sg, NULL))
+ VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
allmulti ? "en" : "dis");
@@ -1122,8 +1152,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
- VIRTIO_NET_CTRL_MAC_TABLE_SET,
- sg, NULL))
+ VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
kfree(buf);
@@ -1138,7 +1167,7 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
sg_init_one(&sg, &vid, sizeof(vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
- VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
+ VIRTIO_NET_CTRL_VLAN_ADD, &sg))
dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
return 0;
}
@@ -1152,7 +1181,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
sg_init_one(&sg, &vid, sizeof(vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
- VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
+ VIRTIO_NET_CTRL_VLAN_DEL, &sg))
dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
return 0;
}
@@ -1386,6 +1415,14 @@ static void free_receive_bufs(struct virtnet_info *vi)
}
}
+static void free_receive_page_frags(struct virtnet_info *vi)
+{
+ int i;
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ if (vi->rq[i].alloc_frag.page)
+ put_page(vi->rq[i].alloc_frag.page);
+}
+
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -1401,15 +1438,16 @@ static void free_unused_bufs(struct virtnet_info *vi)
struct virtqueue *vq = vi->rq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (vi->mergeable_rx_bufs)
- put_page(virt_to_head_page(buf));
- else if (vi->big_packets)
+ if (vi->mergeable_rx_bufs) {
+ unsigned long ctx = (unsigned long)buf;
+ void *base = mergeable_ctx_to_buf_address(ctx);
+ put_page(virt_to_head_page(base));
+ } else if (vi->big_packets) {
give_pages(&vi->rq[i], buf);
- else
+ } else {
dev_kfree_skb(buf);
- --vi->rq[i].num;
+ }
}
- BUG_ON(vi->rq[i].num != 0);
}
}
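The mergeable-buffer path above now stores an opaque context word per buffer instead of keeping a per-queue count, so free_unused_bufs() has to unpack it with mergeable_ctx_to_buf_address() before dropping the page reference. Below is a self-contained sketch of the underlying trick, packing an aligned buffer address and a small size code into one unsigned long; the alignment, helper names, and encoding are illustrative assumptions, not the exact virtio_net helpers.

/* Illustrative only: pack an aligned buffer address and a size code into one
 * unsigned long, in the spirit of the mergeable-ctx change above.  The
 * truesize is assumed to be a multiple of CTX_ALIGN. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define CTX_ALIGN 256UL                 /* assumed buffer alignment */

static unsigned long buf_to_ctx(void *buf, unsigned long truesize)
{
        return (unsigned long)buf | (truesize / CTX_ALIGN - 1);
}

static void *ctx_to_buf(unsigned long ctx)
{
        return (void *)(ctx & ~(CTX_ALIGN - 1));
}

static unsigned long ctx_to_truesize(unsigned long ctx)
{
        return ((ctx & (CTX_ALIGN - 1)) + 1) * CTX_ALIGN;
}

int main(void)
{
        void *buf = aligned_alloc(CTX_ALIGN, 4 * CTX_ALIGN);
        unsigned long ctx = buf_to_ctx(buf, 4 * CTX_ALIGN);

        assert(ctx_to_buf(ctx) == buf);
        assert(ctx_to_truesize(ctx) == 4 * CTX_ALIGN);
        printf("ctx=%#lx buf=%p truesize=%lu\n", ctx, ctx_to_buf(ctx),
               ctx_to_truesize(ctx));
        free(buf);
        return 0;
}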
@@ -1516,6 +1554,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
napi_weight);
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
+ ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
}
@@ -1552,6 +1591,33 @@ err:
return ret;
}
+#ifdef CONFIG_SYSFS
+static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attribute, char *buf)
+{
+ struct virtnet_info *vi = netdev_priv(queue->dev);
+ unsigned int queue_index = get_netdev_rx_queue_index(queue);
+ struct ewma *avg;
+
+ BUG_ON(queue_index >= vi->max_queue_pairs);
+ avg = &vi->rq[queue_index].mrg_avg_pkt_len;
+ return sprintf(buf, "%u\n", get_mergeable_buf_len(avg));
+}
+
+static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
+ __ATTR_RO(mergeable_rx_buffer_size);
+
+static struct attribute *virtio_net_mrg_rx_attrs[] = {
+ &mergeable_rx_buffer_size_attribute.attr,
+ NULL
+};
+
+static const struct attribute_group virtio_net_mrg_rx_group = {
+ .name = "virtio_net",
+ .attrs = virtio_net_mrg_rx_attrs
+};
+#endif
+
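The attribute group above exposes the estimated mergeable buffer size per receive queue. Assuming the usual netdev rx-queue sysfs layout, the file should appear as /sys/class/net/<dev>/queues/rx-<n>/virtio_net/mergeable_rx_buffer_size; the path, interface name, and queue index in this small userspace reader are placeholders.

/* Read the new per-queue attribute from userspace.  Path inferred from the
 * attribute group above ("virtio_net"/"mergeable_rx_buffer_size"). */
#include <stdio.h>

int main(void)
{
        char path[256], buf[64];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/class/net/%s/queues/rx-%d/virtio_net/mergeable_rx_buffer_size",
                 "eth0", 0);
        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("estimated mergeable buffer size: %s", buf);
        fclose(f);
        return 0;
}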
static int virtnet_probe(struct virtio_device *vdev)
{
int i, err;
@@ -1666,6 +1732,10 @@ static int virtnet_probe(struct virtio_device *vdev)
if (err)
goto free_stats;
+#ifdef CONFIG_SYSFS
+ if (vi->mergeable_rx_bufs)
+ dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
+#endif
netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
@@ -1680,7 +1750,8 @@ static int virtnet_probe(struct virtio_device *vdev)
try_fill_recv(&vi->rq[i], GFP_KERNEL);
/* If we didn't even get one input buffer, we're useless. */
- if (vi->rq[i].num == 0) {
+ if (vi->rq[i].vq->num_free ==
+ virtqueue_get_vring_size(vi->rq[i].vq)) {
free_unused_bufs(vi);
err = -ENOMEM;
goto free_recv_bufs;
@@ -1714,9 +1785,8 @@ free_recv_bufs:
unregister_netdev(dev);
free_vqs:
cancel_delayed_work_sync(&vi->refill);
+ free_receive_page_frags(vi);
virtnet_del_vqs(vi);
- if (vi->alloc_frag.page)
- put_page(vi->alloc_frag.page);
free_stats:
free_percpu(vi->stats);
free:
@@ -1733,6 +1803,8 @@ static void remove_vq_common(struct virtnet_info *vi)
free_receive_bufs(vi);
+ free_receive_page_frags(vi);
+
virtnet_del_vqs(vi);
}
@@ -1750,8 +1822,6 @@ static void virtnet_remove(struct virtio_device *vdev)
unregister_netdev(vi->dev);
remove_vq_common(vi);
- if (vi->alloc_frag.page)
- put_page(vi->alloc_frag.page);
flush_work(&vi->config_work);
@@ -1797,16 +1867,17 @@ static int virtnet_restore(struct virtio_device *vdev)
if (err)
return err;
- if (netif_running(vi->dev))
+ if (netif_running(vi->dev)) {
+ for (i = 0; i < vi->curr_queue_pairs; i++)
+ if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+ schedule_delayed_work(&vi->refill, 0);
+
for (i = 0; i < vi->max_queue_pairs; i++)
virtnet_napi_enable(&vi->rq[i]);
+ }
netif_device_attach(vi->dev);
- for (i = 0; i < vi->curr_queue_pairs; i++)
- if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
-
mutex_lock(&vi->config_lock);
vi->config_enable = true;
mutex_unlock(&vi->config_lock);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 7e2788c488ed..3be786faaaec 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1235,7 +1235,9 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
#ifdef VMXNET3_RSS
if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
(adapter->netdev->features & NETIF_F_RXHASH))
- ctx->skb->rxhash = le32_to_cpu(rcd->rssHash);
+ skb_set_hash(ctx->skb,
+ le32_to_cpu(rcd->rssHash),
+ PKT_HASH_TYPE_L3);
#endif
skb_put(ctx->skb, rcd->len);
@@ -3132,7 +3134,6 @@ err_alloc_queue_desc:
err_alloc_shared:
dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
return err;
}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 12040a35d95d..190569d02450 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -37,7 +37,6 @@
#include <linux/spinlock.h>
#include <linux/ioport.h>
#include <linux/highmem.h>
-#include <linux/init.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 249e01c5600c..b0f705c2378f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -40,6 +40,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/vxlan.h>
+#include <net/protocol.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
@@ -468,7 +469,6 @@ static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
const u8 *mac)
-
{
struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
struct vxlan_fdb *f;
@@ -554,13 +554,104 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
return 1;
}
+static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+ struct sk_buff *p, **pp = NULL;
+ struct vxlanhdr *vh, *vh2;
+ struct ethhdr *eh, *eh2;
+ unsigned int hlen, off_vx, off_eth;
+ const struct packet_offload *ptype;
+ __be16 type;
+ int flush = 1;
+
+ off_vx = skb_gro_offset(skb);
+ hlen = off_vx + sizeof(*vh);
+ vh = skb_gro_header_fast(skb, off_vx);
+ if (skb_gro_header_hard(skb, hlen)) {
+ vh = skb_gro_header_slow(skb, hlen, off_vx);
+ if (unlikely(!vh))
+ goto out;
+ }
+ skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
+
+ off_eth = skb_gro_offset(skb);
+ hlen = off_eth + sizeof(*eh);
+ eh = skb_gro_header_fast(skb, off_eth);
+ if (skb_gro_header_hard(skb, hlen)) {
+ eh = skb_gro_header_slow(skb, hlen, off_eth);
+ if (unlikely(!eh))
+ goto out;
+ }
+
+ flush = 0;
+
+ for (p = *head; p; p = p->next) {
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ vh2 = (struct vxlanhdr *)(p->data + off_vx);
+ eh2 = (struct ethhdr *)(p->data + off_eth);
+ if (vh->vx_vni != vh2->vx_vni || compare_ether_header(eh, eh2)) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+ }
+
+ type = eh->h_proto;
+
+ rcu_read_lock();
+ ptype = gro_find_receive_by_type(type);
+ if (ptype == NULL) {
+ flush = 1;
+ goto out_unlock;
+ }
+
+ skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
+ pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+ rcu_read_unlock();
+out:
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ return pp;
+}
+
+static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ struct ethhdr *eh;
+ struct packet_offload *ptype;
+ __be16 type;
+ int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
+ int err = -ENOSYS;
+
+ eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
+ type = eh->h_proto;
+
+ rcu_read_lock();
+ ptype = gro_find_complete_by_type(type);
+ if (ptype != NULL)
+ err = ptype->callbacks.gro_complete(skb, nhoff + vxlan_len);
+
+ rcu_read_unlock();
+ return err;
+}
+
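vxlan_gro_receive() pulls the VXLAN and inner Ethernet headers, marks any held packet whose VNI or inner Ethernet header differs as not part of the same flow, and then hands the inner frame to the matching packet_offload by ethertype. The flow-matching step in isolation, as a runnable userspace illustration (the types are simplified stand-ins, not the kernel structures):

/* Two packets are GRO candidates only if their VNI and inner Ethernet
 * headers match; memcmp over the 14-byte header plays the role of
 * compare_ether_header(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct vxlanhdr { uint32_t vx_flags, vx_vni; };  /* big-endian on the wire */
struct ethhdr   { uint8_t h_dest[6], h_source[6]; uint16_t h_proto; };

static bool same_vxlan_flow(const struct vxlanhdr *vh1, const struct ethhdr *eh1,
                            const struct vxlanhdr *vh2, const struct ethhdr *eh2)
{
        return vh1->vx_vni == vh2->vx_vni &&
               memcmp(eh1, eh2, sizeof(*eh1)) == 0;
}

int main(void)
{
        struct vxlanhdr vh1 = { 0, 0x00123400 }, vh2 = { 0, 0x00123400 };
        struct ethhdr eh1 = { {1, 2, 3, 4, 5, 6}, {7, 8, 9, 10, 11, 12}, 0x0008 };
        struct ethhdr eh2 = eh1;

        printf("same flow: %d\n", same_vxlan_flow(&vh1, &eh1, &vh2, &eh2));
        eh2.h_dest[5] ^= 1;
        printf("same flow after MAC change: %d\n",
               same_vxlan_flow(&vh1, &eh1, &vh2, &eh2));
        return 0;
}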
/* Notify netdevs that UDP port started listening */
-static void vxlan_notify_add_rx_port(struct sock *sk)
+static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
{
struct net_device *dev;
+ struct sock *sk = vs->sock->sk;
struct net *net = sock_net(sk);
sa_family_t sa_family = sk->sk_family;
__be16 port = inet_sk(sk)->inet_sport;
+ int err;
+
+ if (sa_family == AF_INET) {
+ err = udp_add_offload(&vs->udp_offloads);
+ if (err)
+ pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
+ }
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
@@ -572,9 +663,10 @@ static void vxlan_notify_add_rx_port(struct sock *sk)
}
/* Notify netdevs that UDP port is no more listening */
-static void vxlan_notify_del_rx_port(struct sock *sk)
+static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
{
struct net_device *dev;
+ struct sock *sk = vs->sock->sk;
struct net *net = sock_net(sk);
sa_family_t sa_family = sk->sk_family;
__be16 port = inet_sk(sk)->inet_sport;
@@ -586,6 +678,9 @@ static void vxlan_notify_del_rx_port(struct sock *sk)
port);
}
rcu_read_unlock();
+
+ if (sa_family == AF_INET)
+ udp_del_offload(&vs->udp_offloads);
}
/* Add new entry to forwarding table -- assumes lock held */
@@ -741,10 +836,9 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
return -EINVAL;
*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
- tdev = dev_get_by_index(net, *ifindex);
+ tdev = __dev_get_by_index(net, *ifindex);
if (!tdev)
return -EADDRNOTAVAIL;
- dev_put(tdev);
} else {
*ifindex = 0;
}
@@ -916,17 +1010,32 @@ static bool vxlan_snoop(struct net_device *dev,
}
/* See if multicast group is already in use by other ID */
-static bool vxlan_group_used(struct vxlan_net *vn, union vxlan_addr *remote_ip)
+static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
struct vxlan_dev *vxlan;
+	/* The vxlan_sock is used only by dev; leaving the group has
+	 * no effect on other vxlan devices.
+	 */
+ if (atomic_read(&dev->vn_sock->refcnt) == 1)
+ return false;
+
list_for_each_entry(vxlan, &vn->vxlan_list, next) {
- if (!netif_running(vxlan->dev))
+ if (!netif_running(vxlan->dev) || vxlan == dev)
continue;
- if (vxlan_addr_equal(&vxlan->default_dst.remote_ip,
- remote_ip))
- return true;
+ if (vxlan->vn_sock != dev->vn_sock)
+ continue;
+
+ if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
+ &dev->default_dst.remote_ip))
+ continue;
+
+ if (vxlan->default_dst.remote_ifindex !=
+ dev->default_dst.remote_ifindex)
+ continue;
+
+ return true;
}
return false;
@@ -949,7 +1058,7 @@ void vxlan_sock_release(struct vxlan_sock *vs)
spin_lock(&vn->sock_lock);
hlist_del_rcu(&vs->hlist);
rcu_assign_sk_user_data(vs->sock->sk, NULL);
- vxlan_notify_del_rx_port(sk);
+ vxlan_notify_del_rx_port(vs);
spin_unlock(&vn->sock_lock);
queue_work(vxlan_wq, &vs->del_work);
@@ -1047,6 +1156,16 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (!vs)
goto drop;
+ /* If the NIC driver gave us an encapsulated packet
+ * with the encapsulation mark, the device checksummed it
+ * for us. Otherwise force the upper layers to verify it.
+ */
+ if ((skb->ip_summed != CHECKSUM_UNNECESSARY && skb->ip_summed != CHECKSUM_PARTIAL) ||
+ !skb->encapsulation)
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb->encapsulation = 0;
+
vs->rcv(vs, skb, vxh->vx_vni);
return 0;
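The checksum test moves from vxlan_rcv() into the UDP encap receive hook and is relaxed to also trust CHECKSUM_PARTIAL while dropping the NETIF_F_RXCSUM check. Written out as a standalone predicate (the CHECKSUM_* values are reproduced here for the demo and assumed to match the kernel's definitions):

#include <stdbool.h>
#include <stdio.h>

enum { CHECKSUM_NONE, CHECKSUM_UNNECESSARY, CHECKSUM_COMPLETE, CHECKSUM_PARTIAL };

/* Keep the device's verdict only if it both verified the checksum
 * (UNNECESSARY or PARTIAL) and marked the packet as encapsulated;
 * otherwise the upper layers must re-verify (CHECKSUM_NONE). */
static bool must_revalidate(int ip_summed, bool encapsulation)
{
        return (ip_summed != CHECKSUM_UNNECESSARY &&
                ip_summed != CHECKSUM_PARTIAL) || !encapsulation;
}

int main(void)
{
        printf("%d %d %d\n",
               must_revalidate(CHECKSUM_UNNECESSARY, true),   /* 0: trust NIC */
               must_revalidate(CHECKSUM_UNNECESSARY, false),  /* 1: recheck */
               must_revalidate(CHECKSUM_NONE, true));         /* 1: recheck */
        return 0;
}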
@@ -1066,7 +1185,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
struct iphdr *oip = NULL;
struct ipv6hdr *oip6 = NULL;
struct vxlan_dev *vxlan;
- struct pcpu_tstats *stats;
+ struct pcpu_sw_netstats *stats;
union vxlan_addr saddr;
__u32 vni;
int err = 0;
@@ -1105,17 +1224,6 @@ static void vxlan_rcv(struct vxlan_sock *vs,
skb_reset_network_header(skb);
- /* If the NIC driver gave us an encapsulated packet with
- * CHECKSUM_UNNECESSARY and Rx checksum feature is enabled,
- * leave the CHECKSUM_UNNECESSARY, the device checksummed it
- * for us. Otherwise force the upper layers to verify it.
- */
- if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation ||
- !(vxlan->dev->features & NETIF_F_RXCSUM))
- skb->ip_summed = CHECKSUM_NONE;
-
- skb->encapsulation = 0;
-
if (oip6)
err = IP6_ECN_decapsulate(oip6, skb);
if (oip)
@@ -1366,20 +1474,6 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
return false;
}
-static void vxlan_sock_put(struct sk_buff *skb)
-{
- sock_put(skb->sk);
-}
-
-/* On transmit, associate with the tunnel socket */
-static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb)
-{
- skb_orphan(skb);
- sock_hold(sk);
- skb->sk = sk;
- skb->destructor = vxlan_sock_put;
-}
-
/* Compute source port for outgoing packet
* first choice to use L4 flow hash since it will spread
* better and maybe available from hardware
@@ -1390,7 +1484,7 @@ __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
unsigned int range = (port_max - port_min) + 1;
u32 hash;
- hash = skb_get_rxhash(skb);
+ hash = skb_get_hash(skb);
if (!hash)
hash = jhash(skb->data, 2 * ETH_ALEN,
(__force u32) skb->protocol);
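Here skb_get_rxhash() is replaced by skb_get_hash(); the surrounding vxlan_src_port() then scales the 32-bit flow hash into [port_min, port_max]. A userspace sketch of that scaling, using the multiply-shift form the in-tree function is believed to use (treat the exact formula as an assumption):

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit flow hash into a UDP source-port range without a modulo:
 * (hash * range) >> 32 maps [0, 2^32) uniformly onto [0, range). */
static uint16_t pick_src_port(uint32_t hash, uint16_t port_min, uint16_t port_max)
{
        uint32_t range = (uint32_t)(port_max - port_min) + 1;

        return (uint16_t)((((uint64_t)hash * range) >> 32) + port_min);
}

int main(void)
{
        printf("%u\n", pick_src_port(0xdeadbeef, 32768, 61000));
        return 0;
}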
@@ -1499,8 +1593,6 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
ip6h->daddr = *daddr;
ip6h->saddr = *saddr;
- vxlan_set_owner(vs->sock->sk, skb);
-
err = handle_offloads(skb);
if (err)
return err;
@@ -1557,8 +1649,6 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
uh->len = htons(skb->len);
uh->check = 0;
- vxlan_set_owner(vs->sock->sk, skb);
-
err = handle_offloads(skb);
if (err)
return err;
@@ -1572,11 +1662,12 @@ EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
struct vxlan_dev *dst_vxlan)
{
- struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
- struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
+ struct pcpu_sw_netstats *tx_stats, *rx_stats;
union vxlan_addr loopback;
union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
+ tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
+ rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
skb->pkt_type = PACKET_HOST;
skb->encapsulation = 0;
skb->dev = dst_vxlan->dev;
@@ -1770,7 +1861,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct ethhdr *eth;
bool did_rsc = false;
- struct vxlan_rdst *rdst;
+ struct vxlan_rdst *rdst, *fdst = NULL;
struct vxlan_fdb *f;
skb_reset_mac_header(skb);
@@ -1812,7 +1903,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
vxlan_fdb_miss(vxlan, eth->h_dest);
dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
}
@@ -1820,12 +1911,19 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
list_for_each_entry_rcu(rdst, &f->remotes, list) {
struct sk_buff *skb1;
+ if (!fdst) {
+ fdst = rdst;
+ continue;
+ }
skb1 = skb_clone(skb, GFP_ATOMIC);
if (skb1)
vxlan_xmit_one(skb1, dev, rdst, did_rsc);
}
- dev_kfree_skb(skb);
+ if (fdst)
+ vxlan_xmit_one(skb, dev, fdst, did_rsc);
+ else
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
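The transmit loop above now remembers the first remote (fdst), clones only for the additional remotes, and sends the original skb to fdst after the loop, saving one clone per packet in the common single-destination case; a packet with no destination is freed with kfree_skb() so it is accounted as a drop. A generic userspace illustration of the reuse-the-original pattern (send_one() and strdup() stand in for vxlan_xmit_one() and skb_clone(); ordering simplified):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void send_one(char *buf, int dst)
{
        printf("-> dst %d: %s\n", dst, buf);
}

static void send_all(char *buf, const int *dsts, int n)
{
        int i;

        for (i = 1; i < n; i++) {               /* clone only for extra remotes */
                char *copy = strdup(buf);
                send_one(copy, dsts[i]);
                free(copy);
        }
        if (n > 0)
                send_one(buf, dsts[0]);         /* original goes to the first remote */
        free(buf);                              /* no remote at all: plain drop */
}

int main(void)
{
        int dsts[] = { 1, 2, 3 };

        send_all(strdup("payload"), dsts, 3);
        return 0;
}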
@@ -1882,12 +1980,12 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_sock *vs;
int i;
- dev->tstats = alloc_percpu(struct pcpu_tstats);
+ dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
for_each_possible_cpu(i) {
- struct pcpu_tstats *vxlan_stats;
+ struct pcpu_sw_netstats *vxlan_stats;
vxlan_stats = per_cpu_ptr(dev->tstats, i);
u64_stats_init(&vxlan_stats->syncp);
}
@@ -1935,7 +2033,6 @@ static void vxlan_uninit(struct net_device *dev)
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
- struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_sock *vs = vxlan->vn_sock;
@@ -1943,8 +2040,7 @@ static int vxlan_open(struct net_device *dev)
if (!vs)
return -ENOTCONN;
- if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
- vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
+ if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
vxlan_sock_hold(vs);
dev_hold(dev);
queue_work(vxlan_wq, &vxlan->igmp_join);
@@ -1983,7 +2079,7 @@ static int vxlan_stop(struct net_device *dev)
struct vxlan_sock *vs = vxlan->vn_sock;
if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
- ! vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
+ !vxlan_group_used(vn, vxlan)) {
vxlan_sock_hold(vs);
dev_hold(dev);
queue_work(vxlan_wq, &vxlan->igmp_leave);
@@ -2001,6 +2097,29 @@ static void vxlan_set_multicast_list(struct net_device *dev)
{
}
+static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ struct net_device *lowerdev;
+ int max_mtu;
+
+ lowerdev = __dev_get_by_index(dev_net(dev), dst->remote_ifindex);
+ if (lowerdev == NULL)
+ return eth_change_mtu(dev, new_mtu);
+
+ if (dst->remote_ip.sa.sa_family == AF_INET6)
+ max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
+ else
+ max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
+
+ if (new_mtu < 68 || new_mtu > max_mtu)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
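The new ndo_change_mtu handler bounds the VXLAN MTU by the lower device's MTU minus the encapsulation headroom. The usual figures are 50 bytes over IPv4 (14 Ethernet + 20 IPv4 + 8 UDP + 8 VXLAN) and 70 over IPv6; these breakdowns are assumptions here since VXLAN_HEADROOM/VXLAN6_HEADROOM are defined outside this hunk. A worked check:

#include <stdio.h>

#define VXLAN_HEADROOM  (14 + 20 + 8 + 8)   /* eth + ipv4 + udp + vxlan (assumed) */
#define VXLAN6_HEADROOM (14 + 40 + 8 + 8)   /* eth + ipv6 + udp + vxlan (assumed) */

static int check_mtu(int new_mtu, int lower_mtu, int ipv6)
{
        int max_mtu = lower_mtu - (ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

        return (new_mtu >= 68 && new_mtu <= max_mtu) ? 0 : -1;
}

int main(void)
{
        /* 1500-byte lower device: 1450 fits over IPv4 (1500 - 50) but not IPv6. */
        printf("1450 over 1500/IPv4: %s\n", check_mtu(1450, 1500, 0) ? "rejected" : "ok");
        printf("1450 over 1500/IPv6: %s\n", check_mtu(1450, 1500, 1) ? "rejected" : "ok");
        return 0;
}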
static const struct net_device_ops vxlan_netdev_ops = {
.ndo_init = vxlan_init,
.ndo_uninit = vxlan_uninit,
@@ -2009,7 +2128,7 @@ static const struct net_device_ops vxlan_netdev_ops = {
.ndo_start_xmit = vxlan_xmit,
.ndo_get_stats64 = ip_tunnel_get_stats64,
.ndo_set_rx_mode = vxlan_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
+ .ndo_change_mtu = vxlan_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_fdb_add = vxlan_fdb_add,
@@ -2278,7 +2397,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
struct sock *sk;
unsigned int h;
- vs = kmalloc(sizeof(*vs), GFP_KERNEL);
+ vs = kzalloc(sizeof(*vs), GFP_KERNEL);
if (!vs)
return ERR_PTR(-ENOMEM);
@@ -2303,9 +2422,14 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
vs->data = data;
rcu_assign_sk_user_data(vs->sock->sk, vs);
+ /* Initialize the vxlan udp offloads structure */
+ vs->udp_offloads.port = port;
+ vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive;
+ vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;
+
spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
- vxlan_notify_add_rx_port(sk);
+ vxlan_notify_add_rx_port(vs);
spin_unlock(&vn->sock_lock);
/* Mark socket as an encapsulation socket. */
@@ -2440,7 +2564,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
/* update header length based on lower device */
dev->hard_header_len = lowerdev->hard_header_len +
(use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
- }
+ } else if (use_ipv6)
+ vxlan->flags |= VXLAN_F_IPV6;
if (data[IFLA_VXLAN_TOS])
vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
@@ -2629,6 +2754,44 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
.fill_info = vxlan_fill_info,
};
+static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
+ struct net_device *dev)
+{
+ struct vxlan_dev *vxlan, *next;
+ LIST_HEAD(list_kill);
+
+ list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+
+ /* In case we created vxlan device with carrier
+	 * and we lose the carrier due to module unload
+ * we also need to remove vxlan device. In other
+ * cases, it's not necessary and remote_ifindex
+ * is 0 here, so no matches.
+ */
+ if (dst->remote_ifindex == dev->ifindex)
+ vxlan_dellink(vxlan->dev, &list_kill);
+ }
+
+ unregister_netdevice_many(&list_kill);
+}
+
+static int vxlan_lowerdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
+
+ if (event == NETDEV_UNREGISTER)
+ vxlan_handle_lowerdev_unregister(vn, dev);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block vxlan_notifier_block __read_mostly = {
+ .notifier_call = vxlan_lowerdev_event,
+};
+
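This notifier lets vxlan tear down tunnels whose lower device is unregistered (for instance when the underlying NIC module is unloaded); registration happens in vxlan_init_module() further down. A minimal out-of-tree module showing the same netdevice-notifier pattern, for illustration only (it merely logs the event rather than deleting anything):

#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_lowerdev_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        if (event == NETDEV_UNREGISTER)
                pr_info("demo: %s is going away\n", dev->name);

        return NOTIFY_DONE;
}

static struct notifier_block demo_notifier_block = {
        .notifier_call = demo_lowerdev_event,
};

static int __init demo_init(void)
{
        return register_netdevice_notifier(&demo_notifier_block);
}

static void __exit demo_exit(void)
{
        unregister_netdevice_notifier(&demo_notifier_block);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");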
static __net_init int vxlan_init_net(struct net *net)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
@@ -2643,22 +2806,8 @@ static __net_init int vxlan_init_net(struct net *net)
return 0;
}
-static __net_exit void vxlan_exit_net(struct net *net)
-{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- struct vxlan_dev *vxlan;
- LIST_HEAD(list);
-
- rtnl_lock();
- list_for_each_entry(vxlan, &vn->vxlan_list, next)
- unregister_netdevice_queue(vxlan->dev, &list);
- unregister_netdevice_many(&list);
- rtnl_unlock();
-}
-
static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net,
- .exit = vxlan_exit_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
};
@@ -2673,18 +2822,23 @@ static int __init vxlan_init_module(void)
get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
- rc = register_pernet_device(&vxlan_net_ops);
+ rc = register_pernet_subsys(&vxlan_net_ops);
if (rc)
goto out1;
- rc = rtnl_link_register(&vxlan_link_ops);
+ rc = register_netdevice_notifier(&vxlan_notifier_block);
if (rc)
goto out2;
- return 0;
+ rc = rtnl_link_register(&vxlan_link_ops);
+ if (rc)
+ goto out3;
+ return 0;
+out3:
+ unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
- unregister_pernet_device(&vxlan_net_ops);
+ unregister_pernet_subsys(&vxlan_net_ops);
out1:
destroy_workqueue(vxlan_wq);
return rc;
@@ -2694,13 +2848,15 @@ late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
rtnl_link_unregister(&vxlan_link_ops);
+ unregister_netdevice_notifier(&vxlan_notifier_block);
destroy_workqueue(vxlan_wq);
- unregister_pernet_device(&vxlan_net_ops);
- rcu_barrier();
+ unregister_pernet_subsys(&vxlan_net_ops);
+ /* rcu_barrier() is called by netns */
}
module_exit(vxlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
+MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 0d1c7592efa0..19f7cb2cdef3 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -71,12 +71,9 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
const void *saddr, unsigned len)
{
struct frhdr hdr;
- struct dlci_local *dlp;
unsigned int hlen;
char *dest;
- dlp = netdev_priv(dev);
-
hdr.control = FRAD_I_UI;
switch (type)
{
@@ -107,11 +104,9 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
{
- struct dlci_local *dlp;
struct frhdr *hdr;
int process, header;
- dlp = netdev_priv(dev);
if (!pskb_may_pull(skb, sizeof(*hdr))) {
netdev_notice(dev, "invalid data no header\n");
dev->stats.rx_errors++;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 851dc7b7e8b0..288610df205c 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -699,8 +699,6 @@ static void dscc4_free1(struct pci_dev *pdev)
for (i = 0; i < dev_per_card; i++)
unregister_hdlc_device(dscc4_to_dev(root + i));
- pci_set_drvdata(pdev, NULL);
-
for (i = 0; i < dev_per_card; i++)
free_netdev(root[i].dev);
kfree(root);
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index 62f01b74cbd6..dc334c85d966 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -29,7 +29,6 @@
#include <linux/fcntl.h>
#include <linux/hdlc.h>
#include <linux/in.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
diff --git a/drivers/net/wan/hd64570.h b/drivers/net/wan/hd64570.h
index e4f539ad071b..10963e8f4b39 100644
--- a/drivers/net/wan/hd64570.h
+++ b/drivers/net/wan/hd64570.h
@@ -159,7 +159,7 @@ typedef struct {
/* Packet Descriptor Status bits */
#define ST_TX_EOM 0x80 /* End of frame */
-#define ST_TX_EOT 0x01 /* End of transmition */
+#define ST_TX_EOT 0x01 /* End of transmission */
#define ST_RX_EOM 0x80 /* End of frame */
#define ST_RX_SHORT 0x40 /* Short frame */
@@ -211,7 +211,7 @@ typedef struct {
#define CTL_NORTS 0x01
#define CTL_IDLE 0x10 /* Transmit an idle pattern */
-#define CTL_UDRNC 0x20 /* Idle after CRC or FCS+flag transmition */
+#define CTL_UDRNC 0x20 /* Idle after CRC or FCS+flag transmission */
#define ST0_TXRDY 0x02 /* TX ready */
#define ST0_RXRDY 0x01 /* RX ready */
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index 6269a09c7369..e92ecf1d3314 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -29,7 +29,6 @@
#include <linux/fcntl.h>
#include <linux/hdlc.h>
#include <linux/in.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
diff --git a/drivers/net/wan/hd64572.h b/drivers/net/wan/hd64572.h
index 96567c2dc4db..22137ee669cf 100644
--- a/drivers/net/wan/hd64572.h
+++ b/drivers/net/wan/hd64572.h
@@ -218,7 +218,7 @@ typedef struct {
#define ST_TX_EOM 0x80 /* End of frame */
#define ST_TX_UNDRRUN 0x08
#define ST_TX_OWNRSHP 0x02
-#define ST_TX_EOT 0x01 /* End of transmition */
+#define ST_TX_EOT 0x01 /* End of transmission */
#define ST_RX_EOM 0x80 /* End of frame */
#define ST_RX_SHORT 0x40 /* Short frame */
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index f51204cfe12f..b2fe9bb89633 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -49,7 +49,6 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
-#include <linux/init.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
@@ -973,7 +972,6 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_hdlcdev:
- pci_set_drvdata(pdev, NULL);
kfree(sc);
err_kzalloc:
pci_release_regions(pdev);
@@ -995,7 +993,6 @@ static void lmc_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 53efc57fcace..5b72f7f8c516 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -281,7 +281,6 @@ static void pc300_pci_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
if (card->ports[0].netdev)
free_netdev(card->ports[0].netdev);
if (card->ports[1].netdev)
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index ddbce54040e2..fe4e3ece3c42 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -260,7 +260,6 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
if (card->ports[0].netdev)
free_netdev(card->ports[0].netdev);
if (card->ports[1].netdev)
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 388ddf60a66d..1b89ecf0959e 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -57,6 +57,7 @@
#include <net/net_namespace.h>
#include <net/arp.h>
+#include <net/Space.h>
#include <asm/io.h>
#include <asm/types.h>
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 4c0a69779b89..f76aa9081585 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -542,7 +542,6 @@ static void wanxl_pci_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
kfree(card);
}
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index cfce83e1f273..f35f93c31b09 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -15,7 +15,6 @@
* more details.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/skbuff.h>
@@ -1314,7 +1313,7 @@ static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
if (!(changes & BSS_CHANGED_BSSID))
return;
- if (memcmp(conf->bssid, priv->bssid, ETH_ALEN)) {
+ if (!ether_addr_equal(conf->bssid, priv->bssid)) {
adm8211_set_bssid(dev, conf->bssid);
memcpy(priv->bssid, conf->bssid, ETH_ALEN);
}
@@ -1866,7 +1865,6 @@ static int adm8211_probe(struct pci_dev *pdev,
dev->flags = IEEE80211_HW_SIGNAL_UNSPEC;
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
- dev->channel_change_time = 1000;
dev->max_signal = 100; /* FIXME: find better value */
dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index 14128fd265ac..7e9ede6c5798 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -23,7 +23,6 @@
#ifdef __IN_PCMCIA_PACKAGE__
#include <pcmcia/k_compat.h>
#endif
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 34c8a33cac06..99b3bfa717d5 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1721,7 +1721,7 @@ static void at76_mac80211_tx(struct ieee80211_hw *hw,
* following workaround is necessary. If the TX frame is an
* authentication frame extract the bssid and send the CMD_JOIN. */
if (mgmt->frame_control & cpu_to_le16(IEEE80211_STYPE_AUTH)) {
- if (!ether_addr_equal(priv->bssid, mgmt->bssid)) {
+ if (!ether_addr_equal_64bits(priv->bssid, mgmt->bssid)) {
memcpy(priv->bssid, mgmt->bssid, ETH_ALEN);
ieee80211_queue_work(hw, &priv->work_join_bssid);
dev_kfree_skb_any(skb);
@@ -2112,7 +2112,6 @@ static struct at76_priv *at76_alloc_new_device(struct usb_device *udev)
priv->pm_period = 0;
/* unit us */
- priv->hw->channel_change_time = 100000;
return priv;
}
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 280fc3d53a36..507d9a9ee69a 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -25,7 +25,6 @@
* that and only has minimal functionality.
*/
#include <linux/compiler.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -1765,7 +1764,7 @@ static struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */
AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
- AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108
+ AR5523_DEVICE_UG(0x129b, 0x160b), /* Gigaset / USB stick 108
(CyberTAN Technology) */
AR5523_DEVICE_UG(0x16ab, 0x7801), /* Globalsun / AR5523_1 */
AR5523_DEVICE_UX(0x16ab, 0x7811), /* Globalsun / AR5523_2 */
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index e0ba7cd14252..b59cfbe0276b 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -17,6 +17,7 @@
#ifndef ATH_H
#define ATH_H
+#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/spinlock.h>
@@ -165,6 +166,7 @@ struct ath_common {
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
u32 len,
gfp_t gfp_mask);
+bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr);
void ath_hw_setbssidmask(struct ath_common *common);
void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key);
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 82e8088ca9b4..a6f5285235af 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -37,3 +37,10 @@ config ATH10K_TRACING
---help---
Select this to let ath10k use the tracing infrastructure.
+config ATH10K_DFS_CERTIFIED
+ bool "Atheros DFS support for certified platforms"
+ depends on ATH10K && CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ This option enables DFS support for initiating radiation on
+ ath10k.
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index e46951b8fb92..d44d618b05f9 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -243,6 +243,16 @@ static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
misc_ie_addr | CE_ERROR_MASK);
}
+static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ u32 misc_ie_addr = ath10k_pci_read32(ar,
+ ce_ctrl_addr + MISC_IE_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
+ misc_ie_addr & ~CE_ERROR_MASK);
+}
+
static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int mask)
@@ -731,7 +741,6 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ce_id, ret;
u32 intr_summary;
@@ -741,7 +750,7 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
intr_summary = CE_INTERRUPT_SUMMARY(ar);
- for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
+ for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
if (intr_summary & (1 << ce_id))
intr_summary &= ~(1 << ce_id);
else
@@ -783,22 +792,25 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
ath10k_pci_sleep(ar);
}
-void ath10k_ce_disable_interrupts(struct ath10k *ar)
+int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ce_id, ret;
ret = ath10k_pci_wake(ar);
if (ret)
- return;
+ return ret;
- for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
- struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
- u32 ctrl_addr = ce_state->ctrl_addr;
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
+ u32 ctrl_addr = ath10k_ce_base_address(ce_id);
ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+ ath10k_ce_error_intr_disable(ar, ctrl_addr);
+ ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}
+
ath10k_pci_sleep(ar);
+
+ return 0;
}
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
@@ -1047,9 +1059,19 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
const struct ce_attr *attr)
{
struct ath10k_ce_pipe *ce_state;
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
int ret;
+ /*
+ * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
+ * additional TX locking checks.
+ *
+ * For the lack of a better place do the check here.
+ */
+ BUILD_BUG_ON(TARGET_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+ BUILD_BUG_ON(TARGET_10X_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+
ret = ath10k_pci_wake(ar);
if (ret)
return NULL;
@@ -1057,7 +1079,7 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
ce_state = ath10k_ce_init_state(ar, ce_id, attr);
if (!ce_state) {
ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
- return NULL;
+ goto out;
}
if (attr->src_nentries) {
@@ -1066,7 +1088,8 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
ce_id, ret);
ath10k_ce_deinit(ce_state);
- return NULL;
+ ce_state = NULL;
+ goto out;
}
}
@@ -1076,15 +1099,13 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
ce_id, ret);
ath10k_ce_deinit(ce_state);
- return NULL;
+ ce_state = NULL;
+ goto out;
}
}
- /* Enable CE error interrupts */
- ath10k_ce_error_intr_enable(ar, ctrl_addr);
-
+out:
ath10k_pci_sleep(ar);
-
return ce_state;
}
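ath10k_ce_init() now fails the build, rather than misbehaving at runtime, if the HTT TX descriptor count cannot fit in the CE source ring; that is what the BUILD_BUG_ON() pair enforces. The same compile-time guard in plain C11 looks like this (the numeric values are placeholders, not the ath10k target constants):

/* Compile with -std=c11; static_assert fires at build time, like BUILD_BUG_ON. */
#include <assert.h>

#define NUM_MSDU_DESC            1424   /* placeholder */
#define HTT_H2T_MSG_SRC_NENTRIES 2048   /* placeholder */

static_assert(NUM_MSDU_DESC <= HTT_H2T_MSG_SRC_NENTRIES - 1,
              "MSDU descriptor count must fit in the CE source ring");

int main(void)
{
        return 0;
}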
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 15d45b5b7615..67dbde6a5c74 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -234,7 +234,7 @@ void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
-void ath10k_ce_disable_interrupts(struct ath10k *ar);
+int ath10k_ce_disable_interrupts(struct ath10k *ar);
/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 1129994fb105..3b59af3bddf4 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -597,10 +597,8 @@ static int ath10k_init_uart(struct ath10k *ar)
return ret;
}
- if (!uart_print) {
- ath10k_info("UART prints disabled\n");
+ if (!uart_print)
return 0;
- }
ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7);
if (ret) {
@@ -645,8 +643,8 @@ static int ath10k_init_hw_params(struct ath10k *ar)
ar->hw_params = *hw_params;
- ath10k_info("Hardware name %s version 0x%x\n",
- ar->hw_params.name, ar->target_version);
+ ath10k_dbg(ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n",
+ ar->hw_params.name, ar->target_version);
return 0;
}
@@ -664,7 +662,8 @@ static void ath10k_core_restart(struct work_struct *work)
ieee80211_restart_hw(ar->hw);
break;
case ATH10K_STATE_OFF:
- /* this can happen if driver is being unloaded */
+ /* this can happen if driver is being unloaded
+ * or if the crash happens during FW probing */
ath10k_warn("cannot restart a device that hasn't been started\n");
break;
case ATH10K_STATE_RESTARTING:
@@ -737,8 +736,6 @@ EXPORT_SYMBOL(ath10k_core_create);
void ath10k_core_destroy(struct ath10k *ar)
{
- ath10k_debug_destroy(ar);
-
flush_workqueue(ar->workqueue);
destroy_workqueue(ar->workqueue);
@@ -786,21 +783,30 @@ int ath10k_core_start(struct ath10k *ar)
goto err;
}
- status = ath10k_htc_wait_target(&ar->htc);
- if (status)
+ status = ath10k_hif_start(ar);
+ if (status) {
+ ath10k_err("could not start HIF: %d\n", status);
goto err_wmi_detach;
+ }
+
+ status = ath10k_htc_wait_target(&ar->htc);
+ if (status) {
+ ath10k_err("failed to connect to HTC: %d\n", status);
+ goto err_hif_stop;
+ }
status = ath10k_htt_attach(ar);
if (status) {
ath10k_err("could not attach htt (%d)\n", status);
- goto err_wmi_detach;
+ goto err_hif_stop;
}
status = ath10k_init_connect_htc(ar);
if (status)
goto err_htt_detach;
- ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version);
+ ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n",
+ ar->hw->wiphy->fw_version);
status = ath10k_wmi_cmd_init(ar);
if (status) {
@@ -826,12 +832,23 @@ int ath10k_core_start(struct ath10k *ar)
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
INIT_LIST_HEAD(&ar->arvifs);
+ if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
+ ath10k_info("%s (0x%x) fw %s api %d htt %d.%d\n",
+ ar->hw_params.name, ar->target_version,
+ ar->hw->wiphy->fw_version, ar->fw_api,
+ ar->htt.target_version_major,
+ ar->htt.target_version_minor);
+
+ __set_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags);
+
return 0;
err_disconnect_htc:
ath10k_htc_stop(&ar->htc);
err_htt_detach:
ath10k_htt_detach(&ar->htt);
+err_hif_stop:
+ ath10k_hif_stop(ar);
err_wmi_detach:
ath10k_wmi_detach(ar);
err:
@@ -985,6 +1002,8 @@ void ath10k_core_unregister(struct ath10k *ar)
ath10k_mac_unregister(ar);
ath10k_core_free_firmware_files(ar);
+
+ ath10k_debug_destroy(ar);
}
EXPORT_SYMBOL(ath10k_core_unregister);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 0934f7633de3..ade1781c7186 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -30,6 +30,7 @@
#include "wmi.h"
#include "../ath.h"
#include "../regd.h"
+#include "../dfs_pattern_detector.h"
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -43,7 +44,7 @@
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
-#define ATH10K_MAX_NUM_MGMT_PENDING 16
+#define ATH10K_MAX_NUM_MGMT_PENDING 128
struct ath10k;
@@ -192,6 +193,14 @@ struct ath10k_target_stats {
};
+struct ath10k_dfs_stats {
+ u32 phy_errors;
+ u32 pulses_total;
+ u32 pulses_detected;
+ u32 pulses_discarded;
+ u32 radar_detected;
+};
+
#define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */
struct ath10k_peer {
@@ -244,6 +253,9 @@ struct ath10k_vif {
u8 bssid[ETH_ALEN];
} ibss;
} u;
+
+ u8 fixed_rate;
+ u8 fixed_nss;
};
struct ath10k_vif_iter {
@@ -261,6 +273,10 @@ struct ath10k_debug {
unsigned long htt_stats_mask;
struct delayed_work htt_stats_dwork;
+ struct ath10k_dfs_stats dfs_stats;
+ struct ath_dfs_pool_stats dfs_pool_stats;
+
+ u32 fw_dbglog_mask;
};
enum ath10k_state {
@@ -295,10 +311,19 @@ enum ath10k_fw_features {
/* firmware support tx frame management over WMI, otherwise it's HTT */
ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
+ /* Firmware does not support P2P */
+ ATH10K_FW_FEATURE_NO_P2P = 3,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
+enum ath10k_dev_flags {
+ /* Indicates that ath10k device is during CAC phase of DFS */
+ ATH10K_CAC_RUNNING,
+ ATH10K_FLAG_FIRST_BOOT_DONE,
+};
+
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
@@ -392,6 +417,8 @@ struct ath10k {
bool monitor_enabled;
bool monitor_present;
unsigned int filter_flags;
+ unsigned long dev_flags;
+ u32 dfs_block_radar_events;
struct wmi_pdev_set_wmm_params_arg wmm_params;
struct completion install_key_done;
@@ -410,6 +437,9 @@ struct ath10k {
struct list_head peers;
wait_queue_head_t peer_mapping_wq;
+ /* number of created peers; protected by data_lock */
+ int num_peers;
+
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
struct completion offchan_tx_completed;
@@ -428,6 +458,8 @@ struct ath10k {
u32 survey_last_cycle_count;
struct survey_info survey[ATH10K_NUM_CHANS];
+ struct dfs_pattern_detector *dfs_detector;
+
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
#endif
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 760ff2289e3c..6debd281350a 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -614,6 +614,61 @@ static const struct file_operations fops_htt_stats_mask = {
.llseek = default_llseek,
};
+static ssize_t ath10k_read_fw_dbglog(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned int len;
+ char buf[32];
+
+ len = scnprintf(buf, sizeof(buf), "0x%08x\n",
+ ar->debug.fw_dbglog_mask);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_fw_dbglog(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long mask;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &mask);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.fw_dbglog_mask = mask;
+
+ if (ar->state == ATH10K_STATE_ON) {
+ ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
+ if (ret) {
+ ath10k_warn("dbglog cfg failed from debugfs: %d\n",
+ ret);
+ goto exit;
+ }
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_fw_dbglog = {
+ .read = ath10k_read_fw_dbglog,
+ .write = ath10k_write_fw_dbglog,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
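The new fw_dbglog debugfs file parses a mask with kstrtoul_from_user() on write and reports the current mask on read, pushing it to the firmware immediately when the device is up. From userspace it can be driven like any debugfs attribute; the path below assumes debugfs is mounted at /sys/kernel/debug with the usual ieee80211/phyX/ath10k layout, so verify it on the target system.

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/ieee80211/phy0/ath10k/fw_dbglog";
        char buf[32];
        FILE *f;

        f = fopen(path, "w");
        if (f) {
                fputs("0xffffffff\n", f);       /* enable all firmware log groups */
                fclose(f);
        }
        f = fopen(path, "r");
        if (f && fgets(buf, sizeof(buf), f))
                printf("fw_dbglog mask: %s", buf);
        if (f)
                fclose(f);
        return 0;
}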
int ath10k_debug_start(struct ath10k *ar)
{
int ret;
@@ -625,6 +680,14 @@ int ath10k_debug_start(struct ath10k *ar)
/* continue normally anyway, this isn't serious */
ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
+ if (ar->debug.fw_dbglog_mask) {
+ ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
+ if (ret)
+ /* not serious */
+			ath10k_warn("failed to enable dbglog during start: %d\n",
+ ret);
+ }
+
return 0;
}
@@ -639,6 +702,86 @@ void ath10k_debug_stop(struct ath10k *ar)
cancel_delayed_work(&ar->debug.htt_stats_dwork);
}
+static ssize_t ath10k_write_simulate_radar(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+
+ ieee80211_radar_detected(ar->hw);
+
+ return count;
+}
+
+static const struct file_operations fops_simulate_radar = {
+ .write = ath10k_write_simulate_radar,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#define ATH10K_DFS_STAT(s, p) (\
+ len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \
+ ar->debug.dfs_stats.p))
+
+#define ATH10K_DFS_POOL_STAT(s, p) (\
+ len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \
+ ar->debug.dfs_pool_stats.p))
+
+static ssize_t ath10k_read_dfs_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int retval = 0, len = 0;
+ const int size = 8000;
+ struct ath10k *ar = file->private_data;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (!ar->dfs_detector) {
+ len += scnprintf(buf + len, size - len, "DFS not enabled\n");
+ goto exit;
+ }
+
+ ar->debug.dfs_pool_stats =
+ ar->dfs_detector->get_stats(ar->dfs_detector);
+
+ len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
+
+ ATH10K_DFS_STAT("reported phy errors", phy_errors);
+ ATH10K_DFS_STAT("pulse events reported", pulses_total);
+ ATH10K_DFS_STAT("DFS pulses detected", pulses_detected);
+ ATH10K_DFS_STAT("DFS pulses discarded", pulses_discarded);
+ ATH10K_DFS_STAT("Radars detected", radar_detected);
+
+ len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
+ ATH10K_DFS_POOL_STAT("Pool references", pool_reference);
+ ATH10K_DFS_POOL_STAT("Pulses allocated", pulse_allocated);
+ ATH10K_DFS_POOL_STAT("Pulses alloc error", pulse_alloc_error);
+ ATH10K_DFS_POOL_STAT("Pulses in use", pulse_used);
+ ATH10K_DFS_POOL_STAT("Seqs. allocated", pseq_allocated);
+ ATH10K_DFS_POOL_STAT("Seqs. alloc error", pseq_alloc_error);
+ ATH10K_DFS_POOL_STAT("Seqs. in use", pseq_used);
+
+exit:
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_dfs_stats = {
+ .read = ath10k_read_dfs_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
@@ -667,6 +810,23 @@ int ath10k_debug_create(struct ath10k *ar)
debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_htt_stats_mask);
+ debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_fw_dbglog);
+
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ debugfs_create_file("dfs_simulate_radar", S_IWUSR,
+ ar->debug.debugfs_phy, ar,
+ &fops_simulate_radar);
+
+ debugfs_create_bool("dfs_block_radar_events", S_IWUSR,
+ ar->debug.debugfs_phy,
+ &ar->dfs_block_radar_events);
+
+ debugfs_create_file("dfs_stats", S_IRUSR,
+ ar->debug.debugfs_phy, ar,
+ &fops_dfs_stats);
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 3cfe3ee90dbe..1773c36c71a0 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -33,6 +33,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_MGMT = 0x00000100,
ATH10K_DBG_DATA = 0x00000200,
ATH10K_DBG_BMI = 0x00000400,
+ ATH10K_DBG_REGULATORY = 0x00000800,
ATH10K_DBG_ANY = 0xffffffff,
};
@@ -53,6 +54,8 @@ void ath10k_debug_read_service_map(struct ath10k *ar,
void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev);
+#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
+
#else
static inline int ath10k_debug_start(struct ath10k *ar)
{
@@ -82,6 +85,9 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev)
{
}
+
+#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
+
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index edae50b52806..edc57ab505c8 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -191,6 +191,11 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ if (!skb) {
+ ath10k_warn("invalid sk_buff completion - NULL pointer. firmware crashed?\n");
+ return 0;
+ }
+
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
@@ -534,14 +539,6 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
u16 credit_count;
u16 credit_size;
- reinit_completion(&htc->ctl_resp);
-
- status = ath10k_hif_start(htc->ar);
- if (status) {
- ath10k_err("could not start HIF (%d)\n", status);
- goto err_start;
- }
-
status = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_WAIT_TIMEOUT_HZ);
if (status <= 0) {
@@ -549,15 +546,13 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
status = -ETIMEDOUT;
ath10k_err("ctl_resp never came in (%d)\n", status);
- goto err_target;
+ return status;
}
if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
ath10k_err("Invalid HTC ready msg len:%d\n",
htc->control_resp_len);
-
- status = -ECOMM;
- goto err_target;
+ return -ECOMM;
}
msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
@@ -567,8 +562,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
if (message_id != ATH10K_HTC_MSG_READY_ID) {
ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id);
- status = -ECOMM;
- goto err_target;
+ return -ECOMM;
}
htc->total_transmit_credits = credit_count;
@@ -581,9 +575,8 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
if ((htc->total_transmit_credits == 0) ||
(htc->target_credit_size == 0)) {
- status = -ECOMM;
ath10k_err("Invalid credit size received\n");
- goto err_target;
+ return -ECOMM;
}
ath10k_htc_setup_target_buffer_assignments(htc);
@@ -600,14 +593,10 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
if (status) {
ath10k_err("could not connect to htc service (%d)\n", status);
- goto err_target;
+ return status;
}
return 0;
-err_target:
- ath10k_hif_stop(htc->ar);
-err_start:
- return status;
}
int ath10k_htc_connect_service(struct ath10k_htc *htc,
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 5f7eeebc5432..69697af59ce0 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -104,8 +104,8 @@ err_htc_attach:
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
- ath10k_info("htt target version %d.%d\n",
- htt->target_version_major, htt->target_version_minor);
+ ath10k_dbg(ATH10K_DBG_BOOT, "htt target version %d.%d\n",
+ htt->target_version_major, htt->target_version_minor);
if (htt->target_version_major != 2 &&
htt->target_version_major != 3) {
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 1a337e93b7e9..b93ae355bc08 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1182,6 +1182,8 @@ struct htt_rx_info {
u32 info2;
} rate;
bool fcs_err;
+ bool amsdu_more;
+ bool mic_err;
};
struct ath10k_htt {
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 90d4f74c28d7..fe8bd1b59f0e 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -659,23 +659,6 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
memcpy(hdr_buf, hdr, hdr_len);
hdr = (struct ieee80211_hdr *)hdr_buf;
- /* FIXME: Hopefully this is a temporary measure.
- *
- * Reporting individual A-MSDU subframes means each reported frame
- * shares the same sequence number.
- *
- * mac80211 drops frames it recognizes as duplicates, i.e.
- * retransmission flag is set and sequence number matches sequence
- * number from a previous frame (as per IEEE 802.11-2012: 9.3.2.10
- * "Duplicate detection and recovery")
- *
- * To avoid frames being dropped clear retransmission flag for all
- * received A-MSDUs.
- *
- * Worst case: actual duplicate frames will be reported but this should
- * still be handled gracefully by other OSI/ISO layers. */
- hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_RETRY);
-
first = skb;
while (skb) {
void *decap_hdr;
@@ -746,6 +729,9 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
skb = skb->next;
info->skb->next = NULL;
+ if (skb)
+ info->amsdu_more = true;
+
ath10k_process_rx(htt->ar, info);
}
@@ -852,6 +838,20 @@ static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
return false;
}
+static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
+{
+ struct htt_rx_desc *rxd;
+ u32 flags;
+
+ rxd = (void *)skb->data - sizeof(*rxd);
+ flags = __le32_to_cpu(rxd->attention.flags);
+
+ if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
+ return true;
+
+ return false;
+}
+
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
@@ -959,6 +959,11 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
continue;
}
+ if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
+ ath10k_htt_rx_free_msdu_chain(msdu_head);
+ continue;
+ }
+
/* FIXME: we do not support chaining yet.
* this needs investigation */
if (msdu_chaining) {
@@ -969,6 +974,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
info.skb = msdu_head;
info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
+ info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
info.signal += rx->ppdu.combined_rssi;
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index d9335e9d0d04..f1d36d2d2723 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -85,16 +85,13 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
int ath10k_htt_tx_attach(struct ath10k_htt *htt)
{
- u8 pipe;
-
spin_lock_init(&htt->tx_lock);
init_waitqueue_head(&htt->empty_tx_wq);
- /* At the beginning free queue number should hint us the maximum
- * queue length */
- pipe = htt->ar->htc.endpoint[htt->eid].ul_pipe_id;
- htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
- pipe);
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
+ htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
+ else
+ htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;
ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 8aeb46d9b534..f1505a25d810 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -115,6 +115,7 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_10X_MAC_AGGR_DELIM 0
#define TARGET_10X_AST_SKID_LIMIT 16
#define TARGET_10X_NUM_PEERS (128 + (TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_NUM_PEERS_MAX 128
#define TARGET_10X_NUM_OFFLOAD_PEERS 0
#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
#define TARGET_10X_NUM_PEER_KEYS 2
@@ -269,6 +270,7 @@ enum ath10k_mcast2ucast_mode {
#define CORE_CTRL_CPU_INTR_MASK 0x00002000
#define CORE_CTRL_ADDRESS 0x0000
#define PCIE_INTR_ENABLE_ADDRESS 0x0008
+#define PCIE_INTR_CAUSE_ADDRESS 0x000c
#define PCIE_INTR_CLR_ADDRESS 0x0014
#define SCRATCH_3_ADDRESS 0x0030
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 97ac8c87cba2..776e364eadcd 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -322,12 +322,19 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
lockdep_assert_held(&ar->conf_mutex);
ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
- if (ret)
+ if (ret) {
+ ath10k_warn("Failed to create wmi peer: %i\n", ret);
return ret;
+ }
ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
- if (ret)
+ if (ret) {
+ ath10k_warn("Failed to wait for created wmi peer: %i\n", ret);
return ret;
+ }
+ spin_lock_bh(&ar->data_lock);
+ ar->num_peers++;
+ spin_unlock_bh(&ar->data_lock);
return 0;
}
@@ -373,6 +380,10 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
if (ret)
return ret;
+ spin_lock_bh(&ar->data_lock);
+ ar->num_peers--;
+ spin_unlock_bh(&ar->data_lock);
+
return 0;
}
@@ -392,6 +403,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
list_del(&peer->list);
kfree(peer);
+ ar->num_peers--;
}
spin_unlock_bh(&ar->data_lock);
}
@@ -407,6 +419,7 @@ static void ath10k_peer_cleanup_all(struct ath10k *ar)
list_del(&peer->list);
kfree(peer);
}
+ ar->num_peers = 0;
spin_unlock_bh(&ar->data_lock);
}
@@ -450,15 +463,19 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
arg.channel.mode = chan_to_phymode(&conf->chandef);
- arg.channel.min_power = channel->max_power * 3;
- arg.channel.max_power = channel->max_power * 4;
- arg.channel.max_reg_power = channel->max_reg_power * 4;
- arg.channel.max_antenna_gain = channel->max_antenna_gain;
+ arg.channel.min_power = 0;
+ arg.channel.max_power = channel->max_power * 2;
+ arg.channel.max_reg_power = channel->max_reg_power * 2;
+ arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arg.ssid = arvif->u.ap.ssid;
arg.ssid_len = arvif->u.ap.ssid_len;
arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+ /* For now allow DFS for AP mode */
+ arg.channel.chan_radar =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
arg.ssid = arvif->vif->bss_conf.ssid;
arg.ssid_len = arvif->vif->bss_conf.ssid_len;
@@ -516,6 +533,11 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
lockdep_assert_held(&ar->conf_mutex);
+ if (!ar->monitor_present) {
+ ath10k_warn("mac montor stop -- monitor is not present\n");
+ return -EINVAL;
+ }
+
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
@@ -523,11 +545,13 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
/* TODO setup this dynamically, what in case we
don't have any vifs? */
arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef);
+ arg.channel.chan_radar =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
- arg.channel.min_power = channel->max_power * 3;
- arg.channel.max_power = channel->max_power * 4;
- arg.channel.max_reg_power = channel->max_reg_power * 4;
- arg.channel.max_antenna_gain = channel->max_antenna_gain;
+ arg.channel.min_power = 0;
+ arg.channel.max_power = channel->max_power * 2;
+ arg.channel.max_reg_power = channel->max_reg_power * 2;
+ arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
@@ -566,6 +590,16 @@ static int ath10k_monitor_stop(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
+ if (!ar->monitor_present) {
+ ath10k_warn("mac montor stop -- monitor is not present\n");
+ return -EINVAL;
+ }
+
+ if (!ar->monitor_enabled) {
+ ath10k_warn("mac montor stop -- monitor is not enabled\n");
+ return -EINVAL;
+ }
+
ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
if (ret)
ath10k_warn("Monitor vdev down failed: %d\n", ret);
@@ -647,6 +681,107 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
return ret;
}
+static int ath10k_start_cac(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+ ret = ath10k_monitor_create(ar);
+ if (ret) {
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ return ret;
+ }
+
+ ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
+ if (ret) {
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ath10k_monitor_destroy(ar);
+ return ret;
+ }
+
+ ath10k_dbg(ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+}
+
+static int ath10k_stop_cac(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* CAC is not running - do nothing */
+ if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
+ return 0;
+
+ ath10k_monitor_stop(ar);
+ ath10k_monitor_destroy(ar);
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+ ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n");
+
+ return 0;
+}
+
+static const char *ath10k_dfs_state(enum nl80211_dfs_state dfs_state)
+{
+ switch (dfs_state) {
+ case NL80211_DFS_USABLE:
+ return "USABLE";
+ case NL80211_DFS_UNAVAILABLE:
+ return "UNAVAILABLE";
+ case NL80211_DFS_AVAILABLE:
+ return "AVAILABLE";
+ default:
+ WARN_ON(1);
+ return "bug";
+ }
+}
+
+static void ath10k_config_radar_detection(struct ath10k *ar)
+{
+ struct ieee80211_channel *chan = ar->hw->conf.chandef.chan;
+ bool radar = ar->hw->conf.radar_enabled;
+ bool chan_radar = !!(chan->flags & IEEE80211_CHAN_RADAR);
+ enum nl80211_dfs_state dfs_state = chan->dfs_state;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac radar config update: chan %dMHz radar %d chan radar %d chan state %s\n",
+ chan->center_freq, radar, chan_radar,
+ ath10k_dfs_state(dfs_state));
+
+ /*
+ * It is safe to call this even if CAC is not started.
+ * Calling it here guarantees that changing the channel, etc., will stop CAC.
+ */
+ ath10k_stop_cac(ar);
+
+ if (!radar)
+ return;
+
+ if (!chan_radar)
+ return;
+
+ if (dfs_state != NL80211_DFS_USABLE)
+ return;
+
+ ret = ath10k_start_cac(ar);
+ if (ret) {
+ /*
+ * It is not possible to start CAC on the current channel, so starting
+ * radiation is not allowed. Mark this channel DFS_UNAVAILABLE by
+ * indicating that radar was detected.
+ */
+ ath10k_warn("failed to start CAC (%d)\n", ret);
+ ieee80211_radar_detected(ar->hw);
+ }
+}
+
static void ath10k_control_beaconing(struct ath10k_vif *arvif,
struct ieee80211_bss_conf *info)
{
@@ -1351,19 +1486,22 @@ static int ath10k_update_channel_list(struct ath10k *ar)
ch->allow_vht = true;
ch->allow_ibss =
- !(channel->flags & IEEE80211_CHAN_NO_IBSS);
+ !(channel->flags & IEEE80211_CHAN_NO_IR);
ch->ht40plus =
!(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
- passive = channel->flags & IEEE80211_CHAN_PASSIVE_SCAN;
+ ch->chan_radar =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
+
+ passive = channel->flags & IEEE80211_CHAN_NO_IR;
ch->passive = passive;
ch->freq = channel->center_freq;
- ch->min_power = channel->max_power * 3;
- ch->max_power = channel->max_power * 4;
- ch->max_reg_power = channel->max_reg_power * 4;
- ch->max_antenna_gain = channel->max_antenna_gain;
+ ch->min_power = 0;
+ ch->max_power = channel->max_power * 2;
+ ch->max_reg_power = channel->max_reg_power * 2;
+ ch->max_antenna_gain = channel->max_antenna_gain * 2;
ch->reg_class_id = 0; /* FIXME */
/* FIXME: why use only legacy modes, why not any
@@ -1423,9 +1561,20 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath10k *ar = hw->priv;
+ bool result;
ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
+ request->dfs_region);
+ result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
+ request->dfs_region);
+ if (!result)
+ ath10k_warn("dfs region 0x%X not supported, will trigger radar for every pulse\n",
+ request->dfs_region);
+ }
+
mutex_lock(&ar->conf_mutex);
if (ar->state == ATH10K_STATE_ON)
ath10k_regd_update(ar);
@@ -1714,8 +1863,10 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
break;
ret = ath10k_wmi_mgmt_tx(ar, skb);
- if (ret)
+ if (ret) {
ath10k_warn("wmi mgmt_tx failed (%d)\n", ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
}
}
@@ -1889,6 +2040,7 @@ void ath10k_halt(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
+ ath10k_stop_cac(ar);
del_timer_sync(&ar->scan.timeout);
ath10k_offchan_tx_purge(ar);
ath10k_mgmt_over_wmi_tx_purge(ar);
@@ -1943,7 +2095,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
ret);
- ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 0);
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
if (ret)
ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
ret);
@@ -1998,15 +2150,40 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
struct ath10k *ar = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
+ u32 param;
mutex_lock(&ar->conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac config channel %d mhz\n",
- conf->chandef.chan->center_freq);
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac config channel %d mhz flags 0x%x\n",
+ conf->chandef.chan->center_freq,
+ conf->chandef.chan->flags);
+
spin_lock_bh(&ar->data_lock);
ar->rx_channel = conf->chandef.chan;
spin_unlock_bh(&ar->data_lock);
+
+ ath10k_config_radar_detection(ar);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac config power %d\n",
+ hw->conf.power_level);
+
+ param = ar->wmi.pdev_param->txpower_limit2g;
+ ret = ath10k_wmi_pdev_set_param(ar, param,
+ hw->conf.power_level * 2);
+ if (ret)
+ ath10k_warn("mac failed to set 2g txpower %d (%d)\n",
+ hw->conf.power_level, ret);
+
+ param = ar->wmi.pdev_param->txpower_limit5g;
+ ret = ath10k_wmi_pdev_set_param(ar, param,
+ hw->conf.power_level * 2);
+ if (ret)
+ ath10k_warn("mac failed to set 5g txpower %d (%d)\n",
+ hw->conf.power_level, ret);
}
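
The value passed to the firmware above is twice the mac80211 power level, which suggests the txpower_limit2g/txpower_limit5g pdev parameters are expressed in half-dBm steps (an assumption - the patch itself does not state the unit). As a quick example, a requested power_level of 20 dBm would be sent as 40.
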
if (changed & IEEE80211_CONF_CHANGE_PS)
@@ -2037,7 +2214,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
enum wmi_sta_powersave_param param;
int ret = 0;
- u32 value;
+ u32 value, param_id;
int bit;
u32 vdev_param;
@@ -2049,6 +2226,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
arvif->vif = vif;
INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
+ INIT_LIST_HEAD(&arvif->list);
if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
ath10k_warn("Only one monitor interface allowed\n");
@@ -2128,6 +2306,13 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ath10k_warn("Failed to create peer for AP: %d\n", ret);
goto err_vdev_delete;
}
+
+ param_id = ar->wmi.pdev_param->sta_kickout_th;
+
+ /* Disable STA KICKOUT functionality in FW */
+ ret = ath10k_wmi_pdev_set_param(ar, param_id, 0);
+ if (ret)
+ ath10k_warn("Failed to disable STA KICKOUT\n");
}
if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
@@ -2265,8 +2450,14 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
*total_flags &= SUPPORTED_FILTERS;
ar->filter_flags = *total_flags;
+ /* Monitor must not be started if it wasn't created first.
+ * Promiscuous mode may be started on a non-monitor interface - in
+ * such a case the monitor vdev is not created, so starting the
+ * monitor makes no sense. Since ath10k uses no special RX filters
+ * (only BSS filter in STA mode) there's no need for any special
+ * action here. */
if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
- !ar->monitor_enabled) {
+ !ar->monitor_enabled && ar->monitor_present) {
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
ar->monitor_vdev_id);
@@ -2274,7 +2465,7 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
if (ret)
ath10k_warn("Unable to start monitor mode\n");
} else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
- ar->monitor_enabled) {
+ ar->monitor_enabled && ar->monitor_present) {
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
ar->monitor_vdev_id);
@@ -2360,8 +2551,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_peer_create(ar, arvif->vdev_id,
info->bssid);
if (ret)
- ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
- info->bssid, arvif->vdev_id);
+ ath10k_warn("Failed to add peer %pM for vdev %d when changin bssid: %i\n",
+ info->bssid, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION) {
/*
@@ -2542,6 +2733,44 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
+static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ enum set_key_cmd cmd,
+ struct ieee80211_key_conf *key)
+{
+ u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
+ int ret;
+
+ /* The 10.1 firmware branch requires the default key index to be set to
+ * the group key index after installing it. Otherwise the FW/HW transmits
+ * corrupted frames with multi-vif APs. This is not required for the main
+ * firmware branch (e.g. 636).
+ *
+ * FIXME: This has been tested only in AP mode. It remains unknown whether
+ * this is required for multi-vif STA interfaces on 10.1. */
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
+ return;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
+ return;
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ return;
+
+ if (cmd != SET_KEY)
+ return;
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ key->keyidx);
+ if (ret)
+ ath10k_warn("failed to set group key as default key: %d\n",
+ ret);
+}
+
static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
@@ -2603,6 +2832,8 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto exit;
}
+ ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
+
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
if (peer && cmd == SET_KEY)
@@ -2627,6 +2858,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int max_num_peers;
int ret = 0;
mutex_lock(&ar->conf_mutex);
@@ -2637,14 +2869,26 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New station addition.
*/
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ max_num_peers = TARGET_10X_NUM_PEERS_MAX - 1;
+ else
+ max_num_peers = TARGET_NUM_PEERS;
+
+ if (ar->num_peers >= max_num_peers) {
+ ath10k_warn("Number of peers exceeded: peers number %d (max peers %d)\n",
+ ar->num_peers, max_num_peers);
+ ret = -ENOBUFS;
+ goto exit;
+ }
+
ath10k_dbg(ATH10K_DBG_MAC,
- "mac vdev %d peer create %pM (new sta)\n",
- arvif->vdev_id, sta->addr);
+ "mac vdev %d peer create %pM (new sta) num_peers %d\n",
+ arvif->vdev_id, sta->addr, ar->num_peers);
ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
if (ret)
- ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
+ ath10k_warn("Failed to add peer %pM for vdev %d when adding a new sta: %i\n",
+ sta->addr, arvif->vdev_id, ret);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
/*
@@ -2689,7 +2933,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_warn("Failed to disassociate station: %pM\n",
sta->addr);
}
-
+exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
@@ -3095,6 +3339,307 @@ exit:
return ret;
}
+/* Helper table for legacy fixed_rate/bitrate_mask */
+static const u8 cck_ofdm_rate[] = {
+ /* CCK */
+ 3, /* 1Mbps */
+ 2, /* 2Mbps */
+ 1, /* 5.5Mbps */
+ 0, /* 11Mbps */
+ /* OFDM */
+ 3, /* 6Mbps */
+ 7, /* 9Mbps */
+ 2, /* 12Mbps */
+ 6, /* 18Mbps */
+ 1, /* 24Mbps */
+ 5, /* 36Mbps */
+ 0, /* 48Mbps */
+ 4, /* 54Mbps */
+};
+
+/* Check if only one bit is set */
+static int ath10k_check_single_mask(u32 mask)
+{
+ int bit;
+
+ bit = ffs(mask);
+ if (!bit)
+ return 0;
+
+ mask &= ~BIT(bit - 1);
+ if (mask)
+ return 2;
+
+ return 1;
+}
+
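
For illustration, a minimal sketch (not from the patch) of the 0/1/2 return convention above: 0 means the mask is empty, 1 means exactly one bit is set, and 2 means more than one bit is set, so the callers further down can simply sum the results and compare against 1.

	/* Hypothetical sanity checks illustrating the return values: */
	WARN_ON(ath10k_check_single_mask(0x00) != 0); /* empty mask */
	WARN_ON(ath10k_check_single_mask(0x10) != 1); /* exactly one bit */
	WARN_ON(ath10k_check_single_mask(0x18) != 2); /* more than one bit */
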
+static bool
+ath10k_default_bitrate_mask(struct ath10k *ar,
+ enum ieee80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ u32 legacy = 0x00ff;
+ u8 ht = 0xff, i;
+ u16 vht = 0x3ff;
+
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ legacy = 0x00fff;
+ vht = 0;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ break;
+ default:
+ return false;
+ }
+
+ if (mask->control[band].legacy != legacy)
+ return false;
+
+ for (i = 0; i < ar->num_rf_chains; i++)
+ if (mask->control[band].ht_mcs[i] != ht)
+ return false;
+
+ for (i = 0; i < ar->num_rf_chains; i++)
+ if (mask->control[band].vht_mcs[i] != vht)
+ return false;
+
+ return true;
+}
+
+static bool
+ath10k_bitrate_mask_nss(const struct cfg80211_bitrate_mask *mask,
+ enum ieee80211_band band,
+ u8 *fixed_nss)
+{
+ int ht_nss = 0, vht_nss = 0, i;
+
+ /* check legacy */
+ if (ath10k_check_single_mask(mask->control[band].legacy))
+ return false;
+
+ /* check HT */
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
+ if (mask->control[band].ht_mcs[i] == 0xff)
+ continue;
+ else if (mask->control[band].ht_mcs[i] == 0x00)
+ break;
+ else
+ return false;
+ }
+
+ ht_nss = i;
+
+ /* check VHT */
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ if (mask->control[band].vht_mcs[i] == 0x03ff)
+ continue;
+ else if (mask->control[band].vht_mcs[i] == 0x0000)
+ break;
+ else
+ return false;
+ }
+
+ vht_nss = i;
+
+ if (ht_nss > 0 && vht_nss > 0)
+ return false;
+
+ if (ht_nss)
+ *fixed_nss = ht_nss;
+ else if (vht_nss)
+ *fixed_nss = vht_nss;
+ else
+ return false;
+
+ return true;
+}
+
+static bool
+ath10k_bitrate_mask_correct(const struct cfg80211_bitrate_mask *mask,
+ enum ieee80211_band band,
+ enum wmi_rate_preamble *preamble)
+{
+ int legacy = 0, ht = 0, vht = 0, i;
+
+ *preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ /* check legacy */
+ legacy = ath10k_check_single_mask(mask->control[band].legacy);
+ if (legacy > 1)
+ return false;
+
+ /* check HT */
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+ ht += ath10k_check_single_mask(mask->control[band].ht_mcs[i]);
+ if (ht > 1)
+ return false;
+
+ /* check VHT */
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+ vht += ath10k_check_single_mask(mask->control[band].vht_mcs[i]);
+ if (vht > 1)
+ return false;
+
+ /* Currently we support only one fixed_rate */
+ if ((legacy + ht + vht) != 1)
+ return false;
+
+ if (ht)
+ *preamble = WMI_RATE_PREAMBLE_HT;
+ else if (vht)
+ *preamble = WMI_RATE_PREAMBLE_VHT;
+
+ return true;
+}
+
+static bool
+ath10k_bitrate_mask_rate(const struct cfg80211_bitrate_mask *mask,
+ enum ieee80211_band band,
+ u8 *fixed_rate,
+ u8 *fixed_nss)
+{
+ u8 rate = 0, pream = 0, nss = 0, i;
+ enum wmi_rate_preamble preamble;
+
+ /* Check if the single-rate mask is correct */
+ if (!ath10k_bitrate_mask_correct(mask, band, &preamble))
+ return false;
+
+ pream = preamble;
+
+ switch (preamble) {
+ case WMI_RATE_PREAMBLE_CCK:
+ case WMI_RATE_PREAMBLE_OFDM:
+ i = ffs(mask->control[band].legacy) - 1;
+
+ if (band == IEEE80211_BAND_2GHZ && i < 4)
+ pream = WMI_RATE_PREAMBLE_CCK;
+
+ if (band == IEEE80211_BAND_5GHZ)
+ i += 4;
+
+ if (i >= ARRAY_SIZE(cck_ofdm_rate))
+ return false;
+
+ rate = cck_ofdm_rate[i];
+ break;
+ case WMI_RATE_PREAMBLE_HT:
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+ if (mask->control[band].ht_mcs[i])
+ break;
+
+ if (i == IEEE80211_HT_MCS_MASK_LEN)
+ return false;
+
+ rate = ffs(mask->control[band].ht_mcs[i]) - 1;
+ nss = i;
+ break;
+ case WMI_RATE_PREAMBLE_VHT:
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+ if (mask->control[band].vht_mcs[i])
+ break;
+
+ if (i == NL80211_VHT_NSS_MAX)
+ return false;
+
+ rate = ffs(mask->control[band].vht_mcs[i]) - 1;
+ nss = i;
+ break;
+ }
+
+ *fixed_nss = nss + 1;
+ nss <<= 4;
+ pream <<= 6;
+
+ ath10k_dbg(ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
+ pream, nss, rate);
+
+ *fixed_rate = pream | nss | rate;
+
+ return true;
+}
+
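
As a worked example of the packing above (a sketch, assuming only the table and shifts defined in this patch): selecting just the 54 Mbps legacy rate on 5 GHz gives i = ffs(BIT(7)) - 1 + 4 = 11, so the hardware rate code is cck_ofdm_rate[11] = 4, the preamble stays OFDM and nss stays 0. The fields end up packed as preamble in bits 7:6, nss in bits 5:4 and the rate code in bits 3:0.

	/* Minimal sketch (not part of the patch): fixed 54 Mbps OFDM,
	 * one spatial stream, packed with the scheme above. */
	u8 pream = WMI_RATE_PREAMBLE_OFDM;
	u8 nss = 0;                          /* 0-based; one spatial stream */
	u8 rate = cck_ofdm_rate[7 + 4];      /* 54 Mbps -> hw rate code 4 */
	u8 fixed_rate = (pream << 6) | (nss << 4) | rate;
	u8 fixed_nss = nss + 1;              /* value reported upward is 1-based */
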
+static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask,
+ enum ieee80211_band band,
+ u8 *fixed_rate,
+ u8 *fixed_nss)
+{
+ /* First check the full NSS mask to see if we can simply limit NSS */
+ if (ath10k_bitrate_mask_nss(mask, band, fixed_nss))
+ return true;
+
+ /* Next, check whether a single rate is set */
+ return ath10k_bitrate_mask_rate(mask, band, fixed_rate, fixed_nss);
+}
+
+static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
+ u8 fixed_rate,
+ u8 fixed_nss)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (arvif->fixed_rate == fixed_rate &&
+ arvif->fixed_nss == fixed_nss)
+ goto exit;
+
+ if (fixed_rate == WMI_FIXED_RATE_NONE)
+ ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
+
+ vdev_param = ar->wmi.vdev_param->fixed_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, fixed_rate);
+ if (ret) {
+ ath10k_warn("Could not set fixed_rate param 0x%02x: %d\n",
+ fixed_rate, ret);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ arvif->fixed_rate = fixed_rate;
+
+ vdev_param = ar->wmi.vdev_param->nss;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, fixed_nss);
+
+ if (ret) {
+ ath10k_warn("Could not set fixed_nss param %d: %d\n",
+ fixed_nss, ret);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ arvif->fixed_nss = fixed_nss;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k *ar = arvif->ar;
+ enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
+ u8 fixed_rate = WMI_FIXED_RATE_NONE;
+ u8 fixed_nss = ar->num_rf_chains;
+
+ if (!ath10k_default_bitrate_mask(ar, band, mask)) {
+ if (!ath10k_get_fixed_rate_nss(mask, band,
+ &fixed_rate,
+ &fixed_nss))
+ return -EINVAL;
+ }
+
+ return ath10k_set_fixed_rate_param(arvif, fixed_rate, fixed_nss);
+}
+
static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_tx,
.start = ath10k_start,
@@ -3117,6 +3662,7 @@ static const struct ieee80211_ops ath10k_ops = {
.tx_last_beacon = ath10k_tx_last_beacon,
.restart_complete = ath10k_restart_complete,
.get_survey = ath10k_get_survey,
+ .set_bitrate_mask = ath10k_set_bitrate_mask,
#ifdef CONFIG_PM
.suspend = ath10k_suspend,
.resume = ath10k_resume,
@@ -3249,12 +3795,37 @@ static const struct ieee80211_iface_limit ath10k_if_limits[] = {
},
};
-static const struct ieee80211_iface_combination ath10k_if_comb = {
- .limits = ath10k_if_limits,
- .n_limits = ARRAY_SIZE(ath10k_if_limits),
- .max_interfaces = 8,
- .num_different_channels = 1,
- .beacon_int_infra_match = true,
+static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
+ {
+ .max = 8,
+ .types = BIT(NL80211_IFTYPE_AP)
+ },
+};
+
+static const struct ieee80211_iface_combination ath10k_if_comb[] = {
+ {
+ .limits = ath10k_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_if_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ },
+};
+
+static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
+ {
+ .limits = ath10k_10x_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+#endif
+ },
};
static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
@@ -3433,9 +4004,12 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO);
+ BIT(NL80211_IFTYPE_AP);
+
+ if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
+ ar->hw->wiphy->interface_modes |=
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
@@ -3465,7 +4039,6 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
- ar->hw->channel_change_time = 5000;
ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -3478,11 +4051,28 @@ int ath10k_mac_register(struct ath10k *ar)
*/
ar->hw->queues = 4;
- ar->hw->wiphy->iface_combinations = &ath10k_if_comb;
- ar->hw->wiphy->n_iface_combinations = 1;
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_10x_if_comb);
+ } else {
+ ar->hw->wiphy->iface_combinations = ath10k_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_if_comb);
+ }
ar->hw->netdev_features = NETIF_F_HW_CSUM;
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ /* Init ath dfs pattern detector */
+ ar->ath_common.debug_mask = ATH_DBG_DFS;
+ ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
+ NL80211_DFS_UNSET);
+
+ if (!ar->dfs_detector)
+ ath10k_warn("dfs pattern detector init failed\n");
+ }
+
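
Both the radar_detect_widths advertisement earlier in this file (guarded by #ifdef) and the pattern detector initialization above (guarded by config_enabled()) are only active when the kernel is built with CONFIG_ATH10K_DFS_CERTIFIED=y; otherwise no DFS state is set up and the new CAC/radar paths stay dormant.
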
ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
ath10k_reg_notifier);
if (ret) {
@@ -3518,6 +4108,9 @@ void ath10k_mac_unregister(struct ath10k *ar)
{
ieee80211_unregister_hw(ar->hw);
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+ ar->dfs_detector->exit(ar->dfs_detector);
+
kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 9e86a811086f..29fd197d1fd8 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/bitops.h>
#include "core.h"
#include "debug.h"
@@ -32,10 +33,21 @@
#include "ce.h"
#include "pci.h"
+enum ath10k_pci_irq_mode {
+ ATH10K_PCI_IRQ_AUTO = 0,
+ ATH10K_PCI_IRQ_LEGACY = 1,
+ ATH10K_PCI_IRQ_MSI = 2,
+};
+
static unsigned int ath10k_target_ps;
+static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
+
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
+module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
+MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
+
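
With this parameter the interrupt mode becomes selectable at module load time; forcing legacy interrupts, for instance, would presumably look like "modprobe ath10k_pci irq_mode=1" (a hypothetical invocation - the values are those listed in the MODULE_PARM_DESC above), while 0 keeps the automatic MSI-X/MSI/legacy fallback.
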
#define QCA988X_2_0_DEVICE_ID (0x003c)
static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
@@ -52,10 +64,16 @@ static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
-static void ath10k_pci_device_reset(struct ath10k *ar);
-static int ath10k_pci_reset_target(struct ath10k *ar);
-static int ath10k_pci_start_intr(struct ath10k *ar);
-static void ath10k_pci_stop_intr(struct ath10k *ar);
+static int ath10k_pci_device_reset(struct ath10k *ar);
+static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
+static int ath10k_pci_init_irq(struct ath10k *ar);
+static int ath10k_pci_deinit_irq(struct ath10k *ar);
+static int ath10k_pci_request_irq(struct ath10k *ar);
+static void ath10k_pci_free_irq(struct ath10k *ar);
+static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
+ struct ath10k_ce_pipe *rx_pipe,
+ struct bmi_xfer *xfer);
+static void ath10k_pci_cleanup_ce(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
@@ -200,6 +218,87 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
/* CE7 used only by Host */
};
+static bool ath10k_pci_irq_pending(struct ath10k *ar)
+{
+ u32 cause;
+
+ /* Check if the shared legacy irq is for us */
+ cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_CAUSE_ADDRESS);
+ if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
+ return true;
+
+ return false;
+}
+
+static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
+{
+ /* IMPORTANT: the INTR_CLR register has to be set after
+ * INTR_ENABLE is set to 0, otherwise the interrupt cannot
+ * actually be cleared. */
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+ 0);
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+ /* IMPORTANT: this extra read transaction is required to
+ * flush the posted write buffer. */
+ (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS);
+}
+
+static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
+{
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+ /* IMPORTANT: this extra read transaction is required to
+ * flush the posted write buffer. */
+ (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS);
+}
+
+static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (ar_pci->num_msi_intrs == 0) {
+ if (!ath10k_pci_irq_pending(ar))
+ return IRQ_NONE;
+
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ }
+
+ tasklet_schedule(&ar_pci->early_irq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static int ath10k_pci_request_early_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ /* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the first
+ * interrupt from the irq vector is triggered in all cases for FW
+ * indication/errors. */
+ ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
+ IRQF_SHARED, "ath10k_pci (early)", ar);
+ if (ret) {
+ ath10k_warn("failed to request early irq: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_pci_free_early_irq(struct ath10k *ar)
+{
+ free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
+}
+
/*
* Diagnostic read/write access is provided for startup/config/debug usage.
* Caller must guarantee proper alignment, when applicable, and single user
@@ -526,17 +625,6 @@ static bool ath10k_pci_target_is_awake(struct ath10k *ar)
return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}
-static void ath10k_pci_wait(struct ath10k *ar)
-{
- int n = 100;
-
- while (n-- && !ath10k_pci_target_is_awake(ar))
- msleep(10);
-
- if (n < 0)
- ath10k_warn("Unable to wakeup target\n");
-}
-
int ath10k_do_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -723,7 +811,7 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
flags);
if (ret)
- ath10k_warn("CE send failed: %p\n", nbuf);
+ ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);
return ret;
}
@@ -750,9 +838,10 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
ar->fw_version_build);
host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
- if (ath10k_pci_diag_read_mem(ar, host_addr,
- &reg_dump_area, sizeof(u32)) != 0) {
- ath10k_warn("could not read hi_failure_state\n");
+ ret = ath10k_pci_diag_read_mem(ar, host_addr,
+ &reg_dump_area, sizeof(u32));
+ if (ret) {
+ ath10k_err("failed to read FW dump area address: %d\n", ret);
return;
}
@@ -762,7 +851,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
&reg_dump_values[0],
REG_DUMP_COUNT_QCA988X * sizeof(u32));
if (ret != 0) {
- ath10k_err("could not dump FW Dump Area\n");
+ ath10k_err("failed to read FW dump area: %d\n", ret);
return;
}
@@ -777,7 +866,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
reg_dump_values[i + 2],
reg_dump_values[i + 3]);
- ieee80211_queue_work(ar->hw, &ar->restart_work);
+ queue_work(ar->workqueue, &ar->restart_work);
}
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
@@ -815,53 +904,41 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
sizeof(ar_pci->msg_callbacks_current));
}
-static int ath10k_pci_start_ce(struct ath10k *ar)
+static int ath10k_pci_alloc_compl(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
const struct ce_attr *attr;
struct ath10k_pci_pipe *pipe_info;
struct ath10k_pci_compl *compl;
- int i, pipe_num, completions, disable_interrupts;
+ int i, pipe_num, completions;
spin_lock_init(&ar_pci->compl_lock);
INIT_LIST_HEAD(&ar_pci->compl_process);
- for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
spin_lock_init(&pipe_info->pipe_lock);
INIT_LIST_HEAD(&pipe_info->compl_free);
/* Handle Diagnostic CE specially */
- if (pipe_info->ce_hdl == ce_diag)
+ if (pipe_info->ce_hdl == ar_pci->ce_diag)
continue;
attr = &host_ce_config_wlan[pipe_num];
completions = 0;
- if (attr->src_nentries) {
- disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
- ath10k_ce_send_cb_register(pipe_info->ce_hdl,
- ath10k_pci_ce_send_done,
- disable_interrupts);
+ if (attr->src_nentries)
completions += attr->src_nentries;
- }
- if (attr->dest_nentries) {
- ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
- ath10k_pci_ce_recv_data);
+ if (attr->dest_nentries)
completions += attr->dest_nentries;
- }
-
- if (completions == 0)
- continue;
for (i = 0; i < completions; i++) {
compl = kmalloc(sizeof(*compl), GFP_KERNEL);
if (!compl) {
ath10k_warn("No memory for completion state\n");
- ath10k_pci_stop_ce(ar);
+ ath10k_pci_cleanup_ce(ar);
return -ENOMEM;
}
@@ -873,20 +950,55 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
return 0;
}
-static void ath10k_pci_stop_ce(struct ath10k *ar)
+static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_pci_compl *compl;
- struct sk_buff *skb;
- int i;
+ const struct ce_attr *attr;
+ struct ath10k_pci_pipe *pipe_info;
+ int pipe_num, disable_interrupts;
- ath10k_ce_disable_interrupts(ar);
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+ pipe_info = &ar_pci->pipe_info[pipe_num];
+
+ /* Handle Diagnostic CE specially */
+ if (pipe_info->ce_hdl == ar_pci->ce_diag)
+ continue;
+
+ attr = &host_ce_config_wlan[pipe_num];
+
+ if (attr->src_nentries) {
+ disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
+ ath10k_ce_send_cb_register(pipe_info->ce_hdl,
+ ath10k_pci_ce_send_done,
+ disable_interrupts);
+ }
+
+ if (attr->dest_nentries)
+ ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
+ ath10k_pci_ce_recv_data);
+ }
+
+ return 0;
+}
+
+static void ath10k_pci_kill_tasklet(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
- /* Cancel the pending tasklet */
tasklet_kill(&ar_pci->intr_tq);
+ tasklet_kill(&ar_pci->msi_fw_err);
+ tasklet_kill(&ar_pci->early_irq_tasklet);
for (i = 0; i < CE_COUNT; i++)
tasklet_kill(&ar_pci->pipe_info[i].intr);
+}
+
+static void ath10k_pci_stop_ce(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_compl *compl;
+ struct sk_buff *skb;
/* Mark pending completions as aborted, so that upper layers free up
* their associated resources */
@@ -920,7 +1032,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
spin_unlock_bh(&ar_pci->compl_lock);
/* Free unused completions for each pipe. */
- for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
spin_lock_bh(&pipe_info->pipe_lock);
@@ -974,8 +1086,8 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
case ATH10K_PCI_COMPL_RECV:
ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
if (ret) {
- ath10k_warn("Unable to post recv buffer for pipe: %d\n",
- compl->pipe_info->pipe_num);
+ ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
+ compl->pipe_info->pipe_num, ret);
break;
}
@@ -1114,7 +1226,7 @@ static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
for (i = 0; i < num; i++) {
skb = dev_alloc_skb(pipe_info->buf_sz);
if (!skb) {
- ath10k_warn("could not allocate skbuff for pipe %d\n",
+ ath10k_warn("failed to allocate skbuff for pipe %d\n",
num);
ret = -ENOMEM;
goto err;
@@ -1127,7 +1239,7 @@ static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
- ath10k_warn("could not dma map skbuff\n");
+ ath10k_warn("failed to DMA map sk_buff\n");
dev_kfree_skb_any(skb);
ret = -EIO;
goto err;
@@ -1142,7 +1254,7 @@ static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
ce_data);
if (ret) {
- ath10k_warn("could not enqueue to pipe %d (%d)\n",
+ ath10k_warn("failed to enqueue to pipe %d: %d\n",
num, ret);
goto err;
}
@@ -1162,7 +1274,7 @@ static int ath10k_pci_post_rx(struct ath10k *ar)
const struct ce_attr *attr;
int pipe_num, ret = 0;
- for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
attr = &host_ce_config_wlan[pipe_num];
@@ -1172,8 +1284,8 @@ static int ath10k_pci_post_rx(struct ath10k *ar)
ret = ath10k_pci_post_rx_pipe(pipe_info,
attr->dest_nentries - 1);
if (ret) {
- ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
- pipe_num);
+ ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
+ pipe_num, ret);
for (; pipe_num >= 0; pipe_num--) {
pipe_info = &ar_pci->pipe_info[pipe_num];
@@ -1189,23 +1301,58 @@ static int ath10k_pci_post_rx(struct ath10k *ar)
static int ath10k_pci_hif_start(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ret;
+ int ret, ret_early;
- ret = ath10k_pci_start_ce(ar);
+ ath10k_pci_free_early_irq(ar);
+ ath10k_pci_kill_tasklet(ar);
+
+ ret = ath10k_pci_alloc_compl(ar);
if (ret) {
- ath10k_warn("could not start CE (%d)\n", ret);
- return ret;
+ ath10k_warn("failed to allocate CE completions: %d\n", ret);
+ goto err_early_irq;
+ }
+
+ ret = ath10k_pci_request_irq(ar);
+ if (ret) {
+ ath10k_warn("failed to post RX buffers for all pipes: %d\n",
+ ret);
+ goto err_free_compl;
+ }
+
+ ret = ath10k_pci_setup_ce_irq(ar);
+ if (ret) {
+ ath10k_warn("failed to setup CE interrupts: %d\n", ret);
+ goto err_stop;
}
/* Post buffers once to start things off. */
ret = ath10k_pci_post_rx(ar);
if (ret) {
- ath10k_warn("could not post rx pipes (%d)\n", ret);
- return ret;
+ ath10k_warn("failed to post RX buffers for all pipes: %d\n",
+ ret);
+ goto err_stop;
}
ar_pci->started = 1;
return 0;
+
+err_stop:
+ ath10k_ce_disable_interrupts(ar);
+ ath10k_pci_free_irq(ar);
+ ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_stop_ce(ar);
+ ath10k_pci_process_ce(ar);
+err_free_compl:
+ ath10k_pci_cleanup_ce(ar);
+err_early_irq:
+ /* Though there should be no interrupts (the device was reset),
+ * power_down() expects the early IRQ to be installed as per the
+ * driver lifecycle. */
+ ret_early = ath10k_pci_request_early_irq(ar);
+ if (ret_early)
+ ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
+
+ return ret;
}
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
@@ -1271,6 +1418,13 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
* Indicate the completion to higer layer to free
* the buffer
*/
+
+ if (!netbuf) {
+ ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
+ ce_hdl->id);
+ continue;
+ }
+
ATH10K_SKB_CB(netbuf)->is_aborted = true;
ar_pci->msg_callbacks_current.tx_completion(ar,
netbuf,
@@ -1291,7 +1445,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int pipe_num;
- for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
struct ath10k_pci_pipe *pipe_info;
pipe_info = &ar_pci->pipe_info[pipe_num];
@@ -1306,7 +1460,7 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
struct ath10k_pci_pipe *pipe_info;
int pipe_num;
- for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
if (pipe_info->ce_hdl) {
ath10k_ce_deinit(pipe_info->ce_hdl);
@@ -1316,27 +1470,25 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
}
}
-static void ath10k_pci_disable_irqs(struct ath10k *ar)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
-
- for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
- disable_irq(ar_pci->pdev->irq + i);
-}
-
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
- /* Irqs are never explicitly re-enabled. They are implicitly re-enabled
- * by ath10k_pci_start_intr(). */
- ath10k_pci_disable_irqs(ar);
+ ret = ath10k_ce_disable_interrupts(ar);
+ if (ret)
+ ath10k_warn("failed to disable CE interrupts: %d\n", ret);
+ ath10k_pci_free_irq(ar);
+ ath10k_pci_kill_tasklet(ar);
ath10k_pci_stop_ce(ar);
+ ret = ath10k_pci_request_early_irq(ar);
+ if (ret)
+ ath10k_warn("failed to re-enable early irq: %d\n", ret);
+
/* At this point, asynchronous threads are stopped, the target should
* not DMA nor interrupt. We process the leftovers and then free
* everything else up. */
@@ -1345,6 +1497,13 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_cleanup_ce(ar);
ath10k_pci_buffer_cleanup(ar);
+ /* Make sure the device won't access any structures on the host by
+ * resetting it. The device was fed with PCI CE ringbuffer
+ * configuration during init. If ringbuffers are freed and the device
+ * were to access them this could lead to memory corruption on the
+ * host. */
+ ath10k_pci_device_reset(ar);
+
ar_pci->started = 0;
}
@@ -1363,6 +1522,8 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
void *treq, *tresp = NULL;
int ret = 0;
+ might_sleep();
+
if (resp && !resp_len)
return -EINVAL;
@@ -1403,14 +1564,12 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
if (ret)
goto err_resp;
- ret = wait_for_completion_timeout(&xfer.done,
- BMI_COMMUNICATION_TIMEOUT_HZ);
- if (ret <= 0) {
+ ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
+ if (ret) {
u32 unused_buffer;
unsigned int unused_nbytes;
unsigned int unused_id;
- ret = -ETIMEDOUT;
ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
&unused_nbytes, &unused_id);
} else {
@@ -1478,6 +1637,25 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
complete(&xfer->done);
}
+static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
+ struct ath10k_ce_pipe *rx_pipe,
+ struct bmi_xfer *xfer)
+{
+ unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
+
+ while (time_before_eq(jiffies, timeout)) {
+ ath10k_pci_bmi_send_done(tx_pipe);
+ ath10k_pci_bmi_recv_data(rx_pipe);
+
+ if (completion_done(&xfer->done))
+ return 0;
+
+ schedule();
+ }
+
+ return -ETIMEDOUT;
+}
+
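
The polling wait above services both BMI copy engines directly from the calling context instead of waiting for interrupts, calling schedule() between polls; this is also why ath10k_pci_hif_exchange_bmi_msg() now starts with might_sleep() - the BMI exchange must not be issued from atomic context.
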
/*
* Map from service/endpoint to Copy Engine.
* This table is derived from the CE_PCI TABLE, above.
@@ -1587,7 +1765,7 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
CORE_CTRL_ADDRESS,
&core_ctrl);
if (ret) {
- ath10k_warn("Unable to read core ctrl\n");
+ ath10k_warn("failed to read core_ctrl: %d\n", ret);
return ret;
}
@@ -1597,10 +1775,13 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
CORE_CTRL_ADDRESS,
core_ctrl);
- if (ret)
- ath10k_warn("Unable to set interrupt mask\n");
+ if (ret) {
+ ath10k_warn("failed to set target CPU interrupt mask: %d\n",
+ ret);
+ return ret;
+ }
- return ret;
+ return 0;
}
static int ath10k_pci_init_config(struct ath10k *ar)
@@ -1751,7 +1932,7 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
const struct ce_attr *attr;
int pipe_num;
- for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
pipe_info->pipe_num = pipe_num;
pipe_info->hif_ce_state = ar;
@@ -1759,7 +1940,7 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
if (pipe_info->ce_hdl == NULL) {
- ath10k_err("Unable to initialize CE for pipe: %d\n",
+ ath10k_err("failed to initialize CE for pipe: %d\n",
pipe_num);
/* It is safe to call it here. It checks if ce_hdl is
@@ -1768,31 +1949,18 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
return -1;
}
- if (pipe_num == ar_pci->ce_count - 1) {
+ if (pipe_num == CE_COUNT - 1) {
/*
* Reserve the ultimate CE for
* diagnostic Window support
*/
- ar_pci->ce_diag =
- ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
+ ar_pci->ce_diag = pipe_info->ce_hdl;
continue;
}
pipe_info->buf_sz = (size_t) (attr->src_sz_max);
}
- /*
- * Initially, establish CE completion handlers for use with BMI.
- * These are overwritten with generic handlers after we exit BMI phase.
- */
- pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
- ath10k_ce_send_cb_register(pipe_info->ce_hdl,
- ath10k_pci_bmi_send_done, 0);
-
- pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
- ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
- ath10k_pci_bmi_recv_data);
-
return 0;
}
@@ -1828,14 +1996,9 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ const char *irq_mode;
int ret;
- ret = ath10k_pci_start_intr(ar);
- if (ret) {
- ath10k_err("could not start interrupt handling (%d)\n", ret);
- goto err;
- }
-
/*
* Bring the target up cleanly.
*
@@ -1846,39 +2009,80 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
* is in an unexpected state. We try to catch that here in order to
* reset the Target and retry the probe.
*/
- ath10k_pci_device_reset(ar);
-
- ret = ath10k_pci_reset_target(ar);
- if (ret)
- goto err_irq;
+ ret = ath10k_pci_device_reset(ar);
+ if (ret) {
+ ath10k_err("failed to reset target: %d\n", ret);
+ goto err;
+ }
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
/* Force AWAKE forever */
ath10k_do_pci_wake(ar);
ret = ath10k_pci_ce_init(ar);
- if (ret)
+ if (ret) {
+ ath10k_err("failed to initialize CE: %d\n", ret);
goto err_ps;
+ }
- ret = ath10k_pci_init_config(ar);
- if (ret)
+ ret = ath10k_ce_disable_interrupts(ar);
+ if (ret) {
+ ath10k_err("failed to disable CE interrupts: %d\n", ret);
goto err_ce;
+ }
- ret = ath10k_pci_wake_target_cpu(ar);
+ ret = ath10k_pci_init_irq(ar);
if (ret) {
- ath10k_err("could not wake up target CPU (%d)\n", ret);
+ ath10k_err("failed to init irqs: %d\n", ret);
goto err_ce;
}
+ ret = ath10k_pci_request_early_irq(ar);
+ if (ret) {
+ ath10k_err("failed to request early irq: %d\n", ret);
+ goto err_deinit_irq;
+ }
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_err("failed to wait for target to init: %d\n", ret);
+ goto err_free_early_irq;
+ }
+
+ ret = ath10k_pci_init_config(ar);
+ if (ret) {
+ ath10k_err("failed to setup init config: %d\n", ret);
+ goto err_free_early_irq;
+ }
+
+ ret = ath10k_pci_wake_target_cpu(ar);
+ if (ret) {
+ ath10k_err("could not wake up target CPU: %d\n", ret);
+ goto err_free_early_irq;
+ }
+
+ if (ar_pci->num_msi_intrs > 1)
+ irq_mode = "MSI-X";
+ else if (ar_pci->num_msi_intrs == 1)
+ irq_mode = "MSI";
+ else
+ irq_mode = "legacy";
+
+ if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
+ ath10k_info("pci irq %s\n", irq_mode);
+
return 0;
+err_free_early_irq:
+ ath10k_pci_free_early_irq(ar);
+err_deinit_irq:
+ ath10k_pci_deinit_irq(ar);
err_ce:
ath10k_pci_ce_deinit(ar);
+ ath10k_pci_device_reset(ar);
err_ps:
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_sleep(ar);
-err_irq:
- ath10k_pci_stop_intr(ar);
err:
return ret;
}
@@ -1887,7 +2091,10 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- ath10k_pci_stop_intr(ar);
+ ath10k_pci_free_early_irq(ar);
+ ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_deinit_irq(ar);
+ ath10k_pci_device_reset(ar);
ath10k_pci_ce_deinit(ar);
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
@@ -2023,25 +2230,10 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (ar_pci->num_msi_intrs == 0) {
- /*
- * IMPORTANT: INTR_CLR regiser has to be set after
- * INTR_ENABLE is set to 0, otherwise interrupt can not be
- * really cleared.
- */
- iowrite32(0, ar_pci->mem +
- (SOC_CORE_BASE_ADDRESS |
- PCIE_INTR_ENABLE_ADDRESS));
- iowrite32(PCIE_INTR_FIRMWARE_MASK |
- PCIE_INTR_CE_MASK_ALL,
- ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
- PCIE_INTR_CLR_ADDRESS));
- /*
- * IMPORTANT: this extra read transaction is required to
- * flush the posted write buffer.
- */
- (void) ioread32(ar_pci->mem +
- (SOC_CORE_BASE_ADDRESS |
- PCIE_INTR_ENABLE_ADDRESS));
+ if (!ath10k_pci_irq_pending(ar))
+ return IRQ_NONE;
+
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
}
tasklet_schedule(&ar_pci->intr_tq);
@@ -2049,6 +2241,34 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+static void ath10k_pci_early_irq_tasklet(unsigned long data)
+{
+ struct ath10k *ar = (struct ath10k *)data;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 fw_ind;
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_warn("failed to wake target in early irq tasklet: %d\n",
+ ret);
+ return;
+ }
+
+ fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
+ if (fw_ind & FW_IND_EVENT_PENDING) {
+ ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
+ fw_ind & ~FW_IND_EVENT_PENDING);
+
+ /* Some structures are unavailable during early boot or at
+ * driver teardown, so just print that the device has crashed. */
+ ath10k_warn("device crashed - no diagnostics available\n");
+ }
+
+ ath10k_pci_sleep(ar);
+ ath10k_pci_enable_legacy_irq(ar);
+}
+
static void ath10k_pci_tasklet(unsigned long data)
{
struct ath10k *ar = (struct ath10k *)data;
@@ -2057,40 +2277,22 @@ static void ath10k_pci_tasklet(unsigned long data)
ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
ath10k_ce_per_engine_service_any(ar);
- if (ar_pci->num_msi_intrs == 0) {
- /* Enable Legacy PCI line interrupts */
- iowrite32(PCIE_INTR_FIRMWARE_MASK |
- PCIE_INTR_CE_MASK_ALL,
- ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
- PCIE_INTR_ENABLE_ADDRESS));
- /*
- * IMPORTANT: this extra read transaction is required to
- * flush the posted write buffer
- */
- (void) ioread32(ar_pci->mem +
- (SOC_CORE_BASE_ADDRESS |
- PCIE_INTR_ENABLE_ADDRESS));
- }
+ /* Re-enable legacy irq that was disabled in the irq handler */
+ if (ar_pci->num_msi_intrs == 0)
+ ath10k_pci_enable_legacy_irq(ar);
}
-static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
+static int ath10k_pci_request_irq_msix(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ret;
- int i;
-
- ret = pci_enable_msi_block(ar_pci->pdev, num);
- if (ret)
- return ret;
+ int ret, i;
ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
ath10k_pci_msi_fw_handler,
IRQF_SHARED, "ath10k_pci", ar);
if (ret) {
- ath10k_warn("request_irq(%d) failed %d\n",
+ ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
-
- pci_disable_msi(ar_pci->pdev);
return ret;
}
@@ -2099,44 +2301,38 @@ static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
ath10k_pci_per_engine_handler,
IRQF_SHARED, "ath10k_pci", ar);
if (ret) {
- ath10k_warn("request_irq(%d) failed %d\n",
+ ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
ar_pci->pdev->irq + i, ret);
for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
free_irq(ar_pci->pdev->irq + i, ar);
free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
- pci_disable_msi(ar_pci->pdev);
return ret;
}
}
- ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
return 0;
}
-static int ath10k_pci_start_intr_msi(struct ath10k *ar)
+static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
- ret = pci_enable_msi(ar_pci->pdev);
- if (ret < 0)
- return ret;
-
ret = request_irq(ar_pci->pdev->irq,
ath10k_pci_interrupt_handler,
IRQF_SHARED, "ath10k_pci", ar);
- if (ret < 0) {
- pci_disable_msi(ar_pci->pdev);
+ if (ret) {
+ ath10k_warn("failed to request MSI irq %d: %d\n",
+ ar_pci->pdev->irq, ret);
return ret;
}
- ath10k_info("MSI interrupt handling\n");
return 0;
}
-static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
+static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
@@ -2144,112 +2340,165 @@ static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
ret = request_irq(ar_pci->pdev->irq,
ath10k_pci_interrupt_handler,
IRQF_SHARED, "ath10k_pci", ar);
- if (ret < 0)
+ if (ret) {
+ ath10k_warn("failed to request legacy irq %d: %d\n",
+ ar_pci->pdev->irq, ret);
return ret;
+ }
- /*
- * Make sure to wake the Target before enabling Legacy
- * Interrupt.
- */
- iowrite32(PCIE_SOC_WAKE_V_MASK,
- ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
- PCIE_SOC_WAKE_ADDRESS);
+ return 0;
+}
+
+static int ath10k_pci_request_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- ath10k_pci_wait(ar);
+ switch (ar_pci->num_msi_intrs) {
+ case 0:
+ return ath10k_pci_request_irq_legacy(ar);
+ case 1:
+ return ath10k_pci_request_irq_msi(ar);
+ case MSI_NUM_REQUEST:
+ return ath10k_pci_request_irq_msix(ar);
+ }
- /*
- * A potential race occurs here: The CORE_BASE write
- * depends on target correctly decoding AXI address but
- * host won't know when target writes BAR to CORE_CTRL.
- * This write might get lost if target has NOT written BAR.
- * For now, fix the race by repeating the write in below
- * synchronization checking.
- */
- iowrite32(PCIE_INTR_FIRMWARE_MASK |
- PCIE_INTR_CE_MASK_ALL,
- ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
- PCIE_INTR_ENABLE_ADDRESS));
- iowrite32(PCIE_SOC_WAKE_RESET,
- ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
- PCIE_SOC_WAKE_ADDRESS);
-
- ath10k_info("legacy interrupt handling\n");
- return 0;
+ ath10k_warn("unknown irq configuration upon request\n");
+ return -EINVAL;
}
-static int ath10k_pci_start_intr(struct ath10k *ar)
+static void ath10k_pci_free_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
+
+ /* There's at least one interrupt regardless of whether it's legacy INTR,
+ * MSI or MSI-X */
+ for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+ free_irq(ar_pci->pdev->irq + i, ar);
+}
+
+static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int num = MSI_NUM_REQUEST;
- int ret;
int i;
- tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
+ tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
- (unsigned long) ar);
+ (unsigned long)ar);
+ tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
+ (unsigned long)ar);
for (i = 0; i < CE_COUNT; i++) {
ar_pci->pipe_info[i].ar_pci = ar_pci;
- tasklet_init(&ar_pci->pipe_info[i].intr,
- ath10k_pci_ce_tasklet,
+ tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
(unsigned long)&ar_pci->pipe_info[i]);
}
+}
+
+static int ath10k_pci_init_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
+ ar_pci->features);
+ int ret;
- if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
- num = 1;
+ ath10k_pci_init_irq_tasklets(ar);
- if (num > 1) {
- ret = ath10k_pci_start_intr_msix(ar, num);
+ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
+ !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
+ ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
+
+ /* Try MSI-X */
+ if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
+ ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
+ ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
if (ret == 0)
- goto exit;
+ return 0;
+ if (ret > 0)
+ pci_disable_msi(ar_pci->pdev);
- ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
- num = 1;
+ /* fall-through */
}
- if (num == 1) {
- ret = ath10k_pci_start_intr_msi(ar);
+ /* Try MSI */
+ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
+ ar_pci->num_msi_intrs = 1;
+ ret = pci_enable_msi(ar_pci->pdev);
if (ret == 0)
- goto exit;
+ return 0;
- ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
- ret);
- num = 0;
+ /* fall-through */
}
- ret = ath10k_pci_start_intr_legacy(ar);
+ /* Try legacy irq
+ *
+ * A potential race occurs here: the CORE_BASE write
+ * depends on the target correctly decoding the AXI address, but the
+ * host won't know when the target writes BAR to CORE_CTRL.
+ * This write might get lost if the target has NOT written BAR.
+ * For now, fix the race by repeating the write in the
+ * synchronization check below. */
+ ar_pci->num_msi_intrs = 0;
-exit:
- ar_pci->num_msi_intrs = num;
- ar_pci->ce_count = CE_COUNT;
- return ret;
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_warn("failed to wake target: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+ ath10k_pci_sleep(ar);
+
+ return 0;
}
-static void ath10k_pci_stop_intr(struct ath10k *ar)
+static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
+ int ret;
- /* There's at least one interrupt irregardless whether its legacy INTR
- * or MSI or MSI-X */
- for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
- free_irq(ar_pci->pdev->irq + i, ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_warn("failed to wake target: %d\n", ret);
+ return ret;
+ }
- if (ar_pci->num_msi_intrs > 0)
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+ 0);
+ ath10k_pci_sleep(ar);
+
+ return 0;
+}
+
+static int ath10k_pci_deinit_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ switch (ar_pci->num_msi_intrs) {
+ case 0:
+ return ath10k_pci_deinit_irq_legacy(ar);
+ case 1:
+ /* fall-through */
+ case MSI_NUM_REQUEST:
pci_disable_msi(ar_pci->pdev);
+ return 0;
+ }
+
+ ath10k_warn("unknown irq configuration upon deinit\n");
+ return -EINVAL;
}
-static int ath10k_pci_reset_target(struct ath10k *ar)
+static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int wait_limit = 300; /* 3 sec */
+ int ret;
- /* Wait for Target to finish initialization before we proceed. */
- iowrite32(PCIE_SOC_WAKE_V_MASK,
- ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
- PCIE_SOC_WAKE_ADDRESS);
-
- ath10k_pci_wait(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_err("failed to wake up target: %d\n", ret);
+ return ret;
+ }
while (wait_limit-- &&
!(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
@@ -2264,34 +2513,26 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
}
if (wait_limit < 0) {
- ath10k_err("Target stalled\n");
- iowrite32(PCIE_SOC_WAKE_RESET,
- ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
- PCIE_SOC_WAKE_ADDRESS);
- return -EIO;
+ ath10k_err("target stalled\n");
+ ret = -EIO;
+ goto out;
}
- iowrite32(PCIE_SOC_WAKE_RESET,
- ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
- PCIE_SOC_WAKE_ADDRESS);
-
- return 0;
+out:
+ ath10k_pci_sleep(ar);
+ return ret;
}
-static void ath10k_pci_device_reset(struct ath10k *ar)
+static int ath10k_pci_device_reset(struct ath10k *ar)
{
- int i;
+ int i, ret;
u32 val;
- if (!SOC_GLOBAL_RESET_ADDRESS)
- return;
-
- ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
- PCIE_SOC_WAKE_V_MASK);
- for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
- if (ath10k_pci_target_is_awake(ar))
- break;
- msleep(1);
+ ret = ath10k_do_pci_wake(ar);
+ if (ret) {
+ ath10k_err("failed to wake up target: %d\n",
+ ret);
+ return ret;
}
/* Put Target, including PCIe, into RESET. */
@@ -2317,7 +2558,8 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
msleep(1);
}
- ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
+ ath10k_do_pci_sleep(ar);
+ return 0;
}
static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
@@ -2374,7 +2616,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
if (!ar) {
- ath10k_err("ath10k_core_create failed!\n");
+ ath10k_err("failed to create driver core\n");
ret = -EINVAL;
goto err_ar_pci;
}
@@ -2393,20 +2635,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
*/
ret = pci_assign_resource(pdev, BAR_NUM);
if (ret) {
- ath10k_err("cannot assign PCI space: %d\n", ret);
+ ath10k_err("failed to assign PCI space: %d\n", ret);
goto err_ar;
}
ret = pci_enable_device(pdev);
if (ret) {
- ath10k_err("cannot enable PCI device: %d\n", ret);
+ ath10k_err("failed to enable PCI device: %d\n", ret);
goto err_ar;
}
/* Request MMIO resources */
ret = pci_request_region(pdev, BAR_NUM, "ath");
if (ret) {
- ath10k_err("PCI MMIO reservation error: %d\n", ret);
+ ath10k_err("failed to request MMIO region: %d\n", ret);
goto err_device;
}
@@ -2416,13 +2658,13 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
*/
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
- ath10k_err("32-bit DMA not available: %d\n", ret);
+ ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
goto err_region;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
- ath10k_err("cannot enable 32-bit consistent DMA\n");
+ ath10k_err("failed to set consistent DMA mask to 32-bit\n");
goto err_region;
}
@@ -2439,7 +2681,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
/* Arrange for access to Target SoC registers. */
mem = pci_iomap(pdev, BAR_NUM, 0);
if (!mem) {
- ath10k_err("PCI iomap error\n");
+ ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
ret = -EIO;
goto err_master;
}
@@ -2451,11 +2693,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ret = ath10k_do_pci_wake(ar);
if (ret) {
ath10k_err("Failed to get chip id: %d\n", ret);
- return ret;
+ goto err_iomap;
}
- chip_id = ath10k_pci_read32(ar,
- RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS);
+ chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
ath10k_do_pci_sleep(ar);
@@ -2463,7 +2704,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ret = ath10k_core_register(ar, chip_id);
if (ret) {
- ath10k_err("could not register driver core (%d)\n", ret);
+ ath10k_err("failed to register driver core: %d\n", ret);
goto err_iomap;
}
@@ -2529,7 +2770,7 @@ static int __init ath10k_pci_init(void)
ret = pci_register_driver(&ath10k_pci_driver);
if (ret)
- ath10k_err("pci_register_driver failed [%d]\n", ret);
+ ath10k_err("failed to register PCI driver: %d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 52fb7b973571..a4f32038c440 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -198,9 +198,7 @@ struct ath10k_pci {
struct tasklet_struct intr_tq;
struct tasklet_struct msi_fw_err;
-
- /* Number of Copy Engines supported */
- unsigned int ce_count;
+ struct tasklet_struct early_irq_tasklet;
int started;
@@ -318,6 +316,16 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
return ioread32(ar_pci->mem + offset);
}
+static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
+{
+ return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
+}
+
+static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
+}
+
int ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 90817ddc92ba..4eb2ecbc06ef 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -182,6 +182,27 @@ TRACE_EVENT(ath10k_htt_stats,
)
);
+TRACE_EVENT(ath10k_wmi_dbglog,
+ TP_PROTO(void *buf, size_t buf_len),
+
+ TP_ARGS(buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "len %zu",
+ __entry->buf_len
+ )
+);
+
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 5ae373a1e294..74f45fa6f428 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -75,6 +75,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
ath10k_report_offchan_tx(htt->ar, msdu);
info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
@@ -183,7 +184,7 @@ static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
/* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
TODO check this */
mcs = (info2 >> 4) & 0x0F;
- nss = (info1 >> 10) & 0x07;
+ nss = ((info1 >> 10) & 0x07) + 1;
bw = info1 & 3;
sgi = info2 & 1;
@@ -230,12 +231,15 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
~IEEE80211_FCTL_PROTECTED);
}
- if (info->status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
+ if (info->mic_err)
status->flag |= RX_FLAG_MMIC_ERROR;
if (info->fcs_err)
status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (info->amsdu_more)
+ status->flag |= RX_FLAG_AMSDU_MORE;
+
status->signal = info->signal;
spin_lock_bh(&ar->data_lock);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index ccf3597fd9e2..712a606a080a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -16,6 +16,7 @@
*/
#include <linux/skbuff.h>
+#include <linux/ctype.h>
#include "core.h"
#include "htc.h"
@@ -674,10 +675,8 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
/* Send the management frame buffer to the target */
ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
- if (ret) {
- dev_kfree_skb_any(skb);
+ if (ret)
return ret;
- }
/* TODO: report tx status to mac80211 - temporary just ACK */
info->flags |= IEEE80211_TX_STAT_ACK;
@@ -877,6 +876,7 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
struct wmi_mgmt_rx_event_v2 *ev_v2;
struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_channel *ch;
struct ieee80211_hdr *hdr;
u32 rx_status;
u32 channel;
@@ -909,6 +909,11 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ATH10K_DBG_MGMT,
"event mgmt rx status %08x\n", rx_status);
+ if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
dev_kfree_skb(skb);
return 0;
@@ -924,7 +929,25 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
if (rx_status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
- status->band = phy_mode_to_band(phy_mode);
+ /* HW can Rx CCK rates on 5GHz. In that case phy_mode is set to
+ * MODE_11B. This means phy_mode is not a reliable source for the band
+ * of mgmt rx. */
+
+ ch = ar->scan_channel;
+ if (!ch)
+ ch = ar->rx_channel;
+
+ if (ch) {
+ status->band = ch->band;
+
+ if (phy_mode == MODE_11B &&
+ status->band == IEEE80211_BAND_5GHZ)
+ ath10k_dbg(ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
+ } else {
+ ath10k_warn("using (unreliable) phy_mode to extract band for mgmt rx\n");
+ status->band = phy_mode_to_band(phy_mode);
+ }
+
status->freq = ieee80211_channel_to_frequency(channel, status->band);
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
status->rate_idx = get_rate_idx(rate, status->band);
@@ -934,7 +957,11 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
- if (fc & IEEE80211_FCTL_PROTECTED) {
+ /* FW delivers WEP Shared Auth frame with Protected Bit set and
+	 * encrypted payload. However, in case of PMF it delivers decrypted
+ * frames with Protected Bit set. */
+ if (ieee80211_has_protected(hdr->frame_control) &&
+ !ieee80211_is_auth(hdr->frame_control)) {
status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED;
hdr->frame_control = __cpu_to_le16(fc &
@@ -1044,9 +1071,14 @@ static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
}
-static void ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_MESG_EVENTID\n");
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
+ skb->len);
+
+ trace_ath10k_wmi_dbglog(skb->data, skb->len);
+
+ return 0;
}
static void ath10k_wmi_event_update_stats(struct ath10k *ar,
@@ -1383,9 +1415,259 @@ static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
}
+static void ath10k_dfs_radar_report(struct ath10k *ar,
+ struct wmi_single_phyerr_rx_event *event,
+ struct phyerr_radar_report *rr,
+ u64 tsf)
+{
+ u32 reg0, reg1, tsf32l;
+ struct pulse_event pe;
+ u64 tsf64;
+ u8 rssi, width;
+
+ reg0 = __le32_to_cpu(rr->reg0);
+ reg1 = __le32_to_cpu(rr->reg1);
+
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
+ MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
+ MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+		   "wmi phyerr radar report pulse_delta_peak %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
+ MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
+ MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
+
+ if (!ar->dfs_detector)
+ return;
+
+ /* report event to DFS pattern detector */
+ tsf32l = __le32_to_cpu(event->hdr.tsf_timestamp);
+ tsf64 = tsf & (~0xFFFFFFFFULL);
+ tsf64 |= tsf32l;
+
+ width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
+ rssi = event->hdr.rssi_combined;
+
+	/* hardware stores this as an 8 bit signed value;
+	 * set it to zero if it is negative
+ */
+ if (rssi & 0x80)
+ rssi = 0;
+
+ pe.ts = tsf64;
+ pe.freq = ar->hw->conf.chandef.chan->center_freq;
+ pe.width = width;
+ pe.rssi = rssi;
+
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
+ pe.freq, pe.width, pe.rssi, pe.ts);
+
+ ATH10K_DFS_STAT_INC(ar, pulses_detected);
+
+ if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "dfs no pulse pattern detected, yet\n");
+ return;
+ }
+
+ ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs radar detected\n");
+ ATH10K_DFS_STAT_INC(ar, radar_detected);
+
+ /* Control radar events reporting in debugfs file
+ dfs_block_radar_events */
+ if (ar->dfs_block_radar_events) {
+ ath10k_info("DFS Radar detected, but ignored as requested\n");
+ return;
+ }
+
+ ieee80211_radar_detected(ar->hw);
+}
+
+static int ath10k_dfs_fft_report(struct ath10k *ar,
+ struct wmi_single_phyerr_rx_event *event,
+ struct phyerr_fft_report *fftr,
+ u64 tsf)
+{
+ u32 reg0, reg1;
+ u8 rssi, peak_mag;
+
+ reg0 = __le32_to_cpu(fftr->reg0);
+ reg1 = __le32_to_cpu(fftr->reg1);
+ rssi = event->hdr.rssi_combined;
+
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
+ MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
+ MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
+ MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
+ MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
+ MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
+
+ peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
+
+ /* false event detection */
+ if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
+ peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
+ ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
+ ATH10K_DFS_STAT_INC(ar, pulses_discarded);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void ath10k_wmi_event_dfs(struct ath10k *ar,
+ struct wmi_single_phyerr_rx_event *event,
+ u64 tsf)
+{
+ int buf_len, tlv_len, res, i = 0;
+ struct phyerr_tlv *tlv;
+ struct phyerr_radar_report *rr;
+ struct phyerr_fft_report *fftr;
+ u8 *tlv_buf;
+
+ buf_len = __le32_to_cpu(event->hdr.buf_len);
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
+ event->hdr.phy_err_code, event->hdr.rssi_combined,
+ __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len);
+
+ /* Skip event if DFS disabled */
+ if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
+ return;
+
+ ATH10K_DFS_STAT_INC(ar, pulses_total);
+
+ while (i < buf_len) {
+ if (i + sizeof(*tlv) > buf_len) {
+ ath10k_warn("too short buf for tlv header (%d)\n", i);
+ return;
+ }
+
+ tlv = (struct phyerr_tlv *)&event->bufp[i];
+ tlv_len = __le16_to_cpu(tlv->len);
+ tlv_buf = &event->bufp[i + sizeof(*tlv)];
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
+ tlv_len, tlv->tag, tlv->sig);
+
+ switch (tlv->tag) {
+ case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
+ if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
+ ath10k_warn("too short radar pulse summary (%d)\n",
+ i);
+ return;
+ }
+
+ rr = (struct phyerr_radar_report *)tlv_buf;
+ ath10k_dfs_radar_report(ar, event, rr, tsf);
+ break;
+ case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
+ if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
+ ath10k_warn("too short fft report (%d)\n", i);
+ return;
+ }
+
+ fftr = (struct phyerr_fft_report *)tlv_buf;
+ res = ath10k_dfs_fft_report(ar, event, fftr, tsf);
+ if (res)
+ return;
+ break;
+ }
+
+ i += sizeof(*tlv) + tlv_len;
+ }
+}
+
+static void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
+ struct wmi_single_phyerr_rx_event *event,
+ u64 tsf)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi event spectral scan\n");
+}
+
static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n");
+ struct wmi_comb_phyerr_rx_event *comb_event;
+ struct wmi_single_phyerr_rx_event *event;
+ u32 count, i, buf_len, phy_err_code;
+ u64 tsf;
+ int left_len = skb->len;
+
+ ATH10K_DFS_STAT_INC(ar, phy_errors);
+
+ /* Check if combined event available */
+ if (left_len < sizeof(*comb_event)) {
+ ath10k_warn("wmi phyerr combined event wrong len\n");
+ return;
+ }
+
+ left_len -= sizeof(*comb_event);
+
+ /* Check number of included events */
+ comb_event = (struct wmi_comb_phyerr_rx_event *)skb->data;
+ count = __le32_to_cpu(comb_event->hdr.num_phyerr_events);
+
+ tsf = __le32_to_cpu(comb_event->hdr.tsf_u32);
+ tsf <<= 32;
+ tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi event phyerr count %d tsf64 0x%llX\n",
+ count, tsf);
+
+ event = (struct wmi_single_phyerr_rx_event *)comb_event->bufp;
+ for (i = 0; i < count; i++) {
+ /* Check if we can read event header */
+ if (left_len < sizeof(*event)) {
+ ath10k_warn("single event (%d) wrong head len\n", i);
+ return;
+ }
+
+ left_len -= sizeof(*event);
+
+ buf_len = __le32_to_cpu(event->hdr.buf_len);
+ phy_err_code = event->hdr.phy_err_code;
+
+ if (left_len < buf_len) {
+ ath10k_warn("single event (%d) wrong buf len\n", i);
+ return;
+ }
+
+ left_len -= buf_len;
+
+ switch (phy_err_code) {
+ case PHY_ERROR_RADAR:
+ ath10k_wmi_event_dfs(ar, event, tsf);
+ break;
+ case PHY_ERROR_SPECTRAL_SCAN:
+ ath10k_wmi_event_spectral_scan(ar, event, tsf);
+ break;
+ case PHY_ERROR_FALSE_RADAR_EXT:
+ ath10k_wmi_event_dfs(ar, event, tsf);
+ ath10k_wmi_event_spectral_scan(ar, event, tsf);
+ break;
+ default:
+ break;
+ }
+
+ event += sizeof(*event) + buf_len;
+ }
}
static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
@@ -1400,9 +1682,37 @@ static void ath10k_wmi_event_profile_match(struct ath10k *ar,
}
static void ath10k_wmi_event_debug_print(struct ath10k *ar,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_PRINT_EVENTID\n");
+ char buf[101], c;
+ int i;
+
+ for (i = 0; i < sizeof(buf) - 1; i++) {
+ if (i >= skb->len)
+ break;
+
+ c = skb->data[i];
+
+ if (c == '\0')
+ break;
+
+ if (isascii(c) && isprint(c))
+ buf[i] = c;
+ else
+ buf[i] = '.';
+ }
+
+ if (i == sizeof(buf) - 1)
+ ath10k_warn("wmi debug print truncated: %d\n", skb->len);
+
+ /* for some reason the debug prints end with \n, remove that */
+ if (skb->data[i - 1] == '\n')
+ i--;
+
+ /* the last byte is always reserved for the null character */
+ buf[i] = '\0';
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf);
}
static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
@@ -2062,6 +2372,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
{
struct wmi_set_channel_cmd *cmd;
struct sk_buff *skb;
+ u32 ch_flags = 0;
if (arg->passive)
return -EINVAL;
@@ -2070,10 +2381,14 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
if (!skb)
return -ENOMEM;
+ if (arg->chan_radar)
+ ch_flags |= WMI_CHAN_FLAG_DFS;
+
cmd = (struct wmi_set_channel_cmd *)skb->data;
cmd->chan.mhz = __cpu_to_le32(arg->freq);
cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
cmd->chan.mode = arg->mode;
+ cmd->chan.flags |= __cpu_to_le32(ch_flags);
cmd->chan.min_power = arg->min_power;
cmd->chan.max_power = arg->max_power;
cmd->chan.reg_power = arg->max_reg_power;
@@ -2211,7 +2526,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
}
ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
- __cpu_to_le32(ar->wmi.num_mem_chunks));
+ ar->wmi.num_mem_chunks);
cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
@@ -2224,10 +2539,10 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
ath10k_dbg(ATH10K_DBG_WMI,
- "wmi chunk %d len %d requested, addr 0x%x\n",
+ "wmi chunk %d len %d requested, addr 0x%llx\n",
i,
- cmd->host_mem_chunks[i].size,
- cmd->host_mem_chunks[i].ptr);
+ ar->wmi.mem_chunks[i].len,
+ (unsigned long long)ar->wmi.mem_chunks[i].paddr);
}
out:
memcpy(&cmd->resource_config, &config, sizeof(config));
@@ -2302,7 +2617,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
}
ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
- __cpu_to_le32(ar->wmi.num_mem_chunks));
+ ar->wmi.num_mem_chunks);
cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
@@ -2315,10 +2630,10 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
ath10k_dbg(ATH10K_DBG_WMI,
- "wmi chunk %d len %d requested, addr 0x%x\n",
+ "wmi chunk %d len %d requested, addr 0x%llx\n",
i,
- cmd->host_mem_chunks[i].size,
- cmd->host_mem_chunks[i].ptr);
+ ar->wmi.mem_chunks[i].len,
+ (unsigned long long)ar->wmi.mem_chunks[i].paddr);
}
out:
memcpy(&cmd->resource_config, &config, sizeof(config));
@@ -2622,6 +2937,7 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
struct sk_buff *skb;
const char *cmdname;
u32 flags = 0;
+ u32 ch_flags = 0;
if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
@@ -2648,6 +2964,8 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
flags |= WMI_VDEV_START_HIDDEN_SSID;
if (arg->pmf_enabled)
flags |= WMI_VDEV_START_PMF_ENABLED;
+ if (arg->channel.chan_radar)
+ ch_flags |= WMI_CHAN_FLAG_DFS;
cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
@@ -2669,6 +2987,7 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
__cpu_to_le32(arg->channel.band_center_freq1);
cmd->chan.mode = arg->channel.mode;
+ cmd->chan.flags |= __cpu_to_le32(ch_flags);
cmd->chan.min_power = arg->channel.min_power;
cmd->chan.max_power = arg->channel.max_power;
cmd->chan.reg_power = arg->channel.max_reg_power;
@@ -2676,9 +2995,10 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
cmd->chan.antenna_max = arg->channel.max_antenna_gain;
ath10k_dbg(ATH10K_DBG_WMI,
- "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%0X,"
- "max_power: %d\n", cmdname, arg->vdev_id, arg->channel.freq,
- arg->channel.mode, flags, arg->channel.max_power);
+ "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, "
+ "ch_flags: 0x%0X, max_power: %d\n", cmdname, arg->vdev_id,
+ flags, arg->channel.freq, arg->channel.mode,
+ cmd->chan.flags, arg->channel.max_power);
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
@@ -3012,6 +3332,8 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
flags |= WMI_CHAN_FLAG_ALLOW_VHT;
if (ch->ht40plus)
flags |= WMI_CHAN_FLAG_HT40_PLUS;
+ if (ch->chan_radar)
+ flags |= WMI_CHAN_FLAG_DFS;
ci->mhz = __cpu_to_le32(ch->freq);
ci->band_center_freq1 = __cpu_to_le32(ch->freq);
@@ -3094,6 +3416,7 @@ int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
{
struct wmi_bcn_tx_cmd *cmd;
struct sk_buff *skb;
+ int ret;
skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len);
if (!skb)
@@ -3106,7 +3429,11 @@ int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
- return ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
+ ret = ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
+ if (ret)
+ dev_kfree_skb(skb);
+
+ return ret;
}
static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
@@ -3175,3 +3502,40 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
type, delay_ms);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}
+
+int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
+{
+ struct wmi_dbglog_cfg_cmd *cmd;
+ struct sk_buff *skb;
+ u32 cfg;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
+
+ if (module_enable) {
+ cfg = SM(ATH10K_DBGLOG_LEVEL_VERBOSE,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ } else {
+ /* set back defaults, all modules with WARN level */
+ cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ module_enable = ~0;
+ }
+
+ cmd->module_enable = __cpu_to_le32(module_enable);
+ cmd->module_valid = __cpu_to_le32(~0);
+ cmd->config_enable = __cpu_to_le32(cfg);
+ cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
+ __le32_to_cpu(cmd->module_enable),
+ __le32_to_cpu(cmd->module_valid),
+ __le32_to_cpu(cmd->config_enable),
+ __le32_to_cpu(cmd->config_valid));
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 78c991aec7f9..4b5e7d3d32b6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -893,6 +893,7 @@ struct wmi_channel {
union {
__le32 reginfo0;
struct {
+ /* note: power unit is 0.5 dBm */
u8 min_power;
u8 max_power;
u8 reg_power;
@@ -915,7 +916,8 @@ struct wmi_channel_arg {
bool allow_ht;
bool allow_vht;
bool ht40plus;
- /* note: power unit is 1/4th of dBm */
+ bool chan_radar;
+ /* note: power unit is 0.5 dBm */
u32 min_power;
u32 max_power;
u32 max_reg_power;
@@ -1977,6 +1979,10 @@ struct wmi_mgmt_rx_event_v2 {
#define WMI_RX_STATUS_ERR_MIC 0x10
#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+#define PHY_ERROR_SPECTRAL_SCAN 0x26
+#define PHY_ERROR_FALSE_RADAR_EXT 0x24
+#define PHY_ERROR_RADAR 0x05
+
struct wmi_single_phyerr_rx_hdr {
/* TSF timestamp */
__le32 tsf_timestamp;
@@ -2068,6 +2074,87 @@ struct wmi_comb_phyerr_rx_event {
u8 bufp[0];
} __packed;
+#define PHYERR_TLV_SIG 0xBB
+#define PHYERR_TLV_TAG_SEARCH_FFT_REPORT 0xFB
+#define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY 0xF8
+
+struct phyerr_radar_report {
+ __le32 reg0; /* RADAR_REPORT_REG0_* */
+	__le32 reg1; /* RADAR_REPORT_REG1_* */
+} __packed;
+
+#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_MASK 0x80000000
+#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_LSB 31
+
+#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_MASK 0x40000000
+#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_LSB 30
+
+#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_MASK 0x3FF00000
+#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_LSB 20
+
+#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_MASK 0x000F0000
+#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_LSB 16
+
+#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_MASK 0x0000FC00
+#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_LSB 10
+
+#define RADAR_REPORT_REG0_PULSE_SIDX_MASK 0x000003FF
+#define RADAR_REPORT_REG0_PULSE_SIDX_LSB 0
+
+#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_MASK 0x80000000
+#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_LSB 31
+
+#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_MASK 0x7F000000
+#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_LSB 24
+
+#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_MASK 0x00FF0000
+#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_LSB 16
+
+#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_MASK 0x0000FF00
+#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_LSB 8
+
+#define RADAR_REPORT_REG1_PULSE_DUR_MASK 0x000000FF
+#define RADAR_REPORT_REG1_PULSE_DUR_LSB 0
+
+struct phyerr_fft_report {
+ __le32 reg0; /* SEARCH_FFT_REPORT_REG0_ * */
+ __le32 reg1; /* SEARCH_FFT_REPORT_REG1_ * */
+} __packed;
+
+#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_MASK 0xFF800000
+#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_LSB 23
+
+#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_MASK 0x007FC000
+#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_LSB 14
+
+#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_MASK 0x00003000
+#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_LSB 12
+
+#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_MASK 0x00000FFF
+#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_LSB 0
+
+#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_MASK 0xFC000000
+#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_LSB 26
+
+#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_MASK 0x03FC0000
+#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_LSB 18
+
+#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_MASK 0x0003FF00
+#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_LSB 8
+
+#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_MASK 0x000000FF
+#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_LSB 0
+
+
+struct phyerr_tlv {
+ __le16 len;
+ u8 tag;
+ u8 sig;
+} __packed;
+
+#define DFS_RSSI_POSSIBLY_FALSE 50
+#define DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE 40
+
struct wmi_mgmt_tx_hdr {
__le32 vdev_id;
struct wmi_mac_addr peer_macaddr;
@@ -2233,7 +2320,12 @@ enum wmi_pdev_param {
* 0: no protection 1:use CTS-to-self 2: use RTS/CTS
*/
WMI_PDEV_PARAM_PROTECTION_MODE,
- /* Dynamic bandwidth 0: disable 1: enable */
+ /*
+ * Dynamic bandwidth - 0: disable, 1: enable
+ *
+	 * When enabled, HW rate control tries different bandwidths when
+ * retransmitting frames.
+ */
WMI_PDEV_PARAM_DYNAMIC_BW,
	/* Non-aggregate / 11g sw retry threshold. 0 - disable */
WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
@@ -2911,6 +3003,18 @@ struct wmi_vdev_install_key_arg {
const void *key_data;
};
+/*
+ * vdev fixed rate format:
+ * - preamble - b7:b6 - see WMI_RATE_PREAMBLE_
+ * - nss - b5:b4 - spatial stream number (0 means 1 SS)
+ * - rate_mcs - b3:b0 - as below
+ *   CCK:  0 - 11Mbps, 1 - 5.5Mbps, 2 - 2Mbps, 3 - 1Mbps,
+ *         4 - 11Mbps (s), 5 - 5.5Mbps (s), 6 - 2Mbps (s)
+ * OFDM: 0 - 48Mbps, 1 - 24Mbps, 2 - 12Mbps, 3 - 6Mbps,
+ * 4 - 54Mbps, 5 - 36Mbps, 6 - 18Mbps, 7 - 9Mbps
+ * HT/VHT: MCS index
+ */
+
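/* Illustrative sketch only, not part of this patch: given the bit layout
 * described above, a fixed-rate value could be composed roughly as below.
 * The helper name is hypothetical; preamble would be one of the
 * wmi_rate_preamble values, nss is zero-based, and rate_mcs follows the
 * table above.
 */
static inline u8 wmi_pack_fixed_rate(u8 preamble, u8 nss, u8 rate_mcs)
{
	return ((preamble & 0x3) << 6) |	/* b7:b6 */
	       ((nss & 0x3) << 4) |		/* b5:b4 */
	       (rate_mcs & 0xf);		/* b3:b0 */
}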
/* Preamble types to be used with VDEV fixed rate configuration */
enum wmi_rate_preamble {
WMI_RATE_PREAMBLE_OFDM,
@@ -3998,6 +4102,54 @@ struct wmi_force_fw_hang_cmd {
__le32 delay_ms;
} __packed;
+enum ath10k_dbglog_level {
+ ATH10K_DBGLOG_LEVEL_VERBOSE = 0,
+ ATH10K_DBGLOG_LEVEL_INFO = 1,
+ ATH10K_DBGLOG_LEVEL_WARN = 2,
+ ATH10K_DBGLOG_LEVEL_ERR = 3,
+};
+
+/* VAP ids to enable dbglog */
+#define ATH10K_DBGLOG_CFG_VAP_LOG_LSB 0
+#define ATH10K_DBGLOG_CFG_VAP_LOG_MASK 0x0000ffff
+
+/* to enable dbglog in the firmware */
+#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_LSB 16
+#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_MASK 0x00010000
+
+/* timestamp resolution */
+#define ATH10K_DBGLOG_CFG_RESOLUTION_LSB 17
+#define ATH10K_DBGLOG_CFG_RESOLUTION_MASK 0x000E0000
+
+/* number of queued messages before sending them to the host */
+#define ATH10K_DBGLOG_CFG_REPORT_SIZE_LSB 20
+#define ATH10K_DBGLOG_CFG_REPORT_SIZE_MASK 0x0ff00000
+
+/*
+ * Log levels to enable. This defines the minimum level to enable; it is
+ * not a bitmask. See enum ath10k_dbglog_level for the values.
+ */
+#define ATH10K_DBGLOG_CFG_LOG_LVL_LSB 28
+#define ATH10K_DBGLOG_CFG_LOG_LVL_MASK 0x70000000
+
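/* Illustrative only, not part of this patch: assuming ath10k's usual
 * shift-and-mask helper, SM(v, f) == ((v) << f##_LSB) & f##_MASK, the
 * dbglog config word for a minimum level of WARN would be
 *
 *	SM(ATH10K_DBGLOG_LEVEL_WARN, ATH10K_DBGLOG_CFG_LOG_LVL)
 *	== (2 << 28) & 0x70000000 == 0x20000000
 *
 * which is how ath10k_wmi_dbglog_cfg() above builds config_enable.
 */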
+/*
+ * Note: this is a cleaned up version of the struct the firmware uses. For
+ * example, config_valid was hidden inside an array.
+ */
+struct wmi_dbglog_cfg_cmd {
+	/* bitmask to hold mod id config */
+ __le32 module_enable;
+
+ /* see ATH10K_DBGLOG_CFG_ */
+ __le32 config_enable;
+
+ /* mask of module id bits to be changed */
+ __le32 module_valid;
+
+ /* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
+ __le32 config_valid;
+} __packed;
+
#define ATH10K_RTS_MAX 2347
#define ATH10K_FRAGMT_THRESHOLD_MIN 540
#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
@@ -4075,5 +4227,6 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
int ath10k_wmi_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type, u32 delay_ms);
int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable);
#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 69f58b073e85..ef35da84f63b 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1238,14 +1238,11 @@ static void
ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
struct ieee80211_rx_status *rxs)
{
- struct ath_common *common = ath5k_hw_common(ah);
u64 tsf, bc_tstamp;
u32 hw_tu;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
- if (ieee80211_is_beacon(mgmt->frame_control) &&
- le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
- ether_addr_equal(mgmt->bssid, common->curbssid)) {
+ if (le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS) {
/*
* Received an IBSS beacon with the same BSSID. Hardware *must*
* have updated the local TSF. We have to work around various
@@ -1301,23 +1298,6 @@ ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
}
}
-static void
-ath5k_update_beacon_rssi(struct ath5k_hw *ah, struct sk_buff *skb, int rssi)
-{
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
- struct ath_common *common = ath5k_hw_common(ah);
-
- /* only beacons from our BSSID */
- if (!ieee80211_is_beacon(mgmt->frame_control) ||
- !ether_addr_equal(mgmt->bssid, common->curbssid))
- return;
-
- ewma_add(&ah->ah_beacon_rssi_avg, rssi);
-
- /* in IBSS mode we should keep RSSI statistics per neighbour */
- /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
-}
-
/*
* Compute padding position. skb must contain an IEEE 802.11 frame
*/
@@ -1390,6 +1370,7 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
struct ath5k_rx_status *rs)
{
struct ieee80211_rx_status *rxs;
+ struct ath_common *common = ath5k_hw_common(ah);
ath5k_remove_padding(skb);
@@ -1442,11 +1423,13 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
trace_ath5k_rx(ah, skb);
- ath5k_update_beacon_rssi(ah, skb, rs->rs_rssi);
+ if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) {
+ ewma_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
- /* check beacons in IBSS mode */
- if (ah->opmode == NL80211_IFTYPE_ADHOC)
- ath5k_check_ibss_tsf(ah, skb, rxs);
+ /* check beacons in IBSS mode */
+ if (ah->opmode == NL80211_IFTYPE_ADHOC)
+ ath5k_check_ibss_tsf(ah, skb, rxs);
+ }
ieee80211_rx(ah->hw, skb);
}
@@ -2549,7 +2532,6 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
hw->wiphy->available_antennas_rx = 0x3;
hw->extra_tx_headroom = 2;
- hw->channel_change_time = 5000;
/*
* Mark the device as detached to avoid processing
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index ba200b24be64..e6c52f7c26e7 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -616,7 +616,16 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
* SISRs will also clear PISR so no need to worry here.
*/
- pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;
+	/* XXX: On some cards the tx interrupt flags are not
+	 * updated on PISR even though all Tx interrupt bits
+	 * are cleared on the SISRs. Since we handle all Tx
+	 * queues together anyway, it is safe to also clear
+	 * the Tx interrupt flags on PISR to avoid missing
+	 * interrupts.
+	 */
+ pisr_clear = (pisr & ~AR5K_ISR_BITS_FROM_SISRS) |
+ (pisr & AR5K_INT_TX_ALL);
/*
* Write to clear them...
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 2437ad26949d..fd4c89df67e1 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1109,7 +1109,9 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
(mode == WMI_11G_HT20) ?
NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
+ mutex_lock(&vif->wdev.mtx);
cfg80211_ch_switch_notify(vif->ndev, &chandef);
+ mutex_unlock(&vif->wdev.mtx);
}
static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -3169,12 +3171,15 @@ static bool ath6kl_is_p2p_go_ssid(const u8 *buf, size_t len)
}
static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
- struct ieee80211_channel *chan, bool offchan,
- unsigned int wait, const u8 *buf, size_t len,
- bool no_cck, bool dont_wait_for_ack, u64 *cookie)
+ struct cfg80211_mgmt_tx_params *params, u64 *cookie)
{
struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
struct ath6kl *ar = ath6kl_priv(vif->ndev);
+ struct ieee80211_channel *chan = params->chan;
+ const u8 *buf = params->buf;
+ size_t len = params->len;
+ unsigned int wait = params->wait;
+ bool no_cck = params->no_cck;
u32 id, freq;
const struct ieee80211_mgmt *mgmt;
bool more_data, queued;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 546d5da0b894..4f16d79c9eb1 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -2754,9 +2754,9 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
mask->control[band].legacy << 4;
/* copy mcs rate mask */
- mcsrate = mask->control[band].mcs[1];
+ mcsrate = mask->control[band].ht_mcs[1];
mcsrate <<= 8;
- mcsrate |= mask->control[band].mcs[0];
+ mcsrate |= mask->control[band].ht_mcs[0];
ratemask[band] |= mcsrate << 12;
ratemask[band] |= mcsrate << 28;
}
@@ -2806,7 +2806,7 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
mask->control[band].legacy << 4;
/* copy mcs rate mask */
- mcsrate = mask->control[band].mcs[0];
+ mcsrate = mask->control[band].ht_mcs[0];
ratemask[band] |= mcsrate << 12;
ratemask[band] |= mcsrate << 20;
}
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 32f139e2e897..7b96b3e5712d 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -65,6 +65,14 @@ config ATH9K_DEBUGFS
Also required for changing debug message flags at run time.
+config ATH9K_STATION_STATISTICS
+ bool "Detailed station statistics"
+ depends on ATH9K && ATH9K_DEBUGFS && DEBUG_FS
+ select MAC80211_DEBUGFS
+ default n
+ ---help---
+	  This option enables detailed statistics for associated stations.
+
config ATH9K_DFS_CERTIFIED
bool "Atheros DFS support for certified platforms"
depends on ATH9K && CFG80211_CERTIFICATION_ONUS
@@ -86,7 +94,7 @@ config ATH9K_DFS_CERTIFIED
config ATH9K_TX99
bool "Atheros ath9k TX99 testing support"
- depends on CFG80211_CERTIFICATION_ONUS
+ depends on ATH9K_DEBUGFS && CFG80211_CERTIFICATION_ONUS
default n
---help---
Say N. This should only be enabled on systems undergoing
@@ -104,6 +112,14 @@ config ATH9K_TX99
be evaluated to meet the RF exposure limits set forth in the
governmental SAR regulations.
+config ATH9K_WOW
+ bool "Wake on Wireless LAN support (EXPERIMENTAL)"
+ depends on ATH9K && PM
+ default n
+ ---help---
+ This option enables Wake on Wireless LAN support for certain cards.
+ Currently, AR9462 is supported.
+
config ATH9K_LEGACY_RATE_CONTROL
bool "Atheros ath9k rate control"
depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 6205ef5a9321..a40e5c5d7418 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -11,11 +11,15 @@ ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_ATH9K_PCI) += pci.o
ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
-ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
-ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \
- dfs.o
-ath9k-$(CONFIG_PM_SLEEP) += wow.o
+ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
+ath9k-$(CONFIG_ATH9K_TX99) += tx99.o
+ath9k-$(CONFIG_ATH9K_WOW) += wow.o
+
+ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o \
+ spectral.o
+
+ath9k-$(CONFIG_ATH9K_STATION_STATISTICS) += debug_sta.o
obj-$(CONFIG_ATH9K) += ath9k.o
@@ -41,6 +45,8 @@ ath9k_hw-y:= \
ar9003_eeprom.o \
ar9003_paprd.o
+ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o
+
ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
ar9003_mci.o
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index bd048cc69a33..a3668433dc02 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -724,14 +724,14 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
struct ath_ant_comb *antcomb = &sc->ant_comb;
int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
int curr_main_set;
- int main_rssi = rs->rs_rssi_ctl0;
- int alt_rssi = rs->rs_rssi_ctl1;
+ int main_rssi = rs->rs_rssi_ctl[0];
+ int alt_rssi = rs->rs_rssi_ctl[1];
int rx_ant_conf, main_ant_conf;
bool short_scan = false, ret;
- rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
+ rx_ant_conf = (rs->rs_rssi_ctl[2] >> ATH_ANT_RX_CURRENT_SHIFT) &
ATH_ANT_RX_MASK;
- main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
+ main_ant_conf = (rs->rs_rssi_ctl[2] >> ATH_ANT_RX_MAIN_SHIFT) &
ATH_ANT_RX_MASK;
if (alt_rssi >= antcomb->low_rssi_thresh) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 5c95fd9e9c9e..d480d2f3e185 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -32,12 +32,8 @@ static int ar9002_hw_init_mode_regs(struct ath_hw *ah)
return 0;
}
- if (ah->config.pcie_clock_req)
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_off_L1_9280);
- else
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_always_on_L1_9280);
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9280PciePhy_clkreq_always_on_L1_9280);
if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1);
@@ -387,6 +383,20 @@ void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
}
}
+static void ar9002_hw_init_hang_checks(struct ath_hw *ah)
+{
+ if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
+ ah->config.hw_hang_checks |= HW_BB_RIFS_HANG;
+ ah->config.hw_hang_checks |= HW_BB_DFS_HANG;
+ }
+
+ if (AR_SREV_9280(ah))
+ ah->config.hw_hang_checks |= HW_BB_RX_CLEAR_STUCK_HANG;
+
+ if (AR_SREV_5416(ah) || AR_SREV_9100(ah) || AR_SREV_9160(ah))
+ ah->config.hw_hang_checks |= HW_MAC_HANG;
+}
+
/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
int ar9002_hw_attach_ops(struct ath_hw *ah)
{
@@ -399,6 +409,7 @@ int ar9002_hw_attach_ops(struct ath_hw *ah)
return ret;
priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs;
+ priv_ops->init_hang_checks = ar9002_hw_init_hang_checks;
ops->config_pci_powersave = ar9002_hw_configpcipowersave;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 8d78253c26ce..741b38ddcb37 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -29,7 +29,8 @@ static void ar9002_hw_set_desc_link(void *ds, u32 ds_link)
((struct ath_desc*) ds)->ds_link = ds_link;
}
-static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
+ u32 *sync_cause_p)
{
u32 isr = 0;
u32 mask2 = 0;
@@ -76,9 +77,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
mask2 |= ATH9K_INT_CST;
if (isr2 & AR_ISR_S2_TSFOOR)
mask2 |= ATH9K_INT_TSFOOR;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S2, isr2);
+ isr &= ~AR_ISR_BCNMISC;
+ }
}
- isr = REG_READ(ah, AR_ISR_RAC);
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
+ isr = REG_READ(ah, AR_ISR_RAC);
+
if (isr == 0xffffffff) {
*masked = 0;
return false;
@@ -97,11 +105,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
*masked |= ATH9K_INT_TX;
- s0_s = REG_READ(ah, AR_ISR_S0_S);
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+ s0_s = REG_READ(ah, AR_ISR_S0_S);
+ s1_s = REG_READ(ah, AR_ISR_S1_S);
+ } else {
+ s0_s = REG_READ(ah, AR_ISR_S0);
+ REG_WRITE(ah, AR_ISR_S0, s0_s);
+ s1_s = REG_READ(ah, AR_ISR_S1);
+ REG_WRITE(ah, AR_ISR_S1, s1_s);
+
+ isr &= ~(AR_ISR_TXOK |
+ AR_ISR_TXDESC |
+ AR_ISR_TXERR |
+ AR_ISR_TXEOL);
+ }
+
ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
-
- s1_s = REG_READ(ah, AR_ISR_S1_S);
ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
}
@@ -114,13 +134,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
*masked |= mask2;
}
- if (AR_SREV_9100(ah))
- return true;
-
- if (isr & AR_ISR_GENTMR) {
+ if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
u32 s5_s;
- s5_s = REG_READ(ah, AR_ISR_S5_S);
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+ s5_s = REG_READ(ah, AR_ISR_S5_S);
+ } else {
+ s5_s = REG_READ(ah, AR_ISR_S5);
+ }
+
ah->intr_gen_timer_trigger =
MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
@@ -133,10 +155,24 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
*masked |= ATH9K_INT_TIM_TIMER;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S5, s5_s);
+ isr &= ~AR_ISR_GENTMR;
+ }
}
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR, isr);
+ REG_READ(ah, AR_ISR);
+ }
+
+ if (AR_SREV_9100(ah))
+ return true;
+
if (sync_cause) {
- ath9k_debug_sync_cause(common, sync_cause);
+ if (sync_cause_p)
+ *sync_cause_p = sync_cause;
fatal_int =
(sync_cause &
(AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index f087117b2e6b..9a2afa2c690b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -201,7 +201,6 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
ath9k_hw_get_channel_centers(ah, chan, &centers);
freq = centers.synth_center;
- ah->config.spurmode = SPUR_ENABLE_EEPROM;
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 7546b9a7dcbf..0a6163e9248c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -303,7 +303,7 @@ static const u32 ar9300_2p2_mac_postamble[][5] = {
{0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
{0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
{0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x00008120, 0x18f04800, 0x18f04800, 0x18f04810, 0x18f04810},
{0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
{0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
};
@@ -352,7 +352,7 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
- {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
+ {0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
@@ -378,9 +378,9 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x00009814, 0x9280c00a},
{0x00009818, 0x00000000},
{0x0000981c, 0x00020028},
- {0x00009834, 0x6400a290},
+ {0x00009834, 0x6400a190},
{0x00009838, 0x0108ecff},
- {0x0000983c, 0x0d000600},
+ {0x0000983c, 0x14000600},
{0x00009880, 0x201fff00},
{0x00009884, 0x00001042},
{0x000098a4, 0x00200400},
@@ -401,7 +401,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x00009d04, 0x40206c10},
{0x00009d08, 0x009c4060},
{0x00009d0c, 0x9883800a},
- {0x00009d10, 0x01834061},
+ {0x00009d10, 0x01884061},
{0x00009d14, 0x00c0040b},
{0x00009d18, 0x00000000},
{0x00009e08, 0x0038230c},
@@ -459,7 +459,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a3e8, 0x20202020},
{0x0000a3ec, 0x20202020},
{0x0000a3f0, 0x00000000},
- {0x0000a3f4, 0x00000246},
+ {0x0000a3f4, 0x00000000},
{0x0000a3f8, 0x0c9bd380},
{0x0000a3fc, 0x000f0f01},
{0x0000a400, 0x8fa91f01},
@@ -534,107 +534,107 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
- {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
- {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
- {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
- {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
- {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
- {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
- {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
- {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
- {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
- {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
- {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
- {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
- {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
- {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
- {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
- {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
- {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
- {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
- {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
- {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
- {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
- {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
- {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
- {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
- {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
- {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
- {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
- {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+ {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5e08442e, 0x5e08442e, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x620a4431, 0x620a4431, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x15800028, 0x15800028, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1b80002b, 0x1b80002b, 0x12800400, 0x12800400},
+ {0x0000a598, 0x1f820028, 0x1f820028, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x2582002b, 0x2582002b, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2a84002a, 0x2a84002a, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2e86002a, 0x2e86002a, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x3382202d, 0x3382202d, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3884202c, 0x3884202c, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3c86202c, 0x3c86202c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4188202d, 0x4188202d, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4586402d, 0x4586402d, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4986222d, 0x4986222d, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4d862231, 0x4d862231, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x50882231, 0x50882231, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5688422e, 0x5688422e, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5e88442e, 0x5e88442e, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x628a4431, 0x628a4431, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x648a4432, 0x648a4432, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x688a4434, 0x688a4434, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x6c8a6434, 0x6c8a6434, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x6f8a6633, 0x6f8a6633, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
- {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
- {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
- {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
- {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+ {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
{0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
- {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016048, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
{0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
- {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016448, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
{0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
{0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
- {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016848, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
@@ -644,7 +644,7 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
{0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
{0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
+ {0x0000a410, 0x000050d4, 0x000050d4, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
{0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
@@ -1086,8 +1086,8 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
{0x0000b074, 0x00000000},
{0x0000b078, 0x00000000},
{0x0000b07c, 0x00000000},
- {0x0000b080, 0x2a2d2f32},
- {0x0000b084, 0x21232328},
+ {0x0000b080, 0x23232323},
+ {0x0000b084, 0x21232323},
{0x0000b088, 0x19191c1e},
{0x0000b08c, 0x12141417},
{0x0000b090, 0x07070e0e},
@@ -1385,9 +1385,9 @@ static const u32 ar9300_2p2_mac_core[][2] = {
{0x000081f8, 0x00000000},
{0x000081fc, 0x00000000},
{0x00008240, 0x00100000},
- {0x00008244, 0x0010f424},
+ {0x00008244, 0x0010f400},
{0x00008248, 0x00000800},
- {0x0000824c, 0x0001e848},
+ {0x0000824c, 0x0001e800},
{0x00008250, 0x00000000},
{0x00008254, 0x00000000},
{0x00008258, 0x00000000},
@@ -1726,16 +1726,30 @@ static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = {
static const u32 ar9300PciePhy_clkreq_enable_L1_2p2[][2] = {
/* Addr allmodes */
- {0x00004040, 0x08253e5e},
+ {0x00004040, 0x0825365e},
{0x00004040, 0x0008003b},
{0x00004044, 0x00000000},
};
static const u32 ar9300PciePhy_clkreq_disable_L1_2p2[][2] = {
/* Addr allmodes */
- {0x00004040, 0x08213e5e},
+ {0x00004040, 0x0821365e},
{0x00004040, 0x0008003b},
{0x00004044, 0x00000000},
};
+static const u32 ar9300_2p2_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
+static const u32 ar9300_2p2_baseband_postamble_dfs_channel[][3] = {
+ /* Addr 5G 2G */
+ {0x00009824, 0x5ac668d0, 0x5ac668d0},
+ {0x00009e0c, 0x6d4000e2, 0x6d4000e2},
+ {0x00009e14, 0x37b9625e, 0x37b9625e},
+};
+
#endif /* INITVALS_9003_2P2_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_buffalo_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_buffalo_initvals.h
new file mode 100644
index 000000000000..59cf738f70df
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_buffalo_initvals.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9003_BUFFALO_H
+#define INITVALS_9003_BUFFALO_H
+
+static const u32 ar9300Modes_high_power_tx_gain_table_buffalo[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+#endif /* INITVALS_9003_BUFFALO_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 22934d3ca544..a352128c40ad 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -326,6 +326,224 @@ static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
ah->supp_cals = IQ_MISMATCH_CAL;
}
+#define OFF_UPPER_LT 24
+#define OFF_LOWER_LT 7
+
+static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah,
+ bool txiqcal_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ch0_done, osdac_ch0, dc_off_ch0_i1, dc_off_ch0_q1, dc_off_ch0_i2,
+ dc_off_ch0_q2, dc_off_ch0_i3, dc_off_ch0_q3;
+ int ch1_done, osdac_ch1, dc_off_ch1_i1, dc_off_ch1_q1, dc_off_ch1_i2,
+ dc_off_ch1_q2, dc_off_ch1_i3, dc_off_ch1_q3;
+ int ch2_done, osdac_ch2, dc_off_ch2_i1, dc_off_ch2_q1, dc_off_ch2_i2,
+ dc_off_ch2_q2, dc_off_ch2_i3, dc_off_ch2_q3;
+ bool status;
+ u32 temp, val;
+
+ /*
+ * Clear offset and IQ calibration, run AGC cal.
+ */
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_OFFSET_CAL);
+ REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_CAL);
+
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT);
+ if (!status) {
+ ath_dbg(common, CALIBRATE,
+ "AGC cal without offset cal failed to complete in 1ms");
+ return false;
+ }
+
+ /*
+ * Allow only offset calibration and disable the others
+ * (Carrier Leak calibration, TX Filter calibration and
+ * Peak Detector offset calibration).
+ */
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_OFFSET_CAL);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
+ AR_PHY_CL_CAL_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_PKDET_CAL);
+
+ ch0_done = 0;
+ ch1_done = 0;
+ ch2_done = 0;
+
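+	/*
+	 * Iterate until the DC offsets measured in high gain, low gain
+	 * and loopback mode fall inside [OFF_LOWER_LT, OFF_UPPER_LT] for
+	 * every chain, bumping the 2-bit OSDAC field (bits 31:30 of
+	 * AR_PHY_65NM_CHx_BB1) after each failed pass until it saturates
+	 * at 3.
+	 */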
+ while ((ch0_done == 0) || (ch1_done == 0) || (ch2_done == 0)) {
+ osdac_ch0 = (REG_READ(ah, AR_PHY_65NM_CH0_BB1) >> 30) & 0x3;
+ osdac_ch1 = (REG_READ(ah, AR_PHY_65NM_CH1_BB1) >> 30) & 0x3;
+ osdac_ch2 = (REG_READ(ah, AR_PHY_65NM_CH2_BB1) >> 30) & 0x3;
+
+ REG_SET_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_CAL);
+
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT);
+ if (!status) {
+ ath_dbg(common, CALIBRATE,
+ "DC offset cal failed to complete in 1ms");
+ return false;
+ }
+
+ REG_CLR_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ /*
+ * High gain.
+ */
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH0_BB3) & 0xfffffcff) | (1 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH1_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH1_BB3) & 0xfffffcff) | (1 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH2_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH2_BB3) & 0xfffffcff) | (1 << 8)));
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH0_BB3);
+ dc_off_ch0_i1 = (temp >> 26) & 0x1f;
+ dc_off_ch0_q1 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH1_BB3);
+ dc_off_ch1_i1 = (temp >> 26) & 0x1f;
+ dc_off_ch1_q1 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH2_BB3);
+ dc_off_ch2_i1 = (temp >> 26) & 0x1f;
+ dc_off_ch2_q1 = (temp >> 21) & 0x1f;
+
+ /*
+ * Low gain.
+ */
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH0_BB3) & 0xfffffcff) | (2 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH1_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH1_BB3) & 0xfffffcff) | (2 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH2_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH2_BB3) & 0xfffffcff) | (2 << 8)));
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH0_BB3);
+ dc_off_ch0_i2 = (temp >> 26) & 0x1f;
+ dc_off_ch0_q2 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH1_BB3);
+ dc_off_ch1_i2 = (temp >> 26) & 0x1f;
+ dc_off_ch1_q2 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH2_BB3);
+ dc_off_ch2_i2 = (temp >> 26) & 0x1f;
+ dc_off_ch2_q2 = (temp >> 21) & 0x1f;
+
+ /*
+ * Loopback.
+ */
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH0_BB3) & 0xfffffcff) | (3 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH1_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH1_BB3) & 0xfffffcff) | (3 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH2_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH2_BB3) & 0xfffffcff) | (3 << 8)));
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH0_BB3);
+ dc_off_ch0_i3 = (temp >> 26) & 0x1f;
+ dc_off_ch0_q3 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH1_BB3);
+ dc_off_ch1_i3 = (temp >> 26) & 0x1f;
+ dc_off_ch1_q3 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH2_BB3);
+ dc_off_ch2_i3 = (temp >> 26) & 0x1f;
+ dc_off_ch2_q3 = (temp >> 21) & 0x1f;
+
+ if ((dc_off_ch0_i1 > OFF_UPPER_LT) || (dc_off_ch0_i1 < OFF_LOWER_LT) ||
+ (dc_off_ch0_i2 > OFF_UPPER_LT) || (dc_off_ch0_i2 < OFF_LOWER_LT) ||
+ (dc_off_ch0_i3 > OFF_UPPER_LT) || (dc_off_ch0_i3 < OFF_LOWER_LT) ||
+ (dc_off_ch0_q1 > OFF_UPPER_LT) || (dc_off_ch0_q1 < OFF_LOWER_LT) ||
+ (dc_off_ch0_q2 > OFF_UPPER_LT) || (dc_off_ch0_q2 < OFF_LOWER_LT) ||
+ (dc_off_ch0_q3 > OFF_UPPER_LT) || (dc_off_ch0_q3 < OFF_LOWER_LT)) {
+ if (osdac_ch0 == 3) {
+ ch0_done = 1;
+ } else {
+ osdac_ch0++;
+
+ val = REG_READ(ah, AR_PHY_65NM_CH0_BB1) & 0x3fffffff;
+ val |= (osdac_ch0 << 30);
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BB1, val);
+
+ ch0_done = 0;
+ }
+ } else {
+ ch0_done = 1;
+ }
+
+ if ((dc_off_ch1_i1 > OFF_UPPER_LT) || (dc_off_ch1_i1 < OFF_LOWER_LT) ||
+ (dc_off_ch1_i2 > OFF_UPPER_LT) || (dc_off_ch1_i2 < OFF_LOWER_LT) ||
+ (dc_off_ch1_i3 > OFF_UPPER_LT) || (dc_off_ch1_i3 < OFF_LOWER_LT) ||
+ (dc_off_ch1_q1 > OFF_UPPER_LT) || (dc_off_ch1_q1 < OFF_LOWER_LT) ||
+ (dc_off_ch1_q2 > OFF_UPPER_LT) || (dc_off_ch1_q2 < OFF_LOWER_LT) ||
+ (dc_off_ch1_q3 > OFF_UPPER_LT) || (dc_off_ch1_q3 < OFF_LOWER_LT)) {
+ if (osdac_ch1 == 3) {
+ ch1_done = 1;
+ } else {
+ osdac_ch1++;
+
+ val = REG_READ(ah, AR_PHY_65NM_CH1_BB1) & 0x3fffffff;
+ val |= (osdac_ch1 << 30);
+ REG_WRITE(ah, AR_PHY_65NM_CH1_BB1, val);
+
+ ch1_done = 0;
+ }
+ } else {
+ ch1_done = 1;
+ }
+
+ if ((dc_off_ch2_i1 > OFF_UPPER_LT) || (dc_off_ch2_i1 < OFF_LOWER_LT) ||
+ (dc_off_ch2_i2 > OFF_UPPER_LT) || (dc_off_ch2_i2 < OFF_LOWER_LT) ||
+ (dc_off_ch2_i3 > OFF_UPPER_LT) || (dc_off_ch2_i3 < OFF_LOWER_LT) ||
+ (dc_off_ch2_q1 > OFF_UPPER_LT) || (dc_off_ch2_q1 < OFF_LOWER_LT) ||
+ (dc_off_ch2_q2 > OFF_UPPER_LT) || (dc_off_ch2_q2 < OFF_LOWER_LT) ||
+ (dc_off_ch2_q3 > OFF_UPPER_LT) || (dc_off_ch2_q3 < OFF_LOWER_LT)) {
+ if (osdac_ch2 == 3) {
+ ch2_done = 1;
+ } else {
+ osdac_ch2++;
+
+ val = REG_READ(ah, AR_PHY_65NM_CH2_BB1) & 0x3fffffff;
+ val |= (osdac_ch2 << 30);
+ REG_WRITE(ah, AR_PHY_65NM_CH2_BB1, val);
+
+ ch2_done = 0;
+ }
+ } else {
+ ch2_done = 1;
+ }
+ }
+
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_OFFSET_CAL);
+ REG_SET_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ /*
+ * We don't need to check txiqcal_done here since it is always
+ * set for AR9550.
+ */
+ REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
+
+ return true;
+}
+
/*
* solve 4x4 linear equation used in loopback iq cal.
*/
@@ -347,7 +565,7 @@ static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah,
const s32 result_shift = 1 << 15;
struct ath_common *common = ath9k_hw_common(ah);
- f2 = (f1 * f1 + f3 * f3) / result_shift;
+ f2 = ((f1 >> 3) * (f1 >> 3) + (f3 >> 3) * (f3 >> 3)) >> 9;
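+	/*
+	 * Scaling note for the line above: dividing f1 and f3 by 8 before
+	 * squaring divides each square by 64, and the final >> 9 divides
+	 * the sum by 512, so f2 is still (f1^2 + f3^2) / 2^15
+	 * (result_shift) to within rounding of the low bits, presumably
+	 * so the intermediate 32-bit products cannot overflow.
+	 */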
if (!f2) {
ath_dbg(common, CALIBRATE, "Divide by 0\n");
@@ -437,8 +655,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (i2_m_q2_a0_d1 > 0x800)
i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
- if (i2_p_q2_a0_d1 > 0x800)
- i2_p_q2_a0_d1 = -((0xfff - i2_p_q2_a0_d1) + 1);
+ if (i2_p_q2_a0_d1 > 0x1000)
+ i2_p_q2_a0_d1 = -((0x1fff - i2_p_q2_a0_d1) + 1);
if (iq_corr_a0_d1 > 0x800)
iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
@@ -482,6 +700,19 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
return false;
}
+ if ((i2_p_q2_a0_d0 < 1024) || (i2_p_q2_a0_d0 > 2047) ||
+ (i2_p_q2_a1_d0 < 0) || (i2_p_q2_a1_d1 < 0) ||
+ (i2_p_q2_a0_d0 <= i2_m_q2_a0_d0) ||
+ (i2_p_q2_a0_d0 <= iq_corr_a0_d0) ||
+ (i2_p_q2_a0_d1 <= i2_m_q2_a0_d1) ||
+ (i2_p_q2_a0_d1 <= iq_corr_a0_d1) ||
+ (i2_p_q2_a1_d0 <= i2_m_q2_a1_d0) ||
+ (i2_p_q2_a1_d0 <= iq_corr_a1_d0) ||
+ (i2_p_q2_a1_d1 <= i2_m_q2_a1_d1) ||
+ (i2_p_q2_a1_d1 <= iq_corr_a1_d1)) {
+ return false;
+ }
+
mag_a0_d0 = (i2_m_q2_a0_d0 * res_scale) / i2_p_q2_a0_d0;
phs_a0_d0 = (iq_corr_a0_d0 * res_scale) / i2_p_q2_a0_d0;
@@ -898,7 +1129,7 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
{
- int offset[8], total = 0, test;
+ int offset[8] = {0}, total = 0, test;
int agc_out, i;
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
@@ -923,12 +1154,18 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR, 0x1);
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0x1);
- if (is_2g)
- REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
- AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, 0x0);
- else
+
+ if (AR_SREV_9330_11(ah)) {
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
- AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR, 0x0);
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0);
+ } else {
+ if (is_2g)
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, 0x0);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR, 0x0);
+ }
for (i = 6; i > 0; i--) {
offset[i] = BIT(i - 1);
@@ -964,9 +1201,9 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0);
}
-static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah,
- struct ath9k_channel *chan,
- bool run_rtt_cal)
+static void ar9003_hw_do_pcoem_manual_peak_cal(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ bool run_rtt_cal)
{
struct ath9k_hw_cal_data *caldata = ah->caldata;
int i;
@@ -1040,14 +1277,14 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
}
}
-static bool ar9003_hw_init_cal(struct ath_hw *ah,
- struct ath9k_channel *chan)
+static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah,
+ struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = ah->caldata;
bool txiqcal_done = false;
bool is_reusable = true, status = true;
- bool run_rtt_cal = false, run_agc_cal, sep_iq_cal = false;
+ bool run_rtt_cal = false, run_agc_cal;
bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
u32 rx_delay = 0;
u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
@@ -1119,22 +1356,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
txiqcal_done = run_agc_cal = true;
- } else if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags)) {
- run_agc_cal = true;
- sep_iq_cal = true;
}
skip_tx_iqcal:
if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
ar9003_mci_init_cal_req(ah, &is_reusable);
- if (sep_iq_cal) {
- txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
- REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
- udelay(5);
- REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
- }
-
if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
rx_delay = REG_READ(ah, AR_PHY_RX_DELAY);
/* Disable BB_active */
@@ -1155,7 +1382,7 @@ skip_tx_iqcal:
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
- ar9003_hw_do_manual_peak_cal(ah, chan, run_rtt_cal);
+ ar9003_hw_do_pcoem_manual_peak_cal(ah, chan, run_rtt_cal);
}
if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
@@ -1228,13 +1455,117 @@ skip_tx_iqcal:
return true;
}
+static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ bool txiqcal_done = false;
+ bool is_reusable = true, status = true;
+ bool run_agc_cal = false, sep_iq_cal = false;
+
+ /* Use chip chainmask only for calibration */
+ ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
+
+ if (ah->enabled_cals & TX_CL_CAL) {
+ REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ run_agc_cal = true;
+ }
+
+ if (IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))
+ goto skip_tx_iqcal;
+
+ /* Do Tx IQ Calibration */
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+ AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
+ DELPT);
+
+ /*
+ * For AR9485 or later chips, TxIQ cal runs as part of
+ * AGC calibration. Specifically, AR9550 in SoC chips.
+ */
+ if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
+ txiqcal_done = true;
+ run_agc_cal = true;
+ } else {
+ sep_iq_cal = true;
+ run_agc_cal = true;
+ }
+
+ /*
+ * In the SoC family, this will run for AR9300, AR9331 and AR9340.
+ */
+ if (sep_iq_cal) {
+ txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+ udelay(5);
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+ }
+
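+	/*
+	 * On AR9550 in the 2 GHz band, additionally run the dynamic
+	 * OSDAC selection added above; a failure there aborts the
+	 * whole calibration.
+	 */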
+ if (AR_SREV_9550(ah) && IS_CHAN_2GHZ(chan)) {
+ if (!ar9003_hw_dynamic_osdac_selection(ah, txiqcal_done))
+ return false;
+ }
+
+skip_tx_iqcal:
+ if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
+ if (AR_SREV_9330_11(ah))
+ ar9003_hw_manual_peak_cal(ah, 0, IS_CHAN_2GHZ(chan));
+
+ /* Calibrate the AGC */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ /* Poll for offset calibration complete */
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT);
+ }
+
+ if (!status) {
+ ath_dbg(common, CALIBRATE,
+ "offset calibration failed to complete in %d ms; noisy environment?\n",
+ AH_WAIT_TIMEOUT / 1000);
+ return false;
+ }
+
+ if (txiqcal_done)
+ ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
+
+ /* Revert chainmask to runtime parameters */
+ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+
+ /* Initialize list pointers */
+ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+ ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
+
+ /* Initialize current pointer to first element in list */
+ ah->cal_list_curr = ah->cal_list;
+
+ if (ah->cal_list_curr)
+ ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
+
+ if (caldata)
+ caldata->CalValid = 0;
+
+ return true;
+}
+
void ar9003_hw_attach_calib_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+ if (AR_SREV_9485(ah) || AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ priv_ops->init_cal = ar9003_hw_init_cal_pcoem;
+ else
+ priv_ops->init_cal = ar9003_hw_init_cal_soc;
+
priv_ops->init_cal_settings = ar9003_hw_init_cal_settings;
- priv_ops->init_cal = ar9003_hw_init_cal;
priv_ops->setup_calibration = ar9003_hw_setup_calibration;
ops->calibrate = ar9003_hw_calibrate;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 130657db5c43..b8daff78b9d1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -131,6 +131,7 @@ static const struct ar9300_eeprom ar9300_default = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -138,7 +139,7 @@ static const struct ar9300_eeprom ar9300_default = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0},
+ .future = {0, 0},
.tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
@@ -333,6 +334,7 @@ static const struct ar9300_eeprom ar9300_default = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -707,6 +709,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -714,7 +717,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0},
+ .future = {0, 0},
.tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
@@ -909,6 +912,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -1284,6 +1288,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -1291,7 +1296,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0},
+ .future = {0, 0},
.tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
@@ -1486,6 +1491,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -1861,6 +1867,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -1868,7 +1875,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0},
+ .future = {0, 0},
.tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
@@ -2063,6 +2070,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -2437,6 +2445,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80C080),
.papdRateMaskHt40 = LE32(0x0080C080),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -2444,7 +2453,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0},
+ .future = {0, 0},
.tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
@@ -2639,6 +2648,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -3588,7 +3598,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
AR_SWITCH_TABLE_COM_AR9462_ALL, value);
- } else if (AR_SREV_9550(ah)) {
+ } else if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
AR_SWITCH_TABLE_COM_AR9550_ALL, value);
} else
@@ -3965,7 +3975,7 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
u8 tuning_caps_param = eep->baseEepHeader.params_for_tuning_caps[0];
- if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
+ if (AR_SREV_9340(ah) || AR_SREV_9531(ah))
return;
if (eep->baseEepHeader.featureEnable & 0x40) {
@@ -4020,7 +4030,10 @@ static void ar9003_hw_xpa_timing_control_apply(struct ath_hw *ah, bool is2ghz)
if (!(eep->baseEepHeader.featureEnable & 0x80))
return;
- if (!AR_SREV_9300(ah) && !AR_SREV_9340(ah) && !AR_SREV_9580(ah))
+ if (!AR_SREV_9300(ah) &&
+ !AR_SREV_9340(ah) &&
+ !AR_SREV_9580(ah) &&
+ !AR_SREV_9531(ah))
return;
xpa_ctl = ar9003_modal_header(ah, is2ghz)->txFrameToXpaOn;
@@ -4111,6 +4124,37 @@ static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
}
}
+static void ar9003_hw_apply_minccapwr_thresh(struct ath_hw *ah,
+ bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ const u_int32_t cca_ctrl[AR9300_MAX_CHAINS] = {
+ AR_PHY_CCA_CTRL_0,
+ AR_PHY_CCA_CTRL_1,
+ AR_PHY_CCA_CTRL_2,
+ };
+ int chain;
+ u32 val;
+
+ if (is2ghz) {
+ if (!(eep->base_ext1.misc_enable & BIT(2)))
+ return;
+ } else {
+ if (!(eep->base_ext1.misc_enable & BIT(3)))
+ return;
+ }
+
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->caps.tx_chainmask & BIT(chain)))
+ continue;
+
+ val = ar9003_modal_header(ah, is2ghz)->noiseFloorThreshCh[chain];
+ REG_RMW_FIELD(ah, cca_ctrl[chain],
+ AR_PHY_EXT_CCA0_THRESH62_1, val);
+ }
+
+}
+
static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -4122,9 +4166,10 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
ar9003_hw_xlna_bias_strength_apply(ah, is2ghz);
ar9003_hw_atten_apply(ah, chan);
ar9003_hw_quick_drop_apply(ah, chan->channel);
- if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) && !AR_SREV_9550(ah))
+ if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) && !AR_SREV_9531(ah))
ar9003_hw_internal_regulator_apply(ah);
ar9003_hw_apply_tuning_caps(ah);
+ ar9003_hw_apply_minccapwr_thresh(ah, chan);
ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz);
ar9003_hw_thermometer_apply(ah);
ar9003_hw_thermo_cal_apply(ah);
@@ -4746,7 +4791,7 @@ static void ar9003_hw_power_control_override(struct ath_hw *ah,
}
tempslope:
- if (AR_SREV_9550(ah)) {
+ if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
/*
* AR955x has tempSlope register for each chain.
* Check whether temp_compensation feature is enabled or not.
@@ -5020,6 +5065,10 @@ static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep,
break;
}
}
+
+ if (is2GHz && !twiceMaxEdgePower)
+ twiceMaxEdgePower = 60;
+
return twiceMaxEdgePower;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 0e5daa58a4fc..694ca2e680e5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -270,10 +270,20 @@ struct cal_ctl_data_5g {
u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G];
} __packed;
+#define MAX_BASE_EXTENSION_FUTURE 2
+
struct ar9300_BaseExtension_1 {
u8 ant_div_control;
- u8 future[3];
- u8 tempslopextension[8];
+ u8 future[MAX_BASE_EXTENSION_FUTURE];
+ /*
+ * misc_enable:
+ *
+ * BIT 0 - TX Gain Cap enable.
+ * BIT 1 - Uncompressed Checksum enable.
+ * BIT 2/3 - MinCCApwr enable 2g/5g.
+ */
+ u8 misc_enable;
+ int8_t tempslopextension[8];
int8_t quick_drop_low;
int8_t quick_drop_high;
} __packed;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 20e49095db2a..ec1da0cc25f5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -17,6 +17,7 @@
#include "hw.h"
#include "ar9003_mac.h"
#include "ar9003_2p2_initvals.h"
+#include "ar9003_buffalo_initvals.h"
#include "ar9485_initvals.h"
#include "ar9340_initvals.h"
#include "ar9330_1p1_initvals.h"
@@ -26,6 +27,8 @@
#include "ar9462_2p0_initvals.h"
#include "ar9462_2p1_initvals.h"
#include "ar9565_1p0_initvals.h"
+#include "ar9565_1p1_initvals.h"
+#include "ar953x_initvals.h"
 /* General hardware code for the AR9003 hardware family */
@@ -148,7 +151,11 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ar9340Modes_high_ob_db_tx_gain_table_1p0);
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9340Modes_fast_clock_1p0);
+ ar9340Modes_fast_clock_1p0);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9340_1p0_baseband_core_txfir_coeff_japan_2484);
+ INIT_INI_ARRAY(&ah->ini_dfs,
+ ar9340_1p0_baseband_postamble_dfs_channel);
if (!ah->is_clk_25mhz)
INIT_INI_ARRAY(&ah->iniAdditional,
@@ -223,6 +230,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ar9462_2p1_modes_fast_clock);
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
ar9462_2p1_baseband_core_txfir_coeff_japan_2484);
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9462_2p1_pciephy_clkreq_disable_L1);
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9462_2p1_pciephy_clkreq_disable_L1);
} else if (AR_SREV_9462_20(ah)) {
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core);
@@ -247,18 +258,18 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ar9462_2p0_soc_postamble);
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_rx_gain_table_2p0);
+ ar9462_2p0_common_rx_gain);
/* Awake -> Sleep Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9462_pciephy_clkreq_disable_L1_2p0);
+ ar9462_2p0_pciephy_clkreq_disable_L1);
/* Sleep -> Awake Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9462_pciephy_clkreq_disable_L1_2p0);
+ ar9462_2p0_pciephy_clkreq_disable_L1);
/* Fast clock modal settings */
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9462_modes_fast_clock_2p0);
+ ar9462_2p0_modes_fast_clock);
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
ar9462_2p0_baseband_core_txfir_coeff_japan_2484);
@@ -298,6 +309,31 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
/* Fast clock modal settings */
INIT_INI_ARRAY(&ah->iniModesFastClock,
ar955x_1p0_modes_fast_clock);
+ } else if (AR_SREV_9531(ah)) {
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ qca953x_1p0_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ qca953x_1p0_mac_postamble);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ qca953x_1p0_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ qca953x_1p0_baseband_postamble);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ qca953x_1p0_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ qca953x_1p0_radio_postamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ qca953x_1p0_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ qca953x_1p0_soc_postamble);
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_1p0_common_wo_xlna_rx_gain_bounds);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p0_modes_no_xpa_tx_gain_table);
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ qca953x_1p0_modes_fast_clock);
} else if (AR_SREV_9580(ah)) {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -330,7 +366,46 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ar9580_1p0_low_ob_db_tx_gain_table);
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9580_1p0_modes_fast_clock);
+ ar9580_1p0_modes_fast_clock);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9580_1p0_baseband_core_txfir_coeff_japan_2484);
+ INIT_INI_ARRAY(&ah->ini_dfs,
+ ar9580_1p0_baseband_postamble_dfs_channel);
+ } else if (AR_SREV_9565_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9565_1p1_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9565_1p1_mac_postamble);
+
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9565_1p1_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9565_1p1_baseband_postamble);
+
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9565_1p1_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9565_1p1_radio_postamble);
+
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9565_1p1_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9565_1p1_soc_postamble);
+
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p1_Common_rx_gain_table);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p1_Modes_lowest_ob_db_tx_gain_table);
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9565_1p1_pciephy_clkreq_disable_L1);
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9565_1p1_pciephy_clkreq_disable_L1);
+
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ ar9565_1p1_modes_fast_clock);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9565_1p1_baseband_core_txfir_coeff_japan_2484);
} else if (AR_SREV_9565(ah)) {
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
ar9565_1p0_mac_core);
@@ -411,7 +486,11 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
/* Fast clock modal settings */
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9300Modes_fast_clock_2p2);
+ ar9300Modes_fast_clock_2p2);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9300_2p2_baseband_core_txfir_coeff_japan_2484);
+ INIT_INI_ARRAY(&ah->ini_dfs,
+ ar9300_2p2_baseband_postamble_dfs_channel);
}
}
@@ -432,6 +511,9 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9550(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar955x_1p0_modes_xpa_tx_gain_table);
+ else if (AR_SREV_9531(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p0_modes_xpa_tx_gain_table);
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_lowest_ob_db_tx_gain_table);
@@ -440,7 +522,10 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
ar9462_2p1_modes_low_ob_db_tx_gain);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9462_modes_low_ob_db_tx_gain_table_2p0);
+ ar9462_2p0_modes_low_ob_db_tx_gain);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p1_modes_low_ob_db_tx_gain_table);
else if (AR_SREV_9565(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9565_1p0_modes_low_ob_db_tx_gain_table);
@@ -469,12 +554,22 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
else if (AR_SREV_9550(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar955x_1p0_modes_no_xpa_tx_gain_table);
- else if (AR_SREV_9462_21(ah))
+ else if (AR_SREV_9531(ah)) {
+ if (AR_SREV_9531_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p1_modes_no_xpa_tx_gain_table);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p0_modes_no_xpa_tx_gain_table);
+ } else if (AR_SREV_9462_21(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9462_2p1_modes_high_ob_db_tx_gain);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9462_modes_high_ob_db_tx_gain_table_2p0);
+ ar9462_2p0_modes_high_ob_db_tx_gain);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p1_modes_high_ob_db_tx_gain_table);
else if (AR_SREV_9565(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9565_1p0_modes_high_ob_db_tx_gain_table);
@@ -500,6 +595,9 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_low_ob_db_tx_gain_table);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p1_modes_low_ob_db_tx_gain_table);
else if (AR_SREV_9565(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9565_1p0_modes_low_ob_db_tx_gain_table);
@@ -525,12 +623,20 @@ static void ar9003_tx_gain_table_mode3(struct ath_hw *ah)
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_high_power_tx_gain_table);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p1_modes_high_power_tx_gain_table);
else if (AR_SREV_9565(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9565_1p0_modes_high_power_tx_gain_table);
- else
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_high_power_tx_gain_table_2p2);
+ else {
+ if (ah->config.tx_gain_buffalo)
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_power_tx_gain_table_buffalo);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_power_tx_gain_table_2p2);
+ }
}
static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
@@ -546,7 +652,7 @@ static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
ar9462_2p1_modes_mix_ob_db_tx_gain);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9462_modes_mix_ob_db_tx_gain_table_2p0);
+ ar9462_2p0_modes_mix_ob_db_tx_gain);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9300Modes_mixed_ob_db_tx_gain_table_2p2);
@@ -581,6 +687,13 @@ static void ar9003_tx_gain_table_mode6(struct ath_hw *ah)
ar9580_1p0_type6_tx_gain_table);
}
+static void ar9003_tx_gain_table_mode7(struct ath_hw *ah)
+{
+ if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340_cus227_tx_gain_table_1p0);
+}
+
typedef void (*ath_txgain_tab)(struct ath_hw *ah);
static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
@@ -593,6 +706,7 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
ar9003_tx_gain_table_mode4,
ar9003_tx_gain_table_mode5,
ar9003_tx_gain_table_mode6,
+ ar9003_tx_gain_table_mode7,
};
int idx = ar9003_hw_get_tx_gain_idx(ah);
@@ -621,6 +735,11 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
ar955x_1p0_common_rx_gain_table);
INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
ar955x_1p0_common_rx_gain_bounds);
+ } else if (AR_SREV_9531(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_1p0_common_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_1p0_common_rx_gain_bounds);
} else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9580_1p0_rx_gain_table);
@@ -629,7 +748,10 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
ar9462_2p1_common_rx_gain);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_rx_gain_table_2p0);
+ ar9462_2p0_common_rx_gain);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p1_Common_rx_gain_table);
else if (AR_SREV_9565(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9565_1p0_Common_rx_gain_table);
@@ -657,15 +779,23 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
ar9462_2p1_common_wo_xlna_rx_gain);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_wo_xlna_rx_gain_table_2p0);
+ ar9462_2p0_common_wo_xlna_rx_gain);
else if (AR_SREV_9550(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar955x_1p0_common_wo_xlna_rx_gain_table);
INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
ar955x_1p0_common_wo_xlna_rx_gain_bounds);
+ } else if (AR_SREV_9531(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_1p0_common_wo_xlna_rx_gain_bounds);
} else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9580_1p0_wo_xlna_rx_gain_table);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p1_common_wo_xlna_rx_gain_table);
else if (AR_SREV_9565(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9565_1p0_common_wo_xlna_rx_gain_table);
@@ -687,7 +817,7 @@ static void ar9003_rx_gain_table_mode2(struct ath_hw *ah)
ar9462_2p1_baseband_postamble_5g_xlna);
} else if (AR_SREV_9462_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_mixed_rx_gain_table_2p0);
+ ar9462_2p0_common_mixed_rx_gain);
INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_core,
ar9462_2p0_baseband_core_mix_rxgain);
INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
@@ -701,12 +831,12 @@ static void ar9003_rx_gain_table_mode3(struct ath_hw *ah)
{
if (AR_SREV_9462_21(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_2p1_common_5g_xlna_only_rx_gain);
+ ar9462_2p1_common_5g_xlna_only_rxgain);
INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
ar9462_2p1_baseband_postamble_5g_xlna);
} else if (AR_SREV_9462_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_2p0_5g_xlna_only_rxgain);
+ ar9462_2p0_common_5g_xlna_only_rxgain);
INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
ar9462_2p0_baseband_postamble_5g_xlna);
}
@@ -750,6 +880,9 @@ static void ar9003_hw_init_mode_gain_regs(struct ath_hw *ah)
static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
bool power_off)
{
+ unsigned int i;
+ struct ar5416IniArray *array;
+
/*
* Increase L1 Entry Latency. Some WB222 boards don't have
* this change in eeprom/OTP.
@@ -775,19 +908,125 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
 * Configure PCIE after Ini init. SERDES values now come from ini file
* This enables PCIe low power mode.
*/
- if (ah->config.pcieSerDesWrite) {
- unsigned int i;
- struct ar5416IniArray *array;
+ array = power_off ? &ah->iniPcieSerdes :
+ &ah->iniPcieSerdesLowPower;
+
+ for (i = 0; i < array->ia_rows; i++) {
+ REG_WRITE(ah,
+ INI_RA(array, i, 0),
+ INI_RA(array, i, 1));
+ }
+}
+
+static void ar9003_hw_init_hang_checks(struct ath_hw *ah)
+{
+ /*
+ * All chips support detection of BB/MAC hangs.
+ */
+ ah->config.hw_hang_checks |= HW_BB_WATCHDOG;
+ ah->config.hw_hang_checks |= HW_MAC_HANG;
+
+ /*
+ * This is not required for AR9580 1.0
+ */
+ if (AR_SREV_9300_22(ah))
+ ah->config.hw_hang_checks |= HW_PHYRESTART_CLC_WAR;
+
+ if (AR_SREV_9330(ah))
+ ah->bb_watchdog_timeout_ms = 85;
+ else
+ ah->bb_watchdog_timeout_ms = 25;
+}
- array = power_off ? &ah->iniPcieSerdes :
- &ah->iniPcieSerdesLowPower;
+/*
+ * MAC HW hang check
+ * =================
+ *
+ * Signature: dcu_chain_state is 0x6 and dcu_complete_state is 0x1.
+ *
+ * The state of each DCU chain (mapped to TX queues) is available from these
+ * DMA debug registers:
+ *
+ * Chain 0 state : Bits 4:0 of AR_DMADBG_4
+ * Chain 1 state : Bits 9:5 of AR_DMADBG_4
+ * Chain 2 state : Bits 14:10 of AR_DMADBG_4
+ * Chain 3 state : Bits 19:15 of AR_DMADBG_4
+ * Chain 4 state : Bits 24:20 of AR_DMADBG_4
+ * Chain 5 state : Bits 29:25 of AR_DMADBG_4
+ * Chain 6 state : Bits 4:0 of AR_DMADBG_5
+ * Chain 7 state : Bits 9:5 of AR_DMADBG_5
+ * Chain 8 state : Bits 14:10 of AR_DMADBG_5
+ * Chain 9 state : Bits 19:15 of AR_DMADBG_5
+ *
+ * The DCU chain state "0x6" means "WAIT_FRDONE" - wait for TX frame to be done.
+ */
+
+#define NUM_STATUS_READS 50
+
+static bool ath9k_hw_verify_hang(struct ath_hw *ah, unsigned int queue)
+{
+ u32 dma_dbg_chain, dma_dbg_complete;
+ u8 dcu_chain_state, dcu_complete_state;
+ int i;
+
+ for (i = 0; i < NUM_STATUS_READS; i++) {
+ if (queue < 6)
+ dma_dbg_chain = REG_READ(ah, AR_DMADBG_4);
+ else
+ dma_dbg_chain = REG_READ(ah, AR_DMADBG_5);
+
+ dma_dbg_complete = REG_READ(ah, AR_DMADBG_6);
+
+ dcu_chain_state = (dma_dbg_chain >> (5 * queue)) & 0x1f;
+ dcu_complete_state = dma_dbg_complete & 0x3;
+
+ if ((dcu_chain_state != 0x6) || (dcu_complete_state != 0x1))
+ return false;
+ }
+
+ ath_dbg(ath9k_hw_common(ah), RESET,
+ "MAC Hang signature found for queue: %d\n", queue);
+
+ return true;
+}
+
+static bool ar9003_hw_detect_mac_hang(struct ath_hw *ah)
+{
+ u32 dma_dbg_4, dma_dbg_5, dma_dbg_6, chk_dbg;
+ u8 dcu_chain_state, dcu_complete_state;
+ bool dcu_wait_frdone = false;
+ unsigned long chk_dcu = 0;
+ unsigned int i = 0;
+
+ dma_dbg_4 = REG_READ(ah, AR_DMADBG_4);
+ dma_dbg_5 = REG_READ(ah, AR_DMADBG_5);
+ dma_dbg_6 = REG_READ(ah, AR_DMADBG_6);
+
+ dcu_complete_state = dma_dbg_6 & 0x3;
+ if (dcu_complete_state != 0x1)
+ goto exit;
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (i < 6)
+ chk_dbg = dma_dbg_4;
+ else
+ chk_dbg = dma_dbg_5;
+
+ dcu_chain_state = (chk_dbg >> (5 * i)) & 0x1f;
+ if (dcu_chain_state == 0x6) {
+ dcu_wait_frdone = true;
+ chk_dcu |= BIT(i);
+ }
+ }
- for (i = 0; i < array->ia_rows; i++) {
- REG_WRITE(ah,
- INI_RA(array, i, 0),
- INI_RA(array, i, 1));
+ if ((dcu_complete_state == 0x1) && dcu_wait_frdone) {
+ for_each_set_bit(i, &chk_dcu, ATH9K_NUM_TX_QUEUES) {
+ if (ath9k_hw_verify_hang(ah, i))
+ return true;
}
}
+exit:
+ return false;
}
 /* Sets up the AR9003 hardware family callbacks */
@@ -798,6 +1037,8 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
ar9003_hw_init_mode_regs(ah);
priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
+ priv_ops->init_hang_checks = ar9003_hw_init_hang_checks;
+ priv_ops->detect_mac_hang = ar9003_hw_detect_mac_hang;
ops->config_pci_powersave = ar9003_hw_configpcipowersave;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index f6c5c1b50471..729ffbf07343 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -175,7 +175,8 @@ static void ar9003_hw_set_desc_link(void *ds, u32 ds_link)
ads->ctl10 |= ar9003_calc_ptr_chksum(ads);
}
-static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
+ u32 *sync_cause_p)
{
u32 isr = 0;
u32 mask2 = 0;
@@ -310,7 +311,8 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
ar9003_mci_get_isr(ah, masked);
if (sync_cause) {
- ath9k_debug_sync_cause(common, sync_cause);
+ if (sync_cause_p)
+ *sync_cause_p = sync_cause;
fatal_int =
(sync_cause &
(AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
@@ -476,12 +478,12 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
/* XXX: Keycache */
rxs->rs_rssi = MS(rxsp->status5, AR_RxRSSICombined);
- rxs->rs_rssi_ctl0 = MS(rxsp->status1, AR_RxRSSIAnt00);
- rxs->rs_rssi_ctl1 = MS(rxsp->status1, AR_RxRSSIAnt01);
- rxs->rs_rssi_ctl2 = MS(rxsp->status1, AR_RxRSSIAnt02);
- rxs->rs_rssi_ext0 = MS(rxsp->status5, AR_RxRSSIAnt10);
- rxs->rs_rssi_ext1 = MS(rxsp->status5, AR_RxRSSIAnt11);
- rxs->rs_rssi_ext2 = MS(rxsp->status5, AR_RxRSSIAnt12);
+ rxs->rs_rssi_ctl[0] = MS(rxsp->status1, AR_RxRSSIAnt00);
+ rxs->rs_rssi_ctl[1] = MS(rxsp->status1, AR_RxRSSIAnt01);
+ rxs->rs_rssi_ctl[2] = MS(rxsp->status1, AR_RxRSSIAnt02);
+ rxs->rs_rssi_ext[0] = MS(rxsp->status5, AR_RxRSSIAnt10);
+ rxs->rs_rssi_ext[1] = MS(rxsp->status5, AR_RxRSSIAnt11);
+ rxs->rs_rssi_ext[2] = MS(rxsp->status5, AR_RxRSSIAnt12);
if (rxsp->status11 & AR_RxKeyIdxValid)
rxs->rs_keyix = MS(rxsp->status11, AR_KeyIdx);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index d39b79f5e841..09facba1dc6d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -103,7 +103,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
} else {
channelSel = CHANSEL_2G(freq) >> 1;
}
- } else if (AR_SREV_9550(ah)) {
+ } else if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
if (ah->is_clk_25mhz)
div = 75;
else
@@ -118,7 +118,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
/* Set to 2G mode */
bMode = 1;
} else {
- if ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) &&
+ if ((AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) &&
ah->is_clk_25mhz) {
channelSel = freq / 75;
chan_frac = ((freq % 75) * 0x20000) / 75;
@@ -641,11 +641,12 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
else
ah->enabled_cals &= ~TX_IQ_CAL;
- if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
- ah->enabled_cals |= TX_CL_CAL;
- else
- ah->enabled_cals &= ~TX_CL_CAL;
}
+
+ if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
+ ah->enabled_cals |= TX_CL_CAL;
+ else
+ ah->enabled_cals &= ~TX_CL_CAL;
}
static void ar9003_hw_prog_ini(struct ath_hw *ah,
@@ -809,10 +810,12 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
/*
* TXGAIN initvals.
*/
- if (AR_SREV_9550(ah)) {
- int modes_txgain_index;
+ if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
+ int modes_txgain_index = 1;
+
+ if (AR_SREV_9550(ah))
+ modes_txgain_index = ar9550_hw_get_modes_txgain_index(ah, chan);
- modes_txgain_index = ar9550_hw_get_modes_txgain_index(ah, chan);
if (modes_txgain_index < 0)
return -EINVAL;
@@ -1331,6 +1334,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
static void ar9003_hw_set_radar_params(struct ath_hw *ah,
struct ath_hw_radar_conf *conf)
{
+ unsigned int regWrites = 0;
u32 radar_0 = 0, radar_1 = 0;
if (!conf) {
@@ -1357,6 +1361,11 @@ static void ar9003_hw_set_radar_params(struct ath_hw *ah,
REG_SET_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
else
REG_CLR_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
+
+ if (AR_SREV_9300(ah) || AR_SREV_9340(ah) || AR_SREV_9580(ah)) {
+ REG_WRITE_ARRAY(&ah->ini_dfs,
+ IS_CHAN_HT40(ah->curchan) ? 2 : 1, regWrites);
+ }
}
static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
@@ -1807,6 +1816,68 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs));
}
+/*
+ * Baseband Watchdog signatures:
+ *
+ * 0x04000539: BB hang when operating in HT40 DFS Channel.
+ * Full chip reset is not required, but a recovery
+ * mechanism is needed.
+ *
+ * 0x1300000a: Related to CAC deafness.
+ * Chip reset is not required.
+ *
+ * 0x0400000a: Related to CAC deafness.
+ * Full chip reset is required.
+ *
+ * 0x04000b09: RX state machine gets into an illegal state
+ * when a packet with unsupported rate is received.
+ * Full chip reset is required and PHY_RESTART has
+ * to be disabled.
+ *
+ * 0x04000409: Packet stuck on receive.
+ * Full chip reset is required for all chips except AR9340.
+ */
+
+/*
+ * ar9003_hw_bb_watchdog_check(): Returns true if a chip reset is required.
+ */
+bool ar9003_hw_bb_watchdog_check(struct ath_hw *ah)
+{
+ u32 val;
+
+ switch(ah->bb_watchdog_last_status) {
+ case 0x04000539:
+ val = REG_READ(ah, AR_PHY_RADAR_0);
+ val &= (~AR_PHY_RADAR_0_FIRPWR);
+ val |= SM(0x7f, AR_PHY_RADAR_0_FIRPWR);
+ REG_WRITE(ah, AR_PHY_RADAR_0, val);
+ udelay(1);
+ val = REG_READ(ah, AR_PHY_RADAR_0);
+ val &= ~AR_PHY_RADAR_0_FIRPWR;
+ val |= SM(AR9300_DFS_FIRPWR, AR_PHY_RADAR_0_FIRPWR);
+ REG_WRITE(ah, AR_PHY_RADAR_0, val);
+
+ return false;
+ case 0x1300000a:
+ return false;
+ case 0x0400000a:
+ case 0x04000b09:
+ return true;
+ case 0x04000409:
+ if (AR_SREV_9340(ah) || AR_SREV_9531(ah))
+ return false;
+ else
+ return true;
+ default:
+ /*
+ * For any other unknown signatures, do a
+ * full chip reset.
+ */
+ return true;
+ }
+}
+EXPORT_SYMBOL(ar9003_hw_bb_watchdog_check);
+
void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -1923,6 +1994,7 @@ EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
void ar9003_hw_disable_phy_restart(struct ath_hw *ah)
{
+ u8 result;
u32 val;
/* While receiving unsupported rate frame rx state machine
@@ -1930,15 +2002,13 @@ void ar9003_hw_disable_phy_restart(struct ath_hw *ah)
* state, BB would go hang. If RXSM is in 0xb state after
* first bb panic, ensure to disable the phy_restart.
*/
- if (!((MS(ah->bb_watchdog_last_status,
- AR_PHY_WATCHDOG_RX_OFDM_SM) == 0xb) ||
- ah->bb_hang_rx_ofdm))
- return;
+ result = MS(ah->bb_watchdog_last_status, AR_PHY_WATCHDOG_RX_OFDM_SM);
- ah->bb_hang_rx_ofdm = true;
- val = REG_READ(ah, AR_PHY_RESTART);
- val &= ~AR_PHY_RESTART_ENA;
-
- REG_WRITE(ah, AR_PHY_RESTART, val);
+ if ((result == 0xb) || ah->bb_hang_rx_ofdm) {
+ ah->bb_hang_rx_ofdm = true;
+ val = REG_READ(ah, AR_PHY_RESTART);
+ val &= ~AR_PHY_RESTART_ENA;
+ REG_WRITE(ah, AR_PHY_RESTART, val);
+ }
}
EXPORT_SYMBOL(ar9003_hw_disable_phy_restart);
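A minimal sketch of how a recovery path could consume the two helpers touched above, following the baseband-watchdog signature table in the comment block; the reset hook and calling context are hypothetical placeholders, not ath9k API:

/*
 * Illustrative only: pick between in-place recovery and a full chip
 * reset based on ar9003_hw_bb_watchdog_check().  full_chip_reset() is
 * a placeholder for whatever reset path the caller owns.
 */
static void example_bb_watchdog_recover(struct ath_hw *ah,
					void (*full_chip_reset)(struct ath_hw *ah))
{
	/* Signatures 0x04000539, 0x1300000a and (on AR9340/AR9531)
	 * 0x04000409 are recovered in place by the helper itself. */
	if (!ar9003_hw_bb_watchdog_check(ah))
		return;

	/* 0x04000b09 additionally requires PHY_RESTART to be disabled
	 * before the reset, as noted in the signature table. */
	if (ah->bb_watchdog_last_status == 0x04000b09)
		ar9003_hw_disable_phy_restart(ah);

	full_chip_reset(ah);	/* hypothetical reset hook */
}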
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 2af667beb273..fd090b1f2d0f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -270,7 +270,7 @@
#define AR_PHY_AGC (AR_AGC_BASE + 0x14)
#define AR_PHY_EXT_ATTEN_CTL_0 (AR_AGC_BASE + 0x18)
#define AR_PHY_CCA_0 (AR_AGC_BASE + 0x1c)
-#define AR_PHY_EXT_CCA0 (AR_AGC_BASE + 0x20)
+#define AR_PHY_CCA_CTRL_0 (AR_AGC_BASE + 0x20)
#define AR_PHY_RESTART (AR_AGC_BASE + 0x24)
/*
@@ -338,17 +338,17 @@
#define AR_PHY_CCA_NOM_VAL_9300_5GHZ -115
#define AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ -125
#define AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ -125
-#define AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ -95
-#define AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ -100
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_FCC_2GHZ -95
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_FCC_5GHZ -100
#define AR_PHY_CCA_NOM_VAL_9462_2GHZ -127
#define AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ -127
#define AR_PHY_CCA_MAX_GOOD_VAL_9462_2GHZ -60
-#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ -95
#define AR_PHY_CCA_NOM_VAL_9462_5GHZ -127
#define AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ -127
#define AR_PHY_CCA_MAX_GOOD_VAL_9462_5GHZ -60
-#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ -100
#define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
@@ -397,6 +397,8 @@
#define AR9280_PHY_CCA_THRESH62_S 12
#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
#define AR_PHY_EXT_CCA0_THRESH62_S 0
+#define AR_PHY_EXT_CCA0_THRESH62_1 0x000001FF
+#define AR_PHY_EXT_CCA0_THRESH62_1_S 0
#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
@@ -667,6 +669,16 @@
#define AR_PHY_65NM_CH1_RXTX4 0x1650c
#define AR_PHY_65NM_CH2_RXTX4 0x1690c
+#define AR_PHY_65NM_CH0_BB1 0x16140
+#define AR_PHY_65NM_CH0_BB2 0x16144
+#define AR_PHY_65NM_CH0_BB3 0x16148
+#define AR_PHY_65NM_CH1_BB1 0x16540
+#define AR_PHY_65NM_CH1_BB2 0x16544
+#define AR_PHY_65NM_CH1_BB3 0x16548
+#define AR_PHY_65NM_CH2_BB1 0x16940
+#define AR_PHY_65NM_CH2_BB2 0x16944
+#define AR_PHY_65NM_CH2_BB3 0x16948
+
#define AR_PHY_65NM_CH0_SYNTH12_VREFMUL3 0x00780000
#define AR_PHY_65NM_CH0_SYNTH12_VREFMUL3_S 19
#define AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK 0x00000004
@@ -1331,4 +1343,6 @@
#define AR_PHY_65NM_RXRF_AGC_AGC_OUT 0x00000004
#define AR_PHY_65NM_RXRF_AGC_AGC_OUT_S 2
+#define AR9300_DFS_FIRPWR -28
+
#endif /* AR9003_PHY_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_wow.c b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
new file mode 100644
index 000000000000..81c88dd606dc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "ath9k.h"
+#include "reg.h"
+#include "hw-ops.h"
+
+const char *ath9k_hw_wow_event_to_string(u32 wow_event)
+{
+ if (wow_event & AH_WOW_MAGIC_PATTERN_EN)
+ return "Magic pattern";
+ if (wow_event & AH_WOW_USER_PATTERN_EN)
+ return "User pattern";
+ if (wow_event & AH_WOW_LINK_CHANGE)
+ return "Link change";
+ if (wow_event & AH_WOW_BEACON_MISS)
+ return "Beacon miss";
+
+ return "unknown reason";
+}
+EXPORT_SYMBOL(ath9k_hw_wow_event_to_string);
+
+static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+
+ /* set rx disable bit */
+ REG_WRITE(ah, AR_CR, AR_CR_RXD);
+
+ if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) {
+ ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
+ REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
+ return;
+ }
+
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
+}
+
+static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u8 sta_mac_addr[ETH_ALEN], ap_mac_addr[ETH_ALEN];
+ u32 ctl[13] = {0};
+ u32 data_word[KAL_NUM_DATA_WORDS];
+ u8 i;
+ u32 wow_ka_data_word0;
+
+ memcpy(sta_mac_addr, common->macaddr, ETH_ALEN);
+ memcpy(ap_mac_addr, common->curbssid, ETH_ALEN);
+
+ /* set the transmit buffer */
+ ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
+ ctl[1] = 0;
+ ctl[3] = 0xb; /* OFDM_6M hardware value for this rate */
+ ctl[4] = 0;
+ ctl[7] = (ah->txchainmask) << 2;
+ ctl[2] = 0xf << 16; /* tx_tries 0 */
+
+ for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
+ REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
+
+ REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
+
+ data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
+ (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
+ data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
+ (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
+ data_word[2] = (sta_mac_addr[1] << 24) | (sta_mac_addr[0] << 16) |
+ (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
+ data_word[3] = (sta_mac_addr[5] << 24) | (sta_mac_addr[4] << 16) |
+ (sta_mac_addr[3] << 8) | (sta_mac_addr[2]);
+ data_word[4] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
+ (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
+ data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
+
+ if (AR_SREV_9462_20(ah)) {
+ /* AR9462 2.0 has an extra descriptor word (time based
+ * discard) compared to other chips */
+ REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
+ wow_ka_data_word0 = AR_WOW_TXBUF(13);
+ } else {
+ wow_ka_data_word0 = AR_WOW_TXBUF(12);
+ }
+
+ for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
+ REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]);
+
+}
+
+void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
+ u8 *user_mask, int pattern_count,
+ int pattern_len)
+{
+ int i;
+ u32 pattern_val, mask_val;
+ u32 set, clr;
+
+ /* FIXME: should check count by querying the hardware capability */
+ if (pattern_count >= MAX_NUM_PATTERN)
+ return;
+
+ REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));
+
+ /* set the registers for pattern */
+ for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
+ memcpy(&pattern_val, user_pattern, 4);
+ REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i),
+ pattern_val);
+ user_pattern += 4;
+ }
+
+ /* set the registers for mask */
+ for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
+ memcpy(&mask_val, user_mask, 4);
+ REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val);
+ user_mask += 4;
+ }
+
+ /* set the pattern length to be matched
+ *
+ * AR_WOW_LENGTH1_REG1
+ * bit 31:24 pattern 0 length
+ * bit 23:16 pattern 1 length
+ * bit 15:8 pattern 2 length
+ * bit 7:0 pattern 3 length
+ *
+ * AR_WOW_LENGTH1_REG2
+ * bit 31:24 pattern 4 length
+ * bit 23:16 pattern 5 length
+ * bit 15:8 pattern 6 length
+ * bit 7:0 pattern 7 length
+ *
+ * the below logic writes out the new
+ * pattern length for the corresponding
+ * pattern_count, while masking out the
+ * other fields
+ */
+
+ ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
+
+ if (pattern_count < 4) {
+ /* Pattern 0-3 uses AR_WOW_LENGTH1 register */
+ set = (pattern_len & AR_WOW_LENGTH_MAX) <<
+ AR_WOW_LEN1_SHIFT(pattern_count);
+ clr = AR_WOW_LENGTH1_MASK(pattern_count);
+ REG_RMW(ah, AR_WOW_LENGTH1, set, clr);
+ } else {
+ /* Pattern 4-7 uses AR_WOW_LENGTH2 register */
+ set = (pattern_len & AR_WOW_LENGTH_MAX) <<
+ AR_WOW_LEN2_SHIFT(pattern_count);
+ clr = AR_WOW_LENGTH2_MASK(pattern_count);
+ REG_RMW(ah, AR_WOW_LENGTH2, set, clr);
+ }
+
+}
+EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern);
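A stand-alone illustration of the length-register packing documented in the comment above (four 8-bit length fields per 32-bit register, pattern 0 in bits 31:24); the helper mirrors what the REG_RMW() call ends up doing, but it is not the driver's macro set:

#include <stdint.h>
#include <stdio.h>

/* Four 8-bit length fields per register, pattern 0 in bits 31:24. */
static uint32_t wow_len_reg_update(uint32_t reg, int pattern, uint32_t len)
{
	int shift = (3 - (pattern & 3)) * 8;	/* pattern 0 -> bits 31:24 */
	uint32_t clr = 0xffu << shift;		/* field to mask out */
	uint32_t set = (len & 0xffu) << shift;	/* new pattern length */

	return (reg & ~clr) | set;		/* read-modify-write result */
}

int main(void)
{
	/* Program a 22-byte length for pattern 2 of patterns 0-3. */
	uint32_t reg = 0xaabbccdd;

	printf("0x%08x\n", wow_len_reg_update(reg, 2, 22));	/* 0xaabb16dd */
	return 0;
}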
+
+u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
+{
+ u32 wow_status = 0;
+ u32 val = 0, rval;
+
+ /*
+ * read the WoW status register to know
+ * the wakeup reason
+ */
+ rval = REG_READ(ah, AR_WOW_PATTERN);
+ val = AR_WOW_STATUS(rval);
+
+ /*
+ * mask only the WoW events that we have enabled. Sometimes
+ * we have spurious WoW events from the AR_WOW_PATTERN
+ * register. This mask will clean it up.
+ */
+
+ val &= ah->wow_event_mask;
+
+ if (val) {
+ if (val & AR_WOW_MAGIC_PAT_FOUND)
+ wow_status |= AH_WOW_MAGIC_PATTERN_EN;
+ if (AR_WOW_PATTERN_FOUND(val))
+ wow_status |= AH_WOW_USER_PATTERN_EN;
+ if (val & AR_WOW_KEEP_ALIVE_FAIL)
+ wow_status |= AH_WOW_LINK_CHANGE;
+ if (val & AR_WOW_BEACON_FAIL)
+ wow_status |= AH_WOW_BEACON_MISS;
+ }
+
+ /*
+ * set and clear WOW_PME_CLEAR registers for the chip to
+ * generate next wow signal.
+ * disable D3 before accessing other registers ?
+ */
+
+ /* do we need to check the bit value 0x01000000 (7-10) ?? */
+ REG_RMW(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR,
+ AR_PMCTRL_PWR_STATE_D1D3);
+
+ /*
+ * clear all events
+ */
+ REG_WRITE(ah, AR_WOW_PATTERN,
+ AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
+
+ /*
+ * restore the beacon threshold to init value
+ */
+ REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
+
+ /*
+ * Restore the way the PCI-E reset, Power-On-Reset, external
+ * PCIE_POR_SHORT pins are tied to its original value.
+ * Previously just before WoW sleep, we untie the PCI-E
+ * reset to our Chip's Power On Reset so that any PCI-E
+ * reset from the bus will not reset our chip
+ */
+ if (ah->is_pciexpress)
+ ath9k_hw_configpcipowersave(ah, false);
+
+ ah->wow_event_mask = 0;
+
+ return wow_status;
+}
+EXPORT_SYMBOL(ath9k_hw_wow_wakeup);
+
+void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
+{
+ u32 wow_event_mask;
+ u32 set, clr;
+
+ /*
+ * wow_event_mask is a mask to the AR_WOW_PATTERN register to
+ * indicate which WoW events we have enabled. The WoW events
+ * are from the 'pattern_enable' in this function and
+ * 'pattern_count' of ath9k_hw_wow_apply_pattern()
+ */
+ wow_event_mask = ah->wow_event_mask;
+
+ /*
+ * Untie Power-on-Reset from the PCI-E-Reset. When we are in
+ * WOW sleep, we do want the Reset from the PCI-E to disturb
+ * our hw state
+ */
+ if (ah->is_pciexpress) {
+ /*
+ * we need to untie the internal POR (power-on-reset)
+ * to the external PCI-E reset. We also need to tie
+ * the PCI-E Phy reset to the PCI-E reset.
+ */
+ set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
+ clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
+ REG_RMW(ah, AR_WA, set, clr);
+ }
+
+ /*
+ * set the power states appropriately and enable PME
+ */
+ set = AR_PMCTRL_HOST_PME_EN | AR_PMCTRL_PWR_PM_CTRL_ENA |
+ AR_PMCTRL_AUX_PWR_DET | AR_PMCTRL_WOW_PME_CLR;
+
+ /*
+ * set and clear WOW_PME_CLEAR registers for the chip
+ * to generate next wow signal.
+ */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
+ clr = AR_PMCTRL_WOW_PME_CLR;
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+
+ /*
+ * Setup for:
+ * - beacon misses
+ * - magic pattern
+ * - keep alive timeout
+ * - pattern matching
+ */
+
+ /*
+ * Program default values for pattern backoff, aifs/slot/KAL count,
+ * beacon miss timeout, KAL timeout, etc.
+ */
+ set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF);
+ REG_SET_BIT(ah, AR_WOW_PATTERN, set);
+
+ set = AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
+ AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
+ AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT);
+ REG_SET_BIT(ah, AR_WOW_COUNT, set);
+
+ if (pattern_enable & AH_WOW_BEACON_MISS)
+ set = AR_WOW_BEACON_TIMO;
+ /* We are not using beacon miss, program a large value */
+ else
+ set = AR_WOW_BEACON_TIMO_MAX;
+
+ REG_WRITE(ah, AR_WOW_BCN_TIMO, set);
+
+ /*
+ * Keep alive timo in ms except AR9280
+ */
+ if (!pattern_enable)
+ set = AR_WOW_KEEP_ALIVE_NEVER;
+ else
+ set = KAL_TIMEOUT * 32;
+
+ REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, set);
+
+ /*
+ * Keep alive delay in us. based on 'power on clock',
+ * therefore in usec
+ */
+ set = KAL_DELAY * 1000;
+ REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, set);
+
+ /*
+ * Create keep alive pattern to respond to beacons
+ */
+ ath9k_wow_create_keep_alive_pattern(ah);
+
+ /*
+ * Configure MAC WoW Registers
+ */
+ set = 0;
+ /* Send keep alive timeouts anyway */
+ clr = AR_WOW_KEEP_ALIVE_AUTO_DIS;
+
+ if (pattern_enable & AH_WOW_LINK_CHANGE)
+ wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
+ else
+ set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
+
+ set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
+ REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr);
+
+ /*
+ * we are relying on a bmiss failure. ensure we have
+ * enough threshold to prevent false positives
+ */
+ REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
+ AR_WOW_BMISSTHRESHOLD);
+
+ set = 0;
+ clr = 0;
+
+ if (pattern_enable & AH_WOW_BEACON_MISS) {
+ set = AR_WOW_BEACON_FAIL_EN;
+ wow_event_mask |= AR_WOW_BEACON_FAIL;
+ } else {
+ clr = AR_WOW_BEACON_FAIL_EN;
+ }
+
+ REG_RMW(ah, AR_WOW_BCN_EN, set, clr);
+
+ set = 0;
+ clr = 0;
+ /*
+ * Enable the magic packet registers
+ */
+ if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) {
+ set = AR_WOW_MAGIC_EN;
+ wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
+ } else {
+ clr = AR_WOW_MAGIC_EN;
+ }
+ set |= AR_WOW_MAC_INTR_EN;
+ REG_RMW(ah, AR_WOW_PATTERN, set, clr);
+
+ REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
+ AR_WOW_PATTERN_SUPPORTED);
+
+ /*
+ * Set the power states appropriately and enable PME
+ */
+ clr = 0;
+ set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN |
+ AR_PMCTRL_PWR_PM_CTRL_ENA;
+
+ clr = AR_PCIE_PM_CTRL_ENA;
+ REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
+
+ /*
+ * this is needed to prevent the chip waking up
+ * the host within 3-4 seconds with certain
+ * platform/BIOS. The fix is to enable
+ * D1 & D3 to match original definition and
+ * also match the OTP value. Anyway this
+ * is more related to SW WOW.
+ */
+ clr = AR_PMCTRL_PWR_STATE_D1D3;
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+
+ set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
+
+ REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
+
+ /* to bring down WOW power low margin */
+ set = BIT(13);
+ REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
+ /* HW WoW */
+ clr = BIT(5);
+ REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
+
+ ath9k_hw_set_powermode_wow_sleep(ah);
+ ah->wow_event_mask = wow_event_mask;
+}
+EXPORT_SYMBOL(ath9k_hw_wow_enable);
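Roughly how the helpers exported from this new file fit together on the suspend/resume path; the wrappers below are simplified placeholders (no locking, no chip power handling, no mac80211 glue) and assume only the functions visible above plus the usual ath_dbg() logging:

/*
 * Illustrative suspend/resume flow, not the ath9k wow.c implementation.
 * 'patterns' and 'masks' stand in for whatever the caller collected
 * from userspace.
 */
static void example_wow_suspend(struct ath_hw *ah,
				u8 patterns[][MAX_PATTERN_SIZE],
				u8 masks[][MAX_PATTERN_MASK_SIZE],
				int n_patterns, int pattern_len)
{
	u32 wow_triggers = AH_WOW_MAGIC_PATTERN_EN | AH_WOW_USER_PATTERN_EN;
	int i;

	for (i = 0; i < n_patterns; i++)
		ath9k_hw_wow_apply_pattern(ah, patterns[i], masks[i],
					   i, pattern_len);

	ath9k_hw_wow_enable(ah, wow_triggers);
}

static void example_wow_resume(struct ath_hw *ah)
{
	u32 status = ath9k_hw_wow_wakeup(ah);

	ath_dbg(ath9k_hw_common(ah), WOW, "Wakeup reason: %s\n",
		ath9k_hw_wow_event_to_string(status));
}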
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index 6e1756bc3833..f76139bbb74f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -18,6 +18,10 @@
#ifndef INITVALS_9330_1P1_H
#define INITVALS_9330_1P1_H
+#define ar9331_1p1_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
+#define ar9331_modes_high_power_tx_gain_1p1 ar9331_modes_lowest_ob_db_tx_gain_1p1
+
static const u32 ar9331_1p1_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
@@ -55,7 +59,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -252,7 +256,7 @@ static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
{0x0000a2e4, 0xfffff000, 0xfffff000, 0xfffff000, 0xfffff000},
{0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffe0000, 0xfffe0000},
- {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d0, 0x000050d0},
+ {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d4, 0x000050d4},
{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
{0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
@@ -337,8 +341,6 @@ static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
{0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
};
-#define ar9331_1p1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
-
static const u32 ar9331_1p1_xtal_25M[][2] = {
/* Addr allmodes */
{0x00007038, 0x000002f8},
@@ -373,17 +375,17 @@ static const u32 ar9331_1p1_radio_core[][2] = {
{0x000160b4, 0x92480040},
{0x000160c0, 0x006db6db},
{0x000160c4, 0x0186db60},
- {0x000160c8, 0x6db4db6c},
+ {0x000160c8, 0x6db6db6c},
{0x000160cc, 0x6de6c300},
{0x000160d0, 0x14500820},
{0x00016100, 0x04cb0001},
{0x00016104, 0xfff80015},
{0x00016108, 0x00080010},
{0x0001610c, 0x00170000},
- {0x00016140, 0x10800000},
+ {0x00016140, 0x50804000},
{0x00016144, 0x01884080},
{0x00016148, 0x000080c0},
- {0x00016280, 0x01000015},
+ {0x00016280, 0x01001015},
{0x00016284, 0x14d20000},
{0x00016288, 0x00318000},
{0x0001628c, 0x50000000},
@@ -622,12 +624,12 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
{0x0000a370, 0x00000000},
{0x0000a390, 0x00000001},
{0x0000a394, 0x00000444},
- {0x0000a398, 0x001f0e0f},
- {0x0000a39c, 0x0075393f},
- {0x0000a3a0, 0xb79f6427},
- {0x0000a3a4, 0x00000000},
- {0x0000a3a8, 0xaaaaaaaa},
- {0x0000a3ac, 0x3c466478},
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x210d0401},
+ {0x0000a3a0, 0xab9a7144},
+ {0x0000a3a4, 0x00000011},
+ {0x0000a3a8, 0x3c3c003d},
+ {0x0000a3ac, 0x30310030},
{0x0000a3c0, 0x20202020},
{0x0000a3c4, 0x22222220},
{0x0000a3c8, 0x20200020},
@@ -686,100 +688,18 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
{0x0000a7dc, 0x00000001},
};
-static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
+static const u32 ar9331_1p1_mac_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
- {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
- {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
- {0x0000a2e4, 0xfffff000, 0xfffff000, 0xfffff000, 0xfffff000},
- {0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffe0000, 0xfffe0000},
- {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d0, 0x000050d0},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x2d000a20, 0x2d000a20},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000a22, 0x31000a22},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000a24, 0x35000a24},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000a43, 0x38000a43},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3b000e42, 0x3b000e42},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x3f000e44, 0x3f000e44},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x42000e64, 0x42000e64},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46000e66, 0x46000e66},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x4a000ea6, 0x4a000ea6},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4a000ea6, 0x4a000ea6},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x4a000ea6, 0x4a000ea6},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x4a000ea6, 0x4a000ea6},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
- {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
- {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
- {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
- {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
- {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
- {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
- {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
- {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
- {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
- {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
- {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
- {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
- {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
- {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
- {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
- {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
- {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
- {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
- {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
- {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
- {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
- {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
- {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
- {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
- {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
- {0x0000a620, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802},
- {0x0000a624, 0x03010a03, 0x03010a03, 0x03010a03, 0x03010a03},
- {0x0000a628, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
- {0x0000a62c, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
- {0x0000a630, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
- {0x0000a634, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
- {0x0000a638, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
- {0x0000a63c, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
- {0x00016044, 0x034922db, 0x034922db, 0x034922db, 0x034922db},
- {0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
};
-#define ar9331_1p1_mac_postamble ar9300_2p2_mac_postamble
-
static const u32 ar9331_1p1_soc_preamble[][2] = {
/* Addr allmodes */
{0x00007020, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
index 57ed8a112173..0ac8be96097f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -18,6 +18,28 @@
#ifndef INITVALS_9330_1P2_H
#define INITVALS_9330_1P2_H
+#define ar9331_modes_high_power_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
+
+#define ar9331_modes_low_ob_db_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
+
+#define ar9331_modes_lowest_ob_db_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
+
+#define ar9331_1p2_baseband_core_txfir_coeff_japan_2484 ar9331_1p1_baseband_core_txfir_coeff_japan_2484
+
+#define ar9331_1p2_xtal_25M ar9331_1p1_xtal_25M
+
+#define ar9331_1p2_xtal_40M ar9331_1p1_xtal_40M
+
+#define ar9331_1p2_soc_postamble ar9331_1p1_soc_postamble
+
+#define ar9331_1p2_mac_postamble ar9331_1p1_mac_postamble
+
+#define ar9331_1p2_soc_preamble ar9331_1p1_soc_preamble
+
+#define ar9331_1p2_mac_core ar9331_1p1_mac_core
+
+#define ar9331_common_wo_xlna_rx_gain_1p2 ar9331_common_wo_xlna_rx_gain_1p1
+
static const u32 ar9331_modes_high_ob_db_tx_gain_1p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
@@ -103,57 +125,6 @@ static const u32 ar9331_modes_high_ob_db_tx_gain_1p2[][5] = {
{0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
};
-#define ar9331_modes_high_power_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
-
-#define ar9331_modes_low_ob_db_tx_gain_1p2 ar9331_modes_high_power_tx_gain_1p2
-
-#define ar9331_modes_lowest_ob_db_tx_gain_1p2 ar9331_modes_low_ob_db_tx_gain_1p2
-
-static const u32 ar9331_1p2_baseband_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
- {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
- {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
- {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
- {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
- {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
- {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
- {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
- {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
- {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
- {0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
- {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
- {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
- {0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
- {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
- {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
- {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
- {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
- {0x0000a204, 0x00003fc0, 0x00003fc4, 0x00003fc4, 0x00003fc0},
- {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
- {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
- {0x0000a234, 0x00000fff, 0x00000fff, 0x10000fff, 0x00000fff},
- {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
- {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
- {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
- {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
- {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
- {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
- {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
- {0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
- {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
- {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
- {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
- {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-};
-
static const u32 ar9331_1p2_radio_core[][2] = {
/* Addr allmodes */
{0x00016000, 0x36db6db6},
@@ -219,24 +190,318 @@ static const u32 ar9331_1p2_radio_core[][2] = {
{0x000163d4, 0x00000000},
};
-#define ar9331_1p2_baseband_core_txfir_coeff_japan_2484 ar9331_1p1_baseband_core_txfir_coeff_japan_2484
-
-#define ar9331_1p2_xtal_25M ar9331_1p1_xtal_25M
-
-#define ar9331_1p2_xtal_40M ar9331_1p1_xtal_40M
-
-#define ar9331_1p2_baseband_core ar9331_1p1_baseband_core
-
-#define ar9331_1p2_soc_postamble ar9331_1p1_soc_postamble
-
-#define ar9331_1p2_mac_postamble ar9331_1p1_mac_postamble
-
-#define ar9331_1p2_soc_preamble ar9331_1p1_soc_preamble
-
-#define ar9331_1p2_mac_core ar9331_1p1_mac_core
+static const u32 ar9331_1p2_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a8f6b},
+ {0x0000980c, 0x04800000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0x00000000},
+ {0x00009c08, 0x03200000},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x1883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c00400},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038233c},
+ {0x00009e24, 0x9927b515},
+ {0x00009e28, 0x12ef0200},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x04000000},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a640, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+};
-#define ar9331_common_wo_xlna_rx_gain_1p2 ar9331_common_wo_xlna_rx_gain_1p1
+static const u32 ar9331_1p2_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+ {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
+ {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
+ {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
+ {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
+ {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x00003fc0, 0x00003fc4, 0x00003fc4, 0x00003fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x00000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
-#define ar9331_common_rx_gain_1p2 ar9485_common_rx_gain_1_1
+static const u32 ar9331_common_rx_gain_1p2[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x01800082},
+ {0x0000a014, 0x01820181},
+ {0x0000a018, 0x01840183},
+ {0x0000a01c, 0x01880185},
+ {0x0000a020, 0x018a0189},
+ {0x0000a024, 0x02850284},
+ {0x0000a028, 0x02890288},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x21212128},
+ {0x0000a098, 0x171c1c1c},
+ {0x0000a09c, 0x02020212},
+ {0x0000a0a0, 0x00000202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x111f1100},
+ {0x0000a0c8, 0x111d111e},
+ {0x0000a0cc, 0x111b111c},
+ {0x0000a0d0, 0x22032204},
+ {0x0000a0d4, 0x22012202},
+ {0x0000a0d8, 0x221f2200},
+ {0x0000a0dc, 0x221d221e},
+ {0x0000a0e0, 0x33013302},
+ {0x0000a0e4, 0x331f3300},
+ {0x0000a0e8, 0x4402331e},
+ {0x0000a0ec, 0x44004401},
+ {0x0000a0f0, 0x441e441f},
+ {0x0000a0f4, 0x55015502},
+ {0x0000a0f8, 0x551f5500},
+ {0x0000a0fc, 0x6602551e},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
+};
#endif /* INITVALS_9330_1P2_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index 25db9215985a..a01f0edb6518 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -18,6 +18,20 @@
#ifndef INITVALS_9340_H
#define INITVALS_9340_H
+#define ar9340_1p0_mac_postamble ar9300_2p2_mac_postamble
+
+#define ar9340_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define ar9340Modes_fast_clock_1p0 ar9300Modes_fast_clock_2p2
+
+#define ar9340Common_rx_gain_table_1p0 ar9300Common_rx_gain_table_2p2
+
+#define ar9340Common_wo_xlna_rx_gain_table_1p0 ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define ar9340_1p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
+#define ar9340_1p0_baseband_postamble_dfs_channel ar9300_2p2_baseband_postamble_dfs_channel
+
static const u32 ar9340_1p0_radio_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000160ac, 0xa4646800, 0xa4646800, 0xa4646800, 0xa4646800},
@@ -100,8 +114,6 @@ static const u32 ar9340Modes_lowest_ob_db_tx_gain_table_1p0[][5] = {
{0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
};
-#define ar9340Modes_fast_clock_1p0 ar9300Modes_fast_clock_2p2
-
static const u32 ar9340_1p0_radio_core[][2] = {
/* Addr allmodes */
{0x00016000, 0x36db6db6},
@@ -215,16 +227,12 @@ static const u32 ar9340_1p0_radio_core_40M[][2] = {
{0x0000824c, 0x0001e800},
};
-#define ar9340_1p0_mac_postamble ar9300_2p2_mac_postamble
-
-#define ar9340_1p0_soc_postamble ar9300_2p2_soc_postamble
-
static const u32 ar9340_1p0_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
{0x00009820, 0x206a022e, 0x206a022e, 0x206a022e, 0x206a022e},
{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
- {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x00009828, 0x06903081, 0x06903081, 0x09103881, 0x09103881},
{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
@@ -340,9 +348,9 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x0000a370, 0x00000000},
{0x0000a390, 0x00000001},
{0x0000a394, 0x00000444},
- {0x0000a398, 0x001f0e0f},
- {0x0000a39c, 0x0075393f},
- {0x0000a3a0, 0xb79f6427},
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x210d0401},
+ {0x0000a3a0, 0xab9a7144},
{0x0000a3a4, 0x00000000},
{0x0000a3a8, 0xaaaaaaaa},
{0x0000a3ac, 0x3c466478},
@@ -714,266 +722,6 @@ static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = {
{0x0000b2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000},
};
-static const u32 ar9340Common_rx_gain_table_1p0[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x01910190},
- {0x0000a030, 0x01930192},
- {0x0000a034, 0x01950194},
- {0x0000a038, 0x038a0196},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x22222229},
- {0x0000a084, 0x1d1d1d1d},
- {0x0000a088, 0x1d1d1d1d},
- {0x0000a08c, 0x1d1d1d1d},
- {0x0000a090, 0x171d1d1d},
- {0x0000a094, 0x11111717},
- {0x0000a098, 0x00030311},
- {0x0000a09c, 0x00000000},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x23232323},
- {0x0000b084, 0x21232323},
- {0x0000b088, 0x19191c1e},
- {0x0000b08c, 0x12141417},
- {0x0000b090, 0x07070e0e},
- {0x0000b094, 0x03030305},
- {0x0000b098, 0x00000003},
- {0x0000b09c, 0x00000000},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
@@ -1437,8 +1185,6 @@ static const u32 ar9340_1p0_mac_core[][2] = {
{0x000083d0, 0x000101ff},
};
-#define ar9340Common_wo_xlna_rx_gain_table_1p0 ar9300Common_wo_xlna_rx_gain_table_2p2
-
static const u32 ar9340_1p0_soc_preamble[][2] = {
/* Addr allmodes */
{0x00007008, 0x00000000},
@@ -1447,4 +1193,106 @@ static const u32 ar9340_1p0_soc_preamble[][2] = {
{0x00007038, 0x000004c2},
};
+static const u32 ar9340_cus227_tx_gain_table_1p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2c022220, 0x2c022220, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x30022222, 0x30022222, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x35022225, 0x35022225, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x3b02222a, 0x3b02222a, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x3f02222c, 0x3f02222c, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x34001640, 0x34001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x11800400, 0x11800400},
+ {0x0000a598, 0x21820220, 0x21820220, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
+ {0x00016048, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
+ {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
+ {0x00016288, 0x30318000, 0x30318000, 0x00318000, 0x00318000},
+ {0x00016444, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
+ {0x00016448, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
+ {0x0000a3a4, 0x00000011, 0x00000011, 0x00000011, 0x00000011},
+ {0x0000a3a8, 0x3c3c3c3c, 0x3c3c3c3c, 0x3c3c3c3c, 0x3c3c3c3c},
+ {0x0000a3ac, 0x30303030, 0x30303030, 0x30303030, 0x30303030},
+};
+
#endif /* INITVALS_9340_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 092b9d412e7f..1cc13569b17b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -20,7 +20,15 @@
/* AR9462 2.0 */
-static const u32 ar9462_modes_fast_clock_2p0[][3] = {
+#define ar9462_2p0_mac_postamble ar9331_1p1_mac_postamble
+
+#define ar9462_2p0_common_wo_xlna_rx_gain ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define ar9462_2p0_common_5g_xlna_only_rxgain ar9462_2p0_common_mixed_rx_gain
+
+#define ar9462_2p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
+static const u32 ar9462_2p0_modes_fast_clock[][3] = {
/* Addr 5G_HT20 5G_HT40 */
{0x00001030, 0x00000268, 0x000004d0},
{0x00001070, 0x0000018c, 0x00000318},
@@ -33,13 +41,6 @@ static const u32 ar9462_modes_fast_clock_2p0[][3] = {
{0x0000a254, 0x00000898, 0x00001130},
};
-static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x18253ede},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0003780c},
-};
-
static const u32 ar9462_2p0_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
@@ -99,7 +100,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
};
-static const u32 ar9462_common_rx_gain_table_2p0[][2] = {
+static const u32 ar9462_2p0_common_rx_gain[][2] = {
/* Addr allmodes */
{0x0000a000, 0x00010000},
{0x0000a004, 0x00030002},
@@ -359,20 +360,13 @@ static const u32 ar9462_common_rx_gain_table_2p0[][2] = {
{0x0000b1fc, 0x00000196},
};
-static const u32 ar9462_pciephy_clkreq_disable_L1_2p0[][2] = {
+static const u32 ar9462_2p0_pciephy_clkreq_disable_L1[][2] = {
/* Addr allmodes */
{0x00018c00, 0x18213ede},
{0x00018c04, 0x000801d8},
{0x00018c08, 0x0003780c},
};
-static const u32 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x18212ede},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0003780c},
-};
-
static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
@@ -380,274 +374,7 @@ static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
{0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
};
-static const u32 ar9462_common_wo_xlna_rx_gain_table_2p0[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x32323232},
- {0x0000b084, 0x2f2f3232},
- {0x0000b088, 0x23282a2d},
- {0x0000b08c, 0x1c1e2123},
- {0x0000b090, 0x14171919},
- {0x0000b094, 0x0e0e1214},
- {0x0000b098, 0x03050707},
- {0x0000b09c, 0x00030303},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_2p0_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
-
-static const u32 ar9462_modes_low_ob_db_tx_gain_table_2p0[][5] = {
+static const u32 ar9462_2p0_modes_low_ob_db_tx_gain[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
@@ -879,7 +606,7 @@ static const u32 ar9462_2p0_radio_postamble[][5] = {
{0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
};
-static const u32 ar9462_modes_mix_ob_db_tx_gain_table_2p0[][5] = {
+static const u32 ar9462_2p0_modes_mix_ob_db_tx_gain[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
{0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
@@ -942,7 +669,7 @@ static const u32 ar9462_modes_mix_ob_db_tx_gain_table_2p0[][5] = {
{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
};
-static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
+static const u32 ar9462_2p0_modes_high_ob_db_tx_gain[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
{0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
@@ -1240,19 +967,7 @@ static const u32 ar9462_2p0_mac_core[][2] = {
{0x000083d0, 0x000301ff},
};
-static const u32 ar9462_2p0_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
-
-static const u32 ar9462_common_mixed_rx_gain_table_2p0[][2] = {
+static const u32 ar9462_2p0_common_mixed_rx_gain[][2] = {
/* Addr allmodes */
{0x0000a000, 0x00010000},
{0x0000a004, 0x00030002},
@@ -1517,266 +1232,6 @@ static const u32 ar9462_2p0_baseband_postamble_5g_xlna[][5] = {
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
};
-static const u32 ar9462_2p0_5g_xlna_only_rxgain[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x2a2d2f32},
- {0x0000b084, 0x21232328},
- {0x0000b088, 0x19191c1e},
- {0x0000b08c, 0x12141417},
- {0x0000b090, 0x07070e0e},
- {0x0000b094, 0x03030305},
- {0x0000b098, 0x00000003},
- {0x0000b09c, 0x00000000},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
static const u32 ar9462_2p0_baseband_core_mix_rxgain[][2] = {
/* Addr allmodes */
{0x00009fd0, 0x0a2d6b93},
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
index 57fc5f459d0a..dc3adda46e8b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
@@ -20,6 +20,44 @@
/* AR9462 2.1 */
+#define ar9462_2p1_mac_postamble ar9462_2p0_mac_postamble
+
+#define ar9462_2p1_baseband_core ar9462_2p0_baseband_core
+
+#define ar9462_2p1_radio_core ar9462_2p0_radio_core
+
+#define ar9462_2p1_radio_postamble ar9462_2p0_radio_postamble
+
+#define ar9462_2p1_soc_postamble ar9462_2p0_soc_postamble
+
+#define ar9462_2p1_radio_postamble_sys2ant ar9462_2p0_radio_postamble_sys2ant
+
+#define ar9462_2p1_common_rx_gain ar9462_2p0_common_rx_gain
+
+#define ar9462_2p1_common_mixed_rx_gain ar9462_2p0_common_mixed_rx_gain
+
+#define ar9462_2p1_common_5g_xlna_only_rxgain ar9462_2p0_common_5g_xlna_only_rxgain
+
+#define ar9462_2p1_baseband_core_mix_rxgain ar9462_2p0_baseband_core_mix_rxgain
+
+#define ar9462_2p1_baseband_postamble_mix_rxgain ar9462_2p0_baseband_postamble_mix_rxgain
+
+#define ar9462_2p1_baseband_postamble_5g_xlna ar9462_2p0_baseband_postamble_5g_xlna
+
+#define ar9462_2p1_common_wo_xlna_rx_gain ar9462_2p0_common_wo_xlna_rx_gain
+
+#define ar9462_2p1_modes_low_ob_db_tx_gain ar9462_2p0_modes_low_ob_db_tx_gain
+
+#define ar9462_2p1_modes_high_ob_db_tx_gain ar9462_2p0_modes_high_ob_db_tx_gain
+
+#define ar9462_2p1_modes_mix_ob_db_tx_gain ar9462_2p0_modes_mix_ob_db_tx_gain
+
+#define ar9462_2p1_modes_fast_clock ar9462_2p0_modes_fast_clock
+
+#define ar9462_2p1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
+
+#define ar9462_2p1_pciephy_clkreq_disable_L1 ar9462_2p0_pciephy_clkreq_disable_L1
+
static const u32 ar9462_2p1_mac_core[][2] = {
/* Addr allmodes */
{0x00000008, 0x00000000},
@@ -183,168 +221,6 @@ static const u32 ar9462_2p1_mac_core[][2] = {
{0x000083d0, 0x000301ff},
};
-static const u32 ar9462_2p1_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
-
-static const u32 ar9462_2p1_baseband_core[][2] = {
- /* Addr allmodes */
- {0x00009800, 0xafe68e30},
- {0x00009804, 0xfd14e000},
- {0x00009808, 0x9c0a9f6b},
- {0x0000980c, 0x04900000},
- {0x00009814, 0x9280c00a},
- {0x00009818, 0x00000000},
- {0x0000981c, 0x00020028},
- {0x00009834, 0x6400a290},
- {0x00009838, 0x0108ecff},
- {0x0000983c, 0x0d000600},
- {0x00009880, 0x201fff00},
- {0x00009884, 0x00001042},
- {0x000098a4, 0x00200400},
- {0x000098b0, 0x32440bbe},
- {0x000098d0, 0x004b6a8e},
- {0x000098d4, 0x00000820},
- {0x000098dc, 0x00000000},
- {0x000098e4, 0x01ffffff},
- {0x000098e8, 0x01ffffff},
- {0x000098ec, 0x01ffffff},
- {0x000098f0, 0x00000000},
- {0x000098f4, 0x00000000},
- {0x00009bf0, 0x80000000},
- {0x00009c04, 0xff55ff55},
- {0x00009c08, 0x0320ff55},
- {0x00009c0c, 0x00000000},
- {0x00009c10, 0x00000000},
- {0x00009c14, 0x00046384},
- {0x00009c18, 0x05b6b440},
- {0x00009c1c, 0x00b6b440},
- {0x00009d00, 0xc080a333},
- {0x00009d04, 0x40206c10},
- {0x00009d08, 0x009c4060},
- {0x00009d0c, 0x9883800a},
- {0x00009d10, 0x01834061},
- {0x00009d14, 0x00c0040b},
- {0x00009d18, 0x00000000},
- {0x00009e08, 0x0038230c},
- {0x00009e24, 0x990bb515},
- {0x00009e28, 0x0c6f0000},
- {0x00009e30, 0x06336f77},
- {0x00009e34, 0x6af6532f},
- {0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x15262820},
- {0x00009e4c, 0x00001004},
- {0x00009e50, 0x00ff03f1},
- {0x00009e54, 0xe4c555c2},
- {0x00009e58, 0xfd857722},
- {0x00009e5c, 0xe9198724},
- {0x00009fc0, 0x803e4788},
- {0x00009fc4, 0x0001efb5},
- {0x00009fcc, 0x40000014},
- {0x00009fd0, 0x0a193b93},
- {0x0000a20c, 0x00000000},
- {0x0000a220, 0x00000000},
- {0x0000a224, 0x00000000},
- {0x0000a228, 0x10002310},
- {0x0000a23c, 0x00000000},
- {0x0000a244, 0x0c000000},
- {0x0000a2a0, 0x00000001},
- {0x0000a2c0, 0x00000001},
- {0x0000a2c8, 0x00000000},
- {0x0000a2cc, 0x18c43433},
- {0x0000a2d4, 0x00000000},
- {0x0000a2ec, 0x00000000},
- {0x0000a2f0, 0x00000000},
- {0x0000a2f4, 0x00000000},
- {0x0000a2f8, 0x00000000},
- {0x0000a344, 0x00000000},
- {0x0000a34c, 0x00000000},
- {0x0000a350, 0x0000a000},
- {0x0000a364, 0x00000000},
- {0x0000a370, 0x00000000},
- {0x0000a390, 0x00000001},
- {0x0000a394, 0x00000444},
- {0x0000a398, 0x001f0e0f},
- {0x0000a39c, 0x0075393f},
- {0x0000a3a0, 0xb79f6427},
- {0x0000a3c0, 0x20202020},
- {0x0000a3c4, 0x22222220},
- {0x0000a3c8, 0x20200020},
- {0x0000a3cc, 0x20202020},
- {0x0000a3d0, 0x20202020},
- {0x0000a3d4, 0x20202020},
- {0x0000a3d8, 0x20202020},
- {0x0000a3dc, 0x20202020},
- {0x0000a3e0, 0x20202020},
- {0x0000a3e4, 0x20202020},
- {0x0000a3e8, 0x20202020},
- {0x0000a3ec, 0x20202020},
- {0x0000a3f0, 0x00000000},
- {0x0000a3f4, 0x00000006},
- {0x0000a3f8, 0x0c9bd380},
- {0x0000a3fc, 0x000f0f01},
- {0x0000a400, 0x8fa91f01},
- {0x0000a404, 0x00000000},
- {0x0000a408, 0x0e79e5c6},
- {0x0000a40c, 0x00820820},
- {0x0000a414, 0x1ce739ce},
- {0x0000a418, 0x2d001dce},
- {0x0000a434, 0x00000000},
- {0x0000a438, 0x00001801},
- {0x0000a43c, 0x00100000},
- {0x0000a444, 0x00000000},
- {0x0000a448, 0x05000080},
- {0x0000a44c, 0x00000001},
- {0x0000a450, 0x00010000},
- {0x0000a454, 0x07000000},
- {0x0000a644, 0xbfad9d74},
- {0x0000a648, 0x0048060a},
- {0x0000a64c, 0x00002037},
- {0x0000a670, 0x03020100},
- {0x0000a674, 0x09080504},
- {0x0000a678, 0x0d0c0b0a},
- {0x0000a67c, 0x13121110},
- {0x0000a680, 0x31301514},
- {0x0000a684, 0x35343332},
- {0x0000a688, 0x00000036},
- {0x0000a690, 0x00000838},
- {0x0000a6b0, 0x0000000a},
- {0x0000a6b4, 0x00512c01},
- {0x0000a7c0, 0x00000000},
- {0x0000a7c4, 0xfffffffc},
- {0x0000a7c8, 0x00000000},
- {0x0000a7cc, 0x00000000},
- {0x0000a7d0, 0x00000000},
- {0x0000a7d4, 0x00000004},
- {0x0000a7dc, 0x00000000},
- {0x0000a7f0, 0x80000000},
- {0x0000a8d0, 0x004b6a8e},
- {0x0000a8d4, 0x00000820},
- {0x0000a8dc, 0x00000000},
- {0x0000a8f0, 0x00000000},
- {0x0000a8f4, 0x00000000},
- {0x0000abf0, 0x80000000},
- {0x0000b2d0, 0x00000080},
- {0x0000b2d4, 0x00000000},
- {0x0000b2ec, 0x00000000},
- {0x0000b2f0, 0x00000000},
- {0x0000b2f4, 0x00000000},
- {0x0000b2f8, 0x00000000},
- {0x0000b408, 0x0e79e5c0},
- {0x0000b40c, 0x00820820},
- {0x0000b420, 0x00000000},
- {0x0000b6b0, 0x0000000a},
- {0x0000b6b4, 0x00000001},
-};
-
static const u32 ar9462_2p1_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
@@ -404,72 +280,6 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = {
{0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
};
-static const u32 ar9462_2p1_radio_core[][2] = {
- /* Addr allmodes */
- {0x00016000, 0x36db6db6},
- {0x00016004, 0x6db6db40},
- {0x00016008, 0x73f00000},
- {0x0001600c, 0x00000000},
- {0x00016010, 0x6d820001},
- {0x00016040, 0x7f80fff8},
- {0x0001604c, 0x2699e04f},
- {0x00016050, 0x6db6db6c},
- {0x00016058, 0x6c200000},
- {0x00016080, 0x000c0000},
- {0x00016084, 0x9a68048c},
- {0x00016088, 0x54214514},
- {0x0001608c, 0x1203040b},
- {0x00016090, 0x24926490},
- {0x00016098, 0xd2888888},
- {0x000160a0, 0x0a108ffe},
- {0x000160a4, 0x812fc491},
- {0x000160a8, 0x423c8000},
- {0x000160b4, 0x92000000},
- {0x000160b8, 0x0285dddc},
- {0x000160bc, 0x02908888},
- {0x000160c0, 0x00adb6d0},
- {0x000160c4, 0x6db6db60},
- {0x000160c8, 0x6db6db6c},
- {0x000160cc, 0x0de6c1b0},
- {0x00016100, 0x3fffbe04},
- {0x00016104, 0xfff80000},
- {0x00016108, 0x00200400},
- {0x00016110, 0x00000000},
- {0x00016144, 0x02084080},
- {0x00016148, 0x000080c0},
- {0x00016280, 0x050a0001},
- {0x00016284, 0x3d841418},
- {0x00016288, 0x00000000},
- {0x0001628c, 0xe3000000},
- {0x00016290, 0xa1005080},
- {0x00016294, 0x00000020},
- {0x00016298, 0x54a82900},
- {0x00016340, 0x121e4276},
- {0x00016344, 0x00300000},
- {0x00016400, 0x36db6db6},
- {0x00016404, 0x6db6db40},
- {0x00016408, 0x73f00000},
- {0x0001640c, 0x00000000},
- {0x00016410, 0x6c800001},
- {0x00016440, 0x7f80fff8},
- {0x0001644c, 0x4699e04f},
- {0x00016450, 0x6db6db6c},
- {0x00016500, 0x3fffbe04},
- {0x00016504, 0xfff80000},
- {0x00016508, 0x00200400},
- {0x00016510, 0x00000000},
- {0x00016544, 0x02084080},
- {0x00016548, 0x000080c0},
-};
-
-static const u32 ar9462_2p1_radio_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
- {0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
- {0x0001610c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
- {0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
-};
-
static const u32 ar9462_2p1_soc_preamble[][2] = {
/* Addr allmodes */
{0x000040a4, 0x00a0c9c9},
@@ -478,1297 +288,4 @@ static const u32 ar9462_2p1_soc_preamble[][2] = {
{0x00007038, 0x000004c2},
};
-static const u32 ar9462_2p1_soc_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00007010, 0x00000033, 0x00000033, 0x00000033, 0x00000033},
-};
-
-static const u32 ar9462_2p1_radio_postamble_sys2ant[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
- {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
- {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
-};
-
-static const u32 ar9462_2p1_common_rx_gain[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x01910190},
- {0x0000a030, 0x01930192},
- {0x0000a034, 0x01950194},
- {0x0000a038, 0x038a0196},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x22222229},
- {0x0000a084, 0x1d1d1d1d},
- {0x0000a088, 0x1d1d1d1d},
- {0x0000a08c, 0x1d1d1d1d},
- {0x0000a090, 0x171d1d1d},
- {0x0000a094, 0x11111717},
- {0x0000a098, 0x00030311},
- {0x0000a09c, 0x00000000},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x2a2d2f32},
- {0x0000b084, 0x21232328},
- {0x0000b088, 0x19191c1e},
- {0x0000b08c, 0x12141417},
- {0x0000b090, 0x07070e0e},
- {0x0000b094, 0x03030305},
- {0x0000b098, 0x00000003},
- {0x0000b09c, 0x00000000},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_2p1_common_mixed_rx_gain[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x2a2d2f32},
- {0x0000b084, 0x21232328},
- {0x0000b088, 0x19191c1e},
- {0x0000b08c, 0x12141417},
- {0x0000b090, 0x07070e0e},
- {0x0000b094, 0x03030305},
- {0x0000b098, 0x00000003},
- {0x0000b09c, 0x00000000},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_2p1_baseband_core_mix_rxgain[][2] = {
- /* Addr allmodes */
- {0x00009fd0, 0x0a2d6b93},
-};
-
-static const u32 ar9462_2p1_baseband_postamble_mix_rxgain[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
- {0x00009824, 0x63c640de, 0x5ac640d0, 0x63c640da, 0x63c640da},
- {0x00009828, 0x0796be89, 0x0696b081, 0x0916be81, 0x0916be81},
- {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000d8, 0x6c4000d8},
- {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec86d2e, 0x7ec86d2e},
- {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32395c5e},
-};
-
-static const u32 ar9462_2p1_baseband_postamble_5g_xlna[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
-};
-
-static const u32 ar9462_2p1_common_wo_xlna_rx_gain[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x32323232},
- {0x0000b084, 0x2f2f3232},
- {0x0000b088, 0x23282a2d},
- {0x0000b08c, 0x1c1e2123},
- {0x0000b090, 0x14171919},
- {0x0000b094, 0x0e0e1214},
- {0x0000b098, 0x03050707},
- {0x0000b09c, 0x00030303},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_2p1_common_5g_xlna_only_rx_gain[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x2a2d2f32},
- {0x0000b084, 0x21232328},
- {0x0000b088, 0x19191c1e},
- {0x0000b08c, 0x12141417},
- {0x0000b090, 0x07070e0e},
- {0x0000b094, 0x03030305},
- {0x0000b098, 0x00000003},
- {0x0000b09c, 0x00000000},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_2p1_modes_low_ob_db_tx_gain[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
- {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
- {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
- {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
- {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
- {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
- {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
- {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
- {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
- {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
- {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
- {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
- {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
- {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
- {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
- {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
- {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
- {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
- {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
- {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
- {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
- {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
- {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
- {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
- {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
- {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
- {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
- {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
- {0x00016048, 0x64992060, 0x64992060, 0x64992060, 0x64992060},
- {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
- {0x00016444, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
- {0x00016448, 0x64992000, 0x64992000, 0x64992000, 0x64992000},
- {0x00016454, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
-};
-
-static const u32 ar9462_2p1_modes_high_ob_db_tx_gain[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
- {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050da, 0x000050da, 0x000050de, 0x000050de},
- {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
- {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
- {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
- {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
- {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x55025eb3, 0x55025eb3, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001a84, 0x44001a84},
- {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x751ffff6, 0x751ffff6, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x751ffff6, 0x751ffff6, 0x58001ef0, 0x58001ef0},
- {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x5a001ef4, 0x5a001ef4},
- {0x0000a570, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
- {0x0000a574, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
- {0x0000a578, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
- {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
- {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
- {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
- {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
- {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
- {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
- {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
- {0x00016444, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
- {0x00016448, 0x8db49000, 0x8db49000, 0x8db49000, 0x8db49000},
- {0x00016454, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
-};
-
-static const u32 ar9462_2p1_modes_mix_ob_db_tx_gain[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
- {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x0000d0da, 0x0000d0da, 0x0000d0de, 0x0000d0de},
- {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x18022622, 0x18022622, 0x12000400, 0x12000400},
- {0x0000a518, 0x1b022822, 0x1b022822, 0x16000402, 0x16000402},
- {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
- {0x0000a520, 0x22022c41, 0x22022c41, 0x1c000603, 0x1c000603},
- {0x0000a524, 0x28023042, 0x28023042, 0x21000a02, 0x21000a02},
- {0x0000a528, 0x2c023044, 0x2c023044, 0x25000a04, 0x25000a04},
- {0x0000a52c, 0x2f023644, 0x2f023644, 0x28000a20, 0x28000a20},
- {0x0000a530, 0x34025643, 0x34025643, 0x2c000e20, 0x2c000e20},
- {0x0000a534, 0x38025a44, 0x38025a44, 0x30000e22, 0x30000e22},
- {0x0000a538, 0x3b025e45, 0x3b025e45, 0x34000e24, 0x34000e24},
- {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x38001640, 0x38001640},
- {0x0000a540, 0x48025e6c, 0x48025e6c, 0x3c001660, 0x3c001660},
- {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3f001861, 0x3f001861},
- {0x0000a548, 0x55025eb3, 0x55025eb3, 0x43001a81, 0x43001a81},
- {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x47001a83, 0x47001a83},
- {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x4a001c84, 0x4a001c84},
- {0x0000a554, 0x62025f56, 0x62025f56, 0x4e001ce3, 0x4e001ce3},
- {0x0000a558, 0x66027f56, 0x66027f56, 0x52001ce5, 0x52001ce5},
- {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x56001ce9, 0x56001ce9},
- {0x0000a560, 0x70049f56, 0x70049f56, 0x5a001ceb, 0x5a001ceb},
- {0x0000a564, 0x751ffff6, 0x751ffff6, 0x5c001eec, 0x5c001eec},
- {0x0000a568, 0x751ffff6, 0x751ffff6, 0x5e001ef0, 0x5e001ef0},
- {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x60001ef4, 0x60001ef4},
- {0x0000a570, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
- {0x0000a574, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
- {0x0000a578, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
- {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
- {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
- {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
- {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
- {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
-};
-
-static const u32 ar9462_2p1_modes_fast_clock[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
- {0x00001030, 0x00000268, 0x000004d0},
- {0x00001070, 0x0000018c, 0x00000318},
- {0x000010b0, 0x00000fd0, 0x00001fa0},
- {0x00008014, 0x044c044c, 0x08980898},
- {0x0000801c, 0x148ec02b, 0x148ec057},
- {0x00008318, 0x000044c0, 0x00008980},
- {0x00009e00, 0x0372131c, 0x0372131c},
- {0x0000a230, 0x0000400b, 0x00004016},
- {0x0000a254, 0x00000898, 0x00001130},
-};
-
-static const u32 ar9462_2p1_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
-
#endif /* INITVALS_9462_2P1_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 7c1845221e1c..ce83ce47a1ca 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -20,17 +20,11 @@
/* AR9485 1.1 */
-static const u32 ar9485_1_1_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
+#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
+
+#define ar9485_1_1_mac_postamble ar9331_1p1_mac_postamble
+
+#define ar9485_1_1_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
/* Addr allmodes */
@@ -546,100 +540,6 @@ static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
-static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
- {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
- {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
- {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
- {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
- {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
- {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
- {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
- {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
- {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
- {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
- {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
- {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
- {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
- {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
- {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
- {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
- {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
-};
-
static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
@@ -1323,13 +1223,6 @@ static const u32 ar9485_1_1_mac_core[][2] = {
{0x000083d0, 0x000301ff},
};
-static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
-
static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
/* Addr allmodes */
{0x00018c00, 0x18013e5e},
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
new file mode 100644
index 000000000000..3c9113d9b1bc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -0,0 +1,718 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_953X_H
+#define INITVALS_953X_H
+
+#define qca953x_1p0_mac_postamble ar9300_2p2_mac_postamble
+
+#define qca953x_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define qca953x_1p0_common_rx_gain_table ar9300Common_rx_gain_table_2p2
+
+#define qca953x_1p0_common_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define qca953x_1p0_modes_fast_clock ar9300Modes_fast_clock_2p2
+
+static const u32 qca953x_1p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c22},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008140, 0x000000fe},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f3d7},
+ {0x00008248, 0x00000852},
+ {0x0000824c, 0x0001e7ae},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9d400010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00001d40},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x0000001f},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0xffff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48107b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x8c7901ff},
+};
+
+static const u32 qca953x_1p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x0280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a190},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098bc, 0x00000002},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01884061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x813e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b91},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a248, 0x00000140},
+ {0x0000a2a0, 0x00000007},
+ {0x0000a2c0, 0x00000007},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x1f020503},
+ {0x0000a39c, 0x29180c03},
+ {0x0000a3a0, 0x9a8b6844},
+ {0x0000a3a4, 0x000000ff},
+ {0x0000a3a8, 0x6a6a6a6a},
+ {0x0000a3ac, 0x6a6a6a6a},
+ {0x0000a3b0, 0x00c8641a},
+ {0x0000a3b4, 0x0000001a},
+ {0x0000a3b8, 0x0088642a},
+ {0x0000a3bc, 0x000001fa},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000000},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce42108},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce73908},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce738e7},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a644, 0xbfad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x08000838},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+};
+
+static const u32 qca953x_1p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10822, 0xcfa10822},
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x005c0ec0, 0x005c0ec4, 0x005c0ec4, 0x005c0ec0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01010e0e, 0x01010e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
+ {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+};
+
+static const u32 qca953x_1p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x3f80fff8},
+ {0x0001604c, 0x000f0278},
+ {0x00016050, 0x8036db6c},
+ {0x00016054, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f080a},
+ {0x00016090, 0x24926490},
+ {0x00016094, 0x00000000},
+ {0x000160a0, 0xc2108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480080},
+ {0x000160c0, 0x006db6d8},
+ {0x000160c4, 0x24b6db6c},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x6db6fb7c},
+ {0x000160d0, 0x6db6da44},
+ {0x00016100, 0x07ff8001},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x01884080},
+ {0x00016148, 0x000080d8},
+ {0x00016280, 0x01000901},
+ {0x00016284, 0x15d30000},
+ {0x00016288, 0x00318000},
+ {0x0001628c, 0x50000000},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x3f80fff8},
+ {0x0001644c, 0x000f0278},
+ {0x00016450, 0x8036db6c},
+ {0x00016454, 0x6db60000},
+ {0x00016500, 0x07ff8001},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x01884080},
+ {0x00016548, 0x000080d8},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00800700},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+};
+
+static const u32 qca953x_1p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00016098, 0xd2dd5554, 0xd2dd5554, 0xc4128f5c, 0xc4128f5c},
+ {0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x0fd08f25, 0x0fd08f25},
+ {0x000160ac, 0xa4647c00, 0xa4647c00, 0x24646800, 0x24646800},
+ {0x000160b0, 0x01885f52, 0x01885f52, 0x00fe7f46, 0x00fe7f46},
+ {0x00016104, 0xb7a00001, 0xb7a00001, 0xfff80005, 0xfff80005},
+ {0x0001610c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
+ {0x00016504, 0xb7a00001, 0xb7a00001, 0xfff80001, 0xfff80001},
+ {0x0001650c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
+};
+
+static const u32 qca953x_1p0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00007000, 0x00000000},
+ {0x00007004, 0x00000000},
+ {0x00007008, 0x00000000},
+ {0x0000700c, 0x00000000},
+ {0x0000701c, 0x00000000},
+ {0x00007020, 0x00000000},
+ {0x00007024, 0x00000000},
+ {0x00007028, 0x00000000},
+ {0x0000702c, 0x00000000},
+ {0x00007030, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00007048, 0x00000000},
+};
+
+static const u32 qca953x_1p0_common_rx_gain_bounds[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302018, 0x50302018},
+};
+
+static const u32 qca953x_1p0_common_wo_xlna_rx_gain_bounds[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+};
+
+static const u32 qca953x_1p0_modes_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xfffd5aaa},
+ {0x0000a2e0, 0xfffe9ccc},
+ {0x0000a2e4, 0xffffe0f0},
+ {0x0000a2e8, 0xfffcff00},
+ {0x0000a410, 0x000050da},
+ {0x0000a500, 0x00000000},
+ {0x0000a504, 0x04000002},
+ {0x0000a508, 0x08000004},
+ {0x0000a50c, 0x0c000006},
+ {0x0000a510, 0x0f00000a},
+ {0x0000a514, 0x1300000c},
+ {0x0000a518, 0x1700000e},
+ {0x0000a51c, 0x1b000064},
+ {0x0000a520, 0x1f000242},
+ {0x0000a524, 0x23000229},
+ {0x0000a528, 0x270002a2},
+ {0x0000a52c, 0x2c001203},
+ {0x0000a530, 0x30001803},
+ {0x0000a534, 0x33000881},
+ {0x0000a538, 0x38001809},
+ {0x0000a53c, 0x3a000814},
+ {0x0000a540, 0x3f001a0c},
+ {0x0000a544, 0x43001a0e},
+ {0x0000a548, 0x46001812},
+ {0x0000a54c, 0x49001884},
+ {0x0000a550, 0x4d001e84},
+ {0x0000a554, 0x50001e69},
+ {0x0000a558, 0x550006f4},
+ {0x0000a55c, 0x59000ad3},
+ {0x0000a560, 0x5e000ad5},
+ {0x0000a564, 0x61001ced},
+ {0x0000a568, 0x660018d4},
+ {0x0000a56c, 0x660018d4},
+ {0x0000a570, 0x660018d4},
+ {0x0000a574, 0x660018d4},
+ {0x0000a578, 0x660018d4},
+ {0x0000a57c, 0x660018d4},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x03804000},
+ {0x0000a610, 0x0300ca02},
+ {0x0000a614, 0x00000e04},
+ {0x0000a618, 0x03014000},
+ {0x0000a61c, 0x00000000},
+ {0x0000a620, 0x00000000},
+ {0x0000a624, 0x03014000},
+ {0x0000a628, 0x03804c05},
+ {0x0000a62c, 0x0701de06},
+ {0x0000a630, 0x07819c07},
+ {0x0000a634, 0x0701dc07},
+ {0x0000a638, 0x0701dc07},
+ {0x0000a63c, 0x0701dc07},
+ {0x0000b2dc, 0xfffd5aaa},
+ {0x0000b2e0, 0xfffe9ccc},
+ {0x0000b2e4, 0xffffe0f0},
+ {0x0000b2e8, 0xfffcff00},
+ {0x00016044, 0x010002d4},
+ {0x00016048, 0x66482400},
+ {0x00016280, 0x01000015},
+ {0x00016444, 0x010002d4},
+ {0x00016448, 0x66482400},
+};
+
+static const u32 qca953x_1p0_modes_no_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xffd5f552},
+ {0x0000a2e0, 0xffe60664},
+ {0x0000a2e4, 0xfff80780},
+ {0x0000a2e8, 0xfffff800},
+ {0x0000a410, 0x000050d6},
+ {0x0000a500, 0x00060020},
+ {0x0000a504, 0x04060060},
+ {0x0000a508, 0x080600a0},
+ {0x0000a50c, 0x0c068020},
+ {0x0000a510, 0x10068060},
+ {0x0000a514, 0x140680a0},
+ {0x0000a518, 0x18090040},
+ {0x0000a51c, 0x1b090080},
+ {0x0000a520, 0x1f0900c0},
+ {0x0000a524, 0x240c0041},
+ {0x0000a528, 0x280d0021},
+ {0x0000a52c, 0x2d0f0061},
+ {0x0000a530, 0x310f00a1},
+ {0x0000a534, 0x350e00a2},
+ {0x0000a538, 0x360e80a2},
+ {0x0000a53c, 0x380f00a2},
+ {0x0000a540, 0x3b0e00a3},
+ {0x0000a544, 0x3d110083},
+ {0x0000a548, 0x3e1100a3},
+ {0x0000a54c, 0x401100e3},
+ {0x0000a550, 0x421380e3},
+ {0x0000a554, 0x431780e3},
+ {0x0000a558, 0x461f80e3},
+ {0x0000a55c, 0x461f80e3},
+ {0x0000a560, 0x461f80e3},
+ {0x0000a564, 0x461f80e3},
+ {0x0000a568, 0x461f80e3},
+ {0x0000a56c, 0x461f80e3},
+ {0x0000a570, 0x461f80e3},
+ {0x0000a574, 0x461f80e3},
+ {0x0000a578, 0x461f80e3},
+ {0x0000a57c, 0x461f80e3},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x00804201},
+ {0x0000a610, 0x01008201},
+ {0x0000a614, 0x0180c402},
+ {0x0000a618, 0x0180c603},
+ {0x0000a61c, 0x0180c603},
+ {0x0000a620, 0x01c10603},
+ {0x0000a624, 0x01c10704},
+ {0x0000a628, 0x02c18b05},
+ {0x0000a62c, 0x0301cc07},
+ {0x0000a630, 0x0301cc07},
+ {0x0000a634, 0x0301cc07},
+ {0x0000a638, 0x0301cc07},
+ {0x0000a63c, 0x0301cc07},
+ {0x0000b2dc, 0xffd5f552},
+ {0x0000b2e0, 0xffe60664},
+ {0x0000b2e4, 0xfff80780},
+ {0x0000b2e8, 0xfffff800},
+ {0x00016044, 0x049242db},
+ {0x00016048, 0x6c927a70},
+ {0x00016444, 0x049242db},
+ {0x00016448, 0x6c927a70},
+};
+
+static const u32 qca953x_1p1_modes_no_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xffd5f552},
+ {0x0000a2e0, 0xffe60664},
+ {0x0000a2e4, 0xfff80780},
+ {0x0000a2e8, 0xfffff800},
+ {0x0000a410, 0x000050de},
+ {0x0000a500, 0x00000061},
+ {0x0000a504, 0x04000063},
+ {0x0000a508, 0x08000065},
+ {0x0000a50c, 0x0c000261},
+ {0x0000a510, 0x10000263},
+ {0x0000a514, 0x14000265},
+ {0x0000a518, 0x18000482},
+ {0x0000a51c, 0x1b000484},
+ {0x0000a520, 0x1f000486},
+ {0x0000a524, 0x240008c2},
+ {0x0000a528, 0x28000cc1},
+ {0x0000a52c, 0x2d000ce3},
+ {0x0000a530, 0x31000ce5},
+ {0x0000a534, 0x350010e5},
+ {0x0000a538, 0x360012e5},
+ {0x0000a53c, 0x380014e5},
+ {0x0000a540, 0x3b0018e5},
+ {0x0000a544, 0x3d001d04},
+ {0x0000a548, 0x3e001d05},
+ {0x0000a54c, 0x40001d07},
+ {0x0000a550, 0x42001f27},
+ {0x0000a554, 0x43001f67},
+ {0x0000a558, 0x46001fe7},
+ {0x0000a55c, 0x47001f2b},
+ {0x0000a560, 0x49001f0d},
+ {0x0000a564, 0x4b001ed2},
+ {0x0000a568, 0x4c001ed4},
+ {0x0000a56c, 0x4e001f15},
+ {0x0000a570, 0x4f001ff6},
+ {0x0000a574, 0x4f001ff6},
+ {0x0000a578, 0x4f001ff6},
+ {0x0000a57c, 0x4f001ff6},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x00804201},
+ {0x0000a610, 0x01008201},
+ {0x0000a614, 0x0180c402},
+ {0x0000a618, 0x0180c603},
+ {0x0000a61c, 0x0180c603},
+ {0x0000a620, 0x01c10603},
+ {0x0000a624, 0x01c10704},
+ {0x0000a628, 0x02c18b05},
+ {0x0000a62c, 0x02c14c07},
+ {0x0000a630, 0x01008704},
+ {0x0000a634, 0x01c10402},
+ {0x0000a638, 0x0301cc07},
+ {0x0000a63c, 0x0301cc07},
+ {0x0000b2dc, 0xffd5f552},
+ {0x0000b2e0, 0xffe60664},
+ {0x0000b2e4, 0xfff80780},
+ {0x0000b2e8, 0xfffff800},
+ {0x00016044, 0x049242db},
+ {0x00016048, 0x6c927a70},
+ {0x00016444, 0x049242db},
+ {0x00016448, 0x6c927a70},
+};
+
+#endif /* INITVALS_953X_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
index ccc5b6c99add..74d8bc05b317 100644
--- a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -20,6 +20,14 @@
/* AR955X 1.0 */
+#define ar955x_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define ar955x_1p0_common_rx_gain_table ar9300Common_rx_gain_table_2p2
+
+#define ar955x_1p0_common_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define ar955x_1p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
static const u32 ar955x_1p0_radio_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00016098, 0xd2dd5554, 0xd2dd5554, 0xd28b3330, 0xd28b3330},
@@ -37,13 +45,6 @@ static const u32 ar955x_1p0_radio_postamble[][5] = {
{0x00016940, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
};
-static const u32 ar955x_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
-
static const u32 ar955x_1p0_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
@@ -473,266 +474,6 @@ static const u32 ar955x_1p0_mac_core[][2] = {
{0x000083d0, 0x8c7901ff},
};
-static const u32 ar955x_1p0_common_rx_gain_table[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x01910190},
- {0x0000a030, 0x01930192},
- {0x0000a034, 0x01950194},
- {0x0000a038, 0x038a0196},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x22222229},
- {0x0000a084, 0x1d1d1d1d},
- {0x0000a088, 0x1d1d1d1d},
- {0x0000a08c, 0x1d1d1d1d},
- {0x0000a090, 0x171d1d1d},
- {0x0000a094, 0x11111717},
- {0x0000a098, 0x00030311},
- {0x0000a09c, 0x00000000},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x23232323},
- {0x0000b084, 0x21232323},
- {0x0000b088, 0x19191c1e},
- {0x0000b08c, 0x12141417},
- {0x0000b090, 0x07070e0e},
- {0x0000b094, 0x03030305},
- {0x0000b098, 0x00000003},
- {0x0000b09c, 0x00000000},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
static const u32 ar955x_1p0_baseband_core[][2] = {
/* Addr allmodes */
{0x00009800, 0xafe68e30},
@@ -891,266 +632,6 @@ static const u32 ar955x_1p0_baseband_core[][2] = {
{0x0000c420, 0x00000000},
};
-static const u32 ar955x_1p0_common_wo_xlna_rx_gain_table[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x32323232},
- {0x0000b084, 0x2f2f3232},
- {0x0000b088, 0x23282a2d},
- {0x0000b08c, 0x1c1e2123},
- {0x0000b090, 0x14171919},
- {0x0000b094, 0x0e0e1214},
- {0x0000b098, 0x03050707},
- {0x0000b09c, 0x00030303},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
static const u32 ar955x_1p0_soc_preamble[][2] = {
/* Addr allmodes */
{0x00007000, 0x00000000},
@@ -1263,11 +744,6 @@ static const u32 ar955x_1p0_modes_no_xpa_tx_gain_table[][9] = {
{0x00016848, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
};
-static const u32 ar955x_1p0_soc_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
-};
-
static const u32 ar955x_1p0_modes_fast_clock[][3] = {
/* Addr 5G_HT20 5G_HT40 */
{0x00001030, 0x00000268, 0x000004d0},
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index a8c757b6124f..10d4a6cb1c3b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -20,6 +20,12 @@
/* AR9565 1.0 */
+#define ar9565_1p0_mac_postamble ar9331_1p1_mac_postamble
+
+#define ar9565_1p0_Modes_lowest_ob_db_tx_gain_table ar9565_1p0_modes_low_ob_db_tx_gain_table
+
+#define ar9565_1p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
static const u32 ar9565_1p0_mac_core[][2] = {
/* Addr allmodes */
{0x00000008, 0x00000000},
@@ -182,18 +188,6 @@ static const u32 ar9565_1p0_mac_core[][2] = {
{0x000083d0, 0x800301ff},
};
-static const u32 ar9565_1p0_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
-
static const u32 ar9565_1p0_baseband_core[][2] = {
/* Addr allmodes */
{0x00009800, 0xafe68e30},
@@ -711,66 +705,6 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
{0x0000b1fc, 0x00000196},
};
-static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
- {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
- {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
- {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
- {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
- {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
- {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
- {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
- {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
- {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
- {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
- {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
- {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
- {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
- {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
- {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
- {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
- {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
- {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
- {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
- {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
- {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
- {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
- {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
- {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
- {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
- {0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-};
-
static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = {
/* Addr allmodes */
{0x00018c00, 0x18212ede},
@@ -1231,11 +1165,4 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
-static const u32 ar9565_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
-
#endif /* INITVALS_9565_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p1_initvals.h
new file mode 100644
index 000000000000..56810539971e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p1_initvals.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9565_1P1_H
+#define INITVALS_9565_1P1_H
+
+/* AR9565 1.1 */
+
+#define ar9565_1p1_mac_core ar9565_1p0_mac_core
+
+#define ar9565_1p1_mac_postamble ar9565_1p0_mac_postamble
+
+#define ar9565_1p1_baseband_core ar9565_1p0_baseband_core
+
+#define ar9565_1p1_baseband_postamble ar9565_1p0_baseband_postamble
+
+#define ar9565_1p1_radio_core ar9565_1p0_radio_core
+
+#define ar9565_1p1_soc_preamble ar9565_1p0_soc_preamble
+
+#define ar9565_1p1_soc_postamble ar9565_1p0_soc_postamble
+
+#define ar9565_1p1_Common_rx_gain_table ar9565_1p0_Common_rx_gain_table
+
+#define ar9565_1p1_Modes_lowest_ob_db_tx_gain_table ar9565_1p0_Modes_lowest_ob_db_tx_gain_table
+
+#define ar9565_1p1_pciephy_clkreq_disable_L1 ar9565_1p0_pciephy_clkreq_disable_L1
+
+#define ar9565_1p1_modes_fast_clock ar9565_1p0_modes_fast_clock
+
+#define ar9565_1p1_common_wo_xlna_rx_gain_table ar9565_1p0_common_wo_xlna_rx_gain_table
+
+#define ar9565_1p1_modes_low_ob_db_tx_gain_table ar9565_1p0_modes_low_ob_db_tx_gain_table
+
+#define ar9565_1p1_modes_high_ob_db_tx_gain_table ar9565_1p0_modes_high_ob_db_tx_gain_table
+
+#define ar9565_1p1_modes_high_power_tx_gain_table ar9565_1p0_modes_high_power_tx_gain_table
+
+#define ar9565_1p1_baseband_core_txfir_coeff_japan_2484 ar9565_1p0_baseband_core_txfir_coeff_japan_2484
+
+static const u32 ar9565_1p1_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
+ {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
+ {0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
+ {0x0001610c, 0x40000000, 0x40000000, 0x40000000, 0x40000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+#endif /* INITVALS_9565_1P1_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index bdee2ed67219..e6aec2c0207f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -20,18 +20,34 @@
/* AR9580 1.0 */
+#define ar9580_1p0_soc_preamble ar9300_2p2_soc_preamble
+
+#define ar9580_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define ar9580_1p0_radio_core ar9300_2p2_radio_core
+
+#define ar9580_1p0_mac_postamble ar9300_2p2_mac_postamble
+
+#define ar9580_1p0_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define ar9580_1p0_type5_tx_gain_table ar9300Modes_type5_tx_gain_table_2p2
+
+#define ar9580_1p0_high_ob_db_tx_gain_table ar9300Modes_high_ob_db_tx_gain_table_2p2
+
#define ar9580_1p0_modes_fast_clock ar9300Modes_fast_clock_2p2
+#define ar9580_1p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
static const u32 ar9580_1p0_radio_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
{0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
{0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20},
- {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0001610c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
{0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
- {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0001650c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
{0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
- {0x0001690c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0001690c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
{0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
};
@@ -41,12 +57,10 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
{0x00009804, 0xfd14e000},
{0x00009808, 0x9c0a9f6b},
{0x0000980c, 0x04900000},
- {0x00009814, 0x3280c00a},
- {0x00009818, 0x00000000},
{0x0000981c, 0x00020028},
- {0x00009834, 0x6400a290},
+ {0x00009834, 0x6400a190},
{0x00009838, 0x0108ecff},
- {0x0000983c, 0x0d000600},
+ {0x0000983c, 0x14000600},
{0x00009880, 0x201fff00},
{0x00009884, 0x00001042},
{0x000098a4, 0x00200400},
@@ -67,7 +81,7 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
{0x00009d04, 0x40206c10},
{0x00009d08, 0x009c4060},
{0x00009d0c, 0x9883800a},
- {0x00009d10, 0x01834061},
+ {0x00009d10, 0x01884061},
{0x00009d14, 0x00c0040b},
{0x00009d18, 0x00000000},
{0x00009e08, 0x0038230c},
@@ -198,8 +212,6 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
{0x0000c420, 0x00000000},
};
-#define ar9580_1p0_mac_postamble ar9300_2p2_mac_postamble
-
static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
@@ -306,7 +318,112 @@ static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = {
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
-#define ar9580_1p0_high_power_tx_gain_table ar9580_1p0_low_ob_db_tx_gain_table
+static const u32 ar9580_1p0_high_power_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+ {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5e08442e, 0x5e08442e, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x620a4431, 0x620a4431, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x15800028, 0x15800028, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1b80002b, 0x1b80002b, 0x12800400, 0x12800400},
+ {0x0000a598, 0x1f820028, 0x1f820028, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x2582002b, 0x2582002b, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2a84002a, 0x2a84002a, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2e86002a, 0x2e86002a, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x3382202d, 0x3382202d, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3884202c, 0x3884202c, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3c86202c, 0x3c86202c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4188202d, 0x4188202d, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4586402d, 0x4586402d, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4986222d, 0x4986222d, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4d862231, 0x4d862231, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x50882231, 0x50882231, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5688422e, 0x5688422e, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5a88442e, 0x5a88442e, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x5e8a4431, 0x5e8a4431, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x648a4432, 0x648a4432, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x688a4434, 0x688a4434, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x6c8a6434, 0x6c8a6434, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x6f8a6633, 0x6f8a6633, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+ {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016288, 0x05a2040a, 0x05a2040a, 0x05a20408, 0x05a20408},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
@@ -414,8 +531,6 @@ static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = {
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
-#define ar9580_1p0_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
-
static const u32 ar9580_1p0_mac_core[][2] = {
/* Addr allmodes */
{0x00000008, 0x00000000},
@@ -679,14 +794,6 @@ static const u32 ar9580_1p0_mixed_ob_db_tx_gain_table[][5] = {
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
-#define ar9580_1p0_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
-
-#define ar9580_1p0_soc_postamble ar9300_2p2_soc_postamble
-
-#define ar9580_1p0_high_ob_db_tx_gain_table ar9300Modes_high_ob_db_tx_gain_table_2p2
-
-#define ar9580_1p0_type5_tx_gain_table ar9300Modes_type5_tx_gain_table_2p2
-
static const u32 ar9580_1p0_type6_tx_gain_table[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
@@ -761,165 +868,271 @@ static const u32 ar9580_1p0_type6_tx_gain_table[][5] = {
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
-static const u32 ar9580_1p0_soc_preamble[][2] = {
+static const u32 ar9580_1p0_rx_gain_table[][2] = {
/* Addr allmodes */
- {0x000040a4, 0x00a0c1c9},
- {0x00007008, 0x00000000},
- {0x00007020, 0x00000000},
- {0x00007034, 0x00000002},
- {0x00007038, 0x000004c2},
- {0x00007048, 0x00000008},
-};
-
-#define ar9580_1p0_rx_gain_table ar9462_common_rx_gain_table_2p0
-
-static const u32 ar9580_1p0_radio_core[][2] = {
- /* Addr allmodes */
- {0x00016000, 0x36db6db6},
- {0x00016004, 0x6db6db40},
- {0x00016008, 0x73f00000},
- {0x0001600c, 0x00000000},
- {0x00016040, 0x7f80fff8},
- {0x0001604c, 0x76d005b5},
- {0x00016050, 0x556cf031},
- {0x00016054, 0x13449440},
- {0x00016058, 0x0c51c92c},
- {0x0001605c, 0x3db7fffc},
- {0x00016060, 0xfffffffc},
- {0x00016064, 0x000f0278},
- {0x0001606c, 0x6db60000},
- {0x00016080, 0x00000000},
- {0x00016084, 0x0e48048c},
- {0x00016088, 0x54214514},
- {0x0001608c, 0x119f481e},
- {0x00016090, 0x24926490},
- {0x00016098, 0xd2888888},
- {0x000160a0, 0x0a108ffe},
- {0x000160a4, 0x812fc370},
- {0x000160a8, 0x423c8000},
- {0x000160b4, 0x92480080},
- {0x000160c0, 0x00adb6d0},
- {0x000160c4, 0x6db6db60},
- {0x000160c8, 0x6db6db6c},
- {0x000160cc, 0x01e6c000},
- {0x00016100, 0x3fffbe01},
- {0x00016104, 0xfff80000},
- {0x00016108, 0x00080010},
- {0x00016144, 0x02084080},
- {0x00016148, 0x00000000},
- {0x00016280, 0x058a0001},
- {0x00016284, 0x3d840208},
- {0x00016288, 0x05a20408},
- {0x0001628c, 0x00038c07},
- {0x00016290, 0x00000004},
- {0x00016294, 0x458aa14f},
- {0x00016380, 0x00000000},
- {0x00016384, 0x00000000},
- {0x00016388, 0x00800700},
- {0x0001638c, 0x00800700},
- {0x00016390, 0x00800700},
- {0x00016394, 0x00000000},
- {0x00016398, 0x00000000},
- {0x0001639c, 0x00000000},
- {0x000163a0, 0x00000001},
- {0x000163a4, 0x00000001},
- {0x000163a8, 0x00000000},
- {0x000163ac, 0x00000000},
- {0x000163b0, 0x00000000},
- {0x000163b4, 0x00000000},
- {0x000163b8, 0x00000000},
- {0x000163bc, 0x00000000},
- {0x000163c0, 0x000000a0},
- {0x000163c4, 0x000c0000},
- {0x000163c8, 0x14021402},
- {0x000163cc, 0x00001402},
- {0x000163d0, 0x00000000},
- {0x000163d4, 0x00000000},
- {0x00016400, 0x36db6db6},
- {0x00016404, 0x6db6db40},
- {0x00016408, 0x73f00000},
- {0x0001640c, 0x00000000},
- {0x00016440, 0x7f80fff8},
- {0x0001644c, 0x76d005b5},
- {0x00016450, 0x556cf031},
- {0x00016454, 0x13449440},
- {0x00016458, 0x0c51c92c},
- {0x0001645c, 0x3db7fffc},
- {0x00016460, 0xfffffffc},
- {0x00016464, 0x000f0278},
- {0x0001646c, 0x6db60000},
- {0x00016500, 0x3fffbe01},
- {0x00016504, 0xfff80000},
- {0x00016508, 0x00080010},
- {0x00016544, 0x02084080},
- {0x00016548, 0x00000000},
- {0x00016780, 0x00000000},
- {0x00016784, 0x00000000},
- {0x00016788, 0x00800700},
- {0x0001678c, 0x00800700},
- {0x00016790, 0x00800700},
- {0x00016794, 0x00000000},
- {0x00016798, 0x00000000},
- {0x0001679c, 0x00000000},
- {0x000167a0, 0x00000001},
- {0x000167a4, 0x00000001},
- {0x000167a8, 0x00000000},
- {0x000167ac, 0x00000000},
- {0x000167b0, 0x00000000},
- {0x000167b4, 0x00000000},
- {0x000167b8, 0x00000000},
- {0x000167bc, 0x00000000},
- {0x000167c0, 0x000000a0},
- {0x000167c4, 0x000c0000},
- {0x000167c8, 0x14021402},
- {0x000167cc, 0x00001402},
- {0x000167d0, 0x00000000},
- {0x000167d4, 0x00000000},
- {0x00016800, 0x36db6db6},
- {0x00016804, 0x6db6db40},
- {0x00016808, 0x73f00000},
- {0x0001680c, 0x00000000},
- {0x00016840, 0x7f80fff8},
- {0x0001684c, 0x76d005b5},
- {0x00016850, 0x556cf031},
- {0x00016854, 0x13449440},
- {0x00016858, 0x0c51c92c},
- {0x0001685c, 0x3db7fffc},
- {0x00016860, 0xfffffffc},
- {0x00016864, 0x000f0278},
- {0x0001686c, 0x6db60000},
- {0x00016900, 0x3fffbe01},
- {0x00016904, 0xfff80000},
- {0x00016908, 0x00080010},
- {0x00016944, 0x02084080},
- {0x00016948, 0x00000000},
- {0x00016b80, 0x00000000},
- {0x00016b84, 0x00000000},
- {0x00016b88, 0x00800700},
- {0x00016b8c, 0x00800700},
- {0x00016b90, 0x00800700},
- {0x00016b94, 0x00000000},
- {0x00016b98, 0x00000000},
- {0x00016b9c, 0x00000000},
- {0x00016ba0, 0x00000001},
- {0x00016ba4, 0x00000001},
- {0x00016ba8, 0x00000000},
- {0x00016bac, 0x00000000},
- {0x00016bb0, 0x00000000},
- {0x00016bb4, 0x00000000},
- {0x00016bb8, 0x00000000},
- {0x00016bbc, 0x00000000},
- {0x00016bc0, 0x000000a0},
- {0x00016bc4, 0x000c0000},
- {0x00016bc8, 0x14021402},
- {0x00016bcc, 0x00001402},
- {0x00016bd0, 0x00000000},
- {0x00016bd4, 0x00000000},
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x23232323},
+ {0x0000b084, 0x21232323},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
};
static const u32 ar9580_1p0_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009814, 0x3280c00a, 0x3280c00a, 0x3280c00a, 0x3280c00a},
+ {0x00009818, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
@@ -956,7 +1169,7 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = {
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
- {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
+ {0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
@@ -994,4 +1207,13 @@ static const u32 ar9580_1p0_pcie_phy_pll_on_clkreq[][2] = {
{0x00004044, 0x00000000},
};
+static const u32 ar9580_1p0_baseband_postamble_dfs_channel[][3] = {
+ /* Addr 5G 2G */
+ {0x00009814, 0x3400c00f, 0x3400c00f},
+ {0x00009824, 0x5ac668d0, 0x5ac668d0},
+ {0x00009828, 0x06903080, 0x06903080},
+ {0x00009e0c, 0x6d4000e2, 0x6d4000e2},
+ {0x00009e14, 0x37b9625e, 0x37b9625e},
+};
+
#endif /* INITVALS_9580_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 60a5da53668f..b5ac32cfbeb8 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -27,40 +27,15 @@
#include "common.h"
#include "mci.h"
#include "dfs.h"
-
-/*
- * Header for the ath9k.ko driver core *only* -- hw code nor any other driver
- * should rely on this file or its contents.
- */
+#include "spectral.h"
struct ath_node;
+struct ath_rate_table;
-/* Macro to expand scalars to 64-bit objects */
-
-#define ito64(x) (sizeof(x) == 1) ? \
- (((unsigned long long int)(x)) & (0xff)) : \
- (sizeof(x) == 2) ? \
- (((unsigned long long int)(x)) & 0xffff) : \
- ((sizeof(x) == 4) ? \
- (((unsigned long long int)(x)) & 0xffffffff) : \
- (unsigned long long int)(x))
-
-/* increment with wrap-around */
-#define INCR(_l, _sz) do { \
- (_l)++; \
- (_l) &= ((_sz) - 1); \
- } while (0)
-
-/* decrement with wrap-around */
-#define DECR(_l, _sz) do { \
- (_l)--; \
- (_l) &= ((_sz) - 1); \
- } while (0)
-
-#define TSF_TO_TU(_h,_l) \
- ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
-
-#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i))
+extern struct ieee80211_ops ath9k_ops;
+extern int ath9k_modparam_nohwcrypt;
+extern int led_blink;
+extern bool is_ath9k_unloaded;
struct ath_config {
u16 txpowlimit;
@@ -70,6 +45,17 @@ struct ath_config {
/* Descriptor Management */
/*************************/
+#define ATH_TXSTATUS_RING_SIZE 512
+
+/* Macro to expand scalars to 64-bit objects */
+#define ito64(x) (sizeof(x) == 1) ? \
+ (((unsigned long long int)(x)) & (0xff)) : \
+ (sizeof(x) == 2) ? \
+ (((unsigned long long int)(x)) & 0xffff) : \
+ ((sizeof(x) == 4) ? \
+ (((unsigned long long int)(x)) & 0xffffffff) : \
+ (unsigned long long int)(x))
+
#define ATH_TXBUF_RESET(_bf) do { \
(_bf)->bf_lastbf = NULL; \
(_bf)->bf_next = NULL; \
@@ -77,23 +63,6 @@ struct ath_config {
sizeof(struct ath_buf_state)); \
} while (0)
-/**
- * enum buffer_type - Buffer type flags
- *
- * @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX)
- * @BUF_AGGR: Indicates whether the buffer can be aggregated
- * (used in aggregation scheduling)
- */
-enum buffer_type {
- BUF_AMPDU = BIT(0),
- BUF_AGGR = BIT(1),
-};
-
-#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
-#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
-
-#define ATH_TXSTATUS_RING_SIZE 512
-
#define DS2PHYS(_dd, _ds) \
((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
@@ -113,11 +82,20 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
/* RX / TX */
/***********/
+#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i))
+
+/* increment with wrap-around */
+#define INCR(_l, _sz) do { \
+ (_l)++; \
+ (_l) &= ((_sz) - 1); \
+ } while (0)
+
#define ATH_RXBUF 512
#define ATH_TXBUF 512
#define ATH_TXBUF_RESERVE 5
#define ATH_MAX_QDEPTH (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)
#define ATH_TXMAXTRY 13
+#define ATH_MAX_SW_RETRIES 30
#define TID_TO_WME_AC(_tid) \
((((_tid) == 0) || ((_tid) == 3)) ? IEEE80211_AC_BE : \
@@ -133,6 +111,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_AGGR_MIN_QDEPTH 2
/* minimum h/w qdepth for non-aggregated traffic */
#define ATH_NON_AGGR_MIN_QDEPTH 8
+#define ATH_TX_COMPLETE_POLL_INT 1000
+#define ATH_TXFIFO_DEPTH 8
+#define ATH_TX_ERROR 0x01
#define IEEE80211_SEQ_SEQ_SHIFT 4
#define IEEE80211_SEQ_MAX 4096
@@ -165,11 +146,10 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
-#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
+#define IS_HT_RATE(rate) (rate & 0x80)
+#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
+#define IS_OFDM_RATE(rate) ((rate >= 0x8) && (rate <= 0xf))
-#define ATH_TX_COMPLETE_POLL_INT 1000
-
-#define ATH_TXFIFO_DEPTH 8
struct ath_txq {
int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
u32 axq_qnum; /* ath9k hardware queue number */
@@ -214,6 +194,21 @@ struct ath_rxbuf {
dma_addr_t bf_buf_addr;
};
+/**
+ * enum buffer_type - Buffer type flags
+ *
+ * @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX)
+ * @BUF_AGGR: Indicates whether the buffer can be aggregated
+ * (used in aggregation scheduling)
+ */
+enum buffer_type {
+ BUF_AMPDU = BIT(0),
+ BUF_AGGR = BIT(1),
+};
+
+#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
+#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
+
struct ath_buf_state {
u8 bf_type;
u8 bfs_paprd;
@@ -269,6 +264,10 @@ struct ath_node {
bool sleeping;
bool no_ps_filter;
+
+#ifdef CONFIG_ATH9K_STATION_STATISTICS
+ struct ath_rx_rate_stats rx_rate_stats;
+#endif
};
struct ath_tx_control {
@@ -278,7 +277,6 @@ struct ath_tx_control {
struct ieee80211_sta *sta;
};
-#define ATH_TX_ERROR 0x01
/**
* @txq_map: Index is mac80211 queue number. This is
@@ -372,6 +370,22 @@ struct ath_vif {
struct ath_buf *av_bcbuf;
};
+struct ath9k_vif_iter_data {
+ u8 hw_macaddr[ETH_ALEN]; /* address of the first vif */
+ u8 mask[ETH_ALEN]; /* bssid mask */
+ bool has_hw_macaddr;
+
+ int naps; /* number of AP vifs */
+ int nmeshes; /* number of mesh vifs */
+ int nstations; /* number of station vifs */
+ int nwds; /* number of WDS vifs */
+ int nadhocs; /* number of adhoc vifs */
+};
+
+void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ath9k_vif_iter_data *iter_data);
+
/*******************/
/* Beacon Handling */
/*******************/
@@ -387,6 +401,9 @@ struct ath_vif {
#define ATH_DEFAULT_BMISS_LIMIT 10
#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
+#define TSF_TO_TU(_h,_l) \
+ ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
+
struct ath_beacon_config {
int beacon_interval;
u16 listen_interval;
@@ -420,12 +437,10 @@ struct ath_beacon {
};
void ath9k_beacon_tasklet(unsigned long data);
-bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
u32 changed);
void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
-void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_set_beacon(struct ath_softc *sc);
bool ath9k_csa_is_finished(struct ath_softc *sc);
@@ -440,17 +455,14 @@ bool ath9k_csa_is_finished(struct ath_softc *sc);
#define ATH_LONG_CALINTERVAL_INT 1000 /* 1000 ms */
#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
-#define ATH_ANI_MAX_SKIP_COUNT 10
-
-#define ATH_PAPRD_TIMEOUT 100 /* msecs */
-#define ATH_PLL_WORK_INTERVAL 100
+#define ATH_ANI_MAX_SKIP_COUNT 10
+#define ATH_PAPRD_TIMEOUT 100 /* msecs */
+#define ATH_PLL_WORK_INTERVAL 100
void ath_tx_complete_poll_work(struct work_struct *work);
void ath_reset_work(struct work_struct *work);
-void ath_hw_check(struct work_struct *work);
+bool ath_hw_check(struct ath_softc *sc);
void ath_hw_pll_work(struct work_struct *work);
-void ath_rx_poll(unsigned long data);
-void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon);
void ath_paprd_calibrate(struct work_struct *work);
void ath_ani_calibrate(unsigned long data);
void ath_start_ani(struct ath_softc *sc);
@@ -459,6 +471,7 @@ void ath_check_ani(struct ath_softc *sc);
int ath_update_survey_stats(struct ath_softc *sc);
void ath_update_survey_nf(struct ath_softc *sc, int channel);
void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
+void ath_ps_full_sleep(unsigned long data);
/**********/
/* BTCOEX */
@@ -476,20 +489,19 @@ enum bt_op_flags {
};
struct ath_btcoex {
- bool hw_timer_enabled;
spinlock_t btcoex_lock;
struct timer_list period_timer; /* Timer for BT period */
+ struct timer_list no_stomp_timer;
u32 bt_priority_cnt;
unsigned long bt_priority_time;
unsigned long op_flags;
int bt_stomp_type; /* Types of BT stomping */
- u32 btcoex_no_stomp; /* in usec */
+ u32 btcoex_no_stomp; /* in msec */
u32 btcoex_period; /* in msec */
- u32 btscan_no_stomp; /* in usec */
+ u32 btscan_no_stomp; /* in msec */
u32 duty_cycle;
u32 bt_wait_time;
int rssi_count;
- struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
struct ath_mci_profile mci;
u8 stomp_audio;
};
@@ -537,12 +549,6 @@ static inline int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
-struct ath9k_wow_pattern {
- u8 pattern_bytes[MAX_PATTERN_SIZE];
- u8 mask_bytes[MAX_PATTERN_SIZE];
- u32 pattern_len;
-};
-
/********************/
/* LED Control */
/********************/
@@ -570,6 +576,40 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
}
#endif
+/************************/
+/* Wake on Wireless LAN */
+/************************/
+
+struct ath9k_wow_pattern {
+ u8 pattern_bytes[MAX_PATTERN_SIZE];
+ u8 mask_bytes[MAX_PATTERN_SIZE];
+ u32 pattern_len;
+};
+
+#ifdef CONFIG_ATH9K_WOW
+void ath9k_init_wow(struct ieee80211_hw *hw);
+int ath9k_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int ath9k_resume(struct ieee80211_hw *hw);
+void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+#else
+static inline void ath9k_init_wow(struct ieee80211_hw *hw)
+{
+}
+static inline int ath9k_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ return 0;
+}
+static inline int ath9k_resume(struct ieee80211_hw *hw)
+{
+ return 0;
+}
+static inline void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+}
+#endif /* CONFIG_ATH9K_WOW */
+
/*******************************/
/* Antenna diversity/combining */
/*******************************/
@@ -642,19 +682,16 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
#define ATH9K_PCI_AR9565_1ANT 0x0080
#define ATH9K_PCI_AR9565_2ANT 0x0100
#define ATH9K_PCI_NO_PLL_PWRSAVE 0x0200
+#define ATH9K_PCI_KILLER 0x0400
/*
* Default cache line size, in bytes.
* Used when PCI device not fully initialized by bootrom/BIOS
*/
#define DEFAULT_CACHELINE 32
-#define ATH_REGCLASSIDS_MAX 10
#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
-#define ATH_MAX_SW_RETRIES 30
-#define ATH_CHAN_MAX 255
-
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
-#define ATH_RATE_DUMMY_MARKER 0
+#define MAX_GTT_CNT 5
enum sc_op_flags {
SC_OP_INVALID,
@@ -673,37 +710,6 @@ enum sc_op_flags {
#define PS_BEACON_SYNC BIT(4)
#define PS_WAIT_FOR_ANI BIT(5)
-struct ath_rate_table;
-
-struct ath9k_vif_iter_data {
- u8 hw_macaddr[ETH_ALEN]; /* address of the first vif */
- u8 mask[ETH_ALEN]; /* bssid mask */
- bool has_hw_macaddr;
-
- int naps; /* number of AP vifs */
- int nmeshes; /* number of mesh vifs */
- int nstations; /* number of station vifs */
- int nwds; /* number of WDS vifs */
- int nadhocs; /* number of adhoc vifs */
-};
-
-/* enum spectral_mode:
- *
- * @SPECTRAL_DISABLED: spectral mode is disabled
- * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
- * something else.
- * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
- * is performed manually.
- * @SPECTRAL_CHANSCAN: Like manual, but also triggered when changing channels
- * during a channel scan.
- */
-enum spectral_mode {
- SPECTRAL_DISABLED = 0,
- SPECTRAL_BACKGROUND,
- SPECTRAL_MANUAL,
- SPECTRAL_CHANSCAN,
-};
-
struct ath_softc {
struct ieee80211_hw *hw;
struct device *dev;
@@ -721,14 +727,14 @@ struct ath_softc {
spinlock_t sc_pcu_lock;
struct mutex mutex;
struct work_struct paprd_work;
- struct work_struct hw_check_work;
struct work_struct hw_reset_work;
struct completion paprd_complete;
+ wait_queue_head_t tx_wait;
- unsigned int hw_busy_count;
unsigned long sc_flags;
unsigned long driver_data;
+ u8 gtt_cnt;
u32 intrstatus;
u16 ps_flags; /* PS_* */
u16 curtxpow;
@@ -759,7 +765,7 @@ struct ath_softc {
struct ath_beacon_config cur_beacon_conf;
struct delayed_work tx_complete_work;
struct delayed_work hw_pll_work;
- struct timer_list rx_poll_timer;
+ struct timer_list sleep_timer;
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
struct ath_btcoex btcoex;
@@ -784,199 +790,54 @@ struct ath_softc {
bool tx99_state;
s16 tx99_power;
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ATH9K_WOW
atomic_t wow_got_bmiss_intr;
atomic_t wow_sleep_proc_intr; /* in the middle of WoW sleep ? */
u32 wow_intr_before_sleep;
#endif
};
-#define SPECTRAL_SCAN_BITMASK 0x10
-/* Radar info packet format, used for DFS and spectral formats. */
-struct ath_radar_info {
- u8 pulse_length_pri;
- u8 pulse_length_ext;
- u8 pulse_bw_info;
-} __packed;
-
-/* The HT20 spectral data has 4 bytes of additional information at it's end.
- *
- * [7:0]: all bins {max_magnitude[1:0], bitmap_weight[5:0]}
- * [7:0]: all bins max_magnitude[9:2]
- * [7:0]: all bins {max_index[5:0], max_magnitude[11:10]}
- * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
- */
-struct ath_ht20_mag_info {
- u8 all_bins[3];
- u8 max_exp;
-} __packed;
-
-#define SPECTRAL_HT20_NUM_BINS 56
-
-/* WARNING: don't actually use this struct! MAC may vary the amount of
- * data by -1/+2. This struct is for reference only.
- */
-struct ath_ht20_fft_packet {
- u8 data[SPECTRAL_HT20_NUM_BINS];
- struct ath_ht20_mag_info mag_info;
- struct ath_radar_info radar_info;
-} __packed;
-
-#define SPECTRAL_HT20_TOTAL_DATA_LEN (sizeof(struct ath_ht20_fft_packet))
-
-/* Dynamic 20/40 mode:
- *
- * [7:0]: lower bins {max_magnitude[1:0], bitmap_weight[5:0]}
- * [7:0]: lower bins max_magnitude[9:2]
- * [7:0]: lower bins {max_index[5:0], max_magnitude[11:10]}
- * [7:0]: upper bins {max_magnitude[1:0], bitmap_weight[5:0]}
- * [7:0]: upper bins max_magnitude[9:2]
- * [7:0]: upper bins {max_index[5:0], max_magnitude[11:10]}
- * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
- */
-struct ath_ht20_40_mag_info {
- u8 lower_bins[3];
- u8 upper_bins[3];
- u8 max_exp;
-} __packed;
-
-#define SPECTRAL_HT20_40_NUM_BINS 128
-
-/* WARNING: don't actually use this struct! MAC may vary the amount of
- * data. This struct is for reference only.
- */
-struct ath_ht20_40_fft_packet {
- u8 data[SPECTRAL_HT20_40_NUM_BINS];
- struct ath_ht20_40_mag_info mag_info;
- struct ath_radar_info radar_info;
-} __packed;
-
-
-#define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet))
-
-/* grabs the max magnitude from the all/upper/lower bins */
-static inline u16 spectral_max_magnitude(u8 *bins)
-{
- return (bins[0] & 0xc0) >> 6 |
- (bins[1] & 0xff) << 2 |
- (bins[2] & 0x03) << 10;
-}
+/********/
+/* TX99 */
+/********/
-/* return the max magnitude from the all/upper/lower bins */
-static inline u8 spectral_max_index(u8 *bins)
+#ifdef CONFIG_ATH9K_TX99
+void ath9k_tx99_init_debug(struct ath_softc *sc);
+int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
+ struct ath_tx_control *txctl);
+#else
+static inline void ath9k_tx99_init_debug(struct ath_softc *sc)
{
- s8 m = (bins[2] & 0xfc) >> 2;
-
- /* TODO: this still doesn't always report the right values ... */
- if (m > 32)
- m |= 0xe0;
- else
- m &= ~0xe0;
-
- return m + 29;
}
-
-/* return the bitmap weight from the all/upper/lower bins */
-static inline u8 spectral_bitmap_weight(u8 *bins)
+static inline int ath9k_tx99_send(struct ath_softc *sc,
+ struct sk_buff *skb,
+ struct ath_tx_control *txctl)
{
- return bins[0] & 0x3f;
+ return 0;
}
-
-/* FFT sample format given to userspace via debugfs.
- *
- * Please keep the type/length at the front position and change
- * other fields after adding another sample type
- *
- * TODO: this might need rework when switching to nl80211-based
- * interface.
- */
-enum ath_fft_sample_type {
- ATH_FFT_SAMPLE_HT20 = 1,
- ATH_FFT_SAMPLE_HT20_40,
-};
-
-struct fft_sample_tlv {
- u8 type; /* see ath_fft_sample */
- __be16 length;
- /* type dependent data follows */
-} __packed;
-
-struct fft_sample_ht20 {
- struct fft_sample_tlv tlv;
-
- u8 max_exp;
-
- __be16 freq;
- s8 rssi;
- s8 noise;
-
- __be16 max_magnitude;
- u8 max_index;
- u8 bitmap_weight;
-
- __be64 tsf;
-
- u8 data[SPECTRAL_HT20_NUM_BINS];
-} __packed;
-
-struct fft_sample_ht20_40 {
- struct fft_sample_tlv tlv;
-
- u8 channel_type;
- __be16 freq;
-
- s8 lower_rssi;
- s8 upper_rssi;
-
- __be64 tsf;
-
- s8 lower_noise;
- s8 upper_noise;
-
- __be16 lower_max_magnitude;
- __be16 upper_max_magnitude;
-
- u8 lower_max_index;
- u8 upper_max_index;
-
- u8 lower_bitmap_weight;
- u8 upper_bitmap_weight;
-
- u8 max_exp;
-
- u8 data[SPECTRAL_HT20_40_NUM_BINS];
-} __packed;
-
-int ath9k_tx99_init(struct ath_softc *sc);
-void ath9k_tx99_deinit(struct ath_softc *sc);
-int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
- struct ath_tx_control *txctl);
-
-void ath9k_tasklet(unsigned long data);
-int ath_cabq_update(struct ath_softc *);
+#endif /* CONFIG_ATH9K_TX99 */
static inline void ath_read_cachesize(struct ath_common *common, int *csz)
{
common->bus_ops->read_cachesize(common, csz);
}
-extern struct ieee80211_ops ath9k_ops;
-extern int ath9k_modparam_nohwcrypt;
-extern int led_blink;
-extern bool is_ath9k_unloaded;
-
+void ath9k_tasklet(unsigned long data);
+int ath_cabq_update(struct ath_softc *);
u8 ath9k_parse_mpdudensity(u8 mpdudensity);
irqreturn_t ath_isr(int irq, void *dev);
+int ath_reset(struct ath_softc *sc);
+void ath_cancel_work(struct ath_softc *sc);
+void ath_restart_work(struct ath_softc *sc);
int ath9k_init_device(u16 devid, struct ath_softc *sc,
const struct ath_bus_ops *bus_ops);
void ath9k_deinit_device(struct ath_softc *sc);
-void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_reload_chainmask_settings(struct ath_softc *sc);
-
-void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
-int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
- enum spectral_mode spectral_mode);
-
+u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
+void ath_start_rfkill_poll(struct ath_softc *sc);
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_ps_wakeup(struct ath_softc *sc);
+void ath9k_ps_restore(struct ath_softc *sc);
#ifdef CONFIG_ATH9K_PCI
int ath_pci_init(void);
@@ -994,15 +855,4 @@ static inline int ath_ahb_init(void) { return 0; };
static inline void ath_ahb_exit(void) {};
#endif
-void ath9k_ps_wakeup(struct ath_softc *sc);
-void ath9k_ps_restore(struct ath_softc *sc);
-
-u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
-
-void ath_start_rfkill_poll(struct ath_softc *sc);
-void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
-void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ath9k_vif_iter_data *iter_data);
-
#endif /* ATH9K_H */
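
The TX99 declarations above show the Kconfig-stub idiom the header now relies on: with CONFIG_ATH9K_TX99 enabled the real prototypes are visible, otherwise static inline no-ops stand in, so callers compile unchanged either way. A minimal self-contained sketch of the same idiom, built around a hypothetical CONFIG_FOO option (not part of the patch):

    #include <stdio.h>

    /* #define CONFIG_FOO 1 */

    #ifdef CONFIG_FOO
    void foo_start(int id);                   /* real implementation provided elsewhere */
    #else
    static inline void foo_start(int id) { }  /* option disabled: compiles to nothing */
    #endif

    int main(void)
    {
            foo_start(42);                     /* no #ifdef needed at the call site */
            printf("done\n");
            return 0;
    }
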
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 17be35392bb4..2e8bba0eb361 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -274,18 +274,19 @@ static int ath9k_beacon_choose_slot(struct ath_softc *sc)
return slot;
}
-void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
+static void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
struct ath_vif *avp = (void *)vif->drv_priv;
- u64 tsfadjust;
+ u32 tsfadjust;
if (avp->av_bslot == 0)
return;
- tsfadjust = cur_conf->beacon_interval * avp->av_bslot / ATH_BCBUF;
- avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
+ tsfadjust = cur_conf->beacon_interval * avp->av_bslot;
+ tsfadjust = TU_TO_USEC(tsfadjust) / ATH_BCBUF;
+ avp->tsf_adjust = cpu_to_le64(tsfadjust);
ath_dbg(common, CONFIG, "tsfadjust is: %llu for bslot: %d\n",
(unsigned long long)tsfadjust, avp->av_bslot);
@@ -336,8 +337,14 @@ void ath9k_beacon_tasklet(unsigned long data)
ath9k_hw_check_nav(ah);
- if (!ath9k_hw_check_alive(ah))
- ieee80211_queue_work(sc->hw, &sc->hw_check_work);
+ /*
+ * If the previous beacon has not been transmitted
+ * and a MAC/BB hang has been identified, return
+ * here because a chip reset would have been
+ * initiated.
+ */
+ if (!ath_hw_check(sc))
+ return;
if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) {
ath_dbg(common, BSTUCK,
@@ -431,6 +438,33 @@ static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt,
ath9k_hw_enable_interrupts(ah);
}
+/* Calculate the modulo of a 64 bit TSF snapshot with a TU divisor */
+static u32 ath9k_mod_tsf64_tu(u64 tsf, u32 div_tu)
+{
+ u32 tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo;
+
+ tsf_mod = tsf & (BIT(10) - 1);
+ tsf_hi = tsf >> 32;
+ tsf_lo = ((u32) tsf) >> 10;
+
+ mod_hi = tsf_hi % div_tu;
+ mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;
+
+ return (mod_lo << 10) | tsf_mod;
+}
+
+static u32 ath9k_get_next_tbtt(struct ath_softc *sc, u64 tsf,
+ unsigned int interval)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ unsigned int offset;
+
+ tsf += TU_TO_USEC(FUDGE + ah->config.sw_beacon_response_time);
+ offset = ath9k_mod_tsf64_tu(tsf, interval);
+
+ return (u32) tsf + TU_TO_USEC(interval) - offset;
+}
+
/*
* For multi-bss ap support beacons are either staggered evenly over N slots or
* burst together. For the former arrange for the SWBA to be delivered for each
@@ -446,7 +480,8 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
/* NB: the beacon interval is kept internally in TU's */
intval = TU_TO_USEC(conf->beacon_interval);
intval /= ATH_BCBUF;
- nexttbtt = intval;
+ nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
+ conf->beacon_interval);
if (conf->enable_beacon)
ah->imask |= ATH9K_INT_SWBA;
@@ -458,7 +493,7 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
(conf->enable_beacon) ? "Enable" : "Disable",
nexttbtt, intval, conf->beacon_interval);
- ath9k_beacon_init(sc, nexttbtt, intval, true);
+ ath9k_beacon_init(sc, nexttbtt, intval, false);
}
/*
@@ -475,11 +510,9 @@ static void ath9k_beacon_config_sta(struct ath_softc *sc,
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_beacon_state bs;
- int dtimperiod, dtimcount, sleepduration;
- int cfpperiod, cfpcount;
- u32 nexttbtt = 0, intval, tsftu;
+ int dtim_intval, sleepduration;
+ u32 nexttbtt = 0, intval;
u64 tsf;
- int num_beacons, offset, dtim_dec_count, cfp_dec_count;
/* No need to configure beacon if we are not associated */
if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
@@ -492,53 +525,25 @@ static void ath9k_beacon_config_sta(struct ath_softc *sc,
intval = conf->beacon_interval;
/*
- * Setup dtim and cfp parameters according to
+ * Setup dtim parameters according to
* last beacon we received (which may be none).
*/
- dtimperiod = conf->dtim_period;
- dtimcount = conf->dtim_count;
- if (dtimcount >= dtimperiod) /* NB: sanity check */
- dtimcount = 0;
- cfpperiod = 1; /* NB: no PCF support yet */
- cfpcount = 0;
-
+ dtim_intval = intval * conf->dtim_period;
sleepduration = conf->listen_interval * intval;
/*
* Pull nexttbtt forward to reflect the current
- * TSF and calculate dtim+cfp state for the result.
+ * TSF and calculate dtim state for the result.
*/
tsf = ath9k_hw_gettsf64(ah);
- tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
-
- num_beacons = tsftu / intval + 1;
- offset = tsftu % intval;
- nexttbtt = tsftu - offset;
- if (offset)
- nexttbtt += intval;
-
- /* DTIM Beacon every dtimperiod Beacon */
- dtim_dec_count = num_beacons % dtimperiod;
- /* CFP every cfpperiod DTIM Beacon */
- cfp_dec_count = (num_beacons / dtimperiod) % cfpperiod;
- if (dtim_dec_count)
- cfp_dec_count++;
-
- dtimcount -= dtim_dec_count;
- if (dtimcount < 0)
- dtimcount += dtimperiod;
-
- cfpcount -= cfp_dec_count;
- if (cfpcount < 0)
- cfpcount += cfpperiod;
-
- bs.bs_intval = intval;
+ nexttbtt = ath9k_get_next_tbtt(sc, tsf, intval);
+
+ bs.bs_intval = TU_TO_USEC(intval);
+ bs.bs_dtimperiod = conf->dtim_period * bs.bs_intval;
bs.bs_nexttbtt = nexttbtt;
- bs.bs_dtimperiod = dtimperiod*intval;
- bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
- bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
- bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
- bs.bs_cfpmaxduration = 0;
+ bs.bs_nextdtim = nexttbtt;
+ if (conf->dtim_period > 1)
+ bs.bs_nextdtim = ath9k_get_next_tbtt(sc, tsf, dtim_intval);
/*
* Calculate the number of consecutive beacons to miss* before taking
@@ -566,18 +571,16 @@ static void ath9k_beacon_config_sta(struct ath_softc *sc,
* XXX fixed at 100ms
*/
- bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100), sleepduration);
+ bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
+ sleepduration));
if (bs.bs_sleepduration > bs.bs_dtimperiod)
bs.bs_sleepduration = bs.bs_dtimperiod;
/* TSF out of range threshold fixed at 1 second */
bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
- ath_dbg(common, BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
- ath_dbg(common, BEACON,
- "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
- bs.bs_bmissthreshold, bs.bs_sleepduration,
- bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
+ ath_dbg(common, BEACON, "bmiss: %u sleep: %u\n",
+ bs.bs_bmissthreshold, bs.bs_sleepduration);
/* Set the computed STA beacon timers */
@@ -600,25 +603,11 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
intval = TU_TO_USEC(conf->beacon_interval);
- if (conf->ibss_creator) {
+ if (conf->ibss_creator)
nexttbtt = intval;
- } else {
- u32 tbtt, offset, tsftu;
- u64 tsf;
-
- /*
- * Pull nexttbtt forward to reflect the current
- * sync'd TSF.
- */
- tsf = ath9k_hw_gettsf64(ah);
- tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
- offset = tsftu % conf->beacon_interval;
- tbtt = tsftu - offset;
- if (offset)
- tbtt += conf->beacon_interval;
-
- nexttbtt = TU_TO_USEC(tbtt);
- }
+ else
+ nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
+ conf->beacon_interval);
if (conf->enable_beacon)
ah->imask |= ATH9K_INT_SWBA;
@@ -640,7 +629,8 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
set_bit(SC_OP_BEACONS, &sc->sc_flags);
}
-bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
+static bool ath9k_allow_beacon_config(struct ath_softc *sc,
+ struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_vif *avp = (void *)vif->drv_priv;
@@ -711,12 +701,17 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
unsigned long flags;
bool skip_beacon = false;
+ if (vif->type == NL80211_IFTYPE_AP)
+ ath9k_set_tsfadjust(sc, vif);
+
+ if (!ath9k_allow_beacon_config(sc, vif))
+ return;
+
if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
ath9k_cache_beacon_config(sc, bss_conf);
ath9k_set_beacon(sc);
set_bit(SC_OP_BEACONS, &sc->sc_flags);
return;
-
}
/*
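
The new helpers above replace the old TSF_TO_TU round-up logic: since one TU is 1024 us, the 64-bit TSF splits into a 54-bit TU count plus the 10 low microsecond bits, and ath9k_mod_tsf64_tu() reduces the upper 32 bits first so the whole modulo stays in 32-bit arithmetic (mod_hi << 22 still fits as long as the divisor is at most 1024 TU, which covers normal beacon intervals). A standalone userspace sketch (not part of the patch) that checks the split modulo against a plain 64-bit one:

    #include <stdint.h>
    #include <stdio.h>

    /* Same reduction as ath9k_mod_tsf64_tu(): tsf % (div_tu * 1024 us),
     * computed without 64-bit division. */
    static uint32_t mod_tsf64_tu(uint64_t tsf, uint32_t div_tu)
    {
            uint32_t tsf_mod = tsf & ((1u << 10) - 1);   /* us within the current TU */
            uint32_t tsf_hi = tsf >> 32;
            uint32_t tsf_lo = (uint32_t)tsf >> 10;       /* TU bits of the low word */
            uint32_t mod_hi = tsf_hi % div_tu;
            uint32_t mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;

            return (mod_lo << 10) | tsf_mod;
    }

    int main(void)
    {
            uint64_t tsf = 0x0123456789abcdefULL;        /* arbitrary sample TSF, in us */
            uint32_t intval_tu = 100;                    /* typical beacon interval, in TUs */

            printf("split  : %u\n", mod_tsf64_tu(tsf, intval_tu));
            printf("direct : %u\n", (uint32_t)(tsf % ((uint64_t)intval_tu << 10)));
            return 0;
    }
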
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 9963b0bf9f72..3dfc2c7f1f07 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -66,7 +66,6 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
.bt_first_slot_time = 5,
.bt_hold_rx_clear = true,
};
- u32 i, idx;
bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
if (AR_SREV_9300_20_OR_LATER(ah))
@@ -88,11 +87,6 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) |
SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) |
AR_BT_DISABLE_BT_ANT;
-
- for (i = 0; i < 32; i++) {
- idx = (debruijn32 << i) >> 27;
- ah->hw_gen_timers.gen_timer_index[idx] = i;
- }
}
EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index a7e5a05b2eff..768c733cad31 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -98,10 +98,8 @@ struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
{
struct ieee80211_channel *curchan = chandef->chan;
struct ath9k_channel *channel;
- u8 chan_idx;
- chan_idx = curchan->hw_value;
- channel = &ah->channels[chan_idx];
+ channel = &ah->channels[curchan->hw_value];
ath9k_cmn_update_ichannel(channel, chandef);
return channel;
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 83a2c59f680b..ab7264c1d8f7 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -17,7 +17,6 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
-#include <linux/relay.h>
#include <asm/unaligned.h>
#include "ath9k.h"
@@ -27,6 +26,47 @@
#define REG_READ_D(_ah, _reg) \
ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
+void ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause)
+{
+ if (sync_cause)
+ sc->debug.stats.istats.sync_cause_all++;
+ if (sync_cause & AR_INTR_SYNC_RTC_IRQ)
+ sc->debug.stats.istats.sync_rtc_irq++;
+ if (sync_cause & AR_INTR_SYNC_MAC_IRQ)
+ sc->debug.stats.istats.sync_mac_irq++;
+ if (sync_cause & AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS)
+ sc->debug.stats.istats.eeprom_illegal_access++;
+ if (sync_cause & AR_INTR_SYNC_APB_TIMEOUT)
+ sc->debug.stats.istats.apb_timeout++;
+ if (sync_cause & AR_INTR_SYNC_PCI_MODE_CONFLICT)
+ sc->debug.stats.istats.pci_mode_conflict++;
+ if (sync_cause & AR_INTR_SYNC_HOST1_FATAL)
+ sc->debug.stats.istats.host1_fatal++;
+ if (sync_cause & AR_INTR_SYNC_HOST1_PERR)
+ sc->debug.stats.istats.host1_perr++;
+ if (sync_cause & AR_INTR_SYNC_TRCV_FIFO_PERR)
+ sc->debug.stats.istats.trcv_fifo_perr++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_EP)
+ sc->debug.stats.istats.radm_cpl_ep++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_DLLP_ABORT)
+ sc->debug.stats.istats.radm_cpl_dllp_abort++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TLP_ABORT)
+ sc->debug.stats.istats.radm_cpl_tlp_abort++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_ECRC_ERR)
+ sc->debug.stats.istats.radm_cpl_ecrc_err++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT)
+ sc->debug.stats.istats.radm_cpl_timeout++;
+ if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
+ sc->debug.stats.istats.local_timeout++;
+ if (sync_cause & AR_INTR_SYNC_PM_ACCESS)
+ sc->debug.stats.istats.pm_access++;
+ if (sync_cause & AR_INTR_SYNC_MAC_AWAKE)
+ sc->debug.stats.istats.mac_awake++;
+ if (sync_cause & AR_INTR_SYNC_MAC_ASLEEP)
+ sc->debug.stats.istats.mac_asleep++;
+ if (sync_cause & AR_INTR_SYNC_MAC_SLEEP_ACCESS)
+ sc->debug.stats.istats.mac_sleep_access++;
+}
static ssize_t ath9k_debugfs_read_buf(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -903,14 +943,10 @@ static const struct file_operations fops_reset = {
static ssize_t read_file_recv(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
-#define PHY_ERR(s, p) \
- len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
- sc->debug.stats.rxstats.phy_err_stats[p]);
-
#define RXS_ERR(s, e) \
do { \
len += scnprintf(buf + len, size - len, \
- "%22s : %10u\n", s, \
+ "%18s : %10u\n", s, \
sc->debug.stats.rxstats.e);\
} while (0)
@@ -923,6 +959,12 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
+ RXS_ERR("PKTS-ALL", rx_pkts_all);
+ RXS_ERR("BYTES-ALL", rx_bytes_all);
+ RXS_ERR("BEACONS", rx_beacons);
+ RXS_ERR("FRAGS", rx_frags);
+ RXS_ERR("SPECTRAL", rx_spectral);
+
RXS_ERR("CRC ERR", crc_err);
RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
RXS_ERR("PHY ERR", phy_err);
@@ -930,43 +972,10 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
- RXS_ERR("RX-LENGTH-ERR", rx_len_err);
- RXS_ERR("RX-OOM-ERR", rx_oom_err);
- RXS_ERR("RX-RATE-ERR", rx_rate_err);
- RXS_ERR("RX-TOO-MANY-FRAGS", rx_too_many_frags_err);
-
- PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
- PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
- PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
- PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
- PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
- PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
- PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
- PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
- PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
- PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
- PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
- PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
- PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
- PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
- PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
- PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
- PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
- PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
- PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
- PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
- PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
- PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
- PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
- PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
- PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
- PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
-
- RXS_ERR("RX-Pkts-All", rx_pkts_all);
- RXS_ERR("RX-Bytes-All", rx_bytes_all);
- RXS_ERR("RX-Beacons", rx_beacons);
- RXS_ERR("RX-Frags", rx_frags);
- RXS_ERR("RX-Spectral", rx_spectral);
+ RXS_ERR("LENGTH-ERR", rx_len_err);
+ RXS_ERR("OOM-ERR", rx_oom_err);
+ RXS_ERR("RATE-ERR", rx_rate_err);
+ RXS_ERR("TOO-MANY-FRAGS", rx_too_many_frags_err);
if (len > size)
len = size;
@@ -977,7 +986,6 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
return retval;
#undef RXS_ERR
-#undef PHY_ERR
}
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
@@ -1016,293 +1024,67 @@ static const struct file_operations fops_recv = {
.llseek = default_llseek,
};
-static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- char *mode = "";
- unsigned int len;
-
- switch (sc->spectral_mode) {
- case SPECTRAL_DISABLED:
- mode = "disable";
- break;
- case SPECTRAL_BACKGROUND:
- mode = "background";
- break;
- case SPECTRAL_CHANSCAN:
- mode = "chanscan";
- break;
- case SPECTRAL_MANUAL:
- mode = "manual";
- break;
- }
- len = strlen(mode);
- return simple_read_from_buffer(user_buf, count, ppos, mode, len);
-}
-
-static ssize_t write_file_spec_scan_ctl(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- char buf[32];
- ssize_t len;
-
- if (config_enabled(CONFIG_ATH9K_TX99))
- return -EOPNOTSUPP;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
-
- buf[len] = '\0';
-
- if (strncmp("trigger", buf, 7) == 0) {
- ath9k_spectral_scan_trigger(sc->hw);
- } else if (strncmp("background", buf, 9) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
- ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
- } else if (strncmp("chanscan", buf, 8) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_CHANSCAN);
- ath_dbg(common, CONFIG, "spectral scan: channel scan mode enabled\n");
- } else if (strncmp("manual", buf, 6) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_MANUAL);
- ath_dbg(common, CONFIG, "spectral scan: manual mode enabled\n");
- } else if (strncmp("disable", buf, 7) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_DISABLED);
- ath_dbg(common, CONFIG, "spectral scan: disabled\n");
- } else {
- return -EINVAL;
- }
-
- return count;
-}
-
-static const struct file_operations fops_spec_scan_ctl = {
- .read = read_file_spec_scan_ctl,
- .write = write_file_spec_scan_ctl,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t read_file_spectral_short_repeat(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- char buf[32];
- unsigned int len;
-
- len = sprintf(buf, "%d\n", sc->spec_config.short_repeat);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_spectral_short_repeat(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- unsigned long val;
- char buf[32];
- ssize_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
-
- buf[len] = '\0';
- if (kstrtoul(buf, 0, &val))
- return -EINVAL;
-
- if (val < 0 || val > 1)
- return -EINVAL;
-
- sc->spec_config.short_repeat = val;
- return count;
-}
-
-static const struct file_operations fops_spectral_short_repeat = {
- .read = read_file_spectral_short_repeat,
- .write = write_file_spectral_short_repeat,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t read_file_spectral_count(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- char buf[32];
- unsigned int len;
-
- len = sprintf(buf, "%d\n", sc->spec_config.count);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_spectral_count(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t read_file_phy_err(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
- unsigned long val;
- char buf[32];
- ssize_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
-
- buf[len] = '\0';
- if (kstrtoul(buf, 0, &val))
- return -EINVAL;
-
- if (val < 0 || val > 255)
- return -EINVAL;
-
- sc->spec_config.count = val;
- return count;
-}
-
-static const struct file_operations fops_spectral_count = {
- .read = read_file_spectral_count,
- .write = write_file_spectral_count,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t read_file_spectral_period(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- char buf[32];
- unsigned int len;
-
- len = sprintf(buf, "%d\n", sc->spec_config.period);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_spectral_period(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- unsigned long val;
- char buf[32];
- ssize_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
-
- buf[len] = '\0';
- if (kstrtoul(buf, 0, &val))
- return -EINVAL;
-
- if (val < 0 || val > 255)
- return -EINVAL;
-
- sc->spec_config.period = val;
- return count;
-}
-
-static const struct file_operations fops_spectral_period = {
- .read = read_file_spectral_period,
- .write = write_file_spectral_period,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
+#define PHY_ERR(s, p) \
+ len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
+ sc->debug.stats.rxstats.phy_err_stats[p]);
-static ssize_t read_file_spectral_fft_period(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
struct ath_softc *sc = file->private_data;
- char buf[32];
- unsigned int len;
+ char *buf;
+ unsigned int len = 0, size = 1600;
+ ssize_t retval = 0;
- len = sprintf(buf, "%d\n", sc->spec_config.fft_period);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
-static ssize_t write_file_spectral_fft_period(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- unsigned long val;
- char buf[32];
- ssize_t len;
+ PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
+ PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
+ PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
+ PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
+ PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
+ PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
+ PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
+ PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
+ PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
+ PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
+ PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
+ PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
+ PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
+ PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
+ PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
+ PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
+ PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
+ PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
+ PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
+ PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
+ PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
+ PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
+ PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
+ PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
+ PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
+ PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
+ if (len > size)
+ len = size;
- buf[len] = '\0';
- if (kstrtoul(buf, 0, &val))
- return -EINVAL;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
- if (val < 0 || val > 15)
- return -EINVAL;
+ return retval;
- sc->spec_config.fft_period = val;
- return count;
+#undef PHY_ERR
}
-static const struct file_operations fops_spectral_fft_period = {
- .read = read_file_spectral_fft_period,
- .write = write_file_spectral_fft_period,
+static const struct file_operations fops_phy_err = {
+ .read = read_file_phy_err,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
-static struct dentry *create_buf_file_handler(const char *filename,
- struct dentry *parent,
- umode_t mode,
- struct rchan_buf *buf,
- int *is_global)
-{
- struct dentry *buf_file;
-
- buf_file = debugfs_create_file(filename, mode, parent, buf,
- &relay_file_operations);
- *is_global = 1;
- return buf_file;
-}
-
-static int remove_buf_file_handler(struct dentry *dentry)
-{
- debugfs_remove(dentry);
-
- return 0;
-}
-
-void ath_debug_send_fft_sample(struct ath_softc *sc,
- struct fft_sample_tlv *fft_sample_tlv)
-{
- int length;
- if (!sc->rfs_chan_spec_scan)
- return;
-
- length = __be16_to_cpu(fft_sample_tlv->length) +
- sizeof(*fft_sample_tlv);
- relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv, length);
-}
-
-static struct rchan_callbacks rfs_spec_scan_cb = {
- .create_buf_file = create_buf_file_handler,
- .remove_buf_file = remove_buf_file_handler,
-};
-
-
static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1569,86 +1351,6 @@ static const struct file_operations fops_btcoex = {
};
#endif
-static ssize_t read_file_node_stat(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_node *an = file->private_data;
- struct ath_softc *sc = an->sc;
- struct ath_atx_tid *tid;
- struct ath_atx_ac *ac;
- struct ath_txq *txq;
- u32 len = 0, size = 4096;
- char *buf;
- size_t retval;
- int tidno, acno;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- if (!an->sta->ht_cap.ht_supported) {
- len = scnprintf(buf, size, "%s\n",
- "HT not supported");
- goto exit;
- }
-
- len = scnprintf(buf, size, "Max-AMPDU: %d\n",
- an->maxampdu);
- len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
- an->mpdudensity);
-
- len += scnprintf(buf + len, size - len,
- "%2s%7s\n", "AC", "SCHED");
-
- for (acno = 0, ac = &an->ac[acno];
- acno < IEEE80211_NUM_ACS; acno++, ac++) {
- txq = ac->txq;
- ath_txq_lock(sc, txq);
- len += scnprintf(buf + len, size - len,
- "%2d%7d\n",
- acno, ac->sched);
- ath_txq_unlock(sc, txq);
- }
-
- len += scnprintf(buf + len, size - len,
- "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
- "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
- "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
-
- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
- txq = tid->ac->txq;
- ath_txq_lock(sc, txq);
- len += scnprintf(buf + len, size - len,
- "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
- tid->tidno, tid->seq_start, tid->seq_next,
- tid->baw_size, tid->baw_head, tid->baw_tail,
- tid->bar_index, tid->sched, tid->paused);
- ath_txq_unlock(sc, txq);
- }
-exit:
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-}
-
-static const struct file_operations fops_node_stat = {
- .read = read_file_node_stat,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct dentry *dir)
-{
- struct ath_node *an = (struct ath_node *)sta->drv_priv;
- debugfs_create_file("node_stat", S_IRUGO, dir, an, &fops_node_stat);
-}
-
/* Ethtool support for get-stats */
#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
@@ -1772,117 +1474,9 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw,
void ath9k_deinit_debug(struct ath_softc *sc)
{
- if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
- relay_close(sc->rfs_chan_spec_scan);
- sc->rfs_chan_spec_scan = NULL;
- }
-}
-
-static ssize_t read_file_tx99(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- char buf[3];
- unsigned int len;
-
- len = sprintf(buf, "%d\n", sc->tx99_state);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- char buf[32];
- bool start;
- ssize_t len;
- int r;
-
- if (sc->nvifs > 1)
- return -EOPNOTSUPP;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
-
- if (strtobool(buf, &start))
- return -EINVAL;
-
- if (start == sc->tx99_state) {
- if (!start)
- return count;
- ath_dbg(common, XMIT, "Resetting TX99\n");
- ath9k_tx99_deinit(sc);
- }
-
- if (!start) {
- ath9k_tx99_deinit(sc);
- return count;
- }
-
- r = ath9k_tx99_init(sc);
- if (r)
- return r;
-
- return count;
-}
-
-static const struct file_operations fops_tx99 = {
- .read = read_file_tx99,
- .write = write_file_tx99,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t read_file_tx99_power(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- char buf[32];
- unsigned int len;
-
- len = sprintf(buf, "%d (%d dBm)\n",
- sc->tx99_power,
- sc->tx99_power / 2);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ ath9k_spectral_deinit_debug(sc);
}
-static ssize_t write_file_tx99_power(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- int r;
- u8 tx_power;
-
- r = kstrtou8_from_user(user_buf, count, 0, &tx_power);
- if (r)
- return r;
-
- if (tx_power > MAX_RATE_POWER)
- return -EINVAL;
-
- sc->tx99_power = tx_power;
-
- ath9k_ps_wakeup(sc);
- ath9k_hw_tx99_set_txpower(sc->sc_ah, sc->tx99_power);
- ath9k_ps_restore(sc);
-
- return count;
-}
-
-static const struct file_operations fops_tx99_power = {
- .read = read_file_tx99_power,
- .write = write_file_tx99_power,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -1899,6 +1493,8 @@ int ath9k_init_debug(struct ath_hw *ah)
#endif
ath9k_dfs_init_debug(sc);
+ ath9k_tx99_init_debug(sc);
+ ath9k_spectral_init_debug(sc);
debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_dma);
@@ -1922,6 +1518,8 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_reset);
debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_recv);
+ debugfs_create_file("phy_err", S_IRUSR, sc->debug.debugfs_phy, sc,
+ &fops_phy_err);
debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
&ah->rxchainmask);
debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
@@ -1945,23 +1543,6 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_base_eeprom);
debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_modal_eeprom);
- sc->rfs_chan_spec_scan = relay_open("spectral_scan",
- sc->debug.debugfs_phy,
- 1024, 256, &rfs_spec_scan_cb,
- NULL);
- debugfs_create_file("spectral_scan_ctl", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
- &fops_spec_scan_ctl);
- debugfs_create_file("spectral_short_repeat", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
- &fops_spectral_short_repeat);
- debugfs_create_file("spectral_count", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc, &fops_spectral_count);
- debugfs_create_file("spectral_period", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc, &fops_spectral_period);
- debugfs_create_file("spectral_fft_period", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
- &fops_spectral_fft_period);
debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
@@ -1974,15 +1555,6 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_btcoex);
#endif
- if (config_enabled(CONFIG_ATH9K_TX99) &&
- AR_SREV_9300_20_OR_LATER(ah)) {
- debugfs_create_file("tx99", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
- &fops_tx99);
- debugfs_create_file("tx99_power", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
- &fops_tx99_power);
- }
return 0;
}
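
With the PHY error counters split out of the crowded "recv" file, they are read from the new "phy_err" entry under the ath9k debugfs directory. A small userspace sketch that dumps it; the path assumes debugfs is mounted at /sys/kernel/debug and the device is registered as phy0, both of which vary per system:

    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/kernel/debug/ieee80211/phy0/ath9k/phy_err";
            char line[128];
            FILE *f = fopen(path, "r");

            if (!f) {
                    perror(path);
                    return 1;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }
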
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index d6e3fa4299a4..cc7a025d833e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -27,11 +27,13 @@ struct fft_sample_tlv;
#ifdef CONFIG_ATH9K_DEBUGFS
#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
+#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
#define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++
#define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++
#define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_recv_cnt[c]++;
#else
#define TX_STAT_INC(q, c) do { } while (0)
+#define RX_STAT_INC(c)
#define RESET_STAT_INC(sc, type) do { } while (0)
#define ANT_STAT_INC(i, c) do { } while (0)
#define ANT_LNA_INC(i, c) do { } while (0)
@@ -42,6 +44,7 @@ enum ath_reset_type {
RESET_TYPE_BB_WATCHDOG,
RESET_TYPE_FATAL_INT,
RESET_TYPE_TX_ERROR,
+ RESET_TYPE_TX_GTT,
RESET_TYPE_TX_HANG,
RESET_TYPE_PLL_HANG,
RESET_TYPE_MAC_HANG,
@@ -201,7 +204,23 @@ struct ath_tx_stats {
TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
} while(0)
-#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
+struct ath_rx_rate_stats {
+ struct {
+ u32 ht20_cnt;
+ u32 ht40_cnt;
+ u32 sgi_cnt;
+ u32 lgi_cnt;
+ } ht_stats[24];
+
+ struct {
+ u32 ofdm_cnt;
+ } ofdm_stats[8];
+
+ struct {
+ u32 cck_lp_cnt;
+ u32 cck_sp_cnt;
+ } cck_stats[4];
+};
/**
* struct ath_rx_stats - RX Statistics
@@ -292,14 +311,12 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct dentry *dir);
-void ath_debug_send_fft_sample(struct ath_softc *sc,
- struct fft_sample_tlv *fft_sample);
void ath9k_debug_stat_ant(struct ath_softc *sc,
struct ath_hw_antcomb_conf *div_ant_conf,
int main_rssi_avg, int alt_rssi_avg);
-#else
+void ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause);
-#define RX_STAT_INC(c) /* NOP */
+#else
static inline int ath9k_init_debug(struct ath_hw *ah)
{
@@ -331,6 +348,23 @@ static inline void ath9k_debug_stat_ant(struct ath_softc *sc,
}
+static inline void
+ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause)
+{
+}
+
#endif /* CONFIG_ATH9K_DEBUGFS */
+#ifdef CONFIG_ATH9K_STATION_STATISTICS
+void ath_debug_rate_stats(struct ath_softc *sc,
+ struct ath_rx_status *rs,
+ struct sk_buff *skb);
+#else
+static inline void ath_debug_rate_stats(struct ath_softc *sc,
+ struct ath_rx_status *rs,
+ struct sk_buff *skb)
+{
+}
+#endif /* CONFIG_ATH9K_STATION_STATISTICS */
+
#endif /* DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
new file mode 100644
index 000000000000..d76e6e0120d2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+/*************/
+/* node_aggr */
+/*************/
+
+static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_node *an = file->private_data;
+ struct ath_softc *sc = an->sc;
+ struct ath_atx_tid *tid;
+ struct ath_atx_ac *ac;
+ struct ath_txq *txq;
+ u32 len = 0, size = 4096;
+ char *buf;
+ size_t retval;
+ int tidno, acno;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (!an->sta->ht_cap.ht_supported) {
+ len = scnprintf(buf, size, "%s\n",
+ "HT not supported");
+ goto exit;
+ }
+
+ len = scnprintf(buf, size, "Max-AMPDU: %d\n",
+ an->maxampdu);
+ len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
+ an->mpdudensity);
+
+ len += scnprintf(buf + len, size - len,
+ "%2s%7s\n", "AC", "SCHED");
+
+ for (acno = 0, ac = &an->ac[acno];
+ acno < IEEE80211_NUM_ACS; acno++, ac++) {
+ txq = ac->txq;
+ ath_txq_lock(sc, txq);
+ len += scnprintf(buf + len, size - len,
+ "%2d%7d\n",
+ acno, ac->sched);
+ ath_txq_unlock(sc, txq);
+ }
+
+ len += scnprintf(buf + len, size - len,
+ "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
+ "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
+ "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
+
+ for (tidno = 0, tid = &an->tid[tidno];
+ tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
+ txq = tid->ac->txq;
+ ath_txq_lock(sc, txq);
+ if (tid->active) {
+ len += scnprintf(buf + len, size - len,
+ "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
+ tid->tidno,
+ tid->seq_start,
+ tid->seq_next,
+ tid->baw_size,
+ tid->baw_head,
+ tid->baw_tail,
+ tid->bar_index,
+ tid->sched,
+ tid->paused);
+ }
+ ath_txq_unlock(sc, txq);
+ }
+exit:
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_node_aggr = {
+ .read = read_file_node_aggr,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/*************/
+/* node_recv */
+/*************/
+
+void ath_debug_rate_stats(struct ath_softc *sc,
+ struct ath_rx_status *rs,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ieee80211_rx_status *rxs;
+ struct ath_rx_rate_stats *rstats;
+ struct ieee80211_sta *sta;
+ struct ath_node *an;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ return;
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta_by_ifaddr(sc->hw, hdr->addr2, NULL);
+ if (!sta)
+ goto exit;
+
+ an = (struct ath_node *) sta->drv_priv;
+ rstats = &an->rx_rate_stats;
+ rxs = IEEE80211_SKB_RXCB(skb);
+
+ if (IS_HT_RATE(rs->rs_rate)) {
+ if (rxs->rate_idx >= ARRAY_SIZE(rstats->ht_stats))
+ goto exit;
+
+ if (rxs->flag & RX_FLAG_40MHZ)
+ rstats->ht_stats[rxs->rate_idx].ht40_cnt++;
+ else
+ rstats->ht_stats[rxs->rate_idx].ht20_cnt++;
+
+ if (rxs->flag & RX_FLAG_SHORT_GI)
+ rstats->ht_stats[rxs->rate_idx].sgi_cnt++;
+ else
+ rstats->ht_stats[rxs->rate_idx].lgi_cnt++;
+
+ goto exit;
+ }
+
+ if (IS_CCK_RATE(rs->rs_rate)) {
+ if (rxs->flag & RX_FLAG_SHORTPRE)
+ rstats->cck_stats[rxs->rate_idx].cck_sp_cnt++;
+ else
+ rstats->cck_stats[rxs->rate_idx].cck_lp_cnt++;
+
+ goto exit;
+ }
+
+ if (IS_OFDM_RATE(rs->rs_rate)) {
+ if (ah->curchan->chan->band == IEEE80211_BAND_2GHZ)
+ rstats->ofdm_stats[rxs->rate_idx - 4].ofdm_cnt++;
+ else
+ rstats->ofdm_stats[rxs->rate_idx].ofdm_cnt++;
+ }
+exit:
+ rcu_read_unlock();
+}
+
+#define PRINT_CCK_RATE(str, i, sp) \
+ do { \
+ len += scnprintf(buf + len, size - len, \
+ "%11s : %10u\n", \
+ str, \
+ (sp) ? rstats->cck_stats[i].cck_sp_cnt : \
+ rstats->cck_stats[i].cck_lp_cnt); \
+ } while (0)
+
+#define PRINT_OFDM_RATE(str, i) \
+ do { \
+ len += scnprintf(buf + len, size - len, \
+ "%11s : %10u\n", \
+ str, \
+ rstats->ofdm_stats[i].ofdm_cnt); \
+ } while (0)
+
+static ssize_t read_file_node_recv(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_node *an = file->private_data;
+ struct ath_softc *sc = an->sc;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_rx_rate_stats *rstats;
+ struct ieee80211_sta *sta = an->sta;
+ enum ieee80211_band band;
+ u32 len = 0, size = 4096;
+ char *buf;
+ size_t retval;
+ int i;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ band = ah->curchan->chan->band;
+ rstats = &an->rx_rate_stats;
+
+ if (!sta->ht_cap.ht_supported)
+ goto legacy;
+
+ len += scnprintf(buf + len, size - len,
+ "%24s%10s%10s%10s\n",
+ "HT20", "HT40", "SGI", "LGI");
+
+ for (i = 0; i < 24; i++) {
+ len += scnprintf(buf + len, size - len,
+ "%8s%3u : %10u%10u%10u%10u\n",
+ "MCS", i,
+ rstats->ht_stats[i].ht20_cnt,
+ rstats->ht_stats[i].ht40_cnt,
+ rstats->ht_stats[i].sgi_cnt,
+ rstats->ht_stats[i].lgi_cnt);
+ }
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+legacy:
+ if (band == IEEE80211_BAND_2GHZ) {
+ PRINT_CCK_RATE("CCK-1M/LP", 0, false);
+ PRINT_CCK_RATE("CCK-2M/LP", 1, false);
+ PRINT_CCK_RATE("CCK-5.5M/LP", 2, false);
+ PRINT_CCK_RATE("CCK-11M/LP", 3, false);
+
+ PRINT_CCK_RATE("CCK-2M/SP", 1, true);
+ PRINT_CCK_RATE("CCK-5.5M/SP", 2, true);
+ PRINT_CCK_RATE("CCK-11M/SP", 3, true);
+ }
+
+ PRINT_OFDM_RATE("OFDM-6M", 0);
+ PRINT_OFDM_RATE("OFDM-9M", 1);
+ PRINT_OFDM_RATE("OFDM-12M", 2);
+ PRINT_OFDM_RATE("OFDM-18M", 3);
+ PRINT_OFDM_RATE("OFDM-24M", 4);
+ PRINT_OFDM_RATE("OFDM-36M", 5);
+ PRINT_OFDM_RATE("OFDM-48M", 6);
+ PRINT_OFDM_RATE("OFDM-54M", 7);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+#undef PRINT_OFDM_RATE
+#undef PRINT_CCK_RATE
+
+static const struct file_operations fops_node_recv = {
+ .read = read_file_node_recv,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir)
+{
+ struct ath_node *an = (struct ath_node *)sta->drv_priv;
+
+ debugfs_create_file("node_aggr", S_IRUGO, dir, an, &fops_node_aggr);
+ debugfs_create_file("node_recv", S_IRUGO, dir, an, &fops_node_recv);
+}
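
ath_debug_rate_stats() buckets received frames by rate: HT frames by MCS index (0-23) with HT20/HT40 and SGI/LGI split, CCK frames by their four rate indices, and OFDM frames into eight slots. The "rate_idx - 4" adjustment on 2 GHz reflects that the 2 GHz legacy rate table lists the four CCK rates first, so OFDM 6M sits at index 4 there but at index 0 on 5 GHz. A tiny illustrative helper (not part of the patch) showing that mapping:

    #include <stdio.h>

    static int ofdm_stats_slot(int rate_idx, int band_2ghz)
    {
            /* On 2 GHz the legacy table starts with CCK 1/2/5.5/11M, so the
             * OFDM rates occupy rate_idx 4..11 and are shifted down; on
             * 5 GHz the table is OFDM-only and maps straight through. */
            return band_2ghz ? rate_idx - 4 : rate_idx;
    }

    int main(void)
    {
            printf("2 GHz, rate_idx 4 (OFDM 6M) -> ofdm_stats[%d]\n",
                   ofdm_stats_slot(4, 1));
            printf("5 GHz, rate_idx 0 (OFDM 6M) -> ofdm_stats[%d]\n",
                   ofdm_stats_slot(0, 0));
            return 0;
    }
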
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 7187d3671512..857bb28b3894 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -158,8 +158,8 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
return;
}
- ard.rssi = rs->rs_rssi_ctl0;
- ard.ext_rssi = rs->rs_rssi_ext0;
+ ard.rssi = rs->rs_rssi_ctl[0];
+ ard.ext_rssi = rs->rs_rssi_ext[0];
/*
* hardware stores this as 8 bit signed value.
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index b4091716e9b3..07b806c56c56 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -1085,31 +1085,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
{
-#define EEP_MAP4K_SPURCHAN \
- (ah->eeprom.map4k.modalHeader.spurChans[i].spurChan)
- struct ath_common *common = ath9k_hw_common(ah);
-
- u16 spur_val = AR_NO_SPUR;
-
- ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
- i, is2GHz, ah->config.spurchans[i][is2GHz]);
-
- switch (ah->config.spurmode) {
- case SPUR_DISABLE:
- break;
- case SPUR_ENABLE_IOCTL:
- spur_val = ah->config.spurchans[i][is2GHz];
- ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
- spur_val);
- break;
- case SPUR_ENABLE_EEPROM:
- spur_val = EEP_MAP4K_SPURCHAN;
- break;
- }
-
- return spur_val;
-
-#undef EEP_MAP4K_SPURCHAN
+ return ah->eeprom.map4k.modalHeader.spurChans[i].spurChan;
}
const struct eeprom_ops eep_4k_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index e1d0c217c104..5ba1385c9838 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -1004,31 +1004,7 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
u16 i, bool is2GHz)
{
-#define EEP_MAP9287_SPURCHAN \
- (ah->eeprom.map9287.modalHeader.spurChans[i].spurChan)
-
- struct ath_common *common = ath9k_hw_common(ah);
- u16 spur_val = AR_NO_SPUR;
-
- ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
- i, is2GHz, ah->config.spurchans[i][is2GHz]);
-
- switch (ah->config.spurmode) {
- case SPUR_DISABLE:
- break;
- case SPUR_ENABLE_IOCTL:
- spur_val = ah->config.spurchans[i][is2GHz];
- ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
- spur_val);
- break;
- case SPUR_ENABLE_EEPROM:
- spur_val = EEP_MAP9287_SPURCHAN;
- break;
- }
-
- return spur_val;
-
-#undef EEP_MAP9287_SPURCHAN
+ return ah->eeprom.map9287.modalHeader.spurChans[i].spurChan;
}
const struct eeprom_ops eep_ar9287_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 39107e31e79a..3218ca994746 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -1348,31 +1348,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
{
-#define EEP_DEF_SPURCHAN \
- (ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan)
- struct ath_common *common = ath9k_hw_common(ah);
-
- u16 spur_val = AR_NO_SPUR;
-
- ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
- i, is2GHz, ah->config.spurchans[i][is2GHz]);
-
- switch (ah->config.spurmode) {
- case SPUR_DISABLE:
- break;
- case SPUR_ENABLE_IOCTL:
- spur_val = ah->config.spurchans[i][is2GHz];
- ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
- spur_val);
- break;
- case SPUR_ENABLE_EEPROM:
- spur_val = EEP_DEF_SPURCHAN;
- break;
- }
-
- return spur_val;
-
-#undef EEP_DEF_SPURCHAN
+ return ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan;
}
const struct eeprom_ops eep_def_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index c34f21241da9..b1956bf6e01e 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -157,36 +157,6 @@ static void ath_detect_bt_priority(struct ath_softc *sc)
}
}
-static void ath9k_gen_timer_start(struct ath_hw *ah,
- struct ath_gen_timer *timer,
- u32 trig_timeout,
- u32 timer_period)
-{
- ath9k_hw_gen_timer_start(ah, timer, trig_timeout, timer_period);
-
- if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
- ath9k_hw_disable_interrupts(ah);
- ah->imask |= ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah);
- ath9k_hw_enable_interrupts(ah);
- }
-}
-
-static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
-{
- struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
-
- ath9k_hw_gen_timer_stop(ah, timer);
-
- /* if no timer is enabled, turn off interrupt mask */
- if (timer_table->timer_mask.val == 0) {
- ath9k_hw_disable_interrupts(ah);
- ah->imask &= ~ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah);
- ath9k_hw_enable_interrupts(ah);
- }
-}
-
static void ath_mci_ftp_adjust(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
@@ -257,19 +227,9 @@ static void ath_btcoex_period_timer(unsigned long data)
spin_unlock_bh(&btcoex->btcoex_lock);
- /*
- * btcoex_period is in msec while (btocex/btscan_)no_stomp are in usec,
- * ensure that we properly convert btcoex_period to usec
- * for any comparision with (btcoex/btscan_)no_stomp.
- */
- if (btcoex->btcoex_period * 1000 != btcoex->btcoex_no_stomp) {
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-
- ath9k_gen_timer_start(ah, btcoex->no_stomp_timer, timer_period,
- timer_period * 10);
- btcoex->hw_timer_enabled = true;
- }
+ if (btcoex->btcoex_period != btcoex->btcoex_no_stomp)
+ mod_timer(&btcoex->no_stomp_timer,
+ jiffies + msecs_to_jiffies(timer_period));
ath9k_ps_restore(sc);
@@ -282,7 +242,7 @@ skip_hw_wakeup:
* Generic tsf based hw timer which configures weight
* registers to time slice between wlan and bt traffic
*/
-static void ath_btcoex_no_stomp_timer(void *arg)
+static void ath_btcoex_no_stomp_timer(unsigned long arg)
{
struct ath_softc *sc = (struct ath_softc *)arg;
struct ath_hw *ah = sc->sc_ah;
@@ -311,24 +271,18 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
struct ath_btcoex *btcoex = &sc->btcoex;
btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
- btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * 1000 *
+ btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
btcoex->btcoex_period / 100;
- btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * 1000 *
+ btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
btcoex->btcoex_period / 100;
setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
(unsigned long) sc);
+ setup_timer(&btcoex->no_stomp_timer, ath_btcoex_no_stomp_timer,
+ (unsigned long) sc);
spin_lock_init(&btcoex->btcoex_lock);
- btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
- ath_btcoex_no_stomp_timer,
- ath_btcoex_no_stomp_timer,
- (void *) sc, AR_FIRST_NDP_TIMER);
-
- if (!btcoex->no_stomp_timer)
- return -ENOMEM;
-
return 0;
}
@@ -343,10 +297,7 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
/* make sure duty cycle timer is also stopped when resuming */
- if (btcoex->hw_timer_enabled) {
- ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
- btcoex->hw_timer_enabled = false;
- }
+ del_timer_sync(&btcoex->no_stomp_timer);
btcoex->bt_priority_cnt = 0;
btcoex->bt_priority_time = jiffies;
@@ -363,24 +314,16 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
void ath9k_btcoex_timer_pause(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
del_timer_sync(&btcoex->period_timer);
-
- if (btcoex->hw_timer_enabled) {
- ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
- btcoex->hw_timer_enabled = false;
- }
+ del_timer_sync(&btcoex->no_stomp_timer);
}
void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
- if (btcoex->hw_timer_enabled) {
- ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
- btcoex->hw_timer_enabled = false;
- }
+ del_timer_sync(&btcoex->no_stomp_timer);
}
u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
@@ -400,12 +343,6 @@ u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status)
{
- struct ath_hw *ah = sc->sc_ah;
-
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
- if (status & ATH9K_INT_GENTIMER)
- ath_gen_timer_isr(sc->sc_ah);
-
if (status & ATH9K_INT_MCI)
ath_mci_intr(sc);
}
@@ -447,10 +384,6 @@ void ath9k_deinit_btcoex(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
- if ((sc->btcoex.no_stomp_timer) &&
- ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
- ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
-
if (ath9k_hw_mci_is_enabled(ah))
ath_mci_cleanup(sc);
}
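
After the conversion from the TSF-based generic timer to a plain kernel timer, both btcoex_period and btcoex_no_stomp are kept in milliseconds, so the duty-cycle split no longer needs the old *1000 unit fixups before being handed to mod_timer(). A short sketch of the arithmetic with illustrative numbers only (the real defaults live in the btcoex headers):

    #include <stdio.h>

    int main(void)
    {
            unsigned int period_ms = 45;    /* hypothetical btcoex period */
            unsigned int duty_cycle = 55;   /* hypothetical WLAN duty cycle, in % */

            /* Same formula as ath_init_btcoex_timer(), now in ms throughout. */
            unsigned int no_stomp_ms = (100 - duty_cycle) * period_ms / 100;

            printf("period   : %u ms\n", period_ms);
            printf("no_stomp : %u ms\n", no_stomp_ms);
            return 0;
    }
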
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 055d7c25e090..99a203174f45 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -262,6 +262,8 @@ enum tid_aggr_state {
struct ath9k_htc_sta {
u8 index;
enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
+ struct work_struct rc_update_work;
+ struct ath9k_htc_priv *htc_priv;
};
#define ATH9K_HTC_RXBUF 256
@@ -600,10 +602,15 @@ void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
struct base_eep_header *ath9k_htc_get_eeprom_base(struct ath9k_htc_priv *priv);
#ifdef CONFIG_MAC80211_LEDS
+void ath9k_configure_leds(struct ath9k_htc_priv *priv);
void ath9k_init_leds(struct ath9k_htc_priv *priv);
void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
void ath9k_led_work(struct work_struct *work);
#else
+static inline void ath9k_configure_leds(struct ath9k_htc_priv *priv)
+{
+}
+
static inline void ath9k_init_leds(struct ath9k_htc_priv *priv)
{
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index e0c03bd64182..8b5757734596 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -70,11 +70,11 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
struct ath9k_beacon_state bs;
enum ath9k_int imask = 0;
int dtimperiod, dtimcount, sleepduration;
- int cfpperiod, cfpcount, bmiss_timeout;
+ int bmiss_timeout;
u32 nexttbtt = 0, intval, tsftu;
__be32 htc_imask = 0;
u64 tsf;
- int num_beacons, offset, dtim_dec_count, cfp_dec_count;
+ int num_beacons, offset, dtim_dec_count;
int ret __attribute__ ((unused));
u8 cmd_rsp;
@@ -84,7 +84,7 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);
/*
- * Setup dtim and cfp parameters according to
+ * Setup dtim parameters according to
* last beacon we received (which may be none).
*/
dtimperiod = bss_conf->dtim_period;
@@ -93,8 +93,6 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
dtimcount = 1;
if (dtimcount >= dtimperiod) /* NB: sanity check */
dtimcount = 0;
- cfpperiod = 1; /* NB: no PCF support yet */
- cfpcount = 0;
sleepduration = intval;
if (sleepduration <= 0)
@@ -102,7 +100,7 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
/*
* Pull nexttbtt forward to reflect the current
- * TSF and calculate dtim+cfp state for the result.
+ * TSF and calculate dtim state for the result.
*/
tsf = ath9k_hw_gettsf64(priv->ah);
tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
@@ -115,26 +113,14 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
/* DTIM Beacon every dtimperiod Beacon */
dtim_dec_count = num_beacons % dtimperiod;
- /* CFP every cfpperiod DTIM Beacon */
- cfp_dec_count = (num_beacons / dtimperiod) % cfpperiod;
- if (dtim_dec_count)
- cfp_dec_count++;
-
dtimcount -= dtim_dec_count;
if (dtimcount < 0)
dtimcount += dtimperiod;
- cfpcount -= cfp_dec_count;
- if (cfpcount < 0)
- cfpcount += cfpperiod;
-
- bs.bs_intval = intval;
- bs.bs_nexttbtt = nexttbtt;
- bs.bs_dtimperiod = dtimperiod*intval;
- bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
- bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
- bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
- bs.bs_cfpmaxduration = 0;
+ bs.bs_intval = TU_TO_USEC(intval);
+ bs.bs_nexttbtt = TU_TO_USEC(nexttbtt);
+ bs.bs_dtimperiod = dtimperiod * bs.bs_intval;
+ bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount * bs.bs_intval;
/*
* Calculate the number of consecutive beacons to miss* before taking
@@ -161,7 +147,8 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
* XXX fixed at 100ms
*/
- bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100), sleepduration);
+ bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
+ sleepduration));
if (bs.bs_sleepduration > bs.bs_dtimperiod)
bs.bs_sleepduration = bs.bs_dtimperiod;
@@ -170,10 +157,8 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
ath_dbg(common, CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
intval, tsf, tsftu);
- ath_dbg(common, CONFIG,
- "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
- bs.bs_bmissthreshold, bs.bs_sleepduration,
- bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
+ ath_dbg(common, CONFIG, "bmiss: %u sleep: %u\n",
+ bs.bs_bmissthreshold, bs.bs_sleepduration);
/* Set the computed STA beacon timers */
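
The beacon-state change above stops handing raw TUs to the timer setup and converts everything to microseconds up front with TU_TO_USEC(); in ath9k a TU is 1024 us, so the conversion is a left shift by 10. A tiny sketch of the conversion, assuming the macro matches its usual definition in hw.h:

#include <linux/types.h>

/* Assumed to match ath9k's hw.h definition: 1 TU = 1024 us. */
#define TU_TO_USEC(_tu)	((_tu) << 10)

/* A 100 TU beacon interval becomes 102400 us when programmed. */
static inline u32 beacon_interval_us(u32 intval_tu)
{
	return TU_TO_USEC(intval_tu);
}
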
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 105582d6b714..50f74a2a4cf8 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -255,6 +255,17 @@ void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
cancel_work_sync(&priv->led_work);
}
+
+void ath9k_configure_leds(struct ath9k_htc_priv *priv)
+{
+ /* Configure gpio 1 for output */
+ ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ /* LED off, active low */
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
+
+}
+
void ath9k_init_leds(struct ath9k_htc_priv *priv)
{
int ret;
@@ -268,11 +279,7 @@ void ath9k_init_leds(struct ath9k_htc_priv *priv)
else
priv->ah->led_pin = ATH_LED_PIN_DEF;
- /* Configure gpio 1 for output */
- ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- /* LED off, active low */
- ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
+ ath9k_configure_leds(priv);
snprintf(priv->led_name, sizeof(priv->led_name),
"ath9k_htc-%s", wiphy_name(priv->hw->wiphy));
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index c3676bf1d6c4..c57d6b859c04 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -34,6 +34,10 @@ static int ath9k_htc_btcoex_enable;
module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
+static int ath9k_ps_enable;
+module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
+MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
+
#define CHAN2G(_freq, _idx) { \
.center_freq = (_freq), \
.hw_value = (_idx), \
@@ -725,12 +729,14 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+ if (ath9k_ps_enable)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
@@ -748,7 +754,6 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
hw->queues = 4;
- hw->channel_change_time = 5000;
hw->max_listen_interval = 1;
hw->vif_data_size = sizeof(struct ath9k_htc_vif);
@@ -1000,6 +1005,8 @@ int ath9k_htc_resume(struct htc_target *htc_handle)
ret = ath9k_init_htc_services(priv, priv->ah->hw_version.devid,
priv->ah->hw_version.usbdev);
+ ath9k_configure_leds(priv);
+
return ret;
}
#endif
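
Power save is now opt-in: the new ps_enable module parameter gates the IEEE80211_HW_SUPPORTS_PS flag at probe time, so the default behaviour leaves PS off. The same module-parameter pattern in isolation, with a hypothetical driver prefix:

#include <linux/module.h>

/* Hypothetical driver-wide knob; read once at init, hence perm 0444. */
static int foo_ps_enable;
module_param_named(ps_enable, foo_ps_enable, int, 0444);
MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");

Loading the module with ps_enable=1 (e.g. modprobe ath9k_htc ps_enable=1) restores the previous power-save capability.
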
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9a2657fdd9cc..c9254a61ca52 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -127,21 +127,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
struct ath9k_vif_iter_data *iter_data = data;
int i;
- for (i = 0; i < ETH_ALEN; i++)
- iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+ if (iter_data->hw_macaddr != NULL) {
+ for (i = 0; i < ETH_ALEN; i++)
+ iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+ } else {
+ iter_data->hw_macaddr = mac;
+ }
}
-static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
+static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath9k_vif_iter_data iter_data;
/*
- * Use the hardware MAC address as reference, the hardware uses it
- * together with the BSSID mask when matching addresses.
+ * Pick the MAC address of the first interface as the new hardware
+ * MAC address. The hardware will use it together with the BSSID mask
+ * when matching addresses.
*/
- iter_data.hw_macaddr = common->macaddr;
+ iter_data.hw_macaddr = NULL;
memset(&iter_data.mask, 0xff, ETH_ALEN);
if (vif)
@@ -153,6 +158,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
ath9k_htc_bssid_iter, &iter_data);
memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+
+ if (iter_data.hw_macaddr)
+ memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
+
ath_hw_setbssidmask(common);
}
@@ -1063,7 +1072,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
goto out;
}
- ath9k_htc_set_bssid_mask(priv, vif);
+ ath9k_htc_set_mac_bssid_mask(priv, vif);
priv->vif_slot |= (1 << avp->index);
priv->nvifs++;
@@ -1128,7 +1137,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
ath9k_htc_set_opmode(priv);
- ath9k_htc_set_bssid_mask(priv, vif);
+ ath9k_htc_set_mac_bssid_mask(priv, vif);
/*
* Stop ANI only if there are no associated station interfaces.
@@ -1261,18 +1270,50 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&priv->mutex);
}
+static void ath9k_htc_sta_rc_update_work(struct work_struct *work)
+{
+ struct ath9k_htc_sta *ista =
+ container_of(work, struct ath9k_htc_sta, rc_update_work);
+ struct ieee80211_sta *sta =
+ container_of((void *)ista, struct ieee80211_sta, drv_priv);
+ struct ath9k_htc_priv *priv = ista->htc_priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_rate trate;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+ ath9k_htc_setup_rate(priv, sta, &trate);
+ if (!ath9k_htc_send_rate_cmd(priv, &trate))
+ ath_dbg(common, CONFIG,
+ "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
+ sta->addr, be32_to_cpu(trate.capflags));
+ else
+ ath_dbg(common, CONFIG,
+ "Unable to update supported rates for sta: %pM\n",
+ sta->addr);
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
static int ath9k_htc_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
int ret;
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
ret = ath9k_htc_add_station(priv, vif, sta);
- if (!ret)
+ if (!ret) {
+ INIT_WORK(&ista->rc_update_work, ath9k_htc_sta_rc_update_work);
+ ista->htc_priv = priv;
ath9k_htc_init_rate(priv, sta);
+ }
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
@@ -1284,12 +1325,13 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct ath9k_htc_priv *priv = hw->priv;
- struct ath9k_htc_sta *ista;
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
int ret;
+ cancel_work_sync(&ista->rc_update_work);
+
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
- ista = (struct ath9k_htc_sta *) sta->drv_priv;
htc_sta_drain(priv->htc, ista->index);
ret = ath9k_htc_remove_station(priv, vif, sta);
ath9k_htc_ps_restore(priv);
@@ -1302,28 +1344,12 @@ static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u32 changed)
{
- struct ath9k_htc_priv *priv = hw->priv;
- struct ath_common *common = ath9k_hw_common(priv->ah);
- struct ath9k_htc_target_rate trate;
-
- mutex_lock(&priv->mutex);
- ath9k_htc_ps_wakeup(priv);
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
- memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
- ath9k_htc_setup_rate(priv, sta, &trate);
- if (!ath9k_htc_send_rate_cmd(priv, &trate))
- ath_dbg(common, CONFIG,
- "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
- sta->addr, be32_to_cpu(trate.capflags));
- else
- ath_dbg(common, CONFIG,
- "Unable to update supported rates for sta: %pM\n",
- sta->addr);
- }
+ if (!(changed & IEEE80211_RC_SUPP_RATES_CHANGED))
+ return;
- ath9k_htc_ps_restore(priv);
- mutex_unlock(&priv->mutex);
+ schedule_work(&ista->rc_update_work);
}
static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
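
The sta_rc_update callback can run in atomic context, so the htc_drv_main.c hunks above move the sleeping work (taking priv->mutex, sending the firmware rate command) into a per-station work_struct that is initialised in sta_add, scheduled from the callback, and cancelled in sta_remove. A stripped-down sketch of that deferral pattern, with hypothetical names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo_sta {
	struct work_struct rc_update_work;
	int id;
};

static void foo_rc_update_work(struct work_struct *work)
{
	struct foo_sta *sta = container_of(work, struct foo_sta,
					   rc_update_work);

	/* Sleeping operations (mutexes, firmware commands) are safe here. */
	pr_info("updating rates for sta %d\n", sta->id);
}

static void foo_sta_add(struct foo_sta *sta)
{
	INIT_WORK(&sta->rc_update_work, foo_rc_update_work);
}

/* Called from atomic context: only queue the work, never sleep. */
static void foo_sta_rc_update(struct foo_sta *sta)
{
	schedule_work(&sta->rc_update_work);
}

static void foo_sta_remove(struct foo_sta *sta)
{
	/* Make sure the work has finished before the station goes away. */
	cancel_work_sync(&sta->rc_update_work);
}
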
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index c028df76b564..12e0f32a4905 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1075,9 +1075,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
last_rssi = priv->rx.last_rssi;
- if (ieee80211_is_beacon(hdr->frame_control) &&
- !is_zero_ether_addr(common->curbssid) &&
- ether_addr_equal(hdr->addr3, common->curbssid)) {
+ if (ath_is_mybeacon(common, hdr)) {
s8 rssi = rxbuf->rxstatus.rs_rssi;
if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
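
The open-coded "beacon from our BSS" test is replaced with the shared ath_is_mybeacon() helper. Judging from the removed lines, the helper is assumed to be equivalent to the sketch below; the real implementation lives in the common ath code:

#include <linux/etherdevice.h>
#include <linux/ieee80211.h>

/* Sketch of what the shared helper is assumed to check. */
static bool my_is_mybeacon(const u8 *curbssid, struct ieee80211_hdr *hdr)
{
	return ieee80211_is_beacon(hdr->frame_control) &&
	       !is_zero_ether_addr(curbssid) &&
	       ether_addr_equal(hdr->addr3, curbssid);
}
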
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 4f9378ddf07f..a47ea8423f1e 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -49,9 +49,10 @@ static inline bool ath9k_hw_calibrate(struct ath_hw *ah,
return ath9k_hw_ops(ah)->calibrate(ah, chan, rxchainmask, longcal);
}
-static inline bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
+static inline bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked,
+ u32 *sync_cause_p)
{
- return ath9k_hw_ops(ah)->get_isr(ah, masked);
+ return ath9k_hw_ops(ah)->get_isr(ah, masked, sync_cause_p);
}
static inline void ath9k_hw_set_txdesc(struct ath_hw *ah, void *ds,
@@ -106,6 +107,21 @@ static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
/* Private hardware call ops */
+static inline void ath9k_hw_init_hang_checks(struct ath_hw *ah)
+{
+ ath9k_hw_private_ops(ah)->init_hang_checks(ah);
+}
+
+static inline bool ath9k_hw_detect_mac_hang(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->detect_mac_hang(ah);
+}
+
+static inline bool ath9k_hw_detect_bb_hang(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->detect_bb_hang(ah);
+}
+
/* PHY ops */
static inline int ath9k_hw_rf_set_freq(struct ath_hw *ah,
@@ -231,4 +247,31 @@ static inline void ath9k_hw_set_radar_params(struct ath_hw *ah)
ath9k_hw_private_ops(ah)->set_radar_params(ah, &ah->radar_conf);
}
+static inline void ath9k_hw_init_cal_settings(struct ath_hw *ah)
+{
+ ath9k_hw_private_ops(ah)->init_cal_settings(ah);
+}
+
+static inline u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan);
+}
+
+static inline void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs)
+ return;
+
+ ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
+}
+
+static inline void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs)
+ return;
+
+ ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah);
+}
+
#endif /* ATH9K_HW_OPS_H */
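
The new hw-ops.h wrappers follow the driver's private-ops indirection: mandatory callbacks are called unconditionally, while optional hooks (init_mode_gain_regs, ani_cache_ini_regs) are guarded with a NULL check so chip families that do not provide them simply skip the call. The same optional-ops pattern in miniature, names hypothetical:

struct foo_ops {
	void (*mandatory_setup)(void *ctx);
	void (*optional_tune)(void *ctx);	/* may be NULL */
};

static inline void foo_setup(const struct foo_ops *ops, void *ctx)
{
	ops->mandatory_setup(ctx);
}

static inline void foo_tune(const struct foo_ops *ops, void *ctx)
{
	if (!ops->optional_tune)
		return;

	ops->optional_tune(ctx);
}
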
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8918035da3a3..11eab9f01fd8 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -17,6 +17,8 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/bitops.h>
#include <asm/unaligned.h>
#include "hw.h"
@@ -35,99 +37,6 @@ MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
-static int __init ath9k_init(void)
-{
- return 0;
-}
-module_init(ath9k_init);
-
-static void __exit ath9k_exit(void)
-{
- return;
-}
-module_exit(ath9k_exit);
-
-/* Private hardware callbacks */
-
-static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
-{
- ath9k_hw_private_ops(ah)->init_cal_settings(ah);
-}
-
-static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan);
-}
-
-static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
-{
- if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs)
- return;
-
- ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
-}
-
-static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
-{
- /* You will not have this callback if using the old ANI */
- if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs)
- return;
-
- ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah);
-}
-
-/********************/
-/* Helper Functions */
-/********************/
-
-#ifdef CONFIG_ATH9K_DEBUGFS
-
-void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause)
-{
- struct ath_softc *sc = common->priv;
- if (sync_cause)
- sc->debug.stats.istats.sync_cause_all++;
- if (sync_cause & AR_INTR_SYNC_RTC_IRQ)
- sc->debug.stats.istats.sync_rtc_irq++;
- if (sync_cause & AR_INTR_SYNC_MAC_IRQ)
- sc->debug.stats.istats.sync_mac_irq++;
- if (sync_cause & AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS)
- sc->debug.stats.istats.eeprom_illegal_access++;
- if (sync_cause & AR_INTR_SYNC_APB_TIMEOUT)
- sc->debug.stats.istats.apb_timeout++;
- if (sync_cause & AR_INTR_SYNC_PCI_MODE_CONFLICT)
- sc->debug.stats.istats.pci_mode_conflict++;
- if (sync_cause & AR_INTR_SYNC_HOST1_FATAL)
- sc->debug.stats.istats.host1_fatal++;
- if (sync_cause & AR_INTR_SYNC_HOST1_PERR)
- sc->debug.stats.istats.host1_perr++;
- if (sync_cause & AR_INTR_SYNC_TRCV_FIFO_PERR)
- sc->debug.stats.istats.trcv_fifo_perr++;
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_EP)
- sc->debug.stats.istats.radm_cpl_ep++;
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_DLLP_ABORT)
- sc->debug.stats.istats.radm_cpl_dllp_abort++;
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_TLP_ABORT)
- sc->debug.stats.istats.radm_cpl_tlp_abort++;
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_ECRC_ERR)
- sc->debug.stats.istats.radm_cpl_ecrc_err++;
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT)
- sc->debug.stats.istats.radm_cpl_timeout++;
- if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
- sc->debug.stats.istats.local_timeout++;
- if (sync_cause & AR_INTR_SYNC_PM_ACCESS)
- sc->debug.stats.istats.pm_access++;
- if (sync_cause & AR_INTR_SYNC_MAC_AWAKE)
- sc->debug.stats.istats.mac_awake++;
- if (sync_cause & AR_INTR_SYNC_MAC_ASLEEP)
- sc->debug.stats.istats.mac_asleep++;
- if (sync_cause & AR_INTR_SYNC_MAC_SLEEP_ACCESS)
- sc->debug.stats.istats.mac_sleep_access++;
-}
-#endif
-
-
static void ath9k_hw_set_clockrate(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -336,6 +245,9 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
case AR9300_DEVID_QCA955X:
ah->hw_version.macVersion = AR_SREV_VERSION_9550;
return;
+ case AR9300_DEVID_AR953X:
+ ah->hw_version.macVersion = AR_SREV_VERSION_9531;
+ return;
}
val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
@@ -437,23 +349,22 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
static void ath9k_hw_init_config(struct ath_hw *ah)
{
- int i;
+ struct ath_common *common = ath9k_hw_common(ah);
ah->config.dma_beacon_response_time = 1;
ah->config.sw_beacon_response_time = 6;
- ah->config.additional_swba_backoff = 0;
- ah->config.ack_6mb = 0x0;
ah->config.cwm_ignore_extcca = 0;
- ah->config.pcie_clock_req = 0;
ah->config.analog_shiftreg = 1;
- for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
- ah->config.spurchans[i][0] = AR_NO_SPUR;
- ah->config.spurchans[i][1] = AR_NO_SPUR;
- }
-
ah->config.rx_intr_mitigation = true;
- ah->config.pcieSerDesWrite = true;
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ah->config.rimt_last = 500;
+ ah->config.rimt_first = 2000;
+ } else {
+ ah->config.rimt_last = 250;
+ ah->config.rimt_first = 700;
+ }
/*
* We need this for PCI devices only (Cardbus, PCI, miniPCI)
@@ -473,6 +384,24 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
*/
if (num_possible_cpus() > 1)
ah->config.serialize_regmode = SER_REG_MODE_AUTO;
+
+ if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
+ if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
+ ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
+ !ah->is_pciexpress)) {
+ ah->config.serialize_regmode = SER_REG_MODE_ON;
+ } else {
+ ah->config.serialize_regmode = SER_REG_MODE_OFF;
+ }
+ }
+
+ ath_dbg(common, RESET, "serialize_regmode is %d\n",
+ ah->config.serialize_regmode);
+
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+ ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1;
+ else
+ ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
}
static void ath9k_hw_init_defaults(struct ath_hw *ah)
@@ -485,16 +414,24 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->hw_version.magic = AR5416_MAGIC;
ah->hw_version.subvendorid = 0;
- ah->atim_window = 0;
- ah->sta_id1_defaults =
- AR_STA_ID1_CRPT_MIC_ENABLE |
- AR_STA_ID1_MCAST_KSRCH;
+ ah->sta_id1_defaults = AR_STA_ID1_CRPT_MIC_ENABLE |
+ AR_STA_ID1_MCAST_KSRCH;
if (AR_SREV_9100(ah))
ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
+
ah->slottime = ATH9K_SLOT_TIME_9;
ah->globaltxtimeout = (u32) -1;
ah->power_mode = ATH9K_PM_UNDEFINED;
ah->htc_reset_init = true;
+
+ ah->ani_function = ATH9K_ANI_ALL;
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
+
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+ ah->tx_trig_level = (AR_FTRIG_256B >> AR_FTRIG_S);
+ else
+ ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
}
static int ath9k_hw_init_macaddr(struct ath_hw *ah)
@@ -548,11 +485,11 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
* EEPROM needs to be initialized before we do this.
* This is required for regulatory compliance.
*/
- if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
u16 regdmn = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
if ((regdmn & 0xF0) == CTL_FCC) {
- ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ;
- ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ;
+ ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_FCC_2GHZ;
+ ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_FCC_5GHZ;
}
}
@@ -576,6 +513,31 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ath9k_hw_read_revisions(ah);
+ switch (ah->hw_version.macVersion) {
+ case AR_SREV_VERSION_5416_PCI:
+ case AR_SREV_VERSION_5416_PCIE:
+ case AR_SREV_VERSION_9160:
+ case AR_SREV_VERSION_9100:
+ case AR_SREV_VERSION_9280:
+ case AR_SREV_VERSION_9285:
+ case AR_SREV_VERSION_9287:
+ case AR_SREV_VERSION_9271:
+ case AR_SREV_VERSION_9300:
+ case AR_SREV_VERSION_9330:
+ case AR_SREV_VERSION_9485:
+ case AR_SREV_VERSION_9340:
+ case AR_SREV_VERSION_9462:
+ case AR_SREV_VERSION_9550:
+ case AR_SREV_VERSION_9565:
+ case AR_SREV_VERSION_9531:
+ break;
+ default:
+ ath_err(common,
+ "Mac Chip Rev 0x%02x.%x is not supported by this driver\n",
+ ah->hw_version.macVersion, ah->hw_version.macRev);
+ return -EOPNOTSUPP;
+ }
+
/*
* Read back AR_WA into a permanent copy and set bits 14 and 17.
* We need to do this to avoid RMW of this register. We cannot
@@ -609,50 +571,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
return -EIO;
}
- if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
- if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
- ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
- !ah->is_pciexpress)) {
- ah->config.serialize_regmode =
- SER_REG_MODE_ON;
- } else {
- ah->config.serialize_regmode =
- SER_REG_MODE_OFF;
- }
- }
-
- ath_dbg(common, RESET, "serialize_regmode is %d\n",
- ah->config.serialize_regmode);
-
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
- ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1;
- else
- ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
-
- switch (ah->hw_version.macVersion) {
- case AR_SREV_VERSION_5416_PCI:
- case AR_SREV_VERSION_5416_PCIE:
- case AR_SREV_VERSION_9160:
- case AR_SREV_VERSION_9100:
- case AR_SREV_VERSION_9280:
- case AR_SREV_VERSION_9285:
- case AR_SREV_VERSION_9287:
- case AR_SREV_VERSION_9271:
- case AR_SREV_VERSION_9300:
- case AR_SREV_VERSION_9330:
- case AR_SREV_VERSION_9485:
- case AR_SREV_VERSION_9340:
- case AR_SREV_VERSION_9462:
- case AR_SREV_VERSION_9550:
- case AR_SREV_VERSION_9565:
- break;
- default:
- ath_err(common,
- "Mac Chip Rev 0x%02x.%x is not supported by this driver\n",
- ah->hw_version.macVersion, ah->hw_version.macRev);
- return -EOPNOTSUPP;
- }
-
if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah) ||
AR_SREV_9330(ah) || AR_SREV_9550(ah))
ah->is_pciexpress = false;
@@ -660,10 +578,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
ath9k_hw_init_cal_settings(ah);
- ah->ani_function = ATH9K_ANI_ALL;
- if (!AR_SREV_9300_20_OR_LATER(ah))
- ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
-
if (!ah->is_pciexpress)
ath9k_hw_disablepcie(ah);
@@ -682,15 +596,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
return r;
}
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
- ah->tx_trig_level = (AR_FTRIG_256B >> AR_FTRIG_S);
- else
- ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
-
- if (AR_SREV_9330(ah))
- ah->bb_watchdog_timeout_ms = 85;
- else
- ah->bb_watchdog_timeout_ms = 25;
+ ath9k_hw_init_hang_checks(ah);
common->state = ATH_HW_INITIALIZED;
@@ -723,6 +629,7 @@ int ath9k_hw_init(struct ath_hw *ah)
case AR9300_DEVID_AR9462:
case AR9485_DEVID_AR1111:
case AR9300_DEVID_AR9565:
+ case AR9300_DEVID_AR953X:
break;
default:
if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -858,7 +765,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
/* program BB PLL phase_shift */
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1);
- } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
+ } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
u32 regval, pll2_divint, pll2_divfrac, refdiv;
REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
@@ -868,9 +775,15 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
udelay(100);
if (ah->is_clk_25mhz) {
- pll2_divint = 0x54;
- pll2_divfrac = 0x1eb85;
- refdiv = 3;
+ if (AR_SREV_9531(ah)) {
+ pll2_divint = 0x1c;
+ pll2_divfrac = 0xa3d2;
+ refdiv = 1;
+ } else {
+ pll2_divint = 0x54;
+ pll2_divfrac = 0x1eb85;
+ refdiv = 3;
+ }
} else {
if (AR_SREV_9340(ah)) {
pll2_divint = 88;
@@ -884,7 +797,10 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
}
regval = REG_READ(ah, AR_PHY_PLL_MODE);
- regval |= (0x1 << 16);
+ if (AR_SREV_9531(ah))
+ regval |= (0x1 << 22);
+ else
+ regval |= (0x1 << 16);
REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
udelay(100);
@@ -894,14 +810,33 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
regval = REG_READ(ah, AR_PHY_PLL_MODE);
if (AR_SREV_9340(ah))
- regval = (regval & 0x80071fff) | (0x1 << 30) |
- (0x1 << 13) | (0x4 << 26) | (0x18 << 19);
+ regval = (regval & 0x80071fff) |
+ (0x1 << 30) |
+ (0x1 << 13) |
+ (0x4 << 26) |
+ (0x18 << 19);
+ else if (AR_SREV_9531(ah))
+ regval = (regval & 0x01c00fff) |
+ (0x1 << 31) |
+ (0x2 << 29) |
+ (0xa << 25) |
+ (0x1 << 19) |
+ (0x6 << 12);
else
- regval = (regval & 0x80071fff) | (0x3 << 30) |
- (0x1 << 13) | (0x4 << 26) | (0x60 << 19);
+ regval = (regval & 0x80071fff) |
+ (0x3 << 30) |
+ (0x1 << 13) |
+ (0x4 << 26) |
+ (0x60 << 19);
REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
- REG_WRITE(ah, AR_PHY_PLL_MODE,
- REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff);
+
+ if (AR_SREV_9531(ah))
+ REG_WRITE(ah, AR_PHY_PLL_MODE,
+ REG_READ(ah, AR_PHY_PLL_MODE) & 0xffbfffff);
+ else
+ REG_WRITE(ah, AR_PHY_PLL_MODE,
+ REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff);
+
udelay(1000);
}
@@ -1281,6 +1216,42 @@ void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
*coef_exponent = coef_exp - 16;
}
+/* AR9330 WAR:
+ * call external reset function to reset WMAC if:
+ * - doing a cold reset
+ * - we have pending frames in the TX queues.
+ */
+static bool ath9k_hw_ar9330_reset_war(struct ath_hw *ah, int type)
+{
+ int i, npend = 0;
+
+ for (i = 0; i < AR_NUM_QCU; i++) {
+ npend = ath9k_hw_numtxpending(ah, i);
+ if (npend)
+ break;
+ }
+
+ if (ah->external_reset &&
+ (npend || type == ATH9K_RESET_COLD)) {
+ int reset_err = 0;
+
+ ath_dbg(ath9k_hw_common(ah), RESET,
+ "reset MAC via external reset\n");
+
+ reset_err = ah->external_reset();
+ if (reset_err) {
+ ath_err(ath9k_hw_common(ah),
+ "External reset failed, err=%d\n",
+ reset_err);
+ return false;
+ }
+
+ REG_WRITE(ah, AR_RTC_RESET, 1);
+ }
+
+ return true;
+}
+
static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
{
u32 rst_flags;
@@ -1331,38 +1302,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
}
if (AR_SREV_9330(ah)) {
- int npend = 0;
- int i;
-
- /* AR9330 WAR:
- * call external reset function to reset WMAC if:
- * - doing a cold reset
- * - we have pending frames in the TX queues
- */
-
- for (i = 0; i < AR_NUM_QCU; i++) {
- npend = ath9k_hw_numtxpending(ah, i);
- if (npend)
- break;
- }
-
- if (ah->external_reset &&
- (npend || type == ATH9K_RESET_COLD)) {
- int reset_err = 0;
-
- ath_dbg(ath9k_hw_common(ah), RESET,
- "reset MAC via external reset\n");
-
- reset_err = ah->external_reset();
- if (reset_err) {
- ath_err(ath9k_hw_common(ah),
- "External reset failed, err=%d\n",
- reset_err);
- return false;
- }
-
- REG_WRITE(ah, AR_RTC_RESET, 1);
- }
+ if (!ath9k_hw_ar9330_reset_war(ah, type))
+ return false;
}
if (ath9k_hw_mci_is_enabled(ah))
@@ -1372,7 +1313,12 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
REGWRITE_BUFFER_FLUSH(ah);
- udelay(50);
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ udelay(50);
+ else if (AR_SREV_9100(ah))
+ mdelay(10);
+ else
+ udelay(100);
REG_WRITE(ah, AR_RTC_RC, 0);
if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
@@ -1408,8 +1354,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
REGWRITE_BUFFER_FLUSH(ah);
- if (!AR_SREV_9300_20_OR_LATER(ah))
- udelay(2);
+ udelay(2);
if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, 0);
@@ -1485,7 +1430,6 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
if (AR_SREV_9330(ah))
ar9003_hw_internal_regulator_apply(ah);
ath9k_hw_init_pll(ah, chan);
- ath9k_hw_set_rfmode(ah, chan);
return true;
}
@@ -1501,8 +1445,9 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
int r;
if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) {
- band_switch = IS_CHAN_5GHZ(ah->curchan) != IS_CHAN_5GHZ(chan);
- mode_diff = (chan->channelFlags != ah->curchan->channelFlags);
+ u32 flags_diff = chan->channelFlags ^ ah->curchan->channelFlags;
+ band_switch = !!(flags_diff & CHANNEL_5GHZ);
+ mode_diff = !!(flags_diff & ~CHANNEL_HT);
}
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1573,76 +1518,6 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
}
}
-static bool ath9k_hw_check_dcs(u32 dma_dbg, u32 num_dcu_states,
- int *hang_state, int *hang_pos)
-{
- static u32 dcu_chain_state[] = {5, 6, 9}; /* DCU chain stuck states */
- u32 chain_state, dcs_pos, i;
-
- for (dcs_pos = 0; dcs_pos < num_dcu_states; dcs_pos++) {
- chain_state = (dma_dbg >> (5 * dcs_pos)) & 0x1f;
- for (i = 0; i < 3; i++) {
- if (chain_state == dcu_chain_state[i]) {
- *hang_state = chain_state;
- *hang_pos = dcs_pos;
- return true;
- }
- }
- }
- return false;
-}
-
-#define DCU_COMPLETE_STATE 1
-#define DCU_COMPLETE_STATE_MASK 0x3
-#define NUM_STATUS_READS 50
-static bool ath9k_hw_detect_mac_hang(struct ath_hw *ah)
-{
- u32 chain_state, comp_state, dcs_reg = AR_DMADBG_4;
- u32 i, hang_pos, hang_state, num_state = 6;
-
- comp_state = REG_READ(ah, AR_DMADBG_6);
-
- if ((comp_state & DCU_COMPLETE_STATE_MASK) != DCU_COMPLETE_STATE) {
- ath_dbg(ath9k_hw_common(ah), RESET,
- "MAC Hang signature not found at DCU complete\n");
- return false;
- }
-
- chain_state = REG_READ(ah, dcs_reg);
- if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos))
- goto hang_check_iter;
-
- dcs_reg = AR_DMADBG_5;
- num_state = 4;
- chain_state = REG_READ(ah, dcs_reg);
- if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos))
- goto hang_check_iter;
-
- ath_dbg(ath9k_hw_common(ah), RESET,
- "MAC Hang signature 1 not found\n");
- return false;
-
-hang_check_iter:
- ath_dbg(ath9k_hw_common(ah), RESET,
- "DCU registers: chain %08x complete %08x Hang: state %d pos %d\n",
- chain_state, comp_state, hang_state, hang_pos);
-
- for (i = 0; i < NUM_STATUS_READS; i++) {
- chain_state = REG_READ(ah, dcs_reg);
- chain_state = (chain_state >> (5 * hang_pos)) & 0x1f;
- comp_state = REG_READ(ah, AR_DMADBG_6);
-
- if (((comp_state & DCU_COMPLETE_STATE_MASK) !=
- DCU_COMPLETE_STATE) ||
- (chain_state != hang_state))
- return false;
- }
-
- ath_dbg(ath9k_hw_common(ah), RESET, "MAC Hang signature 1 found\n");
-
- return true;
-}
-
void ath9k_hw_check_nav(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -1717,7 +1592,6 @@ static void ath9k_hw_reset_opmode(struct ath_hw *ah,
REG_RMW(ah, AR_STA_ID1, macStaId1
| AR_STA_ID1_RTS_USE_DEF
- | (ah->config.ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
| ah->sta_id1_defaults,
~AR_STA_ID1_SADH_MASK);
ath_hw_setbssidmask(common);
@@ -1776,7 +1650,7 @@ static void ath9k_hw_init_desc(struct ath_hw *ah)
}
#ifdef __BIG_ENDIAN
else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
- AR_SREV_9550(ah))
+ AR_SREV_9550(ah) || AR_SREV_9531(ah))
REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
else
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
@@ -1814,7 +1688,7 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
* If cross-band fcc is not supoprted, bail out if channelFlags differ.
*/
if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) &&
- chan->channelFlags != ah->curchan->channelFlags)
+ ((chan->channelFlags ^ ah->curchan->channelFlags) & ~CHANNEL_HT))
goto fail;
if (!ath9k_hw_check_alive(ah))
@@ -1855,10 +1729,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath9k_hw_cal_data *caldata, bool fastcc)
{
struct ath_common *common = ath9k_hw_common(ah);
+ struct timespec ts;
u32 saveLedState;
u32 saveDefAntenna;
u32 macStaId1;
u64 tsf = 0;
+ s64 usec = 0;
int r;
bool start_mci_reset = false;
bool save_fullsleep = ah->chip_fullsleep;
@@ -1901,10 +1777,10 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
- /* For chips on which RTC reset is done, save TSF before it gets cleared */
- if (AR_SREV_9100(ah) ||
- (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)))
- tsf = ath9k_hw_gettsf64(ah);
+ /* Save TSF before chip reset, a cold reset clears it */
+ tsf = ath9k_hw_gettsf64(ah);
+ getrawmonotonic(&ts);
+ usec = ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
saveLedState = REG_READ(ah, AR_CFG_LED) &
(AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
@@ -1937,8 +1813,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
}
/* Restore TSF */
- if (tsf)
- ath9k_hw_settsf64(ah, tsf);
+ getrawmonotonic(&ts);
+ usec = ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000 - usec;
+ ath9k_hw_settsf64(ah, tsf + usec);
if (AR_SREV_9280_20_OR_LATER(ah))
REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
@@ -1950,6 +1827,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (r)
return r;
+ ath9k_hw_set_rfmode(ah, chan);
+
if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
@@ -2005,8 +1884,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_WRITE(ah, AR_OBS, 8);
if (ah->config.rx_intr_mitigation) {
- REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
- REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
+ REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, ah->config.rimt_last);
+ REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, ah->config.rimt_first);
}
if (ah->config.tx_intr_mitigation) {
@@ -2044,10 +1923,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_loadnf(ah, chan);
ath9k_hw_start_nfcal(ah, true);
- if (AR_SREV_9300_20_OR_LATER(ah)) {
+ if (AR_SREV_9300_20_OR_LATER(ah))
ar9003_hw_bb_watchdog_config(ah);
+
+ if (ah->config.hw_hang_checks & HW_PHYRESTART_CLC_WAR)
ar9003_hw_disable_phy_restart(ah);
- }
ath9k_hw_apply_gpio_override(ah);
@@ -2171,7 +2051,10 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
- udelay(50);
+ if (AR_SREV_9100(ah))
+ mdelay(10);
+ else
+ udelay(50);
for (i = POWER_UP_TIME / 50; i > 0; i--) {
val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
@@ -2260,9 +2143,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
case NL80211_IFTYPE_ADHOC:
REG_SET_BIT(ah, AR_TXCFG,
AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
- REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon +
- TU_TO_USEC(ah->atim_window ? ah->atim_window : 1));
- flags |= AR_NDP_TIMER_EN;
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
@@ -2283,7 +2163,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
REG_WRITE(ah, AR_BEACON_PERIOD, beacon_period);
REG_WRITE(ah, AR_DMA_BEACON_PERIOD, beacon_period);
REG_WRITE(ah, AR_SWBA_PERIOD, beacon_period);
- REG_WRITE(ah, AR_NDP_PERIOD, beacon_period);
REGWRITE_BUFFER_FLUSH(ah);
@@ -2300,12 +2179,9 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
ENABLE_REGWRITE_BUFFER(ah);
- REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
-
- REG_WRITE(ah, AR_BEACON_PERIOD,
- TU_TO_USEC(bs->bs_intval));
- REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
- TU_TO_USEC(bs->bs_intval));
+ REG_WRITE(ah, AR_NEXT_TBTT_TIMER, bs->bs_nexttbtt);
+ REG_WRITE(ah, AR_BEACON_PERIOD, bs->bs_intval);
+ REG_WRITE(ah, AR_DMA_BEACON_PERIOD, bs->bs_intval);
REGWRITE_BUFFER_FLUSH(ah);
@@ -2333,9 +2209,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
ENABLE_REGWRITE_BUFFER(ah);
- REG_WRITE(ah, AR_NEXT_DTIM,
- TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
- REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));
+ REG_WRITE(ah, AR_NEXT_DTIM, bs->bs_nextdtim - SLEEP_SLOP);
+ REG_WRITE(ah, AR_NEXT_TIM, nextTbtt - SLEEP_SLOP);
REG_WRITE(ah, AR_SLEEP1,
SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT)
@@ -2349,8 +2224,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
REG_WRITE(ah, AR_SLEEP2,
SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT));
- REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
- REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));
+ REG_WRITE(ah, AR_TIM_PERIOD, beaconintval);
+ REG_WRITE(ah, AR_DTIM_PERIOD, dtimperiod);
REGWRITE_BUFFER_FLUSH(ah);
@@ -2608,13 +2483,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
- /*
- * Fast channel change across bands is available
- * only for AR9462 and AR9565.
- */
- if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
- pCap->hw_caps |= ATH9K_HW_CAP_FCC_BAND_SWITCH;
-
return 0;
}
@@ -2986,20 +2854,6 @@ static const struct ath_gen_timer_configuration gen_tmr_configuration[] =
/* HW generic timer primitives */
-/* compute and clear index of rightmost 1 */
-static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask)
-{
- u32 b;
-
- b = *mask;
- b &= (0-b);
- *mask &= ~b;
- b *= debruijn32;
- b >>= 27;
-
- return timer_table->gen_timer_index[b];
-}
-
u32 ath9k_hw_gettsf32(struct ath_hw *ah)
{
return REG_READ(ah, AR_TSF_L32);
@@ -3015,6 +2869,10 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
struct ath_gen_timer *timer;
+ if ((timer_index < AR_FIRST_NDP_TIMER) ||
+ (timer_index >= ATH_MAX_GEN_TIMER))
+ return NULL;
+
timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
if (timer == NULL)
return NULL;
@@ -3032,23 +2890,13 @@ EXPORT_SYMBOL(ath_gen_timer_alloc);
void ath9k_hw_gen_timer_start(struct ath_hw *ah,
struct ath_gen_timer *timer,
- u32 trig_timeout,
+ u32 timer_next,
u32 timer_period)
{
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
- u32 tsf, timer_next;
-
- BUG_ON(!timer_period);
-
- set_bit(timer->index, &timer_table->timer_mask.timer_bits);
+ u32 mask = 0;
- tsf = ath9k_hw_gettsf32(ah);
-
- timer_next = tsf + trig_timeout;
-
- ath_dbg(ath9k_hw_common(ah), BTCOEX,
- "current tsf %x period %x timer_next %x\n",
- tsf, timer_period, timer_next);
+ timer_table->timer_mask |= BIT(timer->index);
/*
* Program generic timer registers
@@ -3074,10 +2922,19 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
(1 << timer->index));
}
- /* Enable both trigger and thresh interrupt masks */
- REG_SET_BIT(ah, AR_IMR_S5,
- (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
- SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
+ if (timer->trigger)
+ mask |= SM(AR_GENTMR_BIT(timer->index),
+ AR_IMR_S5_GENTIMER_TRIG);
+ if (timer->overflow)
+ mask |= SM(AR_GENTMR_BIT(timer->index),
+ AR_IMR_S5_GENTIMER_THRESH);
+
+ REG_SET_BIT(ah, AR_IMR_S5, mask);
+
+ if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
+ ah->imask |= ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah);
+ }
}
EXPORT_SYMBOL(ath9k_hw_gen_timer_start);
@@ -3085,11 +2942,6 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
{
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
- if ((timer->index < AR_FIRST_NDP_TIMER) ||
- (timer->index >= ATH_MAX_GEN_TIMER)) {
- return;
- }
-
/* Clear generic timer enable bits. */
REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
gen_tmr_configuration[timer->index].mode_mask);
@@ -3109,7 +2961,12 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
(SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
- clear_bit(timer->index, &timer_table->timer_mask.timer_bits);
+ timer_table->timer_mask &= ~BIT(timer->index);
+
+ if (timer_table->timer_mask == 0) {
+ ah->imask &= ~ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah);
+ }
}
EXPORT_SYMBOL(ath9k_hw_gen_timer_stop);
@@ -3130,32 +2987,32 @@ void ath_gen_timer_isr(struct ath_hw *ah)
{
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
struct ath_gen_timer *timer;
- struct ath_common *common = ath9k_hw_common(ah);
- u32 trigger_mask, thresh_mask, index;
+ unsigned long trigger_mask, thresh_mask;
+ unsigned int index;
/* get hardware generic timer interrupt status */
trigger_mask = ah->intr_gen_timer_trigger;
thresh_mask = ah->intr_gen_timer_thresh;
- trigger_mask &= timer_table->timer_mask.val;
- thresh_mask &= timer_table->timer_mask.val;
-
- trigger_mask &= ~thresh_mask;
+ trigger_mask &= timer_table->timer_mask;
+ thresh_mask &= timer_table->timer_mask;
- while (thresh_mask) {
- index = rightmost_index(timer_table, &thresh_mask);
+ for_each_set_bit(index, &thresh_mask, ARRAY_SIZE(timer_table->timers)) {
timer = timer_table->timers[index];
- BUG_ON(!timer);
- ath_dbg(common, BTCOEX, "TSF overflow for Gen timer %d\n",
- index);
+ if (!timer)
+ continue;
+ if (!timer->overflow)
+ continue;
+
+ trigger_mask &= ~BIT(index);
timer->overflow(timer->arg);
}
- while (trigger_mask) {
- index = rightmost_index(timer_table, &trigger_mask);
+ for_each_set_bit(index, &trigger_mask, ARRAY_SIZE(timer_table->timers)) {
timer = timer_table->timers[index];
- BUG_ON(!timer);
- ath_dbg(common, BTCOEX,
- "Gen timer[%d] trigger\n", index);
+ if (!timer)
+ continue;
+ if (!timer->trigger)
+ continue;
timer->trigger(timer->arg);
}
}
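
The generic-timer ISR above drops the de Bruijn bit-index helper in favour of a plain unsigned long mask walked with for_each_set_bit(), skipping slots without a handler, and ATH9K_INT_GENTIMER is only kept enabled while at least one timer is active. A small stand-alone sketch of the mask iteration, with hypothetical handler storage:

#include <linux/bitops.h>
#include <linux/kernel.h>

#define FOO_MAX_TIMERS	16

static void (*foo_handlers[FOO_MAX_TIMERS])(void);

static void foo_dispatch(unsigned long pending)
{
	unsigned int index;

	/* Visit every set bit; skip slots without a registered handler. */
	for_each_set_bit(index, &pending, FOO_MAX_TIMERS) {
		if (!foo_handlers[index])
			continue;
		foo_handlers[index]();
	}
}
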
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index a2c9a5dbac6b..0acd4b5a4892 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -52,6 +52,7 @@
#define AR9300_DEVID_QCA955X 0x0038
#define AR9485_DEVID_AR1111 0x0037
#define AR9300_DEVID_AR9565 0x0036
+#define AR9300_DEVID_AR953X 0x003d
#define AR5416_AR9100_DEVID 0x000b
@@ -168,7 +169,7 @@
#define CAB_TIMEOUT_VAL 10
#define BEACON_TIMEOUT_VAL 10
#define MIN_BEACON_TIMEOUT_VAL 1
-#define SLEEP_SLOP 3
+#define SLEEP_SLOP TU_TO_USEC(3)
#define INIT_CONFIG_STATUS 0x00000000
#define INIT_RSSI_THR 0x00000700
@@ -277,14 +278,25 @@ struct ath9k_hw_capabilities {
u8 txs_len;
};
+#define AR_NO_SPUR 0x8000
+#define AR_BASE_FREQ_2GHZ 2300
+#define AR_BASE_FREQ_5GHZ 4900
+#define AR_SPUR_FEEQ_BOUND_HT40 19
+#define AR_SPUR_FEEQ_BOUND_HT20 10
+
+enum ath9k_hw_hang_checks {
+ HW_BB_WATCHDOG = BIT(0),
+ HW_PHYRESTART_CLC_WAR = BIT(1),
+ HW_BB_RIFS_HANG = BIT(2),
+ HW_BB_DFS_HANG = BIT(3),
+ HW_BB_RX_CLEAR_STUCK_HANG = BIT(4),
+ HW_MAC_HANG = BIT(5),
+};
+
struct ath9k_ops_config {
int dma_beacon_response_time;
int sw_beacon_response_time;
- int additional_swba_backoff;
- int ack_6mb;
u32 cwm_ignore_extcca;
- bool pcieSerDesWrite;
- u8 pcie_clock_req;
u32 pcie_waen;
u8 analog_shiftreg;
u32 ofdm_trig_low;
@@ -295,20 +307,11 @@ struct ath9k_ops_config {
int serialize_regmode;
bool rx_intr_mitigation;
bool tx_intr_mitigation;
-#define SPUR_DISABLE 0
-#define SPUR_ENABLE_IOCTL 1
-#define SPUR_ENABLE_EEPROM 2
-#define AR_SPUR_5413_1 1640
-#define AR_SPUR_5413_2 1200
-#define AR_NO_SPUR 0x8000
-#define AR_BASE_FREQ_2GHZ 2300
-#define AR_BASE_FREQ_5GHZ 4900
-#define AR_SPUR_FEEQ_BOUND_HT40 19
-#define AR_SPUR_FEEQ_BOUND_HT20 10
- int spurmode;
- u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
u8 max_txtrig_level;
u16 ani_poll_interval; /* ANI poll interval in ms */
+ u16 hw_hang_checks;
+ u16 rimt_first;
+ u16 rimt_last;
/* Platform specific config */
u32 aspm_l1_fix;
@@ -317,6 +320,7 @@ struct ath9k_ops_config {
bool xatten_margin_cfg;
bool alt_mingainidx;
bool no_pll_pwrsave;
+ bool tx_gain_buffalo;
};
enum ath9k_int {
@@ -460,10 +464,6 @@ struct ath9k_beacon_state {
u32 bs_intval;
#define ATH9K_TSFOOR_THRESHOLD 0x00004240 /* 16k us */
u32 bs_dtimperiod;
- u16 bs_cfpperiod;
- u16 bs_cfpmaxduration;
- u32 bs_cfpnext;
- u16 bs_timoffset;
u16 bs_bmissthreshold;
u32 bs_sleepduration;
u32 bs_tsfoor_threshold;
@@ -499,12 +499,6 @@ struct ath9k_hw_version {
#define AR_GENTMR_BIT(_index) (1 << (_index))
-/*
- * Using de Bruijin sequence to look up 1's index in a 32 bit number
- * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001
- */
-#define debruijn32 0x077CB531U
-
struct ath_gen_timer_configuration {
u32 next_addr;
u32 period_addr;
@@ -520,12 +514,8 @@ struct ath_gen_timer {
};
struct ath_gen_timer_table {
- u32 gen_timer_index[32];
struct ath_gen_timer *timers[ATH_MAX_GEN_TIMER];
- union {
- unsigned long timer_bits;
- u16 val;
- } timer_mask;
+ u16 timer_mask;
};
struct ath_hw_antcomb_conf {
@@ -596,6 +586,10 @@ struct ath_hw_radar_conf {
* register settings through the register initialization.
*/
struct ath_hw_private_ops {
+ void (*init_hang_checks)(struct ath_hw *ah);
+ bool (*detect_mac_hang)(struct ath_hw *ah);
+ bool (*detect_bb_hang)(struct ath_hw *ah);
+
/* Calibration ops */
void (*init_cal_settings)(struct ath_hw *ah);
bool (*init_cal)(struct ath_hw *ah, struct ath9k_channel *chan);
@@ -690,7 +684,8 @@ struct ath_hw_ops {
struct ath9k_channel *chan,
u8 rxchainmask,
bool longcal);
- bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked);
+ bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked,
+ u32 *sync_cause_p);
void (*set_txdesc)(struct ath_hw *ah, void *ds,
struct ath_tx_info *i);
int (*proc_txdesc)(struct ath_hw *ah, void *ds,
@@ -786,7 +781,6 @@ struct ath_hw {
u32 txurn_interrupt_mask;
atomic_t intr_ref_cnt;
bool chip_fullsleep;
- u32 atim_window;
u32 modes_index;
/* Calibration */
@@ -865,6 +859,7 @@ struct ath_hw {
u32 gpio_mask;
u32 gpio_val;
+ struct ar5416IniArray ini_dfs;
struct ar5416IniArray iniModes;
struct ar5416IniArray iniCommon;
struct ar5416IniArray iniBB_RfGain;
@@ -921,7 +916,7 @@ struct ath_hw {
/* Enterprise mode cap */
u32 ent_mode;
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ATH9K_WOW
u32 wow_event_mask;
#endif
bool is_clk_25mhz;
@@ -1017,13 +1012,6 @@ bool ath9k_hw_check_alive(struct ath_hw *ah);
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
-#ifdef CONFIG_ATH9K_DEBUGFS
-void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause);
-#else
-static inline void ath9k_debug_sync_cause(struct ath_common *common,
- u32 sync_cause) {}
-#endif
-
/* Generic hw timer primitives */
struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
void (*trigger)(void *),
@@ -1058,6 +1046,7 @@ void ar9002_hw_enable_async_fifo(struct ath_hw *ah);
* Code specific to AR9003, we stuff these here to avoid callbacks
* for older families
*/
+bool ar9003_hw_bb_watchdog_check(struct ath_hw *ah);
void ar9003_hw_bb_watchdog_config(struct ath_hw *ah);
void ar9003_hw_bb_watchdog_read(struct ath_hw *ah);
void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah);
@@ -1127,7 +1116,7 @@ ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ATH9K_WOW
const char *ath9k_hw_wow_event_to_string(u32 wow_event);
void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
u8 *user_mask, int pattern_count,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 710192ed27ed..1fc2e5a26b52 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -57,6 +57,10 @@ static int ath9k_bt_ant_diversity;
module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444);
MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity");
+static int ath9k_ps_enable;
+module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
+MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
+
bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */
@@ -470,7 +474,6 @@ static int ath9k_init_queues(struct ath_softc *sc)
sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
-
ath_cabq_update(sc);
sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);
@@ -554,7 +557,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
sc->spec_config.fft_period = 0xF;
}
-static void ath9k_init_platform(struct ath_softc *sc)
+static void ath9k_init_pcoem_platform(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -589,6 +592,9 @@ static void ath9k_init_platform(struct ath_softc *sc)
if (sc->driver_data & ATH9K_PCI_AR9565_2ANT)
ath_info(common, "WB335 2-ANT card detected\n");
+ if (sc->driver_data & ATH9K_PCI_KILLER)
+ ath_info(common, "Killer Wireless card detected\n");
+
/*
* Some WB335 cards do not support antenna diversity. Since
* we use a hardcoded value for AR9565 instead of using the
@@ -661,6 +667,27 @@ static void ath9k_eeprom_release(struct ath_softc *sc)
release_firmware(sc->sc_ah->eeprom_blob);
}
+static int ath9k_init_soc_platform(struct ath_softc *sc)
+{
+ struct ath9k_platform_data *pdata = sc->dev->platform_data;
+ struct ath_hw *ah = sc->sc_ah;
+ int ret = 0;
+
+ if (!pdata)
+ return 0;
+
+ if (pdata->eeprom_name) {
+ ret = ath9k_eeprom_request(sc, pdata->eeprom_name);
+ if (ret)
+ return ret;
+ }
+
+ if (pdata->tx_gain_buffalo)
+ ah->config.tx_gain_buffalo = true;
+
+ return ret;
+}
+
static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
const struct ath_bus_ops *bus_ops)
{
@@ -681,13 +708,13 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ah->reg_ops.read = ath9k_ioread32;
ah->reg_ops.write = ath9k_iowrite32;
ah->reg_ops.rmw = ath9k_reg_rmw;
- atomic_set(&ah->intr_ref_cnt, -1);
sc->sc_ah = ah;
pCap = &ah->caps;
common = ath9k_hw_common(ah);
sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
sc->tx99_power = MAX_RATE_POWER + 1;
+ init_waitqueue_head(&sc->tx_wait);
if (!pdata) {
ah->ah_flags |= AH_USE_EEPROM;
@@ -713,7 +740,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
/*
* Platform quirks.
*/
- ath9k_init_platform(sc);
+ ath9k_init_pcoem_platform(sc);
+
+ ret = ath9k_init_soc_platform(sc);
+ if (ret)
+ return ret;
/*
* Enable WLAN/BT RX Antenna diversity only when:
@@ -727,7 +758,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
common->bt_ant_diversity = 1;
spin_lock_init(&common->cc_lock);
-
spin_lock_init(&sc->sc_serial_rw);
spin_lock_init(&sc->sc_pm_lock);
mutex_init(&sc->mutex);
@@ -735,11 +765,10 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
(unsigned long)sc);
+ setup_timer(&sc->sleep_timer, ath_ps_full_sleep, (unsigned long)sc);
INIT_WORK(&sc->hw_reset_work, ath_reset_work);
- INIT_WORK(&sc->hw_check_work, ath_hw_check);
INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
- setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
/*
* Cache line size is used to size and align various
@@ -748,12 +777,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ath_read_cachesize(common, &csz);
common->cachelsz = csz << 2; /* convert to bytes */
- if (pdata && pdata->eeprom_name) {
- ret = ath9k_eeprom_request(sc, pdata->eeprom_name);
- if (ret)
- return ret;
- }
-
/* Initializes the hardware for all supported chipsets */
ret = ath9k_hw_init(ah);
if (ret)
@@ -851,6 +874,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
static const struct ieee80211_iface_limit if_dfs_limits[] = {
{ .max = 1, .types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
BIT(NL80211_IFTYPE_ADHOC) },
};
@@ -873,16 +899,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
}
};
-#ifdef CONFIG_PM
-static const struct wiphy_wowlan_support ath9k_wowlan_support = {
- .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
- .n_patterns = MAX_NUM_USER_PATTERN,
- .pattern_min_len = 1,
- .pattern_max_len = MAX_PATTERN_SIZE,
-};
-#endif
-
-void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -890,13 +907,15 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SUPPORTS_RC_TABLE |
IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ if (ath9k_ps_enable)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
@@ -931,19 +950,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
-#ifdef CONFIG_PM_SLEEP
- if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
- (sc->driver_data & ATH9K_PCI_WOW) &&
- device_can_wakeup(sc->dev))
- hw->wiphy->wowlan = &ath9k_wowlan_support;
-
- atomic_set(&sc->wow_sleep_proc_intr, -1);
- atomic_set(&sc->wow_got_bmiss_intr, -1);
-#endif
-
hw->queues = 4;
hw->max_rates = 4;
- hw->channel_change_time = 5000;
hw->max_listen_interval = 1;
hw->max_rate_tries = 10;
hw->sta_data_size = sizeof(struct ath_node);
@@ -966,6 +974,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&sc->sbands[IEEE80211_BAND_5GHZ];
+ ath9k_init_wow(hw);
ath9k_reload_chainmask_settings(sc);
SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
@@ -1064,6 +1073,7 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
if (ATH_TXQ_SETUP(sc, i))
ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+ del_timer_sync(&sc->sleep_timer);
ath9k_hw_deinit(sc->sc_ah);
if (sc->dfs_detector != NULL)
sc->dfs_detector->exit(sc->dfs_detector);
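
init.c now creates a sleep_timer with setup_timer() and tears it down with del_timer_sync() in deinit; the main.c hunks further down arm it with mod_timer() so the chip drops into full sleep only after an idle delay. The classic timer lifecycle looks roughly like this, using the pre-4.15 setup_timer() API of this kernel and hypothetical names:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct foo_softc {
	struct timer_list sleep_timer;
};

static void foo_full_sleep(unsigned long data)
{
	struct foo_softc *sc = (struct foo_softc *)data;

	/* Put the hardware to sleep; runs in softirq context. */
	(void)sc;
}

static void foo_init(struct foo_softc *sc)
{
	setup_timer(&sc->sleep_timer, foo_full_sleep, (unsigned long)sc);
}

static void foo_idle(struct foo_softc *sc)
{
	/* Defer full sleep by 100 ms, matching the HZ / 10 delay above. */
	mod_timer(&sc->sleep_timer, jiffies + HZ / 10);
}

static void foo_deinit(struct foo_softc *sc)
{
	del_timer_sync(&sc->sleep_timer);
}
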
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index aed7e29dc50f..30dcef5aba10 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -65,50 +65,26 @@ void ath_tx_complete_poll_work(struct work_struct *work)
/*
* Checks if the BB/MAC is hung.
*/
-void ath_hw_check(struct work_struct *work)
+bool ath_hw_check(struct ath_softc *sc)
{
- struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- unsigned long flags;
- int busy;
- u8 is_alive, nbeacon = 1;
enum ath_reset_type type;
+ bool is_alive;
ath9k_ps_wakeup(sc);
+
is_alive = ath9k_hw_check_alive(sc->sc_ah);
- if ((is_alive && !AR_SREV_9300(sc->sc_ah)) || sc->tx99_state)
- goto out;
- else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
+ if (!is_alive) {
ath_dbg(common, RESET,
- "DCU stuck is detected. Schedule chip reset\n");
+ "HW hang detected, schedule chip reset\n");
type = RESET_TYPE_MAC_HANG;
- goto sched_reset;
- }
-
- spin_lock_irqsave(&common->cc_lock, flags);
- busy = ath_update_survey_stats(sc);
- spin_unlock_irqrestore(&common->cc_lock, flags);
-
- ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n",
- busy, sc->hw_busy_count + 1);
- if (busy >= 99) {
- if (++sc->hw_busy_count >= 3) {
- type = RESET_TYPE_BB_HANG;
- goto sched_reset;
- }
- } else if (busy >= 0) {
- sc->hw_busy_count = 0;
- nbeacon = 3;
+ ath9k_queue_reset(sc, type);
}
- ath_start_rx_poll(sc, nbeacon);
- goto out;
-
-sched_reset:
- ath9k_queue_reset(sc, type);
-out:
ath9k_ps_restore(sc);
+
+ return is_alive;
}
/*
@@ -162,29 +138,6 @@ void ath_hw_pll_work(struct work_struct *work)
}
/*
- * RX Polling - monitors baseband hangs.
- */
-void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon)
-{
- if (!AR_SREV_9300(sc->sc_ah))
- return;
-
- if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
- return;
-
- mod_timer(&sc->rx_poll_timer, jiffies + msecs_to_jiffies
- (nbeacon * sc->cur_beacon_conf.beacon_interval));
-}
-
-void ath_rx_poll(unsigned long data)
-{
- struct ath_softc *sc = (struct ath_softc *)data;
-
- if (!test_bit(SC_OP_INVALID, &sc->sc_flags))
- ieee80211_queue_work(sc->hw, &sc->hw_check_work);
-}
-
-/*
* PA Pre-distortion.
*/
static void ath_paprd_activate(struct ath_softc *sc)
@@ -409,10 +362,10 @@ void ath_ani_calibrate(unsigned long data)
/* Call ANI routine if necessary */
if (aniflag) {
- spin_lock_irqsave(&common->cc_lock, flags);
+ spin_lock(&common->cc_lock);
ath9k_hw_ani_monitor(ah, ah->curchan);
ath_update_survey_stats(sc);
- spin_unlock_irqrestore(&common->cc_lock, flags);
+ spin_unlock(&common->cc_lock);
}
/* Perform calibration if necessary */
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 6a18f9d3e9cc..5f727588ca27 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -481,8 +481,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
| AR_Q_MISC_CBR_INCR_DIS0);
value = (qi->tqi_readyTime -
(ah->config.sw_beacon_response_time -
- ah->config.dma_beacon_response_time) -
- ah->config.additional_swba_backoff) * 1024;
+ ah->config.dma_beacon_response_time)) * 1024;
REG_WRITE(ah, AR_QRDYTIMECFG(q),
value | AR_Q_RDYTIMECFG_EN);
REG_SET_BIT(ah, AR_DMISC(q),
@@ -550,25 +549,25 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
rs->rs_rssi = ATH9K_RSSI_BAD;
- rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
- rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
- rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
- rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
- rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
- rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl[0] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl[1] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl[2] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext[0] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext[1] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext[2] = ATH9K_RSSI_BAD;
} else {
rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
- rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl[0] = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt00);
- rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl[1] = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt01);
- rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl[2] = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt02);
- rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext[0] = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt10);
- rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext[1] = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt11);
- rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext[2] = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt12);
}
if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
@@ -923,11 +922,29 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
mask2 |= AR_IMR_S2_CST;
}
+ if (ah->config.hw_hang_checks & HW_BB_WATCHDOG) {
+ if (ints & ATH9K_INT_BB_WATCHDOG) {
+ mask |= AR_IMR_BCNMISC;
+ mask2 |= AR_IMR_S2_BB_WATCHDOG;
+ }
+ }
+
ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask);
REG_WRITE(ah, AR_IMR, mask);
- ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
- AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
- AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
+ ah->imrs2_reg &= ~(AR_IMR_S2_TIM |
+ AR_IMR_S2_DTIM |
+ AR_IMR_S2_DTIMSYNC |
+ AR_IMR_S2_CABEND |
+ AR_IMR_S2_CABTO |
+ AR_IMR_S2_TSFOOR |
+ AR_IMR_S2_GTT |
+ AR_IMR_S2_CST);
+
+ if (ah->config.hw_hang_checks & HW_BB_WATCHDOG) {
+ if (ints & ATH9K_INT_BB_WATCHDOG)
+ ah->imrs2_reg &= ~AR_IMR_S2_BB_WATCHDOG;
+ }
+
ah->imrs2_reg |= mask2;
REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index e3eed81f2439..10271373a0cd 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -133,12 +133,8 @@ struct ath_rx_status {
u8 rs_rate;
u8 rs_antenna;
u8 rs_more;
- int8_t rs_rssi_ctl0;
- int8_t rs_rssi_ctl1;
- int8_t rs_rssi_ctl2;
- int8_t rs_rssi_ext0;
- int8_t rs_rssi_ext1;
- int8_t rs_rssi_ext2;
+ int8_t rs_rssi_ctl[3];
+ int8_t rs_rssi_ext[3];
u8 rs_isaggr;
u8 rs_firstaggr;
u8 rs_moreaggr;
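Folding rs_rssi_ctl0..2 and rs_rssi_ext0..2 into three-element arrays lets per-chain handling become a loop instead of six open-coded assignments, which is exactly what the mac.c and recv.c hunks in this diff rely on. A minimal sketch using only names that appear in the patch; the helper itself is hypothetical:

/* Hypothetical helper: invalidate per-chain RSSI via the new arrays. */
static void mark_rssi_bad(struct ath_rx_status *rs)
{
	int i;

	for (i = 0; i < 3; i++) {
		rs->rs_rssi_ctl[i] = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext[i] = ATH9K_RSSI_BAD;
	}
}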
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 74f452c7b166..5924f72dd493 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -82,6 +82,22 @@ static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
return ret;
}
+void ath_ps_full_sleep(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *) data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ bool reset;
+
+ spin_lock(&common->cc_lock);
+ ath_hw_cycle_counters_update(common);
+ spin_unlock(&common->cc_lock);
+
+ ath9k_hw_setrxabort(sc->sc_ah, 1);
+ ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
+
+ ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+}
+
void ath9k_ps_wakeup(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -92,6 +108,7 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
if (++sc->ps_usecount != 1)
goto unlock;
+ del_timer_sync(&sc->sleep_timer);
power_mode = sc->sc_ah->power_mode;
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
@@ -117,17 +134,17 @@ void ath9k_ps_restore(struct ath_softc *sc)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
enum ath9k_power_mode mode;
unsigned long flags;
- bool reset;
spin_lock_irqsave(&sc->sc_pm_lock, flags);
if (--sc->ps_usecount != 0)
goto unlock;
if (sc->ps_idle) {
- ath9k_hw_setrxabort(sc->sc_ah, 1);
- ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
- mode = ATH9K_PM_FULL_SLEEP;
- } else if (sc->ps_enabled &&
+ mod_timer(&sc->sleep_timer, jiffies + HZ / 10);
+ goto unlock;
+ }
+
+ if (sc->ps_enabled &&
!(sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
PS_WAIT_FOR_PSPOLL_DATA |
@@ -153,7 +170,6 @@ void ath9k_ps_restore(struct ath_softc *sc)
static void __ath_cancel_work(struct ath_softc *sc)
{
cancel_work_sync(&sc->paprd_work);
- cancel_work_sync(&sc->hw_check_work);
cancel_delayed_work_sync(&sc->tx_complete_work);
cancel_delayed_work_sync(&sc->hw_pll_work);
@@ -163,13 +179,13 @@ static void __ath_cancel_work(struct ath_softc *sc)
#endif
}
-static void ath_cancel_work(struct ath_softc *sc)
+void ath_cancel_work(struct ath_softc *sc)
{
__ath_cancel_work(sc);
cancel_work_sync(&sc->hw_reset_work);
}
-static void ath_restart_work(struct ath_softc *sc)
+void ath_restart_work(struct ath_softc *sc)
{
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
@@ -177,7 +193,6 @@ static void ath_restart_work(struct ath_softc *sc)
ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
- ath_start_rx_poll(sc, 3);
ath_start_ani(sc);
}
@@ -187,11 +202,7 @@ static bool ath_prepare_reset(struct ath_softc *sc)
bool ret = true;
ieee80211_stop_queues(sc->hw);
-
- sc->hw_busy_count = 0;
ath_stop_ani(sc);
- del_timer_sync(&sc->rx_poll_timer);
-
ath9k_hw_disable_interrupts(ah);
if (!ath_drain_all_txq(sc))
@@ -247,6 +258,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
}
}
+ sc->gtt_cnt = 0;
ieee80211_wake_queues(sc->hw);
return true;
@@ -319,7 +331,6 @@ static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chand
struct ieee80211_hw *hw = sc->hw;
struct ath9k_channel *hchan;
struct ieee80211_channel *chan = chandef->chan;
- unsigned long flags;
bool offchannel;
int pos = chan->hw_value;
int old_pos = -1;
@@ -337,9 +348,9 @@ static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chand
chan->center_freq, chandef->width);
/* update survey stats for the old channel before switching */
- spin_lock_irqsave(&common->cc_lock, flags);
+ spin_lock_bh(&common->cc_lock);
ath_update_survey_stats(sc);
- spin_unlock_irqrestore(&common->cc_lock, flags);
+ spin_unlock_bh(&common->cc_lock);
ath9k_cmn_get_channel(hw, ah, chandef);
@@ -410,12 +421,6 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
an->vif = vif;
ath_tx_node_init(sc, an);
-
- if (sta->ht_cap.ht_supported) {
- an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
- sta->ht_cap.ampdu_factor);
- an->mpdudensity = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
- }
}
static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
@@ -437,14 +442,8 @@ void ath9k_tasklet(unsigned long data)
ath9k_ps_wakeup(sc);
spin_lock(&sc->sc_pcu_lock);
- if ((status & ATH9K_INT_FATAL) ||
- (status & ATH9K_INT_BB_WATCHDOG)) {
-
- if (status & ATH9K_INT_FATAL)
- type = RESET_TYPE_FATAL_INT;
- else
- type = RESET_TYPE_BB_WATCHDOG;
-
+ if (status & ATH9K_INT_FATAL) {
+ type = RESET_TYPE_FATAL_INT;
ath9k_queue_reset(sc, type);
/*
@@ -456,6 +455,41 @@ void ath9k_tasklet(unsigned long data)
goto out;
}
+ if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
+ (status & ATH9K_INT_BB_WATCHDOG)) {
+ spin_lock(&common->cc_lock);
+ ath_hw_cycle_counters_update(common);
+ ar9003_hw_bb_watchdog_dbg_info(ah);
+ spin_unlock(&common->cc_lock);
+
+ if (ar9003_hw_bb_watchdog_check(ah)) {
+ type = RESET_TYPE_BB_WATCHDOG;
+ ath9k_queue_reset(sc, type);
+
+ /*
+ * Increment the ref. counter here so that
+ * interrupts are enabled in the reset routine.
+ */
+ atomic_inc(&ah->intr_ref_cnt);
+ ath_dbg(common, ANY,
+ "BB_WATCHDOG: Skipping interrupts\n");
+ goto out;
+ }
+ }
+
+ if (status & ATH9K_INT_GTT) {
+ sc->gtt_cnt++;
+
+ if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) {
+ type = RESET_TYPE_TX_GTT;
+ ath9k_queue_reset(sc, type);
+ atomic_inc(&ah->intr_ref_cnt);
+ ath_dbg(common, ANY,
+ "GTT: Skipping interrupts\n");
+ goto out;
+ }
+ }
+
spin_lock_irqsave(&sc->sc_pm_lock, flags);
if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
/*
@@ -483,12 +517,26 @@ void ath9k_tasklet(unsigned long data)
}
if (status & ATH9K_INT_TX) {
- if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ /*
+ * For EDMA chips, TX completion is enabled for the
+ * beacon queue, so if a beacon has been transmitted
+ * successfully after a GTT interrupt, the GTT counter
+ * gets reset to zero here.
+ */
+ sc->gtt_cnt = 0;
+
ath_tx_edma_tasklet(sc);
- else
+ } else {
ath_tx_tasklet(sc);
+ }
+
+ wake_up(&sc->tx_wait);
}
+ if (status & ATH9K_INT_GENTIMER)
+ ath_gen_timer_isr(sc->sc_ah);
+
ath9k_btcoex_handle_interrupt(sc, status);
/* re-enable hardware interrupt */
@@ -511,14 +559,15 @@ irqreturn_t ath_isr(int irq, void *dev)
ATH9K_INT_TX | \
ATH9K_INT_BMISS | \
ATH9K_INT_CST | \
+ ATH9K_INT_GTT | \
ATH9K_INT_TSFOOR | \
ATH9K_INT_GENTIMER | \
ATH9K_INT_MCI)
struct ath_softc *sc = dev;
struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
enum ath9k_int status;
+ u32 sync_cause = 0;
bool sched = false;
/*
@@ -545,7 +594,8 @@ irqreturn_t ath_isr(int irq, void *dev)
* bits we haven't explicitly enabled so we mask the
* value to insure we only process bits we requested.
*/
- ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
+ ath9k_hw_getisr(ah, &status, &sync_cause); /* NB: clears ISR too */
+ ath9k_debug_sync_cause(sc, sync_cause);
status &= ah->imask; /* discard unasked-for bits */
/*
@@ -569,25 +619,19 @@ irqreturn_t ath_isr(int irq, void *dev)
!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
goto chip_reset;
- if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
- (status & ATH9K_INT_BB_WATCHDOG)) {
-
- spin_lock(&common->cc_lock);
- ath_hw_cycle_counters_update(common);
- ar9003_hw_bb_watchdog_dbg_info(ah);
- spin_unlock(&common->cc_lock);
-
+ if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
+ (status & ATH9K_INT_BB_WATCHDOG))
goto chip_reset;
- }
-#ifdef CONFIG_PM_SLEEP
+
+#ifdef CONFIG_ATH9K_WOW
if (status & ATH9K_INT_BMISS) {
if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
- ath_dbg(common, ANY, "during WoW we got a BMISS\n");
atomic_inc(&sc->wow_got_bmiss_intr);
atomic_dec(&sc->wow_sleep_proc_intr);
}
}
#endif
+
if (status & ATH9K_INT_SWBA)
tasklet_schedule(&sc->bcon_tasklet);
@@ -627,7 +671,7 @@ chip_reset:
#undef SCHED_INTR
}
-static int ath_reset(struct ath_softc *sc)
+int ath_reset(struct ath_softc *sc)
{
int r;
@@ -705,12 +749,19 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ah->imask |= ATH9K_INT_RXHP |
- ATH9K_INT_RXLP |
- ATH9K_INT_BB_WATCHDOG;
+ ATH9K_INT_RXLP;
else
ah->imask |= ATH9K_INT_RX;
- ah->imask |= ATH9K_INT_GTT;
+ if (ah->config.hw_hang_checks & HW_BB_WATCHDOG)
+ ah->imask |= ATH9K_INT_BB_WATCHDOG;
+
+ /*
+ * Enable GTT interrupts only for AR9003/AR9004 chips
+ * for now.
+ */
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ah->imask |= ATH9K_INT_GTT;
if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
ah->imask |= ATH9K_INT_CST;
@@ -735,6 +786,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
*/
ath9k_cmn_init_crypto(sc->sc_ah);
+ ath9k_hw_reset_tsf(ah);
+
spin_unlock_bh(&sc->sc_pcu_lock);
mutex_unlock(&sc->mutex);
@@ -831,7 +884,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
mutex_lock(&sc->mutex);
ath_cancel_work(sc);
- del_timer_sync(&sc->rx_poll_timer);
if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
ath_dbg(common, ANY, "Device not present\n");
@@ -965,8 +1017,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
struct ath_common *common = ath9k_hw_common(ah);
/*
- * Use the hardware MAC address as reference, the hardware uses it
- * together with the BSSID mask when matching addresses.
+ * Pick the MAC address of the first interface as the new hardware
+ * MAC address. The hardware will use it together with the BSSID mask
+ * when matching addresses.
*/
memset(iter_data, 0, sizeof(*iter_data));
memset(&iter_data->mask, 0xff, ETH_ALEN);
@@ -1635,13 +1688,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
if ((changed & BSS_CHANGED_BEACON_ENABLED) ||
- (changed & BSS_CHANGED_BEACON_INT)) {
- if (ah->opmode == NL80211_IFTYPE_AP &&
- bss_conf->enable_beacon)
- ath9k_set_tsfadjust(sc, vif);
- if (ath9k_allow_beacon_config(sc, vif))
- ath9k_beacon_config(sc, vif, changed);
- }
+ (changed & BSS_CHANGED_BEACON_INT))
+ ath9k_beacon_config(sc, vif, changed);
if (changed & BSS_CHANGED_ERP_SLOT) {
if (bss_conf->use_short_slot)
@@ -1766,13 +1814,12 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
- unsigned long flags;
int pos;
if (config_enabled(CONFIG_ATH9K_TX99))
return -EOPNOTSUPP;
- spin_lock_irqsave(&common->cc_lock, flags);
+ spin_lock_bh(&common->cc_lock);
if (idx == 0)
ath_update_survey_stats(sc);
@@ -1786,7 +1833,7 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
if (!sband || idx >= sband->n_channels) {
- spin_unlock_irqrestore(&common->cc_lock, flags);
+ spin_unlock_bh(&common->cc_lock);
return -ENOENT;
}
@@ -1794,7 +1841,7 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
pos = chan->hw_value;
memcpy(survey, &sc->survey[pos], sizeof(*survey));
survey->channel = chan;
- spin_unlock_irqrestore(&common->cc_lock, flags);
+ spin_unlock_bh(&common->cc_lock);
return 0;
}
@@ -1817,13 +1864,31 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
mutex_unlock(&sc->mutex);
}
+static bool ath9k_has_tx_pending(struct ath_softc *sc)
+{
+ int i, npend;
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+
+ if (!sc->tx.txq[i].axq_depth)
+ continue;
+
+ npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
+ if (npend)
+ break;
+ }
+
+ return !!npend;
+}
+
static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- int timeout = 200; /* ms */
- int i, j;
+ int timeout = HZ / 5; /* 200 ms */
bool drain_txq;
mutex_lock(&sc->mutex);
@@ -1841,25 +1906,9 @@ static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
return;
}
- for (j = 0; j < timeout; j++) {
- bool npend = false;
-
- if (j)
- usleep_range(1000, 2000);
-
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- if (!ATH_TXQ_SETUP(sc, i))
- continue;
-
- npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
-
- if (npend)
- break;
- }
-
- if (!npend)
- break;
- }
+ if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc),
+ timeout) > 0)
+ drop = false;
if (drop) {
ath9k_ps_wakeup(sc);
@@ -2021,333 +2070,6 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-
-static void ath9k_wow_map_triggers(struct ath_softc *sc,
- struct cfg80211_wowlan *wowlan,
- u32 *wow_triggers)
-{
- if (wowlan->disconnect)
- *wow_triggers |= AH_WOW_LINK_CHANGE |
- AH_WOW_BEACON_MISS;
- if (wowlan->magic_pkt)
- *wow_triggers |= AH_WOW_MAGIC_PATTERN_EN;
-
- if (wowlan->n_patterns)
- *wow_triggers |= AH_WOW_USER_PATTERN_EN;
-
- sc->wow_enabled = *wow_triggers;
-
-}
-
-static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- int pattern_count = 0;
- int i, byte_cnt;
- u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
- u8 dis_deauth_mask[MAX_PATTERN_SIZE];
-
- memset(dis_deauth_pattern, 0, MAX_PATTERN_SIZE);
- memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE);
-
- /*
- * Create Dissassociate / Deauthenticate packet filter
- *
- * 2 bytes 2 byte 6 bytes 6 bytes 6 bytes
- * +--------------+----------+---------+--------+--------+----
- * + Frame Control+ Duration + DA + SA + BSSID +
- * +--------------+----------+---------+--------+--------+----
- *
- * The above is the management frame format for disassociate/
- * deauthenticate pattern, from this we need to match the first byte
- * of 'Frame Control' and DA, SA, and BSSID fields
- * (skipping 2nd byte of FC and Duration feild.
- *
- * Disassociate pattern
- * --------------------
- * Frame control = 00 00 1010
- * DA, SA, BSSID = x:x:x:x:x:x
- * Pattern will be A0000000 | x:x:x:x:x:x | x:x:x:x:x:x
- * | x:x:x:x:x:x -- 22 bytes
- *
- * Deauthenticate pattern
- * ----------------------
- * Frame control = 00 00 1100
- * DA, SA, BSSID = x:x:x:x:x:x
- * Pattern will be C0000000 | x:x:x:x:x:x | x:x:x:x:x:x
- * | x:x:x:x:x:x -- 22 bytes
- */
-
- /* Create Disassociate Pattern first */
-
- byte_cnt = 0;
-
- /* Fill out the mask with all FF's */
-
- for (i = 0; i < MAX_PATTERN_MASK_SIZE; i++)
- dis_deauth_mask[i] = 0xff;
-
- /* copy the first byte of frame control field */
- dis_deauth_pattern[byte_cnt] = 0xa0;
- byte_cnt++;
-
- /* skip 2nd byte of frame control and Duration field */
- byte_cnt += 3;
-
- /*
- * need not match the destination mac address, it can be a broadcast
- * mac address or an unicast to this station
- */
- byte_cnt += 6;
-
- /* copy the source mac address */
- memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
-
- byte_cnt += 6;
-
- /* copy the bssid, its same as the source mac address */
-
- memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
-
- /* Create Disassociate pattern mask */
-
- dis_deauth_mask[0] = 0xfe;
- dis_deauth_mask[1] = 0x03;
- dis_deauth_mask[2] = 0xc0;
-
- ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n");
-
- ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
- pattern_count, byte_cnt);
-
- pattern_count++;
- /*
- * for de-authenticate pattern, only the first byte of the frame
- * control field gets changed from 0xA0 to 0xC0
- */
- dis_deauth_pattern[0] = 0xC0;
-
- ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
- pattern_count, byte_cnt);
-
-}
-
-static void ath9k_wow_add_pattern(struct ath_softc *sc,
- struct cfg80211_wowlan *wowlan)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath9k_wow_pattern *wow_pattern = NULL;
- struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
- int mask_len;
- s8 i = 0;
-
- if (!wowlan->n_patterns)
- return;
-
- /*
- * Add the new user configured patterns
- */
- for (i = 0; i < wowlan->n_patterns; i++) {
-
- wow_pattern = kzalloc(sizeof(*wow_pattern), GFP_KERNEL);
-
- if (!wow_pattern)
- return;
-
- /*
- * TODO: convert the generic user space pattern to
- * appropriate chip specific/802.11 pattern.
- */
-
- mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
- memset(wow_pattern->pattern_bytes, 0, MAX_PATTERN_SIZE);
- memset(wow_pattern->mask_bytes, 0, MAX_PATTERN_SIZE);
- memcpy(wow_pattern->pattern_bytes, patterns[i].pattern,
- patterns[i].pattern_len);
- memcpy(wow_pattern->mask_bytes, patterns[i].mask, mask_len);
- wow_pattern->pattern_len = patterns[i].pattern_len;
-
- /*
- * just need to take care of deauth and disssoc pattern,
- * make sure we don't overwrite them.
- */
-
- ath9k_hw_wow_apply_pattern(ah, wow_pattern->pattern_bytes,
- wow_pattern->mask_bytes,
- i + 2,
- wow_pattern->pattern_len);
- kfree(wow_pattern);
-
- }
-
-}
-
-static int ath9k_suspend(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wowlan)
-{
- struct ath_softc *sc = hw->priv;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- u32 wow_triggers_enabled = 0;
- int ret = 0;
-
- mutex_lock(&sc->mutex);
-
- ath_cancel_work(sc);
- ath_stop_ani(sc);
- del_timer_sync(&sc->rx_poll_timer);
-
- if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
- ath_dbg(common, ANY, "Device not present\n");
- ret = -EINVAL;
- goto fail_wow;
- }
-
- if (WARN_ON(!wowlan)) {
- ath_dbg(common, WOW, "None of the WoW triggers enabled\n");
- ret = -EINVAL;
- goto fail_wow;
- }
-
- if (!device_can_wakeup(sc->dev)) {
- ath_dbg(common, WOW, "device_can_wakeup failed, WoW is not enabled\n");
- ret = 1;
- goto fail_wow;
- }
-
- /*
- * none of the sta vifs are associated
- * and we are not currently handling multivif
- * cases, for instance we have to seperately
- * configure 'keep alive frame' for each
- * STA.
- */
-
- if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
- ath_dbg(common, WOW, "None of the STA vifs are associated\n");
- ret = 1;
- goto fail_wow;
- }
-
- if (sc->nvifs > 1) {
- ath_dbg(common, WOW, "WoW for multivif is not yet supported\n");
- ret = 1;
- goto fail_wow;
- }
-
- ath9k_wow_map_triggers(sc, wowlan, &wow_triggers_enabled);
-
- ath_dbg(common, WOW, "WoW triggers enabled 0x%x\n",
- wow_triggers_enabled);
-
- ath9k_ps_wakeup(sc);
-
- ath9k_stop_btcoex(sc);
-
- /*
- * Enable wake up on recieving disassoc/deauth
- * frame by default.
- */
- ath9k_wow_add_disassoc_deauth_pattern(sc);
-
- if (wow_triggers_enabled & AH_WOW_USER_PATTERN_EN)
- ath9k_wow_add_pattern(sc, wowlan);
-
- spin_lock_bh(&sc->sc_pcu_lock);
- /*
- * To avoid false wake, we enable beacon miss interrupt only
- * when we go to sleep. We save the current interrupt mask
- * so we can restore it after the system wakes up
- */
- sc->wow_intr_before_sleep = ah->imask;
- ah->imask &= ~ATH9K_INT_GLOBAL;
- ath9k_hw_disable_interrupts(ah);
- ah->imask = ATH9K_INT_BMISS | ATH9K_INT_GLOBAL;
- ath9k_hw_set_interrupts(ah);
- ath9k_hw_enable_interrupts(ah);
-
- spin_unlock_bh(&sc->sc_pcu_lock);
-
- /*
- * we can now sync irq and kill any running tasklets, since we already
- * disabled interrupts and not holding a spin lock
- */
- synchronize_irq(sc->irq);
- tasklet_kill(&sc->intr_tq);
-
- ath9k_hw_wow_enable(ah, wow_triggers_enabled);
-
- ath9k_ps_restore(sc);
- ath_dbg(common, ANY, "WoW enabled in ath9k\n");
- atomic_inc(&sc->wow_sleep_proc_intr);
-
-fail_wow:
- mutex_unlock(&sc->mutex);
- return ret;
-}
-
-static int ath9k_resume(struct ieee80211_hw *hw)
-{
- struct ath_softc *sc = hw->priv;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- u32 wow_status;
-
- mutex_lock(&sc->mutex);
-
- ath9k_ps_wakeup(sc);
-
- spin_lock_bh(&sc->sc_pcu_lock);
-
- ath9k_hw_disable_interrupts(ah);
- ah->imask = sc->wow_intr_before_sleep;
- ath9k_hw_set_interrupts(ah);
- ath9k_hw_enable_interrupts(ah);
-
- spin_unlock_bh(&sc->sc_pcu_lock);
-
- wow_status = ath9k_hw_wow_wakeup(ah);
-
- if (atomic_read(&sc->wow_got_bmiss_intr) == 0) {
- /*
- * some devices may not pick beacon miss
- * as the reason they woke up so we add
- * that here for that shortcoming.
- */
- wow_status |= AH_WOW_BEACON_MISS;
- atomic_dec(&sc->wow_got_bmiss_intr);
- ath_dbg(common, ANY, "Beacon miss interrupt picked up during WoW sleep\n");
- }
-
- atomic_dec(&sc->wow_sleep_proc_intr);
-
- if (wow_status) {
- ath_dbg(common, ANY, "Waking up due to WoW triggers %s with WoW status = %x\n",
- ath9k_hw_wow_event_to_string(wow_status), wow_status);
- }
-
- ath_restart_work(sc);
- ath9k_start_btcoex(sc);
-
- ath9k_ps_restore(sc);
- mutex_unlock(&sc->mutex);
-
- return 0;
-}
-
-static void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
-{
- struct ath_softc *sc = hw->priv;
-
- mutex_lock(&sc->mutex);
- device_init_wakeup(sc->dev, 1);
- device_set_wakeup_enable(sc->dev, enabled);
- mutex_unlock(&sc->mutex);
-}
-
-#endif
static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
@@ -2373,134 +2095,6 @@ static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
sc->csa_vif = vif;
}
-static void ath9k_tx99_stop(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
-
- ath_drain_all_txq(sc);
- ath_startrecv(sc);
-
- ath9k_hw_set_interrupts(ah);
- ath9k_hw_enable_interrupts(ah);
-
- ieee80211_wake_queues(sc->hw);
-
- kfree_skb(sc->tx99_skb);
- sc->tx99_skb = NULL;
- sc->tx99_state = false;
-
- ath9k_hw_tx99_stop(sc->sc_ah);
- ath_dbg(common, XMIT, "TX99 stopped\n");
-}
-
-static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
-{
- static u8 PN9Data[] = {0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
- 0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
- 0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
- 0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
- 0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
- 0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
- 0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
- 0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
- u32 len = 1200;
- struct ieee80211_hw *hw = sc->hw;
- struct ieee80211_hdr *hdr;
- struct ieee80211_tx_info *tx_info;
- struct sk_buff *skb;
-
- skb = alloc_skb(len, GFP_KERNEL);
- if (!skb)
- return NULL;
-
- skb_put(skb, len);
-
- memset(skb->data, 0, len);
-
- hdr = (struct ieee80211_hdr *)skb->data;
- hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA);
- hdr->duration_id = 0;
-
- memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
- memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
- memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
-
- hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
-
- tx_info = IEEE80211_SKB_CB(skb);
- memset(tx_info, 0, sizeof(*tx_info));
- tx_info->band = hw->conf.chandef.chan->band;
- tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
- tx_info->control.vif = sc->tx99_vif;
-
- memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
-
- return skb;
-}
-
-void ath9k_tx99_deinit(struct ath_softc *sc)
-{
- ath_reset(sc);
-
- ath9k_ps_wakeup(sc);
- ath9k_tx99_stop(sc);
- ath9k_ps_restore(sc);
-}
-
-int ath9k_tx99_init(struct ath_softc *sc)
-{
- struct ieee80211_hw *hw = sc->hw;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_tx_control txctl;
- int r;
-
- if (sc->sc_flags & SC_OP_INVALID) {
- ath_err(common,
- "driver is in invalid state unable to use TX99");
- return -EINVAL;
- }
-
- sc->tx99_skb = ath9k_build_tx99_skb(sc);
- if (!sc->tx99_skb)
- return -ENOMEM;
-
- memset(&txctl, 0, sizeof(txctl));
- txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
-
- ath_reset(sc);
-
- ath9k_ps_wakeup(sc);
-
- ath9k_hw_disable_interrupts(ah);
- atomic_set(&ah->intr_ref_cnt, -1);
- ath_drain_all_txq(sc);
- ath_stoprecv(sc);
-
- sc->tx99_state = true;
-
- ieee80211_stop_queues(hw);
-
- if (sc->tx99_power == MAX_RATE_POWER + 1)
- sc->tx99_power = MAX_RATE_POWER;
-
- ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
- r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
- if (r) {
- ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
- return r;
- }
-
- ath_dbg(common, XMIT, "TX99 xmit started using %d ( %ddBm)\n",
- sc->tx99_power,
- sc->tx99_power / 2);
-
- /* We leave the harware awake as it will be chugging on */
-
- return 0;
-}
-
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@@ -2531,7 +2125,7 @@ struct ieee80211_ops ath9k_ops = {
.set_antenna = ath9k_set_antenna,
.get_antenna = ath9k_get_antenna,
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ATH9K_WOW
.suspend = ath9k_suspend,
.resume = ath9k_resume,
.set_wakeup = ath9k_set_wakeup,
@@ -2543,7 +2137,7 @@ struct ieee80211_ops ath9k_ops = {
.get_et_strings = ath9k_get_et_strings,
#endif
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_STATION_STATISTICS)
.sta_add_debugfs = ath9k_sta_add_debugfs,
#endif
.sw_scan_start = ath9k_sw_scan_start,
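The ath9k_flush() rewrite above replaces a 200-iteration usleep polling loop with a waitqueue: the TX tasklet now calls wake_up(&sc->tx_wait) after completions, and the flush path sleeps in wait_event_timeout() until ath9k_has_tx_pending() reports no queued frames. A stripped-down sketch of that waiter/waker pattern follows; apart from the tx_wait idea, the struct, field and function names are illustrative:

#include <linux/wait.h>
#include <linux/jiffies.h>

/* Illustrative context; only the tx_wait waitqueue mirrors the patch. */
struct flush_demo {
	wait_queue_head_t tx_wait;	/* initialize with init_waitqueue_head() */
	int pending;			/* frames still queued for TX */
};

/* Completion path (tasklet): account for finished frames, then wake waiters. */
static void demo_tx_done(struct flush_demo *d, int completed)
{
	d->pending -= completed;
	wake_up(&d->tx_wait);
}

/* Flush path: sleep until nothing is pending or 200 ms (HZ / 5) elapse. */
static bool demo_flush(struct flush_demo *d)
{
	return wait_event_timeout(d->tx_wait, d->pending == 0, HZ / 5) > 0;
}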
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 0ac1b5f04256..71799fcade54 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -200,7 +200,7 @@ skip_tuning:
if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
- btcoex->btcoex_no_stomp = btcoex->btcoex_period * 1000 *
+ btcoex->btcoex_no_stomp = btcoex->btcoex_period *
(100 - btcoex->duty_cycle) / 100;
ath9k_hw_btcoex_enable(sc->sc_ah);
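Dropping the * 1000 factor in the mci.c hunk only makes sense if btcoex_period is now already stored in the finer unit; the corresponding field change is not part of this excerpt, so that is an assumption. As a worked example under that assumption: a 40000 us period at a 65% duty cycle gives btcoex_no_stomp = 40000 * (100 - 65) / 100 = 14000 us, whereas the old code started from a millisecond value of 40 and scaled it by 1000 first.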
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index b5656fce4ff5..55724b02316b 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -87,6 +87,19 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
+
+ /* Killer Wireless (3x3) */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0030,
+ 0x1A56,
+ 0x2000),
+ .driver_data = ATH9K_PCI_KILLER },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0030,
+ 0x1A56,
+ 0x2001),
+ .driver_data = ATH9K_PCI_KILLER },
+
{ PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
/* PCI-E CUS198 */
@@ -354,6 +367,13 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
0x1783),
.driver_data = ATH9K_PCI_WOW },
+ /* Killer Wireless (2x2) */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0030,
+ 0x1A56,
+ 0x2003),
+ .driver_data = ATH9K_PCI_KILLER },
+
{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
{ PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
@@ -392,6 +412,16 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x11AD, /* LITEON */
+ 0x06B2),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0842),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
0x6671),
.driver_data = ATH9K_PCI_AR9565_1ANT },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
@@ -404,6 +434,16 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
0x1B9A, /* XAVI */
0x2812),
.driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x28A1),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x218A),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
/* WB335 1-ANT / Antenna Diversity */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
@@ -448,13 +488,18 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
- PCI_VENDOR_ID_AZWAVE,
- 0x213A),
+ 0x11AD, /* LITEON */
+ 0x06A2),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
- PCI_VENDOR_ID_LENOVO,
- 0x3026),
+ 0x11AD, /* LITEON */
+ 0x0682),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x213A),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
@@ -468,38 +513,41 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
+ PCI_VENDOR_ID_HP,
+ 0x2005),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
PCI_VENDOR_ID_DELL,
- 0x020E),
+ 0x020C),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
- /* WB335 2-ANT */
+ /* WB335 2-ANT / Antenna-Diversity */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_SAMSUNG,
0x411A),
- .driver_data = ATH9K_PCI_AR9565_2ANT },
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_SAMSUNG,
0x411B),
- .driver_data = ATH9K_PCI_AR9565_2ANT },
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_SAMSUNG,
0x411C),
- .driver_data = ATH9K_PCI_AR9565_2ANT },
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_SAMSUNG,
0x411D),
- .driver_data = ATH9K_PCI_AR9565_2ANT },
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_SAMSUNG,
0x411E),
- .driver_data = ATH9K_PCI_AR9565_2ANT },
-
- /* WB335 2-ANT / Antenna-Diversity */
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_ATHEROS,
@@ -527,11 +575,31 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
+ 0x11AD, /* LITEON */
+ 0x0832),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0692),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
PCI_VENDOR_ID_AZWAVE,
0x2130),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x213B),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2182),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
0x144F, /* ASKEY */
0x7202),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
@@ -542,9 +610,49 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
+ 0x1B9A, /* XAVI */
+ 0x28A2),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
0x185F, /* WNC */
0x3027),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0xA120),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE07F),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE081),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3026),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_LENOVO,
+ 0x4026),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ASUSTEK,
+ 0x85F2),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_DELL,
+ 0x020E),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
/* PCI-E AR9565 (WB335) */
{ PCI_VDEVICE(ATHEROS, 0x0036),
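The new table entries above match on both the Atheros device ID and the OEM subsystem IDs via PCI_DEVICE_SUB, with per-card quirks carried in .driver_data. A hypothetical probe-side sketch of how such flags are typically consumed; the function body is illustrative, not copied from ath9k's real probe path, and it assumes the ATH9K_PCI_* flag definitions from ath9k.h:

#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	unsigned long flags = id->driver_data;	/* flags from the table above */

	if (flags & ATH9K_PCI_AR9565_1ANT)
		dev_info(&pdev->dev, "AR9565 single-antenna card\n");
	if (flags & ATH9K_PCI_BT_ANT_DIV)
		dev_info(&pdev->dev, "BT antenna diversity supported\n");

	return 0;
}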
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 95ddca5495d4..a0ebdd000fc2 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -15,7 +15,6 @@
*/
#include <linux/dma-mapping.h>
-#include <linux/relay.h>
#include "ath9k.h"
#include "ar9003_mac.h"
@@ -420,7 +419,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
}
- if (AR_SREV_9550(sc->sc_ah))
+ if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah))
rfilt |= ATH9K_RX_FILTER_4ADDRESS;
return rfilt;
@@ -851,20 +850,15 @@ static int ath9k_process_rate(struct ath_common *common,
enum ieee80211_band band;
unsigned int i = 0;
struct ath_softc __maybe_unused *sc = common->priv;
+ struct ath_hw *ah = sc->sc_ah;
- band = hw->conf.chandef.chan->band;
+ band = ah->curchan->chan->band;
sband = hw->wiphy->bands[band];
- switch (hw->conf.chandef.width) {
- case NL80211_CHAN_WIDTH_5:
+ if (IS_CHAN_QUARTER_RATE(ah->curchan))
rxs->flag |= RX_FLAG_5MHZ;
- break;
- case NL80211_CHAN_WIDTH_10:
+ else if (IS_CHAN_HALF_RATE(ah->curchan))
rxs->flag |= RX_FLAG_10MHZ;
- break;
- default:
- break;
- }
if (rx_stats->rs_rate & 0x80) {
/* HT rate */
@@ -906,6 +900,7 @@ static void ath9k_process_rssi(struct ath_common *common,
struct ath_hw *ah = common->ah;
int last_rssi;
int rssi = rx_stats->rs_rssi;
+ int i, j;
/*
* RSSI is not available for subframes in an A-MPDU.
@@ -924,6 +919,20 @@ static void ath9k_process_rssi(struct ath_common *common,
return;
}
+ for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
+ s8 rssi;
+
+ if (!(ah->rxchainmask & BIT(i)))
+ continue;
+
+ rssi = rx_stats->rs_rssi_ctl[i];
+ if (rssi != ATH9K_RSSI_BAD) {
+ rxs->chains |= BIT(j);
+ rxs->chain_signal[j] = ah->noise + rssi;
+ }
+ j++;
+ }
+
/*
* Update Beacon RSSI, this is used by ANI.
*/
@@ -960,201 +969,6 @@ static void ath9k_process_tsf(struct ath_rx_status *rs,
rxs->mactime += 0x100000000ULL;
}
-#ifdef CONFIG_ATH9K_DEBUGFS
-static s8 fix_rssi_inv_only(u8 rssi_val)
-{
- if (rssi_val == 128)
- rssi_val = 0;
- return (s8) rssi_val;
-}
-#endif
-
-/* returns 1 if this was a spectral frame, even if not handled. */
-static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
- struct ath_rx_status *rs, u64 tsf)
-{
-#ifdef CONFIG_ATH9K_DEBUGFS
- struct ath_hw *ah = sc->sc_ah;
- u8 num_bins, *bins, *vdata = (u8 *)hdr;
- struct fft_sample_ht20 fft_sample_20;
- struct fft_sample_ht20_40 fft_sample_40;
- struct fft_sample_tlv *tlv;
- struct ath_radar_info *radar_info;
- int len = rs->rs_datalen;
- int dc_pos;
- u16 fft_len, length, freq = ah->curchan->chan->center_freq;
- enum nl80211_channel_type chan_type;
-
- /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
- * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
- * yet, but this is supposed to be possible as well.
- */
- if (rs->rs_phyerr != ATH9K_PHYERR_RADAR &&
- rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT &&
- rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL)
- return 0;
-
- /* check if spectral scan bit is set. This does not have to be checked
- * if received through a SPECTRAL phy error, but shouldn't hurt.
- */
- radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
- if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
- return 0;
-
- chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef);
- if ((chan_type == NL80211_CHAN_HT40MINUS) ||
- (chan_type == NL80211_CHAN_HT40PLUS)) {
- fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
- num_bins = SPECTRAL_HT20_40_NUM_BINS;
- bins = (u8 *)fft_sample_40.data;
- } else {
- fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
- num_bins = SPECTRAL_HT20_NUM_BINS;
- bins = (u8 *)fft_sample_20.data;
- }
-
- /* Variation in the data length is possible and will be fixed later */
- if ((len > fft_len + 2) || (len < fft_len - 1))
- return 1;
-
- switch (len - fft_len) {
- case 0:
- /* length correct, nothing to do. */
- memcpy(bins, vdata, num_bins);
- break;
- case -1:
- /* first byte missing, duplicate it. */
- memcpy(&bins[1], vdata, num_bins - 1);
- bins[0] = vdata[0];
- break;
- case 2:
- /* MAC added 2 extra bytes at bin 30 and 32, remove them. */
- memcpy(bins, vdata, 30);
- bins[30] = vdata[31];
- memcpy(&bins[31], &vdata[33], num_bins - 31);
- break;
- case 1:
- /* MAC added 2 extra bytes AND first byte is missing. */
- bins[0] = vdata[0];
- memcpy(&bins[1], vdata, 30);
- bins[31] = vdata[31];
- memcpy(&bins[32], &vdata[33], num_bins - 32);
- break;
- default:
- return 1;
- }
-
- /* DC value (value in the middle) is the blind spot of the spectral
- * sample and invalid, interpolate it.
- */
- dc_pos = num_bins / 2;
- bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
-
- if ((chan_type == NL80211_CHAN_HT40MINUS) ||
- (chan_type == NL80211_CHAN_HT40PLUS)) {
- s8 lower_rssi, upper_rssi;
- s16 ext_nf;
- u8 lower_max_index, upper_max_index;
- u8 lower_bitmap_w, upper_bitmap_w;
- u16 lower_mag, upper_mag;
- struct ath9k_hw_cal_data *caldata = ah->caldata;
- struct ath_ht20_40_mag_info *mag_info;
-
- if (caldata)
- ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
- caldata->nfCalHist[3].privNF);
- else
- ext_nf = ATH_DEFAULT_NOISE_FLOOR;
-
- length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
- fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
- fft_sample_40.tlv.length = __cpu_to_be16(length);
- fft_sample_40.freq = __cpu_to_be16(freq);
- fft_sample_40.channel_type = chan_type;
-
- if (chan_type == NL80211_CHAN_HT40PLUS) {
- lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
- upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
-
- fft_sample_40.lower_noise = ah->noise;
- fft_sample_40.upper_noise = ext_nf;
- } else {
- lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
- upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
-
- fft_sample_40.lower_noise = ext_nf;
- fft_sample_40.upper_noise = ah->noise;
- }
- fft_sample_40.lower_rssi = lower_rssi;
- fft_sample_40.upper_rssi = upper_rssi;
-
- mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
- lower_mag = spectral_max_magnitude(mag_info->lower_bins);
- upper_mag = spectral_max_magnitude(mag_info->upper_bins);
- fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
- fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
- lower_max_index = spectral_max_index(mag_info->lower_bins);
- upper_max_index = spectral_max_index(mag_info->upper_bins);
- fft_sample_40.lower_max_index = lower_max_index;
- fft_sample_40.upper_max_index = upper_max_index;
- lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
- upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
- fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
- fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
- fft_sample_40.max_exp = mag_info->max_exp & 0xf;
-
- fft_sample_40.tsf = __cpu_to_be64(tsf);
-
- tlv = (struct fft_sample_tlv *)&fft_sample_40;
- } else {
- u8 max_index, bitmap_w;
- u16 magnitude;
- struct ath_ht20_mag_info *mag_info;
-
- length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
- fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
- fft_sample_20.tlv.length = __cpu_to_be16(length);
- fft_sample_20.freq = __cpu_to_be16(freq);
-
- fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
- fft_sample_20.noise = ah->noise;
-
- mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
- magnitude = spectral_max_magnitude(mag_info->all_bins);
- fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
- max_index = spectral_max_index(mag_info->all_bins);
- fft_sample_20.max_index = max_index;
- bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
- fft_sample_20.bitmap_weight = bitmap_w;
- fft_sample_20.max_exp = mag_info->max_exp & 0xf;
-
- fft_sample_20.tsf = __cpu_to_be64(tsf);
-
- tlv = (struct fft_sample_tlv *)&fft_sample_20;
- }
-
- ath_debug_send_fft_sample(sc, tlv);
- return 1;
-#else
- return 0;
-#endif
-}
-
-static bool ath9k_is_mybeacon(struct ath_softc *sc, struct ieee80211_hdr *hdr)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (ieee80211_is_beacon(hdr->frame_control)) {
- RX_STAT_INC(rx_beacons);
- if (!is_zero_ether_addr(common->curbssid) &&
- ether_addr_equal(hdr->addr3, common->curbssid))
- return true;
- }
-
- return false;
-}
-
/*
* For Decrypt or Demic errors, we only mark packet status here and always push
* up the frame up to let mac80211 handle the actual error case, be it no
@@ -1242,10 +1056,17 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
goto exit;
}
- rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr);
- if (rx_stats->is_mybeacon) {
- sc->hw_busy_count = 0;
- ath_start_rx_poll(sc, 3);
+ if (ath_is_mybeacon(common, hdr)) {
+ RX_STAT_INC(rx_beacons);
+ rx_stats->is_mybeacon = true;
+ }
+
+ /*
+ * This shouldn't happen, but have a safety check anyway.
+ */
+ if (WARN_ON(!ah->curchan)) {
+ ret = -EINVAL;
+ goto exit;
}
if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
@@ -1255,8 +1076,8 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
ath9k_process_rssi(common, hw, rx_stats, rx_status);
- rx_status->band = hw->conf.chandef.chan->band;
- rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = ah->curchan->chan->band;
+ rx_status->freq = ah->curchan->chan->center_freq;
rx_status->antenna = rx_stats->rs_antenna;
rx_status->flag |= RX_FLAG_MACTIME_END;
@@ -1521,8 +1342,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
ath9k_antenna_check(sc, &rs);
-
ath9k_apply_ampdu_details(sc, &rs, rxs);
+ ath_debug_rate_stats(sc, &rs, skb);
ieee80211_rx(hw, skb);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index a13b2d143d9e..b1fd3fa84983 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -304,6 +304,7 @@
#define AR_IMR_S2 0x00ac
#define AR_IMR_S2_QCU_TXURN 0x000003FF
#define AR_IMR_S2_QCU_TXURN_S 0
+#define AR_IMR_S2_BB_WATCHDOG 0x00010000
#define AR_IMR_S2_CST 0x00400000
#define AR_IMR_S2_GTT 0x00800000
#define AR_IMR_S2_TIM 0x01000000
@@ -809,7 +810,12 @@
#define AR_SREV_REVISION_9462_21 3
#define AR_SREV_VERSION_9565 0x2C0
#define AR_SREV_REVISION_9565_10 0
+#define AR_SREV_REVISION_9565_101 1
+#define AR_SREV_REVISION_9565_11 2
#define AR_SREV_VERSION_9550 0x400
+#define AR_SREV_VERSION_9531 0x500
+#define AR_SREV_REVISION_9531_10 0
+#define AR_SREV_REVISION_9531_11 1
#define AR_SREV_5416(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -881,9 +887,6 @@
#define AR_SREV_9330(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9330))
-#define AR_SREV_9330_10(_ah) \
- (AR_SREV_9330((_ah)) && \
- ((_ah)->hw_version.macRev == AR_SREV_REVISION_9330_10))
#define AR_SREV_9330_11(_ah) \
(AR_SREV_9330((_ah)) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9330_11))
@@ -927,10 +930,18 @@
#define AR_SREV_9565(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
-
#define AR_SREV_9565_10(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_10))
+#define AR_SREV_9565_101(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_101))
+#define AR_SREV_9565_11(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_11))
+#define AR_SREV_9565_11_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9565_11))
#define AR_SREV_9550(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550))
@@ -938,11 +949,19 @@
#define AR_SREV_9580(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9580_10))
-
#define AR_SREV_9580_10(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9580_10))
+#define AR_SREV_9531(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531))
+#define AR_SREV_9531_10(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9531_10))
+#define AR_SREV_9531_11(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9531_11))
+
/* NOTE: When adding chips newer than Peacock, add chip check here */
#define AR_SREV_9580_10_OR_LATER(_ah) \
(AR_SREV_9580(_ah))
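The new AR_SREV_9531* and AR_SREV_9565_1x* checks follow the existing pattern in this header: macVersion identifies the family, macRev a specific revision. They are used like the older macros; the recv.c hunk above already gates the 4-address RX filter on AR_SREV_9531(). A tiny illustrative use, where the initvals helpers are hypothetical:

/* Hypothetical example: selecting per-revision initialization. */
if (AR_SREV_9531_10(ah))
	ar9531_1p0_load_initvals(ah);	/* hypothetical helpers */
else if (AR_SREV_9531_11(ah))
	ar9531_1p1_load_initvals(ah);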
diff --git a/drivers/net/wireless/ath/ath9k/spectral.c b/drivers/net/wireless/ath/ath9k/spectral.c
new file mode 100644
index 000000000000..99f4de95c264
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/spectral.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/relay.h>
+#include "ath9k.h"
+
+static s8 fix_rssi_inv_only(u8 rssi_val)
+{
+ if (rssi_val == 128)
+ rssi_val = 0;
+ return (s8) rssi_val;
+}
+
+static void ath_debug_send_fft_sample(struct ath_softc *sc,
+ struct fft_sample_tlv *fft_sample_tlv)
+{
+ int length;
+ if (!sc->rfs_chan_spec_scan)
+ return;
+
+ length = __be16_to_cpu(fft_sample_tlv->length) +
+ sizeof(*fft_sample_tlv);
+ relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv, length);
+}
+
+/* returns 1 if this was a spectral frame, even if not handled. */
+int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rs, u64 tsf)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ u8 num_bins, *bins, *vdata = (u8 *)hdr;
+ struct fft_sample_ht20 fft_sample_20;
+ struct fft_sample_ht20_40 fft_sample_40;
+ struct fft_sample_tlv *tlv;
+ struct ath_radar_info *radar_info;
+ int len = rs->rs_datalen;
+ int dc_pos;
+ u16 fft_len, length, freq = ah->curchan->chan->center_freq;
+ enum nl80211_channel_type chan_type;
+
+ /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
+ * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
+ * yet, but this is supposed to be possible as well.
+ */
+ if (rs->rs_phyerr != ATH9K_PHYERR_RADAR &&
+ rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT &&
+ rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL)
+ return 0;
+
+ /* check if spectral scan bit is set. This does not have to be checked
+ * if received through a SPECTRAL phy error, but shouldn't hurt.
+ */
+ radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
+ if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
+ return 0;
+
+ chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef);
+ if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+ (chan_type == NL80211_CHAN_HT40PLUS)) {
+ fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
+ num_bins = SPECTRAL_HT20_40_NUM_BINS;
+ bins = (u8 *)fft_sample_40.data;
+ } else {
+ fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
+ num_bins = SPECTRAL_HT20_NUM_BINS;
+ bins = (u8 *)fft_sample_20.data;
+ }
+
+ /* Variation in the data length is possible and will be fixed later */
+ if ((len > fft_len + 2) || (len < fft_len - 1))
+ return 1;
+
+ switch (len - fft_len) {
+ case 0:
+ /* length correct, nothing to do. */
+ memcpy(bins, vdata, num_bins);
+ break;
+ case -1:
+ /* first byte missing, duplicate it. */
+ memcpy(&bins[1], vdata, num_bins - 1);
+ bins[0] = vdata[0];
+ break;
+ case 2:
+ /* MAC added 2 extra bytes at bin 30 and 32, remove them. */
+ memcpy(bins, vdata, 30);
+ bins[30] = vdata[31];
+ memcpy(&bins[31], &vdata[33], num_bins - 31);
+ break;
+ case 1:
+ /* MAC added 2 extra bytes AND first byte is missing. */
+ bins[0] = vdata[0];
+ memcpy(&bins[1], vdata, 30);
+ bins[31] = vdata[31];
+ memcpy(&bins[32], &vdata[33], num_bins - 32);
+ break;
+ default:
+ return 1;
+ }
+
+ /* DC value (value in the middle) is the blind spot of the spectral
+ * sample and invalid, interpolate it.
+ */
+ dc_pos = num_bins / 2;
+ bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
+
+ if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+ (chan_type == NL80211_CHAN_HT40PLUS)) {
+ s8 lower_rssi, upper_rssi;
+ s16 ext_nf;
+ u8 lower_max_index, upper_max_index;
+ u8 lower_bitmap_w, upper_bitmap_w;
+ u16 lower_mag, upper_mag;
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath_ht20_40_mag_info *mag_info;
+
+ if (caldata)
+ ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
+ caldata->nfCalHist[3].privNF);
+ else
+ ext_nf = ATH_DEFAULT_NOISE_FLOOR;
+
+ length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
+ fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
+ fft_sample_40.tlv.length = __cpu_to_be16(length);
+ fft_sample_40.freq = __cpu_to_be16(freq);
+ fft_sample_40.channel_type = chan_type;
+
+ if (chan_type == NL80211_CHAN_HT40PLUS) {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+
+ fft_sample_40.lower_noise = ah->noise;
+ fft_sample_40.upper_noise = ext_nf;
+ } else {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+
+ fft_sample_40.lower_noise = ext_nf;
+ fft_sample_40.upper_noise = ah->noise;
+ }
+ fft_sample_40.lower_rssi = lower_rssi;
+ fft_sample_40.upper_rssi = upper_rssi;
+
+ mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
+ lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+ upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+ fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+ fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+ lower_max_index = spectral_max_index(mag_info->lower_bins);
+ upper_max_index = spectral_max_index(mag_info->upper_bins);
+ fft_sample_40.lower_max_index = lower_max_index;
+ fft_sample_40.upper_max_index = upper_max_index;
+ lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
+ upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
+ fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
+ fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
+ fft_sample_40.max_exp = mag_info->max_exp & 0xf;
+
+ fft_sample_40.tsf = __cpu_to_be64(tsf);
+
+ tlv = (struct fft_sample_tlv *)&fft_sample_40;
+ } else {
+ u8 max_index, bitmap_w;
+ u16 magnitude;
+ struct ath_ht20_mag_info *mag_info;
+
+ length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
+ fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
+ fft_sample_20.tlv.length = __cpu_to_be16(length);
+ fft_sample_20.freq = __cpu_to_be16(freq);
+
+ fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+ fft_sample_20.noise = ah->noise;
+
+ mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+ magnitude = spectral_max_magnitude(mag_info->all_bins);
+ fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+ max_index = spectral_max_index(mag_info->all_bins);
+ fft_sample_20.max_index = max_index;
+ bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
+ fft_sample_20.bitmap_weight = bitmap_w;
+ fft_sample_20.max_exp = mag_info->max_exp & 0xf;
+
+ fft_sample_20.tsf = __cpu_to_be64(tsf);
+
+ tlv = (struct fft_sample_tlv *)&fft_sample_20;
+ }
+
+ ath_debug_send_fft_sample(sc, tlv);
+
+ return 1;
+}
+
+/*********************/
+/* spectral_scan_ctl */
+/*********************/
+
+static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char *mode = "";
+ unsigned int len;
+
+ switch (sc->spectral_mode) {
+ case SPECTRAL_DISABLED:
+ mode = "disable";
+ break;
+ case SPECTRAL_BACKGROUND:
+ mode = "background";
+ break;
+ case SPECTRAL_CHANSCAN:
+ mode = "chanscan";
+ break;
+ case SPECTRAL_MANUAL:
+ mode = "manual";
+ break;
+ }
+ len = strlen(mode);
+ return simple_read_from_buffer(user_buf, count, ppos, mode, len);
+}
+
+static ssize_t write_file_spec_scan_ctl(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ ssize_t len;
+
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return -EOPNOTSUPP;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ if (strncmp("trigger", buf, 7) == 0) {
+ ath9k_spectral_scan_trigger(sc->hw);
+ } else if (strncmp("background", buf, 9) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
+ ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
+ } else if (strncmp("chanscan", buf, 8) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_CHANSCAN);
+ ath_dbg(common, CONFIG, "spectral scan: channel scan mode enabled\n");
+ } else if (strncmp("manual", buf, 6) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_MANUAL);
+ ath_dbg(common, CONFIG, "spectral scan: manual mode enabled\n");
+ } else if (strncmp("disable", buf, 7) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_DISABLED);
+ ath_dbg(common, CONFIG, "spectral scan: disabled\n");
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_spec_scan_ctl = {
+ .read = read_file_spec_scan_ctl,
+ .write = write_file_spec_scan_ctl,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/*************************/
+/* spectral_short_repeat */
+/*************************/
+
+static ssize_t read_file_spectral_short_repeat(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.short_repeat);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_short_repeat(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ sc->spec_config.short_repeat = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_short_repeat = {
+ .read = read_file_spectral_short_repeat,
+ .write = write_file_spectral_short_repeat,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/******************/
+/* spectral_count */
+/******************/
+
+static ssize_t read_file_spectral_count(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.count);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_count(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ sc->spec_config.count = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_count = {
+ .read = read_file_spectral_count,
+ .write = write_file_spectral_count,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/*******************/
+/* spectral_period */
+/*******************/
+
+static ssize_t read_file_spectral_period(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.period);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_period(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ sc->spec_config.period = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_period = {
+ .read = read_file_spectral_period,
+ .write = write_file_spectral_period,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/***********************/
+/* spectral_fft_period */
+/***********************/
+
+static ssize_t read_file_spectral_fft_period(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.fft_period);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_fft_period(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 15)
+ return -EINVAL;
+
+ sc->spec_config.fft_period = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_fft_period = {
+ .read = read_file_spectral_fft_period,
+ .write = write_file_spectral_fft_period,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/*******************/
+/* Relay interface */
+/*******************/
+
+static struct dentry *create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ buf_file = debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+ *is_global = 1;
+ return buf_file;
+}
+
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+
+ return 0;
+}
+
+static struct rchan_callbacks rfs_spec_scan_cb = {
+ .create_buf_file = create_buf_file_handler,
+ .remove_buf_file = remove_buf_file_handler,
+};
+
+/*********************/
+/* Debug Init/Deinit */
+/*********************/
+
+void ath9k_spectral_deinit_debug(struct ath_softc *sc)
+{
+ if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
+ relay_close(sc->rfs_chan_spec_scan);
+ sc->rfs_chan_spec_scan = NULL;
+ }
+}
+
+void ath9k_spectral_init_debug(struct ath_softc *sc)
+{
+ sc->rfs_chan_spec_scan = relay_open("spectral_scan",
+ sc->debug.debugfs_phy,
+ 1024, 256, &rfs_spec_scan_cb,
+ NULL);
+ debugfs_create_file("spectral_scan_ctl",
+ S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spec_scan_ctl);
+ debugfs_create_file("spectral_short_repeat",
+ S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spectral_short_repeat);
+ debugfs_create_file("spectral_count",
+ S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spectral_count);
+ debugfs_create_file("spectral_period",
+ S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spectral_period);
+ debugfs_create_file("spectral_fft_period",
+ S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spectral_fft_period);
+}
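
A hedged sketch of how userspace might drain the relay buffer created by relay_open() above; relay creates one file per CPU ("spectral_scan0", "spectral_scan1", ...) in the same debugfs directory, and the exact path shown is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path: per-CPU relay file created by relay_open() above. */
	int fd = open("/sys/kernel/debug/ieee80211/phy0/ath9k/spectral_scan0",
		      O_RDONLY);
	char buf[4096];
	ssize_t n;

	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* raw fft_sample_tlv stream */
	close(fd);
	return 0;
}
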
diff --git a/drivers/net/wireless/ath/ath9k/spectral.h b/drivers/net/wireless/ath/ath9k/spectral.h
new file mode 100644
index 000000000000..ead63412ee1a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/spectral.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef SPECTRAL_H
+#define SPECTRAL_H
+
+/* enum spectral_mode:
+ *
+ * @SPECTRAL_DISABLED: spectral mode is disabled
+ * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
+ * something else.
+ * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
+ * is performed manually.
+ * @SPECTRAL_CHANSCAN: Like manual, but also triggered when changing channels
+ * during a channel scan.
+ */
+enum spectral_mode {
+ SPECTRAL_DISABLED = 0,
+ SPECTRAL_BACKGROUND,
+ SPECTRAL_MANUAL,
+ SPECTRAL_CHANSCAN,
+};
+
+#define SPECTRAL_SCAN_BITMASK 0x10
+/* Radar info packet format, used for DFS and spectral formats. */
+struct ath_radar_info {
+ u8 pulse_length_pri;
+ u8 pulse_length_ext;
+ u8 pulse_bw_info;
+} __packed;
+
+/* The HT20 spectral data has 4 bytes of additional information at its end.
+ *
+ * [7:0]: all bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: all bins max_magnitude[9:2]
+ * [7:0]: all bins {max_index[5:0], max_magnitude[11:10]}
+ * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
+ */
+struct ath_ht20_mag_info {
+ u8 all_bins[3];
+ u8 max_exp;
+} __packed;
+
+#define SPECTRAL_HT20_NUM_BINS 56
+
+/* WARNING: don't actually use this struct! MAC may vary the amount of
+ * data by -1/+2. This struct is for reference only.
+ */
+struct ath_ht20_fft_packet {
+ u8 data[SPECTRAL_HT20_NUM_BINS];
+ struct ath_ht20_mag_info mag_info;
+ struct ath_radar_info radar_info;
+} __packed;
+
+#define SPECTRAL_HT20_TOTAL_DATA_LEN (sizeof(struct ath_ht20_fft_packet))
+
+/* Dynamic 20/40 mode:
+ *
+ * [7:0]: lower bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: lower bins max_magnitude[9:2]
+ * [7:0]: lower bins {max_index[5:0], max_magnitude[11:10]}
+ * [7:0]: upper bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: upper bins max_magnitude[9:2]
+ * [7:0]: upper bins {max_index[5:0], max_magnitude[11:10]}
+ * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
+ */
+struct ath_ht20_40_mag_info {
+ u8 lower_bins[3];
+ u8 upper_bins[3];
+ u8 max_exp;
+} __packed;
+
+#define SPECTRAL_HT20_40_NUM_BINS 128
+
+/* WARNING: don't actually use this struct! MAC may vary the amount of
+ * data. This struct is for reference only.
+ */
+struct ath_ht20_40_fft_packet {
+ u8 data[SPECTRAL_HT20_40_NUM_BINS];
+ struct ath_ht20_40_mag_info mag_info;
+ struct ath_radar_info radar_info;
+} __packed;
+
+
+#define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet))
+
+/* grabs the max magnitude from the all/upper/lower bins */
+static inline u16 spectral_max_magnitude(u8 *bins)
+{
+ return (bins[0] & 0xc0) >> 6 |
+ (bins[1] & 0xff) << 2 |
+ (bins[2] & 0x03) << 10;
+}
+
+/* return the max index from the all/upper/lower bins */
+static inline u8 spectral_max_index(u8 *bins)
+{
+ s8 m = (bins[2] & 0xfc) >> 2;
+
+ /* TODO: this still doesn't always report the right values ... */
+ if (m > 32)
+ m |= 0xe0;
+ else
+ m &= ~0xe0;
+
+ return m + 29;
+}
+
+/* return the bitmap weight from the all/upper/lower bins */
+static inline u8 spectral_bitmap_weight(u8 *bins)
+{
+ return bins[0] & 0x3f;
+}
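
A worked example of the packing these helpers decode (illustration only, values chosen arbitrarily):

/* For bins[] = { 0x7f, 0x12, 0x48 }:
 *   bitmap_weight = 0x7f & 0x3f                = 63
 *   max_magnitude = ((0x7f & 0xc0) >> 6)       =  1
 *                 | ((0x12 & 0xff) << 2)       = 72
 *                 | ((0x48 & 0x03) << 10)      =  0   -> 73
 *   max_index     = (0x48 & 0xfc) >> 2 = 18; 18 <= 32, so 18 + 29 = 47
 */
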
+
+/* FFT sample format given to userspace via debugfs.
+ *
+ * Please keep the type/length at the front position and change
+ * other fields after adding another sample type
+ *
+ * TODO: this might need rework when switching to nl80211-based
+ * interface.
+ */
+enum ath_fft_sample_type {
+ ATH_FFT_SAMPLE_HT20 = 1,
+ ATH_FFT_SAMPLE_HT20_40,
+};
+
+struct fft_sample_tlv {
+ u8 type; /* see ath_fft_sample */
+ __be16 length;
+ /* type dependent data follows */
+} __packed;
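
A hedged userspace sketch of walking the TLV stream the driver emits; it assumes (as the driver's sample construction suggests) that tlv.length counts the payload bytes following this 3-byte header.

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustration only: iterate over a buffer of concatenated samples. */
static void walk_fft_samples(const uint8_t *buf, size_t len)
{
	while (len >= 3) {		/* u8 type + __be16 length */
		uint8_t type = buf[0];
		uint16_t plen;

		memcpy(&plen, buf + 1, sizeof(plen));
		plen = ntohs(plen);
		if (len < 3u + plen)
			break;		/* truncated tail, stop */
		/* type == ATH_FFT_SAMPLE_HT20 or ATH_FFT_SAMPLE_HT20_40 */
		(void)type;
		buf += 3 + plen;
		len -= 3 + plen;
	}
}
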
+
+struct fft_sample_ht20 {
+ struct fft_sample_tlv tlv;
+
+ u8 max_exp;
+
+ __be16 freq;
+ s8 rssi;
+ s8 noise;
+
+ __be16 max_magnitude;
+ u8 max_index;
+ u8 bitmap_weight;
+
+ __be64 tsf;
+
+ u8 data[SPECTRAL_HT20_NUM_BINS];
+} __packed;
+
+struct fft_sample_ht20_40 {
+ struct fft_sample_tlv tlv;
+
+ u8 channel_type;
+ __be16 freq;
+
+ s8 lower_rssi;
+ s8 upper_rssi;
+
+ __be64 tsf;
+
+ s8 lower_noise;
+ s8 upper_noise;
+
+ __be16 lower_max_magnitude;
+ __be16 upper_max_magnitude;
+
+ u8 lower_max_index;
+ u8 upper_max_index;
+
+ u8 lower_bitmap_weight;
+ u8 upper_bitmap_weight;
+
+ u8 max_exp;
+
+ u8 data[SPECTRAL_HT20_40_NUM_BINS];
+} __packed;
+
+void ath9k_spectral_init_debug(struct ath_softc *sc);
+void ath9k_spectral_deinit_debug(struct ath_softc *sc);
+
+void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
+int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
+ enum spectral_mode spectral_mode);
+
+#ifdef CONFIG_ATH9K_DEBUGFS
+int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rs, u64 tsf);
+#else
+static inline int ath_process_fft(struct ath_softc *sc,
+ struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rs, u64 tsf)
+{
+ return 0;
+}
+#endif /* CONFIG_ATH9K_DEBUGFS */
+
+#endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
new file mode 100644
index 000000000000..b686a7498450
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+static void ath9k_tx99_stop(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath_drain_all_txq(sc);
+ ath_startrecv(sc);
+
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+
+ ieee80211_wake_queues(sc->hw);
+
+ kfree_skb(sc->tx99_skb);
+ sc->tx99_skb = NULL;
+ sc->tx99_state = false;
+
+ ath9k_hw_tx99_stop(sc->sc_ah);
+ ath_dbg(common, XMIT, "TX99 stopped\n");
+}
+
+static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
+{
+ static u8 PN9Data[] = {0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
+ 0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
+ 0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
+ 0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
+ 0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
+ 0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
+ 0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
+ 0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
+ u32 len = 1200;
+ struct ieee80211_tx_rate *rate;
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, len);
+
+ memset(skb->data, 0, len);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA);
+ hdr->duration_id = 0;
+
+ memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
+
+ hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
+
+ tx_info = IEEE80211_SKB_CB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
+ rate = &tx_info->control.rates[0];
+ tx_info->band = hw->conf.chandef.chan->band;
+ tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
+ tx_info->control.vif = sc->tx99_vif;
+ rate->count = 1;
+ if (ah->curchan && IS_CHAN_HT(ah->curchan)) {
+ rate->flags |= IEEE80211_TX_RC_MCS;
+ if (IS_CHAN_HT40(ah->curchan))
+ rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ }
+
+ memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
+
+ return skb;
+}
+
+static void ath9k_tx99_deinit(struct ath_softc *sc)
+{
+ ath_reset(sc);
+
+ ath9k_ps_wakeup(sc);
+ ath9k_tx99_stop(sc);
+ ath9k_ps_restore(sc);
+}
+
+static int ath9k_tx99_init(struct ath_softc *sc)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_tx_control txctl;
+ int r;
+
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+ ath_err(common,
+ "driver is in invalid state unable to use TX99");
+ return -EINVAL;
+ }
+
+ sc->tx99_skb = ath9k_build_tx99_skb(sc);
+ if (!sc->tx99_skb)
+ return -ENOMEM;
+
+ memset(&txctl, 0, sizeof(txctl));
+ txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
+
+ ath_reset(sc);
+
+ ath9k_ps_wakeup(sc);
+
+ ath9k_hw_disable_interrupts(ah);
+ atomic_set(&ah->intr_ref_cnt, -1);
+ ath_drain_all_txq(sc);
+ ath_stoprecv(sc);
+
+ sc->tx99_state = true;
+
+ ieee80211_stop_queues(hw);
+
+ if (sc->tx99_power == MAX_RATE_POWER + 1)
+ sc->tx99_power = MAX_RATE_POWER;
+
+ ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
+ r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
+ if (r) {
+ ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
+ return r;
+ }
+
+ ath_dbg(common, XMIT, "TX99 xmit started using %d ( %ddBm)\n",
+ sc->tx99_power,
+ sc->tx99_power / 2);
+
+ /* We leave the hardware awake as it will be chugging on */
+
+ return 0;
+}
+
+static ssize_t read_file_tx99(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[3];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->tx99_state);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ bool start;
+ ssize_t len;
+ int r;
+
+ if (sc->nvifs > 1)
+ return -EOPNOTSUPP;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ if (strtobool(buf, &start))
+ return -EINVAL;
+
+ if (start == sc->tx99_state) {
+ if (!start)
+ return count;
+ ath_dbg(common, XMIT, "Resetting TX99\n");
+ ath9k_tx99_deinit(sc);
+ }
+
+ if (!start) {
+ ath9k_tx99_deinit(sc);
+ return count;
+ }
+
+ r = ath9k_tx99_init(sc);
+ if (r)
+ return r;
+
+ return count;
+}
+
+static const struct file_operations fops_tx99 = {
+ .read = read_file_tx99,
+ .write = write_file_tx99,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_tx99_power(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d (%d dBm)\n",
+ sc->tx99_power,
+ sc->tx99_power / 2);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99_power(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ int r;
+ u8 tx_power;
+
+ r = kstrtou8_from_user(user_buf, count, 0, &tx_power);
+ if (r)
+ return r;
+
+ if (tx_power > MAX_RATE_POWER)
+ return -EINVAL;
+
+ sc->tx99_power = tx_power;
+
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_tx99_set_txpower(sc->sc_ah, sc->tx99_power);
+ ath9k_ps_restore(sc);
+
+ return count;
+}
+
+static const struct file_operations fops_tx99_power = {
+ .read = read_file_tx99_power,
+ .write = write_file_tx99_power,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_tx99_init_debug(struct ath_softc *sc)
+{
+ if (!AR_SREV_9300_20_OR_LATER(sc->sc_ah))
+ return;
+
+ debugfs_create_file("tx99", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_tx99);
+ debugfs_create_file("tx99_power", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_tx99_power);
+}
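
For reference, a hedged userspace sketch of exercising the two TX99 files registered above; the path assumes phy0 and a debugfs mount at /sys/kernel/debug, and tx99_power is in units of half a dBm, as the read handler above shows.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define ATH9K_DEBUGFS "/sys/kernel/debug/ieee80211/phy0/ath9k/"

static int write_knob(const char *file, const char *val)
{
	char path[128];
	int fd, ret = 0;

	snprintf(path, sizeof(path), ATH9K_DEBUGFS "%s", file);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0)
		ret = -1;
	close(fd);
	return ret;
}

/* e.g. write_knob("tx99_power", "40");   40 * 0.5 = 20 dBm
 *      write_knob("tx99", "1");          start continuous transmit
 *      write_knob("tx99", "0");          stop
 */
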
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index 81c88dd606dc..1b3230fa3651 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -14,409 +14,347 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/export.h>
#include "ath9k.h"
-#include "reg.h"
-#include "hw-ops.h"
-const char *ath9k_hw_wow_event_to_string(u32 wow_event)
-{
- if (wow_event & AH_WOW_MAGIC_PATTERN_EN)
- return "Magic pattern";
- if (wow_event & AH_WOW_USER_PATTERN_EN)
- return "User pattern";
- if (wow_event & AH_WOW_LINK_CHANGE)
- return "Link change";
- if (wow_event & AH_WOW_BEACON_MISS)
- return "Beacon miss";
-
- return "unknown reason";
-}
-EXPORT_SYMBOL(ath9k_hw_wow_event_to_string);
+static const struct wiphy_wowlan_support ath9k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .n_patterns = MAX_NUM_USER_PATTERN,
+ .pattern_min_len = 1,
+ .pattern_max_len = MAX_PATTERN_SIZE,
+};
-static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
+static void ath9k_wow_map_triggers(struct ath_softc *sc,
+ struct cfg80211_wowlan *wowlan,
+ u32 *wow_triggers)
{
- struct ath_common *common = ath9k_hw_common(ah);
+ if (wowlan->disconnect)
+ *wow_triggers |= AH_WOW_LINK_CHANGE |
+ AH_WOW_BEACON_MISS;
+ if (wowlan->magic_pkt)
+ *wow_triggers |= AH_WOW_MAGIC_PATTERN_EN;
- REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+ if (wowlan->n_patterns)
+ *wow_triggers |= AH_WOW_USER_PATTERN_EN;
- /* set rx disable bit */
- REG_WRITE(ah, AR_CR, AR_CR_RXD);
-
- if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) {
- ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
- REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
- return;
- }
+ sc->wow_enabled = *wow_triggers;
- REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
}
-static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
+static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
{
+ struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- u8 sta_mac_addr[ETH_ALEN], ap_mac_addr[ETH_ALEN];
- u32 ctl[13] = {0};
- u32 data_word[KAL_NUM_DATA_WORDS];
- u8 i;
- u32 wow_ka_data_word0;
-
- memcpy(sta_mac_addr, common->macaddr, ETH_ALEN);
- memcpy(ap_mac_addr, common->curbssid, ETH_ALEN);
-
- /* set the transmit buffer */
- ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
- ctl[1] = 0;
- ctl[3] = 0xb; /* OFDM_6M hardware value for this rate */
- ctl[4] = 0;
- ctl[7] = (ah->txchainmask) << 2;
- ctl[2] = 0xf << 16; /* tx_tries 0 */
-
- for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
- REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
-
- REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
-
- data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
- (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
- data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
- (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
- data_word[2] = (sta_mac_addr[1] << 24) | (sta_mac_addr[0] << 16) |
- (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
- data_word[3] = (sta_mac_addr[5] << 24) | (sta_mac_addr[4] << 16) |
- (sta_mac_addr[3] << 8) | (sta_mac_addr[2]);
- data_word[4] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
- (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
- data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
-
- if (AR_SREV_9462_20(ah)) {
- /* AR9462 2.0 has an extra descriptor word (time based
- * discard) compared to other chips */
- REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
- wow_ka_data_word0 = AR_WOW_TXBUF(13);
- } else {
- wow_ka_data_word0 = AR_WOW_TXBUF(12);
- }
-
- for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
- REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]);
-
-}
-
-void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
- u8 *user_mask, int pattern_count,
- int pattern_len)
-{
- int i;
- u32 pattern_val, mask_val;
- u32 set, clr;
+ int pattern_count = 0;
+ int i, byte_cnt;
+ u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
+ u8 dis_deauth_mask[MAX_PATTERN_SIZE];
- /* FIXME: should check count by querying the hardware capability */
- if (pattern_count >= MAX_NUM_PATTERN)
- return;
-
- REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));
-
- /* set the registers for pattern */
- for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
- memcpy(&pattern_val, user_pattern, 4);
- REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i),
- pattern_val);
- user_pattern += 4;
- }
-
- /* set the registers for mask */
- for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
- memcpy(&mask_val, user_mask, 4);
- REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val);
- user_mask += 4;
- }
+ memset(dis_deauth_pattern, 0, MAX_PATTERN_SIZE);
+ memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE);
- /* set the pattern length to be matched
+ /*
+ * Create Disassociate / Deauthenticate packet filter
+ *
+ *   2 bytes        2 bytes    6 bytes   6 bytes   6 bytes
+ * +--------------+----------+---------+---------+---------+----
+ * + Frame Control+ Duration +   DA    +   SA    +  BSSID  +
+ * +--------------+----------+---------+---------+---------+----
*
- * AR_WOW_LENGTH1_REG1
- * bit 31:24 pattern 0 length
- * bit 23:16 pattern 1 length
- * bit 15:8 pattern 2 length
- * bit 7:0 pattern 3 length
+ * The above is the management frame format for disassociate/
+ * deauthenticate pattern; from this we need to match the first byte
+ * of 'Frame Control' and the DA, SA, and BSSID fields
+ * (skipping the 2nd byte of FC and the Duration field).
*
- * AR_WOW_LENGTH1_REG2
- * bit 31:24 pattern 4 length
- * bit 23:16 pattern 5 length
- * bit 15:8 pattern 6 length
- * bit 7:0 pattern 7 length
+ * Disassociate pattern
+ * --------------------
+ * Frame control = 00 00 1010
+ * DA, SA, BSSID = x:x:x:x:x:x
+ * Pattern will be A0000000 | x:x:x:x:x:x | x:x:x:x:x:x
+ * | x:x:x:x:x:x -- 22 bytes
*
- * the below logic writes out the new
- * pattern length for the corresponding
- * pattern_count, while masking out the
- * other fields
+ * Deauthenticate pattern
+ * ----------------------
+ * Frame control = 00 00 1100
+ * DA, SA, BSSID = x:x:x:x:x:x
+ * Pattern will be C0000000 | x:x:x:x:x:x | x:x:x:x:x:x
+ * | x:x:x:x:x:x -- 22 bytes
*/
- ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
-
- if (pattern_count < 4) {
- /* Pattern 0-3 uses AR_WOW_LENGTH1 register */
- set = (pattern_len & AR_WOW_LENGTH_MAX) <<
- AR_WOW_LEN1_SHIFT(pattern_count);
- clr = AR_WOW_LENGTH1_MASK(pattern_count);
- REG_RMW(ah, AR_WOW_LENGTH1, set, clr);
- } else {
- /* Pattern 4-7 uses AR_WOW_LENGTH2 register */
- set = (pattern_len & AR_WOW_LENGTH_MAX) <<
- AR_WOW_LEN2_SHIFT(pattern_count);
- clr = AR_WOW_LENGTH2_MASK(pattern_count);
- REG_RMW(ah, AR_WOW_LENGTH2, set, clr);
- }
+ /* Create Disassociate Pattern first */
-}
-EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern);
+ byte_cnt = 0;
-u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
-{
- u32 wow_status = 0;
- u32 val = 0, rval;
+ /* Fill out the mask with all FF's */
- /*
- * read the WoW status register to know
- * the wakeup reason
- */
- rval = REG_READ(ah, AR_WOW_PATTERN);
- val = AR_WOW_STATUS(rval);
+ for (i = 0; i < MAX_PATTERN_MASK_SIZE; i++)
+ dis_deauth_mask[i] = 0xff;
- /*
- * mask only the WoW events that we have enabled. Sometimes
- * we have spurious WoW events from the AR_WOW_PATTERN
- * register. This mask will clean it up.
- */
+ /* copy the first byte of frame control field */
+ dis_deauth_pattern[byte_cnt] = 0xa0;
+ byte_cnt++;
- val &= ah->wow_event_mask;
-
- if (val) {
- if (val & AR_WOW_MAGIC_PAT_FOUND)
- wow_status |= AH_WOW_MAGIC_PATTERN_EN;
- if (AR_WOW_PATTERN_FOUND(val))
- wow_status |= AH_WOW_USER_PATTERN_EN;
- if (val & AR_WOW_KEEP_ALIVE_FAIL)
- wow_status |= AH_WOW_LINK_CHANGE;
- if (val & AR_WOW_BEACON_FAIL)
- wow_status |= AH_WOW_BEACON_MISS;
- }
+ /* skip 2nd byte of frame control and Duration field */
+ byte_cnt += 3;
/*
- * set and clear WOW_PME_CLEAR registers for the chip to
- * generate next wow signal.
- * disable D3 before accessing other registers ?
+ * need not match the destination mac address; it can be a broadcast
+ * mac address or a unicast to this station
*/
+ byte_cnt += 6;
- /* do we need to check the bit value 0x01000000 (7-10) ?? */
- REG_RMW(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR,
- AR_PMCTRL_PWR_STATE_D1D3);
+ /* copy the source mac address */
+ memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
- /*
- * clear all events
- */
- REG_WRITE(ah, AR_WOW_PATTERN,
- AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
+ byte_cnt += 6;
- /*
- * restore the beacon threshold to init value
- */
- REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
+ /* copy the bssid, it's the same as the source mac address */
+
+ memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
+
+ /* Create Disassociate pattern mask */
+
+ dis_deauth_mask[0] = 0xfe;
+ dis_deauth_mask[1] = 0x03;
+ dis_deauth_mask[2] = 0xc0;
+
+ ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n");
+ ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
+ pattern_count, byte_cnt);
+
+ pattern_count++;
/*
- * Restore the way the PCI-E reset, Power-On-Reset, external
- * PCIE_POR_SHORT pins are tied to its original value.
- * Previously just before WoW sleep, we untie the PCI-E
- * reset to our Chip's Power On Reset so that any PCI-E
- * reset from the bus will not reset our chip
+ * for de-authenticate pattern, only the first byte of the frame
+ * control field gets changed from 0xA0 to 0xC0
*/
- if (ah->is_pciexpress)
- ath9k_hw_configpcipowersave(ah, false);
+ dis_deauth_pattern[0] = 0xC0;
- ah->wow_event_mask = 0;
+ ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
+ pattern_count, byte_cnt);
- return wow_status;
}
-EXPORT_SYMBOL(ath9k_hw_wow_wakeup);
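
To make the layout described in the comment concrete, a purely illustrative dump of the 22-byte disassociate pattern for an example BSSID of 00:11:22:33:44:55 follows (the deauthenticate variant only changes byte 0 to 0xc0); the mask bytes 0xfe/0x03/0xc0 come straight from the code above and their bit-to-byte mapping is hardware specific.

/* Illustration only, not driver code. */
static const unsigned char example_disassoc_pattern[22] = {
	0xa0,					/* frame control, byte 0 */
	0x00, 0x00, 0x00,			/* FC byte 1 + duration  */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,	/* DA: don't care        */
	0x00, 0x11, 0x22, 0x33, 0x44, 0x55,	/* SA  = example BSSID   */
	0x00, 0x11, 0x22, 0x33, 0x44, 0x55,	/* BSSID                 */
};
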
-void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
+static void ath9k_wow_add_pattern(struct ath_softc *sc,
+ struct cfg80211_wowlan *wowlan)
{
- u32 wow_event_mask;
- u32 set, clr;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_wow_pattern *wow_pattern = NULL;
+ struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+ int mask_len;
+ s8 i = 0;
- /*
- * wow_event_mask is a mask to the AR_WOW_PATTERN register to
- * indicate which WoW events we have enabled. The WoW events
- * are from the 'pattern_enable' in this function and
- * 'pattern_count' of ath9k_hw_wow_apply_pattern()
- */
- wow_event_mask = ah->wow_event_mask;
+ if (!wowlan->n_patterns)
+ return;
/*
- * Untie Power-on-Reset from the PCI-E-Reset. When we are in
- * WOW sleep, we do want the Reset from the PCI-E to disturb
- * our hw state
+ * Add the new user configured patterns
*/
- if (ah->is_pciexpress) {
+ for (i = 0; i < wowlan->n_patterns; i++) {
+
+ wow_pattern = kzalloc(sizeof(*wow_pattern), GFP_KERNEL);
+
+ if (!wow_pattern)
+ return;
+
+ /*
+ * TODO: convert the generic user space pattern to
+ * appropriate chip specific/802.11 pattern.
+ */
+
+ mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+ memset(wow_pattern->pattern_bytes, 0, MAX_PATTERN_SIZE);
+ memset(wow_pattern->mask_bytes, 0, MAX_PATTERN_SIZE);
+ memcpy(wow_pattern->pattern_bytes, patterns[i].pattern,
+ patterns[i].pattern_len);
+ memcpy(wow_pattern->mask_bytes, patterns[i].mask, mask_len);
+ wow_pattern->pattern_len = patterns[i].pattern_len;
+
/*
- * we need to untie the internal POR (power-on-reset)
- * to the external PCI-E reset. We also need to tie
- * the PCI-E Phy reset to the PCI-E reset.
+ * just need to take care of deauth and disassoc pattern,
+ * make sure we don't overwrite them.
*/
- set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
- clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
- REG_RMW(ah, AR_WA, set, clr);
+
+ ath9k_hw_wow_apply_pattern(ah, wow_pattern->pattern_bytes,
+ wow_pattern->mask_bytes,
+ i + 2,
+ wow_pattern->pattern_len);
+ kfree(wow_pattern);
+
}
- /*
- * set the power states appropriately and enable PME
- */
- set = AR_PMCTRL_HOST_PME_EN | AR_PMCTRL_PWR_PM_CTRL_ENA |
- AR_PMCTRL_AUX_PWR_DET | AR_PMCTRL_WOW_PME_CLR;
+}
- /*
- * set and clear WOW_PME_CLEAR registers for the chip
- * to generate next wow signal.
- */
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
- clr = AR_PMCTRL_WOW_PME_CLR;
- REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+int ath9k_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 wow_triggers_enabled = 0;
+ int ret = 0;
- /*
- * Setup for:
- * - beacon misses
- * - magic pattern
- * - keep alive timeout
- * - pattern matching
- */
+ mutex_lock(&sc->mutex);
- /*
- * Program default values for pattern backoff, aifs/slot/KAL count,
- * beacon miss timeout, KAL timeout, etc.
- */
- set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF);
- REG_SET_BIT(ah, AR_WOW_PATTERN, set);
+ ath_cancel_work(sc);
+ ath_stop_ani(sc);
- set = AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
- AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
- AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT);
- REG_SET_BIT(ah, AR_WOW_COUNT, set);
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+ ath_dbg(common, ANY, "Device not present\n");
+ ret = -EINVAL;
+ goto fail_wow;
+ }
- if (pattern_enable & AH_WOW_BEACON_MISS)
- set = AR_WOW_BEACON_TIMO;
- /* We are not using beacon miss, program a large value */
- else
- set = AR_WOW_BEACON_TIMO_MAX;
+ if (WARN_ON(!wowlan)) {
+ ath_dbg(common, WOW, "None of the WoW triggers enabled\n");
+ ret = -EINVAL;
+ goto fail_wow;
+ }
- REG_WRITE(ah, AR_WOW_BCN_TIMO, set);
+ if (!device_can_wakeup(sc->dev)) {
+ ath_dbg(common, WOW, "device_can_wakeup failed, WoW is not enabled\n");
+ ret = 1;
+ goto fail_wow;
+ }
/*
- * Keep alive timo in ms except AR9280
+ * none of the sta vifs are associated
+ * and we are not currently handling multivif
+ * cases, for instance we have to seperately
+ * configure 'keep alive frame' for each
+ * STA.
*/
- if (!pattern_enable)
- set = AR_WOW_KEEP_ALIVE_NEVER;
- else
- set = KAL_TIMEOUT * 32;
- REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, set);
+ if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+ ath_dbg(common, WOW, "None of the STA vifs are associated\n");
+ ret = 1;
+ goto fail_wow;
+ }
+
+ if (sc->nvifs > 1) {
+ ath_dbg(common, WOW, "WoW for multivif is not yet supported\n");
+ ret = 1;
+ goto fail_wow;
+ }
+
+ ath9k_wow_map_triggers(sc, wowlan, &wow_triggers_enabled);
+
+ ath_dbg(common, WOW, "WoW triggers enabled 0x%x\n",
+ wow_triggers_enabled);
+
+ ath9k_ps_wakeup(sc);
+
+ ath9k_stop_btcoex(sc);
/*
- * Keep alive delay in us. based on 'power on clock',
- * therefore in usec
+ * Enable wake up on receiving disassoc/deauth
+ * frame by default.
*/
- set = KAL_DELAY * 1000;
- REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, set);
+ ath9k_wow_add_disassoc_deauth_pattern(sc);
+
+ if (wow_triggers_enabled & AH_WOW_USER_PATTERN_EN)
+ ath9k_wow_add_pattern(sc, wowlan);
+ spin_lock_bh(&sc->sc_pcu_lock);
/*
- * Create keep alive pattern to respond to beacons
+ * To avoid false wake, we enable beacon miss interrupt only
+ * when we go to sleep. We save the current interrupt mask
+ * so we can restore it after the system wakes up
*/
- ath9k_wow_create_keep_alive_pattern(ah);
+ sc->wow_intr_before_sleep = ah->imask;
+ ah->imask &= ~ATH9K_INT_GLOBAL;
+ ath9k_hw_disable_interrupts(ah);
+ ah->imask = ATH9K_INT_BMISS | ATH9K_INT_GLOBAL;
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+
+ spin_unlock_bh(&sc->sc_pcu_lock);
/*
- * Configure MAC WoW Registers
+ * we can now sync irq and kill any running tasklets, since we already
+ * disabled interrupts and are not holding a spin lock
*/
- set = 0;
- /* Send keep alive timeouts anyway */
- clr = AR_WOW_KEEP_ALIVE_AUTO_DIS;
+ synchronize_irq(sc->irq);
+ tasklet_kill(&sc->intr_tq);
- if (pattern_enable & AH_WOW_LINK_CHANGE)
- wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
- else
- set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
+ ath9k_hw_wow_enable(ah, wow_triggers_enabled);
- set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
- REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr);
+ ath9k_ps_restore(sc);
+ ath_dbg(common, ANY, "WoW enabled in ath9k\n");
+ atomic_inc(&sc->wow_sleep_proc_intr);
- /*
- * we are relying on a bmiss failure. ensure we have
- * enough threshold to prevent false positives
- */
- REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
- AR_WOW_BMISSTHRESHOLD);
+fail_wow:
+ mutex_unlock(&sc->mutex);
+ return ret;
+}
+
+int ath9k_resume(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 wow_status;
- set = 0;
- clr = 0;
+ mutex_lock(&sc->mutex);
- if (pattern_enable & AH_WOW_BEACON_MISS) {
- set = AR_WOW_BEACON_FAIL_EN;
- wow_event_mask |= AR_WOW_BEACON_FAIL;
- } else {
- clr = AR_WOW_BEACON_FAIL_EN;
+ ath9k_ps_wakeup(sc);
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+
+ ath9k_hw_disable_interrupts(ah);
+ ah->imask = sc->wow_intr_before_sleep;
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+ wow_status = ath9k_hw_wow_wakeup(ah);
+
+ if (atomic_read(&sc->wow_got_bmiss_intr) == 0) {
+ /*
+ * some devices may not report beacon miss
+ * as the reason they woke up, so we add
+ * it here to cover that shortcoming.
+ */
+ wow_status |= AH_WOW_BEACON_MISS;
+ atomic_dec(&sc->wow_got_bmiss_intr);
+ ath_dbg(common, ANY, "Beacon miss interrupt picked up during WoW sleep\n");
}
- REG_RMW(ah, AR_WOW_BCN_EN, set, clr);
+ atomic_dec(&sc->wow_sleep_proc_intr);
- set = 0;
- clr = 0;
- /*
- * Enable the magic packet registers
- */
- if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) {
- set = AR_WOW_MAGIC_EN;
- wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
- } else {
- clr = AR_WOW_MAGIC_EN;
+ if (wow_status) {
+ ath_dbg(common, ANY, "Waking up due to WoW triggers %s with WoW status = %x\n",
+ ath9k_hw_wow_event_to_string(wow_status), wow_status);
}
- set |= AR_WOW_MAC_INTR_EN;
- REG_RMW(ah, AR_WOW_PATTERN, set, clr);
- REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
- AR_WOW_PATTERN_SUPPORTED);
+ ath_restart_work(sc);
+ ath9k_start_btcoex(sc);
- /*
- * Set the power states appropriately and enable PME
- */
- clr = 0;
- set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN |
- AR_PMCTRL_PWR_PM_CTRL_ENA;
+ ath9k_ps_restore(sc);
+ mutex_unlock(&sc->mutex);
- clr = AR_PCIE_PM_CTRL_ENA;
- REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
+ return 0;
+}
- /*
- * this is needed to prevent the chip waking up
- * the host within 3-4 seconds with certain
- * platform/BIOS. The fix is to enable
- * D1 & D3 to match original definition and
- * also match the OTP value. Anyway this
- * is more related to SW WOW.
- */
- clr = AR_PMCTRL_PWR_STATE_D1D3;
- REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct ath_softc *sc = hw->priv;
- set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
+ mutex_lock(&sc->mutex);
+ device_init_wakeup(sc->dev, 1);
+ device_set_wakeup_enable(sc->dev, enabled);
+ mutex_unlock(&sc->mutex);
+}
- REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
+void ath9k_init_wow(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
- /* to bring down WOW power low margin */
- set = BIT(13);
- REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
- /* HW WoW */
- clr = BIT(5);
- REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
+ (sc->driver_data & ATH9K_PCI_WOW) &&
+ device_can_wakeup(sc->dev))
+ hw->wiphy->wowlan = &ath9k_wowlan_support;
- ath9k_hw_set_powermode_wow_sleep(ah);
- ah->wow_event_mask = wow_event_mask;
+ atomic_set(&sc->wow_sleep_proc_intr, -1);
+ atomic_set(&sc->wow_got_bmiss_intr, -1);
}
-EXPORT_SYMBOL(ath9k_hw_wow_enable);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index b5a19e098f2d..0a75e2f68c9d 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -47,8 +47,6 @@ static u16 bits_per_symbol[][2] = {
{ 260, 540 }, /* 7: 64-QAM 5/6 */
};
-#define IS_HT_RATE(_rate) ((_rate) & 0x80)
-
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
@@ -174,14 +172,7 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
static struct ath_atx_tid *
ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr;
- u8 tidno = 0;
-
- hdr = (struct ieee80211_hdr *) skb->data;
- if (ieee80211_is_data_qos(hdr->frame_control))
- tidno = ieee80211_get_qos_ctl(hdr)[0];
-
- tidno &= IEEE80211_QOS_CTL_TID_MASK;
+ u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
return ATH_AN_2_TID(an, tidno);
}
@@ -781,11 +772,6 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
if (bt_aggr_limit)
aggr_limit = bt_aggr_limit;
- /*
- * h/w can accept aggregates up to 16 bit lengths (65535).
- * The IE, however can hold up to 65536, which shows up here
- * as zero. Ignore 65536 since we are constrained by hw.
- */
if (tid->an->maxampdu)
aggr_limit = min(aggr_limit, tid->an->maxampdu);
@@ -1410,8 +1396,8 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
* has already been added.
*/
if (sta->ht_cap.ht_supported) {
- an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
- sta->ht_cap.ampdu_factor);
+ an->maxampdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ sta->ht_cap.ampdu_factor)) - 1;
density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
an->mpdudensity = density;
}
@@ -1790,6 +1776,9 @@ bool ath_drain_all_txq(struct ath_softc *sc)
if (!ATH_TXQ_SETUP(sc, i))
continue;
+ if (!sc->tx.txq[i].axq_depth)
+ continue;
+
if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
npend |= BIT(i);
}
@@ -2753,6 +2742,8 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
}
}
+#ifdef CONFIG_ATH9K_TX99
+
int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
struct ath_tx_control *txctl)
{
@@ -2795,3 +2786,5 @@ int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
return 0;
}
+
+#endif /* CONFIG_ATH9K_TX99 */
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
index 3d70cd277fd7..1c0af9cd9a85 100644
--- a/drivers/net/wireless/ath/carl9170/debug.c
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -37,7 +37,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/seq_file.h>
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 349fa22a921a..4c8cdb097b65 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -37,7 +37,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
@@ -1968,18 +1967,6 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
return -ENOMEM;
ar->num_channels = chans;
- /*
- * I measured this, a bandswitch takes roughly
- * 135 ms and a frequency switch about 80.
- *
- * FIXME: measure these values again once EEPROM settings
- * are used, that will influence them!
- */
- if (bands == 2)
- ar->hw->channel_change_time = 135 * 1000;
- else
- ar->hw->channel_change_time = 80 * 1000;
-
regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
/* second part of wiphy init */
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index e935f61c7fad..536bc46a2912 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -37,7 +37,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
@@ -520,6 +519,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
{
struct ieee80211_hdr *hdr = data;
struct ieee80211_tim_ie *tim_ie;
+ struct ath_common *common = &ar->common;
u8 *tim;
u8 tim_len;
bool cam;
@@ -527,17 +527,13 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
if (likely(!(ar->hw->conf.flags & IEEE80211_CONF_PS)))
return;
- /* check if this really is a beacon */
- if (!ieee80211_is_beacon(hdr->frame_control))
- return;
-
/* min. beacon length + FCS_LEN */
if (len <= 40 + FCS_LEN)
return;
+ /* check if this really is a beacon */
/* and only beacons from the associated BSSID, please */
- if (!ether_addr_equal(hdr->addr3, ar->common.curbssid) ||
- !ar->common.curaid)
+ if (!ath_is_mybeacon(common, hdr) || !common->curaid)
return;
ar->ps.last_beacon = jiffies;
@@ -602,8 +598,8 @@ static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
if (bar->start_seq_num == entry_bar->start_seq_num &&
TID_CHECK(bar->control, entry_bar->control) &&
- ether_addr_equal(bar->ra, entry_bar->ta) &&
- ether_addr_equal(bar->ta, entry_bar->ra)) {
+ ether_addr_equal_64bits(bar->ra, entry_bar->ta) &&
+ ether_addr_equal_64bits(bar->ta, entry_bar->ra)) {
struct ieee80211_tx_info *tx_info;
tx_info = IEEE80211_SKB_CB(entry_skb);
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index e3f696ee4d23..4cadfd48ffdf 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -37,7 +37,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c
index 8e99540cd90e..8b0ac14d5c32 100644
--- a/drivers/net/wireless/ath/main.c
+++ b/drivers/net/wireless/ath/main.c
@@ -59,6 +59,14 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
}
EXPORT_SYMBOL(ath_rxbuf_alloc);
+bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr)
+{
+ return ieee80211_is_beacon(hdr->frame_control) &&
+ !is_zero_ether_addr(common->curbssid) &&
+ ether_addr_equal_64bits(hdr->addr3, common->curbssid);
+}
+EXPORT_SYMBOL(ath_is_mybeacon);
+
void ath_printk(const char *level, const struct ath_common* common,
const char *fmt, ...)
{
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 1217c52ab28e..e5e905910db4 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -37,17 +37,18 @@ static int __ath_regd_init(struct ath_regulatory *reg);
/* We enable active scan on these a case by case basis by regulatory domain */
#define ATH9K_2GHZ_CH12_13 REG_RULE(2467-10, 2472+10, 40, 0, 20,\
- NL80211_RRF_PASSIVE_SCAN)
+ NL80211_RRF_NO_IR)
#define ATH9K_2GHZ_CH14 REG_RULE(2484-10, 2484+10, 40, 0, 20,\
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM)
+ NL80211_RRF_NO_IR | \
+ NL80211_RRF_NO_OFDM)
/* We allow IBSS on these on a case by case basis by regulatory domain */
#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
#define ATH9K_2GHZ_ALL ATH9K_2GHZ_CH01_11, \
ATH9K_2GHZ_CH12_13, \
@@ -113,6 +114,87 @@ static const struct ieee80211_regdomain ath_world_regdom_67_68_6A_6C = {
}
};
+static bool dynamic_country_user_possible(struct ath_regulatory *reg)
+{
+ if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
+ return true;
+
+ switch (reg->country_code) {
+ case CTRY_UNITED_STATES:
+ case CTRY_JAPAN1:
+ case CTRY_JAPAN2:
+ case CTRY_JAPAN3:
+ case CTRY_JAPAN4:
+ case CTRY_JAPAN5:
+ case CTRY_JAPAN6:
+ case CTRY_JAPAN7:
+ case CTRY_JAPAN8:
+ case CTRY_JAPAN9:
+ case CTRY_JAPAN10:
+ case CTRY_JAPAN11:
+ case CTRY_JAPAN12:
+ case CTRY_JAPAN13:
+ case CTRY_JAPAN14:
+ case CTRY_JAPAN15:
+ case CTRY_JAPAN16:
+ case CTRY_JAPAN17:
+ case CTRY_JAPAN18:
+ case CTRY_JAPAN19:
+ case CTRY_JAPAN20:
+ case CTRY_JAPAN21:
+ case CTRY_JAPAN22:
+ case CTRY_JAPAN23:
+ case CTRY_JAPAN24:
+ case CTRY_JAPAN25:
+ case CTRY_JAPAN26:
+ case CTRY_JAPAN27:
+ case CTRY_JAPAN28:
+ case CTRY_JAPAN29:
+ case CTRY_JAPAN30:
+ case CTRY_JAPAN31:
+ case CTRY_JAPAN32:
+ case CTRY_JAPAN33:
+ case CTRY_JAPAN34:
+ case CTRY_JAPAN35:
+ case CTRY_JAPAN36:
+ case CTRY_JAPAN37:
+ case CTRY_JAPAN38:
+ case CTRY_JAPAN39:
+ case CTRY_JAPAN40:
+ case CTRY_JAPAN41:
+ case CTRY_JAPAN42:
+ case CTRY_JAPAN43:
+ case CTRY_JAPAN44:
+ case CTRY_JAPAN45:
+ case CTRY_JAPAN46:
+ case CTRY_JAPAN47:
+ case CTRY_JAPAN48:
+ case CTRY_JAPAN49:
+ case CTRY_JAPAN50:
+ case CTRY_JAPAN51:
+ case CTRY_JAPAN52:
+ case CTRY_JAPAN53:
+ case CTRY_JAPAN54:
+ case CTRY_JAPAN55:
+ case CTRY_JAPAN56:
+ case CTRY_JAPAN57:
+ case CTRY_JAPAN58:
+ case CTRY_JAPAN59:
+ return false;
+ }
+
+ return true;
+}
+
+static bool ath_reg_dyn_country_user_allow(struct ath_regulatory *reg)
+{
+ if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
+ return false;
+ if (!dynamic_country_user_possible(reg))
+ return false;
+ return true;
+}
+
static inline bool is_wwr_sku(u16 regd)
{
return ((regd & COUNTRY_ERD_FLAG) != COUNTRY_ERD_FLAG) &&
@@ -177,118 +259,139 @@ static bool ath_is_radar_freq(u16 center_freq)
return (center_freq >= 5260 && center_freq <= 5700);
}
+static void ath_force_clear_no_ir_chan(struct wiphy *wiphy,
+ struct ieee80211_channel *ch)
+{
+ const struct ieee80211_reg_rule *reg_rule;
+
+ reg_rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq));
+ if (IS_ERR(reg_rule))
+ return;
+
+ if (!(reg_rule->flags & NL80211_RRF_NO_IR))
+ if (ch->flags & IEEE80211_CHAN_NO_IR)
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
+}
+
+static void ath_force_clear_no_ir_freq(struct wiphy *wiphy, u16 center_freq)
+{
+ struct ieee80211_channel *ch;
+
+ ch = ieee80211_get_channel(wiphy, center_freq);
+ if (!ch)
+ return;
+
+ ath_force_clear_no_ir_chan(wiphy, ch);
+}
+
+static void ath_force_no_ir_chan(struct ieee80211_channel *ch)
+{
+ ch->flags |= IEEE80211_CHAN_NO_IR;
+}
+
+static void ath_force_no_ir_freq(struct wiphy *wiphy, u16 center_freq)
+{
+ struct ieee80211_channel *ch;
+
+ ch = ieee80211_get_channel(wiphy, center_freq);
+ if (!ch)
+ return;
+
+ ath_force_no_ir_chan(ch);
+}
+
+static void
+__ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ enum nl80211_reg_initiator initiator,
+ struct ieee80211_channel *ch)
+{
+ if (ath_is_radar_freq(ch->center_freq) ||
+ (ch->flags & IEEE80211_CHAN_RADAR))
+ return;
+
+ switch (initiator) {
+ case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ ath_force_clear_no_ir_chan(wiphy, ch);
+ break;
+ case NL80211_REGDOM_SET_BY_USER:
+ if (ath_reg_dyn_country_user_allow(reg))
+ ath_force_clear_no_ir_chan(wiphy, ch);
+ break;
+ default:
+ if (ch->beacon_found)
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
+ }
+}
+
/*
- * N.B: These exception rules do not apply radar freqs.
+ * These exception rules do not apply to radar frequencies.
*
- * - We enable adhoc (or beaconing) if allowed by 11d
- * - We enable active scan if the channel is allowed by 11d
+ * - We enable initiating radiation if the country IE says it's fine:
 * - If no country IE has been processed and we determine we have
- * received a beacon on a channel we can enable active scan and
- * adhoc (or beaconing).
+ * received a beacon on a channel we can enable initiating radiation.
*/
static void
ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
enum nl80211_reg_initiator initiator)
{
enum ieee80211_band band;
struct ieee80211_supported_band *sband;
- const struct ieee80211_reg_rule *reg_rule;
struct ieee80211_channel *ch;
unsigned int i;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
-
if (!wiphy->bands[band])
continue;
-
sband = wiphy->bands[band];
-
for (i = 0; i < sband->n_channels; i++) {
-
ch = &sband->channels[i];
-
- if (ath_is_radar_freq(ch->center_freq) ||
- (ch->flags & IEEE80211_CHAN_RADAR))
- continue;
-
- if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- reg_rule = freq_reg_info(wiphy, ch->center_freq);
- if (IS_ERR(reg_rule))
- continue;
- /*
- * If 11d had a rule for this channel ensure
- * we enable adhoc/beaconing if it allows us to
- * use it. Note that we would have disabled it
- * by applying our static world regdomain by
- * default during init, prior to calling our
- * regulatory_hint().
- */
- if (!(reg_rule->flags &
- NL80211_RRF_NO_IBSS))
- ch->flags &=
- ~IEEE80211_CHAN_NO_IBSS;
- if (!(reg_rule->flags &
- NL80211_RRF_PASSIVE_SCAN))
- ch->flags &=
- ~IEEE80211_CHAN_PASSIVE_SCAN;
- } else {
- if (ch->beacon_found)
- ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN);
- }
+ __ath_reg_apply_beaconing_flags(wiphy, reg,
+ initiator, ch);
}
}
-
}
-/* Allows active scan scan on Ch 12 and 13 */
+/**
+ * ath_reg_apply_ir_flags()
+ * @wiphy: the wiphy to use
+ * @initiator: the regulatory hint initiator
+ *
+ * If no country IE has been received, always enable passive scan
+ * and no-ibss on these channels. This is only done for specific
+ * regulatory SKUs.
+ *
+ * If a country IE has been received, check its rule for this
+ * channel first before enabling active scan. The passive scan
+ * would have been enforced by the initial processing of our
+ * custom regulatory domain.
+ */
static void
-ath_reg_apply_active_scan_flags(struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator)
+ath_reg_apply_ir_flags(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ enum nl80211_reg_initiator initiator)
{
struct ieee80211_supported_band *sband;
- struct ieee80211_channel *ch;
- const struct ieee80211_reg_rule *reg_rule;
sband = wiphy->bands[IEEE80211_BAND_2GHZ];
if (!sband)
return;
- /*
- * If no country IE has been received always enable active scan
- * on these channels. This is only done for specific regulatory SKUs
- */
- if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- ch = &sband->channels[11]; /* CH 12 */
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
- ch = &sband->channels[12]; /* CH 13 */
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
- return;
- }
-
- /*
- * If a country IE has been received check its rule for this
- * channel first before enabling active scan. The passive scan
- * would have been enforced by the initial processing of our
- * custom regulatory domain.
- */
-
- ch = &sband->channels[11]; /* CH 12 */
- reg_rule = freq_reg_info(wiphy, ch->center_freq);
- if (!IS_ERR(reg_rule)) {
- if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
- }
-
- ch = &sband->channels[12]; /* CH 13 */
- reg_rule = freq_reg_info(wiphy, ch->center_freq);
- if (!IS_ERR(reg_rule)) {
- if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ switch (initiator) {
+ case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ ath_force_clear_no_ir_freq(wiphy, 2467);
+ ath_force_clear_no_ir_freq(wiphy, 2472);
+ break;
+ case NL80211_REGDOM_SET_BY_USER:
+ if (!ath_reg_dyn_country_user_allow(reg))
+ break;
+ ath_force_clear_no_ir_freq(wiphy, 2467);
+ ath_force_clear_no_ir_freq(wiphy, 2472);
+ break;
+ default:
+ ath_force_no_ir_freq(wiphy, 2467);
+ ath_force_no_ir_freq(wiphy, 2472);
}
}
@@ -320,8 +423,7 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
*/
if (!(ch->flags & IEEE80211_CHAN_DISABLED))
ch->flags |= IEEE80211_CHAN_RADAR |
- IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN;
+ IEEE80211_CHAN_NO_IR;
}
}
@@ -335,12 +437,15 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
case 0x66:
case 0x67:
case 0x6C:
- ath_reg_apply_beaconing_flags(wiphy, initiator);
+ ath_reg_apply_beaconing_flags(wiphy, reg, initiator);
break;
case 0x68:
- ath_reg_apply_beaconing_flags(wiphy, initiator);
- ath_reg_apply_active_scan_flags(wiphy, initiator);
+ ath_reg_apply_beaconing_flags(wiphy, reg, initiator);
+ ath_reg_apply_ir_flags(wiphy, reg, initiator);
break;
+ default:
+ if (ath_reg_dyn_country_user_allow(reg))
+ ath_reg_apply_beaconing_flags(wiphy, reg, initiator);
}
}
@@ -393,89 +498,6 @@ static void ath_reg_dyn_country(struct wiphy *wiphy,
reg_initiator_name(request->initiator));
}
-static bool dynamic_country_user_possible(struct ath_regulatory *reg)
-{
- if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
- return true;
-
- switch (reg->country_code) {
- case CTRY_UNITED_STATES:
- case CTRY_JAPAN1:
- case CTRY_JAPAN2:
- case CTRY_JAPAN3:
- case CTRY_JAPAN4:
- case CTRY_JAPAN5:
- case CTRY_JAPAN6:
- case CTRY_JAPAN7:
- case CTRY_JAPAN8:
- case CTRY_JAPAN9:
- case CTRY_JAPAN10:
- case CTRY_JAPAN11:
- case CTRY_JAPAN12:
- case CTRY_JAPAN13:
- case CTRY_JAPAN14:
- case CTRY_JAPAN15:
- case CTRY_JAPAN16:
- case CTRY_JAPAN17:
- case CTRY_JAPAN18:
- case CTRY_JAPAN19:
- case CTRY_JAPAN20:
- case CTRY_JAPAN21:
- case CTRY_JAPAN22:
- case CTRY_JAPAN23:
- case CTRY_JAPAN24:
- case CTRY_JAPAN25:
- case CTRY_JAPAN26:
- case CTRY_JAPAN27:
- case CTRY_JAPAN28:
- case CTRY_JAPAN29:
- case CTRY_JAPAN30:
- case CTRY_JAPAN31:
- case CTRY_JAPAN32:
- case CTRY_JAPAN33:
- case CTRY_JAPAN34:
- case CTRY_JAPAN35:
- case CTRY_JAPAN36:
- case CTRY_JAPAN37:
- case CTRY_JAPAN38:
- case CTRY_JAPAN39:
- case CTRY_JAPAN40:
- case CTRY_JAPAN41:
- case CTRY_JAPAN42:
- case CTRY_JAPAN43:
- case CTRY_JAPAN44:
- case CTRY_JAPAN45:
- case CTRY_JAPAN46:
- case CTRY_JAPAN47:
- case CTRY_JAPAN48:
- case CTRY_JAPAN49:
- case CTRY_JAPAN50:
- case CTRY_JAPAN51:
- case CTRY_JAPAN52:
- case CTRY_JAPAN53:
- case CTRY_JAPAN54:
- case CTRY_JAPAN55:
- case CTRY_JAPAN56:
- case CTRY_JAPAN57:
- case CTRY_JAPAN58:
- case CTRY_JAPAN59:
- return false;
- }
-
- return true;
-}
-
-static void ath_reg_dyn_country_user(struct wiphy *wiphy,
- struct ath_regulatory *reg,
- struct regulatory_request *request)
-{
- if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
- return;
- if (!dynamic_country_user_possible(reg))
- return;
- ath_reg_dyn_country(wiphy, reg, request);
-}
-
void ath_reg_notifier_apply(struct wiphy *wiphy,
struct regulatory_request *request,
struct ath_regulatory *reg)
@@ -508,7 +530,8 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
case NL80211_REGDOM_SET_BY_DRIVER:
break;
case NL80211_REGDOM_SET_BY_USER:
- ath_reg_dyn_country_user(wiphy, reg, request);
+ if (ath_reg_dyn_country_user_allow(reg))
+ ath_reg_dyn_country(wiphy, reg, request);
break;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
ath_reg_dyn_country(wiphy, reg, request);
@@ -609,7 +632,8 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
const struct ieee80211_regdomain *regd;
wiphy->reg_notifier = reg_notifier;
- wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
+ wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
+ REGULATORY_CUSTOM_REG;
if (ath_is_world_regd(reg)) {
/*
@@ -617,7 +641,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
* saved on the wiphy orig_* parameters
*/
regd = ath_world_regdomain(reg);
- wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+ wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_FOLLOW_POWER;
} else {
/*
* This gets applied in the case of the absence of CRDA,
@@ -626,6 +650,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
*/
regd = ath_default_world_regdomain();
}
+
wiphy_apply_custom_regulatory(wiphy, regd);
ath_reg_apply_radar_flags(wiphy);
ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index c02dbc618724..3c2ef0c32f72 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -2644,7 +2644,7 @@ struct wcn36xx_hal_trigger_ba_rsp_candidate {
struct add_ba_info ba_info[STACFG_MAX_TC];
} __packed;
-struct wcn36xx_hal_trigget_ba_req_candidate {
+struct wcn36xx_hal_trigger_ba_req_candidate {
u8 sta_index;
u8 tid_bitmap;
} __packed;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 7839b31e4826..e64a6784079e 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -641,7 +641,8 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
dev_kfree_skb(skb);
}
- if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ if (changed & BSS_CHANGED_BEACON_ENABLED ||
+ changed & BSS_CHANGED_BEACON) {
wcn36xx_dbg(WCN36XX_DBG_MAC,
"mac bss changed beacon enabled %d\n",
bss_conf->enable_beacon);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 366339421d4f..750626b0e22d 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -115,6 +115,22 @@ static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
}
}
+static void wcn36xx_smd_set_sta_default_ht_params(
+ struct wcn36xx_hal_config_sta_params *sta_params)
+{
+ sta_params->ht_capable = 1;
+ sta_params->tx_channel_width_set = 1;
+ sta_params->lsig_txop_protection = 1;
+ sta_params->max_ampdu_size = 3;
+ sta_params->max_ampdu_density = 5;
+ sta_params->max_amsdu_size = 0;
+ sta_params->sgi_20Mhz = 1;
+ sta_params->sgi_40mhz = 1;
+ sta_params->green_field_capable = 1;
+ sta_params->delayed_ba_support = 0;
+ sta_params->dsss_cck_mode_40mhz = 1;
+}
+
static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -172,6 +188,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
sizeof(priv_sta->supported_rates));
} else {
wcn36xx_set_default_rates(&sta_params->supported_rates);
+ wcn36xx_smd_set_sta_default_ht_params(sta_params);
}
}
@@ -1134,14 +1151,14 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
/* STA */
bss->oper_mode = 1;
bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_MODE;
- } else if (vif->type == NL80211_IFTYPE_AP) {
+ } else if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
bss->bss_type = WCN36XX_HAL_INFRA_AP_MODE;
/* AP */
bss->oper_mode = 0;
bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_SAP_MODE;
- } else if (vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_MESH_POINT) {
+ } else if (vif->type == NL80211_IFTYPE_ADHOC) {
bss->bss_type = WCN36XX_HAL_IBSS_MODE;
/* STA */
@@ -1292,7 +1309,11 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
/* TODO need to find out why this is needed? */
- msg_body.tim_ie_offset = tim_off+4;
+ if (vif->type == NL80211_IFTYPE_MESH_POINT)
+ /* mesh beacons don't need this, so push it further down */
+ msg_body.tim_ie_offset = 256;
+ else
+ msg_body.tim_ie_offset = tim_off+4;
msg_body.p2p_ie_offset = p2p_off;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -1838,7 +1859,7 @@ out:
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
{
struct wcn36xx_hal_trigger_ba_req_msg msg_body;
- struct wcn36xx_hal_trigget_ba_req_candidate *candidate;
+ struct wcn36xx_hal_trigger_ba_req_candidate *candidate;
int ret = 0;
mutex_lock(&wcn->hal_mutex);
@@ -1849,7 +1870,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
msg_body.header.len += sizeof(*candidate);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
- candidate = (struct wcn36xx_hal_trigget_ba_req_candidate *)
+ candidate = (struct wcn36xx_hal_trigger_ba_req_candidate *)
(wcn->hal_buf + sizeof(msg_body));
candidate->sta_index = sta_index;
candidate->tid_bitmap = 1;
@@ -2039,22 +2060,28 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
case WCN36XX_HAL_OTA_TX_COMPL_IND:
case WCN36XX_HAL_MISSED_BEACON_IND:
case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
- mutex_lock(&wcn->hal_ind_mutex);
msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
- if (msg_ind) {
- msg_ind->msg_len = len;
- msg_ind->msg = kmalloc(len, GFP_KERNEL);
- memcpy(msg_ind->msg, buf, len);
- list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
- queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
- wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
+ if (!msg_ind)
+ goto nomem;
+ msg_ind->msg_len = len;
+ msg_ind->msg = kmalloc(len, GFP_KERNEL);
+ if (!msg_ind->msg) {
+ kfree(msg_ind);
+nomem:
+ /*
+ * FIXME: Do something smarter than just
+ * printing an error.
+ */
+ wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
+ msg_header->msg_type);
+ break;
}
+ memcpy(msg_ind->msg, buf, len);
+ mutex_lock(&wcn->hal_ind_mutex);
+ list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
+ queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
mutex_unlock(&wcn->hal_ind_mutex);
- if (msg_ind)
- break;
- /* FIXME: Do something smarter then just printing an error. */
- wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
- msg_header->msg_type);
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
break;
default:
wcn36xx_err("SMD_EVENT (%d) not supported\n",
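
The reworked indication path above boils down to an allocate-copy-enqueue pattern with explicit error handling; a condensed sketch using the queue/work fields visible in the hunk (the helper name and the use of kmemdup() instead of kmalloc()+memcpy() are assumptions):

/* Hedged sketch of the indication handling above; allocation failures
 * simply return -ENOMEM here instead of logging, purely for brevity.
 */
static int sketch_queue_indication(struct wcn36xx *wcn, void *buf, size_t len)
{
	struct wcn36xx_hal_ind_msg *msg_ind;

	msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
	if (!msg_ind)
		return -ENOMEM;

	msg_ind->msg_len = len;
	msg_ind->msg = kmemdup(buf, len, GFP_KERNEL);	/* copy before queuing */
	if (!msg_ind->msg) {
		kfree(msg_ind);
		return -ENOMEM;
	}

	mutex_lock(&wcn->hal_ind_mutex);
	list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
	queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
	mutex_unlock(&wcn->hal_ind_mutex);

	return 0;
}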
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 58b63833e8e7..8fa5cbace5ab 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -54,7 +54,7 @@ enum wcn36xx_debug_mask {
};
#define wcn36xx_err(fmt, arg...) \
- printk(KERN_ERR pr_fmt("ERROR " fmt), ##arg);
+ printk(KERN_ERR pr_fmt("ERROR " fmt), ##arg)
#define wcn36xx_warn(fmt, arg...) \
printk(KERN_WARNING pr_fmt("WARNING " fmt), ##arg)
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 8205d3e4ab66..10919f95a83c 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -156,6 +156,19 @@ void wil6210_enable_irq(struct wil6210_priv *wil)
iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICC));
+ /* interrupt moderation parameters */
+ if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ /* disable interrupt moderation for monitor
+ * to get better timestamp precision
+ */
+ iowrite32(0, wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL));
+ } else {
+ iowrite32(WIL6210_ITR_TRSH,
+ wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
+ iowrite32(BIT_DMA_ITR_CNT_CRL_EN,
+ wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL));
+ }
+
wil6210_unmask_irq_pseudo(wil);
wil6210_unmask_irq_tx(wil);
wil6210_unmask_irq_rx(wil);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index d505b2676a73..0b0975d88b43 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -21,6 +21,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
+#include <linux/prefetch.h>
#include "wil6210.h"
#include "wmi.h"
@@ -377,6 +378,8 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
}
skb_trim(skb, dmalen);
+ prefetch(skb->data);
+
wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
@@ -673,9 +676,12 @@ static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
+ d->dma.b11 = ETH_HLEN; /* MAC header length */
+
switch (skb->protocol) {
case cpu_to_be16(ETH_P_IP):
protocol = ip_hdr(skb)->protocol;
+ d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
break;
case cpu_to_be16(ETH_P_IPV6):
protocol = ipv6_hdr(skb)->nexthdr;
@@ -701,8 +707,6 @@ static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
}
d->dma.ip_length = skb_network_header_len(skb);
- d->dma.b11 = ETH_HLEN; /* MAC header length */
- d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
/* Enable TCP/UDP checksum */
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
/* Calculate pseudo-header */
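
Read as a whole, the resulting wil6210 TX checksum-offload setup now programs the MAC header length for both IP versions but sets the IPv4 L3-type bit only on the IPv4 branch; a condensed view of that flow, with descriptor and bit names taken from the hunk (the helper name and simplified signature are assumptions):

/* Condensed sketch of the TX checksum-offload descriptor setup after
 * this change; unsupported protocols simply opt out of offload here.
 */
static int sketch_tx_cksum_setup(struct vring_tx_desc *d, struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;			/* no offload requested */

	d->dma.b11 = ETH_HLEN;			/* MAC header length, both IP versions */

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		/* the IPv4 L3 type bit is now set only on this branch */
		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
		break;
	case cpu_to_be16(ETH_P_IPV6):
		break;
	default:
		return 0;
	}

	d->dma.ip_length = skb_network_header_len(skb);
	/* Enable TCP/UDP checksum generation in hardware */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	return 0;
}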
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index c4a51638736a..1f91eaf95bbe 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -39,6 +39,7 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
#define WIL6210_MAX_CID (8) /* HW limit */
#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
+#define WIL6210_ITR_TRSH (10000) /* arbitrary - about 15 IRQs/msec */
/* Hardware definitions begin */
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 0d950f209dae..bf93ea859f2d 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -28,8 +28,8 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with Atmel wireless lan drivers; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ along with Atmel wireless lan drivers; if not, see
+ <http://www.gnu.org/licenses/>.
For all queries about this code, please contact the current author,
Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation.
@@ -39,7 +39,6 @@
******************************************************************************/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -4278,8 +4277,7 @@ static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data)
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with AtmelMACFW; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ along with AtmelMACFW; if not, see <http://www.gnu.org/licenses/>.
****************************************************************************/
/* This firmware should work on the 76C502 RFMD, RFMD_D, and RFMD_E */
diff --git a/drivers/net/wireless/atmel.h b/drivers/net/wireless/atmel.h
index b9b3e5b76544..96f7318cbb04 100644
--- a/drivers/net/wireless/atmel.h
+++ b/drivers/net/wireless/atmel.h
@@ -15,8 +15,8 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with Atmel wireless lan drivers; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ along with Atmel wireless lan drivers; if not, see
+ <http://www.gnu.org/licenses/>.
******************************************************************************/
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index 522572219217..4cfb4d99ced0 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -24,15 +24,14 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with Atmel wireless lan drivers; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ along with Atmel wireless lan drivers; if not, see
+ <http://www.gnu.org/licenses/>.
******************************************************************************/
#ifdef __IN_PCMCIA_PACKAGE__
#include <pcmcia/k_compat.h>
#endif
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index c1b159ebcffe..5cd97e3cbee3 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -15,14 +15,13 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with Atmel wireless lan drivers; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ along with Atmel wireless lan drivers; if not, see
+ <http://www.gnu.org/licenses/>.
******************************************************************************/
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include "atmel.h"
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 7f3d461f7e8d..54376fddfaf9 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -731,8 +731,6 @@ enum b43_firmware_file_type {
struct b43_request_fw_context {
/* The device we are requesting the fw for. */
struct b43_wldev *dev;
- /* a completion event structure needed if this call is asynchronous */
- struct completion fw_load_complete;
/* a pointer to the firmware object */
const struct firmware *blob;
/* The type of firmware to request. */
@@ -809,6 +807,8 @@ enum {
struct b43_wldev {
struct b43_bus_dev *dev;
struct b43_wl *wl;
+ /* a completion event structure needed for asynchronous firmware requests */
+ struct completion fw_load_complete;
/* The device initialization status.
* Use b43_status() to query. */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index ccd24f0acb8d..c75237eb55a1 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2070,6 +2070,7 @@ void b43_do_release_fw(struct b43_firmware_file *fw)
static void b43_release_firmware(struct b43_wldev *dev)
{
+ complete(&dev->fw_load_complete);
b43_do_release_fw(&dev->fw.ucode);
b43_do_release_fw(&dev->fw.pcm);
b43_do_release_fw(&dev->fw.initvals);
@@ -2095,7 +2096,7 @@ static void b43_fw_cb(const struct firmware *firmware, void *context)
struct b43_request_fw_context *ctx = context;
ctx->blob = firmware;
- complete(&ctx->fw_load_complete);
+ complete(&ctx->dev->fw_load_complete);
}
int b43_do_request_fw(struct b43_request_fw_context *ctx,
@@ -2142,7 +2143,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
}
if (async) {
/* do this part asynchronously */
- init_completion(&ctx->fw_load_complete);
+ init_completion(&ctx->dev->fw_load_complete);
err = request_firmware_nowait(THIS_MODULE, 1, ctx->fwname,
ctx->dev->dev->dev, GFP_KERNEL,
ctx, b43_fw_cb);
@@ -2150,12 +2151,11 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
pr_err("Unable to load firmware\n");
return err;
}
- /* stall here until fw ready */
- wait_for_completion(&ctx->fw_load_complete);
+ wait_for_completion(&ctx->dev->fw_load_complete);
if (ctx->blob)
goto fw_ready;
/* On some ARM systems, the async request will fail, but the next sync
- * request works. For this reason, we dall through here
+ * request works. For this reason, we fall through here
*/
}
err = request_firmware(&ctx->blob, ctx->fwname,
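
The b43 change above moves the firmware-load completion from the short-lived request context into the long-lived wldev, so a late asynchronous callback can never touch freed memory and teardown can always unblock a waiter; a minimal sketch of that ownership pattern under simplified, assumed types:

/* Hedged sketch: the completion lives in the device object that outlives
 * the firmware request, not in the request context.
 */
struct sketch_dev {
	struct completion fw_load_complete;
	const struct firmware *blob;
};

static void sketch_fw_cb(const struct firmware *fw, void *context)
{
	struct sketch_dev *dev = context;

	dev->blob = fw;			/* NULL if the async request failed */
	complete(&dev->fw_load_complete);
}

static int sketch_request_fw(struct sketch_dev *dev, struct device *parent,
			     const char *name)
{
	int err;

	init_completion(&dev->fw_load_complete);
	err = request_firmware_nowait(THIS_MODULE, 1, name, parent,
				      GFP_KERNEL, dev, sketch_fw_cb);
	if (err)
		return err;

	wait_for_completion(&dev->fw_load_complete);
	return dev->blob ? 0 : -ENOENT;
}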
@@ -2424,6 +2424,7 @@ error:
static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl);
static void b43_one_core_detach(struct b43_bus_dev *dev);
+static int b43_rng_init(struct b43_wl *wl);
static void b43_request_firmware(struct work_struct *work)
{
@@ -2475,6 +2476,10 @@ start_ieee80211:
goto err_one_core_detach;
wl->hw_registred = true;
b43_leds_register(wl->current_dev);
+
+ /* Register HW RNG driver */
+ b43_rng_init(wl);
+
goto out;
err_one_core_detach:
@@ -4636,9 +4641,6 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
if (!dev || b43_status(dev) != B43_STAT_INITIALIZED)
return;
- /* Unregister HW RNG driver */
- b43_rng_exit(dev->wl);
-
b43_set_status(dev, B43_STAT_UNINIT);
/* Stop the microcode PSM. */
@@ -4795,9 +4797,6 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
b43_set_status(dev, B43_STAT_INITIALIZED);
- /* Register HW RNG driver */
- b43_rng_init(dev->wl);
-
out:
return err;
@@ -5464,6 +5463,9 @@ static void b43_bcma_remove(struct bcma_device *core)
b43_one_core_detach(wldev->dev);
+ /* Unregister HW RNG driver */
+ b43_rng_exit(wl);
+
b43_leds_unregister(wl);
ieee80211_free_hw(wl->hw);
@@ -5541,6 +5543,9 @@ static void b43_ssb_remove(struct ssb_device *sdev)
b43_one_core_detach(dev);
+ /* Unregister HW RNG driver */
+ b43_rng_exit(wl);
+
if (list_empty(&wl->devlist)) {
b43_leds_unregister(wl);
/* Last core on the chip unregistered.
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 4ae63f4ddfb2..50e5ddb12fb3 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -821,10 +821,10 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
* channel number in b43. */
if (chanstat & B43_RX_CHAN_5GHZ) {
status.band = IEEE80211_BAND_5GHZ;
- status.freq = b43_freq_to_channel_5ghz(chanid);
+ status.freq = b43_channel_to_freq_5ghz(chanid);
} else {
status.band = IEEE80211_BAND_2GHZ;
- status.freq = b43_freq_to_channel_2ghz(chanid);
+ status.freq = b43_channel_to_freq_2ghz(chanid);
}
break;
default:
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 572668821862..349c77605231 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3919,6 +3919,7 @@ static void b43legacy_remove(struct ssb_device *dev)
* as the ieee80211 unreg will destroy the workqueue. */
cancel_work_sync(&wldev->restart_work);
cancel_work_sync(&wl->firmware_load);
+ complete(&wldev->fw_load_complete);
B43legacy_WARN_ON(!wl);
if (!wldev->fw.ucode)
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index 54e36fcb3954..fcfed6b99a62 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -4,13 +4,12 @@ config BRCMUTIL
config BRCMSMAC
tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver"
depends on MAC80211
- depends on BCMA
+ depends on BCMA_POSSIBLE
+ select BCMA
select NEW_LEDS if BCMA_DRIVER_GPIO
select LEDS_CLASS if BCMA_DRIVER_GPIO
select BRCMUTIL
select FW_LOADER
- select CRC_CCITT
- select CRC8
select CORDIC
---help---
This module adds support for PCIe wireless adapters based on Broadcom
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 8e9b1221b32c..57cddee03252 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -28,14 +28,15 @@ brcmfmac-objs += \
fweh.o \
fwsignal.o \
p2p.o \
- dhd_cdc.o \
+ proto.o \
+ bcdc.o \
dhd_common.o \
dhd_linux.o \
+ nvram.o \
btcoex.o
brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
dhd_sdio.o \
bcmsdh.o \
- bcmsdh_sdmmc.o \
sdio_chip.o
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
new file mode 100644
index 000000000000..c229210d50ba
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*******************************************************************************
+ * Communicates with the dongle by using dcmd codes.
+ * For certain dcmd codes, the dongle interprets string data from the host.
+ ******************************************************************************/
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "fwsignal.h"
+#include "dhd_dbg.h"
+#include "tracepoint.h"
+#include "proto.h"
+#include "bcdc.h"
+
+struct brcmf_proto_bcdc_dcmd {
+ __le32 cmd; /* dongle command value */
+ __le32 len; /* lower 16: output buflen;
+ * upper 16: input buflen (excludes header) */
+ __le32 flags; /* flag defns given below */
+ __le32 status; /* status code returned from the device */
+};
+
+/* BCDC flag definitions */
+#define BCDC_DCMD_ERROR 0x01 /* 1=cmd failed */
+#define BCDC_DCMD_SET 0x02 /* 0=get, 1=set cmd */
+#define BCDC_DCMD_IF_MASK 0xF000 /* I/F index */
+#define BCDC_DCMD_IF_SHIFT 12
+#define BCDC_DCMD_ID_MASK 0xFFFF0000 /* id and cmd pairing */
+#define BCDC_DCMD_ID_SHIFT 16 /* ID Mask shift bits */
+#define BCDC_DCMD_ID(flags) \
+ (((flags) & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT)
+
+/*
+ * BCDC header - Broadcom specific extension of CDC.
+ * Used on data packets to convey priority across USB.
+ */
+#define BCDC_HEADER_LEN 4
+#define BCDC_PROTO_VER 2 /* Protocol version */
+#define BCDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
+#define BCDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
+#define BCDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */
+#define BCDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums */
+#define BCDC_PRIORITY_MASK 0x7
+#define BCDC_FLAG2_IF_MASK 0x0f /* packet rx interface in APSTA */
+#define BCDC_FLAG2_IF_SHIFT 0
+
+#define BCDC_GET_IF_IDX(hdr) \
+ ((int)((((hdr)->flags2) & BCDC_FLAG2_IF_MASK) >> BCDC_FLAG2_IF_SHIFT))
+#define BCDC_SET_IF_IDX(hdr, idx) \
+ ((hdr)->flags2 = (((hdr)->flags2 & ~BCDC_FLAG2_IF_MASK) | \
+ ((idx) << BCDC_FLAG2_IF_SHIFT)))
+
+/**
+ * struct brcmf_proto_bcdc_header - BCDC header format
+ *
+ * @flags: flags contain protocol and checksum info.
+ * @priority: 802.1d priority and USB flow control info (bit 4:7).
+ * @flags2: additional flags containing dongle interface index.
+ * @data_offset: start of packet data. The header is followed by firmware signals.
+ */
+struct brcmf_proto_bcdc_header {
+ u8 flags;
+ u8 priority;
+ u8 flags2;
+ u8 data_offset;
+};
+
+/*
+ * maximum length of firmware signal data between
+ * the BCDC header and packet data in the tx path.
+ */
+#define BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES 12
+
+#define RETRIES 2 /* # of retries to retrieve matching dcmd response */
+#define BUS_HEADER_LEN (16+64) /* Must be at least SDPCM_RESERVE
+ * (amount of header that might be added)
+ * plus any space that might be needed
+ * for bus alignment padding.
+ */
+struct brcmf_bcdc {
+ u16 reqid;
+ u8 bus_header[BUS_HEADER_LEN];
+ struct brcmf_proto_bcdc_dcmd msg;
+ unsigned char buf[BRCMF_DCMD_MAXLEN];
+};
+
+
+static int
+brcmf_proto_bcdc_msg(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
+ uint len, bool set)
+{
+ struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
+ struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
+ u32 flags;
+
+ brcmf_dbg(BCDC, "Enter\n");
+
+ memset(msg, 0, sizeof(struct brcmf_proto_bcdc_dcmd));
+
+ msg->cmd = cpu_to_le32(cmd);
+ msg->len = cpu_to_le32(len);
+ flags = (++bcdc->reqid << BCDC_DCMD_ID_SHIFT);
+ if (set)
+ flags |= BCDC_DCMD_SET;
+ flags = (flags & ~BCDC_DCMD_IF_MASK) |
+ (ifidx << BCDC_DCMD_IF_SHIFT);
+ msg->flags = cpu_to_le32(flags);
+
+ if (buf)
+ memcpy(bcdc->buf, buf, len);
+
+ len += sizeof(*msg);
+ if (len > BRCMF_TX_IOCTL_MAX_MSG_SIZE)
+ len = BRCMF_TX_IOCTL_MAX_MSG_SIZE;
+
+ /* Send request */
+ return brcmf_bus_txctl(drvr->bus_if, (unsigned char *)&bcdc->msg, len);
+}
+
+static int brcmf_proto_bcdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
+{
+ int ret;
+ struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
+
+ brcmf_dbg(BCDC, "Enter\n");
+ len += sizeof(struct brcmf_proto_bcdc_dcmd);
+ do {
+ ret = brcmf_bus_rxctl(drvr->bus_if, (unsigned char *)&bcdc->msg,
+ len);
+ if (ret < 0)
+ break;
+ } while (BCDC_DCMD_ID(le32_to_cpu(bcdc->msg.flags)) != id);
+
+ return ret;
+}
+
+static int
+brcmf_proto_bcdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len)
+{
+ struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
+ struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
+ void *info;
+ int ret = 0, retries = 0;
+ u32 id, flags;
+
+ brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len);
+
+ ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, false);
+ if (ret < 0) {
+ brcmf_err("brcmf_proto_bcdc_msg failed w/status %d\n",
+ ret);
+ goto done;
+ }
+
+retry:
+ /* wait for interrupt and get first fragment */
+ ret = brcmf_proto_bcdc_cmplt(drvr, bcdc->reqid, len);
+ if (ret < 0)
+ goto done;
+
+ flags = le32_to_cpu(msg->flags);
+ id = (flags & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT;
+
+ if ((id < bcdc->reqid) && (++retries < RETRIES))
+ goto retry;
+ if (id != bcdc->reqid) {
+ brcmf_err("%s: unexpected request id %d (expected %d)\n",
+ brcmf_ifname(drvr, ifidx), id, bcdc->reqid);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Check info buffer */
+ info = (void *)&msg[1];
+
+ /* Copy info buffer */
+ if (buf) {
+ if (ret < (int)len)
+ len = ret;
+ memcpy(buf, info, len);
+ }
+
+ /* Check the ERROR flag */
+ if (flags & BCDC_DCMD_ERROR)
+ ret = le32_to_cpu(msg->status);
+
+done:
+ return ret;
+}
+
+static int
+brcmf_proto_bcdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len)
+{
+ struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
+ struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
+ int ret = 0;
+ u32 flags, id;
+
+ brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len);
+
+ ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, true);
+ if (ret < 0)
+ goto done;
+
+ ret = brcmf_proto_bcdc_cmplt(drvr, bcdc->reqid, len);
+ if (ret < 0)
+ goto done;
+
+ flags = le32_to_cpu(msg->flags);
+ id = (flags & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT;
+
+ if (id != bcdc->reqid) {
+ brcmf_err("%s: unexpected request id %d (expected %d)\n",
+ brcmf_ifname(drvr, ifidx), id, bcdc->reqid);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Check the ERROR flag */
+ if (flags & BCDC_DCMD_ERROR)
+ ret = le32_to_cpu(msg->status);
+
+done:
+ return ret;
+}
+
+static void
+brcmf_proto_bcdc_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset,
+ struct sk_buff *pktbuf)
+{
+ struct brcmf_proto_bcdc_header *h;
+
+ brcmf_dbg(BCDC, "Enter\n");
+
+ /* Push BDC header used to convey priority for buses that don't */
+ skb_push(pktbuf, BCDC_HEADER_LEN);
+
+ h = (struct brcmf_proto_bcdc_header *)(pktbuf->data);
+
+ h->flags = (BCDC_PROTO_VER << BCDC_FLAG_VER_SHIFT);
+ if (pktbuf->ip_summed == CHECKSUM_PARTIAL)
+ h->flags |= BCDC_FLAG_SUM_NEEDED;
+
+ h->priority = (pktbuf->priority & BCDC_PRIORITY_MASK);
+ h->flags2 = 0;
+ h->data_offset = offset;
+ BCDC_SET_IF_IDX(h, ifidx);
+ trace_brcmf_bcdchdr(pktbuf->data);
+}
+
+static int
+brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
+ struct sk_buff *pktbuf)
+{
+ struct brcmf_proto_bcdc_header *h;
+
+ brcmf_dbg(BCDC, "Enter\n");
+
+ /* Pop BCDC header used to convey priority for buses that don't */
+ if (pktbuf->len <= BCDC_HEADER_LEN) {
+ brcmf_dbg(INFO, "rx data too short (%d <= %d)\n",
+ pktbuf->len, BCDC_HEADER_LEN);
+ return -EBADE;
+ }
+
+ trace_brcmf_bcdchdr(pktbuf->data);
+ h = (struct brcmf_proto_bcdc_header *)(pktbuf->data);
+
+ *ifidx = BCDC_GET_IF_IDX(h);
+ if (*ifidx >= BRCMF_MAX_IFS) {
+ brcmf_err("rx data ifnum out of range (%d)\n", *ifidx);
+ return -EBADE;
+ }
+ /* The ifidx is the idx to map to matching netdev/ifp. When receiving
+ * events this is easy because it contains the bssidx which maps
+ * 1-on-1 to the netdev/ifp. But for data frames the ifidx is received.
+ * bssidx 1 is used for p2p0 and no data can be received or
+ * transmitted on it. Therefore bssidx is ifidx + 1 if ifidx > 0
+ */
+ if (*ifidx)
+ (*ifidx)++;
+
+ if (((h->flags & BCDC_FLAG_VER_MASK) >> BCDC_FLAG_VER_SHIFT) !=
+ BCDC_PROTO_VER) {
+ brcmf_err("%s: non-BCDC packet received, flags 0x%x\n",
+ brcmf_ifname(drvr, *ifidx), h->flags);
+ return -EBADE;
+ }
+
+ if (h->flags & BCDC_FLAG_SUM_GOOD) {
+ brcmf_dbg(BCDC, "%s: BDC rcv, good checksum, flags 0x%x\n",
+ brcmf_ifname(drvr, *ifidx), h->flags);
+ pktbuf->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+
+ pktbuf->priority = h->priority & BCDC_PRIORITY_MASK;
+
+ skb_pull(pktbuf, BCDC_HEADER_LEN);
+ if (do_fws)
+ brcmf_fws_hdrpull(drvr, *ifidx, h->data_offset << 2, pktbuf);
+ else
+ skb_pull(pktbuf, h->data_offset << 2);
+
+ if (pktbuf->len == 0)
+ return -ENODATA;
+ return 0;
+}
+
+static int
+brcmf_proto_bcdc_txdata(struct brcmf_pub *drvr, int ifidx, u8 offset,
+ struct sk_buff *pktbuf)
+{
+ brcmf_proto_bcdc_hdrpush(drvr, ifidx, offset, pktbuf);
+ return brcmf_bus_txdata(drvr->bus_if, pktbuf);
+}
+
+
+int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
+{
+ struct brcmf_bcdc *bcdc;
+
+ bcdc = kzalloc(sizeof(*bcdc), GFP_ATOMIC);
+ if (!bcdc)
+ goto fail;
+
+ /* ensure that the msg buf directly follows the cdc msg struct */
+ if ((unsigned long)(&bcdc->msg + 1) != (unsigned long)bcdc->buf) {
+ brcmf_err("struct brcmf_proto_bcdc is not correctly defined\n");
+ goto fail;
+ }
+
+ drvr->proto->hdrpull = brcmf_proto_bcdc_hdrpull;
+ drvr->proto->query_dcmd = brcmf_proto_bcdc_query_dcmd;
+ drvr->proto->set_dcmd = brcmf_proto_bcdc_set_dcmd;
+ drvr->proto->txdata = brcmf_proto_bcdc_txdata;
+ drvr->proto->pd = bcdc;
+
+ drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
+ drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
+ sizeof(struct brcmf_proto_bcdc_dcmd);
+ return 0;
+
+fail:
+ kfree(bcdc);
+ return -ENOMEM;
+}
+
+void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr)
+{
+ kfree(drvr->proto->pd);
+ drvr->proto->pd = NULL;
+}
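
The dcmd flags word in the new bcdc.c multiplexes a per-request id and the originating interface index; a small illustration of how a request gets tagged and a response matched, using only the masks and the BCDC_DCMD_ID() macro defined above (the sketch_ function names are illustrative):

/* Illustration only: packing and unpacking the BCDC dcmd flags word. */
static u32 sketch_bcdc_pack_flags(u16 reqid, int ifidx, bool set)
{
	u32 flags = (u32)reqid << BCDC_DCMD_ID_SHIFT;

	if (set)
		flags |= BCDC_DCMD_SET;
	flags = (flags & ~BCDC_DCMD_IF_MASK) | (ifidx << BCDC_DCMD_IF_SHIFT);
	return flags;
}

static bool sketch_bcdc_response_matches(u32 resp_flags, u16 reqid)
{
	/* BCDC_DCMD_ID() recovers the id the response was issued for */
	return BCDC_DCMD_ID(resp_flags) == reqid;
}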
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h
new file mode 100644
index 000000000000..17e8c039ff32
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_BCDC_H
+#define BRCMFMAC_BCDC_H
+
+
+int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr);
+void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
+
+
+#endif /* BRCMFMAC_BCDC_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 3e10b801eee8..fa35b23bbaa7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -17,16 +17,23 @@
#include <linux/types.h>
#include <linux/netdevice.h>
-#include <linux/export.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
+#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/platform_device.h>
#include <linux/platform_data/brcmfmac-sdio.h>
+#include <linux/suspend.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <net/cfg80211.h>
#include <defs.h>
#include <brcm_hw_ids.h>
@@ -36,11 +43,19 @@
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"
+#include "sdio_chip.h"
#define SDIOH_API_ACCESS_RETRY_LIMIT 2
+#define DMA_ALIGN_MASK 0x03
-static irqreturn_t brcmf_sdio_oob_irqhandler(int irq, void *dev_id)
+#define SDIO_FUNC1_BLOCKSIZE 64
+#define SDIO_FUNC2_BLOCKSIZE 512
+/* Maximum milliseconds to wait for F2 to come up */
+#define SDIO_WAIT_F2RDY 3000
+
+
+static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
@@ -55,27 +70,46 @@ static irqreturn_t brcmf_sdio_oob_irqhandler(int irq, void *dev_id)
sdiodev->irq_en = false;
}
- brcmf_sdbrcm_isr(sdiodev->bus);
+ brcmf_sdio_isr(sdiodev->bus);
return IRQ_HANDLED;
}
-static void brcmf_sdio_ib_irqhandler(struct sdio_func *func)
+static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
{
struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
brcmf_dbg(INTR, "IB intr triggered\n");
- brcmf_sdbrcm_isr(sdiodev->bus);
+ brcmf_sdio_isr(sdiodev->bus);
}
/* dummy handler for SDIO function 2 interrupt */
-static void brcmf_sdio_dummy_irqhandler(struct sdio_func *func)
+static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
{
}
-int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
+static bool brcmf_sdiod_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
+{
+ bool is_err = false;
+#ifdef CONFIG_PM_SLEEP
+ is_err = atomic_read(&sdiodev->suspend);
+#endif
+ return is_err;
+}
+
+static void brcmf_sdiod_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
+ wait_queue_head_t *wq)
+{
+#ifdef CONFIG_PM_SLEEP
+ int retry = 0;
+ while (atomic_read(&sdiodev->suspend) && retry++ != 30)
+ wait_event_timeout(*wq, false, HZ/100);
+#endif
+}
+
+int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
int ret = 0;
u8 data;
@@ -85,7 +119,7 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
sdiodev->pdata->oob_irq_nr);
ret = request_irq(sdiodev->pdata->oob_irq_nr,
- brcmf_sdio_oob_irqhandler,
+ brcmf_sdiod_oob_irqhandler,
sdiodev->pdata->oob_irq_flags,
"brcmf_oob_intr",
&sdiodev->func[1]->dev);
@@ -109,36 +143,36 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
sdio_claim_host(sdiodev->func[1]);
/* must configure SDIO_CCCR_IENx to enable irq */
- data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
+ data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
- brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
+ brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
/* redirect, configure and enable io for interrupt signal */
data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
data |= SDIO_SEPINT_ACT_HI;
- brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
+ brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
sdio_release_host(sdiodev->func[1]);
} else {
brcmf_dbg(SDIO, "Entering\n");
sdio_claim_host(sdiodev->func[1]);
- sdio_claim_irq(sdiodev->func[1], brcmf_sdio_ib_irqhandler);
- sdio_claim_irq(sdiodev->func[2], brcmf_sdio_dummy_irqhandler);
+ sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
+ sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
sdio_release_host(sdiodev->func[1]);
}
return 0;
}
-int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
+int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
brcmf_dbg(SDIO, "Entering\n");
if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
sdio_claim_host(sdiodev->func[1]);
- brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
- brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
+ brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
+ brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
sdio_release_host(sdiodev->func[1]);
if (sdiodev->oob_irq_requested) {
@@ -161,29 +195,150 @@ int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
return 0;
}
+static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
+ uint regaddr, u8 byte)
+{
+ int err_ret;
+
+ /*
+ * Can only directly write to some F0 registers.
+ * Handle CCCR_IENx and CCCR_ABORT command
+ * as a special case.
+ */
+ if ((regaddr == SDIO_CCCR_ABORT) ||
+ (regaddr == SDIO_CCCR_IENx))
+ sdio_writeb(func, byte, regaddr, &err_ret);
+ else
+ sdio_f0_writeb(func, byte, regaddr, &err_ret);
+
+ return err_ret;
+}
+
+static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
+ u32 addr, u8 regsz, void *data, bool write)
+{
+ struct sdio_func *func;
+ int ret;
+
+ brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+ write, fn, addr, regsz);
+
+ brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
+ if (brcmf_sdiod_pm_resume_error(sdiodev))
+ return -EIO;
+
+ /* only allow byte access on F0 */
+ if (WARN_ON(regsz > 1 && !fn))
+ return -EINVAL;
+ func = sdiodev->func[fn];
+
+ switch (regsz) {
+ case sizeof(u8):
+ if (write) {
+ if (fn)
+ sdio_writeb(func, *(u8 *)data, addr, &ret);
+ else
+ ret = brcmf_sdiod_f0_writeb(func, addr,
+ *(u8 *)data);
+ } else {
+ if (fn)
+ *(u8 *)data = sdio_readb(func, addr, &ret);
+ else
+ *(u8 *)data = sdio_f0_readb(func, addr, &ret);
+ }
+ break;
+ case sizeof(u16):
+ if (write)
+ sdio_writew(func, *(u16 *)data, addr, &ret);
+ else
+ *(u16 *)data = sdio_readw(func, addr, &ret);
+ break;
+ case sizeof(u32):
+ if (write)
+ sdio_writel(func, *(u32 *)data, addr, &ret);
+ else
+ *(u32 *)data = sdio_readl(func, addr, &ret);
+ break;
+ default:
+ brcmf_err("invalid size: %d\n", regsz);
+ break;
+ }
+
+ if (ret) {
+ /*
+ * SleepCSR register access can fail when
+ * waking up the device so reduce this noise
+ * in the logs.
+ */
+ if (addr != SBSDIO_FUNC1_SLEEPCSR)
+ brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
+ write ? "write" : "read", fn, addr, ret);
+ else
+ brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
+ write ? "write" : "read", fn, addr, ret);
+ }
+ return ret;
+}
+
+static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ u8 regsz, void *data, bool write)
+{
+ u8 func_num;
+ s32 retry = 0;
+ int ret;
+
+ if (sdiodev->bus_if->state == BRCMF_BUS_NOMEDIUM)
+ return -ENOMEDIUM;
+
+ /*
+ * figure out how to read the register based on address range
+ * 0x00 ~ 0x7FF: function 0 CCCR and FBR
+ * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
+ * The rest: function 1 silicon backplane core registers
+ */
+ if ((addr & ~REG_F0_REG_MASK) == 0)
+ func_num = SDIO_FUNC_0;
+ else
+ func_num = SDIO_FUNC_1;
+
+ do {
+ if (!write)
+ memset(data, 0, regsz);
+ /* on retry, wait 1 ms for the bus to settle down */
+ if (retry)
+ usleep_range(1000, 2000);
+ ret = brcmf_sdiod_request_data(sdiodev, func_num, addr, regsz,
+ data, write);
+ } while (ret != 0 && ret != -ENOMEDIUM &&
+ retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
+
+ if (ret == -ENOMEDIUM)
+ brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
+ else if (ret != 0)
+ brcmf_err("failed with %d\n", ret);
+
+ return ret;
+}
+
static int
-brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
+brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
{
int err = 0, i;
u8 addr[3];
- s32 retry;
+
+ if (sdiodev->bus_if->state == BRCMF_BUS_NOMEDIUM)
+ return -ENOMEDIUM;
addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;
for (i = 0; i < 3; i++) {
- retry = 0;
- do {
- if (retry)
- usleep_range(1000, 2000);
- err = brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE,
- SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW + i,
- &addr[i]);
- } while (err != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
-
+ err = brcmf_sdiod_regrw_helper(sdiodev,
+ SBSDIO_FUNC1_SBADDRLOW + i,
+ sizeof(u8), &addr[i], true);
if (err) {
- brcmf_err("failed at addr:0x%0x\n",
+ brcmf_err("failed at addr: 0x%0x\n",
SBSDIO_FUNC1_SBADDRLOW + i);
break;
}
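
The new brcmf_sdiod_regrw_helper() above documents which SDIO function serves which address window; in isolation that rule is just a range check, sketched here (the helper name is illustrative):

/* Illustration of the address-to-function rule used by
 * brcmf_sdiod_regrw_helper(): F0 only for the CCCR/FBR window.
 */
static u8 sketch_sdiod_func_for_addr(u32 addr)
{
	if ((addr & ~REG_F0_REG_MASK) == 0)
		return SDIO_FUNC_0;	/* 0x000 - 0x7FF: function 0 CCCR and FBR */
	return SDIO_FUNC_1;		/* misc and backplane registers */
}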
@@ -193,13 +348,13 @@ brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
}
static int
-brcmf_sdio_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
+brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
{
uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
if (bar0 != sdiodev->sbwad) {
- err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
+ err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
if (err)
return err;
@@ -214,62 +369,14 @@ brcmf_sdio_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
return 0;
}
-int
-brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
- void *data, bool write)
-{
- u8 func_num, reg_size;
- s32 retry = 0;
- int ret;
-
- /*
- * figure out how to read the register based on address range
- * 0x00 ~ 0x7FF: function 0 CCCR and FBR
- * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
- * The rest: function 1 silicon backplane core registers
- */
- if ((addr & ~REG_F0_REG_MASK) == 0) {
- func_num = SDIO_FUNC_0;
- reg_size = 1;
- } else if ((addr & ~REG_F1_MISC_MASK) == 0) {
- func_num = SDIO_FUNC_1;
- reg_size = 1;
- } else {
- func_num = SDIO_FUNC_1;
- reg_size = 4;
-
- ret = brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
- if (ret)
- goto done;
- }
-
- do {
- if (!write)
- memset(data, 0, reg_size);
- if (retry) /* wait for 1 ms till bus get settled down */
- usleep_range(1000, 2000);
- if (reg_size == 1)
- ret = brcmf_sdioh_request_byte(sdiodev, write,
- func_num, addr, data);
- else
- ret = brcmf_sdioh_request_word(sdiodev, write,
- func_num, addr, data, 4);
- } while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
-
-done:
- if (ret != 0)
- brcmf_err("failed with %d\n", ret);
-
- return ret;
-}
-
-u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
u8 data;
int retval;
brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
- retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+ retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
+ false);
brcmf_dbg(SDIO, "data:0x%02x\n", data);
if (ret)
@@ -278,52 +385,63 @@ u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
return data;
}
-u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
u32 data;
int retval;
brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
- retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+ retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
+ if (retval)
+ goto done;
+ retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
+ false);
brcmf_dbg(SDIO, "data:0x%08x\n", data);
+done:
if (ret)
*ret = retval;
return data;
}
-void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
+void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
u8 data, int *ret)
{
int retval;
brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
- retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
-
+ retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
+ true);
if (ret)
*ret = retval;
}
-void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
+void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
u32 data, int *ret)
{
int retval;
brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
- retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
+ retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
+ if (retval)
+ goto done;
+ retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
+ true);
+done:
if (ret)
*ret = retval;
}
-static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
+static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
bool write, u32 addr, struct sk_buff *pkt)
{
unsigned int req_sz;
+ int err;
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
- if (brcmf_pm_resume_error(sdiodev))
+ brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
+ if (brcmf_sdiod_pm_resume_error(sdiodev))
return -EIO;
/* Single skb use the standard mmc interface */
@@ -331,22 +449,22 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
req_sz &= (uint)~3;
if (write)
- return sdio_memcpy_toio(sdiodev->func[fn], addr,
- ((u8 *)(pkt->data)),
- req_sz);
+ err = sdio_memcpy_toio(sdiodev->func[fn], addr,
+ ((u8 *)(pkt->data)), req_sz);
else if (fn == 1)
- return sdio_memcpy_fromio(sdiodev->func[fn],
- ((u8 *)(pkt->data)),
- addr, req_sz);
+ err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)),
+ addr, req_sz);
else
/* function 2 read is FIFO operation */
- return sdio_readsb(sdiodev->func[fn],
- ((u8 *)(pkt->data)), addr,
- req_sz);
+ err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
+ req_sz);
+ if (err == -ENOMEDIUM)
+ brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
+ return err;
}
/**
- * brcmf_sdio_sglist_rw - SDIO interface function for block data access
+ * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
* @sdiodev: brcmfmac sdio device
* @fn: SDIO function number
* @write: direction flag
@@ -357,9 +475,9 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
* stack for block data access. It assumes that the skb passed down by the
* caller has already been padded and aligned.
*/
-static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
- bool write, u32 addr,
- struct sk_buff_head *pktlist)
+static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
+ bool write, u32 addr,
+ struct sk_buff_head *pktlist)
{
unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
unsigned int max_req_sz, orig_offset, dst_offset;
@@ -377,8 +495,8 @@ static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
if (!pktlist->qlen)
return -EINVAL;
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
- if (brcmf_pm_resume_error(sdiodev))
+ brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
+ if (brcmf_sdiod_pm_resume_error(sdiodev))
return -EIO;
target_list = pktlist;
@@ -485,7 +603,11 @@ static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);
ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
- if (ret != 0) {
+ if (ret == -ENOMEDIUM) {
+ brcmf_bus_change_state(sdiodev->bus_if,
+ BRCMF_BUS_NOMEDIUM);
+ break;
+ } else if (ret != 0) {
brcmf_err("CMD53 sg block %s failed %d\n",
write ? "write" : "read", ret);
ret = -EIO;
@@ -525,9 +647,7 @@ exit:
return ret;
}
-int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes)
+int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
struct sk_buff *mypkt;
int err;
@@ -539,7 +659,7 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
return -EIO;
}
- err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
+ err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
if (!err)
memcpy(buf, mypkt->data, nbytes);
@@ -547,50 +667,47 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
return err;
}
-int
-brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff *pkt)
+int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
{
- uint width;
+ u32 addr = sdiodev->sbwad;
int err = 0;
- brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
- fn, addr, pkt->len);
+ brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
- width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+ err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
if (err)
goto done;
- err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pkt);
+ err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);
done:
return err;
}
-int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq, uint totlen)
+int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
+ struct sk_buff_head *pktq, uint totlen)
{
struct sk_buff *glom_skb;
struct sk_buff *skb;
- uint width;
+ u32 addr = sdiodev->sbwad;
int err = 0;
- brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
- fn, addr, pktq->qlen);
+ brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
+ addr, pktq->qlen);
- width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+ err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
if (err)
goto done;
if (pktq->qlen == 1)
- err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq->next);
+ err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
+ pktq->next);
else if (!sdiodev->sg_support) {
glom_skb = brcmu_pkt_buf_get_skb(totlen);
if (!glom_skb)
return -ENOMEM;
- err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, glom_skb);
+ err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
+ glom_skb);
if (err)
goto done;
@@ -599,18 +716,17 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
skb_pull(glom_skb, skb->len);
}
} else
- err = brcmf_sdio_sglist_rw(sdiodev, fn, false, addr, pktq);
+ err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
+ pktq);
done:
return err;
}
-int
-brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes)
+int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
struct sk_buff *mypkt;
- uint width;
+ u32 addr = sdiodev->sbwad;
int err;
mypkt = brcmu_pkt_buf_get_skb(nbytes);
@@ -622,48 +738,47 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
memcpy(mypkt->data, buf, nbytes);
- width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+ err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
if (!err)
- err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, mypkt);
+ err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
+ mypkt);
brcmu_pkt_buf_free_skb(mypkt);
return err;
}
-int
-brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq)
+int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
+ struct sk_buff_head *pktq)
{
struct sk_buff *skb;
- uint width;
+ u32 addr = sdiodev->sbwad;
int err;
- brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
- fn, addr, pktq->qlen);
+ brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
- width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+ err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
if (err)
return err;
if (pktq->qlen == 1 || !sdiodev->sg_support)
skb_queue_walk(pktq, skb) {
- err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, skb);
+ err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
+ addr, skb);
if (err)
break;
}
else
- err = brcmf_sdio_sglist_rw(sdiodev, fn, true, addr, pktq);
+ err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
+ pktq);
return err;
}
int
-brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
- u8 *data, uint size)
+brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+ u8 *data, uint size)
{
int bcmerror = 0;
struct sk_buff *pkt;
@@ -690,7 +805,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
/* Do the transfer(s) */
while (size) {
/* Set the backplane window to include the start address */
- bcmerror = brcmf_sdcard_set_sbaddr_window(sdiodev, address);
+ bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
if (bcmerror)
break;
@@ -704,8 +819,8 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
skb_put(pkt, dsize);
if (write)
memcpy(pkt->data, data, dsize);
- bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
- sdaddr, pkt);
+ bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
+ sdaddr, pkt);
if (bcmerror) {
brcmf_err("membytes transfer failed\n");
break;
@@ -727,7 +842,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
dev_kfree_skb(pkt);
/* Return the window to backplane enumeration space for core access */
- if (brcmf_sdcard_set_sbaddr_window(sdiodev, sdiodev->sbwad))
+ if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
brcmf_err("FAILED to set window back to 0x%x\n",
sdiodev->sbwad);
@@ -736,67 +851,335 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
return bcmerror;
}
-int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
{
char t_func = (char)fn;
brcmf_dbg(SDIO, "Enter\n");
/* issue abort cmd52 command through F0 */
- brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
- SDIO_CCCR_ABORT, &t_func);
+ brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT,
+ sizeof(t_func), &t_func, true);
brcmf_dbg(SDIO, "Exit\n");
return 0;
}
-int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
+static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
- u32 regs = 0;
+ if (sdiodev->bus) {
+ brcmf_sdio_remove(sdiodev->bus);
+ sdiodev->bus = NULL;
+ }
+
+ /* Disable Function 2 */
+ sdio_claim_host(sdiodev->func[2]);
+ sdio_disable_func(sdiodev->func[2]);
+ sdio_release_host(sdiodev->func[2]);
+
+ /* Disable Function 1 */
+ sdio_claim_host(sdiodev->func[1]);
+ sdio_disable_func(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func[1]);
+
+ sdiodev->sbwad = 0;
+
+ return 0;
+}
+
+static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
+{
+ struct sdio_func *func;
+ struct mmc_host *host;
+ uint max_blocks;
int ret = 0;
- ret = brcmf_sdioh_attach(sdiodev);
- if (ret)
+ sdiodev->num_funcs = 2;
+
+ sdio_claim_host(sdiodev->func[1]);
+
+ ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
+ if (ret) {
+ brcmf_err("Failed to set F1 blocksize\n");
+ sdio_release_host(sdiodev->func[1]);
+ goto out;
+ }
+ ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
+ if (ret) {
+ brcmf_err("Failed to set F2 blocksize\n");
+ sdio_release_host(sdiodev->func[1]);
+ goto out;
+ }
+
+ /* increase F2 timeout */
+ sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY;
+
+ /* Enable Function 1 */
+ ret = sdio_enable_func(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func[1]);
+ if (ret) {
+ brcmf_err("Failed to enable F1: err=%d\n", ret);
goto out;
+ }
- regs = SI_ENUM_BASE;
+ /*
+ * determine host related variables after brcmf_sdiod_probe()
+ * as func->cur_blksize is properly set and F2 init has been
+ * completed successfully.
+ */
+ func = sdiodev->func[2];
+ host = func->card->host;
+ sdiodev->sg_support = host->max_segs > 1;
+ max_blocks = min_t(uint, host->max_blk_count, 511u);
+ sdiodev->max_request_size = min_t(uint, host->max_req_size,
+ max_blocks * func->cur_blksize);
+ sdiodev->max_segment_count = min_t(uint, host->max_segs,
+ SG_MAX_SINGLE_ALLOC);
+ sdiodev->max_segment_size = host->max_seg_size;
/* try to attach to the target device */
- sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
+ sdiodev->bus = brcmf_sdio_probe(sdiodev);
if (!sdiodev->bus) {
- brcmf_err("device attach failed\n");
ret = -ENODEV;
goto out;
}
out:
if (ret)
- brcmf_sdio_remove(sdiodev);
+ brcmf_sdiod_remove(sdiodev);
return ret;
}
-EXPORT_SYMBOL(brcmf_sdio_probe);
-int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev)
+/* devices we support, null terminated */
+static const struct sdio_device_id brcmf_sdmmc_ids[] = {
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43143)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43362)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
+ SDIO_DEVICE_ID_BROADCOM_4335_4339)},
+ { /* end: all zeroes */ },
+};
+MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
+
+static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
+
+
+static int brcmf_ops_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
{
- sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+ int err;
+ struct brcmf_sdio_dev *sdiodev;
+ struct brcmf_bus *bus_if;
- if (sdiodev->bus) {
- brcmf_sdbrcm_disconnect(sdiodev->bus);
- sdiodev->bus = NULL;
+ brcmf_dbg(SDIO, "Enter\n");
+ brcmf_dbg(SDIO, "Class=%x\n", func->class);
+ brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
+ brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
+ brcmf_dbg(SDIO, "Function#: %d\n", func->num);
+
+ /* Consume func num 1 but don't do anything with it. */
+ if (func->num == 1)
+ return 0;
+
+ /* Ignore anything but func 2 */
+ if (func->num != 2)
+ return -ENODEV;
+
+ bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
+ if (!bus_if)
+ return -ENOMEM;
+ sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
+ if (!sdiodev) {
+ kfree(bus_if);
+ return -ENOMEM;
}
- brcmf_sdioh_detach(sdiodev);
+ /* store refs to functions used. mmc_card does
+ * not hold the F0 function pointer.
+ */
+ sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
+ sdiodev->func[0]->num = 0;
+ sdiodev->func[1] = func->card->sdio_func[0];
+ sdiodev->func[2] = func;
+
+ sdiodev->bus_if = bus_if;
+ bus_if->bus_priv.sdio = sdiodev;
+ bus_if->proto_type = BRCMF_PROTO_BCDC;
+ dev_set_drvdata(&func->dev, bus_if);
+ dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
+ sdiodev->dev = &sdiodev->func[1]->dev;
+ sdiodev->pdata = brcmfmac_sdio_pdata;
+
+ atomic_set(&sdiodev->suspend, false);
+ init_waitqueue_head(&sdiodev->request_word_wait);
+ init_waitqueue_head(&sdiodev->request_buffer_wait);
+
+ brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
+ err = brcmf_sdiod_probe(sdiodev);
+ if (err) {
+ brcmf_err("F2 error, probe failed %d...\n", err);
+ goto fail;
+ }
- sdiodev->sbwad = 0;
+ brcmf_dbg(SDIO, "F2 init completed...\n");
+ return 0;
+
+fail:
+ dev_set_drvdata(&func->dev, NULL);
+ dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
+ kfree(sdiodev->func[0]);
+ kfree(sdiodev);
+ kfree(bus_if);
+ return err;
+}
+
+static void brcmf_ops_sdio_remove(struct sdio_func *func)
+{
+ struct brcmf_bus *bus_if;
+ struct brcmf_sdio_dev *sdiodev;
+
+ brcmf_dbg(SDIO, "Enter\n");
+ brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
+ brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
+ brcmf_dbg(SDIO, "Function: %d\n", func->num);
+
+ if (func->num != 1 && func->num != 2)
+ return;
+
+ bus_if = dev_get_drvdata(&func->dev);
+ if (bus_if) {
+ sdiodev = bus_if->bus_priv.sdio;
+ brcmf_sdiod_remove(sdiodev);
+
+ dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
+ dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
+
+ kfree(bus_if);
+ kfree(sdiodev->func[0]);
+ kfree(sdiodev);
+ }
+
+ brcmf_dbg(SDIO, "Exit\n");
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int brcmf_ops_sdio_suspend(struct device *dev)
+{
+ mmc_pm_flag_t sdio_flags;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ int ret = 0;
+
+ brcmf_dbg(SDIO, "\n");
+
+ atomic_set(&sdiodev->suspend, true);
+
+ sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
+ if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
+ brcmf_err("Host can't keep power while suspended\n");
+ return -EINVAL;
+ }
+
+ ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
+ if (ret) {
+ brcmf_err("Failed to set pm_flags\n");
+ return ret;
+ }
+
+ brcmf_sdio_wd_timer(sdiodev->bus, 0);
+
+ return ret;
+}
+
+static int brcmf_ops_sdio_resume(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+
+ brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
+ atomic_set(&sdiodev->suspend, false);
+ return 0;
+}
+
+static const struct dev_pm_ops brcmf_sdio_pm_ops = {
+ .suspend = brcmf_ops_sdio_suspend,
+ .resume = brcmf_ops_sdio_resume,
+};
+#endif /* CONFIG_PM_SLEEP */
+
+static struct sdio_driver brcmf_sdmmc_driver = {
+ .probe = brcmf_ops_sdio_probe,
+ .remove = brcmf_ops_sdio_remove,
+ .name = BRCMFMAC_SDIO_PDATA_NAME,
+ .id_table = brcmf_sdmmc_ids,
+#ifdef CONFIG_PM_SLEEP
+ .drv = {
+ .pm = &brcmf_sdio_pm_ops,
+ },
+#endif /* CONFIG_PM_SLEEP */
+};
+
+static int brcmf_sdio_pd_probe(struct platform_device *pdev)
+{
+ brcmf_dbg(SDIO, "Enter\n");
+
+ brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
+
+ if (brcmfmac_sdio_pdata->power_on)
+ brcmfmac_sdio_pdata->power_on();
return 0;
}
-EXPORT_SYMBOL(brcmf_sdio_remove);
-void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable)
+static int brcmf_sdio_pd_remove(struct platform_device *pdev)
{
- if (enable)
- brcmf_sdbrcm_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
+ brcmf_dbg(SDIO, "Enter\n");
+
+ if (brcmfmac_sdio_pdata->power_off)
+ brcmfmac_sdio_pdata->power_off();
+
+ sdio_unregister_driver(&brcmf_sdmmc_driver);
+
+ return 0;
+}
+
+static struct platform_driver brcmf_sdio_pd = {
+ .remove = brcmf_sdio_pd_remove,
+ .driver = {
+ .name = BRCMFMAC_SDIO_PDATA_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+void brcmf_sdio_register(void)
+{
+ int ret;
+
+ ret = sdio_register_driver(&brcmf_sdmmc_driver);
+ if (ret)
+ brcmf_err("sdio_register_driver failed: %d\n", ret);
+}
+
+void brcmf_sdio_exit(void)
+{
+ brcmf_dbg(SDIO, "Enter\n");
+
+ if (brcmfmac_sdio_pdata)
+ platform_driver_unregister(&brcmf_sdio_pd);
else
- brcmf_sdbrcm_wd_timer(sdiodev->bus, 0);
+ sdio_unregister_driver(&brcmf_sdmmc_driver);
+}
+
+void __init brcmf_sdio_init(void)
+{
+ int ret;
+
+ brcmf_dbg(SDIO, "Enter\n");
+
+ ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
+ if (ret == -ENODEV)
+ brcmf_dbg(SDIO, "No platform data available.\n");
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
deleted file mode 100644
index abc9ceca70f3..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ /dev/null
@@ -1,539 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/mmc/sdio.h>
-#include <linux/mmc/core.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-#include <linux/suspend.h>
-#include <linux/errno.h>
-#include <linux/sched.h> /* request_irq() */
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/brcmfmac-sdio.h>
-#include <net/cfg80211.h>
-
-#include <defs.h>
-#include <brcm_hw_ids.h>
-#include <brcmu_utils.h>
-#include <brcmu_wifi.h>
-#include "sdio_host.h"
-#include "sdio_chip.h"
-#include "dhd_dbg.h"
-#include "dhd_bus.h"
-
-#define SDIO_VENDOR_ID_BROADCOM 0x02d0
-
-#define DMA_ALIGN_MASK 0x03
-
-#define SDIO_FUNC1_BLOCKSIZE 64
-#define SDIO_FUNC2_BLOCKSIZE 512
-
-/* devices we support, null terminated */
-static const struct sdio_device_id brcmf_sdmmc_ids[] = {
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43143)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
- SDIO_DEVICE_ID_BROADCOM_4335_4339)},
- { /* end: all zeroes */ },
-};
-MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
-
-static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
-
-
-bool
-brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
-{
- bool is_err = false;
-#ifdef CONFIG_PM_SLEEP
- is_err = atomic_read(&sdiodev->suspend);
-#endif
- return is_err;
-}
-
-void
-brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev, wait_queue_head_t *wq)
-{
-#ifdef CONFIG_PM_SLEEP
- int retry = 0;
- while (atomic_read(&sdiodev->suspend) && retry++ != 30)
- wait_event_timeout(*wq, false, HZ/100);
-#endif
-}
-
-static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
- uint regaddr, u8 *byte)
-{
- struct sdio_func *sdfunc = sdiodev->func[0];
- int err_ret;
-
- /*
- * Can only directly write to some F0 registers.
- * Handle F2 enable/disable and Abort command
- * as a special case.
- */
- if (regaddr == SDIO_CCCR_IOEx) {
- sdfunc = sdiodev->func[2];
- if (sdfunc) {
- if (*byte & SDIO_FUNC_ENABLE_2) {
- /* Enable Function 2 */
- err_ret = sdio_enable_func(sdfunc);
- if (err_ret)
- brcmf_err("enable F2 failed:%d\n",
- err_ret);
- } else {
- /* Disable Function 2 */
- err_ret = sdio_disable_func(sdfunc);
- if (err_ret)
- brcmf_err("Disable F2 failed:%d\n",
- err_ret);
- }
- } else {
- err_ret = -ENOENT;
- }
- } else if ((regaddr == SDIO_CCCR_ABORT) ||
- (regaddr == SDIO_CCCR_IENx)) {
- sdfunc = kmemdup(sdiodev->func[0], sizeof(struct sdio_func),
- GFP_KERNEL);
- if (!sdfunc)
- return -ENOMEM;
- sdfunc->num = 0;
- sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
- kfree(sdfunc);
- } else if (regaddr < 0xF0) {
- brcmf_err("F0 Wr:0x%02x: write disallowed\n", regaddr);
- err_ret = -EPERM;
- } else {
- sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret);
- }
-
- return err_ret;
-}
-
-int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
- uint regaddr, u8 *byte)
-{
- int err_ret;
-
- brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x\n", rw, func, regaddr);
-
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_byte_wait);
- if (brcmf_pm_resume_error(sdiodev))
- return -EIO;
-
- if (rw && func == 0) {
- /* handle F0 separately */
- err_ret = brcmf_sdioh_f0_write_byte(sdiodev, regaddr, byte);
- } else {
- if (rw) /* CMD52 Write */
- sdio_writeb(sdiodev->func[func], *byte, regaddr,
- &err_ret);
- else if (func == 0) {
- *byte = sdio_f0_readb(sdiodev->func[func], regaddr,
- &err_ret);
- } else {
- *byte = sdio_readb(sdiodev->func[func], regaddr,
- &err_ret);
- }
- }
-
- if (err_ret)
- brcmf_err("Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
- rw ? "write" : "read", func, regaddr, *byte, err_ret);
-
- return err_ret;
-}
-
-int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
- uint rw, uint func, uint addr, u32 *word,
- uint nbytes)
-{
- int err_ret = -EIO;
-
- if (func == 0) {
- brcmf_err("Only CMD52 allowed to F0\n");
- return -EINVAL;
- }
-
- brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
- rw, func, addr, nbytes);
-
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
- if (brcmf_pm_resume_error(sdiodev))
- return -EIO;
-
- if (rw) { /* CMD52 Write */
- if (nbytes == 4)
- sdio_writel(sdiodev->func[func], *word, addr,
- &err_ret);
- else if (nbytes == 2)
- sdio_writew(sdiodev->func[func], (*word & 0xFFFF),
- addr, &err_ret);
- else
- brcmf_err("Invalid nbytes: %d\n", nbytes);
- } else { /* CMD52 Read */
- if (nbytes == 4)
- *word = sdio_readl(sdiodev->func[func], addr, &err_ret);
- else if (nbytes == 2)
- *word = sdio_readw(sdiodev->func[func], addr,
- &err_ret) & 0xFFFF;
- else
- brcmf_err("Invalid nbytes: %d\n", nbytes);
- }
-
- if (err_ret)
- brcmf_err("Failed to %s word, Err: 0x%08x\n",
- rw ? "write" : "read", err_ret);
-
- return err_ret;
-}
-
-static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr)
-{
- /* read 24 bits and return valid 17 bit addr */
- int i, ret;
- u32 scratch, regdata;
- __le32 scratch_le;
- u8 *ptr = (u8 *)&scratch_le;
-
- for (i = 0; i < 3; i++) {
- regdata = brcmf_sdio_regrl(sdiodev, regaddr, &ret);
- if (ret != 0)
- brcmf_err("Can't read!\n");
-
- *ptr++ = (u8) regdata;
- regaddr++;
- }
-
- /* Only the lower 17-bits are valid */
- scratch = le32_to_cpu(scratch_le);
- scratch &= 0x0001FFFF;
- return scratch;
-}
-
-static int brcmf_sdioh_enablefuncs(struct brcmf_sdio_dev *sdiodev)
-{
- int err_ret;
- u32 fbraddr;
- u8 func;
-
- brcmf_dbg(SDIO, "\n");
-
- /* Get the Card's common CIS address */
- sdiodev->func_cis_ptr[0] = brcmf_sdioh_get_cisaddr(sdiodev,
- SDIO_CCCR_CIS);
- brcmf_dbg(SDIO, "Card's Common CIS Ptr = 0x%x\n",
- sdiodev->func_cis_ptr[0]);
-
- /* Get the Card's function CIS (for each function) */
- for (fbraddr = SDIO_FBR_BASE(1), func = 1;
- func <= sdiodev->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
- sdiodev->func_cis_ptr[func] =
- brcmf_sdioh_get_cisaddr(sdiodev, SDIO_FBR_CIS + fbraddr);
- brcmf_dbg(SDIO, "Function %d CIS Ptr = 0x%x\n",
- func, sdiodev->func_cis_ptr[func]);
- }
-
- /* Enable Function 1 */
- err_ret = sdio_enable_func(sdiodev->func[1]);
- if (err_ret)
- brcmf_err("Failed to enable F1 Err: 0x%08x\n", err_ret);
-
- return false;
-}
-
-/*
- * Public entry points & extern's
- */
-int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev)
-{
- int err_ret = 0;
-
- brcmf_dbg(SDIO, "\n");
-
- sdiodev->num_funcs = 2;
-
- sdio_claim_host(sdiodev->func[1]);
-
- err_ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
- if (err_ret) {
- brcmf_err("Failed to set F1 blocksize\n");
- goto out;
- }
-
- err_ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
- if (err_ret) {
- brcmf_err("Failed to set F2 blocksize\n");
- goto out;
- }
-
- brcmf_sdioh_enablefuncs(sdiodev);
-
-out:
- sdio_release_host(sdiodev->func[1]);
- brcmf_dbg(SDIO, "Done\n");
- return err_ret;
-}
-
-void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev)
-{
- brcmf_dbg(SDIO, "\n");
-
- /* Disable Function 2 */
- sdio_claim_host(sdiodev->func[2]);
- sdio_disable_func(sdiodev->func[2]);
- sdio_release_host(sdiodev->func[2]);
-
- /* Disable Function 1 */
- sdio_claim_host(sdiodev->func[1]);
- sdio_disable_func(sdiodev->func[1]);
- sdio_release_host(sdiodev->func[1]);
-
-}
-
-static int brcmf_ops_sdio_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- int err;
- struct brcmf_sdio_dev *sdiodev;
- struct brcmf_bus *bus_if;
- struct mmc_host *host;
- uint max_blocks;
-
- brcmf_dbg(SDIO, "Enter\n");
- brcmf_dbg(SDIO, "Class=%x\n", func->class);
- brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
- brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
- brcmf_dbg(SDIO, "Function#: %d\n", func->num);
-
- /* Consume func num 1 but dont do anything with it. */
- if (func->num == 1)
- return 0;
-
- /* Ignore anything but func 2 */
- if (func->num != 2)
- return -ENODEV;
-
- bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
- if (!bus_if)
- return -ENOMEM;
- sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
- if (!sdiodev) {
- kfree(bus_if);
- return -ENOMEM;
- }
-
- sdiodev->func[0] = func->card->sdio_func[0];
- sdiodev->func[1] = func->card->sdio_func[0];
- sdiodev->func[2] = func;
-
- sdiodev->bus_if = bus_if;
- bus_if->bus_priv.sdio = sdiodev;
- dev_set_drvdata(&func->dev, bus_if);
- dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
- sdiodev->dev = &sdiodev->func[1]->dev;
- sdiodev->pdata = brcmfmac_sdio_pdata;
-
- atomic_set(&sdiodev->suspend, false);
- init_waitqueue_head(&sdiodev->request_byte_wait);
- init_waitqueue_head(&sdiodev->request_word_wait);
- init_waitqueue_head(&sdiodev->request_buffer_wait);
-
- brcmf_dbg(SDIO, "F2 found, calling brcmf_sdio_probe...\n");
- err = brcmf_sdio_probe(sdiodev);
- if (err) {
- brcmf_err("F2 error, probe failed %d...\n", err);
- goto fail;
- }
-
- /*
- * determine host related variables after brcmf_sdio_probe()
- * as func->cur_blksize is properly set and F2 init has been
- * completed successfully.
- */
- host = func->card->host;
- sdiodev->sg_support = host->max_segs > 1;
- max_blocks = min_t(uint, host->max_blk_count, 511u);
- sdiodev->max_request_size = min_t(uint, host->max_req_size,
- max_blocks * func->cur_blksize);
- sdiodev->max_segment_count = min_t(uint, host->max_segs,
- SG_MAX_SINGLE_ALLOC);
- sdiodev->max_segment_size = host->max_seg_size;
- brcmf_dbg(SDIO, "F2 init completed...\n");
- return 0;
-
-fail:
- dev_set_drvdata(&func->dev, NULL);
- dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
- kfree(sdiodev);
- kfree(bus_if);
- return err;
-}
-
-static void brcmf_ops_sdio_remove(struct sdio_func *func)
-{
- struct brcmf_bus *bus_if;
- struct brcmf_sdio_dev *sdiodev;
-
- brcmf_dbg(SDIO, "Enter\n");
- brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
- brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
- brcmf_dbg(SDIO, "Function: %d\n", func->num);
-
- if (func->num != 1 && func->num != 2)
- return;
-
- bus_if = dev_get_drvdata(&func->dev);
- if (bus_if) {
- sdiodev = bus_if->bus_priv.sdio;
- brcmf_sdio_remove(sdiodev);
-
- dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
- dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
-
- kfree(bus_if);
- kfree(sdiodev);
- }
-
- brcmf_dbg(SDIO, "Exit\n");
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int brcmf_sdio_suspend(struct device *dev)
-{
- mmc_pm_flag_t sdio_flags;
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
- int ret = 0;
-
- brcmf_dbg(SDIO, "\n");
-
- atomic_set(&sdiodev->suspend, true);
-
- sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
- if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
- brcmf_err("Host can't keep power while suspended\n");
- return -EINVAL;
- }
-
- ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
- if (ret) {
- brcmf_err("Failed to set pm_flags\n");
- return ret;
- }
-
- brcmf_sdio_wdtmr_enable(sdiodev, false);
-
- return ret;
-}
-
-static int brcmf_sdio_resume(struct device *dev)
-{
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
-
- brcmf_sdio_wdtmr_enable(sdiodev, true);
- atomic_set(&sdiodev->suspend, false);
- return 0;
-}
-
-static const struct dev_pm_ops brcmf_sdio_pm_ops = {
- .suspend = brcmf_sdio_suspend,
- .resume = brcmf_sdio_resume,
-};
-#endif /* CONFIG_PM_SLEEP */
-
-static struct sdio_driver brcmf_sdmmc_driver = {
- .probe = brcmf_ops_sdio_probe,
- .remove = brcmf_ops_sdio_remove,
- .name = BRCMFMAC_SDIO_PDATA_NAME,
- .id_table = brcmf_sdmmc_ids,
-#ifdef CONFIG_PM_SLEEP
- .drv = {
- .pm = &brcmf_sdio_pm_ops,
- },
-#endif /* CONFIG_PM_SLEEP */
-};
-
-static int brcmf_sdio_pd_probe(struct platform_device *pdev)
-{
- brcmf_dbg(SDIO, "Enter\n");
-
- brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
-
- if (brcmfmac_sdio_pdata->power_on)
- brcmfmac_sdio_pdata->power_on();
-
- return 0;
-}
-
-static int brcmf_sdio_pd_remove(struct platform_device *pdev)
-{
- brcmf_dbg(SDIO, "Enter\n");
-
- if (brcmfmac_sdio_pdata->power_off)
- brcmfmac_sdio_pdata->power_off();
-
- sdio_unregister_driver(&brcmf_sdmmc_driver);
-
- return 0;
-}
-
-static struct platform_driver brcmf_sdio_pd = {
- .remove = brcmf_sdio_pd_remove,
- .driver = {
- .name = BRCMFMAC_SDIO_PDATA_NAME,
- .owner = THIS_MODULE,
- }
-};
-
-void brcmf_sdio_register(void)
-{
- int ret;
-
- ret = sdio_register_driver(&brcmf_sdmmc_driver);
- if (ret)
- brcmf_err("sdio_register_driver failed: %d\n", ret);
-}
-
-void brcmf_sdio_exit(void)
-{
- brcmf_dbg(SDIO, "Enter\n");
-
- if (brcmfmac_sdio_pdata)
- platform_driver_unregister(&brcmf_sdio_pd);
- else
- sdio_unregister_driver(&brcmf_sdmmc_driver);
-}
-
-void __init brcmf_sdio_init(void)
-{
- int ret;
-
- brcmf_dbg(SDIO, "Enter\n");
-
- ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
- if (ret == -ENODEV)
- brcmf_dbg(SDIO, "No platform data available.\n");
-}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 899a2ada5b82..939d6b132922 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -21,481 +21,33 @@
#ifndef _BRCMF_H_
#define _BRCMF_H_
-#define BRCMF_VERSION_STR "4.218.248.5"
-
#include "fweh.h"
-/*******************************************************************************
- * IO codes that are interpreted by dongle firmware
- ******************************************************************************/
-#define BRCMF_C_GET_VERSION 1
-#define BRCMF_C_UP 2
-#define BRCMF_C_DOWN 3
-#define BRCMF_C_SET_PROMISC 10
-#define BRCMF_C_GET_RATE 12
-#define BRCMF_C_GET_INFRA 19
-#define BRCMF_C_SET_INFRA 20
-#define BRCMF_C_GET_AUTH 21
-#define BRCMF_C_SET_AUTH 22
-#define BRCMF_C_GET_BSSID 23
-#define BRCMF_C_GET_SSID 25
-#define BRCMF_C_SET_SSID 26
-#define BRCMF_C_TERMINATED 28
-#define BRCMF_C_GET_CHANNEL 29
-#define BRCMF_C_SET_CHANNEL 30
-#define BRCMF_C_GET_SRL 31
-#define BRCMF_C_SET_SRL 32
-#define BRCMF_C_GET_LRL 33
-#define BRCMF_C_SET_LRL 34
-#define BRCMF_C_GET_RADIO 37
-#define BRCMF_C_SET_RADIO 38
-#define BRCMF_C_GET_PHYTYPE 39
-#define BRCMF_C_SET_KEY 45
-#define BRCMF_C_SET_PASSIVE_SCAN 49
-#define BRCMF_C_SCAN 50
-#define BRCMF_C_SCAN_RESULTS 51
-#define BRCMF_C_DISASSOC 52
-#define BRCMF_C_REASSOC 53
-#define BRCMF_C_SET_ROAM_TRIGGER 55
-#define BRCMF_C_SET_ROAM_DELTA 57
-#define BRCMF_C_GET_BCNPRD 75
-#define BRCMF_C_SET_BCNPRD 76
-#define BRCMF_C_GET_DTIMPRD 77
-#define BRCMF_C_SET_DTIMPRD 78
-#define BRCMF_C_SET_COUNTRY 84
-#define BRCMF_C_GET_PM 85
-#define BRCMF_C_SET_PM 86
-#define BRCMF_C_GET_CURR_RATESET 114
-#define BRCMF_C_GET_AP 117
-#define BRCMF_C_SET_AP 118
-#define BRCMF_C_GET_RSSI 127
-#define BRCMF_C_GET_WSEC 133
-#define BRCMF_C_SET_WSEC 134
-#define BRCMF_C_GET_PHY_NOISE 135
-#define BRCMF_C_GET_BSS_INFO 136
-#define BRCMF_C_GET_BANDLIST 140
-#define BRCMF_C_SET_SCB_TIMEOUT 158
-#define BRCMF_C_GET_PHYLIST 180
-#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185
-#define BRCMF_C_SET_SCAN_UNASSOC_TIME 187
-#define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON 201
-#define BRCMF_C_GET_VALID_CHANNELS 217
-#define BRCMF_C_GET_KEY_PRIMARY 235
-#define BRCMF_C_SET_KEY_PRIMARY 236
-#define BRCMF_C_SET_SCAN_PASSIVE_TIME 258
-#define BRCMF_C_GET_VAR 262
-#define BRCMF_C_SET_VAR 263
-
-/* phy types (returned by WLC_GET_PHYTPE) */
-#define WLC_PHY_TYPE_A 0
-#define WLC_PHY_TYPE_B 1
-#define WLC_PHY_TYPE_G 2
-#define WLC_PHY_TYPE_N 4
-#define WLC_PHY_TYPE_LP 5
-#define WLC_PHY_TYPE_SSN 6
-#define WLC_PHY_TYPE_HT 7
-#define WLC_PHY_TYPE_LCN 8
-#define WLC_PHY_TYPE_NULL 0xf
-
#define TOE_TX_CSUM_OL 0x00000001
#define TOE_RX_CSUM_OL 0x00000002
-#define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */
-
-/* size of brcmf_scan_params not including variable length array */
-#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
-
-/* masks for channel and ssid count */
-#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
-#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
-
-/* primary (ie tx) key */
-#define BRCMF_PRIMARY_KEY (1 << 1)
-
/* For supporting multiple interfaces */
#define BRCMF_MAX_IFS 16
-#define DOT11_BSSTYPE_ANY 2
#define DOT11_MAX_DEFAULT_KEYS 4
-#define BRCMF_ESCAN_REQ_VERSION 1
-
-#define WLC_BSS_RSSI_ON_CHANNEL 0x0002
-
-#define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */
-#define BRCMF_STA_ASSOC 0x10 /* Associated */
-
-#define BRCMF_E_STATUS_SUCCESS 0
-#define BRCMF_E_STATUS_FAIL 1
-#define BRCMF_E_STATUS_TIMEOUT 2
-#define BRCMF_E_STATUS_NO_NETWORKS 3
-#define BRCMF_E_STATUS_ABORT 4
-#define BRCMF_E_STATUS_NO_ACK 5
-#define BRCMF_E_STATUS_UNSOLICITED 6
-#define BRCMF_E_STATUS_ATTEMPT 7
-#define BRCMF_E_STATUS_PARTIAL 8
-#define BRCMF_E_STATUS_NEWSCAN 9
-#define BRCMF_E_STATUS_NEWASSOC 10
-#define BRCMF_E_STATUS_11HQUIET 11
-#define BRCMF_E_STATUS_SUPPRESS 12
-#define BRCMF_E_STATUS_NOCHANS 13
-#define BRCMF_E_STATUS_CS_ABORT 15
-#define BRCMF_E_STATUS_ERROR 16
-
-#define BRCMF_E_REASON_INITIAL_ASSOC 0
-#define BRCMF_E_REASON_LOW_RSSI 1
-#define BRCMF_E_REASON_DEAUTH 2
-#define BRCMF_E_REASON_DISASSOC 3
-#define BRCMF_E_REASON_BCNS_LOST 4
-#define BRCMF_E_REASON_MINTXRATE 9
-#define BRCMF_E_REASON_TXFAIL 10
-
-#define BRCMF_E_REASON_LINK_BSSCFG_DIS 4
-#define BRCMF_E_REASON_FAST_ROAM_FAILED 5
-#define BRCMF_E_REASON_DIRECTED_ROAM 6
-#define BRCMF_E_REASON_TSPEC_REJECTED 7
-#define BRCMF_E_REASON_BETTER_AP 8
-
-#define BRCMF_E_PRUNE_ENCR_MISMATCH 1
-#define BRCMF_E_PRUNE_BCAST_BSSID 2
-#define BRCMF_E_PRUNE_MAC_DENY 3
-#define BRCMF_E_PRUNE_MAC_NA 4
-#define BRCMF_E_PRUNE_REG_PASSV 5
-#define BRCMF_E_PRUNE_SPCT_MGMT 6
-#define BRCMF_E_PRUNE_RADAR 7
-#define BRCMF_E_RSN_MISMATCH 8
-#define BRCMF_E_PRUNE_NO_COMMON_RATES 9
-#define BRCMF_E_PRUNE_BASIC_RATES 10
-#define BRCMF_E_PRUNE_CIPHER_NA 12
-#define BRCMF_E_PRUNE_KNOWN_STA 13
-#define BRCMF_E_PRUNE_WDS_PEER 15
-#define BRCMF_E_PRUNE_QBSS_LOAD 16
-#define BRCMF_E_PRUNE_HOME_AP 17
-
-#define BRCMF_E_SUP_OTHER 0
-#define BRCMF_E_SUP_DECRYPT_KEY_DATA 1
-#define BRCMF_E_SUP_BAD_UCAST_WEP128 2
-#define BRCMF_E_SUP_BAD_UCAST_WEP40 3
-#define BRCMF_E_SUP_UNSUP_KEY_LEN 4
-#define BRCMF_E_SUP_PW_KEY_CIPHER 5
-#define BRCMF_E_SUP_MSG3_TOO_MANY_IE 6
-#define BRCMF_E_SUP_MSG3_IE_MISMATCH 7
-#define BRCMF_E_SUP_NO_INSTALL_FLAG 8
-#define BRCMF_E_SUP_MSG3_NO_GTK 9
-#define BRCMF_E_SUP_GRP_KEY_CIPHER 10
-#define BRCMF_E_SUP_GRP_MSG1_NO_GTK 11
-#define BRCMF_E_SUP_GTK_DECRYPT_FAIL 12
-#define BRCMF_E_SUP_SEND_FAIL 13
-#define BRCMF_E_SUP_DEAUTH 14
-
-#define BRCMF_E_IF_ADD 1
-#define BRCMF_E_IF_DEL 2
-#define BRCMF_E_IF_CHANGE 3
-
-#define BRCMF_E_IF_FLAG_NOIF 1
-
-#define BRCMF_E_IF_ROLE_STA 0
-#define BRCMF_E_IF_ROLE_AP 1
-#define BRCMF_E_IF_ROLE_WDS 2
-
-#define BRCMF_E_LINK_BCN_LOSS 1
-#define BRCMF_E_LINK_DISASSOC 2
-#define BRCMF_E_LINK_ASSOC_REC 3
-#define BRCMF_E_LINK_BSSCFG_DIS 4
-
/* Small, medium and maximum buffer size for dcmd
*/
#define BRCMF_DCMD_SMLEN 256
#define BRCMF_DCMD_MEDLEN 1536
#define BRCMF_DCMD_MAXLEN 8192
-#define BRCMF_AMPDU_RX_REORDER_MAXFLOWS 256
-
-/* Pattern matching filter. Specifies an offset within received packets to
- * start matching, the pattern to match, the size of the pattern, and a bitmask
- * that indicates which bits within the pattern should be matched.
- */
-struct brcmf_pkt_filter_pattern_le {
- /*
- * Offset within received packet to start pattern matching.
- * Offset '0' is the first byte of the ethernet header.
- */
- __le32 offset;
- /* Size of the pattern. Bitmask must be the same size.*/
- __le32 size_bytes;
- /*
- * Variable length mask and pattern data. mask starts at offset 0.
- * Pattern immediately follows mask.
- */
- u8 mask_and_pattern[1];
-};
-
-/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
-struct brcmf_pkt_filter_le {
- __le32 id; /* Unique filter id, specified by app. */
- __le32 type; /* Filter type (WL_PKT_FILTER_TYPE_xxx). */
- __le32 negate_match; /* Negate the result of filter matches */
- union { /* Filter definitions */
- struct brcmf_pkt_filter_pattern_le pattern; /* Filter pattern */
- } u;
-};
-
-/* IOVAR "pkt_filter_enable" parameter. */
-struct brcmf_pkt_filter_enable_le {
- __le32 id; /* Unique filter id */
- __le32 enable; /* Enable/disable bool */
-};
-
-/* BSS info structure
- * Applications MUST CHECK ie_offset field and length field to access IEs and
- * next bss_info structure in a vector (in struct brcmf_scan_results)
+/* IOCTLs from host to device are limited in length. A device can only handle
+ * an ethernet frame of data. This limit is applied by the protocol layer.
*/
-struct brcmf_bss_info_le {
- __le32 version; /* version field */
- __le32 length; /* byte length of data in this record,
- * starting at version and including IEs
- */
- u8 BSSID[ETH_ALEN];
- __le16 beacon_period; /* units are Kusec */
- __le16 capability; /* Capability information */
- u8 SSID_len;
- u8 SSID[32];
- struct {
- __le32 count; /* # rates in this set */
- u8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
- } rateset; /* supported rates */
- __le16 chanspec; /* chanspec for bss */
- __le16 atim_window; /* units are Kusec */
- u8 dtim_period; /* DTIM period */
- __le16 RSSI; /* receive signal strength (in dBm) */
- s8 phy_noise; /* noise (in dBm) */
-
- u8 n_cap; /* BSS is 802.11N Capable */
- /* 802.11N BSS Capabilities (based on HT_CAP_*): */
- __le32 nbss_cap;
- u8 ctl_ch; /* 802.11N BSS control channel number */
- __le32 reserved32[1]; /* Reserved for expansion of BSS properties */
- u8 flags; /* flags */
- u8 reserved[3]; /* Reserved for expansion of BSS properties */
- u8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */
+#define BRCMF_TX_IOCTL_MAX_MSG_SIZE (ETH_FRAME_LEN+ETH_FCS_LEN)
- __le16 ie_offset; /* offset at which IEs start, from beginning */
- __le32 ie_length; /* byte length of Information Elements */
- __le16 SNR; /* average SNR of during frame reception */
- /* Add new fields here */
- /* variable length Information Elements */
-};
-
-struct brcm_rateset_le {
- /* # rates in this set */
- __le32 count;
- /* rates in 500kbps units w/hi bit set if basic */
- u8 rates[BRCMF_MAXRATES_IN_SET];
-};
-
-struct brcmf_ssid {
- u32 SSID_len;
- unsigned char SSID[32];
-};
-
-struct brcmf_ssid_le {
- __le32 SSID_len;
- unsigned char SSID[32];
-};
-
-struct brcmf_scan_params_le {
- struct brcmf_ssid_le ssid_le; /* default: {0, ""} */
- u8 bssid[ETH_ALEN]; /* default: bcast */
- s8 bss_type; /* default: any,
- * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
- */
- u8 scan_type; /* flags, 0 use default */
- __le32 nprobes; /* -1 use default, number of probes per channel */
- __le32 active_time; /* -1 use default, dwell time per channel for
- * active scanning
- */
- __le32 passive_time; /* -1 use default, dwell time per channel
- * for passive scanning
- */
- __le32 home_time; /* -1 use default, dwell time for the
- * home channel between channel scans
- */
- __le32 channel_num; /* count of channels and ssids that follow
- *
- * low half is count of channels in
- * channel_list, 0 means default (use all
- * available channels)
- *
- * high half is entries in struct brcmf_ssid
- * array that follows channel_list, aligned for
- * s32 (4 bytes) meaning an odd channel count
- * implies a 2-byte pad between end of
- * channel_list and first ssid
- *
- * if ssid count is zero, single ssid in the
- * fixed parameter portion is assumed, otherwise
- * ssid in the fixed portion is ignored
- */
- __le16 channel_list[1]; /* list of chanspecs */
-};
-
-struct brcmf_scan_results {
- u32 buflen;
- u32 version;
- u32 count;
- struct brcmf_bss_info_le bss_info_le[];
-};
-
-struct brcmf_escan_params_le {
- __le32 version;
- __le16 action;
- __le16 sync_id;
- struct brcmf_scan_params_le params_le;
-};
-
-struct brcmf_escan_result_le {
- __le32 buflen;
- __le32 version;
- __le16 sync_id;
- __le16 bss_count;
- struct brcmf_bss_info_le bss_info_le;
-};
-
-#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(struct brcmf_escan_result_le) - \
- sizeof(struct brcmf_bss_info_le))
-
-/* used for association with a specific BSSID and chanspec list */
-struct brcmf_assoc_params_le {
- /* 00:00:00:00:00:00: broadcast scan */
- u8 bssid[ETH_ALEN];
- /* 0: all available channels, otherwise count of chanspecs in
- * chanspec_list */
- __le32 chanspec_num;
- /* list of chanspecs */
- __le16 chanspec_list[1];
-};
-
-/* used for join with or without a specific bssid and channel list */
-struct brcmf_join_params {
- struct brcmf_ssid_le ssid_le;
- struct brcmf_assoc_params_le params_le;
-};
-
-/* scan params for extended join */
-struct brcmf_join_scan_params_le {
- u8 scan_type; /* 0 use default, active or passive scan */
- __le32 nprobes; /* -1 use default, nr of probes per channel */
- __le32 active_time; /* -1 use default, dwell time per channel for
- * active scanning
- */
- __le32 passive_time; /* -1 use default, dwell time per channel
- * for passive scanning
- */
- __le32 home_time; /* -1 use default, dwell time for the home
- * channel between channel scans
- */
-};
-
-/* extended join params */
-struct brcmf_ext_join_params_le {
- struct brcmf_ssid_le ssid_le; /* {0, ""}: wildcard scan */
- struct brcmf_join_scan_params_le scan_le;
- struct brcmf_assoc_params_le assoc_le;
-};
-
-struct brcmf_wsec_key {
- u32 index; /* key index */
- u32 len; /* key length */
- u8 data[WLAN_MAX_KEY_LEN]; /* key data */
- u32 pad_1[18];
- u32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
- u32 flags; /* misc flags */
- u32 pad_2[3];
- u32 iv_initialized; /* has IV been initialized already? */
- u32 pad_3;
- /* Rx IV */
- struct {
- u32 hi; /* upper 32 bits of IV */
- u16 lo; /* lower 16 bits of IV */
- } rxiv;
- u32 pad_4[2];
- u8 ea[ETH_ALEN]; /* per station */
-};
-
-/*
- * dongle requires same struct as above but with fields in little endian order
- */
-struct brcmf_wsec_key_le {
- __le32 index; /* key index */
- __le32 len; /* key length */
- u8 data[WLAN_MAX_KEY_LEN]; /* key data */
- __le32 pad_1[18];
- __le32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
- __le32 flags; /* misc flags */
- __le32 pad_2[3];
- __le32 iv_initialized; /* has IV been initialized already? */
- __le32 pad_3;
- /* Rx IV */
- struct {
- __le32 hi; /* upper 32 bits of IV */
- __le16 lo; /* lower 16 bits of IV */
- } rxiv;
- __le32 pad_4[2];
- u8 ea[ETH_ALEN]; /* per station */
-};
-
-/* Used to get specific STA parameters */
-struct brcmf_scb_val_le {
- __le32 val;
- u8 ea[ETH_ALEN];
-};
-
-/* channel encoding */
-struct brcmf_channel_info_le {
- __le32 hw_channel;
- __le32 target_channel;
- __le32 scan_channel;
-};
-
-struct brcmf_sta_info_le {
- __le16 ver; /* version of this struct */
- __le16 len; /* length in bytes of this structure */
- __le16 cap; /* sta's advertised capabilities */
- __le32 flags; /* flags defined below */
- __le32 idle; /* time since data pkt rx'd from sta */
- u8 ea[ETH_ALEN]; /* Station address */
- __le32 count; /* # rates in this set */
- u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units */
- /* w/hi bit set if basic */
- __le32 in; /* seconds elapsed since associated */
- __le32 listen_interval_inms; /* Min Listen interval in ms for STA */
- __le32 tx_pkts; /* # of packets transmitted */
- __le32 tx_failures; /* # of packets failed */
- __le32 rx_ucast_pkts; /* # of unicast packets received */
- __le32 rx_mcast_pkts; /* # of multicast packets received */
- __le32 tx_rate; /* Rate of last successful tx frame */
- __le32 rx_rate; /* Rate of last successful rx frame */
- __le32 rx_decrypt_succeeds; /* # of packet decrypted successfully */
- __le32 rx_decrypt_failures; /* # of packet decrypted failed */
-};
-
-struct brcmf_chanspec_list {
- __le32 count; /* # of entries */
- __le32 element[1]; /* variable length uint32 list */
-};
+#define BRCMF_AMPDU_RX_REORDER_MAXFLOWS 256
-/*
- * WLC_E_PROBRESP_MSG
- * WLC_E_P2P_PROBREQ_MSG
- * WLC_E_ACTION_FRAME_RX
+/* Length of the firmware version string stored for
+ * ethtool driver info, which also uses 32 bytes.
*/
-struct brcmf_rx_mgmt_data {
- __be16 version;
- __be16 chanspec;
- __be32 rssi;
- __be32 mactime;
- __be32 rate;
-};
+#define BRCMF_DRIVER_FIRMWARE_VERSION_LEN 32
/* Bus independent dongle command */
struct brcmf_dcmd {
@@ -535,7 +87,7 @@ struct brcmf_fws_info; /* firmware signalling info */
struct brcmf_pub {
/* Linkage ponters */
struct brcmf_bus *bus_if;
- struct brcmf_proto *prot;
+ struct brcmf_proto *proto;
struct brcmf_cfg80211_info *config;
/* Internal brcmf items */
@@ -544,7 +96,7 @@ struct brcmf_pub {
u8 wme_dp; /* wme discard priority */
/* Dongle media info */
- unsigned long drv_version; /* Version of dongle-resident driver */
+ char fwver[BRCMF_DRIVER_FIRMWARE_VERSION_LEN];
u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */
/* Multicast data packets sent to dongle */
@@ -566,14 +118,6 @@ struct brcmf_pub {
#endif
};
-struct brcmf_if_event {
- u8 ifidx;
- u8 action;
- u8 flags;
- u8 bssidx;
- u8 role;
-};
-
/* forward declarations */
struct brcmf_cfg80211_vif;
struct brcmf_fws_mac_descriptor;
@@ -635,16 +179,6 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
/* Return pointer to interface name */
char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
-/* Query dongle */
-int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len);
-int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len);
-
-/* Remove any protocol-specific data header. */
-int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
- struct sk_buff *rxp);
-
int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
char *name, u8 *mac_addr);
@@ -655,4 +189,7 @@ u32 brcmf_get_chip_info(struct brcmf_if *ifp);
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
bool success);
+/* Sets dongle media info (firmware version, mac address). */
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+
#endif /* _BRCMF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index a6eb09e5d46f..c4535616064e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -17,13 +17,23 @@
#ifndef _BRCMF_BUS_H_
#define _BRCMF_BUS_H_
+#include "dhd_dbg.h"
+
/* The level of bus communication with the dongle */
enum brcmf_bus_state {
+ BRCMF_BUS_UNKNOWN, /* Not determined yet */
+ BRCMF_BUS_NOMEDIUM, /* No medium access to dongle */
BRCMF_BUS_DOWN, /* Not ready for frame transfers */
BRCMF_BUS_LOAD, /* Download access only (CPU reset) */
BRCMF_BUS_DATA /* Ready for frame transfers */
};
+/* The bus protocol type used for communication with the dongle */
+enum brcmf_bus_protocol_type {
+ BRCMF_PROTO_BCDC,
+ BRCMF_PROTO_MSGBUF
+};
+
struct brcmf_bus_dcmd {
char *name;
char *param;
@@ -34,6 +44,7 @@ struct brcmf_bus_dcmd {
/**
* struct brcmf_bus_ops - bus callback operations.
*
+ * @preinit: execute bus/device specific dongle init commands (optional).
* @init: prepare for communication with dongle.
* @stop: clear pending frames, disable data flow.
* @txdata: send a data frame to the dongle. When the data
@@ -51,6 +62,7 @@ struct brcmf_bus_dcmd {
* indicated otherwise these callbacks are mandatory.
*/
struct brcmf_bus_ops {
+ int (*preinit)(struct device *dev);
int (*init)(struct device *dev);
void (*stop)(struct device *dev);
int (*txdata)(struct device *dev, struct sk_buff *skb);
@@ -63,6 +75,7 @@ struct brcmf_bus_ops {
* struct brcmf_bus - interface structure between common and bus layer
*
* @bus_priv: pointer to private bus device.
+ * @proto_type: protocol type, bcdc or msgbuf
* @dev: device pointer of bus device.
* @drvr: public driver information.
* @state: operational state of the bus interface.
@@ -78,6 +91,7 @@ struct brcmf_bus {
struct brcmf_sdio_dev *sdio;
struct brcmf_usbdev *usb;
} bus_priv;
+ enum brcmf_bus_protocol_type proto_type;
struct device *dev;
struct brcmf_pub *drvr;
enum brcmf_bus_state state;
@@ -85,7 +99,6 @@ struct brcmf_bus {
unsigned long tx_realloc;
u32 chip;
u32 chiprev;
- struct list_head dcmd_list;
struct brcmf_bus_ops *ops;
};
@@ -93,6 +106,13 @@ struct brcmf_bus {
/*
* callback wrappers
*/
+static inline int brcmf_bus_preinit(struct brcmf_bus *bus)
+{
+ if (!bus->ops->preinit)
+ return 0;
+ return bus->ops->preinit(bus->dev);
+}
+
static inline int brcmf_bus_init(struct brcmf_bus *bus)
{
return bus->ops->init(bus->dev);
@@ -128,6 +148,23 @@ struct pktq *brcmf_bus_gettxq(struct brcmf_bus *bus)
return bus->ops->gettxq(bus->dev);
}
+
+static inline bool brcmf_bus_ready(struct brcmf_bus *bus)
+{
+ return bus->state == BRCMF_BUS_LOAD || bus->state == BRCMF_BUS_DATA;
+}
+
+static inline void brcmf_bus_change_state(struct brcmf_bus *bus,
+ enum brcmf_bus_state new_state)
+{
+ /* NOMEDIUM is permanent */
+ if (bus->state == BRCMF_BUS_NOMEDIUM)
+ return;
+
+ brcmf_dbg(TRACE, "%d -> %d\n", bus->state, new_state);
+ bus->state = new_state;
+}
+
/*
* interface functions from common layer
*/
@@ -139,7 +176,7 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
/* Indication from bus module regarding presence/insertion of dongle. */
-int brcmf_attach(uint bus_hdrlen, struct device *dev);
+int brcmf_attach(struct device *dev);
/* Indication from bus module regarding removal/absence of dongle */
void brcmf_detach(struct device *dev);
/* Indication from bus module that dongle should be reset */
@@ -151,6 +188,9 @@ void brcmf_txflowblock(struct device *dev, bool state);
void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
int brcmf_bus_start(struct device *dev);
+s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data,
+ u32 len);
+void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
#ifdef CONFIG_BRCMFMAC_SDIO
void brcmf_sdio_exit(void);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
deleted file mode 100644
index dd85401063cb..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ /dev/null
@@ -1,392 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*******************************************************************************
- * Communicates with the dongle by using dcmd codes.
- * For certain dcmd codes, the dongle interprets string data from the host.
- ******************************************************************************/
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-
-#include <brcmu_utils.h>
-#include <brcmu_wifi.h>
-
-#include "dhd.h"
-#include "dhd_proto.h"
-#include "dhd_bus.h"
-#include "fwsignal.h"
-#include "dhd_dbg.h"
-#include "tracepoint.h"
-
-struct brcmf_proto_cdc_dcmd {
- __le32 cmd; /* dongle command value */
- __le32 len; /* lower 16: output buflen;
- * upper 16: input buflen (excludes header) */
- __le32 flags; /* flag defns given below */
- __le32 status; /* status code returned from the device */
-};
-
-/* Max valid buffer size that can be sent to the dongle */
-#define CDC_MAX_MSG_SIZE (ETH_FRAME_LEN+ETH_FCS_LEN)
-
-/* CDC flag definitions */
-#define CDC_DCMD_ERROR 0x01 /* 1=cmd failed */
-#define CDC_DCMD_SET 0x02 /* 0=get, 1=set cmd */
-#define CDC_DCMD_IF_MASK 0xF000 /* I/F index */
-#define CDC_DCMD_IF_SHIFT 12
-#define CDC_DCMD_ID_MASK 0xFFFF0000 /* id an cmd pairing */
-#define CDC_DCMD_ID_SHIFT 16 /* ID Mask shift bits */
-#define CDC_DCMD_ID(flags) \
- (((flags) & CDC_DCMD_ID_MASK) >> CDC_DCMD_ID_SHIFT)
-
-/*
- * BDC header - Broadcom specific extension of CDC.
- * Used on data packets to convey priority across USB.
- */
-#define BDC_HEADER_LEN 4
-#define BDC_PROTO_VER 2 /* Protocol version */
-#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
-#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
-#define BDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */
-#define BDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums */
-#define BDC_PRIORITY_MASK 0x7
-#define BDC_FLAG2_IF_MASK 0x0f /* packet rx interface in APSTA */
-#define BDC_FLAG2_IF_SHIFT 0
-
-#define BDC_GET_IF_IDX(hdr) \
- ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
-#define BDC_SET_IF_IDX(hdr, idx) \
- ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | \
- ((idx) << BDC_FLAG2_IF_SHIFT)))
-
-/**
- * struct brcmf_proto_bdc_header - BDC header format
- *
- * @flags: flags contain protocol and checksum info.
- * @priority: 802.1d priority and USB flow control info (bit 4:7).
- * @flags2: additional flags containing dongle interface index.
- * @data_offset: start of packet data. header is following by firmware signals.
- */
-struct brcmf_proto_bdc_header {
- u8 flags;
- u8 priority;
- u8 flags2;
- u8 data_offset;
-};
-
-/*
- * maximum length of firmware signal data between
- * the BDC header and packet data in the tx path.
- */
-#define BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES 12
-
-#define RETRIES 2 /* # of retries to retrieve matching dcmd response */
-#define BUS_HEADER_LEN (16+64) /* Must be atleast SDPCM_RESERVE
- * (amount of header tha might be added)
- * plus any space that might be needed
- * for bus alignment padding.
- */
-#define ROUND_UP_MARGIN 2048 /* Biggest bus block size possible for
- * round off at the end of buffer
- * Currently is SDIO
- */
-
-struct brcmf_proto {
- u16 reqid;
- u8 bus_header[BUS_HEADER_LEN];
- struct brcmf_proto_cdc_dcmd msg;
- unsigned char buf[BRCMF_DCMD_MAXLEN + ROUND_UP_MARGIN];
-};
-
-static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
-{
- struct brcmf_proto *prot = drvr->prot;
- int len = le32_to_cpu(prot->msg.len) +
- sizeof(struct brcmf_proto_cdc_dcmd);
-
- brcmf_dbg(CDC, "Enter\n");
-
- /* NOTE : cdc->msg.len holds the desired length of the buffer to be
- * returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
- * is actually sent to the dongle
- */
- if (len > CDC_MAX_MSG_SIZE)
- len = CDC_MAX_MSG_SIZE;
-
- /* Send request */
- return brcmf_bus_txctl(drvr->bus_if, (unsigned char *)&prot->msg, len);
-}
-
-static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
-{
- int ret;
- struct brcmf_proto *prot = drvr->prot;
-
- brcmf_dbg(CDC, "Enter\n");
- len += sizeof(struct brcmf_proto_cdc_dcmd);
- do {
- ret = brcmf_bus_rxctl(drvr->bus_if, (unsigned char *)&prot->msg,
- len);
- if (ret < 0)
- break;
- } while (CDC_DCMD_ID(le32_to_cpu(prot->msg.flags)) != id);
-
- return ret;
-}
-
-int
-brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len)
-{
- struct brcmf_proto *prot = drvr->prot;
- struct brcmf_proto_cdc_dcmd *msg = &prot->msg;
- void *info;
- int ret = 0, retries = 0;
- u32 id, flags;
-
- brcmf_dbg(CDC, "Enter, cmd %d len %d\n", cmd, len);
-
- memset(msg, 0, sizeof(struct brcmf_proto_cdc_dcmd));
-
- msg->cmd = cpu_to_le32(cmd);
- msg->len = cpu_to_le32(len);
- flags = (++prot->reqid << CDC_DCMD_ID_SHIFT);
- flags = (flags & ~CDC_DCMD_IF_MASK) |
- (ifidx << CDC_DCMD_IF_SHIFT);
- msg->flags = cpu_to_le32(flags);
-
- if (buf)
- memcpy(prot->buf, buf, len);
-
- ret = brcmf_proto_cdc_msg(drvr);
- if (ret < 0) {
- brcmf_err("brcmf_proto_cdc_msg failed w/status %d\n",
- ret);
- goto done;
- }
-
-retry:
- /* wait for interrupt and get first fragment */
- ret = brcmf_proto_cdc_cmplt(drvr, prot->reqid, len);
- if (ret < 0)
- goto done;
-
- flags = le32_to_cpu(msg->flags);
- id = (flags & CDC_DCMD_ID_MASK) >> CDC_DCMD_ID_SHIFT;
-
- if ((id < prot->reqid) && (++retries < RETRIES))
- goto retry;
- if (id != prot->reqid) {
- brcmf_err("%s: unexpected request id %d (expected %d)\n",
- brcmf_ifname(drvr, ifidx), id, prot->reqid);
- ret = -EINVAL;
- goto done;
- }
-
- /* Check info buffer */
- info = (void *)&msg[1];
-
- /* Copy info buffer */
- if (buf) {
- if (ret < (int)len)
- len = ret;
- memcpy(buf, info, len);
- }
-
- /* Check the ERROR flag */
- if (flags & CDC_DCMD_ERROR)
- ret = le32_to_cpu(msg->status);
-
-done:
- return ret;
-}
-
-int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len)
-{
- struct brcmf_proto *prot = drvr->prot;
- struct brcmf_proto_cdc_dcmd *msg = &prot->msg;
- int ret = 0;
- u32 flags, id;
-
- brcmf_dbg(CDC, "Enter, cmd %d len %d\n", cmd, len);
-
- memset(msg, 0, sizeof(struct brcmf_proto_cdc_dcmd));
-
- msg->cmd = cpu_to_le32(cmd);
- msg->len = cpu_to_le32(len);
- flags = (++prot->reqid << CDC_DCMD_ID_SHIFT) | CDC_DCMD_SET;
- flags = (flags & ~CDC_DCMD_IF_MASK) |
- (ifidx << CDC_DCMD_IF_SHIFT);
- msg->flags = cpu_to_le32(flags);
-
- if (buf)
- memcpy(prot->buf, buf, len);
-
- ret = brcmf_proto_cdc_msg(drvr);
- if (ret < 0)
- goto done;
-
- ret = brcmf_proto_cdc_cmplt(drvr, prot->reqid, len);
- if (ret < 0)
- goto done;
-
- flags = le32_to_cpu(msg->flags);
- id = (flags & CDC_DCMD_ID_MASK) >> CDC_DCMD_ID_SHIFT;
-
- if (id != prot->reqid) {
- brcmf_err("%s: unexpected request id %d (expected %d)\n",
- brcmf_ifname(drvr, ifidx), id, prot->reqid);
- ret = -EINVAL;
- goto done;
- }
-
- /* Check the ERROR flag */
- if (flags & CDC_DCMD_ERROR)
- ret = le32_to_cpu(msg->status);
-
-done:
- return ret;
-}
-
-static bool pkt_sum_needed(struct sk_buff *skb)
-{
- return skb->ip_summed == CHECKSUM_PARTIAL;
-}
-
-static void pkt_set_sum_good(struct sk_buff *skb, bool x)
-{
- skb->ip_summed = (x ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
-}
-
-void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset,
- struct sk_buff *pktbuf)
-{
- struct brcmf_proto_bdc_header *h;
-
- brcmf_dbg(CDC, "Enter\n");
-
- /* Push BDC header used to convey priority for buses that don't */
- skb_push(pktbuf, BDC_HEADER_LEN);
-
- h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
-
- h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
- if (pkt_sum_needed(pktbuf))
- h->flags |= BDC_FLAG_SUM_NEEDED;
-
- h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
- h->flags2 = 0;
- h->data_offset = offset;
- BDC_SET_IF_IDX(h, ifidx);
- trace_brcmf_bdchdr(pktbuf->data);
-}
-
-int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
- struct sk_buff *pktbuf)
-{
- struct brcmf_proto_bdc_header *h;
-
- brcmf_dbg(CDC, "Enter\n");
-
- /* Pop BDC header used to convey priority for buses that don't */
-
- if (pktbuf->len <= BDC_HEADER_LEN) {
- brcmf_dbg(INFO, "rx data too short (%d <= %d)\n",
- pktbuf->len, BDC_HEADER_LEN);
- return -EBADE;
- }
-
- trace_brcmf_bdchdr(pktbuf->data);
- h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
-
- *ifidx = BDC_GET_IF_IDX(h);
- if (*ifidx >= BRCMF_MAX_IFS) {
- brcmf_err("rx data ifnum out of range (%d)\n", *ifidx);
- return -EBADE;
- }
- /* The ifidx is the idx to map to matching netdev/ifp. When receiving
- * events this is easy because it contains the bssidx which maps
- * 1-on-1 to the netdev/ifp. But for data frames the ifidx is rcvd.
- * bssidx 1 is used for p2p0 and no data can be received or
- * transmitted on it. Therefor bssidx is ifidx + 1 if ifidx > 0
- */
- if (*ifidx)
- (*ifidx)++;
-
- if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) !=
- BDC_PROTO_VER) {
- brcmf_err("%s: non-BDC packet received, flags 0x%x\n",
- brcmf_ifname(drvr, *ifidx), h->flags);
- return -EBADE;
- }
-
- if (h->flags & BDC_FLAG_SUM_GOOD) {
- brcmf_dbg(CDC, "%s: BDC rcv, good checksum, flags 0x%x\n",
- brcmf_ifname(drvr, *ifidx), h->flags);
- pkt_set_sum_good(pktbuf, true);
- }
-
- pktbuf->priority = h->priority & BDC_PRIORITY_MASK;
-
- skb_pull(pktbuf, BDC_HEADER_LEN);
- if (do_fws)
- brcmf_fws_hdrpull(drvr, *ifidx, h->data_offset << 2, pktbuf);
- else
- skb_pull(pktbuf, h->data_offset << 2);
-
- if (pktbuf->len == 0)
- return -ENODATA;
- return 0;
-}
-
-int brcmf_proto_attach(struct brcmf_pub *drvr)
-{
- struct brcmf_proto *cdc;
-
- cdc = kzalloc(sizeof(struct brcmf_proto), GFP_ATOMIC);
- if (!cdc)
- goto fail;
-
- /* ensure that the msg buf directly follows the cdc msg struct */
- if ((unsigned long)(&cdc->msg + 1) != (unsigned long)cdc->buf) {
- brcmf_err("struct brcmf_proto is not correctly defined\n");
- goto fail;
- }
-
- drvr->prot = cdc;
- drvr->hdrlen += BDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
- drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
- sizeof(struct brcmf_proto_cdc_dcmd) + ROUND_UP_MARGIN;
- return 0;
-
-fail:
- kfree(cdc);
- return -ENOMEM;
-}
-
-/* ~NOTE~ What if another thread is waiting on the semaphore? Holding it? */
-void brcmf_proto_detach(struct brcmf_pub *drvr)
-{
- kfree(drvr->prot);
- drvr->prot = NULL;
-}
-
-void brcmf_proto_stop(struct brcmf_pub *drvr)
-{
- /* Nothing to do for CDC */
-}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 9431af2465f3..6a8983a1fb9c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -21,9 +21,9 @@
#include <brcmu_utils.h>
#include "dhd.h"
#include "dhd_bus.h"
-#include "dhd_proto.h"
#include "dhd_dbg.h"
#include "fwil.h"
+#include "fwil_types.h"
#include "tracepoint.h"
#define PKTFILTER_BUF_SIZE 128
@@ -32,15 +32,6 @@
#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
#define BRCMF_DEFAULT_PACKET_FILTER "100 0 0 0 0x01 0x00"
-#ifdef DEBUG
-static const char brcmf_version[] =
- "Dongle Host Driver, version " BRCMF_VERSION_STR "\nCompiled on "
- __DATE__ " at " __TIME__;
-#else
-static const char brcmf_version[] =
- "Dongle Host Driver, version " BRCMF_VERSION_STR;
-#endif
-
bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
struct sk_buff *pkt, int prec)
@@ -257,8 +248,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
u8 buf[BRCMF_DCMD_SMLEN];
char *ptr;
s32 err;
- struct brcmf_bus_dcmd *cmdlst;
- struct list_head *cur, *q;
/* retreive mac address */
err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
@@ -281,9 +270,14 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
}
ptr = (char *)buf;
strsep(&ptr, "\n");
+
/* Print fw version info */
brcmf_err("Firmware version = %s\n", buf);
+ /* locate firmware version number for ethtool */
+ ptr = strrchr(buf, ' ') + 1;
+ strlcpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
+
/*
* Setup timeout if Beacons are lost and roam is off to report
* link down
@@ -342,17 +336,8 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
brcmf_c_pktfilter_offload_enable(ifp, BRCMF_DEFAULT_PACKET_FILTER,
0, true);
- /* set bus specific command if there is any */
- list_for_each_safe(cur, q, &ifp->drvr->bus_if->dcmd_list) {
- cmdlst = list_entry(cur, struct brcmf_bus_dcmd, list);
- if (cmdlst->name && cmdlst->param && cmdlst->param_len) {
- brcmf_fil_iovar_data_set(ifp, cmdlst->name,
- cmdlst->param,
- cmdlst->param_len);
- }
- list_del(cur);
- kfree(cmdlst);
- }
+ /* do bus specific preinit here */
+ err = brcmf_bus_preinit(ifp->drvr->bus_if);
done:
return err;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
index 0f9e9057e7dd..03fe8aca4d32 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -22,7 +22,6 @@
#include "dhd.h"
#include "dhd_bus.h"
#include "dhd_dbg.h"
-#include "tracepoint.h"
static struct dentry *root_folder;
@@ -42,6 +41,40 @@ void brcmf_debugfs_exit(void)
root_folder = NULL;
}
+static
+ssize_t brcmf_debugfs_chipinfo_read(struct file *f, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct brcmf_pub *drvr = f->private_data;
+ struct brcmf_bus *bus = drvr->bus_if;
+ char buf[40];
+ int res;
+
+ /* only allow read from start */
+ if (*ppos > 0)
+ return 0;
+
+ res = scnprintf(buf, sizeof(buf), "chip: %x(%u) rev %u\n",
+ bus->chip, bus->chip, bus->chiprev);
+ return simple_read_from_buffer(data, count, ppos, buf, res);
+}
+
+static const struct file_operations brcmf_debugfs_chipinfo_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = brcmf_debugfs_chipinfo_read
+};
+
+static int brcmf_debugfs_create_chipinfo(struct brcmf_pub *drvr)
+{
+ struct dentry *dentry = drvr->dbgfs_dir;
+
+ if (!IS_ERR_OR_NULL(dentry))
+ debugfs_create_file("chipinfo", S_IRUGO, dentry, drvr,
+ &brcmf_debugfs_chipinfo_ops);
+ return 0;
+}
+
int brcmf_debugfs_attach(struct brcmf_pub *drvr)
{
struct device *dev = drvr->bus_if->dev;
@@ -50,6 +83,7 @@ int brcmf_debugfs_attach(struct brcmf_pub *drvr)
return -ENODEV;
drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder);
+ brcmf_debugfs_create_chipinfo(drvr);
return PTR_ERR_OR_ZERO(drvr->dbgfs_dir);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index 0af1f5dc583a..ef52ed7abc69 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -33,7 +33,7 @@
#define BRCMF_USB_VAL 0x00002000
#define BRCMF_SCAN_VAL 0x00004000
#define BRCMF_CONN_VAL 0x00008000
-#define BRCMF_CDC_VAL 0x00010000
+#define BRCMF_BCDC_VAL 0x00010000
#define BRCMF_SDIO_VAL 0x00020000
/* set default print format */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 64e9cff241b9..d4d966beb840 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -24,13 +24,13 @@
#include "dhd.h"
#include "dhd_bus.h"
-#include "dhd_proto.h"
#include "dhd_dbg.h"
#include "fwil_types.h"
#include "p2p.h"
#include "wl_cfg80211.h"
#include "fwil.h"
#include "fwsignal.h"
+#include "proto.h"
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
@@ -592,28 +592,6 @@ static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
return &ifp->stats;
}
-/*
- * Set current toe component enables in toe_ol iovar,
- * and set toe global enable iovar
- */
-static int brcmf_toe_set(struct brcmf_if *ifp, u32 toe_ol)
-{
- s32 err;
-
- err = brcmf_fil_iovar_int_set(ifp, "toe_ol", toe_ol);
- if (err < 0) {
- brcmf_err("Setting toe_ol failed, %d\n", err);
- return err;
- }
-
- err = brcmf_fil_iovar_int_set(ifp, "toe", (toe_ol != 0));
- if (err < 0)
- brcmf_err("Setting toe failed, %d\n", err);
-
- return err;
-
-}
-
static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
@@ -621,8 +599,8 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
struct brcmf_pub *drvr = ifp->drvr;
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- snprintf(info->version, sizeof(info->version), "%lu",
- drvr->drv_version);
+ snprintf(info->version, sizeof(info->version), "n/a");
+ strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
sizeof(info->bus_info));
}
@@ -631,124 +609,6 @@ static const struct ethtool_ops brcmf_ethtool_ops = {
.get_drvinfo = brcmf_ethtool_get_drvinfo,
};
-static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
-{
- struct brcmf_pub *drvr = ifp->drvr;
- struct ethtool_drvinfo info;
- char drvname[sizeof(info.driver)];
- u32 cmd;
- struct ethtool_value edata;
- u32 toe_cmpnt, csum_dir;
- int ret;
-
- brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
-
- /* all ethtool calls start with a cmd word */
- if (copy_from_user(&cmd, uaddr, sizeof(u32)))
- return -EFAULT;
-
- switch (cmd) {
- case ETHTOOL_GDRVINFO:
- /* Copy out any request driver name */
- if (copy_from_user(&info, uaddr, sizeof(info)))
- return -EFAULT;
- strncpy(drvname, info.driver, sizeof(info.driver));
- drvname[sizeof(info.driver) - 1] = '\0';
-
- /* clear struct for return */
- memset(&info, 0, sizeof(info));
- info.cmd = cmd;
-
- /* if requested, identify ourselves */
- if (strcmp(drvname, "?dhd") == 0) {
- sprintf(info.driver, "dhd");
- strcpy(info.version, BRCMF_VERSION_STR);
- }
- /* report dongle driver type */
- else
- sprintf(info.driver, "wl");
-
- sprintf(info.version, "%lu", drvr->drv_version);
- if (copy_to_user(uaddr, &info, sizeof(info)))
- return -EFAULT;
- brcmf_dbg(TRACE, "given %*s, returning %s\n",
- (int)sizeof(drvname), drvname, info.driver);
- break;
-
- /* Get toe offload components from dongle */
- case ETHTOOL_GRXCSUM:
- case ETHTOOL_GTXCSUM:
- ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
- if (ret < 0)
- return ret;
-
- csum_dir =
- (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
-
- edata.cmd = cmd;
- edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
-
- if (copy_to_user(uaddr, &edata, sizeof(edata)))
- return -EFAULT;
- break;
-
- /* Set toe offload components in dongle */
- case ETHTOOL_SRXCSUM:
- case ETHTOOL_STXCSUM:
- if (copy_from_user(&edata, uaddr, sizeof(edata)))
- return -EFAULT;
-
- /* Read the current settings, update and write back */
- ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
- if (ret < 0)
- return ret;
-
- csum_dir =
- (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
-
- if (edata.data != 0)
- toe_cmpnt |= csum_dir;
- else
- toe_cmpnt &= ~csum_dir;
-
- ret = brcmf_toe_set(ifp, toe_cmpnt);
- if (ret < 0)
- return ret;
-
- /* If setting TX checksum mode, tell Linux the new mode */
- if (cmd == ETHTOOL_STXCSUM) {
- if (edata.data)
- ifp->ndev->features |= NETIF_F_IP_CSUM;
- else
- ifp->ndev->features &= ~NETIF_F_IP_CSUM;
- }
-
- break;
-
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
- int cmd)
-{
- struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_pub *drvr = ifp->drvr;
-
- brcmf_dbg(TRACE, "Enter, idx=%d, cmd=0x%04x\n", ifp->bssidx, cmd);
-
- if (!drvr->iflist[ifp->bssidx])
- return -1;
-
- if (cmd == SIOCETHTOOL)
- return brcmf_ethtool(ifp, ifr->ifr_data);
-
- return -EOPNOTSUPP;
-}
-
static int brcmf_netdev_stop(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -769,7 +629,6 @@ static int brcmf_netdev_open(struct net_device *ndev)
struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_bus *bus_if = drvr->bus_if;
u32 toe_ol;
- s32 ret = 0;
brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
@@ -788,21 +647,20 @@ static int brcmf_netdev_open(struct net_device *ndev)
else
ndev->features &= ~NETIF_F_IP_CSUM;
- /* Allow transmit calls */
- netif_start_queue(ndev);
if (brcmf_cfg80211_up(ndev)) {
brcmf_err("failed to bring up cfg80211\n");
- return -1;
+ return -EIO;
}
- return ret;
+ /* Allow transmit calls */
+ netif_start_queue(ndev);
+ return 0;
}
static const struct net_device_ops brcmf_netdev_ops_pri = {
.ndo_open = brcmf_netdev_open,
.ndo_stop = brcmf_netdev_stop,
.ndo_get_stats = brcmf_netdev_get_stats,
- .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
.ndo_start_xmit = brcmf_netdev_start_xmit,
.ndo_set_mac_address = brcmf_netdev_set_mac_address,
.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
@@ -844,7 +702,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
- ndev->destructor = free_netdev;
+ ndev->destructor = brcmf_cfg80211_free_netdev;
return 0;
fail:
@@ -868,13 +726,6 @@ static int brcmf_net_p2p_stop(struct net_device *ndev)
return brcmf_cfg80211_down(ndev);
}
-static int brcmf_net_p2p_do_ioctl(struct net_device *ndev,
- struct ifreq *ifr, int cmd)
-{
- brcmf_dbg(TRACE, "Enter\n");
- return 0;
-}
-
static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -887,7 +738,6 @@ static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
static const struct net_device_ops brcmf_netdev_ops_p2p = {
.ndo_open = brcmf_net_p2p_open,
.ndo_stop = brcmf_net_p2p_stop,
- .ndo_do_ioctl = brcmf_net_p2p_do_ioctl,
.ndo_start_xmit = brcmf_net_p2p_start_xmit
};
@@ -1009,14 +859,12 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
}
/* unregister will take care of freeing it */
unregister_netdev(ifp->ndev);
- if (bssidx == 0)
- brcmf_cfg80211_detach(drvr->config);
} else {
kfree(ifp);
}
}
-int brcmf_attach(uint bus_hdrlen, struct device *dev)
+int brcmf_attach(struct device *dev)
{
struct brcmf_pub *drvr = NULL;
int ret = 0;
@@ -1031,7 +879,7 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
mutex_init(&drvr->proto_block);
/* Link to bus module */
- drvr->hdrlen = bus_hdrlen;
+ drvr->hdrlen = 0;
drvr->bus_if = dev_get_drvdata(dev);
drvr->bus_if->drvr = drvr;
@@ -1048,8 +896,6 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
/* attach firmware event handler */
brcmf_fweh_attach(drvr);
- INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
-
return ret;
fail:
@@ -1088,7 +934,7 @@ int brcmf_bus_start(struct device *dev)
p2p_ifp = NULL;
/* signal bus ready */
- bus_if->state = BRCMF_BUS_DATA;
+ brcmf_bus_change_state(bus_if, BRCMF_BUS_DATA);
/* Bus is ready, do any initialization */
ret = brcmf_c_preinit_dcmds(ifp);
@@ -1115,8 +961,7 @@ int brcmf_bus_start(struct device *dev)
fail:
if (ret < 0) {
brcmf_err("failed: %d\n", ret);
- if (drvr->config)
- brcmf_cfg80211_detach(drvr->config);
+ brcmf_cfg80211_detach(drvr->config);
if (drvr->fws) {
brcmf_fws_del_interface(ifp);
brcmf_fws_deinit(drvr);
@@ -1138,14 +983,21 @@ fail:
return 0;
}
+void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
+
+ if (drvr) {
+ drvr->hdrlen += len;
+ }
+}
+
static void brcmf_bus_detach(struct brcmf_pub *drvr)
{
brcmf_dbg(TRACE, "Enter\n");
if (drvr) {
- /* Stop the protocol module */
- brcmf_proto_stop(drvr);
-
/* Stop the bus module */
brcmf_bus_stop(drvr->bus_if);
}
@@ -1177,6 +1029,8 @@ void brcmf_detach(struct device *dev)
/* stop firmware event handling */
brcmf_fweh_detach(drvr);
+ brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);
+
/* make sure primary interface removed last */
for (i = BRCMF_MAX_IFS-1; i > -1; i--)
if (drvr->iflist[i]) {
@@ -1184,10 +1038,11 @@ void brcmf_detach(struct device *dev)
brcmf_del_if(drvr, i);
}
+ brcmf_cfg80211_detach(drvr->config);
+
brcmf_bus_detach(drvr);
- if (drvr->prot)
- brcmf_proto_detach(drvr);
+ brcmf_proto_detach(drvr);
brcmf_fws_deinit(drvr);
@@ -1196,6 +1051,14 @@ void brcmf_detach(struct device *dev)
kfree(drvr);
}
+s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_if *ifp = bus_if->drvr->iflist[0];
+
+ return brcmf_fil_iovar_data_set(ifp, name, data, len);
+}
+
static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
{
return atomic_read(&ifp->pend_8021x_cnt);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
deleted file mode 100644
index 53c6e710f2cb..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCMF_PROTO_H_
-#define _BRCMF_PROTO_H_
-
-/*
- * Exported from the brcmf protocol module (brcmf_cdc)
- */
-
-/* Linkage, sets prot link and updates hdrlen in pub */
-int brcmf_proto_attach(struct brcmf_pub *drvr);
-
-/* Unlink, frees allocated protocol memory (including brcmf_proto) */
-void brcmf_proto_detach(struct brcmf_pub *drvr);
-
-/* Stop protocol: sync w/dongle state. */
-void brcmf_proto_stop(struct brcmf_pub *drvr);
-
-/* Add any protocol-specific data header.
- * Caller must reserve prot_hdrlen prepend space.
- */
-void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
- struct sk_buff *txp);
-
-/* Sets dongle media info (drv_version, mac address). */
-int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
-
-#endif /* _BRCMF_PROTO_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index b02953c4ade7..3e991897d7ca 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -32,6 +32,7 @@
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/platform_data/brcmfmac-sdio.h>
+#include <linux/moduleparam.h>
#include <asm/unaligned.h>
#include <defs.h>
#include <brcmu_wifi.h>
@@ -40,6 +41,7 @@
#include <soc.h>
#include "sdio_host.h"
#include "sdio_chip.h"
+#include "nvram.h"
#define DCMD_RESP_TIMEOUT 2000 /* In milliseconds */
@@ -110,6 +112,8 @@ struct rte_console {
#define BRCMF_TXBOUND 20 /* Default for max tx frames in
one scheduling */
+#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
+
#define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
#define MEMBLOCK 2048 /* Block size used for downloading
@@ -257,9 +261,6 @@ struct rte_console {
#define MAX_HDR_READ (1 << 6)
#define MAX_RX_DATASZ 2048
-/* Maximum milliseconds to wait for F2 to come up */
-#define BRCMF_WAIT_F2RDY 3000
-
/* Bump up limit on waiting for HT to account for first startup;
* if the image is doing a CRC calculation before programming the PMU
* for HT availability, it could take a couple hundred ms more, so
@@ -360,15 +361,15 @@ struct brcmf_sdio_hdrinfo {
u16 len_left;
u16 len_nxtfrm;
u8 dat_offset;
+ bool lastfrm;
+ u16 tail_pad;
};
/* misc chip info needed by some of the routines */
/* Private data for SDIO bus interaction */
struct brcmf_sdio {
struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
- struct chip_info *ci; /* Chip info struct */
- char *vars; /* Variables (from CIS and/or other) */
- uint varsz; /* Size of variables buffer */
+ struct brcmf_chip *ci; /* Chip info struct */
u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
@@ -384,7 +385,7 @@ struct brcmf_sdio {
u8 tx_seq; /* Transmit sequence number (next) */
u8 tx_max; /* Maximum transmit sequence allowed */
- u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
+ u8 *hdrbuf; /* buffer for handling rx frame */
u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
u8 rx_seq; /* Receive sequence number (expected) */
struct brcmf_sdio_hdrinfo cur_read;
@@ -455,6 +456,10 @@ struct brcmf_sdio {
bool sleeping; /* SDIO bus sleeping */
u8 tx_hdrlen; /* sdio bus header length for tx packet */
+ bool txglom; /* host tx glomming enable flag */
+ struct sk_buff *txglom_sgpad; /* scatter-gather padding buffer */
+ u16 head_align; /* buffer pointer alignment */
+ u16 sgentry_align; /* scatter-gather buffer alignment */
};
/* clkstate */
@@ -479,6 +484,10 @@ static const uint max_roundup = 512;
#define ALIGNMENT 4
+static int brcmf_sdio_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
+module_param_named(txglomsz, brcmf_sdio_txglomsz, int, 0);
+MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
+
enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_NORMAL,
BRCMF_SDIO_FT_SUPER,
@@ -499,6 +508,10 @@ enum brcmf_sdio_frmtype {
#define BCM4334_NVRAM_NAME "brcm/brcmfmac4334-sdio.txt"
#define BCM4335_FIRMWARE_NAME "brcm/brcmfmac4335-sdio.bin"
#define BCM4335_NVRAM_NAME "brcm/brcmfmac4335-sdio.txt"
+#define BCM43362_FIRMWARE_NAME "brcm/brcmfmac43362-sdio.bin"
+#define BCM43362_NVRAM_NAME "brcm/brcmfmac43362-sdio.txt"
+#define BCM4339_FIRMWARE_NAME "brcm/brcmfmac4339-sdio.bin"
+#define BCM4339_NVRAM_NAME "brcm/brcmfmac4339-sdio.txt"
MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
@@ -514,6 +527,10 @@ MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
struct brcmf_firmware_names {
u32 chipid;
@@ -537,11 +554,13 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
{ BCM4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
{ BCM4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
{ BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
- { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }
+ { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
+ { BCM43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
+ { BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) }
};
-static const struct firmware *brcmf_sdbrcm_get_fw(struct brcmf_sdio *bus,
+static const struct firmware *brcmf_sdio_get_fw(struct brcmf_sdio *bus,
enum brcmf_firmware_type type)
{
const struct firmware *fw;
@@ -606,8 +625,8 @@ r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
int ret;
- *regvar = brcmf_sdio_regrl(bus->sdiodev,
- bus->ci->c_inf[idx].base + offset, &ret);
+ *regvar = brcmf_sdiod_regrl(bus->sdiodev,
+ bus->ci->c_inf[idx].base + offset, &ret);
return ret;
}
@@ -618,15 +637,15 @@ w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
int ret;
- brcmf_sdio_regwl(bus->sdiodev,
- bus->ci->c_inf[idx].base + reg_offset,
- regval, &ret);
+ brcmf_sdiod_regwl(bus->sdiodev,
+ bus->ci->c_inf[idx].base + reg_offset,
+ regval, &ret);
return ret;
}
static int
-brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
+brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
{
u8 wr_val = 0, rd_val, cmp_val, bmask;
int err = 0;
@@ -636,8 +655,8 @@ brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
/* 1st KSO write goes to AOS wake up core if device is asleep */
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- wr_val, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ wr_val, &err);
if (err) {
brcmf_err("SDIO_AOS KSO write error: %d\n", err);
return err;
@@ -667,15 +686,15 @@ brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
* just one write attempt may fail,
* read it back until it matches written value
*/
- rd_val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- &err);
+ rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ &err);
if (((rd_val & bmask) == cmp_val) && !err)
break;
brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
try_cnt, MAX_KSO_ATTEMPTS, err);
udelay(KSO_WAIT_US);
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- wr_val, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ wr_val, &err);
} while (try_cnt++ < MAX_KSO_ATTEMPTS);
return err;
@@ -686,7 +705,7 @@ brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
/* Turn backplane clock on or off */
-static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
+static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
{
int err;
u8 clkctl, clkreq, devctl;
@@ -706,16 +725,16 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
clkreq =
bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- clkreq, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkreq, &err);
if (err) {
brcmf_err("HT Avail request error: %d\n", err);
return -EBADE;
}
/* Check current status */
- clkctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
brcmf_err("HT Avail read error: %d\n", err);
return -EBADE;
@@ -724,8 +743,8 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
/* Go to pending and await interrupt if appropriate */
if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
/* Allow only clock-available interrupt */
- devctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
if (err) {
brcmf_err("Devctl error setting CA: %d\n",
err);
@@ -733,28 +752,28 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
}
devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
bus->clkstate = CLK_PENDING;
return 0;
} else if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
- devctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
}
/* Otherwise, wait here (polling) for HT Avail */
timeout = jiffies +
msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
- clkctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR,
- &err);
+ clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ &err);
if (time_after(jiffies, timeout))
break;
else
@@ -787,16 +806,16 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
- devctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
}
bus->clkstate = CLK_SDONLY;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- clkreq, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkreq, &err);
brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
if (err) {
brcmf_err("Failed access turning clock off: %d\n",
@@ -808,7 +827,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
}
/* Change idle/active SD state */
-static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
+static int brcmf_sdio_sdclk(struct brcmf_sdio *bus, bool on)
{
brcmf_dbg(SDIO, "Enter\n");
@@ -821,7 +840,7 @@ static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
}
/* Transition SD and backplane clock readiness */
-static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
+static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
{
#ifdef DEBUG
uint oldstate = bus->clkstate;
@@ -832,7 +851,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
/* Early exit if we're already there */
if (bus->clkstate == target) {
if (target == CLK_AVAIL) {
- brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
bus->activity = true;
}
return 0;
@@ -842,32 +861,32 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
case CLK_AVAIL:
/* Make sure SD clock is available */
if (bus->clkstate == CLK_NONE)
- brcmf_sdbrcm_sdclk(bus, true);
+ brcmf_sdio_sdclk(bus, true);
/* Now request HT Avail on the backplane */
- brcmf_sdbrcm_htclk(bus, true, pendok);
- brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ brcmf_sdio_htclk(bus, true, pendok);
+ brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
bus->activity = true;
break;
case CLK_SDONLY:
/* Remove HT request, or bring up SD clock */
if (bus->clkstate == CLK_NONE)
- brcmf_sdbrcm_sdclk(bus, true);
+ brcmf_sdio_sdclk(bus, true);
else if (bus->clkstate == CLK_AVAIL)
- brcmf_sdbrcm_htclk(bus, false, false);
+ brcmf_sdio_htclk(bus, false, false);
else
brcmf_err("request for %d -> %d\n",
bus->clkstate, target);
- brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
break;
case CLK_NONE:
/* Make sure to remove HT request */
if (bus->clkstate == CLK_AVAIL)
- brcmf_sdbrcm_htclk(bus, false, false);
+ brcmf_sdio_htclk(bus, false, false);
/* Now remove the SD clock */
- brcmf_sdbrcm_sdclk(bus, false);
- brcmf_sdbrcm_wd_timer(bus, 0);
+ brcmf_sdio_sdclk(bus, false);
+ brcmf_sdio_wd_timer(bus, 0);
break;
}
#ifdef DEBUG
@@ -878,7 +897,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
}
static int
-brcmf_sdbrcm_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
+brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
{
int err = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -901,13 +920,13 @@ brcmf_sdbrcm_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
data_ok(bus)))
return -EBUSY;
- err = brcmf_sdbrcm_kso_control(bus, false);
+ err = brcmf_sdio_kso_control(bus, false);
/* disable watchdog */
if (!err)
- brcmf_sdbrcm_wd_timer(bus, 0);
+ brcmf_sdio_wd_timer(bus, 0);
} else {
bus->idlecount = 0;
- err = brcmf_sdbrcm_kso_control(bus, true);
+ err = brcmf_sdio_kso_control(bus, true);
}
if (!err) {
/* Change state */
@@ -925,16 +944,16 @@ end:
/* control clocks */
if (sleep) {
if (!bus->sr_enabled)
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, pendok);
+ brcmf_sdio_clkctl(bus, CLK_NONE, pendok);
} else {
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, pendok);
+ brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
}
return err;
}
-static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
+static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
{
u32 intstatus = 0;
u32 hmb_data;
@@ -1010,7 +1029,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
return intstatus;
}
-static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
+static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
{
uint retries = 0;
u16 lastrbc;
@@ -1022,18 +1041,18 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
rtx ? ", send NAK" : "");
if (abort)
- brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+ brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_RF_TERM, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_RF_TERM, &err);
bus->sdcnt.f1regdata++;
/* Wait until the packet has been flushed (device/FIFO stable) */
for (lastrbc = retries = 0xffff; retries > 0; retries--) {
- hi = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_RFRAMEBCHI, &err);
- lo = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_RFRAMEBCLO, &err);
+ hi = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_RFRAMEBCHI, &err);
+ lo = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_RFRAMEBCLO, &err);
bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
@@ -1063,14 +1082,10 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
/* Clear partial in any case */
bus->cur_read.len = 0;
-
- /* If we can't reach the device, signal failure */
- if (err)
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
/* return total length of buffer chain */
-static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
+static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
{
struct sk_buff *p;
uint total;
@@ -1081,7 +1096,7 @@ static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
return total;
}
-static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
+static void brcmf_sdio_free_glom(struct brcmf_sdio *bus)
{
struct sk_buff *cur, *next;
@@ -1097,10 +1112,18 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
* host and WiFi dongle which contains information needed for SDIO core and
* firmware
*
- * It consists of 2 parts: hw header and software header
+ * It consists of 3 parts: hardware header, hardware extension header and
+ * software header
* hardware header (frame tag) - 4 bytes
* Byte 0~1: Frame length
* Byte 2~3: Checksum, bit-wise inverse of frame length
+ * hardware extension header - 8 bytes
+ * Tx glom mode only, N/A for Rx or normal Tx
+ * Byte 0~1: Packet length excluding hw frame tag
+ * Byte 2: Reserved
+ * Byte 3: Frame flags, bit 0: last frame indication
+ * Byte 4~5: Reserved
+ * Byte 6~7: Tail padding length
* software header - 8 bytes
* Byte 0: Rx/Tx sequence number
* Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
@@ -1111,6 +1134,7 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
* Byte 6~7: Reserved
*/
#define SDPCM_HWHDR_LEN 4
+#define SDPCM_HWEXT_LEN 8
#define SDPCM_SWHDR_LEN 8
#define SDPCM_HDRLEN (SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
/* software header */
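The layout above maps onto the little-endian words written by brcmf_sdio_hdpack() later in this patch. A minimal standalone sketch (hypothetical helper, not part of the driver) of packing the 8-byte hardware extension header for one tx-glom subframe:

#include <stdint.h>
#include <string.h>

/* Illustration only: pack the 8-byte hardware extension header described
 * above. Fields are little-endian, mirroring the cpu_to_le32() stores in
 * brcmf_sdio_hdpack(); the helper name and signature are assumptions.
 */
static void sdpcm_pack_hwext(uint8_t *ext, uint16_t len_no_tag,
			     int last_frame, uint16_t tail_pad)
{
	memset(ext, 0, 8);			/* bytes 2 and 4~5 stay reserved */
	ext[0] = len_no_tag & 0xff;		/* Byte 0~1: length excl. frame tag */
	ext[1] = len_no_tag >> 8;
	ext[3] = last_frame ? 0x01 : 0x00;	/* Byte 3, bit 0: last frame */
	ext[6] = tail_pad & 0xff;		/* Byte 6~7: tail padding length */
	ext[7] = tail_pad >> 8;
}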
@@ -1147,7 +1171,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
u8 rx_seq, fc, tx_seq_max;
u32 swheader;
- trace_brcmf_sdpcm_hdr(false, header);
+ trace_brcmf_sdpcm_hdr(SDPCM_RX, header);
/* hw header */
len = get_unaligned_le16(header);
@@ -1160,7 +1184,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
if ((u16)(~(len ^ checksum))) {
brcmf_err("HW header checksum error\n");
bus->sdcnt.rx_badhdr++;
- brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdio_rxfail(bus, false, false);
return -EIO;
}
if (len < SDPCM_HDRLEN) {
@@ -1192,7 +1216,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
type != BRCMF_SDIO_FT_SUPER) {
brcmf_err("HW header length too long\n");
bus->sdcnt.rx_toolong++;
- brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdio_rxfail(bus, false, false);
rd->len = 0;
return -EPROTO;
}
@@ -1211,7 +1235,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
brcmf_err("seq %d: bad data offset\n", rx_seq);
bus->sdcnt.rx_badhdr++;
- brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdio_rxfail(bus, false, false);
rd->len = 0;
return -ENXIO;
}
@@ -1260,25 +1284,34 @@ static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
struct brcmf_sdio_hdrinfo *hd_info)
{
- u32 sw_header;
+ u32 hdrval;
+ u8 hdr_offset;
brcmf_sdio_update_hwhdr(header, hd_info->len);
-
- sw_header = bus->tx_seq;
- sw_header |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
- SDPCM_CHANNEL_MASK;
- sw_header |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
- SDPCM_DOFFSET_MASK;
- *(((__le32 *)header) + 1) = cpu_to_le32(sw_header);
- *(((__le32 *)header) + 2) = 0;
- trace_brcmf_sdpcm_hdr(true, header);
+ hdr_offset = SDPCM_HWHDR_LEN;
+
+ if (bus->txglom) {
+ hdrval = (hd_info->len - hdr_offset) | (hd_info->lastfrm << 24);
+ *((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
+ hdrval = (u16)hd_info->tail_pad << 16;
+ *(((__le32 *)(header + hdr_offset)) + 1) = cpu_to_le32(hdrval);
+ hdr_offset += SDPCM_HWEXT_LEN;
+ }
+
+ hdrval = hd_info->seq_num;
+ hdrval |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
+ SDPCM_CHANNEL_MASK;
+ hdrval |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
+ SDPCM_DOFFSET_MASK;
+ *((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
+ *(((__le32 *)(header + hdr_offset)) + 1) = 0;
+ trace_brcmf_sdpcm_hdr(SDPCM_TX + !!(bus->txglom), header);
}
-static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
+static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
{
u16 dlen, totlen;
u8 *dptr, num = 0;
- u32 align = 0;
u16 sublen;
struct sk_buff *pfirst, *pnext;
@@ -1293,11 +1326,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
bus->glomd, skb_peek(&bus->glom));
- if (bus->sdiodev->pdata)
- align = bus->sdiodev->pdata->sd_sgentry_align;
- if (align < 4)
- align = 4;
-
/* If there's a descriptor, generate the packet chain */
if (bus->glomd) {
pfirst = pnext = NULL;
@@ -1321,9 +1349,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
pnext = NULL;
break;
}
- if (sublen % align) {
+ if (sublen % bus->sgentry_align) {
brcmf_err("sublen %d not multiple of %d\n",
- sublen, align);
+ sublen, bus->sgentry_align);
}
totlen += sublen;
@@ -1336,7 +1364,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
}
/* Allocate/chain packet for next subframe */
- pnext = brcmu_pkt_buf_get_skb(sublen + align);
+ pnext = brcmu_pkt_buf_get_skb(sublen + bus->sgentry_align);
if (pnext == NULL) {
brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
num, sublen);
@@ -1345,7 +1373,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
skb_queue_tail(&bus->glom, pnext);
/* Adhere to start alignment requirements */
- pkt_align(pnext, sublen, align);
+ pkt_align(pnext, sublen, bus->sgentry_align);
}
/* If all allocations succeeded, save packet chain
@@ -1360,7 +1388,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
}
pfirst = pnext = NULL;
} else {
- brcmf_sdbrcm_free_glom(bus);
+ brcmf_sdio_free_glom(bus);
num = 0;
}
@@ -1383,16 +1411,15 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
}
pfirst = skb_peek(&bus->glom);
- dlen = (u16) brcmf_sdbrcm_glom_len(bus);
+ dlen = (u16) brcmf_sdio_glom_len(bus);
/* Do an SDIO read for the superframe. Configurable iovar to
* read directly into the chained packet, or allocate a large
* packet and copy into the chain.
*/
sdio_claim_host(bus->sdiodev->func[1]);
- errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
- bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, &bus->glom, dlen);
+ errcode = brcmf_sdiod_recv_chain(bus->sdiodev,
+ &bus->glom, dlen);
sdio_release_host(bus->sdiodev->func[1]);
bus->sdcnt.f2rxdata++;
@@ -1403,12 +1430,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
sdio_claim_host(bus->sdiodev->func[1]);
if (bus->glomerr++ < 3) {
- brcmf_sdbrcm_rxfail(bus, true, true);
+ brcmf_sdio_rxfail(bus, true, true);
} else {
bus->glomerr = 0;
- brcmf_sdbrcm_rxfail(bus, true, false);
+ brcmf_sdio_rxfail(bus, true, false);
bus->sdcnt.rxglomfail++;
- brcmf_sdbrcm_free_glom(bus);
+ brcmf_sdio_free_glom(bus);
}
sdio_release_host(bus->sdiodev->func[1]);
return 0;
@@ -1456,12 +1483,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (bus->glomerr++ < 3) {
/* Restore superframe header space */
skb_push(pfirst, sfdoff);
- brcmf_sdbrcm_rxfail(bus, true, true);
+ brcmf_sdio_rxfail(bus, true, true);
} else {
bus->glomerr = 0;
- brcmf_sdbrcm_rxfail(bus, true, false);
+ brcmf_sdio_rxfail(bus, true, false);
bus->sdcnt.rxglomfail++;
- brcmf_sdbrcm_free_glom(bus);
+ brcmf_sdio_free_glom(bus);
}
sdio_release_host(bus->sdiodev->func[1]);
bus->cur_read.len = 0;
@@ -1505,8 +1532,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
return num;
}
-static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
- bool *pending)
+static int brcmf_sdio_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
+ bool *pending)
{
DECLARE_WAITQUEUE(wait, current);
int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);
@@ -1527,7 +1554,7 @@ static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
return timeout;
}
-static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
+static int brcmf_sdio_dcmd_resp_wake(struct brcmf_sdio *bus)
{
if (waitqueue_active(&bus->dcmd_resp_wait))
wake_up_interruptible(&bus->dcmd_resp_wait);
@@ -1535,7 +1562,7 @@ static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
return 0;
}
static void
-brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
+brcmf_sdio_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
{
uint rdlen, pad;
u8 *buf = NULL, *rbuf;
@@ -1549,9 +1576,9 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
goto done;
rbuf = bus->rxbuf;
- pad = ((unsigned long)rbuf % BRCMF_SDALIGN);
+ pad = ((unsigned long)rbuf % bus->head_align);
if (pad)
- rbuf += (BRCMF_SDALIGN - pad);
+ rbuf += (bus->head_align - pad);
/* Copy the already-read portion over */
memcpy(buf, hdr, BRCMF_FIRSTREAD);
@@ -1565,19 +1592,15 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
((len + pad) < bus->sdiodev->bus_if->maxctl))
rdlen += pad;
- } else if (rdlen % BRCMF_SDALIGN) {
- rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
+ } else if (rdlen % bus->head_align) {
+ rdlen += bus->head_align - (rdlen % bus->head_align);
}
- /* Satisfy length-alignment requirements */
- if (rdlen & (ALIGNMENT - 1))
- rdlen = roundup(rdlen, ALIGNMENT);
-
/* Drop if the read is too big or it exceeds our maximum */
if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
rdlen, bus->sdiodev->bus_if->maxctl);
- brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdio_rxfail(bus, false, false);
goto done;
}
@@ -1585,15 +1608,12 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
len, len - doff, bus->sdiodev->bus_if->maxctl);
bus->sdcnt.rx_toolong++;
- brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdio_rxfail(bus, false, false);
goto done;
}
/* Read remain of frame body */
- sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
- bus->sdiodev->sbwad,
- SDIO_FUNC_2,
- F2SYNC, rbuf, rdlen);
+ sdret = brcmf_sdiod_recv_buf(bus->sdiodev, rbuf, rdlen);
bus->sdcnt.f2rxdata++;
/* Control frame failures need retransmission */
@@ -1601,7 +1621,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
brcmf_err("read %d control bytes failed: %d\n",
rdlen, sdret);
bus->sdcnt.rxc_errors++;
- brcmf_sdbrcm_rxfail(bus, true, true);
+ brcmf_sdio_rxfail(bus, true, true);
goto done;
} else
memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
@@ -1626,19 +1646,19 @@ gotpkt:
done:
/* Awake any waiters */
- brcmf_sdbrcm_dcmd_resp_wake(bus);
+ brcmf_sdio_dcmd_resp_wake(bus);
}
/* Pad read to blocksize for efficiency */
-static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
+static void brcmf_sdio_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
{
if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
*pad = bus->blocksize - (*rdlen % bus->blocksize);
if (*pad <= bus->roundup && *pad < bus->blocksize &&
*rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
*rdlen += *pad;
- } else if (*rdlen % BRCMF_SDALIGN) {
- *rdlen += BRCMF_SDALIGN - (*rdlen % BRCMF_SDALIGN);
+ } else if (*rdlen % bus->head_align) {
+ *rdlen += bus->head_align - (*rdlen % bus->head_align);
}
}
@@ -1658,8 +1678,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
bus->rxpending = true;
for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
- !bus->rxskip && rxleft &&
- bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
+ !bus->rxskip && rxleft && brcmf_bus_ready(bus->sdiodev->bus_if);
rd->seq_num++, rxleft--) {
/* Handle glomming separately */
@@ -1667,7 +1686,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
u8 cnt;
brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
bus->glomd, skb_peek(&bus->glom));
- cnt = brcmf_sdbrcm_rxglom(bus, rd->seq_num);
+ cnt = brcmf_sdio_rxglom(bus, rd->seq_num);
brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
rd->seq_num += cnt - 1;
rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
@@ -1678,17 +1697,14 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
/* read header first for unknown frame length */
sdio_claim_host(bus->sdiodev->func[1]);
if (!rd->len) {
- ret = brcmf_sdcard_recv_buf(bus->sdiodev,
- bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC,
- bus->rxhdr,
- BRCMF_FIRSTREAD);
+ ret = brcmf_sdiod_recv_buf(bus->sdiodev,
+ bus->rxhdr, BRCMF_FIRSTREAD);
bus->sdcnt.f2rxhdrs++;
if (ret < 0) {
brcmf_err("RXHEADER FAILED: %d\n",
ret);
bus->sdcnt.rx_hdrfail++;
- brcmf_sdbrcm_rxfail(bus, true, true);
+ brcmf_sdio_rxfail(bus, true, true);
sdio_release_host(bus->sdiodev->func[1]);
continue;
}
@@ -1707,9 +1723,9 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
}
if (rd->channel == SDPCM_CONTROL_CHANNEL) {
- brcmf_sdbrcm_read_control(bus, bus->rxhdr,
- rd->len,
- rd->dat_offset);
+ brcmf_sdio_read_control(bus, bus->rxhdr,
+ rd->len,
+ rd->dat_offset);
/* prepare the descriptor for the next read */
rd->len = rd->len_nxtfrm << 4;
rd->len_nxtfrm = 0;
@@ -1723,23 +1739,22 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
head_read = BRCMF_FIRSTREAD;
}
- brcmf_pad(bus, &pad, &rd->len_left);
+ brcmf_sdio_pad(bus, &pad, &rd->len_left);
pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
- BRCMF_SDALIGN);
+ bus->head_align);
if (!pkt) {
/* Give up on data, request rtx of events */
brcmf_err("brcmu_pkt_buf_get_skb failed\n");
- brcmf_sdbrcm_rxfail(bus, false,
+ brcmf_sdio_rxfail(bus, false,
RETRYCHAN(rd->channel));
sdio_release_host(bus->sdiodev->func[1]);
continue;
}
skb_pull(pkt, head_read);
- pkt_align(pkt, rd->len_left, BRCMF_SDALIGN);
+ pkt_align(pkt, rd->len_left, bus->head_align);
- ret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, pkt);
+ ret = brcmf_sdiod_recv_pkt(bus->sdiodev, pkt);
bus->sdcnt.f2rxdata++;
sdio_release_host(bus->sdiodev->func[1]);
@@ -1748,7 +1763,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd->len, rd->channel, ret);
brcmu_pkt_buf_free_skb(pkt);
sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_rxfail(bus, true,
+ brcmf_sdio_rxfail(bus, true,
RETRYCHAN(rd->channel));
sdio_release_host(bus->sdiodev->func[1]);
continue;
@@ -1773,7 +1788,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd->len,
roundup(rd_new.len, 16) >> 4);
rd->len = 0;
- brcmf_sdbrcm_rxfail(bus, true, true);
+ brcmf_sdio_rxfail(bus, true, true);
sdio_release_host(bus->sdiodev->func[1]);
brcmu_pkt_buf_free_skb(pkt);
continue;
@@ -1795,7 +1810,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
/* Force retry w/normal header read */
rd->len = 0;
sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_rxfail(bus, false, true);
+ brcmf_sdio_rxfail(bus, false, true);
sdio_release_host(bus->sdiodev->func[1]);
brcmu_pkt_buf_free_skb(pkt);
continue;
@@ -1820,7 +1835,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
brcmf_err("%s: glom superframe w/o "
"descriptor!\n", __func__);
sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdio_rxfail(bus, false, false);
sdio_release_host(bus->sdiodev->func[1]);
}
/* prepare the descriptor for the next read */
@@ -1864,13 +1879,36 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
}
static void
-brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
+brcmf_sdio_wait_event_wakeup(struct brcmf_sdio *bus)
{
if (waitqueue_active(&bus->ctrl_wait))
wake_up_interruptible(&bus->ctrl_wait);
return;
}
+static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
+{
+ u16 head_pad;
+ u8 *dat_buf;
+
+ dat_buf = (u8 *)(pkt->data);
+
+ /* Check head padding */
+ head_pad = ((unsigned long)dat_buf % bus->head_align);
+ if (head_pad) {
+ if (skb_headroom(pkt) < head_pad) {
+ bus->sdiodev->bus_if->tx_realloc++;
+ head_pad = 0;
+ if (skb_cow(pkt, head_pad))
+ return -ENOMEM;
+ }
+ skb_push(pkt, head_pad);
+ dat_buf = (u8 *)(pkt->data);
+ memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
+ }
+ return head_pad;
+}
+
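As a worked example of the head-alignment helper above (numbers assumed): with bus->head_align = 4 and pkt->data at an address ending in 0x2, head_pad comes out as 2; given 2 bytes of headroom, skb_push(pkt, 2) reclaims them and the memset() zeroes those 2 bytes plus bus->tx_hdrlen so the SDPCM header area starts out clean. The returned head_pad is later folded into hd_info.dat_offset in brcmf_sdio_txpkt_prep().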
/**
* struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
* bus layer usage.
@@ -1880,32 +1918,40 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
/* bit mask of data length chopped from the previous packet */
#define ALIGN_SKB_CHOP_LEN_MASK 0x7fff
-static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio_dev *sdiodev,
+static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
struct sk_buff_head *pktq,
- struct sk_buff *pkt, uint chan)
+ struct sk_buff *pkt, u16 total_len)
{
+ struct brcmf_sdio_dev *sdiodev;
struct sk_buff *pkt_pad;
- u16 tail_pad, tail_chop, sg_align;
+ u16 tail_pad, tail_chop, chain_pad;
unsigned int blksize;
- u8 *dat_buf;
- int ntail;
+ bool lastfrm;
+ int ntail, ret;
+ sdiodev = bus->sdiodev;
blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
- sg_align = 4;
- if (sdiodev->pdata && sdiodev->pdata->sd_sgentry_align > 4)
- sg_align = sdiodev->pdata->sd_sgentry_align;
/* sg entry alignment should be a divisor of block size */
- WARN_ON(blksize % sg_align);
+ WARN_ON(blksize % bus->sgentry_align);
/* Check tail padding */
- pkt_pad = NULL;
- tail_chop = pkt->len % sg_align;
- tail_pad = sg_align - tail_chop;
- tail_pad += blksize - (pkt->len + tail_pad) % blksize;
+ lastfrm = skb_queue_is_last(pktq, pkt);
+ tail_pad = 0;
+ tail_chop = pkt->len % bus->sgentry_align;
+ if (tail_chop)
+ tail_pad = bus->sgentry_align - tail_chop;
+ chain_pad = (total_len + tail_pad) % blksize;
+ if (lastfrm && chain_pad)
+ tail_pad += blksize - chain_pad;
if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
- pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
+ pkt_pad = bus->txglom_sgpad;
+ if (pkt_pad == NULL)
+			pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
if (pkt_pad == NULL)
return -ENOMEM;
+ ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
+ if (unlikely(ret < 0))
+ return ret;
memcpy(pkt_pad->data,
pkt->data + pkt->len - tail_chop,
tail_chop);
@@ -1920,14 +1966,10 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio_dev *sdiodev,
return -ENOMEM;
if (skb_linearize(pkt))
return -ENOMEM;
- dat_buf = (u8 *)(pkt->data);
__skb_put(pkt, tail_pad);
}
- if (pkt_pad)
- return pkt->len + tail_chop;
- else
- return pkt->len - tail_pad;
+ return tail_pad;
}
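Worked example for the tail-padding math above (all values assumed): with bus->sgentry_align = 8 and a 512-byte block size, a 1518-byte last subframe gives tail_chop = 1518 % 8 = 6 and tail_pad = 2; if the chain length so far is total_len = 3060, then chain_pad = (3060 + 2) % 512 = 502, another 512 - 502 = 10 bytes are added, and tail_pad = 12 is returned, rounding the whole glom chain up to 3072 bytes (6 * 512).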
/**
@@ -1946,58 +1988,66 @@ static int
brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
uint chan)
{
- u16 head_pad, head_align;
+ u16 head_pad, total_len;
struct sk_buff *pkt_next;
- u8 *dat_buf;
- int err;
+ u8 txseq;
+ int ret;
struct brcmf_sdio_hdrinfo hd_info = {0};
- /* SDIO ADMA requires at least 32 bit alignment */
- head_align = 4;
- if (bus->sdiodev->pdata && bus->sdiodev->pdata->sd_head_align > 4)
- head_align = bus->sdiodev->pdata->sd_head_align;
+ txseq = bus->tx_seq;
+ total_len = 0;
+ skb_queue_walk(pktq, pkt_next) {
+ /* alignment packet inserted in previous
+ * loop cycle can be skipped as it is
+ * already properly aligned and does not
+ * need an sdpcm header.
+ */
+ if (*(u32 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
+ continue;
- pkt_next = pktq->next;
- dat_buf = (u8 *)(pkt_next->data);
+ /* align packet data pointer */
+ ret = brcmf_sdio_txpkt_hdalign(bus, pkt_next);
+ if (ret < 0)
+ return ret;
+ head_pad = (u16)ret;
+ if (head_pad)
+ memset(pkt_next->data, 0, head_pad + bus->tx_hdrlen);
- /* Check head padding */
- head_pad = ((unsigned long)dat_buf % head_align);
- if (head_pad) {
- if (skb_headroom(pkt_next) < head_pad) {
- bus->sdiodev->bus_if->tx_realloc++;
- head_pad = 0;
- if (skb_cow(pkt_next, head_pad))
- return -ENOMEM;
- }
- skb_push(pkt_next, head_pad);
- dat_buf = (u8 *)(pkt_next->data);
- memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
- }
+ total_len += pkt_next->len;
- if (bus->sdiodev->sg_support && pktq->qlen > 1) {
- err = brcmf_sdio_txpkt_prep_sg(bus->sdiodev, pktq,
- pkt_next, chan);
- if (err < 0)
- return err;
- hd_info.len = (u16)err;
- } else {
hd_info.len = pkt_next->len;
- }
-
- hd_info.channel = chan;
- hd_info.dat_offset = head_pad + bus->tx_hdrlen;
-
- /* Now fill the header */
- brcmf_sdio_hdpack(bus, dat_buf, &hd_info);
-
- if (BRCMF_BYTES_ON() &&
- ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
- (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
- brcmf_dbg_hex_dump(true, pkt_next, hd_info.len, "Tx Frame:\n");
- else if (BRCMF_HDRS_ON())
- brcmf_dbg_hex_dump(true, pkt_next, head_pad + bus->tx_hdrlen,
- "Tx Header:\n");
+ hd_info.lastfrm = skb_queue_is_last(pktq, pkt_next);
+ if (bus->txglom && pktq->qlen > 1) {
+ ret = brcmf_sdio_txpkt_prep_sg(bus, pktq,
+ pkt_next, total_len);
+ if (ret < 0)
+ return ret;
+ hd_info.tail_pad = (u16)ret;
+ total_len += (u16)ret;
+ }
+ hd_info.channel = chan;
+ hd_info.dat_offset = head_pad + bus->tx_hdrlen;
+ hd_info.seq_num = txseq++;
+
+ /* Now fill the header */
+ brcmf_sdio_hdpack(bus, pkt_next->data, &hd_info);
+
+ if (BRCMF_BYTES_ON() &&
+ ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
+ (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
+ brcmf_dbg_hex_dump(true, pkt_next, hd_info.len,
+ "Tx Frame:\n");
+ else if (BRCMF_HDRS_ON())
+ brcmf_dbg_hex_dump(true, pkt_next,
+ head_pad + bus->tx_hdrlen,
+ "Tx Header:\n");
+ }
+ /* Hardware length tag of the first packet should be total
+ * length of the chain (including padding)
+ */
+ if (bus->txglom)
+ brcmf_sdio_update_hwhdr(pktq->next->data, total_len);
return 0;
}
@@ -2015,6 +2065,7 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
{
u8 *hdr;
u32 dat_offset;
+ u16 tail_pad;
u32 dummy_flags, chop_len;
struct sk_buff *pkt_next, *tmp, *pkt_prev;
@@ -2024,42 +2075,41 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
if (chop_len) {
pkt_prev = pkt_next->prev;
- memcpy(pkt_prev->data + pkt_prev->len,
- pkt_next->data, chop_len);
skb_put(pkt_prev, chop_len);
}
__skb_unlink(pkt_next, pktq);
brcmu_pkt_buf_free_skb(pkt_next);
} else {
- hdr = pkt_next->data + SDPCM_HWHDR_LEN;
+ hdr = pkt_next->data + bus->tx_hdrlen - SDPCM_SWHDR_LEN;
dat_offset = le32_to_cpu(*(__le32 *)hdr);
dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
SDPCM_DOFFSET_SHIFT;
skb_pull(pkt_next, dat_offset);
+ if (bus->txglom) {
+ tail_pad = le16_to_cpu(*(__le16 *)(hdr - 2));
+ skb_trim(pkt_next, pkt_next->len - tail_pad);
+ }
}
}
}
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
-static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
- uint chan)
+static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
+ uint chan)
{
int ret;
int i;
- struct sk_buff_head localq;
+ struct sk_buff *pkt_next, *tmp;
brcmf_dbg(TRACE, "Enter\n");
- __skb_queue_head_init(&localq);
- __skb_queue_tail(&localq, pkt);
- ret = brcmf_sdio_txpkt_prep(bus, &localq, chan);
+ ret = brcmf_sdio_txpkt_prep(bus, pktq, chan);
if (ret)
goto done;
sdio_claim_host(bus->sdiodev->func[1]);
- ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, &localq);
+ ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
bus->sdcnt.f2txdata++;
if (ret < 0) {
@@ -2068,57 +2118,71 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
ret);
bus->sdcnt.tx_sderrs++;
- brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, NULL);
+ brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
- hi = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCHI, NULL);
- lo = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ hi = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
}
-
}
sdio_release_host(bus->sdiodev->func[1]);
- if (ret == 0)
- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
done:
- brcmf_sdio_txpkt_postp(bus, &localq);
- __skb_dequeue_tail(&localq);
- brcmf_txcomplete(bus->sdiodev->dev, pkt, ret == 0);
+ brcmf_sdio_txpkt_postp(bus, pktq);
+ if (ret == 0)
+ bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP;
+ skb_queue_walk_safe(pktq, pkt_next, tmp) {
+ __skb_unlink(pkt_next, pktq);
+ brcmf_txcomplete(bus->sdiodev->dev, pkt_next, ret == 0);
+ }
return ret;
}
-static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
+static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt;
+ struct sk_buff_head pktq;
u32 intstatus = 0;
- int ret = 0, prec_out;
+ int ret = 0, prec_out, i;
uint cnt = 0;
- u8 tx_prec_map;
+ u8 tx_prec_map, pkt_num;
brcmf_dbg(TRACE, "Enter\n");
tx_prec_map = ~bus->flowcontrol;
/* Send frames until the limit or some other event */
- for (cnt = 0; (cnt < maxframes) && data_ok(bus); cnt++) {
+ for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
+ pkt_num = 1;
+ __skb_queue_head_init(&pktq);
+ if (bus->txglom)
+ pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
+ brcmf_sdio_txglomsz);
+ pkt_num = min_t(u32, pkt_num,
+ brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
spin_lock_bh(&bus->txqlock);
- pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
- if (pkt == NULL) {
- spin_unlock_bh(&bus->txqlock);
- break;
+ for (i = 0; i < pkt_num; i++) {
+ pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
+ &prec_out);
+ if (pkt == NULL)
+ break;
+ __skb_queue_tail(&pktq, pkt);
}
spin_unlock_bh(&bus->txqlock);
+ if (i == 0)
+ break;
- ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL);
+ ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
+ cnt += i;
/* In poll mode, need to check for other events */
if (!bus->intr && cnt) {
@@ -2146,7 +2210,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
return cnt;
}
-static void brcmf_sdbrcm_bus_stop(struct device *dev)
+static void brcmf_sdio_bus_stop(struct device *dev)
{
u32 local_hostintmask;
u8 saveclk;
@@ -2163,62 +2227,57 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
bus->watchdog_tsk = NULL;
}
- sdio_claim_host(bus->sdiodev->func[1]);
-
- /* Enable clock for device interrupts */
- brcmf_sdbrcm_bus_sleep(bus, false, false);
+ if (bus_if->state == BRCMF_BUS_DOWN) {
+ sdio_claim_host(sdiodev->func[1]);
+
+ /* Enable clock for device interrupts */
+ brcmf_sdio_bus_sleep(bus, false, false);
+
+ /* Disable and clear interrupts at the chip level also */
+ w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
+ local_hostintmask = bus->hostintmask;
+ bus->hostintmask = 0;
+
+ /* Force backplane clocks to assure F2 interrupt propagates */
+ saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ &err);
+ if (!err)
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ if (err)
+ brcmf_err("Failed to force clock for F2: err %d\n",
+ err);
- /* Disable and clear interrupts at the chip level also */
- w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
- local_hostintmask = bus->hostintmask;
- bus->hostintmask = 0;
+ /* Turn off the bus (F2), free any pending packets */
+ brcmf_dbg(INTR, "disable SDIO interrupts\n");
+ sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
- /* Change our idea of bus state */
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+ /* Clear any pending interrupts now that F2 is disabled */
+ w_sdreg32(bus, local_hostintmask,
+ offsetof(struct sdpcmd_regs, intstatus));
- /* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
- if (!err) {
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ sdio_release_host(sdiodev->func[1]);
}
- if (err)
- brcmf_err("Failed to force clock for F2: err %d\n", err);
-
- /* Turn off the bus (F2), free any pending packets */
- brcmf_dbg(INTR, "disable SDIO interrupts\n");
- brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, SDIO_FUNC_ENABLE_1,
- NULL);
-
- /* Clear any pending interrupts now that F2 is disabled */
- w_sdreg32(bus, local_hostintmask,
- offsetof(struct sdpcmd_regs, intstatus));
-
- /* Turn off the backplane clock (only) */
- brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
- sdio_release_host(bus->sdiodev->func[1]);
-
/* Clear the data packet queues */
brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
/* Clear any held glomming stuff */
if (bus->glomd)
brcmu_pkt_buf_free_skb(bus->glomd);
- brcmf_sdbrcm_free_glom(bus);
+ brcmf_sdio_free_glom(bus);
/* Clear rx control and wake any waiters */
spin_lock_bh(&bus->rxctl_lock);
bus->rxlen = 0;
spin_unlock_bh(&bus->rxctl_lock);
- brcmf_sdbrcm_dcmd_resp_wake(bus);
+ brcmf_sdio_dcmd_resp_wake(bus);
/* Reset some F2 state stuff */
bus->rxskip = false;
bus->tx_seq = bus->rx_seq = 0;
}
-static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
+static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
{
unsigned long flags;
@@ -2243,7 +2302,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
addr = bus->ci->c_inf[idx].base +
offsetof(struct sdpcmd_regs, intstatus);
- ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, false);
+ val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
bus->sdcnt.f1regdata++;
if (ret != 0)
val = 0;
@@ -2253,7 +2312,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
/* Clear interrupts */
if (val) {
- ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, true);
+ brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
bus->sdcnt.f1regdata++;
}
@@ -2267,7 +2326,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
return ret;
}
-static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
+static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
{
u32 newstatus = 0;
unsigned long intstatus;
@@ -2286,48 +2345,29 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
#ifdef DEBUG
/* Check for inconsistent device control */
- devctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_DEVICE_CTL, &err);
- if (err) {
- brcmf_err("error reading DEVCTL: %d\n", err);
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
- }
+ devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
#endif /* DEBUG */
/* Read CSR, if clock on switch to AVAIL, else ignore */
- clkctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
- if (err) {
- brcmf_err("error reading CSR: %d\n",
- err);
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
- }
+ clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
devctl, clkctl);
if (SBSDIO_HTAV(clkctl)) {
- devctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_DEVICE_CTL, &err);
- if (err) {
- brcmf_err("error reading DEVCTL: %d\n",
- err);
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
- }
+ devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
- if (err) {
- brcmf_err("error writing DEVCTL: %d\n",
- err);
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
- }
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
bus->clkstate = CLK_AVAIL;
}
}
/* Make sure backplane clock is on */
- brcmf_sdbrcm_bus_sleep(bus, false, true);
+ brcmf_sdio_bus_sleep(bus, false, true);
/* Pending interrupt indicates new device status */
if (atomic_read(&bus->ipend) > 0) {
@@ -2358,7 +2398,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
/* Handle host mailbox indication */
if (intstatus & I_HMB_HOST_INT) {
intstatus &= ~I_HMB_HOST_INT;
- intstatus |= brcmf_sdbrcm_hostmail(bus);
+ intstatus |= brcmf_sdio_hostmail(bus);
}
sdio_release_host(bus->sdiodev->func[1]);
@@ -2403,16 +2443,15 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
set_bit(n, (unsigned long *)&bus->intstatus.counter);
}
- brcmf_sdbrcm_clrintr(bus);
+ brcmf_sdio_clrintr(bus);
if (data_ok(bus) && bus->ctrl_frame_stat &&
(bus->clkstate == CLK_AVAIL)) {
int i;
sdio_claim_host(bus->sdiodev->func[1]);
- err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
- (u32) bus->ctrl_frame_len);
+ err = brcmf_sdiod_send_buf(bus->sdiodev, bus->ctrl_frame_buf,
+ (u32)bus->ctrl_frame_len);
if (err < 0) {
/* On failure, abort the command and
@@ -2421,20 +2460,20 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
err);
bus->sdcnt.tx_sderrs++;
- brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+ brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, &err);
bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
- hi = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCHI,
- &err);
- lo = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCLO,
- &err);
+ hi = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCHI,
+ &err);
+ lo = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCLO,
+ &err);
bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
@@ -2445,7 +2484,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
}
sdio_release_host(bus->sdiodev->func[1]);
bus->ctrl_frame_stat = false;
- brcmf_sdbrcm_wait_event_wakeup(bus);
+ brcmf_sdio_wait_event_wakeup(bus);
}
/* Send queued frames (limit 1 if rx may still be pending) */
else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
@@ -2453,13 +2492,12 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
&& data_ok(bus)) {
framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
txlimit;
- framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
+ framecnt = brcmf_sdio_sendfromq(bus, framecnt);
txlimit -= framecnt;
}
- if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
+ if (!brcmf_bus_ready(bus->sdiodev->bus_if) || (err != 0)) {
brcmf_err("failed backplane access over SDIO, halting operation\n");
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
atomic_set(&bus->intstatus, 0);
} else if (atomic_read(&bus->intstatus) ||
atomic_read(&bus->ipend) > 0 ||
@@ -2475,12 +2513,12 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
bus->activity = false;
brcmf_dbg(SDIO, "idle state\n");
sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_bus_sleep(bus, true, false);
+ brcmf_sdio_bus_sleep(bus, true, false);
sdio_release_host(bus->sdiodev->func[1]);
}
}
-static struct pktq *brcmf_sdbrcm_bus_gettxq(struct device *dev)
+static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
@@ -2489,7 +2527,7 @@ static struct pktq *brcmf_sdbrcm_bus_gettxq(struct device *dev)
return &bus->txq;
}
-static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
+static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
{
int ret = -EBADE;
uint datalen, prec;
@@ -2545,7 +2583,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
#ifdef DEBUG
#define CONSOLE_LINE_MAX 192
-static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
+static int brcmf_sdio_readconsole(struct brcmf_sdio *bus)
{
struct brcmf_console *c = &bus->console;
u8 line[CONSOLE_LINE_MAX], ch;
@@ -2558,8 +2596,8 @@ static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
/* Read console log struct */
addr = bus->console_addr + offsetof(struct rte_console, log_le);
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
- sizeof(c->log_le));
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
+ sizeof(c->log_le));
if (rv < 0)
return rv;
@@ -2584,7 +2622,7 @@ static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
/* Read the console buffer */
addr = le32_to_cpu(c->log_le.buf);
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
if (rv < 0)
return rv;
@@ -2622,14 +2660,13 @@ break2:
}
#endif /* DEBUG */
-static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
+static int brcmf_sdio_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
{
int i;
int ret;
bus->ctrl_frame_stat = false;
- ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, frame, len);
+ ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
if (ret < 0) {
/* On failure, abort the command and terminate the frame */
@@ -2637,18 +2674,18 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
ret);
bus->sdcnt.tx_sderrs++;
- brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+ brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, NULL);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
- hi = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCHI, NULL);
- lo = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ hi = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
bus->sdcnt.f1regdata += 2;
if (hi == 0 && lo == 0)
break;
@@ -2662,10 +2699,10 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
}
static int
-brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
+brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
u8 *frame;
- u16 len;
+ u16 len, pad;
uint retries = 0;
u8 doff = 0;
int ret = -1;
@@ -2681,41 +2718,45 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
len = (msglen += bus->tx_hdrlen);
/* Add alignment padding (optional for ctl frames) */
- doff = ((unsigned long)frame % BRCMF_SDALIGN);
+ doff = ((unsigned long)frame % bus->head_align);
if (doff) {
frame -= doff;
len += doff;
msglen += doff;
memset(frame, 0, doff + bus->tx_hdrlen);
}
- /* precondition: doff < BRCMF_SDALIGN */
+ /* precondition: doff < bus->head_align */
doff += bus->tx_hdrlen;
/* Round send length to next SDIO block */
+ pad = 0;
if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
- u16 pad = bus->blocksize - (len % bus->blocksize);
- if ((pad <= bus->roundup) && (pad < bus->blocksize))
- len += pad;
- } else if (len % BRCMF_SDALIGN) {
- len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
+ pad = bus->blocksize - (len % bus->blocksize);
+ if ((pad > bus->roundup) || (pad >= bus->blocksize))
+ pad = 0;
+ } else if (len % bus->head_align) {
+ pad = bus->head_align - (len % bus->head_align);
}
-
- /* Satisfy length-alignment requirements */
- if (len & (ALIGNMENT - 1))
- len = roundup(len, ALIGNMENT);
+ len += pad;
/* precondition: IS_ALIGNED((unsigned long)frame, 2) */
/* Make sure backplane clock is on */
sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_bus_sleep(bus, false, false);
+ brcmf_sdio_bus_sleep(bus, false, false);
sdio_release_host(bus->sdiodev->func[1]);
hd_info.len = (u16)msglen;
hd_info.channel = SDPCM_CONTROL_CHANNEL;
hd_info.dat_offset = doff;
+ hd_info.seq_num = bus->tx_seq;
+ hd_info.lastfrm = true;
+ hd_info.tail_pad = pad;
brcmf_sdio_hdpack(bus, frame, &hd_info);
+ if (bus->txglom)
+ brcmf_sdio_update_hwhdr(frame, len);
+
if (!data_ok(bus)) {
brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
bus->tx_max, bus->tx_seq);
@@ -2746,7 +2787,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
do {
sdio_claim_host(bus->sdiodev->func[1]);
- ret = brcmf_tx_frame(bus, frame, len);
+ ret = brcmf_sdio_tx_frame(bus, frame, len);
sdio_release_host(bus->sdiodev->func[1]);
} while (ret < 0 && retries++ < TXRETRIES);
}
@@ -2756,7 +2797,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
bus->activity = false;
sdio_claim_host(bus->sdiodev->func[1]);
brcmf_dbg(INFO, "idle\n");
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
+ brcmf_sdio_clkctl(bus, CLK_NONE, true);
sdio_release_host(bus->sdiodev->func[1]);
}
@@ -2790,8 +2831,8 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
* address of sdpcm_shared structure
*/
sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_bus_sleep(bus, false, false);
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
+ brcmf_sdio_bus_sleep(bus, false, false);
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
sdio_release_host(bus->sdiodev->func[1]);
if (rv < 0)
return rv;
@@ -2811,8 +2852,8 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
}
/* Read hndrte_shared structure */
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
- sizeof(struct sdpcm_shared_le));
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
+ sizeof(struct sdpcm_shared_le));
if (rv < 0)
return rv;
@@ -2848,22 +2889,22 @@ static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
/* obtain console information from device memory */
addr = sh->console_addr + offsetof(struct rte_console, log_le);
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
- (u8 *)&sh_val, sizeof(u32));
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
+ (u8 *)&sh_val, sizeof(u32));
if (rv < 0)
return rv;
console_ptr = le32_to_cpu(sh_val);
addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
- (u8 *)&sh_val, sizeof(u32));
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
+ (u8 *)&sh_val, sizeof(u32));
if (rv < 0)
return rv;
console_size = le32_to_cpu(sh_val);
addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
- (u8 *)&sh_val, sizeof(u32));
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
+ (u8 *)&sh_val, sizeof(u32));
if (rv < 0)
return rv;
console_index = le32_to_cpu(sh_val);
@@ -2877,8 +2918,8 @@ static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
/* obtain the console data from device */
conbuf[console_size] = '\0';
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
- console_size);
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
+ console_size);
if (rv < 0)
goto done;
@@ -2915,8 +2956,8 @@ static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
return 0;
}
- error = brcmf_sdio_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
- sizeof(struct brcmf_trap_info));
+ error = brcmf_sdiod_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
+ sizeof(struct brcmf_trap_info));
if (error < 0)
return error;
@@ -2959,14 +3000,14 @@ static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
sdio_claim_host(bus->sdiodev->func[1]);
if (sh->assert_file_addr != 0) {
- error = brcmf_sdio_ramrw(bus->sdiodev, false,
- sh->assert_file_addr, (u8 *)file, 80);
+ error = brcmf_sdiod_ramrw(bus->sdiodev, false,
+ sh->assert_file_addr, (u8 *)file, 80);
if (error < 0)
return error;
}
if (sh->assert_exp_addr != 0) {
- error = brcmf_sdio_ramrw(bus->sdiodev, false,
- sh->assert_exp_addr, (u8 *)expr, 80);
+ error = brcmf_sdiod_ramrw(bus->sdiodev, false,
+ sh->assert_exp_addr, (u8 *)expr, 80);
if (error < 0)
return error;
}
@@ -2978,7 +3019,7 @@ static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
return simple_read_from_buffer(data, count, &pos, buf, res);
}
-static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
+static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
{
int error;
struct sdpcm_shared sh;
@@ -2999,8 +3040,8 @@ static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
return 0;
}
-static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
- size_t count, loff_t *ppos)
+static int brcmf_sdio_died_dump(struct brcmf_sdio *bus, char __user *data,
+ size_t count, loff_t *ppos)
{
int error = 0;
struct sdpcm_shared sh;
@@ -3041,7 +3082,7 @@ static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
struct brcmf_sdio *bus = f->private_data;
int res;
- res = brcmf_sdbrcm_died_dump(bus, data, count, ppos);
+ res = brcmf_sdio_died_dump(bus, data, count, ppos);
if (res > 0)
*ppos += res;
return (ssize_t)res;
@@ -3066,7 +3107,7 @@ static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
}
#else
-static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
+static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
{
return 0;
}
@@ -3077,7 +3118,7 @@ static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
#endif /* DEBUG */
static int
-brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
+brcmf_sdio_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
{
int timeleft;
uint rxlen = 0;
@@ -3090,7 +3131,7 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
brcmf_dbg(TRACE, "Enter\n");
/* Wait until control frame is available */
- timeleft = brcmf_sdbrcm_dcmd_resp_wait(bus, &bus->rxlen, &pending);
+ timeleft = brcmf_sdio_dcmd_resp_wait(bus, &bus->rxlen, &pending);
spin_lock_bh(&bus->rxctl_lock);
rxlen = bus->rxlen;
@@ -3107,13 +3148,13 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
rxlen, msglen);
} else if (timeleft == 0) {
brcmf_err("resumed on timeout\n");
- brcmf_sdbrcm_checkdied(bus);
+ brcmf_sdio_checkdied(bus);
} else if (pending) {
brcmf_dbg(CTL, "cancelled\n");
return -ERESTARTSYS;
} else {
brcmf_dbg(CTL, "resumed for unknown reason?\n");
- brcmf_sdbrcm_checkdied(bus);
+ brcmf_sdio_checkdied(bus);
}
if (rxlen)
@@ -3124,46 +3165,69 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
return rxlen ? (int)rxlen : -ETIMEDOUT;
}
-static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
+#ifdef DEBUG
+static bool
+brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
+ u8 *ram_data, uint ram_sz)
{
- struct chip_info *ci = bus->ci;
-
- /* To enter download state, disable ARM and reset SOCRAM.
- * To exit download state, simply reset ARM (default is RAM boot).
- */
- if (enter) {
- bus->alp_only = true;
+ char *ram_cmp;
+ int err;
+ bool ret = true;
+ int address;
+ int offset;
+ int len;
- brcmf_sdio_chip_enter_download(bus->sdiodev, ci);
- } else {
- if (!brcmf_sdio_chip_exit_download(bus->sdiodev, ci, bus->vars,
- bus->varsz))
- return false;
+ /* read back and verify */
+ brcmf_dbg(INFO, "Compare RAM dl & ul at 0x%08x; size=%d\n", ram_addr,
+ ram_sz);
+ ram_cmp = kmalloc(MEMBLOCK, GFP_KERNEL);
+ /* no memory for the comparison buffer; skip verification */
+ if (!ram_cmp)
+ return true;
- /* Allow HT Clock now that the ARM is running. */
- bus->alp_only = false;
-
- bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD;
+ address = ram_addr;
+ offset = 0;
+ while (offset < ram_sz) {
+ len = ((offset + MEMBLOCK) < ram_sz) ? MEMBLOCK :
+ ram_sz - offset;
+ err = brcmf_sdiod_ramrw(sdiodev, false, address, ram_cmp, len);
+ if (err) {
+ brcmf_err("error %d on reading %d membytes at 0x%08x\n",
+ err, len, address);
+ ret = false;
+ break;
+ } else if (memcmp(ram_cmp, &ram_data[offset], len)) {
+ brcmf_err("Downloaded RAM image is corrupted, block offset is %d, len is %d\n",
+ offset, len);
+ ret = false;
+ break;
+ }
+ offset += len;
+ address += len;
}
+ kfree(ram_cmp);
+
+ return ret;
+}
+#else /* DEBUG */
+static bool
+brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
+ u8 *ram_data, uint ram_sz)
+{
return true;
}
+#endif /* DEBUG */
-static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
+static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
+ const struct firmware *fw)
{
- const struct firmware *fw;
int err;
int offset;
int address;
int len;
- fw = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_BIN);
- if (fw == NULL)
- return -ENOENT;
-
- if (brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4) !=
- BRCMF_MAX_CORENUM)
- memcpy(&bus->ci->rst_vec, fw->data, sizeof(bus->ci->rst_vec));
+ brcmf_dbg(TRACE, "Enter\n");
err = 0;
offset = 0;
@@ -3171,148 +3235,113 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
while (offset < fw->size) {
len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
fw->size - offset;
- err = brcmf_sdio_ramrw(bus->sdiodev, true, address,
- (u8 *)&fw->data[offset], len);
+ err = brcmf_sdiod_ramrw(bus->sdiodev, true, address,
+ (u8 *)&fw->data[offset], len);
if (err) {
brcmf_err("error %d on writing %d membytes at 0x%08x\n",
err, len, address);
- goto failure;
+ return err;
}
offset += len;
address += len;
}
-
-failure:
- release_firmware(fw);
+ if (!err)
+ if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
+ (u8 *)fw->data, fw->size))
+ err = -EIO;
return err;
}
-/*
- * ProcessVars:Takes a buffer of "<var>=<value>\n" lines read from a file
- * and ending in a NUL.
- * Removes carriage returns, empty lines, comment lines, and converts
- * newlines to NULs.
- * Shortens buffer as needed and pads with NULs. End of buffer is marked
- * by two NULs.
-*/
-
-static int brcmf_process_nvram_vars(struct brcmf_sdio *bus,
- const struct firmware *nv)
-{
- char *varbuf;
- char *dp;
- bool findNewline;
- int column;
- int ret = 0;
- uint buf_len, n, len;
-
- len = nv->size;
- varbuf = vmalloc(len);
- if (!varbuf)
- return -ENOMEM;
-
- memcpy(varbuf, nv->data, len);
- dp = varbuf;
-
- findNewline = false;
- column = 0;
-
- for (n = 0; n < len; n++) {
- if (varbuf[n] == 0)
- break;
- if (varbuf[n] == '\r')
- continue;
- if (findNewline && varbuf[n] != '\n')
- continue;
- findNewline = false;
- if (varbuf[n] == '#') {
- findNewline = true;
- continue;
- }
- if (varbuf[n] == '\n') {
- if (column == 0)
- continue;
- *dp++ = 0;
- column = 0;
- continue;
- }
- *dp++ = varbuf[n];
- column++;
- }
- buf_len = dp - varbuf;
- while (dp < varbuf + n)
- *dp++ = 0;
-
- kfree(bus->vars);
- /* roundup needed for download to device */
- bus->varsz = roundup(buf_len + 1, 4);
- bus->vars = kmalloc(bus->varsz, GFP_KERNEL);
- if (bus->vars == NULL) {
- bus->varsz = 0;
- ret = -ENOMEM;
- goto err;
- }
+static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
+ const struct firmware *nv)
+{
+ void *vars;
+ u32 varsz;
+ int address;
+ int err;
- /* copy the processed variables and add null termination */
- memcpy(bus->vars, varbuf, buf_len);
- bus->vars[buf_len] = 0;
-err:
- vfree(varbuf);
- return ret;
-}
+ brcmf_dbg(TRACE, "Enter\n");
-static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
-{
- const struct firmware *nv;
- int ret;
+ vars = brcmf_nvram_strip(nv, &varsz);
- nv = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
- if (nv == NULL)
- return -ENOENT;
+ if (vars == NULL)
+ return -EINVAL;
- ret = brcmf_process_nvram_vars(bus, nv);
+ address = bus->ci->ramsize - varsz + bus->ci->rambase;
+ err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz);
+ if (err)
+ brcmf_err("error %d on writing %d nvram bytes at 0x%08x\n",
+ err, varsz, address);
+ else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz))
+ err = -EIO;
- release_firmware(nv);
+ brcmf_nvram_free(vars);
- return ret;
+ return err;
}
-static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
+static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
{
- int bcmerror = -1;
+ int bcmerror = -EFAULT;
+ const struct firmware *fw;
+ u32 rstvec;
+
+ sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
/* Keep arm in reset */
- if (!brcmf_sdbrcm_download_state(bus, true)) {
- brcmf_err("error placing ARM core in reset\n");
+ brcmf_sdio_chip_enter_download(bus->sdiodev, bus->ci);
+
+ fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
+ if (fw == NULL) {
+ bcmerror = -ENOENT;
goto err;
}
- if (brcmf_sdbrcm_download_code_file(bus)) {
+ rstvec = get_unaligned_le32(fw->data);
+ brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
+
+ bcmerror = brcmf_sdio_download_code_file(bus, fw);
+ release_firmware(fw);
+ if (bcmerror) {
brcmf_err("dongle image file download failed\n");
goto err;
}
- if (brcmf_sdbrcm_download_nvram(bus)) {
+ fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
+ if (fw == NULL) {
+ bcmerror = -ENOENT;
+ goto err;
+ }
+
+ bcmerror = brcmf_sdio_download_nvram(bus, fw);
+ release_firmware(fw);
+ if (bcmerror) {
brcmf_err("dongle nvram file download failed\n");
goto err;
}
/* Take arm out of reset */
- if (!brcmf_sdbrcm_download_state(bus, false)) {
+ if (!brcmf_sdio_chip_exit_download(bus->sdiodev, bus->ci, rstvec)) {
brcmf_err("error getting out of ARM core reset\n");
goto err;
}
+ /* Allow HT Clock now that the ARM is running. */
+ brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_LOAD);
bcmerror = 0;
err:
+ brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
+ sdio_release_host(bus->sdiodev->func[1]);
return bcmerror;
}
-static bool brcmf_sdbrcm_sr_capable(struct brcmf_sdio *bus)
+static bool brcmf_sdio_sr_capable(struct brcmf_sdio *bus)
{
- u32 addr, reg;
+ u32 addr, reg, pmu_cc3_mask = ~0;
+ int err;
brcmf_dbg(TRACE, "Enter\n");
@@ -3320,49 +3349,61 @@ static bool brcmf_sdbrcm_sr_capable(struct brcmf_sdio *bus)
if (bus->ci->pmurev < 17)
return false;
- /* read PMU chipcontrol register 3*/
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
- brcmf_sdio_regwl(bus->sdiodev, addr, 3, NULL);
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
- reg = brcmf_sdio_regrl(bus->sdiodev, addr, NULL);
+ switch (bus->ci->chip) {
+ case BCM43241_CHIP_ID:
+ case BCM4335_CHIP_ID:
+ case BCM4339_CHIP_ID:
+ /* read PMU chipcontrol register 3 */
+ addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
+ brcmf_sdiod_regwl(bus->sdiodev, addr, 3, NULL);
+ addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
+ reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
+ return (reg & pmu_cc3_mask) != 0;
+ default:
+ addr = CORE_CC_REG(bus->ci->c_inf[0].base, pmucapabilities_ext);
+ reg = brcmf_sdiod_regrl(bus->sdiodev, addr, &err);
+ if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
+ return false;
- return (bool)reg;
+ addr = CORE_CC_REG(bus->ci->c_inf[0].base, retention_ctl);
+ reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
+ return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
+ PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
+ }
}
-static void brcmf_sdbrcm_sr_init(struct brcmf_sdio *bus)
+static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
{
int err = 0;
u8 val;
brcmf_dbg(TRACE, "Enter\n");
- val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
- &err);
+ val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
if (err) {
brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
return;
}
val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
- val, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
if (err) {
brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
return;
}
/* Add CMD14 Support */
- brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
- (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
- SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
- &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
+ (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
+ SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
+ &err);
if (err) {
brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
return;
}
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- SBSDIO_FORCE_HT, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_FORCE_HT, &err);
if (err) {
brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
return;
@@ -3374,7 +3415,7 @@ static void brcmf_sdbrcm_sr_init(struct brcmf_sdio *bus)
}
/* enable KSO bit */
-static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
+static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
{
u8 val;
int err = 0;
@@ -3385,8 +3426,7 @@ static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
if (bus->ci->c_inf[1].rev < 12)
return 0;
- val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- &err);
+ val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
if (err) {
brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
return err;
@@ -3395,8 +3435,8 @@ static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- val, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ val, &err);
if (err) {
brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
return err;
@@ -3407,31 +3447,70 @@ static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
}
-static bool
-brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
+static int brcmf_sdio_bus_preinit(struct device *dev)
{
- bool ret;
-
- sdio_claim_host(bus->sdiodev->func[1]);
-
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ struct brcmf_sdio *bus = sdiodev->bus;
+ uint pad_size;
+ u32 value;
+ u8 idx;
+ int err;
- ret = _brcmf_sdbrcm_download_firmware(bus) == 0;
+ /* the commands below use the terms tx and rx from
+ * a device perspective, i.e. bus:txglom affects the
+ * bus transfers from device to host.
+ */
+ idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+ if (bus->ci->c_inf[idx].rev < 12) {
+ /* for sdio core rev < 12, disable txgloming */
+ value = 0;
+ err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
+ sizeof(u32));
+ } else {
+ /* otherwise, set txglomalign */
+ value = 4;
+ if (sdiodev->pdata)
+ value = sdiodev->pdata->sd_sgentry_align;
+ /* SDIO ADMA requires at least 32 bit alignment */
+ value = max_t(u32, value, 4);
+ err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
+ sizeof(u32));
+ }
- brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+ if (err < 0)
+ goto done;
- sdio_release_host(bus->sdiodev->func[1]);
+ bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
+ if (sdiodev->sg_support) {
+ bus->txglom = false;
+ value = 1;
+ pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
+ bus->txglom_sgpad = brcmu_pkt_buf_get_skb(pad_size);
+ if (!bus->txglom_sgpad)
+ brcmf_err("allocating txglom padding skb failed, reduced performance\n");
+
+ err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
+ &value, sizeof(u32));
+ if (err < 0) {
+ /* bus:rxglom is allowed to fail */
+ err = 0;
+ } else {
+ bus->txglom = true;
+ bus->tx_hdrlen += SDPCM_HWEXT_LEN;
+ }
+ }
+ brcmf_bus_add_txhdrlen(bus->sdiodev->dev, bus->tx_hdrlen);
- return ret;
+done:
+ return err;
}
-static int brcmf_sdbrcm_bus_init(struct device *dev)
+static int brcmf_sdio_bus_init(struct device *dev)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
- unsigned long timeout;
- u8 ready, enable;
int err, ret = 0;
u8 saveclk;
@@ -3439,8 +3518,11 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
/* try to download image and nvram to the dongle */
if (bus_if->state == BRCMF_BUS_DOWN) {
- if (!(brcmf_sdbrcm_download_firmware(bus)))
- return -1;
+ bus->alp_only = true;
+ err = brcmf_sdio_download_firmware(bus);
+ if (err)
+ return err;
+ bus->alp_only = false;
}
if (!bus->sdiodev->bus_if->drvr)
@@ -3448,21 +3530,21 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
/* Start the watchdog timer */
bus->sdcnt.tickcnt = 0;
- brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
sdio_claim_host(bus->sdiodev->func[1]);
/* Make sure backplane clock is on, needed to generate F2 interrupt */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+ brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
if (bus->clkstate != CLK_AVAIL)
goto exit;
/* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ saveclk = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (!err) {
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
}
if (err) {
brcmf_err("Failed to force clock for F2: err %d\n", err);
@@ -3472,56 +3554,42 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
/* Enable function 2 (frame transfers) */
w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
offsetof(struct sdpcmd_regs, tosbmailboxdata));
- enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
-
- brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
+ err = sdio_enable_func(bus->sdiodev->func[SDIO_FUNC_2]);
- timeout = jiffies + msecs_to_jiffies(BRCMF_WAIT_F2RDY);
- ready = 0;
- while (enable != ready) {
- ready = brcmf_sdio_regrb(bus->sdiodev,
- SDIO_CCCR_IORx, NULL);
- if (time_after(jiffies, timeout))
- break;
- else if (time_after(jiffies, timeout - BRCMF_WAIT_F2RDY + 50))
- /* prevent busy waiting if it takes too long */
- msleep_interruptible(20);
- }
- brcmf_dbg(INFO, "enable 0x%02x, ready 0x%02x\n", enable, ready);
+ brcmf_dbg(INFO, "enable F2: err=%d\n", err);
/* If F2 successfully enabled, set core and enable interrupts */
- if (ready == enable) {
+ if (!err) {
/* Set up the interrupt mask and enable interrupts */
bus->hostintmask = HOSTINTMASK;
w_sdreg32(bus, bus->hostintmask,
offsetof(struct sdpcmd_regs, hostintmask));
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
} else {
/* Disable F2 again */
- enable = SDIO_FUNC_ENABLE_1;
- brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
+ sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
ret = -ENODEV;
}
- if (brcmf_sdbrcm_sr_capable(bus)) {
- brcmf_sdbrcm_sr_init(bus);
+ if (brcmf_sdio_sr_capable(bus)) {
+ brcmf_sdio_sr_init(bus);
} else {
/* Restore previous clock setting */
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- saveclk, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ saveclk, &err);
}
if (ret == 0) {
- ret = brcmf_sdio_intr_register(bus->sdiodev);
+ ret = brcmf_sdiod_intr_register(bus->sdiodev);
if (ret != 0)
brcmf_err("intr register failed:%d\n", ret);
}
/* If we didn't come up, turn off backplane clock */
- if (bus_if->state != BRCMF_BUS_DATA)
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+ if (ret != 0)
+ brcmf_sdio_clkctl(bus, CLK_NONE, false);
exit:
sdio_release_host(bus->sdiodev->func[1]);
@@ -3529,10 +3597,8 @@ exit:
return ret;
}
-void brcmf_sdbrcm_isr(void *arg)
+void brcmf_sdio_isr(struct brcmf_sdio *bus)
{
- struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
-
brcmf_dbg(TRACE, "Enter\n");
if (!bus) {
@@ -3540,7 +3606,7 @@ void brcmf_sdbrcm_isr(void *arg)
return;
}
- if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
+ if (!brcmf_bus_ready(bus->sdiodev->bus_if)) {
brcmf_err("bus is down. we have nothing to do\n");
return;
}
@@ -3551,7 +3617,6 @@ void brcmf_sdbrcm_isr(void *arg)
else
if (brcmf_sdio_intr_rstatus(bus)) {
brcmf_err("failed backplane access\n");
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
/* Disable additional interrupts (is this needed now)? */
@@ -3562,7 +3627,7 @@ void brcmf_sdbrcm_isr(void *arg)
queue_work(bus->brcmf_wq, &bus->datawork);
}
-static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
+static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
{
#ifdef DEBUG
struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
@@ -3586,9 +3651,9 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
u8 devpend;
sdio_claim_host(bus->sdiodev->func[1]);
- devpend = brcmf_sdio_regrb(bus->sdiodev,
- SDIO_CCCR_INTx,
- NULL);
+ devpend = brcmf_sdiod_regrb(bus->sdiodev,
+ SDIO_CCCR_INTx,
+ NULL);
sdio_release_host(bus->sdiodev->func[1]);
intstatus =
devpend & (INTR_STATUS_FUNC1 |
@@ -3618,8 +3683,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
bus->console.count -= bus->console_interval;
sdio_claim_host(bus->sdiodev->func[1]);
/* Make sure backplane clock is on */
- brcmf_sdbrcm_bus_sleep(bus, false, false);
- if (brcmf_sdbrcm_readconsole(bus) < 0)
+ brcmf_sdio_bus_sleep(bus, false, false);
+ if (brcmf_sdio_readconsole(bus) < 0)
/* stop on error */
bus->console_interval = 0;
sdio_release_host(bus->sdiodev->func[1]);
@@ -3633,11 +3698,11 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
bus->idlecount = 0;
if (bus->activity) {
bus->activity = false;
- brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
} else {
brcmf_dbg(SDIO, "idle\n");
sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_bus_sleep(bus, true, false);
+ brcmf_sdio_bus_sleep(bus, true, false);
sdio_release_host(bus->sdiodev->func[1]);
}
}
@@ -3652,38 +3717,13 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
datawork);
while (atomic_read(&bus->dpc_tskcnt)) {
- brcmf_sdbrcm_dpc(bus);
+ brcmf_sdio_dpc(bus);
atomic_dec(&bus->dpc_tskcnt);
}
}
-static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- kfree(bus->rxbuf);
- bus->rxctl = bus->rxbuf = NULL;
- bus->rxlen = 0;
-}
-
-static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- if (bus->sdiodev->bus_if->maxctl) {
- bus->rxblen =
- roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
- ALIGNMENT) + BRCMF_SDALIGN;
- bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
- if (!(bus->rxbuf))
- return false;
- }
-
- return true;
-}
-
static bool
-brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
+brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
{
u8 clkctl = 0;
int err = 0;
@@ -3691,23 +3731,21 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
u32 reg_val;
u32 drivestrength;
- bus->alp_only = true;
-
sdio_claim_host(bus->sdiodev->func[1]);
pr_debug("F1 signature read @0x18000000=0x%4x\n",
- brcmf_sdio_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
+ brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
/*
* Force PLL off until brcmf_sdio_chip_attach()
* programs PLL control regs
*/
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- BRCMF_INIT_CLKCTL1, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ BRCMF_INIT_CLKCTL1, &err);
if (!err)
- clkctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
@@ -3715,12 +3753,17 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
goto fail;
}
- if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) {
+ /* SDIO register access works so moving
+ * state from UNKNOWN to DOWN.
+ */
+ brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN);
+
+ if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci)) {
brcmf_err("brcmf_sdio_chip_attach failed!\n");
goto fail;
}
- if (brcmf_sdbrcm_kso_init(bus)) {
+ if (brcmf_sdio_kso_init(bus)) {
brcmf_err("error enabling KSO\n");
goto fail;
}
@@ -3739,33 +3782,33 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
}
/* Set card control so an SDIO card reset does a WLAN backplane reset */
- reg_val = brcmf_sdio_regrb(bus->sdiodev,
- SDIO_CCCR_BRCM_CARDCTRL, &err);
+ reg_val = brcmf_sdiod_regrb(bus->sdiodev,
+ SDIO_CCCR_BRCM_CARDCTRL, &err);
if (err)
goto fail;
reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
- brcmf_sdio_regwb(bus->sdiodev,
- SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
+ brcmf_sdiod_regwb(bus->sdiodev,
+ SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
if (err)
goto fail;
/* set PMUControl so a backplane reset does PMU state reload */
reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
pmucontrol);
- reg_val = brcmf_sdio_regrl(bus->sdiodev,
- reg_addr,
- &err);
+ reg_val = brcmf_sdiod_regrl(bus->sdiodev,
+ reg_addr,
+ &err);
if (err)
goto fail;
reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
- brcmf_sdio_regwl(bus->sdiodev,
- reg_addr,
- reg_val,
- &err);
+ brcmf_sdiod_regwl(bus->sdiodev,
+ reg_addr,
+ reg_val,
+ &err);
if (err)
goto fail;
@@ -3774,9 +3817,13 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
+ /* allocate header buffer */
+ bus->hdrbuf = kzalloc(MAX_HDR_READ + bus->head_align, GFP_KERNEL);
+ if (!bus->hdrbuf)
+ return false;
/* Locate an appropriately-aligned portion of hdrbuf */
bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
- BRCMF_SDALIGN);
+ bus->head_align);
/* Set the poll and/or interrupt flags */
bus->intr = true;
@@ -3791,42 +3838,8 @@ fail:
return false;
}
-static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- sdio_claim_host(bus->sdiodev->func[1]);
-
- /* Disable F2 to clear any intermediate frame state on the dongle */
- brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx,
- SDIO_FUNC_ENABLE_1, NULL);
-
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
- bus->rxflow = false;
-
- /* Done with backplane-dependent accesses, can drop clock... */
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
-
- sdio_release_host(bus->sdiodev->func[1]);
-
- /* ...and initialize clock/power states */
- bus->clkstate = CLK_SDONLY;
- bus->idletime = BRCMF_IDLE_INTERVAL;
- bus->idleclock = BRCMF_IDLE_ACTIVE;
-
- /* Query the F2 block size, set roundup accordingly */
- bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
- bus->roundup = min(max_roundup, bus->blocksize);
-
- /* SR state */
- bus->sleeping = false;
- bus->sr_enabled = false;
-
- return true;
-}
-
static int
-brcmf_sdbrcm_watchdog_thread(void *data)
+brcmf_sdio_watchdog_thread(void *data)
{
struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
@@ -3836,7 +3849,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
if (kthread_should_stop())
break;
if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
- brcmf_sdbrcm_bus_watchdog(bus);
+ brcmf_sdio_bus_watchdog(bus);
/* Count the tick for reference */
bus->sdcnt.tickcnt++;
} else
@@ -3846,7 +3859,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
}
static void
-brcmf_sdbrcm_watchdog(unsigned long data)
+brcmf_sdio_watchdog(unsigned long data)
{
struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
@@ -3859,73 +3872,23 @@ brcmf_sdbrcm_watchdog(unsigned long data)
}
}
-static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- if (bus->ci) {
- sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
- sdio_release_host(bus->sdiodev->func[1]);
- brcmf_sdio_chip_detach(&bus->ci);
- if (bus->vars && bus->varsz)
- kfree(bus->vars);
- bus->vars = NULL;
- }
-
- brcmf_dbg(TRACE, "Disconnected\n");
-}
-
-/* Detach and free everything */
-static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- if (bus) {
- /* De-register interrupt handler */
- brcmf_sdio_intr_unregister(bus->sdiodev);
-
- cancel_work_sync(&bus->datawork);
- if (bus->brcmf_wq)
- destroy_workqueue(bus->brcmf_wq);
-
- if (bus->sdiodev->bus_if->drvr) {
- brcmf_detach(bus->sdiodev->dev);
- brcmf_sdbrcm_release_dongle(bus);
- }
-
- brcmf_sdbrcm_release_malloc(bus);
-
- kfree(bus);
- }
-
- brcmf_dbg(TRACE, "Disconnected\n");
-}
-
static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
- .stop = brcmf_sdbrcm_bus_stop,
- .init = brcmf_sdbrcm_bus_init,
- .txdata = brcmf_sdbrcm_bus_txdata,
- .txctl = brcmf_sdbrcm_bus_txctl,
- .rxctl = brcmf_sdbrcm_bus_rxctl,
- .gettxq = brcmf_sdbrcm_bus_gettxq,
+ .stop = brcmf_sdio_bus_stop,
+ .preinit = brcmf_sdio_bus_preinit,
+ .init = brcmf_sdio_bus_init,
+ .txdata = brcmf_sdio_bus_txdata,
+ .txctl = brcmf_sdio_bus_txctl,
+ .rxctl = brcmf_sdio_bus_rxctl,
+ .gettxq = brcmf_sdio_bus_gettxq,
};
-void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
+struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
{
int ret;
struct brcmf_sdio *bus;
- struct brcmf_bus_dcmd *dlst;
- u32 dngl_txglom;
- u32 txglomalign = 0;
- u8 idx;
brcmf_dbg(TRACE, "Enter\n");
- /* We make an assumption about address window mappings:
- * regsva == SI_ENUM_BASE*/
-
/* Allocate private bus interface state */
bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
if (!bus)
@@ -3939,6 +3902,18 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->txminmax = BRCMF_TXMINMAX;
bus->tx_seq = SDPCM_SEQ_WRAP - 1;
+ /* platform specific configuration:
+ * alignments must be at least 4 bytes for ADMA
+ */
+ bus->head_align = ALIGNMENT;
+ bus->sgentry_align = ALIGNMENT;
+ if (sdiodev->pdata) {
+ if (sdiodev->pdata->sd_head_align > ALIGNMENT)
+ bus->head_align = sdiodev->pdata->sd_head_align;
+ if (sdiodev->pdata->sd_sgentry_align > ALIGNMENT)
+ bus->sgentry_align = sdiodev->pdata->sd_sgentry_align;
+ }
+
INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
if (bus->brcmf_wq == NULL) {
@@ -3947,8 +3922,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
}
/* attempt to attach to the dongle */
- if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
- brcmf_err("brcmf_sdbrcm_probe_attach failed\n");
+ if (!(brcmf_sdio_probe_attach(bus))) {
+ brcmf_err("brcmf_sdio_probe_attach failed\n");
goto fail;
}
@@ -3960,11 +3935,11 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
/* Set up the watchdog timer */
init_timer(&bus->timer);
bus->timer.data = (unsigned long)bus;
- bus->timer.function = brcmf_sdbrcm_watchdog;
+ bus->timer.function = brcmf_sdio_watchdog;
/* Initialize watchdog thread */
init_completion(&bus->watchdog_wait);
- bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
+ bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
bus, "brcmf_watchdog");
if (IS_ERR(bus->watchdog_tsk)) {
pr_warn("brcmf_watchdog thread failed to start\n");
@@ -3983,50 +3958,52 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
/* Attach to the common layer, reserve hdr space */
- ret = brcmf_attach(bus->tx_hdrlen, bus->sdiodev->dev);
+ ret = brcmf_attach(bus->sdiodev->dev);
if (ret != 0) {
brcmf_err("brcmf_attach failed\n");
goto fail;
}
/* Allocate buffers */
- if (!(brcmf_sdbrcm_probe_malloc(bus))) {
- brcmf_err("brcmf_sdbrcm_probe_malloc failed\n");
- goto fail;
+ if (bus->sdiodev->bus_if->maxctl) {
+ bus->rxblen =
+ roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
+ ALIGNMENT) + bus->head_align;
+ bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
+ if (!(bus->rxbuf)) {
+ brcmf_err("rxbuf allocation failed\n");
+ goto fail;
+ }
}
- if (!(brcmf_sdbrcm_probe_init(bus))) {
- brcmf_err("brcmf_sdbrcm_probe_init failed\n");
- goto fail;
- }
+ sdio_claim_host(bus->sdiodev->func[1]);
+
+ /* Disable F2 to clear any intermediate frame state on the dongle */
+ sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
+
+ bus->rxflow = false;
+
+ /* Done with backplane-dependent accesses, can drop clock... */
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+ sdio_release_host(bus->sdiodev->func[1]);
+
+ /* ...and initialize clock/power states */
+ bus->clkstate = CLK_SDONLY;
+ bus->idletime = BRCMF_IDLE_INTERVAL;
+ bus->idleclock = BRCMF_IDLE_ACTIVE;
+
+ /* Query the F2 block size, set roundup accordingly */
+ bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+ bus->roundup = min(max_roundup, bus->blocksize);
+
+ /* SR state */
+ bus->sleeping = false;
+ bus->sr_enabled = false;
brcmf_sdio_debugfs_create(bus);
brcmf_dbg(INFO, "completed!!\n");
- /* sdio bus core specific dcmd */
- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
- dlst = kzalloc(sizeof(struct brcmf_bus_dcmd), GFP_KERNEL);
- if (dlst) {
- if (bus->ci->c_inf[idx].rev < 12) {
- /* for sdio core rev < 12, disable txgloming */
- dngl_txglom = 0;
- dlst->name = "bus:txglom";
- dlst->param = (char *)&dngl_txglom;
- dlst->param_len = sizeof(u32);
- } else {
- /* otherwise, set txglomalign */
- if (sdiodev->pdata)
- txglomalign = sdiodev->pdata->sd_sgentry_align;
- /* SDIO ADMA requires at least 32 bit alignment */
- if (txglomalign < 4)
- txglomalign = 4;
- dlst->name = "bus:txglomalign";
- dlst->param = (char *)&txglomalign;
- dlst->param_len = sizeof(u32);
- }
- list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);
- }
-
/* if firmware path present try to download and bring up bus */
ret = brcmf_bus_start(bus->sdiodev->dev);
if (ret != 0) {
@@ -4037,24 +4014,55 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
return bus;
fail:
- brcmf_sdbrcm_release(bus);
+ brcmf_sdio_remove(bus);
return NULL;
}
-void brcmf_sdbrcm_disconnect(void *ptr)
+/* Detach and free everything */
+void brcmf_sdio_remove(struct brcmf_sdio *bus)
{
- struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
-
brcmf_dbg(TRACE, "Enter\n");
- if (bus)
- brcmf_sdbrcm_release(bus);
+ if (bus) {
+ /* De-register interrupt handler */
+ brcmf_sdiod_intr_unregister(bus->sdiodev);
+
+ cancel_work_sync(&bus->datawork);
+ if (bus->brcmf_wq)
+ destroy_workqueue(bus->brcmf_wq);
+
+ if (bus->sdiodev->bus_if->drvr) {
+ brcmf_detach(bus->sdiodev->dev);
+ }
+
+ if (bus->ci) {
+ if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
+ sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
+ /* Leave the device in state where it is
+ * 'quiet'. This is done by putting it in
+ * download_state which essentially resets
+ * all necessary cores.
+ */
+ msleep(20);
+ brcmf_sdio_chip_enter_download(bus->sdiodev,
+ bus->ci);
+ brcmf_sdio_clkctl(bus, CLK_NONE, false);
+ sdio_release_host(bus->sdiodev->func[1]);
+ }
+ brcmf_sdio_chip_detach(&bus->ci);
+ }
+
+ brcmu_pkt_buf_free_skb(bus->txglom_sgpad);
+ kfree(bus->rxbuf);
+ kfree(bus->hdrbuf);
+ kfree(bus);
+ }
brcmf_dbg(TRACE, "Disconnected\n");
}
-void
-brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
+void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
{
/* Totally stop the timer */
if (!wdtick && bus->wd_timer_valid) {
@@ -4065,7 +4073,7 @@ brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
}
/* don't start the wd until fw is loaded */
- if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN)
+ if (bus->sdiodev->bus_if->state != BRCMF_BUS_DATA)
return;
if (wdtick) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index 14bc24dc5bae..51b53a73d074 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -122,6 +122,52 @@ enum brcmf_fweh_event_code {
#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
#define BRCMF_EVENT_MSG_GROUP 0x04
+/* status field values in struct brcmf_event_msg */
+#define BRCMF_E_STATUS_SUCCESS 0
+#define BRCMF_E_STATUS_FAIL 1
+#define BRCMF_E_STATUS_TIMEOUT 2
+#define BRCMF_E_STATUS_NO_NETWORKS 3
+#define BRCMF_E_STATUS_ABORT 4
+#define BRCMF_E_STATUS_NO_ACK 5
+#define BRCMF_E_STATUS_UNSOLICITED 6
+#define BRCMF_E_STATUS_ATTEMPT 7
+#define BRCMF_E_STATUS_PARTIAL 8
+#define BRCMF_E_STATUS_NEWSCAN 9
+#define BRCMF_E_STATUS_NEWASSOC 10
+#define BRCMF_E_STATUS_11HQUIET 11
+#define BRCMF_E_STATUS_SUPPRESS 12
+#define BRCMF_E_STATUS_NOCHANS 13
+#define BRCMF_E_STATUS_CS_ABORT 15
+#define BRCMF_E_STATUS_ERROR 16
+
+/* reason field values in struct brcmf_event_msg */
+#define BRCMF_E_REASON_INITIAL_ASSOC 0
+#define BRCMF_E_REASON_LOW_RSSI 1
+#define BRCMF_E_REASON_DEAUTH 2
+#define BRCMF_E_REASON_DISASSOC 3
+#define BRCMF_E_REASON_BCNS_LOST 4
+#define BRCMF_E_REASON_MINTXRATE 9
+#define BRCMF_E_REASON_TXFAIL 10
+
+#define BRCMF_E_REASON_LINK_BSSCFG_DIS 4
+#define BRCMF_E_REASON_FAST_ROAM_FAILED 5
+#define BRCMF_E_REASON_DIRECTED_ROAM 6
+#define BRCMF_E_REASON_TSPEC_REJECTED 7
+#define BRCMF_E_REASON_BETTER_AP 8
+
+/* action field values for brcmf_ifevent */
+#define BRCMF_E_IF_ADD 1
+#define BRCMF_E_IF_DEL 2
+#define BRCMF_E_IF_CHANGE 3
+
+/* flag field values for brcmf_ifevent */
+#define BRCMF_E_IF_FLAG_NOIF 1
+
+/* role field values for brcmf_ifevent */
+#define BRCMF_E_IF_ROLE_STA 0
+#define BRCMF_E_IF_ROLE_AP 1
+#define BRCMF_E_IF_ROLE_WDS 2
+
/**
* definitions for event packet validation.
*/
@@ -160,6 +206,14 @@ struct brcmf_event_msg {
u8 bsscfgidx;
};
+struct brcmf_if_event {
+ u8 ifidx;
+ u8 action;
+ u8 flags;
+ u8 bssidx;
+ u8 role;
+};
+
typedef int (*brcmf_fweh_handler_t)(struct brcmf_if *ifp,
const struct brcmf_event_msg *evtmsg,
void *data);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
index 04f395930d86..22adbe311d20 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -27,6 +27,7 @@
#include "dhd_dbg.h"
#include "tracepoint.h"
#include "fwil.h"
+#include "proto.h"
#define MAX_HEX_DUMP_LEN 64
@@ -46,11 +47,9 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
if (data != NULL)
len = min_t(uint, len, BRCMF_DCMD_MAXLEN);
if (set)
- err = brcmf_proto_cdc_set_dcmd(drvr, ifp->ifidx, cmd, data,
- len);
+ err = brcmf_proto_set_dcmd(drvr, ifp->ifidx, cmd, data, len);
else
- err = brcmf_proto_cdc_query_dcmd(drvr, ifp->ifidx, cmd, data,
- len);
+ err = brcmf_proto_query_dcmd(drvr, ifp->ifidx, cmd, data, len);
if (err >= 0)
err = 0;
@@ -69,7 +68,7 @@ brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
- min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
err = brcmf_fil_cmd_data(ifp, cmd, data, len, true);
mutex_unlock(&ifp->drvr->proto_block);
@@ -87,7 +86,7 @@ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
- min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
mutex_unlock(&ifp->drvr->proto_block);
@@ -156,7 +155,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
- min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
buflen = brcmf_create_iovar(name, data, len, drvr->proto_buf,
sizeof(drvr->proto_buf));
@@ -196,7 +195,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
- min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
mutex_unlock(&drvr->proto_block);
return err;
@@ -279,7 +278,7 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
- min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len,
drvr->proto_buf, sizeof(drvr->proto_buf));
@@ -318,7 +317,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
}
brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
- min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
mutex_unlock(&drvr->proto_block);
return err;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
index 16eb8202fb1e..77eae86e55c2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -17,6 +17,67 @@
#ifndef _fwil_h_
#define _fwil_h_
+/*******************************************************************************
+ * Dongle command codes that are interpreted by firmware
+ ******************************************************************************/
+#define BRCMF_C_GET_VERSION 1
+#define BRCMF_C_UP 2
+#define BRCMF_C_DOWN 3
+#define BRCMF_C_SET_PROMISC 10
+#define BRCMF_C_GET_RATE 12
+#define BRCMF_C_GET_INFRA 19
+#define BRCMF_C_SET_INFRA 20
+#define BRCMF_C_GET_AUTH 21
+#define BRCMF_C_SET_AUTH 22
+#define BRCMF_C_GET_BSSID 23
+#define BRCMF_C_GET_SSID 25
+#define BRCMF_C_SET_SSID 26
+#define BRCMF_C_TERMINATED 28
+#define BRCMF_C_GET_CHANNEL 29
+#define BRCMF_C_SET_CHANNEL 30
+#define BRCMF_C_GET_SRL 31
+#define BRCMF_C_SET_SRL 32
+#define BRCMF_C_GET_LRL 33
+#define BRCMF_C_SET_LRL 34
+#define BRCMF_C_GET_RADIO 37
+#define BRCMF_C_SET_RADIO 38
+#define BRCMF_C_GET_PHYTYPE 39
+#define BRCMF_C_SET_KEY 45
+#define BRCMF_C_SET_PASSIVE_SCAN 49
+#define BRCMF_C_SCAN 50
+#define BRCMF_C_SCAN_RESULTS 51
+#define BRCMF_C_DISASSOC 52
+#define BRCMF_C_REASSOC 53
+#define BRCMF_C_SET_ROAM_TRIGGER 55
+#define BRCMF_C_SET_ROAM_DELTA 57
+#define BRCMF_C_GET_BCNPRD 75
+#define BRCMF_C_SET_BCNPRD 76
+#define BRCMF_C_GET_DTIMPRD 77
+#define BRCMF_C_SET_DTIMPRD 78
+#define BRCMF_C_SET_COUNTRY 84
+#define BRCMF_C_GET_PM 85
+#define BRCMF_C_SET_PM 86
+#define BRCMF_C_GET_CURR_RATESET 114
+#define BRCMF_C_GET_AP 117
+#define BRCMF_C_SET_AP 118
+#define BRCMF_C_GET_RSSI 127
+#define BRCMF_C_GET_WSEC 133
+#define BRCMF_C_SET_WSEC 134
+#define BRCMF_C_GET_PHY_NOISE 135
+#define BRCMF_C_GET_BSS_INFO 136
+#define BRCMF_C_GET_BANDLIST 140
+#define BRCMF_C_SET_SCB_TIMEOUT 158
+#define BRCMF_C_GET_PHYLIST 180
+#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185
+#define BRCMF_C_SET_SCAN_UNASSOC_TIME 187
+#define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON 201
+#define BRCMF_C_GET_VALID_CHANNELS 217
+#define BRCMF_C_GET_KEY_PRIMARY 235
+#define BRCMF_C_SET_KEY_PRIMARY 236
+#define BRCMF_C_SET_SCAN_PASSIVE_TIME 258
+#define BRCMF_C_GET_VAR 262
+#define BRCMF_C_SET_VAR 263
+
s32 brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index ecabb04f33c3..af17a5bc8b83 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -29,6 +29,24 @@
#define BRCMF_ARP_OL_HOST_AUTO_REPLY 0x00000004
#define BRCMF_ARP_OL_PEER_AUTO_REPLY 0x00000008
+#define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */
+#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0002
+
+#define BRCMF_STA_ASSOC 0x10 /* Associated */
+
+/* size of brcmf_scan_params not including variable length array */
+#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
+
+/* masks for channel and ssid count */
+#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
+#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
+
+/* primary (ie tx) key */
+#define BRCMF_PRIMARY_KEY (1 << 1)
+#define DOT11_BSSTYPE_ANY 2
+#define BRCMF_ESCAN_REQ_VERSION 1
+
+#define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */
enum brcmf_fil_p2p_if_types {
BRCMF_FIL_P2P_IF_CLIENT,
@@ -90,4 +108,290 @@ enum brcmf_tdls_manual_ep_ops {
BRCMF_TDLS_MANUAL_EP_DISCOVERY = 6
};
+/* Pattern matching filter. Specifies an offset within received packets to
+ * start matching, the pattern to match, the size of the pattern, and a bitmask
+ * that indicates which bits within the pattern should be matched.
+ */
+struct brcmf_pkt_filter_pattern_le {
+ /*
+ * Offset within received packet to start pattern matching.
+ * Offset '0' is the first byte of the ethernet header.
+ */
+ __le32 offset;
+ /* Size of the pattern. Bitmask must be the same size.*/
+ __le32 size_bytes;
+ /*
+ * Variable length mask and pattern data. mask starts at offset 0.
+ * Pattern immediately follows mask.
+ */
+ u8 mask_and_pattern[1];
+};
+
+/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
+struct brcmf_pkt_filter_le {
+ __le32 id; /* Unique filter id, specified by app. */
+ __le32 type; /* Filter type (WL_PKT_FILTER_TYPE_xxx). */
+ __le32 negate_match; /* Negate the result of filter matches */
+ union { /* Filter definitions */
+ struct brcmf_pkt_filter_pattern_le pattern; /* Filter pattern */
+ } u;
+};
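Illustration only, not part of the patch: the mask and the pattern are both size_bytes long and sit back to back in mask_and_pattern[], so a "pkt_filter_add" buffer is the fixed header plus twice the pattern size. A hedged sketch (helper name made up; the real driver also fills in a WL_PKT_FILTER_TYPE_xxx value for 'type', which is left at zero here):

        static struct brcmf_pkt_filter_le *
        example_build_pattern_filter(u32 id, u32 offset, const u8 *mask,
                                     const u8 *pattern, u32 size)
        {
                struct brcmf_pkt_filter_le *filter;
                size_t buflen;

                /* fixed part up to mask_and_pattern[], plus mask and pattern bytes */
                buflen = offsetof(struct brcmf_pkt_filter_le,
                                  u.pattern.mask_and_pattern) + 2 * size;
                filter = kzalloc(buflen, GFP_KERNEL);
                if (!filter)
                        return NULL;

                filter->id = cpu_to_le32(id);
                filter->negate_match = cpu_to_le32(0);
                filter->u.pattern.offset = cpu_to_le32(offset);
                filter->u.pattern.size_bytes = cpu_to_le32(size);
                memcpy(filter->u.pattern.mask_and_pattern, mask, size);
                memcpy(filter->u.pattern.mask_and_pattern + size, pattern, size);
                return filter;
        }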
+
+/* IOVAR "pkt_filter_enable" parameter. */
+struct brcmf_pkt_filter_enable_le {
+ __le32 id; /* Unique filter id */
+ __le32 enable; /* Enable/disable bool */
+};
+
+/* BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in struct brcmf_scan_results)
+ */
+struct brcmf_bss_info_le {
+ __le32 version; /* version field */
+ __le32 length; /* byte length of data in this record,
+ * starting at version and including IEs
+ */
+ u8 BSSID[ETH_ALEN];
+ __le16 beacon_period; /* units are Kusec */
+ __le16 capability; /* Capability information */
+ u8 SSID_len;
+ u8 SSID[32];
+ struct {
+ __le32 count; /* # rates in this set */
+ u8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
+ } rateset; /* supported rates */
+ __le16 chanspec; /* chanspec for bss */
+ __le16 atim_window; /* units are Kusec */
+ u8 dtim_period; /* DTIM period */
+ __le16 RSSI; /* receive signal strength (in dBm) */
+ s8 phy_noise; /* noise (in dBm) */
+
+ u8 n_cap; /* BSS is 802.11N Capable */
+ /* 802.11N BSS Capabilities (based on HT_CAP_*): */
+ __le32 nbss_cap;
+ u8 ctl_ch; /* 802.11N BSS control channel number */
+ __le32 reserved32[1]; /* Reserved for expansion of BSS properties */
+ u8 flags; /* flags */
+ u8 reserved[3]; /* Reserved for expansion of BSS properties */
+ u8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */
+
+ __le16 ie_offset; /* offset at which IEs start, from beginning */
+ __le32 ie_length; /* byte length of Information Elements */
+ __le16 SNR; /* average SNR during frame reception */
+ /* Add new fields here */
+ /* variable length Information Elements */
+};
+
+struct brcm_rateset_le {
+ /* # rates in this set */
+ __le32 count;
+ /* rates in 500kbps units w/hi bit set if basic */
+ u8 rates[BRCMF_MAXRATES_IN_SET];
+};
+
+struct brcmf_ssid {
+ u32 SSID_len;
+ unsigned char SSID[32];
+};
+
+struct brcmf_ssid_le {
+ __le32 SSID_len;
+ unsigned char SSID[32];
+};
+
+struct brcmf_scan_params_le {
+ struct brcmf_ssid_le ssid_le; /* default: {0, ""} */
+ u8 bssid[ETH_ALEN]; /* default: bcast */
+ s8 bss_type; /* default: any,
+ * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+ */
+ u8 scan_type; /* flags, 0 use default */
+ __le32 nprobes; /* -1 use default, number of probes per channel */
+ __le32 active_time; /* -1 use default, dwell time per channel for
+ * active scanning
+ */
+ __le32 passive_time; /* -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ __le32 home_time; /* -1 use default, dwell time for the
+ * home channel between channel scans
+ */
+ __le32 channel_num; /* count of channels and ssids that follow
+ *
+ * low half is count of channels in
+ * channel_list, 0 means default (use all
+ * available channels)
+ *
+ * high half is entries in struct brcmf_ssid
+ * array that follows channel_list, aligned for
+ * s32 (4 bytes) meaning an odd channel count
+ * implies a 2-byte pad between end of
+ * channel_list and first ssid
+ *
+ * if ssid count is zero, single ssid in the
+ * fixed parameter portion is assumed, otherwise
+ * ssid in the fixed portion is ignored
+ */
+ __le16 channel_list[1]; /* list of chanspecs */
+};
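Illustration only (not in the patch): the channel_num packing described in the comment above can be written out with the BRCMF_SCAN_PARAMS_COUNT_MASK and BRCMF_SCAN_PARAMS_NSSID_SHIFT definitions from earlier in this header:

        static __le32 example_pack_channel_num(u32 n_channels, u32 n_ssids)
        {
                u32 v;

                /* low 16 bits: channel count, high 16 bits: SSID count */
                v = (n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK) |
                    (n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT);
                return cpu_to_le32(v);
        }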
+
+struct brcmf_scan_results {
+ u32 buflen;
+ u32 version;
+ u32 count;
+ struct brcmf_bss_info_le bss_info_le[];
+};
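For orientation only: a consumer of struct brcmf_scan_results is expected to honour the length and ie_offset fields of each record when stepping through the vector, as the comment on brcmf_bss_info_le demands. A hedged sketch of such a walk (not part of the patch):

        static void example_walk_scan_results(struct brcmf_scan_results *results)
        {
                struct brcmf_bss_info_le *bss = results->bss_info_le;
                u32 i;

                for (i = 0; i < results->count; i++) {
                        /* this record's IEs start ie_offset bytes from 'bss'
                         * and are ie_length bytes long
                         */
                        u8 *ies = (u8 *)bss + le16_to_cpu(bss->ie_offset);

                        /* ... parse le32_to_cpu(bss->ie_length) bytes at 'ies' ... */
                        (void)ies;

                        /* next record starts 'length' bytes after this one */
                        bss = (struct brcmf_bss_info_le *)
                                ((u8 *)bss + le32_to_cpu(bss->length));
                }
        }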
+
+struct brcmf_escan_params_le {
+ __le32 version;
+ __le16 action;
+ __le16 sync_id;
+ struct brcmf_scan_params_le params_le;
+};
+
+struct brcmf_escan_result_le {
+ __le32 buflen;
+ __le32 version;
+ __le16 sync_id;
+ __le16 bss_count;
+ struct brcmf_bss_info_le bss_info_le;
+};
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(struct brcmf_escan_result_le) - \
+ sizeof(struct brcmf_bss_info_le))
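A hedged illustration (not in the patch) of how WL_ESCAN_RESULTS_FIXED_SIZE is typically used: before touching the embedded bss_info record, a handler for escan event data can sanity-check the announced buffer length against the fixed header size and the bytes actually received:

        static bool example_escan_result_ok(const struct brcmf_escan_result_le *res,
                                            u32 datalen)
        {
                u32 buflen = le32_to_cpu(res->buflen);

                return datalen >= WL_ESCAN_RESULTS_FIXED_SIZE &&
                       buflen >= WL_ESCAN_RESULTS_FIXED_SIZE &&
                       buflen <= datalen;
        }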
+
+/* used for association with a specific BSSID and chanspec list */
+struct brcmf_assoc_params_le {
+ /* 00:00:00:00:00:00: broadcast scan */
+ u8 bssid[ETH_ALEN];
+ /* 0: all available channels, otherwise count of chanspecs in
+ * chanspec_list */
+ __le32 chanspec_num;
+ /* list of chanspecs */
+ __le16 chanspec_list[1];
+};
+
+/* used for join with or without a specific bssid and channel list */
+struct brcmf_join_params {
+ struct brcmf_ssid_le ssid_le;
+ struct brcmf_assoc_params_le params_le;
+};
+
+/* scan params for extended join */
+struct brcmf_join_scan_params_le {
+ u8 scan_type; /* 0 use default, active or passive scan */
+ __le32 nprobes; /* -1 use default, nr of probes per channel */
+ __le32 active_time; /* -1 use default, dwell time per channel for
+ * active scanning
+ */
+ __le32 passive_time; /* -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ __le32 home_time; /* -1 use default, dwell time for the home
+ * channel between channel scans
+ */
+};
+
+/* extended join params */
+struct brcmf_ext_join_params_le {
+ struct brcmf_ssid_le ssid_le; /* {0, ""}: wildcard scan */
+ struct brcmf_join_scan_params_le scan_le;
+ struct brcmf_assoc_params_le assoc_le;
+};
+
+struct brcmf_wsec_key {
+ u32 index; /* key index */
+ u32 len; /* key length */
+ u8 data[WLAN_MAX_KEY_LEN]; /* key data */
+ u32 pad_1[18];
+ u32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+ u32 flags; /* misc flags */
+ u32 pad_2[3];
+ u32 iv_initialized; /* has IV been initialized already? */
+ u32 pad_3;
+ /* Rx IV */
+ struct {
+ u32 hi; /* upper 32 bits of IV */
+ u16 lo; /* lower 16 bits of IV */
+ } rxiv;
+ u32 pad_4[2];
+ u8 ea[ETH_ALEN]; /* per station */
+};
+
+/*
+ * dongle requires same struct as above but with fields in little endian order
+ */
+struct brcmf_wsec_key_le {
+ __le32 index; /* key index */
+ __le32 len; /* key length */
+ u8 data[WLAN_MAX_KEY_LEN]; /* key data */
+ __le32 pad_1[18];
+ __le32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+ __le32 flags; /* misc flags */
+ __le32 pad_2[3];
+ __le32 iv_initialized; /* has IV been initialized already? */
+ __le32 pad_3;
+ /* Rx IV */
+ struct {
+ __le32 hi; /* upper 32 bits of IV */
+ __le16 lo; /* lower 16 bits of IV */
+ } rxiv;
+ __le32 pad_4[2];
+ u8 ea[ETH_ALEN]; /* per station */
+};
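The two mirrored key structures imply a host-order to dongle-order conversion somewhere in the driver; a hedged sketch of that conversion follows (helper name made up, only the multi-byte fields need swapping, byte arrays are copied as-is):

        static void example_key_to_le(struct brcmf_wsec_key_le *le,
                                      const struct brcmf_wsec_key *key)
        {
                le->index = cpu_to_le32(key->index);
                le->len = cpu_to_le32(key->len);
                le->algo = cpu_to_le32(key->algo);
                le->flags = cpu_to_le32(key->flags);
                le->iv_initialized = cpu_to_le32(key->iv_initialized);
                le->rxiv.hi = cpu_to_le32(key->rxiv.hi);
                le->rxiv.lo = cpu_to_le16(key->rxiv.lo);
                memcpy(le->data, key->data, sizeof(le->data));
                memcpy(le->ea, key->ea, ETH_ALEN);
        }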
+
+/* Used to get specific STA parameters */
+struct brcmf_scb_val_le {
+ __le32 val;
+ u8 ea[ETH_ALEN];
+};
+
+/* channel encoding */
+struct brcmf_channel_info_le {
+ __le32 hw_channel;
+ __le32 target_channel;
+ __le32 scan_channel;
+};
+
+struct brcmf_sta_info_le {
+ __le16 ver; /* version of this struct */
+ __le16 len; /* length in bytes of this structure */
+ __le16 cap; /* sta's advertised capabilities */
+ __le32 flags; /* flags defined below */
+ __le32 idle; /* time since data pkt rx'd from sta */
+ u8 ea[ETH_ALEN]; /* Station address */
+ __le32 count; /* # rates in this set */
+ u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units */
+ /* w/hi bit set if basic */
+ __le32 in; /* seconds elapsed since associated */
+ __le32 listen_interval_inms; /* Min Listen interval in ms for STA */
+ __le32 tx_pkts; /* # of packets transmitted */
+ __le32 tx_failures; /* # of packets failed */
+ __le32 rx_ucast_pkts; /* # of unicast packets received */
+ __le32 rx_mcast_pkts; /* # of multicast packets received */
+ __le32 tx_rate; /* Rate of last successful tx frame */
+ __le32 rx_rate; /* Rate of last successful rx frame */
+ __le32 rx_decrypt_succeeds; /* # of packets decrypted successfully */
+ __le32 rx_decrypt_failures; /* # of packets that failed to decrypt */
+};
+
+struct brcmf_chanspec_list {
+ __le32 count; /* # of entries */
+ __le32 element[1]; /* variable length uint32 list */
+};
+
+/*
+ * WLC_E_PROBRESP_MSG
+ * WLC_E_P2P_PROBREQ_MSG
+ * WLC_E_ACTION_FRAME_RX
+ */
+struct brcmf_rx_mgmt_data {
+ __be16 version;
+ __be16 chanspec;
+ __be32 rssi;
+ __be32 mactime;
+ __be32 rate;
+};
+
#endif /* FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index d0cd0bf95c5a..c3e7d76dbf35 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -27,7 +27,6 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include "dhd.h"
-#include "dhd_proto.h"
#include "dhd_dbg.h"
#include "dhd_bus.h"
#include "fwil.h"
@@ -36,6 +35,7 @@
#include "fwsignal.h"
#include "p2p.h"
#include "wl_cfg80211.h"
+#include "proto.h"
/**
* DOC: Firmware Signalling
@@ -105,6 +105,7 @@ static struct {
};
#undef BRCMF_FWS_TLV_DEF
+
static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
{
int i;
@@ -123,6 +124,12 @@ static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
#endif /* DEBUG */
/*
+ * The PKTTAG tlv has additional bytes when firmware-signalling
+ * mode has REUSESEQ flag set.
+ */
+#define BRCMF_FWS_TYPE_SEQ_LEN 2
+
+/*
* flags used to enable tlv signalling from firmware.
*/
#define BRCMF_FWS_FLAGS_RSSI_SIGNALS 0x0001
@@ -147,8 +154,15 @@ static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
#define BRCMF_FWS_HTOD_FLAG_PKTFROMHOST 0x01
#define BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED 0x02
-#define BRCMF_FWS_RET_OK_NOSCHEDULE 0
-#define BRCMF_FWS_RET_OK_SCHEDULE 1
+#define BRCMF_FWS_RET_OK_NOSCHEDULE 0
+#define BRCMF_FWS_RET_OK_SCHEDULE 1
+
+#define BRCMF_FWS_MODE_REUSESEQ_SHIFT 3 /* seq reuse */
+#define BRCMF_FWS_MODE_SET_REUSESEQ(x, val) ((x) = \
+ ((x) & ~(1 << BRCMF_FWS_MODE_REUSESEQ_SHIFT)) | \
+ (((val) & 1) << BRCMF_FWS_MODE_REUSESEQ_SHIFT))
+#define BRCMF_FWS_MODE_GET_REUSESEQ(x) \
+ (((x) >> BRCMF_FWS_MODE_REUSESEQ_SHIFT) & 1)
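Worked example (illustration only): with BRCMF_FWS_MODE_REUSESEQ_SHIFT equal to 3, the SET macro first clears bit 3 of the mode word and then ORs in the requested value, so starting from mode == 0, BRCMF_FWS_MODE_SET_REUSESEQ(mode, 1) leaves mode == 0x08 and BRCMF_FWS_MODE_GET_REUSESEQ(mode) then yields 1.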
/**
* enum brcmf_fws_skb_state - indicates processing state of skb.
@@ -171,6 +185,7 @@ enum brcmf_fws_skb_state {
* @bus_flags: 2 bytes reserved for bus specific parameters
* @if_flags: holds interface index and packet related flags.
* @htod: host to device packet identifier (used in PKTTAG tlv).
+ * @htod_seq: original 16-bit sequence number for every suppressed packet.
* @state: transmit state of the packet.
* @mac: descriptor related to destination for this packet.
*
@@ -181,6 +196,7 @@ struct brcmf_skbuff_cb {
u16 bus_flags;
u16 if_flags;
u32 htod;
+ u16 htod_seq;
enum brcmf_fws_skb_state state;
struct brcmf_fws_mac_descriptor *mac;
};
@@ -257,6 +273,22 @@ struct brcmf_skbuff_cb {
BRCMF_SKB_HTOD_TAG_ ## field ## _MASK, \
BRCMF_SKB_HTOD_TAG_ ## field ## _SHIFT)
+#define BRCMF_SKB_HTOD_SEQ_FROMFW_MASK 0x2000
+#define BRCMF_SKB_HTOD_SEQ_FROMFW_SHIFT 13
+#define BRCMF_SKB_HTOD_SEQ_FROMDRV_MASK 0x1000
+#define BRCMF_SKB_HTOD_SEQ_FROMDRV_SHIFT 12
+#define BRCMF_SKB_HTOD_SEQ_NR_MASK 0x0fff
+#define BRCMF_SKB_HTOD_SEQ_NR_SHIFT 0
+
+#define brcmf_skb_htod_seq_set_field(skb, field, value) \
+ brcmu_maskset16(&(brcmf_skbcb(skb)->htod_seq), \
+ BRCMF_SKB_HTOD_SEQ_ ## field ## _MASK, \
+ BRCMF_SKB_HTOD_SEQ_ ## field ## _SHIFT, (value))
+#define brcmf_skb_htod_seq_get_field(skb, field) \
+ brcmu_maskget16(brcmf_skbcb(skb)->htod_seq, \
+ BRCMF_SKB_HTOD_SEQ_ ## field ## _MASK, \
+ BRCMF_SKB_HTOD_SEQ_ ## field ## _SHIFT)
+
#define BRCMF_FWS_TXSTAT_GENERATION_MASK 0x80000000
#define BRCMF_FWS_TXSTAT_GENERATION_SHIFT 31
#define BRCMF_FWS_TXSTAT_FLAGS_MASK 0x78000000
@@ -265,8 +297,8 @@ struct brcmf_skbuff_cb {
#define BRCMF_FWS_TXSTAT_FIFO_SHIFT 24
#define BRCMF_FWS_TXSTAT_HSLOT_MASK 0x00FFFF00
#define BRCMF_FWS_TXSTAT_HSLOT_SHIFT 8
-#define BRCMF_FWS_TXSTAT_PKTID_MASK 0x00FFFFFF
-#define BRCMF_FWS_TXSTAT_PKTID_SHIFT 0
+#define BRCMF_FWS_TXSTAT_FREERUN_MASK 0x000000FF
+#define BRCMF_FWS_TXSTAT_FREERUN_SHIFT 0
#define brcmf_txstatus_get_field(txs, field) \
brcmu_maskget32(txs, BRCMF_FWS_TXSTAT_ ## field ## _MASK, \
@@ -443,6 +475,7 @@ struct brcmf_fws_info {
unsigned long borrow_defer_timestamp;
bool bus_flow_blocked;
bool creditmap_received;
+ u8 mode;
};
/*
@@ -805,20 +838,23 @@ static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx)
brcmf_fws_hanger_cleanup(fws, matchfn, ifidx);
}
-static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
+static u8 brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
u8 *wlh;
u16 data_offset = 0;
u8 fillers;
__le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod);
+ __le16 pktseq = cpu_to_le16(brcmf_skbcb(skb)->htod_seq);
- brcmf_dbg(TRACE, "enter: %s, idx=%d pkttag=0x%08X, hslot=%d\n",
+ brcmf_dbg(TRACE, "enter: %s, idx=%d hslot=%d htod %X seq %X\n",
entry->name, brcmf_skb_if_flags_get_field(skb, INDEX),
- le32_to_cpu(pkttag), (le32_to_cpu(pkttag) >> 8) & 0xffff);
+ (le32_to_cpu(pkttag) >> 8) & 0xffff,
+ brcmf_skbcb(skb)->htod, brcmf_skbcb(skb)->htod_seq);
if (entry->send_tim_signal)
data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
-
+ if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode))
+ data_offset += BRCMF_FWS_TYPE_SEQ_LEN;
/* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN;
fillers = round_up(data_offset, 4) - data_offset;
@@ -830,7 +866,12 @@ static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
wlh[0] = BRCMF_FWS_TYPE_PKTTAG;
wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN;
memcpy(&wlh[2], &pkttag, sizeof(pkttag));
- wlh += BRCMF_FWS_TYPE_PKTTAG_LEN + 2;
+ if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode)) {
+ wlh[1] += BRCMF_FWS_TYPE_SEQ_LEN;
+ memcpy(&wlh[2 + BRCMF_FWS_TYPE_PKTTAG_LEN], &pktseq,
+ sizeof(pktseq));
+ }
+ wlh += wlh[1] + 2;
if (entry->send_tim_signal) {
entry->send_tim_signal = 0;
@@ -846,9 +887,7 @@ static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
if (fillers)
memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers);
- brcmf_proto_hdrpush(fws->drvr, brcmf_skb_if_flags_get_field(skb, INDEX),
- data_offset >> 2, skb);
- return 0;
+ return (u8)(data_offset >> 2);
}
static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
@@ -856,10 +895,11 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
int fifo, bool send_immediately)
{
struct sk_buff *skb;
- struct brcmf_bus *bus;
struct brcmf_skbuff_cb *skcb;
s32 err;
u32 len;
+ u8 data_offset;
+ int ifidx;
/* check delayedQ and suppressQ in one call using bitmap */
if (brcmu_pktq_mlen(&entry->psq, 3 << (fifo * 2)) == 0)
@@ -875,6 +915,7 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
/* create a dummy packet and send it. The traffic */
/* bitmap info will automatically be attached to that packet */
len = BRCMF_FWS_TYPE_PKTTAG_LEN + 2 +
+ BRCMF_FWS_TYPE_SEQ_LEN +
BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2 +
4 + fws->drvr->hdrlen;
skb = brcmu_pkt_buf_get_skb(len);
@@ -884,13 +925,13 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
skcb = brcmf_skbcb(skb);
skcb->mac = entry;
skcb->state = BRCMF_FWS_SKBSTATE_TIM;
- bus = fws->drvr->bus_if;
- err = brcmf_fws_hdrpush(fws, skb);
- if (err == 0) {
- brcmf_fws_unlock(fws);
- err = brcmf_bus_txdata(bus, skb);
- brcmf_fws_lock(fws);
- }
+ skcb->htod = 0;
+ skcb->htod_seq = 0;
+ data_offset = brcmf_fws_hdrpush(fws, skb);
+ ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
+ brcmf_fws_unlock(fws);
+ err = brcmf_proto_txdata(fws->drvr, ifidx, data_offset, skb);
+ brcmf_fws_lock(fws);
if (err)
brcmu_pkt_buf_free_skb(skb);
return true;
@@ -1172,8 +1213,13 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws,
{
int prec = 2 * fifo;
u32 *qfull_stat = &fws->stats.delayq_full_error;
-
struct brcmf_fws_mac_descriptor *entry;
+ struct pktq *pq;
+ struct sk_buff_head *queue;
+ struct sk_buff *p_head;
+ struct sk_buff *p_tail;
+ u32 fr_new;
+ u32 fr_compare;
entry = brcmf_skbcb(p)->mac;
if (entry == NULL) {
@@ -1185,9 +1231,55 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws,
if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) {
prec += 1;
qfull_stat = &fws->stats.supprq_full_error;
- }
- if (brcmu_pktq_penq(&entry->psq, prec, p) == NULL) {
+ /* Fix out-of-order delivery of frames. Don't assume the frame */
+ /* can be inserted at the end, but look for the correct position */
+ pq = &entry->psq;
+ if (pktq_full(pq) || pktq_pfull(pq, prec)) {
+ *qfull_stat += 1;
+ return -ENFILE;
+ }
+ queue = &pq->q[prec].skblist;
+
+ p_head = skb_peek(queue);
+ p_tail = skb_peek_tail(queue);
+ fr_new = brcmf_skb_htod_tag_get_field(p, FREERUN);
+
+ while (p_head != p_tail) {
+ fr_compare = brcmf_skb_htod_tag_get_field(p_tail,
+ FREERUN);
+ /* be sure to handle wrap of 256 */
+ if (((fr_new > fr_compare) &&
+ ((fr_new - fr_compare) < 128)) ||
+ ((fr_new < fr_compare) &&
+ ((fr_compare - fr_new) > 128)))
+ break;
+ p_tail = skb_queue_prev(queue, p_tail);
+ }
+ /* Position found. Determine what to do */
+ if (p_tail == NULL) {
+ /* empty list */
+ __skb_queue_tail(queue, p);
+ } else {
+ fr_compare = brcmf_skb_htod_tag_get_field(p_tail,
+ FREERUN);
+ if (((fr_new > fr_compare) &&
+ ((fr_new - fr_compare) < 128)) ||
+ ((fr_new < fr_compare) &&
+ ((fr_compare - fr_new) > 128))) {
+ /* After tail */
+ __skb_queue_after(queue, p_tail, p);
+ } else {
+ /* Before tail */
+ __skb_insert(p, p_tail->prev, p_tail, queue);
+ }
+ }
+
+ /* Complete the counters and statistics */
+ pq->len++;
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (u8) prec;
+ } else if (brcmu_pktq_penq(&entry->psq, prec, p) == NULL) {
*qfull_stat += 1;
return -ENFILE;
}
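For readability (not part of the patch), the wrap-aware ordering test used twice in the insertion loop above can be read as a "newer than" predicate on 8-bit free-running sequence numbers:

        /* true when 'a' is ahead of 'b' on a sequence space that wraps at 256 */
        static bool example_freerun_newer(u32 a, u32 b)
        {
                return ((a > b) && ((a - b) < 128)) ||
                       ((a < b) && ((b - a) > 128));
        }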
@@ -1277,7 +1369,8 @@ done:
}
static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
- struct sk_buff *skb, u32 genbit)
+ struct sk_buff *skb, u32 genbit,
+ u16 seq)
{
struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
u32 hslot;
@@ -1297,9 +1390,19 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
entry->generation = genbit;
ret = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
- if (ret == 0)
+ if (ret == 0) {
+ brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
+ brcmf_skbcb(skb)->htod_seq = seq;
+ if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
+ brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
+ brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
+ } else {
+ brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
+ }
ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo,
skb);
+ }
+
if (ret != 0) {
/* suppress q is full or hdrpull failed, drop this packet */
brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
@@ -1317,7 +1420,7 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
static int
brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
- u32 genbit)
+ u32 genbit, u16 seq)
{
u32 fifo;
int ret;
@@ -1360,8 +1463,8 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
if (entry->suppressed && entry->suppr_transit_count)
entry->suppr_transit_count--;
- brcmf_dbg(DATA, "%s flags %X htod %X\n", entry->name, skcb->if_flags,
- skcb->htod);
+ brcmf_dbg(DATA, "%s flags %d htod %X seq %X\n", entry->name, flags,
+ skcb->htod, seq);
/* pick up the implicit credit from this packet */
fifo = brcmf_skb_htod_tag_get_field(skb, FIFO);
@@ -1374,7 +1477,8 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
brcmf_fws_macdesc_return_req_credit(skb);
if (!remove_from_hanger)
- ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit);
+ ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit,
+ seq);
if (remove_from_hanger || ret)
brcmf_txfinalize(fws->drvr, skb, true);
@@ -1406,10 +1510,12 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
{
__le32 status_le;
+ __le16 seq_le;
u32 status;
u32 hslot;
u32 genbit;
u8 flags;
+ u16 seq;
fws->stats.txs_indicate++;
memcpy(&status_le, data, sizeof(status_le));
@@ -1417,9 +1523,16 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
flags = brcmf_txstatus_get_field(status, FLAGS);
hslot = brcmf_txstatus_get_field(status, HSLOT);
genbit = brcmf_txstatus_get_field(status, GENERATION);
+ if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode)) {
+ memcpy(&seq_le, &data[BRCMF_FWS_TYPE_PKTTAG_LEN],
+ sizeof(seq_le));
+ seq = le16_to_cpu(seq_le);
+ } else {
+ seq = 0;
+ }
brcmf_fws_lock(fws);
- brcmf_fws_txs_process(fws, flags, hslot, genbit);
+ brcmf_fws_txs_process(fws, flags, hslot, genbit, seq);
brcmf_fws_unlock(fws);
return BRCMF_FWS_RET_OK_NOSCHEDULE;
}
@@ -1603,15 +1716,15 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
return 0;
}
-static void brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
+static u8 brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
struct sk_buff *p)
{
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
struct brcmf_fws_mac_descriptor *entry = skcb->mac;
u8 flags;
- brcmf_skb_if_flags_set_field(p, TRANSMIT, 1);
- brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation);
+ if (skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED)
+ brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation);
flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST;
if (brcmf_skb_if_flags_get_field(p, REQUESTED)) {
/*
@@ -1621,7 +1734,7 @@ static void brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED;
}
brcmf_skb_htod_tag_set_field(p, FLAGS, flags);
- brcmf_fws_hdrpush(fws, p);
+ return brcmf_fws_hdrpush(fws, p);
}
static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
@@ -1652,7 +1765,7 @@ static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
fws->stats.rollback_failed++;
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED,
- hslot, 0);
+ hslot, 0, 0);
} else {
fws->stats.rollback_success++;
brcmf_fws_return_credits(fws, fifo, 1);
@@ -1689,20 +1802,21 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
{
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
struct brcmf_fws_mac_descriptor *entry;
- struct brcmf_bus *bus = fws->drvr->bus_if;
int rc;
u8 ifidx;
+ u8 data_offset;
entry = skcb->mac;
if (IS_ERR(entry))
return PTR_ERR(entry);
- brcmf_fws_precommit_skb(fws, fifo, skb);
+ data_offset = brcmf_fws_precommit_skb(fws, fifo, skb);
entry->transit_count++;
if (entry->suppressed)
entry->suppr_transit_count++;
+ ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
brcmf_fws_unlock(fws);
- rc = brcmf_bus_txdata(bus, skb);
+ rc = brcmf_proto_txdata(fws->drvr, ifidx, data_offset, skb);
brcmf_fws_lock(fws);
brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name,
skcb->if_flags, skcb->htod, rc);
@@ -1732,6 +1846,8 @@ static int brcmf_fws_assign_htod(struct brcmf_fws_info *fws, struct sk_buff *p,
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
int rc, hslot;
+ skcb->htod = 0;
+ skcb->htod_seq = 0;
hslot = brcmf_fws_hanger_get_free_slot(&fws->hanger);
brcmf_skb_htod_tag_set_field(p, HSLOT, hslot);
brcmf_skb_htod_tag_set_field(p, FREERUN, skcb->mac->seq[fifo]);
@@ -1757,7 +1873,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
/* determine the priority */
if (!skb->priority)
- skb->priority = cfg80211_classify8021d(skb);
+ skb->priority = cfg80211_classify8021d(skb, NULL);
drvr->tx_multicast += !!multicast;
if (pae)
@@ -1861,10 +1977,9 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
&skb, true);
ifidx = brcmf_skb_if_flags_get_field(skb,
INDEX);
- brcmf_proto_hdrpush(drvr, ifidx, 0, skb);
- /* Use bus module to send data frame */
+ /* Use proto layer to send data frame */
brcmf_fws_unlock(fws);
- ret = brcmf_bus_txdata(drvr->bus_if, skb);
+ ret = brcmf_proto_txdata(drvr, ifidx, 0, skb);
brcmf_fws_lock(fws);
if (ret < 0)
brcmf_txfinalize(drvr, skb, false);
@@ -1908,6 +2023,7 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
struct brcmf_fws_info *fws;
u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS;
int rc;
+ u32 mode;
drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
if (!drvr->fws) {
@@ -1966,6 +2082,18 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
if (brcmf_fil_iovar_int_set(drvr->iflist[0], "ampdu_hostreorder", 1))
brcmf_dbg(INFO, "enabling AMPDU host-reorder failed\n");
+ /* Enable seq number reuse, if supported */
+ if (brcmf_fil_iovar_int_get(drvr->iflist[0], "wlfc_mode", &mode) == 0) {
+ if (BRCMF_FWS_MODE_GET_REUSESEQ(mode)) {
+ mode = 0;
+ BRCMF_FWS_MODE_SET_REUSESEQ(mode, 1);
+ if (brcmf_fil_iovar_int_set(drvr->iflist[0],
+ "wlfc_mode", mode) == 0) {
+ BRCMF_FWS_MODE_SET_REUSESEQ(fws->mode, 1);
+ }
+ }
+ }
+
brcmf_fws_hanger_init(&fws->hanger);
brcmf_fws_macdesc_init(&fws->desc.other, NULL, 0);
brcmf_fws_macdesc_set_name(fws, &fws->desc.other);
@@ -2022,7 +2150,7 @@ void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
}
brcmf_fws_lock(fws);
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
- brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0);
+ brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0, 0);
brcmf_fws_unlock(fws);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/nvram.c b/drivers/net/wireless/brcm80211/brcmfmac/nvram.c
new file mode 100644
index 000000000000..d5ef86db631b
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/nvram.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+
+#include "nvram.h"
+
+/* brcmf_nvram_strip: Takes a buffer of "<var>=<value>\n" lines read from a file
+ * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
+ * and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
+ * The end of the buffer is terminated with a token identifying the length of
+ * the buffer.
+ */
+void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length)
+{
+ u8 *nvram;
+ u32 i;
+ u32 len;
+ u32 column;
+ u8 val;
+ bool comment;
+ u32 token;
+ __le32 token_le;
+
+ /* Alloc for extra 0 byte + roundup by 4 + length field */
+ nvram = kmalloc(nv->size + 1 + 3 + sizeof(token_le), GFP_KERNEL);
+ if (!nvram)
+ return NULL;
+
+ len = 0;
+ column = 0;
+ comment = false;
+ for (i = 0; i < nv->size; i++) {
+ val = nv->data[i];
+ if (val == 0)
+ break;
+ if (val == '\r')
+ continue;
+ if (comment && (val != '\n'))
+ continue;
+ comment = false;
+ if (val == '#') {
+ comment = true;
+ continue;
+ }
+ if (val == '\n') {
+ if (column == 0)
+ continue;
+ nvram[len] = 0;
+ len++;
+ column = 0;
+ continue;
+ }
+ nvram[len] = val;
+ len++;
+ column++;
+ }
+ column = len;
+ *new_length = roundup(len + 1, 4);
+ while (column != *new_length) {
+ nvram[column] = 0;
+ column++;
+ }
+
+ token = *new_length / 4;
+ token = (~token << 16) | (token & 0x0000FFFF);
+ token_le = cpu_to_le32(token);
+
+ memcpy(&nvram[*new_length], &token_le, sizeof(token_le));
+ *new_length += sizeof(token_le);
+
+ return nvram;
+}
+
+void brcmf_nvram_free(void *nvram)
+{
+ kfree(nvram);
+}
+
+
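Worked example of the appended token (illustration only): if the stripped image rounds up to 128 bytes, the word count is 128 / 4 = 0x20, and the token becomes (~0x20 << 16) | (0x20 & 0xffff) = 0xffdf0020; the low half carries the length in 32-bit words and the high half its one's complement, presumably so the firmware can sanity-check the downloaded nvram.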
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/nvram.h b/drivers/net/wireless/brcm80211/brcmfmac/nvram.h
new file mode 100644
index 000000000000..d454580928c9
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/nvram.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_NVRAM_H
+#define BRCMFMAC_NVRAM_H
+
+
+void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length);
+void brcmf_nvram_free(void *nvram);
+
+
+#endif /* BRCMFMAC_NVRAM_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index 4a2293041821..fc4f98b275d7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -812,7 +812,7 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
struct ieee80211_channel *chan = request->channels[i];
if (chan->flags & (IEEE80211_CHAN_RADAR |
- IEEE80211_CHAN_PASSIVE_SCAN))
+ IEEE80211_CHAN_NO_IR))
continue;
chanspecs[i] = channel_to_chanspec(&p2p->cfg->d11inf,
@@ -1243,7 +1243,7 @@ bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
IEEE80211_P2P_ATTR_DEVICE_ID,
p2p_dev_addr, sizeof(p2p_dev_addr));
if ((err >= 0) &&
- (!memcmp(p2p_dev_addr, afx_hdl->tx_dst_addr, ETH_ALEN))) {
+ (ether_addr_equal(p2p_dev_addr, afx_hdl->tx_dst_addr))) {
if (!bi->ctl_ch) {
ch.chspec = le16_to_cpu(bi->chanspec);
cfg->d11inf.decchspec(&ch);
@@ -1380,8 +1380,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
(brcmf_p2p_gon_req_collision(p2p, (u8 *)e->addr))) {
if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
&p2p->status) &&
- (memcmp(afx_hdl->tx_dst_addr, e->addr,
- ETH_ALEN) == 0)) {
+ (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
afx_hdl->peer_chan = ch.chnum;
brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n",
afx_hdl->peer_chan);
@@ -1865,7 +1864,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
cfg->d11inf.decchspec(&ch);
if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) &&
- (memcmp(afx_hdl->tx_dst_addr, e->addr, ETH_ALEN) == 0)) {
+ (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
afx_hdl->peer_chan = ch.chnum;
brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n",
afx_hdl->peer_chan);
@@ -1956,21 +1955,21 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg)
err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
if (err < 0) {
brcmf_err("set p2p_disc error\n");
- brcmf_free_vif(cfg, p2p_vif);
+ brcmf_free_vif(p2p_vif);
goto exit;
}
/* obtain bsscfg index for P2P discovery */
err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
if (err < 0) {
brcmf_err("retrieving discover bsscfg index failed\n");
- brcmf_free_vif(cfg, p2p_vif);
+ brcmf_free_vif(p2p_vif);
goto exit;
}
/* Verify that firmware uses same bssidx as driver !! */
if (p2p_ifp->bssidx != bssidx) {
brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n",
bssidx, p2p_ifp->bssidx);
- brcmf_free_vif(cfg, p2p_vif);
+ brcmf_free_vif(p2p_vif);
goto exit;
}
@@ -1998,7 +1997,7 @@ void brcmf_p2p_detach(struct brcmf_p2p_info *p2p)
brcmf_p2p_cancel_remain_on_channel(vif->ifp);
brcmf_p2p_deinit_discovery(p2p);
/* remove discovery interface */
- brcmf_free_vif(p2p->cfg, vif);
+ brcmf_free_vif(vif);
p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
}
/* just set it all to zero */
@@ -2223,7 +2222,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
return &p2p_vif->wdev;
fail:
- brcmf_free_vif(p2p->cfg, p2p_vif);
+ brcmf_free_vif(p2p_vif);
return ERR_PTR(err);
}
@@ -2232,31 +2231,12 @@ fail:
*
* @vif: virtual interface object to delete.
*/
-static void brcmf_p2p_delete_p2pdev(struct brcmf_cfg80211_info *cfg,
+static void brcmf_p2p_delete_p2pdev(struct brcmf_p2p_info *p2p,
struct brcmf_cfg80211_vif *vif)
{
cfg80211_unregister_wdev(&vif->wdev);
- cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
- brcmf_free_vif(cfg, vif);
-}
-
-/**
- * brcmf_p2p_free_p2p_if() - free up net device related data.
- *
- * @ndev: net device that needs to be freed.
- */
-static void brcmf_p2p_free_p2p_if(struct net_device *ndev)
-{
- struct brcmf_cfg80211_info *cfg;
- struct brcmf_cfg80211_vif *vif;
- struct brcmf_if *ifp;
-
- ifp = netdev_priv(ndev);
- cfg = ifp->drvr->config;
- vif = ifp->vif;
-
- brcmf_free_vif(cfg, vif);
- free_netdev(ifp->ndev);
+ p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
+ brcmf_free_vif(vif);
}
/**
@@ -2336,8 +2316,6 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
brcmf_err("Registering netdevice failed\n");
goto fail;
}
- /* override destructor */
- ifp->ndev->destructor = brcmf_p2p_free_p2p_if;
cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif;
/* Disable firmware roaming for P2P interface */
@@ -2350,7 +2328,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
return &ifp->vif->wdev;
fail:
- brcmf_free_vif(cfg, vif);
+ brcmf_free_vif(vif);
return ERR_PTR(err);
}
@@ -2359,8 +2337,6 @@ fail:
*
* @wiphy: wiphy device of interface.
* @wdev: wireless device of interface.
- *
- * TODO: not yet supported.
*/
int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
{
@@ -2386,7 +2362,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
break;
case NL80211_IFTYPE_P2P_DEVICE:
- brcmf_p2p_delete_p2pdev(cfg, vif);
+ brcmf_p2p_delete_p2pdev(p2p, vif);
return 0;
default:
return -ENOTSUPP;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/brcm80211/brcmfmac/proto.c
new file mode 100644
index 000000000000..b6b464184946
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/proto.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+
+#include <brcmu_wifi.h>
+#include "dhd.h"
+#include "dhd_dbg.h"
+#include "proto.h"
+#include "bcdc.h"
+
+
+int brcmf_proto_attach(struct brcmf_pub *drvr)
+{
+ struct brcmf_proto *proto;
+
+ proto = kzalloc(sizeof(*proto), GFP_ATOMIC);
+ if (!proto)
+ goto fail;
+
+ drvr->proto = proto;
+ /* BCDC is the only protocol supported for the moment */
+ if (brcmf_proto_bcdc_attach(drvr))
+ goto fail;
+
+ if ((proto->txdata == NULL) || (proto->hdrpull == NULL) ||
+ (proto->query_dcmd == NULL) || (proto->set_dcmd == NULL)) {
+ brcmf_err("Not all proto handlers have been installed\n");
+ goto fail;
+ }
+ return 0;
+
+fail:
+ kfree(proto);
+ drvr->proto = NULL;
+ return -ENOMEM;
+}
+
+void brcmf_proto_detach(struct brcmf_pub *drvr)
+{
+ if (drvr->proto) {
+ brcmf_proto_bcdc_detach(drvr);
+ kfree(drvr->proto);
+ drvr->proto = NULL;
+ }
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/brcm80211/brcmfmac/proto.h
new file mode 100644
index 000000000000..482fb0ba4a30
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/proto.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_PROTO_H
+#define BRCMFMAC_PROTO_H
+
+struct brcmf_proto {
+ int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
+ struct sk_buff *skb);
+ int (*query_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len);
+ int (*set_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
+ uint len);
+ int (*txdata)(struct brcmf_pub *drvr, int ifidx, u8 offset,
+ struct sk_buff *skb);
+ void *pd;
+};
+
+
+int brcmf_proto_attach(struct brcmf_pub *drvr);
+void brcmf_proto_detach(struct brcmf_pub *drvr);
+
+static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws,
+ u8 *ifidx, struct sk_buff *skb)
+{
+ return drvr->proto->hdrpull(drvr, do_fws, ifidx, skb);
+}
+static inline int brcmf_proto_query_dcmd(struct brcmf_pub *drvr, int ifidx,
+ uint cmd, void *buf, uint len)
+{
+ return drvr->proto->query_dcmd(drvr, ifidx, cmd, buf, len);
+}
+static inline int brcmf_proto_set_dcmd(struct brcmf_pub *drvr, int ifidx,
+ uint cmd, void *buf, uint len)
+{
+ return drvr->proto->set_dcmd(drvr, ifidx, cmd, buf, len);
+}
+static inline int brcmf_proto_txdata(struct brcmf_pub *drvr, int ifidx,
+ u8 offset, struct sk_buff *skb)
+{
+ return drvr->proto->txdata(drvr, ifidx, offset, skb);
+}
+
+
+#endif /* BRCMFMAC_PROTO_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index 2096a14ef1fb..82bf3c5d3cdc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -19,6 +19,7 @@
#include <linux/netdevice.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/bcma/bcma.h>
@@ -50,6 +51,9 @@
#define BCM43143_CORE_ARM_BASE 0x18003000
#define BCM43143_RAMSIZE 0x70000
+/* All D11 cores, ID 0x812 */
+#define BCM43xx_CORE_D11_BASE 0x18001000
+
#define SBCOREREV(sbidh) \
((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
((sbidh) & SSB_IDHIGH_RCLO))
@@ -65,6 +69,10 @@
/* ARM CR4 core specific control flag bits */
#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
+/* D11 core specific control flag bits */
+#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
+#define D11_BCMA_IOCTL_PHYRESET 0x0008
+
#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
/* SDIO Pad drive strength to select value mappings */
struct sdiod_drive_str {
@@ -83,6 +91,24 @@ static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
{0, 0x1}
};
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
+ {6, 0x7},
+ {5, 0x6},
+ {4, 0x5},
+ {3, 0x4},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
+ {3, 0x3},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0} };
+
/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
{16, 0x7},
@@ -92,7 +118,7 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
};
u8
-brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
+brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid)
{
u8 idx;
@@ -105,22 +131,22 @@ brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
static u32
brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid)
+ struct brcmf_chip *ci, u16 coreid)
{
u32 regdata;
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbidhigh),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbidhigh),
+ NULL);
return SBCOREREV(regdata);
}
static u32
brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid)
+ struct brcmf_chip *ci, u16 coreid)
{
u8 idx;
@@ -131,7 +157,7 @@ brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
static bool
brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid)
+ struct brcmf_chip *ci, u16 coreid)
{
u32 regdata;
u8 idx;
@@ -140,9 +166,9 @@ brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
if (idx == BRCMF_MAX_CORENUM)
return false;
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ NULL);
regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
return (SSB_TMSLOW_CLOCK == regdata);
@@ -150,7 +176,7 @@ brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
static bool
brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid)
+ struct brcmf_chip *ci, u16 coreid)
{
u32 regdata;
u8 idx;
@@ -160,13 +186,13 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
if (idx == BRCMF_MAX_CORENUM)
return false;
- regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ NULL);
ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
- regdata = brcmf_sdio_regrl(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ NULL);
ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
return ret;
@@ -174,7 +200,8 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
static void
brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid, u32 core_bits)
+ struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+ u32 in_resetbits)
{
u32 regdata, base;
u8 idx;
@@ -182,130 +209,126 @@ brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
base = ci->c_inf[idx].base;
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
if (regdata & SSB_TMSLOW_RESET)
return;
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
/*
* set target reject and spin until busy is clear
* (preserve core-specific bits)
*/
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow),
- NULL);
- brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- regdata | SSB_TMSLOW_REJECT, NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbtmstatelow), NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+ regdata | SSB_TMSLOW_REJECT, NULL);
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbtmstatelow), NULL);
udelay(1);
- SPINWAIT((brcmf_sdio_regrl(sdiodev,
- CORE_SB(base, sbtmstatehigh),
- NULL) &
- SSB_TMSHIGH_BUSY), 100000);
-
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(base, sbtmstatehigh),
- NULL);
+ SPINWAIT((brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbtmstatehigh),
+ NULL) &
+ SSB_TMSHIGH_BUSY), 100000);
+
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbtmstatehigh),
+ NULL);
if (regdata & SSB_TMSHIGH_BUSY)
brcmf_err("core state still busy\n");
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbidlow),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
+ NULL);
if (regdata & SSB_IDLOW_INITIATOR) {
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbimstate),
+ NULL);
regdata |= SSB_IMSTATE_REJECT;
- brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbimstate),
- regdata, NULL);
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
+ regdata, NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbimstate),
+ NULL);
udelay(1);
- SPINWAIT((brcmf_sdio_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL) &
- SSB_IMSTATE_BUSY), 100000);
+ SPINWAIT((brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbimstate),
+ NULL) &
+ SSB_IMSTATE_BUSY), 100000);
}
/* set reset and reject while enabling the clocks */
regdata = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
- brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- regdata, NULL);
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow),
- NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+ regdata, NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbtmstatelow), NULL);
udelay(10);
/* clear the initiator reject bit */
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbidlow),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
+ NULL);
if (regdata & SSB_IDLOW_INITIATOR) {
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbimstate),
+ NULL);
regdata &= ~SSB_IMSTATE_REJECT;
- brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbimstate),
- regdata, NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
+ regdata, NULL);
}
}
/* leave reset and reject asserted */
- brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+ (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
udelay(1);
}
static void
brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid, u32 core_bits)
+ struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+ u32 in_resetbits)
{
u8 idx;
u32 regdata;
+ u32 wrapbase;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
if (idx == BRCMF_MAX_CORENUM)
return;
+ wrapbase = ci->c_inf[idx].wrapbase;
+
/* if core is already in reset, just return */
- regdata = brcmf_sdio_regrl(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL);
if ((regdata & BCMA_RESET_CTL_RESET) != 0)
return;
- /* ensure no pending backplane operation
- * 300uc should be sufficient for backplane ops to be finish
- * extra 10ms is taken into account for firmware load stage
- * after 10300us carry on disabling the core anyway
- */
- SPINWAIT(brcmf_sdio_regrl(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_ST,
- NULL), 10300);
- regdata = brcmf_sdio_regrl(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_ST,
- NULL);
- if (regdata)
- brcmf_err("disabling core 0x%x with reset status %x\n",
- coreid, regdata);
-
- brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- BCMA_RESET_CTL_RESET, NULL);
- udelay(1);
+ /* configure reset */
+ brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
+ BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
- brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- core_bits, NULL);
- regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- NULL);
+ /* put in reset */
+ brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL,
+ BCMA_RESET_CTL_RESET, NULL);
usleep_range(10, 20);
+ /* wait till reset is 1 */
+ SPINWAIT(brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) !=
+ BCMA_RESET_CTL_RESET, 300);
+
+ /* post reset configure */
+ brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
+ BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
}
static void
brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid, u32 core_bits)
+ struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+ u32 in_resetbits, u32 post_resetbits)
{
u32 regdata;
u8 idx;
@@ -318,93 +341,91 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
* Must do the disable sequence first to work for
* arbitrary current core state.
*/
- brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, 0);
+ brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, pre_resetbits,
+ in_resetbits);
/*
* Now do the initialization sequence.
* set reset while enabling the clock and
* forcing them on throughout the core
*/
- brcmf_sdio_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
- NULL);
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
+ brcmf_sdiod_regwl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
+ NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ NULL);
udelay(1);
/* clear any serror */
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
+ NULL);
if (regdata & SSB_TMSHIGH_SERR)
- brcmf_sdio_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
- 0, NULL);
+ brcmf_sdiod_regwl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
+ 0, NULL);
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate),
+ NULL);
if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
- brcmf_sdio_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate),
- regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
- NULL);
+ brcmf_sdiod_regwl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate),
+ regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
+ NULL);
/* clear reset and allow it to propagate throughout the core */
- brcmf_sdio_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ NULL);
udelay(1);
/* leave clock enabled */
- brcmf_sdio_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_CLOCK, NULL);
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ SSB_TMSLOW_CLOCK, NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ NULL);
udelay(1);
}
static void
brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid, u32 core_bits)
+ struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+ u32 in_resetbits, u32 post_resetbits)
{
u8 idx;
u32 regdata;
+ u32 wrapbase;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
if (idx == BRCMF_MAX_CORENUM)
return;
+ wrapbase = ci->c_inf[idx].wrapbase;
+
/* must disable first to work for arbitrary current core state */
- brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, core_bits);
-
- /* now do initialization sequence */
- brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- core_bits | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
- regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- NULL);
- brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- 0, NULL);
- regdata = brcmf_sdio_regrl(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- NULL);
- udelay(1);
+ brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, pre_resetbits,
+ in_resetbits);
- brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- core_bits | BCMA_IOCTL_CLK, NULL);
- regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- NULL);
- udelay(1);
+ while (brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) &
+ BCMA_RESET_CTL_RESET) {
+ brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL, 0, NULL);
+ usleep_range(40, 60);
+ }
+
+ brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, post_resetbits |
+ BCMA_IOCTL_CLK, NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
}
#ifdef DEBUG
/* safety check for chipinfo */
-static int brcmf_sdio_chip_cichk(struct chip_info *ci)
+static int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
{
u8 core_idx;
@@ -431,172 +452,213 @@ static int brcmf_sdio_chip_cichk(struct chip_info *ci)
return 0;
}
#else /* DEBUG */
-static inline int brcmf_sdio_chip_cichk(struct chip_info *ci)
+static inline int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
{
return 0;
}
#endif
static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u32 regs)
+ struct brcmf_chip *ci)
{
u32 regdata;
- int ret;
+ u32 socitype;
/* Get CC core rev
- * Chipid is assume to be at offset 0 from regs arg
+ * Chipid is assumed to be at offset 0 from SI_ENUM_BASE
* For different chiptypes or old sdio hosts w/o chipcommon,
* other ways of recognition should be added here.
*/
- ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
- ci->c_inf[0].base = regs;
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, chipid),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_CC_REG(SI_ENUM_BASE, chipid),
+ NULL);
ci->chip = regdata & CID_ID_MASK;
ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
ci->chiprev >= 2)
ci->chip = BCM4339_CHIP_ID;
- ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+ socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
- brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
+ brcmf_dbg(INFO, "found %s chip: id=0x%x, rev=%d\n",
+ socitype == SOCI_SB ? "SB" : "AXI", ci->chip, ci->chiprev);
- /* Address of cores for new chips should be added here */
- switch (ci->chip) {
- case BCM43143_CHIP_ID:
- ci->c_inf[0].wrapbase = ci->c_inf[0].base + 0x00100000;
- ci->c_inf[0].cib = 0x2b000000;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = BCM43143_CORE_BUS_BASE;
- ci->c_inf[1].wrapbase = ci->c_inf[1].base + 0x00100000;
- ci->c_inf[1].cib = 0x18000000;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = BCM43143_CORE_SOCRAM_BASE;
- ci->c_inf[2].wrapbase = ci->c_inf[2].base + 0x00100000;
- ci->c_inf[2].cib = 0x14000000;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = BCM43143_CORE_ARM_BASE;
- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
- ci->c_inf[3].cib = 0x07000000;
- ci->ramsize = BCM43143_RAMSIZE;
- break;
- case BCM43241_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2a084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x0e004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x14080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x07004211;
- ci->ramsize = 0x90000;
- break;
- case BCM4329_CHIP_ID:
+ if (socitype == SOCI_SB) {
+ if (ci->chip != BCM4329_CHIP_ID) {
+ brcmf_err("SB chip is not supported\n");
+ return -ENODEV;
+ }
+ ci->iscoreup = brcmf_sdio_sb_iscoreup;
+ ci->corerev = brcmf_sdio_sb_corerev;
+ ci->coredisable = brcmf_sdio_sb_coredisable;
+ ci->resetcore = brcmf_sdio_sb_resetcore;
+
+ ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
+ ci->c_inf[0].base = SI_ENUM_BASE;
ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
+ ci->c_inf[4].id = BCMA_CORE_80211;
+ ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
ci->ramsize = BCM4329_RAMSIZE;
- break;
- case BCM4330_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x27004211;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x07004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x0d080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x03004211;
- ci->ramsize = 0x48000;
- break;
- case BCM4334_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x29004211;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x0d004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x13080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x07004211;
- ci->ramsize = 0x80000;
- break;
- case BCM4335_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2b084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18005000;
- ci->c_inf[1].wrapbase = 0x18105000;
- ci->c_inf[1].cib = 0x0f004211;
- ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
- ci->c_inf[2].base = 0x18002000;
- ci->c_inf[2].wrapbase = 0x18102000;
- ci->c_inf[2].cib = 0x01084411;
- ci->ramsize = 0xc0000;
- ci->rambase = 0x180000;
- break;
- case BCM4339_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2e084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18005000;
- ci->c_inf[1].wrapbase = 0x18105000;
- ci->c_inf[1].cib = 0x15004211;
- ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
- ci->c_inf[2].base = 0x18002000;
- ci->c_inf[2].wrapbase = 0x18102000;
- ci->c_inf[2].cib = 0x04084411;
- ci->ramsize = 0xc0000;
- ci->rambase = 0x180000;
- break;
- default:
- brcmf_err("chipid 0x%x is not supported\n", ci->chip);
- return -ENODEV;
- }
-
- ret = brcmf_sdio_chip_cichk(ci);
- if (ret)
- return ret;
-
- switch (ci->socitype) {
- case SOCI_SB:
- ci->iscoreup = brcmf_sdio_sb_iscoreup;
- ci->corerev = brcmf_sdio_sb_corerev;
- ci->coredisable = brcmf_sdio_sb_coredisable;
- ci->resetcore = brcmf_sdio_sb_resetcore;
- break;
- case SOCI_AI:
+ } else if (socitype == SOCI_AI) {
ci->iscoreup = brcmf_sdio_ai_iscoreup;
ci->corerev = brcmf_sdio_ai_corerev;
ci->coredisable = brcmf_sdio_ai_coredisable;
ci->resetcore = brcmf_sdio_ai_resetcore;
- break;
- default:
- brcmf_err("socitype %u not supported\n", ci->socitype);
+
+ ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
+ ci->c_inf[0].base = SI_ENUM_BASE;
+
+ /* Address of cores for new chips should be added here */
+ switch (ci->chip) {
+ case BCM43143_CHIP_ID:
+ ci->c_inf[0].wrapbase = ci->c_inf[0].base + 0x00100000;
+ ci->c_inf[0].cib = 0x2b000000;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = BCM43143_CORE_BUS_BASE;
+ ci->c_inf[1].wrapbase = ci->c_inf[1].base + 0x00100000;
+ ci->c_inf[1].cib = 0x18000000;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = BCM43143_CORE_SOCRAM_BASE;
+ ci->c_inf[2].wrapbase = ci->c_inf[2].base + 0x00100000;
+ ci->c_inf[2].cib = 0x14000000;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = BCM43143_CORE_ARM_BASE;
+ ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
+ ci->c_inf[3].cib = 0x07000000;
+ ci->c_inf[4].id = BCMA_CORE_80211;
+ ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+ ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+ ci->ramsize = BCM43143_RAMSIZE;
+ break;
+ case BCM43241_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x2a084411;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18002000;
+ ci->c_inf[1].wrapbase = 0x18102000;
+ ci->c_inf[1].cib = 0x0e004211;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = 0x18004000;
+ ci->c_inf[2].wrapbase = 0x18104000;
+ ci->c_inf[2].cib = 0x14080401;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = 0x18003000;
+ ci->c_inf[3].wrapbase = 0x18103000;
+ ci->c_inf[3].cib = 0x07004211;
+ ci->c_inf[4].id = BCMA_CORE_80211;
+ ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+ ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+ ci->ramsize = 0x90000;
+ break;
+ case BCM4330_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x27004211;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18002000;
+ ci->c_inf[1].wrapbase = 0x18102000;
+ ci->c_inf[1].cib = 0x07004211;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = 0x18004000;
+ ci->c_inf[2].wrapbase = 0x18104000;
+ ci->c_inf[2].cib = 0x0d080401;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = 0x18003000;
+ ci->c_inf[3].wrapbase = 0x18103000;
+ ci->c_inf[3].cib = 0x03004211;
+ ci->c_inf[4].id = BCMA_CORE_80211;
+ ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+ ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+ ci->ramsize = 0x48000;
+ break;
+ case BCM4334_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x29004211;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18002000;
+ ci->c_inf[1].wrapbase = 0x18102000;
+ ci->c_inf[1].cib = 0x0d004211;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = 0x18004000;
+ ci->c_inf[2].wrapbase = 0x18104000;
+ ci->c_inf[2].cib = 0x13080401;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = 0x18003000;
+ ci->c_inf[3].wrapbase = 0x18103000;
+ ci->c_inf[3].cib = 0x07004211;
+ ci->c_inf[4].id = BCMA_CORE_80211;
+ ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+ ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+ ci->ramsize = 0x80000;
+ break;
+ case BCM4335_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x2b084411;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18005000;
+ ci->c_inf[1].wrapbase = 0x18105000;
+ ci->c_inf[1].cib = 0x0f004211;
+ ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
+ ci->c_inf[2].base = 0x18002000;
+ ci->c_inf[2].wrapbase = 0x18102000;
+ ci->c_inf[2].cib = 0x01084411;
+ ci->c_inf[3].id = BCMA_CORE_80211;
+ ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
+ ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
+ ci->ramsize = 0xc0000;
+ ci->rambase = 0x180000;
+ break;
+ case BCM43362_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x27004211;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18002000;
+ ci->c_inf[1].wrapbase = 0x18102000;
+ ci->c_inf[1].cib = 0x0a004211;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = 0x18004000;
+ ci->c_inf[2].wrapbase = 0x18104000;
+ ci->c_inf[2].cib = 0x08080401;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = 0x18003000;
+ ci->c_inf[3].wrapbase = 0x18103000;
+ ci->c_inf[3].cib = 0x03004211;
+ ci->c_inf[4].id = BCMA_CORE_80211;
+ ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+ ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+ ci->ramsize = 0x3C000;
+ break;
+ case BCM4339_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x2e084411;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18005000;
+ ci->c_inf[1].wrapbase = 0x18105000;
+ ci->c_inf[1].cib = 0x15004211;
+ ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
+ ci->c_inf[2].base = 0x18002000;
+ ci->c_inf[2].wrapbase = 0x18102000;
+ ci->c_inf[2].cib = 0x04084411;
+ ci->c_inf[3].id = BCMA_CORE_80211;
+ ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
+ ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
+ ci->ramsize = 0xc0000;
+ ci->rambase = 0x180000;
+ break;
+ default:
+ brcmf_err("AXI chip is not supported\n");
+ return -ENODEV;
+ }
+ } else {
+ brcmf_err("chip backplane type %u is not supported\n",
+ socitype);
return -ENODEV;
}
- return 0;
+ return brcmf_sdio_chip_cichk(ci);
}
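
For reference, a minimal user-space sketch of the chipid decoding done in brcmf_sdio_chip_recognition() above, assuming the conventional chipcommon field layout (chip id in the low 16 bits, revision in bits 16..19, backplane type in the top nibble); the literal masks below stand in for CID_ID_MASK, CID_REV_MASK/SHIFT and CID_TYPE_MASK/SHIFT and are illustrative rather than copied from the driver headers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t regdata  = 0x10084339;             /* example chipid readback */
	uint32_t chip     = regdata & 0x0000ffff;   /* stands in for CID_ID_MASK */
	uint32_t chiprev  = (regdata >> 16) & 0xf;  /* CID_REV_MASK >> CID_REV_SHIFT */
	uint32_t socitype = (regdata >> 28) & 0xf;  /* CID_TYPE_MASK >> CID_TYPE_SHIFT */

	printf("chip=0x%x rev=%u backplane=%s\n", chip, chiprev,
	       socitype == 0 ? "SB" : "AXI");       /* SOCI_SB is 0, as used above */
	return 0;
}
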
static int
@@ -607,7 +669,7 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
/* Try forcing SDIO core to do ALPAvail request only */
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
- brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
if (err) {
brcmf_err("error writing for HT off\n");
return err;
@@ -615,8 +677,8 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
/* If register supported, wait for ALPAvail and then force ALP */
/* This may take up to 15 milliseconds */
- clkval = brcmf_sdio_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+ clkval = brcmf_sdiod_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
if ((clkval & ~SBSDIO_AVBITS) != clkset) {
brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
@@ -624,8 +686,8 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
return -EACCES;
}
- SPINWAIT(((clkval = brcmf_sdio_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+ SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
!SBSDIO_ALPAV(clkval)),
PMU_MAX_TRANSITION_DLY);
if (!SBSDIO_ALPAV(clkval)) {
@@ -635,18 +697,18 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
}
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
- brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
udelay(65);
/* Also, disable the extra SDIO pull-ups */
- brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
return 0;
}
static void
brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci)
+ struct brcmf_chip *ci)
{
u32 base = ci->c_inf[0].base;
@@ -654,16 +716,16 @@ brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
/* get chipcommon capabilites */
- ci->c_inf[0].caps = brcmf_sdio_regrl(sdiodev,
- CORE_CC_REG(base, capabilities),
- NULL);
+ ci->c_inf[0].caps = brcmf_sdiod_regrl(sdiodev,
+ CORE_CC_REG(base, capabilities),
+ NULL);
/* get pmu caps & rev */
if (ci->c_inf[0].caps & CC_CAP_PMU) {
ci->pmucaps =
- brcmf_sdio_regrl(sdiodev,
- CORE_CC_REG(base, pmucapabilities),
- NULL);
+ brcmf_sdiod_regrl(sdiodev,
+ CORE_CC_REG(base, pmucapabilities),
+ NULL);
ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
}
@@ -677,19 +739,18 @@ brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
* Make sure any on-chip ARM is off (in case strapping is wrong),
* or downloaded code was already running.
*/
- ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0);
+ ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
}
int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- struct chip_info **ci_ptr, u32 regs)
+ struct brcmf_chip **ci_ptr)
{
int ret;
- struct chip_info *ci;
+ struct brcmf_chip *ci;
brcmf_dbg(TRACE, "Enter\n");
- /* alloc chip_info_t */
- ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC);
+ ci = kzalloc(sizeof(*ci), GFP_ATOMIC);
if (!ci)
return -ENOMEM;
@@ -697,16 +758,16 @@ int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
if (ret != 0)
goto err;
- ret = brcmf_sdio_chip_recognition(sdiodev, ci, regs);
+ ret = brcmf_sdio_chip_recognition(sdiodev, ci);
if (ret != 0)
goto err;
brcmf_sdio_chip_buscoresetup(sdiodev, ci);
- brcmf_sdio_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
- 0, NULL);
- brcmf_sdio_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
- 0, NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
+ 0, NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
+ 0, NULL);
*ci_ptr = ci;
return 0;
@@ -717,7 +778,7 @@ err:
}
void
-brcmf_sdio_chip_detach(struct chip_info **ci_ptr)
+brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -736,7 +797,7 @@ static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
void
brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u32 drivestrength)
+ struct brcmf_chip *ci, u32 drivestrength)
{
const struct sdiod_drive_str *str_tab = NULL;
u32 str_mask;
@@ -757,6 +818,11 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
str_mask = 0x00003800;
str_shift = 11;
break;
+ case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
+ str_tab = sdiod_drvstr_tab6_1v8;
+ str_mask = 0x00001800;
+ str_shift = 11;
+ break;
case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
/* note: 43143 does not support tristate */
i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
@@ -769,6 +835,11 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
brcmf_sdio_chip_name(ci->chip, chn, 8),
drivestrength);
break;
+ case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+ str_tab = sdiod_drive_strength_tab5_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
default:
brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
brcmf_sdio_chip_name(ci->chip, chn, 8),
@@ -784,119 +855,31 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
}
}
addr = CORE_CC_REG(base, chipcontrol_addr);
- brcmf_sdio_regwl(sdiodev, addr, 1, NULL);
- cc_data_temp = brcmf_sdio_regrl(sdiodev, addr, NULL);
+ brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
+ cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
cc_data_temp &= ~str_mask;
drivestrength_sel <<= str_shift;
cc_data_temp |= drivestrength_sel;
- brcmf_sdio_regwl(sdiodev, addr, cc_data_temp, NULL);
+ brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
str_tab[i].strength, drivestrength, cc_data_temp);
}
}
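
The chipcontrol update just above is a plain read-modify-write of a masked field: clear str_mask, shift the selected drive strength into place, OR it back in. A minimal sketch of that pattern, with hypothetical values standing in for the readback and the selected table entry (the mask/shift pair 0x00003800/11 matches the cases above):

#include <stdio.h>
#include <stdint.h>

/* Clear the field selected by mask, then place val at shift within it. */
static uint32_t set_masked_field(uint32_t reg, uint32_t mask,
				 unsigned int shift, uint32_t val)
{
	reg &= ~mask;
	reg |= (val << shift) & mask;
	return reg;
}

int main(void)
{
	uint32_t chipcontrol = 0xdeadbeef;  /* pretend register readback */

	chipcontrol = set_masked_field(chipcontrol, 0x00003800, 11, 5);
	printf("chipcontrol=0x%08x\n", chipcontrol);
	return 0;
}
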
-#ifdef DEBUG
-static bool
-brcmf_sdio_chip_verifynvram(struct brcmf_sdio_dev *sdiodev, u32 nvram_addr,
- char *nvram_dat, uint nvram_sz)
-{
- char *nvram_ularray;
- int err;
- bool ret = true;
-
- /* read back and verify */
- brcmf_dbg(INFO, "Compare NVRAM dl & ul; size=%d\n", nvram_sz);
- nvram_ularray = kmalloc(nvram_sz, GFP_KERNEL);
- /* do not proceed while no memory but */
- if (!nvram_ularray)
- return true;
-
- /* Upload image to verify downloaded contents. */
- memset(nvram_ularray, 0xaa, nvram_sz);
-
- /* Read the vars list to temp buffer for comparison */
- err = brcmf_sdio_ramrw(sdiodev, false, nvram_addr, nvram_ularray,
- nvram_sz);
- if (err) {
- brcmf_err("error %d on reading %d nvram bytes at 0x%08x\n",
- err, nvram_sz, nvram_addr);
- } else if (memcmp(nvram_dat, nvram_ularray, nvram_sz)) {
- brcmf_err("Downloaded NVRAM image is corrupted\n");
- ret = false;
- }
- kfree(nvram_ularray);
-
- return ret;
-}
-#else /* DEBUG */
-static inline bool
-brcmf_sdio_chip_verifynvram(struct brcmf_sdio_dev *sdiodev, u32 nvram_addr,
- char *nvram_dat, uint nvram_sz)
-{
- return true;
-}
-#endif /* DEBUG */
-
-static bool brcmf_sdio_chip_writenvram(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci,
- char *nvram_dat, uint nvram_sz)
-{
- int err;
- u32 nvram_addr;
- u32 token;
- __le32 token_le;
-
- nvram_addr = (ci->ramsize - 4) - nvram_sz + ci->rambase;
-
- /* Write the vars list */
- err = brcmf_sdio_ramrw(sdiodev, true, nvram_addr, nvram_dat, nvram_sz);
- if (err) {
- brcmf_err("error %d on writing %d nvram bytes at 0x%08x\n",
- err, nvram_sz, nvram_addr);
- return false;
- }
-
- if (!brcmf_sdio_chip_verifynvram(sdiodev, nvram_addr,
- nvram_dat, nvram_sz))
- return false;
-
- /* generate token:
- * nvram size, converted to words, in lower 16-bits, checksum
- * in upper 16-bits.
- */
- token = nvram_sz / 4;
- token = (~token << 16) | (token & 0x0000FFFF);
- token_le = cpu_to_le32(token);
-
- brcmf_dbg(INFO, "RAM size: %d\n", ci->ramsize);
- brcmf_dbg(INFO, "nvram is placed at %d, size %d, token=0x%08x\n",
- nvram_addr, nvram_sz, token);
-
- /* Write the length token to the last word */
- if (brcmf_sdio_ramrw(sdiodev, true, (ci->ramsize - 4 + ci->rambase),
- (u8 *)&token_le, 4))
- return false;
-
- return true;
-}
-
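
The length token written by the removed helper is easy to reproduce by hand: NVRAM size in 32-bit words in the low half, its bitwise complement (acting as a checksum) in the high half. A minimal sketch, assuming a 1024-byte image:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t nvram_sz = 1024;                 /* bytes, example value */
	uint32_t words    = nvram_sz / 4;         /* 0x100 */
	uint32_t token    = (~words << 16) | (words & 0xffff);

	printf("token = 0x%08x\n", token);        /* 0xfeff0100 */
	return 0;
}
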
static void
brcmf_sdio_chip_cm3_enterdl(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci)
+ struct brcmf_chip *ci)
{
- u32 zeros = 0;
-
- ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0);
- ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0);
-
- /* clear length token */
- brcmf_sdio_ramrw(sdiodev, true, ci->ramsize - 4, (u8 *)&zeros, 4);
+ ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
+ ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
+ D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
+ ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0, 0, 0);
}
-static bool
-brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
- char *nvram_dat, uint nvram_sz)
+static bool brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev,
+ struct brcmf_chip *ci)
{
u8 core_idx;
u32 reg_addr;
@@ -906,56 +889,64 @@ brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
return false;
}
- if (!brcmf_sdio_chip_writenvram(sdiodev, ci, nvram_dat, nvram_sz))
- return false;
-
/* clear all interrupts */
core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
reg_addr = ci->c_inf[core_idx].base;
reg_addr += offsetof(struct sdpcmd_regs, intstatus);
- brcmf_sdio_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+ brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0);
+ ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0, 0);
return true;
}
static inline void
brcmf_sdio_chip_cr4_enterdl(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci)
+ struct brcmf_chip *ci)
{
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4,
- ARMCR4_BCMA_IOCTL_CPUHALT);
+ u8 idx;
+ u32 regdata;
+ u32 wrapbase;
+ idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
+
+ if (idx == BRCMF_MAX_CORENUM)
+ return;
+
+ wrapbase = ci->c_inf[idx].wrapbase;
+ regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
+ regdata &= ARMCR4_BCMA_IOCTL_CPUHALT;
+ ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, regdata,
+ ARMCR4_BCMA_IOCTL_CPUHALT, ARMCR4_BCMA_IOCTL_CPUHALT);
+ ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
+ D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
}
-static bool
-brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
- char *nvram_dat, uint nvram_sz)
+static bool brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev,
+ struct brcmf_chip *ci, u32 rstvec)
{
u8 core_idx;
u32 reg_addr;
- if (!brcmf_sdio_chip_writenvram(sdiodev, ci, nvram_dat, nvram_sz))
- return false;
-
/* clear all interrupts */
core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
reg_addr = ci->c_inf[core_idx].base;
reg_addr += offsetof(struct sdpcmd_regs, intstatus);
- brcmf_sdio_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+ brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
/* Write reset vector to address 0 */
- brcmf_sdio_ramrw(sdiodev, true, 0, (void *)&ci->rst_vec,
- sizeof(ci->rst_vec));
+ brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
+ sizeof(rstvec));
/* restore ARM */
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, 0);
+ ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, ARMCR4_BCMA_IOCTL_CPUHALT,
+ 0, 0);
return true;
}
void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci)
+ struct brcmf_chip *ci)
{
u8 arm_core_idx;
@@ -969,15 +960,13 @@ void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
}
bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, char *nvram_dat,
- uint nvram_sz)
+ struct brcmf_chip *ci, u32 rstvec)
{
u8 arm_core_idx;
arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
if (BRCMF_MAX_CORENUM != arm_core_idx)
- return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci, nvram_dat,
- nvram_sz);
+ return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci);
- return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, nvram_dat, nvram_sz);
+ return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, rstvec);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
index 507c61c991fa..fb0614329ede 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
@@ -54,15 +54,7 @@
#define BRCMF_MAX_CORENUM 6
-/* SDIO device ID */
-#define SDIO_DEVICE_ID_BROADCOM_43143 43143
-#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
-#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
-#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
-#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
-#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
-
-struct chip_core_info {
+struct brcmf_core {
u16 id;
u16 rev;
u32 base;
@@ -71,27 +63,28 @@ struct chip_core_info {
u32 cib;
};
-struct chip_info {
+struct brcmf_chip {
u32 chip;
u32 chiprev;
- u32 socitype;
/* core info */
/* always put chipcommon core at 0, bus core at 1 */
- struct chip_core_info c_inf[BRCMF_MAX_CORENUM];
+ struct brcmf_core c_inf[BRCMF_MAX_CORENUM];
u32 pmurev;
u32 pmucaps;
u32 ramsize;
u32 rambase;
u32 rst_vec; /* reset vector for ARM CR4 core */
- bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
+ bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
u16 coreid);
- u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
+ u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
u16 coreid);
void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid, u32 core_bits);
+ struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+ u32 in_resetbits);
void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid, u32 core_bits);
+ struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+ u32 in_resetbits, u32 post_resetbits);
};
struct sbconfig {
@@ -224,15 +217,15 @@ struct sdpcmd_regs {
};
int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- struct chip_info **ci_ptr, u32 regs);
-void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
+ struct brcmf_chip **ci_ptr);
+void brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr);
void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u32 drivestrength);
-u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
+ struct brcmf_chip *ci,
+ u32 drivestrength);
+u8 brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid);
void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci);
+ struct brcmf_chip *ci);
bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, char *nvram_dat,
- uint nvram_sz);
+ struct brcmf_chip *ci, u32 rstvec);
#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index fc0d4f0129db..092e9c824992 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -164,11 +164,9 @@ struct brcmf_sdio;
struct brcmf_sdio_dev {
struct sdio_func *func[SDIO_MAX_FUNCS];
u8 num_funcs; /* Supported funcs on client */
- u32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
u32 sbwad; /* Save backplane window address */
- void *bus;
+ struct brcmf_sdio *bus;
atomic_t suspend; /* suspend flag */
- wait_queue_head_t request_byte_wait;
wait_queue_head_t request_word_wait;
wait_queue_head_t request_buffer_wait;
struct device *dev;
@@ -185,22 +183,19 @@ struct brcmf_sdio_dev {
};
/* Register/deregister interrupt handler. */
-int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
-int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
/* sdio device register access interface */
-u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
- int *ret);
-void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
- int *ret);
-int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
- void *data, bool write);
+u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
+ int *ret);
+void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
+ int *ret);
/* Buffer transfer to/from device (client) core via cmd53.
* fn: function number
- * addr: backplane address (i.e. >= regsva from attach)
* flags: backplane width, address increment, sync/async
* buf: pointer to memory data buffer
* nbytes: number of bytes to transfer to/from buf
@@ -210,17 +205,14 @@ int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
* Returns 0 or error code.
* NOTE: Async operation is not currently supported.
*/
-int brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq);
-int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes);
-
-int brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff *pkt);
-int brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes);
-int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq, uint totlen);
+int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
+ struct sk_buff_head *pktq);
+int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes);
+
+int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt);
+int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes);
+int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
+ struct sk_buff_head *pktq, uint totlen);
/* Flags bits */
@@ -236,43 +228,16 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
* nbytes: number of bytes to transfer to/from buf
* Returns 0 or error code.
*/
-int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
- u8 *buf, uint nbytes);
-int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
- u8 *data, uint size);
+int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+ u8 *data, uint size);
/* Issue an abort to the specified function */
-int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
-
-/* platform specific/high level functions */
-int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
-int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
-
-/* attach, return handler on success, NULL if failed.
- * The handler shall be provided by all subsequent calls. No local cache
- * cfghdl points to the starting address of pci device mapped memory
- */
-int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
-void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
-
-/* read or write one byte using cmd52 */
-int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
- uint addr, u8 *byte);
-
-/* read or write 2/4 bytes using cmd53 */
-int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
- uint addr, u32 *word, uint nbyte);
-
-/* Watchdog timer interface for pm ops */
-void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable);
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
-void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
-void brcmf_sdbrcm_disconnect(void *ptr);
-void brcmf_sdbrcm_isr(void *arg);
+struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdio_remove(struct brcmf_sdio *bus);
+void brcmf_sdio_isr(struct brcmf_sdio *bus);
-void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
+void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick);
-void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
- wait_queue_head_t *wq);
-bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
#endif /* _BRCM_SDH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
index 3c67529b9074..4d7d51f95716 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
@@ -89,7 +89,7 @@ TRACE_EVENT(brcmf_hexdump,
TP_printk("hexdump [addr=%lx, length=%lu]", __entry->addr, __entry->len)
);
-TRACE_EVENT(brcmf_bdchdr,
+TRACE_EVENT(brcmf_bcdchdr,
TP_PROTO(void *data),
TP_ARGS(data),
TP_STRUCT__entry(
@@ -107,24 +107,35 @@ TRACE_EVENT(brcmf_bdchdr,
memcpy(__get_dynamic_array(signal),
(u8 *)data + 4, __entry->siglen);
),
- TP_printk("bdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
+ TP_printk("bcdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
);
+#ifndef SDPCM_RX
+#define SDPCM_RX 0
+#endif
+#ifndef SDPCM_TX
+#define SDPCM_TX 1
+#endif
+#ifndef SDPCM_GLOM
+#define SDPCM_GLOM 2
+#endif
+
TRACE_EVENT(brcmf_sdpcm_hdr,
- TP_PROTO(bool tx, void *data),
- TP_ARGS(tx, data),
+ TP_PROTO(u8 dir, void *data),
+ TP_ARGS(dir, data),
TP_STRUCT__entry(
- __field(u8, tx)
+ __field(u8, dir)
__field(u16, len)
- __array(u8, hdr, 12)
+ __dynamic_array(u8, hdr, dir == SDPCM_GLOM ? 20 : 12)
),
TP_fast_assign(
- memcpy(__entry->hdr, data, 12);
- __entry->len = __entry->hdr[0] | (__entry->hdr[1] << 8);
- __entry->tx = tx ? 1 : 0;
+ memcpy(__get_dynamic_array(hdr), data, dir == SDPCM_GLOM ? 20 : 12);
+ __entry->len = *(u8 *)data | (*((u8 *)data + 1) << 8);
+ __entry->dir = dir;
),
- TP_printk("sdpcm: %s len %u, seq %d", __entry->tx ? "TX" : "RX",
- __entry->len, __entry->hdr[4])
+ TP_printk("sdpcm: %s len %u, seq %d",
+ __entry->dir == SDPCM_RX ? "RX" : "TX",
+ __entry->len, ((u8 *)__get_dynamic_array(hdr))[4])
);
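
The length extraction in TP_fast_assign() above is simply a little-endian 16-bit read of the first two header bytes; a minimal sketch with a hypothetical header:

#include <stdio.h>
#include <stdint.h>

static uint16_t le16_from_bytes(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	uint8_t hdr[12] = { 0x34, 0x12 };           /* hypothetical sdpcm header */

	printf("len=%u\n", le16_from_bytes(hdr));   /* prints 4660 (0x1234) */
	return 0;
}
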
#ifdef CONFIG_BRCM_TRACING
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 422f44c63175..24f65cd53859 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -522,10 +522,10 @@ brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
/* update state of upper layer */
if (state == BRCMFMAC_USB_STATE_DOWN) {
brcmf_dbg(USB, "DBUS is down\n");
- bcmf_bus->state = BRCMF_BUS_DOWN;
+ brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DOWN);
} else if (state == BRCMFMAC_USB_STATE_UP) {
brcmf_dbg(USB, "DBUS is up\n");
- bcmf_bus->state = BRCMF_BUS_DATA;
+ brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DATA);
} else {
brcmf_dbg(USB, "DBUS current state=%d\n", state);
}
@@ -1253,9 +1253,10 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->ops = &brcmf_usb_bus_ops;
bus->chip = bus_pub->devid;
bus->chiprev = bus_pub->chiprev;
+ bus->proto_type = BRCMF_PROTO_BCDC;
/* Attach to the common driver interface */
- ret = brcmf_attach(0, dev);
+ ret = brcmf_attach(dev);
if (ret) {
brcmf_err("brcmf_attach failed\n");
goto fail;
@@ -1454,7 +1455,7 @@ static int brcmf_usb_resume(struct usb_interface *intf)
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
brcmf_dbg(USB, "Enter\n");
- if (!brcmf_attach(0, devinfo->dev))
+ if (!brcmf_attach(devinfo->dev))
return brcmf_bus_start(&usb->dev);
return 0;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 571f013cebbb..d7718a5fa2f0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -202,9 +202,9 @@ static struct ieee80211_supported_band __wl_band_5ghz_a = {
/* This is to override regulatory domains defined in cfg80211 module (reg.c)
* By default world regulatory domain defined in reg.c puts the flags
- * NL80211_RRF_PASSIVE_SCAN and NL80211_RRF_NO_IBSS for 5GHz channels (for
- * 36..48 and 149..165). With respect to these flags, wpa_supplicant doesn't
- * start p2p operations on 5GHz channels. All the changes in world regulatory
+ * NL80211_RRF_NO_IR for 5GHz channels (for 36..48 and 149..165).
+ * With respect to these flags, wpa_supplicant doesn't start p2p
+ * operations on 5GHz channels. All the changes in world regulatory
* domain are to be done here.
*/
static const struct ieee80211_regdomain brcmf_regdom = {
@@ -1095,10 +1095,10 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
BRCMF_C_DISASSOC, NULL, 0);
if (err) {
brcmf_err("WLC_DISASSOC failed (%d)\n", err);
- cfg80211_disconnected(vif->wdev.netdev, 0,
- NULL, 0, GFP_KERNEL);
}
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
+ cfg80211_disconnected(vif->wdev.netdev, 0, NULL, 0, GFP_KERNEL);
+
}
clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
@@ -1758,6 +1758,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
return -EIO;
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
+ cfg80211_disconnected(ndev, reason_code, NULL, 0, GFP_KERNEL);
memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
scbval.val = cpu_to_le32(reason_code);
@@ -2556,8 +2557,8 @@ brcmf_compare_update_same_bss(struct brcmf_cfg80211_info *cfg,
ch_bss.band == ch_bss_info_le.band &&
bss_info_le->SSID_len == bss->SSID_len &&
!memcmp(bss_info_le->SSID, bss->SSID, bss_info_le->SSID_len)) {
- if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) ==
- (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL)) {
+ if ((bss->flags & BRCMF_BSS_RSSI_ON_CHANNEL) ==
+ (bss_info_le->flags & BRCMF_BSS_RSSI_ON_CHANNEL)) {
s16 bss_rssi = le16_to_cpu(bss->RSSI);
s16 bss_info_rssi = le16_to_cpu(bss_info_le->RSSI);
@@ -2566,13 +2567,13 @@ brcmf_compare_update_same_bss(struct brcmf_cfg80211_info *cfg,
*/
if (bss_info_rssi > bss_rssi)
bss->RSSI = bss_info_le->RSSI;
- } else if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) &&
- (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL) == 0) {
+ } else if ((bss->flags & BRCMF_BSS_RSSI_ON_CHANNEL) &&
+ (bss_info_le->flags & BRCMF_BSS_RSSI_ON_CHANNEL) == 0) {
/* preserve the on-channel rssi measurement
* if the new measurement is off channel
*/
bss->RSSI = bss_info_le->RSSI;
- bss->flags |= WLC_BSS_RSSI_ON_CHANNEL;
+ bss->flags |= BRCMF_BSS_RSSI_ON_CHANNEL;
}
return 1;
}
@@ -2988,6 +2989,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
}
set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
+ cfg->escan_info.run = brcmf_run_escan;
err = brcmf_do_escan(cfg, wiphy, ifp, request);
if (err) {
clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
@@ -3973,11 +3975,12 @@ brcmf_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
static int
brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
- struct ieee80211_channel *chan, bool offchan,
- unsigned int wait, const u8 *buf, size_t len,
- bool no_cck, bool dont_wait_for_ack, u64 *cookie)
+ struct cfg80211_mgmt_tx_params *params, u64 *cookie)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct ieee80211_channel *chan = params->chan;
+ const u8 *buf = params->buf;
+ size_t len = params->len;
const struct ieee80211_mgmt *mgmt;
struct brcmf_cfg80211_vif *vif;
s32 err = 0;
@@ -4341,7 +4344,7 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
wiphy->max_remain_on_channel_duration = 5000;
brcmf_wiphy_pno_params(wiphy);
brcmf_dbg(INFO, "Registering custom regulatory\n");
- wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+ wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom);
err = wiphy_register(wiphy);
if (err < 0) {
@@ -4358,9 +4361,6 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
{
struct brcmf_cfg80211_vif *vif;
- if (cfg->vif_cnt == BRCMF_IFACE_MAX_CNT)
- return ERR_PTR(-ENOSPC);
-
brcmf_dbg(TRACE, "allocating virtual interface (size=%zu)\n",
sizeof(*vif));
vif = kzalloc(sizeof(*vif), GFP_KERNEL);
@@ -4377,21 +4377,25 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
brcmf_init_prof(&vif->profile);
list_add_tail(&vif->list, &cfg->vif_list);
- cfg->vif_cnt++;
return vif;
}
-void brcmf_free_vif(struct brcmf_cfg80211_info *cfg,
- struct brcmf_cfg80211_vif *vif)
+void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
{
list_del(&vif->list);
- cfg->vif_cnt--;
-
kfree(vif);
- if (!cfg->vif_cnt) {
- wiphy_unregister(cfg->wiphy);
- wiphy_free(cfg->wiphy);
- }
+}
+
+void brcmf_cfg80211_free_netdev(struct net_device *ndev)
+{
+ struct brcmf_cfg80211_vif *vif;
+ struct brcmf_if *ifp;
+
+ ifp = netdev_priv(ndev);
+ vif = ifp->vif;
+
+ brcmf_free_vif(vif);
+ free_netdev(ndev);
}
static bool brcmf_is_linkup(const struct brcmf_event_msg *e)
@@ -4978,20 +4982,20 @@ cfg80211_p2p_attach_out:
wl_deinit_priv(cfg);
cfg80211_attach_out:
- brcmf_free_vif(cfg, vif);
+ brcmf_free_vif(vif);
return NULL;
}
void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_vif *vif;
- struct brcmf_cfg80211_vif *tmp;
+ if (!cfg)
+ return;
- wl_deinit_priv(cfg);
+ WARN_ON(!list_empty(&cfg->vif_list));
+ wiphy_unregister(cfg->wiphy);
brcmf_btcoex_detach(cfg);
- list_for_each_entry_safe(vif, tmp, &cfg->vif_list, list) {
- brcmf_free_vif(cfg, vif);
- }
+ wl_deinit_priv(cfg);
+ wiphy_free(cfg->wiphy);
}
static s32
@@ -5086,7 +5090,8 @@ dongle_scantime_out:
}
-static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
+static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
+ u32 bw_cap[])
{
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
struct ieee80211_channel *band_chan_arr;
@@ -5099,7 +5104,6 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
enum ieee80211_band band;
u32 channel;
u32 *n_cnt;
- bool ht40_allowed;
u32 index;
u32 ht40_flag;
bool update;
@@ -5132,18 +5136,17 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
array_size = ARRAY_SIZE(__wl_2ghz_channels);
n_cnt = &__wl_band_2ghz.n_channels;
band = IEEE80211_BAND_2GHZ;
- ht40_allowed = (bw_cap == WLC_N_BW_40ALL);
} else if (ch.band == BRCMU_CHAN_BAND_5G) {
band_chan_arr = __wl_5ghz_a_channels;
array_size = ARRAY_SIZE(__wl_5ghz_a_channels);
n_cnt = &__wl_band_5ghz_a.n_channels;
band = IEEE80211_BAND_5GHZ;
- ht40_allowed = !(bw_cap == WLC_N_BW_20ALL);
} else {
- brcmf_err("Invalid channel Sepc. 0x%x.\n", ch.chspec);
+ brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
continue;
}
- if (!ht40_allowed && ch.bw == BRCMU_CHAN_BW_40)
+ if (!(bw_cap[band] & WLC_BW_40MHZ_BIT) &&
+ ch.bw == BRCMU_CHAN_BW_40)
continue;
update = false;
for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
@@ -5161,7 +5164,10 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
ieee80211_channel_to_frequency(ch.chnum, band);
band_chan_arr[index].hw_value = ch.chnum;
- if (ch.bw == BRCMU_CHAN_BW_40 && ht40_allowed) {
+ brcmf_err("channel %d: f=%d bw=%d sb=%d\n",
+ ch.chnum, band_chan_arr[index].center_freq,
+ ch.bw, ch.sb);
+ if (ch.bw == BRCMU_CHAN_BW_40) {
/* assuming the order is HT20, HT40 Upper,
* HT40 lower from chanspecs
*/
@@ -5197,10 +5203,10 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
if (channel & WL_CHAN_RADAR)
band_chan_arr[index].flags |=
(IEEE80211_CHAN_RADAR |
- IEEE80211_CHAN_NO_IBSS);
+ IEEE80211_CHAN_NO_IR);
if (channel & WL_CHAN_PASSIVE)
band_chan_arr[index].flags |=
- IEEE80211_CHAN_PASSIVE_SCAN;
+ IEEE80211_CHAN_NO_IR;
}
}
if (!update)
@@ -5212,6 +5218,46 @@ exit:
return err;
}
+static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
+{
+ u32 band, mimo_bwcap;
+ int err;
+
+ band = WLC_BAND_2G;
+ err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
+ if (!err) {
+ bw_cap[IEEE80211_BAND_2GHZ] = band;
+ band = WLC_BAND_5G;
+ err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
+ if (!err) {
+ bw_cap[IEEE80211_BAND_5GHZ] = band;
+ return;
+ }
+ WARN_ON(1);
+ return;
+ }
+ brcmf_dbg(INFO, "fallback to mimo_bw_cap info\n");
+ mimo_bwcap = 0;
+ err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &mimo_bwcap);
+ if (err)
+ /* assume 20MHz if firmware does not give a clue */
+ mimo_bwcap = WLC_N_BW_20ALL;
+
+ switch (mimo_bwcap) {
+ case WLC_N_BW_40ALL:
+ bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT;
+ /* fall-thru */
+ case WLC_N_BW_20IN2G_40IN5G:
+ bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT;
+ /* fall-thru */
+ case WLC_N_BW_20ALL:
+ bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
+ bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
+ break;
+ default:
+ brcmf_err("invalid mimo_bw_cap value\n");
+ }
+}
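
A minimal sketch of what the fall-through switch above produces per band, using 0x1/0x2 in place of the new WLC_BW_20MHZ_BIT/WLC_BW_40MHZ_BIT defines and assuming the legacy values WLC_N_BW_20ALL=0, WLC_N_BW_40ALL=1, WLC_N_BW_20IN2G_40IN5G=2:

#include <stdio.h>
#include <stdint.h>

#define BW20 0x1	/* stands in for WLC_BW_20MHZ_BIT */
#define BW40 0x2	/* stands in for WLC_BW_40MHZ_BIT */

static void legacy_to_bwcap(uint32_t mimo_bwcap, uint32_t bw_cap[2])
{
	bw_cap[0] = bw_cap[1] = 0;              /* [0] = 2.4 GHz, [1] = 5 GHz */
	switch (mimo_bwcap) {
	case 1:                                 /* WLC_N_BW_40ALL */
		bw_cap[0] |= BW40;
		/* fall through */
	case 2:                                 /* WLC_N_BW_20IN2G_40IN5G */
		bw_cap[1] |= BW40;
		/* fall through */
	case 0:                                 /* WLC_N_BW_20ALL (assumed 0) */
		bw_cap[0] |= BW20;
		bw_cap[1] |= BW20;
		break;
	}
}

int main(void)
{
	uint32_t caps[2];

	legacy_to_bwcap(1, caps);
	printf("40ALL:         2g=0x%x 5g=0x%x\n", caps[0], caps[1]); /* 0x3 0x3 */
	legacy_to_bwcap(2, caps);
	printf("20IN2G_40IN5G: 2g=0x%x 5g=0x%x\n", caps[0], caps[1]); /* 0x1 0x3 */
	return 0;
}
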
static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
{
@@ -5220,13 +5266,13 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
s32 phy_list;
u32 band_list[3];
u32 nmode;
- u32 bw_cap = 0;
+ u32 bw_cap[2] = { 0, 0 };
s8 phy;
s32 err;
u32 nband;
s32 i;
- struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS];
- s32 index;
+ struct ieee80211_supported_band *bands[2] = { NULL, NULL };
+ struct ieee80211_supported_band *band;
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_PHYLIST,
&phy_list, sizeof(phy_list));
@@ -5252,11 +5298,10 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
if (err) {
brcmf_err("nmode error (%d)\n", err);
} else {
- err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &bw_cap);
- if (err)
- brcmf_err("mimo_bw_cap error (%d)\n", err);
+ brcmf_get_bwcap(ifp, bw_cap);
}
- brcmf_dbg(INFO, "nmode=%d, mimo_bw_cap=%d\n", nmode, bw_cap);
+ brcmf_dbg(INFO, "nmode=%d, bw_cap=(%d, %d)\n", nmode,
+ bw_cap[IEEE80211_BAND_2GHZ], bw_cap[IEEE80211_BAND_5GHZ]);
err = brcmf_construct_reginfo(cfg, bw_cap);
if (err) {
@@ -5265,40 +5310,33 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
}
nband = band_list[0];
- memset(bands, 0, sizeof(bands));
for (i = 1; i <= nband && i < ARRAY_SIZE(band_list); i++) {
- index = -1;
+ band = NULL;
if ((band_list[i] == WLC_BAND_5G) &&
- (__wl_band_5ghz_a.n_channels > 0)) {
- index = IEEE80211_BAND_5GHZ;
- bands[index] = &__wl_band_5ghz_a;
- if ((bw_cap == WLC_N_BW_40ALL) ||
- (bw_cap == WLC_N_BW_20IN2G_40IN5G))
- bands[index]->ht_cap.cap |=
- IEEE80211_HT_CAP_SGI_40;
- } else if ((band_list[i] == WLC_BAND_2G) &&
- (__wl_band_2ghz.n_channels > 0)) {
- index = IEEE80211_BAND_2GHZ;
- bands[index] = &__wl_band_2ghz;
- if (bw_cap == WLC_N_BW_40ALL)
- bands[index]->ht_cap.cap |=
- IEEE80211_HT_CAP_SGI_40;
- }
+ (__wl_band_5ghz_a.n_channels > 0))
+ band = &__wl_band_5ghz_a;
+ else if ((band_list[i] == WLC_BAND_2G) &&
+ (__wl_band_2ghz.n_channels > 0))
+ band = &__wl_band_2ghz;
+ else
+ continue;
- if ((index >= 0) && nmode) {
- bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
- bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
- bands[index]->ht_cap.ht_supported = true;
- bands[index]->ht_cap.ampdu_factor =
- IEEE80211_HT_MAX_AMPDU_64K;
- bands[index]->ht_cap.ampdu_density =
- IEEE80211_HT_MPDU_DENSITY_16;
- /* An HT shall support all EQM rates for one spatial
- * stream
- */
- bands[index]->ht_cap.mcs.rx_mask[0] = 0xff;
+ if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+ band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+ band->ht_cap.ht_supported = true;
+ band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+ /* An HT shall support all EQM rates for one spatial
+ * stream
+ */
+ band->ht_cap.mcs.rx_mask[0] = 0xff;
+ band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ bands[band->band] = band;
}
wiphy = cfg_to_wiphy(cfg);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index d9bdaf9a72d0..2dc6a074e8ed 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -412,7 +412,6 @@ struct brcmf_cfg80211_info {
struct work_struct escan_timeout_work;
u8 *escan_ioctl_buf;
struct list_head vif_list;
- u8 vif_cnt;
struct brcmf_cfg80211_vif_event vif_event;
struct completion vif_disabled;
struct brcmu_d11inf d11inf;
@@ -487,8 +486,7 @@ enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
enum nl80211_iftype type,
bool pm_block);
-void brcmf_free_vif(struct brcmf_cfg80211_info *cfg,
- struct brcmf_cfg80211_vif *vif);
+void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
const u8 *vndr_ie_buf, u32 vndr_ie_len);
@@ -507,5 +505,6 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
bool fw_abort);
void brcmf_set_mpc(struct brcmf_if *ndev, int mpc);
void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
+void brcmf_cfg80211_free_netdev(struct net_device *ndev);
#endif /* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index cc87926f5055..635ae034c7e5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -59,23 +59,18 @@
#define BRCM_2GHZ_2412_2462 REG_RULE(2412-10, 2462+10, 40, 0, 19, 0)
#define BRCM_2GHZ_2467_2472 REG_RULE(2467-10, 2472+10, 20, 0, 19, \
- NL80211_RRF_PASSIVE_SCAN | \
- NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
#define BRCM_5GHZ_5180_5240 REG_RULE(5180-10, 5240+10, 40, 0, 21, \
- NL80211_RRF_PASSIVE_SCAN | \
- NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
#define BRCM_5GHZ_5260_5320 REG_RULE(5260-10, 5320+10, 40, 0, 21, \
- NL80211_RRF_PASSIVE_SCAN | \
NL80211_RRF_DFS | \
- NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
#define BRCM_5GHZ_5500_5700 REG_RULE(5500-10, 5700+10, 40, 0, 21, \
- NL80211_RRF_PASSIVE_SCAN | \
NL80211_RRF_DFS | \
- NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
#define BRCM_5GHZ_5745_5825 REG_RULE(5745-10, 5825+10, 40, 0, 21, \
- NL80211_RRF_PASSIVE_SCAN | \
- NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
static const struct ieee80211_regdomain brcms_regdom_x2 = {
.n_reg_rules = 6,
@@ -395,7 +390,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
brcms_b_set_chanspec(wlc->hw, chanspec,
- !!(ch->flags & IEEE80211_CHAN_PASSIVE_SCAN),
+ !!(ch->flags & IEEE80211_CHAN_NO_IR),
&txpwr);
}
@@ -657,8 +652,8 @@ static void brcms_reg_apply_radar_flags(struct wiphy *wiphy)
*/
if (!(ch->flags & IEEE80211_CHAN_DISABLED))
ch->flags |= IEEE80211_CHAN_RADAR |
- IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN;
+ IEEE80211_CHAN_NO_IR;
}
}
@@ -684,18 +679,15 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
continue;
if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- rule = freq_reg_info(wiphy, ch->center_freq);
+ rule = freq_reg_info(wiphy,
+ MHZ_TO_KHZ(ch->center_freq));
if (IS_ERR(rule))
continue;
- if (!(rule->flags & NL80211_RRF_NO_IBSS))
- ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
- if (!(rule->flags & NL80211_RRF_PASSIVE_SCAN))
- ch->flags &=
- ~IEEE80211_CHAN_PASSIVE_SCAN;
+ if (!(rule->flags & NL80211_RRF_NO_IR))
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
} else if (ch->beacon_found) {
- ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN);
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
}
}
}
@@ -775,8 +767,8 @@ void brcms_c_regd_init(struct brcms_c_info *wlc)
}
wlc->wiphy->reg_notifier = brcms_reg_notifier;
- wlc->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_STRICT_REGULATORY;
+ wlc->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_STRICT_REG;
wiphy_apply_custom_regulatory(wlc->wiphy, regd->regdomain);
brcms_reg_apply_beaconing_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER);
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index edc5d105ff98..925034b80e9c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -125,13 +125,13 @@ static struct ieee80211_channel brcms_2ghz_chantable[] = {
CHAN2GHZ(10, 2457, IEEE80211_CHAN_NO_HT40PLUS),
CHAN2GHZ(11, 2462, IEEE80211_CHAN_NO_HT40PLUS),
CHAN2GHZ(12, 2467,
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_NO_IR |
IEEE80211_CHAN_NO_HT40PLUS),
CHAN2GHZ(13, 2472,
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_NO_IR |
IEEE80211_CHAN_NO_HT40PLUS),
CHAN2GHZ(14, 2484,
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_NO_IR |
IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS |
IEEE80211_CHAN_NO_OFDM)
};
@@ -144,51 +144,51 @@ static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = {
CHAN5GHZ(48, IEEE80211_CHAN_NO_HT40PLUS),
/* UNII-2 */
CHAN5GHZ(52,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
CHAN5GHZ(56,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
CHAN5GHZ(60,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
CHAN5GHZ(64,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
/* MID */
CHAN5GHZ(100,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
CHAN5GHZ(104,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
CHAN5GHZ(108,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
CHAN5GHZ(112,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
CHAN5GHZ(116,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
CHAN5GHZ(120,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
CHAN5GHZ(124,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
CHAN5GHZ(128,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
CHAN5GHZ(132,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
CHAN5GHZ(136,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
CHAN5GHZ(140,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS |
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS |
IEEE80211_CHAN_NO_HT40MINUS),
/* UNII-3 */
CHAN5GHZ(149, IEEE80211_CHAN_NO_HT40MINUS),
@@ -1071,7 +1071,6 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
hw->max_rates = 2; /* Primary rate and 1 fallback rate */
/* channel change time is dependent on chip and band */
- hw->channel_change_time = 7 * 1000;
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_ADHOC);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 8138f1cff4e5..9417cb5a2553 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -7108,7 +7108,6 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
struct sk_buff *p,
struct ieee80211_rx_status *rx_status)
{
- int preamble;
int channel;
u32 rspec;
unsigned char *plcp;
@@ -7191,7 +7190,6 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
rx_status->rate_idx -= BRCMS_LEGACY_5G_RATE_OFFSET;
/* Determine short preamble and rate_idx */
- preamble = 0;
if (is_cck_rate(rspec)) {
if (rxh->PhyRxStatus_0 & PRXS0_SHORTH)
rx_status->flag |= RX_FLAG_SHORTPRE;
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 84113ea16f84..6fa5d4863782 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -41,6 +41,7 @@
#define BCM4331_CHIP_ID 0x4331
#define BCM4334_CHIP_ID 0x4334
#define BCM4335_CHIP_ID 0x4335
+#define BCM43362_CHIP_ID 43362
#define BCM4339_CHIP_ID 0x4339
#endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
index 0505cc065e0d..7ca2aa1035b2 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
@@ -82,6 +82,20 @@
#define WLC_N_BW_40ALL 1
#define WLC_N_BW_20IN2G_40IN5G 2
+#define WLC_BW_20MHZ_BIT BIT(0)
+#define WLC_BW_40MHZ_BIT BIT(1)
+#define WLC_BW_80MHZ_BIT BIT(2)
+#define WLC_BW_160MHZ_BIT BIT(3)
+
+/* Bandwidth capabilities */
+#define WLC_BW_CAP_20MHZ (WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_40MHZ (WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_80MHZ (WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT| \
+ WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_160MHZ (WLC_BW_160MHZ_BIT|WLC_BW_80MHZ_BIT| \
+ WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_UNRESTRICTED 0xFF
+
/* band types */
#define WLC_BAND_AUTO 0 /* auto-select */
#define WLC_BAND_5G 1 /* 5 Ghz */
diff --git a/drivers/net/wireless/cw1200/cw1200_sdio.c b/drivers/net/wireless/cw1200/cw1200_sdio.c
index ebdcdf44f155..d3acc85932a5 100644
--- a/drivers/net/wireless/cw1200/cw1200_sdio.c
+++ b/drivers/net/wireless/cw1200/cw1200_sdio.c
@@ -108,9 +108,9 @@ static irqreturn_t cw1200_gpio_irq(int irq, void *dev_id)
struct hwbus_priv *self = dev_id;
if (self->core) {
- sdio_claim_host(self->func);
+ cw1200_sdio_lock(self);
cw1200_irq_handler(self->core);
- sdio_release_host(self->func);
+ cw1200_sdio_unlock(self);
return IRQ_HANDLED;
} else {
return IRQ_NONE;
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
index acdff0f7f952..5a9ffd3a6a6c 100644
--- a/drivers/net/wireless/cw1200/fwio.c
+++ b/drivers/net/wireless/cw1200/fwio.c
@@ -14,7 +14,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/firmware.h>
diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c
index 090f01577dd2..3e78cc3ccb78 100644
--- a/drivers/net/wireless/cw1200/main.c
+++ b/drivers/net/wireless/cw1200/main.c
@@ -21,7 +21,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
@@ -302,7 +301,6 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
- hw->channel_change_time = 1000; /* TODO: find actual value */
hw->queues = 4;
priv->rts_threshold = -1;
diff --git a/drivers/net/wireless/cw1200/pm.c b/drivers/net/wireless/cw1200/pm.c
index b37abb9f0453..6907c8fd4578 100644
--- a/drivers/net/wireless/cw1200/pm.c
+++ b/drivers/net/wireless/cw1200/pm.c
@@ -225,7 +225,7 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
cw1200_set_pm(priv, &priv->powersave_mode);
if (wait_event_interruptible_timeout(priv->ps_mode_switch_done,
!priv->ps_mode_switch_in_progress, 1*HZ) <= 0) {
- goto revert3;
+ goto revert4;
}
}
@@ -254,11 +254,11 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
/* Stop serving thread */
if (cw1200_bh_suspend(priv))
- goto revert4;
+ goto revert5;
ret = timer_pending(&priv->mcast_timeout);
if (ret)
- goto revert5;
+ goto revert6;
/* Store suspend state */
pm_state->suspend_state = state;
@@ -280,9 +280,9 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
return 0;
-revert5:
+revert6:
WARN_ON(cw1200_bh_resume(priv));
-revert4:
+revert5:
cw1200_resume_work(priv, &priv->bss_loss_work,
state->bss_loss_tmo);
cw1200_resume_work(priv, &priv->join_timeout,
@@ -291,6 +291,7 @@ revert4:
state->direct_probe);
cw1200_resume_work(priv, &priv->link_id_gc_work,
state->link_id_gc);
+revert4:
kfree(state);
revert3:
wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off);
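The cw1200 pm.c hunks above renumber the unwind labels so that the new early failure (the power-save mode-switch timeout) still frees the allocated suspend state. A stripped-down, hypothetical sketch of the resulting goto-unwind shape; the function name, error codes, and the 16-byte allocation are illustrative only:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>                 /* kzalloc(), kfree() */

static int suspend_unwind_sketch(bool mode_switch_ok, bool bh_ok)
{
        void *state = kzalloc(16, GFP_KERNEL);

        if (!state)
                return -ENOMEM;
        if (!mode_switch_ok)
                goto revert4;           /* new early path: only free state */
        if (!bh_ok)
                goto revert5;           /* later failure: undo more first */
        kfree(state);
        return 0;

revert5:
        /* undo the work started after the mode switch here */
revert4:
        kfree(state);
        return -EBUSY;
}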
diff --git a/drivers/net/wireless/cw1200/scan.c b/drivers/net/wireless/cw1200/scan.c
index ee3c19037aac..9afcd4ce3368 100644
--- a/drivers/net/wireless/cw1200/scan.c
+++ b/drivers/net/wireless/cw1200/scan.c
@@ -173,8 +173,9 @@ void cw1200_scan_work(struct work_struct *work)
cw1200_set_pm(priv, &priv->powersave_mode);
if (priv->scan.status < 0)
- wiphy_dbg(priv->hw->wiphy, "[SCAN] Scan failed (%d).\n",
- priv->scan.status);
+ wiphy_warn(priv->hw->wiphy,
+ "[SCAN] Scan failed (%d).\n",
+ priv->scan.status);
else if (priv->scan.req)
wiphy_dbg(priv->hw->wiphy,
"[SCAN] Scan completed.\n");
@@ -197,9 +198,9 @@ void cw1200_scan_work(struct work_struct *work)
if ((*it)->band != first->band)
break;
if (((*it)->flags ^ first->flags) &
- IEEE80211_CHAN_PASSIVE_SCAN)
+ IEEE80211_CHAN_NO_IR)
break;
- if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+ if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
(*it)->max_power != first->max_power)
break;
}
@@ -210,7 +211,7 @@ void cw1200_scan_work(struct work_struct *work)
else
scan.max_tx_rate = WSM_TRANSMIT_RATE_1;
scan.num_probes =
- (first->flags & IEEE80211_CHAN_PASSIVE_SCAN) ? 0 : 2;
+ (first->flags & IEEE80211_CHAN_NO_IR) ? 0 : 2;
scan.num_ssids = priv->scan.n_ssids;
scan.ssids = &priv->scan.ssids[0];
scan.num_channels = it - priv->scan.curr;
@@ -233,7 +234,7 @@ void cw1200_scan_work(struct work_struct *work)
}
for (i = 0; i < scan.num_channels; ++i) {
scan.ch[i].number = priv->scan.curr[i]->hw_value;
- if (priv->scan.curr[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN) {
+ if (priv->scan.curr[i]->flags & IEEE80211_CHAN_NO_IR) {
scan.ch[i].min_chan_time = 50;
scan.ch[i].max_chan_time = 100;
} else {
@@ -241,7 +242,7 @@ void cw1200_scan_work(struct work_struct *work)
scan.ch[i].max_chan_time = 25;
}
}
- if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+ if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
priv->scan.output_power != first->max_power) {
priv->scan.output_power = first->max_power;
wsm_set_output_power(priv,
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 010b252be584..103f7bce8932 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/etherdevice.h>
#include "cw1200.h"
#include "sta.h"
@@ -555,8 +556,8 @@ u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
pr_debug("[STA] multicast: %pM\n", ha->addr);
memcpy(&priv->multicast_filter.macaddrs[count],
ha->addr, ETH_ALEN);
- if (memcmp(ha->addr, broadcast_ipv4, ETH_ALEN) &&
- memcmp(ha->addr, broadcast_ipv6, ETH_ALEN))
+ if (!ether_addr_equal(ha->addr, broadcast_ipv4) &&
+ !ether_addr_equal(ha->addr, broadcast_ipv6))
priv->has_multicast_subscription = true;
count++;
}
diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c
index e824d4d4a18d..0bd541175ecd 100644
--- a/drivers/net/wireless/cw1200/txrx.c
+++ b/drivers/net/wireless/cw1200/txrx.c
@@ -1166,8 +1166,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
return;
} else if (ieee80211_is_beacon(frame->frame_control) &&
!arg->status && priv->vif &&
- !memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid,
- ETH_ALEN)) {
+ ether_addr_equal(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid)) {
const u8 *tim_ie;
u8 *ies = ((struct ieee80211_mgmt *)
(skb->data))->u.beacon.variable;
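Many hunks in this range (cw1200 sta.c/txrx.c above, hostap and ipw2x00 below) replace open-coded memcmp() comparisons of MAC addresses with ether_addr_equal() from <linux/etherdevice.h>, which also inverts the sense of the test (it returns true on a match). A minimal sketch of the pattern, with an illustrative helper name:

#include <linux/types.h>
#include <linux/etherdevice.h>          /* ether_addr_equal(), ETH_ALEN */

/* Equivalent to memcmp(addr, own, ETH_ALEN) == 0, written the way the
 * converted call sites above now do it. */
static bool frame_addressed_to_us(const u8 *addr, const u8 *own)
{
        return ether_addr_equal(addr, own);
}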
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index d39e3e24077b..599f30f22841 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -563,7 +563,7 @@ hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr *hdr, u16 fc,
/* Possible WDS frame: either IEEE 802.11 compliant (if FromDS)
* or own non-standard frame with 4th address after payload */
- if (memcmp(hdr->addr1, local->dev->dev_addr, ETH_ALEN) != 0 &&
+ if (!ether_addr_equal(hdr->addr1, local->dev->dev_addr) &&
(hdr->addr1[0] != 0xff || hdr->addr1[1] != 0xff ||
hdr->addr1[2] != 0xff || hdr->addr1[3] != 0xff ||
hdr->addr1[4] != 0xff || hdr->addr1[5] != 0xff)) {
@@ -622,12 +622,12 @@ static int hostap_is_eapol_frame(local_info_t *local, struct sk_buff *skb)
/* check that the frame is unicast frame to us */
if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
IEEE80211_FCTL_TODS &&
- memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
- memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
+ ether_addr_equal(hdr->addr1, dev->dev_addr) &&
+ ether_addr_equal(hdr->addr3, dev->dev_addr)) {
/* ToDS frame with own addr BSSID and DA */
} else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
IEEE80211_FCTL_FROMDS &&
- memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+ ether_addr_equal(hdr->addr1, dev->dev_addr)) {
/* FromDS frame with own addr as DA */
} else
return 0;
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 344a981a052e..8bde77689469 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -1,5 +1,6 @@
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/etherdevice.h>
#include "hostap_80211.h"
#include "hostap_common.h"
@@ -103,8 +104,7 @@ netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
} else if (local->iw_mode == IW_MODE_INFRA &&
(local->wds_type & HOSTAP_WDS_AP_CLIENT) &&
- memcmp(skb->data + ETH_ALEN, dev->dev_addr,
- ETH_ALEN) != 0) {
+ !ether_addr_equal(skb->data + ETH_ALEN, dev->dev_addr)) {
/* AP client mode: send frames with foreign src addr
* using 4-addr WDS frames */
use_wds = WDS_COMPLIANT_FRAME;
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index d6033a8e5dea..d36e252d2ccb 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
+#include <linux/etherdevice.h>
#include "hostap_wlan.h"
#include "hostap.h"
@@ -106,13 +107,12 @@ static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta)
s = ap->sta_hash[STA_HASH(sta->addr)];
if (s == NULL) return;
- if (memcmp(s->addr, sta->addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(s->addr, sta->addr)) {
ap->sta_hash[STA_HASH(sta->addr)] = s->hnext;
return;
}
- while (s->hnext != NULL && memcmp(s->hnext->addr, sta->addr, ETH_ALEN)
- != 0)
+ while (s->hnext != NULL && !ether_addr_equal(s->hnext->addr, sta->addr))
s = s->hnext;
if (s->hnext != NULL)
s->hnext = s->hnext->hnext;
@@ -435,7 +435,7 @@ int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
ptr != &mac_restrictions->mac_list; ptr = ptr->next) {
entry = list_entry(ptr, struct mac_entry, list);
- if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
+ if (ether_addr_equal(entry->addr, mac)) {
list_del(ptr);
kfree(entry);
mac_restrictions->entries--;
@@ -459,7 +459,7 @@ static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
spin_lock_bh(&mac_restrictions->lock);
list_for_each_entry(entry, &mac_restrictions->mac_list, list) {
- if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
+ if (ether_addr_equal(entry->addr, mac)) {
found = 1;
break;
}
@@ -957,7 +957,7 @@ static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta)
struct sta_info *s;
s = ap->sta_hash[STA_HASH(sta)];
- while (s != NULL && memcmp(s->addr, sta, ETH_ALEN) != 0)
+ while (s != NULL && !ether_addr_equal(s->addr, sta))
s = s->hnext;
return s;
}
@@ -1391,7 +1391,7 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb,
status_code = __le16_to_cpu(*pos);
pos++;
- if (memcmp(dev->dev_addr, hdr->addr2, ETH_ALEN) == 0 ||
+ if (ether_addr_equal(dev->dev_addr, hdr->addr2) ||
ap_control_mac_deny(&ap->mac_restrictions, hdr->addr2)) {
txt = "authentication denied";
resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
@@ -1935,7 +1935,7 @@ static void handle_pspoll(local_info_t *local,
PDEBUG(DEBUG_PS2, "handle_pspoll: BSSID=%pM, TA=%pM PWRMGT=%d\n",
hdr->addr1, hdr->addr2, !!ieee80211_has_pm(hdr->frame_control));
- if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
PDEBUG(DEBUG_AP,
"handle_pspoll - addr1(BSSID)=%pM not own MAC\n",
hdr->addr1);
@@ -2230,7 +2230,7 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
goto done;
}
- if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
PDEBUG(DEBUG_AP, "handle_ap_item - addr1(BSSID)=%pM"
" not own MAC\n", hdr->addr1);
goto done;
@@ -2267,13 +2267,13 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
goto done;
}
- if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
PDEBUG(DEBUG_AP, "handle_ap_item - addr1(DA)=%pM"
" not own MAC\n", hdr->addr1);
goto done;
}
- if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(hdr->addr3, dev->dev_addr)) {
PDEBUG(DEBUG_AP, "handle_ap_item - addr3(BSSID)=%pM"
" not own MAC\n", hdr->addr3);
goto done;
@@ -3035,7 +3035,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
if (!wds) {
/* FromDS frame - not for us; probably
* broadcast/multicast in another BSS - drop */
- if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(hdr->addr1, dev->dev_addr)) {
printk(KERN_DEBUG "Odd.. FromDS packet "
"received with own BSSID\n");
hostap_dump_rx_80211(dev->name, skb, rx_stats);
@@ -3044,7 +3044,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
goto out;
}
} else if (stype == IEEE80211_STYPE_NULLFUNC && sta == NULL &&
- memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+ ether_addr_equal(hdr->addr1, dev->dev_addr)) {
if (local->hostapd) {
prism2_rx_80211(local->apdev, skb, rx_stats,
@@ -3073,7 +3073,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
/* If BSSID (Addr3) is foreign, this frame is a normal
* broadcast frame from an IBSS network. Drop it silently.
* If BSSID is own, report the dropping of this frame. */
- if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(hdr->addr3, dev->dev_addr)) {
printk(KERN_DEBUG "%s: dropped received packet from %pM"
" with no ToDS flag "
"(type=0x%02x, subtype=0x%02x)\n", dev->name,
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 56cd01ca8ad0..9f825f2620da 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -1,7 +1,6 @@
#define PRISM2_PCCARD
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/if.h>
#include <linux/slab.h>
#include <linux/wait.h>
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index c275dc1623fe..6df3ee561d52 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -2175,7 +2175,7 @@ static void hostap_tx_callback(local_info_t *local,
struct hostap_tx_callback_info *cb;
/* Make sure that frame was from us. */
- if (memcmp(txdesc->addr2, local->dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(txdesc->addr2, local->dev->dev_addr)) {
printk(KERN_DEBUG "%s: TX callback - foreign frame\n",
local->dev->name);
return;
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index e5090309824e..3e5fa7872b64 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -655,7 +655,7 @@ static int hostap_join_ap(struct net_device *dev)
if (!local->last_scan_results)
break;
entry = &local->last_scan_results[i];
- if (memcmp(local->preferred_ap, entry->bssid, ETH_ALEN) == 0) {
+ if (ether_addr_equal(local->preferred_ap, entry->bssid)) {
req.channel = entry->chid;
break;
}
@@ -1978,7 +1978,7 @@ static inline int prism2_translate_scan(local_info_t *local,
list_for_each(ptr, &local->bss_list) {
struct hostap_bss_info *bss;
bss = list_entry(ptr, struct hostap_bss_info, list);
- if (memcmp(bss->bssid, scan->bssid, ETH_ALEN) == 0) {
+ if (ether_addr_equal(bss->bssid, scan->bssid)) {
bss->included = 1;
current_ev = __prism2_translate_scan(
local, info, scan, bss, current_ev,
@@ -2567,7 +2567,7 @@ static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
local->passive_scan_interval = value;
if (timer_pending(&local->passive_scan_timer))
del_timer(&local->passive_scan_timer);
- if (value > 0) {
+ if (value > 0 && value < INT_MAX / HZ) {
local->passive_scan_timer.expires = jiffies +
local->passive_scan_interval * HZ;
add_timer(&local->passive_scan_timer);
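The prism2 ioctl hunk above adds an upper bound before multiplying the user-supplied interval by HZ, so the expiry computation cannot overflow an int. A hedged, self-contained sketch of the same guard; the helper name is illustrative:

#include <linux/types.h>
#include <linux/kernel.h>               /* INT_MAX */
#include <linux/jiffies.h>              /* HZ */

/* Illustrative: reject intervals that would overflow when scaled to jiffies. */
static bool passive_interval_valid(int seconds)
{
        return seconds > 0 && seconds < INT_MAX / HZ;
}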
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index a1257c92afc4..67db34e56d7e 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -155,8 +155,7 @@ int prism2_wds_add(local_info_t *local, u8 *remote_addr,
if (prism2_wds_special_addr(iface->u.wds.remote_addr))
empty = iface;
- else if (memcmp(iface->u.wds.remote_addr, remote_addr,
- ETH_ALEN) == 0) {
+ else if (ether_addr_equal(iface->u.wds.remote_addr, remote_addr)) {
match = iface;
break;
}
@@ -214,8 +213,7 @@ int prism2_wds_del(local_info_t *local, u8 *remote_addr,
if (iface->type != HOSTAP_INTERFACE_WDS)
continue;
- if (memcmp(iface->u.wds.remote_addr, remote_addr,
- ETH_ALEN) == 0) {
+ if (ether_addr_equal(iface->u.wds.remote_addr, remote_addr)) {
selected = iface;
break;
}
@@ -1085,7 +1083,7 @@ int prism2_sta_deauth(local_info_t *local, u16 reason)
if (local->iw_mode != IW_MODE_INFRA ||
is_zero_ether_addr(local->bssid) ||
- memcmp(local->bssid, "\x44\x44\x44\x44\x44\x44", ETH_ALEN) == 0)
+ ether_addr_equal(local->bssid, "\x44\x44\x44\x44\x44\x44"))
return 0;
ret = prism2_sta_send_mgmt(local, local->bssid, IEEE80211_STYPE_DEAUTH,
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 05ca3402dca7..91158e2e961c 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -5,7 +5,6 @@
* Andy Warner <andyw@pobox.com> */
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index c3d067ee4db9..3bf530d9a40f 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index f8ab193009cd..3aba49259ef1 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1930,10 +1930,10 @@ static int ipw2100_wdev_init(struct net_device *dev)
bg_band->channels[i].max_power = geo->bg[i].max_power;
if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
bg_band->channels[i].flags |=
- IEEE80211_CHAN_PASSIVE_SCAN;
+ IEEE80211_CHAN_NO_IR;
if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
bg_band->channels[i].flags |=
- IEEE80211_CHAN_NO_IBSS;
+ IEEE80211_CHAN_NO_IR;
if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
bg_band->channels[i].flags |=
IEEE80211_CHAN_RADAR;
@@ -6362,7 +6362,6 @@ out:
&ipw2100_attribute_group);
free_libipw(dev, 0);
- pci_set_drvdata(pci_dev, NULL);
}
pci_iounmap(pci_dev, ioaddr);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 81903e33d5b1..139326065bd9 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -3012,7 +3012,7 @@ static void ipw_remove_current_network(struct ipw_priv *priv)
spin_lock_irqsave(&priv->ieee->lock, flags);
list_for_each_safe(element, safe, &priv->ieee->network_list) {
network = list_entry(element, struct libipw_network, list);
- if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
+ if (ether_addr_equal(network->bssid, priv->bssid)) {
list_del(element);
list_add_tail(&network->list,
&priv->ieee->network_free_list);
@@ -3921,7 +3921,7 @@ static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
int i;
for (i = 0; i < priv->num_stations; i++) {
- if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
+ if (ether_addr_equal(priv->stations[i], bssid)) {
/* Another node is active in network */
priv->missed_adhoc_beacons = 0;
if (!(priv->config & CFG_STATIC_CHANNEL))
@@ -3953,7 +3953,7 @@ static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
int i;
for (i = 0; i < priv->num_stations; i++)
- if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
+ if (ether_addr_equal(priv->stations[i], bssid))
return i;
return IPW_INVALID_STATION;
@@ -5622,7 +5622,7 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
return 0;
}
- if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
+ if (ether_addr_equal(network->bssid, priv->bssid)) {
IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
"because of the same BSSID match: %pM"
".\n", print_ssid(ssid, network->ssid,
@@ -5849,7 +5849,7 @@ static int ipw_best_network(struct ipw_priv *priv,
}
if ((priv->config & CFG_STATIC_BSSID) &&
- memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
+ !ether_addr_equal(network->bssid, priv->bssid)) {
IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
"because of BSSID mismatch: %pM.\n",
print_ssid(ssid, network->ssid,
@@ -6988,7 +6988,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
}
if ((priv->status & STATUS_ASSOCIATED) &&
(priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
- if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
+ if (!ether_addr_equal(network->bssid, priv->bssid))
if (network->capability & WLAN_CAPABILITY_IBSS)
if ((network->ssid_len ==
priv->assoc_network->ssid_len) &&
@@ -8210,29 +8210,29 @@ static int is_network_packet(struct ipw_priv *priv,
switch (priv->ieee->iw_mode) {
case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
/* packets from our adapter are dropped (echo) */
- if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
+ if (ether_addr_equal(header->addr2, priv->net_dev->dev_addr))
return 0;
/* {broad,multi}cast packets to our BSSID go through */
if (is_multicast_ether_addr(header->addr1))
- return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
+ return ether_addr_equal(header->addr3, priv->bssid);
/* packets to our adapter go through */
- return !memcmp(header->addr1, priv->net_dev->dev_addr,
- ETH_ALEN);
+ return ether_addr_equal(header->addr1,
+ priv->net_dev->dev_addr);
case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
/* packets from our adapter are dropped (echo) */
- if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
+ if (ether_addr_equal(header->addr3, priv->net_dev->dev_addr))
return 0;
/* {broad,multi}cast packets to our BSS go through */
if (is_multicast_ether_addr(header->addr1))
- return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
+ return ether_addr_equal(header->addr2, priv->bssid);
/* packets to our adapter go through */
- return !memcmp(header->addr1, priv->net_dev->dev_addr,
- ETH_ALEN);
+ return ether_addr_equal(header->addr1,
+ priv->net_dev->dev_addr);
}
return 1;
@@ -8260,7 +8260,7 @@ static int is_duplicate_packet(struct ipw_priv *priv,
list_for_each(p, &priv->ibss_mac_hash[index]) {
entry =
list_entry(p, struct ipw_ibss_seq, list);
- if (!memcmp(entry->mac, mac, ETH_ALEN))
+ if (ether_addr_equal(entry->mac, mac))
break;
}
if (p == &priv->ibss_mac_hash[index]) {
@@ -8329,7 +8329,7 @@ static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
IEEE80211_STYPE_PROBE_RESP) ||
(WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
IEEE80211_STYPE_BEACON))) {
- if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
+ if (ether_addr_equal(header->addr3, priv->bssid))
ipw_add_station(priv, header->addr2);
}
@@ -9045,7 +9045,7 @@ static int ipw_wx_set_wap(struct net_device *dev,
}
priv->config |= CFG_STATIC_BSSID;
- if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+ if (ether_addr_equal(priv->bssid, wrqu->ap_addr.sa_data)) {
IPW_DEBUG_WX("BSSID set to current BSSID.\n");
mutex_unlock(&priv->mutex);
return 0;
@@ -11472,10 +11472,10 @@ static int ipw_wdev_init(struct net_device *dev)
bg_band->channels[i].max_power = geo->bg[i].max_power;
if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
bg_band->channels[i].flags |=
- IEEE80211_CHAN_PASSIVE_SCAN;
+ IEEE80211_CHAN_NO_IR;
if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
bg_band->channels[i].flags |=
- IEEE80211_CHAN_NO_IBSS;
+ IEEE80211_CHAN_NO_IR;
if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
bg_band->channels[i].flags |=
IEEE80211_CHAN_RADAR;
@@ -11511,10 +11511,10 @@ static int ipw_wdev_init(struct net_device *dev)
a_band->channels[i].max_power = geo->a[i].max_power;
if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
a_band->channels[i].flags |=
- IEEE80211_CHAN_PASSIVE_SCAN;
+ IEEE80211_CHAN_NO_IR;
if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
a_band->channels[i].flags |=
- IEEE80211_CHAN_NO_IBSS;
+ IEEE80211_CHAN_NO_IR;
if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
a_band->channels[i].flags |=
IEEE80211_CHAN_RADAR;
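The ipw2100/ipw2200 hunks above fold the old IEEE80211_CHAN_PASSIVE_SCAN and IEEE80211_CHAN_NO_IBSS flags into the single IEEE80211_CHAN_NO_IR ("no initiating radiation") flag, so both conditions now set and test the same bit. A hedged sketch of the resulting check, with an illustrative helper name:

#include <net/cfg80211.h>               /* struct ieee80211_channel, IEEE80211_CHAN_NO_IR */

/* True when the channel may be actively probed; under the new scheme the
 * single NO_IR bit covers what passive-scan-only and no-IBSS used to. */
static bool channel_allows_active_scan(const struct ieee80211_channel *chan)
{
        return !(chan->flags & IEEE80211_CHAN_NO_IR);
}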
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index 570d6fb88967..aa301d1eee3c 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -29,7 +29,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 9ffe65931b29..a586a85bfcfe 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -874,13 +874,13 @@ void libipw_rx_any(struct libipw_device *ieee,
switch (ieee->iw_mode) {
case IW_MODE_ADHOC:
/* our BSS and not from/to DS */
- if (memcmp(hdr->addr3, ieee->bssid, ETH_ALEN) == 0)
+ if (ether_addr_equal(hdr->addr3, ieee->bssid))
if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == 0) {
/* promisc: get all */
if (ieee->dev->flags & IFF_PROMISC)
is_packet_for_us = 1;
/* to us */
- else if (memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN) == 0)
+ else if (ether_addr_equal(hdr->addr1, ieee->dev->dev_addr))
is_packet_for_us = 1;
/* mcast */
else if (is_multicast_ether_addr(hdr->addr1))
@@ -889,18 +889,18 @@ void libipw_rx_any(struct libipw_device *ieee,
break;
case IW_MODE_INFRA:
/* our BSS (== from our AP) and from DS */
- if (memcmp(hdr->addr2, ieee->bssid, ETH_ALEN) == 0)
+ if (ether_addr_equal(hdr->addr2, ieee->bssid))
if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS) {
/* promisc: get all */
if (ieee->dev->flags & IFF_PROMISC)
is_packet_for_us = 1;
/* to us */
- else if (memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN) == 0)
+ else if (ether_addr_equal(hdr->addr1, ieee->dev->dev_addr))
is_packet_for_us = 1;
/* mcast */
else if (is_multicast_ether_addr(hdr->addr1)) {
/* not our own packet bcasted from AP */
- if (memcmp(hdr->addr3, ieee->dev->dev_addr, ETH_ALEN))
+ if (!ether_addr_equal(hdr->addr3, ieee->dev->dev_addr))
is_packet_for_us = 1;
}
}
@@ -1468,7 +1468,7 @@ static inline int is_same_network(struct libipw_network *src,
* as one network */
return ((src->ssid_len == dst->ssid_len) &&
(src->channel == dst->channel) &&
- ether_addr_equal(src->bssid, dst->bssid) &&
+ ether_addr_equal_64bits(src->bssid, dst->bssid) &&
!memcmp(src->ssid, dst->ssid, src->ssid_len));
}
diff --git a/drivers/net/wireless/iwlegacy/3945-debug.c b/drivers/net/wireless/iwlegacy/3945-debug.c
index f767dd106b09..c1b4441fb8b2 100644
--- a/drivers/net/wireless/iwlegacy/3945-debug.c
+++ b/drivers/net/wireless/iwlegacy/3945-debug.c
@@ -48,7 +48,7 @@ il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
return p;
}
-ssize_t
+static ssize_t
il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -313,7 +313,7 @@ il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
return ret;
}
-ssize_t
+static ssize_t
il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -403,7 +403,7 @@ il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
return ret;
}
-ssize_t
+static ssize_t
il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index dea3b50d68b9..0487461ae4da 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -1595,7 +1595,7 @@ il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
* and use long active_dwell time.
*/
if (!is_active || il_is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
+ (chan->flags & IEEE80211_CHAN_NO_IR)) {
scan_ch->type = 0; /* passive */
if (IL_UCODE_API(il->ucode_ver) == 1)
scan_ch->active_dwell =
@@ -2396,8 +2396,7 @@ __il3945_up(struct il_priv *il)
clear_bit(S_RFKILL, &il->status);
else {
set_bit(S_RFKILL, &il->status);
- IL_WARN("Radio disabled by HW RF Kill switch\n");
- return -ENODEV;
+ return -ERFKILL;
}
_il_wr(il, CSR_INT, 0xFFFFFFFF);
@@ -3575,9 +3574,9 @@ il3945_setup_mac(struct il_priv *il)
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
- hw->wiphy->flags |=
- WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
- WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_DISABLE_BEACON_HINTS;
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index aea667b430c3..9a45f6f626f6 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -25,7 +25,6 @@
*****************************************************************************/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index f09e257759d5..d37a6fd90d40 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@@ -466,10 +465,10 @@ il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
switch (il->iw_mode) {
case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
/* packets to our IBSS update information */
- return ether_addr_equal(header->addr3, il->bssid);
+ return ether_addr_equal_64bits(header->addr3, il->bssid);
case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
/* packets to our IBSS update information */
- return ether_addr_equal(header->addr2, il->bssid);
+ return ether_addr_equal_64bits(header->addr2, il->bssid);
default:
return 1;
}
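il3945_is_network_packet() above switches from ether_addr_equal() to ether_addr_equal_64bits(), which compares via a 64-bit load and is only safe when both addresses live inside buffers with at least two valid bytes past the 6-byte address (true for ieee80211_hdr address fields and for il->bssid embedded in il_priv). A hedged sketch under that same assumption; the helper name is illustrative:

#include <linux/types.h>
#include <linux/etherdevice.h>          /* ether_addr_equal_64bits() */
#include <linux/ieee80211.h>            /* struct ieee80211_hdr */

/* Illustrative only: both arguments must sit inside structures that extend
 * at least two bytes beyond the address, or the 64-bit load is not safe. */
static bool hdr_bssid_matches(const struct ieee80211_hdr *hdr, const u8 *bssid)
{
        return ether_addr_equal_64bits(hdr->addr3, bssid);
}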
diff --git a/drivers/net/wireless/iwlegacy/4965-debug.c b/drivers/net/wireless/iwlegacy/4965-debug.c
index c8153fc64f74..e0597bfdddb8 100644
--- a/drivers/net/wireless/iwlegacy/4965-debug.c
+++ b/drivers/net/wireless/iwlegacy/4965-debug.c
@@ -55,7 +55,7 @@ il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
return p;
}
-ssize_t
+static ssize_t
il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -467,7 +467,7 @@ il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
return ret;
}
-ssize_t
+static ssize_t
il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -633,7 +633,7 @@ il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
return ret;
}
-ssize_t
+static ssize_t
il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 3982ab76f375..43f488a8cda2 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -805,7 +805,7 @@ il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
}
if (!is_active || il_is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ (chan->flags & IEEE80211_CHAN_NO_IR))
scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
else
scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
@@ -5778,9 +5778,9 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
- hw->wiphy->flags |=
- WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
- WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_DISABLE_BEACON_HINTS;
/*
* For now, disable PS by default because it affects
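The setup-register hunks above (3945, 4965, and dvm/mac80211.c later in this diff) move the custom-regulatory and beacon-hint settings from wiphy->flags to the newer wiphy->regulatory_flags field. A minimal sketch of the registration-time pattern, with an illustrative function name:

#include <net/cfg80211.h>               /* struct wiphy, REGULATORY_* flags */

/* IBSS_RSN stays in wiphy->flags; regulatory behaviour now has its own
 * field, as in the converted drivers above. */
static void apply_regulatory_setup(struct wiphy *wiphy)
{
        wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
        wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
                                   REGULATORY_DISABLE_BEACON_HINTS;
}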
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 3ccbaf791b48..4d5e33259ca8 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -24,7 +24,6 @@
*
*****************************************************************************/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
index 777a578294bd..fe47db9c20cd 100644
--- a/drivers/net/wireless/iwlegacy/4965.c
+++ b/drivers/net/wireless/iwlegacy/4965.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index b03e22ef5462..02e8233ccf29 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -33,7 +33,6 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/lockdep.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
@@ -3445,10 +3444,10 @@ il_init_geos(struct il_priv *il)
if (il_is_channel_valid(ch)) {
if (!(ch->flags & EEPROM_CHANNEL_IBSS))
- geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
+ geo_ch->flags |= IEEE80211_CHAN_NO_IR;
if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
- geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+ geo_ch->flags |= IEEE80211_CHAN_NO_IR;
if (ch->flags & EEPROM_CHANNEL_RADAR)
geo_ch->flags |= IEEE80211_CHAN_RADAR;
@@ -3746,10 +3745,10 @@ il_full_rxon_required(struct il_priv *il)
/* These items are only settable from the full RXON command */
CHK(!il_is_associated(il));
- CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr));
- CHK(!ether_addr_equal(staging->node_addr, active->node_addr));
- CHK(!ether_addr_equal(staging->wlap_bssid_addr,
- active->wlap_bssid_addr));
+ CHK(!ether_addr_equal_64bits(staging->bssid_addr, active->bssid_addr));
+ CHK(!ether_addr_equal_64bits(staging->node_addr, active->node_addr));
+ CHK(!ether_addr_equal_64bits(staging->wlap_bssid_addr,
+ active->wlap_bssid_addr));
CHK_NEQ(staging->dev_type, active->dev_type);
CHK_NEQ(staging->channel, active->channel);
CHK_NEQ(staging->air_propagation, active->air_propagation);
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
index eff26501d60a..344010153196 100644
--- a/drivers/net/wireless/iwlegacy/debug.c
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -31,7 +31,7 @@
#include "common.h"
-void
+static void
il_clear_traffic_stats(struct il_priv *il)
{
memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
@@ -567,12 +567,12 @@ il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
flags & IEEE80211_CHAN_RADAR ?
" (IEEE 802.11h required)" : "",
((channels[i].
- flags & IEEE80211_CHAN_NO_IBSS) ||
+ flags & IEEE80211_CHAN_NO_IR) ||
(channels[i].
flags & IEEE80211_CHAN_RADAR)) ? "" :
", IBSS",
channels[i].
- flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+ flags & IEEE80211_CHAN_NO_IR ?
"passive only" : "active/passive");
}
supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
@@ -594,12 +594,12 @@ il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
flags & IEEE80211_CHAN_RADAR ?
" (IEEE 802.11h required)" : "",
((channels[i].
- flags & IEEE80211_CHAN_NO_IBSS) ||
+ flags & IEEE80211_CHAN_NO_IR) ||
(channels[i].
flags & IEEE80211_CHAN_RADAR)) ? "" :
", IBSS",
channels[i].
- flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+ flags & IEEE80211_CHAN_NO_IR ?
"passive only" : "active/passive");
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 23d5f0275ce9..562772d85102 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index 1b0f0d502568..be1086c87157 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.h b/drivers/net/wireless/iwlwifi/dvm/calib.h
index cfddde194940..aeae4e80ea40 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.h
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index ebdac909f0cd..751ae1d10b7f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index d94f8ab15004..d2fe2596d54e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -352,12 +352,12 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
channels[i].max_power,
channels[i].flags & IEEE80211_CHAN_RADAR ?
" (IEEE 802.11h required)" : "",
- ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
+ ((channels[i].flags & IEEE80211_CHAN_NO_IR)
|| (channels[i].flags &
IEEE80211_CHAN_RADAR)) ? "" :
", IBSS",
channels[i].flags &
- IEEE80211_CHAN_PASSIVE_SCAN ?
+ IEEE80211_CHAN_NO_IR ?
"passive only" : "active/passive");
}
supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
@@ -375,12 +375,12 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
channels[i].max_power,
channels[i].flags & IEEE80211_CHAN_RADAR ?
" (IEEE 802.11h required)" : "",
- ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
+ ((channels[i].flags & IEEE80211_CHAN_NO_IR)
|| (channels[i].flags &
IEEE80211_CHAN_RADAR)) ? "" :
", IBSS",
channels[i].flags &
- IEEE80211_CHAN_PASSIVE_SCAN ?
+ IEEE80211_CHAN_NO_IR ?
"passive only" : "active/passive");
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 7434d9edf3b7..3441f70d0ff9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 352c6cb7b4f1..7b140e487deb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.c b/drivers/net/wireless/iwlwifi/dvm/led.c
index 33c7e15d24f5..ca4d6692cc4e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/led.c
+++ b/drivers/net/wireless/iwlwifi/dvm/led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.h b/drivers/net/wireless/iwlwifi/dvm/led.h
index 8749dcfe695f..6a0817d9c4fa 100644
--- a/drivers/net/wireless/iwlwifi/dvm/led.h
+++ b/drivers/net/wireless/iwlwifi/dvm/led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 3d5bdc4217a8..576f7ee38ca5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,6 @@
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/sched.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index cae4d3182e33..c24d1d3d55f6 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -28,7 +28,6 @@
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
@@ -155,9 +154,9 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
}
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS |
- WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_DISABLE_BEACON_HINTS;
#ifdef CONFIG_PM_SLEEP
if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
@@ -322,12 +321,6 @@ static void iwlagn_mac_stop(struct ieee80211_hw *hw)
flush_workqueue(priv->workqueue);
- /* User space software may expect getting rfkill changes
- * even if interface is down, trans->down will leave the RF
- * kill interrupt enabled
- */
- iwl_trans_stop_hw(priv->trans, false);
-
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -413,9 +406,8 @@ static bool iwl_resume_status_fn(struct iwl_notif_wait_data *notif_wait,
{
struct iwl_resume_data *resume_data = data;
struct iwl_priv *priv = resume_data->priv;
- u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- if (len - 4 != sizeof(*resume_data->cmd)) {
+ if (iwl_rx_packet_payload_len(pkt) != sizeof(*resume_data->cmd)) {
IWL_ERR(priv, "rx wrong size data\n");
return true;
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 7aad766865cf..ba1b1ea54252 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -1313,7 +1313,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
}
/* Reset chip to save power until we load uCode during "up". */
- iwl_trans_stop_hw(priv->trans, false);
+ iwl_trans_stop_device(priv->trans);
priv->nvm_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg,
priv->eeprom_blob,
@@ -1458,7 +1458,7 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
dev_kfree_skb(priv->beacon_skb);
- iwl_trans_stop_hw(priv->trans, true);
+ iwl_trans_op_mode_leave(priv->trans);
ieee80211_free_hw(priv->hw);
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index 77cb59712235..b4e61417013a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <net/mac80211.h>
#include "iwl-io.h"
#include "iwl-debug.h"
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.h b/drivers/net/wireless/iwlwifi/dvm/power.h
index 7b03e1342d47..570d3a5e4670 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.h
+++ b/drivers/net/wireless/iwlwifi/dvm/power.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index b647e506564c..0977d93b529d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -24,7 +24,6 @@
*
*****************************************************************************/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index 26fc550cd68c..bdd5644a400b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -389,13 +389,6 @@ struct iwl_lq_sta {
u8 last_bt_traffic;
};
-static inline u8 num_of_ant(u8 mask)
-{
- return !!((mask) & ANT_A) +
- !!((mask) & ANT_B) +
- !!((mask) & ANT_C);
-}
-
static inline u8 first_antenna(u8 mask)
{
if (mask & ANT_A)
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index d71776dd1e6a..7a1bc1c547e1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -205,8 +205,7 @@ static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 __maybe_unused len =
- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ u32 __maybe_unused len = iwl_rx_packet_len(pkt);
IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
"notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
@@ -457,7 +456,7 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
const int reg_recalib_period = 60;
int change;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ u32 len = iwl_rx_packet_payload_len(pkt);
__le32 *flag;
struct statistics_general_common *common;
struct statistics_rx_non_phy *rx_non_phy;
@@ -467,8 +466,6 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
struct statistics_tx *tx;
struct statistics_bt_activity *bt_activity;
- len -= sizeof(struct iwl_cmd_header); /* skip header */
-
IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n",
len);
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index d7ce2f12a907..503a81e58185 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 35e0ee8b4e5b..be98b913ed58 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -544,7 +544,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
channel = chan->hw_value;
scan_ch->channel = cpu_to_le16(channel);
- if (!is_active || (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ if (!is_active || (chan->flags & IEEE80211_CHAN_NO_IR))
scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
else
scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index c3c13ce96eb0..c0d070c5df5e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index fbeee081ee2f..058c5892c427 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <net/mac80211.h>
#include "iwl-io.h"
#include "iwl-modparams.h"
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.h b/drivers/net/wireless/iwlwifi/dvm/tt.h
index 9356c4b908ca..507726534b84 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.h
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 1fef5240e6ad..a6839dfcb82d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/sched.h>
#include <linux/ieee80211.h>
#include "iwl-io.h"
@@ -368,6 +367,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
goto drop_unlock_priv;
memset(dev_cmd, 0, sizeof(*dev_cmd));
+ dev_cmd->hdr.cmd = REPLY_TX;
tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
/* Total # bytes to be transmitted */
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 63637949a146..cf03ef5619d9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,7 +28,6 @@
*****************************************************************************/
#include <linux/kernel.h>
-#include <linux/init.h>
#include "iwl-io.h"
#include "iwl-agn-hw.h"
@@ -389,7 +388,6 @@ static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
{
struct iwl_priv *priv = data;
struct iwl_calib_hdr *hdr;
- int len;
if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) {
WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION);
@@ -397,12 +395,8 @@ static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
}
hdr = (struct iwl_calib_hdr *)pkt->data;
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- /* reduce the size by the length field itself */
- len -= sizeof(__le32);
-
- if (iwl_calib_set(priv, hdr, len))
+ if (iwl_calib_set(priv, hdr, iwl_rx_packet_payload_len(pkt)))
IWL_ERR(priv, "Failed to record calibration data %d\n",
hdr->op_code);
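Several dvm hunks above (mac80211.c, rx.c, ucode.c) drop the open-coded le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK arithmetic in favour of the iwl_rx_packet_len()/iwl_rx_packet_payload_len() helpers, the latter of which also subtracts the command header. A hedged usage sketch; the wrapper name is illustrative and the helpers come from the driver-internal iwl-trans.h:

#include <linux/types.h>
#include "iwl-trans.h"                  /* struct iwl_rx_packet, iwl_rx_packet_payload_len() */

/* Illustrative size check in the style of iwl_resume_status_fn() above:
 * compare the payload length, header already excluded, against the
 * expected command structure size. */
static bool rx_payload_len_ok(struct iwl_rx_packet *pkt, size_t expected)
{
        return iwl_rx_packet_payload_len(pkt) == expected;
}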
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 0d2afe098afc..854ba84ccb73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index c727ec7c90a6..3e63323637f3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index ecc01e1a61a1..6674f2c4541c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 8ac305be68f4..8048de90233f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 3c34a72a5d64..2a59da2ff87a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -108,7 +108,7 @@ static const struct iwl_base_params iwl7000_base_params = {
};
static const struct iwl_ht_params iwl7000_ht_params = {
- .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .stbc = true,
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index 6d73f943cefa..7f37fb86837b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 03fd9aa8bfda..1ced525157dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -129,6 +129,12 @@ enum iwl_led_mode {
#define ANT_BC (ANT_B | ANT_C)
#define ANT_ABC (ANT_A | ANT_B | ANT_C)
+static inline u8 num_of_ant(u8 mask)
+{
+ return !!((mask) & ANT_A) +
+ !!((mask) & ANT_B) +
+ !!((mask) & ANT_C);
+}
/*
* @max_ll_items: max number of OTP blocks
@@ -156,12 +162,14 @@ struct iwl_base_params {
};
/*
+ * @stbc: support Tx STBC and 1*SS Rx STBC
* @use_rts_for_aggregation: use rts/cts protection for HT traffic
* @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40
*/
struct iwl_ht_params {
enum ieee80211_smps_mode smps_mode;
const bool ht_greenfield_support; /* if used set to true */
+ const bool stbc;
bool use_rts_for_aggregation;
u8 ht40_bands;
};
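The num_of_ant() helper added to iwl-config.h above simply counts how many of the A/B/C antenna bits are set in a chain mask; the double negation collapses each masked bit to 0 or 1 so the sum is the antenna count. A rough standalone equivalent, with the bit assignments assumed rather than copied from the header:

#include <stdio.h>

/* Assumed antenna bit assignments, in the spirit of ANT_A/ANT_B/ANT_C. */
#define ANT_A (1 << 0)
#define ANT_B (1 << 1)
#define ANT_C (1 << 2)

static unsigned num_of_ant(unsigned mask)
{
	/* !!x turns any non-zero value into 1, so this adds one per set antenna bit */
	return !!(mask & ANT_A) + !!(mask & ANT_B) + !!(mask & ANT_C);
}

int main(void)
{
	printf("%u\n", num_of_ant(ANT_A | ANT_B));	/* prints 2 */
	return 0;
}

The same idea is used later in this patch by iwl-nvm-parse.c, where the antenna count decides whether TX STBC can be advertised.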
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index da4eca8b3007..9d325516c42d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -198,7 +198,8 @@
CSR_INT_BIT_RF_KILL | \
CSR_INT_BIT_SW_RX | \
CSR_INT_BIT_WAKEUP | \
- CSR_INT_BIT_ALIVE)
+ CSR_INT_BIT_ALIVE | \
+ CSR_INT_BIT_RX_PERIODIC)
/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index b2bb32a781dd..a75aac986a23 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 8f61c717f619..23e7351e02de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 684c416d3493..78bd41bf34b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index ff570027e9dd..c3728163be46 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -322,6 +322,41 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces,
pieces->img[type].sec[sec].offset = offset;
}
+static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
+{
+ int i, j;
+ struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data;
+ struct iwl_fw_cipher_scheme *fwcs;
+ struct ieee80211_cipher_scheme *cs;
+ u32 cipher;
+
+ if (len < sizeof(*l) ||
+ len < sizeof(l->size) + l->size * sizeof(l->cs[0]))
+ return -EINVAL;
+
+ for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) {
+ fwcs = &l->cs[j];
+ cipher = le32_to_cpu(fwcs->cipher);
+
+ /* we skip schemes with zero cipher suite selector */
+ if (!cipher)
+ continue;
+
+ cs = &fw->cs[j++];
+ cs->cipher = cipher;
+ cs->iftype = BIT(NL80211_IFTYPE_STATION);
+ cs->hdr_len = fwcs->hdr_len;
+ cs->pn_len = fwcs->pn_len;
+ cs->pn_off = fwcs->pn_off;
+ cs->key_idx_off = fwcs->key_idx_off;
+ cs->key_idx_mask = fwcs->key_idx_mask;
+ cs->key_idx_shift = fwcs->key_idx_shift;
+ cs->mic_len = fwcs->mic_len;
+ }
+
+ return 0;
+}
+
/*
* Gets uCode section from tlv.
*/
@@ -729,6 +764,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
break;
+ case IWL_UCODE_TLV_CSCHEME:
+ if (iwl_store_cscheme(&drv->fw, tlv_data, tlv_len))
+ goto invalid_tlv_len;
+ break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
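iwl_store_cscheme() above parses the new IWL_UCODE_TLV_CSCHEME TLV: the payload is a one-byte entry count followed by packed cipher-scheme records, at most IWL_UCODE_MAX_CS of which are copied into fw->cs[], with records whose cipher selector is zero skipped. The following user-space sketch shows the same length-prefixed-list pattern with a deliberately simplified record layout; struct cs_rec, MAX_CS and the field set are illustrative, not the firmware format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified record: just a cipher selector and a security-header length. */
struct cs_rec { uint32_t cipher; uint8_t hdr_len; } __attribute__((packed));
struct cs_list { uint8_t size; struct cs_rec cs[]; } __attribute__((packed));

#define MAX_CS 1	/* mirrors the spirit of IWL_UCODE_MAX_CS */

static int store_cscheme(struct cs_rec *out, const uint8_t *data, uint32_t len)
{
	const struct cs_list *l = (const struct cs_list *)data;
	uint32_t i, j;

	/* reject buffers too short for the count or for the declared entries */
	if (len < sizeof(*l) ||
	    len < sizeof(l->size) + (uint32_t)l->size * sizeof(l->cs[0]))
		return -1;

	for (i = 0, j = 0; i < MAX_CS && i < l->size; i++) {
		if (!l->cs[i].cipher)		/* skip empty selectors */
			continue;
		out[j++] = l->cs[i];
	}
	return 0;
}

int main(void)
{
	uint8_t buf[1 + sizeof(struct cs_rec)] = { 1 };	/* one record follows */
	struct cs_rec rec = { .cipher = 4, .hdr_len = 8 };
	struct cs_rec out[MAX_CS];

	memcpy(buf + 1, &rec, sizeof(rec));
	if (!store_cscheme(out, buf, sizeof(buf)))
		printf("cipher %u hdr_len %u\n",
		       (unsigned)out[0].cipher, (unsigned)out[0].hdr_len);
	return 0;
}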
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 429337a2b9a1..592c01e11013 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -67,7 +67,7 @@
/* for all modules */
#define DRV_NAME "iwlwifi"
#define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2013 Intel Corporation"
+#define DRV_COPYRIGHT "Copyright(c) 2003- 2014 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 4c887f365908..c44cf1149648 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -614,10 +614,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->flags = IEEE80211_CHAN_NO_HT40;
if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS))
- channel->flags |= IEEE80211_CHAN_NO_IBSS;
+ channel->flags |= IEEE80211_CHAN_NO_IR;
if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE))
- channel->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+ channel->flags |= IEEE80211_CHAN_NO_IR;
if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR)
channel->flags |= IEEE80211_CHAN_RADAR;
@@ -751,6 +751,13 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
ht_info->ht_supported = true;
ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
+ if (cfg->ht_params->stbc) {
+ ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+ if (tx_chains > 1)
+ ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+ }
+
if (iwlwifi_mod_params.amsdu_size_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
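The eeprom-parse.c hunk above starts advertising STBC in the HT capabilities when the new ht_params->stbc flag is set: one RX STBC stream is always reported, while TX STBC is added only when more than one TX chain is available. A small sketch of how such capability bits combine; the bit positions are assumptions modelled on the 802.11n HT capability field rather than the mac80211 constants.

#include <stdio.h>

#define HT_CAP_TX_STBC		(1u << 7)	/* assumed TX STBC bit */
#define HT_CAP_RX_STBC_SHIFT	8		/* assumed RX STBC subfield shift */

static unsigned ht_cap_stbc(int stbc_supported, int tx_chains)
{
	unsigned cap = 0;

	if (!stbc_supported)
		return 0;

	cap |= 1u << HT_CAP_RX_STBC_SHIFT;	/* advertise one RX STBC stream */
	if (tx_chains > 1)			/* TX STBC needs a second chain */
		cap |= HT_CAP_TX_STBC;
	return cap;
}

int main(void)
{
	printf("0x%x\n", ht_cap_stbc(1, 2));	/* 0x180: RX + TX STBC */
	return 0;
}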
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index d73304a23ec2..e3c7deafabe6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
index e5f2e362ab0b..25d0105741db 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
index 8e941f8bd7d6..a6d3bdf82cdd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 484d318245fb..9564ae173d06 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 6c6c35c5228c..88e2d6eb569f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -125,6 +125,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_SECURE_SEC_INIT = 25,
IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26,
IWL_UCODE_TLV_NUM_OF_CPU = 27,
+ IWL_UCODE_TLV_CSCHEME = 28,
};
struct iwl_ucode_tlv {
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 75db087120c3..5f1493c44097 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -92,6 +92,9 @@
* @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
* @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
* containing CAM (Continuous Active Mode) indication.
+ * @IWL_UCODE_TLV_FLAGS_P2P_PS: P2P client power save is supported (only on a
+ * single bound interface).
+ * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@@ -113,7 +116,9 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17),
IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19),
IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20),
+ IWL_UCODE_TLV_FLAGS_P2P_PS = BIT(21),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
+ IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
};
/* The default calibrate table size if not specified by firmware file */
@@ -209,6 +214,44 @@ enum iwl_fw_phy_cfg {
FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
};
+#define IWL_UCODE_MAX_CS 1
+
+/**
+ * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
+ * @cipher: the cipher suite selector
+ * @flags: cipher scheme flags (currently reserved for future use)
+ * @hdr_len: size of the MPDU security header
+ * @pn_len: size of the PN
+ * @pn_off: offset of the PN from the beginning of the security header
+ * @key_idx_off: offset of the key index byte in the security header
+ * @key_idx_mask: bit mask of the key_idx bits
+ * @key_idx_shift: bit shift needed to get key_idx
+ * @mic_len: MIC length in bytes
+ * @hw_cipher: HW cipher index used in host commands
+ */
+struct iwl_fw_cipher_scheme {
+ __le32 cipher;
+ u8 flags;
+ u8 hdr_len;
+ u8 pn_len;
+ u8 pn_off;
+ u8 key_idx_off;
+ u8 key_idx_mask;
+ u8 key_idx_shift;
+ u8 mic_len;
+ u8 hw_cipher;
+} __packed;
+
+/**
+ * struct iwl_fw_cscheme_list - a cipher scheme list
+ * @size: a number of entries
+ * @cs: cipher scheme entries
+ */
+struct iwl_fw_cscheme_list {
+ u8 size;
+ struct iwl_fw_cipher_scheme cs[];
+} __packed;
+
/**
* struct iwl_fw - variables associated with the firmware
*
@@ -224,6 +267,7 @@ enum iwl_fw_phy_cfg {
* @inst_evtlog_size: event log size for runtime ucode.
* @inst_errlog_ptr: error log offset for runtime ucode.
* @mvm_fw: indicates this is MVM firmware
+ * @cs: optional external cipher schemes.
*/
struct iwl_fw {
u32 ucode_ver;
@@ -243,6 +287,8 @@ struct iwl_fw {
u32 phy_config;
bool mvm_fw;
+
+ struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
};
static inline u8 iwl_fw_valid_tx_ant(const struct iwl_fw *fw)
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index ad8e19a56eca..f98175a0d35b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 63d10ec08dbc..c339c1bed080 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index a1f580c0c6c6..0a84ade7edac 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index 940b8a9d5285..b5bc959b1dfe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
index 2e2f1c8c99f9..95af97a6c2cf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index b76a9a8fc0b3..725e954d8475 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -182,6 +182,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) {
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
+
+ if (ch_idx >= NUM_2GHZ_CHANNELS &&
+ !data->sku_cap_band_52GHz_enable)
+ ch_flags &= ~NVM_CHANNEL_VALID;
+
if (!(ch_flags & NVM_CHANNEL_VALID)) {
IWL_DEBUG_EEPROM(dev,
"Ch. %d Flags %x [%sGHz] - No traffic\n",
@@ -223,10 +228,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->flags |= IEEE80211_CHAN_NO_160MHZ;
if (!(ch_flags & NVM_CHANNEL_IBSS))
- channel->flags |= IEEE80211_CHAN_NO_IBSS;
+ channel->flags |= IEEE80211_CHAN_NO_IR;
if (!(ch_flags & NVM_CHANNEL_ACTIVE))
- channel->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+ channel->flags |= IEEE80211_CHAN_NO_IR;
if (ch_flags & NVM_CHANNEL_RADAR)
channel->flags |= IEEE80211_CHAN_RADAR;
@@ -263,13 +268,19 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
struct ieee80211_sta_vht_cap *vht_cap)
{
+ int num_ants = num_of_ant(data->valid_rx_ant);
+
vht_cap->vht_supported = true;
vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+ if (num_ants > 1)
+ vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
+
if (iwlwifi_mod_params.amsdu_size_8K)
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
@@ -283,7 +294,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
- if (data->valid_rx_ant == 1 || cfg->rx_with_siso_diversity) {
+ if (num_ants == 1 ||
+ cfg->rx_with_siso_diversity) {
vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
/* this works because NOT_SUPPORTED == 3 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
index 3325059c52d4..0c4399aba8c6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 976448a57d02..b5be51f3cd3d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -155,14 +155,12 @@ void iwl_opmode_deregister(const char *name);
/**
* struct iwl_op_mode - operational mode
+ * @ops - pointer to its own ops
*
* This holds an implementation of the mac80211 / fw API.
- *
- * @ops - pointer to its own ops
*/
struct iwl_op_mode {
const struct iwl_op_mode_ops *ops;
- const struct iwl_trans *trans;
char op_mode_specific[0] __aligned(sizeof(void *));
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index 1a405ae6a9c5..fa77d63a277a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
index ce983af79644..9ee18d0d2d01 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.h
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index a70c7b9d9bad..100bd0d79681 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -102,6 +102,9 @@
/* Device system time */
#define DEVICE_SYSTEM_TIME_REG 0xA0206C
+/* Device NMI register */
+#define DEVICE_SET_NMI_REG 0x00a01c30
+
/*****************************************************************************
* 7000/3000 series SHR DTS addresses *
*****************************************************************************/
@@ -274,4 +277,8 @@ static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
/*********************** END TX SCHEDULER *************************************/
+/* Oscillator clock */
+#define OSC_CLK (0xa04068)
+#define OSC_CLK_FORCE_CONTROL (0x8)
+
#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 143292b4dbbf..1f065cf4a4ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,7 @@
#include "iwl-debug.h"
#include "iwl-config.h"
#include "iwl-fw.h"
+#include "iwl-op-mode.h"
/**
* DOC: Transport layer - what is it ?
@@ -100,8 +101,7 @@
* start_fw
*
* 5) Then when finished (or reset):
- * stop_fw (a.k.a. stop device for the moment)
- * stop_hw
+ * stop_device
*
* 6) Eventually, the free function will be called.
*/
@@ -176,6 +176,16 @@ struct iwl_rx_packet {
u8 data[];
} __packed;
+static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
+{
+ return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+}
+
+static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
+{
+ return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
+}
+
/**
* enum CMD_MODE - how to send the host commands ?
*
@@ -318,6 +328,24 @@ enum iwl_d3_status {
};
/**
+ * enum iwl_trans_status: transport status flags
+ * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
+ * @STATUS_DEVICE_ENABLED: APM is enabled
+ * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
+ * @STATUS_INT_ENABLED: interrupts are enabled
+ * @STATUS_RFKILL: the HW RFkill switch is in KILL position
+ * @STATUS_FW_ERROR: the fw is in error state
+ */
+enum iwl_trans_status {
+ STATUS_SYNC_HCMD_ACTIVE,
+ STATUS_DEVICE_ENABLED,
+ STATUS_TPOWER_PMI,
+ STATUS_INT_ENABLED,
+ STATUS_RFKILL,
+ STATUS_FW_ERROR,
+};
+
+/**
* struct iwl_trans_config - transport configuration
*
* @op_mode: pointer to the upper layer.
@@ -361,9 +389,7 @@ struct iwl_trans;
*
* @start_hw: starts the HW- from that point on, the HW can send interrupts
* May sleep
- * @stop_hw: stops the HW- from that point on, the HW will be in low power but
- * will still issue interrupt if the HW RF kill is triggered unless
- * op_mode_leaving is true.
+ * @op_mode_leave: Turn off the HW RF kill indication if on
* May sleep
* @start_fw: allocates and inits all the resources for the transport
* layer. Also kick a fw image.
@@ -371,8 +397,11 @@ struct iwl_trans;
* @fw_alive: called when the fw sends alive notification. If the fw provides
* the SCD base address in SRAM, then provide it here, or 0 otherwise.
* May sleep
- * @stop_device:stops the whole device (embedded CPU put to reset)
- * May sleep
+ * @stop_device: stops the whole device (embedded CPU put to reset) and stops
+ * the HW. From that point on, the HW will be in low power but will still
+ * issue interrupt if the HW RF kill is triggered. This callback must do
+ * the right thing and not crash even if start_hw() was called but not
+ * start_fw(). May sleep
* @d3_suspend: put the device into the correct mode for WoWLAN during
* suspend. This is optional, if not implemented WoWLAN will not be
* supported. This callback may sleep.
@@ -418,7 +447,7 @@ struct iwl_trans;
struct iwl_trans_ops {
int (*start_hw)(struct iwl_trans *iwl_trans);
- void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
+ void (*op_mode_leave)(struct iwl_trans *iwl_trans);
int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
bool run_in_rfkill);
void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
@@ -479,6 +508,7 @@ enum iwl_trans_state {
* @ops - pointer to iwl_trans_ops
* @op_mode - pointer to the op_mode
* @cfg - pointer to the configuration
+ * @status: a bit-mask of transport status flags
* @dev - pointer to struct device * that represents the device
* @hw_id: a u32 with the ID of the device / subdevice.
* Set during transport allocation.
@@ -499,6 +529,7 @@ struct iwl_trans {
struct iwl_op_mode *op_mode;
const struct iwl_cfg *cfg;
enum iwl_trans_state state;
+ unsigned long status;
struct device *dev;
u32 hw_rev;
@@ -540,15 +571,14 @@ static inline int iwl_trans_start_hw(struct iwl_trans *trans)
return trans->ops->start_hw(trans);
}
-static inline void iwl_trans_stop_hw(struct iwl_trans *trans,
- bool op_mode_leaving)
+static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
might_sleep();
- trans->ops->stop_hw(trans, op_mode_leaving);
+ if (trans->ops->op_mode_leave)
+ trans->ops->op_mode_leave(trans);
- if (op_mode_leaving)
- trans->op_mode = NULL;
+ trans->op_mode = NULL;
trans->state = IWL_TRANS_NO_FW;
}
@@ -570,6 +600,7 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans,
WARN_ON_ONCE(!trans->rx_mpdu_cmd);
+ clear_bit(STATUS_FW_ERROR, &trans->status);
return trans->ops->start_fw(trans, fw, run_in_rfkill);
}
@@ -601,6 +632,13 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
{
int ret;
+ if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL, &trans->status)))
+ return -ERFKILL;
+
+ if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+ return -EIO;
+
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return -EIO;
@@ -640,6 +678,9 @@ static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int queue)
{
+ if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+ return -EIO;
+
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
@@ -657,9 +698,6 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
{
- if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
- IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
-
trans->ops->txq_disable(trans, queue);
}
@@ -760,7 +798,8 @@ static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
- trans->ops->set_pmi(trans, state);
+ if (trans->ops->set_pmi)
+ trans->ops->set_pmi(trans, state);
}
static inline void
@@ -780,6 +819,16 @@ iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
__release(nic_access);
}
+static inline void iwl_trans_fw_error(struct iwl_trans *trans)
+{
+ if (WARN_ON_ONCE(!trans->op_mode))
+ return;
+
+ /* prevent double restarts due to the same erroneous FW */
+ if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
+ iwl_op_mode_nic_error(trans->op_mode);
+}
+
/*****************************************************
* driver (transport) register/unregister functions
******************************************************/
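The iwl-trans.h changes above consolidate RF-kill and firmware-error state into a shared status bit-mask on struct iwl_trans, and the new iwl_trans_fw_error() uses an atomic test-and-set of STATUS_FW_ERROR so that only the first error report triggers an op-mode restart. A minimal user-space sketch of that guard, using C11 atomics in place of the kernel's test_and_set_bit(); the names are illustrative.

#include <stdatomic.h>
#include <stdio.h>

/* One bit per transport status flag, mirroring the new enum iwl_trans_status. */
enum { STATUS_RFKILL, STATUS_FW_ERROR };

static atomic_ulong status;

/* Returns the previous value of the bit, like test_and_set_bit(). */
static int test_and_set_status(int bit)
{
	unsigned long old = atomic_fetch_or(&status, 1ul << bit);
	return !!(old & (1ul << bit));
}

static void fw_error(void)
{
	/* only the first caller reports; duplicate reports from the same
	 * erroneous firmware are suppressed */
	if (!test_and_set_status(STATUS_FW_ERROR))
		printf("restarting firmware\n");
}

int main(void)
{
	fw_error();	/* prints once */
	fw_error();	/* suppressed */
	return 0;
}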
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index 6d73817850ce..f98ec2b23898 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -1,10 +1,10 @@
obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
-iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
+iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o
iwlmvm-y += power.o power_legacy.o bt-coex.o
iwlmvm-y += led.o tt.o
-iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
+iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/mvm/binding.c b/drivers/net/wireless/iwlwifi/mvm/binding.c
index 93fd1457954b..a1376539d2dc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/binding.c
+++ b/drivers/net/wireless/iwlwifi/mvm/binding.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -183,15 +183,29 @@ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
return -EINVAL;
+ /*
+ * Update SF - disable if needed. If this fails, SF might still be on
+ * while many MACs are bound, which is forbidden - so fail the binding.
+ */
+ if (iwl_mvm_sf_update(mvm, vif, false))
+ return -EINVAL;
+
return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true);
}
int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
return -EINVAL;
- return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
+ ret = iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
+
+ if (!ret)
+ if (iwl_mvm_sf_update(mvm, vif, true))
+ IWL_ERR(mvm, "Failed to update SF state\n");
+
+ return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 75b72a956552..76cde6ce6551 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -294,9 +294,9 @@ static const __le64 iwl_ci_mask[][3] = {
cpu_to_le64(0x0)
},
{
- cpu_to_le64(0xFE00000000ULL),
+ cpu_to_le64(0xFFC0000000ULL),
cpu_to_le64(0x0ULL),
- cpu_to_le64(0x0)
+ cpu_to_le64(0x0ULL)
},
};
@@ -396,7 +396,8 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
BT_VALID_ANT_ISOLATION |
BT_VALID_ANT_ISOLATION_THRS |
BT_VALID_TXTX_DELTA_FREQ_THRS |
- BT_VALID_TXRX_MAX_FREQ_0);
+ BT_VALID_TXRX_MAX_FREQ_0 |
+ BT_VALID_SYNC_TO_SCO);
if (mvm->cfg->bt_shared_single_ant)
memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
@@ -514,7 +515,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
if (IS_ERR_OR_NULL(sta))
return 0;
- mvmsta = (void *)sta->drv_priv;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
/* nothing to do */
if (mvmsta->bt_reduced_txpower == enable)
@@ -846,7 +847,7 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
if (IS_ERR_OR_NULL(sta))
return;
- mvmsta = (void *)sta->drv_priv;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
data->num_bss_ifaces++;
@@ -917,11 +918,11 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
enum iwl_bt_coex_lut_type lut_type;
if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
- BT_LOW_TRAFFIC)
+ BT_HIGH_TRAFFIC)
return LINK_QUAL_AGG_TIME_LIMIT_DEF;
lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
@@ -936,7 +937,7 @@ u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
BT_HIGH_TRAFFIC)
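The bt-coex.c hunks above replace bare (void *) casts of sta->drv_priv with the iwl_mvm_sta_from_mac80211() accessor, which keeps the conversion in one named, type-checked place. A toy illustration of that pattern; the struct layouts and names here are invented for the sketch, not the mac80211 or iwlwifi definitions.

#include <stdio.h>

/* drv_priv is opaque per-driver storage embedded in a generic station object. */
struct sta { int id; char drv_priv[32]; };
struct mvm_sta { int reduced_txpower; };

/* One accessor instead of scattered (void *) casts. */
static struct mvm_sta *mvm_sta_from_sta(struct sta *sta)
{
	return (struct mvm_sta *)sta->drv_priv;
}

int main(void)
{
	struct sta sta = { .id = 3 };

	mvm_sta_from_sta(&sta)->reduced_txpower = 1;
	printf("%d\n", mvm_sta_from_sta(&sta)->reduced_txpower);
	return 0;
}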
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index 4b6d670c3509..036857698565 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index b9b81e881dd0..f36a7ee0267f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -886,8 +886,7 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
if (err)
return err;
- size = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- size -= sizeof(cmd.resp_pkt->hdr);
+ size = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (size < sizeof(__le16)) {
err = -EINVAL;
} else {
@@ -1211,15 +1210,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
#ifdef CONFIG_IWLWIFI_DEBUGFS
- len = le32_to_cpu(d3_cfg_cmd.resp_pkt->len_n_flags) &
- FH_RSCSR_FRAME_SIZE_MSK;
- if (len >= sizeof(u32) * 2) {
+ len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
+ if (len >= sizeof(u32)) {
mvm->d3_test_pme_ptr =
le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
- } else if (test) {
- /* in test mode we require the pointer */
- ret = -EIO;
- goto out;
}
#endif
iwl_free_resp(&d3_cfg_cmd);
@@ -1231,10 +1225,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm->aux_sta.sta_id = old_aux_sta_id;
mvm_ap_sta->sta_id = old_ap_sta_id;
mvmvif->ap_sta_id = old_ap_sta_id;
- out_noreset:
- kfree(key_data.rsc_tsc);
+
if (ret < 0)
ieee80211_restart_hw(mvm->hw);
+ out_noreset:
+ kfree(key_data.rsc_tsc);
mutex_unlock(&mvm->mutex);
@@ -1537,10 +1532,16 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
struct iwl_mvm_d3_gtk_iter_data gtkdata = {
.status = status,
};
+ u32 disconnection_reasons =
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
if (!status || !vif->bss_conf.bssid)
return false;
+ if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
+ return false;
+
/* find last GTK that we used initially, if any */
gtkdata.find_phase = true;
ieee80211_iter_keys(mvm->hw, vif,
@@ -1665,8 +1666,8 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
else
status_size = sizeof(struct iwl_wowlan_status_v4);
- len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- if (len - sizeof(struct iwl_cmd_header) < status_size) {
+ len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+ if (len < status_size) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out_free_resp;
}
@@ -1701,8 +1702,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
status.wake_packet = status_v4->wake_packet;
}
- if (len - sizeof(struct iwl_cmd_header) !=
- status_size + ALIGN(status.wake_packet_bufsize, 4)) {
+ if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out_free_resp;
}
@@ -1805,6 +1805,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_mvm_read_d3_sram(mvm);
keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (keep)
+ mvm->keep_vif = vif;
+#endif
/* has unlocked the mutex, so skip that */
goto out;
@@ -1861,6 +1865,7 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
return err;
}
mvm->d3_test_active = true;
+ mvm->keep_vif = NULL;
return 0;
}
@@ -1871,10 +1876,14 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
u32 pme_asserted;
while (true) {
- pme_asserted = iwl_trans_read_mem32(mvm->trans,
- mvm->d3_test_pme_ptr);
- if (pme_asserted)
- break;
+ /* read pme_ptr if available */
+ if (mvm->d3_test_pme_ptr) {
+ pme_asserted = iwl_trans_read_mem32(mvm->trans,
+ mvm->d3_test_pme_ptr);
+ if (pme_asserted)
+ break;
+ }
+
if (msleep_interruptible(100))
break;
}
@@ -1885,6 +1894,10 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
+ /* skip the one we keep connection on */
+ if (_data == vif)
+ return;
+
if (vif->type == NL80211_IFTYPE_STATION)
ieee80211_connection_loss(vif);
}
@@ -1911,7 +1924,7 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_d3_test_disconn_work_iter, NULL);
+ iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
ieee80211_wake_queues(mvm->hw);
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
new file mode 100644
index 000000000000..0e29cd83a06a
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -0,0 +1,546 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "debugfs.h"
+
+static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum iwl_dbgfs_pm_mask param, int val)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm;
+
+ dbgfs_pm->mask |= param;
+
+ switch (param) {
+ case MVM_DEBUGFS_PM_KEEP_ALIVE: {
+ struct ieee80211_hw *hw = mvm->hw;
+ int dtimper = hw->conf.ps_dtim_period ?: 1;
+ int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+
+ IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
+ if (val * MSEC_PER_SEC < 3 * dtimper_msec)
+ IWL_WARN(mvm,
+ "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
+ val * MSEC_PER_SEC, 3 * dtimper_msec);
+ dbgfs_pm->keep_alive_seconds = val;
+ break;
+ }
+ case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
+ IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n",
+ val ? "enabled" : "disabled");
+ dbgfs_pm->skip_over_dtim = val;
+ break;
+ case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
+ IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
+ dbgfs_pm->skip_dtim_periods = val;
+ break;
+ case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
+ IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
+ dbgfs_pm->rx_data_timeout = val;
+ break;
+ case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
+ IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
+ dbgfs_pm->tx_data_timeout = val;
+ break;
+ case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
+ IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
+ dbgfs_pm->disable_power_off = val;
+ break;
+ case MVM_DEBUGFS_PM_LPRX_ENA:
+ IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
+ dbgfs_pm->lprx_ena = val;
+ break;
+ case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
+ IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
+ dbgfs_pm->lprx_rssi_threshold = val;
+ break;
+ case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
+ IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
+ dbgfs_pm->snooze_ena = val;
+ break;
+ case MVM_DEBUGFS_PM_UAPSD_MISBEHAVING:
+ IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val);
+ dbgfs_pm->uapsd_misbehaving = val;
+ break;
+ }
+}
+
+static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ enum iwl_dbgfs_pm_mask param;
+ int val, ret;
+
+ if (!strncmp("keep_alive=", buf, 11)) {
+ if (sscanf(buf + 11, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_KEEP_ALIVE;
+ } else if (!strncmp("skip_over_dtim=", buf, 15)) {
+ if (sscanf(buf + 15, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
+ } else if (!strncmp("skip_dtim_periods=", buf, 18)) {
+ if (sscanf(buf + 18, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
+ } else if (!strncmp("rx_data_timeout=", buf, 16)) {
+ if (sscanf(buf + 16, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
+ } else if (!strncmp("tx_data_timeout=", buf, 16)) {
+ if (sscanf(buf + 16, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
+ } else if (!strncmp("disable_power_off=", buf, 18) &&
+ !(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
+ if (sscanf(buf + 18, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
+ } else if (!strncmp("lprx=", buf, 5)) {
+ if (sscanf(buf + 5, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_LPRX_ENA;
+ } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
+ if (sscanf(buf + 20, "%d", &val) != 1)
+ return -EINVAL;
+ if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val <
+ POWER_LPRX_RSSI_THRESHOLD_MIN)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
+ } else if (!strncmp("snooze_enable=", buf, 14)) {
+ if (sscanf(buf + 14, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
+ } else if (!strncmp("uapsd_misbehaving=", buf, 18)) {
+ if (sscanf(buf + 18, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING;
+ } else {
+ return -EINVAL;
+ }
+
+ mutex_lock(&mvm->mutex);
+ iwl_dbgfs_update_pm(mvm, vif, param, val);
+ ret = iwl_mvm_power_update_mode(mvm, vif);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[512];
+ int bufsz = sizeof(buf);
+ int pos;
+
+ pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u8 ap_sta_id;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ char buf[512];
+ int bufsz = sizeof(buf);
+ int pos = 0;
+ int i;
+
+ mutex_lock(&mvm->mutex);
+
+ ap_sta_id = mvmvif->ap_sta_id;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
+ mvmvif->id, mvmvif->color);
+ pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
+ vif->bss_conf.bssid);
+ pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
+ for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++)
+ pos += scnprintf(buf+pos, bufsz-pos,
+				 "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d uapsd = %d\n",
+ i, mvmvif->queue_params[i].txop,
+ mvmvif->queue_params[i].cw_min,
+ mvmvif->queue_params[i].cw_max,
+ mvmvif->queue_params[i].aifs,
+ mvmvif->queue_params[i].uapsd);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ ap_sta_id != IWL_MVM_STATION_COUNT) {
+ struct ieee80211_sta *sta;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (!IS_ERR_OR_NULL(sta)) {
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "ap_sta_id %d - reduced Tx power %d\n",
+ ap_sta_id,
+ mvm_sta->bt_reduced_txpower);
+ }
+ }
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ if (chanctx_conf)
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "idle rx chains %d, active rx chains: %d\n",
+ chanctx_conf->rx_chains_static,
+ chanctx_conf->rx_chains_dynamic);
+ rcu_read_unlock();
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
+ enum iwl_dbgfs_bf_mask param, int value)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
+
+ dbgfs_bf->mask |= param;
+
+ switch (param) {
+ case MVM_DEBUGFS_BF_ENERGY_DELTA:
+ dbgfs_bf->bf_energy_delta = value;
+ break;
+ case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
+ dbgfs_bf->bf_roaming_energy_delta = value;
+ break;
+ case MVM_DEBUGFS_BF_ROAMING_STATE:
+ dbgfs_bf->bf_roaming_state = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
+ dbgfs_bf->bf_temp_threshold = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
+ dbgfs_bf->bf_temp_fast_filter = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
+ dbgfs_bf->bf_temp_slow_filter = value;
+ break;
+ case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
+ dbgfs_bf->bf_enable_beacon_filter = value;
+ break;
+ case MVM_DEBUGFS_BF_DEBUG_FLAG:
+ dbgfs_bf->bf_debug_flag = value;
+ break;
+ case MVM_DEBUGFS_BF_ESCAPE_TIMER:
+ dbgfs_bf->bf_escape_timer = value;
+ break;
+ case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
+ dbgfs_bf->ba_enable_beacon_abort = value;
+ break;
+ case MVM_DEBUGFS_BA_ESCAPE_TIMER:
+ dbgfs_bf->ba_escape_timer = value;
+ break;
+ }
+}
+
+static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ enum iwl_dbgfs_bf_mask param;
+ int value, ret = 0;
+
+ if (!strncmp("bf_energy_delta=", buf, 16)) {
+ if (sscanf(buf+16, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ENERGY_DELTA_MIN ||
+ value > IWL_BF_ENERGY_DELTA_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ENERGY_DELTA;
+ } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
+ if (sscanf(buf+24, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN ||
+ value > IWL_BF_ROAMING_ENERGY_DELTA_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
+ } else if (!strncmp("bf_roaming_state=", buf, 17)) {
+ if (sscanf(buf+17, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ROAMING_STATE_MIN ||
+ value > IWL_BF_ROAMING_STATE_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ROAMING_STATE;
+ } else if (!strncmp("bf_temp_threshold=", buf, 18)) {
+ if (sscanf(buf+18, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
+ value > IWL_BF_TEMP_THRESHOLD_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
+ } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
+ if (sscanf(buf+20, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
+ value > IWL_BF_TEMP_FAST_FILTER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
+ } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
+ if (sscanf(buf+20, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
+ value > IWL_BF_TEMP_SLOW_FILTER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
+ } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
+ if (sscanf(buf+24, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
+ } else if (!strncmp("bf_debug_flag=", buf, 14)) {
+ if (sscanf(buf+14, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_DEBUG_FLAG;
+ } else if (!strncmp("bf_escape_timer=", buf, 16)) {
+ if (sscanf(buf+16, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ESCAPE_TIMER_MIN ||
+ value > IWL_BF_ESCAPE_TIMER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
+ } else if (!strncmp("ba_escape_timer=", buf, 16)) {
+ if (sscanf(buf+16, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BA_ESCAPE_TIMER_MIN ||
+ value > IWL_BA_ESCAPE_TIMER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
+ } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
+ if (sscanf(buf+23, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
+ } else {
+ return -EINVAL;
+ }
+
+ mutex_lock(&mvm->mutex);
+ iwl_dbgfs_update_bf(vif, param, value);
+ if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ else
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter =
+ cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
+ .ba_enable_beacon_abort =
+ cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
+ };
+
+ iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
+ if (mvmvif->bf_data.bf_enabled)
+ cmd.bf_enable_beacon_filter = cpu_to_le32(1);
+ else
+ cmd.bf_enable_beacon_filter = 0;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
+ le32_to_cpu(cmd.bf_energy_delta));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
+ le32_to_cpu(cmd.bf_roaming_energy_delta));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
+ le32_to_cpu(cmd.bf_roaming_state));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
+ le32_to_cpu(cmd.bf_temp_threshold));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
+ le32_to_cpu(cmd.bf_temp_fast_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
+ le32_to_cpu(cmd.bf_temp_slow_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
+ le32_to_cpu(cmd.bf_enable_beacon_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
+ le32_to_cpu(cmd.bf_debug_flag));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
+ le32_to_cpu(cmd.bf_escape_timer));
+ pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
+ le32_to_cpu(cmd.ba_escape_timer));
+ pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
+ le32_to_cpu(cmd.ba_enable_beacon_abort));
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, vif, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+ } while (0)
+
+MVM_DEBUGFS_READ_FILE_OPS(mac_params);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
+
+void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct dentry *dbgfs_dir = vif->debugfs_dir;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[100];
+
+ /*
+	 * Check if the debugfs directory already exists before creating it.
+ * This may happen when, for example, resetting hw or suspend-resume
+ */
+ if (!dbgfs_dir || mvmvif->dbgfs_dir)
+ return;
+
+ mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
+ mvmvif->mvm = mvm;
+
+ if (!mvmvif->dbgfs_dir) {
+ IWL_ERR(mvm, "Failed to create debugfs directory under %s\n",
+ dbgfs_dir->d_name.name);
+ return;
+ }
+
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+ ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
+ (vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
+ mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS)))
+ MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
+ S_IRUSR);
+
+ MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
+ S_IRUSR);
+
+ if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+ mvmvif == mvm->bf_allowed_vif)
+ MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
+
+ /*
+ * Create symlink for convenience pointing to interface specific
+ * debugfs entries for the driver. For example, under
+ * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/
+ * find
+ * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
+ */
+ snprintf(buf, 100, "../../../%s/%s/%s/%s",
+ dbgfs_dir->d_parent->d_parent->d_name.name,
+ dbgfs_dir->d_parent->d_name.name,
+ dbgfs_dir->d_name.name,
+ mvmvif->dbgfs_dir->d_name.name);
+
+ mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
+ mvm->debugfs_dir, buf);
+ if (!mvmvif->dbgfs_slink)
+ IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n",
+ dbgfs_dir->d_name.name);
+ return;
+err:
+ IWL_ERR(mvm, "Can't create debugfs entity\n");
+}
+
+void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ debugfs_remove(mvmvif->dbgfs_slink);
+ mvmvif->dbgfs_slink = NULL;
+
+ debugfs_remove_recursive(mvmvif->dbgfs_dir);
+ mvmvif->dbgfs_dir = NULL;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index a8fe6b41f9a3..369d4c90e669 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,30 +63,18 @@
#include "mvm.h"
#include "sta.h"
#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "debugfs.h"
-struct iwl_dbgfs_mvm_ctx {
- struct iwl_mvm *mvm;
- struct ieee80211_vif *vif;
-};
-
-static ssize_t iwl_dbgfs_tx_flush_write(struct file *file,
- const char __user *user_buf,
+static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- struct iwl_mvm *mvm = file->private_data;
-
- char buf[16];
- int buf_size, ret;
+ int ret;
u32 scd_q_msk;
if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
return -EIO;
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
if (sscanf(buf, "%x", &scd_q_msk) != 1)
return -EINVAL;
@@ -99,24 +87,15 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct file *file,
return ret;
}
-static ssize_t iwl_dbgfs_sta_drain_write(struct file *file,
- const char __user *user_buf,
+static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- struct iwl_mvm *mvm = file->private_data;
struct ieee80211_sta *sta;
-
- char buf[8];
- int buf_size, sta_id, drain, ret;
+ int sta_id, drain, ret;
if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
return -EIO;
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
return -EINVAL;
if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
@@ -144,73 +123,57 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
{
struct iwl_mvm *mvm = file->private_data;
const struct fw_img *img;
- int ofs, len, pos = 0;
- size_t bufsz, ret;
- char *buf;
+ unsigned int ofs, len;
+ size_t ret;
u8 *ptr;
if (!mvm->ucode_loaded)
return -EINVAL;
/* default is to dump the entire data segment */
- if (!mvm->dbgfs_sram_offset && !mvm->dbgfs_sram_len) {
- img = &mvm->fw->img[mvm->cur_ucode];
- ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
- len = img->sec[IWL_UCODE_SECTION_DATA].len;
- } else {
+ img = &mvm->fw->img[mvm->cur_ucode];
+ ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+ len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+ if (mvm->dbgfs_sram_len) {
ofs = mvm->dbgfs_sram_offset;
len = mvm->dbgfs_sram_len;
}
- bufsz = len * 4 + 256;
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
ptr = kzalloc(len, GFP_KERNEL);
- if (!ptr) {
- kfree(buf);
+ if (!ptr)
return -ENOMEM;
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", len);
- pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", ofs);
iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len);
- for (ofs = 0; ofs < len; ofs += 16) {
- pos += scnprintf(buf + pos, bufsz - pos, "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
- bufsz - pos, false);
- pos += strlen(buf + pos);
- if (bufsz - pos > 0)
- buf[pos++] = '\n';
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len);
- kfree(buf);
kfree(ptr);
return ret;
}
-static ssize_t iwl_dbgfs_sram_write(struct file *file,
- const char __user *user_buf, size_t count,
- loff_t *ppos)
+static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
{
- struct iwl_mvm *mvm = file->private_data;
- char buf[64];
- int buf_size;
+ const struct fw_img *img;
u32 offset, len;
+ u32 img_offset, img_len;
+
+ if (!mvm->ucode_loaded)
+ return -EINVAL;
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
+ img = &mvm->fw->img[mvm->cur_ucode];
+ img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset;
+ img_len = img->sec[IWL_UCODE_SECTION_DATA].len;
if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
if ((offset & 0x3) || (len & 0x3))
return -EINVAL;
+
+ if (offset + len > img_offset + img_len)
+ return -EINVAL;
+
mvm->dbgfs_sram_offset = offset;
mvm->dbgfs_sram_len = len;
} else {
@@ -267,22 +230,14 @@ static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_disable_power_off_write(struct file *file,
- const char __user *user_buf,
+static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- struct iwl_mvm *mvm = file->private_data;
- char buf[64] = {};
- int ret;
- int val;
+ int ret, val;
if (!mvm->ucode_loaded)
return -EIO;
- count = min_t(size_t, count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
if (!strncmp("disable_power_off_d0=", buf, 21)) {
if (sscanf(buf + 21, "%d", &val) != 1)
return -EINVAL;
@@ -302,212 +257,6 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct file *file,
return ret ?: count;
}
-static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- enum iwl_dbgfs_pm_mask param, int val)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm;
-
- dbgfs_pm->mask |= param;
-
- switch (param) {
- case MVM_DEBUGFS_PM_KEEP_ALIVE: {
- struct ieee80211_hw *hw = mvm->hw;
- int dtimper = hw->conf.ps_dtim_period ?: 1;
- int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
-
- IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
- if (val * MSEC_PER_SEC < 3 * dtimper_msec) {
- IWL_WARN(mvm,
- "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
- val * MSEC_PER_SEC, 3 * dtimper_msec);
- }
- dbgfs_pm->keep_alive_seconds = val;
- break;
- }
- case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
- IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n",
- val ? "enabled" : "disabled");
- dbgfs_pm->skip_over_dtim = val;
- break;
- case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
- IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
- dbgfs_pm->skip_dtim_periods = val;
- break;
- case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
- IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
- dbgfs_pm->rx_data_timeout = val;
- break;
- case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
- IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
- dbgfs_pm->tx_data_timeout = val;
- break;
- case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
- IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
- dbgfs_pm->disable_power_off = val;
- break;
- case MVM_DEBUGFS_PM_LPRX_ENA:
- IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
- dbgfs_pm->lprx_ena = val;
- break;
- case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
- IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
- dbgfs_pm->lprx_rssi_threshold = val;
- break;
- case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
- IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
- dbgfs_pm->snooze_ena = val;
- break;
- }
-}
-
-static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->dbgfs_data;
- enum iwl_dbgfs_pm_mask param;
- char buf[32] = {};
- int val;
- int ret;
-
- count = min_t(size_t, count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- if (!strncmp("keep_alive=", buf, 11)) {
- if (sscanf(buf + 11, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_KEEP_ALIVE;
- } else if (!strncmp("skip_over_dtim=", buf, 15)) {
- if (sscanf(buf + 15, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
- } else if (!strncmp("skip_dtim_periods=", buf, 18)) {
- if (sscanf(buf + 18, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
- } else if (!strncmp("rx_data_timeout=", buf, 16)) {
- if (sscanf(buf + 16, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
- } else if (!strncmp("tx_data_timeout=", buf, 16)) {
- if (sscanf(buf + 16, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
- } else if (!strncmp("disable_power_off=", buf, 18) &&
- !(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
- if (sscanf(buf + 18, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
- } else if (!strncmp("lprx=", buf, 5)) {
- if (sscanf(buf + 5, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_LPRX_ENA;
- } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
- if (sscanf(buf + 20, "%d", &val) != 1)
- return -EINVAL;
- if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val <
- POWER_LPRX_RSSI_THRESHOLD_MIN)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
- } else if (!strncmp("snooze_enable=", buf, 14)) {
- if (sscanf(buf + 14, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
- } else {
- return -EINVAL;
- }
-
- mutex_lock(&mvm->mutex);
- iwl_dbgfs_update_pm(mvm, vif, param, val);
- ret = iwl_mvm_power_update_mode(mvm, vif);
- mutex_unlock(&mvm->mutex);
-
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->dbgfs_data;
- char buf[512];
- int bufsz = sizeof(buf);
- int pos;
-
- pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->dbgfs_data;
- u8 ap_sta_id;
- struct ieee80211_chanctx_conf *chanctx_conf;
- char buf[512];
- int bufsz = sizeof(buf);
- int pos = 0;
- int i;
-
- mutex_lock(&mvm->mutex);
-
- ap_sta_id = mvmvif->ap_sta_id;
-
- pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
- mvmvif->id, mvmvif->color);
- pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
- vif->bss_conf.bssid);
- pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
- for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++) {
- pos += scnprintf(buf+pos, bufsz-pos,
- "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n",
- i, mvmvif->queue_params[i].txop,
- mvmvif->queue_params[i].cw_min,
- mvmvif->queue_params[i].cw_max,
- mvmvif->queue_params[i].aifs,
- mvmvif->queue_params[i].uapsd);
- }
-
- if (vif->type == NL80211_IFTYPE_STATION &&
- ap_sta_id != IWL_MVM_STATION_COUNT) {
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvm_sta;
-
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
- lockdep_is_held(&mvm->mutex));
- mvm_sta = (void *)sta->drv_priv;
- pos += scnprintf(buf+pos, bufsz-pos,
- "ap_sta_id %d - reduced Tx power %d\n",
- ap_sta_id, mvm_sta->bt_reduced_txpower);
- }
-
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- if (chanctx_conf) {
- pos += scnprintf(buf+pos, bufsz-pos,
- "idle rx chains %d, active rx chains: %d\n",
- chanctx_conf->rx_chains_static,
- chanctx_conf->rx_chains_dynamic);
- }
- rcu_read_unlock();
-
- mutex_unlock(&mvm->mutex);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
#define BT_MBOX_MSG(_notif, _num, _field) \
((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
>> BT_MBOX##_num##_##_field##_POS)
@@ -783,11 +532,9 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
}
#undef PRINT_STAT_LE32
-static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
- const char __user *user_buf,
+static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- struct iwl_mvm *mvm = file->private_data;
int ret;
mutex_lock(&mvm->mutex);
@@ -804,6 +551,14 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
return count;
}
+static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ iwl_write_prph(mvm->trans, DEVICE_SET_NMI_REG, 1);
+
+ return count;
+}
+
static ssize_t
iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
char __user *user_buf,
@@ -828,21 +583,11 @@ iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
}
static ssize_t
-iwl_dbgfs_scan_ant_rxchain_write(struct file *file,
- const char __user *user_buf,
+iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- struct iwl_mvm *mvm = file->private_data;
- char buf[8];
- int buf_size;
u8 scan_rx_ant;
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
-
- /* get the argument from the user and check if it is valid */
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
return -EINVAL;
if (scan_rx_ant > ANT_ABC)
@@ -850,228 +595,17 @@ iwl_dbgfs_scan_ant_rxchain_write(struct file *file,
if (scan_rx_ant & ~iwl_fw_valid_rx_ant(mvm->fw))
return -EINVAL;
- /* change the rx antennas for scan command */
mvm->scan_rx_ant = scan_rx_ant;
return count;
}
-
-static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
- enum iwl_dbgfs_bf_mask param, int value)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
-
- dbgfs_bf->mask |= param;
-
- switch (param) {
- case MVM_DEBUGFS_BF_ENERGY_DELTA:
- dbgfs_bf->bf_energy_delta = value;
- break;
- case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
- dbgfs_bf->bf_roaming_energy_delta = value;
- break;
- case MVM_DEBUGFS_BF_ROAMING_STATE:
- dbgfs_bf->bf_roaming_state = value;
- break;
- case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
- dbgfs_bf->bf_temp_threshold = value;
- break;
- case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
- dbgfs_bf->bf_temp_fast_filter = value;
- break;
- case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
- dbgfs_bf->bf_temp_slow_filter = value;
- break;
- case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
- dbgfs_bf->bf_enable_beacon_filter = value;
- break;
- case MVM_DEBUGFS_BF_DEBUG_FLAG:
- dbgfs_bf->bf_debug_flag = value;
- break;
- case MVM_DEBUGFS_BF_ESCAPE_TIMER:
- dbgfs_bf->bf_escape_timer = value;
- break;
- case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
- dbgfs_bf->ba_enable_beacon_abort = value;
- break;
- case MVM_DEBUGFS_BA_ESCAPE_TIMER:
- dbgfs_bf->ba_escape_timer = value;
- break;
- }
-}
-
-static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->dbgfs_data;
- enum iwl_dbgfs_bf_mask param;
- char buf[256];
- int buf_size;
- int value;
- int ret = 0;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- if (!strncmp("bf_energy_delta=", buf, 16)) {
- if (sscanf(buf+16, "%d", &value) != 1)
- return -EINVAL;
- if (value < IWL_BF_ENERGY_DELTA_MIN ||
- value > IWL_BF_ENERGY_DELTA_MAX)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_ENERGY_DELTA;
- } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
- if (sscanf(buf+24, "%d", &value) != 1)
- return -EINVAL;
- if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN ||
- value > IWL_BF_ROAMING_ENERGY_DELTA_MAX)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
- } else if (!strncmp("bf_roaming_state=", buf, 17)) {
- if (sscanf(buf+17, "%d", &value) != 1)
- return -EINVAL;
- if (value < IWL_BF_ROAMING_STATE_MIN ||
- value > IWL_BF_ROAMING_STATE_MAX)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_ROAMING_STATE;
- } else if (!strncmp("bf_temp_threshold=", buf, 18)) {
- if (sscanf(buf+18, "%d", &value) != 1)
- return -EINVAL;
- if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
- value > IWL_BF_TEMP_THRESHOLD_MAX)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
- } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
- if (sscanf(buf+20, "%d", &value) != 1)
- return -EINVAL;
- if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
- value > IWL_BF_TEMP_FAST_FILTER_MAX)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
- } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
- if (sscanf(buf+20, "%d", &value) != 1)
- return -EINVAL;
- if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
- value > IWL_BF_TEMP_SLOW_FILTER_MAX)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
- } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
- if (sscanf(buf+24, "%d", &value) != 1)
- return -EINVAL;
- if (value < 0 || value > 1)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
- } else if (!strncmp("bf_debug_flag=", buf, 14)) {
- if (sscanf(buf+14, "%d", &value) != 1)
- return -EINVAL;
- if (value < 0 || value > 1)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_DEBUG_FLAG;
- } else if (!strncmp("bf_escape_timer=", buf, 16)) {
- if (sscanf(buf+16, "%d", &value) != 1)
- return -EINVAL;
- if (value < IWL_BF_ESCAPE_TIMER_MIN ||
- value > IWL_BF_ESCAPE_TIMER_MAX)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
- } else if (!strncmp("ba_escape_timer=", buf, 16)) {
- if (sscanf(buf+16, "%d", &value) != 1)
- return -EINVAL;
- if (value < IWL_BA_ESCAPE_TIMER_MIN ||
- value > IWL_BA_ESCAPE_TIMER_MAX)
- return -EINVAL;
- param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
- } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
- if (sscanf(buf+23, "%d", &value) != 1)
- return -EINVAL;
- if (value < 0 || value > 1)
- return -EINVAL;
- param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
- } else {
- return -EINVAL;
- }
-
- mutex_lock(&mvm->mutex);
- iwl_dbgfs_update_bf(vif, param, value);
- if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) {
- ret = iwl_mvm_disable_beacon_filter(mvm, vif);
- } else {
- ret = iwl_mvm_enable_beacon_filter(mvm, vif);
- }
- mutex_unlock(&mvm->mutex);
-
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- char buf[256];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- struct iwl_beacon_filter_cmd cmd = {
- IWL_BF_CMD_CONFIG_DEFAULTS,
- .bf_enable_beacon_filter =
- cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
- .ba_enable_beacon_abort =
- cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
- };
-
- iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- if (mvmvif->bf_data.bf_enabled)
- cmd.bf_enable_beacon_filter = cpu_to_le32(1);
- else
- cmd.bf_enable_beacon_filter = 0;
-
- pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
- le32_to_cpu(cmd.bf_energy_delta));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
- le32_to_cpu(cmd.bf_roaming_energy_delta));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
- le32_to_cpu(cmd.bf_roaming_state));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
- le32_to_cpu(cmd.bf_temp_threshold));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
- le32_to_cpu(cmd.bf_temp_fast_filter));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
- le32_to_cpu(cmd.bf_temp_slow_filter));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
- le32_to_cpu(cmd.bf_enable_beacon_filter));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
- le32_to_cpu(cmd.bf_debug_flag));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
- le32_to_cpu(cmd.bf_escape_timer));
- pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
- le32_to_cpu(cmd.ba_escape_timer));
- pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
- le32_to_cpu(cmd.ba_enable_beacon_abort));
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
#ifdef CONFIG_PM_SLEEP
-static ssize_t iwl_dbgfs_d3_sram_write(struct file *file,
- const char __user *user_buf,
+static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- struct iwl_mvm *mvm = file->private_data;
- char buf[8] = {};
int store;
- count = min_t(size_t, count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
if (sscanf(buf, "%d", &store) != 1)
return -EINVAL;
@@ -1124,61 +658,33 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
}
#endif
-#define MVM_DEBUGFS_READ_FILE_OPS(name) \
-static const struct file_operations iwl_dbgfs_##name##_ops = { \
- .read = iwl_dbgfs_##name##_read, \
- .open = simple_open, \
- .llseek = generic_file_llseek, \
-}
-
-#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name) \
-static const struct file_operations iwl_dbgfs_##name##_ops = { \
- .write = iwl_dbgfs_##name##_write, \
- .read = iwl_dbgfs_##name##_read, \
- .open = simple_open, \
- .llseek = generic_file_llseek, \
-};
-
-#define MVM_DEBUGFS_WRITE_FILE_OPS(name) \
-static const struct file_operations iwl_dbgfs_##name##_ops = { \
- .write = iwl_dbgfs_##name##_write, \
- .open = simple_open, \
- .llseek = generic_file_llseek, \
-};
-
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) do { \
if (!debugfs_create_file(#name, mode, parent, mvm, \
&iwl_dbgfs_##name##_ops)) \
goto err; \
} while (0)
-#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, vif, \
- &iwl_dbgfs_##name##_ops)) \
- goto err; \
- } while (0)
-
/* Device wide debugfs entries */
-MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush);
-MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram);
+MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
+MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
MVM_DEBUGFS_READ_FILE_OPS(stations);
MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
-MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
#ifdef CONFIG_PM_SLEEP
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
#endif
-/* Interface specific debugfs entries */
-MVM_DEBUGFS_READ_FILE_OPS(mac_params);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params);
-
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
char buf[100];
@@ -1196,6 +702,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
S_IWUSR | S_IRUSR);
#ifdef CONFIG_PM_SLEEP
@@ -1206,6 +713,19 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
goto err;
#endif
+ if (!debugfs_create_blob("nvm_hw", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_hw_blob))
+ goto err;
+ if (!debugfs_create_blob("nvm_sw", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_sw_blob))
+ goto err;
+ if (!debugfs_create_blob("nvm_calib", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_calib_blob))
+ goto err;
+ if (!debugfs_create_blob("nvm_prod", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_prod_blob))
+ goto err;
+
/*
* Create a symlink with mac80211. It will be removed when mac80211
* exists (before the opmode exists which removes the target.)
@@ -1221,72 +741,3 @@ err:
IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
return -ENOMEM;
}
-
-void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
- struct dentry *dbgfs_dir = vif->debugfs_dir;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- char buf[100];
-
- /*
- * Check if debugfs directory already exist before creating it.
- * This may happen when, for example, resetting hw or suspend-resume
- */
- if (!dbgfs_dir || mvmvif->dbgfs_dir)
- return;
-
- mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
- mvmvif->dbgfs_data = mvm;
-
- if (!mvmvif->dbgfs_dir) {
- IWL_ERR(mvm, "Failed to create debugfs directory under %s\n",
- dbgfs_dir->d_name.name);
- return;
- }
-
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
- vif->type == NL80211_IFTYPE_STATION && !vif->p2p)
- MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
- S_IRUSR);
-
- MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
- S_IRUSR);
-
- if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
- mvmvif == mvm->bf_allowed_vif)
- MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
- S_IRUSR | S_IWUSR);
-
- /*
- * Create symlink for convenience pointing to interface specific
- * debugfs entries for the driver. For example, under
- * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/
- * find
- * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
- */
- snprintf(buf, 100, "../../../%s/%s/%s/%s",
- dbgfs_dir->d_parent->d_parent->d_name.name,
- dbgfs_dir->d_parent->d_name.name,
- dbgfs_dir->d_name.name,
- mvmvif->dbgfs_dir->d_name.name);
-
- mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
- mvm->debugfs_dir, buf);
- if (!mvmvif->dbgfs_slink)
- IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n",
- dbgfs_dir->d_name.name);
- return;
-err:
- IWL_ERR(mvm, "Can't create debugfs entity\n");
-}
-
-void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- debugfs_remove(mvmvif->dbgfs_slink);
- mvmvif->dbgfs_slink = NULL;
-
- debugfs_remove_recursive(mvmvif->dbgfs_dir);
- mvmvif->dbgfs_dir = NULL;
-}
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/iwlwifi/mvm/debugfs.h
new file mode 100644
index 000000000000..e3a9774af495
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.h
@@ -0,0 +1,101 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#define MVM_DEBUGFS_READ_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+#define MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ argtype *arg = file->private_data; \
+ char buf[buflen] = {}; \
+ size_t buf_size = min(count, sizeof(buf) - 1); \
+ \
+ if (copy_from_user(buf, user_buf, buf_size)) \
+ return -EFAULT; \
+ \
+ return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos); \
+} \
+
+#define _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \
+MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+#define _MVM_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \
+MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
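For illustration only (this sketch is not part of the patch): the wrapper macros above centralize the copy_from_user() handling that every debugfs write handler previously open-coded. In debugfs.c the per-file variant maps argtype to struct iwl_mvm, so an instantiation such as MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10) expands to roughly the following:

static ssize_t _iwl_dbgfs_fw_restart_write(struct file *file,
					   const char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct iwl_mvm *arg = file->private_data;
	/* zero-initialized, so the copied string is always NUL-terminated */
	char buf[10] = {};
	size_t buf_size = min(count, sizeof(buf) - 1);

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	/* hand a plain kernel buffer to the typed handler */
	return iwl_dbgfs_fw_restart_write(arg, buf, buf_size, ppos);
}

static const struct file_operations iwl_dbgfs_fw_restart_ops = {
	.write = _iwl_dbgfs_fw_restart_write,
	.open = simple_open,
	.llseek = generic_file_llseek,
};

As a result the typed handlers, e.g. iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos), never touch user memory and only parse an already-terminated string of at most bufsz - 1 bytes.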
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
index 4ea5e24ca92d..1b4e54d416b0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -127,6 +127,7 @@ enum iwl_bt_coex_valid_bit_msk {
BT_VALID_ANT_ISOLATION_THRS = BIT(15),
BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
+ BT_VALID_SYNC_TO_SCO = BIT(18),
};
/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 4e7dd8cf87dc..8415ff312d0e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
index 39c3148bdfa8..c405cda1025f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 5cb93ae5cd2f..884c08725308 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -85,6 +85,8 @@
* PBW Snoozing enabled
* @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
* @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
+ * @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
+ * detection enablement
*/
enum iwl_power_flags {
POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
@@ -94,6 +96,7 @@ enum iwl_power_flags {
POWER_FLAGS_BT_SCO_ENA = BIT(8),
POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9),
POWER_FLAGS_LPRX_ENA_MSK = BIT(11),
+ POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = BIT(12),
};
#define IWL_POWER_VEC_SIZE 5
@@ -228,6 +231,19 @@ struct iwl_mac_power_cmd {
u8 reserved;
} __packed;
+/*
+ * struct iwl_uapsd_misbehaving_ap_notif - FW sends this notification when
+ * associated AP is identified as improperly implementing uAPSD protocol.
+ * PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78
+ * @sta_id: index of station in uCode's station table - associated AP ID in
+ * this context.
+ */
+struct iwl_uapsd_misbehaving_ap_notif {
+ __le32 sta_id;
+ u8 mac_id;
+ u8 reserved[3];
+} __packed;
+
/**
* struct iwl_beacon_filter_cmd
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index 538f1c7a5966..85057219cc43 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -281,8 +281,31 @@ enum {
/* # entries in rate scale table to support Tx retries */
#define LQ_MAX_RETRY_NUM 16
-/* Link quality command flags, only this one is available */
-#define LQ_FLAG_SET_STA_TLC_RTS_MSK BIT(0)
+/* Link quality command flags bit fields */
+
+/* Bit 0: (0) Don't use RTS (1) Use RTS */
+#define LQ_FLAG_USE_RTS_POS 0
+#define LQ_FLAG_USE_RTS_MSK (1 << LQ_FLAG_USE_RTS_POS)
+
+/* Bit 1-3: LQ command color. Used to match responses to LQ commands */
+#define LQ_FLAG_COLOR_POS 1
+#define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS)
+
+/* Bit 4-5: Tx RTS BW Signalling
+ * (0) No RTS BW signalling
+ * (1) Static BW signalling
+ * (2) Dynamic BW signalling
+ */
+#define LQ_FLAG_RTS_BW_SIG_POS 4
+#define LQ_FLAG_RTS_BW_SIG_NONE (0 << LQ_FLAG_RTS_BW_SIG_POS)
+#define LQ_FLAG_RTS_BW_SIG_STATIC (1 << LQ_FLAG_RTS_BW_SIG_POS)
+#define LQ_FLAG_RTS_BW_SIG_DYNAMIC (2 << LQ_FLAG_RTS_BW_SIG_POS)
+
+/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection
+ * Dynamic BW selection allows Tx with narrower BW than requested in rates
+ */
+#define LQ_FLAG_DYNAMIC_BW_POS 6
+#define LQ_FLAG_DYNAMIC_BW_MSK (1 << LQ_FLAG_DYNAMIC_BW_POS)
/**
* struct iwl_lq_cmd - link quality command
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index c3782b48ded1..9426905de6b2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -504,6 +504,7 @@ struct iwl_scan_offload_profile {
* @match_notify: clients waiting for match found notification
* @pass_match: clients waiting for the results
* @active_clients: active clients bitmap - enum scan_framework_client
+ * @any_beacon_notify: clients waiting for match notification without match
*/
struct iwl_scan_offload_profile_cfg {
struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
@@ -512,7 +513,8 @@ struct iwl_scan_offload_profile_cfg {
u8 match_notify;
u8 pass_match;
u8 active_clients;
- u8 reserved[3];
+ u8 any_beacon_notify;
+ u8 reserved[2];
} __packed;
/**
@@ -530,14 +532,13 @@ struct iwl_scan_offload_schedule {
/*
* iwl_scan_offload_flags
*
- * IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID: filter mode - upload every beacon or match
- * ssid list.
+ * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
* IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
* IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
* on A band.
*/
enum iwl_scan_offload_flags {
- IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID = BIT(0),
+ IWL_SCAN_OFFLOAD_FLAG_PASS_ALL = BIT(0),
IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2),
IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN = BIT(3),
};
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index 4aca5933a65d..1b60fdff6a56 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -97,9 +97,6 @@ enum iwl_sta_flags {
STA_FLG_FLG_ANT_B),
STA_FLG_PS = BIT(8),
- STA_FLG_INVALID = BIT(9),
- STA_FLG_DLP_EN = BIT(10),
- STA_FLG_SET_ALL_KEYS = BIT(11),
STA_FLG_DRAIN_FLOW = BIT(12),
STA_FLG_PAN = BIT(13),
STA_FLG_CLASS_AUTH = BIT(14),
@@ -138,7 +135,14 @@ enum iwl_sta_flags {
/**
* enum iwl_sta_key_flag - key flags for the ADD_STA host command
- * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm
+ * @STA_KEY_FLG_NO_ENC: no encryption
+ * @STA_KEY_FLG_WEP: WEP encryption algorithm
+ * @STA_KEY_FLG_CCM: CCMP encryption algorithm
+ * @STA_KEY_FLG_TKIP: TKIP encryption algorithm
+ * @STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support)
+ * @STA_KEY_FLG_CMAC: CMAC encryption algorithm
+ * @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm
+ * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm value
* @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
* station info array (1 - n 1X mode)
* @STA_KEY_FLG_KEYID_MSK: the index of the key
@@ -152,6 +156,7 @@ enum iwl_sta_key_flag {
STA_KEY_FLG_WEP = (1 << 0),
STA_KEY_FLG_CCM = (2 << 0),
STA_KEY_FLG_TKIP = (3 << 0),
+ STA_KEY_FLG_EXT = (4 << 0),
STA_KEY_FLG_CMAC = (6 << 0),
STA_KEY_FLG_ENC_UNKNOWN = (7 << 0),
STA_KEY_FLG_EN_MSK = (7 << 0),
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index d606197bde8f..b674c2a2b51c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -132,6 +132,7 @@ enum iwl_tx_flags {
#define TX_CMD_SEC_WEP 0x01
#define TX_CMD_SEC_CCM 0x02
#define TX_CMD_SEC_TKIP 0x03
+#define TX_CMD_SEC_EXT 0x04
#define TX_CMD_SEC_MSK 0x07
#define TX_CMD_SEC_WEP_KEY_IDX_POS 6
#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index bad5a552dd8d..989d7dbdca6c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -141,6 +141,7 @@ enum {
/* Power - legacy power table command */
POWER_TABLE_CMD = 0x77,
+ PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
/* Thermal Throttling*/
REPLY_THERMAL_MNG_BACKOFF = 0x7e,
@@ -183,6 +184,7 @@ enum {
BT_PROFILE_NOTIFICATION = 0xce,
BT_COEX_CI = 0x5d,
+ REPLY_SF_CFG_CMD = 0xd1,
REPLY_BEACON_FILTERING_CMD = 0xd2,
REPLY_DEBUG_CMD = 0xf0,
@@ -1052,6 +1054,7 @@ enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8),
RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8),
RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8),
+ RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8),
RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8),
RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
@@ -1131,6 +1134,7 @@ struct iwl_set_calib_default_cmd {
} __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */
#define MAX_PORT_ID_NUM 2
+#define MAX_MCAST_FILTERING_ADDRESSES 256
/**
* struct iwl_mcast_filter_cmd - configure multicast filter.
@@ -1363,4 +1367,65 @@ struct iwl_notif_statistics { /* STATISTICS_NTFY_API_S_VER_8 */
struct mvm_statistics_general general;
} __packed;
+/***********************************
+ * Smart Fifo API
+ ***********************************/
+/* Smart Fifo state */
+enum iwl_sf_state {
+ SF_LONG_DELAY_ON = 0, /* should never be called by driver */
+ SF_FULL_ON,
+ SF_UNINIT,
+ SF_INIT_OFF,
+ SF_HW_NUM_STATES
+};
+
+/* Smart Fifo possible scenario */
+enum iwl_sf_scenario {
+ SF_SCENARIO_SINGLE_UNICAST,
+ SF_SCENARIO_AGG_UNICAST,
+ SF_SCENARIO_MULTICAST,
+ SF_SCENARIO_BA_RESP,
+ SF_SCENARIO_TX_RESP,
+ SF_NUM_SCENARIO
+};
+
+#define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */
+#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */
+
+/* smart FIFO default values */
+#define SF_W_MARK_SISO 4096
+#define SF_W_MARK_MIMO2 8192
+#define SF_W_MARK_MIMO3 6144
+#define SF_W_MARK_LEGACY 4096
+#define SF_W_MARK_SCAN 4096
+
+/* SF Scenarios timers for FULL_ON state (aligned to 32 uSec) */
+#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */
+#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */
+#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */
+#define SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */
+#define SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */
+#define SF_MCAST_AGING_TIMER 10016 /* 10 mSec */
+#define SF_BA_IDLE_TIMER 320 /* 300 uSec */
+#define SF_BA_AGING_TIMER 2016 /* 2 mSec */
+#define SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */
+#define SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */
+
+#define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */
+
+/**
+ * Smart Fifo configuration command.
+ * @state: smart fifo state, types listed in enum iwl_sf_state.
+ * @watermark: Minimum allowed available free space in RXF for transient state.
+ * @long_delay_timeouts: aging and idle timer values for each scenario
+ * in long delay state.
+ * @full_on_timeouts: timer values for each scenario in full on state.
+ */
+struct iwl_sf_cfg_cmd {
+ enum iwl_sf_state state;
+ __le32 watermark[SF_TRANSIENT_STATES_NUMBER];
+ __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
+ __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
+} __packed; /* SF_CFG_API_S_VER_2 */
+
#endif /* __fw_api_h__ */
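To make the layout of struct iwl_sf_cfg_cmd concrete, here is a minimal sketch that fills the full-on timeouts from the default timer values above. The [idle, aging] ordering of the second array dimension and the helper name are assumptions; the real driver sends the resulting command with REPLY_SF_CFG_CMD.

static void iwl_sf_fill_full_on(struct iwl_sf_cfg_cmd *cmd)
{
	/* Indexed by enum iwl_sf_scenario; [0] = idle timer,
	 * [1] = aging timer (ordering assumed).
	 */
	static const u32 timers[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
		{ SF_SINGLE_UNICAST_IDLE_TIMER, SF_SINGLE_UNICAST_AGING_TIMER },
		{ SF_AGG_UNICAST_IDLE_TIMER, SF_AGG_UNICAST_AGING_TIMER },
		{ SF_MCAST_IDLE_TIMER, SF_MCAST_AGING_TIMER },
		{ SF_BA_IDLE_TIMER, SF_BA_AGING_TIMER },
		{ SF_TX_RE_IDLE_TIMER, SF_TX_RE_AGING_TIMER },
	};
	int i, j;

	cmd->state = SF_FULL_ON;
	for (i = 0; i < SF_NUM_SCENARIO; i++)
		for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++)
			cmd->full_on_timeouts[i][j] =
				cpu_to_le32(timers[i][j]);
}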
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 70e5297646b2..c03d39541f9e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -241,7 +241,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
lockdep_assert_held(&mvm->mutex);
- if (mvm->init_ucode_complete)
+ if (WARN_ON_ONCE(mvm->init_ucode_complete))
return 0;
iwl_init_notification_wait(&mvm->notif_wait,
@@ -287,7 +287,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
IWL_DEBUG_RF_KILL(mvm,
"jump over all phy activities due to RF kill\n");
iwl_remove_notification(&mvm->notif_wait, &calib_wait);
- return 1;
+ ret = 1;
+ goto out;
}
/* Send TX valid antennas before triggering calibrations */
@@ -319,9 +320,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
error:
iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
- if (!iwlmvm_mod_params.init_dbg) {
- iwl_trans_stop_device(mvm->trans);
- } else if (!mvm->nvm_data) {
+ if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
/* we want to debug INIT and we have no NVM - fake */
mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
sizeof(struct ieee80211_channel) +
@@ -370,11 +369,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = -ERFKILL;
goto error;
}
- /* should stop & start HW since that INIT image just loaded */
- iwl_trans_stop_hw(mvm->trans, false);
- ret = iwl_trans_start_hw(mvm->trans);
- if (ret)
- return ret;
+ if (!iwlmvm_mod_params.init_dbg) {
+ /*
+ * should stop and start HW since that INIT
+ * image just loaded
+ */
+ iwl_trans_stop_device(mvm->trans);
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret)
+ return ret;
+ }
}
if (iwlmvm_mod_params.init_dbg)
@@ -386,6 +390,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
+ ret = iwl_mvm_sf_update(mvm, NULL, false);
+ if (ret)
+ IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
+
ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
if (ret)
goto error;
diff --git a/drivers/net/wireless/iwlwifi/mvm/led.c b/drivers/net/wireless/iwlwifi/mvm/led.c
index 2269a9e5cc67..6b4ea6bf8ffe 100644
--- a/drivers/net/wireless/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/iwlwifi/mvm/led.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -103,7 +103,7 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
return 0;
default:
return -EINVAL;
- };
+ }
mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
wiphy_name(mvm->hw->wiphy));
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index f41f9b079831..ba723d50939a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -69,10 +69,10 @@
#include "mvm.h"
const u8 iwl_mvm_ac_to_tx_fifo[] = {
- IWL_MVM_TX_FIFO_BK,
- IWL_MVM_TX_FIFO_BE,
- IWL_MVM_TX_FIFO_VI,
IWL_MVM_TX_FIFO_VO,
+ IWL_MVM_TX_FIFO_VI,
+ IWL_MVM_TX_FIFO_BE,
+ IWL_MVM_TX_FIFO_BK,
};
struct iwl_mvm_mac_iface_iterator_data {
@@ -85,35 +85,15 @@ struct iwl_mvm_mac_iface_iterator_data {
bool found_vif;
};
-static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm_mac_iface_iterator_data *data = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u32 ac;
- /* Iterator may already find the interface being added -- skip it */
- if (vif == data->vif) {
- data->found_vif = true;
+ /* Skip the interface for which we are trying to assign a tsf_id */
+ if (vif == data->vif)
return;
- }
-
- /* Mark the queues used by the vif */
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
- if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
- __set_bit(vif->hw_queue[ac], data->used_hw_queues);
-
- if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
- __set_bit(vif->cab_queue, data->used_hw_queues);
-
- /*
- * Mark MAC IDs as used by clearing the available bit, and
- * (below) mark TSFs as used if their existing use is not
- * compatible with the new interface type.
- * No locking or atomic bit operations are needed since the
- * data is on the stack of the caller function.
- */
- __clear_bit(mvmvif->id, data->available_mac_ids);
/*
* The TSF is a hardware/firmware resource, there are 4 and
@@ -135,21 +115,26 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
case NL80211_IFTYPE_STATION:
/*
* The new interface is client, so if the existing one
- * we're iterating is an AP, the TSF should be used to
+ * we're iterating is an AP, and both interfaces have the
+ * same beacon interval, the same TSF should be used to
* avoid drift between the new client and existing AP,
* the existing AP will get drift updates from the new
* client context in this case
*/
if (vif->type == NL80211_IFTYPE_AP) {
if (data->preferred_tsf == NUM_TSF_IDS &&
- test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
+ (vif->bss_conf.beacon_int ==
+ data->vif->bss_conf.beacon_int)) {
data->preferred_tsf = mvmvif->tsf_id;
- return;
+ return;
+ }
}
break;
case NL80211_IFTYPE_AP:
/*
- * The new interface is AP/GO, so should get drift
+ * The new interface is AP/GO, so in case both interfaces
+ * have the same beacon interval, it should get drift
* updates from an existing client or use the same
* TSF as an existing GO. There's no drift between
* TSFs internally but if they used different TSFs
@@ -159,9 +144,12 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
if (vif->type == NL80211_IFTYPE_STATION ||
vif->type == NL80211_IFTYPE_AP) {
if (data->preferred_tsf == NUM_TSF_IDS &&
- test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
+ (vif->bss_conf.beacon_int ==
+ data->vif->bss_conf.beacon_int)) {
data->preferred_tsf = mvmvif->tsf_id;
- return;
+ return;
+ }
}
break;
default:
@@ -187,6 +175,39 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
data->preferred_tsf = NUM_TSF_IDS;
}
+static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_mac_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 ac;
+
+ /* Iterator may already find the interface being added -- skip it */
+ if (vif == data->vif) {
+ data->found_vif = true;
+ return;
+ }
+
+ /* Mark the queues used by the vif */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+ __set_bit(vif->hw_queue[ac], data->used_hw_queues);
+
+ if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
+ __set_bit(vif->cab_queue, data->used_hw_queues);
+
+ /* Mark MAC IDs as used by clearing the available bit, and
+ * (below) mark TSFs as used if their existing use is not
+ * compatible with the new interface type.
+ * No locking or atomic bit operations are needed since the
+ * data is on the stack of the caller function.
+ */
+ __clear_bit(mvmvif->id, data->available_mac_ids);
+
+ /* find a suitable tsf_id */
+ iwl_mvm_mac_tsf_id_iter(_data, mac, vif);
+}
+
/*
 * Get the mask of the queues used by the vif
*/
@@ -205,6 +226,29 @@ u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
return qmask;
}
+void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_mac_iface_iterator_data data = {
+ .mvm = mvm,
+ .vif = vif,
+ .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
+ /* no preference yet */
+ .preferred_tsf = NUM_TSF_IDS,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_mac_tsf_id_iter, &data);
+
+ if (data.preferred_tsf != NUM_TSF_IDS)
+ mvmvif->tsf_id = data.preferred_tsf;
+ else if (!test_bit(mvmvif->tsf_id, data.available_tsf_ids))
+ mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
+ NUM_TSF_IDS);
+}
+
static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -488,6 +532,40 @@ static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
*ofdm_rates = ofdm;
}
+static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_ctx_cmd *cmd)
+{
+ /* for both sta and ap, ht_operation_mode holds the protection_mode */
+ u8 protection_mode = vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION;
+ /* The fw does not distinguish between ht and fat */
+ u32 ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT;
+
+ IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode);
+ /*
+ * See section 9.23.3.1 of IEEE 802.11-2012.
+ * Nongreenfield HT STAs Present is not supported.
+ */
+ switch (protection_mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ cmd->protection_flags |= cpu_to_le32(ht_flag);
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ /* Protect when channel wider than 20MHz */
+ if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20)
+ cmd->protection_flags |= cpu_to_le32(ht_flag);
+ break;
+ default:
+ IWL_ERR(mvm, "Illegal protection mode %d\n",
+ protection_mode);
+ break;
+ }
+}
+
static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_ctx_cmd *cmd,
@@ -495,6 +573,8 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_chanctx_conf *chanctx;
+ bool ht_enabled = !!(vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION);
u8 cck_ack_rates, ofdm_ack_rates;
int i;
@@ -550,18 +630,23 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
cpu_to_le32(vif->bss_conf.use_short_slot ?
MAC_FLG_SHORT_SLOT : 0);
- for (i = 0; i < AC_NUM; i++) {
- cmd->ac[i].cw_min = cpu_to_le16(mvmvif->queue_params[i].cw_min);
- cmd->ac[i].cw_max = cpu_to_le16(mvmvif->queue_params[i].cw_max);
- cmd->ac[i].aifsn = mvmvif->queue_params[i].aifs;
- cmd->ac[i].edca_txop =
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ u8 txf = iwl_mvm_ac_to_tx_fifo[i];
+
+ cmd->ac[txf].cw_min =
+ cpu_to_le16(mvmvif->queue_params[i].cw_min);
+ cmd->ac[txf].cw_max =
+ cpu_to_le16(mvmvif->queue_params[i].cw_max);
+ cmd->ac[txf].edca_txop =
cpu_to_le16(mvmvif->queue_params[i].txop * 32);
- cmd->ac[i].fifos_mask = BIT(iwl_mvm_ac_to_tx_fifo[i]);
+ cmd->ac[txf].aifsn = mvmvif->queue_params[i].aifs;
+ cmd->ac[txf].fifos_mask = BIT(txf);
}
/* in AP mode, the MCAST FIFO takes the EDCA params from VO */
if (vif->type == NL80211_IFTYPE_AP)
- cmd->ac[AC_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
+ cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |=
+ BIT(IWL_MVM_TX_FIFO_MCAST);
if (vif->bss_conf.qos)
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
@@ -573,16 +658,13 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
cmd->protection_flags |=
cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
}
-
- /*
- * I think that we should enable these 2 flags regardless the HT PROT
- * fields in the HT IE, but I am not sure. Someone knows whom to ask?...
- */
- if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
+ IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
+ vif->bss_conf.use_cts_prot,
+ vif->bss_conf.ht_operation_mode);
+ if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
- cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_HT_PROT |
- MAC_PROT_FLG_FAT_PROT);
- }
+ if (ht_enabled)
+ iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
}
@@ -974,7 +1056,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
iwl_mvm_mac_ap_iterator, &data);
if (data.beacon_device_ts) {
- u32 rand = (prandom_u32() % (80 - 20)) + 20;
+ u32 rand = (prandom_u32() % (64 - 36)) + 36;
mvmvif->ap_beacon_time = data.beacon_device_ts +
ieee80211_tu_to_usec(data.beacon_int * rand /
100);
@@ -1153,10 +1235,18 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
- u16 *id = _data;
+ struct iwl_missed_beacons_notif *missed_beacons = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- if (mvmvif->id == *id)
+ if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id))
+ return;
+
+ /*
+ * TODO: the threshold should be adjusted based on latency conditions,
+ * and/or in case of a CS flow on one of the other AP vifs.
+ */
+ if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) >
+ IWL_MVM_MISSED_BEACONS_THRESHOLD)
ieee80211_beacon_loss(vif);
}
@@ -1165,12 +1255,19 @@ int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_missed_beacons_notif *missed_beacons = (void *)pkt->data;
- u16 id = (u16)le32_to_cpu(missed_beacons->mac_id);
+ struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
+
+ IWL_DEBUG_INFO(mvm,
+ "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
+ le32_to_cpu(mb->mac_id),
+ le32_to_cpu(mb->consec_missed_beacons),
+ le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
+ le32_to_cpu(mb->num_recvd_beacons),
+ le32_to_cpu(mb->num_expected_beacons));
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_beacon_loss_iterator,
- &id);
+ mb);
return 0;
}
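One small worked example for the iwl_mvm_mac_ctxt_cmd_fill_ap change above: the new modulo picks a random point between 36% and 63% of the existing beacon interval, so the new AP's beacon time lands away from the other vif's beacons. A sketch with a hypothetical helper name:

static u32 iwl_ap_beacon_offset_usec(u32 beacon_int_tu)
{
	/* rand is uniform in 36..63 (percent of the beacon interval) */
	u32 rand = (prandom_u32() % (64 - 36)) + 36;

	return ieee80211_tu_to_usec(beacon_int_tu * rand / 100);
}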
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 74bc2c8af06d..6bf9766e5982 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -199,9 +199,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS |
- WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_DISABLE_BEACON_HINTS;
hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
hw->wiphy->n_iface_combinations =
@@ -246,7 +246,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
+ if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
@@ -256,10 +256,17 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
- NL80211_FEATURE_P2P_GO_OPPPS;
+ NL80211_FEATURE_P2P_GO_OPPPS |
+ NL80211_FEATURE_LOW_PRIORITY_SCAN;
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+ /* currently FW API supports only one optional cipher scheme */
+ if (mvm->fw->cs[0].cipher) {
+ mvm->hw->n_cipher_schemes = 1;
+ mvm->hw->cipher_schemes = &mvm->fw->cs[0];
+ }
+
#ifdef CONFIG_PM_SLEEP
if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
mvm->trans->ops->d3_suspend &&
@@ -398,7 +405,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
iwl_trans_stop_device(mvm->trans);
- iwl_trans_stop_hw(mvm->trans, false);
mvm->scan_status = IWL_MVM_SCAN_NONE;
@@ -470,7 +476,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
cancel_work_sync(&mvm->roc_done_wk);
iwl_trans_stop_device(mvm->trans);
- iwl_trans_stop_hw(mvm->trans, false);
iwl_mvm_async_handlers_purge(mvm);
/* async_handlers_list is empty and will stay empty: HW is stopped */
@@ -487,17 +492,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
cancel_work_sync(&mvm->async_handlers_wk);
}
-static void iwl_mvm_pm_disable_iterator(void *data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm *mvm = data;
- int ret;
-
- ret = iwl_mvm_power_disable(mvm, vif);
- if (ret)
- IWL_ERR(mvm, "failed to disable power management\n");
-}
-
static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -520,6 +514,20 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
return NULL;
}
+static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ s8 tx_power)
+{
+ /* FW is in charge of regulatory enforcement */
+ struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
+ .mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
+ .pwr_restriction = cpu_to_le16(tx_power),
+ };
+
+ return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
+ sizeof(reduce_txpwr_cmd),
+ &reduce_txpwr_cmd);
+}
+
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -540,26 +548,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out_unlock;
- /*
- * TODO: remove this temporary code.
- * Currently MVM FW supports power management only on single MAC.
- * If new interface added, disable PM on existing interface.
- * P2P device is a special case, since it is handled by FW similary to
- * scan. If P2P deviced is added, PM remains enabled on existing
- * interface.
- * Note: the method below does not count the new interface being added
- * at this moment.
- */
+ /* Counting number of interfaces is needed for legacy PM */
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count++;
- if (mvm->vif_count > 1) {
- IWL_DEBUG_MAC80211(mvm,
- "Disable power on existing interfaces\n");
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_pm_disable_iterator, mvm);
- }
/*
* The AP binding flow can be done only after the beacon
@@ -590,11 +581,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out_release;
- /*
- * Update power state on the new interface. Admittedly, based on
- * mac80211 logics this power update will disable power management
- */
- iwl_mvm_power_update_mode(mvm, vif);
+ iwl_mvm_power_disable(mvm, vif);
/* beacon filtering */
ret = iwl_mvm_disable_beacon_filter(mvm, vif);
@@ -655,9 +642,12 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
out_release:
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
+
+ /* TODO: remove this when legacy PM is discarded */
ieee80211_iterate_active_interfaces(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_power_update_iterator, mvm);
+
iwl_mvm_mac_ctxt_release(mvm, vif);
out_unlock:
mutex_unlock(&mvm->mutex);
@@ -743,21 +733,13 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvmvif->phy_ctxt = NULL;
}
- /*
- * TODO: remove this temporary code.
- * Currently MVM FW supports power management only on single MAC.
- * Check if only one additional interface remains after removing
- * current one. Update power mode on the remaining interface.
- */
if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
- IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
- mvm->vif_count);
- if (mvm->vif_count == 1) {
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_update_iterator, mvm);
- }
+
+ /* TODO: remove this when legacy PM is discarded */
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_update_iterator, mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
@@ -766,23 +748,91 @@ out_release:
mutex_unlock(&mvm->mutex);
}
-static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- s8 tx_power)
+static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
{
- /* FW is in charge of regulatory enforcement */
- struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
- .mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
- .pwr_restriction = cpu_to_le16(tx_power),
+ return 0;
+}
+
+struct iwl_mvm_mc_iter_data {
+ struct iwl_mvm *mvm;
+ int port_id;
+};
+
+static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_mc_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+ int ret, len;
+
+ /* if we don't have free ports, mcast frames will be dropped */
+ if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc)
+ return;
+
+ cmd->port_id = data->port_id++;
+ memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+ if (ret)
+ IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
+}
+
+static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_mc_iter_data iter_data = {
+ .mvm = mvm,
};
- return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
- sizeof(reduce_txpwr_cmd),
- &reduce_txpwr_cmd);
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
+ return;
+
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_mc_iface_iterator, &iter_data);
}
-static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
+static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
{
- return 0;
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mcast_filter_cmd *cmd;
+ struct netdev_hw_addr *addr;
+ int addr_count = netdev_hw_addr_list_count(mc_list);
+ bool pass_all = false;
+ int len;
+
+ if (addr_count > MAX_MCAST_FILTERING_ADDRESSES) {
+ pass_all = true;
+ addr_count = 0;
+ }
+
+ len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
+ cmd = kzalloc(len, GFP_ATOMIC);
+ if (!cmd)
+ return 0;
+
+ if (pass_all) {
+ cmd->pass_all = 1;
+ return (u64)(unsigned long)cmd;
+ }
+
+ netdev_hw_addr_list_for_each(addr, mc_list) {
+ IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
+ cmd->count, addr->addr);
+ memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
+ addr->addr, ETH_ALEN);
+ cmd->count++;
+ }
+
+ return (u64)(unsigned long)cmd;
}
static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
@@ -790,21 +840,22 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
unsigned int *total_flags,
u64 multicast)
{
- *total_flags = 0;
-}
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
-static int iwl_mvm_configure_mcast_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_mcast_filter_cmd mcast_filter_cmd = {
- .pass_all = 1,
- };
+ mutex_lock(&mvm->mutex);
- memcpy(mcast_filter_cmd.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ /* replace previous configuration */
+ kfree(mvm->mcast_filter_cmd);
+ mvm->mcast_filter_cmd = cmd;
- return iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC,
- sizeof(mcast_filter_cmd),
- &mcast_filter_cmd);
+ if (!cmd)
+ goto out;
+
+ iwl_mvm_recalc_multicast(mvm);
+out:
+ mutex_unlock(&mvm->mutex);
+ *total_flags = 0;
}
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
@@ -815,6 +866,14 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
+ /*
+ * Re-calculate the tsf id, as the master-slave relations depend on the
+ * beacon interval, which was not known when the station interface was
+ * added.
+ */
+ if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
+ iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+
ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
if (ret)
IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
@@ -827,7 +886,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to update quotas\n");
return;
}
- iwl_mvm_configure_mcast_filter(mvm, vif);
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
&mvm->status)) {
@@ -849,7 +907,17 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
iwl_mvm_protect_session(mvm, vif, dur, dur,
5 * dur);
}
+
+ iwl_mvm_sf_update(mvm, vif, false);
+ iwl_mvm_power_vif_assoc(mvm, vif);
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+ /*
+ * If update fails - SF might be running in associated
+ * mode while disassociated - which is forbidden.
+ */
+ WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
+ "Failed to update SF upon disassociation\n");
+
/* remove AP station now that the MAC is unassoc */
ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
if (ret)
@@ -861,6 +929,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to update quotas\n");
}
+ iwl_mvm_recalc_multicast(mvm);
+
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
@@ -874,6 +944,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to update power mode\n");
}
iwl_mvm_bt_coex_vif_change(mvm);
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
+ IEEE80211_SMPS_AUTOMATIC);
} else if (changes & BSS_CHANGED_BEACON_INFO) {
/*
* We received a beacon _after_ association so
@@ -881,7 +953,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*/
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
- } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_QOS)) {
+ } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
+ BSS_CHANGED_QOS)) {
ret = iwl_mvm_power_update_mode(mvm, vif);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
@@ -916,6 +989,13 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_unlock;
+ /*
+ * Re-calculate the tsf id, as the master-slave relations depend on the
+ * beacon interval, which was not known when the AP interface was added.
+ */
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+
/* Add the mac context */
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
if (ret)
@@ -934,9 +1014,16 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_unbind;
+ /* must be set before quota calculations */
+ mvmvif->ap_ibss_active = true;
+
+ /* power update needs to be done before quotas */
+ mvm->bound_vif_cnt++;
+ iwl_mvm_power_update_binding(mvm, vif, true);
+
ret = iwl_mvm_update_quotas(mvm, vif);
if (ret)
- goto out_rm_bcast;
+ goto out_quota_failed;
/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
@@ -947,7 +1034,10 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
return 0;
-out_rm_bcast:
+out_quota_failed:
+ mvm->bound_vif_cnt--;
+ iwl_mvm_power_update_binding(mvm, vif, false);
+ mvmvif->ap_ibss_active = false;
iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
out_unbind:
iwl_mvm_binding_remove_vif(mvm, vif);
@@ -979,6 +1069,10 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_update_quotas(mvm, NULL);
iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
iwl_mvm_binding_remove_vif(mvm, vif);
+
+ mvm->bound_vif_cnt--;
+ iwl_mvm_power_update_binding(mvm, vif, false);
+
iwl_mvm_mac_ctxt_remove(mvm, vif);
mutex_unlock(&mvm->mutex);
@@ -990,6 +1084,22 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
struct ieee80211_bss_conf *bss_conf,
u32 changes)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ enum ieee80211_bss_change ht_change = BSS_CHANGED_ERP_CTS_PROT |
+ BSS_CHANGED_HT |
+ BSS_CHANGED_BANDWIDTH;
+ int ret;
+
+ /* Changes will be applied when the AP/IBSS is started */
+ if (!mvmvif->ap_ibss_active)
+ return;
+
+ if (changes & ht_change) {
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
+ }
+
/* Need to send a new beacon template to the FW */
if (changes & BSS_CHANGED_BEACON) {
if (iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
@@ -1080,7 +1190,7 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
switch (cmd) {
case STA_NOTIFY_SLEEP:
@@ -1102,6 +1212,28 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
}
}
+static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+
+ /*
+ * This is called before mac80211 does RCU synchronisation,
+ * so here we already invalidate our internal RCU-protected
+ * station pointer. The rest of the code will thus no longer
+ * be able to find the station this way, and we don't rely
+ * on further RCU synchronisation after the sta_state()
+ * callback deleted the station.
+ */
+ mutex_lock(&mvm->mutex);
+ if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
+ ERR_PTR(-ENOENT));
+ mutex_unlock(&mvm->mutex);
+}
+
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -1149,7 +1281,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
ret = iwl_mvm_update_sta(mvm, vif, sta);
if (ret == 0)
iwl_mvm_rs_rate_init(mvm, sta,
- mvmvif->phy_ctxt->channel->band);
+ mvmvif->phy_ctxt->channel->band,
+ true);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
/* enable beacon filtering */
@@ -1187,6 +1320,17 @@ static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return 0;
}
+static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u32 changed)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ changed & IEEE80211_RC_NSS_CHANGED)
+ iwl_mvm_sf_update(mvm, vif, false);
+}
+
static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 ac,
const struct ieee80211_tx_queue_params *params)
@@ -1309,7 +1453,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
*/
return 0;
default:
- return -EOPNOTSUPP;
+ /* currently FW supports only one optional cipher scheme */
+ if (hw->n_cipher_schemes &&
+ hw->cipher_schemes->cipher == key->cipher)
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ else
+ return -EOPNOTSUPP;
}
mutex_lock(&mvm->mutex);
@@ -1515,7 +1664,7 @@ static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
goto out;
}
- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
if (ret) {
@@ -1553,13 +1702,14 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
if (WARN_ONCE((phy_ctxt->ref > 1) &&
(changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
- IEEE80211_CHANCTX_CHANGE_RADAR)),
+ IEEE80211_CHANCTX_CHANGE_RADAR |
+ IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
"Cannot change PHY. Ref=%d, changed=0x%X\n",
phy_ctxt->ref, changed))
return;
mutex_lock(&mvm->mutex);
- iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
+ iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
iwl_mvm_bt_coex_vif_change(mvm);
@@ -1602,7 +1752,13 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out_unlock;
/*
- * Setting the quota at this stage is only required for monitor
+ * Power state must be updated before quotas,
+ * otherwise fw will complain.
+ */
+ mvm->bound_vif_cnt++;
+ iwl_mvm_power_update_binding(mvm, vif, true);
+
+ /* Setting the quota at this stage is only required for monitor
* interfaces. For the other types, the bss_info changed flow
* will handle quota settings.
*/
@@ -1617,6 +1773,8 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
out_remove_binding:
iwl_mvm_binding_remove_vif(mvm, vif);
+ mvm->bound_vif_cnt--;
+ iwl_mvm_power_update_binding(mvm, vif, false);
out_unlock:
mutex_unlock(&mvm->mutex);
if (ret)
@@ -1648,6 +1806,9 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
iwl_mvm_binding_remove_vif(mvm, vif);
+ mvm->bound_vif_cnt--;
+ iwl_mvm_power_update_binding(mvm, vif, false);
+
out_unlock:
mvmvif->phy_ctxt = NULL;
mutex_unlock(&mvm->mutex);
@@ -1744,14 +1905,17 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
.add_interface = iwl_mvm_mac_add_interface,
.remove_interface = iwl_mvm_mac_remove_interface,
.config = iwl_mvm_mac_config,
+ .prepare_multicast = iwl_mvm_prepare_multicast,
.configure_filter = iwl_mvm_configure_filter,
.bss_info_changed = iwl_mvm_bss_info_changed,
.hw_scan = iwl_mvm_mac_hw_scan,
.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
+ .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
.sta_state = iwl_mvm_mac_sta_state,
.sta_notify = iwl_mvm_mac_sta_notify,
.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
+ .sta_rc_update = iwl_mvm_sta_rc_update,
.conf_tx = iwl_mvm_mac_conf_tx,
.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
.sched_scan_start = iwl_mvm_mac_sched_scan_start,
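A short note on the multicast plumbing added above: prepare_multicast() allocates the variable-length iwl_mcast_filter_cmd and returns the pointer packed into mac80211's u64 multicast cookie, and configure_filter() later casts it back and takes ownership. The command length used in both paths is just the fixed header plus one 6-byte address per entry, rounded to a 4-byte multiple, as in this hypothetical helper:

static int iwl_mvm_mcast_cmd_len(const struct iwl_mcast_filter_cmd *cmd)
{
	return roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
}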
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index fed21ef4162d..e4ead86f06d6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -81,6 +81,7 @@
#define IWL_MVM_MAX_ADDRESSES 5
/* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50
+#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
enum iwl_mvm_tx_fifo {
IWL_MVM_TX_FIFO_BK = 0,
@@ -163,6 +164,8 @@ struct iwl_mvm_power_ops {
struct ieee80211_vif *vif);
int (*power_update_device_mode)(struct iwl_mvm *mvm);
int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+ void (*power_update_binding)(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, bool assign);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
char *buf, int bufsz);
@@ -181,6 +184,7 @@ enum iwl_dbgfs_pm_mask {
MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
+ MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9),
};
struct iwl_dbgfs_pm {
@@ -193,6 +197,7 @@ struct iwl_dbgfs_pm {
bool lprx_ena;
u32 lprx_rssi_threshold;
bool snooze_ena;
+ bool uapsd_misbehaving;
int mask;
};
@@ -269,8 +274,8 @@ struct iwl_mvm_vif_bf_data {
* @bcast_sta: station used for broadcast packets. Used by the following
* vifs: P2P_DEVICE, GO and AP.
* @beacon_skb: the skb used to hold the AP/GO beacon template
- * @smps_requests: the requests of of differents parts of the driver, regard
- the desired smps mode.
+ * @smps_requests: the SMPS requests of different parts of the driver,
+ * combined on update to yield the overall request to mac80211.
*/
struct iwl_mvm_vif {
u16 id;
@@ -323,14 +328,19 @@ struct iwl_mvm_vif {
#endif
#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct iwl_mvm *mvm;
struct dentry *dbgfs_dir;
struct dentry *dbgfs_slink;
- void *dbgfs_data;
struct iwl_dbgfs_pm dbgfs_pm;
struct iwl_dbgfs_bf dbgfs_bf;
#endif
enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
+
+ /* FW identified misbehaving AP */
+ u8 uapsd_misbehaving_bssid[ETH_ALEN];
+
+ bool pm_prevented;
};
static inline struct iwl_mvm_vif *
@@ -479,6 +489,7 @@ struct iwl_mvm {
/* Scan status, cmd (pre-allocated) and auxiliary station */
enum iwl_scan_status scan_status;
struct iwl_scan_cmd *scan_cmd;
+ struct iwl_mcast_filter_cmd *mcast_filter_cmd;
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
@@ -489,11 +500,19 @@ struct iwl_mvm {
u8 scan_last_antenna_idx; /* to toggle TX between antennas */
u8 mgmt_last_antenna_idx;
+ /* last smart fifo state that was successfully sent to firmware */
+ enum iwl_sf_state sf_state;
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct dentry *debugfs_dir;
u32 dbgfs_sram_offset, dbgfs_sram_len;
bool disable_power_off;
bool disable_power_off_d3;
+
+ struct debugfs_blob_wrapper nvm_hw_blob;
+ struct debugfs_blob_wrapper nvm_sw_blob;
+ struct debugfs_blob_wrapper nvm_calib_blob;
+ struct debugfs_blob_wrapper nvm_prod_blob;
#endif
struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -507,12 +526,6 @@ struct iwl_mvm {
*/
unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
- /*
- * This counter of created interfaces is referenced only in conjunction
- * with FW limitation related to power management. Currently PM is
- * supported only on a single interface.
- * IMPORTANT: this variable counts all interfaces except P2P device.
- */
u8 vif_count;
/* -1 for always, 0 for never, >0 for that many times */
@@ -531,6 +544,7 @@ struct iwl_mvm {
bool store_d3_resume_sram;
void *d3_resume_sram;
u32 d3_test_pme_ptr;
+ struct ieee80211_vif *keep_vif;
#endif
#endif
@@ -554,6 +568,11 @@ struct iwl_mvm {
u8 aux_queue;
u8 first_agg_queue;
u8 last_agg_queue;
+
+ u8 bound_vif_cnt;
+
+ /* Indicates whether device power save is currently prevented */
+ bool ps_prevented;
};
/* Extract MVM priv from op_mode and _hw */
@@ -693,6 +712,8 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
/* Bindings */
int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -750,8 +771,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
#endif /* CONFIG_IWLWIFI_DEBUGFS */
/* rate scaling */
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
- u8 flags, bool init);
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
/* power management */
static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm,
@@ -773,6 +793,19 @@ static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
return 0;
}
+static inline void iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool assign)
+{
+ if (mvm->pm_ops->power_update_binding)
+ mvm->pm_ops->power_update_binding(mvm, vif, assign);
+}
+
+void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -864,4 +897,8 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm);
void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
+/* smart fifo */
+int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool added_vif);
+
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 2beffd028b67..35b71af78d02 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -367,16 +367,17 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
break;
}
+ if (WARN(section_id >= NVM_NUM_OF_SECTIONS,
+ "Invalid NVM section ID %d\n", section_id)) {
+ ret = -EINVAL;
+ break;
+ }
+
temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
if (!temp) {
ret = -ENOMEM;
break;
}
- if (WARN_ON(section_id >= NVM_NUM_OF_SECTIONS)) {
- IWL_ERR(mvm, "Invalid NVM section ID\n");
- ret = -EINVAL;
- break;
- }
mvm->nvm_sections[section_id].data = temp;
mvm->nvm_sections[section_id].length = section_size;
@@ -391,17 +392,16 @@ out:
/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
{
- int i, ret;
- u16 section_id;
+ int i, ret = 0;
struct iwl_nvm_section *sections = mvm->nvm_sections;
IWL_DEBUG_EEPROM(mvm->trans->dev, "Write to NVM\n");
- for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
- section_id = nvm_to_read[i];
- ret = iwl_nvm_write_section(mvm, section_id,
- sections[section_id].data,
- sections[section_id].length);
+ for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) {
+ if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length)
+ continue;
+ ret = iwl_nvm_write_section(mvm, i, sections[i].data,
+ sections[i].length);
if (ret < 0) {
IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
break;
@@ -443,6 +443,29 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
}
mvm->nvm_sections[section].data = temp;
mvm->nvm_sections[section].length = ret;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ switch (section) {
+ case NVM_SECTION_TYPE_HW:
+ mvm->nvm_hw_blob.data = temp;
+ mvm->nvm_hw_blob.size = ret;
+ break;
+ case NVM_SECTION_TYPE_SW:
+ mvm->nvm_sw_blob.data = temp;
+ mvm->nvm_sw_blob.size = ret;
+ break;
+ case NVM_SECTION_TYPE_CALIBRATION:
+ mvm->nvm_calib_blob.data = temp;
+ mvm->nvm_calib_blob.size = ret;
+ break;
+ case NVM_SECTION_TYPE_PRODUCTION:
+ mvm->nvm_prod_blob.data = temp;
+ mvm->nvm_prod_blob.size = ret;
+ break;
+ default:
+ WARN(1, "section: %d", section);
+ }
+#endif
}
kfree(nvm_buffer);
if (ret < 0)
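The new debugfs_blob_wrapper fields populated above are typically exposed through debugfs_create_blob(); a sketch follows, where the file names and the use of mvm->debugfs_dir as the parent are assumptions rather than what this patch itself wires up.

static void iwl_mvm_nvm_debugfs_register(struct iwl_mvm *mvm)
{
#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* read-only blobs; names are illustrative */
	debugfs_create_blob("nvm_hw", S_IRUSR, mvm->debugfs_dir,
			    &mvm->nvm_hw_blob);
	debugfs_create_blob("nvm_sw", S_IRUSR, mvm->debugfs_dir,
			    &mvm->nvm_sw_blob);
	debugfs_create_blob("nvm_calib", S_IRUSR, mvm->debugfs_dir,
			    &mvm->nvm_calib_blob);
	debugfs_create_blob("nvm_prod", S_IRUSR, mvm->debugfs_dir,
			    &mvm->nvm_prod_blob);
#endif
}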
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index d86083c6f445..a3d43de342d7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -236,6 +236,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
false),
RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
+ RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
+ iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
};
#undef RX_HANDLER
#define CMD(x) [x] = #x
@@ -307,10 +309,12 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BT_PROFILE_NOTIFICATION),
CMD(BT_CONFIG),
CMD(MCAST_FILTER_CMD),
+ CMD(REPLY_SF_CFG_CMD),
CMD(REPLY_BEACON_FILTERING_CMD),
CMD(REPLY_THERMAL_MNG_BACKOFF),
CMD(MAC_PM_POWER_TABLE),
CMD(BT_COEX_CI),
+ CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
};
#undef CMD
@@ -341,7 +345,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
op_mode = hw->priv;
op_mode->ops = &iwl_mvm_ops;
- op_mode->trans = trans;
mvm = IWL_OP_MODE_GET_MVM(op_mode);
mvm->dev = trans->dev;
@@ -359,6 +362,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->aux_queue = 11;
mvm->first_agg_queue = 12;
}
+ mvm->sf_state = SF_UNINIT;
mutex_init(&mvm->mutex);
spin_lock_init(&mvm->async_handlers_lock);
@@ -424,7 +428,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
* there is no need to unnecessarily power up the NIC at driver load
*/
if (iwlwifi_mod_params.nvm_file) {
- iwl_nvm_init(mvm);
+ err = iwl_nvm_init(mvm);
+ if (err)
+ goto out_free;
} else {
err = iwl_trans_start_hw(mvm->trans);
if (err)
@@ -432,16 +438,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
err = iwl_run_init_mvm_ucode(mvm, true);
+ iwl_trans_stop_device(trans);
mutex_unlock(&mvm->mutex);
/* returns 0 if successful, 1 if success but in rfkill */
if (err < 0 && !iwlmvm_mod_params.init_dbg) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
goto out_free;
}
-
- /* Stop the hw after the ALIVE and NVM has been read */
- if (!iwlmvm_mod_params.init_dbg)
- iwl_trans_stop_hw(mvm->trans, false);
}
scan_size = sizeof(struct iwl_scan_cmd) +
@@ -470,11 +473,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
out_unregister:
ieee80211_unregister_hw(mvm->hw);
+ iwl_mvm_leds_exit(mvm);
out_free:
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
if (!iwlwifi_mod_params.nvm_file)
- iwl_trans_stop_hw(trans, true);
+ iwl_trans_op_mode_leave(trans);
ieee80211_free_hw(mvm->hw);
return NULL;
}
@@ -491,12 +495,14 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
ieee80211_unregister_hw(mvm->hw);
kfree(mvm->scan_cmd);
+ kfree(mvm->mcast_filter_cmd);
+ mvm->mcast_filter_cmd = NULL;
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
kfree(mvm->d3_resume_sram);
#endif
- iwl_trans_stop_hw(mvm->trans, true);
+ iwl_trans_op_mode_leave(mvm->trans);
iwl_phy_db_free(mvm->phy_db);
mvm->phy_db = NULL;
@@ -661,6 +667,8 @@ static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
else
clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+ if (state && mvm->cur_ucode != IWL_UCODE_INIT)
+ iwl_trans_stop_device(mvm->trans);
wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index a8652ddd6bed..b7268c0b3333 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 550824aa84ea..d9eab3b7bb9f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -64,7 +64,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <net/mac80211.h>
@@ -186,6 +185,92 @@ static void iwl_mvm_power_log(struct iwl_mvm *mvm,
}
}
+static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_power_cmd *cmd)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ enum ieee80211_ac_numbers ac;
+ bool tid_found = false;
+
+ for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
+ if (!mvmvif->queue_params[ac].uapsd)
+ continue;
+
+ if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+
+ cmd->uapsd_ac_flags |= BIT(ac);
+
+ /* QNDP TID - the highest TID with no admission control */
+ if (!tid_found && !mvmvif->queue_params[ac].acm) {
+ tid_found = true;
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ cmd->qndp_tid = 6;
+ break;
+ case IEEE80211_AC_VI:
+ cmd->qndp_tid = 5;
+ break;
+ case IEEE80211_AC_BE:
+ cmd->qndp_tid = 0;
+ break;
+ case IEEE80211_AC_BK:
+ cmd->qndp_tid = 1;
+ break;
+ }
+ }
+ }
+
+ if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)))
+ return;
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);
+
+ if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
+ BIT(IEEE80211_AC_VI) |
+ BIT(IEEE80211_AC_BE) |
+ BIT(IEEE80211_AC_BK))) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+ cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
+ cmd->snooze_window = (mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
+ cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
+ cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
+ }
+
+ cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
+
+ if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
+ cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+ } else {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+ }
+
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
+ } else {
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+ }
+ cmd->heavy_tx_thld_percentage =
+ IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
+ cmd->heavy_rx_thld_percentage =
+ IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
+}
+
static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_power_cmd *cmd)
@@ -198,8 +283,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
bool radar_detect = false;
struct iwl_mvm_vif *mvmvif __maybe_unused =
iwl_mvm_vif_from_mac80211(vif);
- enum ieee80211_ac_numbers ac;
- bool tid_found = false;
+ bool allow_uapsd = true;
cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color));
@@ -217,7 +301,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
+ mvm->ps_prevented)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
@@ -227,7 +312,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
mvmvif->dbgfs_pm.disable_power_off)
cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
#endif
- if (!vif->bss_conf.ps)
+ if (!vif->bss_conf.ps || mvmvif->pm_prevented)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -269,81 +354,24 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
}
- for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
- if (!mvmvif->queue_params[ac].uapsd)
- continue;
-
- if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
- cmd->flags |=
- cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
-
- cmd->uapsd_ac_flags |= BIT(ac);
+ if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+ ETH_ALEN))
+ allow_uapsd = false;
- /* QNDP TID - the highest TID with no admission control */
- if (!tid_found && !mvmvif->queue_params[ac].acm) {
- tid_found = true;
- switch (ac) {
- case IEEE80211_AC_VO:
- cmd->qndp_tid = 6;
- break;
- case IEEE80211_AC_VI:
- cmd->qndp_tid = 5;
- break;
- case IEEE80211_AC_BE:
- cmd->qndp_tid = 0;
- break;
- case IEEE80211_AC_BK:
- cmd->qndp_tid = 1;
- break;
- }
- }
- }
-
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
- if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
- BIT(IEEE80211_AC_VI) |
- BIT(IEEE80211_AC_BE) |
- BIT(IEEE80211_AC_BK))) {
- cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
- cmd->snooze_interval =
- cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
- cmd->snooze_window =
- (mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
- cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
- cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
- }
-
- cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
-
- if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
- cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
- cmd->rx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
- cmd->tx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
- } else {
- cmd->rx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
- cmd->tx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
- }
+ if (vif->p2p &&
+ !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
+ allow_uapsd = false;
+ /*
+ * Avoid using uAPSD if P2P client is associated to GO that uses
+ * opportunistic power save. This is due to current FW limitation.
+ */
+ if (vif->p2p &&
+ vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_ENABLE_BIT)
+ allow_uapsd = false;
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
- cmd->heavy_tx_thld_packets =
- IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
- cmd->heavy_rx_thld_packets =
- IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
- } else {
- cmd->heavy_tx_thld_packets =
- IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
- cmd->heavy_rx_thld_packets =
- IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
- }
- cmd->heavy_tx_thld_percentage =
- IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
- cmd->heavy_rx_thld_percentage =
- IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
- }
+ if (allow_uapsd)
+ iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
@@ -381,6 +409,13 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->flags &=
cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
}
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_UAPSD_MISBEHAVING) {
+ u16 flag = POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK;
+ if (mvmvif->dbgfs_pm.uapsd_misbehaving)
+ cmd->flags |= cpu_to_le16(flag);
+ else
+			cmd->flags &= cpu_to_le16(~flag);
+ }
#endif /* CONFIG_IWLWIFI_DEBUGFS */
}
@@ -391,18 +426,11 @@ static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
bool ba_enable;
struct iwl_mac_power_cmd cmd = {};
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ if (vif->type != NL80211_IFTYPE_STATION)
return 0;
- /*
- * TODO: The following vif_count verification is temporary condition.
- * Avoid power mode update if more than one interface is currently
- * active. Remove this condition when FW will support power management
- * on multiple MACs.
- */
- IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
- mvm->vif_count);
- if (mvm->vif_count > 1)
+ if (vif->p2p &&
+ !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS))
return 0;
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
@@ -446,7 +474,7 @@ static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
sizeof(cmd), &cmd);
}
-static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
+static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
{
struct iwl_device_power_cmd cmd = {
.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
@@ -455,7 +483,8 @@ static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
return 0;
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
+ force_disable)
cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -472,6 +501,78 @@ static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
&cmd);
}
+static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
+{
+ return _iwl_mvm_power_update_device(mvm, false);
+}
+
+void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid,
+ ETH_ALEN))
+ memset(mvmvif->uapsd_misbehaving_bssid, 0, ETH_ALEN);
+}
+
+static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u8 *ap_sta_id = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* The ap_sta_id is not expected to change during current association
+ * so no explicit protection is needed
+ */
+ if (mvmvif->ap_sta_id == *ap_sta_id)
+ memcpy(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+ ETH_ALEN);
+}
+
+int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
+ u8 ap_sta_id = le32_to_cpu(notif->sta_id);
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
+
+ return 0;
+}
+
+static void iwl_mvm_power_binding_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = _data;
+ int ret;
+
+	mvmvif->pm_prevented = (mvm->bound_vif_cnt > 1);
+
+ ret = iwl_mvm_power_mac_update_mode(mvm, vif);
+ WARN_ONCE(ret, "Failed to update power parameters on a specific vif\n");
+}
+
+static void _iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool assign)
+{
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ int ret = _iwl_mvm_power_update_device(mvm, assign);
+ mvm->ps_prevented = assign;
+ WARN_ONCE(ret, "Failed to update power device state\n");
+ }
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_binding_iterator,
+ mvm);
+}
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, char *buf,
@@ -494,70 +595,58 @@ static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
le16_to_cpu(cmd.keep_alive_seconds));
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
- pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
- 1 : 0);
- pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
- cmd.skip_dtim_periods);
- if (!(cmd.flags &
- cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
- pos += scnprintf(buf+pos, bufsz-pos,
- "rx_data_timeout = %d\n",
- le32_to_cpu(cmd.rx_data_timeout));
- pos += scnprintf(buf+pos, bufsz-pos,
- "tx_data_timeout = %d\n",
- le32_to_cpu(cmd.tx_data_timeout));
- }
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
- pos += scnprintf(buf+pos, bufsz-pos,
- "lprx_rssi_threshold = %d\n",
- cmd.lprx_rssi_threshold);
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
- pos +=
- scnprintf(buf+pos, bufsz-pos,
- "rx_data_timeout_uapsd = %d\n",
- le32_to_cpu(cmd.rx_data_timeout_uapsd));
- pos +=
- scnprintf(buf+pos, bufsz-pos,
- "tx_data_timeout_uapsd = %d\n",
- le32_to_cpu(cmd.tx_data_timeout_uapsd));
- pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n",
- cmd.qndp_tid);
- pos += scnprintf(buf+pos, bufsz-pos,
- "uapsd_ac_flags = 0x%x\n",
- cmd.uapsd_ac_flags);
- pos += scnprintf(buf+pos, bufsz-pos,
- "uapsd_max_sp = %d\n",
- cmd.uapsd_max_sp);
- pos += scnprintf(buf+pos, bufsz-pos,
- "heavy_tx_thld_packets = %d\n",
- cmd.heavy_tx_thld_packets);
- pos += scnprintf(buf+pos, bufsz-pos,
- "heavy_rx_thld_packets = %d\n",
- cmd.heavy_rx_thld_packets);
- pos += scnprintf(buf+pos, bufsz-pos,
- "heavy_tx_thld_percentage = %d\n",
- cmd.heavy_tx_thld_percentage);
- pos += scnprintf(buf+pos, bufsz-pos,
- "heavy_rx_thld_percentage = %d\n",
- cmd.heavy_rx_thld_percentage);
- pos +=
- scnprintf(buf+pos, bufsz-pos, "snooze_enable = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) ?
- 1 : 0);
- }
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
- pos += scnprintf(buf+pos, bufsz-pos,
- "snooze_interval = %d\n",
- cmd.snooze_interval);
- pos += scnprintf(buf+pos, bufsz-pos,
- "snooze_window = %d\n",
- cmd.snooze_window);
- }
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)))
+ return pos;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 1 : 0);
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+ cmd.skip_dtim_periods);
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
+ pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout));
+ pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout));
}
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "lprx_rssi_threshold = %d\n",
+ cmd.lprx_rssi_threshold);
+
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)))
+ return pos;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout_uapsd = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout_uapsd));
+ pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout_uapsd = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout_uapsd));
+ pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n", cmd.qndp_tid);
+ pos += scnprintf(buf+pos, bufsz-pos, "uapsd_ac_flags = 0x%x\n",
+ cmd.uapsd_ac_flags);
+ pos += scnprintf(buf+pos, bufsz-pos, "uapsd_max_sp = %d\n",
+ cmd.uapsd_max_sp);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_packets = %d\n",
+ cmd.heavy_tx_thld_packets);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_packets = %d\n",
+ cmd.heavy_rx_thld_packets);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_percentage = %d\n",
+ cmd.heavy_tx_thld_percentage);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_percentage = %d\n",
+ cmd.heavy_rx_thld_percentage);
+ pos += scnprintf(buf+pos, bufsz-pos, "uapsd_misbehaving_enable = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK)) ?
+ 1 : 0);
+
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)))
+ return pos;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "snooze_interval = %d\n",
+ cmd.snooze_interval);
+ pos += scnprintf(buf+pos, bufsz-pos, "snooze_window = %d\n",
+ cmd.snooze_window);
+
return pos;
}
@@ -654,6 +743,7 @@ const struct iwl_mvm_power_ops pm_mac_ops = {
.power_update_mode = iwl_mvm_power_mac_update_mode,
.power_update_device_mode = iwl_mvm_power_update_device,
.power_disable = iwl_mvm_power_mac_disable,
+ .power_update_binding = _iwl_mvm_power_update_binding,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
index 2ce79bad5845..ef712ae5bc62 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 17e2bc827f9a..ce5db6c4ef7e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -217,8 +217,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
} else {
cmd.quotas[idx].quota =
cpu_to_le32(quota * data.n_interfaces[i]);
- cmd.quotas[idx].max_duration =
- cpu_to_le32(IWL_MVM_MAX_QUOTA);
+ cmd.quotas[idx].max_duration = cpu_to_le32(0);
}
idx++;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index a0b4cc8d9c3b..6abf74e1351f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -24,7 +24,6 @@
*
*****************************************************************************/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/mac80211.h>
@@ -42,33 +41,37 @@
#define RS_NAME "iwl-mvm-rs"
-#define NUM_TRY_BEFORE_ANT_TOGGLE 1
-#define IWL_NUMBER_TRY 1
-#define IWL_HT_NUMBER_TRY 3
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define RS_LEGACY_RETRIES_PER_RATE 1
+#define RS_HT_VHT_RETRIES_PER_RATE 2
+#define RS_HT_VHT_RETRIES_PER_RATE_TW 1
+#define RS_INITIAL_MIMO_NUM_RATES 3
+#define RS_INITIAL_SISO_NUM_RATES 3
+#define RS_INITIAL_LEGACY_NUM_RATES LINK_QUAL_MAX_RETRY_NUM
+#define RS_SECONDARY_LEGACY_NUM_RATES LINK_QUAL_MAX_RETRY_NUM
+#define RS_SECONDARY_SISO_NUM_RATES 3
+#define RS_SECONDARY_SISO_RETRIES 1
#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
-#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
+#define IWL_RATE_MIN_FAILURE_TH 3 /* min failures to calc tpt */
#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
/* max allowed rate miss before sync LQ cmd */
#define IWL_MISSED_RATE_MAX 15
-/* max time to accum history 2 seconds */
-#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
+#define RS_STAY_IN_COLUMN_TIMEOUT (5*HZ)
+
static u8 rs_ht_to_legacy[] = {
- [IWL_RATE_1M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_2M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_5M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_11M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_6M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_9M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_12M_INDEX] = IWL_RATE_9M_INDEX,
- [IWL_RATE_18M_INDEX] = IWL_RATE_12M_INDEX,
- [IWL_RATE_24M_INDEX] = IWL_RATE_18M_INDEX,
- [IWL_RATE_36M_INDEX] = IWL_RATE_24M_INDEX,
- [IWL_RATE_48M_INDEX] = IWL_RATE_36M_INDEX,
- [IWL_RATE_54M_INDEX] = IWL_RATE_48M_INDEX,
- [IWL_RATE_60M_INDEX] = IWL_RATE_54M_INDEX,
+ [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
+ [IWL_RATE_MCS_1_INDEX] = IWL_RATE_9M_INDEX,
+ [IWL_RATE_MCS_2_INDEX] = IWL_RATE_12M_INDEX,
+ [IWL_RATE_MCS_3_INDEX] = IWL_RATE_18M_INDEX,
+ [IWL_RATE_MCS_4_INDEX] = IWL_RATE_24M_INDEX,
+ [IWL_RATE_MCS_5_INDEX] = IWL_RATE_36M_INDEX,
+ [IWL_RATE_MCS_6_INDEX] = IWL_RATE_48M_INDEX,
+ [IWL_RATE_MCS_7_INDEX] = IWL_RATE_54M_INDEX,
+ [IWL_RATE_MCS_8_INDEX] = IWL_RATE_54M_INDEX,
+ [IWL_RATE_MCS_9_INDEX] = IWL_RATE_54M_INDEX,
};
static const u8 ant_toggle_lookup[] = {
@@ -126,6 +129,196 @@ static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
IWL_DECLARE_MCS_RATE(9), /* MCS 9 */
};
+enum rs_action {
+ RS_ACTION_STAY = 0,
+ RS_ACTION_DOWNSCALE = -1,
+ RS_ACTION_UPSCALE = 1,
+};
+
+enum rs_column_mode {
+ RS_INVALID = 0,
+ RS_LEGACY,
+ RS_SISO,
+ RS_MIMO2,
+};
+
+#define MAX_NEXT_COLUMNS 5
+#define MAX_COLUMN_CHECKS 3
+
+typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl);
+
+struct rs_tx_column {
+ enum rs_column_mode mode;
+ u8 ant;
+ bool sgi;
+ enum rs_column next_columns[MAX_NEXT_COLUMNS];
+ allow_column_func_t checks[MAX_COLUMN_CHECKS];
+};
+
+static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ if (!sta->ht_cap.ht_supported)
+ return false;
+
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ return false;
+
+ if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 2)
+ return false;
+
+ if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+ return false;
+
+ return true;
+}
+
+static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ if (!sta->ht_cap.ht_supported)
+ return false;
+
+ return true;
+}
+
+static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ struct rs_rate *rate = &tbl->rate;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+ if (is_ht20(rate) && (ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ return true;
+ if (is_ht40(rate) && (ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ return true;
+ if (is_ht80(rate) && (vht_cap->cap &
+ IEEE80211_VHT_CAP_SHORT_GI_80))
+ return true;
+
+ return false;
+}
+
+static const struct rs_tx_column rs_tx_columns[] = {
+ [RS_COLUMN_LEGACY_ANT_A] = {
+ .mode = RS_LEGACY,
+ .ant = ANT_A,
+ .next_columns = {
+ RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ },
+ [RS_COLUMN_LEGACY_ANT_B] = {
+ .mode = RS_LEGACY,
+ .ant = ANT_B,
+ .next_columns = {
+ RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ },
+ [RS_COLUMN_SISO_ANT_A] = {
+ .mode = RS_SISO,
+ .ant = ANT_A,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_siso_allow,
+ },
+ },
+ [RS_COLUMN_SISO_ANT_B] = {
+ .mode = RS_SISO,
+ .ant = ANT_B,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_SISO_ANT_B_SGI,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_siso_allow,
+ },
+ },
+ [RS_COLUMN_SISO_ANT_A_SGI] = {
+ .mode = RS_SISO,
+ .ant = ANT_A,
+ .sgi = true,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_B_SGI,
+ RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_siso_allow,
+ rs_sgi_allow,
+ },
+ },
+ [RS_COLUMN_SISO_ANT_B_SGI] = {
+ .mode = RS_SISO,
+ .ant = ANT_B,
+ .sgi = true,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_siso_allow,
+ rs_sgi_allow,
+ },
+ },
+ [RS_COLUMN_MIMO2] = {
+ .mode = RS_MIMO2,
+ .ant = ANT_AB,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_mimo_allow,
+ },
+ },
+ [RS_COLUMN_MIMO2_SGI] = {
+ .mode = RS_MIMO2,
+ .ant = ANT_AB,
+ .sgi = true,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_mimo_allow,
+ rs_sgi_allow,
+ },
+ },
+};
+
static inline u8 rs_extract_rate(u32 rate_n_flags)
{
/* also works for HT because bits 7:6 are zero there */
@@ -163,28 +356,19 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
return idx;
}
- return -1;
+ return IWL_RATE_INVALID;
}
static void rs_rate_scale_perform(struct iwl_mvm *mvm,
struct sk_buff *skb,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta);
-static void rs_fill_link_cmd(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
+static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ const struct rs_rate *initial_rate);
static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags);
-#else
-static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags)
-{}
-#endif
-
/**
* The following tables contain the expected throughput metrics for all rates
*
@@ -264,6 +448,52 @@ static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
#define MCS_INDEX_PER_STREAM (8)
+static const char *rs_pretty_ant(u8 ant)
+{
+ static const char * const ant_name[] = {
+ [ANT_NONE] = "None",
+ [ANT_A] = "A",
+ [ANT_B] = "B",
+ [ANT_AB] = "AB",
+ [ANT_C] = "C",
+ [ANT_AC] = "AC",
+ [ANT_BC] = "BC",
+ [ANT_ABC] = "ABC",
+ };
+
+ if (ant > ANT_ABC)
+ return "UNKNOWN";
+
+ return ant_name[ant];
+}
+
+static const char *rs_pretty_lq_type(enum iwl_table_type type)
+{
+ static const char * const lq_types[] = {
+ [LQ_NONE] = "NONE",
+ [LQ_LEGACY_A] = "LEGACY_A",
+ [LQ_LEGACY_G] = "LEGACY_G",
+ [LQ_HT_SISO] = "HT SISO",
+ [LQ_HT_MIMO2] = "HT MIMO",
+ [LQ_VHT_SISO] = "VHT SISO",
+ [LQ_VHT_MIMO2] = "VHT MIMO",
+ };
+
+ if (type < LQ_NONE || type >= LQ_MAX)
+ return "UNKNOWN";
+
+ return lq_types[type];
+}
+
+static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
+ const char *prefix)
+{
+ IWL_DEBUG_RATE(mvm, "%s: (%s: %d) ANT: %s BW: %d SGI: %d\n",
+ prefix, rs_pretty_lq_type(rate->type),
+ rate->index, rs_pretty_ant(rate->ant),
+ rate->bw, rate->sgi);
+}
+
static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
{
window->data = 0;
@@ -271,7 +501,6 @@ static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
window->success_ratio = IWL_INVALID_VALUE;
window->counter = 0;
window->average_tpt = IWL_INVALID_VALUE;
- window->stamp = 0;
}
static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
@@ -279,30 +508,6 @@ static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
return (ant_type & valid_antenna) == ant_type;
}
-#ifdef CONFIG_MAC80211_DEBUGFS
-/**
- * Program the device to use fixed rate for frame transmit
- * This is for debugging/testing only
- * once the device start use fixed rate, we need to reload the module
- * to being back the normal operation.
- */
-static void rs_program_fix_rate(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta)
-{
- lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
- lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
- lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
-
- IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
- lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
-
- if (lq_sta->dbg_fixed_rate) {
- rs_fill_link_cmd(NULL, NULL, lq_sta, lq_sta->dbg_fixed_rate);
- iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
- }
-}
-#endif
-
static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_data, u8 tid,
struct ieee80211_sta *sta)
@@ -428,192 +633,168 @@ static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
else
window->average_tpt = IWL_INVALID_VALUE;
- /* Tag this window as having been updated */
- window->stamp = jiffies;
-
return 0;
}
-/*
- * Fill uCode API rate_n_flags field, based on "search" or "active" table.
- */
-/* FIXME:RS:remove this function and put the flags statically in the table */
-static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
- struct iwl_scale_tbl_info *tbl, int index)
+/* Convert rs_rate object into ucode rate bitmask */
+static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
+ struct rs_rate *rate)
{
- u32 rate_n_flags = 0;
+ u32 ucode_rate = 0;
+ int index = rate->index;
- rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+ ucode_rate |= ((rate->ant << RATE_MCS_ANT_POS) &
RATE_MCS_ANT_ABC_MSK);
- if (is_legacy(tbl->lq_type)) {
- rate_n_flags |= iwl_rates[index].plcp;
+ if (is_legacy(rate)) {
+ ucode_rate |= iwl_rates[index].plcp;
if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
- rate_n_flags |= RATE_MCS_CCK_MSK;
- return rate_n_flags;
+ ucode_rate |= RATE_MCS_CCK_MSK;
+ return ucode_rate;
}
- if (is_ht(tbl->lq_type)) {
+ if (is_ht(rate)) {
if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
index = IWL_LAST_HT_RATE;
}
- rate_n_flags |= RATE_MCS_HT_MSK;
+ ucode_rate |= RATE_MCS_HT_MSK;
- if (is_ht_siso(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_ht_siso;
- else if (is_ht_mimo2(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_ht_mimo2;
+ if (is_ht_siso(rate))
+ ucode_rate |= iwl_rates[index].plcp_ht_siso;
+ else if (is_ht_mimo2(rate))
+ ucode_rate |= iwl_rates[index].plcp_ht_mimo2;
else
WARN_ON_ONCE(1);
- } else if (is_vht(tbl->lq_type)) {
+ } else if (is_vht(rate)) {
if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) {
IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
index = IWL_LAST_VHT_RATE;
}
- rate_n_flags |= RATE_MCS_VHT_MSK;
- if (is_vht_siso(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_vht_siso;
- else if (is_vht_mimo2(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_vht_mimo2;
+ ucode_rate |= RATE_MCS_VHT_MSK;
+ if (is_vht_siso(rate))
+ ucode_rate |= iwl_rates[index].plcp_vht_siso;
+ else if (is_vht_mimo2(rate))
+ ucode_rate |= iwl_rates[index].plcp_vht_mimo2;
else
WARN_ON_ONCE(1);
} else {
- IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
+ IWL_ERR(mvm, "Invalid rate->type %d\n", rate->type);
}
- rate_n_flags |= tbl->bw;
- if (tbl->is_SGI)
- rate_n_flags |= RATE_MCS_SGI_MSK;
+ ucode_rate |= rate->bw;
+ if (rate->sgi)
+ ucode_rate |= RATE_MCS_SGI_MSK;
- return rate_n_flags;
+ return ucode_rate;
}
-/*
- * Interpret uCode API's rate_n_flags format,
- * fill "search" or "active" tx mode table.
- */
-static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
- enum ieee80211_band band,
- struct iwl_scale_tbl_info *tbl,
- int *rate_idx)
+/* Convert a ucode rate into an rs_rate object */
+static int rs_rate_from_ucode_rate(const u32 ucode_rate,
+ enum ieee80211_band band,
+ struct rs_rate *rate)
{
- u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
- u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
+ u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK;
+ u8 num_of_ant = get_num_of_ant_from_rate(ucode_rate);
u8 nss;
- memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win));
- *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
+ memset(rate, 0, sizeof(*rate));
+ rate->index = iwl_hwrate_to_plcp_idx(ucode_rate);
- if (*rate_idx == IWL_RATE_INVALID) {
- *rate_idx = -1;
+ if (rate->index == IWL_RATE_INVALID)
return -EINVAL;
- }
- tbl->is_SGI = 0; /* default legacy setup */
- tbl->bw = 0;
- tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
- tbl->lq_type = LQ_NONE;
- tbl->max_search = IWL_MAX_SEARCH;
+
+ rate->ant = (ant_msk >> RATE_MCS_ANT_POS);
/* Legacy */
- if (!(rate_n_flags & RATE_MCS_HT_MSK) &&
- !(rate_n_flags & RATE_MCS_VHT_MSK)) {
+ if (!(ucode_rate & RATE_MCS_HT_MSK) &&
+ !(ucode_rate & RATE_MCS_VHT_MSK)) {
if (num_of_ant == 1) {
if (band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_LEGACY_A;
+ rate->type = LQ_LEGACY_A;
else
- tbl->lq_type = LQ_LEGACY_G;
+ rate->type = LQ_LEGACY_G;
}
return 0;
}
/* HT or VHT */
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- tbl->is_SGI = 1;
+ if (ucode_rate & RATE_MCS_SGI_MSK)
+ rate->sgi = true;
- tbl->bw = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
+ rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK;
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- nss = ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
+ if (ucode_rate & RATE_MCS_HT_MSK) {
+ nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK) >>
RATE_HT_MCS_NSS_POS) + 1;
if (nss == 1) {
- tbl->lq_type = LQ_HT_SISO;
+ rate->type = LQ_HT_SISO;
WARN_ON_ONCE(num_of_ant != 1);
} else if (nss == 2) {
- tbl->lq_type = LQ_HT_MIMO2;
+ rate->type = LQ_HT_MIMO2;
WARN_ON_ONCE(num_of_ant != 2);
} else {
WARN_ON_ONCE(1);
}
- } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
- nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ } else if (ucode_rate & RATE_MCS_VHT_MSK) {
+ nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
if (nss == 1) {
- tbl->lq_type = LQ_VHT_SISO;
+ rate->type = LQ_VHT_SISO;
WARN_ON_ONCE(num_of_ant != 1);
} else if (nss == 2) {
- tbl->lq_type = LQ_VHT_MIMO2;
+ rate->type = LQ_VHT_MIMO2;
WARN_ON_ONCE(num_of_ant != 2);
} else {
WARN_ON_ONCE(1);
}
}
- WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_160);
- WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_80 &&
- !is_vht(tbl->lq_type));
+ WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_160);
+ WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
+ !is_vht(rate));
return 0;
}
/* switch to another antenna/antennas and return 1 */
/* if no other valid antenna found, return 0 */
-static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
- struct iwl_scale_tbl_info *tbl)
+static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate)
{
u8 new_ant_type;
- if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+ if (!rate->ant || rate->ant > ANT_ABC)
return 0;
- if (!rs_is_valid_ant(valid_ant, tbl->ant_type))
+ if (!rs_is_valid_ant(valid_ant, rate->ant))
return 0;
- new_ant_type = ant_toggle_lookup[tbl->ant_type];
+ new_ant_type = ant_toggle_lookup[rate->ant];
- while ((new_ant_type != tbl->ant_type) &&
+ while ((new_ant_type != rate->ant) &&
!rs_is_valid_ant(valid_ant, new_ant_type))
new_ant_type = ant_toggle_lookup[new_ant_type];
- if (new_ant_type == tbl->ant_type)
+ if (new_ant_type == rate->ant)
return 0;
- tbl->ant_type = new_ant_type;
- *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
- *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+ rate->ant = new_ant_type;
+
return 1;
}
-/**
- * rs_get_supported_rates - get the available rates
- *
- * if management frame or broadcast frame only return
- * basic available rates.
- *
- */
static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
- struct ieee80211_hdr *hdr,
- enum iwl_table_type rate_type)
+ struct rs_rate *rate)
{
- if (is_legacy(rate_type))
+ if (is_legacy(rate))
return lq_sta->active_legacy_rate;
- else if (is_siso(rate_type))
+ else if (is_siso(rate))
return lq_sta->active_siso_rate;
- else if (is_mimo2(rate_type))
+ else if (is_mimo2(rate))
return lq_sta->active_mimo2_rate;
WARN_ON_ONCE(1);
@@ -628,7 +809,7 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
/* 802.11A or ht walks to the next literal adjacent rate in
* the rate table */
- if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+ if (is_type_a_band(rate_type) || !is_type_legacy(rate_type)) {
int i;
u32 mask;
@@ -676,73 +857,80 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
return (high << 8) | low;
}
-static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl,
- u8 scale_index, u8 ht_possible)
+static inline bool rs_rate_supported(struct iwl_lq_sta *lq_sta,
+ struct rs_rate *rate)
{
- s32 low;
- u16 rate_mask;
+ return BIT(rate->index) & rs_get_supported_rates(lq_sta, rate);
+}
+
+/* Get the next supported lower rate in the current column.
+ * Return true if bottom rate in the current column was reached
+ */
+static bool rs_get_lower_rate_in_column(struct iwl_lq_sta *lq_sta,
+ struct rs_rate *rate)
+{
+ u8 low;
u16 high_low;
- u8 switch_to_legacy = 0;
+ u16 rate_mask;
struct iwl_mvm *mvm = lq_sta->drv;
- /* check if we need to switch from HT to legacy rates.
- * assumption is that mandatory rates (1Mbps or 6Mbps)
- * are always supported (spec demand) */
- if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
- switch_to_legacy = 1;
- scale_index = rs_ht_to_legacy[scale_index];
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_LEGACY_A;
- else
- tbl->lq_type = LQ_LEGACY_G;
+ rate_mask = rs_get_supported_rates(lq_sta, rate);
+ high_low = rs_get_adjacent_rate(mvm, rate->index, rate_mask,
+ rate->type);
+ low = high_low & 0xff;
- if (num_of_ant(tbl->ant_type) > 1)
- tbl->ant_type =
- first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
+ /* Bottom rate of column reached */
+ if (low == IWL_RATE_INVALID)
+ return true;
- tbl->bw = 0;
- tbl->is_SGI = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- }
+ rate->index = low;
+ return false;
+}
- rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+/* Get the next rate to use following a column downgrade */
+static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
+ struct rs_rate *rate)
+{
+ struct iwl_mvm *mvm = lq_sta->drv;
- /* Mask with station rate restriction */
- if (is_legacy(tbl->lq_type)) {
- /* supp_rates has no CCK bits in A mode */
+ if (is_legacy(rate)) {
+ /* No column to downgrade from Legacy */
+ return;
+ } else if (is_siso(rate)) {
+ /* Downgrade to Legacy if we were in SISO */
if (lq_sta->band == IEEE80211_BAND_5GHZ)
- rate_mask = (u16)(rate_mask &
- (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+ rate->type = LQ_LEGACY_A;
else
- rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
- }
+ rate->type = LQ_LEGACY_G;
- /* If we switched from HT to legacy, check current rate */
- if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
- low = scale_index;
- goto out;
+ rate->bw = RATE_MCS_CHAN_WIDTH_20;
+
+ WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX &&
+ rate->index > IWL_RATE_MCS_9_INDEX);
+
+ rate->index = rs_ht_to_legacy[rate->index];
+ } else {
+ /* Downgrade to SISO with same MCS if in MIMO */
+ rate->type = is_vht_mimo2(rate) ?
+ LQ_VHT_SISO : LQ_HT_SISO;
}
- high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask,
- tbl->lq_type);
- low = high_low & 0xff;
- if (low == IWL_RATE_INVALID)
- low = scale_index;
+ if (num_of_ant(rate->ant) > 1)
+ rate->ant = first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
-out:
- return rate_n_flags_from_tbl(lq_sta->drv, tbl, low);
+ /* Relevant in both switching to SISO or Legacy */
+ rate->sgi = false;
+
+ if (!rs_rate_supported(lq_sta, rate))
+ rs_get_lower_rate_in_column(lq_sta, rate);
}
-/*
- * Simple function to compare two rate scale table types
- */
-static bool table_type_matches(struct iwl_scale_tbl_info *a,
- struct iwl_scale_tbl_info *b)
+/* Simple function to compare two rate scale table types */
+static inline bool rs_rate_match(struct rs_rate *a,
+ struct rs_rate *b)
{
- return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
- (a->is_SGI == b->is_SGI);
+ return (a->type == b->type) && (a->ant == b->ant) && (a->sgi == b->sgi);
}
static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
@@ -766,7 +954,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
{
int legacy_success;
int retries;
- int rs_index, mac_index, i;
+ int mac_index, i;
struct iwl_lq_sta *lq_sta = priv_sta;
struct iwl_lq_cmd *table;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -774,13 +962,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
enum mac80211_rate_control_flags mac_flags;
- u32 tx_rate;
- struct iwl_scale_tbl_info tbl_type;
+ u32 ucode_rate;
+ struct rs_rate rate;
struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
- IWL_DEBUG_RATE_LIMIT(mvm,
- "get frame ack response, update rate scale window\n");
-
/* Treat uninitialized rate scaling data same as non-existing. */
if (!lq_sta) {
IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
@@ -808,10 +993,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
* to a new "search" mode (which might become the new "active" mode).
*/
table = &lq_sta->lq;
- tx_rate = le32_to_cpu(table->rs_table[0]);
- rs_get_tbl_info_from_mcs(tx_rate, info->band, &tbl_type, &rs_index);
+ ucode_rate = le32_to_cpu(table->rs_table[0]);
+ rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
if (info->band == IEEE80211_BAND_5GHZ)
- rs_index -= IWL_FIRST_OFDM_RATE;
+ rate.index -= IWL_FIRST_OFDM_RATE;
mac_flags = info->status.rates[0].flags;
mac_index = info->status.rates[0].idx;
/* For HT packets, map MCS to PLCP */
@@ -834,19 +1019,19 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
/* Here we actually compare this rate to the latest LQ command */
if ((mac_index < 0) ||
- (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
- (tbl_type.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
- (tbl_type.ant_type != info->status.antenna) ||
- (!!(tx_rate & RATE_MCS_HT_MSK) !=
+ (rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
+ (rate.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
+ (rate.ant != info->status.antenna) ||
+ (!!(ucode_rate & RATE_MCS_HT_MSK) !=
!!(mac_flags & IEEE80211_TX_RC_MCS)) ||
- (!!(tx_rate & RATE_MCS_VHT_MSK) !=
+ (!!(ucode_rate & RATE_MCS_VHT_MSK) !=
!!(mac_flags & IEEE80211_TX_RC_VHT_MCS)) ||
- (!!(tx_rate & RATE_HT_MCS_GF_MSK) !=
+ (!!(ucode_rate & RATE_HT_MCS_GF_MSK) !=
!!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
- (rs_index != mac_index)) {
+ (rate.index != mac_index)) {
IWL_DEBUG_RATE(mvm,
"initial rate %d does not match %d (0x%x)\n",
- mac_index, rs_index, tx_rate);
+ mac_index, rate.index, ucode_rate);
/*
* Since rates mis-match, the last LQ command may have failed.
* After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
@@ -855,7 +1040,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
lq_sta->missed_rate_counter++;
if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
lq_sta->missed_rate_counter = 0;
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+ IWL_DEBUG_RATE(mvm,
+ "Too many rates mismatch. Send sync LQ. rs_state %d\n",
+ lq_sta->rs_state);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
}
/* Regardless, ignore this status info for outdated rate */
return;
@@ -864,28 +1052,23 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
lq_sta->missed_rate_counter = 0;
/* Figure out if rate scale algorithm is in active or search table */
- if (table_type_matches(&tbl_type,
- &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+ if (rs_rate_match(&rate,
+ &(lq_sta->lq_info[lq_sta->active_tbl].rate))) {
curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- } else if (table_type_matches(
- &tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+ } else if (rs_rate_match(&rate,
+ &lq_sta->lq_info[1 - lq_sta->active_tbl].rate)) {
curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
} else {
IWL_DEBUG_RATE(mvm,
"Neither active nor search matches tx rate\n");
tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- IWL_DEBUG_RATE(mvm, "active- lq:%x, ant:%x, SGI:%d\n",
- tmp_tbl->lq_type, tmp_tbl->ant_type,
- tmp_tbl->is_SGI);
+ rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- IWL_DEBUG_RATE(mvm, "search- lq:%x, ant:%x, SGI:%d\n",
- tmp_tbl->lq_type, tmp_tbl->ant_type,
- tmp_tbl->is_SGI);
- IWL_DEBUG_RATE(mvm, "actual- lq:%x, ant:%x, SGI:%d\n",
- tbl_type.lq_type, tbl_type.ant_type,
- tbl_type.is_SGI);
+ rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
+ rs_dump_rate(mvm, &rate, "ACTUAL");
+
/*
* no matching table found, let's by-pass the data collection
* and continue to perform rate scale to find the rate table
@@ -902,15 +1085,14 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
* first index into rate scale table.
*/
if (info->flags & IEEE80211_TX_STAT_AMPDU) {
- tx_rate = le32_to_cpu(table->rs_table[0]);
- rs_get_tbl_info_from_mcs(tx_rate, info->band, &tbl_type,
- &rs_index);
- rs_collect_tx_data(curr_tbl, rs_index,
+ ucode_rate = le32_to_cpu(table->rs_table[0]);
+ rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
+ rs_collect_tx_data(curr_tbl, rate.index,
info->status.ampdu_len,
info->status.ampdu_ack_len);
/* Update success/fail counts if not searching for new mode */
- if (lq_sta->stay_in_tbl) {
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
lq_sta->total_success += info->status.ampdu_ack_len;
lq_sta->total_failed += (info->status.ampdu_len -
info->status.ampdu_ack_len);
@@ -927,31 +1109,31 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
/* Collect data for each rate used during failed TX attempts */
for (i = 0; i <= retries; ++i) {
- tx_rate = le32_to_cpu(table->rs_table[i]);
- rs_get_tbl_info_from_mcs(tx_rate, info->band,
- &tbl_type, &rs_index);
+ ucode_rate = le32_to_cpu(table->rs_table[i]);
+ rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
/*
* Only collect stats if retried rate is in the same RS
* table as active/search.
*/
- if (table_type_matches(&tbl_type, curr_tbl))
+ if (rs_rate_match(&rate, &curr_tbl->rate))
tmp_tbl = curr_tbl;
- else if (table_type_matches(&tbl_type, other_tbl))
+ else if (rs_rate_match(&rate, &other_tbl->rate))
tmp_tbl = other_tbl;
else
continue;
- rs_collect_tx_data(tmp_tbl, rs_index, 1,
+
+ rs_collect_tx_data(tmp_tbl, rate.index, 1,
i < retries ? 0 : legacy_success);
}
/* Update success/fail counts if not searching for new mode */
- if (lq_sta->stay_in_tbl) {
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
lq_sta->total_success += legacy_success;
lq_sta->total_failed += retries + (1 - legacy_success);
}
}
/* The last TX rate is cached in lq_sta; it's set in if/else above */
- lq_sta->last_rate_n_flags = tx_rate;
+ lq_sta->last_rate_n_flags = ucode_rate;
done:
/* See if there's a better rate or modulation mode to try. */
if (sta && sta->supp_rates[sband->band])
@@ -969,8 +1151,8 @@ done:
static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
struct iwl_lq_sta *lq_sta)
{
- IWL_DEBUG_RATE(mvm, "we are staying in the same table\n");
- lq_sta->stay_in_tbl = 1; /* only place this gets set */
+ IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_STAY_IN_COLUMN\n");
+ lq_sta->rs_state = RS_STATE_STAY_IN_COLUMN;
if (is_legacy) {
lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
@@ -984,37 +1166,31 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
lq_sta->total_failed = 0;
lq_sta->total_success = 0;
lq_sta->flush_timer = jiffies;
- lq_sta->action_counter = 0;
+ lq_sta->visited_columns = 0;
}
-/*
- * Find correct throughput table for given mode of modulation
- */
-static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl)
+static s32 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+ const struct rs_tx_column *column,
+ u32 bw)
{
/* Used to choose among HT tables */
s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
- /* Check for invalid LQ type */
- if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_ht(tbl->lq_type) &&
- !(is_vht(tbl->lq_type)))) {
- tbl->expected_tpt = expected_tpt_legacy;
- return;
- }
+ if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
+ column->mode != RS_SISO &&
+ column->mode != RS_MIMO2))
+ return expected_tpt_legacy;
/* Legacy rates have only one table */
- if (is_legacy(tbl->lq_type)) {
- tbl->expected_tpt = expected_tpt_legacy;
- return;
- }
+ if (column->mode == RS_LEGACY)
+ return expected_tpt_legacy;
ht_tbl_pointer = expected_tpt_mimo2_20MHz;
/* Choose among many HT tables depending on number of streams
* (SISO/MIMO2), channel width (20/40/80), SGI, and aggregation
* status */
- if (is_siso(tbl->lq_type)) {
- switch (tbl->bw) {
+ if (column->mode == RS_SISO) {
+ switch (bw) {
case RATE_MCS_CHAN_WIDTH_20:
ht_tbl_pointer = expected_tpt_siso_20MHz;
break;
@@ -1027,8 +1203,8 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
default:
WARN_ON_ONCE(1);
}
- } else if (is_mimo2(tbl->lq_type)) {
- switch (tbl->bw) {
+ } else if (column->mode == RS_MIMO2) {
+ switch (bw) {
case RATE_MCS_CHAN_WIDTH_20:
ht_tbl_pointer = expected_tpt_mimo2_20MHz;
break;
@@ -1045,14 +1221,23 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
WARN_ON_ONCE(1);
}
- if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
- tbl->expected_tpt = ht_tbl_pointer[0];
- else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
- tbl->expected_tpt = ht_tbl_pointer[1];
- else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
- tbl->expected_tpt = ht_tbl_pointer[2];
+ if (!column->sgi && !lq_sta->is_agg) /* Normal */
+ return ht_tbl_pointer[0];
+ else if (column->sgi && !lq_sta->is_agg) /* SGI */
+ return ht_tbl_pointer[1];
+ else if (!column->sgi && lq_sta->is_agg) /* AGG */
+ return ht_tbl_pointer[2];
else /* AGG+SGI */
- tbl->expected_tpt = ht_tbl_pointer[3];
+ return ht_tbl_pointer[3];
+}
+
+static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ struct rs_rate *rate = &tbl->rate;
+ const struct rs_tx_column *column = &rs_tx_columns[tbl->column];
+
+ tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
}
/*
@@ -1089,7 +1274,7 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
while (1) {
high_low = rs_get_adjacent_rate(mvm, rate, rate_mask,
- tbl->lq_type);
+ tbl->rate.type);
low = high_low & 0xff;
high = (high_low >> 8) & 0xff;
@@ -1110,7 +1295,7 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
* "active" throughput (under perfect conditions).
*/
if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
- ((active_sr > IWL_RATE_DECREASE_TH) &&
+ ((active_sr > RS_SR_FORCE_DECREASE) &&
(active_sr <= IWL_RATE_HIGH_TH) &&
(tpt_tbl[rate] <= active_tpt))) ||
((active_sr >= IWL_RATE_SCALE_SWITCH) &&
@@ -1157,417 +1342,14 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
return new_rate;
}
-/* Move to the next action and wrap around to the first action in case
- * we're at the last action. Assumes actions start at 0.
- */
-static inline void rs_move_next_action(struct iwl_scale_tbl_info *tbl,
- u8 last_action)
-{
- BUILD_BUG_ON(IWL_LEGACY_FIRST_ACTION != 0);
- BUILD_BUG_ON(IWL_SISO_FIRST_ACTION != 0);
- BUILD_BUG_ON(IWL_MIMO2_FIRST_ACTION != 0);
-
- tbl->action = (tbl->action + 1) % (last_action + 1);
-}
-
-static void rs_set_bw_from_sta(struct iwl_scale_tbl_info *tbl,
- struct ieee80211_sta *sta)
+static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
{
if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
- tbl->bw = RATE_MCS_CHAN_WIDTH_80;
+ return RATE_MCS_CHAN_WIDTH_80;
else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
- tbl->bw = RATE_MCS_CHAN_WIDTH_40;
- else
- tbl->bw = RATE_MCS_CHAN_WIDTH_20;
-}
-
-static bool rs_sgi_allowed(struct iwl_scale_tbl_info *tbl,
- struct ieee80211_sta *sta)
-{
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
-
- if (is_ht20(tbl) && (ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- return true;
- if (is_ht40(tbl) && (ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
- return true;
- if (is_ht80(tbl) && (vht_cap->cap &
- IEEE80211_VHT_CAP_SHORT_GI_80))
- return true;
-
- return false;
-}
-
-/*
- * Set up search table for MIMO2
- */
-static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl, int index)
-{
- u16 rate_mask;
- s32 rate;
-
- if (!sta->ht_cap.ht_supported)
- return -1;
-
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
- return -1;
-
- /* Need both Tx chains/antennas to support MIMO */
- if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 2)
- return -1;
-
- IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n");
-
- tbl->lq_type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
- tbl->action = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- rate_mask = lq_sta->active_mimo2_rate;
-
- rs_set_bw_from_sta(tbl, sta);
- rs_set_expected_tpt_table(lq_sta, tbl);
-
- rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
-
- IWL_DEBUG_RATE(mvm, "LQ: MIMO2 best rate %d mask %X\n",
- rate, rate_mask);
- if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
- IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n",
- rate, rate_mask);
- return -1;
- }
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
-
- IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
- tbl->current_rate);
- return 0;
-}
-
-/*
- * Set up search table for SISO
- */
-static int rs_switch_to_siso(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl, int index)
-{
- u16 rate_mask;
- s32 rate;
-
- if (!sta->ht_cap.ht_supported)
- return -1;
-
- IWL_DEBUG_RATE(mvm, "LQ: try to switch to SISO\n");
-
- tbl->lq_type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
- tbl->action = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- rate_mask = lq_sta->active_siso_rate;
-
- rs_set_bw_from_sta(tbl, sta);
- rs_set_expected_tpt_table(lq_sta, tbl);
- rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
-
- IWL_DEBUG_RATE(mvm, "LQ: get best rate %d mask %X\n", rate, rate_mask);
- if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
- IWL_DEBUG_RATE(mvm,
- "can not switch with index %d rate mask %x\n",
- rate, rate_mask);
- return -1;
- }
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
- IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
- tbl->current_rate);
- return 0;
-}
-
-/*
- * Try to switch to new modulation mode from legacy
- */
-static int rs_move_legacy_other(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_sta *sta,
- int index)
-{
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- u8 tx_chains_num = num_of_ant(valid_tx_ant);
- int ret;
- u8 update_search_tbl_counter = 0;
-
- start_action = tbl->action;
- while (1) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_LEGACY_SWITCH_ANTENNA:
- IWL_DEBUG_RATE(mvm, "LQ: Legacy toggle Antenna\n");
-
- if (tx_chains_num <= 1)
- break;
-
- /* Don't change antenna if success has been great */
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- /* Set up search table to try other antenna */
- memcpy(search_tbl, tbl, sz);
-
- if (rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate,
- search_tbl)) {
- update_search_tbl_counter = 1;
- rs_set_expected_tpt_table(lq_sta, search_tbl);
- goto out;
- }
- break;
- case IWL_LEGACY_SWITCH_SISO:
- IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to SISO\n");
-
- /* Set up search table to try SISO */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
- ret = rs_switch_to_siso(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret) {
- lq_sta->action_counter = 0;
- goto out;
- }
-
- break;
- case IWL_LEGACY_SWITCH_MIMO2:
- IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO2\n");
-
- /* Set up search table to try MIMO */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
-
- search_tbl->ant_type = ANT_AB;
-
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret) {
- lq_sta->action_counter = 0;
- goto out;
- }
- break;
- default:
- WARN_ON_ONCE(1);
- }
- rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
-
- if (tbl->action == start_action)
- break;
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
-
-out:
- lq_sta->search_better_tbl = 1;
- rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
- return 0;
-}
-
-/*
- * Try to switch to new modulation mode from SISO
- */
-static int rs_move_siso_to_other(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_sta *sta, int index)
-{
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- u8 tx_chains_num = num_of_ant(valid_tx_ant);
- u8 update_search_tbl_counter = 0;
- int ret;
-
- if (tbl->action == IWL_SISO_SWITCH_MIMO2 &&
- !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
- tbl->action = IWL_SISO_SWITCH_ANTENNA;
-
- start_action = tbl->action;
- while (1) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_SISO_SWITCH_ANTENNA:
- IWL_DEBUG_RATE(mvm, "LQ: SISO toggle Antenna\n");
- if (tx_chains_num <= 1)
- break;
-
- if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
- BT_MBOX_MSG(&mvm->last_bt_notif, 3,
- TRAFFIC_LOAD) == 0)
- break;
-
- memcpy(search_tbl, tbl, sz);
- if (rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate,
- search_tbl)) {
- update_search_tbl_counter = 1;
- goto out;
- }
- break;
- case IWL_SISO_SWITCH_MIMO2:
- IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO2\n");
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
-
- search_tbl->ant_type = ANT_AB;
-
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret)
- goto out;
- break;
- case IWL_SISO_SWITCH_GI:
- if (!rs_sgi_allowed(tbl, sta))
- break;
-
- IWL_DEBUG_RATE(mvm, "LQ: SISO toggle SGI/NGI\n");
-
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = !tbl->is_SGI;
- rs_set_expected_tpt_table(lq_sta, search_tbl);
- if (tbl->is_SGI) {
- s32 tpt = lq_sta->last_tpt / 100;
- if (tpt >= search_tbl->expected_tpt[index])
- break;
- }
- search_tbl->current_rate =
- rate_n_flags_from_tbl(mvm, search_tbl, index);
- update_search_tbl_counter = 1;
- goto out;
- default:
- WARN_ON_ONCE(1);
- }
- rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
-
- if (tbl->action == start_action)
- break;
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
-
- out:
- lq_sta->search_better_tbl = 1;
- rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
-
- return 0;
-}
-
-/*
- * Try to switch to new modulation mode from MIMO2
- */
-static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_sta *sta, int index)
-{
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- u8 update_search_tbl_counter = 0;
- int ret;
-
- start_action = tbl->action;
- while (1) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_MIMO2_SWITCH_SISO_A:
- case IWL_MIMO2_SWITCH_SISO_B:
- IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
-
- /* Set up new search table for SISO */
- memcpy(search_tbl, tbl, sz);
-
- if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
- search_tbl->ant_type = ANT_A;
- else /* tbl->action == IWL_MIMO2_SWITCH_SISO_B */
- search_tbl->ant_type = ANT_B;
-
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = rs_switch_to_siso(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret)
- goto out;
-
- break;
-
- case IWL_MIMO2_SWITCH_GI:
- if (!rs_sgi_allowed(tbl, sta))
- break;
-
- IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle SGI/NGI\n");
-
- /* Set up new search table for MIMO2 */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = !tbl->is_SGI;
- rs_set_expected_tpt_table(lq_sta, search_tbl);
- /*
- * If active table already uses the fastest possible
- * modulation (dual stream with short guard interval),
- * and it's working well, there's no need to look
- * for a better type of modulation!
- */
- if (tbl->is_SGI) {
- s32 tpt = lq_sta->last_tpt / 100;
- if (tpt >= search_tbl->expected_tpt[index])
- break;
- }
- search_tbl->current_rate =
- rate_n_flags_from_tbl(mvm, search_tbl, index);
- update_search_tbl_counter = 1;
- goto out;
- default:
- WARN_ON_ONCE(1);
- }
- rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
-
- if (tbl->action == start_action)
- break;
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
- out:
- lq_sta->search_better_tbl = 1;
- rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
+ return RATE_MCS_CHAN_WIDTH_40;
- return 0;
+ return RATE_MCS_CHAN_WIDTH_20;
}
/*
@@ -1591,13 +1373,13 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
tbl = &(lq_sta->lq_info[active_tbl]);
/* If we've been disallowing search, see if we should now allow it */
- if (lq_sta->stay_in_tbl) {
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
/* Elapsed time using current modulation mode */
if (lq_sta->flush_timer)
flush_interval_passed =
time_after(jiffies,
(unsigned long)(lq_sta->flush_timer +
- IWL_RATE_SCALE_FLUSH_INTVL));
+ RS_STAY_IN_COLUMN_TIMEOUT));
/*
* Check if we should allow search for new modulation mode.
@@ -1619,10 +1401,14 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
flush_interval_passed);
/* Allow search for new mode */
- lq_sta->stay_in_tbl = 0; /* only place reset */
+ lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_STARTED;
+ IWL_DEBUG_RATE(mvm,
+ "Moving to RS_STATE_SEARCH_CYCLE_STARTED\n");
lq_sta->total_failed = 0;
lq_sta->total_success = 0;
lq_sta->flush_timer = 0;
+ /* mark the current column as visited */
+ lq_sta->visited_columns = BIT(tbl->column);
/*
* Else if we've used this modulation mode enough repetitions
* (regardless of elapsed time or success/failure), reset
@@ -1646,7 +1432,8 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
/* If transitioning to allow "search", reset all history
* bitmaps and stats in active table (this will become the new
* "search" table). */
- if (!lq_sta->stay_in_tbl) {
+ if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
+ IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(&(tbl->win[i]));
}
@@ -1659,15 +1446,10 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
static void rs_update_rate_tbl(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl,
- int index)
+ struct rs_rate *rate)
{
- u32 rate;
-
- /* Update uCode's rate table. */
- rate = rate_n_flags_from_tbl(mvm, tbl, index);
- rs_fill_link_cmd(mvm, sta, lq_sta, rate);
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+ rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
}
static u8 rs_get_tid(struct iwl_lq_sta *lq_data,
@@ -1686,6 +1468,250 @@ static u8 rs_get_tid(struct iwl_lq_sta *lq_data,
return tid;
}
+static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ int i, j, n;
+ enum rs_column next_col_id;
+ const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
+ const struct rs_tx_column *next_col;
+ allow_column_func_t allow_func;
+ u8 valid_ants = iwl_fw_valid_tx_ant(mvm->fw);
+ s32 *expected_tpt_tbl;
+ s32 tpt, max_expected_tpt;
+
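+	/*
+	 * Go over the candidate columns listed for the current column: skip
+	 * columns already visited in this search cycle, columns whose antenna
+	 * config isn't supported by the chip, columns that fail one of their
+	 * allow checks, and columns whose best expected throughput can't beat
+	 * the throughput we're currently getting.
+	 */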
+ for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
+ next_col_id = curr_col->next_columns[i];
+
+ if (next_col_id == RS_COLUMN_INVALID)
+ continue;
+
+ if (lq_sta->visited_columns & BIT(next_col_id)) {
+ IWL_DEBUG_RATE(mvm, "Skip already visited column %d\n",
+ next_col_id);
+ continue;
+ }
+
+ next_col = &rs_tx_columns[next_col_id];
+
+ if (!rs_is_valid_ant(valid_ants, next_col->ant)) {
+ IWL_DEBUG_RATE(mvm,
+ "Skip column %d as ANT config isn't supported by chip. valid_ants 0x%x column ant 0x%x\n",
+ next_col_id, valid_ants, next_col->ant);
+ continue;
+ }
+
+ for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
+ allow_func = next_col->checks[j];
+ if (allow_func && !allow_func(mvm, sta, tbl))
+ break;
+ }
+
+ if (j != MAX_COLUMN_CHECKS) {
+ IWL_DEBUG_RATE(mvm,
+ "Skip column %d: not allowed (check %d failed)\n",
+ next_col_id, j);
+
+ continue;
+ }
+
+ tpt = lq_sta->last_tpt / 100;
+ expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col,
+ tbl->rate.bw);
+ if (WARN_ON_ONCE(!expected_tpt_tbl))
+ continue;
+
+ max_expected_tpt = 0;
+ for (n = 0; n < IWL_RATE_COUNT; n++)
+ if (expected_tpt_tbl[n] > max_expected_tpt)
+ max_expected_tpt = expected_tpt_tbl[n];
+
+ if (tpt >= max_expected_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "Skip column %d: can't beat current TPT. Max expected %d current %d\n",
+ next_col_id, max_expected_tpt, tpt);
+ continue;
+ }
+
+ break;
+ }
+
+ if (i == MAX_NEXT_COLUMNS)
+ return RS_COLUMN_INVALID;
+
+ IWL_DEBUG_RATE(mvm, "Found potential column %d\n", next_col_id);
+
+ return next_col_id;
+}
+
+static int rs_switch_to_column(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta,
+ enum rs_column col_id)
+{
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct rs_rate *rate = &search_tbl->rate;
+ const struct rs_tx_column *column = &rs_tx_columns[col_id];
+ const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u16 rate_mask = 0;
+ u32 rate_idx = 0;
+
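+	/*
+	 * Start from a copy of the active table, then override SGI, antenna,
+	 * modulation type and bandwidth according to the target column. A new
+	 * starting rate is only picked when the modulation mode changes.
+	 */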
+ memcpy(search_tbl, tbl, sz);
+
+ rate->sgi = column->sgi;
+ rate->ant = column->ant;
+
+ if (column->mode == RS_LEGACY) {
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ rate->type = LQ_LEGACY_A;
+ else
+ rate->type = LQ_LEGACY_G;
+
+ rate_mask = lq_sta->active_legacy_rate;
+ } else if (column->mode == RS_SISO) {
+ rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
+ rate_mask = lq_sta->active_siso_rate;
+ } else if (column->mode == RS_MIMO2) {
+ rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
+ rate_mask = lq_sta->active_mimo2_rate;
+ } else {
+ WARN_ON_ONCE("Bad column mode");
+ }
+
+ rate->bw = rs_bw_from_sta_bw(sta);
+ search_tbl->column = col_id;
+ rs_set_expected_tpt_table(lq_sta, search_tbl);
+
+ lq_sta->visited_columns |= BIT(col_id);
+
+	/* Get the best matching rate if we're changing modes, e.g.
+ * SISO->MIMO, LEGACY->SISO, MIMO->SISO
+ */
+ if (curr_column->mode != column->mode) {
+ rate_idx = rs_get_best_rate(mvm, lq_sta, search_tbl,
+ rate_mask, rate->index);
+
+ if ((rate_idx == IWL_RATE_INVALID) ||
+ !(BIT(rate_idx) & rate_mask)) {
+ IWL_DEBUG_RATE(mvm,
+				       "cannot switch with index %d"
+				       " rate mask %x\n",
+ rate_idx, rate_mask);
+
+ goto err;
+ }
+
+ rate->index = rate_idx;
+ }
+
+ IWL_DEBUG_RATE(mvm, "Switched to column %d: Index %d\n",
+ col_id, rate->index);
+
+ return 0;
+
+err:
+ rate->type = LQ_NONE;
+ return -1;
+}
+
+static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
+ struct iwl_scale_tbl_info *tbl,
+ s32 sr, int low, int high,
+ int current_tpt,
+ int low_tpt, int high_tpt)
+{
+ enum rs_action action = RS_ACTION_STAY;
+
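+	/*
+	 * sr is the success ratio scaled as percent * 128, so
+	 * RS_SR_FORCE_DECREASE (1920) corresponds to 15% and
+	 * IWL_RATE_INCREASE_TH (6400) to 50%.
+	 */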
+ /* Too many failures, decrease rate */
+ if ((sr <= RS_SR_FORCE_DECREASE) || (current_tpt == 0)) {
+ IWL_DEBUG_RATE(mvm,
+ "decrease rate because of low SR\n");
+ action = RS_ACTION_DOWNSCALE;
+ /* No throughput measured yet for adjacent rates; try increase. */
+ } else if ((low_tpt == IWL_INVALID_VALUE) &&
+ (high_tpt == IWL_INVALID_VALUE)) {
+ if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) {
+ IWL_DEBUG_RATE(mvm,
+ "Good SR and no high rate measurement. "
+ "Increase rate\n");
+ action = RS_ACTION_UPSCALE;
+ } else if (low != IWL_RATE_INVALID) {
+ IWL_DEBUG_RATE(mvm,
+ "Remain in current rate\n");
+ action = RS_ACTION_STAY;
+ }
+ }
+
+ /* Both adjacent throughputs are measured, but neither one has better
+ * throughput; we're using the best rate, don't change it!
+ */
+ else if ((low_tpt != IWL_INVALID_VALUE) &&
+ (high_tpt != IWL_INVALID_VALUE) &&
+ (low_tpt < current_tpt) &&
+ (high_tpt < current_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "Both high and low are worse. "
+ "Maintain rate\n");
+ action = RS_ACTION_STAY;
+ }
+
+ /* At least one adjacent rate's throughput is measured,
+ * and may have better performance.
+ */
+ else {
+ /* Higher adjacent rate's throughput is measured */
+ if (high_tpt != IWL_INVALID_VALUE) {
+ /* Higher rate has better throughput */
+ if (high_tpt > current_tpt &&
+ sr >= IWL_RATE_INCREASE_TH) {
+ IWL_DEBUG_RATE(mvm,
+ "Higher rate is better and good "
+					       "SR. Increase rate\n");
+ action = RS_ACTION_UPSCALE;
+ } else {
+ IWL_DEBUG_RATE(mvm,
+ "Higher rate isn't better OR "
+ "no good SR. Maintain rate\n");
+ action = RS_ACTION_STAY;
+ }
+
+ /* Lower adjacent rate's throughput is measured */
+ } else if (low_tpt != IWL_INVALID_VALUE) {
+ /* Lower rate has better throughput */
+ if (low_tpt > current_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "Lower rate is better. "
+ "Decrease rate\n");
+ action = RS_ACTION_DOWNSCALE;
+ } else if (sr >= IWL_RATE_INCREASE_TH) {
+ IWL_DEBUG_RATE(mvm,
+					       "Lower rate isn't better and "
+					       "SR is good. Increase rate\n");
+ action = RS_ACTION_UPSCALE;
+ }
+ }
+ }
+
+ /* Sanity check; asked for decrease, but success rate or throughput
+ * has been good at old rate. Don't change it.
+ */
+ if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID) &&
+ ((sr > IWL_RATE_HIGH_TH) ||
+ (current_tpt > (100 * tbl->expected_tpt[low])))) {
+ IWL_DEBUG_RATE(mvm,
+ "Sanity check failed. Maintain rate\n");
+ action = RS_ACTION_STAY;
+ }
+
+ return action;
+}
+
/*
* Do rate scaling and search for new modulation mode.
*/
@@ -1705,20 +1731,19 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
int low_tpt = IWL_INVALID_VALUE;
int high_tpt = IWL_INVALID_VALUE;
u32 fail_count;
- s8 scale_action = 0;
+ enum rs_action scale_action = RS_ACTION_STAY;
u16 rate_mask;
u8 update_lq = 0;
struct iwl_scale_tbl_info *tbl, *tbl1;
- u16 rate_scale_index_msk = 0;
u8 active_tbl = 0;
u8 done_search = 0;
u16 high_low;
s32 sr;
u8 tid = IWL_MAX_TID_COUNT;
+ u8 prev_agg = lq_sta->is_agg;
struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv;
struct iwl_mvm_tid_data *tid_data;
-
- IWL_DEBUG_RATE(mvm, "rate scale calculate new rate for skb\n");
+ struct rs_rate *rate;
/* Send management frames and NO_ACK data using lowest rate. */
/* TODO: this could probably be improved.. */
@@ -1726,8 +1751,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
- lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
-
tid = rs_get_tid(lq_sta, hdr);
if ((tid != IWL_MAX_TID_COUNT) &&
(lq_sta->tx_agg_tid_en & (1 << tid))) {
@@ -1751,45 +1774,29 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
active_tbl = 1 - lq_sta->active_tbl;
tbl = &(lq_sta->lq_info[active_tbl]);
+ rate = &tbl->rate;
+
+ if (prev_agg != lq_sta->is_agg) {
+ IWL_DEBUG_RATE(mvm,
+ "Aggregation changed: prev %d current %d. Update expected TPT table\n",
+ prev_agg, lq_sta->is_agg);
+ rs_set_expected_tpt_table(lq_sta, tbl);
+ }
/* current tx rate */
index = lq_sta->last_txrate_idx;
- IWL_DEBUG_RATE(mvm, "Rate scale index %d for type %d\n", index,
- tbl->lq_type);
-
/* rates available for this association, and for modulation mode */
- rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+ rate_mask = rs_get_supported_rates(lq_sta, rate);
- IWL_DEBUG_RATE(mvm, "mask 0x%04X\n", rate_mask);
-
- /* mask with station rate restriction */
- if (is_legacy(tbl->lq_type)) {
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
- /* supp_rates has no CCK bits in A mode */
- rate_scale_index_msk = (u16) (rate_mask &
- (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
- else
- rate_scale_index_msk = (u16) (rate_mask &
- lq_sta->supp_rates);
-
- } else {
- rate_scale_index_msk = rate_mask;
- }
-
- if (!rate_scale_index_msk)
- rate_scale_index_msk = rate_mask;
-
- if (!((1 << index) & rate_scale_index_msk)) {
+ if (!(BIT(index) & rate_mask)) {
IWL_ERR(mvm, "Current Rate is not valid\n");
if (lq_sta->search_better_tbl) {
/* revert to active table if search table is not valid*/
- tbl->lq_type = LQ_NONE;
+ rate->type = LQ_NONE;
lq_sta->search_better_tbl = 0;
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- /* get "active" rate info */
- index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
- rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
+ rs_update_rate_tbl(mvm, sta, lq_sta, &tbl->rate);
}
return;
}
@@ -1806,6 +1813,9 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
index = lq_sta->max_rate_idx;
update_lq = 1;
window = &(tbl->win[index]);
+ IWL_DEBUG_RATE(mvm,
+ "Forcing user max rate %d\n",
+ index);
goto lq_update;
}
@@ -1822,8 +1832,9 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
(window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
IWL_DEBUG_RATE(mvm,
- "LQ: still below TH. succ=%d total=%d for index %d\n",
- window->success_counter, window->counter, index);
+ "(%s: %d): Test Window: succ %d total %d\n",
+ rs_pretty_lq_type(rate->type),
+ index, window->success_counter, window->counter);
/* Can't calculate this yet; not enough history */
window->average_tpt = IWL_INVALID_VALUE;
@@ -1838,8 +1849,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
* actual average throughput */
if (window->average_tpt != ((window->success_ratio *
tbl->expected_tpt[index] + 64) / 128)) {
- IWL_ERR(mvm,
- "expected_tpt should have been calculated by now\n");
window->average_tpt = ((window->success_ratio *
tbl->expected_tpt[index] + 64) / 128);
}
@@ -1851,34 +1860,33 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
* continuing to use the setup that we've been trying. */
if (window->average_tpt > lq_sta->last_tpt) {
IWL_DEBUG_RATE(mvm,
- "LQ: SWITCHING TO NEW TABLE suc=%d cur-tpt=%d old-tpt=%d\n",
+ "SWITCHING TO NEW TABLE SR: %d "
+ "cur-tpt %d old-tpt %d\n",
window->success_ratio,
window->average_tpt,
lq_sta->last_tpt);
- if (!is_legacy(tbl->lq_type))
- lq_sta->enable_counter = 1;
-
/* Swap tables; "search" becomes "active" */
lq_sta->active_tbl = active_tbl;
current_tpt = window->average_tpt;
/* Else poor success; go back to mode in "active" table */
} else {
IWL_DEBUG_RATE(mvm,
- "LQ: GOING BACK TO THE OLD TABLE suc=%d cur-tpt=%d old-tpt=%d\n",
+ "GOING BACK TO THE OLD TABLE: SR %d "
+ "cur-tpt %d old-tpt %d\n",
window->success_ratio,
window->average_tpt,
lq_sta->last_tpt);
/* Nullify "search" table */
- tbl->lq_type = LQ_NONE;
+ rate->type = LQ_NONE;
/* Revert to "active" table */
active_tbl = lq_sta->active_tbl;
tbl = &(lq_sta->lq_info[active_tbl]);
/* Revert to "active" rate and throughput info */
- index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+ index = tbl->rate.index;
current_tpt = lq_sta->last_tpt;
/* Need to set up a new rate table in uCode */
@@ -1894,8 +1902,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
/* (Else) not in search of better modulation mode, try for better
* starting rate, while staying in this mode. */
- high_low = rs_get_adjacent_rate(mvm, index, rate_scale_index_msk,
- tbl->lq_type);
+ high_low = rs_get_adjacent_rate(mvm, index, rate_mask, rate->type);
low = high_low & 0xff;
high = (high_low >> 8) & 0xff;
@@ -1913,118 +1920,58 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
if (high != IWL_RATE_INVALID)
high_tpt = tbl->win[high].average_tpt;
- scale_action = 0;
-
- /* Too many failures, decrease rate */
- if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
- IWL_DEBUG_RATE(mvm,
- "decrease rate because of low success_ratio\n");
- scale_action = -1;
- /* No throughput measured yet for adjacent rates; try increase. */
- } else if ((low_tpt == IWL_INVALID_VALUE) &&
- (high_tpt == IWL_INVALID_VALUE)) {
- if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
- scale_action = 1;
- else if (low != IWL_RATE_INVALID)
- scale_action = 0;
- }
-
- /* Both adjacent throughputs are measured, but neither one has better
- * throughput; we're using the best rate, don't change it! */
- else if ((low_tpt != IWL_INVALID_VALUE) &&
- (high_tpt != IWL_INVALID_VALUE) &&
- (low_tpt < current_tpt) &&
- (high_tpt < current_tpt))
- scale_action = 0;
-
- /* At least one adjacent rate's throughput is measured,
- * and may have better performance. */
- else {
- /* Higher adjacent rate's throughput is measured */
- if (high_tpt != IWL_INVALID_VALUE) {
- /* Higher rate has better throughput */
- if (high_tpt > current_tpt &&
- sr >= IWL_RATE_INCREASE_TH) {
- scale_action = 1;
- } else {
- scale_action = 0;
- }
-
- /* Lower adjacent rate's throughput is measured */
- } else if (low_tpt != IWL_INVALID_VALUE) {
- /* Lower rate has better throughput */
- if (low_tpt > current_tpt) {
- IWL_DEBUG_RATE(mvm,
- "decrease rate because of low tpt\n");
- scale_action = -1;
- } else if (sr >= IWL_RATE_INCREASE_TH) {
- scale_action = 1;
- }
- }
- }
-
- /* Sanity check; asked for decrease, but success rate or throughput
- * has been good at old rate. Don't change it. */
- if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
- ((sr > IWL_RATE_HIGH_TH) ||
- (current_tpt > (100 * tbl->expected_tpt[low]))))
- scale_action = 0;
+ IWL_DEBUG_RATE(mvm,
+ "(%s: %d): cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
+ rs_pretty_lq_type(rate->type), index, current_tpt, sr,
+ low, high, low_tpt, high_tpt);
- if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
- IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) {
- if (lq_sta->last_bt_traffic >
- le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
- /*
- * don't set scale_action, don't want to scale up if
- * the rate scale doesn't otherwise think that is a
- * good idea.
- */
- } else if (lq_sta->last_bt_traffic <=
- le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
- scale_action = -1;
- }
- }
- lq_sta->last_bt_traffic =
- le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
+ scale_action = rs_get_rate_action(mvm, tbl, sr, low, high,
+ current_tpt, low_tpt, high_tpt);
- if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
- IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) {
- /* search for a new modulation */
+ /* Force a search in case BT doesn't like us being in MIMO */
+ if (is_mimo(rate) &&
+ !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) {
+ IWL_DEBUG_RATE(mvm,
+ "BT Coex forbids MIMO. Search for new config\n");
rs_stay_in_table(lq_sta, true);
goto lq_update;
}
switch (scale_action) {
- case -1:
+ case RS_ACTION_DOWNSCALE:
/* Decrease starting rate, update uCode's rate table */
if (low != IWL_RATE_INVALID) {
update_lq = 1;
index = low;
+ } else {
+ IWL_DEBUG_RATE(mvm,
+ "At the bottom rate. Can't decrease\n");
}
break;
- case 1:
+ case RS_ACTION_UPSCALE:
/* Increase starting rate, update uCode's rate table */
if (high != IWL_RATE_INVALID) {
update_lq = 1;
index = high;
+ } else {
+ IWL_DEBUG_RATE(mvm,
+ "At the top rate. Can't increase\n");
}
break;
- case 0:
+ case RS_ACTION_STAY:
/* No change */
default:
break;
}
- IWL_DEBUG_RATE(mvm,
- "choose rate scale index %d action %d low %d high %d type %d\n",
- index, scale_action, low, high, tbl->lq_type);
-
lq_update:
/* Replace uCode's rate table for the destination station. */
- if (update_lq)
- rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
+ if (update_lq) {
+ tbl->rate.index = index;
+ rs_update_rate_tbl(mvm, sta, lq_sta, &tbl->rate);
+ }
rs_stay_in_table(lq_sta, false);
@@ -2035,20 +1982,29 @@ lq_update:
* 3) Allowing a new search
*/
if (!update_lq && !done_search &&
- !lq_sta->stay_in_tbl && window->counter) {
+ lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED
+ && window->counter) {
+ enum rs_column next_column;
+
/* Save current throughput to compare with "search" throughput*/
lq_sta->last_tpt = current_tpt;
- /* Select a new "search" modulation mode to try.
- * If one is found, set up the new "search" table. */
- if (is_legacy(tbl->lq_type))
- rs_move_legacy_other(mvm, lq_sta, sta, index);
- else if (is_siso(tbl->lq_type))
- rs_move_siso_to_other(mvm, lq_sta, sta, index);
- else if (is_mimo2(tbl->lq_type))
- rs_move_mimo2_to_other(mvm, lq_sta, sta, index);
- else
- WARN_ON_ONCE(1);
+ IWL_DEBUG_RATE(mvm,
+ "Start Search: update_lq %d done_search %d rs_state %d win->counter %d\n",
+ update_lq, done_search, lq_sta->rs_state,
+ window->counter);
+
+ next_column = rs_get_next_column(mvm, lq_sta, sta, tbl);
+ if (next_column != RS_COLUMN_INVALID) {
+ int ret = rs_switch_to_column(mvm, lq_sta, sta,
+ next_column);
+ if (!ret)
+ lq_sta->search_better_tbl = 1;
+ } else {
+ IWL_DEBUG_RATE(mvm,
+ "No more columns to explore in search cycle. Go to RS_STATE_SEARCH_CYCLE_ENDED\n");
+ lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_ENDED;
+ }
/* If new "search" mode was selected, set up in uCode table */
if (lq_sta->search_better_tbl) {
@@ -2058,36 +2014,31 @@ lq_update:
rs_rate_scale_clear_window(&(tbl->win[i]));
/* Use new "search" start rate */
- index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+ index = tbl->rate.index;
- IWL_DEBUG_RATE(mvm,
- "Switch current mcs: %X index: %d\n",
- tbl->current_rate, index);
- rs_fill_link_cmd(mvm, sta, lq_sta, tbl->current_rate);
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+ rs_dump_rate(mvm, &tbl->rate,
+ "Switch to SEARCH TABLE:");
+ rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
} else {
done_search = 1;
}
}
- if (done_search && !lq_sta->stay_in_tbl) {
+ if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) {
/* If the "active" (non-search) mode was legacy,
* and we've tried switching antennas,
* but we haven't been able to try HT modes (not available),
* stay with best antenna legacy modulation for a while
* before next round of mode comparisons. */
tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
- if (is_legacy(tbl1->lq_type) && !sta->ht_cap.ht_supported &&
- lq_sta->action_counter > tbl1->max_search) {
+ if (is_legacy(&tbl1->rate) && !sta->ht_cap.ht_supported) {
IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n");
rs_set_stay_in_table(mvm, 1, lq_sta);
- }
-
+ } else {
/* If we're in an HT mode, and all 3 mode switch actions
* have been tried and compared, stay in this best modulation
* mode for a while before next round of mode comparisons. */
- if (lq_sta->enable_counter &&
- (lq_sta->action_counter >= tbl1->max_search)) {
if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
(lq_sta->tx_agg_tid_en & (1 << tid)) &&
(tid != IWL_MAX_TID_COUNT)) {
@@ -2105,7 +2056,6 @@ lq_update:
}
out:
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index);
lq_sta->last_txrate_idx = index;
}
@@ -2126,12 +2076,12 @@ out:
static void rs_initialize_lq(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
- enum ieee80211_band band)
+ enum ieee80211_band band,
+ bool init)
{
struct iwl_scale_tbl_info *tbl;
- int rate_idx;
+ struct rs_rate *rate;
int i;
- u32 rate;
u8 active_tbl = 0;
u8 valid_tx_ant;
@@ -2148,27 +2098,30 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
active_tbl = 1 - lq_sta->active_tbl;
tbl = &(lq_sta->lq_info[active_tbl]);
+ rate = &tbl->rate;
if ((i < 0) || (i >= IWL_RATE_COUNT))
i = 0;
- rate = iwl_rates[i].plcp;
- tbl->ant_type = first_antenna(valid_tx_ant);
- rate |= tbl->ant_type << RATE_MCS_ANT_POS;
-
- if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
- rate |= RATE_MCS_CCK_MSK;
+ rate->index = i;
+ rate->ant = first_antenna(valid_tx_ant);
+ rate->sgi = false;
+ rate->bw = RATE_MCS_CHAN_WIDTH_20;
+ if (band == IEEE80211_BAND_5GHZ)
+ rate->type = LQ_LEGACY_A;
+ else
+ rate->type = LQ_LEGACY_G;
- rs_get_tbl_info_from_mcs(rate, band, tbl, &rate_idx);
- if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
- rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+ WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
+ if (rate->ant == ANT_A)
+ tbl->column = RS_COLUMN_LEGACY_ANT_A;
+ else
+ tbl->column = RS_COLUMN_LEGACY_ANT_B;
- rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx);
- tbl->current_rate = rate;
rs_set_expected_tpt_table(lq_sta, tbl);
- rs_fill_link_cmd(NULL, NULL, lq_sta, rate);
+ rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
/* TODO restore station should remember the lq cmd */
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_SYNC, true);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init);
}
static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
@@ -2182,8 +2135,6 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_lq_sta *lq_sta = mvm_sta;
- IWL_DEBUG_RATE_LIMIT(mvm, "rate scale calculate new rate for skb\n");
-
/* Get max rate if user set max rate */
if (lq_sta) {
lq_sta->max_rate_idx = txrc->max_rate_idx;
@@ -2242,11 +2193,59 @@ static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
return -1;
}
+static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
+ struct ieee80211_sta_vht_cap *vht_cap,
+ struct iwl_lq_sta *lq_sta)
+{
+ int i;
+ int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
+
+ if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+ for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+ if (i == IWL_RATE_9M_INDEX)
+ continue;
+
+ /* Disable MCS9 as a workaround */
+ if (i == IWL_RATE_MCS_9_INDEX)
+ continue;
+
+ /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
+ if (i == IWL_RATE_MCS_9_INDEX &&
+ sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ continue;
+
+ lq_sta->active_siso_rate |= BIT(i);
+ }
+ }
+
+ if (sta->rx_nss < 2)
+ return;
+
+ highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
+ if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+ for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+ if (i == IWL_RATE_9M_INDEX)
+ continue;
+
+ /* Disable MCS9 as a workaround */
+ if (i == IWL_RATE_MCS_9_INDEX)
+ continue;
+
+ /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
+ if (i == IWL_RATE_MCS_9_INDEX &&
+ sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ continue;
+
+ lq_sta->active_mimo2_rate |= BIT(i);
+ }
+ }
+}
+
/*
* Called after adding a new station to initialize rate scaling
*/
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum ieee80211_band band)
+ enum ieee80211_band band, bool init)
{
int i, j;
struct ieee80211_hw *hw = mvm->hw;
@@ -2259,6 +2258,8 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
sta_priv = (struct iwl_mvm_sta *)sta->drv_priv;
lq_sta = &sta_priv->lq_sta;
+ memset(lq_sta, 0, sizeof(*lq_sta));
+
sband = hw->wiphy->bands[band];
lq_sta->lq.sta_id = sta_priv->sta_id;
@@ -2268,7 +2269,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
lq_sta->flush_timer = 0;
- lq_sta->supp_rates = sta->supp_rates[sband->band];
IWL_DEBUG_RATE(mvm,
"LQ: *** rate scale station global init for station %d ***\n",
@@ -2308,27 +2308,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->is_vht = false;
} else {
- int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
- if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
- for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
- if (i == IWL_RATE_9M_INDEX)
- continue;
-
- lq_sta->active_siso_rate |= BIT(i);
- }
- }
-
- highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
- if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
- for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
- if (i == IWL_RATE_9M_INDEX)
- continue;
-
- lq_sta->active_mimo2_rate |= BIT(i);
- }
- }
-
- /* TODO: avoid MCS9 in 20Mhz which isn't valid for 11ac */
+ rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
lq_sta->is_vht = true;
}
@@ -2341,15 +2321,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* These values will be overridden later */
lq_sta->lq.single_stream_ant_msk =
first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
- lq_sta->lq.dual_stream_ant_msk =
- iwl_fw_valid_tx_ant(mvm->fw) &
- ~first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
- if (!lq_sta->lq.dual_stream_ant_msk) {
- lq_sta->lq.dual_stream_ant_msk = ANT_AB;
- } else if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) == 2) {
- lq_sta->lq.dual_stream_ant_msk =
- iwl_fw_valid_tx_ant(mvm->fw);
- }
+ lq_sta->lq.dual_stream_ant_msk = ANT_AB;
/* as default allow aggregation for all tids */
lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
@@ -2364,121 +2336,184 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->dbg_fixed_rate = 0;
#endif
- rs_initialize_lq(mvm, sta, lq_sta, band);
+ rs_initialize_lq(mvm, sta, lq_sta, band, init);
}
-static void rs_fill_link_cmd(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta, u32 new_rate)
+static void rs_rate_update(void *mvm_r,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *priv_sta,
+ u32 changed)
+{
+ u8 tid;
+ struct iwl_op_mode *op_mode =
+ (struct iwl_op_mode *)mvm_r;
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ /* Stop any ongoing aggregations as rs starts off assuming no agg */
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+ ieee80211_stop_tx_ba_session(sta, tid);
+
+ iwl_mvm_rs_rate_init(mvm, sta, sband->band, false);
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
+ struct iwl_lq_cmd *lq_cmd,
+ enum ieee80211_band band,
+ u32 ucode_rate)
+{
+ struct rs_rate rate;
+ int i;
+ int num_rates = ARRAY_SIZE(lq_cmd->rs_table);
+ __le32 ucode_rate_le32 = cpu_to_le32(ucode_rate);
+
+ for (i = 0; i < num_rates; i++)
+ lq_cmd->rs_table[i] = ucode_rate_le32;
+
+ rs_rate_from_ucode_rate(ucode_rate, band, &rate);
+
+ if (is_mimo(&rate))
+ lq_cmd->mimo_delim = num_rates - 1;
+ else
+ lq_cmd->mimo_delim = 0;
+}
+#endif /* CONFIG_MAC80211_DEBUGFS */
+
+static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct rs_rate *rate,
+ __le32 *rs_table, int *rs_table_index,
+ int num_rates, int num_retries,
+ u8 valid_tx_ant, bool toggle_ant)
+{
+ int i, j;
+ __le32 ucode_rate;
+ bool bottom_reached = false;
+ int prev_rate_idx = rate->index;
+ int end = LINK_QUAL_MAX_RETRY_NUM;
+ int index = *rs_table_index;
+
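+	/*
+	 * Write each rate num_retries times into rs_table, stepping down to
+	 * the next lower rate in the column after each one (and toggling the
+	 * antenna for legacy columns), until num_rates rates are written, the
+	 * table is full, or the bottom of a non-legacy column is reached.
+	 */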
+ for (i = 0; i < num_rates && index < end; i++) {
+ ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm, rate));
+ for (j = 0; j < num_retries && index < end; j++, index++)
+ rs_table[index] = ucode_rate;
+
+ if (toggle_ant)
+ rs_toggle_antenna(valid_tx_ant, rate);
+
+ prev_rate_idx = rate->index;
+ bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate);
+ if (bottom_reached && !is_legacy(rate))
+ break;
+ }
+
+ if (!bottom_reached)
+ rate->index = prev_rate_idx;
+
+ *rs_table_index = index;
+}
+
+/* Building the rate table is non-trivial. When we're in the MIMO2/VHT/80Mhz/SGI
+ * column, the rate table should look like this:
+ *
+ * rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
+ * rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
+ * rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
+ * rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
+ * rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
+ * rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
+ * rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI
+ * rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI
+ * rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI
+ * rate[9] 0x800B Legacy | ANT: B Rate: 36 Mbps
+ * rate[10] 0x4009 Legacy | ANT: A Rate: 24 Mbps
+ * rate[11] 0x8007 Legacy | ANT: B Rate: 18 Mbps
+ * rate[12] 0x4005 Legacy | ANT: A Rate: 12 Mbps
+ * rate[13] 0x800F Legacy | ANT: B Rate: 9 Mbps
+ * rate[14] 0x400D Legacy | ANT: A Rate: 6 Mbps
+ * rate[15] 0x800D Legacy | ANT: B Rate: 6 Mbps
+ */
+static void rs_build_rates_table(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ const struct rs_rate *initial_rate)
{
- struct iwl_scale_tbl_info tbl_type;
- int index = 0;
- int rate_idx;
- int repeat_rate = 0;
- u8 ant_toggle_cnt = 0;
- u8 use_ht_possible = 1;
+ struct rs_rate rate;
+ int num_rates, num_retries, index = 0;
u8 valid_tx_ant = 0;
struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+ bool toggle_ant = false;
- /* Override starting rate (index 0) if needed for debug purposes */
- rs_dbgfs_set_mcs(lq_sta, &new_rate);
+ memcpy(&rate, initial_rate, sizeof(rate));
- /* Interpret new_rate (rate_n_flags) */
- rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
- &tbl_type, &rate_idx);
+ valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- /* How many times should we repeat the initial rate? */
- if (is_legacy(tbl_type.lq_type)) {
- ant_toggle_cnt = 1;
- repeat_rate = IWL_NUMBER_TRY;
+ if (is_siso(&rate)) {
+ num_rates = RS_INITIAL_SISO_NUM_RATES;
+ num_retries = RS_HT_VHT_RETRIES_PER_RATE;
+ } else if (is_mimo(&rate)) {
+ num_rates = RS_INITIAL_MIMO_NUM_RATES;
+ num_retries = RS_HT_VHT_RETRIES_PER_RATE;
} else {
- repeat_rate = min(IWL_HT_NUMBER_TRY,
- LINK_QUAL_AGG_DISABLE_START_DEF - 1);
+ num_rates = RS_INITIAL_LEGACY_NUM_RATES;
+ num_retries = RS_LEGACY_RETRIES_PER_RATE;
+ toggle_ant = true;
}
- lq_cmd->mimo_delim = is_mimo(tbl_type.lq_type) ? 1 : 0;
-
- /* Fill 1st table entry (index 0) */
- lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
-
- if (num_of_ant(tbl_type.ant_type) == 1)
- lq_cmd->single_stream_ant_msk = tbl_type.ant_type;
- else if (num_of_ant(tbl_type.ant_type) == 2)
- lq_cmd->dual_stream_ant_msk = tbl_type.ant_type;
- /* otherwise we don't modify the existing value */
-
- index++;
- repeat_rate--;
- if (mvm)
- valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
-
- /* Fill rest of rate table */
- while (index < LINK_QUAL_MAX_RETRY_NUM) {
- /* Repeat initial/next rate.
- * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
- * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
- while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
- if (is_legacy(tbl_type.lq_type)) {
- if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
- ant_toggle_cnt++;
- else if (mvm &&
- rs_toggle_antenna(valid_tx_ant,
- &new_rate, &tbl_type))
- ant_toggle_cnt = 1;
- }
+ rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
+ num_rates, num_retries, valid_tx_ant,
+ toggle_ant);
- /* Override next rate if needed for debug purposes */
- rs_dbgfs_set_mcs(lq_sta, &new_rate);
+ rs_get_lower_rate_down_column(lq_sta, &rate);
- /* Fill next table entry */
- lq_cmd->rs_table[index] =
- cpu_to_le32(new_rate);
- repeat_rate--;
- index++;
- }
+ if (is_siso(&rate)) {
+ num_rates = RS_SECONDARY_SISO_NUM_RATES;
+ num_retries = RS_SECONDARY_SISO_RETRIES;
+ } else if (is_legacy(&rate)) {
+ num_rates = RS_SECONDARY_LEGACY_NUM_RATES;
+ num_retries = RS_LEGACY_RETRIES_PER_RATE;
+ } else {
+ WARN_ON_ONCE(1);
+ }
- rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
- &rate_idx);
-
- /* Indicate to uCode which entries might be MIMO.
- * If initial rate was MIMO, this will finally end up
- * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
- if (is_mimo(tbl_type.lq_type))
- lq_cmd->mimo_delim = index;
-
- /* Get next rate */
- new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
- use_ht_possible);
-
- /* How many times should we repeat the next rate? */
- if (is_legacy(tbl_type.lq_type)) {
- if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
- ant_toggle_cnt++;
- else if (mvm &&
- rs_toggle_antenna(valid_tx_ant,
- &new_rate, &tbl_type))
- ant_toggle_cnt = 1;
-
- repeat_rate = IWL_NUMBER_TRY;
- } else {
- repeat_rate = IWL_HT_NUMBER_TRY;
- }
+ toggle_ant = true;
- /* Don't allow HT rates after next pass.
- * rs_get_lower_rate() will change type to LQ_LEGACY_A
- * or LQ_LEGACY_G.
- */
- use_ht_possible = 0;
+ rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
+ num_rates, num_retries, valid_tx_ant,
+ toggle_ant);
- /* Override next rate if needed for debug purposes */
- rs_dbgfs_set_mcs(lq_sta, &new_rate);
+ rs_get_lower_rate_down_column(lq_sta, &rate);
- /* Fill next table entry */
- lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
+ num_rates = RS_SECONDARY_LEGACY_NUM_RATES;
+ num_retries = RS_LEGACY_RETRIES_PER_RATE;
- index++;
- repeat_rate--;
- }
+ rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
+ num_rates, num_retries, valid_tx_ant,
+ toggle_ant);
+
+}
+
+static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ const struct rs_rate *initial_rate)
+{
+ struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+ u8 ant = initial_rate->ant;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ if (lq_sta->dbg_fixed_rate) {
+ rs_build_rates_table_from_fixed(mvm, lq_cmd,
+ lq_sta->band,
+ lq_sta->dbg_fixed_rate);
+ ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
+ RATE_MCS_ANT_POS;
+ } else
+#endif
+ rs_build_rates_table(mvm, lq_sta, initial_rate);
+
+ if (num_of_ant(ant) == 1)
+ lq_cmd->single_stream_ant_msk = ant;
lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
lq_cmd->agg_disable_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
@@ -2512,31 +2547,83 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
}
#ifdef CONFIG_MAC80211_DEBUGFS
-static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags)
+static int rs_pretty_print_rate(char *buf, const u32 rate)
{
- struct iwl_mvm *mvm;
- u8 valid_tx_ant;
- u8 ant_sel_tx;
- mvm = lq_sta->drv;
- valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- if (lq_sta->dbg_fixed_rate) {
- ant_sel_tx =
- ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
- >> RATE_MCS_ANT_POS);
- if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
- *rate_n_flags = lq_sta->dbg_fixed_rate;
- IWL_DEBUG_RATE(mvm, "Fixed rate ON\n");
- } else {
- lq_sta->dbg_fixed_rate = 0;
- IWL_ERR(mvm,
- "Invalid antenna selection 0x%X, Valid is 0x%X\n",
- ant_sel_tx, valid_tx_ant);
- IWL_DEBUG_RATE(mvm, "Fixed rate OFF\n");
- }
+ char *type, *bw;
+ u8 mcs = 0, nss = 0;
+ u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
+
+ if (!(rate & RATE_MCS_HT_MSK) &&
+ !(rate & RATE_MCS_VHT_MSK)) {
+ int index = iwl_hwrate_to_plcp_idx(rate);
+
+ return sprintf(buf, "Legacy | ANT: %s Rate: %s Mbps\n",
+ rs_pretty_ant(ant),
+ index == IWL_RATE_INVALID ? "BAD" :
+ iwl_rate_mcs[index].mbps);
+ }
+
+ if (rate & RATE_MCS_VHT_MSK) {
+ type = "VHT";
+ mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
+ nss = ((rate & RATE_VHT_MCS_NSS_MSK)
+ >> RATE_VHT_MCS_NSS_POS) + 1;
+ } else if (rate & RATE_MCS_HT_MSK) {
+ type = "HT";
+ mcs = rate & RATE_HT_MCS_INDEX_MSK;
} else {
- IWL_DEBUG_RATE(mvm, "Fixed rate OFF\n");
+ type = "Unknown"; /* shouldn't happen */
+ }
+
+ switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ bw = "20Mhz";
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ bw = "40Mhz";
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ bw = "80Mhz";
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ bw = "160Mhz";
+ break;
+ default:
+ bw = "BAD BW";
+ }
+
+ return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n",
+ type, rs_pretty_ant(ant), bw, mcs, nss,
+ (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
+ (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
+ (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+ (rate & RATE_MCS_BF_MSK) ? "BF " : "",
+ (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : "");
+}
+
+/**
+ * Program the device to use a fixed rate for frame transmission.
+ * This is for debugging/testing only.
+ * Once the device starts using a fixed rate, the module must be reloaded
+ * to bring back normal operation.
+ */
+static void rs_program_fix_rate(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta)
+{
+ lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
+ lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+ lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+
+ IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
+ lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
+
+ if (lq_sta->dbg_fixed_rate) {
+ struct rs_rate rate;
+ rs_rate_from_ucode_rate(lq_sta->dbg_fixed_rate,
+ lq_sta->band, &rate);
+ rs_fill_lq_cmd(mvm, NULL, lq_sta, &rate);
+ iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, false);
}
}
@@ -2572,15 +2659,14 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
char *buff;
int desc = 0;
int i = 0;
- int index = 0;
ssize_t ret;
struct iwl_lq_sta *lq_sta = file->private_data;
struct iwl_mvm *mvm;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-
+ struct rs_rate *rate = &tbl->rate;
mvm = lq_sta->drv;
- buff = kmalloc(1024, GFP_KERNEL);
+ buff = kmalloc(2048, GFP_KERNEL);
if (!buff)
return -ENOMEM;
@@ -2595,23 +2681,23 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
(iwl_fw_valid_tx_ant(mvm->fw) & ANT_B) ? "ANT_B," : "",
(iwl_fw_valid_tx_ant(mvm->fw) & ANT_C) ? "ANT_C" : "");
desc += sprintf(buff+desc, "lq type %s\n",
- (is_legacy(tbl->lq_type)) ? "legacy" :
- is_vht(tbl->lq_type) ? "VHT" : "HT");
- if (is_ht(tbl->lq_type)) {
+ (is_legacy(rate)) ? "legacy" :
+ is_vht(rate) ? "VHT" : "HT");
+ if (!is_legacy(rate)) {
desc += sprintf(buff+desc, " %s",
- (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
+ (is_siso(rate)) ? "SISO" : "MIMO2");
desc += sprintf(buff+desc, " %s",
- (is_ht20(tbl)) ? "20MHz" :
- (is_ht40(tbl)) ? "40MHz" :
- (is_ht80(tbl)) ? "80Mhz" : "BAD BW");
+ (is_ht20(rate)) ? "20MHz" :
+ (is_ht40(rate)) ? "40MHz" :
+ (is_ht80(rate)) ? "80Mhz" : "BAD BW");
desc += sprintf(buff+desc, " %s %s\n",
- (tbl->is_SGI) ? "SGI" : "",
+ (rate->sgi) ? "SGI" : "NGI",
(lq_sta->is_agg) ? "AGG on" : "");
}
desc += sprintf(buff+desc, "last tx rate=0x%X\n",
lq_sta->last_rate_n_flags);
desc += sprintf(buff+desc,
- "general: flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
+ "general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
lq_sta->lq.flags,
lq_sta->lq.mimo_delim,
lq_sta->lq.single_stream_ant_msk,
@@ -2631,19 +2717,10 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
lq_sta->lq.initial_rate_index[3]);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- index = iwl_hwrate_to_plcp_idx(
- le32_to_cpu(lq_sta->lq.rs_table[i]));
- if (is_legacy(tbl->lq_type)) {
- desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
- i, le32_to_cpu(lq_sta->lq.rs_table[i]),
- iwl_rate_mcs[index].mbps);
- } else {
- desc += sprintf(buff+desc,
- " rate[%d] 0x%X %smbps (%s)\n",
- i, le32_to_cpu(lq_sta->lq.rs_table[i]),
- iwl_rate_mcs[index].mbps,
- iwl_rate_mcs[index].mcs);
- }
+ u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]);
+
+ desc += sprintf(buff+desc, " rate[%d] 0x%X ", i, r);
+ desc += rs_pretty_print_rate(buff+desc, r);
}
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
@@ -2665,6 +2742,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
int i, j;
ssize_t ret;
struct iwl_scale_tbl_info *tbl;
+ struct rs_rate *rate;
struct iwl_lq_sta *lq_sta = file->private_data;
buff = kmalloc(1024, GFP_KERNEL);
@@ -2673,16 +2751,17 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
for (i = 0; i < LQ_SIZE; i++) {
tbl = &(lq_sta->lq_info[i]);
+ rate = &tbl->rate;
desc += sprintf(buff+desc,
"%s type=%d SGI=%d BW=%s DUP=0\n"
- "rate=0x%X\n",
+ "index=%d\n",
lq_sta->active_tbl == i ? "*" : "x",
- tbl->lq_type,
- tbl->is_SGI,
- is_ht20(tbl) ? "20Mhz" :
- is_ht40(tbl) ? "40Mhz" :
- is_ht80(tbl) ? "80Mhz" : "ERR",
- tbl->current_rate);
+ rate->type,
+ rate->sgi,
+ is_ht20(rate) ? "20Mhz" :
+ is_ht40(rate) ? "40Mhz" :
+ is_ht80(rate) ? "80Mhz" : "ERR",
+ rate->index);
for (j = 0; j < IWL_RATE_COUNT; j++) {
desc += sprintf(buff+desc,
"counter=%d success=%d %%=%d\n",
@@ -2746,6 +2825,7 @@ static struct rate_control_ops rs_mvm_ops = {
.free = rs_free,
.alloc_sta = rs_alloc_sta,
.free_sta = rs_free_sta,
+ .rate_update = rs_rate_update,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_sta_debugfs = rs_add_debugfs,
.remove_sta_debugfs = rs_remove_debugfs,
@@ -2778,13 +2858,13 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
if (enable) {
if (mvmsta->tx_protection == 0)
- lq->flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK;
+ lq->flags |= LQ_FLAG_USE_RTS_MSK;
mvmsta->tx_protection++;
} else {
mvmsta->tx_protection--;
if (mvmsta->tx_protection == 0)
- lq->flags &= ~LQ_FLAG_SET_STA_TLC_RTS_MSK;
+ lq->flags &= ~LQ_FLAG_USE_RTS_MSK;
}
- return iwl_mvm_send_lq_cmd(mvm, lq, CMD_ASYNC, false);
+ return iwl_mvm_send_lq_cmd(mvm, lq, false);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 5d5344f7070b..7bc6404f6986 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -155,38 +155,7 @@ enum {
#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
#define IWL_RATE_HIGH_TH 10880 /* 85% */
#define IWL_RATE_INCREASE_TH 6400 /* 50% */
-#define IWL_RATE_DECREASE_TH 1920 /* 15% */
-
-/* possible actions when in legacy mode */
-enum {
- IWL_LEGACY_SWITCH_ANTENNA,
- IWL_LEGACY_SWITCH_SISO,
- IWL_LEGACY_SWITCH_MIMO2,
- IWL_LEGACY_FIRST_ACTION = IWL_LEGACY_SWITCH_ANTENNA,
- IWL_LEGACY_LAST_ACTION = IWL_LEGACY_SWITCH_MIMO2,
-};
-
-/* possible actions when in siso mode */
-enum {
- IWL_SISO_SWITCH_ANTENNA,
- IWL_SISO_SWITCH_MIMO2,
- IWL_SISO_SWITCH_GI,
- IWL_SISO_FIRST_ACTION = IWL_SISO_SWITCH_ANTENNA,
- IWL_SISO_LAST_ACTION = IWL_SISO_SWITCH_GI,
-};
-
-/* possible actions when in mimo mode */
-enum {
- IWL_MIMO2_SWITCH_SISO_A,
- IWL_MIMO2_SWITCH_SISO_B,
- IWL_MIMO2_SWITCH_GI,
- IWL_MIMO2_FIRST_ACTION = IWL_MIMO2_SWITCH_SISO_A,
- IWL_MIMO2_LAST_ACTION = IWL_MIMO2_SWITCH_GI,
-};
-
-#define IWL_MAX_SEARCH IWL_MIMO2_LAST_ACTION
-
-#define IWL_ACTION_LIMIT 3 /* # possible actions */
+#define RS_SR_FORCE_DECREASE 1920 /* 15% */
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
@@ -224,22 +193,45 @@ enum iwl_table_type {
LQ_MAX,
};
-#define is_legacy(tbl) (((tbl) == LQ_LEGACY_G) || ((tbl) == LQ_LEGACY_A))
-#define is_ht_siso(tbl) ((tbl) == LQ_HT_SISO)
-#define is_ht_mimo2(tbl) ((tbl) == LQ_HT_MIMO2)
-#define is_vht_siso(tbl) ((tbl) == LQ_VHT_SISO)
-#define is_vht_mimo2(tbl) ((tbl) == LQ_VHT_MIMO2)
-#define is_siso(tbl) (is_ht_siso(tbl) || is_vht_siso(tbl))
-#define is_mimo2(tbl) (is_ht_mimo2(tbl) || is_vht_mimo2(tbl))
-#define is_mimo(tbl) (is_mimo2(tbl))
-#define is_ht(tbl) (is_ht_siso(tbl) || is_ht_mimo2(tbl))
-#define is_vht(tbl) (is_vht_siso(tbl) || is_vht_mimo2(tbl))
-#define is_a_band(tbl) ((tbl) == LQ_LEGACY_A)
-#define is_g_band(tbl) ((tbl) == LQ_LEGACY_G)
-
-#define is_ht20(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_20)
-#define is_ht40(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_40)
-#define is_ht80(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_80)
+struct rs_rate {
+ int index;
+ enum iwl_table_type type;
+ u8 ant;
+ u32 bw;
+ bool sgi;
+};
+
+
+#define is_type_legacy(type) (((type) == LQ_LEGACY_G) || \
+ ((type) == LQ_LEGACY_A))
+#define is_type_ht_siso(type) ((type) == LQ_HT_SISO)
+#define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2)
+#define is_type_vht_siso(type) ((type) == LQ_VHT_SISO)
+#define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2)
+#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type))
+#define is_type_mimo2(type) (is_type_ht_mimo2(type) || is_type_vht_mimo2(type))
+#define is_type_mimo(type) (is_type_mimo2(type))
+#define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type))
+#define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type))
+#define is_type_a_band(type) ((type) == LQ_LEGACY_A)
+#define is_type_g_band(type) ((type) == LQ_LEGACY_G)
+
+#define is_legacy(rate) is_type_legacy((rate)->type)
+#define is_ht_siso(rate) is_type_ht_siso((rate)->type)
+#define is_ht_mimo2(rate) is_type_ht_mimo2((rate)->type)
+#define is_vht_siso(rate) is_type_vht_siso((rate)->type)
+#define is_vht_mimo2(rate) is_type_vht_mimo2((rate)->type)
+#define is_siso(rate) is_type_siso((rate)->type)
+#define is_mimo2(rate) is_type_mimo2((rate)->type)
+#define is_mimo(rate) is_type_mimo((rate)->type)
+#define is_ht(rate) is_type_ht((rate)->type)
+#define is_vht(rate) is_type_vht((rate)->type)
+#define is_a_band(rate) is_type_a_band((rate)->type)
+#define is_g_band(rate) is_type_g_band((rate)->type)
+
+#define is_ht20(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_20)
+#define is_ht40(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_40)
+#define is_ht80(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_80)
#define IWL_MAX_MCS_DISPLAY_SIZE 12
@@ -257,7 +249,23 @@ struct iwl_rate_scale_data {
s32 success_ratio; /* per-cent * 128 */
s32 counter; /* number of frames attempted */
s32 average_tpt; /* success ratio * expected throughput */
- unsigned long stamp;
+};
+
+/* Possible Tx columns
+ * Tx Column = a combo of legacy/siso/mimo x antenna x SGI
+ */
+enum rs_column {
+ RS_COLUMN_LEGACY_ANT_A = 0,
+ RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_SISO_ANT_B_SGI,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_MIMO2_SGI,
+
+ RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_INVALID,
};
/**
@@ -267,17 +275,18 @@ struct iwl_rate_scale_data {
* one for "active", and one for "search".
*/
struct iwl_scale_tbl_info {
- enum iwl_table_type lq_type;
- u8 ant_type;
- u8 is_SGI; /* 1 = short guard interval */
- u32 bw; /* channel bandwidth; RATE_MCS_CHAN_WIDTH_XX */
- u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
- u8 max_search; /* maximun number of tables we can search */
+ struct rs_rate rate;
+ enum rs_column column;
s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
- u32 current_rate; /* rate_n_flags, uCode API format */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
+enum {
+ RS_STATE_SEARCH_CYCLE_STARTED,
+ RS_STATE_SEARCH_CYCLE_ENDED,
+ RS_STATE_STAY_IN_COLUMN,
+};
+
/**
* struct iwl_lq_sta -- driver's rate scaling private structure
*
@@ -285,8 +294,7 @@ struct iwl_scale_tbl_info {
*/
struct iwl_lq_sta {
u8 active_tbl; /* index of active table, range 0-1 */
- u8 enable_counter; /* indicates HT mode */
- u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
+ u8 rs_state; /* RS_STATE_* */
u8 search_better_tbl; /* 1: currently trying alternate mode */
s32 last_tpt;
@@ -299,12 +307,13 @@ struct iwl_lq_sta {
u32 total_success; /* total successful frames, any/all rates */
u64 flush_timer; /* time staying in mode before new search */
- u8 action_counter; /* # mode-switch actions tried */
+ u32 visited_columns; /* Bitmask marking which Tx columns were
+ * explored during a search cycle
+ */
bool is_vht;
enum ieee80211_band band;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
- u32 supp_rates;
u16 active_legacy_rate;
u16 active_siso_rate;
u16 active_mimo2_rate;
@@ -328,32 +337,11 @@ struct iwl_lq_sta {
u32 last_rate_n_flags;
/* packets destined for this STA are aggregated */
u8 is_agg;
- /* BT traffic this sta was last updated in */
- u8 last_bt_traffic;
-};
-
-enum iwl_bt_coex_profile_traffic_load {
- IWL_BT_COEX_TRAFFIC_LOAD_NONE = 0,
- IWL_BT_COEX_TRAFFIC_LOAD_LOW = 1,
- IWL_BT_COEX_TRAFFIC_LOAD_HIGH = 2,
- IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS = 3,
-/*
- * There are no more even though below is a u8, the
- * indication from the BT device only has two bits.
- */
};
-
-static inline u8 num_of_ant(u8 mask)
-{
- return !!((mask) & ANT_A) +
- !!((mask) & ANT_B) +
- !!((mask) & ANT_C);
-}
-
/* Initialize station's rate scaling information after adding station */
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum ieee80211_band band);
+ enum ieee80211_band band, bool init);
/**
* iwl_rate_control_register - Register the rate control algorithm callbacks
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 3a1f3982109d..a85b60f7e67e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -251,6 +251,12 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
stats->flag |= RX_FLAG_DECRYPTED;
return 0;
+ case RX_MPDU_RES_STATUS_SEC_EXT_ENC:
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
+ return -1;
+ stats->flag |= RX_FLAG_DECRYPTED;
+ return 0;
+
default:
IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index dff7592e1ff8..742afc429c94 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,9 @@
#define IWL_PLCP_QUIET_THRESH 1
#define IWL_ACTIVE_QUIET_TIME 10
+#define LONG_OUT_TIME_PERIOD 600
+#define SHORT_OUT_TIME_PERIOD 200
+#define SUSPEND_TIME_PERIOD 100
static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
{
@@ -87,20 +90,22 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
return cpu_to_le16(rx_chain);
}
-static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif)
+static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif,
+ u32 flags, bool is_assoc)
{
- if (vif->bss_conf.assoc)
- return cpu_to_le32(200 * 1024);
- else
+ if (!is_assoc)
return 0;
+ if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
+ return cpu_to_le32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
+ return cpu_to_le32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
}
-static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif)
+static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif,
+ bool is_assoc)
{
- if (!vif->bss_conf.assoc)
+ if (!is_assoc)
return 0;
-
- return cpu_to_le32(ieee80211_tu_to_usec(vif->bss_conf.beacon_int));
+ return cpu_to_le32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
}
static inline __le32
@@ -192,7 +197,7 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
for (i = 0; i < cmd->channel_count; i++) {
chan->channel = cpu_to_le16(req->channels[i]->hw_value);
chan->type = cpu_to_le32(type);
- if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
chan->active_dwell = cpu_to_le16(active_dwell);
chan->passive_dwell = cpu_to_le16(passive_dwell);
@@ -262,6 +267,15 @@ static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
return (u16)len;
}
+static void iwl_mvm_vif_assoc_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ bool *is_assoc = data;
+
+ if (vif->bss_conf.assoc)
+ *is_assoc = true;
+}
+
int iwl_mvm_scan_request(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
@@ -274,6 +288,7 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
struct iwl_scan_cmd *cmd = mvm->scan_cmd;
+ bool is_assoc = false;
int ret;
u32 status;
int ssid_len = 0;
@@ -289,13 +304,17 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
memset(cmd, 0, sizeof(struct iwl_scan_cmd) +
mvm->fw->ucode_capa.max_probe_length +
(MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel)));
-
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_vif_assoc_iterator,
+ &is_assoc);
cmd->channel_count = (u8)req->n_channels;
cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
- cmd->max_out_time = iwl_mvm_scan_max_out_time(vif);
- cmd->suspend_time = iwl_mvm_scan_suspend_time(vif);
+ cmd->max_out_time = iwl_mvm_scan_max_out_time(vif, req->flags,
+ is_assoc);
+ cmd->suspend_time = iwl_mvm_scan_suspend_time(vif, is_assoc);
cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
@@ -325,7 +344,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0);
- cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
+ cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
+ TX_CMD_FLG_BT_DIS);
cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
cmd->tx_cmd.rate_n_flags =
@@ -454,13 +474,18 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
if (mvm->scan_status == IWL_MVM_SCAN_NONE)
return;
+ if (iwl_mvm_is_radio_killed(mvm)) {
+ ieee80211_scan_completed(mvm->hw, true);
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+ return;
+ }
+
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
scan_abort_notif,
ARRAY_SIZE(scan_abort_notif),
iwl_mvm_scan_abort_notif, NULL);
- ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD,
- CMD_SYNC | CMD_SEND_IN_RFKILL, 0, NULL);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
if (ret) {
IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
/* mac80211's state will be cleaned in the fw_restart flow */
@@ -522,6 +547,12 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req,
struct iwl_scan_offload_cmd *scan)
{
+ bool is_assoc = false;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_vif_assoc_iterator,
+ &is_assoc);
scan->channel_count =
mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
@@ -529,8 +560,9 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
- scan->max_out_time = cpu_to_le32(200 * 1024);
- scan->suspend_time = iwl_mvm_scan_suspend_time(vif);
+ scan->max_out_time = iwl_mvm_scan_max_out_time(vif, req->flags,
+ is_assoc);
+ scan->suspend_time = iwl_mvm_scan_suspend_time(vif, is_assoc);
scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
@@ -642,7 +674,7 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
channels->iter_count[index] = cpu_to_le16(1);
channels->iter_interval[index] = 0;
- if (!(s_band->channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ if (!(s_band->channels[i].flags & IEEE80211_CHAN_NO_IR))
channels->type[index] |=
cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
@@ -776,6 +808,8 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
+ if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
+ profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
for (i = 0; i < req->n_match_sets; i++) {
profile = &profile_cfg->profiles[i];
@@ -817,11 +851,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
IWL_DEBUG_SCAN(mvm,
"Sending scheduled scan with filtering, filter len %d\n",
req->n_match_sets);
- scan_req.flags |=
- cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID);
} else {
IWL_DEBUG_SCAN(mvm,
"Sending Scheduled scan without filtering\n");
+ scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
}
return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
new file mode 100644
index 000000000000..8401627c0030
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
@@ -0,0 +1,291 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+
+/* For counting bound interfaces */
+struct iwl_mvm_active_iface_iterator_data {
+ struct ieee80211_vif *ignore_vif;
+ u8 sta_vif_ap_sta_id;
+ enum iwl_sf_state sta_vif_state;
+ int num_active_macs;
+};
+
+/*
+ * Count the bound interfaces that are not P2P devices, excluding
+ * data->ignore_vif. If one of them is a station interface,
+ * data->sta_vif_ap_sta_id and data->sta_vif_state are filled in as well.
+ */
+static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_active_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif == data->ignore_vif || !mvmvif->phy_ctxt ||
+ vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ return;
+
+ data->num_active_macs++;
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ data->sta_vif_ap_sta_id = mvmvif->ap_sta_id;
+ if (vif->bss_conf.assoc)
+ data->sta_vif_state = SF_FULL_ON;
+ else
+ data->sta_vif_state = SF_INIT_OFF;
+ }
+}
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in SF_FULL_ON state.
+ */
+static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
+ {
+ cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER),
+ cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER)
+ },
+ {
+ cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER),
+ cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER)
+ },
+ {
+ cpu_to_le32(SF_MCAST_AGING_TIMER),
+ cpu_to_le32(SF_MCAST_IDLE_TIMER)
+ },
+ {
+ cpu_to_le32(SF_BA_AGING_TIMER),
+ cpu_to_le32(SF_BA_IDLE_TIMER)
+ },
+ {
+ cpu_to_le32(SF_TX_RE_AGING_TIMER),
+ cpu_to_le32(SF_TX_RE_IDLE_TIMER)
+ },
+};
+
+static void iwl_mvm_fill_sf_command(struct iwl_sf_cfg_cmd *sf_cmd,
+ struct ieee80211_sta *sta)
+{
+ int i, j, watermark;
+
+ sf_cmd->watermark[SF_LONG_DELAY_ON] = cpu_to_le32(SF_W_MARK_SCAN);
+
+ /*
+ * If we are in association flow - check antenna configuration
+ * capabilities of the AP station, and choose the watermark accordingly.
+ */
+ if (sta) {
+ if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) {
+ switch (sta->rx_nss) {
+ case 1:
+ watermark = SF_W_MARK_SISO;
+ break;
+ case 2:
+ watermark = SF_W_MARK_MIMO2;
+ break;
+ default:
+ watermark = SF_W_MARK_MIMO3;
+ break;
+ }
+ } else {
+ watermark = SF_W_MARK_LEGACY;
+ }
+ /* default watermark value for unassociated mode. */
+ } else {
+ watermark = SF_W_MARK_MIMO2;
+ }
+ sf_cmd->watermark[SF_FULL_ON] = cpu_to_le32(watermark);
+
+ for (i = 0; i < SF_NUM_SCENARIO; i++) {
+ for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++) {
+ sf_cmd->long_delay_timeouts[i][j] =
+ cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
+ }
+ }
+ BUILD_BUG_ON(sizeof(sf_full_timeout) !=
+ sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES);
+
+ memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
+ sizeof(sf_full_timeout));
+}
+
+static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
+ enum iwl_sf_state new_state)
+{
+ struct iwl_sf_cfg_cmd sf_cmd = {
+ .state = new_state,
+ };
+ struct ieee80211_sta *sta;
+ int ret = 0;
+
+ /*
+ * If an associated AP sta changed its antenna configuration, the state
+ * will remain FULL_ON but SF parameters need to be reconsidered.
+ */
+ if (new_state != SF_FULL_ON && mvm->sf_state == new_state)
+ return 0;
+
+ switch (new_state) {
+ case SF_UNINIT:
+ break;
+ case SF_FULL_ON:
+ if (sta_id == IWL_MVM_STATION_COUNT) {
+ IWL_ERR(mvm,
+ "No station: Cannot switch SF to FULL_ON\n");
+ return -EINVAL;
+ }
+ rcu_read_lock();
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (IS_ERR_OR_NULL(sta)) {
+ IWL_ERR(mvm, "Invalid station id\n");
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ iwl_mvm_fill_sf_command(&sf_cmd, sta);
+ rcu_read_unlock();
+ break;
+ case SF_INIT_OFF:
+ iwl_mvm_fill_sf_command(&sf_cmd, NULL);
+ break;
+ default:
+ WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n",
+ new_state);
+ return -EINVAL;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_SF_CFG_CMD, CMD_ASYNC,
+ sizeof(sf_cmd), &sf_cmd);
+ if (!ret)
+ mvm->sf_state = new_state;
+
+ return ret;
+}
+
+/*
+ * Update Smart fifo:
+ * Count bound interfaces that are not to be removed, ignoring p2p devices,
+ * and set new state accordingly.
+ */
+int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
+ bool remove_vif)
+{
+ enum iwl_sf_state new_state;
+ u8 sta_id = IWL_MVM_STATION_COUNT;
+ struct iwl_mvm_vif *mvmvif = NULL;
+ struct iwl_mvm_active_iface_iterator_data data = {
+ .ignore_vif = changed_vif,
+ .sta_vif_state = SF_UNINIT,
+ .sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
+ };
+
+ if (IWL_UCODE_API(mvm->fw->ucode_ver) < 8)
+ return 0;
+
+ /*
+ * Ignore the call if we are in HW Restart flow, or if the handled
+ * vif is a p2p device.
+ */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+ (changed_vif && changed_vif->type == NL80211_IFTYPE_P2P_DEVICE))
+ return 0;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bound_iface_iterator,
+ &data);
+
+ /* If changed_vif exists and is not to be removed, add to the count */
+ if (changed_vif && !remove_vif)
+ data.num_active_macs++;
+
+ switch (data.num_active_macs) {
+ case 0:
+ /* If there are no active macs - change state to SF_INIT_OFF */
+ new_state = SF_INIT_OFF;
+ break;
+ case 1:
+ if (remove_vif) {
+ /* The one active mac left is of type station
+ * and we filled the relevant data during iteration
+ */
+ new_state = data.sta_vif_state;
+ sta_id = data.sta_vif_ap_sta_id;
+ } else {
+ if (WARN_ON(!changed_vif))
+ return -EINVAL;
+ if (changed_vif->type != NL80211_IFTYPE_STATION) {
+ new_state = SF_UNINIT;
+ } else if (changed_vif->bss_conf.assoc) {
+ mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
+ sta_id = mvmvif->ap_sta_id;
+ new_state = SF_FULL_ON;
+ } else {
+ new_state = SF_INIT_OFF;
+ }
+ }
+ break;
+ default:
+ /* If there are multiple active macs - change to SF_UNINIT */
+ new_state = SF_UNINIT;
+ }
+ return iwl_mvm_sf_config(mvm, sta_id, new_state);
+}
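As a quick cross-check of the decision logic in iwl_mvm_sf_update() above, the same state selection can be written as a pure function. The enum and names below are local stand-ins for the driver's iwl_sf_state and are illustrative only, not part of the patch.

#include <assert.h>
#include <stdbool.h>

enum sf_state { SF_STATE_UNINIT, SF_STATE_INIT_OFF, SF_STATE_FULL_ON };

/* num_active_macs counts bound non-P2P-device interfaces (including the
 * changed vif unless it is being removed); the booleans describe the single
 * remaining interface when that count is exactly one. */
static enum sf_state sf_pick_state(int num_active_macs,
				   bool single_is_station,
				   bool single_is_assoc)
{
	if (num_active_macs == 0)
		return SF_STATE_INIT_OFF;	/* nothing bound */
	if (num_active_macs > 1)
		return SF_STATE_UNINIT;		/* multiple MACs: SF disabled */
	if (!single_is_station)
		return SF_STATE_UNINIT;		/* e.g. an AP/GO interface */
	return single_is_assoc ? SF_STATE_FULL_ON : SF_STATE_INIT_OFF;
}

int main(void)
{
	assert(sf_pick_state(0, false, false) == SF_STATE_INIT_OFF);
	assert(sf_pick_state(1, true, true)   == SF_STATE_FULL_ON);
	assert(sf_pick_state(1, true, false)  == SF_STATE_INIT_OFF);
	assert(sf_pick_state(2, true, true)   == SF_STATE_UNINIT);
	return 0;
}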
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 329952363a54..3397f59cd4e4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -452,8 +452,15 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
- /* This station is in use */
- if (!IS_ERR(sta))
+ /*
+ * This station is in use or RCU-removed; the latter happens in
+ * managed mode, where mac80211 removes the station before we
+ * can remove it from firmware (we can only do that after the
+ * MAC is marked unassociated), and possibly while the deauth
+ * frame to disconnect from the AP is still queued. Then, the
+ * station pointer is -ENOENT when the last skb is reclaimed.
+ */
+ if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
continue;
if (PTR_ERR(sta) == -EINVAL) {
@@ -645,7 +652,7 @@ int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
- static const u8 *baddr = _baddr;
+ const u8 *baddr = _baddr;
lockdep_assert_held(&mvm->mutex);
@@ -840,7 +847,7 @@ static const u8 tid_to_ac[] = {
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data;
int txq_id;
@@ -895,7 +902,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
int queue, fifo, ret;
u16 ssn;
@@ -932,26 +939,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
sta->addr, tid);
- if (mvm->cfg->ht_params->use_rts_for_aggregation) {
- /*
- * switch to RTS/CTS if it is the prefer protection
- * method for HT traffic
- * this function also sends the LQ command
- */
- return iwl_mvm_tx_protection(mvm, mvmsta, true);
- /*
- * TODO: remove the TLC_RTS flag when we tear down the last
- * AGG session (agg_tids_count in DVM)
- */
- }
-
- return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, CMD_ASYNC, false);
+ return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid)
{
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
u16 txq_id;
int err;
@@ -1023,7 +1017,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid)
{
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
u16 txq_id;
enum iwl_mvm_agg_state old_state;
@@ -1123,8 +1117,8 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
memcpy(cmd.key, keyconf->key, keyconf->keylen);
break;
default:
- WARN_ON(1);
- return -EINVAL;
+ key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
+ memcpy(cmd.key, keyconf->key, keyconf->keylen);
}
if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
@@ -1288,8 +1282,8 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
0, NULL, CMD_SYNC);
break;
default:
- IWL_ERR(mvm, "Unknown cipher %x\n", keyconf->cipher);
- ret = -EINVAL;
+ ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf,
+ sta_id, 0, NULL, CMD_SYNC);
}
if (ret)
@@ -1416,7 +1410,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd_v6 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
@@ -1438,7 +1432,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
u16 sleep_state_flags =
(reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd_v6 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 4dfc359a4bdd..4968d0237dc5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -298,6 +298,12 @@ struct iwl_mvm_sta {
bool tt_tx_protection;
};
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
+{
+ return (void *)sta->drv_priv;
+}
+
/**
* struct iwl_mvm_int_sta - representation of an internal station (auxiliary or
* broadcast)
diff --git a/drivers/net/wireless/iwlwifi/mvm/testmode.h b/drivers/net/wireless/iwlwifi/mvm/testmode.h
index eb74391d91ca..0241665925f7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/testmode.h
+++ b/drivers/net/wireless/iwlwifi/mvm/testmode.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 95ce4b601fef..b4c2abaa297b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -249,12 +249,12 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_mvm_time_event_data *te_data = data;
struct iwl_time_event_resp *resp;
- int resp_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ int resp_len = iwl_rx_packet_payload_len(pkt);
if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
return true;
- if (WARN_ON_ONCE(resp_len != sizeof(pkt->hdr) + sizeof(*resp))) {
+ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
return true;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index d9c8d6cfa2db..4a61c8c02372 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 1f3282dff513..3afa6b6bf835 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -340,7 +340,7 @@ static void check_exit_ctkill(struct work_struct *work)
iwl_trans_start_hw(mvm->trans);
temp = check_nic_temperature(mvm);
- iwl_trans_stop_hw(mvm->trans, false);
+ iwl_trans_stop_device(mvm->trans);
if (temp < MIN_TEMPERATURE || temp > MAX_TEMPERATURE) {
IWL_DEBUG_TEMP(mvm, "Failed to measure NIC temperature\n");
@@ -388,7 +388,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
continue;
- mvmsta = (void *)sta->drv_priv;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (enable == mvmsta->tt_tx_protection)
continue;
err = iwl_mvm_tx_protection(mvm, mvmsta, enable);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 43d97c33a75a..4df12fa9d336 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -253,8 +253,7 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
break;
default:
- IWL_ERR(mvm, "Unknown encode cipher %x\n", keyconf->cipher);
- break;
+ tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
}
}
@@ -276,6 +275,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
return NULL;
memset(dev_cmd, 0, sizeof(*dev_cmd));
+ dev_cmd->hdr.cmd = TX_CMD;
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
if (info->control.hw_key)
@@ -361,7 +361,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
u8 txq_id = info->hw_queue;
bool is_data_qos = false, is_ampdu = false;
- mvmsta = (void *)sta->drv_priv;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
fc = hdr->frame_control;
if (WARN_ON_ONCE(!mvmsta))
@@ -390,7 +390,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
seq_number &= IEEE80211_SCTL_SEQ;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(seq_number);
- seq_number += 0x10;
is_data_qos = true;
is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
}
@@ -407,13 +406,13 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
}
IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
- tid, txq_id, seq_number);
+ tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
goto drop_unlock_sta;
if (is_data_qos && !ieee80211_has_morefrags(fc))
- mvmsta->tid_data[tid].seq_number = seq_number;
+ mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
spin_unlock(&mvmsta->lock);
@@ -432,7 +431,7 @@ drop:
static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 tid)
{
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
struct ieee80211_vif *vif = mvmsta->vif;
@@ -660,9 +659,15 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ /*
+ * sta can't be NULL otherwise it'd mean that the sta has been freed in
+ * the firmware while we still have packets for it in the Tx queues.
+ */
+ if (WARN_ON_ONCE(!sta))
+ goto out;
- if (!IS_ERR_OR_NULL(sta)) {
- mvmsta = (void *)sta->drv_priv;
+ if (!IS_ERR(sta)) {
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (tid != IWL_TID_NON_QOS) {
struct iwl_mvm_tid_data *tid_data =
@@ -676,7 +681,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
spin_unlock_bh(&mvmsta->lock);
}
} else {
- sta = NULL;
mvmsta = NULL;
}
@@ -684,42 +688,38 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
* If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out...
*/
- if (txq_id < mvm->first_agg_queue && !WARN_ON(skb_freed > 1) &&
- atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) {
- if (mvmsta) {
- /*
- * If there are no pending frames for this STA, notify
- * mac80211 that this station can go to sleep in its
- * STA table.
- */
- if (mvmsta->vif->type == NL80211_IFTYPE_AP)
- ieee80211_sta_block_awake(mvm->hw, sta, false);
- /*
- * We might very well have taken mvmsta pointer while
- * the station was being removed. The remove flow might
- * have seen a pending_frame (because we didn't take
- * the lock) even if now the queues are drained. So make
- * really sure now that this the station is not being
- * removed. If it is, run the drain worker to remove it.
- */
- spin_lock_bh(&mvmsta->lock);
- sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
- if (IS_ERR_OR_NULL(sta)) {
- /*
- * Station disappeared in the meantime:
- * so we are draining.
- */
- set_bit(sta_id, mvm->sta_drained);
- schedule_work(&mvm->sta_drained_wk);
- }
- spin_unlock_bh(&mvmsta->lock);
- } else if (!mvmsta) {
- /* Tx response without STA, so we are draining */
- set_bit(sta_id, mvm->sta_drained);
- schedule_work(&mvm->sta_drained_wk);
- }
+ if (txq_id >= mvm->first_agg_queue)
+ goto out;
+
+ /* We can't free more than one frame at once on a shared queue */
+ WARN_ON(skb_freed > 1);
+
+ /* If we still have frames from this STA, there is nothing to do here */
+ if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
+ goto out;
+
+ if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
+ /*
+ * If there are no pending frames for this STA, notify
+ * mac80211 that this station can go to sleep in its
+ * STA table.
+ * If mvmsta is not NULL, sta is valid.
+ */
+ ieee80211_sta_block_awake(mvm->hw, sta, false);
+ }
+
+ if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
+ /*
+ * We are draining and this was the last packet - pre_rcu_remove
+ * has been called already. We might be after the
+ * synchronize_net already.
+ * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
+ */
+ set_bit(sta_id, mvm->sta_drained);
+ schedule_work(&mvm->sta_drained_wk);
}
+out:
rcu_read_unlock();
}
@@ -793,7 +793,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmsta->tid_data[tid].rate_n_flags =
le32_to_cpu(tx_resp->initial_rate);
}
@@ -849,7 +849,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
return 0;
}
- mvmsta = (void *)sta->drv_priv;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
tid_data = &mvmsta->tid_data[tid];
if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d",
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index ed69e9b78e82..86989df69356 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -168,8 +168,8 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
goto out_free_resp;
}
- resp_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- if (WARN_ON_ONCE(resp_len != sizeof(pkt->hdr) + sizeof(*resp))) {
+ resp_len = iwl_rx_packet_payload_len(pkt);
+ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
ret = -EIO;
goto out_free_resp;
}
@@ -411,6 +411,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
mvm->status, table.valid);
}
+ IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
+
trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
table.data1, table.data2, table.data3,
table.blink1, table.blink2, table.ilink1,
@@ -486,22 +488,18 @@ void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
* this case to clear the state indicating that station creation is in
* progress.
*/
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
- u8 flags, bool init)
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
{
struct iwl_host_cmd cmd = {
.id = LQ_CMD,
.len = { sizeof(struct iwl_lq_cmd), },
- .flags = flags,
+ .flags = init ? CMD_SYNC : CMD_ASYNC,
.data = { lq, },
};
if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT))
return -EINVAL;
- if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
- return -EINVAL;
-
return iwl_mvm_send_cmd(mvm, &cmd);
}
@@ -522,6 +520,11 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int i;
lockdep_assert_held(&mvm->mutex);
+
+ /* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */
+ if (num_of_ant(iwl_fw_valid_rx_ant(mvm->fw)) == 1)
+ return;
+
mvmvif = iwl_mvm_vif_from_mac80211(vif);
mvmvif->smps_requests[req_type] = smps_request;
for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 86605027c41d..f47bcbe2945a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -297,6 +297,9 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
@@ -350,28 +353,41 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)},
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5112, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x510A, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 051268c037b1..e851f26fd44c 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -256,13 +256,13 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
* @hw_base: pci hardware address support
* @ucode_write_complete: indicates that the ucode has been copied.
* @ucode_write_waitq: wait queue for uCode load
- * @status - transport specific status flags
* @cmd_queue - command queue number
* @rx_buf_size_8k: 8 kB RX buffer size
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @rx_page_order: page order for receive buffer size
* @wd_timeout: queue watchdog timeout (jiffies)
* @reg_lock: protect hw register access
+ * @cmd_in_flight: true when we have a host command in flight
*/
struct iwl_trans_pcie {
struct iwl_rxq rxq;
@@ -274,7 +274,6 @@ struct iwl_trans_pcie {
__le32 *ict_tbl;
dma_addr_t ict_tbl_dma;
int ict_index;
- u32 inta;
bool use_ict;
struct isr_statistics isr_stats;
@@ -296,7 +295,6 @@ struct iwl_trans_pcie {
wait_queue_head_t ucode_write_waitq;
wait_queue_head_t wait_command_queue;
- unsigned long status;
u8 cmd_queue;
u8 cmd_fifo;
u8 n_no_reclaim_cmds;
@@ -313,24 +311,7 @@ struct iwl_trans_pcie {
/*protect hw register */
spinlock_t reg_lock;
-};
-
-/**
- * enum iwl_pcie_status: status of the PCIe transport
- * @STATUS_HCMD_ACTIVE: a SYNC command is being processed
- * @STATUS_DEVICE_ENABLED: APM is enabled
- * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
- * @STATUS_INT_ENABLED: interrupts are enabled
- * @STATUS_RFKILL: the HW RFkill switch is in KILL position
- * @STATUS_FW_ERROR: the fw is in error state
- */
-enum iwl_pcie_status {
- STATUS_HCMD_ACTIVE,
- STATUS_DEVICE_ENABLED,
- STATUS_TPOWER_PMI,
- STATUS_INT_ENABLED,
- STATUS_RFKILL,
- STATUS_FW_ERROR,
+ bool cmd_in_flight;
};
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
@@ -363,7 +344,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans);
/*****************************************************
* ICT - interrupt handling
******************************************************/
-irqreturn_t iwl_pcie_isr_ict(int irq, void *data);
+irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
@@ -399,8 +380,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans);
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
+ clear_bit(STATUS_INT_ENABLED, &trans->status);
/* disable interrupts from uCode/NIC to host */
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
@@ -417,14 +397,18 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
- set_bit(STATUS_INT_ENABLED, &trans_pcie->status);
+ set_bit(STATUS_INT_ENABLED, &trans->status);
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
- iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+ trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
static inline void iwl_wake_queue(struct iwl_trans *trans,
@@ -477,12 +461,31 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
-static inline void iwl_nic_error(struct iwl_trans *trans)
+static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
+ u32 reg, u32 mask, u32 value)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 v;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ WARN_ON_ONCE(value & ~mask);
+#endif
+
+ v = iwl_read32(trans, reg);
+ v &= ~mask;
+ v |= value;
+ iwl_write32(trans, reg, v);
+}
- set_bit(STATUS_FW_ERROR, &trans_pcie->status);
- iwl_op_mode_nic_error(trans->op_mode);
+static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
+}
+
+static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
#endif /* __iwl_trans_int_pcie_h__ */
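The __iwl_trans_pcie_set_bits_mask() helper added above is a guarded read-modify-write. The stand-alone sketch below replaces the CSR access with a plain variable to show what the WARN_ON(value & ~mask) check guards against; none of the names here belong to the driver.

#include <assert.h>
#include <stdint.h>

static uint32_t fake_reg;	/* stand-in for a CSR register */

static void set_bits_mask(uint32_t *reg, uint32_t mask, uint32_t value)
{
	assert(!(value & ~mask));	/* value must not set bits outside mask */
	*reg = (*reg & ~mask) | value;
}

int main(void)
{
	fake_reg = 0xff00ff00u;
	set_bits_mask(&fake_reg, 0x000000f0u, 0x00000050u);	/* rewrite one nibble */
	assert(fake_reg == 0xff00ff50u);
	return 0;
}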
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index be3995afa9d0..08c23d497a02 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -148,10 +148,9 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
- unsigned long flags;
u32 reg;
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (rxq->need_update == 0)
goto exit_unlock;
@@ -162,11 +161,8 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
rxq->write_actual = (rxq->write & ~0x7);
iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
} else {
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
/* If power-saving is in use, make sure device is awake */
- if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
+ if (test_bit(STATUS_TPOWER_PMI, &trans->status)) {
reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
@@ -193,7 +189,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
rxq->need_update = 0;
exit_unlock:
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
}
/*
@@ -212,7 +208,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
/*
* If the device isn't enabled - not need to try to add buffers...
@@ -222,10 +217,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
* stopped, we cannot access the HW (in particular not prph).
* So don't try to restock if the APM has been already stopped.
*/
- if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
return;
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
/* The overwritten rxb must be a used one */
rxb = rxq->queue[rxq->write];
@@ -242,7 +237,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
}
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
/* If the pre-allocated buffer pool is dropping low, schedule to
* refill it */
if (rxq->free_count <= RX_LOW_WATERMARK)
@@ -251,9 +246,9 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
}
}
@@ -273,16 +268,15 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
- unsigned long flags;
gfp_t gfp_mask = priority;
while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
return;
}
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
if (rxq->free_count > RX_LOW_WATERMARK)
gfp_mask |= __GFP_NOWARN;
@@ -311,17 +305,17 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
return;
}
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
list);
list_del(&rxb->list);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
BUG_ON(rxb->page);
rxb->page = page;
@@ -332,9 +326,9 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
list_add(&rxb->list, &rxq->rx_used);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
@@ -343,12 +337,12 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
/* and also 256 byte aligned! */
BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
}
}
@@ -382,13 +376,12 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_pcie_rxq_restock(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
}
static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
@@ -514,7 +507,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
int i, err;
- unsigned long flags;
if (!rxq->bd) {
err = iwl_pcie_rx_alloc(trans);
@@ -522,7 +514,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
return err;
}
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
@@ -538,16 +530,16 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->read = rxq->write = 0;
rxq->write_actual = 0;
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
iwl_pcie_rx_replenish(trans);
iwl_pcie_rx_hw_init(trans, rxq);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
rxq->need_update = 1;
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
return 0;
}
@@ -556,7 +548,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- unsigned long flags;
/*if rxq->bd is NULL, it means that nothing has been allocated,
* exit now */
@@ -567,9 +558,9 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
cancel_work_sync(&trans_pcie->rx_replenish);
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
iwl_pcie_rxq_free_rbs(trans);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
rxq->bd, rxq->bd_dma);
@@ -592,7 +583,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
- unsigned long flags;
bool page_stolen = false;
int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
u32 offset = 0;
@@ -625,7 +615,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
pkt->hdr.cmd);
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ len = iwl_rx_packet_len(pkt);
len += sizeof(u32); /* account for status word */
trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
@@ -694,7 +684,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
/* Reuse the page if possible. For notification packets and
* SKBs that fail to Rx correctly, add them back into the
* rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (rxb->page != NULL) {
rxb->page_dma =
dma_map_page(trans->dev, rxb->page, 0,
@@ -715,7 +705,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
} else
list_add_tail(&rxb->list, &rxq->rx_used);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
}
/*
@@ -791,7 +781,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
APMS_CLK_VAL_MRB_FUNC_MODE) ||
(iwl_read_prph(trans, APMG_PS_CTRL_REG) &
APMG_PS_CTRL_VAL_RESET_REQ))) {
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
iwl_op_mode_wimax_active(trans->op_mode);
wake_up(&trans_pcie->wait_command_queue);
return;
@@ -800,14 +790,95 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
iwl_pcie_dump_csr(trans);
iwl_dump_fh(trans, NULL);
- /* set the ERROR bit before we wake up the caller */
- set_bit(STATUS_FW_ERROR, &trans_pcie->status);
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
- wake_up(&trans_pcie->wait_command_queue);
-
local_bh_disable();
- iwl_nic_error(trans);
+ /* The STATUS_FW_ERROR bit is set in this function. This must happen
+ * before we wake up the command caller, to ensure a proper cleanup. */
+ iwl_trans_fw_error(trans);
local_bh_enable();
+
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ wake_up(&trans_pcie->wait_command_queue);
+}
+
+static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 inta;
+
+ lockdep_assert_held(&trans_pcie->irq_lock);
+
+ trace_iwlwifi_dev_irq(trans->dev);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(trans, CSR_INT);
+
+ /* the thread will service interrupts and re-enable them */
+ return inta;
+}
+
+/* a device (PCI-E) page is 4096 bytes long */
+#define ICT_SHIFT 12
+#define ICT_SIZE (1 << ICT_SHIFT)
+#define ICT_COUNT (ICT_SIZE / sizeof(u32))
+
+/* Interrupt handling using the ICT table: with this mechanism the driver
+ * stops reading the expensive INTA register to learn the device's interrupt
+ * causes. Instead, the device writes them into the ICT table in DRAM,
+ * increments its index and fires the interrupt. The driver ORs all ICT
+ * entries from the current index up to the first entry with a 0 value; the
+ * result is the set of interrupts to service. The consumed entries are then
+ * cleared back to 0 and the index is updated.
+ */
+static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 inta;
+ u32 val = 0;
+ u32 read;
+
+ trace_iwlwifi_dev_irq(trans->dev);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
+ if (!read)
+ return 0;
+
+ /*
+ * Collect all entries up to the first 0, starting from ict_index;
+ * note we already read at ict_index.
+ */
+ do {
+ val |= read;
+ IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
+ trans_pcie->ict_index, read);
+ trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
+ trans_pcie->ict_index =
+ iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
+
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
+ read);
+ } while (read);
+
+ /* We should not get this value, just ignore it. */
+ if (val == 0xffffffff)
+ val = 0;
+
+ /*
+ * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+ * (bit 15 before shifting it to 31) to clear when using interrupt
+ * coalescing. fortunately, bits 18 and 19 stay set when this happens
+ * so we use them to decide on the real state of the Rx bit.
+ * In other words, bit 15 is set if bit 18 or bit 19 are set.
+ */
+ if (val & 0xC0000)
+ val |= 0x8000;
+
+ inta = (0xff & val) | ((0xff00 & val) << 16);
+ return inta;
}
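As a worked example of the two fix-ups at the end of iwl_pcie_int_cause_ict() above (restoring the Rx bit from bits 18/19 and remapping the two ICT bytes into CSR_INT layout), assuming a raw ICT value where the coalescing bug dropped bit 15; only the masks are taken from the hunk, the rest is illustrative.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Raw ORed ICT value: bits 18/19 set, bit 15 lost to the h/w bug. */
	uint32_t val = 0x000c0002u;
	uint32_t inta;

	if (val & 0xC0000u)		/* bits 18 or 19 set ... */
		val |= 0x8000u;		/* ... so restore the Rx bit (15) */

	/* low byte stays in place, high byte moves up to bits 24-31 */
	inta = (0xffu & val) | ((0xff00u & val) << 16);

	assert(val == 0x000c8002u);
	assert(inta == 0x80000002u);
	return 0;
}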
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
@@ -817,12 +888,61 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
u32 handled = 0;
- unsigned long flags;
u32 i;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
+
+ /* If the DRAM interrupt table is not set up yet,
+ * fall back to the legacy INTA read.
+ */
+ if (likely(trans_pcie->use_ict))
+ inta = iwl_pcie_int_cause_ict(trans);
+ else
+ inta = iwl_pcie_int_cause_non_ict(trans);
+
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
+ IWL_DEBUG_ISR(trans,
+ "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
+ inta, trans_pcie->inta_mask,
+ iwl_read32(trans, CSR_INT_MASK),
+ iwl_read32(trans, CSR_FH_INT_STATUS));
+ if (inta & (~trans_pcie->inta_mask))
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt (0x%08x)\n",
+ inta & (~trans_pcie->inta_mask));
+ }
+
+ inta &= trans_pcie->inta_mask;
+
+ /*
+ * Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC.
+ */
+ if (unlikely(!inta)) {
+ IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+ /*
+ * Re-enable interrupts here since we don't
+ * have anything to service
+ */
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ iwl_enable_interrupts(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+ return IRQ_NONE;
+ }
+
+ if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+ /*
+ * Hardware disappeared. It might have
+ * already raised an interrupt.
+ */
+ IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ spin_unlock(&trans_pcie->irq_lock);
+ goto out;
+ }
/* Ack/clear/reset pending uCode interrupts.
* Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
@@ -835,19 +955,13 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
* hardware bugs here by ACKing all the possible interrupts so that
* interrupt coalescing can still be achieved.
*/
- iwl_write32(trans, CSR_INT,
- trans_pcie->inta | ~trans_pcie->inta_mask);
-
- inta = trans_pcie->inta;
+ iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
if (iwl_have_debug_level(IWL_DL_ISR))
IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
inta, iwl_read32(trans, CSR_INT_MASK));
- /* saved interrupt in inta variable now we can reset trans_pcie->inta */
- trans_pcie->inta = 0;
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
/* Now service all interrupt bits discovered above. */
if (inta & CSR_INT_BIT_HW_ERR) {
@@ -894,14 +1008,14 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
if (hw_rfkill) {
- set_bit(STATUS_RFKILL, &trans_pcie->status);
- if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status))
+ set_bit(STATUS_RFKILL, &trans->status);
+ if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status))
IWL_DEBUG_RF_KILL(trans,
"Rfkill while SYNC HCMD in flight\n");
wake_up(&trans_pcie->wait_command_queue);
} else {
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
+ clear_bit(STATUS_RFKILL, &trans->status);
}
handled |= CSR_INT_BIT_RF_KILL;
@@ -1005,7 +1119,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* Re-enable all interrupts */
/* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
iwl_enable_interrupts(trans);
/* Re-enable RF_KILL if it occurred */
else if (handled & CSR_INT_BIT_RF_KILL)
@@ -1022,11 +1136,6 @@ out:
*
******************************************************************************/
-/* a device (PCI-E) page is 4096 bytes long */
-#define ICT_SHIFT 12
-#define ICT_SIZE (1 << ICT_SHIFT)
-#define ICT_COUNT (ICT_SIZE / sizeof(u32))
-
/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
@@ -1051,7 +1160,7 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->ict_tbl =
- dma_alloc_coherent(trans->dev, ICT_SIZE,
+ dma_zalloc_coherent(trans->dev, ICT_SIZE,
&trans_pcie->ict_tbl_dma,
GFP_KERNEL);
if (!trans_pcie->ict_tbl)
@@ -1063,17 +1172,10 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
return -EINVAL;
}
- IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
- (unsigned long long)trans_pcie->ict_tbl_dma);
-
- IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);
-
- /* reset table and index to all 0 */
- memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
- trans_pcie->ict_index = 0;
+ IWL_DEBUG_ISR(trans, "ict dma addr %Lx ict vir addr %p\n",
+ (unsigned long long)trans_pcie->ict_tbl_dma,
+ trans_pcie->ict_tbl);
- /* add periodic RX interrupt */
- trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
return 0;
}
@@ -1084,12 +1186,11 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
- unsigned long flags;
if (!trans_pcie->ict_tbl)
return;
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
@@ -1106,124 +1207,26 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
trans_pcie->ict_index = 0;
iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
iwl_enable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
}
/* Device is going down disable ict interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
trans_pcie->use_ict = false;
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
}
-/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
-static irqreturn_t iwl_pcie_isr(int irq, void *data)
+irqreturn_t iwl_pcie_isr(int irq, void *data)
{
struct iwl_trans *trans = data;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 inta, inta_mask;
- irqreturn_t ret = IRQ_NONE;
-
- lockdep_assert_held(&trans_pcie->irq_lock);
-
- trace_iwlwifi_dev_irq(trans->dev);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the irq thread will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(trans, CSR_INT_MASK);
- iwl_write32(trans, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(trans, CSR_INT);
-
- if (inta & (~inta_mask)) {
- IWL_DEBUG_ISR(trans,
- "We got a masked interrupt (0x%08x)...Ack and ignore\n",
- inta & (~inta_mask));
- iwl_write32(trans, CSR_INT, inta & (~inta_mask));
- inta &= inta_mask;
- }
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta) {
- IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- return IRQ_HANDLED;
- }
-
- if (iwl_have_debug_level(IWL_DL_ISR))
- IWL_DEBUG_ISR(trans,
- "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask,
- iwl_read32(trans, CSR_FH_INT_STATUS));
-
- trans_pcie->inta |= inta;
- /* the thread will service interrupts and re-enable them */
- if (likely(inta))
- return IRQ_WAKE_THREAD;
-
- ret = IRQ_HANDLED;
-
-none:
- /* re-enable interrupts here since we don't have anything to service. */
- /* only Re-enable if disabled by irq and no schedules tasklet. */
- if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta)
- iwl_enable_interrupts(trans);
-
- return ret;
-}
-
-/* interrupt handler using ict table, with this interrupt driver will
- * stop using INTA register to get device's interrupt, reading this register
- * is expensive, device will write interrupts in ICT dram table, increment
- * index then will fire interrupt to driver, driver will OR all ICT table
- * entries from current index up to table entry with 0 value. the result is
- * the interrupt we need to service, driver will set the entries back to 0 and
- * set index.
- */
-irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
-{
- struct iwl_trans *trans = data;
- struct iwl_trans_pcie *trans_pcie;
- u32 inta;
- u32 val = 0;
- u32 read;
- unsigned long flags;
- irqreturn_t ret = IRQ_NONE;
if (!trans)
return IRQ_NONE;
- trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
- /* dram interrupt table not set yet,
- * use legacy interrupt.
- */
- if (unlikely(!trans_pcie->use_ict)) {
- ret = iwl_pcie_isr(irq, data);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return ret;
- }
-
- trace_iwlwifi_dev_irq(trans->dev);
-
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
* If we have something to service, the tasklet will re-enable ints.
@@ -1231,73 +1234,5 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
*/
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
- if (!read) {
- IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- /*
- * Collect all entries up to the first 0, starting from ict_index;
- * note we already read at ict_index.
- */
- do {
- val |= read;
- IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
- trans_pcie->ict_index, read);
- trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
- trans_pcie->ict_index =
- iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
-
- read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
- read);
- } while (read);
-
- /* We should not get this value, just ignore it. */
- if (val == 0xffffffff)
- val = 0;
-
- /*
- * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
- * (bit 15 before shifting it to 31) to clear when using interrupt
- * coalescing. fortunately, bits 18 and 19 stay set when this happens
- * so we use them to decide on the real state of the Rx bit.
- * In order words, bit 15 is set if bit 18 or bit 19 are set.
- */
- if (val & 0xC0000)
- val |= 0x8000;
-
- inta = (0xff & val) | ((0xff00 & val) << 16);
- IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
- inta, trans_pcie->inta_mask, val);
- if (iwl_have_debug_level(IWL_DL_ISR))
- IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
- iwl_read32(trans, CSR_INT_MASK));
-
- inta &= trans_pcie->inta_mask;
- trans_pcie->inta |= inta;
-
- /* iwl_pcie_tasklet() will service interrupts and re-enable them */
- if (likely(inta)) {
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return IRQ_WAKE_THREAD;
- }
-
- ret = IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service.
- * only Re-enable if disabled by irq.
- */
- if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta)
- iwl_enable_interrupts(trans);
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return ret;
+ return IRQ_WAKE_THREAD;
}
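The hunks above fold the ICT reading loop (removed from iwl_pcie_isr_ict()) into iwl_pcie_int_cause_ict(), which the threaded handler now calls. The scheme itself is unchanged: the device writes its interrupt causes into a DMA table and fires a single interrupt; the driver ORs all non-zero entries starting at the current index, zeroing them as it goes, and rebuilds a CSR_INT-style cause word. A minimal stand-alone sketch of that scan (plain C; it omits the le32 conversion and the hardware-bug workarounds the real code keeps):

#include <stdint.h>

#define ICT_COUNT 1024	/* one 4096-byte device page of u32 entries */

/* OR together all non-zero entries from *index onward, clearing them and
 * advancing the index with wrap-around; returns the accumulated cause bits
 * in CSR_INT layout (low byte stays, next byte moves to the top half). */
static uint32_t ict_collect(uint32_t *ict_tbl, unsigned int *index)
{
	uint32_t val = 0, read;

	while ((read = ict_tbl[*index]) != 0) {
		val |= read;
		ict_tbl[*index] = 0;
		*index = (*index + 1) % ICT_COUNT;
	}

	return (val & 0xff) | ((val & 0xff00) << 16);
}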
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index cde9c16f6e4f..f9507807b486 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -75,33 +75,6 @@
#include "iwl-agn-hw.h"
#include "internal.h"
-static void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
- u32 reg, u32 mask, u32 value)
-{
- u32 v;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- WARN_ON_ONCE(value & ~mask);
-#endif
-
- v = iwl_read32(trans, reg);
- v &= ~mask;
- v |= value;
- iwl_write32(trans, reg, v);
-}
-
-static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
- u32 reg, u32 mask)
-{
- __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
-}
-
-static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
- u32 reg, u32 mask)
-{
- __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
-}
-
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -150,7 +123,6 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
*/
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret = 0;
IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
@@ -206,6 +178,28 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
goto out;
}
+ if (trans->cfg->host_interrupt_operation_mode) {
+ /*
+ * This is a bit of an abuse - this workaround is needed for 7260 / 3160
+ * only, so we key it off host_interrupt_operation_mode even though it is
+ * not actually related to host_interrupt_operation_mode.
+ *
+ * Enable the oscillator to count wake up time for L1 exit. This
+ * consumes slightly more power (100uA) - but allows to be sure
+ * that we wake up from L1 on time.
+ *
+ * This looks weird: read twice the same register, discard the
+ * value, set a bit, and yet again, read that same register
+ * just to discard the value. But that's the way the hardware
+ * seems to like it.
+ */
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_read_prph(trans, OSC_CLK);
+ }
+
/*
* Enable DMA clock and wait for it to stabilize.
*
@@ -223,7 +217,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
/* Clear the interrupt in APMG if the NIC is in RFKILL */
iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);
- set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+ set_bit(STATUS_DEVICE_ENABLED, &trans->status);
out:
return ret;
@@ -249,10 +243,9 @@ static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
- clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+ clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
/* Stop device's DMA activity */
iwl_pcie_apm_stop_master(trans);
@@ -273,13 +266,12 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans)
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
/* nic_init */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_pcie_apm_init(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_set_pwr(trans, false);
@@ -582,7 +574,6 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
bool hw_rfkill;
@@ -592,16 +583,14 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
return -EIO;
}
- clear_bit(STATUS_FW_ERROR, &trans_pcie->status);
-
iwl_enable_rfkill_int(trans);
/* If platform's RF_KILL switch is NOT set to KILL */
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans_pcie->status);
+ set_bit(STATUS_RFKILL, &trans->status);
else
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
+ clear_bit(STATUS_RFKILL, &trans->status);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
if (hw_rfkill && !run_in_rfkill)
return -ERFKILL;
@@ -640,12 +629,14 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
+ bool hw_rfkill, was_hw_rfkill;
+
+ was_hw_rfkill = iwl_is_rfkill_set(trans);
/* tell the device to stop sending interrupts */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
/* device going down, Stop using ICT table */
iwl_pcie_disable_ict(trans);
@@ -657,7 +648,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
* restart. So don't process again if the device is
* already dead.
*/
- if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
iwl_pcie_tx_stop(trans);
iwl_pcie_rx_stop(trans);
@@ -677,21 +668,45 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Upon stop, the APM issues an interrupt if HW RF kill is set.
* Clean again the interrupt here
*/
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
- iwl_enable_rfkill_int(trans);
+ spin_unlock(&trans_pcie->irq_lock);
/* stop and reset the on-board processor */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
/* clear all status bits */
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
- clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
- clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
- clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ clear_bit(STATUS_INT_ENABLED, &trans->status);
+ clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
+ clear_bit(STATUS_RFKILL, &trans->status);
+
+ /*
+ * Even if we stop the HW, we still want the RF kill
+ * interrupt
+ */
+ iwl_enable_rfkill_int(trans);
+
+ /*
+ * Check again since the RF kill state may have changed while
+ * all the interrupts were disabled, in this case we couldn't
+ * receive the RF kill interrupt and update the state in the
+ * op_mode.
+ * Don't call the op_mode if the rfkill state hasn't changed.
+ * This allows the op_mode to call stop_device from the rfkill
+ * notification without endless recursion. Under very rare
+ * circumstances, we might have a small recursion if the rfkill
+ * state changed exactly now while we were called from stop_device.
+ * This is very unlikely but can happen and is supported.
+ */
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans->status);
+ if (hw_rfkill != was_hw_rfkill)
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
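With stop_hw() gone (see the later hunks in this file), the rfkill bookkeeping it used to do now lives entirely in stop_device(): sample the switch on entry, shut the device down with interrupts off, re-enable only the rfkill interrupt, sample the switch again, and notify the op_mode only if the state actually changed. The "only on change" condition is what bounds the stop_device() -> rfkill notification -> stop_device() recursion mentioned in the comment. Condensed, with hypothetical helper names but the same control flow as the hunk:

bool was_rfkill = read_rfkill_switch();		/* sampled on entry */

/* ... stop DMA, disable interrupts, reset the on-board processor ... */

enable_rfkill_interrupt();			/* we still want rfkill events */

bool now_rfkill = read_rfkill_switch();		/* may have flipped meanwhile */
if (now_rfkill != was_rfkill)
	notify_op_mode_rfkill(now_rfkill);	/* only on a real change */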
@@ -776,7 +791,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
int err;
@@ -787,7 +801,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
}
/* Reset the entire device */
- iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(10, 15);
@@ -798,53 +812,30 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans_pcie->status);
+ set_bit(STATUS_RFKILL, &trans->status);
else
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
+ clear_bit(STATUS_RFKILL, &trans->status);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
return 0;
}
-static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
- bool op_mode_leaving)
+static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool hw_rfkill;
- unsigned long flags;
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ /* disable interrupts - don't enable HW RF kill interrupt */
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_apm_stop(trans);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_disable_ict(trans);
-
- if (!op_mode_leaving) {
- /*
- * Even if we stop the HW, we still want the RF kill
- * interrupt
- */
- iwl_enable_rfkill_int(trans);
-
- /*
- * Check again since the RF kill state may have changed while
- * all the interrupts were disabled, in this case we couldn't
- * receive the RF kill interrupt and update the state in the
- * op_mode.
- */
- hw_rfkill = iwl_is_rfkill_set(trans);
- if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans_pcie->status);
- else
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
- }
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -928,12 +919,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
if (state)
- set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+ set_bit(STATUS_TPOWER_PMI, &trans->status);
else
- clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
@@ -944,6 +933,9 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
+ if (trans_pcie->cmd_in_flight)
+ goto out;
+
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -983,6 +975,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
}
}
+out:
/*
* Fool sparse by faking we release the lock - sparse will
* track nic_access anyway.
@@ -1004,6 +997,9 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
*/
__acquire(&trans_pcie->reg_lock);
+ if (trans_pcie->cmd_in_flight)
+ goto out;
+
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
@@ -1013,6 +1009,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
* scheduled on different CPUs (after we drop reg_lock).
*/
mmiowb();
+out:
spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
@@ -1457,7 +1454,7 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
static const struct iwl_trans_ops trans_ops_pcie = {
.start_hw = iwl_trans_pcie_start_hw,
- .stop_hw = iwl_trans_pcie_stop_hw,
+ .op_mode_leave = iwl_trans_pcie_op_mode_leave,
.fw_alive = iwl_trans_pcie_fw_alive,
.start_fw = iwl_trans_pcie_start_fw,
.stop_device = iwl_trans_pcie_stop_device,
@@ -1609,7 +1606,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
if (iwl_pcie_alloc_ict(trans))
goto out_free_cmd_pool;
- err = request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
+ err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
iwl_pcie_irq_handler,
IRQF_SHARED, DRV_NAME, trans);
if (err) {
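This registration change is the visible half of the ISR rework in rx.c above: the quick handler is now the trimmed-down iwl_pcie_isr(), which only masks CSR_INT_MASK and returns IRQ_WAKE_THREAD, while all cause reading (ICT or legacy) and servicing happens in iwl_pcie_irq_handler() in the IRQ thread. The generic shape of such a split, using the real request_threaded_irq() API but hypothetical handler bodies:

#include <linux/interrupt.h>

static irqreturn_t quick_isr(int irq, void *dev_id)
{
	/* hard-IRQ context: silence the device and defer the real work */
	mask_device_interrupts(dev_id);		/* hypothetical helper */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t thread_handler(int irq, void *dev_id)
{
	/* process context: read the cause, service it, re-enable interrupts */
	service_device_interrupts(dev_id);	/* hypothetical helper */
	return IRQ_HANDLED;
}

/* at probe time: */
err = request_threaded_irq(pdev->irq, quick_isr, thread_handler,
			   IRQF_SHARED, DRV_NAME, trans);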
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 0adde919a258..3d549008b3e2 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -207,7 +207,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
le32_to_cpu(txq->scratchbufs[i].scratch));
- iwl_nic_error(trans);
+ iwl_trans_fw_error(trans);
}
/*
@@ -289,21 +289,21 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
*/
void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->q.id;
if (txq->need_update == 0)
return;
- if (trans->cfg->base_params->shadow_reg_enable) {
+ if (trans->cfg->base_params->shadow_reg_enable ||
+ txq_id == trans_pcie->cmd_queue) {
/* shadow register enabled */
iwl_write32(trans, HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
} else {
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
/* if we're trying to save power */
- if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
+ if (test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/* wake up nic if it's powered down ...
* uCode will wake up, and interrupt us again, so next
* time we'll skip this part. */
@@ -739,10 +739,9 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ch, txq_id, ret;
- unsigned long flags;
/* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_pcie_txq_set_sched(trans, 0);
@@ -759,13 +758,19 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
iwl_read_direct32(trans,
FH_TSSR_TX_STATUS_REG));
}
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
- if (!trans_pcie->txq) {
- IWL_WARN(trans,
- "Stopping tx queues that aren't allocated...\n");
+ /*
+ * This function can be called before the op_mode disabled the
+ * queues. This happens when we have an rfkill interrupt.
+ * Since we stop Tx altogether - mark the queues as stopped.
+ */
+ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+ /* This can happen: start_hw, stop_device */
+ if (!trans_pcie->txq)
return 0;
- }
/* Unmap DMA from host system and free skb's */
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
@@ -867,7 +872,6 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
int txq_id, slots_num;
- unsigned long flags;
bool alloc = false;
if (!trans_pcie->txq) {
@@ -877,7 +881,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
alloc = true;
}
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
/* Turn off all Tx DMA fifos */
iwl_write_prph(trans, SCD_TXFACT, 0);
@@ -886,7 +890,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
trans_pcie->kw.dma >> 4);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
@@ -1005,6 +1009,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
struct iwl_queue *q = &txq->q;
+ unsigned long flags;
int nfreed = 0;
lockdep_assert_held(&txq->lock);
@@ -1023,10 +1028,20 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
idx, q->write_ptr, q->read_ptr);
- iwl_nic_error(trans);
+ iwl_trans_fw_error(trans);
}
}
+ if (q->read_ptr == q->write_ptr) {
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ WARN_ON(!trans_pcie->cmd_in_flight);
+ trans_pcie->cmd_in_flight = false;
+ __iwl_trans_pcie_clear_bit(trans,
+ CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ }
+
iwl_pcie_txq_progress(trans_pcie, txq);
}
@@ -1143,8 +1158,15 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
static const u32 zero_val[4] = {};
+ /*
+ * Upon HW Rfkill - we stop the device, and then stop the queues
+ * in the op_mode. Just for the sake of the simplicity of the op_mode,
+ * allow the op_mode to call txq_disable after it already called
+ * stop_device.
+ */
if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
- WARN_ONCE(1, "queue %d not used", txq_id);
+ WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+ "queue %d not used", txq_id);
return;
}
@@ -1178,12 +1200,13 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
+ unsigned long flags;
void *dup_buf = NULL;
dma_addr_t phys_addr;
int idx;
u16 copy_size, cmd_size, scratch_size;
bool had_nocopy = false;
- int i;
+ int i, ret;
u32 cmd_pos;
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
@@ -1381,10 +1404,38 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+
+ /*
+ * wake up the NIC to make sure that the firmware will see the host
+ * command - we will let the NIC sleep once all the host commands
+ * returned.
+ */
+ if (!trans_pcie->cmd_in_flight) {
+ trans_pcie->cmd_in_flight = true;
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
+ 15000);
+ if (ret < 0) {
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ trans_pcie->cmd_in_flight = false;
+ idx = -EIO;
+ goto out;
+ }
+ }
+
/* Increment and update queue's write index */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+
out:
spin_unlock_bh(&txq->lock);
free_dup_buf:
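The cmd_in_flight logic added in this hunk (and its counterpart in iwl_pcie_cmdq_reclaim() above) replaces the old COMMAND_POKE_TIMEOUT polling loop: before the first outstanding host command is queued, the driver asserts MAC_ACCESS_REQ and waits for the MAC clock, and it drops the request only once read_ptr catches up with write_ptr, so the NIC stays awake exactly while commands are in flight. Roughly, with hypothetical register helpers standing in for __iwl_trans_pcie_set_bit()/iwl_poll_bit():

/* before ringing the doorbell for the first outstanding command */
if (!cmd_in_flight) {
	cmd_in_flight = true;
	set_gp_cntrl_bit(MAC_ACCESS_REQ);		/* ask the MAC to wake up */
	if (poll_gp_cntrl(MAC_CLOCK_READY,
			  MAC_CLOCK_READY | GOING_TO_SLEEP, 15000) < 0) {
		clear_gp_cntrl_bit(MAC_ACCESS_REQ);	/* wake-up failed, back off */
		cmd_in_flight = false;
		return -EIO;
	}
}

/* ... later, in cmdq_reclaim(), once read_ptr == write_ptr ... */
cmd_in_flight = false;
clear_gp_cntrl_bit(MAC_ACCESS_REQ);			/* let the NIC sleep again */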
@@ -1449,12 +1500,12 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
iwl_pcie_cmdq_reclaim(trans, txq_id, index);
if (!(meta->flags & CMD_ASYNC)) {
- if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
+ if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
IWL_WARN(trans,
"HCMD_ACTIVE already clear for command %s\n",
get_cmd_string(trans_pcie, cmd->hdr.cmd));
}
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(trans_pcie, cmd->hdr.cmd));
wake_up(&trans_pcie->wait_command_queue);
@@ -1466,7 +1517,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
}
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
-#define COMMAND_POKE_TIMEOUT (HZ / 10)
static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
@@ -1494,13 +1544,12 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cmd_idx;
int ret;
- int timeout = HOST_COMPLETE_TIMEOUT;
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
get_cmd_string(trans_pcie, cmd->id));
- if (WARN(test_and_set_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status),
+ if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
"Command %s: a command is already active!\n",
get_cmd_string(trans_pcie, cmd->id)))
return -EIO;
@@ -1511,64 +1560,39 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
if (cmd_idx < 0) {
ret = cmd_idx;
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_ERR(trans,
"Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(trans_pcie, cmd->id), ret);
return ret;
}
- while (timeout > 0) {
- unsigned long flags;
-
- timeout -= COMMAND_POKE_TIMEOUT;
- ret = wait_event_timeout(trans_pcie->wait_command_queue,
- !test_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status),
- COMMAND_POKE_TIMEOUT);
- if (ret)
- break;
- /* poke the device - it may have lost the command */
- if (iwl_trans_grab_nic_access(trans, true, &flags)) {
- iwl_trans_release_nic_access(trans, &flags);
- IWL_DEBUG_INFO(trans,
- "Tried to wake NIC for command %s\n",
- get_cmd_string(trans_pcie, cmd->id));
- } else {
- IWL_ERR(trans, "Failed to poke NIC for command %s\n",
- get_cmd_string(trans_pcie, cmd->id));
- break;
- }
- }
-
+ ret = wait_event_timeout(trans_pcie->wait_command_queue,
+ !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ HOST_COMPLETE_TIMEOUT);
if (!ret) {
- if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
- struct iwl_txq *txq =
- &trans_pcie->txq[trans_pcie->cmd_queue];
- struct iwl_queue *q = &txq->q;
+ struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_queue *q = &txq->q;
- IWL_ERR(trans,
- "Error sending %s: time out after %dms.\n",
- get_cmd_string(trans_pcie, cmd->id),
- jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+ IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+ get_cmd_string(trans_pcie, cmd->id),
+ jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
- IWL_ERR(trans,
- "Current CMD queue read_ptr %d write_ptr %d\n",
- q->read_ptr, q->write_ptr);
+ IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+ q->read_ptr, q->write_ptr);
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
- IWL_DEBUG_INFO(trans,
- "Clearing HCMD_ACTIVE for command %s\n",
- get_cmd_string(trans_pcie, cmd->id));
- ret = -ETIMEDOUT;
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+ get_cmd_string(trans_pcie, cmd->id));
+ ret = -ETIMEDOUT;
- iwl_nic_error(trans);
+ iwl_trans_fw_error(trans);
- goto cancel;
- }
+ goto cancel;
}
- if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
+ if (test_bit(STATUS_FW_ERROR, &trans->status)) {
IWL_ERR(trans, "FW error in SYNC CMD %s\n",
get_cmd_string(trans_pcie, cmd->id));
dump_stack();
@@ -1577,7 +1601,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
}
if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+ test_bit(STATUS_RFKILL, &trans->status)) {
IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
ret = -ERFKILL;
goto cancel;
@@ -1614,13 +1638,8 @@ cancel:
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
- return -EIO;
-
if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+ test_bit(STATUS_RFKILL, &trans->status)) {
IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
cmd->id);
return -ERFKILL;
@@ -1674,7 +1693,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq->entries[q->write_ptr].skb = skb;
txq->entries[q->write_ptr].cmd = dev_cmd;
- dev_cmd->hdr.cmd = REPLY_TX;
dev_cmd->hdr.sequence =
cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
INDEX_TO_SEQ(q->write_ptr)));
diff --git a/drivers/net/wireless/libertas/README b/drivers/net/wireless/libertas/README
index 91f2ca90c70f..1a554a685e91 100644
--- a/drivers/net/wireless/libertas/README
+++ b/drivers/net/wireless/libertas/README
@@ -8,9 +8,8 @@
Ltd. under the terms of the GNU General Public License Version 2, June 1991
(the "License"). You may use, redistribute and/or modify this File in
accordance with the terms and conditions of the License, a copy of which
- is available along with the File in the license.txt file or by writing to
- the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 or on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+ is available along with the File in the license.txt file or on the worldwide
+ web at http://www.gnu.org/licenses/gpl.txt.
THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 116f4aba08d6..32f75007a825 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1268,14 +1268,9 @@ static struct cfg80211_scan_request *
_new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme)
{
struct cfg80211_scan_request *creq = NULL;
- int i, n_channels = 0;
+ int i, n_channels = ieee80211_get_num_supported_channels(wiphy);
enum ieee80211_band band;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
- if (wiphy->bands[band])
- n_channels += wiphy->bands[band]->n_channels;
- }
-
creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
n_channels * sizeof(void *),
GFP_ATOMIC);
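The libertas change above swaps an open-coded per-band count for the mac80211 helper ieee80211_get_num_supported_channels(wiphy). The removed lines spell out exactly what the helper computes; for reference, the equivalent count is:

int n_channels = 0;
enum ieee80211_band band;

for (band = 0; band < IEEE80211_NUM_BANDS; band++)
	if (wiphy->bands[band])
		n_channels += wiphy->bands[band]->n_channels;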
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 991238afd1b6..58c6ee5de98f 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -849,7 +849,7 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
card->started = true;
/* Tell PM core that we don't need the card to be
* powered now */
- pm_runtime_put_noidle(&func->dev);
+ pm_runtime_put(&func->dev);
}
}
@@ -907,8 +907,8 @@ static int if_sdio_power_on(struct if_sdio_card *card)
sdio_release_host(func);
ret = if_sdio_prog_firmware(card);
if (ret) {
- sdio_disable_func(func);
- return ret;
+ sdio_claim_host(func);
+ goto disable;
}
return 0;
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 83669151bb82..f11728a866ff 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -93,7 +93,6 @@ static void free_if_spi_card(struct if_spi_card *card)
list_del(&packet->list);
kfree(packet);
}
- spi_set_drvdata(card->spi, NULL);
kfree(card);
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c72438bb2faf..69d4c3179d04 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -159,10 +159,15 @@ static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = {
.reg_rules = {
REG_RULE(2412-10, 2462+10, 40, 0, 20, 0),
REG_RULE(5725-10, 5850+10, 40, 0, 30,
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+ NL80211_RRF_NO_IR),
}
};
+static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = {
+ &hwsim_world_regdom_custom_01,
+ &hwsim_world_regdom_custom_02,
+};
+
struct hwsim_vif_priv {
u32 magic;
u8 bssid[ETH_ALEN];
@@ -321,8 +326,52 @@ static const struct ieee80211_rate hwsim_rates[] = {
{ .bitrate = 540 }
};
+static const struct ieee80211_iface_limit hwsim_if_limits[] = {
+ { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
+ { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
+};
+
+static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
+ { .max = 8, .types = BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination hwsim_if_comb[] = {
+ {
+ .limits = hwsim_if_limits,
+ .n_limits = ARRAY_SIZE(hwsim_if_limits),
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+ },
+ {
+ .limits = hwsim_if_dfs_limits,
+ .n_limits = ARRAY_SIZE(hwsim_if_dfs_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
+ }
+};
+
static spinlock_t hwsim_radio_lock;
static struct list_head hwsim_radios;
+static int hwsim_radio_idx;
+
+static struct platform_driver mac80211_hwsim_driver = {
+ .driver = {
+ .name = "mac80211_hwsim",
+ .owner = THIS_MODULE,
+ },
+};
struct mac80211_hwsim_data {
struct list_head list;
@@ -332,8 +381,10 @@ struct mac80211_hwsim_data {
struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)];
struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
+ struct ieee80211_iface_combination if_combination;
struct mac_address addresses[2];
+ int channels, idx;
struct ieee80211_channel *tmp_chan;
struct delayed_work roc_done;
@@ -353,7 +404,6 @@ struct mac80211_hwsim_data {
} ps;
bool ps_poll_pending;
struct dentry *debugfs;
- struct dentry *debugfs_ps;
struct sk_buff_head pending; /* packets pending */
/*
@@ -362,7 +412,6 @@ struct mac80211_hwsim_data {
* radio can be in more then one group.
*/
u64 group;
- struct dentry *debugfs_group;
int power_level;
@@ -403,21 +452,179 @@ static struct genl_family hwsim_genl_family = {
/* MAC80211_HWSIM netlink policy */
static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
- [HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC,
- .len = 6*sizeof(u8) },
- [HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC,
- .len = 6*sizeof(u8) },
+ [HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
+ [HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
[HWSIM_ATTR_FRAME] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
[HWSIM_ATTR_FLAGS] = { .type = NLA_U32 },
[HWSIM_ATTR_RX_RATE] = { .type = NLA_U32 },
[HWSIM_ATTR_SIGNAL] = { .type = NLA_U32 },
[HWSIM_ATTR_TX_INFO] = { .type = NLA_UNSPEC,
- .len = IEEE80211_TX_MAX_RATES*sizeof(
- struct hwsim_tx_rate)},
+ .len = IEEE80211_TX_MAX_RATES *
+ sizeof(struct hwsim_tx_rate)},
[HWSIM_ATTR_COOKIE] = { .type = NLA_U64 },
+ [HWSIM_ATTR_CHANNELS] = { .type = NLA_U32 },
+ [HWSIM_ATTR_RADIO_ID] = { .type = NLA_U32 },
+ [HWSIM_ATTR_REG_HINT_ALPHA2] = { .type = NLA_STRING, .len = 2 },
+ [HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 },
+ [HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG },
};
+static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct ieee80211_channel *chan);
+
+/* sysfs attributes */
+static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mac80211_hwsim_data *data = dat;
+ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+ struct sk_buff *skb;
+ struct ieee80211_pspoll *pspoll;
+
+ if (!vp->assoc)
+ return;
+
+ wiphy_debug(data->hw->wiphy,
+ "%s: send PS-Poll to %pM for aid %d\n",
+ __func__, vp->bssid, vp->aid);
+
+ skb = dev_alloc_skb(sizeof(*pspoll));
+ if (!skb)
+ return;
+ pspoll = (void *) skb_put(skb, sizeof(*pspoll));
+ pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
+ IEEE80211_STYPE_PSPOLL |
+ IEEE80211_FCTL_PM);
+ pspoll->aid = cpu_to_le16(0xc000 | vp->aid);
+ memcpy(pspoll->bssid, vp->bssid, ETH_ALEN);
+ memcpy(pspoll->ta, mac, ETH_ALEN);
+
+ rcu_read_lock();
+ mac80211_hwsim_tx_frame(data->hw, skb,
+ rcu_dereference(vif->chanctx_conf)->def.chan);
+ rcu_read_unlock();
+}
+
+static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
+ struct ieee80211_vif *vif, int ps)
+{
+ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+
+ if (!vp->assoc)
+ return;
+
+ wiphy_debug(data->hw->wiphy,
+ "%s: send data::nullfunc to %pM ps=%d\n",
+ __func__, vp->bssid, ps);
+
+ skb = dev_alloc_skb(sizeof(*hdr));
+ if (!skb)
+ return;
+ hdr = (void *) skb_put(skb, sizeof(*hdr) - ETH_ALEN);
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC |
+ (ps ? IEEE80211_FCTL_PM : 0));
+ hdr->duration_id = cpu_to_le16(0);
+ memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
+ memcpy(hdr->addr2, mac, ETH_ALEN);
+ memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
+
+ rcu_read_lock();
+ mac80211_hwsim_tx_frame(data->hw, skb,
+ rcu_dereference(vif->chanctx_conf)->def.chan);
+ rcu_read_unlock();
+}
+
+
+static void hwsim_send_nullfunc_ps(void *dat, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mac80211_hwsim_data *data = dat;
+ hwsim_send_nullfunc(data, mac, vif, 1);
+}
+
+static void hwsim_send_nullfunc_no_ps(void *dat, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mac80211_hwsim_data *data = dat;
+ hwsim_send_nullfunc(data, mac, vif, 0);
+}
+
+static int hwsim_fops_ps_read(void *dat, u64 *val)
+{
+ struct mac80211_hwsim_data *data = dat;
+ *val = data->ps;
+ return 0;
+}
+
+static int hwsim_fops_ps_write(void *dat, u64 val)
+{
+ struct mac80211_hwsim_data *data = dat;
+ enum ps_mode old_ps;
+
+ if (val != PS_DISABLED && val != PS_ENABLED && val != PS_AUTO_POLL &&
+ val != PS_MANUAL_POLL)
+ return -EINVAL;
+
+ old_ps = data->ps;
+ data->ps = val;
+
+ if (val == PS_MANUAL_POLL) {
+ ieee80211_iterate_active_interfaces(data->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ hwsim_send_ps_poll, data);
+ data->ps_poll_pending = true;
+ } else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
+ ieee80211_iterate_active_interfaces(data->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ hwsim_send_nullfunc_ps,
+ data);
+ } else if (old_ps != PS_DISABLED && val == PS_DISABLED) {
+ ieee80211_iterate_active_interfaces(data->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ hwsim_send_nullfunc_no_ps,
+ data);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write,
+ "%llu\n");
+
+static int hwsim_write_simulate_radar(void *dat, u64 val)
+{
+ struct mac80211_hwsim_data *data = dat;
+
+ ieee80211_radar_detected(data->hw);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hwsim_simulate_radar, NULL,
+ hwsim_write_simulate_radar, "%llu\n");
+
+static int hwsim_fops_group_read(void *dat, u64 *val)
+{
+ struct mac80211_hwsim_data *data = dat;
+ *val = data->group;
+ return 0;
+}
+
+static int hwsim_fops_group_write(void *dat, u64 val)
+{
+ struct mac80211_hwsim_data *data = dat;
+ data->group = val;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group,
+ hwsim_fops_group_read, hwsim_fops_group_write,
+ "%llx\n");
+
static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
struct net_device *dev)
{
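The large block above relocates hwsim's per-radio debugfs knobs ("ps", "group", "dfs_simulate_radar") so that the new mac80211_hwsim_create_radio() can register them per radio instead of once at module load. They follow the standard debugfs idiom: a u64 get/set pair wrapped by DEFINE_SIMPLE_ATTRIBUTE() and exposed with debugfs_create_file(). A minimal, self-contained example of that idiom (hypothetical my_* names, unrelated to hwsim):

#include <linux/debugfs.h>
#include <linux/fs.h>

static u64 my_value;

static int my_get(void *data, u64 *val)
{
	*val = my_value;
	return 0;
}

static int my_set(void *data, u64 val)
{
	my_value = val;
	return 0;
}

/* generates my_fops: open/read/write backed by my_get/my_set, formatted "%llu\n" */
DEFINE_SIMPLE_ATTRIBUTE(my_fops, my_get, my_set, "%llu\n");

/* at init time, e.g. from a module_init handler (NULL parent = debugfs root) */
debugfs_create_file("my_knob", 0644, NULL, NULL, &my_fops);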
@@ -641,7 +848,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
}
if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
- sizeof(struct mac_address), data->addresses[1].addr))
+ ETH_ALEN, data->addresses[1].addr))
goto nla_put_failure;
/* We get the skb->data */
@@ -880,7 +1087,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
return;
}
- if (channels == 1) {
+ if (data->channels == 1) {
channel = data->channel;
} else if (txi->hw_queue == 4) {
channel = data->tmp_chan;
@@ -908,7 +1115,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
if (control->sta)
hwsim_check_sta_magic(control->sta);
- if (rctbl)
+ if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
ieee80211_get_tx_rates(txi->control.vif, control->sta, skb,
txi->control.rates,
ARRAY_SIZE(txi->control.rates));
@@ -1015,7 +1222,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
{
u32 _pid = ACCESS_ONCE(wmediumd_portid);
- if (rctbl) {
+ if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE) {
struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
ieee80211_get_tx_rates(txi->control.vif, NULL, skb,
txi->control.rates,
@@ -1052,7 +1259,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
if (skb == NULL)
return;
info = IEEE80211_SKB_CB(skb);
- if (rctbl)
+ if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
ieee80211_get_tx_rates(vif, NULL, skb,
info->control.rates,
ARRAY_SIZE(info->control.rates));
@@ -1143,7 +1350,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
data->channel = conf->chandef.chan;
- WARN_ON(data->channel && channels > 1);
+ WARN_ON(data->channel && data->channels > 1);
data->power_level = conf->power_level;
if (!data->started || !data->beacon_int)
@@ -1390,8 +1597,6 @@ static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = {
[HWSIM_TM_ATTR_PS] = { .type = NLA_U32 },
};
-static int hwsim_fops_ps_write(void *dat, u64 val);
-
static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
void *data, int len)
@@ -1493,7 +1698,7 @@ static void hw_scan_work(struct work_struct *work)
req->channels[hwsim->scan_chan_idx]->center_freq);
hwsim->tmp_chan = req->channels[hwsim->scan_chan_idx];
- if (hwsim->tmp_chan->flags & IEEE80211_CHAN_PASSIVE_SCAN ||
+ if (hwsim->tmp_chan->flags & IEEE80211_CHAN_NO_IR ||
!req->n_ssids) {
dwell = 120;
} else {
@@ -1702,8 +1907,7 @@ static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
hwsim_check_chanctx_magic(ctx);
}
-static struct ieee80211_ops mac80211_hwsim_ops =
-{
+static const struct ieee80211_ops mac80211_hwsim_ops = {
.tx = mac80211_hwsim_tx,
.start = mac80211_hwsim_start,
.stop = mac80211_hwsim_stop,
@@ -1728,208 +1932,290 @@ static struct ieee80211_ops mac80211_hwsim_ops =
.set_tsf = mac80211_hwsim_set_tsf,
};
+static struct ieee80211_ops mac80211_hwsim_mchan_ops;
-static void mac80211_hwsim_free(void)
+static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
+ const struct ieee80211_regdomain *regd,
+ bool reg_strict)
{
- struct list_head tmplist, *i, *tmp;
- struct mac80211_hwsim_data *data, *tmpdata;
-
- INIT_LIST_HEAD(&tmplist);
+ int err;
+ u8 addr[ETH_ALEN];
+ struct mac80211_hwsim_data *data;
+ struct ieee80211_hw *hw;
+ enum ieee80211_band band;
+ const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
+ int idx;
spin_lock_bh(&hwsim_radio_lock);
- list_for_each_safe(i, tmp, &hwsim_radios)
- list_move(i, &tmplist);
+ idx = hwsim_radio_idx++;
spin_unlock_bh(&hwsim_radio_lock);
- list_for_each_entry_safe(data, tmpdata, &tmplist, list) {
- debugfs_remove(data->debugfs_group);
- debugfs_remove(data->debugfs_ps);
- debugfs_remove(data->debugfs);
- ieee80211_unregister_hw(data->hw);
- device_release_driver(data->dev);
- device_unregister(data->dev);
- ieee80211_free_hw(data->hw);
+ if (channels > 1)
+ ops = &mac80211_hwsim_mchan_ops;
+ hw = ieee80211_alloc_hw(sizeof(*data), ops);
+ if (!hw) {
+ printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw failed\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+ data = hw->priv;
+ data->hw = hw;
+
+ data->dev = device_create(hwsim_class, NULL, 0, hw, "hwsim%d", idx);
+ if (IS_ERR(data->dev)) {
+ printk(KERN_DEBUG
+ "mac80211_hwsim: device_create failed (%ld)\n",
+ PTR_ERR(data->dev));
+ err = -ENOMEM;
+ goto failed_drvdata;
+ }
+ data->dev->driver = &mac80211_hwsim_driver.driver;
+ err = device_bind_driver(data->dev);
+ if (err != 0) {
+ printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n",
+ err);
+ goto failed_hw;
}
- class_destroy(hwsim_class);
-}
-
-static struct platform_driver mac80211_hwsim_driver = {
- .driver = {
- .name = "mac80211_hwsim",
- .owner = THIS_MODULE,
- },
-};
-
-static const struct net_device_ops hwsim_netdev_ops = {
- .ndo_start_xmit = hwsim_mon_xmit,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static void hwsim_mon_setup(struct net_device *dev)
-{
- dev->netdev_ops = &hwsim_netdev_ops;
- dev->destructor = free_netdev;
- ether_setup(dev);
- dev->tx_queue_len = 0;
- dev->type = ARPHRD_IEEE80211_RADIOTAP;
- memset(dev->dev_addr, 0, ETH_ALEN);
- dev->dev_addr[0] = 0x12;
-}
+ skb_queue_head_init(&data->pending);
-static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
-{
- struct mac80211_hwsim_data *data = dat;
- struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
- struct sk_buff *skb;
- struct ieee80211_pspoll *pspoll;
+ SET_IEEE80211_DEV(hw, data->dev);
+ memset(addr, 0, ETH_ALEN);
+ addr[0] = 0x02;
+ addr[3] = idx >> 8;
+ addr[4] = idx;
+ memcpy(data->addresses[0].addr, addr, ETH_ALEN);
+ memcpy(data->addresses[1].addr, addr, ETH_ALEN);
+ data->addresses[1].addr[0] |= 0x40;
+ hw->wiphy->n_addresses = 2;
+ hw->wiphy->addresses = data->addresses;
+
+ data->channels = channels;
+ data->idx = idx;
+
+ if (data->channels > 1) {
+ hw->wiphy->max_scan_ssids = 255;
+ hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ hw->wiphy->max_remain_on_channel_duration = 1000;
+ /* For channels > 1 DFS is not allowed */
+ hw->wiphy->n_iface_combinations = 1;
+ hw->wiphy->iface_combinations = &data->if_combination;
+ data->if_combination = hwsim_if_comb[0];
+ data->if_combination.num_different_channels = data->channels;
+ } else {
+ hw->wiphy->iface_combinations = hwsim_if_comb;
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb);
+ }
- if (!vp->assoc)
- return;
+ INIT_DELAYED_WORK(&data->roc_done, hw_roc_done);
+ INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work);
+
+ hw->queues = 5;
+ hw->offchannel_tx_hw_queue = 4;
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
+
+ hw->flags = IEEE80211_HW_MFP_CAPABLE |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_WANT_MONITOR_VIF |
+ IEEE80211_HW_QUEUE_CONTROL |
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ if (rctbl)
+ hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
+
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_AP_UAPSD;
+ hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+
+ /* ask mac80211 to reserve space for magic */
+ hw->vif_data_size = sizeof(struct hwsim_vif_priv);
+ hw->sta_data_size = sizeof(struct hwsim_sta_priv);
+ hw->chanctx_data_size = sizeof(struct hwsim_chanctx_priv);
+
+ memcpy(data->channels_2ghz, hwsim_channels_2ghz,
+ sizeof(hwsim_channels_2ghz));
+ memcpy(data->channels_5ghz, hwsim_channels_5ghz,
+ sizeof(hwsim_channels_5ghz));
+ memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
+
+ for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ struct ieee80211_supported_band *sband = &data->bands[band];
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ sband->channels = data->channels_2ghz;
+ sband->n_channels = ARRAY_SIZE(hwsim_channels_2ghz);
+ sband->bitrates = data->rates;
+ sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
+ break;
+ case IEEE80211_BAND_5GHZ:
+ sband->channels = data->channels_5ghz;
+ sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz);
+ sband->bitrates = data->rates + 4;
+ sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
+ break;
+ default:
+ continue;
+ }
- wiphy_debug(data->hw->wiphy,
- "%s: send PS-Poll to %pM for aid %d\n",
- __func__, vp->bssid, vp->aid);
+ sband->ht_cap.ht_supported = true;
+ sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40;
+ sband->ht_cap.ampdu_factor = 0x3;
+ sband->ht_cap.ampdu_density = 0x6;
+ memset(&sband->ht_cap.mcs, 0,
+ sizeof(sband->ht_cap.mcs));
+ sband->ht_cap.mcs.rx_mask[0] = 0xff;
+ sband->ht_cap.mcs.rx_mask[1] = 0xff;
+ sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+
+ hw->wiphy->bands[band] = sband;
+
+ sband->vht_cap.vht_supported = true;
+ sband->vht_cap.cap =
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+ IEEE80211_VHT_CAP_RXLDPC |
+ IEEE80211_VHT_CAP_SHORT_GI_80 |
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_TXSTBC |
+ IEEE80211_VHT_CAP_RXSTBC_1 |
+ IEEE80211_VHT_CAP_RXSTBC_2 |
+ IEEE80211_VHT_CAP_RXSTBC_3 |
+ IEEE80211_VHT_CAP_RXSTBC_4 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ sband->vht_cap.vht_mcs.rx_mcs_map =
+ cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_8 << 0 |
+ IEEE80211_VHT_MCS_SUPPORT_0_8 << 2 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
+ IEEE80211_VHT_MCS_SUPPORT_0_8 << 6 |
+ IEEE80211_VHT_MCS_SUPPORT_0_8 << 8 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
+ IEEE80211_VHT_MCS_SUPPORT_0_8 << 14);
+ sband->vht_cap.vht_mcs.tx_mcs_map =
+ sband->vht_cap.vht_mcs.rx_mcs_map;
+ }
- skb = dev_alloc_skb(sizeof(*pspoll));
- if (!skb)
- return;
- pspoll = (void *) skb_put(skb, sizeof(*pspoll));
- pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
- IEEE80211_STYPE_PSPOLL |
- IEEE80211_FCTL_PM);
- pspoll->aid = cpu_to_le16(0xc000 | vp->aid);
- memcpy(pspoll->bssid, vp->bssid, ETH_ALEN);
- memcpy(pspoll->ta, mac, ETH_ALEN);
+ /* By default all radios belong to the first group */
+ data->group = 1;
+ mutex_init(&data->mutex);
- rcu_read_lock();
- mac80211_hwsim_tx_frame(data->hw, skb,
- rcu_dereference(vif->chanctx_conf)->def.chan);
- rcu_read_unlock();
-}
+ /* Enable frame retransmissions for lossy channels */
+ hw->max_rates = 4;
+ hw->max_rate_tries = 11;
-static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
- struct ieee80211_vif *vif, int ps)
-{
- struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
- struct sk_buff *skb;
- struct ieee80211_hdr *hdr;
+ if (reg_strict)
+ hw->wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
+ if (regd) {
+ hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+ wiphy_apply_custom_regulatory(hw->wiphy, regd);
+ /* give the regulatory workqueue a chance to run */
+ schedule_timeout_interruptible(1);
+ }
- if (!vp->assoc)
- return;
+ err = ieee80211_register_hw(hw);
+ if (err < 0) {
+ printk(KERN_DEBUG "mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
+ err);
+ goto failed_hw;
+ }
- wiphy_debug(data->hw->wiphy,
- "%s: send data::nullfunc to %pM ps=%d\n",
- __func__, vp->bssid, ps);
+ wiphy_debug(hw->wiphy, "hwaddr %pM registered\n", hw->wiphy->perm_addr);
- skb = dev_alloc_skb(sizeof(*hdr));
- if (!skb)
- return;
- hdr = (void *) skb_put(skb, sizeof(*hdr) - ETH_ALEN);
- hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_NULLFUNC |
- (ps ? IEEE80211_FCTL_PM : 0));
- hdr->duration_id = cpu_to_le16(0);
- memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
- memcpy(hdr->addr2, mac, ETH_ALEN);
- memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
+ if (reg_alpha2)
+ regulatory_hint(hw->wiphy, reg_alpha2);
- rcu_read_lock();
- mac80211_hwsim_tx_frame(data->hw, skb,
- rcu_dereference(vif->chanctx_conf)->def.chan);
- rcu_read_unlock();
-}
+ data->debugfs = debugfs_create_dir("hwsim", hw->wiphy->debugfsdir);
+ debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps);
+ debugfs_create_file("group", 0666, data->debugfs, data,
+ &hwsim_fops_group);
+ if (data->channels == 1)
+ debugfs_create_file("dfs_simulate_radar", 0222,
+ data->debugfs,
+ data, &hwsim_simulate_radar);
+ tasklet_hrtimer_init(&data->beacon_timer,
+ mac80211_hwsim_beacon,
+ CLOCK_MONOTONIC_RAW, HRTIMER_MODE_ABS);
-static void hwsim_send_nullfunc_ps(void *dat, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct mac80211_hwsim_data *data = dat;
- hwsim_send_nullfunc(data, mac, vif, 1);
-}
+ spin_lock_bh(&hwsim_radio_lock);
+ list_add_tail(&data->list, &hwsim_radios);
+ spin_unlock_bh(&hwsim_radio_lock);
+ return idx;
-static void hwsim_send_nullfunc_no_ps(void *dat, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct mac80211_hwsim_data *data = dat;
- hwsim_send_nullfunc(data, mac, vif, 0);
+failed_hw:
+ device_unregister(data->dev);
+failed_drvdata:
+ ieee80211_free_hw(hw);
+failed:
+ return err;
}
-
-static int hwsim_fops_ps_read(void *dat, u64 *val)
+static void mac80211_hwsim_destroy_radio(struct mac80211_hwsim_data *data)
{
- struct mac80211_hwsim_data *data = dat;
- *val = data->ps;
- return 0;
+ debugfs_remove_recursive(data->debugfs);
+ ieee80211_unregister_hw(data->hw);
+ device_release_driver(data->dev);
+ device_unregister(data->dev);
+ ieee80211_free_hw(data->hw);
}
-static int hwsim_fops_ps_write(void *dat, u64 val)
+static void mac80211_hwsim_free(void)
{
- struct mac80211_hwsim_data *data = dat;
- enum ps_mode old_ps;
-
- if (val != PS_DISABLED && val != PS_ENABLED && val != PS_AUTO_POLL &&
- val != PS_MANUAL_POLL)
- return -EINVAL;
-
- old_ps = data->ps;
- data->ps = val;
+ struct mac80211_hwsim_data *data;
- if (val == PS_MANUAL_POLL) {
- ieee80211_iterate_active_interfaces(data->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- hwsim_send_ps_poll, data);
- data->ps_poll_pending = true;
- } else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
- ieee80211_iterate_active_interfaces(data->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- hwsim_send_nullfunc_ps,
- data);
- } else if (old_ps != PS_DISABLED && val == PS_DISABLED) {
- ieee80211_iterate_active_interfaces(data->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- hwsim_send_nullfunc_no_ps,
- data);
+ spin_lock_bh(&hwsim_radio_lock);
+ while ((data = list_first_entry_or_null(&hwsim_radios,
+ struct mac80211_hwsim_data,
+ list))) {
+ list_del(&data->list);
+ spin_unlock_bh(&hwsim_radio_lock);
+ mac80211_hwsim_destroy_radio(data);
+ spin_lock_bh(&hwsim_radio_lock);
}
-
- return 0;
+ spin_unlock_bh(&hwsim_radio_lock);
+ class_destroy(hwsim_class);
}
-DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write,
- "%llu\n");
-
-
-static int hwsim_fops_group_read(void *dat, u64 *val)
-{
- struct mac80211_hwsim_data *data = dat;
- *val = data->group;
- return 0;
-}
+static const struct net_device_ops hwsim_netdev_ops = {
+ .ndo_start_xmit = hwsim_mon_xmit,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
-static int hwsim_fops_group_write(void *dat, u64 val)
+static void hwsim_mon_setup(struct net_device *dev)
{
- struct mac80211_hwsim_data *data = dat;
- data->group = val;
- return 0;
+ dev->netdev_ops = &hwsim_netdev_ops;
+ dev->destructor = free_netdev;
+ ether_setup(dev);
+ dev->tx_queue_len = 0;
+ dev->type = ARPHRD_IEEE80211_RADIOTAP;
+ memset(dev->dev_addr, 0, ETH_ALEN);
+ dev->dev_addr[0] = 0x12;
}
-DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group,
- hwsim_fops_group_read, hwsim_fops_group_write,
- "%llx\n");
-
-static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(
- struct mac_address *addr)
+static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
{
struct mac80211_hwsim_data *data;
bool _found = false;
spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry(data, &hwsim_radios, list) {
- if (memcmp(data->addresses[1].addr, addr,
- sizeof(struct mac_address)) == 0) {
+ if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) {
_found = true;
break;
}
@@ -1952,27 +2238,26 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
struct hwsim_tx_rate *tx_attempts;
unsigned long ret_skb_ptr;
struct sk_buff *skb, *tmp;
- struct mac_address *src;
+ const u8 *src;
unsigned int hwsim_flags;
-
int i;
bool found = false;
+ if (info->snd_portid != wmediumd_portid)
+ return -EINVAL;
+
if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
- !info->attrs[HWSIM_ATTR_FLAGS] ||
- !info->attrs[HWSIM_ATTR_COOKIE] ||
- !info->attrs[HWSIM_ATTR_TX_INFO])
+ !info->attrs[HWSIM_ATTR_FLAGS] ||
+ !info->attrs[HWSIM_ATTR_COOKIE] ||
+ !info->attrs[HWSIM_ATTR_TX_INFO])
goto out;
- src = (struct mac_address *)nla_data(
- info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]);
+ src = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]);
hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]);
-
ret_skb_ptr = nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]);
data2 = get_hwsim_data_ref_from_addr(src);
-
- if (data2 == NULL)
+ if (!data2)
goto out;
/* look for the skb matching the cookie passed back from user */
@@ -2011,7 +2296,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
(hwsim_flags & HWSIM_TX_STAT_ACK)) {
if (skb->len >= 16) {
hdr = (struct ieee80211_hdr *) skb->data;
- mac80211_hwsim_monitor_ack(txi->rate_driver_data[0],
+ mac80211_hwsim_monitor_ack(data2->channel,
hdr->addr2);
}
txi->flags |= IEEE80211_TX_STAT_ACK;
@@ -2029,38 +2314,37 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
struct mac80211_hwsim_data *data2;
struct ieee80211_rx_status rx_status;
- struct mac_address *dst;
+ const u8 *dst;
int frame_data_len;
- char *frame_data;
+ void *frame_data;
struct sk_buff *skb = NULL;
+ if (info->snd_portid != wmediumd_portid)
+ return -EINVAL;
+
if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
!info->attrs[HWSIM_ATTR_FRAME] ||
!info->attrs[HWSIM_ATTR_RX_RATE] ||
!info->attrs[HWSIM_ATTR_SIGNAL])
goto out;
- dst = (struct mac_address *)nla_data(
- info->attrs[HWSIM_ATTR_ADDR_RECEIVER]);
-
+ dst = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_RECEIVER]);
frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]);
- frame_data = (char *)nla_data(info->attrs[HWSIM_ATTR_FRAME]);
+ frame_data = (void *)nla_data(info->attrs[HWSIM_ATTR_FRAME]);
/* Allocate new skb here */
skb = alloc_skb(frame_data_len, GFP_KERNEL);
if (skb == NULL)
goto err;
- if (frame_data_len <= IEEE80211_MAX_DATA_LEN) {
- /* Copy the data */
- memcpy(skb_put(skb, frame_data_len), frame_data,
- frame_data_len);
- } else
+ if (frame_data_len > IEEE80211_MAX_DATA_LEN)
goto err;
- data2 = get_hwsim_data_ref_from_addr(dst);
+ /* Copy the data */
+ memcpy(skb_put(skb, frame_data_len), frame_data, frame_data_len);
- if (data2 == NULL)
+ data2 = get_hwsim_data_ref_from_addr(dst);
+ if (!data2)
goto out;
/* check if radio is configured properly */
@@ -2068,7 +2352,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
if (data2->idle || !data2->started)
goto out;
- /*A frame is received from user space*/
+ /* A frame is received from user space */
memset(&rx_status, 0, sizeof(rx_status));
rx_status.freq = data2->channel->center_freq;
rx_status.band = data2->channel->band;
@@ -2090,8 +2374,24 @@ out:
static int hwsim_register_received_nl(struct sk_buff *skb_2,
struct genl_info *info)
{
- if (info == NULL)
- goto out;
+ struct mac80211_hwsim_data *data;
+ int chans = 1;
+
+ spin_lock_bh(&hwsim_radio_lock);
+ list_for_each_entry(data, &hwsim_radios, list)
+ chans = max(chans, data->channels);
+ spin_unlock_bh(&hwsim_radio_lock);
+
+ /* In the future we should revise the userspace API and allow it
+ * to set a flag that it does support multi-channel, then we can
+ * let this pass conditionally on the flag.
+ * For current userspace, prohibit it since it won't work right.
+ */
+ if (chans > 1)
+ return -EOPNOTSUPP;
+
+ if (wmediumd_portid)
+ return -EBUSY;
wmediumd_portid = info->snd_portid;
@@ -2099,9 +2399,53 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
"switching to wmediumd mode with pid %d\n", info->snd_portid);
return 0;
-out:
- printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
- return -EINVAL;
+}
+
+static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ unsigned int chans = channels;
+ const char *alpha2 = NULL;
+ const struct ieee80211_regdomain *regd = NULL;
+ bool reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
+
+ if (info->attrs[HWSIM_ATTR_CHANNELS])
+ chans = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
+
+ if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2])
+ alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
+
+ if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
+ u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
+
+ if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
+ return -EINVAL;
+ regd = hwsim_world_regdom_custom[idx];
+ }
+
+ return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict);
+}
+
+static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ struct mac80211_hwsim_data *data;
+ int idx;
+
+ if (!info->attrs[HWSIM_ATTR_RADIO_ID])
+ return -EINVAL;
+ idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
+
+ spin_lock_bh(&hwsim_radio_lock);
+ list_for_each_entry(data, &hwsim_radios, list) {
+ if (data->idx != idx)
+ continue;
+ list_del(&data->list);
+ spin_unlock_bh(&hwsim_radio_lock);
+ mac80211_hwsim_destroy_radio(data);
+ return 0;
+ }
+ spin_unlock_bh(&hwsim_radio_lock);
+
+ return -ENODEV;
}
/* Generic Netlink operations array */
@@ -2122,6 +2466,18 @@ static const struct genl_ops hwsim_ops[] = {
.policy = hwsim_genl_policy,
.doit = hwsim_tx_info_frame_received_nl,
},
+ {
+ .cmd = HWSIM_CMD_CREATE_RADIO,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_create_radio_nl,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = HWSIM_CMD_DESTROY_RADIO,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_destroy_radio_nl,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
@@ -2150,10 +2506,6 @@ static int hwsim_init_netlink(void)
{
int rc;
- /* userspace test API hasn't been adjusted for multi-channel */
- if (channels > 1)
- return 0;
-
printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
rc = genl_register_family_with_ops(&hwsim_genl_family, hwsim_ops);
@@ -2173,77 +2525,36 @@ failure:
static void hwsim_exit_netlink(void)
{
- int ret;
-
- /* userspace test API hasn't been adjusted for multi-channel */
- if (channels > 1)
- return;
-
- printk(KERN_INFO "mac80211_hwsim: closing netlink\n");
/* unregister the notifier */
netlink_unregister_notifier(&hwsim_netlink_notifier);
/* unregister the family */
- ret = genl_unregister_family(&hwsim_genl_family);
- if (ret)
- printk(KERN_DEBUG "mac80211_hwsim: "
- "unregister family %i\n", ret);
+ genl_unregister_family(&hwsim_genl_family);
}
-static const struct ieee80211_iface_limit hwsim_if_limits[] = {
- { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
- { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_GO) },
- { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
-};
-
-static struct ieee80211_iface_combination hwsim_if_comb = {
- .limits = hwsim_if_limits,
- .n_limits = ARRAY_SIZE(hwsim_if_limits),
- .max_interfaces = 2048,
- .num_different_channels = 1,
-};
-
static int __init init_mac80211_hwsim(void)
{
- int i, err = 0;
- u8 addr[ETH_ALEN];
- struct mac80211_hwsim_data *data;
- struct ieee80211_hw *hw;
- enum ieee80211_band band;
+ int i, err;
- if (radios < 1 || radios > 100)
+ if (radios < 0 || radios > 100)
return -EINVAL;
if (channels < 1)
return -EINVAL;
- if (channels > 1) {
- hwsim_if_comb.num_different_channels = channels;
- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
- mac80211_hwsim_ops.cancel_hw_scan =
- mac80211_hwsim_cancel_hw_scan;
- mac80211_hwsim_ops.sw_scan_start = NULL;
- mac80211_hwsim_ops.sw_scan_complete = NULL;
- mac80211_hwsim_ops.remain_on_channel =
- mac80211_hwsim_roc;
- mac80211_hwsim_ops.cancel_remain_on_channel =
- mac80211_hwsim_croc;
- mac80211_hwsim_ops.add_chanctx =
- mac80211_hwsim_add_chanctx;
- mac80211_hwsim_ops.remove_chanctx =
- mac80211_hwsim_remove_chanctx;
- mac80211_hwsim_ops.change_chanctx =
- mac80211_hwsim_change_chanctx;
- mac80211_hwsim_ops.assign_vif_chanctx =
- mac80211_hwsim_assign_vif_chanctx;
- mac80211_hwsim_ops.unassign_vif_chanctx =
- mac80211_hwsim_unassign_vif_chanctx;
- }
+ mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
+ mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
+ mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
+ mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
+ mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
+ mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
+ mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
+ mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
+ mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
+ mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
+ mac80211_hwsim_mchan_ops.assign_vif_chanctx =
+ mac80211_hwsim_assign_vif_chanctx;
+ mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
+ mac80211_hwsim_unassign_vif_chanctx;
spin_lock_init(&hwsim_radio_lock);
INIT_LIST_HEAD(&hwsim_radios);
@@ -2255,348 +2566,116 @@ static int __init init_mac80211_hwsim(void)
hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
if (IS_ERR(hwsim_class)) {
err = PTR_ERR(hwsim_class);
- goto failed_unregister_driver;
+ goto out_unregister_driver;
}
- memset(addr, 0, ETH_ALEN);
- addr[0] = 0x02;
-
for (i = 0; i < radios; i++) {
- printk(KERN_DEBUG "mac80211_hwsim: Initializing radio %d\n",
- i);
- hw = ieee80211_alloc_hw(sizeof(*data), &mac80211_hwsim_ops);
- if (!hw) {
- printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw "
- "failed\n");
- err = -ENOMEM;
- goto failed;
- }
- data = hw->priv;
- data->hw = hw;
-
- data->dev = device_create(hwsim_class, NULL, 0, hw,
- "hwsim%d", i);
- if (IS_ERR(data->dev)) {
- printk(KERN_DEBUG
- "mac80211_hwsim: device_create failed (%ld)\n",
- PTR_ERR(data->dev));
- err = -ENOMEM;
- goto failed_drvdata;
- }
- data->dev->driver = &mac80211_hwsim_driver.driver;
- err = device_bind_driver(data->dev);
- if (err != 0) {
- printk(KERN_DEBUG
- "mac80211_hwsim: device_bind_driver failed (%d)\n",
- err);
- goto failed_hw;
- }
-
- skb_queue_head_init(&data->pending);
+ const char *reg_alpha2 = NULL;
+ const struct ieee80211_regdomain *regd = NULL;
+ bool reg_strict = false;
- SET_IEEE80211_DEV(hw, data->dev);
- addr[3] = i >> 8;
- addr[4] = i;
- memcpy(data->addresses[0].addr, addr, ETH_ALEN);
- memcpy(data->addresses[1].addr, addr, ETH_ALEN);
- data->addresses[1].addr[0] |= 0x40;
- hw->wiphy->n_addresses = 2;
- hw->wiphy->addresses = data->addresses;
-
- hw->wiphy->iface_combinations = &hwsim_if_comb;
- hw->wiphy->n_iface_combinations = 1;
-
- if (channels > 1) {
- hw->wiphy->max_scan_ssids = 255;
- hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
- hw->wiphy->max_remain_on_channel_duration = 1000;
- }
-
- INIT_DELAYED_WORK(&data->roc_done, hw_roc_done);
- INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work);
-
- hw->channel_change_time = 1;
- hw->queues = 5;
- hw->offchannel_tx_hw_queue = 4;
- hw->wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT) |
- BIT(NL80211_IFTYPE_P2P_DEVICE);
-
- hw->flags = IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_STATIC_SMPS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_WANT_MONITOR_VIF |
- IEEE80211_HW_QUEUE_CONTROL;
- if (rctbl)
- hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
-
- hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
- WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
- WIPHY_FLAG_AP_UAPSD;
- hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
-
- /* ask mac80211 to reserve space for magic */
- hw->vif_data_size = sizeof(struct hwsim_vif_priv);
- hw->sta_data_size = sizeof(struct hwsim_sta_priv);
- hw->chanctx_data_size = sizeof(struct hwsim_chanctx_priv);
-
- memcpy(data->channels_2ghz, hwsim_channels_2ghz,
- sizeof(hwsim_channels_2ghz));
- memcpy(data->channels_5ghz, hwsim_channels_5ghz,
- sizeof(hwsim_channels_5ghz));
- memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
-
- for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
- struct ieee80211_supported_band *sband = &data->bands[band];
- switch (band) {
- case IEEE80211_BAND_2GHZ:
- sband->channels = data->channels_2ghz;
- sband->n_channels =
- ARRAY_SIZE(hwsim_channels_2ghz);
- sband->bitrates = data->rates;
- sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
- break;
- case IEEE80211_BAND_5GHZ:
- sband->channels = data->channels_5ghz;
- sband->n_channels =
- ARRAY_SIZE(hwsim_channels_5ghz);
- sband->bitrates = data->rates + 4;
- sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
- break;
- default:
- continue;
- }
-
- sband->ht_cap.ht_supported = true;
- sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_GRN_FLD |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_DSSSCCK40;
- sband->ht_cap.ampdu_factor = 0x3;
- sband->ht_cap.ampdu_density = 0x6;
- memset(&sband->ht_cap.mcs, 0,
- sizeof(sband->ht_cap.mcs));
- sband->ht_cap.mcs.rx_mask[0] = 0xff;
- sband->ht_cap.mcs.rx_mask[1] = 0xff;
- sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
-
- hw->wiphy->bands[band] = sband;
-
- sband->vht_cap.vht_supported = true;
- sband->vht_cap.cap =
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
- IEEE80211_VHT_CAP_RXLDPC |
- IEEE80211_VHT_CAP_SHORT_GI_80 |
- IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_TXSTBC |
- IEEE80211_VHT_CAP_RXSTBC_1 |
- IEEE80211_VHT_CAP_RXSTBC_2 |
- IEEE80211_VHT_CAP_RXSTBC_3 |
- IEEE80211_VHT_CAP_RXSTBC_4 |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
- sband->vht_cap.vht_mcs.rx_mcs_map =
- cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_8 << 0 |
- IEEE80211_VHT_MCS_SUPPORT_0_8 << 2 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
- IEEE80211_VHT_MCS_SUPPORT_0_8 << 6 |
- IEEE80211_VHT_MCS_SUPPORT_0_8 << 8 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
- IEEE80211_VHT_MCS_SUPPORT_0_8 << 14);
- sband->vht_cap.vht_mcs.tx_mcs_map =
- sband->vht_cap.vht_mcs.rx_mcs_map;
- }
- /* By default all radios are belonging to the first group */
- data->group = 1;
- mutex_init(&data->mutex);
-
- /* Enable frame retransmissions for lossy channels */
- hw->max_rates = 4;
- hw->max_rate_tries = 11;
-
- /* Work to be done prior to ieee80211_register_hw() */
switch (regtest) {
- case HWSIM_REGTEST_DISABLED:
- case HWSIM_REGTEST_DRIVER_REG_FOLLOW:
- case HWSIM_REGTEST_DRIVER_REG_ALL:
case HWSIM_REGTEST_DIFF_COUNTRY:
- /*
- * Nothing to be done for driver regulatory domain
- * hints prior to ieee80211_register_hw()
- */
- break;
- case HWSIM_REGTEST_WORLD_ROAM:
- if (i == 0) {
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
- wiphy_apply_custom_regulatory(hw->wiphy,
- &hwsim_world_regdom_custom_01);
- }
- break;
- case HWSIM_REGTEST_CUSTOM_WORLD:
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
- wiphy_apply_custom_regulatory(hw->wiphy,
- &hwsim_world_regdom_custom_01);
- break;
- case HWSIM_REGTEST_CUSTOM_WORLD_2:
- if (i == 0) {
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
- wiphy_apply_custom_regulatory(hw->wiphy,
- &hwsim_world_regdom_custom_01);
- } else if (i == 1) {
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
- wiphy_apply_custom_regulatory(hw->wiphy,
- &hwsim_world_regdom_custom_02);
- }
- break;
- case HWSIM_REGTEST_STRICT_ALL:
- hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
- break;
- case HWSIM_REGTEST_STRICT_FOLLOW:
- case HWSIM_REGTEST_STRICT_AND_DRIVER_REG:
- if (i == 0)
- hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
- break;
- case HWSIM_REGTEST_ALL:
- if (i == 0) {
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
- wiphy_apply_custom_regulatory(hw->wiphy,
- &hwsim_world_regdom_custom_01);
- } else if (i == 1) {
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
- wiphy_apply_custom_regulatory(hw->wiphy,
- &hwsim_world_regdom_custom_02);
- } else if (i == 4)
- hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
- break;
- default:
- break;
- }
-
- /* give the regulatory workqueue a chance to run */
- if (regtest)
- schedule_timeout_interruptible(1);
- err = ieee80211_register_hw(hw);
- if (err < 0) {
- printk(KERN_DEBUG "mac80211_hwsim: "
- "ieee80211_register_hw failed (%d)\n", err);
- goto failed_hw;
- }
-
- /* Work to be done after to ieee80211_register_hw() */
- switch (regtest) {
- case HWSIM_REGTEST_WORLD_ROAM:
- case HWSIM_REGTEST_DISABLED:
+ if (i < ARRAY_SIZE(hwsim_alpha2s))
+ reg_alpha2 = hwsim_alpha2s[i];
break;
case HWSIM_REGTEST_DRIVER_REG_FOLLOW:
if (!i)
- regulatory_hint(hw->wiphy, hwsim_alpha2s[0]);
+ reg_alpha2 = hwsim_alpha2s[0];
break;
- case HWSIM_REGTEST_DRIVER_REG_ALL:
case HWSIM_REGTEST_STRICT_ALL:
- regulatory_hint(hw->wiphy, hwsim_alpha2s[0]);
+ reg_strict = true;
+ case HWSIM_REGTEST_DRIVER_REG_ALL:
+ reg_alpha2 = hwsim_alpha2s[0];
break;
- case HWSIM_REGTEST_DIFF_COUNTRY:
- if (i < ARRAY_SIZE(hwsim_alpha2s))
- regulatory_hint(hw->wiphy, hwsim_alpha2s[i]);
+ case HWSIM_REGTEST_WORLD_ROAM:
+ if (i == 0)
+ regd = &hwsim_world_regdom_custom_01;
break;
case HWSIM_REGTEST_CUSTOM_WORLD:
+ regd = &hwsim_world_regdom_custom_01;
+ break;
case HWSIM_REGTEST_CUSTOM_WORLD_2:
- /*
- * Nothing to be done for custom world regulatory
- * domains after to ieee80211_register_hw
- */
+ if (i == 0)
+ regd = &hwsim_world_regdom_custom_01;
+ else if (i == 1)
+ regd = &hwsim_world_regdom_custom_02;
break;
case HWSIM_REGTEST_STRICT_FOLLOW:
- if (i == 0)
- regulatory_hint(hw->wiphy, hwsim_alpha2s[0]);
+ if (i == 0) {
+ reg_strict = true;
+ reg_alpha2 = hwsim_alpha2s[0];
+ }
break;
case HWSIM_REGTEST_STRICT_AND_DRIVER_REG:
- if (i == 0)
- regulatory_hint(hw->wiphy, hwsim_alpha2s[0]);
- else if (i == 1)
- regulatory_hint(hw->wiphy, hwsim_alpha2s[1]);
+ if (i == 0) {
+ reg_strict = true;
+ reg_alpha2 = hwsim_alpha2s[0];
+ } else if (i == 1) {
+ reg_alpha2 = hwsim_alpha2s[1];
+ }
break;
case HWSIM_REGTEST_ALL:
- if (i == 2)
- regulatory_hint(hw->wiphy, hwsim_alpha2s[0]);
- else if (i == 3)
- regulatory_hint(hw->wiphy, hwsim_alpha2s[1]);
- else if (i == 4)
- regulatory_hint(hw->wiphy, hwsim_alpha2s[2]);
+ switch (i) {
+ case 0:
+ regd = &hwsim_world_regdom_custom_01;
+ break;
+ case 1:
+ regd = &hwsim_world_regdom_custom_02;
+ break;
+ case 2:
+ reg_alpha2 = hwsim_alpha2s[0];
+ break;
+ case 3:
+ reg_alpha2 = hwsim_alpha2s[1];
+ break;
+ case 4:
+ reg_strict = true;
+ reg_alpha2 = hwsim_alpha2s[2];
+ break;
+ }
break;
default:
break;
}
- wiphy_debug(hw->wiphy, "hwaddr %pm registered\n",
- hw->wiphy->perm_addr);
-
- data->debugfs = debugfs_create_dir("hwsim",
- hw->wiphy->debugfsdir);
- data->debugfs_ps = debugfs_create_file("ps", 0666,
- data->debugfs, data,
- &hwsim_fops_ps);
- data->debugfs_group = debugfs_create_file("group", 0666,
- data->debugfs, data,
- &hwsim_fops_group);
-
- tasklet_hrtimer_init(&data->beacon_timer,
- mac80211_hwsim_beacon,
- CLOCK_REALTIME, HRTIMER_MODE_ABS);
-
- list_add_tail(&data->list, &hwsim_radios);
+ err = mac80211_hwsim_create_radio(channels, reg_alpha2,
+ regd, reg_strict);
+ if (err < 0)
+ goto out_free_radios;
}
hwsim_mon = alloc_netdev(0, "hwsim%d", hwsim_mon_setup);
if (hwsim_mon == NULL) {
err = -ENOMEM;
- goto failed;
+ goto out_free_radios;
}
rtnl_lock();
-
err = dev_alloc_name(hwsim_mon, hwsim_mon->name);
- if (err < 0)
- goto failed_mon;
-
+ if (err < 0) {
+ rtnl_unlock();
+ goto out_free_radios;
+ }
err = register_netdevice(hwsim_mon);
- if (err < 0)
- goto failed_mon;
-
+ if (err < 0) {
+ rtnl_unlock();
+ goto out_free_mon;
+ }
rtnl_unlock();
err = hwsim_init_netlink();
if (err < 0)
- goto failed_nl;
+ goto out_free_mon;
return 0;
-failed_nl:
- printk(KERN_DEBUG "mac_80211_hwsim: failed initializing netlink\n");
- return err;
-
-failed_mon:
- rtnl_unlock();
+out_free_mon:
free_netdev(hwsim_mon);
+out_free_radios:
mac80211_hwsim_free();
- return err;
-
-failed_hw:
- device_unregister(data->dev);
-failed_drvdata:
- ieee80211_free_hw(hw);
-failed:
- mac80211_hwsim_free();
-failed_unregister_driver:
+out_unregister_driver:
platform_driver_unregister(&mac80211_hwsim_driver);
return err;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index afaad5a443b6..2747cce5a269 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -65,6 +65,9 @@ enum hwsim_tx_control_flags {
* kernel, uses:
* %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS,
* %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
+ * @HWSIM_CMD_CREATE_RADIO: create a new radio with the given parameters,
+ * returns the radio ID (>= 0) or negative on errors
+ * @HWSIM_CMD_DESTROY_RADIO: destroy a radio
* @__HWSIM_CMD_MAX: enum limit
*/
enum {
@@ -72,6 +75,8 @@ enum {
HWSIM_CMD_REGISTER,
HWSIM_CMD_FRAME,
HWSIM_CMD_TX_INFO_FRAME,
+ HWSIM_CMD_CREATE_RADIO,
+ HWSIM_CMD_DESTROY_RADIO,
__HWSIM_CMD_MAX,
};
#define HWSIM_CMD_MAX (__HWSIM_CMD_MAX - 1)
@@ -94,6 +99,14 @@ enum {
space
* @HWSIM_ATTR_TX_INFO: ieee80211_tx_rate array
* @HWSIM_ATTR_COOKIE: sk_buff cookie to identify the frame
+ * @HWSIM_ATTR_CHANNELS: u32 attribute used with the %HWSIM_CMD_CREATE_RADIO
+ * command giving the number of channels supported by the new radio
+ * @HWSIM_ATTR_RADIO_ID: u32 attribute used with %HWSIM_CMD_DESTROY_RADIO
+ * only to destroy a radio
+ * @HWSIM_ATTR_REG_HINT_ALPHA2: alpha2 for regulatory driver hint
+ * (nla string, length 2)
+ * @HWSIM_ATTR_REG_CUSTOM_REG: custom regulatory domain index (u32 attribute)
+ * @HWSIM_ATTR_REG_STRICT_REG: request REGULATORY_STRICT_REG (flag attribute)
* @__HWSIM_ATTR_MAX: enum limit
*/
@@ -108,6 +121,11 @@ enum {
HWSIM_ATTR_SIGNAL,
HWSIM_ATTR_TX_INFO,
HWSIM_ATTR_COOKIE,
+ HWSIM_ATTR_CHANNELS,
+ HWSIM_ATTR_RADIO_ID,
+ HWSIM_ATTR_REG_HINT_ALPHA2,
+ HWSIM_ATTR_REG_CUSTOM_REG,
+ HWSIM_ATTR_REG_STRICT_REG,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
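The commands and attributes documented above can be exercised from userspace over generic netlink. Below is a minimal sketch (not part of this patch) using libnl-3: it assumes the updated mac80211_hwsim.h from the hunk above is on the include path, that the running kernel carries these hunks, and that the caller has CAP_NET_ADMIN since both new ops are marked GENL_ADMIN_PERM; error handling is trimmed for brevity.

/* build: cc create_radio.c $(pkg-config --cflags --libs libnl-genl-3.0) */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include "mac80211_hwsim.h"	/* the enums added by the hunk above */

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	if (!sk || genl_connect(sk) < 0)
		return 1;
	family = genl_ctrl_resolve(sk, "MAC80211_HWSIM");
	if (family < 0)
		return 1;

	/* create a two-channel radio with a strict regulatory domain;
	 * the new radio's index is reported back via the netlink ACK */
	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    HWSIM_CMD_CREATE_RADIO, 0);
	nla_put_u32(msg, HWSIM_ATTR_CHANNELS, 2);
	nla_put_flag(msg, HWSIM_ATTR_REG_STRICT_REG);
	nl_send_auto(sk, msg);
	nl_wait_for_ack(sk);
	nlmsg_free(msg);

	/* destroy the radio again by its index (0 assumed here) */
	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    HWSIM_CMD_DESTROY_RADIO, 0);
	nla_put_u32(msg, HWSIM_ATTR_RADIO_ID, 0);
	nl_send_auto(sk, msg);
	nl_wait_for_ack(sk);
	nlmsg_free(msg);

	nl_socket_free(sk);
	return 0;
}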
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 0b803c05cab3..6261f8c53d44 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -483,7 +483,7 @@ mwifiex_get_ba_tbl(struct mwifiex_private *priv, int tid, u8 *ra)
spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
- if (!memcmp(tx_ba_tsr_tbl->ra, ra, ETH_ALEN) &&
+ if (ether_addr_equal_unaligned(tx_ba_tsr_tbl->ra, ra) &&
tx_ba_tsr_tbl->tid == tid) {
spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
flags);
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 1214c587fd08..63211707f939 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -69,9 +69,9 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset);
/* Copy SNAP header */
- snap.snap_type =
- le16_to_cpu(*(__le16 *) ((u8 *)skb_src->data + dt_offset));
- dt_offset += sizeof(u16);
+ snap.snap_type = ((struct ethhdr *)skb_src->data)->h_proto;
+
+ dt_offset += sizeof(__be16);
memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr));
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index f7ff4725506a..ecdf34505b54 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -31,12 +31,12 @@ config MWIFIEX_PCIE
mwifiex_pcie.
config MWIFIEX_USB
- tristate "Marvell WiFi-Ex Driver for USB8797"
+ tristate "Marvell WiFi-Ex Driver for USB8797/8897"
depends on MWIFIEX && USB
select FW_LOADER
---help---
This adds support for wireless adapters based on Marvell
- Avastar 88W8797 chipset with USB interface.
+ 8797/8897 chipset with USB interface.
If you choose to build it as a module, it will be called
mwifiex_usb.
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index aeaea0e3b4c4..8bfc07cd330e 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -50,24 +50,24 @@ static const struct ieee80211_regdomain mwifiex_world_regdom_custom = {
REG_RULE(2412-10, 2462+10, 40, 3, 20, 0),
/* Channel 12 - 13 */
REG_RULE(2467-10, 2472+10, 20, 3, 20,
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+ NL80211_RRF_NO_IR),
/* Channel 14 */
REG_RULE(2484-10, 2484+10, 20, 3, 20,
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+ NL80211_RRF_NO_IR |
NL80211_RRF_NO_OFDM),
/* Channel 36 - 48 */
REG_RULE(5180-10, 5240+10, 40, 3, 20,
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+ NL80211_RRF_NO_IR),
/* Channel 149 - 165 */
REG_RULE(5745-10, 5825+10, 40, 3, 20,
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+ NL80211_RRF_NO_IR),
/* Channel 52 - 64 */
REG_RULE(5260-10, 5320+10, 40, 3, 30,
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+ NL80211_RRF_NO_IR |
NL80211_RRF_DFS),
/* Channel 100 - 140 */
REG_RULE(5500-10, 5700+10, 40, 3, 30,
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+ NL80211_RRF_NO_IR |
NL80211_RRF_DFS),
}
};
@@ -184,10 +184,10 @@ mwifiex_form_mgmt_frame(struct sk_buff *skb, const u8 *buf, size_t len)
*/
static int
mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
- struct ieee80211_channel *chan, bool offchan,
- unsigned int wait, const u8 *buf, size_t len,
- bool no_cck, bool dont_wait_for_ack, u64 *cookie)
+ struct cfg80211_mgmt_tx_params *params, u64 *cookie)
{
+ const u8 *buf = params->buf;
+ size_t len = params->len;
struct sk_buff *skb;
u16 pkt_len;
const struct ieee80211_mgmt *mgmt;
@@ -222,6 +222,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
tx_info = MWIFIEX_SKB_TXCB(skb);
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
+ tx_info->pkt_len = pkt_len;
mwifiex_form_mgmt_frame(skb, buf, len);
mwifiex_queue_tx_pkt(priv, skb);
@@ -537,23 +538,33 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_private *priv = mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY);
wiphy_dbg(wiphy, "info: cfg80211 regulatory domain callback for %c%c\n",
request->alpha2[0], request->alpha2[1]);
- memcpy(adapter->country_code, request->alpha2, sizeof(request->alpha2));
-
switch (request->initiator) {
case NL80211_REGDOM_SET_BY_DRIVER:
case NL80211_REGDOM_SET_BY_CORE:
case NL80211_REGDOM_SET_BY_USER:
- break;
- /* Todo: apply driver specific changes in channel flags based
- on the request initiator if necessary. */
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
break;
+ default:
+ wiphy_err(wiphy, "unknown regdom initiator: %d\n",
+ request->initiator);
+ return;
+ }
+
+ /* Don't send world or same regdom info to firmware */
+ if (strncmp(request->alpha2, "00", 2) &&
+ strncmp(request->alpha2, adapter->country_code,
+ sizeof(request->alpha2))) {
+ memcpy(adapter->country_code, request->alpha2,
+ sizeof(request->alpha2));
+ mwifiex_send_domain_info_cmd_fw(wiphy);
+ mwifiex_dnld_txpwr_table(priv);
}
- mwifiex_send_domain_info_cmd_fw(wiphy);
}
/*
@@ -1170,10 +1181,10 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
else
bitmap_rates[1] = mask->control[band].legacy;
- /* Fill MCS rates */
- bitmap_rates[2] = mask->control[band].mcs[0];
+ /* Fill HT MCS rates */
+ bitmap_rates[2] = mask->control[band].ht_mcs[0];
if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2)
- bitmap_rates[2] |= mask->control[band].mcs[1] << 8;
+ bitmap_rates[2] |= mask->control[band].ht_mcs[1] << 8;
return mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
HostCmd_ACT_GEN_SET, 0, bitmap_rates);
@@ -1968,7 +1979,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
user_scan_cfg->chan_list[i].radio_type = chan->band;
- if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ if (chan->flags & IEEE80211_CHAN_NO_IR)
user_scan_cfg->chan_list[i].scan_type =
MWIFIEX_SCAN_TYPE_PASSIVE;
else
@@ -2438,7 +2449,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
ETH_ALEN);
mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] =
ETH_ALEN;
- mef_entry->filter[filt_num].offset = 14;
+ mef_entry->filter[filt_num].offset = 28;
mef_entry->filter[filt_num].filt_type = TYPE_EQ;
if (filt_num)
mef_entry->filter[filt_num].filt_action = TYPE_OR;
@@ -2666,6 +2677,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
struct wiphy *wiphy;
struct mwifiex_private *priv = adapter->priv[MWIFIEX_BSS_TYPE_STA];
u8 *country_code;
+ u32 thr, retry;
/* create a new wiphy for use with cfg80211 */
wiphy = wiphy_new(&mwifiex_cfg80211_ops,
@@ -2702,9 +2714,10 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
- WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_STRICT_REGULATORY |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ wiphy->regulatory_flags |=
+ REGULATORY_CUSTOM_REG |
+ REGULATORY_STRICT_REG;
wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom);
@@ -2754,6 +2767,19 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
country_code);
}
+ mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, FRAG_THRESH_I, &thr);
+ wiphy->frag_threshold = thr;
+ mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, RTS_THRESH_I, &thr);
+ wiphy->rts_threshold = thr;
+ mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, SHORT_RETRY_LIM_I, &retry);
+ wiphy->retry_short = (u8) retry;
+ mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, LONG_RETRY_LIM_I, &retry);
+ wiphy->retry_long = (u8) retry;
+
adapter->wiphy = wiphy;
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index e47f4e3012b8..1ddc8b2e3722 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -312,14 +312,14 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
}
if (GET_BSS_ROLE(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY))
== MWIFIEX_BSS_ROLE_STA) {
- if (!sleep_cfm_buf->resp_ctrl)
+ if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl))
/* Response is not needed for sleep
confirm command */
adapter->ps_state = PS_STATE_SLEEP;
else
adapter->ps_state = PS_STATE_SLEEP_CFM;
- if (!sleep_cfm_buf->resp_ctrl &&
+ if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) &&
(adapter->is_hs_configured &&
!adapter->sleep_period.period)) {
adapter->pm_wakeup_card_req = true;
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 5c85d7803d00..3a21bd03d6db 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -130,6 +130,7 @@ struct mwifiex_txinfo {
u8 flags;
u8 bss_num;
u8 bss_type;
+ u32 pkt_len;
};
enum mwifiex_wmm_ac_e {
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index c8385ec77a86..5fa932d5f905 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -30,7 +30,7 @@ struct rfc_1042_hdr {
u8 llc_ssap;
u8 llc_ctrl;
u8 snap_oui[3];
- u16 snap_type;
+ __be16 snap_type;
};
struct rx_packet_hdr {
@@ -226,7 +226,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
/* HW_SPEC fw_cap_info */
-#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(13)|BIT(14)))
+#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(12)|BIT(13)))
#define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3)
#define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3)
@@ -468,8 +468,6 @@ enum P2P_MODES {
#define MWIFIEX_CRITERIA_UNICAST BIT(1)
#define MWIFIEX_CRITERIA_MULTICAST BIT(3)
-#define CFG_DATA_TYPE_CAL 2
-
struct mwifiex_ie_types_header {
__le16 type;
__le16 len;
@@ -610,12 +608,12 @@ struct mwifiex_ie_types_tsf_timestamp {
struct mwifiex_cf_param_set {
u8 cfp_cnt;
u8 cfp_period;
- u16 cfp_max_duration;
- u16 cfp_duration_remaining;
+ __le16 cfp_max_duration;
+ __le16 cfp_duration_remaining;
} __packed;
struct mwifiex_ibss_param_set {
- u16 atim_window;
+ __le16 atim_window;
} __packed;
struct mwifiex_ie_types_ss_param_set {
@@ -627,7 +625,7 @@ struct mwifiex_ie_types_ss_param_set {
} __packed;
struct mwifiex_fh_param_set {
- u16 dwell_time;
+ __le16 dwell_time;
u8 hop_set;
u8 hop_pattern;
u8 hop_index;
@@ -684,10 +682,10 @@ struct host_cmd_ds_802_11_key_material {
} __packed;
struct host_cmd_ds_gen {
- u16 command;
- u16 size;
- u16 seq_num;
- u16 result;
+ __le16 command;
+ __le16 size;
+ __le16 seq_num;
+ __le16 result;
};
#define S_DS_GEN sizeof(struct host_cmd_ds_gen)
@@ -820,8 +818,8 @@ struct ieee_types_cf_param_set {
u8 len;
u8 cfp_cnt;
u8 cfp_period;
- u16 cfp_max_duration;
- u16 cfp_duration_remaining;
+ __le16 cfp_max_duration;
+ __le16 cfp_duration_remaining;
} __packed;
struct ieee_types_ibss_param_set {
@@ -957,7 +955,7 @@ struct mwifiex_hs_config_param {
} __packed;
struct hs_activate_param {
- u16 resp_ctrl;
+ __le16 resp_ctrl;
} __packed;
struct host_cmd_ds_802_11_hs_cfg_enh {
@@ -1131,7 +1129,7 @@ struct host_cmd_ds_802_11_bg_scan_query {
} __packed;
struct host_cmd_ds_802_11_bg_scan_query_rsp {
- u32 report_condition;
+ __le32 report_condition;
struct host_cmd_ds_802_11_scan_rsp scan_resp;
} __packed;
@@ -1230,7 +1228,7 @@ struct mwifiex_ie_types_wmm_queue_status {
struct mwifiex_ie_types_header header;
u8 queue_index;
u8 disabled;
- u16 medium_time;
+ __le16 medium_time;
u8 flow_required;
u8 flow_created;
u32 reserved;
@@ -1310,7 +1308,7 @@ struct mwifiex_ie_types_vht_oper {
u8 chan_center_freq_1;
u8 chan_center_freq_2;
/* Basic MCS set map, each 2 bits stands for a NSS */
- u16 basic_mcs_map;
+ __le16 basic_mcs_map;
} __packed;
struct mwifiex_ie_types_wmmcap {
@@ -1592,12 +1590,6 @@ struct mwifiex_ie_list {
struct mwifiex_ie ie_list[MAX_MGMT_IE_INDEX];
} __packed;
-struct host_cmd_ds_802_11_cfg_data {
- __le16 action;
- __le16 type;
- __le16 data_len;
-} __packed;
-
struct coalesce_filt_field_param {
u8 operation;
u8 operand_len;
@@ -1678,7 +1670,6 @@ struct host_cmd_ds_command {
struct host_cmd_ds_sys_config uap_sys_config;
struct host_cmd_ds_sta_deauth sta_deauth;
struct host_cmd_11ac_vht_cfg vht_cfg;
- struct host_cmd_ds_802_11_cfg_data cfg_data;
struct host_cmd_ds_coalesce_cfg coalesce_cfg;
} params;
} __packed;
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 6499117fce43..1d0a817f2bf0 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -643,7 +643,8 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
if (priv)
priv->stats.rx_dropped++;
- adapter->if_ops.data_complete(adapter, skb);
+ dev_kfree_skb_any(skb);
+ adapter->if_ops.data_complete(adapter);
}
}
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 78e8a6666cc6..4d79761b9c87 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -648,6 +648,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info = MWIFIEX_SKB_TXCB(skb);
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
+ tx_info->pkt_len = skb->len;
/* Record the current time the packet was queued; used to
* determine the amount of time the packet was queued in
@@ -746,9 +747,10 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
}
static u16
-mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb)
+mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
- skb->priority = cfg80211_classify8021d(skb);
+ skb->priority = cfg80211_classify8021d(skb, NULL);
return mwifiex_1d_to_wmm_queue[skb->priority];
}
@@ -991,12 +993,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
rtnl_unlock();
}
- priv = adapter->priv[0];
- if (!priv || !priv->wdev)
- goto exit_remove;
-
- wiphy_unregister(priv->wdev->wiphy);
- wiphy_free(priv->wdev->wiphy);
+ wiphy_unregister(adapter->wiphy);
+ wiphy_free(adapter->wiphy);
mwifiex_terminate_workqueue(adapter);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 1d72f13adb9d..d8ad554ce39f 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -32,6 +32,7 @@
#include <net/lib80211.h>
#include <linux/firmware.h>
#include <linux/ctype.h>
+#include <linux/of.h>
#include "decl.h"
#include "ioctl.h"
@@ -615,7 +616,7 @@ struct mwifiex_if_ops {
void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
- int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *);
+ int (*data_complete) (struct mwifiex_adapter *);
int (*init_fw_port) (struct mwifiex_adapter *);
int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
void (*card_reset) (struct mwifiex_adapter *);
@@ -739,6 +740,7 @@ struct mwifiex_adapter {
u8 scan_delay_cnt;
u8 empty_tx_q_cnt;
const struct firmware *cal_data;
+ struct device_node *dt_node;
/* 11AC */
u32 is_hw_11ac_capable;
@@ -1151,6 +1153,9 @@ void mwifiex_uap_del_sta_data(struct mwifiex_private *priv,
void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer,
struct mwifiex_bssdescriptor *bss_desc);
int mwifiex_11h_handle_event_chanswann(struct mwifiex_private *priv);
+int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
+ struct device_node *node, const char *prefix);
+void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
extern const struct ethtool_ops mwifiex_ethtool_ops;
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 8cf7d50a7603..0a8a26e10f01 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -515,14 +515,14 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16((u16) user_scan_in->
chan_list[0].scan_time);
- else if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ else if (ch->flags & IEEE80211_CHAN_NO_IR)
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(adapter->passive_scan_time);
else
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(adapter->active_scan_time);
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ if (ch->flags & IEEE80211_CHAN_NO_IR)
scan_chan_list[chan_idx].chan_scan_mode_bitmap
|= MWIFIEX_PASSIVE_SCAN;
else
@@ -1681,7 +1681,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
const u8 *ie_buf;
size_t ie_len;
u16 channel = 0;
- u64 fw_tsf = 0;
+ __le64 fw_tsf = 0;
u16 beacon_size = 0;
u32 curr_bcn_bytes;
u32 freq;
@@ -1815,7 +1815,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
ie_buf, ie_len, rssi, GFP_KERNEL);
bss_priv = (struct mwifiex_bss_priv *)bss->priv;
bss_priv->band = band;
- bss_priv->fw_tsf = fw_tsf;
+ bss_priv->fw_tsf = le64_to_cpu(fw_tsf);
if (priv->media_connected &&
!memcmp(bssid,
priv->curr_bss_params.bss_descriptor
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 2181ee283d82..9208a8816b80 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -354,7 +354,7 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
}
if (hs_activate) {
hs_cfg->action = cpu_to_le16(HS_ACTIVATE);
- hs_cfg->params.hs_activate.resp_ctrl = RESP_NEEDED;
+ hs_cfg->params.hs_activate.resp_ctrl = cpu_to_le16(RESP_NEEDED);
} else {
hs_cfg->action = cpu_to_le16(HS_CONFIGURE);
hs_cfg->params.hs_config.conditions = hscfg_param->conditions;
@@ -1156,30 +1156,62 @@ static u32 mwifiex_parse_cal_cfg(u8 *src, size_t len, u8 *dst)
return d - dst;
}
+int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
+ struct device_node *node, const char *prefix)
+{
+#ifdef CONFIG_OF
+ struct property *prop;
+ size_t len = strlen(prefix);
+ int ret;
+
+ /* look for all matching property names */
+ for_each_property_of_node(node, prop) {
+ if (len > strlen(prop->name) ||
+ strncmp(prop->name, prefix, len))
+ continue;
+
+ /* property header is 6 bytes, data must fit in cmd buffer */
+ if (prop && prop->value && prop->length > 6 &&
+ prop->length <= MWIFIEX_SIZE_OF_CMD_BUFFER - S_DS_GEN) {
+ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA,
+ HostCmd_ACT_GEN_SET, 0,
+ prop);
+ if (ret)
+ return ret;
+ }
+ }
+#endif
+ return 0;
+}
+
/* This function prepares command of set_cfg_data. */
static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
- struct host_cmd_ds_command *cmd,
- u16 cmd_action)
+ struct host_cmd_ds_command *cmd, void *data_buf)
{
- struct host_cmd_ds_802_11_cfg_data *cfg_data = &cmd->params.cfg_data;
struct mwifiex_adapter *adapter = priv->adapter;
- u32 len, cal_data_offset;
- u8 *tmp_cmd = (u8 *)cmd;
+ struct property *prop = data_buf;
+ u32 len;
+ u8 *data = (u8 *)cmd + S_DS_GEN;
+ int ret;
- cal_data_offset = S_DS_GEN + sizeof(*cfg_data);
- if ((adapter->cal_data->data) && (adapter->cal_data->size > 0))
+ if (prop) {
+ len = prop->length;
+ ret = of_property_read_u8_array(adapter->dt_node, prop->name,
+ data, len);
+ if (ret)
+ return ret;
+ dev_dbg(adapter->dev,
+ "download cfg_data from device tree: %s\n", prop->name);
+ } else if (adapter->cal_data->data && adapter->cal_data->size > 0) {
len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data,
- adapter->cal_data->size,
- (u8 *)(tmp_cmd + cal_data_offset));
- else
+ adapter->cal_data->size, data);
+ dev_dbg(adapter->dev, "download cfg_data from config file\n");
+ } else {
return -1;
-
- cfg_data->action = cpu_to_le16(cmd_action);
- cfg_data->type = cpu_to_le16(CFG_DATA_TYPE_CAL);
- cfg_data->data_len = cpu_to_le16(len);
+ }
cmd->command = cpu_to_le16(HostCmd_CMD_CFG_DATA);
- cmd->size = cpu_to_le16(S_DS_GEN + sizeof(*cfg_data) + len);
+ cmd->size = cpu_to_le16(S_DS_GEN + len);
return 0;
}
@@ -1267,7 +1299,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_get_hw_spec(priv, cmd_ptr);
break;
case HostCmd_CMD_CFG_DATA:
- ret = mwifiex_cmd_cfg_data(priv, cmd_ptr, cmd_action);
+ ret = mwifiex_cmd_cfg_data(priv, cmd_ptr, data_buf);
break;
case HostCmd_CMD_MAC_CONTROL:
ret = mwifiex_cmd_mac_control(priv, cmd_ptr, cmd_action,
@@ -1527,7 +1559,19 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (ret)
return -1;
- /* Download calibration data to firmware */
+ /* Download calibration data to firmware.
+ * The cal-data can be read from device tree and/or
+ * a configuration file and downloaded to firmware.
+ */
+ adapter->dt_node =
+ of_find_node_by_name(NULL, "marvell_cfgdata");
+ if (adapter->dt_node) {
+ ret = mwifiex_dnld_dt_cfgdata(priv, adapter->dt_node,
+ "marvell,caldata");
+ if (ret)
+ return -1;
+ }
+
if (adapter->cal_data) {
ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA,
HostCmd_ACT_GEN_SET, 0, NULL);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 2675ca7f8d14..24523e4015cb 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -338,8 +338,7 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf)
if (!data_buf)
return -1;
- pg_tlv_hdr = (struct mwifiex_types_power_group *)
- ((u8 *) data_buf + sizeof(struct host_cmd_ds_txpwr_cfg));
+ pg_tlv_hdr = (struct mwifiex_types_power_group *)((u8 *)data_buf);
pg = (struct mwifiex_power_group *)
((u8 *) pg_tlv_hdr + sizeof(struct mwifiex_types_power_group));
length = le16_to_cpu(pg_tlv_hdr->length);
@@ -383,19 +382,25 @@ static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
struct mwifiex_types_power_group *pg_tlv_hdr;
struct mwifiex_power_group *pg;
u16 action = le16_to_cpu(txp_cfg->action);
+ u16 tlv_buf_left;
- switch (action) {
- case HostCmd_ACT_GEN_GET:
- pg_tlv_hdr = (struct mwifiex_types_power_group *)
- ((u8 *) txp_cfg +
- sizeof(struct host_cmd_ds_txpwr_cfg));
+ pg_tlv_hdr = (struct mwifiex_types_power_group *)
+ ((u8 *)txp_cfg +
+ sizeof(struct host_cmd_ds_txpwr_cfg));
- pg = (struct mwifiex_power_group *)
- ((u8 *) pg_tlv_hdr +
- sizeof(struct mwifiex_types_power_group));
+ pg = (struct mwifiex_power_group *)
+ ((u8 *)pg_tlv_hdr +
+ sizeof(struct mwifiex_types_power_group));
+ tlv_buf_left = le16_to_cpu(resp->size) - S_DS_GEN - sizeof(*txp_cfg);
+ if (tlv_buf_left <
+ le16_to_cpu(pg_tlv_hdr->length) + sizeof(*pg_tlv_hdr))
+ return 0;
+
+ switch (action) {
+ case HostCmd_ACT_GEN_GET:
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
- mwifiex_get_power_level(priv, txp_cfg);
+ mwifiex_get_power_level(priv, pg_tlv_hdr);
priv->tx_power_level = (u16) pg->power_min;
break;
@@ -404,14 +409,6 @@ static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
if (!le32_to_cpu(txp_cfg->mode))
break;
- pg_tlv_hdr = (struct mwifiex_types_power_group *)
- ((u8 *) txp_cfg +
- sizeof(struct host_cmd_ds_txpwr_cfg));
-
- pg = (struct mwifiex_power_group *)
- ((u8 *) pg_tlv_hdr +
- sizeof(struct mwifiex_types_power_group));
-
if (pg->power_max == pg->power_min)
priv->tx_power_level = (u16) pg->power_min;
break;
@@ -785,8 +782,7 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
}
/* If BSSID is diff, modify current BSS parameters */
- if (memcmp(priv->curr_bss_params.bss_descriptor.mac_address,
- ibss_coal_resp->bssid, ETH_ALEN)) {
+ if (!ether_addr_equal(priv->curr_bss_params.bss_descriptor.mac_address, ibss_coal_resp->bssid)) {
/* BSSID */
memcpy(priv->curr_bss_params.bss_descriptor.mac_address,
ibss_coal_resp->bssid, ETH_ALEN);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index a09398fe9e2a..c5cb2ed19ec2 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -184,6 +184,16 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
}
+void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv)
+{
+ if (priv->adapter->dt_node) {
+ char txpwr[] = {"marvell,00_txpwrlimit"};
+
+ memcpy(&txpwr[8], priv->adapter->country_code, 2);
+ mwifiex_dnld_dt_cfgdata(priv, priv->adapter->dt_node, txpwr);
+ }
+}
+
static int mwifiex_process_country_ie(struct mwifiex_private *priv,
struct cfg80211_bss *bss)
{
@@ -205,6 +215,14 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
return 0;
}
+ if (!strncmp(priv->adapter->country_code, &country_ie[2], 2)) {
+ rcu_read_unlock();
+ wiphy_dbg(priv->wdev->wiphy,
+ "11D: skip setting domain info in FW\n");
+ return 0;
+ }
+ memcpy(priv->adapter->country_code, &country_ie[2], 2);
+
domain_info->country_code[0] = country_ie[2];
domain_info->country_code[1] = country_ie[3];
domain_info->country_code[2] = ' ';
@@ -226,6 +244,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
return -1;
}
+ mwifiex_dnld_txpwr_table(priv);
+
return 0;
}
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index bb22664923ef..4651d676df38 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -36,12 +36,12 @@ mwifiex_discard_gratuitous_arp(struct mwifiex_private *priv,
struct sk_buff *skb)
{
const struct mwifiex_arp_eth_header *arp;
- struct ethhdr *eth_hdr;
+ struct ethhdr *eth;
struct ipv6hdr *ipv6;
struct icmp6hdr *icmpv6;
- eth_hdr = (struct ethhdr *)skb->data;
- switch (ntohs(eth_hdr->h_proto)) {
+ eth = (struct ethhdr *)skb->data;
+ switch (ntohs(eth->h_proto)) {
case ETH_P_ARP:
arp = (void *)(skb->data + sizeof(struct ethhdr));
if (arp->hdr.ar_op == htons(ARPOP_REPLY) ||
@@ -87,16 +87,19 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
struct rx_packet_hdr *rx_pkt_hdr;
struct rxpd *local_rx_pd;
int hdr_chop;
- struct ethhdr *eth_hdr;
- u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+ struct ethhdr *eth;
local_rx_pd = (struct rxpd *) (skb->data);
rx_pkt_hdr = (void *)local_rx_pd +
le16_to_cpu(local_rx_pd->rx_pkt_offset);
- if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
- rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
+ if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+ sizeof(bridge_tunnel_header))) ||
+ (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+ sizeof(rfc1042_header)) &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
/*
* Replace the 803 header and rfc1042 header (llc/snap) with an
* EthernetII header, keep the src/dst and snap_type
@@ -106,7 +109,7 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
* To create the Ethernet II, just move the src, dst address
* right before the snap_type.
*/
- eth_hdr = (struct ethhdr *)
+ eth = (struct ethhdr *)
((u8 *) &rx_pkt_hdr->eth803_hdr
+ sizeof(rx_pkt_hdr->eth803_hdr) +
sizeof(rx_pkt_hdr->rfc1042_hdr)
@@ -114,14 +117,14 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
- sizeof(rx_pkt_hdr->eth803_hdr.h_source)
- sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));
- memcpy(eth_hdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
- sizeof(eth_hdr->h_source));
- memcpy(eth_hdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
- sizeof(eth_hdr->h_dest));
+ memcpy(eth->h_source, rx_pkt_hdr->eth803_hdr.h_source,
+ sizeof(eth->h_source));
+ memcpy(eth->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
+ sizeof(eth->h_dest));
/* Chop off the rxpd + the excess memory from the 802.2/llc/snap
header that was removed. */
- hdr_chop = (u8 *) eth_hdr - (u8 *) local_rx_pd;
+ hdr_chop = (u8 *) eth - (u8 *) local_rx_pd;
} else {
/* Chop off the rxpd */
hdr_chop = (u8 *) &rx_pkt_hdr->eth803_hdr -
@@ -185,12 +188,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
"wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
skb->len, rx_pkt_offset, rx_pkt_length);
priv->stats.rx_dropped++;
-
- if (adapter->if_ops.data_complete)
- adapter->if_ops.data_complete(adapter, skb);
- else
- dev_kfree_skb_any(skb);
-
+ dev_kfree_skb_any(skb);
return ret;
}
@@ -226,7 +224,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
* directly to os. Don't pass thru rx reordering
*/
if (!IS_11N_ENABLED(priv) ||
- memcmp(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN)) {
+ !ether_addr_equal_unaligned(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest)) {
mwifiex_process_rx_packet(priv, skb);
return ret;
}
@@ -244,12 +242,8 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
ret = mwifiex_11n_rx_reorder_pkt(priv, seq_num, local_rx_pd->priority,
ta, (u8) rx_pkt_type, skb);
- if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
- if (adapter->if_ops.data_complete)
- adapter->if_ops.data_complete(adapter, skb);
- else
- dev_kfree_skb_any(skb);
- }
+ if (ret || (rx_pkt_type == PKT_TYPE_BAR))
+ dev_kfree_skb_any(skb);
if (ret)
priv->stats.rx_dropped++;
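The eth803 + LLC/SNAP to Ethernet II rewrite described in the sta_rx.c comment above (and repeated for the AP path in uap_txrx.c below) can be pictured with a small standalone sketch. This is illustrative only, not the driver's code: the struct name, the temporary copies, and the main() harness are assumptions. The driver performs the same move in place (source address first, then destination) and then chops off everything before the rebuilt header.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define ETH_ALEN 6

/* 802.3 header (14 bytes) followed by LLC/SNAP (8 bytes) */
struct snap_frame {
	uint8_t  h_dest[ETH_ALEN];
	uint8_t  h_source[ETH_ALEN];
	uint16_t length;
	uint8_t  dsap, ssap, ctrl;
	uint8_t  oui[3];
	uint16_t snap_type;	/* already the EtherType, network order */
} __attribute__((packed));

/* Rebuild an Ethernet II header so it ends right where the payload
 * begins: dest and src are moved to sit just in front of snap_type,
 * which doubles as the EtherType field. */
static uint8_t *snap_to_ethernet_ii(uint8_t *frame)
{
	struct snap_frame *in = (struct snap_frame *)frame;
	uint8_t *eth2 = (uint8_t *)&in->snap_type - 2 * ETH_ALEN;
	uint8_t dest[ETH_ALEN], src[ETH_ALEN];

	memcpy(dest, in->h_dest, ETH_ALEN);
	memcpy(src, in->h_source, ETH_ALEN);
	memcpy(eth2, dest, ETH_ALEN);
	memcpy(eth2 + ETH_ALEN, src, ETH_ALEN);
	return eth2;	/* caller drops the 8 bytes before this point */
}

int main(void)
{
	uint8_t frame[64] = { 0 };
	struct snap_frame *in = (struct snap_frame *)frame;

	in->snap_type = htons(0x0800);	/* IPv4 payload follows */
	printf("Ethernet II header starts at offset %td\n",
	       snap_to_ethernet_ii(frame) - frame);
	return 0;
}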
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 7b581af24f5f..354d64c9606f 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -148,6 +148,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
tx_info = MWIFIEX_SKB_TXCB(skb);
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
+ tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN);
skb_reserve(skb, sizeof(struct txpd) + INTF_HEADER_LEN);
skb_push(skb, sizeof(struct txpd));
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 8f923d0d2ba6..37f26afd4314 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -40,6 +40,7 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
struct rxpd *local_rx_pd;
struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
+ int ret;
local_rx_pd = (struct rxpd *) (skb->data);
/* Get the BSS number from rxpd, get corresponding priv */
@@ -58,9 +59,15 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
rx_info->bss_type = priv->bss_type;
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
- return mwifiex_process_uap_rx_packet(priv, skb);
+ ret = mwifiex_process_uap_rx_packet(priv, skb);
+ else
+ ret = mwifiex_process_sta_rx_packet(priv, skb);
+
+ /* Decrement RX pending counter for each packet */
+ if (adapter->if_ops.data_complete)
+ adapter->if_ops.data_complete(adapter);
- return mwifiex_process_sta_rx_packet(priv, skb);
+ return ret;
}
EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
@@ -105,7 +112,7 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
switch (ret) {
case -ENOSR:
- dev_err(adapter->dev, "data: -ENOSR is returned\n");
+ dev_dbg(adapter->dev, "data: -ENOSR is returned\n");
break;
case -EBUSY:
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
@@ -168,7 +175,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
mwifiex_set_trans_start(priv->netdev);
if (!status) {
priv->stats.tx_packets++;
- priv->stats.tx_bytes += skb->len;
+ priv->stats.tx_bytes += tx_info->pkt_len;
if (priv->tx_timeout_cnt)
priv->tx_timeout_cnt = 0;
} else {
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 92f76d655e6c..3c74eb254927 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -98,7 +98,6 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
int hdr_chop;
struct timeval tv;
struct ethhdr *p_ethhdr;
- u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
uap_rx_pd = (struct uap_rxpd *)(skb->data);
rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
@@ -112,8 +111,12 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
return;
}
- if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
- rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
+ if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+ sizeof(bridge_tunnel_header))) ||
+ (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+ sizeof(rfc1042_header)) &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
/* Replace the 803 header and rfc1042 header (llc/snap) with
* an Ethernet II header, keep the src/dst and snap_type
* (ethertype).
@@ -144,7 +147,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
}
- /* Chop off the leading header bytes so the it points
+ /* Chop off the leading header bytes so that it points
* to the start of either the reconstructed EthII frame
* or the 802.2/llc/snap frame.
*/
@@ -176,6 +179,19 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
tx_info->bss_type = priv->bss_type;
tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
+ if (is_unicast_ether_addr(rx_pkt_hdr->eth803_hdr.h_dest)) {
+ /* Update bridge packet statistics as the
+ * packet is not going to kernel/upper layer.
+ */
+ priv->stats.rx_bytes += skb->len;
+ priv->stats.rx_packets++;
+
+ /* Sending bridge packet to TX queue, so save the packet
+ * length in TXCB to update statistics in TX complete.
+ */
+ tx_info->pkt_len = skb->len;
+ }
+
do_gettimeofday(&tv);
skb->tstamp = timeval_to_ktime(tv);
mwifiex_wmm_add_buf_txqueue(priv, skb);
@@ -264,12 +280,7 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
le16_to_cpu(uap_rx_pd->rx_pkt_length));
priv->stats.rx_dropped++;
-
- if (adapter->if_ops.data_complete)
- adapter->if_ops.data_complete(adapter, skb);
- else
- dev_kfree_skb_any(skb);
-
+ dev_kfree_skb_any(skb);
return 0;
}
@@ -323,12 +334,8 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
uap_rx_pd->priority, ta, pkt_type,
skb);
- if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
- if (adapter->if_ops.data_complete)
- adapter->if_ops.data_complete(adapter, skb);
- else
- dev_kfree_skb_any(skb);
- }
+ if (ret || (rx_pkt_type == PKT_TYPE_BAR))
+ dev_kfree_skb_any(skb);
if (ret)
priv->stats.rx_dropped++;
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index edf5b7a24900..e8ebbd4bc3cd 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -22,15 +22,21 @@
#define USB_VERSION "1.0"
-static const char usbdriver_name[] = "usb8797";
+static const char usbdriver_name[] = "usb8xxx";
static struct mwifiex_if_ops usb_ops;
static struct semaphore add_remove_card_sem;
static struct usb_card_rec *usb_card;
static struct usb_device_id mwifiex_usb_table[] = {
- {USB_DEVICE(USB8797_VID, USB8797_PID_1)},
- {USB_DEVICE_AND_INTERFACE_INFO(USB8797_VID, USB8797_PID_2,
+ /* 8797 */
+ {USB_DEVICE(USB8XXX_VID, USB8797_PID_1)},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8797_PID_2,
+ USB_CLASS_VENDOR_SPEC,
+ USB_SUBCLASS_VENDOR_SPEC, 0xff)},
+ /* 8897 */
+ {USB_DEVICE(USB8XXX_VID, USB8897_PID_1)},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2,
USB_CLASS_VENDOR_SPEC,
USB_SUBCLASS_VENDOR_SPEC, 0xff)},
{ } /* Terminating entry */
@@ -343,10 +349,20 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
id_vendor, id_product, bcd_device);
/* PID_1 is used for firmware downloading only */
- if (id_product == USB8797_PID_1)
- card->usb_boot_state = USB8797_FW_DNLD;
- else
- card->usb_boot_state = USB8797_FW_READY;
+ switch (id_product) {
+ case USB8797_PID_1:
+ case USB8897_PID_1:
+ card->usb_boot_state = USB8XXX_FW_DNLD;
+ break;
+ case USB8797_PID_2:
+ case USB8897_PID_2:
+ card->usb_boot_state = USB8XXX_FW_READY;
+ break;
+ default:
+ pr_warning("unknown id_product %#x\n", id_product);
+ card->usb_boot_state = USB8XXX_FW_DNLD;
+ break;
+ }
card->udev = udev;
card->intf = intf;
@@ -755,9 +771,20 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
card->adapter = adapter;
adapter->dev = &card->udev->dev;
- strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME);
usb_card = card;
+ switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
+ case USB8897_PID_1:
+ case USB8897_PID_2:
+ strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME);
+ break;
+ case USB8797_PID_1:
+ case USB8797_PID_2:
+ default:
+ strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME);
+ break;
+ }
+
return 0;
}
@@ -773,7 +800,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
{
int ret = 0;
u8 *firmware = fw->fw_buf, *recv_buff;
- u32 retries = USB8797_FW_MAX_RETRY, dlen;
+ u32 retries = USB8XXX_FW_MAX_RETRY, dlen;
u32 fw_seqnum = 0, tlen = 0, dnld_cmd = 0;
struct fw_data *fwdata;
struct fw_sync_header sync_fw;
@@ -875,7 +902,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
continue;
}
- retries = USB8797_FW_MAX_RETRY;
+ retries = USB8XXX_FW_MAX_RETRY;
break;
}
fw_seqnum++;
@@ -899,13 +926,13 @@ static int mwifiex_usb_dnld_fw(struct mwifiex_adapter *adapter,
int ret;
struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
- if (card->usb_boot_state == USB8797_FW_DNLD) {
+ if (card->usb_boot_state == USB8XXX_FW_DNLD) {
ret = mwifiex_prog_fw_w_helper(adapter, fw);
if (ret)
return -1;
/* Boot state changes after successful firmware download */
- if (card->usb_boot_state == USB8797_FW_DNLD)
+ if (card->usb_boot_state == USB8XXX_FW_DNLD)
return -1;
}
@@ -938,11 +965,9 @@ static int mwifiex_usb_cmd_event_complete(struct mwifiex_adapter *adapter,
return 0;
}
-static int mwifiex_usb_data_complete(struct mwifiex_adapter *adapter,
- struct sk_buff *skb)
+static int mwifiex_usb_data_complete(struct mwifiex_adapter *adapter)
{
atomic_dec(&adapter->rx_pending);
- dev_kfree_skb_any(skb);
return 0;
}
@@ -1041,4 +1066,5 @@ MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex USB Driver version" USB_VERSION);
MODULE_VERSION(USB_VERSION);
MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE("mrvl/usb8797_uapsta.bin");
+MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h
index 98c4316cd1a9..15b73d12e998 100644
--- a/drivers/net/wireless/mwifiex/usb.h
+++ b/drivers/net/wireless/mwifiex/usb.h
@@ -22,19 +22,23 @@
#include <linux/usb.h>
-#define USB8797_VID 0x1286
+#define USB8XXX_VID 0x1286
+
#define USB8797_PID_1 0x2043
#define USB8797_PID_2 0x2044
+#define USB8897_PID_1 0x2045
+#define USB8897_PID_2 0x2046
-#define USB8797_FW_DNLD 1
-#define USB8797_FW_READY 2
-#define USB8797_FW_MAX_RETRY 3
+#define USB8XXX_FW_DNLD 1
+#define USB8XXX_FW_READY 2
+#define USB8XXX_FW_MAX_RETRY 3
#define MWIFIEX_TX_DATA_URB 6
#define MWIFIEX_RX_DATA_URB 6
#define MWIFIEX_USB_TIMEOUT 100
#define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin"
+#define USB8897_DEFAULT_FW_NAME "mrvl/usb8897_uapsta.bin"
#define FW_DNLD_TX_BUF_SIZE 620
#define FW_DNLD_RX_BUF_SIZE 2048
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 5d9e150f4111..9b82e225880c 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -191,6 +191,9 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
if (!skb)
return -1;
+ priv->stats.rx_bytes += skb->len;
+ priv->stats.rx_packets++;
+
skb->dev = priv->netdev;
skb->protocol = eth_type_trans(skb, priv->netdev);
skb->ip_summed = CHECKSUM_NONE;
@@ -217,8 +220,6 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
(skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
- priv->stats.rx_bytes += skb->len;
- priv->stats.rx_packets++;
if (in_interrupt())
netif_rx(skb);
else
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index b953ad621e0b..4987c3f942ce 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -9,7 +9,6 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -1258,7 +1257,7 @@ mwl8k_capture_bssid(struct mwl8k_priv *priv, struct ieee80211_hdr *wh)
{
return priv->capture_beacon &&
ieee80211_is_beacon(wh->frame_control) &&
- ether_addr_equal(wh->addr3, priv->capture_bssid);
+ ether_addr_equal_64bits(wh->addr3, priv->capture_bssid);
}
static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
@@ -5893,8 +5892,6 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
hw->extra_tx_headroom -= priv->ap_fw ? REDUCED_TX_HEADROOM : 0;
- hw->channel_change_time = 10;
-
hw->queues = MWL8K_TX_WMM_QUEUES;
/* Set rssi values to dBm */
diff --git a/drivers/net/wireless/orinoco/hermes.c b/drivers/net/wireless/orinoco/hermes.c
index 75c15bc7b34c..43790fbea0e0 100644
--- a/drivers/net/wireless/orinoco/hermes.c
+++ b/drivers/net/wireless/orinoco/hermes.c
@@ -40,7 +40,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include "hermes.h"
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index d21d95939316..c0a27377d9e2 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index bdfe637953f4..f9805c9353d2 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -52,7 +52,6 @@
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/poll.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/spinlock.h>
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index e2264bc12ebf..b60048c95e0a 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index d43e3740e45d..0fe67d2da208 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -16,7 +16,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/sort.h>
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index b3879fbf5368..bc065e8e348b 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -16,7 +16,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/p54/led.c b/drivers/net/wireless/p54/led.c
index 3837e1eec5f4..1f6fd5ff5531 100644
--- a/drivers/net/wireless/p54/led.c
+++ b/drivers/net/wireless/p54/led.c
@@ -16,7 +16,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 067e6f2fd050..eede90b63f84 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -16,7 +16,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
@@ -757,7 +756,6 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT);
- dev->channel_change_time = 1000; /* TODO: find actual value */
priv->beacon_req_id = cpu_to_le32(0);
priv->tx_stats[P54_QUEUE_BEACON].limit = 1;
priv->tx_stats[P54_QUEUE_FWSCAN].limit = 1;
diff --git a/drivers/net/wireless/p54/net2280.h b/drivers/net/wireless/p54/net2280.h
index e3ed893b5aaf..aedfaf24f386 100644
--- a/drivers/net/wireless/p54/net2280.h
+++ b/drivers/net/wireless/p54/net2280.h
@@ -20,8 +20,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*-------------------------------------------------------------------------*/
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index f9a07b0d83ac..d411de409050 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -13,7 +13,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/firmware.h>
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index e328d3058c41..6e635cfa24c8 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -12,7 +12,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index f95de0d16216..153c61539ec8 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -17,7 +17,6 @@
*/
#include <linux/export.h>
-#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <asm/div64.h>
@@ -308,7 +307,7 @@ static void p54_pspoll_workaround(struct p54_common *priv, struct sk_buff *skb)
return;
/* only consider beacons from the associated BSSID */
- if (!ether_addr_equal(hdr->addr3, priv->bssid))
+ if (!ether_addr_equal_64bits(hdr->addr3, priv->bssid))
return;
tim = p54_find_ie(skb, WLAN_EID_TIM);
@@ -587,7 +586,7 @@ static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
chan = priv->curchan;
if (chan) {
struct survey_info *survey = &priv->survey[chan->hw_value];
- survey->noise = clamp_t(s8, priv->noise, -128, 127);
+ survey->noise = clamp(priv->noise, -128, 127);
survey->channel_time = priv->survey_raw.active;
survey->channel_time_tx = priv->survey_raw.tx;
survey->channel_time_busy = priv->survey_raw.tx +
diff --git a/drivers/net/wireless/prism54/isl_38xx.c b/drivers/net/wireless/prism54/isl_38xx.c
index 02fc67bccbd0..333c1a2f882e 100644
--- a/drivers/net/wireless/prism54/isl_38xx.c
+++ b/drivers/net/wireless/prism54/isl_38xx.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/isl_38xx.h b/drivers/net/wireless/prism54/isl_38xx.h
index 19c33d313734..547ab885610b 100644
--- a/drivers/net/wireless/prism54/isl_38xx.h
+++ b/drivers/net/wireless/prism54/isl_38xx.h
@@ -11,8 +11,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 8863a6cb2388..78fa64d3f223 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
@@ -25,6 +24,7 @@
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/pci.h>
+#include <linux/etherdevice.h>
#include <asm/uaccess.h>
@@ -1861,7 +1861,7 @@ prism54_del_mac(struct net_device *ndev, struct iw_request_info *info,
if (mutex_lock_interruptible(&acl->lock))
return -ERESTARTSYS;
list_for_each_entry(entry, &acl->mac_list, _list) {
- if (memcmp(entry->addr, addr->sa_data, ETH_ALEN) == 0) {
+ if (ether_addr_equal(entry->addr, addr->sa_data)) {
list_del(&entry->_list);
acl->size--;
kfree(entry);
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index a34bceb6e3cd..842a2549facc 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/isl_oid.h b/drivers/net/wireless/prism54/isl_oid.h
index 59e31258d450..83fec557997e 100644
--- a/drivers/net/wireless/prism54/isl_oid.h
+++ b/drivers/net/wireless/prism54/isl_oid.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index e05d9b4c8317..931cf440ff18 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
@@ -914,7 +913,6 @@ islpci_setup(struct pci_dev *pdev)
do_islpci_free_memory:
islpci_free_memory(priv);
do_free_netdev:
- pci_set_drvdata(pdev, NULL);
free_netdev(ndev);
priv = NULL;
return NULL;
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h
index c40403877f97..f6f088e05fe4 100644
--- a/drivers/net/wireless/prism54/islpci_dev.h
+++ b/drivers/net/wireless/prism54/islpci_dev.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 799e148d0370..674658f2e6ef 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -11,8 +11,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h
index 6ca30a5b7bfb..80f50f1bc6f2 100644
--- a/drivers/net/wireless/prism54/islpci_eth.h
+++ b/drivers/net/wireless/prism54/islpci_eth.h
@@ -11,8 +11,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index 9e68e0cb718e..1105a12dbde8 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
@@ -199,7 +198,6 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
do_unregister_netdev:
unregister_netdev(ndev);
islpci_free_memory(priv);
- pci_set_drvdata(pdev, NULL);
free_netdev(ndev);
priv = NULL;
do_pci_clear_mwi:
@@ -247,7 +245,6 @@ prism54_remove(struct pci_dev *pdev)
/* free the PCI memory and unmap the remapped page */
islpci_free_memory(priv);
- pci_set_drvdata(pdev, NULL);
free_netdev(ndev);
priv = NULL;
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index 9f19cceab487..0de14dfa68cc 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/prism54/islpci_mgt.h
index 0db93db9b675..700c434c8803 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.h
+++ b/drivers/net/wireless/prism54/islpci_mgt.h
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index 056af38e72e3..47b34bfe890a 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -11,8 +11,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/oid_mgt.h b/drivers/net/wireless/prism54/oid_mgt.h
index 92c8a2d4acd8..cf5141df8474 100644
--- a/drivers/net/wireless/prism54/oid_mgt.h
+++ b/drivers/net/wireless/prism54/oid_mgt.h
@@ -11,8 +11,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/prismcompat.h b/drivers/net/wireless/prism54/prismcompat.h
index aa1d1747784f..bc1401eb4b9d 100644
--- a/drivers/net/wireless/prism54/prismcompat.h
+++ b/drivers/net/wireless/prism54/prismcompat.h
@@ -11,8 +11,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 9b557a1bb7f8..cbf0a589d32a 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -17,8 +17,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Changes:
* Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 08/08/2000
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 8169a85c4498..5028557aa18a 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -15,8 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Portions of this file are based on NDISwrapper project,
* Copyright (C) 2003-2005 Pontus Fuchs, Giridhar Pemmasani
@@ -27,7 +26,6 @@
// #define VERBOSE // more; success messages
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 38ed9a3e44c8..4ccfef5094e0 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -26,7 +24,6 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index e4b07f0aa3cc..0fd3a9d01a60 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 0ac5c589ddce..2f1cd929c6f6 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -26,7 +24,6 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -1880,6 +1877,11 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
EEPROM_MAC_ADDR_0));
/*
+ * Disable powersaving as default.
+ */
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /*
* Initialize hw_mode information.
*/
spec->supported_bands = SUPPORT_BAND_2GHZ;
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index 9c10068e4987..573e87bcc553 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 85acc79f68b8..d849d590de25 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -26,7 +24,6 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -1709,6 +1706,11 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK;
+ /*
+ * Disable powersaving as default.
+ */
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
rt2x00_eeprom_addr(rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index 1b91a4cef965..afba0739c3b8 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index aab6b5e4f5dd..a394a9a95919 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -21,9 +21,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 776aff3678ff..7f8b5d156c8c 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -24,9 +24,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -5462,15 +5460,14 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 68, 0x0b);
- rt2800_bbp_write(rt2x00dev, 69, 0x12);
+ rt2800_bbp_write(rt2x00dev, 69, 0x0d);
+ rt2800_bbp_write(rt2x00dev, 70, 0x06);
rt2800_bbp_write(rt2x00dev, 73, 0x13);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
rt2800_bbp_write(rt2x00dev, 76, 0x28);
rt2800_bbp_write(rt2x00dev, 77, 0x59);
- rt2800_bbp_write(rt2x00dev, 70, 0x0a);
-
rt2800_bbp_write(rt2x00dev, 79, 0x13);
rt2800_bbp_write(rt2x00dev, 80, 0x05);
rt2800_bbp_write(rt2x00dev, 81, 0x33);
@@ -5513,6 +5510,7 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_bbp_write(rt2x00dev, 134, 0xd0);
rt2800_bbp_write(rt2x00dev, 135, 0xf6);
+ rt2800_bbp_write(rt2x00dev, 148, 0x84);
}
rt2800_disable_unused_dac_adc(rt2x00dev);
@@ -6453,7 +6451,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
- rt2800_rfcsr_write(rt2x00dev, 12, 0xc6);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
@@ -6466,7 +6464,8 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ if (rt2x00_is_usb(rt2x00dev) &&
+ rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
else
rt2800_rfcsr_write(rt2x00dev, 25, 0xc0);
@@ -6486,10 +6485,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
- rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
- else
- rt2800_rfcsr_write(rt2x00dev, 40, 0x4b);
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
rt2800_rfcsr_write(rt2x00dev, 42, 0xd2);
rt2800_rfcsr_write(rt2x00dev, 43, 0x9a);
@@ -6510,16 +6506,26 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 53, 0x84);
rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
rt2800_rfcsr_write(rt2x00dev, 55, 0x44);
- rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 56, 0x42);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
rt2800_rfcsr_write(rt2x00dev, 59, 0x8f);
rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
- rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
- else
- rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
+ if (rt2x00_is_usb(rt2x00dev))
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xd5);
+ } else {
+ if (rt2x00_is_usb(rt2x00dev))
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xb5);
+ }
rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
@@ -6602,7 +6608,6 @@ static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 1, 0x3F);
rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
- rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
rt2800_rfcsr_write(rt2x00dev, 6, 0xE4);
rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
@@ -7453,10 +7458,9 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
u32 reg;
/*
- * Disable powersaving as default on PCI devices.
+ * Disable powersaving as default.
*/
- if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
- rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
/*
* Initialize all hw fields.
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index a94ba447e63c..3019db637a4b 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -14,9 +14,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef RT2800LIB_H
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.c b/drivers/net/wireless/rt2x00/rt2800mmio.c
index a8cc736b5063..de4790b41be7 100644
--- a/drivers/net/wireless/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.c
@@ -19,9 +19,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/* Module: rt2800mmio
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.h b/drivers/net/wireless/rt2x00/rt2800mmio.h
index 6a10de3eee3e..b63312ce3f27 100644
--- a/drivers/net/wireless/rt2x00/rt2800mmio.h
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.h
@@ -19,9 +19,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/* Module: rt2800mmio
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index b504455b4fec..a5b32ca2cf0f 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -20,9 +20,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index a81c9ee281c0..9dfef4607d6b 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -20,9 +20,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2800soc.c b/drivers/net/wireless/rt2x00/rt2800soc.c
index 1359227ca411..f6d1bf5be006 100644
--- a/drivers/net/wireless/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/rt2x00/rt2800soc.c
@@ -19,9 +19,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/* Module: rt2800soc
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index a81ceb61d746..caddc1b427a9 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -18,9 +18,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -31,7 +29,6 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
@@ -992,6 +989,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c15) },
{ USB_DEVICE(0x07d1, 0x3c16) },
{ USB_DEVICE(0x07d1, 0x3c17) },
+ { USB_DEVICE(0x2001, 0x3317) },
{ USB_DEVICE(0x2001, 0x3c1b) },
/* Draytek */
{ USB_DEVICE(0x07fa, 0x7712) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 671ea3592610..ea7cac095997 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -17,9 +17,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index e4ba2ce0f212..e3b885d8f7db 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -15,9 +15,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 8cb43f8f3efc..1122dc44c9fd 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index 3db0d99d9da7..a2fd05ba25ca 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 7f7baae5ae02..2e3d1645e68b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.h b/drivers/net/wireless/rt2x00/rt2x00debug.h
index e11d39bdfef7..e65712c235bd 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.h
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 9dd92a700442..2bde6729f5e6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -14,9 +14,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -567,10 +565,10 @@ static void rt2x00lib_rxdone_check_ba(struct rt2x00_dev *rt2x00dev,
#undef TID_CHECK
- if (!ether_addr_equal(ba->ra, entry->ta))
+ if (!ether_addr_equal_64bits(ba->ra, entry->ta))
continue;
- if (!ether_addr_equal(ba->ta, entry->ra))
+ if (!ether_addr_equal_64bits(ba->ta, entry->ra))
continue;
/* Mark BAR since we received the according BA */
diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/rt2x00/rt2x00dump.h
index 063ebcce97f8..4c0e01b5d515 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dump.h
+++ b/drivers/net/wireless/rt2x00/rt2x00dump.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index 1b4254b4272d..fbae2799e3ee 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -14,9 +14,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c
index 997a6c89e66e..c681d04b506c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.c
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.h b/drivers/net/wireless/rt2x00/rt2x00leds.h
index 3b46f0c3332a..b2c5269570da 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.h
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 7f40ab8e1bd8..fb7c349ccc9c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -14,9 +14,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index c2b3b6629188..9b941c0c1264 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 2183e7978399..ddeb5a709aa3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.c b/drivers/net/wireless/rt2x00/rt2x00mmio.c
index 64b06c6abe58..6f236ea180aa 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mmio.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mmio.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.h b/drivers/net/wireless/rt2x00/rt2x00mmio.h
index cda3dbcf7ead..701c3127efb9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mmio.h
+++ b/drivers/net/wireless/rt2x00/rt2x00mmio.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 25da20e7e1f3..d93db4b0371b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -156,8 +154,6 @@ exit_release_regions:
exit_disable_device:
pci_disable_device(pci_dev);
- pci_set_drvdata(pci_dev, NULL);
-
return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_probe);
@@ -177,7 +173,6 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
/*
* Free the PCI device data.
*/
- pci_set_drvdata(pci_dev, NULL);
pci_disable_device(pci_dev);
pci_release_regions(pci_dev);
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 60d90b20f8b9..bc0ca5f58f38 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index a5d38e8ad9e4..5642ccceca7c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -15,9 +15,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index ebe117224979..c48125be0e34 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index 6f867eec49cc..3cc541d13d67 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c
index 9271a5fce0a8..69a0cdadb07f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00soc.c
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.c
@@ -14,9 +14,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.h b/drivers/net/wireless/rt2x00/rt2x00soc.h
index 474cbfc1efc7..9948d355e9a4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00soc.h
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 4e121627925d..10572452cc21 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -14,9 +14,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 323ca7b2b095..e7bcf62347d5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index a5b69cb49012..24402984ee57 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -27,7 +25,6 @@
#include <linux/crc-itu-t.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 9bc6b6044e34..1442075a8382 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 1baf9c896dcd..a140170b1eb3 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -27,7 +25,6 @@
#include <linux/crc-itu-t.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 7577e0ba3877..4a4f235466d1 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index a91506b12a62..3867d1470b36 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -15,7 +15,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -108,6 +107,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
struct rtl8180_priv *priv = dev->priv;
unsigned int count = 32;
u8 signal, agc, sq;
+ dma_addr_t mapping;
while (count--) {
struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
@@ -129,6 +129,17 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
if (unlikely(!new_skb))
goto done;
+ mapping = pci_map_single(priv->pdev,
+ skb_tail_pointer(new_skb),
+ MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ kfree_skb(new_skb);
+ dev_err(&priv->pdev->dev, "RX DMA map error\n");
+
+ goto done;
+ }
+
pci_unmap_single(priv->pdev,
*((dma_addr_t *)skb->cb),
MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
@@ -159,9 +170,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
skb = new_skb;
priv->rx_buf[priv->rx_idx] = skb;
- *((dma_addr_t *) skb->cb) =
- pci_map_single(priv->pdev, skb_tail_pointer(skb),
- MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+ *((dma_addr_t *) skb->cb) = mapping;
}
done:
@@ -267,6 +276,13 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
mapping = pci_map_single(priv->pdev, skb->data,
skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ kfree_skb(skb);
+ dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
+ return;
+
+ }
+
tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS |
RTL818X_TX_DESC_FLAG_LS |
(ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
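The two rtl8180 hunks above add failure checks after pci_map_single(): the RX refill path maps the replacement skb before the old buffer is unmapped, so a mapping error just drops the frame and keeps the existing descriptor, while the TX path frees the skb and returns instead of handing an invalid DMA address to the hardware. A minimal sketch of the pattern, assuming a PCI device in priv->pdev and an skb about to be queued (the surrounding names are illustrative, not lifted from the driver):

    dma_addr_t mapping;

    mapping = pci_map_single(priv->pdev, skb->data, skb->len,
                             PCI_DMA_TODEVICE);
    if (pci_dma_mapping_error(priv->pdev, mapping)) {
        /* the returned address is unusable; never program it into a descriptor */
        kfree_skb(skb);
        return;
    }
    /* ... write "mapping" into the TX descriptor and kick the hardware ... */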
diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
index dc845693f321..b1bfee738937 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
@@ -19,7 +19,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.c b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
index a63c443c3c6f..eebf23976524 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/max2820.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
@@ -18,7 +18,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
index ee638d0749d6..d60a5f399022 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
@@ -15,7 +15,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
index 7614d9ccc729..959b049827de 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
@@ -19,7 +19,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 9a6edb0c014e..fd78df813a85 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -20,7 +20,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -416,7 +415,7 @@ static int rtl8187_init_urbs(struct ieee80211_hw *dev)
struct rtl8187_rx_info *info;
int ret = 0;
- while (skb_queue_len(&priv->rx_queue) < 16) {
+ while (skb_queue_len(&priv->rx_queue) < 32) {
skb = __dev_alloc_skb(RTL8187_MAX_RX, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
index a26193a04447..5ecf18ed67b8 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
@@ -16,7 +16,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/usb.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index ff784072fb42..93bb384eb001 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -353,7 +353,6 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
/* TODO: Correct this value for our hw */
/* TODO: define these hard code value */
- hw->channel_change_time = 100;
hw->max_listen_interval = 10;
hw->max_rate_tries = 4;
/* hw->max_rates = 1; */
@@ -1293,7 +1292,7 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
/* and only beacons from the associated BSSID, please */
- if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+ if (!ether_addr_equal_64bits(hdr->addr3, rtlpriv->mac80211.bssid))
return;
rtlpriv->link_info.bcn_rx_inperiod++;
@@ -1437,7 +1436,8 @@ void rtl_watchdog_wq_callback(void *data)
/* if we can't recv beacon for 6s, we should
* reconnect this AP
*/
- if (rtlpriv->link_info.roam_times >= 3) {
+ if ((rtlpriv->link_info.roam_times >= 3) &&
+ !is_zero_ether_addr(rtlpriv->mac80211.bssid)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"AP off, try to reconnect now\n");
rtlpriv->link_info.roam_times = 0;
@@ -1780,7 +1780,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
return;
/* and only beacons from the associated BSSID, please */
- if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+ if (!ether_addr_equal_64bits(hdr->addr3, rtlpriv->mac80211.bssid))
return;
if (rtl_find_221_ie(hw, data, len))
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 0e510f73041a..0276153c72cc 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -295,7 +295,7 @@ u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr)
/* Does STA already exist? */
for (i = 4; i < TOTAL_CAM_ENTRY; i++) {
addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
- if (memcmp(addr, sta_addr, ETH_ALEN) == 0)
+ if (ether_addr_equal_unaligned(addr, sta_addr))
return i;
}
/* Get a free CAM entry. */
@@ -335,7 +335,7 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> i;
if (((bitmap & BIT(0)) == BIT(0)) &&
- (memcmp(addr, sta_addr, ETH_ALEN) == 0)) {
+ (ether_addr_equal_unaligned(addr, sta_addr))) {
/* Remove from HW Security CAM */
eth_zero_addr(rtlpriv->sec.hwsec_cam_sta_addr[i]);
rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
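The BSSID and CAM address comparisons above move to the specialised helpers from <linux/etherdevice.h>: ether_addr_equal_64bits() compares two 6-byte addresses with a single wide load each, which is only safe when at least two readable bytes follow each address inside its containing structure (true for ieee80211_hdr::addr3, which is followed by seq_ctrl, and for the rtlwifi bssid field, which sits inside a larger struct), while ether_addr_equal_unaligned() byte-compares and makes no layout or alignment assumptions, which suits address tables whose 2-byte alignment is not guaranteed. The usual trade-off applies: the _64bits variant on hot RX paths where the layout guarantees hold, the plain or _unaligned variants everywhere else.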
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 210ce7cd94d8..2d337a0c3df0 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -46,10 +46,20 @@ void rtl_fw_cb(const struct firmware *firmware, void *context)
"Firmware callback routine entered!\n");
complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
+ if (rtlpriv->cfg->alt_fw_name) {
+ err = request_firmware(&firmware,
+ rtlpriv->cfg->alt_fw_name,
+ rtlpriv->io.dev);
+ pr_info("Loading alternative firmware %s\n",
+ rtlpriv->cfg->alt_fw_name);
+ if (!err)
+ goto found_alt;
+ }
pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
rtlpriv->max_fw_size = 0;
return;
}
+found_alt:
if (firmware->size > rtlpriv->max_fw_size) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"Firmware is too big!\n");
@@ -184,6 +194,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
rtlpriv->cfg->maps
[RTL_IBSS_INT_MASKS]);
}
+ mac->link_state = MAC80211_LINKED;
break;
case NL80211_IFTYPE_ADHOC:
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
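rtl_fw_cb() above gains a fallback: when the asynchronous load of cfg->fw_name fails, it synchronously retries cfg->alt_fw_name (filled in by the 8192cu probe code further down) before declaring the firmware unavailable. A minimal sketch of the same two-stage lookup, assuming a struct device *dev and the file names used later in this series (the helper name is hypothetical):

    static int rtl_load_fw_sketch(struct device *dev, const struct firmware **fw)
    {
        int err;

        err = request_firmware(fw, "rtlwifi/rtl8192cufw_B.bin", dev);
        if (err)   /* cut-specific image not shipped; fall back to the generic one */
            err = request_firmware(fw, "rtlwifi/rtl8192cufw.bin", dev);
        return err; /* caller consumes (*fw)->data and calls release_firmware() */
    }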
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 0f494444bcd1..d7aa165fe677 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -688,8 +688,6 @@ static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb,
rtlpriv->stats.rxbytesunicast += skb->len;
}
- rtl_is_special_data(hw, skb, false);
-
if (ieee80211_is_data(fc)) {
rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
@@ -740,6 +738,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
};
int index = rtlpci->rx_ring[rx_queue_idx].idx;
+ if (rtlpci->driver_is_goingto_unload)
+ return;
/*RX NORMAL PKT */
while (count--) {
/*rx descriptor */
@@ -1636,6 +1636,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
*/
set_hal_stop(rtlhal);
+ rtlpci->driver_is_goingto_unload = true;
rtlpriv->cfg->ops->disable_interrupt(hw);
cancel_work_sync(&rtlpriv->works.lps_change_work);
@@ -1653,7 +1654,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
ppsc->rfchange_inprogress = true;
spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
- rtlpci->driver_is_goingto_unload = true;
rtlpriv->cfg->ops->hw_disable(hw);
/* some things are not needed if firmware not available */
if (!rtlpriv->max_fw_size)
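rtl_pci_stop() above now sets rtlpci->driver_is_goingto_unload before interrupts are disabled (previously it did so only later, after hal_stop), and _rtl_pci_rx_interrupt() checks the flag on entry, so an RX interrupt racing with the teardown bails out instead of walking rings that are about to be torn down. Roughly, the two sides of that handshake are:

    /* stop path */
    rtlpci->driver_is_goingto_unload = true;   /* publish the intent first */
    rtlpriv->cfg->ops->disable_interrupt(hw);

    /* RX interrupt path */
    if (rtlpci->driver_is_goingto_unload)
        return;                                /* rings may already be shutting down */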
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 0d81f766fd0f..deedae3c5449 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -478,7 +478,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
return;
/* and only beacons from the associated BSSID, please */
- if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+ if (!ether_addr_equal_64bits(hdr->addr3, rtlpriv->mac80211.bssid))
return;
rtlpriv->psc.last_beacon = jiffies;
@@ -923,7 +923,7 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
return;
/* and only beacons from the associated BSSID, please */
- if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+ if (!ether_addr_equal_64bits(hdr->addr3, rtlpriv->mac80211.bssid))
return;
/* check if this really is a beacon */
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c
index d7d0d4948b01..a4eb9b271438 100644
--- a/drivers/net/wireless/rtlwifi/regd.c
+++ b/drivers/net/wireless/rtlwifi/regd.c
@@ -59,30 +59,26 @@ static struct country_code_to_enum_rd allCountries[] = {
*/
#define RTL819x_2GHZ_CH12_13 \
REG_RULE(2467-10, 2472+10, 40, 0, 20,\
- NL80211_RRF_PASSIVE_SCAN)
+ NL80211_RRF_NO_IR)
#define RTL819x_2GHZ_CH14 \
REG_RULE(2484-10, 2484+10, 40, 0, 20, \
- NL80211_RRF_PASSIVE_SCAN | \
- NL80211_RRF_NO_OFDM)
+ NL80211_RRF_NO_IR | NL80211_RRF_NO_OFDM)
/* 5G chan 36 - chan 64*/
#define RTL819x_5GHZ_5150_5350 \
REG_RULE(5150-10, 5350+10, 40, 0, 30, \
- NL80211_RRF_PASSIVE_SCAN | \
- NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
/* 5G chan 100 - chan 165*/
#define RTL819x_5GHZ_5470_5850 \
REG_RULE(5470-10, 5850+10, 40, 0, 30, \
- NL80211_RRF_PASSIVE_SCAN | \
- NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
/* 5G chan 149 - chan 165*/
#define RTL819x_5GHZ_5725_5850 \
REG_RULE(5725-10, 5850+10, 40, 0, 30, \
- NL80211_RRF_PASSIVE_SCAN | \
- NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
#define RTL819x_5GHZ_ALL \
(RTL819x_5GHZ_5150_5350, RTL819x_5GHZ_5470_5850)
@@ -172,7 +168,8 @@ static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
(ch->flags & IEEE80211_CHAN_RADAR))
continue;
if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ reg_rule = freq_reg_info(wiphy,
+ MHZ_TO_KHZ(ch->center_freq));
if (IS_ERR(reg_rule))
continue;
@@ -185,16 +182,11 @@ static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
*regulatory_hint().
*/
- if (!(reg_rule->flags & NL80211_RRF_NO_IBSS))
- ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
- if (!(reg_rule->
- flags & NL80211_RRF_PASSIVE_SCAN))
- ch->flags &=
- ~IEEE80211_CHAN_PASSIVE_SCAN;
+ if (!(reg_rule->flags & NL80211_RRF_NO_IR))
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
} else {
if (ch->beacon_found)
- ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN);
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
}
}
}
@@ -219,11 +211,11 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
*/
if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
ch = &sband->channels[11]; /* CH 12 */
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ if (ch->flags & IEEE80211_CHAN_NO_IR)
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
ch = &sband->channels[12]; /* CH 13 */
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ if (ch->flags & IEEE80211_CHAN_NO_IR)
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
return;
}
@@ -235,19 +227,19 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
*/
ch = &sband->channels[11]; /* CH 12 */
- reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ reg_rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq));
if (!IS_ERR(reg_rule)) {
- if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ if (!(reg_rule->flags & NL80211_RRF_NO_IR))
+ if (ch->flags & IEEE80211_CHAN_NO_IR)
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
}
ch = &sband->channels[12]; /* CH 13 */
- reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ reg_rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq));
if (!IS_ERR(reg_rule)) {
- if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ if (!(reg_rule->flags & NL80211_RRF_NO_IR))
+ if (ch->flags & IEEE80211_CHAN_NO_IR)
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
}
}
@@ -284,8 +276,7 @@ static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy)
*/
if (!(ch->flags & IEEE80211_CHAN_DISABLED))
ch->flags |= IEEE80211_CHAN_RADAR |
- IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN;
+ IEEE80211_CHAN_NO_IR;
}
}
@@ -354,9 +345,9 @@ static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg,
wiphy->reg_notifier = reg_notifier;
- wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
- wiphy->flags &= ~WIPHY_FLAG_STRICT_REGULATORY;
- wiphy->flags &= ~WIPHY_FLAG_DISABLE_BEACON_HINTS;
+ wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+ wiphy->regulatory_flags &= ~REGULATORY_STRICT_REG;
+ wiphy->regulatory_flags &= ~REGULATORY_DISABLE_BEACON_HINTS;
regd = _rtl_regdomain_select(reg);
wiphy_apply_custom_regulatory(wiphy, regd);
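The regd.c hunks track two cfg80211 API changes: the separate NO_IBSS / PASSIVE_SCAN channel and rule flags have been folded into a single "no initiating radiation" flag (IEEE80211_CHAN_NO_IR on channels, NL80211_RRF_NO_IR in regulatory rules), and freq_reg_info() takes its frequency in kHz, hence the MHZ_TO_KHZ() wrappers; the custom-regulatory bits likewise move from wiphy->flags to wiphy->regulatory_flags. The lookup the driver keeps repeating boils down to this, assuming ch points at a struct ieee80211_channel:

    const struct ieee80211_reg_rule *rule;

    rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq));
    if (!IS_ERR(rule) && !(rule->flags & NL80211_RRF_NO_IR))
        ch->flags &= ~IEEE80211_CHAN_NO_IR;   /* active scan / beaconing allowed here */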
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
index 21a5cf060677..a6184b6e1d57 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
@@ -1078,7 +1078,7 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
rtldm->swing_flag_ofdm = true;
}
- if (rtldm->swing_idx_cck != rtldm->swing_idx_cck) {
+ if (rtldm->swing_idx_cck_cur != rtldm->swing_idx_cck) {
rtldm->swing_idx_cck_cur = rtldm->swing_idx_cck;
rtldm->swing_flag_cck = true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index e9caa5d4cff0..eb78fd8607f7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -158,6 +158,42 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
{0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
};
+static u32 power_index_reg[6] = {0xc90, 0xc91, 0xc92, 0xc98, 0xc99, 0xc9a};
+
+void dm_restorepowerindex(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 index;
+
+ for (index = 0; index < 6; index++)
+ rtl_write_byte(rtlpriv, power_index_reg[index],
+ rtlpriv->dm.powerindex_backup[index]);
+}
+EXPORT_SYMBOL_GPL(dm_restorepowerindex);
+
+void dm_writepowerindex(struct ieee80211_hw *hw, u8 value)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 index;
+
+ for (index = 0; index < 6; index++)
+ rtl_write_byte(rtlpriv, power_index_reg[index], value);
+}
+EXPORT_SYMBOL_GPL(dm_writepowerindex);
+
+void dm_savepowerindex(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 index;
+ u8 tmp;
+
+ for (index = 0; index < 6; index++) {
+ tmp = rtl_read_byte(rtlpriv, power_index_reg[index]);
+ rtlpriv->dm.powerindex_backup[index] = tmp;
+ }
+}
+EXPORT_SYMBOL_GPL(dm_savepowerindex);
+
static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -180,7 +216,12 @@ static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
- dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
+ dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_LowRssi;
+
+ dm_digtable->forbidden_igi = DM_DIG_MIN;
+ dm_digtable->large_fa_hit = 0;
+ dm_digtable->recover_cnt = 0;
+ dm_digtable->dig_dynamic_min = 0x25;
}
static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
@@ -206,7 +247,9 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb;
}
- return (u8) rssi_val_min;
+ if (rssi_val_min > 100)
+ rssi_val_min = 100;
+ return (u8)rssi_val_min;
}
static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
@@ -224,9 +267,17 @@ static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
+
+ ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
+ falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
+ falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
+
falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
- falsealm_cnt->cnt_rate_illegal +
- falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
+ falsealm_cnt->cnt_rate_illegal +
+ falsealm_cnt->cnt_crc8_fail +
+ falsealm_cnt->cnt_mcs_fail +
+ falsealm_cnt->cnt_fast_fsync_fail +
+ falsealm_cnt->cnt_sb_search_fail;
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
@@ -271,12 +322,14 @@ static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
value_igi++;
else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
value_igi += 2;
+
if (value_igi > DM_DIG_FA_UPPER)
value_igi = DM_DIG_FA_UPPER;
else if (value_igi < DM_DIG_FA_LOWER)
value_igi = DM_DIG_FA_LOWER;
+
if (rtlpriv->falsealm_cnt.cnt_all > 10000)
- value_igi = 0x32;
+ value_igi = DM_DIG_FA_UPPER;
dm_digtable->cur_igvalue = value_igi;
rtl92c_dm_write_dig(hw);
@@ -286,32 +339,80 @@ static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *digtable = &rtlpriv->dm_digtable;
+ u32 isbt;
+
+ /* modify DIG lower bound, deal with abnorally large false alarm */
+ if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
+ digtable->large_fa_hit++;
+ if (digtable->forbidden_igi < digtable->cur_igvalue) {
+ digtable->forbidden_igi = digtable->cur_igvalue;
+ digtable->large_fa_hit = 1;
+ }
- if (rtlpriv->falsealm_cnt.cnt_all > digtable->fa_highthresh) {
- if ((digtable->back_val - 2) < digtable->back_range_min)
- digtable->back_val = digtable->back_range_min;
- else
- digtable->back_val -= 2;
- } else if (rtlpriv->falsealm_cnt.cnt_all < digtable->fa_lowthresh) {
- if ((digtable->back_val + 2) > digtable->back_range_max)
- digtable->back_val = digtable->back_range_max;
- else
- digtable->back_val += 2;
+ if (digtable->large_fa_hit >= 3) {
+ if ((digtable->forbidden_igi + 1) >
+ digtable->rx_gain_max)
+ digtable->rx_gain_min = digtable->rx_gain_max;
+ else
+ digtable->rx_gain_min = (digtable->forbidden_igi + 1);
+ digtable->recover_cnt = 3600; /* 3600=2hr */
+ }
+ } else {
+ /* Recovery mechanism for IGI lower bound */
+ if (digtable->recover_cnt != 0) {
+ digtable->recover_cnt--;
+ } else {
+ if (digtable->large_fa_hit == 0) {
+ if ((digtable->forbidden_igi-1) < DM_DIG_MIN) {
+ digtable->forbidden_igi = DM_DIG_MIN;
+ digtable->rx_gain_min = DM_DIG_MIN;
+ } else {
+ digtable->forbidden_igi--;
+ digtable->rx_gain_min = digtable->forbidden_igi + 1;
+ }
+ } else if (digtable->large_fa_hit == 3) {
+ digtable->large_fa_hit = 0;
+ }
+ }
+ }
+ if (rtlpriv->falsealm_cnt.cnt_all < 250) {
+ isbt = rtl_read_byte(rtlpriv, 0x4fd) & 0x01;
+
+ if (!isbt) {
+ if (rtlpriv->falsealm_cnt.cnt_all >
+ digtable->fa_lowthresh) {
+ if ((digtable->back_val - 2) <
+ digtable->back_range_min)
+ digtable->back_val = digtable->back_range_min;
+ else
+ digtable->back_val -= 2;
+ } else if (rtlpriv->falsealm_cnt.cnt_all <
+ digtable->fa_lowthresh) {
+ if ((digtable->back_val + 2) >
+ digtable->back_range_max)
+ digtable->back_val = digtable->back_range_max;
+ else
+ digtable->back_val += 2;
+ }
+ } else {
+ digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
+ }
+ } else {
+ /* Adjust initial gain by false alarm */
+ if (rtlpriv->falsealm_cnt.cnt_all > 1000)
+ digtable->cur_igvalue = digtable->pre_igvalue + 2;
+ else if (rtlpriv->falsealm_cnt.cnt_all > 750)
+ digtable->cur_igvalue = digtable->pre_igvalue + 1;
+ else if (rtlpriv->falsealm_cnt.cnt_all < 500)
+ digtable->cur_igvalue = digtable->pre_igvalue - 1;
}
- if ((digtable->rssi_val_min + 10 - digtable->back_val) >
- digtable->rx_gain_max)
+ /* Check initial gain by upper/lower bound */
+ if (digtable->cur_igvalue > digtable->rx_gain_max)
digtable->cur_igvalue = digtable->rx_gain_max;
- else if ((digtable->rssi_val_min + 10 -
- digtable->back_val) < digtable->rx_gain_min)
- digtable->cur_igvalue = digtable->rx_gain_min;
- else
- digtable->cur_igvalue = digtable->rssi_val_min + 10 -
- digtable->back_val;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "rssi_val_min = %x back_val %x\n",
- digtable->rssi_val_min, digtable->back_val);
+ if (digtable->cur_igvalue < digtable->rx_gain_min)
+ digtable->cur_igvalue = digtable->rx_gain_min;
rtl92c_dm_write_dig(hw);
}
@@ -329,7 +430,7 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
multi_sta = true;
if (!multi_sta ||
- dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
+ dm_digtable->cursta_cstate == DIG_STA_DISCONNECT) {
initialized = false;
dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
return;
@@ -375,7 +476,6 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
"presta_cstate = %x, cursta_cstate = %x\n",
dm_digtable->presta_cstate, dm_digtable->cursta_cstate);
-
if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate ||
dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT ||
dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
@@ -383,6 +483,8 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
if (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
dm_digtable->rssi_val_min =
rtl92c_dm_initial_gain_min_pwdb(hw);
+ if (dm_digtable->rssi_val_min > 100)
+ dm_digtable->rssi_val_min = 100;
rtl92c_dm_ctrl_initgain_by_rssi(hw);
}
} else {
@@ -398,11 +500,12 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
if (dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
+ if (dm_digtable->rssi_val_min > 100)
+ dm_digtable->rssi_val_min = 100;
if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
if (dm_digtable->rssi_val_min <= 25)
@@ -424,48 +527,14 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
}
if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) {
- if (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
- if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
- dm_digtable->cur_cck_fa_state =
- CCK_FA_STAGE_High;
- else
- dm_digtable->cur_cck_fa_state = CCK_FA_STAGE_Low;
-
- if (dm_digtable->pre_cck_fa_state !=
- dm_digtable->cur_cck_fa_state) {
- if (dm_digtable->cur_cck_fa_state ==
- CCK_FA_STAGE_Low)
- rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
- 0x83);
- else
- rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
- 0xcd);
-
- dm_digtable->pre_cck_fa_state =
- dm_digtable->cur_cck_fa_state;
- }
-
- rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
-
- if (IS_92C_SERIAL(rtlhal->version))
- rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
- MASKBYTE2, 0xd7);
- } else {
+ if ((dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) ||
+ (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_MAX))
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83);
+ else
rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
- rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
- if (IS_92C_SERIAL(rtlhal->version))
- rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
- MASKBYTE2, 0xd3);
- }
dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state;
}
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "CCKPDStage=%x\n",
- dm_digtable->cur_cck_pd_state);
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "is92C=%x\n",
- IS_92C_SERIAL(rtlhal->version));
}
static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
@@ -482,6 +551,8 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
else
dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
+ dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
+
rtl92c_dm_initial_gain_sta(hw);
rtl92c_dm_initial_gain_multi_sta(hw);
rtl92c_dm_cck_packet_detection_thresh(hw);
@@ -493,23 +564,26 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
static void rtl92c_dm_dig(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
if (rtlpriv->dm.dm_initialgain_enable == false)
return;
- if (dm_digtable->dig_enable_flag == false)
+ if (!(rtlpriv->dm.dm_flag & DYNAMIC_FUNC_DIG))
return;
rtl92c_dm_ctrl_initgain_by_twoport(hw);
-
}
static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- rtlpriv->dm.dynamic_txpower_enable = false;
-
+ if (rtlpriv->rtlhal.interface == INTF_USB &&
+ rtlpriv->rtlhal.board_type & 0x1) {
+ dm_savepowerindex(hw);
+ rtlpriv->dm.dynamic_txpower_enable = true;
+ } else {
+ rtlpriv->dm.dynamic_txpower_enable = false;
+ }
rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
}
@@ -524,9 +598,14 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
dm_digtable->back_val);
- dm_digtable->cur_igvalue += 2;
- if (dm_digtable->cur_igvalue > 0x3f)
- dm_digtable->cur_igvalue = 0x3f;
+ if (rtlpriv->rtlhal.interface == INTF_USB &&
+ !dm_digtable->dig_enable_flag) {
+ dm_digtable->pre_igvalue = 0x17;
+ return;
+ }
+ dm_digtable->cur_igvalue -= 1;
+ if (dm_digtable->cur_igvalue < DM_DIG_MIN)
+ dm_digtable->cur_igvalue = DM_DIG_MIN;
if (dm_digtable->pre_igvalue != dm_digtable->cur_igvalue) {
rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
@@ -536,11 +615,47 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
dm_digtable->pre_igvalue = dm_digtable->cur_igvalue;
}
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_WARNING,
+ "dig values 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
+ dm_digtable->rssi_val_min, dm_digtable->back_val,
+ dm_digtable->rx_gain_max, dm_digtable->rx_gain_min,
+ dm_digtable->large_fa_hit, dm_digtable->forbidden_igi);
}
EXPORT_SYMBOL(rtl92c_dm_write_dig);
static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
+
+ if (mac->link_state != MAC80211_LINKED)
+ return;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC ||
+ mac->opmode == NL80211_IFTYPE_AP) {
+ /* TODO: Handle ADHOC and AP Mode */
+ }
+
+ if (tmpentry_max_pwdb != 0)
+ rtlpriv->dm.entry_max_undec_sm_pwdb = tmpentry_max_pwdb;
+ else
+ rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
+
+ if (tmpentry_min_pwdb != 0xff)
+ rtlpriv->dm.entry_min_undec_sm_pwdb = tmpentry_min_pwdb;
+ else
+ rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
+
+/* TODO:
+ * if (mac->opmode == NL80211_IFTYPE_STATION) {
+ * if (rtlpriv->rtlhal.fw_ready) {
+ * u32 param = (u32)(rtlpriv->dm.undec_sm_pwdb << 16);
+ * rtl8192c_set_rssi_cmd(hw, param);
+ * }
+ * }
+ */
}
void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
@@ -750,6 +865,7 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
rtlpriv->dm.cck_index = cck_index_old;
}
+ /* Handle USB High PA boards */
delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
(thermalvalue - rtlpriv->dm.thermalvalue) :
@@ -1140,22 +1256,22 @@ void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
- static u8 initialize;
- static u32 reg_874, reg_c70, reg_85c, reg_a74;
- if (initialize == 0) {
- reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
- MASKDWORD) & 0x1CC000) >> 14;
+ if (!rtlpriv->reg_init) {
+ rtlpriv->reg_874 = (rtl_get_bbreg(hw,
+ RFPGA0_XCD_RFINTERFACESW,
+ MASKDWORD) & 0x1CC000) >> 14;
- reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
- MASKDWORD) & BIT(3)) >> 3;
+ rtlpriv->reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
+ MASKDWORD) & BIT(3)) >> 3;
- reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
- MASKDWORD) & 0xFF000000) >> 24;
+ rtlpriv->reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
+ MASKDWORD) & 0xFF000000) >> 24;
- reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
+ rtlpriv->reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) &
+ 0xF000) >> 12;
- initialize = 1;
+ rtlpriv->reg_init = true;
}
if (!bforce_in_normal) {
@@ -1192,12 +1308,12 @@ void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
} else {
rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
- 0x1CC000, reg_874);
+ 0x1CC000, rtlpriv->reg_874);
rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
- reg_c70);
+ rtlpriv->reg_c70);
rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
- reg_85c);
- rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
+ rtlpriv->reg_85c);
+ rtl_set_bbreg(hw, 0xa74, 0xF000, rtlpriv->reg_a74);
rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
}
@@ -1213,6 +1329,7 @@ static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ /* Determine the minimum RSSI */
if (((mac->link_state == MAC80211_NOLINK)) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
dm_pstable->rssi_val_min = 0;
@@ -1241,6 +1358,7 @@ static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
dm_pstable->rssi_val_min);
}
+ /* Power Saving for 92C */
if (IS_92C_SERIAL(rtlhal->version))
;/* rtl92c_dm_1r_cca(hw); */
else
@@ -1252,12 +1370,23 @@ void rtl92c_dm_init(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+ rtlpriv->dm.dm_flag = DYNAMIC_FUNC_DISABLE | DYNAMIC_FUNC_DIG;
+ rtlpriv->dm.undec_sm_pwdb = -1;
+ rtlpriv->dm.undec_sm_cck = -1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtl92c_dm_diginit(hw);
+
+ rtlpriv->dm.dm_flag |= HAL_DM_HIPWR_DISABLE;
rtl92c_dm_init_dynamic_txpower(hw);
+
rtl92c_dm_init_edca_turbo(hw);
rtl92c_dm_init_rate_adaptive_mask(hw);
+ rtlpriv->dm.dm_flag |= DYNAMIC_FUNC_SS;
rtl92c_dm_initialize_txpower_tracking(hw);
rtl92c_dm_init_dynamic_bb_powersaving(hw);
+
+ rtlpriv->dm.ofdm_pkt_cnt = 0;
+ rtlpriv->dm.dm_rssi_sel = RSSI_DEFAULT;
}
EXPORT_SYMBOL(rtl92c_dm_init);
@@ -1308,7 +1437,7 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
}
if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
- rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL2;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
@@ -1328,8 +1457,16 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
"PHY_SetTxPowerLevel8192S() Channel = %d\n",
rtlphy->current_channel);
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+ if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_NORMAL)
+ dm_restorepowerindex(hw);
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_LEVEL1)
+ dm_writepowerindex(hw, 0x14);
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_LEVEL2)
+ dm_writepowerindex(hw, 0x10);
}
-
rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}
@@ -1400,12 +1537,6 @@ u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw)
else
curr_bt_rssi_state &= (~BT_RSSI_STATE_SPECIAL_LOW);
- /* Set Tx Power according to BT status. */
- if (undec_sm_pwdb >= 30)
- curr_bt_rssi_state |= BT_RSSI_STATE_TXPOWER_LOW;
- else if (undec_sm_pwdb < 25)
- curr_bt_rssi_state &= (~BT_RSSI_STATE_TXPOWER_LOW);
-
/* Check BT state related to BT_Idle in B/G mode. */
if (undec_sm_pwdb < 15)
curr_bt_rssi_state |= BT_RSSI_STATE_BG_EDCA_LOW;
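Taken together, the dm_common.c changes above do two things: they back up the six OFDM power-index registers 0xc90-0xc9a so dynamic TX power on USB high-PA boards can clamp and later restore them (dm_savepowerindex()/dm_writepowerindex()/dm_restorepowerindex()), and they rework the DIG loop so the initial-gain value is clamped to [rx_gain_min, rx_gain_max], with forbidden_igi/large_fa_hit raising the lower bound after repeated false-alarm storms and recover_cnt relaxing it again over time. How the power-index trio is applied by rtl92c_dm_dynamic_txpower(), in paraphrase (lvl stands for rtlpriv->dm.dynamic_txhighpower_lvl):

    if (lvl == TXHIGHPWRLEVEL_NORMAL)
        dm_restorepowerindex(hw);        /* put the saved 0xc90..0xc9a values back */
    else if (lvl == TXHIGHPWRLEVEL_LEVEL1)
        dm_writepowerindex(hw, 0x14);    /* near-field AP: clamp the power index */
    else if (lvl == TXHIGHPWRLEVEL_LEVEL2)
        dm_writepowerindex(hw, 0x10);    /* very near field: clamp harder */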
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
index 518e208c0180..4f232a063636 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
@@ -91,6 +91,17 @@
#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+#define DYNAMIC_FUNC_DISABLE 0x0
+#define DYNAMIC_FUNC_DIG BIT(0)
+#define DYNAMIC_FUNC_HP BIT(1)
+#define DYNAMIC_FUNC_SS BIT(2) /*Tx Power Tracking*/
+#define DYNAMIC_FUNC_BT BIT(3)
+#define DYNAMIC_FUNC_ANT_DIV BIT(4)
+
+#define RSSI_CCK 0
+#define RSSI_OFDM 1
+#define RSSI_DEFAULT 2
+
struct swat_t {
u8 failure_cnt;
u8 try_flag;
@@ -167,5 +178,8 @@ void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw);
void rtl92c_dm_bt_coexist(struct ieee80211_hw *hw);
+void dm_savepowerindex(struct ieee80211_hw *hw);
+void dm_writepowerindex(struct ieee80211_hw *hw, u8 value);
+void dm_restorepowerindex(struct ieee80211_hw *hw);
#endif
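With these definitions, dm_flag becomes a bitmask of independently switchable dynamic mechanisms (DIG, high power, TX power tracking, BT coexistence, antenna diversity) rather than a single on/off value; rtl92c_dm_dig(), for example, now begins with:

    if (!(rtlpriv->dm.dm_flag & DYNAMIC_FUNC_DIG))
        return;    /* DIG disabled for this adapter */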
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 0c0e78263a66..9e32ac8a4425 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -1147,6 +1147,12 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
0x522, 0x550, 0x551, 0x040
};
+ u32 iqk_bb_reg_92C[9] = {
+ 0xc04, 0xc08, 0x874, 0xb68,
+ 0xb6c, 0x870, 0x860, 0x864,
+ 0x800
+ };
+
const u32 retrycount = 2;
if (t == 0) {
@@ -1157,6 +1163,8 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
rtlphy->adda_backup, 16);
_rtl92c_phy_save_mac_registers(hw, iqk_mac_reg,
rtlphy->iqk_mac_backup);
+ _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg_92C,
+ rtlphy->iqk_bb_backup, 9);
}
_rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t);
if (t == 0) {
@@ -1167,14 +1175,18 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
if (!rtlphy->rfpi_enable)
_rtl92c_phy_pi_mode_switch(hw, true);
- if (t == 0) {
- rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
- rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
- rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
- }
+
+ rtl_set_bbreg(hw, 0x800, BIT(24), 0x0);
+
rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
+
+ rtl_set_bbreg(hw, 0x870, BIT(10), 0x1);
+ rtl_set_bbreg(hw, 0x870, BIT(26), 0x1);
+ rtl_set_bbreg(hw, 0x860, BIT(10), 0x0);
+ rtl_set_bbreg(hw, 0x864, BIT(10), 0x0);
+
if (is2t) {
rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
@@ -1239,13 +1251,9 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
0x3FF0000) >> 16;
}
}
- rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
- rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
- rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
+
rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
- rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
- if (is2t)
- rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
+
if (t != 0) {
if (!rtlphy->rfpi_enable)
_rtl92c_phy_pi_mode_switch(hw, false);
@@ -1253,6 +1261,15 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
rtlphy->adda_backup, 16);
_rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg,
rtlphy->iqk_mac_backup);
+ _rtl92c_phy_reload_adda_registers(hw, iqk_bb_reg_92C,
+ rtlphy->iqk_bb_backup, 9);
+
+ rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
+ if (is2t)
+ rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
+
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00);
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
index 16a0b9e59acf..c16209a336ea 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
@@ -101,6 +101,15 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
"PHY_SetTxPowerLevel8192S() Channel = %d\n",
rtlphy->current_channel);
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+ if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_NORMAL)
+ dm_restorepowerindex(hw);
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_LEVEL1)
+ dm_writepowerindex(hw, 0x14);
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_LEVEL2)
+ dm_writepowerindex(hw, 0x10);
}
rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
index d947e7d350bb..fafa6bac2a3f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
@@ -30,3 +30,6 @@
#include "../rtl8192ce/dm.h"
void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw);
+void dm_savepowerindex(struct ieee80211_hw *hw);
+void dm_writepowerindex(struct ieee80211_hw *hw, u8 value);
+void dm_restorepowerindex(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 189ba124a8c6..468bf73cc883 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1022,7 +1022,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
if (ppsc->rfpwr_state == ERFON) {
rtl92c_phy_set_rfpath_switch(hw, 1);
if (iqk_initialized) {
- rtl92c_phy_iq_calibrate(hw, false);
+ rtl92c_phy_iq_calibrate(hw, true);
} else {
rtl92c_phy_iq_calibrate(hw, false);
iqk_initialized = true;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 34e56308301e..0c09240eadcc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -120,6 +120,7 @@ bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u16 regval;
+ u32 regval32;
u8 b_reg_hwparafile = 1;
_rtl92c_phy_init_bb_rf_register_definition(hw);
@@ -135,8 +136,11 @@ bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw)
} else if (IS_HARDWARE_TYPE_8192CU(rtlhal)) {
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD |
FEN_BB_GLB_RSTn | FEN_BBRSTB);
- rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
}
+ regval32 = rtl_read_dword(rtlpriv, 0x87c);
+ rtl_write_dword(rtlpriv, 0x87c, regval32 & (~BIT(31)));
+ if (IS_HARDWARE_TYPE_8192CU(rtlhal))
+ rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
if (b_reg_hwparafile == 1)
rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index 2119313a737b..b878d56d2f4d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -85,17 +85,15 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
if (mac->act_scanning) {
tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
- if (turbo_scanoff) {
- for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
- tx_agc[idx1] = ppowerlevel[idx1] |
- (ppowerlevel[idx1] << 8) |
- (ppowerlevel[idx1] << 16) |
- (ppowerlevel[idx1] << 24);
- if (rtlhal->interface == INTF_USB) {
- if (tx_agc[idx1] > 0x20 &&
- rtlefuse->external_pa)
- tx_agc[idx1] = 0x20;
- }
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ if (rtlhal->interface == INTF_USB) {
+ if (tx_agc[idx1] > 0x20 &&
+ rtlefuse->external_pa)
+ tx_agc[idx1] = 0x20;
}
}
} else {
@@ -107,7 +105,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
TXHIGHPWRLEVEL_LEVEL2) {
tx_agc[RF90_PATH_A] = 0x00000000;
tx_agc[RF90_PATH_B] = 0x00000000;
- } else{
+ } else {
for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
tx_agc[idx1] = ppowerlevel[idx1] |
(ppowerlevel[idx1] << 8) |
@@ -373,7 +371,12 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
regoffset == RTXAGC_B_MCS07_MCS04)
regoffset = 0xc98;
for (i = 0; i < 3; i++) {
- writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
+ if (i != 2)
+ writeVal = (writeVal > 8) ?
+ (writeVal - 8) : 0;
+ else
+ writeVal = (writeVal > 6) ?
+ (writeVal - 6) : 0;
rtl_write_byte(rtlpriv, (u32)(regoffset + i),
(u8)writeVal);
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 9936de716ad5..c61311084d7e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -50,6 +50,9 @@ MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n USB wireless");
MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_A.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin");
static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
{
@@ -69,14 +72,21 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
"Can't alloc buffer for fw\n");
return 1;
}
-
+ if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) &&
+ !IS_92C_SERIAL(rtlpriv->rtlhal.version)) {
+ rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_A.bin";
+ } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) {
+ rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_B.bin";
+ } else {
+ rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
+ }
+ /* provide name of alternative file */
+ rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin";
pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name);
rtlpriv->max_fw_size = 0x4000;
err = request_firmware_nowait(THIS_MODULE, 1,
rtlpriv->cfg->fw_name, rtlpriv->io.dev,
GFP_KERNEL, hw, rtl_fw_cb);
-
-
return err;
}
@@ -307,6 +317,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
{RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
/* HP - Lite-On ,8188CUS Slim Combo */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
index 966be519edb8..7903c154de00 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
@@ -36,7 +36,7 @@ u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH] = {
0x804, 0x00000003,
0x808, 0x0000fc00,
0x80c, 0x0000000a,
- 0x810, 0x10005388,
+ 0x810, 0x10000330,
0x814, 0x020c3d10,
0x818, 0x02200385,
0x81c, 0x00000000,
@@ -110,22 +110,22 @@ u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH] = {
0xc44, 0x000100b7,
0xc48, 0xec020107,
0xc4c, 0x007f037f,
- 0xc50, 0x6954341e,
+ 0xc50, 0x69543420,
0xc54, 0x43bc0094,
- 0xc58, 0x6954341e,
+ 0xc58, 0x69543420,
0xc5c, 0x433c0094,
0xc60, 0x00000000,
0xc64, 0x5116848b,
0xc68, 0x47c00bff,
0xc6c, 0x00000036,
0xc70, 0x2c7f000d,
- 0xc74, 0x0186115b,
+ 0xc74, 0x2186115b,
0xc78, 0x0000001f,
0xc7c, 0x00b99612,
0xc80, 0x40000100,
0xc84, 0x20f60000,
0xc88, 0x40000100,
- 0xc8c, 0x20200000,
+ 0xc8c, 0xa0e40000,
0xc90, 0x00121820,
0xc94, 0x00000000,
0xc98, 0x00121820,
@@ -226,7 +226,7 @@ u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = {
0x804, 0x00000001,
0x808, 0x0000fc00,
0x80c, 0x0000000a,
- 0x810, 0x10005388,
+ 0x810, 0x10000330,
0x814, 0x020c3d10,
0x818, 0x02200385,
0x81c, 0x00000000,
@@ -300,9 +300,9 @@ u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = {
0xc44, 0x000100b7,
0xc48, 0xec020107,
0xc4c, 0x007f037f,
- 0xc50, 0x6954341e,
+ 0xc50, 0x69543420,
0xc54, 0x43bc0094,
- 0xc58, 0x6954341e,
+ 0xc58, 0x69543420,
0xc5c, 0x433c0094,
0xc60, 0x00000000,
0xc64, 0x5116848b,
@@ -340,7 +340,7 @@ u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = {
0xce4, 0x00000000,
0xce8, 0x37644302,
0xcec, 0x2f97d40c,
- 0xd00, 0x00080740,
+ 0xd00, 0x00000740,
0xd04, 0x00020401,
0xd08, 0x0000907f,
0xd0c, 0x20010201,
@@ -633,17 +633,17 @@ u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH] = {
0x012, 0x00071000,
0x012, 0x000b0000,
0x012, 0x000fc000,
- 0x013, 0x000287af,
+ 0x013, 0x000287b3,
0x013, 0x000244b7,
0x013, 0x000204ab,
0x013, 0x0001c49f,
0x013, 0x00018493,
- 0x013, 0x00014297,
- 0x013, 0x00010295,
- 0x013, 0x0000c298,
- 0x013, 0x0000819c,
- 0x013, 0x000040a8,
- 0x013, 0x0000001c,
+ 0x013, 0x0001429b,
+ 0x013, 0x00010299,
+ 0x013, 0x0000c29c,
+ 0x013, 0x000081a0,
+ 0x013, 0x000040ac,
+ 0x013, 0x00000020,
0x014, 0x0001944c,
0x014, 0x00059444,
0x014, 0x0009944c,
@@ -932,10 +932,10 @@ u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH] = {
0x608, 0x0000000e,
0x609, 0x0000002a,
0x652, 0x00000020,
- 0x63c, 0x0000000a,
- 0x63d, 0x0000000e,
- 0x63e, 0x0000000a,
- 0x63f, 0x0000000e,
+ 0x63c, 0x00000008,
+ 0x63d, 0x00000008,
+ 0x63e, 0x0000000c,
+ 0x63f, 0x0000000c,
0x66e, 0x00000005,
0x700, 0x00000021,
0x701, 0x00000043,
diff --git a/drivers/net/wireless/rtlwifi/stats.c b/drivers/net/wireless/rtlwifi/stats.c
index 8ed31744a054..4f083fc1d360 100644
--- a/drivers/net/wireless/rtlwifi/stats.c
+++ b/drivers/net/wireless/rtlwifi/stats.c
@@ -176,6 +176,7 @@ static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
struct rtl_sta_info *drv_priv = NULL;
struct ieee80211_sta *sta = NULL;
long undec_sm_pwdb;
+ long undec_sm_cck;
rcu_read_lock();
if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
@@ -185,12 +186,16 @@ static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
if (sta) {
drv_priv = (struct rtl_sta_info *) sta->drv_priv;
undec_sm_pwdb = drv_priv->rssi_stat.undec_sm_pwdb;
+ undec_sm_cck = drv_priv->rssi_stat.undec_sm_cck;
} else {
undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
+ undec_sm_cck = rtlpriv->dm.undec_sm_cck;
}
if (undec_sm_pwdb < 0)
undec_sm_pwdb = pstatus->rx_pwdb_all;
+ if (undec_sm_cck < 0)
+ undec_sm_cck = pstatus->rx_pwdb_all;
if (pstatus->rx_pwdb_all > (u32) undec_sm_pwdb) {
undec_sm_pwdb = (((undec_sm_pwdb) *
(RX_SMOOTH_FACTOR - 1)) +
@@ -200,6 +205,15 @@ static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
undec_sm_pwdb = (((undec_sm_pwdb) * (RX_SMOOTH_FACTOR - 1)) +
(pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
}
+ if (pstatus->rx_pwdb_all > (u32) undec_sm_cck) {
+ undec_sm_cck = (((undec_sm_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ undec_sm_cck = undec_sm_cck + 1;
+ } else {
+ undec_sm_pwdb = (((undec_sm_cck) * (RX_SMOOTH_FACTOR - 1)) +
+ (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ }
if (sta) {
drv_priv->rssi_stat.undec_sm_pwdb = undec_sm_pwdb;
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 6e2b5c5c83c8..4933f02ce1d5 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -475,14 +475,14 @@ static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
rtlpriv->stats.rxbytesunicast += skb->len;
}
- rtl_is_special_data(hw, skb, false);
-
if (ieee80211_is_data(fc)) {
rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
if (unicast)
rtlpriv->link_info.num_rx_inperiod++;
}
+ /* static bcn for roaming */
+ rtl_beacon_statistic(hw, skb);
}
}
@@ -517,8 +517,6 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
rtlpriv->stats.rxbytesunicast += skb->len;
}
- rtl_is_special_data(hw, skb, false);
-
if (ieee80211_is_data(fc)) {
rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
@@ -553,7 +551,7 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
}
}
-#define __RX_SKB_MAX_QUEUED 32
+#define __RX_SKB_MAX_QUEUED 64
static void _rtl_rx_work(unsigned long param)
{
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 0c65386fa30d..8c647391bedf 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1033,6 +1033,7 @@ struct rtl_ht_agg {
struct rssi_sta {
long undec_sm_pwdb;
+ long undec_sm_cck;
};
struct rtl_tid_data {
@@ -1323,8 +1324,10 @@ struct fast_ant_training {
struct rtl_dm {
/*PHY status for Dynamic Management */
long entry_min_undec_sm_pwdb;
+ long undec_sm_cck;
long undec_sm_pwdb; /*out dm */
long entry_max_undec_sm_pwdb;
+ s32 ofdm_pkt_cnt;
bool dm_initialgain_enable;
bool dynamic_txpower_enable;
bool current_turbo_edca;
@@ -1339,6 +1342,7 @@ struct rtl_dm {
bool inform_fw_driverctrldm;
bool current_mrc_switch;
u8 txpowercount;
+ u8 powerindex_backup[6];
u8 thermalvalue_rxgain;
u8 thermalvalue_iqk;
@@ -1350,7 +1354,9 @@ struct rtl_dm {
bool done_txpower;
u8 dynamic_txhighpower_lvl; /*Tx high power level */
u8 dm_flag; /*Indicate each dynamic mechanism's status. */
+ u8 dm_flag_tmp;
u8 dm_type;
+ u8 dm_rssi_sel;
u8 txpower_track_control;
bool interrupt_migration;
bool disable_tx_int;
@@ -1804,6 +1810,7 @@ struct rtl_hal_cfg {
bool write_readback;
char *name;
char *fw_name;
+ char *alt_fw_name;
struct rtl_hal_ops *ops;
struct rtl_mod_params *mod_params;
struct rtl_hal_usbint_cfg *usb_interface_cfg;
@@ -1948,6 +1955,7 @@ struct dig_t {
u8 pre_ccastate;
u8 cur_ccasate;
u8 large_fa_hit;
+ u8 dig_dynamic_min;
u8 forbidden_igi;
u8 dig_state;
u8 dig_highpwrstate;
@@ -2028,22 +2036,15 @@ struct rtl_priv {
struct dig_t dm_digtable;
struct ps_t dm_pstable;
- /* section shared by individual drivers */
- union {
- struct { /* data buffer pointer for USB reads */
- __le32 *usb_data;
- int usb_data_index;
- bool initialized;
- };
- struct { /* section for 8723ae */
- bool reg_init; /* true if regs saved */
- u32 reg_874;
- u32 reg_c70;
- u32 reg_85c;
- u32 reg_a74;
- bool bt_operation_on;
- };
- };
+ u32 reg_874;
+ u32 reg_c70;
+ u32 reg_85c;
+ u32 reg_a74;
+ bool reg_init; /* true if regs saved */
+ bool bt_operation_on;
+ __le32 *usb_data;
+ int usb_data_index;
+ bool initialized;
bool enter_ps; /* true when entering PS */
u8 rate_mask[5];
diff --git a/drivers/net/wireless/ti/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index db6430c1a084..5a4ec56c83d0 100644
--- a/drivers/net/wireless/ti/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
@@ -18,10 +18,8 @@ int wl1251_acx_frame_rates(struct wl1251 *wl, u8 ctrl_rate, u8 ctrl_mod,
wl1251_debug(DEBUG_ACX, "acx frame rates");
rates = kzalloc(sizeof(*rates), GFP_KERNEL);
- if (!rates) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!rates)
+ return -ENOMEM;
rates->tx_ctrl_frame_rate = ctrl_rate;
rates->tx_ctrl_frame_mod = ctrl_mod;
@@ -49,10 +47,8 @@ int wl1251_acx_station_id(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx dot11_station_id");
mac = kzalloc(sizeof(*mac), GFP_KERNEL);
- if (!mac) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!mac)
+ return -ENOMEM;
for (i = 0; i < ETH_ALEN; i++)
mac->mac[i] = wl->mac_addr[ETH_ALEN - 1 - i];
@@ -74,10 +70,8 @@ int wl1251_acx_default_key(struct wl1251 *wl, u8 key_id)
wl1251_debug(DEBUG_ACX, "acx dot11_default_key (%d)", key_id);
default_key = kzalloc(sizeof(*default_key), GFP_KERNEL);
- if (!default_key) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!default_key)
+ return -ENOMEM;
default_key->id = key_id;
@@ -104,10 +98,8 @@ int wl1251_acx_wake_up_conditions(struct wl1251 *wl, u8 wake_up_event,
wl1251_debug(DEBUG_ACX, "acx wake up conditions");
wake_up = kzalloc(sizeof(*wake_up), GFP_KERNEL);
- if (!wake_up) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!wake_up)
+ return -ENOMEM;
wake_up->wake_up_event = wake_up_event;
wake_up->listen_interval = listen_interval;
@@ -132,16 +124,13 @@ int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth)
wl1251_debug(DEBUG_ACX, "acx sleep auth");
auth = kzalloc(sizeof(*auth), GFP_KERNEL);
- if (!auth) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!auth)
+ return -ENOMEM;
auth->sleep_auth = sleep_auth;
ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
-out:
kfree(auth);
return ret;
}
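The wl1251 acx.c hunks, here and in the functions that follow, all make the same transformation: when kzalloc() fails nothing has been allocated or locked yet, so the functions return -ENOMEM directly instead of jumping to an out: label whose only effect at that point was kfree(NULL), and sleep_auth additionally drops the now-pointless label. The resulting shape, using the sleep-auth command as the example:

    auth = kzalloc(sizeof(*auth), GFP_KERNEL);
    if (!auth)
        return -ENOMEM;            /* nothing to unwind yet */

    auth->sleep_auth = sleep_auth;
    ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
    kfree(auth);                   /* single exit once a resource exists */
    return ret;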
@@ -154,10 +143,8 @@ int wl1251_acx_fw_version(struct wl1251 *wl, char *buf, size_t len)
wl1251_debug(DEBUG_ACX, "acx fw rev");
rev = kzalloc(sizeof(*rev), GFP_KERNEL);
- if (!rev) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!rev)
+ return -ENOMEM;
ret = wl1251_cmd_interrogate(wl, ACX_FW_REV, rev, sizeof(*rev));
if (ret < 0) {
@@ -191,10 +178,8 @@ int wl1251_acx_tx_power(struct wl1251 *wl, int power)
return -EINVAL;
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->current_tx_power = power * 10;
@@ -209,7 +194,7 @@ out:
return ret;
}
-int wl1251_acx_feature_cfg(struct wl1251 *wl)
+int wl1251_acx_feature_cfg(struct wl1251 *wl, u32 data_flow_options)
{
struct acx_feature_config *feature;
int ret;
@@ -217,13 +202,11 @@ int wl1251_acx_feature_cfg(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx feature cfg");
feature = kzalloc(sizeof(*feature), GFP_KERNEL);
- if (!feature) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!feature)
+ return -ENOMEM;
- /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
- feature->data_flow_options = 0;
+ /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE can be set */
+ feature->data_flow_options = data_flow_options;
feature->options = 0;
ret = wl1251_cmd_configure(wl, ACX_FEATURE_CFG,
@@ -261,10 +244,8 @@ int wl1251_acx_data_path_params(struct wl1251 *wl,
wl1251_debug(DEBUG_ACX, "acx data path params");
params = kzalloc(sizeof(*params), GFP_KERNEL);
- if (!params) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!params)
+ return -ENOMEM;
params->rx_packet_ring_chunk_size = DP_RX_PACKET_RING_CHUNK_SIZE;
params->tx_packet_ring_chunk_size = DP_TX_PACKET_RING_CHUNK_SIZE;
@@ -309,10 +290,8 @@ int wl1251_acx_rx_msdu_life_time(struct wl1251 *wl, u32 life_time)
wl1251_debug(DEBUG_ACX, "acx rx msdu life time");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->lifetime = life_time;
ret = wl1251_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME,
@@ -335,10 +314,8 @@ int wl1251_acx_rx_config(struct wl1251 *wl, u32 config, u32 filter)
wl1251_debug(DEBUG_ACX, "acx rx config");
rx_config = kzalloc(sizeof(*rx_config), GFP_KERNEL);
- if (!rx_config) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!rx_config)
+ return -ENOMEM;
rx_config->config_options = config;
rx_config->filter_options = filter;
@@ -363,10 +340,8 @@ int wl1251_acx_pd_threshold(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx data pd threshold");
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!pd)
+ return -ENOMEM;
/* FIXME: threshold value not set */
@@ -389,10 +364,8 @@ int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time)
wl1251_debug(DEBUG_ACX, "acx slot");
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!slot)
+ return -ENOMEM;
slot->wone_index = STATION_WONE_INDEX;
slot->slot_time = slot_time;
@@ -408,7 +381,8 @@ out:
return ret;
}
-int wl1251_acx_group_address_tbl(struct wl1251 *wl)
+int wl1251_acx_group_address_tbl(struct wl1251 *wl, bool enable,
+ void *mc_list, u32 mc_list_len)
{
struct acx_dot11_grp_addr_tbl *acx;
int ret;
@@ -416,15 +390,13 @@ int wl1251_acx_group_address_tbl(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx group address tbl");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
/* MAC filtering */
- acx->enabled = 0;
- acx->num_groups = 0;
- memset(acx->mac_table, 0, ADDRESS_GROUP_MAX_LEN);
+ acx->enabled = enable;
+ acx->num_groups = mc_list_len;
+ memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
ret = wl1251_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL,
acx, sizeof(*acx));
@@ -444,10 +416,8 @@ int wl1251_acx_service_period_timeout(struct wl1251 *wl)
int ret;
rx_timeout = kzalloc(sizeof(*rx_timeout), GFP_KERNEL);
- if (!rx_timeout) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!rx_timeout)
+ return -ENOMEM;
wl1251_debug(DEBUG_ACX, "acx service period timeout");
@@ -475,10 +445,8 @@ int wl1251_acx_rts_threshold(struct wl1251 *wl, u16 rts_threshold)
wl1251_debug(DEBUG_ACX, "acx rts threshold");
rts = kzalloc(sizeof(*rts), GFP_KERNEL);
- if (!rts) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!rts)
+ return -ENOMEM;
rts->threshold = rts_threshold;
@@ -501,10 +469,8 @@ int wl1251_acx_beacon_filter_opt(struct wl1251 *wl, bool enable_filter)
wl1251_debug(DEBUG_ACX, "acx beacon filter opt");
beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL);
- if (!beacon_filter) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!beacon_filter)
+ return -ENOMEM;
beacon_filter->enable = enable_filter;
beacon_filter->max_num_beacons = 0;
@@ -530,10 +496,8 @@ int wl1251_acx_beacon_filter_table(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx beacon filter table");
ie_table = kzalloc(sizeof(*ie_table), GFP_KERNEL);
- if (!ie_table) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!ie_table)
+ return -ENOMEM;
/* configure default beacon pass-through rules */
ie_table->num_ie = 1;
@@ -560,10 +524,8 @@ int wl1251_acx_conn_monit_params(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx connection monitor parameters");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->synch_fail_thold = SYNCH_FAIL_DEFAULT_THRESHOLD;
acx->bss_lose_timeout = NO_BEACON_DEFAULT_TIMEOUT;
@@ -589,10 +551,8 @@ int wl1251_acx_sg_enable(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx sg enable");
pta = kzalloc(sizeof(*pta), GFP_KERNEL);
- if (!pta) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!pta)
+ return -ENOMEM;
pta->enable = SG_ENABLE;
@@ -615,10 +575,8 @@ int wl1251_acx_sg_cfg(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx sg cfg");
param = kzalloc(sizeof(*param), GFP_KERNEL);
- if (!param) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!param)
+ return -ENOMEM;
/* BT-WLAN coext parameters */
param->min_rate = RATE_INDEX_24MBPS;
@@ -669,10 +627,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx cca threshold");
detection = kzalloc(sizeof(*detection), GFP_KERNEL);
- if (!detection) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!detection)
+ return -ENOMEM;
detection->rx_cca_threshold = CCA_THRSH_DISABLE_ENERGY_D;
detection->tx_energy_detection = 0;
@@ -682,7 +638,6 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl)
if (ret < 0)
wl1251_warning("failed to set cca threshold: %d", ret);
-out:
kfree(detection);
return ret;
}
@@ -695,10 +650,8 @@ int wl1251_acx_bcn_dtim_options(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx bcn dtim options");
bb = kzalloc(sizeof(*bb), GFP_KERNEL);
- if (!bb) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!bb)
+ return -ENOMEM;
bb->beacon_rx_timeout = BCN_RX_TIMEOUT_DEF_VALUE;
bb->broadcast_timeout = BROADCAST_RX_TIMEOUT_DEF_VALUE;
@@ -724,10 +677,8 @@ int wl1251_acx_aid(struct wl1251 *wl, u16 aid)
wl1251_debug(DEBUG_ACX, "acx aid");
acx_aid = kzalloc(sizeof(*acx_aid), GFP_KERNEL);
- if (!acx_aid) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx_aid)
+ return -ENOMEM;
acx_aid->aid = aid;
@@ -750,10 +701,8 @@ int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask)
wl1251_debug(DEBUG_ACX, "acx event mbox mask");
mask = kzalloc(sizeof(*mask), GFP_KERNEL);
- if (!mask) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!mask)
+ return -ENOMEM;
/* high event mask is unused */
mask->high_event_mask = 0xffffffff;
@@ -805,10 +754,8 @@ int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble)
wl1251_debug(DEBUG_ACX, "acx_set_preamble");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->preamble = preamble;
@@ -832,10 +779,8 @@ int wl1251_acx_cts_protect(struct wl1251 *wl,
wl1251_debug(DEBUG_ACX, "acx_set_ctsprotect");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->ctsprotect = ctsprotect;
@@ -856,10 +801,8 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime)
int ret;
tsf_info = kzalloc(sizeof(*tsf_info), GFP_KERNEL);
- if (!tsf_info) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!tsf_info)
+ return -ENOMEM;
ret = wl1251_cmd_interrogate(wl, ACX_TSF_INFO,
tsf_info, sizeof(*tsf_info));
@@ -900,19 +843,22 @@ int wl1251_acx_rate_policies(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx rate policies");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
/* configure one default (one-size-fits-all) rate class */
- acx->rate_class_cnt = 1;
+ acx->rate_class_cnt = 2;
acx->rate_class[0].enabled_rates = ACX_RATE_MASK_UNSPECIFIED;
acx->rate_class[0].short_retry_limit = ACX_RATE_RETRY_LIMIT;
acx->rate_class[0].long_retry_limit = ACX_RATE_RETRY_LIMIT;
acx->rate_class[0].aflags = 0;
+ /* no-retry rate class */
+ acx->rate_class[1].enabled_rates = ACX_RATE_MASK_UNSPECIFIED;
+ acx->rate_class[1].short_retry_limit = 0;
+ acx->rate_class[1].long_retry_limit = 0;
+ acx->rate_class[1].aflags = 0;
+
ret = wl1251_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
if (ret < 0) {
wl1251_warning("Setting of rate policies failed: %d", ret);
@@ -932,10 +878,8 @@ int wl1251_acx_mem_cfg(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx mem cfg");
mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
- if (!mem_conf) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!mem_conf)
+ return -ENOMEM;
/* memory config */
mem_conf->mem_config.num_stations = cpu_to_le16(DEFAULT_NUM_STATIONS);
@@ -979,10 +923,8 @@ int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim)
wl1251_debug(DEBUG_ACX, "acx tbtt and dtim");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->tbtt = tbtt;
acx->dtim = dtim;
@@ -1008,10 +950,8 @@ int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
wl1251_debug(DEBUG_ACX, "acx bet enable");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->enable = mode;
acx->max_consecutive = max_consecutive;
@@ -1027,6 +967,32 @@ out:
return ret;
}
+int wl1251_acx_arp_ip_filter(struct wl1251 *wl, bool enable, __be32 address)
+{
+ struct wl1251_acx_arp_filter *acx;
+ int ret;
+
+ wl1251_debug(DEBUG_ACX, "acx arp ip filter, enable: %d", enable);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx)
+ return -ENOMEM;
+
+ acx->version = ACX_IPV4_VERSION;
+ acx->enable = enable;
+
+ if (enable)
+ memcpy(acx->address, &address, ACX_IPV4_ADDR_SIZE);
+
+ ret = wl1251_cmd_configure(wl, ACX_ARP_IP_FILTER,
+ acx, sizeof(*acx));
+ if (ret < 0)
+ wl1251_warning("failed to set arp ip filter: %d", ret);
+
+ kfree(acx);
+ return ret;
+}
+
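/*
 * Editorial aside, not part of the patch: the address argument is __be32
 * (network byte order), so the memcpy() above places it into acx->address[]
 * as the firmware expects; only the first ACX_IPV4_ADDR_SIZE bytes are
 * meaningful, the rest stay zero from kzalloc().  A hypothetical caller
 * (the main.c hunk later in this patch does essentially this):
 */
	__be32 addr = bss_conf->arp_addr_list[0];
	bool want_filter = bss_conf->assoc && bss_conf->arp_addr_cnt == 1;

	ret = wl1251_acx_arp_ip_filter(wl, want_filter, addr);
	if (ret < 0)
		wl1251_warning("failed to set arp ip filter: %d", ret);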
int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifs, u16 txop)
{
@@ -1037,11 +1003,8 @@ int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
"aifs %d txop %d", ac, cw_min, cw_max, aifs, txop);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->ac = ac;
acx->cw_min = cw_min;
@@ -1073,11 +1036,8 @@ int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
ps_scheme, ack_policy);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->queue = queue;
acx->type = type;
diff --git a/drivers/net/wireless/ti/wl1251/acx.h b/drivers/net/wireless/ti/wl1251/acx.h
index c2ba100f9b1a..2bdec38699f4 100644
--- a/drivers/net/wireless/ti/wl1251/acx.h
+++ b/drivers/net/wireless/ti/wl1251/acx.h
@@ -350,8 +350,8 @@ struct acx_slot {
} __packed;
-#define ADDRESS_GROUP_MAX (8)
-#define ADDRESS_GROUP_MAX_LEN (ETH_ALEN * ADDRESS_GROUP_MAX)
+#define ACX_MC_ADDRESS_GROUP_MAX (8)
+#define ACX_MC_ADDRESS_GROUP_MAX_LEN (ETH_ALEN * ACX_MC_ADDRESS_GROUP_MAX)
struct acx_dot11_grp_addr_tbl {
struct acx_header header;
@@ -359,7 +359,7 @@ struct acx_dot11_grp_addr_tbl {
u8 enabled;
u8 num_groups;
u8 pad[2];
- u8 mac_table[ADDRESS_GROUP_MAX_LEN];
+ u8 mac_table[ACX_MC_ADDRESS_GROUP_MAX_LEN];
} __packed;
@@ -1232,6 +1232,20 @@ struct wl1251_acx_bet_enable {
u8 padding[2];
} __packed;
+#define ACX_IPV4_VERSION 4
+#define ACX_IPV6_VERSION 6
+#define ACX_IPV4_ADDR_SIZE 4
+struct wl1251_acx_arp_filter {
+ struct acx_header header;
+ u8 version; /* The IP version: 4 - IPv4, 6 - IPv6.*/
+ u8 enable; /* 1 - ARP filtering is enabled, 0 - disabled */
+ u8 padding[2];
+ u8 address[16]; /* The IP address used to filter ARP packets.
+ ARP packets that do not match this address are
+ dropped. When the IP Version is 4, the last 12
+			    bytes of the address are ignored. */
+} __packed;
+
struct wl1251_acx_ac_cfg {
struct acx_header header;
@@ -1440,7 +1454,7 @@ int wl1251_acx_wake_up_conditions(struct wl1251 *wl, u8 wake_up_event,
int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth);
int wl1251_acx_fw_version(struct wl1251 *wl, char *buf, size_t len);
int wl1251_acx_tx_power(struct wl1251 *wl, int power);
-int wl1251_acx_feature_cfg(struct wl1251 *wl);
+int wl1251_acx_feature_cfg(struct wl1251 *wl, u32 data_flow_options);
int wl1251_acx_mem_map(struct wl1251 *wl,
struct acx_header *mem_map, size_t len);
int wl1251_acx_data_path_params(struct wl1251 *wl,
@@ -1449,7 +1463,8 @@ int wl1251_acx_rx_msdu_life_time(struct wl1251 *wl, u32 life_time);
int wl1251_acx_rx_config(struct wl1251 *wl, u32 config, u32 filter);
int wl1251_acx_pd_threshold(struct wl1251 *wl);
int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time);
-int wl1251_acx_group_address_tbl(struct wl1251 *wl);
+int wl1251_acx_group_address_tbl(struct wl1251 *wl, bool enable,
+ void *mc_list, u32 mc_list_len);
int wl1251_acx_service_period_timeout(struct wl1251 *wl);
int wl1251_acx_rts_threshold(struct wl1251 *wl, u16 rts_threshold);
int wl1251_acx_beacon_filter_opt(struct wl1251 *wl, bool enable_filter);
@@ -1473,6 +1488,7 @@ int wl1251_acx_mem_cfg(struct wl1251 *wl);
int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
u8 max_consecutive);
+int wl1251_acx_arp_ip_filter(struct wl1251 *wl, bool enable, __be32 address);
int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifs, u16 txop);
int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
diff --git a/drivers/net/wireless/ti/wl1251/boot.c b/drivers/net/wireless/ti/wl1251/boot.c
index a2e5241382da..2000cd536077 100644
--- a/drivers/net/wireless/ti/wl1251/boot.c
+++ b/drivers/net/wireless/ti/wl1251/boot.c
@@ -299,7 +299,8 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
ROAMING_TRIGGER_LOW_RSSI_EVENT_ID |
ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID |
REGAINED_BSS_EVENT_ID | BT_PTA_SENSE_EVENT_ID |
- BT_PTA_PREDICTION_EVENT_ID | JOIN_EVENT_COMPLETE_ID;
+ BT_PTA_PREDICTION_EVENT_ID | JOIN_EVENT_COMPLETE_ID |
+ PS_REPORT_EVENT_ID;
ret = wl1251_event_unmask(wl);
if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index 6822b845efc1..223649bcaa5a 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -3,6 +3,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crc7.h>
+#include <linux/etherdevice.h>
#include "wl1251.h"
#include "reg.h"
@@ -203,11 +204,11 @@ out:
return ret;
}
-int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
+int wl1251_cmd_data_path_rx(struct wl1251 *wl, u8 channel, bool enable)
{
struct cmd_enabledisable_path *cmd;
int ret;
- u16 cmd_rx, cmd_tx;
+ u16 cmd_rx;
wl1251_debug(DEBUG_CMD, "cmd data path");
@@ -219,13 +220,10 @@ int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
cmd->channel = channel;
- if (enable) {
+ if (enable)
cmd_rx = CMD_ENABLE_RX;
- cmd_tx = CMD_ENABLE_TX;
- } else {
+ else
cmd_rx = CMD_DISABLE_RX;
- cmd_tx = CMD_DISABLE_TX;
- }
ret = wl1251_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd));
if (ret < 0) {
@@ -237,17 +235,38 @@ int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
wl1251_debug(DEBUG_BOOT, "rx %s cmd channel %d",
enable ? "start" : "stop", channel);
+out:
+ kfree(cmd);
+ return ret;
+}
+
+int wl1251_cmd_data_path_tx(struct wl1251 *wl, u8 channel, bool enable)
+{
+ struct cmd_enabledisable_path *cmd;
+ int ret;
+ u16 cmd_tx;
+
+ wl1251_debug(DEBUG_CMD, "cmd data path");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->channel = channel;
+
+ if (enable)
+ cmd_tx = CMD_ENABLE_TX;
+ else
+ cmd_tx = CMD_DISABLE_TX;
+
ret = wl1251_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd));
- if (ret < 0) {
+ if (ret < 0)
wl1251_error("tx %s cmd for channel %d failed",
enable ? "start" : "stop", channel);
- goto out;
- }
-
- wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
- enable ? "start" : "stop", channel);
+ else
+ wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
+ enable ? "start" : "stop", channel);
-out:
kfree(cmd);
return ret;
}
@@ -410,7 +429,9 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
struct wl1251_cmd_scan *cmd;
int i, ret = 0;
- wl1251_debug(DEBUG_CMD, "cmd scan");
+ wl1251_debug(DEBUG_CMD, "cmd scan channels %d", n_channels);
+
+ WARN_ON(n_channels > SCAN_MAX_NUM_OF_CHANNELS);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
@@ -421,6 +442,13 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
CFG_RX_MGMT_EN |
CFG_RX_BCN_EN);
cmd->params.scan_options = 0;
+ /*
+	 * Use a high-priority scan while not associated to work around a
+	 * firmware issue that causes never-ending scans (sometimes 20+ minutes).
+ * Note: This bug may be caused by the fw's DTIM handling.
+ */
+ if (is_zero_ether_addr(wl->bssid))
+ cmd->params.scan_options |= WL1251_SCAN_OPT_PRIORITY_HIGH;
cmd->params.num_channels = n_channels;
cmd->params.num_probe_requests = n_probes;
cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
diff --git a/drivers/net/wireless/ti/wl1251/cmd.h b/drivers/net/wireless/ti/wl1251/cmd.h
index ee4f2b391822..d824ff978311 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.h
+++ b/drivers/net/wireless/ti/wl1251/cmd.h
@@ -35,7 +35,8 @@ int wl1251_cmd_interrogate(struct wl1251 *wl, u16 id, void *buf, size_t len);
int wl1251_cmd_configure(struct wl1251 *wl, u16 id, void *buf, size_t len);
int wl1251_cmd_vbm(struct wl1251 *wl, u8 identity,
void *bitmap, u16 bitmap_len, u8 bitmap_control);
-int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable);
+int wl1251_cmd_data_path_rx(struct wl1251 *wl, u8 channel, bool enable);
+int wl1251_cmd_data_path_tx(struct wl1251 *wl, u8 channel, bool enable);
int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel,
u16 beacon_interval, u8 dtim_interval);
int wl1251_cmd_ps_mode(struct wl1251 *wl, u8 ps_mode);
@@ -167,6 +168,11 @@ struct cmd_read_write_memory {
#define CMDMBOX_HEADER_LEN 4
#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
+#define WL1251_SCAN_OPT_PASSIVE 1
+#define WL1251_SCAN_OPT_5GHZ_BAND 2
+#define WL1251_SCAN_OPT_TRIGGERD_SCAN 4
+#define WL1251_SCAN_OPT_PRIORITY_HIGH 8
+
#define WL1251_SCAN_MIN_DURATION 30000
#define WL1251_SCAN_MAX_DURATION 60000
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index 74ae8e1c2e33..db0105313745 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -46,6 +46,43 @@ static int wl1251_event_scan_complete(struct wl1251 *wl,
return ret;
}
+#define WL1251_PSM_ENTRY_RETRIES 3
+static int wl1251_event_ps_report(struct wl1251 *wl,
+ struct event_mailbox *mbox)
+{
+ int ret = 0;
+
+ wl1251_debug(DEBUG_EVENT, "ps status: %x", mbox->ps_status);
+
+ switch (mbox->ps_status) {
+ case EVENT_ENTER_POWER_SAVE_FAIL:
+ wl1251_debug(DEBUG_PSM, "PSM entry failed");
+
+ if (wl->station_mode != STATION_POWER_SAVE_MODE) {
+ /* remain in active mode */
+ wl->psm_entry_retry = 0;
+ break;
+ }
+
+ if (wl->psm_entry_retry < WL1251_PSM_ENTRY_RETRIES) {
+ wl->psm_entry_retry++;
+ ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+ } else {
+ wl1251_error("Power save entry failed, giving up");
+ wl->psm_entry_retry = 0;
+ }
+ break;
+ case EVENT_ENTER_POWER_SAVE_SUCCESS:
+ case EVENT_EXIT_POWER_SAVE_FAIL:
+ case EVENT_EXIT_POWER_SAVE_SUCCESS:
+ default:
+ wl->psm_entry_retry = 0;
+ break;
+ }
+
+	return ret;
+}
+
static void wl1251_event_mbox_dump(struct event_mailbox *mbox)
{
wl1251_debug(DEBUG_EVENT, "MBOX DUMP:");
@@ -80,7 +117,14 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
}
}
- if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
+ if (vector & PS_REPORT_EVENT_ID) {
+ wl1251_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
+ ret = wl1251_event_ps_report(wl, mbox);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (wl->vif && vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
/* indicate to the stack, that beacons have been lost */
diff --git a/drivers/net/wireless/ti/wl1251/event.h b/drivers/net/wireless/ti/wl1251/event.h
index 30eb5d150bf7..88570a5cd042 100644
--- a/drivers/net/wireless/ti/wl1251/event.h
+++ b/drivers/net/wireless/ti/wl1251/event.h
@@ -112,6 +112,13 @@ struct event_mailbox {
u8 padding[19];
} __packed;
+enum {
+ EVENT_ENTER_POWER_SAVE_FAIL = 0,
+ EVENT_ENTER_POWER_SAVE_SUCCESS,
+ EVENT_EXIT_POWER_SAVE_FAIL,
+ EVENT_EXIT_POWER_SAVE_SUCCESS,
+};
+
int wl1251_event_unmask(struct wl1251 *wl);
void wl1251_event_mbox_config(struct wl1251 *wl);
int wl1251_event_handle(struct wl1251 *wl, u8 mbox);
diff --git a/drivers/net/wireless/ti/wl1251/init.c b/drivers/net/wireless/ti/wl1251/init.c
index 89b43d35473c..1d799bffaa9f 100644
--- a/drivers/net/wireless/ti/wl1251/init.c
+++ b/drivers/net/wireless/ti/wl1251/init.c
@@ -33,7 +33,7 @@ int wl1251_hw_init_hwenc_config(struct wl1251 *wl)
{
int ret;
- ret = wl1251_acx_feature_cfg(wl);
+ ret = wl1251_acx_feature_cfg(wl, 0);
if (ret < 0) {
wl1251_warning("couldn't set feature config");
return ret;
@@ -127,7 +127,7 @@ int wl1251_hw_init_phy_config(struct wl1251 *wl)
if (ret < 0)
return ret;
- ret = wl1251_acx_group_address_tbl(wl);
+ ret = wl1251_acx_group_address_tbl(wl, true, NULL, 0);
if (ret < 0)
return ret;
@@ -394,8 +394,13 @@ int wl1251_hw_init(struct wl1251 *wl)
if (ret < 0)
goto out_free_data_path;
- /* Enable data path */
- ret = wl1251_cmd_data_path(wl, wl->channel, 1);
+ /* Enable rx data path */
+ ret = wl1251_cmd_data_path_rx(wl, wl->channel, 1);
+ if (ret < 0)
+ goto out_free_data_path;
+
+ /* Enable tx data path */
+ ret = wl1251_cmd_data_path_tx(wl, wl->channel, 1);
if (ret < 0)
goto out_free_data_path;
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 3291ffa95273..757e25784a8a 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -28,6 +28,7 @@
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/netdevice.h>
#include "wl1251.h"
#include "wl12xx_80211.h"
@@ -479,10 +480,13 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
wl->next_tx_complete = 0;
wl->elp = false;
wl->station_mode = STATION_ACTIVE_MODE;
+ wl->psm_entry_retry = 0;
wl->tx_queue_stopped = false;
wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
wl->rssi_thold = 0;
wl->channel = WL1251_DEFAULT_CHANNEL;
+ wl->monitor_present = false;
+ wl->joined = false;
wl1251_debugfs_reset(wl);
@@ -521,7 +525,7 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- if (memcmp(wl->mac_addr, vif->addr, ETH_ALEN)) {
+ if (!ether_addr_equal_unaligned(wl->mac_addr, vif->addr)) {
memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
ret = wl1251_acx_station_id(wl);
@@ -542,6 +546,7 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface");
wl->vif = NULL;
+ memset(wl->bssid, 0, ETH_ALEN);
mutex_unlock(&wl->mutex);
}
@@ -566,6 +571,11 @@ static int wl1251_build_qos_null_data(struct wl1251 *wl)
sizeof(template));
}
+static bool wl1251_can_do_pm(struct ieee80211_conf *conf, struct wl1251 *wl)
+{
+ return (conf->flags & IEEE80211_CONF_PS) && !wl->monitor_present;
+}
+
static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct wl1251 *wl = hw->priv;
@@ -575,8 +585,10 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
channel = ieee80211_frequency_to_channel(
conf->chandef.chan->center_freq);
- wl1251_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d",
+ wl1251_debug(DEBUG_MAC80211,
+ "mac80211 config ch %d monitor %s psm %s power %d",
channel,
+ conf->flags & IEEE80211_CONF_MONITOR ? "on" : "off",
conf->flags & IEEE80211_CONF_PS ? "on" : "off",
conf->power_level);
@@ -586,16 +598,44 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
if (ret < 0)
goto out;
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ u32 mode;
+
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ wl->monitor_present = true;
+ mode = DF_SNIFF_MODE_ENABLE | DF_ENCRYPTION_DISABLE;
+ } else {
+ wl->monitor_present = false;
+ mode = 0;
+ }
+
+ ret = wl1251_acx_feature_cfg(wl, mode);
+ if (ret < 0)
+ goto out_sleep;
+ }
+
if (channel != wl->channel) {
wl->channel = channel;
- ret = wl1251_join(wl, wl->bss_type, wl->channel,
- wl->beacon_int, wl->dtim_period);
+ /*
+ * Use ENABLE_RX command for channel switching when no
+ * interface is present (monitor mode only).
+ * This leaves the tx path disabled in firmware, whereas
+ * the usual JOIN command seems to transmit some frames
+ * at firmware level.
+ */
+ if (wl->vif == NULL) {
+ wl->joined = false;
+ ret = wl1251_cmd_data_path_rx(wl, wl->channel, 1);
+ } else {
+ ret = wl1251_join(wl, wl->bss_type, wl->channel,
+ wl->beacon_int, wl->dtim_period);
+ }
if (ret < 0)
goto out_sleep;
}
- if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) {
+ if (wl1251_can_do_pm(conf, wl) && !wl->psm_requested) {
wl1251_debug(DEBUG_PSM, "psm enabled");
wl->psm_requested = true;
@@ -611,8 +651,7 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
if (ret < 0)
goto out_sleep;
- } else if (!(conf->flags & IEEE80211_CONF_PS) &&
- wl->psm_requested) {
+ } else if (!wl1251_can_do_pm(conf, wl) && wl->psm_requested) {
wl1251_debug(DEBUG_PSM, "psm disabled");
wl->psm_requested = false;
@@ -648,6 +687,16 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
wl->power_level = conf->power_level;
}
+ /*
+ * Tell stack that connection is lost because hw encryption isn't
+ * supported in monitor mode.
+	 * This requires temporarily enabling the hw connection monitor flag.
+ */
+ if ((changed & IEEE80211_CONF_CHANGE_MONITOR) && wl->vif) {
+ wl->hw->flags |= IEEE80211_HW_CONNECTION_MONITOR;
+ ieee80211_connection_loss(wl->vif);
+ }
+
out_sleep:
wl1251_ps_elp_sleep(wl);
@@ -657,6 +706,44 @@ out:
return ret;
}
+struct wl1251_filter_params {
+ bool enabled;
+ int mc_list_length;
+ u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
+};
+
+static u64 wl1251_op_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ struct wl1251_filter_params *fp;
+ struct netdev_hw_addr *ha;
+ struct wl1251 *wl = hw->priv;
+
+ if (unlikely(wl->state == WL1251_STATE_OFF))
+ return 0;
+
+ fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
+ if (!fp) {
+ wl1251_error("Out of memory setting filters.");
+ return 0;
+ }
+
+ /* update multicast filtering parameters */
+ fp->mc_list_length = 0;
+ if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
+ fp->enabled = false;
+ } else {
+ fp->enabled = true;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ memcpy(fp->mc_list[fp->mc_list_length],
+ ha->addr, ETH_ALEN);
+ fp->mc_list_length++;
+ }
+ }
+
+ return (u64)(unsigned long)fp;
+}
+
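/*
 * Editorial aside, not part of the patch: mac80211 hands the u64 returned by
 * prepare_multicast() back verbatim as the 'multicast' argument of
 * configure_filter(), so the pointer round-trips through a cast and
 * configure_filter() owns the allocation on every exit path.  Sketch:
 */
	struct wl1251_filter_params *fp = (void *)(unsigned long)multicast;

	/* ... program the ACX group address table from fp ... */
	kfree(fp);	/* must happen even on early return */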
#define WL1251_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
FIF_ALLMULTI | \
FIF_FCSFAIL | \
@@ -667,8 +754,9 @@ out:
static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed,
- unsigned int *total,u64 multicast)
+ unsigned int *total, u64 multicast)
{
+ struct wl1251_filter_params *fp = (void *)(unsigned long)multicast;
struct wl1251 *wl = hw->priv;
int ret;
@@ -677,9 +765,11 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
*total &= WL1251_SUPPORTED_FILTERS;
changed &= WL1251_SUPPORTED_FILTERS;
- if (changed == 0)
+ if (changed == 0) {
/* no filters which we support changed */
+ kfree(fp);
return;
+ }
mutex_lock(&wl->mutex);
@@ -716,6 +806,15 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
+ if (*total & FIF_ALLMULTI || *total & FIF_PROMISC_IN_BSS)
+ ret = wl1251_acx_group_address_tbl(wl, false, NULL, 0);
+ else if (fp)
+ ret = wl1251_acx_group_address_tbl(wl, fp->enabled,
+ fp->mc_list,
+ fp->mc_list_length);
+ if (ret < 0)
+ goto out;
+
/* send filters to firmware */
wl1251_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
@@ -723,6 +822,7 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
out:
mutex_unlock(&wl->mutex);
+ kfree(fp);
}
/* HW encryption */
@@ -802,12 +902,12 @@ static int wl1251_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mutex_lock(&wl->mutex);
- ret = wl1251_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out_unlock;
-
switch (cmd) {
case SET_KEY:
+ if (wl->monitor_present) {
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
wl_cmd->key_action = KEY_ADD_OR_REPLACE;
break;
case DISABLE_KEY:
@@ -818,6 +918,10 @@ static int wl1251_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
}
+ ret = wl1251_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out_unlock;
+
ret = wl1251_set_key_type(wl, wl_cmd, cmd, key, addr);
if (ret < 0) {
wl1251_error("Set KEY type failed");
@@ -930,6 +1034,7 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels,
req->n_channels, WL1251_SCAN_NUM_PROBES);
if (ret < 0) {
+ wl1251_debug(DEBUG_SCAN, "scan failed %d", ret);
wl->scanning = false;
goto out_idle;
}
@@ -977,6 +1082,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
{
struct wl1251 *wl = hw->priv;
struct sk_buff *beacon, *skb;
+ bool enable;
int ret;
wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1023,6 +1129,9 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
+		/* Disable the temporarily enabled hw connection monitor flag */
+ wl->hw->flags &= ~IEEE80211_HW_CONNECTION_MONITOR;
+
if (bss_conf->assoc) {
wl->beacon_int = bss_conf->beacon_int;
@@ -1075,6 +1184,17 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ __be32 addr = bss_conf->arp_addr_list[0];
+ WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
+
+ enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
+		ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
+
+ if (ret < 0)
+ goto out_sleep;
+ }
+
if (changed & BSS_CHANGED_BEACON) {
beacon = ieee80211_beacon_get(hw, vif);
if (!beacon)
@@ -1245,6 +1365,7 @@ static const struct ieee80211_ops wl1251_ops = {
.add_interface = wl1251_op_add_interface,
.remove_interface = wl1251_op_remove_interface,
.config = wl1251_op_config,
+ .prepare_multicast = wl1251_op_prepare_multicast,
.configure_filter = wl1251_op_configure_filter,
.tx = wl1251_op_tx,
.set_key = wl1251_op_set_key,
@@ -1347,7 +1468,6 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
/* unit us */
/* FIXME: find a proper value */
- wl->hw->channel_change_time = 10000;
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
@@ -1401,7 +1521,10 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
INIT_DELAYED_WORK(&wl->elp_work, wl1251_elp_work);
wl->channel = WL1251_DEFAULT_CHANNEL;
+ wl->monitor_present = false;
+ wl->joined = false;
wl->scanning = false;
+ wl->bss_type = MAX_BSS_TYPE;
wl->default_key = 0;
wl->listen_int = 1;
wl->rx_counter = 0;
@@ -1413,6 +1536,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
wl->elp = false;
wl->station_mode = STATION_ACTIVE_MODE;
wl->psm_requested = false;
+ wl->psm_entry_retry = 0;
wl->tx_queue_stopped = false;
wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
wl->rssi_thold = 0;
@@ -1478,3 +1602,4 @@ MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
MODULE_FIRMWARE(WL1251_FW_NAME);
+MODULE_FIRMWARE(WL1251_NVS_NAME);
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
index 23289d49dd31..123c4bb50e0a 100644
--- a/drivers/net/wireless/ti/wl1251/rx.c
+++ b/drivers/net/wireless/ti/wl1251/rx.c
@@ -83,7 +83,7 @@ static void wl1251_rx_status(struct wl1251 *wl,
status->flag |= RX_FLAG_MACTIME_START;
- if (desc->flags & RX_DESC_ENCRYPTION_MASK) {
+ if (!wl->monitor_present && (desc->flags & RX_DESC_ENCRYPTION_MASK)) {
status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
if (likely(!(desc->flags & RX_DESC_DECRYPT_FAIL)))
diff --git a/drivers/net/wireless/ti/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c
index 28121c590a2b..81de83c6fcf6 100644
--- a/drivers/net/wireless/ti/wl1251/tx.c
+++ b/drivers/net/wireless/ti/wl1251/tx.c
@@ -28,6 +28,7 @@
#include "tx.h"
#include "ps.h"
#include "io.h"
+#include "event.h"
static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
{
@@ -89,8 +90,12 @@ static void wl1251_tx_control(struct tx_double_buffer_desc *tx_hdr,
/* 802.11 packets */
tx_hdr->control.packet_type = 0;
- if (control->flags & IEEE80211_TX_CTL_NO_ACK)
+ /* Also disable retry and ACK policy for injected packets */
+ if ((control->flags & IEEE80211_TX_CTL_NO_ACK) ||
+ (control->flags & IEEE80211_TX_CTL_INJECTED)) {
+ tx_hdr->control.rate_policy = 1;
tx_hdr->control.ack_policy = 1;
+ }
tx_hdr->control.tx_complete = 1;
@@ -277,6 +282,26 @@ static void wl1251_tx_trigger(struct wl1251 *wl)
TX_STATUS_DATA_OUT_COUNT_MASK;
}
+static void enable_tx_for_packet_injection(struct wl1251 *wl)
+{
+ int ret;
+
+ ret = wl1251_cmd_join(wl, BSS_TYPE_STA_BSS, wl->channel,
+ wl->beacon_int, wl->dtim_period);
+ if (ret < 0) {
+ wl1251_warning("join failed");
+ return;
+ }
+
+ ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
+ if (ret < 0) {
+ wl1251_warning("join timeout");
+ return;
+ }
+
+ wl->joined = true;
+}
+
/* caller must hold wl->mutex */
static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
{
@@ -287,6 +312,9 @@ static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
info = IEEE80211_SKB_CB(skb);
if (info->control.hw_key) {
+ if (unlikely(wl->monitor_present))
+ return -EINVAL;
+
idx = info->control.hw_key->hw_key_idx;
if (unlikely(wl->default_key != idx)) {
ret = wl1251_acx_default_key(wl, idx);
@@ -295,6 +323,10 @@ static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
}
}
+ /* Enable tx path in monitor mode for packet injection */
+ if ((wl->vif == NULL) && !wl->joined)
+ enable_tx_for_packet_injection(wl);
+
ret = wl1251_tx_path_status(wl);
if (ret < 0)
return ret;
@@ -394,6 +426,7 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
info = IEEE80211_SKB_CB(skb);
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
+ !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
(result->status == TX_SUCCESS))
info->flags |= IEEE80211_TX_STAT_ACK;
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index 2c3bd1bff3f6..235617a7716d 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -93,6 +93,7 @@ enum {
} while (0)
#define WL1251_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \
+ CFG_MC_FILTER_EN | \
CFG_BSSID_FILTER_EN)
#define WL1251_DEFAULT_RX_FILTER (CFG_RX_PRSP_EN | \
@@ -303,6 +304,8 @@ struct wl1251 {
u8 bss_type;
u8 listen_int;
int channel;
+ bool monitor_present;
+ bool joined;
void *target_mem_map;
struct acx_data_path_params_resp *data_path;
@@ -368,6 +371,9 @@ struct wl1251 {
/* PSM mode requested */
bool psm_requested;
+ /* retry counter for PSM entries */
+ u8 psm_entry_retry;
+
u16 beacon_int;
u8 dtim_period;
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index 4a0bbb13806b..7541bd1a4a4b 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -47,7 +47,7 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
* In active scans, we only scan channels not
* marked as passive.
*/
- (passive || !(flags & IEEE80211_CHAN_PASSIVE_SCAN))) {
+ (passive || !(flags & IEEE80211_CHAN_NO_IR))) {
wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
req->channels[i]->band,
req->channels[i]->center_freq);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 34d9dfff2ad3..9b2ecf52449f 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1688,7 +1688,7 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
if (channel->flags & (IEEE80211_CHAN_DISABLED |
IEEE80211_CHAN_RADAR |
- IEEE80211_CHAN_PASSIVE_SCAN))
+ IEEE80211_CHAN_NO_IR))
continue;
ch_bit_idx = wlcore_get_reg_conf_ch_idx(b, ch);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 0368b9cbfb89..b46b3116cc55 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -91,8 +91,7 @@ static void wl1271_reg_notify(struct wiphy *wiphy,
continue;
if (ch->flags & IEEE80211_CHAN_RADAR)
- ch->flags |= IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN;
+ ch->flags |= IEEE80211_CHAN_NO_IR;
}
@@ -4458,6 +4457,16 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
+ if ((changed & BSS_CHANGED_TXPOWER) &&
+ bss_conf->txpower != wlvif->power_level) {
+
+ ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
+ if (ret < 0)
+ goto out;
+
+ wlvif->power_level = bss_conf->txpower;
+ }
+
if (is_ap)
wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
else
@@ -5711,7 +5720,6 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
/* unit us */
/* FIXME: find a proper value */
- wl->hw->channel_change_time = 10000;
wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index 7ed86203304b..1e3d51cd673a 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -188,16 +188,14 @@ wlcore_scan_get_channels(struct wl1271 *wl,
flags = req_channels[i]->flags;
if (force_passive)
- flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+ flags |= IEEE80211_CHAN_NO_IR;
if ((req_channels[i]->band == band) &&
!(flags & IEEE80211_CHAN_DISABLED) &&
(!!(flags & IEEE80211_CHAN_RADAR) == radar) &&
/* if radar is set, we ignore the passive flag */
(radar ||
- !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
-
-
+ !!(flags & IEEE80211_CHAN_NO_IR) == passive)) {
if (flags & IEEE80211_CHAN_RADAR) {
channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
@@ -220,7 +218,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
(band == IEEE80211_BAND_2GHZ) &&
(channels[j].channel >= 12) &&
(channels[j].channel <= 14) &&
- (flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+ (flags & IEEE80211_CHAN_NO_IR) &&
!force_passive) {
/* pactive channels treated as DFS */
channels[j].flags = SCAN_CHANNEL_FLAGS_DFS;
@@ -243,8 +241,8 @@ wlcore_scan_get_channels(struct wl1271 *wl,
max_dwell_time_active,
flags & IEEE80211_CHAN_RADAR ?
", DFS" : "",
- flags & IEEE80211_CHAN_PASSIVE_SCAN ?
- ", PASSIVE" : "");
+ flags & IEEE80211_CHAN_NO_IR ?
+ ", NO-IR" : "");
j++;
}
}
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 38d2089f338a..d24d4a958c67 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -29,7 +29,6 @@
#include <linux/delay.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/kernel.h>
@@ -44,6 +43,7 @@
#include <linux/string.h>
#include <linux/wireless.h>
#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
#include <net/iw_handler.h>
@@ -673,8 +673,7 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
matchflag = 1;
if (matchflag) {
for (i = 0; i < this->bss_cnt; i++) {
- if (!memcmp(this->bss_set[i].bssid,
- sig.bssid, ETH_ALEN)) {
+ if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) {
matchflag = 0;
break;
}
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 71ab320fae82..73a49b868035 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/* This file implements all the hardware specific functions for the ZD1211
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 7ab922209b25..b03786c9f3aa 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ZD_CHIP_H
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
index 9a1b013f81be..41bd755bc135 100644
--- a/drivers/net/wireless/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ZD_DEF_H
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index c6208a7988e4..e7af261e9198 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/netdevice.h>
@@ -533,9 +532,8 @@ void zd_mac_tx_failed(struct urb *urb)
tx_hdr = (struct ieee80211_hdr *)skb->data;
/* we skip all frames not matching the reported destination */
- if (unlikely(memcmp(tx_hdr->addr1, tx_status->mac, ETH_ALEN))) {
+ if (unlikely(!ether_addr_equal(tx_hdr->addr1, tx_status->mac)))
continue;
- }
/* we skip all frames not matching the reported final rate */
@@ -998,7 +996,7 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
continue;
tx_hdr = (struct ieee80211_hdr *)skb->data;
- if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
+ if (likely(ether_addr_equal(tx_hdr->addr2, rx_hdr->addr1)))
{
found = 1;
break;
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index c01eca859f95..5a484235308f 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ZD_MAC_H
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zd1211rw/zd_rf.c
index c875ee05e22e..dc179c414518 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/errno.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zd1211rw/zd_rf.h
index 725b7c99b23d..8f14e25e1041 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.h
+++ b/drivers/net/wireless/zd1211rw/zd_rf.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ZD_RF_H
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
index 12babcb633c3..99aed7d78952 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
index 385c670d1293..5fea485be574 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
index 784d9ccb8fef..a93f657a41c7 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
index c4d324e19c24..61b924027356 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 84d94f572a46..a912dc051111 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -15,8 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 45e3bb28a01c..a9075f225178 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ZD_USB_H
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 08ae01b41c83..ae413a2cbee7 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -101,6 +101,13 @@ struct xenvif_rx_meta {
#define MAX_PENDING_REQS 256
+/* It's possible for an skb to have a maximal number of frags
+ * but still be less than MAX_BUFFER_OFFSET in size. Thus the
+ * worst-case number of copy operations is MAX_SKB_FRAGS per
+ * ring slot.
+ */
+#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
+
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
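/*
 * Editorial aside, not part of the patch: rough sizing of the array governed
 * by MAX_GRANT_COPY_OPS above, explaining why it is vmalloc()ed separately in
 * xenvif_alloc() rather than embedded in struct xenvif.  The per-entry size
 * is an assumption for illustration only.
 */
#include <stdio.h>

#define MAX_SKB_FRAGS		17	/* 64 KiB payload / 4 KiB pages + 1 */
#define XEN_NETIF_RX_RING_SIZE	256
#define MAX_GRANT_COPY_OPS	(MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)

int main(void)
{
	size_t per_op = 56;	/* assumed sizeof(struct gnttab_copy) */

	printf("%d copy ops, ~%zu KiB\n", MAX_GRANT_COPY_OPS,
	       (MAX_GRANT_COPY_OPS * per_op) / 1024);
	return 0;
}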
@@ -136,20 +143,15 @@ struct xenvif {
char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
struct xen_netif_rx_back_ring rx;
struct sk_buff_head rx_queue;
+ RING_IDX rx_last_skb_slots;
- /* Allow xenvif_start_xmit() to peek ahead in the rx request
- * ring. This is a prediction of what rx_req_cons will be
- * once all queued skbs are put on the ring.
- */
- RING_IDX rx_req_cons_peek;
+	/* This array is allocated separately as it is large */
+ struct gnttab_copy *grant_copy_op;
- /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
- * head/fragment page uses 2 copy operations because it
- * straddles two buffers in the frontend.
+ /* We create one meta structure per ring request we consume, so
+ * the maximum number is the same as the ring size.
*/
- struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
- struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
-
+ struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
u8 fe_dev_addr[6];
@@ -198,8 +200,6 @@ void xenvif_xenbus_fini(void);
int xenvif_schedulable(struct xenvif *vif);
-int xenvif_rx_ring_full(struct xenvif *vif);
-
int xenvif_must_stop_queue(struct xenvif *vif);
/* (Un)Map communication rings. */
@@ -211,21 +211,20 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
/* Check for SKBs from frontend and schedule backend processing */
void xenvif_check_rx_xenvif(struct xenvif *vif);
-/* Queue an SKB for transmission to the frontend */
-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
-/* Notify xenvif that ring now has space to send an skb to the frontend */
-void xenvif_notify_tx_completion(struct xenvif *vif);
-
/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);
-/* Returns number of ring slots required to send an skb to the frontend */
-unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
-
int xenvif_tx_action(struct xenvif *vif, int budget);
-void xenvif_rx_action(struct xenvif *vif);
int xenvif_kthread(void *data);
+void xenvif_kick_thread(struct xenvif *vif);
+
+/* Determine whether the needed number of slots (req) are available,
+ * and set req_event if not.
+ */
+bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
+
+void xenvif_stop_queue(struct xenvif *vif);
extern bool separate_tx_rx_irq;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 870f1fa58370..7669d49a67e2 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -34,6 +34,7 @@
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
#include <xen/events.h>
#include <asm/xen/hypercall.h>
@@ -46,11 +47,6 @@ int xenvif_schedulable(struct xenvif *vif)
return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
}
-static int xenvif_rx_schedulable(struct xenvif *vif)
-{
- return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
-}
-
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
@@ -104,8 +100,7 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
- if (xenvif_rx_schedulable(vif))
- netif_wake_queue(vif->dev);
+ xenvif_kick_thread(vif);
return IRQ_HANDLED;
}
@@ -121,24 +116,35 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
+ int min_slots_needed;
BUG_ON(skb->dev != dev);
/* Drop the packet if vif is not ready */
- if (vif->task == NULL)
+ if (vif->task == NULL || !xenvif_schedulable(vif))
goto drop;
- /* Drop the packet if the target domain has no receive buffers. */
- if (!xenvif_rx_schedulable(vif))
- goto drop;
+	/* At minimum we'll need one slot for the header and one for each
+ * frag.
+ */
+ min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
- /* Reserve ring slots for the worst-case number of fragments. */
- vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
+ /* If the skb is GSO then we'll also need an extra slot for the
+ * metadata.
+ */
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+ skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ min_slots_needed++;
- if (vif->can_queue && xenvif_must_stop_queue(vif))
- netif_stop_queue(dev);
+ /* If the skb can't possibly fit in the remaining slots
+ * then turn off the queue to give the ring a chance to
+ * drain.
+ */
+ if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+ xenvif_stop_queue(vif);
- xenvif_queue_tx_skb(vif, skb);
+ skb_queue_tail(&vif->rx_queue, skb);
+ xenvif_kick_thread(vif);
return NETDEV_TX_OK;
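/*
 * Editorial aside, not part of the patch: the slot estimate used above,
 * written out as a hypothetical helper for clarity.  For example, a TSO
 * (SKB_GSO_TCPV4) skb carrying three frags needs 1 + 3 + 1 = 5 slots.
 */
static int demo_min_slots_needed(struct sk_buff *skb)
{
	int slots = 1 + skb_shinfo(skb)->nr_frags;	/* header + frags */

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
		slots++;				/* extra GSO metadata slot */
	return slots;
}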
@@ -148,12 +154,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-void xenvif_notify_tx_completion(struct xenvif *vif)
-{
- if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
- netif_wake_queue(vif->dev);
-}
-
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
@@ -307,6 +307,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
SET_NETDEV_DEV(dev, parent);
vif = netdev_priv(dev);
+
+ vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
+ MAX_GRANT_COPY_OPS);
+ if (vif->grant_copy_op == NULL) {
+ pr_warn("Could not allocate grant copy space for %s\n", name);
+ free_netdev(dev);
+ return ERR_PTR(-ENOMEM);
+ }
+
vif->domid = domid;
vif->handle = handle;
vif->can_sg = 1;
@@ -378,6 +387,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
if (err < 0)
goto err;
+ init_waitqueue_head(&vif->wq);
+
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
err = bind_interdomain_evtchn_to_irqhandler(
@@ -410,7 +421,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
disable_irq(vif->rx_irq);
}
- init_waitqueue_head(&vif->wq);
task = kthread_create(xenvif_kthread,
(void *)vif, "%s", vif->dev->name);
if (IS_ERR(task)) {
@@ -487,6 +497,7 @@ void xenvif_free(struct xenvif *vif)
unregister_netdev(vif->dev);
+ vfree(vif->grant_copy_op);
free_netdev(vif->dev);
module_put(THIS_MODULE);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e884ee1fe7ed..e5284bca2d90 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -39,7 +39,6 @@
#include <linux/udp.h>
#include <net/tcp.h>
-#include <net/ip6_checksum.h>
#include <xen/xen.h>
#include <xen/events.h>
@@ -138,36 +137,26 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
vif->pending_prod + vif->pending_cons;
}
-static int max_required_rx_slots(struct xenvif *vif)
+bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
{
- int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+ RING_IDX prod, cons;
- /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
- if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
- max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
+ do {
+ prod = vif->rx.sring->req_prod;
+ cons = vif->rx.req_cons;
- return max;
-}
+ if (prod - cons >= needed)
+ return true;
-int xenvif_rx_ring_full(struct xenvif *vif)
-{
- RING_IDX peek = vif->rx_req_cons_peek;
- RING_IDX needed = max_required_rx_slots(vif);
-
- return ((vif->rx.sring->req_prod - peek) < needed) ||
- ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
-}
+ vif->rx.sring->req_event = prod + 1;
-int xenvif_must_stop_queue(struct xenvif *vif)
-{
- if (!xenvif_rx_ring_full(vif))
- return 0;
-
- vif->rx.sring->req_event = vif->rx_req_cons_peek +
- max_required_rx_slots(vif);
- mb(); /* request notification /then/ check the queue */
+ /* Make sure event is visible before we check prod
+ * again.
+ */
+ mb();
+ } while (vif->rx.sring->req_prod != prod);
- return xenvif_rx_ring_full(vif);
+ return false;
}
/*
@@ -210,93 +199,6 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
return false;
}
-struct xenvif_count_slot_state {
- unsigned long copy_off;
- bool head;
-};
-
-unsigned int xenvif_count_frag_slots(struct xenvif *vif,
- unsigned long offset, unsigned long size,
- struct xenvif_count_slot_state *state)
-{
- unsigned count = 0;
-
- offset &= ~PAGE_MASK;
-
- while (size > 0) {
- unsigned long bytes;
-
- bytes = PAGE_SIZE - offset;
-
- if (bytes > size)
- bytes = size;
-
- if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
- count++;
- state->copy_off = 0;
- }
-
- if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
- bytes = MAX_BUFFER_OFFSET - state->copy_off;
-
- state->copy_off += bytes;
-
- offset += bytes;
- size -= bytes;
-
- if (offset == PAGE_SIZE)
- offset = 0;
-
- state->head = false;
- }
-
- return count;
-}
-
-/*
- * Figure out how many ring slots we're going to need to send @skb to
- * the guest. This function is essentially a dry run of
- * xenvif_gop_frag_copy.
- */
-unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
-{
- struct xenvif_count_slot_state state;
- unsigned int count;
- unsigned char *data;
- unsigned i;
-
- state.head = true;
- state.copy_off = 0;
-
- /* Slot for the first (partial) page of data. */
- count = 1;
-
- /* Need a slot for the GSO prefix for GSO extra data? */
- if (skb_shinfo(skb)->gso_size)
- count++;
-
- data = skb->data;
- while (data < skb_tail_pointer(skb)) {
- unsigned long offset = offset_in_page(data);
- unsigned long size = PAGE_SIZE - offset;
-
- if (data + size > skb_tail_pointer(skb))
- size = skb_tail_pointer(skb) - data;
-
- count += xenvif_count_frag_slots(vif, offset, size, &state);
-
- data += size;
- }
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
- unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
-
- count += xenvif_count_frag_slots(vif, offset, size, &state);
- }
- return count;
-}
-
struct netrx_pending_operations {
unsigned copy_prod, copy_cons;
unsigned meta_prod, meta_cons;
@@ -557,12 +459,12 @@ struct skb_cb_overlay {
int meta_slots_used;
};
-static void xenvif_kick_thread(struct xenvif *vif)
+void xenvif_kick_thread(struct xenvif *vif)
{
wake_up(&vif->wq);
}
-void xenvif_rx_action(struct xenvif *vif)
+static void xenvif_rx_action(struct xenvif *vif)
{
s8 status;
u16 flags;
@@ -571,11 +473,9 @@ void xenvif_rx_action(struct xenvif *vif)
struct sk_buff *skb;
LIST_HEAD(notify);
int ret;
- int nr_frags;
- int count;
unsigned long offset;
struct skb_cb_overlay *sco;
- int need_to_notify = 0;
+ bool need_to_notify = false;
struct netrx_pending_operations npo = {
.copy = vif->grant_copy_op,
@@ -584,38 +484,53 @@ void xenvif_rx_action(struct xenvif *vif)
skb_queue_head_init(&rxq);
- count = 0;
-
while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
- vif = netdev_priv(skb->dev);
- nr_frags = skb_shinfo(skb)->nr_frags;
+ RING_IDX max_slots_needed;
+ int i;
+
+ /* We need a cheap worst-case estimate for the number of
+ * slots we'll use.
+ */
+
+ max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
+ skb_headlen(skb),
+ PAGE_SIZE);
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ unsigned int size;
+ size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+ }
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+ skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ max_slots_needed++;
+
+ /* If the skb may not fit then bail out now */
+ if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
+ skb_queue_head(&vif->rx_queue, skb);
+ need_to_notify = true;
+ vif->rx_last_skb_slots = max_slots_needed;
+ break;
+ } else
+ vif->rx_last_skb_slots = 0;
sco = (struct skb_cb_overlay *)skb->cb;
sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
-
- count += nr_frags + 1;
+ BUG_ON(sco->meta_slots_used > max_slots_needed);
__skb_queue_tail(&rxq, skb);
-
- /* Filled the batch queue? */
- /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
- if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
- break;
}
BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
if (!npo.copy_prod)
- return;
+ goto done;
- BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
+ BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
sco = (struct skb_cb_overlay *)skb->cb;
- vif = netdev_priv(skb->dev);
-
if ((1 << vif->meta[npo.meta_cons].gso_type) &
vif->gso_prefix_mask) {
resp = RING_GET_RESPONSE(&vif->rx,
@@ -678,28 +593,15 @@ void xenvif_rx_action(struct xenvif *vif)
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
- if (ret)
- need_to_notify = 1;
-
- xenvif_notify_tx_completion(vif);
+ need_to_notify |= !!ret;
npo.meta_cons += sco->meta_slots_used;
dev_kfree_skb(skb);
}
+done:
if (need_to_notify)
notify_remote_via_irq(vif->rx_irq);
-
- /* More work to do? */
- if (!skb_queue_empty(&vif->rx_queue))
- xenvif_kick_thread(vif);
-}
-
-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
-{
- skb_queue_tail(&vif->rx_queue, skb);
-
- xenvif_kick_thread(vif);
}
void xenvif_check_rx_xenvif(struct xenvif *vif)
@@ -1141,254 +1043,14 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
}
skb_shinfo(skb)->gso_size = gso->u.gso.size;
-
- /* Header must be checked, and gso_segs computed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
-
- return 0;
-}
-
-static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len,
- unsigned int max)
-{
- if (skb_headlen(skb) >= len)
- return 0;
-
- /* If we need to pullup then pullup to the max, so we
- * won't need to do it again.
- */
- if (max > skb->len)
- max = skb->len;
-
- if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
- return -ENOMEM;
-
- if (skb_headlen(skb) < len)
- return -EPROTO;
+ /* gso_segs will be calculated later */
return 0;
}
-/* This value should be large enough to cover a tagged ethernet header plus
- * maximally sized IP and TCP or UDP headers.
- */
-#define MAX_IP_HDR_LEN 128
-
-static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
- int recalculate_partial_csum)
-{
- unsigned int off;
- bool fragment;
- int err;
-
- fragment = false;
-
- err = maybe_pull_tail(skb,
- sizeof(struct iphdr),
- MAX_IP_HDR_LEN);
- if (err < 0)
- goto out;
-
- if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
- fragment = true;
-
- off = ip_hdrlen(skb);
-
- err = -EPROTO;
-
- switch (ip_hdr(skb)->protocol) {
- case IPPROTO_TCP:
- err = maybe_pull_tail(skb,
- off + sizeof(struct tcphdr),
- MAX_IP_HDR_LEN);
- if (err < 0)
- goto out;
-
- if (!skb_partial_csum_set(skb, off,
- offsetof(struct tcphdr, check)))
- goto out;
-
- if (recalculate_partial_csum)
- tcp_hdr(skb)->check =
- ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
- ip_hdr(skb)->daddr,
- skb->len - off,
- IPPROTO_TCP, 0);
- break;
- case IPPROTO_UDP:
- err = maybe_pull_tail(skb,
- off + sizeof(struct udphdr),
- MAX_IP_HDR_LEN);
- if (err < 0)
- goto out;
-
- if (!skb_partial_csum_set(skb, off,
- offsetof(struct udphdr, check)))
- goto out;
-
- if (recalculate_partial_csum)
- udp_hdr(skb)->check =
- ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
- ip_hdr(skb)->daddr,
- skb->len - off,
- IPPROTO_UDP, 0);
- break;
- default:
- goto out;
- }
-
- err = 0;
-
-out:
- return err;
-}
-
-/* This value should be large enough to cover a tagged ethernet header plus
- * an IPv6 header, all options, and a maximal TCP or UDP header.
- */
-#define MAX_IPV6_HDR_LEN 256
-
-#define OPT_HDR(type, skb, off) \
- (type *)(skb_network_header(skb) + (off))
-
-static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
- int recalculate_partial_csum)
-{
- int err;
- u8 nexthdr;
- unsigned int off;
- unsigned int len;
- bool fragment;
- bool done;
-
- fragment = false;
- done = false;
-
- off = sizeof(struct ipv6hdr);
-
- err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
- if (err < 0)
- goto out;
-
- nexthdr = ipv6_hdr(skb)->nexthdr;
-
- len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
- while (off <= len && !done) {
- switch (nexthdr) {
- case IPPROTO_DSTOPTS:
- case IPPROTO_HOPOPTS:
- case IPPROTO_ROUTING: {
- struct ipv6_opt_hdr *hp;
-
- err = maybe_pull_tail(skb,
- off +
- sizeof(struct ipv6_opt_hdr),
- MAX_IPV6_HDR_LEN);
- if (err < 0)
- goto out;
-
- hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
- nexthdr = hp->nexthdr;
- off += ipv6_optlen(hp);
- break;
- }
- case IPPROTO_AH: {
- struct ip_auth_hdr *hp;
-
- err = maybe_pull_tail(skb,
- off +
- sizeof(struct ip_auth_hdr),
- MAX_IPV6_HDR_LEN);
- if (err < 0)
- goto out;
-
- hp = OPT_HDR(struct ip_auth_hdr, skb, off);
- nexthdr = hp->nexthdr;
- off += ipv6_authlen(hp);
- break;
- }
- case IPPROTO_FRAGMENT: {
- struct frag_hdr *hp;
-
- err = maybe_pull_tail(skb,
- off +
- sizeof(struct frag_hdr),
- MAX_IPV6_HDR_LEN);
- if (err < 0)
- goto out;
-
- hp = OPT_HDR(struct frag_hdr, skb, off);
-
- if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
- fragment = true;
-
- nexthdr = hp->nexthdr;
- off += sizeof(struct frag_hdr);
- break;
- }
- default:
- done = true;
- break;
- }
- }
-
- err = -EPROTO;
-
- if (!done || fragment)
- goto out;
-
- switch (nexthdr) {
- case IPPROTO_TCP:
- err = maybe_pull_tail(skb,
- off + sizeof(struct tcphdr),
- MAX_IPV6_HDR_LEN);
- if (err < 0)
- goto out;
-
- if (!skb_partial_csum_set(skb, off,
- offsetof(struct tcphdr, check)))
- goto out;
-
- if (recalculate_partial_csum)
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- skb->len - off,
- IPPROTO_TCP, 0);
- break;
- case IPPROTO_UDP:
- err = maybe_pull_tail(skb,
- off + sizeof(struct udphdr),
- MAX_IPV6_HDR_LEN);
- if (err < 0)
- goto out;
-
- if (!skb_partial_csum_set(skb, off,
- offsetof(struct udphdr, check)))
- goto out;
-
- if (recalculate_partial_csum)
- udp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- skb->len - off,
- IPPROTO_UDP, 0);
- break;
- default:
- goto out;
- }
-
- err = 0;
-
-out:
- return err;
-}
-
static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
- int err = -EPROTO;
- int recalculate_partial_csum = 0;
+ bool recalculate_partial_csum = false;
/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
* peers can fail to set NETRXF_csum_blank when sending a GSO
@@ -1398,19 +1060,14 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
vif->rx_gso_checksum_fixup++;
skb->ip_summed = CHECKSUM_PARTIAL;
- recalculate_partial_csum = 1;
+ recalculate_partial_csum = true;
}
/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- if (skb->protocol == htons(ETH_P_IP))
- err = checksum_setup_ip(vif, skb, recalculate_partial_csum);
- else if (skb->protocol == htons(ETH_P_IPV6))
- err = checksum_setup_ipv6(vif, skb, recalculate_partial_csum);
-
- return err;
+ return skb_checksum_setup(skb, recalculate_partial_csum);
}
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
@@ -1676,6 +1333,20 @@ static int xenvif_tx_submit(struct xenvif *vif)
skb_probe_transport_header(skb, 0);
+ /* If the packet is GSO then we will have just set up the
+ * transport header offset in checksum_setup so it's now
+ * straightforward to calculate gso_segs.
+ */
+ if (skb_is_gso(skb)) {
+ int mss = skb_shinfo(skb)->gso_size;
+ int hdrlen = skb_transport_header(skb) -
+ skb_mac_header(skb) +
+ tcp_hdrlen(skb);
+
+ skb_shinfo(skb)->gso_segs =
+ DIV_ROUND_UP(skb->len - hdrlen, mss);
+ }
+
vif->dev->stats.rx_bytes += skb->len;
vif->dev->stats.rx_packets++;
@@ -1800,7 +1471,8 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
static inline int rx_work_todo(struct xenvif *vif)
{
- return !skb_queue_empty(&vif->rx_queue);
+ return !skb_queue_empty(&vif->rx_queue) &&
+ xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
}
static inline int tx_work_todo(struct xenvif *vif)
@@ -1850,8 +1522,6 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
rxs = (struct xen_netif_rx_sring *)addr;
BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
- vif->rx_req_cons_peek = 0;
-
return 0;
err:
@@ -1859,9 +1529,24 @@ err:
return err;
}
+void xenvif_stop_queue(struct xenvif *vif)
+{
+ if (!vif->can_queue)
+ return;
+
+ netif_stop_queue(vif->dev);
+}
+
+static void xenvif_start_queue(struct xenvif *vif)
+{
+ if (xenvif_schedulable(vif))
+ netif_wake_queue(vif->dev);
+}
+
int xenvif_kthread(void *data)
{
struct xenvif *vif = data;
+ struct sk_buff *skb;
while (!kthread_should_stop()) {
wait_event_interruptible(vif->wq,
@@ -1870,12 +1555,20 @@ int xenvif_kthread(void *data)
if (kthread_should_stop())
break;
- if (rx_work_todo(vif))
+ if (!skb_queue_empty(&vif->rx_queue))
xenvif_rx_action(vif);
+ if (skb_queue_empty(&vif->rx_queue) &&
+ netif_queue_stopped(vif->dev))
+ xenvif_start_queue(vif);
+
cond_resched();
}
+ /* Bin any remaining skbs */
+ while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
+ dev_kfree_skb(skb);
+
return 0;
}
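
Two techniques in the netback.c hunks above are easy to miss in diff form: the RX path now takes a cheap worst-case slot estimate (one ring slot per page spanned by the linear area and by each frag, plus one for a GSO extra), and xenvif_tx_submit() recomputes gso_segs from the MSS once checksum_setup() has established the transport header offset. A standalone sketch of both calculations, using hypothetical helper names rather than the driver's exact code paths:

    /* Illustrative helpers only; the driver open-codes this logic inline. */
    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/skbuff.h>
    #include <net/tcp.h>

    static unsigned int estimate_rx_slots(const struct sk_buff *skb)
    {
            unsigned int slots, i;

            /* Linear area: one slot per page it touches. */
            slots = DIV_ROUND_UP(offset_in_page(skb->data) + skb_headlen(skb),
                                 PAGE_SIZE);

            /* Each frag may span several pages. */
            for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                    slots += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                          PAGE_SIZE);

            /* One extra slot for the GSO extra-info segment. */
            if (skb_is_gso(skb))
                    slots++;

            return slots;
    }

    static void set_gso_segs(struct sk_buff *skb)
    {
            /* Only valid once the transport header offset has been set up. */
            int mss = skb_shinfo(skb)->gso_size;
            int hdrlen = skb_transport_header(skb) - skb_mac_header(skb) +
                         tcp_hdrlen(skb);

            skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len - hdrlen, mss);
    }
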
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index f0358992b04f..7a206cffb062 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -15,8 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "common.h"
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e59acb1daa23..f9daa9e183f2 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -117,6 +117,7 @@ struct netfront_info {
} tx_skbs[NET_TX_RING_SIZE];
grant_ref_t gref_tx_head;
grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
+ struct page *grant_tx_page[NET_TX_RING_SIZE];
unsigned tx_skb_freelist;
spinlock_t rx_lock ____cacheline_aligned_in_smp;
@@ -396,6 +397,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
gnttab_release_grant_reference(
&np->gref_tx_head, np->grant_tx_ref[id]);
np->grant_tx_ref[id] = GRANT_INVALID_REF;
+ np->grant_tx_page[id] = NULL;
add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
dev_kfree_skb_irq(skb);
}
@@ -452,6 +454,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
mfn, GNTMAP_readonly);
+ np->grant_tx_page[id] = virt_to_page(data);
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = len;
@@ -497,6 +500,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
np->xbdev->otherend_id,
mfn, GNTMAP_readonly);
+ np->grant_tx_page[id] = page;
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = bytes;
@@ -596,6 +600,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
mfn = virt_to_mfn(data);
gnttab_grant_foreign_access_ref(
ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
+ np->grant_tx_page[id] = virt_to_page(data);
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = len;
@@ -617,7 +622,9 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx->flags |= XEN_NETTXF_extra_info;
gso->u.gso.size = skb_shinfo(skb)->gso_size;
- gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+ gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
+ XEN_NETIF_GSO_TYPE_TCPV6 :
+ XEN_NETIF_GSO_TYPE_TCPV4;
gso->u.gso.pad = 0;
gso->u.gso.features = 0;
@@ -809,15 +816,18 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
return -EINVAL;
}
- /* Currently only TCPv4 S.O. is supported. */
- if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
+ gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
if (net_ratelimit())
pr_warn("Bad GSO type %d\n", gso->u.gso.type);
return -EINVAL;
}
skb_shinfo(skb)->gso_size = gso->u.gso.size;
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ skb_shinfo(skb)->gso_type =
+ (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
+ SKB_GSO_TCPV4 :
+ SKB_GSO_TCPV6;
/* Header must be checked, and gso_segs computed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
@@ -859,9 +869,7 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
- struct iphdr *iph;
- int err = -EPROTO;
- int recalculate_partial_csum = 0;
+ bool recalculate_partial_csum = false;
/*
* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
@@ -873,54 +881,14 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
struct netfront_info *np = netdev_priv(dev);
np->rx_gso_checksum_fixup++;
skb->ip_summed = CHECKSUM_PARTIAL;
- recalculate_partial_csum = 1;
+ recalculate_partial_csum = true;
}
/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- if (skb->protocol != htons(ETH_P_IP))
- goto out;
-
- iph = (void *)skb->data;
-
- switch (iph->protocol) {
- case IPPROTO_TCP:
- if (!skb_partial_csum_set(skb, 4 * iph->ihl,
- offsetof(struct tcphdr, check)))
- goto out;
-
- if (recalculate_partial_csum) {
- struct tcphdr *tcph = tcp_hdr(skb);
- tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - iph->ihl*4,
- IPPROTO_TCP, 0);
- }
- break;
- case IPPROTO_UDP:
- if (!skb_partial_csum_set(skb, 4 * iph->ihl,
- offsetof(struct udphdr, check)))
- goto out;
-
- if (recalculate_partial_csum) {
- struct udphdr *udph = udp_hdr(skb);
- udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - iph->ihl*4,
- IPPROTO_UDP, 0);
- }
- break;
- default:
- if (net_ratelimit())
- pr_err("Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
- iph->protocol);
- goto out;
- }
-
- err = 0;
-
-out:
- return err;
+ return skb_checksum_setup(skb, recalculate_partial_csum);
}
static int handle_incoming_queue(struct net_device *dev,
@@ -1122,10 +1090,11 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
continue;
skb = np->tx_skbs[i].skb;
- gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
- GNTMAP_readonly);
- gnttab_release_grant_reference(&np->gref_tx_head,
- np->grant_tx_ref[i]);
+ get_page(np->grant_tx_page[i]);
+ gnttab_end_foreign_access(np->grant_tx_ref[i],
+ GNTMAP_readonly,
+ (unsigned long)page_address(np->grant_tx_page[i]));
+ np->grant_tx_page[i] = NULL;
np->grant_tx_ref[i] = GRANT_INVALID_REF;
add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
dev_kfree_skb_irq(skb);
@@ -1134,78 +1103,35 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
static void xennet_release_rx_bufs(struct netfront_info *np)
{
- struct mmu_update *mmu = np->rx_mmu;
- struct multicall_entry *mcl = np->rx_mcl;
- struct sk_buff_head free_list;
- struct sk_buff *skb;
- unsigned long mfn;
- int xfer = 0, noxfer = 0, unused = 0;
int id, ref;
- dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
- __func__);
- return;
-
- skb_queue_head_init(&free_list);
-
spin_lock_bh(&np->rx_lock);
for (id = 0; id < NET_RX_RING_SIZE; id++) {
- ref = np->grant_rx_ref[id];
- if (ref == GRANT_INVALID_REF) {
- unused++;
- continue;
- }
+ struct sk_buff *skb;
+ struct page *page;
skb = np->rx_skbs[id];
- mfn = gnttab_end_foreign_transfer_ref(ref);
- gnttab_release_grant_reference(&np->gref_rx_head, ref);
- np->grant_rx_ref[id] = GRANT_INVALID_REF;
-
- if (0 == mfn) {
- skb_shinfo(skb)->nr_frags = 0;
- dev_kfree_skb(skb);
- noxfer++;
+ if (!skb)
continue;
- }
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- /* Remap the page. */
- const struct page *page =
- skb_frag_page(&skb_shinfo(skb)->frags[0]);
- unsigned long pfn = page_to_pfn(page);
- void *vaddr = page_address(page);
-
- MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
- mfn_pte(mfn, PAGE_KERNEL),
- 0);
- mcl++;
- mmu->ptr = ((u64)mfn << PAGE_SHIFT)
- | MMU_MACHPHYS_UPDATE;
- mmu->val = pfn;
- mmu++;
+ ref = np->grant_rx_ref[id];
+ if (ref == GRANT_INVALID_REF)
+ continue;
- set_phys_to_machine(pfn, mfn);
- }
- __skb_queue_tail(&free_list, skb);
- xfer++;
- }
+ page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
- dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
- __func__, xfer, noxfer, unused);
+ /* gnttab_end_foreign_access() needs a page ref until
+ * foreign access is ended (which may be deferred).
+ */
+ get_page(page);
+ gnttab_end_foreign_access(ref, 0,
+ (unsigned long)page_address(page));
+ np->grant_rx_ref[id] = GRANT_INVALID_REF;
- if (xfer) {
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- /* Do all the remapping work and M2P updates. */
- MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
- NULL, DOMID_SELF);
- mcl++;
- HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
- }
+ kfree_skb(skb);
}
- __skb_queue_purge(&free_list);
-
spin_unlock_bh(&np->rx_lock);
}
@@ -1233,6 +1159,15 @@ static netdev_features_t xennet_fix_features(struct net_device *dev,
features &= ~NETIF_F_SG;
}
+ if (features & NETIF_F_IPV6_CSUM) {
+ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+ "feature-ipv6-csum-offload", "%d", &val) < 0)
+ val = 0;
+
+ if (!val)
+ features &= ~NETIF_F_IPV6_CSUM;
+ }
+
if (features & NETIF_F_TSO) {
if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
"feature-gso-tcpv4", "%d", &val) < 0)
@@ -1242,6 +1177,15 @@ static netdev_features_t xennet_fix_features(struct net_device *dev,
features &= ~NETIF_F_TSO;
}
+ if (features & NETIF_F_TSO6) {
+ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+ "feature-gso-tcpv6", "%d", &val) < 0)
+ val = 0;
+
+ if (!val)
+ features &= ~NETIF_F_TSO6;
+ }
+
return features;
}
@@ -1358,6 +1302,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
for (i = 0; i < NET_RX_RING_SIZE; i++) {
np->rx_skbs[i] = NULL;
np->grant_rx_ref[i] = GRANT_INVALID_REF;
+ np->grant_tx_page[i] = NULL;
}
/* A grant for every tx ring slot */
@@ -1380,7 +1325,9 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_napi_add(netdev, &np->napi, xennet_poll, 64);
netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_GSO_ROBUST;
- netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
/*
* Assume that all hw features are available for now. This set
@@ -1758,6 +1705,19 @@ again:
goto abort_transaction;
}
+ err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
+ if (err) {
+ message = "writing feature-gso-tcpv6";
+ goto abort_transaction;
+ }
+
+ err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
+ "1");
+ if (err) {
+ message = "writing feature-ipv6-csum-offload";
+ goto abort_transaction;
+ }
+
err = xenbus_transaction_end(xbt, 0);
if (err) {
if (err == -EAGAIN)
@@ -1872,7 +1832,6 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
- case XenbusStateClosed:
break;
case XenbusStateInitWait:
@@ -1887,6 +1846,10 @@ static void netback_changed(struct xenbus_device *dev,
netdev_notify_peers(netdev);
break;
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
@@ -2115,7 +2078,7 @@ static int __init netif_init(void)
if (!xen_domain())
return -ENODEV;
- if (xen_hvm_domain() && !xen_platform_pci_unplug)
+ if (!xen_has_pv_nic_devices())
return -ENODEV;
pr_info("Initialising Xen virtual ethernet driver\n");
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index c1fb20603338..fe20e1cc0545 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -58,5 +58,6 @@ config NFC_PORT100
source "drivers/nfc/pn544/Kconfig"
source "drivers/nfc/microread/Kconfig"
+source "drivers/nfc/nfcmrvl/Kconfig"
endmenu
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index c715fe8582a8..56ab822ba03d 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -9,5 +9,6 @@ obj-$(CONFIG_NFC_WILINK) += nfcwilink.o
obj-$(CONFIG_NFC_MEI_PHY) += mei_phy.o
obj-$(CONFIG_NFC_SIM) += nfcsim.o
obj-$(CONFIG_NFC_PORT100) += port100.o
+obj-$(CONFIG_NFC_MRVL) += nfcmrvl/
ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 85f90090cc1d..11c7cbdade66 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -129,7 +127,7 @@ void nfc_mei_event_cb(struct mei_cl_device *device, u32 events, void *context)
reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ);
if (reply_size < MEI_NFC_HEADER_SIZE) {
- kfree(skb);
+ kfree_skb(skb);
return;
}
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index 696e3467eccc..df85cd3d9db0 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index 72fafec3d460..2d1395be64ae 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
index 970ded6bfcf5..f868333271aa 100644
--- a/drivers/nfc/microread/microread.c
+++ b/drivers/nfc/microread/microread.c
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/nfc/microread/microread.h b/drivers/nfc/microread/microread.h
index 64b447a1c5bf..f538641431a2 100644
--- a/drivers/nfc/microread/microread.h
+++ b/drivers/nfc/microread/microread.h
@@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __LOCAL_MICROREAD_H_
diff --git a/drivers/nfc/nfcmrvl/Kconfig b/drivers/nfc/nfcmrvl/Kconfig
new file mode 100644
index 000000000000..5e18afd9abe2
--- /dev/null
+++ b/drivers/nfc/nfcmrvl/Kconfig
@@ -0,0 +1,23 @@
+config NFC_MRVL
+ tristate "Marvell NFC driver support"
+ depends on NFC_NCI
+ help
+ The core driver to support Marvell NFC devices.
+
+ This driver is required if you want to support
+ Marvell NFC device 8897.
+
+ Say Y here to compile the Marvell NFC driver into the kernel or
+ say M to compile it as a module.
+
+config NFC_MRVL_USB
+ tristate "Marvell NFC-over-USB driver"
+ depends on NFC_MRVL && USB
+ help
+ Marvell NFC-over-USB driver.
+
+ This driver provides support for Marvell NFC-over-USB devices:
+ 8897.
+
+ Say Y here to compile support for the Marvell NFC-over-USB driver
+ into the kernel or say M to compile it as a module.
diff --git a/drivers/nfc/nfcmrvl/Makefile b/drivers/nfc/nfcmrvl/Makefile
new file mode 100644
index 000000000000..97a0de72dc01
--- /dev/null
+++ b/drivers/nfc/nfcmrvl/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for NFCMRVL NCI based NFC driver
+#
+
+nfcmrvl-y += main.o
+obj-$(CONFIG_NFC_MRVL) += nfcmrvl.o
+
+nfcmrvl_usb-y += usb.o
+obj-$(CONFIG_NFC_MRVL_USB) += nfcmrvl_usb.o
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
new file mode 100644
index 000000000000..85e8bcf98693
--- /dev/null
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -0,0 +1,165 @@
+/*
+ * Marvell NFC driver: major functions
+ *
+ * Copyright (C) 2014, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include "nfcmrvl.h"
+
+#define VERSION "1.0"
+
+static int nfcmrvl_nci_open(struct nci_dev *ndev)
+{
+ struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
+ int err;
+
+ if (test_and_set_bit(NFCMRVL_NCI_RUNNING, &priv->flags))
+ return 0;
+
+ err = priv->if_ops->nci_open(priv);
+
+ if (err)
+ clear_bit(NFCMRVL_NCI_RUNNING, &priv->flags);
+
+ return err;
+}
+
+static int nfcmrvl_nci_close(struct nci_dev *ndev)
+{
+ struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
+
+ if (!test_and_clear_bit(NFCMRVL_NCI_RUNNING, &priv->flags))
+ return 0;
+
+ priv->if_ops->nci_close(priv);
+
+ return 0;
+}
+
+static int nfcmrvl_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
+
+ nfc_info(priv->dev, "send entry, len %d\n", skb->len);
+
+ skb->dev = (void *)ndev;
+
+ if (!test_bit(NFCMRVL_NCI_RUNNING, &priv->flags))
+ return -EBUSY;
+
+ return priv->if_ops->nci_send(priv, skb);
+}
+
+static int nfcmrvl_nci_setup(struct nci_dev *ndev)
+{
+ __u8 val;
+
+ val = NFCMRVL_GPIO_PIN_NFC_NOT_ALLOWED;
+ nci_set_config(ndev, NFCMRVL_NOT_ALLOWED_ID, 1, &val);
+ val = NFCMRVL_GPIO_PIN_NFC_ACTIVE;
+ nci_set_config(ndev, NFCMRVL_ACTIVE_ID, 1, &val);
+ val = NFCMRVL_EXT_COEX_ENABLE;
+ nci_set_config(ndev, NFCMRVL_EXT_COEX_ID, 1, &val);
+
+ return 0;
+}
+
+static struct nci_ops nfcmrvl_nci_ops = {
+ .open = nfcmrvl_nci_open,
+ .close = nfcmrvl_nci_close,
+ .send = nfcmrvl_nci_send,
+ .setup = nfcmrvl_nci_setup,
+};
+
+struct nfcmrvl_private *nfcmrvl_nci_register_dev(void *drv_data,
+ struct nfcmrvl_if_ops *ops,
+ struct device *dev)
+{
+ struct nfcmrvl_private *priv;
+ int rc;
+ u32 protocols;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->drv_data = drv_data;
+ priv->if_ops = ops;
+ priv->dev = dev;
+
+ protocols = NFC_PROTO_JEWEL_MASK
+ | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
+ | NFC_PROTO_ISO14443_MASK
+ | NFC_PROTO_ISO14443_B_MASK
+ | NFC_PROTO_NFC_DEP_MASK;
+
+ priv->ndev = nci_allocate_device(&nfcmrvl_nci_ops, protocols, 0, 0);
+ if (!priv->ndev) {
+ nfc_err(dev, "nci_allocate_device failed");
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ nci_set_drvdata(priv->ndev, priv);
+
+ rc = nci_register_device(priv->ndev);
+ if (rc) {
+ nfc_err(dev, "nci_register_device failed %d", rc);
+ nci_free_device(priv->ndev);
+ goto error;
+ }
+
+ nfc_info(dev, "registered with nci successfully\n");
+ return priv;
+
+error:
+ kfree(priv);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(nfcmrvl_nci_register_dev);
+
+void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv)
+{
+ struct nci_dev *ndev = priv->ndev;
+
+ nci_unregister_device(ndev);
+ nci_free_device(ndev);
+ kfree(priv);
+}
+EXPORT_SYMBOL_GPL(nfcmrvl_nci_unregister_dev);
+
+int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, void *data, int count)
+{
+ struct sk_buff *skb;
+
+ skb = nci_skb_alloc(priv->ndev, count, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb_put(skb, count), data, count);
+ nci_recv_frame(priv->ndev, skb);
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(nfcmrvl_nci_recv_frame);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell NFC driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nfc/nfcmrvl/nfcmrvl.h b/drivers/nfc/nfcmrvl/nfcmrvl.h
new file mode 100644
index 000000000000..54c4a956bd45
--- /dev/null
+++ b/drivers/nfc/nfcmrvl/nfcmrvl.h
@@ -0,0 +1,48 @@
+/**
+ * Marvell NFC driver
+ *
+ * Copyright (C) 2014, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ **/
+
+/* Define private flags: */
+#define NFCMRVL_NCI_RUNNING 1
+
+#define NFCMRVL_EXT_COEX_ID 0xE0
+#define NFCMRVL_NOT_ALLOWED_ID 0xE1
+#define NFCMRVL_ACTIVE_ID 0xE2
+#define NFCMRVL_EXT_COEX_ENABLE 1
+#define NFCMRVL_GPIO_PIN_NFC_NOT_ALLOWED 0xA
+#define NFCMRVL_GPIO_PIN_NFC_ACTIVE 0xB
+#define NFCMRVL_NCI_MAX_EVENT_SIZE 260
+
+struct nfcmrvl_private {
+ struct nci_dev *ndev;
+ unsigned long flags;
+ void *drv_data;
+ struct device *dev;
+ struct nfcmrvl_if_ops *if_ops;
+};
+
+struct nfcmrvl_if_ops {
+ int (*nci_open) (struct nfcmrvl_private *priv);
+ int (*nci_close) (struct nfcmrvl_private *priv);
+ int (*nci_send) (struct nfcmrvl_private *priv, struct sk_buff *skb);
+};
+
+void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv);
+int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, void *data, int count);
+struct nfcmrvl_private *nfcmrvl_nci_register_dev(void *drv_data,
+ struct nfcmrvl_if_ops *ops,
+ struct device *dev);
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
new file mode 100644
index 000000000000..3221ca37d6c9
--- /dev/null
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -0,0 +1,459 @@
+/**
+ * Marvell NFC-over-USB driver: USB interface related functions
+ *
+ * Copyright (C) 2014, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ **/
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/nfc.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include "nfcmrvl.h"
+
+#define VERSION "1.0"
+
+static struct usb_device_id nfcmrvl_table[] = {
+ { USB_DEVICE_INTERFACE_CLASS(0x1286, 0x2046, 0xff) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, nfcmrvl_table);
+
+#define NFCMRVL_USB_BULK_RUNNING 1
+#define NFCMRVL_USB_SUSPENDING 2
+
+struct nfcmrvl_usb_drv_data {
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ unsigned long flags;
+ struct work_struct waker;
+ struct usb_anchor tx_anchor;
+ struct usb_anchor bulk_anchor;
+ struct usb_anchor deferred;
+ int tx_in_flight;
+ /* protects tx_in_flight */
+ spinlock_t txlock;
+ struct usb_endpoint_descriptor *bulk_tx_ep;
+ struct usb_endpoint_descriptor *bulk_rx_ep;
+ int suspend_count;
+ struct nfcmrvl_private *priv;
+};
+
+static int nfcmrvl_inc_tx(struct nfcmrvl_usb_drv_data *drv_data)
+{
+ unsigned long flags;
+ int rv;
+
+ spin_lock_irqsave(&drv_data->txlock, flags);
+ rv = test_bit(NFCMRVL_USB_SUSPENDING, &drv_data->flags);
+ if (!rv)
+ drv_data->tx_in_flight++;
+ spin_unlock_irqrestore(&drv_data->txlock, flags);
+
+ return rv;
+}
+
+static void nfcmrvl_bulk_complete(struct urb *urb)
+{
+ struct nfcmrvl_usb_drv_data *drv_data = urb->context;
+ int err;
+
+ dev_dbg(&drv_data->udev->dev, "urb %p status %d count %d",
+ urb, urb->status, urb->actual_length);
+
+ if (!test_bit(NFCMRVL_NCI_RUNNING, &drv_data->flags))
+ return;
+
+ if (!urb->status) {
+ if (nfcmrvl_nci_recv_frame(drv_data->priv, urb->transfer_buffer,
+ urb->actual_length) < 0)
+ nfc_err(&drv_data->udev->dev, "corrupted Rx packet");
+ }
+
+ if (!test_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags))
+ return;
+
+ usb_anchor_urb(urb, &drv_data->bulk_anchor);
+ usb_mark_last_busy(drv_data->udev);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ /* -EPERM: urb is being killed;
+ * -ENODEV: device got disconnected
+ */
+ if (err != -EPERM && err != -ENODEV)
+ nfc_err(&drv_data->udev->dev,
+ "urb %p failed to resubmit (%d)", urb, -err);
+ usb_unanchor_urb(urb);
+ }
+}
+
+static int
+nfcmrvl_submit_bulk_urb(struct nfcmrvl_usb_drv_data *drv_data, gfp_t mem_flags)
+{
+ struct urb *urb;
+ unsigned char *buf;
+ unsigned int pipe;
+ int err, size = NFCMRVL_NCI_MAX_EVENT_SIZE;
+
+ if (!drv_data->bulk_rx_ep)
+ return -ENODEV;
+
+ urb = usb_alloc_urb(0, mem_flags);
+ if (!urb)
+ return -ENOMEM;
+
+ buf = kmalloc(size, mem_flags);
+ if (!buf) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ pipe = usb_rcvbulkpipe(drv_data->udev,
+ drv_data->bulk_rx_ep->bEndpointAddress);
+
+ usb_fill_bulk_urb(urb, drv_data->udev, pipe, buf, size,
+ nfcmrvl_bulk_complete, drv_data);
+
+ urb->transfer_flags |= URB_FREE_BUFFER;
+
+ usb_mark_last_busy(drv_data->udev);
+ usb_anchor_urb(urb, &drv_data->bulk_anchor);
+
+ err = usb_submit_urb(urb, mem_flags);
+ if (err) {
+ if (err != -EPERM && err != -ENODEV)
+ nfc_err(&drv_data->udev->dev,
+ "urb %p submission failed (%d)", urb, -err);
+ usb_unanchor_urb(urb);
+ }
+
+ usb_free_urb(urb);
+
+ return err;
+}
+
+static void nfcmrvl_tx_complete(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct nci_dev *ndev = (struct nci_dev *)skb->dev;
+ struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
+ struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data;
+
+ nfc_info(priv->dev, "urb %p status %d count %d",
+ urb, urb->status, urb->actual_length);
+
+ spin_lock(&drv_data->txlock);
+ drv_data->tx_in_flight--;
+ spin_unlock(&drv_data->txlock);
+
+ kfree(urb->setup_packet);
+ kfree_skb(skb);
+}
+
+static int nfcmrvl_usb_nci_open(struct nfcmrvl_private *priv)
+{
+ struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data;
+ int err;
+
+ err = usb_autopm_get_interface(drv_data->intf);
+ if (err)
+ return err;
+
+ drv_data->intf->needs_remote_wakeup = 1;
+
+ err = nfcmrvl_submit_bulk_urb(drv_data, GFP_KERNEL);
+ if (err)
+ goto failed;
+
+ set_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags);
+ nfcmrvl_submit_bulk_urb(drv_data, GFP_KERNEL);
+
+ usb_autopm_put_interface(drv_data->intf);
+ return 0;
+
+failed:
+ usb_autopm_put_interface(drv_data->intf);
+ return err;
+}
+
+static void nfcmrvl_usb_stop_traffic(struct nfcmrvl_usb_drv_data *drv_data)
+{
+ usb_kill_anchored_urbs(&drv_data->bulk_anchor);
+}
+
+static int nfcmrvl_usb_nci_close(struct nfcmrvl_private *priv)
+{
+ struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data;
+ int err;
+
+ cancel_work_sync(&drv_data->waker);
+
+ clear_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags);
+
+ nfcmrvl_usb_stop_traffic(drv_data);
+ usb_kill_anchored_urbs(&drv_data->tx_anchor);
+ err = usb_autopm_get_interface(drv_data->intf);
+ if (err)
+ goto failed;
+
+ drv_data->intf->needs_remote_wakeup = 0;
+ usb_autopm_put_interface(drv_data->intf);
+
+failed:
+ usb_scuttle_anchored_urbs(&drv_data->deferred);
+ return 0;
+}
+
+static int nfcmrvl_usb_nci_send(struct nfcmrvl_private *priv,
+ struct sk_buff *skb)
+{
+ struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data;
+ struct urb *urb;
+ unsigned int pipe;
+ int err;
+
+ if (!drv_data->bulk_tx_ep)
+ return -ENODEV;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+
+ pipe = usb_sndbulkpipe(drv_data->udev,
+ drv_data->bulk_tx_ep->bEndpointAddress);
+
+ usb_fill_bulk_urb(urb, drv_data->udev, pipe, skb->data, skb->len,
+ nfcmrvl_tx_complete, skb);
+
+ err = nfcmrvl_inc_tx(drv_data);
+ if (err) {
+ usb_anchor_urb(urb, &drv_data->deferred);
+ schedule_work(&drv_data->waker);
+ err = 0;
+ goto done;
+ }
+
+ usb_anchor_urb(urb, &drv_data->tx_anchor);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ if (err != -EPERM && err != -ENODEV)
+ nfc_err(&drv_data->udev->dev,
+ "urb %p submission failed (%d)", urb, -err);
+ kfree(urb->setup_packet);
+ usb_unanchor_urb(urb);
+ } else {
+ usb_mark_last_busy(drv_data->udev);
+ }
+
+done:
+ usb_free_urb(urb);
+ return err;
+}
+
+static struct nfcmrvl_if_ops usb_ops = {
+ .nci_open = nfcmrvl_usb_nci_open,
+ .nci_close = nfcmrvl_usb_nci_close,
+ .nci_send = nfcmrvl_usb_nci_send,
+};
+
+static void nfcmrvl_waker(struct work_struct *work)
+{
+ struct nfcmrvl_usb_drv_data *drv_data =
+ container_of(work, struct nfcmrvl_usb_drv_data, waker);
+ int err;
+
+ err = usb_autopm_get_interface(drv_data->intf);
+ if (err)
+ return;
+
+ usb_autopm_put_interface(drv_data->intf);
+}
+
+static int nfcmrvl_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_endpoint_descriptor *ep_desc;
+ struct nfcmrvl_usb_drv_data *drv_data;
+ struct nfcmrvl_private *priv;
+ int i;
+ struct usb_device *udev = interface_to_usbdev(intf);
+
+ nfc_info(&udev->dev, "intf %p id %p", intf, id);
+
+ drv_data = devm_kzalloc(&intf->dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
+ ep_desc = &intf->cur_altsetting->endpoint[i].desc;
+
+ if (!drv_data->bulk_tx_ep &&
+ usb_endpoint_is_bulk_out(ep_desc)) {
+ drv_data->bulk_tx_ep = ep_desc;
+ continue;
+ }
+
+ if (!drv_data->bulk_rx_ep &&
+ usb_endpoint_is_bulk_in(ep_desc)) {
+ drv_data->bulk_rx_ep = ep_desc;
+ continue;
+ }
+ }
+
+ if (!drv_data->bulk_tx_ep || !drv_data->bulk_rx_ep)
+ return -ENODEV;
+
+ drv_data->udev = udev;
+ drv_data->intf = intf;
+
+ INIT_WORK(&drv_data->waker, nfcmrvl_waker);
+ spin_lock_init(&drv_data->txlock);
+
+ init_usb_anchor(&drv_data->tx_anchor);
+ init_usb_anchor(&drv_data->bulk_anchor);
+ init_usb_anchor(&drv_data->deferred);
+
+ priv = nfcmrvl_nci_register_dev(drv_data, &usb_ops,
+ &drv_data->udev->dev);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ drv_data->priv = priv;
+ priv->dev = &drv_data->udev->dev;
+
+ usb_set_intfdata(intf, drv_data);
+
+ return 0;
+}
+
+static void nfcmrvl_disconnect(struct usb_interface *intf)
+{
+ struct nfcmrvl_usb_drv_data *drv_data = usb_get_intfdata(intf);
+
+ if (!drv_data)
+ return;
+
+ nfc_info(&drv_data->udev->dev, "intf %p", intf);
+
+ nfcmrvl_nci_unregister_dev(drv_data->priv);
+
+ usb_set_intfdata(drv_data->intf, NULL);
+}
+
+#ifdef CONFIG_PM
+static int nfcmrvl_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct nfcmrvl_usb_drv_data *drv_data = usb_get_intfdata(intf);
+
+ nfc_info(&drv_data->udev->dev, "intf %p", intf);
+
+ if (drv_data->suspend_count++)
+ return 0;
+
+ spin_lock_irq(&drv_data->txlock);
+ if (!(PMSG_IS_AUTO(message) && drv_data->tx_in_flight)) {
+ set_bit(NFCMRVL_USB_SUSPENDING, &drv_data->flags);
+ spin_unlock_irq(&drv_data->txlock);
+ } else {
+ spin_unlock_irq(&drv_data->txlock);
+ drv_data->suspend_count--;
+ return -EBUSY;
+ }
+
+ nfcmrvl_usb_stop_traffic(drv_data);
+ usb_kill_anchored_urbs(&drv_data->tx_anchor);
+
+ return 0;
+}
+
+static void nfcmrvl_play_deferred(struct nfcmrvl_usb_drv_data *drv_data)
+{
+ struct urb *urb;
+ int err;
+
+ while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err)
+ break;
+
+ drv_data->tx_in_flight++;
+ }
+ usb_scuttle_anchored_urbs(&drv_data->deferred);
+}
+
+static int nfcmrvl_resume(struct usb_interface *intf)
+{
+ struct nfcmrvl_usb_drv_data *drv_data = usb_get_intfdata(intf);
+ int err = 0;
+
+ nfc_info(&drv_data->udev->dev, "intf %p", intf);
+
+ if (--drv_data->suspend_count)
+ return 0;
+
+ if (!test_bit(NFCMRVL_NCI_RUNNING, &drv_data->flags))
+ goto done;
+
+ if (test_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags)) {
+ err = nfcmrvl_submit_bulk_urb(drv_data, GFP_NOIO);
+ if (err) {
+ clear_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags);
+ goto failed;
+ }
+
+ nfcmrvl_submit_bulk_urb(drv_data, GFP_NOIO);
+ }
+
+ spin_lock_irq(&drv_data->txlock);
+ nfcmrvl_play_deferred(drv_data);
+ clear_bit(NFCMRVL_USB_SUSPENDING, &drv_data->flags);
+ spin_unlock_irq(&drv_data->txlock);
+
+ return 0;
+
+failed:
+ usb_scuttle_anchored_urbs(&drv_data->deferred);
+done:
+ spin_lock_irq(&drv_data->txlock);
+ clear_bit(NFCMRVL_USB_SUSPENDING, &drv_data->flags);
+ spin_unlock_irq(&drv_data->txlock);
+
+ return err;
+}
+#endif
+
+static struct usb_driver nfcmrvl_usb_driver = {
+ .name = "nfcmrvl",
+ .probe = nfcmrvl_probe,
+ .disconnect = nfcmrvl_disconnect,
+#ifdef CONFIG_PM
+ .suspend = nfcmrvl_suspend,
+ .resume = nfcmrvl_resume,
+ .reset_resume = nfcmrvl_resume,
+#endif
+ .id_table = nfcmrvl_table,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+ .soft_unbind = 1,
+};
+module_usb_driver(nfcmrvl_usb_driver);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell NFC-over-USB driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index 71308645593f..683671a71c7e 100644
--- a/drivers/nfc/nfcwilink.c
+++ b/drivers/nfc/nfcwilink.c
@@ -22,8 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
#include <linux/platform_device.h>
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 2daf04c07338..cf1a87bb74f8 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/device.h>
@@ -523,6 +521,9 @@ static bool pn533_acr122_is_rx_frame_valid(void *_frame, struct pn533 *dev)
if (frame->ccid.type != 0x83)
return false;
+ if (!frame->ccid.datalen)
+ return false;
+
if (frame->data[frame->ccid.datalen - 2] == 0x63)
return false;
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index b158ee1c2ac6..d6185ff2f87b 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index ee67de50c36f..330cd4031009 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index 74cfa0a88b9e..3df4a109cfad 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -197,42 +195,42 @@ static int pn544_hci_ready(struct nfc_hci_dev *hdev)
{{0x9e, 0xaa}, 0x01},
- {{0x9b, 0xd1}, 0x0d},
- {{0x9b, 0xd2}, 0x24},
- {{0x9b, 0xd3}, 0x0a},
- {{0x9b, 0xd4}, 0x22},
- {{0x9b, 0xd5}, 0x08},
- {{0x9b, 0xd6}, 0x1e},
- {{0x9b, 0xdd}, 0x1c},
+ {{0x9b, 0xd1}, 0x17},
+ {{0x9b, 0xd2}, 0x58},
+ {{0x9b, 0xd3}, 0x10},
+ {{0x9b, 0xd4}, 0x47},
+ {{0x9b, 0xd5}, 0x0c},
+ {{0x9b, 0xd6}, 0x37},
+ {{0x9b, 0xdd}, 0x33},
- {{0x9b, 0x84}, 0x13},
- {{0x99, 0x81}, 0x7f},
- {{0x99, 0x31}, 0x70},
+ {{0x9b, 0x84}, 0x00},
+ {{0x99, 0x81}, 0x79},
+ {{0x99, 0x31}, 0x79},
{{0x98, 0x00}, 0x3f},
- {{0x9f, 0x09}, 0x00},
+ {{0x9f, 0x09}, 0x02},
{{0x9f, 0x0a}, 0x05},
{{0x9e, 0xd1}, 0xa1},
- {{0x99, 0x23}, 0x00},
-
- {{0x9e, 0x74}, 0x80},
+ {{0x99, 0x23}, 0x01},
+ {{0x9e, 0x74}, 0x00},
+ {{0x9e, 0x90}, 0x00},
{{0x9f, 0x28}, 0x10},
- {{0x9f, 0x35}, 0x14},
+ {{0x9f, 0x35}, 0x04},
- {{0x9f, 0x36}, 0x60},
+ {{0x9f, 0x36}, 0x11},
{{0x9c, 0x31}, 0x00},
- {{0x9c, 0x32}, 0xc8},
+ {{0x9c, 0x32}, 0x00},
- {{0x9c, 0x19}, 0x40},
+ {{0x9c, 0x19}, 0x0a},
- {{0x9c, 0x1a}, 0x40},
+ {{0x9c, 0x1a}, 0x0a},
{{0x9c, 0x0c}, 0x00},
@@ -242,13 +240,13 @@ static int pn544_hci_ready(struct nfc_hci_dev *hdev)
{{0x9c, 0x13}, 0x00},
- {{0x98, 0xa2}, 0x0e},
+ {{0x98, 0xa2}, 0x09},
- {{0x98, 0x93}, 0x40},
+ {{0x98, 0x93}, 0x00},
- {{0x98, 0x7d}, 0x02},
+ {{0x98, 0x7d}, 0x08},
{{0x98, 0x7e}, 0x00},
- {{0x9f, 0xc8}, 0x01},
+ {{0x9f, 0xc8}, 0x00},
};
struct hw_config *p = hw_config;
int count = ARRAY_SIZE(hw_config);
diff --git a/drivers/nfc/pn544/pn544.h b/drivers/nfc/pn544/pn544.h
index 01020e585443..491bf45da358 100644
--- a/drivers/nfc/pn544/pn544.h
+++ b/drivers/nfc/pn544/pn544.h
@@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __LOCAL_PN544_H_
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 8a0571eb2627..a8555f81cbba 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -1509,6 +1509,7 @@ static void port100_disconnect(struct usb_interface *interface)
usb_free_urb(dev->in_urb);
usb_free_urb(dev->out_urb);
+ usb_put_dev(dev->udev);
kfree(dev->cmd);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 4b9317bdb81c..1a54f1ffaadb 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
(unsigned long long)cp, (unsigned long long)s,
(unsigned long long)da);
- /*
- * If the number of address cells is larger than 2 we assume the
- * mapping doesn't specify a physical address. Rather, the address
- * specifies an identifier that must match exactly.
- */
- if (na > 2 && memcmp(range, addr, na * 4) != 0)
- return OF_BAD_ADDR;
-
if (da < cp || da >= (cp + s))
return OF_BAD_ADDR;
return da - cp;
@@ -107,11 +99,12 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
static int of_bus_pci_match(struct device_node *np)
{
/*
+ * "pciex" is PCI Express
* "vci" is for the /chaos bridge on 1st-gen PCI powermacs
* "ht" is hypertransport
*/
- return !strcmp(np->type, "pci") || !strcmp(np->type, "vci") ||
- !strcmp(np->type, "ht");
+ return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") ||
+ !strcmp(np->type, "vci") || !strcmp(np->type, "ht");
}
static void of_bus_pci_count_cells(struct device_node *np,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index f807d0edabf3..10b51106c854 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -415,6 +415,9 @@ static int __of_device_is_available(const struct device_node *device)
const char *status;
int statlen;
+ if (!device)
+ return 0;
+
status = __of_get_property(device, "status", &statlen);
if (status == NULL)
return 1;
@@ -727,13 +730,49 @@ out:
}
EXPORT_SYMBOL(of_find_node_with_property);
+static const struct of_device_id *
+of_match_compatible(const struct of_device_id *matches,
+ const struct device_node *node)
+{
+ const char *cp;
+ int cplen, l;
+ const struct of_device_id *m;
+
+ cp = __of_get_property(node, "compatible", &cplen);
+ while (cp && (cplen > 0)) {
+ m = matches;
+ while (m->name[0] || m->type[0] || m->compatible[0]) {
+ /* Only match for the entries without type and name */
+ if (m->name[0] || m->type[0] ||
+ of_compat_cmp(m->compatible, cp,
+ strlen(m->compatible)))
+ m++;
+ else
+ return m;
+ }
+
+ /* Get node's next compatible string */
+ l = strlen(cp) + 1;
+ cp += l;
+ cplen -= l;
+ }
+
+ return NULL;
+}
+
static
const struct of_device_id *__of_match_node(const struct of_device_id *matches,
const struct device_node *node)
{
+ const struct of_device_id *m;
+
if (!matches)
return NULL;
+ m = of_match_compatible(matches, node);
+ if (m)
+ return m;
+
while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
int match = 1;
if (matches->name[0])
@@ -757,7 +796,12 @@ const struct of_device_id *__of_match_node(const struct of_device_id *matches,
* @matches: array of of device match structures to search in
* @node: the of device structure to match against
*
- * Low level utility function used by device matching.
+ * Low level utility function used by device matching. We have two ways
+ * of matching:
+ * - Try to find the best compatible match by comparing each compatible
+ * string of the device node with all the given matches in turn.
+ * - If that fails, fall back to __of_device_is_compatible(), together
+ * with any type and name requirements.
*/
const struct of_device_id *of_match_node(const struct of_device_id *matches,
const struct device_node *node)
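
The practical effect of the compatible-first pass added above: matching now follows the order of the node's compatible strings (most specific first, by convention) instead of the order of the table entries. A small example with made-up compatible strings — a node carrying compatible = "vendor,chip-v2", "vendor,chip" now resolves to the more specific entry even though it is listed second in the table:

    /* Illustrative match table; the compatible strings are invented. */
    #include <linux/mod_devicetable.h>
    #include <linux/of.h>

    static const struct of_device_id my_ids[] = {
            { .compatible = "vendor,chip"    },
            { .compatible = "vendor,chip-v2" },  /* preferred for a v2 node */
            { /* sentinel */ }
    };

    static bool my_node_matches(struct device_node *np)
    {
            return of_match_node(my_ids, np) != NULL;
    }
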
diff --git a/drivers/of/device.c b/drivers/of/device.c
index f685e55e0717..dafb9736ab9b 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -85,6 +85,9 @@ ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
int cplen, i;
ssize_t tsize, csize, repend;
+ if ((!dev) || (!dev->of_node))
+ return -ENODEV;
+
/* Name & Type */
csize = snprintf(str, len, "of:N%sT%s", dev->of_node->name,
dev->of_node->type);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 2fa024b97c43..758b4f8b30b7 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -922,8 +922,16 @@ void __init unflatten_device_tree(void)
*/
void __init unflatten_and_copy_device_tree(void)
{
- int size = __be32_to_cpu(initial_boot_params->totalsize);
- void *dt = early_init_dt_alloc_memory_arch(size,
+ int size;
+ void *dt;
+
+ if (!initial_boot_params) {
+ pr_warn("No valid device tree found, continuing without\n");
+ return;
+ }
+
+ size = __be32_to_cpu(initial_boot_params->totalsize);
+ dt = early_init_dt_alloc_memory_arch(size,
__alignof__(struct boot_param_header));
if (dt) {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 786b0b47fae4..9bcf2cf19357 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -165,7 +165,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
if (of_get_property(ipar, "interrupt-controller", NULL) !=
NULL) {
pr_debug(" -> got it !\n");
- of_node_put(old);
return 0;
}
@@ -217,6 +216,9 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
goto fail;
}
+ if (!of_device_is_available(newpar))
+ match = 0;
+
/* Get #interrupt-cells and #address-cells of new
* parent
*/
@@ -250,8 +252,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
* Successfully parsed an interrupt-map translation; copy new
* interrupt specifier into the out_irq structure
*/
- of_node_put(out_irq->np);
- out_irq->np = of_node_get(newpar);
+ out_irq->np = newpar;
match_array = imap - newaddrsize - newintsize;
for (i = 0; i < newintsize; i++)
@@ -268,7 +269,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
}
fail:
of_node_put(ipar);
- of_node_put(out_irq->np);
of_node_put(newpar);
return -EINVAL;
@@ -438,7 +438,8 @@ void __init of_irq_init(const struct of_device_id *matches)
INIT_LIST_HEAD(&intc_parent_list);
for_each_matching_node(np, matches) {
- if (!of_find_property(np, "interrupt-controller", NULL))
+ if (!of_find_property(np, "interrupt-controller", NULL) ||
+ !of_device_is_available(np))
continue;
/*
* Here, we allocate and populate an intc_desc with the node
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index d5a57a9e329c..875b7b6f0d2a 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -22,6 +22,71 @@
MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_LICENSE("GPL");
+static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
+{
+ phydev->supported |= PHY_DEFAULT_FEATURES;
+
+ switch (max_speed) {
+ default:
+ return;
+
+ case SPEED_1000:
+ phydev->supported |= PHY_1000BT_FEATURES;
+ case SPEED_100:
+ phydev->supported |= PHY_100BT_FEATURES;
+ case SPEED_10:
+ phydev->supported |= PHY_10BT_FEATURES;
+ }
+}
+
+static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *child,
+ u32 addr)
+{
+ struct phy_device *phy;
+ bool is_c45;
+ int rc, prev_irq;
+ u32 max_speed = 0;
+
+ is_c45 = of_device_is_compatible(child,
+ "ethernet-phy-ieee802.3-c45");
+
+ phy = get_phy_device(mdio, addr, is_c45);
+ if (!phy || IS_ERR(phy))
+ return 1;
+
+ if (mdio->irq) {
+ prev_irq = mdio->irq[addr];
+ mdio->irq[addr] =
+ irq_of_parse_and_map(child, 0);
+ if (!mdio->irq[addr])
+ mdio->irq[addr] = prev_irq;
+ }
+
+ /* Associate the OF node with the device structure so it
+ * can be looked up later */
+ of_node_get(child);
+ phy->dev.of_node = child;
+
+ /* All data is now stored in the phy struct;
+ * register it */
+ rc = phy_device_register(phy);
+ if (rc) {
+ phy_device_free(phy);
+ of_node_put(child);
+ return 1;
+ }
+
+ /* Set phydev->supported based on the "max-speed" property
+ * if present */
+ if (!of_property_read_u32(child, "max-speed", &max_speed))
+ of_set_phy_supported(phy, max_speed);
+
+ dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
+ child->name, addr);
+
+ return 0;
+}
+
/**
* of_mdiobus_register - Register mii_bus and create PHYs from the device tree
* @mdio: pointer to mii_bus structure
@@ -32,11 +97,10 @@ MODULE_LICENSE("GPL");
*/
int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
{
- struct phy_device *phy;
struct device_node *child;
const __be32 *paddr;
u32 addr;
- bool is_c45, scanphys = false;
+ bool scanphys = false;
int rc, i, len;
/* Mask out all PHYs from auto probing. Instead the PHYs listed in
@@ -67,44 +131,15 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
}
addr = be32_to_cpup(paddr);
- if (addr >= 32) {
+ if (addr >= PHY_MAX_ADDR) {
dev_err(&mdio->dev, "%s PHY address %i is too large\n",
child->full_name, addr);
continue;
}
- if (mdio->irq) {
- mdio->irq[addr] = irq_of_parse_and_map(child, 0);
- if (!mdio->irq[addr])
- mdio->irq[addr] = PHY_POLL;
- }
-
- is_c45 = of_device_is_compatible(child,
- "ethernet-phy-ieee802.3-c45");
- phy = get_phy_device(mdio, addr, is_c45);
-
- if (!phy || IS_ERR(phy)) {
- dev_err(&mdio->dev,
- "cannot get PHY at address %i\n",
- addr);
- continue;
- }
-
- /* Associate the OF node with the device structure so it
- * can be looked up later */
- of_node_get(child);
- phy->dev.of_node = child;
-
- /* All data is now stored in the phy struct; register it */
- rc = phy_device_register(phy);
- if (rc) {
- phy_device_free(phy);
- of_node_put(child);
+ rc = of_mdiobus_register_phy(mdio, child, addr);
+ if (rc)
continue;
- }
-
- dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
- child->name, addr);
}
if (!scanphys)
@@ -117,9 +152,6 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
if (paddr)
continue;
- is_c45 = of_device_is_compatible(child,
- "ethernet-phy-ieee802.3-c45");
-
for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
/* skip already registered PHYs */
if (mdio->phy_map[addr])
@@ -129,34 +161,9 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
dev_info(&mdio->dev, "scan phy %s at address %i\n",
child->name, addr);
- phy = get_phy_device(mdio, addr, is_c45);
- if (!phy || IS_ERR(phy))
+ rc = of_mdiobus_register_phy(mdio, child, addr);
+ if (rc)
continue;
-
- if (mdio->irq) {
- mdio->irq[addr] =
- irq_of_parse_and_map(child, 0);
- if (!mdio->irq[addr])
- mdio->irq[addr] = PHY_POLL;
- }
-
- /* Associate the OF node with the device structure so it
- * can be looked up later */
- of_node_get(child);
- phy->dev.of_node = child;
-
- /* All data is now stored in the phy struct;
- * register it */
- rc = phy_device_register(phy);
- if (rc) {
- phy_device_free(phy);
- of_node_put(child);
- continue;
- }
-
- dev_info(&mdio->dev, "registered phy %s at address %i\n",
- child->name, addr);
- break;
}
}
@@ -247,3 +254,23 @@ struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
return IS_ERR(phy) ? NULL : phy;
}
EXPORT_SYMBOL(of_phy_connect_fixed_link);
+
+/**
+ * of_phy_attach - Attach to a PHY without starting the state machine
+ * @dev: pointer to net_device claiming the phy
+ * @phy_np: Node pointer for the PHY
+ * @flags: flags to pass to the PHY
+ * @iface: PHY data interface type
+ */
+struct phy_device *of_phy_attach(struct net_device *dev,
+ struct device_node *phy_np, u32 flags,
+ phy_interface_t iface)
+{
+ struct phy_device *phy = of_phy_find_device(phy_np);
+
+ if (!phy)
+ return NULL;
+
+ return phy_attach_direct(dev, phy, flags, iface) ? NULL : phy;
+}
+EXPORT_SYMBOL(of_phy_attach);
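
As an illustration (not part of the patch), a short sketch of how a MAC driver might use the of_phy_attach() helper added above; the driver name, the "phy-handle" lookup and the interface mode are assumptions, not taken from the commit.

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

/*
 * Hypothetical MAC probe fragment: attach to the PHY referenced by
 * "phy-handle" without starting the PHY state machine, which is exactly
 * what of_phy_attach() provides.
 */
static int acme_mac_attach_phy(struct net_device *ndev,
			       struct device_node *np)
{
	struct device_node *phy_np;
	struct phy_device *phydev;

	phy_np = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_np)
		return -ENODEV;

	phydev = of_phy_attach(ndev, phy_np, 0, PHY_INTERFACE_MODE_SGMII);
	of_node_put(phy_np);

	return phydev ? 0 : -ENODEV;
}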
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index 8f9be2e09937..a208a457558c 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -30,6 +30,7 @@ static const char *phy_modes[] = {
[PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
[PHY_INTERFACE_MODE_RTBI] = "rtbi",
[PHY_INTERFACE_MODE_SMII] = "smii",
+ [PHY_INTERFACE_MODE_XGMII] = "xgmii",
};
/**
diff --git a/drivers/parport/parport_mfc3.c b/drivers/parport/parport_mfc3.c
index 7578d79b3688..2f650f68af14 100644
--- a/drivers/parport/parport_mfc3.c
+++ b/drivers/parport/parport_mfc3.c
@@ -300,7 +300,7 @@ static int __init parport_mfc3_init(void)
if (!request_mem_region(piabase, sizeof(struct pia), "PIA"))
continue;
- pp = (struct pia *)ZTWO_VADDR(piabase);
+ pp = ZTWO_VADDR(piabase);
pp->crb = 0;
pp->pddrb = 255; /* all data pins output */
pp->crb = PIA_DDR|32|8;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 963761526229..76ee7750bc5e 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2600,8 +2600,6 @@ enum parport_pc_pci_cards {
syba_2p_epp,
syba_1p_ecp,
titan_010l,
- titan_1284p1,
- titan_1284p2,
avlab_1p,
avlab_2p,
oxsemi_952,
@@ -2660,8 +2658,6 @@ static struct parport_pc_pci {
/* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } },
/* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } },
/* titan_010l */ { 1, { { 3, -1 }, } },
- /* titan_1284p1 */ { 1, { { 0, 1 }, } },
- /* titan_1284p2 */ { 2, { { 0, 1 }, { 2, 3 }, } },
/* avlab_1p */ { 1, { { 0, 1}, } },
/* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} },
/* The Oxford Semi cards are unusual: 954 doesn't support ECP,
@@ -2677,8 +2673,8 @@ static struct parport_pc_pci {
/* netmos_9705 */ { 1, { { 0, -1 }, } },
/* netmos_9715 */ { 2, { { 0, 1 }, { 2, 3 },} },
/* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
- /* netmos_9805 */ { 1, { { 0, -1 }, } },
- /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
+ /* netmos_9805 */ { 1, { { 0, 1 }, } },
+ /* netmos_9815 */ { 2, { { 0, 1 }, { 2, 3 }, } },
/* netmos_9901 */ { 1, { { 0, -1 }, } },
/* netmos_9865 */ { 1, { { 0, -1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
@@ -2722,8 +2718,6 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_1p_ecp },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_010L,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_010l },
- { 0x9710, 0x9805, 0x1000, 0x0010, 0, 0, titan_1284p1 },
- { 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 },
/* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
/* AFAVLAB_TK9902 */
{ 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p},
@@ -2827,16 +2821,12 @@ static int parport_pc_pci_probe(struct pci_dev *dev,
if (irq == IRQ_NONE) {
printk(KERN_DEBUG
"PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n",
- parport_pc_pci_tbl[i + last_sio].vendor,
- parport_pc_pci_tbl[i + last_sio].device,
- io_lo, io_hi);
+ id->vendor, id->device, io_lo, io_hi);
irq = PARPORT_IRQ_NONE;
} else {
printk(KERN_DEBUG
"PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n",
- parport_pc_pci_tbl[i + last_sio].vendor,
- parport_pc_pci_tbl[i + last_sio].device,
- io_lo, io_hi, irq);
+ id->vendor, id->device, io_lo, io_hi, irq);
}
data->ports[count] =
parport_pc_probe_port(io_lo, io_hi, irq,
@@ -2866,8 +2856,6 @@ static void parport_pc_pci_remove(struct pci_dev *dev)
struct pci_parport_data *data = pci_get_drvdata(dev);
int i;
- pci_set_drvdata(dev, NULL);
-
if (data) {
for (i = data->num - 1; i >= 0; i--)
parport_pc_unregister_port(data->ports[i]);
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 1b8bdb7e9bf4..ff53314100f6 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -596,13 +596,11 @@ static int parport_serial_pci_probe(struct pci_dev *dev,
err = pci_enable_device (dev);
if (err) {
- pci_set_drvdata (dev, NULL);
kfree (priv);
return err;
}
if (parport_register (dev, id)) {
- pci_set_drvdata (dev, NULL);
kfree (priv);
return -ENODEV;
}
@@ -611,7 +609,6 @@ static int parport_serial_pci_probe(struct pci_dev *dev,
int i;
for (i = 0; i < priv->num_par; i++)
parport_pc_unregister_port (priv->port[i]);
- pci_set_drvdata (dev, NULL);
kfree (priv);
return -ENODEV;
}
@@ -624,8 +621,6 @@ static void parport_serial_pci_remove(struct pci_dev *dev)
struct parport_serial_private *priv = pci_get_drvdata (dev);
int i;
- pci_set_drvdata(dev, NULL);
-
// Serial ports
if (priv->serial)
pciserial_remove_ports(priv->serial);
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index b6a99f7a9b20..893503fa1782 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -105,9 +105,10 @@ config PCI_PASID
If unsure, say N.
config PCI_IOAPIC
- tristate "PCI IO-APIC hotplug support" if X86
+ bool "PCI IO-APIC hotplug support" if X86
depends on PCI
depends on ACPI
+ depends on X86_IO_APIC
default !X86
config PCI_LABEL
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 6ebf5bf8e7a7..17d2b07ee67c 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -4,7 +4,7 @@
obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
- irq.o vpd.o setup-bus.o
+ irq.o vpd.o setup-bus.o vc.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 0857ca981fae..7f8b78c08879 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -381,30 +381,6 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
}
/**
- * pci_vpd_truncate - Set available Vital Product Data size
- * @dev: pci device struct
- * @size: available memory in bytes
- *
- * Adjust size of available VPD area.
- */
-int pci_vpd_truncate(struct pci_dev *dev, size_t size)
-{
- if (!dev->vpd)
- return -EINVAL;
-
- /* limited by the access method */
- if (size > dev->vpd->len)
- return -EINVAL;
-
- dev->vpd->len = size;
- if (dev->vpd->attr)
- dev->vpd->attr->size = size;
-
- return 0;
-}
-EXPORT_SYMBOL(pci_vpd_truncate);
-
-/**
* pci_cfg_access_lock - Lock PCI config reads/writes
* @dev: pci device struct
*
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index e52d7ffa38b9..a8099d4d0c9d 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -235,27 +235,6 @@ void pci_disable_pri(struct pci_dev *pdev)
EXPORT_SYMBOL_GPL(pci_disable_pri);
/**
- * pci_pri_enabled - Checks if PRI capability is enabled
- * @pdev: PCI device structure
- *
- * Returns true if PRI is enabled on the device, false otherwise
- */
-bool pci_pri_enabled(struct pci_dev *pdev)
-{
- u16 control;
- int pos;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
- return false;
-
- pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
-
- return (control & PCI_PRI_CTRL_ENABLE) ? true : false;
-}
-EXPORT_SYMBOL_GPL(pci_pri_enabled);
-
-/**
* pci_reset_pri - Resets device's PRI state
* @pdev: PCI device structure
*
@@ -282,67 +261,6 @@ int pci_reset_pri(struct pci_dev *pdev)
return 0;
}
EXPORT_SYMBOL_GPL(pci_reset_pri);
-
-/**
- * pci_pri_stopped - Checks whether the PRI capability is stopped
- * @pdev: PCI device structure
- *
- * Returns true if the PRI capability on the device is disabled and the
- * device has no outstanding PRI requests, false otherwise. The device
- * indicates this via the STOPPED bit in the status register of the
- * capability.
- * The device internal state can be cleared by resetting the PRI state
- * with pci_reset_pri(). This can force the capability into the STOPPED
- * state.
- */
-bool pci_pri_stopped(struct pci_dev *pdev)
-{
- u16 control, status;
- int pos;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
- return true;
-
- pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
-
- if (control & PCI_PRI_CTRL_ENABLE)
- return false;
-
- return (status & PCI_PRI_STATUS_STOPPED) ? true : false;
-}
-EXPORT_SYMBOL_GPL(pci_pri_stopped);
-
-/**
- * pci_pri_status - Request PRI status of a device
- * @pdev: PCI device structure
- *
- * Returns negative value on failure, status on success. The status can
- * be checked against status-bits. Supported bits are currently:
- * PCI_PRI_STATUS_RF: Response failure
- * PCI_PRI_STATUS_UPRGI: Unexpected Page Request Group Index
- * PCI_PRI_STATUS_STOPPED: PRI has stopped
- */
-int pci_pri_status(struct pci_dev *pdev)
-{
- u16 status, control;
- int pos;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
- return -EINVAL;
-
- pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
-
- /* Stopped bit is undefined when enable == 1, so clear it */
- if (control & PCI_PRI_CTRL_ENABLE)
- status &= ~PCI_PRI_STATUS_STOPPED;
-
- return status;
-}
-EXPORT_SYMBOL_GPL(pci_pri_status);
#endif /* CONFIG_PCI_PRI */
#ifdef CONFIG_PCI_PASID
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index fc1b74013743..00660cc502c5 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -98,41 +98,54 @@ void pci_bus_remove_resources(struct pci_bus *bus)
}
}
-/**
- * pci_bus_alloc_resource - allocate a resource from a parent bus
- * @bus: PCI bus
- * @res: resource to allocate
- * @size: size of resource to allocate
- * @align: alignment of resource to allocate
- * @min: minimum /proc/iomem address to allocate
- * @type_mask: IORESOURCE_* type flags
- * @alignf: resource alignment function
- * @alignf_data: data argument for resource alignment function
- *
- * Given the PCI bus a device resides on, the size, minimum address,
- * alignment and type, try to find an acceptable resource allocation
- * for a specific device resource.
+static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+static struct pci_bus_region pci_64_bit = {0,
+ (dma_addr_t) 0xffffffffffffffffULL};
+static struct pci_bus_region pci_high = {(dma_addr_t) 0x100000000ULL,
+ (dma_addr_t) 0xffffffffffffffffULL};
+#endif
+
+/*
+ * @res contains CPU addresses. Clip it so the corresponding bus addresses
+ * on @bus are entirely within @region. This is used to control the bus
+ * addresses of resources we allocate, e.g., we may need a resource that
+ * can be mapped by a 32-bit BAR.
*/
-int
-pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
+static void pci_clip_resource_to_region(struct pci_bus *bus,
+ struct resource *res,
+ struct pci_bus_region *region)
+{
+ struct pci_bus_region r;
+
+ pcibios_resource_to_bus(bus, &r, res);
+ if (r.start < region->start)
+ r.start = region->start;
+ if (r.end > region->end)
+ r.end = region->end;
+
+ if (r.end < r.start)
+ res->end = res->start - 1;
+ else
+ pcibios_bus_to_resource(bus, res, &r);
+}
+
+static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
resource_size_t size, resource_size_t align,
resource_size_t min, unsigned int type_mask,
resource_size_t (*alignf)(void *,
const struct resource *,
resource_size_t,
resource_size_t),
- void *alignf_data)
+ void *alignf_data,
+ struct pci_bus_region *region)
{
- int i, ret = -ENOMEM;
- struct resource *r;
- resource_size_t max = -1;
+ int i, ret;
+ struct resource *r, avail;
+ resource_size_t max;
type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
- /* don't allocate too high if the pref mem doesn't support 64bit*/
- if (!(res->flags & IORESOURCE_MEM_64))
- max = PCIBIOS_MAX_MEM_32;
-
pci_bus_for_each_resource(bus, r, i) {
if (!r)
continue;
@@ -147,15 +160,74 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
!(res->flags & IORESOURCE_PREFETCH))
continue;
+ avail = *r;
+ pci_clip_resource_to_region(bus, &avail, region);
+ if (!resource_size(&avail))
+ continue;
+
+ /*
+ * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
+ * protect badly documented motherboard resources, but if
+ * this is an already-configured bridge window, its start
+ * overrides "min".
+ */
+ if (avail.start)
+ min = avail.start;
+
+ max = avail.end;
+
/* Ok, try it out.. */
- ret = allocate_resource(r, res, size,
- r->start ? : min,
- max, align,
- alignf, alignf_data);
+ ret = allocate_resource(r, res, size, min, max,
+ align, alignf, alignf_data);
if (ret == 0)
- break;
+ return 0;
}
- return ret;
+ return -ENOMEM;
+}
+
+/**
+ * pci_bus_alloc_resource - allocate a resource from a parent bus
+ * @bus: PCI bus
+ * @res: resource to allocate
+ * @size: size of resource to allocate
+ * @align: alignment of resource to allocate
+ * @min: minimum /proc/iomem address to allocate
+ * @type_mask: IORESOURCE_* type flags
+ * @alignf: resource alignment function
+ * @alignf_data: data argument for resource alignment function
+ *
+ * Given the PCI bus a device resides on, the size, minimum address,
+ * alignment and type, try to find an acceptable resource allocation
+ * for a specific device resource.
+ */
+int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
+ resource_size_t size, resource_size_t align,
+ resource_size_t min, unsigned int type_mask,
+ resource_size_t (*alignf)(void *,
+ const struct resource *,
+ resource_size_t,
+ resource_size_t),
+ void *alignf_data)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ int rc;
+
+ if (res->flags & IORESOURCE_MEM_64) {
+ rc = pci_bus_alloc_from_region(bus, res, size, align, min,
+ type_mask, alignf, alignf_data,
+ &pci_high);
+ if (rc == 0)
+ return 0;
+
+ return pci_bus_alloc_from_region(bus, res, size, align, min,
+ type_mask, alignf, alignf_data,
+ &pci_64_bit);
+ }
+#endif
+
+ return pci_bus_alloc_from_region(bus, res, size, align, min,
+ type_mask, alignf, alignf_data,
+ &pci_32_bit);
}
void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
@@ -176,6 +248,7 @@ int pci_bus_add_device(struct pci_dev *dev)
*/
pci_fixup_device(pci_fixup_final, dev);
pci_create_sysfs_dev_files(dev);
+ pci_proc_attach_device(dev);
dev->match_driver = true;
retval = device_attach(&dev->dev);
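
As an illustration (not part of the patch), a sketch of a caller under the reworked allocator above, assuming a BAR whose flags include IORESOURCE_MEM_64; the helper name and the use of size as alignment are assumptions.

#include <linux/ioport.h>
#include <linux/pci.h>

/*
 * With the change above, pci_bus_alloc_resource() first tries the region
 * above 4 GB (pci_high) for 64-bit resources and only then the full
 * 64-bit range, keeping 32-bit-only windows free for devices that cannot
 * address beyond 4 GB.
 */
static int acme_alloc_bar(struct pci_dev *dev, int bar)
{
	struct resource *res = &dev->resource[bar];
	resource_size_t size = resource_size(res);

	return pci_bus_alloc_resource(dev->bus, res, size, size, 0,
				      IORESOURCE_PREFETCH, NULL, NULL);
}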
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
index a68dc613a5be..06ace6248c61 100644
--- a/drivers/pci/host-bridge.c
+++ b/drivers/pci/host-bridge.c
@@ -9,22 +9,19 @@
#include "pci.h"
-static struct pci_bus *find_pci_root_bus(struct pci_dev *dev)
+static struct pci_bus *find_pci_root_bus(struct pci_bus *bus)
{
- struct pci_bus *bus;
-
- bus = dev->bus;
while (bus->parent)
bus = bus->parent;
return bus;
}
-static struct pci_host_bridge *find_pci_host_bridge(struct pci_dev *dev)
+static struct pci_host_bridge *find_pci_host_bridge(struct pci_bus *bus)
{
- struct pci_bus *bus = find_pci_root_bus(dev);
+ struct pci_bus *root_bus = find_pci_root_bus(bus);
- return to_pci_host_bridge(bus->bridge);
+ return to_pci_host_bridge(root_bus->bridge);
}
void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
@@ -40,10 +37,10 @@ static bool resource_contains(struct resource *res1, struct resource *res2)
return res1->start <= res2->start && res1->end >= res2->end;
}
-void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
struct resource *res)
{
- struct pci_host_bridge *bridge = find_pci_host_bridge(dev);
+ struct pci_host_bridge *bridge = find_pci_host_bridge(bus);
struct pci_host_bridge_window *window;
resource_size_t offset = 0;
@@ -68,10 +65,10 @@ static bool region_contains(struct pci_bus_region *region1,
return region1->start <= region2->start && region1->end >= region2->end;
}
-void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
struct pci_bus_region *region)
{
- struct pci_host_bridge *bridge = find_pci_host_bridge(dev);
+ struct pci_host_bridge *bridge = find_pci_host_bridge(bus);
struct pci_host_bridge_window *window;
resource_size_t offset = 0;
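
As an illustration (not part of the patch), a minimal caller updated for the new signatures above: resource/bus address conversions are now keyed off a struct pci_bus rather than a struct pci_dev. The wrapper name is hypothetical.

#include <linux/pci.h>

/* Convert a BAR's CPU resource to a bus-address region via dev->bus. */
static void acme_bar_to_bus(struct pci_dev *dev, int bar,
			    struct pci_bus_region *region)
{
	pcibios_resource_to_bus(dev->bus, region, &dev->resource[bar]);
}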
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index 24beed38ddc7..3de6bfbbe8e9 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -468,7 +468,7 @@ static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
int ret;
exynos_pcie_sideband_dbi_r_mode(pp, true);
- ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
+ ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
exynos_pcie_sideband_dbi_r_mode(pp, false);
return ret;
}
@@ -479,7 +479,8 @@ static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
int ret;
exynos_pcie_sideband_dbi_w_mode(pp, true);
- ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, val);
+ ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3),
+ where, size, val);
exynos_pcie_sideband_dbi_w_mode(pp, false);
return ret;
}
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index bd70af8f31ac..e8663a8c3406 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -44,10 +44,18 @@ struct imx6_pcie {
void __iomem *mem_base;
};
+/* PCIe Root Complex registers (memory-mapped) */
+#define PCIE_RC_LCR 0x7c
+#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
+#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
+#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf
+
/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
+#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
+#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4)
#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
@@ -59,6 +67,9 @@ struct imx6_pcie {
#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC 16
+#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
+#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
+
/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D
@@ -209,15 +220,9 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
- gpio_set_value(imx6_pcie->reset_gpio, 0);
- msleep(100);
- gpio_set_value(imx6_pcie->reset_gpio, 1);
-
return 0;
}
@@ -261,6 +266,12 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
/* allow the clocks to stabilize */
usleep_range(200, 500);
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ gpio_set_value(imx6_pcie->reset_gpio, 0);
+ msleep(100);
+ gpio_set_value(imx6_pcie->reset_gpio, 1);
+ }
return 0;
err_pcie_axi:
@@ -299,11 +310,90 @@ static void imx6_pcie_init_phy(struct pcie_port *pp)
IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
}
-static void imx6_pcie_host_init(struct pcie_port *pp)
+static int imx6_pcie_wait_for_link(struct pcie_port *pp)
+{
+ int count = 200;
+
+ while (!dw_pcie_link_up(pp)) {
+ usleep_range(100, 1000);
+ if (--count)
+ continue;
+
+ dev_err(pp->dev, "phy link never came up\n");
+ dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int imx6_pcie_start_link(struct pcie_port *pp)
{
- int count = 0;
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ uint32_t tmp;
+ int ret, count;
+ /*
+ * Force Gen1 operation when starting the link. In case the link is
+ * started in Gen2 mode, there is a possibility the devices on the
+ * bus will not be detected at all. This happens with PCIe switches.
+ */
+ tmp = readl(pp->dbi_base + PCIE_RC_LCR);
+ tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
+ tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
+ writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+
+ /* Start LTSSM. */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+
+ ret = imx6_pcie_wait_for_link(pp);
+ if (ret)
+ return ret;
+
+ /* Allow Gen2 mode after the link is up. */
+ tmp = readl(pp->dbi_base + PCIE_RC_LCR);
+ tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
+ tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
+ writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+
+ /*
+ * Start Directed Speed Change so the best possible speed both link
+ * partners support can be negotiated.
+ */
+ tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp |= PORT_LOGIC_SPEED_CHANGE;
+ writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+
+ count = 200;
+ while (count--) {
+ tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+ /* Test if the speed change finished. */
+ if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
+ break;
+ usleep_range(100, 1000);
+ }
+
+ /* Make sure link training is finished as well! */
+ if (count)
+ ret = imx6_pcie_wait_for_link(pp);
+ else
+ ret = -EINVAL;
+
+ if (ret) {
+ dev_err(pp->dev, "Failed to bring link up!\n");
+ } else {
+ tmp = readl(pp->dbi_base + 0x80);
+ dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
+ }
+
+ return ret;
+}
+
+static void imx6_pcie_host_init(struct pcie_port *pp)
+{
imx6_pcie_assert_core_reset(pp);
imx6_pcie_init_phy(pp);
@@ -312,33 +402,41 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
dw_pcie_setup_rc(pp);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+ imx6_pcie_start_link(pp);
+}
- while (!dw_pcie_link_up(pp)) {
- usleep_range(100, 1000);
- count++;
- if (count >= 200) {
- dev_err(pp->dev, "phy link never came up\n");
- dev_dbg(pp->dev,
- "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
- readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
- readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
- break;
- }
- }
+static void imx6_pcie_reset_phy(struct pcie_port *pp)
+{
+ uint32_t temp;
+
+ pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
+ temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+ PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
+
+ usleep_range(2000, 3000);
- return;
+ pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
+ temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+ PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
}
static int imx6_pcie_link_up(struct pcie_port *pp)
{
- u32 rc, ltssm, rx_valid, temp;
+ u32 rc, ltssm, rx_valid;
- /* link is debug bit 36, debug register 1 starts at bit 32 */
- rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32));
- if (rc)
- return -EAGAIN;
+ /*
+ * Test if the PHY reports that the link is up and also that
+ * the link training finished. It might happen that the PHY
+ * reports the link is already up, but the link training bit
+ * is still set, so make sure to check the training is done
+ * as well here.
+ */
+ rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
+ if ((rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP) &&
+ !(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
+ return 1;
/*
* From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
@@ -358,21 +456,7 @@ static int imx6_pcie_link_up(struct pcie_port *pp)
dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
- pcie_phy_read(pp->dbi_base,
- PHY_RX_OVRD_IN_LO, &temp);
- temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN
- | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(pp->dbi_base,
- PHY_RX_OVRD_IN_LO, temp);
-
- usleep_range(2000, 3000);
-
- pcie_phy_read(pp->dbi_base,
- PHY_RX_OVRD_IN_LO, &temp);
- temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN
- | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(pp->dbi_base,
- PHY_RX_OVRD_IN_LO, temp);
+ imx6_pcie_reset_phy(pp);
return 0;
}
@@ -426,30 +510,19 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
"imprecise external abort");
dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!dbi_base) {
- dev_err(&pdev->dev, "dbi_base memory resource not found\n");
- return -ENODEV;
- }
-
pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
- if (IS_ERR(pp->dbi_base)) {
- ret = PTR_ERR(pp->dbi_base);
- goto err;
- }
+ if (IS_ERR(pp->dbi_base))
+ return PTR_ERR(pp->dbi_base);
/* Fetch GPIOs */
imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
- if (!gpio_is_valid(imx6_pcie->reset_gpio)) {
- dev_err(&pdev->dev, "no reset-gpio defined\n");
- ret = -ENODEV;
- }
- ret = devm_gpio_request_one(&pdev->dev,
- imx6_pcie->reset_gpio,
- GPIOF_OUT_INIT_LOW,
- "PCIe reset");
- if (ret) {
- dev_err(&pdev->dev, "unable to get reset gpio\n");
- goto err;
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+ GPIOF_OUT_INIT_LOW, "PCIe reset");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get reset gpio\n");
+ return ret;
+ }
}
imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0);
@@ -460,7 +533,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
"PCIe power enable");
if (ret) {
dev_err(&pdev->dev, "unable to get power-on gpio\n");
- goto err;
+ return ret;
}
}
@@ -472,7 +545,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
"PCIe wake up");
if (ret) {
dev_err(&pdev->dev, "unable to get wake-up gpio\n");
- goto err;
+ return ret;
}
}
@@ -484,7 +557,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
"PCIe disable endpoint");
if (ret) {
dev_err(&pdev->dev, "unable to get disable-ep gpio\n");
- goto err;
+ return ret;
}
}
@@ -493,32 +566,28 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
if (IS_ERR(imx6_pcie->lvds_gate)) {
dev_err(&pdev->dev,
"lvds_gate clock select missing or invalid\n");
- ret = PTR_ERR(imx6_pcie->lvds_gate);
- goto err;
+ return PTR_ERR(imx6_pcie->lvds_gate);
}
imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m");
if (IS_ERR(imx6_pcie->sata_ref_100m)) {
dev_err(&pdev->dev,
"sata_ref_100m clock source missing or invalid\n");
- ret = PTR_ERR(imx6_pcie->sata_ref_100m);
- goto err;
+ return PTR_ERR(imx6_pcie->sata_ref_100m);
}
imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m");
if (IS_ERR(imx6_pcie->pcie_ref_125m)) {
dev_err(&pdev->dev,
"pcie_ref_125m clock source missing or invalid\n");
- ret = PTR_ERR(imx6_pcie->pcie_ref_125m);
- goto err;
+ return PTR_ERR(imx6_pcie->pcie_ref_125m);
}
imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi");
if (IS_ERR(imx6_pcie->pcie_axi)) {
dev_err(&pdev->dev,
"pcie_axi clock source missing or invalid\n");
- ret = PTR_ERR(imx6_pcie->pcie_axi);
- goto err;
+ return PTR_ERR(imx6_pcie->pcie_axi);
}
/* Grab GPR config register range */
@@ -526,19 +595,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
dev_err(&pdev->dev, "unable to find iomuxc registers\n");
- ret = PTR_ERR(imx6_pcie->iomuxc_gpr);
- goto err;
+ return PTR_ERR(imx6_pcie->iomuxc_gpr);
}
ret = imx6_add_pcie_port(pp, pdev);
if (ret < 0)
- goto err;
+ return ret;
platform_set_drvdata(pdev, imx6_pcie);
return 0;
-
-err:
- return ret;
}
static const struct of_device_id imx6_pcie_of_match[] = {
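
As an illustration (not part of the patch), a generic sketch of the bounded link-up poll that imx6_pcie_wait_for_link() introduces above; acme_link_up() is a hypothetical stand-in for dw_pcie_link_up().

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static bool acme_link_up(void)
{
	/* A real driver would read a PHY or debug register here. */
	return false;
}

/*
 * Retry the readiness check about 200 times with a short sleep between
 * attempts, and give up with -EINVAL if the link never comes up.
 */
static int acme_wait_for_link(void)
{
	int retries;

	for (retries = 0; retries < 200; retries++) {
		if (acme_link_up())
			return 0;
		usleep_range(100, 1000);
	}

	return -EINVAL;
}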
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 2aa7b77c7c88..13478ecd4113 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -150,6 +150,11 @@ static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
return readl(port->base + reg);
}
+static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
+{
+ return port->io_target != -1 && port->io_attr != -1;
+}
+
static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
{
return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
@@ -300,7 +305,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
/* Are the new iobase/iolimit values invalid? */
if (port->bridge.iolimit < port->bridge.iobase ||
- port->bridge.iolimitupper < port->bridge.iobaseupper) {
+ port->bridge.iolimitupper < port->bridge.iobaseupper ||
+ !(port->bridge.command & PCI_COMMAND_IO)) {
/* If a window was configured, remove it */
if (port->iowin_base) {
@@ -313,6 +319,12 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
return;
}
+ if (!mvebu_has_ioport(port)) {
+ dev_WARN(&port->pcie->pdev->dev,
+ "Attempt to set IO when IO is disabled\n");
+ return;
+ }
+
/*
* We read the PCI-to-PCI bridge emulated registers, and
* calculate the base address and size of the address decoding
@@ -330,14 +342,13 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr,
port->iowin_base, port->iowin_size,
iobase);
-
- pci_ioremap_io(iobase, port->iowin_base);
}
static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
/* Are the new membase/memlimit values invalid? */
- if (port->bridge.memlimit < port->bridge.membase) {
+ if (port->bridge.memlimit < port->bridge.membase ||
+ !(port->bridge.command & PCI_COMMAND_MEMORY)) {
/* If a window was configured, remove it */
if (port->memwin_base) {
@@ -426,9 +437,12 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
break;
case PCI_IO_BASE:
- *value = (bridge->secondary_status << 16 |
- bridge->iolimit << 8 |
- bridge->iobase);
+ if (!mvebu_has_ioport(port))
+ *value = bridge->secondary_status << 16;
+ else
+ *value = (bridge->secondary_status << 16 |
+ bridge->iolimit << 8 |
+ bridge->iobase);
break;
case PCI_MEMORY_BASE:
@@ -490,8 +504,19 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
switch (where & ~3) {
case PCI_COMMAND:
+ {
+ u32 old = bridge->command;
+
+ if (!mvebu_has_ioport(port))
+ value &= ~PCI_COMMAND_IO;
+
bridge->command = value & 0xffff;
+ if ((old ^ bridge->command) & PCI_COMMAND_IO)
+ mvebu_pcie_handle_iobase_change(port);
+ if ((old ^ bridge->command) & PCI_COMMAND_MEMORY)
+ mvebu_pcie_handle_membase_change(port);
break;
+ }
case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value;
@@ -505,7 +530,6 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
*/
bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32;
bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32;
- bridge->secondary_status = value >> 16;
mvebu_pcie_handle_iobase_change(port);
break;
@@ -656,7 +680,9 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
struct mvebu_pcie *pcie = sys_to_pcie(sys);
int i;
- pci_add_resource_offset(&sys->resources, &pcie->realio, sys->io_offset);
+ if (resource_size(&pcie->realio) != 0)
+ pci_add_resource_offset(&sys->resources, &pcie->realio,
+ sys->io_offset);
pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
pci_add_resource(&sys->resources, &pcie->busn);
@@ -707,9 +733,9 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
* aligned on their size
*/
if (res->flags & IORESOURCE_IO)
- return round_up(start, max((resource_size_t)SZ_64K, size));
+ return round_up(start, max_t(resource_size_t, SZ_64K, size));
else if (res->flags & IORESOURCE_MEM)
- return round_up(start, max((resource_size_t)SZ_1M, size));
+ return round_up(start, max_t(resource_size_t, SZ_1M, size));
else
return start;
}
@@ -757,12 +783,17 @@ static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF)
static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
- unsigned long type, int *tgt, int *attr)
+ unsigned long type,
+ unsigned int *tgt,
+ unsigned int *attr)
{
const int na = 3, ns = 2;
const __be32 *range;
int rlen, nranges, rangesz, pna, i;
+ *tgt = -1;
+ *attr = -1;
+
range = of_get_property(np, "ranges", &rlen);
if (!range)
return -EINVAL;
@@ -832,16 +863,15 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
}
mvebu_mbus_get_pcie_io_aperture(&pcie->io);
- if (resource_size(&pcie->io) == 0) {
- dev_err(&pdev->dev, "invalid I/O aperture size\n");
- return -EINVAL;
- }
- pcie->realio.flags = pcie->io.flags;
- pcie->realio.start = PCIBIOS_MIN_IO;
- pcie->realio.end = min_t(resource_size_t,
- IO_SPACE_LIMIT,
- resource_size(&pcie->io));
+ if (resource_size(&pcie->io) != 0) {
+ pcie->realio.flags = pcie->io.flags;
+ pcie->realio.start = PCIBIOS_MIN_IO;
+ pcie->realio.end = min_t(resource_size_t,
+ IO_SPACE_LIMIT,
+ resource_size(&pcie->io));
+ } else
+ pcie->realio = pcie->io;
/* Get the bus range */
ret = of_pci_parse_bus_range(np, &pcie->busn);
@@ -900,12 +930,12 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
continue;
}
- ret = mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_IO,
- &port->io_target, &port->io_attr);
- if (ret < 0) {
- dev_err(&pdev->dev, "PCIe%d.%d: cannot get tgt/attr for io window\n",
- port->port, port->lane);
- continue;
+ if (resource_size(&pcie->io) != 0)
+ mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_IO,
+ &port->io_target, &port->io_attr);
+ else {
+ port->io_target = -1;
+ port->io_attr = -1;
}
port->reset_gpio = of_get_named_gpio_flags(child,
@@ -954,14 +984,6 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
mvebu_pcie_set_local_dev_nr(port, 1);
- port->clk = of_clk_get_by_name(child, NULL);
- if (IS_ERR(port->clk)) {
- dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
- port->port, port->lane);
- iounmap(port->base);
- continue;
- }
-
port->dn = child;
spin_lock_init(&port->conf_lock);
mvebu_sw_pci_bridge_init(port);
@@ -969,6 +991,10 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
}
pcie->nports = i;
+
+ for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K)
+ pci_ioremap_io(i, pcie->io.start + i);
+
mvebu_pcie_msi_enable(pcie);
mvebu_pcie_enable(pcie);
@@ -988,8 +1014,7 @@ static struct platform_driver mvebu_pcie_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "mvebu-pcie",
- .of_match_table =
- of_match_ptr(mvebu_pcie_of_match_table),
+ .of_match_table = mvebu_pcie_of_match_table,
/* driver unloading/unbinding currently not supported */
.suppress_bind_attrs = true,
},
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index cbaa5c4397e3..ceec147baec3 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
/* AHB-PCI Bridge PCI communication registers */
@@ -77,6 +78,7 @@
#define RCAR_PCI_NR_CONTROLLERS 3
struct rcar_pci_priv {
+ struct device *dev;
void __iomem *reg;
struct resource io_res;
struct resource mem_res;
@@ -169,8 +171,11 @@ static int __init rcar_pci_setup(int nr, struct pci_sys_data *sys)
void __iomem *reg = priv->reg;
u32 val;
+ pm_runtime_enable(priv->dev);
+ pm_runtime_get_sync(priv->dev);
+
val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
- pr_info("PCI: bus%u revision %x\n", sys->busnr, val);
+ dev_info(priv->dev, "PCI: bus%u revision %x\n", sys->busnr, val);
/* Disable Direct Power Down State and assert reset */
val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
@@ -276,8 +281,8 @@ static int __init rcar_pci_probe(struct platform_device *pdev)
cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(&pdev->dev, cfg_res);
- if (!reg)
- return -ENODEV;
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!mem_res || !mem_res->start)
@@ -301,6 +306,7 @@ static int __init rcar_pci_probe(struct platform_device *pdev)
priv->irq = platform_get_irq(pdev, 0);
priv->reg = reg;
+ priv->dev = &pdev->dev;
return rcar_pci_add_controller(priv);
}
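
As an illustration (not part of the patch), a minimal probe fragment showing the devm_ioremap_resource() convention the R-Car hunk above corrects: the helper returns an ERR_PTR() value on failure, never NULL, so IS_ERR()/PTR_ERR() must be used. The driver name is hypothetical.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int acme_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* base is now a valid register mapping for the device */
	return 0;
}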
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 0afbbbc55c81..330f7e3a32dd 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -25,7 +25,6 @@
*/
#include <linux/clk.h>
-#include <linux/clk/tegra.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/interrupt.h>
@@ -39,6 +38,7 @@
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/tegra-cpuidle.h>
@@ -259,10 +259,13 @@ struct tegra_pcie {
struct clk *pex_clk;
struct clk *afi_clk;
- struct clk *pcie_xclk;
struct clk *pll_e;
struct clk *cml_clk;
+ struct reset_control *pex_rst;
+ struct reset_control *afi_rst;
+ struct reset_control *pcie_xrst;
+
struct tegra_msi msi;
struct list_head ports;
@@ -805,7 +808,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
afi_writel(pcie, value, AFI_PCIE_CONFIG);
value = afi_readl(pcie, AFI_FUSE);
- value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
+ value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
afi_writel(pcie, value, AFI_FUSE);
/* initialize internal PHY, enable up to 16 PCIE lanes */
@@ -858,7 +861,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
pads_writel(pcie, value, PADS_CTL);
/* take the PCIe interface module out of reset */
- tegra_periph_reset_deassert(pcie->pcie_xclk);
+ reset_control_deassert(pcie->pcie_xrst);
/* finally enable PCIe */
value = afi_readl(pcie, AFI_CONFIGURATION);
@@ -891,9 +894,9 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
/* TODO: disable and unprepare clocks? */
- tegra_periph_reset_assert(pcie->pcie_xclk);
- tegra_periph_reset_assert(pcie->afi_clk);
- tegra_periph_reset_assert(pcie->pex_clk);
+ reset_control_assert(pcie->pcie_xrst);
+ reset_control_assert(pcie->afi_rst);
+ reset_control_assert(pcie->pex_rst);
tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
@@ -921,9 +924,9 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
const struct tegra_pcie_soc_data *soc = pcie->soc_data;
int err;
- tegra_periph_reset_assert(pcie->pcie_xclk);
- tegra_periph_reset_assert(pcie->afi_clk);
- tegra_periph_reset_assert(pcie->pex_clk);
+ reset_control_assert(pcie->pcie_xrst);
+ reset_control_assert(pcie->afi_rst);
+ reset_control_assert(pcie->pex_rst);
tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
@@ -952,13 +955,14 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
}
err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
- pcie->pex_clk);
+ pcie->pex_clk,
+ pcie->pex_rst);
if (err) {
dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
return err;
}
- tegra_periph_reset_deassert(pcie->afi_clk);
+ reset_control_deassert(pcie->afi_rst);
err = clk_prepare_enable(pcie->afi_clk);
if (err < 0) {
@@ -996,10 +1000,6 @@ static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
if (IS_ERR(pcie->afi_clk))
return PTR_ERR(pcie->afi_clk);
- pcie->pcie_xclk = devm_clk_get(pcie->dev, "pcie_xclk");
- if (IS_ERR(pcie->pcie_xclk))
- return PTR_ERR(pcie->pcie_xclk);
-
pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
if (IS_ERR(pcie->pll_e))
return PTR_ERR(pcie->pll_e);
@@ -1013,6 +1013,23 @@ static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
return 0;
}
+static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
+{
+ pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
+ if (IS_ERR(pcie->pex_rst))
+ return PTR_ERR(pcie->pex_rst);
+
+ pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
+ if (IS_ERR(pcie->afi_rst))
+ return PTR_ERR(pcie->afi_rst);
+
+ pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
+ if (IS_ERR(pcie->pcie_xrst))
+ return PTR_ERR(pcie->pcie_xrst);
+
+ return 0;
+}
+
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
@@ -1025,6 +1042,12 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
return err;
}
+ err = tegra_pcie_resets_get(pcie);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get resets: %d\n", err);
+ return err;
+ }
+
err = tegra_pcie_power_on(pcie);
if (err) {
dev_err(&pdev->dev, "failed to power up: %d\n", err);
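
As an illustration (not part of the patch), a sketch of the generic reset controller pattern the Tegra hunks switch to: look up a named reset line once, then assert and deassert it around the power-up sequence. "pcie_x" mirrors the name used above; the surrounding function is hypothetical.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int acme_reset_cycle(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get(dev, "pcie_x");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);
	/* ... enable clocks and power here ... */
	reset_control_deassert(rst);

	return 0;
}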
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index e33b68be0391..17ce88f79d2b 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -74,7 +74,7 @@ static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
return sys->private_data;
}
-int cfg_read(void __iomem *addr, int where, int size, u32 *val)
+int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val)
{
*val = readl(addr);
@@ -88,7 +88,7 @@ int cfg_read(void __iomem *addr, int where, int size, u32 *val)
return PCIBIOS_SUCCESSFUL;
}
-int cfg_write(void __iomem *addr, int where, int size, u32 val)
+int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val)
{
if (size == 4)
writel(val, addr);
@@ -126,7 +126,8 @@ static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
if (pp->ops->rd_own_conf)
ret = pp->ops->rd_own_conf(pp, where, size, val);
else
- ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
+ ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where,
+ size, val);
return ret;
}
@@ -139,8 +140,8 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
if (pp->ops->wr_own_conf)
ret = pp->ops->wr_own_conf(pp, where, size, val);
else
- ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size,
- val);
+ ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), where,
+ size, val);
return ret;
}
@@ -167,11 +168,13 @@ void dw_handle_msi_irq(struct pcie_port *pp)
while ((pos = find_next_bit(&val, 32, pos)) != 32) {
irq = irq_find_mapping(pp->irq_domain,
i * 32 + pos);
+ dw_pcie_wr_own_conf(pp,
+ PCIE_MSI_INTR0_STATUS + i * 12,
+ 4, 1 << pos);
generic_handle_irq(irq);
pos++;
}
}
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, val);
}
}
@@ -209,6 +212,23 @@ static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
return 0;
}
+static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
+ unsigned int nvec, unsigned int pos)
+{
+ unsigned int i, res, bit, val;
+
+ for (i = 0; i < nvec; i++) {
+ irq_set_msi_desc_off(irq_base, i, NULL);
+ clear_bit(pos + i, pp->msi_irq_in_use);
+ /* Disable corresponding interrupt on MSI controller */
+ res = ((pos + i) / 32) * 12;
+ bit = (pos + i) % 32;
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+ val &= ~(1 << bit);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+ }
+}
+
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
int res, bit, irq, pos0, pos1, i;
@@ -242,18 +262,25 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
if (!irq)
goto no_valid_irq;
- i = 0;
- while (i < no_irqs) {
+ /*
+ * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
+ * descs so there is no need to allocate descs here. We can therefore
+ * assume that if irq_find_mapping above returns non-zero, then the
+ * descs are also successfully allocated.
+ */
+
+ for (i = 0; i < no_irqs; i++) {
+ if (irq_set_msi_desc_off(irq, i, desc) != 0) {
+ clear_irq_range(pp, irq, i, pos0);
+ goto no_valid_irq;
+ }
set_bit(pos0 + i, pp->msi_irq_in_use);
- irq_alloc_descs((irq + i), (irq + i), 1, 0);
- irq_set_msi_desc(irq + i, desc);
/*Enable corresponding interrupt in MSI interrupt controller */
res = ((pos0 + i) / 32) * 12;
bit = (pos0 + i) % 32;
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
val |= 1 << bit;
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
- i++;
}
*pos = pos0;
@@ -266,7 +293,7 @@ no_valid_irq:
static void clear_irq(unsigned int irq)
{
- int res, bit, val, pos;
+ unsigned int pos, nvec;
struct irq_desc *desc;
struct msi_desc *msi;
struct pcie_port *pp;
@@ -281,18 +308,15 @@ static void clear_irq(unsigned int irq)
return;
}
+ /* undo what was done in assign_irq */
pos = data->hwirq;
+ nvec = 1 << msi->msi_attrib.multiple;
- irq_free_desc(irq);
-
- clear_bit(pos, pp->msi_irq_in_use);
+ clear_irq_range(pp, irq, nvec, pos);
- /* Disable corresponding interrupt on MSI interrupt controller */
- res = (pos / 32) * 12;
- bit = pos % 32;
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
- val &= ~(1 << bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+ /* all irqs cleared; reset attributes */
+ msi->irq = 0;
+ msi->msi_attrib.multiple = 0;
}
static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
@@ -320,10 +344,10 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
if (irq < 0)
return irq;
- msg_ctr &= ~PCI_MSI_FLAGS_QSIZE;
- msg_ctr |= msgvec << 4;
- pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
- msg_ctr);
+ /*
+ * write_msi_msg() will update PCI_MSI_FLAGS so there is
+ * no need to explicitly call pci_write_config_word().
+ */
desc->msi_attrib.multiple = msgvec;
msg.address_lo = virt_to_phys((void *)pp->msi_data);
@@ -394,6 +418,7 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
+ global_io_offset);
pp->config.io_size = resource_size(&pp->io);
pp->config.io_bus_addr = range.pci_addr;
+ pp->io_base = range.cpu_addr;
}
if (restype == IORESOURCE_MEM) {
of_pci_range_to_resource(&range, np, &pp->mem);
@@ -419,7 +444,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
pp->cfg0_base = pp->cfg.start;
pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
- pp->io_base = pp->io.start;
pp->mem_base = pp->mem.start;
pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
@@ -551,11 +575,13 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
if (bus->parent->number == pp->root_bus_nr) {
dw_pcie_prog_viewport_cfg0(pp, busdev);
- ret = cfg_read(pp->va_cfg0_base + address, where, size, val);
+ ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size,
+ val);
dw_pcie_prog_viewport_mem_outbound(pp);
} else {
dw_pcie_prog_viewport_cfg1(pp, busdev);
- ret = cfg_read(pp->va_cfg1_base + address, where, size, val);
+ ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size,
+ val);
dw_pcie_prog_viewport_io_outbound(pp);
}
@@ -574,18 +600,19 @@ static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
if (bus->parent->number == pp->root_bus_nr) {
dw_pcie_prog_viewport_cfg0(pp, busdev);
- ret = cfg_write(pp->va_cfg0_base + address, where, size, val);
+ ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size,
+ val);
dw_pcie_prog_viewport_mem_outbound(pp);
} else {
dw_pcie_prog_viewport_cfg1(pp, busdev);
- ret = cfg_write(pp->va_cfg1_base + address, where, size, val);
+ ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size,
+ val);
dw_pcie_prog_viewport_io_outbound(pp);
}
return ret;
}
-
static int dw_pcie_valid_config(struct pcie_port *pp,
struct pci_bus *bus, int dev)
{
@@ -679,7 +706,7 @@ static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
sys->io_offset = global_io_offset - pp->config.io_bus_addr;
- pci_ioremap_io(sys->io_offset, pp->io.start);
+ pci_ioremap_io(global_io_offset, pp->io_base);
global_io_offset += SZ_64K;
pci_add_resource_offset(&sys->resources, &pp->io,
sys->io_offset);
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index c15379be2372..3063b3594d88 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -66,8 +66,8 @@ struct pcie_host_ops {
void (*host_init)(struct pcie_port *pp);
};
-int cfg_read(void __iomem *addr, int where, int size, u32 *val);
-int cfg_write(void __iomem *addr, int where, int size, u32 val);
+int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
+int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val);
void dw_handle_msi_irq(struct pcie_port *pp);
void dw_pcie_msi_init(struct pcie_port *pp);
int dw_pcie_link_up(struct pcie_port *pp);
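
As an illustration (not part of the patch), a generic sketch of the assign-then-roll-back loop used in the multi-MSI rework above: attach a descriptor to each interrupt in a contiguous range and, if any irq_set_msi_desc_off() call fails, undo the ones already set. acme_teardown_range() stands in for the driver's clear_irq_range(); register bookkeeping is omitted.

#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/msi.h>

static void acme_teardown_range(unsigned int irq_base, unsigned int nvec)
{
	unsigned int i;

	for (i = 0; i < nvec; i++)
		irq_set_msi_desc_off(irq_base, i, NULL);
}

static int acme_assign_range(unsigned int irq_base, unsigned int nvec,
			     struct msi_desc *desc)
{
	unsigned int i;
	int ret;

	for (i = 0; i < nvec; i++) {
		ret = irq_set_msi_desc_off(irq_base, i, desc);
		if (ret) {
			/* Roll back the descriptors already assigned. */
			acme_teardown_range(irq_base, i);
			return ret;
		}
	}

	return 0;
}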
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 1592dbe4f904..b6162be4df40 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -77,6 +77,8 @@ struct acpiphp_bridge {
/* PCI-to-PCI bridge device */
struct pci_dev *pci_dev;
+
+ bool is_going_away;
};
@@ -150,6 +152,7 @@ struct acpiphp_attention_info
/* slot flags */
#define SLOT_ENABLED (0x00000001)
+#define SLOT_IS_GOING_AWAY (0x00000002)
/* function flags */
@@ -169,7 +172,7 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot);
typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data);
int acpiphp_enable_slot(struct acpiphp_slot *slot);
-int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot);
+int acpiphp_disable_slot(struct acpiphp_slot *slot);
u8 acpiphp_get_power_status(struct acpiphp_slot *slot);
u8 acpiphp_get_attention_status(struct acpiphp_slot *slot);
u8 acpiphp_get_latch_status(struct acpiphp_slot *slot);
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index dca66bc44578..728c31f4c2c5 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -156,7 +156,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
/* disable the specified slot */
- return acpiphp_disable_and_eject_slot(slot->acpi_slot);
+ return acpiphp_disable_slot(slot->acpi_slot);
}
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 1cf605f67673..7c7a388c85ab 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -210,10 +210,29 @@ static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
}
}
+static void dock_event(acpi_handle handle, u32 type, void *data)
+{
+ struct acpiphp_context *context;
+
+ mutex_lock(&acpiphp_context_lock);
+ context = acpiphp_get_context(handle);
+ if (!context || WARN_ON(context->handle != handle)
+ || context->func.parent->is_going_away) {
+ mutex_unlock(&acpiphp_context_lock);
+ return;
+ }
+ get_bridge(context->func.parent);
+ acpiphp_put_context(context);
+ mutex_unlock(&acpiphp_context_lock);
+
+ hotplug_event(handle, type, data);
+
+ put_bridge(context->func.parent);
+}
static const struct acpi_dock_ops acpiphp_dock_ops = {
.fixup = post_dock_fixups,
- .handler = hotplug_event,
+ .handler = dock_event,
};
/* Check whether the PCI device is managed by native PCIe hotplug driver */
@@ -279,7 +298,9 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
if (ACPI_FAILURE(status)) {
- acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status);
+ if (status != AE_NOT_FOUND)
+ acpi_handle_warn(handle,
+ "can't evaluate _ADR (%#x)\n", status);
return AE_OK;
}
@@ -430,6 +451,7 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
pr_err("failed to remove notify handler\n");
}
}
+ slot->flags |= SLOT_IS_GOING_AWAY;
if (slot->slot)
acpiphp_unregister_hotplug_slot(slot);
}
@@ -437,6 +459,10 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
mutex_lock(&bridge_mutex);
list_del(&bridge->list);
mutex_unlock(&bridge_mutex);
+
+ mutex_lock(&acpiphp_context_lock);
+ bridge->is_going_away = true;
+ mutex_unlock(&acpiphp_context_lock);
}
/**
@@ -489,7 +515,7 @@ static void acpiphp_bus_add(acpi_handle handle)
acpi_bus_scan(handle);
acpi_bus_get_device(handle, &adev);
- if (adev)
+ if (acpi_device_enumerated(adev))
acpi_device_set_power(adev, ACPI_STATE_D0);
}
@@ -643,6 +669,24 @@ static void disable_slot(struct acpiphp_slot *slot)
slot->flags &= (~SLOT_ENABLED);
}
+static bool acpiphp_no_hotplug(acpi_handle handle)
+{
+ struct acpi_device *adev = NULL;
+
+ acpi_bus_get_device(handle, &adev);
+ return adev && adev->flags.no_hotplug;
+}
+
+static bool slot_no_hotplug(struct acpiphp_slot *slot)
+{
+ struct acpiphp_func *func;
+
+ list_for_each_entry(func, &slot->funcs, sibling)
+ if (acpiphp_no_hotplug(func_to_handle(func)))
+ return true;
+
+ return false;
+}
/**
* get_slot_status - get ACPI slot status
@@ -686,6 +730,17 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
return (unsigned int)sta;
}
+static inline bool device_status_valid(unsigned int sta)
+{
+ /*
+ * ACPI spec says that _STA may return bit 0 clear with bit 3 set
+ * if the device is valid but does not require a device driver to be
+ * loaded (Section 6.3.7 of ACPI 5.0A).
+ */
+ unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING;
+ return (sta & mask) == mask;
+}
+
/**
* trim_stale_devices - remove PCI devices that are not responding.
* @dev: PCI device to start walking the hierarchy from.
@@ -701,7 +756,8 @@ static void trim_stale_devices(struct pci_dev *dev)
unsigned long long sta;
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
- alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL;
+ alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
+ || acpiphp_no_hotplug(handle);
}
if (!alive) {
u32 v;
@@ -718,7 +774,7 @@ static void trim_stale_devices(struct pci_dev *dev)
/* The device is a bridge. so check the bus below it. */
pm_runtime_get_sync(&dev->dev);
- list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
+ list_for_each_entry_safe_reverse(child, tmp, &bus->devices, bus_list)
trim_stale_devices(child);
pm_runtime_put(&dev->dev);
@@ -736,16 +792,21 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
{
struct acpiphp_slot *slot;
+ /* Bail out if the bridge is going away. */
+ if (bridge->is_going_away)
+ return;
+
list_for_each_entry(slot, &bridge->slots, node) {
struct pci_bus *bus = slot->bus;
struct pci_dev *dev, *tmp;
mutex_lock(&slot->crit_sect);
- /* wake up all functions */
- if (get_slot_status(slot) == ACPI_STA_ALL) {
+ if (slot_no_hotplug(slot)) {
+ ; /* do nothing */
+ } else if (device_status_valid(get_slot_status(slot))) {
/* remove stale devices if any */
- list_for_each_entry_safe(dev, tmp, &bus->devices,
- bus_list)
+ list_for_each_entry_safe_reverse(dev, tmp,
+ &bus->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot->device)
trim_stale_devices(dev);
@@ -776,7 +837,7 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
int i;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;
- list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
+ list_for_each_entry_safe_reverse(dev, tmp, &bus->devices, bus_list) {
for (i=0; i<PCI_BRIDGE_RESOURCES; i++) {
struct resource *res = &dev->resource[i];
if ((res->flags & type_mask) && !res->start &&
@@ -800,11 +861,17 @@ void acpiphp_check_host_bridge(acpi_handle handle)
bridge = acpiphp_handle_to_bridge(handle);
if (bridge) {
+ pci_lock_rescan_remove();
+
acpiphp_check_bridge(bridge);
+
+ pci_unlock_rescan_remove();
put_bridge(bridge);
}
}
+static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot);
+
static void hotplug_event(acpi_handle handle, u32 type, void *data)
{
struct acpiphp_context *context = data;
@@ -821,6 +888,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
mutex_unlock(&acpiphp_context_lock);
+ pci_lock_rescan_remove();
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
switch (type) {
@@ -834,6 +902,9 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
} else {
struct acpiphp_slot *slot = func->slot;
+ if (slot->flags & SLOT_IS_GOING_AWAY)
+ break;
+
mutex_lock(&slot->crit_sect);
enable_slot(slot);
mutex_unlock(&slot->crit_sect);
@@ -849,6 +920,9 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
struct acpiphp_slot *slot = func->slot;
int ret;
+ if (slot->flags & SLOT_IS_GOING_AWAY)
+ break;
+
/*
* Check if anything has changed in the slot and rescan
* from the parent if that's the case.
@@ -868,6 +942,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
break;
}
+ pci_unlock_rescan_remove();
if (bridge)
put_bridge(bridge);
}
@@ -898,6 +973,7 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
{
struct acpiphp_context *context;
u32 ost_code = ACPI_OST_SC_SUCCESS;
+ acpi_status status;
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
@@ -933,13 +1009,20 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
mutex_lock(&acpiphp_context_lock);
context = acpiphp_get_context(handle);
- if (context && !WARN_ON(context->handle != handle)) {
- get_bridge(context->func.parent);
- acpiphp_put_context(context);
- acpi_hotplug_execute(hotplug_event_work, context, type);
+ if (!context || WARN_ON(context->handle != handle)
+ || context->func.parent->is_going_away)
+ goto err_out;
+
+ get_bridge(context->func.parent);
+ acpiphp_put_context(context);
+ status = acpi_hotplug_execute(hotplug_event_work, context, type);
+ if (ACPI_SUCCESS(status)) {
mutex_unlock(&acpiphp_context_lock);
return;
}
+ put_bridge(context->func.parent);
+
+ err_out:
mutex_unlock(&acpiphp_context_lock);
ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
@@ -1048,12 +1131,19 @@ void acpiphp_remove_slots(struct pci_bus *bus)
*/
int acpiphp_enable_slot(struct acpiphp_slot *slot)
{
+ pci_lock_rescan_remove();
+
+ if (slot->flags & SLOT_IS_GOING_AWAY)
+ return -ENODEV;
+
mutex_lock(&slot->crit_sect);
/* configure all functions */
if (!(slot->flags & SLOT_ENABLED))
enable_slot(slot);
mutex_unlock(&slot->crit_sect);
+
+ pci_unlock_rescan_remove();
return 0;
}
@@ -1061,10 +1151,12 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
* acpiphp_disable_and_eject_slot - power off and eject slot
* @slot: ACPI PHP slot
*/
-int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot)
+static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot)
{
struct acpiphp_func *func;
- int retval = 0;
+
+ if (slot->flags & SLOT_IS_GOING_AWAY)
+ return -ENODEV;
mutex_lock(&slot->crit_sect);
@@ -1082,9 +1174,18 @@ int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot)
}
mutex_unlock(&slot->crit_sect);
- return retval;
+ return 0;
}
+int acpiphp_disable_slot(struct acpiphp_slot *slot)
+{
+ int ret;
+
+ pci_lock_rescan_remove();
+ ret = acpiphp_disable_and_eject_slot(slot);
+ pci_unlock_rescan_remove();
+ return ret;
+}
/*
* slot enabled: 1
@@ -1095,7 +1196,6 @@ u8 acpiphp_get_power_status(struct acpiphp_slot *slot)
return (slot->flags & SLOT_ENABLED);
}
-
/*
* latch open: 1
* latch closed: 0
@@ -1105,7 +1205,6 @@ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot)
return !(get_slot_status(slot) & ACPI_STA_DEVICE_UI);
}
-
/*
* adapter presence : 1
* absence : 0
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index ecfac7e72d91..8dcccffd6e21 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -31,12 +31,11 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <acpi/acpi_bus.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
-#include <asm/uaccess.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
+#include <asm/uaccess.h>
#include "acpiphp.h"
#include "../pci.h"
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index d3add9819f63..8c1464851768 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -254,9 +254,12 @@ int __ref cpci_configure_slot(struct slot *slot)
{
struct pci_dev *dev;
struct pci_bus *parent;
+ int ret = 0;
dbg("%s - enter", __func__);
+ pci_lock_rescan_remove();
+
if (slot->dev == NULL) {
dbg("pci_dev null, finding %02x:%02x:%x",
slot->bus->number, PCI_SLOT(slot->devfn), PCI_FUNC(slot->devfn));
@@ -277,7 +280,8 @@ int __ref cpci_configure_slot(struct slot *slot)
slot->dev = pci_get_slot(slot->bus, slot->devfn);
if (slot->dev == NULL) {
err("Could not find PCI device for slot %02x", slot->number);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
}
parent = slot->dev->bus;
@@ -294,8 +298,10 @@ int __ref cpci_configure_slot(struct slot *slot)
pci_bus_add_devices(parent);
+ out:
+ pci_unlock_rescan_remove();
dbg("%s - exit", __func__);
- return 0;
+ return ret;
}
int cpci_unconfigure_slot(struct slot* slot)
@@ -308,6 +314,8 @@ int cpci_unconfigure_slot(struct slot* slot)
return -ENODEV;
}
+ pci_lock_rescan_remove();
+
list_for_each_entry_safe(dev, temp, &slot->bus->devices, bus_list) {
if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
continue;
@@ -318,6 +326,8 @@ int cpci_unconfigure_slot(struct slot* slot)
pci_dev_put(slot->dev);
slot->dev = NULL;
+ pci_unlock_rescan_remove();
+
dbg("%s - exit", __func__);
return 0;
}
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 6e4a12c91adb..a3e3c2002b58 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -86,6 +86,8 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
struct pci_bus *child;
int num;
+ pci_lock_rescan_remove();
+
if (func->pci_dev == NULL)
func->pci_dev = pci_get_bus_and_slot(func->bus,PCI_DEVFN(func->device, func->function));
@@ -100,7 +102,7 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function));
if (func->pci_dev == NULL) {
dbg("ERROR: pci_dev still null\n");
- return 0;
+ goto out;
}
}
@@ -113,6 +115,8 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
pci_dev_put(func->pci_dev);
+ out:
+ pci_unlock_rescan_remove();
return 0;
}
@@ -123,6 +127,7 @@ int cpqhp_unconfigure_device(struct pci_func* func)
dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function);
+ pci_lock_rescan_remove();
for (j=0; j<8 ; j++) {
struct pci_dev* temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j));
if (temp) {
@@ -130,6 +135,7 @@ int cpqhp_unconfigure_device(struct pci_func* func)
pci_stop_and_remove_bus_device(temp);
}
}
+ pci_unlock_rescan_remove();
return 0;
}
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index efdc13adbe41..cf3ac1e4b099 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -718,6 +718,8 @@ static void ibm_unconfigure_device(struct pci_func *func)
func->device, func->function);
debug("func->device << 3 | 0x0 = %x\n", func->device << 3 | 0x0);
+ pci_lock_rescan_remove();
+
for (j = 0; j < 0x08; j++) {
temp = pci_get_bus_and_slot(func->busno, (func->device << 3) | j);
if (temp) {
@@ -725,7 +727,10 @@ static void ibm_unconfigure_device(struct pci_func *func)
pci_dev_put(temp);
}
}
+
pci_dev_put(func->dev);
+
+ pci_unlock_rescan_remove();
}
/*
@@ -780,6 +785,8 @@ static int ibm_configure_device(struct pci_func *func)
int flag = 0; /* this is to make sure we don't double scan the bus,
for bridged devices primarily */
+ pci_lock_rescan_remove();
+
if (!(bus_structure_fixup(func->busno)))
flag = 1;
if (func->dev == NULL)
@@ -789,7 +796,7 @@ static int ibm_configure_device(struct pci_func *func)
if (func->dev == NULL) {
struct pci_bus *bus = pci_find_bus(0, func->busno);
if (!bus)
- return 0;
+ goto out;
num = pci_scan_slot(bus,
PCI_DEVFN(func->device, func->function));
@@ -800,7 +807,7 @@ static int ibm_configure_device(struct pci_func *func)
PCI_DEVFN(func->device, func->function));
if (func->dev == NULL) {
err("ERROR... : pci_dev still NULL\n");
- return 0;
+ goto out;
}
}
if (!(flag) && (func->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) {
@@ -810,6 +817,8 @@ static int ibm_configure_device(struct pci_func *func)
pci_bus_add_devices(child);
}
+ out:
+ pci_unlock_rescan_remove();
return 0;
}
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 21e865ded1dc..88b37cad4b35 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -43,7 +43,6 @@
extern bool pciehp_poll_mode;
extern int pciehp_poll_time;
extern bool pciehp_debug;
-extern bool pciehp_force;
#define dbg(format, arg...) \
do { \
@@ -140,15 +139,15 @@ struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
int pciehp_enable_slot(struct slot *p_slot);
int pciehp_disable_slot(struct slot *p_slot);
-int pcie_enable_notification(struct controller *ctrl);
+void pcie_enable_notification(struct controller *ctrl);
int pciehp_power_on_slot(struct slot *slot);
-int pciehp_power_off_slot(struct slot *slot);
-int pciehp_get_power_status(struct slot *slot, u8 *status);
-int pciehp_get_attention_status(struct slot *slot, u8 *status);
+void pciehp_power_off_slot(struct slot *slot);
+void pciehp_get_power_status(struct slot *slot, u8 *status);
+void pciehp_get_attention_status(struct slot *slot, u8 *status);
-int pciehp_set_attention_status(struct slot *slot, u8 status);
-int pciehp_get_latch_status(struct slot *slot, u8 *status);
-int pciehp_get_adapter_status(struct slot *slot, u8 *status);
+void pciehp_set_attention_status(struct slot *slot, u8 status);
+void pciehp_get_latch_status(struct slot *slot, u8 *status);
+void pciehp_get_adapter_status(struct slot *slot, u8 *status);
int pciehp_query_power_fault(struct slot *slot);
void pciehp_green_led_on(struct slot *slot);
void pciehp_green_led_off(struct slot *slot);
@@ -163,8 +162,6 @@ static inline const char *slot_name(struct slot *slot)
}
#ifdef CONFIG_ACPI
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
#include <linux/pci-acpi.h>
void __init pciehp_acpi_slot_detection_init(void);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index bbd48bbe4e9b..53b58debc288 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -41,7 +41,7 @@
bool pciehp_debug;
bool pciehp_poll_mode;
int pciehp_poll_time;
-bool pciehp_force;
+static bool pciehp_force;
#define DRIVER_VERSION "0.4"
#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -160,7 +160,8 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- return pciehp_set_attention_status(slot, status);
+ pciehp_set_attention_status(slot, status);
+ return 0;
}
@@ -192,7 +193,8 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- return pciehp_get_power_status(slot, value);
+ pciehp_get_power_status(slot, value);
+ return 0;
}
static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
@@ -202,7 +204,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- return pciehp_get_attention_status(slot, value);
+ pciehp_get_attention_status(slot, value);
+ return 0;
}
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
@@ -212,7 +215,8 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- return pciehp_get_latch_status(slot, value);
+ pciehp_get_latch_status(slot, value);
+ return 0;
}
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
@@ -222,7 +226,8 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- return pciehp_get_adapter_status(slot, value);
+ pciehp_get_adapter_status(slot, value);
+ return 0;
}
static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 38f018679175..50628487597d 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -158,11 +158,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
{
/* turn off slot, turn on Amber LED, turn off Green LED if supported*/
if (POWER_CTRL(ctrl)) {
- if (pciehp_power_off_slot(pslot)) {
- ctrl_err(ctrl,
- "Issue of Slot Power Off command failed\n");
- return;
- }
+ pciehp_power_off_slot(pslot);
+
/*
* After turning power off, we must wait for at least 1 second
* before taking any action that relies on power having been
@@ -171,16 +168,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
msleep(1000);
}
- if (PWR_LED(ctrl))
- pciehp_green_led_off(pslot);
-
- if (ATTN_LED(ctrl)) {
- if (pciehp_set_attention_status(pslot, 1)) {
- ctrl_err(ctrl,
- "Issue of Set Attention Led command failed\n");
- return;
- }
- }
+ pciehp_green_led_off(pslot);
+ pciehp_set_attention_status(pslot, 1);
}
/**
@@ -203,8 +192,7 @@ static int board_added(struct slot *p_slot)
return retval;
}
- if (PWR_LED(ctrl))
- pciehp_green_led_blink(p_slot);
+ pciehp_green_led_blink(p_slot);
/* Check link training status */
retval = pciehp_check_link_status(ctrl);
@@ -227,9 +215,7 @@ static int board_added(struct slot *p_slot)
goto err_exit;
}
- if (PWR_LED(ctrl))
- pciehp_green_led_on(p_slot);
-
+ pciehp_green_led_on(p_slot);
return 0;
err_exit:
@@ -243,7 +229,7 @@ err_exit:
*/
static int remove_board(struct slot *p_slot)
{
- int retval = 0;
+ int retval;
struct controller *ctrl = p_slot->ctrl;
retval = pciehp_unconfigure_device(p_slot);
@@ -251,13 +237,8 @@ static int remove_board(struct slot *p_slot)
return retval;
if (POWER_CTRL(ctrl)) {
- /* power off slot */
- retval = pciehp_power_off_slot(p_slot);
- if (retval) {
- ctrl_err(ctrl,
- "Issue of Slot Disable command failed\n");
- return retval;
- }
+ pciehp_power_off_slot(p_slot);
+
/*
* After turning power off, we must wait for at least 1 second
* before taking any action that relies on power having been
@@ -267,9 +248,7 @@ static int remove_board(struct slot *p_slot)
}
/* turn off Green LED */
- if (PWR_LED(ctrl))
- pciehp_green_led_off(p_slot);
-
+ pciehp_green_led_off(p_slot);
return 0;
}
@@ -305,7 +284,7 @@ static void pciehp_power_thread(struct work_struct *work)
break;
case POWERON_STATE:
mutex_unlock(&p_slot->lock);
- if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl))
+ if (pciehp_enable_slot(p_slot))
pciehp_green_led_off(p_slot);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
@@ -372,11 +351,8 @@ static void handle_button_press_event(struct slot *p_slot)
"press.\n", slot_name(p_slot));
}
/* blink green LED and turn off amber */
- if (PWR_LED(ctrl))
- pciehp_green_led_blink(p_slot);
- if (ATTN_LED(ctrl))
- pciehp_set_attention_status(p_slot, 0);
-
+ pciehp_green_led_blink(p_slot);
+ pciehp_set_attention_status(p_slot, 0);
queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
break;
case BLINKINGOFF_STATE:
@@ -389,14 +365,11 @@ static void handle_button_press_event(struct slot *p_slot)
ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot));
cancel_delayed_work(&p_slot->work);
if (p_slot->state == BLINKINGOFF_STATE) {
- if (PWR_LED(ctrl))
- pciehp_green_led_on(p_slot);
+ pciehp_green_led_on(p_slot);
} else {
- if (PWR_LED(ctrl))
- pciehp_green_led_off(p_slot);
+ pciehp_green_led_off(p_slot);
}
- if (ATTN_LED(ctrl))
- pciehp_set_attention_status(p_slot, 0);
+ pciehp_set_attention_status(p_slot, 0);
ctrl_info(ctrl, "PCI slot #%s - action canceled "
"due to button press\n", slot_name(p_slot));
p_slot->state = STATIC_STATE;
@@ -456,10 +429,8 @@ static void interrupt_event_handler(struct work_struct *work)
case INT_POWER_FAULT:
if (!POWER_CTRL(ctrl))
break;
- if (ATTN_LED(ctrl))
- pciehp_set_attention_status(p_slot, 1);
- if (PWR_LED(ctrl))
- pciehp_green_led_off(p_slot);
+ pciehp_set_attention_status(p_slot, 1);
+ pciehp_green_led_off(p_slot);
break;
case INT_PRESENCE_ON:
case INT_PRESENCE_OFF:
@@ -482,14 +453,14 @@ int pciehp_enable_slot(struct slot *p_slot)
int rc;
struct controller *ctrl = p_slot->ctrl;
- rc = pciehp_get_adapter_status(p_slot, &getstatus);
- if (rc || !getstatus) {
+ pciehp_get_adapter_status(p_slot, &getstatus);
+ if (!getstatus) {
ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
return -ENODEV;
}
if (MRL_SENS(p_slot->ctrl)) {
- rc = pciehp_get_latch_status(p_slot, &getstatus);
- if (rc || getstatus) {
+ pciehp_get_latch_status(p_slot, &getstatus);
+ if (getstatus) {
ctrl_info(ctrl, "Latch open on slot(%s)\n",
slot_name(p_slot));
return -ENODEV;
@@ -497,8 +468,8 @@ int pciehp_enable_slot(struct slot *p_slot)
}
if (POWER_CTRL(p_slot->ctrl)) {
- rc = pciehp_get_power_status(p_slot, &getstatus);
- if (rc || getstatus) {
+ pciehp_get_power_status(p_slot, &getstatus);
+ if (getstatus) {
ctrl_info(ctrl, "Already enabled on slot(%s)\n",
slot_name(p_slot));
return -EINVAL;
@@ -518,15 +489,14 @@ int pciehp_enable_slot(struct slot *p_slot)
int pciehp_disable_slot(struct slot *p_slot)
{
u8 getstatus = 0;
- int ret = 0;
struct controller *ctrl = p_slot->ctrl;
if (!p_slot->ctrl)
return 1;
if (!HP_SUPR_RM(p_slot->ctrl)) {
- ret = pciehp_get_adapter_status(p_slot, &getstatus);
- if (ret || !getstatus) {
+ pciehp_get_adapter_status(p_slot, &getstatus);
+ if (!getstatus) {
ctrl_info(ctrl, "No adapter on slot(%s)\n",
slot_name(p_slot));
return -ENODEV;
@@ -534,8 +504,8 @@ int pciehp_disable_slot(struct slot *p_slot)
}
if (MRL_SENS(p_slot->ctrl)) {
- ret = pciehp_get_latch_status(p_slot, &getstatus);
- if (ret || getstatus) {
+ pciehp_get_latch_status(p_slot, &getstatus);
+ if (getstatus) {
ctrl_info(ctrl, "Latch open on slot(%s)\n",
slot_name(p_slot));
return -ENODEV;
@@ -543,8 +513,8 @@ int pciehp_disable_slot(struct slot *p_slot)
}
if (POWER_CTRL(p_slot->ctrl)) {
- ret = pciehp_get_power_status(p_slot, &getstatus);
- if (ret || !getstatus) {
+ pciehp_get_power_status(p_slot, &getstatus);
+ if (!getstatus) {
ctrl_info(ctrl, "Already disabled on slot(%s)\n",
slot_name(p_slot));
return -EINVAL;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 3eea3fdd4b0b..14acfccb7670 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -41,34 +41,11 @@
#include "../pci.h"
#include "pciehp.h"
-static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
+static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
{
- struct pci_dev *dev = ctrl->pcie->port;
- return pcie_capability_read_word(dev, reg, value);
+ return ctrl->pcie->port;
}
-static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value)
-{
- struct pci_dev *dev = ctrl->pcie->port;
- return pcie_capability_read_dword(dev, reg, value);
-}
-
-static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value)
-{
- struct pci_dev *dev = ctrl->pcie->port;
- return pcie_capability_write_word(dev, reg, value);
-}
-
-static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
-{
- struct pci_dev *dev = ctrl->pcie->port;
- return pcie_capability_write_dword(dev, reg, value);
-}
-
-/* Power Control Command */
-#define POWER_ON 0
-#define POWER_OFF PCI_EXP_SLTCTL_PCC
-
static irqreturn_t pcie_isr(int irq, void *dev_id);
static void start_int_poll_timer(struct controller *ctrl, int sec);
@@ -129,20 +106,23 @@ static inline void pciehp_free_irq(struct controller *ctrl)
static int pcie_poll_cmd(struct controller *ctrl)
{
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
- int err, timeout = 1000;
+ int timeout = 1000;
- err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) {
- pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC);
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (slot_status & PCI_EXP_SLTSTA_CC) {
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_CC);
return 1;
}
while (timeout > 0) {
msleep(10);
timeout -= 10;
- err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) {
- pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC);
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (slot_status & PCI_EXP_SLTSTA_CC) {
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_CC);
return 1;
}
}
@@ -169,21 +149,15 @@ static void pcie_wait_cmd(struct controller *ctrl, int poll)
* @cmd: command value written to slot control register
* @mask: bitmask of slot control register to be modified
*/
-static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
+static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
{
- int retval = 0;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
u16 slot_ctrl;
mutex_lock(&ctrl->ctrl_lock);
- retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
- __func__);
- goto out;
- }
-
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
if (slot_status & PCI_EXP_SLTSTA_CC) {
if (!ctrl->no_cmd_complete) {
/*
@@ -207,24 +181,17 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
}
}
- retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
- goto out;
- }
-
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
slot_ctrl &= ~mask;
slot_ctrl |= (cmd & mask);
ctrl->cmd_busy = 1;
smp_mb();
- retval = pciehp_writew(ctrl, PCI_EXP_SLTCTL, slot_ctrl);
- if (retval)
- ctrl_err(ctrl, "Cannot write to SLOTCTRL register\n");
+ pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
/*
* Wait for command completion.
*/
- if (!retval && !ctrl->no_cmd_complete) {
+ if (!ctrl->no_cmd_complete) {
int poll = 0;
/*
* if hotplug interrupt is not enabled or command
@@ -236,19 +203,16 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
poll = 1;
pcie_wait_cmd(ctrl, poll);
}
- out:
mutex_unlock(&ctrl->ctrl_lock);
- return retval;
}
static bool check_link_active(struct controller *ctrl)
{
- bool ret = false;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 lnk_status;
+ bool ret;
- if (pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status))
- return ret;
-
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
if (ret)
@@ -311,9 +275,9 @@ static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
int pciehp_check_link_status(struct controller *ctrl)
{
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ bool found;
u16 lnk_status;
- int retval = 0;
- bool found = false;
/*
* Data Link Layer Link Active Reporting must be capable for
@@ -330,52 +294,37 @@ int pciehp_check_link_status(struct controller *ctrl)
found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
PCI_DEVFN(0, 0));
- retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
- if (retval) {
- ctrl_err(ctrl, "Cannot read LNKSTATUS register\n");
- return retval;
- }
-
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
!(lnk_status & PCI_EXP_LNKSTA_NLW)) {
ctrl_err(ctrl, "Link Training Error occurs \n");
- retval = -1;
- return retval;
+ return -1;
}
pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
- if (!found && !retval)
- retval = -1;
+ if (!found)
+ return -1;
- return retval;
+ return 0;
}
static int __pciehp_link_set(struct controller *ctrl, bool enable)
{
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 lnk_ctrl;
- int retval = 0;
- retval = pciehp_readw(ctrl, PCI_EXP_LNKCTL, &lnk_ctrl);
- if (retval) {
- ctrl_err(ctrl, "Cannot read LNKCTRL register\n");
- return retval;
- }
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
if (enable)
lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
else
lnk_ctrl |= PCI_EXP_LNKCTL_LD;
- retval = pciehp_writew(ctrl, PCI_EXP_LNKCTL, lnk_ctrl);
- if (retval) {
- ctrl_err(ctrl, "Cannot write LNKCTRL register\n");
- return retval;
- }
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
-
- return retval;
+ return 0;
}
static int pciehp_link_enable(struct controller *ctrl)
@@ -388,223 +337,165 @@ static int pciehp_link_disable(struct controller *ctrl)
return __pciehp_link_set(ctrl, false);
}
-int pciehp_get_attention_status(struct slot *slot, u8 *status)
+void pciehp_get_attention_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
- u8 atten_led_state;
- int retval = 0;
-
- retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
- return retval;
- }
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
- atten_led_state = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6;
-
- switch (atten_led_state) {
- case 0:
- *status = 0xFF; /* Reserved */
- break;
- case 1:
+ switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
+ case PCI_EXP_SLTCTL_ATTN_IND_ON:
*status = 1; /* On */
break;
- case 2:
+ case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
*status = 2; /* Blink */
break;
- case 3:
+ case PCI_EXP_SLTCTL_ATTN_IND_OFF:
*status = 0; /* Off */
break;
default:
*status = 0xFF;
break;
}
-
- return 0;
}
-int pciehp_get_power_status(struct slot *slot, u8 *status)
+void pciehp_get_power_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
- u8 pwr_state;
- int retval = 0;
- retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
- return retval;
- }
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
- pwr_state = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10;
-
- switch (pwr_state) {
- case 0:
- *status = 1;
+ switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
+ case PCI_EXP_SLTCTL_PWR_ON:
+ *status = 1; /* On */
break;
- case 1:
- *status = 0;
+ case PCI_EXP_SLTCTL_PWR_OFF:
+ *status = 0; /* Off */
break;
default:
*status = 0xFF;
break;
}
-
- return retval;
}
-int pciehp_get_latch_status(struct slot *slot, u8 *status)
+void pciehp_get_latch_status(struct slot *slot, u8 *status)
{
- struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(slot->ctrl);
u16 slot_status;
- int retval;
- retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
- __func__);
- return retval;
- }
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
- return 0;
}
-int pciehp_get_adapter_status(struct slot *slot, u8 *status)
+void pciehp_get_adapter_status(struct slot *slot, u8 *status)
{
- struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(slot->ctrl);
u16 slot_status;
- int retval;
- retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
- __func__);
- return retval;
- }
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
*status = !!(slot_status & PCI_EXP_SLTSTA_PDS);
- return 0;
}
int pciehp_query_power_fault(struct slot *slot)
{
- struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(slot->ctrl);
u16 slot_status;
- int retval;
- retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (retval) {
- ctrl_err(ctrl, "Cannot check for power fault\n");
- return retval;
- }
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
return !!(slot_status & PCI_EXP_SLTSTA_PFD);
}
-int pciehp_set_attention_status(struct slot *slot, u8 value)
+void pciehp_set_attention_status(struct slot *slot, u8 value)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
- u16 cmd_mask;
- cmd_mask = PCI_EXP_SLTCTL_AIC;
+ if (!ATTN_LED(ctrl))
+ return;
+
switch (value) {
case 0 : /* turn off */
- slot_cmd = 0x00C0;
+ slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_OFF;
break;
case 1: /* turn on */
- slot_cmd = 0x0040;
+ slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_ON;
break;
case 2: /* turn blink */
- slot_cmd = 0x0080;
+ slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_BLINK;
break;
default:
- return -EINVAL;
+ return;
}
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
- return pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
+ pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
}
void pciehp_green_led_on(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
- u16 slot_cmd;
- u16 cmd_mask;
- slot_cmd = 0x0100;
- cmd_mask = PCI_EXP_SLTCTL_PIC;
- pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
+ if (!PWR_LED(ctrl))
+ return;
+
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, PCI_EXP_SLTCTL_PIC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_PWR_IND_ON);
}
void pciehp_green_led_off(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
- u16 slot_cmd;
- u16 cmd_mask;
- slot_cmd = 0x0300;
- cmd_mask = PCI_EXP_SLTCTL_PIC;
- pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
+ if (!PWR_LED(ctrl))
+ return;
+
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, PCI_EXP_SLTCTL_PIC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_PWR_IND_OFF);
}
void pciehp_green_led_blink(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
- u16 slot_cmd;
- u16 cmd_mask;
- slot_cmd = 0x0200;
- cmd_mask = PCI_EXP_SLTCTL_PIC;
- pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
+ if (!PWR_LED(ctrl))
+ return;
+
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, PCI_EXP_SLTCTL_PIC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_PWR_IND_BLINK);
}
int pciehp_power_on_slot(struct slot * slot)
{
struct controller *ctrl = slot->ctrl;
- u16 slot_cmd;
- u16 cmd_mask;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
- int retval = 0;
+ int retval;
/* Clear sticky power-fault bit from previous power failures */
- retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
- __func__);
- return retval;
- }
- slot_status &= PCI_EXP_SLTSTA_PFD;
- if (slot_status) {
- retval = pciehp_writew(ctrl, PCI_EXP_SLTSTA, slot_status);
- if (retval) {
- ctrl_err(ctrl,
- "%s: Cannot write to SLOTSTATUS register\n",
- __func__);
- return retval;
- }
- }
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (slot_status & PCI_EXP_SLTSTA_PFD)
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_PFD);
ctrl->power_fault_detected = 0;
- slot_cmd = POWER_ON;
- cmd_mask = PCI_EXP_SLTCTL_PCC;
- retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
- if (retval) {
- ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd);
- return retval;
- }
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_PWR_ON);
retval = pciehp_link_enable(ctrl);
if (retval)
@@ -613,12 +504,9 @@ int pciehp_power_on_slot(struct slot * slot)
return retval;
}
-int pciehp_power_off_slot(struct slot * slot)
+void pciehp_power_off_slot(struct slot * slot)
{
struct controller *ctrl = slot->ctrl;
- u16 slot_cmd;
- u16 cmd_mask;
- int retval;
/* Disable the link at first */
pciehp_link_disable(ctrl);
@@ -628,21 +516,16 @@ int pciehp_power_off_slot(struct slot * slot)
else
msleep(1000);
- slot_cmd = POWER_OFF;
- cmd_mask = PCI_EXP_SLTCTL_PCC;
- retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
- if (retval) {
- ctrl_err(ctrl, "Write command failed!\n");
- return retval;
- }
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
- return 0;
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_PWR_OFF);
}
static irqreturn_t pcie_isr(int irq, void *dev_id)
{
struct controller *ctrl = (struct controller *)dev_id;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
struct slot *slot = ctrl->slot;
u16 detected, intr_loc;
@@ -653,11 +536,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
*/
intr_loc = 0;
do {
- if (pciehp_readw(ctrl, PCI_EXP_SLTSTA, &detected)) {
- ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n",
- __func__);
- return IRQ_NONE;
- }
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &detected);
detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
@@ -666,11 +545,9 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
intr_loc |= detected;
if (!intr_loc)
return IRQ_NONE;
- if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, intr_loc)) {
- ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n",
- __func__);
- return IRQ_NONE;
- }
+ if (detected)
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ intr_loc);
} while (detected);
ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc);
@@ -705,7 +582,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int pcie_enable_notification(struct controller *ctrl)
+void pcie_enable_notification(struct controller *ctrl)
{
u16 cmd, mask;
@@ -731,22 +608,18 @@ int pcie_enable_notification(struct controller *ctrl)
PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE);
- if (pcie_write_cmd(ctrl, cmd, mask)) {
- ctrl_err(ctrl, "Cannot enable software notification\n");
- return -1;
- }
- return 0;
+ pcie_write_cmd(ctrl, cmd, mask);
}
static void pcie_disable_notification(struct controller *ctrl)
{
u16 mask;
+
mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
PCI_EXP_SLTCTL_DLLSCE);
- if (pcie_write_cmd(ctrl, 0, mask))
- ctrl_warn(ctrl, "Cannot disable software notification\n");
+ pcie_write_cmd(ctrl, 0, mask);
}
/*
@@ -758,6 +631,7 @@ static void pcie_disable_notification(struct controller *ctrl)
int pciehp_reset_slot(struct slot *slot, int probe)
{
struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
if (probe)
return 0;
@@ -771,7 +645,8 @@ int pciehp_reset_slot(struct slot *slot, int probe)
pci_reset_bridge_secondary_bus(ctrl->pcie->port);
if (HP_SUPR_RM(ctrl)) {
- pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_PDC);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_PDC);
pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PDCE, PCI_EXP_SLTCTL_PDCE);
if (pciehp_poll_mode)
int_poll_timeout(ctrl->poll_timer.data);
@@ -784,10 +659,7 @@ int pcie_init_notification(struct controller *ctrl)
{
if (pciehp_request_irq(ctrl))
return -1;
- if (pcie_enable_notification(ctrl)) {
- pciehp_free_irq(ctrl);
- return -1;
- }
+ pcie_enable_notification(ctrl);
ctrl->notification_enabled = 1;
return 0;
}
@@ -875,12 +747,14 @@ static inline void dbg_ctrl(struct controller *ctrl)
EMI(ctrl) ? "yes" : "no");
ctrl_info(ctrl, " Command Completed : %3s\n",
NO_CMD_CMPL(ctrl) ? "no" : "yes");
- pciehp_readw(ctrl, PCI_EXP_SLTSTA, &reg16);
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16);
- pciehp_readw(ctrl, PCI_EXP_SLTCTL, &reg16);
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16);
}
+#define FLAG(x,y) (((x) & (y)) ? '+' : '-')
+
struct controller *pcie_init(struct pcie_device *dev)
{
struct controller *ctrl;
@@ -893,11 +767,7 @@ struct controller *pcie_init(struct pcie_device *dev)
goto abort;
}
ctrl->pcie = dev;
- if (pciehp_readl(ctrl, PCI_EXP_SLTCAP, &slot_cap)) {
- ctrl_err(ctrl, "Cannot read SLOTCAP register\n");
- goto abort_ctrl;
- }
-
+ pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
ctrl->slot_cap = slot_cap;
mutex_init(&ctrl->ctrl_lock);
init_waitqueue_head(&ctrl->queue);
@@ -913,25 +783,31 @@ struct controller *pcie_init(struct pcie_device *dev)
ctrl->no_cmd_complete = 1;
/* Check if Data Link Layer Link Active Reporting is implemented */
- if (pciehp_readl(ctrl, PCI_EXP_LNKCAP, &link_cap)) {
- ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
- goto abort_ctrl;
- }
+ pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
if (link_cap & PCI_EXP_LNKCAP_DLLLARC) {
ctrl_dbg(ctrl, "Link Active Reporting supported\n");
ctrl->link_active_reporting = 1;
}
/* Clear all remaining event bits in Slot Status register */
- if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f))
- goto abort_ctrl;
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
+ PCI_EXP_SLTSTA_CC);
/* Disable software notification */
pcie_disable_notification(ctrl);
- ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
- pdev->vendor, pdev->device, pdev->subsystem_vendor,
- pdev->subsystem_device);
+ ctrl_info(ctrl, "Slot #%d AttnBtn%c AttnInd%c PwrInd%c PwrCtrl%c MRL%c Interlock%c NoCompl%c LLActRep%c\n",
+ (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
+ FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
+ FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC));
if (pcie_init_slot(ctrl))
goto abort_ctrl;
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 0e0d0f7f63fd..b07d7cc2d697 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -39,22 +39,26 @@ int pciehp_configure_device(struct slot *p_slot)
struct pci_dev *dev;
struct pci_dev *bridge = p_slot->ctrl->pcie->port;
struct pci_bus *parent = bridge->subordinate;
- int num;
+ int num, ret = 0;
struct controller *ctrl = p_slot->ctrl;
+ pci_lock_rescan_remove();
+
dev = pci_get_slot(parent, PCI_DEVFN(0, 0));
if (dev) {
ctrl_err(ctrl, "Device %s already exists "
"at %04x:%02x:00, cannot hot-add\n", pci_name(dev),
pci_domain_nr(parent), parent->number);
pci_dev_put(dev);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
num = pci_scan_slot(parent, PCI_DEVFN(0, 0));
if (num == 0) {
ctrl_err(ctrl, "No new device found\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
list_for_each_entry(dev, &parent->devices, bus_list)
@@ -73,12 +77,14 @@ int pciehp_configure_device(struct slot *p_slot)
pci_bus_add_devices(parent);
- return 0;
+ out:
+ pci_unlock_rescan_remove();
+ return ret;
}
int pciehp_unconfigure_device(struct slot *p_slot)
{
- int ret, rc = 0;
+ int rc = 0;
u8 bctl = 0;
u8 presence = 0;
struct pci_dev *dev, *temp;
@@ -88,9 +94,9 @@ int pciehp_unconfigure_device(struct slot *p_slot)
ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n",
__func__, pci_domain_nr(parent), parent->number);
- ret = pciehp_get_adapter_status(p_slot, &presence);
- if (ret)
- presence = 0;
+ pciehp_get_adapter_status(p_slot, &presence);
+
+ pci_lock_rescan_remove();
/*
* Stopping an SR-IOV PF device removes all the associated VFs,
@@ -126,5 +132,6 @@ int pciehp_unconfigure_device(struct slot *p_slot)
pci_dev_put(dev);
}
+ pci_unlock_rescan_remove();
return rc;
}
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index e9c044d15add..4fcdeedda31b 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -354,10 +354,15 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
{
struct pci_bus *bus;
struct slot *slot;
+ int ret = 0;
+
+ pci_lock_rescan_remove();
bus = pcibios_find_pci_bus(dn);
- if (!bus)
- return -EINVAL;
+ if (!bus) {
+ ret = -EINVAL;
+ goto out;
+ }
pr_debug("PCI: Removing PCI slot below EADS bridge %s\n",
bus->self ? pci_name(bus->self) : "<!PHB!>");
@@ -371,7 +376,8 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
printk(KERN_ERR
"%s: unable to remove hotplug slot %s\n",
__func__, drc_name);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
}
@@ -382,7 +388,8 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
if (pcibios_unmap_io_space(bus)) {
printk(KERN_ERR "%s: failed to unmap bus range\n",
__func__);
- return -ERANGE;
+ ret = -ERANGE;
+ goto out;
}
/* Remove the EADS bridge device itself */
@@ -390,7 +397,9 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self));
pci_stop_and_remove_bus_device(bus->self);
- return 0;
+ out:
+ pci_unlock_rescan_remove();
+ return ret;
}
/**
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index b7fc5c9255a5..4796c15fba94 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -398,7 +398,9 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
return retval;
if (state == PRESENT) {
+ pci_lock_rescan_remove();
pcibios_add_pci_devices(slot->bus);
+ pci_unlock_rescan_remove();
slot->state = CONFIGURED;
} else if (state == EMPTY) {
slot->state = EMPTY;
@@ -418,7 +420,9 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
if (slot->state == NOT_CONFIGURED)
return -EINVAL;
+ pci_lock_rescan_remove();
pcibios_remove_pci_devices(slot->bus);
+ pci_unlock_rescan_remove();
vm_unmap_aliases();
slot->state = NOT_CONFIGURED;
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 3c7eb5dd91c6..8d2ce22151eb 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -80,7 +80,9 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
goto out_deconfigure;
pci_scan_slot(slot->zdev->bus, ZPCI_DEVFN);
+ pci_lock_rescan_remove();
pci_bus_add_devices(slot->zdev->bus);
+ pci_unlock_rescan_remove();
return rc;
@@ -98,7 +100,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
return -EIO;
if (slot->zdev->pdev)
- pci_stop_and_remove_bus_device(slot->zdev->pdev);
+ pci_stop_and_remove_bus_device_locked(slot->zdev->pdev);
rc = zpci_disable_device(slot->zdev);
if (rc)
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 5b05a68cca6c..613043f7576f 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -459,12 +459,15 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
acpi_scan_lock_release();
}
+ pci_lock_rescan_remove();
+
/* Call the driver for the new device */
pci_bus_add_devices(slot->pci_bus);
/* Call the drivers for the new devices subordinate to PPB */
if (new_ppb)
pci_bus_add_devices(new_bus);
+ pci_unlock_rescan_remove();
mutex_unlock(&sn_hotplug_mutex);
if (rc == 0)
@@ -540,6 +543,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
acpi_scan_lock_release();
}
+ pci_lock_rescan_remove();
/* Free the SN resources assigned to the Linux device.*/
list_for_each_entry_safe(dev, temp, &slot->pci_bus->devices, bus_list) {
if (PCI_SLOT(dev->devfn) != slot->device_num + 1)
@@ -550,6 +554,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
pci_stop_and_remove_bus_device(dev);
pci_dev_put(dev);
}
+ pci_unlock_rescan_remove();
/* Remove the SSDT for the slot from the ACPI namespace */
if (SN_ACPI_BASE_SUPPORT() && ssdt_id) {
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index b0e83132542e..2bf69fe1926c 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -40,7 +40,9 @@ int __ref shpchp_configure_device(struct slot *p_slot)
struct controller *ctrl = p_slot->ctrl;
struct pci_dev *bridge = ctrl->pci_dev;
struct pci_bus *parent = bridge->subordinate;
- int num;
+ int num, ret = 0;
+
+ pci_lock_rescan_remove();
dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
if (dev) {
@@ -48,13 +50,15 @@ int __ref shpchp_configure_device(struct slot *p_slot)
"at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev),
pci_domain_nr(parent), p_slot->bus, p_slot->device);
pci_dev_put(dev);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0));
if (num == 0) {
ctrl_err(ctrl, "No new device found\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
list_for_each_entry(dev, &parent->devices, bus_list) {
@@ -75,7 +79,9 @@ int __ref shpchp_configure_device(struct slot *p_slot)
pci_bus_add_devices(parent);
- return 0;
+ out:
+ pci_unlock_rescan_remove();
+ return ret;
}
int shpchp_unconfigure_device(struct slot *p_slot)
@@ -89,6 +95,8 @@ int shpchp_unconfigure_device(struct slot *p_slot)
ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n",
__func__, pci_domain_nr(parent), p_slot->bus, p_slot->device);
+ pci_lock_rescan_remove();
+
list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) {
if (PCI_SLOT(dev->devfn) != p_slot->device)
continue;
@@ -108,6 +116,8 @@ int shpchp_unconfigure_device(struct slot *p_slot)
pci_stop_and_remove_bus_device(dev);
pci_dev_put(dev);
}
+
+ pci_unlock_rescan_remove();
return rc;
}
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
index 50ce68098298..6b2b7dddbbdb 100644
--- a/drivers/pci/ioapic.c
+++ b/drivers/pci/ioapic.c
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <acpi/acpi_bus.h>
struct ioapic {
acpi_handle handle;
@@ -113,6 +112,10 @@ static struct pci_driver ioapic_driver = {
.remove = ioapic_remove,
};
-module_pci_driver(ioapic_driver);
+static int __init ioapic_init(void)
+{
+ return pci_register_driver(&ioapic_driver);
+}
+module_init(ioapic_init);
MODULE_LICENSE("GPL");
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 1fe2d6fb19d5..9dce7c5e2a77 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -84,6 +84,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
virtfn->dev.parent = dev->dev.parent;
virtfn->physfn = pci_dev_get(dev);
virtfn->is_virtfn = 1;
+ virtfn->multifunction = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = dev->resource + PCI_IOV_RESOURCES + i;
@@ -441,6 +442,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
found:
pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
+ pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, 0);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
if (!offset || (total > 1 && !stride))
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 3fcd67a16677..7a0fec6ce571 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -116,7 +116,7 @@ void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
return default_teardown_msi_irqs(dev);
}
-void default_restore_msi_irqs(struct pci_dev *dev, int irq)
+static void default_restore_msi_irq(struct pci_dev *dev, int irq)
{
struct msi_desc *entry;
@@ -134,9 +134,9 @@ void default_restore_msi_irqs(struct pci_dev *dev, int irq)
write_msi_msg(irq, &entry->msg);
}
-void __weak arch_restore_msi_irqs(struct pci_dev *dev, int irq)
+void __weak arch_restore_msi_irqs(struct pci_dev *dev)
{
- return default_restore_msi_irqs(dev, irq);
+ return default_restore_msi_irqs(dev);
}
static void msi_set_enable(struct pci_dev *dev, int enable)
@@ -262,6 +262,15 @@ void unmask_msi_irq(struct irq_data *data)
msi_set_mask_bit(data, 0);
}
+void default_restore_msi_irqs(struct pci_dev *dev)
+{
+ struct msi_desc *entry;
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ default_restore_msi_irq(dev, entry->irq);
+ }
+}
+
void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
BUG_ON(entry->dev->current_state != PCI_D0);
@@ -363,6 +372,9 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
static void free_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry, *tmp;
+ struct attribute **msi_attrs;
+ struct device_attribute *dev_attr;
+ int count = 0;
list_for_each_entry(entry, &dev->msi_list, list) {
int i, nvec;
@@ -398,6 +410,22 @@ static void free_msi_irqs(struct pci_dev *dev)
list_del(&entry->list);
kfree(entry);
}
+
+ if (dev->msi_irq_groups) {
+ sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups);
+ msi_attrs = dev->msi_irq_groups[0]->attrs;
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ dev_attr = container_of(msi_attrs[count],
+ struct device_attribute, attr);
+ kfree(dev_attr->attr.name);
+ kfree(dev_attr);
+ ++count;
+ }
+ kfree(msi_attrs);
+ kfree(dev->msi_irq_groups[0]);
+ kfree(dev->msi_irq_groups);
+ dev->msi_irq_groups = NULL;
+ }
}
static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
@@ -430,7 +458,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
pci_intx_for_msi(dev, 0);
msi_set_enable(dev, 0);
- arch_restore_msi_irqs(dev, dev->irq);
+ arch_restore_msi_irqs(dev);
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
@@ -455,8 +483,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+ arch_restore_msi_irqs(dev);
list_for_each_entry(entry, &dev->msi_list, list) {
- arch_restore_msi_irqs(dev, entry->irq);
msix_mask_irq(entry, entry->masked);
}
@@ -471,94 +499,95 @@ void pci_restore_msi_state(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);
-
-#define to_msi_attr(obj) container_of(obj, struct msi_attribute, attr)
-#define to_msi_desc(obj) container_of(obj, struct msi_desc, kobj)
-
-struct msi_attribute {
- struct attribute attr;
- ssize_t (*show)(struct msi_desc *entry, struct msi_attribute *attr,
- char *buf);
- ssize_t (*store)(struct msi_desc *entry, struct msi_attribute *attr,
- const char *buf, size_t count);
-};
-
-static ssize_t show_msi_mode(struct msi_desc *entry, struct msi_attribute *atr,
+static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%s\n", entry->msi_attrib.is_msix ? "msix" : "msi");
-}
-
-static ssize_t msi_irq_attr_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct msi_attribute *attribute = to_msi_attr(attr);
- struct msi_desc *entry = to_msi_desc(kobj);
-
- if (!attribute->show)
- return -EIO;
-
- return attribute->show(entry, attribute, buf);
-}
-
-static const struct sysfs_ops msi_irq_sysfs_ops = {
- .show = msi_irq_attr_show,
-};
-
-static struct msi_attribute mode_attribute =
- __ATTR(mode, S_IRUGO, show_msi_mode, NULL);
-
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct msi_desc *entry;
+ unsigned long irq;
+ int retval;
-static struct attribute *msi_irq_default_attrs[] = {
- &mode_attribute.attr,
- NULL
-};
+ retval = kstrtoul(attr->attr.name, 10, &irq);
+ if (retval)
+ return retval;
-static void msi_kobj_release(struct kobject *kobj)
-{
- struct msi_desc *entry = to_msi_desc(kobj);
-
- pci_dev_put(entry->dev);
+ list_for_each_entry(entry, &pdev->msi_list, list) {
+ if (entry->irq == irq) {
+ return sprintf(buf, "%s\n",
+ entry->msi_attrib.is_msix ? "msix" : "msi");
+ }
+ }
+ return -ENODEV;
}
-static struct kobj_type msi_irq_ktype = {
- .release = msi_kobj_release,
- .sysfs_ops = &msi_irq_sysfs_ops,
- .default_attrs = msi_irq_default_attrs,
-};
-
static int populate_msi_sysfs(struct pci_dev *pdev)
{
+ struct attribute **msi_attrs;
+ struct attribute *msi_attr;
+ struct device_attribute *msi_dev_attr;
+ struct attribute_group *msi_irq_group;
+ const struct attribute_group **msi_irq_groups;
struct msi_desc *entry;
- struct kobject *kobj;
- int ret;
+ int ret = -ENOMEM;
+ int num_msi = 0;
int count = 0;
- pdev->msi_kset = kset_create_and_add("msi_irqs", NULL, &pdev->dev.kobj);
- if (!pdev->msi_kset)
- return -ENOMEM;
+ /* Determine how many msi entries we have */
+ list_for_each_entry(entry, &pdev->msi_list, list) {
+ ++num_msi;
+ }
+ if (!num_msi)
+ return 0;
+ /* Dynamically create the MSI attributes for the PCI device */
+ msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL);
+ if (!msi_attrs)
+ return -ENOMEM;
list_for_each_entry(entry, &pdev->msi_list, list) {
- kobj = &entry->kobj;
- kobj->kset = pdev->msi_kset;
- pci_dev_get(pdev);
- ret = kobject_init_and_add(kobj, &msi_irq_ktype, NULL,
- "%u", entry->irq);
- if (ret)
- goto out_unroll;
-
- count++;
+ char *name = kmalloc(20, GFP_KERNEL);
+ msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
+ if (!msi_dev_attr)
+ goto error_attrs;
+ sprintf(name, "%d", entry->irq);
+ sysfs_attr_init(&msi_dev_attr->attr);
+ msi_dev_attr->attr.name = name;
+ msi_dev_attr->attr.mode = S_IRUGO;
+ msi_dev_attr->show = msi_mode_show;
+ msi_attrs[count] = &msi_dev_attr->attr;
+ ++count;
}
+ msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
+ if (!msi_irq_group)
+ goto error_attrs;
+ msi_irq_group->name = "msi_irqs";
+ msi_irq_group->attrs = msi_attrs;
+
+ msi_irq_groups = kzalloc(sizeof(void *) * 2, GFP_KERNEL);
+ if (!msi_irq_groups)
+ goto error_irq_group;
+ msi_irq_groups[0] = msi_irq_group;
+
+ ret = sysfs_create_groups(&pdev->dev.kobj, msi_irq_groups);
+ if (ret)
+ goto error_irq_groups;
+ pdev->msi_irq_groups = msi_irq_groups;
+
return 0;
-out_unroll:
- list_for_each_entry(entry, &pdev->msi_list, list) {
- if (!count)
- break;
- kobject_del(&entry->kobj);
- kobject_put(&entry->kobj);
- count--;
+error_irq_groups:
+ kfree(msi_irq_groups);
+error_irq_group:
+ kfree(msi_irq_group);
+error_attrs:
+ count = 0;
+ msi_attr = msi_attrs[count];
+ while (msi_attr) {
+ msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
+ kfree(msi_attr->name);
+ kfree(msi_dev_attr);
+ ++count;
+ msi_attr = msi_attrs[count];
}
return ret;
}
@@ -729,7 +758,7 @@ static int msix_capability_init(struct pci_dev *dev,
ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret)
- goto error;
+ goto out_avail;
/*
* Some devices require MSI-X to be enabled before we can touch the
@@ -742,10 +771,8 @@ static int msix_capability_init(struct pci_dev *dev,
msix_program_entries(dev, entries);
ret = populate_msi_sysfs(dev);
- if (ret) {
- ret = 0;
- goto error;
- }
+ if (ret)
+ goto out_free;
/* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
@@ -756,7 +783,7 @@ static int msix_capability_init(struct pci_dev *dev,
return 0;
-error:
+out_avail:
if (ret < 0) {
/*
* If we had some success, report the number of irqs
@@ -773,6 +800,7 @@ error:
ret = avail;
}
+out_free:
free_msi_irqs(dev);
return ret;
@@ -824,6 +852,31 @@ static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
}
/**
+ * pci_msi_vec_count - Return the number of MSI vectors a device can send
+ * @dev: device to report about
+ *
+ * This function returns the number of MSI vectors a device requested via
+ * the Multiple Message Capable register. It returns a negative errno if the
+ * device is not capable of sending MSI interrupts. Otherwise, the call
+ * succeeds and returns a power of two, up to a maximum of 2^5 (32), as
+ * defined by the MSI specification.
+ **/
+int pci_msi_vec_count(struct pci_dev *dev)
+{
+ int ret;
+ u16 msgctl;
+
+ if (!dev->msi_cap)
+ return -EINVAL;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
+ ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
+
+ return ret;
+}
+EXPORT_SYMBOL(pci_msi_vec_count);
+
+/**
* pci_enable_msi_block - configure device's MSI capability structure
* @dev: device to configure
* @nvec: number of interrupts to configure
@@ -836,16 +889,16 @@ static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
* updates the @dev's irq member to the lowest new interrupt number; the
* other interrupt numbers allocated to this device are consecutive.
*/
-int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
+int pci_enable_msi_block(struct pci_dev *dev, int nvec)
{
int status, maxvec;
- u16 msgctl;
- if (!dev->msi_cap || dev->current_state != PCI_D0)
+ if (dev->current_state != PCI_D0)
return -EINVAL;
- pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
- maxvec = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
+ maxvec = pci_msi_vec_count(dev);
+ if (maxvec < 0)
+ return maxvec;
if (nvec > maxvec)
return maxvec;
@@ -867,31 +920,6 @@ int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
}
EXPORT_SYMBOL(pci_enable_msi_block);
-int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec)
-{
- int ret, nvec;
- u16 msgctl;
-
- if (!dev->msi_cap || dev->current_state != PCI_D0)
- return -EINVAL;
-
- pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
- ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
-
- if (maxvec)
- *maxvec = ret;
-
- do {
- nvec = ret;
- ret = pci_enable_msi_block(dev, nvec);
- } while (ret > 0);
-
- if (ret < 0)
- return ret;
- return nvec;
-}
-EXPORT_SYMBOL(pci_enable_msi_block_auto);
-
void pci_msi_shutdown(struct pci_dev *dev)
{
struct msi_desc *desc;
@@ -925,25 +953,29 @@ void pci_disable_msi(struct pci_dev *dev)
pci_msi_shutdown(dev);
free_msi_irqs(dev);
- kset_unregister(dev->msi_kset);
- dev->msi_kset = NULL;
}
EXPORT_SYMBOL(pci_disable_msi);
/**
- * pci_msix_table_size - return the number of device's MSI-X table entries
+ * pci_msix_vec_count - return the number of the device's MSI-X table entries
* @dev: pointer to the pci_dev data structure of MSI-X device function
- */
-int pci_msix_table_size(struct pci_dev *dev)
+ *
+ * This function returns the number of the device's MSI-X table entries and
+ * therefore the number of MSI-X vectors the device is capable of sending.
+ * It returns a negative errno if the device is not capable of sending MSI-X
+ * interrupts.
+ **/
+int pci_msix_vec_count(struct pci_dev *dev)
{
u16 control;
if (!dev->msix_cap)
- return 0;
+ return -EINVAL;
pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
return msix_table_size(control);
}
+EXPORT_SYMBOL(pci_msix_vec_count);
/**
* pci_enable_msix - configure device's MSI-X capability structure
@@ -972,7 +1004,9 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
if (status)
return status;
- nr_entries = pci_msix_table_size(dev);
+ nr_entries = pci_msix_vec_count(dev);
+ if (nr_entries < 0)
+ return nr_entries;
if (nvec > nr_entries)
return nr_entries;
@@ -1023,8 +1057,6 @@ void pci_disable_msix(struct pci_dev *dev)
pci_msix_shutdown(dev);
free_msi_irqs(dev);
- kset_unregister(dev->msi_kset);
- dev->msi_kset = NULL;
}
EXPORT_SYMBOL(pci_disable_msix);
@@ -1079,3 +1111,77 @@ void pci_msi_init_pci_dev(struct pci_dev *dev)
if (dev->msix_cap)
msix_set_enable(dev, 0);
}
+
+/**
+ * pci_enable_msi_range - configure device's MSI capability structure
+ * @dev: device to configure
+ * @minvec: minimal number of interrupts to configure
+ * @maxvec: maximum number of interrupts to configure
+ *
+ * This function tries to allocate the maximum possible number of interrupts
+ * in the range between @minvec and @maxvec. It returns a negative errno if an
+ * error occurs. If it succeeds, it returns the actual number of interrupts
+ * allocated and updates the @dev's irq member to the lowest new interrupt
+ * number; the other interrupt numbers allocated to this device are consecutive.
+ **/
+int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
+{
+ int nvec = maxvec;
+ int rc;
+
+ if (maxvec < minvec)
+ return -ERANGE;
+
+ do {
+ rc = pci_enable_msi_block(dev, nvec);
+ if (rc < 0) {
+ return rc;
+ } else if (rc > 0) {
+ if (rc < minvec)
+ return -ENOSPC;
+ nvec = rc;
+ }
+ } while (rc);
+
+ return nvec;
+}
+EXPORT_SYMBOL(pci_enable_msi_range);
+
+/**
+ * pci_enable_msix_range - configure device's MSI-X capability structure
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ * @entries: pointer to an array of MSI-X entries
+ * @minvec: minimum number of MSI-X irqs requested
+ * @maxvec: maximum number of MSI-X irqs requested
+ *
+ * Set up the MSI-X capability structure of the device function with the
+ * maximum possible number of interrupts in the range between @minvec and
+ * @maxvec, as requested by the device's driver when it asks for MSI-X mode
+ * to be enabled. It returns a negative errno if an error occurs. If it
+ * succeeds, it returns the actual number of interrupts allocated, which
+ * indicates that the MSI-X capability structure has been successfully
+ * configured with the newly allocated MSI-X interrupts.
+ **/
+int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+ int minvec, int maxvec)
+{
+ int nvec = maxvec;
+ int rc;
+
+ if (maxvec < minvec)
+ return -ERANGE;
+
+ do {
+ rc = pci_enable_msix(dev, entries, nvec);
+ if (rc < 0) {
+ return rc;
+ } else if (rc > 0) {
+ if (rc < minvec)
+ return -ENOSPC;
+ nvec = rc;
+ }
+ } while (rc);
+
+ return nvec;
+}
+EXPORT_SYMBOL(pci_enable_msix_range);
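For illustration, a driver might call the new range allocators roughly like this; a minimal sketch, where the foo_setup_irqs name, the 2..8 vector range, and the error handling are hypothetical:

/* Hypothetical driver sketch: ask for between 2 and 8 MSI-X vectors. */
static int foo_setup_irqs(struct pci_dev *pdev)
{
	struct msix_entry entries[8];
	int i, nvec;

	for (i = 0; i < 8; i++)
		entries[i].entry = i;

	nvec = pci_enable_msix_range(pdev, entries, 2, 8);
	if (nvec < 0)
		return nvec;	/* not even the 2-vector minimum was available */

	/* entries[0..nvec-1].vector now hold the allocated interrupt numbers. */
	return nvec;
}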
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 577074efbe62..f49abef88485 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -12,9 +12,6 @@
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/pci-aspm.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
-
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
@@ -306,10 +303,10 @@ void acpi_pci_remove_bus(struct pci_bus *bus)
}
/* ACPI bus type */
-static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
+static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- bool is_bridge;
+ bool check_children;
u64 addr;
/*
@@ -317,54 +314,55 @@ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
* is set only after acpi_pci_find_device() has been called for the
* given device.
*/
- is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
+ check_children = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
|| pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
/* Please ref to ACPI spec for the syntax of _ADR */
addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
- *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge);
- if (!*handle)
- return -ENODEV;
- return 0;
+ return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
+ check_children);
}
static void pci_acpi_setup(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- acpi_handle handle = ACPI_HANDLE(dev);
- struct acpi_device *adev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (!adev)
+ return;
- if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid)
+ pci_acpi_add_pm_notifier(adev, pci_dev);
+ if (!adev->wakeup.flags.valid)
return;
device_set_wakeup_capable(dev, true);
acpi_pci_sleep_wake(pci_dev, false);
-
- pci_acpi_add_pm_notifier(adev, pci_dev);
if (adev->wakeup.flags.run_wake)
device_set_run_wake(dev, true);
}
static void pci_acpi_cleanup(struct device *dev)
{
- acpi_handle handle = ACPI_HANDLE(dev);
- struct acpi_device *adev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (!adev)
+ return;
- if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) {
+ pci_acpi_remove_pm_notifier(adev);
+ if (adev->wakeup.flags.valid) {
device_set_wakeup_capable(dev, false);
device_set_run_wake(dev, false);
- pci_acpi_remove_pm_notifier(adev);
}
}
static bool pci_acpi_bus_match(struct device *dev)
{
- return dev->bus == &pci_bus_type;
+ return dev_is_pci(dev);
}
static struct acpi_bus_type acpi_pci_bus = {
.name = "PCI",
.match = pci_acpi_bus_match,
- .find_device = acpi_pci_find_device,
+ .find_companion = acpi_pci_find_companion,
.setup = pci_acpi_setup,
.cleanup = pci_acpi_cleanup,
};
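For illustration, with the companion pointer wired up through .find_companion, PCI code can reach the ACPI device object directly instead of passing raw handles around; a minimal sketch, where the foo_has_acpi_wakeup helper is hypothetical:

/* Hypothetical helper: check the ACPI companion for wakeup support. */
static bool foo_has_acpi_wakeup(struct pci_dev *pdev)
{
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);

	return adev && adev->wakeup.flags.valid;
}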
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index d51f45aa669e..45113daaa778 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -29,26 +29,11 @@
#include <linux/nls.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
-#include <acpi/acpi_bus.h>
#include "pci.h"
#define DEVICE_LABEL_DSM 0x07
-#ifndef CONFIG_DMI
-
-static inline int
-pci_create_smbiosname_file(struct pci_dev *pdev)
-{
- return -1;
-}
-
-static inline void
-pci_remove_smbiosname_file(struct pci_dev *pdev)
-{
-}
-
-#else
-
+#ifdef CONFIG_DMI
enum smbios_attr_enum {
SMBIOS_ATTR_NONE = 0,
SMBIOS_ATTR_LABEL_SHOW,
@@ -156,38 +141,26 @@ pci_remove_smbiosname_file(struct pci_dev *pdev)
{
sysfs_remove_group(&pdev->dev.kobj, &smbios_attr_group);
}
-
-#endif
-
-#ifndef CONFIG_ACPI
-
-static inline int
-pci_create_acpi_index_label_files(struct pci_dev *pdev)
-{
- return -1;
-}
-
+#else
static inline int
-pci_remove_acpi_index_label_files(struct pci_dev *pdev)
+pci_create_smbiosname_file(struct pci_dev *pdev)
{
return -1;
}
-static inline bool
-device_has_dsm(struct device *dev)
+static inline void
+pci_remove_smbiosname_file(struct pci_dev *pdev)
{
- return false;
}
+#endif
-#else
-
+#ifdef CONFIG_ACPI
static const char device_label_dsm_uuid[] = {
0xD0, 0x37, 0xC9, 0xE5, 0x53, 0x35, 0x7A, 0x4D,
0x91, 0x17, 0xEA, 0x4D, 0x19, 0xC3, 0x43, 0x4D
};
enum acpi_attr_enum {
- ACPI_ATTR_NONE = 0,
ACPI_ATTR_LABEL_SHOW,
ACPI_ATTR_INDEX_SHOW,
};
@@ -195,84 +168,61 @@ enum acpi_attr_enum {
static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
{
int len;
- len = utf16s_to_utf8s((const wchar_t *)obj->
- package.elements[1].string.pointer,
- obj->package.elements[1].string.length,
+ len = utf16s_to_utf8s((const wchar_t *)obj->string.pointer,
+ obj->string.length,
UTF16_LITTLE_ENDIAN,
buf, PAGE_SIZE);
buf[len] = '\n';
}
static int
-dsm_get_label(acpi_handle handle, int func,
- struct acpi_buffer *output,
- char *buf, enum acpi_attr_enum attribute)
+dsm_get_label(struct device *dev, char *buf, enum acpi_attr_enum attr)
{
- struct acpi_object_list input;
- union acpi_object params[4];
- union acpi_object *obj;
- int len = 0;
-
- int err;
-
- input.count = 4;
- input.pointer = params;
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(device_label_dsm_uuid);
- params[0].buffer.pointer = (char *)device_label_dsm_uuid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = 0x02;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = func;
- params[3].type = ACPI_TYPE_PACKAGE;
- params[3].package.count = 0;
- params[3].package.elements = NULL;
-
- err = acpi_evaluate_object(handle, "_DSM", &input, output);
- if (err)
+ acpi_handle handle;
+ union acpi_object *obj, *tmp;
+ int len = -1;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
return -1;
- obj = (union acpi_object *)output->pointer;
-
- switch (obj->type) {
- case ACPI_TYPE_PACKAGE:
- if (obj->package.count != 2)
- break;
- len = obj->package.elements[0].integer.value;
- if (buf) {
- if (attribute == ACPI_ATTR_INDEX_SHOW)
- scnprintf(buf, PAGE_SIZE, "%llu\n",
- obj->package.elements[0].integer.value);
- else if (attribute == ACPI_ATTR_LABEL_SHOW)
- dsm_label_utf16s_to_utf8s(obj, buf);
- kfree(output->pointer);
- return strlen(buf);
- }
- kfree(output->pointer);
- return len;
- break;
- default:
- kfree(output->pointer);
+ obj = acpi_evaluate_dsm(handle, device_label_dsm_uuid, 0x2,
+ DEVICE_LABEL_DSM, NULL);
+ if (!obj)
+ return -1;
+
+ tmp = obj->package.elements;
+ if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 2 &&
+ tmp[0].type == ACPI_TYPE_INTEGER &&
+ tmp[1].type == ACPI_TYPE_STRING) {
+ /*
+ * The second string element is optional even when
+ * this _DSM is implemented; when not implemented,
+ * this entry must return a null string.
+ */
+ if (attr == ACPI_ATTR_INDEX_SHOW)
+ scnprintf(buf, PAGE_SIZE, "%llu\n", tmp->integer.value);
+ else if (attr == ACPI_ATTR_LABEL_SHOW)
+ dsm_label_utf16s_to_utf8s(tmp + 1, buf);
+ len = strlen(buf) > 0 ? strlen(buf) : -1;
}
- return -1;
+
+ ACPI_FREE(obj);
+
+ return len;
}
static bool
device_has_dsm(struct device *dev)
{
acpi_handle handle;
- struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
handle = ACPI_HANDLE(dev);
-
if (!handle)
- return FALSE;
+ return false;
- if (dsm_get_label(handle, DEVICE_LABEL_DSM, &output, NULL,
- ACPI_ATTR_NONE) > 0)
- return TRUE;
-
- return FALSE;
+ return !!acpi_check_dsm(handle, device_label_dsm_uuid, 0x2,
+ 1 << DEVICE_LABEL_DSM);
}
static umode_t
@@ -291,44 +241,13 @@ acpi_index_string_exist(struct kobject *kobj, struct attribute *attr, int n)
static ssize_t
acpilabel_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_handle handle;
- int length;
-
- handle = ACPI_HANDLE(dev);
-
- if (!handle)
- return -1;
-
- length = dsm_get_label(handle, DEVICE_LABEL_DSM,
- &output, buf, ACPI_ATTR_LABEL_SHOW);
-
- if (length < 1)
- return -1;
-
- return length;
+ return dsm_get_label(dev, buf, ACPI_ATTR_LABEL_SHOW);
}
static ssize_t
acpiindex_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_handle handle;
- int length;
-
- handle = ACPI_HANDLE(dev);
-
- if (!handle)
- return -1;
-
- length = dsm_get_label(handle, DEVICE_LABEL_DSM,
- &output, buf, ACPI_ATTR_INDEX_SHOW);
-
- if (length < 0)
- return -1;
-
- return length;
-
+ return dsm_get_label(dev, buf, ACPI_ATTR_INDEX_SHOW);
}
static struct device_attribute acpi_attr_label = {
@@ -364,6 +283,24 @@ pci_remove_acpi_index_label_files(struct pci_dev *pdev)
sysfs_remove_group(&pdev->dev.kobj, &acpi_attr_group);
return 0;
}
+#else
+static inline int
+pci_create_acpi_index_label_files(struct pci_dev *pdev)
+{
+ return -1;
+}
+
+static inline int
+pci_remove_acpi_index_label_files(struct pci_dev *pdev)
+{
+ return -1;
+}
+
+static inline bool
+device_has_dsm(struct device *dev)
+{
+ return false;
+}
#endif
void pci_create_firmware_label_files(struct pci_dev *pdev)
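For illustration, the helper-based _DSM pattern used above reduces to a capability check followed by an evaluation; a minimal sketch under the same UUID and function number, where the foo_eval_label_dsm name is hypothetical and the caller must ACPI_FREE() the returned object:

/* Hypothetical sketch: probe for the label _DSM, then evaluate it. */
static union acpi_object *foo_eval_label_dsm(struct device *dev)
{
	acpi_handle handle = ACPI_HANDLE(dev);

	if (!handle || !acpi_check_dsm(handle, device_label_dsm_uuid, 0x2,
				       1 << DEVICE_LABEL_DSM))
		return NULL;

	return acpi_evaluate_dsm(handle, device_label_dsm_uuid, 0x2,
				 DEVICE_LABEL_DSM, NULL);
}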
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index c91e6c18debc..276ef9c18802 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -297,7 +297,6 @@ msi_bus_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(msi_bus);
-static DEFINE_MUTEX(pci_remove_rescan_mutex);
static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
size_t count)
{
@@ -308,10 +307,10 @@ static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
return -EINVAL;
if (val) {
- mutex_lock(&pci_remove_rescan_mutex);
+ pci_lock_rescan_remove();
while ((b = pci_find_next_bus(b)) != NULL)
pci_rescan_bus(b);
- mutex_unlock(&pci_remove_rescan_mutex);
+ pci_unlock_rescan_remove();
}
return count;
}
@@ -342,9 +341,9 @@ dev_rescan_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
if (val) {
- mutex_lock(&pci_remove_rescan_mutex);
+ pci_lock_rescan_remove();
pci_rescan_bus(pdev->bus);
- mutex_unlock(&pci_remove_rescan_mutex);
+ pci_unlock_rescan_remove();
}
return count;
}
@@ -354,11 +353,7 @@ static struct device_attribute dev_rescan_attr = __ATTR(rescan,
static void remove_callback(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
-
- mutex_lock(&pci_remove_rescan_mutex);
- pci_stop_and_remove_bus_device(pdev);
- mutex_unlock(&pci_remove_rescan_mutex);
+ pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
}
static ssize_t
@@ -395,12 +390,12 @@ dev_bus_rescan_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
if (val) {
- mutex_lock(&pci_remove_rescan_mutex);
+ pci_lock_rescan_remove();
if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
pci_rescan_bus_bridge_resize(bus->self);
else
pci_rescan_bus(bus);
- mutex_unlock(&pci_remove_rescan_mutex);
+ pci_unlock_rescan_remove();
}
return count;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 07369f32e8bb..1febe90831b4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -431,6 +431,32 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
}
/**
+ * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
+ * @dev: the PCI device to operate on
+ * @pos: config space offset of status word
+ * @mask: mask of bit(s) to care about in status word
+ *
+ * Return 1 when the mask bit(s) in the status word are clear, 0 otherwise.
+ */
+int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
+{
+ int i;
+
+ /* Wait for Transaction Pending bit clean */
+ for (i = 0; i < 4; i++) {
+ u16 status;
+ if (i)
+ msleep((1 << (i - 1)) * 100);
+
+ pci_read_config_word(dev, pos, &status);
+ if (!(status & mask))
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
* pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
* @dev: PCI device to have its BARs restored
*
@@ -657,6 +683,28 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
}
/**
+ * pci_wakeup - Wake up a PCI device
+ * @pci_dev: Device to handle.
+ * @ign: ignored parameter
+ */
+static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
+{
+ pci_wakeup_event(pci_dev);
+ pm_request_resume(&pci_dev->dev);
+ return 0;
+}
+
+/**
+ * pci_wakeup_bus - Walk given bus and wake up devices on it
+ * @bus: Top bus of the subtree to walk.
+ */
+static void pci_wakeup_bus(struct pci_bus *bus)
+{
+ if (bus)
+ pci_walk_bus(bus, pci_wakeup, NULL);
+}
+
+/**
* __pci_start_power_transition - Start power transition of a PCI device
* @dev: PCI device to handle.
* @state: State to put the device into.
@@ -835,18 +883,28 @@ EXPORT_SYMBOL(pci_choose_state);
#define PCI_EXP_SAVE_REGS 7
-static struct pci_cap_saved_state *pci_find_saved_cap(
- struct pci_dev *pci_dev, char cap)
+static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
+ u16 cap, bool extended)
{
struct pci_cap_saved_state *tmp;
hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
- if (tmp->cap.cap_nr == cap)
+ if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
return tmp;
}
return NULL;
}
+struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
+{
+ return _pci_find_saved_cap(dev, cap, false);
+}
+
+struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
+{
+ return _pci_find_saved_cap(dev, cap, true);
+}
+
static int pci_save_pcie_state(struct pci_dev *dev)
{
int i = 0;
@@ -948,6 +1006,8 @@ pci_save_state(struct pci_dev *dev)
return i;
if ((i = pci_save_pcix_state(dev)) != 0)
return i;
+ if ((i = pci_save_vc_state(dev)) != 0)
+ return i;
return 0;
}
@@ -1010,6 +1070,7 @@ void pci_restore_state(struct pci_dev *dev)
/* PCI Express register must be restored first */
pci_restore_pcie_state(dev);
pci_restore_ats_state(dev);
+ pci_restore_vc_state(dev);
pci_restore_config_space(dev);
@@ -1071,7 +1132,8 @@ EXPORT_SYMBOL_GPL(pci_store_saved_state);
* @dev: PCI device that we're dealing with
* @state: Saved state returned from pci_store_saved_state()
*/
-int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
+static int pci_load_saved_state(struct pci_dev *dev,
+ struct pci_saved_state *state)
{
struct pci_cap_saved_data *cap;
@@ -1087,7 +1149,7 @@ int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
while (cap->size) {
struct pci_cap_saved_state *tmp;
- tmp = pci_find_saved_cap(dev, cap->cap_nr);
+ tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
if (!tmp || tmp->cap.size != cap->size)
return -EINVAL;
@@ -1099,7 +1161,6 @@ int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
dev->state_saved = true;
return 0;
}
-EXPORT_SYMBOL_GPL(pci_load_saved_state);
/**
* pci_load_and_free_saved_state - Reload the save state pointed to by state,
@@ -1531,27 +1592,6 @@ void pci_pme_wakeup_bus(struct pci_bus *bus)
pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}
-/**
- * pci_wakeup - Wake up a PCI device
- * @pci_dev: Device to handle.
- * @ign: ignored parameter
- */
-static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
-{
- pci_wakeup_event(pci_dev);
- pm_request_resume(&pci_dev->dev);
- return 0;
-}
-
-/**
- * pci_wakeup_bus - Walk given bus and wake up devices on it
- * @bus: Top bus of the subtree to walk.
- */
-void pci_wakeup_bus(struct pci_bus *bus)
-{
- if (bus)
- pci_walk_bus(bus, pci_wakeup, NULL);
-}
/**
* pci_pme_capable - check the capability of PCI device to generate PME#
@@ -1765,7 +1805,7 @@ int pci_wake_from_d3(struct pci_dev *dev, bool enable)
* If the platform can't manage @dev, return the deepest state from which it
* can generate wake events, based on any available PME info.
*/
-pci_power_t pci_target_state(struct pci_dev *dev)
+static pci_power_t pci_target_state(struct pci_dev *dev)
{
pci_power_t target_state = PCI_D3hot;
@@ -2021,18 +2061,24 @@ static void pci_add_saved_cap(struct pci_dev *pci_dev,
}
/**
- * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
+ * _pci_add_cap_save_buffer - allocate buffer for saving given
+ * capability registers
* @dev: the PCI device
* @cap: the capability to allocate the buffer for
+ * @extended: true if @cap is a PCI Express Extended Capability ID
* @size: requested size of the buffer
*/
-static int pci_add_cap_save_buffer(
- struct pci_dev *dev, char cap, unsigned int size)
+static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
+ bool extended, unsigned int size)
{
int pos;
struct pci_cap_saved_state *save_state;
- pos = pci_find_capability(dev, cap);
+ if (extended)
+ pos = pci_find_ext_capability(dev, cap);
+ else
+ pos = pci_find_capability(dev, cap);
+
if (pos <= 0)
return 0;
@@ -2041,12 +2087,23 @@ static int pci_add_cap_save_buffer(
return -ENOMEM;
save_state->cap.cap_nr = cap;
+ save_state->cap.cap_extended = extended;
save_state->cap.size = size;
pci_add_saved_cap(dev, save_state);
return 0;
}
+int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
+{
+ return _pci_add_cap_save_buffer(dev, cap, false, size);
+}
+
+int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
+{
+ return _pci_add_cap_save_buffer(dev, cap, true, size);
+}
+
/**
* pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
* @dev: the PCI device
@@ -2065,6 +2122,8 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
if (error)
dev_err(&dev->dev,
"unable to preallocate PCI-X save buffer\n");
+
+ pci_allocate_vc_save_buffers(dev);
}
void pci_free_cap_save_buffers(struct pci_dev *dev)
@@ -2110,242 +2169,6 @@ void pci_configure_ari(struct pci_dev *dev)
}
}
-/**
- * pci_enable_ido - enable ID-based Ordering on a device
- * @dev: the PCI device
- * @type: which types of IDO to enable
- *
- * Enable ID-based ordering on @dev. @type can contain the bits
- * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
- * which types of transactions are allowed to be re-ordered.
- */
-void pci_enable_ido(struct pci_dev *dev, unsigned long type)
-{
- u16 ctrl = 0;
-
- if (type & PCI_EXP_IDO_REQUEST)
- ctrl |= PCI_EXP_DEVCTL2_IDO_REQ_EN;
- if (type & PCI_EXP_IDO_COMPLETION)
- ctrl |= PCI_EXP_DEVCTL2_IDO_CMP_EN;
- if (ctrl)
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl);
-}
-EXPORT_SYMBOL(pci_enable_ido);
-
-/**
- * pci_disable_ido - disable ID-based ordering on a device
- * @dev: the PCI device
- * @type: which types of IDO to disable
- */
-void pci_disable_ido(struct pci_dev *dev, unsigned long type)
-{
- u16 ctrl = 0;
-
- if (type & PCI_EXP_IDO_REQUEST)
- ctrl |= PCI_EXP_DEVCTL2_IDO_REQ_EN;
- if (type & PCI_EXP_IDO_COMPLETION)
- ctrl |= PCI_EXP_DEVCTL2_IDO_CMP_EN;
- if (ctrl)
- pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl);
-}
-EXPORT_SYMBOL(pci_disable_ido);
-
-/**
- * pci_enable_obff - enable optimized buffer flush/fill
- * @dev: PCI device
- * @type: type of signaling to use
- *
- * Try to enable @type OBFF signaling on @dev. It will try using WAKE#
- * signaling if possible, falling back to message signaling only if
- * WAKE# isn't supported. @type should indicate whether the PCIe link
- * be brought out of L0s or L1 to send the message. It should be either
- * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0.
- *
- * If your device can benefit from receiving all messages, even at the
- * power cost of bringing the link back up from a low power state, use
- * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_OBFF_SIGNAL_L0 (the
- * preferred type).
- *
- * RETURNS:
- * Zero on success, appropriate error number on failure.
- */
-int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
-{
- u32 cap;
- u16 ctrl;
- int ret;
-
- pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
- if (!(cap & PCI_EXP_DEVCAP2_OBFF_MASK))
- return -ENOTSUPP; /* no OBFF support at all */
-
- /* Make sure the topology supports OBFF as well */
- if (dev->bus->self) {
- ret = pci_enable_obff(dev->bus->self, type);
- if (ret)
- return ret;
- }
-
- pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl);
- if (cap & PCI_EXP_DEVCAP2_OBFF_WAKE)
- ctrl |= PCI_EXP_DEVCTL2_OBFF_WAKE_EN;
- else {
- switch (type) {
- case PCI_EXP_OBFF_SIGNAL_L0:
- if (!(ctrl & PCI_EXP_DEVCTL2_OBFF_WAKE_EN))
- ctrl |= PCI_EXP_DEVCTL2_OBFF_MSGA_EN;
- break;
- case PCI_EXP_OBFF_SIGNAL_ALWAYS:
- ctrl &= ~PCI_EXP_DEVCTL2_OBFF_WAKE_EN;
- ctrl |= PCI_EXP_DEVCTL2_OBFF_MSGB_EN;
- break;
- default:
- WARN(1, "bad OBFF signal type\n");
- return -ENOTSUPP;
- }
- }
- pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl);
-
- return 0;
-}
-EXPORT_SYMBOL(pci_enable_obff);
-
-/**
- * pci_disable_obff - disable optimized buffer flush/fill
- * @dev: PCI device
- *
- * Disable OBFF on @dev.
- */
-void pci_disable_obff(struct pci_dev *dev)
-{
- pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_OBFF_WAKE_EN);
-}
-EXPORT_SYMBOL(pci_disable_obff);
-
-/**
- * pci_ltr_supported - check whether a device supports LTR
- * @dev: PCI device
- *
- * RETURNS:
- * True if @dev supports latency tolerance reporting, false otherwise.
- */
-static bool pci_ltr_supported(struct pci_dev *dev)
-{
- u32 cap;
-
- pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
-
- return cap & PCI_EXP_DEVCAP2_LTR;
-}
-
-/**
- * pci_enable_ltr - enable latency tolerance reporting
- * @dev: PCI device
- *
- * Enable LTR on @dev if possible, which means enabling it first on
- * upstream ports.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int pci_enable_ltr(struct pci_dev *dev)
-{
- int ret;
-
- /* Only primary function can enable/disable LTR */
- if (PCI_FUNC(dev->devfn) != 0)
- return -EINVAL;
-
- if (!pci_ltr_supported(dev))
- return -ENOTSUPP;
-
- /* Enable upstream ports first */
- if (dev->bus->self) {
- ret = pci_enable_ltr(dev->bus->self);
- if (ret)
- return ret;
- }
-
- return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_LTR_EN);
-}
-EXPORT_SYMBOL(pci_enable_ltr);
-
-/**
- * pci_disable_ltr - disable latency tolerance reporting
- * @dev: PCI device
- */
-void pci_disable_ltr(struct pci_dev *dev)
-{
- /* Only primary function can enable/disable LTR */
- if (PCI_FUNC(dev->devfn) != 0)
- return;
-
- if (!pci_ltr_supported(dev))
- return;
-
- pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_LTR_EN);
-}
-EXPORT_SYMBOL(pci_disable_ltr);
-
-static int __pci_ltr_scale(int *val)
-{
- int scale = 0;
-
- while (*val > 1023) {
- *val = (*val + 31) / 32;
- scale++;
- }
- return scale;
-}
-
-/**
- * pci_set_ltr - set LTR latency values
- * @dev: PCI device
- * @snoop_lat_ns: snoop latency in nanoseconds
- * @nosnoop_lat_ns: nosnoop latency in nanoseconds
- *
- * Figure out the scale and set the LTR values accordingly.
- */
-int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
-{
- int pos, ret, snoop_scale, nosnoop_scale;
- u16 val;
-
- if (!pci_ltr_supported(dev))
- return -ENOTSUPP;
-
- snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
- nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
-
- if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
- nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
- return -EINVAL;
-
- if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
- (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
- return -EINVAL;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
- if (!pos)
- return -ENOTSUPP;
-
- val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
- ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
- if (ret != 4)
- return -EIO;
-
- val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
- ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
- if (ret != 4)
- return -EIO;
-
- return 0;
-}
-EXPORT_SYMBOL(pci_set_ltr);
-
static int pci_acs_enable;
/**
@@ -3138,7 +2961,7 @@ bool pci_check_and_mask_intx(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
/**
- * pci_check_and_mask_intx - unmask INTx of no interrupt is pending
+ * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
* @dev: the PCI device to operate on
*
* Check if the device dev has its INTx line asserted, unmask it if not
@@ -3204,20 +3027,10 @@ EXPORT_SYMBOL(pci_set_dma_seg_boundary);
*/
int pci_wait_for_pending_transaction(struct pci_dev *dev)
{
- int i;
- u16 status;
-
- /* Wait for Transaction Pending bit clean */
- for (i = 0; i < 4; i++) {
- if (i)
- msleep((1 << (i - 1)) * 100);
-
- pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- return 1;
- }
+ if (!pci_is_pcie(dev))
+ return 1;
- return 0;
+ return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND);
}
EXPORT_SYMBOL(pci_wait_for_pending_transaction);
@@ -3244,10 +3057,8 @@ static int pcie_flr(struct pci_dev *dev, int probe)
static int pci_af_flr(struct pci_dev *dev, int probe)
{
- int i;
int pos;
u8 cap;
- u8 status;
pos = pci_find_capability(dev, PCI_CAP_ID_AF);
if (!pos)
@@ -3261,14 +3072,8 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
return 0;
/* Wait for Transaction Pending bit clean */
- for (i = 0; i < 4; i++) {
- if (i)
- msleep((1 << (i - 1)) * 100);
-
- pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
- if (!(status & PCI_AF_STATUS_TP))
- goto clear;
- }
+ if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP))
+ goto clear;
dev_err(&dev->dev, "transaction is not cleared; "
"proceeding with reset anyway\n");
@@ -3445,6 +3250,18 @@ static void pci_dev_lock(struct pci_dev *dev)
device_lock(&dev->dev);
}
+/* Return 1 on successful lock, 0 on contention */
+static int pci_dev_trylock(struct pci_dev *dev)
+{
+ if (pci_cfg_access_trylock(dev)) {
+ if (device_trylock(&dev->dev))
+ return 1;
+ pci_cfg_access_unlock(dev);
+ }
+
+ return 0;
+}
+
static void pci_dev_unlock(struct pci_dev *dev)
{
device_unlock(&dev->dev);
@@ -3588,6 +3405,34 @@ int pci_reset_function(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_reset_function);
+/**
+ * pci_try_reset_function - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
+ *
+ * Same as above, except return -EAGAIN if unable to lock device.
+ */
+int pci_try_reset_function(struct pci_dev *dev)
+{
+ int rc;
+
+ rc = pci_dev_reset(dev, 1);
+ if (rc)
+ return rc;
+
+ pci_dev_save_and_disable(dev);
+
+ if (pci_dev_trylock(dev)) {
+ rc = __pci_dev_reset(dev, 0);
+ pci_dev_unlock(dev);
+ } else
+ rc = -EAGAIN;
+
+ pci_dev_restore(dev);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_try_reset_function);
+
/* Lock devices from the top of the tree down */
static void pci_bus_lock(struct pci_bus *bus)
{
@@ -3612,6 +3457,32 @@ static void pci_bus_unlock(struct pci_bus *bus)
}
}
+/* Return 1 on successful lock, 0 on contention */
+static int pci_bus_trylock(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (!pci_dev_trylock(dev))
+ goto unlock;
+ if (dev->subordinate) {
+ if (!pci_bus_trylock(dev->subordinate)) {
+ pci_dev_unlock(dev);
+ goto unlock;
+ }
+ }
+ }
+ return 1;
+
+unlock:
+ list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+ pci_dev_unlock(dev);
+ }
+ return 0;
+}
+
/* Lock devices from the top of the tree down */
static void pci_slot_lock(struct pci_slot *slot)
{
@@ -3640,6 +3511,37 @@ static void pci_slot_unlock(struct pci_slot *slot)
}
}
+/* Return 1 on successful lock, 0 on contention */
+static int pci_slot_trylock(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ if (!pci_dev_trylock(dev))
+ goto unlock;
+ if (dev->subordinate) {
+ if (!pci_bus_trylock(dev->subordinate)) {
+ pci_dev_unlock(dev);
+ goto unlock;
+ }
+ }
+ }
+ return 1;
+
+unlock:
+ list_for_each_entry_continue_reverse(dev,
+ &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+ pci_dev_unlock(dev);
+ }
+ return 0;
+}
+
/* Save and disable devices from the top of the tree down */
static void pci_bus_save_and_disable(struct pci_bus *bus)
{
@@ -3763,6 +3665,35 @@ int pci_reset_slot(struct pci_slot *slot)
}
EXPORT_SYMBOL_GPL(pci_reset_slot);
+/**
+ * pci_try_reset_slot - Try to reset a PCI slot
+ * @slot: PCI slot to reset
+ *
+ * Same as above except return -EAGAIN if the slot cannot be locked
+ */
+int pci_try_reset_slot(struct pci_slot *slot)
+{
+ int rc;
+
+ rc = pci_slot_reset(slot, 1);
+ if (rc)
+ return rc;
+
+ pci_slot_save_and_disable(slot);
+
+ if (pci_slot_trylock(slot)) {
+ might_sleep();
+ rc = pci_reset_hotplug_slot(slot->hotplug, 0);
+ pci_slot_unlock(slot);
+ } else
+ rc = -EAGAIN;
+
+ pci_slot_restore(slot);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_try_reset_slot);
+
static int pci_bus_reset(struct pci_bus *bus, int probe)
{
if (!bus->self)
@@ -3822,6 +3753,35 @@ int pci_reset_bus(struct pci_bus *bus)
EXPORT_SYMBOL_GPL(pci_reset_bus);
/**
+ * pci_try_reset_bus - Try to reset a PCI bus
+ * @bus: top level PCI bus to reset
+ *
+ * Same as above except return -EAGAIN if the bus cannot be locked
+ */
+int pci_try_reset_bus(struct pci_bus *bus)
+{
+ int rc;
+
+ rc = pci_bus_reset(bus, 1);
+ if (rc)
+ return rc;
+
+ pci_bus_save_and_disable(bus);
+
+ if (pci_bus_trylock(bus)) {
+ might_sleep();
+ pci_reset_bridge_secondary_bus(bus->self);
+ pci_bus_unlock(bus);
+ } else
+ rc = -EAGAIN;
+
+ pci_bus_restore(bus);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_try_reset_bus);
+
+/**
* pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
* @dev: PCI device to query
*
@@ -4450,7 +4410,6 @@ EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
-EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
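For illustration, the new try-reset entry points return -EAGAIN instead of blocking when the device lock is contended; a minimal sketch of a caller, where the foo_reset_or_defer name is hypothetical:

/* Hypothetical caller: attempt a function reset, deferring on contention. */
static int foo_reset_or_defer(struct pci_dev *pdev)
{
	int rc = pci_try_reset_function(pdev);

	if (rc == -EAGAIN)
		dev_info(&pdev->dev, "device busy, deferring reset\n");
	return rc;
}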
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 9c91ecc1301b..4df38df224f4 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -6,7 +6,6 @@
#define PCI_CFG_SPACE_SIZE 256
#define PCI_CFG_SPACE_EXP_SIZE 4096
-extern const unsigned char pcix_bus_speed[];
extern const unsigned char pcie_link_speed[];
/* Functions internal to the PCI core code */
@@ -68,7 +67,6 @@ void pci_power_up(struct pci_dev *dev);
void pci_disable_enabled_device(struct pci_dev *dev);
int pci_finish_runtime_suspend(struct pci_dev *dev);
int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
-void pci_wakeup_bus(struct pci_bus *bus);
void pci_config_pm_runtime_get(struct pci_dev *dev);
void pci_config_pm_runtime_put(struct pci_dev *dev);
void pci_pm_init(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index cf611ab2193a..01906576ab91 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -23,10 +23,10 @@
static inline int hest_match_pci(struct acpi_hest_aer_common *p,
struct pci_dev *pci)
{
- return (0 == pci_domain_nr(pci->bus) &&
- p->bus == pci->bus->number &&
- p->device == PCI_SLOT(pci->devfn) &&
- p->function == PCI_FUNC(pci->devfn));
+ return ACPI_HEST_SEGMENT(p->bus) == pci_domain_nr(pci->bus) &&
+ ACPI_HEST_BUS(p->bus) == pci->bus->number &&
+ p->device == PCI_SLOT(pci->devfn) &&
+ p->function == PCI_FUNC(pci->devfn);
}
static inline bool hest_match_type(struct acpi_hest_header *hest_hdr,
@@ -50,14 +50,37 @@ struct aer_hest_parse_info {
int firmware_first;
};
+static int hest_source_is_pcie_aer(struct acpi_hest_header *hest_hdr)
+{
+ if (hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT ||
+ hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT ||
+ hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE)
+ return 1;
+ return 0;
+}
+
static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
{
struct aer_hest_parse_info *info = data;
struct acpi_hest_aer_common *p;
int ff;
+ if (!hest_source_is_pcie_aer(hest_hdr))
+ return 0;
+
p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+
+ /*
+ * If no specific device is supplied, determine whether
+ * FIRMWARE_FIRST is set for *any* PCIe device.
+ */
+ if (!info->pci_dev) {
+ info->firmware_first |= ff;
+ return 0;
+ }
+
+ /* Otherwise, check the specific device */
if (p->flags & ACPI_HEST_GLOBAL) {
if (hest_match_type(hest_hdr, info->pci_dev))
info->firmware_first = ff;
@@ -97,33 +120,20 @@ int pcie_aer_get_firmware_first(struct pci_dev *dev)
static bool aer_firmware_first;
-static int aer_hest_parse_aff(struct acpi_hest_header *hest_hdr, void *data)
-{
- struct acpi_hest_aer_common *p;
-
- if (aer_firmware_first)
- return 0;
-
- switch (hest_hdr->type) {
- case ACPI_HEST_TYPE_AER_ROOT_PORT:
- case ACPI_HEST_TYPE_AER_ENDPOINT:
- case ACPI_HEST_TYPE_AER_BRIDGE:
- p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
- aer_firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
- default:
- return 0;
- }
-}
-
/**
* aer_acpi_firmware_first - Check if APEI should control AER.
*/
bool aer_acpi_firmware_first(void)
{
static bool parsed = false;
+ struct aer_hest_parse_info info = {
+ .pci_dev = NULL, /* Check all PCIe devices */
+ .firmware_first = 0,
+ };
if (!parsed) {
- apei_hest_parse(aer_hest_parse_aff, NULL);
+ apei_hest_parse(aer_hest_parse, &info);
+ aer_firmware_first = info.firmware_first;
parsed = true;
}
return aer_firmware_first;
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 2c7c9f5f592c..34ff7026440c 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -124,6 +124,21 @@ static const char *aer_agent_string[] = {
"Transmitter ID"
};
+static void __print_tlp_header(struct pci_dev *dev,
+ struct aer_header_log_regs *t)
+{
+ unsigned char *tlp = (unsigned char *)t;
+
+ dev_err(&dev->dev, " TLP Header:"
+ " %02x%02x%02x%02x %02x%02x%02x%02x"
+ " %02x%02x%02x%02x %02x%02x%02x%02x\n",
+ *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
+ *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
+ *(tlp + 11), *(tlp + 10), *(tlp + 9),
+ *(tlp + 8), *(tlp + 15), *(tlp + 14),
+ *(tlp + 13), *(tlp + 12));
+}
+
static void __aer_print_error(struct pci_dev *dev,
struct aer_err_info *info)
{
@@ -153,48 +168,39 @@ static void __aer_print_error(struct pci_dev *dev,
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
+ int layer, agent;
int id = ((dev->bus->number << 8) | dev->devfn);
- if (info->status == 0) {
+ if (!info->status) {
dev_err(&dev->dev,
"PCIe Bus Error: severity=%s, type=Unaccessible, "
"id=%04x(Unregistered Agent ID)\n",
aer_error_severity_string[info->severity], id);
- } else {
- int layer, agent;
+ goto out;
+ }
- layer = AER_GET_LAYER_ERROR(info->severity, info->status);
- agent = AER_GET_AGENT(info->severity, info->status);
+ layer = AER_GET_LAYER_ERROR(info->severity, info->status);
+ agent = AER_GET_AGENT(info->severity, info->status);
- dev_err(&dev->dev,
- "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
- aer_error_severity_string[info->severity],
- aer_error_layer[layer], id, aer_agent_string[agent]);
+ dev_err(&dev->dev,
+ "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+ aer_error_severity_string[info->severity],
+ aer_error_layer[layer], id, aer_agent_string[agent]);
- dev_err(&dev->dev,
- " device [%04x:%04x] error status/mask=%08x/%08x\n",
- dev->vendor, dev->device,
- info->status, info->mask);
-
- __aer_print_error(dev, info);
-
- if (info->tlp_header_valid) {
- unsigned char *tlp = (unsigned char *) &info->tlp;
- dev_err(&dev->dev, " TLP Header:"
- " %02x%02x%02x%02x %02x%02x%02x%02x"
- " %02x%02x%02x%02x %02x%02x%02x%02x\n",
- *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
- *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
- *(tlp + 11), *(tlp + 10), *(tlp + 9),
- *(tlp + 8), *(tlp + 15), *(tlp + 14),
- *(tlp + 13), *(tlp + 12));
- }
- }
+ dev_err(&dev->dev,
+ " device [%04x:%04x] error status/mask=%08x/%08x\n",
+ dev->vendor, dev->device,
+ info->status, info->mask);
+
+ __aer_print_error(dev, info);
+ if (info->tlp_header_valid)
+ __print_tlp_header(dev, &info->tlp);
+
+out:
if (info->id && info->error_dev_num > 1 && info->id == id)
- dev_err(&dev->dev,
- " Error of this Agent(%04x) is reported first\n",
- id);
+ dev_err(&dev->dev, " Error of this Agent(%04x) is reported first\n", id);
+
trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
info->severity);
}
@@ -228,6 +234,7 @@ void cper_print_aer(struct pci_dev *dev, int cper_severity,
const char **status_strs;
aer_severity = cper_severity_to_aer(cper_severity);
+
if (aer_severity == AER_CORRECTABLE) {
status = aer->cor_status;
mask = aer->cor_mask;
@@ -240,28 +247,22 @@ void cper_print_aer(struct pci_dev *dev, int cper_severity,
status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string);
tlp_header_valid = status & AER_LOG_TLP_MASKS;
}
+
layer = AER_GET_LAYER_ERROR(aer_severity, status);
agent = AER_GET_AGENT(aer_severity, status);
- dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n",
- status, mask);
+
+ dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
cper_print_bits("", status, status_strs, status_strs_size);
dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n",
- aer_error_layer[layer], aer_agent_string[agent]);
+ aer_error_layer[layer], aer_agent_string[agent]);
+
if (aer_severity != AER_CORRECTABLE)
dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n",
- aer->uncor_severity);
- if (tlp_header_valid) {
- const unsigned char *tlp;
- tlp = (const unsigned char *)&aer->header_log;
- dev_err(&dev->dev, "aer_tlp_header:"
- " %02x%02x%02x%02x %02x%02x%02x%02x"
- " %02x%02x%02x%02x %02x%02x%02x%02x\n",
- *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
- *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
- *(tlp + 11), *(tlp + 10), *(tlp + 9),
- *(tlp + 8), *(tlp + 15), *(tlp + 14),
- *(tlp + 13), *(tlp + 12));
- }
+ aer->uncor_severity);
+
+ if (tlp_header_valid)
+ __print_tlp_header(dev, &aer->header_log);
+
trace_aer_event(dev_name(&dev->dev), (status & ~mask),
aer_severity);
}
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index f1272dc54de1..e1e7026b838d 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -984,18 +984,6 @@ void pcie_no_aspm(void)
}
}
-/**
- * pcie_aspm_enabled - is PCIe ASPM enabled?
- *
- * Returns true if ASPM has not been disabled by the command-line option
- * pcie_aspm=off.
- **/
-int pcie_aspm_enabled(void)
-{
- return !aspm_disabled;
-}
-EXPORT_SYMBOL(pcie_aspm_enabled);
-
bool pcie_aspm_support_enabled(void)
{
return aspm_support_enabled;
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 0b6e76604068..986f8eadfd39 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -79,9 +79,10 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
u16 reg16;
u32 reg32;
- nr_entries = pci_msix_table_size(dev);
- if (!nr_entries)
- return -EINVAL;
+ nr_entries = pci_msix_vec_count(dev);
+ if (nr_entries < 0)
+ return nr_entries;
+ BUG_ON(!nr_entries);
if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES)
nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES;
@@ -344,11 +345,12 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
device_enable_async_suspend(device);
retval = device_register(device);
- if (retval)
- kfree(pcie);
- else
- get_device(device);
- return retval;
+ if (retval) {
+ put_device(device);
+ return retval;
+ }
+
+ return 0;
}
/**
@@ -454,10 +456,8 @@ int pcie_port_device_resume(struct device *dev)
static int remove_iter(struct device *dev, void *data)
{
- if (dev->bus == &pcie_port_bus_type) {
- put_device(dev);
+ if (dev->bus == &pcie_port_bus_type)
device_unregister(dev);
- }
return 0;
}
@@ -498,12 +498,12 @@ static int pcie_port_probe_service(struct device *dev)
pciedev = to_pcie_device(dev);
status = driver->probe(pciedev);
- if (!status) {
- dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n",
- driver->name);
- get_device(dev);
- }
- return status;
+ if (status)
+ return status;
+
+ dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", driver->name);
+ get_device(dev);
+ return 0;
}
/**
@@ -554,7 +554,7 @@ int pcie_port_service_register(struct pcie_port_service_driver *new)
if (pcie_ports_disabled)
return -ENODEV;
- new->driver.name = (char *)new->name;
+ new->driver.name = new->name;
new->driver.bus = &pcie_port_bus_type;
new->driver.probe = pcie_port_probe_service;
new->driver.remove = pcie_port_remove_service;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 38e403dddf6e..6e34498ec9f0 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -16,7 +16,7 @@
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR 3
-struct resource busn_resource = {
+static struct resource busn_resource = {
.name = "PCI busn",
.start = 0,
.end = 255,
@@ -269,8 +269,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
region.end = l + sz;
}
- pcibios_bus_to_resource(dev, res, &region);
- pcibios_resource_to_bus(dev, &inverted_region, res);
+ pcibios_bus_to_resource(dev->bus, res, &region);
+ pcibios_resource_to_bus(dev->bus, &inverted_region, res);
/*
* If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
@@ -364,7 +364,7 @@ static void pci_read_bridge_io(struct pci_bus *child)
res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
region.start = base;
region.end = limit + io_granularity - 1;
- pcibios_bus_to_resource(dev, res, &region);
+ pcibios_bus_to_resource(dev->bus, res, &region);
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
}
}
@@ -386,7 +386,7 @@ static void pci_read_bridge_mmio(struct pci_bus *child)
res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
region.start = base;
region.end = limit + 0xfffff;
- pcibios_bus_to_resource(dev, res, &region);
+ pcibios_bus_to_resource(dev->bus, res, &region);
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
}
}
@@ -436,7 +436,7 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
res->flags |= IORESOURCE_MEM_64;
region.start = base;
region.end = limit + 0xfffff;
- pcibios_bus_to_resource(dev, res, &region);
+ pcibios_bus_to_resource(dev->bus, res, &region);
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
}
}
@@ -518,7 +518,7 @@ static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
return bridge;
}
-const unsigned char pcix_bus_speed[] = {
+static const unsigned char pcix_bus_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
PCI_SPEED_66MHz_PCIX, /* 1 */
PCI_SPEED_100MHz_PCIX, /* 2 */
@@ -999,6 +999,60 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
pdev->is_hotplug_bridge = 1;
}
+
+/**
+ * pci_cfg_space_size - get the configuration space size of the PCI device.
+ * @dev: PCI device
+ *
+ * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
+ * have 4096 bytes. Even if the device is capable, that doesn't mean we can
+ * access it. Maybe we don't have a way to generate extended config space
+ * accesses, or the device is behind a reverse Express bridge. So we try
+ * reading the dword at 0x100 which must either be 0 or a valid extended
+ * capability header.
+ */
+static int pci_cfg_space_size_ext(struct pci_dev *dev)
+{
+ u32 status;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
+ goto fail;
+ if (status == 0xffffffff)
+ goto fail;
+
+ return PCI_CFG_SPACE_EXP_SIZE;
+
+ fail:
+ return PCI_CFG_SPACE_SIZE;
+}
+
+int pci_cfg_space_size(struct pci_dev *dev)
+{
+ int pos;
+ u32 status;
+ u16 class;
+
+ class = dev->class >> 8;
+ if (class == PCI_CLASS_BRIDGE_HOST)
+ return pci_cfg_space_size_ext(dev);
+
+ if (!pci_is_pcie(dev)) {
+ pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (!pos)
+ goto fail;
+
+ pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
+ if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
+ goto fail;
+ }
+
+ return pci_cfg_space_size_ext(dev);
+
+ fail:
+ return PCI_CFG_SPACE_SIZE;
+}
+
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
/**
@@ -1084,24 +1138,24 @@ int pci_setup_device(struct pci_dev *dev)
region.end = 0x1F7;
res = &dev->resource[0];
res->flags = LEGACY_IO_RESOURCE;
- pcibios_bus_to_resource(dev, res, &region);
+ pcibios_bus_to_resource(dev->bus, res, &region);
region.start = 0x3F6;
region.end = 0x3F6;
res = &dev->resource[1];
res->flags = LEGACY_IO_RESOURCE;
- pcibios_bus_to_resource(dev, res, &region);
+ pcibios_bus_to_resource(dev->bus, res, &region);
}
if ((progif & 4) == 0) {
region.start = 0x170;
region.end = 0x177;
res = &dev->resource[2];
res->flags = LEGACY_IO_RESOURCE;
- pcibios_bus_to_resource(dev, res, &region);
+ pcibios_bus_to_resource(dev->bus, res, &region);
region.start = 0x376;
region.end = 0x376;
res = &dev->resource[3];
res->flags = LEGACY_IO_RESOURCE;
- pcibios_bus_to_resource(dev, res, &region);
+ pcibios_bus_to_resource(dev->bus, res, &region);
}
}
break;
@@ -1173,59 +1227,6 @@ static void pci_release_dev(struct device *dev)
kfree(pci_dev);
}
-/**
- * pci_cfg_space_size - get the configuration space size of the PCI device.
- * @dev: PCI device
- *
- * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
- * have 4096 bytes. Even if the device is capable, that doesn't mean we can
- * access it. Maybe we don't have a way to generate extended config space
- * accesses, or the device is behind a reverse Express bridge. So we try
- * reading the dword at 0x100 which must either be 0 or a valid extended
- * capability header.
- */
-int pci_cfg_space_size_ext(struct pci_dev *dev)
-{
- u32 status;
- int pos = PCI_CFG_SPACE_SIZE;
-
- if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
- goto fail;
- if (status == 0xffffffff)
- goto fail;
-
- return PCI_CFG_SPACE_EXP_SIZE;
-
- fail:
- return PCI_CFG_SPACE_SIZE;
-}
-
-int pci_cfg_space_size(struct pci_dev *dev)
-{
- int pos;
- u32 status;
- u16 class;
-
- class = dev->class >> 8;
- if (class == PCI_CLASS_BRIDGE_HOST)
- return pci_cfg_space_size_ext(dev);
-
- if (!pci_is_pcie(dev)) {
- pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
- if (!pos)
- goto fail;
-
- pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
- if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
- goto fail;
- }
-
- return pci_cfg_space_size_ext(dev);
-
- fail:
- return PCI_CFG_SPACE_SIZE;
-}
-
struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -1242,12 +1243,6 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_alloc_dev);
-struct pci_dev *alloc_pci_dev(void)
-{
- return pci_alloc_dev(NULL);
-}
-EXPORT_SYMBOL(alloc_pci_dev);
-
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
int crs_timeout)
{
@@ -1381,8 +1376,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
dev->match_driver = false;
ret = device_add(&dev->dev);
WARN_ON(ret < 0);
-
- pci_proc_attach_device(dev);
}
struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
@@ -2014,6 +2007,24 @@ EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
+/*
+ * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
+ * routines should always be executed under this mutex.
+ */
+static DEFINE_MUTEX(pci_rescan_remove_lock);
+
+void pci_lock_rescan_remove(void)
+{
+ mutex_lock(&pci_rescan_remove_lock);
+}
+EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
+
+void pci_unlock_rescan_remove(void)
+{
+ mutex_unlock(&pci_rescan_remove_lock);
+}
+EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
+
static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
{
const struct pci_dev *a = to_pci_dev(d_a);
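For illustration, the rescan/remove mutex introduced above is meant to bracket any sequence that rescans or removes PCI devices; a minimal sketch, where the foo_replug name is hypothetical:

/* Hypothetical sketch: replace a device under the rescan/remove lock. */
static void foo_replug(struct pci_dev *pdev)
{
	struct pci_bus *bus = pdev->bus;

	pci_lock_rescan_remove();
	pci_stop_and_remove_bus_device(pdev);
	pci_rescan_bus(bus);
	pci_unlock_rescan_remove();
}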
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 3a02717473ad..5cb726c193de 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -339,7 +339,7 @@ static void quirk_io_region(struct pci_dev *dev, int port,
/* Convert from PCI bus to resource space */
bus_region.start = region;
bus_region.end = region + size - 1;
- pcibios_bus_to_resource(dev, res, &bus_region);
+ pcibios_bus_to_resource(dev->bus, res, &bus_region);
if (!pci_claim_resource(dev, nr))
dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name);
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index cc9337a71529..8bd76c9ba21c 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -7,8 +7,6 @@ static void pci_free_resources(struct pci_dev *dev)
{
int i;
- msi_remove_pci_irq_vectors(dev);
-
pci_cleanup_rom(dev);
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct resource *res = dev->resource + i;
@@ -34,6 +32,9 @@ static void pci_stop_dev(struct pci_dev *dev)
static void pci_destroy_dev(struct pci_dev *dev)
{
+ if (!dev->dev.kobj.parent)
+ return;
+
device_del(&dev->dev);
down_write(&pci_bus_sem);
@@ -114,6 +115,14 @@ void pci_stop_and_remove_bus_device(struct pci_dev *dev)
}
EXPORT_SYMBOL(pci_stop_and_remove_bus_device);
+void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev)
+{
+ pci_lock_rescan_remove();
+ pci_stop_and_remove_bus_device(dev);
+ pci_unlock_rescan_remove();
+}
+EXPORT_SYMBOL_GPL(pci_stop_and_remove_bus_device_locked);
+
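The _locked variant above is a convenience for callers that do not already hold the rescan/remove mutex; code that already runs under pci_lock_rescan_remove() should keep calling pci_stop_and_remove_bus_device() directly. A hedged sketch of that distinction (both surrounding functions are hypothetical):

    #include <linux/pci.h>

    /* Caller outside the lock: use the self-locking wrapper. */
    static void example_eject(struct pci_dev *dev)
    {
            pci_stop_and_remove_bus_device_locked(dev);
    }

    /* Caller already under pci_lock_rescan_remove(): use the plain form. */
    static void example_eject_under_lock(struct pci_dev *dev)
    {
            pci_stop_and_remove_bus_device(dev);
    }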
void pci_stop_root_bus(struct pci_bus *bus)
{
struct pci_dev *child, *tmp;
@@ -128,7 +137,7 @@ void pci_stop_root_bus(struct pci_bus *bus)
pci_stop_bus_device(child);
/* stop the host bridge */
- device_del(&host_bridge->dev);
+ device_release_driver(&host_bridge->dev);
}
void pci_remove_root_bus(struct pci_bus *bus)
@@ -147,5 +156,5 @@ void pci_remove_root_bus(struct pci_bus *bus)
host_bridge->bus = NULL;
/* remove the host bridge */
- put_device(&host_bridge->dev);
+ device_unregister(&host_bridge->dev);
}
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index c5d0a08a8747..5d595724e5f4 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -31,7 +31,7 @@ int pci_enable_rom(struct pci_dev *pdev)
if (!res->flags)
return -1;
- pcibios_resource_to_bus(pdev, &region, res);
+ pcibios_resource_to_bus(pdev->bus, &region, res);
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_MASK;
rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 219a4106480a..138bdd6393be 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -475,7 +475,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
&bus->busn_res);
res = bus->resource[0];
- pcibios_resource_to_bus(bridge, &region, res);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_IO) {
/*
* The IO resource is allocated a range twice as large as it
@@ -489,7 +489,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
}
res = bus->resource[1];
- pcibios_resource_to_bus(bridge, &region, res);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_IO) {
dev_info(&bridge->dev, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
@@ -499,7 +499,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
}
res = bus->resource[2];
- pcibios_resource_to_bus(bridge, &region, res);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_MEM) {
dev_info(&bridge->dev, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
@@ -509,7 +509,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
}
res = bus->resource[3];
- pcibios_resource_to_bus(bridge, &region, res);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_MEM) {
dev_info(&bridge->dev, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
@@ -538,7 +538,8 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
struct pci_bus_region region;
unsigned long io_mask;
u8 io_base_lo, io_limit_lo;
- u32 l, io_upper16;
+ u16 l;
+ u32 io_upper16;
io_mask = PCI_IO_RANGE_MASK;
if (bridge->io_window_1k)
@@ -546,13 +547,12 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
/* Set up the top and bottom of the PCI I/O segment for this bus. */
res = bus->resource[0];
- pcibios_resource_to_bus(bridge, &region, res);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_IO) {
- pci_read_config_dword(bridge, PCI_IO_BASE, &l);
- l &= 0xffff0000;
+ pci_read_config_word(bridge, PCI_IO_BASE, &l);
io_base_lo = (region.start >> 8) & io_mask;
io_limit_lo = (region.end >> 8) & io_mask;
- l |= ((u32) io_limit_lo << 8) | io_base_lo;
+ l = ((u16) io_limit_lo << 8) | io_base_lo;
/* Set up upper 16 bits of I/O base/limit. */
io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
dev_info(&bridge->dev, " bridge window %pR\n", res);
@@ -564,7 +564,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
/* Temporarily disable the I/O range before updating PCI_IO_BASE. */
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
/* Update lower 16 bits of I/O base/limit. */
- pci_write_config_dword(bridge, PCI_IO_BASE, l);
+ pci_write_config_word(bridge, PCI_IO_BASE, l);
/* Update upper 16 bits of I/O base/limit. */
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
@@ -578,7 +578,7 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus)
/* Set up the top and bottom of the PCI Memory segment for this bus. */
res = bus->resource[1];
- pcibios_resource_to_bus(bridge, &region, res);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_MEM) {
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
@@ -604,7 +604,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
/* Set up PREF base/limit. */
bu = lu = 0;
res = bus->resource[2];
- pcibios_resource_to_bus(bridge, &region, res);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_PREFETCH) {
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
@@ -665,21 +665,23 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
pci_read_config_word(bridge, PCI_IO_BASE, &io);
if (!io) {
- pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
+ pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
pci_read_config_word(bridge, PCI_IO_BASE, &io);
pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
}
if (io)
b_res[0].flags |= IORESOURCE_IO;
+
/* DECchip 21050 pass 2 errata: the bridge may miss an address
disconnect boundary by one PCI data phase.
Workaround: do not use prefetching on this device. */
if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
return;
+
pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
if (!pmem) {
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
- 0xfff0fff0);
+ 0xffe0fff0);
pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
}
@@ -1422,7 +1424,7 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data)
if (!r->flags)
continue;
- pcibios_resource_to_bus(dev, &region, r);
+ pcibios_resource_to_bus(dev->bus, &region, r);
if (!region.start) {
*unassigned = true;
return 1; /* return early from pci_walk_bus() */
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 83c4d3bc47ab..5c060b152ce6 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -52,7 +52,7 @@ void pci_update_resource(struct pci_dev *dev, int resno)
if (res->flags & IORESOURCE_PCI_FIXED)
return;
- pcibios_resource_to_bus(dev, &region, res);
+ pcibios_resource_to_bus(dev->bus, &region, res);
new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
if (res->flags & IORESOURCE_IO)
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 448ca562d1f8..7dd62fa9d0bd 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -320,32 +320,6 @@ err:
EXPORT_SYMBOL_GPL(pci_create_slot);
/**
- * pci_renumber_slot - update %struct pci_slot -> number
- * @slot: &struct pci_slot to update
- * @slot_nr: new number for slot
- *
- * The primary purpose of this interface is to allow callers who earlier
- * created a placeholder slot in pci_create_slot() by passing a -1 as
- * slot_nr, to update their %struct pci_slot with the correct @slot_nr.
- */
-void pci_renumber_slot(struct pci_slot *slot, int slot_nr)
-{
- struct pci_slot *tmp;
-
- down_write(&pci_bus_sem);
-
- list_for_each_entry(tmp, &slot->bus->slots, list) {
- WARN_ON(tmp->number == slot_nr);
- goto out;
- }
-
- slot->number = slot_nr;
-out:
- up_write(&pci_bus_sem);
-}
-EXPORT_SYMBOL_GPL(pci_renumber_slot);
-
-/**
* pci_destroy_slot - decrement refcount for physical PCI slot
* @slot: struct pci_slot to decrement
*
diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c
new file mode 100644
index 000000000000..7e1304d2e389
--- /dev/null
+++ b/drivers/pci/vc.c
@@ -0,0 +1,434 @@
+/*
+ * PCI Virtual Channel support
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/types.h>
+
+/**
+ * pci_vc_save_restore_dwords - Save or restore a series of dwords
+ * @dev: device
+ * @pos: starting config space position
+ * @buf: buffer to save to or restore from
+ * @dwords: number of dwords to save/restore
+ * @save: whether to save or restore
+ */
+static void pci_vc_save_restore_dwords(struct pci_dev *dev, int pos,
+ u32 *buf, int dwords, bool save)
+{
+ int i;
+
+ for (i = 0; i < dwords; i++, buf++) {
+ if (save)
+ pci_read_config_dword(dev, pos + (i * 4), buf);
+ else
+ pci_write_config_dword(dev, pos + (i * 4), *buf);
+ }
+}
+
+/**
+ * pci_vc_load_arb_table - load and wait for VC arbitration table
+ * @dev: device
+ * @pos: starting position of VC capability (VC/VC9/MFVC)
+ *
+ * Set Load VC Arbitration Table bit requesting hardware to apply the VC
+ * Arbitration Table (previously loaded). When the VC Arbitration Table
+ * Status clears, hardware has latched the table into VC arbitration logic.
+ */
+static void pci_vc_load_arb_table(struct pci_dev *dev, int pos)
+{
+ u16 ctrl;
+
+ pci_read_config_word(dev, pos + PCI_VC_PORT_CTRL, &ctrl);
+ pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
+ ctrl | PCI_VC_PORT_CTRL_LOAD_TABLE);
+ if (pci_wait_for_pending(dev, pos + PCI_VC_PORT_STATUS,
+ PCI_VC_PORT_STATUS_TABLE))
+ return;
+
+ dev_err(&dev->dev, "VC arbitration table failed to load\n");
+}
+
+/**
+ * pci_vc_load_port_arb_table - Load and wait for VC port arbitration table
+ * @dev: device
+ * @pos: starting position of VC capability (VC/VC9/MFVC)
+ * @res: VC resource number, i.e. VCn (0-7)
+ *
+ * Set Load Port Arbitration Table bit requesting hardware to apply the Port
+ * Arbitration Table (previously loaded). When the Port Arbitration Table
+ * Status clears, hardware has latched the table into port arbitration logic.
+ */
+static void pci_vc_load_port_arb_table(struct pci_dev *dev, int pos, int res)
+{
+ int ctrl_pos, status_pos;
+ u32 ctrl;
+
+ ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF);
+ status_pos = pos + PCI_VC_RES_STATUS + (res * PCI_CAP_VC_PER_VC_SIZEOF);
+
+ pci_read_config_dword(dev, ctrl_pos, &ctrl);
+ pci_write_config_dword(dev, ctrl_pos,
+ ctrl | PCI_VC_RES_CTRL_LOAD_TABLE);
+
+ if (pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_TABLE))
+ return;
+
+ dev_err(&dev->dev, "VC%d port arbitration table failed to load\n", res);
+}
+
+/**
+ * pci_vc_enable - Enable virtual channel
+ * @dev: device
+ * @pos: starting position of VC capability (VC/VC9/MFVC)
+ * @res: VC resource number, i.e. VCn (0-7)
+ *
+ * A VC is enabled by setting the enable bit in matching resource control
+ * registers on both sides of a link. We therefore need to find the opposite
+ * end of the link. To keep this simple we enable from the downstream device.
+ * RC devices do not have an upstream device, nor does it seem that VC9 does
+ * (spec is unclear). Once we find the upstream device, match the VC ID to
+ * get the correct resource, disable and enable on both ends.
+ */
+static void pci_vc_enable(struct pci_dev *dev, int pos, int res)
+{
+ int ctrl_pos, status_pos, id, pos2, evcc, i, ctrl_pos2, status_pos2;
+ u32 ctrl, header, cap1, ctrl2;
+ struct pci_dev *link = NULL;
+
+ /* Enable VCs from the downstream device */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+
+ ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF);
+ status_pos = pos + PCI_VC_RES_STATUS + (res * PCI_CAP_VC_PER_VC_SIZEOF);
+
+ pci_read_config_dword(dev, ctrl_pos, &ctrl);
+ id = ctrl & PCI_VC_RES_CTRL_ID;
+
+ pci_read_config_dword(dev, pos, &header);
+
+ /* If there is no opposite end of the link, skip to enable */
+ if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_VC9 ||
+ pci_is_root_bus(dev->bus))
+ goto enable;
+
+ pos2 = pci_find_ext_capability(dev->bus->self, PCI_EXT_CAP_ID_VC);
+ if (!pos2)
+ goto enable;
+
+ pci_read_config_dword(dev->bus->self, pos2 + PCI_VC_PORT_CAP1, &cap1);
+ evcc = cap1 & PCI_VC_CAP1_EVCC;
+
+ /* VC0 is hardwired enabled, so we can start with 1 */
+ for (i = 1; i < evcc + 1; i++) {
+ ctrl_pos2 = pos2 + PCI_VC_RES_CTRL +
+ (i * PCI_CAP_VC_PER_VC_SIZEOF);
+ status_pos2 = pos2 + PCI_VC_RES_STATUS +
+ (i * PCI_CAP_VC_PER_VC_SIZEOF);
+ pci_read_config_dword(dev->bus->self, ctrl_pos2, &ctrl2);
+ if ((ctrl2 & PCI_VC_RES_CTRL_ID) == id) {
+ link = dev->bus->self;
+ break;
+ }
+ }
+
+ if (!link)
+ goto enable;
+
+ /* Disable if enabled */
+ if (ctrl2 & PCI_VC_RES_CTRL_ENABLE) {
+ ctrl2 &= ~PCI_VC_RES_CTRL_ENABLE;
+ pci_write_config_dword(link, ctrl_pos2, ctrl2);
+ }
+
+ /* Enable on both ends */
+ ctrl2 |= PCI_VC_RES_CTRL_ENABLE;
+ pci_write_config_dword(link, ctrl_pos2, ctrl2);
+enable:
+ ctrl |= PCI_VC_RES_CTRL_ENABLE;
+ pci_write_config_dword(dev, ctrl_pos, ctrl);
+
+ if (!pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_NEGO))
+ dev_err(&dev->dev, "VC%d negotiation stuck pending\n", id);
+
+ if (link && !pci_wait_for_pending(link, status_pos2,
+ PCI_VC_RES_STATUS_NEGO))
+ dev_err(&link->dev, "VC%d negotiation stuck pending\n", id);
+}
+
+/**
+ * pci_vc_do_save_buffer - Size, save, or restore VC state
+ * @dev: device
+ * @pos: starting position of VC capability (VC/VC9/MFVC)
+ * @save_state: buffer for save/restore
+ * @name: for error message
+ * @save: if provided a buffer, this indicates what to do with it
+ *
+ * Walking Virtual Channel config space to size, save, or restore it
+ * is complicated, so we do it all from one function to reduce code and
+ * guarantee ordering matches in the buffer. When called with NULL
+ * @save_state, return the size of the necessary save buffer. When called
+ * with a non-NULL @save_state, @save determines whether we save to the
+ * buffer or restore from it.
+ */
+static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
+ struct pci_cap_saved_state *save_state,
+ bool save)
+{
+ u32 cap1;
+ char evcc, lpevcc, parb_size;
+ int i, len = 0;
+ u8 *buf = save_state ? (u8 *)save_state->cap.data : NULL;
+
+ /* Sanity check buffer size for save/restore */
+ if (buf && save_state->cap.size !=
+ pci_vc_do_save_buffer(dev, pos, NULL, save)) {
+ dev_err(&dev->dev,
+ "VC save buffer size does not match @0x%x\n", pos);
+ return -ENOMEM;
+ }
+
+ pci_read_config_dword(dev, pos + PCI_VC_PORT_CAP1, &cap1);
+ /* Extended VC Count (not counting VC0) */
+ evcc = cap1 & PCI_VC_CAP1_EVCC;
+ /* Low Priority Extended VC Count (not counting VC0) */
+ lpevcc = (cap1 & PCI_VC_CAP1_LPEVCC) >> 4;
+ /* Port Arbitration Table Entry Size (bits) */
+ parb_size = 1 << ((cap1 & PCI_VC_CAP1_ARB_SIZE) >> 10);
+
+ /*
+ * Port VC Control Register contains VC Arbitration Select, which
+ * cannot be modified when more than one LPVC is in operation. We
+ * therefore save/restore it first, as only VC0 should be enabled
+ * after device reset.
+ */
+ if (buf) {
+ if (save)
+ pci_read_config_word(dev, pos + PCI_VC_PORT_CTRL,
+ (u16 *)buf);
+ else
+ pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
+ *(u16 *)buf);
+ buf += 2;
+ }
+ len += 2;
+
+ /*
+ * If we have any Low Priority VCs and a VC Arbitration Table Offset
+ * in Port VC Capability Register 2 then save/restore it next.
+ */
+ if (lpevcc) {
+ u32 cap2;
+ int vcarb_offset;
+
+ pci_read_config_dword(dev, pos + PCI_VC_PORT_CAP2, &cap2);
+ vcarb_offset = ((cap2 & PCI_VC_CAP2_ARB_OFF) >> 24) * 16;
+
+ if (vcarb_offset) {
+ int size, vcarb_phases = 0;
+
+ if (cap2 & PCI_VC_CAP2_128_PHASE)
+ vcarb_phases = 128;
+ else if (cap2 & PCI_VC_CAP2_64_PHASE)
+ vcarb_phases = 64;
+ else if (cap2 & PCI_VC_CAP2_32_PHASE)
+ vcarb_phases = 32;
+
+ /* Fixed 4 bits per phase per lpevcc (plus VC0) */
+ size = ((lpevcc + 1) * vcarb_phases * 4) / 8;
+
+ if (size && buf) {
+ pci_vc_save_restore_dwords(dev,
+ pos + vcarb_offset,
+ (u32 *)buf,
+ size / 4, save);
+ /*
+ * On restore, we need to signal hardware to
+ * re-load the VC Arbitration Table.
+ */
+ if (!save)
+ pci_vc_load_arb_table(dev, pos);
+
+ buf += size;
+ }
+ len += size;
+ }
+ }
+
+ /*
+ * In addition to each VC Resource Control Register, we may have a
+ * Port Arbitration Table attached to each VC. The Port Arbitration
+ * Table Offset in each VC Resource Capability Register tells us if
+ * it exists. The entry size is global from the Port VC Capability
+ * Register1 above. The number of phases is determined per VC.
+ */
+ for (i = 0; i < evcc + 1; i++) {
+ u32 cap;
+ int parb_offset;
+
+ pci_read_config_dword(dev, pos + PCI_VC_RES_CAP +
+ (i * PCI_CAP_VC_PER_VC_SIZEOF), &cap);
+ parb_offset = ((cap & PCI_VC_RES_CAP_ARB_OFF) >> 24) * 16;
+ if (parb_offset) {
+ int size, parb_phases = 0;
+
+ if (cap & PCI_VC_RES_CAP_256_PHASE)
+ parb_phases = 256;
+ else if (cap & (PCI_VC_RES_CAP_128_PHASE |
+ PCI_VC_RES_CAP_128_PHASE_TB))
+ parb_phases = 128;
+ else if (cap & PCI_VC_RES_CAP_64_PHASE)
+ parb_phases = 64;
+ else if (cap & PCI_VC_RES_CAP_32_PHASE)
+ parb_phases = 32;
+
+ size = (parb_size * parb_phases) / 8;
+
+ if (size && buf) {
+ pci_vc_save_restore_dwords(dev,
+ pos + parb_offset,
+ (u32 *)buf,
+ size / 4, save);
+ buf += size;
+ }
+ len += size;
+ }
+
+ /* VC Resource Control Register */
+ if (buf) {
+ int ctrl_pos = pos + PCI_VC_RES_CTRL +
+ (i * PCI_CAP_VC_PER_VC_SIZEOF);
+ if (save)
+ pci_read_config_dword(dev, ctrl_pos,
+ (u32 *)buf);
+ else {
+ u32 tmp, ctrl = *(u32 *)buf;
+ /*
+ * For an FLR case, the VC config may remain.
+ * Preserve enable bit, restore the rest.
+ */
+ pci_read_config_dword(dev, ctrl_pos, &tmp);
+ tmp &= PCI_VC_RES_CTRL_ENABLE;
+ tmp |= ctrl & ~PCI_VC_RES_CTRL_ENABLE;
+ pci_write_config_dword(dev, ctrl_pos, tmp);
+ /* Load port arbitration table if used */
+ if (ctrl & PCI_VC_RES_CTRL_ARB_SELECT)
+ pci_vc_load_port_arb_table(dev, pos, i);
+ /* Re-enable if needed */
+ if ((ctrl ^ tmp) & PCI_VC_RES_CTRL_ENABLE)
+ pci_vc_enable(dev, pos, i);
+ }
+ buf += 4;
+ }
+ len += 4;
+ }
+
+ return buf ? 0 : len;
+}
+
+static struct {
+ u16 id;
+ const char *name;
+} vc_caps[] = { { PCI_EXT_CAP_ID_MFVC, "MFVC" },
+ { PCI_EXT_CAP_ID_VC, "VC" },
+ { PCI_EXT_CAP_ID_VC9, "VC9" } };
+
+/**
+ * pci_save_vc_state - Save VC state to pre-allocated save buffer
+ * @dev: device
+ *
+ * For each type of VC capability, VC/VC9/MFVC, find the capability and
+ * save it to the pre-allocated save buffer.
+ */
+int pci_save_vc_state(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vc_caps); i++) {
+ int pos, ret;
+ struct pci_cap_saved_state *save_state;
+
+ pos = pci_find_ext_capability(dev, vc_caps[i].id);
+ if (!pos)
+ continue;
+
+ save_state = pci_find_saved_ext_cap(dev, vc_caps[i].id);
+ if (!save_state) {
+ dev_err(&dev->dev, "%s buffer not found in %s\n",
+ vc_caps[i].name, __func__);
+ return -ENOMEM;
+ }
+
+ ret = pci_vc_do_save_buffer(dev, pos, save_state, true);
+ if (ret) {
+ dev_err(&dev->dev, "%s save unsuccessful %s\n",
+ vc_caps[i].name, __func__);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * pci_restore_vc_state - Restore VC state from save buffer
+ * @dev: device
+ *
+ * For each type of VC capability, VC/VC9/MFVC, find the capability and
+ * restore it from the previously saved buffer.
+ */
+void pci_restore_vc_state(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vc_caps); i++) {
+ int pos;
+ struct pci_cap_saved_state *save_state;
+
+ pos = pci_find_ext_capability(dev, vc_caps[i].id);
+ save_state = pci_find_saved_ext_cap(dev, vc_caps[i].id);
+ if (!save_state || !pos)
+ continue;
+
+ pci_vc_do_save_buffer(dev, pos, save_state, false);
+ }
+}
+
+/**
+ * pci_allocate_vc_save_buffers - Allocate save buffers for VC caps
+ * @dev: device
+ *
+ * For each type of VC capability, VC/VC9/MFVC, find the capability, size
+ * it, and allocate a buffer for save/restore.
+ */
+void pci_allocate_vc_save_buffers(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vc_caps); i++) {
+ int len, pos = pci_find_ext_capability(dev, vc_caps[i].id);
+
+ if (!pos)
+ continue;
+
+ len = pci_vc_do_save_buffer(dev, pos, NULL, false);
+ if (pci_add_ext_cap_save_buffer(dev, vc_caps[i].id, len))
+ dev_err(&dev->dev,
+ "unable to preallocate %s save buffer\n",
+ vc_caps[i].name);
+ }
+}
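Taken together, the three exported entry points in this new file are meant to be driven from the generic PCI state save/restore machinery; the standalone sequence below is only an illustrative sketch (the function and the reset step are hypothetical, and buffer allocation normally happens once at enumeration time rather than per reset):

    #include <linux/pci.h>

    static int example_reset_preserving_vc(struct pci_dev *dev)
    {
            int ret;

            pci_allocate_vc_save_buffers(dev);      /* normally done when the device is set up */

            ret = pci_save_vc_state(dev);
            if (ret)
                    return ret;

            /* ... issue the reset (e.g. an FLR) here ... */

            pci_restore_vc_state(dev);
            return 0;
    }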
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index f7197a790341..179b8edc2262 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -20,6 +20,7 @@
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/time.h>
+#include <xen/platform_pci.h>
#include <asm/xen/swiotlb-xen.h>
#define INVALID_GRANT_REF (0)
@@ -471,12 +472,15 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
}
pcifront_init_sd(sd, domain, bus, pdev);
+ pci_lock_rescan_remove();
+
b = pci_scan_bus_parented(&pdev->xdev->dev, bus,
&pcifront_bus_ops, sd);
if (!b) {
dev_err(&pdev->xdev->dev,
"Error creating PCI Frontend Bus!\n");
err = -ENOMEM;
+ pci_unlock_rescan_remove();
goto err_out;
}
@@ -494,6 +498,7 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
/* Create SysFS and notify udev of the devices. Aka: "going live" */
pci_bus_add_devices(b);
+ pci_unlock_rescan_remove();
return err;
err_out:
@@ -556,6 +561,7 @@ static void pcifront_free_roots(struct pcifront_device *pdev)
dev_dbg(&pdev->xdev->dev, "cleaning up root buses\n");
+ pci_lock_rescan_remove();
list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) {
list_del(&bus_entry->list);
@@ -568,6 +574,7 @@ static void pcifront_free_roots(struct pcifront_device *pdev)
kfree(bus_entry);
}
+ pci_unlock_rescan_remove();
}
static pci_ers_result_t pcifront_common_process(int cmd,
@@ -1043,8 +1050,10 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
domain, bus, slot, func);
continue;
}
+ pci_lock_rescan_remove();
pci_stop_and_remove_bus_device(pci_dev);
pci_dev_put(pci_dev);
+ pci_unlock_rescan_remove();
dev_dbg(&pdev->xdev->dev,
"PCI device %04x:%02x:%02x.%d removed.\n",
@@ -1138,6 +1147,9 @@ static int __init pcifront_init(void)
if (!xen_pv_domain() || xen_initial_domain())
return -ENODEV;
+ if (!xen_has_pv_devices())
+ return -ENODEV;
+
pci_frontend_registrar(1 /* enable */);
return xenbus_register_frontend(&xenpci_driver);
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index ed3b522601b3..971991bab975 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -303,7 +303,7 @@ static int bfin_cf_remove(struct platform_device *pdev)
static struct platform_driver bfin_cf_driver = {
.driver = {
- .name = (char *)driver_name,
+ .name = driver_name,
.owner = THIS_MODULE,
},
.probe = bfin_cf_probe,
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index b2a98cdbd0d2..8bde61952d20 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -70,6 +70,8 @@ int __ref cb_alloc(struct pcmcia_socket *s)
struct pci_dev *dev;
unsigned int max, pass;
+ pci_lock_rescan_remove();
+
s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
pci_fixup_cardbus(bus);
@@ -93,6 +95,7 @@ int __ref cb_alloc(struct pcmcia_socket *s)
pci_bus_add_devices(bus);
+ pci_unlock_rescan_remove();
return 0;
}
@@ -115,6 +118,10 @@ void cb_free(struct pcmcia_socket *s)
if (!bus)
return;
+ pci_lock_rescan_remove();
+
list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list)
pci_stop_and_remove_bus_device(dev);
+
+ pci_unlock_rescan_remove();
}
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 1b206eac5f93..5ea64d0f61ab 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -359,7 +359,7 @@ MODULE_DEVICE_TABLE(of, electra_cf_match);
static struct platform_driver electra_cf_driver = {
.driver = {
- .name = (char *)driver_name,
+ .name = driver_name,
.owner = THIS_MODULE,
.of_match_table = electra_cf_match,
},
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 519c4d6003a6..7d47456429a1 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -608,7 +608,7 @@ static int i82092aa_set_mem_map(struct pcmcia_socket *socket, struct pccard_mem_
enter("i82092aa_set_mem_map");
- pcibios_resource_to_bus(sock_info->dev, &region, mem->res);
+ pcibios_resource_to_bus(sock_info->dev->bus, &region, mem->res);
map = mem->map;
if (map > 4) {
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index dc18a3a5e010..8485761e76af 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -445,7 +445,7 @@ static int yenta_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *
unsigned int start, stop, card_start;
unsigned short word;
- pcibios_resource_to_bus(socket->dev, &region, mem->res);
+ pcibios_resource_to_bus(socket->dev->bus, &region, mem->res);
map = mem->map;
start = region.start;
@@ -709,7 +709,7 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type
region.start = config_readl(socket, addr_start) & mask;
region.end = config_readl(socket, addr_end) | ~mask;
if (region.start && region.end > region.start && !override_bios) {
- pcibios_bus_to_resource(dev, res, &region);
+ pcibios_bus_to_resource(dev->bus, res, &region);
if (pci_claim_resource(dev, PCI_BRIDGE_RESOURCES + nr) == 0)
return 0;
dev_printk(KERN_INFO, &dev->dev,
@@ -1033,7 +1033,7 @@ static void yenta_config_init(struct yenta_socket *socket)
struct pci_dev *dev = socket->dev;
struct pci_bus_region region;
- pcibios_resource_to_bus(socket->dev, &region, &dev->resource[0]);
+ pcibios_resource_to_bus(socket->dev->bus, &region, &dev->resource[0]);
config_writel(socket, CB_LEGACY_MODE_BASE, 0);
config_writel(socket, PCI_BASE_ADDRESS_0, region.start);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index a344f3d52361..afa2354f6600 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -16,16 +16,23 @@ config GENERIC_PHY
framework should select this config.
config PHY_EXYNOS_MIPI_VIDEO
+ depends on HAS_IOMEM
tristate "S5P/EXYNOS SoC series MIPI CSI-2/DSI PHY driver"
help
Support for MIPI CSI-2 and MIPI DSI DPHY found on Samsung S5P
and EXYNOS SoCs.
+config PHY_MVEBU_SATA
+ def_bool y
+ depends on ARCH_KIRKWOOD || ARCH_DOVE
+ depends on OF
+ select GENERIC_PHY
+
config OMAP_USB2
tristate "OMAP USB2 PHY Driver"
depends on ARCH_OMAP2PLUS
+ depends on USB_PHY
select GENERIC_PHY
- select USB_PHY
select OMAP_CONTROL_USB
help
Enable this to support the transceiver that is part of SOC. This
@@ -36,8 +43,8 @@ config OMAP_USB2
config TWL4030_USB
tristate "TWL4030 USB Transceiver Driver"
depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
+ depends on USB_PHY
select GENERIC_PHY
- select USB_PHY
help
Enable this to support the USB OTG transceiver on TWL4030
family chips (including the TWL5030 and TPS659x0 devices).
@@ -51,4 +58,10 @@ config PHY_EXYNOS_DP_VIDEO
help
Support for Display Port PHY found on Samsung EXYNOS SoCs.
+config BCM_KONA_USB2_PHY
+ tristate "Broadcom Kona USB2 PHY Driver"
+ depends on GENERIC_PHY
+ help
+ Enable this to support the Broadcom Kona USB 2.0 PHY.
+
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index d0caae9cfb83..b57c25371cca 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -3,7 +3,9 @@
#
obj-$(CONFIG_GENERIC_PHY) += phy-core.o
+obj-$(CONFIG_BCM_KONA_USB2_PHY) += phy-bcm-kona-usb2.o
obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO) += phy-exynos-dp-video.o
obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO) += phy-exynos-mipi-video.o
+obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o
obj-$(CONFIG_OMAP_USB2) += phy-omap-usb2.o
obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o
diff --git a/drivers/phy/phy-bcm-kona-usb2.c b/drivers/phy/phy-bcm-kona-usb2.c
new file mode 100644
index 000000000000..efc5c1a13a5d
--- /dev/null
+++ b/drivers/phy/phy-bcm-kona-usb2.c
@@ -0,0 +1,158 @@
+/*
+ * phy-bcm-kona-usb2.c - Broadcom Kona USB2 Phy Driver
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Matt Porter <mporter@linaro.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#define OTGCTL (0)
+#define OTGCTL_OTGSTAT2 BIT(31)
+#define OTGCTL_OTGSTAT1 BIT(30)
+#define OTGCTL_PRST_N_SW BIT(11)
+#define OTGCTL_HRESET_N BIT(10)
+#define OTGCTL_UTMI_LINE_STATE1 BIT(9)
+#define OTGCTL_UTMI_LINE_STATE0 BIT(8)
+
+#define P1CTL (8)
+#define P1CTL_SOFT_RESET BIT(1)
+#define P1CTL_NON_DRIVING BIT(0)
+
+struct bcm_kona_usb {
+ void __iomem *regs;
+};
+
+static void bcm_kona_usb_phy_power(struct bcm_kona_usb *phy, int on)
+{
+ u32 val;
+
+ val = readl(phy->regs + OTGCTL);
+ if (on) {
+ /* Configure and power PHY */
+ val &= ~(OTGCTL_OTGSTAT2 | OTGCTL_OTGSTAT1 |
+ OTGCTL_UTMI_LINE_STATE1 | OTGCTL_UTMI_LINE_STATE0);
+ val |= OTGCTL_PRST_N_SW | OTGCTL_HRESET_N;
+ } else {
+ val &= ~(OTGCTL_PRST_N_SW | OTGCTL_HRESET_N);
+ }
+ writel(val, phy->regs + OTGCTL);
+}
+
+static int bcm_kona_usb_phy_init(struct phy *gphy)
+{
+ struct bcm_kona_usb *phy = phy_get_drvdata(gphy);
+ u32 val;
+
+ /* Soft reset PHY */
+ val = readl(phy->regs + P1CTL);
+ val &= ~P1CTL_NON_DRIVING;
+ val |= P1CTL_SOFT_RESET;
+ writel(val, phy->regs + P1CTL);
+ writel(val & ~P1CTL_SOFT_RESET, phy->regs + P1CTL);
+ /* Reset needs to be asserted for 2ms */
+ mdelay(2);
+ writel(val | P1CTL_SOFT_RESET, phy->regs + P1CTL);
+
+ return 0;
+}
+
+static int bcm_kona_usb_phy_power_on(struct phy *gphy)
+{
+ struct bcm_kona_usb *phy = phy_get_drvdata(gphy);
+
+ bcm_kona_usb_phy_power(phy, 1);
+
+ return 0;
+}
+
+static int bcm_kona_usb_phy_power_off(struct phy *gphy)
+{
+ struct bcm_kona_usb *phy = phy_get_drvdata(gphy);
+
+ bcm_kona_usb_phy_power(phy, 0);
+
+ return 0;
+}
+
+static struct phy_ops ops = {
+ .init = bcm_kona_usb_phy_init,
+ .power_on = bcm_kona_usb_phy_power_on,
+ .power_off = bcm_kona_usb_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int bcm_kona_usb2_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm_kona_usb *phy;
+ struct resource *res;
+ struct phy *gphy;
+ struct phy_provider *phy_provider;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ phy->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(phy->regs))
+ return PTR_ERR(phy->regs);
+
+ platform_set_drvdata(pdev, phy);
+
+ gphy = devm_phy_create(dev, &ops, NULL);
+ if (IS_ERR(gphy))
+ return PTR_ERR(gphy);
+
+ /* The Kona PHY supports an 8-bit wide UTMI interface */
+ phy_set_bus_width(gphy, 8);
+
+ phy_set_drvdata(gphy, phy);
+
+ phy_provider = devm_of_phy_provider_register(dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ return 0;
+}
+
+static const struct of_device_id bcm_kona_usb2_dt_ids[] = {
+ { .compatible = "brcm,kona-usb2-phy" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, bcm_kona_usb2_dt_ids);
+
+static struct platform_driver bcm_kona_usb2_driver = {
+ .probe = bcm_kona_usb2_probe,
+ .driver = {
+ .name = "bcm-kona-usb2",
+ .owner = THIS_MODULE,
+ .of_match_table = bcm_kona_usb2_dt_ids,
+ },
+};
+
+module_platform_driver(bcm_kona_usb2_driver);
+
+MODULE_ALIAS("platform:bcm-kona-usb2");
+MODULE_AUTHOR("Matt Porter <mporter@linaro.org>");
+MODULE_DESCRIPTION("BCM Kona USB 2.0 PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 03cf8fb81554..5f5b0f4be5be 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -94,19 +94,31 @@ static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
int phy_pm_runtime_get(struct phy *phy)
{
+ int ret;
+
if (!pm_runtime_enabled(&phy->dev))
return -ENOTSUPP;
- return pm_runtime_get(&phy->dev);
+ ret = pm_runtime_get(&phy->dev);
+ if (ret < 0 && ret != -EINPROGRESS)
+ pm_runtime_put_noidle(&phy->dev);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_get);
int phy_pm_runtime_get_sync(struct phy *phy)
{
+ int ret;
+
if (!pm_runtime_enabled(&phy->dev))
return -ENOTSUPP;
- return pm_runtime_get_sync(&phy->dev);
+ ret = pm_runtime_get_sync(&phy->dev);
+ if (ret < 0)
+ pm_runtime_put_sync(&phy->dev);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);
@@ -150,18 +162,22 @@ int phy_init(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
mutex_lock(&phy->mutex);
- if (phy->init_count++ == 0 && phy->ops->init) {
+ if (phy->init_count == 0 && phy->ops->init) {
ret = phy->ops->init(phy);
if (ret < 0) {
dev_err(&phy->dev, "phy init failed --> %d\n", ret);
goto out;
}
}
+ ++phy->init_count;
out:
mutex_unlock(&phy->mutex);
@@ -174,18 +190,22 @@ int phy_exit(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
mutex_lock(&phy->mutex);
- if (--phy->init_count == 0 && phy->ops->exit) {
+ if (phy->init_count == 1 && phy->ops->exit) {
ret = phy->ops->exit(phy);
if (ret < 0) {
dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
goto out;
}
}
+ --phy->init_count;
out:
mutex_unlock(&phy->mutex);
@@ -196,23 +216,30 @@ EXPORT_SYMBOL_GPL(phy_exit);
int phy_power_on(struct phy *phy)
{
- int ret = -ENOTSUPP;
+ int ret;
+
+ if (!phy)
+ return 0;
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
mutex_lock(&phy->mutex);
- if (phy->power_count++ == 0 && phy->ops->power_on) {
+ if (phy->power_count == 0 && phy->ops->power_on) {
ret = phy->ops->power_on(phy);
if (ret < 0) {
dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
goto out;
}
}
+ ++phy->power_count;
+ mutex_unlock(&phy->mutex);
+ return 0;
out:
mutex_unlock(&phy->mutex);
+ phy_pm_runtime_put_sync(phy);
return ret;
}
@@ -220,22 +247,25 @@ EXPORT_SYMBOL_GPL(phy_power_on);
int phy_power_off(struct phy *phy)
{
- int ret = -ENOTSUPP;
+ int ret;
+
+ if (!phy)
+ return 0;
mutex_lock(&phy->mutex);
- if (--phy->power_count == 0 && phy->ops->power_off) {
+ if (phy->power_count == 1 && phy->ops->power_off) {
ret = phy->ops->power_off(phy);
if (ret < 0) {
dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
- goto out;
+ mutex_unlock(&phy->mutex);
+ return ret;
}
}
-
-out:
+ --phy->power_count;
mutex_unlock(&phy->mutex);
phy_pm_runtime_put(phy);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(phy_power_off);
@@ -290,7 +320,7 @@ err0:
*/
void phy_put(struct phy *phy)
{
- if (IS_ERR(phy))
+ if (!phy || IS_ERR(phy))
return;
module_put(phy->ops->owner);
@@ -310,6 +340,9 @@ void devm_phy_put(struct device *dev, struct phy *phy)
{
int r;
+ if (!phy)
+ return;
+
r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
@@ -360,7 +393,7 @@ EXPORT_SYMBOL_GPL(of_phy_simple_xlate);
struct phy *phy_get(struct device *dev, const char *string)
{
int index = 0;
- struct phy *phy = NULL;
+ struct phy *phy;
if (string == NULL) {
dev_WARN(dev, "missing string\n");
@@ -393,6 +426,27 @@ struct phy *phy_get(struct device *dev, const char *string)
EXPORT_SYMBOL_GPL(phy_get);
/**
+ * phy_optional_get() - lookup and obtain a reference to an optional phy.
+ * @dev: device that requests this phy
+ * @string: the phy name as given in the dt data or the name of the controller
+ * port for non-dt case
+ *
+ * Returns the phy driver, after getting a refcount to it; or
+ * NULL if there is no such phy. The caller is responsible for
+ * calling phy_put() to release that count.
+ */
+struct phy *phy_optional_get(struct device *dev, const char *string)
+{
+ struct phy *phy = phy_get(dev, string);
+
+ if (PTR_ERR(phy) == -ENODEV)
+ phy = NULL;
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(phy_optional_get);
+
+/**
* devm_phy_get() - lookup and obtain a reference to a phy.
* @dev: device that requests this phy
* @string: the phy name as given in the dt data or phy device name
@@ -423,6 +477,30 @@ struct phy *devm_phy_get(struct device *dev, const char *string)
EXPORT_SYMBOL_GPL(devm_phy_get);
/**
+ * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
+ * @dev: device that requests this phy
+ * @string: the phy name as given in the dt data or phy device name
+ * for non-dt case
+ *
+ * Gets the phy using phy_get(), and associates a device with it using
+ * devres. On driver detach, release function is invoked on the devres
+ * data, then the devres data is freed. This differs from devm_phy_get() in
+ * that if the phy does not exist, it is not considered an error and
+ * -ENODEV will not be returned. Instead the NULL phy is returned,
+ * which can be passed to all other phy consumer calls.
+ */
+struct phy *devm_phy_optional_get(struct device *dev, const char *string)
+{
+ struct phy *phy = devm_phy_get(dev, string);
+
+ if (PTR_ERR(phy) == -ENODEV)
+ phy = NULL;
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(devm_phy_optional_get);
+
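A consumer written against the optional-get helpers can treat a missing phy as a soft absence: the NULL phy returned here is accepted by phy_init(), phy_power_on() and friends, which are made NULL-tolerant in the phy-core changes above. A minimal sketch, assuming a hypothetical consumer probe and a made-up "usb" phy name:

    #include <linux/err.h>
    #include <linux/phy/phy.h>

    static int example_consumer_probe(struct device *dev)
    {
            struct phy *phy;
            int ret;

            phy = devm_phy_optional_get(dev, "usb");
            if (IS_ERR(phy))
                    return PTR_ERR(phy);    /* real lookup errors still propagate */

            ret = phy_init(phy);            /* returns 0 immediately when phy is NULL */
            if (ret)
                    return ret;

            return phy_power_on(phy);       /* likewise a no-op for a NULL phy */
    }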
+/**
* phy_create() - create a new phy
* @dev: device that is creating the new phy
* @ops: function pointers for performing phy operations
@@ -437,23 +515,18 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
int id;
struct phy *phy;
- if (!dev) {
- dev_WARN(dev, "no device provided for PHY\n");
- ret = -EINVAL;
- goto err0;
- }
+ if (WARN_ON(!dev))
+ return ERR_PTR(-EINVAL);
phy = kzalloc(sizeof(*phy), GFP_KERNEL);
- if (!phy) {
- ret = -ENOMEM;
- goto err0;
- }
+ if (!phy)
+ return ERR_PTR(-ENOMEM);
id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
dev_err(dev, "unable to get id\n");
ret = id;
- goto err0;
+ goto free_phy;
}
device_initialize(&phy->dev);
@@ -468,11 +541,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
if (ret)
- goto err1;
+ goto put_dev;
ret = device_add(&phy->dev);
if (ret)
- goto err1;
+ goto put_dev;
if (pm_runtime_enabled(dev)) {
pm_runtime_enable(&phy->dev);
@@ -481,12 +554,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
return phy;
-err1:
- ida_remove(&phy_ida, phy->id);
+put_dev:
put_device(&phy->dev);
+ ida_remove(&phy_ida, phy->id);
+free_phy:
kfree(phy);
-
-err0:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(phy_create);
diff --git a/drivers/phy/phy-mvebu-sata.c b/drivers/phy/phy-mvebu-sata.c
new file mode 100644
index 000000000000..d43786f62437
--- /dev/null
+++ b/drivers/phy/phy-mvebu-sata.c
@@ -0,0 +1,137 @@
+/*
+ * phy-mvebu-sata.c: SATA Phy driver for the Marvell mvebu SoCs.
+ *
+ * Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/phy/phy.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+struct priv {
+ struct clk *clk;
+ void __iomem *base;
+};
+
+#define SATA_PHY_MODE_2 0x0330
+#define MODE_2_FORCE_PU_TX BIT(0)
+#define MODE_2_FORCE_PU_RX BIT(1)
+#define MODE_2_PU_PLL BIT(2)
+#define MODE_2_PU_IVREF BIT(3)
+#define SATA_IF_CTRL 0x0050
+#define CTRL_PHY_SHUTDOWN BIT(9)
+
+static int phy_mvebu_sata_power_on(struct phy *phy)
+{
+ struct priv *priv = phy_get_drvdata(phy);
+ u32 reg;
+
+ clk_prepare_enable(priv->clk);
+
+ /* Enable PLL and IVREF */
+ reg = readl(priv->base + SATA_PHY_MODE_2);
+ reg |= (MODE_2_FORCE_PU_TX | MODE_2_FORCE_PU_RX |
+ MODE_2_PU_PLL | MODE_2_PU_IVREF);
+ writel(reg, priv->base + SATA_PHY_MODE_2);
+
+ /* Enable PHY */
+ reg = readl(priv->base + SATA_IF_CTRL);
+ reg &= ~CTRL_PHY_SHUTDOWN;
+ writel(reg, priv->base + SATA_IF_CTRL);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int phy_mvebu_sata_power_off(struct phy *phy)
+{
+ struct priv *priv = phy_get_drvdata(phy);
+ u32 reg;
+
+ clk_prepare_enable(priv->clk);
+
+ /* Disable PLL and IVREF */
+ reg = readl(priv->base + SATA_PHY_MODE_2);
+ reg &= ~(MODE_2_FORCE_PU_TX | MODE_2_FORCE_PU_RX |
+ MODE_2_PU_PLL | MODE_2_PU_IVREF);
+ writel(reg, priv->base + SATA_PHY_MODE_2);
+
+ /* Disable PHY */
+ reg = readl(priv->base + SATA_IF_CTRL);
+ reg |= CTRL_PHY_SHUTDOWN;
+ writel(reg, priv->base + SATA_IF_CTRL);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static struct phy_ops phy_mvebu_sata_ops = {
+ .power_on = phy_mvebu_sata_power_on,
+ .power_off = phy_mvebu_sata_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int phy_mvebu_sata_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ struct priv *priv;
+ struct phy *phy;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(&pdev->dev, "sata");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ phy_provider = devm_of_phy_provider_register(&pdev->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ phy = devm_phy_create(&pdev->dev, &phy_mvebu_sata_ops, NULL);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ phy_set_drvdata(phy, priv);
+
+ /* The boot loader may have left it on. Turn it off. */
+ phy_mvebu_sata_power_off(phy);
+
+ return 0;
+}
+
+static const struct of_device_id phy_mvebu_sata_of_match[] = {
+ { .compatible = "marvell,mvebu-sata-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, phy_mvebu_sata_of_match);
+
+static struct platform_driver phy_mvebu_sata_driver = {
+ .probe = phy_mvebu_sata_probe,
+ .driver = {
+ .name = "phy-mvebu-sata",
+ .owner = THIS_MODULE,
+ .of_match_table = phy_mvebu_sata_of_match,
+ }
+};
+module_platform_driver(phy_mvebu_sata_driver);
+
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
+MODULE_DESCRIPTION("Marvell MVEBU SATA PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 33f9dc1f14fd..be361b7cd30f 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -104,6 +104,19 @@ config PINCTRL_BCM2835
select PINMUX
select PINCONF
+config PINCTRL_CAPRI
+ bool "Broadcom Capri pinctrl driver"
+ depends on OF
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select REGMAP_MMIO
+ help
+ Say Y here to support the Broadcom Capri pinctrl driver, which is used for
+ the BCM281xx SoC family, including BCM11130, BCM11140, BCM11351,
+ BCM28145, and BCM28155 SoCs. This driver requires the pinctrl
+ framework. GPIO is provided by a separate GPIO driver.
+
config PINCTRL_IMX
bool
select PINMUX
@@ -116,15 +129,22 @@ config PINCTRL_IMX1_CORE
config PINCTRL_IMX27
bool "IMX27 pinctrl driver"
- depends on OF
depends on SOC_IMX27
select PINCTRL_IMX1_CORE
help
Say Y here to enable the imx27 pinctrl driver
+
+config PINCTRL_IMX25
+ bool "IMX25 pinctrl driver"
+ depends on OF
+ depends on SOC_IMX25
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx25 pinctrl driver
+
config PINCTRL_IMX35
bool "IMX35 pinctrl driver"
- depends on OF
depends on SOC_IMX35
select PINCTRL_IMX
help
@@ -132,7 +152,6 @@ config PINCTRL_IMX35
config PINCTRL_IMX50
bool "IMX50 pinctrl driver"
- depends on OF
depends on SOC_IMX50
select PINCTRL_IMX
help
@@ -140,7 +159,6 @@ config PINCTRL_IMX50
config PINCTRL_IMX51
bool "IMX51 pinctrl driver"
- depends on OF
depends on SOC_IMX51
select PINCTRL_IMX
help
@@ -148,7 +166,6 @@ config PINCTRL_IMX51
config PINCTRL_IMX53
bool "IMX53 pinctrl driver"
- depends on OF
depends on SOC_IMX53
select PINCTRL_IMX
help
@@ -156,7 +173,6 @@ config PINCTRL_IMX53
config PINCTRL_IMX6Q
bool "IMX6Q/DL pinctrl driver"
- depends on OF
depends on SOC_IMX6Q
select PINCTRL_IMX
help
@@ -164,7 +180,6 @@ config PINCTRL_IMX6Q
config PINCTRL_IMX6SL
bool "IMX6SL pinctrl driver"
- depends on OF
depends on SOC_IMX6SL
select PINCTRL_IMX
help
@@ -172,7 +187,6 @@ config PINCTRL_IMX6SL
config PINCTRL_VF610
bool "Freescale Vybrid VF610 pinctrl driver"
- depends on OF
depends on SOC_VF610
select PINCTRL_IMX
help
@@ -202,6 +216,20 @@ config PINCTRL_IMX28
bool
select PINCTRL_MXS
+config PINCTRL_MSM
+ tristate
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+
+config PINCTRL_MSM8X74
+ tristate "Qualcomm 8x74 pin controller driver"
+ depends on GPIOLIB && OF && OF_IRQ
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found in the Qualcomm 8974 platform.
+
config PINCTRL_NOMADIK
bool "Nomadik pin controller driver"
depends on ARCH_U8500 || ARCH_NOMADIK
@@ -268,6 +296,10 @@ config PINCTRL_TEGRA114
bool
select PINCTRL_TEGRA
+config PINCTRL_TEGRA124
+ bool
+ select PINCTRL_TEGRA
+
config PINCTRL_TZ1090
bool "Toumaz Xenif TZ1090 pin control driver"
depends on SOC_TZ1090
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 4f7be2921aa5..4b835880cf80 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o
obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
+obj-$(CONFIG_PINCTRL_CAPRI) += pinctrl-capri.o
obj-$(CONFIG_PINCTRL_IMX) += pinctrl-imx.o
obj-$(CONFIG_PINCTRL_IMX1_CORE) += pinctrl-imx1-core.o
obj-$(CONFIG_PINCTRL_IMX27) += pinctrl-imx27.o
@@ -34,7 +35,10 @@ obj-$(CONFIG_PINCTRL_IMX6SL) += pinctrl-imx6sl.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o
+obj-$(CONFIG_PINCTRL_IMX25) += pinctrl-imx25.o
obj-$(CONFIG_PINCTRL_IMX28) += pinctrl-imx28.o
+obj-$(CONFIG_PINCTRL_MSM) += pinctrl-msm.o
+obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
obj-$(CONFIG_PINCTRL_NOMADIK) += pinctrl-nomadik.o
obj-$(CONFIG_PINCTRL_STN8815) += pinctrl-nomadik-stn8815.o
obj-$(CONFIG_PINCTRL_DB8500) += pinctrl-nomadik-db8500.o
@@ -48,6 +52,7 @@ obj-$(CONFIG_PINCTRL_TEGRA) += pinctrl-tegra.o
obj-$(CONFIG_PINCTRL_TEGRA20) += pinctrl-tegra20.o
obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o
obj-$(CONFIG_PINCTRL_TEGRA114) += pinctrl-tegra114.o
+obj-$(CONFIG_PINCTRL_TEGRA124) += pinctrl-tegra124.o
obj-$(CONFIG_PINCTRL_TZ1090) += pinctrl-tz1090.o
obj-$(CONFIG_PINCTRL_TZ1090_PDC) += pinctrl-tz1090-pdc.o
obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 5ee61a470016..c0fe6091566a 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -851,7 +851,9 @@ static struct pinctrl *create_pinctrl(struct device *dev)
kref_init(&p->users);
/* Add the pinctrl handle to the global list */
+ mutex_lock(&pinctrl_list_mutex);
list_add_tail(&p->node, &pinctrl_list);
+ mutex_unlock(&pinctrl_list_mutex);
return p;
}
@@ -1642,8 +1644,10 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
device_root, pctldev, &pinctrl_groups_ops);
debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO,
device_root, pctldev, &pinctrl_gpioranges_ops);
- pinmux_init_device_debugfs(device_root, pctldev);
- pinconf_init_device_debugfs(device_root, pctldev);
+ if (pctldev->desc->pmxops)
+ pinmux_init_device_debugfs(device_root, pctldev);
+ if (pctldev->desc->confops)
+ pinconf_init_device_debugfs(device_root, pctldev);
}
static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 55a0ebe830ac..3d9a999fb699 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -48,6 +48,7 @@ static struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL),
PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL),
PCONFDUMP(PIN_CONFIG_DRIVE_STRENGTH, "output drive strength", "mA"),
+ PCONFDUMP(PIN_CONFIG_INPUT_ENABLE, "input enabled", NULL),
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL),
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL),
PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "usec"),
@@ -160,6 +161,8 @@ static struct pinconf_generic_dt_params dt_params[] = {
{ "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 },
{ "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 },
{ "drive-strength", PIN_CONFIG_DRIVE_STRENGTH, 0 },
+ { "input-enable", PIN_CONFIG_INPUT_ENABLE, 1 },
+ { "input-disable", PIN_CONFIG_INPUT_ENABLE, 0 },
{ "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 },
{ "input-schmitt-disable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 0 },
{ "input-debounce", PIN_CONFIG_INPUT_DEBOUNCE, 0 },
@@ -167,6 +170,7 @@ static struct pinconf_generic_dt_params dt_params[] = {
{ "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 },
{ "output-low", PIN_CONFIG_OUTPUT, 0, },
{ "output-high", PIN_CONFIG_OUTPUT, 1, },
+ { "slew-rate", PIN_CONFIG_SLEW_RATE, 0},
};
/**
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index b8fcc38c0d11..8bfa0643e5dc 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -28,12 +28,6 @@ int pinconf_check_ops(struct pinctrl_dev *pctldev)
{
const struct pinconf_ops *ops = pctldev->desc->confops;
- /* We must be able to read out pin status */
- if (!ops->pin_config_get && !ops->pin_config_group_get) {
- dev_err(pctldev->dev,
- "pinconf must be able to read out pin status\n");
- return -EINVAL;
- }
/* We have to be able to config the pins in SOME way */
if (!ops->pin_config_set && !ops->pin_config_group_set) {
dev_err(pctldev->dev,
@@ -67,9 +61,9 @@ int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
const struct pinconf_ops *ops = pctldev->desc->confops;
if (!ops || !ops->pin_config_get) {
- dev_err(pctldev->dev, "cannot get pin configuration, missing "
+ dev_dbg(pctldev->dev, "cannot get pin configuration, missing "
"pin_config_get() function in driver\n");
- return -EINVAL;
+ return -ENOTSUPP;
}
return ops->pin_config_get(pctldev, pin, config);
@@ -93,10 +87,10 @@ int pin_config_group_get(const char *dev_name, const char *pin_group,
ops = pctldev->desc->confops;
if (!ops || !ops->pin_config_group_get) {
- dev_err(pctldev->dev, "cannot get configuration for pin "
+ dev_dbg(pctldev->dev, "cannot get configuration for pin "
"group, missing group config get function in "
"driver\n");
- ret = -EINVAL;
+ ret = -ENOTSUPP;
goto unlock;
}
@@ -302,12 +296,8 @@ static void pinconf_dump_pin(struct pinctrl_dev *pctldev,
static int pinconf_pins_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev = s->private;
- const struct pinconf_ops *ops = pctldev->desc->confops;
unsigned i, pin;
- if (!ops || !ops->pin_config_get)
- return 0;
-
seq_puts(s, "Pin config settings per pin\n");
seq_puts(s, "Format: pin (name): configs\n");
@@ -352,13 +342,9 @@ static int pinconf_groups_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev = s->private;
const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
- const struct pinconf_ops *ops = pctldev->desc->confops;
unsigned ngroups = pctlops->get_groups_count(pctldev);
unsigned selector = 0;
- if (!ops || !ops->pin_config_group_get)
- return 0;
-
seq_puts(s, "Pin config settings per pin group\n");
seq_puts(s, "Format: group (name): configs\n");
diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c
index 5183e7bb8de3..163da9c3ea0e 100644
--- a/drivers/pinctrl/pinctrl-abx500.c
+++ b/drivers/pinctrl/pinctrl-abx500.c
@@ -24,7 +24,6 @@
#include <linux/bitops.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
-#include <linux/mfd/abx500/ab8500-gpio.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinmux.h>
@@ -1218,21 +1217,15 @@ static const struct of_device_id abx500_gpio_match[] = {
static int abx500_gpio_probe(struct platform_device *pdev)
{
- struct ab8500_platform_data *abx500_pdata =
- dev_get_platdata(pdev->dev.parent);
- struct abx500_gpio_platform_data *pdata = NULL;
struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
struct abx500_pinctrl *pct;
- const struct platform_device_id *platid = platform_get_device_id(pdev);
unsigned int id = -1;
int ret, err;
int i;
- if (abx500_pdata)
- pdata = abx500_pdata->gpio;
-
- if (!(pdata || np)) {
- dev_err(&pdev->dev, "gpio dt and platform data missing\n");
+ if (!np) {
+ dev_err(&pdev->dev, "gpio dt node missing\n");
return -ENODEV;
}
@@ -1248,17 +1241,14 @@ static int abx500_gpio_probe(struct platform_device *pdev)
pct->parent = dev_get_drvdata(pdev->dev.parent);
pct->chip = abx500gpio_chip;
pct->chip.dev = &pdev->dev;
- pct->chip.base = (np) ? -1 : pdata->gpio_base;
-
- if (platid)
- id = platid->driver_data;
- else if (np) {
- const struct of_device_id *match;
+ pct->chip.base = -1; /* Dynamic allocation */
- match = of_match_device(abx500_gpio_match, &pdev->dev);
- if (match)
- id = (unsigned long)match->data;
+ match = of_match_device(abx500_gpio_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "gpio dt not matching\n");
+ return -ENODEV;
}
+ id = (unsigned long)match->data;
/* Poke in other ASIC variants here */
switch (id) {
@@ -1349,14 +1339,6 @@ static int abx500_gpio_remove(struct platform_device *pdev)
return 0;
}
-static const struct platform_device_id abx500_pinctrl_id[] = {
- { "pinctrl-ab8500", PINCTRL_AB8500 },
- { "pinctrl-ab8540", PINCTRL_AB8540 },
- { "pinctrl-ab9540", PINCTRL_AB9540 },
- { "pinctrl-ab8505", PINCTRL_AB8505 },
- { },
-};
-
static struct platform_driver abx500_gpio_driver = {
.driver = {
.name = "abx500-gpio",
@@ -1365,7 +1347,6 @@ static struct platform_driver abx500_gpio_driver = {
},
.probe = abx500_gpio_probe,
.remove = abx500_gpio_remove,
- .id_table = abx500_pinctrl_id,
};
static int __init abx500_gpio_init(void)
diff --git a/drivers/pinctrl/pinctrl-abx500.h b/drivers/pinctrl/pinctrl-abx500.h
index 82293806e842..2beef3bfe9ca 100644
--- a/drivers/pinctrl/pinctrl-abx500.h
+++ b/drivers/pinctrl/pinctrl-abx500.h
@@ -15,6 +15,18 @@ enum abx500_pin_func {
ABX500_ALT_C,
};
+enum abx500_gpio_pull_updown {
+ ABX500_GPIO_PULL_DOWN = 0x0,
+ ABX500_GPIO_PULL_NONE = 0x1,
+ ABX500_GPIO_PULL_UP = 0x3,
+};
+
+enum abx500_gpio_vinsel {
+ ABX500_GPIO_VINSEL_VBAT = 0x0,
+ ABX500_GPIO_VINSEL_VIN_1V8 = 0x1,
+ ABX500_GPIO_VINSEL_VDD_BIF = 0x2,
+};
+
/**
* struct abx500_function - ABx500 pinctrl mux function
* @name: The name of the function, exported to pinctrl core.
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index 01bffc1d52fd..92ed4b2e3c07 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -250,6 +250,26 @@ static int as3722_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function,
return ret;
}
as_pci->gpio_control[group].io_function = function;
+
+ switch (val) {
+ case AS3722_GPIO_IOSF_SD0_OUT:
+ case AS3722_GPIO_IOSF_PWR_GOOD_OUT:
+ case AS3722_GPIO_IOSF_Q32K_OUT:
+ case AS3722_GPIO_IOSF_PWM_OUT:
+ case AS3722_GPIO_IOSF_SD6_LOW_VOLT_LOW:
+ ret = as3722_update_bits(as_pci->as3722, gpio_cntr_reg,
+ AS3722_GPIO_MODE_MASK, AS3722_GPIO_MODE_OUTPUT_VDDH);
+ if (ret < 0) {
+ dev_err(as_pci->dev, "GPIO%d_CTRL update failed %d\n",
+ group, ret);
+ return ret;
+ }
+ as_pci->gpio_control[group].mode_prop =
+ AS3722_GPIO_MODE_OUTPUT_VDDH;
+ break;
+ default:
+ break;
+ }
return ret;
}
@@ -531,7 +551,7 @@ static const struct gpio_chip as3722_gpio_chip = {
.direction_input = as3722_gpio_direction_input,
.direction_output = as3722_gpio_direction_output,
.to_irq = as3722_gpio_to_irq,
- .can_sleep = 1,
+ .can_sleep = true,
.ngpio = AS3722_PIN_NUM,
.base = -1,
};
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index a7549c4c83b4..d990e33d8aa7 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -118,7 +118,7 @@ struct at91_pin_group {
};
/**
- * struct at91_pinctrl_mux_ops - describes an At91 mux ops group
+ * struct at91_pinctrl_mux_ops - describes an AT91 mux ops group
* on new IP with support for periph C and D the way to mux in
* periph A and B has changed
* So provide the right call back
@@ -722,7 +722,8 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev,
unsigned pin;
int div;
- dev_dbg(info->dev, "%s:%d, pin_id=%d, config=0x%lx", __func__, __LINE__, pin_id, *config);
+ *config = 0;
+ dev_dbg(info->dev, "%s:%d, pin_id=%d", __func__, __LINE__, pin_id);
pio = pin_to_controller(info, pin_to_bank(pin_id));
pin = pin_id % MAX_NB_GPIO_PER_BANK;
@@ -783,10 +784,35 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev,
return 0;
}
+#define DBG_SHOW_FLAG(flag) do { \
+ if (config & flag) { \
+ if (num_conf) \
+ seq_puts(s, "|"); \
+ seq_puts(s, #flag); \
+ num_conf++; \
+ } \
+} while (0)
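+
+/*
+ * For example, a pin configured with pull-up and deglitch enabled is
+ * reported as "PULL_UP|DEGLITCH" by at91_pinconf_dbg_show() below.
+ */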
+
static void at91_pinconf_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned pin_id)
{
+ unsigned long config;
+ int ret, val, num_conf = 0;
+
+ ret = at91_pinconf_get(pctldev, pin_id, &config);
+
+ DBG_SHOW_FLAG(MULTI_DRIVE);
+ DBG_SHOW_FLAG(PULL_UP);
+ DBG_SHOW_FLAG(PULL_DOWN);
+ DBG_SHOW_FLAG(DIS_SCHMIT);
+ DBG_SHOW_FLAG(DEGLITCH);
+ DBG_SHOW_FLAG(DEBOUNCE);
+ if (config & DEBOUNCE) {
+ val = config >> DEBOUNCE_VAL_SHIFT;
+ seq_printf(s, "(%d)", val);
+ }
+ return;
}
static void at91_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
@@ -1260,22 +1286,22 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- irq_set_handler(d->irq, handle_simple_irq);
+ __irq_set_handler_locked(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_ESR);
writel_relaxed(mask, pio + PIO_REHLSR);
break;
case IRQ_TYPE_EDGE_FALLING:
- irq_set_handler(d->irq, handle_simple_irq);
+ __irq_set_handler_locked(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_ESR);
writel_relaxed(mask, pio + PIO_FELLSR);
break;
case IRQ_TYPE_LEVEL_LOW:
- irq_set_handler(d->irq, handle_level_irq);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
writel_relaxed(mask, pio + PIO_LSR);
writel_relaxed(mask, pio + PIO_FELLSR);
break;
case IRQ_TYPE_LEVEL_HIGH:
- irq_set_handler(d->irq, handle_level_irq);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
writel_relaxed(mask, pio + PIO_LSR);
writel_relaxed(mask, pio + PIO_REHLSR);
break;
@@ -1284,7 +1310,7 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
* disable additional interrupt modes:
* fall back to default behavior
*/
- irq_set_handler(d->irq, handle_simple_irq);
+ __irq_set_handler_locked(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_AIMDR);
return 0;
case IRQ_TYPE_NONE:
@@ -1339,13 +1365,11 @@ void at91_pinctrl_gpio_suspend(void)
__raw_writel(backups[i], pio + PIO_IDR);
__raw_writel(wakeups[i], pio + PIO_IER);
- if (!wakeups[i]) {
- clk_unprepare(gpio_chips[i]->clock);
- clk_disable(gpio_chips[i]->clock);
- } else {
+ if (!wakeups[i])
+ clk_disable_unprepare(gpio_chips[i]->clock);
+ else
printk(KERN_DEBUG "GPIO-%c may wake for %08x\n",
'A'+i, wakeups[i]);
- }
}
}
@@ -1361,10 +1385,8 @@ void at91_pinctrl_gpio_resume(void)
pio = gpio_chips[i]->regbase;
- if (!wakeups[i]) {
- if (clk_prepare(gpio_chips[i]->clock) == 0)
- clk_enable(gpio_chips[i]->clock);
- }
+ if (!wakeups[i])
+ clk_prepare_enable(gpio_chips[i]->clock);
__raw_writel(wakeups[i], pio + PIO_IDR);
__raw_writel(backups[i], pio + PIO_IER);
@@ -1396,7 +1418,7 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
chained_irq_enter(chip, desc);
for (;;) {
/* Reading ISR acks pending (edge triggered) GPIO interrupts.
- * When there none are pending, we're finished unless we need
+ * When there are none pending, we're finished unless we need
* to process multiple banks (like ID_PIOCDE on sam9263).
*/
isr = readl_relaxed(pio + PIO_ISR) & readl_relaxed(pio + PIO_IMR);
@@ -1505,7 +1527,7 @@ static int at91_gpio_of_irq_setup(struct device_node *node,
prev = gpio_chips[at91_gpio->pioc_idx - 1];
/* The top level handler handles one bank of GPIOs, except
- * on some SoC it can handles up to three...
+ * on some SoC it can handle up to three...
* We only set up the handler for the first of the list.
*/
if (prev && prev->next == at91_gpio)
@@ -1527,7 +1549,7 @@ static struct gpio_chip at91_gpio_template = {
.set = at91_gpio_set,
.to_irq = at91_gpio_to_irq,
.dbg_show = at91_gpio_dbg_show,
- .can_sleep = 0,
+ .can_sleep = false,
.ngpio = MAX_NB_GPIO_PER_BANK,
};
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
index 2832576d8b12..665b96bc0c3a 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/pinctrl-baytrail.c
@@ -29,7 +29,6 @@
#include <linux/gpio.h>
#include <linux/irqdomain.h>
#include <linux/acpi.h>
-#include <linux/acpi_gpio.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/io.h>
@@ -286,13 +285,19 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
spin_lock_irqsave(&vg->lock, flags);
for (i = 0; i < vg->chip.ngpio; i++) {
+ const char *label;
offs = vg->range->pins[i] * 16;
conf0 = readl(vg->reg_base + offs + BYT_CONF0_REG);
val = readl(vg->reg_base + offs + BYT_VAL_REG);
+ label = gpiochip_is_requested(chip, i);
+ if (!label)
+ label = "Unrequested";
+
seq_printf(s,
- " gpio-%-3d %s %s %s pad-%-3d offset:0x%03x mux:%d %s%s%s\n",
+ " gpio-%-3d (%-20.20s) %s %s %s pad-%-3d offset:0x%03x mux:%d %s%s%s\n",
i,
+ label,
val & BYT_INPUT_EN ? " " : "in",
val & BYT_OUTPUT_EN ? " " : "out",
val & BYT_LEVEL ? "hi" : "lo",
@@ -366,11 +371,33 @@ static void byt_irq_mask(struct irq_data *d)
{
}
+static unsigned int byt_irq_startup(struct irq_data *d)
+{
+ struct byt_gpio *vg = irq_data_get_irq_chip_data(d);
+
+ if (gpio_lock_as_irq(&vg->chip, irqd_to_hwirq(d)))
+ dev_err(vg->chip.dev,
+ "unable to lock HW IRQ %lu for IRQ\n",
+ irqd_to_hwirq(d));
+ byt_irq_unmask(d);
+ return 0;
+}
+
+static void byt_irq_shutdown(struct irq_data *d)
+{
+ struct byt_gpio *vg = irq_data_get_irq_chip_data(d);
+
+ byt_irq_mask(d);
+ gpio_unlock_as_irq(&vg->chip, irqd_to_hwirq(d));
+}
+
static struct irq_chip byt_irqchip = {
.name = "BYT-GPIO",
.irq_mask = byt_irq_mask,
.irq_unmask = byt_irq_unmask,
.irq_set_type = byt_irq_type,
+ .irq_startup = byt_irq_startup,
+ .irq_shutdown = byt_irq_shutdown,
};
static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
@@ -461,7 +488,7 @@ static int byt_gpio_probe(struct platform_device *pdev)
gc->set = byt_gpio_set;
gc->dbg_show = byt_gpio_dbg_show;
gc->base = -1;
- gc->can_sleep = 0;
+ gc->can_sleep = false;
gc->dev = dev;
ret = gpiochip_add(gc);
@@ -485,9 +512,6 @@ static int byt_gpio_probe(struct platform_device *pdev)
irq_set_handler_data(hwirq, vg);
irq_set_chained_handler(hwirq, byt_gpio_irq_handler);
-
- /* Register interrupt handlers for gpio signaled acpi events */
- acpi_gpiochip_request_interrupts(gc);
}
pm_runtime_enable(dev);
diff --git a/drivers/pinctrl/pinctrl-bcm2835.c b/drivers/pinctrl/pinctrl-bcm2835.c
index c05c1ef2cc3c..3d907de9bc91 100644
--- a/drivers/pinctrl/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/pinctrl-bcm2835.c
@@ -384,7 +384,7 @@ static struct gpio_chip bcm2835_gpio_chip = {
.to_irq = bcm2835_gpio_to_irq,
.base = -1,
.ngpio = BCM2835_NUM_GPIOS,
- .can_sleep = 0,
+ .can_sleep = false,
};
static irqreturn_t bcm2835_gpio_irq_handler(int irq, void *dev_id)
diff --git a/drivers/pinctrl/pinctrl-capri.c b/drivers/pinctrl/pinctrl-capri.c
new file mode 100644
index 000000000000..4669c53f99b0
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-capri.c
@@ -0,0 +1,1454 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include "core.h"
+#include "pinctrl-utils.h"
+
+/* Capri Pin Control Registers Definitions */
+
+/* Function Select bits are the same for all pin control registers */
+#define CAPRI_PIN_REG_F_SEL_MASK 0x0700
+#define CAPRI_PIN_REG_F_SEL_SHIFT 8
+
+/* Standard pin register */
+#define CAPRI_STD_PIN_REG_DRV_STR_MASK 0x0007
+#define CAPRI_STD_PIN_REG_DRV_STR_SHIFT 0
+#define CAPRI_STD_PIN_REG_INPUT_DIS_MASK 0x0008
+#define CAPRI_STD_PIN_REG_INPUT_DIS_SHIFT 3
+#define CAPRI_STD_PIN_REG_SLEW_MASK 0x0010
+#define CAPRI_STD_PIN_REG_SLEW_SHIFT 4
+#define CAPRI_STD_PIN_REG_PULL_UP_MASK 0x0020
+#define CAPRI_STD_PIN_REG_PULL_UP_SHIFT 5
+#define CAPRI_STD_PIN_REG_PULL_DN_MASK 0x0040
+#define CAPRI_STD_PIN_REG_PULL_DN_SHIFT 6
+#define CAPRI_STD_PIN_REG_HYST_MASK 0x0080
+#define CAPRI_STD_PIN_REG_HYST_SHIFT 7
+
+/* I2C pin register */
+#define CAPRI_I2C_PIN_REG_INPUT_DIS_MASK 0x0004
+#define CAPRI_I2C_PIN_REG_INPUT_DIS_SHIFT 2
+#define CAPRI_I2C_PIN_REG_SLEW_MASK 0x0008
+#define CAPRI_I2C_PIN_REG_SLEW_SHIFT 3
+#define CAPRI_I2C_PIN_REG_PULL_UP_STR_MASK 0x0070
+#define CAPRI_I2C_PIN_REG_PULL_UP_STR_SHIFT 4
+
+/* HDMI pin register */
+#define CAPRI_HDMI_PIN_REG_INPUT_DIS_MASK 0x0008
+#define CAPRI_HDMI_PIN_REG_INPUT_DIS_SHIFT 3
+#define CAPRI_HDMI_PIN_REG_MODE_MASK 0x0010
+#define CAPRI_HDMI_PIN_REG_MODE_SHIFT 4
+
+/**
+ * capri_pin_type - types of pin register
+ */
+enum capri_pin_type {
+ CAPRI_PIN_TYPE_UNKNOWN = 0,
+ CAPRI_PIN_TYPE_STD,
+ CAPRI_PIN_TYPE_I2C,
+ CAPRI_PIN_TYPE_HDMI,
+};
+
+static enum capri_pin_type std_pin = CAPRI_PIN_TYPE_STD;
+static enum capri_pin_type i2c_pin = CAPRI_PIN_TYPE_I2C;
+static enum capri_pin_type hdmi_pin = CAPRI_PIN_TYPE_HDMI;
+
+/**
+ * capri_pin_function - define pin function
+ */
+struct capri_pin_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned ngroups;
+};
+
+/**
+ * capri_pinctrl_data - Broadcom-specific pinctrl data
+ * @reg_base - base of pinctrl registers
+ */
+struct capri_pinctrl_data {
+ void __iomem *reg_base;
+
+ /* List of all pins */
+ const struct pinctrl_pin_desc *pins;
+ const unsigned npins;
+
+ const struct capri_pin_function *functions;
+ const unsigned nfunctions;
+
+ struct regmap *regmap;
+};
+
+/*
+ * Pin number definition. The order here must be the same as defined in the
+ * PADCTRLREG block in the RDB.
+ */
+#define CAPRI_PIN_ADCSYNC 0
+#define CAPRI_PIN_BAT_RM 1
+#define CAPRI_PIN_BSC1_SCL 2
+#define CAPRI_PIN_BSC1_SDA 3
+#define CAPRI_PIN_BSC2_SCL 4
+#define CAPRI_PIN_BSC2_SDA 5
+#define CAPRI_PIN_CLASSGPWR 6
+#define CAPRI_PIN_CLK_CX8 7
+#define CAPRI_PIN_CLKOUT_0 8
+#define CAPRI_PIN_CLKOUT_1 9
+#define CAPRI_PIN_CLKOUT_2 10
+#define CAPRI_PIN_CLKOUT_3 11
+#define CAPRI_PIN_CLKREQ_IN_0 12
+#define CAPRI_PIN_CLKREQ_IN_1 13
+#define CAPRI_PIN_CWS_SYS_REQ1 14
+#define CAPRI_PIN_CWS_SYS_REQ2 15
+#define CAPRI_PIN_CWS_SYS_REQ3 16
+#define CAPRI_PIN_DIGMIC1_CLK 17
+#define CAPRI_PIN_DIGMIC1_DQ 18
+#define CAPRI_PIN_DIGMIC2_CLK 19
+#define CAPRI_PIN_DIGMIC2_DQ 20
+#define CAPRI_PIN_GPEN13 21
+#define CAPRI_PIN_GPEN14 22
+#define CAPRI_PIN_GPEN15 23
+#define CAPRI_PIN_GPIO00 24
+#define CAPRI_PIN_GPIO01 25
+#define CAPRI_PIN_GPIO02 26
+#define CAPRI_PIN_GPIO03 27
+#define CAPRI_PIN_GPIO04 28
+#define CAPRI_PIN_GPIO05 29
+#define CAPRI_PIN_GPIO06 30
+#define CAPRI_PIN_GPIO07 31
+#define CAPRI_PIN_GPIO08 32
+#define CAPRI_PIN_GPIO09 33
+#define CAPRI_PIN_GPIO10 34
+#define CAPRI_PIN_GPIO11 35
+#define CAPRI_PIN_GPIO12 36
+#define CAPRI_PIN_GPIO13 37
+#define CAPRI_PIN_GPIO14 38
+#define CAPRI_PIN_GPS_PABLANK 39
+#define CAPRI_PIN_GPS_TMARK 40
+#define CAPRI_PIN_HDMI_SCL 41
+#define CAPRI_PIN_HDMI_SDA 42
+#define CAPRI_PIN_IC_DM 43
+#define CAPRI_PIN_IC_DP 44
+#define CAPRI_PIN_KP_COL_IP_0 45
+#define CAPRI_PIN_KP_COL_IP_1 46
+#define CAPRI_PIN_KP_COL_IP_2 47
+#define CAPRI_PIN_KP_COL_IP_3 48
+#define CAPRI_PIN_KP_ROW_OP_0 49
+#define CAPRI_PIN_KP_ROW_OP_1 50
+#define CAPRI_PIN_KP_ROW_OP_2 51
+#define CAPRI_PIN_KP_ROW_OP_3 52
+#define CAPRI_PIN_LCD_B_0 53
+#define CAPRI_PIN_LCD_B_1 54
+#define CAPRI_PIN_LCD_B_2 55
+#define CAPRI_PIN_LCD_B_3 56
+#define CAPRI_PIN_LCD_B_4 57
+#define CAPRI_PIN_LCD_B_5 58
+#define CAPRI_PIN_LCD_B_6 59
+#define CAPRI_PIN_LCD_B_7 60
+#define CAPRI_PIN_LCD_G_0 61
+#define CAPRI_PIN_LCD_G_1 62
+#define CAPRI_PIN_LCD_G_2 63
+#define CAPRI_PIN_LCD_G_3 64
+#define CAPRI_PIN_LCD_G_4 65
+#define CAPRI_PIN_LCD_G_5 66
+#define CAPRI_PIN_LCD_G_6 67
+#define CAPRI_PIN_LCD_G_7 68
+#define CAPRI_PIN_LCD_HSYNC 69
+#define CAPRI_PIN_LCD_OE 70
+#define CAPRI_PIN_LCD_PCLK 71
+#define CAPRI_PIN_LCD_R_0 72
+#define CAPRI_PIN_LCD_R_1 73
+#define CAPRI_PIN_LCD_R_2 74
+#define CAPRI_PIN_LCD_R_3 75
+#define CAPRI_PIN_LCD_R_4 76
+#define CAPRI_PIN_LCD_R_5 77
+#define CAPRI_PIN_LCD_R_6 78
+#define CAPRI_PIN_LCD_R_7 79
+#define CAPRI_PIN_LCD_VSYNC 80
+#define CAPRI_PIN_MDMGPIO0 81
+#define CAPRI_PIN_MDMGPIO1 82
+#define CAPRI_PIN_MDMGPIO2 83
+#define CAPRI_PIN_MDMGPIO3 84
+#define CAPRI_PIN_MDMGPIO4 85
+#define CAPRI_PIN_MDMGPIO5 86
+#define CAPRI_PIN_MDMGPIO6 87
+#define CAPRI_PIN_MDMGPIO7 88
+#define CAPRI_PIN_MDMGPIO8 89
+#define CAPRI_PIN_MPHI_DATA_0 90
+#define CAPRI_PIN_MPHI_DATA_1 91
+#define CAPRI_PIN_MPHI_DATA_2 92
+#define CAPRI_PIN_MPHI_DATA_3 93
+#define CAPRI_PIN_MPHI_DATA_4 94
+#define CAPRI_PIN_MPHI_DATA_5 95
+#define CAPRI_PIN_MPHI_DATA_6 96
+#define CAPRI_PIN_MPHI_DATA_7 97
+#define CAPRI_PIN_MPHI_DATA_8 98
+#define CAPRI_PIN_MPHI_DATA_9 99
+#define CAPRI_PIN_MPHI_DATA_10 100
+#define CAPRI_PIN_MPHI_DATA_11 101
+#define CAPRI_PIN_MPHI_DATA_12 102
+#define CAPRI_PIN_MPHI_DATA_13 103
+#define CAPRI_PIN_MPHI_DATA_14 104
+#define CAPRI_PIN_MPHI_DATA_15 105
+#define CAPRI_PIN_MPHI_HA0 106
+#define CAPRI_PIN_MPHI_HAT0 107
+#define CAPRI_PIN_MPHI_HAT1 108
+#define CAPRI_PIN_MPHI_HCE0_N 109
+#define CAPRI_PIN_MPHI_HCE1_N 110
+#define CAPRI_PIN_MPHI_HRD_N 111
+#define CAPRI_PIN_MPHI_HWR_N 112
+#define CAPRI_PIN_MPHI_RUN0 113
+#define CAPRI_PIN_MPHI_RUN1 114
+#define CAPRI_PIN_MTX_SCAN_CLK 115
+#define CAPRI_PIN_MTX_SCAN_DATA 116
+#define CAPRI_PIN_NAND_AD_0 117
+#define CAPRI_PIN_NAND_AD_1 118
+#define CAPRI_PIN_NAND_AD_2 119
+#define CAPRI_PIN_NAND_AD_3 120
+#define CAPRI_PIN_NAND_AD_4 121
+#define CAPRI_PIN_NAND_AD_5 122
+#define CAPRI_PIN_NAND_AD_6 123
+#define CAPRI_PIN_NAND_AD_7 124
+#define CAPRI_PIN_NAND_ALE 125
+#define CAPRI_PIN_NAND_CEN_0 126
+#define CAPRI_PIN_NAND_CEN_1 127
+#define CAPRI_PIN_NAND_CLE 128
+#define CAPRI_PIN_NAND_OEN 129
+#define CAPRI_PIN_NAND_RDY_0 130
+#define CAPRI_PIN_NAND_RDY_1 131
+#define CAPRI_PIN_NAND_WEN 132
+#define CAPRI_PIN_NAND_WP 133
+#define CAPRI_PIN_PC1 134
+#define CAPRI_PIN_PC2 135
+#define CAPRI_PIN_PMU_INT 136
+#define CAPRI_PIN_PMU_SCL 137
+#define CAPRI_PIN_PMU_SDA 138
+#define CAPRI_PIN_RFST2G_MTSLOTEN3G 139
+#define CAPRI_PIN_RGMII_0_RX_CTL 140
+#define CAPRI_PIN_RGMII_0_RXC 141
+#define CAPRI_PIN_RGMII_0_RXD_0 142
+#define CAPRI_PIN_RGMII_0_RXD_1 143
+#define CAPRI_PIN_RGMII_0_RXD_2 144
+#define CAPRI_PIN_RGMII_0_RXD_3 145
+#define CAPRI_PIN_RGMII_0_TX_CTL 146
+#define CAPRI_PIN_RGMII_0_TXC 147
+#define CAPRI_PIN_RGMII_0_TXD_0 148
+#define CAPRI_PIN_RGMII_0_TXD_1 149
+#define CAPRI_PIN_RGMII_0_TXD_2 150
+#define CAPRI_PIN_RGMII_0_TXD_3 151
+#define CAPRI_PIN_RGMII_1_RX_CTL 152
+#define CAPRI_PIN_RGMII_1_RXC 153
+#define CAPRI_PIN_RGMII_1_RXD_0 154
+#define CAPRI_PIN_RGMII_1_RXD_1 155
+#define CAPRI_PIN_RGMII_1_RXD_2 156
+#define CAPRI_PIN_RGMII_1_RXD_3 157
+#define CAPRI_PIN_RGMII_1_TX_CTL 158
+#define CAPRI_PIN_RGMII_1_TXC 159
+#define CAPRI_PIN_RGMII_1_TXD_0 160
+#define CAPRI_PIN_RGMII_1_TXD_1 161
+#define CAPRI_PIN_RGMII_1_TXD_2 162
+#define CAPRI_PIN_RGMII_1_TXD_3 163
+#define CAPRI_PIN_RGMII_GPIO_0 164
+#define CAPRI_PIN_RGMII_GPIO_1 165
+#define CAPRI_PIN_RGMII_GPIO_2 166
+#define CAPRI_PIN_RGMII_GPIO_3 167
+#define CAPRI_PIN_RTXDATA2G_TXDATA3G1 168
+#define CAPRI_PIN_RTXEN2G_TXDATA3G2 169
+#define CAPRI_PIN_RXDATA3G0 170
+#define CAPRI_PIN_RXDATA3G1 171
+#define CAPRI_PIN_RXDATA3G2 172
+#define CAPRI_PIN_SDIO1_CLK 173
+#define CAPRI_PIN_SDIO1_CMD 174
+#define CAPRI_PIN_SDIO1_DATA_0 175
+#define CAPRI_PIN_SDIO1_DATA_1 176
+#define CAPRI_PIN_SDIO1_DATA_2 177
+#define CAPRI_PIN_SDIO1_DATA_3 178
+#define CAPRI_PIN_SDIO4_CLK 179
+#define CAPRI_PIN_SDIO4_CMD 180
+#define CAPRI_PIN_SDIO4_DATA_0 181
+#define CAPRI_PIN_SDIO4_DATA_1 182
+#define CAPRI_PIN_SDIO4_DATA_2 183
+#define CAPRI_PIN_SDIO4_DATA_3 184
+#define CAPRI_PIN_SIM_CLK 185
+#define CAPRI_PIN_SIM_DATA 186
+#define CAPRI_PIN_SIM_DET 187
+#define CAPRI_PIN_SIM_RESETN 188
+#define CAPRI_PIN_SIM2_CLK 189
+#define CAPRI_PIN_SIM2_DATA 190
+#define CAPRI_PIN_SIM2_DET 191
+#define CAPRI_PIN_SIM2_RESETN 192
+#define CAPRI_PIN_SRI_C 193
+#define CAPRI_PIN_SRI_D 194
+#define CAPRI_PIN_SRI_E 195
+#define CAPRI_PIN_SSP_EXTCLK 196
+#define CAPRI_PIN_SSP0_CLK 197
+#define CAPRI_PIN_SSP0_FS 198
+#define CAPRI_PIN_SSP0_RXD 199
+#define CAPRI_PIN_SSP0_TXD 200
+#define CAPRI_PIN_SSP2_CLK 201
+#define CAPRI_PIN_SSP2_FS_0 202
+#define CAPRI_PIN_SSP2_FS_1 203
+#define CAPRI_PIN_SSP2_FS_2 204
+#define CAPRI_PIN_SSP2_FS_3 205
+#define CAPRI_PIN_SSP2_RXD_0 206
+#define CAPRI_PIN_SSP2_RXD_1 207
+#define CAPRI_PIN_SSP2_TXD_0 208
+#define CAPRI_PIN_SSP2_TXD_1 209
+#define CAPRI_PIN_SSP3_CLK 210
+#define CAPRI_PIN_SSP3_FS 211
+#define CAPRI_PIN_SSP3_RXD 212
+#define CAPRI_PIN_SSP3_TXD 213
+#define CAPRI_PIN_SSP4_CLK 214
+#define CAPRI_PIN_SSP4_FS 215
+#define CAPRI_PIN_SSP4_RXD 216
+#define CAPRI_PIN_SSP4_TXD 217
+#define CAPRI_PIN_SSP5_CLK 218
+#define CAPRI_PIN_SSP5_FS 219
+#define CAPRI_PIN_SSP5_RXD 220
+#define CAPRI_PIN_SSP5_TXD 221
+#define CAPRI_PIN_SSP6_CLK 222
+#define CAPRI_PIN_SSP6_FS 223
+#define CAPRI_PIN_SSP6_RXD 224
+#define CAPRI_PIN_SSP6_TXD 225
+#define CAPRI_PIN_STAT_1 226
+#define CAPRI_PIN_STAT_2 227
+#define CAPRI_PIN_SYSCLKEN 228
+#define CAPRI_PIN_TRACECLK 229
+#define CAPRI_PIN_TRACEDT00 230
+#define CAPRI_PIN_TRACEDT01 231
+#define CAPRI_PIN_TRACEDT02 232
+#define CAPRI_PIN_TRACEDT03 233
+#define CAPRI_PIN_TRACEDT04 234
+#define CAPRI_PIN_TRACEDT05 235
+#define CAPRI_PIN_TRACEDT06 236
+#define CAPRI_PIN_TRACEDT07 237
+#define CAPRI_PIN_TRACEDT08 238
+#define CAPRI_PIN_TRACEDT09 239
+#define CAPRI_PIN_TRACEDT10 240
+#define CAPRI_PIN_TRACEDT11 241
+#define CAPRI_PIN_TRACEDT12 242
+#define CAPRI_PIN_TRACEDT13 243
+#define CAPRI_PIN_TRACEDT14 244
+#define CAPRI_PIN_TRACEDT15 245
+#define CAPRI_PIN_TXDATA3G0 246
+#define CAPRI_PIN_TXPWRIND 247
+#define CAPRI_PIN_UARTB1_UCTS 248
+#define CAPRI_PIN_UARTB1_URTS 249
+#define CAPRI_PIN_UARTB1_URXD 250
+#define CAPRI_PIN_UARTB1_UTXD 251
+#define CAPRI_PIN_UARTB2_URXD 252
+#define CAPRI_PIN_UARTB2_UTXD 253
+#define CAPRI_PIN_UARTB3_UCTS 254
+#define CAPRI_PIN_UARTB3_URTS 255
+#define CAPRI_PIN_UARTB3_URXD 256
+#define CAPRI_PIN_UARTB3_UTXD 257
+#define CAPRI_PIN_UARTB4_UCTS 258
+#define CAPRI_PIN_UARTB4_URTS 259
+#define CAPRI_PIN_UARTB4_URXD 260
+#define CAPRI_PIN_UARTB4_UTXD 261
+#define CAPRI_PIN_VC_CAM1_SCL 262
+#define CAPRI_PIN_VC_CAM1_SDA 263
+#define CAPRI_PIN_VC_CAM2_SCL 264
+#define CAPRI_PIN_VC_CAM2_SDA 265
+#define CAPRI_PIN_VC_CAM3_SCL 266
+#define CAPRI_PIN_VC_CAM3_SDA 267
+
+#define CAPRI_PIN_DESC(a, b, c) \
+ { .number = a, .name = b, .drv_data = &c##_pin }
+
+/*
+ * Pin description definition. The order here must be the same as defined in
+ * the PADCTRLREG block in the RDB, since the pin number is used as an index
+ * into this array.
+ */
+static const struct pinctrl_pin_desc capri_pinctrl_pins[] = {
+ CAPRI_PIN_DESC(CAPRI_PIN_ADCSYNC, "adcsync", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_BAT_RM, "bat_rm", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_BSC1_SCL, "bsc1_scl", i2c),
+ CAPRI_PIN_DESC(CAPRI_PIN_BSC1_SDA, "bsc1_sda", i2c),
+ CAPRI_PIN_DESC(CAPRI_PIN_BSC2_SCL, "bsc2_scl", i2c),
+ CAPRI_PIN_DESC(CAPRI_PIN_BSC2_SDA, "bsc2_sda", i2c),
+ CAPRI_PIN_DESC(CAPRI_PIN_CLASSGPWR, "classgpwr", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_CLK_CX8, "clk_cx8", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_CLKOUT_0, "clkout_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_CLKOUT_1, "clkout_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_CLKOUT_2, "clkout_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_CLKOUT_3, "clkout_3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_CLKREQ_IN_0, "clkreq_in_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_CLKREQ_IN_1, "clkreq_in_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_CWS_SYS_REQ1, "cws_sys_req1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_CWS_SYS_REQ2, "cws_sys_req2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_CWS_SYS_REQ3, "cws_sys_req3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_DIGMIC1_CLK, "digmic1_clk", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_DIGMIC1_DQ, "digmic1_dq", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_DIGMIC2_CLK, "digmic2_clk", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_DIGMIC2_DQ, "digmic2_dq", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPEN13, "gpen13", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPEN14, "gpen14", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPEN15, "gpen15", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPIO00, "gpio00", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPIO01, "gpio01", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPIO02, "gpio02", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPIO03, "gpio03", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPIO04, "gpio04", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPIO05, "gpio05", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPIO06, "gpio06", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPIO07, "gpio07", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPIO08, "gpio08", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPIO09, "gpio09", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPIO10, "gpio10", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPIO11, "gpio11", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPIO12, "gpio12", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPIO13, "gpio13", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPIO14, "gpio14", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPS_PABLANK, "gps_pablank", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_GPS_TMARK, "gps_tmark", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_HDMI_SCL, "hdmi_scl", hdmi),
+ CAPRI_PIN_DESC(CAPRI_PIN_HDMI_SDA, "hdmi_sda", hdmi),
+ CAPRI_PIN_DESC(CAPRI_PIN_IC_DM, "ic_dm", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_IC_DP, "ic_dp", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_KP_COL_IP_0, "kp_col_ip_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_KP_COL_IP_1, "kp_col_ip_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_KP_COL_IP_2, "kp_col_ip_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_KP_COL_IP_3, "kp_col_ip_3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_KP_ROW_OP_0, "kp_row_op_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_KP_ROW_OP_1, "kp_row_op_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_KP_ROW_OP_2, "kp_row_op_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_KP_ROW_OP_3, "kp_row_op_3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_0, "lcd_b_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_1, "lcd_b_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_2, "lcd_b_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_3, "lcd_b_3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_4, "lcd_b_4", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_5, "lcd_b_5", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_6, "lcd_b_6", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_7, "lcd_b_7", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_0, "lcd_g_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_1, "lcd_g_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_2, "lcd_g_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_3, "lcd_g_3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_4, "lcd_g_4", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_5, "lcd_g_5", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_6, "lcd_g_6", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_7, "lcd_g_7", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_HSYNC, "lcd_hsync", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_OE, "lcd_oe", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_PCLK, "lcd_pclk", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_0, "lcd_r_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_1, "lcd_r_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_2, "lcd_r_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_3, "lcd_r_3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_4, "lcd_r_4", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_5, "lcd_r_5", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_6, "lcd_r_6", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_7, "lcd_r_7", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_LCD_VSYNC, "lcd_vsync", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO0, "mdmgpio0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO1, "mdmgpio1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO2, "mdmgpio2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO3, "mdmgpio3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO4, "mdmgpio4", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO5, "mdmgpio5", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO6, "mdmgpio6", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO7, "mdmgpio7", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO8, "mdmgpio8", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_0, "mphi_data_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_1, "mphi_data_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_2, "mphi_data_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_3, "mphi_data_3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_4, "mphi_data_4", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_5, "mphi_data_5", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_6, "mphi_data_6", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_7, "mphi_data_7", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_8, "mphi_data_8", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_9, "mphi_data_9", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_10, "mphi_data_10", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_11, "mphi_data_11", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_12, "mphi_data_12", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_13, "mphi_data_13", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_14, "mphi_data_14", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_15, "mphi_data_15", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HA0, "mphi_ha0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HAT0, "mphi_hat0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HAT1, "mphi_hat1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HCE0_N, "mphi_hce0_n", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HCE1_N, "mphi_hce1_n", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HRD_N, "mphi_hrd_n", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HWR_N, "mphi_hwr_n", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_RUN0, "mphi_run0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MPHI_RUN1, "mphi_run1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MTX_SCAN_CLK, "mtx_scan_clk", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_MTX_SCAN_DATA, "mtx_scan_data", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_0, "nand_ad_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_1, "nand_ad_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_2, "nand_ad_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_3, "nand_ad_3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_4, "nand_ad_4", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_5, "nand_ad_5", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_6, "nand_ad_6", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_7, "nand_ad_7", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_ALE, "nand_ale", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_CEN_0, "nand_cen_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_CEN_1, "nand_cen_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_CLE, "nand_cle", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_OEN, "nand_oen", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_RDY_0, "nand_rdy_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_RDY_1, "nand_rdy_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_WEN, "nand_wen", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_NAND_WP, "nand_wp", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_PC1, "pc1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_PC2, "pc2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_PMU_INT, "pmu_int", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_PMU_SCL, "pmu_scl", i2c),
+ CAPRI_PIN_DESC(CAPRI_PIN_PMU_SDA, "pmu_sda", i2c),
+ CAPRI_PIN_DESC(CAPRI_PIN_RFST2G_MTSLOTEN3G, "rfst2g_mtsloten3g", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RX_CTL, "rgmii_0_rx_ctl", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RXC, "rgmii_0_rxc", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RXD_0, "rgmii_0_rxd_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RXD_1, "rgmii_0_rxd_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RXD_2, "rgmii_0_rxd_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RXD_3, "rgmii_0_rxd_3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TX_CTL, "rgmii_0_tx_ctl", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TXC, "rgmii_0_txc", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TXD_0, "rgmii_0_txd_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TXD_1, "rgmii_0_txd_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TXD_2, "rgmii_0_txd_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TXD_3, "rgmii_0_txd_3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RX_CTL, "rgmii_1_rx_ctl", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RXC, "rgmii_1_rxc", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RXD_0, "rgmii_1_rxd_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RXD_1, "rgmii_1_rxd_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RXD_2, "rgmii_1_rxd_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RXD_3, "rgmii_1_rxd_3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TX_CTL, "rgmii_1_tx_ctl", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TXC, "rgmii_1_txc", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TXD_0, "rgmii_1_txd_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TXD_1, "rgmii_1_txd_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TXD_2, "rgmii_1_txd_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TXD_3, "rgmii_1_txd_3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_GPIO_0, "rgmii_gpio_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_GPIO_1, "rgmii_gpio_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_GPIO_2, "rgmii_gpio_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RGMII_GPIO_3, "rgmii_gpio_3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RTXDATA2G_TXDATA3G1, "rtxdata2g_txdata3g1",
+ std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RTXEN2G_TXDATA3G2, "rtxen2g_txdata3g2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RXDATA3G0, "rxdata3g0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RXDATA3G1, "rxdata3g1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_RXDATA3G2, "rxdata3g2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_CLK, "sdio1_clk", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_CMD, "sdio1_cmd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_DATA_0, "sdio1_data_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_DATA_1, "sdio1_data_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_DATA_2, "sdio1_data_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_DATA_3, "sdio1_data_3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_CLK, "sdio4_clk", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_CMD, "sdio4_cmd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_DATA_0, "sdio4_data_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_DATA_1, "sdio4_data_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_DATA_2, "sdio4_data_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_DATA_3, "sdio4_data_3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SIM_CLK, "sim_clk", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SIM_DATA, "sim_data", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SIM_DET, "sim_det", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SIM_RESETN, "sim_resetn", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SIM2_CLK, "sim2_clk", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SIM2_DATA, "sim2_data", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SIM2_DET, "sim2_det", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SIM2_RESETN, "sim2_resetn", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SRI_C, "sri_c", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SRI_D, "sri_d", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SRI_E, "sri_e", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP_EXTCLK, "ssp_extclk", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP0_CLK, "ssp0_clk", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP0_FS, "ssp0_fs", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP0_RXD, "ssp0_rxd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP0_TXD, "ssp0_txd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP2_CLK, "ssp2_clk", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP2_FS_0, "ssp2_fs_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP2_FS_1, "ssp2_fs_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP2_FS_2, "ssp2_fs_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP2_FS_3, "ssp2_fs_3", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP2_RXD_0, "ssp2_rxd_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP2_RXD_1, "ssp2_rxd_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP2_TXD_0, "ssp2_txd_0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP2_TXD_1, "ssp2_txd_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP3_CLK, "ssp3_clk", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP3_FS, "ssp3_fs", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP3_RXD, "ssp3_rxd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP3_TXD, "ssp3_txd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP4_CLK, "ssp4_clk", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP4_FS, "ssp4_fs", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP4_RXD, "ssp4_rxd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP4_TXD, "ssp4_txd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP5_CLK, "ssp5_clk", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP5_FS, "ssp5_fs", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP5_RXD, "ssp5_rxd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP5_TXD, "ssp5_txd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP6_CLK, "ssp6_clk", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP6_FS, "ssp6_fs", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP6_RXD, "ssp6_rxd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SSP6_TXD, "ssp6_txd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_STAT_1, "stat_1", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_STAT_2, "stat_2", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_SYSCLKEN, "sysclken", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACECLK, "traceclk", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT00, "tracedt00", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT01, "tracedt01", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT02, "tracedt02", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT03, "tracedt03", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT04, "tracedt04", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT05, "tracedt05", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT06, "tracedt06", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT07, "tracedt07", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT08, "tracedt08", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT09, "tracedt09", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT10, "tracedt10", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT11, "tracedt11", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT12, "tracedt12", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT13, "tracedt13", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT14, "tracedt14", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT15, "tracedt15", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TXDATA3G0, "txdata3g0", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_TXPWRIND, "txpwrind", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_UARTB1_UCTS, "uartb1_ucts", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_UARTB1_URTS, "uartb1_urts", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_UARTB1_URXD, "uartb1_urxd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_UARTB1_UTXD, "uartb1_utxd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_UARTB2_URXD, "uartb2_urxd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_UARTB2_UTXD, "uartb2_utxd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_UARTB3_UCTS, "uartb3_ucts", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_UARTB3_URTS, "uartb3_urts", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_UARTB3_URXD, "uartb3_urxd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_UARTB3_UTXD, "uartb3_utxd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_UARTB4_UCTS, "uartb4_ucts", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_UARTB4_URTS, "uartb4_urts", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_UARTB4_URXD, "uartb4_urxd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_UARTB4_UTXD, "uartb4_utxd", std),
+ CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM1_SCL, "vc_cam1_scl", i2c),
+ CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM1_SDA, "vc_cam1_sda", i2c),
+ CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM2_SCL, "vc_cam2_scl", i2c),
+ CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM2_SDA, "vc_cam2_sda", i2c),
+ CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM3_SCL, "vc_cam3_scl", i2c),
+ CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM3_SDA, "vc_cam3_sda", i2c),
+};
+
+static const char * const capri_alt_groups[] = {
+ "adcsync",
+ "bat_rm",
+ "bsc1_scl",
+ "bsc1_sda",
+ "bsc2_scl",
+ "bsc2_sda",
+ "classgpwr",
+ "clk_cx8",
+ "clkout_0",
+ "clkout_1",
+ "clkout_2",
+ "clkout_3",
+ "clkreq_in_0",
+ "clkreq_in_1",
+ "cws_sys_req1",
+ "cws_sys_req2",
+ "cws_sys_req3",
+ "digmic1_clk",
+ "digmic1_dq",
+ "digmic2_clk",
+ "digmic2_dq",
+ "gpen13",
+ "gpen14",
+ "gpen15",
+ "gpio00",
+ "gpio01",
+ "gpio02",
+ "gpio03",
+ "gpio04",
+ "gpio05",
+ "gpio06",
+ "gpio07",
+ "gpio08",
+ "gpio09",
+ "gpio10",
+ "gpio11",
+ "gpio12",
+ "gpio13",
+ "gpio14",
+ "gps_pablank",
+ "gps_tmark",
+ "hdmi_scl",
+ "hdmi_sda",
+ "ic_dm",
+ "ic_dp",
+ "kp_col_ip_0",
+ "kp_col_ip_1",
+ "kp_col_ip_2",
+ "kp_col_ip_3",
+ "kp_row_op_0",
+ "kp_row_op_1",
+ "kp_row_op_2",
+ "kp_row_op_3",
+ "lcd_b_0",
+ "lcd_b_1",
+ "lcd_b_2",
+ "lcd_b_3",
+ "lcd_b_4",
+ "lcd_b_5",
+ "lcd_b_6",
+ "lcd_b_7",
+ "lcd_g_0",
+ "lcd_g_1",
+ "lcd_g_2",
+ "lcd_g_3",
+ "lcd_g_4",
+ "lcd_g_5",
+ "lcd_g_6",
+ "lcd_g_7",
+ "lcd_hsync",
+ "lcd_oe",
+ "lcd_pclk",
+ "lcd_r_0",
+ "lcd_r_1",
+ "lcd_r_2",
+ "lcd_r_3",
+ "lcd_r_4",
+ "lcd_r_5",
+ "lcd_r_6",
+ "lcd_r_7",
+ "lcd_vsync",
+ "mdmgpio0",
+ "mdmgpio1",
+ "mdmgpio2",
+ "mdmgpio3",
+ "mdmgpio4",
+ "mdmgpio5",
+ "mdmgpio6",
+ "mdmgpio7",
+ "mdmgpio8",
+ "mphi_data_0",
+ "mphi_data_1",
+ "mphi_data_2",
+ "mphi_data_3",
+ "mphi_data_4",
+ "mphi_data_5",
+ "mphi_data_6",
+ "mphi_data_7",
+ "mphi_data_8",
+ "mphi_data_9",
+ "mphi_data_10",
+ "mphi_data_11",
+ "mphi_data_12",
+ "mphi_data_13",
+ "mphi_data_14",
+ "mphi_data_15",
+ "mphi_ha0",
+ "mphi_hat0",
+ "mphi_hat1",
+ "mphi_hce0_n",
+ "mphi_hce1_n",
+ "mphi_hrd_n",
+ "mphi_hwr_n",
+ "mphi_run0",
+ "mphi_run1",
+ "mtx_scan_clk",
+ "mtx_scan_data",
+ "nand_ad_0",
+ "nand_ad_1",
+ "nand_ad_2",
+ "nand_ad_3",
+ "nand_ad_4",
+ "nand_ad_5",
+ "nand_ad_6",
+ "nand_ad_7",
+ "nand_ale",
+ "nand_cen_0",
+ "nand_cen_1",
+ "nand_cle",
+ "nand_oen",
+ "nand_rdy_0",
+ "nand_rdy_1",
+ "nand_wen",
+ "nand_wp",
+ "pc1",
+ "pc2",
+ "pmu_int",
+ "pmu_scl",
+ "pmu_sda",
+ "rfst2g_mtsloten3g",
+ "rgmii_0_rx_ctl",
+ "rgmii_0_rxc",
+ "rgmii_0_rxd_0",
+ "rgmii_0_rxd_1",
+ "rgmii_0_rxd_2",
+ "rgmii_0_rxd_3",
+ "rgmii_0_tx_ctl",
+ "rgmii_0_txc",
+ "rgmii_0_txd_0",
+ "rgmii_0_txd_1",
+ "rgmii_0_txd_2",
+ "rgmii_0_txd_3",
+ "rgmii_1_rx_ctl",
+ "rgmii_1_rxc",
+ "rgmii_1_rxd_0",
+ "rgmii_1_rxd_1",
+ "rgmii_1_rxd_2",
+ "rgmii_1_rxd_3",
+ "rgmii_1_tx_ctl",
+ "rgmii_1_txc",
+ "rgmii_1_txd_0",
+ "rgmii_1_txd_1",
+ "rgmii_1_txd_2",
+ "rgmii_1_txd_3",
+ "rgmii_gpio_0",
+ "rgmii_gpio_1",
+ "rgmii_gpio_2",
+ "rgmii_gpio_3",
+ "rtxdata2g_txdata3g1",
+ "rtxen2g_txdata3g2",
+ "rxdata3g0",
+ "rxdata3g1",
+ "rxdata3g2",
+ "sdio1_clk",
+ "sdio1_cmd",
+ "sdio1_data_0",
+ "sdio1_data_1",
+ "sdio1_data_2",
+ "sdio1_data_3",
+ "sdio4_clk",
+ "sdio4_cmd",
+ "sdio4_data_0",
+ "sdio4_data_1",
+ "sdio4_data_2",
+ "sdio4_data_3",
+ "sim_clk",
+ "sim_data",
+ "sim_det",
+ "sim_resetn",
+ "sim2_clk",
+ "sim2_data",
+ "sim2_det",
+ "sim2_resetn",
+ "sri_c",
+ "sri_d",
+ "sri_e",
+ "ssp_extclk",
+ "ssp0_clk",
+ "ssp0_fs",
+ "ssp0_rxd",
+ "ssp0_txd",
+ "ssp2_clk",
+ "ssp2_fs_0",
+ "ssp2_fs_1",
+ "ssp2_fs_2",
+ "ssp2_fs_3",
+ "ssp2_rxd_0",
+ "ssp2_rxd_1",
+ "ssp2_txd_0",
+ "ssp2_txd_1",
+ "ssp3_clk",
+ "ssp3_fs",
+ "ssp3_rxd",
+ "ssp3_txd",
+ "ssp4_clk",
+ "ssp4_fs",
+ "ssp4_rxd",
+ "ssp4_txd",
+ "ssp5_clk",
+ "ssp5_fs",
+ "ssp5_rxd",
+ "ssp5_txd",
+ "ssp6_clk",
+ "ssp6_fs",
+ "ssp6_rxd",
+ "ssp6_txd",
+ "stat_1",
+ "stat_2",
+ "sysclken",
+ "traceclk",
+ "tracedt00",
+ "tracedt01",
+ "tracedt02",
+ "tracedt03",
+ "tracedt04",
+ "tracedt05",
+ "tracedt06",
+ "tracedt07",
+ "tracedt08",
+ "tracedt09",
+ "tracedt10",
+ "tracedt11",
+ "tracedt12",
+ "tracedt13",
+ "tracedt14",
+ "tracedt15",
+ "txdata3g0",
+ "txpwrind",
+ "uartb1_ucts",
+ "uartb1_urts",
+ "uartb1_urxd",
+ "uartb1_utxd",
+ "uartb2_urxd",
+ "uartb2_utxd",
+ "uartb3_ucts",
+ "uartb3_urts",
+ "uartb3_urxd",
+ "uartb3_utxd",
+ "uartb4_ucts",
+ "uartb4_urts",
+ "uartb4_urxd",
+ "uartb4_utxd",
+ "vc_cam1_scl",
+ "vc_cam1_sda",
+ "vc_cam2_scl",
+ "vc_cam2_sda",
+ "vc_cam3_scl",
+ "vc_cam3_sda",
+};
+
+/* Every pin can implement all ALT1-ALT4 functions */
+#define CAPRI_PIN_FUNCTION(fcn_name) \
+{ \
+ .name = #fcn_name, \
+ .groups = capri_alt_groups, \
+ .ngroups = ARRAY_SIZE(capri_alt_groups), \
+}
+
+static const struct capri_pin_function capri_functions[] = {
+ CAPRI_PIN_FUNCTION(alt1),
+ CAPRI_PIN_FUNCTION(alt2),
+ CAPRI_PIN_FUNCTION(alt3),
+ CAPRI_PIN_FUNCTION(alt4),
+};
+
+static struct capri_pinctrl_data capri_pinctrl = {
+ .pins = capri_pinctrl_pins,
+ .npins = ARRAY_SIZE(capri_pinctrl_pins),
+ .functions = capri_functions,
+ .nfunctions = ARRAY_SIZE(capri_functions),
+};
+
+static inline enum capri_pin_type pin_type_get(struct pinctrl_dev *pctldev,
+ unsigned pin)
+{
+ struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ if (pin >= pdata->npins)
+ return CAPRI_PIN_TYPE_UNKNOWN;
+
+ return *(enum capri_pin_type *)(pdata->pins[pin].drv_data);
+}
+
+#define CAPRI_PIN_SHIFT(type, param) \
+ (CAPRI_ ## type ## _PIN_REG_ ## param ## _SHIFT)
+
+#define CAPRI_PIN_MASK(type, param) \
+ (CAPRI_ ## type ## _PIN_REG_ ## param ## _MASK)
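+
+/*
+ * For example, CAPRI_PIN_SHIFT(STD, PULL_UP) expands to
+ * CAPRI_STD_PIN_REG_PULL_UP_SHIFT (5) and CAPRI_PIN_MASK(STD, PULL_UP) to
+ * CAPRI_STD_PIN_REG_PULL_UP_MASK (0x0020), matching the register layout
+ * defined above.
+ */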
+
+/*
+ * This helper function is used to build up the value and mask used to write to
+ * a pin register, but does not actually write to the register.
+ */
+static inline void capri_pin_update(u32 *reg_val, u32 *reg_mask, u32 param_val,
+ u32 param_shift, u32 param_mask)
+{
+ *reg_val &= ~param_mask;
+ *reg_val |= (param_val << param_shift) & param_mask;
+ *reg_mask |= param_mask;
+}
+
+static struct regmap_config capri_pinctrl_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = CAPRI_PIN_VC_CAM3_SDA,
+};
+
+static int capri_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ return pdata->npins;
+}
+
+static const char *capri_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ return pdata->pins[group].name;
+}
+
+static int capri_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = &pdata->pins[group].number;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static void capri_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned offset)
+{
+ seq_printf(s, " %s", dev_name(pctldev->dev));
+}
+
+static struct pinctrl_ops capri_pinctrl_ops = {
+ .get_groups_count = capri_pinctrl_get_groups_count,
+ .get_group_name = capri_pinctrl_get_group_name,
+ .get_group_pins = capri_pinctrl_get_group_pins,
+ .pin_dbg_show = capri_pinctrl_pin_dbg_show,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int capri_pinctrl_get_fcns_count(struct pinctrl_dev *pctldev)
+{
+ struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ return pdata->nfunctions;
+}
+
+static const char *capri_pinctrl_get_fcn_name(struct pinctrl_dev *pctldev,
+ unsigned function)
+{
+ struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ return pdata->functions[function].name;
+}
+
+static int capri_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev,
+ unsigned function,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pdata->functions[function].groups;
+ *num_groups = pdata->functions[function].ngroups;
+
+ return 0;
+}
+
+static int capri_pinmux_enable(struct pinctrl_dev *pctldev,
+ unsigned function,
+ unsigned group)
+{
+ struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ const struct capri_pin_function *f = &pdata->functions[function];
+ u32 offset = 4 * pdata->pins[group].number;
+ int rc = 0;
+
+ dev_dbg(pctldev->dev,
+ "%s(): Enable function %s (%d) of pin %s (%d) @offset 0x%x.\n",
+ __func__, f->name, function, pdata->pins[group].name,
+ pdata->pins[group].number, offset);
+
+ rc = regmap_update_bits(pdata->regmap, offset, CAPRI_PIN_REG_F_SEL_MASK,
+ function << CAPRI_PIN_REG_F_SEL_SHIFT);
+ if (rc)
+ dev_err(pctldev->dev,
+ "Error updating register for pin %s (%d).\n",
+ pdata->pins[group].name, pdata->pins[group].number);
+
+ return rc;
+}
+
+static struct pinmux_ops capri_pinctrl_pinmux_ops = {
+ .get_functions_count = capri_pinctrl_get_fcns_count,
+ .get_function_name = capri_pinctrl_get_fcn_name,
+ .get_function_groups = capri_pinctrl_get_fcn_groups,
+ .enable = capri_pinmux_enable,
+};
+
+static int capri_pinctrl_pin_config_get(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *config)
+{
+ return -ENOTSUPP;
+}
+
+
+/* Goes through the configs and updates register val/mask */
+static int capri_std_pin_update(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *configs,
+ unsigned num_configs,
+ u32 *val,
+ u32 *mask)
+{
+ struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ int i;
+ enum pin_config_param param;
+ u16 arg;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ arg = (arg >= 1 ? 1 : 0);
+ capri_pin_update(val, mask, arg,
+ CAPRI_PIN_SHIFT(STD, HYST),
+ CAPRI_PIN_MASK(STD, HYST));
+ break;
+ /*
+ * The pin bias can only be one of pull-up, pull-down, or
+ * disable. The user does not need to specify a value for the
+ * property, and the default value from pinconf-generic is
+ * ignored.
+ */
+ case PIN_CONFIG_BIAS_DISABLE:
+ capri_pin_update(val, mask, 0,
+ CAPRI_PIN_SHIFT(STD, PULL_UP),
+ CAPRI_PIN_MASK(STD, PULL_UP));
+ capri_pin_update(val, mask, 0,
+ CAPRI_PIN_SHIFT(STD, PULL_DN),
+ CAPRI_PIN_MASK(STD, PULL_DN));
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ capri_pin_update(val, mask, 1,
+ CAPRI_PIN_SHIFT(STD, PULL_UP),
+ CAPRI_PIN_MASK(STD, PULL_UP));
+ capri_pin_update(val, mask, 0,
+ CAPRI_PIN_SHIFT(STD, PULL_DN),
+ CAPRI_PIN_MASK(STD, PULL_DN));
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ capri_pin_update(val, mask, 0,
+ CAPRI_PIN_SHIFT(STD, PULL_UP),
+ CAPRI_PIN_MASK(STD, PULL_UP));
+ capri_pin_update(val, mask, 1,
+ CAPRI_PIN_SHIFT(STD, PULL_DN),
+ CAPRI_PIN_MASK(STD, PULL_DN));
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ arg = (arg >= 1 ? 1 : 0);
+ capri_pin_update(val, mask, arg,
+ CAPRI_PIN_SHIFT(STD, SLEW),
+ CAPRI_PIN_MASK(STD, SLEW));
+ break;
+
+ case PIN_CONFIG_INPUT_ENABLE:
+ /* inverted since register is for input _disable_ */
+ arg = (arg >= 1 ? 0 : 1);
+ capri_pin_update(val, mask, arg,
+ CAPRI_PIN_SHIFT(STD, INPUT_DIS),
+ CAPRI_PIN_MASK(STD, INPUT_DIS));
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ /* Valid range is 2-16 mA, even numbers only */
+ if ((arg < 2) || (arg > 16) || (arg % 2)) {
+ dev_err(pctldev->dev,
+ "Invalid Drive Strength value (%d) for "
+ "pin %s (%d). Valid values are "
+ "(2..16) mA, even numbers only.\n",
+ arg, pdata->pins[pin].name, pin);
+ return -EINVAL;
+ }
+ capri_pin_update(val, mask, (arg/2)-1,
+ CAPRI_PIN_SHIFT(STD, DRV_STR),
+ CAPRI_PIN_MASK(STD, DRV_STR));
+ break;
+
+ default:
+ dev_err(pctldev->dev,
+ "Unrecognized pin config %d for pin %s (%d).\n",
+ param, pdata->pins[pin].name, pin);
+ return -EINVAL;
+
+ } /* switch config */
+ } /* for each config */
+
+ return 0;
+}
+
+/*
+ * The pull-up strength for an I2C pin is represented by bits 4-6 in the
+ * register with the following mapping:
+ * 0b000: No pull-up
+ * 0b001: 1200 Ohm
+ * 0b010: 1800 Ohm
+ * 0b011: 720 Ohm
+ * 0b100: 2700 Ohm
+ * 0b101: 831 Ohm
+ * 0b110: 1080 Ohm
+ * 0b111: 568 Ohm
+ * This array maps pull-up strength in Ohms to register values (1+index).
+ */
+static const u16 capri_pullup_map[] = {1200, 1800, 720, 2700, 831, 1080, 568};
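+
+/*
+ * For example, a requested 1200 Ohm pull-up is found at index 0 of
+ * capri_pullup_map, so the PULL_UP_STR field is programmed with 1 (0b001).
+ */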
+
+/* Goes through the configs and updates register val/mask */
+static int capri_i2c_pin_update(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *configs,
+ unsigned num_configs,
+ u32 *val,
+ u32 *mask)
+{
+ struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ int i, j;
+ enum pin_config_param param;
+ u16 arg;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ for (j = 0; j < ARRAY_SIZE(capri_pullup_map); j++)
+ if (capri_pullup_map[j] == arg)
+ break;
+
+ if (j == ARRAY_SIZE(capri_pullup_map)) {
+ dev_err(pctldev->dev,
+ "Invalid pull-up value (%d) for pin %s "
+ "(%d). Valid values are 568, 720, 831, "
+ "1080, 1200, 1800, 2700 Ohms.\n",
+ arg, pdata->pins[pin].name, pin);
+ return -EINVAL;
+ }
+
+ capri_pin_update(val, mask, j+1,
+ CAPRI_PIN_SHIFT(I2C, PULL_UP_STR),
+ CAPRI_PIN_MASK(I2C, PULL_UP_STR));
+ break;
+
+ case PIN_CONFIG_BIAS_DISABLE:
+ capri_pin_update(val, mask, 0,
+ CAPRI_PIN_SHIFT(I2C, PULL_UP_STR),
+ CAPRI_PIN_MASK(I2C, PULL_UP_STR));
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ arg = (arg >= 1 ? 1 : 0);
+ capri_pin_update(val, mask, arg,
+ CAPRI_PIN_SHIFT(I2C, SLEW),
+ CAPRI_PIN_MASK(I2C, SLEW));
+ break;
+
+ case PIN_CONFIG_INPUT_ENABLE:
+ /* inverted since register is for input _disable_ */
+ arg = (arg >= 1 ? 0 : 1);
+ capri_pin_update(val, mask, arg,
+ CAPRI_PIN_SHIFT(I2C, INPUT_DIS),
+ CAPRI_PIN_MASK(I2C, INPUT_DIS));
+ break;
+
+ default:
+ dev_err(pctldev->dev,
+ "Unrecognized pin config %d for pin %s (%d).\n",
+ param, pdata->pins[pin].name, pin);
+ return -EINVAL;
+
+ } /* switch config */
+ } /* for each config */
+
+ return 0;
+}
+
+/* Goes through the configs and updates register val/mask */
+static int capri_hdmi_pin_update(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *configs,
+ unsigned num_configs,
+ u32 *val,
+ u32 *mask)
+{
+ struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ int i;
+ enum pin_config_param param;
+ u16 arg;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_SLEW_RATE:
+ arg = (arg >= 1 ? 1 : 0);
+ capri_pin_update(val, mask, arg,
+ CAPRI_PIN_SHIFT(HDMI, MODE),
+ CAPRI_PIN_MASK(HDMI, MODE));
+ break;
+
+ case PIN_CONFIG_INPUT_ENABLE:
+ /* inverted since register is for input _disable_ */
+ arg = (arg >= 1 ? 0 : 1);
+ capri_pin_update(val, mask, arg,
+ CAPRI_PIN_SHIFT(HDMI, INPUT_DIS),
+ CAPRI_PIN_MASK(HDMI, INPUT_DIS));
+ break;
+
+ default:
+ dev_err(pctldev->dev,
+ "Unrecognized pin config %d for pin %s (%d).\n",
+ param, pdata->pins[pin].name, pin);
+ return -EINVAL;
+
+ } /* switch config */
+ } /* for each config */
+
+ return 0;
+}
+
+static int capri_pinctrl_pin_config_set(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *configs,
+ unsigned num_configs)
+{
+ struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ enum capri_pin_type pin_type;
+ u32 offset = 4 * pin;
+ u32 cfg_val, cfg_mask;
+ int rc;
+
+ cfg_val = 0;
+ cfg_mask = 0;
+ pin_type = pin_type_get(pctldev, pin);
+
+ /* Different pins have different configuration options */
+ switch (pin_type) {
+ case CAPRI_PIN_TYPE_STD:
+ rc = capri_std_pin_update(pctldev, pin, configs, num_configs,
+ &cfg_val, &cfg_mask);
+ break;
+
+ case CAPRI_PIN_TYPE_I2C:
+ rc = capri_i2c_pin_update(pctldev, pin, configs, num_configs,
+ &cfg_val, &cfg_mask);
+ break;
+
+ case CAPRI_PIN_TYPE_HDMI:
+ rc = capri_hdmi_pin_update(pctldev, pin, configs, num_configs,
+ &cfg_val, &cfg_mask);
+ break;
+
+ default:
+ dev_err(pctldev->dev, "Unknown pin type for pin %s (%d).\n",
+ pdata->pins[pin].name, pin);
+ return -EINVAL;
+
+ } /* switch pin type */
+
+ if (rc)
+ return rc;
+
+ dev_dbg(pctldev->dev,
+ "%s(): Set pin %s (%d) with config 0x%x, mask 0x%x\n",
+ __func__, pdata->pins[pin].name, pin, cfg_val, cfg_mask);
+
+ rc = regmap_update_bits(pdata->regmap, offset, cfg_mask, cfg_val);
+ if (rc) {
+ dev_err(pctldev->dev,
+ "Error updating register for pin %s (%d).\n",
+ pdata->pins[pin].name, pin);
+ return rc;
+ }
+
+ return 0;
+}
+
+static struct pinconf_ops capri_pinctrl_pinconf_ops = {
+ .pin_config_get = capri_pinctrl_pin_config_get,
+ .pin_config_set = capri_pinctrl_pin_config_set,
+};
+
+static struct pinctrl_desc capri_pinctrl_desc = {
+ /* name, pins, npins members initialized in probe function */
+ .pctlops = &capri_pinctrl_ops,
+ .pmxops = &capri_pinctrl_pinmux_ops,
+ .confops = &capri_pinctrl_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+int __init capri_pinctrl_probe(struct platform_device *pdev)
+{
+ struct capri_pinctrl_data *pdata = &capri_pinctrl;
+ struct resource *res;
+ struct pinctrl_dev *pctl;
+
+ /* So far we can assume there is only 1 bank of registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Missing MEM resource\n");
+ return -ENODEV;
+ }
+
+ pdata->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->reg_base)) {
+ dev_err(&pdev->dev, "Failed to ioremap MEM resource\n");
+ return -ENODEV;
+ }
+
+ /* Initialize the dynamic part of pinctrl_desc */
+ pdata->regmap = devm_regmap_init_mmio(&pdev->dev, pdata->reg_base,
+ &capri_pinctrl_regmap_config);
+ if (IS_ERR(pdata->regmap)) {
+ dev_err(&pdev->dev, "Regmap MMIO init failed.\n");
+ return -ENODEV;
+ }
+
+ capri_pinctrl_desc.name = dev_name(&pdev->dev);
+ capri_pinctrl_desc.pins = capri_pinctrl.pins;
+ capri_pinctrl_desc.npins = capri_pinctrl.npins;
+
+ pctl = pinctrl_register(&capri_pinctrl_desc,
+ &pdev->dev,
+ pdata);
+ if (!pctl) {
+ dev_err(&pdev->dev, "Failed to register pinctrl\n");
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, pdata);
+
+ return 0;
+}
+
+static struct of_device_id capri_pinctrl_of_match[] = {
+ { .compatible = "brcm,capri-pinctrl", },
+ { },
+};
+
+static struct platform_driver capri_pinctrl_driver = {
+ .driver = {
+ .name = "bcm-capri-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = capri_pinctrl_of_match,
+ },
+};
+
+module_platform_driver_probe(capri_pinctrl_driver, capri_pinctrl_probe);
+
+MODULE_AUTHOR("Sherman Yin <syin@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom Capri pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-imx1-core.c b/drivers/pinctrl/pinctrl-imx1-core.c
index f77914ac081a..815384b377b5 100644
--- a/drivers/pinctrl/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/pinctrl-imx1-core.c
@@ -45,7 +45,7 @@ struct imx1_pinctrl {
#define MX1_DDIR 0x00
#define MX1_OCR 0x04
#define MX1_ICONFA 0x0c
-#define MX1_ICONFB 0x10
+#define MX1_ICONFB 0x14
#define MX1_GIUS 0x20
#define MX1_GPR 0x38
#define MX1_PUEN 0x40
@@ -97,13 +97,13 @@ static void imx1_write_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
u32 old_val;
u32 new_val;
- dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n",
- reg, offset, value);
-
/* Use the next register if the pin's port pin number is >=16 */
if (pin_id % 32 >= 16)
reg += 0x04;
+ dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n",
+ reg, offset, value);
+
/* Get current state of pins */
old_val = readl(reg);
old_val &= mask;
@@ -139,7 +139,7 @@ static int imx1_read_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
u32 reg_offset)
{
void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset;
- int offset = pin_id % 16;
+ int offset = (pin_id % 16) * 2;
/* Use the next register if the pin's port pin number is >=16 */
if (pin_id % 32 >= 16)
@@ -638,6 +638,13 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev,
return -EINVAL;
}
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (ret) {
+ pinctrl_unregister(ipctl->pctl);
+ dev_err(&pdev->dev, "Failed to populate subdevices\n");
+ return ret;
+ }
+
dev_info(&pdev->dev, "initialized IMX pinctrl driver\n");
return 0;
diff --git a/drivers/pinctrl/pinctrl-imx25.c b/drivers/pinctrl/pinctrl-imx25.c
new file mode 100644
index 000000000000..1aae1b61c4dc
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-imx25.c
@@ -0,0 +1,351 @@
+/*
+ * imx25 pinctrl driver.
+ *
+ * Copyright 2013 Eukréa Electromatique <denis@eukrea.com>
+ *
+ * This driver was mostly copied from the imx51 pinctrl driver which has:
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro, Inc.
+ *
+ * Author: Denis Carikli <denis@eukrea.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
+enum imx25_pads {
+ MX25_PAD_RESERVE0 = 1,
+ MX25_PAD_RESERVE1 = 2,
+ MX25_PAD_A10 = 3,
+ MX25_PAD_A13 = 4,
+ MX25_PAD_A14 = 5,
+ MX25_PAD_A15 = 6,
+ MX25_PAD_A16 = 7,
+ MX25_PAD_A17 = 8,
+ MX25_PAD_A18 = 9,
+ MX25_PAD_A19 = 10,
+ MX25_PAD_A20 = 11,
+ MX25_PAD_A21 = 12,
+ MX25_PAD_A22 = 13,
+ MX25_PAD_A23 = 14,
+ MX25_PAD_A24 = 15,
+ MX25_PAD_A25 = 16,
+ MX25_PAD_EB0 = 17,
+ MX25_PAD_EB1 = 18,
+ MX25_PAD_OE = 19,
+ MX25_PAD_CS0 = 20,
+ MX25_PAD_CS1 = 21,
+ MX25_PAD_CS4 = 22,
+ MX25_PAD_CS5 = 23,
+ MX25_PAD_NF_CE0 = 24,
+ MX25_PAD_ECB = 25,
+ MX25_PAD_LBA = 26,
+ MX25_PAD_BCLK = 27,
+ MX25_PAD_RW = 28,
+ MX25_PAD_NFWE_B = 29,
+ MX25_PAD_NFRE_B = 30,
+ MX25_PAD_NFALE = 31,
+ MX25_PAD_NFCLE = 32,
+ MX25_PAD_NFWP_B = 33,
+ MX25_PAD_NFRB = 34,
+ MX25_PAD_D15 = 35,
+ MX25_PAD_D14 = 36,
+ MX25_PAD_D13 = 37,
+ MX25_PAD_D12 = 38,
+ MX25_PAD_D11 = 39,
+ MX25_PAD_D10 = 40,
+ MX25_PAD_D9 = 41,
+ MX25_PAD_D8 = 42,
+ MX25_PAD_D7 = 43,
+ MX25_PAD_D6 = 44,
+ MX25_PAD_D5 = 45,
+ MX25_PAD_D4 = 46,
+ MX25_PAD_D3 = 47,
+ MX25_PAD_D2 = 48,
+ MX25_PAD_D1 = 49,
+ MX25_PAD_D0 = 50,
+ MX25_PAD_LD0 = 51,
+ MX25_PAD_LD1 = 52,
+ MX25_PAD_LD2 = 53,
+ MX25_PAD_LD3 = 54,
+ MX25_PAD_LD4 = 55,
+ MX25_PAD_LD5 = 56,
+ MX25_PAD_LD6 = 57,
+ MX25_PAD_LD7 = 58,
+ MX25_PAD_LD8 = 59,
+ MX25_PAD_LD9 = 60,
+ MX25_PAD_LD10 = 61,
+ MX25_PAD_LD11 = 62,
+ MX25_PAD_LD12 = 63,
+ MX25_PAD_LD13 = 64,
+ MX25_PAD_LD14 = 65,
+ MX25_PAD_LD15 = 66,
+ MX25_PAD_HSYNC = 67,
+ MX25_PAD_VSYNC = 68,
+ MX25_PAD_LSCLK = 69,
+ MX25_PAD_OE_ACD = 70,
+ MX25_PAD_CONTRAST = 71,
+ MX25_PAD_PWM = 72,
+ MX25_PAD_CSI_D2 = 73,
+ MX25_PAD_CSI_D3 = 74,
+ MX25_PAD_CSI_D4 = 75,
+ MX25_PAD_CSI_D5 = 76,
+ MX25_PAD_CSI_D6 = 77,
+ MX25_PAD_CSI_D7 = 78,
+ MX25_PAD_CSI_D8 = 79,
+ MX25_PAD_CSI_D9 = 80,
+ MX25_PAD_CSI_MCLK = 81,
+ MX25_PAD_CSI_VSYNC = 82,
+ MX25_PAD_CSI_HSYNC = 83,
+ MX25_PAD_CSI_PIXCLK = 84,
+ MX25_PAD_I2C1_CLK = 85,
+ MX25_PAD_I2C1_DAT = 86,
+ MX25_PAD_CSPI1_MOSI = 87,
+ MX25_PAD_CSPI1_MISO = 88,
+ MX25_PAD_CSPI1_SS0 = 89,
+ MX25_PAD_CSPI1_SS1 = 90,
+ MX25_PAD_CSPI1_SCLK = 91,
+ MX25_PAD_CSPI1_RDY = 92,
+ MX25_PAD_UART1_RXD = 93,
+ MX25_PAD_UART1_TXD = 94,
+ MX25_PAD_UART1_RTS = 95,
+ MX25_PAD_UART1_CTS = 96,
+ MX25_PAD_UART2_RXD = 97,
+ MX25_PAD_UART2_TXD = 98,
+ MX25_PAD_UART2_RTS = 99,
+ MX25_PAD_UART2_CTS = 100,
+ MX25_PAD_SD1_CMD = 101,
+ MX25_PAD_SD1_CLK = 102,
+ MX25_PAD_SD1_DATA0 = 103,
+ MX25_PAD_SD1_DATA1 = 104,
+ MX25_PAD_SD1_DATA2 = 105,
+ MX25_PAD_SD1_DATA3 = 106,
+ MX25_PAD_KPP_ROW0 = 107,
+ MX25_PAD_KPP_ROW1 = 108,
+ MX25_PAD_KPP_ROW2 = 109,
+ MX25_PAD_KPP_ROW3 = 110,
+ MX25_PAD_KPP_COL0 = 111,
+ MX25_PAD_KPP_COL1 = 112,
+ MX25_PAD_KPP_COL2 = 113,
+ MX25_PAD_KPP_COL3 = 114,
+ MX25_PAD_FEC_MDC = 115,
+ MX25_PAD_FEC_MDIO = 116,
+ MX25_PAD_FEC_TDATA0 = 117,
+ MX25_PAD_FEC_TDATA1 = 118,
+ MX25_PAD_FEC_TX_EN = 119,
+ MX25_PAD_FEC_RDATA0 = 120,
+ MX25_PAD_FEC_RDATA1 = 121,
+ MX25_PAD_FEC_RX_DV = 122,
+ MX25_PAD_FEC_TX_CLK = 123,
+ MX25_PAD_RTCK = 124,
+ MX25_PAD_DE_B = 125,
+ MX25_PAD_GPIO_A = 126,
+ MX25_PAD_GPIO_B = 127,
+ MX25_PAD_GPIO_C = 128,
+ MX25_PAD_GPIO_D = 129,
+ MX25_PAD_GPIO_E = 130,
+ MX25_PAD_GPIO_F = 131,
+ MX25_PAD_EXT_ARMCLK = 132,
+ MX25_PAD_UPLL_BYPCLK = 133,
+ MX25_PAD_VSTBY_REQ = 134,
+ MX25_PAD_VSTBY_ACK = 135,
+ MX25_PAD_POWER_FAIL = 136,
+ MX25_PAD_CLKO = 137,
+ MX25_PAD_BOOT_MODE0 = 138,
+ MX25_PAD_BOOT_MODE1 = 139,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX25_PAD_RESERVE0),
+ IMX_PINCTRL_PIN(MX25_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(MX25_PAD_A10),
+ IMX_PINCTRL_PIN(MX25_PAD_A13),
+ IMX_PINCTRL_PIN(MX25_PAD_A14),
+ IMX_PINCTRL_PIN(MX25_PAD_A15),
+ IMX_PINCTRL_PIN(MX25_PAD_A16),
+ IMX_PINCTRL_PIN(MX25_PAD_A17),
+ IMX_PINCTRL_PIN(MX25_PAD_A18),
+ IMX_PINCTRL_PIN(MX25_PAD_A19),
+ IMX_PINCTRL_PIN(MX25_PAD_A20),
+ IMX_PINCTRL_PIN(MX25_PAD_A21),
+ IMX_PINCTRL_PIN(MX25_PAD_A22),
+ IMX_PINCTRL_PIN(MX25_PAD_A23),
+ IMX_PINCTRL_PIN(MX25_PAD_A24),
+ IMX_PINCTRL_PIN(MX25_PAD_A25),
+ IMX_PINCTRL_PIN(MX25_PAD_EB0),
+ IMX_PINCTRL_PIN(MX25_PAD_EB1),
+ IMX_PINCTRL_PIN(MX25_PAD_OE),
+ IMX_PINCTRL_PIN(MX25_PAD_CS0),
+ IMX_PINCTRL_PIN(MX25_PAD_CS1),
+ IMX_PINCTRL_PIN(MX25_PAD_CS4),
+ IMX_PINCTRL_PIN(MX25_PAD_CS5),
+ IMX_PINCTRL_PIN(MX25_PAD_NF_CE0),
+ IMX_PINCTRL_PIN(MX25_PAD_ECB),
+ IMX_PINCTRL_PIN(MX25_PAD_LBA),
+ IMX_PINCTRL_PIN(MX25_PAD_BCLK),
+ IMX_PINCTRL_PIN(MX25_PAD_RW),
+ IMX_PINCTRL_PIN(MX25_PAD_NFWE_B),
+ IMX_PINCTRL_PIN(MX25_PAD_NFRE_B),
+ IMX_PINCTRL_PIN(MX25_PAD_NFALE),
+ IMX_PINCTRL_PIN(MX25_PAD_NFCLE),
+ IMX_PINCTRL_PIN(MX25_PAD_NFWP_B),
+ IMX_PINCTRL_PIN(MX25_PAD_NFRB),
+ IMX_PINCTRL_PIN(MX25_PAD_D15),
+ IMX_PINCTRL_PIN(MX25_PAD_D14),
+ IMX_PINCTRL_PIN(MX25_PAD_D13),
+ IMX_PINCTRL_PIN(MX25_PAD_D12),
+ IMX_PINCTRL_PIN(MX25_PAD_D11),
+ IMX_PINCTRL_PIN(MX25_PAD_D10),
+ IMX_PINCTRL_PIN(MX25_PAD_D9),
+ IMX_PINCTRL_PIN(MX25_PAD_D8),
+ IMX_PINCTRL_PIN(MX25_PAD_D7),
+ IMX_PINCTRL_PIN(MX25_PAD_D6),
+ IMX_PINCTRL_PIN(MX25_PAD_D5),
+ IMX_PINCTRL_PIN(MX25_PAD_D4),
+ IMX_PINCTRL_PIN(MX25_PAD_D3),
+ IMX_PINCTRL_PIN(MX25_PAD_D2),
+ IMX_PINCTRL_PIN(MX25_PAD_D1),
+ IMX_PINCTRL_PIN(MX25_PAD_D0),
+ IMX_PINCTRL_PIN(MX25_PAD_LD0),
+ IMX_PINCTRL_PIN(MX25_PAD_LD1),
+ IMX_PINCTRL_PIN(MX25_PAD_LD2),
+ IMX_PINCTRL_PIN(MX25_PAD_LD3),
+ IMX_PINCTRL_PIN(MX25_PAD_LD4),
+ IMX_PINCTRL_PIN(MX25_PAD_LD5),
+ IMX_PINCTRL_PIN(MX25_PAD_LD6),
+ IMX_PINCTRL_PIN(MX25_PAD_LD7),
+ IMX_PINCTRL_PIN(MX25_PAD_LD8),
+ IMX_PINCTRL_PIN(MX25_PAD_LD9),
+ IMX_PINCTRL_PIN(MX25_PAD_LD10),
+ IMX_PINCTRL_PIN(MX25_PAD_LD11),
+ IMX_PINCTRL_PIN(MX25_PAD_LD12),
+ IMX_PINCTRL_PIN(MX25_PAD_LD13),
+ IMX_PINCTRL_PIN(MX25_PAD_LD14),
+ IMX_PINCTRL_PIN(MX25_PAD_LD15),
+ IMX_PINCTRL_PIN(MX25_PAD_HSYNC),
+ IMX_PINCTRL_PIN(MX25_PAD_VSYNC),
+ IMX_PINCTRL_PIN(MX25_PAD_LSCLK),
+ IMX_PINCTRL_PIN(MX25_PAD_OE_ACD),
+ IMX_PINCTRL_PIN(MX25_PAD_CONTRAST),
+ IMX_PINCTRL_PIN(MX25_PAD_PWM),
+ IMX_PINCTRL_PIN(MX25_PAD_CSI_D2),
+ IMX_PINCTRL_PIN(MX25_PAD_CSI_D3),
+ IMX_PINCTRL_PIN(MX25_PAD_CSI_D4),
+ IMX_PINCTRL_PIN(MX25_PAD_CSI_D5),
+ IMX_PINCTRL_PIN(MX25_PAD_CSI_D6),
+ IMX_PINCTRL_PIN(MX25_PAD_CSI_D7),
+ IMX_PINCTRL_PIN(MX25_PAD_CSI_D8),
+ IMX_PINCTRL_PIN(MX25_PAD_CSI_D9),
+ IMX_PINCTRL_PIN(MX25_PAD_CSI_MCLK),
+ IMX_PINCTRL_PIN(MX25_PAD_CSI_VSYNC),
+ IMX_PINCTRL_PIN(MX25_PAD_CSI_HSYNC),
+ IMX_PINCTRL_PIN(MX25_PAD_CSI_PIXCLK),
+ IMX_PINCTRL_PIN(MX25_PAD_I2C1_CLK),
+ IMX_PINCTRL_PIN(MX25_PAD_I2C1_DAT),
+ IMX_PINCTRL_PIN(MX25_PAD_CSPI1_MOSI),
+ IMX_PINCTRL_PIN(MX25_PAD_CSPI1_MISO),
+ IMX_PINCTRL_PIN(MX25_PAD_CSPI1_SS0),
+ IMX_PINCTRL_PIN(MX25_PAD_CSPI1_SS1),
+ IMX_PINCTRL_PIN(MX25_PAD_CSPI1_SCLK),
+ IMX_PINCTRL_PIN(MX25_PAD_CSPI1_RDY),
+ IMX_PINCTRL_PIN(MX25_PAD_UART1_RXD),
+ IMX_PINCTRL_PIN(MX25_PAD_UART1_TXD),
+ IMX_PINCTRL_PIN(MX25_PAD_UART1_RTS),
+ IMX_PINCTRL_PIN(MX25_PAD_UART1_CTS),
+ IMX_PINCTRL_PIN(MX25_PAD_UART2_RXD),
+ IMX_PINCTRL_PIN(MX25_PAD_UART2_TXD),
+ IMX_PINCTRL_PIN(MX25_PAD_UART2_RTS),
+ IMX_PINCTRL_PIN(MX25_PAD_UART2_CTS),
+ IMX_PINCTRL_PIN(MX25_PAD_SD1_CMD),
+ IMX_PINCTRL_PIN(MX25_PAD_SD1_CLK),
+ IMX_PINCTRL_PIN(MX25_PAD_SD1_DATA0),
+ IMX_PINCTRL_PIN(MX25_PAD_SD1_DATA1),
+ IMX_PINCTRL_PIN(MX25_PAD_SD1_DATA2),
+ IMX_PINCTRL_PIN(MX25_PAD_SD1_DATA3),
+ IMX_PINCTRL_PIN(MX25_PAD_KPP_ROW0),
+ IMX_PINCTRL_PIN(MX25_PAD_KPP_ROW1),
+ IMX_PINCTRL_PIN(MX25_PAD_KPP_ROW2),
+ IMX_PINCTRL_PIN(MX25_PAD_KPP_ROW3),
+ IMX_PINCTRL_PIN(MX25_PAD_KPP_COL0),
+ IMX_PINCTRL_PIN(MX25_PAD_KPP_COL1),
+ IMX_PINCTRL_PIN(MX25_PAD_KPP_COL2),
+ IMX_PINCTRL_PIN(MX25_PAD_KPP_COL3),
+ IMX_PINCTRL_PIN(MX25_PAD_FEC_MDC),
+ IMX_PINCTRL_PIN(MX25_PAD_FEC_MDIO),
+ IMX_PINCTRL_PIN(MX25_PAD_FEC_TDATA0),
+ IMX_PINCTRL_PIN(MX25_PAD_FEC_TDATA1),
+ IMX_PINCTRL_PIN(MX25_PAD_FEC_TX_EN),
+ IMX_PINCTRL_PIN(MX25_PAD_FEC_RDATA0),
+ IMX_PINCTRL_PIN(MX25_PAD_FEC_RDATA1),
+ IMX_PINCTRL_PIN(MX25_PAD_FEC_RX_DV),
+ IMX_PINCTRL_PIN(MX25_PAD_FEC_TX_CLK),
+ IMX_PINCTRL_PIN(MX25_PAD_RTCK),
+ IMX_PINCTRL_PIN(MX25_PAD_DE_B),
+ IMX_PINCTRL_PIN(MX25_PAD_GPIO_A),
+ IMX_PINCTRL_PIN(MX25_PAD_GPIO_B),
+ IMX_PINCTRL_PIN(MX25_PAD_GPIO_C),
+ IMX_PINCTRL_PIN(MX25_PAD_GPIO_D),
+ IMX_PINCTRL_PIN(MX25_PAD_GPIO_E),
+ IMX_PINCTRL_PIN(MX25_PAD_GPIO_F),
+ IMX_PINCTRL_PIN(MX25_PAD_EXT_ARMCLK),
+ IMX_PINCTRL_PIN(MX25_PAD_UPLL_BYPCLK),
+ IMX_PINCTRL_PIN(MX25_PAD_VSTBY_REQ),
+ IMX_PINCTRL_PIN(MX25_PAD_VSTBY_ACK),
+ IMX_PINCTRL_PIN(MX25_PAD_POWER_FAIL),
+ IMX_PINCTRL_PIN(MX25_PAD_CLKO),
+ IMX_PINCTRL_PIN(MX25_PAD_BOOT_MODE0),
+ IMX_PINCTRL_PIN(MX25_PAD_BOOT_MODE1),
+};
+
+static struct imx_pinctrl_soc_info imx25_pinctrl_info = {
+ .pins = imx25_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx25_pinctrl_pads),
+};
+
+static const struct of_device_id imx25_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx25-iomuxc", },
+ { /* sentinel */ }
+};
+
+static int imx25_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx_pinctrl_probe(pdev, &imx25_pinctrl_info);
+}
+
+static struct platform_driver imx25_pinctrl_driver = {
+ .driver = {
+ .name = "imx25-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(imx25_pinctrl_of_match),
+ },
+ .probe = imx25_pinctrl_probe,
+ .remove = imx_pinctrl_remove,
+};
+
+static int __init imx25_pinctrl_init(void)
+{
+ return platform_driver_register(&imx25_pinctrl_driver);
+}
+arch_initcall(imx25_pinctrl_init);
+
+static void __exit imx25_pinctrl_exit(void)
+{
+ platform_driver_unregister(&imx25_pinctrl_driver);
+}
+module_exit(imx25_pinctrl_exit);
+MODULE_AUTHOR("Denis Carikli <denis@eukrea.com>");
+MODULE_DESCRIPTION("Freescale IMX25 pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-msm.c b/drivers/pinctrl/pinctrl-msm.c
new file mode 100644
index 000000000000..ef2bf3126da6
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-msm.c
@@ -0,0 +1,990 @@
+/*
+ * Copyright (c) 2013, Sony Mobile Communications AB.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+#include "core.h"
+#include "pinconf.h"
+#include "pinctrl-msm.h"
+#include "pinctrl-utils.h"
+
+#define MAX_NR_GPIO 300
+
+/**
+ * struct msm_pinctrl - state for a pinctrl-msm device
+ * @dev: device handle.
+ * @pctrl: pinctrl handle.
+ * @domain: irqdomain handle.
+ * @chip: gpiochip handle.
+ * @irq: parent irq for the TLMM irq_chip.
+ * @lock: Spinlock to protect register resources as well
+ * as msm_pinctrl data structures.
+ * @enabled_irqs: Bitmap of currently enabled irqs.
+ * @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
+ * detection.
+ * @wake_irqs: Bitmap of irqs requested as wakeup sources.
+ * @soc: Reference to the platform-specific soc_data.
+ * @regs: Base address for the TLMM register map.
+ */
+struct msm_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctrl;
+ struct irq_domain *domain;
+ struct gpio_chip chip;
+ int irq;
+
+ spinlock_t lock;
+
+ DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
+ DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
+ DECLARE_BITMAP(wake_irqs, MAX_NR_GPIO);
+
+ const struct msm_pinctrl_soc_data *soc;
+ void __iomem *regs;
+};
+
+static int msm_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->soc->ngroups;
+}
+
+static const char *msm_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->soc->groups[group].name;
+}
+
+static int msm_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = pctrl->soc->groups[group].pins;
+ *num_pins = pctrl->soc->groups[group].npins;
+ return 0;
+}
+
+static const struct pinctrl_ops msm_pinctrl_ops = {
+ .get_groups_count = msm_get_groups_count,
+ .get_group_name = msm_get_group_name,
+ .get_group_pins = msm_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int msm_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->soc->nfunctions;
+}
+
+static const char *msm_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned function)
+{
+ struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->soc->functions[function].name;
+}
+
+static int msm_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned function,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctrl->soc->functions[function].groups;
+ *num_groups = pctrl->soc->functions[function].ngroups;
+ return 0;
+}
+
+static int msm_pinmux_enable(struct pinctrl_dev *pctldev,
+ unsigned function,
+ unsigned group)
+{
+ struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct msm_pingroup *g;
+ unsigned long flags;
+ u32 val;
+ int i;
+
+ g = &pctrl->soc->groups[group];
+
+ if (WARN_ON(g->mux_bit < 0))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(g->funcs); i++) {
+ if (g->funcs[i] == function)
+ break;
+ }
+
+ if (WARN_ON(i == ARRAY_SIZE(g->funcs)))
+ return -EINVAL;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ val = readl(pctrl->regs + g->ctl_reg);
+ val &= ~(0x7 << g->mux_bit);
+ val |= i << g->mux_bit;
+ writel(val, pctrl->regs + g->ctl_reg);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static void msm_pinmux_disable(struct pinctrl_dev *pctldev,
+ unsigned function,
+ unsigned group)
+{
+ struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct msm_pingroup *g;
+ unsigned long flags;
+ u32 val;
+
+ g = &pctrl->soc->groups[group];
+
+ if (WARN_ON(g->mux_bit < 0))
+ return;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ /* Clear the mux bits to select gpio mode */
+ val = readl(pctrl->regs + g->ctl_reg);
+ val &= ~(0x7 << g->mux_bit);
+ writel(val, pctrl->regs + g->ctl_reg);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static const struct pinmux_ops msm_pinmux_ops = {
+ .get_functions_count = msm_get_functions_count,
+ .get_function_name = msm_get_function_name,
+ .get_function_groups = msm_get_function_groups,
+ .enable = msm_pinmux_enable,
+ .disable = msm_pinmux_disable,
+};
+
+static int msm_config_reg(struct msm_pinctrl *pctrl,
+ const struct msm_pingroup *g,
+ unsigned param,
+ s16 *reg,
+ unsigned *mask,
+ unsigned *bit)
+{
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ *reg = g->ctl_reg;
+ *bit = g->pull_bit;
+ *mask = 3;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ *reg = g->ctl_reg;
+ *bit = g->pull_bit;
+ *mask = 3;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ *reg = g->ctl_reg;
+ *bit = g->pull_bit;
+ *mask = 3;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ *reg = g->ctl_reg;
+ *bit = g->drv_bit;
+ *mask = 7;
+ break;
+ default:
+ dev_err(pctrl->dev, "Invalid config param %04x\n", param);
+ return -ENOTSUPP;
+ }
+
+ if (*reg < 0) {
+ dev_err(pctrl->dev, "Config param %04x not supported on group %s\n",
+ param, g->name);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int msm_config_get(struct pinctrl_dev *pctldev,
+ unsigned int pin,
+ unsigned long *config)
+{
+ dev_err(pctldev->dev, "pin_config_get op not supported\n");
+ return -ENOTSUPP;
+}
+
+static int msm_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned num_configs)
+{
+ dev_err(pctldev->dev, "pin_config_set op not supported\n");
+ return -ENOTSUPP;
+}
+
+#define MSM_NO_PULL 0
+#define MSM_PULL_DOWN 1
+#define MSM_PULL_UP 3
+
+static const unsigned msm_regval_to_drive[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
+static const unsigned msm_drive_to_regval[] = { -1, -1, 0, -1, 1, -1, 2, -1, 3, -1, 4, -1, 5, -1, 6, -1, 7 };
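The two lookup tables above encode the TLMM drive-strength mapping: register value r corresponds to (r + 1) * 2 mA, so only even strengths from 2 mA to 16 mA can be programmed and every other index in msm_drive_to_regval[] is -1. A small arithmetic sketch of the same conversion (illustration only, not part of the patch):

	/* mA -> register value; -1 for strengths the TLMM cannot encode. */
	static int msm_drive_ma_to_regval(unsigned int ma)
	{
		if (ma < 2 || ma > 16 || (ma & 1))
			return -1;
		return ma / 2 - 1;
	}

	/* register value -> mA, the inverse of msm_regval_to_drive[] */
	static unsigned int msm_drive_regval_to_ma(unsigned int regval)
	{
		return (regval + 1) * 2;
	}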
+
+static int msm_config_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *config)
+{
+ const struct msm_pingroup *g;
+ struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned param = pinconf_to_config_param(*config);
+ unsigned mask;
+ unsigned arg;
+ unsigned bit;
+ s16 reg;
+ int ret;
+ u32 val;
+
+ g = &pctrl->soc->groups[group];
+
+ ret = msm_config_reg(pctrl, g, param, &reg, &mask, &bit);
+ if (ret < 0)
+ return ret;
+
+ val = readl(pctrl->regs + reg);
+ arg = (val >> bit) & mask;
+
+ /* Convert register value to pinconf value */
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ arg = arg == MSM_NO_PULL;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ arg = arg == MSM_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = arg == MSM_PULL_UP;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ arg = msm_regval_to_drive[arg];
+ break;
+ default:
+ dev_err(pctrl->dev, "Unsupported config parameter: %x\n",
+ param);
+ return -EINVAL;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int msm_config_group_set(struct pinctrl_dev *pctldev,
+ unsigned group,
+ unsigned long *configs,
+ unsigned num_configs)
+{
+ const struct msm_pingroup *g;
+ struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned long flags;
+ unsigned param;
+ unsigned mask;
+ unsigned arg;
+ unsigned bit;
+ s16 reg;
+ int ret;
+ u32 val;
+ int i;
+
+ g = &pctrl->soc->groups[group];
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ ret = msm_config_reg(pctrl, g, param, &reg, &mask, &bit);
+ if (ret < 0)
+ return ret;
+
+ /* Convert pinconf values to register values */
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ arg = MSM_NO_PULL;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ arg = MSM_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = MSM_PULL_UP;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ /* Check for invalid values */
+ if (arg >= ARRAY_SIZE(msm_drive_to_regval))
+ arg = -1;
+ else
+ arg = msm_drive_to_regval[arg];
+ break;
+ default:
+ dev_err(pctrl->dev, "Unsupported config parameter: %x\n",
+ param);
+ return -EINVAL;
+ }
+
+ /* Range-check user-supplied value */
+ if (arg & ~mask) {
+ dev_err(pctrl->dev, "config %x: %x is invalid\n", param, arg);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+ val = readl(pctrl->regs + reg);
+ val &= ~(mask << bit);
+ val |= arg << bit;
+ writel(val, pctrl->regs + reg);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops msm_pinconf_ops = {
+ .pin_config_get = msm_config_get,
+ .pin_config_set = msm_config_set,
+ .pin_config_group_get = msm_config_group_get,
+ .pin_config_group_set = msm_config_group_set,
+};
+
+static struct pinctrl_desc msm_pinctrl_desc = {
+ .pctlops = &msm_pinctrl_ops,
+ .pmxops = &msm_pinmux_ops,
+ .confops = &msm_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ const struct msm_pingroup *g;
+ struct msm_pinctrl *pctrl = container_of(chip, struct msm_pinctrl, chip);
+ unsigned long flags;
+ u32 val;
+
+ g = &pctrl->soc->groups[offset];
+ if (WARN_ON(g->io_reg < 0))
+ return -EINVAL;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ val = readl(pctrl->regs + g->ctl_reg);
+ val &= ~BIT(g->oe_bit);
+ writel(val, pctrl->regs + g->ctl_reg);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value)
+{
+ const struct msm_pingroup *g;
+ struct msm_pinctrl *pctrl = container_of(chip, struct msm_pinctrl, chip);
+ unsigned long flags;
+ u32 val;
+
+ g = &pctrl->soc->groups[offset];
+ if (WARN_ON(g->io_reg < 0))
+ return -EINVAL;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ val = readl(pctrl->regs + g->io_reg);
+ if (value)
+ val |= BIT(g->out_bit);
+ else
+ val &= ~BIT(g->out_bit);
+ writel(val, pctrl->regs + g->io_reg);
+
+ val = readl(pctrl->regs + g->ctl_reg);
+ val |= BIT(g->oe_bit);
+ writel(val, pctrl->regs + g->ctl_reg);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ const struct msm_pingroup *g;
+ struct msm_pinctrl *pctrl = container_of(chip, struct msm_pinctrl, chip);
+ u32 val;
+
+ g = &pctrl->soc->groups[offset];
+ if (WARN_ON(g->io_reg < 0))
+ return -EINVAL;
+
+ val = readl(pctrl->regs + g->io_reg);
+ return !!(val & BIT(g->in_bit));
+}
+
+static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ const struct msm_pingroup *g;
+ struct msm_pinctrl *pctrl = container_of(chip, struct msm_pinctrl, chip);
+ unsigned long flags;
+ u32 val;
+
+ g = &pctrl->soc->groups[offset];
+ if (WARN_ON(g->io_reg < 0))
+ return;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ val = readl(pctrl->regs + g->io_reg);
+ if (value)
+ val |= BIT(g->out_bit);
+ else
+ val &= ~BIT(g->out_bit);
+ writel(val, pctrl->regs + g->io_reg);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct msm_pinctrl *pctrl = container_of(chip, struct msm_pinctrl, chip);
+
+ return irq_find_mapping(pctrl->domain, offset);
+}
+
+static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ int gpio = chip->base + offset;
+ return pinctrl_request_gpio(gpio);
+}
+
+static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ int gpio = chip->base + offset;
+ return pinctrl_free_gpio(gpio);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+static void msm_gpio_dbg_show_one(struct seq_file *s,
+ struct pinctrl_dev *pctldev,
+ struct gpio_chip *chip,
+ unsigned offset,
+ unsigned gpio)
+{
+ const struct msm_pingroup *g;
+ struct msm_pinctrl *pctrl = container_of(chip, struct msm_pinctrl, chip);
+ unsigned func;
+ int is_out;
+ int drive;
+ int pull;
+ u32 ctl_reg;
+
+ static const char * const pulls[] = {
+ "no pull",
+ "pull down",
+ "keeper",
+ "pull up"
+ };
+
+ g = &pctrl->soc->groups[offset];
+ ctl_reg = readl(pctrl->regs + g->ctl_reg);
+
+ is_out = !!(ctl_reg & BIT(g->oe_bit));
+ func = (ctl_reg >> g->mux_bit) & 7;
+ drive = (ctl_reg >> g->drv_bit) & 7;
+ pull = (ctl_reg >> g->pull_bit) & 3;
+
+ seq_printf(s, " %-8s: %-3s %d", g->name, is_out ? "out" : "in", func);
+ seq_printf(s, " %dmA", msm_regval_to_drive[drive]);
+ seq_printf(s, " %s", pulls[pull]);
+}
+
+static void msm_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ unsigned gpio = chip->base;
+ unsigned i;
+
+ for (i = 0; i < chip->ngpio; i++, gpio++) {
+ msm_gpio_dbg_show_one(s, NULL, chip, i, gpio);
+ seq_puts(s, "\n");
+ }
+}
+
+#else
+#define msm_gpio_dbg_show NULL
+#endif
+
+static struct gpio_chip msm_gpio_template = {
+ .direction_input = msm_gpio_direction_input,
+ .direction_output = msm_gpio_direction_output,
+ .get = msm_gpio_get,
+ .set = msm_gpio_set,
+ .to_irq = msm_gpio_to_irq,
+ .request = msm_gpio_request,
+ .free = msm_gpio_free,
+ .dbg_show = msm_gpio_dbg_show,
+};
+
+/* For dual-edge interrupts in software, since some hardware has no
+ * such support:
+ *
+ * At appropriate moments, this function may be called to flip the polarity
+ * settings of both-edge irq lines to try and catch the next edge.
+ *
+ * The attempt is considered successful if:
+ * - the status bit goes high, indicating that an edge was caught, or
+ * - the input value of the gpio doesn't change during the attempt.
+ * If the value changes twice during the process, that would cause the first
+ * test to fail but would force the second, as two opposite
+ * transitions would cause a detection no matter the polarity setting.
+ *
+ * The do-loop tries to sledge-hammer closed the timing hole between
+ * the initial value-read and the polarity-write - if the line value changes
+ * during that window, an interrupt is lost, the new polarity setting is
+ * incorrect, and the first success test will fail, causing a retry.
+ *
+ * Algorithm comes from Google's msmgpio driver.
+ */
+static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl,
+ const struct msm_pingroup *g,
+ struct irq_data *d)
+{
+ int loop_limit = 100;
+ unsigned val, val2, intstat;
+ unsigned pol;
+
+ do {
+ val = readl(pctrl->regs + g->io_reg) & BIT(g->in_bit);
+
+ pol = readl(pctrl->regs + g->intr_cfg_reg);
+ pol ^= BIT(g->intr_polarity_bit);
+ writel(pol, pctrl->regs + g->intr_cfg_reg);
+
+ val2 = readl(pctrl->regs + g->io_reg) & BIT(g->in_bit);
+ intstat = readl(pctrl->regs + g->intr_status_reg);
+ if (intstat || (val == val2))
+ return;
+ } while (loop_limit-- > 0);
+ dev_err(pctrl->dev, "dual-edge irq failed to stabilize, %#08x != %#08x\n",
+ val, val2);
+}
+
+static void msm_gpio_irq_mask(struct irq_data *d)
+{
+ const struct msm_pingroup *g;
+ struct msm_pinctrl *pctrl;
+ unsigned long flags;
+ u32 val;
+
+ pctrl = irq_data_get_irq_chip_data(d);
+ g = &pctrl->soc->groups[d->hwirq];
+ if (WARN_ON(g->intr_cfg_reg < 0))
+ return;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ val = readl(pctrl->regs + g->intr_cfg_reg);
+ val &= ~BIT(g->intr_enable_bit);
+ writel(val, pctrl->regs + g->intr_cfg_reg);
+
+ clear_bit(d->hwirq, pctrl->enabled_irqs);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static void msm_gpio_irq_unmask(struct irq_data *d)
+{
+ const struct msm_pingroup *g;
+ struct msm_pinctrl *pctrl;
+ unsigned long flags;
+ u32 val;
+
+ pctrl = irq_data_get_irq_chip_data(d);
+ g = &pctrl->soc->groups[d->hwirq];
+ if (WARN_ON(g->intr_status_reg < 0))
+ return;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ val = readl(pctrl->regs + g->intr_status_reg);
+ val &= ~BIT(g->intr_status_bit);
+ writel(val, pctrl->regs + g->intr_status_reg);
+
+ val = readl(pctrl->regs + g->intr_cfg_reg);
+ val |= BIT(g->intr_enable_bit);
+ writel(val, pctrl->regs + g->intr_cfg_reg);
+
+ set_bit(d->hwirq, pctrl->enabled_irqs);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static void msm_gpio_irq_ack(struct irq_data *d)
+{
+ const struct msm_pingroup *g;
+ struct msm_pinctrl *pctrl;
+ unsigned long flags;
+ u32 val;
+
+ pctrl = irq_data_get_irq_chip_data(d);
+ g = &pctrl->soc->groups[d->hwirq];
+ if (WARN_ON(g->intr_status_reg < 0))
+ return;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ val = readl(pctrl->regs + g->intr_status_reg);
+ val &= ~BIT(g->intr_status_bit);
+ writel(val, pctrl->regs + g->intr_status_reg);
+
+ if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
+ msm_gpio_update_dual_edge_pos(pctrl, g, d);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+#define INTR_TARGET_PROC_APPS 4
+
+static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ const struct msm_pingroup *g;
+ struct msm_pinctrl *pctrl;
+ unsigned long flags;
+ u32 val;
+
+ pctrl = irq_data_get_irq_chip_data(d);
+ g = &pctrl->soc->groups[d->hwirq];
+ if (WARN_ON(g->intr_cfg_reg < 0))
+ return -EINVAL;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ /*
+ * For hw that cannot detect both edges, emulate dual-edge
+ * detection in software.
+ */
+ if (g->intr_detection_width == 1 && type == IRQ_TYPE_EDGE_BOTH)
+ set_bit(d->hwirq, pctrl->dual_edge_irqs);
+ else
+ clear_bit(d->hwirq, pctrl->dual_edge_irqs);
+
+ /* Route interrupts to application cpu */
+ val = readl(pctrl->regs + g->intr_target_reg);
+ val &= ~(7 << g->intr_target_bit);
+ val |= INTR_TARGET_PROC_APPS << g->intr_target_bit;
+ writel(val, pctrl->regs + g->intr_target_reg);
+
+ /*
+ * Update the configuration for the gpio.
+ * RAW_STATUS_EN is left on for all gpio irqs. Due to the
+ * internal circuitry of TLMM, toggling the RAW_STATUS
+ * could cause the INTR_STATUS to be set for EDGE interrupts.
+ */
+ val = readl(pctrl->regs + g->intr_cfg_reg);
+ val |= BIT(g->intr_raw_status_bit);
+ if (g->intr_detection_width == 2) {
+ val &= ~(3 << g->intr_detection_bit);
+ val &= ~(1 << g->intr_polarity_bit);
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ val |= 1 << g->intr_detection_bit;
+ val |= BIT(g->intr_polarity_bit);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val |= 2 << g->intr_detection_bit;
+ val |= BIT(g->intr_polarity_bit);
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ val |= 3 << g->intr_detection_bit;
+ val |= BIT(g->intr_polarity_bit);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ val |= BIT(g->intr_polarity_bit);
+ break;
+ }
+ } else if (g->intr_detection_width == 1) {
+ val &= ~(1 << g->intr_detection_bit);
+ val &= ~(1 << g->intr_polarity_bit);
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ val |= BIT(g->intr_detection_bit);
+ val |= BIT(g->intr_polarity_bit);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val |= BIT(g->intr_detection_bit);
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ val |= BIT(g->intr_detection_bit);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ val |= BIT(g->intr_polarity_bit);
+ break;
+ }
+ } else {
+ BUG();
+ }
+ writel(val, pctrl->regs + g->intr_cfg_reg);
+
+ if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
+ msm_gpio_update_dual_edge_pos(pctrl, g, d);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+
+ return 0;
+}
+
+static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct msm_pinctrl *pctrl;
+ unsigned long flags;
+ unsigned ngpio;
+
+ pctrl = irq_data_get_irq_chip_data(d);
+ ngpio = pctrl->chip.ngpio;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ if (on) {
+ if (bitmap_empty(pctrl->wake_irqs, ngpio))
+ enable_irq_wake(pctrl->irq);
+ set_bit(d->hwirq, pctrl->wake_irqs);
+ } else {
+ clear_bit(d->hwirq, pctrl->wake_irqs);
+ if (bitmap_empty(pctrl->wake_irqs, ngpio))
+ disable_irq_wake(pctrl->irq);
+ }
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static unsigned int msm_gpio_irq_startup(struct irq_data *d)
+{
+ struct msm_pinctrl *pctrl = irq_data_get_irq_chip_data(d);
+
+ if (gpio_lock_as_irq(&pctrl->chip, d->hwirq)) {
+ dev_err(pctrl->dev, "unable to lock HW IRQ %lu for IRQ\n",
+ d->hwirq);
+ }
+ msm_gpio_irq_unmask(d);
+ return 0;
+}
+
+static void msm_gpio_irq_shutdown(struct irq_data *d)
+{
+ struct msm_pinctrl *pctrl = irq_data_get_irq_chip_data(d);
+
+ msm_gpio_irq_mask(d);
+ gpio_unlock_as_irq(&pctrl->chip, d->hwirq);
+}
+
+static struct irq_chip msm_gpio_irq_chip = {
+ .name = "msmgpio",
+ .irq_mask = msm_gpio_irq_mask,
+ .irq_unmask = msm_gpio_irq_unmask,
+ .irq_ack = msm_gpio_irq_ack,
+ .irq_set_type = msm_gpio_irq_set_type,
+ .irq_set_wake = msm_gpio_irq_set_wake,
+ .irq_startup = msm_gpio_irq_startup,
+ .irq_shutdown = msm_gpio_irq_shutdown,
+};
+
+static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ const struct msm_pingroup *g;
+ struct msm_pinctrl *pctrl = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_get_chip(irq);
+ int irq_pin;
+ int handled = 0;
+ u32 val;
+ int i;
+
+ chained_irq_enter(chip, desc);
+
+ /*
+ * Each pin has its own IRQ status register, so use
+ * enabled_irq bitmap to limit the number of reads.
+ */
+ for_each_set_bit(i, pctrl->enabled_irqs, pctrl->chip.ngpio) {
+ g = &pctrl->soc->groups[i];
+ val = readl(pctrl->regs + g->intr_status_reg);
+ if (val & BIT(g->intr_status_bit)) {
+ irq_pin = irq_find_mapping(pctrl->domain, i);
+ generic_handle_irq(irq_pin);
+ handled++;
+ }
+ }
+
+ /* No interrupts were flagged */
+ if (handled == 0)
+ handle_bad_irq(irq, desc);
+
+ chained_irq_exit(chip, desc);
+}
+
+static int msm_gpio_init(struct msm_pinctrl *pctrl)
+{
+ struct gpio_chip *chip;
+ int irq;
+ int ret;
+ int i;
+ int r;
+
+ chip = &pctrl->chip;
+ chip->base = 0;
+ chip->ngpio = pctrl->soc->ngpios;
+ chip->label = dev_name(pctrl->dev);
+ chip->dev = pctrl->dev;
+ chip->owner = THIS_MODULE;
+ chip->of_node = pctrl->dev->of_node;
+
+ ret = gpiochip_add(&pctrl->chip);
+ if (ret) {
+ dev_err(pctrl->dev, "Failed to register gpiochip\n");
+ return ret;
+ }
+
+ ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), 0, 0, chip->ngpio);
+ if (ret) {
+ dev_err(pctrl->dev, "Failed to add pin range\n");
+ return ret;
+ }
+
+ pctrl->domain = irq_domain_add_linear(pctrl->dev->of_node, chip->ngpio,
+ &irq_domain_simple_ops, NULL);
+ if (!pctrl->domain) {
+ dev_err(pctrl->dev, "Failed to register irq domain\n");
+ r = gpiochip_remove(&pctrl->chip);
+ return -ENOSYS;
+ }
+
+ for (i = 0; i < chip->ngpio; i++) {
+ irq = irq_create_mapping(pctrl->domain, i);
+ irq_set_chip_and_handler(irq, &msm_gpio_irq_chip, handle_edge_irq);
+ irq_set_chip_data(irq, pctrl);
+ }
+
+ irq_set_handler_data(pctrl->irq, pctrl);
+ irq_set_chained_handler(pctrl->irq, msm_gpio_irq_handler);
+
+ return 0;
+}
+
+int msm_pinctrl_probe(struct platform_device *pdev,
+ const struct msm_pinctrl_soc_data *soc_data)
+{
+ struct msm_pinctrl *pctrl;
+ struct resource *res;
+ int ret;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl) {
+ dev_err(&pdev->dev, "Can't allocate msm_pinctrl\n");
+ return -ENOMEM;
+ }
+ pctrl->dev = &pdev->dev;
+ pctrl->soc = soc_data;
+ pctrl->chip = msm_gpio_template;
+
+ spin_lock_init(&pctrl->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pctrl->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pctrl->regs))
+ return PTR_ERR(pctrl->regs);
+
+ pctrl->irq = platform_get_irq(pdev, 0);
+ if (pctrl->irq < 0) {
+ dev_err(&pdev->dev, "No interrupt defined for msmgpio\n");
+ return pctrl->irq;
+ }
+
+ msm_pinctrl_desc.name = dev_name(&pdev->dev);
+ msm_pinctrl_desc.pins = pctrl->soc->pins;
+ msm_pinctrl_desc.npins = pctrl->soc->npins;
+ pctrl->pctrl = pinctrl_register(&msm_pinctrl_desc, &pdev->dev, pctrl);
+ if (!pctrl->pctrl) {
+ dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
+ return -ENODEV;
+ }
+
+ ret = msm_gpio_init(pctrl);
+ if (ret) {
+ pinctrl_unregister(pctrl->pctrl);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pctrl);
+
+ dev_dbg(&pdev->dev, "Probed Qualcomm pinctrl driver\n");
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_pinctrl_probe);
+
+int msm_pinctrl_remove(struct platform_device *pdev)
+{
+ struct msm_pinctrl *pctrl = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&pctrl->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to remove gpiochip\n");
+ return ret;
+ }
+
+ irq_set_chained_handler(pctrl->irq, NULL);
+ irq_domain_remove(pctrl->domain);
+ pinctrl_unregister(pctrl->pctrl);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_pinctrl_remove);
+
diff --git a/drivers/pinctrl/pinctrl-msm.h b/drivers/pinctrl/pinctrl-msm.h
new file mode 100644
index 000000000000..206e782e2daa
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-msm.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2013, Sony Mobile Communications AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __PINCTRL_MSM_H__
+#define __PINCTRL_MSM_H__
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/machine.h>
+
+/**
+ * struct msm_function - a pinmux function
+ * @name: Name of the pinmux function.
+ * @groups: List of pingroups for this function.
+ * @ngroups: Number of entries in @groups.
+ */
+struct msm_function {
+ const char *name;
+ const char * const *groups;
+ unsigned ngroups;
+};
+
+/**
+ * struct msm_pingroup - Qualcomm pingroup definition
+ * @name: Name of the pingroup.
+ * @pins: A list of pins assigned to this pingroup.
+ * @npins: Number of entries in @pins.
+ * @funcs: A list of pinmux functions that can be selected for
+ * this group. The index of the selected function is used
+ * for programming the function selector.
+ * Entries should be indices into the groups list of the
+ * struct msm_pinctrl_soc_data.
+ * @ctl_reg: Offset of the register holding control bits for this group.
+ * @io_reg: Offset of the register holding input/output bits for this group.
+ * @intr_cfg_reg: Offset of the register holding interrupt configuration bits.
+ * @intr_status_reg: Offset of the register holding the status bits for this group.
+ * @intr_target_reg: Offset of the register specifying routing of the interrupts
+ * from this group.
+ * @mux_bit: Offset in @ctl_reg for the pinmux function selection.
+ * @pull_bit: Offset in @ctl_reg for the bias configuration.
+ * @drv_bit: Offset in @ctl_reg for the drive strength configuration.
+ * @oe_bit: Offset in @ctl_reg for controlling output enable.
+ * @in_bit: Offset in @io_reg for the input bit value.
+ * @out_bit: Offset in @io_reg for the output bit value.
+ * @intr_enable_bit: Offset in @intr_cfg_reg for enabling the interrupt for this group.
+ * @intr_status_bit: Offset in @intr_status_reg for reading and acking the interrupt
+ * status.
+ * @intr_target_bit: Offset in @intr_target_reg for configuring the interrupt routing.
+ * @intr_raw_status_bit: Offset in @intr_cfg_reg for the raw status bit.
+ * @intr_polarity_bit: Offset in @intr_cfg_reg for specifying polarity of the interrupt.
+ * @intr_detection_bit: Offset in @intr_cfg_reg for specifying interrupt type.
+ * @intr_detection_width: Number of bits used for specifying interrupt type;
+ * should be 2 for SoCs that can detect both edges in hardware,
+ * otherwise 1.
+ */
+struct msm_pingroup {
+ const char *name;
+ const unsigned *pins;
+ unsigned npins;
+
+ unsigned funcs[8];
+
+ s16 ctl_reg;
+ s16 io_reg;
+ s16 intr_cfg_reg;
+ s16 intr_status_reg;
+ s16 intr_target_reg;
+
+ unsigned mux_bit:5;
+
+ unsigned pull_bit:5;
+ unsigned drv_bit:5;
+
+ unsigned oe_bit:5;
+ unsigned in_bit:5;
+ unsigned out_bit:5;
+
+ unsigned intr_enable_bit:5;
+ unsigned intr_status_bit:5;
+
+ unsigned intr_target_bit:5;
+ unsigned intr_raw_status_bit:5;
+ unsigned intr_polarity_bit:5;
+ unsigned intr_detection_bit:5;
+ unsigned intr_detection_width:5;
+};
+
+/**
+ * struct msm_pinctrl_soc_data - Qualcomm pin controller driver configuration
+ * @pins: An array describing all pins the pin controller affects.
+ * @npins: The number of entries in @pins.
+ * @functions: An array describing all mux functions the SoC supports.
+ * @nfunctions: The number of entries in @functions.
+ * @groups: An array describing all pin groups the pin SoC supports.
+ * @ngroups: The number of entries in @groups.
+ * @ngpios: The number of pingroups the driver should expose as GPIOs.
+ */
+struct msm_pinctrl_soc_data {
+ const struct pinctrl_pin_desc *pins;
+ unsigned npins;
+ const struct msm_function *functions;
+ unsigned nfunctions;
+ const struct msm_pingroup *groups;
+ unsigned ngroups;
+ unsigned ngpios;
+};
+
+int msm_pinctrl_probe(struct platform_device *pdev,
+ const struct msm_pinctrl_soc_data *soc_data);
+int msm_pinctrl_remove(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/pinctrl/pinctrl-msm8x74.c b/drivers/pinctrl/pinctrl-msm8x74.c
new file mode 100644
index 000000000000..f944bf2172ef
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-msm8x74.c
@@ -0,0 +1,636 @@
+/*
+ * Copyright (c) 2013, Sony Mobile Communications AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-msm.h"
+
+static const struct pinctrl_pin_desc msm8x74_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+
+ PINCTRL_PIN(146, "SDC1_CLK"),
+ PINCTRL_PIN(147, "SDC1_CMD"),
+ PINCTRL_PIN(148, "SDC1_DATA"),
+ PINCTRL_PIN(149, "SDC2_CLK"),
+ PINCTRL_PIN(150, "SDC2_CMD"),
+ PINCTRL_PIN(151, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+
+static const unsigned int sdc1_clk_pins[] = { 146 };
+static const unsigned int sdc1_cmd_pins[] = { 147 };
+static const unsigned int sdc1_data_pins[] = { 148 };
+static const unsigned int sdc2_clk_pins[] = { 149 };
+static const unsigned int sdc2_cmd_pins[] = { 150 };
+static const unsigned int sdc2_data_pins[] = { 151 };
+
+#define FUNCTION(fname) \
+ [MSM_MUX_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = { \
+ MSM_MUX_NA, /* gpio mode */ \
+ MSM_MUX_##f1, \
+ MSM_MUX_##f2, \
+ MSM_MUX_##f3, \
+ MSM_MUX_##f4, \
+ MSM_MUX_##f5, \
+ MSM_MUX_##f6, \
+ MSM_MUX_##f7 \
+ }, \
+ .ctl_reg = 0x1000 + 0x10 * id, \
+ .io_reg = 0x1004 + 0x10 * id, \
+ .intr_cfg_reg = 0x1008 + 0x10 * id, \
+ .intr_status_reg = 0x100c + 0x10 * id, \
+ .intr_target_reg = 0x1008 + 0x10 * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+/*
+ * TODO: Add the rest of the possible functions and fill out
+ * the pingroup table below.
+ */
+enum msm8x74_functions {
+ MSM_MUX_blsp_i2c2,
+ MSM_MUX_blsp_i2c6,
+ MSM_MUX_blsp_i2c11,
+ MSM_MUX_blsp_spi1,
+ MSM_MUX_blsp_uart2,
+ MSM_MUX_blsp_uart8,
+ MSM_MUX_slimbus,
+ MSM_MUX_NA,
+};
+
+static const char * const blsp_i2c2_groups[] = { "gpio6", "gpio7" };
+static const char * const blsp_i2c6_groups[] = { "gpio29", "gpio30" };
+static const char * const blsp_i2c11_groups[] = { "gpio83", "gpio84" };
+static const char * const blsp_spi1_groups[] = { "gpio0", "gpio1", "gpio2", "gpio3" };
+static const char * const blsp_uart2_groups[] = { "gpio4", "gpio5" };
+static const char * const blsp_uart8_groups[] = { "gpio45", "gpio46" };
+static const char * const slimbus_groups[] = { "gpio70", "gpio71" };
+
+static const struct msm_function msm8x74_functions[] = {
+ FUNCTION(blsp_i2c2),
+ FUNCTION(blsp_i2c6),
+ FUNCTION(blsp_i2c11),
+ FUNCTION(blsp_spi1),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_uart8),
+ FUNCTION(slimbus),
+};
+
+static const struct msm_pingroup msm8x74_groups[] = {
+ PINGROUP(0, blsp_spi1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(1, blsp_spi1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(2, blsp_spi1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(3, blsp_spi1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(4, NA, blsp_uart2, NA, NA, NA, NA, NA),
+ PINGROUP(5, NA, blsp_uart2, NA, NA, NA, NA, NA),
+ PINGROUP(6, NA, NA, blsp_i2c2, NA, NA, NA, NA),
+ PINGROUP(7, NA, NA, blsp_i2c2, NA, NA, NA, NA),
+ PINGROUP(8, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(9, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(10, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(11, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(12, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(13, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(14, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(15, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(16, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(17, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(18, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(19, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(20, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(21, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(22, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(23, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(24, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(25, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(26, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(27, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(28, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(29, NA, NA, blsp_i2c6, NA, NA, NA, NA),
+ PINGROUP(30, NA, NA, blsp_i2c6, NA, NA, NA, NA),
+ PINGROUP(31, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(32, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(33, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(34, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(35, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(36, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(37, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(38, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(39, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(40, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(41, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(42, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(43, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(44, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(45, NA, blsp_uart8, NA, NA, NA, NA, NA),
+ PINGROUP(46, NA, blsp_uart8, NA, NA, NA, NA, NA),
+ PINGROUP(47, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(48, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(49, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(50, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(51, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(52, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(53, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(54, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(55, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(56, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(57, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(58, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(59, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(60, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(61, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(62, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(63, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(64, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(65, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(66, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(67, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(68, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(69, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(70, slimbus, NA, NA, NA, NA, NA, NA),
+ PINGROUP(71, slimbus, NA, NA, NA, NA, NA, NA),
+ PINGROUP(72, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(75, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(76, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(77, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(83, NA, NA, blsp_i2c11, NA, NA, NA, NA),
+ PINGROUP(84, NA, NA, blsp_i2c11, NA, NA, NA, NA),
+ PINGROUP(85, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(86, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(87, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(88, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(89, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(90, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(91, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(92, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(93, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(94, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(95, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(96, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(97, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(98, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(99, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(100, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(101, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(102, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(103, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(104, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(105, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(106, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(107, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(108, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(109, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(110, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(111, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(112, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(113, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(114, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(115, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(116, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(117, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(118, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(119, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(120, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(121, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(122, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(123, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(124, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(125, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(126, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(127, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(128, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(129, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(130, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(131, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(132, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(133, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(134, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(135, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(136, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(137, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(138, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(139, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(140, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(141, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(142, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(143, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(144, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(145, NA, NA, NA, NA, NA, NA, NA),
+ SDC_PINGROUP(sdc1_clk, 0x2044, 13, 6),
+ SDC_PINGROUP(sdc1_cmd, 0x2044, 11, 3),
+ SDC_PINGROUP(sdc1_data, 0x2044, 9, 0),
+ SDC_PINGROUP(sdc2_clk, 0x2048, 14, 6),
+ SDC_PINGROUP(sdc2_cmd, 0x2048, 11, 3),
+ SDC_PINGROUP(sdc2_data, 0x2048, 9, 0),
+};
+
+#define NUM_GPIO_PINGROUPS 146
+
+static const struct msm_pinctrl_soc_data msm8x74_pinctrl = {
+ .pins = msm8x74_pins,
+ .npins = ARRAY_SIZE(msm8x74_pins),
+ .functions = msm8x74_functions,
+ .nfunctions = ARRAY_SIZE(msm8x74_functions),
+ .groups = msm8x74_groups,
+ .ngroups = ARRAY_SIZE(msm8x74_groups),
+ .ngpios = NUM_GPIO_PINGROUPS,
+};
+
+static int msm8x74_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8x74_pinctrl);
+}
+
+static const struct of_device_id msm8x74_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8974-pinctrl", },
+ { },
+};
+
+static struct platform_driver msm8x74_pinctrl_driver = {
+ .driver = {
+ .name = "msm8x74-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = msm8x74_pinctrl_of_match,
+ },
+ .probe = msm8x74_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init msm8x74_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8x74_pinctrl_driver);
+}
+arch_initcall(msm8x74_pinctrl_init);
+
+static void __exit msm8x74_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8x74_pinctrl_driver);
+}
+module_exit(msm8x74_pinctrl_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm MSM8x74 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, msm8x74_pinctrl_of_match);
+
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 7111c3b59130..53a11114927f 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -4,7 +4,7 @@
* Copyright (C) 2008,2009 STMicroelectronics
* Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
* Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
- * Copyright (C) 2011 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2011-2013 Linus Walleij <linus.walleij@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -33,7 +33,6 @@
#include <linux/pinctrl/pinconf.h>
/* Since we request GPIOs from ourself */
#include <linux/pinctrl/consumer.h>
-#include <linux/platform_data/pinctrl-nomadik.h>
#include "pinctrl-nomadik.h"
#include "core.h"
@@ -45,6 +44,221 @@
* Symbols in this file are called "nmk_gpio" for "nomadik gpio"
*/
+/*
+ * pin configurations are represented by 32-bit integers:
+ *
+ * bit 0.. 8 - Pin Number (512 Pins Maximum)
+ * bit 9..10 - Alternate Function Selection
+ * bit 11..12 - Pull up/down state
+ * bit 13 - Sleep mode behaviour
+ * bit 14 - Direction
+ * bit 15 - Value (if output)
+ * bit 16..18 - SLPM pull up/down state
+ * bit 19..20 - SLPM direction
+ * bit 21..22 - SLPM Value (if output)
+ * bit 23..24 - SLPM PDIS value (if input)
+ * bit     25 - Low EMI mode
+ * bit 26 - Gpio mode
+ * bit 27 - Sleep mode
+ *
+ * to facilitate the definition, the following macros are provided
+ *
+ * PIN_CFG_DEFAULT - default config (0):
+ * pull up/down = disabled
+ * sleep mode = input/wakeup
+ * direction = input
+ * value = low
+ * SLPM direction = same as normal
+ * SLPM pull = same as normal
+ * SLPM value = same as normal
+ *
+ * PIN_CFG - default config with alternate function
+ */
+
+typedef unsigned long pin_cfg_t;
+
+#define PIN_NUM_MASK 0x1ff
+#define PIN_NUM(x) ((x) & PIN_NUM_MASK)
+
+#define PIN_ALT_SHIFT 9
+#define PIN_ALT_MASK (0x3 << PIN_ALT_SHIFT)
+#define PIN_ALT(x) (((x) & PIN_ALT_MASK) >> PIN_ALT_SHIFT)
+#define PIN_GPIO (NMK_GPIO_ALT_GPIO << PIN_ALT_SHIFT)
+#define PIN_ALT_A (NMK_GPIO_ALT_A << PIN_ALT_SHIFT)
+#define PIN_ALT_B (NMK_GPIO_ALT_B << PIN_ALT_SHIFT)
+#define PIN_ALT_C (NMK_GPIO_ALT_C << PIN_ALT_SHIFT)
+
+#define PIN_PULL_SHIFT 11
+#define PIN_PULL_MASK (0x3 << PIN_PULL_SHIFT)
+#define PIN_PULL(x) (((x) & PIN_PULL_MASK) >> PIN_PULL_SHIFT)
+#define PIN_PULL_NONE (NMK_GPIO_PULL_NONE << PIN_PULL_SHIFT)
+#define PIN_PULL_UP (NMK_GPIO_PULL_UP << PIN_PULL_SHIFT)
+#define PIN_PULL_DOWN (NMK_GPIO_PULL_DOWN << PIN_PULL_SHIFT)
+
+#define PIN_SLPM_SHIFT 13
+#define PIN_SLPM_MASK (0x1 << PIN_SLPM_SHIFT)
+#define PIN_SLPM(x) (((x) & PIN_SLPM_MASK) >> PIN_SLPM_SHIFT)
+#define PIN_SLPM_MAKE_INPUT (NMK_GPIO_SLPM_INPUT << PIN_SLPM_SHIFT)
+#define PIN_SLPM_NOCHANGE (NMK_GPIO_SLPM_NOCHANGE << PIN_SLPM_SHIFT)
+/* These two replace the above in DB8500v2+ */
+#define PIN_SLPM_WAKEUP_ENABLE (NMK_GPIO_SLPM_WAKEUP_ENABLE << PIN_SLPM_SHIFT)
+#define PIN_SLPM_WAKEUP_DISABLE (NMK_GPIO_SLPM_WAKEUP_DISABLE << PIN_SLPM_SHIFT)
+#define PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP PIN_SLPM_WAKEUP_DISABLE
+
+#define PIN_SLPM_GPIO PIN_SLPM_WAKEUP_ENABLE /* In SLPM, pin is a gpio */
+#define PIN_SLPM_ALTFUNC PIN_SLPM_WAKEUP_DISABLE /* In SLPM, pin is altfunc */
+
+#define PIN_DIR_SHIFT 14
+#define PIN_DIR_MASK (0x1 << PIN_DIR_SHIFT)
+#define PIN_DIR(x) (((x) & PIN_DIR_MASK) >> PIN_DIR_SHIFT)
+#define PIN_DIR_INPUT (0 << PIN_DIR_SHIFT)
+#define PIN_DIR_OUTPUT (1 << PIN_DIR_SHIFT)
+
+#define PIN_VAL_SHIFT 15
+#define PIN_VAL_MASK (0x1 << PIN_VAL_SHIFT)
+#define PIN_VAL(x) (((x) & PIN_VAL_MASK) >> PIN_VAL_SHIFT)
+#define PIN_VAL_LOW (0 << PIN_VAL_SHIFT)
+#define PIN_VAL_HIGH (1 << PIN_VAL_SHIFT)
+
+#define PIN_SLPM_PULL_SHIFT 16
+#define PIN_SLPM_PULL_MASK (0x7 << PIN_SLPM_PULL_SHIFT)
+#define PIN_SLPM_PULL(x) \
+ (((x) & PIN_SLPM_PULL_MASK) >> PIN_SLPM_PULL_SHIFT)
+#define PIN_SLPM_PULL_NONE \
+ ((1 + NMK_GPIO_PULL_NONE) << PIN_SLPM_PULL_SHIFT)
+#define PIN_SLPM_PULL_UP \
+ ((1 + NMK_GPIO_PULL_UP) << PIN_SLPM_PULL_SHIFT)
+#define PIN_SLPM_PULL_DOWN \
+ ((1 + NMK_GPIO_PULL_DOWN) << PIN_SLPM_PULL_SHIFT)
+
+#define PIN_SLPM_DIR_SHIFT 19
+#define PIN_SLPM_DIR_MASK (0x3 << PIN_SLPM_DIR_SHIFT)
+#define PIN_SLPM_DIR(x) \
+ (((x) & PIN_SLPM_DIR_MASK) >> PIN_SLPM_DIR_SHIFT)
+#define PIN_SLPM_DIR_INPUT ((1 + 0) << PIN_SLPM_DIR_SHIFT)
+#define PIN_SLPM_DIR_OUTPUT ((1 + 1) << PIN_SLPM_DIR_SHIFT)
+
+#define PIN_SLPM_VAL_SHIFT 21
+#define PIN_SLPM_VAL_MASK (0x3 << PIN_SLPM_VAL_SHIFT)
+#define PIN_SLPM_VAL(x) \
+ (((x) & PIN_SLPM_VAL_MASK) >> PIN_SLPM_VAL_SHIFT)
+#define PIN_SLPM_VAL_LOW ((1 + 0) << PIN_SLPM_VAL_SHIFT)
+#define PIN_SLPM_VAL_HIGH ((1 + 1) << PIN_SLPM_VAL_SHIFT)
+
+#define PIN_SLPM_PDIS_SHIFT 23
+#define PIN_SLPM_PDIS_MASK (0x3 << PIN_SLPM_PDIS_SHIFT)
+#define PIN_SLPM_PDIS(x) \
+ (((x) & PIN_SLPM_PDIS_MASK) >> PIN_SLPM_PDIS_SHIFT)
+#define PIN_SLPM_PDIS_NO_CHANGE (0 << PIN_SLPM_PDIS_SHIFT)
+#define PIN_SLPM_PDIS_DISABLED (1 << PIN_SLPM_PDIS_SHIFT)
+#define PIN_SLPM_PDIS_ENABLED (2 << PIN_SLPM_PDIS_SHIFT)
+
+#define PIN_LOWEMI_SHIFT 25
+#define PIN_LOWEMI_MASK (0x1 << PIN_LOWEMI_SHIFT)
+#define PIN_LOWEMI(x) (((x) & PIN_LOWEMI_MASK) >> PIN_LOWEMI_SHIFT)
+#define PIN_LOWEMI_DISABLED (0 << PIN_LOWEMI_SHIFT)
+#define PIN_LOWEMI_ENABLED (1 << PIN_LOWEMI_SHIFT)
+
+#define PIN_GPIOMODE_SHIFT 26
+#define PIN_GPIOMODE_MASK (0x1 << PIN_GPIOMODE_SHIFT)
+#define PIN_GPIOMODE(x) (((x) & PIN_GPIOMODE_MASK) >> PIN_GPIOMODE_SHIFT)
+#define PIN_GPIOMODE_DISABLED (0 << PIN_GPIOMODE_SHIFT)
+#define PIN_GPIOMODE_ENABLED (1 << PIN_GPIOMODE_SHIFT)
+
+#define PIN_SLEEPMODE_SHIFT 27
+#define PIN_SLEEPMODE_MASK (0x1 << PIN_SLEEPMODE_SHIFT)
+#define PIN_SLEEPMODE(x) (((x) & PIN_SLEEPMODE_MASK) >> PIN_SLEEPMODE_SHIFT)
+#define PIN_SLEEPMODE_DISABLED (0 << PIN_SLEEPMODE_SHIFT)
+#define PIN_SLEEPMODE_ENABLED (1 << PIN_SLEEPMODE_SHIFT)
+
+
+/* Shortcuts. Use these instead of separate DIR, PULL, and VAL. */
+#define PIN_INPUT_PULLDOWN (PIN_DIR_INPUT | PIN_PULL_DOWN)
+#define PIN_INPUT_PULLUP (PIN_DIR_INPUT | PIN_PULL_UP)
+#define PIN_INPUT_NOPULL (PIN_DIR_INPUT | PIN_PULL_NONE)
+#define PIN_OUTPUT_LOW (PIN_DIR_OUTPUT | PIN_VAL_LOW)
+#define PIN_OUTPUT_HIGH (PIN_DIR_OUTPUT | PIN_VAL_HIGH)
+
+#define PIN_SLPM_INPUT_PULLDOWN (PIN_SLPM_DIR_INPUT | PIN_SLPM_PULL_DOWN)
+#define PIN_SLPM_INPUT_PULLUP (PIN_SLPM_DIR_INPUT | PIN_SLPM_PULL_UP)
+#define PIN_SLPM_INPUT_NOPULL (PIN_SLPM_DIR_INPUT | PIN_SLPM_PULL_NONE)
+#define PIN_SLPM_OUTPUT_LOW (PIN_SLPM_DIR_OUTPUT | PIN_SLPM_VAL_LOW)
+#define PIN_SLPM_OUTPUT_HIGH (PIN_SLPM_DIR_OUTPUT | PIN_SLPM_VAL_HIGH)
+
+#define PIN_CFG_DEFAULT (0)
+
+#define PIN_CFG(num, alt) \
+ (PIN_CFG_DEFAULT |\
+ (PIN_NUM(num) | PIN_##alt))
+
+#define PIN_CFG_INPUT(num, alt, pull) \
+ (PIN_CFG_DEFAULT |\
+ (PIN_NUM(num) | PIN_##alt | PIN_INPUT_##pull))
+
+#define PIN_CFG_OUTPUT(num, alt, val) \
+ (PIN_CFG_DEFAULT |\
+ (PIN_NUM(num) | PIN_##alt | PIN_OUTPUT_##val))
+
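As a quick reference (not part of the patch), here is a minimal sketch of how a pin_cfg_t word built with the macros above decomposes again; it assumes the PIN_* macros and the NMK_GPIO_* constants they reference are visible, for instance copied into a small standalone test harness.

#include <stdio.h>

/* Sketch only: pin 42 as alternate function A, input with pull-up. */
int main(void)
{
	pin_cfg_t cfg = PIN_CFG_INPUT(42, ALT_A, PULLUP);

	/* Each accessor masks its field and shifts it back down to bit 0. */
	printf("pin=%lu alt=%lu pull=%lu dir=%lu\n",
	       PIN_NUM(cfg), PIN_ALT(cfg), PIN_PULL(cfg), PIN_DIR(cfg));
	/* Expected output: pin=42 alt=1 pull=1 dir=0 */
	return 0;
}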
+/*
+ * "nmk_gpio" and "NMK_GPIO" stand for "Nomadik GPIO", leaving
+ * the "gpio" namespace for generic and cross-machine functions
+ */
+
+#define GPIO_BLOCK_SHIFT 5
+#define NMK_GPIO_PER_CHIP (1 << GPIO_BLOCK_SHIFT)
+
+/* Register in the logic block */
+#define NMK_GPIO_DAT 0x00
+#define NMK_GPIO_DATS 0x04
+#define NMK_GPIO_DATC 0x08
+#define NMK_GPIO_PDIS 0x0c
+#define NMK_GPIO_DIR 0x10
+#define NMK_GPIO_DIRS 0x14
+#define NMK_GPIO_DIRC 0x18
+#define NMK_GPIO_SLPC 0x1c
+#define NMK_GPIO_AFSLA 0x20
+#define NMK_GPIO_AFSLB 0x24
+#define NMK_GPIO_LOWEMI 0x28
+
+#define NMK_GPIO_RIMSC 0x40
+#define NMK_GPIO_FIMSC 0x44
+#define NMK_GPIO_IS 0x48
+#define NMK_GPIO_IC 0x4c
+#define NMK_GPIO_RWIMSC 0x50
+#define NMK_GPIO_FWIMSC 0x54
+#define NMK_GPIO_WKS 0x58
+/* These appear in DB8540 and later ASICs */
+#define NMK_GPIO_EDGELEVEL 0x5C
+#define NMK_GPIO_LEVEL 0x60
+
+
+/* Pull up/down values */
+enum nmk_gpio_pull {
+ NMK_GPIO_PULL_NONE,
+ NMK_GPIO_PULL_UP,
+ NMK_GPIO_PULL_DOWN,
+};
+
+/* Sleep mode */
+enum nmk_gpio_slpm {
+ NMK_GPIO_SLPM_INPUT,
+ NMK_GPIO_SLPM_WAKEUP_ENABLE = NMK_GPIO_SLPM_INPUT,
+ NMK_GPIO_SLPM_NOCHANGE,
+ NMK_GPIO_SLPM_WAKEUP_DISABLE = NMK_GPIO_SLPM_NOCHANGE,
+};
+
+/*
+ * Platform data to register a block: only the initial gpio/irq number.
+ */
+struct nmk_gpio_platform_data {
+ char *name;
+ int first_gpio;
+ int first_irq;
+ int num_gpio;
+ u32 (*get_secondary_status)(unsigned int bank);
+ void (*set_ioforce)(bool enable);
+ bool supports_sleepmode;
+};
+
struct nmk_gpio_chip {
struct gpio_chip chip;
struct irq_domain *domain;
@@ -846,14 +1060,14 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
(mode < 0) ? "unknown" : modes[mode],
pull ? "pull" : "none");
- if (label && !is_out) {
- int irq = gpio_to_irq(gpio);
+ if (!is_out) {
+ int irq = gpio_to_irq(gpio);
struct irq_desc *desc = irq_to_desc(irq);
/* This races with request_irq(), set_irq_type(),
* and set_irq_wake() ... but those are "rare".
*/
- if (irq >= 0 && desc->action) {
+ if (irq > 0 && desc && desc->action) {
char *trigger;
u32 bitmask = nmk_gpio_get_bitmask(gpio);
@@ -904,7 +1118,7 @@ static struct gpio_chip nmk_gpio_template = {
.set = nmk_gpio_set_output,
.to_irq = nmk_gpio_to_irq,
.dbg_show = nmk_gpio_dbg_show,
- .can_sleep = 0,
+ .can_sleep = false,
};
void nmk_gpio_clocks_enable(void)
@@ -1026,7 +1240,7 @@ static const struct irq_domain_ops nmk_gpio_irq_simple_ops = {
static int nmk_gpio_probe(struct platform_device *dev)
{
- struct nmk_gpio_platform_data *pdata = dev->dev.platform_data;
+ struct nmk_gpio_platform_data *pdata;
struct device_node *np = dev->dev.of_node;
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
@@ -1034,32 +1248,24 @@ static int nmk_gpio_probe(struct platform_device *dev)
struct clk *clk;
int secondary_irq;
void __iomem *base;
- int irq_start = 0;
int irq;
int ret;
- if (!pdata && !np) {
- dev_err(&dev->dev, "No platform data or device tree found\n");
- return -ENODEV;
- }
-
- if (np) {
- pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- if (of_get_property(np, "st,supports-sleepmode", NULL))
- pdata->supports_sleepmode = true;
+ pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
- if (of_property_read_u32(np, "gpio-bank", &dev->id)) {
- dev_err(&dev->dev, "gpio-bank property not found\n");
- return -EINVAL;
- }
+ if (of_get_property(np, "st,supports-sleepmode", NULL))
+ pdata->supports_sleepmode = true;
- pdata->first_gpio = dev->id * NMK_GPIO_PER_CHIP;
- pdata->num_gpio = NMK_GPIO_PER_CHIP;
+ if (of_property_read_u32(np, "gpio-bank", &dev->id)) {
+ dev_err(&dev->dev, "gpio-bank property not found\n");
+ return -EINVAL;
}
+ pdata->first_gpio = dev->id * NMK_GPIO_PER_CHIP;
+ pdata->num_gpio = NMK_GPIO_PER_CHIP;
+
irq = platform_get_irq(dev, 0);
if (irq < 0)
return irq;
@@ -1107,10 +1313,7 @@ static int nmk_gpio_probe(struct platform_device *dev)
clk_enable(nmk_chip->clk);
nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI);
clk_disable(nmk_chip->clk);
-
-#ifdef CONFIG_OF_GPIO
chip->of_node = np;
-#endif
ret = gpiochip_add(&nmk_chip->chip);
if (ret)
@@ -1122,10 +1325,8 @@ static int nmk_gpio_probe(struct platform_device *dev)
platform_set_drvdata(dev, nmk_chip);
- if (!np)
- irq_start = pdata->first_irq;
nmk_chip->domain = irq_domain_add_simple(np,
- NMK_GPIO_PER_CHIP, irq_start,
+ NMK_GPIO_PER_CHIP, 0,
&nmk_gpio_irq_simple_ops, nmk_chip);
if (!nmk_chip->domain) {
dev_err(&dev->dev, "failed to create irqdomain\n");
@@ -1858,11 +2059,10 @@ static int nmk_pinctrl_resume(struct platform_device *pdev)
static int nmk_pinctrl_probe(struct platform_device *pdev)
{
- const struct platform_device_id *platid = platform_get_device_id(pdev);
+ const struct of_device_id *match;
struct device_node *np = pdev->dev.of_node;
struct device_node *prcm_np;
struct nmk_pinctrl *npct;
- struct resource *res;
unsigned int version = 0;
int i;
@@ -1870,16 +2070,10 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
if (!npct)
return -ENOMEM;
- if (platid)
- version = platid->driver_data;
- else if (np) {
- const struct of_device_id *match;
-
- match = of_match_device(nmk_pinctrl_match, &pdev->dev);
- if (!match)
- return -ENODEV;
- version = (unsigned int) match->data;
- }
+ match = of_match_device(nmk_pinctrl_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+ version = (unsigned int) match->data;
/* Poke in other ASIC variants here */
if (version == PINCTRL_NMK_STN8815)
@@ -1889,17 +2083,9 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
if (version == PINCTRL_NMK_DB8540)
nmk_pinctrl_db8540_init(&npct->soc);
- if (np) {
- prcm_np = of_parse_phandle(np, "prcm", 0);
- if (prcm_np)
- npct->prcm_base = of_iomap(prcm_np, 0);
- }
-
- /* Allow platform passed information to over-write DT. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
- npct->prcm_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ prcm_np = of_parse_phandle(np, "prcm", 0);
+ if (prcm_np)
+ npct->prcm_base = of_iomap(prcm_np, 0);
if (!npct->prcm_base) {
if (version == PINCTRL_NMK_STN8815) {
dev_info(&pdev->dev,
@@ -1958,13 +2144,6 @@ static struct platform_driver nmk_gpio_driver = {
.probe = nmk_gpio_probe,
};
-static const struct platform_device_id nmk_pinctrl_id[] = {
- { "pinctrl-stn8815", PINCTRL_NMK_STN8815 },
- { "pinctrl-db8500", PINCTRL_NMK_DB8500 },
- { "pinctrl-db8540", PINCTRL_NMK_DB8540 },
- { }
-};
-
static struct platform_driver nmk_pinctrl_driver = {
.driver = {
.owner = THIS_MODULE,
@@ -1972,7 +2151,6 @@ static struct platform_driver nmk_pinctrl_driver = {
.of_match_table = nmk_pinctrl_match,
},
.probe = nmk_pinctrl_probe,
- .id_table = nmk_pinctrl_id,
#ifdef CONFIG_PM
.suspend = nmk_pinctrl_suspend,
.resume = nmk_pinctrl_resume,
diff --git a/drivers/pinctrl/pinctrl-nomadik.h b/drivers/pinctrl/pinctrl-nomadik.h
index bcd4191e10ea..d8215f1e70c7 100644
--- a/drivers/pinctrl/pinctrl-nomadik.h
+++ b/drivers/pinctrl/pinctrl-nomadik.h
@@ -1,13 +1,23 @@
#ifndef PINCTRL_PINCTRL_NOMADIK_H
#define PINCTRL_PINCTRL_NOMADIK_H
-#include <linux/platform_data/pinctrl-nomadik.h>
-
/* Package definitions */
#define PINCTRL_NMK_STN8815 0
#define PINCTRL_NMK_DB8500 1
#define PINCTRL_NMK_DB8540 2
+/* Alternate functions: function C is set in hw by setting both A and B */
+#define NMK_GPIO_ALT_GPIO 0
+#define NMK_GPIO_ALT_A 1
+#define NMK_GPIO_ALT_B 2
+#define NMK_GPIO_ALT_C (NMK_GPIO_ALT_A | NMK_GPIO_ALT_B)
+
+#define NMK_GPIO_ALT_CX_SHIFT 2
+#define NMK_GPIO_ALT_C1 ((1<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+#define NMK_GPIO_ALT_C2 ((2<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+#define NMK_GPIO_ALT_C3 ((3<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+#define NMK_GPIO_ALT_C4 ((4<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+
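A small illustration (assuming the defines above are in scope): the extended ALT_Cx values carry both the plain function-C selection in the low bits and the PRCM variant index above NMK_GPIO_ALT_CX_SHIFT.

#include <stdio.h>

int main(void)
{
	unsigned int alt = NMK_GPIO_ALT_C3;

	/* Low bits: A and B both set selects function C in the GPIO block. */
	printf("C selected: %d\n", (alt & NMK_GPIO_ALT_C) == NMK_GPIO_ALT_C);
	/* Upper bits: which of the C1..C4 variants the PRCM must program. */
	printf("variant:    %u\n", alt >> NMK_GPIO_ALT_CX_SHIFT);
	return 0;
}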
#define PRCM_GPIOCR_ALTCX(pin_num,\
altc1_used, altc1_ri, altc1_cb,\
altc2_used, altc2_ri, altc2_cb,\
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 829b98c5c66f..de6459628b4f 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -525,12 +525,18 @@ static void pcs_disable(struct pinctrl_dev *pctldev, unsigned fselector,
for (i = 0; i < func->nvals; i++) {
struct pcs_func_vals *vals;
unsigned long flags;
- unsigned val;
+ unsigned val, mask;
vals = &func->vals[i];
raw_spin_lock_irqsave(&pcs->lock, flags);
val = pcs->read(vals->reg);
- val &= ~pcs->fmask;
+
+ if (pcs->bits_per_mux)
+ mask = vals->mask;
+ else
+ mask = pcs->fmask;
+
+ val &= ~mask;
val |= pcs->foff << pcs->fshift;
pcs->write(val, vals->reg);
raw_spin_unlock_irqrestore(&pcs->lock, flags);
@@ -1312,6 +1318,14 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
mask_pos = ((pcs->fmask) << (bit_pos - 1));
val_pos = val & mask_pos;
submask = mask & mask_pos;
+
+ if ((mask & mask_pos) == 0) {
+ dev_err(pcs->dev,
+ "Invalid mask for %s at 0x%x\n",
+ np->name, offset);
+ break;
+ }
+
mask &= ~mask_pos;
if (submask != mask_pos) {
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 9cadc68ee572..320c27363cc8 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1370,10 +1370,10 @@ static int st_pctl_probe(struct platform_device *pdev)
if (ret)
return ret;
- pctl_desc->owner = THIS_MODULE,
- pctl_desc->pctlops = &st_pctlops,
- pctl_desc->pmxops = &st_pmxops,
- pctl_desc->confops = &st_confops,
+ pctl_desc->owner = THIS_MODULE;
+ pctl_desc->pctlops = &st_pctlops;
+ pctl_desc->pmxops = &st_pmxops;
+ pctl_desc->confops = &st_confops;
pctl_desc->name = dev_name(&pdev->dev);
info->pctl = pinctrl_register(pctl_desc, &pdev->dev, info);
diff --git a/drivers/pinctrl/pinctrl-sunxi-pins.h b/drivers/pinctrl/pinctrl-sunxi-pins.h
index 2c7446a1a199..6fd8d4d95140 100644
--- a/drivers/pinctrl/pinctrl-sunxi-pins.h
+++ b/drivers/pinctrl/pinctrl-sunxi-pins.h
@@ -3774,12 +3774,14 @@ static const struct sunxi_desc_pin sun7i_a20_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi0"), /* MOSI */
SUNXI_FUNCTION(0x3, "uart6"), /* TX */
+ SUNXI_FUNCTION(0x4, "clk_out_a"), /* CLK_OUT_A */
SUNXI_FUNCTION_IRQ(0x5, 24)), /* EINT24 */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PI13,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi0"), /* MISO */
SUNXI_FUNCTION(0x3, "uart6"), /* RX */
+ SUNXI_FUNCTION(0x4, "clk_out_b"), /* CLK_OUT_B */
SUNXI_FUNCTION_IRQ(0x5, 25)), /* EINT25 */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PI14,
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
index 119d2ddedfe7..9ccf681dad2f 100644
--- a/drivers/pinctrl/pinctrl-sunxi.c
+++ b/drivers/pinctrl/pinctrl-sunxi.c
@@ -469,12 +469,6 @@ static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
return val;
}
-static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- return pinctrl_gpio_direction_output(chip->base + offset);
-}
-
static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
unsigned offset, int value)
{
@@ -498,6 +492,13 @@ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
spin_unlock_irqrestore(&pctl->lock, flags);
}
+static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ sunxi_pinctrl_gpio_set(chip, offset, value);
+ return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec,
u32 *flags)
@@ -547,7 +548,7 @@ static struct gpio_chip sunxi_pinctrl_gpio_chip = {
.of_xlate = sunxi_pinctrl_gpio_of_xlate,
.to_irq = sunxi_pinctrl_gpio_to_irq,
.of_gpio_n_cells = 3,
- .can_sleep = 0,
+ .can_sleep = false,
};
static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index a2e93a2b5ff4..e767355ab0ad 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -645,7 +645,7 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
GFP_KERNEL);
if (!pmx->regs) {
dev_err(&pdev->dev, "Can't alloc regs pointer\n");
- return -ENODEV;
+ return -ENOMEM;
}
for (i = 0; i < pmx->nbanks; i++) {
diff --git a/drivers/pinctrl/pinctrl-tegra124.c b/drivers/pinctrl/pinctrl-tegra124.c
new file mode 100644
index 000000000000..c20e0e1dda83
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-tegra124.c
@@ -0,0 +1,3137 @@
+/*
+ * Pinctrl data for the NVIDIA Tegra124 pinmux
+ *
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-tegra.h"
+
+/*
+ * Most pins affected by the pinmux can also be GPIOs. Define these first.
+ * These must match how the GPIO driver names/numbers its pins.
+ */
+#define _GPIO(offset) (offset)
+
+#define TEGRA_PIN_CLK_32K_OUT_PA0 _GPIO(0)
+#define TEGRA_PIN_UART3_CTS_N_PA1 _GPIO(1)
+#define TEGRA_PIN_DAP2_FS_PA2 _GPIO(2)
+#define TEGRA_PIN_DAP2_SCLK_PA3 _GPIO(3)
+#define TEGRA_PIN_DAP2_DIN_PA4 _GPIO(4)
+#define TEGRA_PIN_DAP2_DOUT_PA5 _GPIO(5)
+#define TEGRA_PIN_SDMMC3_CLK_PA6 _GPIO(6)
+#define TEGRA_PIN_SDMMC3_CMD_PA7 _GPIO(7)
+#define TEGRA_PIN_PB0 _GPIO(8)
+#define TEGRA_PIN_PB1 _GPIO(9)
+#define TEGRA_PIN_SDMMC3_DAT3_PB4 _GPIO(12)
+#define TEGRA_PIN_SDMMC3_DAT2_PB5 _GPIO(13)
+#define TEGRA_PIN_SDMMC3_DAT1_PB6 _GPIO(14)
+#define TEGRA_PIN_SDMMC3_DAT0_PB7 _GPIO(15)
+#define TEGRA_PIN_UART3_RTS_N_PC0 _GPIO(16)
+#define TEGRA_PIN_UART2_TXD_PC2 _GPIO(18)
+#define TEGRA_PIN_UART2_RXD_PC3 _GPIO(19)
+#define TEGRA_PIN_GEN1_I2C_SCL_PC4 _GPIO(20)
+#define TEGRA_PIN_GEN1_I2C_SDA_PC5 _GPIO(21)
+#define TEGRA_PIN_PC7 _GPIO(23)
+#define TEGRA_PIN_PG0 _GPIO(48)
+#define TEGRA_PIN_PG1 _GPIO(49)
+#define TEGRA_PIN_PG2 _GPIO(50)
+#define TEGRA_PIN_PG3 _GPIO(51)
+#define TEGRA_PIN_PG4 _GPIO(52)
+#define TEGRA_PIN_PG5 _GPIO(53)
+#define TEGRA_PIN_PG6 _GPIO(54)
+#define TEGRA_PIN_PG7 _GPIO(55)
+#define TEGRA_PIN_PH0 _GPIO(56)
+#define TEGRA_PIN_PH1 _GPIO(57)
+#define TEGRA_PIN_PH2 _GPIO(58)
+#define TEGRA_PIN_PH3 _GPIO(59)
+#define TEGRA_PIN_PH4 _GPIO(60)
+#define TEGRA_PIN_PH5 _GPIO(61)
+#define TEGRA_PIN_PH6 _GPIO(62)
+#define TEGRA_PIN_PH7 _GPIO(63)
+#define TEGRA_PIN_PI0 _GPIO(64)
+#define TEGRA_PIN_PI1 _GPIO(65)
+#define TEGRA_PIN_PI2 _GPIO(66)
+#define TEGRA_PIN_PI3 _GPIO(67)
+#define TEGRA_PIN_PI4 _GPIO(68)
+#define TEGRA_PIN_PI5 _GPIO(69)
+#define TEGRA_PIN_PI6 _GPIO(70)
+#define TEGRA_PIN_PI7 _GPIO(71)
+#define TEGRA_PIN_PJ0 _GPIO(72)
+#define TEGRA_PIN_PJ2 _GPIO(74)
+#define TEGRA_PIN_UART2_CTS_N_PJ5 _GPIO(77)
+#define TEGRA_PIN_UART2_RTS_N_PJ6 _GPIO(78)
+#define TEGRA_PIN_PJ7 _GPIO(79)
+#define TEGRA_PIN_PK0 _GPIO(80)
+#define TEGRA_PIN_PK1 _GPIO(81)
+#define TEGRA_PIN_PK2 _GPIO(82)
+#define TEGRA_PIN_PK3 _GPIO(83)
+#define TEGRA_PIN_PK4 _GPIO(84)
+#define TEGRA_PIN_SPDIF_OUT_PK5 _GPIO(85)
+#define TEGRA_PIN_SPDIF_IN_PK6 _GPIO(86)
+#define TEGRA_PIN_PK7 _GPIO(87)
+#define TEGRA_PIN_DAP1_FS_PN0 _GPIO(104)
+#define TEGRA_PIN_DAP1_DIN_PN1 _GPIO(105)
+#define TEGRA_PIN_DAP1_DOUT_PN2 _GPIO(106)
+#define TEGRA_PIN_DAP1_SCLK_PN3 _GPIO(107)
+#define TEGRA_PIN_USB_VBUS_EN0_PN4 _GPIO(108)
+#define TEGRA_PIN_USB_VBUS_EN1_PN5 _GPIO(109)
+#define TEGRA_PIN_HDMI_INT_PN7 _GPIO(111)
+#define TEGRA_PIN_ULPI_DATA7_PO0 _GPIO(112)
+#define TEGRA_PIN_ULPI_DATA0_PO1 _GPIO(113)
+#define TEGRA_PIN_ULPI_DATA1_PO2 _GPIO(114)
+#define TEGRA_PIN_ULPI_DATA2_PO3 _GPIO(115)
+#define TEGRA_PIN_ULPI_DATA3_PO4 _GPIO(116)
+#define TEGRA_PIN_ULPI_DATA4_PO5 _GPIO(117)
+#define TEGRA_PIN_ULPI_DATA5_PO6 _GPIO(118)
+#define TEGRA_PIN_ULPI_DATA6_PO7 _GPIO(119)
+#define TEGRA_PIN_DAP3_FS_PP0 _GPIO(120)
+#define TEGRA_PIN_DAP3_DIN_PP1 _GPIO(121)
+#define TEGRA_PIN_DAP3_DOUT_PP2 _GPIO(122)
+#define TEGRA_PIN_DAP3_SCLK_PP3 _GPIO(123)
+#define TEGRA_PIN_DAP4_FS_PP4 _GPIO(124)
+#define TEGRA_PIN_DAP4_DIN_PP5 _GPIO(125)
+#define TEGRA_PIN_DAP4_DOUT_PP6 _GPIO(126)
+#define TEGRA_PIN_DAP4_SCLK_PP7 _GPIO(127)
+#define TEGRA_PIN_KB_COL0_PQ0 _GPIO(128)
+#define TEGRA_PIN_KB_COL1_PQ1 _GPIO(129)
+#define TEGRA_PIN_KB_COL2_PQ2 _GPIO(130)
+#define TEGRA_PIN_KB_COL3_PQ3 _GPIO(131)
+#define TEGRA_PIN_KB_COL4_PQ4 _GPIO(132)
+#define TEGRA_PIN_KB_COL5_PQ5 _GPIO(133)
+#define TEGRA_PIN_KB_COL6_PQ6 _GPIO(134)
+#define TEGRA_PIN_KB_COL7_PQ7 _GPIO(135)
+#define TEGRA_PIN_KB_ROW0_PR0 _GPIO(136)
+#define TEGRA_PIN_KB_ROW1_PR1 _GPIO(137)
+#define TEGRA_PIN_KB_ROW2_PR2 _GPIO(138)
+#define TEGRA_PIN_KB_ROW3_PR3 _GPIO(139)
+#define TEGRA_PIN_KB_ROW4_PR4 _GPIO(140)
+#define TEGRA_PIN_KB_ROW5_PR5 _GPIO(141)
+#define TEGRA_PIN_KB_ROW6_PR6 _GPIO(142)
+#define TEGRA_PIN_KB_ROW7_PR7 _GPIO(143)
+#define TEGRA_PIN_KB_ROW8_PS0 _GPIO(144)
+#define TEGRA_PIN_KB_ROW9_PS1 _GPIO(145)
+#define TEGRA_PIN_KB_ROW10_PS2 _GPIO(146)
+#define TEGRA_PIN_KB_ROW11_PS3 _GPIO(147)
+#define TEGRA_PIN_KB_ROW12_PS4 _GPIO(148)
+#define TEGRA_PIN_KB_ROW13_PS5 _GPIO(149)
+#define TEGRA_PIN_KB_ROW14_PS6 _GPIO(150)
+#define TEGRA_PIN_KB_ROW15_PS7 _GPIO(151)
+#define TEGRA_PIN_KB_ROW16_PT0 _GPIO(152)
+#define TEGRA_PIN_KB_ROW17_PT1 _GPIO(153)
+#define TEGRA_PIN_GEN2_I2C_SCL_PT5 _GPIO(157)
+#define TEGRA_PIN_GEN2_I2C_SDA_PT6 _GPIO(158)
+#define TEGRA_PIN_SDMMC4_CMD_PT7 _GPIO(159)
+#define TEGRA_PIN_PU0 _GPIO(160)
+#define TEGRA_PIN_PU1 _GPIO(161)
+#define TEGRA_PIN_PU2 _GPIO(162)
+#define TEGRA_PIN_PU3 _GPIO(163)
+#define TEGRA_PIN_PU4 _GPIO(164)
+#define TEGRA_PIN_PU5 _GPIO(165)
+#define TEGRA_PIN_PU6 _GPIO(166)
+#define TEGRA_PIN_PV0 _GPIO(168)
+#define TEGRA_PIN_PV1 _GPIO(169)
+#define TEGRA_PIN_SDMMC3_CD_N_PV2 _GPIO(170)
+#define TEGRA_PIN_SDMMC1_WP_N_PV3 _GPIO(171)
+#define TEGRA_PIN_DDC_SCL_PV4 _GPIO(172)
+#define TEGRA_PIN_DDC_SDA_PV5 _GPIO(173)
+#define TEGRA_PIN_GPIO_W2_AUD_PW2 _GPIO(178)
+#define TEGRA_PIN_GPIO_W3_AUD_PW3 _GPIO(179)
+#define TEGRA_PIN_DAP_MCLK1_PW4 _GPIO(180)
+#define TEGRA_PIN_CLK2_OUT_PW5 _GPIO(181)
+#define TEGRA_PIN_UART3_TXD_PW6 _GPIO(182)
+#define TEGRA_PIN_UART3_RXD_PW7 _GPIO(183)
+#define TEGRA_PIN_DVFS_PWM_PX0 _GPIO(184)
+#define TEGRA_PIN_GPIO_X1_AUD_PX1 _GPIO(185)
+#define TEGRA_PIN_DVFS_CLK_PX2 _GPIO(186)
+#define TEGRA_PIN_GPIO_X3_AUD_PX3 _GPIO(187)
+#define TEGRA_PIN_GPIO_X4_AUD_PX4 _GPIO(188)
+#define TEGRA_PIN_GPIO_X5_AUD_PX5 _GPIO(189)
+#define TEGRA_PIN_GPIO_X6_AUD_PX6 _GPIO(190)
+#define TEGRA_PIN_GPIO_X7_AUD_PX7 _GPIO(191)
+#define TEGRA_PIN_ULPI_CLK_PY0 _GPIO(192)
+#define TEGRA_PIN_ULPI_DIR_PY1 _GPIO(193)
+#define TEGRA_PIN_ULPI_NXT_PY2 _GPIO(194)
+#define TEGRA_PIN_ULPI_STP_PY3 _GPIO(195)
+#define TEGRA_PIN_SDMMC1_DAT3_PY4 _GPIO(196)
+#define TEGRA_PIN_SDMMC1_DAT2_PY5 _GPIO(197)
+#define TEGRA_PIN_SDMMC1_DAT1_PY6 _GPIO(198)
+#define TEGRA_PIN_SDMMC1_DAT0_PY7 _GPIO(199)
+#define TEGRA_PIN_SDMMC1_CLK_PZ0 _GPIO(200)
+#define TEGRA_PIN_SDMMC1_CMD_PZ1 _GPIO(201)
+#define TEGRA_PIN_PWR_I2C_SCL_PZ6 _GPIO(206)
+#define TEGRA_PIN_PWR_I2C_SDA_PZ7 _GPIO(207)
+#define TEGRA_PIN_SDMMC4_DAT0_PAA0 _GPIO(208)
+#define TEGRA_PIN_SDMMC4_DAT1_PAA1 _GPIO(209)
+#define TEGRA_PIN_SDMMC4_DAT2_PAA2 _GPIO(210)
+#define TEGRA_PIN_SDMMC4_DAT3_PAA3 _GPIO(211)
+#define TEGRA_PIN_SDMMC4_DAT4_PAA4 _GPIO(212)
+#define TEGRA_PIN_SDMMC4_DAT5_PAA5 _GPIO(213)
+#define TEGRA_PIN_SDMMC4_DAT6_PAA6 _GPIO(214)
+#define TEGRA_PIN_SDMMC4_DAT7_PAA7 _GPIO(215)
+#define TEGRA_PIN_PBB0 _GPIO(216)
+#define TEGRA_PIN_CAM_I2C_SCL_PBB1 _GPIO(217)
+#define TEGRA_PIN_CAM_I2C_SDA_PBB2 _GPIO(218)
+#define TEGRA_PIN_PBB3 _GPIO(219)
+#define TEGRA_PIN_PBB4 _GPIO(220)
+#define TEGRA_PIN_PBB5 _GPIO(221)
+#define TEGRA_PIN_PBB6 _GPIO(222)
+#define TEGRA_PIN_PBB7 _GPIO(223)
+#define TEGRA_PIN_CAM_MCLK_PCC0 _GPIO(224)
+#define TEGRA_PIN_PCC1 _GPIO(225)
+#define TEGRA_PIN_PCC2 _GPIO(226)
+#define TEGRA_PIN_SDMMC4_CLK_PCC4 _GPIO(228)
+#define TEGRA_PIN_CLK2_REQ_PCC5 _GPIO(229)
+#define TEGRA_PIN_PEX_L0_RST_N_PDD1 _GPIO(233)
+#define TEGRA_PIN_PEX_L0_CLKREQ_N_PDD2 _GPIO(234)
+#define TEGRA_PIN_PEX_WAKE_N_PDD3 _GPIO(235)
+#define TEGRA_PIN_PEX_L1_RST_N_PDD5 _GPIO(237)
+#define TEGRA_PIN_PEX_L1_CLKREQ_N_PDD6 _GPIO(238)
+#define TEGRA_PIN_CLK3_OUT_PEE0 _GPIO(240)
+#define TEGRA_PIN_CLK3_REQ_PEE1 _GPIO(241)
+#define TEGRA_PIN_DAP_MCLK1_REQ_PEE2 _GPIO(242)
+#define TEGRA_PIN_HDMI_CEC_PEE3 _GPIO(243)
+#define TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4 _GPIO(244)
+#define TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5 _GPIO(245)
+#define TEGRA_PIN_DP_HPD_PFF0 _GPIO(248)
+#define TEGRA_PIN_USB_VBUS_EN2_PFF1 _GPIO(249)
+#define TEGRA_PIN_PFF2 _GPIO(250)
+
+/* All non-GPIO pins follow */
+#define NUM_GPIOS (TEGRA_PIN_PFF2 + 1)
+#define _PIN(offset) (NUM_GPIOS + (offset))
+
+/* Non-GPIO pins */
+#define TEGRA_PIN_CORE_PWR_REQ _PIN(0)
+#define TEGRA_PIN_CPU_PWR_REQ _PIN(1)
+#define TEGRA_PIN_PWR_INT_N _PIN(2)
+#define TEGRA_PIN_GMI_CLK_LB _PIN(3)
+#define TEGRA_PIN_RESET_OUT_N _PIN(4)
+#define TEGRA_PIN_OWR _PIN(5)
+#define TEGRA_PIN_CLK_32K_IN _PIN(6)
+#define TEGRA_PIN_JTAG_RTCK _PIN(7)
+
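For orientation, a tiny sketch (illustrative only, not part of the patch) of how the combined pin number space lays out: the GPIO-capable pads occupy 0..NUM_GPIOS-1 to match the GPIO driver's numbering, and the dedicated pads follow at NUM_GPIOS + offset.

#include <stdio.h>

int main(void)
{
	/* Last GPIO-capable pad, then the first and last dedicated pads. */
	printf("PFF2          = %d\n", TEGRA_PIN_PFF2);         /* 250 */
	printf("NUM_GPIOS     = %d\n", NUM_GPIOS);               /* 251 */
	printf("CORE_PWR_REQ  = %d\n", TEGRA_PIN_CORE_PWR_REQ);  /* 251 */
	printf("JTAG_RTCK     = %d\n", TEGRA_PIN_JTAG_RTCK);     /* 258 */
	return 0;
}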
+static const struct pinctrl_pin_desc tegra124_pins[] = {
+ PINCTRL_PIN(TEGRA_PIN_CLK_32K_OUT_PA0, "CLK_32K_OUT PA0"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_CTS_N_PA1, "UART3_CTS_N PA1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_FS_PA2, "DAP2_FS PA2"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_SCLK_PA3, "DAP2_SCLK PA3"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_DIN_PA4, "DAP2_DIN PA4"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_DOUT_PA5, "DAP2_DOUT PA5"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_PA6, "SDMMC3_CLK PA6"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_CMD_PA7, "SDMMC3_CMD PA7"),
+ PINCTRL_PIN(TEGRA_PIN_PB0, "PB0"),
+ PINCTRL_PIN(TEGRA_PIN_PB1, "PB1"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT3_PB4, "SDMMC3_DAT3 PB4"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT2_PB5, "SDMMC3_DAT2 PB5"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT1_PB6, "SDMMC3_DAT1 PB6"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT0_PB7, "SDMMC3_DAT0 PB7"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_RTS_N_PC0, "UART3_RTS_N PC0"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_TXD_PC2, "UART2_TXD PC2"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_RXD_PC3, "UART2_RXD PC3"),
+ PINCTRL_PIN(TEGRA_PIN_GEN1_I2C_SCL_PC4, "GEN1_I2C_SCL PC4"),
+ PINCTRL_PIN(TEGRA_PIN_GEN1_I2C_SDA_PC5, "GEN1_I2C_SDA PC5"),
+ PINCTRL_PIN(TEGRA_PIN_PC7, "PC7"),
+ PINCTRL_PIN(TEGRA_PIN_PG0, "PG0"),
+ PINCTRL_PIN(TEGRA_PIN_PG1, "PG1"),
+ PINCTRL_PIN(TEGRA_PIN_PG2, "PG2"),
+ PINCTRL_PIN(TEGRA_PIN_PG3, "PG3"),
+ PINCTRL_PIN(TEGRA_PIN_PG4, "PG4"),
+ PINCTRL_PIN(TEGRA_PIN_PG5, "PG5"),
+ PINCTRL_PIN(TEGRA_PIN_PG6, "PG6"),
+ PINCTRL_PIN(TEGRA_PIN_PG7, "PG7"),
+ PINCTRL_PIN(TEGRA_PIN_PH0, "PH0"),
+ PINCTRL_PIN(TEGRA_PIN_PH1, "PH1"),
+ PINCTRL_PIN(TEGRA_PIN_PH2, "PH2"),
+ PINCTRL_PIN(TEGRA_PIN_PH3, "PH3"),
+ PINCTRL_PIN(TEGRA_PIN_PH4, "PH4"),
+ PINCTRL_PIN(TEGRA_PIN_PH5, "PH5"),
+ PINCTRL_PIN(TEGRA_PIN_PH6, "PH6"),
+ PINCTRL_PIN(TEGRA_PIN_PH7, "PH7"),
+ PINCTRL_PIN(TEGRA_PIN_PI0, "PI0"),
+ PINCTRL_PIN(TEGRA_PIN_PI1, "PI1"),
+ PINCTRL_PIN(TEGRA_PIN_PI2, "PI2"),
+ PINCTRL_PIN(TEGRA_PIN_PI3, "PI3"),
+ PINCTRL_PIN(TEGRA_PIN_PI4, "PI4"),
+ PINCTRL_PIN(TEGRA_PIN_PI5, "PI5"),
+ PINCTRL_PIN(TEGRA_PIN_PI6, "PI6"),
+ PINCTRL_PIN(TEGRA_PIN_PI7, "PI7"),
+ PINCTRL_PIN(TEGRA_PIN_PJ0, "PJ0"),
+ PINCTRL_PIN(TEGRA_PIN_PJ2, "PJ2"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_CTS_N_PJ5, "UART2_CTS_N PJ5"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_RTS_N_PJ6, "UART2_RTS_N PJ6"),
+ PINCTRL_PIN(TEGRA_PIN_PJ7, "PJ7"),
+ PINCTRL_PIN(TEGRA_PIN_PK0, "PK0"),
+ PINCTRL_PIN(TEGRA_PIN_PK1, "PK1"),
+ PINCTRL_PIN(TEGRA_PIN_PK2, "PK2"),
+ PINCTRL_PIN(TEGRA_PIN_PK3, "PK3"),
+ PINCTRL_PIN(TEGRA_PIN_PK4, "PK4"),
+ PINCTRL_PIN(TEGRA_PIN_SPDIF_OUT_PK5, "SPDIF_OUT PK5"),
+ PINCTRL_PIN(TEGRA_PIN_SPDIF_IN_PK6, "SPDIF_IN PK6"),
+ PINCTRL_PIN(TEGRA_PIN_PK7, "PK7"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_FS_PN0, "DAP1_FS PN0"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_DIN_PN1, "DAP1_DIN PN1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_DOUT_PN2, "DAP1_DOUT PN2"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_SCLK_PN3, "DAP1_SCLK PN3"),
+ PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN0_PN4, "USB_VBUS_EN0 PN4"),
+ PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN1_PN5, "USB_VBUS_EN1 PN5"),
+ PINCTRL_PIN(TEGRA_PIN_HDMI_INT_PN7, "HDMI_INT PN7"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DATA7_PO0, "ULPI_DATA7 PO0"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DATA0_PO1, "ULPI_DATA0 PO1"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DATA1_PO2, "ULPI_DATA1 PO2"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DATA2_PO3, "ULPI_DATA2 PO3"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DATA3_PO4, "ULPI_DATA3 PO4"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DATA4_PO5, "ULPI_DATA4 PO5"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DATA5_PO6, "ULPI_DATA5 PO6"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DATA6_PO7, "ULPI_DATA6 PO7"),
+ PINCTRL_PIN(TEGRA_PIN_DAP3_FS_PP0, "DAP3_FS PP0"),
+ PINCTRL_PIN(TEGRA_PIN_DAP3_DIN_PP1, "DAP3_DIN PP1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP3_DOUT_PP2, "DAP3_DOUT PP2"),
+ PINCTRL_PIN(TEGRA_PIN_DAP3_SCLK_PP3, "DAP3_SCLK PP3"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_FS_PP4, "DAP4_FS PP4"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_DIN_PP5, "DAP4_DIN PP5"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_DOUT_PP6, "DAP4_DOUT PP6"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_SCLK_PP7, "DAP4_SCLK PP7"),
+ PINCTRL_PIN(TEGRA_PIN_KB_COL0_PQ0, "KB_COL0 PQ0"),
+ PINCTRL_PIN(TEGRA_PIN_KB_COL1_PQ1, "KB_COL1 PQ1"),
+ PINCTRL_PIN(TEGRA_PIN_KB_COL2_PQ2, "KB_COL2 PQ2"),
+ PINCTRL_PIN(TEGRA_PIN_KB_COL3_PQ3, "KB_COL3 PQ3"),
+ PINCTRL_PIN(TEGRA_PIN_KB_COL4_PQ4, "KB_COL4 PQ4"),
+ PINCTRL_PIN(TEGRA_PIN_KB_COL5_PQ5, "KB_COL5 PQ5"),
+ PINCTRL_PIN(TEGRA_PIN_KB_COL6_PQ6, "KB_COL6 PQ6"),
+ PINCTRL_PIN(TEGRA_PIN_KB_COL7_PQ7, "KB_COL7 PQ7"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW0_PR0, "KB_ROW0 PR0"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW1_PR1, "KB_ROW1 PR1"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW2_PR2, "KB_ROW2 PR2"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW3_PR3, "KB_ROW3 PR3"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW4_PR4, "KB_ROW4 PR4"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW5_PR5, "KB_ROW5 PR5"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW6_PR6, "KB_ROW6 PR6"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW7_PR7, "KB_ROW7 PR7"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW8_PS0, "KB_ROW8 PS0"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW9_PS1, "KB_ROW9 PS1"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW10_PS2, "KB_ROW10 PS2"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW11_PS3, "KB_ROW11 PS3"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW12_PS4, "KB_ROW12 PS4"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW13_PS5, "KB_ROW13 PS5"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW14_PS6, "KB_ROW14 PS6"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW15_PS7, "KB_ROW15 PS7"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW16_PT0, "KB_ROW16 PT0"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW17_PT1, "KB_ROW17 PT1"),
+ PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SCL_PT5, "GEN2_I2C_SCL PT5"),
+ PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SDA_PT6, "GEN2_I2C_SDA PT6"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_CMD_PT7, "SDMMC4_CMD PT7"),
+ PINCTRL_PIN(TEGRA_PIN_PU0, "PU0"),
+ PINCTRL_PIN(TEGRA_PIN_PU1, "PU1"),
+ PINCTRL_PIN(TEGRA_PIN_PU2, "PU2"),
+ PINCTRL_PIN(TEGRA_PIN_PU3, "PU3"),
+ PINCTRL_PIN(TEGRA_PIN_PU4, "PU4"),
+ PINCTRL_PIN(TEGRA_PIN_PU5, "PU5"),
+ PINCTRL_PIN(TEGRA_PIN_PU6, "PU6"),
+ PINCTRL_PIN(TEGRA_PIN_PV0, "PV0"),
+ PINCTRL_PIN(TEGRA_PIN_PV1, "PV1"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_CD_N_PV2, "SDMMC3_CD_N PV2"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_WP_N_PV3, "SDMMC1_WP_N PV3"),
+ PINCTRL_PIN(TEGRA_PIN_DDC_SCL_PV4, "DDC_SCL PV4"),
+ PINCTRL_PIN(TEGRA_PIN_DDC_SDA_PV5, "DDC_SDA PV5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_W2_AUD_PW2, "GPIO_W2_AUD PW2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_W3_AUD_PW3, "GPIO_W3_AUD PW3"),
+ PINCTRL_PIN(TEGRA_PIN_DAP_MCLK1_PW4, "DAP_MCLK1 PW4"),
+ PINCTRL_PIN(TEGRA_PIN_CLK2_OUT_PW5, "CLK2_OUT PW5"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_TXD_PW6, "UART3_TXD PW6"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_RXD_PW7, "UART3_RXD PW7"),
+ PINCTRL_PIN(TEGRA_PIN_DVFS_PWM_PX0, "DVFS_PWM PX0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_X1_AUD_PX1, "GPIO_X1_AUD PX1"),
+ PINCTRL_PIN(TEGRA_PIN_DVFS_CLK_PX2, "DVFS_CLK PX2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_X3_AUD_PX3, "GPIO_X3_AUD PX3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_X4_AUD_PX4, "GPIO_X4_AUD PX4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_X5_AUD_PX5, "GPIO_X5_AUD PX5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_X6_AUD_PX6, "GPIO_X6_AUD PX6"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_X7_AUD_PX7, "GPIO_X7_AUD PX7"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_CLK_PY0, "ULPI_CLK PY0"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_DIR_PY1, "ULPI_DIR PY1"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_NXT_PY2, "ULPI_NXT PY2"),
+ PINCTRL_PIN(TEGRA_PIN_ULPI_STP_PY3, "ULPI_STP PY3"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT3_PY4, "SDMMC1_DAT3 PY4"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT2_PY5, "SDMMC1_DAT2 PY5"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT1_PY6, "SDMMC1_DAT1 PY6"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT0_PY7, "SDMMC1_DAT0 PY7"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_CLK_PZ0, "SDMMC1_CLK PZ0"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_CMD_PZ1, "SDMMC1_CMD PZ1"),
+ PINCTRL_PIN(TEGRA_PIN_PWR_I2C_SCL_PZ6, "PWR_I2C_SCL PZ6"),
+ PINCTRL_PIN(TEGRA_PIN_PWR_I2C_SDA_PZ7, "PWR_I2C_SDA PZ7"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT0_PAA0, "SDMMC4_DAT0 PAA0"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT1_PAA1, "SDMMC4_DAT1 PAA1"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT2_PAA2, "SDMMC4_DAT2 PAA2"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT3_PAA3, "SDMMC4_DAT3 PAA3"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT4_PAA4, "SDMMC4_DAT4 PAA4"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT5_PAA5, "SDMMC4_DAT5 PAA5"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT6_PAA6, "SDMMC4_DAT6 PAA6"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT7_PAA7, "SDMMC4_DAT7 PAA7"),
+ PINCTRL_PIN(TEGRA_PIN_PBB0, "PBB0"),
+ PINCTRL_PIN(TEGRA_PIN_CAM_I2C_SCL_PBB1, "CAM_I2C_SCL PBB1"),
+ PINCTRL_PIN(TEGRA_PIN_CAM_I2C_SDA_PBB2, "CAM_I2C_SDA PBB2"),
+ PINCTRL_PIN(TEGRA_PIN_PBB3, "PBB3"),
+ PINCTRL_PIN(TEGRA_PIN_PBB4, "PBB4"),
+ PINCTRL_PIN(TEGRA_PIN_PBB5, "PBB5"),
+ PINCTRL_PIN(TEGRA_PIN_PBB6, "PBB6"),
+ PINCTRL_PIN(TEGRA_PIN_PBB7, "PBB7"),
+ PINCTRL_PIN(TEGRA_PIN_CAM_MCLK_PCC0, "CAM_MCLK PCC0"),
+ PINCTRL_PIN(TEGRA_PIN_PCC1, "PCC1"),
+ PINCTRL_PIN(TEGRA_PIN_PCC2, "PCC2"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC4_CLK_PCC4, "SDMMC4_CLK PCC4"),
+ PINCTRL_PIN(TEGRA_PIN_CLK2_REQ_PCC5, "CLK2_REQ PCC5"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L0_RST_N_PDD1, "PEX_L0_RST_N PDD1"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L0_CLKREQ_N_PDD2, "PEX_L0_CLKREQ_N PDD2"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_WAKE_N_PDD3, "PEX_WAKE_N PDD3"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L1_RST_N_PDD5, "PEX_L1_RST_N PDD5"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L1_CLKREQ_N_PDD6, "PEX_L1_CLKREQ_N PDD6"),
+ PINCTRL_PIN(TEGRA_PIN_CLK3_OUT_PEE0, "CLK3_OUT PEE0"),
+ PINCTRL_PIN(TEGRA_PIN_CLK3_REQ_PEE1, "CLK3_REQ PEE1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP_MCLK1_REQ_PEE2, "DAP_MCLK1_REQ PEE2"),
+ PINCTRL_PIN(TEGRA_PIN_HDMI_CEC_PEE3, "HDMI_CEC PEE3"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4, "SDMMC3_CLK_LB_OUT PEE4"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5, "SDMMC3_CLK_LB_IN PEE5"),
+ PINCTRL_PIN(TEGRA_PIN_CORE_PWR_REQ, "CORE_PWR_REQ"),
+ PINCTRL_PIN(TEGRA_PIN_CPU_PWR_REQ, "CPU_PWR_REQ"),
+ PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
+ PINCTRL_PIN(TEGRA_PIN_PWR_INT_N, "PWR_INT_N"),
+ PINCTRL_PIN(TEGRA_PIN_RESET_OUT_N, "RESET_OUT_N"),
+ PINCTRL_PIN(TEGRA_PIN_DP_HPD_PFF0, "DP_HPD PFF0"),
+ PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN2_PFF1, "USB_VBUS_EN2 PFF1"),
+ PINCTRL_PIN(TEGRA_PIN_PFF2, "PFF2"),
+ PINCTRL_PIN(TEGRA_PIN_CLK_32K_IN, "CLK_32K_IN"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_CLK_LB, "GMI_CLK_LB"),
+ PINCTRL_PIN(TEGRA_PIN_JTAG_RTCK, "JTAG_RTCK"),
+};
+
+static const unsigned clk_32k_out_pa0_pins[] = {
+ TEGRA_PIN_CLK_32K_OUT_PA0,
+};
+
+static const unsigned uart3_cts_n_pa1_pins[] = {
+ TEGRA_PIN_UART3_CTS_N_PA1,
+};
+
+static const unsigned dap2_fs_pa2_pins[] = {
+ TEGRA_PIN_DAP2_FS_PA2,
+};
+
+static const unsigned dap2_sclk_pa3_pins[] = {
+ TEGRA_PIN_DAP2_SCLK_PA3,
+};
+
+static const unsigned dap2_din_pa4_pins[] = {
+ TEGRA_PIN_DAP2_DIN_PA4,
+};
+
+static const unsigned dap2_dout_pa5_pins[] = {
+ TEGRA_PIN_DAP2_DOUT_PA5,
+};
+
+static const unsigned sdmmc3_clk_pa6_pins[] = {
+ TEGRA_PIN_SDMMC3_CLK_PA6,
+};
+
+static const unsigned sdmmc3_cmd_pa7_pins[] = {
+ TEGRA_PIN_SDMMC3_CMD_PA7,
+};
+
+static const unsigned pb0_pins[] = {
+ TEGRA_PIN_PB0,
+};
+
+static const unsigned pb1_pins[] = {
+ TEGRA_PIN_PB1,
+};
+
+static const unsigned sdmmc3_dat3_pb4_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT3_PB4,
+};
+
+static const unsigned sdmmc3_dat2_pb5_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT2_PB5,
+};
+
+static const unsigned sdmmc3_dat1_pb6_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT1_PB6,
+};
+
+static const unsigned sdmmc3_dat0_pb7_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT0_PB7,
+};
+
+static const unsigned uart3_rts_n_pc0_pins[] = {
+ TEGRA_PIN_UART3_RTS_N_PC0,
+};
+
+static const unsigned uart2_txd_pc2_pins[] = {
+ TEGRA_PIN_UART2_TXD_PC2,
+};
+
+static const unsigned uart2_rxd_pc3_pins[] = {
+ TEGRA_PIN_UART2_RXD_PC3,
+};
+
+static const unsigned gen1_i2c_scl_pc4_pins[] = {
+ TEGRA_PIN_GEN1_I2C_SCL_PC4,
+};
+
+static const unsigned gen1_i2c_sda_pc5_pins[] = {
+ TEGRA_PIN_GEN1_I2C_SDA_PC5,
+};
+
+static const unsigned pc7_pins[] = {
+ TEGRA_PIN_PC7,
+};
+
+static const unsigned pg0_pins[] = {
+ TEGRA_PIN_PG0,
+};
+
+static const unsigned pg1_pins[] = {
+ TEGRA_PIN_PG1,
+};
+
+static const unsigned pg2_pins[] = {
+ TEGRA_PIN_PG2,
+};
+
+static const unsigned pg3_pins[] = {
+ TEGRA_PIN_PG3,
+};
+
+static const unsigned pg4_pins[] = {
+ TEGRA_PIN_PG4,
+};
+
+static const unsigned pg5_pins[] = {
+ TEGRA_PIN_PG5,
+};
+
+static const unsigned pg6_pins[] = {
+ TEGRA_PIN_PG6,
+};
+
+static const unsigned pg7_pins[] = {
+ TEGRA_PIN_PG7,
+};
+
+static const unsigned ph0_pins[] = {
+ TEGRA_PIN_PH0,
+};
+
+static const unsigned ph1_pins[] = {
+ TEGRA_PIN_PH1,
+};
+
+static const unsigned ph2_pins[] = {
+ TEGRA_PIN_PH2,
+};
+
+static const unsigned ph3_pins[] = {
+ TEGRA_PIN_PH3,
+};
+
+static const unsigned ph4_pins[] = {
+ TEGRA_PIN_PH4,
+};
+
+static const unsigned ph5_pins[] = {
+ TEGRA_PIN_PH5,
+};
+
+static const unsigned ph6_pins[] = {
+ TEGRA_PIN_PH6,
+};
+
+static const unsigned ph7_pins[] = {
+ TEGRA_PIN_PH7,
+};
+
+static const unsigned pi0_pins[] = {
+ TEGRA_PIN_PI0,
+};
+
+static const unsigned pi1_pins[] = {
+ TEGRA_PIN_PI1,
+};
+
+static const unsigned pi2_pins[] = {
+ TEGRA_PIN_PI2,
+};
+
+static const unsigned pi3_pins[] = {
+ TEGRA_PIN_PI3,
+};
+
+static const unsigned pi4_pins[] = {
+ TEGRA_PIN_PI4,
+};
+
+static const unsigned pi5_pins[] = {
+ TEGRA_PIN_PI5,
+};
+
+static const unsigned pi6_pins[] = {
+ TEGRA_PIN_PI6,
+};
+
+static const unsigned pi7_pins[] = {
+ TEGRA_PIN_PI7,
+};
+
+static const unsigned pj0_pins[] = {
+ TEGRA_PIN_PJ0,
+};
+
+static const unsigned pj2_pins[] = {
+ TEGRA_PIN_PJ2,
+};
+
+static const unsigned uart2_cts_n_pj5_pins[] = {
+ TEGRA_PIN_UART2_CTS_N_PJ5,
+};
+
+static const unsigned uart2_rts_n_pj6_pins[] = {
+ TEGRA_PIN_UART2_RTS_N_PJ6,
+};
+
+static const unsigned pj7_pins[] = {
+ TEGRA_PIN_PJ7,
+};
+
+static const unsigned pk0_pins[] = {
+ TEGRA_PIN_PK0,
+};
+
+static const unsigned pk1_pins[] = {
+ TEGRA_PIN_PK1,
+};
+
+static const unsigned pk2_pins[] = {
+ TEGRA_PIN_PK2,
+};
+
+static const unsigned pk3_pins[] = {
+ TEGRA_PIN_PK3,
+};
+
+static const unsigned pk4_pins[] = {
+ TEGRA_PIN_PK4,
+};
+
+static const unsigned spdif_out_pk5_pins[] = {
+ TEGRA_PIN_SPDIF_OUT_PK5,
+};
+
+static const unsigned spdif_in_pk6_pins[] = {
+ TEGRA_PIN_SPDIF_IN_PK6,
+};
+
+static const unsigned pk7_pins[] = {
+ TEGRA_PIN_PK7,
+};
+
+static const unsigned dap1_fs_pn0_pins[] = {
+ TEGRA_PIN_DAP1_FS_PN0,
+};
+
+static const unsigned dap1_din_pn1_pins[] = {
+ TEGRA_PIN_DAP1_DIN_PN1,
+};
+
+static const unsigned dap1_dout_pn2_pins[] = {
+ TEGRA_PIN_DAP1_DOUT_PN2,
+};
+
+static const unsigned dap1_sclk_pn3_pins[] = {
+ TEGRA_PIN_DAP1_SCLK_PN3,
+};
+
+static const unsigned usb_vbus_en0_pn4_pins[] = {
+ TEGRA_PIN_USB_VBUS_EN0_PN4,
+};
+
+static const unsigned usb_vbus_en1_pn5_pins[] = {
+ TEGRA_PIN_USB_VBUS_EN1_PN5,
+};
+
+static const unsigned hdmi_int_pn7_pins[] = {
+ TEGRA_PIN_HDMI_INT_PN7,
+};
+
+static const unsigned ulpi_data7_po0_pins[] = {
+ TEGRA_PIN_ULPI_DATA7_PO0,
+};
+
+static const unsigned ulpi_data0_po1_pins[] = {
+ TEGRA_PIN_ULPI_DATA0_PO1,
+};
+
+static const unsigned ulpi_data1_po2_pins[] = {
+ TEGRA_PIN_ULPI_DATA1_PO2,
+};
+
+static const unsigned ulpi_data2_po3_pins[] = {
+ TEGRA_PIN_ULPI_DATA2_PO3,
+};
+
+static const unsigned ulpi_data3_po4_pins[] = {
+ TEGRA_PIN_ULPI_DATA3_PO4,
+};
+
+static const unsigned ulpi_data4_po5_pins[] = {
+ TEGRA_PIN_ULPI_DATA4_PO5,
+};
+
+static const unsigned ulpi_data5_po6_pins[] = {
+ TEGRA_PIN_ULPI_DATA5_PO6,
+};
+
+static const unsigned ulpi_data6_po7_pins[] = {
+ TEGRA_PIN_ULPI_DATA6_PO7,
+};
+
+static const unsigned dap3_fs_pp0_pins[] = {
+ TEGRA_PIN_DAP3_FS_PP0,
+};
+
+static const unsigned dap3_din_pp1_pins[] = {
+ TEGRA_PIN_DAP3_DIN_PP1,
+};
+
+static const unsigned dap3_dout_pp2_pins[] = {
+ TEGRA_PIN_DAP3_DOUT_PP2,
+};
+
+static const unsigned dap3_sclk_pp3_pins[] = {
+ TEGRA_PIN_DAP3_SCLK_PP3,
+};
+
+static const unsigned dap4_fs_pp4_pins[] = {
+ TEGRA_PIN_DAP4_FS_PP4,
+};
+
+static const unsigned dap4_din_pp5_pins[] = {
+ TEGRA_PIN_DAP4_DIN_PP5,
+};
+
+static const unsigned dap4_dout_pp6_pins[] = {
+ TEGRA_PIN_DAP4_DOUT_PP6,
+};
+
+static const unsigned dap4_sclk_pp7_pins[] = {
+ TEGRA_PIN_DAP4_SCLK_PP7,
+};
+
+static const unsigned kb_col0_pq0_pins[] = {
+ TEGRA_PIN_KB_COL0_PQ0,
+};
+
+static const unsigned kb_col1_pq1_pins[] = {
+ TEGRA_PIN_KB_COL1_PQ1,
+};
+
+static const unsigned kb_col2_pq2_pins[] = {
+ TEGRA_PIN_KB_COL2_PQ2,
+};
+
+static const unsigned kb_col3_pq3_pins[] = {
+ TEGRA_PIN_KB_COL3_PQ3,
+};
+
+static const unsigned kb_col4_pq4_pins[] = {
+ TEGRA_PIN_KB_COL4_PQ4,
+};
+
+static const unsigned kb_col5_pq5_pins[] = {
+ TEGRA_PIN_KB_COL5_PQ5,
+};
+
+static const unsigned kb_col6_pq6_pins[] = {
+ TEGRA_PIN_KB_COL6_PQ6,
+};
+
+static const unsigned kb_col7_pq7_pins[] = {
+ TEGRA_PIN_KB_COL7_PQ7,
+};
+
+static const unsigned kb_row0_pr0_pins[] = {
+ TEGRA_PIN_KB_ROW0_PR0,
+};
+
+static const unsigned kb_row1_pr1_pins[] = {
+ TEGRA_PIN_KB_ROW1_PR1,
+};
+
+static const unsigned kb_row2_pr2_pins[] = {
+ TEGRA_PIN_KB_ROW2_PR2,
+};
+
+static const unsigned kb_row3_pr3_pins[] = {
+ TEGRA_PIN_KB_ROW3_PR3,
+};
+
+static const unsigned kb_row4_pr4_pins[] = {
+ TEGRA_PIN_KB_ROW4_PR4,
+};
+
+static const unsigned kb_row5_pr5_pins[] = {
+ TEGRA_PIN_KB_ROW5_PR5,
+};
+
+static const unsigned kb_row6_pr6_pins[] = {
+ TEGRA_PIN_KB_ROW6_PR6,
+};
+
+static const unsigned kb_row7_pr7_pins[] = {
+ TEGRA_PIN_KB_ROW7_PR7,
+};
+
+static const unsigned kb_row8_ps0_pins[] = {
+ TEGRA_PIN_KB_ROW8_PS0,
+};
+
+static const unsigned kb_row9_ps1_pins[] = {
+ TEGRA_PIN_KB_ROW9_PS1,
+};
+
+static const unsigned kb_row10_ps2_pins[] = {
+ TEGRA_PIN_KB_ROW10_PS2,
+};
+
+static const unsigned kb_row11_ps3_pins[] = {
+ TEGRA_PIN_KB_ROW11_PS3,
+};
+
+static const unsigned kb_row12_ps4_pins[] = {
+ TEGRA_PIN_KB_ROW12_PS4,
+};
+
+static const unsigned kb_row13_ps5_pins[] = {
+ TEGRA_PIN_KB_ROW13_PS5,
+};
+
+static const unsigned kb_row14_ps6_pins[] = {
+ TEGRA_PIN_KB_ROW14_PS6,
+};
+
+static const unsigned kb_row15_ps7_pins[] = {
+ TEGRA_PIN_KB_ROW15_PS7,
+};
+
+static const unsigned kb_row16_pt0_pins[] = {
+ TEGRA_PIN_KB_ROW16_PT0,
+};
+
+static const unsigned kb_row17_pt1_pins[] = {
+ TEGRA_PIN_KB_ROW17_PT1,
+};
+
+static const unsigned gen2_i2c_scl_pt5_pins[] = {
+ TEGRA_PIN_GEN2_I2C_SCL_PT5,
+};
+
+static const unsigned gen2_i2c_sda_pt6_pins[] = {
+ TEGRA_PIN_GEN2_I2C_SDA_PT6,
+};
+
+static const unsigned sdmmc4_cmd_pt7_pins[] = {
+ TEGRA_PIN_SDMMC4_CMD_PT7,
+};
+
+static const unsigned pu0_pins[] = {
+ TEGRA_PIN_PU0,
+};
+
+static const unsigned pu1_pins[] = {
+ TEGRA_PIN_PU1,
+};
+
+static const unsigned pu2_pins[] = {
+ TEGRA_PIN_PU2,
+};
+
+static const unsigned pu3_pins[] = {
+ TEGRA_PIN_PU3,
+};
+
+static const unsigned pu4_pins[] = {
+ TEGRA_PIN_PU4,
+};
+
+static const unsigned pu5_pins[] = {
+ TEGRA_PIN_PU5,
+};
+
+static const unsigned pu6_pins[] = {
+ TEGRA_PIN_PU6,
+};
+
+static const unsigned pv0_pins[] = {
+ TEGRA_PIN_PV0,
+};
+
+static const unsigned pv1_pins[] = {
+ TEGRA_PIN_PV1,
+};
+
+static const unsigned sdmmc3_cd_n_pv2_pins[] = {
+ TEGRA_PIN_SDMMC3_CD_N_PV2,
+};
+
+static const unsigned sdmmc1_wp_n_pv3_pins[] = {
+ TEGRA_PIN_SDMMC1_WP_N_PV3,
+};
+
+static const unsigned ddc_scl_pv4_pins[] = {
+ TEGRA_PIN_DDC_SCL_PV4,
+};
+
+static const unsigned ddc_sda_pv5_pins[] = {
+ TEGRA_PIN_DDC_SDA_PV5,
+};
+
+static const unsigned gpio_w2_aud_pw2_pins[] = {
+ TEGRA_PIN_GPIO_W2_AUD_PW2,
+};
+
+static const unsigned gpio_w3_aud_pw3_pins[] = {
+ TEGRA_PIN_GPIO_W3_AUD_PW3,
+};
+
+static const unsigned dap_mclk1_pw4_pins[] = {
+ TEGRA_PIN_DAP_MCLK1_PW4,
+};
+
+static const unsigned clk2_out_pw5_pins[] = {
+ TEGRA_PIN_CLK2_OUT_PW5,
+};
+
+static const unsigned uart3_txd_pw6_pins[] = {
+ TEGRA_PIN_UART3_TXD_PW6,
+};
+
+static const unsigned uart3_rxd_pw7_pins[] = {
+ TEGRA_PIN_UART3_RXD_PW7,
+};
+
+static const unsigned dvfs_pwm_px0_pins[] = {
+ TEGRA_PIN_DVFS_PWM_PX0,
+};
+
+static const unsigned gpio_x1_aud_px1_pins[] = {
+ TEGRA_PIN_GPIO_X1_AUD_PX1,
+};
+
+static const unsigned dvfs_clk_px2_pins[] = {
+ TEGRA_PIN_DVFS_CLK_PX2,
+};
+
+static const unsigned gpio_x3_aud_px3_pins[] = {
+ TEGRA_PIN_GPIO_X3_AUD_PX3,
+};
+
+static const unsigned gpio_x4_aud_px4_pins[] = {
+ TEGRA_PIN_GPIO_X4_AUD_PX4,
+};
+
+static const unsigned gpio_x5_aud_px5_pins[] = {
+ TEGRA_PIN_GPIO_X5_AUD_PX5,
+};
+
+static const unsigned gpio_x6_aud_px6_pins[] = {
+ TEGRA_PIN_GPIO_X6_AUD_PX6,
+};
+
+static const unsigned gpio_x7_aud_px7_pins[] = {
+ TEGRA_PIN_GPIO_X7_AUD_PX7,
+};
+
+static const unsigned ulpi_clk_py0_pins[] = {
+ TEGRA_PIN_ULPI_CLK_PY0,
+};
+
+static const unsigned ulpi_dir_py1_pins[] = {
+ TEGRA_PIN_ULPI_DIR_PY1,
+};
+
+static const unsigned ulpi_nxt_py2_pins[] = {
+ TEGRA_PIN_ULPI_NXT_PY2,
+};
+
+static const unsigned ulpi_stp_py3_pins[] = {
+ TEGRA_PIN_ULPI_STP_PY3,
+};
+
+static const unsigned sdmmc1_dat3_py4_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT3_PY4,
+};
+
+static const unsigned sdmmc1_dat2_py5_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT2_PY5,
+};
+
+static const unsigned sdmmc1_dat1_py6_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT1_PY6,
+};
+
+static const unsigned sdmmc1_dat0_py7_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT0_PY7,
+};
+
+static const unsigned sdmmc1_clk_pz0_pins[] = {
+ TEGRA_PIN_SDMMC1_CLK_PZ0,
+};
+
+static const unsigned sdmmc1_cmd_pz1_pins[] = {
+ TEGRA_PIN_SDMMC1_CMD_PZ1,
+};
+
+static const unsigned pwr_i2c_scl_pz6_pins[] = {
+ TEGRA_PIN_PWR_I2C_SCL_PZ6,
+};
+
+static const unsigned pwr_i2c_sda_pz7_pins[] = {
+ TEGRA_PIN_PWR_I2C_SDA_PZ7,
+};
+
+static const unsigned sdmmc4_dat0_paa0_pins[] = {
+ TEGRA_PIN_SDMMC4_DAT0_PAA0,
+};
+
+static const unsigned sdmmc4_dat1_paa1_pins[] = {
+ TEGRA_PIN_SDMMC4_DAT1_PAA1,
+};
+
+static const unsigned sdmmc4_dat2_paa2_pins[] = {
+ TEGRA_PIN_SDMMC4_DAT2_PAA2,
+};
+
+static const unsigned sdmmc4_dat3_paa3_pins[] = {
+ TEGRA_PIN_SDMMC4_DAT3_PAA3,
+};
+
+static const unsigned sdmmc4_dat4_paa4_pins[] = {
+ TEGRA_PIN_SDMMC4_DAT4_PAA4,
+};
+
+static const unsigned sdmmc4_dat5_paa5_pins[] = {
+ TEGRA_PIN_SDMMC4_DAT5_PAA5,
+};
+
+static const unsigned sdmmc4_dat6_paa6_pins[] = {
+ TEGRA_PIN_SDMMC4_DAT6_PAA6,
+};
+
+static const unsigned sdmmc4_dat7_paa7_pins[] = {
+ TEGRA_PIN_SDMMC4_DAT7_PAA7,
+};
+
+static const unsigned pbb0_pins[] = {
+ TEGRA_PIN_PBB0,
+};
+
+static const unsigned cam_i2c_scl_pbb1_pins[] = {
+ TEGRA_PIN_CAM_I2C_SCL_PBB1,
+};
+
+static const unsigned cam_i2c_sda_pbb2_pins[] = {
+ TEGRA_PIN_CAM_I2C_SDA_PBB2,
+};
+
+static const unsigned pbb3_pins[] = {
+ TEGRA_PIN_PBB3,
+};
+
+static const unsigned pbb4_pins[] = {
+ TEGRA_PIN_PBB4,
+};
+
+static const unsigned pbb5_pins[] = {
+ TEGRA_PIN_PBB5,
+};
+
+static const unsigned pbb6_pins[] = {
+ TEGRA_PIN_PBB6,
+};
+
+static const unsigned pbb7_pins[] = {
+ TEGRA_PIN_PBB7,
+};
+
+static const unsigned cam_mclk_pcc0_pins[] = {
+ TEGRA_PIN_CAM_MCLK_PCC0,
+};
+
+static const unsigned pcc1_pins[] = {
+ TEGRA_PIN_PCC1,
+};
+
+static const unsigned pcc2_pins[] = {
+ TEGRA_PIN_PCC2,
+};
+
+static const unsigned sdmmc4_clk_pcc4_pins[] = {
+ TEGRA_PIN_SDMMC4_CLK_PCC4,
+};
+
+static const unsigned clk2_req_pcc5_pins[] = {
+ TEGRA_PIN_CLK2_REQ_PCC5,
+};
+
+static const unsigned pex_l0_rst_n_pdd1_pins[] = {
+ TEGRA_PIN_PEX_L0_RST_N_PDD1,
+};
+
+static const unsigned pex_l0_clkreq_n_pdd2_pins[] = {
+ TEGRA_PIN_PEX_L0_CLKREQ_N_PDD2,
+};
+
+static const unsigned pex_wake_n_pdd3_pins[] = {
+ TEGRA_PIN_PEX_WAKE_N_PDD3,
+};
+
+static const unsigned pex_l1_rst_n_pdd5_pins[] = {
+ TEGRA_PIN_PEX_L1_RST_N_PDD5,
+};
+
+static const unsigned pex_l1_clkreq_n_pdd6_pins[] = {
+ TEGRA_PIN_PEX_L1_CLKREQ_N_PDD6,
+};
+
+static const unsigned clk3_out_pee0_pins[] = {
+ TEGRA_PIN_CLK3_OUT_PEE0,
+};
+
+static const unsigned clk3_req_pee1_pins[] = {
+ TEGRA_PIN_CLK3_REQ_PEE1,
+};
+
+static const unsigned dap_mclk1_req_pee2_pins[] = {
+ TEGRA_PIN_DAP_MCLK1_REQ_PEE2,
+};
+
+static const unsigned hdmi_cec_pee3_pins[] = {
+ TEGRA_PIN_HDMI_CEC_PEE3,
+};
+
+static const unsigned sdmmc3_clk_lb_out_pee4_pins[] = {
+ TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4,
+};
+
+static const unsigned sdmmc3_clk_lb_in_pee5_pins[] = {
+ TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5,
+};
+
+static const unsigned dp_hpd_pff0_pins[] = {
+ TEGRA_PIN_DP_HPD_PFF0,
+};
+
+static const unsigned usb_vbus_en2_pff1_pins[] = {
+ TEGRA_PIN_USB_VBUS_EN2_PFF1,
+};
+
+static const unsigned pff2_pins[] = {
+ TEGRA_PIN_PFF2,
+};
+
+static const unsigned core_pwr_req_pins[] = {
+ TEGRA_PIN_CORE_PWR_REQ,
+};
+
+static const unsigned cpu_pwr_req_pins[] = {
+ TEGRA_PIN_CPU_PWR_REQ,
+};
+
+static const unsigned owr_pins[] = {
+ TEGRA_PIN_OWR,
+};
+
+static const unsigned pwr_int_n_pins[] = {
+ TEGRA_PIN_PWR_INT_N,
+};
+
+static const unsigned reset_out_n_pins[] = {
+ TEGRA_PIN_RESET_OUT_N,
+};
+
+static const unsigned clk_32k_in_pins[] = {
+ TEGRA_PIN_CLK_32K_IN,
+};
+
+static const unsigned gmi_clk_lb_pins[] = {
+ TEGRA_PIN_GMI_CLK_LB,
+};
+
+static const unsigned jtag_rtck_pins[] = {
+ TEGRA_PIN_JTAG_RTCK,
+};
+
+static const unsigned drive_ao1_pins[] = {
+ TEGRA_PIN_KB_ROW0_PR0,
+ TEGRA_PIN_KB_ROW1_PR1,
+ TEGRA_PIN_KB_ROW2_PR2,
+ TEGRA_PIN_KB_ROW3_PR3,
+ TEGRA_PIN_KB_ROW4_PR4,
+ TEGRA_PIN_KB_ROW5_PR5,
+ TEGRA_PIN_KB_ROW6_PR6,
+ TEGRA_PIN_KB_ROW7_PR7,
+ TEGRA_PIN_PWR_I2C_SCL_PZ6,
+ TEGRA_PIN_PWR_I2C_SDA_PZ7,
+};
+
+static const unsigned drive_ao2_pins[] = {
+ TEGRA_PIN_CLK_32K_OUT_PA0,
+ TEGRA_PIN_CLK_32K_IN,
+ TEGRA_PIN_KB_COL0_PQ0,
+ TEGRA_PIN_KB_COL1_PQ1,
+ TEGRA_PIN_KB_COL2_PQ2,
+ TEGRA_PIN_KB_COL3_PQ3,
+ TEGRA_PIN_KB_COL4_PQ4,
+ TEGRA_PIN_KB_COL5_PQ5,
+ TEGRA_PIN_KB_COL6_PQ6,
+ TEGRA_PIN_KB_COL7_PQ7,
+ TEGRA_PIN_KB_ROW8_PS0,
+ TEGRA_PIN_KB_ROW9_PS1,
+ TEGRA_PIN_KB_ROW10_PS2,
+ TEGRA_PIN_KB_ROW11_PS3,
+ TEGRA_PIN_KB_ROW12_PS4,
+ TEGRA_PIN_KB_ROW13_PS5,
+ TEGRA_PIN_KB_ROW14_PS6,
+ TEGRA_PIN_KB_ROW15_PS7,
+ TEGRA_PIN_KB_ROW16_PT0,
+ TEGRA_PIN_KB_ROW17_PT1,
+ TEGRA_PIN_SDMMC3_CD_N_PV2,
+ TEGRA_PIN_CORE_PWR_REQ,
+ TEGRA_PIN_CPU_PWR_REQ,
+ TEGRA_PIN_PWR_INT_N,
+};
+
+static const unsigned drive_at1_pins[] = {
+ TEGRA_PIN_PH0,
+ TEGRA_PIN_PH1,
+ TEGRA_PIN_PH2,
+ TEGRA_PIN_PH3,
+};
+
+static const unsigned drive_at2_pins[] = {
+ TEGRA_PIN_PG0,
+ TEGRA_PIN_PG1,
+ TEGRA_PIN_PG2,
+ TEGRA_PIN_PG3,
+ TEGRA_PIN_PG4,
+ TEGRA_PIN_PG5,
+ TEGRA_PIN_PG6,
+ TEGRA_PIN_PG7,
+ TEGRA_PIN_PI0,
+ TEGRA_PIN_PI1,
+ TEGRA_PIN_PI3,
+ TEGRA_PIN_PI4,
+ TEGRA_PIN_PI7,
+ TEGRA_PIN_PK0,
+ TEGRA_PIN_PK2,
+};
+
+static const unsigned drive_at3_pins[] = {
+ TEGRA_PIN_PC7,
+ TEGRA_PIN_PJ0,
+};
+
+static const unsigned drive_at4_pins[] = {
+ TEGRA_PIN_PB0,
+ TEGRA_PIN_PB1,
+ TEGRA_PIN_PJ0,
+ TEGRA_PIN_PJ7,
+ TEGRA_PIN_PK7,
+};
+
+static const unsigned drive_at5_pins[] = {
+ TEGRA_PIN_GEN2_I2C_SCL_PT5,
+ TEGRA_PIN_GEN2_I2C_SDA_PT6,
+};
+
+static const unsigned drive_cdev1_pins[] = {
+ TEGRA_PIN_DAP_MCLK1_PW4,
+ TEGRA_PIN_DAP_MCLK1_REQ_PEE2,
+};
+
+static const unsigned drive_cdev2_pins[] = {
+ TEGRA_PIN_CLK2_OUT_PW5,
+ TEGRA_PIN_CLK2_REQ_PCC5,
+};
+
+static const unsigned drive_dap1_pins[] = {
+ TEGRA_PIN_DAP1_FS_PN0,
+ TEGRA_PIN_DAP1_DIN_PN1,
+ TEGRA_PIN_DAP1_DOUT_PN2,
+ TEGRA_PIN_DAP1_SCLK_PN3,
+};
+
+static const unsigned drive_dap2_pins[] = {
+ TEGRA_PIN_DAP2_FS_PA2,
+ TEGRA_PIN_DAP2_SCLK_PA3,
+ TEGRA_PIN_DAP2_DIN_PA4,
+ TEGRA_PIN_DAP2_DOUT_PA5,
+};
+
+static const unsigned drive_dap3_pins[] = {
+ TEGRA_PIN_DAP3_FS_PP0,
+ TEGRA_PIN_DAP3_DIN_PP1,
+ TEGRA_PIN_DAP3_DOUT_PP2,
+ TEGRA_PIN_DAP3_SCLK_PP3,
+};
+
+static const unsigned drive_dap4_pins[] = {
+ TEGRA_PIN_DAP4_FS_PP4,
+ TEGRA_PIN_DAP4_DIN_PP5,
+ TEGRA_PIN_DAP4_DOUT_PP6,
+ TEGRA_PIN_DAP4_SCLK_PP7,
+};
+
+static const unsigned drive_dbg_pins[] = {
+ TEGRA_PIN_GEN1_I2C_SCL_PC4,
+ TEGRA_PIN_GEN1_I2C_SDA_PC5,
+ TEGRA_PIN_PU0,
+ TEGRA_PIN_PU1,
+ TEGRA_PIN_PU2,
+ TEGRA_PIN_PU3,
+ TEGRA_PIN_PU4,
+ TEGRA_PIN_PU5,
+ TEGRA_PIN_PU6,
+};
+
+static const unsigned drive_sdio3_pins[] = {
+ TEGRA_PIN_SDMMC3_CLK_PA6,
+ TEGRA_PIN_SDMMC3_CMD_PA7,
+ TEGRA_PIN_SDMMC3_DAT3_PB4,
+ TEGRA_PIN_SDMMC3_DAT2_PB5,
+ TEGRA_PIN_SDMMC3_DAT1_PB6,
+ TEGRA_PIN_SDMMC3_DAT0_PB7,
+ TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4,
+ TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5,
+};
+
+static const unsigned drive_spi_pins[] = {
+ TEGRA_PIN_DVFS_PWM_PX0,
+ TEGRA_PIN_GPIO_X1_AUD_PX1,
+ TEGRA_PIN_DVFS_CLK_PX2,
+ TEGRA_PIN_GPIO_X3_AUD_PX3,
+ TEGRA_PIN_GPIO_X4_AUD_PX4,
+ TEGRA_PIN_GPIO_X5_AUD_PX5,
+ TEGRA_PIN_GPIO_X6_AUD_PX6,
+ TEGRA_PIN_GPIO_X7_AUD_PX7,
+ TEGRA_PIN_GPIO_W2_AUD_PW2,
+ TEGRA_PIN_GPIO_W3_AUD_PW3,
+};
+
+static const unsigned drive_uaa_pins[] = {
+ TEGRA_PIN_ULPI_DATA0_PO1,
+ TEGRA_PIN_ULPI_DATA1_PO2,
+ TEGRA_PIN_ULPI_DATA2_PO3,
+ TEGRA_PIN_ULPI_DATA3_PO4,
+};
+
+static const unsigned drive_uab_pins[] = {
+ TEGRA_PIN_ULPI_DATA7_PO0,
+ TEGRA_PIN_ULPI_DATA4_PO5,
+ TEGRA_PIN_ULPI_DATA5_PO6,
+ TEGRA_PIN_ULPI_DATA6_PO7,
+ TEGRA_PIN_PV0,
+ TEGRA_PIN_PV1,
+};
+
+static const unsigned drive_uart2_pins[] = {
+ TEGRA_PIN_UART2_TXD_PC2,
+ TEGRA_PIN_UART2_RXD_PC3,
+ TEGRA_PIN_UART2_CTS_N_PJ5,
+ TEGRA_PIN_UART2_RTS_N_PJ6,
+};
+
+static const unsigned drive_uart3_pins[] = {
+ TEGRA_PIN_UART3_CTS_N_PA1,
+ TEGRA_PIN_UART3_RTS_N_PC0,
+ TEGRA_PIN_UART3_TXD_PW6,
+ TEGRA_PIN_UART3_RXD_PW7,
+};
+
+static const unsigned drive_sdio1_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT3_PY4,
+ TEGRA_PIN_SDMMC1_DAT2_PY5,
+ TEGRA_PIN_SDMMC1_DAT1_PY6,
+ TEGRA_PIN_SDMMC1_DAT0_PY7,
+ TEGRA_PIN_SDMMC1_CLK_PZ0,
+ TEGRA_PIN_SDMMC1_CMD_PZ1,
+};
+
+static const unsigned drive_ddc_pins[] = {
+ TEGRA_PIN_DDC_SCL_PV4,
+ TEGRA_PIN_DDC_SDA_PV5,
+};
+
+static const unsigned drive_gma_pins[] = {
+ TEGRA_PIN_SDMMC4_CLK_PCC4,
+ TEGRA_PIN_SDMMC4_CMD_PT7,
+ TEGRA_PIN_SDMMC4_DAT0_PAA0,
+ TEGRA_PIN_SDMMC4_DAT1_PAA1,
+ TEGRA_PIN_SDMMC4_DAT2_PAA2,
+ TEGRA_PIN_SDMMC4_DAT3_PAA3,
+ TEGRA_PIN_SDMMC4_DAT4_PAA4,
+ TEGRA_PIN_SDMMC4_DAT5_PAA5,
+ TEGRA_PIN_SDMMC4_DAT6_PAA6,
+ TEGRA_PIN_SDMMC4_DAT7_PAA7,
+};
+
+static const unsigned drive_gme_pins[] = {
+ TEGRA_PIN_PBB0,
+ TEGRA_PIN_CAM_I2C_SCL_PBB1,
+ TEGRA_PIN_CAM_I2C_SDA_PBB2,
+ TEGRA_PIN_PBB3,
+ TEGRA_PIN_PCC2,
+};
+
+static const unsigned drive_gmf_pins[] = {
+ TEGRA_PIN_PBB4,
+ TEGRA_PIN_PBB5,
+ TEGRA_PIN_PBB6,
+ TEGRA_PIN_PBB7,
+};
+
+static const unsigned drive_gmg_pins[] = {
+ TEGRA_PIN_CAM_MCLK_PCC0,
+};
+
+static const unsigned drive_gmh_pins[] = {
+ TEGRA_PIN_PCC1,
+};
+
+static const unsigned drive_owr_pins[] = {
+ TEGRA_PIN_SDMMC3_CD_N_PV2,
+ TEGRA_PIN_OWR,
+};
+
+static const unsigned drive_uda_pins[] = {
+ TEGRA_PIN_ULPI_CLK_PY0,
+ TEGRA_PIN_ULPI_DIR_PY1,
+ TEGRA_PIN_ULPI_NXT_PY2,
+ TEGRA_PIN_ULPI_STP_PY3,
+};
+
+static const unsigned drive_gpv_pins[] = {
+ TEGRA_PIN_PEX_L0_RST_N_PDD1,
+ TEGRA_PIN_PEX_L0_CLKREQ_N_PDD2,
+ TEGRA_PIN_PEX_WAKE_N_PDD3,
+ TEGRA_PIN_PEX_L1_RST_N_PDD5,
+ TEGRA_PIN_PEX_L1_CLKREQ_N_PDD6,
+ TEGRA_PIN_USB_VBUS_EN2_PFF1,
+ TEGRA_PIN_PFF2,
+};
+
+static const unsigned drive_cec_pins[] = {
+ TEGRA_PIN_HDMI_CEC_PEE3,
+};
+
+static const unsigned drive_dev3_pins[] = {
+ TEGRA_PIN_CLK3_OUT_PEE0,
+ TEGRA_PIN_CLK3_REQ_PEE1,
+};
+
+static const unsigned drive_at6_pins[] = {
+ TEGRA_PIN_PK1,
+ TEGRA_PIN_PK3,
+ TEGRA_PIN_PK4,
+ TEGRA_PIN_PI2,
+ TEGRA_PIN_PI5,
+ TEGRA_PIN_PI6,
+ TEGRA_PIN_PH4,
+ TEGRA_PIN_PH5,
+ TEGRA_PIN_PH6,
+ TEGRA_PIN_PH7,
+};
+
+static const unsigned drive_dap5_pins[] = {
+ TEGRA_PIN_SPDIF_IN_PK6,
+ TEGRA_PIN_SPDIF_OUT_PK5,
+ TEGRA_PIN_DP_HPD_PFF0,
+};
+
+static const unsigned drive_usb_vbus_en_pins[] = {
+ TEGRA_PIN_USB_VBUS_EN0_PN4,
+ TEGRA_PIN_USB_VBUS_EN1_PN5,
+};
+
+static const unsigned drive_ao3_pins[] = {
+ TEGRA_PIN_RESET_OUT_N,
+};
+
+static const unsigned drive_ao0_pins[] = {
+ TEGRA_PIN_JTAG_RTCK,
+};
+
+static const unsigned drive_hv0_pins[] = {
+ TEGRA_PIN_HDMI_INT_PN7,
+};
+
+static const unsigned drive_sdio4_pins[] = {
+ TEGRA_PIN_SDMMC1_WP_N_PV3,
+};
+
+static const unsigned drive_ao4_pins[] = {
+ TEGRA_PIN_JTAG_RTCK,
+};
+
+enum tegra_mux {
+ TEGRA_MUX_BLINK,
+ TEGRA_MUX_CEC,
+ TEGRA_MUX_CLDVFS,
+ TEGRA_MUX_CLK12,
+ TEGRA_MUX_CPU,
+ TEGRA_MUX_DAP,
+ TEGRA_MUX_DAP1,
+ TEGRA_MUX_DAP2,
+ TEGRA_MUX_DEV3,
+ TEGRA_MUX_DISPLAYA,
+ TEGRA_MUX_DISPLAYA_ALT,
+ TEGRA_MUX_DISPLAYB,
+ TEGRA_MUX_DTV,
+ TEGRA_MUX_EXTPERIPH1,
+ TEGRA_MUX_EXTPERIPH2,
+ TEGRA_MUX_EXTPERIPH3,
+ TEGRA_MUX_GMI,
+ TEGRA_MUX_GMI_ALT,
+ TEGRA_MUX_HDA,
+ TEGRA_MUX_HSI,
+ TEGRA_MUX_I2C1,
+ TEGRA_MUX_I2C2,
+ TEGRA_MUX_I2C3,
+ TEGRA_MUX_I2C4,
+ TEGRA_MUX_I2CPWR,
+ TEGRA_MUX_I2S0,
+ TEGRA_MUX_I2S1,
+ TEGRA_MUX_I2S2,
+ TEGRA_MUX_I2S3,
+ TEGRA_MUX_I2S4,
+ TEGRA_MUX_IRDA,
+ TEGRA_MUX_KBC,
+ TEGRA_MUX_OWR,
+ TEGRA_MUX_PMI,
+ TEGRA_MUX_PWM0,
+ TEGRA_MUX_PWM1,
+ TEGRA_MUX_PWM2,
+ TEGRA_MUX_PWM3,
+ TEGRA_MUX_PWRON,
+ TEGRA_MUX_RESET_OUT_N,
+ TEGRA_MUX_RSVD1,
+ TEGRA_MUX_RSVD2,
+ TEGRA_MUX_RSVD3,
+ TEGRA_MUX_RSVD4,
+ TEGRA_MUX_SDMMC1,
+ TEGRA_MUX_SDMMC2,
+ TEGRA_MUX_SDMMC3,
+ TEGRA_MUX_SDMMC4,
+ TEGRA_MUX_SOC,
+ TEGRA_MUX_SPDIF,
+ TEGRA_MUX_SPI1,
+ TEGRA_MUX_SPI2,
+ TEGRA_MUX_SPI3,
+ TEGRA_MUX_SPI4,
+ TEGRA_MUX_SPI5,
+ TEGRA_MUX_SPI6,
+ TEGRA_MUX_TRACE,
+ TEGRA_MUX_UARTA,
+ TEGRA_MUX_UARTB,
+ TEGRA_MUX_UARTC,
+ TEGRA_MUX_UARTD,
+ TEGRA_MUX_ULPI,
+ TEGRA_MUX_USB,
+ TEGRA_MUX_VGP1,
+ TEGRA_MUX_VGP2,
+ TEGRA_MUX_VGP3,
+ TEGRA_MUX_VGP4,
+ TEGRA_MUX_VGP5,
+ TEGRA_MUX_VGP6,
+ TEGRA_MUX_VI,
+ TEGRA_MUX_VI_ALT1,
+ TEGRA_MUX_VI_ALT3,
+ TEGRA_MUX_VIMCLK2,
+ TEGRA_MUX_VIMCLK2_ALT,
+ TEGRA_MUX_SATA,
+ TEGRA_MUX_CCLA,
+ TEGRA_MUX_PE0,
+ TEGRA_MUX_PE,
+ TEGRA_MUX_PE1,
+ TEGRA_MUX_DP,
+ TEGRA_MUX_RTCK,
+ TEGRA_MUX_SYS,
+ TEGRA_MUX_CLK,
+ TEGRA_MUX_TMDS,
+};
+
+static const char * const blink_groups[] = {
+ "clk_32k_out_pa0",
+};
+
+static const char * const cec_groups[] = {
+ "hdmi_cec_pee3",
+};
+
+static const char * const cldvfs_groups[] = {
+ "ph2",
+ "ph3",
+ "kb_row7_pr7",
+ "kb_row8_ps0",
+ "dvfs_pwm_px0",
+ "dvfs_clk_px2",
+};
+
+static const char * const clk12_groups[] = {
+ "sdmmc1_wp_n_pv3",
+ "sdmmc1_clk_pz0",
+};
+
+static const char * const cpu_groups[] = {
+ "cpu_pwr_req",
+};
+
+static const char * const dap_groups[] = {
+ "dap_mclk1_pee2",
+ "clk2_req_pcc5",
+};
+
+static const char * const dap1_groups[] = {
+ "dap_mclk1_pee2",
+};
+
+static const char * const dap2_groups[] = {
+ "dap_mclk1_pw4",
+ "gpio_x4_aud_px4",
+};
+
+static const char * const dev3_groups[] = {
+ "clk3_req_pee1",
+};
+
+static const char * const displaya_groups[] = {
+ "dap3_fs_pp0",
+ "dap3_din_pp1",
+ "dap3_dout_pp2",
+ "ph1",
+ "pi4",
+ "pbb3",
+ "pbb4",
+ "pbb5",
+ "kb_row3_pr3",
+ "kb_row4_pr4",
+ "kb_row5_pr5",
+ "kb_row6_pr6",
+ "kb_col3_pq3",
+ "sdmmc3_dat2_pb5",
+};
+
+static const char * const displaya_alt_groups[] = {
+ "kb_row6_pr6",
+};
+
+static const char * const displayb_groups[] = {
+ "dap3_fs_pp0",
+ "dap3_din_pp1",
+ "dap3_sclk_pp3",
+
+ "pu3",
+ "pu4",
+ "pu5",
+
+ "pbb3",
+ "pbb4",
+ "pbb6",
+
+ "kb_row3_pr3",
+ "kb_row4_pr4",
+ "kb_row5_pr5",
+ "kb_row6_pr6",
+
+ "sdmmc3_dat3_pb4",
+};
+
+static const char * const dtv_groups[] = {
+ "uart3_cts_n_pa1",
+ "uart3_rts_n_pc0",
+ "dap4_fs_pp4",
+ "dap4_dout_pp6",
+ "pi7",
+ "ph0",
+ "ph6",
+ "ph7",
+};
+
+static const char * const extperiph1_groups[] = {
+ "dap_mclk1_pw4",
+};
+
+static const char * const extperiph2_groups[] = {
+ "clk2_out_pw5",
+};
+
+static const char * const extperiph3_groups[] = {
+ "clk3_out_pee0",
+};
+
+static const char * const gmi_groups[] = {
+ "uart2_cts_n_pj5",
+ "uart2_rts_n_pj6",
+ "uart3_txd_pw6",
+ "uart3_rxd_pw7",
+ "uart3_cts_n_pa1",
+ "uart3_rts_n_pc0",
+
+ "pu0",
+ "pu1",
+ "pu2",
+ "pu3",
+ "pu4",
+ "pu5",
+ "pu6",
+
+ "dap4_fs_pp4",
+ "dap4_din_pp5",
+ "dap4_dout_pp6",
+ "dap4_sclk_pp7",
+
+ "pc7",
+
+ "pg0",
+ "pg1",
+ "pg2",
+ "pg3",
+ "pg4",
+ "pg5",
+ "pg6",
+ "pg7",
+
+ "ph0",
+ "ph1",
+ "ph2",
+ "ph3",
+ "ph4",
+ "ph5",
+ "ph6",
+ "ph7",
+
+ "pi0",
+ "pi1",
+ "pi2",
+ "pi3",
+ "pi4",
+ "pi5",
+ "pi6",
+ "pi7",
+
+ "pj0",
+ "pj2",
+
+ "pk0",
+ "pk1",
+ "pk2",
+ "pk3",
+ "pk4",
+
+ "pj7",
+ "pb0",
+ "pb1",
+ "pk7",
+
+ "gen2_i2c_scl_pt5",
+ "gen2_i2c_sda_pt6",
+
+ "sdmmc4_dat0_paa0",
+ "sdmmc4_dat1_paa1",
+ "sdmmc4_dat2_paa2",
+ "sdmmc4_dat3_paa3",
+ "sdmmc4_dat4_paa4",
+ "sdmmc4_dat6_paa6",
+ "sdmmc4_dat7_paa7",
+ "sdmmc4_clk_pcc4",
+ "sdmmc4_cmd_pt7",
+ "gmi_clk_lb",
+
+ "dap1_fs_pn0",
+ "dap1_din_pn1",
+ "dap1_dout_pn2",
+ "dap1_sclk_pn3",
+
+ "dap2_fs_pa2",
+ "dap2_din_pa4",
+ "dap2_dout_pa5",
+ "dap2_sclk_pa3",
+
+ "dvfs_pwm_px0",
+ "dvfs_clk_px2",
+ "gpio_x1_aud_px1",
+ "gpio_x3_aud_px3",
+ "gpio_x4_aud_px4",
+ "gpio_x5_aud_px5",
+ "gpio_x6_aud_px6",
+};
+
+static const char * const gmi_alt_groups[] = {
+ "pc7",
+ "pk4",
+ "pj7",
+};
+
+static const char * const hda_groups[] = {
+ "dap1_fs_pn0",
+ "dap1_din_pn1",
+ "dap1_dout_pn2",
+ "dap1_sclk_pn3",
+ "dap2_fs_pa2",
+ "dap2_sclk_pa3",
+ "dap2_din_pa4",
+ "dap2_dout_pa5",
+};
+
+static const char * const hsi_groups[] = {
+ "ulpi_data0_po1",
+ "ulpi_data1_po2",
+ "ulpi_data2_po3",
+ "ulpi_data3_po4",
+ "ulpi_data4_po5",
+ "ulpi_data5_po6",
+ "ulpi_data6_po7",
+ "ulpi_data7_po0",
+};
+
+static const char * const i2c1_groups[] = {
+ "gen1_i2c_scl_pc4",
+ "gen1_i2c_sda_pc5",
+ "gpio_w2_aud_pw2",
+ "gpio_w3_aud_pw3",
+};
+
+static const char * const i2c2_groups[] = {
+ "gen2_i2c_scl_pt5",
+ "gen2_i2c_sda_pt6",
+};
+
+static const char * const i2c3_groups[] = {
+ "spdif_in_pk6",
+ "spdif_out_pk5",
+ "cam_i2c_scl_pbb1",
+ "cam_i2c_sda_pbb2",
+};
+
+static const char * const i2c4_groups[] = {
+ "ddc_scl_pv4",
+ "ddc_sda_pv5",
+};
+
+static const char * const i2cpwr_groups[] = {
+ "pwr_i2c_scl_pz6",
+ "pwr_i2c_sda_pz7",
+};
+
+static const char * const i2s0_groups[] = {
+ "dap1_fs_pn0",
+ "dap1_din_pn1",
+ "dap1_dout_pn2",
+ "dap1_sclk_pn3",
+};
+
+static const char * const i2s1_groups[] = {
+ "dap2_fs_pa2",
+ "dap2_sclk_pa3",
+ "dap2_din_pa4",
+ "dap2_dout_pa5",
+};
+
+static const char * const i2s2_groups[] = {
+ "dap3_fs_pp0",
+ "dap3_din_pp1",
+ "dap3_dout_pp2",
+ "dap3_sclk_pp3",
+};
+
+static const char * const i2s3_groups[] = {
+ "dap4_fs_pp4",
+ "dap4_din_pp5",
+ "dap4_dout_pp6",
+ "dap4_sclk_pp7",
+};
+
+static const char * const i2s4_groups[] = {
+ "pcc1",
+ "pbb6",
+ "pbb7",
+ "pcc2",
+};
+
+static const char * const irda_groups[] = {
+ "uart2_rxd_pc3",
+ "uart2_txd_pc2",
+ "kb_row11_ps3",
+ "kb_row12_ps4",
+};
+
+static const char * const kbc_groups[] = {
+ "kb_row0_pr0",
+ "kb_row1_pr1",
+ "kb_row2_pr2",
+ "kb_row3_pr3",
+ "kb_row4_pr4",
+ "kb_row5_pr5",
+ "kb_row6_pr6",
+ "kb_row7_pr7",
+ "kb_row8_ps0",
+ "kb_row9_ps1",
+ "kb_row10_ps2",
+ "kb_row11_ps3",
+ "kb_row12_ps4",
+ "kb_row13_ps5",
+ "kb_row14_ps6",
+ "kb_row15_ps7",
+ "kb_row16_pt0",
+ "kb_row17_pt1",
+
+ "kb_col0_pq0",
+ "kb_col1_pq1",
+ "kb_col2_pq2",
+ "kb_col3_pq3",
+ "kb_col4_pq4",
+ "kb_col5_pq5",
+ "kb_col6_pq6",
+ "kb_col7_pq7",
+};
+
+static const char * const owr_groups[] = {
+ "pu0",
+ "kb_col4_pq4",
+ "owr",
+ "sdmmc3_cd_n_pv2",
+};
+
+static const char * const pmi_groups[] = {
+ "pwr_int_n",
+};
+
+static const char * const pwm0_groups[] = {
+ "sdmmc1_dat2_py5",
+ "uart3_rts_n_pc0",
+ "pu3",
+ "ph0",
+ "sdmmc3_dat3_pb4",
+};
+
+static const char * const pwm1_groups[] = {
+ "sdmmc1_dat1_py6",
+ "pu4",
+ "ph1",
+ "sdmmc3_dat2_pb5",
+};
+
+static const char * const pwm2_groups[] = {
+ "pu5",
+ "ph2",
+ "kb_col3_pq3",
+ "sdmmc3_dat1_pb6",
+};
+
+static const char * const pwm3_groups[] = {
+ "pu6",
+ "ph3",
+ "sdmmc3_cmd_pa7",
+};
+
+static const char * const pwron_groups[] = {
+ "core_pwr_req",
+};
+
+static const char * const reset_out_n_groups[] = {
+ "reset_out_n",
+};
+
+static const char * const rsvd1_groups[] = {
+ "pv0",
+ "pv1",
+
+ "hdmi_int_pn7",
+ "pu1",
+ "pu2",
+ "pc7",
+ "pi7",
+ "pk0",
+ "pj0",
+ "pj2",
+ "pk2",
+ "pi3",
+ "pi6",
+
+ "pg0",
+ "pg1",
+ "pg2",
+ "pg3",
+ "pg4",
+ "pg5",
+ "pg6",
+ "pg7",
+
+ "pi0",
+ "pi1",
+
+ "gpio_x7_aud_px7",
+
+ "reset_out_n",
+};
+
+static const char * const rsvd2_groups[] = {
+ "pv0",
+ "pv1",
+
+ "sdmmc1_dat0_py7",
+ "clk2_out_pw5",
+ "clk2_req_pcc5",
+ "hdmi_int_pn7",
+ "ddc_scl_pv4",
+ "ddc_sda_pv5",
+
+ "uart3_txd_pw6",
+ "uart3_rxd_pw7",
+
+ "gen1_i2c_scl_pc4",
+ "gen1_i2c_sda_pc5",
+
+ "clk2_out_pee0",
+ "clk2_req_pee1",
+ "pc7",
+ "pi5",
+ "pj0",
+ "pj2",
+
+ "pk4",
+ "pk2",
+ "pi3",
+ "pi6",
+ "pg0",
+ "pg1",
+ "pg5",
+ "pg6",
+ "pg7",
+
+ "ph4",
+ "ph5",
+ "pj7",
+ "pb0",
+ "pb1",
+ "pk7",
+ "pi0",
+ "pi1",
+
+ "gen2_i2c_scl_pt5",
+ "gen2_i2c_sda_pt6",
+ "sdmmc4_clk_pcc4",
+ "sdmmc4_cmd_pt7",
+ "sdmmc4_dat7_paa7",
+ "pcc1",
+ "pbb6",
+ "pbb7",
+ "pcc2",
+ "jtag_rtck",
+
+ "pwr_i2c_scl_pz6",
+ "pwr_i2c_sda_pz7",
+
+ "kb_row0_pr0",
+ "kb_row1_pr1",
+ "kb_row2_pr2",
+ "kb_row7_pr7",
+ "kb_row8_ps0",
+ "kb_row9_ps1",
+ "kb_row10_ps2",
+ "kb_row11_ps3",
+ "kb_row12_ps4",
+ "kb_row13_ps5",
+ "kb_row14_ps6",
+
+ "kb_col0_pq0",
+ "kb_col1_pq1",
+ "kb_col2_pq2",
+ "kb_col5_pq5",
+ "kb_col6_pq6",
+ "kb_col7_pq7",
+
+ "core_pwr_req",
+ "cpu_pwr_req",
+ "pwr_int_n",
+ "clk_32k_in",
+ "owr",
+
+ "spdif_in_pk6",
+ "spdif_out_pk5",
+ "gpio_x1_aud_px1",
+
+ "sdmmc3_clk_pa6",
+ "sdmmc3_dat0_pb7",
+
+ "pex_l0_rst_n_pdd1",
+ "pex_l0_clkreq_n_pdd2",
+ "pex_wake_n_pdd3",
+ "pex_l1_rst_n_pdd5",
+ "pex_l1_clkreq_n_pdd6",
+ "hdmi_cec_pee3",
+
+ "gpio_w2_aud_pw2",
+ "usb_vbus_en0_pn4",
+ "usb_vbus_en1_pn5",
+ "sdmmc3_clk_lb_out_pee4",
+ "sdmmc3_clk_lb_in_pee5",
+ "gmi_clk_lb",
+ "reset_out_n",
+ "kb_row16_pt0",
+ "kb_row17_pt1",
+ "dp_hpd_pff0",
+ "usb_vbus_en2_pff1",
+ "pff2",
+};
+
+static const char * const rsvd3_groups[] = {
+ "dap3_sclk_pp3",
+ "pv0",
+ "pv1",
+ "sdmmc1_clk_pz0",
+ "clk2_out_pw5",
+ "clk2_req_pcc5",
+ "hdmi_int_pn7",
+
+ "ddc_scl_pv4",
+ "ddc_sda_pv5",
+
+ "pu6",
+
+ "gen1_i2c_scl_pc4",
+ "gen1_i2c_sda_pc5",
+
+ "dap4_din_pp5",
+ "dap4_sclk_pp7",
+
+ "clk3_out_pee0",
+ "clk3_req_pee1",
+
+ "sdmmc4_dat5_paa5",
+ "gpio_pcc1",
+ "cam_i2c_scl_pbb1",
+ "cam_i2c_sda_pbb2",
+ "pbb5",
+ "pbb7",
+ "jtag_rtck",
+ "pwr_i2c_scl_pz6",
+ "pwr_i2c_sda_pz7",
+
+ "kb_row0_pr0",
+ "kb_row1_pr1",
+ "kb_row2_pr2",
+ "kb_row4_pr4",
+ "kb_row5_pr5",
+ "kb_row9_ps1",
+ "kb_row10_ps2",
+ "kb_row11_ps3",
+ "kb_row12_ps4",
+ "kb_row15_ps7",
+
+ "clk_32k_out_pa0",
+ "core_pwr_req",
+ "cpu_pwr_req",
+ "pwr_int_n",
+ "clk_32k_in",
+ "owr",
+
+ "dap_mclk1_pw4",
+ "spdif_in_pk6",
+ "spdif_out_pk5",
+ "sdmmc3_clk_pa6",
+ "sdmmc3_dat0_pb7",
+
+ "pex_l0_rst_n_pdd1",
+ "pex_l0_clkreq_n_pdd2",
+ "pex_wake_n_pdd3",
+ "pex_l1_rst_n_pdd5",
+ "pex_l1_clkreq_n_pdd6",
+ "hdmi_cec_pee3",
+
+ "sdmmc3_cd_n_pv2",
+ "usb_vbus_en0_pn4",
+ "usb_vbus_en1_pn5",
+ "sdmmc3_clk_lb_out_pee4",
+ "sdmmc3_clk_lb_in_pee5",
+ "reset_out_n",
+ "kb_row16_pt0",
+ "kb_row17_pt1",
+ "dp_hpd_pff0",
+ "usb_vbus_en2_pff1",
+ "pff2",
+};
+
+static const char * const rsvd4_groups[] = {
+ "dap3_dout_pp2",
+ "pv0",
+ "pv1",
+ "sdmmc1_clk_pz0",
+
+ "clk2_out_pw5",
+ "clk2_req_pcc5",
+ "hdmi_int_pn7",
+ "ddc_scl_pv4",
+ "ddc_sda_pv5",
+
+ "uart2_rts_n_pj6",
+ "uart2_cts_n_pj5",
+ "uart3_txd_pw6",
+ "uart3_rxd_pw7",
+
+ "pu0",
+ "pu1",
+ "pu2",
+
+ "gen1_i2c_scl_pc4",
+ "gen1_i2c_sda_pc5",
+
+ "dap4_fs_pp4",
+ "dap4_dout_pp6",
+ "dap4_din_pp5",
+ "dap4_sclk_pp7",
+
+ "clk3_out_pee0",
+ "clk3_req_pee1",
+
+ "pi5",
+ "pk1",
+ "pk2",
+ "pg0",
+ "pg1",
+ "pg2",
+ "pg3",
+ "ph4",
+ "ph5",
+ "pb0",
+ "pb1",
+ "pk7",
+ "pi0",
+ "pi1",
+ "pi2",
+
+ "gen2_i2c_scl_pt5",
+ "gen2_i2c_sda_pt6",
+
+ "sdmmc4_cmd_pt7",
+ "sdmmc4_dat0_paa0",
+ "sdmmc4_dat1_paa1",
+ "sdmmc4_dat2_paa2",
+ "sdmmc4_dat3_paa3",
+ "sdmmc4_dat4_paa4",
+ "sdmmc4_dat5_paa5",
+ "sdmmc4_dat6_paa6",
+ "sdmmc4_dat7_paa7",
+
+ "jtag_rtck",
+ "pwr_i2c_scl_pz6",
+ "pwr_i2c_sda_pz7",
+
+ "kb_row0_pr0",
+ "kb_row1_pr1",
+ "kb_row2_pr2",
+ "kb_row13_ps5",
+ "kb_row14_ps6",
+ "kb_row15_ps7",
+
+ "kb_col0_pq0",
+ "kb_col1_pq1",
+ "kb_col2_pq2",
+ "kb_col5_pq5",
+
+ "clk_32k_out_pa0",
+ "core_pwr_req",
+ "cpu_pwr_req",
+ "pwr_int_n",
+ "clk_32k_in",
+ "owr",
+
+ "dap1_fs_pn0",
+ "dap1_din_pn1",
+ "dap1_sclk_pn3",
+ "dap_mclk1_req_pee2",
+ "dap_mclk1_pw5",
+
+ "dap2_fs_pa2",
+ "dap2_din_pa4",
+ "dap2_dout_pa5",
+ "dap2_sclk_pa3",
+
+ "dvfs_pwm_px0",
+ "dvfs_clk_px2",
+ "gpio_x1_aud_px1",
+ "gpio_x3_aud_px3",
+
+ "gpio_x5_aud_px5",
+ "gpio_x7_aud_px7",
+
+ "pex_l0_rst_n_pdd1",
+ "pex_l0_clkreq_n_pdd2",
+ "pex_wake_n_pdd3",
+ "pex_l1_rst_n_pdd5",
+ "pex_l1_clkreq_n_pdd6",
+ "hdmi_cec_pee3",
+
+ "sdmmc3_cd_n_pv2",
+ "usb_vbus_en0_pn4",
+ "usb_vbus_en1_pn5",
+ "sdmmc3_clk_lb_out_pee4",
+ "sdmmc3_clk_lb_in_pee5",
+ "gmi_clk_lb",
+
+ "dp_hpd_pff0",
+ "usb_vbus_en2_pff1",
+ "pff2",
+};
+
+static const char * const sdmmc1_groups[] = {
+ "sdmmc1_clk_pz0",
+ "sdmmc1_cmd_pz1",
+ "sdmmc1_dat3_py4",
+ "sdmmc1_dat2_py5",
+ "sdmmc1_dat1_py6",
+ "sdmmc1_dat0_py7",
+ "clk2_out_pw5",
+ "clk2_req_pcc",
+ "uart3_cts_n_pa1",
+ "sdmmc1_wp_n_pv3",
+};
+
+static const char * const sdmmc2_groups[] = {
+ "pi5",
+ "pk1",
+ "pk3",
+ "pk4",
+ "pi6",
+ "ph4",
+ "ph5",
+ "ph6",
+ "ph7",
+ "pi2",
+ "cam_mclk_pcc0",
+ "pcc1",
+ "pbb0",
+ "cam_i2c_scl_pbb1",
+ "cam_i2c_sda_pbb2",
+ "pbb3",
+ "pbb4",
+ "pbb5",
+ "pbb6",
+ "pbb7",
+ "pcc2",
+ "gmi_clk_lb",
+};
+
+static const char * const sdmmc3_groups[] = {
+ "pk0",
+ "pcc2",
+
+ "kb_col4_pq4",
+ "kb_col5_pq5",
+
+ "sdmmc3_clk_pa6",
+ "sdmmc3_cmd_pa7",
+ "sdmmc3_dat0_pb7",
+ "sdmmc3_dat1_pb6",
+ "sdmmc3_dat2_pb5",
+ "sdmmc3_dat3_pb4",
+
+ "sdmmc3_cd_n_pv2",
+ "sdmmc3_clk_lb_in_pee5",
+ "sdmmc3_clk_lb_out_pee4",
+};
+
+static const char * const sdmmc4_groups[] = {
+ "sdmmc4_clk_pcc4",
+ "sdmmc4_cmd_pt7",
+ "sdmmc4_dat0_paa0",
+ "sdmmc4_dat1_paa1",
+ "sdmmc4_dat2_paa2",
+ "sdmmc4_dat3_paa3",
+ "sdmmc4_dat4_paa4",
+ "sdmmc4_dat5_paa5",
+ "sdmmc4_dat6_paa6",
+ "sdmmc4_dat7_paa7",
+};
+
+static const char * const soc_groups[] = {
+ "pk0",
+ "pj2",
+ "kb_row15_ps7",
+ "clk_32k_out_pa0",
+};
+
+static const char * const spdif_groups[] = {
+ "sdmmc1_cmd_pz1",
+ "sdmmc1_dat3_py4",
+ "uart2_rxd_pc3",
+ "uart2_txd_pc2",
+ "spdif_in_pk6",
+ "spdif_out_pk5",
+};
+
+static const char * const spi1_groups[] = {
+ "ulpi_clk_py0",
+ "ulpi_dir_py1",
+ "ulpi_nxt_py2",
+ "ulpi_stp_py3",
+ "gpio_x3_aud_px3",
+ "gpio_x4_aud_px4",
+ "gpio_x5_aud_px5",
+ "gpio_x6_aud_px6",
+ "gpio_x7_aud_px7",
+ "gpio_w3_aud_pw3",
+};
+
+static const char * const spi2_groups[] = {
+ "ulpi_data4_po5",
+ "ulpi_data5_po6",
+ "ulpi_data6_po7",
+ "ulpi_data7_po0",
+
+ "kb_row13_ps5",
+ "kb_row14_ps6",
+ "kb_row15_ps7",
+ "kb_col0_pq0",
+ "kb_col1_pq1",
+ "kb_col2_pq2",
+ "kb_col6_pq6",
+ "kb_col7_pq7",
+ "gpio_x4_aud_px4",
+ "gpio_x5_aud_px5",
+ "gpio_x6_aud_px6",
+ "gpio_x7_aud_px7",
+ "gpio_w2_aud_pw2",
+ "gpio_w3_aud_pw3",
+};
+
+static const char * const spi3_groups[] = {
+ "ulpi_data0_po1",
+ "ulpi_data1_po2",
+ "ulpi_data2_po3",
+ "ulpi_data3_po4",
+ "sdmmc4_dat0_paa0",
+ "sdmmc4_dat1_paa1",
+ "sdmmc4_dat2_paa2",
+ "sdmmc4_dat3_paa3",
+ "sdmmc4_dat4_paa4",
+ "sdmmc4_dat5_paa5",
+ "sdmmc4_dat6_paa6",
+ "sdmmc3_clk_pa6",
+ "sdmmc3_cmd_pa7",
+ "sdmmc3_dat0_pb7",
+ "sdmmc3_dat1_pb6",
+ "sdmmc3_dat2_pb5",
+ "sdmmc3_dat3_pb4",
+};
+
+static const char * const spi4_groups[] = {
+ "sdmmc1_cmd_pz1",
+ "sdmmc1_dat3_py4",
+ "sdmmc1_dat2_py5",
+ "sdmmc1_dat1_py6",
+ "sdmmc1_dat0_py7",
+
+ "uart2_rxd_pc3",
+ "uart2_txd_pc2",
+ "uart2_rts_n_pj6",
+ "uart2_cts_n_pj5",
+ "uart3_txd_pw6",
+ "uart3_rxd_pw7",
+
+ "pi3",
+ "pg4",
+ "pg5",
+ "pg6",
+ "pg7",
+ "ph3",
+ "pi4",
+ "sdmmc1_wp_n_pv3",
+};
+
+static const char * const spi5_groups[] = {
+ "ulpi_clk_py0",
+ "ulpi_dir_py1",
+ "ulpi_nxt_py2",
+ "ulpi_stp_py3",
+ "dap3_fs_pp0",
+ "dap3_din_pp1",
+ "dap3_dout_pp2",
+ "dap3_sclk_pp3",
+};
+
+static const char * const spi6_groups[] = {
+ "dvfs_pwm_px0",
+ "gpio_x1_aud_px1",
+ "gpio_x3_aud_px3",
+ "dvfs_clk_px2",
+ "gpio_x6_aud_px6",
+ "gpio_w2_aud_pw2",
+ "gpio_w3_aud_pw3",
+};
+
+static const char * const trace_groups[] = {
+ "pi2",
+ "pi4",
+ "pi7",
+ "ph0",
+ "ph6",
+ "ph7",
+ "pg2",
+ "pg3",
+ "pk1",
+ "pk3",
+};
+
+static const char * const uarta_groups[] = {
+ "ulpi_data0_po1",
+ "ulpi_data1_po2",
+ "ulpi_data2_po3",
+ "ulpi_data3_po4",
+ "ulpi_data4_po5",
+ "ulpi_data5_po6",
+ "ulpi_data6_po7",
+ "ulpi_data7_po0",
+
+ "sdmmc1_cmd_pz1",
+ "sdmmc1_dat3_py4",
+ "sdmmc1_dat2_py5",
+ "sdmmc1_dat1_py6",
+ "sdmmc1_dat0_py7",
+
+ "uart2_rxd_pc3",
+ "uart2_txd_pc2",
+ "uart2_rts_n_pj6",
+ "uart2_cts_n_pj5",
+
+ "pu0",
+ "pu1",
+ "pu2",
+ "pu3",
+ "pu4",
+ "pu5",
+ "pu6",
+
+ "kb_row7_pr7",
+ "kb_row8_ps0",
+ "kb_row9_ps1",
+ "kb_row10_ps2",
+ "kb_col3_pq3",
+ "kb_col4_pq4",
+
+ "sdmmc3_cmd_pa7",
+ "sdmmc3_dat1_pb6",
+ "sdmmc1_wp_n_pv3",
+};
+
+static const char * const uartb_groups[] = {
+ "uart2_rts_n_pj6",
+ "uart2_cts_n_pj5",
+};
+
+static const char * const uartc_groups[] = {
+ "uart3_txd_pw6",
+ "uart3_rxd_pw7",
+ "uart3_cts_n_pa1",
+ "uart3_rts_n_pc0",
+ "kb_row16_pt0",
+ "kn_row17_pt1",
+};
+
+static const char * const uartd_groups[] = {
+ "ulpi_clk_py0",
+ "ulpi_dir_py1",
+ "ulpi_nxt_py2",
+ "ulpi_stp_py3",
+ "pj7",
+ "pb0",
+ "pb1",
+ "pk7",
+ "kb_col6_pq6",
+ "kb_col7_pq7",
+};
+
+static const char * const ulpi_groups[] = {
+ "ulpi_data0_po1",
+ "ulpi_data1_po2",
+ "ulpi_data2_po3",
+ "ulpi_data3_po4",
+ "ulpi_data4_po5",
+ "ulpi_data5_po6",
+ "ulpi_data6_po7",
+ "ulpi_data7_po0",
+ "ulpi_clk_py0",
+ "ulpi_dir_py1",
+ "ulpi_nxt_py2",
+ "ulpi_stp_py3",
+};
+
+static const char * const usb_groups[] = {
+ "pj0",
+ "usb_vbus_en0_pn4",
+ "usb_vbus_en1_pn5",
+ "usb_vbus_en2_pff1",
+};
+
+static const char * const vgp1_groups[] = {
+ "cam_i2c_scl_pbb1",
+};
+
+static const char * const vgp2_groups[] = {
+ "cam_i2c_sda_pbb2",
+};
+
+static const char * const vgp3_groups[] = {
+ "pbb3",
+};
+
+static const char * const vgp4_groups[] = {
+ "pbb4",
+};
+
+static const char * const vgp5_groups[] = {
+ "pbb5",
+};
+
+static const char * const vgp6_groups[] = {
+ "pbb0",
+};
+
+static const char * const vi_groups[] = {
+ "cam_mclk_pcc0",
+};
+
+static const char * const vi_alt1_groups[] = {
+ "cam_mclk_pcc0",
+};
+
+static const char * const vi_alt3_groups[] = {
+ "cam_mclk_pcc0",
+};
+
+static const char * const vimclk2_groups[] = {
+ "pbb0",
+};
+
+static const char * const vimclk2_alt_groups[] = {
+ "pbb0",
+};
+
+static const char * const sata_groups[] = {
+ "dap_mclk1_req_pee2",
+ "dap1_dout_pn2",
+ "pff2",
+};
+
+static const char * const ccla_groups[] = {
+ "pk3",
+};
+
+static const char * const rtck_groups[] = {
+ "jtag_rtck",
+};
+
+static const char * const sys_groups[] = {
+ "kb_row3_pr3",
+};
+
+static const char * const pe0_groups[] = {
+ "pex_l0_rst_n_pdd1",
+ "pex_l0_clkreq_n_pdd2",
+};
+
+static const char * const pe_groups[] = {
+ "pex_wake_n_pdd3",
+};
+
+static const char * const pe1_groups[] = {
+ "pex_l1_rst_n_pdd5",
+ "pex_l1_clkreq_n_pdd6",
+};
+
+static const char * const dp_groups[] = {
+ "dp_hpd_pff0",
+};
+
+static const char * const clk_groups[] = {
+ "clk_32k_in",
+};
+
+static const char * const tmds_groups[] = {
+ "pg4",
+ "ph1",
+ "ph2",
+};
+
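+/* Build a tegra_function entry from the <fname>_groups tables above. */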
+#define FUNCTION(fname) \
+ { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+static const struct tegra_function tegra124_functions[] = {
+ FUNCTION(blink),
+ FUNCTION(cec),
+ FUNCTION(cldvfs),
+ FUNCTION(clk12),
+ FUNCTION(cpu),
+ FUNCTION(dap),
+ FUNCTION(dap1),
+ FUNCTION(dap2),
+ FUNCTION(dev3),
+ FUNCTION(displaya),
+ FUNCTION(displaya_alt),
+ FUNCTION(displayb),
+ FUNCTION(dtv),
+ FUNCTION(extperiph1),
+ FUNCTION(extperiph2),
+ FUNCTION(extperiph3),
+ FUNCTION(gmi),
+ FUNCTION(gmi_alt),
+ FUNCTION(hda),
+ FUNCTION(hsi),
+ FUNCTION(i2c1),
+ FUNCTION(i2c2),
+ FUNCTION(i2c3),
+ FUNCTION(i2c4),
+ FUNCTION(i2cpwr),
+ FUNCTION(i2s0),
+ FUNCTION(i2s1),
+ FUNCTION(i2s2),
+ FUNCTION(i2s3),
+ FUNCTION(i2s4),
+ FUNCTION(irda),
+ FUNCTION(kbc),
+ FUNCTION(owr),
+ FUNCTION(pmi),
+ FUNCTION(pwm0),
+ FUNCTION(pwm1),
+ FUNCTION(pwm2),
+ FUNCTION(pwm3),
+ FUNCTION(pwron),
+ FUNCTION(reset_out_n),
+ FUNCTION(rsvd1),
+ FUNCTION(rsvd2),
+ FUNCTION(rsvd3),
+ FUNCTION(rsvd4),
+ FUNCTION(sdmmc1),
+ FUNCTION(sdmmc2),
+ FUNCTION(sdmmc3),
+ FUNCTION(sdmmc4),
+ FUNCTION(soc),
+ FUNCTION(spdif),
+ FUNCTION(spi1),
+ FUNCTION(spi2),
+ FUNCTION(spi3),
+ FUNCTION(spi4),
+ FUNCTION(spi5),
+ FUNCTION(spi6),
+ FUNCTION(trace),
+ FUNCTION(uarta),
+ FUNCTION(uartb),
+ FUNCTION(uartc),
+ FUNCTION(uartd),
+ FUNCTION(ulpi),
+ FUNCTION(usb),
+ FUNCTION(vgp1),
+ FUNCTION(vgp2),
+ FUNCTION(vgp3),
+ FUNCTION(vgp4),
+ FUNCTION(vgp5),
+ FUNCTION(vgp6),
+ FUNCTION(vi),
+ FUNCTION(vi_alt1),
+ FUNCTION(vi_alt3),
+ FUNCTION(vimclk2),
+ FUNCTION(vimclk2_alt),
+ FUNCTION(sata),
+ FUNCTION(ccla),
+ FUNCTION(pe0),
+ FUNCTION(pe),
+ FUNCTION(pe1),
+ FUNCTION(dp),
+ FUNCTION(rtck),
+ FUNCTION(sys),
+ FUNCTION(clk),
+ FUNCTION(tmds),
+};
+
+#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
+#define PINGROUP_REG_A 0x3000 /* bank 1 */
+
+#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
+#define PINGROUP_REG_N(r) -1
+
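+/*
+ * Mux group helper: each pinmux register lives in bank 1, offset from
+ * PINGROUP_REG_A, and packs mux select (bit 0), pull-up/down (bit 2),
+ * tristate (bit 4), input enable (bit 5) and lock (bit 7); open-drain
+ * (bit 6), I/O reset (bit 8) and receive select (bit 9) exist only for
+ * groups whose od/ior/rcv_sel columns are Y.
+ */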
+#define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior, rcv_sel) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .funcs = { \
+ TEGRA_MUX_ ## f0, \
+ TEGRA_MUX_ ## f1, \
+ TEGRA_MUX_ ## f2, \
+ TEGRA_MUX_ ## f3, \
+ }, \
+ .func_safe = TEGRA_MUX_ ## f_safe, \
+ .mux_reg = PINGROUP_REG_Y(r), \
+ .mux_bank = 1, \
+ .mux_bit = 0, \
+ .pupd_reg = PINGROUP_REG_Y(r), \
+ .pupd_bank = 1, \
+ .pupd_bit = 2, \
+ .tri_reg = PINGROUP_REG_Y(r), \
+ .tri_bank = 1, \
+ .tri_bit = 4, \
+ .einput_reg = PINGROUP_REG_Y(r), \
+ .einput_bank = 1, \
+ .einput_bit = 5, \
+ .odrain_reg = PINGROUP_REG_##od(r), \
+ .odrain_bank = 1, \
+ .odrain_bit = 6, \
+ .lock_reg = PINGROUP_REG_Y(r), \
+ .lock_bank = 1, \
+ .lock_bit = 7, \
+ .ioreset_reg = PINGROUP_REG_##ior(r), \
+ .ioreset_bank = 1, \
+ .ioreset_bit = 8, \
+ .rcv_sel_reg = PINGROUP_REG_##rcv_sel(r), \
+ .rcv_sel_bank = 1, \
+ .rcv_sel_bit = 9, \
+ .drv_reg = -1, \
+ .drvtype_reg = -1, \
+ }
+
+#define DRV_PINGROUP_DVRTYPE_Y(r) ((r) - DRV_PINGROUP_REG_A)
+#define DRV_PINGROUP_DVRTYPE_N(r) -1
+
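+/*
+ * Drive group helper: registers live in bank 0, offset from
+ * DRV_PINGROUP_REG_A, and carry high-speed mode, schmitt trigger,
+ * low-power mode, drive-down/up strength and slew rate fields at the
+ * bit positions passed in; the drive type field (bit 6) exists only
+ * when the drvtype column is Y.
+ */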
+#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, \
+ drvdn_b, drvdn_w, drvup_b, drvup_w, \
+ slwr_b, slwr_w, slwf_b, slwf_w, \
+ drvtype) \
+ { \
+ .name = "drive_" #pg_name, \
+ .pins = drive_##pg_name##_pins, \
+ .npins = ARRAY_SIZE(drive_##pg_name##_pins), \
+ .mux_reg = -1, \
+ .pupd_reg = -1, \
+ .tri_reg = -1, \
+ .einput_reg = -1, \
+ .odrain_reg = -1, \
+ .lock_reg = -1, \
+ .ioreset_reg = -1, \
+ .rcv_sel_reg = -1, \
+ .drv_reg = DRV_PINGROUP_DVRTYPE_Y(r), \
+ .drv_bank = 0, \
+ .hsm_bit = hsm_b, \
+ .schmitt_bit = schmitt_b, \
+ .lpmd_bit = lpmd_b, \
+ .drvdn_bit = drvdn_b, \
+ .drvdn_width = drvdn_w, \
+ .drvup_bit = drvup_b, \
+ .drvup_width = drvup_w, \
+ .slwr_bit = slwr_b, \
+ .slwr_width = slwr_w, \
+ .slwf_bit = slwf_b, \
+ .slwf_width = slwf_w, \
+ .drvtype_reg = DRV_PINGROUP_DVRTYPE_##drvtype(r), \
+ .drvtype_bank = 0, \
+ .drvtype_bit = 6, \
+ }
+
+static const struct tegra_pingroup tegra124_groups[] = {
+ /* pg_name, f0, f1, f2, f3, safe, r, od, ior, rcv_sel */
+ PINGROUP(ulpi_data0_po1, SPI3, HSI, UARTA, ULPI, SPI3, 0x3000, N, N, N),
+ PINGROUP(ulpi_data1_po2, SPI3, HSI, UARTA, ULPI, SPI3, 0x3004, N, N, N),
+ PINGROUP(ulpi_data2_po3, SPI3, HSI, UARTA, ULPI, SPI3, 0x3008, N, N, N),
+ PINGROUP(ulpi_data3_po4, SPI3, HSI, UARTA, ULPI, SPI3, 0x300c, N, N, N),
+ PINGROUP(ulpi_data4_po5, SPI2, HSI, UARTA, ULPI, SPI2, 0x3010, N, N, N),
+ PINGROUP(ulpi_data5_po6, SPI2, HSI, UARTA, ULPI, SPI2, 0x3014, N, N, N),
+ PINGROUP(ulpi_data6_po7, SPI2, HSI, UARTA, ULPI, SPI2, 0x3018, N, N, N),
+ PINGROUP(ulpi_data7_po0, SPI2, HSI, UARTA, ULPI, SPI2, 0x301c, N, N, N),
+ PINGROUP(ulpi_clk_py0, SPI1, SPI5, UARTD, ULPI, SPI1, 0x3020, N, N, N),
+ PINGROUP(ulpi_dir_py1, SPI1, SPI5, UARTD, ULPI, SPI1, 0x3024, N, N, N),
+ PINGROUP(ulpi_nxt_py2, SPI1, SPI5, UARTD, ULPI, SPI1, 0x3028, N, N, N),
+ PINGROUP(ulpi_stp_py3, SPI1, SPI5, UARTD, ULPI, SPI1, 0x302c, N, N, N),
+ PINGROUP(dap3_fs_pp0, I2S2, SPI5, DISPLAYA, DISPLAYB, I2S2, 0x3030, N, N, N),
+ PINGROUP(dap3_din_pp1, I2S2, SPI5, DISPLAYA, DISPLAYB, I2S2, 0x3034, N, N, N),
+ PINGROUP(dap3_dout_pp2, I2S2, SPI5, DISPLAYA, RSVD4, I2S2, 0x3038, N, N, N),
+ PINGROUP(dap3_sclk_pp3, I2S2, SPI5, RSVD3, DISPLAYB, I2S2, 0x303c, N, N, N),
+ PINGROUP(pv0, RSVD1, RSVD2, RSVD3, RSVD4, RSVD1, 0x3040, N, N, N),
+ PINGROUP(pv1, RSVD1, RSVD2, RSVD3, RSVD4, RSVD1, 0x3044, N, N, N),
+ PINGROUP(sdmmc1_clk_pz0, SDMMC1, CLK12, RSVD3, RSVD4, RSVD3, 0x3048, N, N, N),
+ PINGROUP(sdmmc1_cmd_pz1, SDMMC1, SPDIF, SPI4, UARTA, SDMMC1, 0x304c, N, N, N),
+ PINGROUP(sdmmc1_dat3_py4, SDMMC1, SPDIF, SPI4, UARTA, SDMMC1, 0x3050, N, N, N),
+ PINGROUP(sdmmc1_dat2_py5, SDMMC1, PWM0, SPI4, UARTA, SDMMC1, 0x3054, N, N, N),
+ PINGROUP(sdmmc1_dat1_py6, SDMMC1, PWM1, SPI4, UARTA, SDMMC1, 0x3058, N, N, N),
+ PINGROUP(sdmmc1_dat0_py7, SDMMC1, RSVD2, SPI4, UARTA, SDMMC1, 0x305c, N, N, N),
+ PINGROUP(clk2_out_pw5, EXTPERIPH2, RSVD2, RSVD3, RSVD4, EXTPERIPH2, 0x3068, N, N, N),
+ PINGROUP(clk2_req_pcc5, DAP, RSVD2, RSVD3, RSVD4, DAP, 0x306c, N, N, N),
+ PINGROUP(hdmi_int_pn7, RSVD1, RSVD2, RSVD3, RSVD4, RSVD1, 0x3110, N, N, Y),
+ PINGROUP(ddc_scl_pv4, I2C4, RSVD2, RSVD3, RSVD4, I2C4, 0x3114, N, N, Y),
+ PINGROUP(ddc_sda_pv5, I2C4, RSVD2, RSVD3, RSVD4, I2C4, 0x3118, N, N, Y),
+ PINGROUP(uart2_rxd_pc3, IRDA, SPDIF, UARTA, SPI4, IRDA, 0x3164, N, N, N),
+ PINGROUP(uart2_txd_pc2, IRDA, SPDIF, UARTA, SPI4, IRDA, 0x3168, N, N, N),
+ PINGROUP(uart2_rts_n_pj6, UARTA, UARTB, GMI, SPI4, UARTA, 0x316c, N, N, N),
+ PINGROUP(uart2_cts_n_pj5, UARTA, UARTB, GMI, SPI4, UARTA, 0x3170, N, N, N),
+ PINGROUP(uart3_txd_pw6, UARTC, RSVD2, GMI, SPI4, UARTC, 0x3174, N, N, N),
+ PINGROUP(uart3_rxd_pw7, UARTC, RSVD2, GMI, SPI4, UARTC, 0x3178, N, N, N),
+ PINGROUP(uart3_cts_n_pa1, UARTC, SDMMC1, DTV, GMI, UARTC, 0x317c, N, N, N),
+ PINGROUP(uart3_rts_n_pc0, UARTC, PWM0, DTV, GMI, UARTC, 0x3180, N, N, N),
+ PINGROUP(pu0, OWR, UARTA, GMI, RSVD4, RSVD4, 0x3184, N, N, N),
+ PINGROUP(pu1, RSVD1, UARTA, GMI, RSVD4, RSVD4, 0x3188, N, N, N),
+ PINGROUP(pu2, RSVD1, UARTA, GMI, RSVD4, RSVD4, 0x318c, N, N, N),
+ PINGROUP(pu3, PWM0, UARTA, GMI, DISPLAYB, PWM0, 0x3190, N, N, N),
+ PINGROUP(pu4, PWM1, UARTA, GMI, DISPLAYB, PWM1, 0x3194, N, N, N),
+ PINGROUP(pu5, PWM2, UARTA, GMI, DISPLAYB, PWM2, 0x3198, N, N, N),
+ PINGROUP(pu6, PWM3, UARTA, RSVD3, GMI, RSVD3, 0x319c, N, N, N),
+ PINGROUP(gen1_i2c_scl_pc4, I2C1, RSVD2, RSVD3, RSVD4, I2C1, 0x31a0, Y, N, N),
+ PINGROUP(gen1_i2c_sda_pc5, I2C1, RSVD2, RSVD3, RSVD4, I2C1, 0x31a4, Y, N, N),
+ PINGROUP(dap4_fs_pp4, I2S3, GMI, DTV, RSVD4, I2S3, 0x31a8, N, N, N),
+ PINGROUP(dap4_din_pp5, I2S3, GMI, RSVD3, RSVD4, I2S3, 0x31ac, N, N, N),
+ PINGROUP(dap4_dout_pp6, I2S3, GMI, DTV, RSVD4, I2S3, 0x31b0, N, N, N),
+ PINGROUP(dap4_sclk_pp7, I2S3, GMI, RSVD3, RSVD4, I2S3, 0x31b4, N, N, N),
+ PINGROUP(clk3_out_pee0, EXTPERIPH3, RSVD2, RSVD3, RSVD4, RSVD3, 0x31b8, N, N, N),
+ PINGROUP(clk3_req_pee1, DEV3, RSVD2, RSVD3, RSVD4, RSVD4, 0x31bc, N, N, N),
+ PINGROUP(pc7, RSVD1, RSVD2, GMI, GMI_ALT, RSVD1, 0x31c0, N, N, N),
+ PINGROUP(pi5, SDMMC2, RSVD2, GMI, RSVD4, GMI, 0x31c4, N, N, N),
+ PINGROUP(pi7, RSVD1, TRACE, GMI, DTV, RSVD1, 0x31c8, N, N, N),
+ PINGROUP(pk0, RSVD1, SDMMC3, GMI, SOC, RSVD1, 0x31cc, N, N, N),
+ PINGROUP(pk1, SDMMC2, TRACE, GMI, RSVD4, GMI, 0x31d0, N, N, N),
+ PINGROUP(pj0, RSVD1, RSVD2, GMI, USB, RSVD1, 0x31d4, N, N, N),
+ PINGROUP(pj2, RSVD1, RSVD2, GMI, SOC, RSVD1, 0x31d8, N, N, N),
+ PINGROUP(pk3, SDMMC2, TRACE, GMI, CCLA, GMI, 0x31dc, N, N, N),
+ PINGROUP(pk4, SDMMC2, RSVD2, GMI, GMI_ALT, GMI, 0x31e0, N, N, N),
+ PINGROUP(pk2, RSVD1, RSVD2, GMI, RSVD4, RSVD4, 0x31e4, N, N, N),
+ PINGROUP(pi3, RSVD1, RSVD2, GMI, SPI4, RSVD1, 0x31e8, N, N, N),
+ PINGROUP(pi6, RSVD1, RSVD2, GMI, SDMMC2, RSVD1, 0x31ec, N, N, N),
+ PINGROUP(pg0, RSVD1, RSVD2, GMI, RSVD4, RSVD4, 0x31f0, N, N, N),
+ PINGROUP(pg1, RSVD1, RSVD2, GMI, RSVD4, RSVD4, 0x31f4, N, N, N),
+ PINGROUP(pg2, RSVD1, TRACE, GMI, RSVD4, RSVD4, 0x31f8, N, N, N),
+ PINGROUP(pg3, RSVD1, TRACE, GMI, RSVD4, RSVD4, 0x31fc, N, N, N),
+ PINGROUP(pg4, RSVD1, TMDS, GMI, SPI4, RSVD1, 0x3200, N, N, N),
+ PINGROUP(pg5, RSVD1, RSVD2, GMI, SPI4, RSVD1, 0x3204, N, N, N),
+ PINGROUP(pg6, RSVD1, RSVD2, GMI, SPI4, RSVD1, 0x3208, N, N, N),
+ PINGROUP(pg7, RSVD1, RSVD2, GMI, SPI4, RSVD1, 0x320c, N, N, N),
+ PINGROUP(ph0, PWM0, TRACE, GMI, DTV, GMI, 0x3210, N, N, N),
+ PINGROUP(ph1, PWM1, TMDS, GMI, DISPLAYA, GMI, 0x3214, N, N, N),
+ PINGROUP(ph2, PWM2, TMDS, GMI, CLDVFS, GMI, 0x3218, N, N, N),
+ PINGROUP(ph3, PWM3, SPI4, GMI, CLDVFS, GMI, 0x321c, N, N, N),
+ PINGROUP(ph4, SDMMC2, RSVD2, GMI, RSVD4, GMI, 0x3220, N, N, N),
+ PINGROUP(ph5, SDMMC2, RSVD2, GMI, RSVD4, GMI, 0x3224, N, N, N),
+ PINGROUP(ph6, SDMMC2, TRACE, GMI, DTV, GMI, 0x3228, N, N, N),
+ PINGROUP(ph7, SDMMC2, TRACE, GMI, DTV, GMI, 0x322c, N, N, N),
+ PINGROUP(pj7, UARTD, RSVD2, GMI, GMI_ALT, RSVD2, 0x3230, N, N, N),
+ PINGROUP(pb0, UARTD, RSVD2, GMI, RSVD4, RSVD2, 0x3234, N, N, N),
+ PINGROUP(pb1, UARTD, RSVD2, GMI, RSVD4, RSVD2, 0x3238, N, N, N),
+ PINGROUP(pk7, UARTD, RSVD2, GMI, RSVD4, RSVD2, 0x323c, N, N, N),
+ PINGROUP(pi0, RSVD1, RSVD2, GMI, RSVD4, RSVD4, 0x3240, N, N, N),
+ PINGROUP(pi1, RSVD1, RSVD2, GMI, RSVD4, RSVD1, 0x3244, N, N, N),
+ PINGROUP(pi2, SDMMC2, TRACE, GMI, RSVD4, GMI, 0x3248, N, N, N),
+ PINGROUP(pi4, SPI4, TRACE, GMI, DISPLAYA, GMI, 0x324c, N, N, N),
+ PINGROUP(gen2_i2c_scl_pt5, I2C2, RSVD2, GMI, RSVD4, RSVD2, 0x3250, Y, N, N),
+ PINGROUP(gen2_i2c_sda_pt6, I2C2, RSVD2, GMI, RSVD4, RSVD2, 0x3254, Y, N, N),
+ PINGROUP(sdmmc4_clk_pcc4, SDMMC4, RSVD2, GMI, RSVD4, RSVD2, 0x3258, N, Y, N),
+ PINGROUP(sdmmc4_cmd_pt7, SDMMC4, RSVD2, GMI, RSVD4, RSVD2, 0x325c, N, Y, N),
+ PINGROUP(sdmmc4_dat0_paa0, SDMMC4, SPI3, GMI, RSVD4, SDMMC4, 0x3260, N, Y, N),
+ PINGROUP(sdmmc4_dat1_paa1, SDMMC4, SPI3, GMI, RSVD4, SDMMC4, 0x3264, N, Y, N),
+ PINGROUP(sdmmc4_dat2_paa2, SDMMC4, SPI3, GMI, RSVD4, SDMMC4, 0x3268, N, Y, N),
+ PINGROUP(sdmmc4_dat3_paa3, SDMMC4, SPI3, GMI, RSVD4, SDMMC4, 0x326c, N, Y, N),
+ PINGROUP(sdmmc4_dat4_paa4, SDMMC4, SPI3, GMI, RSVD4, SDMMC4, 0x3270, N, Y, N),
+ PINGROUP(sdmmc4_dat5_paa5, SDMMC4, SPI3, RSVD3, RSVD4, SDMMC4, 0x3274, N, Y, N),
+ PINGROUP(sdmmc4_dat6_paa6, SDMMC4, SPI3, GMI, RSVD4, SDMMC4, 0x3278, N, Y, N),
+ PINGROUP(sdmmc4_dat7_paa7, SDMMC4, RSVD1, GMI, RSVD4, SDMMC4, 0x327c, N, Y, N),
+ PINGROUP(cam_mclk_pcc0, VI, VI_ALT1, VI_ALT3, SDMMC2, VI, 0x3284, N, N, N),
+ PINGROUP(pcc1, I2S4, RSVD1, RSVD3, SDMMC2, I2S4, 0x3288, N, N, N),
+ PINGROUP(pbb0, VGP6, VIMCLK2, SDMMC2, VIMCLK2_ALT, VGP6, 0x328c, N, N, N),
+ PINGROUP(cam_i2c_scl_pbb1, VGP1, I2C3, RSVD3, SDMMC2, VGP1, 0x3290, Y, N, N),
+ PINGROUP(cam_i2c_sda_pbb2, VGP2, I2C3, RSVD3, SDMMC2, VGP2, 0x3294, Y, N, N),
+ PINGROUP(pbb3, VGP3, DISPLAYA, DISPLAYB, SDMMC2, VGP3, 0x3298, N, N, N),
+ PINGROUP(pbb4, VGP4, DISPLAYA, DISPLAYB, SDMMC2, VGP4, 0x329c, N, N, N),
+ PINGROUP(pbb5, VGP5, DISPLAYA, RSVD3, SDMMC2, VGP5, 0x32a0, N, N, N),
+ PINGROUP(pbb6, I2S4, RSVD2, DISPLAYB, SDMMC2, I2S4, 0x32a4, N, N, N),
+ PINGROUP(pbb7, I2S4, RSVD2, RSVD3, SDMMC2, I2S4, 0x32a8, N, N, N),
+ PINGROUP(pcc2, I2S4, RSVD2, SDMMC3, SDMMC2, I2S4, 0x32ac, N, N, N),
+ PINGROUP(jtag_rtck, RTCK, RSVD2, RSVD3, RSVD4, RTCK, 0x32b0, N, N, N),
+ PINGROUP(pwr_i2c_scl_pz6, I2CPWR, RSVD2, RSVD3, RSVD4, RSVD2, 0x32b4, Y, N, N),
+ PINGROUP(pwr_i2c_sda_pz7, I2CPWR, RSVD2, RSVD3, RSVD4, RSVD2, 0x32b8, Y, N, N),
+ PINGROUP(kb_row0_pr0, KBC, RSVD2, RSVD3, RSVD4, RSVD4, 0x32bc, N, N, N),
+ PINGROUP(kb_row1_pr1, KBC, RSVD2, RSVD3, RSVD4, RSVD4, 0x32c0, N, N, N),
+ PINGROUP(kb_row2_pr2, KBC, RSVD2, RSVD3, RSVD4, RSVD4, 0x32c4, N, N, N),
+ PINGROUP(kb_row3_pr3, KBC, DISPLAYA, SYS, DISPLAYB, KBC, 0x32c8, N, N, N),
+ PINGROUP(kb_row4_pr4, KBC, DISPLAYA, RSVD3, DISPLAYB, RSVD3, 0x32cc, N, N, N),
+ PINGROUP(kb_row5_pr5, KBC, DISPLAYA, RSVD3, DISPLAYB, RSVD3, 0x32d0, N, N, N),
+ PINGROUP(kb_row6_pr6, KBC, DISPLAYA, DISPLAYA_ALT, DISPLAYB, KBC, 0x32d4, N, N, N),
+ PINGROUP(kb_row7_pr7, KBC, RSVD2, CLDVFS, UARTA, RSVD2, 0x32d8, N, N, N),
+ PINGROUP(kb_row8_ps0, KBC, RSVD2, CLDVFS, UARTA, RSVD2, 0x32dc, N, N, N),
+ PINGROUP(kb_row9_ps1, KBC, RSVD2, RSVD3, UARTA, KBC, 0x32e0, N, N, N),
+ PINGROUP(kb_row10_ps2, KBC, RSVD2, RSVD3, UARTA, KBC, 0x32e4, N, N, N),
+ PINGROUP(kb_row11_ps3, KBC, RSVD2, RSVD3, IRDA, RSVD3, 0x32e8, N, N, N),
+ PINGROUP(kb_row12_ps4, KBC, RSVD2, RSVD3, IRDA, RSVD3, 0x32ec, N, N, N),
+ PINGROUP(kb_row13_ps5, KBC, RSVD2, SPI2, RSVD4, RSVD4, 0x32f0, N, N, N),
+ PINGROUP(kb_row14_ps6, KBC, RSVD2, SPI2, RSVD4, RSVD4, 0x32f4, N, N, N),
+ PINGROUP(kb_row15_ps7, KBC, SOC, RSVD3, RSVD4, KBC, 0x32f8, N, N, N),
+ PINGROUP(kb_col0_pq0, KBC, RSVD2, SPI2, RSVD4, RSVD4, 0x32fc, N, N, N),
+ PINGROUP(kb_col1_pq1, KBC, RSVD2, SPI2, RSVD4, RSVD4, 0x3300, N, N, N),
+ PINGROUP(kb_col2_pq2, KBC, RSVD2, SPI2, RSVD4, RSVD4, 0x3304, N, N, N),
+ PINGROUP(kb_col3_pq3, KBC, DISPLAYA, PWM2, UARTA, KBC, 0x3308, N, N, N),
+ PINGROUP(kb_col4_pq4, KBC, OWR, SDMMC3, UARTA, KBC, 0x330c, N, N, N),
+ PINGROUP(kb_col5_pq5, KBC, RSVD2, SDMMC3, RSVD4, RSVD4, 0x3310, N, N, N),
+ PINGROUP(kb_col6_pq6, KBC, RSVD2, SPI2, UARTD, RSVD2, 0x3314, N, N, N),
+ PINGROUP(kb_col7_pq7, KBC, RSVD2, SPI2, UARTD, RSVD2, 0x3318, N, N, N),
+ PINGROUP(clk_32k_out_pa0, BLINK, SOC, RSVD3, RSVD4, RSVD3, 0x331c, N, N, N),
+ PINGROUP(core_pwr_req, PWRON, RSVD2, RSVD3, RSVD4, RSVD2, 0x3324, N, N, N),
+ PINGROUP(cpu_pwr_req, CPU, RSVD2, RSVD3, RSVD4, RSVD2, 0x3328, N, N, N),
+ PINGROUP(pwr_int_n, PMI, RSVD2, RSVD3, RSVD4, RSVD2, 0x332c, N, N, N),
+ PINGROUP(clk_32k_in, CLK, RSVD2, RSVD3, RSVD4, RSVD2, 0x3330, N, N, N),
+ PINGROUP(owr, OWR, RSVD2, RSVD3, RSVD4, RSVD2, 0x3334, N, N, Y),
+ PINGROUP(dap1_fs_pn0, I2S0, HDA, GMI, RSVD4, RSVD4, 0x3338, N, N, N),
+ PINGROUP(dap1_din_pn1, I2S0, HDA, GMI, RSVD4, RSVD4, 0x333c, N, N, N),
+ PINGROUP(dap1_dout_pn2, I2S0, HDA, GMI, SATA, I2S0, 0x3340, N, N, N),
+ PINGROUP(dap1_sclk_pn3, I2S0, HDA, GMI, RSVD4, I2S0, 0x3344, N, N, N),
+ PINGROUP(dap_mclk1_req_pee2, DAP, DAP1, SATA, RSVD4, DAP, 0x3348, N, N, N),
+ PINGROUP(dap_mclk1_pw4, EXTPERIPH1, DAP2, RSVD3, RSVD4, RSVD3, 0x334c, N, N, N),
+ PINGROUP(spdif_in_pk6, SPDIF, RSVD2, RSVD3, I2C3, RSVD3, 0x3350, N, N, N),
+ PINGROUP(spdif_out_pk5, SPDIF, RSVD2, RSVD3, I2C3, RSVD3, 0x3354, N, N, N),
+ PINGROUP(dap2_fs_pa2, I2S1, HDA, GMI, RSVD4, I2S1, 0x3358, N, N, N),
+ PINGROUP(dap2_din_pa4, I2S1, HDA, GMI, RSVD4, I2S1, 0x335c, N, N, N),
+ PINGROUP(dap2_dout_pa5, I2S1, HDA, GMI, RSVD4, I2S1, 0x3360, N, N, N),
+ PINGROUP(dap2_sclk_pa3, I2S1, HDA, GMI, RSVD4, I2S1, 0x3364, N, N, N),
+ PINGROUP(dvfs_pwm_px0, SPI6, CLDVFS, GMI, RSVD4, SPI6, 0x3368, N, N, N),
+ PINGROUP(gpio_x1_aud_px1, SPI6, RSVD2, GMI, RSVD4, SPI6, 0x336c, N, N, N),
+ PINGROUP(gpio_x3_aud_px3, SPI6, SPI1, GMI, RSVD4, SPI6, 0x3370, N, N, N),
+ PINGROUP(dvfs_clk_px2, SPI6, CLDVFS, GMI, RSVD4, SPI6, 0x3374, N, N, N),
+ PINGROUP(gpio_x4_aud_px4, GMI, SPI1, SPI2, DAP2, SPI1, 0x3378, N, N, N),
+ PINGROUP(gpio_x5_aud_px5, GMI, SPI1, SPI2, RSVD4, SPI1, 0x337c, N, N, N),
+ PINGROUP(gpio_x6_aud_px6, SPI6, SPI1, SPI2, GMI, SPI1, 0x3380, N, N, N),
+ PINGROUP(gpio_x7_aud_px7, RSVD1, SPI1, SPI2, RSVD4, SPI1, 0x3384, N, N, N),
+ PINGROUP(sdmmc3_clk_pa6, SDMMC3, RSVD2, RSVD3, SPI3, SDMMC3, 0x3390, N, N, N),
+ PINGROUP(sdmmc3_cmd_pa7, SDMMC3, PWM3, UARTA, SPI3, SDMMC3, 0x3394, N, N, N),
+ PINGROUP(sdmmc3_dat0_pb7, SDMMC3, RSVD2, RSVD3, SPI3, SDMMC3, 0x3398, N, N, N),
+ PINGROUP(sdmmc3_dat1_pb6, SDMMC3, PWM2, UARTA, SPI3, SDMMC3, 0x339c, N, N, N),
+ PINGROUP(sdmmc3_dat2_pb5, SDMMC3, PWM1, DISPLAYA, SPI3, SDMMC3, 0x33a0, N, N, N),
+ PINGROUP(sdmmc3_dat3_pb4, SDMMC3, PWM0, DISPLAYB, SPI3, SDMMC3, 0x33a4, N, N, N),
+ PINGROUP(pex_l0_rst_n_pdd1, PE0, RSVD2, RSVD3, RSVD4, PE0, 0x33bc, N, N, N),
+ PINGROUP(pex_l0_clkreq_n_pdd2, PE0, RSVD2, RSVD3, RSVD4, PE0, 0x33c0, N, N, N),
+ PINGROUP(pex_wake_n_pdd3, PE, RSVD2, RSVD3, RSVD4, PE, 0x33c4, N, N, N),
+ PINGROUP(pex_l1_rst_n_pdd5, PE1, RSVD2, RSVD3, RSVD4, PE1, 0x33cc, N, N, N),
+ PINGROUP(pex_l1_clkreq_n_pdd6, PE1, RSVD2, RSVD3, RSVD4, PE1, 0x33d0, N, N, N),
+ PINGROUP(hdmi_cec_pee3, CEC, RSVD2, RSVD3, RSVD4, CEC, 0x33e0, Y, N, N),
+ PINGROUP(sdmmc1_wp_n_pv3, SDMMC1, CLK12, SPI4, UARTA, SDMMC1, 0x33e4, N, N, N),
+ PINGROUP(sdmmc3_cd_n_pv2, SDMMC3, OWR, RSVD3, RSVD4, SDMMC3, 0x33e8, N, N, N),
+ PINGROUP(gpio_w2_aud_pw2, SPI6, RSVD2, SPI2, I2C1, RSVD2, 0x33ec, N, N, N),
+ PINGROUP(gpio_w3_aud_pw3, SPI6, SPI1, SPI2, I2C1, SPI1, 0x33f0, N, N, N),
+ PINGROUP(usb_vbus_en0_pn4, USB, RSVD2, RSVD3, RSVD4, USB, 0x33f4, Y, N, N),
+ PINGROUP(usb_vbus_en1_pn5, USB, RSVD2, RSVD3, RSVD4, USB, 0x33f8, Y, N, N),
+ PINGROUP(sdmmc3_clk_lb_out_pee4, SDMMC3, RSVD2, RSVD3, RSVD4, SDMMC3, 0x33fc, N, N, N),
+ PINGROUP(sdmmc3_clk_lb_in_pee5, SDMMC3, RSVD2, RSVD3, RSVD4, SDMMC3, 0x3400, N, N, N),
+ PINGROUP(gmi_clk_lb, SDMMC2, RSVD2, GMI, RSVD4, SDMMC2, 0x3404, N, N, N),
+ PINGROUP(reset_out_n, RSVD1, RSVD2, RSVD3, RESET_OUT_N, RSVD1, 0x3408, N, N, N),
+ PINGROUP(kb_row16_pt0, KBC, RSVD2, RSVD3, UARTC, KBC, 0x340c, N, N, N),
+ PINGROUP(kb_row17_pt1, KBC, RSVD2, RSVD3, UARTC, KBC, 0x3410, N, N, N),
+ PINGROUP(usb_vbus_en2_pff1, USB, RSVD2, RSVD3, RSVD4, USB, 0x3414, Y, N, N),
+ PINGROUP(pff2, SATA, RSVD2, RSVD3, RSVD4, RSVD2, 0x3418, Y, N, N),
+ PINGROUP(dp_hpd_pff0, DP, RSVD2, RSVD3, RSVD4, DP, 0x3430, N, N, N),
+
+ /* pg_name, r, hsm_b, schmitt_b, lpmd_b, drvdn_b, drvdn_w, drvup_b, drvup_w, slwr_b, slwr_w, slwf_b, slwf_w, drvtype */
+ DRV_PINGROUP(ao1, 0x868, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ao2, 0x86c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(at1, 0x870, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(at2, 0x874, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(at3, 0x878, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(at4, 0x87c, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(at5, 0x880, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(cdev1, 0x884, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(cdev2, 0x888, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dap1, 0x890, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dap2, 0x894, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dap3, 0x898, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dap4, 0x89c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dbg, 0x8a0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(sdio3, 0x8b0, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, N),
+ DRV_PINGROUP(spi, 0x8b4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uaa, 0x8b8, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uab, 0x8bc, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uart2, 0x8c0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uart3, 0x8c4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(sdio1, 0x8ec, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ddc, 0x8fc, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gma, 0x900, 2, 3, 4, 14, 5, 20, 5, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(gme, 0x910, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gmf, 0x914, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gmg, 0x918, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gmh, 0x91c, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(owr, 0x920, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uda, 0x924, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gpv, 0x928, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dev3, 0x92c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(cec, 0x938, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(at6, 0x994, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(dap5, 0x998, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(usb_vbus_en, 0x99c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ao3, 0x9a8, 2, 3, 4, 12, 5, -1, -1, 28, 2, -1, -1, N),
+ DRV_PINGROUP(ao0, 0x9b0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(hv0, 0x9b4, 2, 3, 4, 12, 5, -1, -1, 28, 2, -1, -1, N),
+ DRV_PINGROUP(sdio4, 0x9c4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ao4, 0x9c8, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+};
+
+static const struct tegra_pinctrl_soc_data tegra124_pinctrl = {
+ .ngpios = NUM_GPIOS,
+ .pins = tegra124_pins,
+ .npins = ARRAY_SIZE(tegra124_pins),
+ .functions = tegra124_functions,
+ .nfunctions = ARRAY_SIZE(tegra124_functions),
+ .groups = tegra124_groups,
+ .ngroups = ARRAY_SIZE(tegra124_groups),
+};
+
+static int tegra124_pinctrl_probe(struct platform_device *pdev)
+{
+ return tegra_pinctrl_probe(pdev, &tegra124_pinctrl);
+}
+
+static struct of_device_id tegra124_pinctrl_of_match[] = {
+ { .compatible = "nvidia,tegra124-pinmux", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra124_pinctrl_of_match);
+
+static struct platform_driver tegra124_pinctrl_driver = {
+ .driver = {
+ .name = "tegra124-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = tegra124_pinctrl_of_match,
+ },
+ .probe = tegra124_pinctrl_probe,
+ .remove = tegra_pinctrl_remove,
+};
+module_platform_driver(tegra124_pinctrl_driver);
+
+MODULE_AUTHOR("Ashwini Ghuge <aghuge@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra124 pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index ed2d1ba69cef..e66f4cae7633 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -332,10 +332,10 @@ static const struct ltq_pin_group xway_grps[] = {
GRP_MUX("mdio", MDIO, pins_mdio),
GRP_MUX("gphy0 led0", GPHY, pins_gphy0_led0),
GRP_MUX("gphy0 led1", GPHY, pins_gphy0_led1),
- GRP_MUX("gphy0 lde2", GPHY, pins_gphy0_led2),
+ GRP_MUX("gphy0 led2", GPHY, pins_gphy0_led2),
GRP_MUX("gphy1 led0", GPHY, pins_gphy1_led0),
GRP_MUX("gphy1 led1", GPHY, pins_gphy1_led1),
- GRP_MUX("gphy1 lde2", GPHY, pins_gphy1_led2),
+ GRP_MUX("gphy1 led2", GPHY, pins_gphy1_led2),
};
static const struct ltq_pin_group ase_grps[] = {
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index d77ece5217f0..b9b464d0578c 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -26,29 +26,67 @@
#include "core.h"
-static int sh_pfc_ioremap(struct sh_pfc *pfc, struct platform_device *pdev)
+static int sh_pfc_map_resources(struct sh_pfc *pfc,
+ struct platform_device *pdev)
{
+ unsigned int num_windows = 0;
+ unsigned int num_irqs = 0;
+ struct sh_pfc_window *windows;
+ unsigned int *irqs = NULL;
struct resource *res;
- int k;
+ unsigned int i;
+
+ /* Count the MEM and IRQ resources. */
+ for (i = 0; i < pdev->num_resources; ++i) {
+ switch (resource_type(&pdev->resource[i])) {
+ case IORESOURCE_MEM:
+ num_windows++;
+ break;
+
+ case IORESOURCE_IRQ:
+ num_irqs++;
+ break;
+ }
+ }
- if (pdev->num_resources == 0)
+ if (num_windows == 0)
return -EINVAL;
- pfc->window = devm_kzalloc(pfc->dev, pdev->num_resources *
- sizeof(*pfc->window), GFP_NOWAIT);
- if (!pfc->window)
+ /* Allocate memory windows and IRQs arrays. */
+ windows = devm_kzalloc(pfc->dev, num_windows * sizeof(*windows),
+ GFP_KERNEL);
+ if (windows == NULL)
return -ENOMEM;
- pfc->num_windows = pdev->num_resources;
+ pfc->num_windows = num_windows;
+ pfc->windows = windows;
- for (k = 0, res = pdev->resource; k < pdev->num_resources; k++, res++) {
- WARN_ON(resource_type(res) != IORESOURCE_MEM);
- pfc->window[k].phys = res->start;
- pfc->window[k].size = resource_size(res);
- pfc->window[k].virt = devm_ioremap_nocache(pfc->dev, res->start,
- resource_size(res));
- if (!pfc->window[k].virt)
+ if (num_irqs) {
+ irqs = devm_kzalloc(pfc->dev, num_irqs * sizeof(*irqs),
+ GFP_KERNEL);
+ if (irqs == NULL)
return -ENOMEM;
+
+ pfc->num_irqs = num_irqs;
+ pfc->irqs = irqs;
+ }
+
+ /* Fill them. */
+ for (i = 0, res = pdev->resource; i < pdev->num_resources; i++, res++) {
+ switch (resource_type(res)) {
+ case IORESOURCE_MEM:
+ windows->phys = res->start;
+ windows->size = resource_size(res);
+ windows->virt = devm_ioremap_resource(pfc->dev, res);
+ if (IS_ERR(windows->virt))
+ return -ENOMEM;
+ windows++;
+ break;
+
+ case IORESOURCE_IRQ:
+ *irqs++ = res->start;
+ break;
+ }
}
return 0;
@@ -62,7 +100,7 @@ static void __iomem *sh_pfc_phys_to_virt(struct sh_pfc *pfc,
/* scan through physical windows and convert address */
for (i = 0; i < pfc->num_windows; i++) {
- window = pfc->window + i;
+ window = pfc->windows + i;
if (address < window->phys)
continue;
@@ -147,7 +185,7 @@ static void sh_pfc_config_reg_helper(struct sh_pfc *pfc,
unsigned long *maskp,
unsigned long *posp)
{
- int k;
+ unsigned int k;
*mapped_regp = sh_pfc_phys_to_virt(pfc, crp->reg);
@@ -196,7 +234,7 @@ static int sh_pfc_get_config_reg(struct sh_pfc *pfc, u16 enum_id,
{
const struct pinmux_cfg_reg *config_reg;
unsigned long r_width, f_width, curr_width, ncomb;
- int k, m, n, pos, bit_pos;
+ unsigned int k, m, n, pos, bit_pos;
k = 0;
while (1) {
@@ -238,7 +276,7 @@ static int sh_pfc_mark_to_enum(struct sh_pfc *pfc, u16 mark, int pos,
u16 *enum_idp)
{
const u16 *data = pfc->info->gpio_data;
- int k;
+ unsigned int k;
if (pos) {
*enum_idp = data[pos + 1];
@@ -481,7 +519,7 @@ static int sh_pfc_probe(struct platform_device *pdev)
pfc->info = info;
pfc->dev = &pdev->dev;
- ret = sh_pfc_ioremap(pfc, pdev);
+ ret = sh_pfc_map_resources(pfc, pdev);
if (unlikely(ret < 0))
return ret;
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
index 11ea87268658..b7b0e6ccf305 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -37,7 +37,9 @@ struct sh_pfc {
spinlock_t lock;
unsigned int num_windows;
- struct sh_pfc_window *window;
+ struct sh_pfc_window *windows;
+ unsigned int num_irqs;
+ unsigned int *irqs;
struct sh_pfc_pin_range *ranges;
unsigned int nr_ranges;
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index 04bf52b64fb3..a9288ab01f7b 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -204,18 +204,24 @@ static void gpio_pin_set(struct gpio_chip *gc, unsigned offset, int value)
static int gpio_pin_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct sh_pfc *pfc = gpio_to_pfc(gc);
- int i, k;
+ unsigned int i, k;
for (i = 0; i < pfc->info->gpio_irq_size; i++) {
- unsigned short *gpios = pfc->info->gpio_irq[i].gpios;
+ const short *gpios = pfc->info->gpio_irq[i].gpios;
- for (k = 0; gpios[k]; k++) {
+ for (k = 0; gpios[k] >= 0; k++) {
if (gpios[k] == offset)
- return pfc->info->gpio_irq[i].irq;
+ goto found;
}
}
return -ENOSYS;
+
+found:
+ if (pfc->num_irqs)
+ return pfc->irqs[i];
+ else
+ return pfc->info->gpio_irq[i].irq;
}
static int gpio_pin_setup(struct sh_pfc_chip *chip)
@@ -347,7 +353,7 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
* GPIOs.
*/
for (i = 0; i < pfc->num_windows; ++i) {
- struct sh_pfc_window *window = &pfc->window[i];
+ struct sh_pfc_window *window = &pfc->windows[i];
if (pfc->info->data_regs[0].reg >= window->phys &&
pfc->info->data_regs[0].reg < window->phys + window->size)
@@ -357,8 +363,14 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
if (i == pfc->num_windows)
return 0;
+ /* If we have IRQ resources make sure their number is correct. */
+ if (pfc->num_irqs && pfc->num_irqs != pfc->info->gpio_irq_size) {
+ dev_err(pfc->dev, "invalid number of IRQ resources\n");
+ return -EINVAL;
+ }
+
/* Register the real GPIOs chip. */
- chip = sh_pfc_add_gpiochip(pfc, gpio_pin_setup, &pfc->window[i]);
+ chip = sh_pfc_add_gpiochip(pfc, gpio_pin_setup, &pfc->windows[i]);
if (IS_ERR(chip))
return PTR_ERR(chip);
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
index d25fd4ea0a1d..d39ca87353e4 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
@@ -1272,7 +1272,7 @@ static const u16 pinmux_data[] = {
#define R8A73A4_PIN_IO_PU_PD(pin) SH_PFC_PIN_CFG(pin, __IO | __PUD)
#define R8A73A4_PIN_O(pin) SH_PFC_PIN_CFG(pin, __O)
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
R8A73A4_PIN_IO_PU_PD(0), R8A73A4_PIN_IO_PU_PD(1),
R8A73A4_PIN_IO_PU_PD(2), R8A73A4_PIN_IO_PU_PD(3),
R8A73A4_PIN_IO_PU_PD(4), R8A73A4_PIN_IO_PU_PD(5),
@@ -2061,17 +2061,6 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(sdhi2),
};
-#undef PORTCR
-#define PORTCR(nr, reg) \
- { \
- PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
- _PCRH(PORT##nr##_IN, 0, 0, PORT##nr##_OUT), \
- PORT##nr##_FN0, PORT##nr##_FN1, \
- PORT##nr##_FN2, PORT##nr##_FN3, \
- PORT##nr##_FN4, PORT##nr##_FN5, \
- PORT##nr##_FN6, PORT##nr##_FN7 } \
- }
-
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PORTCR(0, 0xe6050000),
PORTCR(1, 0xe6050001),
@@ -2691,7 +2680,7 @@ static unsigned int r8a73a4_pinmux_get_bias(struct sh_pfc *pfc,
{
void __iomem *addr;
- addr = pfc->window->virt + r8a73a4_portcr_offsets[pin >> 5] + pin;
+ addr = pfc->windows->virt + r8a73a4_portcr_offsets[pin >> 5] + pin;
switch (ioread8(addr) & PORTCR_PULMD_MASK) {
case PORTCR_PULMD_UP:
@@ -2710,7 +2699,7 @@ static void r8a73a4_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
void __iomem *addr;
u32 value;
- addr = pfc->window->virt + r8a73a4_portcr_offsets[pin >> 5] + pin;
+ addr = pfc->windows->virt + r8a73a4_portcr_offsets[pin >> 5] + pin;
value = ioread8(addr) & ~PORTCR_PULMD_MASK;
switch (bias) {
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
index bc5eb453a45c..6c83ce43a940 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -1543,7 +1543,7 @@ static const u16 pinmux_data[] = {
#define R8A7740_PIN_O(pin) SH_PFC_PIN_CFG(pin, __O)
#define R8A7740_PIN_O_PU_PD(pin) SH_PFC_PIN_CFG(pin, __O | __PUD)
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
/* Table 56-1 (I/O and Pull U/D) */
R8A7740_PIN_IO_PD(0), R8A7740_PIN_IO_PD(1),
R8A7740_PIN_IO_PD(2), R8A7740_PIN_IO_PD(3),
@@ -3234,17 +3234,6 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(tpu0),
};
-#undef PORTCR
-#define PORTCR(nr, reg) \
- { \
- PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
- _PCRH(PORT##nr##_IN, 0, 0, PORT##nr##_OUT), \
- PORT##nr##_FN0, PORT##nr##_FN1, \
- PORT##nr##_FN2, PORT##nr##_FN3, \
- PORT##nr##_FN4, PORT##nr##_FN5, \
- PORT##nr##_FN6, PORT##nr##_FN7 } \
- }
-
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PORTCR(0, 0xe6050000), /* PORT0CR */
PORTCR(1, 0xe6050001), /* PORT1CR */
@@ -3721,7 +3710,7 @@ static void __iomem *r8a7740_pinmux_portcr(struct sh_pfc *pfc, unsigned int pin)
&r8a7740_portcr_offsets[i];
if (pin <= group->end_pin)
- return pfc->window->virt + group->offset + pin;
+ return pfc->windows->virt + group->offset + pin;
}
return NULL;
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index 8b1881c20598..c7d610d1f3ef 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -1260,7 +1260,7 @@ static const u16 pinmux_data[] = {
*/
#define PIN_NUMBER(row, col) (1000+((row)-1)*25+(col)-1)
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
/* Pins not associated with a GPIO port */
@@ -2104,7 +2104,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(vin1),
};
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("GPSR0", 0xfffc0004, 32, 1) {
GP_0_31_FN, FN_IP1_14_11,
GP_0_30_FN, FN_IP1_10_8,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
index d3e94e307d7f..f5c01e1e2615 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
@@ -1410,7 +1410,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP12_17_15, SCK4_B, SEL_SCIF4_1),
};
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index 72786fc93958..c381ae63c508 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -1731,7 +1731,7 @@ static const u16 pinmux_data[] = {
#define PIN_NUMBER(r, c) (((r) - 'A') * 31 + (c) + 200)
#define PIN_A_NUMBER(r, c) PIN_NUMBER(ROW_GROUP_A(r), c)
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
/* Pins not associated with a GPIO port */
@@ -1739,6 +1739,56 @@ static struct sh_pfc_pin pinmux_pins[] = {
SH_PFC_PIN_NAMED(ROW_GROUP_A('H'), 15, AH15),
};
+/* - AUDIO CLOCK ------------------------------------------------------------ */
+static const unsigned int audio_clk_a_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(4, 25),
+};
+static const unsigned int audio_clk_a_mux[] = {
+ AUDIO_CLKA_MARK,
+};
+static const unsigned int audio_clk_b_pins[] = {
+ /* CLK B */
+ RCAR_GP_PIN(4, 26),
+};
+static const unsigned int audio_clk_b_mux[] = {
+ AUDIO_CLKB_MARK,
+};
+static const unsigned int audio_clk_c_pins[] = {
+ /* CLK C */
+ RCAR_GP_PIN(5, 27),
+};
+static const unsigned int audio_clk_c_mux[] = {
+ AUDIO_CLKC_MARK,
+};
+static const unsigned int audio_clkout_pins[] = {
+ /* CLK OUT */
+ RCAR_GP_PIN(5, 16),
+};
+static const unsigned int audio_clkout_mux[] = {
+ AUDIO_CLKOUT_MARK,
+};
+static const unsigned int audio_clkout_b_pins[] = {
+ /* CLK OUT B */
+ RCAR_GP_PIN(0, 23),
+};
+static const unsigned int audio_clkout_b_mux[] = {
+ AUDIO_CLKOUT_B_MARK,
+};
+static const unsigned int audio_clkout_c_pins[] = {
+ /* CLK OUT C */
+ RCAR_GP_PIN(5, 27),
+};
+static const unsigned int audio_clkout_c_mux[] = {
+ AUDIO_CLKOUT_C_MARK,
+};
+static const unsigned int audio_clkout_d_pins[] = {
+ /* CLK OUT D */
+ RCAR_GP_PIN(5, 20),
+};
+static const unsigned int audio_clkout_d_mux[] = {
+ AUDIO_CLKOUT_D_MARK,
+};
/* - DU RGB ----------------------------------------------------------------- */
static const unsigned int du_rgb666_pins[] = {
/* R[7:2], G[7:2], B[7:2] */
@@ -2961,6 +3011,189 @@ static const unsigned int sdhi3_wp_pins[] = {
static const unsigned int sdhi3_wp_mux[] = {
SD3_WP_MARK,
};
+/* - SSI -------------------------------------------------------------------- */
+static const unsigned int ssi0_data_pins[] = {
+ /* SDATA0 */
+ RCAR_GP_PIN(4, 5),
+};
+static const unsigned int ssi0_data_mux[] = {
+ SSI_SDATA0_MARK,
+};
+static const unsigned int ssi0129_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(4, 3), RCAR_GP_PIN(4, 4),
+};
+static const unsigned int ssi0129_ctrl_mux[] = {
+ SSI_SCK0129_MARK, SSI_WS0129_MARK,
+};
+static const unsigned int ssi1_data_pins[] = {
+ /* SDATA1 */
+ RCAR_GP_PIN(4, 6),
+};
+static const unsigned int ssi1_data_mux[] = {
+ SSI_SDATA1_MARK,
+};
+static const unsigned int ssi1_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 24),
+};
+static const unsigned int ssi1_ctrl_mux[] = {
+ SSI_SCK1_MARK, SSI_WS1_MARK,
+};
+static const unsigned int ssi2_data_pins[] = {
+ /* SDATA2 */
+ RCAR_GP_PIN(4, 7),
+};
+static const unsigned int ssi2_data_mux[] = {
+ SSI_SDATA2_MARK,
+};
+static const unsigned int ssi2_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 17),
+};
+static const unsigned int ssi2_ctrl_mux[] = {
+ SSI_SCK2_MARK, SSI_WS2_MARK,
+};
+static const unsigned int ssi3_data_pins[] = {
+ /* SDATA3 */
+ RCAR_GP_PIN(4, 10),
+};
+static const unsigned int ssi3_data_mux[] = {
+ SSI_SDATA3_MARK
+};
+static const unsigned int ssi34_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(4, 8), RCAR_GP_PIN(4, 9),
+};
+static const unsigned int ssi34_ctrl_mux[] = {
+ SSI_SCK34_MARK, SSI_WS34_MARK,
+};
+static const unsigned int ssi4_data_pins[] = {
+ /* SDATA4 */
+ RCAR_GP_PIN(4, 13),
+};
+static const unsigned int ssi4_data_mux[] = {
+ SSI_SDATA4_MARK,
+};
+static const unsigned int ssi4_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+};
+static const unsigned int ssi4_ctrl_mux[] = {
+ SSI_SCK4_MARK, SSI_WS4_MARK,
+};
+static const unsigned int ssi5_pins[] = {
+ /* SDATA5, SCK, WS */
+ RCAR_GP_PIN(4, 16), RCAR_GP_PIN(4, 14), RCAR_GP_PIN(4, 15),
+};
+static const unsigned int ssi5_mux[] = {
+ SSI_SDATA5_MARK, SSI_SCK5_MARK, SSI_WS5_MARK,
+};
+static const unsigned int ssi5_b_pins[] = {
+ /* SDATA5, SCK, WS */
+ RCAR_GP_PIN(0, 26), RCAR_GP_PIN(0, 24), RCAR_GP_PIN(0, 25),
+};
+static const unsigned int ssi5_b_mux[] = {
+ SSI_SDATA5_B_MARK, SSI_SCK5_B_MARK, SSI_WS5_B_MARK
+};
+static const unsigned int ssi5_c_pins[] = {
+ /* SDATA5, SCK, WS */
+ RCAR_GP_PIN(4, 24), RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+};
+static const unsigned int ssi5_c_mux[] = {
+ SSI_SDATA5_C_MARK, SSI_SCK5_C_MARK, SSI_WS5_C_MARK,
+};
+static const unsigned int ssi6_pins[] = {
+ /* SDATA6, SCK, WS */
+ RCAR_GP_PIN(4, 19), RCAR_GP_PIN(4, 17), RCAR_GP_PIN(4, 18),
+};
+static const unsigned int ssi6_mux[] = {
+ SSI_SDATA6_MARK, SSI_SCK6_MARK, SSI_WS6_MARK,
+};
+static const unsigned int ssi6_b_pins[] = {
+ /* SDATA6, SCK, WS */
+ RCAR_GP_PIN(1, 29), RCAR_GP_PIN(1, 25), RCAR_GP_PIN(1, 27),
+};
+static const unsigned int ssi6_b_mux[] = {
+ SSI_SDATA6_B_MARK, SSI_SCK6_B_MARK, SSI_WS6_B_MARK,
+};
+static const unsigned int ssi7_data_pins[] = {
+ /* SDATA7 */
+ RCAR_GP_PIN(4, 22),
+};
+static const unsigned int ssi7_data_mux[] = {
+ SSI_SDATA7_MARK,
+};
+static const unsigned int ssi7_b_data_pins[] = {
+ /* SDATA7 */
+ RCAR_GP_PIN(4, 22),
+};
+static const unsigned int ssi7_b_data_mux[] = {
+ SSI_SDATA7_B_MARK,
+};
+static const unsigned int ssi7_c_data_pins[] = {
+ /* SDATA7 */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int ssi7_c_data_mux[] = {
+ SSI_SDATA7_C_MARK,
+};
+static const unsigned int ssi78_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(4, 20), RCAR_GP_PIN(4, 21),
+};
+static const unsigned int ssi78_ctrl_mux[] = {
+ SSI_SCK78_MARK, SSI_WS78_MARK,
+};
+static const unsigned int ssi78_b_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 24),
+};
+static const unsigned int ssi78_b_ctrl_mux[] = {
+ SSI_SCK78_B_MARK, SSI_WS78_B_MARK,
+};
+static const unsigned int ssi78_c_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(1, 24), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int ssi78_c_ctrl_mux[] = {
+ SSI_SCK78_C_MARK, SSI_WS78_C_MARK,
+};
+static const unsigned int ssi8_data_pins[] = {
+ /* SDATA8 */
+ RCAR_GP_PIN(4, 23),
+};
+static const unsigned int ssi8_data_mux[] = {
+ SSI_SDATA8_MARK,
+};
+static const unsigned int ssi8_b_data_pins[] = {
+ /* SDATA8 */
+ RCAR_GP_PIN(4, 23),
+};
+static const unsigned int ssi8_b_data_mux[] = {
+ SSI_SDATA8_B_MARK,
+};
+static const unsigned int ssi8_c_data_pins[] = {
+ /* SDATA8 */
+ RCAR_GP_PIN(1, 27),
+};
+static const unsigned int ssi8_c_data_mux[] = {
+ SSI_SDATA8_C_MARK,
+};
+static const unsigned int ssi9_data_pins[] = {
+ /* SDATA9 */
+ RCAR_GP_PIN(4, 24),
+};
+static const unsigned int ssi9_data_mux[] = {
+ SSI_SDATA9_MARK,
+};
+static const unsigned int ssi9_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(5, 10), RCAR_GP_PIN(5, 11),
+};
+static const unsigned int ssi9_ctrl_mux[] = {
+ SSI_SCK9_MARK, SSI_WS9_MARK,
+};
/* - TPU0 ------------------------------------------------------------------- */
static const unsigned int tpu0_to0_pins[] = {
/* TO */
@@ -3014,59 +3247,110 @@ static const unsigned int usb2_pins[] = {
static const unsigned int usb2_mux[] = {
USB2_PWEN_MARK, USB2_OVC_MARK,
};
-/* - VIN0 ------------------------------------------------------------------- */
-static const unsigned int vin0_data_g_pins[] = {
- RCAR_GP_PIN(0, 8), RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 10),
- RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
- RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
-};
-static const unsigned int vin0_data_g_mux[] = {
- VI0_G0_MARK, VI0_G1_MARK, VI0_G2_MARK,
- VI0_G3_MARK, VI0_G4_MARK, VI0_G5_MARK,
- VI0_G6_MARK, VI0_G7_MARK,
+
+union vin_data {
+ unsigned int data24[24];
+ unsigned int data20[20];
+ unsigned int data16[16];
+ unsigned int data12[12];
+ unsigned int data10[10];
+ unsigned int data8[8];
+ unsigned int data4[4];
};
-static const unsigned int vin0_data_r_pins[] = {
- RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5), RCAR_GP_PIN(0, 6),
- RCAR_GP_PIN(0, 7), RCAR_GP_PIN(0, 24), RCAR_GP_PIN(0, 25),
- RCAR_GP_PIN(0, 26), RCAR_GP_PIN(1, 11),
+
+#define VIN_DATA_PIN_GROUP(n, s) \
+ { \
+ .name = #n#s, \
+ .pins = n##_pins.data##s, \
+ .mux = n##_mux.data##s, \
+ .nr_pins = ARRAY_SIZE(n##_pins.data##s), \
+ }
+
+/* - VIN0 ------------------------------------------------------------------- */
+static const union vin_data vin0_data_pins = {
+ .data24 = {
+ /* B */
+ RCAR_GP_PIN(2, 1), RCAR_GP_PIN(2, 2),
+ RCAR_GP_PIN(2, 3), RCAR_GP_PIN(2, 4),
+ RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 6),
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+ /* G */
+ RCAR_GP_PIN(0, 8), RCAR_GP_PIN(0, 9),
+ RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
+ RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+ /* R */
+ RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
+ RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
+ RCAR_GP_PIN(0, 24), RCAR_GP_PIN(0, 25),
+ RCAR_GP_PIN(0, 26), RCAR_GP_PIN(1, 11),
+ },
};
-static const unsigned int vin0_data_r_mux[] = {
- VI0_R0_MARK, VI0_R1_MARK, VI0_R2_MARK,
- VI0_R3_MARK, VI0_R4_MARK, VI0_R5_MARK,
- VI0_R6_MARK, VI0_R7_MARK,
+static const union vin_data vin0_data_mux = {
+ .data24 = {
+ /* B */
+ VI0_DATA0_VI0_B0_MARK, VI0_DATA1_VI0_B1_MARK,
+ VI0_DATA2_VI0_B2_MARK, VI0_DATA3_VI0_B3_MARK,
+ VI0_DATA4_VI0_B4_MARK, VI0_DATA5_VI0_B5_MARK,
+ VI0_DATA6_VI0_B6_MARK, VI0_DATA7_VI0_B7_MARK,
+ /* G */
+ VI0_G0_MARK, VI0_G1_MARK,
+ VI0_G2_MARK, VI0_G3_MARK,
+ VI0_G4_MARK, VI0_G5_MARK,
+ VI0_G6_MARK, VI0_G7_MARK,
+ /* R */
+ VI0_R0_MARK, VI0_R1_MARK,
+ VI0_R2_MARK, VI0_R3_MARK,
+ VI0_R4_MARK, VI0_R5_MARK,
+ VI0_R6_MARK, VI0_R7_MARK,
+ },
};
-static const unsigned int vin0_data_b_pins[] = {
- RCAR_GP_PIN(2, 1), RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
- RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 6),
+static const unsigned int vin0_data18_pins[] = {
+ /* B */
+ RCAR_GP_PIN(2, 3), RCAR_GP_PIN(2, 4),
+ RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 6),
RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+ /* G */
+ RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
+ RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+ /* R */
+ RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
+ RCAR_GP_PIN(0, 24), RCAR_GP_PIN(0, 25),
+ RCAR_GP_PIN(0, 26), RCAR_GP_PIN(1, 11),
};
-static const unsigned int vin0_data_b_mux[] = {
- VI0_DATA0_VI0_B0_MARK, VI0_DATA1_VI0_B1_MARK, VI0_DATA2_VI0_B2_MARK,
- VI0_DATA3_VI0_B3_MARK, VI0_DATA4_VI0_B4_MARK, VI0_DATA5_VI0_B5_MARK,
+static const unsigned int vin0_data18_mux[] = {
+ /* B */
+ VI0_DATA2_VI0_B2_MARK, VI0_DATA3_VI0_B3_MARK,
+ VI0_DATA4_VI0_B4_MARK, VI0_DATA5_VI0_B5_MARK,
VI0_DATA6_VI0_B6_MARK, VI0_DATA7_VI0_B7_MARK,
+ /* G */
+ VI0_G2_MARK, VI0_G3_MARK,
+ VI0_G4_MARK, VI0_G5_MARK,
+ VI0_G6_MARK, VI0_G7_MARK,
+ /* R */
+ VI0_R2_MARK, VI0_R3_MARK,
+ VI0_R4_MARK, VI0_R5_MARK,
+ VI0_R6_MARK, VI0_R7_MARK,
};
-static const unsigned int vin0_hsync_signal_pins[] = {
- RCAR_GP_PIN(0, 12),
+static const unsigned int vin0_sync_pins[] = {
+ RCAR_GP_PIN(0, 12), /* HSYNC */
+ RCAR_GP_PIN(0, 13), /* VSYNC */
};
-static const unsigned int vin0_hsync_signal_mux[] = {
+static const unsigned int vin0_sync_mux[] = {
VI0_HSYNC_N_MARK,
-};
-static const unsigned int vin0_vsync_signal_pins[] = {
- RCAR_GP_PIN(0, 13),
-};
-static const unsigned int vin0_vsync_signal_mux[] = {
VI0_VSYNC_N_MARK,
};
-static const unsigned int vin0_field_signal_pins[] = {
+static const unsigned int vin0_field_pins[] = {
RCAR_GP_PIN(0, 15),
};
-static const unsigned int vin0_field_signal_mux[] = {
+static const unsigned int vin0_field_mux[] = {
VI0_FIELD_MARK,
};
-static const unsigned int vin0_data_enable_pins[] = {
+static const unsigned int vin0_clkenb_pins[] = {
RCAR_GP_PIN(0, 14),
};
-static const unsigned int vin0_data_enable_mux[] = {
+static const unsigned int vin0_clkenb_mux[] = {
VI0_CLKENB_MARK,
};
static const unsigned int vin0_clk_pins[] = {
@@ -3076,15 +3360,91 @@ static const unsigned int vin0_clk_mux[] = {
VI0_CLK_MARK,
};
/* - VIN1 ------------------------------------------------------------------- */
-static const unsigned int vin1_data_pins[] = {
- RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 12),
- RCAR_GP_PIN(2, 13), RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15),
- RCAR_GP_PIN(2, 16), RCAR_GP_PIN(2, 17),
+static const union vin_data vin1_data_pins = {
+ .data24 = {
+ /* B */
+ RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11),
+ RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 13),
+ RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15),
+ RCAR_GP_PIN(2, 16), RCAR_GP_PIN(2, 17),
+ /* G */
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 20),
+ RCAR_GP_PIN(1, 22), RCAR_GP_PIN(1, 12),
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 7),
+ /* R */
+ RCAR_GP_PIN(0, 27), RCAR_GP_PIN(0, 28),
+ RCAR_GP_PIN(0, 29), RCAR_GP_PIN(1, 4),
+ RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6),
+ RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 8),
+ },
};
-static const unsigned int vin1_data_mux[] = {
- VI1_DATA0_VI1_B0_MARK, VI1_DATA1_VI1_B1_MARK, VI1_DATA2_VI1_B2_MARK,
- VI1_DATA3_VI1_B3_MARK, VI1_DATA4_VI1_B4_MARK, VI1_DATA5_VI1_B5_MARK,
+static const union vin_data vin1_data_mux = {
+ .data24 = {
+ /* B */
+ VI1_DATA0_VI1_B0_MARK, VI1_DATA1_VI1_B1_MARK,
+ VI1_DATA2_VI1_B2_MARK, VI1_DATA3_VI1_B3_MARK,
+ VI1_DATA4_VI1_B4_MARK, VI1_DATA5_VI1_B5_MARK,
+ VI1_DATA6_VI1_B6_MARK, VI1_DATA7_VI1_B7_MARK,
+ /* G */
+ VI1_G0_MARK, VI1_G1_MARK,
+ VI1_G2_MARK, VI1_G3_MARK,
+ VI1_G4_MARK, VI1_G5_MARK,
+ VI1_G6_MARK, VI1_G7_MARK,
+ /* R */
+ VI1_R0_MARK, VI1_R1_MARK,
+ VI1_R2_MARK, VI1_R3_MARK,
+ VI1_R4_MARK, VI1_R5_MARK,
+ VI1_R6_MARK, VI1_R7_MARK,
+ },
+};
+static const unsigned int vin1_data18_pins[] = {
+ /* B */
+ RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 13),
+ RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15),
+ RCAR_GP_PIN(2, 16), RCAR_GP_PIN(2, 17),
+ /* G */
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 20),
+ RCAR_GP_PIN(1, 22), RCAR_GP_PIN(1, 12),
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 7),
+ /* R */
+ RCAR_GP_PIN(0, 29), RCAR_GP_PIN(1, 4),
+ RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6),
+ RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int vin1_data18_mux[] = {
+ /* B */
+ VI1_DATA2_VI1_B2_MARK, VI1_DATA3_VI1_B3_MARK,
+ VI1_DATA4_VI1_B4_MARK, VI1_DATA5_VI1_B5_MARK,
VI1_DATA6_VI1_B6_MARK, VI1_DATA7_VI1_B7_MARK,
+ /* G */
+ VI1_G2_MARK, VI1_G3_MARK,
+ VI1_G4_MARK, VI1_G5_MARK,
+ VI1_G6_MARK, VI1_G7_MARK,
+ /* R */
+ VI1_R2_MARK, VI1_R3_MARK,
+ VI1_R4_MARK, VI1_R5_MARK,
+ VI1_R6_MARK, VI1_R7_MARK,
+};
+static const unsigned int vin1_sync_pins[] = {
+ RCAR_GP_PIN(1, 24), /* HSYNC */
+ RCAR_GP_PIN(1, 25), /* VSYNC */
+};
+static const unsigned int vin1_sync_mux[] = {
+ VI1_HSYNC_N_MARK,
+ VI1_VSYNC_N_MARK,
+};
+static const unsigned int vin1_field_pins[] = {
+ RCAR_GP_PIN(1, 13),
+};
+static const unsigned int vin1_field_mux[] = {
+ VI1_FIELD_MARK,
+};
+static const unsigned int vin1_clkenb_pins[] = {
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int vin1_clkenb_mux[] = {
+ VI1_CLKENB_MARK,
};
static const unsigned int vin1_clk_pins[] = {
RCAR_GP_PIN(2, 9),
@@ -3092,8 +3452,147 @@ static const unsigned int vin1_clk_pins[] = {
static const unsigned int vin1_clk_mux[] = {
VI1_CLK_MARK,
};
+/* - VIN2 ----------------------------------------------------------------- */
+static const union vin_data vin2_data_pins = {
+ .data24 = {
+ /* B */
+ RCAR_GP_PIN(0, 8), RCAR_GP_PIN(0, 9),
+ RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
+ /* G */
+ RCAR_GP_PIN(0, 27), RCAR_GP_PIN(0, 28),
+ RCAR_GP_PIN(0, 29), RCAR_GP_PIN(1, 10),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+ /* R */
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 20),
+ RCAR_GP_PIN(1, 22), RCAR_GP_PIN(1, 24),
+ },
+};
+static const union vin_data vin2_data_mux = {
+ .data24 = {
+ /* B */
+ VI2_DATA0_VI2_B0_MARK, VI2_DATA1_VI2_B1_MARK,
+ VI2_DATA2_VI2_B2_MARK, VI2_DATA3_VI2_B3_MARK,
+ VI2_DATA4_VI2_B4_MARK, VI2_DATA5_VI2_B5_MARK,
+ VI2_DATA6_VI2_B6_MARK, VI2_DATA7_VI2_B7_MARK,
+ /* G */
+ VI2_G0_MARK, VI2_G1_MARK,
+ VI2_G2_MARK, VI2_G3_MARK,
+ VI2_G4_MARK, VI2_G5_MARK,
+ VI2_G6_MARK, VI2_G7_MARK,
+ /* R */
+ VI2_R0_MARK, VI2_R1_MARK,
+ VI2_R2_MARK, VI2_R3_MARK,
+ VI2_R4_MARK, VI2_R5_MARK,
+ VI2_R6_MARK, VI2_R7_MARK,
+ },
+};
+static const unsigned int vin2_data18_pins[] = {
+ /* B */
+ RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
+ /* G */
+ RCAR_GP_PIN(0, 29), RCAR_GP_PIN(1, 10),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+ /* R */
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 20),
+ RCAR_GP_PIN(1, 22), RCAR_GP_PIN(1, 24),
+};
+static const unsigned int vin2_data18_mux[] = {
+ /* B */
+ VI2_DATA2_VI2_B2_MARK, VI2_DATA3_VI2_B3_MARK,
+ VI2_DATA4_VI2_B4_MARK, VI2_DATA5_VI2_B5_MARK,
+ VI2_DATA6_VI2_B6_MARK, VI2_DATA7_VI2_B7_MARK,
+ /* G */
+ VI2_G2_MARK, VI2_G3_MARK,
+ VI2_G4_MARK, VI2_G5_MARK,
+ VI2_G6_MARK, VI2_G7_MARK,
+ /* R */
+ VI2_R2_MARK, VI2_R3_MARK,
+ VI2_R4_MARK, VI2_R5_MARK,
+ VI2_R6_MARK, VI2_R7_MARK,
+};
+static const unsigned int vin2_sync_pins[] = {
+ RCAR_GP_PIN(1, 16), /* HSYNC */
+ RCAR_GP_PIN(1, 21), /* VSYNC */
+};
+static const unsigned int vin2_sync_mux[] = {
+ VI2_HSYNC_N_MARK,
+ VI2_VSYNC_N_MARK,
+};
+static const unsigned int vin2_field_pins[] = {
+ RCAR_GP_PIN(1, 9),
+};
+static const unsigned int vin2_field_mux[] = {
+ VI2_FIELD_MARK,
+};
+static const unsigned int vin2_clkenb_pins[] = {
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int vin2_clkenb_mux[] = {
+ VI2_CLKENB_MARK,
+};
+static const unsigned int vin2_clk_pins[] = {
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int vin2_clk_mux[] = {
+ VI2_CLK_MARK,
+};
+/* - VIN3 ----------------------------------------------------------------- */
+static const unsigned int vin3_data8_pins[] = {
+ RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+ RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
+ RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
+};
+static const unsigned int vin3_data8_mux[] = {
+ VI3_DATA0_MARK, VI3_DATA1_MARK,
+ VI3_DATA2_MARK, VI3_DATA3_MARK,
+ VI3_DATA4_MARK, VI3_DATA5_MARK,
+ VI3_DATA6_MARK, VI3_DATA7_MARK,
+};
+static const unsigned int vin3_sync_pins[] = {
+ RCAR_GP_PIN(1, 16), /* HSYNC */
+ RCAR_GP_PIN(1, 17), /* VSYNC */
+};
+static const unsigned int vin3_sync_mux[] = {
+ VI3_HSYNC_N_MARK,
+ VI3_VSYNC_N_MARK,
+};
+static const unsigned int vin3_field_pins[] = {
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int vin3_field_mux[] = {
+ VI3_FIELD_MARK,
+};
+static const unsigned int vin3_clkenb_pins[] = {
+ RCAR_GP_PIN(1, 14),
+};
+static const unsigned int vin3_clkenb_mux[] = {
+ VI3_CLKENB_MARK,
+};
+static const unsigned int vin3_clk_pins[] = {
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int vin3_clk_mux[] = {
+ VI3_CLK_MARK,
+};
static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(audio_clk_a),
+ SH_PFC_PIN_GROUP(audio_clk_b),
+ SH_PFC_PIN_GROUP(audio_clk_c),
+ SH_PFC_PIN_GROUP(audio_clkout),
+ SH_PFC_PIN_GROUP(audio_clkout_b),
+ SH_PFC_PIN_GROUP(audio_clkout_c),
+ SH_PFC_PIN_GROUP(audio_clkout_d),
SH_PFC_PIN_GROUP(du_rgb666),
SH_PFC_PIN_GROUP(du_rgb888),
SH_PFC_PIN_GROUP(du_clk_out_0),
@@ -3259,6 +3758,32 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(sdhi3_ctrl),
SH_PFC_PIN_GROUP(sdhi3_cd),
SH_PFC_PIN_GROUP(sdhi3_wp),
+ SH_PFC_PIN_GROUP(ssi0_data),
+ SH_PFC_PIN_GROUP(ssi0129_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_data),
+ SH_PFC_PIN_GROUP(ssi1_ctrl),
+ SH_PFC_PIN_GROUP(ssi2_data),
+ SH_PFC_PIN_GROUP(ssi2_ctrl),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi34_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data),
+ SH_PFC_PIN_GROUP(ssi4_ctrl),
+ SH_PFC_PIN_GROUP(ssi5),
+ SH_PFC_PIN_GROUP(ssi5_b),
+ SH_PFC_PIN_GROUP(ssi5_c),
+ SH_PFC_PIN_GROUP(ssi6),
+ SH_PFC_PIN_GROUP(ssi6_b),
+ SH_PFC_PIN_GROUP(ssi7_data),
+ SH_PFC_PIN_GROUP(ssi7_b_data),
+ SH_PFC_PIN_GROUP(ssi7_c_data),
+ SH_PFC_PIN_GROUP(ssi78_ctrl),
+ SH_PFC_PIN_GROUP(ssi78_b_ctrl),
+ SH_PFC_PIN_GROUP(ssi78_c_ctrl),
+ SH_PFC_PIN_GROUP(ssi8_data),
+ SH_PFC_PIN_GROUP(ssi8_b_data),
+ SH_PFC_PIN_GROUP(ssi8_c_data),
+ SH_PFC_PIN_GROUP(ssi9_data),
+ SH_PFC_PIN_GROUP(ssi9_ctrl),
SH_PFC_PIN_GROUP(tpu0_to0),
SH_PFC_PIN_GROUP(tpu0_to1),
SH_PFC_PIN_GROUP(tpu0_to2),
@@ -3266,16 +3791,54 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(usb0),
SH_PFC_PIN_GROUP(usb1),
SH_PFC_PIN_GROUP(usb2),
- SH_PFC_PIN_GROUP(vin0_data_g),
- SH_PFC_PIN_GROUP(vin0_data_r),
- SH_PFC_PIN_GROUP(vin0_data_b),
- SH_PFC_PIN_GROUP(vin0_hsync_signal),
- SH_PFC_PIN_GROUP(vin0_vsync_signal),
- SH_PFC_PIN_GROUP(vin0_field_signal),
- SH_PFC_PIN_GROUP(vin0_data_enable),
+ VIN_DATA_PIN_GROUP(vin0_data, 24),
+ VIN_DATA_PIN_GROUP(vin0_data, 20),
+ SH_PFC_PIN_GROUP(vin0_data18),
+ VIN_DATA_PIN_GROUP(vin0_data, 16),
+ VIN_DATA_PIN_GROUP(vin0_data, 12),
+ VIN_DATA_PIN_GROUP(vin0_data, 10),
+ VIN_DATA_PIN_GROUP(vin0_data, 8),
+ VIN_DATA_PIN_GROUP(vin0_data, 4),
+ SH_PFC_PIN_GROUP(vin0_sync),
+ SH_PFC_PIN_GROUP(vin0_field),
+ SH_PFC_PIN_GROUP(vin0_clkenb),
SH_PFC_PIN_GROUP(vin0_clk),
- SH_PFC_PIN_GROUP(vin1_data),
+ VIN_DATA_PIN_GROUP(vin1_data, 24),
+ VIN_DATA_PIN_GROUP(vin1_data, 20),
+ SH_PFC_PIN_GROUP(vin1_data18),
+ VIN_DATA_PIN_GROUP(vin1_data, 16),
+ VIN_DATA_PIN_GROUP(vin1_data, 12),
+ VIN_DATA_PIN_GROUP(vin1_data, 10),
+ VIN_DATA_PIN_GROUP(vin1_data, 8),
+ VIN_DATA_PIN_GROUP(vin1_data, 4),
+ SH_PFC_PIN_GROUP(vin1_sync),
+ SH_PFC_PIN_GROUP(vin1_field),
+ SH_PFC_PIN_GROUP(vin1_clkenb),
SH_PFC_PIN_GROUP(vin1_clk),
+ VIN_DATA_PIN_GROUP(vin2_data, 24),
+ SH_PFC_PIN_GROUP(vin2_data18),
+ VIN_DATA_PIN_GROUP(vin2_data, 16),
+ VIN_DATA_PIN_GROUP(vin2_data, 8),
+ VIN_DATA_PIN_GROUP(vin2_data, 4),
+ SH_PFC_PIN_GROUP(vin2_sync),
+ SH_PFC_PIN_GROUP(vin2_field),
+ SH_PFC_PIN_GROUP(vin2_clkenb),
+ SH_PFC_PIN_GROUP(vin2_clk),
+ SH_PFC_PIN_GROUP(vin3_data8),
+ SH_PFC_PIN_GROUP(vin3_sync),
+ SH_PFC_PIN_GROUP(vin3_field),
+ SH_PFC_PIN_GROUP(vin3_clkenb),
+ SH_PFC_PIN_GROUP(vin3_clk),
+};
+
+static const char * const audio_clk_groups[] = {
+ "audio_clk_a",
+ "audio_clk_b",
+ "audio_clk_c",
+ "audio_clkout",
+ "audio_clkout_b",
+ "audio_clkout_c",
+ "audio_clkout_d",
};
static const char * const du_groups[] = {
@@ -3533,6 +4096,35 @@ static const char * const sdhi3_groups[] = {
"sdhi3_wp",
};
+static const char * const ssi_groups[] = {
+ "ssi0_data",
+ "ssi0129_ctrl",
+ "ssi1_data",
+ "ssi1_ctrl",
+ "ssi2_data",
+ "ssi2_ctrl",
+ "ssi3_data",
+ "ssi34_ctrl",
+ "ssi4_data",
+ "ssi4_ctrl",
+ "ssi5",
+ "ssi5_b",
+ "ssi5_c",
+ "ssi6",
+ "ssi6_b",
+ "ssi7_data",
+ "ssi7_b_data",
+ "ssi7_c_data",
+ "ssi78_ctrl",
+ "ssi78_b_ctrl",
+ "ssi78_c_ctrl",
+ "ssi8_data",
+ "ssi8_b_data",
+ "ssi8_c_data",
+ "ssi9_data",
+ "ssi9_ctrl",
+};
+
static const char * const tpu0_groups[] = {
"tpu0_to0",
"tpu0_to1",
@@ -3553,22 +4145,57 @@ static const char * const usb2_groups[] = {
};
static const char * const vin0_groups[] = {
- "vin0_data_g",
- "vin0_data_r",
- "vin0_data_b",
- "vin0_hsync_signal",
- "vin0_vsync_signal",
- "vin0_field_signal",
- "vin0_data_enable",
+ "vin0_data24",
+ "vin0_data20",
+ "vin0_data18",
+ "vin0_data16",
+ "vin0_data12",
+ "vin0_data10",
+ "vin0_data8",
+ "vin0_data4",
+ "vin0_sync",
+ "vin0_field",
+ "vin0_clkenb",
"vin0_clk",
};
static const char * const vin1_groups[] = {
- "vin1_data",
+ "vin1_data24",
+ "vin1_data20",
+ "vin1_data18",
+ "vin1_data16",
+ "vin1_data12",
+ "vin1_data10",
+ "vin1_data8",
+ "vin1_data4",
+ "vin1_sync",
+ "vin1_field",
+ "vin1_clkenb",
"vin1_clk",
};
+static const char * const vin2_groups[] = {
+ "vin2_data24",
+ "vin2_data18",
+ "vin2_data16",
+ "vin2_data8",
+ "vin2_data4",
+ "vin2_sync",
+ "vin2_field",
+ "vin2_clkenb",
+ "vin2_clk",
+};
+
+static const char * const vin3_groups[] = {
+ "vin3_data8",
+ "vin3_sync",
+ "vin3_field",
+ "vin3_clkenb",
+ "vin3_clk",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(du),
SH_PFC_FUNCTION(du0),
SH_PFC_FUNCTION(du1),
@@ -3599,15 +4226,18 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(sdhi1),
SH_PFC_FUNCTION(sdhi2),
SH_PFC_FUNCTION(sdhi3),
+ SH_PFC_FUNCTION(ssi),
SH_PFC_FUNCTION(tpu0),
SH_PFC_FUNCTION(usb0),
SH_PFC_FUNCTION(usb1),
SH_PFC_FUNCTION(usb2),
SH_PFC_FUNCTION(vin0),
SH_PFC_FUNCTION(vin1),
+ SH_PFC_FUNCTION(vin2),
+ SH_PFC_FUNCTION(vin3),
};
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1) {
GP_0_31_FN, FN_IP3_17_15,
GP_0_30_FN, FN_IP3_14_12,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index bf76a654c02f..77d103fe39d9 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -1674,7 +1674,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP16_11_10, CAN1_RX_B, SEL_CAN1_1),
};
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
};
@@ -1730,11 +1730,11 @@ static const unsigned int du_clk_out_1_pins[] = {
static const unsigned int du_clk_out_1_mux[] = {
DU1_DOTCLKOUT1_MARK
};
-static const unsigned int du_sync_1_pins[] = {
+static const unsigned int du_sync_pins[] = {
/* EXVSYNC/VSYNC, EXHSYNC/HSYNC, EXDISP/EXODDF/EXCDE */
RCAR_GP_PIN(3, 29), RCAR_GP_PIN(3, 28), RCAR_GP_PIN(3, 27),
};
-static const unsigned int du_sync_1_mux[] = {
+static const unsigned int du_sync_mux[] = {
DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK,
DU1_EXVSYNC_DU1_VSYNC_MARK, DU1_EXHSYNC_DU1_HSYNC_MARK
};
@@ -1742,6 +1742,9 @@ static const unsigned int du_cde_disp_pins[] = {
/* CDE DISP */
RCAR_GP_PIN(3, 31), RCAR_GP_PIN(3, 30),
};
+static const unsigned int du_cde_disp_mux[] = {
+ DU1_CDE_MARK, DU1_DISP_MARK
+};
static const unsigned int du0_clk_in_pins[] = {
/* CLKIN */
RCAR_GP_PIN(6, 31),
@@ -1749,15 +1752,26 @@ static const unsigned int du0_clk_in_pins[] = {
static const unsigned int du0_clk_in_mux[] = {
DU0_DOTCLKIN_MARK
};
-static const unsigned int du_cde_disp_mux[] = {
- DU1_CDE_MARK, DU1_DISP_MARK
-};
static const unsigned int du1_clk_in_pins[] = {
/* CLKIN */
- RCAR_GP_PIN(7, 20), RCAR_GP_PIN(7, 19), RCAR_GP_PIN(3, 24),
+ RCAR_GP_PIN(3, 24),
};
static const unsigned int du1_clk_in_mux[] = {
- DU1_DOTCLKIN_C_MARK, DU1_DOTCLKIN_B_MARK, DU1_DOTCLKIN_MARK
+ DU1_DOTCLKIN_MARK
+};
+static const unsigned int du1_clk_in_b_pins[] = {
+ /* CLKIN */
+ RCAR_GP_PIN(7, 19),
+};
+static const unsigned int du1_clk_in_b_mux[] = {
+ DU1_DOTCLKIN_B_MARK,
+};
+static const unsigned int du1_clk_in_c_pins[] = {
+ /* CLKIN */
+ RCAR_GP_PIN(7, 20),
+};
+static const unsigned int du1_clk_in_c_mux[] = {
+ DU1_DOTCLKIN_C_MARK,
};
/* - ETH -------------------------------------------------------------------- */
static const unsigned int eth_link_pins[] = {
@@ -1791,6 +1805,144 @@ static const unsigned int eth_rmii_mux[] = {
ETH_RXD0_MARK, ETH_RXD1_MARK, ETH_RX_ER_MARK, ETH_CRS_DV_MARK,
ETH_TXD0_MARK, ETH_TXD1_MARK, ETH_TX_EN_MARK, ETH_REFCLK_MARK,
};
+/* - I2C0 ------------------------------------------------------------------- */
+static const unsigned int i2c0_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(0, 24), RCAR_GP_PIN(0, 25),
+};
+static const unsigned int i2c0_mux[] = {
+ SCL0_MARK, SDA0_MARK,
+};
+static const unsigned int i2c0_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+};
+static const unsigned int i2c0_b_mux[] = {
+ SCL0_B_MARK, SDA0_B_MARK,
+};
+static const unsigned int i2c0_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(0, 16), RCAR_GP_PIN(1, 1),
+};
+static const unsigned int i2c0_c_mux[] = {
+ SCL0_C_MARK, SDA0_C_MARK,
+};
+/* - I2C1 ------------------------------------------------------------------- */
+static const unsigned int i2c1_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int i2c1_mux[] = {
+ SCL1_MARK, SDA1_MARK,
+};
+static const unsigned int i2c1_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
+};
+static const unsigned int i2c1_b_mux[] = {
+ SCL1_B_MARK, SDA1_B_MARK,
+};
+static const unsigned int i2c1_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
+};
+static const unsigned int i2c1_c_mux[] = {
+ SCL1_C_MARK, SDA1_C_MARK,
+};
+static const unsigned int i2c1_d_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(4, 25), RCAR_GP_PIN(4, 26),
+};
+static const unsigned int i2c1_d_mux[] = {
+ SCL1_D_MARK, SDA1_D_MARK,
+};
+static const unsigned int i2c1_e_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(7, 15), RCAR_GP_PIN(7, 16),
+};
+static const unsigned int i2c1_e_mux[] = {
+ SCL1_E_MARK, SDA1_E_MARK,
+};
+/* - I2C2 ------------------------------------------------------------------- */
+static const unsigned int i2c2_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7),
+};
+static const unsigned int i2c2_mux[] = {
+ SCL2_MARK, SDA2_MARK,
+};
+static const unsigned int i2c2_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(3, 26), RCAR_GP_PIN(3, 29),
+};
+static const unsigned int i2c2_b_mux[] = {
+ SCL2_B_MARK, SDA2_B_MARK,
+};
+static const unsigned int i2c2_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14),
+};
+static const unsigned int i2c2_c_mux[] = {
+ SCL2_C_MARK, SDA2_C_MARK,
+};
+static const unsigned int i2c2_d_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 18),
+};
+static const unsigned int i2c2_d_mux[] = {
+ SCL2_D_MARK, SDA2_D_MARK,
+};
+/* - I2C3 ------------------------------------------------------------------- */
+static const unsigned int i2c3_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int i2c3_mux[] = {
+ SCL3_MARK, SDA3_MARK,
+};
+static const unsigned int i2c3_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 16),
+};
+static const unsigned int i2c3_b_mux[] = {
+ SCL3_B_MARK, SDA3_B_MARK,
+};
+static const unsigned int i2c3_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 23),
+};
+static const unsigned int i2c3_c_mux[] = {
+ SCL3_C_MARK, SDA3_C_MARK,
+};
+static const unsigned int i2c3_d_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(0, 27), RCAR_GP_PIN(0, 28),
+};
+static const unsigned int i2c3_d_mux[] = {
+ SCL3_D_MARK, SDA3_D_MARK,
+};
+/* - I2C4 ------------------------------------------------------------------- */
+static const unsigned int i2c4_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
+};
+static const unsigned int i2c4_mux[] = {
+ SCL4_MARK, SDA4_MARK,
+};
+static const unsigned int i2c4_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(4, 27), RCAR_GP_PIN(4, 28),
+};
+static const unsigned int i2c4_b_mux[] = {
+ SCL4_B_MARK, SDA4_B_MARK,
+};
+static const unsigned int i2c4_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(7, 13), RCAR_GP_PIN(7, 14),
+};
+static const unsigned int i2c4_c_mux[] = {
+ SCL4_C_MARK, SDA4_C_MARK,
+};
/* - INTC ------------------------------------------------------------------- */
static const unsigned int intc_irq0_pins[] = {
/* IRQ */
@@ -2635,34 +2787,306 @@ static const unsigned int sdhi2_wp_mux[] = {
SD2_WP_MARK,
};
/* - USB0 ------------------------------------------------------------------- */
-static const unsigned int usb0_pwen_pins[] = {
- /* PWEN */
- RCAR_GP_PIN(7, 23),
+static const unsigned int usb0_pins[] = {
+ RCAR_GP_PIN(7, 23), /* PWEN */
+ RCAR_GP_PIN(7, 24), /* OVC */
};
-static const unsigned int usb0_pwen_mux[] = {
+static const unsigned int usb0_mux[] = {
USB0_PWEN_MARK,
-};
-static const unsigned int usb0_ovc_pins[] = {
- /* OVC */
- RCAR_GP_PIN(7, 24),
-};
-static const unsigned int usb0_ovc_mux[] = {
USB0_OVC_MARK,
};
/* - USB1 ------------------------------------------------------------------- */
-static const unsigned int usb1_pwen_pins[] = {
- /* PWEN */
- RCAR_GP_PIN(7, 25),
+static const unsigned int usb1_pins[] = {
+ RCAR_GP_PIN(7, 25), /* PWEN */
+ RCAR_GP_PIN(6, 30), /* OVC */
};
-static const unsigned int usb1_pwen_mux[] = {
+static const unsigned int usb1_mux[] = {
USB1_PWEN_MARK,
+ USB1_OVC_MARK,
};
-static const unsigned int usb1_ovc_pins[] = {
- /* OVC */
- RCAR_GP_PIN(6, 30),
+
+union vin_data {
+ unsigned int data24[24];
+ unsigned int data20[20];
+ unsigned int data16[16];
+ unsigned int data12[12];
+ unsigned int data10[10];
+ unsigned int data8[8];
};
-static const unsigned int usb1_ovc_mux[] = {
- USB1_OVC_MARK,
+
+#define VIN_DATA_PIN_GROUP(n, s) \
+ { \
+ .name = #n#s, \
+ .pins = n##_pins.data##s, \
+ .mux = n##_mux.data##s, \
+ .nr_pins = ARRAY_SIZE(n##_pins.data##s), \
+ }
+
+/* - VIN0 ------------------------------------------------------------------- */
+static const union vin_data vin0_data_pins = {
+ .data24 = {
+ /* B */
+ RCAR_GP_PIN(4, 5), RCAR_GP_PIN(4, 6),
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 8),
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10),
+ RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+ /* G */
+ RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 16),
+ RCAR_GP_PIN(4, 17), RCAR_GP_PIN(4, 18),
+ RCAR_GP_PIN(4, 19), RCAR_GP_PIN(4, 20),
+ /* R */
+ RCAR_GP_PIN(4, 21), RCAR_GP_PIN(4, 22),
+ RCAR_GP_PIN(4, 23), RCAR_GP_PIN(4, 24),
+ RCAR_GP_PIN(4, 25), RCAR_GP_PIN(4, 26),
+ RCAR_GP_PIN(4, 27), RCAR_GP_PIN(4, 28),
+ },
+};
+static const union vin_data vin0_data_mux = {
+ .data24 = {
+ /* B */
+ VI0_DATA0_VI0_B0_MARK, VI0_DATA1_VI0_B1_MARK,
+ VI0_DATA2_VI0_B2_MARK, VI0_DATA3_VI0_B3_MARK,
+ VI0_DATA4_VI0_B4_MARK, VI0_DATA5_VI0_B5_MARK,
+ VI0_DATA6_VI0_B6_MARK, VI0_DATA7_VI0_B7_MARK,
+ /* G */
+ VI0_G0_MARK, VI0_G1_MARK,
+ VI0_G2_MARK, VI0_G3_MARK,
+ VI0_G4_MARK, VI0_G5_MARK,
+ VI0_G6_MARK, VI0_G7_MARK,
+ /* R */
+ VI0_R0_MARK, VI0_R1_MARK,
+ VI0_R2_MARK, VI0_R3_MARK,
+ VI0_R4_MARK, VI0_R5_MARK,
+ VI0_R6_MARK, VI0_R7_MARK,
+ },
+};
+static const unsigned int vin0_data18_pins[] = {
+ /* B */
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 8),
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10),
+ RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+ /* G */
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 16),
+ RCAR_GP_PIN(4, 17), RCAR_GP_PIN(4, 18),
+ RCAR_GP_PIN(4, 19), RCAR_GP_PIN(4, 20),
+ /* R */
+ RCAR_GP_PIN(4, 23), RCAR_GP_PIN(4, 24),
+ RCAR_GP_PIN(4, 25), RCAR_GP_PIN(4, 26),
+ RCAR_GP_PIN(4, 27), RCAR_GP_PIN(4, 28),
+};
+static const unsigned int vin0_data18_mux[] = {
+ /* B */
+ VI0_DATA2_VI0_B2_MARK, VI0_DATA3_VI0_B3_MARK,
+ VI0_DATA4_VI0_B4_MARK, VI0_DATA5_VI0_B5_MARK,
+ VI0_DATA6_VI0_B6_MARK, VI0_DATA7_VI0_B7_MARK,
+ /* G */
+ VI0_G2_MARK, VI0_G3_MARK,
+ VI0_G4_MARK, VI0_G5_MARK,
+ VI0_G6_MARK, VI0_G7_MARK,
+ /* R */
+ VI0_R2_MARK, VI0_R3_MARK,
+ VI0_R4_MARK, VI0_R5_MARK,
+ VI0_R6_MARK, VI0_R7_MARK,
+};
+static const unsigned int vin0_sync_pins[] = {
+ RCAR_GP_PIN(4, 3), /* HSYNC */
+ RCAR_GP_PIN(4, 4), /* VSYNC */
+};
+static const unsigned int vin0_sync_mux[] = {
+ VI0_HSYNC_N_MARK,
+ VI0_VSYNC_N_MARK,
+};
+static const unsigned int vin0_field_pins[] = {
+ RCAR_GP_PIN(4, 2),
+};
+static const unsigned int vin0_field_mux[] = {
+ VI0_FIELD_MARK,
+};
+static const unsigned int vin0_clkenb_pins[] = {
+ RCAR_GP_PIN(4, 1),
+};
+static const unsigned int vin0_clkenb_mux[] = {
+ VI0_CLKENB_MARK,
+};
+static const unsigned int vin0_clk_pins[] = {
+ RCAR_GP_PIN(4, 0),
+};
+static const unsigned int vin0_clk_mux[] = {
+ VI0_CLK_MARK,
+};
+/* - VIN1 ----------------------------------------------------------------- */
+static const unsigned int vin1_data8_pins[] = {
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
+ RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 8),
+ RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 10),
+ RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 12),
+};
+static const unsigned int vin1_data8_mux[] = {
+ VI1_DATA0_MARK, VI1_DATA1_MARK,
+ VI1_DATA2_MARK, VI1_DATA3_MARK,
+ VI1_DATA4_MARK, VI1_DATA5_MARK,
+ VI1_DATA6_MARK, VI1_DATA7_MARK,
+};
+static const unsigned int vin1_sync_pins[] = {
+ RCAR_GP_PIN(5, 0), /* HSYNC */
+ RCAR_GP_PIN(5, 1), /* VSYNC */
+};
+static const unsigned int vin1_sync_mux[] = {
+ VI1_HSYNC_N_MARK,
+ VI1_VSYNC_N_MARK,
+};
+static const unsigned int vin1_field_pins[] = {
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int vin1_field_mux[] = {
+ VI1_FIELD_MARK,
+};
+static const unsigned int vin1_clkenb_pins[] = {
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int vin1_clkenb_mux[] = {
+ VI1_CLKENB_MARK,
+};
+static const unsigned int vin1_clk_pins[] = {
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int vin1_clk_mux[] = {
+ VI1_CLK_MARK,
+};
+static const union vin_data vin1_b_data_pins = {
+ .data24 = {
+ /* B */
+ RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+ RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13),
+ /* G */
+ RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
+ RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+ RCAR_GP_PIN(7, 21), RCAR_GP_PIN(7, 22),
+ /* R */
+ RCAR_GP_PIN(7, 5), RCAR_GP_PIN(7, 6),
+ RCAR_GP_PIN(2, 15), RCAR_GP_PIN(2, 16),
+ RCAR_GP_PIN(2, 17), RCAR_GP_PIN(2, 18),
+ RCAR_GP_PIN(2, 19), RCAR_GP_PIN(2, 20),
+ },
+};
+static const union vin_data vin1_b_data_mux = {
+ .data24 = {
+ /* B */
+ VI1_DATA0_B_MARK, VI1_DATA1_B_MARK,
+ VI1_DATA2_B_MARK, VI1_DATA3_B_MARK,
+ VI1_DATA4_B_MARK, VI1_DATA5_B_MARK,
+ VI1_DATA6_B_MARK, VI1_DATA7_B_MARK,
+ /* G */
+ VI1_G0_B_MARK, VI1_G1_B_MARK,
+ VI1_G2_B_MARK, VI1_G3_B_MARK,
+ VI1_G4_B_MARK, VI1_G5_B_MARK,
+ VI1_G6_B_MARK, VI1_G7_B_MARK,
+ /* R */
+ VI1_R0_B_MARK, VI1_R1_B_MARK,
+ VI1_R2_B_MARK, VI1_R3_B_MARK,
+ VI1_R4_B_MARK, VI1_R5_B_MARK,
+ VI1_R6_B_MARK, VI1_R7_B_MARK,
+ },
+};
+static const unsigned int vin1_b_data18_pins[] = {
+ /* B */
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+ RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13),
+ /* G */
+ RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+ RCAR_GP_PIN(7, 21), RCAR_GP_PIN(7, 22),
+ /* R */
+ RCAR_GP_PIN(2, 15), RCAR_GP_PIN(2, 16),
+ RCAR_GP_PIN(2, 17), RCAR_GP_PIN(2, 18),
+ RCAR_GP_PIN(2, 19), RCAR_GP_PIN(2, 20),
+};
+static const unsigned int vin1_b_data18_mux[] = {
+ /* B */
+ VI1_DATA0_B_MARK, VI1_DATA1_B_MARK,
+ VI1_DATA2_B_MARK, VI1_DATA3_B_MARK,
+ VI1_DATA4_B_MARK, VI1_DATA5_B_MARK,
+ VI1_DATA6_B_MARK, VI1_DATA7_B_MARK,
+ /* G */
+ VI1_G0_B_MARK, VI1_G1_B_MARK,
+ VI1_G2_B_MARK, VI1_G3_B_MARK,
+ VI1_G4_B_MARK, VI1_G5_B_MARK,
+ VI1_G6_B_MARK, VI1_G7_B_MARK,
+ /* R */
+ VI1_R0_B_MARK, VI1_R1_B_MARK,
+ VI1_R2_B_MARK, VI1_R3_B_MARK,
+ VI1_R4_B_MARK, VI1_R5_B_MARK,
+ VI1_R6_B_MARK, VI1_R7_B_MARK,
+};
+static const unsigned int vin1_b_sync_pins[] = {
+ RCAR_GP_PIN(3, 17), /* HSYNC */
+ RCAR_GP_PIN(3, 18), /* VSYNC */
+};
+static const unsigned int vin1_b_sync_mux[] = {
+ VI1_HSYNC_N_B_MARK,
+ VI1_VSYNC_N_B_MARK,
+};
+static const unsigned int vin1_b_field_pins[] = {
+ RCAR_GP_PIN(3, 20),
+};
+static const unsigned int vin1_b_field_mux[] = {
+ VI1_FIELD_B_MARK,
+};
+static const unsigned int vin1_b_clkenb_pins[] = {
+ RCAR_GP_PIN(3, 19),
+};
+static const unsigned int vin1_b_clkenb_mux[] = {
+ VI1_CLKENB_B_MARK,
+};
+static const unsigned int vin1_b_clk_pins[] = {
+ RCAR_GP_PIN(3, 16),
+};
+static const unsigned int vin1_b_clk_mux[] = {
+ VI1_CLK_B_MARK,
+};
+/* - VIN2 ----------------------------------------------------------------- */
+static const unsigned int vin2_data8_pins[] = {
+ RCAR_GP_PIN(4, 20), RCAR_GP_PIN(4, 21),
+ RCAR_GP_PIN(4, 22), RCAR_GP_PIN(4, 23),
+ RCAR_GP_PIN(4, 24), RCAR_GP_PIN(4, 25),
+ RCAR_GP_PIN(4, 26), RCAR_GP_PIN(4, 27),
+};
+static const unsigned int vin2_data8_mux[] = {
+ VI2_DATA0_MARK, VI2_DATA1_MARK,
+ VI2_DATA2_MARK, VI2_DATA3_MARK,
+ VI2_DATA4_MARK, VI2_DATA5_MARK,
+ VI2_DATA6_MARK, VI2_DATA7_MARK,
+};
+static const unsigned int vin2_sync_pins[] = {
+ RCAR_GP_PIN(4, 15), /* HSYNC */
+ RCAR_GP_PIN(4, 16), /* VSYNC */
+};
+static const unsigned int vin2_sync_mux[] = {
+ VI2_HSYNC_N_MARK,
+ VI2_VSYNC_N_MARK,
+};
+static const unsigned int vin2_field_pins[] = {
+ RCAR_GP_PIN(4, 18),
+};
+static const unsigned int vin2_field_mux[] = {
+ VI2_FIELD_MARK,
+};
+static const unsigned int vin2_clkenb_pins[] = {
+ RCAR_GP_PIN(4, 17),
+};
+static const unsigned int vin2_clkenb_mux[] = {
+ VI2_CLKENB_MARK,
+};
+static const unsigned int vin2_clk_pins[] = {
+ RCAR_GP_PIN(4, 19),
+};
+static const unsigned int vin2_clk_mux[] = {
+ VI2_CLK_MARK,
};
static const struct sh_pfc_pin_group pinmux_groups[] = {
@@ -2670,14 +3094,35 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(du_rgb888),
SH_PFC_PIN_GROUP(du_clk_out_0),
SH_PFC_PIN_GROUP(du_clk_out_1),
- SH_PFC_PIN_GROUP(du_sync_1),
+ SH_PFC_PIN_GROUP(du_sync),
SH_PFC_PIN_GROUP(du_cde_disp),
SH_PFC_PIN_GROUP(du0_clk_in),
SH_PFC_PIN_GROUP(du1_clk_in),
+ SH_PFC_PIN_GROUP(du1_clk_in_b),
+ SH_PFC_PIN_GROUP(du1_clk_in_c),
SH_PFC_PIN_GROUP(eth_link),
SH_PFC_PIN_GROUP(eth_magic),
SH_PFC_PIN_GROUP(eth_mdio),
SH_PFC_PIN_GROUP(eth_rmii),
+ SH_PFC_PIN_GROUP(i2c0),
+ SH_PFC_PIN_GROUP(i2c0_b),
+ SH_PFC_PIN_GROUP(i2c0_c),
+ SH_PFC_PIN_GROUP(i2c1),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c1_c),
+ SH_PFC_PIN_GROUP(i2c1_d),
+ SH_PFC_PIN_GROUP(i2c1_e),
+ SH_PFC_PIN_GROUP(i2c2),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c2_c),
+ SH_PFC_PIN_GROUP(i2c2_d),
+ SH_PFC_PIN_GROUP(i2c3),
+ SH_PFC_PIN_GROUP(i2c3_b),
+ SH_PFC_PIN_GROUP(i2c3_c),
+ SH_PFC_PIN_GROUP(i2c3_d),
+ SH_PFC_PIN_GROUP(i2c4),
+ SH_PFC_PIN_GROUP(i2c4_b),
+ SH_PFC_PIN_GROUP(i2c4_c),
SH_PFC_PIN_GROUP(intc_irq0),
SH_PFC_PIN_GROUP(intc_irq1),
SH_PFC_PIN_GROUP(intc_irq2),
@@ -2794,10 +3239,40 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(sdhi2_ctrl),
SH_PFC_PIN_GROUP(sdhi2_cd),
SH_PFC_PIN_GROUP(sdhi2_wp),
- SH_PFC_PIN_GROUP(usb0_pwen),
- SH_PFC_PIN_GROUP(usb0_ovc),
- SH_PFC_PIN_GROUP(usb1_pwen),
- SH_PFC_PIN_GROUP(usb1_ovc),
+ SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb1),
+ VIN_DATA_PIN_GROUP(vin0_data, 24),
+ VIN_DATA_PIN_GROUP(vin0_data, 20),
+ SH_PFC_PIN_GROUP(vin0_data18),
+ VIN_DATA_PIN_GROUP(vin0_data, 16),
+ VIN_DATA_PIN_GROUP(vin0_data, 12),
+ VIN_DATA_PIN_GROUP(vin0_data, 10),
+ VIN_DATA_PIN_GROUP(vin0_data, 8),
+ SH_PFC_PIN_GROUP(vin0_sync),
+ SH_PFC_PIN_GROUP(vin0_field),
+ SH_PFC_PIN_GROUP(vin0_clkenb),
+ SH_PFC_PIN_GROUP(vin0_clk),
+ SH_PFC_PIN_GROUP(vin1_data8),
+ SH_PFC_PIN_GROUP(vin1_sync),
+ SH_PFC_PIN_GROUP(vin1_field),
+ SH_PFC_PIN_GROUP(vin1_clkenb),
+ SH_PFC_PIN_GROUP(vin1_clk),
+ VIN_DATA_PIN_GROUP(vin1_b_data, 24),
+ VIN_DATA_PIN_GROUP(vin1_b_data, 20),
+ SH_PFC_PIN_GROUP(vin1_b_data18),
+ VIN_DATA_PIN_GROUP(vin1_b_data, 16),
+ VIN_DATA_PIN_GROUP(vin1_b_data, 12),
+ VIN_DATA_PIN_GROUP(vin1_b_data, 10),
+ VIN_DATA_PIN_GROUP(vin1_b_data, 8),
+ SH_PFC_PIN_GROUP(vin1_b_sync),
+ SH_PFC_PIN_GROUP(vin1_b_field),
+ SH_PFC_PIN_GROUP(vin1_b_clkenb),
+ SH_PFC_PIN_GROUP(vin1_b_clk),
+ SH_PFC_PIN_GROUP(vin2_data8),
+ SH_PFC_PIN_GROUP(vin2_sync),
+ SH_PFC_PIN_GROUP(vin2_field),
+ SH_PFC_PIN_GROUP(vin2_clkenb),
+ SH_PFC_PIN_GROUP(vin2_clk),
};
static const char * const du_groups[] = {
@@ -2805,7 +3280,7 @@ static const char * const du_groups[] = {
"du_rgb888",
"du_clk_out_0",
"du_clk_out_1",
- "du_sync_1",
+ "du_sync",
"du_cde_disp",
};
@@ -2815,6 +3290,8 @@ static const char * const du0_groups[] = {
static const char * const du1_groups[] = {
"du1_clk_in",
+ "du1_clk_in_b",
+ "du1_clk_in_c",
};
static const char * const eth_groups[] = {
@@ -2824,6 +3301,40 @@ static const char * const eth_groups[] = {
"eth_rmii",
};
+static const char * const i2c0_groups[] = {
+ "i2c0",
+ "i2c0_b",
+ "i2c0_c",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1",
+ "i2c1_b",
+ "i2c1_c",
+ "i2c1_d",
+ "i2c1_e",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2",
+ "i2c2_b",
+ "i2c2_c",
+ "i2c2_d",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3",
+ "i2c3_b",
+ "i2c3_c",
+ "i2c3_d",
+};
+
+static const char * const i2c4_groups[] = {
+ "i2c4",
+ "i2c4_b",
+ "i2c4_c",
+};
+
static const char * const intc_groups[] = {
"intc_irq0",
"intc_irq1",
@@ -2840,20 +3351,29 @@ static const char * const mmc_groups[] = {
static const char * const msiof0_groups[] = {
"msiof0_clk",
- "msiof0_ctrl",
- "msiof0_data",
+ "msiof0_sync",
+ "msiof0_ss1",
+ "msiof0_ss2",
+ "msiof0_rx",
+ "msiof0_tx",
};
static const char * const msiof1_groups[] = {
"msiof1_clk",
- "msiof1_ctrl",
- "msiof1_data",
+ "msiof1_sync",
+ "msiof1_ss1",
+ "msiof1_ss2",
+ "msiof1_rx",
+ "msiof1_tx",
};
static const char * const msiof2_groups[] = {
"msiof2_clk",
- "msiof2_ctrl",
- "msiof2_data",
+ "msiof2_sync",
+ "msiof2_ss1",
+ "msiof2_ss2",
+ "msiof2_rx",
+ "msiof2_tx",
};
static const char * const scif0_groups[] = {
@@ -2989,12 +3509,51 @@ static const char * const sdhi2_groups[] = {
};
static const char * const usb0_groups[] = {
- "usb0_pwen",
- "usb0_ovc",
+ "usb0",
};
static const char * const usb1_groups[] = {
- "usb1_pwen",
- "usb1_ovc",
+ "usb1",
+};
+
+static const char * const vin0_groups[] = {
+ "vin0_data24",
+ "vin0_data20",
+ "vin0_data18",
+ "vin0_data16",
+ "vin0_data12",
+ "vin0_data10",
+ "vin0_data8",
+ "vin0_sync",
+ "vin0_field",
+ "vin0_clkenb",
+ "vin0_clk",
+};
+
+static const char * const vin1_groups[] = {
+ "vin1_data8",
+ "vin1_sync",
+ "vin1_field",
+ "vin1_clkenb",
+ "vin1_clk",
+ "vin1_b_data24",
+ "vin1_b_data20",
+ "vin1_b_data18",
+ "vin1_b_data16",
+ "vin1_b_data12",
+ "vin1_b_data10",
+ "vin1_b_data8",
+ "vin1_b_sync",
+ "vin1_b_field",
+ "vin1_b_clkenb",
+ "vin1_b_clk",
+};
+
+static const char * const vin2_groups[] = {
+ "vin2_data8",
+ "vin2_sync",
+ "vin2_field",
+ "vin2_clkenb",
+ "vin2_clk",
};
static const struct sh_pfc_function pinmux_functions[] = {
@@ -3002,6 +3561,11 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(du0),
SH_PFC_FUNCTION(du1),
SH_PFC_FUNCTION(eth),
+ SH_PFC_FUNCTION(i2c0),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(i2c4),
SH_PFC_FUNCTION(intc),
SH_PFC_FUNCTION(mmc),
SH_PFC_FUNCTION(msiof0),
@@ -3027,9 +3591,12 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(sdhi2),
SH_PFC_FUNCTION(usb0),
SH_PFC_FUNCTION(usb1),
+ SH_PFC_FUNCTION(vin0),
+ SH_PFC_FUNCTION(vin1),
+ SH_PFC_FUNCTION(vin2),
};
-static struct pinmux_cfg_reg pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1) {
GP_0_31_FN, FN_IP1_22_20,
GP_0_30_FN, FN_IP1_19_17,
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7203.c b/drivers/pinctrl/sh-pfc/pfc-sh7203.c
index bf3d8f28768d..3bda7bafd0ab 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7203.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7203.c
@@ -702,7 +702,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(SSCK0_PF_MARK, PF0MD_11),
};
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
/* PA */
PINMUX_GPIO(PA7),
PINMUX_GPIO(PA6),
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
index 673a59503223..e1cb6dc05028 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
@@ -1071,7 +1071,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(SD_D2_MARK, PK0MD_10),
};
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
/* Port A */
PINMUX_GPIO(PA3),
PINMUX_GPIO(PA2),
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
index a19b60f72b23..7a11320ad96d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
@@ -1451,7 +1451,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(PWM1A_MARK, PJ0MD_100),
};
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
/* Port A */
PINMUX_GPIO(PA1),
PINMUX_GPIO(PA0),
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7372.c b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
index cc097b693820..d9158b3b2919 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7372.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
@@ -844,7 +844,7 @@ static const u16 pinmux_data[] = {
#define SH7372_PIN_O(pin) SH_PFC_PIN_CFG(pin, __O)
#define SH7372_PIN_O_PU_PD(pin) SH_PFC_PIN_CFG(pin, __O | __PUD)
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
/* Table 57-1 (I/O and Pull U/D) */
SH7372_PIN_IO_PD(0), SH7372_PIN_IO_PD(1),
SH7372_PIN_O(2), SH7372_PIN_I_PD(3),
@@ -2118,17 +2118,6 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(usb1),
};
-#undef PORTCR
-#define PORTCR(nr, reg) \
- { \
- PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
- _PCRH(PORT##nr##_IN, 0, 0, PORT##nr##_OUT), \
- PORT##nr##_FN0, PORT##nr##_FN1, \
- PORT##nr##_FN2, PORT##nr##_FN3, \
- PORT##nr##_FN4, PORT##nr##_FN5, \
- PORT##nr##_FN6, PORT##nr##_FN7 } \
- }
-
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PORTCR(0, 0xE6051000), /* PORT0CR */
PORTCR(1, 0xE6051001), /* PORT1CR */
@@ -2585,7 +2574,7 @@ static void __iomem *sh7372_pinmux_portcr(struct sh_pfc *pfc, unsigned int pin)
&sh7372_portcr_offsets[i];
if (pin <= group->end_pin)
- return pfc->window->virt + group->offset + pin;
+ return pfc->windows->virt + group->offset + pin;
}
return NULL;
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index 7e278a97e411..6f6ba100994d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -1179,7 +1179,7 @@ static const u16 pinmux_data[] = {
*/
#define PIN_NUMBER(row, col) (1000+((row)-1)*34+(col)-1)
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
/* Table 25-1 (I/O and Pull U/D) */
SH73A0_PIN_I_PD(0),
SH73A0_PIN_I_PU(1),
@@ -3138,16 +3138,6 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(usb),
};
-#undef PORTCR
-#define PORTCR(nr, reg) \
- { \
- PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
- _PCRH(PORT##nr##_IN, 0, 0, PORT##nr##_OUT), \
- PORT##nr##_FN0, PORT##nr##_FN1, \
- PORT##nr##_FN2, PORT##nr##_FN3, \
- PORT##nr##_FN4, PORT##nr##_FN5, \
- PORT##nr##_FN6, PORT##nr##_FN7 } \
- }
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PORTCR(0, 0xe6050000), /* PORT0CR */
PORTCR(1, 0xe6050001), /* PORT1CR */
@@ -3661,38 +3651,38 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
};
static const struct pinmux_irq pinmux_irqs[] = {
- PINMUX_IRQ(irq_pin(19), 9),
- PINMUX_IRQ(irq_pin(1), 10),
PINMUX_IRQ(irq_pin(0), 11),
+ PINMUX_IRQ(irq_pin(1), 10),
+ PINMUX_IRQ(irq_pin(2), 149),
+ PINMUX_IRQ(irq_pin(3), 224),
+ PINMUX_IRQ(irq_pin(4), 159),
+ PINMUX_IRQ(irq_pin(5), 227),
+ PINMUX_IRQ(irq_pin(6), 147),
+ PINMUX_IRQ(irq_pin(7), 150),
+ PINMUX_IRQ(irq_pin(8), 223),
+ PINMUX_IRQ(irq_pin(9), 56, 308),
+ PINMUX_IRQ(irq_pin(10), 54),
+ PINMUX_IRQ(irq_pin(11), 238),
+ PINMUX_IRQ(irq_pin(12), 156),
+ PINMUX_IRQ(irq_pin(13), 239),
+ PINMUX_IRQ(irq_pin(14), 251),
+ PINMUX_IRQ(irq_pin(15), 0),
+ PINMUX_IRQ(irq_pin(16), 249),
+ PINMUX_IRQ(irq_pin(17), 234),
PINMUX_IRQ(irq_pin(18), 13),
+ PINMUX_IRQ(irq_pin(19), 9),
PINMUX_IRQ(irq_pin(20), 14),
PINMUX_IRQ(irq_pin(21), 15),
- PINMUX_IRQ(irq_pin(31), 26),
- PINMUX_IRQ(irq_pin(30), 27),
- PINMUX_IRQ(irq_pin(29), 28),
PINMUX_IRQ(irq_pin(22), 40),
PINMUX_IRQ(irq_pin(23), 53),
- PINMUX_IRQ(irq_pin(10), 54),
- PINMUX_IRQ(irq_pin(9), 56),
+ PINMUX_IRQ(irq_pin(24), 118),
+ PINMUX_IRQ(irq_pin(25), 164),
PINMUX_IRQ(irq_pin(26), 115),
PINMUX_IRQ(irq_pin(27), 116),
PINMUX_IRQ(irq_pin(28), 117),
- PINMUX_IRQ(irq_pin(24), 118),
- PINMUX_IRQ(irq_pin(6), 147),
- PINMUX_IRQ(irq_pin(2), 149),
- PINMUX_IRQ(irq_pin(7), 150),
- PINMUX_IRQ(irq_pin(12), 156),
- PINMUX_IRQ(irq_pin(4), 159),
- PINMUX_IRQ(irq_pin(25), 164),
- PINMUX_IRQ(irq_pin(8), 223),
- PINMUX_IRQ(irq_pin(3), 224),
- PINMUX_IRQ(irq_pin(5), 227),
- PINMUX_IRQ(irq_pin(17), 234),
- PINMUX_IRQ(irq_pin(11), 238),
- PINMUX_IRQ(irq_pin(13), 239),
- PINMUX_IRQ(irq_pin(16), 249),
- PINMUX_IRQ(irq_pin(14), 251),
- PINMUX_IRQ(irq_pin(9), 308),
+ PINMUX_IRQ(irq_pin(29), 28),
+ PINMUX_IRQ(irq_pin(30), 27),
+ PINMUX_IRQ(irq_pin(31), 26),
};
/* -----------------------------------------------------------------------------
@@ -3702,7 +3692,7 @@ static const struct pinmux_irq pinmux_irqs[] = {
static void sh73a0_vccq_mc0_endisable(struct regulator_dev *reg, bool enable)
{
struct sh_pfc *pfc = reg->reg_data;
- void __iomem *addr = pfc->window[1].virt + 4;
+ void __iomem *addr = pfc->windows[1].virt + 4;
unsigned long flags;
u32 value;
@@ -3735,7 +3725,7 @@ static int sh73a0_vccq_mc0_disable(struct regulator_dev *reg)
static int sh73a0_vccq_mc0_is_enabled(struct regulator_dev *reg)
{
struct sh_pfc *pfc = reg->reg_data;
- void __iomem *addr = pfc->window[1].virt + 4;
+ void __iomem *addr = pfc->windows[1].virt + 4;
unsigned long flags;
u32 value;
@@ -3794,7 +3784,7 @@ static const unsigned int sh73a0_portcr_offsets[] = {
static unsigned int sh73a0_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin)
{
- void __iomem *addr = pfc->window->virt
+ void __iomem *addr = pfc->windows->virt
+ sh73a0_portcr_offsets[pin >> 5] + pin;
u32 value = ioread8(addr) & PORTnCR_PULMD_MASK;
@@ -3812,7 +3802,7 @@ static unsigned int sh73a0_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin)
static void sh73a0_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias)
{
- void __iomem *addr = pfc->window->virt
+ void __iomem *addr = pfc->windows->virt
+ sh73a0_portcr_offsets[pin >> 5] + pin;
u32 value = ioread8(addr) & ~PORTnCR_PULMD_MASK;
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7720.c b/drivers/pinctrl/sh-pfc/pfc-sh7720.c
index 7a26809eda15..13d05f88bc01 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7720.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7720.c
@@ -576,7 +576,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(SIM_CLK_MARK, PSELD_1_0_10, PTV0_FN),
};
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
/* PTA */
PINMUX_GPIO(PTA7),
PINMUX_GPIO(PTA6),
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7722.c b/drivers/pinctrl/sh-pfc/pfc-sh7722.c
index add309347b05..914d872c37a4 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7722.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7722.c
@@ -754,7 +754,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(KEYOUT5_IN5_MARK, HIZA14_KEYSC, KEYOUT5_IN5),
};
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
/* PTA */
PINMUX_GPIO(PTA7),
PINMUX_GPIO(PTA6),
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7723.c b/drivers/pinctrl/sh-pfc/pfc-sh7723.c
index 1cecc9101a52..4eb7eae2e6d0 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7723.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7723.c
@@ -917,7 +917,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(SIUBISLD_MARK, PSD1_PSD0_FN2, PTZ0_FN),
};
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
/* PTA */
PINMUX_GPIO(PTA7),
PINMUX_GPIO(PTA6),
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7724.c b/drivers/pinctrl/sh-pfc/pfc-sh7724.c
index 1085ab556b8e..74a1a7f1317c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7724.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7724.c
@@ -1146,7 +1146,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(SCIF3_I_TXD_MARK, PSB14_1, PTZ3_FN),
};
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
/* PTA */
PINMUX_GPIO(PTA7),
PINMUX_GPIO(PTA6),
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index ec0c47c4f100..e53dd1cb1625 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -1357,7 +1357,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_DATA(IP11_28, ST_CLKOUT),
};
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7757.c b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
index 33d75e510911..625661a88c52 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7757.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
@@ -1074,7 +1074,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(ON_DQ0_MARK, PS8_8_FN2, PTZ0_FN),
};
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
/* PTA */
PINMUX_GPIO(PTA7),
PINMUX_GPIO(PTA6),
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7785.c b/drivers/pinctrl/sh-pfc/pfc-sh7785.c
index 517eb49d76bd..b38dd7e3e375 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7785.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7785.c
@@ -671,7 +671,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(IRQOUT_MARK, P2MSEL2_1),
};
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
/* PA */
PINMUX_GPIO(PA7),
PINMUX_GPIO(PA6),
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7786.c b/drivers/pinctrl/sh-pfc/pfc-sh7786.c
index 623345fac936..6cb4e0aaf20b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7786.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7786.c
@@ -407,7 +407,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(SSI3_SCK_MARK, P2MSEL6_1, P2MSEL5_1, PJ1_FN),
};
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
/* PA */
PINMUX_GPIO(PA7),
PINMUX_GPIO(PA6),
diff --git a/drivers/pinctrl/sh-pfc/pfc-shx3.c b/drivers/pinctrl/sh-pfc/pfc-shx3.c
index 55262bd869ed..a3fcb2284d91 100644
--- a/drivers/pinctrl/sh-pfc/pfc-shx3.c
+++ b/drivers/pinctrl/sh-pfc/pfc-shx3.c
@@ -285,7 +285,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(IRQOUT_MARK, PH0_FN),
};
-static struct sh_pfc_pin pinmux_pins[] = {
+static const struct sh_pfc_pin pinmux_pins[] = {
/* PA */
PINMUX_GPIO(PA7),
PINMUX_GPIO(PA6),
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 11bd0d970a52..ab8fd258d9ed 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -76,12 +76,13 @@ struct pinmux_cfg_reg {
#define PINMUX_CFG_REG(name, r, r_width, f_width) \
.reg = r, .reg_width = r_width, .field_width = f_width, \
- .enum_ids = (u16 [(r_width / f_width) * (1 << f_width)])
+ .enum_ids = (const u16 [(r_width / f_width) * (1 << f_width)])
#define PINMUX_CFG_REG_VAR(name, r, r_width, var_fw0, var_fwn...) \
.reg = r, .reg_width = r_width, \
- .var_field_width = (unsigned long [r_width]) { var_fw0, var_fwn, 0 }, \
- .enum_ids = (u16 [])
+ .var_field_width = (const unsigned long [r_width]) \
+ { var_fw0, var_fwn, 0 }, \
+ .enum_ids = (const u16 [])
struct pinmux_data_reg {
unsigned long reg, reg_width;
@@ -90,15 +91,15 @@ struct pinmux_data_reg {
#define PINMUX_DATA_REG(name, r, r_width) \
.reg = r, .reg_width = r_width, \
- .enum_ids = (u16 [r_width]) \
+ .enum_ids = (const u16 [r_width]) \
struct pinmux_irq {
int irq;
- unsigned short *gpios;
+ const short *gpios;
};
#define PINMUX_IRQ(irq_nr, ids...) \
- { .irq = irq_nr, .gpios = (unsigned short []) { ids, 0 } } \
+ { .irq = irq_nr, .gpios = (const short []) { ids, -1 } }
struct pinmux_range {
u16 begin;
@@ -254,7 +255,7 @@ struct sh_pfc_soc_info {
#define PINMUX_GPIO(_pin) \
[GPIO_##_pin] = { \
.pin = (u16)-1, \
- .name = __stringify(name), \
+ .name = __stringify(GPIO_##_pin), \
.enum_id = _pin##_DATA, \
}
@@ -304,8 +305,7 @@ struct sh_pfc_soc_info {
#define PORTCR(nr, reg) \
{ \
PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
- _PCRH(PORT##nr##_IN, PORT##nr##_IN_PD, \
- PORT##nr##_IN_PU, PORT##nr##_OUT), \
+ _PCRH(PORT##nr##_IN, 0, 0, PORT##nr##_OUT), \
PORT##nr##_FN0, PORT##nr##_FN1, \
PORT##nr##_FN2, PORT##nr##_FN3, \
PORT##nr##_FN4, PORT##nr##_FN5, \
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas6.c b/drivers/pinctrl/sirf/pinctrl-atlas6.c
index 8ab7898d21be..2b9f32065920 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas6.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas6.c
@@ -562,6 +562,23 @@ static const struct sirfsoc_padmux usp1_padmux = {
static const unsigned usp1_pins[] = { 15, 43, 44, 45, 46 };
+static const struct sirfsoc_muxmask usp1_uart_nostreamctrl_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(12) | BIT(13),
+ },
+};
+
+static const struct sirfsoc_padmux usp1_uart_nostreamctrl_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usp1_uart_nostreamctrl_muxmask),
+ .muxmask = usp1_uart_nostreamctrl_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
+ .funcmask = BIT(16),
+ .funcval = BIT(16),
+};
+
+static const unsigned usp1_uart_nostreamctrl_pins[] = { 44, 45 };
+
static const struct sirfsoc_muxmask nand_muxmask[] = {
{
.group = 2,
@@ -889,6 +906,8 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
SIRFSOC_PIN_GROUP("usp0_uart_nostreamctrl_grp",
usp0_uart_nostreamctrl_pins),
SIRFSOC_PIN_GROUP("usp1grp", usp1_pins),
+ SIRFSOC_PIN_GROUP("usp1_uart_nostreamctrl_grp",
+ usp1_uart_nostreamctrl_pins),
SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins),
SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins),
SIRFSOC_PIN_GROUP("pwm0grp", pwm0_pins),
@@ -935,6 +954,8 @@ static const char * const usp0_uart_nostreamctrl_grp[] = {
"usp0_uart_nostreamctrl_grp" };
static const char * const usp0grp[] = { "usp0grp" };
static const char * const usp1grp[] = { "usp1grp" };
+static const char * const usp1_uart_nostreamctrl_grp[] = {
+ "usp1_uart_nostreamctrl_grp" };
static const char * const i2c0grp[] = { "i2c0grp" };
static const char * const i2c1grp[] = { "i2c1grp" };
static const char * const pwm0grp[] = { "pwm0grp" };
@@ -983,6 +1004,9 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
usp0_uart_nostreamctrl_grp,
usp0_uart_nostreamctrl_padmux),
SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
+ SIRFSOC_PMX_FUNCTION("usp1_uart_nostreamctrl",
+ usp1_uart_nostreamctrl_grp,
+ usp1_uart_nostreamctrl_padmux),
SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux),
SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux),
SIRFSOC_PMX_FUNCTION("pwm0", pwm0grp, pwm0_padmux),
diff --git a/drivers/pinctrl/sirf/pinctrl-prima2.c b/drivers/pinctrl/sirf/pinctrl-prima2.c
index 050777be0f1e..dde0285544d6 100644
--- a/drivers/pinctrl/sirf/pinctrl-prima2.c
+++ b/drivers/pinctrl/sirf/pinctrl-prima2.c
@@ -413,7 +413,7 @@ static const struct sirfsoc_padmux ac97_padmux = {
.funcval = 0,
};
-static const unsigned ac97_pins[] = { 33, 34, 35, 36 };
+static const unsigned ac97_pins[] = { 43, 44, 45, 46 };
static const struct sirfsoc_muxmask spi1_muxmask[] = {
{
@@ -467,12 +467,6 @@ static const struct sirfsoc_muxmask sdmmc5_muxmask[] = {
{
.group = 0,
.mask = BIT(24) | BIT(25) | BIT(26),
- }, {
- .group = 1,
- .mask = BIT(29),
- }, {
- .group = 2,
- .mask = BIT(0) | BIT(1),
},
};
@@ -484,7 +478,7 @@ static const struct sirfsoc_padmux sdmmc5_padmux = {
.funcval = BIT(13) | BIT(14),
};
-static const unsigned sdmmc5_pins[] = { 24, 25, 26, 61, 64, 65 };
+static const unsigned sdmmc5_pins[] = { 24, 25, 26 };
static const struct sirfsoc_muxmask usp0_muxmask[] = {
{
@@ -503,6 +497,40 @@ static const struct sirfsoc_padmux usp0_padmux = {
static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 };
+static const struct sirfsoc_muxmask usp0_only_utfs_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22),
+ },
+};
+
+static const struct sirfsoc_padmux usp0_only_utfs_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usp0_only_utfs_muxmask),
+ .muxmask = usp0_only_utfs_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
+ .funcmask = BIT(1) | BIT(2) | BIT(6),
+ .funcval = 0,
+};
+
+static const unsigned usp0_only_utfs_pins[] = { 51, 52, 53, 54 };
+
+static const struct sirfsoc_muxmask usp0_only_urfs_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(19) | BIT(20) | BIT(21) | BIT(23),
+ },
+};
+
+static const struct sirfsoc_padmux usp0_only_urfs_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usp0_only_urfs_muxmask),
+ .muxmask = usp0_only_urfs_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
+ .funcmask = BIT(1) | BIT(2) | BIT(9),
+ .funcval = 0,
+};
+
+static const unsigned usp0_only_urfs_pins[] = { 51, 52, 53, 55 };
+
static const struct sirfsoc_muxmask usp0_uart_nostreamctrl_muxmask[] = {
{
.group = 1,
@@ -859,6 +887,8 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
SIRFSOC_PIN_GROUP("usp0grp", usp0_pins),
SIRFSOC_PIN_GROUP("usp0_uart_nostreamctrl_grp",
usp0_uart_nostreamctrl_pins),
+ SIRFSOC_PIN_GROUP("usp0_only_utfs_grp", usp0_only_utfs_pins),
+ SIRFSOC_PIN_GROUP("usp0_only_urfs_grp", usp0_only_urfs_pins),
SIRFSOC_PIN_GROUP("usp1grp", usp1_pins),
SIRFSOC_PIN_GROUP("usp1_uart_nostreamctrl_grp",
usp1_uart_nostreamctrl_pins),
@@ -907,6 +937,8 @@ static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" };
static const char * const usp0grp[] = { "usp0grp" };
static const char * const usp0_uart_nostreamctrl_grp[] =
{ "usp0_uart_nostreamctrl_grp" };
+static const char * const usp0_only_utfs_grp[] = { "usp0_only_utfs_grp" };
+static const char * const usp0_only_urfs_grp[] = { "usp0_only_urfs_grp" };
static const char * const usp1grp[] = { "usp1grp" };
static const char * const usp1_uart_nostreamctrl_grp[] =
{ "usp1_uart_nostreamctrl_grp" };
@@ -955,6 +987,8 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux),
SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl",
usp0_uart_nostreamctrl_grp, usp0_uart_nostreamctrl_padmux),
+ SIRFSOC_PMX_FUNCTION("usp0_only_utfs", usp0_only_utfs_grp, usp0_only_utfs_padmux),
+ SIRFSOC_PMX_FUNCTION("usp0_only_urfs", usp0_only_urfs_grp, usp0_only_urfs_padmux),
SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
SIRFSOC_PMX_FUNCTION("usp1_uart_nostreamctrl",
usp1_uart_nostreamctrl_grp, usp1_uart_nostreamctrl_padmux),
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index b81e388c50de..a0d6152701cd 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -468,7 +468,8 @@ static inline int sirfsoc_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
struct sirfsoc_gpio_bank *bank = container_of(to_of_mm_gpio_chip(chip),
struct sirfsoc_gpio_bank, chip);
- return irq_create_mapping(bank->domain, offset);
+ return irq_create_mapping(bank->domain, offset + bank->id *
+ SIRFSOC_GPIO_BANK_SIZE);
}
static inline int sirfsoc_gpio_to_offset(unsigned int gpio)
@@ -559,7 +560,7 @@ static int sirfsoc_gpio_irq_type(struct irq_data *d, unsigned type)
spin_lock_irqsave(&sgpio_lock, flags);
val = readl(bank->chip.regs + offset);
- val &= ~SIRFSOC_GPIO_CTL_INTR_STS_MASK;
+ val &= ~(SIRFSOC_GPIO_CTL_INTR_STS_MASK | SIRFSOC_GPIO_CTL_OUT_EN_MASK);
switch (type) {
case IRQ_TYPE_NONE:
@@ -593,12 +594,34 @@ static int sirfsoc_gpio_irq_type(struct irq_data *d, unsigned type)
return 0;
}
+static unsigned int sirfsoc_gpio_irq_startup(struct irq_data *d)
+{
+ struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq))
+ dev_err(bank->chip.gc.dev,
+ "unable to lock HW IRQ %lu for IRQ\n",
+ d->hwirq);
+ sirfsoc_gpio_irq_unmask(d);
+ return 0;
+}
+
+static void sirfsoc_gpio_irq_shutdown(struct irq_data *d)
+{
+ struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ sirfsoc_gpio_irq_mask(d);
+ gpio_unlock_as_irq(&bank->chip.gc, d->hwirq);
+}
+
static struct irq_chip sirfsoc_irq_chip = {
.name = "sirf-gpio-irq",
.irq_ack = sirfsoc_gpio_irq_ack,
.irq_mask = sirfsoc_gpio_irq_mask,
.irq_unmask = sirfsoc_gpio_irq_unmask,
.irq_set_type = sirfsoc_gpio_irq_type,
+ .irq_startup = sirfsoc_gpio_irq_startup,
+ .irq_shutdown = sirfsoc_gpio_irq_shutdown,
};
static void sirfsoc_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
@@ -629,7 +652,8 @@ static void sirfsoc_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
if ((status & 0x1) && (ctrl & SIRFSOC_GPIO_CTL_INTR_EN_MASK)) {
pr_debug("%s: gpio id %d idx %d happens\n",
__func__, bank->id, idx);
- generic_handle_irq(irq_find_mapping(bank->domain, idx));
+ generic_handle_irq(irq_find_mapping(bank->domain, idx +
+ bank->id * SIRFSOC_GPIO_BANK_SIZE));
}
idx++;
@@ -786,7 +810,7 @@ static int sirfsoc_gpio_irq_map(struct irq_domain *d, unsigned int irq,
irq_set_chip(irq, &sirfsoc_irq_chip);
irq_set_handler(irq, handle_level_irq);
- irq_set_chip_data(irq, bank);
+ irq_set_chip_data(irq, bank + hwirq / SIRFSOC_GPIO_BANK_SIZE);
set_irq_flags(irq, IRQF_VALID);
return 0;
@@ -835,6 +859,7 @@ static int sirfsoc_gpio_probe(struct device_node *np)
struct sirfsoc_gpio_bank *bank;
void __iomem *regs;
struct platform_device *pdev;
+ struct irq_domain *domain;
bool is_marco = false;
u32 pullups[SIRFSOC_GPIO_NO_OF_BANKS], pulldowns[SIRFSOC_GPIO_NO_OF_BANKS];
@@ -850,6 +875,14 @@ static int sirfsoc_gpio_probe(struct device_node *np)
if (of_device_is_compatible(np, "sirf,marco-pinctrl"))
is_marco = 1;
+ domain = irq_domain_add_linear(np, SIRFSOC_GPIO_BANK_SIZE * SIRFSOC_GPIO_NO_OF_BANKS,
+ &sirfsoc_gpio_irq_simple_ops, sgpio_bank);
+ if (!domain) {
+ pr_err("%s: Failed to create irqdomain\n", np->full_name);
+ err = -ENOSYS;
+ goto out;
+ }
+
for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
bank = &sgpio_bank[i];
spin_lock_init(&bank->lock);
@@ -866,6 +899,7 @@ static int sirfsoc_gpio_probe(struct device_node *np)
bank->chip.gc.of_node = np;
bank->chip.gc.of_xlate = sirfsoc_gpio_of_xlate;
bank->chip.gc.of_gpio_n_cells = 2;
+ bank->chip.gc.dev = &pdev->dev;
bank->chip.regs = regs;
bank->id = i;
bank->is_marco = is_marco;
@@ -882,14 +916,7 @@ static int sirfsoc_gpio_probe(struct device_node *np)
goto out;
}
- bank->domain = irq_domain_add_linear(np, SIRFSOC_GPIO_BANK_SIZE,
- &sirfsoc_gpio_irq_simple_ops, bank);
-
- if (!bank->domain) {
- pr_err("%s: Failed to create irqdomain\n", np->full_name);
- err = -ENOSYS;
- goto out;
- }
+ bank->domain = domain;
irq_set_chained_handler(bank->parent_irq, sirfsoc_gpio_handle_irq);
irq_set_handler_data(bank->parent_irq, bank);
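
The pinctrl-sirf change above folds the per-bank irq domains into one linear domain spanning all banks, so a GPIO's hardware irq number becomes its bank-relative offset plus bank->id * SIRFSOC_GPIO_BANK_SIZE. A minimal stand-alone sketch of that index arithmetic follows; the bank size of 32 is an assumption for illustration, the driver's own SIRFSOC_GPIO_BANK_SIZE is authoritative.

    #include <stdio.h>

    #define GPIO_BANK_SIZE 32   /* illustrative; the driver uses SIRFSOC_GPIO_BANK_SIZE */

    /* Map a (bank, offset) pair to a hwirq in the single shared domain. */
    static unsigned int to_hwirq(unsigned int bank, unsigned int offset)
    {
        return offset + bank * GPIO_BANK_SIZE;
    }

    /* Recover the bank and bank-relative offset from a hwirq. */
    static void from_hwirq(unsigned int hwirq, unsigned int *bank, unsigned int *offset)
    {
        *bank = hwirq / GPIO_BANK_SIZE;
        *offset = hwirq % GPIO_BANK_SIZE;
    }

    int main(void)
    {
        unsigned int bank, offset;

        from_hwirq(to_hwirq(2, 5), &bank, &offset);
        printf("bank %u offset %u -> hwirq %u\n", bank, offset, to_hwirq(bank, offset));
        return 0;
    }
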
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 39aec0850810..9802b67040cc 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -276,7 +276,20 @@ static int wmt_pctl_dt_node_to_map_pull(struct wmt_pinctrl_data *data,
if (!configs)
return -ENOMEM;
- configs[0] = pull;
+ switch (pull) {
+ case 0:
+ configs[0] = PIN_CONFIG_BIAS_DISABLE;
+ break;
+ case 1:
+ configs[0] = PIN_CONFIG_BIAS_PULL_DOWN;
+ break;
+ case 2:
+ configs[0] = PIN_CONFIG_BIAS_PULL_UP;
+ break;
+ default:
+ configs[0] = PIN_CONFIG_BIAS_DISABLE;
+ dev_err(data->dev, "invalid pull state %d - disabling\n", pull);
+ }
map->type = PIN_MAP_TYPE_CONFIGS_PIN;
map->data.configs.group_or_pin = data->groups[group];
@@ -565,7 +578,7 @@ static struct gpio_chip wmt_gpio_chip = {
.direction_output = wmt_gpio_direction_output,
.get = wmt_gpio_get_value,
.set = wmt_gpio_set_value,
- .can_sleep = 0,
+ .can_sleep = false,
};
int wmt_pinctrl_probe(struct platform_device *pdev,
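
The pinctrl-wmt hunk above stops passing the raw "wm,pull" device-tree value through untranslated and instead maps it onto the generic pinconf bias parameters, falling back to "disabled" with an error for anything out of range. A small sketch of that translation, assuming stand-in enum values rather than the kernel's PIN_CONFIG_BIAS_* constants:

    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's PIN_CONFIG_BIAS_* parameters. */
    enum bias_param { BIAS_DISABLE, BIAS_PULL_DOWN, BIAS_PULL_UP };

    /* Translate the raw "wm,pull" value into a bias parameter, defaulting
     * to "disabled" (and warning) on anything out of range, as the driver does. */
    static enum bias_param wmt_pull_to_bias(unsigned int pull)
    {
        switch (pull) {
        case 0: return BIAS_DISABLE;
        case 1: return BIAS_PULL_DOWN;
        case 2: return BIAS_PULL_UP;
        default:
            fprintf(stderr, "invalid pull state %u - disabling\n", pull);
            return BIAS_DISABLE;
        }
    }

    int main(void)
    {
        printf("pull=2 -> %d\n", wmt_pull_to_bias(2));
        return 0;
    }
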
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index b13303e75a34..440ed776efd4 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -25,4 +25,18 @@ config CHROMEOS_LAPTOP
If you have a supported Chromebook, choose Y or M here.
The module will be called chromeos_laptop.
+config CHROMEOS_PSTORE
+ tristate "Chrome OS pstore support"
+ ---help---
+ This module instantiates the persistent storage on x86 ChromeOS
+ devices. It can be used to store away console logs and crash
+ information across reboots.
+
+ The range of memory used is 0xf00000-0x1000000, traditionally
+ the memory used to back VGA controller memory.
+
+ If you have a supported Chromebook, choose Y or M here.
+ The module will be called chromeos_pstore.
+
+
endif # CHROMEOS_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 015e9195e226..2b860ca7450f 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
+obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 3e5b4497a1d0..7f3aad0e115c 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -27,6 +27,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#define ATMEL_TP_I2C_ADDR 0x4b
#define ATMEL_TP_I2C_BL_ADDR 0x25
@@ -40,7 +41,7 @@ static struct i2c_client *als;
static struct i2c_client *tp;
static struct i2c_client *ts;
-const char *i2c_adapter_names[] = {
+static const char *i2c_adapter_names[] = {
"SMBus I801 adapter",
"i915 gmbus vga",
"i915 gmbus panel",
@@ -53,20 +54,33 @@ enum i2c_adapter_type {
I2C_ADAPTER_PANEL,
};
-static struct i2c_board_info __initdata cyapa_device = {
+struct i2c_peripheral {
+ int (*add)(enum i2c_adapter_type type);
+ enum i2c_adapter_type type;
+};
+
+#define MAX_I2C_PERIPHERALS 3
+
+struct chromeos_laptop {
+ struct i2c_peripheral i2c_peripherals[MAX_I2C_PERIPHERALS];
+};
+
+static struct chromeos_laptop *cros_laptop;
+
+static struct i2c_board_info cyapa_device = {
I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
.flags = I2C_CLIENT_WAKE,
};
-static struct i2c_board_info __initdata isl_als_device = {
+static struct i2c_board_info isl_als_device = {
I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
};
-static struct i2c_board_info __initdata tsl2583_als_device = {
+static struct i2c_board_info tsl2583_als_device = {
I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
};
-static struct i2c_board_info __initdata tsl2563_als_device = {
+static struct i2c_board_info tsl2563_als_device = {
I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR),
};
@@ -89,7 +103,7 @@ static struct mxt_platform_data atmel_224s_tp_platform_data = {
.config_length = 0,
};
-static struct i2c_board_info __initdata atmel_224s_tp_device = {
+static struct i2c_board_info atmel_224s_tp_device = {
I2C_BOARD_INFO("atmel_mxt_tp", ATMEL_TP_I2C_ADDR),
.platform_data = &atmel_224s_tp_platform_data,
.flags = I2C_CLIENT_WAKE,
@@ -110,13 +124,13 @@ static struct mxt_platform_data atmel_1664s_platform_data = {
.config_length = 0,
};
-static struct i2c_board_info __initdata atmel_1664s_device = {
+static struct i2c_board_info atmel_1664s_device = {
I2C_BOARD_INFO("atmel_mxt_ts", ATMEL_TS_I2C_ADDR),
.platform_data = &atmel_1664s_platform_data,
.flags = I2C_CLIENT_WAKE,
};
-static struct i2c_client __init *__add_probed_i2c_device(
+static struct i2c_client *__add_probed_i2c_device(
const char *name,
int bus,
struct i2c_board_info *info,
@@ -169,7 +183,7 @@ static struct i2c_client __init *__add_probed_i2c_device(
return client;
}
-static int __init __find_i2c_adap(struct device *dev, void *data)
+static int __find_i2c_adap(struct device *dev, void *data)
{
const char *name = data;
static const char *prefix = "i2c-";
@@ -180,7 +194,7 @@ static int __init __find_i2c_adap(struct device *dev, void *data)
return (strncmp(adapter->name, name, strlen(name)) == 0);
}
-static int __init find_i2c_adapter_num(enum i2c_adapter_type type)
+static int find_i2c_adapter_num(enum i2c_adapter_type type)
{
struct device *dev = NULL;
struct i2c_adapter *adapter;
@@ -189,8 +203,9 @@ static int __init find_i2c_adapter_num(enum i2c_adapter_type type)
dev = bus_find_device(&i2c_bus_type, NULL, (void *)name,
__find_i2c_adap);
if (!dev) {
- pr_err("%s: i2c adapter %s not found on system.\n", __func__,
- name);
+ /* Adapters may appear later. Deferred probing will retry */
+ pr_notice("%s: i2c adapter %s not found on system.\n", __func__,
+ name);
return -ENODEV;
}
adapter = to_i2c_adapter(dev);
@@ -205,7 +220,7 @@ static int __init find_i2c_adapter_num(enum i2c_adapter_type type)
* Returns NULL if no devices found.
* See Documentation/i2c/instantiating-devices for more information.
*/
-static __init struct i2c_client *add_probed_i2c_device(
+static struct i2c_client *add_probed_i2c_device(
const char *name,
enum i2c_adapter_type type,
struct i2c_board_info *info,
@@ -222,7 +237,7 @@ static __init struct i2c_client *add_probed_i2c_device(
* info->addr.
* Returns NULL if no device found.
*/
-static __init struct i2c_client *add_i2c_device(const char *name,
+static struct i2c_client *add_i2c_device(const char *name,
enum i2c_adapter_type type,
struct i2c_board_info *info)
{
@@ -233,161 +248,259 @@ static __init struct i2c_client *add_i2c_device(const char *name,
addr_list);
}
-
-static struct i2c_client __init *add_smbus_device(const char *name,
- struct i2c_board_info *info)
+static int setup_cyapa_tp(enum i2c_adapter_type type)
{
- return add_i2c_device(name, I2C_ADAPTER_SMBUS, info);
-}
+ if (tp)
+ return 0;
-static int __init setup_cyapa_smbus_tp(const struct dmi_system_id *id)
-{
- /* add cyapa touchpad on smbus */
- tp = add_smbus_device("trackpad", &cyapa_device);
- return 0;
+ /* add cyapa touchpad */
+ tp = add_i2c_device("trackpad", type, &cyapa_device);
+ return (!tp) ? -EAGAIN : 0;
}
-static int __init setup_atmel_224s_tp(const struct dmi_system_id *id)
+static int setup_atmel_224s_tp(enum i2c_adapter_type type)
{
const unsigned short addr_list[] = { ATMEL_TP_I2C_BL_ADDR,
ATMEL_TP_I2C_ADDR,
I2C_CLIENT_END };
+ if (tp)
+ return 0;
- /* add atmel mxt touchpad on VGA DDC GMBus */
- tp = add_probed_i2c_device("trackpad", I2C_ADAPTER_VGADDC,
+ /* add atmel mxt touchpad */
+ tp = add_probed_i2c_device("trackpad", type,
&atmel_224s_tp_device, addr_list);
- return 0;
+ return (!tp) ? -EAGAIN : 0;
}
-static int __init setup_atmel_1664s_ts(const struct dmi_system_id *id)
+static int setup_atmel_1664s_ts(enum i2c_adapter_type type)
{
const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR,
ATMEL_TS_I2C_ADDR,
I2C_CLIENT_END };
+ if (ts)
+ return 0;
- /* add atmel mxt touch device on PANEL GMBus */
- ts = add_probed_i2c_device("touchscreen", I2C_ADAPTER_PANEL,
+ /* add atmel mxt touch device */
+ ts = add_probed_i2c_device("touchscreen", type,
&atmel_1664s_device, addr_list);
- return 0;
+ return (!ts) ? -EAGAIN : 0;
}
-
-static int __init setup_isl29018_als(const struct dmi_system_id *id)
+static int setup_isl29018_als(enum i2c_adapter_type type)
{
+ if (als)
+ return 0;
+
/* add isl29018 light sensor */
- als = add_smbus_device("lightsensor", &isl_als_device);
- return 0;
+ als = add_i2c_device("lightsensor", type, &isl_als_device);
+ return (!als) ? -EAGAIN : 0;
}
-static int __init setup_isl29023_als(const struct dmi_system_id *id)
+static int setup_tsl2583_als(enum i2c_adapter_type type)
{
- /* add isl29023 light sensor on Panel GMBus */
- als = add_i2c_device("lightsensor", I2C_ADAPTER_PANEL,
- &isl_als_device);
- return 0;
+ if (als)
+ return 0;
+
+ /* add tsl2583 light sensor */
+ als = add_i2c_device(NULL, type, &tsl2583_als_device);
+ return (!als) ? -EAGAIN : 0;
}
-static int __init setup_tsl2583_als(const struct dmi_system_id *id)
+static int setup_tsl2563_als(enum i2c_adapter_type type)
{
- /* add tsl2583 light sensor on smbus */
- als = add_smbus_device(NULL, &tsl2583_als_device);
- return 0;
+ if (als)
+ return 0;
+
+ /* add tsl2563 light sensor */
+ als = add_i2c_device(NULL, type, &tsl2563_als_device);
+ return (!als) ? -EAGAIN : 0;
}
-static int __init setup_tsl2563_als(const struct dmi_system_id *id)
+static int __init chromeos_laptop_dmi_matched(const struct dmi_system_id *id)
{
- /* add tsl2563 light sensor on smbus */
- als = add_smbus_device(NULL, &tsl2563_als_device);
- return 0;
+ cros_laptop = (void *)id->driver_data;
+ pr_debug("DMI Matched %s.\n", id->ident);
+
+ /* Indicate to dmi_scan that processing is done. */
+ return 1;
}
-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
- {
- .ident = "Samsung Series 5 550 - Touchpad",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
- },
- .callback = setup_cyapa_smbus_tp,
+static int chromeos_laptop_probe(struct platform_device *pdev)
+{
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < MAX_I2C_PERIPHERALS; i++) {
+ struct i2c_peripheral *i2c_dev;
+
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
+
+ /* No more peripherals. */
+ if (i2c_dev->add == NULL)
+ break;
+
+ /* Add the device. Set -EPROBE_DEFER on any failure */
+ if (i2c_dev->add(i2c_dev->type))
+ ret = -EPROBE_DEFER;
+ }
+
+ return ret;
+}
+
+static struct chromeos_laptop samsung_series_5_550 = {
+ .i2c_peripherals = {
+ /* Touchpad. */
+ { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
+ /* Light Sensor. */
+ { .add = setup_isl29018_als, I2C_ADAPTER_SMBUS },
},
- {
- .ident = "Chromebook Pixel - Touchscreen",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
- },
- .callback = setup_atmel_1664s_ts,
+};
+
+static struct chromeos_laptop samsung_series_5 = {
+ .i2c_peripherals = {
+ /* Light Sensor. */
+ { .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS },
+ },
+};
+
+static struct chromeos_laptop chromebook_pixel = {
+ .i2c_peripherals = {
+ /* Touch Screen. */
+ { .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL },
+ /* Touchpad. */
+ { .add = setup_atmel_224s_tp, I2C_ADAPTER_VGADDC },
+ /* Light Sensor. */
+ { .add = setup_isl29018_als, I2C_ADAPTER_PANEL },
+ },
+};
+
+static struct chromeos_laptop acer_c7_chromebook = {
+ .i2c_peripherals = {
+ /* Touchpad. */
+ { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
+ },
+};
+
+static struct chromeos_laptop acer_ac700 = {
+ .i2c_peripherals = {
+ /* Light Sensor. */
+ { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
},
+};
+
+static struct chromeos_laptop hp_pavilion_14_chromebook = {
+ .i2c_peripherals = {
+ /* Touchpad. */
+ { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
+ },
+};
+
+static struct chromeos_laptop cr48 = {
+ .i2c_peripherals = {
+ /* Light Sensor. */
+ { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
+ },
+};
+
+#define _CBDD(board_) \
+ .callback = chromeos_laptop_dmi_matched, \
+ .driver_data = (void *)&board_
+
+static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
{
- .ident = "Chromebook Pixel - Touchpad",
+ .ident = "Samsung Series 5 550",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
},
- .callback = setup_atmel_224s_tp,
+ _CBDD(samsung_series_5_550),
},
{
- .ident = "Samsung Series 5 550 - Light Sensor",
+ .ident = "Samsung Series 5",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
},
- .callback = setup_isl29018_als,
+ _CBDD(samsung_series_5),
},
{
- .ident = "Chromebook Pixel - Light Sensor",
+ .ident = "Chromebook Pixel",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
},
- .callback = setup_isl29023_als,
+ _CBDD(chromebook_pixel),
},
{
- .ident = "Acer C7 Chromebook - Touchpad",
+ .ident = "Acer C7 Chromebook",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Parrot"),
},
- .callback = setup_cyapa_smbus_tp,
+ _CBDD(acer_c7_chromebook),
},
{
- .ident = "HP Pavilion 14 Chromebook - Touchpad",
+ .ident = "Acer AC700",
.matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
},
- .callback = setup_cyapa_smbus_tp,
+ _CBDD(acer_ac700),
},
{
- .ident = "Samsung Series 5 - Light Sensor",
+ .ident = "HP Pavilion 14 Chromebook",
.matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"),
},
- .callback = setup_tsl2583_als,
+ _CBDD(hp_pavilion_14_chromebook),
},
{
- .ident = "Cr-48 - Light Sensor",
+ .ident = "Cr-48",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
},
- .callback = setup_tsl2563_als,
- },
- {
- .ident = "Acer AC700 - Light Sensor",
- .matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
- },
- .callback = setup_tsl2563_als,
+ _CBDD(cr48),
},
{ }
};
MODULE_DEVICE_TABLE(dmi, chromeos_laptop_dmi_table);
+static struct platform_device *cros_platform_device;
+
+static struct platform_driver cros_platform_driver = {
+ .driver = {
+ .name = "chromeos_laptop",
+ .owner = THIS_MODULE,
+ },
+ .probe = chromeos_laptop_probe,
+};
+
static int __init chromeos_laptop_init(void)
{
+ int ret;
if (!dmi_check_system(chromeos_laptop_dmi_table)) {
pr_debug("%s unsupported system.\n", __func__);
return -ENODEV;
}
+
+ ret = platform_driver_register(&cros_platform_driver);
+ if (ret)
+ return ret;
+
+ cros_platform_device = platform_device_alloc("chromeos_laptop", -1);
+ if (!cros_platform_device) {
+ ret = -ENOMEM;
+ goto fail_platform_device1;
+ }
+
+ ret = platform_device_add(cros_platform_device);
+ if (ret)
+ goto fail_platform_device2;
+
return 0;
+
+fail_platform_device2:
+ platform_device_put(cros_platform_device);
+fail_platform_device1:
+ platform_driver_unregister(&cros_platform_driver);
+ return ret;
}
static void __exit chromeos_laptop_exit(void)
@@ -398,6 +511,9 @@ static void __exit chromeos_laptop_exit(void)
i2c_unregister_device(tp);
if (ts)
i2c_unregister_device(ts);
+
+ platform_device_unregister(cros_platform_device);
+ platform_driver_unregister(&cros_platform_driver);
}
module_init(chromeos_laptop_init);
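
The chromeos_laptop rework replaces one DMI entry per peripheral with one descriptor per board: the DMI callback only records which struct chromeos_laptop matched, and the platform driver's probe then walks its i2c_peripherals[] array, returning -EPROBE_DEFER so the core retries once a late i2c adapter shows up. A condensed, self-contained sketch of that table walk; the names and the numeric "defer" value are illustrative stand-ins, not the kernel API:

    #include <stdio.h>

    #define DEFER 1  /* stand-in for -EPROBE_DEFER */

    struct peripheral {
        int (*add)(int adapter);   /* NULL entry terminates the list */
        int adapter;
    };

    static int add_touchpad(int adapter) { printf("touchpad on %d\n", adapter); return 0; }
    static int add_als(int adapter)      { (void)adapter; return 1; /* adapter missing */ }

    /* Walk the board's peripheral table; ask for a retry if anything failed. */
    static int board_probe(const struct peripheral *list)
    {
        int ret = 0;

        for (; list->add; list++)
            if (list->add(list->adapter))
                ret = DEFER;
        return ret;
    }

    int main(void)
    {
        const struct peripheral pixel[] = {
            { add_touchpad, 0 },
            { add_als, 1 },
            { NULL, 0 },
        };

        printf("probe -> %d (nonzero means try again later)\n", board_probe(pixel));
        return 0;
    }
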
diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
new file mode 100644
index 000000000000..e0e0e65cf442
--- /dev/null
+++ b/drivers/platform/chrome/chromeos_pstore.c
@@ -0,0 +1,101 @@
+/*
+ * chromeos_pstore.c - Driver to instantiate Chromebook ramoops device
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pstore_ram.h>
+
+static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = {
+ {
+ /*
+ * Today all Chromebooks/boxes ship with GOOGLE as vendor and
+ * coreboot as bios vendor. No other systems with this
+ * combination are known to date.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ },
+ },
+ {
+ /*
+ * The first Samsung Chromebox and Chromebook Series 5 550 use
+ * coreboot but with Samsung as the system vendor.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ },
+ },
+ {
+ /* x86-alex, the first Samsung Chromebook. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
+ },
+ },
+ {
+ /* x86-mario, the Cr-48 pilot device from Google. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "IEC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
+ },
+ },
+ {
+ /* x86-zgb, the first Acer Chromebook. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+ },
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, chromeos_pstore_dmi_table);
+
+/*
+ * On x86 chromebooks/boxes, the firmware will keep the legacy VGA memory
+ * range untouched across reboots, so we use that to store our pstore
+ * contents for panic logs, etc.
+ */
+static struct ramoops_platform_data chromeos_ramoops_data = {
+ .mem_size = 0x100000,
+ .mem_address = 0xf00000,
+ .record_size = 0x20000,
+ .console_size = 0x20000,
+ .ftrace_size = 0x20000,
+ .dump_oops = 1,
+};
+
+static struct platform_device chromeos_ramoops = {
+ .name = "ramoops",
+ .dev = {
+ .platform_data = &chromeos_ramoops_data,
+ },
+};
+
+static int __init chromeos_pstore_init(void)
+{
+ if (dmi_check_system(chromeos_pstore_dmi_table))
+ return platform_device_register(&chromeos_ramoops);
+
+ return -ENODEV;
+}
+
+static void __exit chromeos_pstore_exit(void)
+{
+ platform_device_unregister(&chromeos_ramoops);
+}
+
+module_init(chromeos_pstore_init);
+module_exit(chromeos_pstore_exit);
+
+MODULE_DESCRIPTION("Chrome OS pstore module");
+MODULE_LICENSE("GPL");
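
For a rough sense of the ramoops layout declared above: console and ftrace each take one 128 KiB slot out of the 1 MiB legacy VGA window, and the remainder is split into 128 KiB oops/panic records. A quick check of that arithmetic (simplified; fs/pstore/ram.c is authoritative for the real carve-up):

    #include <stdio.h>

    int main(void)
    {
        /* Values from chromeos_ramoops_data above. */
        unsigned long mem_size     = 0x100000;  /* 1 MiB at 0xf00000 */
        unsigned long record_size  = 0x20000;   /* 128 KiB per oops/panic record */
        unsigned long console_size = 0x20000;
        unsigned long ftrace_size  = 0x20000;

        /* Rough layout: one console slot, one ftrace slot, the remainder
         * split into dump records. */
        unsigned long dump_area = mem_size - console_size - ftrace_size;

        printf("%lu dump records of %lu KiB each\n",
               dump_area / record_size, record_size / 1024);
        return 0;
    }
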
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index d9dcd37b5a52..5ae65c11d544 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -197,6 +197,17 @@ config HP_ACCEL
To compile this driver as a module, choose M here: the module will
be called hp_accel.
+config HP_WIRELESS
+ tristate "HP WIRELESS"
+ depends on ACPI
+ depends on INPUT
+ help
+ This driver provides support for the new HP wireless button for Windows 8.
+ On such systems the driver should load automatically (via ACPI alias).
+
+ To compile this driver as a module, choose M here: the module will
+ be called hp-wireless.
+
config HP_WMI
tristate "HP WMI extras"
depends on ACPI_WMI
@@ -808,4 +819,12 @@ config PVPANIC
a paravirtualized device provided by QEMU; it lets a virtual machine
(guest) communicate panic events to the host.
+config INTEL_BAYTRAIL_MBI
+ tristate
+ depends on PCI
+ ---help---
+ Needed on Baytrail platforms for access to the IOSF Sideband Mailbox
+ Interface. This is a requirement for systems that need to configure
+ the PUNIT for power management features such as RAPL.
+
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index f0e6aa407ffb..9b87cfc42b84 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
obj-$(CONFIG_ACER_WMI) += acer-wmi.o
obj-$(CONFIG_ACERHDF) += acerhdf.o
obj-$(CONFIG_HP_ACCEL) += hp_accel.o
+obj-$(CONFIG_HP_WIRELESS) += hp-wireless.o
obj-$(CONFIG_HP_WMI) += hp-wmi.o
obj-$(CONFIG_AMILO_RFKILL) += amilo-rfkill.o
obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
@@ -54,3 +55,4 @@ obj-$(CONFIG_INTEL_RST) += intel-rst.o
obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
obj-$(CONFIG_PVPANIC) += pvpanic.o
+obj-$(CONFIG_INTEL_BAYTRAIL_MBI) += intel_baytrail.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c9076bdaf2c1..c91f69b39db4 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -41,8 +41,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
-
-#include <acpi/acpi_drivers.h>
#include <acpi/video.h>
MODULE_AUTHOR("Carlos Corbacho");
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 594323a926cf..7f4dc6f51f8a 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -53,8 +53,7 @@
#include <linux/rfkill.h>
#include <linux/slab.h>
#include <linux/dmi.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
+#include <linux/acpi.h>
#define ASUS_LAPTOP_VERSION "0.42"
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 19c313b056c3..c5e082fb82fa 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -45,8 +45,7 @@
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <acpi/video.h>
#include "asus-wmi.h"
@@ -184,7 +183,6 @@ struct asus_wmi {
struct input_dev *inputdev;
struct backlight_device *backlight_device;
- struct device *hwmon_device;
struct platform_device *platform_device;
struct led_classdev wlan_led;
@@ -606,6 +604,7 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus)
mutex_unlock(&asus->wmi_lock);
mutex_lock(&asus->hotplug_lock);
+ pci_lock_rescan_remove();
if (asus->wlan.rfkill)
rfkill_set_sw_state(asus->wlan.rfkill, blocked);
@@ -656,6 +655,7 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus)
}
out_unlock:
+ pci_unlock_rescan_remove();
mutex_unlock(&asus->hotplug_lock);
}
@@ -1071,20 +1071,12 @@ static ssize_t asus_hwmon_temp1(struct device *dev,
return sprintf(buf, "%d\n", value);
}
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL, 0);
-
-static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "asus\n");
-}
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+static DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL);
+static DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL);
static struct attribute *hwmon_attributes[] = {
- &sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_name.dev_attr.attr,
+ &dev_attr_pwm1.attr,
+ &dev_attr_temp1_input.attr,
NULL
};
@@ -1098,9 +1090,9 @@ static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
int dev_id = -1;
u32 value = ASUS_WMI_UNSUPPORTED_METHOD;
- if (attr == &sensor_dev_attr_pwm1.dev_attr.attr)
+ if (attr == &dev_attr_pwm1.attr)
dev_id = ASUS_WMI_DEVID_FAN_CTRL;
- else if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr)
+ else if (attr == &dev_attr_temp1_input.attr)
dev_id = ASUS_WMI_DEVID_THERMAL_CTRL;
if (dev_id != -1) {
@@ -1135,35 +1127,20 @@ static struct attribute_group hwmon_attribute_group = {
.is_visible = asus_hwmon_sysfs_is_visible,
.attrs = hwmon_attributes
};
-
-static void asus_wmi_hwmon_exit(struct asus_wmi *asus)
-{
- struct device *hwmon;
-
- hwmon = asus->hwmon_device;
- if (!hwmon)
- return;
- sysfs_remove_group(&hwmon->kobj, &hwmon_attribute_group);
- hwmon_device_unregister(hwmon);
- asus->hwmon_device = NULL;
-}
+__ATTRIBUTE_GROUPS(hwmon_attribute);
static int asus_wmi_hwmon_init(struct asus_wmi *asus)
{
struct device *hwmon;
- int result;
- hwmon = hwmon_device_register(&asus->platform_device->dev);
+ hwmon = hwmon_device_register_with_groups(&asus->platform_device->dev,
+ "asus", asus,
+ hwmon_attribute_groups);
if (IS_ERR(hwmon)) {
pr_err("Could not register asus hwmon device\n");
return PTR_ERR(hwmon);
}
- dev_set_drvdata(hwmon, asus);
- asus->hwmon_device = hwmon;
- result = sysfs_create_group(&hwmon->kobj, &hwmon_attribute_group);
- if (result)
- asus_wmi_hwmon_exit(asus);
- return result;
+ return 0;
}
/*
@@ -1834,7 +1811,6 @@ fail_backlight:
fail_rfkill:
asus_wmi_led_exit(asus);
fail_leds:
- asus_wmi_hwmon_exit(asus);
fail_hwmon:
asus_wmi_input_exit(asus);
fail_input:
@@ -1852,7 +1828,6 @@ static int asus_wmi_remove(struct platform_device *device)
wmi_remove_notify_handler(asus->driver->event_guid);
asus_wmi_backlight_exit(asus);
asus_wmi_input_exit(asus);
- asus_wmi_hwmon_exit(asus);
asus_wmi_led_exit(asus);
asus_wmi_rfkill_exit(asus);
asus_wmi_debugfs_exit(asus);
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 6dfa8d3b4eec..70d355a9ae2c 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -21,14 +21,13 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/backlight.h>
#include <linux/input.h>
#include <linux/rfkill.h>
MODULE_LICENSE("GPL");
-
struct cmpc_accel {
int sensitivity;
int g_select;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index eaa78edb1f4e..7297df2ebf50 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -173,8 +173,7 @@
/* ======= */
struct compal_data{
/* Fan control */
- struct device *hwmon_dev;
- int pwm_enable; /* 0:full on, 1:set by pwm1, 2:control by moterboard */
+ int pwm_enable; /* 0:full on, 1:set by pwm1, 2:control by motherboard */
unsigned char curr_pwm;
/* Power supply */
@@ -402,15 +401,6 @@ SIMPLE_MASKED_STORE_SHOW(wake_up_wlan, WAKE_UP_ADDR, WAKE_UP_WLAN)
SIMPLE_MASKED_STORE_SHOW(wake_up_key, WAKE_UP_ADDR, WAKE_UP_KEY)
SIMPLE_MASKED_STORE_SHOW(wake_up_mouse, WAKE_UP_ADDR, WAKE_UP_MOUSE)
-
-/* General hwmon interface */
-static ssize_t hwmon_name_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", DRIVER_NAME);
-}
-
-
/* Fan control interface */
static ssize_t pwm_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -665,55 +655,55 @@ static DEVICE_ATTR(wake_up_key,
static DEVICE_ATTR(wake_up_mouse,
0644, wake_up_mouse_show, wake_up_mouse_store);
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, hwmon_name_show, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, fan_show, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, temp_cpu, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, temp_cpu_local, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, temp_cpu_DTS, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, temp_northbridge, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, temp_vga, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, temp_SKIN, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, label_cpu, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, label_cpu_local, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, label_cpu_DTS, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, label_northbridge, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO, label_vga, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp6_label, S_IRUGO, label_SKIN, NULL, 1);
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, pwm_show, pwm_store, 1);
-static SENSOR_DEVICE_ATTR(pwm1_enable,
- S_IRUGO | S_IWUSR, pwm_enable_show, pwm_enable_store, 0);
-
-static struct attribute *compal_attributes[] = {
+static DEVICE_ATTR(fan1_input, S_IRUGO, fan_show, NULL);
+static DEVICE_ATTR(temp1_input, S_IRUGO, temp_cpu, NULL);
+static DEVICE_ATTR(temp2_input, S_IRUGO, temp_cpu_local, NULL);
+static DEVICE_ATTR(temp3_input, S_IRUGO, temp_cpu_DTS, NULL);
+static DEVICE_ATTR(temp4_input, S_IRUGO, temp_northbridge, NULL);
+static DEVICE_ATTR(temp5_input, S_IRUGO, temp_vga, NULL);
+static DEVICE_ATTR(temp6_input, S_IRUGO, temp_SKIN, NULL);
+static DEVICE_ATTR(temp1_label, S_IRUGO, label_cpu, NULL);
+static DEVICE_ATTR(temp2_label, S_IRUGO, label_cpu_local, NULL);
+static DEVICE_ATTR(temp3_label, S_IRUGO, label_cpu_DTS, NULL);
+static DEVICE_ATTR(temp4_label, S_IRUGO, label_northbridge, NULL);
+static DEVICE_ATTR(temp5_label, S_IRUGO, label_vga, NULL);
+static DEVICE_ATTR(temp6_label, S_IRUGO, label_SKIN, NULL);
+static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, pwm_show, pwm_store);
+static DEVICE_ATTR(pwm1_enable,
+ S_IRUGO | S_IWUSR, pwm_enable_show, pwm_enable_store);
+
+static struct attribute *compal_platform_attrs[] = {
&dev_attr_wake_up_pme.attr,
&dev_attr_wake_up_modem.attr,
&dev_attr_wake_up_lan.attr,
&dev_attr_wake_up_wlan.attr,
&dev_attr_wake_up_key.attr,
&dev_attr_wake_up_mouse.attr,
- /* Maybe put the sensor-stuff in a separate hwmon-driver? That way,
- * the hwmon sysfs won't be cluttered with the above files. */
- &sensor_dev_attr_name.dev_attr.attr,
- &sensor_dev_attr_pwm1_enable.dev_attr.attr,
- &sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp5_input.dev_attr.attr,
- &sensor_dev_attr_temp6_input.dev_attr.attr,
- &sensor_dev_attr_temp1_label.dev_attr.attr,
- &sensor_dev_attr_temp2_label.dev_attr.attr,
- &sensor_dev_attr_temp3_label.dev_attr.attr,
- &sensor_dev_attr_temp4_label.dev_attr.attr,
- &sensor_dev_attr_temp5_label.dev_attr.attr,
- &sensor_dev_attr_temp6_label.dev_attr.attr,
NULL
};
+static struct attribute_group compal_platform_attr_group = {
+ .attrs = compal_platform_attrs
+};
-static struct attribute_group compal_attribute_group = {
- .attrs = compal_attributes
+static struct attribute *compal_hwmon_attrs[] = {
+ &dev_attr_pwm1_enable.attr,
+ &dev_attr_pwm1.attr,
+ &dev_attr_fan1_input.attr,
+ &dev_attr_temp1_input.attr,
+ &dev_attr_temp2_input.attr,
+ &dev_attr_temp3_input.attr,
+ &dev_attr_temp4_input.attr,
+ &dev_attr_temp5_input.attr,
+ &dev_attr_temp6_input.attr,
+ &dev_attr_temp1_label.attr,
+ &dev_attr_temp2_label.attr,
+ &dev_attr_temp3_label.attr,
+ &dev_attr_temp4_label.attr,
+ &dev_attr_temp5_label.attr,
+ &dev_attr_temp6_label.attr,
+ NULL
};
+ATTRIBUTE_GROUPS(compal_hwmon);
static int compal_probe(struct platform_device *);
static int compal_remove(struct platform_device *);
@@ -1021,30 +1011,28 @@ static int compal_probe(struct platform_device *pdev)
{
int err;
struct compal_data *data;
+ struct device *hwmon_dev;
if (!extra_features)
return 0;
/* Fan control */
- data = kzalloc(sizeof(struct compal_data), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, sizeof(struct compal_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
initialize_fan_control_data(data);
- err = sysfs_create_group(&pdev->dev.kobj, &compal_attribute_group);
- if (err) {
- kfree(data);
+ err = sysfs_create_group(&pdev->dev.kobj, &compal_platform_attr_group);
+ if (err)
return err;
- }
- data->hwmon_dev = hwmon_device_register(&pdev->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- sysfs_remove_group(&pdev->dev.kobj,
- &compal_attribute_group);
- kfree(data);
- return err;
+ hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
+ DRIVER_NAME, data,
+ compal_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
+ goto remove;
}
/* Power supply */
@@ -1054,6 +1042,10 @@ static int compal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
return 0;
+
+remove:
+ sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
+ return err;
}
static void __exit compal_cleanup(void)
@@ -1080,12 +1072,9 @@ static int compal_remove(struct platform_device *pdev)
pwm_disable_control();
data = platform_get_drvdata(pdev);
- hwmon_device_unregister(data->hwmon_dev);
power_supply_unregister(&data->psy);
- kfree(data);
-
- sysfs_remove_group(&pdev->dev.kobj, &compal_attribute_group);
+ sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
return 0;
}
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index c608b1d33f4a..fed4111ac31a 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -559,19 +559,45 @@ static void dell_update_rfkill(struct work_struct *ignored)
}
static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
+static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
+ struct serio *port)
+{
+ static bool extended;
+
+ if (str & 0x20)
+ return false;
+
+ if (unlikely(data == 0xe0)) {
+ extended = true;
+ return false;
+ } else if (unlikely(extended)) {
+ switch (data) {
+ case 0x8:
+ schedule_delayed_work(&dell_rfkill_work,
+ round_jiffies_relative(HZ / 4));
+ break;
+ }
+ extended = false;
+ }
+
+ return false;
+}
static int __init dell_setup_rfkill(void)
{
- int status;
- int ret;
+ int status, ret, whitelisted;
const char *product;
/*
- * rfkill causes trouble on various non Latitudes, according to Dell
- * actually testing the rfkill functionality is only done on Latitudes.
+ * rfkill support causes trouble on various models, mostly Inspirons.
+ * So we whitelist certain series, and don't support rfkill on others.
*/
+ whitelisted = 0;
product = dmi_get_system_info(DMI_PRODUCT_NAME);
- if (!force_rfkill && (!product || strncmp(product, "Latitude", 8)))
+ if (product && (strncmp(product, "Latitude", 8) == 0 ||
+ strncmp(product, "Precision", 9) == 0))
+ whitelisted = 1;
+ if (!force_rfkill && !whitelisted)
return 0;
get_buffer();
@@ -633,7 +659,16 @@ static int __init dell_setup_rfkill(void)
goto err_wwan;
}
+ ret = i8042_install_filter(dell_laptop_i8042_filter);
+ if (ret) {
+ pr_warn("Unable to install key filter\n");
+ goto err_filter;
+ }
+
return 0;
+err_filter:
+ if (wwan_rfkill)
+ rfkill_unregister(wwan_rfkill);
err_wwan:
rfkill_destroy(wwan_rfkill);
if (bluetooth_rfkill)
@@ -684,7 +719,7 @@ static int dell_send_intensity(struct backlight_device *bd)
out:
release_buffer();
- return 0;
+ return ret;
}
static int dell_get_intensity(struct backlight_device *bd)
@@ -755,30 +790,6 @@ static void touchpad_led_exit(void)
led_classdev_unregister(&touchpad_led);
}
-static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
-{
- static bool extended;
-
- if (str & 0x20)
- return false;
-
- if (unlikely(data == 0xe0)) {
- extended = true;
- return false;
- } else if (unlikely(extended)) {
- switch (data) {
- case 0x8:
- schedule_delayed_work(&dell_rfkill_work,
- round_jiffies_relative(HZ / 4));
- break;
- }
- extended = false;
- }
-
- return false;
-}
-
static int __init dell_init(void)
{
int max_intensity = 0;
@@ -828,12 +839,6 @@ static int __init dell_init(void)
goto fail_rfkill;
}
- ret = i8042_install_filter(dell_laptop_i8042_filter);
- if (ret) {
- pr_warn("Unable to install key filter\n");
- goto fail_filter;
- }
-
if (quirks && quirks->touchpad_led)
touchpad_led_init(&platform_device->dev);
@@ -885,7 +890,6 @@ static int __init dell_init(void)
fail_backlight:
i8042_remove_filter(dell_laptop_i8042_filter);
cancel_delayed_work_sync(&dell_rfkill_work);
-fail_filter:
dell_cleanup_rfkill();
fail_rfkill:
free_page((unsigned long)bufferpage);
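
The dell-laptop hunk moves the i8042 filter definition ahead of dell_setup_rfkill() so the filter can be installed from there. The filter itself is a two-state machine: a 0xe0 prefix byte arms an "extended" flag, and an extended 0x08 scancode queues the rfkill refresh. A stand-alone sketch of that state machine over a byte stream, with the scheduling call replaced by a print and the keyboard-status check omitted:

    #include <stdbool.h>
    #include <stdio.h>

    /* Feed one scancode byte; returns true when the rfkill refresh would be queued. */
    static bool filter_byte(unsigned char data)
    {
        static bool extended;

        if (data == 0xe0) {          /* prefix: next byte is an extended code */
            extended = true;
            return false;
        }
        if (extended) {
            extended = false;
            return data == 0x08;     /* extended 0x08 = wireless toggle */
        }
        return false;
    }

    int main(void)
    {
        const unsigned char stream[] = { 0x1c, 0xe0, 0x08, 0x1c };

        for (unsigned i = 0; i < sizeof(stream); i++)
            if (filter_byte(stream[i]))
                printf("byte %u: schedule rfkill update\n", i);
        return 0;
    }
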
diff --git a/drivers/platform/x86/dell-wmi-aio.c b/drivers/platform/x86/dell-wmi-aio.c
index bcf8cc6b5537..dbc97a33bbc8 100644
--- a/drivers/platform/x86/dell-wmi-aio.c
+++ b/drivers/platform/x86/dell-wmi-aio.c
@@ -24,7 +24,6 @@
#include <linux/types.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
-#include <acpi/acpi_drivers.h>
#include <linux/acpi.h>
#include <linux/string.h>
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 60e0900bc117..390e8e33d5e3 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -32,7 +32,6 @@
#include <linux/types.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
-#include <acpi/acpi_drivers.h>
#include <linux/acpi.h>
#include <linux/string.h>
#include <linux/dmi.h>
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index dec68e7a99c7..399e8c562192 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -28,8 +28,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/slab.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
+#include <linux/acpi.h>
#include <linux/uaccess.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
@@ -166,7 +165,6 @@ struct eeepc_laptop {
struct platform_device *platform_device;
struct acpi_device *device; /* the device we are in */
- struct device *hwmon_device;
struct backlight_device *backlight_device;
struct input_dev *inputdev;
@@ -592,6 +590,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
rfkill_set_sw_state(eeepc->wlan_rfkill, blocked);
mutex_lock(&eeepc->hotplug_lock);
+ pci_lock_rescan_remove();
if (eeepc->hotplug_slot) {
port = acpi_get_pci_dev(handle);
@@ -649,6 +648,7 @@ out_put_dev:
}
out_unlock:
+ pci_unlock_rescan_remove();
mutex_unlock(&eeepc->hotplug_lock);
}
@@ -1067,7 +1067,7 @@ static ssize_t show_sys_hwmon(int (*get)(void), char *buf)
{ \
return store_sys_hwmon(_get, buf, count); \
} \
- static SENSOR_DEVICE_ATTR(_name, _mode, show_##_name, store_##_name, 0);
+ static DEVICE_ATTR(_name, _mode, show_##_name, store_##_name);
EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL);
EEEPC_CREATE_SENSOR_ATTR(pwm1, S_IRUGO | S_IWUSR,
@@ -1075,55 +1075,26 @@ EEEPC_CREATE_SENSOR_ATTR(pwm1, S_IRUGO | S_IWUSR,
EEEPC_CREATE_SENSOR_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
eeepc_get_fan_ctrl, eeepc_set_fan_ctrl);
-static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "eeepc\n");
-}
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
-
-static struct attribute *hwmon_attributes[] = {
- &sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_pwm1_enable.dev_attr.attr,
- &sensor_dev_attr_name.dev_attr.attr,
+static struct attribute *hwmon_attrs[] = {
+ &dev_attr_pwm1.attr,
+ &dev_attr_fan1_input.attr,
+ &dev_attr_pwm1_enable.attr,
NULL
};
-
-static struct attribute_group hwmon_attribute_group = {
- .attrs = hwmon_attributes
-};
-
-static void eeepc_hwmon_exit(struct eeepc_laptop *eeepc)
-{
- struct device *hwmon;
-
- hwmon = eeepc->hwmon_device;
- if (!hwmon)
- return;
- sysfs_remove_group(&hwmon->kobj,
- &hwmon_attribute_group);
- hwmon_device_unregister(hwmon);
- eeepc->hwmon_device = NULL;
-}
+ATTRIBUTE_GROUPS(hwmon);
static int eeepc_hwmon_init(struct eeepc_laptop *eeepc)
{
+ struct device *dev = &eeepc->platform_device->dev;
struct device *hwmon;
- int result;
- hwmon = hwmon_device_register(&eeepc->platform_device->dev);
+ hwmon = devm_hwmon_device_register_with_groups(dev, "eeepc", NULL,
+ hwmon_groups);
if (IS_ERR(hwmon)) {
pr_err("Could not register eeepc hwmon device\n");
- eeepc->hwmon_device = NULL;
return PTR_ERR(hwmon);
}
- eeepc->hwmon_device = hwmon;
- result = sysfs_create_group(&hwmon->kobj,
- &hwmon_attribute_group);
- if (result)
- eeepc_hwmon_exit(eeepc);
- return result;
+ return 0;
}
/*
@@ -1479,7 +1450,6 @@ static int eeepc_acpi_add(struct acpi_device *device)
fail_rfkill:
eeepc_led_exit(eeepc);
fail_led:
- eeepc_hwmon_exit(eeepc);
fail_hwmon:
eeepc_input_exit(eeepc);
fail_input:
@@ -1499,7 +1469,6 @@ static int eeepc_acpi_remove(struct acpi_device *device)
eeepc_backlight_exit(eeepc);
eeepc_rfkill_exit(eeepc);
eeepc_input_exit(eeepc);
- eeepc_hwmon_exit(eeepc);
eeepc_led_exit(eeepc);
eeepc_platform_exit(eeepc);
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index af67e6e56ebb..6112933f6278 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -33,7 +33,7 @@
#include <linux/input/sparse-keymap.h>
#include <linux/dmi.h>
#include <linux/fb.h>
-#include <acpi/acpi_bus.h>
+#include <linux/acpi.h>
#include "asus-wmi.h"
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 9d30d69aa78f..be02bcc346d3 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -633,7 +633,6 @@ static struct dmi_system_id fujitsu_dmi_table[] = {
static int acpi_fujitsu_add(struct acpi_device *device)
{
- int result = 0;
int state = 0;
struct input_dev *input;
int error;
@@ -669,8 +668,8 @@ static int acpi_fujitsu_add(struct acpi_device *device)
if (error)
goto err_free_input_dev;
- result = acpi_bus_update_power(fujitsu->acpi_handle, &state);
- if (result) {
+ error = acpi_bus_update_power(fujitsu->acpi_handle, &state);
+ if (error) {
pr_err("Error reading power state\n");
goto err_unregister_input_dev;
}
@@ -700,7 +699,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS;
get_lcd_level();
- return result;
+ return 0;
err_unregister_input_dev:
input_unregister_device(input);
@@ -708,7 +707,7 @@ err_unregister_input_dev:
err_free_input_dev:
input_free_device(input);
err_stop:
- return result;
+ return error;
}
static int acpi_fujitsu_remove(struct acpi_device *device)
@@ -831,8 +830,8 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (error)
goto err_free_input_dev;
- result = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
- if (result) {
+ error = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
+ if (error) {
pr_err("Error reading power state\n");
goto err_unregister_input_dev;
}
@@ -907,7 +906,7 @@ err_free_input_dev:
err_free_fifo:
kfifo_free(&fujitsu_hotkey->fifo);
err_stop:
- return result;
+ return error;
}
static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)
diff --git a/drivers/platform/x86/hp-wireless.c b/drivers/platform/x86/hp-wireless.c
new file mode 100644
index 000000000000..415348fc1210
--- /dev/null
+++ b/drivers/platform/x86/hp-wireless.c
@@ -0,0 +1,132 @@
+/*
+ * hp-wireless button for Windows 8
+ *
+ * Copyright (C) 2014 Alex Hung <alex.hung@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alex Hung");
+MODULE_ALIAS("acpi*:HPQ6001:*");
+
+static struct input_dev *hpwl_input_dev;
+
+static const struct acpi_device_id hpwl_ids[] = {
+ {"HPQ6001", 0},
+ {"", 0},
+};
+
+static int hp_wireless_input_setup(void)
+{
+ int err;
+
+ hpwl_input_dev = input_allocate_device();
+ if (!hpwl_input_dev)
+ return -ENOMEM;
+
+ hpwl_input_dev->name = "HP Wireless hotkeys";
+ hpwl_input_dev->phys = "hpq6001/input0";
+ hpwl_input_dev->id.bustype = BUS_HOST;
+ hpwl_input_dev->evbit[0] = BIT(EV_KEY);
+ set_bit(KEY_RFKILL, hpwl_input_dev->keybit);
+
+ err = input_register_device(hpwl_input_dev);
+ if (err)
+ goto err_free_dev;
+
+ return 0;
+
+err_free_dev:
+ input_free_device(hpwl_input_dev);
+ return err;
+}
+
+static void hp_wireless_input_destroy(void)
+{
+ input_unregister_device(hpwl_input_dev);
+}
+
+static void hpwl_notify(struct acpi_device *acpi_dev, u32 event)
+{
+ if (event != 0x80) {
+ pr_info("Received unknown event (0x%x)\n", event);
+ return;
+ }
+
+ input_report_key(hpwl_input_dev, KEY_RFKILL, 1);
+ input_sync(hpwl_input_dev);
+ input_report_key(hpwl_input_dev, KEY_RFKILL, 0);
+ input_sync(hpwl_input_dev);
+}
+
+static int hpwl_add(struct acpi_device *device)
+{
+ int err;
+
+ err = hp_wireless_input_setup();
+ return err;
+}
+
+static int hpwl_remove(struct acpi_device *device)
+{
+ hp_wireless_input_destroy();
+ return 0;
+}
+
+static struct acpi_driver hpwl_driver = {
+ .name = "hp-wireless",
+ .owner = THIS_MODULE,
+ .ids = hpwl_ids,
+ .ops = {
+ .add = hpwl_add,
+ .remove = hpwl_remove,
+ .notify = hpwl_notify,
+ },
+};
+
+static int __init hpwl_init(void)
+{
+ int err;
+
+ pr_info("Initializing HPQ6001 module\n");
+ err = acpi_bus_register_driver(&hpwl_driver);
+ if (err) {
+ pr_err("Unable to register HP wireless control driver.\n");
+ goto error_acpi_register;
+ }
+
+ return 0;
+
+error_acpi_register:
+ return err;
+}
+
+static void __exit hpwl_exit(void)
+{
+ pr_info("Exiting HPQ6001 module\n");
+ acpi_bus_unregister_driver(&hpwl_driver);
+}
+
+module_init(hpwl_init);
+module_exit(hpwl_exit);
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index a8e43cf70fac..3dc934438c28 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -36,7 +36,7 @@
#include <linux/uaccess.h>
#include <linux/leds.h>
#include <linux/atomic.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include "../../misc/lis3lv02d/lis3lv02d.h"
#define DRIVER_NAME "hp_accel"
@@ -77,6 +77,7 @@ static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
static struct acpi_device_id lis3lv02d_device_ids[] = {
{"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
{"HPQ6000", 0}, /* HP Mobile Data Protection System PNP */
+ {"HPQ6007", 0}, /* HP Mobile Data Protection System PNP */
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
@@ -88,7 +89,7 @@ MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
*
* Returns 0 on success.
*/
-int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
+static int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
{
struct acpi_device *dev = lis3->bus_priv;
if (acpi_evaluate_object(dev->handle, METHOD_NAME__INI,
@@ -106,7 +107,7 @@ int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
*
* Returns 0 on success.
*/
-int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
+static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
{
struct acpi_device *dev = lis3->bus_priv;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
@@ -129,7 +130,7 @@ int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
*
* Returns 0 on success.
*/
-int lis3lv02d_acpi_write(struct lis3lv02d *lis3, int reg, u8 val)
+static int lis3lv02d_acpi_write(struct lis3lv02d *lis3, int reg, u8 val)
{
struct acpi_device *dev = lis3->bus_priv;
unsigned long long ret; /* Not used when writing */
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 19ec95147f69..6dd060a0bb65 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -26,8 +26,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/rfkill.h>
#include <linux/platform_device.h>
#include <linux/input.h>
diff --git a/drivers/platform/x86/intel-rst.c b/drivers/platform/x86/intel-rst.c
index a2083a9e5662..d45bca34bf1b 100644
--- a/drivers/platform/x86/intel-rst.c
+++ b/drivers/platform/x86/intel-rst.c
@@ -20,7 +20,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel-smartconnect.c
index 1838400dc036..04cf5dffdfd9 100644
--- a/drivers/platform/x86/intel-smartconnect.c
+++ b/drivers/platform/x86/intel-smartconnect.c
@@ -19,7 +19,7 @@
#include <linux/init.h>
#include <linux/module.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel_baytrail.c b/drivers/platform/x86/intel_baytrail.c
new file mode 100644
index 000000000000..f96626b17260
--- /dev/null
+++ b/drivers/platform/x86/intel_baytrail.c
@@ -0,0 +1,224 @@
+/*
+ * Baytrail IOSF-SB MailBox Interface Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *
+ * The IOSF-SB is a fabric bus available on Atom-based SoCs that uses a
+ * mailbox interface (MBI) to communicate with multiple devices. This
+ * driver implements BayTrail-specific access to this interface.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+
+#include "intel_baytrail.h"
+
+static DEFINE_SPINLOCK(iosf_mbi_lock);
+
+static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
+{
+ return (op << 24) | (port << 16) | (offset << 8) | BT_MBI_ENABLE;
+}
+
+static struct pci_dev *mbi_pdev; /* one mbi device */
+
+/* Hold lock before calling */
+static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
+{
+ int result;
+
+ if (!mbi_pdev)
+ return -ENODEV;
+
+ if (mcrx) {
+ result = pci_write_config_dword(mbi_pdev,
+ BT_MBI_MCRX_OFFSET, mcrx);
+ if (result < 0)
+ goto iosf_mbi_read_err;
+ }
+
+ result = pci_write_config_dword(mbi_pdev,
+ BT_MBI_MCR_OFFSET, mcr);
+ if (result < 0)
+ goto iosf_mbi_read_err;
+
+ result = pci_read_config_dword(mbi_pdev,
+ BT_MBI_MDR_OFFSET, mdr);
+ if (result < 0)
+ goto iosf_mbi_read_err;
+
+ return 0;
+
+iosf_mbi_read_err:
+ dev_err(&mbi_pdev->dev, "error: PCI config operation returned %d\n",
+ result);
+ return result;
+}
+
+/* Hold lock before calling */
+static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
+{
+ int result;
+
+ if (!mbi_pdev)
+ return -ENODEV;
+
+ result = pci_write_config_dword(mbi_pdev,
+ BT_MBI_MDR_OFFSET, mdr);
+ if (result < 0)
+ goto iosf_mbi_write_err;
+
+ if (mcrx) {
+ result = pci_write_config_dword(mbi_pdev,
+ BT_MBI_MCRX_OFFSET, mcrx);
+ if (result < 0)
+ goto iosf_mbi_write_err;
+ }
+
+ result = pci_write_config_dword(mbi_pdev,
+ BT_MBI_MCR_OFFSET, mcr);
+ if (result < 0)
+ goto iosf_mbi_write_err;
+
+ return 0;
+
+iosf_mbi_write_err:
+ dev_err(&mbi_pdev->dev, "error: PCI config operation returned %d\n",
+ result);
+ return result;
+}
+
+int bt_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
+{
+ u32 mcr, mcrx;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ BUG_ON(port == BT_MBI_UNIT_GFX);
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
+ mcrx = offset & BT_MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+ ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr);
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(bt_mbi_read);
+
+int bt_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
+{
+ u32 mcr, mcrx;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ BUG_ON(port == BT_MBI_UNIT_GFX);
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
+ mcrx = offset & BT_MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+ ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr);
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(bt_mbi_write);
+
+int bt_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
+{
+ u32 mcr, mcrx;
+ u32 value;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ BUG_ON(port == BT_MBI_UNIT_GFX);
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
+ mcrx = offset & BT_MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+
+ /* Read current mdr value */
+ ret = iosf_mbi_pci_read_mdr(mcrx, mcr & BT_MBI_RD_MASK, &value);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+ return ret;
+ }
+
+ /* Apply mask */
+ value &= ~mask;
+ mdr &= mask;
+ value |= mdr;
+
+ /* Write back */
+ ret = iosf_mbi_pci_write_mdr(mcrx, mcr | BT_MBI_WR_MASK, value);
+
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(bt_mbi_modify);
+
+static int iosf_mbi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *unused)
+{
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error: could not enable device\n");
+ return ret;
+ }
+
+ mbi_pdev = pci_dev_get(pdev);
+ return 0;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(iosf_mbi_pci_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F00) },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
+
+static struct pci_driver iosf_mbi_pci_driver = {
+ .name = "iosf_mbi_pci",
+ .probe = iosf_mbi_probe,
+ .id_table = iosf_mbi_pci_ids,
+};
+
+static int __init bt_mbi_init(void)
+{
+ return pci_register_driver(&iosf_mbi_pci_driver);
+}
+
+static void __exit bt_mbi_exit(void)
+{
+ pci_unregister_driver(&iosf_mbi_pci_driver);
+ if (mbi_pdev) {
+ pci_dev_put(mbi_pdev);
+ mbi_pdev = NULL;
+ }
+}
+
+module_init(bt_mbi_init);
+module_exit(bt_mbi_exit);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("BayTrail Mailbox Interface accessor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_baytrail.h b/drivers/platform/x86/intel_baytrail.h
new file mode 100644
index 000000000000..8bcc311262e9
--- /dev/null
+++ b/drivers/platform/x86/intel_baytrail.h
@@ -0,0 +1,90 @@
+/*
+ * intel_baytrail.h: MailBox access support for Intel BayTrail platforms
+ */
+
+#ifndef INTEL_BAYTRAIL_MBI_SYMS_H
+#define INTEL_BAYTRAIL_MBI_SYMS_H
+
+#define BT_MBI_MCR_OFFSET 0xD0
+#define BT_MBI_MDR_OFFSET 0xD4
+#define BT_MBI_MCRX_OFFSET 0xD8
+
+#define BT_MBI_RD_MASK 0xFEFFFFFF
+#define BT_MBI_WR_MASK 0x01000000
+
+#define BT_MBI_MASK_HI 0xFFFFFF00
+#define BT_MBI_MASK_LO 0x000000FF
+#define BT_MBI_ENABLE 0xF0
+
+/* BT-SB unit access methods */
+#define BT_MBI_UNIT_AUNIT 0x00
+#define BT_MBI_UNIT_SMC 0x01
+#define BT_MBI_UNIT_CPU 0x02
+#define BT_MBI_UNIT_BUNIT 0x03
+#define BT_MBI_UNIT_PMC 0x04
+#define BT_MBI_UNIT_GFX 0x06
+#define BT_MBI_UNIT_SMI 0x0C
+#define BT_MBI_UNIT_USB 0x43
+#define BT_MBI_UNIT_SATA 0xA3
+#define BT_MBI_UNIT_PCIE 0xA6
+
+/* Read/write opcodes */
+#define BT_MBI_AUNIT_READ 0x10
+#define BT_MBI_AUNIT_WRITE 0x11
+#define BT_MBI_SMC_READ 0x10
+#define BT_MBI_SMC_WRITE 0x11
+#define BT_MBI_CPU_READ 0x10
+#define BT_MBI_CPU_WRITE 0x11
+#define BT_MBI_BUNIT_READ 0x10
+#define BT_MBI_BUNIT_WRITE 0x11
+#define BT_MBI_PMC_READ 0x06
+#define BT_MBI_PMC_WRITE 0x07
+#define BT_MBI_GFX_READ 0x00
+#define BT_MBI_GFX_WRITE 0x01
+#define BT_MBI_SMIO_READ 0x06
+#define BT_MBI_SMIO_WRITE 0x07
+#define BT_MBI_USB_READ 0x06
+#define BT_MBI_USB_WRITE 0x07
+#define BT_MBI_SATA_READ 0x00
+#define BT_MBI_SATA_WRITE 0x01
+#define BT_MBI_PCIE_READ 0x00
+#define BT_MBI_PCIE_WRITE 0x01
+
+/**
+ * bt_mbi_read() - MailBox Interface read command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data to be read
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int bt_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr);
+
+/**
+ * bt_mbi_write() - MailBox unmasked write command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data to be written
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int bt_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr);
+
+/**
+ * bt_mbi_modify() - MailBox masked write command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data being modified
+ * @mask: mask indicating bits in mdr to be modified
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int bt_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
+
+#endif /* INTEL_BAYTRAIL_MBI_SYMS_H */
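
[Editor's note: a minimal usage sketch of the bt_mbi_* accessors declared above, not part of the patch. The PMC register offset 0x30 and the example_ function name are purely illustrative and do not correspond to a documented BayTrail register.]

	#include <linux/types.h>
	#include "intel_baytrail.h"

	static int example_set_pmc_bit0(void)
	{
		u32 val;
		int ret;

		/* Read a PMC register through the mailbox */
		ret = bt_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_PMC_READ, 0x30, &val);
		if (ret)
			return ret;

		/* Read-modify-write: set bit 0 without touching the other bits */
		return bt_mbi_modify(BT_MBI_UNIT_PMC, BT_MBI_PMC_WRITE, 0x30, 0x1, 0x1);
	}

As the kernel-doc above notes, locking is handled internally by a spinlock, so the calls do not sleep.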
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index 11244f8703c4..e8b46d2c468c 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -36,10 +36,8 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pm.h>
-
#include <linux/thermal.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
MODULE_AUTHOR("Thomas Sujith");
MODULE_AUTHOR("Zhang Rui");
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
index f6f18cde0f11..4bc960416785 100644
--- a/drivers/platform/x86/intel_oaktrail.c
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -50,9 +50,6 @@
#include <linux/platform_device.h>
#include <linux/dmi.h>
#include <linux/rfkill.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
#define DRIVER_NAME "intel_oaktrail"
#define DRIVER_VERSION "0.4ac1"
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 60ea476a9130..76ca094ed012 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -62,12 +62,10 @@
#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
#define IPC_IOC 0x100 /* IPC command register IOC bit */
-enum {
- SCU_IPC_LINCROFT,
- SCU_IPC_PENWELL,
- SCU_IPC_CLOVERVIEW,
- SCU_IPC_TANGIER,
-};
+#define PCI_DEVICE_ID_LINCROFT 0x082a
+#define PCI_DEVICE_ID_PENWELL 0x080e
+#define PCI_DEVICE_ID_CLOVERVIEW 0x08ea
+#define PCI_DEVICE_ID_TANGIER 0x11a0
/* intel scu ipc driver data*/
struct intel_scu_ipc_pdata_t {
@@ -78,35 +76,29 @@ struct intel_scu_ipc_pdata_t {
u8 irq_mode;
};
-static struct intel_scu_ipc_pdata_t intel_scu_ipc_pdata[] = {
- [SCU_IPC_LINCROFT] = {
- .ipc_base = 0xff11c000,
- .i2c_base = 0xff12b000,
- .ipc_len = 0x100,
- .i2c_len = 0x10,
- .irq_mode = 0,
- },
- [SCU_IPC_PENWELL] = {
- .ipc_base = 0xff11c000,
- .i2c_base = 0xff12b000,
- .ipc_len = 0x100,
- .i2c_len = 0x10,
- .irq_mode = 1,
- },
- [SCU_IPC_CLOVERVIEW] = {
- .ipc_base = 0xff11c000,
- .i2c_base = 0xff12b000,
- .ipc_len = 0x100,
- .i2c_len = 0x10,
- .irq_mode = 1,
- },
- [SCU_IPC_TANGIER] = {
- .ipc_base = 0xff009000,
- .i2c_base = 0xff00d000,
- .ipc_len = 0x100,
- .i2c_len = 0x10,
- .irq_mode = 0,
- },
+static struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
+ .ipc_base = 0xff11c000,
+ .i2c_base = 0xff12b000,
+ .ipc_len = 0x100,
+ .i2c_len = 0x10,
+ .irq_mode = 0,
+};
+
+/* Penwell and Cloverview */
+static struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
+ .ipc_base = 0xff11c000,
+ .i2c_base = 0xff12b000,
+ .ipc_len = 0x100,
+ .i2c_len = 0x10,
+ .irq_mode = 1,
+};
+
+static struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
+ .ipc_base = 0xff009000,
+ .i2c_base = 0xff00d000,
+ .ipc_len = 0x100,
+ .i2c_len = 0x10,
+ .irq_mode = 0,
};
static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id);
@@ -583,15 +575,14 @@ static irqreturn_t ioc(int irq, void *dev_id)
*/
static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- int err, pid;
+ int err;
struct intel_scu_ipc_pdata_t *pdata;
resource_size_t pci_resource;
if (ipcdev.pdev) /* We support only one SCU */
return -EBUSY;
- pid = id->driver_data;
- pdata = &intel_scu_ipc_pdata[pid];
+ pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data;
ipcdev.pdev = pci_dev_get(dev);
ipcdev.irq_mode = pdata->irq_mode;
@@ -650,11 +641,21 @@ static void ipc_remove(struct pci_dev *pdev)
}
static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
- {PCI_VDEVICE(INTEL, 0x082a), SCU_IPC_LINCROFT},
- {PCI_VDEVICE(INTEL, 0x080e), SCU_IPC_PENWELL},
- {PCI_VDEVICE(INTEL, 0x08ea), SCU_IPC_CLOVERVIEW},
- {PCI_VDEVICE(INTEL, 0x11a0), SCU_IPC_TANGIER},
- { 0,}
+ {
+ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_LINCROFT),
+ (kernel_ulong_t)&intel_scu_ipc_lincroft_pdata,
+ }, {
+ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL),
+ (kernel_ulong_t)&intel_scu_ipc_penwell_pdata,
+ }, {
+ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CLOVERVIEW),
+ (kernel_ulong_t)&intel_scu_ipc_penwell_pdata,
+ }, {
+ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER),
+ (kernel_ulong_t)&intel_scu_ipc_tangier_pdata,
+ }, {
+ 0,
+ }
};
MODULE_DEVICE_TABLE(pci, pci_ids);
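
[Editor's note: the hunk above switches id->driver_data from an index into a table to a direct pointer to the per-SoC platform data. A generic sketch of that pattern follows; the example_ names are hypothetical and only the 0x080e device ID is taken from the table above.]

	#include <linux/pci.h>

	struct example_pdata {
		u8 irq_mode;
	};

	static struct example_pdata example_penwell_pdata = {
		.irq_mode = 1,
	};

	static DEFINE_PCI_DEVICE_TABLE(example_ids) = {
		{ PCI_VDEVICE(INTEL, 0x080e), (kernel_ulong_t)&example_penwell_pdata },
		{ 0, },
	};

	static int example_probe(struct pci_dev *dev, const struct pci_device_id *id)
	{
		/* Per-device configuration is reachable directly, no lookup table needed */
		struct example_pdata *pdata = (struct example_pdata *)id->driver_data;

		dev_info(&dev->dev, "irq_mode=%u\n", pdata->irq_mode);
		return 0;
	}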
diff --git a/drivers/platform/x86/mxm-wmi.c b/drivers/platform/x86/mxm-wmi.c
index 0aea63b3729a..f4bad83053a9 100644
--- a/drivers/platform/x86/mxm-wmi.c
+++ b/drivers/platform/x86/mxm-wmi.c
@@ -20,8 +20,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/mxm-wmi.h>
+#include <linux/acpi.h>
MODULE_AUTHOR("Dave Airlie");
MODULE_DESCRIPTION("MXM WMI Driver");
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 3008fd20572e..609d38779b26 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -125,12 +125,10 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
-
#ifndef ACPI_HOTKEY_COMPONENT
#define ACPI_HOTKEY_COMPONENT 0x10000000
#endif
diff --git a/drivers/platform/x86/pvpanic.c b/drivers/platform/x86/pvpanic.c
index 47ae0c47d4b5..c9f6e511daa6 100644
--- a/drivers/platform/x86/pvpanic.c
+++ b/drivers/platform/x86/pvpanic.c
@@ -24,8 +24,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
MODULE_DESCRIPTION("pvpanic device driver");
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
index cae7098e9b0d..5413f62d2e61 100644
--- a/drivers/platform/x86/samsung-q10.c
+++ b/drivers/platform/x86/samsung-q10.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/backlight.h>
#include <linux/dmi.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#define SAMSUNGQ10_BL_MAX_INTENSITY 7
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index fb233ae7bb0e..8f8551a63cc0 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -61,9 +61,6 @@
#include <linux/workqueue.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
-#include <asm/uaccess.h>
#include <linux/sonypi.h>
#include <linux/sony-laptop.h>
#include <linux/rfkill.h>
@@ -71,6 +68,7 @@
#include <linux/poll.h>
#include <linux/miscdevice.h>
#endif
+#include <asm/uaccess.h>
#define dprintk(fmt, ...) \
do { \
@@ -791,7 +789,7 @@ static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
void *buffer, size_t buflen)
{
int ret = 0;
- size_t len = len;
+ size_t len;
union acpi_object *object = __call_snc_method(handle, name, value);
if (!object)
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 9b93fdb61ed7..6a6ea28a7e51 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -32,9 +32,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/platform_device.h>
#define GUID "C364AC71-36DB-495A-8494-B439D472A505"
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 58b0274d24cc..defb6afc1409 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -61,7 +61,6 @@
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/slab.h>
-
#include <linux/nvram.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
@@ -74,21 +73,16 @@
#include <linux/input.h>
#include <linux/leds.h>
#include <linux/rfkill.h>
-#include <asm/uaccess.h>
-
#include <linux/dmi.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
-
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
+#include <linux/thinkpad_acpi.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/initval.h>
-
-#include <acpi/acpi_drivers.h>
-
-#include <linux/pci_ids.h>
-
-#include <linux/thinkpad_acpi.h>
+#include <asm/uaccess.h>
/* ThinkPad CMOS commands */
#define TP_CMOS_VOLUME_DOWN 0
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 7fce391818d3..90dd7645a9e5 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -54,11 +54,9 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/i8042.h>
-
+#include <linux/acpi.h>
#include <asm/uaccess.h>
-#include <acpi/acpi_drivers.h>
-
MODULE_AUTHOR("John Belmonte");
MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
MODULE_LICENSE("GPL");
@@ -151,6 +149,7 @@ static const struct acpi_device_id toshiba_device_ids[] = {
MODULE_DEVICE_TABLE(acpi, toshiba_device_ids);
static const struct key_entry toshiba_acpi_keymap[] = {
+ { KE_KEY, 0x9e, { KEY_RFKILL } },
{ KE_KEY, 0x101, { KEY_MUTE } },
{ KE_KEY, 0x102, { KEY_ZOOMOUT } },
{ KE_KEY, 0x103, { KEY_ZOOMIN } },
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 74dd01ae343b..2cb1ea62b4a7 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -23,14 +23,12 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@gmail.com>");
MODULE_DESCRIPTION("Toshiba Laptop ACPI Bluetooth Enable Driver");
MODULE_LICENSE("GPL");
-
static int toshiba_bt_rfkill_add(struct acpi_device *device);
static int toshiba_bt_rfkill_remove(struct acpi_device *device);
static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event);
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index c2e7b2657aeb..43d13295e63d 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -37,8 +37,6 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
ACPI_MODULE_NAME("wmi");
MODULE_AUTHOR("Carlos Corbacho");
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index 4b1377bd5944..49cbccec6e2d 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -18,8 +18,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/input.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#define MODULE_NAME "xo15-ebook"
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index bc00693d0c79..874c236ac1a7 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -239,6 +239,7 @@ int pnp_add_card(struct pnp_card *card)
error = device_register(&card->dev);
if (error) {
dev_err(&card->dev, "could not register (err=%d)\n", error);
+ put_device(&card->dev);
return error;
}
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 14655a0f0431..9f611cbbc294 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -24,7 +24,6 @@
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
-#include <acpi/acpi_bus.h>
#include "../base.h"
#include "pnpacpi.h"
@@ -242,6 +241,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
struct pnp_dev *dev;
char *pnpid;
struct acpi_hardware_id *id;
+ int error;
/* Skip devices that are already bound */
if (device->physical_node_count)
@@ -300,10 +300,16 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
/* clear out the damaged flags */
if (!dev->active)
pnp_init_resources(dev);
- pnp_add_device(dev);
+
+ error = pnp_add_device(dev);
+ if (error) {
+ put_device(&dev->dev);
+ return error;
+ }
+
num++;
- return AE_OK;
+ return 0;
}
static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle,
@@ -329,20 +335,15 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
&& compare_pnp_id(pnp->id, acpi_device_hid(acpi));
}
-static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle)
+static struct acpi_device * __init acpi_pnp_find_companion(struct device *dev)
{
- struct device *adev;
- struct acpi_device *acpi;
-
- adev = bus_find_device(&acpi_bus_type, NULL,
- to_pnp_dev(dev), acpi_pnp_match);
- if (!adev)
- return -ENODEV;
+ dev = bus_find_device(&acpi_bus_type, NULL, to_pnp_dev(dev),
+ acpi_pnp_match);
+ if (!dev)
+ return NULL;
- acpi = to_acpi_device(adev);
- *handle = acpi->handle;
- put_device(adev);
- return 0;
+ put_device(dev);
+ return to_acpi_device(dev);
}
/* complete initialization of a PNPACPI device includes having
@@ -356,7 +357,7 @@ static bool acpi_pnp_bus_match(struct device *dev)
static struct acpi_bus_type __initdata acpi_pnp_bus = {
.name = "PNP",
.match = acpi_pnp_bus_match,
- .find_device = acpi_pnp_find_device,
+ .find_companion = acpi_pnp_find_companion,
};
int pnpacpi_disabled __initdata;
diff --git a/drivers/pnp/pnpacpi/pnpacpi.h b/drivers/pnp/pnpacpi/pnpacpi.h
index 3e60225b0227..051ef9699777 100644
--- a/drivers/pnp/pnpacpi/pnpacpi.h
+++ b/drivers/pnp/pnpacpi/pnpacpi.h
@@ -1,7 +1,6 @@
#ifndef ACPI_PNP_H
#define ACPI_PNP_H
-#include <acpi/acpi_bus.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 9b86a01af631..074569e77d22 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -312,18 +312,19 @@ static int __init insert_device(struct pnp_bios_node *node)
struct list_head *pos;
struct pnp_dev *dev;
char id[8];
+ int error;
/* check if the device is already added */
list_for_each(pos, &pnpbios_protocol.devices) {
dev = list_entry(pos, struct pnp_dev, protocol_list);
if (dev->number == node->handle)
- return -1;
+ return -EEXIST;
}
pnp_eisa_id_to_string(node->eisa_id & PNP_EISA_ID_MASK, id);
dev = pnp_alloc_dev(&pnpbios_protocol, node->handle, id);
if (!dev)
- return -1;
+ return -ENOMEM;
pnpbios_parse_data_stream(dev, node);
dev->active = pnp_is_active(dev);
@@ -342,7 +343,12 @@ static int __init insert_device(struct pnp_bios_node *node)
if (!dev->active)
pnp_init_resources(dev);
- pnp_add_device(dev);
+ error = pnp_add_device(dev);
+ if (error) {
+ put_device(&dev->dev);
+ return error;
+ }
+
pnpbios_interface_attach_device(node);
return 0;
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index d95e101ffb43..bacddd102ae9 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -31,7 +31,7 @@ static int pnp_reserve_mem[16] = {[0 ... 15] = -1 }; /* reserve (don't use) some
* option registration
*/
-struct pnp_option *pnp_build_option(struct pnp_dev *dev, unsigned long type,
+static struct pnp_option *pnp_build_option(struct pnp_dev *dev, unsigned long type,
unsigned int option_flags)
{
struct pnp_option *option;
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 5e2054afe840..ba6975123071 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -196,6 +196,7 @@ config BATTERY_MAX17040
config BATTERY_MAX17042
tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
depends on I2C
+ select REGMAP_I2C
help
MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries
in handheld and portable equipment. The MAX17042 is configured
@@ -316,6 +317,13 @@ config CHARGER_MANAGER
runtime and in suspend-to-RAM by waking up the system periodically
with help of suspend_again support.
+config CHARGER_MAX14577
+ tristate "Maxim MAX14577 MUIC battery charger driver"
+ depends on MFD_MAX14577
+ help
+ Say Y to enable support for the battery charger control sysfs and
+ platform data of MAX14577 MUICs.
+
config CHARGER_MAX8997
tristate "Maxim MAX8997/MAX8966 PMIC battery charger driver"
depends on MFD_MAX8997 && REGULATOR_MAX8997
@@ -376,6 +384,7 @@ config AB8500_BM
config BATTERY_GOLDFISH
tristate "Goldfish battery driver"
depends on GOLDFISH || COMPILE_TEST
+ depends on HAS_IOMEM
help
Say Y to enable support for the battery and AC power in the
Goldfish emulator.
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 372b4e8ab598..ee54a3e4c90a 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_CHARGER_LP8727) += lp8727_charger.o
obj-$(CONFIG_CHARGER_LP8788) += lp8788-charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
+obj-$(CONFIG_CHARGER_MAX14577) += max14577_charger.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
index df893dd1447d..79a37f6d3307 100644
--- a/drivers/power/bq2415x_charger.c
+++ b/drivers/power/bq2415x_charger.c
@@ -1,7 +1,7 @@
/*
* bq2415x charger driver
*
- * Copyright (C) 2011-2012 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2011-2013 Pali Rohár <pali.rohar@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -170,6 +170,8 @@ struct bq2415x_device {
struct bq2415x_platform_data init_data;
struct power_supply charger;
struct delayed_work work;
+ struct power_supply *notify_psy;
+ struct notifier_block nb;
enum bq2415x_mode reported_mode;/* mode reported by hook function */
enum bq2415x_mode mode; /* current configured mode */
enum bq2415x_chip chip;
@@ -795,24 +797,53 @@ static int bq2415x_set_mode(struct bq2415x_device *bq, enum bq2415x_mode mode)
}
-/* hook function called by other driver which set reported mode */
-static void bq2415x_hook_function(enum bq2415x_mode mode, void *data)
+static int bq2415x_notifier_call(struct notifier_block *nb,
+ unsigned long val, void *v)
{
- struct bq2415x_device *bq = data;
+ struct bq2415x_device *bq =
+ container_of(nb, struct bq2415x_device, nb);
+ struct power_supply *psy = v;
+ enum bq2415x_mode mode;
+ union power_supply_propval prop;
+ int ret;
+ int mA;
- if (!bq)
- return;
+ if (val != PSY_EVENT_PROP_CHANGED)
+ return NOTIFY_OK;
+
+ if (psy != bq->notify_psy)
+ return NOTIFY_OK;
+
+ dev_dbg(bq->dev, "notifier call was called\n");
+
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_CURRENT_MAX, &prop);
+ if (ret != 0)
+ return NOTIFY_OK;
+
+ mA = prop.intval;
+
+ if (mA == 0)
+ mode = BQ2415X_MODE_OFF;
+ else if (mA < 500)
+ mode = BQ2415X_MODE_NONE;
+ else if (mA < 1800)
+ mode = BQ2415X_MODE_HOST_CHARGER;
+ else
+ mode = BQ2415X_MODE_DEDICATED_CHARGER;
+
+ if (bq->reported_mode == mode)
+ return NOTIFY_OK;
- dev_dbg(bq->dev, "hook function was called\n");
bq->reported_mode = mode;
/* if automode is not enabled do not tell about reported_mode */
if (bq->automode < 1)
- return;
+ return NOTIFY_OK;
sysfs_notify(&bq->charger.dev->kobj, NULL, "reported_mode");
bq2415x_set_mode(bq, bq->reported_mode);
+ return NOTIFY_OK;
}
/**** timer functions ****/
@@ -1512,9 +1543,11 @@ static int bq2415x_probe(struct i2c_client *client,
int num;
char *name;
struct bq2415x_device *bq;
+ struct device_node *np = client->dev.of_node;
+ struct bq2415x_platform_data *pdata = client->dev.platform_data;
- if (!client->dev.platform_data) {
- dev_err(&client->dev, "platform data not set\n");
+ if (!np && !pdata) {
+ dev_err(&client->dev, "platform data missing\n");
return -ENODEV;
}
@@ -1539,6 +1572,17 @@ static int bq2415x_probe(struct i2c_client *client,
goto error_2;
}
+ if (np) {
+ bq->notify_psy = power_supply_get_by_phandle(np, "ti,usb-charger-detection");
+
+ if (!bq->notify_psy)
+ return -EPROBE_DEFER;
+ }
+ else if (pdata->notify_device)
+ bq->notify_psy = power_supply_get_by_name(pdata->notify_device);
+ else
+ bq->notify_psy = NULL;
+
i2c_set_clientdata(client, bq);
bq->id = num;
@@ -1550,8 +1594,34 @@ static int bq2415x_probe(struct i2c_client *client,
bq->autotimer = 0;
bq->automode = 0;
- memcpy(&bq->init_data, client->dev.platform_data,
- sizeof(bq->init_data));
+ if (np) {
+ ret = of_property_read_u32(np, "ti,current-limit",
+ &bq->init_data.current_limit);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(np, "ti,weak-battery-voltage",
+ &bq->init_data.weak_battery_voltage);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(np, "ti,battery-regulation-voltage",
+ &bq->init_data.battery_regulation_voltage);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(np, "ti,charge-current",
+ &bq->init_data.charge_current);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(np, "ti,termination-current",
+ &bq->init_data.termination_current);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(np, "ti,resistor-sense",
+ &bq->init_data.resistor_sense);
+ if (ret)
+ return ret;
+ } else {
+ memcpy(&bq->init_data, pdata, sizeof(bq->init_data));
+ }
bq2415x_reset_chip(bq);
@@ -1573,16 +1643,20 @@ static int bq2415x_probe(struct i2c_client *client,
goto error_4;
}
- if (bq->init_data.set_mode_hook) {
- if (bq->init_data.set_mode_hook(
- bq2415x_hook_function, bq)) {
- bq->automode = 1;
- bq2415x_set_mode(bq, bq->reported_mode);
- dev_info(bq->dev, "automode enabled\n");
- } else {
- bq->automode = -1;
- dev_info(bq->dev, "automode failed\n");
+ if (bq->notify_psy) {
+ bq->nb.notifier_call = bq2415x_notifier_call;
+ ret = power_supply_reg_notifier(&bq->nb);
+ if (ret) {
+ dev_err(bq->dev, "failed to reg notifier: %d\n", ret);
+ goto error_5;
}
+
+ /* Query for initial reported_mode and set it */
+ bq2415x_notifier_call(&bq->nb, PSY_EVENT_PROP_CHANGED, bq->notify_psy);
+ bq2415x_set_mode(bq, bq->reported_mode);
+
+ bq->automode = 1;
+ dev_info(bq->dev, "automode enabled\n");
} else {
bq->automode = -1;
dev_info(bq->dev, "automode not supported\n");
@@ -1594,6 +1668,7 @@ static int bq2415x_probe(struct i2c_client *client,
dev_info(bq->dev, "driver registered\n");
return 0;
+error_5:
error_4:
bq2415x_sysfs_exit(bq);
error_3:
@@ -1614,8 +1689,8 @@ static int bq2415x_remove(struct i2c_client *client)
{
struct bq2415x_device *bq = i2c_get_clientdata(client);
- if (bq->init_data.set_mode_hook)
- bq->init_data.set_mode_hook(NULL, NULL);
+ if (bq->notify_psy)
+ power_supply_unreg_notifier(&bq->nb);
bq2415x_sysfs_exit(bq);
bq2415x_power_supply_exit(bq);
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 7287c0efd6bf..9e4dab46eefd 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -25,12 +25,23 @@
#include <linux/power/charger-manager.h>
#include <linux/regulator/consumer.h>
#include <linux/sysfs.h>
+#include <linux/of.h>
+#include <linux/thermal.h>
+
+/*
+ * Default temperature thresholds for charging.
+ * All temperature values are in tenths of a degree centigrade.
+ */
+#define CM_DEFAULT_RECHARGE_TEMP_DIFF 50
+#define CM_DEFAULT_CHARGE_TEMP_MAX 500
static const char * const default_event_names[] = {
[CM_EVENT_UNKNOWN] = "Unknown",
[CM_EVENT_BATT_FULL] = "Battery Full",
[CM_EVENT_BATT_IN] = "Battery Inserted",
[CM_EVENT_BATT_OUT] = "Battery Pulled Out",
+ [CM_EVENT_BATT_OVERHEAT] = "Battery Overheat",
+ [CM_EVENT_BATT_COLD] = "Battery Cold",
[CM_EVENT_EXT_PWR_IN_OUT] = "External Power Attach/Detach",
[CM_EVENT_CHG_START_STOP] = "Charging Start/Stop",
[CM_EVENT_OTHERS] = "Other battery events"
@@ -518,7 +529,7 @@ static int check_charging_duration(struct charger_manager *cm)
duration = curr - cm->charging_start_time;
if (duration > desc->charging_max_duration_ms) {
- dev_info(cm->dev, "Charging duration exceed %lldms\n",
+ dev_info(cm->dev, "Charging duration exceed %ums\n",
desc->charging_max_duration_ms);
uevent_notify(cm, "Discharging");
try_charger_enable(cm, false);
@@ -529,7 +540,7 @@ static int check_charging_duration(struct charger_manager *cm)
if (duration > desc->charging_max_duration_ms &&
is_ext_pwr_online(cm)) {
- dev_info(cm->dev, "Discharging duration exceed %lldms\n",
+ dev_info(cm->dev, "Discharging duration exceed %ums\n",
desc->discharging_max_duration_ms);
uevent_notify(cm, "Recharging");
try_charger_enable(cm, true);
@@ -540,6 +551,60 @@ static int check_charging_duration(struct charger_manager *cm)
return ret;
}
+static int cm_get_battery_temperature(struct charger_manager *cm,
+ int *temp)
+{
+ int ret;
+
+ if (!cm->desc->measure_battery_temp)
+ return -ENODEV;
+
+#ifdef CONFIG_THERMAL
+ ret = thermal_zone_get_temp(cm->tzd_batt, (unsigned long *)temp);
+ if (!ret)
+ /* Calibrate temperature unit */
+ *temp /= 100;
+#else
+ ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_TEMP,
+ (union power_supply_propval *)temp);
+#endif
+ return ret;
+}
+
+static int cm_check_thermal_status(struct charger_manager *cm)
+{
+ struct charger_desc *desc = cm->desc;
+ int temp, upper_limit, lower_limit;
+ int ret = 0;
+
+ ret = cm_get_battery_temperature(cm, &temp);
+ if (ret) {
+ /* FIXME:
+ * Missing battery temperature information may
+ * lead to hazardous results. We have to handle it
+ * depending on the battery type.
+ */
+ dev_err(cm->dev, "Failed to get battery temperature\n");
+ return 0;
+ }
+
+ upper_limit = desc->temp_max;
+ lower_limit = desc->temp_min;
+
+ if (cm->emergency_stop) {
+ upper_limit -= desc->temp_diff;
+ lower_limit += desc->temp_diff;
+ }
+
+ if (temp > upper_limit)
+ ret = CM_EVENT_BATT_OVERHEAT;
+ else if (temp < lower_limit)
+ ret = CM_EVENT_BATT_COLD;
+
+ return ret;
+}
+
/**
* _cm_monitor - Monitor the temperature and return true for exceptions.
* @cm: the Charger Manager representing the battery.
@@ -549,28 +614,22 @@ static int check_charging_duration(struct charger_manager *cm)
*/
static bool _cm_monitor(struct charger_manager *cm)
{
- struct charger_desc *desc = cm->desc;
- int temp = desc->temperature_out_of_range(&cm->last_temp_mC);
+ int temp_alrt;
- dev_dbg(cm->dev, "monitoring (%2.2d.%3.3dC)\n",
- cm->last_temp_mC / 1000, cm->last_temp_mC % 1000);
+ temp_alrt = cm_check_thermal_status(cm);
/* It has been stopped already */
- if (temp && cm->emergency_stop)
+ if (temp_alrt && cm->emergency_stop)
return false;
/*
* Check temperature whether overheat or cold.
* If temperature is out of range normal state, stop charging.
*/
- if (temp) {
- cm->emergency_stop = temp;
- if (!try_charger_enable(cm, false)) {
- if (temp > 0)
- uevent_notify(cm, "OVERHEAT");
- else
- uevent_notify(cm, "COLD");
- }
+ if (temp_alrt) {
+ cm->emergency_stop = temp_alrt;
+ if (!try_charger_enable(cm, false))
+ uevent_notify(cm, default_event_names[temp_alrt]);
/*
* Check whole charging duration and discharing duration
@@ -802,21 +861,8 @@ static int charger_get_property(struct power_supply *psy,
POWER_SUPPLY_PROP_CURRENT_NOW, val);
break;
case POWER_SUPPLY_PROP_TEMP:
- /* in thenth of centigrade */
- if (cm->last_temp_mC == INT_MIN)
- desc->temperature_out_of_range(&cm->last_temp_mC);
- val->intval = cm->last_temp_mC / 100;
- if (!desc->measure_battery_temp)
- ret = -ENODEV;
- break;
case POWER_SUPPLY_PROP_TEMP_AMBIENT:
- /* in thenth of centigrade */
- if (cm->last_temp_mC == INT_MIN)
- desc->temperature_out_of_range(&cm->last_temp_mC);
- val->intval = cm->last_temp_mC / 100;
- if (desc->measure_battery_temp)
- ret = -ENODEV;
- break;
+ return cm_get_battery_temperature(cm, &val->intval);
case POWER_SUPPLY_PROP_CAPACITY:
if (!cm->fuel_gauge) {
ret = -ENODEV;
@@ -1439,9 +1485,183 @@ err:
return ret;
}
+static int cm_init_thermal_data(struct charger_manager *cm)
+{
+ struct charger_desc *desc = cm->desc;
+ union power_supply_propval val;
+ int ret;
+
+ /* Verify whether fuel gauge provides battery temperature */
+ ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_TEMP, &val);
+
+ if (!ret) {
+ cm->charger_psy.properties[cm->charger_psy.num_properties] =
+ POWER_SUPPLY_PROP_TEMP;
+ cm->charger_psy.num_properties++;
+ cm->desc->measure_battery_temp = true;
+ }
+#ifdef CONFIG_THERMAL
+ cm->tzd_batt = cm->fuel_gauge->tzd;
+
+ if (ret && desc->thermal_zone) {
+ cm->tzd_batt =
+ thermal_zone_get_zone_by_name(desc->thermal_zone);
+ if (IS_ERR(cm->tzd_batt))
+ return PTR_ERR(cm->tzd_batt);
+
+ /* Use external thermometer */
+ cm->charger_psy.properties[cm->charger_psy.num_properties] =
+ POWER_SUPPLY_PROP_TEMP_AMBIENT;
+ cm->charger_psy.num_properties++;
+ cm->desc->measure_battery_temp = true;
+ ret = 0;
+ }
+#endif
+ if (cm->desc->measure_battery_temp) {
+ /* NOTICE : Default allowable minimum charge temperature is 0 */
+ if (!desc->temp_max)
+ desc->temp_max = CM_DEFAULT_CHARGE_TEMP_MAX;
+ if (!desc->temp_diff)
+ desc->temp_diff = CM_DEFAULT_RECHARGE_TEMP_DIFF;
+ }
+
+ return ret;
+}
+
+static struct of_device_id charger_manager_match[] = {
+ {
+ .compatible = "charger-manager",
+ },
+ {},
+};
+
+static struct charger_desc *of_cm_parse_desc(struct device *dev)
+{
+ struct charger_desc *desc;
+ struct device_node *np = dev->of_node;
+ u32 poll_mode = CM_POLL_DISABLE;
+ u32 battery_stat = CM_NO_BATTERY;
+ int num_chgs = 0;
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ of_property_read_string(np, "cm-name", &desc->psy_name);
+
+ of_property_read_u32(np, "cm-poll-mode", &poll_mode);
+ desc->polling_mode = poll_mode;
+
+ of_property_read_u32(np, "cm-poll-interval",
+ &desc->polling_interval_ms);
+
+ of_property_read_u32(np, "cm-fullbatt-vchkdrop-ms",
+ &desc->fullbatt_vchkdrop_ms);
+ of_property_read_u32(np, "cm-fullbatt-vchkdrop-volt",
+ &desc->fullbatt_vchkdrop_uV);
+ of_property_read_u32(np, "cm-fullbatt-voltage", &desc->fullbatt_uV);
+ of_property_read_u32(np, "cm-fullbatt-soc", &desc->fullbatt_soc);
+ of_property_read_u32(np, "cm-fullbatt-capacity",
+ &desc->fullbatt_full_capacity);
+
+ of_property_read_u32(np, "cm-battery-stat", &battery_stat);
+ desc->battery_present = battery_stat;
+
+ /* chargers */
+ of_property_read_u32(np, "cm-num-chargers", &num_chgs);
+ if (num_chgs) {
+ /* Allocate empty bin at the tail of array */
+ desc->psy_charger_stat = devm_kzalloc(dev, sizeof(char *)
+ * (num_chgs + 1), GFP_KERNEL);
+ if (desc->psy_charger_stat) {
+ int i;
+ for (i = 0; i < num_chgs; i++)
+ of_property_read_string_index(np, "cm-chargers",
+ i, &desc->psy_charger_stat[i]);
+ } else {
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ of_property_read_string(np, "cm-fuel-gauge", &desc->psy_fuel_gauge);
+
+ of_property_read_string(np, "cm-thermal-zone", &desc->thermal_zone);
+
+ of_property_read_u32(np, "cm-battery-cold", &desc->temp_min);
+ if (of_get_property(np, "cm-battery-cold-in-minus", NULL))
+ desc->temp_min *= -1;
+ of_property_read_u32(np, "cm-battery-hot", &desc->temp_max);
+ of_property_read_u32(np, "cm-battery-temp-diff", &desc->temp_diff);
+
+ of_property_read_u32(np, "cm-charging-max",
+ &desc->charging_max_duration_ms);
+ of_property_read_u32(np, "cm-discharging-max",
+ &desc->discharging_max_duration_ms);
+
+ /* battery charger regulators */
+ desc->num_charger_regulators = of_get_child_count(np);
+ if (desc->num_charger_regulators) {
+ struct charger_regulator *chg_regs;
+ struct device_node *child;
+
+ chg_regs = devm_kzalloc(dev, sizeof(*chg_regs)
+ * desc->num_charger_regulators,
+ GFP_KERNEL);
+ if (!chg_regs)
+ return ERR_PTR(-ENOMEM);
+
+ desc->charger_regulators = chg_regs;
+
+ for_each_child_of_node(np, child) {
+ struct charger_cable *cables;
+ struct device_node *_child;
+
+ of_property_read_string(child, "cm-regulator-name",
+ &chg_regs->regulator_name);
+
+ /* charger cables */
+ chg_regs->num_cables = of_get_child_count(child);
+ if (chg_regs->num_cables) {
+ cables = devm_kzalloc(dev, sizeof(*cables)
+ * chg_regs->num_cables,
+ GFP_KERNEL);
+ if (!cables)
+ return ERR_PTR(-ENOMEM);
+
+ chg_regs->cables = cables;
+
+ for_each_child_of_node(child, _child) {
+ of_property_read_string(_child,
+ "cm-cable-name", &cables->name);
+ of_property_read_string(_child,
+ "cm-cable-extcon",
+ &cables->extcon_name);
+ of_property_read_u32(_child,
+ "cm-cable-min",
+ &cables->min_uA);
+ of_property_read_u32(_child,
+ "cm-cable-max",
+ &cables->max_uA);
+ cables++;
+ }
+ }
+ chg_regs++;
+ }
+ }
+ return desc;
+}
+
+static inline struct charger_desc *cm_get_drv_data(struct platform_device *pdev)
+{
+ if (pdev->dev.of_node)
+ return of_cm_parse_desc(&pdev->dev);
+ return (struct charger_desc *)dev_get_platdata(&pdev->dev);
+}
+
static int charger_manager_probe(struct platform_device *pdev)
{
- struct charger_desc *desc = dev_get_platdata(&pdev->dev);
+ struct charger_desc *desc = cm_get_drv_data(pdev);
struct charger_manager *cm;
int ret = 0, i = 0;
int j = 0;
@@ -1470,7 +1690,6 @@ static int charger_manager_probe(struct platform_device *pdev)
/* Basic Values. Unspecified are Null or 0 */
cm->dev = &pdev->dev;
cm->desc = desc;
- cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */
/*
* The following two do not need to be errors.
@@ -1533,11 +1752,6 @@ static int charger_manager_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (!desc->temperature_out_of_range) {
- dev_err(&pdev->dev, "there is no temperature_out_of_range\n");
- return -EINVAL;
- }
-
if (!desc->charging_max_duration_ms ||
!desc->discharging_max_duration_ms) {
dev_info(&pdev->dev, "Cannot limit charging duration checking mechanism to prevent overcharge/overheat and control discharging duration\n");
@@ -1583,14 +1797,10 @@ static int charger_manager_probe(struct platform_device *pdev)
cm->charger_psy.num_properties++;
}
- if (desc->measure_battery_temp) {
- cm->charger_psy.properties[cm->charger_psy.num_properties] =
- POWER_SUPPLY_PROP_TEMP;
- cm->charger_psy.num_properties++;
- } else {
- cm->charger_psy.properties[cm->charger_psy.num_properties] =
- POWER_SUPPLY_PROP_TEMP_AMBIENT;
- cm->charger_psy.num_properties++;
+ ret = cm_init_thermal_data(cm);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize thermal data\n");
+ cm->desc->measure_battery_temp = false;
}
INIT_DELAYED_WORK(&cm->fullbatt_vchk_work, fullbatt_vchk);
@@ -1808,6 +2018,7 @@ static struct platform_driver charger_manager_driver = {
.name = "charger-manager",
.owner = THIS_MODULE,
.pm = &charger_manager_pm,
+ .of_match_table = charger_manager_match,
},
.probe = charger_manager_probe,
.remove = charger_manager_remove,
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 563174891c90..041f9b638d28 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -192,7 +192,7 @@ static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uV)
/*
* Voltage is measured in units of 1.22mV. The voltage is stored as
- * a 10-bit number plus sign, in the upper bits of a 16-bit register
+ * a 12-bit number plus sign, in the upper bits of a 16-bit register
*/
err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
if (err)
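
[Editor's note: the comment change above says the measurement is a sign bit plus 12 data bits, left-justified in the 16-bit register, in 1.22 mV units. A decode sketch, illustrative rather than the driver's exact code, assuming the raw value is already in host byte order:]

	static int example_ds2786_decode_uV(s16 raw)
	{
		/* Drop the 3 unused low bits, then scale by 1.22 mV (1220 uV) per LSB */
		return (raw >> 3) * 1220;
	}

	/* e.g. raw = 0x3E80: 16000 >> 3 = 2000, 2000 * 1220 = 2,440,000 uV (about 2.44 V) */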
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index 4e858a23568f..a0024b252197 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -28,6 +28,7 @@
struct gpio_charger {
const struct gpio_charger_platform_data *pdata;
unsigned int irq;
+ bool wakeup_enabled;
struct power_supply charger;
};
@@ -136,6 +137,8 @@ static int gpio_charger_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gpio_charger);
+ device_init_wakeup(&pdev->dev, 1);
+
return 0;
err_gpio_free:
@@ -159,18 +162,32 @@ static int gpio_charger_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
+static int gpio_charger_suspend(struct device *dev)
+{
+ struct gpio_charger *gpio_charger = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ gpio_charger->wakeup_enabled =
+ enable_irq_wake(gpio_charger->irq);
+
+ return 0;
+}
+
static int gpio_charger_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct gpio_charger *gpio_charger = platform_get_drvdata(pdev);
+ if (gpio_charger->wakeup_enabled)
+ disable_irq_wake(gpio_charger->irq);
power_supply_changed(&gpio_charger->charger);
return 0;
}
#endif
-static SIMPLE_DEV_PM_OPS(gpio_charger_pm_ops, NULL, gpio_charger_resume);
+static SIMPLE_DEV_PM_OPS(gpio_charger_pm_ops,
+ gpio_charger_suspend, gpio_charger_resume);
static struct platform_driver gpio_charger_driver = {
.probe = gpio_charger_probe,
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 1bb3a91b1acc..0b4cf9d63291 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -29,6 +29,8 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
@@ -88,6 +90,8 @@ static void isp1704_charger_set_power(struct isp1704_charger *isp, bool on)
if (board && board->set_power)
board->set_power(on);
+ else if (board)
+ gpio_set_value(board->enable_gpio, on);
}
/*
@@ -400,13 +404,46 @@ static int isp1704_charger_probe(struct platform_device *pdev)
struct isp1704_charger *isp;
int ret = -ENODEV;
+ struct isp1704_charger_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
+
+ if (np) {
+ int gpio = of_get_named_gpio(np, "nxp,enable-gpio", 0);
+
+ if (gpio < 0)
+ return gpio;
+
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct isp1704_charger_data), GFP_KERNEL);
+ pdata->enable_gpio = gpio;
+
+ dev_info(&pdev->dev, "init gpio %d\n", pdata->enable_gpio);
+
+ ret = devm_gpio_request_one(&pdev->dev, pdata->enable_gpio,
+ GPIOF_OUT_INIT_HIGH, "isp1704_reset");
+ if (ret)
+ goto fail0;
+ }
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "missing platform data!\n");
+ return -ENODEV;
+ }
+
+
isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
if (!isp)
return -ENOMEM;
- isp->phy = usb_get_phy(USB_PHY_TYPE_USB2);
- if (IS_ERR_OR_NULL(isp->phy))
+ if (np)
+ isp->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
+ else
+ isp->phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
+
+ if (IS_ERR(isp->phy)) {
+ ret = PTR_ERR(isp->phy);
goto fail0;
+ }
isp->dev = &pdev->dev;
platform_set_drvdata(pdev, isp);
@@ -464,7 +501,6 @@ fail2:
power_supply_unregister(&isp->psy);
fail1:
isp1704_charger_set_power(isp, 0);
- usb_put_phy(isp->phy);
fail0:
dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret);
@@ -477,15 +513,23 @@ static int isp1704_charger_remove(struct platform_device *pdev)
usb_unregister_notifier(isp->phy, &isp->nb);
power_supply_unregister(&isp->psy);
- usb_put_phy(isp->phy);
isp1704_charger_set_power(isp, 0);
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id omap_isp1704_of_match[] = {
+ { .compatible = "nxp,isp1704", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_isp1704_of_match);
+#endif
+
static struct platform_driver isp1704_charger_driver = {
.driver = {
.name = "isp1704_charger",
+ .of_match_table = of_match_ptr(omap_isp1704_of_match),
},
.probe = isp1704_charger_probe,
.remove = isp1704_charger_remove,
diff --git a/drivers/power/max14577_charger.c b/drivers/power/max14577_charger.c
new file mode 100644
index 000000000000..fad2a75b3604
--- /dev/null
+++ b/drivers/power/max14577_charger.c
@@ -0,0 +1,311 @@
+/*
+ * Battery charger driver for the Maxim 14577
+ *
+ * Copyright (C) 2013 Samsung Electronics
+ * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/mfd/max14577-private.h>
+
+struct max14577_charger {
+ struct device *dev;
+ struct max14577 *max14577;
+ struct power_supply charger;
+
+ unsigned int charging_state;
+ unsigned int battery_state;
+};
+
+static int max14577_get_charger_state(struct max14577_charger *chg)
+{
+ struct regmap *rmap = chg->max14577->regmap;
+ int state = POWER_SUPPLY_STATUS_DISCHARGING;
+ u8 reg_data;
+
+ /*
+ * Charging occurs only if:
+ * - CHGCTRL2/MBCHOSTEN == 1
+ * - STATUS3/CGMBC == 1
+ *
+ * TODO:
+ * - handle FULL after Top-off timer (EOC register may be off
+ * and the charger won't be charging although MBCHOSTEN is on)
+ * - handle properly dead-battery charging (respect timer)
+ * - handle timers (fast-charge and prequal) /MBCCHGERR/
+ */
+ max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL2, &reg_data);
+ if ((reg_data & CHGCTRL2_MBCHOSTEN_MASK) == 0)
+ goto state_set;
+
+ max14577_read_reg(rmap, MAX14577_CHG_REG_STATUS3, &reg_data);
+ if (reg_data & STATUS3_CGMBC_MASK) {
+ /* Charger or USB-cable is connected */
+ if (reg_data & STATUS3_EOC_MASK)
+ state = POWER_SUPPLY_STATUS_FULL;
+ else
+ state = POWER_SUPPLY_STATUS_CHARGING;
+ goto state_set;
+ }
+
+state_set:
+ chg->charging_state = state;
+ return state;
+}
+
+/*
+ * Supported charge types:
+ * - POWER_SUPPLY_CHARGE_TYPE_NONE
+ * - POWER_SUPPLY_CHARGE_TYPE_FAST
+ */
+static int max14577_get_charge_type(struct max14577_charger *chg)
+{
+ /*
+ * TODO: CHARGE_TYPE_TRICKLE (VCHGR_RC or EOC)?
+ * As spec says:
+ * [after reaching EOC interrupt]
+ * "When the battery is fully charged, the 30-minute (typ)
+ * top-off timer starts. The device continues to trickle
+ * charge the battery until the top-off timer runs out."
+ */
+ if (max14577_get_charger_state(chg) == POWER_SUPPLY_STATUS_CHARGING)
+ return POWER_SUPPLY_CHARGE_TYPE_FAST;
+ return POWER_SUPPLY_CHARGE_TYPE_NONE;
+}
+
+static int max14577_get_online(struct max14577_charger *chg)
+{
+ struct regmap *rmap = chg->max14577->regmap;
+ u8 reg_data;
+
+ max14577_read_reg(rmap, MAX14577_MUIC_REG_STATUS2, &reg_data);
+ reg_data = ((reg_data & STATUS2_CHGTYP_MASK) >> STATUS2_CHGTYP_SHIFT);
+ switch (reg_data) {
+ case MAX14577_CHARGER_TYPE_USB:
+ case MAX14577_CHARGER_TYPE_DEDICATED_CHG:
+ case MAX14577_CHARGER_TYPE_SPECIAL_500MA:
+ case MAX14577_CHARGER_TYPE_SPECIAL_1A:
+ case MAX14577_CHARGER_TYPE_DEAD_BATTERY:
+ return 1;
+ case MAX14577_CHARGER_TYPE_NONE:
+ case MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT:
+ case MAX14577_CHARGER_TYPE_RESERVED:
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Supported health statuses:
+ * - POWER_SUPPLY_HEALTH_DEAD
+ * - POWER_SUPPLY_HEALTH_OVERVOLTAGE
+ * - POWER_SUPPLY_HEALTH_GOOD
+ */
+static int max14577_get_battery_health(struct max14577_charger *chg)
+{
+ struct regmap *rmap = chg->max14577->regmap;
+ int state = POWER_SUPPLY_HEALTH_GOOD;
+ u8 reg_data;
+
+ max14577_read_reg(rmap, MAX14577_MUIC_REG_STATUS2, &reg_data);
+ reg_data = ((reg_data & STATUS2_CHGTYP_MASK) >> STATUS2_CHGTYP_SHIFT);
+ if (reg_data == MAX14577_CHARGER_TYPE_DEAD_BATTERY) {
+ state = POWER_SUPPLY_HEALTH_DEAD;
+ goto state_set;
+ }
+
+ max14577_read_reg(rmap, MAX14577_CHG_REG_STATUS3, &reg_data);
+ if (reg_data & STATUS3_OVP_MASK) {
+ state = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ goto state_set;
+ }
+
+state_set:
+ chg->battery_state = state;
+ return state;
+}
+
+/*
+ * Always returns 1.
+ * The max14577 chip does not report battery presence, so
+ * assume that it is always used with a battery.
+ */
+static int max14577_get_present(struct max14577_charger *chg)
+{
+ return 1;
+}
+
+/*
+ * Sets charger registers to proper and safe default values.
+ * Some of these values equal the defaults in the MAX14577E
+ * data sheet, but there are minor differences.
+ */
+static void max14577_charger_reg_init(struct max14577_charger *chg)
+{
+ struct regmap *rmap = chg->max14577->regmap;
+ u8 reg_data;
+
+ /*
+ * Charger-Type Manual Detection, default off (set CHGTYPMAN to 0)
+ * Charger-Detection Enable, default on (set CHGDETEN to 1)
+ * Combined mask of CHGDETEN and CHGTYPMAN will zero the CHGTYPMAN bit
+ */
+ reg_data = 0x1 << CDETCTRL1_CHGDETEN_SHIFT;
+ max14577_update_reg(rmap, MAX14577_REG_CDETCTRL1,
+ CDETCTRL1_CHGDETEN_MASK | CDETCTRL1_CHGTYPMAN_MASK,
+ reg_data);
+
+ /* Battery Fast-Charge Timer, from SM-V700: 6hrs */
+ reg_data = 0x3 << CHGCTRL1_TCHW_SHIFT;
+ max14577_write_reg(rmap, MAX14577_REG_CHGCTRL1, reg_data);
+
+ /*
+ * Wall-Adapter Rapid Charge, default on
+ * Battery-Charger, default on
+ */
+ reg_data = 0x1 << CHGCTRL2_VCHGR_RC_SHIFT;
+ reg_data |= 0x1 << CHGCTRL2_MBCHOSTEN_SHIFT;
+ max14577_write_reg(rmap, MAX14577_REG_CHGCTRL2, reg_data);
+
+ /* Battery-Charger Constant Voltage (CV) Mode, from SM-V700: 4.35V */
+ reg_data = 0xf << CHGCTRL3_MBCCVWRC_SHIFT;
+ max14577_write_reg(rmap, MAX14577_REG_CHGCTRL3, reg_data);
+
+ /*
+ * Fast Battery-Charge Current Low, default 200-950mA
+ * Fast Battery-Charge Current High, from SM-V700: 450mA
+ */
+ reg_data = 0x1 << CHGCTRL4_MBCICHWRCL_SHIFT;
+ reg_data |= 0x5 << CHGCTRL4_MBCICHWRCH_SHIFT;
+ max14577_write_reg(rmap, MAX14577_REG_CHGCTRL4, reg_data);
+
+ /* End-of-Charge Current, from SM-V700: 50mA */
+ reg_data = 0x0 << CHGCTRL5_EOCS_SHIFT;
+ max14577_write_reg(rmap, MAX14577_REG_CHGCTRL5, reg_data);
+
+ /* Auto Charging Stop, default off */
+ reg_data = 0x0 << CHGCTRL6_AUTOSTOP_SHIFT;
+ max14577_write_reg(rmap, MAX14577_REG_CHGCTRL6, reg_data);
+
+ /* Overvoltage-Protection Threshold, from SM-V700: 6.5V */
+ reg_data = 0x2 << CHGCTRL7_OTPCGHCVS_SHIFT;
+ max14577_write_reg(rmap, MAX14577_REG_CHGCTRL7, reg_data);
+}
+
+/* Properties supported by the charger */
+static enum power_supply_property max14577_charger_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static const char *model_name = "MAX14577";
+static const char *manufacturer = "Maxim Integrated";
+
+static int max14577_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max14577_charger *chg = container_of(psy,
+ struct max14577_charger,
+ charger);
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = max14577_get_charger_state(chg);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = max14577_get_charge_type(chg);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = max14577_get_battery_health(chg);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = max14577_get_present(chg);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = max14577_get_online(chg);
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = model_name;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = manufacturer;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int max14577_charger_probe(struct platform_device *pdev)
+{
+ struct max14577_charger *chg;
+ struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent);
+ int ret;
+
+ chg = devm_kzalloc(&pdev->dev, sizeof(*chg), GFP_KERNEL);
+ if (!chg)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, chg);
+ chg->dev = &pdev->dev;
+ chg->max14577 = max14577;
+
+ max14577_charger_reg_init(chg);
+
+ chg->charger.name = "max14577-charger";
+ chg->charger.type = POWER_SUPPLY_TYPE_BATTERY;
+ chg->charger.properties = max14577_charger_props;
+ chg->charger.num_properties = ARRAY_SIZE(max14577_charger_props);
+ chg->charger.get_property = max14577_charger_get_property;
+
+ ret = power_supply_register(&pdev->dev, &chg->charger);
+ if (ret) {
+ dev_err(&pdev->dev, "failed: power supply register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max14577_charger_remove(struct platform_device *pdev)
+{
+ struct max14577_charger *chg = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&chg->charger);
+
+ return 0;
+}
+
+static struct platform_driver max14577_charger_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "max14577-charger",
+ },
+ .probe = max14577_charger_probe,
+ .remove = max14577_charger_remove,
+};
+module_platform_driver(max14577_charger_driver);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_DESCRIPTION("MAXIM 14577 charger driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index c7ff6d67f158..0fbac861080d 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -148,7 +148,7 @@ static void max17040_get_online(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- if (chip->pdata->battery_online)
+ if (chip->pdata && chip->pdata->battery_online)
chip->online = chip->pdata->battery_online();
else
chip->online = 1;
@@ -158,7 +158,8 @@ static void max17040_get_status(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- if (!chip->pdata->charger_online || !chip->pdata->charger_enable) {
+ if (!chip->pdata || !chip->pdata->charger_online
+ || !chip->pdata->charger_enable) {
chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
return;
}
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index e0b22f9b6fdd..66da691c41cf 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -741,9 +741,9 @@ static int max17042_probe(struct i2c_client *client,
if (client->irq) {
ret = request_threaded_irq(client->irq, NULL,
- max17042_thread_handler,
- IRQF_TRIGGER_FALLING,
- chip->battery.name, chip);
+ max17042_thread_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ chip->battery.name, chip);
if (!ret) {
regmap_read(chip->regmap, MAX17042_CONFIG, &val);
val |= CONFIG_ALRT_BIT_ENBL;
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 00e667296360..26606641fe44 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
+#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/power_supply.h>
#include <linux/thermal.h>
@@ -24,6 +25,9 @@
struct class *power_supply_class;
EXPORT_SYMBOL_GPL(power_supply_class);
+ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
+EXPORT_SYMBOL_GPL(power_supply_notifier);
+
static struct device_type power_supply_dev_type;
static bool __power_supply_is_supplied_by(struct power_supply *supplier,
@@ -80,6 +84,8 @@ static void power_supply_changed_work(struct work_struct *work)
class_for_each_device(power_supply_class, NULL, psy,
__power_supply_changed_work);
power_supply_update_leds(psy);
+ atomic_notifier_call_chain(&power_supply_notifier,
+ PSY_EVENT_PROP_CHANGED, psy);
kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
spin_lock_irqsave(&psy->changed_lock, flags);
}
@@ -335,6 +341,32 @@ struct power_supply *power_supply_get_by_name(const char *name)
}
EXPORT_SYMBOL_GPL(power_supply_get_by_name);
+#ifdef CONFIG_OF
+static int power_supply_match_device_node(struct device *dev, const void *data)
+{
+ return dev->parent && dev->parent->of_node == data;
+}
+
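+/*
+ * Illustrative usage (the DT property name below is made up):
+ *	psy = power_supply_get_by_phandle(dev->of_node, "fuel-gauge");
+ * returns ERR_PTR(-ENODEV) when the phandle property is missing and
+ * NULL when the referenced supply has not been registered yet.
+ */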
+struct power_supply *power_supply_get_by_phandle(struct device_node *np,
+ const char *property)
+{
+ struct device_node *power_supply_np;
+ struct device *dev;
+
+ power_supply_np = of_parse_phandle(np, property, 0);
+ if (!power_supply_np)
+ return ERR_PTR(-ENODEV);
+
+ dev = class_find_device(power_supply_class, NULL, power_supply_np,
+ power_supply_match_device_node);
+
+ of_node_put(power_supply_np);
+
+ return dev ? dev_get_drvdata(dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(power_supply_get_by_phandle);
+#endif /* CONFIG_OF */
+
int power_supply_powers(struct power_supply *psy, struct device *dev)
{
return sysfs_create_link(&psy->dev->kobj, &dev->kobj, "powers");
@@ -347,6 +379,18 @@ static void power_supply_dev_release(struct device *dev)
kfree(dev);
}
+int power_supply_reg_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&power_supply_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(power_supply_reg_notifier);
+
+void power_supply_unreg_notifier(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&power_supply_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(power_supply_unreg_notifier);
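+
+/*
+ * Minimal usage sketch (names are made up): a consumer fills in a
+ * struct notifier_block whose ->notifier_call is invoked with
+ * PSY_EVENT_PROP_CHANGED and the struct power_supply as the data
+ * argument, then calls power_supply_reg_notifier(&my_nb) and later
+ * power_supply_unreg_notifier(&my_nb).
+ */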
+
#ifdef CONFIG_THERMAL
static int power_supply_read_temp(struct thermal_zone_device *tzd,
unsigned long *temp)
@@ -511,6 +555,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
dev_set_drvdata(dev, psy);
psy->dev = dev;
+ rc = dev_set_name(dev, "%s", psy->name);
+ if (rc)
+ goto dev_set_name_failed;
+
INIT_WORK(&psy->changed_work, power_supply_changed_work);
rc = power_supply_check_supplies(psy);
@@ -524,10 +572,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
if (rc)
goto wakeup_init_failed;
- rc = kobject_set_name(&dev->kobj, "%s", psy->name);
- if (rc)
- goto kobject_set_name_failed;
-
rc = device_add(dev);
if (rc)
goto device_add_failed;
@@ -553,11 +597,11 @@ create_triggers_failed:
register_cooler_failed:
psy_unregister_thermal(psy);
register_thermal_failed:
-wakeup_init_failed:
device_del(dev);
-kobject_set_name_failed:
device_add_failed:
+wakeup_init_failed:
check_supplies_failed:
+dev_set_name_failed:
put_device(dev);
success:
return rc;
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 9b3ea535b472..6d452a78b19c 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -6,6 +6,12 @@ menuconfig POWER_RESET
Say Y here to enable board reset and power off
+config POWER_RESET_AS3722
+ bool "ams AS3722 power-off driver"
+ depends on MFD_AS3722 && POWER_RESET
+ help
+	  This driver supports turning off the board via an ams AS3722 power-off.
+
config POWER_RESET_GPIO
bool "GPIO power-off driver"
depends on OF_GPIO && POWER_RESET
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 3e6ed88725ac..a5b4a77d1a41 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_POWER_RESET_AS3722) += as3722-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
diff --git a/drivers/power/reset/as3722-poweroff.c b/drivers/power/reset/as3722-poweroff.c
new file mode 100644
index 000000000000..684971199bd3
--- /dev/null
+++ b/drivers/power/reset/as3722-poweroff.c
@@ -0,0 +1,96 @@
+/*
+ * Power off driver for ams AS3722 device.
+ *
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/mfd/as3722.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct as3722_poweroff {
+ struct device *dev;
+ struct as3722 *as3722;
+};
+
+static struct as3722_poweroff *as3722_pm_poweroff;
+
+static void as3722_pm_power_off(void)
+{
+ int ret;
+
+ if (!as3722_pm_poweroff) {
+ pr_err("AS3722 poweroff is not initialised\n");
+ return;
+ }
+
+ ret = as3722_update_bits(as3722_pm_poweroff->as3722,
+ AS3722_RESET_CONTROL_REG, AS3722_POWER_OFF, AS3722_POWER_OFF);
+ if (ret < 0)
+ dev_err(as3722_pm_poweroff->dev,
+ "RESET_CONTROL_REG update failed, %d\n", ret);
+}
+
+static int as3722_poweroff_probe(struct platform_device *pdev)
+{
+ struct as3722_poweroff *as3722_poweroff;
+ struct device_node *np = pdev->dev.parent->of_node;
+
+ if (!np)
+ return -EINVAL;
+
+ if (!of_property_read_bool(np, "ams,system-power-controller"))
+ return 0;
+
+ as3722_poweroff = devm_kzalloc(&pdev->dev, sizeof(*as3722_poweroff),
+ GFP_KERNEL);
+ if (!as3722_poweroff)
+ return -ENOMEM;
+
+ as3722_poweroff->as3722 = dev_get_drvdata(pdev->dev.parent);
+ as3722_poweroff->dev = &pdev->dev;
+ as3722_pm_poweroff = as3722_poweroff;
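+ /*
+ * Claim the system power-off hook only if no other handler has been
+ * registered already.
+ */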
+ if (!pm_power_off)
+ pm_power_off = as3722_pm_power_off;
+
+ return 0;
+}
+
+static int as3722_poweroff_remove(struct platform_device *pdev)
+{
+ if (pm_power_off == as3722_pm_power_off)
+ pm_power_off = NULL;
+ as3722_pm_poweroff = NULL;
+
+ return 0;
+}
+
+static struct platform_driver as3722_poweroff_driver = {
+ .driver = {
+ .name = "as3722-power-off",
+ .owner = THIS_MODULE,
+ },
+ .probe = as3722_poweroff_probe,
+ .remove = as3722_poweroff_remove,
+};
+
+module_platform_driver(as3722_poweroff_driver);
+
+MODULE_DESCRIPTION("Power off driver for ams AS3722 PMIC Device");
+MODULE_ALIAS("platform:as3722-power-off");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 2a786c504460..3c6768378a94 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -833,6 +833,11 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
return 0;
}
+static const struct x86_cpu_id energy_unit_quirk_ids[] = {
+ { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
+ {}
+};
+
static int rapl_check_unit(struct rapl_package *rp, int cpu)
{
u64 msr_val;
@@ -853,8 +858,11 @@ static int rapl_check_unit(struct rapl_package *rp, int cpu)
* time unit: 1/time_unit_divisor Seconds
*/
value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
- rp->energy_unit_divisor = 1 << value;
-
+ /* some CPUs have a different way to calculate the energy unit */
+ if (x86_match_cpu(energy_unit_quirk_ids))
+ rp->energy_unit_divisor = 1000000 / (1 << value);
+ else
+ rp->energy_unit_divisor = 1 << value;
value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
rp->power_unit_divisor = 1 << value;
@@ -941,6 +949,7 @@ static void package_power_limit_irq_restore(int package_id)
static const struct x86_cpu_id rapl_ids[] = {
{ X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
{ X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
+ { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
{ X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
{ X86_VENDOR_INTEL, 6, 0x45},/* HSW */
/* TODO: Add more CPU IDs after testing */
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 5be73ba0519a..5a7910e61e17 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -73,6 +73,7 @@ config DP83640_PHY
config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
depends on X86 || COMPILE_TEST
+ depends on HAS_IOMEM
select PTP_1588_CLOCK
help
This driver adds support for using the PCH EG20T as a PTP
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index eece329d7872..22f2f2857b82 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -41,6 +41,15 @@ config PWM_AB8500
To compile this driver as a module, choose M here: the module
will be called pwm-ab8500.
+config PWM_ATMEL
+ tristate "Atmel PWM support"
+ depends on ARCH_AT91
+ help
+ Generic PWM framework driver for Atmel SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-atmel.
+
config PWM_ATMEL_TCB
tristate "Atmel TC Block PWM support"
depends on ATMEL_TCLIB && OF
@@ -90,6 +99,16 @@ config PWM_JZ4740
To compile this driver as a module, choose M here: the module
will be called pwm-jz4740.
+config PWM_LP3943
+ tristate "TI/National Semiconductor LP3943 PWM support"
+ depends on MFD_LP3943
+ help
+ Generic PWM framework driver for LP3943 which supports two PWM
+ channels.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-lp3943.
+
config PWM_LPC32XX
tristate "LPC32XX PWM support"
depends on ARCH_LPC32XX
@@ -112,7 +131,8 @@ config PWM_MXS
config PWM_PCA9685
tristate "NXP PCA9685 PWM driver"
- depends on OF && REGMAP_I2C
+ depends on OF && I2C
+ select REGMAP_I2C
help
Generic PWM framework driver for NXP PCA9685 LED controller.
@@ -139,7 +159,7 @@ config PWM_PXA
config PWM_RENESAS_TPU
tristate "Renesas TPU PWM support"
- depends on ARCH_SHMOBILE
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
This driver exposes the Timer Pulse Unit (TPU) PWM controller found
in Renesas chips through the PWM API.
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 8b754e4dba4a..d8906ec69976 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -1,11 +1,13 @@
obj-$(CONFIG_PWM) += core.o
obj-$(CONFIG_PWM_SYSFS) += sysfs.o
obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
+obj-$(CONFIG_PWM_ATMEL) += pwm-atmel.o
obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
obj-$(CONFIG_PWM_IMX) += pwm-imx.o
obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
+obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o
obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o
obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
obj-$(CONFIG_PWM_PCA9685) += pwm-pca9685.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 2ca95042a0b9..a80471399c20 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -808,12 +808,12 @@ static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
seq_printf(s, " pwm-%-3d (%-20.20s):", i, pwm->label);
if (test_bit(PWMF_REQUESTED, &pwm->flags))
- seq_printf(s, " requested");
+ seq_puts(s, " requested");
if (test_bit(PWMF_ENABLED, &pwm->flags))
- seq_printf(s, " enabled");
+ seq_puts(s, " enabled");
- seq_printf(s, "\n");
+ seq_puts(s, "\n");
}
}
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
new file mode 100644
index 000000000000..bf4144a14661
--- /dev/null
+++ b/drivers/pwm/pwm-atmel.c
@@ -0,0 +1,395 @@
+/*
+ * Driver for Atmel Pulse Width Modulation Controller
+ *
+ * Copyright (C) 2013 Atmel Corporation
+ * Bo Shen <voice.shen@atmel.com>
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+/* The following are global registers for the PWM controller */
+#define PWM_ENA 0x04
+#define PWM_DIS 0x08
+#define PWM_SR 0x0C
+/* Bit field in SR */
+#define PWM_SR_ALL_CH_ON 0x0F
+
+/* The following are per-channel PWM registers */
+#define PWM_CH_REG_OFFSET 0x200
+#define PWM_CH_REG_SIZE 0x20
+
+#define PWM_CMR 0x0
+/* Bit field in CMR */
+#define PWM_CMR_CPOL (1 << 9)
+#define PWM_CMR_UPD_CDTY (1 << 10)
+
+/* The following registers are for PWM v1 */
+#define PWMV1_CDTY 0x04
+#define PWMV1_CPRD 0x08
+#define PWMV1_CUPD 0x10
+
+/* The following registers are for PWM v2 */
+#define PWMV2_CDTY 0x04
+#define PWMV2_CDTYUPD 0x08
+#define PWMV2_CPRD 0x0C
+#define PWMV2_CPRDUPD 0x10
+
+/*
+ * Max value for duty and period
+ *
+ * Although the duty and period registers are 32 bits wide,
+ * only the 16 LSBs are significant.
+ */
+#define PWM_MAX_DTY 0xFFFF
+#define PWM_MAX_PRD 0xFFFF
+#define PRD_MAX_PRES 10
+
+struct atmel_pwm_chip {
+ struct pwm_chip chip;
+ struct clk *clk;
+ void __iomem *base;
+
+ void (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
+ unsigned long dty, unsigned long prd);
+};
+
+static inline struct atmel_pwm_chip *to_atmel_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct atmel_pwm_chip, chip);
+}
+
+static inline u32 atmel_pwm_readl(struct atmel_pwm_chip *chip,
+ unsigned long offset)
+{
+ return readl_relaxed(chip->base + offset);
+}
+
+static inline void atmel_pwm_writel(struct atmel_pwm_chip *chip,
+ unsigned long offset, unsigned long val)
+{
+ writel_relaxed(val, chip->base + offset);
+}
+
+static inline u32 atmel_pwm_ch_readl(struct atmel_pwm_chip *chip,
+ unsigned int ch, unsigned long offset)
+{
+ unsigned long base = PWM_CH_REG_OFFSET + ch * PWM_CH_REG_SIZE;
+
+ return readl_relaxed(chip->base + base + offset);
+}
+
+static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip,
+ unsigned int ch, unsigned long offset,
+ unsigned long val)
+{
+ unsigned long base = PWM_CH_REG_OFFSET + ch * PWM_CH_REG_SIZE;
+
+ writel_relaxed(val, chip->base + base + offset);
+}
+
+static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+ unsigned long clk_rate, prd, dty;
+ unsigned long long div;
+ unsigned int pres = 0;
+ int ret;
+
+ if (test_bit(PWMF_ENABLED, &pwm->flags) && (period_ns != pwm->period)) {
+ dev_err(chip->dev, "cannot change PWM period while enabled\n");
+ return -EBUSY;
+ }
+
+ clk_rate = clk_get_rate(atmel_pwm->clk);
+ div = clk_rate;
+
+ /* Calculate the period cycles */
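+ /*
+ * Illustrative numbers (not part of the original code): with a 100 MHz
+ * peripheral clock and a requested period of 1 ms the unscaled count
+ * is 100000 cycles, which exceeds PWM_MAX_PRD, so the prescaler is
+ * raised until the count fits into the 16-bit period field.
+ */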
+ while (div > PWM_MAX_PRD) {
+ div = clk_rate / (1 << pres);
+ div = div * period_ns;
+ /* 1/Hz = 1000000000 ns */
+ do_div(div, 1000000000);
+
+ if (pres++ > PRD_MAX_PRES) {
+ dev_err(chip->dev, "pres exceeds the maximum value\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Calculate the duty cycles */
+ prd = div;
+ div *= duty_ns;
+ do_div(div, period_ns);
+ dty = div;
+
+ ret = clk_enable(atmel_pwm->clk);
+ if (ret) {
+ dev_err(chip->dev, "failed to enable PWM clock\n");
+ return ret;
+ }
+
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, pres);
+ atmel_pwm->config(chip, pwm, dty, prd);
+
+ clk_disable(atmel_pwm->clk);
+ return ret;
+}
+
+static void atmel_pwm_config_v1(struct pwm_chip *chip, struct pwm_device *pwm,
+ unsigned long dty, unsigned long prd)
+{
+ struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+ unsigned int val;
+
+ if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+ /*
+ * If the PWM channel is enabled, write the new duty cycle through
+ * the update register and clear bit 10 of CMR so that the value
+ * written to CUPD updates the duty cycle.
+ */
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CUPD, dty);
+
+ val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
+ val &= ~PWM_CMR_UPD_CDTY;
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
+ } else {
+ /*
+ * If the PWM channel is disabled, write value to duty and
+ * period registers directly.
+ */
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CDTY, dty);
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CPRD, prd);
+ }
+}
+
+static void atmel_pwm_config_v2(struct pwm_chip *chip, struct pwm_device *pwm,
+ unsigned long dty, unsigned long prd)
+{
+ struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+
+ if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+ /*
+ * If the PWM channel is enabled, use the duty update register
+ * to update the value.
+ */
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV2_CDTYUPD, dty);
+ } else {
+ /*
+ * If the PWM channel is disabled, write value to duty and
+ * period registers directly.
+ */
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV2_CDTY, dty);
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV2_CPRD, prd);
+ }
+}
+
+static int atmel_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+ u32 val;
+ int ret;
+
+ val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
+
+ if (polarity == PWM_POLARITY_NORMAL)
+ val &= ~PWM_CMR_CPOL;
+ else
+ val |= PWM_CMR_CPOL;
+
+ ret = clk_enable(atmel_pwm->clk);
+ if (ret) {
+ dev_err(chip->dev, "failed to enable PWM clock\n");
+ return ret;
+ }
+
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
+
+ clk_disable(atmel_pwm->clk);
+
+ return 0;
+}
+
+static int atmel_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+ int ret;
+
+ ret = clk_enable(atmel_pwm->clk);
+ if (ret) {
+ dev_err(chip->dev, "failed to enable PWM clock\n");
+ return ret;
+ }
+
+ atmel_pwm_writel(atmel_pwm, PWM_ENA, 1 << pwm->hwpwm);
+
+ return 0;
+}
+
+static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+
+ atmel_pwm_writel(atmel_pwm, PWM_DIS, 1 << pwm->hwpwm);
+
+ clk_disable(atmel_pwm->clk);
+}
+
+static const struct pwm_ops atmel_pwm_ops = {
+ .config = atmel_pwm_config,
+ .set_polarity = atmel_pwm_set_polarity,
+ .enable = atmel_pwm_enable,
+ .disable = atmel_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+struct atmel_pwm_data {
+ void (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
+ unsigned long dty, unsigned long prd);
+};
+
+static const struct atmel_pwm_data atmel_pwm_data_v1 = {
+ .config = atmel_pwm_config_v1,
+};
+
+static const struct atmel_pwm_data atmel_pwm_data_v2 = {
+ .config = atmel_pwm_config_v2,
+};
+
+static const struct platform_device_id atmel_pwm_devtypes[] = {
+ {
+ .name = "at91sam9rl-pwm",
+ .driver_data = (kernel_ulong_t)&atmel_pwm_data_v1,
+ }, {
+ .name = "sama5d3-pwm",
+ .driver_data = (kernel_ulong_t)&atmel_pwm_data_v2,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(platform, atmel_pwm_devtypes);
+
+static const struct of_device_id atmel_pwm_dt_ids[] = {
+ {
+ .compatible = "atmel,at91sam9rl-pwm",
+ .data = &atmel_pwm_data_v1,
+ }, {
+ .compatible = "atmel,sama5d3-pwm",
+ .data = &atmel_pwm_data_v2,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids);
+
+static inline const struct atmel_pwm_data *
+atmel_pwm_get_driver_data(struct platform_device *pdev)
+{
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_device(atmel_pwm_dt_ids, &pdev->dev);
+ if (!match)
+ return NULL;
+
+ return match->data;
+ } else {
+ const struct platform_device_id *id;
+
+ id = platform_get_device_id(pdev);
+
+ return (struct atmel_pwm_data *)id->driver_data;
+ }
+}
+
+static int atmel_pwm_probe(struct platform_device *pdev)
+{
+ const struct atmel_pwm_data *data;
+ struct atmel_pwm_chip *atmel_pwm;
+ struct resource *res;
+ int ret;
+
+ data = atmel_pwm_get_driver_data(pdev);
+ if (!data)
+ return -ENODEV;
+
+ atmel_pwm = devm_kzalloc(&pdev->dev, sizeof(*atmel_pwm), GFP_KERNEL);
+ if (!atmel_pwm)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ atmel_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(atmel_pwm->base))
+ return PTR_ERR(atmel_pwm->base);
+
+ atmel_pwm->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(atmel_pwm->clk))
+ return PTR_ERR(atmel_pwm->clk);
+
+ ret = clk_prepare(atmel_pwm->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to prepare PWM clock\n");
+ return ret;
+ }
+
+ atmel_pwm->chip.dev = &pdev->dev;
+ atmel_pwm->chip.ops = &atmel_pwm_ops;
+
+ if (pdev->dev.of_node) {
+ atmel_pwm->chip.of_xlate = of_pwm_xlate_with_flags;
+ atmel_pwm->chip.of_pwm_n_cells = 3;
+ }
+
+ atmel_pwm->chip.base = -1;
+ atmel_pwm->chip.npwm = 4;
+ atmel_pwm->config = data->config;
+
+ ret = pwmchip_add(&atmel_pwm->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add PWM chip %d\n", ret);
+ goto unprepare_clk;
+ }
+
+ platform_set_drvdata(pdev, atmel_pwm);
+
+ return ret;
+
+unprepare_clk:
+ clk_unprepare(atmel_pwm->clk);
+ return ret;
+}
+
+static int atmel_pwm_remove(struct platform_device *pdev)
+{
+ struct atmel_pwm_chip *atmel_pwm = platform_get_drvdata(pdev);
+
+ clk_unprepare(atmel_pwm->clk);
+
+ return pwmchip_remove(&atmel_pwm->chip);
+}
+
+static struct platform_driver atmel_pwm_driver = {
+ .driver = {
+ .name = "atmel-pwm",
+ .of_match_table = of_match_ptr(atmel_pwm_dt_ids),
+ },
+ .id_table = atmel_pwm_devtypes,
+ .probe = atmel_pwm_probe,
+ .remove = atmel_pwm_remove,
+};
+module_platform_driver(atmel_pwm_driver);
+
+MODULE_ALIAS("platform:atmel-pwm");
+MODULE_AUTHOR("Bo Shen <voice.shen@atmel.com>");
+MODULE_DESCRIPTION("Atmel PWM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index 33aa4461e1ce..e593e9c45c51 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -224,7 +224,7 @@ static struct platform_driver ep93xx_pwm_driver = {
module_platform_driver(ep93xx_pwm_driver);
MODULE_DESCRIPTION("Cirrus Logic EP93xx PWM driver");
-MODULE_AUTHOR("Matthieu Crapet <mcrapet@gmail.com>, "
- "H Hartley Sweeten <hsweeten@visionengravers.com>");
+MODULE_AUTHOR("Matthieu Crapet <mcrapet@gmail.com>");
+MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
MODULE_ALIAS("platform:ep93xx-pwm");
MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 0a2ede3c3932..9c46209e1d02 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -165,13 +165,12 @@ static const struct pwm_ops jz4740_pwm_ops = {
static int jz4740_pwm_probe(struct platform_device *pdev)
{
struct jz4740_pwm_chip *jz4740;
- int ret;
jz4740 = devm_kzalloc(&pdev->dev, sizeof(*jz4740), GFP_KERNEL);
if (!jz4740)
return -ENOMEM;
- jz4740->clk = clk_get(NULL, "ext");
+ jz4740->clk = devm_clk_get(&pdev->dev, "ext");
if (IS_ERR(jz4740->clk))
return PTR_ERR(jz4740->clk);
@@ -180,29 +179,16 @@ static int jz4740_pwm_probe(struct platform_device *pdev)
jz4740->chip.npwm = NUM_PWM;
jz4740->chip.base = -1;
- ret = pwmchip_add(&jz4740->chip);
- if (ret < 0) {
- clk_put(jz4740->clk);
- return ret;
- }
-
platform_set_drvdata(pdev, jz4740);
- return 0;
+ return pwmchip_add(&jz4740->chip);
}
static int jz4740_pwm_remove(struct platform_device *pdev)
{
struct jz4740_pwm_chip *jz4740 = platform_get_drvdata(pdev);
- int ret;
-
- ret = pwmchip_remove(&jz4740->chip);
- if (ret < 0)
- return ret;
- clk_put(jz4740->clk);
-
- return 0;
+ return pwmchip_remove(&jz4740->chip);
}
static struct platform_driver jz4740_pwm_driver = {
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
new file mode 100644
index 000000000000..8a843a04c224
--- /dev/null
+++ b/drivers/pwm/pwm-lp3943.c
@@ -0,0 +1,314 @@
+/*
+ * TI/National Semiconductor LP3943 PWM driver
+ *
+ * Copyright 2013 Texas Instruments
+ *
+ * Author: Milo Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mfd/lp3943.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+#define LP3943_MAX_DUTY 255
+#define LP3943_MIN_PERIOD 6250
+#define LP3943_MAX_PERIOD 1600000
+
+struct lp3943_pwm {
+ struct pwm_chip chip;
+ struct lp3943 *lp3943;
+ struct lp3943_platform_data *pdata;
+};
+
+static inline struct lp3943_pwm *to_lp3943_pwm(struct pwm_chip *_chip)
+{
+ return container_of(_chip, struct lp3943_pwm, chip);
+}
+
+static struct lp3943_pwm_map *
+lp3943_pwm_request_map(struct lp3943_pwm *lp3943_pwm, int hwpwm)
+{
+ struct lp3943_platform_data *pdata = lp3943_pwm->pdata;
+ struct lp3943 *lp3943 = lp3943_pwm->lp3943;
+ struct lp3943_pwm_map *pwm_map;
+ int i, offset;
+
+ pwm_map = kzalloc(sizeof(*pwm_map), GFP_KERNEL);
+ if (!pwm_map)
+ return ERR_PTR(-ENOMEM);
+
+ pwm_map->output = pdata->pwms[hwpwm]->output;
+ pwm_map->num_outputs = pdata->pwms[hwpwm]->num_outputs;
+
+ for (i = 0; i < pwm_map->num_outputs; i++) {
+ offset = pwm_map->output[i];
+
+ /* Return an error if the pin is already assigned */
+ if (test_and_set_bit(offset, &lp3943->pin_used))
+ return ERR_PTR(-EBUSY);
+ }
+
+ return pwm_map;
+}
+
+static int lp3943_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
+ struct lp3943_pwm_map *pwm_map;
+
+ pwm_map = lp3943_pwm_request_map(lp3943_pwm, pwm->hwpwm);
+ if (IS_ERR(pwm_map))
+ return PTR_ERR(pwm_map);
+
+ return pwm_set_chip_data(pwm, pwm_map);
+}
+
+static void lp3943_pwm_free_map(struct lp3943_pwm *lp3943_pwm,
+ struct lp3943_pwm_map *pwm_map)
+{
+ struct lp3943 *lp3943 = lp3943_pwm->lp3943;
+ int i, offset;
+
+ for (i = 0; i < pwm_map->num_outputs; i++) {
+ offset = pwm_map->output[i];
+ clear_bit(offset, &lp3943->pin_used);
+ }
+
+ kfree(pwm_map);
+}
+
+static void lp3943_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
+ struct lp3943_pwm_map *pwm_map = pwm_get_chip_data(pwm);
+
+ lp3943_pwm_free_map(lp3943_pwm, pwm_map);
+}
+
+static int lp3943_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
+ struct lp3943 *lp3943 = lp3943_pwm->lp3943;
+ u8 val, reg_duty, reg_prescale;
+ int err;
+
+ /*
+ * How to configure the LP3943 PWMs
+ *
+ * 1) Period = 6250 ~ 1600000
+ * 2) Prescale = period / 6250 - 1
+ * 3) Duty = input duty
+ *
+ * Prescale and duty are register values
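+ *
+ * Illustrative example (values are hypothetical): a requested period
+ * of 50000 ns gives prescale = 50000 / 6250 - 1 = 7, and a 25000 ns
+ * duty request maps to 25000 * 255 / 50000 = 127.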
+ */
+
+ if (pwm->hwpwm == 0) {
+ reg_prescale = LP3943_REG_PRESCALE0;
+ reg_duty = LP3943_REG_PWM0;
+ } else {
+ reg_prescale = LP3943_REG_PRESCALE1;
+ reg_duty = LP3943_REG_PWM1;
+ }
+
+ period_ns = clamp(period_ns, LP3943_MIN_PERIOD, LP3943_MAX_PERIOD);
+ val = (u8)(period_ns / LP3943_MIN_PERIOD - 1);
+
+ err = lp3943_write_byte(lp3943, reg_prescale, val);
+ if (err)
+ return err;
+
+ val = (u8)(duty_ns * LP3943_MAX_DUTY / period_ns);
+
+ return lp3943_write_byte(lp3943, reg_duty, val);
+}
+
+static int lp3943_pwm_set_mode(struct lp3943_pwm *lp3943_pwm,
+ struct lp3943_pwm_map *pwm_map,
+ u8 val)
+{
+ struct lp3943 *lp3943 = lp3943_pwm->lp3943;
+ const struct lp3943_reg_cfg *mux = lp3943->mux_cfg;
+ int i, index, err;
+
+ for (i = 0; i < pwm_map->num_outputs; i++) {
+ index = pwm_map->output[i];
+ err = lp3943_update_bits(lp3943, mux[index].reg,
+ mux[index].mask,
+ val << mux[index].shift);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int lp3943_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
+ struct lp3943_pwm_map *pwm_map = pwm_get_chip_data(pwm);
+ u8 val;
+
+ if (pwm->hwpwm == 0)
+ val = LP3943_DIM_PWM0;
+ else
+ val = LP3943_DIM_PWM1;
+
+ /*
+ * Each PWM generator can be set to control any of the LP3943 outputs.
+ * To enable/disable the PWM, these output pins should be configured.
+ */
+
+ return lp3943_pwm_set_mode(lp3943_pwm, pwm_map, val);
+}
+
+static void lp3943_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
+ struct lp3943_pwm_map *pwm_map = pwm_get_chip_data(pwm);
+
+ /*
+ * LP3943 outputs are open-drain, so the pin should be configured
+ * when the PWM is disabled.
+ */
+
+ lp3943_pwm_set_mode(lp3943_pwm, pwm_map, LP3943_GPIO_OUT_HIGH);
+}
+
+static const struct pwm_ops lp3943_pwm_ops = {
+ .request = lp3943_pwm_request,
+ .free = lp3943_pwm_free,
+ .config = lp3943_pwm_config,
+ .enable = lp3943_pwm_enable,
+ .disable = lp3943_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int lp3943_pwm_parse_dt(struct device *dev,
+ struct lp3943_pwm *lp3943_pwm)
+{
+ static const char * const name[] = { "ti,pwm0", "ti,pwm1", };
+ struct device_node *node = dev->of_node;
+ struct lp3943_platform_data *pdata;
+ struct lp3943_pwm_map *pwm_map;
+ enum lp3943_pwm_output *output;
+ int i, err, proplen, count = 0;
+ u32 num_outputs;
+
+ if (!node)
+ return -EINVAL;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ /*
+ * Read the output map configuration from the device tree.
+ * Each of the two PWM generators can drive zero or more outputs.
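+ *
+ * Illustrative node fragment (output numbers are hypothetical):
+ *	ti,pwm0 = <0 1 2>;
+ * would route LP3943 outputs 0, 1 and 2 to PWM generator 0.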
+ */
+
+ for (i = 0; i < LP3943_NUM_PWMS; i++) {
+ if (!of_get_property(node, name[i], &proplen))
+ continue;
+
+ num_outputs = proplen / sizeof(u32);
+ if (num_outputs == 0)
+ continue;
+
+ output = devm_kzalloc(dev, sizeof(*output) * num_outputs,
+ GFP_KERNEL);
+ if (!output)
+ return -ENOMEM;
+
+ err = of_property_read_u32_array(node, name[i], output,
+ num_outputs);
+ if (err)
+ return err;
+
+ pwm_map = devm_kzalloc(dev, sizeof(*pwm_map), GFP_KERNEL);
+ if (!pwm_map)
+ return -ENOMEM;
+
+ pwm_map->output = output;
+ pwm_map->num_outputs = num_outputs;
+ pdata->pwms[i] = pwm_map;
+
+ count++;
+ }
+
+ if (count == 0)
+ return -ENODATA;
+
+ lp3943_pwm->pdata = pdata;
+ return 0;
+}
+
+static int lp3943_pwm_probe(struct platform_device *pdev)
+{
+ struct lp3943 *lp3943 = dev_get_drvdata(pdev->dev.parent);
+ struct lp3943_pwm *lp3943_pwm;
+ int ret;
+
+ lp3943_pwm = devm_kzalloc(&pdev->dev, sizeof(*lp3943_pwm), GFP_KERNEL);
+ if (!lp3943_pwm)
+ return -ENOMEM;
+
+ lp3943_pwm->pdata = lp3943->pdata;
+ if (!lp3943_pwm->pdata) {
+ if (IS_ENABLED(CONFIG_OF))
+ ret = lp3943_pwm_parse_dt(&pdev->dev, lp3943_pwm);
+ else
+ ret = -ENODEV;
+
+ if (ret)
+ return ret;
+ }
+
+ lp3943_pwm->lp3943 = lp3943;
+ lp3943_pwm->chip.dev = &pdev->dev;
+ lp3943_pwm->chip.ops = &lp3943_pwm_ops;
+ lp3943_pwm->chip.npwm = LP3943_NUM_PWMS;
+
+ platform_set_drvdata(pdev, lp3943_pwm);
+
+ return pwmchip_add(&lp3943_pwm->chip);
+}
+
+static int lp3943_pwm_remove(struct platform_device *pdev)
+{
+ struct lp3943_pwm *lp3943_pwm = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&lp3943_pwm->chip);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id lp3943_pwm_of_match[] = {
+ { .compatible = "ti,lp3943-pwm", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lp3943_pwm_of_match);
+#endif
+
+static struct platform_driver lp3943_pwm_driver = {
+ .probe = lp3943_pwm_probe,
+ .remove = lp3943_pwm_remove,
+ .driver = {
+ .name = "lp3943-pwm",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(lp3943_pwm_of_match),
+ },
+};
+module_platform_driver(lp3943_pwm_driver);
+
+MODULE_DESCRIPTION("LP3943 PWM driver");
+MODULE_ALIAS("platform:lp3943-pwm");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index a4d2164aaf55..8d995731cef8 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -8,7 +8,7 @@
* published by the Free Software Foundation.
*
* 2008-02-13 initial version
- * eric miao <eric.miao@marvell.com>
+ * eric miao <eric.miao@marvell.com>
*/
#include <linux/module.h>
@@ -19,6 +19,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/pwm.h>
+#include <linux/of_device.h>
#include <asm/div64.h>
@@ -124,6 +125,46 @@ static struct pwm_ops pxa_pwm_ops = {
.owner = THIS_MODULE,
};
+#ifdef CONFIG_OF
+/*
+ * Device tree users must create one device instance for each pwm channel.
+ * Hence we dispense with the HAS_SECONDARY_PWM and "tell" the original driver
+ * code that this is a single channel pxa25x-pwm. Currently all devices are
+ * supported identically.
+ */
+static struct of_device_id pwm_of_match[] = {
+ { .compatible = "marvell,pxa250-pwm", .data = &pwm_id_table[0]},
+ { .compatible = "marvell,pxa270-pwm", .data = &pwm_id_table[0]},
+ { .compatible = "marvell,pxa168-pwm", .data = &pwm_id_table[0]},
+ { .compatible = "marvell,pxa910-pwm", .data = &pwm_id_table[0]},
+ { }
+};
+MODULE_DEVICE_TABLE(of, pwm_of_match);
+#else
+#define pwm_of_match NULL
+#endif
+
+static const struct platform_device_id *pxa_pwm_get_id_dt(struct device *dev)
+{
+ const struct of_device_id *id = of_match_device(pwm_of_match, dev);
+
+ return id ? id->data : NULL;
+}
+
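+/*
+ * Illustrative consumer binding (node and label names are made up);
+ * the single PWM specifier cell carries the period in nanoseconds:
+ *
+ *	backlight {
+ *		pwms = <&pwm0 1000000>;
+ *	};
+ */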
+static struct pwm_device *
+pxa_pwm_of_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
+{
+ struct pwm_device *pwm;
+
+ pwm = pwm_request_from_chip(pc, 0, NULL);
+ if (IS_ERR(pwm))
+ return pwm;
+
+ pwm_set_period(pwm, args->args[0]);
+
+ return pwm;
+}
+
static int pwm_probe(struct platform_device *pdev)
{
const struct platform_device_id *id = platform_get_device_id(pdev);
@@ -131,6 +172,12 @@ static int pwm_probe(struct platform_device *pdev)
struct resource *r;
int ret = 0;
+ if (IS_ENABLED(CONFIG_OF) && id == NULL)
+ id = pxa_pwm_get_id_dt(&pdev->dev);
+
+ if (id == NULL)
+ return -EINVAL;
+
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
if (pwm == NULL) {
dev_err(&pdev->dev, "failed to allocate memory\n");
@@ -146,6 +193,11 @@ static int pwm_probe(struct platform_device *pdev)
pwm->chip.base = -1;
pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1;
+ if (IS_ENABLED(CONFIG_OF)) {
+ pwm->chip.of_xlate = pxa_pwm_of_xlate;
+ pwm->chip.of_pwm_n_cells = 1;
+ }
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(pwm->mmio_base))
@@ -176,6 +228,7 @@ static struct platform_driver pwm_driver = {
.driver = {
.name = "pxa25x-pwm",
.owner = THIS_MODULE,
+ .of_match_table = pwm_of_match,
},
.probe = pwm_probe,
.remove = pwm_remove,
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 4e5c3d13d4f8..032092c7a6ae 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -279,7 +279,6 @@ static int ecap_pwm_remove(struct platform_device *pdev)
pwmss_submodule_state_change(pdev->dev.parent, PWMSS_ECAPCLK_STOP_REQ);
pm_runtime_put_sync(&pdev->dev);
- pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return pwmchip_remove(&pc->chip);
}
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index a4d8f519d965..aee4471424d1 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -360,8 +360,8 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
/* Enable TBCLK before enabling PWM device */
ret = clk_enable(pc->tbclk);
if (ret) {
- pr_err("Failed to enable TBCLK for %s\n",
- dev_name(pc->chip.dev));
+ dev_err(chip->dev, "Failed to enable TBCLK for %s\n",
+ dev_name(pc->chip.dev));
return ret;
}
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 8c20332d4825..4bd0c639e16d 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -169,15 +169,7 @@ static struct attribute *pwm_attrs[] = {
&dev_attr_polarity.attr,
NULL
};
-
-static const struct attribute_group pwm_attr_group = {
- .attrs = pwm_attrs,
-};
-
-static const struct attribute_group *pwm_attr_groups[] = {
- &pwm_attr_group,
- NULL,
-};
+ATTRIBUTE_GROUPS(pwm);
static void pwm_export_release(struct device *child)
{
@@ -205,7 +197,7 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
export->child.release = pwm_export_release;
export->child.parent = parent;
export->child.devt = MKDEV(0, 0);
- export->child.groups = pwm_attr_groups;
+ export->child.groups = pwm_groups;
dev_set_name(&export->child, "pwm%u", pwm->hwpwm);
ret = device_register(&export->child);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index ce785f481281..6a7932822e37 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -70,6 +70,14 @@ config REGULATOR_88PM8607
help
This driver supports 88PM8607 voltage regulator chips.
+config REGULATOR_ACT8865
+ tristate "Active-semi act8865 voltage regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+	  This driver controls an active-semi act8865 voltage output
+	  regulator via the I2C bus.
+
config REGULATOR_AD5398
tristate "Analog Devices AD5398/AD5821 regulators"
depends on I2C
@@ -249,6 +257,13 @@ config REGULATOR_LP8788
help
This driver supports LP8788 voltage regulator chip.
+config REGULATOR_MAX14577
+ tristate "Maxim 14577 regulator"
+ depends on MFD_MAX14577
+ help
+	  This driver controls a Maxim 14577 regulator via the I2C bus.
+ The regulators include safeout LDO and current regulator 'CHARGER'.
+
config REGULATOR_MAX1586
tristate "Maxim 1586/1587 voltage regulator"
depends on I2C
@@ -343,7 +358,7 @@ config REGULATOR_MC13XXX_CORE
config REGULATOR_MC13783
tristate "Freescale MC13783 regulator driver"
- depends on MFD_MC13783
+ depends on MFD_MC13XXX
select REGULATOR_MC13XXX_CORE
help
Say y here to support the regulators found on the Freescale MC13783
@@ -384,7 +399,7 @@ config REGULATOR_PCF50633
on PCF50633
config REGULATOR_PFUZE100
- tristate "Support regulators on Freescale PFUZE100 PMIC"
+ tristate "Freescale PFUZE100 regulator driver"
depends on I2C
select REGMAP_I2C
help
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 01c597ea1744..979f9ddcf259 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500-ext.o ab8500.o
+obj-$(CONFIG_REGULATOR_ACT8865) += act8865-regulator.o
obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o
@@ -35,6 +36,7 @@ obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o
obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o
obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o
obj-$(CONFIG_REGULATOR_LP8755) += lp8755.o
+obj-$(CONFIG_REGULATOR_MAX14577) += max14577.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 77b46d0b37a6..e10febe9ec34 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -498,7 +498,7 @@ static int ab3100_regulator_register(struct platform_device *pdev,
struct ab3100_platform_data *plfdata,
struct regulator_init_data *init_data,
struct device_node *np,
- int id)
+ unsigned long id)
{
struct regulator_desc *desc;
struct ab3100_regulator *reg;
@@ -646,7 +646,7 @@ ab3100_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
err = ab3100_regulator_register(
pdev, NULL, ab3100_regulator_matches[i].init_data,
ab3100_regulator_matches[i].of_node,
- (int) ab3100_regulator_matches[i].driver_data);
+ (unsigned long)ab3100_regulator_matches[i].driver_data);
if (err) {
ab3100_regulators_remove(pdev);
return err;
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 603f192e84f1..c625468c7f2c 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -2998,37 +2998,6 @@ static void abx500_get_regulator_info(struct ab8500 *ab8500)
}
}
-static int ab8500_regulator_init_registers(struct platform_device *pdev,
- int id, int mask, int value)
-{
- struct ab8500_reg_init *reg_init = abx500_regulator.init;
- int err;
-
- BUG_ON(value & ~mask);
- BUG_ON(mask & ~reg_init[id].mask);
-
- /* initialize register */
- err = abx500_mask_and_set_register_interruptible(
- &pdev->dev,
- reg_init[id].bank,
- reg_init[id].addr,
- mask, value);
- if (err < 0) {
- dev_err(&pdev->dev,
- "Failed to initialize 0x%02x, 0x%02x.\n",
- reg_init[id].bank,
- reg_init[id].addr);
- return err;
- }
- dev_vdbg(&pdev->dev,
- " init: 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
- reg_init[id].bank,
- reg_init[id].addr,
- mask, value);
-
- return 0;
-}
-
static int ab8500_regulator_register(struct platform_device *pdev,
struct regulator_init_data *init_data,
int id, struct device_node *np)
@@ -3036,7 +3005,6 @@ static int ab8500_regulator_register(struct platform_device *pdev,
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
struct ab8500_regulator_info *info = NULL;
struct regulator_config config = { };
- int err;
/* assign per-regulator data */
info = &abx500_regulator.info[id];
@@ -3058,17 +3026,12 @@ static int ab8500_regulator_register(struct platform_device *pdev,
}
/* register regulator with framework */
- info->regulator = regulator_register(&info->desc, &config);
+ info->regulator = devm_regulator_register(&pdev->dev, &info->desc,
+ &config);
if (IS_ERR(info->regulator)) {
- err = PTR_ERR(info->regulator);
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
- /* when we fail, un-register all earlier regulators */
- while (--id >= 0) {
- info = &abx500_regulator.info[id];
- regulator_unregister(info->regulator);
- }
- return err;
+ return PTR_ERR(info->regulator);
}
return 0;
@@ -3095,9 +3058,7 @@ static int ab8500_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
struct device_node *np = pdev->dev.of_node;
- struct ab8500_platform_data *ppdata;
- struct ab8500_regulator_platform_data *pdata;
- int i, err;
+ int err;
if (!ab8500) {
dev_err(&pdev->dev, "null mfd parent\n");
@@ -3106,83 +3067,20 @@ static int ab8500_regulator_probe(struct platform_device *pdev)
abx500_get_regulator_info(ab8500);
- if (np) {
- err = of_regulator_match(&pdev->dev, np,
- abx500_regulator.match,
- abx500_regulator.match_size);
- if (err < 0) {
- dev_err(&pdev->dev,
- "Error parsing regulator init data: %d\n", err);
- return err;
- }
-
- err = ab8500_regulator_of_probe(pdev, np);
- return err;
- }
-
- ppdata = dev_get_platdata(ab8500->dev);
- if (!ppdata) {
- dev_err(&pdev->dev, "null parent pdata\n");
- return -EINVAL;
- }
-
- pdata = ppdata->regulator;
- if (!pdata) {
- dev_err(&pdev->dev, "null pdata\n");
- return -EINVAL;
- }
-
- /* make sure the platform data has the correct size */
- if (pdata->num_regulator != abx500_regulator.info_size) {
- dev_err(&pdev->dev, "Configuration error: size mismatch.\n");
- return -EINVAL;
- }
-
- /* initialize debug (initial state is recorded with this call) */
- err = ab8500_regulator_debug_init(pdev);
- if (err)
+ err = of_regulator_match(&pdev->dev, np,
+ abx500_regulator.match,
+ abx500_regulator.match_size);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Error parsing regulator init data: %d\n", err);
return err;
-
- /* initialize registers */
- for (i = 0; i < pdata->num_reg_init; i++) {
- int id, mask, value;
-
- id = pdata->reg_init[i].id;
- mask = pdata->reg_init[i].mask;
- value = pdata->reg_init[i].value;
-
- /* check for configuration errors */
- BUG_ON(id >= abx500_regulator.init_size);
-
- err = ab8500_regulator_init_registers(pdev, id, mask, value);
- if (err < 0)
- return err;
}
-
- /* register all regulators */
- for (i = 0; i < abx500_regulator.info_size; i++) {
- err = ab8500_regulator_register(pdev, &pdata->regulator[i],
- i, NULL);
- if (err < 0)
- return err;
- }
-
- return 0;
+ return ab8500_regulator_of_probe(pdev, np);
}
static int ab8500_regulator_remove(struct platform_device *pdev)
{
- int i, err;
-
- for (i = 0; i < abx500_regulator.info_size; i++) {
- struct ab8500_regulator_info *info = NULL;
- info = &abx500_regulator.info[i];
-
- dev_vdbg(rdev_get_dev(info->regulator),
- "%s-remove\n", info->desc.name);
-
- regulator_unregister(info->regulator);
- }
+ int err;
/* remove regulator debug */
err = ab8500_regulator_debug_exit(pdev);
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
new file mode 100644
index 000000000000..084cc0819a52
--- /dev/null
+++ b/drivers/regulator/act8865-regulator.c
@@ -0,0 +1,349 @@
+/*
+ * act8865-regulator.c - Voltage regulation for the active-semi ACT8865
+ * http://www.active-semi.com/sheets/ACT8865_Datasheet.pdf
+ *
+ * Copyright (C) 2013 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/act8865.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regmap.h>
+
+/*
+ * ACT8865 Global Register Map.
+ */
+#define ACT8865_SYS_MODE 0x00
+#define ACT8865_SYS_CTRL 0x01
+#define ACT8865_DCDC1_VSET1 0x20
+#define ACT8865_DCDC1_VSET2 0x21
+#define ACT8865_DCDC1_CTRL 0x22
+#define ACT8865_DCDC2_VSET1 0x30
+#define ACT8865_DCDC2_VSET2 0x31
+#define ACT8865_DCDC2_CTRL 0x32
+#define ACT8865_DCDC3_VSET1 0x40
+#define ACT8865_DCDC3_VSET2 0x41
+#define ACT8865_DCDC3_CTRL 0x42
+#define ACT8865_LDO1_VSET 0x50
+#define ACT8865_LDO1_CTRL 0x51
+#define ACT8865_LDO2_VSET 0x54
+#define ACT8865_LDO2_CTRL 0x55
+#define ACT8865_LDO3_VSET 0x60
+#define ACT8865_LDO3_CTRL 0x61
+#define ACT8865_LDO4_VSET 0x64
+#define ACT8865_LDO4_CTRL 0x65
+
+/*
+ * Field Definitions.
+ */
+#define ACT8865_ENA 0x80 /* ON - [7] */
+#define ACT8865_VSEL_MASK 0x3F /* VSET - [5:0] */
+
+/*
+ * ACT8865 voltage number
+ */
+#define ACT8865_VOLTAGE_NUM 64
+
+struct act8865 {
+ struct regulator_dev *rdev[ACT8865_REG_NUM];
+ struct regmap *regmap;
+};
+
+static const struct regmap_config act8865_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
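+/*
+ * The selector-to-voltage mapping is linear within each range; as an
+ * illustration, selector 30 falls in the middle range and maps to
+ * 1200000 + (30 - 24) * 50000 = 1500000 uV.
+ */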
+static const struct regulator_linear_range act8865_volatge_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0, 23, 25000),
+ REGULATOR_LINEAR_RANGE(1200000, 24, 47, 50000),
+ REGULATOR_LINEAR_RANGE(2400000, 48, 63, 100000),
+};
+
+static struct regulator_ops act8865_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct regulator_desc act8865_reg[] = {
+ {
+ .name = "DCDC_REG1",
+ .id = ACT8865_ID_DCDC1,
+ .ops = &act8865_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ACT8865_VOLTAGE_NUM,
+ .linear_ranges = act8865_volatge_ranges,
+ .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
+ .vsel_reg = ACT8865_DCDC1_VSET1,
+ .vsel_mask = ACT8865_VSEL_MASK,
+ .enable_reg = ACT8865_DCDC1_CTRL,
+ .enable_mask = ACT8865_ENA,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "DCDC_REG2",
+ .id = ACT8865_ID_DCDC2,
+ .ops = &act8865_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ACT8865_VOLTAGE_NUM,
+ .linear_ranges = act8865_volatge_ranges,
+ .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
+ .vsel_reg = ACT8865_DCDC2_VSET1,
+ .vsel_mask = ACT8865_VSEL_MASK,
+ .enable_reg = ACT8865_DCDC2_CTRL,
+ .enable_mask = ACT8865_ENA,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "DCDC_REG3",
+ .id = ACT8865_ID_DCDC3,
+ .ops = &act8865_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ACT8865_VOLTAGE_NUM,
+ .linear_ranges = act8865_volatge_ranges,
+ .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
+ .vsel_reg = ACT8865_DCDC3_VSET1,
+ .vsel_mask = ACT8865_VSEL_MASK,
+ .enable_reg = ACT8865_DCDC3_CTRL,
+ .enable_mask = ACT8865_ENA,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO_REG1",
+ .id = ACT8865_ID_LDO1,
+ .ops = &act8865_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ACT8865_VOLTAGE_NUM,
+ .linear_ranges = act8865_volatge_ranges,
+ .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
+ .vsel_reg = ACT8865_LDO1_VSET,
+ .vsel_mask = ACT8865_VSEL_MASK,
+ .enable_reg = ACT8865_LDO1_CTRL,
+ .enable_mask = ACT8865_ENA,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO_REG2",
+ .id = ACT8865_ID_LDO2,
+ .ops = &act8865_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ACT8865_VOLTAGE_NUM,
+ .linear_ranges = act8865_volatge_ranges,
+ .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
+ .vsel_reg = ACT8865_LDO2_VSET,
+ .vsel_mask = ACT8865_VSEL_MASK,
+ .enable_reg = ACT8865_LDO2_CTRL,
+ .enable_mask = ACT8865_ENA,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO_REG3",
+ .id = ACT8865_ID_LDO3,
+ .ops = &act8865_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ACT8865_VOLTAGE_NUM,
+ .linear_ranges = act8865_volatge_ranges,
+ .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
+ .vsel_reg = ACT8865_LDO3_VSET,
+ .vsel_mask = ACT8865_VSEL_MASK,
+ .enable_reg = ACT8865_LDO3_CTRL,
+ .enable_mask = ACT8865_ENA,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO_REG4",
+ .id = ACT8865_ID_LDO4,
+ .ops = &act8865_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ACT8865_VOLTAGE_NUM,
+ .linear_ranges = act8865_volatge_ranges,
+ .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
+ .vsel_reg = ACT8865_LDO4_VSET,
+ .vsel_mask = ACT8865_VSEL_MASK,
+ .enable_reg = ACT8865_LDO4_CTRL,
+ .enable_mask = ACT8865_ENA,
+ .owner = THIS_MODULE,
+ },
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id act8865_dt_ids[] = {
+ { .compatible = "active-semi,act8865" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, act8865_dt_ids);
+
+static struct of_regulator_match act8865_matches[] = {
+ [ACT8865_ID_DCDC1] = { .name = "DCDC_REG1"},
+ [ACT8865_ID_DCDC2] = { .name = "DCDC_REG2"},
+ [ACT8865_ID_DCDC3] = { .name = "DCDC_REG3"},
+ [ACT8865_ID_LDO1] = { .name = "LDO_REG1"},
+ [ACT8865_ID_LDO2] = { .name = "LDO_REG2"},
+ [ACT8865_ID_LDO3] = { .name = "LDO_REG3"},
+ [ACT8865_ID_LDO4] = { .name = "LDO_REG4"},
+};
+
+static int act8865_pdata_from_dt(struct device *dev,
+ struct device_node **of_node,
+ struct act8865_platform_data *pdata)
+{
+ int matched, i;
+ struct device_node *np;
+ struct act8865_regulator_data *regulator;
+
+ np = of_find_node_by_name(dev->of_node, "regulators");
+ if (!np) {
+ dev_err(dev, "missing 'regulators' subnode in DT\n");
+ return -EINVAL;
+ }
+
+ matched = of_regulator_match(dev, np,
+ act8865_matches, ARRAY_SIZE(act8865_matches));
+ if (matched <= 0)
+ return matched;
+
+ pdata->regulators = devm_kzalloc(dev,
+ sizeof(struct act8865_regulator_data) *
+ ARRAY_SIZE(act8865_matches), GFP_KERNEL);
+ if (!pdata->regulators) {
+ dev_err(dev, "%s: failed to allocate act8865 regulator\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ pdata->num_regulators = matched;
+ regulator = pdata->regulators;
+
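+ /* Copy every match slot; entries without a DT match keep NULL init_data and of_node. */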
+ for (i = 0; i < ARRAY_SIZE(act8865_matches); i++) {
+ regulator->id = i;
+ regulator->name = act8865_matches[i].name;
+ regulator->platform_data = act8865_matches[i].init_data;
+ of_node[i] = act8865_matches[i].of_node;
+ regulator++;
+ }
+
+ return 0;
+}
+#else
+static inline int act8865_pdata_from_dt(struct device *dev,
+ struct device_node **of_node,
+ struct act8865_platform_data *pdata)
+{
+ return 0;
+}
+#endif
+
+static int act8865_pmic_probe(struct i2c_client *client,
+ const struct i2c_device_id *i2c_id)
+{
+ struct regulator_dev **rdev;
+ struct device *dev = &client->dev;
+ struct act8865_platform_data *pdata = dev_get_platdata(dev);
+ struct regulator_config config = { };
+ struct act8865 *act8865;
+ struct device_node *of_node[ACT8865_REG_NUM];
+ int i, id;
+ int ret = -EINVAL;
+ int error;
+
+ if (dev->of_node && !pdata) {
+ const struct of_device_id *of_id;
+ struct act8865_platform_data pdata_of;
+
+ of_id = of_match_device(of_match_ptr(act8865_dt_ids), dev);
+ if (!of_id)
+ return -ENODEV;
+
+ ret = act8865_pdata_from_dt(dev, of_node, &pdata_of);
+ if (ret < 0)
+ return ret;
+
+ pdata = &pdata_of;
+ }
+
+ if (pdata->num_regulators > ACT8865_REG_NUM) {
+ dev_err(dev, "Too many regulators found!\n");
+ return -EINVAL;
+ }
+
+ act8865 = devm_kzalloc(dev, sizeof(struct act8865), GFP_KERNEL);
+ if (!act8865)
+ return -ENOMEM;
+
+ rdev = act8865->rdev;
+
+ act8865->regmap = devm_regmap_init_i2c(client, &act8865_regmap_config);
+ if (IS_ERR(act8865->regmap)) {
+ error = PTR_ERR(act8865->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ error);
+ return error;
+ }
+
+ /* Finally register devices */
+ for (i = 0; i < ACT8865_REG_NUM; i++) {
+
+ id = pdata->regulators[i].id;
+
+ config.dev = dev;
+ config.init_data = pdata->regulators[i].platform_data;
+ config.of_node = of_node[i];
+ config.driver_data = act8865;
+ config.regmap = act8865->regmap;
+
+ rdev[i] = devm_regulator_register(&client->dev,
+ &act8865_reg[i], &config);
+ if (IS_ERR(rdev[i])) {
+ dev_err(dev, "failed to register %s\n",
+ act8865_reg[id].name);
+ return PTR_ERR(rdev[i]);
+ }
+ }
+
+ i2c_set_clientdata(client, act8865);
+
+ return 0;
+}
+
+static const struct i2c_device_id act8865_ids[] = {
+ { "act8865", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, act8865_ids);
+
+static struct i2c_driver act8865_pmic_driver = {
+ .driver = {
+ .name = "act8865",
+ .owner = THIS_MODULE,
+ },
+ .probe = act8865_pmic_probe,
+ .id_table = act8865_ids,
+};
+
+module_i2c_driver(act8865_pmic_driver);
+
+MODULE_DESCRIPTION("active-semi act8865 voltage regulator driver");
+MODULE_AUTHOR("Wenyou Yang <wenyou.yang@atmel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index c734d0980826..862e63e451d0 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -122,10 +122,8 @@ static int anatop_regulator_probe(struct platform_device *pdev)
if (!sreg)
return -ENOMEM;
sreg->initdata = initdata;
- sreg->name = kstrdup(of_get_property(np, "regulator-name", NULL),
- GFP_KERNEL);
+ sreg->name = of_get_property(np, "regulator-name", NULL);
rdesc = &sreg->rdesc;
- memset(rdesc, 0, sizeof(*rdesc));
rdesc->name = sreg->name;
rdesc->ops = &anatop_rops;
rdesc->type = REGULATOR_VOLTAGE;
@@ -143,37 +141,37 @@ static int anatop_regulator_probe(struct platform_device *pdev)
&sreg->control_reg);
if (ret) {
dev_err(dev, "no anatop-reg-offset property set\n");
- goto anatop_probe_end;
+ return ret;
}
ret = of_property_read_u32(np, "anatop-vol-bit-width",
&sreg->vol_bit_width);
if (ret) {
dev_err(dev, "no anatop-vol-bit-width property set\n");
- goto anatop_probe_end;
+ return ret;
}
ret = of_property_read_u32(np, "anatop-vol-bit-shift",
&sreg->vol_bit_shift);
if (ret) {
dev_err(dev, "no anatop-vol-bit-shift property set\n");
- goto anatop_probe_end;
+ return ret;
}
ret = of_property_read_u32(np, "anatop-min-bit-val",
&sreg->min_bit_val);
if (ret) {
dev_err(dev, "no anatop-min-bit-val property set\n");
- goto anatop_probe_end;
+ return ret;
}
ret = of_property_read_u32(np, "anatop-min-voltage",
&sreg->min_voltage);
if (ret) {
dev_err(dev, "no anatop-min-voltage property set\n");
- goto anatop_probe_end;
+ return ret;
}
ret = of_property_read_u32(np, "anatop-max-voltage",
&sreg->max_voltage);
if (ret) {
dev_err(dev, "no anatop-max-voltage property set\n");
- goto anatop_probe_end;
+ return ret;
}
/* read LDO ramp up setting, only for core reg */
@@ -204,27 +202,11 @@ static int anatop_regulator_probe(struct platform_device *pdev)
if (IS_ERR(rdev)) {
dev_err(dev, "failed to register %s\n",
rdesc->name);
- ret = PTR_ERR(rdev);
- goto anatop_probe_end;
+ return PTR_ERR(rdev);
}
platform_set_drvdata(pdev, rdev);
-anatop_probe_end:
- if (ret)
- kfree(sreg->name);
-
- return ret;
-}
-
-static int anatop_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
- struct anatop_regulator *sreg = rdev_get_drvdata(rdev);
- const char *name = sreg->name;
-
- kfree(name);
-
return 0;
}
@@ -240,7 +222,6 @@ static struct platform_driver anatop_regulator_driver = {
.of_match_table = of_anatop_regulator_match_tbl,
},
.probe = anatop_regulator_probe,
- .remove = anatop_regulator_remove,
};
static int __init anatop_regulator_init(void)
@@ -259,3 +240,4 @@ MODULE_AUTHOR("Nancy Chen <Nancy.Chen@freescale.com>");
MODULE_AUTHOR("Ying-Chun Liu (PaulLiu) <paul.liu@linaro.org>");
MODULE_DESCRIPTION("ANATOP Regulator driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:anatop_regulator");
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index fd3154d86901..034ece707083 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -28,8 +28,6 @@
#include <linux/mfd/arizona/pdata.h>
#include <linux/mfd/arizona/registers.h>
-#define ARIZONA_MICSUPP_MAX_SELECTOR 0x1f
-
struct arizona_micsupp {
struct regulator_dev *regulator;
struct arizona *arizona;
@@ -40,42 +38,6 @@ struct arizona_micsupp {
struct work_struct check_cp_work;
};
-static int arizona_micsupp_list_voltage(struct regulator_dev *rdev,
- unsigned int selector)
-{
- if (selector > ARIZONA_MICSUPP_MAX_SELECTOR)
- return -EINVAL;
-
- if (selector == ARIZONA_MICSUPP_MAX_SELECTOR)
- return 3300000;
- else
- return (selector * 50000) + 1700000;
-}
-
-static int arizona_micsupp_map_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- unsigned int voltage;
- int selector;
-
- if (min_uV < 1700000)
- min_uV = 1700000;
-
- if (min_uV > 3200000)
- selector = ARIZONA_MICSUPP_MAX_SELECTOR;
- else
- selector = DIV_ROUND_UP(min_uV - 1700000, 50000);
-
- if (selector < 0)
- return -EINVAL;
-
- voltage = arizona_micsupp_list_voltage(rdev, selector);
- if (voltage < min_uV || voltage > max_uV)
- return -EINVAL;
-
- return selector;
-}
-
static void arizona_micsupp_check_cp(struct work_struct *work)
{
struct arizona_micsupp *micsupp =
@@ -145,8 +107,8 @@ static struct regulator_ops arizona_micsupp_ops = {
.disable = arizona_micsupp_disable,
.is_enabled = regulator_is_enabled_regmap,
- .list_voltage = arizona_micsupp_list_voltage,
- .map_voltage = arizona_micsupp_map_voltage,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -155,11 +117,16 @@ static struct regulator_ops arizona_micsupp_ops = {
.set_bypass = arizona_micsupp_set_bypass,
};
+static const struct regulator_linear_range arizona_micsupp_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1700000, 0, 0x1e, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x1f, 0x1f, 0),
+};
+
static const struct regulator_desc arizona_micsupp = {
.name = "MICVDD",
.supply_name = "CPVDD",
.type = REGULATOR_VOLTAGE,
- .n_voltages = ARIZONA_MICSUPP_MAX_SELECTOR + 1,
+ .n_voltages = 32,
.ops = &arizona_micsupp_ops,
.vsel_reg = ARIZONA_LDO2_CONTROL_1,
@@ -169,6 +136,9 @@ static const struct regulator_desc arizona_micsupp = {
.bypass_reg = ARIZONA_MIC_CHARGE_PUMP_1,
.bypass_mask = ARIZONA_CPMIC_BYPASS,
+ .linear_ranges = arizona_micsupp_ranges,
+ .n_linear_ranges = ARRAY_SIZE(arizona_micsupp_ranges),
+
.enable_time = 3000,
.owner = THIS_MODULE,
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index b9f1d24c6812..8b17d786cb71 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -99,7 +99,6 @@ static const struct as3722_register_mapping as3722_reg_lookup[] = {
.sleep_ctrl_mask = AS3722_SD0_EXT_ENABLE_MASK,
.control_reg = AS3722_SD0_CONTROL_REG,
.mode_mask = AS3722_SD0_MODE_FAST,
- .n_voltages = AS3722_SD0_VSEL_MAX + 1,
},
{
.regulator_id = AS3722_REGULATOR_ID_SD1,
@@ -112,7 +111,6 @@ static const struct as3722_register_mapping as3722_reg_lookup[] = {
.sleep_ctrl_mask = AS3722_SD1_EXT_ENABLE_MASK,
.control_reg = AS3722_SD1_CONTROL_REG,
.mode_mask = AS3722_SD1_MODE_FAST,
- .n_voltages = AS3722_SD0_VSEL_MAX + 1,
},
{
.regulator_id = AS3722_REGULATOR_ID_SD2,
@@ -181,7 +179,6 @@ static const struct as3722_register_mapping as3722_reg_lookup[] = {
.sleep_ctrl_mask = AS3722_SD6_EXT_ENABLE_MASK,
.control_reg = AS3722_SD6_CONTROL_REG,
.mode_mask = AS3722_SD6_MODE_FAST,
- .n_voltages = AS3722_SD0_VSEL_MAX + 1,
},
{
.regulator_id = AS3722_REGULATOR_ID_LDO0,
@@ -595,6 +592,22 @@ static int as3722_sd016_set_current_limit(struct regulator_dev *rdev,
return as3722_update_bits(as3722, reg, mask, val);
}
+static bool as3722_sd0_is_low_voltage(struct as3722_regulators *as3722_regs)
+{
+ int err;
+ unsigned val;
+
+ err = as3722_read(as3722_regs->as3722, AS3722_FUSE7_REG, &val);
+ if (err < 0) {
+ dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n",
+ AS3722_FUSE7_REG, err);
+ return false;
+ }
+ if (val & AS3722_FUSE7_SD0_LOW_VOLTAGE)
+ return true;
+ return false;
+}
+
static const struct regulator_linear_range as3722_sd2345_ranges[] = {
REGULATOR_LINEAR_RANGE(612500, 0x01, 0x40, 12500),
REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000),
@@ -820,9 +833,19 @@ static int as3722_regulator_probe(struct platform_device *pdev)
ops = &as3722_sd016_extcntrl_ops;
else
ops = &as3722_sd016_ops;
- as3722_regs->desc[id].min_uV = 610000;
+ if (id == AS3722_REGULATOR_ID_SD0 &&
+ as3722_sd0_is_low_voltage(as3722_regs)) {
+ as3722_regs->desc[id].n_voltages =
+ AS3722_SD0_VSEL_LOW_VOL_MAX + 1;
+ as3722_regs->desc[id].min_uV = 410000;
+ } else {
+ as3722_regs->desc[id].n_voltages =
+ AS3722_SD0_VSEL_MAX + 1;
+ as3722_regs->desc[id].min_uV = 610000;
+ }
as3722_regs->desc[id].uV_step = 10000;
as3722_regs->desc[id].linear_min_sel = 1;
+ as3722_regs->desc[id].enable_time = 600;
break;
case AS3722_REGULATOR_ID_SD2:
case AS3722_REGULATOR_ID_SD3:
@@ -842,9 +865,6 @@ static int as3722_regulator_probe(struct platform_device *pdev)
ops = &as3722_ldo_extcntrl_ops;
else
ops = &as3722_ldo_ops;
- as3722_regs->desc[id].min_uV = 825000;
- as3722_regs->desc[id].uV_step = 25000;
- as3722_regs->desc[id].linear_min_sel = 1;
as3722_regs->desc[id].enable_time = 500;
as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges;
as3722_regs->desc[id].n_linear_ranges =
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d59aa96a4dc4..8bd5cbf3aafb 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1272,6 +1272,8 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
if (r->dev.parent &&
node == r->dev.of_node)
return r;
+ *ret = -EPROBE_DEFER;
+ return NULL;
} else {
/*
* If we couldn't even get the node then it's
@@ -1312,7 +1314,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
struct regulator_dev *rdev;
struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
const char *devname = NULL;
- int ret = -EPROBE_DEFER;
+ int ret;
if (id == NULL) {
pr_err("get() with no identifier\n");
@@ -1322,6 +1324,11 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
if (dev)
devname = dev_name(dev);
+ if (have_full_constraints())
+ ret = -ENODEV;
+ else
+ ret = -EPROBE_DEFER;
+
mutex_lock(&regulator_list_mutex);
rdev = regulator_dev_lookup(dev, id, &ret);
@@ -1334,9 +1341,8 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
* If we have return value from dev_lookup fail, we do not expect to
* succeed, so, quit with appropriate error value
*/
- if (ret && ret != -ENODEV) {
+ if (ret && ret != -ENODEV)
goto out;
- }
if (!devname)
devname = "deviceless";
@@ -1351,7 +1357,8 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
rdev = dummy_regulator_rdev;
goto found;
- } else {
+ /* Don't log an error when called from regulator_get_optional() */
+ } else if (!have_full_constraints() || exclusive) {
dev_err(dev, "dummy supplies not allowed\n");
}
@@ -2244,7 +2251,7 @@ int regulator_is_supported_voltage(struct regulator *regulator,
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
ret = regulator_get_voltage(regulator);
if (ret >= 0)
- return (min_uV <= ret && ret <= max_uV);
+ return min_uV <= ret && ret <= max_uV;
else
return ret;
}
@@ -2416,7 +2423,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
if (ret < 0)
goto out;
-
+
/* restore original values in case of error */
old_min_uV = regulator->min_uV;
old_max_uV = regulator->max_uV;
@@ -2430,7 +2437,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
if (ret < 0)
goto out2;
-
+
out:
mutex_unlock(&rdev->mutex);
return ret;
@@ -3835,9 +3842,8 @@ static int __init regulator_init_complete(void)
* goes wrong. */
rdev_info(rdev, "disabling\n");
ret = ops->disable(rdev);
- if (ret != 0) {
+ if (ret != 0)
rdev_err(rdev, "couldn't disable: %d\n", ret);
- }
} else {
/* The intention is that in future we will
* assume that full constraints are provided
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 7f340206d329..b14ebdad5dd2 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -576,7 +576,9 @@ static int da9055_regulator_probe(struct platform_device *pdev)
/* Only LDO 5 and 6 has got the over current interrupt */
if (pdev->id == DA9055_ID_LDO5 || pdev->id == DA9055_ID_LDO6) {
irq = platform_get_irq_byname(pdev, "REGULATOR");
- irq = regmap_irq_get_virq(da9055->irq_data, irq);
+ if (irq < 0)
+ return irq;
+
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
da9055_ldo5_6_oc_irq,
IRQF_TRIGGER_HIGH |
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index a53c11a529d5..846acf240e48 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -431,17 +431,11 @@ static int db8500_regulator_register(struct platform_device *pdev,
config.of_node = np;
/* register with the regulator framework */
- info->rdev = regulator_register(&info->desc, &config);
+ info->rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
if (IS_ERR(info->rdev)) {
err = PTR_ERR(info->rdev);
dev_err(&pdev->dev, "failed to register %s: err %i\n",
info->desc.name, err);
-
- /* if failing, unregister all earlier regulators */
- while (--id >= 0) {
- info = &dbx500_regulator_info[id];
- regulator_unregister(info->rdev);
- }
return err;
}
@@ -530,20 +524,8 @@ static int db8500_regulator_probe(struct platform_device *pdev)
static int db8500_regulator_remove(struct platform_device *pdev)
{
- int i;
-
ux500_regulator_debug_exit();
- for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
- struct dbx500_regulator_info *info;
- info = &dbx500_regulator_info[i];
-
- dev_vdbg(rdev_get_dev(info->rdev),
- "regulator-%s-remove\n", info->desc.name);
-
- regulator_unregister(info->rdev);
- }
-
return 0;
}
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 234960dc9607..c0a1d00b78c9 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -203,17 +203,18 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
}
config->nr_states = i;
+ config->type = REGULATOR_VOLTAGE;
ret = of_property_read_string(np, "regulator-type", &regtype);
- if (ret < 0) {
- dev_err(dev, "Missing 'regulator-type' property\n");
- return ERR_PTR(-EINVAL);
+ if (ret >= 0) {
+ if (!strncmp("voltage", regtype, 7))
+ config->type = REGULATOR_VOLTAGE;
+ else if (!strncmp("current", regtype, 7))
+ config->type = REGULATOR_CURRENT;
+ else
+ dev_warn(dev, "Unknown regulator-type '%s'\n",
+ regtype);
}
- if (!strncmp("voltage", regtype, 7))
- config->type = REGULATOR_VOLTAGE;
- else if (!strncmp("current", regtype, 7))
- config->type = REGULATOR_CURRENT;
-
return config;
}
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 947c05ffe0ab..3b1102b75071 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -25,8 +25,6 @@ struct lp3971 {
struct device *dev;
struct mutex io_lock;
struct i2c_client *i2c;
- int num_regulators;
- struct regulator_dev **rdev;
};
static u8 lp3971_reg_read(struct lp3971 *lp3971, u8 reg);
@@ -383,42 +381,27 @@ static int setup_regulators(struct lp3971 *lp3971,
{
int i, err;
- lp3971->num_regulators = pdata->num_regulators;
- lp3971->rdev = kcalloc(pdata->num_regulators,
- sizeof(struct regulator_dev *), GFP_KERNEL);
- if (!lp3971->rdev) {
- err = -ENOMEM;
- goto err_nomem;
- }
-
/* Instantiate the regulators */
for (i = 0; i < pdata->num_regulators; i++) {
struct regulator_config config = { };
struct lp3971_regulator_subdev *reg = &pdata->regulators[i];
+ struct regulator_dev *rdev;
config.dev = lp3971->dev;
config.init_data = reg->initdata;
config.driver_data = lp3971;
- lp3971->rdev[i] = regulator_register(&regulators[reg->id],
- &config);
- if (IS_ERR(lp3971->rdev[i])) {
- err = PTR_ERR(lp3971->rdev[i]);
+ rdev = devm_regulator_register(lp3971->dev,
+ &regulators[reg->id], &config);
+ if (IS_ERR(rdev)) {
+ err = PTR_ERR(rdev);
dev_err(lp3971->dev, "regulator init failed: %d\n",
err);
- goto error;
+ return err;
}
}
return 0;
-
-error:
- while (--i >= 0)
- regulator_unregister(lp3971->rdev[i]);
- kfree(lp3971->rdev);
- lp3971->rdev = NULL;
-err_nomem:
- return err;
}
static int lp3971_i2c_probe(struct i2c_client *i2c,
@@ -460,19 +443,6 @@ static int lp3971_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int lp3971_i2c_remove(struct i2c_client *i2c)
-{
- struct lp3971 *lp3971 = i2c_get_clientdata(i2c);
- int i;
-
- for (i = 0; i < lp3971->num_regulators; i++)
- regulator_unregister(lp3971->rdev[i]);
-
- kfree(lp3971->rdev);
-
- return 0;
-}
-
static const struct i2c_device_id lp3971_i2c_id[] = {
{ "lp3971", 0 },
{ }
@@ -485,7 +455,6 @@ static struct i2c_driver lp3971_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = lp3971_i2c_probe,
- .remove = lp3971_i2c_remove,
.id_table = lp3971_i2c_id,
};
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index 093e6f44ff8a..aea485afcc1a 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -22,8 +22,6 @@ struct lp3972 {
struct device *dev;
struct mutex io_lock;
struct i2c_client *i2c;
- int num_regulators;
- struct regulator_dev **rdev;
};
/* LP3972 Control Registers */
@@ -478,41 +476,27 @@ static int setup_regulators(struct lp3972 *lp3972,
{
int i, err;
- lp3972->num_regulators = pdata->num_regulators;
- lp3972->rdev = kcalloc(pdata->num_regulators,
- sizeof(struct regulator_dev *), GFP_KERNEL);
- if (!lp3972->rdev) {
- err = -ENOMEM;
- goto err_nomem;
- }
-
/* Instantiate the regulators */
for (i = 0; i < pdata->num_regulators; i++) {
struct lp3972_regulator_subdev *reg = &pdata->regulators[i];
struct regulator_config config = { };
+ struct regulator_dev *rdev;
config.dev = lp3972->dev;
config.init_data = reg->initdata;
config.driver_data = lp3972;
- lp3972->rdev[i] = regulator_register(&regulators[reg->id],
- &config);
- if (IS_ERR(lp3972->rdev[i])) {
- err = PTR_ERR(lp3972->rdev[i]);
+ rdev = devm_regulator_register(lp3972->dev,
+ &regulators[reg->id], &config);
+ if (IS_ERR(rdev)) {
+ err = PTR_ERR(rdev);
dev_err(lp3972->dev, "regulator init failed: %d\n",
err);
- goto error;
+ return err;
}
}
return 0;
-error:
- while (--i >= 0)
- regulator_unregister(lp3972->rdev[i]);
- kfree(lp3972->rdev);
- lp3972->rdev = NULL;
-err_nomem:
- return err;
}
static int lp3972_i2c_probe(struct i2c_client *i2c,
@@ -557,18 +541,6 @@ static int lp3972_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int lp3972_i2c_remove(struct i2c_client *i2c)
-{
- struct lp3972 *lp3972 = i2c_get_clientdata(i2c);
- int i;
-
- for (i = 0; i < lp3972->num_regulators; i++)
- regulator_unregister(lp3972->rdev[i]);
- kfree(lp3972->rdev);
-
- return 0;
-}
-
static const struct i2c_device_id lp3972_i2c_id[] = {
{ "lp3972", 0 },
{ }
@@ -581,7 +553,6 @@ static struct i2c_driver lp3972_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = lp3972_i2c_probe,
- .remove = lp3972_i2c_remove,
.id_table = lp3972_i2c_id,
};
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
new file mode 100644
index 000000000000..186df8785a91
--- /dev/null
+++ b/drivers/regulator/max14577.c
@@ -0,0 +1,274 @@
+/*
+ * max14577.c - Regulator driver for the Maxim 14577
+ *
+ * Copyright (C) 2013 Samsung Electronics
+ * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/mfd/max14577.h>
+#include <linux/mfd/max14577-private.h>
+#include <linux/regulator/of_regulator.h>
+
+struct max14577_regulator {
+ struct device *dev;
+ struct max14577 *max14577;
+ struct regulator_dev **regulators;
+};
+
+static int max14577_reg_is_enabled(struct regulator_dev *rdev)
+{
+ int rid = rdev_get_id(rdev);
+ struct regmap *rmap = rdev->regmap;
+ u8 reg_data;
+
+ switch (rid) {
+ case MAX14577_CHARGER:
+ max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL2, &reg_data);
+ if ((reg_data & CHGCTRL2_MBCHOSTEN_MASK) == 0)
+ return 0;
+ max14577_read_reg(rmap, MAX14577_CHG_REG_STATUS3, &reg_data);
+ if ((reg_data & STATUS3_CGMBC_MASK) == 0)
+ return 0;
+ /* MBCHOSTEN and CGMBC are on */
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int max14577_reg_get_current_limit(struct regulator_dev *rdev)
+{
+ u8 reg_data;
+ struct regmap *rmap = rdev->regmap;
+
+ if (rdev_get_id(rdev) != MAX14577_CHARGER)
+ return -EINVAL;
+
+ max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL4, &reg_data);
+
+ if ((reg_data & CHGCTRL4_MBCICHWRCL_MASK) == 0)
+ return MAX14577_REGULATOR_CURRENT_LIMIT_MIN;
+
+ reg_data = ((reg_data & CHGCTRL4_MBCICHWRCH_MASK) >>
+ CHGCTRL4_MBCICHWRCH_SHIFT);
+ return MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START +
+ reg_data * MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP;
+}
+
+static int max14577_reg_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ int i, current_bits = 0xf;
+ u8 reg_data;
+
+ if (rdev_get_id(rdev) != MAX14577_CHARGER)
+ return -EINVAL;
+
+ if (min_uA > MAX14577_REGULATOR_CURRENT_LIMIT_MAX ||
+ max_uA < MAX14577_REGULATOR_CURRENT_LIMIT_MIN)
+ return -EINVAL;
+
+ if (max_uA < MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START) {
+ /* Less than 200 mA, so set 90 mA (clear only the Low Bit) */
+ u8 reg_data = 0x0 << CHGCTRL4_MBCICHWRCL_SHIFT;
+ return max14577_update_reg(rdev->regmap,
+ MAX14577_CHG_REG_CHG_CTRL4,
+ CHGCTRL4_MBCICHWRCL_MASK, reg_data);
+ }
+
+ /* max_uA is in range: <LIMIT_HIGH_START, infinite>, so search for
+ * valid current starting from LIMIT_MAX. */
+ for (i = MAX14577_REGULATOR_CURRENT_LIMIT_MAX;
+ i >= MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START;
+ i -= MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP) {
+ if (i <= max_uA)
+ break;
+ current_bits--;
+ }
+ BUG_ON(current_bits < 0); /* Cannot happen */
+ /* Turn the Low Bit on (use the 200 mA - 950 mA range) */
+ reg_data = 0x1 << CHGCTRL4_MBCICHWRCL_SHIFT;
+ /* and set proper High Bits */
+ reg_data |= current_bits << CHGCTRL4_MBCICHWRCH_SHIFT;
+
+ return max14577_update_reg(rdev->regmap, MAX14577_CHG_REG_CHG_CTRL4,
+ CHGCTRL4_MBCICHWRCL_MASK | CHGCTRL4_MBCICHWRCH_MASK,
+ reg_data);
+}
+
+static struct regulator_ops max14577_safeout_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+};
+
+static struct regulator_ops max14577_charger_ops = {
+ .is_enabled = max14577_reg_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_current_limit = max14577_reg_get_current_limit,
+ .set_current_limit = max14577_reg_set_current_limit,
+};
+
+static const struct regulator_desc supported_regulators[] = {
+ [MAX14577_SAFEOUT] = {
+ .name = "SAFEOUT",
+ .id = MAX14577_SAFEOUT,
+ .ops = &max14577_safeout_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .min_uV = MAX14577_REGULATOR_SAFEOUT_VOLTAGE,
+ .enable_reg = MAX14577_REG_CONTROL2,
+ .enable_mask = CTRL2_SFOUTORD_MASK,
+ },
+ [MAX14577_CHARGER] = {
+ .name = "CHARGER",
+ .id = MAX14577_CHARGER,
+ .ops = &max14577_charger_ops,
+ .type = REGULATOR_CURRENT,
+ .owner = THIS_MODULE,
+ .enable_reg = MAX14577_CHG_REG_CHG_CTRL2,
+ .enable_mask = CHGCTRL2_MBCHOSTEN_MASK,
+ },
+};
+
+#ifdef CONFIG_OF
+static struct of_regulator_match max14577_regulator_matches[] = {
+ { .name = "SAFEOUT", },
+ { .name = "CHARGER", },
+};
+
+static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev)
+{
+ int ret;
+ struct device_node *np;
+
+ np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
+ if (!np) {
+ dev_err(&pdev->dev, "Failed to get child OF node for regulators\n");
+ return -EINVAL;
+ }
+
+ ret = of_regulator_match(&pdev->dev, np, max14577_regulator_matches,
+ MAX14577_REG_MAX);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
+ }
+
+ of_node_put(np);
+
+ /* of_regulator_match() returns the number of matched regulators */
+ return ret < 0 ? ret : 0;
+}
+
+static inline struct regulator_init_data *match_init_data(int index)
+{
+ return max14577_regulator_matches[index].init_data;
+}
+
+static inline struct device_node *match_of_node(int index)
+{
+ return max14577_regulator_matches[index].of_node;
+}
+#else /* CONFIG_OF */
+static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline struct regulator_init_data *match_init_data(int index)
+{
+ return NULL;
+}
+
+static inline struct device_node *match_of_node(int index)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+
+static int max14577_regulator_probe(struct platform_device *pdev)
+{
+ struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent);
+ struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev);
+ int i, ret;
+ struct regulator_config config = {};
+
+ ret = max14577_regulator_dt_parse_pdata(pdev);
+ if (ret)
+ return ret;
+
+ config.dev = &pdev->dev;
+ config.regmap = max14577->regmap;
+
+ for (i = 0; i < ARRAY_SIZE(supported_regulators); i++) {
+ struct regulator_dev *regulator;
+ /*
+ * Index of supported_regulators[] is also the id and must
+ * match index of pdata->regulators[].
+ */
+ if (pdata && pdata->regulators) {
+ config.init_data = pdata->regulators[i].initdata;
+ config.of_node = pdata->regulators[i].of_node;
+ } else {
+ config.init_data = match_init_data(i);
+ config.of_node = match_of_node(i);
+ }
+
+ regulator = devm_regulator_register(&pdev->dev,
+ &supported_regulators[i], &config);
+ if (IS_ERR(regulator)) {
+ ret = PTR_ERR(regulator);
+ dev_err(&pdev->dev,
+ "Regulator init failed for ID %d with error: %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static struct platform_driver max14577_regulator_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "max14577-regulator",
+ },
+ .probe = max14577_regulator_probe,
+};
+
+static int __init max14577_regulator_init(void)
+{
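+ /* Build-time sanity checks: the current-limit constants and the regulator table must stay consistent. */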
+ BUILD_BUG_ON(MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START +
+ MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP * 0xf !=
+ MAX14577_REGULATOR_CURRENT_LIMIT_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(supported_regulators) != MAX14577_REG_MAX);
+
+ return platform_driver_register(&max14577_regulator_driver);
+}
+subsys_initcall(max14577_regulator_init);
+
+static void __exit max14577_regulator_exit(void)
+{
+ platform_driver_unregister(&max14577_regulator_driver);
+}
+module_exit(max14577_regulator_exit);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_DESCRIPTION("MAXIM 14577 regulator driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:max14577-regulator");
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
index feb20bf4ccab..5fb899f461d0 100644
--- a/drivers/regulator/max77693.c
+++ b/drivers/regulator/max77693.c
@@ -138,6 +138,7 @@ static struct regulator_ops max77693_charger_ops = {
.n_voltages = 4, \
.ops = &max77693_safeout_ops, \
.type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
.volt_table = max77693_safeout_table, \
.vsel_reg = MAX77693_CHG_REG_SAFEOUT_CTRL, \
.vsel_mask = SAFEOUT_CTRL_SAFEOUT##_num##_MASK, \
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 96c9f80d9550..f374fa57220f 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -274,25 +274,25 @@ static struct mc13xxx_regulator mc13892_regulators[] = {
MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw),
MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst),
MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi),
- MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0, \
+ MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0,
mc13892_vpll),
- MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \
+ MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0,
mc13892_vdig),
- MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1, \
+ MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1,
mc13892_vsd),
- MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0, \
+ MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0,
mc13892_vusb2),
- MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1, \
+ MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1,
mc13892_vvideo),
- MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1, \
+ MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1,
mc13892_vaudio),
- MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \
+ MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0,
mc13892_vcam),
- MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0, \
+ MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0,
mc13892_vgen1),
- MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0, \
+ MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0,
mc13892_vgen2),
- MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0, \
+ MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0,
mc13892_vgen3),
MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb),
MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo),
@@ -476,8 +476,8 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
}
mc13xxx_lock(priv->mc13xxx);
- ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg, mask,
- reg_value);
+ ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
+ mask, reg_value);
mc13xxx_unlock(priv->mc13xxx);
return ret;
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index d7da1c15a6da..134f90ec9ca1 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -105,7 +105,7 @@ static int pcf50633_regulator_probe(struct platform_device *pdev)
static struct platform_driver pcf50633_regulator_driver = {
.driver = {
- .name = "pcf50633-regltr",
+ .name = "pcf50633-regulator",
},
.probe = pcf50633_regulator_probe,
};
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 8b5e4c712a01..ab174f20ca11 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -309,21 +309,24 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
return ret;
switch (value & 0x0f) {
- /* Freescale misprogrammed 1-3% of parts prior to week 8 of 2013 as ID=8 */
- case 0x8:
- dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
- case 0x0:
- break;
- default:
- dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
- return -ENODEV;
+ /*
+ * Freescale misprogrammed 1-3% of parts prior to week 8 of 2013
+ * as ID=8
+ */
+ case 0x8:
+ dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
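+ /* fall through - accept ID 0x8 the same as ID 0x0 */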
+ case 0x0:
+ break;
+ default:
+ dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
+ return -ENODEV;
}
ret = regmap_read(pfuze_chip->regmap, PFUZE100_REVID, &value);
if (ret)
return ret;
dev_info(pfuze_chip->dev,
- "Full lay: %x, Metal lay: %x\n",
+ "Full layer: %x, Metal layer: %x\n",
(value & 0xf0) >> 4, value & 0x0f);
ret = regmap_read(pfuze_chip->regmap, PFUZE100_FABID, &value);
@@ -408,31 +411,18 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
config.driver_data = pfuze_chip;
config.of_node = match_of_node(i);
- pfuze_chip->regulators[i] = regulator_register(desc, &config);
+ pfuze_chip->regulators[i] =
+ devm_regulator_register(&client->dev, desc, &config);
if (IS_ERR(pfuze_chip->regulators[i])) {
dev_err(&client->dev, "register regulator%s failed\n",
pfuze100_regulators[i].desc.name);
- ret = PTR_ERR(pfuze_chip->regulators[i]);
- while (--i >= 0)
- regulator_unregister(pfuze_chip->regulators[i]);
- return ret;
+ return PTR_ERR(pfuze_chip->regulators[i]);
}
}
return 0;
}
-static int pfuze100_regulator_remove(struct i2c_client *client)
-{
- int i;
- struct pfuze_chip *pfuze_chip = i2c_get_clientdata(client);
-
- for (i = 0; i < PFUZE100_MAX_REGULATOR; i++)
- regulator_unregister(pfuze_chip->regulators[i]);
-
- return 0;
-}
-
static struct i2c_driver pfuze_driver = {
.id_table = pfuze_device_id,
.driver = {
@@ -441,7 +431,6 @@ static struct i2c_driver pfuze_driver = {
.of_match_table = pfuze_dt_ids,
},
.probe = pfuze100_regulator_probe,
- .remove = pfuze100_regulator_remove,
};
module_i2c_driver(pfuze_driver);
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 333677d68d0e..cd0b9e35a56d 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -70,8 +70,6 @@ static int s2mps11_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
ramp_delay = s2mps11->ramp_delay2;
break;
case S2MPS11_BUCK3:
- ramp_delay = s2mps11->ramp_delay34;
- break;
case S2MPS11_BUCK4:
ramp_delay = s2mps11->ramp_delay34;
break;
@@ -438,11 +436,12 @@ common_reg:
platform_set_drvdata(pdev, s2mps11);
config.dev = &pdev->dev;
- config.regmap = iodev->regmap;
+ config.regmap = iodev->regmap_pmic;
config.driver_data = s2mps11;
for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
if (!reg_np) {
config.init_data = pdata->regulators[i].initdata;
+ config.of_node = pdata->regulators[i].reg_node;
} else {
config.init_data = rdata[i].init_data;
config.of_node = rdata[i].of_node;
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index aeb40aad0ae7..d7164bb75d3e 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -23,6 +23,7 @@
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/s5m8767.h>
#include <linux/regulator/of_regulator.h>
+#include <linux/regmap.h>
#define S5M8767_OPMODE_NORMAL_MODE 0x1
@@ -120,8 +121,8 @@ static const struct sec_voltage_desc *reg_voltage_map[] = {
[S5M8767_BUCK4] = &buck_voltage_val2,
[S5M8767_BUCK5] = &buck_voltage_val1,
[S5M8767_BUCK6] = &buck_voltage_val1,
- [S5M8767_BUCK7] = NULL,
- [S5M8767_BUCK8] = NULL,
+ [S5M8767_BUCK7] = &buck_voltage_val3,
+ [S5M8767_BUCK8] = &buck_voltage_val3,
[S5M8767_BUCK9] = &buck_voltage_val3,
};
@@ -217,7 +218,7 @@ static int s5m8767_reg_is_enabled(struct regulator_dev *rdev)
{
struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
int ret, reg;
- int mask = 0xc0, enable_ctrl;
+ int enable_ctrl;
unsigned int val;
ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
@@ -226,37 +227,38 @@ static int s5m8767_reg_is_enabled(struct regulator_dev *rdev)
else if (ret)
return ret;
- ret = sec_reg_read(s5m8767->iodev, reg, &val);
+ ret = regmap_read(s5m8767->iodev->regmap_pmic, reg, &val);
if (ret)
return ret;
- return (val & mask) == enable_ctrl;
+ return (val & S5M8767_ENCTRL_MASK) == enable_ctrl;
}
static int s5m8767_reg_enable(struct regulator_dev *rdev)
{
struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
int ret, reg;
- int mask = 0xc0, enable_ctrl;
+ int enable_ctrl;
ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
if (ret)
return ret;
- return sec_reg_update(s5m8767->iodev, reg, enable_ctrl, mask);
+ return regmap_update_bits(s5m8767->iodev->regmap_pmic, reg,
+ S5M8767_ENCTRL_MASK, enable_ctrl);
}
static int s5m8767_reg_disable(struct regulator_dev *rdev)
{
struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
- int ret, reg;
- int mask = 0xc0, enable_ctrl;
+ int ret, reg, enable_ctrl;
ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
if (ret)
return ret;
- return sec_reg_update(s5m8767->iodev, reg, ~mask, mask);
+ return regmap_update_bits(s5m8767->iodev->regmap_pmic, reg,
+ S5M8767_ENCTRL_MASK, ~S5M8767_ENCTRL_MASK);
}
static int s5m8767_get_vsel_reg(int reg_id, struct s5m8767_info *s5m8767)
@@ -417,9 +419,12 @@ static struct regulator_ops s5m8767_ops = {
};
static struct regulator_ops s5m8767_buck78_ops = {
+ .list_voltage = regulator_list_voltage_linear,
.is_enabled = s5m8767_reg_is_enabled,
.enable = s5m8767_reg_enable,
.disable = s5m8767_reg_disable,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
};
#define s5m8767_regulator_desc(_name) { \
@@ -745,17 +750,20 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
pdata->buck2_init);
- sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK2DVS2, buck_init);
+ regmap_write(s5m8767->iodev->regmap_pmic, S5M8767_REG_BUCK2DVS2,
+ buck_init);
buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
pdata->buck3_init);
- sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK3DVS2, buck_init);
+ regmap_write(s5m8767->iodev->regmap_pmic, S5M8767_REG_BUCK3DVS2,
+ buck_init);
buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
pdata->buck4_init);
- sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK4DVS2, buck_init);
+ regmap_write(s5m8767->iodev->regmap_pmic, S5M8767_REG_BUCK4DVS2,
+ buck_init);
for (i = 0; i < 8; i++) {
if (s5m8767->buck2_gpiodvs) {
@@ -837,71 +845,76 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
pdata->buck4_gpiodvs) {
- sec_reg_update(s5m8767->iodev, S5M8767_REG_BUCK2CTRL,
- (pdata->buck2_gpiodvs) ? (1 << 1) : (0 << 1),
- 1 << 1);
- sec_reg_update(s5m8767->iodev, S5M8767_REG_BUCK3CTRL,
- (pdata->buck3_gpiodvs) ? (1 << 1) : (0 << 1),
- 1 << 1);
- sec_reg_update(s5m8767->iodev, S5M8767_REG_BUCK4CTRL,
- (pdata->buck4_gpiodvs) ? (1 << 1) : (0 << 1),
- 1 << 1);
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_BUCK2CTRL, 1 << 1,
+ (pdata->buck2_gpiodvs) ? (1 << 1) : (0 << 1));
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_BUCK3CTRL, 1 << 1,
+ (pdata->buck3_gpiodvs) ? (1 << 1) : (0 << 1));
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_BUCK4CTRL, 1 << 1,
+ (pdata->buck4_gpiodvs) ? (1 << 1) : (0 << 1));
}
/* Initialize GPIO DVS registers */
for (i = 0; i < 8; i++) {
if (s5m8767->buck2_gpiodvs) {
- sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK2DVS1 + i,
- s5m8767->buck2_vol[i]);
+ regmap_write(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_BUCK2DVS1 + i,
+ s5m8767->buck2_vol[i]);
}
if (s5m8767->buck3_gpiodvs) {
- sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK3DVS1 + i,
- s5m8767->buck3_vol[i]);
+ regmap_write(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_BUCK3DVS1 + i,
+ s5m8767->buck3_vol[i]);
}
if (s5m8767->buck4_gpiodvs) {
- sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK4DVS1 + i,
- s5m8767->buck4_vol[i]);
+ regmap_write(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_BUCK4DVS1 + i,
+ s5m8767->buck4_vol[i]);
}
}
if (s5m8767->buck2_ramp)
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, 0x08, 0x08);
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_DVSRAMP, 0x08, 0x08);
if (s5m8767->buck3_ramp)
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, 0x04, 0x04);
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_DVSRAMP, 0x04, 0x04);
if (s5m8767->buck4_ramp)
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, 0x02, 0x02);
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_DVSRAMP, 0x02, 0x02);
if (s5m8767->buck2_ramp || s5m8767->buck3_ramp
|| s5m8767->buck4_ramp) {
+ unsigned int val;
switch (s5m8767->ramp_delay) {
case 5:
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
- 0x40, 0xf0);
+ val = S5M8767_DVS_BUCK_RAMP_5;
break;
case 10:
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
- 0x90, 0xf0);
+ val = S5M8767_DVS_BUCK_RAMP_10;
break;
case 25:
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
- 0xd0, 0xf0);
+ val = S5M8767_DVS_BUCK_RAMP_25;
break;
case 50:
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
- 0xe0, 0xf0);
+ val = S5M8767_DVS_BUCK_RAMP_50;
break;
case 100:
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
- 0xf0, 0xf0);
+ val = S5M8767_DVS_BUCK_RAMP_100;
break;
default:
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
- 0x90, 0xf0);
+ val = S5M8767_DVS_BUCK_RAMP_10;
}
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_DVSRAMP,
+ S5M8767_DVS_BUCK_RAMP_MASK,
+ val << S5M8767_DVS_BUCK_RAMP_SHIFT);
}
for (i = 0; i < pdata->num_regulators; i++) {
diff --git a/drivers/regulator/stw481x-vmmc.c b/drivers/regulator/stw481x-vmmc.c
index f78857bd6a15..a7e152696a02 100644
--- a/drivers/regulator/stw481x-vmmc.c
+++ b/drivers/regulator/stw481x-vmmc.c
@@ -74,7 +74,8 @@ static int stw481x_vmmc_regulator_probe(struct platform_device *pdev)
config.init_data = of_get_regulator_init_data(&pdev->dev,
pdev->dev.of_node);
- stw481x->vmmc_regulator = regulator_register(&vmmc_regulator, &config);
+ stw481x->vmmc_regulator = devm_regulator_register(&pdev->dev,
+ &vmmc_regulator, &config);
if (IS_ERR(stw481x->vmmc_regulator)) {
dev_err(&pdev->dev,
"error initializing STw481x VMMC regulator\n");
@@ -85,14 +86,6 @@ static int stw481x_vmmc_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int stw481x_vmmc_regulator_remove(struct platform_device *pdev)
-{
- struct stw481x *stw481x = dev_get_platdata(&pdev->dev);
-
- regulator_unregister(stw481x->vmmc_regulator);
- return 0;
-}
-
static const struct of_device_id stw481x_vmmc_match[] = {
{ .compatible = "st,stw481x-vmmc", },
{},
@@ -105,7 +98,6 @@ static struct platform_driver stw481x_vmmc_regulator_driver = {
.of_match_table = stw481x_vmmc_match,
},
.probe = stw481x_vmmc_regulator_probe,
- .remove = stw481x_vmmc_regulator_remove,
};
module_platform_driver(stw481x_vmmc_regulator_driver);
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index b0a3f0917a27..b3764f594ee9 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -70,16 +70,16 @@
#define TPS51632_POWER_STATE_SINGLE_PHASE_CCM 0x1
#define TPS51632_POWER_STATE_SINGLE_PHASE_DCM 0x2
-#define TPS51632_MIN_VOLATGE 500000
-#define TPS51632_MAX_VOLATGE 1520000
-#define TPS51632_VOLATGE_STEP_10mV 10000
-#define TPS51632_VOLATGE_STEP_20mV 20000
+#define TPS51632_MIN_VOLTAGE 500000
+#define TPS51632_MAX_VOLTAGE 1520000
+#define TPS51632_VOLTAGE_STEP_10mV 10000
+#define TPS51632_VOLTAGE_STEP_20mV 20000
#define TPS51632_MAX_VSEL 0x7F
#define TPS51632_MIN_VSEL 0x19
#define TPS51632_DEFAULT_RAMP_DELAY 6000
#define TPS51632_VOLT_VSEL(uV) \
- (DIV_ROUND_UP(uV - TPS51632_MIN_VOLATGE, \
- TPS51632_VOLATGE_STEP_10mV) + \
+ (DIV_ROUND_UP(uV - TPS51632_MIN_VOLTAGE, \
+ TPS51632_VOLTAGE_STEP_10mV) + \
TPS51632_MIN_VSEL)
/* TPS51632 chip information */
@@ -243,9 +243,9 @@ static struct tps51632_regulator_platform_data *
pdata->dvfs_step_20mV = of_property_read_bool(np, "ti,dvfs-step-20mV");
pdata->base_voltage_uV = pdata->reg_init_data->constraints.min_uV ? :
- TPS51632_MIN_VOLATGE;
+ TPS51632_MIN_VOLTAGE;
pdata->max_voltage_uV = pdata->reg_init_data->constraints.max_uV ? :
- TPS51632_MAX_VOLATGE;
+ TPS51632_MAX_VOLTAGE;
return pdata;
}
#else
@@ -284,15 +284,15 @@ static int tps51632_probe(struct i2c_client *client,
}
if (pdata->enable_pwm_dvfs) {
- if ((pdata->base_voltage_uV < TPS51632_MIN_VOLATGE) ||
- (pdata->base_voltage_uV > TPS51632_MAX_VOLATGE)) {
+ if ((pdata->base_voltage_uV < TPS51632_MIN_VOLTAGE) ||
+ (pdata->base_voltage_uV > TPS51632_MAX_VOLTAGE)) {
dev_err(&client->dev, "Invalid base_voltage_uV setting\n");
return -EINVAL;
}
if ((pdata->max_voltage_uV) &&
- ((pdata->max_voltage_uV < TPS51632_MIN_VOLATGE) ||
- (pdata->max_voltage_uV > TPS51632_MAX_VOLATGE))) {
+ ((pdata->max_voltage_uV < TPS51632_MIN_VOLTAGE) ||
+ (pdata->max_voltage_uV > TPS51632_MAX_VOLTAGE))) {
dev_err(&client->dev, "Invalid max_voltage_uV setting\n");
return -EINVAL;
}
@@ -305,11 +305,11 @@ static int tps51632_probe(struct i2c_client *client,
}
tps->dev = &client->dev;
- tps->desc.name = id->name;
+ tps->desc.name = client->name;
tps->desc.id = 0;
tps->desc.ramp_delay = TPS51632_DEFAULT_RAMP_DELAY;
- tps->desc.min_uV = TPS51632_MIN_VOLATGE;
- tps->desc.uV_step = TPS51632_VOLATGE_STEP_10mV;
+ tps->desc.min_uV = TPS51632_MIN_VOLTAGE;
+ tps->desc.uV_step = TPS51632_VOLTAGE_STEP_10mV;
tps->desc.linear_min_sel = TPS51632_MIN_VSEL;
tps->desc.n_voltages = TPS51632_MAX_VSEL + 1;
tps->desc.ops = &tps51632_dcdc_ops;
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index c2c0185a2dcd..c3fa15a299b1 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -360,7 +360,7 @@ static int tps62360_probe(struct i2c_client *client,
dev_err(&client->dev, "Error: No device match found\n");
return -ENODEV;
}
- chip_id = (int)match->data;
+ chip_id = (int)(long)match->data;
if (!pdata)
pdata = of_get_tps62360_platform_data(&client->dev);
} else if (id) {
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index e8e3a8afd3e2..0485d47f0d8a 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -93,6 +93,8 @@ static const unsigned int tps6586x_ldo4_voltages[] = {
2300000, 2325000, 2350000, 2375000, 2400000, 2425000, 2450000, 2475000,
};
+#define tps658623_sm2_voltages tps6586x_ldo4_voltages
+
static const unsigned int tps6586x_ldo_voltages[] = {
1250000, 1500000, 1800000, 2500000, 2700000, 2850000, 3100000, 3300000,
};
@@ -104,6 +106,13 @@ static const unsigned int tps6586x_sm2_voltages[] = {
4200000, 4250000, 4300000, 4350000, 4400000, 4450000, 4500000, 4550000,
};
+static const unsigned int tps658643_sm2_voltages[] = {
+ 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, 1200000,
+ 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000, 1400000,
+ 1425000, 1450000, 1475000, 1500000, 1525000, 1550000, 1575000, 1600000,
+ 1625000, 1650000, 1675000, 1700000, 1725000, 1750000, 1775000, 1800000,
+};
+
static const unsigned int tps6586x_dvm_voltages[] = {
725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000,
925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000,
@@ -119,8 +128,8 @@ static const unsigned int tps6586x_dvm_voltages[] = {
.ops = &tps6586x_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.id = TPS6586X_ID_##_id, \
- .n_voltages = ARRAY_SIZE(tps6586x_##vdata##_voltages), \
- .volt_table = tps6586x_##vdata##_voltages, \
+ .n_voltages = ARRAY_SIZE(vdata##_voltages), \
+ .volt_table = vdata##_voltages, \
.owner = THIS_MODULE, \
.enable_reg = TPS6586X_SUPPLY##ereg0, \
.enable_mask = 1 << (ebit0), \
@@ -162,27 +171,47 @@ static const unsigned int tps6586x_dvm_voltages[] = {
static struct tps6586x_regulator tps6586x_regulator[] = {
TPS6586X_SYS_REGULATOR(),
- TPS6586X_LDO(LDO_0, "vinldo01", ldo0, SUPPLYV1, 5, 3, ENC, 0, END, 0),
- TPS6586X_LDO(LDO_3, "vinldo23", ldo, SUPPLYV4, 0, 3, ENC, 2, END, 2),
- TPS6586X_LDO(LDO_5, "REG-SYS", ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6),
- TPS6586X_LDO(LDO_6, "vinldo678", ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4),
- TPS6586X_LDO(LDO_7, "vinldo678", ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5),
- TPS6586X_LDO(LDO_8, "vinldo678", ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6),
- TPS6586X_LDO(LDO_9, "vinldo9", ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7),
- TPS6586X_LDO(LDO_RTC, "REG-SYS", ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7),
- TPS6586X_LDO(LDO_1, "vinldo01", dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1),
- TPS6586X_LDO(SM_2, "vin-sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
-
- TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3,
+ TPS6586X_LDO(LDO_0, "vinldo01", tps6586x_ldo0, SUPPLYV1, 5, 3, ENC, 0,
+ END, 0),
+ TPS6586X_LDO(LDO_3, "vinldo23", tps6586x_ldo, SUPPLYV4, 0, 3, ENC, 2,
+ END, 2),
+ TPS6586X_LDO(LDO_5, "REG-SYS", tps6586x_ldo, SUPPLYV6, 0, 3, ENE, 6,
+ ENE, 6),
+ TPS6586X_LDO(LDO_6, "vinldo678", tps6586x_ldo, SUPPLYV3, 0, 3, ENC, 4,
+ END, 4),
+ TPS6586X_LDO(LDO_7, "vinldo678", tps6586x_ldo, SUPPLYV3, 3, 3, ENC, 5,
+ END, 5),
+ TPS6586X_LDO(LDO_8, "vinldo678", tps6586x_ldo, SUPPLYV2, 5, 3, ENC, 6,
+ END, 6),
+ TPS6586X_LDO(LDO_9, "vinldo9", tps6586x_ldo, SUPPLYV6, 3, 3, ENE, 7,
+ ENE, 7),
+ TPS6586X_LDO(LDO_RTC, "REG-SYS", tps6586x_ldo, SUPPLYV4, 3, 3, V4, 7,
+ V4, 7),
+ TPS6586X_LDO(LDO_1, "vinldo01", tps6586x_dvm, SUPPLYV1, 0, 5, ENC, 1,
+ END, 1),
+ TPS6586X_LDO(SM_2, "vin-sm2", tps6586x_sm2, SUPPLYV2, 0, 5, ENC, 7,
+ END, 7),
+
+ TPS6586X_DVM(LDO_2, "vinldo23", tps6586x_dvm, LDO2BV1, 0, 5, ENA, 3,
ENB, 3, TPS6586X_VCC2, BIT(6)),
- TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3,
+ TPS6586X_DVM(LDO_4, "vinldo4", tps6586x_ldo4, LDO4V1, 0, 5, ENC, 3,
END, 3, TPS6586X_VCC1, BIT(6)),
- TPS6586X_DVM(SM_0, "vin-sm0", dvm, SM0V1, 0, 5, ENA, 1,
+ TPS6586X_DVM(SM_0, "vin-sm0", tps6586x_dvm, SM0V1, 0, 5, ENA, 1,
ENB, 1, TPS6586X_VCC1, BIT(2)),
- TPS6586X_DVM(SM_1, "vin-sm1", dvm, SM1V1, 0, 5, ENA, 0,
+ TPS6586X_DVM(SM_1, "vin-sm1", tps6586x_dvm, SM1V1, 0, 5, ENA, 0,
ENB, 0, TPS6586X_VCC1, BIT(0)),
};
+static struct tps6586x_regulator tps658623_regulator[] = {
+ TPS6586X_LDO(SM_2, "vin-sm2", tps658623_sm2, SUPPLYV2, 0, 5, ENC, 7,
+ END, 7),
+};
+
+static struct tps6586x_regulator tps658643_regulator[] = {
+ TPS6586X_LDO(SM_2, "vin-sm2", tps658643_sm2, SUPPLYV2, 0, 5, ENC, 7,
+ END, 7),
+};
+
/*
* TPS6586X has 2 enable bits that are OR'ed to determine the actual
* regulator state. Clearing one of this bits allows switching
@@ -254,11 +283,33 @@ static int tps6586x_regulator_set_slew_rate(struct platform_device *pdev,
setting->slew_rate & TPS6586X_SLEW_RATE_MASK);
}
-static inline struct tps6586x_regulator *find_regulator_info(int id)
+static struct tps6586x_regulator *find_regulator_info(int id, int version)
{
struct tps6586x_regulator *ri;
+ struct tps6586x_regulator *table = NULL;
+ int num;
int i;
+ switch (version) {
+ case TPS658623:
+ table = tps658623_regulator;
+ num = ARRAY_SIZE(tps658623_regulator);
+ break;
+ case TPS658643:
+ table = tps658643_regulator;
+ num = ARRAY_SIZE(tps658643_regulator);
+ break;
+ }
+
+ /* Search version specific table first */
+ if (table) {
+ for (i = 0; i < num; i++) {
+ ri = &table[i];
+ if (ri->desc.id == id)
+ return ri;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(tps6586x_regulator); i++) {
ri = &tps6586x_regulator[i];
if (ri->desc.id == id)
@@ -351,6 +402,7 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
struct regulator_init_data *reg_data;
struct tps6586x_platform_data *pdata;
struct of_regulator_match *tps6586x_reg_matches = NULL;
+ int version;
int id;
int err;
@@ -373,10 +425,13 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ version = tps6586x_get_version(pdev->dev.parent);
+
for (id = 0; id < TPS6586X_ID_MAX_REGULATOR; ++id) {
reg_data = pdata->reg_init_data[id];
- ri = find_regulator_info(id);
+ ri = find_regulator_info(id, version);
+
if (!ri) {
dev_err(&pdev->dev, "invalid regulator ID specified\n");
return -EINVAL;
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index a00132e31ec7..f50dd847eebc 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -88,6 +88,11 @@ static const unsigned int VMMC_VSEL_table[] = {
1800000, 2800000, 3000000, 3300000,
};
+/* supported BBCH voltages in microvolts */
+static const unsigned int VBB_VSEL_table[] = {
+ 3000000, 2520000, 3150000, 5000000,
+};
+
struct tps_info {
const char *name;
const char *vin_name;
@@ -183,6 +188,12 @@ static struct tps_info tps65910_regs[] = {
.voltage_table = VMMC_VSEL_table,
.enable_time_us = 100,
},
+ {
+ .name = "vbb",
+ .vin_name = "vcc7",
+ .n_voltages = ARRAY_SIZE(VBB_VSEL_table),
+ .voltage_table = VBB_VSEL_table,
+ },
};
static struct tps_info tps65911_regs[] = {
@@ -339,6 +350,8 @@ static int tps65910_get_ctrl_register(int id)
return TPS65910_VAUX33;
case TPS65910_REG_VMMC:
return TPS65910_VMMC;
+ case TPS65910_REG_VBB:
+ return TPS65910_BBCH;
default:
return -EINVAL;
}
@@ -528,6 +541,10 @@ static int tps65910_get_voltage_sel(struct regulator_dev *dev)
value &= LDO_SEL_MASK;
value >>= LDO_SEL_SHIFT;
break;
+ case TPS65910_REG_VBB:
+ value &= BBCH_BBSEL_MASK;
+ value >>= BBCH_BBSEL_SHIFT;
+ break;
default:
return -EINVAL;
}
@@ -638,6 +655,9 @@ static int tps65910_set_voltage_sel(struct regulator_dev *dev,
case TPS65910_REG_VMMC:
return tps65910_reg_update_bits(pmic->mfd, reg, LDO_SEL_MASK,
selector << LDO_SEL_SHIFT);
+ case TPS65910_REG_VBB:
+ return tps65910_reg_update_bits(pmic->mfd, reg, BBCH_BBSEL_MASK,
+ selector << BBCH_BBSEL_SHIFT);
}
return -EINVAL;
@@ -669,6 +689,9 @@ static int tps65911_set_voltage_sel(struct regulator_dev *dev,
case TPS65910_REG_VIO:
return tps65910_reg_update_bits(pmic->mfd, reg, LDO_SEL_MASK,
selector << LDO_SEL_SHIFT);
+ case TPS65910_REG_VBB:
+ return tps65910_reg_update_bits(pmic->mfd, reg, BBCH_BBSEL_MASK,
+ selector << BBCH_BBSEL_SHIFT);
}
return -EINVAL;
@@ -762,6 +785,18 @@ static struct regulator_ops tps65910_ops_vdd3 = {
.map_voltage = regulator_map_voltage_ascend,
};
+static struct regulator_ops tps65910_ops_vbb = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_mode = tps65910_set_mode,
+ .get_mode = tps65910_get_mode,
+ .get_voltage_sel = tps65910_get_voltage_sel,
+ .set_voltage_sel = tps65910_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+};
+
static struct regulator_ops tps65910_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
@@ -944,6 +979,7 @@ static struct of_regulator_match tps65910_matches[] = {
{ .name = "vaux2", .driver_data = (void *) &tps65910_regs[10] },
{ .name = "vaux33", .driver_data = (void *) &tps65910_regs[11] },
{ .name = "vmmc", .driver_data = (void *) &tps65910_regs[12] },
+ { .name = "vbb", .driver_data = (void *) &tps65910_regs[13] },
};
static struct of_regulator_match tps65911_matches[] = {
@@ -1145,6 +1181,10 @@ static int tps65910_probe(struct platform_device *pdev)
pmic->desc[i].ops = &tps65910_ops_dcdc;
pmic->desc[i].ramp_delay = 5000;
}
+ } else if (i == TPS65910_REG_VBB &&
+ tps65910_chip_id(tps65910) == TPS65910) {
+ pmic->desc[i].ops = &tps65910_ops_vbb;
+ pmic->desc[i].volt_table = info->voltage_table;
} else {
if (tps65910_chip_id(tps65910) == TPS65910) {
pmic->desc[i].ops = &tps65910_ops;
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 8ebd785485c7..fed28abef419 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -58,7 +58,7 @@ struct twlreg_info {
struct regulator_desc desc;
/* chip specific features */
- unsigned long features;
+ unsigned long features;
/*
* optional override functions for voltage set/get
@@ -1128,7 +1128,7 @@ static int twlreg_probe(struct platform_device *pdev)
if (!initdata)
return -EINVAL;
- info = kmemdup(template, sizeof (*info), GFP_KERNEL);
+ info = kmemdup(template, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 6823e6f2b88a..04cf9c16ef23 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -762,8 +762,7 @@ static int wm831x_boostp_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (res == NULL) {
dev_err(&pdev->dev, "No REG resource\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
dcdc->base = res->start;
@@ -788,7 +787,7 @@ static int wm831x_boostp_probe(struct platform_device *pdev)
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
id + 1, ret);
- goto err;
+ return ret;
}
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
@@ -799,15 +798,12 @@ static int wm831x_boostp_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
- goto err;
+ return ret;
}
platform_set_drvdata(pdev, dcdc);
return 0;
-
-err:
- return ret;
}
static struct platform_driver wm831x_boostp_driver = {
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 1e2d83f2b995..cc29832c9638 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_RESET_CONTROLLER) += core.o
+obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
new file mode 100644
index 000000000000..695bd3496eba
--- /dev/null
+++ b/drivers/reset/reset-sunxi.c
@@ -0,0 +1,175 @@
+/*
+ * Allwinner SoCs Reset Controller driver
+ *
+ * Copyright 2013 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+struct sunxi_reset_data {
+ spinlock_t lock;
+ void __iomem *membase;
+ struct reset_controller_dev rcdev;
+};
+
+static int sunxi_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct sunxi_reset_data *data = container_of(rcdev,
+ struct sunxi_reset_data,
+ rcdev);
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+ unsigned long flags;
+ u32 reg;
+
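+ /* the reset line is asserted by clearing its bit in the register */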
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + (bank * 4));
+ writel(reg & ~BIT(offset), data->membase + (bank * 4));
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static int sunxi_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct sunxi_reset_data *data = container_of(rcdev,
+ struct sunxi_reset_data,
+ rcdev);
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + (bank * 4));
+ writel(reg | BIT(offset), data->membase + (bank * 4));
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static struct reset_control_ops sunxi_reset_ops = {
+ .assert = sunxi_reset_assert,
+ .deassert = sunxi_reset_deassert,
+};
+
+static int sunxi_reset_init(struct device_node *np)
+{
+ struct sunxi_reset_data *data;
+ struct resource res;
+ resource_size_t size;
+ int ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ goto err_alloc;
+
+ size = resource_size(&res);
+ if (!request_mem_region(res.start, size, np->name)) {
+ ret = -EBUSY;
+ goto err_alloc;
+ }
+
+ data->membase = ioremap(res.start, size);
+ if (!data->membase) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = size * 32;
+ data->rcdev.ops = &sunxi_reset_ops;
+ data->rcdev.of_node = np;
+ reset_controller_register(&data->rcdev);
+
+ return 0;
+
+err_alloc:
+ kfree(data);
+ return ret;
+};
+
+/*
+ * These are the reset controllers we need to initialize early on in
+ * our system, before we can even think of using a regular device
+ * driver for them.
+ */
+static const struct of_device_id sunxi_early_reset_dt_ids[] __initdata = {
+ { .compatible = "allwinner,sun6i-a31-ahb1-reset", },
+ { /* sentinel */ },
+};
+
+void __init sun6i_reset_init(void)
+{
+ struct device_node *np;
+
+ for_each_matching_node(np, sunxi_early_reset_dt_ids)
+ sunxi_reset_init(np);
+}
+
+/*
+ * And these are the controllers we can register through the regular
+ * device model.
+ */
+static const struct of_device_id sunxi_reset_dt_ids[] = {
+ { .compatible = "allwinner,sun6i-a31-clock-reset", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sunxi_reset_dt_ids);
+
+static int sunxi_reset_probe(struct platform_device *pdev)
+{
+ return sunxi_reset_init(pdev->dev.of_node);
+}
+
+static int sunxi_reset_remove(struct platform_device *pdev)
+{
+ struct sunxi_reset_data *data = platform_get_drvdata(pdev);
+
+ reset_controller_unregister(&data->rcdev);
+ iounmap(data->membase);
+ kfree(data);
+
+ return 0;
+}
+
+static struct platform_driver sunxi_reset_driver = {
+ .probe = sunxi_reset_probe,
+ .remove = sunxi_reset_remove,
+ .driver = {
+ .name = "sunxi-reset",
+ .owner = THIS_MODULE,
+ .of_match_table = sunxi_reset_dt_ids,
+ },
+};
+module_platform_driver(sunxi_reset_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
+MODULE_DESCRIPTION("Allwinner SoCs Reset Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 007730222116..db933decc39c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -212,6 +212,17 @@ config RTC_DRV_DS3232
This driver can also be built as a module. If so, the module
will be called rtc-ds3232.
+config RTC_DRV_HYM8563
+ tristate "Haoyu Microelectronics HYM8563"
+ depends on I2C && OF
+ help
+ Say Y to enable support for the HYM8563 I2C RTC chip. Apart
+ from the usual rtc functions it provides a clock output of
+ up to 32kHz.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-hym8563.
+
config RTC_DRV_LP8788
tristate "TI LP8788 RTC driver"
depends on MFD_LP8788
@@ -304,6 +315,17 @@ config RTC_DRV_ISL12022
This driver can also be built as a module. If so, the module
will be called rtc-isl12022.
+config RTC_DRV_ISL12057
+ depends on I2C
+ select REGMAP_I2C
+ tristate "Intersil ISL12057"
+ help
+ If you say yes here you get support for the Intersil ISL12057
+ I2C RTC chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-isl12057.
+
config RTC_DRV_X1205
tristate "Xicor/Intersil X1205"
help
@@ -626,7 +648,7 @@ comment "Platform RTC drivers"
config RTC_DRV_CMOS
tristate "PC-style 'CMOS'"
- depends on X86 || ARM || M32R || ATARI || PPC || MIPS || SPARC64
+ depends on X86 || ARM || M32R || PPC || MIPS || SPARC64
default y if X86
help
Say "yes" here to get direct support for the real time clock
@@ -1104,6 +1126,13 @@ config RTC_DRV_SUN4V
If you say Y here you will get support for the Hypervisor
based RTC on SUN4V systems.
+config RTC_DRV_SUNXI
+ tristate "Allwinner sun4i/sun7i RTC"
+ depends on ARCH_SUNXI
+ help
+ If you say Y here you will get support for the RTC found on
+ Allwinner A10/A20.
+
config RTC_DRV_STARFIRE
bool "Starfire RTC"
depends on SPARC64
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 27b4bd884066..b427bf7dd20d 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -55,9 +55,11 @@ obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o
obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o
obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o
+obj-$(CONFIG_RTC_DRV_HYM8563) += rtc-hym8563.o
obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
+obj-$(CONFIG_RTC_DRV_ISL12057) += rtc-isl12057.o
obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o
obj-$(CONFIG_RTC_DRV_LP8788) += rtc-lp8788.o
obj-$(CONFIG_RTC_DRV_LPC32XX) += rtc-lpc32xx.o
@@ -117,6 +119,7 @@ obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o
obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o
obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
+obj-$(CONFIG_RTC_DRV_SUNXI) += rtc-sunxi.o
obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
obj-$(CONFIG_RTC_DRV_TILE) += rtc-tile.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 02426812bebc..589351ef75d0 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -14,6 +14,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/rtc.h>
#include <linux/kdev_t.h>
#include <linux/idr.h>
@@ -157,12 +158,27 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
{
struct rtc_device *rtc;
struct rtc_wkalrm alrm;
- int id, err;
+ int of_id = -1, id = -1, err;
+
+ if (dev->of_node)
+ of_id = of_alias_get_id(dev->of_node, "rtc");
+ else if (dev->parent && dev->parent->of_node)
+ of_id = of_alias_get_id(dev->parent->of_node, "rtc");
+
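+ /* try to reserve the id given by the DT "rtc" alias, falling back to a dynamic id below */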
+ if (of_id >= 0) {
+ id = ida_simple_get(&rtc_ida, of_id, of_id + 1,
+ GFP_KERNEL);
+ if (id < 0)
+ dev_warn(dev, "/aliases ID %d not available\n",
+ of_id);
+ }
- id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
- err = id;
- goto exit;
+ id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ err = id;
+ goto exit;
+ }
}
rtc = kzalloc(sizeof(struct rtc_device), GFP_KERNEL);
diff --git a/drivers/rtc/rtc-as3722.c b/drivers/rtc/rtc-as3722.c
index 9cfa8170a2d6..4af016985890 100644
--- a/drivers/rtc/rtc-as3722.c
+++ b/drivers/rtc/rtc-as3722.c
@@ -198,7 +198,7 @@ static int as3722_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
- as3722_rtc->rtc = rtc_device_register("as3722", &pdev->dev,
+ as3722_rtc->rtc = devm_rtc_device_register(&pdev->dev, "as3722-rtc",
&as3722_rtc_ops, THIS_MODULE);
if (IS_ERR(as3722_rtc->rtc)) {
ret = PTR_ERR(as3722_rtc->rtc);
@@ -209,28 +209,16 @@ static int as3722_rtc_probe(struct platform_device *pdev)
as3722_rtc->alarm_irq = platform_get_irq(pdev, 0);
dev_info(&pdev->dev, "RTC interrupt %d\n", as3722_rtc->alarm_irq);
- ret = request_threaded_irq(as3722_rtc->alarm_irq, NULL,
+ ret = devm_request_threaded_irq(&pdev->dev, as3722_rtc->alarm_irq, NULL,
as3722_alarm_irq, IRQF_ONESHOT | IRQF_EARLY_RESUME,
"rtc-alarm", as3722_rtc);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
as3722_rtc->alarm_irq, ret);
- goto scrub;
+ return ret;
}
disable_irq(as3722_rtc->alarm_irq);
return 0;
-scrub:
- rtc_device_unregister(as3722_rtc->rtc);
- return ret;
-}
-
-static int as3722_rtc_remove(struct platform_device *pdev)
-{
- struct as3722_rtc *as3722_rtc = platform_get_drvdata(pdev);
-
- free_irq(as3722_rtc->alarm_irq, as3722_rtc);
- rtc_device_unregister(as3722_rtc->rtc);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -260,7 +248,6 @@ static const struct dev_pm_ops as3722_rtc_pm_ops = {
static struct platform_driver as3722_rtc_driver = {
.probe = as3722_rtc_probe,
- .remove = as3722_rtc_remove,
.driver = {
.name = "as3722-rtc",
.pm = &as3722_rtc_pm_ops,
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index f14876256a4a..cae212f30d65 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -34,11 +34,11 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
-#include <linux/mod_devicetable.h>
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/dmi.h>
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
#include <asm-generic/rtc.h>
@@ -377,6 +377,51 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
+/*
+ * Do not disable the RTC alarm on shutdown - workaround for broken BIOSes.
+ */
+static bool alarm_disable_quirk;
+
+static int __init set_alarm_disable_quirk(const struct dmi_system_id *id)
+{
+ alarm_disable_quirk = true;
+ pr_info("rtc-cmos: BIOS has alarm-disable quirk. ");
+ pr_info("RTC alarms disabled\n");
+ return 0;
+}
+
+static const struct dmi_system_id rtc_quirks[] __initconst = {
+ /* https://bugzilla.novell.com/show_bug.cgi?id=805740 */
+ {
+ .callback = set_alarm_disable_quirk,
+ .ident = "IBM Truman",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "4852570"),
+ },
+ },
+ /* https://bugzilla.novell.com/show_bug.cgi?id=812592 */
+ {
+ .callback = set_alarm_disable_quirk,
+ .ident = "Gigabyte GA-990XA-UD3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA-990XA-UD3"),
+ },
+ },
+ /* http://permalink.gmane.org/gmane.linux.kernel/1604474 */
+ {
+ .callback = set_alarm_disable_quirk,
+ .ident = "Toshiba Satellite L300",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
+ },
+ },
+ {}
+};
+
static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
@@ -385,6 +430,9 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
if (!is_valid_irq(cmos->irq))
return -EINVAL;
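+ /* on BIOSes with the disable quirk, leave the alarm state untouched and report success */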
+ if (alarm_disable_quirk)
+ return 0;
+
spin_lock_irqsave(&rtc_lock, flags);
if (enabled)
@@ -708,11 +756,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
irq_handler_t rtc_cmos_int_handler;
if (is_hpet_enabled()) {
- int err;
-
rtc_cmos_int_handler = hpet_rtc_interrupt;
- err = hpet_register_irq_handler(cmos_interrupt);
- if (err != 0) {
+ retval = hpet_register_irq_handler(cmos_interrupt);
+ if (retval) {
dev_warn(dev, "hpet_register_irq_handler "
" failed in rtc_init().");
goto cleanup1;
@@ -1127,7 +1173,7 @@ static struct platform_driver cmos_platform_driver = {
.remove = __exit_p(cmos_platform_remove),
.shutdown = cmos_platform_shutdown,
.driver = {
- .name = (char *) driver_name,
+ .name = driver_name,
#ifdef CONFIG_PM
.pm = &cmos_pm_ops,
#endif
@@ -1157,6 +1203,8 @@ static int __init cmos_init(void)
platform_driver_registered = true;
}
+ dmi_check_system(rtc_quirks);
+
if (retval == 0)
return 0;
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 80f323731ee2..2dd586a19b59 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -787,7 +787,6 @@ static int ds1305_remove(struct spi_device *spi)
cancel_work_sync(&ds1305->work);
}
- spi_set_drvdata(spi, NULL);
return 0;
}
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 17b73fdc3b6e..5a1f3b2a8f1e 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -13,12 +13,13 @@
*/
#include <linux/bcd.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/rtc.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -215,12 +216,19 @@ static int ds1742_rtc_remove(struct platform_device *pdev)
return 0;
}
+static struct of_device_id __maybe_unused ds1742_rtc_of_match[] = {
+ { .compatible = "maxim,ds1742", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ds1742_rtc_of_match);
+
static struct platform_driver ds1742_rtc_driver = {
.probe = ds1742_rtc_probe,
.remove = ds1742_rtc_remove,
.driver = {
.name = "rtc-ds1742",
.owner = THIS_MODULE,
+ .of_match_table = ds1742_rtc_of_match,
},
};
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
new file mode 100644
index 000000000000..bd628a6f981d
--- /dev/null
+++ b/drivers/rtc/rtc-hym8563.c
@@ -0,0 +1,606 @@
+/*
+ * Haoyu HYM8563 RTC driver
+ *
+ * Copyright (C) 2013 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on rtc-HYM8563
+ * Copyright (C) 2010 ROCKCHIP, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/i2c.h>
+#include <linux/bcd.h>
+#include <linux/rtc.h>
+
+#define HYM8563_CTL1 0x00
+#define HYM8563_CTL1_TEST BIT(7)
+#define HYM8563_CTL1_STOP BIT(5)
+#define HYM8563_CTL1_TESTC BIT(3)
+
+#define HYM8563_CTL2 0x01
+#define HYM8563_CTL2_TI_TP BIT(4)
+#define HYM8563_CTL2_AF BIT(3)
+#define HYM8563_CTL2_TF BIT(2)
+#define HYM8563_CTL2_AIE BIT(1)
+#define HYM8563_CTL2_TIE BIT(0)
+
+#define HYM8563_SEC 0x02
+#define HYM8563_SEC_VL BIT(7)
+#define HYM8563_SEC_MASK 0x7f
+
+#define HYM8563_MIN 0x03
+#define HYM8563_MIN_MASK 0x7f
+
+#define HYM8563_HOUR 0x04
+#define HYM8563_HOUR_MASK 0x3f
+
+#define HYM8563_DAY 0x05
+#define HYM8563_DAY_MASK 0x3f
+
+#define HYM8563_WEEKDAY 0x06
+#define HYM8563_WEEKDAY_MASK 0x07
+
+#define HYM8563_MONTH 0x07
+#define HYM8563_MONTH_CENTURY BIT(7)
+#define HYM8563_MONTH_MASK 0x1f
+
+#define HYM8563_YEAR 0x08
+
+#define HYM8563_ALM_MIN 0x09
+#define HYM8563_ALM_HOUR 0x0a
+#define HYM8563_ALM_DAY 0x0b
+#define HYM8563_ALM_WEEK 0x0c
+
+/* Each alarm check can be disabled by setting this bit in the register */
+#define HYM8563_ALM_BIT_DISABLE BIT(7)
+
+#define HYM8563_CLKOUT 0x0d
+#define HYM8563_CLKOUT_DISABLE BIT(7)
+#define HYM8563_CLKOUT_32768 0
+#define HYM8563_CLKOUT_1024 1
+#define HYM8563_CLKOUT_32 2
+#define HYM8563_CLKOUT_1 3
+#define HYM8563_CLKOUT_MASK 3
+
+#define HYM8563_TMR_CTL 0x0e
+#define HYM8563_TMR_CTL_ENABLE BIT(7)
+#define HYM8563_TMR_CTL_4096 0
+#define HYM8563_TMR_CTL_64 1
+#define HYM8563_TMR_CTL_1 2
+#define HYM8563_TMR_CTL_1_60 3
+#define HYM8563_TMR_CTL_MASK 3
+
+#define HYM8563_TMR_CNT 0x0f
+
+struct hym8563 {
+ struct i2c_client *client;
+ struct rtc_device *rtc;
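+ /* set when the chip reports its time registers as valid (SEC_VL cleared) */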
+ bool valid;
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw clkout_hw;
+#endif
+};
+
+/*
+ * RTC handling
+ */
+
+static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hym8563 *hym8563 = i2c_get_clientdata(client);
+ u8 buf[7];
+ int ret;
+
+ if (!hym8563->valid) {
+ dev_warn(&client->dev, "no valid clock/calendar values available\n");
+ return -EPERM;
+ }
+
+ ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf);
+ if (ret < 0)
+ return ret;
+
+ tm->tm_sec = bcd2bin(buf[0] & HYM8563_SEC_MASK);
+ tm->tm_min = bcd2bin(buf[1] & HYM8563_MIN_MASK);
+ tm->tm_hour = bcd2bin(buf[2] & HYM8563_HOUR_MASK);
+ tm->tm_mday = bcd2bin(buf[3] & HYM8563_DAY_MASK);
+ tm->tm_wday = bcd2bin(buf[4] & HYM8563_WEEKDAY_MASK); /* 0 = Sun */
+ tm->tm_mon = bcd2bin(buf[5] & HYM8563_MONTH_MASK) - 1; /* 0 = Jan */
+ tm->tm_year = bcd2bin(buf[6]) + 100;
+
+ return 0;
+}
+
+static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hym8563 *hym8563 = i2c_get_clientdata(client);
+ u8 buf[7];
+ int ret;
+
+ /* Years >= 2100 are too far in the future, 19XX is too early */
+ if (tm->tm_year < 100 || tm->tm_year >= 200)
+ return -EINVAL;
+
+ buf[0] = bin2bcd(tm->tm_sec);
+ buf[1] = bin2bcd(tm->tm_min);
+ buf[2] = bin2bcd(tm->tm_hour);
+ buf[3] = bin2bcd(tm->tm_mday);
+ buf[4] = bin2bcd(tm->tm_wday);
+ buf[5] = bin2bcd(tm->tm_mon + 1);
+
+ /*
+ * While the HYM8563 has a century flag in the month register,
+ * it does not seem to carry it over a subsequent write/read.
+ * So we'll limit ourselves to 100 years, starting at 2000 for now.
+ */
+ buf[6] = tm->tm_year - 100;
+
+ /*
+ * CTL1 only contains TEST-mode bits apart from stop,
+ * so no need to read the value first
+ */
+ ret = i2c_smbus_write_byte_data(client, HYM8563_CTL1,
+ HYM8563_CTL1_STOP);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_i2c_block_data(client, HYM8563_SEC, 7, buf);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, HYM8563_CTL1, 0);
+ if (ret < 0)
+ return ret;
+
+ hym8563->valid = true;
+
+ return 0;
+}
+
+static int hym8563_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int data;
+
+ data = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
+ if (data < 0)
+ return data;
+
+ if (enabled)
+ data |= HYM8563_CTL2_AIE;
+ else
+ data &= ~HYM8563_CTL2_AIE;
+
+ return i2c_smbus_write_byte_data(client, HYM8563_CTL2, data);
+};
+
+static int hym8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rtc_time *alm_tm = &alm->time;
+ u8 buf[4];
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(client, HYM8563_ALM_MIN, 4, buf);
+ if (ret < 0)
+ return ret;
+
+ /* The alarm only has minute accuracy */
+ alm_tm->tm_sec = -1;
+
+ alm_tm->tm_min = (buf[0] & HYM8563_ALM_BIT_DISABLE) ?
+ -1 :
+ bcd2bin(buf[0] & HYM8563_MIN_MASK);
+ alm_tm->tm_hour = (buf[1] & HYM8563_ALM_BIT_DISABLE) ?
+ -1 :
+ bcd2bin(buf[1] & HYM8563_HOUR_MASK);
+ alm_tm->tm_mday = (buf[2] & HYM8563_ALM_BIT_DISABLE) ?
+ -1 :
+ bcd2bin(buf[2] & HYM8563_DAY_MASK);
+ alm_tm->tm_wday = (buf[3] & HYM8563_ALM_BIT_DISABLE) ?
+ -1 :
+ bcd2bin(buf[3] & HYM8563_WEEKDAY_MASK);
+
+ alm_tm->tm_mon = -1;
+ alm_tm->tm_year = -1;
+
+ ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & HYM8563_CTL2_AIE)
+ alm->enabled = 1;
+
+ return 0;
+}
+
+static int hym8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rtc_time *alm_tm = &alm->time;
+ u8 buf[4];
+ int ret;
+
+ /*
+ * The alarm has only minute resolution, so round a requested
+ * second value up to the next full minute
+ */
+ if (alm_tm->tm_sec) {
+ alm_tm->tm_sec = 0;
+ alm_tm->tm_min++;
+ if (alm_tm->tm_min >= 60) {
+ alm_tm->tm_min = 0;
+ alm_tm->tm_hour++;
+ if (alm_tm->tm_hour >= 24) {
+ alm_tm->tm_hour = 0;
+ alm_tm->tm_mday++;
+ if (alm_tm->tm_mday > 31)
+ alm_tm->tm_mday = 0;
+ }
+ }
+ }
+
+ ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~HYM8563_CTL2_AIE;
+
+ ret = i2c_smbus_write_byte_data(client, HYM8563_CTL2, ret);
+ if (ret < 0)
+ return ret;
+
+ buf[0] = (alm_tm->tm_min < 60 && alm_tm->tm_min >= 0) ?
+ bin2bcd(alm_tm->tm_min) : HYM8563_ALM_BIT_DISABLE;
+
+ buf[1] = (alm_tm->tm_hour < 24 && alm_tm->tm_hour >= 0) ?
+ bin2bcd(alm_tm->tm_hour) : HYM8563_ALM_BIT_DISABLE;
+
+ buf[2] = (alm_tm->tm_mday <= 31 && alm_tm->tm_mday >= 1) ?
+ bin2bcd(alm_tm->tm_mday) : HYM8563_ALM_BIT_DISABLE;
+
+ buf[3] = (alm_tm->tm_wday < 7 && alm_tm->tm_wday >= 0) ?
+ bin2bcd(alm_tm->tm_wday) : HYM8563_ALM_BIT_DISABLE;
+
+ ret = i2c_smbus_write_i2c_block_data(client, HYM8563_ALM_MIN, 4, buf);
+ if (ret < 0)
+ return ret;
+
+ return hym8563_rtc_alarm_irq_enable(dev, alm->enabled);
+}
+
+static const struct rtc_class_ops hym8563_rtc_ops = {
+ .read_time = hym8563_rtc_read_time,
+ .set_time = hym8563_rtc_set_time,
+ .alarm_irq_enable = hym8563_rtc_alarm_irq_enable,
+ .read_alarm = hym8563_rtc_read_alarm,
+ .set_alarm = hym8563_rtc_set_alarm,
+};
+
+/*
+ * Handling of the clkout
+ */
+
+#ifdef CONFIG_COMMON_CLK
+#define clkout_hw_to_hym8563(_hw) container_of(_hw, struct hym8563, clkout_hw)
+
+static int clkout_rates[] = {
+ 32768,
+ 1024,
+ 32,
+ 1,
+};
+
+static unsigned long hym8563_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw);
+ struct i2c_client *client = hym8563->client;
+ int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT);
+
+ if (ret < 0 || ret & HYM8563_CLKOUT_DISABLE)
+ return 0;
+
+ ret &= HYM8563_CLKOUT_MASK;
+ return clkout_rates[ret];
+}
+
+static long hym8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i;
+
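+ /* clkout_rates is in descending order, so return the first rate not above the target */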
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] <= rate)
+ return clkout_rates[i];
+
+ return 0;
+}
+
+static int hym8563_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw);
+ struct i2c_client *client = hym8563->client;
+ int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT);
+ int i;
+
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] == rate) {
+ ret &= ~HYM8563_CLKOUT_MASK;
+ ret |= i;
+ return i2c_smbus_write_byte_data(client,
+ HYM8563_CLKOUT, ret);
+ }
+
+ return -EINVAL;
+}
+
+static int hym8563_clkout_control(struct clk_hw *hw, bool enable)
+{
+ struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw);
+ struct i2c_client *client = hym8563->client;
+ int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT);
+
+ if (ret < 0)
+ return ret;
+
+ if (enable)
+ ret &= ~HYM8563_CLKOUT_DISABLE;
+ else
+ ret |= HYM8563_CLKOUT_DISABLE;
+
+ return i2c_smbus_write_byte_data(client, HYM8563_CLKOUT, ret);
+}
+
+static int hym8563_clkout_prepare(struct clk_hw *hw)
+{
+ return hym8563_clkout_control(hw, 1);
+}
+
+static void hym8563_clkout_unprepare(struct clk_hw *hw)
+{
+ hym8563_clkout_control(hw, 0);
+}
+
+static int hym8563_clkout_is_prepared(struct clk_hw *hw)
+{
+ struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw);
+ struct i2c_client *client = hym8563->client;
+ int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT);
+
+ if (ret < 0)
+ return ret;
+
+ return !(ret & HYM8563_CLKOUT_DISABLE);
+}
+
+static const struct clk_ops hym8563_clkout_ops = {
+ .prepare = hym8563_clkout_prepare,
+ .unprepare = hym8563_clkout_unprepare,
+ .is_prepared = hym8563_clkout_is_prepared,
+ .recalc_rate = hym8563_clkout_recalc_rate,
+ .round_rate = hym8563_clkout_round_rate,
+ .set_rate = hym8563_clkout_set_rate,
+};
+
+static struct clk *hym8563_clkout_register_clk(struct hym8563 *hym8563)
+{
+ struct i2c_client *client = hym8563->client;
+ struct device_node *node = client->dev.of_node;
+ struct clk *clk;
+ struct clk_init_data init;
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, HYM8563_CLKOUT,
+ HYM8563_CLKOUT_DISABLE);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ init.name = "hym8563-clkout";
+ init.ops = &hym8563_clkout_ops;
+ init.flags = CLK_IS_ROOT;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ hym8563->clkout_hw.init = &init;
+
+ /* register the clock */
+ clk = clk_register(&client->dev, &hym8563->clkout_hw);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+ return clk;
+}
+#endif
+
+/*
+ * The alarm interrupt is implemented as a level-low interrupt in the
+ * hym8563, while the timer interrupt uses a falling edge.
+ * We don't use the timer at all, so the interrupt is requested to
+ * use the level-low trigger.
+ */
+static irqreturn_t hym8563_irq(int irq, void *dev_id)
+{
+ struct hym8563 *hym8563 = (struct hym8563 *)dev_id;
+ struct i2c_client *client = hym8563->client;
+ struct mutex *lock = &hym8563->rtc->ops_lock;
+ int data, ret;
+
+ mutex_lock(lock);
+
+ /* Clear the alarm flag */
+
+ data = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
+ if (data < 0) {
+ dev_err(&client->dev, "%s: error reading i2c data %d\n",
+ __func__, data);
+ goto out;
+ }
+
+ data &= ~HYM8563_CTL2_AF;
+
+ ret = i2c_smbus_write_byte_data(client, HYM8563_CTL2, data);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: error writing i2c data %d\n",
+ __func__, ret);
+ }
+
+out:
+ mutex_unlock(lock);
+ return IRQ_HANDLED;
+}
+
+static int hym8563_init_device(struct i2c_client *client)
+{
+ int ret;
+
+ /* Clear stop flag if present */
+ ret = i2c_smbus_write_byte_data(client, HYM8563_CTL1, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
+ if (ret < 0)
+ return ret;
+
+ /* Disable alarm and timer interrupts */
+ ret &= ~HYM8563_CTL2_AIE;
+ ret &= ~HYM8563_CTL2_TIE;
+
+ /* Clear any pending alarm and timer flags */
+ if (ret & HYM8563_CTL2_AF)
+ ret &= ~HYM8563_CTL2_AF;
+
+ if (ret & HYM8563_CTL2_TF)
+ ret &= ~HYM8563_CTL2_TF;
+
+ ret &= ~HYM8563_CTL2_TI_TP;
+
+ return i2c_smbus_write_byte_data(client, HYM8563_CTL2, ret);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int hym8563_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
+
+ if (device_may_wakeup(dev)) {
+ ret = enable_irq_wake(client->irq);
+ if (ret) {
+ dev_err(dev, "enable_irq_wake failed, %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hym8563_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(hym8563_pm_ops, hym8563_suspend, hym8563_resume);
+
+static int hym8563_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct hym8563 *hym8563;
+ int ret;
+
+ hym8563 = devm_kzalloc(&client->dev, sizeof(*hym8563), GFP_KERNEL);
+ if (!hym8563)
+ return -ENOMEM;
+
+ hym8563->client = client;
+ i2c_set_clientdata(client, hym8563);
+
+ device_set_wakeup_capable(&client->dev, true);
+
+ ret = hym8563_init_device(client);
+ if (ret) {
+ dev_err(&client->dev, "could not init device, %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, hym8563_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ client->name, hym8563);
+ if (ret < 0) {
+ dev_err(&client->dev, "irq %d request failed, %d\n",
+ client->irq, ret);
+ return ret;
+ }
+
+ /* check state of calendar information */
+ ret = i2c_smbus_read_byte_data(client, HYM8563_SEC);
+ if (ret < 0)
+ return ret;
+
+ hym8563->valid = !(ret & HYM8563_SEC_VL);
+ dev_dbg(&client->dev, "rtc information is %s\n",
+ hym8563->valid ? "valid" : "invalid");
+
+ hym8563->rtc = devm_rtc_device_register(&client->dev, client->name,
+ &hym8563_rtc_ops, THIS_MODULE);
+ if (IS_ERR(hym8563->rtc))
+ return PTR_ERR(hym8563->rtc);
+
+#ifdef CONFIG_COMMON_CLK
+ hym8563_clkout_register_clk(hym8563);
+#endif
+
+ return 0;
+}
+
+static const struct i2c_device_id hym8563_id[] = {
+ { "hym8563", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, hym8563_id);
+
+static struct of_device_id hym8563_dt_idtable[] = {
+ { .compatible = "haoyu,hym8563" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hym8563_dt_idtable);
+
+static struct i2c_driver hym8563_driver = {
+ .driver = {
+ .name = "rtc-hym8563",
+ .owner = THIS_MODULE,
+ .pm = &hym8563_pm_ops,
+ .of_match_table = hym8563_dt_idtable,
+ },
+ .probe = hym8563_probe,
+ .id_table = hym8563_id,
+};
+
+module_i2c_driver(hym8563_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("HYM8563 RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c
new file mode 100644
index 000000000000..7854a656628f
--- /dev/null
+++ b/drivers/rtc/rtc-isl12057.c
@@ -0,0 +1,310 @@
+/*
+ * rtc-isl12057 - Driver for Intersil ISL12057 I2C Real Time Clock
+ *
+ * Copyright (C) 2013, Arnaud EBALARD <arno@natisbad.org>
+ *
+ * This work is largely based on Intersil ISL1208 driver developed by
+ * Herbert Valerio Riedel <hvr@gnu.org>.
+ *
+ * Detailed datasheet on which this development is based is available here:
+ *
+ * http://natisbad.org/NAS2/refs/ISL12057.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rtc.h>
+#include <linux/i2c.h>
+#include <linux/bcd.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define DRV_NAME "rtc-isl12057"
+
+/* RTC section */
+#define ISL12057_REG_RTC_SC 0x00 /* Seconds */
+#define ISL12057_REG_RTC_MN 0x01 /* Minutes */
+#define ISL12057_REG_RTC_HR 0x02 /* Hours */
+#define ISL12057_REG_RTC_HR_PM BIT(5) /* AM/PM bit in 12h format */
+#define ISL12057_REG_RTC_HR_MIL BIT(6) /* 24h/12h format */
+#define ISL12057_REG_RTC_DW 0x03 /* Day of the Week */
+#define ISL12057_REG_RTC_DT 0x04 /* Date */
+#define ISL12057_REG_RTC_MO 0x05 /* Month */
+#define ISL12057_REG_RTC_YR 0x06 /* Year */
+#define ISL12057_RTC_SEC_LEN 7
+
+/* Alarm 1 section */
+#define ISL12057_REG_A1_SC 0x07 /* Alarm 1 Seconds */
+#define ISL12057_REG_A1_MN 0x08 /* Alarm 1 Minutes */
+#define ISL12057_REG_A1_HR 0x09 /* Alarm 1 Hours */
+#define ISL12057_REG_A1_HR_PM BIT(5) /* AM/PM bit in 12h format */
+#define ISL12057_REG_A1_HR_MIL BIT(6) /* 24h/12h format */
+#define ISL12057_REG_A1_DWDT 0x0A /* Alarm 1 Date / Day of the week */
+#define ISL12057_REG_A1_DWDT_B BIT(6) /* DW / DT selection bit */
+#define ISL12057_A1_SEC_LEN 4
+
+/* Alarm 2 section */
+#define ISL12057_REG_A2_MN 0x0B /* Alarm 2 Minutes */
+#define ISL12057_REG_A2_HR 0x0C /* Alarm 2 Hours */
+#define ISL12057_REG_A2_DWDT 0x0D /* Alarm 2 Date / Day of the week */
+#define ISL12057_A2_SEC_LEN 3
+
+/* Control/Status registers */
+#define ISL12057_REG_INT 0x0E
+#define ISL12057_REG_INT_A1IE BIT(0) /* Alarm 1 interrupt enable bit */
+#define ISL12057_REG_INT_A2IE BIT(1) /* Alarm 2 interrupt enable bit */
+#define ISL12057_REG_INT_INTCN BIT(2) /* Interrupt control enable bit */
+#define ISL12057_REG_INT_RS1 BIT(3) /* Freq out control bit 1 */
+#define ISL12057_REG_INT_RS2 BIT(4) /* Freq out control bit 2 */
+#define ISL12057_REG_INT_EOSC BIT(7) /* Oscillator enable bit */
+
+#define ISL12057_REG_SR 0x0F
+#define ISL12057_REG_SR_A1F BIT(0) /* Alarm 1 interrupt bit */
+#define ISL12057_REG_SR_A2F BIT(1) /* Alarm 2 interrupt bit */
+#define ISL12057_REG_SR_OSF BIT(7) /* Oscillator failure bit */
+
+/* Register memory map length */
+#define ISL12057_MEM_MAP_LEN 0x10
+
+struct isl12057_rtc_data {
+ struct regmap *regmap;
+ struct mutex lock;
+};
+
+static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs)
+{
+ tm->tm_sec = bcd2bin(regs[ISL12057_REG_RTC_SC]);
+ tm->tm_min = bcd2bin(regs[ISL12057_REG_RTC_MN]);
+
+ if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_MIL) { /* AM/PM */
+ tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x0f);
+ if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_PM)
+ tm->tm_hour += 12;
+ } else { /* 24 hour mode */
+ tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x3f);
+ }
+
+ tm->tm_mday = bcd2bin(regs[ISL12057_REG_RTC_DT]);
+ tm->tm_wday = bcd2bin(regs[ISL12057_REG_RTC_DW]) - 1; /* starts at 1 */
+ tm->tm_mon = bcd2bin(regs[ISL12057_REG_RTC_MO]) - 1; /* starts at 1 */
+ tm->tm_year = bcd2bin(regs[ISL12057_REG_RTC_YR]) + 100;
+}
+
+static int isl12057_rtc_tm_to_regs(u8 *regs, struct rtc_time *tm)
+{
+ /*
+ * The clock has an 8 bit wide bcd-coded register for the year.
+ * tm_year is an offset from 1900 and we are interested in the
+ * 2000-2099 range, so any value less than 100 is invalid.
+ */
+ if (tm->tm_year < 100)
+ return -EINVAL;
+
+ regs[ISL12057_REG_RTC_SC] = bin2bcd(tm->tm_sec);
+ regs[ISL12057_REG_RTC_MN] = bin2bcd(tm->tm_min);
+ regs[ISL12057_REG_RTC_HR] = bin2bcd(tm->tm_hour); /* 24-hour format */
+ regs[ISL12057_REG_RTC_DT] = bin2bcd(tm->tm_mday);
+ regs[ISL12057_REG_RTC_MO] = bin2bcd(tm->tm_mon + 1);
+ regs[ISL12057_REG_RTC_YR] = bin2bcd(tm->tm_year - 100);
+ regs[ISL12057_REG_RTC_DW] = bin2bcd(tm->tm_wday + 1);
+
+ return 0;
+}
+
+/*
+ * Try to match register bits w/ fixed null values to see whether we
+ * are dealing with an ISL12057. Note: this function is called early
+ * during init and hence does not need mutex protection.
+ */
+static int isl12057_i2c_validate_chip(struct regmap *regmap)
+{
+ u8 regs[ISL12057_MEM_MAP_LEN];
+ static const u8 mask[ISL12057_MEM_MAP_LEN] = { 0x80, 0x80, 0x80, 0xf8,
+ 0xc0, 0x60, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x60, 0x7c };
+ int ret, i;
+
+ ret = regmap_bulk_read(regmap, 0, regs, ISL12057_MEM_MAP_LEN);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ISL12057_MEM_MAP_LEN; ++i) {
+ if (regs[i] & mask[i]) /* check if bits are cleared */
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int isl12057_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct isl12057_rtc_data *data = dev_get_drvdata(dev);
+ u8 regs[ISL12057_RTC_SEC_LEN];
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = regmap_bulk_read(data->regmap, ISL12057_REG_RTC_SC, regs,
+ ISL12057_RTC_SEC_LEN);
+ mutex_unlock(&data->lock);
+
+ if (ret) {
+ dev_err(dev, "%s: RTC read failed\n", __func__);
+ return ret;
+ }
+
+ isl12057_rtc_regs_to_tm(tm, regs);
+
+ return rtc_valid_tm(tm);
+}
+
+static int isl12057_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct isl12057_rtc_data *data = dev_get_drvdata(dev);
+ u8 regs[ISL12057_RTC_SEC_LEN];
+ int ret;
+
+ ret = isl12057_rtc_tm_to_regs(regs, tm);
+ if (ret)
+ return ret;
+
+ mutex_lock(&data->lock);
+ ret = regmap_bulk_write(data->regmap, ISL12057_REG_RTC_SC, regs,
+ ISL12057_RTC_SEC_LEN);
+ mutex_unlock(&data->lock);
+
+ if (ret)
+ dev_err(dev, "%s: RTC write failed\n", __func__);
+
+ return ret;
+}
+
+/*
+ * Check current RTC status and enable/disable what needs to be. Return 0 if
+ * everything went ok and a negative value upon error. Note: this function
+ * is called early during init and hence does not need mutex protection.
+ */
+static int isl12057_check_rtc_status(struct device *dev, struct regmap *regmap)
+{
+ int ret;
+
+ /* Enable oscillator if not already running */
+ ret = regmap_update_bits(regmap, ISL12057_REG_INT,
+ ISL12057_REG_INT_EOSC, 0);
+ if (ret < 0) {
+ dev_err(dev, "Unable to enable oscillator\n");
+ return ret;
+ }
+
+ /* Clear oscillator failure bit if needed */
+ ret = regmap_update_bits(regmap, ISL12057_REG_SR,
+ ISL12057_REG_SR_OSF, 0);
+ if (ret < 0) {
+ dev_err(dev, "Unable to clear oscillator failure bit\n");
+ return ret;
+ }
+
+ /* Clear alarm bit if needed */
+ ret = regmap_update_bits(regmap, ISL12057_REG_SR,
+ ISL12057_REG_SR_A1F, 0);
+ if (ret < 0) {
+ dev_err(dev, "Unable to clear alarm bit\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct rtc_class_ops rtc_ops = {
+ .read_time = isl12057_rtc_read_time,
+ .set_time = isl12057_rtc_set_time,
+};
+
+static struct regmap_config isl12057_rtc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int isl12057_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct isl12057_rtc_data *data;
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -ENODEV;
+
+ regmap = devm_regmap_init_i2c(client, &isl12057_rtc_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(dev, "regmap allocation failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = isl12057_i2c_validate_chip(regmap);
+ if (ret)
+ return ret;
+
+ ret = isl12057_check_rtc_status(dev, regmap);
+ if (ret)
+ return ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_init(&data->lock);
+ data->regmap = regmap;
+ dev_set_drvdata(dev, data);
+
+ rtc = devm_rtc_device_register(dev, DRV_NAME, &rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id isl12057_dt_match[] = {
+ { .compatible = "isl,isl12057" },
+ { },
+};
+#endif
+
+static const struct i2c_device_id isl12057_id[] = {
+ { "isl12057", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, isl12057_id);
+
+static struct i2c_driver isl12057_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(isl12057_dt_match),
+ },
+ .probe = isl12057_probe,
+ .id_table = isl12057_id,
+};
+module_i2c_driver(isl12057_driver);
+
+MODULE_AUTHOR("Arnaud EBALARD <arno@natisbad.org>");
+MODULE_DESCRIPTION("Intersil ISL12057 RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-max8907.c b/drivers/rtc/rtc-max8907.c
index 8e45b3c4aa2f..3032178bd9e6 100644
--- a/drivers/rtc/rtc-max8907.c
+++ b/drivers/rtc/rtc-max8907.c
@@ -51,7 +51,7 @@ static irqreturn_t max8907_irq_handler(int irq, void *data)
{
struct max8907_rtc *rtc = data;
- regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x7f, 0);
+ regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0);
rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF);
@@ -64,7 +64,7 @@ static void regs_to_tm(u8 *regs, struct rtc_time *tm)
bcd2bin(regs[RTC_YEAR1]) - 1900;
tm->tm_mon = bcd2bin(regs[RTC_MONTH] & 0x1f) - 1;
tm->tm_mday = bcd2bin(regs[RTC_DATE] & 0x3f);
- tm->tm_wday = (regs[RTC_WEEKDAY] & 0x07) - 1;
+ tm->tm_wday = (regs[RTC_WEEKDAY] & 0x07);
if (regs[RTC_HOUR] & HOUR_12) {
tm->tm_hour = bcd2bin(regs[RTC_HOUR] & 0x01f);
if (tm->tm_hour == 12)
@@ -88,7 +88,7 @@ static void tm_to_regs(struct rtc_time *tm, u8 *regs)
regs[RTC_YEAR1] = bin2bcd(low);
regs[RTC_MONTH] = bin2bcd(tm->tm_mon + 1);
regs[RTC_DATE] = bin2bcd(tm->tm_mday);
- regs[RTC_WEEKDAY] = tm->tm_wday + 1;
+ regs[RTC_WEEKDAY] = tm->tm_wday;
regs[RTC_HOUR] = bin2bcd(tm->tm_hour);
regs[RTC_MIN] = bin2bcd(tm->tm_min);
regs[RTC_SEC] = bin2bcd(tm->tm_sec);
@@ -153,7 +153,7 @@ static int max8907_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
tm_to_regs(&alrm->time, regs);
/* Disable alarm while we update the target time */
- ret = regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x7f, 0);
+ ret = regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0);
if (ret < 0)
return ret;
@@ -163,8 +163,7 @@ static int max8907_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return ret;
if (alrm->enabled)
- ret = regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL,
- 0x7f, 0x7f);
+ ret = regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x77);
return ret;
}
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 50c572645546..419874fefa4b 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -391,11 +391,13 @@ static int mxc_rtc_probe(struct platform_device *pdev)
pdata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk)) {
dev_err(&pdev->dev, "unable to get clock!\n");
- ret = PTR_ERR(pdata->clk);
- goto exit_free_pdata;
+ return PTR_ERR(pdata->clk);
}
- clk_prepare_enable(pdata->clk);
+ ret = clk_prepare_enable(pdata->clk);
+ if (ret)
+ return ret;
+
rate = clk_get_rate(pdata->clk);
if (rate == 32768)
@@ -447,8 +449,6 @@ static int mxc_rtc_probe(struct platform_device *pdev)
exit_put_clk:
clk_disable_unprepare(pdata->clk);
-exit_free_pdata:
-
return ret;
}
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 1ee514a3972c..9bd842e97749 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -197,10 +197,7 @@ static int pcf2127_probe(struct i2c_client *client,
pcf2127_driver.driver.name,
&pcf2127_rtc_ops, THIS_MODULE);
- if (IS_ERR(pcf2127->rtc))
- return PTR_ERR(pcf2127->rtc);
-
- return 0;
+ return PTR_ERR_OR_ZERO(pcf2127->rtc);
}
static const struct i2c_device_id pcf2127_id[] = {
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index 00b0eb7fe166..de8d9c427782 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -52,8 +52,45 @@
#define RX8581_CTRL_STOP 0x02 /* STOP bit */
#define RX8581_CTRL_RESET 0x01 /* RESET bit */
+struct rx8581 {
+ struct i2c_client *client;
+ struct rtc_device *rtc;
+ s32 (*read_block_data)(const struct i2c_client *client, u8 command,
+ u8 length, u8 *values);
+ s32 (*write_block_data)(const struct i2c_client *client, u8 command,
+ u8 length, const u8 *values);
+};
+
static struct i2c_driver rx8581_driver;
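+/* byte-at-a-time SMBus fallbacks for adapters without I2C block transfer support */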
+static int rx8581_read_block_data(const struct i2c_client *client, u8 command,
+ u8 length, u8 *values)
+{
+ s32 i, data;
+
+ for (i = 0; i < length; i++) {
+ data = i2c_smbus_read_byte_data(client, command + i);
+ if (data < 0)
+ return data;
+ values[i] = data;
+ }
+ return i;
+}
+
+static int rx8581_write_block_data(const struct i2c_client *client, u8 command,
+ u8 length, const u8 *values)
+{
+ s32 i, ret;
+
+ for (i = 0; i < length; i++) {
+ ret = i2c_smbus_write_byte_data(client, command + i,
+ values[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return length;
+}
+
/*
* In the routines that deal directly with the rx8581 hardware, we use
* rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
@@ -62,6 +99,7 @@ static int rx8581_get_datetime(struct i2c_client *client, struct rtc_time *tm)
{
unsigned char date[7];
int data, err;
+ struct rx8581 *rx8581 = i2c_get_clientdata(client);
/* First we ensure that the "update flag" is not set, we read the
* time and date then re-read the "update flag". If the update flag
@@ -80,14 +118,13 @@ static int rx8581_get_datetime(struct i2c_client *client, struct rtc_time *tm)
err = i2c_smbus_write_byte_data(client,
RX8581_REG_FLAG, (data & ~RX8581_FLAG_UF));
if (err != 0) {
- dev_err(&client->dev, "Unable to write device "
- "flags\n");
+ dev_err(&client->dev, "Unable to write device flags\n");
return -EIO;
}
}
/* Now read time and date */
- err = i2c_smbus_read_i2c_block_data(client, RX8581_REG_SC,
+ err = rx8581->read_block_data(client, RX8581_REG_SC,
7, date);
if (err < 0) {
dev_err(&client->dev, "Unable to read date\n");
@@ -140,6 +177,7 @@ static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm)
{
int data, err;
unsigned char buf[7];
+ struct rx8581 *rx8581 = i2c_get_clientdata(client);
dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
@@ -176,7 +214,7 @@ static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm)
}
/* write register's data */
- err = i2c_smbus_write_i2c_block_data(client, RX8581_REG_SC, 7, buf);
+ err = rx8581->write_block_data(client, RX8581_REG_SC, 7, buf);
if (err < 0) {
dev_err(&client->dev, "Unable to write to date registers\n");
return -EIO;
@@ -231,22 +269,39 @@ static const struct rtc_class_ops rx8581_rtc_ops = {
static int rx8581_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct rtc_device *rtc;
+ struct rx8581 *rx8581;
dev_dbg(&client->dev, "%s\n", __func__);
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
- return -ENODEV;
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)
+ && !i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -EIO;
- dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
+ rx8581 = devm_kzalloc(&client->dev, sizeof(struct rx8581), GFP_KERNEL);
+ if (!rx8581)
+ return -ENOMEM;
- rtc = devm_rtc_device_register(&client->dev, rx8581_driver.driver.name,
- &rx8581_rtc_ops, THIS_MODULE);
+ i2c_set_clientdata(client, rx8581);
+ rx8581->client = client;
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
+ rx8581->read_block_data = i2c_smbus_read_i2c_block_data;
+ rx8581->write_block_data = i2c_smbus_write_i2c_block_data;
+ } else {
+ rx8581->read_block_data = rx8581_read_block_data;
+ rx8581->write_block_data = rx8581_write_block_data;
+ }
- i2c_set_clientdata(client, rtc);
+ dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
+
+ rx8581->rtc = devm_rtc_device_register(&client->dev,
+ rx8581_driver.driver.name, &rx8581_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(rx8581->rtc)) {
+ dev_err(&client->dev,
+ "unable to register the class device\n");
+ return PTR_ERR(rx8581->rtc);
+ }
return 0;
}
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index ae8119dc2846..476af93543f6 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -639,6 +639,7 @@ static void s5m_rtc_shutdown(struct platform_device *pdev)
s5m_rtc_enable_smpl(info, false);
}
+#ifdef CONFIG_PM_SLEEP
static int s5m_rtc_resume(struct device *dev)
{
struct s5m_rtc_info *info = dev_get_drvdata(dev);
@@ -660,6 +661,7 @@ static int s5m_rtc_suspend(struct device *dev)
return ret;
}
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
diff --git a/drivers/rtc/rtc-sunxi.c b/drivers/rtc/rtc-sunxi.c
new file mode 100644
index 000000000000..68a35284e5ad
--- /dev/null
+++ b/drivers/rtc/rtc-sunxi.c
@@ -0,0 +1,523 @@
+/*
+ * An RTC driver for Allwinner A10/A20
+ *
+ * Copyright (c) 2013, Carlo Caione <carlo.caione@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/types.h>
+
+#define SUNXI_LOSC_CTRL 0x0000
+#define SUNXI_LOSC_CTRL_RTC_HMS_ACC BIT(8)
+#define SUNXI_LOSC_CTRL_RTC_YMD_ACC BIT(7)
+
+#define SUNXI_RTC_YMD 0x0004
+
+#define SUNXI_RTC_HMS 0x0008
+
+#define SUNXI_ALRM_DHMS 0x000c
+
+#define SUNXI_ALRM_EN 0x0014
+#define SUNXI_ALRM_EN_CNT_EN BIT(8)
+
+#define SUNXI_ALRM_IRQ_EN 0x0018
+#define SUNXI_ALRM_IRQ_EN_CNT_IRQ_EN BIT(0)
+
+#define SUNXI_ALRM_IRQ_STA 0x001c
+#define SUNXI_ALRM_IRQ_STA_CNT_IRQ_PEND BIT(0)
+
+#define SUNXI_MASK_DH 0x0000001f
+#define SUNXI_MASK_SM 0x0000003f
+#define SUNXI_MASK_M 0x0000000f
+#define SUNXI_MASK_LY 0x00000001
+#define SUNXI_MASK_D 0x00000ffe
+
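+/* extract/insert a field of width "mask" at bit position "shift" in a register value */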
+#define SUNXI_GET(x, mask, shift) (((x) & ((mask) << (shift))) \
+ >> (shift))
+
+#define SUNXI_SET(x, mask, shift) (((x) & (mask)) << (shift))
+
+/*
+ * Get date values
+ */
+#define SUNXI_DATE_GET_DAY_VALUE(x) SUNXI_GET(x, SUNXI_MASK_DH, 0)
+#define SUNXI_DATE_GET_MON_VALUE(x) SUNXI_GET(x, SUNXI_MASK_M, 8)
+#define SUNXI_DATE_GET_YEAR_VALUE(x, mask) SUNXI_GET(x, mask, 16)
+
+/*
+ * Get time values
+ */
+#define SUNXI_TIME_GET_SEC_VALUE(x) SUNXI_GET(x, SUNXI_MASK_SM, 0)
+#define SUNXI_TIME_GET_MIN_VALUE(x) SUNXI_GET(x, SUNXI_MASK_SM, 8)
+#define SUNXI_TIME_GET_HOUR_VALUE(x) SUNXI_GET(x, SUNXI_MASK_DH, 16)
+
+/*
+ * Get alarm values
+ */
+#define SUNXI_ALRM_GET_SEC_VALUE(x) SUNXI_GET(x, SUNXI_MASK_SM, 0)
+#define SUNXI_ALRM_GET_MIN_VALUE(x) SUNXI_GET(x, SUNXI_MASK_SM, 8)
+#define SUNXI_ALRM_GET_HOUR_VALUE(x) SUNXI_GET(x, SUNXI_MASK_DH, 16)
+
+/*
+ * Set date values
+ */
+#define SUNXI_DATE_SET_DAY_VALUE(x) SUNXI_DATE_GET_DAY_VALUE(x)
+#define SUNXI_DATE_SET_MON_VALUE(x) SUNXI_SET(x, SUNXI_MASK_M, 8)
+#define SUNXI_DATE_SET_YEAR_VALUE(x, mask) SUNXI_SET(x, mask, 16)
+#define SUNXI_LEAP_SET_VALUE(x, shift) SUNXI_SET(x, SUNXI_MASK_LY, shift)
+
+/*
+ * Set time values
+ */
+#define SUNXI_TIME_SET_SEC_VALUE(x) SUNXI_TIME_GET_SEC_VALUE(x)
+#define SUNXI_TIME_SET_MIN_VALUE(x) SUNXI_SET(x, SUNXI_MASK_SM, 8)
+#define SUNXI_TIME_SET_HOUR_VALUE(x) SUNXI_SET(x, SUNXI_MASK_DH, 16)
+
+/*
+ * Set alarm values
+ */
+#define SUNXI_ALRM_SET_SEC_VALUE(x) SUNXI_ALRM_GET_SEC_VALUE(x)
+#define SUNXI_ALRM_SET_MIN_VALUE(x) SUNXI_SET(x, SUNXI_MASK_SM, 8)
+#define SUNXI_ALRM_SET_HOUR_VALUE(x) SUNXI_SET(x, SUNXI_MASK_DH, 16)
+#define SUNXI_ALRM_SET_DAY_VALUE(x) SUNXI_SET(x, SUNXI_MASK_D, 21)
+
+/*
+ * Time unit conversions
+ */
+#define SEC_IN_MIN 60
+#define SEC_IN_HOUR (60 * SEC_IN_MIN)
+#define SEC_IN_DAY (24 * SEC_IN_HOUR)
+
+/*
+ * The year parameter passed to the driver is usually an offset relative to
+ * the year 1900. This macro is used to convert this offset to another one
+ * relative to the minimum year allowed by the hardware.
+ */
+#define SUNXI_YEAR_OFF(x) ((x)->min - 1900)
+
+/*
+ * min and max year are arbitrarily set considering the limited range of the
+ * hardware register field
+ */
+struct sunxi_rtc_data_year {
+ unsigned int min; /* min year allowed */
+ unsigned int max; /* max year allowed */
+ unsigned int mask; /* mask for the year field */
+ unsigned char leap_shift; /* bit shift to get the leap year */
+};
+
+static struct sunxi_rtc_data_year data_year_param[] = {
+ [0] = {
+ .min = 2010,
+ .max = 2073,
+ .mask = 0x3f,
+ .leap_shift = 22,
+ },
+ [1] = {
+ .min = 1970,
+ .max = 2225,
+ .mask = 0xff,
+ .leap_shift = 24,
+ },
+};
+
+struct sunxi_rtc_dev {
+ struct rtc_device *rtc;
+ struct device *dev;
+ struct sunxi_rtc_data_year *data_year;
+ void __iomem *base;
+ int irq;
+};
+
+static irqreturn_t sunxi_rtc_alarmirq(int irq, void *id)
+{
+ struct sunxi_rtc_dev *chip = (struct sunxi_rtc_dev *) id;
+ u32 val;
+
+ val = readl(chip->base + SUNXI_ALRM_IRQ_STA);
+
+ if (val & SUNXI_ALRM_IRQ_STA_CNT_IRQ_PEND) {
+ val |= SUNXI_ALRM_IRQ_STA_CNT_IRQ_PEND;
+ writel(val, chip->base + SUNXI_ALRM_IRQ_STA);
+
+ rtc_update_irq(chip->rtc, 1, RTC_AF | RTC_IRQF);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void sunxi_rtc_setaie(int to, struct sunxi_rtc_dev *chip)
+{
+ u32 alrm_val = 0;
+ u32 alrm_irq_val = 0;
+
+ if (to) {
+ alrm_val = readl(chip->base + SUNXI_ALRM_EN);
+ alrm_val |= SUNXI_ALRM_EN_CNT_EN;
+
+ alrm_irq_val = readl(chip->base + SUNXI_ALRM_IRQ_EN);
+ alrm_irq_val |= SUNXI_ALRM_IRQ_EN_CNT_IRQ_EN;
+ } else {
+ writel(SUNXI_ALRM_IRQ_STA_CNT_IRQ_PEND,
+ chip->base + SUNXI_ALRM_IRQ_STA);
+ }
+
+ writel(alrm_val, chip->base + SUNXI_ALRM_EN);
+ writel(alrm_irq_val, chip->base + SUNXI_ALRM_IRQ_EN);
+}
+
+static int sunxi_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+ struct sunxi_rtc_dev *chip = dev_get_drvdata(dev);
+ struct rtc_time *alrm_tm = &wkalrm->time;
+ u32 alrm;
+ u32 alrm_en;
+ u32 date;
+
+ alrm = readl(chip->base + SUNXI_ALRM_DHMS);
+ date = readl(chip->base + SUNXI_RTC_YMD);
+
+ alrm_tm->tm_sec = SUNXI_ALRM_GET_SEC_VALUE(alrm);
+ alrm_tm->tm_min = SUNXI_ALRM_GET_MIN_VALUE(alrm);
+ alrm_tm->tm_hour = SUNXI_ALRM_GET_HOUR_VALUE(alrm);
+
+ alrm_tm->tm_mday = SUNXI_DATE_GET_DAY_VALUE(date);
+ alrm_tm->tm_mon = SUNXI_DATE_GET_MON_VALUE(date);
+ alrm_tm->tm_year = SUNXI_DATE_GET_YEAR_VALUE(date,
+ chip->data_year->mask);
+
+ alrm_tm->tm_mon -= 1;
+
+ /*
+ * switch from (data_year->min)-relative offset to
+ * a (1900)-relative one
+ */
+ alrm_tm->tm_year += SUNXI_YEAR_OFF(chip->data_year);
+
+ alrm_en = readl(chip->base + SUNXI_ALRM_IRQ_EN);
+ if (alrm_en & SUNXI_ALRM_EN_CNT_EN)
+ wkalrm->enabled = 1;
+
+ return 0;
+}
+
+static int sunxi_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
+{
+ struct sunxi_rtc_dev *chip = dev_get_drvdata(dev);
+ u32 date, time;
+
+ /*
+ * reread until two consecutive samples match, in case the registers change mid-read
+ */
+ do {
+ date = readl(chip->base + SUNXI_RTC_YMD);
+ time = readl(chip->base + SUNXI_RTC_HMS);
+ } while ((date != readl(chip->base + SUNXI_RTC_YMD)) ||
+ (time != readl(chip->base + SUNXI_RTC_HMS)));
+
+ rtc_tm->tm_sec = SUNXI_TIME_GET_SEC_VALUE(time);
+ rtc_tm->tm_min = SUNXI_TIME_GET_MIN_VALUE(time);
+ rtc_tm->tm_hour = SUNXI_TIME_GET_HOUR_VALUE(time);
+
+ rtc_tm->tm_mday = SUNXI_DATE_GET_DAY_VALUE(date);
+ rtc_tm->tm_mon = SUNXI_DATE_GET_MON_VALUE(date);
+ rtc_tm->tm_year = SUNXI_DATE_GET_YEAR_VALUE(date,
+ chip->data_year->mask);
+
+ rtc_tm->tm_mon -= 1;
+
+ /*
+ * switch from (data_year->min)-relative offset to
+ * a (1900)-relative one
+ */
+ rtc_tm->tm_year += SUNXI_YEAR_OFF(chip->data_year);
+
+ return rtc_valid_tm(rtc_tm);
+}
+
+static int sunxi_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+ struct sunxi_rtc_dev *chip = dev_get_drvdata(dev);
+ struct rtc_time *alrm_tm = &wkalrm->time;
+ struct rtc_time tm_now;
+ u32 alrm = 0;
+ unsigned long time_now = 0;
+ unsigned long time_set = 0;
+ unsigned long time_gap = 0;
+ unsigned long time_gap_day = 0;
+ unsigned long time_gap_hour = 0;
+ unsigned long time_gap_min = 0;
+ int ret = 0;
+
+ ret = sunxi_rtc_gettime(dev, &tm_now);
+ if (ret < 0) {
+ dev_err(dev, "Error in getting time\n");
+ return -EINVAL;
+ }
+
+ rtc_tm_to_time(alrm_tm, &time_set);
+ rtc_tm_to_time(&tm_now, &time_now);
+ if (time_set <= time_now) {
+ dev_err(dev, "Date to set in the past\n");
+ return -EINVAL;
+ }
+
+ time_gap = time_set - time_now;
+ time_gap_day = time_gap / SEC_IN_DAY;
+ time_gap -= time_gap_day * SEC_IN_DAY;
+ time_gap_hour = time_gap / SEC_IN_HOUR;
+ time_gap -= time_gap_hour * SEC_IN_HOUR;
+ time_gap_min = time_gap / SEC_IN_MIN;
+ time_gap -= time_gap_min * SEC_IN_MIN;
+
+ if (time_gap_day > 255) {
+ dev_err(dev, "Day must be in the range 0 - 255\n");
+ return -EINVAL;
+ }
+
+ sunxi_rtc_setaie(0, chip);
+ writel(0, chip->base + SUNXI_ALRM_DHMS);
+ usleep_range(100, 300);
+
+ alrm = SUNXI_ALRM_SET_SEC_VALUE(time_gap) |
+ SUNXI_ALRM_SET_MIN_VALUE(time_gap_min) |
+ SUNXI_ALRM_SET_HOUR_VALUE(time_gap_hour) |
+ SUNXI_ALRM_SET_DAY_VALUE(time_gap_day);
+ writel(alrm, chip->base + SUNXI_ALRM_DHMS);
+
+ writel(0, chip->base + SUNXI_ALRM_IRQ_EN);
+ writel(SUNXI_ALRM_IRQ_EN_CNT_IRQ_EN, chip->base + SUNXI_ALRM_IRQ_EN);
+
+ sunxi_rtc_setaie(wkalrm->enabled, chip);
+
+ return 0;
+}
+
+static int sunxi_rtc_wait(struct sunxi_rtc_dev *chip, int offset,
+ unsigned int mask, unsigned int ms_timeout)
+{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(ms_timeout);
+ u32 reg;
+
+ do {
+ reg = readl(chip->base + offset);
+ reg &= mask;
+
+ if (reg == mask)
+ return 0;
+
+ } while (time_before(jiffies, timeout));
+
+ return -ETIMEDOUT;
+}
+
+static int sunxi_rtc_settime(struct device *dev, struct rtc_time *rtc_tm)
+{
+ struct sunxi_rtc_dev *chip = dev_get_drvdata(dev);
+ u32 date = 0;
+ u32 time = 0;
+ int year;
+
+ /*
+ * the input rtc_tm->tm_year is the offset relative to 1900. We use
+ * the SUNXI_YEAR_OFF macro to rebase it with respect to the min year
+ * allowed by the hardware
+ */
+
+ year = rtc_tm->tm_year + 1900;
+ if (year < chip->data_year->min || year > chip->data_year->max) {
+ dev_err(dev, "rtc only supports year in range %d - %d\n",
+ chip->data_year->min, chip->data_year->max);
+ return -EINVAL;
+ }
+
+ rtc_tm->tm_year -= SUNXI_YEAR_OFF(chip->data_year);
+ rtc_tm->tm_mon += 1;
+
+ date = SUNXI_DATE_SET_DAY_VALUE(rtc_tm->tm_mday) |
+ SUNXI_DATE_SET_MON_VALUE(rtc_tm->tm_mon) |
+ SUNXI_DATE_SET_YEAR_VALUE(rtc_tm->tm_year,
+ chip->data_year->mask);
+
+ if (is_leap_year(year))
+ date |= SUNXI_LEAP_SET_VALUE(1, chip->data_year->leap_shift);
+
+ time = SUNXI_TIME_SET_SEC_VALUE(rtc_tm->tm_sec) |
+ SUNXI_TIME_SET_MIN_VALUE(rtc_tm->tm_min) |
+ SUNXI_TIME_SET_HOUR_VALUE(rtc_tm->tm_hour);
+
+ writel(0, chip->base + SUNXI_RTC_HMS);
+ writel(0, chip->base + SUNXI_RTC_YMD);
+
+ writel(time, chip->base + SUNXI_RTC_HMS);
+
+ /*
+ * After writing the RTC HH-MM-SS register, the
+ * SUNXI_LOSC_CTRL_RTC_HMS_ACC bit is set and it will not
+ * be cleared until the real writing operation is finished
+ */
+
+ if (sunxi_rtc_wait(chip, SUNXI_LOSC_CTRL,
+ SUNXI_LOSC_CTRL_RTC_HMS_ACC, 50)) {
+ dev_err(dev, "Failed to set rtc time.\n");
+ return -ETIMEDOUT;
+ }
+
+ writel(date, chip->base + SUNXI_RTC_YMD);
+
+ /*
+ * After writing the RTC YY-MM-DD register, the
+ * SUNXI_LOSC_CTRL_RTC_YMD_ACC bit is set and it will not
+ * be cleared until the real writing operation is finished
+ */
+
+ if (sunxi_rtc_wait(chip, SUNXI_LOSC_CTRL,
+ SUNXI_LOSC_CTRL_RTC_YMD_ACC, 50)) {
+ dev_err(dev, "Failed to set rtc time.\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int sunxi_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct sunxi_rtc_dev *chip = dev_get_drvdata(dev);
+
+ if (!enabled)
+ sunxi_rtc_setaie(enabled, chip);
+
+ return 0;
+}
+
+static const struct rtc_class_ops sunxi_rtc_ops = {
+ .read_time = sunxi_rtc_gettime,
+ .set_time = sunxi_rtc_settime,
+ .read_alarm = sunxi_rtc_getalarm,
+ .set_alarm = sunxi_rtc_setalarm,
+ .alarm_irq_enable = sunxi_rtc_alarm_irq_enable
+};
+
+static const struct of_device_id sunxi_rtc_dt_ids[] = {
+ { .compatible = "allwinner,sun4i-rtc", .data = &data_year_param[0] },
+ { .compatible = "allwinner,sun7i-a20-rtc", .data = &data_year_param[1] },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sunxi_rtc_dt_ids);
+
+static int sunxi_rtc_probe(struct platform_device *pdev)
+{
+ struct sunxi_rtc_dev *chip;
+ struct resource *res;
+ const struct of_device_id *of_id;
+ int ret;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, chip);
+ chip->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ chip->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(chip->base))
+ return PTR_ERR(chip->base);
+
+ chip->irq = platform_get_irq(pdev, 0);
+ if (chip->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ return chip->irq;
+ }
+ ret = devm_request_irq(&pdev->dev, chip->irq, sunxi_rtc_alarmirq,
+ 0, dev_name(&pdev->dev), chip);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not request IRQ\n");
+ return ret;
+ }
+
+ of_id = of_match_device(sunxi_rtc_dt_ids, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "Unable to setup RTC data\n");
+ return -ENODEV;
+ }
+ chip->data_year = (struct sunxi_rtc_data_year *) of_id->data;
+
+ /* clear the alarm count value */
+ writel(0, chip->base + SUNXI_ALRM_DHMS);
+
+ /* disable the alarm so it does not generate a pending irq */
+ writel(0, chip->base + SUNXI_ALRM_EN);
+
+ /* disable the alarm week/cnt irq so it is not signalled to the cpu */
+ writel(0, chip->base + SUNXI_ALRM_IRQ_EN);
+
+ /* clear alarm week/cnt irq pending */
+ writel(SUNXI_ALRM_IRQ_STA_CNT_IRQ_PEND, chip->base +
+ SUNXI_ALRM_IRQ_STA);
+
+ chip->rtc = rtc_device_register("rtc-sunxi", &pdev->dev,
+ &sunxi_rtc_ops, THIS_MODULE);
+ if (IS_ERR(chip->rtc)) {
+ dev_err(&pdev->dev, "unable to register device\n");
+ return PTR_ERR(chip->rtc);
+ }
+
+ dev_info(&pdev->dev, "RTC enabled\n");
+
+ return 0;
+}
+
+static int sunxi_rtc_remove(struct platform_device *pdev)
+{
+ struct sunxi_rtc_dev *chip = platform_get_drvdata(pdev);
+
+ rtc_device_unregister(chip->rtc);
+
+ return 0;
+}
+
+static struct platform_driver sunxi_rtc_driver = {
+ .probe = sunxi_rtc_probe,
+ .remove = sunxi_rtc_remove,
+ .driver = {
+ .name = "sunxi-rtc",
+ .owner = THIS_MODULE,
+ .of_match_table = sunxi_rtc_dt_ids,
+ },
+};
+
+module_platform_driver(sunxi_rtc_driver);
+
+MODULE_DESCRIPTION("sunxi RTC driver");
+MODULE_AUTHOR("Carlo Caione <carlo.caione@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index c2e80d7ca5e2..1915464e4cd6 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -479,7 +479,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
u8 rd_reg;
if (irq <= 0)
- goto out1;
+ return ret;
/* Initialize the register map */
if (twl_class_is_4030())
@@ -489,7 +489,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
if (ret < 0)
- goto out1;
+ return ret;
if (rd_reg & BIT_RTC_STATUS_REG_POWER_UP_M)
dev_warn(&pdev->dev, "Power up reset detected.\n");
@@ -500,7 +500,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
/* Clear RTC Power up reset and pending alarm interrupts */
ret = twl_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG);
if (ret < 0)
- goto out1;
+ return ret;
if (twl_class_is_6030()) {
twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
@@ -512,7 +512,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Enabling TWL-RTC\n");
ret = twl_rtc_write_u8(BIT_RTC_CTRL_REG_STOP_RTC_M, REG_RTC_CTRL_REG);
if (ret < 0)
- goto out1;
+ return ret;
/* ensure interrupts are disabled, bootloaders can be strange */
ret = twl_rtc_write_u8(0, REG_RTC_INTERRUPTS_REG);
@@ -522,34 +522,29 @@ static int twl_rtc_probe(struct platform_device *pdev)
/* init cached IRQ enable bits */
ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
if (ret < 0)
- goto out1;
+ return ret;
device_init_wakeup(&pdev->dev, 1);
- rtc = rtc_device_register(pdev->name,
- &pdev->dev, &twl_rtc_ops, THIS_MODULE);
+ rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &twl_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
PTR_ERR(rtc));
- goto out1;
+ return PTR_ERR(rtc);
}
- ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- dev_name(&rtc->dev), rtc);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ twl_rtc_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ dev_name(&rtc->dev), rtc);
if (ret < 0) {
dev_err(&pdev->dev, "IRQ is not free.\n");
- goto out2;
+ return ret;
}
platform_set_drvdata(pdev, rtc);
return 0;
-
-out2:
- rtc_device_unregister(rtc);
-out1:
- return ret;
}
/*
@@ -559,9 +554,6 @@ out1:
static int twl_rtc_remove(struct platform_device *pdev)
{
/* leave rtc running, but disable irqs */
- struct rtc_device *rtc = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
-
mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
if (twl_class_is_6030()) {
@@ -571,10 +563,6 @@ static int twl_rtc_remove(struct platform_device *pdev)
REG_INT_MSK_STS_A);
}
-
- free_irq(irq, rtc);
-
- rtc_device_unregister(rtc);
return 0;
}
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index aabc22c587fb..88c9c92e89fd 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -293,7 +293,7 @@ static int rtc_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- rtc1_base = ioremap(res->start, resource_size(res));
+ rtc1_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!rtc1_base)
return -EBUSY;
@@ -303,13 +303,14 @@ static int rtc_probe(struct platform_device *pdev)
goto err_rtc1_iounmap;
}
- rtc2_base = ioremap(res->start, resource_size(res));
+ rtc2_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!rtc2_base) {
retval = -EBUSY;
goto err_rtc1_iounmap;
}
- rtc = rtc_device_register(rtc_name, &pdev->dev, &vr41xx_rtc_ops, THIS_MODULE);
+ rtc = devm_rtc_device_register(&pdev->dev, rtc_name, &vr41xx_rtc_ops,
+ THIS_MODULE);
if (IS_ERR(rtc)) {
retval = PTR_ERR(rtc);
goto err_iounmap_all;
@@ -330,24 +331,24 @@ static int rtc_probe(struct platform_device *pdev)
aie_irq = platform_get_irq(pdev, 0);
if (aie_irq <= 0) {
retval = -EBUSY;
- goto err_device_unregister;
+ goto err_iounmap_all;
}
- retval = request_irq(aie_irq, elapsedtime_interrupt, 0,
- "elapsed_time", pdev);
+ retval = devm_request_irq(&pdev->dev, aie_irq, elapsedtime_interrupt, 0,
+ "elapsed_time", pdev);
if (retval < 0)
- goto err_device_unregister;
+ goto err_iounmap_all;
pie_irq = platform_get_irq(pdev, 1);
if (pie_irq <= 0) {
retval = -EBUSY;
- goto err_free_irq;
+ goto err_iounmap_all;
}
- retval = request_irq(pie_irq, rtclong1_interrupt, 0,
- "rtclong1", pdev);
+ retval = devm_request_irq(&pdev->dev, pie_irq, rtclong1_interrupt, 0,
+ "rtclong1", pdev);
if (retval < 0)
- goto err_free_irq;
+ goto err_iounmap_all;
platform_set_drvdata(pdev, rtc);
@@ -358,47 +359,20 @@ static int rtc_probe(struct platform_device *pdev)
return 0;
-err_free_irq:
- free_irq(aie_irq, pdev);
-
-err_device_unregister:
- rtc_device_unregister(rtc);
-
err_iounmap_all:
- iounmap(rtc2_base);
rtc2_base = NULL;
err_rtc1_iounmap:
- iounmap(rtc1_base);
rtc1_base = NULL;
return retval;
}
-static int rtc_remove(struct platform_device *pdev)
-{
- struct rtc_device *rtc;
-
- rtc = platform_get_drvdata(pdev);
- if (rtc)
- rtc_device_unregister(rtc);
-
- free_irq(aie_irq, pdev);
- free_irq(pie_irq, pdev);
- if (rtc1_base)
- iounmap(rtc1_base);
- if (rtc2_base)
- iounmap(rtc2_base);
-
- return 0;
-}
-
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:RTC");
static struct platform_driver rtc_platform_driver = {
.probe = rtc_probe,
- .remove = rtc_remove,
.driver = {
.name = rtc_name,
.owner = THIS_MODULE,
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index f302efa937ef..1eef0f586950 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3386,7 +3386,7 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/*
- * safe offline allready running
+ * safe offline already running
* could only be called by normal offline so safe_offline flag
* needs to be removed to run normal offline and kill all I/O
*/
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 92bd22ce6760..9cbc567698ce 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -504,7 +504,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
struct dasd_diag_req *dreq;
struct dasd_diag_bio *dbio;
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
char *dst;
unsigned int count, datasize;
sector_t recid, first_rec, last_rec;
@@ -525,10 +525,10 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
/* Check struct bio and count the number of blocks for the request. */
count = 0;
rq_for_each_segment(bv, req, iter) {
- if (bv->bv_len & (blksize - 1))
+ if (bv.bv_len & (blksize - 1))
/* Fba can only do full blocks. */
return ERR_PTR(-EINVAL);
- count += bv->bv_len >> (block->s2b_shift + 9);
+ count += bv.bv_len >> (block->s2b_shift + 9);
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
@@ -545,8 +545,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
dbio = dreq->bio;
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- for (off = 0; off < bv->bv_len; off += blksize) {
+ dst = page_address(bv.bv_page) + bv.bv_offset;
+ for (off = 0; off < bv.bv_len; off += blksize) {
memset(dbio, 0, sizeof (struct dasd_diag_bio));
dbio->type = rw_cmd;
dbio->block_number = recid + 1;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 95e45782692f..2e8e0755070b 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2551,7 +2551,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
char *dst;
unsigned int off;
int count, cidaw, cplength, datasize;
@@ -2573,13 +2573,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
count = 0;
cidaw = 0;
rq_for_each_segment(bv, req, iter) {
- if (bv->bv_len & (blksize - 1))
+ if (bv.bv_len & (blksize - 1))
/* Eckd can only do full blocks. */
return ERR_PTR(-EINVAL);
- count += bv->bv_len >> (block->s2b_shift + 9);
+ count += bv.bv_len >> (block->s2b_shift + 9);
#if defined(CONFIG_64BIT)
- if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
- cidaw += bv->bv_len >> (block->s2b_shift + 9);
+ if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
+ cidaw += bv.bv_len >> (block->s2b_shift + 9);
#endif
}
/* Paranoia. */
@@ -2650,16 +2650,16 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
last_rec - recid + 1, cmd, basedev, blksize);
}
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
+ dst = page_address(bv.bv_page) + bv.bv_offset;
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
GFP_DMA | __GFP_NOWARN);
if (copy && rq_data_dir(req) == WRITE)
- memcpy(copy + bv->bv_offset, dst, bv->bv_len);
+ memcpy(copy + bv.bv_offset, dst, bv.bv_len);
if (copy)
- dst = copy + bv->bv_offset;
+ dst = copy + bv.bv_offset;
}
- for (off = 0; off < bv->bv_len; off += blksize) {
+ for (off = 0; off < bv.bv_len; off += blksize) {
sector_t trkid = recid;
unsigned int recoffs = sector_div(trkid, blk_per_trk);
rcmd = cmd;
@@ -2735,7 +2735,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
char *dst, *idaw_dst;
unsigned int cidaw, cplength, datasize;
unsigned int tlf;
@@ -2813,8 +2813,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
idaw_dst = NULL;
idaw_len = 0;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- seg_len = bv->bv_len;
+ dst = page_address(bv.bv_page) + bv.bv_offset;
+ seg_len = bv.bv_len;
while (seg_len) {
if (new_track) {
trkid = recid;
@@ -3039,7 +3039,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
{
struct dasd_ccw_req *cqr;
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
char *dst;
unsigned int trkcount, ctidaw;
unsigned char cmd;
@@ -3125,8 +3125,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
new_track = 1;
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- seg_len = bv->bv_len;
+ dst = page_address(bv.bv_page) + bv.bv_offset;
+ seg_len = bv.bv_len;
while (seg_len) {
if (new_track) {
trkid = recid;
@@ -3158,9 +3158,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
}
} else {
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
+ dst = page_address(bv.bv_page) + bv.bv_offset;
last_tidaw = itcw_add_tidaw(itcw, 0x00,
- dst, bv->bv_len);
+ dst, bv.bv_len);
if (IS_ERR(last_tidaw)) {
ret = -EINVAL;
goto out_error;
@@ -3278,7 +3278,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
char *dst;
unsigned char cmd;
unsigned int trkcount;
@@ -3378,8 +3378,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
}
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- seg_len = bv->bv_len;
+ dst = page_address(bv.bv_page) + bv.bv_offset;
+ seg_len = bv.bv_len;
if (cmd == DASD_ECKD_CCW_READ_TRACK)
memset(dst, 0, seg_len);
if (!len_to_track_end) {
@@ -3424,7 +3424,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
struct dasd_eckd_private *private;
struct ccw1 *ccw;
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
char *dst, *cda;
unsigned int blksize, blk_per_trk, off;
sector_t recid;
@@ -3442,8 +3442,8 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
ccw++;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- for (off = 0; off < bv->bv_len; off += blksize) {
+ dst = page_address(bv.bv_page) + bv.bv_offset;
+ for (off = 0; off < bv.bv_len; off += blksize) {
/* Skip locate record. */
if (private->uses_cdl && recid <= 2*blk_per_trk)
ccw++;
@@ -3454,7 +3454,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
cda = (char *)((addr_t) ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
- memcpy(dst, cda, bv->bv_len);
+ memcpy(dst, cda, bv.bv_len);
kmem_cache_free(dasd_page_cache,
(void *)((addr_t)cda & PAGE_MASK));
}
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 9cbc8c32ba59..2c8e68bf9a1c 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -260,7 +260,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
char *dst;
int count, cidaw, cplength, datasize;
sector_t recid, first_rec, last_rec;
@@ -283,13 +283,13 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
count = 0;
cidaw = 0;
rq_for_each_segment(bv, req, iter) {
- if (bv->bv_len & (blksize - 1))
+ if (bv.bv_len & (blksize - 1))
/* Fba can only do full blocks. */
return ERR_PTR(-EINVAL);
- count += bv->bv_len >> (block->s2b_shift + 9);
+ count += bv.bv_len >> (block->s2b_shift + 9);
#if defined(CONFIG_64BIT)
- if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
- cidaw += bv->bv_len / blksize;
+ if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
+ cidaw += bv.bv_len / blksize;
#endif
}
/* Paranoia. */
@@ -326,16 +326,16 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
}
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
+ dst = page_address(bv.bv_page) + bv.bv_offset;
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
GFP_DMA | __GFP_NOWARN);
if (copy && rq_data_dir(req) == WRITE)
- memcpy(copy + bv->bv_offset, dst, bv->bv_len);
+ memcpy(copy + bv.bv_offset, dst, bv.bv_len);
if (copy)
- dst = copy + bv->bv_offset;
+ dst = copy + bv.bv_offset;
}
- for (off = 0; off < bv->bv_len; off += blksize) {
+ for (off = 0; off < bv.bv_len; off += blksize) {
/* Locate record for stupid devices. */
if (private->rdc_data.mode.bits.data_chain == 0) {
ccw[-1].flags |= CCW_FLAG_CC;
@@ -384,7 +384,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
struct dasd_fba_private *private;
struct ccw1 *ccw;
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
char *dst, *cda;
unsigned int blksize, off;
int status;
@@ -399,8 +399,8 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
if (private->rdc_data.mode.bits.data_chain != 0)
ccw++;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- for (off = 0; off < bv->bv_len; off += blksize) {
+ dst = page_address(bv.bv_page) + bv.bv_offset;
+ for (off = 0; off < bv.bv_len; off += blksize) {
/* Skip locate record. */
if (private->rdc_data.mode.bits.data_chain == 0)
ccw++;
@@ -411,7 +411,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
cda = (char *)((addr_t) ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
- memcpy(dst, cda, bv->bv_len);
+ memcpy(dst, cda, bv.bv_len);
kmem_cache_free(dasd_page_cache,
(void *)((addr_t)cda & PAGE_MASK));
}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 6eca019bcf30..ebf41e228e55 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -808,18 +808,19 @@ static void
dcssblk_make_request(struct request_queue *q, struct bio *bio)
{
struct dcssblk_dev_info *dev_info;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
unsigned long index;
unsigned long page_addr;
unsigned long source_addr;
unsigned long bytes_done;
- int i;
bytes_done = 0;
dev_info = bio->bi_bdev->bd_disk->private_data;
if (dev_info == NULL)
goto fail;
- if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+ if ((bio->bi_iter.bi_sector & 7) != 0 ||
+ (bio->bi_iter.bi_size & 4095) != 0)
/* Request is not page-aligned. */
goto fail;
if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
@@ -842,22 +843,22 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
}
}
- index = (bio->bi_sector >> 3);
- bio_for_each_segment(bvec, bio, i) {
+ index = (bio->bi_iter.bi_sector >> 3);
+ bio_for_each_segment(bvec, bio, iter) {
page_addr = (unsigned long)
- page_address(bvec->bv_page) + bvec->bv_offset;
+ page_address(bvec.bv_page) + bvec.bv_offset;
source_addr = dev_info->start + (index<<12) + bytes_done;
- if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0)
+ if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
// More paranoia.
goto fail;
if (bio_data_dir(bio) == READ) {
memcpy((void*)page_addr, (void*)source_addr,
- bvec->bv_len);
+ bvec.bv_len);
} else {
memcpy((void*)source_addr, (void*)page_addr,
- bvec->bv_len);
+ bvec.bv_len);
}
- bytes_done += bvec->bv_len;
+ bytes_done += bvec.bv_len;
}
bio_endio(bio, 0);
return;
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index d0ab5019d885..76bed1743db1 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -130,7 +130,7 @@ static void scm_request_prepare(struct scm_request *scmrq)
struct aidaw *aidaw = scmrq->aidaw;
struct msb *msb = &scmrq->aob->msb[0];
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
msb->bs = MSB_BS_4K;
scmrq->aob->request.msb_count = 1;
@@ -142,9 +142,9 @@ static void scm_request_prepare(struct scm_request *scmrq)
msb->data_addr = (u64) aidaw;
rq_for_each_segment(bv, scmrq->request, iter) {
- WARN_ON(bv->bv_offset);
- msb->blk_count += bv->bv_len >> 12;
- aidaw->data_addr = (u64) page_address(bv->bv_page);
+ WARN_ON(bv.bv_offset);
+ msb->blk_count += bv.bv_len >> 12;
+ aidaw->data_addr = (u64) page_address(bv.bv_page);
aidaw++;
}
}
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 27f930cd657f..9aae909d47a5 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -122,7 +122,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
struct aidaw *aidaw = scmrq->aidaw;
struct msb *msb = &scmrq->aob->msb[0];
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
int i = 0;
u64 addr;
@@ -163,7 +163,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
i++;
}
rq_for_each_segment(bv, req, iter) {
- aidaw->data_addr = (u64) page_address(bv->bv_page);
+ aidaw->data_addr = (u64) page_address(bv.bv_page);
aidaw++;
i++;
}
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 464dd29d06c0..6969d39f1e2e 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -184,25 +184,26 @@ static unsigned long xpram_highest_page_index(void)
static void xpram_make_request(struct request_queue *q, struct bio *bio)
{
xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
unsigned int index;
unsigned long page_addr;
unsigned long bytes;
- int i;
- if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+ if ((bio->bi_iter.bi_sector & 7) != 0 ||
+ (bio->bi_iter.bi_size & 4095) != 0)
/* Request is not page-aligned. */
goto fail;
- if ((bio->bi_size >> 12) > xdev->size)
+ if ((bio->bi_iter.bi_size >> 12) > xdev->size)
/* Request size exceeds the device size. */
goto fail;
- if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset)
+ if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
goto fail;
- index = (bio->bi_sector >> 3) + xdev->offset;
- bio_for_each_segment(bvec, bio, i) {
+ index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
+ bio_for_each_segment(bvec, bio, iter) {
page_addr = (unsigned long)
- kmap(bvec->bv_page) + bvec->bv_offset;
- bytes = bvec->bv_len;
+ kmap(bvec.bv_page) + bvec.bv_offset;
+ bytes = bvec.bv_len;
if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
/* More paranoia. */
goto fail;
@@ -257,6 +258,7 @@ static int __init xpram_setup_sizes(unsigned long pages)
unsigned long mem_needed;
unsigned long mem_auto;
unsigned long long size;
+ char *sizes_end;
int mem_auto_no;
int i;
@@ -275,8 +277,8 @@ static int __init xpram_setup_sizes(unsigned long pages)
mem_auto_no = 0;
for (i = 0; i < xpram_devs; i++) {
if (sizes[i]) {
- size = simple_strtoull(sizes[i], &sizes[i], 0);
- switch (sizes[i][0]) {
+ size = simple_strtoull(sizes[i], &sizes_end, 0);
+ switch (*sizes_end) {
case 'g':
case 'G':
size <<= 20;
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 6fbe09686d18..fea76aed9eea 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -183,7 +183,6 @@ extern unsigned long sclp_console_full;
extern u8 sclp_fac84;
extern unsigned long long sclp_rzm;
extern unsigned long long sclp_rnmax;
-extern __initdata int sclp_early_read_info_sccb_valid;
/* useful inlines */
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index eaa21d542c5c..49af8eeb90ea 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -455,8 +455,6 @@ static int __init sclp_detect_standby_memory(void)
if (OLDMEM_BASE) /* No standby memory in kdump mode */
return 0;
- if (!sclp_early_read_info_sccb_valid)
- return 0;
if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
return 0;
rc = -ENOMEM;
@@ -702,3 +700,8 @@ out:
free_page((unsigned long) sccb);
return rc;
}
+
+bool sclp_has_sprp(void)
+{
+ return !!(sclp_fac84 & 0x2);
+}
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 1465e9563101..82f2c389b4d1 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -35,11 +35,12 @@ struct read_info_sccb {
u8 _reserved5[4096 - 112]; /* 112-4095 */
} __packed __aligned(PAGE_SIZE);
-static __initdata struct read_info_sccb early_read_info_sccb;
-static __initdata char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE);
+static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
+static unsigned int sclp_con_has_vt220 __initdata;
+static unsigned int sclp_con_has_linemode __initdata;
static unsigned long sclp_hsa_size;
+static struct sclp_ipl_info sclp_ipl_info;
-__initdata int sclp_early_read_info_sccb_valid;
u64 sclp_facilities;
u8 sclp_fac84;
unsigned long long sclp_rzm;
@@ -63,15 +64,12 @@ out:
return rc;
}
-static void __init sclp_read_info_early(void)
+static int __init sclp_read_info_early(struct read_info_sccb *sccb)
{
- int rc;
- int i;
- struct read_info_sccb *sccb;
+ int rc, i;
sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
SCLP_CMDW_READ_SCP_INFO};
- sccb = &early_read_info_sccb;
for (i = 0; i < ARRAY_SIZE(commands); i++) {
do {
memset(sccb, 0, sizeof(*sccb));
@@ -83,24 +81,19 @@ static void __init sclp_read_info_early(void)
if (rc)
break;
- if (sccb->header.response_code == 0x10) {
- sclp_early_read_info_sccb_valid = 1;
- break;
- }
+ if (sccb->header.response_code == 0x10)
+ return 0;
if (sccb->header.response_code != 0x1f0)
break;
}
+ return -EIO;
}
-static void __init sclp_facilities_detect(void)
+static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
{
- struct read_info_sccb *sccb;
-
- sclp_read_info_early();
- if (!sclp_early_read_info_sccb_valid)
+ if (sclp_read_info_early(sccb))
return;
- sccb = &early_read_info_sccb;
sclp_facilities = sccb->facilities;
sclp_fac84 = sccb->fac84;
if (sccb->fac85 & 0x02)
@@ -108,30 +101,22 @@ static void __init sclp_facilities_detect(void)
sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp_rzm <<= 20;
+
+ /* Save IPL information */
+ sclp_ipl_info.is_valid = 1;
+ if (sccb->flags & 0x2)
+ sclp_ipl_info.has_dump = 1;
+ memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);
}
bool __init sclp_has_linemode(void)
{
- struct init_sccb *sccb = (void *) &sccb_early;
-
- if (sccb->header.response_code != 0x20)
- return 0;
- if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
- return 0;
- if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
- return 0;
- return 1;
+ return !!sclp_con_has_linemode;
}
bool __init sclp_has_vt220(void)
{
- struct init_sccb *sccb = (void *) &sccb_early;
-
- if (sccb->header.response_code != 0x20)
- return 0;
- if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
- return 1;
- return 0;
+ return !!sclp_con_has_vt220;
}
unsigned long long sclp_get_rnmax(void)
@@ -146,19 +131,12 @@ unsigned long long sclp_get_rzm(void)
/*
* This function will be called after sclp_facilities_detect(), which gets
- * called from early.c code. Therefore the sccb should have valid contents.
+ * called from early.c code. The sclp_facilities_detect() function retrieves
+ * and saves the IPL information.
*/
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
- struct read_info_sccb *sccb;
-
- if (!sclp_early_read_info_sccb_valid)
- return;
- sccb = &early_read_info_sccb;
- info->is_valid = 1;
- if (sccb->flags & 0x2)
- info->has_dump = 1;
- memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
+ *info = sclp_ipl_info;
}
static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb)
@@ -189,11 +167,10 @@ static void __init sccb_init_eq_size(struct sdias_sccb *sccb)
sccb->evbuf.dbs = 1;
}
-static int __init sclp_set_event_mask(unsigned long receive_mask,
+static int __init sclp_set_event_mask(struct init_sccb *sccb,
+ unsigned long receive_mask,
unsigned long send_mask)
{
- struct init_sccb *sccb = (void *) &sccb_early;
-
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
sccb->mask_length = sizeof(sccb_mask_t);
@@ -202,10 +179,8 @@ static int __init sclp_set_event_mask(unsigned long receive_mask,
return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
}
-static long __init sclp_hsa_size_init(void)
+static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
{
- struct sdias_sccb *sccb = (void *) &sccb_early;
-
sccb_init_eq_size(sccb);
if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
return -EIO;
@@ -214,10 +189,8 @@ static long __init sclp_hsa_size_init(void)
return 0;
}
-static long __init sclp_hsa_copy_wait(void)
+static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
{
- struct sccb_header *sccb = (void *) &sccb_early;
-
memset(sccb, 0, PAGE_SIZE);
sccb->length = PAGE_SIZE;
if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
@@ -230,34 +203,62 @@ unsigned long sclp_get_hsa_size(void)
return sclp_hsa_size;
}
-static void __init sclp_hsa_size_detect(void)
+static void __init sclp_hsa_size_detect(void *sccb)
{
long size;
/* First try synchronous interface (LPAR) */
- if (sclp_set_event_mask(0, 0x40000010))
+ if (sclp_set_event_mask(sccb, 0, 0x40000010))
return;
- size = sclp_hsa_size_init();
+ size = sclp_hsa_size_init(sccb);
if (size < 0)
return;
if (size != 0)
goto out;
/* Then try asynchronous interface (z/VM) */
- if (sclp_set_event_mask(0x00000010, 0x40000010))
+ if (sclp_set_event_mask(sccb, 0x00000010, 0x40000010))
return;
- size = sclp_hsa_size_init();
+ size = sclp_hsa_size_init(sccb);
if (size < 0)
return;
- size = sclp_hsa_copy_wait();
+ size = sclp_hsa_copy_wait(sccb);
if (size < 0)
return;
out:
sclp_hsa_size = size;
}
+static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb)
+{
+ if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
+ return 0;
+ if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
+ return 0;
+ return 1;
+}
+
+static void __init sclp_console_detect(struct init_sccb *sccb)
+{
+ if (sccb->header.response_code != 0x20)
+ return;
+
+ if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
+ sclp_con_has_vt220 = 1;
+
+ if (sclp_con_check_linemode(sccb))
+ sclp_con_has_linemode = 1;
+}
+
void __init sclp_early_detect(void)
{
- sclp_facilities_detect();
- sclp_hsa_size_detect();
- sclp_set_event_mask(0, 0);
+ void *sccb = &sccb_early;
+
+ sclp_facilities_detect(sccb);
+ sclp_hsa_size_detect(sccb);
+
+ /* Turn off SCLP event notifications. Also save remote masks in the
+ * sccb. These are sufficient to detect sclp console capabilities.
+ */
+ sclp_set_event_mask(sccb, 0, 0);
+ sclp_console_detect(sccb);
}
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 3f4ca4e09a4c..e91b89dc6d1f 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -125,10 +125,7 @@ static void tty3270_resize_work(struct work_struct *work);
*/
static void tty3270_set_timer(struct tty3270 *tp, int expires)
{
- if (expires == 0)
- del_timer(&tp->timer);
- else
- mod_timer(&tp->timer, jiffies + expires);
+ mod_timer(&tp->timer, jiffies + expires);
}
/*
@@ -744,7 +741,6 @@ tty3270_free_view(struct tty3270 *tp)
{
int pages;
- del_timer_sync(&tp->timer);
kbd_free(tp->kbd);
raw3270_request_free(tp->kreset);
raw3270_request_free(tp->read);
@@ -877,6 +873,7 @@ tty3270_free(struct raw3270_view *view)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
+ del_timer_sync(&tp->timer);
tty3270_free_screen(tp->screen, tp->view.rows);
tty3270_free_view(tp);
}
@@ -942,7 +939,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
return rc;
}
- tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows);
+ tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
if (IS_ERR(tp->screen)) {
rc = PTR_ERR(tp->screen);
raw3270_put_view(&tp->view);
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 64c467998a90..0efb27f6f199 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -922,8 +922,8 @@ static int ur_set_online(struct ccw_device *cdev)
goto fail_free_cdev;
}
- urd->device = device_create(vmur_class, NULL, urd->char_device->dev,
- NULL, "%s", node_id);
+ urd->device = device_create(vmur_class, &cdev->dev,
+ urd->char_device->dev, NULL, "%s", node_id);
if (IS_ERR(urd->device)) {
rc = PTR_ERR(urd->device);
TRACE("ur_set_online: device_create rc=%d\n", rc);
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index a9fe3de2dec1..b3f791b2c1f8 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -260,16 +260,16 @@ static int blacklist_parse_proc_parameters(char *buf)
parm = strsep(&buf, " ");
- if (strcmp("free", parm) == 0)
+ if (strcmp("free", parm) == 0) {
rc = blacklist_parse_parameters(buf, free, 0);
- else if (strcmp("add", parm) == 0)
+ css_schedule_eval_all_unreg(0);
+ } else if (strcmp("add", parm) == 0)
rc = blacklist_parse_parameters(buf, add, 0);
else if (strcmp("purge", parm) == 0)
return ccw_purge_blacklisted();
else
return -EINVAL;
- css_schedule_reprobe();
return rc;
}
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 959135a01847..fd3367a1dc7a 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -128,14 +128,14 @@ static ssize_t ccwgroup_online_store(struct device *dev,
const char *buf, size_t count)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
- struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
unsigned long value;
int ret;
- if (!dev->driver)
- return -EINVAL;
- if (!try_module_get(gdrv->driver.owner))
- return -EINVAL;
+ device_lock(dev);
+ if (!dev->driver) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = kstrtoul(buf, 0, &value);
if (ret)
@@ -148,7 +148,7 @@ static ssize_t ccwgroup_online_store(struct device *dev,
else
ret = -EINVAL;
out:
- module_put(gdrv->driver.owner);
+ device_unlock(dev);
return (ret == 0) ? count : ret;
}
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 13299f902676..f6b9188c5af5 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -55,6 +55,7 @@ int chsc_error_from_response(int response)
case 0x0004:
return -EOPNOTSUPP;
case 0x000b:
+ case 0x0107: /* "Channel busy" for the op 0x003d */
return -EBUSY;
case 0x0100:
case 0x0102:
@@ -237,26 +238,6 @@ void chsc_chp_offline(struct chp_id chpid)
for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}
-static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
-{
- struct schib schib;
- /*
- * We don't know the device yet, but since a path
- * may be available now to the device we'll have
- * to do recognition again.
- * Since we don't have any idea about which chpid
- * that beast may be on we'll have to do a stsch
- * on all devices, grr...
- */
- if (stsch_err(schid, &schib))
- /* We're through */
- return -ENXIO;
-
- /* Put it on the slow path. */
- css_schedule_eval(schid);
- return 0;
-}
-
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
spin_lock_irq(sch->lock);
@@ -287,8 +268,8 @@ static void s390_process_res_acc(struct chp_link *link)
* The more information we have (info), the less scanning
* will we have to do.
*/
- for_each_subchannel_staged(__s390_process_res_acc,
- s390_process_res_acc_new_sch, link);
+ for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
+ css_schedule_reprobe();
}
static int
@@ -663,19 +644,6 @@ static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
return 0;
}
-static int
-__s390_vary_chpid_on(struct subchannel_id schid, void *data)
-{
- struct schib schib;
-
- if (stsch_err(schid, &schib))
- /* We're through */
- return -ENXIO;
- /* Put it on the slow path. */
- css_schedule_eval(schid);
- return 0;
-}
-
/**
* chsc_chp_vary - propagate channel-path vary operation to subchannels
* @chpid: channel-path ID
@@ -694,7 +662,8 @@ int chsc_chp_vary(struct chp_id chpid, int on)
/* Try to update the channel path description. */
chp_update_desc(chp);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
- __s390_vary_chpid_on, &chpid);
+ NULL, &chpid);
+ css_schedule_reprobe();
} else
for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
NULL, &chpid);
@@ -1234,3 +1203,35 @@ out:
return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);
+
+/**
+ * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
+ * @schid: id of the subchannel on which PNSO is performed
+ * @brinfo_area: request and response block for the operation
+ * @resume_token: resume token for multiblock response
+ * @cnc: Boolean change-notification control
+ *
+ * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
+ *
+ * Returns 0 on success.
+ */
+int chsc_pnso_brinfo(struct subchannel_id schid,
+ struct chsc_pnso_area *brinfo_area,
+ struct chsc_brinfo_resume_token resume_token,
+ int cnc)
+{
+ memset(brinfo_area, 0, sizeof(*brinfo_area));
+ brinfo_area->request.length = 0x0030;
+ brinfo_area->request.code = 0x003d; /* network-subchannel operation */
+ brinfo_area->m = schid.m;
+ brinfo_area->ssid = schid.ssid;
+ brinfo_area->sch = schid.sch_no;
+ brinfo_area->cssid = schid.cssid;
+ brinfo_area->oc = 0; /* Store-network-bridging-information list */
+ brinfo_area->resume_token = resume_token;
+ brinfo_area->n = (cnc != 0);
+ if (chsc(brinfo_area))
+ return -EIO;
+ return chsc_error_from_response(brinfo_area->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 23d072e70eb2..7e53a9c8b0b9 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -61,7 +61,9 @@ struct css_chsc_char {
u32 : 20;
u32 scssc : 1; /* bit 107 */
u32 scsscf : 1; /* bit 108 */
- u32 : 19;
+ u32:7;
+ u32 pnso:1; /* bit 116 */
+ u32:11;
}__attribute__((packed));
extern struct css_chsc_char css_chsc_characteristics;
@@ -188,6 +190,53 @@ struct chsc_scm_info {
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
+struct chsc_brinfo_resume_token {
+ u64 t1;
+ u64 t2;
+} __packed;
+
+struct chsc_brinfo_naihdr {
+ struct chsc_brinfo_resume_token resume_token;
+ u32:32;
+ u32 instance;
+ u32:24;
+ u8 naids;
+ u32 reserved[3];
+} __packed;
+
+struct chsc_pnso_area {
+ struct chsc_header request;
+ u8:2;
+ u8 m:1;
+ u8:5;
+ u8:2;
+ u8 ssid:2;
+ u8 fmt:4;
+ u16 sch;
+ u8:8;
+ u8 cssid;
+ u16:16;
+ u8 oc;
+ u32:24;
+ struct chsc_brinfo_resume_token resume_token;
+ u32 n:1;
+ u32:31;
+ u32 reserved[3];
+ struct chsc_header response;
+ u32:32;
+ struct chsc_brinfo_naihdr naihdr;
+ union {
+ struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0];
+ struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0];
+ struct qdio_brinfo_entry_l2 l2[0];
+ } entries;
+} __packed;
+
+int chsc_pnso_brinfo(struct subchannel_id schid,
+ struct chsc_pnso_area *brinfo_area,
+ struct chsc_brinfo_resume_token resume_token,
+ int cnc);
+
#ifdef CONFIG_SCM_BUS
int scm_update_information(void);
int scm_process_availability_information(void);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 88e35d85d205..8ee88c4ebd83 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -342,8 +342,9 @@ static int cio_check_config(struct subchannel *sch, struct schib *schib)
*/
int cio_commit_config(struct subchannel *sch)
{
- struct schib schib;
int ccode, retry, ret = 0;
+ struct schib schib;
+ struct irb irb;
if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
return -ENODEV;
@@ -367,7 +368,10 @@ int cio_commit_config(struct subchannel *sch)
ret = -EAGAIN;
break;
case 1: /* status pending */
- return -EBUSY;
+ ret = -EBUSY;
+ if (tsch(sch->schid, &irb))
+ return ret;
+ break;
case 2: /* busy */
udelay(100); /* allow for recovery */
ret = -EBUSY;
@@ -403,7 +407,6 @@ EXPORT_SYMBOL_GPL(cio_update_schib);
*/
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
- int retry;
int ret;
CIO_TRACE_EVENT(2, "ensch");
@@ -418,20 +421,14 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
sch->config.isc = sch->isc;
sch->config.intparm = intparm;
- for (retry = 0; retry < 3; retry++) {
+ ret = cio_commit_config(sch);
+ if (ret == -EIO) {
+ /*
+ * Got a program check in msch. Try without
+ * the concurrent sense bit the next time.
+ */
+ sch->config.csense = 0;
ret = cio_commit_config(sch);
- if (ret == -EIO) {
- /*
- * Got a program check in msch. Try without
- * the concurrent sense bit the next time.
- */
- sch->config.csense = 0;
- } else if (ret == -EBUSY) {
- struct irb irb;
- if (tsch(sch->schid, &irb) != 0)
- break;
- } else
- break;
}
CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
@@ -444,7 +441,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
*/
int cio_disable_subchannel(struct subchannel *sch)
{
- int retry;
int ret;
CIO_TRACE_EVENT(2, "dissch");
@@ -456,16 +452,8 @@ int cio_disable_subchannel(struct subchannel *sch)
return -ENODEV;
sch->config.ena = 0;
+ ret = cio_commit_config(sch);
- for (retry = 0; retry < 3; retry++) {
- ret = cio_commit_config(sch);
- if (ret == -EBUSY) {
- struct irb irb;
- if (tsch(sch->schid, &irb) != 0)
- break;
- } else
- break;
- }
CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 8c2cb87bccc5..0268e5fd59b5 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -69,7 +69,8 @@ static int call_fn_known_sch(struct device *dev, void *data)
struct cb_data *cb = data;
int rc = 0;
- idset_sch_del(cb->set, sch->schid);
+ if (cb->set)
+ idset_sch_del(cb->set, sch->schid);
if (cb->fn_known_sch)
rc = cb->fn_known_sch(sch, cb->data);
return rc;
@@ -115,6 +116,13 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
cb.fn_known_sch = fn_known;
cb.fn_unknown_sch = fn_unknown;
+ if (fn_known && !fn_unknown) {
+ /* Skip idset allocation in case of known-only loop. */
+ cb.set = NULL;
+ return bus_for_each_dev(&css_bus_type, NULL, &cb,
+ call_fn_known_sch);
+ }
+
cb.set = idset_sch_new();
if (!cb.set)
/* fall back to brute force scanning in case of oom */
@@ -553,6 +561,9 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
default:
rc = 0;
}
+ /* Allow scheduling here since the containing loop might
+ * take a while. */
+ cond_resched();
}
return rc;
}
@@ -572,7 +583,7 @@ static void css_slow_path_func(struct work_struct *unused)
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
-static DECLARE_WORK(slow_path_work, css_slow_path_func);
+static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;
void css_schedule_eval(struct subchannel_id schid)
@@ -582,7 +593,7 @@ void css_schedule_eval(struct subchannel_id schid)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_sch_add(slow_subchannel_set, schid);
atomic_set(&css_eval_scheduled, 1);
- queue_work(cio_work_q, &slow_path_work);
+ queue_delayed_work(cio_work_q, &slow_path_work, 0);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
@@ -593,7 +604,7 @@ void css_schedule_eval_all(void)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_fill(slow_subchannel_set);
atomic_set(&css_eval_scheduled, 1);
- queue_work(cio_work_q, &slow_path_work);
+ queue_delayed_work(cio_work_q, &slow_path_work, 0);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
@@ -606,7 +617,7 @@ static int __unset_registered(struct device *dev, void *data)
return 0;
}
-static void css_schedule_eval_all_unreg(void)
+void css_schedule_eval_all_unreg(unsigned long delay)
{
unsigned long flags;
struct idset *unreg_set;
@@ -624,7 +635,7 @@ static void css_schedule_eval_all_unreg(void)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_add_set(slow_subchannel_set, unreg_set);
atomic_set(&css_eval_scheduled, 1);
- queue_work(cio_work_q, &slow_path_work);
+ queue_delayed_work(cio_work_q, &slow_path_work, delay);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
idset_free(unreg_set);
}
@@ -637,7 +648,8 @@ void css_wait_for_slow_path(void)
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
- css_schedule_eval_all_unreg();
+ /* Schedule with a delay to allow merging of subsequent calls. */
+ css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 29351321bad6..2c9107e20251 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -133,6 +133,7 @@ extern struct channel_subsystem *channel_subsystems[];
/* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void);
+void css_schedule_eval_all_unreg(unsigned long delay);
int css_complete_work(void);
int sch_is_pseudo_sch(struct subchannel *);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e4a7ab2bb629..e9d783563cbb 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -333,9 +333,9 @@ int ccw_device_set_offline(struct ccw_device *cdev)
if (ret != 0)
return ret;
}
- cdev->online = 0;
spin_lock_irq(cdev->ccwlock);
sch = to_subchannel(cdev->dev.parent);
+ cdev->online = 0;
/* Wait until a final state or DISCONNECTED is reached */
while (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
@@ -446,7 +446,10 @@ int ccw_device_set_online(struct ccw_device *cdev)
ret = cdev->drv->set_online(cdev);
if (ret)
goto rollback;
+
+ spin_lock_irq(cdev->ccwlock);
cdev->online = 1;
+ spin_unlock_irq(cdev->ccwlock);
return 0;
rollback:
@@ -546,17 +549,12 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
if (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
ret = -EAGAIN;
- goto out_onoff;
+ goto out;
}
/* Prevent conflict between pending work and on-/offline processing.*/
if (work_pending(&cdev->private->todo_work)) {
ret = -EAGAIN;
- goto out_onoff;
- }
-
- if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) {
- ret = -EINVAL;
- goto out_onoff;
+ goto out;
}
if (!strncmp(buf, "force\n", count)) {
force = 1;
@@ -568,6 +566,8 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
}
if (ret)
goto out;
+
+ device_lock(dev);
switch (i) {
case 0:
ret = online_store_handle_offline(cdev);
@@ -578,10 +578,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
default:
ret = -EINVAL;
}
+ device_unlock(dev);
+
out:
- if (cdev->drv)
- module_put(cdev->drv->driver.owner);
-out_onoff:
atomic_set(&cdev->private->onoff, 0);
return (ret < 0) ? ret : count;
}
@@ -1745,8 +1744,7 @@ ccw_device_probe (struct device *dev)
return 0;
}
-static int
-ccw_device_remove (struct device *dev)
+static int ccw_device_remove(struct device *dev)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_driver *cdrv = cdev->drv;
@@ -1754,9 +1752,10 @@ ccw_device_remove (struct device *dev)
if (cdrv->remove)
cdrv->remove(cdev);
+
+ spin_lock_irq(cdev->ccwlock);
if (cdev->online) {
cdev->online = 0;
- spin_lock_irq(cdev->ccwlock);
ret = ccw_device_offline(cdev);
spin_unlock_irq(cdev->ccwlock);
if (ret == 0)
@@ -1769,10 +1768,12 @@ ccw_device_remove (struct device *dev)
cdev->private->dev_id.devno);
/* Give up reference obtained in ccw_device_set_online(). */
put_device(&cdev->dev);
+ spin_lock_irq(cdev->ccwlock);
}
ccw_device_set_timeout(cdev, 0);
cdev->drv = NULL;
cdev->private->int_class = IRQIO_CIO;
+ spin_unlock_irq(cdev->ccwlock);
return 0;
}
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 8acaae18bd11..a563e4c00590 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -359,14 +359,12 @@ static inline int multicast_outbound(struct qdio_q *q)
#define need_siga_sync_out_after_pci(q) \
(unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
-#define for_each_input_queue(irq_ptr, q, i) \
- for (i = 0, q = irq_ptr->input_qs[0]; \
- i < irq_ptr->nr_input_qs; \
- q = irq_ptr->input_qs[++i])
-#define for_each_output_queue(irq_ptr, q, i) \
- for (i = 0, q = irq_ptr->output_qs[0]; \
- i < irq_ptr->nr_output_qs; \
- q = irq_ptr->output_qs[++i])
+#define for_each_input_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_input_qs && \
+ ({ q = irq_ptr->input_qs[i]; 1; }); i++)
+#define for_each_output_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_output_qs && \
+ ({ q = irq_ptr->output_qs[i]; 1; }); i++)
#define prev_buf(bufnr) \
((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 3e602e8affa7..77466c4faabb 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -996,7 +996,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
}
}
- if (!pci_out_supported(q))
+ if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
return;
for_each_output_queue(irq_ptr, q, i) {
@@ -1752,6 +1752,97 @@ int qdio_stop_irq(struct ccw_device *cdev, int nr)
}
EXPORT_SYMBOL(qdio_stop_irq);
+/**
+ * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
+ * @schid: Subchannel ID.
+ * @cnc: Boolean Change-Notification Control
+ * @response: Response code will be stored at this address
+ * @cb: Callback function will be executed for each element
+ * of the address list
+ * @priv: Pointer passed unchanged from the caller to the callback
+ * @type: Type of the address entry passed to the callback
+ * @entry: Entry containing the address of the specified type
+ *
+ * Performs "Store-network-bridging-information list" operation and calls
+ * the callback function for every entry in the list. If "change-
+ * notification-control" is set, further changes in the address list
+ * will be reported via the IPA command.
+ */
+int qdio_pnso_brinfo(struct subchannel_id schid,
+ int cnc, u16 *response,
+ void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
+ void *entry),
+ void *priv)
+{
+ struct chsc_pnso_area *rr;
+ int rc;
+ u32 prev_instance = 0;
+ int isfirstblock = 1;
+ int i, size, elems;
+
+ rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
+ if (rr == NULL)
+ return -ENOMEM;
+ do {
+ /* on the first iteration, naihdr.resume_token will be zero */
+ rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
+ if (rc != 0 && rc != -EBUSY)
+ goto out;
+ if (rr->response.code != 1) {
+ rc = -EIO;
+ continue;
+ } else
+ rc = 0;
+
+ if (cb == NULL)
+ continue;
+
+ size = rr->naihdr.naids;
+ elems = (rr->response.length -
+ sizeof(struct chsc_header) -
+ sizeof(struct chsc_brinfo_naihdr)) /
+ size;
+
+ if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
+ /* Inform the caller that they need to scrap */
+ /* the data that was already reported via cb */
+ rc = -EAGAIN;
+ break;
+ }
+ isfirstblock = 0;
+ prev_instance = rr->naihdr.instance;
+ for (i = 0; i < elems; i++)
+ switch (size) {
+ case sizeof(struct qdio_brinfo_entry_l3_ipv6):
+ (*cb)(priv, l3_ipv6_addr,
+ &rr->entries.l3_ipv6[i]);
+ break;
+ case sizeof(struct qdio_brinfo_entry_l3_ipv4):
+ (*cb)(priv, l3_ipv4_addr,
+ &rr->entries.l3_ipv4[i]);
+ break;
+ case sizeof(struct qdio_brinfo_entry_l2):
+ (*cb)(priv, l2_addr_lnid,
+ &rr->entries.l2[i]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ rc = -EIO;
+ goto out;
+ }
+ } while (rr->response.code == 0x0107 || /* channel busy */
+ (rr->response.code == 1 && /* list stored */
+ /* resume token is non-zero => list incomplete */
+ (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
+ (*response) = rr->response.code;
+
+out:
+ free_page((unsigned long)rr);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
+
static int __init init_QDIO(void)
{
int rc;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 02300dcfac91..ab3baa7f9508 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -591,7 +591,13 @@ static int ap_init_queue(ap_qid_t qid)
if (rc != -ENODEV && rc != -EBUSY)
break;
if (i < AP_MAX_RESET - 1) {
- udelay(5);
+ /* The time we wait before giving up is 0.7sec * 90.
+ * Since the actual request (in progress) will not be
+ * interrupted immediately by the reset command, we
+ * have to be patient. In the worst case we have to
+ * wait 60sec plus the reset time (some msec).
+ */
+ schedule_timeout(AP_RESET_TIMEOUT);
status = ap_test_queue(qid, &dummy, &dummy);
}
}
@@ -992,6 +998,28 @@ static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
+static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
+{
+ if (ap_configuration != NULL) { /* QCI supported */
+ if (test_facility(76)) { /* format 1 - 256 bit domain field */
+ return snprintf(buf, PAGE_SIZE,
+ "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_configuration->adm[0], ap_configuration->adm[1],
+ ap_configuration->adm[2], ap_configuration->adm[3],
+ ap_configuration->adm[4], ap_configuration->adm[5],
+ ap_configuration->adm[6], ap_configuration->adm[7]);
+ } else { /* format 0 - 16 bit domain field */
+ return snprintf(buf, PAGE_SIZE, "%08x%08x\n",
+ ap_configuration->adm[0], ap_configuration->adm[1]);
+ }
+ } else {
+ return snprintf(buf, PAGE_SIZE, "not supported\n");
+ }
+}
+
+static BUS_ATTR(ap_control_domain_mask, 0444,
+ ap_control_domain_mask_show, NULL);
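+
+/*
+ * Illustrative note: as a bus attribute this shows up as
+ * /sys/bus/ap/ap_control_domain_mask and yields a 256-bit hex mask
+ * (format 1), a 64-bit hex mask (format 0), or "not supported".
+ */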
+
static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
@@ -1077,6 +1105,7 @@ static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
static struct bus_attribute *const ap_bus_attrs[] = {
&bus_attr_ap_domain,
+ &bus_attr_ap_control_domain_mask,
&bus_attr_config_time,
&bus_attr_poll_thread,
&bus_attr_ap_interrupts,
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 685f6cc022f9..6405ae24a7a6 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -33,7 +33,7 @@
#define AP_DEVICES 64 /* Number of AP devices. */
#define AP_DOMAINS 16 /* Number of AP domains. */
#define AP_MAX_RESET 90 /* Maximum number of resets. */
-#define AP_RESET_TIMEOUT (HZ/2) /* Time in ticks for reset timeouts. */
+#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */
#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */
@@ -125,6 +125,8 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_FUNC_CRT4K 2
#define AP_FUNC_COPRO 3
#define AP_FUNC_ACCEL 4
+#define AP_FUNC_EP11 5
+#define AP_FUNC_APXA 6
/*
* AP reset flag states
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 31cfaa556072..4b824b15194f 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -44,6 +44,8 @@
#include "zcrypt_debug.h"
#include "zcrypt_api.h"
+#include "zcrypt_msgtype6.h"
+
/*
* Module description.
*/
@@ -554,9 +556,9 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
spin_lock_bh(&zcrypt_device_lock);
list_for_each_entry(zdev, &zcrypt_device_list, list) {
if (!zdev->online || !zdev->ops->send_cprb ||
- (xcRB->user_defined != AUTOSELECT &&
- AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)
- )
+ (zdev->ops->variant == MSGTYPE06_VARIANT_EP11) ||
+ (xcRB->user_defined != AUTOSELECT &&
+ AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined))
continue;
zcrypt_device_get(zdev);
get_device(&zdev->ap_dev->device);
@@ -581,6 +583,90 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
return -ENODEV;
}
+struct ep11_target_dev_list {
+ unsigned short targets_num;
+ struct ep11_target_dev *targets;
+};
+
+static bool is_desired_ep11dev(unsigned int dev_qid,
+ struct ep11_target_dev_list dev_list)
+{
+ int n;
+
+ for (n = 0; n < dev_list.targets_num; n++, dev_list.targets++) {
+ if ((AP_QID_DEVICE(dev_qid) == dev_list.targets->ap_id) &&
+ (AP_QID_QUEUE(dev_qid) == dev_list.targets->dom_id)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
+{
+ struct zcrypt_device *zdev;
+ bool autoselect = false;
+ int rc;
+ struct ep11_target_dev_list ep11_dev_list = {
+ .targets_num = 0x00,
+ .targets = NULL,
+ };
+
+ ep11_dev_list.targets_num = (unsigned short) xcrb->targets_num;
+
+ /* empty list indicates autoselect (all available targets) */
+ if (ep11_dev_list.targets_num == 0)
+ autoselect = true;
+ else {
+ ep11_dev_list.targets = kcalloc((unsigned short)
+ xcrb->targets_num,
+ sizeof(struct ep11_target_dev),
+ GFP_KERNEL);
+ if (!ep11_dev_list.targets)
+ return -ENOMEM;
+
+ if (copy_from_user(ep11_dev_list.targets,
+ (struct ep11_target_dev *)xcrb->targets,
+ xcrb->targets_num *
+ sizeof(struct ep11_target_dev)))
+ return -EFAULT;
+ }
+
+ spin_lock_bh(&zcrypt_device_lock);
+ list_for_each_entry(zdev, &zcrypt_device_list, list) {
+ /* check if device is eligible */
+ if (!zdev->online ||
+ zdev->ops->variant != MSGTYPE06_VARIANT_EP11)
+ continue;
+
+ /* check if device is selected as valid target */
+ if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) &&
+ !autoselect)
+ continue;
+
+ zcrypt_device_get(zdev);
+ get_device(&zdev->ap_dev->device);
+ zdev->request_count++;
+ __zcrypt_decrease_preference(zdev);
+ if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
+ spin_unlock_bh(&zcrypt_device_lock);
+ rc = zdev->ops->send_ep11_cprb(zdev, xcrb);
+ spin_lock_bh(&zcrypt_device_lock);
+ module_put(zdev->ap_dev->drv->driver.owner);
+ } else {
+ rc = -EAGAIN;
+ }
+ zdev->request_count--;
+ __zcrypt_increase_preference(zdev);
+ put_device(&zdev->ap_dev->device);
+ zcrypt_device_put(zdev);
+ spin_unlock_bh(&zcrypt_device_lock);
+ return rc;
+ }
+ spin_unlock_bh(&zcrypt_device_lock);
+ return -ENODEV;
+}
+
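+/*
+ * Intended call path (illustrative): user space fills a struct ep11_urb with
+ * request/response buffers and an optional target list and issues the
+ * ZSENDEP11CPRB ioctl (added further below); zcrypt_send_ep11_cprb() then
+ * runs the CPRB on the first online EP11-capable device that matches the
+ * target list, or on any online EP11 device if the list is empty.
+ */
+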
static long zcrypt_rng(char *buffer)
{
struct zcrypt_device *zdev;
@@ -784,6 +870,23 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
return rc;
}
+ case ZSENDEP11CPRB: {
+ struct ep11_urb __user *uxcrb = (void __user *)arg;
+ struct ep11_urb xcrb;
+ if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
+ return -EFAULT;
+ do {
+ rc = zcrypt_send_ep11_cprb(&xcrb);
+ } while (rc == -EAGAIN);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ do {
+ rc = zcrypt_send_ep11_cprb(&xcrb);
+ } while (rc == -EAGAIN);
+ if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
+ return -EFAULT;
+ return rc;
+ }
case Z90STAT_STATUS_MASK: {
char status[AP_DEVICES];
zcrypt_status_mask(status);
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 89632919c993..b3d496bfaa7e 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -74,6 +74,7 @@ struct ica_z90_status {
#define ZCRYPT_CEX2A 6
#define ZCRYPT_CEX3C 7
#define ZCRYPT_CEX3A 8
+#define ZCRYPT_CEX4 10
/**
* Large random numbers are pulled in 4096 byte chunks from the crypto cards
@@ -89,6 +90,7 @@ struct zcrypt_ops {
long (*rsa_modexpo_crt)(struct zcrypt_device *,
struct ica_rsa_modexpo_crt *);
long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
+ long (*send_ep11_cprb)(struct zcrypt_device *, struct ep11_urb *);
long (*rng)(struct zcrypt_device *, char *);
struct list_head list; /* zcrypt ops list. */
struct module *owner;
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index ce1226398ac9..569f8b1d86c0 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -30,7 +30,12 @@
#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE
#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE
-#define CEX4_CLEANUP_TIME (15*HZ)
+/* Waiting time for requests to be processed.
+ * Currently the processing time of some request types is not deterministic.
+ * But the maximum time limit managed by the stomper code is set to 60sec.
+ * Hence we have to wait at least that time period.
+ */
+#define CEX4_CLEANUP_TIME (61*HZ)
static struct ap_device_id zcrypt_cex4_ids[] = {
{ AP_DEVICE(AP_DEVICE_TYPE_CEX4) },
@@ -101,6 +106,19 @@ static int zcrypt_cex4_probe(struct ap_device *ap_dev)
zdev->speed_rating = CEX4C_SPEED_RATING;
zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_DEFAULT);
+ } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_EP11)) {
+ zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
+ if (!zdev)
+ return -ENOMEM;
+ zdev->type_string = "CEX4P";
+ zdev->user_space_type = ZCRYPT_CEX4;
+ zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
+ zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
+ zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
+ zdev->short_crt = 0;
+ zdev->speed_rating = CEX4C_SPEED_RATING;
+ zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_EP11);
}
break;
}
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 0079b6617211..7b23f43c7b08 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -106,15 +106,15 @@ static inline int convert_error(struct zcrypt_device *zdev,
// REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
/*
* To send a message of the wrong type is a bug in the
- * device driver. Warn about it, disable the device
+ * device driver. Send error msg, disable the device
* and then repeat the request.
*/
- WARN_ON(1);
atomic_set(&zcrypt_rescan_req, 1);
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- zdev->ap_dev->qid,
- zdev->online, ehdr->reply_code);
+ zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
return -EAGAIN;
case REP82_ERROR_TRANSPORT_FAIL:
case REP82_ERROR_MACHINE_FAILURE:
@@ -122,15 +122,17 @@ static inline int convert_error(struct zcrypt_device *zdev,
/* If a card fails disable it and repeat the request. */
atomic_set(&zcrypt_rescan_req, 1);
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- zdev->ap_dev->qid,
- zdev->online, ehdr->reply_code);
+ zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
return -EAGAIN;
default:
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- zdev->ap_dev->qid,
- zdev->online, ehdr->reply_code);
+ zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
return -EAGAIN; /* repeat the request on a different device. */
}
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 7c522f338bda..334e282f255b 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -25,6 +25,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -332,6 +335,11 @@ static int convert_type80(struct zcrypt_device *zdev,
if (t80h->len < sizeof(*t80h) + outputdatalength) {
/* The result is too short, the CEX2A card may not do that.. */
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
+ zdev->ap_dev->qid, zdev->online, t80h->code);
+
return -EAGAIN; /* repeat the request on a different device. */
}
if (zdev->user_space_type == ZCRYPT_CEX2A)
@@ -359,6 +367,10 @@ static int convert_response(struct zcrypt_device *zdev,
outputdata, outputdatalength);
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
+ zdev->ap_dev->qid, zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 7d97fa5a26d0..dc542e0a3055 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -25,6 +25,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
@@ -50,6 +53,7 @@ struct response_type {
};
#define PCIXCC_RESPONSE_TYPE_ICA 0
#define PCIXCC_RESPONSE_TYPE_XCRB 1
+#define PCIXCC_RESPONSE_TYPE_EP11 2
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
@@ -358,6 +362,91 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
return 0;
}
+static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
+ struct ap_message *ap_msg,
+ struct ep11_urb *xcRB)
+{
+ unsigned int lfmt;
+
+ static struct type6_hdr static_type6_ep11_hdr = {
+ .type = 0x06,
+ .rqid = {0x00, 0x01},
+ .function_code = {0x00, 0x00},
+ .agent_id[0] = 0x58, /* {'X'} */
+ .agent_id[1] = 0x43, /* {'C'} */
+ .offset1 = 0x00000058,
+ };
+
+ struct {
+ struct type6_hdr hdr;
+ struct ep11_cprb cprbx;
+ unsigned char pld_tag; /* fixed value 0x30 */
+ unsigned char pld_lenfmt; /* payload length format */
+ } __packed * msg = ap_msg->message;
+
+ struct pld_hdr {
+ unsigned char func_tag; /* fixed value 0x4 */
+ unsigned char func_len; /* fixed value 0x4 */
+ unsigned int func_val; /* function ID */
+ unsigned char dom_tag; /* fixed value 0x4 */
+ unsigned char dom_len; /* fixed value 0x4 */
+ unsigned int dom_val; /* domain id */
+ } __packed * payload_hdr;
+
+ /* length checks */
+ ap_msg->length = sizeof(struct type6_hdr) + xcRB->req_len;
+ if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE -
+ (sizeof(struct type6_hdr)))
+ return -EINVAL;
+
+ if (CEIL4(xcRB->resp_len) > MSGTYPE06_MAX_MSG_SIZE -
+ (sizeof(struct type86_fmt2_msg)))
+ return -EINVAL;
+
+ /* prepare type6 header */
+ msg->hdr = static_type6_ep11_hdr;
+ msg->hdr.ToCardLen1 = xcRB->req_len;
+ msg->hdr.FromCardLen1 = xcRB->resp_len;
+
+ /* Import CPRB data from the ioctl input parameter */
+ if (copy_from_user(&(msg->cprbx.cprb_len),
+ (char *)xcRB->req, xcRB->req_len)) {
+ return -EFAULT;
+ }
+
+ /*
+ * The target domain field within the cprb body/payload block will be
+ * replaced by the usage domain for non-management commands only.
+ * Therefore we check the first bit of the 'flags' parameter for
+ * management command indication.
+ * 0 - non management command
+ * 1 - management command
+ */
+ if (!((msg->cprbx.flags & 0x80) == 0x80)) {
+ msg->cprbx.target_id = (unsigned int)
+ AP_QID_QUEUE(zdev->ap_dev->qid);
+
+ if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
+ switch (msg->pld_lenfmt & 0x03) {
+ case 1:
+ lfmt = 2;
+ break;
+ case 2:
+ lfmt = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ lfmt = 1; /* length format #1 */
+ }
+ payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
+ payload_hdr->dom_val = (unsigned int)
+ AP_QID_QUEUE(zdev->ap_dev->qid);
+ }
+ return 0;
+}
+
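+/*
+ * Informational note, derived from the length-format decode above: bit 0x80
+ * of pld_lenfmt selects the extended encodings, and the low two bits then
+ * indicate one (lfmt = 2) or two (lfmt = 3) additional length octets; with
+ * bit 0x80 clear the length is a single octet (lfmt = 1). 'lfmt' is thus the
+ * size of the length field and the offset from pld_lenfmt to the payload
+ * header.
+ */
+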
/**
* Copy results from a type 86 ICA reply message back to user space.
*
@@ -377,6 +466,12 @@ struct type86x_reply {
char text[0];
} __packed;
+struct type86_ep11_reply {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+ struct ep11_cprb cprbx;
+} __packed;
+
static int convert_type86_ica(struct zcrypt_device *zdev,
struct ap_message *reply,
char __user *outputdata,
@@ -440,6 +535,11 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
if (service_rc == 8 && service_rs == 72)
return -EINVAL;
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
+ zdev->ap_dev->qid, zdev->online,
+ msg->hdr.reply_code);
return -EAGAIN; /* repeat the request on a different device. */
}
data = msg->text;
@@ -503,6 +603,33 @@ static int convert_type86_xcrb(struct zcrypt_device *zdev,
return 0;
}
+/**
+ * Copy results from a type 86 EP11 XCRB reply message back to user space.
+ *
+ * @zdev: crypto device pointer
+ * @reply: reply AP message.
+ * @xcRB: pointer to EP11 user request block
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ struct ep11_urb *xcRB)
+{
+ struct type86_fmt2_msg *msg = reply->message;
+ char *data = reply->message;
+
+ if (xcRB->resp_len < msg->fmt2.count1)
+ return -EINVAL;
+
+ /* Copy response CPRB to user */
+ if (copy_to_user((char *)xcRB->resp,
+ data + msg->fmt2.offset1, msg->fmt2.count1))
+ return -EFAULT;
+ xcRB->resp_len = msg->fmt2.count1;
+ return 0;
+}
+
static int convert_type86_rng(struct zcrypt_device *zdev,
struct ap_message *reply,
char *buffer)
@@ -551,6 +678,10 @@ static int convert_response_ica(struct zcrypt_device *zdev,
* response */
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
+ zdev->ap_dev->qid, zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
@@ -579,10 +710,40 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
default: /* Unknown response type, this should NEVER EVER happen */
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
+ zdev->ap_dev->qid, zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
+static int convert_response_ep11_xcrb(struct zcrypt_device *zdev,
+ struct ap_message *reply, struct ep11_urb *xcRB)
+{
+ struct type86_ep11_reply *msg = reply->message;
+
+ /* Response type byte is the second byte in the response. */
+ switch (((unsigned char *)reply->message)[1]) {
+ case TYPE82_RSP_CODE:
+ case TYPE87_RSP_CODE:
+ return convert_error(zdev, reply);
+ case TYPE86_RSP_CODE:
+ if (msg->hdr.reply_code)
+ return convert_error(zdev, reply);
+ if (msg->cprbx.cprb_ver_id == 0x04)
+ return convert_type86_ep11_xcrb(zdev, reply, xcRB);
+ /* Fall through, no break, incorrect cprb version is an unknown resp.*/
+ default: /* Unknown response type, this should NEVER EVER happen */
+ zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
+ zdev->ap_dev->qid, zdev->online);
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+}
+
static int convert_response_rng(struct zcrypt_device *zdev,
struct ap_message *reply,
char *data)
@@ -602,6 +763,10 @@ static int convert_response_rng(struct zcrypt_device *zdev,
* response */
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
+ zdev->ap_dev->qid, zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
@@ -657,6 +822,51 @@ out:
complete(&(resp_type->work));
}
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished with the reply message "reply".
+ * It is called from tasklet context.
+ * @ap_dev: pointer to the AP device
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_msgtype6_receive_ep11(struct ap_device *ap_dev,
+ struct ap_message *msg,
+ struct ap_message *reply)
+{
+ static struct error_hdr error_reply = {
+ .type = TYPE82_RSP_CODE,
+ .reply_code = REP82_ERROR_MACHINE_FAILURE,
+ };
+ struct response_type *resp_type =
+ (struct response_type *)msg->private;
+ struct type86_ep11_reply *t86r;
+ int length;
+
+ /* Copy the reply message to the request message buffer. */
+ if (IS_ERR(reply)) {
+ memcpy(msg->message, &error_reply, sizeof(error_reply));
+ goto out;
+ }
+ t86r = reply->message;
+ if (t86r->hdr.type == TYPE86_RSP_CODE &&
+ t86r->cprbx.cprb_ver_id == 0x04) {
+ switch (resp_type->type) {
+ case PCIXCC_RESPONSE_TYPE_EP11:
+ length = t86r->fmt2.offset1 + t86r->fmt2.count1;
+ length = min(MSGTYPE06_MAX_MSG_SIZE, length);
+ memcpy(msg->message, reply->message, length);
+ break;
+ default:
+ memcpy(msg->message, &error_reply, sizeof(error_reply));
+ }
+ } else {
+ memcpy(msg->message, reply->message, sizeof(error_reply));
+ }
+out:
+ complete(&(resp_type->work));
+}
+
static atomic_t zcrypt_step = ATOMIC_INIT(0);
/**
@@ -782,6 +992,46 @@ out_free:
}
/**
+ * The request distributor calls this function if it picked the CEX4P
+ * device to handle a send_ep11_cprb request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * CEX4P device to the request distributor
+ * @xcRB: pointer to the ep11 user request block
+ */
+static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_device *zdev,
+ struct ep11_urb *xcrb)
+{
+ struct ap_message ap_msg;
+ struct response_type resp_type = {
+ .type = PCIXCC_RESPONSE_TYPE_EP11,
+ };
+ int rc;
+
+ ap_init_message(&ap_msg);
+ ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
+ ap_msg.receive = zcrypt_msgtype6_receive_ep11;
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &resp_type;
+ rc = xcrb_msg_to_type6_ep11cprb_msgx(zdev, &ap_msg, xcrb);
+ if (rc)
+ goto out_free;
+ init_completion(&resp_type.work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible(&resp_type.work);
+ if (rc == 0)
+ rc = convert_response_ep11_xcrb(zdev, &ap_msg, xcrb);
+ else /* Signal pending. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+
+out_free:
+ kzfree(ap_msg.message);
+ return rc;
+}
+
+/**
* The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to generate random data.
* @zdev: pointer to zcrypt_device structure that identifies the
@@ -839,10 +1089,19 @@ static struct zcrypt_ops zcrypt_msgtype6_ops = {
.rng = zcrypt_msgtype6_rng,
};
+static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
+ .owner = THIS_MODULE,
+ .variant = MSGTYPE06_VARIANT_EP11,
+ .rsa_modexpo = NULL,
+ .rsa_modexpo_crt = NULL,
+ .send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb,
+};
+
int __init zcrypt_msgtype6_init(void)
{
zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops);
zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
+ zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops);
return 0;
}
@@ -850,6 +1109,7 @@ void __exit zcrypt_msgtype6_exit(void)
{
zcrypt_msgtype_unregister(&zcrypt_msgtype6_norng_ops);
zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
+ zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops);
}
module_init(zcrypt_msgtype6_init);
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index 1e500d3c0735..207247570623 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -32,6 +32,7 @@
#define MSGTYPE06_NAME "zcrypt_msgtype6"
#define MSGTYPE06_VARIANT_DEFAULT 0
#define MSGTYPE06_VARIANT_NORNG 1
+#define MSGTYPE06_VARIANT_EP11 2
#define MSGTYPE06_MAX_MSG_SIZE (12*1024)
@@ -99,6 +100,7 @@ struct type86_hdr {
} __packed;
#define TYPE86_RSP_CODE 0x86
+#define TYPE87_RSP_CODE 0x87
#define TYPE86_FMT2 0x02
struct type86_fmt2_ext {
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index f2b71d8df01f..7a743f4c646c 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -24,6 +24,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -199,6 +202,10 @@ static int convert_type84(struct zcrypt_device *zdev,
if (t84h->len < sizeof(*t84h) + outputdatalength) {
/* The result is too short, the PCICA card may not do that.. */
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
+ zdev->ap_dev->qid, zdev->online, t84h->code);
return -EAGAIN; /* repeat the request on a different device. */
}
BUG_ON(t84h->len > PCICA_MAX_RESPONSE_SIZE);
@@ -223,6 +230,10 @@ static int convert_response(struct zcrypt_device *zdev,
outputdata, outputdatalength);
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
+ zdev->ap_dev->qid, zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index 0d90a4334055..4d14c04b746e 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -24,6 +24,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
@@ -372,6 +375,11 @@ static int convert_type86(struct zcrypt_device *zdev,
if (service_rc == 8 && service_rs == 72)
return -EINVAL;
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
+ zdev->ap_dev->qid, zdev->online,
+ msg->hdr.reply_code);
return -EAGAIN; /* repeat the request on a different device. */
}
data = msg->text;
@@ -425,6 +433,10 @@ static int convert_response(struct zcrypt_device *zdev,
/* no break, incorrect cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
+ pr_err("Cryptographic device %x failed and was set offline\n",
+ zdev->ap_dev->qid);
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
+ zdev->ap_dev->qid, zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index d6297176ab85..0fc584832001 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -642,8 +642,15 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
/* OK */
}
- if (irb_is_error(irb))
- vcdev->err = -EIO; /* XXX - use real error */
+ if (irb_is_error(irb)) {
+ /* Command reject? */
+ if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
+ (irb->ecw[0] & SNS0_CMD_REJECT))
+ vcdev->err = -EOPNOTSUPP;
+ else
+ /* Map everything else to -EIO. */
+ vcdev->err = -EIO;
+ }
if (vcdev->curr_io & activity) {
switch (activity) {
case VIRTIO_CCW_DOING_READ_FEAT:
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 4dfe8c1092da..d28f05d0c75a 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_LCS) += lcs.o
obj-$(CONFIG_CLAW) += claw.o
qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
obj-$(CONFIG_QETH) += qeth.o
-qeth_l2-y += qeth_l2_main.o
+qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o
obj-$(CONFIG_QETH_L2) += qeth_l2.o
qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o
obj-$(CONFIG_QETH_L3) += qeth_l3.o
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 9b333fcf1a4c..ce16d1bdb20a 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -739,8 +739,12 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
IUCV_DBF_TEXT(trace, 4, __func__);
- if (conn && conn->netdev)
- privptr = netdev_priv(conn->netdev);
+ if (!conn || !conn->netdev) {
+ IUCV_DBF_TEXT(data, 2,
+ "Send confirmation for unlinked connection\n");
+ return;
+ }
+ privptr = netdev_priv(conn->netdev);
conn->prof.tx_pending--;
if (single_flag) {
if ((skb = skb_dequeue(&conn->commit_queue))) {
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 41ef94320ee8..a0de045eb227 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -156,6 +156,27 @@ struct qeth_ipa_info {
__u32 enabled_funcs;
};
+/* SETBRIDGEPORT stuff */
+enum qeth_sbp_roles {
+ QETH_SBP_ROLE_NONE = 0,
+ QETH_SBP_ROLE_PRIMARY = 1,
+ QETH_SBP_ROLE_SECONDARY = 2,
+};
+
+enum qeth_sbp_states {
+ QETH_SBP_STATE_INACTIVE = 0,
+ QETH_SBP_STATE_STANDBY = 1,
+ QETH_SBP_STATE_ACTIVE = 2,
+};
+
+#define QETH_SBP_HOST_NOTIFICATION 1
+
+struct qeth_sbp_info {
+ __u32 supported_funcs;
+ enum qeth_sbp_roles role;
+ __u32 hostnotification:1;
+};
+
static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
enum qeth_ipa_funcs func)
{
@@ -672,6 +693,7 @@ struct qeth_card_options {
struct qeth_ipa_info adp; /*Adapter parameters*/
struct qeth_routing_info route6;
struct qeth_ipa_info ipa6;
+ struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
int fake_broadcast;
int add_hhlen;
int layer2;
@@ -716,6 +738,8 @@ struct qeth_discipline {
int (*freeze)(struct ccwgroup_device *);
int (*thaw) (struct ccwgroup_device *);
int (*restore)(struct ccwgroup_device *);
+ int (*control_event_handler)(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd);
};
struct qeth_vlan_vid {
@@ -738,6 +762,12 @@ struct qeth_rx {
int qdio_err;
};
+struct carrier_info {
+ __u8 card_type;
+ __u16 port_mode;
+ __u32 port_speed;
+};
+
#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
struct qeth_card {
@@ -851,6 +881,7 @@ extern struct qeth_discipline qeth_l2_discipline;
extern struct qeth_discipline qeth_l3_discipline;
extern const struct attribute_group *qeth_generic_attr_groups[];
extern const struct attribute_group *qeth_osn_attr_groups[];
+extern struct workqueue_struct *qeth_wq;
const char *qeth_get_cardname_short(struct qeth_card *);
int qeth_realloc_buffer_pool(struct qeth_card *, int);
@@ -914,9 +945,15 @@ struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
int qeth_mdio_read(struct net_device *, int, int);
int qeth_snmp_command(struct qeth_card *, char __user *);
int qeth_query_oat_command(struct qeth_card *, char __user *);
+int qeth_query_card_info(struct qeth_card *card,
+ struct carrier_info *carrier_info);
int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
void *reply_param);
+int qeth_bridgeport_query_ports(struct qeth_card *card,
+ enum qeth_sbp_roles *role, enum qeth_sbp_states *state);
+int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
+int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
int qeth_get_elements_for_frags(struct sk_buff *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index eb4e1f809feb..c3a83df07894 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -68,7 +68,8 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
enum qeth_qdio_buffer_states newbufstate);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
-static struct workqueue_struct *qeth_wq;
+struct workqueue_struct *qeth_wq;
+EXPORT_SYMBOL_GPL(qeth_wq);
static void qeth_close_dev_handler(struct work_struct *work)
{
@@ -615,6 +616,13 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
card->info.hwtrap = 2;
qeth_schedule_recovery(card);
return NULL;
+ case IPA_CMD_SETBRIDGEPORT:
+ case IPA_CMD_ADDRESS_CHANGE_NOTIF:
+ if (card->discipline->control_event_handler
+ (card, cmd))
+ return cmd;
+ else
+ return NULL;
case IPA_CMD_MODCCID:
return cmd;
case IPA_CMD_REGISTER_LOCAL_ADDR:
@@ -4602,6 +4610,42 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_query_oat_command);
+int qeth_query_card_info_cb(struct qeth_card *card,
+ struct qeth_reply *reply, unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd;
+ struct qeth_query_card_info *card_info;
+ struct carrier_info *carrier_info;
+
+ QETH_CARD_TEXT(card, 2, "qcrdincb");
+ carrier_info = (struct carrier_info *)reply->param;
+ cmd = (struct qeth_ipa_cmd *)data;
+ card_info = &cmd->data.setadapterparms.data.card_info;
+ if (cmd->data.setadapterparms.hdr.return_code == 0) {
+ carrier_info->card_type = card_info->card_type;
+ carrier_info->port_mode = card_info->port_mode;
+ carrier_info->port_speed = card_info->port_speed;
+ }
+
+ qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+ return 0;
+}
+
+int qeth_query_card_info(struct qeth_card *card,
+ struct carrier_info *carrier_info)
+{
+ struct qeth_cmd_buffer *iob;
+
+ QETH_CARD_TEXT(card, 2, "qcrdinfo");
+ if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
+ return -EOPNOTSUPP;
+ iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
+ sizeof(struct qeth_ipacmd_setadpparms_hdr));
+ return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
+ (void *)carrier_info);
+}
+EXPORT_SYMBOL_GPL(qeth_query_card_info);
+
static inline int qeth_get_qdio_q_format(struct qeth_card *card)
{
switch (card->info.type) {
@@ -4920,6 +4964,7 @@ retriable:
card->options.ipa4.supported_funcs = 0;
card->options.adp.supported_funcs = 0;
+ card->options.sbp.supported_funcs = 0;
card->info.diagass_support = 0;
qeth_query_ipassists(card, QETH_PROT_IPV4);
if (qeth_is_supported(card, IPA_SETADAPTERPARMS))
@@ -5606,11 +5651,65 @@ void qeth_core_get_drvinfo(struct net_device *dev,
}
EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
+/* Helper function to fill 'advertising' and 'supported' which are the same. */
+/* Autoneg and full-duplex are supported and advertised unconditionally. */
+/* Always advertise and support all speeds up to the specified maximum, and */
+/* only the one specified port type. */
+static void qeth_set_ecmd_adv_sup(struct ethtool_cmd *ecmd,
+ int maxspeed, int porttype)
+{
+ int port_sup, port_adv, spd_sup, spd_adv;
+
+ switch (porttype) {
+ case PORT_TP:
+ port_sup = SUPPORTED_TP;
+ port_adv = ADVERTISED_TP;
+ break;
+ case PORT_FIBRE:
+ port_sup = SUPPORTED_FIBRE;
+ port_adv = ADVERTISED_FIBRE;
+ break;
+ default:
+ port_sup = SUPPORTED_TP;
+ port_adv = ADVERTISED_TP;
+ WARN_ON_ONCE(1);
+ }
+
+ /* "Fallthrough" cases ordered from high to low result in setting */
+ /* flags cumulatively, starting from the specified speed and down to */
+ /* the lowest possible. */
+ spd_sup = 0;
+ spd_adv = 0;
+ switch (maxspeed) {
+ case SPEED_10000:
+ spd_sup |= SUPPORTED_10000baseT_Full;
+ spd_adv |= ADVERTISED_10000baseT_Full;
+ case SPEED_1000:
+ spd_sup |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
+ spd_adv |= ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full;
+ case SPEED_100:
+ spd_sup |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
+ spd_adv |= ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
+ case SPEED_10:
+ spd_sup |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
+ spd_adv |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
+ break;
+ default:
+ spd_sup = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
+ spd_adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
+ WARN_ON_ONCE(1);
+ }
+ ecmd->advertising = ADVERTISED_Autoneg | port_adv | spd_adv;
+ ecmd->supported = SUPPORTED_Autoneg | port_sup | spd_sup;
+}
+
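+/*
+ * Example (derived from the helper above): qeth_set_ecmd_adv_sup(ecmd,
+ * SPEED_1000, PORT_FIBRE) sets Autoneg, FIBRE and all half/full duplex
+ * speed flags from 1000 down to 10 Mbit/s in both ecmd->supported and
+ * ecmd->advertising.
+ */
+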
int qeth_core_ethtool_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct qeth_card *card = netdev->ml_priv;
enum qeth_link_types link_type;
+ struct carrier_info carrier_info;
if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
link_type = QETH_LINK_TYPE_10GBIT_ETH;
@@ -5618,80 +5717,92 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
link_type = card->info.link_type;
ecmd->transceiver = XCVR_INTERNAL;
- ecmd->supported = SUPPORTED_Autoneg;
- ecmd->advertising = ADVERTISED_Autoneg;
ecmd->duplex = DUPLEX_FULL;
ecmd->autoneg = AUTONEG_ENABLE;
switch (link_type) {
case QETH_LINK_TYPE_FAST_ETH:
case QETH_LINK_TYPE_LANE_ETH100:
- ecmd->supported |= SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_TP;
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_100, PORT_TP);
ecmd->speed = SPEED_100;
ecmd->port = PORT_TP;
break;
case QETH_LINK_TYPE_GBIT_ETH:
case QETH_LINK_TYPE_LANE_ETH1000:
- ecmd->supported |= SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE;
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_FIBRE);
ecmd->speed = SPEED_1000;
ecmd->port = PORT_FIBRE;
break;
case QETH_LINK_TYPE_10GBIT_ETH:
- ecmd->supported |= SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_10000baseT_Full |
- ADVERTISED_FIBRE;
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_10000, PORT_FIBRE);
ecmd->speed = SPEED_10000;
ecmd->port = PORT_FIBRE;
break;
default:
- ecmd->supported |= SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_TP;
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_10, PORT_TP);
ecmd->speed = SPEED_10;
ecmd->port = PORT_TP;
}
+ /* Check if we can obtain more accurate information. */
+ /* If QUERY_CARD_INFO command is not supported or fails, */
+ /* just return the heuristic values filled in above. */
+ if (qeth_query_card_info(card, &carrier_info) != 0)
+ return 0;
+
+ netdev_dbg(netdev,
+ "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
+ carrier_info.card_type,
+ carrier_info.port_mode,
+ carrier_info.port_speed);
+
+ /* Update attributes for which we've obtained more authoritative */
+ /* information, leave the rest the way they were filled above. */
+ switch (carrier_info.card_type) {
+ case CARD_INFO_TYPE_1G_COPPER_A:
+ case CARD_INFO_TYPE_1G_COPPER_B:
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_TP);
+ ecmd->port = PORT_TP;
+ break;
+ case CARD_INFO_TYPE_1G_FIBRE_A:
+ case CARD_INFO_TYPE_1G_FIBRE_B:
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ break;
+ case CARD_INFO_TYPE_10G_FIBRE_A:
+ case CARD_INFO_TYPE_10G_FIBRE_B:
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_10000, PORT_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ break;
+ }
+
+ switch (carrier_info.port_mode) {
+ case CARD_INFO_PORTM_FULLDUPLEX:
+ ecmd->duplex = DUPLEX_FULL;
+ break;
+ case CARD_INFO_PORTM_HALFDUPLEX:
+ ecmd->duplex = DUPLEX_HALF;
+ break;
+ }
+
+ switch (carrier_info.port_speed) {
+ case CARD_INFO_PORTS_10M:
+ ecmd->speed = SPEED_10;
+ break;
+ case CARD_INFO_PORTS_100M:
+ ecmd->speed = SPEED_100;
+ break;
+ case CARD_INFO_PORTS_1G:
+ ecmd->speed = SPEED_1000;
+ break;
+ case CARD_INFO_PORTS_10G:
+ ecmd->speed = SPEED_10000;
+ break;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 06c55780005e..7b55768a9592 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -249,10 +249,12 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_DELIP, "delip"},
{IPA_CMD_SETADAPTERPARMS, "setadapterparms"},
{IPA_CMD_SET_DIAG_ASS, "set_diag_ass"},
+ {IPA_CMD_SETBRIDGEPORT, "set_bridge_port"},
{IPA_CMD_CREATE_ADDR, "create_addr"},
{IPA_CMD_DESTROY_ADDR, "destroy_addr"},
{IPA_CMD_REGISTER_LOCAL_ADDR, "register_local_addr"},
{IPA_CMD_UNREGISTER_LOCAL_ADDR, "unregister_local_addr"},
+ {IPA_CMD_ADDRESS_CHANGE_NOTIF, "address_change_notification"},
{IPA_CMD_UNKNOWN, "unknown"},
};
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 07085d55f9a1..cf6a90ed42ae 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -104,10 +104,12 @@ enum qeth_ipa_cmds {
IPA_CMD_DELIP = 0xb7,
IPA_CMD_SETADAPTERPARMS = 0xb8,
IPA_CMD_SET_DIAG_ASS = 0xb9,
+ IPA_CMD_SETBRIDGEPORT = 0xbe,
IPA_CMD_CREATE_ADDR = 0xc3,
IPA_CMD_DESTROY_ADDR = 0xc4,
IPA_CMD_REGISTER_LOCAL_ADDR = 0xd1,
IPA_CMD_UNREGISTER_LOCAL_ADDR = 0xd2,
+ IPA_CMD_ADDRESS_CHANGE_NOTIF = 0xd3,
IPA_CMD_UNKNOWN = 0x00
};
@@ -274,7 +276,24 @@ enum qeth_ipa_set_access_mode_rc {
SET_ACCESS_CTRL_RC_REFLREL_FAILED = 0x0024,
SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED = 0x0028,
};
-
+enum qeth_card_info_card_type {
+ CARD_INFO_TYPE_1G_COPPER_A = 0x61,
+ CARD_INFO_TYPE_1G_FIBRE_A = 0x71,
+ CARD_INFO_TYPE_10G_FIBRE_A = 0x91,
+ CARD_INFO_TYPE_1G_COPPER_B = 0xb1,
+ CARD_INFO_TYPE_1G_FIBRE_B = 0xa1,
+ CARD_INFO_TYPE_10G_FIBRE_B = 0xc1,
+};
+enum qeth_card_info_port_mode {
+ CARD_INFO_PORTM_HALFDUPLEX = 0x0002,
+ CARD_INFO_PORTM_FULLDUPLEX = 0x0003,
+};
+enum qeth_card_info_port_speed {
+ CARD_INFO_PORTS_10M = 0x00000005,
+ CARD_INFO_PORTS_100M = 0x00000006,
+ CARD_INFO_PORTS_1G = 0x00000007,
+ CARD_INFO_PORTS_10G = 0x00000008,
+};
/* (SET)DELIP(M) IPA stuff ***************************************************/
struct qeth_ipacmd_setdelip4 {
@@ -404,6 +423,14 @@ struct qeth_qoat_priv {
char *buffer;
};
+struct qeth_query_card_info {
+ __u8 card_type;
+ __u8 reserved1;
+ __u16 port_mode;
+ __u32 port_speed;
+ __u32 reserved2;
+};
+
struct qeth_ipacmd_setadpparms_hdr {
__u32 supp_hw_cmds;
__u32 reserved1;
@@ -424,6 +451,7 @@ struct qeth_ipacmd_setadpparms {
struct qeth_snmp_cmd snmp;
struct qeth_set_access_ctrl set_access_ctrl;
struct qeth_query_oat query_oat;
+ struct qeth_query_card_info card_info;
__u32 mode;
} data;
} __attribute__ ((packed));
@@ -474,6 +502,124 @@ struct qeth_ipacmd_diagass {
__u8 cdata[64];
} __attribute__ ((packed));
+/* SETBRIDGEPORT IPA Command: *********************************************/
+enum qeth_ipa_sbp_cmd {
+ IPA_SBP_QUERY_COMMANDS_SUPPORTED = 0x00000000L,
+ IPA_SBP_RESET_BRIDGE_PORT_ROLE = 0x00000001L,
+ IPA_SBP_SET_PRIMARY_BRIDGE_PORT = 0x00000002L,
+ IPA_SBP_SET_SECONDARY_BRIDGE_PORT = 0x00000004L,
+ IPA_SBP_QUERY_BRIDGE_PORTS = 0x00000008L,
+ IPA_SBP_BRIDGE_PORT_STATE_CHANGE = 0x00000010L,
+};
+
+struct net_if_token {
+ __u16 devnum;
+ __u8 cssid;
+ __u8 iid;
+ __u8 ssid;
+ __u8 chpid;
+ __u16 chid;
+} __packed;
+
+struct mac_addr_lnid {
+ __u8 mac[6];
+ __u16 lnid;
+} __packed;
+
+struct qeth_ipacmd_sbp_hdr {
+ __u32 supported_sbp_cmds;
+ __u32 enabled_sbp_cmds;
+ __u16 cmdlength;
+ __u16 reserved1;
+ __u32 command_code;
+ __u16 return_code;
+ __u8 used_total;
+ __u8 seq_no;
+ __u32 reserved2;
+} __packed;
+
+struct qeth_sbp_query_cmds_supp {
+ __u32 supported_cmds;
+ __u32 reserved;
+} __packed;
+
+struct qeth_sbp_reset_role {
+} __packed;
+
+struct qeth_sbp_set_primary {
+ struct net_if_token token;
+} __packed;
+
+struct qeth_sbp_set_secondary {
+} __packed;
+
+struct qeth_sbp_port_entry {
+ __u8 role;
+ __u8 state;
+ __u8 reserved1;
+ __u8 reserved2;
+ struct net_if_token token;
+} __packed;
+
+struct qeth_sbp_query_ports {
+ __u8 primary_bp_supported;
+ __u8 secondary_bp_supported;
+ __u8 num_entries;
+ __u8 entry_length;
+ struct qeth_sbp_port_entry entry[];
+} __packed;
+
+struct qeth_sbp_state_change {
+ __u8 primary_bp_supported;
+ __u8 secondary_bp_supported;
+ __u8 num_entries;
+ __u8 entry_length;
+ struct qeth_sbp_port_entry entry[];
+} __packed;
+
+struct qeth_ipacmd_setbridgeport {
+ struct qeth_ipacmd_sbp_hdr hdr;
+ union {
+ struct qeth_sbp_query_cmds_supp query_cmds_supp;
+ struct qeth_sbp_reset_role reset_role;
+ struct qeth_sbp_set_primary set_primary;
+ struct qeth_sbp_set_secondary set_secondary;
+ struct qeth_sbp_query_ports query_ports;
+ struct qeth_sbp_state_change state_change;
+ } data;
+} __packed;
+
+/* ADDRESS_CHANGE_NOTIFICATION adapter-initiated "command" *******************/
+/* Bitmask for entry->change_code. Both bits may be raised. */
+enum qeth_ipa_addr_change_code {
+ IPA_ADDR_CHANGE_CODE_VLANID = 0x01,
+ IPA_ADDR_CHANGE_CODE_MACADDR = 0x02,
+ IPA_ADDR_CHANGE_CODE_REMOVAL = 0x80, /* else addition */
+};
+enum qeth_ipa_addr_change_retcode {
+ IPA_ADDR_CHANGE_RETCODE_OK = 0x0000,
+ IPA_ADDR_CHANGE_RETCODE_LOSTEVENTS = 0x0010,
+};
+enum qeth_ipa_addr_change_lostmask {
+ IPA_ADDR_CHANGE_MASK_OVERFLOW = 0x01,
+ IPA_ADDR_CHANGE_MASK_STATECHANGE = 0x02,
+};
+
+struct qeth_ipacmd_addr_change_entry {
+ struct net_if_token token;
+ struct mac_addr_lnid addr_lnid;
+ __u8 change_code;
+ __u8 reserved1;
+ __u16 reserved2;
+} __packed;
+
+struct qeth_ipacmd_addr_change {
+ __u8 lost_event_mask;
+ __u8 reserved;
+ __u16 num_entries;
+ struct qeth_ipacmd_addr_change_entry entry[];
+} __packed;
+
/* Header for each IPA command */
struct qeth_ipacmd_hdr {
__u8 command;
@@ -503,6 +649,8 @@ struct qeth_ipa_cmd {
struct qeth_ipacmd_setadpparms setadapterparms;
struct qeth_set_routing setrtg;
struct qeth_ipacmd_diagass diagass;
+ struct qeth_ipacmd_setbridgeport sbp;
+ struct qeth_ipacmd_addr_change addrchange;
} data;
} __attribute__ ((packed));
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
new file mode 100644
index 000000000000..0767556404bd
--- /dev/null
+++ b/drivers/s390/net/qeth_l2.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright IBM Corp. 2013
+ * Author(s): Eugene Crosser <eugene.crosser@ru.ibm.com>
+ */
+
+#ifndef __QETH_L2_H__
+#define __QETH_L2_H__
+
+#include "qeth_core.h"
+
+int qeth_l2_create_device_attributes(struct device *);
+void qeth_l2_remove_device_attributes(struct device *);
+void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
+
+#endif /* __QETH_L2_H__ */
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index ec8ccdae7aba..0710550093ce 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include "qeth_core.h"
+#include "qeth_l2.h"
static int qeth_l2_set_offline(struct ccwgroup_device *);
static int qeth_l2_stop(struct net_device *);
@@ -32,6 +33,11 @@ static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
unsigned long));
static void qeth_l2_set_multicast_list(struct net_device *);
static int qeth_l2_recover(void *);
+static void qeth_bridgeport_query_support(struct qeth_card *card);
+static void qeth_bridge_state_change(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd);
+static void qeth_bridge_host_event(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd);
static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
@@ -880,6 +886,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ qeth_l2_create_device_attributes(&gdev->dev);
INIT_LIST_HEAD(&card->vid_list);
INIT_LIST_HEAD(&card->mc_list);
card->options.layer2 = 1;
@@ -891,6 +898,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
+ qeth_l2_remove_device_attributes(&cgdev->dev);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
@@ -986,6 +994,10 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
rc = -ENODEV;
goto out_remove;
}
+ qeth_bridgeport_query_support(card);
+ if (card->options.sbp.supported_funcs)
+ dev_info(&card->gdev->dev,
+ "The device represents a HiperSockets Bridge Capable Port\n");
qeth_trace_features(card);
if (!card->dev && qeth_l2_setup_netdev(card)) {
@@ -1003,6 +1015,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
} else
card->info.hwtrap = 0;
+ qeth_l2_setup_bridgeport_attrs(card);
+
card->state = CARD_STATE_HARDSETUP;
memset(&card->rx, 0, sizeof(struct qeth_rx));
qeth_print_status_message(card);
@@ -1228,6 +1242,26 @@ out:
return rc;
}
+/* Returns zero if the command is successfully "consumed" */
+static int qeth_l2_control_event(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd)
+{
+ switch (cmd->hdr.command) {
+ case IPA_CMD_SETBRIDGEPORT:
+ if (cmd->data.sbp.hdr.command_code ==
+ IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
+ qeth_bridge_state_change(card, cmd);
+ return 0;
+ } else
+ return 1;
+ case IPA_CMD_ADDRESS_CHANGE_NOTIF:
+ qeth_bridge_host_event(card, cmd);
+ return 0;
+ default:
+ return 1;
+ }
+}
+
struct qeth_discipline qeth_l2_discipline = {
.start_poll = qeth_qdio_start_poll,
.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
@@ -1241,6 +1275,7 @@ struct qeth_discipline qeth_l2_discipline = {
.freeze = qeth_l2_pm_suspend,
.thaw = qeth_l2_pm_resume,
.restore = qeth_l2_pm_resume,
+ .control_event_handler = qeth_l2_control_event,
};
EXPORT_SYMBOL_GPL(qeth_l2_discipline);
@@ -1347,6 +1382,594 @@ void qeth_osn_deregister(struct net_device *dev)
}
EXPORT_SYMBOL(qeth_osn_deregister);
+/* SETBRIDGEPORT support, async notifications */
+
+enum qeth_an_event_type {anev_reg_unreg, anev_abort, anev_reset};
+
+/**
+ * qeth_bridge_emit_host_event() - bridgeport address change notification
+ * @card: qeth_card structure pointer, for udev events.
+ * @evtype: "normal" register/unregister, or abort, or reset. For abort
+ * and reset token and addr_lnid are unused and may be NULL.
+ * @code: event bitmask: high order bit 0x80 value 1 means removal of an
+ * object, 0 - addition of an object.
+ * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC.
+ * @token: "network token" structure identifying physical address of the port.
+ * @addr_lnid: pointer to structure with MAC address and VLAN ID.
+ *
+ * This function is called when registrations and deregistrations are
+ * reported by the hardware, and also when notifications are enabled -
+ * for all currently registered addresses.
+ */
+static void qeth_bridge_emit_host_event(struct qeth_card *card,
+ enum qeth_an_event_type evtype,
+ u8 code, struct net_if_token *token, struct mac_addr_lnid *addr_lnid)
+{
+ char str[7][32];
+ char *env[8];
+ int i = 0;
+
+ switch (evtype) {
+ case anev_reg_unreg:
+ snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=%s",
+ (code & IPA_ADDR_CHANGE_CODE_REMOVAL)
+ ? "deregister" : "register");
+ env[i] = str[i]; i++;
+ if (code & IPA_ADDR_CHANGE_CODE_VLANID) {
+ snprintf(str[i], sizeof(str[i]), "VLAN=%d",
+ addr_lnid->lnid);
+ env[i] = str[i]; i++;
+ }
+ if (code & IPA_ADDR_CHANGE_CODE_MACADDR) {
+ snprintf(str[i], sizeof(str[i]), "MAC=%pM6",
+ &addr_lnid->mac);
+ env[i] = str[i]; i++;
+ }
+ snprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x",
+ token->cssid, token->ssid, token->devnum);
+ env[i] = str[i]; i++;
+ snprintf(str[i], sizeof(str[i]), "NTOK_IID=%02x", token->iid);
+ env[i] = str[i]; i++;
+ snprintf(str[i], sizeof(str[i]), "NTOK_CHPID=%02x",
+ token->chpid);
+ env[i] = str[i]; i++;
+ snprintf(str[i], sizeof(str[i]), "NTOK_CHID=%04x", token->chid);
+ env[i] = str[i]; i++;
+ break;
+ case anev_abort:
+ snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=abort");
+ env[i] = str[i]; i++;
+ break;
+ case anev_reset:
+ snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=reset");
+ env[i] = str[i]; i++;
+ break;
+ }
+ env[i] = NULL;
+ kobject_uevent_env(&card->gdev->dev.kobj, KOBJ_CHANGE, env);
+}
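+
+/*
+ * Illustrative only: a MAC+VLAN registration reported through this helper
+ * results in a udev change event whose environment looks roughly like
+ *	BRIDGEDHOST=register VLAN=<id> MAC=<addr>
+ *	NTOK_BUSID=<c>.<s>.<devnum> NTOK_IID=<ii> NTOK_CHPID=<pp> NTOK_CHID=<cccc>
+ */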
+
+struct qeth_bridge_state_data {
+ struct work_struct worker;
+ struct qeth_card *card;
+ struct qeth_sbp_state_change qports;
+};
+
+static void qeth_bridge_state_change_worker(struct work_struct *work)
+{
+ struct qeth_bridge_state_data *data =
+ container_of(work, struct qeth_bridge_state_data, worker);
+ /* We are only interested in the first entry - local port */
+ struct qeth_sbp_port_entry *entry = &data->qports.entry[0];
+ char env_locrem[32];
+ char env_role[32];
+ char env_state[32];
+ char *env[] = {
+ env_locrem,
+ env_role,
+ env_state,
+ NULL
+ };
+
+ /* Role should not change by itself, but if it did, */
+ /* information from the hardware is authoritative. */
+ mutex_lock(&data->card->conf_mutex);
+ data->card->options.sbp.role = entry->role;
+ mutex_unlock(&data->card->conf_mutex);
+
+ snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
+ snprintf(env_role, sizeof(env_role), "ROLE=%s",
+ (entry->role == QETH_SBP_ROLE_NONE) ? "none" :
+ (entry->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
+ (entry->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
+ "<INVALID>");
+ snprintf(env_state, sizeof(env_state), "STATE=%s",
+ (entry->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
+ (entry->state == QETH_SBP_STATE_STANDBY) ? "standby" :
+ (entry->state == QETH_SBP_STATE_ACTIVE) ? "active" :
+ "<INVALID>");
+ kobject_uevent_env(&data->card->gdev->dev.kobj,
+ KOBJ_CHANGE, env);
+ kfree(data);
+}
+
+static void qeth_bridge_state_change(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd)
+{
+ struct qeth_sbp_state_change *qports =
+ &cmd->data.sbp.data.state_change;
+ struct qeth_bridge_state_data *data;
+ int extrasize;
+
+ QETH_CARD_TEXT(card, 2, "brstchng");
+ if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
+ QETH_CARD_TEXT_(card, 2, "BPsz%.8d", qports->entry_length);
+ return;
+ }
+ extrasize = sizeof(struct qeth_sbp_port_entry) * qports->num_entries;
+ data = kzalloc(sizeof(struct qeth_bridge_state_data) + extrasize,
+ GFP_ATOMIC);
+ if (!data) {
+ QETH_CARD_TEXT(card, 2, "BPSalloc");
+ return;
+ }
+ INIT_WORK(&data->worker, qeth_bridge_state_change_worker);
+ data->card = card;
+ memcpy(&data->qports, qports,
+ sizeof(struct qeth_sbp_state_change) + extrasize);
+ queue_work(qeth_wq, &data->worker);
+}
+
+struct qeth_bridge_host_data {
+ struct work_struct worker;
+ struct qeth_card *card;
+ struct qeth_ipacmd_addr_change hostevs;
+};
+
+static void qeth_bridge_host_event_worker(struct work_struct *work)
+{
+ struct qeth_bridge_host_data *data =
+ container_of(work, struct qeth_bridge_host_data, worker);
+ int i;
+
+ if (data->hostevs.lost_event_mask) {
+ dev_info(&data->card->gdev->dev,
+"Address notification from the HiperSockets Bridge Port stopped %s (%s)\n",
+ data->card->dev->name,
+ (data->hostevs.lost_event_mask == 0x01)
+ ? "Overflow"
+ : (data->hostevs.lost_event_mask == 0x02)
+ ? "Bridge port state change"
+ : "Unknown reason");
+ mutex_lock(&data->card->conf_mutex);
+ data->card->options.sbp.hostnotification = 0;
+ mutex_unlock(&data->card->conf_mutex);
+ qeth_bridge_emit_host_event(data->card, anev_abort,
+ 0, NULL, NULL);
+ } else
+ for (i = 0; i < data->hostevs.num_entries; i++) {
+ struct qeth_ipacmd_addr_change_entry *entry =
+ &data->hostevs.entry[i];
+ qeth_bridge_emit_host_event(data->card,
+ anev_reg_unreg,
+ entry->change_code,
+ &entry->token, &entry->addr_lnid);
+ }
+ kfree(data);
+}
+
+static void qeth_bridge_host_event(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd)
+{
+ struct qeth_ipacmd_addr_change *hostevs =
+ &cmd->data.addrchange;
+ struct qeth_bridge_host_data *data;
+ int extrasize;
+
+ QETH_CARD_TEXT(card, 2, "brhostev");
+ if (cmd->hdr.return_code != 0x0000) {
+ if (cmd->hdr.return_code == 0x0010) {
+ if (hostevs->lost_event_mask == 0x00)
+ hostevs->lost_event_mask = 0xff;
+ } else {
+ QETH_CARD_TEXT_(card, 2, "BPHe%04x",
+ cmd->hdr.return_code);
+ return;
+ }
+ }
+ extrasize = sizeof(struct qeth_ipacmd_addr_change_entry) *
+ hostevs->num_entries;
+ data = kzalloc(sizeof(struct qeth_bridge_host_data) + extrasize,
+ GFP_ATOMIC);
+ if (!data) {
+ QETH_CARD_TEXT(card, 2, "BPHalloc");
+ return;
+ }
+ INIT_WORK(&data->worker, qeth_bridge_host_event_worker);
+ data->card = card;
+ memcpy(&data->hostevs, hostevs,
+ sizeof(struct qeth_ipacmd_addr_change) + extrasize);
+ queue_work(qeth_wq, &data->worker);
+}
+
+/* SETBRIDGEPORT support; sending commands */
+
+struct _qeth_sbp_cbctl {
+ u16 ipa_rc;
+ u16 cmd_rc;
+ union {
+ u32 supported;
+ struct {
+ enum qeth_sbp_roles *role;
+ enum qeth_sbp_states *state;
+ } qports;
+ } data;
+};
+
+/**
+ * qeth_bridgeport_makerc() - derive "traditional" error from hardware codes.
+ * @card: qeth_card structure pointer, for debug messages.
+ * @cbctl: state structure with hardware return codes.
+ * @setcmd: IPA command code
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ */
+static int qeth_bridgeport_makerc(struct qeth_card *card,
+ struct _qeth_sbp_cbctl *cbctl, enum qeth_ipa_sbp_cmd setcmd)
+{
+ int rc;
+
+ switch (cbctl->ipa_rc) {
+ case IPA_RC_SUCCESS:
+ switch (cbctl->cmd_rc) {
+ case 0x0000:
+ rc = 0;
+ break;
+ case 0x0004:
+ rc = -ENOSYS;
+ break;
+ case 0x000C: /* Not configured as bridge Port */
+ rc = -ENODEV; /* maybe not the best code here? */
+ dev_err(&card->gdev->dev,
+ "The HiperSockets device is not configured as a Bridge Port\n");
+ break;
+ case 0x0014: /* Another device is Primary */
+ switch (setcmd) {
+ case IPA_SBP_SET_PRIMARY_BRIDGE_PORT:
+ rc = -EEXIST;
+ dev_err(&card->gdev->dev,
+ "The HiperSockets LAN already has a primary Bridge Port\n");
+ break;
+ case IPA_SBP_SET_SECONDARY_BRIDGE_PORT:
+ rc = -EBUSY;
+ dev_err(&card->gdev->dev,
+ "The HiperSockets device is already a primary Bridge Port\n");
+ break;
+ default:
+ rc = -EIO;
+ }
+ break;
+ case 0x0018: /* This device is currently Secondary */
+ rc = -EBUSY;
+ dev_err(&card->gdev->dev,
+ "The HiperSockets device is already a secondary Bridge Port\n");
+ break;
+ case 0x001C: /* Limit for Secondary devices reached */
+ rc = -EEXIST;
+ dev_err(&card->gdev->dev,
+ "The HiperSockets LAN cannot have more secondary Bridge Ports\n");
+ break;
+ case 0x0024: /* This device is currently Primary */
+ rc = -EBUSY;
+ dev_err(&card->gdev->dev,
+ "The HiperSockets device is already a primary Bridge Port\n");
+ break;
+ case 0x0020: /* Not authorized by zManager */
+ rc = -EACCES;
+ dev_err(&card->gdev->dev,
+ "The HiperSockets device is not authorized to be a Bridge Port\n");
+ break;
+ default:
+ rc = -EIO;
+ }
+ break;
+ case IPA_RC_NOTSUPP:
+ rc = -ENOSYS;
+ break;
+ case IPA_RC_UNSUPPORTED_COMMAND:
+ rc = -ENOSYS;
+ break;
+ default:
+ rc = -EIO;
+ }
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "SBPi%04x", cbctl->ipa_rc);
+ QETH_CARD_TEXT_(card, 2, "SBPc%04x", cbctl->cmd_rc);
+ }
+ return rc;
+}
+
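The callers below all follow the same two-stage pattern: qeth_send_ipa_cmd() reports whether the command could be sent and answered at all, and only then are the IPA and SETBRIDGEPORT return codes recorded by the callback translated into an errno by qeth_bridgeport_makerc(). A self-contained sketch of that shape, using hypothetical stand-in functions and only the codes shown above:

    #include <errno.h>
    #include <stdio.h>

    struct cb_result { unsigned short ipa_rc, cmd_rc; };

    /* stand-in for the transport call; fills the callback result */
    static int send_cmd(struct cb_result *res)
    {
            res->ipa_rc = 0x0000;
            res->cmd_rc = 0x000C;           /* pretend: "not a Bridge Port" */
            return 0;                        /* transport itself succeeded */
    }

    /* stand-in for the hardware-code-to-errno translation */
    static int make_rc(const struct cb_result *res)
    {
            if (res->ipa_rc)
                    return -EIO;
            switch (res->cmd_rc) {
            case 0x0000: return 0;
            case 0x000C: return -ENODEV;
            default:     return -EIO;
            }
    }

    int main(void)
    {
            struct cb_result res;
            int rc = send_cmd(&res);         /* stage 1: transport error? */

            if (!rc)
                    rc = make_rc(&res);      /* stage 2: hardware codes -> errno */
            printf("rc = %d\n", rc);
            return 0;
    }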
+static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
+ struct qeth_reply *reply, unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+ QETH_CARD_TEXT(card, 2, "brqsupcb");
+ cbctl->ipa_rc = cmd->hdr.return_code;
+ cbctl->cmd_rc = cmd->data.sbp.hdr.return_code;
+ if ((cbctl->ipa_rc == 0) && (cbctl->cmd_rc == 0)) {
+ cbctl->data.supported =
+ cmd->data.sbp.data.query_cmds_supp.supported_cmds;
+ } else {
+ cbctl->data.supported = 0;
+ }
+ return 0;
+}
+
+/**
+ * qeth_bridgeport_query_support() - store bitmask of supported subfunctions.
+ * @card: qeth_card structure pointer.
+ *
+ * Sets bitmask of supported setbridgeport subfunctions in the qeth_card
+ * structure: card->options.sbp.supported_funcs.
+ */
+static void qeth_bridgeport_query_support(struct qeth_card *card)
+{
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+ struct _qeth_sbp_cbctl cbctl;
+
+ QETH_CARD_TEXT(card, 2, "brqsuppo");
+ iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.sbp.hdr.cmdlength =
+ sizeof(struct qeth_ipacmd_sbp_hdr) +
+ sizeof(struct qeth_sbp_query_cmds_supp);
+ cmd->data.sbp.hdr.command_code =
+ IPA_SBP_QUERY_COMMANDS_SUPPORTED;
+ cmd->data.sbp.hdr.used_total = 1;
+ cmd->data.sbp.hdr.seq_no = 1;
+ if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb,
+ (void *)&cbctl) ||
+ qeth_bridgeport_makerc(card, &cbctl,
+ IPA_SBP_QUERY_COMMANDS_SUPPORTED)) {
+		/* a non-zero makerc signifies failure and has already produced messages */
+ card->options.sbp.role = QETH_SBP_ROLE_NONE;
+ return;
+ }
+ card->options.sbp.supported_funcs = cbctl.data.supported;
+}
+
+static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
+ struct qeth_reply *reply, unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ struct qeth_sbp_query_ports *qports = &cmd->data.sbp.data.query_ports;
+ struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+
+ QETH_CARD_TEXT(card, 2, "brqprtcb");
+ cbctl->ipa_rc = cmd->hdr.return_code;
+ cbctl->cmd_rc = cmd->data.sbp.hdr.return_code;
+ if ((cbctl->ipa_rc != 0) || (cbctl->cmd_rc != 0))
+ return 0;
+ if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
+ cbctl->cmd_rc = 0xffff;
+ QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length);
+ return 0;
+ }
+ /* first entry contains the state of the local port */
+ if (qports->num_entries > 0) {
+ if (cbctl->data.qports.role)
+ *cbctl->data.qports.role = qports->entry[0].role;
+ if (cbctl->data.qports.state)
+ *cbctl->data.qports.state = qports->entry[0].state;
+ }
+ return 0;
+}
+
+/**
+ * qeth_bridgeport_query_ports() - query local bridgeport status.
+ * @card: qeth_card structure pointer.
+ * @role: Role of the port: 0-none, 1-primary, 2-secondary.
+ * @state: State of the port: 0-inactive, 1-standby, 2-active.
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ *
+ * 'role' and 'state' are not updated in case of hardware operation failure.
+ */
+int qeth_bridgeport_query_ports(struct qeth_card *card,
+ enum qeth_sbp_roles *role, enum qeth_sbp_states *state)
+{
+ int rc = 0;
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+ struct _qeth_sbp_cbctl cbctl = {
+ .data = {
+ .qports = {
+ .role = role,
+ .state = state,
+ },
+ },
+ };
+
+ QETH_CARD_TEXT(card, 2, "brqports");
+ if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
+ return -EOPNOTSUPP;
+ iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.sbp.hdr.cmdlength =
+ sizeof(struct qeth_ipacmd_sbp_hdr);
+ cmd->data.sbp.hdr.command_code =
+ IPA_SBP_QUERY_BRIDGE_PORTS;
+ cmd->data.sbp.hdr.used_total = 1;
+ cmd->data.sbp.hdr.seq_no = 1;
+ rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
+ (void *)&cbctl);
+ if (rc)
+ return rc;
+ rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
+ if (rc)
+ return rc;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
+
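Note the calling convention established above: @role and @state are pure output parameters, either may be NULL if the caller is not interested, and neither is written unless the whole operation succeeds (qeth_l2_setup_bridgeport_attrs() below relies on exactly that when it passes a NULL state pointer). A minimal sketch of the same convention, with hypothetical names:

    #include <errno.h>
    #include <stdio.h>

    enum role  { ROLE_NONE, ROLE_PRIMARY, ROLE_SECONDARY };
    enum state { STATE_INACTIVE, STATE_STANDBY, STATE_ACTIVE };

    /* writes through the pointers only on success; NULL means "don't care" */
    static int query_port(int fail, enum role *role, enum state *state)
    {
            if (fail)
                    return -EIO;            /* outputs left untouched */
            if (role)
                    *role = ROLE_PRIMARY;
            if (state)
                    *state = STATE_ACTIVE;
            return 0;
    }

    int main(void)
    {
            enum role role = ROLE_NONE;     /* keeps its old value on failure */

            if (query_port(0, &role, NULL)) /* state not wanted by this caller */
                    return 1;
            printf("role = %d\n", role);
            return 0;
    }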
+static int qeth_bridgeport_set_cb(struct qeth_card *card,
+ struct qeth_reply *reply, unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
+ struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+ QETH_CARD_TEXT(card, 2, "brsetrcb");
+ cbctl->ipa_rc = cmd->hdr.return_code;
+ cbctl->cmd_rc = cmd->data.sbp.hdr.return_code;
+ return 0;
+}
+
+/**
+ * qeth_bridgeport_setrole() - Assign a Bridge Port role to the device.
+ * @card: qeth_card structure pointer.
+ * @role: Role to assign.
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ */
+int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
+{
+ int rc = 0;
+ int cmdlength;
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+ struct _qeth_sbp_cbctl cbctl;
+ enum qeth_ipa_sbp_cmd setcmd;
+
+ QETH_CARD_TEXT(card, 2, "brsetrol");
+ switch (role) {
+ case QETH_SBP_ROLE_NONE:
+ setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE;
+ cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
+ sizeof(struct qeth_sbp_reset_role);
+ break;
+ case QETH_SBP_ROLE_PRIMARY:
+ setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT;
+ cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
+ sizeof(struct qeth_sbp_set_primary);
+ break;
+ case QETH_SBP_ROLE_SECONDARY:
+ setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT;
+ cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
+ sizeof(struct qeth_sbp_set_secondary);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (!(card->options.sbp.supported_funcs & setcmd))
+ return -EOPNOTSUPP;
+ iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.sbp.hdr.cmdlength = cmdlength;
+ cmd->data.sbp.hdr.command_code = setcmd;
+ cmd->data.sbp.hdr.used_total = 1;
+ cmd->data.sbp.hdr.seq_no = 1;
+ rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb,
+ (void *)&cbctl);
+ if (rc)
+ return rc;
+ rc = qeth_bridgeport_makerc(card, &cbctl, setcmd);
+ return rc;
+}
+
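A detail worth noting: the IPA_SBP_* sub-command codes are used directly as bits in card->options.sbp.supported_funcs (see the "supported_funcs & setcmd" test above, and the corresponding test in qeth_bridgeport_query_ports()), so each sub-command value must be a distinct single bit. A standalone sketch of that "command code doubles as capability bit" idiom, with hypothetical values:

    #include <stdio.h>

    enum cmd {                              /* each value is one bit */
            CMD_QUERY_PORTS   = 0x01,
            CMD_SET_PRIMARY   = 0x02,
            CMD_SET_SECONDARY = 0x04,
    };

    static int issue(unsigned int supported, enum cmd cmd)
    {
            if (!(supported & cmd))         /* capability check, no lookup table */
                    return -1;              /* -EOPNOTSUPP in the driver */
            printf("issuing 0x%02x\n", (unsigned int)cmd);
            return 0;
    }

    int main(void)
    {
            unsigned int supported = CMD_QUERY_PORTS | CMD_SET_PRIMARY;

            issue(supported, CMD_SET_PRIMARY);    /* allowed */
            issue(supported, CMD_SET_SECONDARY);  /* rejected */
            return 0;
    }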
+/**
+ * qeth_anset_makerc() - derive "traditional" error from hardware codes.
+ * @card: qeth_card structure pointer, for debug messages.
+ * @pnso_rc: return code of the PNSO operation.
+ * @response: hardware response code of the operation.
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ */
+static int qeth_anset_makerc(struct qeth_card *card, int pnso_rc, u16 response)
+{
+ int rc;
+
+ if (pnso_rc == 0)
+ switch (response) {
+ case 0x0001:
+ rc = 0;
+ break;
+ case 0x0004:
+ case 0x0100:
+ case 0x0106:
+ rc = -ENOSYS;
+ dev_err(&card->gdev->dev,
+ "Setting address notification failed\n");
+ break;
+ case 0x0107:
+ rc = -EAGAIN;
+ break;
+ default:
+ rc = -EIO;
+ }
+ else
+ rc = -EIO;
+
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "SBPp%04x", pnso_rc);
+ QETH_CARD_TEXT_(card, 2, "SBPr%04x", response);
+ }
+ return rc;
+}
+
+static void qeth_bridgeport_an_set_cb(void *priv,
+ enum qdio_brinfo_entry_type type, void *entry)
+{
+ struct qeth_card *card = (struct qeth_card *)priv;
+ struct qdio_brinfo_entry_l2 *l2entry;
+ u8 code;
+
+ if (type != l2_addr_lnid) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ l2entry = (struct qdio_brinfo_entry_l2 *)entry;
+ code = IPA_ADDR_CHANGE_CODE_MACADDR;
+ if (l2entry->addr_lnid.lnid)
+ code |= IPA_ADDR_CHANGE_CODE_VLANID;
+ qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
+ (struct net_if_token *)&l2entry->nit,
+ (struct mac_addr_lnid *)&l2entry->addr_lnid);
+}
+
+/**
+ * qeth_bridgeport_an_set() - Enable or disable bridgeport address notification
+ * @card: qeth_card structure pointer.
+ * @enable: 0 - disable, non-zero - enable notifications
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ *
+ * On enable, emits a series of address notification udev events for all
+ * currently registered hosts.
+ */
+int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
+{
+ int rc;
+ u16 response;
+ struct ccw_device *ddev;
+ struct subchannel_id schid;
+
+ if (!card)
+ return -EINVAL;
+ if (!card->options.sbp.supported_funcs)
+ return -EOPNOTSUPP;
+ ddev = CARD_DDEV(card);
+ ccw_device_get_schid(ddev, &schid);
+
+ if (enable) {
+ qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
+ rc = qdio_pnso_brinfo(schid, 1, &response,
+ qeth_bridgeport_an_set_cb, card);
+ } else
+ rc = qdio_pnso_brinfo(schid, 0, &response, NULL, NULL);
+ return qeth_anset_makerc(card, rc, response);
+}
+EXPORT_SYMBOL_GPL(qeth_bridgeport_an_set);
+
module_init(qeth_l2_init);
module_exit(qeth_l2_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
new file mode 100644
index 000000000000..ae1bc04b8653
--- /dev/null
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright IBM Corp. 2013
+ * Author(s): Eugene Crosser <eugene.crosser@ru.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <asm/ebcdic.h>
+#include "qeth_l2.h"
+
+#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
+struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
+
+static int qeth_card_hw_is_reachable(struct qeth_card *card)
+{
+ return (card->state == CARD_STATE_SOFTSETUP) ||
+ (card->state == CARD_STATE_UP);
+}
+
+static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf,
+ int show_state)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ enum qeth_sbp_states state = QETH_SBP_STATE_INACTIVE;
+ int rc = 0;
+ char *word;
+
+ if (!card)
+ return -EINVAL;
+
+ mutex_lock(&card->conf_mutex);
+
+ if (qeth_card_hw_is_reachable(card) &&
+ card->options.sbp.supported_funcs)
+ rc = qeth_bridgeport_query_ports(card,
+ &card->options.sbp.role, &state);
+ if (!rc) {
+ if (show_state)
+ switch (state) {
+ case QETH_SBP_STATE_INACTIVE:
+ word = "inactive"; break;
+ case QETH_SBP_STATE_STANDBY:
+ word = "standby"; break;
+ case QETH_SBP_STATE_ACTIVE:
+ word = "active"; break;
+ default:
+ rc = -EIO;
+ }
+ else
+ switch (card->options.sbp.role) {
+ case QETH_SBP_ROLE_NONE:
+ word = "none"; break;
+ case QETH_SBP_ROLE_PRIMARY:
+ word = "primary"; break;
+ case QETH_SBP_ROLE_SECONDARY:
+ word = "secondary"; break;
+ default:
+ rc = -EIO;
+ }
+ if (rc)
+ QETH_CARD_TEXT_(card, 2, "SBP%02x:%02x",
+ card->options.sbp.role, state);
+ else
+ rc = sprintf(buf, "%s\n", word);
+ }
+
+ mutex_unlock(&card->conf_mutex);
+
+ return rc;
+}
+
+static ssize_t qeth_bridge_port_role_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return qeth_bridge_port_role_state_show(dev, attr, buf, 0);
+}
+
+static ssize_t qeth_bridge_port_role_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ int rc = 0;
+ enum qeth_sbp_roles role;
+
+ if (!card)
+ return -EINVAL;
+ if (sysfs_streq(buf, "primary"))
+ role = QETH_SBP_ROLE_PRIMARY;
+ else if (sysfs_streq(buf, "secondary"))
+ role = QETH_SBP_ROLE_SECONDARY;
+ else if (sysfs_streq(buf, "none"))
+ role = QETH_SBP_ROLE_NONE;
+ else
+ return -EINVAL;
+
+ mutex_lock(&card->conf_mutex);
+
+ if (qeth_card_hw_is_reachable(card)) {
+ rc = qeth_bridgeport_setrole(card, role);
+ if (!rc)
+ card->options.sbp.role = role;
+ } else
+ card->options.sbp.role = role;
+
+ mutex_unlock(&card->conf_mutex);
+
+ return rc ? rc : count;
+}
+
+static DEVICE_ATTR(bridge_role, 0644, qeth_bridge_port_role_show,
+ qeth_bridge_port_role_store);
+
+static ssize_t qeth_bridge_port_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return qeth_bridge_port_role_state_show(dev, attr, buf, 1);
+}
+
+static DEVICE_ATTR(bridge_state, 0644, qeth_bridge_port_state_show,
+ NULL);
+
+static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ int enabled;
+
+ if (!card)
+ return -EINVAL;
+
+ mutex_lock(&card->conf_mutex);
+
+ enabled = card->options.sbp.hostnotification;
+
+ mutex_unlock(&card->conf_mutex);
+
+ return sprintf(buf, "%d\n", enabled);
+}
+
+static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ int rc = 0;
+ int enable;
+
+ if (!card)
+ return -EINVAL;
+
+ if (sysfs_streq(buf, "0"))
+ enable = 0;
+ else if (sysfs_streq(buf, "1"))
+ enable = 1;
+ else
+ return -EINVAL;
+
+ mutex_lock(&card->conf_mutex);
+
+ if (qeth_card_hw_is_reachable(card)) {
+ rc = qeth_bridgeport_an_set(card, enable);
+ if (!rc)
+ card->options.sbp.hostnotification = enable;
+ } else
+ card->options.sbp.hostnotification = enable;
+
+ mutex_unlock(&card->conf_mutex);
+
+ return rc ? rc : count;
+}
+
+static DEVICE_ATTR(bridge_hostnotify, 0644,
+ qeth_bridgeport_hostnotification_show,
+ qeth_bridgeport_hostnotification_store);
+
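The three attributes above expose the Bridge Port controls through sysfs. For illustration, a small userspace C program that sets the role and reads back the state; the device path is a hypothetical example, the real path depends on the ccwgroup device in question:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical example device; substitute the real ccwgroup id */
            const char *dir = "/sys/bus/ccwgroup/drivers/qeth/0.0.f500";
            char path[256], buf[32];
            FILE *f;

            snprintf(path, sizeof(path), "%s/bridge_role", dir);
            f = fopen(path, "w");
            if (!f)
                    return 1;
            fputs("primary\n", f);          /* "primary", "secondary" or "none" */
            fclose(f);

            snprintf(path, sizeof(path), "%s/bridge_state", dir);
            f = fopen(path, "r");
            if (!f)
                    return 1;
            if (fgets(buf, sizeof(buf), f))
                    printf("bridge_state: %s", buf);  /* inactive/standby/active */
            fclose(f);
            return 0;
    }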
+static struct attribute *qeth_l2_bridgeport_attrs[] = {
+ &dev_attr_bridge_role.attr,
+ &dev_attr_bridge_state.attr,
+ &dev_attr_bridge_hostnotify.attr,
+ NULL,
+};
+
+static struct attribute_group qeth_l2_bridgeport_attr_group = {
+ .attrs = qeth_l2_bridgeport_attrs,
+};
+
+int qeth_l2_create_device_attributes(struct device *dev)
+{
+ return sysfs_create_group(&dev->kobj, &qeth_l2_bridgeport_attr_group);
+}
+
+void qeth_l2_remove_device_attributes(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, &qeth_l2_bridgeport_attr_group);
+}
+
+/**
+ * qeth_l2_setup_bridgeport_attrs() - set/restore attrs when turning online.
+ * @card: qeth_card structure pointer
+ *
+ * Note: this function is called with conf_mutex held by the caller
+ */
+void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
+{
+ int rc;
+
+ if (!card)
+ return;
+ if (!card->options.sbp.supported_funcs)
+ return;
+ if (card->options.sbp.role != QETH_SBP_ROLE_NONE) {
+ /* Conditional to avoid spurious error messages */
+ qeth_bridgeport_setrole(card, card->options.sbp.role);
+ /* Let the callback function refresh the stored role value. */
+ qeth_bridgeport_query_ports(card,
+ &card->options.sbp.role, NULL);
+ }
+ if (card->options.sbp.hostnotification) {
+ rc = qeth_bridgeport_an_set(card, 1);
+ if (rc)
+ card->options.sbp.hostnotification = 0;
+ } else
+ qeth_bridgeport_an_set(card, 0);
+}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index c1b0b2761f8d..0f430424c3b8 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3593,6 +3593,13 @@ out:
return rc;
}
+/* Returns zero if the command is successfully "consumed" */
+static int qeth_l3_control_event(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd)
+{
+ return 1;
+}
+
struct qeth_discipline qeth_l3_discipline = {
.start_poll = qeth_qdio_start_poll,
.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
@@ -3606,6 +3613,7 @@ struct qeth_discipline qeth_l3_discipline = {
.freeze = qeth_l3_pm_suspend,
.thaw = qeth_l3_pm_resume,
.restore = qeth_l3_pm_resume,
+ .control_event_handler = qeth_l3_control_event,
};
EXPORT_SYMBOL_GPL(qeth_l3_discipline);
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index c1441ed282eb..c7763e482eb2 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -11,7 +11,6 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index fc1339cf91ac..7c71e7b4febf 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -9,7 +9,6 @@
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/major.h>
-#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h> /* request_region */
#include <linux/slab.h>
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index ddbe5a9e713d..af15a2fdab5e 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -19,7 +19,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/ioport.h>
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index d9f268f23774..25c738e9ef19 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -9,7 +9,6 @@
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
-#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index b0aae0536d58..b7acafc85099 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -11,7 +11,6 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/of.h>
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a2d61d061be7..c8bd092fc945 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -499,47 +499,6 @@ config SCSI_AACRAID
source "drivers/scsi/aic7xxx/Kconfig.aic7xxx"
-
-config SCSI_AIC7XXX_OLD
- tristate "Adaptec AIC7xxx support (old driver)"
- depends on (ISA || EISA || PCI ) && SCSI
- help
- WARNING This driver is an older aic7xxx driver and is no longer
- under active development. Adaptec, Inc. is writing a new driver to
- take the place of this one, and it is recommended that whenever
- possible, people should use the new Adaptec written driver instead
- of this one. This driver will eventually be phased out entirely.
-
- This is support for the various aic7xxx based Adaptec SCSI
- controllers. These include the 274x EISA cards; 284x VLB cards;
- 2902, 2910, 293x, 294x, 394x, 3985 and several other PCI and
- motherboard based SCSI controllers from Adaptec. It does not support
- the AAA-13x RAID controllers from Adaptec, nor will it likely ever
- support them. It does not support the 2920 cards from Adaptec that
- use the Future Domain SCSI controller chip. For those cards, you
- need the "Future Domain 16xx SCSI support" driver.
-
- In general, if the controller is based on an Adaptec SCSI controller
- chip from the aic777x series or the aic78xx series, this driver
- should work. The only exception is the 7810 which is specifically
- not supported (that's the RAID controller chip on the AAA-13x
- cards).
-
- Note that the AHA2920 SCSI host adapter is *not* supported by this
- driver; choose "Future Domain 16xx SCSI support" instead if you have
- one of those.
-
- Information on the configuration options for this controller can be
- found by checking the help file for each of the available
- configuration options. You should read
- <file:Documentation/scsi/aic7xxx_old.txt> at a minimum before
- contacting the maintainer with any questions. The SCSI-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>, can also
- be of great help.
-
- To compile this driver as a module, choose M here: the
- module will be called aic7xxx_old.
-
source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
source "drivers/scsi/aic94xx/Kconfig"
source "drivers/scsi/mvsas/Kconfig"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 149bb6bf1849..e172d4f8e02f 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -70,7 +70,6 @@ obj-$(CONFIG_SCSI_AHA1740) += aha1740.o
obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx/
obj-$(CONFIG_SCSI_AIC79XX) += aic7xxx/
obj-$(CONFIG_SCSI_AACRAID) += aacraid/
-obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o
obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/
obj-$(CONFIG_SCSI_PM8001) += pm8001/
obj-$(CONFIG_SCSI_ISCI) += isci/
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 30fa38a0ad39..9176bfbd5745 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -201,7 +201,7 @@ static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
instance->irq = IRQ_AMIGA_PORTS;
instance->unique_id = z->slotaddr;
- regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start);
+ regs = ZTWO_VADDR(z->resource.start);
regs->DAWR = DAWR_A2091;
wdregs.SASR = &regs->SASR;
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index c0f4f4290dd6..dd5b64726ddc 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -220,7 +220,7 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
instance->irq = IRQ_AMIGA_PORTS;
- regs = (struct a3000_scsiregs *)ZTWO_VADDR(res->start);
+ regs = ZTWO_VADDR(res->start);
regs->DAWR = DAWR_A3000;
wdregs.SASR = &regs->SASR;
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
index 70c521f79f7c..f5a2ab41543b 100644
--- a/drivers/scsi/a4000t.c
+++ b/drivers/scsi/a4000t.c
@@ -56,7 +56,7 @@ static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev)
scsi_addr = res->start + A4000T_SCSI_OFFSET;
/* Fill in the required pieces of hostdata */
- hostdata->base = (void __iomem *)ZTWO_VADDR(scsi_addr);
+ hostdata->base = ZTWO_VADDR(scsi_addr);
hostdata->clock = 50;
hostdata->chip710 = 1;
hostdata->dmode_extra = DMODE_FC2;
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
deleted file mode 100644
index 33ec9c643400..000000000000
--- a/drivers/scsi/aic7xxx_old.c
+++ /dev/null
@@ -1,11149 +0,0 @@
-/*+M*************************************************************************
- * Adaptec AIC7xxx device driver for Linux.
- *
- * Copyright (c) 1994 John Aycock
- * The University of Calgary Department of Computer Science.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Sources include the Adaptec 1740 driver (aha1740.c), the Ultrastor 24F
- * driver (ultrastor.c), various Linux kernel source, the Adaptec EISA
- * config file (!adp7771.cfg), the Adaptec AHA-2740A Series User's Guide,
- * the Linux Kernel Hacker's Guide, Writing a SCSI Device Driver for Linux,
- * the Adaptec 1542 driver (aha1542.c), the Adaptec EISA overlay file
- * (adp7770.ovl), the Adaptec AHA-2740 Series Technical Reference Manual,
- * the Adaptec AIC-7770 Data Book, the ANSI SCSI specification, the
- * ANSI SCSI-2 specification (draft 10c), ...
- *
- * --------------------------------------------------------------------------
- *
- * Modifications by Daniel M. Eischen (deischen@iworks.InterWorks.org):
- *
- * Substantially modified to include support for wide and twin bus
- * adapters, DMAing of SCBs, tagged queueing, IRQ sharing, bug fixes,
- * SCB paging, and other rework of the code.
- *
- * Parts of this driver were also based on the FreeBSD driver by
- * Justin T. Gibbs. His copyright follows:
- *
- * --------------------------------------------------------------------------
- * Copyright (c) 1994-1997 Justin Gibbs.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Where this Software is combined with software released under the terms of
- * the GNU General Public License ("GPL") and the terms of the GPL would require the
- * combined work to also be released under the terms of the GPL, the terms
- * and conditions of this License will apply in addition to those of the
- * GPL with the exception of any terms or conditions of this License that
- * conflict with, or are expressly prohibited by, the GPL.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $Id: aic7xxx.c,v 1.119 1997/06/27 19:39:18 gibbs Exp $
- *---------------------------------------------------------------------------
- *
- * Thanks also go to (in alphabetical order) the following:
- *
- * Rory Bolt - Sequencer bug fixes
- * Jay Estabrook - Initial DEC Alpha support
- * Doug Ledford - Much needed abort/reset bug fixes
- * Kai Makisara - DMAing of SCBs
- *
- * A Boot time option was also added for not resetting the scsi bus.
- *
- * Form: aic7xxx=extended
- * aic7xxx=no_reset
- * aic7xxx=ultra
- * aic7xxx=irq_trigger:[0,1] # 0 edge, 1 level
- * aic7xxx=verbose
- *
- * Daniel M. Eischen, deischen@iworks.InterWorks.org, 1/23/97
- *
- * $Id: aic7xxx.c,v 4.1 1997/06/12 08:23:42 deang Exp $
- *-M*************************************************************************/
-
-/*+M**************************************************************************
- *
- * Further driver modifications made by Doug Ledford <dledford@redhat.com>
- *
- * Copyright (c) 1997-1999 Doug Ledford
- *
- * These changes are released under the same licensing terms as the FreeBSD
- * driver written by Justin Gibbs. Please see his Copyright notice above
- * for the exact terms and conditions covering my changes as well as the
- * warranty statement.
- *
- * Modifications made to the aic7xxx.c,v 4.1 driver from Dan Eischen include
- * but are not limited to:
- *
- * 1: Import of the latest FreeBSD sequencer code for this driver
- * 2: Modification of kernel code to accommodate different sequencer semantics
- * 3: Extensive changes throughout kernel portion of driver to improve
- *  3: Extensive changes throughout kernel portion of driver to improve
- *     abort/reset processing and error handling
- * 4: Other work contributed by various people on the Internet
- * 5: Changes to printk information and verbosity selection code
- * 6: General reliability related changes, especially in IRQ management
- * 7: Modifications to the default probe/attach order for supported cards
- * 8: SMP friendliness has been improved
- *
- * Overall, this driver represents a significant departure from the official
- * aic7xxx driver released by Dan Eischen in two ways. First, in the code
- * itself. A diff between the two version of the driver is now a several
- * thousand line diff. Second, in approach to solving the same problem. The
- * problem is importing the FreeBSD aic7xxx driver code to linux can be a
- * difficult and time consuming process, that also can be error prone. Dan
- * Eischen's official driver uses the approach that the linux and FreeBSD
- * drivers should be as identical as possible. To that end, his next version
- * of this driver will be using a mid-layer code library that he is developing
- * to moderate communications between the linux mid-level SCSI code and the
- * low level FreeBSD driver. He intends to be able to essentially drop the
- * FreeBSD driver into the linux kernel with only a few minor tweaks to some
- * include files and the like and get things working, making for fast easy
- * imports of the FreeBSD code into linux.
- *
- * I disagree with Dan's approach. Not that I don't think his way of doing
- * things would be nice, easy to maintain, and create a more uniform driver
- * between FreeBSD and Linux. I have no objection to those issues. My
- * disagreement is on the needed functionality. There simply are certain
- * things that are done differently in FreeBSD than linux that will cause
- * problems for this driver regardless of any middle ware Dan implements.
- * The biggest example of this at the moment is interrupt semantics. Linux
- * doesn't provide the same protection techniques as FreeBSD does, nor can
- * they be easily implemented in any middle ware code since they would truly
- * belong in the kernel proper and would effect all drivers. For the time
- * being, I see issues such as these as major stumbling blocks to the
- * reliability of code based upon such middle ware. Therefore, I choose to
- * use a different approach to importing the FreeBSD code that doesn't
- * involve any middle ware type code. My approach is to import the sequencer
- * code from FreeBSD wholesale. Then, to only make changes in the kernel
- * portion of the driver as they are needed for the new sequencer semantics.
- * In this way, the portion of the driver that speaks to the rest of the
- * linux kernel is fairly static and can be changed/modified to solve
- * any problems one might encounter without concern for the FreeBSD driver.
- *
- * Note: If time and experience should prove me wrong that the middle ware
- * code Dan writes is reliable in its operation, then I'll retract my above
- * statements. But, for those that don't know, I'm from Missouri (in the US)
- * and our state motto is "The Show-Me State". Well, before I will put
- * faith into it, you'll have to show me that it works :)
- *
- *_M*************************************************************************/
-
-/*
- * The next three defines are user configurable. These should be the only
- * defines a user might need to get in here and change. There are other
- * defines buried deeper in the code, but those really shouldn't need touched
- * under normal conditions.
- */
-
-/*
- * AIC7XXX_STRICT_PCI_SETUP
- * Should we assume the PCI config options on our controllers are set with
- * sane and proper values, or should we be anal about our PCI config
- * registers and force them to what we want? The main advantage to
- * defining this option is on non-Intel hardware where the BIOS may not
- * have been run to set things up, or if you have one of the BIOSless
- * Adaptec controllers, such as a 2910, that don't get set up by the
- * BIOS. However, keep in mind that we really do set the most important
- * items in the driver regardless of this setting, this only controls some
- * of the more esoteric PCI options on these cards. In that sense, I
- * would default to leaving this off. However, if people wish to try
- * things both ways, that would also help me to know if there are some
- * machines where it works one way but not another.
- *
- * -- July 7, 17:09
- * OK...I need this on my machine for testing, so the default is to
- * leave it defined.
- *
- * -- July 7, 18:49
- * I needed it for testing, but it didn't make any difference, so back
- * off she goes.
- *
- * -- July 16, 23:04
- * I turned it back on to try and compensate for the 2.1.x PCI code
- * which no longer relies solely on the BIOS and now tries to set
- * things itself.
- */
-
-#define AIC7XXX_STRICT_PCI_SETUP
-
-/*
- * AIC7XXX_VERBOSE_DEBUGGING
- * This option enables a lot of extra printk();s in the code, surrounded
- * by if (aic7xxx_verbose ...) statements. Executing all of those if
- * statements and the extra checks can get to where it actually does have
- * an impact on CPU usage and such, as well as code size. Disabling this
- * define will keep some of those from becoming part of the code.
- *
- * NOTE: Currently, this option has no real effect, I will be adding the
- * various #ifdef's in the code later when I've decided a section is
- * complete and no longer needs debugging. OK...a lot of things are now
- * surrounded by this define, so turning this off does have an impact.
- */
-
-/*
- * #define AIC7XXX_VERBOSE_DEBUGGING
- */
-
-#include <linux/module.h>
-#include <stdarg.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/proc_fs.h>
-#include <linux/blkdev.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "aic7xxx_old/aic7xxx.h"
-
-#include "aic7xxx_old/sequencer.h"
-#include "aic7xxx_old/scsi_message.h"
-#include "aic7xxx_old/aic7xxx_reg.h"
-#include <scsi/scsicam.h>
-
-#include <linux/stat.h>
-#include <linux/slab.h> /* for kmalloc() */
-
-#define AIC7XXX_C_VERSION "5.2.6"
-
-#define ALL_TARGETS -1
-#define ALL_CHANNELS -1
-#define ALL_LUNS -1
-#define MAX_TARGETS 16
-#define MAX_LUNS 8
-#ifndef TRUE
-# define TRUE 1
-#endif
-#ifndef FALSE
-# define FALSE 0
-#endif
-
-#if defined(__powerpc__) || defined(__i386__) || defined(__x86_64__)
-# define MMAPIO
-#endif
-
-/*
- * You can try raising me for better performance or lowering me if you have
- * flaky devices that go off the scsi bus when hit with too many tagged
- * commands (like some IBM SCSI-3 LVD drives).
- */
-#define AIC7XXX_CMDS_PER_DEVICE 32
-
-typedef struct
-{
- unsigned char tag_commands[16]; /* Allow for wide/twin adapters. */
-} adapter_tag_info_t;
-
-/*
- * Make a define that will tell the driver not to the default tag depth
- * everywhere.
- */
-#define DEFAULT_TAG_COMMANDS {0, 0, 0, 0, 0, 0, 0, 0,\
- 0, 0, 0, 0, 0, 0, 0, 0}
-
-/*
- * Modify this as you see fit for your system. By setting tag_commands
- * to 0, the driver will use its own algorithm for determining the
- * number of commands to use (see above). When 255, the driver will
- * not enable tagged queueing for that particular device. When positive
- * (> 0) and (< 255) the values in the array are used for the queue_depth.
- * Note that the maximum value for an entry is 254, but you're insane if
- * you try to use that many commands on one device.
- *
- * In this example, the first line will disable tagged queueing for all
- * the devices on the first probed aic7xxx adapter.
- *
- * The second line enables tagged queueing with 4 commands/LUN for IDs
- * (1, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
- * driver to use its own algorithm for ID 1.
- *
- * The third line is the same as the first line.
- *
- * The fourth line disables tagged queueing for devices 0 and 3. It
- * enables tagged queueing for the other IDs, with 16 commands/LUN
- * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
- * IDs 2, 5-7, and 9-15.
- */
-
-/*
- * NOTE: The below structure is for reference only, the actual structure
- * to modify in order to change things is found after this fake one.
- *
-adapter_tag_info_t aic7xxx_tag_info[] =
-{
- {DEFAULT_TAG_COMMANDS},
- {{4, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 255, 4, 4, 4}},
- {DEFAULT_TAG_COMMANDS},
- {{255, 16, 4, 255, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}}
-};
-*/
-
-static adapter_tag_info_t aic7xxx_tag_info[] =
-{
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS}
-};
-
-
-/*
- * Define an array of board names that can be indexed by aha_type.
- * Don't forget to change this when changing the types!
- */
-static const char *board_names[] = {
- "AIC-7xxx Unknown", /* AIC_NONE */
- "Adaptec AIC-7810 Hardware RAID Controller", /* AIC_7810 */
- "Adaptec AIC-7770 SCSI host adapter", /* AIC_7770 */
- "Adaptec AHA-274X SCSI host adapter", /* AIC_7771 */
- "Adaptec AHA-284X SCSI host adapter", /* AIC_284x */
- "Adaptec AIC-7850 SCSI host adapter", /* AIC_7850 */
- "Adaptec AIC-7855 SCSI host adapter", /* AIC_7855 */
- "Adaptec AIC-7860 Ultra SCSI host adapter", /* AIC_7860 */
- "Adaptec AHA-2940A Ultra SCSI host adapter", /* AIC_7861 */
- "Adaptec AIC-7870 SCSI host adapter", /* AIC_7870 */
- "Adaptec AHA-294X SCSI host adapter", /* AIC_7871 */
- "Adaptec AHA-394X SCSI host adapter", /* AIC_7872 */
- "Adaptec AHA-398X SCSI host adapter", /* AIC_7873 */
- "Adaptec AHA-2944 SCSI host adapter", /* AIC_7874 */
- "Adaptec AIC-7880 Ultra SCSI host adapter", /* AIC_7880 */
- "Adaptec AHA-294X Ultra SCSI host adapter", /* AIC_7881 */
- "Adaptec AHA-394X Ultra SCSI host adapter", /* AIC_7882 */
- "Adaptec AHA-398X Ultra SCSI host adapter", /* AIC_7883 */
- "Adaptec AHA-2944 Ultra SCSI host adapter", /* AIC_7884 */
- "Adaptec AHA-2940UW Pro Ultra SCSI host adapter", /* AIC_7887 */
- "Adaptec AIC-7895 Ultra SCSI host adapter", /* AIC_7895 */
- "Adaptec AIC-7890/1 Ultra2 SCSI host adapter", /* AIC_7890 */
- "Adaptec AHA-293X Ultra2 SCSI host adapter", /* AIC_7890 */
- "Adaptec AHA-294X Ultra2 SCSI host adapter", /* AIC_7890 */
- "Adaptec AIC-7896/7 Ultra2 SCSI host adapter", /* AIC_7896 */
- "Adaptec AHA-394X Ultra2 SCSI host adapter", /* AIC_7897 */
- "Adaptec AHA-395X Ultra2 SCSI host adapter", /* AIC_7897 */
- "Adaptec PCMCIA SCSI controller", /* card bus stuff */
- "Adaptec AIC-7892 Ultra 160/m SCSI host adapter", /* AIC_7892 */
- "Adaptec AIC-7899 Ultra 160/m SCSI host adapter", /* AIC_7899 */
-};
-
-/*
- * There should be a specific return value for this in scsi.h, but
- * it seems that most drivers ignore it.
- */
-#define DID_UNDERFLOW DID_ERROR
-
-/*
- * What we want to do is have the higher level scsi driver requeue
- * the command to us. There is no specific driver status for this
- * condition, but the higher level scsi driver will requeue the
- * command on a DID_BUS_BUSY error.
- *
- * Upon further inspection and testing, it seems that DID_BUS_BUSY
- * will *always* retry the command. We can get into an infinite loop
- * if this happens when we really want some sort of counter that
- * will automatically abort/reset the command after so many retries.
- * Using DID_ERROR will do just that. (Made by a suggestion by
- * Doug Ledford 8/1/96)
- */
-#define DID_RETRY_COMMAND DID_ERROR
-
-#define HSCSIID 0x07
-#define SCSI_RESET 0x040
-
-/*
- * EISA/VL-bus stuff
- */
-#define MINSLOT 1
-#define MAXSLOT 15
-#define SLOTBASE(x) ((x) << 12)
-#define BASE_TO_SLOT(x) ((x) >> 12)
-
-/*
- * Standard EISA Host ID regs (Offset from slot base)
- */
-#define AHC_HID0 0x80 /* 0,1: msb of ID2, 2-7: ID1 */
-#define AHC_HID1 0x81 /* 0-4: ID3, 5-7: LSB ID2 */
-#define AHC_HID2 0x82 /* product */
-#define AHC_HID3 0x83 /* firmware revision */
-
-/*
- * AIC-7770 I/O range to reserve for a card
- */
-#define MINREG 0xC00
-#define MAXREG 0xCFF
-
-#define INTDEF 0x5C /* Interrupt Definition Register */
-
-/*
- * AIC-78X0 PCI registers
- */
-#define CLASS_PROGIF_REVID 0x08
-#define DEVREVID 0x000000FFul
-#define PROGINFC 0x0000FF00ul
-#define SUBCLASS 0x00FF0000ul
-#define BASECLASS 0xFF000000ul
-
-#define CSIZE_LATTIME 0x0C
-#define CACHESIZE 0x0000003Ful /* only 5 bits */
-#define LATTIME 0x0000FF00ul
-
-#define DEVCONFIG 0x40
-#define SCBSIZE32 0x00010000ul /* aic789X only */
-#define MPORTMODE 0x00000400ul /* aic7870 only */
-#define RAMPSM 0x00000200ul /* aic7870 only */
-#define RAMPSM_ULTRA2 0x00000004
-#define VOLSENSE 0x00000100ul
-#define SCBRAMSEL 0x00000080ul
-#define SCBRAMSEL_ULTRA2 0x00000008
-#define MRDCEN 0x00000040ul
-#define EXTSCBTIME 0x00000020ul /* aic7870 only */
-#define EXTSCBPEN 0x00000010ul /* aic7870 only */
-#define BERREN 0x00000008ul
-#define DACEN 0x00000004ul
-#define STPWLEVEL 0x00000002ul
-#define DIFACTNEGEN 0x00000001ul /* aic7870 only */
-
-#define SCAMCTL 0x1a /* Ultra2 only */
-#define CCSCBBADDR 0xf0 /* aic7895/6/7 */
-
-/*
- * Define the different types of SEEPROMs on aic7xxx adapters
- * and make it also represent the address size used in accessing
- * its registers. The 93C46 chips have 1024 bits organized into
- * 64 16-bit words, while the 93C56 chips have 2048 bits organized
- * into 128 16-bit words. The C46 chips use 6 bits to address
- * each word, while the C56 and C66 (4096 bits) use 8 bits to
- * address each word.
- */
-typedef enum {C46 = 6, C56_66 = 8} seeprom_chip_type;
-
-/*
- *
- * Define the format of the SEEPROM registers (16 bits).
- *
- */
-struct seeprom_config {
-
-/*
- * SCSI ID Configuration Flags
- */
-#define CFXFER 0x0007 /* synchronous transfer rate */
-#define CFSYNCH 0x0008 /* enable synchronous transfer */
-#define CFDISC 0x0010 /* enable disconnection */
-#define CFWIDEB 0x0020 /* wide bus device (wide card) */
-#define CFSYNCHISULTRA 0x0040 /* CFSYNC is an ultra offset */
-#define CFNEWULTRAFORMAT 0x0080 /* Use the Ultra2 SEEPROM format */
-#define CFSTART 0x0100 /* send start unit SCSI command */
-#define CFINCBIOS 0x0200 /* include in BIOS scan */
-#define CFRNFOUND 0x0400 /* report even if not found */
-#define CFMULTILUN 0x0800 /* probe mult luns in BIOS scan */
-#define CFWBCACHEYES 0x4000 /* Enable W-Behind Cache on drive */
-#define CFWBCACHENC 0xc000 /* Don't change W-Behind Cache */
-/* UNUSED 0x3000 */
- unsigned short device_flags[16]; /* words 0-15 */
-
-/*
- * BIOS Control Bits
- */
-#define CFSUPREM 0x0001 /* support all removable drives */
-#define CFSUPREMB 0x0002 /* support removable drives for boot only */
-#define CFBIOSEN 0x0004 /* BIOS enabled */
-/* UNUSED 0x0008 */
-#define CFSM2DRV 0x0010 /* support more than two drives */
-#define CF284XEXTEND 0x0020 /* extended translation (284x cards) */
-/* UNUSED 0x0040 */
-#define CFEXTEND 0x0080 /* extended translation enabled */
-/* UNUSED 0xFF00 */
- unsigned short bios_control; /* word 16 */
-
-/*
- * Host Adapter Control Bits
- */
-#define CFAUTOTERM 0x0001 /* Perform Auto termination */
-#define CFULTRAEN 0x0002 /* Ultra SCSI speed enable (Ultra cards) */
-#define CF284XSELTO 0x0003 /* Selection timeout (284x cards) */
-#define CF284XFIFO 0x000C /* FIFO Threshold (284x cards) */
-#define CFSTERM 0x0004 /* SCSI low byte termination */
-#define CFWSTERM 0x0008 /* SCSI high byte termination (wide card) */
-#define CFSPARITY 0x0010 /* SCSI parity */
-#define CF284XSTERM 0x0020 /* SCSI low byte termination (284x cards) */
-#define CFRESETB 0x0040 /* reset SCSI bus at boot */
-#define CFBPRIMARY 0x0100 /* Channel B primary on 7895 chipsets */
-#define CFSEAUTOTERM 0x0400 /* aic7890 Perform SE Auto Term */
-#define CFLVDSTERM 0x0800 /* aic7890 LVD Termination */
-/* UNUSED 0xF280 */
- unsigned short adapter_control; /* word 17 */
-
-/*
- * Bus Release, Host Adapter ID
- */
-#define CFSCSIID 0x000F /* host adapter SCSI ID */
-/* UNUSED 0x00F0 */
-#define CFBRTIME 0xFF00 /* bus release time */
- unsigned short brtime_id; /* word 18 */
-
-/*
- * Maximum targets
- */
-#define CFMAXTARG 0x00FF /* maximum targets */
-/* UNUSED 0xFF00 */
- unsigned short max_targets; /* word 19 */
-
- unsigned short res_1[11]; /* words 20-30 */
- unsigned short checksum; /* word 31 */
-};
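(Illustration only — this structure is being removed along with the old driver.) The per-target SEEPROM words above are interpreted by masking them with the CF* flag definitions; a trivial standalone sketch of how one such word would be tested, using the masks defined in the struct:

    #include <stdio.h>

    #define CFDISC   0x0010        /* enable disconnection */
    #define CFWIDEB  0x0020        /* wide bus device */

    int main(void)
    {
            unsigned short device_flags[16] = { [3] = CFDISC | CFWIDEB };
            int target = 3;

            if (device_flags[target] & CFDISC)
                    printf("target %d: disconnection enabled\n", target);
            if (device_flags[target] & CFWIDEB)
                    printf("target %d: wide device\n", target);
            return 0;
    }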
-
-#define SELBUS_MASK 0x0a
-#define SELNARROW 0x00
-#define SELBUSB 0x08
-#define SINGLE_BUS 0x00
-
-#define SCB_TARGET(scb) \
- (((scb)->hscb->target_channel_lun & TID) >> 4)
-#define SCB_LUN(scb) \
- ((scb)->hscb->target_channel_lun & LID)
-#define SCB_IS_SCSIBUS_B(scb) \
- (((scb)->hscb->target_channel_lun & SELBUSB) != 0)
-
-/*
- * If an error occurs during a data transfer phase, run the command
- * to completion - it's easier that way - making a note of the error
- * condition in this location. This then will modify a DID_OK status
- * into an appropriate error for the higher-level SCSI code.
- */
-#define aic7xxx_error(cmd) ((cmd)->SCp.Status)
-
-/*
- * Keep track of the targets returned status.
- */
-#define aic7xxx_status(cmd) ((cmd)->SCp.sent_command)
-
-/*
- * The position of the SCSI commands scb within the scb array.
- */
-#define aic7xxx_position(cmd) ((cmd)->SCp.have_data_in)
-
-/*
- * The stored DMA mapping for single-buffer data transfers.
- */
-#define aic7xxx_mapping(cmd) ((cmd)->SCp.phase)
-
-/*
- * Get out private data area from a scsi cmd pointer
- */
-#define AIC_DEV(cmd) ((struct aic_dev_data *)(cmd)->device->hostdata)
-
-/*
- * So we can keep track of our host structs
- */
-static struct aic7xxx_host *first_aic7xxx = NULL;
-
-/*
- * As of Linux 2.1, the mid-level SCSI code uses virtual addresses
- * in the scatter-gather lists. We need to convert the virtual
- * addresses to physical addresses.
- */
-struct hw_scatterlist {
- unsigned int address;
- unsigned int length;
-};
-
-/*
- * Maximum number of SG segments these cards can support.
- */
-#define AIC7XXX_MAX_SG 128
-
-/*
- * The maximum number of SCBs we could have for ANY type
- * of card. DON'T FORGET TO CHANGE THE SCB MASK IN THE
- * SEQUENCER CODE IF THIS IS MODIFIED!
- */
-#define AIC7XXX_MAXSCB 255
-
-
-struct aic7xxx_hwscb {
-/* ------------ Begin hardware supported fields ---------------- */
-/* 0*/ unsigned char control;
-/* 1*/ unsigned char target_channel_lun; /* 4/1/3 bits */
-/* 2*/ unsigned char target_status;
-/* 3*/ unsigned char SG_segment_count;
-/* 4*/ unsigned int SG_list_pointer;
-/* 8*/ unsigned char residual_SG_segment_count;
-/* 9*/ unsigned char residual_data_count[3];
-/*12*/ unsigned int data_pointer;
-/*16*/ unsigned int data_count;
-/*20*/ unsigned int SCSI_cmd_pointer;
-/*24*/ unsigned char SCSI_cmd_length;
-/*25*/ unsigned char tag; /* Index into our kernel SCB array.
- * Also used as the tag for tagged I/O
- */
-#define SCB_PIO_TRANSFER_SIZE 26 /* amount we need to upload/download
- * via PIO to initialize a transaction.
- */
-/*26*/ unsigned char next; /* Used to thread SCBs awaiting selection
- * or disconnected down in the sequencer.
- */
-/*27*/ unsigned char prev;
-/*28*/ unsigned int pad; /*
- * Unused by the kernel, but we require
- * the padding so that the array of
- * hardware SCBs is aligned on 32 byte
- * boundaries so the sequencer can index
- */
-};
-
-typedef enum {
- SCB_FREE = 0x0000,
- SCB_DTR_SCB = 0x0001,
- SCB_WAITINGQ = 0x0002,
- SCB_ACTIVE = 0x0004,
- SCB_SENSE = 0x0008,
- SCB_ABORT = 0x0010,
- SCB_DEVICE_RESET = 0x0020,
- SCB_RESET = 0x0040,
- SCB_RECOVERY_SCB = 0x0080,
- SCB_MSGOUT_PPR = 0x0100,
- SCB_MSGOUT_SENT = 0x0200,
- SCB_MSGOUT_SDTR = 0x0400,
- SCB_MSGOUT_WDTR = 0x0800,
- SCB_MSGOUT_BITS = SCB_MSGOUT_PPR |
- SCB_MSGOUT_SENT |
- SCB_MSGOUT_SDTR |
- SCB_MSGOUT_WDTR,
- SCB_QUEUED_ABORT = 0x1000,
- SCB_QUEUED_FOR_DONE = 0x2000,
- SCB_WAS_BUSY = 0x4000,
- SCB_QUEUE_FULL = 0x8000
-} scb_flag_type;
-
-typedef enum {
- AHC_FNONE = 0x00000000,
- AHC_PAGESCBS = 0x00000001,
- AHC_CHANNEL_B_PRIMARY = 0x00000002,
- AHC_USEDEFAULTS = 0x00000004,
- AHC_INDIRECT_PAGING = 0x00000008,
- AHC_CHNLB = 0x00000020,
- AHC_CHNLC = 0x00000040,
- AHC_EXTEND_TRANS_A = 0x00000100,
- AHC_EXTEND_TRANS_B = 0x00000200,
- AHC_TERM_ENB_A = 0x00000400,
- AHC_TERM_ENB_SE_LOW = 0x00000400,
- AHC_TERM_ENB_B = 0x00000800,
- AHC_TERM_ENB_SE_HIGH = 0x00000800,
- AHC_HANDLING_REQINITS = 0x00001000,
- AHC_TARGETMODE = 0x00002000,
- AHC_NEWEEPROM_FMT = 0x00004000,
- /*
- * Here ends the FreeBSD defined flags and here begins the linux defined
- * flags. NOTE: I did not preserve the old flag name during this change
- * specifically to force me to evaluate what flags were being used properly
- * and what flags weren't. This way, I could clean up the flag usage on
- * a use by use basis. Doug Ledford
- */
- AHC_MOTHERBOARD = 0x00020000,
- AHC_NO_STPWEN = 0x00040000,
- AHC_RESET_DELAY = 0x00080000,
- AHC_A_SCANNED = 0x00100000,
- AHC_B_SCANNED = 0x00200000,
- AHC_MULTI_CHANNEL = 0x00400000,
- AHC_BIOS_ENABLED = 0x00800000,
- AHC_SEEPROM_FOUND = 0x01000000,
- AHC_TERM_ENB_LVD = 0x02000000,
- AHC_ABORT_PENDING = 0x04000000,
- AHC_RESET_PENDING = 0x08000000,
-#define AHC_IN_ISR_BIT 28
- AHC_IN_ISR = 0x10000000,
- AHC_IN_ABORT = 0x20000000,
- AHC_IN_RESET = 0x40000000,
- AHC_EXTERNAL_SRAM = 0x80000000
-} ahc_flag_type;
-
-typedef enum {
- AHC_NONE = 0x0000,
- AHC_CHIPID_MASK = 0x00ff,
- AHC_AIC7770 = 0x0001,
- AHC_AIC7850 = 0x0002,
- AHC_AIC7860 = 0x0003,
- AHC_AIC7870 = 0x0004,
- AHC_AIC7880 = 0x0005,
- AHC_AIC7890 = 0x0006,
- AHC_AIC7895 = 0x0007,
- AHC_AIC7896 = 0x0008,
- AHC_AIC7892 = 0x0009,
- AHC_AIC7899 = 0x000a,
- AHC_VL = 0x0100,
- AHC_EISA = 0x0200,
- AHC_PCI = 0x0400,
-} ahc_chip;
-
-typedef enum {
- AHC_FENONE = 0x0000,
- AHC_ULTRA = 0x0001,
- AHC_ULTRA2 = 0x0002,
- AHC_WIDE = 0x0004,
- AHC_TWIN = 0x0008,
- AHC_MORE_SRAM = 0x0010,
- AHC_CMD_CHAN = 0x0020,
- AHC_QUEUE_REGS = 0x0040,
- AHC_SG_PRELOAD = 0x0080,
- AHC_SPIOCAP = 0x0100,
- AHC_ULTRA3 = 0x0200,
- AHC_NEW_AUTOTERM = 0x0400,
- AHC_AIC7770_FE = AHC_FENONE,
- AHC_AIC7850_FE = AHC_SPIOCAP,
- AHC_AIC7860_FE = AHC_ULTRA|AHC_SPIOCAP,
- AHC_AIC7870_FE = AHC_FENONE,
- AHC_AIC7880_FE = AHC_ULTRA,
- AHC_AIC7890_FE = AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA2|
- AHC_QUEUE_REGS|AHC_SG_PRELOAD|AHC_NEW_AUTOTERM,
- AHC_AIC7895_FE = AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA,
- AHC_AIC7896_FE = AHC_AIC7890_FE,
- AHC_AIC7892_FE = AHC_AIC7890_FE|AHC_ULTRA3,
- AHC_AIC7899_FE = AHC_AIC7890_FE|AHC_ULTRA3,
-} ahc_feature;
-
-#define SCB_DMA_ADDR(scb, addr) ((unsigned long)(addr) + (scb)->scb_dma->dma_offset)
-
-struct aic7xxx_scb_dma {
- unsigned long dma_offset; /* Correction you have to add
- * to virtual address to get
- * dma handle in this region */
- dma_addr_t dma_address; /* DMA handle of the start,
- * for unmap */
- unsigned int dma_len; /* DMA length */
-};
-
-typedef enum {
- AHC_BUG_NONE = 0x0000,
- AHC_BUG_TMODE_WIDEODD = 0x0001,
- AHC_BUG_AUTOFLUSH = 0x0002,
- AHC_BUG_CACHETHEN = 0x0004,
- AHC_BUG_CACHETHEN_DIS = 0x0008,
- AHC_BUG_PCI_2_1_RETRY = 0x0010,
- AHC_BUG_PCI_MWI = 0x0020,
- AHC_BUG_SCBCHAN_UPLOAD = 0x0040,
-} ahc_bugs;
-
-struct aic7xxx_scb {
- struct aic7xxx_hwscb *hscb; /* corresponding hardware scb */
- struct scsi_cmnd *cmd; /* scsi_cmnd for this scb */
- struct aic7xxx_scb *q_next; /* next scb in queue */
- volatile scb_flag_type flags; /* current state of scb */
- struct hw_scatterlist *sg_list; /* SG list in adapter format */
- unsigned char tag_action;
- unsigned char sg_count;
- unsigned char *sense_cmd; /*
- * Allocate 6 characters for
- * sense command.
- */
- unsigned char *cmnd;
- unsigned int sg_length; /*
- * We init this during
- * buildscb so we don't have
- * to calculate anything during
- * underflow/overflow/stat code
- */
- void *kmalloc_ptr;
- struct aic7xxx_scb_dma *scb_dma;
-};
-
-/*
- * Define a linked list of SCBs.
- */
-typedef struct {
- struct aic7xxx_scb *head;
- struct aic7xxx_scb *tail;
-} scb_queue_type;
-
-static struct {
- unsigned char errno;
- const char *errmesg;
-} hard_error[] = {
- { ILLHADDR, "Illegal Host Access" },
- { ILLSADDR, "Illegal Sequencer Address referenced" },
- { ILLOPCODE, "Illegal Opcode in sequencer program" },
- { SQPARERR, "Sequencer Ram Parity Error" },
- { DPARERR, "Data-Path Ram Parity Error" },
- { MPARERR, "Scratch Ram/SCB Array Ram Parity Error" },
- { PCIERRSTAT,"PCI Error detected" },
- { CIOPARERR, "CIOBUS Parity Error" }
-};
-
-static unsigned char
-generic_sense[] = { REQUEST_SENSE, 0, 0, 0, 255, 0 };
-
-typedef struct {
- scb_queue_type free_scbs; /*
- * SCBs assigned to free slot on
- * card (no paging required)
- */
- struct aic7xxx_scb *scb_array[AIC7XXX_MAXSCB];
- struct aic7xxx_hwscb *hscbs;
- unsigned char numscbs; /* current number of scbs */
- unsigned char maxhscbs; /* hardware scbs */
- unsigned char maxscbs; /* max scbs including pageable scbs */
- dma_addr_t hscbs_dma; /* DMA handle to hscbs */
- unsigned int hscbs_dma_len; /* length of the above DMA area */
- void *hscb_kmalloc_ptr;
-} scb_data_type;
-
-struct target_cmd {
- unsigned char mesg_bytes[4];
- unsigned char command[28];
-};
-
-#define AHC_TRANS_CUR 0x0001
-#define AHC_TRANS_ACTIVE 0x0002
-#define AHC_TRANS_GOAL 0x0004
-#define AHC_TRANS_USER 0x0008
-#define AHC_TRANS_QUITE 0x0010
-typedef struct {
- unsigned char width;
- unsigned char period;
- unsigned char offset;
- unsigned char options;
-} transinfo_type;
-
-struct aic_dev_data {
- volatile scb_queue_type delayed_scbs;
- volatile unsigned short temp_q_depth;
- unsigned short max_q_depth;
- volatile unsigned char active_cmds;
- /*
- * Statistics Kept:
- *
- * Total Xfers (count for each command that has a data xfer),
- * broken down by reads && writes.
- *
- * Further sorted into a few bins for keeping tabs on how many commands
- * we get of various sizes.
- *
- */
- long w_total; /* total writes */
- long r_total; /* total reads */
- long barrier_total; /* total num of REQ_BARRIER commands */
- long ordered_total; /* How many REQ_BARRIER commands we
- used ordered tags to satisfy */
- long w_bins[6]; /* binned write */
- long r_bins[6]; /* binned reads */
- transinfo_type cur;
- transinfo_type goal;
-#define BUS_DEVICE_RESET_PENDING 0x01
-#define DEVICE_RESET_DELAY 0x02
-#define DEVICE_PRINT_DTR 0x04
-#define DEVICE_WAS_BUSY 0x08
-#define DEVICE_DTR_SCANNED 0x10
-#define DEVICE_SCSI_3 0x20
- volatile unsigned char flags;
- unsigned needppr:1;
- unsigned needppr_copy:1;
- unsigned needsdtr:1;
- unsigned needsdtr_copy:1;
- unsigned needwdtr:1;
- unsigned needwdtr_copy:1;
- unsigned dtr_pending:1;
- struct scsi_device *SDptr;
- struct list_head list;
-};
-
-/*
- * Define a structure used for each host adapter. Note, in order to avoid
- * problems with architectures I can't test on (because I don't have one,
- * such as the Alpha based systems) which happen to give faults for
- * non-aligned memory accesses, care was taken to align this structure
- * in a way that guaranteed all accesses larger than 8 bits were aligned
- * on the appropriate boundary. It's also organized to try and be more
- * cache line efficient. Be careful when changing this lest you might hurt
- * overall performance and bring down the wrath of the masses.
- */
-struct aic7xxx_host {
- /*
- * This is the first 64 bytes in the host struct
- */
-
- /*
- * We are grouping things here....first, items that get either read or
- * written with nearly every interrupt
- */
- volatile long flags;
- ahc_feature features; /* chip features */
- unsigned long base; /* card base address */
- volatile unsigned char __iomem *maddr; /* memory mapped address */
- unsigned long isr_count; /* Interrupt count */
- unsigned long spurious_int;
- scb_data_type *scb_data;
- struct aic7xxx_cmd_queue {
- struct scsi_cmnd *head;
- struct scsi_cmnd *tail;
- } completeq;
-
- /*
- * Things read/written on nearly every entry into aic7xxx_queue()
- */
- volatile scb_queue_type waiting_scbs;
- unsigned char unpause; /* unpause value for HCNTRL */
- unsigned char pause; /* pause value for HCNTRL */
- volatile unsigned char qoutfifonext;
- volatile unsigned char activescbs; /* active scbs */
- volatile unsigned char max_activescbs;
- volatile unsigned char qinfifonext;
- volatile unsigned char *untagged_scbs;
- volatile unsigned char *qoutfifo;
- volatile unsigned char *qinfifo;
-
- unsigned char dev_last_queue_full[MAX_TARGETS];
- unsigned char dev_last_queue_full_count[MAX_TARGETS];
- unsigned short ultraenb; /* Gets downloaded to card as a bitmap */
- unsigned short discenable; /* Gets downloaded to card as a bitmap */
- transinfo_type user[MAX_TARGETS];
-
- unsigned char msg_buf[13]; /* The message for the target */
- unsigned char msg_type;
-#define MSG_TYPE_NONE 0x00
-#define MSG_TYPE_INITIATOR_MSGOUT 0x01
-#define MSG_TYPE_INITIATOR_MSGIN 0x02
- unsigned char msg_len; /* Length of message */
- unsigned char msg_index; /* Index into msg_buf array */
-
-
- /*
- * We put the less frequently used host structure items
- * after the more frequently used items to try and ease
- * the burden on the cache subsystem.
- * These entries are not *commonly* accessed, whereas
- * the preceding entries are accessed very often.
- */
-
- unsigned int irq; /* IRQ for this adapter */
- int instance; /* aic7xxx instance number */
- int scsi_id; /* host adapter SCSI ID */
- int scsi_id_b; /* channel B for twin adapters */
- unsigned int bios_address;
- int board_name_index;
- unsigned short bios_control; /* bios control - SEEPROM */
- unsigned short adapter_control; /* adapter control - SEEPROM */
- struct pci_dev *pdev;
- unsigned char pci_bus;
- unsigned char pci_device_fn;
- struct seeprom_config sc;
- unsigned short sc_type;
- unsigned short sc_size;
- struct aic7xxx_host *next; /* allow for multiple IRQs */
- struct Scsi_Host *host; /* pointer to scsi host */
- struct list_head aic_devs; /* all aic_dev structs on host */
- int host_no; /* SCSI host number */
- unsigned long mbase; /* I/O memory address */
- ahc_chip chip; /* chip type */
- ahc_bugs bugs;
- dma_addr_t fifo_dma; /* DMA handle for fifo arrays */
-};
-
-/*
- * Valid SCSIRATE values. (p. 3-17)
- * Provides a mapping of transfer periods in ns/4 to the proper value to
- * stick in the SCSIRATE reg to use that transfer rate.
- */
-#define AHC_SYNCRATE_ULTRA3 0
-#define AHC_SYNCRATE_ULTRA2 1
-#define AHC_SYNCRATE_ULTRA 3
-#define AHC_SYNCRATE_FAST 6
-#define AHC_SYNCRATE_CRC 0x40
-#define AHC_SYNCRATE_SE 0x10
-static struct aic7xxx_syncrate {
- /* Rates in Ultra mode have bit 8 of sxfr set */
-#define ULTRA_SXFR 0x100
- int sxfr_ultra2;
- int sxfr;
- unsigned char period;
- const char *rate[2];
-} aic7xxx_syncrates[] = {
- { 0x42, 0x000, 9, {"80.0", "160.0"} },
- { 0x13, 0x000, 10, {"40.0", "80.0"} },
- { 0x14, 0x000, 11, {"33.0", "66.6"} },
- { 0x15, 0x100, 12, {"20.0", "40.0"} },
- { 0x16, 0x110, 15, {"16.0", "32.0"} },
- { 0x17, 0x120, 18, {"13.4", "26.8"} },
- { 0x18, 0x000, 25, {"10.0", "20.0"} },
- { 0x19, 0x010, 31, {"8.0", "16.0"} },
- { 0x1a, 0x020, 37, {"6.67", "13.3"} },
- { 0x1b, 0x030, 43, {"5.7", "11.4"} },
- { 0x10, 0x040, 50, {"5.0", "10.0"} },
- { 0x00, 0x050, 56, {"4.4", "8.8" } },
- { 0x00, 0x060, 62, {"4.0", "8.0" } },
- { 0x00, 0x070, 68, {"3.6", "7.2" } },
- { 0x00, 0x000, 0, {NULL, NULL} },
-};
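-/*
- * Note on how the table above is used (summarizing aic7xxx_find_syncrate()
- * and aic7xxx_find_period() below, nothing new): the AHC_SYNCRATE_ULTRA3,
- * _ULTRA2, _ULTRA and _FAST defines are starting indices into
- * aic7xxx_syncrates[], so passing one of them as "maxsync" simply caps the
- * fastest entry the lookup is allowed to return.
- */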
-
-#define CTL_OF_SCB(scb) (((scb->hscb)->target_channel_lun >> 3) & 0x1), \
- (((scb->hscb)->target_channel_lun >> 4) & 0xf), \
- ((scb->hscb)->target_channel_lun & 0x07)
-
-#define CTL_OF_CMD(cmd) ((cmd->device->channel) & 0x01), \
- ((cmd->device->id) & 0x0f), \
- ((cmd->device->lun) & 0x07)
-
-#define TARGET_INDEX(cmd) ((cmd)->device->id | ((cmd)->device->channel << 3))
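-/*
- * As the CTL_OF_SCB macro above implies (the layout is inferred from its
- * shifts and masks, it is not spelled out anywhere else in this file), the
- * hardware SCB's target_channel_lun byte packs its addressing as:
- *
- *   bits 7-4  target ID
- *   bit  3    channel (0 = A, 1 = B)
- *   bits 2-0  LUN
- */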
-
-/*
- * A nice little define to make doing our printks a little easier
- */
-
-#define WARN_LEAD KERN_WARNING "(scsi%d:%d:%d:%d) "
-#define INFO_LEAD KERN_INFO "(scsi%d:%d:%d:%d) "
-
-/*
- * XXX - these options apply unilaterally to _all_ 274x/284x/294x
- * cards in the system. This should be fixed. Exceptions to this
- * rule are noted in the comments.
- */
-
-/*
- * Use this as the default queue depth when setting tagged queueing on.
- */
-static unsigned int aic7xxx_default_queue_depth = AIC7XXX_CMDS_PER_DEVICE;
-
-/*
- * Skip the scsi bus reset. A non-0 value makes us skip the reset at startup. This
- * has no effect on any later resets that might occur due to things like
- * SCSI bus timeouts.
- */
-static unsigned int aic7xxx_no_reset = 0;
-/*
- * Certain PCI motherboards will scan PCI devices from highest to lowest,
- * others scan from lowest to highest, and they tend to do all kinds of
- * strange things when they come into contact with PCI bridge chips. The
- * net result of all this is that the PCI card that is actually used to boot
- * the machine is very hard to detect. Most motherboards go from lowest
- * PCI slot number to highest, and the first SCSI controller found is the
- * one you boot from. The only exceptions to this are when a controller
- * has its BIOS disabled. So, by default we sort all of our SCSI controllers
- * from lowest PCI slot number to highest PCI slot number. We also force
- * all controllers with their BIOS disabled to the end of the list. This
- * works on *almost* all computers. Where it doesn't work, we have this
- * option. Setting this option to non-0 will reverse the order of the sort
- * to highest first, then lowest, but will still leave cards with their BIOS
- * disabled at the very end. That should fix everyone up unless there are
- * really strange circumstances.
- */
-static int aic7xxx_reverse_scan = 0;
-/*
- * Should we force EXTENDED translation on a controller.
- * 0 == Use whatever is in the SEEPROM or default to off
- * 1 == Use whatever is in the SEEPROM or default to on
- */
-static unsigned int aic7xxx_extended = 0;
-/*
- * The IRQ trigger method used on EISA controllers. Does not affect PCI cards.
- * -1 = Use detected settings.
- * 0 = Force Edge triggered mode.
- * 1 = Force Level triggered mode.
- */
-static int aic7xxx_irq_trigger = -1;
-/*
- * This variable is used to override the termination settings on a controller.
- * This should not be used under normal conditions. However, in the case
- * that a controller does not have a readable SEEPROM (so that we can't
- * read the SEEPROM settings directly) and that a controller has a buggered
- * version of the cable detection logic, this can be used to force the
- * correct termination. It is preferable to use the manual termination
- * settings in the BIOS if possible, but some motherboard controllers store
- * those settings in a format we can't read. In other cases, auto term
- * should also work, but the chipset was put together with no auto term
- * logic (common on motherboard controllers). In those cases, we have
- * 32 bits here to work with. That's good for 8 controllers/channels. The
- * bits are organized as 4 bits per channel, with scsi0 getting the lowest
- * 4 bits in the int. A 1 in a bit position indicates the termination setting
- * that corresponds to that bit should be enabled, a 0 is disabled.
- * It looks something like this:
- *
- * 0x0f = 1111-Single Ended Low Byte Termination on/off
- * ||\-Single Ended High Byte Termination on/off
- * |\-LVD Low Byte Termination on/off
- * \-LVD High Byte Termination on/off
- *
- * For non-Ultra2 controllers, the upper 2 bits are not important. So, to
- * enable both high byte and low byte termination on scsi0, I would need to
- * make sure that the override_term variable was set to 0x03 (bits 0011).
- * To make sure that all termination is enabled on an Ultra2 controller at
- * scsi2 and only high byte termination on scsi1 and high and low byte
- * termination on scsi0, I would set override_term=0xf23 (bits 1111 0010 0011)
- *
- * For the most part, users should never have to use this; that's why I
- * left it fairly cryptic instead of easy to understand. If you need it,
- * most likely someone will be telling you what yours needs to be set to.
- */
-static int aic7xxx_override_term = -1;
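-/*
- * Purely illustrative sketch (not part of the driver): building the 0xf23
- * example from the comment above, one 4-bit nibble per channel, would look
- * roughly like this:
- *
- *   override_term  = 0x3 << (0 * 4);   scsi0: SE low + SE high byte
- *   override_term |= 0x2 << (1 * 4);   scsi1: SE high byte only
- *   override_term |= 0xf << (2 * 4);   scsi2: all four termination bits
- *
- * which yields 0xf23, i.e. bits 1111 0010 0011 as described above.
- */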
-/*
- * Certain motherboard chipset controllers tend to screw
- * up the polarity of the term enable output pin. Use this variable
- * to force the correct polarity for your system. This is a bitfield variable
- * similar to the previous one, but this one has one bit per channel instead
- * of four.
- * 0 = Force the setting to active low.
- * 1 = Force setting to active high.
- * Most Adaptec cards are active high, several motherboards are active low.
- * To force a 2940 card at SCSI 0 to active high and a motherboard 7895
- * controller at scsi1 and scsi2 to active low, and a 2910 card at scsi3
- * to active high, you would need to set stpwlev=0x9 (bits 1001).
- *
- * People shouldn't need to use this, but if you are experiencing lots of
- * SCSI timeout problems, this may help. There is one sure way to test what
- * this option needs to be. Using a boot floppy to boot the system, configure
- * your system to enable all SCSI termination (in the Adaptec SCSI BIOS) and,
- * if needed then also pass a value to override_term to make sure that the
- * driver is enabling SCSI termination, then set this variable to either 0
- * or 1. When the driver boots, make sure there are *NO* SCSI cables
- * connected to your controller. If it finds and inits the controller
- * without problem, then the setting you passed to stpwlev was correct. If
- * the driver goes into a reset loop and hangs the system, then you need the
- * other setting for this variable. If neither setting lets the machine
- * boot then you have definite termination problems that may not be fixable.
- */
-static int aic7xxx_stpwlev = -1;
-/*
- * Set this to non-0 in order to force the driver to panic the kernel
- * and print out debugging info on a SCSI abort or reset cycle.
- */
-static int aic7xxx_panic_on_abort = 0;
-/*
- * PCI bus parity checking of the Adaptec controllers. This is somewhat
- * dubious at best. To my knowledge, this option has never actually
- * solved a PCI parity problem, but on certain machines with broken PCI
- * chipset configurations, it can generate tons of false error messages.
- * It's included in the driver for completeness.
- * 0 = Shut off PCI parity check
- * -1 = Normal polarity pci parity checking
- * 1 = reverse polarity pci parity checking
- *
- * NOTE: you can't actually pass -1 on the lilo prompt. So, to set this
- * variable to -1 you would actually want to simply pass the variable
- * name without a number. That will invert the 0 which will result in
- * -1.
- */
-static int aic7xxx_pci_parity = 0;
-/*
- * Set this to any non-0 value to cause us to dump the contents of all
- * the card's registers in a hex dump format tailored to each model of
- * controller.
- *
- * NOTE: THE CONTROLLER IS LEFT IN AN UNUSABLE STATE BY THIS OPTION.
- * YOU CANNOT BOOT UP WITH THIS OPTION, IT IS FOR DEBUGGING PURPOSES
- * ONLY
- */
-static int aic7xxx_dump_card = 0;
-/*
- * Set this to a non-0 value to make us dump out the 32 bit instruction
- * registers on the card after completing the sequencer download. This
- * allows the actual sequencer download to be verified. It is possible
- * to use this option and still boot up and run your system. This is
- * only intended for debugging purposes.
- */
-static int aic7xxx_dump_sequencer = 0;
-/*
- * Certain newer motherboards have put new PCI based devices into the
- * IO spaces that used to typically be occupied by VLB or EISA cards.
- * This overlap can cause these newer motherboards to lock up when scanned
- * for older EISA and VLB devices. Setting this option to non-0 will
- * cause the driver to skip scanning for any VLB or EISA controllers and
- * only support the PCI controllers. NOTE: this means that if the kernel
- * is compiled with PCI support disabled, then setting this to non-0
- * would result in never finding any devices :)
- */
-static int aic7xxx_no_probe = 0;
-/*
- * On some machines, enabling the external SCB RAM isn't reliable yet. I
- * haven't had time to make test patches for things like changing the
- * timing mode on that external RAM either. Some of those changes may
- * fix the problem. Until then though, we default to external SCB RAM
- * off and give a command line option to enable it.
- */
-static int aic7xxx_scbram = 0;
-/*
- * So that we can set how long each device is given as a selection timeout.
- * The table of values goes like this:
- * 0 - 256ms
- * 1 - 128ms
- * 2 - 64ms
- * 3 - 32ms
- * We default to 64ms because it's fast. Some old SCSI-I devices need a
- * longer time. The chosen table value has to be left shifted by 3, hence
- * 0x10 is the stored default.
- */
-static int aic7xxx_seltime = 0x10;
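-/*
- * Worked example (illustrative, matching the handling in aic7xxx_setup()
- * below): booting with "aic7xxx=seltime:2" picks the 64ms entry and stores
- * (2 % 4) << 3 = 0x10, which is also the default above.
- */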
-/*
- * So that insmod can find the variable and make it point to something
- */
-#ifdef MODULE
-static char * aic7xxx = NULL;
-module_param(aic7xxx, charp, 0);
-#endif
-
-#define VERBOSE_NORMAL 0x0000
-#define VERBOSE_NEGOTIATION 0x0001
-#define VERBOSE_SEQINT 0x0002
-#define VERBOSE_SCSIINT 0x0004
-#define VERBOSE_PROBE 0x0008
-#define VERBOSE_PROBE2 0x0010
-#define VERBOSE_NEGOTIATION2 0x0020
-#define VERBOSE_MINOR_ERROR 0x0040
-#define VERBOSE_TRACING 0x0080
-#define VERBOSE_ABORT 0x0f00
-#define VERBOSE_ABORT_MID 0x0100
-#define VERBOSE_ABORT_FIND 0x0200
-#define VERBOSE_ABORT_PROCESS 0x0400
-#define VERBOSE_ABORT_RETURN 0x0800
-#define VERBOSE_RESET 0xf000
-#define VERBOSE_RESET_MID 0x1000
-#define VERBOSE_RESET_FIND 0x2000
-#define VERBOSE_RESET_PROCESS 0x4000
-#define VERBOSE_RESET_RETURN 0x8000
-static int aic7xxx_verbose = VERBOSE_NORMAL | VERBOSE_NEGOTIATION |
- VERBOSE_PROBE; /* verbose messages */
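-/*
- * For reference (derived from the defines above): the bare "verbose" option
- * in aic7xxx_setup() below sets this to 0xff29, i.e. VERBOSE_NEGOTIATION |
- * VERBOSE_PROBE | VERBOSE_NEGOTIATION2 plus every VERBOSE_ABORT_* and
- * VERBOSE_RESET_* bit.
- */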
-
-
-/****************************************************************************
- *
- * We're going to start putting in function declarations so that order of
- * functions is no longer important. As needed, they are added here.
- *
- ***************************************************************************/
-
-static int aic7xxx_release(struct Scsi_Host *host);
-static void aic7xxx_set_syncrate(struct aic7xxx_host *p,
- struct aic7xxx_syncrate *syncrate, int target, int channel,
- unsigned int period, unsigned int offset, unsigned char options,
- unsigned int type, struct aic_dev_data *aic_dev);
-static void aic7xxx_set_width(struct aic7xxx_host *p, int target, int channel,
- int lun, unsigned int width, unsigned int type,
- struct aic_dev_data *aic_dev);
-static void aic7xxx_panic_abort(struct aic7xxx_host *p, struct scsi_cmnd *cmd);
-static void aic7xxx_print_card(struct aic7xxx_host *p);
-static void aic7xxx_print_scratch_ram(struct aic7xxx_host *p);
-static void aic7xxx_print_sequencer(struct aic7xxx_host *p, int downloaded);
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
-static void aic7xxx_check_scbs(struct aic7xxx_host *p, char *buffer);
-#endif
-
-/****************************************************************************
- *
- * These functions are now used. They happen to be wrapped in useless
- * inb/outb port read/writes around the real reads and writes because it
- * seems that certain very fast CPUs have a problem dealing with us when
- * going at full speed.
- *
- ***************************************************************************/
-
-static unsigned char
-aic_inb(struct aic7xxx_host *p, long port)
-{
-#ifdef MMAPIO
- unsigned char x;
- if(p->maddr)
- {
- x = readb(p->maddr + port);
- }
- else
- {
- x = inb(p->base + port);
- }
- return(x);
-#else
- return(inb(p->base + port));
-#endif
-}
-
-static void
-aic_outb(struct aic7xxx_host *p, unsigned char val, long port)
-{
-#ifdef MMAPIO
- if(p->maddr)
- {
- writeb(val, p->maddr + port);
- mb(); /* locked operation in order to force CPU ordering */
- readb(p->maddr + HCNTRL); /* dummy read to flush the PCI write */
- }
- else
- {
- outb(val, p->base + port);
- mb(); /* locked operation in order to force CPU ordering */
- }
-#else
- outb(val, p->base + port);
- mb(); /* locked operation in order to force CPU ordering */
-#endif
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_setup
- *
- * Description:
- * Handle Linux boot parameters. This routine allows for assigning a value
- * to a parameter with a ':' between the parameter and the value.
- * e.g. aic7xxx=unpause:0x0A,extended
- *-F*************************************************************************/
-static int
-aic7xxx_setup(char *s)
-{
- int i, n;
- char *p;
- char *end;
-
- static struct {
- const char *name;
- unsigned int *flag;
- } options[] = {
- { "extended", &aic7xxx_extended },
- { "no_reset", &aic7xxx_no_reset },
- { "irq_trigger", &aic7xxx_irq_trigger },
- { "verbose", &aic7xxx_verbose },
- { "reverse_scan",&aic7xxx_reverse_scan },
- { "override_term", &aic7xxx_override_term },
- { "stpwlev", &aic7xxx_stpwlev },
- { "no_probe", &aic7xxx_no_probe },
- { "panic_on_abort", &aic7xxx_panic_on_abort },
- { "pci_parity", &aic7xxx_pci_parity },
- { "dump_card", &aic7xxx_dump_card },
- { "dump_sequencer", &aic7xxx_dump_sequencer },
- { "default_queue_depth", &aic7xxx_default_queue_depth },
- { "scbram", &aic7xxx_scbram },
- { "seltime", &aic7xxx_seltime },
- { "tag_info", NULL }
- };
-
- end = strchr(s, '\0');
-
- while ((p = strsep(&s, ",.")) != NULL)
- {
- for (i = 0; i < ARRAY_SIZE(options); i++)
- {
- n = strlen(options[i].name);
- if (!strncmp(options[i].name, p, n))
- {
- if (!strncmp(p, "tag_info", n))
- {
- if (p[n] == ':')
- {
- char *base;
- char *tok, *tok_end, *tok_end2;
- char tok_list[] = { '.', ',', '{', '}', '\0' };
- int i, instance = -1, device = -1;
- unsigned char done = FALSE;
-
- base = p;
- tok = base + n + 1; /* Forward us just past the ':' */
- tok_end = strchr(tok, '\0');
- if (tok_end < end)
- *tok_end = ',';
- while(!done)
- {
- switch(*tok)
- {
- case '{':
- if (instance == -1)
- instance = 0;
- else if (device == -1)
- device = 0;
- tok++;
- break;
- case '}':
- if (device != -1)
- device = -1;
- else if (instance != -1)
- instance = -1;
- tok++;
- break;
- case ',':
- case '.':
- if (instance == -1)
- done = TRUE;
- else if (device >= 0)
- device++;
- else if (instance >= 0)
- instance++;
- if ( (device >= MAX_TARGETS) ||
- (instance >= ARRAY_SIZE(aic7xxx_tag_info)) )
- done = TRUE;
- tok++;
- if (!done)
- {
- base = tok;
- }
- break;
- case '\0':
- done = TRUE;
- break;
- default:
- done = TRUE;
- tok_end = strchr(tok, '\0');
- for(i=0; tok_list[i]; i++)
- {
- tok_end2 = strchr(tok, tok_list[i]);
- if ( (tok_end2) && (tok_end2 < tok_end) )
- {
- tok_end = tok_end2;
- done = FALSE;
- }
- }
- if ( (instance >= 0) && (device >= 0) &&
- (instance < ARRAY_SIZE(aic7xxx_tag_info)) &&
- (device < MAX_TARGETS) )
- aic7xxx_tag_info[instance].tag_commands[device] =
- simple_strtoul(tok, NULL, 0) & 0xff;
- tok = tok_end;
- break;
- }
- }
- while((p != base) && (p != NULL))
- p = strsep(&s, ",.");
- }
- }
- else if (p[n] == ':')
- {
- *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
- if(!strncmp(p, "seltime", n))
- {
- *(options[i].flag) = (*(options[i].flag) % 4) << 3;
- }
- }
- else if (!strncmp(p, "verbose", n))
- {
- *(options[i].flag) = 0xff29;
- }
- else
- {
- *(options[i].flag) = ~(*(options[i].flag));
- if(!strncmp(p, "seltime", n))
- {
- *(options[i].flag) = (*(options[i].flag) % 4) << 3;
- }
- }
- }
- }
- }
- return 1;
-}
-
-__setup("aic7xxx=", aic7xxx_setup);
-
-/*+F*************************************************************************
- * Function:
- * pause_sequencer
- *
- * Description:
- * Pause the sequencer and wait for it to actually stop - this
- * is important since the sequencer can disable pausing for critical
- * sections.
- *-F*************************************************************************/
-static void
-pause_sequencer(struct aic7xxx_host *p)
-{
- aic_outb(p, p->pause, HCNTRL);
- while ((aic_inb(p, HCNTRL) & PAUSE) == 0)
- {
- ;
- }
- if(p->features & AHC_ULTRA2)
- {
- aic_inb(p, CCSCBCTL);
- }
-}
-
-/*+F*************************************************************************
- * Function:
- * unpause_sequencer
- *
- * Description:
- * Unpause the sequencer. Unremarkable, yet done often enough to
- * warrant an easy way to do it.
- *-F*************************************************************************/
-static void
-unpause_sequencer(struct aic7xxx_host *p, int unpause_always)
-{
- if (unpause_always ||
- ( !(aic_inb(p, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) &&
- !(p->flags & AHC_HANDLING_REQINITS) ) )
- {
- aic_outb(p, p->unpause, HCNTRL);
- }
-}
-
-/*+F*************************************************************************
- * Function:
- * restart_sequencer
- *
- * Description:
- * Restart the sequencer program from address zero. This assumes
- * that the sequencer is already paused.
- *-F*************************************************************************/
-static void
-restart_sequencer(struct aic7xxx_host *p)
-{
- aic_outb(p, 0, SEQADDR0);
- aic_outb(p, 0, SEQADDR1);
- aic_outb(p, FASTMODE, SEQCTL);
-}
-
-/*
- * We include the aic7xxx_seq.c file here so that the other defines have
- * already been made, and so that it comes before the code that actually
- * downloads the instructions (since we don't typically use function
- * prototypes, our code has to be ordered that way; it's a left-over from
- * the original driver days that I should fix some time. -DL).
- */
-#include "aic7xxx_old/aic7xxx_seq.c"
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_check_patch
- *
- * Description:
- * See if the next patch to download should be downloaded.
- *-F*************************************************************************/
-static int
-aic7xxx_check_patch(struct aic7xxx_host *p,
- struct sequencer_patch **start_patch, int start_instr, int *skip_addr)
-{
- struct sequencer_patch *cur_patch;
- struct sequencer_patch *last_patch;
- int num_patches;
-
- num_patches = ARRAY_SIZE(sequencer_patches);
- last_patch = &sequencer_patches[num_patches];
- cur_patch = *start_patch;
-
- while ((cur_patch < last_patch) && (start_instr == cur_patch->begin))
- {
- if (cur_patch->patch_func(p) == 0)
- {
- /*
- * Start rejecting code.
- */
- *skip_addr = start_instr + cur_patch->skip_instr;
- cur_patch += cur_patch->skip_patch;
- }
- else
- {
- /*
- * Found an OK patch. Advance the patch pointer to the next patch
- * and wait for our instruction pointer to get here.
- */
- cur_patch++;
- }
- }
-
- *start_patch = cur_patch;
- if (start_instr < *skip_addr)
- /*
- * Still skipping
- */
- return (0);
- return(1);
-}
-
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_download_instr
- *
- * Description:
- * Download one sequencer instruction, adjusting jump addresses for skipped patches.
- *-F*************************************************************************/
-static void
-aic7xxx_download_instr(struct aic7xxx_host *p, int instrptr,
- unsigned char *dconsts)
-{
- union ins_formats instr;
- struct ins_format1 *fmt1_ins;
- struct ins_format3 *fmt3_ins;
- unsigned char opcode;
-
- instr = *(union ins_formats*) &seqprog[instrptr * 4];
-
- instr.integer = le32_to_cpu(instr.integer);
-
- fmt1_ins = &instr.format1;
- fmt3_ins = NULL;
-
- /* Pull the opcode */
- opcode = instr.format1.opcode;
- switch (opcode)
- {
- case AIC_OP_JMP:
- case AIC_OP_JC:
- case AIC_OP_JNC:
- case AIC_OP_CALL:
- case AIC_OP_JNE:
- case AIC_OP_JNZ:
- case AIC_OP_JE:
- case AIC_OP_JZ:
- {
- struct sequencer_patch *cur_patch;
- int address_offset;
- unsigned int address;
- int skip_addr;
- int i;
-
- fmt3_ins = &instr.format3;
- address_offset = 0;
- address = fmt3_ins->address;
- cur_patch = sequencer_patches;
- skip_addr = 0;
-
- for (i = 0; i < address;)
- {
- aic7xxx_check_patch(p, &cur_patch, i, &skip_addr);
- if (skip_addr > i)
- {
- int end_addr;
-
- end_addr = min_t(int, address, skip_addr);
- address_offset += end_addr - i;
- i = skip_addr;
- }
- else
- {
- i++;
- }
- }
- address -= address_offset;
- fmt3_ins->address = address;
- /* Fall Through to the next code section */
- }
- case AIC_OP_OR:
- case AIC_OP_AND:
- case AIC_OP_XOR:
- case AIC_OP_ADD:
- case AIC_OP_ADC:
- case AIC_OP_BMOV:
- if (fmt1_ins->parity != 0)
- {
- fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
- }
- fmt1_ins->parity = 0;
- /* Fall Through to the next code section */
- case AIC_OP_ROL:
- if ((p->features & AHC_ULTRA2) != 0)
- {
- int i, count;
-
- /* Calculate odd parity for the instruction */
- for ( i=0, count=0; i < 31; i++)
- {
- unsigned int mask;
-
- mask = 0x01 << i;
- if ((instr.integer & mask) != 0)
- count++;
- }
- if (!(count & 0x01))
- instr.format1.parity = 1;
- }
- else
- {
- if (fmt3_ins != NULL)
- {
- instr.integer = fmt3_ins->immediate |
- (fmt3_ins->source << 8) |
- (fmt3_ins->address << 16) |
- (fmt3_ins->opcode << 25);
- }
- else
- {
- instr.integer = fmt1_ins->immediate |
- (fmt1_ins->source << 8) |
- (fmt1_ins->destination << 16) |
- (fmt1_ins->ret << 24) |
- (fmt1_ins->opcode << 25);
- }
- }
- aic_outb(p, (instr.integer & 0xff), SEQRAM);
- aic_outb(p, ((instr.integer >> 8) & 0xff), SEQRAM);
- aic_outb(p, ((instr.integer >> 16) & 0xff), SEQRAM);
- aic_outb(p, ((instr.integer >> 24) & 0xff), SEQRAM);
- udelay(10);
- break;
-
- default:
- panic("aic7xxx: Unknown opcode encountered in sequencer program.");
- break;
- }
-}
-
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_loadseq
- *
- * Description:
- * Load the sequencer code into the controller memory.
- *-F*************************************************************************/
-static void
-aic7xxx_loadseq(struct aic7xxx_host *p)
-{
- struct sequencer_patch *cur_patch;
- int i;
- int downloaded;
- int skip_addr;
- unsigned char download_consts[4] = {0, 0, 0, 0};
-
- if (aic7xxx_verbose & VERBOSE_PROBE)
- {
- printk(KERN_INFO "(scsi%d) Downloading sequencer code...", p->host_no);
- }
-#if 0
- download_consts[TMODE_NUMCMDS] = p->num_targetcmds;
-#endif
- download_consts[TMODE_NUMCMDS] = 0;
- cur_patch = &sequencer_patches[0];
- downloaded = 0;
- skip_addr = 0;
-
- aic_outb(p, PERRORDIS|LOADRAM|FAILDIS|FASTMODE, SEQCTL);
- aic_outb(p, 0, SEQADDR0);
- aic_outb(p, 0, SEQADDR1);
-
- for (i = 0; i < sizeof(seqprog) / 4; i++)
- {
- if (aic7xxx_check_patch(p, &cur_patch, i, &skip_addr) == 0)
- {
- /* Skip this instruction for this configuration. */
- continue;
- }
- aic7xxx_download_instr(p, i, &download_consts[0]);
- downloaded++;
- }
-
- aic_outb(p, 0, SEQADDR0);
- aic_outb(p, 0, SEQADDR1);
- aic_outb(p, FASTMODE | FAILDIS, SEQCTL);
- unpause_sequencer(p, TRUE);
- mdelay(1);
- pause_sequencer(p);
- aic_outb(p, FASTMODE, SEQCTL);
- if (aic7xxx_verbose & VERBOSE_PROBE)
- {
- printk(" %d instructions downloaded\n", downloaded);
- }
- if (aic7xxx_dump_sequencer)
- aic7xxx_print_sequencer(p, downloaded);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_print_sequencer
- *
- * Description:
- * Print the contents of the sequencer memory to the screen.
- *-F*************************************************************************/
-static void
-aic7xxx_print_sequencer(struct aic7xxx_host *p, int downloaded)
-{
- int i, k, temp;
-
- aic_outb(p, PERRORDIS|LOADRAM|FAILDIS|FASTMODE, SEQCTL);
- aic_outb(p, 0, SEQADDR0);
- aic_outb(p, 0, SEQADDR1);
-
- k = 0;
- for (i=0; i < downloaded; i++)
- {
- if ( k == 0 )
- printk("%03x: ", i);
- temp = aic_inb(p, SEQRAM);
- temp |= (aic_inb(p, SEQRAM) << 8);
- temp |= (aic_inb(p, SEQRAM) << 16);
- temp |= (aic_inb(p, SEQRAM) << 24);
- printk("%08x", temp);
- if ( ++k == 8 )
- {
- printk("\n");
- k = 0;
- }
- else
- printk(" ");
- }
- aic_outb(p, 0, SEQADDR0);
- aic_outb(p, 0, SEQADDR1);
- aic_outb(p, FASTMODE | FAILDIS, SEQCTL);
- unpause_sequencer(p, TRUE);
- mdelay(1);
- pause_sequencer(p);
- aic_outb(p, FASTMODE, SEQCTL);
- printk("\n");
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_info
- *
- * Description:
- * Return a string describing the driver.
- *-F*************************************************************************/
-static const char *
-aic7xxx_info(struct Scsi_Host *dooh)
-{
- static char buffer[256];
- char *bp;
- struct aic7xxx_host *p;
-
- bp = &buffer[0];
- p = (struct aic7xxx_host *)dooh->hostdata;
- memset(bp, 0, sizeof(buffer));
- strcpy(bp, "Adaptec AHA274x/284x/294x (EISA/VLB/PCI-Fast SCSI) ");
- strcat(bp, AIC7XXX_C_VERSION);
- strcat(bp, "/");
- strcat(bp, AIC7XXX_H_VERSION);
- strcat(bp, "\n");
- strcat(bp, " <");
- strcat(bp, board_names[p->board_name_index]);
- strcat(bp, ">");
-
- return(bp);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_find_syncrate
- *
- * Description:
- * Look up the valid period to SCSIRATE conversion in our table
- *-F*************************************************************************/
-static struct aic7xxx_syncrate *
-aic7xxx_find_syncrate(struct aic7xxx_host *p, unsigned int *period,
- unsigned int maxsync, unsigned char *options)
-{
- struct aic7xxx_syncrate *syncrate;
- int done = FALSE;
-
- switch(*options)
- {
- case MSG_EXT_PPR_OPTION_DT_CRC:
- case MSG_EXT_PPR_OPTION_DT_UNITS:
- if(!(p->features & AHC_ULTRA3))
- {
- *options = 0;
- maxsync = max_t(unsigned int, maxsync, AHC_SYNCRATE_ULTRA2);
- }
- break;
- case MSG_EXT_PPR_OPTION_DT_CRC_QUICK:
- case MSG_EXT_PPR_OPTION_DT_UNITS_QUICK:
- if(!(p->features & AHC_ULTRA3))
- {
- *options = 0;
- maxsync = max_t(unsigned int, maxsync, AHC_SYNCRATE_ULTRA2);
- }
- else
- {
- /*
- * we don't support the Quick Arbitration variants of dual edge
- * clocking. As it turns out, we want to send back the
- * same basic option, but without the QA attribute.
- * We know that we are responding because we would never set
- * these options ourselves; we would only respond to them.
- */
- switch(*options)
- {
- case MSG_EXT_PPR_OPTION_DT_CRC_QUICK:
- *options = MSG_EXT_PPR_OPTION_DT_CRC;
- break;
- case MSG_EXT_PPR_OPTION_DT_UNITS_QUICK:
- *options = MSG_EXT_PPR_OPTION_DT_UNITS;
- break;
- }
- }
- break;
- default:
- *options = 0;
- maxsync = max_t(unsigned int, maxsync, AHC_SYNCRATE_ULTRA2);
- break;
- }
- syncrate = &aic7xxx_syncrates[maxsync];
- while ( (syncrate->rate[0] != NULL) &&
- (!(p->features & AHC_ULTRA2) || syncrate->sxfr_ultra2) )
- {
- if (*period <= syncrate->period)
- {
- switch(*options)
- {
- case MSG_EXT_PPR_OPTION_DT_CRC:
- case MSG_EXT_PPR_OPTION_DT_UNITS:
- if(!(syncrate->sxfr_ultra2 & AHC_SYNCRATE_CRC))
- {
- done = TRUE;
- /*
- * oops, we went too low for the CRC/DualEdge signalling, so
- * clear the options byte
- */
- *options = 0;
- /*
- * We'll be sending a reply to this packet to set the options
- * properly, so unilaterally set the period as well.
- */
- *period = syncrate->period;
- }
- else
- {
- done = TRUE;
- if(syncrate == &aic7xxx_syncrates[maxsync])
- {
- *period = syncrate->period;
- }
- }
- break;
- default:
- if(!(syncrate->sxfr_ultra2 & AHC_SYNCRATE_CRC))
- {
- done = TRUE;
- if(syncrate == &aic7xxx_syncrates[maxsync])
- {
- *period = syncrate->period;
- }
- }
- break;
- }
- if(done)
- {
- break;
- }
- }
- syncrate++;
- }
- if ( (*period == 0) || (syncrate->rate[0] == NULL) ||
- ((p->features & AHC_ULTRA2) && (syncrate->sxfr_ultra2 == 0)) )
- {
- /*
- * Use async transfers for this target
- */
- *options = 0;
- *period = 255;
- syncrate = NULL;
- }
- return (syncrate);
-}
-
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_find_period
- *
- * Description:
- * Look up the valid SCSIRATE to period conversion in our table
- *-F*************************************************************************/
-static unsigned int
-aic7xxx_find_period(struct aic7xxx_host *p, unsigned int scsirate,
- unsigned int maxsync)
-{
- struct aic7xxx_syncrate *syncrate;
-
- if (p->features & AHC_ULTRA2)
- {
- scsirate &= SXFR_ULTRA2;
- }
- else
- {
- scsirate &= SXFR;
- }
-
- syncrate = &aic7xxx_syncrates[maxsync];
- while (syncrate->rate[0] != NULL)
- {
- if (p->features & AHC_ULTRA2)
- {
- if (syncrate->sxfr_ultra2 == 0)
- break;
- else if (scsirate == syncrate->sxfr_ultra2)
- return (syncrate->period);
- else if (scsirate == (syncrate->sxfr_ultra2 & ~AHC_SYNCRATE_CRC))
- return (syncrate->period);
- }
- else if (scsirate == (syncrate->sxfr & ~ULTRA_SXFR))
- {
- return (syncrate->period);
- }
- syncrate++;
- }
- return (0); /* async */
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_validate_offset
- *
- * Description:
- * Set a valid offset value for a particular card in use and transfer
- * settings in use.
- *-F*************************************************************************/
-static void
-aic7xxx_validate_offset(struct aic7xxx_host *p,
- struct aic7xxx_syncrate *syncrate, unsigned int *offset, int wide)
-{
- unsigned int maxoffset;
-
- /* Limit offset to what the card (and device) can do */
- if (syncrate == NULL)
- {
- maxoffset = 0;
- }
- else if (p->features & AHC_ULTRA2)
- {
- maxoffset = MAX_OFFSET_ULTRA2;
- }
- else
- {
- if (wide)
- maxoffset = MAX_OFFSET_16BIT;
- else
- maxoffset = MAX_OFFSET_8BIT;
- }
- *offset = min(*offset, maxoffset);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_set_syncrate
- *
- * Description:
- * Set the actual syncrate down in the card and in our host structs
- *-F*************************************************************************/
-static void
-aic7xxx_set_syncrate(struct aic7xxx_host *p, struct aic7xxx_syncrate *syncrate,
- int target, int channel, unsigned int period, unsigned int offset,
- unsigned char options, unsigned int type, struct aic_dev_data *aic_dev)
-{
- unsigned char tindex;
- unsigned short target_mask;
- unsigned char lun, old_options;
- unsigned int old_period, old_offset;
-
- tindex = target | (channel << 3);
- target_mask = 0x01 << tindex;
- lun = aic_inb(p, SCB_TCL) & 0x07;
-
- if (syncrate == NULL)
- {
- period = 0;
- offset = 0;
- }
-
- old_period = aic_dev->cur.period;
- old_offset = aic_dev->cur.offset;
- old_options = aic_dev->cur.options;
-
-
- if (type & AHC_TRANS_CUR)
- {
- unsigned int scsirate;
-
- scsirate = aic_inb(p, TARG_SCSIRATE + tindex);
- if (p->features & AHC_ULTRA2)
- {
- scsirate &= ~SXFR_ULTRA2;
- if (syncrate != NULL)
- {
- switch(options)
- {
- case MSG_EXT_PPR_OPTION_DT_UNITS:
- /*
- * mask off the CRC bit in the xfer settings
- */
- scsirate |= (syncrate->sxfr_ultra2 & ~AHC_SYNCRATE_CRC);
- break;
- default:
- scsirate |= syncrate->sxfr_ultra2;
- break;
- }
- }
- if (type & AHC_TRANS_ACTIVE)
- {
- aic_outb(p, offset, SCSIOFFSET);
- }
- aic_outb(p, offset, TARG_OFFSET + tindex);
- }
- else /* Not an Ultra2 controller */
- {
- scsirate &= ~(SXFR|SOFS);
- p->ultraenb &= ~target_mask;
- if (syncrate != NULL)
- {
- if (syncrate->sxfr & ULTRA_SXFR)
- {
- p->ultraenb |= target_mask;
- }
- scsirate |= (syncrate->sxfr & SXFR);
- scsirate |= (offset & SOFS);
- }
- if (type & AHC_TRANS_ACTIVE)
- {
- unsigned char sxfrctl0;
-
- sxfrctl0 = aic_inb(p, SXFRCTL0);
- sxfrctl0 &= ~FAST20;
- if (p->ultraenb & target_mask)
- sxfrctl0 |= FAST20;
- aic_outb(p, sxfrctl0, SXFRCTL0);
- }
- aic_outb(p, p->ultraenb & 0xff, ULTRA_ENB);
- aic_outb(p, (p->ultraenb >> 8) & 0xff, ULTRA_ENB + 1 );
- }
- if (type & AHC_TRANS_ACTIVE)
- {
- aic_outb(p, scsirate, SCSIRATE);
- }
- aic_outb(p, scsirate, TARG_SCSIRATE + tindex);
- aic_dev->cur.period = period;
- aic_dev->cur.offset = offset;
- aic_dev->cur.options = options;
- if ( !(type & AHC_TRANS_QUITE) &&
- (aic7xxx_verbose & VERBOSE_NEGOTIATION) &&
- (aic_dev->flags & DEVICE_PRINT_DTR) )
- {
- if (offset)
- {
- int rate_mod = (scsirate & WIDEXFER) ? 1 : 0;
-
- printk(INFO_LEAD "Synchronous at %s Mbyte/sec, "
- "offset %d.\n", p->host_no, channel, target, lun,
- syncrate->rate[rate_mod], offset);
- }
- else
- {
- printk(INFO_LEAD "Using asynchronous transfers.\n",
- p->host_no, channel, target, lun);
- }
- aic_dev->flags &= ~DEVICE_PRINT_DTR;
- }
- }
-
- if (type & AHC_TRANS_GOAL)
- {
- aic_dev->goal.period = period;
- aic_dev->goal.offset = offset;
- aic_dev->goal.options = options;
- }
-
- if (type & AHC_TRANS_USER)
- {
- p->user[tindex].period = period;
- p->user[tindex].offset = offset;
- p->user[tindex].options = options;
- }
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_set_width
- *
- * Description:
- * Set the actual width down in the card and in our host structs
- *-F*************************************************************************/
-static void
-aic7xxx_set_width(struct aic7xxx_host *p, int target, int channel, int lun,
- unsigned int width, unsigned int type, struct aic_dev_data *aic_dev)
-{
- unsigned char tindex;
- unsigned short target_mask;
- unsigned int old_width;
-
- tindex = target | (channel << 3);
- target_mask = 1 << tindex;
-
- old_width = aic_dev->cur.width;
-
- if (type & AHC_TRANS_CUR)
- {
- unsigned char scsirate;
-
- scsirate = aic_inb(p, TARG_SCSIRATE + tindex);
-
- scsirate &= ~WIDEXFER;
- if (width == MSG_EXT_WDTR_BUS_16_BIT)
- scsirate |= WIDEXFER;
-
- aic_outb(p, scsirate, TARG_SCSIRATE + tindex);
-
- if (type & AHC_TRANS_ACTIVE)
- aic_outb(p, scsirate, SCSIRATE);
-
- aic_dev->cur.width = width;
-
- if ( !(type & AHC_TRANS_QUITE) &&
- (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
- (aic_dev->flags & DEVICE_PRINT_DTR) )
- {
- printk(INFO_LEAD "Using %s transfers\n", p->host_no, channel, target,
- lun, (scsirate & WIDEXFER) ? "Wide(16bit)" : "Narrow(8bit)" );
- }
- }
-
- if (type & AHC_TRANS_GOAL)
- aic_dev->goal.width = width;
- if (type & AHC_TRANS_USER)
- p->user[tindex].width = width;
-
- if (aic_dev->goal.offset)
- {
- if (p->features & AHC_ULTRA2)
- {
- aic_dev->goal.offset = MAX_OFFSET_ULTRA2;
- }
- else if (width == MSG_EXT_WDTR_BUS_16_BIT)
- {
- aic_dev->goal.offset = MAX_OFFSET_16BIT;
- }
- else
- {
- aic_dev->goal.offset = MAX_OFFSET_8BIT;
- }
- }
-}
-
-/*+F*************************************************************************
- * Function:
- * scbq_init
- *
- * Description:
- * SCB queue initialization.
- *
- *-F*************************************************************************/
-static void
-scbq_init(volatile scb_queue_type *queue)
-{
- queue->head = NULL;
- queue->tail = NULL;
-}
-
-/*+F*************************************************************************
- * Function:
- * scbq_insert_head
- *
- * Description:
- * Add an SCB to the head of the list.
- *
- *-F*************************************************************************/
-static inline void
-scbq_insert_head(volatile scb_queue_type *queue, struct aic7xxx_scb *scb)
-{
- scb->q_next = queue->head;
- queue->head = scb;
- if (queue->tail == NULL) /* If list was empty, update tail. */
- queue->tail = queue->head;
-}
-
-/*+F*************************************************************************
- * Function:
- * scbq_remove_head
- *
- * Description:
- * Remove an SCB from the head of the list.
- *
- *-F*************************************************************************/
-static inline struct aic7xxx_scb *
-scbq_remove_head(volatile scb_queue_type *queue)
-{
- struct aic7xxx_scb * scbp;
-
- scbp = queue->head;
- if (queue->head != NULL)
- queue->head = queue->head->q_next;
- if (queue->head == NULL) /* If list is now empty, update tail. */
- queue->tail = NULL;
- return(scbp);
-}
-
-/*+F*************************************************************************
- * Function:
- * scbq_remove
- *
- * Description:
- * Removes an SCB from the list.
- *
- *-F*************************************************************************/
-static inline void
-scbq_remove(volatile scb_queue_type *queue, struct aic7xxx_scb *scb)
-{
- if (queue->head == scb)
- {
- /* At beginning of queue, remove from head. */
- scbq_remove_head(queue);
- }
- else
- {
- struct aic7xxx_scb *curscb = queue->head;
-
- /*
- * Search until the next scb is the one we're looking for, or
- * we run out of queue.
- */
- while ((curscb != NULL) && (curscb->q_next != scb))
- {
- curscb = curscb->q_next;
- }
- if (curscb != NULL)
- {
- /* Found it. */
- curscb->q_next = scb->q_next;
- if (scb->q_next == NULL)
- {
- /* Update the tail when removing the tail. */
- queue->tail = curscb;
- }
- }
- }
-}
-
-/*+F*************************************************************************
- * Function:
- * scbq_insert_tail
- *
- * Description:
- * Add an SCB at the tail of the list.
- *
- *-F*************************************************************************/
-static inline void
-scbq_insert_tail(volatile scb_queue_type *queue, struct aic7xxx_scb *scb)
-{
- scb->q_next = NULL;
- if (queue->tail != NULL) /* Add the scb at the end of the list. */
- queue->tail->q_next = scb;
- queue->tail = scb; /* Update the tail. */
- if (queue->head == NULL) /* If list was empty, update head. */
- queue->head = queue->tail;
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_match_scb
- *
- * Description:
- * Checks to see if an scb matches the target/channel as specified.
- * If target is ALL_TARGETS (-1), then we're looking for any device
- * on the specified channel; this happens when a channel is going
- * to be reset and all devices on that channel must be aborted.
- *-F*************************************************************************/
-static int
-aic7xxx_match_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb,
- int target, int channel, int lun, unsigned char tag)
-{
- int targ = (scb->hscb->target_channel_lun >> 4) & 0x0F;
- int chan = (scb->hscb->target_channel_lun >> 3) & 0x01;
- int slun = scb->hscb->target_channel_lun & 0x07;
- int match;
-
- match = ((chan == channel) || (channel == ALL_CHANNELS));
- if (match != 0)
- match = ((targ == target) || (target == ALL_TARGETS));
- if (match != 0)
- match = ((lun == slun) || (lun == ALL_LUNS));
- if (match != 0)
- match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
-
- return (match);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_add_curscb_to_free_list
- *
- * Description:
- * Adds the current scb (in SCBPTR) to the list of free SCBs.
- *-F*************************************************************************/
-static void
-aic7xxx_add_curscb_to_free_list(struct aic7xxx_host *p)
-{
- /*
- * Invalidate the tag so that aic7xxx_find_scb doesn't think
- * it's active
- */
- aic_outb(p, SCB_LIST_NULL, SCB_TAG);
- aic_outb(p, 0, SCB_CONTROL);
-
- aic_outb(p, aic_inb(p, FREE_SCBH), SCB_NEXT);
- aic_outb(p, aic_inb(p, SCBPTR), FREE_SCBH);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_rem_scb_from_disc_list
- *
- * Description:
- * Removes the current SCB from the disconnected list and adds it
- * to the free list.
- *-F*************************************************************************/
-static unsigned char
-aic7xxx_rem_scb_from_disc_list(struct aic7xxx_host *p, unsigned char scbptr,
- unsigned char prev)
-{
- unsigned char next;
-
- aic_outb(p, scbptr, SCBPTR);
- next = aic_inb(p, SCB_NEXT);
- aic7xxx_add_curscb_to_free_list(p);
-
- if (prev != SCB_LIST_NULL)
- {
- aic_outb(p, prev, SCBPTR);
- aic_outb(p, next, SCB_NEXT);
- }
- else
- {
- aic_outb(p, next, DISCONNECTED_SCBH);
- }
-
- return next;
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_busy_target
- *
- * Description:
- * Set the specified target busy.
- *-F*************************************************************************/
-static inline void
-aic7xxx_busy_target(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
-{
- p->untagged_scbs[scb->hscb->target_channel_lun] = scb->hscb->tag;
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_index_busy_target
- *
- * Description:
- * Returns the SCB tag currently holding the given target/channel/lun
- * busy, and optionally marks it unbusy again.
- *-F*************************************************************************/
-static inline unsigned char
-aic7xxx_index_busy_target(struct aic7xxx_host *p, unsigned char tcl,
- int unbusy)
-{
- unsigned char busy_scbid;
-
- busy_scbid = p->untagged_scbs[tcl];
- if (unbusy)
- {
- p->untagged_scbs[tcl] = SCB_LIST_NULL;
- }
- return (busy_scbid);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_find_scb
- *
- * Description:
- * Look through the SCB array of the card and attempt to find the
- * hardware SCB that corresponds to the passed in SCB. Return
- * SCB_LIST_NULL if unsuccessful. This routine assumes that the
- * card is already paused.
- *-F*************************************************************************/
-static unsigned char
-aic7xxx_find_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
-{
- unsigned char saved_scbptr;
- unsigned char curindex;
-
- saved_scbptr = aic_inb(p, SCBPTR);
- curindex = 0;
- for (curindex = 0; curindex < p->scb_data->maxhscbs; curindex++)
- {
- aic_outb(p, curindex, SCBPTR);
- if (aic_inb(p, SCB_TAG) == scb->hscb->tag)
- {
- break;
- }
- }
- aic_outb(p, saved_scbptr, SCBPTR);
- if (curindex >= p->scb_data->maxhscbs)
- {
- curindex = SCB_LIST_NULL;
- }
-
- return (curindex);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_allocate_scb
- *
- * Description:
- * Allocate a batch of new SCBs and add them to the free list.
- *-F*************************************************************************/
-static int
-aic7xxx_allocate_scb(struct aic7xxx_host *p)
-{
- struct aic7xxx_scb *scbp = NULL;
- int scb_size = (sizeof (struct hw_scatterlist) * AIC7XXX_MAX_SG) + 12 + 6;
- int i;
- int step = PAGE_SIZE / 1024;
- unsigned long scb_count = 0;
- struct hw_scatterlist *hsgp;
- struct aic7xxx_scb *scb_ap;
- struct aic7xxx_scb_dma *scb_dma;
- unsigned char *bufs;
-
- if (p->scb_data->numscbs < p->scb_data->maxscbs)
- {
- /*
- * Calculate the optimal number of SCBs to allocate.
- *
- * NOTE: This formula works because the sizeof(sg_array) is always
- * 1024. Therefore, scb_size * i would always be > PAGE_SIZE *
- * (i/step). The (i-1) allows the left hand side of the equation
- * to grow into the right hand side to a point of near perfect
- * efficiency since scb_size * (i -1) is growing slightly faster
- * than the right hand side. If the number of SG array elements
- * is changed, this function may not be nearly so efficient any more.
- *
- * Since the DMA'able buffers are now allocated in a separate
- * chunk this algorithm has been modified to match. The '12'
- * and '6' factors in scb_size are for the DMA'able command byte
- * and sensebuffers respectively. -DaveM
- */
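- /*
- * Worked example (illustrative, assuming 4K pages and the 1024 byte
- * sg_array mentioned above): scb_size is roughly 1024 + 12 + 6 = 1042
- * and step = 4, so the loop doubles i until 1042 * (i - 1) first reaches
- * PAGE_SIZE * (i / step) - 64 at i = 64, then halves back to 32. That
- * allows up to 31 SCBs per call, whose ~32KB of DMA buffers pack almost
- * exactly eight 4K pages.
- */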
- for ( i=step;; i *= 2 )
- {
- if ( (scb_size * (i-1)) >= ( (PAGE_SIZE * (i/step)) - 64 ) )
- {
- i /= 2;
- break;
- }
- }
- scb_count = min( (i-1), p->scb_data->maxscbs - p->scb_data->numscbs);
- scb_ap = kmalloc(sizeof (struct aic7xxx_scb) * scb_count
- + sizeof(struct aic7xxx_scb_dma), GFP_ATOMIC);
- if (scb_ap == NULL)
- return(0);
- scb_dma = (struct aic7xxx_scb_dma *)&scb_ap[scb_count];
- hsgp = (struct hw_scatterlist *)
- pci_alloc_consistent(p->pdev, scb_size * scb_count,
- &scb_dma->dma_address);
- if (hsgp == NULL)
- {
- kfree(scb_ap);
- return(0);
- }
- bufs = (unsigned char *)&hsgp[scb_count * AIC7XXX_MAX_SG];
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic7xxx_verbose > 0xffff)
- {
- if (p->scb_data->numscbs == 0)
- printk(INFO_LEAD "Allocating initial %ld SCB structures.\n",
- p->host_no, -1, -1, -1, scb_count);
- else
- printk(INFO_LEAD "Allocating %ld additional SCB structures.\n",
- p->host_no, -1, -1, -1, scb_count);
- }
-#endif
- memset(scb_ap, 0, sizeof (struct aic7xxx_scb) * scb_count);
- scb_dma->dma_offset = (unsigned long)scb_dma->dma_address
- - (unsigned long)hsgp;
- scb_dma->dma_len = scb_size * scb_count;
- for (i=0; i < scb_count; i++)
- {
- scbp = &scb_ap[i];
- scbp->hscb = &p->scb_data->hscbs[p->scb_data->numscbs];
- scbp->sg_list = &hsgp[i * AIC7XXX_MAX_SG];
- scbp->sense_cmd = bufs;
- scbp->cmnd = bufs + 6;
- bufs += 12 + 6;
- scbp->scb_dma = scb_dma;
- memset(scbp->hscb, 0, sizeof(struct aic7xxx_hwscb));
- scbp->hscb->tag = p->scb_data->numscbs;
- /*
- * Place in the scb array; never is removed
- */
- p->scb_data->scb_array[p->scb_data->numscbs++] = scbp;
- scbq_insert_tail(&p->scb_data->free_scbs, scbp);
- }
- scbp->kmalloc_ptr = scb_ap;
- }
- return(scb_count);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_queue_cmd_complete
- *
- * Description:
- * Due to race conditions present in the SCSI subsystem, it is easier
- * to queue completed commands, then call scsi_done() on them when
- * we're finished. This function queues the completed commands.
- *-F*************************************************************************/
-static void
-aic7xxx_queue_cmd_complete(struct aic7xxx_host *p, struct scsi_cmnd *cmd)
-{
- aic7xxx_position(cmd) = SCB_LIST_NULL;
- cmd->host_scribble = (char *)p->completeq.head;
- p->completeq.head = cmd;
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_done_cmds_complete
- *
- * Description:
- * Process the completed command queue.
- *-F*************************************************************************/
-static void aic7xxx_done_cmds_complete(struct aic7xxx_host *p)
-{
- struct scsi_cmnd *cmd;
-
- while (p->completeq.head != NULL) {
- cmd = p->completeq.head;
- p->completeq.head = (struct scsi_cmnd *) cmd->host_scribble;
- cmd->host_scribble = NULL;
- cmd->scsi_done(cmd);
- }
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_free_scb
- *
- * Description:
- * Free the scb and insert into the free scb list.
- *-F*************************************************************************/
-static void
-aic7xxx_free_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
-{
-
- scb->flags = SCB_FREE;
- scb->cmd = NULL;
- scb->sg_count = 0;
- scb->sg_length = 0;
- scb->tag_action = 0;
- scb->hscb->control = 0;
- scb->hscb->target_status = 0;
- scb->hscb->target_channel_lun = SCB_LIST_NULL;
-
- scbq_insert_head(&p->scb_data->free_scbs, scb);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_done
- *
- * Description:
- * Calls the higher level scsi done function and frees the scb.
- *-F*************************************************************************/
-static void
-aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
-{
- struct scsi_cmnd *cmd = scb->cmd;
- struct aic_dev_data *aic_dev = cmd->device->hostdata;
- int tindex = TARGET_INDEX(cmd);
- struct aic7xxx_scb *scbp;
- unsigned char queue_depth;
-
- scsi_dma_unmap(cmd);
-
- if (scb->flags & SCB_SENSE)
- {
- pci_unmap_single(p->pdev,
- le32_to_cpu(scb->sg_list[0].address),
- SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE);
- }
- if (scb->flags & SCB_RECOVERY_SCB)
- {
- p->flags &= ~AHC_ABORT_PENDING;
- }
- if (scb->flags & (SCB_RESET|SCB_ABORT))
- {
- cmd->result |= (DID_RESET << 16);
- }
-
- if ((scb->flags & SCB_MSGOUT_BITS) != 0)
- {
- unsigned short mask;
- int message_error = FALSE;
-
- mask = 0x01 << tindex;
-
- /*
- * Check to see if we get an invalid message or a message error
- * after failing to negotiate a wide or sync transfer message.
- */
- if ((scb->flags & SCB_SENSE) &&
- ((scb->cmd->sense_buffer[12] == 0x43) || /* INVALID_MESSAGE */
- (scb->cmd->sense_buffer[12] == 0x49))) /* MESSAGE_ERROR */
- {
- message_error = TRUE;
- }
-
- if (scb->flags & SCB_MSGOUT_WDTR)
- {
- if (message_error)
- {
- if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
- (aic_dev->flags & DEVICE_PRINT_DTR) )
- {
- printk(INFO_LEAD "Device failed to complete Wide Negotiation "
- "processing and\n", p->host_no, CTL_OF_SCB(scb));
- printk(INFO_LEAD "returned a sense error code for invalid message, "
- "disabling future\n", p->host_no, CTL_OF_SCB(scb));
- printk(INFO_LEAD "Wide negotiation to this device.\n", p->host_no,
- CTL_OF_SCB(scb));
- }
- aic_dev->needwdtr = aic_dev->needwdtr_copy = 0;
- }
- }
- if (scb->flags & SCB_MSGOUT_SDTR)
- {
- if (message_error)
- {
- if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
- (aic_dev->flags & DEVICE_PRINT_DTR) )
- {
- printk(INFO_LEAD "Device failed to complete Sync Negotiation "
- "processing and\n", p->host_no, CTL_OF_SCB(scb));
- printk(INFO_LEAD "returned a sense error code for invalid message, "
- "disabling future\n", p->host_no, CTL_OF_SCB(scb));
- printk(INFO_LEAD "Sync negotiation to this device.\n", p->host_no,
- CTL_OF_SCB(scb));
- aic_dev->flags &= ~DEVICE_PRINT_DTR;
- }
- aic_dev->needsdtr = aic_dev->needsdtr_copy = 0;
- }
- }
- if (scb->flags & SCB_MSGOUT_PPR)
- {
- if(message_error)
- {
- if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
- (aic_dev->flags & DEVICE_PRINT_DTR) )
- {
- printk(INFO_LEAD "Device failed to complete Parallel Protocol "
- "Request processing and\n", p->host_no, CTL_OF_SCB(scb));
- printk(INFO_LEAD "returned a sense error code for invalid message, "
- "disabling future\n", p->host_no, CTL_OF_SCB(scb));
- printk(INFO_LEAD "Parallel Protocol Request negotiation to this "
- "device.\n", p->host_no, CTL_OF_SCB(scb));
- }
- /*
- * Disable PPR negotiation and revert back to WDTR and SDTR setup
- */
- aic_dev->needppr = aic_dev->needppr_copy = 0;
- aic_dev->needsdtr = aic_dev->needsdtr_copy = 1;
- aic_dev->needwdtr = aic_dev->needwdtr_copy = 1;
- }
- }
- }
-
- queue_depth = aic_dev->temp_q_depth;
- if (queue_depth >= aic_dev->active_cmds)
- {
- scbp = scbq_remove_head(&aic_dev->delayed_scbs);
- if (scbp)
- {
- if (queue_depth == 1)
- {
- /*
- * Give extra preference to untagged devices, such as CD-R devices
- * This makes it more likely that a drive *won't* stuff up while
- * waiting on data at a critical time, such as CD-R writing and
- * audio CD ripping operations. Should also benefit tape drives.
- */
- scbq_insert_head(&p->waiting_scbs, scbp);
- }
- else
- {
- scbq_insert_tail(&p->waiting_scbs, scbp);
- }
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic7xxx_verbose > 0xffff)
- printk(INFO_LEAD "Moving SCB from delayed to waiting queue.\n",
- p->host_no, CTL_OF_SCB(scbp));
-#endif
- if (queue_depth > aic_dev->active_cmds)
- {
- scbp = scbq_remove_head(&aic_dev->delayed_scbs);
- if (scbp)
- scbq_insert_tail(&p->waiting_scbs, scbp);
- }
- }
- }
- if (!(scb->tag_action))
- {
- aic7xxx_index_busy_target(p, scb->hscb->target_channel_lun,
- /* unbusy */ TRUE);
- if (cmd->device->simple_tags)
- {
- aic_dev->temp_q_depth = aic_dev->max_q_depth;
- }
- }
- if(scb->flags & SCB_DTR_SCB)
- {
- aic_dev->dtr_pending = 0;
- }
- aic_dev->active_cmds--;
- p->activescbs--;
-
- if ((scb->sg_length >= 512) && (((cmd->result >> 16) & 0xf) == DID_OK))
- {
- long *ptr;
- int x, i;
-
-
- if (rq_data_dir(cmd->request) == WRITE)
- {
- aic_dev->w_total++;
- ptr = aic_dev->w_bins;
- }
- else
- {
- aic_dev->r_total++;
- ptr = aic_dev->r_bins;
- }
- x = scb->sg_length;
- x >>= 10;
- for(i=0; i<6; i++)
- {
- x >>= 2;
- if(!x) {
- ptr[i]++;
- break;
- }
- }
- if(i == 6 && x)
- ptr[5]++;
- }
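- /*
- * Note on the histogram above (just summarizing the shifts, no new
- * behaviour): x starts as the transfer length in KB and loses two bits
- * per pass, so w_bins/r_bins[0..5] roughly count transfers of <4K,
- * 4-16K, 16-64K, 64-256K, 256K-1M and >=1M bytes respectively.
- */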
- aic7xxx_free_scb(p, scb);
- aic7xxx_queue_cmd_complete(p, cmd);
-
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_run_done_queue
- *
- * Description:
- * Calls the aic7xxx_done() for the scsi_cmnd of each scb in the
- * aborted list, and adds each scb to the free list. If complete
- * is TRUE, we also process the commands complete list.
- *-F*************************************************************************/
-static void
-aic7xxx_run_done_queue(struct aic7xxx_host *p, /*complete*/ int complete)
-{
- struct aic7xxx_scb *scb;
- int i, found = 0;
-
- for (i = 0; i < p->scb_data->numscbs; i++)
- {
- scb = p->scb_data->scb_array[i];
- if (scb->flags & SCB_QUEUED_FOR_DONE)
- {
- if (scb->flags & SCB_QUEUE_FULL)
- {
- scb->cmd->result = QUEUE_FULL << 1;
- }
- else
- {
- if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
- printk(INFO_LEAD "Aborting scb %d\n",
- p->host_no, CTL_OF_SCB(scb), scb->hscb->tag);
- /*
- * Clear any residual information since the normal aic7xxx_done() path
- * doesn't touch the residuals.
- */
- scb->hscb->residual_SG_segment_count = 0;
- scb->hscb->residual_data_count[0] = 0;
- scb->hscb->residual_data_count[1] = 0;
- scb->hscb->residual_data_count[2] = 0;
- }
- found++;
- aic7xxx_done(p, scb);
- }
- }
- if (aic7xxx_verbose & (VERBOSE_ABORT_RETURN | VERBOSE_RESET_RETURN))
- {
- printk(INFO_LEAD "%d commands found and queued for "
- "completion.\n", p->host_no, -1, -1, -1, found);
- }
- if (complete)
- {
- aic7xxx_done_cmds_complete(p);
- }
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_abort_waiting_scb
- *
- * Description:
- * Manipulate the waiting for selection list and return the
- * scb that follows the one that we remove.
- *-F*************************************************************************/
-static unsigned char
-aic7xxx_abort_waiting_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb,
- unsigned char scbpos, unsigned char prev)
-{
- unsigned char curscb, next;
-
- /*
- * Select the SCB we want to abort and pull the next pointer out of it.
- */
- curscb = aic_inb(p, SCBPTR);
- aic_outb(p, scbpos, SCBPTR);
- next = aic_inb(p, SCB_NEXT);
-
- aic7xxx_add_curscb_to_free_list(p);
-
- /*
- * Update the waiting list
- */
- if (prev == SCB_LIST_NULL)
- {
- /*
- * First in the list
- */
- aic_outb(p, next, WAITING_SCBH);
- }
- else
- {
- /*
- * Select the scb that pointed to us and update its next pointer.
- */
- aic_outb(p, prev, SCBPTR);
- aic_outb(p, next, SCB_NEXT);
- }
- /*
- * Point us back at the original scb position and inform the SCSI
- * system that the command has been aborted.
- */
- aic_outb(p, curscb, SCBPTR);
- return (next);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_search_qinfifo
- *
- * Description:
- * Search the queue-in FIFO for matching SCBs and conditionally
- * requeue. Returns the number of matching SCBs.
- *-F*************************************************************************/
-static int
-aic7xxx_search_qinfifo(struct aic7xxx_host *p, int target, int channel,
- int lun, unsigned char tag, int flags, int requeue,
- volatile scb_queue_type *queue)
-{
- int found;
- unsigned char qinpos, qintail;
- struct aic7xxx_scb *scbp;
-
- found = 0;
- qinpos = aic_inb(p, QINPOS);
- qintail = p->qinfifonext;
-
- p->qinfifonext = qinpos;
-
- while (qinpos != qintail)
- {
- scbp = p->scb_data->scb_array[p->qinfifo[qinpos++]];
- if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
- {
- /*
- * We found an scb that needs to be removed.
- */
- if (requeue && (queue != NULL))
- {
- if (scbp->flags & SCB_WAITINGQ)
- {
- scbq_remove(queue, scbp);
- scbq_remove(&p->waiting_scbs, scbp);
- scbq_remove(&AIC_DEV(scbp->cmd)->delayed_scbs, scbp);
- AIC_DEV(scbp->cmd)->active_cmds++;
- p->activescbs++;
- }
- scbq_insert_tail(queue, scbp);
- AIC_DEV(scbp->cmd)->active_cmds--;
- p->activescbs--;
- scbp->flags |= SCB_WAITINGQ;
- if ( !(scbp->tag_action & TAG_ENB) )
- {
- aic7xxx_index_busy_target(p, scbp->hscb->target_channel_lun,
- TRUE);
- }
- }
- else if (requeue)
- {
- p->qinfifo[p->qinfifonext++] = scbp->hscb->tag;
- }
- else
- {
- /*
- * Preserve any SCB_RECOVERY_SCB flags on this scb then set the
- * flags we were called with, presumably so aic7xxx_run_done_queue
- * can find this scb.
- */
- scbp->flags = flags | (scbp->flags & SCB_RECOVERY_SCB);
- if (aic7xxx_index_busy_target(p, scbp->hscb->target_channel_lun,
- FALSE) == scbp->hscb->tag)
- {
- aic7xxx_index_busy_target(p, scbp->hscb->target_channel_lun,
- TRUE);
- }
- }
- found++;
- }
- else
- {
- p->qinfifo[p->qinfifonext++] = scbp->hscb->tag;
- }
- }
- /*
- * Now that we've done the work, clear out any left over commands in the
- * qinfifo and update the KERNEL_QINPOS down on the card.
- *
- * NOTE: This routine expects the sequencer to already be paused when
- * it is run....make sure it's that way!
- */
- qinpos = p->qinfifonext;
- while(qinpos != qintail)
- {
- p->qinfifo[qinpos++] = SCB_LIST_NULL;
- }
- if (p->features & AHC_QUEUE_REGS)
- aic_outb(p, p->qinfifonext, HNSCB_QOFF);
- else
- aic_outb(p, p->qinfifonext, KERNEL_QINPOS);
-
- return (found);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_scb_on_qoutfifo
- *
- * Description:
- * Is the scb that was passed to us currently on the qoutfifo?
- *-F*************************************************************************/
-static int
-aic7xxx_scb_on_qoutfifo(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
-{
- int i=0;
-
- while(p->qoutfifo[(p->qoutfifonext + i) & 0xff ] != SCB_LIST_NULL)
- {
- if(p->qoutfifo[(p->qoutfifonext + i) & 0xff ] == scb->hscb->tag)
- return TRUE;
- else
- i++;
- }
- return FALSE;
-}
-
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_reset_device
- *
- * Description:
- * The device at the given target/channel has been reset. Abort
- * all active and queued scbs for that target/channel. This function
- * need not worry about linked next pointers because if it was a MSG_ABORT_TAG
- * then we had a tagged command (no linked next), if it was MSG_ABORT or
- * MSG_BUS_DEV_RESET then the device won't know about any commands any more
- * and no busy commands will exist, and if it was a bus reset, then nothing
- * knows about any linked next commands any more. In all cases, we don't
- * need to worry about the linked next or busy scb, we just need to clear
- * them.
- *-F*************************************************************************/
-static void
-aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel,
- int lun, unsigned char tag)
-{
- struct aic7xxx_scb *scbp, *prev_scbp;
- struct scsi_device *sd;
- unsigned char active_scb, tcl, scb_tag;
- int i = 0, init_lists = FALSE;
- struct aic_dev_data *aic_dev;
-
- /*
- * Restore this when we're done
- */
- active_scb = aic_inb(p, SCBPTR);
- scb_tag = aic_inb(p, SCB_TAG);
-
- if (aic7xxx_verbose & (VERBOSE_RESET_PROCESS | VERBOSE_ABORT_PROCESS))
- {
- printk(INFO_LEAD "Reset device, hardware_scb %d,\n",
- p->host_no, channel, target, lun, active_scb);
- printk(INFO_LEAD "Current scb %d, SEQADDR 0x%x, LASTPHASE "
- "0x%x\n",
- p->host_no, channel, target, lun, scb_tag,
- aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8),
- aic_inb(p, LASTPHASE));
- printk(INFO_LEAD "SG_CACHEPTR 0x%x, SG_COUNT %d, SCSISIGI 0x%x\n",
- p->host_no, channel, target, lun,
- (p->features & AHC_ULTRA2) ? aic_inb(p, SG_CACHEPTR) : 0,
- aic_inb(p, SG_COUNT), aic_inb(p, SCSISIGI));
- printk(INFO_LEAD "SSTAT0 0x%x, SSTAT1 0x%x, SSTAT2 0x%x\n",
- p->host_no, channel, target, lun, aic_inb(p, SSTAT0),
- aic_inb(p, SSTAT1), aic_inb(p, SSTAT2));
- }
-
- /*
- * Deal with the busy target and linked next issues.
- */
- list_for_each_entry(aic_dev, &p->aic_devs, list)
- {
- if (aic7xxx_verbose & (VERBOSE_RESET_PROCESS | VERBOSE_ABORT_PROCESS))
- printk(INFO_LEAD "processing aic_dev %p\n", p->host_no, channel, target,
- lun, aic_dev);
- sd = aic_dev->SDptr;
-
- if((target != ALL_TARGETS && target != sd->id) ||
- (channel != ALL_CHANNELS && channel != sd->channel))
- continue;
- if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
- printk(INFO_LEAD "Cleaning up status information "
- "and delayed_scbs.\n", p->host_no, sd->channel, sd->id, sd->lun);
- aic_dev->flags &= ~BUS_DEVICE_RESET_PENDING;
- if ( tag == SCB_LIST_NULL )
- {
- aic_dev->dtr_pending = 0;
- aic_dev->needppr = aic_dev->needppr_copy;
- aic_dev->needsdtr = aic_dev->needsdtr_copy;
- aic_dev->needwdtr = aic_dev->needwdtr_copy;
- aic_dev->flags = DEVICE_PRINT_DTR;
- aic_dev->temp_q_depth = aic_dev->max_q_depth;
- }
- tcl = (sd->id << 4) | (sd->channel << 3) | sd->lun;
- if ( (aic7xxx_index_busy_target(p, tcl, FALSE) == tag) ||
- (tag == SCB_LIST_NULL) )
- aic7xxx_index_busy_target(p, tcl, /* unbusy */ TRUE);
- prev_scbp = NULL;
- scbp = aic_dev->delayed_scbs.head;
- while (scbp != NULL)
- {
- prev_scbp = scbp;
- scbp = scbp->q_next;
- if (aic7xxx_match_scb(p, prev_scbp, target, channel, lun, tag))
- {
- scbq_remove(&aic_dev->delayed_scbs, prev_scbp);
- if (prev_scbp->flags & SCB_WAITINGQ)
- {
- aic_dev->active_cmds++;
- p->activescbs++;
- }
- prev_scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
- prev_scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
- }
- }
- }
-
- if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
- printk(INFO_LEAD "Cleaning QINFIFO.\n", p->host_no, channel, target, lun );
- aic7xxx_search_qinfifo(p, target, channel, lun, tag,
- SCB_RESET | SCB_QUEUED_FOR_DONE, /* requeue */ FALSE, NULL);
-
-/*
- * Search the waiting_scbs queue for matches, this catches any SCB_QUEUED
- * ABORT/RESET commands.
- */
- if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
- printk(INFO_LEAD "Cleaning waiting_scbs.\n", p->host_no, channel,
- target, lun );
- {
- struct aic7xxx_scb *scbp, *prev_scbp;
-
- prev_scbp = NULL;
- scbp = p->waiting_scbs.head;
- while (scbp != NULL)
- {
- prev_scbp = scbp;
- scbp = scbp->q_next;
- if (aic7xxx_match_scb(p, prev_scbp, target, channel, lun, tag))
- {
- scbq_remove(&p->waiting_scbs, prev_scbp);
- if (prev_scbp->flags & SCB_WAITINGQ)
- {
- AIC_DEV(prev_scbp->cmd)->active_cmds++;
- p->activescbs++;
- }
- prev_scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
- prev_scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
- }
- }
- }
-
-
- /*
- * Search waiting for selection list.
- */
- if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
- printk(INFO_LEAD "Cleaning waiting for selection "
- "list.\n", p->host_no, channel, target, lun);
- {
- unsigned char next, prev, scb_index;
-
- next = aic_inb(p, WAITING_SCBH); /* Start at head of list. */
- prev = SCB_LIST_NULL;
- while (next != SCB_LIST_NULL)
- {
- aic_outb(p, next, SCBPTR);
- scb_index = aic_inb(p, SCB_TAG);
- if (scb_index >= p->scb_data->numscbs)
- {
- /*
- * No aic7xxx_verbose check here.....we want to see this since it
- * means either the kernel driver or the sequencer screwed things up
- */
- printk(WARN_LEAD "Waiting List inconsistency; SCB index=%d, "
- "numscbs=%d\n", p->host_no, channel, target, lun, scb_index,
- p->scb_data->numscbs);
- next = aic_inb(p, SCB_NEXT);
- aic7xxx_add_curscb_to_free_list(p);
- }
- else
- {
- scbp = p->scb_data->scb_array[scb_index];
- if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
- {
- next = aic7xxx_abort_waiting_scb(p, scbp, next, prev);
- if (scbp->flags & SCB_WAITINGQ)
- {
- AIC_DEV(scbp->cmd)->active_cmds++;
- p->activescbs++;
- }
- scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
- scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
- if (prev == SCB_LIST_NULL)
- {
- /*
- * This is either the first scb on the waiting list, or we
- * have already yanked the first and haven't left any behind.
- * Either way, we need to turn off the selection hardware if
- * it isn't already off.
- */
- aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ);
- aic_outb(p, CLRSELTIMEO, CLRSINT1);
- }
- }
- else
- {
- prev = next;
- next = aic_inb(p, SCB_NEXT);
- }
- }
- }
- }
-
- /*
- * Go through disconnected list and remove any entries we have queued
- * for completion, zeroing their control byte too.
- */
- if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
- printk(INFO_LEAD "Cleaning disconnected scbs "
- "list.\n", p->host_no, channel, target, lun);
- if (p->flags & AHC_PAGESCBS)
- {
- unsigned char next, prev, scb_index;
-
- next = aic_inb(p, DISCONNECTED_SCBH);
- prev = SCB_LIST_NULL;
- while (next != SCB_LIST_NULL)
- {
- aic_outb(p, next, SCBPTR);
- scb_index = aic_inb(p, SCB_TAG);
- if (scb_index > p->scb_data->numscbs)
- {
- printk(WARN_LEAD "Disconnected List inconsistency; SCB index=%d, "
- "numscbs=%d\n", p->host_no, channel, target, lun, scb_index,
- p->scb_data->numscbs);
- next = aic7xxx_rem_scb_from_disc_list(p, next, prev);
- }
- else
- {
- scbp = p->scb_data->scb_array[scb_index];
- if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
- {
- next = aic7xxx_rem_scb_from_disc_list(p, next, prev);
- if (scbp->flags & SCB_WAITINGQ)
- {
- AIC_DEV(scbp->cmd)->active_cmds++;
- p->activescbs++;
- }
- scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
- scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
- scbp->hscb->control = 0;
- }
- else
- {
- prev = next;
- next = aic_inb(p, SCB_NEXT);
- }
- }
- }
- }
-
- /*
- * Walk the free list making sure no entries on the free list have
- * a valid SCB_TAG value or SCB_CONTROL byte.
- */
- if (p->flags & AHC_PAGESCBS)
- {
- unsigned char next;
-
- next = aic_inb(p, FREE_SCBH);
- while (next != SCB_LIST_NULL)
- {
- aic_outb(p, next, SCBPTR);
- if (aic_inb(p, SCB_TAG) < p->scb_data->numscbs)
- {
- printk(WARN_LEAD "Free list inconsistency!.\n", p->host_no, channel,
- target, lun);
- init_lists = TRUE;
- next = SCB_LIST_NULL;
- }
- else
- {
- aic_outb(p, SCB_LIST_NULL, SCB_TAG);
- aic_outb(p, 0, SCB_CONTROL);
- next = aic_inb(p, SCB_NEXT);
- }
- }
- }
-
- /*
- * Go through the hardware SCB array looking for commands that
- * were active but not on any list.
- */
- if (init_lists)
- {
- aic_outb(p, SCB_LIST_NULL, FREE_SCBH);
- aic_outb(p, SCB_LIST_NULL, WAITING_SCBH);
- aic_outb(p, SCB_LIST_NULL, DISCONNECTED_SCBH);
- }
- for (i = p->scb_data->maxhscbs - 1; i >= 0; i--)
- {
- unsigned char scbid;
-
- aic_outb(p, i, SCBPTR);
- if (init_lists)
- {
- aic_outb(p, SCB_LIST_NULL, SCB_TAG);
- aic_outb(p, SCB_LIST_NULL, SCB_NEXT);
- aic_outb(p, 0, SCB_CONTROL);
- aic7xxx_add_curscb_to_free_list(p);
- }
- else
- {
- scbid = aic_inb(p, SCB_TAG);
- if (scbid < p->scb_data->numscbs)
- {
- scbp = p->scb_data->scb_array[scbid];
- if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
- {
- aic_outb(p, 0, SCB_CONTROL);
- aic_outb(p, SCB_LIST_NULL, SCB_TAG);
- aic7xxx_add_curscb_to_free_list(p);
- }
- }
- }
- }
-
- /*
- * Go through the entire SCB array now and look for commands for
- * this target that are still active. These are other (most likely
- * tagged) commands that were disconnected when the reset occurred.
- * For any command we find here, we know it wasn't on any queue,
- * it wasn't in the qinfifo, and it wasn't in the disconnected or waiting
- * lists, so it really must have been a paged out SCB. In that case,
- * we shouldn't need to bother with updating any counters, just mark
- * the correct flags and go on.
- */
- for (i = 0; i < p->scb_data->numscbs; i++)
- {
- scbp = p->scb_data->scb_array[i];
- if ((scbp->flags & SCB_ACTIVE) &&
- aic7xxx_match_scb(p, scbp, target, channel, lun, tag) &&
- !aic7xxx_scb_on_qoutfifo(p, scbp))
- {
- if (scbp->flags & SCB_WAITINGQ)
- {
- scbq_remove(&p->waiting_scbs, scbp);
- scbq_remove(&AIC_DEV(scbp->cmd)->delayed_scbs, scbp);
- AIC_DEV(scbp->cmd)->active_cmds++;
- p->activescbs++;
- }
- scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
- scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
- }
- }
-
- aic_outb(p, active_scb, SCBPTR);
-}
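
For reference, the target_channel_lun (TCL) byte that this routine and aic7xxx_index_busy_target() work with is packed exactly as in the tcl assignment above. A small sketch of that packing and the matching unpacking, using hypothetical helper names that are not part of the driver:

/* Hypothetical helpers restating the TCL layout used above:
 * bits 7:4 = target id, bit 3 = channel (0 = A, 1 = B), bits 2:0 = lun. */
static inline unsigned char aic7xxx_make_tcl(int target, int channel, int lun)
{
  return (target << 4) | ((channel & 0x01) << 3) | (lun & 0x07);
}

static inline int aic7xxx_tcl_target(unsigned char tcl)
{
  return (tcl >> 4) & 0x0f;
}

static inline int aic7xxx_tcl_channel(unsigned char tcl)
{
  return (tcl >> 3) & 0x01;
}

static inline int aic7xxx_tcl_lun(unsigned char tcl)
{
  return tcl & 0x07;
}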
-
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_clear_intstat
- *
- * Description:
- * Clears the interrupt status.
- *-F*************************************************************************/
-static void
-aic7xxx_clear_intstat(struct aic7xxx_host *p)
-{
- /* Clear any interrupt conditions this may have caused. */
- aic_outb(p, CLRSELDO | CLRSELDI | CLRSELINGO, CLRSINT0);
- aic_outb(p, CLRSELTIMEO | CLRATNO | CLRSCSIRSTI | CLRBUSFREE | CLRSCSIPERR |
- CLRPHASECHG | CLRREQINIT, CLRSINT1);
- aic_outb(p, CLRSCSIINT | CLRSEQINT | CLRBRKADRINT | CLRPARERR, CLRINT);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_reset_current_bus
- *
- * Description:
- * Reset the current SCSI bus.
- *-F*************************************************************************/
-static void
-aic7xxx_reset_current_bus(struct aic7xxx_host *p)
-{
-
- /* Disable reset interrupts. */
- aic_outb(p, aic_inb(p, SIMODE1) & ~ENSCSIRST, SIMODE1);
-
- /* Turn off the bus' current operations, after all, we shouldn't have any
- * valid commands left to cause a RSELI and SELO once we've tossed the
- * bus away with this reset, so we might as well shut down the sequencer
- * until the bus is restarted as opposed to saving the current settings
- * and restoring them (which makes no sense to me). */
-
- /* Turn on the bus reset. */
- aic_outb(p, aic_inb(p, SCSISEQ) | SCSIRSTO, SCSISEQ);
- while ( (aic_inb(p, SCSISEQ) & SCSIRSTO) == 0)
- mdelay(5);
-
- /*
- * Some of the new Ultra2 chipsets need a longer delay after a chip
- * reset than just the init setup creates, so we have to delay here
- * before we go into a reset in order to make the chips happy.
- */
- if (p->features & AHC_ULTRA2)
- mdelay(250);
- else
- mdelay(50);
-
- /* Turn off the bus reset. */
- aic_outb(p, 0, SCSISEQ);
- mdelay(10);
-
- aic7xxx_clear_intstat(p);
- /* Re-enable reset interrupts. */
- aic_outb(p, aic_inb(p, SIMODE1) | ENSCSIRST, SIMODE1);
-
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_reset_channel
- *
- * Description:
- * Reset the channel.
- *-F*************************************************************************/
-static void
-aic7xxx_reset_channel(struct aic7xxx_host *p, int channel, int initiate_reset)
-{
- unsigned long offset_min, offset_max;
- unsigned char sblkctl;
- int cur_channel;
-
- if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
- printk(INFO_LEAD "Reset channel called, %s initiate reset.\n",
- p->host_no, channel, -1, -1, (initiate_reset==TRUE) ? "will" : "won't" );
-
-
- if (channel == 1)
- {
- offset_min = 8;
- offset_max = 16;
- }
- else
- {
- if (p->features & AHC_TWIN)
- {
- /* Channel A */
- offset_min = 0;
- offset_max = 8;
- }
- else
- {
- offset_min = 0;
- if (p->features & AHC_WIDE)
- {
- offset_max = 16;
- }
- else
- {
- offset_max = 8;
- }
- }
- }
-
- while (offset_min < offset_max)
- {
- /*
- * Revert to async/narrow transfers until we renegotiate.
- */
- aic_outb(p, 0, TARG_SCSIRATE + offset_min);
- if (p->features & AHC_ULTRA2)
- {
- aic_outb(p, 0, TARG_OFFSET + offset_min);
- }
- offset_min++;
- }
-
- /*
- * Reset the bus and unpause/restart the controller
- */
- sblkctl = aic_inb(p, SBLKCTL);
- if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
- cur_channel = (sblkctl & SELBUSB) >> 3;
- else
- cur_channel = 0;
- if ( (cur_channel != channel) && (p->features & AHC_TWIN) )
- {
- /*
- * Case 1: Command for another bus is active
- */
- if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
- printk(INFO_LEAD "Stealthily resetting idle channel.\n", p->host_no,
- channel, -1, -1);
- /*
- * Stealthily reset the other bus without upsetting the current bus.
- */
- aic_outb(p, sblkctl ^ SELBUSB, SBLKCTL);
- aic_outb(p, aic_inb(p, SIMODE1) & ~ENBUSFREE, SIMODE1);
- if (initiate_reset)
- {
- aic7xxx_reset_current_bus(p);
- }
- aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP), SCSISEQ);
- aic7xxx_clear_intstat(p);
- aic_outb(p, sblkctl, SBLKCTL);
- }
- else
- {
- /*
- * Case 2: A command from this bus is active or we're idle.
- */
- if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
- printk(INFO_LEAD "Resetting currently active channel.\n", p->host_no,
- channel, -1, -1);
- aic_outb(p, aic_inb(p, SIMODE1) & ~(ENBUSFREE|ENREQINIT),
- SIMODE1);
- p->flags &= ~AHC_HANDLING_REQINITS;
- p->msg_type = MSG_TYPE_NONE;
- p->msg_len = 0;
- if (initiate_reset)
- {
- aic7xxx_reset_current_bus(p);
- }
- aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP), SCSISEQ);
- aic7xxx_clear_intstat(p);
- }
- if (aic7xxx_verbose & VERBOSE_RESET_RETURN)
- printk(INFO_LEAD "Channel reset\n", p->host_no, channel, -1, -1);
- /*
- * Clean up all the state information for the pending transactions
- * on this bus.
- */
- aic7xxx_reset_device(p, ALL_TARGETS, channel, ALL_LUNS, SCB_LIST_NULL);
-
- if ( !(p->features & AHC_TWIN) )
- {
- restart_sequencer(p);
- }
-
- return;
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_run_waiting_queues
- *
- * Description:
- * Scan the waiting_scbs queue, downloading and starting as many
- * scbs as we can.
- *-F*************************************************************************/
-static void
-aic7xxx_run_waiting_queues(struct aic7xxx_host *p)
-{
- struct aic7xxx_scb *scb;
- struct aic_dev_data *aic_dev;
- int sent;
-
-
- if (p->waiting_scbs.head == NULL)
- return;
-
- sent = 0;
-
- /*
- * First handle SCBs that are waiting but have been assigned a slot.
- */
- while ((scb = scbq_remove_head(&p->waiting_scbs)) != NULL)
- {
- aic_dev = scb->cmd->device->hostdata;
- if ( !scb->tag_action )
- {
- aic_dev->temp_q_depth = 1;
- }
- if ( aic_dev->active_cmds >= aic_dev->temp_q_depth)
- {
- scbq_insert_tail(&aic_dev->delayed_scbs, scb);
- }
- else
- {
- scb->flags &= ~SCB_WAITINGQ;
- aic_dev->active_cmds++;
- p->activescbs++;
- if ( !(scb->tag_action) )
- {
- aic7xxx_busy_target(p, scb);
- }
- p->qinfifo[p->qinfifonext++] = scb->hscb->tag;
- sent++;
- }
- }
- if (sent)
- {
- if (p->features & AHC_QUEUE_REGS)
- aic_outb(p, p->qinfifonext, HNSCB_QOFF);
- else
- {
- pause_sequencer(p);
- aic_outb(p, p->qinfifonext, KERNEL_QINPOS);
- unpause_sequencer(p, FALSE);
- }
- if (p->activescbs > p->max_activescbs)
- p->max_activescbs = p->activescbs;
- }
-}
-
-#ifdef CONFIG_PCI
-
-#define DPE 0x80
-#define SSE 0x40
-#define RMA 0x20
-#define RTA 0x10
-#define STA 0x08
-#define DPR 0x01
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_pci_intr
- *
- * Description:
- * Check the scsi card for PCI errors and clear the interrupt
- *
- * NOTE: If you don't have this function and a 2940 card encounters
- * a PCI error condition, the machine will end up locked as the
- * interrupt handler gets slammed with non-stop PCI error interrupts.
- *-F*************************************************************************/
-static void
-aic7xxx_pci_intr(struct aic7xxx_host *p)
-{
- unsigned char status1;
-
- pci_read_config_byte(p->pdev, PCI_STATUS + 1, &status1);
-
- if ( (status1 & DPE) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
- printk(WARN_LEAD "Data Parity Error during PCI address or PCI write"
- "phase.\n", p->host_no, -1, -1, -1);
- if ( (status1 & SSE) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
- printk(WARN_LEAD "Signal System Error Detected\n", p->host_no,
- -1, -1, -1);
- if ( (status1 & RMA) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
- printk(WARN_LEAD "Received a PCI Master Abort\n", p->host_no,
- -1, -1, -1);
- if ( (status1 & RTA) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
- printk(WARN_LEAD "Received a PCI Target Abort\n", p->host_no,
- -1, -1, -1);
- if ( (status1 & STA) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
- printk(WARN_LEAD "Signaled a PCI Target Abort\n", p->host_no,
- -1, -1, -1);
- if ( (status1 & DPR) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
- printk(WARN_LEAD "Data Parity Error has been reported via PCI pin "
- "PERR#\n", p->host_no, -1, -1, -1);
-
- pci_write_config_byte(p->pdev, PCI_STATUS + 1, status1);
- if (status1 & (DPR|RMA|RTA))
- aic_outb(p, CLRPARERR, CLRINT);
-
- if ( (aic7xxx_panic_on_abort) && (p->spurious_int > 500) )
- aic7xxx_panic_abort(p, NULL);
-
-}
-#endif /* CONFIG_PCI */
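
The DPE through DPR masks defined before aic7xxx_pci_intr() select the error bits in the upper byte of the standard PCI status word, which is why the routine reads and writes PCI_STATUS + 1; those bits are write-one-to-clear, so writing the value back acknowledges every latched error. A minimal sketch of that clear-on-write step in isolation, with a hypothetical function name (the config-space helpers are the same ones used above):

/* Hypothetical stand-alone version of the acknowledge step above: writing
 * back the status byte we just read clears the write-one-to-clear error
 * bits in the upper half of the PCI status register. */
static void aic7xxx_clear_pci_errors(struct pci_dev *pdev)
{
  unsigned char status1;

  pci_read_config_byte(pdev, PCI_STATUS + 1, &status1);
  pci_write_config_byte(pdev, PCI_STATUS + 1, status1);
}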
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_construct_ppr
- *
- * Description:
- * Build up a Parallel Protocol Request message for use with SCSI-3
- * devices.
- *-F*************************************************************************/
-static void
-aic7xxx_construct_ppr(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
-{
- p->msg_buf[p->msg_index++] = MSG_EXTENDED;
- p->msg_buf[p->msg_index++] = MSG_EXT_PPR_LEN;
- p->msg_buf[p->msg_index++] = MSG_EXT_PPR;
- p->msg_buf[p->msg_index++] = AIC_DEV(scb->cmd)->goal.period;
- p->msg_buf[p->msg_index++] = 0;
- p->msg_buf[p->msg_index++] = AIC_DEV(scb->cmd)->goal.offset;
- p->msg_buf[p->msg_index++] = AIC_DEV(scb->cmd)->goal.width;
- p->msg_buf[p->msg_index++] = AIC_DEV(scb->cmd)->goal.options;
- p->msg_len += 8;
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_construct_sdtr
- *
- * Description:
- * Constructs a synchronous data transfer message in the message
- * buffer on the sequencer.
- *-F*************************************************************************/
-static void
-aic7xxx_construct_sdtr(struct aic7xxx_host *p, unsigned char period,
- unsigned char offset)
-{
- p->msg_buf[p->msg_index++] = MSG_EXTENDED;
- p->msg_buf[p->msg_index++] = MSG_EXT_SDTR_LEN;
- p->msg_buf[p->msg_index++] = MSG_EXT_SDTR;
- p->msg_buf[p->msg_index++] = period;
- p->msg_buf[p->msg_index++] = offset;
- p->msg_len += 5;
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_construct_wdtr
- *
- * Description:
- * Constructs a wide data transfer message in the message buffer
- * on the sequencer.
- *-F*************************************************************************/
-static void
-aic7xxx_construct_wdtr(struct aic7xxx_host *p, unsigned char bus_width)
-{
- p->msg_buf[p->msg_index++] = MSG_EXTENDED;
- p->msg_buf[p->msg_index++] = MSG_EXT_WDTR_LEN;
- p->msg_buf[p->msg_index++] = MSG_EXT_WDTR;
- p->msg_buf[p->msg_index++] = bus_width;
- p->msg_len += 4;
-}
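
The three builders above all emit the standard extended-message framing: MSG_EXTENDED, a length byte, the extended opcode, then the parameter bytes (period and offset for SDTR, the bus width for WDTR, and period/reserved/offset/width/options for PPR). A generic sketch of that pattern, with a hypothetical function name that is not part of the driver:

/* Hypothetical generic form of the builders above: an extended message is
 * MSG_EXTENDED, a length byte, the opcode, then (len - 1) parameter bytes,
 * for a total of len + 2 bytes added to the outgoing message buffer. */
static void aic7xxx_construct_ext_msg(struct aic7xxx_host *p,
                                      unsigned char opcode, unsigned char len,
                                      const unsigned char *params)
{
  int i;

  p->msg_buf[p->msg_index++] = MSG_EXTENDED;
  p->msg_buf[p->msg_index++] = len;
  p->msg_buf[p->msg_index++] = opcode;
  for (i = 0; i < len - 1; i++)
  {
    p->msg_buf[p->msg_index++] = params[i];
  }
  p->msg_len += len + 2;
}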
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_calculate_residual
- *
- * Description:
- * Calculate the residual data not yet transferred.
- *-F*************************************************************************/
-static void
-aic7xxx_calculate_residual (struct aic7xxx_host *p, struct aic7xxx_scb *scb)
-{
- struct aic7xxx_hwscb *hscb;
- struct scsi_cmnd *cmd;
- int actual, i;
-
- cmd = scb->cmd;
- hscb = scb->hscb;
-
- /*
- * Don't destroy valid residual information with
- * residual coming from a check sense operation.
- */
- if (((scb->hscb->control & DISCONNECTED) == 0) &&
- (scb->flags & SCB_SENSE) == 0)
- {
- /*
- * We had an underflow. At this time, there's only
- * one other driver that bothers to check for this,
- * and cmd->underflow seems to be set rather half-
- * heartedly in the higher-level SCSI code.
- */
- actual = scb->sg_length;
- for (i=1; i < hscb->residual_SG_segment_count; i++)
- {
- actual -= scb->sg_list[scb->sg_count - i].length;
- }
- actual -= (hscb->residual_data_count[2] << 16) |
- (hscb->residual_data_count[1] << 8) |
- hscb->residual_data_count[0];
-
- if (actual < cmd->underflow)
- {
- if (aic7xxx_verbose & VERBOSE_MINOR_ERROR)
- {
- printk(INFO_LEAD "Underflow - Wanted %u, %s %u, residual SG "
- "count %d.\n", p->host_no, CTL_OF_SCB(scb), cmd->underflow,
- (rq_data_dir(cmd->request) == WRITE) ? "wrote" : "read", actual,
- hscb->residual_SG_segment_count);
- printk(INFO_LEAD "status 0x%x.\n", p->host_no, CTL_OF_SCB(scb),
- hscb->target_status);
- }
- /*
- * In 2.4, only send back the residual information, don't flag this
- * as an error. Before 2.4 we had to flag this as an error because
- * the mid layer didn't check residual data counts to see if the
- * command needs to be retried.
- */
- scsi_set_resid(cmd, scb->sg_length - actual);
- aic7xxx_status(cmd) = hscb->target_status;
- }
- }
-
- /*
- * Clean out the residual information in the SCB for the
- * next consumer.
- */
- hscb->residual_data_count[2] = 0;
- hscb->residual_data_count[1] = 0;
- hscb->residual_data_count[0] = 0;
- hscb->residual_SG_segment_count = 0;
-}
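
The underflow test above amounts to: bytes actually transferred = total S/G length, minus the lengths of the trailing S/G segments that were never started, minus the 24-bit residual left in the segment that was in progress. A standalone sketch of that arithmetic, assuming the little-endian S/G lengths used elsewhere in this file (hypothetical helper, not part of the driver):

/* Hypothetical restatement of the residual arithmetic above. */
static int aic7xxx_bytes_transferred(unsigned int total_sg_length,
                                     struct hw_scatterlist *sg_list,
                                     int sg_count, int resid_sg_count,
                                     unsigned char resid[3])
{
  int actual = total_sg_length;
  int i;

  for (i = 1; i < resid_sg_count; i++)
  {
    actual -= le32_to_cpu(sg_list[sg_count - i].length);
  }
  actual -= (resid[2] << 16) | (resid[1] << 8) | resid[0];
  return actual;
}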
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_handle_device_reset
- *
- * Description:
- * Cleans up after a bus device reset delivered to the given target/channel.
- *-F*************************************************************************/
-static void
-aic7xxx_handle_device_reset(struct aic7xxx_host *p, int target, int channel)
-{
- unsigned char tindex = target;
-
- tindex |= ((channel & 0x01) << 3);
-
- /*
- * Go back to async/narrow transfers and renegotiate.
- */
- aic_outb(p, 0, TARG_SCSIRATE + tindex);
- if (p->features & AHC_ULTRA2)
- aic_outb(p, 0, TARG_OFFSET + tindex);
- aic7xxx_reset_device(p, target, channel, ALL_LUNS, SCB_LIST_NULL);
- if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
- printk(INFO_LEAD "Bus Device Reset delivered.\n", p->host_no, channel,
- target, -1);
- aic7xxx_run_done_queue(p, /*complete*/ TRUE);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_handle_seqint
- *
- * Description:
- * Interrupt handler for sequencer interrupts (SEQINT).
- *-F*************************************************************************/
-static void
-aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
-{
- struct aic7xxx_scb *scb;
- struct aic_dev_data *aic_dev;
- unsigned short target_mask;
- unsigned char target, lun, tindex;
- unsigned char queue_flag = FALSE;
- char channel;
- int result;
-
- target = ((aic_inb(p, SAVED_TCL) >> 4) & 0x0f);
- if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
- channel = (aic_inb(p, SBLKCTL) & SELBUSB) >> 3;
- else
- channel = 0;
- tindex = target + (channel << 3);
- lun = aic_inb(p, SAVED_TCL) & 0x07;
- target_mask = (0x01 << tindex);
-
- /*
- * Go ahead and clear the SEQINT now, that avoids any interrupt race
- * conditions later on in case we enable some other interrupt.
- */
- aic_outb(p, CLRSEQINT, CLRINT);
- switch (intstat & SEQINT_MASK)
- {
- case NO_MATCH:
- {
- aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP),
- SCSISEQ);
- printk(WARN_LEAD "No active SCB for reconnecting target - Issuing "
- "BUS DEVICE RESET.\n", p->host_no, channel, target, lun);
- printk(WARN_LEAD " SAVED_TCL=0x%x, ARG_1=0x%x, SEQADDR=0x%x\n",
- p->host_no, channel, target, lun,
- aic_inb(p, SAVED_TCL), aic_inb(p, ARG_1),
- (aic_inb(p, SEQADDR1) << 8) | aic_inb(p, SEQADDR0));
- if (aic7xxx_panic_on_abort)
- aic7xxx_panic_abort(p, NULL);
- }
- break;
-
- case SEND_REJECT:
- {
- if (aic7xxx_verbose & VERBOSE_MINOR_ERROR)
- printk(INFO_LEAD "Rejecting unknown message (0x%x) received from "
- "target, SEQ_FLAGS=0x%x\n", p->host_no, channel, target, lun,
- aic_inb(p, ACCUM), aic_inb(p, SEQ_FLAGS));
- }
- break;
-
- case NO_IDENT:
- {
- /*
- * The reconnecting target either did not send an identify
- * message, or did, but we didn't find an SCB to match and
- * before it could respond to our ATN/abort, it hit a dataphase.
- * The only safe thing to do is to blow it away with a bus
- * reset.
- */
- if (aic7xxx_verbose & (VERBOSE_SEQINT | VERBOSE_RESET_MID))
- printk(INFO_LEAD "Target did not send an IDENTIFY message; "
- "LASTPHASE 0x%x, SAVED_TCL 0x%x\n", p->host_no, channel, target,
- lun, aic_inb(p, LASTPHASE), aic_inb(p, SAVED_TCL));
-
- aic7xxx_reset_channel(p, channel, /*initiate reset*/ TRUE);
- aic7xxx_run_done_queue(p, TRUE);
-
- }
- break;
-
- case BAD_PHASE:
- if (aic_inb(p, LASTPHASE) == P_BUSFREE)
- {
- if (aic7xxx_verbose & VERBOSE_SEQINT)
- printk(INFO_LEAD "Missed busfree.\n", p->host_no, channel,
- target, lun);
- restart_sequencer(p);
- }
- else
- {
- if (aic7xxx_verbose & VERBOSE_SEQINT)
- printk(INFO_LEAD "Unknown scsi bus phase, continuing\n", p->host_no,
- channel, target, lun);
- }
- break;
-
- case EXTENDED_MSG:
- {
- p->msg_type = MSG_TYPE_INITIATOR_MSGIN;
- p->msg_len = 0;
- p->msg_index = 0;
-
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic7xxx_verbose > 0xffff)
- printk(INFO_LEAD "Enabling REQINITs for MSG_IN\n", p->host_no,
- channel, target, lun);
-#endif
-
- /*
- * To actually receive the message, simply turn on
- * REQINIT interrupts and let our interrupt handler
- * do the rest (REQINIT should already be true).
- */
- p->flags |= AHC_HANDLING_REQINITS;
- aic_outb(p, aic_inb(p, SIMODE1) | ENREQINIT, SIMODE1);
-
- /*
- * We don't want the sequencer unpaused yet so we return early
- */
- return;
- }
-
- case REJECT_MSG:
- {
- /*
- * What we care about here is if we had an outstanding SDTR
- * or WDTR message for this target. If we did, this is a
- * signal that the target is refusing negotiation.
- */
- unsigned char scb_index;
- unsigned char last_msg;
-
- scb_index = aic_inb(p, SCB_TAG);
- scb = p->scb_data->scb_array[scb_index];
- aic_dev = AIC_DEV(scb->cmd);
- last_msg = aic_inb(p, LAST_MSG);
-
- if ( (last_msg == MSG_IDENTIFYFLAG) &&
- (scb->tag_action) &&
- !(scb->flags & SCB_MSGOUT_BITS) )
- {
- if (scb->tag_action == MSG_ORDERED_Q_TAG)
- {
- /*
- * OK...the device seems able to accept tagged commands, but
- * not ordered tag commands, only simple tag commands. So, we
- * disable ordered tag commands and go on with life just like
- * normal.
- */
- scsi_adjust_queue_depth(scb->cmd->device, MSG_SIMPLE_TAG,
- scb->cmd->device->queue_depth);
- scb->tag_action = MSG_SIMPLE_Q_TAG;
- scb->hscb->control &= ~SCB_TAG_TYPE;
- scb->hscb->control |= MSG_SIMPLE_Q_TAG;
- aic_outb(p, scb->hscb->control, SCB_CONTROL);
- /*
- * OK..we set the tag type to simple tag command, now we re-assert
- * ATNO and hope this will take us into the identify phase again
- * so we can resend the tag type and info to the device.
- */
- aic_outb(p, MSG_IDENTIFYFLAG, MSG_OUT);
- aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO);
- }
- else if (scb->tag_action == MSG_SIMPLE_Q_TAG)
- {
- unsigned char i;
- struct aic7xxx_scb *scbp;
- int old_verbose;
- /*
- * Hmmmm....the device is flaking out on tagged commands.
- */
- scsi_adjust_queue_depth(scb->cmd->device, 0 /* untagged */,
- p->host->cmd_per_lun);
- aic_dev->max_q_depth = aic_dev->temp_q_depth = 1;
- /*
- * We set this command up as a bus device reset. However, we have
- * to clear the tag type as it's causing us problems. We shouldn't
- * have to worry about any other commands being active, since if
- * the device is refusing tagged commands, this should be the
- * first tagged command sent to the device, however, we do have
- * to worry about any other tagged commands that may already be
- * in the qinfifo. The easiest way to do this, is to issue a BDR,
- * send all the commands back to the mid level code, then let them
- * come back and get rebuilt as untagged commands.
- */
- scb->tag_action = 0;
- scb->hscb->control &= ~(TAG_ENB | SCB_TAG_TYPE);
- aic_outb(p, scb->hscb->control, SCB_CONTROL);
-
- old_verbose = aic7xxx_verbose;
- aic7xxx_verbose &= ~(VERBOSE_RESET|VERBOSE_ABORT);
- for (i=0; i < p->scb_data->numscbs; i++)
- {
- scbp = p->scb_data->scb_array[i];
- if ((scbp->flags & SCB_ACTIVE) && (scbp != scb))
- {
- if (aic7xxx_match_scb(p, scbp, target, channel, lun, i))
- {
- aic7xxx_reset_device(p, target, channel, lun, i);
- }
- }
- }
- aic7xxx_run_done_queue(p, TRUE);
- aic7xxx_verbose = old_verbose;
- /*
- * Wait until after the for loop to set the busy index since
- * aic7xxx_reset_device will clear the busy index during its
- * operation.
- */
- aic7xxx_busy_target(p, scb);
- printk(INFO_LEAD "Device is refusing tagged commands, using "
- "untagged I/O.\n", p->host_no, channel, target, lun);
- aic_outb(p, MSG_IDENTIFYFLAG, MSG_OUT);
- aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO);
- }
- }
- else if (scb->flags & SCB_MSGOUT_PPR)
- {
- /*
- * As per the draft specs, any device capable of supporting any of
- * the option values other than 0 is not allowed to reject the
- * PPR message. Instead, they must negotiate out what they do
- * support instead of rejecting our offering or else they cause
- * a parity error during msg_out phase to signal that they don't
- * like our settings.
- */
- aic_dev->needppr = aic_dev->needppr_copy = 0;
- aic7xxx_set_width(p, target, channel, lun, MSG_EXT_WDTR_BUS_8_BIT,
- (AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE), aic_dev);
- aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0,
- AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE,
- aic_dev);
- aic_dev->goal.options = aic_dev->dtr_pending = 0;
- scb->flags &= ~SCB_MSGOUT_BITS;
- if(aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- {
- printk(INFO_LEAD "Device is rejecting PPR messages, falling "
- "back.\n", p->host_no, channel, target, lun);
- }
- if ( aic_dev->goal.width )
- {
- aic_dev->needwdtr = aic_dev->needwdtr_copy = 1;
- aic_dev->dtr_pending = 1;
- scb->flags |= SCB_MSGOUT_WDTR;
- }
- if ( aic_dev->goal.offset )
- {
- aic_dev->needsdtr = aic_dev->needsdtr_copy = 1;
- if( !aic_dev->dtr_pending )
- {
- aic_dev->dtr_pending = 1;
- scb->flags |= SCB_MSGOUT_SDTR;
- }
- }
- if ( aic_dev->dtr_pending )
- {
- aic_outb(p, HOST_MSG, MSG_OUT);
- aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO);
- }
- }
- else if (scb->flags & SCB_MSGOUT_WDTR)
- {
- /*
- * note 8bit xfers and clear flag
- */
- aic_dev->needwdtr = aic_dev->needwdtr_copy = 0;
- scb->flags &= ~SCB_MSGOUT_BITS;
- aic7xxx_set_width(p, target, channel, lun, MSG_EXT_WDTR_BUS_8_BIT,
- (AHC_TRANS_ACTIVE|AHC_TRANS_GOAL|AHC_TRANS_CUR), aic_dev);
- aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0,
- AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE,
- aic_dev);
- if(aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- {
- printk(INFO_LEAD "Device is rejecting WDTR messages, using "
- "narrow transfers.\n", p->host_no, channel, target, lun);
- }
- aic_dev->needsdtr = aic_dev->needsdtr_copy;
- }
- else if (scb->flags & SCB_MSGOUT_SDTR)
- {
- /*
- * note asynch xfers and clear flag
- */
- aic_dev->needsdtr = aic_dev->needsdtr_copy = 0;
- scb->flags &= ~SCB_MSGOUT_BITS;
- aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0,
- (AHC_TRANS_CUR|AHC_TRANS_ACTIVE|AHC_TRANS_GOAL), aic_dev);
- if(aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- {
- printk(INFO_LEAD "Device is rejecting SDTR messages, using "
- "async transfers.\n", p->host_no, channel, target, lun);
- }
- }
- else if (aic7xxx_verbose & VERBOSE_SEQINT)
- {
- /*
- * Otherwise, we ignore it.
- */
- printk(INFO_LEAD "Received MESSAGE_REJECT for unknown cause. "
- "Ignoring.\n", p->host_no, channel, target, lun);
- }
- }
- break;
-
- case BAD_STATUS:
- {
- unsigned char scb_index;
- struct aic7xxx_hwscb *hscb;
- struct scsi_cmnd *cmd;
-
- /* The sequencer will notify us when a command has an error that
- * would be of interest to the kernel. This allows us to leave
- * the sequencer running in the common case of a command completing
- * without error. The sequencer will have DMA'd the SCB back
- * up to us, so we can reference the drivers SCB array.
- *
- * Set the default return value to 0 indicating not to send
- * sense. The sense code will change this if needed and this
- * reduces code duplication.
- */
- aic_outb(p, 0, RETURN_1);
- scb_index = aic_inb(p, SCB_TAG);
- if (scb_index > p->scb_data->numscbs)
- {
- printk(WARN_LEAD "Invalid SCB during SEQINT 0x%02x, SCB_TAG %d.\n",
- p->host_no, channel, target, lun, intstat, scb_index);
- break;
- }
- scb = p->scb_data->scb_array[scb_index];
- hscb = scb->hscb;
-
- if (!(scb->flags & SCB_ACTIVE) || (scb->cmd == NULL))
- {
- printk(WARN_LEAD "Invalid SCB during SEQINT 0x%x, scb %d, flags 0x%x,"
- " cmd 0x%lx.\n", p->host_no, channel, target, lun, intstat,
- scb_index, scb->flags, (unsigned long) scb->cmd);
- }
- else
- {
- cmd = scb->cmd;
- aic_dev = AIC_DEV(scb->cmd);
- hscb->target_status = aic_inb(p, SCB_TARGET_STATUS);
- aic7xxx_status(cmd) = hscb->target_status;
-
- cmd->result = hscb->target_status;
-
- switch (status_byte(hscb->target_status))
- {
- case GOOD:
- if (aic7xxx_verbose & VERBOSE_SEQINT)
- printk(INFO_LEAD "Interrupted for status of GOOD???\n",
- p->host_no, CTL_OF_SCB(scb));
- break;
-
- case COMMAND_TERMINATED:
- case CHECK_CONDITION:
- if ( !(scb->flags & SCB_SENSE) )
- {
- /*
- * Send a sense command to the requesting target.
- * XXX - revisit this and get rid of the memcopys.
- */
- memcpy(scb->sense_cmd, &generic_sense[0],
- sizeof(generic_sense));
-
- scb->sense_cmd[1] = (cmd->device->lun << 5);
- scb->sense_cmd[4] = SCSI_SENSE_BUFFERSIZE;
-
- scb->sg_list[0].length =
- cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
- scb->sg_list[0].address =
- cpu_to_le32(pci_map_single(p->pdev, cmd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE));
-
- /*
- * XXX - We should allow disconnection, but can't as it
- * might allow overlapped tagged commands.
- */
- /* hscb->control &= DISCENB; */
- hscb->control = 0;
- hscb->target_status = 0;
- hscb->SG_list_pointer =
- cpu_to_le32(SCB_DMA_ADDR(scb, scb->sg_list));
- hscb->SCSI_cmd_pointer =
- cpu_to_le32(SCB_DMA_ADDR(scb, scb->sense_cmd));
- hscb->data_count = scb->sg_list[0].length;
- hscb->data_pointer = scb->sg_list[0].address;
- hscb->SCSI_cmd_length = COMMAND_SIZE(scb->sense_cmd[0]);
- hscb->residual_SG_segment_count = 0;
- hscb->residual_data_count[0] = 0;
- hscb->residual_data_count[1] = 0;
- hscb->residual_data_count[2] = 0;
-
- scb->sg_count = hscb->SG_segment_count = 1;
- scb->sg_length = SCSI_SENSE_BUFFERSIZE;
- scb->tag_action = 0;
- scb->flags |= SCB_SENSE;
- /*
- * Ensure the target is busy since this will be an
- * untagged request.
- */
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- {
- if (scb->flags & SCB_MSGOUT_BITS)
- printk(INFO_LEAD "Requesting SENSE with %s\n", p->host_no,
- CTL_OF_SCB(scb), (scb->flags & SCB_MSGOUT_SDTR) ?
- "SDTR" : "WDTR");
- else
- printk(INFO_LEAD "Requesting SENSE, no MSG\n", p->host_no,
- CTL_OF_SCB(scb));
- }
-#endif
- aic7xxx_busy_target(p, scb);
- aic_outb(p, SEND_SENSE, RETURN_1);
- aic7xxx_error(cmd) = DID_OK;
- break;
- } /* first time sense, no errors */
- printk(INFO_LEAD "CHECK_CONDITION on REQUEST_SENSE, returning "
- "an error.\n", p->host_no, CTL_OF_SCB(scb));
- aic7xxx_error(cmd) = DID_ERROR;
- scb->flags &= ~SCB_SENSE;
- break;
-
- case QUEUE_FULL:
- queue_flag = TRUE; /* Mark that this is a QUEUE_FULL and */
- case BUSY: /* drop through to here */
- {
- struct aic7xxx_scb *next_scbp, *prev_scbp;
- unsigned char active_hscb, next_hscb, prev_hscb, scb_index;
- /*
- * We have to look three places for queued commands:
- * 1: p->waiting_scbs queue
- * 2: QINFIFO
- * 3: WAITING_SCBS list on card (for commands that are started
- * but haven't yet made it to the device)
- *
- * Of special note here is that commands on 2 or 3 above will
- * have already been marked as active, while commands on 1 will
- * not. The aic7xxx_done() function will want to unmark them
- * from active, so any commands we pull off of 1 need to have
- * the active count bumped up first.
- */
- next_scbp = p->waiting_scbs.head;
- while ( next_scbp != NULL )
- {
- prev_scbp = next_scbp;
- next_scbp = next_scbp->q_next;
- if ( aic7xxx_match_scb(p, prev_scbp, target, channel, lun,
- SCB_LIST_NULL) )
- {
- scbq_remove(&p->waiting_scbs, prev_scbp);
- scb->flags = SCB_QUEUED_FOR_DONE | SCB_QUEUE_FULL;
- p->activescbs++;
- aic_dev->active_cmds++;
- }
- }
- aic7xxx_search_qinfifo(p, target, channel, lun,
- SCB_LIST_NULL, SCB_QUEUED_FOR_DONE | SCB_QUEUE_FULL,
- FALSE, NULL);
- next_scbp = NULL;
- active_hscb = aic_inb(p, SCBPTR);
- prev_hscb = next_hscb = scb_index = SCB_LIST_NULL;
- next_hscb = aic_inb(p, WAITING_SCBH);
- while (next_hscb != SCB_LIST_NULL)
- {
- aic_outb(p, next_hscb, SCBPTR);
- scb_index = aic_inb(p, SCB_TAG);
- if (scb_index < p->scb_data->numscbs)
- {
- next_scbp = p->scb_data->scb_array[scb_index];
- if (aic7xxx_match_scb(p, next_scbp, target, channel, lun,
- SCB_LIST_NULL) )
- {
- next_scbp->flags = SCB_QUEUED_FOR_DONE | SCB_QUEUE_FULL;
- next_hscb = aic_inb(p, SCB_NEXT);
- aic_outb(p, 0, SCB_CONTROL);
- aic_outb(p, SCB_LIST_NULL, SCB_TAG);
- aic7xxx_add_curscb_to_free_list(p);
- if (prev_hscb == SCB_LIST_NULL)
- {
- /* We were first on the list,
- * so we kill the selection
- * hardware. Let the sequencer
- * re-init the hardware itself
- */
- aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ);
- aic_outb(p, CLRSELTIMEO, CLRSINT1);
- aic_outb(p, next_hscb, WAITING_SCBH);
- }
- else
- {
- aic_outb(p, prev_hscb, SCBPTR);
- aic_outb(p, next_hscb, SCB_NEXT);
- }
- }
- else
- {
- prev_hscb = next_hscb;
- next_hscb = aic_inb(p, SCB_NEXT);
- }
- } /* scb_index >= p->scb_data->numscbs */
- }
- aic_outb(p, active_hscb, SCBPTR);
- aic7xxx_run_done_queue(p, FALSE);
-
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if( (aic7xxx_verbose & VERBOSE_MINOR_ERROR) ||
- (aic7xxx_verbose > 0xffff) )
- {
- if (queue_flag)
- printk(INFO_LEAD "Queue full received; queue depth %d, "
- "active %d\n", p->host_no, CTL_OF_SCB(scb),
- aic_dev->max_q_depth, aic_dev->active_cmds);
- else
- printk(INFO_LEAD "Target busy\n", p->host_no, CTL_OF_SCB(scb));
- }
-#endif
- if (queue_flag)
- {
- int diff;
- result = scsi_track_queue_full(cmd->device,
- aic_dev->active_cmds);
- if ( result < 0 )
- {
- if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- printk(INFO_LEAD "Tagged Command Queueing disabled.\n",
- p->host_no, CTL_OF_SCB(scb));
- diff = aic_dev->max_q_depth - p->host->cmd_per_lun;
- aic_dev->temp_q_depth = 1;
- aic_dev->max_q_depth = 1;
- }
- else if ( result > 0 )
- {
- if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- printk(INFO_LEAD "Queue depth reduced to %d\n", p->host_no,
- CTL_OF_SCB(scb), result);
- diff = aic_dev->max_q_depth - result;
- aic_dev->max_q_depth = result;
- /* temp_q_depth could have been dropped to 1 for an untagged
- * command that might be coming up */
- if(aic_dev->temp_q_depth > result)
- aic_dev->temp_q_depth = result;
- }
- /* We should free up the now unused SCB entries. But, that's
- * a difficult thing to do because we use a direct indexed
- * array, so we can't just take any entries and free them,
- * we *have* to free the ones at the end of the array, and
- * they very well could be in use right now, which means
- * in order to do this right, we have to add a delayed
- * freeing mechanism tied into the scb_free() code area.
- * We'll add that later.
- */
- }
- break;
- }
-
- default:
- if (aic7xxx_verbose & VERBOSE_SEQINT)
- printk(INFO_LEAD "Unexpected target status 0x%x.\n", p->host_no,
- CTL_OF_SCB(scb), scb->hscb->target_status);
- if (!aic7xxx_error(cmd))
- {
- aic7xxx_error(cmd) = DID_RETRY_COMMAND;
- }
- break;
- } /* end switch */
- } /* end else of */
- }
- break;
-
- case AWAITING_MSG:
- {
- unsigned char scb_index, msg_out;
-
- scb_index = aic_inb(p, SCB_TAG);
- msg_out = aic_inb(p, MSG_OUT);
- scb = p->scb_data->scb_array[scb_index];
- aic_dev = AIC_DEV(scb->cmd);
- p->msg_index = p->msg_len = 0;
- /*
- * This SCB had a MK_MESSAGE set in its control byte informing
- * the sequencer that we wanted to send a special message to
- * this target.
- */
-
- if ( !(scb->flags & SCB_DEVICE_RESET) &&
- (msg_out == MSG_IDENTIFYFLAG) &&
- (scb->hscb->control & TAG_ENB) )
- {
- p->msg_buf[p->msg_index++] = scb->tag_action;
- p->msg_buf[p->msg_index++] = scb->hscb->tag;
- p->msg_len += 2;
- }
-
- if (scb->flags & SCB_DEVICE_RESET)
- {
- p->msg_buf[p->msg_index++] = MSG_BUS_DEV_RESET;
- p->msg_len++;
- if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
- printk(INFO_LEAD "Bus device reset mailed.\n",
- p->host_no, CTL_OF_SCB(scb));
- }
- else if (scb->flags & SCB_ABORT)
- {
- if (scb->tag_action)
- {
- p->msg_buf[p->msg_index++] = MSG_ABORT_TAG;
- }
- else
- {
- p->msg_buf[p->msg_index++] = MSG_ABORT;
- }
- p->msg_len++;
- if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
- printk(INFO_LEAD "Abort message mailed.\n", p->host_no,
- CTL_OF_SCB(scb));
- }
- else if (scb->flags & SCB_MSGOUT_PPR)
- {
- if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- {
- printk(INFO_LEAD "Sending PPR (%d/%d/%d/%d) message.\n",
- p->host_no, CTL_OF_SCB(scb),
- aic_dev->goal.period,
- aic_dev->goal.offset,
- aic_dev->goal.width,
- aic_dev->goal.options);
- }
- aic7xxx_construct_ppr(p, scb);
- }
- else if (scb->flags & SCB_MSGOUT_WDTR)
- {
- if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- {
- printk(INFO_LEAD "Sending WDTR message.\n", p->host_no,
- CTL_OF_SCB(scb));
- }
- aic7xxx_construct_wdtr(p, aic_dev->goal.width);
- }
- else if (scb->flags & SCB_MSGOUT_SDTR)
- {
- unsigned int max_sync, period;
- unsigned char options = 0;
- /*
- * Now that the device is selected, use the bits in SBLKCTL and
- * SSTAT2 to determine the max sync rate for this device.
- */
- if (p->features & AHC_ULTRA2)
- {
- if ( (aic_inb(p, SBLKCTL) & ENAB40) &&
- !(aic_inb(p, SSTAT2) & EXP_ACTIVE) )
- {
- max_sync = AHC_SYNCRATE_ULTRA2;
- }
- else
- {
- max_sync = AHC_SYNCRATE_ULTRA;
- }
- }
- else if (p->features & AHC_ULTRA)
- {
- max_sync = AHC_SYNCRATE_ULTRA;
- }
- else
- {
- max_sync = AHC_SYNCRATE_FAST;
- }
- period = aic_dev->goal.period;
- aic7xxx_find_syncrate(p, &period, max_sync, &options);
- if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- {
- printk(INFO_LEAD "Sending SDTR %d/%d message.\n", p->host_no,
- CTL_OF_SCB(scb), period,
- aic_dev->goal.offset);
- }
- aic7xxx_construct_sdtr(p, period, aic_dev->goal.offset);
- }
- else
- {
- panic("aic7xxx: AWAITING_MSG for an SCB that does "
- "not have a waiting message.\n");
- }
- /*
- * We've set everything up to send our message, now to actually do
- * so we need to enable reqinit interrupts and let the interrupt
- * handler do the rest. We don't want to unpause the sequencer yet
- * though so we'll return early. We also have to make sure that
- * we clear the SEQINT *BEFORE* we set the REQINIT handler active
- * or else it's possible on VLB cards to lose the first REQINIT
- * interrupt. Edge triggered EISA cards could also lose this
- * interrupt, although PCI and level triggered cards should not
- * have this problem since they continually interrupt the kernel
- * until we take care of the situation.
- */
- scb->flags |= SCB_MSGOUT_SENT;
- p->msg_index = 0;
- p->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
- p->flags |= AHC_HANDLING_REQINITS;
- aic_outb(p, aic_inb(p, SIMODE1) | ENREQINIT, SIMODE1);
- return;
- }
- break;
-
- case DATA_OVERRUN:
- {
- unsigned char scb_index = aic_inb(p, SCB_TAG);
- unsigned char lastphase = aic_inb(p, LASTPHASE);
- unsigned int i;
-
- scb = (p->scb_data->scb_array[scb_index]);
- /*
- * XXX - What do we really want to do on an overrun? The
- * mid-level SCSI code should handle this, but for now,
- * we'll just indicate that the command should be retried.
- * If we retrieved sense info on this target, then the
- * base SENSE info should have been saved prior to the
- * overrun error. In that case, we return DID_OK and let
- * the mid level code pick up on the sense info. Otherwise
- * we return DID_ERROR so the command will get retried.
- */
- if ( !(scb->flags & SCB_SENSE) )
- {
- printk(WARN_LEAD "Data overrun detected in %s phase, tag %d;\n",
- p->host_no, CTL_OF_SCB(scb),
- (lastphase == P_DATAIN) ? "Data-In" : "Data-Out", scb->hscb->tag);
- printk(KERN_WARNING " %s seen Data Phase. Length=%d, NumSGs=%d.\n",
- (aic_inb(p, SEQ_FLAGS) & DPHASE) ? "Have" : "Haven't",
- scb->sg_length, scb->sg_count);
- printk(KERN_WARNING " Raw SCSI Command: 0x");
- for (i = 0; i < scb->hscb->SCSI_cmd_length; i++)
- {
- printk("%02x ", scb->cmd->cmnd[i]);
- }
- printk("\n");
- if(aic7xxx_verbose > 0xffff)
- {
- for (i = 0; i < scb->sg_count; i++)
- {
- printk(KERN_WARNING " sg[%d] - Addr 0x%x : Length %d\n",
- i,
- le32_to_cpu(scb->sg_list[i].address),
- le32_to_cpu(scb->sg_list[i].length) );
- }
- }
- aic7xxx_error(scb->cmd) = DID_ERROR;
- }
- else
- printk(INFO_LEAD "Data Overrun during SEND_SENSE operation.\n",
- p->host_no, CTL_OF_SCB(scb));
- }
- break;
-
- case WIDE_RESIDUE:
- {
- unsigned char resid_sgcnt, index;
- unsigned char scb_index = aic_inb(p, SCB_TAG);
- unsigned int cur_addr, resid_dcnt;
- unsigned int native_addr, native_length, sg_addr;
- int i;
-
- if(scb_index > p->scb_data->numscbs)
- {
- printk(WARN_LEAD "invalid scb_index during WIDE_RESIDUE.\n",
- p->host_no, -1, -1, -1);
- /*
- * XXX: Add error handling here
- */
- break;
- }
- scb = p->scb_data->scb_array[scb_index];
- if(!(scb->flags & SCB_ACTIVE) || (scb->cmd == NULL))
- {
- printk(WARN_LEAD "invalid scb during WIDE_RESIDUE flags:0x%x "
- "scb->cmd:0x%lx\n", p->host_no, CTL_OF_SCB(scb),
- scb->flags, (unsigned long)scb->cmd);
- break;
- }
- if(aic7xxx_verbose & VERBOSE_MINOR_ERROR)
- printk(INFO_LEAD "Got WIDE_RESIDUE message, patching up data "
- "pointer.\n", p->host_no, CTL_OF_SCB(scb));
-
- /*
- * We have a valid scb to use on this WIDE_RESIDUE message, so
- * we need to walk the sg list looking for this particular sg
- * segment, then see if we happen to be at the very beginning of
- * the segment. If we are, then we have to back things up to
- * the previous segment. If not, then we simply need to remove
- * one byte from this segment's address and add one to the byte
- * count.
- */
- cur_addr = aic_inb(p, SHADDR) | (aic_inb(p, SHADDR + 1) << 8) |
- (aic_inb(p, SHADDR + 2) << 16) | (aic_inb(p, SHADDR + 3) << 24);
- sg_addr = aic_inb(p, SG_COUNT + 1) | (aic_inb(p, SG_COUNT + 2) << 8) |
- (aic_inb(p, SG_COUNT + 3) << 16) | (aic_inb(p, SG_COUNT + 4) << 24);
- resid_sgcnt = aic_inb(p, SCB_RESID_SGCNT);
- resid_dcnt = aic_inb(p, SCB_RESID_DCNT) |
- (aic_inb(p, SCB_RESID_DCNT + 1) << 8) |
- (aic_inb(p, SCB_RESID_DCNT + 2) << 16);
- index = scb->sg_count - ((resid_sgcnt) ? resid_sgcnt : 1);
- native_addr = le32_to_cpu(scb->sg_list[index].address);
- native_length = le32_to_cpu(scb->sg_list[index].length);
- /*
- * If resid_dcnt == native_length, then we just loaded this SG
- * segment and we need to back it up one...
- */
- if(resid_dcnt == native_length)
- {
- if(index == 0)
- {
- /*
- * Oops, this isn't right, we can't back up to before the
- * beginning. This must be a bogus message, ignore it.
- */
- break;
- }
- resid_dcnt = 1;
- resid_sgcnt += 1;
- native_addr = le32_to_cpu(scb->sg_list[index - 1].address);
- native_length = le32_to_cpu(scb->sg_list[index - 1].length);
- cur_addr = native_addr + (native_length - 1);
- sg_addr -= sizeof(struct hw_scatterlist);
- }
- else
- {
- /*
- * resid_dcnt != native_length, so we are in the middle of a SG
- * element. Back it up one byte and leave the rest alone.
- */
- resid_dcnt += 1;
- cur_addr -= 1;
- }
-
- /*
- * Output the new addresses and counts to the right places on the
- * card.
- */
- aic_outb(p, resid_sgcnt, SG_COUNT);
- aic_outb(p, resid_sgcnt, SCB_RESID_SGCNT);
- aic_outb(p, sg_addr & 0xff, SG_COUNT + 1);
- aic_outb(p, (sg_addr >> 8) & 0xff, SG_COUNT + 2);
- aic_outb(p, (sg_addr >> 16) & 0xff, SG_COUNT + 3);
- aic_outb(p, (sg_addr >> 24) & 0xff, SG_COUNT + 4);
- aic_outb(p, resid_dcnt & 0xff, SCB_RESID_DCNT);
- aic_outb(p, (resid_dcnt >> 8) & 0xff, SCB_RESID_DCNT + 1);
- aic_outb(p, (resid_dcnt >> 16) & 0xff, SCB_RESID_DCNT + 2);
-
- /*
- * The sequencer actually wants to find the new address
- * in the SHADDR register set. On the Ultra2 and later controllers
- * this register set is readonly. In order to get the right number
- * into the register, you actually have to enter it in HADDR and then
- * use the PRELOADEN bit of DFCNTRL to drop it through from the
- * HADDR register to the SHADDR register. On non-Ultra2 controllers,
- * we simply write it directly.
- */
- if(p->features & AHC_ULTRA2)
- {
- /*
- * We might as well be accurate and drop both the resid_dcnt and
- * cur_addr into HCNT and HADDR and have both of them drop
- * through to the shadow layer together.
- */
- aic_outb(p, resid_dcnt & 0xff, HCNT);
- aic_outb(p, (resid_dcnt >> 8) & 0xff, HCNT + 1);
- aic_outb(p, (resid_dcnt >> 16) & 0xff, HCNT + 2);
- aic_outb(p, cur_addr & 0xff, HADDR);
- aic_outb(p, (cur_addr >> 8) & 0xff, HADDR + 1);
- aic_outb(p, (cur_addr >> 16) & 0xff, HADDR + 2);
- aic_outb(p, (cur_addr >> 24) & 0xff, HADDR + 3);
- aic_outb(p, aic_inb(p, DMAPARAMS) | PRELOADEN, DFCNTRL);
- udelay(1);
- aic_outb(p, aic_inb(p, DMAPARAMS) & ~(SCSIEN|HDMAEN), DFCNTRL);
- i=0;
- while(((aic_inb(p, DFCNTRL) & (SCSIEN|HDMAEN)) != 0) && (i++ < 1000))
- {
- udelay(1);
- }
- }
- else
- {
- aic_outb(p, cur_addr & 0xff, SHADDR);
- aic_outb(p, (cur_addr >> 8) & 0xff, SHADDR + 1);
- aic_outb(p, (cur_addr >> 16) & 0xff, SHADDR + 2);
- aic_outb(p, (cur_addr >> 24) & 0xff, SHADDR + 3);
- }
- }
- break;
-
- case SEQ_SG_FIXUP:
- {
- unsigned char scb_index, tmp;
- int sg_addr, sg_length;
-
- scb_index = aic_inb(p, SCB_TAG);
-
- if(scb_index > p->scb_data->numscbs)
- {
- printk(WARN_LEAD "invalid scb_index during SEQ_SG_FIXUP.\n",
- p->host_no, -1, -1, -1);
- printk(INFO_LEAD "SCSISIGI 0x%x, SEQADDR 0x%x, SSTAT0 0x%x, SSTAT1 "
- "0x%x\n", p->host_no, -1, -1, -1,
- aic_inb(p, SCSISIGI),
- aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8),
- aic_inb(p, SSTAT0), aic_inb(p, SSTAT1));
- printk(INFO_LEAD "SG_CACHEPTR 0x%x, SSTAT2 0x%x, STCNT 0x%x\n",
- p->host_no, -1, -1, -1, aic_inb(p, SG_CACHEPTR),
- aic_inb(p, SSTAT2), aic_inb(p, STCNT + 2) << 16 |
- aic_inb(p, STCNT + 1) << 8 | aic_inb(p, STCNT));
- /*
- * XXX: Add error handling here
- */
- break;
- }
- scb = p->scb_data->scb_array[scb_index];
- if(!(scb->flags & SCB_ACTIVE) || (scb->cmd == NULL))
- {
- printk(WARN_LEAD "invalid scb during SEQ_SG_FIXUP flags:0x%x "
- "scb->cmd:0x%p\n", p->host_no, CTL_OF_SCB(scb),
- scb->flags, scb->cmd);
- printk(INFO_LEAD "SCSISIGI 0x%x, SEQADDR 0x%x, SSTAT0 0x%x, SSTAT1 "
- "0x%x\n", p->host_no, CTL_OF_SCB(scb),
- aic_inb(p, SCSISIGI),
- aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8),
- aic_inb(p, SSTAT0), aic_inb(p, SSTAT1));
- printk(INFO_LEAD "SG_CACHEPTR 0x%x, SSTAT2 0x%x, STCNT 0x%x\n",
- p->host_no, CTL_OF_SCB(scb), aic_inb(p, SG_CACHEPTR),
- aic_inb(p, SSTAT2), aic_inb(p, STCNT + 2) << 16 |
- aic_inb(p, STCNT + 1) << 8 | aic_inb(p, STCNT));
- break;
- }
- if(aic7xxx_verbose & VERBOSE_MINOR_ERROR)
- printk(INFO_LEAD "Fixing up SG address for sequencer.\n", p->host_no,
- CTL_OF_SCB(scb));
- /*
- * Advance the SG pointer to the next element in the list
- */
- tmp = aic_inb(p, SG_NEXT);
- tmp += SG_SIZEOF;
- aic_outb(p, tmp, SG_NEXT);
- if( tmp < SG_SIZEOF )
- aic_outb(p, aic_inb(p, SG_NEXT + 1) + 1, SG_NEXT + 1);
- tmp = aic_inb(p, SG_COUNT) - 1;
- aic_outb(p, tmp, SG_COUNT);
- sg_addr = le32_to_cpu(scb->sg_list[scb->sg_count - tmp].address);
- sg_length = le32_to_cpu(scb->sg_list[scb->sg_count - tmp].length);
- /*
- * Now stuff the element we just advanced past down onto the
- * card so it can be stored in the residual area.
- */
- aic_outb(p, sg_addr & 0xff, HADDR);
- aic_outb(p, (sg_addr >> 8) & 0xff, HADDR + 1);
- aic_outb(p, (sg_addr >> 16) & 0xff, HADDR + 2);
- aic_outb(p, (sg_addr >> 24) & 0xff, HADDR + 3);
- aic_outb(p, sg_length & 0xff, HCNT);
- aic_outb(p, (sg_length >> 8) & 0xff, HCNT + 1);
- aic_outb(p, (sg_length >> 16) & 0xff, HCNT + 2);
- aic_outb(p, (tmp << 2) | ((tmp == 1) ? LAST_SEG : 0), SG_CACHEPTR);
- aic_outb(p, aic_inb(p, DMAPARAMS), DFCNTRL);
- while(aic_inb(p, SSTAT0) & SDONE) udelay(1);
- while(aic_inb(p, DFCNTRL) & (HDMAEN|SCSIEN)) aic_outb(p, 0, DFCNTRL);
- }
- break;
-
-#ifdef AIC7XXX_NOT_YET
- case TRACEPOINT2:
- {
- printk(INFO_LEAD "Tracepoint #2 reached.\n", p->host_no,
- channel, target, lun);
- }
- break;
-
- /* XXX Fill these in later */
- case MSG_BUFFER_BUSY:
- printk("aic7xxx: Message buffer busy.\n");
- break;
- case MSGIN_PHASEMIS:
- printk("aic7xxx: Message-in phasemis.\n");
- break;
-#endif
-
- default: /* unknown */
- printk(WARN_LEAD "Unknown SEQINT, INTSTAT 0x%x, SCSISIGI 0x%x.\n",
- p->host_no, channel, target, lun, intstat,
- aic_inb(p, SCSISIGI));
- break;
- }
-
- /*
- * Clear the sequencer interrupt and unpause the sequencer.
- */
- unpause_sequencer(p, /* unpause always */ TRUE);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_parse_msg
- *
- * Description:
- * Parses incoming messages into actions on behalf of
- * aic7xxx_handle_reqinit
- *_F*************************************************************************/
-static int
-aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
-{
- int reject, reply, done;
- unsigned char target_scsirate, tindex;
- unsigned short target_mask;
- unsigned char target, channel, lun;
- unsigned char bus_width, new_bus_width;
- unsigned char trans_options, new_trans_options;
- unsigned int period, new_period, offset, new_offset, maxsync;
- struct aic7xxx_syncrate *syncrate;
- struct aic_dev_data *aic_dev;
-
- target = scb->cmd->device->id;
- channel = scb->cmd->device->channel;
- lun = scb->cmd->device->lun;
- reply = reject = done = FALSE;
- tindex = TARGET_INDEX(scb->cmd);
- aic_dev = AIC_DEV(scb->cmd);
- target_scsirate = aic_inb(p, TARG_SCSIRATE + tindex);
- target_mask = (0x01 << tindex);
-
- /*
- * Parse as much of the message as is available,
- * rejecting it if we don't support it. When
- * the entire message is available and has been
- * handled, return TRUE indicating that we have
- * parsed an entire message.
- */
-
- if (p->msg_buf[0] != MSG_EXTENDED)
- {
- reject = TRUE;
- }
-
- /*
- * Even if we are an Ultra3 card, don't allow Ultra3 sync rates when
- * using the SDTR messages. We need the PPR messages to enable the
- * higher speeds that include things like Dual Edge clocking.
- */
- if (p->features & AHC_ULTRA2)
- {
- if ( (aic_inb(p, SBLKCTL) & ENAB40) &&
- !(aic_inb(p, SSTAT2) & EXP_ACTIVE) )
- {
- if (p->features & AHC_ULTRA3)
- maxsync = AHC_SYNCRATE_ULTRA3;
- else
- maxsync = AHC_SYNCRATE_ULTRA2;
- }
- else
- {
- maxsync = AHC_SYNCRATE_ULTRA;
- }
- }
- else if (p->features & AHC_ULTRA)
- {
- maxsync = AHC_SYNCRATE_ULTRA;
- }
- else
- {
- maxsync = AHC_SYNCRATE_FAST;
- }
-
- /*
- * Just accept the length byte outright and perform
- * more checking once we know the message type.
- */
-
- if ( !reject && (p->msg_len > 2) )
- {
- switch(p->msg_buf[2])
- {
- case MSG_EXT_SDTR:
- {
-
- if (p->msg_buf[1] != MSG_EXT_SDTR_LEN)
- {
- reject = TRUE;
- break;
- }
-
- if (p->msg_len < (MSG_EXT_SDTR_LEN + 2))
- {
- break;
- }
-
- period = new_period = p->msg_buf[3];
- offset = new_offset = p->msg_buf[4];
- trans_options = new_trans_options = 0;
- bus_width = new_bus_width = target_scsirate & WIDEXFER;
-
- /*
- * If our current max syncrate is in the Ultra3 range, bump it back
- * down to Ultra2 since we can't negotiate DT transfers using SDTR
- */
- if(maxsync == AHC_SYNCRATE_ULTRA3)
- maxsync = AHC_SYNCRATE_ULTRA2;
-
- /*
- * We might have a device that is starting negotiation with us
- * before we can start up negotiation with it....be prepared to
- * have a device ask for a higher speed than we want to give it
- * in that case.
- */
- if ( (scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) !=
- (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR) )
- {
- if (!(aic_dev->flags & DEVICE_DTR_SCANNED))
- {
- /*
- * We shouldn't get here unless this is a narrow drive, wide
- * devices should trigger this same section of code in the WDTR
- * handler first instead.
- */
- aic_dev->goal.width = MSG_EXT_WDTR_BUS_8_BIT;
- aic_dev->goal.options = 0;
- if(p->user[tindex].offset)
- {
- aic_dev->needsdtr_copy = 1;
- aic_dev->goal.period = max_t(unsigned char, 10,p->user[tindex].period);
- if(p->features & AHC_ULTRA2)
- {
- aic_dev->goal.offset = MAX_OFFSET_ULTRA2;
- }
- else
- {
- aic_dev->goal.offset = MAX_OFFSET_8BIT;
- }
- }
- else
- {
- aic_dev->needsdtr_copy = 0;
- aic_dev->goal.period = 255;
- aic_dev->goal.offset = 0;
- }
- aic_dev->flags |= DEVICE_DTR_SCANNED | DEVICE_PRINT_DTR;
- }
- else if (aic_dev->needsdtr_copy == 0)
- {
- /*
- * This is a preemptive message from the target, we've already
- * scanned this target and set our options for it, and we
- * don't need a SDTR with this target (for whatever reason),
- * so reject this incoming SDTR
- */
- reject = TRUE;
- break;
- }
-
- /* The device is sending this message first and we have to reply */
- reply = TRUE;
-
- if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- {
- printk(INFO_LEAD "Received pre-emptive SDTR message from "
- "target.\n", p->host_no, CTL_OF_SCB(scb));
- }
- /*
- * Validate the values the device passed to us against our SEEPROM
- * settings. We don't have to do this if we aren't replying since
- * the device isn't allowed to send values greater than the ones
- * we first sent to it.
- */
- new_period = max_t(unsigned int, period, aic_dev->goal.period);
- new_offset = min_t(unsigned int, offset, aic_dev->goal.offset);
- }
-
- /*
- * Use our new_period, new_offset, bus_width, and card options
- * to determine the actual syncrate settings
- */
- syncrate = aic7xxx_find_syncrate(p, &new_period, maxsync,
- &trans_options);
- aic7xxx_validate_offset(p, syncrate, &new_offset, bus_width);
-
- /*
- * Did we drop to async? If so, send a reply regardless of whether
- * or not we initiated this negotiation.
- */
- if ((new_offset == 0) && (new_offset != offset))
- {
- aic_dev->needsdtr_copy = 0;
- reply = TRUE;
- }
-
- /*
- * If we didn't start this negotiation, or if we went too low and had
- * to go async, then send an SDTR back to the target.
- */
- if(reply)
- {
- /* when sending a reply, make sure that the goal settings are
- * updated along with current and active since the code that
- * will actually build the message for the sequencer uses the
- * goal settings as its guidelines.
- */
- aic7xxx_set_syncrate(p, syncrate, target, channel, new_period,
- new_offset, trans_options,
- AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR,
- aic_dev);
- scb->flags &= ~SCB_MSGOUT_BITS;
- scb->flags |= SCB_MSGOUT_SDTR;
- aic_outb(p, HOST_MSG, MSG_OUT);
- aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
- }
- else
- {
- aic7xxx_set_syncrate(p, syncrate, target, channel, new_period,
- new_offset, trans_options,
- AHC_TRANS_ACTIVE|AHC_TRANS_CUR, aic_dev);
- aic_dev->needsdtr = 0;
- }
- done = TRUE;
- break;
- }
- case MSG_EXT_WDTR:
- {
-
- if (p->msg_buf[1] != MSG_EXT_WDTR_LEN)
- {
- reject = TRUE;
- break;
- }
-
- if (p->msg_len < (MSG_EXT_WDTR_LEN + 2))
- {
- break;
- }
-
- bus_width = new_bus_width = p->msg_buf[3];
-
- if ( (scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_WDTR)) ==
- (SCB_MSGOUT_SENT|SCB_MSGOUT_WDTR) )
- {
- switch(bus_width)
- {
- default:
- {
- reject = TRUE;
- if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
- ((aic_dev->flags & DEVICE_PRINT_DTR) ||
- (aic7xxx_verbose > 0xffff)) )
- {
- printk(INFO_LEAD "Requesting %d bit transfers, rejecting.\n",
- p->host_no, CTL_OF_SCB(scb), 8 * (0x01 << bus_width));
- }
- } /* We fall through on purpose */
- case MSG_EXT_WDTR_BUS_8_BIT:
- {
- aic_dev->goal.width = MSG_EXT_WDTR_BUS_8_BIT;
- aic_dev->needwdtr_copy &= ~target_mask;
- break;
- }
- case MSG_EXT_WDTR_BUS_16_BIT:
- {
- break;
- }
- }
- aic_dev->needwdtr = 0;
- aic7xxx_set_width(p, target, channel, lun, new_bus_width,
- AHC_TRANS_ACTIVE|AHC_TRANS_CUR, aic_dev);
- }
- else
- {
- if ( !(aic_dev->flags & DEVICE_DTR_SCANNED) )
- {
- /*
- * Well, we now know the WDTR and SYNC caps of this device since
- * it contacted us first, mark it as such and copy the user stuff
- * over to the goal stuff.
- */
- if( (p->features & AHC_WIDE) && p->user[tindex].width )
- {
- aic_dev->goal.width = MSG_EXT_WDTR_BUS_16_BIT;
- aic_dev->needwdtr_copy = 1;
- }
-
- /*
- * Devices that support DT transfers don't start WDTR requests
- */
- aic_dev->goal.options = 0;
-
- if(p->user[tindex].offset)
- {
- aic_dev->needsdtr_copy = 1;
- aic_dev->goal.period = max_t(unsigned char, 10, p->user[tindex].period);
- if(p->features & AHC_ULTRA2)
- {
- aic_dev->goal.offset = MAX_OFFSET_ULTRA2;
- }
- else if( aic_dev->goal.width )
- {
- aic_dev->goal.offset = MAX_OFFSET_16BIT;
- }
- else
- {
- aic_dev->goal.offset = MAX_OFFSET_8BIT;
- }
- }
- else
- {
- aic_dev->needsdtr_copy = 0;
- aic_dev->goal.period = 255;
- aic_dev->goal.offset = 0;
- }
-
- aic_dev->flags |= DEVICE_DTR_SCANNED | DEVICE_PRINT_DTR;
- }
- else if (aic_dev->needwdtr_copy == 0)
- {
- /*
- * This is a preemptive message from the target, we've already
- * scanned this target and set our options for it, and we
- * don't need a WDTR with this target (for whatever reason),
- * so reject this incoming WDTR
- */
- reject = TRUE;
- break;
- }
-
- /* The device is sending this message first and we have to reply */
- reply = TRUE;
-
- if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- {
- printk(INFO_LEAD "Received pre-emptive WDTR message from "
- "target.\n", p->host_no, CTL_OF_SCB(scb));
- }
- switch(bus_width)
- {
- case MSG_EXT_WDTR_BUS_16_BIT:
- {
- if ( (p->features & AHC_WIDE) &&
- (aic_dev->goal.width == MSG_EXT_WDTR_BUS_16_BIT) )
- {
- new_bus_width = MSG_EXT_WDTR_BUS_16_BIT;
- break;
- }
- } /* Fall through if we aren't a wide card */
- default:
- case MSG_EXT_WDTR_BUS_8_BIT:
- {
- aic_dev->needwdtr_copy = 0;
- new_bus_width = MSG_EXT_WDTR_BUS_8_BIT;
- break;
- }
- }
- scb->flags &= ~SCB_MSGOUT_BITS;
- scb->flags |= SCB_MSGOUT_WDTR;
- aic_dev->needwdtr = 0;
- if(aic_dev->dtr_pending == 0)
- {
- /* There is no other command with SCB_DTR_SCB already set that will
- * trigger the release of the dtr_pending bit, so both set the
- * dtr_pending bit and mark this scb with SCB_DTR_SCB.
- */
- aic_dev->dtr_pending = 1;
- scb->flags |= SCB_DTR_SCB;
- }
- aic_outb(p, HOST_MSG, MSG_OUT);
- aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
- /* when sending a reply, make sure that the goal settings are
- * updated along with current and active since the code that
- * will actually build the message for the sequencer uses the
- * goal settings as its guidelines.
- */
- aic7xxx_set_width(p, target, channel, lun, new_bus_width,
- AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR,
- aic_dev);
- }
-
- /*
- * By virtue of the SCSI spec, a WDTR message negates any existing
- * SDTR negotiations. So, even if needsdtr isn't marked for this
- * device, we still have to do a new SDTR message if the device
- * supports SDTR at all. Therefore, we check needsdtr_copy instead
- * of needsdtr.
- */
- aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0,
- AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE,
- aic_dev);
- aic_dev->needsdtr = aic_dev->needsdtr_copy;
- done = TRUE;
- break;
- }
- case MSG_EXT_PPR:
- {
-
- if (p->msg_buf[1] != MSG_EXT_PPR_LEN)
- {
- reject = TRUE;
- break;
- }
-
- if (p->msg_len < (MSG_EXT_PPR_LEN + 2))
- {
- break;
- }
-
- period = new_period = p->msg_buf[3];
- offset = new_offset = p->msg_buf[5];
- bus_width = new_bus_width = p->msg_buf[6];
- trans_options = new_trans_options = p->msg_buf[7] & 0xf;
-
- if(aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- {
- printk(INFO_LEAD "Parsing PPR message (%d/%d/%d/%d)\n",
- p->host_no, CTL_OF_SCB(scb), period, offset, bus_width,
- trans_options);
- }
-
- /*
- * We might have a device that is starting negotiation with us
- * before we can start up negotiation with it....be prepared to
- * have a device ask for a higher speed than we want to give it
- * in that case.
- */
- if ( (scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_PPR)) !=
- (SCB_MSGOUT_SENT|SCB_MSGOUT_PPR) )
- {
- /* Have we scanned the device yet? */
- if (!(aic_dev->flags & DEVICE_DTR_SCANNED))
- {
- /* The device is electing to use PPR messages, so we will too until
- * we know better */
- aic_dev->needppr = aic_dev->needppr_copy = 1;
- aic_dev->needsdtr = aic_dev->needsdtr_copy = 0;
- aic_dev->needwdtr = aic_dev->needwdtr_copy = 0;
-
- /* We know the device is SCSI-3 compliant due to PPR */
- aic_dev->flags |= DEVICE_SCSI_3;
-
- /*
- * Not only is the device starting this up, but it also hasn't
- * been scanned yet, so this would likely be our TUR or our
- * INQUIRY command at scan time, so we need to use the
- * settings from the SEEPROM if they existed. Of course, even
- * if we didn't find a SEEPROM, we stuffed default values into
- * the user settings anyway, so use those in all cases.
- */
- aic_dev->goal.width = p->user[tindex].width;
- if(p->user[tindex].offset)
- {
- aic_dev->goal.period = p->user[tindex].period;
- aic_dev->goal.options = p->user[tindex].options;
- if(p->features & AHC_ULTRA2)
- {
- aic_dev->goal.offset = MAX_OFFSET_ULTRA2;
- }
- else if( aic_dev->goal.width &&
- (bus_width == MSG_EXT_WDTR_BUS_16_BIT) &&
- p->features & AHC_WIDE )
- {
- aic_dev->goal.offset = MAX_OFFSET_16BIT;
- }
- else
- {
- aic_dev->goal.offset = MAX_OFFSET_8BIT;
- }
- }
- else
- {
- aic_dev->goal.period = 255;
- aic_dev->goal.offset = 0;
- aic_dev->goal.options = 0;
- }
- aic_dev->flags |= DEVICE_DTR_SCANNED | DEVICE_PRINT_DTR;
- }
- else if (aic_dev->needppr_copy == 0)
- {
- /*
- * This is a preemptive message from the target, we've already
- * scanned this target and set our options for it, and we
- * don't need a PPR with this target (for whatever reason),
- * so reject this incoming PPR
- */
- reject = TRUE;
- break;
- }
-
- /* The device is sending this message first and we have to reply */
- reply = TRUE;
-
- if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- {
- printk(INFO_LEAD "Received pre-emptive PPR message from "
- "target.\n", p->host_no, CTL_OF_SCB(scb));
- }
-
- }
-
- switch(bus_width)
- {
- case MSG_EXT_WDTR_BUS_16_BIT:
- {
- if ( (aic_dev->goal.width == MSG_EXT_WDTR_BUS_16_BIT) &&
- p->features & AHC_WIDE)
- {
- break;
- }
- }
- default:
- {
- if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
- ((aic_dev->flags & DEVICE_PRINT_DTR) ||
- (aic7xxx_verbose > 0xffff)) )
- {
- reply = TRUE;
- printk(INFO_LEAD "Requesting %d bit transfers, rejecting.\n",
- p->host_no, CTL_OF_SCB(scb), 8 * (0x01 << bus_width));
- }
- } /* We fall through on purpose */
- case MSG_EXT_WDTR_BUS_8_BIT:
- {
- /*
- * According to the spec, if we aren't wide, we also can't be
- * Dual Edge so clear the options byte
- */
- new_trans_options = 0;
- new_bus_width = MSG_EXT_WDTR_BUS_8_BIT;
- break;
- }
- }
-
- if(reply)
- {
- /* when sending a reply, make sure that the goal settings are
- * updated along with current and active since the code that
- * will actually build the message for the sequencer uses the
- * goal settings as its guidelines.
- */
- aic7xxx_set_width(p, target, channel, lun, new_bus_width,
- AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR,
- aic_dev);
- syncrate = aic7xxx_find_syncrate(p, &new_period, maxsync,
- &new_trans_options);
- aic7xxx_validate_offset(p, syncrate, &new_offset, new_bus_width);
- aic7xxx_set_syncrate(p, syncrate, target, channel, new_period,
- new_offset, new_trans_options,
- AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR,
- aic_dev);
- }
- else
- {
- aic7xxx_set_width(p, target, channel, lun, new_bus_width,
- AHC_TRANS_ACTIVE|AHC_TRANS_CUR, aic_dev);
- syncrate = aic7xxx_find_syncrate(p, &new_period, maxsync,
- &new_trans_options);
- aic7xxx_validate_offset(p, syncrate, &new_offset, new_bus_width);
- aic7xxx_set_syncrate(p, syncrate, target, channel, new_period,
- new_offset, new_trans_options,
- AHC_TRANS_ACTIVE|AHC_TRANS_CUR, aic_dev);
- }
-
- /*
- * As it turns out, if we don't *have* to have PPR messages, then
- * configure ourselves not to use them since that makes some
- * external drive chassis work (those chassis can't parse PPR
- * messages and they mangle the SCSI bus until you send a WDTR
- * and SDTR that they can understand).
- */
- if(new_trans_options == 0)
- {
- aic_dev->needppr = aic_dev->needppr_copy = 0;
- if(new_offset)
- {
- aic_dev->needsdtr = aic_dev->needsdtr_copy = 1;
- }
- if (new_bus_width)
- {
- aic_dev->needwdtr = aic_dev->needwdtr_copy = 1;
- }
- }
-
- if((new_offset == 0) && (offset != 0))
- {
- /*
- * Oops, the syncrate went too low for this card and we fell off
- * to async (this should never happen with a device that uses PPR
- * messages, but we have to be complete).
- */
- reply = TRUE;
- }
-
- if(reply)
- {
- scb->flags &= ~SCB_MSGOUT_BITS;
- scb->flags |= SCB_MSGOUT_PPR;
- aic_outb(p, HOST_MSG, MSG_OUT);
- aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
- }
- else
- {
- aic_dev->needppr = 0;
- }
- done = TRUE;
- break;
- }
- default:
- {
- reject = TRUE;
- break;
- }
- } /* end of switch(p->msg_type) */
- } /* end of if (!reject && (p->msg_len > 2)) */
-
- if (!reply && reject)
- {
- aic_outb(p, MSG_MESSAGE_REJECT, MSG_OUT);
- aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
- done = TRUE;
- }
- return(done);
-}
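A minimal stand-alone sketch (not part of the driver source above) of the extended-message byte layout that aic7xxx_parse_msg() indexes in p->msg_buf. The message codes are the standard SPI values (SDTR 0x01, WDTR 0x03, PPR 0x04), and the helper name sketch_dump_ext_msg is purely illustrative:

#include <stdio.h>

/*
 * Byte offsets mirrored from the parsing code above:
 *   msg_buf[0] = MSG_EXTENDED (0x01), msg_buf[1] = length, msg_buf[2] = code
 *   SDTR: [3] period, [4] offset
 *   WDTR: [3] bus width
 *   PPR : [3] period, [5] offset, [6] bus width, [7] & 0x0f options
 */
static void sketch_dump_ext_msg(const unsigned char *msg_buf, int msg_len)
{
  if (msg_len < 3 || msg_buf[0] != 0x01)   /* not an extended message */
    return;
  switch (msg_buf[2])
  {
    case 0x01:                             /* MSG_EXT_SDTR, length 3 */
      printf("SDTR period=%d offset=%d\n", msg_buf[3], msg_buf[4]);
      break;
    case 0x03:                             /* MSG_EXT_WDTR, length 2 */
      printf("WDTR width=%d\n", msg_buf[3]);
      break;
    case 0x04:                             /* MSG_EXT_PPR, length 6 */
      printf("PPR period=%d offset=%d width=%d options=0x%x\n",
             msg_buf[3], msg_buf[5], msg_buf[6], msg_buf[7] & 0x0f);
      break;
    default:                               /* anything else is rejected above */
      break;
  }
}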
-
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_handle_reqinit
- *
- * Description:
- * Interrupt handler for REQINIT interrupts (used to transfer messages to
- * and from devices).
- *_F*************************************************************************/
-static void
-aic7xxx_handle_reqinit(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
-{
- unsigned char lastbyte;
- unsigned char phasemis;
- int done = FALSE;
-
- switch(p->msg_type)
- {
- case MSG_TYPE_INITIATOR_MSGOUT:
- {
- if (p->msg_len == 0)
- panic("aic7xxx: REQINIT with no active message!\n");
-
- lastbyte = (p->msg_index == (p->msg_len - 1));
- phasemis = ( aic_inb(p, SCSISIGI) & PHASE_MASK) != P_MESGOUT;
-
- if (lastbyte || phasemis)
- {
- /* Time to end the message */
- p->msg_len = 0;
- p->msg_type = MSG_TYPE_NONE;
- /*
- * NOTE-TO-MYSELF: If you clear the REQINIT after you
- * disable REQINITs, then cases of REJECT_MSG stop working
- * and hang the bus
- */
- aic_outb(p, aic_inb(p, SIMODE1) & ~ENREQINIT, SIMODE1);
- aic_outb(p, CLRSCSIINT, CLRINT);
- p->flags &= ~AHC_HANDLING_REQINITS;
-
- if (phasemis == 0)
- {
- aic_outb(p, p->msg_buf[p->msg_index], SINDEX);
- aic_outb(p, 0, RETURN_1);
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic7xxx_verbose > 0xffff)
- printk(INFO_LEAD "Completed sending of REQINIT message.\n",
- p->host_no, CTL_OF_SCB(scb));
-#endif
- }
- else
- {
- aic_outb(p, MSGOUT_PHASEMIS, RETURN_1);
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic7xxx_verbose > 0xffff)
- printk(INFO_LEAD "PHASEMIS while sending REQINIT message.\n",
- p->host_no, CTL_OF_SCB(scb));
-#endif
- }
- unpause_sequencer(p, TRUE);
- }
- else
- {
- /*
- * Present the byte on the bus (clearing REQINIT) but don't
- * unpause the sequencer.
- */
- aic_outb(p, CLRREQINIT, CLRSINT1);
- aic_outb(p, CLRSCSIINT, CLRINT);
- aic_outb(p, p->msg_buf[p->msg_index++], SCSIDATL);
- }
- break;
- }
- case MSG_TYPE_INITIATOR_MSGIN:
- {
- phasemis = ( aic_inb(p, SCSISIGI) & PHASE_MASK ) != P_MESGIN;
-
- if (phasemis == 0)
- {
- p->msg_len++;
- /* Pull the byte in without acking it */
- p->msg_buf[p->msg_index] = aic_inb(p, SCSIBUSL);
- done = aic7xxx_parse_msg(p, scb);
- /* Ack the byte */
- aic_outb(p, CLRREQINIT, CLRSINT1);
- aic_outb(p, CLRSCSIINT, CLRINT);
- aic_inb(p, SCSIDATL);
- p->msg_index++;
- }
- if (phasemis || done)
- {
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic7xxx_verbose > 0xffff)
- {
- if (phasemis)
- printk(INFO_LEAD "PHASEMIS while receiving REQINIT message.\n",
- p->host_no, CTL_OF_SCB(scb));
- else
- printk(INFO_LEAD "Completed receipt of REQINIT message.\n",
- p->host_no, CTL_OF_SCB(scb));
- }
-#endif
- /* Time to end our message session */
- p->msg_len = 0;
- p->msg_type = MSG_TYPE_NONE;
- aic_outb(p, aic_inb(p, SIMODE1) & ~ENREQINIT, SIMODE1);
- aic_outb(p, CLRSCSIINT, CLRINT);
- p->flags &= ~AHC_HANDLING_REQINITS;
- unpause_sequencer(p, TRUE);
- }
- break;
- }
- default:
- {
- panic("aic7xxx: Unknown REQINIT message type.\n");
- break;
- }
- } /* End of switch(p->msg_type) */
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_handle_scsiint
- *
- * Description:
- * Interrupt handler for SCSI interrupts (SCSIINT).
- *-F*************************************************************************/
-static void
-aic7xxx_handle_scsiint(struct aic7xxx_host *p, unsigned char intstat)
-{
- unsigned char scb_index;
- unsigned char status;
- struct aic7xxx_scb *scb;
- struct aic_dev_data *aic_dev;
-
- scb_index = aic_inb(p, SCB_TAG);
- status = aic_inb(p, SSTAT1);
-
- if (scb_index < p->scb_data->numscbs)
- {
- scb = p->scb_data->scb_array[scb_index];
- if ((scb->flags & SCB_ACTIVE) == 0)
- {
- scb = NULL;
- }
- }
- else
- {
- scb = NULL;
- }
-
-
- if ((status & SCSIRSTI) != 0)
- {
- int channel;
-
- if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
- channel = (aic_inb(p, SBLKCTL) & SELBUSB) >> 3;
- else
- channel = 0;
-
- if (aic7xxx_verbose & VERBOSE_RESET)
- printk(WARN_LEAD "Someone else reset the channel!!\n",
- p->host_no, channel, -1, -1);
- if (aic7xxx_panic_on_abort)
- aic7xxx_panic_abort(p, NULL);
- /*
- * Go through and abort all commands for the channel, but do not
- * reset the channel again.
- */
- aic7xxx_reset_channel(p, channel, /* Initiate Reset */ FALSE);
- aic7xxx_run_done_queue(p, TRUE);
- scb = NULL;
- }
- else if ( ((status & BUSFREE) != 0) && ((status & SELTO) == 0) )
- {
- /*
- * First look at what phase we were last in. If it's message-out,
- * chances are pretty good that the bus free was in response to
- * one of our abort requests.
- */
- unsigned char lastphase = aic_inb(p, LASTPHASE);
- unsigned char saved_tcl = aic_inb(p, SAVED_TCL);
- unsigned char target = (saved_tcl >> 4) & 0x0F;
- int channel;
- int printerror = TRUE;
-
- if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
- channel = (aic_inb(p, SBLKCTL) & SELBUSB) >> 3;
- else
- channel = 0;
-
- aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP),
- SCSISEQ);
- if (lastphase == P_MESGOUT)
- {
- unsigned char message;
-
- message = aic_inb(p, SINDEX);
-
- if ((message == MSG_ABORT) || (message == MSG_ABORT_TAG))
- {
- if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
- printk(INFO_LEAD "SCB %d abort delivered.\n", p->host_no,
- CTL_OF_SCB(scb), scb->hscb->tag);
- aic7xxx_reset_device(p, target, channel, ALL_LUNS,
- (message == MSG_ABORT) ? SCB_LIST_NULL : scb->hscb->tag );
- aic7xxx_run_done_queue(p, TRUE);
- scb = NULL;
- printerror = 0;
- }
- else if (message == MSG_BUS_DEV_RESET)
- {
- aic7xxx_handle_device_reset(p, target, channel);
- scb = NULL;
- printerror = 0;
- }
- }
- if ( (scb != NULL) && (scb->flags & SCB_DTR_SCB) )
- {
- /*
- * Hmmm...error during a negotiation command. Either we have a
- * borken bus, or the device doesn't like our negotiation message.
- * Since we check the INQUIRY data of a device before sending it
- * negotiation messages, assume the bus is borken for whatever
- * reason. Complete the command.
- */
- printerror = 0;
- aic7xxx_reset_device(p, target, channel, ALL_LUNS, scb->hscb->tag);
- aic7xxx_run_done_queue(p, TRUE);
- scb = NULL;
- }
- if (printerror != 0)
- {
- if (scb != NULL)
- {
- unsigned char tag;
-
- if ((scb->hscb->control & TAG_ENB) != 0)
- {
- tag = scb->hscb->tag;
- }
- else
- {
- tag = SCB_LIST_NULL;
- }
- aic7xxx_reset_device(p, target, channel, ALL_LUNS, tag);
- aic7xxx_run_done_queue(p, TRUE);
- }
- else
- {
- aic7xxx_reset_device(p, target, channel, ALL_LUNS, SCB_LIST_NULL);
- aic7xxx_run_done_queue(p, TRUE);
- }
- printk(INFO_LEAD "Unexpected busfree, LASTPHASE = 0x%x, "
- "SEQADDR = 0x%x\n", p->host_no, channel, target, -1, lastphase,
- (aic_inb(p, SEQADDR1) << 8) | aic_inb(p, SEQADDR0));
- scb = NULL;
- }
- aic_outb(p, MSG_NOOP, MSG_OUT);
- aic_outb(p, aic_inb(p, SIMODE1) & ~(ENBUSFREE|ENREQINIT),
- SIMODE1);
- p->flags &= ~AHC_HANDLING_REQINITS;
- aic_outb(p, CLRBUSFREE, CLRSINT1);
- aic_outb(p, CLRSCSIINT, CLRINT);
- restart_sequencer(p);
- unpause_sequencer(p, TRUE);
- }
- else if ((status & SELTO) != 0)
- {
- unsigned char scbptr;
- unsigned char nextscb;
- struct scsi_cmnd *cmd;
-
- scbptr = aic_inb(p, WAITING_SCBH);
- if (scbptr >= p->scb_data->maxhscbs)
- {
- /*
- * I'm still trying to track down exactly how this happens, but until
- * I find it, this code will make sure we aren't passing bogus values
- * into the SCBPTR register. Even if that register would just wrap
- * things around, we still don't like having out-of-range variables.
- *
- * NOTE: Don't check the aic7xxx_verbose variable, I want this message
- * to always be displayed.
- */
- printk(INFO_LEAD "Invalid WAITING_SCBH value %d, improvising.\n",
- p->host_no, -1, -1, -1, scbptr);
- if (p->scb_data->maxhscbs > 4)
- scbptr &= (p->scb_data->maxhscbs - 1);
- else
- scbptr &= 0x03;
- }
- aic_outb(p, scbptr, SCBPTR);
- scb_index = aic_inb(p, SCB_TAG);
-
- scb = NULL;
- if (scb_index < p->scb_data->numscbs)
- {
- scb = p->scb_data->scb_array[scb_index];
- if ((scb->flags & SCB_ACTIVE) == 0)
- {
- scb = NULL;
- }
- }
- if (scb == NULL)
- {
- printk(WARN_LEAD "Referenced SCB %d not valid during SELTO.\n",
- p->host_no, -1, -1, -1, scb_index);
- printk(KERN_WARNING " SCSISEQ = 0x%x SEQADDR = 0x%x SSTAT0 = 0x%x "
- "SSTAT1 = 0x%x\n", aic_inb(p, SCSISEQ),
- aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8),
- aic_inb(p, SSTAT0), aic_inb(p, SSTAT1));
- if (aic7xxx_panic_on_abort)
- aic7xxx_panic_abort(p, NULL);
- }
- else
- {
- cmd = scb->cmd;
- cmd->result = (DID_TIME_OUT << 16);
-
- /*
- * Clear out this hardware SCB
- */
- aic_outb(p, 0, SCB_CONTROL);
-
- /*
- * Clear out a few values in the card that are in an undetermined
- * state.
- */
- aic_outb(p, MSG_NOOP, MSG_OUT);
-
- /*
- * Shift the waiting for selection queue forward
- */
- nextscb = aic_inb(p, SCB_NEXT);
- aic_outb(p, nextscb, WAITING_SCBH);
-
- /*
- * Put this SCB back on the free list.
- */
- aic7xxx_add_curscb_to_free_list(p);
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic7xxx_verbose > 0xffff)
- printk(INFO_LEAD "Selection Timeout.\n", p->host_no, CTL_OF_SCB(scb));
-#endif
- if (scb->flags & SCB_QUEUED_ABORT)
- {
- /*
- * We know that this particular SCB had to be the queued abort since
- * the disconnected SCB would have gotten a reconnect instead.
- * What we need to do then is to let the command timeout again so
- * we get a reset since this abort just failed.
- */
- cmd->result = 0;
- scb = NULL;
- }
- }
- /*
- * Keep the sequencer from trying to restart any selections
- */
- aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ);
- /*
- * Make sure the data bits on the bus are released
- * Don't do this on 7770 chipsets, it makes them give us
- * a BRKADRINT and kills the card.
- */
- if( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI )
- aic_outb(p, 0, SCSIBUSL);
-
- /*
- * Delay for the selection timeout delay period then stop the selection
- */
- udelay(301);
- aic_outb(p, CLRSELINGO, CLRSINT0);
- /*
- * Clear out all the interrupt status bits
- */
- aic_outb(p, aic_inb(p, SIMODE1) & ~(ENREQINIT|ENBUSFREE), SIMODE1);
- p->flags &= ~AHC_HANDLING_REQINITS;
- aic_outb(p, CLRSELTIMEO | CLRBUSFREE, CLRSINT1);
- aic_outb(p, CLRSCSIINT, CLRINT);
- /*
- * Restarting the sequencer will stop the selection and make sure devices
- * are allowed to reselect in.
- */
- restart_sequencer(p);
- unpause_sequencer(p, TRUE);
- }
- else if (scb == NULL)
- {
- printk(WARN_LEAD "aic7xxx_isr - referenced scb not valid "
- "during scsiint 0x%x scb(%d)\n"
- " SIMODE0 0x%x, SIMODE1 0x%x, SSTAT0 0x%x, SEQADDR 0x%x\n",
- p->host_no, -1, -1, -1, status, scb_index, aic_inb(p, SIMODE0),
- aic_inb(p, SIMODE1), aic_inb(p, SSTAT0),
- (aic_inb(p, SEQADDR1) << 8) | aic_inb(p, SEQADDR0));
- /*
- * Turn off the interrupt and set status to zero, so that it
- * falls through the rest of the SCSIINT code.
- */
- aic_outb(p, status, CLRSINT1);
- aic_outb(p, CLRSCSIINT, CLRINT);
- unpause_sequencer(p, /* unpause always */ TRUE);
- scb = NULL;
- }
- else if (status & SCSIPERR)
- {
- /*
- * Determine the bus phase and queue an appropriate message.
- */
- char *phase;
- struct scsi_cmnd *cmd;
- unsigned char mesg_out = MSG_NOOP;
- unsigned char lastphase = aic_inb(p, LASTPHASE);
- unsigned char sstat2 = aic_inb(p, SSTAT2);
-
- cmd = scb->cmd;
- switch (lastphase)
- {
- case P_DATAOUT:
- phase = "Data-Out";
- break;
- case P_DATAIN:
- phase = "Data-In";
- mesg_out = MSG_INITIATOR_DET_ERR;
- break;
- case P_COMMAND:
- phase = "Command";
- break;
- case P_MESGOUT:
- phase = "Message-Out";
- break;
- case P_STATUS:
- phase = "Status";
- mesg_out = MSG_INITIATOR_DET_ERR;
- break;
- case P_MESGIN:
- phase = "Message-In";
- mesg_out = MSG_PARITY_ERROR;
- break;
- default:
- phase = "unknown";
- break;
- }
-
- /*
- * A parity error has occurred during a data
- * transfer phase. Flag it and continue.
- */
- if( (p->features & AHC_ULTRA3) &&
- (aic_inb(p, SCSIRATE) & AHC_SYNCRATE_CRC) &&
- (lastphase == P_DATAIN) )
- {
- printk(WARN_LEAD "CRC error during %s phase.\n",
- p->host_no, CTL_OF_SCB(scb), phase);
- if(sstat2 & CRCVALERR)
- {
- printk(WARN_LEAD " CRC error in intermediate CRC packet.\n",
- p->host_no, CTL_OF_SCB(scb));
- }
- if(sstat2 & CRCENDERR)
- {
- printk(WARN_LEAD " CRC error in ending CRC packet.\n",
- p->host_no, CTL_OF_SCB(scb));
- }
- if(sstat2 & CRCREQERR)
- {
- printk(WARN_LEAD " Target incorrectly requested a CRC packet.\n",
- p->host_no, CTL_OF_SCB(scb));
- }
- if(sstat2 & DUAL_EDGE_ERROR)
- {
- printk(WARN_LEAD " Dual Edge transmission error.\n",
- p->host_no, CTL_OF_SCB(scb));
- }
- }
- else if( (lastphase == P_MESGOUT) &&
- (scb->flags & SCB_MSGOUT_PPR) )
- {
- /*
- * As per the draft specs, any device capable of supporting any of
- * the option values other than 0 is not allowed to reject the
- * PPR message. Instead, they must negotiate out what they do
- * support instead of rejecting our offering or else they cause
- * a parity error during msg_out phase to signal that they don't
- * like our settings.
- */
- aic_dev = AIC_DEV(scb->cmd);
- aic_dev->needppr = aic_dev->needppr_copy = 0;
- aic7xxx_set_width(p, scb->cmd->device->id, scb->cmd->device->channel, scb->cmd->device->lun,
- MSG_EXT_WDTR_BUS_8_BIT,
- (AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE),
- aic_dev);
- aic7xxx_set_syncrate(p, NULL, scb->cmd->device->id, scb->cmd->device->channel, 0, 0,
- 0, AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE,
- aic_dev);
- aic_dev->goal.options = 0;
- scb->flags &= ~SCB_MSGOUT_BITS;
- if(aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- {
- printk(INFO_LEAD "parity error during PPR message, reverting "
- "to WDTR/SDTR\n", p->host_no, CTL_OF_SCB(scb));
- }
- if ( aic_dev->goal.width )
- {
- aic_dev->needwdtr = aic_dev->needwdtr_copy = 1;
- }
- if ( aic_dev->goal.offset )
- {
- if( aic_dev->goal.period <= 9 )
- {
- aic_dev->goal.period = 10;
- }
- aic_dev->needsdtr = aic_dev->needsdtr_copy = 1;
- }
- scb = NULL;
- }
-
- /*
- * We've set the hardware to assert ATN if we get a parity
- * error on "in" phases, so all we need to do is stuff the
- * message buffer with the appropriate message. "In" phases
- * have set mesg_out to something other than MSG_NOOP.
- */
- if (mesg_out != MSG_NOOP)
- {
- aic_outb(p, mesg_out, MSG_OUT);
- aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO);
- scb = NULL;
- }
- aic_outb(p, CLRSCSIPERR, CLRSINT1);
- aic_outb(p, CLRSCSIINT, CLRINT);
- unpause_sequencer(p, /* unpause_always */ TRUE);
- }
- else if ( (status & REQINIT) &&
- (p->flags & AHC_HANDLING_REQINITS) )
- {
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic7xxx_verbose > 0xffff)
- printk(INFO_LEAD "Handling REQINIT, SSTAT1=0x%x.\n", p->host_no,
- CTL_OF_SCB(scb), aic_inb(p, SSTAT1));
-#endif
- aic7xxx_handle_reqinit(p, scb);
- return;
- }
- else
- {
- /*
- * We don't know what's going on. Turn off the
- * interrupt source and try to continue.
- */
- if (aic7xxx_verbose & VERBOSE_SCSIINT)
- printk(INFO_LEAD "Unknown SCSIINT status, SSTAT1(0x%x).\n",
- p->host_no, -1, -1, -1, status);
- aic_outb(p, status, CLRSINT1);
- aic_outb(p, CLRSCSIINT, CLRINT);
- unpause_sequencer(p, /* unpause always */ TRUE);
- scb = NULL;
- }
- if (scb != NULL)
- {
- aic7xxx_done(p, scb);
- }
-}
-
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
-static void
-aic7xxx_check_scbs(struct aic7xxx_host *p, char *buffer)
-{
- unsigned char saved_scbptr, free_scbh, dis_scbh, wait_scbh, temp;
- int i, bogus, lost;
- static unsigned char scb_status[AIC7XXX_MAXSCB];
-
-#define SCB_NO_LIST 0
-#define SCB_FREE_LIST 1
-#define SCB_WAITING_LIST 2
-#define SCB_DISCONNECTED_LIST 4
-#define SCB_CURRENTLY_ACTIVE 8
-
- /*
- * Note, these checks will fail on a regular basis once the machine moves
- * beyond the bus scan phase. The problem is race conditions concerning
- * the scbs and where they are linked in. When you have 30 or so commands
- * outstanding on the bus, and run this twice with every interrupt, the
- * chances get pretty good that you'll catch the sequencer with an SCB
- * only partially linked in. Therefore, once we pass the scan phase
- * of the bus, we really should disable this function.
- */
- bogus = FALSE;
- memset(&scb_status[0], 0, sizeof(scb_status));
- pause_sequencer(p);
- saved_scbptr = aic_inb(p, SCBPTR);
- if (saved_scbptr >= p->scb_data->maxhscbs)
- {
- printk("Bogus SCBPTR %d\n", saved_scbptr);
- bogus = TRUE;
- }
- scb_status[saved_scbptr] = SCB_CURRENTLY_ACTIVE;
- free_scbh = aic_inb(p, FREE_SCBH);
- if ( (free_scbh != SCB_LIST_NULL) &&
- (free_scbh >= p->scb_data->maxhscbs) )
- {
- printk("Bogus FREE_SCBH %d\n", free_scbh);
- bogus = TRUE;
- }
- else
- {
- temp = free_scbh;
- while( (temp != SCB_LIST_NULL) && (temp < p->scb_data->maxhscbs) )
- {
- if(scb_status[temp] & 0x07)
- {
- printk("HSCB %d on multiple lists, status 0x%02x", temp,
- scb_status[temp] | SCB_FREE_LIST);
- bogus = TRUE;
- }
- scb_status[temp] |= SCB_FREE_LIST;
- aic_outb(p, temp, SCBPTR);
- temp = aic_inb(p, SCB_NEXT);
- }
- }
-
- dis_scbh = aic_inb(p, DISCONNECTED_SCBH);
- if ( (dis_scbh != SCB_LIST_NULL) &&
- (dis_scbh >= p->scb_data->maxhscbs) )
- {
- printk("Bogus DISCONNECTED_SCBH %d\n", dis_scbh);
- bogus = TRUE;
- }
- else
- {
- temp = dis_scbh;
- while( (temp != SCB_LIST_NULL) && (temp < p->scb_data->maxhscbs) )
- {
- if(scb_status[temp] & 0x07)
- {
- printk("HSCB %d on multiple lists, status 0x%02x", temp,
- scb_status[temp] | SCB_DISCONNECTED_LIST);
- bogus = TRUE;
- }
- scb_status[temp] |= SCB_DISCONNECTED_LIST;
- aic_outb(p, temp, SCBPTR);
- temp = aic_inb(p, SCB_NEXT);
- }
- }
-
- wait_scbh = aic_inb(p, WAITING_SCBH);
- if ( (wait_scbh != SCB_LIST_NULL) &&
- (wait_scbh >= p->scb_data->maxhscbs) )
- {
- printk("Bogus WAITING_SCBH %d\n", wait_scbh);
- bogus = TRUE;
- }
- else
- {
- temp = wait_scbh;
- while( (temp != SCB_LIST_NULL) && (temp < p->scb_data->maxhscbs) )
- {
- if(scb_status[temp] & 0x07)
- {
- printk("HSCB %d on multiple lists, status 0x%02x", temp,
- scb_status[temp] | SCB_WAITING_LIST);
- bogus = TRUE;
- }
- scb_status[temp] |= SCB_WAITING_LIST;
- aic_outb(p, temp, SCBPTR);
- temp = aic_inb(p, SCB_NEXT);
- }
- }
-
- lost=0;
- for(i=0; i < p->scb_data->maxhscbs; i++)
- {
- aic_outb(p, i, SCBPTR);
- temp = aic_inb(p, SCB_NEXT);
- if ( ((temp != SCB_LIST_NULL) &&
- (temp >= p->scb_data->maxhscbs)) )
- {
- printk("HSCB %d bad, SCB_NEXT invalid(%d).\n", i, temp);
- bogus = TRUE;
- }
- if ( temp == i )
- {
- printk("HSCB %d bad, SCB_NEXT points to self.\n", i);
- bogus = TRUE;
- }
- if (scb_status[i] == 0)
- lost++;
- if (lost > 1)
- {
- printk("Too many lost scbs.\n");
- bogus=TRUE;
- }
- }
- aic_outb(p, saved_scbptr, SCBPTR);
- unpause_sequencer(p, FALSE);
- if (bogus)
- {
- printk("Bogus parameters found in card SCB array structures.\n");
- printk("%s\n", buffer);
- aic7xxx_panic_abort(p, NULL);
- }
- return;
-}
-#endif
-
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_handle_command_completion_intr
- *
- * Description:
- * SCSI command completion interrupt handler.
- *-F*************************************************************************/
-static void
-aic7xxx_handle_command_completion_intr(struct aic7xxx_host *p)
-{
- struct aic7xxx_scb *scb = NULL;
- struct aic_dev_data *aic_dev;
- struct scsi_cmnd *cmd;
- unsigned char scb_index, tindex;
-
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if( (p->isr_count < 16) && (aic7xxx_verbose > 0xffff) )
- printk(INFO_LEAD "Command Complete Int.\n", p->host_no, -1, -1, -1);
-#endif
-
- /*
- * Read the INTSTAT location after clearing the CMDINT bit. This forces
- * any posted PCI writes to flush to memory. Gerard Roudier suggested
- * this fix to the possible race of clearing the CMDINT bit but not
- * having all command bytes flushed onto the qoutfifo.
- */
- aic_outb(p, CLRCMDINT, CLRINT);
- aic_inb(p, INTSTAT);
- /*
- * The sequencer will continue running when it
- * issues this interrupt. There may be >1 commands
- * finished, so loop until we've processed them all.
- */
-
- while (p->qoutfifo[p->qoutfifonext] != SCB_LIST_NULL)
- {
- scb_index = p->qoutfifo[p->qoutfifonext];
- p->qoutfifo[p->qoutfifonext++] = SCB_LIST_NULL;
- if ( scb_index >= p->scb_data->numscbs )
- {
- printk(WARN_LEAD "CMDCMPLT with invalid SCB index %d\n", p->host_no,
- -1, -1, -1, scb_index);
- continue;
- }
- scb = p->scb_data->scb_array[scb_index];
- if (!(scb->flags & SCB_ACTIVE) || (scb->cmd == NULL))
- {
- printk(WARN_LEAD "CMDCMPLT without command for SCB %d, SCB flags "
- "0x%x, cmd 0x%lx\n", p->host_no, -1, -1, -1, scb_index, scb->flags,
- (unsigned long) scb->cmd);
- continue;
- }
- tindex = TARGET_INDEX(scb->cmd);
- aic_dev = AIC_DEV(scb->cmd);
- if (scb->flags & SCB_QUEUED_ABORT)
- {
- pause_sequencer(p);
- if ( ((aic_inb(p, LASTPHASE) & PHASE_MASK) != P_BUSFREE) &&
- (aic_inb(p, SCB_TAG) == scb->hscb->tag) )
- {
- unpause_sequencer(p, FALSE);
- continue;
- }
- aic7xxx_reset_device(p, scb->cmd->device->id, scb->cmd->device->channel,
- scb->cmd->device->lun, scb->hscb->tag);
- scb->flags &= ~(SCB_QUEUED_FOR_DONE | SCB_RESET | SCB_ABORT |
- SCB_QUEUED_ABORT);
- unpause_sequencer(p, FALSE);
- }
- else if (scb->flags & SCB_ABORT)
- {
- /*
- * We started to abort this, but it completed on us, let it
- * through as successful
- */
- scb->flags &= ~(SCB_ABORT|SCB_RESET);
- }
- else if (scb->flags & SCB_SENSE)
- {
- char *buffer = &scb->cmd->sense_buffer[0];
-
- if (buffer[12] == 0x47 || buffer[12] == 0x54)
- {
- /*
- * Signal that we need to re-negotiate things.
- */
- aic_dev->needppr = aic_dev->needppr_copy;
- aic_dev->needsdtr = aic_dev->needsdtr_copy;
- aic_dev->needwdtr = aic_dev->needwdtr_copy;
- }
- }
- cmd = scb->cmd;
- if (scb->hscb->residual_SG_segment_count != 0)
- {
- aic7xxx_calculate_residual(p, scb);
- }
- cmd->result |= (aic7xxx_error(cmd) << 16);
- aic7xxx_done(p, scb);
- }
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_isr
- *
- * Description:
- * SCSI controller interrupt handler.
- *-F*************************************************************************/
-static void
-aic7xxx_isr(void *dev_id)
-{
- struct aic7xxx_host *p;
- unsigned char intstat;
-
- p = dev_id;
-
- /*
- * Just a few sanity checks. Make sure that we have an int pending.
- * Also, if PCI, then we are going to check for a PCI bus error status
- * should we get too many spurious interrupts.
- */
- if (!((intstat = aic_inb(p, INTSTAT)) & INT_PEND))
- {
-#ifdef CONFIG_PCI
- if ( (p->chip & AHC_PCI) && (p->spurious_int > 500) &&
- !(p->flags & AHC_HANDLING_REQINITS) )
- {
- if ( aic_inb(p, ERROR) & PCIERRSTAT )
- {
- aic7xxx_pci_intr(p);
- }
- p->spurious_int = 0;
- }
- else if ( !(p->flags & AHC_HANDLING_REQINITS) )
- {
- p->spurious_int++;
- }
-#endif
- return;
- }
-
- p->spurious_int = 0;
-
- /*
- * Keep track of interrupts for /proc/scsi
- */
- p->isr_count++;
-
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if ( (p->isr_count < 16) && (aic7xxx_verbose > 0xffff) &&
- (aic7xxx_panic_on_abort) && (p->flags & AHC_PAGESCBS) )
- aic7xxx_check_scbs(p, "Bogus settings at start of interrupt.");
-#endif
-
- /*
- * Handle all the interrupt sources - especially for SCSI
- * interrupts, we won't get a second chance at them.
- */
- if (intstat & CMDCMPLT)
- {
- aic7xxx_handle_command_completion_intr(p);
- }
-
- if (intstat & BRKADRINT)
- {
- int i;
- unsigned char errno = aic_inb(p, ERROR);
-
- printk(KERN_ERR "(scsi%d) BRKADRINT error(0x%x):\n", p->host_no, errno);
- for (i = 0; i < ARRAY_SIZE(hard_error); i++)
- {
- if (errno & hard_error[i].errno)
- {
- printk(KERN_ERR " %s\n", hard_error[i].errmesg);
- }
- }
- printk(KERN_ERR "(scsi%d) SEQADDR=0x%x\n", p->host_no,
- (((aic_inb(p, SEQADDR1) << 8) & 0x100) | aic_inb(p, SEQADDR0)));
- if (aic7xxx_panic_on_abort)
- aic7xxx_panic_abort(p, NULL);
-#ifdef CONFIG_PCI
- if (errno & PCIERRSTAT)
- aic7xxx_pci_intr(p);
-#endif
- if (errno & (SQPARERR | ILLOPCODE | ILLSADDR))
- {
- panic("aic7xxx: unrecoverable BRKADRINT.\n");
- }
- if (errno & ILLHADDR)
- {
- printk(KERN_ERR "(scsi%d) BUG! Driver accessed chip without first "
- "pausing controller!\n", p->host_no);
- }
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (errno & DPARERR)
- {
- if (aic_inb(p, DMAPARAMS) & DIRECTION)
- printk("(scsi%d) while DMAing SCB from host to card.\n", p->host_no);
- else
- printk("(scsi%d) while DMAing SCB from card to host.\n", p->host_no);
- }
-#endif
- aic_outb(p, CLRPARERR | CLRBRKADRINT, CLRINT);
- unpause_sequencer(p, FALSE);
- }
-
- if (intstat & SEQINT)
- {
- /*
- * Read the CCSCBCTL register to work around a bug in the Ultra2 cards
- */
- if(p->features & AHC_ULTRA2)
- {
- aic_inb(p, CCSCBCTL);
- }
- aic7xxx_handle_seqint(p, intstat);
- }
-
- if (intstat & SCSIINT)
- {
- aic7xxx_handle_scsiint(p, intstat);
- }
-
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if ( (p->isr_count < 16) && (aic7xxx_verbose > 0xffff) &&
- (aic7xxx_panic_on_abort) && (p->flags & AHC_PAGESCBS) )
- aic7xxx_check_scbs(p, "Bogus settings at end of interrupt.");
-#endif
-
-}
-
-/*+F*************************************************************************
- * Function:
- * do_aic7xxx_isr
- *
- * Description:
- * This is a gross hack to solve a problem in linux kernels 2.1.85 and
- * above. Please, children, do not try this at home, and if you ever see
- * anything like it, please inform the Gross Hack Police immediately
- *-F*************************************************************************/
-static irqreturn_t
-do_aic7xxx_isr(int irq, void *dev_id)
-{
- unsigned long cpu_flags;
- struct aic7xxx_host *p;
-
- p = dev_id;
- if(!p)
- return IRQ_NONE;
- spin_lock_irqsave(p->host->host_lock, cpu_flags);
- p->flags |= AHC_IN_ISR;
- do
- {
- aic7xxx_isr(dev_id);
- } while ( (aic_inb(p, INTSTAT) & INT_PEND) );
- aic7xxx_done_cmds_complete(p);
- aic7xxx_run_waiting_queues(p);
- p->flags &= ~AHC_IN_ISR;
- spin_unlock_irqrestore(p->host->host_lock, cpu_flags);
-
- return IRQ_HANDLED;
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_init_transinfo
- *
- * Description:
- * Set up the initial aic_dev values from the BIOS settings and from
- * INQUIRY results
- *-F*************************************************************************/
-static void
-aic7xxx_init_transinfo(struct aic7xxx_host *p, struct aic_dev_data *aic_dev)
-{
- struct scsi_device *sdpnt = aic_dev->SDptr;
- unsigned char tindex;
-
- tindex = sdpnt->id | (sdpnt->channel << 3);
- if (!(aic_dev->flags & DEVICE_DTR_SCANNED))
- {
- aic_dev->flags |= DEVICE_DTR_SCANNED;
-
- if ( sdpnt->wdtr && (p->features & AHC_WIDE) )
- {
- aic_dev->needwdtr = aic_dev->needwdtr_copy = 1;
- aic_dev->goal.width = p->user[tindex].width;
- }
- else
- {
- aic_dev->needwdtr = aic_dev->needwdtr_copy = 0;
- pause_sequencer(p);
- aic7xxx_set_width(p, sdpnt->id, sdpnt->channel, sdpnt->lun,
- MSG_EXT_WDTR_BUS_8_BIT, (AHC_TRANS_ACTIVE |
- AHC_TRANS_GOAL |
- AHC_TRANS_CUR), aic_dev );
- unpause_sequencer(p, FALSE);
- }
- if ( sdpnt->sdtr && p->user[tindex].offset )
- {
- aic_dev->goal.period = p->user[tindex].period;
- aic_dev->goal.options = p->user[tindex].options;
- if (p->features & AHC_ULTRA2)
- aic_dev->goal.offset = MAX_OFFSET_ULTRA2;
- else if (aic_dev->goal.width == MSG_EXT_WDTR_BUS_16_BIT)
- aic_dev->goal.offset = MAX_OFFSET_16BIT;
- else
- aic_dev->goal.offset = MAX_OFFSET_8BIT;
- if ( sdpnt->ppr && p->user[tindex].period <= 9 &&
- p->user[tindex].options )
- {
- aic_dev->needppr = aic_dev->needppr_copy = 1;
- aic_dev->needsdtr = aic_dev->needsdtr_copy = 0;
- aic_dev->needwdtr = aic_dev->needwdtr_copy = 0;
- aic_dev->flags |= DEVICE_SCSI_3;
- }
- else
- {
- aic_dev->needsdtr = aic_dev->needsdtr_copy = 1;
- aic_dev->goal.period = max_t(unsigned char, 10, aic_dev->goal.period);
- aic_dev->goal.options = 0;
- }
- }
- else
- {
- aic_dev->needsdtr = aic_dev->needsdtr_copy = 0;
- aic_dev->goal.period = 255;
- aic_dev->goal.offset = 0;
- aic_dev->goal.options = 0;
- }
- aic_dev->flags |= DEVICE_PRINT_DTR;
- }
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_slave_alloc
- *
- * Description:
- * Set up the initial aic_dev struct pointers
- *-F*************************************************************************/
-static int
-aic7xxx_slave_alloc(struct scsi_device *SDptr)
-{
- struct aic7xxx_host *p = (struct aic7xxx_host *)SDptr->host->hostdata;
- struct aic_dev_data *aic_dev;
-
- aic_dev = kmalloc(sizeof(struct aic_dev_data), GFP_KERNEL);
- if(!aic_dev)
- return 1;
- /*
- * Check to see if channel was scanned.
- */
-
- if (!(p->flags & AHC_A_SCANNED) && (SDptr->channel == 0))
- {
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(INFO_LEAD "Scanning channel for devices.\n",
- p->host_no, 0, -1, -1);
- p->flags |= AHC_A_SCANNED;
- }
- else
- {
- if (!(p->flags & AHC_B_SCANNED) && (SDptr->channel == 1))
- {
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(INFO_LEAD "Scanning channel for devices.\n",
- p->host_no, 1, -1, -1);
- p->flags |= AHC_B_SCANNED;
- }
- }
-
- memset(aic_dev, 0, sizeof(struct aic_dev_data));
- SDptr->hostdata = aic_dev;
- aic_dev->SDptr = SDptr;
- aic_dev->max_q_depth = 1;
- aic_dev->temp_q_depth = 1;
- scbq_init(&aic_dev->delayed_scbs);
- INIT_LIST_HEAD(&aic_dev->list);
- list_add_tail(&aic_dev->list, &p->aic_devs);
- return 0;
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_device_queue_depth
- *
- * Description:
- * Determines the queue depth for a given device. There are two ways
- * a queue depth can be obtained for a tagged queueing device. One
- * way is the default queue depth, which is taken from
- * aic7xxx_default_queue_depth. The other is via the aic7xxx_tag_info
- * array.
- *
- * If tagged queueing isn't supported on the device, then we set the
- * depth to p->host->hostt->cmd_per_lun for internal driver queueing
- * as the default queue depth. Otherwise, we use either 4 or 8 as the
- * default queue depth (dependent on the number of hardware SCBs).
- * The other way we determine queue depth is through the use of the
- * aic7xxx_tag_info array which is enabled by defining
- * AIC7XXX_TAGGED_QUEUEING_BY_DEVICE. This array can be initialized
- * with queue depths for individual devices. It also allows tagged
- * queueing to be [en|dis]abled for a specific adapter.
- *-F*************************************************************************/
-static void
-aic7xxx_device_queue_depth(struct aic7xxx_host *p, struct scsi_device *device)
-{
- int tag_enabled = FALSE;
- struct aic_dev_data *aic_dev = device->hostdata;
- unsigned char tindex;
-
- tindex = device->id | (device->channel << 3);
-
- if (device->simple_tags)
- return; // We've already enabled this device
-
- if (device->tagged_supported)
- {
- tag_enabled = TRUE;
-
- if (!(p->discenable & (1 << tindex)))
- {
- if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- printk(INFO_LEAD "Disconnection disabled, unable to "
- "enable tagged queueing.\n",
- p->host_no, device->channel, device->id, device->lun);
- tag_enabled = FALSE;
- }
- else
- {
- if (p->instance >= ARRAY_SIZE(aic7xxx_tag_info))
- {
- static int print_warning = TRUE;
- if(print_warning)
- {
- printk(KERN_INFO "aic7xxx: WARNING, insufficient tag_info instances for"
- " installed controllers.\n");
- printk(KERN_INFO "aic7xxx: Please update the aic7xxx_tag_info array in"
- " the aic7xxx.c source file.\n");
- print_warning = FALSE;
- }
- aic_dev->max_q_depth = aic_dev->temp_q_depth =
- aic7xxx_default_queue_depth;
- }
- else
- {
-
- if (aic7xxx_tag_info[p->instance].tag_commands[tindex] == 255)
- {
- tag_enabled = FALSE;
- }
- else if (aic7xxx_tag_info[p->instance].tag_commands[tindex] == 0)
- {
- aic_dev->max_q_depth = aic_dev->temp_q_depth =
- aic7xxx_default_queue_depth;
- }
- else
- {
- aic_dev->max_q_depth = aic_dev->temp_q_depth =
- aic7xxx_tag_info[p->instance].tag_commands[tindex];
- }
- }
- }
- }
- if (tag_enabled)
- {
- if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- {
- printk(INFO_LEAD "Tagged queuing enabled, queue depth %d.\n",
- p->host_no, device->channel, device->id,
- device->lun, aic_dev->max_q_depth);
- }
- scsi_adjust_queue_depth(device, MSG_ORDERED_TAG, aic_dev->max_q_depth);
- }
- else
- {
- if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- {
- printk(INFO_LEAD "Tagged queuing disabled, queue depth %d.\n",
- p->host_no, device->channel, device->id,
- device->lun, device->host->cmd_per_lun);
- }
- scsi_adjust_queue_depth(device, 0, device->host->cmd_per_lun);
- }
- return;
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_slave_destroy
- *
- * Description:
- * prepare for this device to go away
- *-F*************************************************************************/
-static void
-aic7xxx_slave_destroy(struct scsi_device *SDptr)
-{
- struct aic_dev_data *aic_dev = SDptr->hostdata;
-
- list_del(&aic_dev->list);
- SDptr->hostdata = NULL;
- kfree(aic_dev);
- return;
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_slave_configure
- *
- * Description:
- * Configure the device we are attaching to the controller. This is
- * where we get to do things like scan the INQUIRY data, set queue
- * depths, allocate command structs, etc.
- *-F*************************************************************************/
-static int
-aic7xxx_slave_configure(struct scsi_device *SDptr)
-{
- struct aic7xxx_host *p = (struct aic7xxx_host *) SDptr->host->hostdata;
- struct aic_dev_data *aic_dev;
- int scbnum;
-
- aic_dev = (struct aic_dev_data *)SDptr->hostdata;
-
- aic7xxx_init_transinfo(p, aic_dev);
- aic7xxx_device_queue_depth(p, SDptr);
- if(list_empty(&aic_dev->list))
- list_add_tail(&aic_dev->list, &p->aic_devs);
-
- scbnum = 0;
- list_for_each_entry(aic_dev, &p->aic_devs, list) {
- scbnum += aic_dev->max_q_depth;
- }
- while (scbnum > p->scb_data->numscbs)
- {
- /*
- * Pre-allocate the needed SCBs to get around the possibility of having
- * to allocate some when memory is more or less exhausted and we need
- * the SCB in order to perform a swap operation (possible deadlock)
- */
- if ( aic7xxx_allocate_scb(p) == 0 )
- break;
- }
-
-
- return(0);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_probe
- *
- * Description:
- * Probing for EISA boards: it looks like the first two bytes
- * are a manufacturer code - three characters, five bits each:
- *
- * BYTE 0 BYTE 1 BYTE 2 BYTE 3
- * ?1111122 22233333 PPPPPPPP RRRRRRRR
- *
- * The characters are baselined off ASCII '@', so add that value
- * to each to get the real ASCII code for it. The next two bytes
- * appear to be a product and revision number, probably vendor-
- * specific. This is what is being searched for at each port,
- * and what should probably correspond to the ID= field in the
- * ECU's .cfg file for the card - if your card is not detected,
- * make sure your signature is listed in the array.
- *
- * The fourth byte's lowest bit seems to be an enabled/disabled
- * flag (rest of the bits are reserved?).
- *
- * NOTE: This function is only needed on Intel and Alpha platforms,
- * the other platforms we support don't have EISA/VLB busses. So,
- * we #ifdef this entire function to avoid compiler warnings about
- * an unused function.
- *-F*************************************************************************/
-#if defined(__i386__) || defined(__alpha__)
-static int
-aic7xxx_probe(int slot, int base, ahc_flag_type *flags)
-{
- int i;
- unsigned char buf[4];
-
- static struct {
- int n;
- unsigned char signature[sizeof(buf)];
- ahc_chip type;
- int bios_disabled;
- } AIC7xxx[] = {
- { 4, { 0x04, 0x90, 0x77, 0x70 },
- AHC_AIC7770|AHC_EISA, FALSE }, /* mb 7770 */
- { 4, { 0x04, 0x90, 0x77, 0x71 },
- AHC_AIC7770|AHC_EISA, FALSE }, /* host adapter 274x */
- { 4, { 0x04, 0x90, 0x77, 0x56 },
- AHC_AIC7770|AHC_VL, FALSE }, /* 284x BIOS enabled */
- { 4, { 0x04, 0x90, 0x77, 0x57 },
- AHC_AIC7770|AHC_VL, TRUE } /* 284x BIOS disabled */
- };
-
- /*
- * The VL-bus cards need to be primed by
- * writing before a signature check.
- */
- for (i = 0; i < sizeof(buf); i++)
- {
- outb(0x80 + i, base);
- buf[i] = inb(base + i);
- }
-
- for (i = 0; i < ARRAY_SIZE(AIC7xxx); i++)
- {
- /*
- * Signature match on enabled card?
- */
- if (!memcmp(buf, AIC7xxx[i].signature, AIC7xxx[i].n))
- {
- if (inb(base + 4) & 1)
- {
- if (AIC7xxx[i].bios_disabled)
- {
- *flags |= AHC_USEDEFAULTS;
- }
- else
- {
- *flags |= AHC_BIOS_ENABLED;
- }
- return (i);
- }
-
- printk("aic7xxx: <Adaptec 7770 SCSI Host Adapter> "
- "disabled at slot %d, ignored.\n", slot);
- }
- }
-
- return (-1);
-}
-#endif /* (__i386__) || (__alpha__) */
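A minimal stand-alone sketch (not part of the driver source) of the EISA manufacturer-ID decoding described in the comment block above; it assumes the stated layout of three 5-bit characters baselined off ASCII '@', and the helper name sketch_decode_eisa_mfg is purely illustrative:

/*
 * Bytes 0-1 hold three 5-bit characters: ?1111122 22233333.
 * Adding '@' to each 5-bit value yields the ASCII character, so the
 * Adaptec signature bytes 0x04 0x90 decode to "ADP".
 */
static void sketch_decode_eisa_mfg(unsigned char b0, unsigned char b1,
                                   char out[4])
{
  out[0] = '@' + ((b0 >> 2) & 0x1f);                        /* bits 6..2 of byte 0 */
  out[1] = '@' + (((b0 & 0x03) << 3) | ((b1 >> 5) & 0x07)); /* 2 low bits + 3 high bits */
  out[2] = '@' + (b1 & 0x1f);                               /* bits 4..0 of byte 1 */
  out[3] = '\0';
}

/* Example: sketch_decode_eisa_mfg(0x04, 0x90, buf) leaves "ADP" in buf. */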
-
-
-/*+F*************************************************************************
- * Function:
- * read_2840_seeprom
- *
- * Description:
- * Reads the 2840 serial EEPROM and returns 1 if successful and 0 if
- * not successful.
- *
- * See read_seeprom (for the 2940) for the instruction set of the 93C46
- * chip.
- *
- * The 2840 interface to the 93C46 serial EEPROM is through the
- * STATUS_2840 and SEECTL_2840 registers. The CS_2840, CK_2840, and
- * DO_2840 bits of the SEECTL_2840 register are connected to the chip
- * select, clock, and data out lines respectively of the serial EEPROM.
- * The DI_2840 bit of the STATUS_2840 is connected to the data in line
- * of the serial EEPROM. The EEPROM_TF bit of STATUS_2840 register is
- * useful in that it gives us an 800 nsec timer. After a read from the
- * SEECTL_2840 register the timing flag is cleared and goes high 800 nsec
- * later.
- *-F*************************************************************************/
-static int
-read_284x_seeprom(struct aic7xxx_host *p, struct seeprom_config *sc)
-{
- int i = 0, k = 0;
- unsigned char temp;
- unsigned short checksum = 0;
- unsigned short *seeprom = (unsigned short *) sc;
- struct seeprom_cmd {
- unsigned char len;
- unsigned char bits[3];
- };
- struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
-
-#define CLOCK_PULSE(p) \
- while ((aic_inb(p, STATUS_2840) & EEPROM_TF) == 0) \
- { \
- ; /* Do nothing */ \
- } \
- (void) aic_inb(p, SEECTL_2840);
-
- /*
- * Read the first 32 registers of the seeprom. For the 2840,
- * the 93C46 SEEPROM is a 1024-bit device with 64 16-bit registers
- * but only the first 32 are used by Adaptec BIOS. The loop
- * will range from 0 to 31.
- */
- for (k = 0; k < (sizeof(*sc) / 2); k++)
- {
- /*
- * Send chip select for one clock cycle.
- */
- aic_outb(p, CK_2840 | CS_2840, SEECTL_2840);
- CLOCK_PULSE(p);
-
- /*
- * Now we're ready to send the read command followed by the
- * address of the 16-bit register we want to read.
- */
- for (i = 0; i < seeprom_read.len; i++)
- {
- temp = CS_2840 | seeprom_read.bits[i];
- aic_outb(p, temp, SEECTL_2840);
- CLOCK_PULSE(p);
- temp = temp ^ CK_2840;
- aic_outb(p, temp, SEECTL_2840);
- CLOCK_PULSE(p);
- }
- /*
- * Send the 6 bit address (MSB first, LSB last).
- */
- for (i = 5; i >= 0; i--)
- {
- temp = k;
- temp = (temp >> i) & 1; /* Mask out all but lower bit. */
- temp = CS_2840 | temp;
- aic_outb(p, temp, SEECTL_2840);
- CLOCK_PULSE(p);
- temp = temp ^ CK_2840;
- aic_outb(p, temp, SEECTL_2840);
- CLOCK_PULSE(p);
- }
-
- /*
- * Now read the 16 bit register. An initial 0 precedes the
- * register contents which begins with bit 15 (MSB) and ends
- * with bit 0 (LSB). The initial 0 will be shifted off the
- * top of our word as we let the loop run from 0 to 16.
- */
- for (i = 0; i <= 16; i++)
- {
- temp = CS_2840;
- aic_outb(p, temp, SEECTL_2840);
- CLOCK_PULSE(p);
- temp = temp ^ CK_2840;
- seeprom[k] = (seeprom[k] << 1) | (aic_inb(p, STATUS_2840) & DI_2840);
- aic_outb(p, temp, SEECTL_2840);
- CLOCK_PULSE(p);
- }
- /*
- * The serial EEPROM has a checksum in the last word. Keep a
- * running checksum for all words read except for the last
- * word. We'll verify the checksum after all words have been
- * read.
- */
- if (k < (sizeof(*sc) / 2) - 1)
- {
- checksum = checksum + seeprom[k];
- }
-
- /*
- * Reset the chip select for the next command cycle.
- */
- aic_outb(p, 0, SEECTL_2840);
- CLOCK_PULSE(p);
- aic_outb(p, CK_2840, SEECTL_2840);
- CLOCK_PULSE(p);
- aic_outb(p, 0, SEECTL_2840);
- CLOCK_PULSE(p);
- }
-
-#if 0
- printk("Computed checksum 0x%x, checksum read 0x%x\n", checksum, sc->checksum);
- printk("Serial EEPROM:");
- for (k = 0; k < (sizeof(*sc) / 2); k++)
- {
- if (((k % 8) == 0) && (k != 0))
- {
- printk("\n ");
- }
- printk(" 0x%x", seeprom[k]);
- }
- printk("\n");
-#endif
-
- if (checksum != sc->checksum)
- {
- printk("aic7xxx: SEEPROM checksum error, ignoring SEEPROM settings.\n");
- return (0);
- }
-
- return (1);
-#undef CLOCK_PULSE
-}
-
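-/*
- * Illustrative sketch (added; not part of the original driver): both
- * SEEPROM read routines validate their data the same way.  The last word
- * read holds the expected checksum, which should equal the 16-bit
- * wrapping sum of every preceding word.  The helper name below is
- * hypothetical.
- */
-static int
-seeprom_checksum_ok(const unsigned short *words, unsigned int len)
-{
-  unsigned short sum = 0;
-  unsigned int i;
-
-  for (i = 0; i + 1 < len; i++)
-    sum += words[i];
-  /* read_seeprom() additionally rejects an all-zero (erased) part. */
-  return ((sum == words[len - 1]) && (sum != 0));
-}
-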
-#define CLOCK_PULSE(p) \
- do { \
- int limit = 0; \
- do { \
- mb(); \
- pause_sequencer(p); /* This is just to generate some PCI */ \
- /* traffic so the PCI read is flushed */ \
- /* it shouldn't be needed, but some */ \
- /* chipsets do indeed appear to need */ \
- /* something to force PCI reads to get */ \
- /* flushed */ \
- udelay(1); /* Do nothing */ \
- } while (((aic_inb(p, SEECTL) & SEERDY) == 0) && (++limit < 1000)); \
- } while(0)
-
-/*+F*************************************************************************
- * Function:
- * acquire_seeprom
- *
- * Description:
- * Acquires access to the memory port on PCI controllers.
- *-F*************************************************************************/
-static int
-acquire_seeprom(struct aic7xxx_host *p)
-{
-
- /*
- * Request access to the memory port. When access is
- * granted, SEERDY will go high. We use a 1 second
- * timeout, which should be about 1 second more than
- * is needed. Reason: after the 7870 chip reset, there
- * should be no contention.
- */
- aic_outb(p, SEEMS, SEECTL);
- CLOCK_PULSE(p);
- if ((aic_inb(p, SEECTL) & SEERDY) == 0)
- {
- aic_outb(p, 0, SEECTL);
- return (0);
- }
- return (1);
-}
-
-/*+F*************************************************************************
- * Function:
- * release_seeprom
- *
- * Description:
- * Releases access to the memory port on PCI controllers.
- *-F*************************************************************************/
-static void
-release_seeprom(struct aic7xxx_host *p)
-{
- /*
- * Make sure the SEEPROM is ready before we release it.
- */
- CLOCK_PULSE(p);
- aic_outb(p, 0, SEECTL);
-}
-
-/*+F*************************************************************************
- * Function:
- * read_seeprom
- *
- * Description:
- * Reads the serial EEPROM and returns 1 if successful and 0 if
- * not successful.
- *
- * The instruction set of the 93C46/56/66 chips is as follows:
- *
- * Start OP
- * Function Bit Code Address Data Description
- * -------------------------------------------------------------------
- * READ 1 10 A5 - A0 Reads data stored in memory,
- * starting at specified address
- * EWEN 1 00 11XXXX Write enable must precede
- * all programming modes
- * ERASE 1 11 A5 - A0 Erase register A5A4A3A2A1A0
- * WRITE 1 01 A5 - A0 D15 - D0 Writes register
- * ERAL 1 00 10XXXX Erase all registers
- * WRAL 1 00 01XXXX D15 - D0 Writes to all registers
- * EWDS 1 00 00XXXX Disables all programming
- * instructions
- * *Note: A value of X for address is a don't care condition.
- * *Note: The 93C56 and 93C66 have 8 address bits.
- *
- *
- * The 93C46 has a four wire interface: clock, chip select, data in, and
- * data out. In order to perform one of the above functions, you need
- * to enable the chip select for a clock period (typically a minimum of
- * 1 usec, with the clock high and low a minimum of 750 and 250 nsec,
- * respectively). While the chip select remains high, you can clock in
- * the instructions (above) starting with the start bit, followed by the
- * OP code, Address, and Data (if needed). For the READ instruction, the
- * requested 16-bit register contents are read from the data out line but
- * are preceded by an initial zero (leading 0, followed by 16 bits, MSB
- * first). The clock cycling from low to high initiates the next data
- * bit to be sent from the chip.
- *
- * The 78xx interface to the 93C46 serial EEPROM is through the SEECTL
- * register. After successful arbitration for the memory port, the
- * SEECS bit of the SEECTL register is connected to the chip select.
- * The SEECK, SEEDO, and SEEDI are connected to the clock, data out,
- * and data in lines respectively. The SEERDY bit of SEECTL is useful
- * in that it gives us an 800 nsec timer. After a write to the SEECTL
- * register, the SEERDY goes high 800 nsec later. The one exception
- * to this is when we first request access to the memory port. The
- * SEERDY goes high to signify that access has been granted and, for
- * this case, has no implied timing.
- *-F*************************************************************************/
-static int
-read_seeprom(struct aic7xxx_host *p, int offset,
- unsigned short *scarray, unsigned int len, seeprom_chip_type chip)
-{
- int i = 0, k;
- unsigned char temp;
- unsigned short checksum = 0;
- struct seeprom_cmd {
- unsigned char len;
- unsigned char bits[3];
- };
- struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
-
- /*
- * Request access of the memory port.
- */
- if (acquire_seeprom(p) == 0)
- {
- return (0);
- }
-
- /*
- * Read 'len' registers of the seeprom. For the 7870, the 93C46
- * SEEPROM is a 1024-bit device with 64 16-bit registers but only
- * the first 32 are used by Adaptec BIOS. Some adapters use the
- * 93C56 SEEPROM which is a 2048-bit device. The loop will range
- * from 0 to 'len' - 1.
- */
- for (k = 0; k < len; k++)
- {
- /*
- * Send chip select for one clock cycle.
- */
- aic_outb(p, SEEMS | SEECK | SEECS, SEECTL);
- CLOCK_PULSE(p);
-
- /*
- * Now we're ready to send the read command followed by the
- * address of the 16-bit register we want to read.
- */
- for (i = 0; i < seeprom_read.len; i++)
- {
- temp = SEEMS | SEECS | (seeprom_read.bits[i] << 1);
- aic_outb(p, temp, SEECTL);
- CLOCK_PULSE(p);
- temp = temp ^ SEECK;
- aic_outb(p, temp, SEECTL);
- CLOCK_PULSE(p);
- }
- /*
- * Send the 6 or 8 bit address (MSB first, LSB last).
- */
- for (i = ((int) chip - 1); i >= 0; i--)
- {
- temp = k + offset;
- temp = (temp >> i) & 1; /* Mask out all but lower bit. */
- temp = SEEMS | SEECS | (temp << 1);
- aic_outb(p, temp, SEECTL);
- CLOCK_PULSE(p);
- temp = temp ^ SEECK;
- aic_outb(p, temp, SEECTL);
- CLOCK_PULSE(p);
- }
-
- /*
- * Now read the 16 bit register. An initial 0 precedes the
- * register contents which begins with bit 15 (MSB) and ends
- * with bit 0 (LSB). The initial 0 will be shifted off the
- * top of our word as we let the loop run from 0 to 16.
- */
- for (i = 0; i <= 16; i++)
- {
- temp = SEEMS | SEECS;
- aic_outb(p, temp, SEECTL);
- CLOCK_PULSE(p);
- temp = temp ^ SEECK;
- scarray[k] = (scarray[k] << 1) | (aic_inb(p, SEECTL) & SEEDI);
- aic_outb(p, temp, SEECTL);
- CLOCK_PULSE(p);
- }
-
- /*
- * The serial EEPROM should have a checksum in the last word.
- * Keep a running checksum for all words read except for the
- * last word. We'll verify the checksum after all words have
- * been read.
- */
- if (k < (len - 1))
- {
- checksum = checksum + scarray[k];
- }
-
- /*
- * Reset the chip select for the next command cycle.
- */
- aic_outb(p, SEEMS, SEECTL);
- CLOCK_PULSE(p);
- aic_outb(p, SEEMS | SEECK, SEECTL);
- CLOCK_PULSE(p);
- aic_outb(p, SEEMS, SEECTL);
- CLOCK_PULSE(p);
- }
-
- /*
- * Release access to the memory port and the serial EEPROM.
- */
- release_seeprom(p);
-
-#if 0
- printk("Computed checksum 0x%x, checksum read 0x%x\n",
- checksum, scarray[len - 1]);
- printk("Serial EEPROM:");
- for (k = 0; k < len; k++)
- {
- if (((k % 8) == 0) && (k != 0))
- {
- printk("\n ");
- }
- printk(" 0x%x", scarray[k]);
- }
- printk("\n");
-#endif
- if ( (checksum != scarray[len - 1]) || (checksum == 0) )
- {
- return (0);
- }
-
- return (1);
-}
-
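-/*
- * Illustrative sketch (added; not part of the original driver): how the
- * 93C46/56/66 READ command described in the table above is assembled
- * before being clocked out bit by bit.  A start bit and the "10" opcode
- * come first, followed by the register address, MSB first (6 address
- * bits for the 93C46, 8 for the 93C56/66).  The helper name and buffer
- * layout are hypothetical.
- */
-static void
-seeprom_build_read_cmd(unsigned int addr, int addr_bits,
-                       unsigned char *bits /* room for 3 + addr_bits */)
-{
-  int i;
-
-  bits[0] = 1;                          /* start bit */
-  bits[1] = 1;                          /* opcode "10" == READ */
-  bits[2] = 0;
-  for (i = 0; i < addr_bits; i++)       /* address, MSB first */
-    bits[3 + i] = (addr >> (addr_bits - 1 - i)) & 1;
-}
-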
-/*+F*************************************************************************
- * Function:
- * read_brdctl
- *
- * Description:
- * Reads the BRDCTL register.
- *-F*************************************************************************/
-static unsigned char
-read_brdctl(struct aic7xxx_host *p)
-{
- unsigned char brdctl, value;
-
- /*
- * Make sure the SEEPROM is ready before we access it
- */
- CLOCK_PULSE(p);
- if (p->features & AHC_ULTRA2)
- {
- brdctl = BRDRW_ULTRA2;
- aic_outb(p, brdctl, BRDCTL);
- CLOCK_PULSE(p);
- value = aic_inb(p, BRDCTL);
- CLOCK_PULSE(p);
- return(value);
- }
- brdctl = BRDRW;
- if ( !((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895) ||
- (p->flags & AHC_CHNLB) )
- {
- brdctl |= BRDCS;
- }
- aic_outb(p, brdctl, BRDCTL);
- CLOCK_PULSE(p);
- value = aic_inb(p, BRDCTL);
- CLOCK_PULSE(p);
- aic_outb(p, 0, BRDCTL);
- CLOCK_PULSE(p);
- return (value);
-}
-
-/*+F*************************************************************************
- * Function:
- * write_brdctl
- *
- * Description:
- * Writes a value to the BRDCTL register.
- *-F*************************************************************************/
-static void
-write_brdctl(struct aic7xxx_host *p, unsigned char value)
-{
- unsigned char brdctl;
-
- /*
- * Make sure the SEEPROM is ready before we access it
- */
- CLOCK_PULSE(p);
- if (p->features & AHC_ULTRA2)
- {
- brdctl = value;
- aic_outb(p, brdctl, BRDCTL);
- CLOCK_PULSE(p);
- brdctl |= BRDSTB_ULTRA2;
- aic_outb(p, brdctl, BRDCTL);
- CLOCK_PULSE(p);
- brdctl &= ~BRDSTB_ULTRA2;
- aic_outb(p, brdctl, BRDCTL);
- CLOCK_PULSE(p);
- read_brdctl(p);
- CLOCK_PULSE(p);
- }
- else
- {
- brdctl = BRDSTB;
- if ( !((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895) ||
- (p->flags & AHC_CHNLB) )
- {
- brdctl |= BRDCS;
- }
- aic_outb(p, brdctl, BRDCTL);
- CLOCK_PULSE(p);
- brdctl |= value;
- aic_outb(p, brdctl, BRDCTL);
- CLOCK_PULSE(p);
- brdctl &= ~BRDSTB;
- aic_outb(p, brdctl, BRDCTL);
- CLOCK_PULSE(p);
- brdctl &= ~BRDCS;
- aic_outb(p, brdctl, BRDCTL);
- CLOCK_PULSE(p);
- }
-}
-
-/*+F*************************************************************************
- * Function:
- * aic785x_cable_detect
- *
- * Description:
- * Detect the cables that are present on aic785x class controller chips
- *-F*************************************************************************/
-static void
-aic785x_cable_detect(struct aic7xxx_host *p, int *int_50,
- int *ext_present, int *eeprom)
-{
- unsigned char brdctl;
-
- aic_outb(p, BRDRW | BRDCS, BRDCTL);
- CLOCK_PULSE(p);
- aic_outb(p, 0, BRDCTL);
- CLOCK_PULSE(p);
- brdctl = aic_inb(p, BRDCTL);
- CLOCK_PULSE(p);
- *int_50 = !(brdctl & BRDDAT5);
- *ext_present = !(brdctl & BRDDAT6);
- *eeprom = (aic_inb(p, SPIOCAP) & EEPROM);
-}
-
-#undef CLOCK_PULSE
-
-/*+F*************************************************************************
- * Function:
- *   aic2940_uwpro_wide_cable_detect
- *
- * Description:
- * Detect the cables that are present on the 2940-UWPro cards
- *
- * NOTE: This function assumes the SEEPROM will have already been acquired
- * prior to invocation of this function.
- *-F*************************************************************************/
-static void
-aic2940_uwpro_wide_cable_detect(struct aic7xxx_host *p, int *int_68,
- int *ext_68, int *eeprom)
-{
- unsigned char brdctl;
-
- /*
- * First read the status of our cables. Set the rom bank to
- * 0 since the bank setting serves as a multiplexor for the
- * cable detection logic. BRDDAT5 controls the bank switch.
- */
- write_brdctl(p, 0);
-
- /*
- * Now we read the state of the internal 68 connector. BRDDAT6
- * is don't care, BRDDAT7 is internal 68. The cable is
- * present if the bit is 0
- */
- brdctl = read_brdctl(p);
- *int_68 = !(brdctl & BRDDAT7);
-
- /*
- * Set the bank bit in brdctl and then read the external cable state
- * and the EEPROM status
- */
- write_brdctl(p, BRDDAT5);
- brdctl = read_brdctl(p);
-
- *ext_68 = !(brdctl & BRDDAT6);
- *eeprom = !(brdctl & BRDDAT7);
-
- /*
- * We're done, the calling function will release the SEEPROM for us
- */
-}
-
-/*+F*************************************************************************
- * Function:
- * aic787x_cable_detect
- *
- * Description:
- * Detect the cables that are present on aic787x class controller chips
- *
- * NOTE: This function assumes the SEEPROM will have already been acquired
- * prior to invocation of this function.
- *-F*************************************************************************/
-static void
-aic787x_cable_detect(struct aic7xxx_host *p, int *int_50, int *int_68,
- int *ext_present, int *eeprom)
-{
- unsigned char brdctl;
-
- /*
- * First read the status of our cables. Set the rom bank to
- * 0 since the bank setting serves as a multiplexor for the
- * cable detection logic. BRDDAT5 controls the bank switch.
- */
- write_brdctl(p, 0);
-
- /*
- * Now we read the state of the two internal connectors. BRDDAT6
- * is internal 50, BRDDAT7 is internal 68. For each, the cable is
- * present if the bit is 0
- */
- brdctl = read_brdctl(p);
- *int_50 = !(brdctl & BRDDAT6);
- *int_68 = !(brdctl & BRDDAT7);
-
- /*
- * Set the bank bit in brdctl and then read the external cable state
- * and the EEPROM status
- */
- write_brdctl(p, BRDDAT5);
- brdctl = read_brdctl(p);
-
- *ext_present = !(brdctl & BRDDAT6);
- *eeprom = !(brdctl & BRDDAT7);
-
- /*
- * We're done, the calling function will release the SEEPROM for us
- */
-}
-
-/*+F*************************************************************************
- * Function:
- *   aic7xxx_ultra2_term_detect
- *
- * Description:
- * Detect the termination settings present on ultra2 class controllers
- *
- * NOTE: This function assumes the SEEPROM will have already been acquired
- * prior to invocation of this function.
- *-F*************************************************************************/
-static void
-aic7xxx_ultra2_term_detect(struct aic7xxx_host *p, int *enableSE_low,
- int *enableSE_high, int *enableLVD_low,
- int *enableLVD_high, int *eprom_present)
-{
- unsigned char brdctl;
-
- brdctl = read_brdctl(p);
-
- *eprom_present = (brdctl & BRDDAT7);
- *enableSE_high = (brdctl & BRDDAT6);
- *enableSE_low = (brdctl & BRDDAT5);
- *enableLVD_high = (brdctl & BRDDAT4);
- *enableLVD_low = (brdctl & BRDDAT3);
-}
-
-/*+F*************************************************************************
- * Function:
- * configure_termination
- *
- * Description:
- * Configures the termination settings on PCI adapters that have
- * SEEPROMs available.
- *-F*************************************************************************/
-static void
-configure_termination(struct aic7xxx_host *p)
-{
- int internal50_present = 0;
- int internal68_present = 0;
- int external_present = 0;
- int eprom_present = 0;
- int enableSE_low = 0;
- int enableSE_high = 0;
- int enableLVD_low = 0;
- int enableLVD_high = 0;
- unsigned char brddat = 0;
- unsigned char max_target = 0;
- unsigned char sxfrctl1 = aic_inb(p, SXFRCTL1);
-
- if (acquire_seeprom(p))
- {
- if (p->features & (AHC_WIDE|AHC_TWIN))
- max_target = 16;
- else
- max_target = 8;
- aic_outb(p, SEEMS | SEECS, SEECTL);
- sxfrctl1 &= ~STPWEN;
- /*
- * The termination/cable detection logic is split into three distinct
- * groups: Ultra2 and later controllers, 2940UW-Pro controllers, and
- * older 7850, 7860, 7870, 7880, and 7895 controllers. Each has its
- * own unique way of detecting its cables and writing the results
- * back to the card.
- */
- if (p->features & AHC_ULTRA2)
- {
- /*
- * As long as user hasn't overridden term settings, always check the
- * cable detection logic
- */
- if (aic7xxx_override_term == -1)
- {
- aic7xxx_ultra2_term_detect(p, &enableSE_low, &enableSE_high,
- &enableLVD_low, &enableLVD_high,
- &eprom_present);
- }
-
- /*
- * If the user is overriding settings, then they have been preserved
- * to here as fake adapter_control entries. Parse them and allow
- * them to override the detected settings (if we even did detection).
- */
- if (!(p->adapter_control & CFSEAUTOTERM))
- {
- enableSE_low = (p->adapter_control & CFSTERM);
- enableSE_high = (p->adapter_control & CFWSTERM);
- }
- if (!(p->adapter_control & CFAUTOTERM))
- {
- enableLVD_low = enableLVD_high = (p->adapter_control & CFLVDSTERM);
- }
-
- /*
- * Now take those settings that we have and translate them into the
- * values that must be written into the registers.
- *
- * Flash Enable = BRDDAT7
- * Secondary High Term Enable = BRDDAT6
- * Secondary Low Term Enable = BRDDAT5
- * LVD/Primary High Term Enable = BRDDAT4
- * LVD/Primary Low Term Enable = STPWEN bit in SXFRCTL1
- */
- if (enableLVD_low != 0)
- {
- sxfrctl1 |= STPWEN;
- p->flags |= AHC_TERM_ENB_LVD;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(KERN_INFO "(scsi%d) LVD/Primary Low byte termination "
- "Enabled\n", p->host_no);
- }
-
- if (enableLVD_high != 0)
- {
- brddat |= BRDDAT4;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(KERN_INFO "(scsi%d) LVD/Primary High byte termination "
- "Enabled\n", p->host_no);
- }
-
- if (enableSE_low != 0)
- {
- brddat |= BRDDAT5;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(KERN_INFO "(scsi%d) Secondary Low byte termination "
- "Enabled\n", p->host_no);
- }
-
- if (enableSE_high != 0)
- {
- brddat |= BRDDAT6;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(KERN_INFO "(scsi%d) Secondary High byte termination "
- "Enabled\n", p->host_no);
- }
- }
- else if (p->features & AHC_NEW_AUTOTERM)
- {
- /*
- * The 50 pin connector termination is controlled by STPWEN in the
- * SXFRCTL1 register. Since the Adaptec docs typically say the
- * controller is not allowed to be in the middle of a cable and
- * this is the only connection on that stub of the bus, there is
- * no need to even check for narrow termination; it's simply
- * always on.
- */
- sxfrctl1 |= STPWEN;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(KERN_INFO "(scsi%d) Narrow channel termination Enabled\n",
- p->host_no);
-
- if (p->adapter_control & CFAUTOTERM)
- {
- aic2940_uwpro_wide_cable_detect(p, &internal68_present,
- &external_present,
- &eprom_present);
- printk(KERN_INFO "(scsi%d) Cables present (Int-50 %s, Int-68 %s, "
- "Ext-68 %s)\n", p->host_no,
- "Don't Care",
- internal68_present ? "YES" : "NO",
- external_present ? "YES" : "NO");
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(KERN_INFO "(scsi%d) EEPROM %s present.\n", p->host_no,
- eprom_present ? "is" : "is not");
- if (internal68_present && external_present)
- {
- brddat = 0;
- p->flags &= ~AHC_TERM_ENB_SE_HIGH;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(KERN_INFO "(scsi%d) Wide channel termination Disabled\n",
- p->host_no);
- }
- else
- {
- brddat = BRDDAT6;
- p->flags |= AHC_TERM_ENB_SE_HIGH;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(KERN_INFO "(scsi%d) Wide channel termination Enabled\n",
- p->host_no);
- }
- }
- else
- {
- /*
- * The termination of the Wide channel is done more like normal
- * though, and the setting of this termination is done by writing
- * either a 0 or 1 to BRDDAT6 of the BRDDAT register
- */
- if (p->adapter_control & CFWSTERM)
- {
- brddat = BRDDAT6;
- p->flags |= AHC_TERM_ENB_SE_HIGH;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(KERN_INFO "(scsi%d) Wide channel termination Enabled\n",
- p->host_no);
- }
- else
- {
- brddat = 0;
- }
- }
- }
- else
- {
- if (p->adapter_control & CFAUTOTERM)
- {
- if (p->flags & AHC_MOTHERBOARD)
- {
- printk(KERN_INFO "(scsi%d) Warning - detected auto-termination\n",
- p->host_no);
- printk(KERN_INFO "(scsi%d) Please verify driver detected settings "
- "are correct.\n", p->host_no);
- printk(KERN_INFO "(scsi%d) If not, then please properly set the "
- "device termination\n", p->host_no);
- printk(KERN_INFO "(scsi%d) in the Adaptec SCSI BIOS by hitting "
- "CTRL-A when prompted\n", p->host_no);
- printk(KERN_INFO "(scsi%d) during machine bootup.\n", p->host_no);
- }
- /* Configure auto termination. */
-
- if ( (p->chip & AHC_CHIPID_MASK) >= AHC_AIC7870 )
- {
- aic787x_cable_detect(p, &internal50_present, &internal68_present,
- &external_present, &eprom_present);
- }
- else
- {
- aic785x_cable_detect(p, &internal50_present, &external_present,
- &eprom_present);
- }
-
- if (max_target <= 8)
- internal68_present = 0;
-
- if (max_target > 8)
- {
- printk(KERN_INFO "(scsi%d) Cables present (Int-50 %s, Int-68 %s, "
- "Ext-68 %s)\n", p->host_no,
- internal50_present ? "YES" : "NO",
- internal68_present ? "YES" : "NO",
- external_present ? "YES" : "NO");
- }
- else
- {
- printk(KERN_INFO "(scsi%d) Cables present (Int-50 %s, Ext-50 %s)\n",
- p->host_no,
- internal50_present ? "YES" : "NO",
- external_present ? "YES" : "NO");
- }
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(KERN_INFO "(scsi%d) EEPROM %s present.\n", p->host_no,
- eprom_present ? "is" : "is not");
-
- /*
- * Now set the termination based on what we found. BRDDAT6
- * controls wide termination enable.
- * Flash Enable = BRDDAT7
- * SE High Term Enable = BRDDAT6
- */
- if (internal50_present && internal68_present && external_present)
- {
- printk(KERN_INFO "(scsi%d) Illegal cable configuration!! Only two\n",
- p->host_no);
- printk(KERN_INFO "(scsi%d) connectors on the SCSI controller may be "
- "in use at a time!\n", p->host_no);
- /*
- * Force termination (low and high byte) on. This is safer than
- * leaving it completely off, especially since this message comes
- * most often from motherboard controllers that don't even have 3
- * connectors, but instead are failing the cable detection.
- */
- internal50_present = external_present = 0;
- enableSE_high = enableSE_low = 1;
- }
-
- if ((max_target > 8) &&
- ((external_present == 0) || (internal68_present == 0)) )
- {
- brddat |= BRDDAT6;
- p->flags |= AHC_TERM_ENB_SE_HIGH;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(KERN_INFO "(scsi%d) SE High byte termination Enabled\n",
- p->host_no);
- }
-
- if ( ((internal50_present ? 1 : 0) +
- (internal68_present ? 1 : 0) +
- (external_present ? 1 : 0)) <= 1 )
- {
- sxfrctl1 |= STPWEN;
- p->flags |= AHC_TERM_ENB_SE_LOW;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(KERN_INFO "(scsi%d) SE Low byte termination Enabled\n",
- p->host_no);
- }
- }
- else /* p->adapter_control & CFAUTOTERM */
- {
- if (p->adapter_control & CFSTERM)
- {
- sxfrctl1 |= STPWEN;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(KERN_INFO "(scsi%d) SE Low byte termination Enabled\n",
- p->host_no);
- }
-
- if (p->adapter_control & CFWSTERM)
- {
- brddat |= BRDDAT6;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(KERN_INFO "(scsi%d) SE High byte termination Enabled\n",
- p->host_no);
- }
- }
- }
-
- aic_outb(p, sxfrctl1, SXFRCTL1);
- write_brdctl(p, brddat);
- release_seeprom(p);
- }
-}
-
-/*+F*************************************************************************
- * Function:
- * detect_maxscb
- *
- * Description:
- * Detects the maximum number of hardware SCBs for the controller and
- * records the count in p->scb_data->maxhscbs.
- *-F*************************************************************************/
-static void
-detect_maxscb(struct aic7xxx_host *p)
-{
- int i;
-
- /*
- * It's possible that we've already done this for multichannel
- * adapters.
- */
- if (p->scb_data->maxhscbs == 0)
- {
- /*
- * We haven't initialized the SCB settings yet. Walk the SCBs to
- * determine how many there are.
- */
- aic_outb(p, 0, FREE_SCBH);
-
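- /*
-  * Probe each candidate SCB by writing its index into SCB_CONTROL and
-  * reading it back; re-checking SCB 0 afterwards catches address
-  * wrap-around (aliasing) on parts with fewer SCBs.
-  */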
- for (i = 0; i < AIC7XXX_MAXSCB; i++)
- {
- aic_outb(p, i, SCBPTR);
- aic_outb(p, i, SCB_CONTROL);
- if (aic_inb(p, SCB_CONTROL) != i)
- break;
- aic_outb(p, 0, SCBPTR);
- if (aic_inb(p, SCB_CONTROL) != 0)
- break;
-
- aic_outb(p, i, SCBPTR);
- aic_outb(p, 0, SCB_CONTROL); /* Clear the control byte. */
- aic_outb(p, i + 1, SCB_NEXT); /* Set the next pointer. */
- aic_outb(p, SCB_LIST_NULL, SCB_TAG); /* Make the tag invalid. */
- aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS); /* no busy untagged */
- aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS+1);/* targets active yet */
- aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS+2);
- aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS+3);
- }
-
- /* Make sure the last SCB terminates the free list. */
- aic_outb(p, i - 1, SCBPTR);
- aic_outb(p, SCB_LIST_NULL, SCB_NEXT);
-
- /* Ensure we clear the first (0) SCBs control byte. */
- aic_outb(p, 0, SCBPTR);
- aic_outb(p, 0, SCB_CONTROL);
-
- p->scb_data->maxhscbs = i;
- /*
- * Use direct indexing instead for speed
- */
- if ( i == AIC7XXX_MAXSCB )
- p->flags &= ~AHC_PAGESCBS;
- }
-
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_register
- *
- * Description:
- *   Register an Adaptec aic7xxx chip SCSI controller with the kernel.
- *-F*************************************************************************/
-static int
-aic7xxx_register(struct scsi_host_template *template, struct aic7xxx_host *p,
- int reset_delay)
-{
- int i, result;
- int max_targets;
- int found = 1;
- unsigned char term, scsi_conf;
- struct Scsi_Host *host;
-
- host = p->host;
-
- p->scb_data->maxscbs = AIC7XXX_MAXSCB;
- host->can_queue = AIC7XXX_MAXSCB;
- host->cmd_per_lun = 3;
- host->sg_tablesize = AIC7XXX_MAX_SG;
- host->this_id = p->scsi_id;
- host->io_port = p->base;
- host->n_io_port = 0xFF;
- host->base = p->mbase;
- host->irq = p->irq;
- if (p->features & AHC_WIDE)
- {
- host->max_id = 16;
- }
- if (p->features & AHC_TWIN)
- {
- host->max_channel = 1;
- }
-
- p->host = host;
- p->host_no = host->host_no;
- host->unique_id = p->instance;
- p->isr_count = 0;
- p->next = NULL;
- p->completeq.head = NULL;
- p->completeq.tail = NULL;
- scbq_init(&p->scb_data->free_scbs);
- scbq_init(&p->waiting_scbs);
- INIT_LIST_HEAD(&p->aic_devs);
-
- /*
- * We currently have no commands of any type
- */
- p->qinfifonext = 0;
- p->qoutfifonext = 0;
-
- printk(KERN_INFO "(scsi%d) <%s> found at ", p->host_no,
- board_names[p->board_name_index]);
- switch(p->chip)
- {
- case (AHC_AIC7770|AHC_EISA):
- printk("EISA slot %d\n", p->pci_device_fn);
- break;
- case (AHC_AIC7770|AHC_VL):
- printk("VLB slot %d\n", p->pci_device_fn);
- break;
- default:
- printk("PCI %d/%d/%d\n", p->pci_bus, PCI_SLOT(p->pci_device_fn),
- PCI_FUNC(p->pci_device_fn));
- break;
- }
- if (p->features & AHC_TWIN)
- {
- printk(KERN_INFO "(scsi%d) Twin Channel, A SCSI ID %d, B SCSI ID %d, ",
- p->host_no, p->scsi_id, p->scsi_id_b);
- }
- else
- {
- char *channel;
-
- channel = "";
-
- if ((p->flags & AHC_MULTI_CHANNEL) != 0)
- {
- channel = " A";
-
- if ( (p->flags & (AHC_CHNLB|AHC_CHNLC)) != 0 )
- {
- channel = (p->flags & AHC_CHNLB) ? " B" : " C";
- }
- }
- if (p->features & AHC_WIDE)
- {
- printk(KERN_INFO "(scsi%d) Wide ", p->host_no);
- }
- else
- {
- printk(KERN_INFO "(scsi%d) Narrow ", p->host_no);
- }
- printk("Channel%s, SCSI ID=%d, ", channel, p->scsi_id);
- }
- aic_outb(p, 0, SEQ_FLAGS);
-
- detect_maxscb(p);
-
- printk("%d/%d SCBs\n", p->scb_data->maxhscbs, p->scb_data->maxscbs);
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- {
- printk(KERN_INFO "(scsi%d) BIOS %sabled, IO Port 0x%lx, IRQ %d\n",
- p->host_no, (p->flags & AHC_BIOS_ENABLED) ? "en" : "dis",
- p->base, p->irq);
- printk(KERN_INFO "(scsi%d) IO Memory at 0x%lx, MMAP Memory at %p\n",
- p->host_no, p->mbase, p->maddr);
- }
-
-#ifdef CONFIG_PCI
- /*
- * Now that we know our instance number, we can set the flags we need to
- * force termination if need be.
- */
- if (aic7xxx_stpwlev != -1)
- {
- /*
- * This option only applies to PCI controllers.
- */
- if ( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI)
- {
- unsigned char devconfig;
-
- pci_read_config_byte(p->pdev, DEVCONFIG, &devconfig);
- if ( (aic7xxx_stpwlev >> p->instance) & 0x01 )
- {
- devconfig |= STPWLEVEL;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk("(scsi%d) Force setting STPWLEVEL bit\n", p->host_no);
- }
- else
- {
- devconfig &= ~STPWLEVEL;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk("(scsi%d) Force clearing STPWLEVEL bit\n", p->host_no);
- }
- pci_write_config_byte(p->pdev, DEVCONFIG, devconfig);
- }
- }
-#endif
-
- /*
- * That took care of devconfig and stpwlev, now for the actual termination
- * settings.
- */
- if (aic7xxx_override_term != -1)
- {
- /*
- * Again, this only applies to PCI controllers. We don't have problems
- * with the termination on 274x controllers to the best of my knowledge.
- */
- if ( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI)
- {
- unsigned char term_override;
-
- term_override = ( (aic7xxx_override_term >> (p->instance * 4)) & 0x0f);
- p->adapter_control &=
- ~(CFSTERM|CFWSTERM|CFLVDSTERM|CFAUTOTERM|CFSEAUTOTERM);
- if ( (p->features & AHC_ULTRA2) && (term_override & 0x0c) )
- {
- p->adapter_control |= CFLVDSTERM;
- }
- if (term_override & 0x02)
- {
- p->adapter_control |= CFWSTERM;
- }
- if (term_override & 0x01)
- {
- p->adapter_control |= CFSTERM;
- }
- }
- }
-
- if ( (p->flags & AHC_SEEPROM_FOUND) || (aic7xxx_override_term != -1) )
- {
- if (p->features & AHC_SPIOCAP)
- {
- if ( aic_inb(p, SPIOCAP) & SSPIOCPS )
- /*
- * Update the settings in sxfrctl1 to match the termination
- * settings.
- */
- configure_termination(p);
- }
- else if ((p->chip & AHC_CHIPID_MASK) >= AHC_AIC7870)
- {
- configure_termination(p);
- }
- }
-
- /*
- * Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels
- */
- if (p->features & AHC_TWIN)
- {
- /* Select channel B */
- aic_outb(p, aic_inb(p, SBLKCTL) | SELBUSB, SBLKCTL);
-
- if ((p->flags & AHC_SEEPROM_FOUND) || (aic7xxx_override_term != -1))
- term = (aic_inb(p, SXFRCTL1) & STPWEN);
- else
- term = ((p->flags & AHC_TERM_ENB_B) ? STPWEN : 0);
-
- aic_outb(p, p->scsi_id_b, SCSIID);
- scsi_conf = aic_inb(p, SCSICONF + 1);
- aic_outb(p, DFON | SPIOEN, SXFRCTL0);
- aic_outb(p, (scsi_conf & ENSPCHK) | aic7xxx_seltime | term |
- ENSTIMER | ACTNEGEN, SXFRCTL1);
- aic_outb(p, 0, SIMODE0);
- aic_outb(p, ENSELTIMO | ENSCSIRST | ENSCSIPERR, SIMODE1);
- aic_outb(p, 0, SCSIRATE);
-
- /* Select channel A */
- aic_outb(p, aic_inb(p, SBLKCTL) & ~SELBUSB, SBLKCTL);
- }
-
- if (p->features & AHC_ULTRA2)
- {
- aic_outb(p, p->scsi_id, SCSIID_ULTRA2);
- }
- else
- {
- aic_outb(p, p->scsi_id, SCSIID);
- }
- if ((p->flags & AHC_SEEPROM_FOUND) || (aic7xxx_override_term != -1))
- term = (aic_inb(p, SXFRCTL1) & STPWEN);
- else
- term = ((p->flags & (AHC_TERM_ENB_A|AHC_TERM_ENB_LVD)) ? STPWEN : 0);
- scsi_conf = aic_inb(p, SCSICONF);
- aic_outb(p, DFON | SPIOEN, SXFRCTL0);
- aic_outb(p, (scsi_conf & ENSPCHK) | aic7xxx_seltime | term |
- ENSTIMER | ACTNEGEN, SXFRCTL1);
- aic_outb(p, 0, SIMODE0);
- /*
- * If we are a cardbus adapter then don't enable SCSI reset detection.
- * We aren't likely to be sharing the SCSI bus with anyone else, and
- * if we don't have a cable currently plugged into the controller then
- * we won't have a power source for the SCSI termination, which means
- * we'll see infinite incoming bus resets.
- */
- if(p->flags & AHC_NO_STPWEN)
- aic_outb(p, ENSELTIMO | ENSCSIPERR, SIMODE1);
- else
- aic_outb(p, ENSELTIMO | ENSCSIRST | ENSCSIPERR, SIMODE1);
- aic_outb(p, 0, SCSIRATE);
- if ( p->features & AHC_ULTRA2)
- aic_outb(p, 0, SCSIOFFSET);
-
- /*
- * Look at the information that board initialization or the board
- * BIOS has left us. In the lower four bits of each target's
- * scratch space any value other than 0 indicates that we should
- * initiate synchronous transfers. If it's zero, the user or the
- * BIOS has decided to disable synchronous negotiation to that
- * target so we don't activate the needsdtr flag.
- */
- if ((p->features & (AHC_TWIN|AHC_WIDE)) == 0)
- {
- max_targets = 8;
- }
- else
- {
- max_targets = 16;
- }
-
- if (!(aic7xxx_no_reset))
- {
- /*
- * If we reset the bus, then clear the transfer settings, else leave
- * them be.
- */
- aic_outb(p, 0, ULTRA_ENB);
- aic_outb(p, 0, ULTRA_ENB + 1);
- p->ultraenb = 0;
- }
-
- /*
- * Allocate enough hardware scbs to handle the maximum number of
- * concurrent transactions we can have. We have to make sure that
- * the allocated memory is physically contiguous. We use
- * pci_alloc_consistent() below, which guarantees a contiguous,
- * DMA-coherent area.
- */
- {
- size_t array_size;
- unsigned int hscb_physaddr;
-
- array_size = p->scb_data->maxscbs * sizeof(struct aic7xxx_hwscb);
- if (p->scb_data->hscbs == NULL)
- {
- /* pci_alloc_consistent enforces the alignment already and
- * clears the area as well.
- */
- p->scb_data->hscbs = pci_alloc_consistent(p->pdev, array_size,
- &p->scb_data->hscbs_dma);
- /* We have to use pci_free_consistent, not kfree */
- p->scb_data->hscb_kmalloc_ptr = NULL;
- p->scb_data->hscbs_dma_len = array_size;
- }
- if (p->scb_data->hscbs == NULL)
- {
- printk("(scsi%d) Unable to allocate hardware SCB array; "
- "failing detection.\n", p->host_no);
- aic_outb(p, 0, SIMODE1);
- p->irq = 0;
- return(0);
- }
-
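- /*
-  * Tell the sequencer where the hardware SCB array lives: the 32-bit
-  * DMA address is written one byte at a time, least significant byte
-  * first, into the four consecutive HSCB_ADDR registers.
-  */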
- hscb_physaddr = p->scb_data->hscbs_dma;
- aic_outb(p, hscb_physaddr & 0xFF, HSCB_ADDR);
- aic_outb(p, (hscb_physaddr >> 8) & 0xFF, HSCB_ADDR + 1);
- aic_outb(p, (hscb_physaddr >> 16) & 0xFF, HSCB_ADDR + 2);
- aic_outb(p, (hscb_physaddr >> 24) & 0xFF, HSCB_ADDR + 3);
-
- /* Set up the fifo areas at the same time */
- p->untagged_scbs = pci_alloc_consistent(p->pdev, 3*256, &p->fifo_dma);
- if (p->untagged_scbs == NULL)
- {
- printk("(scsi%d) Unable to allocate hardware FIFO arrays; "
- "failing detection.\n", p->host_no);
- p->irq = 0;
- return(0);
- }
-
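- /*
-  * The single 768-byte allocation is split into three 256-entry byte
-  * arrays: the untagged-SCB array, the queue-out FIFO, and the
-  * queue-in FIFO, in that order.
-  */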
- p->qoutfifo = p->untagged_scbs + 256;
- p->qinfifo = p->qoutfifo + 256;
- for (i = 0; i < 256; i++)
- {
- p->untagged_scbs[i] = SCB_LIST_NULL;
- p->qinfifo[i] = SCB_LIST_NULL;
- p->qoutfifo[i] = SCB_LIST_NULL;
- }
-
- hscb_physaddr = p->fifo_dma;
- aic_outb(p, hscb_physaddr & 0xFF, SCBID_ADDR);
- aic_outb(p, (hscb_physaddr >> 8) & 0xFF, SCBID_ADDR + 1);
- aic_outb(p, (hscb_physaddr >> 16) & 0xFF, SCBID_ADDR + 2);
- aic_outb(p, (hscb_physaddr >> 24) & 0xFF, SCBID_ADDR + 3);
- }
-
- /* The Q-FIFOs we just set up are all empty */
- aic_outb(p, 0, QINPOS);
- aic_outb(p, 0, KERNEL_QINPOS);
- aic_outb(p, 0, QOUTPOS);
-
- if(p->features & AHC_QUEUE_REGS)
- {
- aic_outb(p, SCB_QSIZE_256, QOFF_CTLSTA);
- aic_outb(p, 0, SDSCB_QOFF);
- aic_outb(p, 0, SNSCB_QOFF);
- aic_outb(p, 0, HNSCB_QOFF);
- }
-
- /*
- * We don't have any waiting selections or disconnected SCBs.
- */
- aic_outb(p, SCB_LIST_NULL, WAITING_SCBH);
- aic_outb(p, SCB_LIST_NULL, DISCONNECTED_SCBH);
-
- /*
- * Message out buffer starts empty
- */
- aic_outb(p, MSG_NOOP, MSG_OUT);
- aic_outb(p, MSG_NOOP, LAST_MSG);
-
- /*
- * Set all the other sundry items that haven't been set yet. This
- * includes just dumping init values to a lot of registers simply to
- * make sure they've been touched and are in a known state as far as
- * parity is concerned.
- */
- aic_outb(p, 0, TMODE_CMDADDR);
- aic_outb(p, 0, TMODE_CMDADDR + 1);
- aic_outb(p, 0, TMODE_CMDADDR + 2);
- aic_outb(p, 0, TMODE_CMDADDR + 3);
- aic_outb(p, 0, TMODE_CMDADDR_NEXT);
-
- /*
- * Link us into the list of valid hosts
- */
- p->next = first_aic7xxx;
- first_aic7xxx = p;
-
- /*
- * Allocate the first set of SCBs for this controller. This streamlines
- * code elsewhere in the driver; having to check for the existence of
- * SCBs in certain code sections would slow things down. More
- * importantly, as soon as we register the IRQ for this card we could
- * take an interrupt, possibly including SCSI_RSTI. If that happens
- * before at least one chunk of SCBs has been allocated, we would likely
- * segfault unless checks were added throughout the reset code, so it's
- * preferable to simply pre-allocate the first chunk of SCBs here.
- */
- aic7xxx_allocate_scb(p);
-
- /*
- * Load the sequencer program, then re-enable the board -
- * resetting the AIC-7770 disables it, leaving the lights
- * on with nobody home.
- */
- aic7xxx_loadseq(p);
-
- /*
- * Make sure the AUTOFLUSHDIS bit is *not* set in the SBLKCTL register
- */
- aic_outb(p, aic_inb(p, SBLKCTL) & ~AUTOFLUSHDIS, SBLKCTL);
-
- if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
- {
- aic_outb(p, ENABLE, BCTL); /* Enable the boards BUS drivers. */
- }
-
- if ( !(aic7xxx_no_reset) )
- {
- if (p->features & AHC_TWIN)
- {
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk(KERN_INFO "(scsi%d) Resetting channel B\n", p->host_no);
- aic_outb(p, aic_inb(p, SBLKCTL) | SELBUSB, SBLKCTL);
- aic7xxx_reset_current_bus(p);
- aic_outb(p, aic_inb(p, SBLKCTL) & ~SELBUSB, SBLKCTL);
- }
- /* Reset SCSI bus A. */
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- { /* In case we are a 3940, 3985, or 7895, print the right channel */
- char *channel = "";
- if (p->flags & AHC_MULTI_CHANNEL)
- {
- channel = " A";
- if (p->flags & (AHC_CHNLB|AHC_CHNLC))
- channel = (p->flags & AHC_CHNLB) ? " B" : " C";
- }
- printk(KERN_INFO "(scsi%d) Resetting channel%s\n", p->host_no, channel);
- }
-
- aic7xxx_reset_current_bus(p);
-
- }
- else
- {
- if (!reset_delay)
- {
- printk(KERN_INFO "(scsi%d) Not resetting SCSI bus. Note: Don't use "
- "the no_reset\n", p->host_no);
- printk(KERN_INFO "(scsi%d) option unless you have a verifiable need "
- "for it.\n", p->host_no);
- }
- }
-
- /*
- * Register IRQ with the kernel. Only allow sharing IRQs with
- * PCI devices.
- */
- if (!(p->chip & AHC_PCI))
- {
- result = (request_irq(p->irq, do_aic7xxx_isr, 0, "aic7xxx", p));
- }
- else
- {
- result = (request_irq(p->irq, do_aic7xxx_isr, IRQF_SHARED,
- "aic7xxx", p));
- if (result < 0)
- {
- result = (request_irq(p->irq, do_aic7xxx_isr, IRQF_DISABLED | IRQF_SHARED,
- "aic7xxx", p));
- }
- }
- if (result < 0)
- {
- printk(KERN_WARNING "(scsi%d) Couldn't register IRQ %d, ignoring "
- "controller.\n", p->host_no, p->irq);
- aic_outb(p, 0, SIMODE1);
- p->irq = 0;
- return (0);
- }
-
- if(aic_inb(p, INTSTAT) & INT_PEND)
- printk(INFO_LEAD "spurious interrupt during configuration, cleared.\n",
- p->host_no, -1, -1 , -1);
- aic7xxx_clear_intstat(p);
-
- unpause_sequencer(p, /* unpause_always */ TRUE);
-
- return (found);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_chip_reset
- *
- * Description:
- * Perform a chip reset on the aic7xxx SCSI controller. The controller
- * is paused upon return.
- *-F*************************************************************************/
-static int
-aic7xxx_chip_reset(struct aic7xxx_host *p)
-{
- unsigned char sblkctl;
- int wait;
-
- /*
- * For some 274x boards, we must clear the CHIPRST bit and pause
- * the sequencer. For some reason, this makes the driver work.
- */
- aic_outb(p, PAUSE | CHIPRST, HCNTRL);
-
- /*
- * In the future, we may call this function as a last resort for
- * error handling. Let's be nice and not do any unnecessary delays.
- */
- wait = 1000; /* 1 msec (1000 * 1 usec) */
- while (--wait && !(aic_inb(p, HCNTRL) & CHIPRSTACK))
- {
- udelay(1); /* 1 usec */
- }
-
- pause_sequencer(p);
-
- sblkctl = aic_inb(p, SBLKCTL) & (SELBUSB|SELWIDE);
- if (p->chip & AHC_PCI)
- sblkctl &= ~SELBUSB;
- switch( sblkctl )
- {
- case 0: /* normal narrow card */
- break;
- case 2: /* Wide card */
- p->features |= AHC_WIDE;
- break;
- case 8: /* Twin card */
- p->features |= AHC_TWIN;
- p->flags |= AHC_MULTI_CHANNEL;
- break;
- default: /* hmmm...we don't know what this is */
- printk(KERN_WARNING "aic7xxx: Unsupported adapter type %d, ignoring.\n",
- aic_inb(p, SBLKCTL) & 0x0a);
- return(-1);
- }
- return(0);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_alloc
- *
- * Description:
- * Allocate and initialize a host structure. Returns NULL upon error
- * and a pointer to an aic7xxx_host struct upon success.
- *-F*************************************************************************/
-static struct aic7xxx_host *
-aic7xxx_alloc(struct scsi_host_template *sht, struct aic7xxx_host *temp)
-{
- struct aic7xxx_host *p = NULL;
- struct Scsi_Host *host;
-
- /*
- * Allocate a storage area by registering us with the mid-level
- * SCSI layer.
- */
- host = scsi_register(sht, sizeof(struct aic7xxx_host));
-
- if (host != NULL)
- {
- p = (struct aic7xxx_host *) host->hostdata;
- memset(p, 0, sizeof(struct aic7xxx_host));
- *p = *temp;
- p->host = host;
-
- p->scb_data = kzalloc(sizeof(scb_data_type), GFP_ATOMIC);
- if (p->scb_data)
- {
- scbq_init (&p->scb_data->free_scbs);
- }
- else
- {
- /*
- * For some reason we don't have enough memory. Free the
- * allocated memory for the aic7xxx_host struct, and return NULL.
- */
- release_region(p->base, MAXREG - MINREG);
- scsi_unregister(host);
- return(NULL);
- }
- p->host_no = host->host_no;
- }
- return (p);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_free
- *
- * Description:
- * Frees and releases all resources associated with an instance of
- * the driver (struct aic7xxx_host *).
- *-F*************************************************************************/
-static void
-aic7xxx_free(struct aic7xxx_host *p)
-{
- int i;
-
- /*
- * Free the allocated hardware SCB space.
- */
- if (p->scb_data != NULL)
- {
- struct aic7xxx_scb_dma *scb_dma = NULL;
- if (p->scb_data->hscbs != NULL)
- {
- pci_free_consistent(p->pdev, p->scb_data->hscbs_dma_len,
- p->scb_data->hscbs, p->scb_data->hscbs_dma);
- p->scb_data->hscbs = p->scb_data->hscb_kmalloc_ptr = NULL;
- }
- /*
- * Free the driver SCBs. These were allocated on an as-needed
- * basis. We allocated these in groups depending on how many
- * we could fit into a given amount of RAM. The tail SCB for
- * these allocations has a pointer to the alloced area.
- */
- for (i = 0; i < p->scb_data->numscbs; i++)
- {
- if (p->scb_data->scb_array[i]->scb_dma != scb_dma)
- {
- scb_dma = p->scb_data->scb_array[i]->scb_dma;
- pci_free_consistent(p->pdev, scb_dma->dma_len,
- (void *)((unsigned long)scb_dma->dma_address
- - scb_dma->dma_offset),
- scb_dma->dma_address);
- }
- kfree(p->scb_data->scb_array[i]->kmalloc_ptr);
- p->scb_data->scb_array[i] = NULL;
- }
-
- /*
- * Free the SCB data area.
- */
- kfree(p->scb_data);
- }
-
- pci_free_consistent(p->pdev, 3*256, (void *)p->untagged_scbs, p->fifo_dma);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_load_seeprom
- *
- * Description:
- * Load the seeprom and configure adapter and target settings.
- * Falls back to BIOS-provided or default values when no SEEPROM is found.
- *-F*************************************************************************/
-static void
-aic7xxx_load_seeprom(struct aic7xxx_host *p, unsigned char *sxfrctl1)
-{
- int have_seeprom = 0;
- int i, max_targets, mask;
- unsigned char scsirate, scsi_conf;
- unsigned short scarray[128];
- struct seeprom_config *sc = (struct seeprom_config *) scarray;
-
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- {
- printk(KERN_INFO "aic7xxx: Loading serial EEPROM...");
- }
- switch (p->chip)
- {
- case (AHC_AIC7770|AHC_EISA): /* None of these adapters have seeproms. */
- if (aic_inb(p, SCSICONF) & TERM_ENB)
- p->flags |= AHC_TERM_ENB_A;
- if ( (p->features & AHC_TWIN) && (aic_inb(p, SCSICONF + 1) & TERM_ENB) )
- p->flags |= AHC_TERM_ENB_B;
- break;
-
- case (AHC_AIC7770|AHC_VL):
- have_seeprom = read_284x_seeprom(p, (struct seeprom_config *) scarray);
- break;
-
- default:
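- /*
-  * PCI adapters: try the configured SEEPROM geometry first, then retry
-  * with the other chip type (C46 vs. C56/66), and finally retry
-  * assuming a 128-word part with the per-channel offset scaled
-  * accordingly.
-  */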
- have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)),
- scarray, p->sc_size, p->sc_type);
- if (!have_seeprom)
- {
- if(p->sc_type == C46)
- have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)),
- scarray, p->sc_size, C56_66);
- else
- have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)),
- scarray, p->sc_size, C46);
- }
- if (!have_seeprom)
- {
- p->sc_size = 128;
- have_seeprom = read_seeprom(p, 4*(p->flags & (AHC_CHNLB|AHC_CHNLC)),
- scarray, p->sc_size, p->sc_type);
- if (!have_seeprom)
- {
- if(p->sc_type == C46)
- have_seeprom = read_seeprom(p, 4*(p->flags & (AHC_CHNLB|AHC_CHNLC)),
- scarray, p->sc_size, C56_66);
- else
- have_seeprom = read_seeprom(p, 4*(p->flags & (AHC_CHNLB|AHC_CHNLC)),
- scarray, p->sc_size, C46);
- }
- }
- break;
- }
-
- if (!have_seeprom)
- {
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- {
- printk("\naic7xxx: No SEEPROM available.\n");
- }
- p->flags |= AHC_NEWEEPROM_FMT;
- if (aic_inb(p, SCSISEQ) == 0)
- {
- p->flags |= AHC_USEDEFAULTS;
- p->flags &= ~AHC_BIOS_ENABLED;
- p->scsi_id = p->scsi_id_b = 7;
- *sxfrctl1 |= STPWEN;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- {
- printk("aic7xxx: Using default values.\n");
- }
- }
- else if (aic7xxx_verbose & VERBOSE_PROBE2)
- {
- printk("aic7xxx: Using leftover BIOS values.\n");
- }
- if ( ((p->chip & ~AHC_CHIPID_MASK) == AHC_PCI) && (*sxfrctl1 & STPWEN) )
- {
- p->flags |= AHC_TERM_ENB_SE_LOW | AHC_TERM_ENB_SE_HIGH;
- sc->adapter_control &= ~CFAUTOTERM;
- sc->adapter_control |= CFSTERM | CFWSTERM | CFLVDSTERM;
- }
- if (aic7xxx_extended)
- p->flags |= (AHC_EXTEND_TRANS_A | AHC_EXTEND_TRANS_B);
- else
- p->flags &= ~(AHC_EXTEND_TRANS_A | AHC_EXTEND_TRANS_B);
- }
- else
- {
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- {
- printk("done\n");
- }
-
- /*
- * Note things in our flags
- */
- p->flags |= AHC_SEEPROM_FOUND;
-
- /*
- * Update the settings in sxfrctl1 to match the termination settings.
- */
- *sxfrctl1 = 0;
-
- /*
- * Get our SCSI ID from the SEEPROM setting...
- */
- p->scsi_id = (sc->brtime_id & CFSCSIID);
-
- /*
- * First process the settings that are different between the VLB
- * and PCI adapter seeproms.
- */
- if ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7770)
- {
- /* VLB adapter seeproms */
- if (sc->bios_control & CF284XEXTEND)
- p->flags |= AHC_EXTEND_TRANS_A;
-
- if (sc->adapter_control & CF284XSTERM)
- {
- *sxfrctl1 |= STPWEN;
- p->flags |= AHC_TERM_ENB_SE_LOW | AHC_TERM_ENB_SE_HIGH;
- }
- }
- else
- {
- /* PCI adapter seeproms */
- if (sc->bios_control & CFEXTEND)
- p->flags |= AHC_EXTEND_TRANS_A;
- if (sc->bios_control & CFBIOSEN)
- p->flags |= AHC_BIOS_ENABLED;
- else
- p->flags &= ~AHC_BIOS_ENABLED;
-
- if (sc->adapter_control & CFSTERM)
- {
- *sxfrctl1 |= STPWEN;
- p->flags |= AHC_TERM_ENB_SE_LOW | AHC_TERM_ENB_SE_HIGH;
- }
- }
- memcpy(&p->sc, sc, sizeof(struct seeprom_config));
- }
-
- p->discenable = 0;
-
- /*
- * Limit to 16 targets just in case. The 2842 for one is known to
- * blow the max_targets setting; future cards might also.
- */
- max_targets = ((p->features & (AHC_TWIN | AHC_WIDE)) ? 16 : 8);
-
- if (have_seeprom)
- {
- for (i = 0; i < max_targets; i++)
- {
- if( ((p->features & AHC_ULTRA) &&
- !(sc->adapter_control & CFULTRAEN) &&
- (sc->device_flags[i] & CFSYNCHISULTRA)) ||
- (sc->device_flags[i] & CFNEWULTRAFORMAT) )
- {
- p->flags |= AHC_NEWEEPROM_FMT;
- break;
- }
- }
- }
-
- for (i = 0; i < max_targets; i++)
- {
- mask = (0x01 << i);
- if (!have_seeprom)
- {
- if (aic_inb(p, SCSISEQ) != 0)
- {
- /*
- * OK...the BIOS set things up and left behind the settings we need.
- * Just make our sc->device_flags[i] entry match what the card has
- * set for this device.
- */
- p->discenable =
- ~(aic_inb(p, DISC_DSB) | (aic_inb(p, DISC_DSB + 1) << 8) );
- p->ultraenb =
- (aic_inb(p, ULTRA_ENB) | (aic_inb(p, ULTRA_ENB + 1) << 8) );
- sc->device_flags[i] = (p->discenable & mask) ? CFDISC : 0;
- if (aic_inb(p, TARG_SCSIRATE + i) & WIDEXFER)
- sc->device_flags[i] |= CFWIDEB;
- if (p->features & AHC_ULTRA2)
- {
- if (aic_inb(p, TARG_OFFSET + i))
- {
- sc->device_flags[i] |= CFSYNCH;
- sc->device_flags[i] |= (aic_inb(p, TARG_SCSIRATE + i) & 0x07);
- if ( (aic_inb(p, TARG_SCSIRATE + i) & 0x18) == 0x18 )
- sc->device_flags[i] |= CFSYNCHISULTRA;
- }
- }
- else
- {
- if (aic_inb(p, TARG_SCSIRATE + i) & ~WIDEXFER)
- {
- sc->device_flags[i] |= CFSYNCH;
- if (p->features & AHC_ULTRA)
- sc->device_flags[i] |= ((p->ultraenb & mask) ?
- CFSYNCHISULTRA : 0);
- }
- }
- }
- else
- {
- /*
- * Assume the BIOS has NOT been run on this card and nothing between
- * the card and the devices is configured yet.
- */
- sc->device_flags[i] = CFDISC;
- if (p->features & AHC_WIDE)
- sc->device_flags[i] |= CFWIDEB;
- if (p->features & AHC_ULTRA3)
- sc->device_flags[i] |= 2;
- else if (p->features & AHC_ULTRA2)
- sc->device_flags[i] |= 3;
- else if (p->features & AHC_ULTRA)
- sc->device_flags[i] |= CFSYNCHISULTRA;
- sc->device_flags[i] |= CFSYNCH;
- aic_outb(p, 0, TARG_SCSIRATE + i);
- if (p->features & AHC_ULTRA2)
- aic_outb(p, 0, TARG_OFFSET + i);
- }
- }
- if (sc->device_flags[i] & CFDISC)
- {
- p->discenable |= mask;
- }
- if (p->flags & AHC_NEWEEPROM_FMT)
- {
- if ( !(p->features & AHC_ULTRA2) )
- {
- /*
- * I know of two different Ultra BIOSes that handle this differently.
- * The one on the Gigabyte 6BXU motherboard uses (flags[i] & CFXFER)
- * == 0x03 with SYNCHISULTRA set to mean 40MByte/s, while the IBM
- * Netfinity 5000 uses the same CFXFER value with SYNCHISULTRA clear
- * to mean 40MByte/s. So, we treat both as 40MByte/s and the lower
- * speeds be damned. People will have to select around the conversely
- * mapped lower speeds in order to get lower speeds on these boards.
- */
- if ( (sc->device_flags[i] & CFNEWULTRAFORMAT) &&
- ((sc->device_flags[i] & CFXFER) == 0x03) )
- {
- sc->device_flags[i] &= ~CFXFER;
- sc->device_flags[i] |= CFSYNCHISULTRA;
- }
- if (sc->device_flags[i] & CFSYNCHISULTRA)
- {
- p->ultraenb |= mask;
- }
- }
- else if ( !(sc->device_flags[i] & CFNEWULTRAFORMAT) &&
- (p->features & AHC_ULTRA2) &&
- (sc->device_flags[i] & CFSYNCHISULTRA) )
- {
- p->ultraenb |= mask;
- }
- }
- else if (sc->adapter_control & CFULTRAEN)
- {
- p->ultraenb |= mask;
- }
- if ( (sc->device_flags[i] & CFSYNCH) == 0)
- {
- sc->device_flags[i] &= ~CFXFER;
- p->ultraenb &= ~mask;
- p->user[i].offset = 0;
- p->user[i].period = 0;
- p->user[i].options = 0;
- }
- else
- {
- if (p->features & AHC_ULTRA3)
- {
- p->user[i].offset = MAX_OFFSET_ULTRA2;
- if( (sc->device_flags[i] & CFXFER) < 0x03 )
- {
- scsirate = (sc->device_flags[i] & CFXFER);
- p->user[i].options = MSG_EXT_PPR_OPTION_DT_CRC;
- }
- else
- {
- scsirate = (sc->device_flags[i] & CFXFER) |
- ((p->ultraenb & mask) ? 0x18 : 0x10);
- p->user[i].options = 0;
- }
- p->user[i].period = aic7xxx_find_period(p, scsirate,
- AHC_SYNCRATE_ULTRA3);
- }
- else if (p->features & AHC_ULTRA2)
- {
- p->user[i].offset = MAX_OFFSET_ULTRA2;
- scsirate = (sc->device_flags[i] & CFXFER) |
- ((p->ultraenb & mask) ? 0x18 : 0x10);
- p->user[i].options = 0;
- p->user[i].period = aic7xxx_find_period(p, scsirate,
- AHC_SYNCRATE_ULTRA2);
- }
- else
- {
- scsirate = (sc->device_flags[i] & CFXFER) << 4;
- p->user[i].options = 0;
- p->user[i].offset = MAX_OFFSET_8BIT;
- if (p->features & AHC_ULTRA)
- {
- short ultraenb;
- ultraenb = aic_inb(p, ULTRA_ENB) |
- (aic_inb(p, ULTRA_ENB + 1) << 8);
- p->user[i].period = aic7xxx_find_period(p, scsirate,
- (p->ultraenb & mask) ?
- AHC_SYNCRATE_ULTRA :
- AHC_SYNCRATE_FAST);
- }
- else
- p->user[i].period = aic7xxx_find_period(p, scsirate,
- AHC_SYNCRATE_FAST);
- }
- }
- if ( (sc->device_flags[i] & CFWIDEB) && (p->features & AHC_WIDE) )
- {
- p->user[i].width = MSG_EXT_WDTR_BUS_16_BIT;
- }
- else
- {
- p->user[i].width = MSG_EXT_WDTR_BUS_8_BIT;
- }
- }
- aic_outb(p, ~(p->discenable & 0xFF), DISC_DSB);
- aic_outb(p, ~((p->discenable >> 8) & 0xFF), DISC_DSB + 1);
-
- /*
- * We set the p->ultraenb from the SEEPROM to begin with, but now we make
- * it match what is already down in the card. If we are doing a reset
- * on the card then this will get put back to a default state anyway.
- * This allows us to not have to pre-emptively negotiate when using the
- * no_reset option.
- */
- if (p->features & AHC_ULTRA)
- p->ultraenb = aic_inb(p, ULTRA_ENB) | (aic_inb(p, ULTRA_ENB + 1) << 8);
-
-
- scsi_conf = (p->scsi_id & HSCSIID);
-
- if(have_seeprom)
- {
- p->adapter_control = sc->adapter_control;
- p->bios_control = sc->bios_control;
-
- switch (p->chip & AHC_CHIPID_MASK)
- {
- case AHC_AIC7895:
- case AHC_AIC7896:
- case AHC_AIC7899:
- if (p->adapter_control & CFBPRIMARY)
- p->flags |= AHC_CHANNEL_B_PRIMARY;
- default:
- break;
- }
-
- if (sc->adapter_control & CFSPARITY)
- scsi_conf |= ENSPCHK;
- }
- else
- {
- scsi_conf |= ENSPCHK | RESET_SCSI;
- }
-
- /*
- * Only set the SCSICONF and SCSICONF + 1 registers if we are a PCI card.
- * The 2842 and 2742 cards already have these registers set and we don't
- * want to muck with them since we don't set all the bits they do.
- */
- if ( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI )
- {
- /* Set the host ID */
- aic_outb(p, scsi_conf, SCSICONF);
- /* In case we are a wide card */
- aic_outb(p, p->scsi_id, SCSICONF + 1);
- }
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_configure_bugs
- *
- * Description:
- * Take the card passed in and set the appropriate bug flags based upon
- * the card model. Also make any changes needed to device registers or
- * PCI registers while we are here.
- *-F*************************************************************************/
-static void
-aic7xxx_configure_bugs(struct aic7xxx_host *p)
-{
- unsigned short tmp_word;
-
- switch(p->chip & AHC_CHIPID_MASK)
- {
- case AHC_AIC7860:
- p->bugs |= AHC_BUG_PCI_2_1_RETRY;
- /* fall through */
- case AHC_AIC7850:
- case AHC_AIC7870:
- p->bugs |= AHC_BUG_TMODE_WIDEODD | AHC_BUG_CACHETHEN | AHC_BUG_PCI_MWI;
- break;
- case AHC_AIC7880:
- p->bugs |= AHC_BUG_TMODE_WIDEODD | AHC_BUG_PCI_2_1_RETRY |
- AHC_BUG_CACHETHEN | AHC_BUG_PCI_MWI;
- break;
- case AHC_AIC7890:
- p->bugs |= AHC_BUG_AUTOFLUSH | AHC_BUG_CACHETHEN;
- break;
- case AHC_AIC7892:
- p->bugs |= AHC_BUG_SCBCHAN_UPLOAD;
- break;
- case AHC_AIC7895:
- p->bugs |= AHC_BUG_TMODE_WIDEODD | AHC_BUG_PCI_2_1_RETRY |
- AHC_BUG_CACHETHEN | AHC_BUG_PCI_MWI;
- break;
- case AHC_AIC7896:
- p->bugs |= AHC_BUG_CACHETHEN_DIS;
- break;
- case AHC_AIC7899:
- p->bugs |= AHC_BUG_SCBCHAN_UPLOAD;
- break;
- default:
- /* Nothing to do */
- break;
- }
-
- /*
- * Now handle the bugs that require PCI register or card register tweaks
- */
- pci_read_config_word(p->pdev, PCI_COMMAND, &tmp_word);
- if(p->bugs & AHC_BUG_PCI_MWI)
- {
- tmp_word &= ~PCI_COMMAND_INVALIDATE;
- }
- else
- {
- tmp_word |= PCI_COMMAND_INVALIDATE;
- }
- pci_write_config_word(p->pdev, PCI_COMMAND, tmp_word);
-
- if(p->bugs & AHC_BUG_CACHETHEN)
- {
- aic_outb(p, aic_inb(p, DSCOMMAND0) & ~CACHETHEN, DSCOMMAND0);
- }
- else if (p->bugs & AHC_BUG_CACHETHEN_DIS)
- {
- aic_outb(p, aic_inb(p, DSCOMMAND0) | CACHETHEN, DSCOMMAND0);
- }
-
- return;
-}
-
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_detect
- *
- * Description:
- * Try to detect and register an Adaptec 7770 or 7870 SCSI controller.
- *
- * XXX - This should really be called aic7xxx_probe(). A sequence of
- * probe(), attach()/detach(), and init() makes more sense than
- * one do-it-all function. This may be useful when (and if) the
- * mid-level SCSI code is overhauled.
- *-F*************************************************************************/
-static int
-aic7xxx_detect(struct scsi_host_template *template)
-{
- struct aic7xxx_host *temp_p = NULL;
- struct aic7xxx_host *current_p = NULL;
- struct aic7xxx_host *list_p = NULL;
- int found = 0;
-#if defined(__i386__) || defined(__alpha__)
- ahc_flag_type flags = 0;
- int type;
-#endif
- unsigned char sxfrctl1;
-#if defined(__i386__) || defined(__alpha__)
- unsigned char hcntrl, hostconf;
- unsigned int slot, base;
-#endif
-
-#ifdef MODULE
- /*
- * If we are called as a module, the aic7xxx pointer may not be null
- * and it would point to our bootup string, just like on the lilo
- * command line. If not NULL, then process this config string with
- * aic7xxx_setup().
- */
- if(aic7xxx)
- aic7xxx_setup(aic7xxx);
-#endif
-
- template->proc_name = "aic7xxx";
- template->sg_tablesize = AIC7XXX_MAX_SG;
-
-
-#ifdef CONFIG_PCI
- /*
- * PCI-bus probe.
- */
- {
- static struct
- {
- unsigned short vendor_id;
- unsigned short device_id;
- ahc_chip chip;
- ahc_flag_type flags;
- ahc_feature features;
- int board_name_index;
- unsigned short seeprom_size;
- unsigned short seeprom_type;
- } const aic_pdevs[] = {
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7810, AHC_NONE,
- AHC_FNONE, AHC_FENONE, 1,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7850, AHC_AIC7850,
- AHC_PAGESCBS, AHC_AIC7850_FE, 5,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7855, AHC_AIC7850,
- AHC_PAGESCBS, AHC_AIC7850_FE, 6,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7821, AHC_AIC7860,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
- AHC_AIC7860_FE, 7,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_3860, AHC_AIC7860,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
- AHC_AIC7860_FE, 7,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_38602, AHC_AIC7860,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
- AHC_AIC7860_FE, 7,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_38602, AHC_AIC7860,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
- AHC_AIC7860_FE, 7,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7860, AHC_AIC7860,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MOTHERBOARD,
- AHC_AIC7860_FE, 7,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7861, AHC_AIC7860,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
- AHC_AIC7860_FE, 8,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7870, AHC_AIC7870,
- AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MOTHERBOARD,
- AHC_AIC7870_FE, 9,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7871, AHC_AIC7870,
- AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7870_FE, 10,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7872, AHC_AIC7870,
- AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
- AHC_AIC7870_FE, 11,
- 32, C56_66 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7873, AHC_AIC7870,
- AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
- AHC_AIC7870_FE, 12,
- 32, C56_66 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7874, AHC_AIC7870,
- AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7870_FE, 13,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7880, AHC_AIC7880,
- AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MOTHERBOARD,
- AHC_AIC7880_FE, 14,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7881, AHC_AIC7880,
- AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 15,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7882, AHC_AIC7880,
- AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
- AHC_AIC7880_FE, 16,
- 32, C56_66 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7883, AHC_AIC7880,
- AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
- AHC_AIC7880_FE, 17,
- 32, C56_66 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7884, AHC_AIC7880,
- AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7885, AHC_AIC7880,
- AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7886, AHC_AIC7880,
- AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7887, AHC_AIC7880,
- AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE | AHC_NEW_AUTOTERM, 19,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7888, AHC_AIC7880,
- AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7895, AHC_AIC7895,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
- AHC_AIC7895_FE, 20,
- 32, C56_66 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7890, AHC_AIC7890,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
- AHC_AIC7890_FE, 21,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7890B, AHC_AIC7890,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
- AHC_AIC7890_FE, 21,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_2930U2, AHC_AIC7890,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
- AHC_AIC7890_FE, 22,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_2940U2, AHC_AIC7890,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
- AHC_AIC7890_FE, 23,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7896, AHC_AIC7896,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
- AHC_AIC7896_FE, 24,
- 32, C56_66 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_3940U2, AHC_AIC7896,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
- AHC_AIC7896_FE, 25,
- 32, C56_66 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_3950U2D, AHC_AIC7896,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
- AHC_AIC7896_FE, 26,
- 32, C56_66 },
- {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_1480A, AHC_AIC7860,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_NO_STPWEN,
- AHC_AIC7860_FE, 27,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892A, AHC_AIC7892,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
- AHC_AIC7892_FE, 28,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892B, AHC_AIC7892,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
- AHC_AIC7892_FE, 28,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892D, AHC_AIC7892,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
- AHC_AIC7892_FE, 28,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892P, AHC_AIC7892,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
- AHC_AIC7892_FE, 28,
- 32, C46 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899A, AHC_AIC7899,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
- AHC_AIC7899_FE, 29,
- 32, C56_66 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899B, AHC_AIC7899,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
- AHC_AIC7899_FE, 29,
- 32, C56_66 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899D, AHC_AIC7899,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
- AHC_AIC7899_FE, 29,
- 32, C56_66 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899P, AHC_AIC7899,
- AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
- AHC_AIC7899_FE, 29,
- 32, C56_66 },
- };
-
- unsigned short command;
- unsigned int devconfig, i, oldverbose;
- struct pci_dev *pdev = NULL;
-
- for (i = 0; i < ARRAY_SIZE(aic_pdevs); i++)
- {
- pdev = NULL;
- while ((pdev = pci_get_device(aic_pdevs[i].vendor_id,
- aic_pdevs[i].device_id,
- pdev))) {
- if (pci_enable_device(pdev))
- continue;
- if ( i == 0 ) /* We found one, but it's the 7810 RAID cont. */
- {
- if (aic7xxx_verbose & (VERBOSE_PROBE|VERBOSE_PROBE2))
- {
- printk(KERN_INFO "aic7xxx: The 7810 RAID controller is not "
- "supported by\n");
- printk(KERN_INFO " this driver, we are ignoring it.\n");
- }
- }
- else if ( (temp_p = kzalloc(sizeof(struct aic7xxx_host),
- GFP_ATOMIC)) != NULL )
- {
- temp_p->chip = aic_pdevs[i].chip | AHC_PCI;
- temp_p->flags = aic_pdevs[i].flags;
- temp_p->features = aic_pdevs[i].features;
- temp_p->board_name_index = aic_pdevs[i].board_name_index;
- temp_p->sc_size = aic_pdevs[i].seeprom_size;
- temp_p->sc_type = aic_pdevs[i].seeprom_type;
-
- /*
- * Read sundry information from PCI BIOS.
- */
- temp_p->irq = pdev->irq;
- temp_p->pdev = pdev;
- temp_p->pci_bus = pdev->bus->number;
- temp_p->pci_device_fn = pdev->devfn;
- temp_p->base = pci_resource_start(pdev, 0);
- temp_p->mbase = pci_resource_start(pdev, 1);
- current_p = list_p;
- while(current_p && temp_p)
- {
- if ( ((current_p->pci_bus == temp_p->pci_bus) &&
- (current_p->pci_device_fn == temp_p->pci_device_fn)) ||
- (temp_p->base && (current_p->base == temp_p->base)) ||
- (temp_p->mbase && (current_p->mbase == temp_p->mbase)) )
- {
- /* duplicate PCI entry, skip it */
- kfree(temp_p);
- temp_p = NULL;
- continue;
- }
- current_p = current_p->next;
- }
- if(pci_request_regions(temp_p->pdev, "aic7xxx"))
- {
- printk("aic7xxx: <%s> at PCI %d/%d/%d\n",
- board_names[aic_pdevs[i].board_name_index],
- temp_p->pci_bus,
- PCI_SLOT(temp_p->pci_device_fn),
- PCI_FUNC(temp_p->pci_device_fn));
- printk("aic7xxx: I/O ports already in use, ignoring.\n");
- kfree(temp_p);
- continue;
- }
-
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk("aic7xxx: <%s> at PCI %d/%d\n",
- board_names[aic_pdevs[i].board_name_index],
- PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
- pci_read_config_word(pdev, PCI_COMMAND, &command);
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- {
- printk("aic7xxx: Initial PCI_COMMAND value was 0x%x\n",
- (int)command);
- }
-#ifdef AIC7XXX_STRICT_PCI_SETUP
- command |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
- PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
-#else
- command |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
-#endif
- command &= ~PCI_COMMAND_INVALIDATE;
- if (aic7xxx_pci_parity == 0)
- command &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
- pci_write_config_word(pdev, PCI_COMMAND, command);
-#ifdef AIC7XXX_STRICT_PCI_SETUP
- pci_read_config_dword(pdev, DEVCONFIG, &devconfig);
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- {
- printk("aic7xxx: Initial DEVCONFIG value was 0x%x\n", devconfig);
- }
- devconfig |= 0x80000040;
- pci_write_config_dword(pdev, DEVCONFIG, devconfig);
-#endif /* AIC7XXX_STRICT_PCI_SETUP */
-
- temp_p->unpause = INTEN;
- temp_p->pause = temp_p->unpause | PAUSE;
- if ( ((temp_p->base == 0) &&
- (temp_p->mbase == 0)) ||
- (temp_p->irq == 0) )
- {
- printk("aic7xxx: <%s> at PCI %d/%d/%d\n",
- board_names[aic_pdevs[i].board_name_index],
- temp_p->pci_bus,
- PCI_SLOT(temp_p->pci_device_fn),
- PCI_FUNC(temp_p->pci_device_fn));
- printk("aic7xxx: Controller disabled by BIOS, ignoring.\n");
- goto skip_pci_controller;
- }
-
-#ifdef MMAPIO
- if ( !(temp_p->base) || !(temp_p->flags & AHC_MULTI_CHANNEL) ||
- ((temp_p->chip != (AHC_AIC7870 | AHC_PCI)) &&
- (temp_p->chip != (AHC_AIC7880 | AHC_PCI))) )
- {
- temp_p->maddr = ioremap_nocache(temp_p->mbase, 256);
- if(temp_p->maddr)
- {
- /*
- * We need to check the I/O with the MMAPed address. Some machines
- * simply fail to work with MMAPed I/O on certain controllers.
- */
- if(aic_inb(temp_p, HCNTRL) == 0xff)
- {
- /*
- * OK.....we failed our test....go back to programmed I/O
- */
- printk(KERN_INFO "aic7xxx: <%s> at PCI %d/%d/%d\n",
- board_names[aic_pdevs[i].board_name_index],
- temp_p->pci_bus,
- PCI_SLOT(temp_p->pci_device_fn),
- PCI_FUNC(temp_p->pci_device_fn));
- printk(KERN_INFO "aic7xxx: MMAPed I/O failed, reverting to "
- "Programmed I/O.\n");
- iounmap(temp_p->maddr);
- temp_p->maddr = NULL;
- if(temp_p->base == 0)
- {
- printk("aic7xxx: <%s> at PCI %d/%d/%d\n",
- board_names[aic_pdevs[i].board_name_index],
- temp_p->pci_bus,
- PCI_SLOT(temp_p->pci_device_fn),
- PCI_FUNC(temp_p->pci_device_fn));
- printk("aic7xxx: Controller disabled by BIOS, ignoring.\n");
- goto skip_pci_controller;
- }
- }
- }
- }
-#endif
-
- /*
- * We HAVE to make sure the first pause_sequencer() and all other
- * subsequent I/O that isn't PCI config space I/O takes place
- * after the MMAPed I/O region is configured and tested. The
- * problem is that the PowerPC architecture doesn't support
- * programmed I/O at all, so we have to have the MMAPed I/O set up
- * for this pause to even work on those machines.
- */
- pause_sequencer(temp_p);
-
- /*
- * Clear out any pending PCI error status messages. Also set
- * verbose to 0 so that we don't emit strange PCI error messages
- * while cleaning out the current status bits.
- */
- oldverbose = aic7xxx_verbose;
- aic7xxx_verbose = 0;
- aic7xxx_pci_intr(temp_p);
- aic7xxx_verbose = oldverbose;
-
- temp_p->bios_address = 0;
-
- /*
- * Remember how the card was set up in case there is no seeprom.
- */
- if (temp_p->features & AHC_ULTRA2)
- temp_p->scsi_id = aic_inb(temp_p, SCSIID_ULTRA2) & OID;
- else
- temp_p->scsi_id = aic_inb(temp_p, SCSIID) & OID;
- /*
- * Get current termination setting
- */
- sxfrctl1 = aic_inb(temp_p, SXFRCTL1);
-
- if (aic7xxx_chip_reset(temp_p) == -1)
- {
- goto skip_pci_controller;
- }
- /*
- * Very quickly put the term setting back into the register since
- * the chip reset may cause odd things to happen. This is to keep
- * LVD busses with lots of drives from draining the power out of
- * the diffsense line before we get around to running the
- * configure_termination() function. Also restore the STPWLEVEL
- * bit of DEVCONFIG
- */
- aic_outb(temp_p, sxfrctl1, SXFRCTL1);
- pci_write_config_dword(temp_p->pdev, DEVCONFIG, devconfig);
- sxfrctl1 &= STPWEN;
-
- /*
- * We need to set the CHNL? assignments before loading the SEEPROM.
- * The 3940 and 3985 cards (original stuff, not any of the later
- * stuff) are 7870 and 7880 class chips. The Ultra2 stuff falls
- * under 7896 and 7897. The 7895 is in a class by itself :)
- */
- switch (temp_p->chip & AHC_CHIPID_MASK)
- {
- case AHC_AIC7870: /* 3840 / 3985 */
- case AHC_AIC7880: /* 3840 UW / 3985 UW */
- if(temp_p->flags & AHC_MULTI_CHANNEL)
- {
- switch(PCI_SLOT(temp_p->pci_device_fn))
- {
- case 5:
- temp_p->flags |= AHC_CHNLB;
- break;
- case 8:
- temp_p->flags |= AHC_CHNLB;
- break;
- case 12:
- temp_p->flags |= AHC_CHNLC;
- break;
- default:
- break;
- }
- }
- break;
-
- case AHC_AIC7895: /* 7895 */
- case AHC_AIC7896: /* 7896/7 */
- case AHC_AIC7899: /* 7899 */
- if (PCI_FUNC(pdev->devfn) != 0)
- {
- temp_p->flags |= AHC_CHNLB;
- }
- /*
- * The 7895 is the only chipset that sets the SCBSIZE32 param
- * in the DEVCONFIG register. The Ultra2 chipsets use
- * the DSCOMMAND0 register instead.
- */
- if ((temp_p->chip & AHC_CHIPID_MASK) == AHC_AIC7895)
- {
- pci_read_config_dword(pdev, DEVCONFIG, &devconfig);
- devconfig |= SCBSIZE32;
- pci_write_config_dword(pdev, DEVCONFIG, devconfig);
- }
- break;
- default:
- break;
- }
-
- /*
- * Loading of the SEEPROM needs to come after we've set the flags
- * to indicate possible CHNLB and CHNLC assignments. Otherwise,
- * on 394x and 398x cards we'll end up reading the wrong settings
- * for channels B and C
- */
- switch (temp_p->chip & AHC_CHIPID_MASK)
- {
- case AHC_AIC7892:
- case AHC_AIC7899:
- aic_outb(temp_p, 0, SCAMCTL);
- /*
- * Switch to the alt mode of the chip...
- */
- aic_outb(temp_p, aic_inb(temp_p, SFUNCT) | ALT_MODE, SFUNCT);
- /*
- * Set our options...the last two items set our CRC after x byte
- * count in target mode...
- */
- aic_outb(temp_p, AUTO_MSGOUT_DE | DIS_MSGIN_DUALEDGE, OPTIONMODE);
- aic_outb(temp_p, 0x00, 0x0b);
- aic_outb(temp_p, 0x10, 0x0a);
- /*
- * switch back to normal mode...
- */
- aic_outb(temp_p, aic_inb(temp_p, SFUNCT) & ~ALT_MODE, SFUNCT);
- aic_outb(temp_p, CRCVALCHKEN | CRCENDCHKEN | CRCREQCHKEN |
- TARGCRCENDEN | TARGCRCCNTEN,
- CRCCONTROL1);
- aic_outb(temp_p, ((aic_inb(temp_p, DSCOMMAND0) | USCBSIZE32 |
- MPARCKEN | CIOPARCKEN | CACHETHEN) &
- ~DPARCKEN), DSCOMMAND0);
- aic7xxx_load_seeprom(temp_p, &sxfrctl1);
- break;
- case AHC_AIC7890:
- case AHC_AIC7896:
- aic_outb(temp_p, 0, SCAMCTL);
- aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) |
- CACHETHEN | MPARCKEN | USCBSIZE32 |
- CIOPARCKEN) & ~DPARCKEN, DSCOMMAND0);
- aic7xxx_load_seeprom(temp_p, &sxfrctl1);
- break;
- case AHC_AIC7850:
- case AHC_AIC7860:
- /*
- * Set the DSCOMMAND0 register on these cards differently than
- * on the 789x cards. Also read the SEEPROM.
- */
- aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) |
- CACHETHEN | MPARCKEN) & ~DPARCKEN,
- DSCOMMAND0);
- /* FALLTHROUGH */
- default:
- aic7xxx_load_seeprom(temp_p, &sxfrctl1);
- break;
- case AHC_AIC7880:
- /*
- * Check the rev of the chipset before we change DSCOMMAND0
- */
- pci_read_config_dword(pdev, DEVCONFIG, &devconfig);
- if ((devconfig & 0xff) >= 1)
- {
- aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) |
- CACHETHEN | MPARCKEN) & ~DPARCKEN,
- DSCOMMAND0);
- }
- aic7xxx_load_seeprom(temp_p, &sxfrctl1);
- break;
- }
-
-
- /*
- * and then we need another switch based on the type in order to
- * make sure the channel B primary flag is set properly on 7895
- * controllers....Arrrgggghhh!!! We also have to catch the fact
- * that when you disable the BIOS on the 7895 on the Intel DK440LX
- * motherboard, and possibly others, it only sets the BIOS disabled
- * bit on the A channel...I think I'm starting to lean towards
- * going postal....
- */
- switch(temp_p->chip & AHC_CHIPID_MASK)
- {
- case AHC_AIC7895:
- case AHC_AIC7896:
- case AHC_AIC7899:
- current_p = list_p;
- while(current_p != NULL)
- {
- if ( (current_p->pci_bus == temp_p->pci_bus) &&
- (PCI_SLOT(current_p->pci_device_fn) ==
- PCI_SLOT(temp_p->pci_device_fn)) )
- {
- if ( PCI_FUNC(current_p->pci_device_fn) == 0 )
- {
- temp_p->flags |=
- (current_p->flags & AHC_CHANNEL_B_PRIMARY);
- temp_p->flags &= ~(AHC_BIOS_ENABLED|AHC_USEDEFAULTS);
- temp_p->flags |=
- (current_p->flags & (AHC_BIOS_ENABLED|AHC_USEDEFAULTS));
- }
- else
- {
- current_p->flags |=
- (temp_p->flags & AHC_CHANNEL_B_PRIMARY);
- current_p->flags &= ~(AHC_BIOS_ENABLED|AHC_USEDEFAULTS);
- current_p->flags |=
- (temp_p->flags & (AHC_BIOS_ENABLED|AHC_USEDEFAULTS));
- }
- }
- current_p = current_p->next;
- }
- break;
- default:
- break;
- }
-
- /*
- * We only support external SCB RAM on the 7895/6/7 chipsets.
- * We could support it on the 7890/1 easily enough, but I don't
- * know of any 7890/1 based cards that have it. I do know
- * of 7895/6/7 cards that have it and they work properly.
- */
- switch(temp_p->chip & AHC_CHIPID_MASK)
- {
- default:
- break;
- case AHC_AIC7895:
- case AHC_AIC7896:
- case AHC_AIC7899:
- pci_read_config_dword(pdev, DEVCONFIG, &devconfig);
- if (temp_p->features & AHC_ULTRA2)
- {
- if ( (aic_inb(temp_p, DSCOMMAND0) & RAMPSM_ULTRA2) &&
- (aic7xxx_scbram) )
- {
- aic_outb(temp_p,
- aic_inb(temp_p, DSCOMMAND0) & ~SCBRAMSEL_ULTRA2,
- DSCOMMAND0);
- temp_p->flags |= AHC_EXTERNAL_SRAM;
- devconfig |= EXTSCBPEN;
- }
- else if (aic_inb(temp_p, DSCOMMAND0) & RAMPSM_ULTRA2)
- {
- printk(KERN_INFO "aic7xxx: <%s> at PCI %d/%d/%d\n",
- board_names[aic_pdevs[i].board_name_index],
- temp_p->pci_bus,
- PCI_SLOT(temp_p->pci_device_fn),
- PCI_FUNC(temp_p->pci_device_fn));
- printk("aic7xxx: external SCB RAM detected, "
- "but not enabled\n");
- }
- }
- else
- {
- if ((devconfig & RAMPSM) && (aic7xxx_scbram))
- {
- devconfig &= ~SCBRAMSEL;
- devconfig |= EXTSCBPEN;
- temp_p->flags |= AHC_EXTERNAL_SRAM;
- }
- else if (devconfig & RAMPSM)
- {
- printk(KERN_INFO "aic7xxx: <%s> at PCI %d/%d/%d\n",
- board_names[aic_pdevs[i].board_name_index],
- temp_p->pci_bus,
- PCI_SLOT(temp_p->pci_device_fn),
- PCI_FUNC(temp_p->pci_device_fn));
- printk("aic7xxx: external SCB RAM detected, "
- "but not enabled\n");
- }
- }
- pci_write_config_dword(pdev, DEVCONFIG, devconfig);
- if ( (temp_p->flags & AHC_EXTERNAL_SRAM) &&
- (temp_p->flags & AHC_CHNLB) )
- aic_outb(temp_p, 1, CCSCBBADDR);
- break;
- }
-
- /*
- * Take the LED out of diagnostic mode
- */
- aic_outb(temp_p,
- (aic_inb(temp_p, SBLKCTL) & ~(DIAGLEDEN | DIAGLEDON)),
- SBLKCTL);
-
- /*
- * We don't know where this is set in the SEEPROM or by the
- * BIOS, so we default to 100%. On Ultra2 controllers, use 75%
- * instead.
- */
- if (temp_p->features & AHC_ULTRA2)
- {
- aic_outb(temp_p, RD_DFTHRSH_MAX | WR_DFTHRSH_MAX, DFF_THRSH);
- }
- else
- {
- aic_outb(temp_p, DFTHRSH_100, DSPCISTATUS);
- }
-
- /*
- * Call our function to fixup any bugs that exist on this chipset.
- * This may muck with PCI settings and other device settings, so
- * make sure it's after all the other PCI and device register
- * tweaks so it can back out bad settings on specific broken cards.
- */
- aic7xxx_configure_bugs(temp_p);
-
- /* Hold a pci device reference */
- pci_dev_get(temp_p->pdev);
-
- if ( list_p == NULL )
- {
- list_p = current_p = temp_p;
- }
- else
- {
- current_p = list_p;
- while(current_p->next != NULL)
- current_p = current_p->next;
- current_p->next = temp_p;
- }
- temp_p->next = NULL;
- found++;
- continue;
-skip_pci_controller:
-#ifdef CONFIG_PCI
- pci_release_regions(temp_p->pdev);
-#endif
- kfree(temp_p);
- } /* Found an Adaptec PCI device. */
- else /* Well, we found one, but we couldn't get any memory */
- {
- printk("aic7xxx: Found <%s>\n",
- board_names[aic_pdevs[i].board_name_index]);
- printk(KERN_INFO "aic7xxx: Unable to allocate device memory, "
- "skipping.\n");
- }
- } /* while(pdev=....) */
- } /* for PCI_DEVICES */
- }
-#endif /* CONFIG_PCI */
-
-#if defined(__i386__) || defined(__alpha__)
- /*
- * EISA/VL-bus card signature probe.
- */
- slot = MINSLOT;
- while ( (slot <= MAXSLOT) &&
- !(aic7xxx_no_probe) )
- {
- base = SLOTBASE(slot) + MINREG;
-
- if (!request_region(base, MAXREG - MINREG, "aic7xxx"))
- {
- /*
- * Some other driver has staked a
- * claim to this i/o region already.
- */
- slot++;
- continue; /* back to the beginning of the while loop */
- }
- flags = 0;
- type = aic7xxx_probe(slot, base + AHC_HID0, &flags);
- if (type == -1)
- {
- release_region(base, MAXREG - MINREG);
- slot++;
- continue;
- }
- temp_p = kmalloc(sizeof(struct aic7xxx_host), GFP_ATOMIC);
- if (temp_p == NULL)
- {
- printk(KERN_WARNING "aic7xxx: Unable to allocate device space.\n");
- release_region(base, MAXREG - MINREG);
- slot++;
- continue; /* back to the beginning of the while loop */
- }
-
- /*
- * Pause the card preserving the IRQ type. Allow the operator
- * to override the IRQ trigger.
- */
- if (aic7xxx_irq_trigger == 1)
- hcntrl = IRQMS; /* Level */
- else if (aic7xxx_irq_trigger == 0)
- hcntrl = 0; /* Edge */
- else
- hcntrl = inb(base + HCNTRL) & IRQMS; /* Default */
- memset(temp_p, 0, sizeof(struct aic7xxx_host));
- temp_p->unpause = hcntrl | INTEN;
- temp_p->pause = hcntrl | PAUSE | INTEN;
- temp_p->base = base;
- temp_p->mbase = 0;
- temp_p->maddr = NULL;
- temp_p->pci_bus = 0;
- temp_p->pci_device_fn = slot;
- aic_outb(temp_p, hcntrl | PAUSE, HCNTRL);
- while( (aic_inb(temp_p, HCNTRL) & PAUSE) == 0 ) ;
- if (aic7xxx_chip_reset(temp_p) == -1)
- temp_p->irq = 0;
- else
- temp_p->irq = aic_inb(temp_p, INTDEF) & 0x0F;
- temp_p->flags |= AHC_PAGESCBS;
-
- switch (temp_p->irq)
- {
- case 9:
- case 10:
- case 11:
- case 12:
- case 14:
- case 15:
- break;
-
- default:
- printk(KERN_WARNING "aic7xxx: Host adapter uses unsupported IRQ "
- "level %d, ignoring.\n", temp_p->irq);
- kfree(temp_p);
- release_region(base, MAXREG - MINREG);
- slot++;
- continue; /* back to the beginning of the while loop */
- }
-
- /*
- * We are committed now; everything has been checked, this card
- * has been found, and now we just set it up.
- */
-
- /*
- * Insert our new struct into the list at the end
- */
- if (list_p == NULL)
- {
- list_p = current_p = temp_p;
- }
- else
- {
- current_p = list_p;
- while (current_p->next != NULL)
- current_p = current_p->next;
- current_p->next = temp_p;
- }
-
- switch (type)
- {
- case 0:
- temp_p->board_name_index = 2;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk("aic7xxx: <%s> at EISA %d\n",
- board_names[2], slot);
- /* FALLTHROUGH */
- case 1:
- {
- temp_p->chip = AHC_AIC7770 | AHC_EISA;
- temp_p->features |= AHC_AIC7770_FE;
- temp_p->bios_control = aic_inb(temp_p, HA_274_BIOSCTRL);
-
- /*
- * Get the primary channel information. Right now we don't
- * do anything with this, but someday we will be able to inform
- * the mid-level SCSI code which channel is primary.
- */
- if (temp_p->board_name_index == 0)
- {
- temp_p->board_name_index = 3;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk("aic7xxx: <%s> at EISA %d\n",
- board_names[3], slot);
- }
- if (temp_p->bios_control & CHANNEL_B_PRIMARY)
- {
- temp_p->flags |= AHC_CHANNEL_B_PRIMARY;
- }
-
- if ((temp_p->bios_control & BIOSMODE) == BIOSDISABLED)
- {
- temp_p->flags &= ~AHC_BIOS_ENABLED;
- }
- else
- {
- temp_p->flags &= ~AHC_USEDEFAULTS;
- temp_p->flags |= AHC_BIOS_ENABLED;
- if ( (temp_p->bios_control & 0x20) == 0 )
- {
- temp_p->bios_address = 0xcc000;
- temp_p->bios_address += (0x4000 * (temp_p->bios_control & 0x07));
- }
- else
- {
- temp_p->bios_address = 0xd0000;
- temp_p->bios_address += (0x8000 * (temp_p->bios_control & 0x06));
- }
- }
- temp_p->adapter_control = aic_inb(temp_p, SCSICONF) << 8;
- temp_p->adapter_control |= aic_inb(temp_p, SCSICONF + 1);
- if (temp_p->features & AHC_WIDE)
- {
- temp_p->scsi_id = temp_p->adapter_control & HWSCSIID;
- temp_p->scsi_id_b = temp_p->scsi_id;
- }
- else
- {
- temp_p->scsi_id = (temp_p->adapter_control >> 8) & HSCSIID;
- temp_p->scsi_id_b = temp_p->adapter_control & HSCSIID;
- }
- aic7xxx_load_seeprom(temp_p, &sxfrctl1);
- break;
- }
-
- case 2:
- case 3:
- temp_p->chip = AHC_AIC7770 | AHC_VL;
- temp_p->features |= AHC_AIC7770_FE;
- if (type == 2)
- temp_p->flags |= AHC_BIOS_ENABLED;
- else
- temp_p->flags &= ~AHC_BIOS_ENABLED;
- if (aic_inb(temp_p, SCSICONF) & TERM_ENB)
- sxfrctl1 = STPWEN;
- aic7xxx_load_seeprom(temp_p, &sxfrctl1);
- temp_p->board_name_index = 4;
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- printk("aic7xxx: <%s> at VLB %d\n",
- board_names[2], slot);
- switch( aic_inb(temp_p, STATUS_2840) & BIOS_SEL )
- {
- case 0x00:
- temp_p->bios_address = 0xe0000;
- break;
- case 0x20:
- temp_p->bios_address = 0xc8000;
- break;
- case 0x40:
- temp_p->bios_address = 0xd0000;
- break;
- case 0x60:
- temp_p->bios_address = 0xd8000;
- break;
- default:
- break; /* can't get here */
- }
- break;
-
- default: /* Won't get here. */
- break;
- }
- if (aic7xxx_verbose & VERBOSE_PROBE2)
- {
- printk(KERN_INFO "aic7xxx: BIOS %sabled, IO Port 0x%lx, IRQ %d (%s)\n",
- (temp_p->flags & AHC_USEDEFAULTS) ? "dis" : "en", temp_p->base,
- temp_p->irq,
- (temp_p->pause & IRQMS) ? "level sensitive" : "edge triggered");
- printk(KERN_INFO "aic7xxx: Extended translation %sabled.\n",
- (temp_p->flags & AHC_EXTEND_TRANS_A) ? "en" : "dis");
- }
-
- /*
- * All the 7770 based chipsets have this bug
- */
- temp_p->bugs |= AHC_BUG_TMODE_WIDEODD;
-
- /*
- * Set the FIFO threshold and the bus off time.
- */
- hostconf = aic_inb(temp_p, HOSTCONF);
- aic_outb(temp_p, hostconf & DFTHRSH, BUSSPD);
- aic_outb(temp_p, (hostconf << 2) & BOFF, BUSTIME);
- slot++;
- found++;
- }
-
-#endif /* defined(__i386__) || defined(__alpha__) */
-
- /*
- * Now, we re-order the probed devices by BIOS address and BUS class.
- * In general, we follow this algorithm to make the adapters show up
- * in the same order under Linux as the computer finds them.
- * 1: All VLB/EISA cards with BIOS_ENABLED first, according to BIOS
- * address, going from lowest to highest.
- * 2: All PCI controllers with BIOS_ENABLED next, according to BIOS
- * address, going from lowest to highest.
- * 3: Remaining VLB/EISA controllers going in slot order.
- * 4: Remaining PCI controllers, going in PCI device order (reversible)
- */
-
- {
- struct aic7xxx_host *sort_list[4] = { NULL, NULL, NULL, NULL };
- struct aic7xxx_host *vlb, *pci;
- struct aic7xxx_host *prev_p;
- struct aic7xxx_host *p;
- unsigned char left;
-
- prev_p = vlb = pci = NULL;
-
- temp_p = list_p;
- while (temp_p != NULL)
- {
- switch(temp_p->chip & ~AHC_CHIPID_MASK)
- {
- case AHC_EISA:
- case AHC_VL:
- {
- p = temp_p;
- if (p->flags & AHC_BIOS_ENABLED)
- vlb = sort_list[0];
- else
- vlb = sort_list[2];
-
- if (vlb == NULL)
- {
- vlb = temp_p;
- temp_p = temp_p->next;
- vlb->next = NULL;
- }
- else
- {
- current_p = vlb;
- prev_p = NULL;
- while ( (current_p != NULL) &&
- (current_p->bios_address < temp_p->bios_address))
- {
- prev_p = current_p;
- current_p = current_p->next;
- }
- if (prev_p != NULL)
- {
- prev_p->next = temp_p;
- temp_p = temp_p->next;
- prev_p->next->next = current_p;
- }
- else
- {
- vlb = temp_p;
- temp_p = temp_p->next;
- vlb->next = current_p;
- }
- }
-
- if (p->flags & AHC_BIOS_ENABLED)
- sort_list[0] = vlb;
- else
- sort_list[2] = vlb;
-
- break;
- }
- default: /* All PCI controllers fall through to default */
- {
-
- p = temp_p;
- if (p->flags & AHC_BIOS_ENABLED)
- pci = sort_list[1];
- else
- pci = sort_list[3];
-
- if (pci == NULL)
- {
- pci = temp_p;
- temp_p = temp_p->next;
- pci->next = NULL;
- }
- else
- {
- current_p = pci;
- prev_p = NULL;
- if (!aic7xxx_reverse_scan)
- {
- while ( (current_p != NULL) &&
- ( (PCI_SLOT(current_p->pci_device_fn) |
- (current_p->pci_bus << 8)) <
- (PCI_SLOT(temp_p->pci_device_fn) |
- (temp_p->pci_bus << 8)) ) )
- {
- prev_p = current_p;
- current_p = current_p->next;
- }
- }
- else
- {
- while ( (current_p != NULL) &&
- ( (PCI_SLOT(current_p->pci_device_fn) |
- (current_p->pci_bus << 8)) >
- (PCI_SLOT(temp_p->pci_device_fn) |
- (temp_p->pci_bus << 8)) ) )
- {
- prev_p = current_p;
- current_p = current_p->next;
- }
- }
- /*
- * Are we dealing with a 7895/6/7/9 where we need to sort the
- * channels as well? If so, the bios_address values should
- * be the same.
- */
- if ( (current_p) && (temp_p->flags & AHC_MULTI_CHANNEL) &&
- (temp_p->pci_bus == current_p->pci_bus) &&
- (PCI_SLOT(temp_p->pci_device_fn) ==
- PCI_SLOT(current_p->pci_device_fn)) )
- {
- if (temp_p->flags & AHC_CHNLB)
- {
- if ( !(temp_p->flags & AHC_CHANNEL_B_PRIMARY) )
- {
- prev_p = current_p;
- current_p = current_p->next;
- }
- }
- else
- {
- if (temp_p->flags & AHC_CHANNEL_B_PRIMARY)
- {
- prev_p = current_p;
- current_p = current_p->next;
- }
- }
- }
- if (prev_p != NULL)
- {
- prev_p->next = temp_p;
- temp_p = temp_p->next;
- prev_p->next->next = current_p;
- }
- else
- {
- pci = temp_p;
- temp_p = temp_p->next;
- pci->next = current_p;
- }
- }
-
- if (p->flags & AHC_BIOS_ENABLED)
- sort_list[1] = pci;
- else
- sort_list[3] = pci;
-
- break;
- }
- } /* End of switch(temp_p->type) */
- } /* End of while (temp_p != NULL) */
- /*
- * At this point, the cards have been broken into 4 sorted lists, now
- * we run through the lists in order and register each controller
- */
- {
- int i;
-
- left = found;
- for (i=0; i<ARRAY_SIZE(sort_list); i++)
- {
- temp_p = sort_list[i];
- while(temp_p != NULL)
- {
- template->name = board_names[temp_p->board_name_index];
- p = aic7xxx_alloc(template, temp_p);
- if (p != NULL)
- {
- p->instance = found - left;
- if (aic7xxx_register(template, p, (--left)) == 0)
- {
- found--;
- aic7xxx_release(p->host);
- scsi_unregister(p->host);
- }
- else if (aic7xxx_dump_card)
- {
- pause_sequencer(p);
- aic7xxx_print_card(p);
- aic7xxx_print_scratch_ram(p);
- unpause_sequencer(p, TRUE);
- }
- }
- current_p = temp_p;
- temp_p = (struct aic7xxx_host *)temp_p->next;
- kfree(current_p);
- }
- }
- }
- }
- return (found);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_buildscb
- *
- * Description:
- * Build a SCB.
- *-F*************************************************************************/
-static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
- struct aic7xxx_scb *scb)
-{
- unsigned short mask;
- struct aic7xxx_hwscb *hscb;
- struct aic_dev_data *aic_dev = cmd->device->hostdata;
- struct scsi_device *sdptr = cmd->device;
- unsigned char tindex = TARGET_INDEX(cmd);
- int use_sg;
-
- mask = (0x01 << tindex);
- hscb = scb->hscb;
-
- /*
- * Set up the control byte if we need negotiation and have not
- * already requested it.
- */
- hscb->control = 0;
- scb->tag_action = 0;
-
- if (p->discenable & mask)
- {
- hscb->control |= DISCENB;
- /* We always force TEST_UNIT_READY to untagged */
- if (cmd->cmnd[0] != TEST_UNIT_READY && sdptr->simple_tags)
- {
- hscb->control |= MSG_SIMPLE_Q_TAG;
- scb->tag_action = MSG_SIMPLE_Q_TAG;
- }
- }
- if ( !(aic_dev->dtr_pending) &&
- (aic_dev->needppr || aic_dev->needwdtr || aic_dev->needsdtr) &&
- (aic_dev->flags & DEVICE_DTR_SCANNED) )
- {
- aic_dev->dtr_pending = 1;
- scb->tag_action = 0;
- hscb->control &= DISCENB;
- hscb->control |= MK_MESSAGE;
- if(aic_dev->needppr)
- {
- scb->flags |= SCB_MSGOUT_PPR;
- }
- else if(aic_dev->needwdtr)
- {
- scb->flags |= SCB_MSGOUT_WDTR;
- }
- else if(aic_dev->needsdtr)
- {
- scb->flags |= SCB_MSGOUT_SDTR;
- }
- scb->flags |= SCB_DTR_SCB;
- }
- hscb->target_channel_lun = ((cmd->device->id << 4) & 0xF0) |
- ((cmd->device->channel & 0x01) << 3) | (cmd->device->lun & 0x07);
-
- /*
- * The interpretation of request_buffer and request_bufflen
- * changes depending on whether or not use_sg is zero; a
- * non-zero use_sg indicates the number of elements in the
- * scatter-gather array.
- */
-
- /*
- * XXX - this relies on the host data being stored in a
- * little-endian format.
- */
- hscb->SCSI_cmd_length = cmd->cmd_len;
- memcpy(scb->cmnd, cmd->cmnd, cmd->cmd_len);
- hscb->SCSI_cmd_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, scb->cmnd));
-
- use_sg = scsi_dma_map(cmd);
- BUG_ON(use_sg < 0);
-
- if (use_sg) {
- struct scatterlist *sg; /* Must be mid-level SCSI code scatterlist */
-
- /*
- * We must build an SG list in adapter format, as the kernel's SG list
- * cannot be used directly because of data field size (__alpha__)
- * differences, and because the kernel SG list uses virtual addresses where
- * we need physical addresses.
- */
- int i;
-
- scb->sg_length = 0;
-
-
- /*
- * Copy the segments into the SG array. NOTE!!! - We used to
- * have the first entry both in the data_pointer area and the first
- * SG element. That has changed somewhat. We still have the first
- * entry in both places, but now we download the address of
- * scb->sg_list[1] instead of 0 to the sg pointer in the hscb.
- */
- scsi_for_each_sg(cmd, sg, use_sg, i) {
- unsigned int len = sg_dma_len(sg);
- scb->sg_list[i].address = cpu_to_le32(sg_dma_address(sg));
- scb->sg_list[i].length = cpu_to_le32(len);
- scb->sg_length += len;
- }
- /* Copy the first SG into the data pointer area. */
- hscb->data_pointer = scb->sg_list[0].address;
- hscb->data_count = scb->sg_list[0].length;
- scb->sg_count = i;
- hscb->SG_segment_count = i;
- hscb->SG_list_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, &scb->sg_list[1]));
- } else {
- scb->sg_count = 0;
- scb->sg_length = 0;
- hscb->SG_segment_count = 0;
- hscb->SG_list_pointer = 0;
- hscb->data_count = 0;
- hscb->data_pointer = 0;
- }
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_queue
- *
- * Description:
- * Queue a SCB to the controller.
- *-F*************************************************************************/
-static int aic7xxx_queue_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
-{
- struct aic7xxx_host *p;
- struct aic7xxx_scb *scb;
- struct aic_dev_data *aic_dev;
-
- p = (struct aic7xxx_host *) cmd->device->host->hostdata;
-
- aic_dev = cmd->device->hostdata;
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic_dev->active_cmds > aic_dev->max_q_depth)
- {
- printk(WARN_LEAD "Commands queued exceeds queue "
- "depth, active=%d\n",
- p->host_no, CTL_OF_CMD(cmd),
- aic_dev->active_cmds);
- }
-#endif
-
- scb = scbq_remove_head(&p->scb_data->free_scbs);
- if (scb == NULL)
- {
- aic7xxx_allocate_scb(p);
- scb = scbq_remove_head(&p->scb_data->free_scbs);
- if(scb == NULL)
- {
- printk(WARN_LEAD "Couldn't get a free SCB.\n", p->host_no,
- CTL_OF_CMD(cmd));
- return 1;
- }
- }
- scb->cmd = cmd;
-
- /*
- * Make sure the scsi_cmnd pointer is saved, the struct it points to
- * is set up properly, and the parity error flag is reset, then send
- * the SCB to the sequencer and watch the fun begin.
- */
- aic7xxx_position(cmd) = scb->hscb->tag;
- cmd->scsi_done = fn;
- cmd->result = DID_OK;
- aic7xxx_error(cmd) = DID_OK;
- aic7xxx_status(cmd) = 0;
- cmd->host_scribble = NULL;
-
- /*
- * Construct the SCB beforehand, so the sequencer is
- * paused a minimal amount of time.
- */
- aic7xxx_buildscb(p, cmd, scb);
-
- scb->flags |= SCB_ACTIVE | SCB_WAITINGQ;
-
- scbq_insert_tail(&p->waiting_scbs, scb);
- aic7xxx_run_waiting_queues(p);
- return (0);
-}
-
-static DEF_SCSI_QCMD(aic7xxx_queue)
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_bus_device_reset
- *
- * Description:
- * Abort or reset the current SCSI command(s). If the scb has not
- * previously been aborted, then we attempt to send a BUS_DEVICE_RESET
- * message to the target. If the scb has previously been unsuccessfully
- * aborted, then we will reset the channel and have all devices renegotiate.
- * Returns an enumerated type that indicates the status of the operation.
- *-F*************************************************************************/
-static int __aic7xxx_bus_device_reset(struct scsi_cmnd *cmd)
-{
- struct aic7xxx_host *p;
- struct aic7xxx_scb *scb;
- struct aic7xxx_hwscb *hscb;
- int channel;
- unsigned char saved_scbptr, lastphase;
- unsigned char hscb_index;
- int disconnected;
- struct aic_dev_data *aic_dev;
-
- if(cmd == NULL)
- {
- printk(KERN_ERR "aic7xxx_bus_device_reset: called with NULL cmd!\n");
- return FAILED;
- }
- p = (struct aic7xxx_host *)cmd->device->host->hostdata;
- aic_dev = AIC_DEV(cmd);
- if(aic7xxx_position(cmd) < p->scb_data->numscbs)
- scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]);
- else
- return FAILED;
-
- hscb = scb->hscb;
-
- aic7xxx_isr(p);
- aic7xxx_done_cmds_complete(p);
- /* If the command was already complete or just completed, then we didn't
- * do a reset; return FAILED. */
- if(!(scb->flags & SCB_ACTIVE))
- return FAILED;
-
- pause_sequencer(p);
- lastphase = aic_inb(p, LASTPHASE);
- if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
- {
- printk(INFO_LEAD "Bus Device reset, scb flags 0x%x, ",
- p->host_no, CTL_OF_SCB(scb), scb->flags);
- switch (lastphase)
- {
- case P_DATAOUT:
- printk("Data-Out phase\n");
- break;
- case P_DATAIN:
- printk("Data-In phase\n");
- break;
- case P_COMMAND:
- printk("Command phase\n");
- break;
- case P_MESGOUT:
- printk("Message-Out phase\n");
- break;
- case P_STATUS:
- printk("Status phase\n");
- break;
- case P_MESGIN:
- printk("Message-In phase\n");
- break;
- default:
- /*
- * We're not in a valid phase, so assume we're idle.
- */
- printk("while idle, LASTPHASE = 0x%x\n", lastphase);
- break;
- }
- printk(INFO_LEAD "SCSISIGI 0x%x, SEQADDR 0x%x, SSTAT0 0x%x, SSTAT1 "
- "0x%x\n", p->host_no, CTL_OF_SCB(scb),
- aic_inb(p, SCSISIGI),
- aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8),
- aic_inb(p, SSTAT0), aic_inb(p, SSTAT1));
- printk(INFO_LEAD "SG_CACHEPTR 0x%x, SSTAT2 0x%x, STCNT 0x%x\n", p->host_no,
- CTL_OF_SCB(scb),
- (p->features & AHC_ULTRA2) ? aic_inb(p, SG_CACHEPTR) : 0,
- aic_inb(p, SSTAT2),
- aic_inb(p, STCNT + 2) << 16 | aic_inb(p, STCNT + 1) << 8 |
- aic_inb(p, STCNT));
- }
-
- channel = cmd->device->channel;
-
- /*
- * Send a Device Reset Message:
- * The target that is holding up the bus may not be the same as
- * the one that triggered this timeout (different commands have
- * different timeout lengths). Our strategy here is to queue an
- * abort message to the timed out target if it is disconnected.
- * Otherwise, if we have an active target we stuff the message buffer
- * with an abort message and assert ATN in the hopes that the target
- * will let go of the bus and go to the mesgout phase. If this
- * fails, we'll get another timeout a few seconds later which will
- * attempt a bus reset.
- */
- saved_scbptr = aic_inb(p, SCBPTR);
- disconnected = FALSE;
-
- if (lastphase != P_BUSFREE)
- {
- if (aic_inb(p, SCB_TAG) >= p->scb_data->numscbs)
- {
- printk(WARN_LEAD "Invalid SCB ID %d is active, "
- "SCB flags = 0x%x.\n", p->host_no,
- CTL_OF_CMD(cmd), scb->hscb->tag, scb->flags);
- unpause_sequencer(p, FALSE);
- return FAILED;
- }
- if (scb->hscb->tag == aic_inb(p, SCB_TAG))
- {
- if ( (lastphase == P_MESGOUT) || (lastphase == P_MESGIN) )
- {
- printk(WARN_LEAD "Device reset, Message buffer "
- "in use\n", p->host_no, CTL_OF_SCB(scb));
- unpause_sequencer(p, FALSE);
- return FAILED;
- }
-
- if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
- printk(INFO_LEAD "Device reset message in "
- "message buffer\n", p->host_no, CTL_OF_SCB(scb));
- scb->flags |= SCB_RESET | SCB_DEVICE_RESET;
- aic7xxx_error(cmd) = DID_RESET;
- aic_dev->flags |= BUS_DEVICE_RESET_PENDING;
- /* Send the abort message to the active SCB. */
- aic_outb(p, HOST_MSG, MSG_OUT);
- aic_outb(p, lastphase | ATNO, SCSISIGO);
- unpause_sequencer(p, FALSE);
- spin_unlock_irq(p->host->host_lock);
- ssleep(1);
- spin_lock_irq(p->host->host_lock);
- if(aic_dev->flags & BUS_DEVICE_RESET_PENDING)
- return FAILED;
- else
- return SUCCESS;
- }
- } /* if (last_phase != P_BUSFREE).....indicates we are idle and can work */
- /*
- * Simply set the MK_MESSAGE flag and the SEQINT handler will do
- * the rest on a reconnect/connect.
- */
- scb->hscb->control |= MK_MESSAGE;
- scb->flags |= SCB_RESET | SCB_DEVICE_RESET;
- aic_dev->flags |= BUS_DEVICE_RESET_PENDING;
- /*
- * Check to see if the command is on the qinfifo. If it is, then we will
- * not need to queue the command again since the card should start it soon
- */
- if (aic7xxx_search_qinfifo(p, cmd->device->channel, cmd->device->id, cmd->device->lun, hscb->tag,
- 0, TRUE, NULL) == 0)
- {
- disconnected = TRUE;
- if ((hscb_index = aic7xxx_find_scb(p, scb)) != SCB_LIST_NULL)
- {
- unsigned char scb_control;
-
- aic_outb(p, hscb_index, SCBPTR);
- scb_control = aic_inb(p, SCB_CONTROL);
- /*
- * If the DISCONNECTED bit is not set in SCB_CONTROL, then we are
- * actually on the waiting list, not disconnected, and we don't
- * need to requeue the command.
- */
- disconnected = (scb_control & DISCONNECTED);
- aic_outb(p, scb_control | MK_MESSAGE, SCB_CONTROL);
- }
- if (disconnected)
- {
- /*
- * Actually requeue this SCB in case we can select the
- * device before it reconnects. This can result in the command
- * being on the qinfifo twice, but we don't care because it will
- * all get cleaned up if/when the reset takes place.
- */
- if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
- printk(INFO_LEAD "Queueing device reset command.\n", p->host_no,
- CTL_OF_SCB(scb));
- p->qinfifo[p->qinfifonext++] = scb->hscb->tag;
- if (p->features & AHC_QUEUE_REGS)
- aic_outb(p, p->qinfifonext, HNSCB_QOFF);
- else
- aic_outb(p, p->qinfifonext, KERNEL_QINPOS);
- scb->flags |= SCB_QUEUED_ABORT;
- }
- }
- aic_outb(p, saved_scbptr, SCBPTR);
- unpause_sequencer(p, FALSE);
- spin_unlock_irq(p->host->host_lock);
- msleep(1000/4);
- spin_lock_irq(p->host->host_lock);
- if(aic_dev->flags & BUS_DEVICE_RESET_PENDING)
- return FAILED;
- else
- return SUCCESS;
-}
-
-static int aic7xxx_bus_device_reset(struct scsi_cmnd *cmd)
-{
- int rc;
-
- spin_lock_irq(cmd->device->host->host_lock);
- rc = __aic7xxx_bus_device_reset(cmd);
- spin_unlock_irq(cmd->device->host->host_lock);
-
- return rc;
-}
-
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_panic_abort
- *
- * Description:
- * Dump controller state and halt the machine (used by the panic_on_abort option).
- *-F*************************************************************************/
-static void aic7xxx_panic_abort(struct aic7xxx_host *p, struct scsi_cmnd *cmd)
-{
-
- printk("aic7xxx driver version %s\n", AIC7XXX_C_VERSION);
- printk("Controller type:\n %s\n", board_names[p->board_name_index]);
- printk("p->flags=0x%lx, p->chip=0x%x, p->features=0x%x, "
- "sequencer %s paused\n",
- p->flags, p->chip, p->features,
- (aic_inb(p, HCNTRL) & PAUSE) ? "is" : "isn't" );
- pause_sequencer(p);
- disable_irq(p->irq);
- aic7xxx_print_card(p);
- aic7xxx_print_scratch_ram(p);
- spin_unlock_irq(p->host->host_lock);
- for(;;) barrier();
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_abort
- *
- * Description:
- * Abort the current SCSI command(s).
- *-F*************************************************************************/
-static int __aic7xxx_abort(struct scsi_cmnd *cmd)
-{
- struct aic7xxx_scb *scb = NULL;
- struct aic7xxx_host *p;
- int found=0, disconnected;
- unsigned char saved_hscbptr, hscbptr, scb_control;
- struct aic_dev_data *aic_dev;
-
- if(cmd == NULL)
- {
- printk(KERN_ERR "aic7xxx_abort: called with NULL cmd!\n");
- return FAILED;
- }
- p = (struct aic7xxx_host *)cmd->device->host->hostdata;
- aic_dev = AIC_DEV(cmd);
- if(aic7xxx_position(cmd) < p->scb_data->numscbs)
- scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]);
- else
- return FAILED;
-
- aic7xxx_isr(p);
- aic7xxx_done_cmds_complete(p);
- /* If the command was already complete or just completed, then we didn't
- * do an abort; return FAILED. */
- if(!(scb->flags & SCB_ACTIVE))
- return FAILED;
-
- pause_sequencer(p);
-
- /*
- * I added a new config option to the driver: "panic_on_abort" that will
- * cause the driver to panic and the machine to stop on the first abort
- * or reset call into the driver. At that point, it prints out a lot of
- * useful information for me which I can then use to try and debug the
- * problem. Simply enable the boot time prompt in order to activate this
- * code.
- */
- if (aic7xxx_panic_on_abort)
- aic7xxx_panic_abort(p, cmd);
-
- if (aic7xxx_verbose & VERBOSE_ABORT)
- {
- printk(INFO_LEAD "Aborting scb %d, flags 0x%x, SEQADDR 0x%x, LASTPHASE "
- "0x%x\n",
- p->host_no, CTL_OF_SCB(scb), scb->hscb->tag, scb->flags,
- aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8),
- aic_inb(p, LASTPHASE));
- printk(INFO_LEAD "SG_CACHEPTR 0x%x, SG_COUNT %d, SCSISIGI 0x%x\n",
- p->host_no, CTL_OF_SCB(scb), (p->features & AHC_ULTRA2) ?
- aic_inb(p, SG_CACHEPTR) : 0, aic_inb(p, SG_COUNT),
- aic_inb(p, SCSISIGI));
- printk(INFO_LEAD "SSTAT0 0x%x, SSTAT1 0x%x, SSTAT2 0x%x\n",
- p->host_no, CTL_OF_SCB(scb), aic_inb(p, SSTAT0),
- aic_inb(p, SSTAT1), aic_inb(p, SSTAT2));
- }
-
- if (scb->flags & SCB_WAITINGQ)
- {
- if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
- printk(INFO_LEAD "SCB found on waiting list and "
- "aborted.\n", p->host_no, CTL_OF_SCB(scb));
- scbq_remove(&p->waiting_scbs, scb);
- scbq_remove(&aic_dev->delayed_scbs, scb);
- aic_dev->active_cmds++;
- p->activescbs++;
- scb->flags &= ~(SCB_WAITINGQ | SCB_ACTIVE);
- scb->flags |= SCB_ABORT | SCB_QUEUED_FOR_DONE;
- goto success;
- }
-
-/*
- * We just checked the waiting_q, now for the QINFIFO
- */
- if ( ((found = aic7xxx_search_qinfifo(p, cmd->device->id, cmd->device->channel,
- cmd->device->lun, scb->hscb->tag, SCB_ABORT | SCB_QUEUED_FOR_DONE,
- FALSE, NULL)) != 0) &&
- (aic7xxx_verbose & VERBOSE_ABORT_PROCESS))
- {
- printk(INFO_LEAD "SCB found in QINFIFO and aborted.\n", p->host_no,
- CTL_OF_SCB(scb));
- goto success;
- }
-
-/*
- * QINFIFO, waitingq, completeq done. Next, check WAITING_SCB list in card
- */
-
- saved_hscbptr = aic_inb(p, SCBPTR);
- if ((hscbptr = aic7xxx_find_scb(p, scb)) != SCB_LIST_NULL)
- {
- aic_outb(p, hscbptr, SCBPTR);
- scb_control = aic_inb(p, SCB_CONTROL);
- disconnected = scb_control & DISCONNECTED;
- /*
- * If the DISCONNECTED bit is not set in SCB_CONTROL, then we are
- * either currently active or on the waiting list.
- */
- if(!disconnected && aic_inb(p, LASTPHASE) == P_BUSFREE) {
- if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
- printk(INFO_LEAD "SCB found on hardware waiting"
- " list and aborted.\n", p->host_no, CTL_OF_SCB(scb));
- /* If we are the only waiting command, stop the selection engine */
- if (aic_inb(p, WAITING_SCBH) == hscbptr && aic_inb(p, SCB_NEXT) ==
- SCB_LIST_NULL)
- {
- aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ);
- aic_outb(p, CLRSELTIMEO, CLRSINT1);
- aic_outb(p, SCB_LIST_NULL, WAITING_SCBH);
- }
- else
- {
- unsigned char prev, next;
- prev = SCB_LIST_NULL;
- next = aic_inb(p, WAITING_SCBH);
- while(next != SCB_LIST_NULL)
- {
- aic_outb(p, next, SCBPTR);
- if (next == hscbptr)
- {
- next = aic_inb(p, SCB_NEXT);
- if (prev != SCB_LIST_NULL)
- {
- aic_outb(p, prev, SCBPTR);
- aic_outb(p, next, SCB_NEXT);
- }
- else
- aic_outb(p, next, WAITING_SCBH);
- aic_outb(p, hscbptr, SCBPTR);
- next = SCB_LIST_NULL;
- }
- else
- {
- prev = next;
- next = aic_inb(p, SCB_NEXT);
- }
- }
- }
- aic_outb(p, SCB_LIST_NULL, SCB_TAG);
- aic_outb(p, 0, SCB_CONTROL);
- aic7xxx_add_curscb_to_free_list(p);
- scb->flags = SCB_ABORT | SCB_QUEUED_FOR_DONE;
- goto success;
- }
- else if (!disconnected)
- {
- /*
- * We are the currently active command
- */
- if((aic_inb(p, LASTPHASE) == P_MESGIN) ||
- (aic_inb(p, LASTPHASE) == P_MESGOUT))
- {
- /*
- * Message buffer busy, unable to abort
- */
- printk(INFO_LEAD "message buffer busy, unable to abort.\n",
- p->host_no, CTL_OF_SCB(scb));
- unpause_sequencer(p, FALSE);
- return FAILED;
- }
- /* Fallthrough to below, set ATNO after we set SCB_CONTROL */
- }
- aic_outb(p, scb_control | MK_MESSAGE, SCB_CONTROL);
- if(!disconnected)
- {
- aic_outb(p, HOST_MSG, MSG_OUT);
- aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO);
- }
- aic_outb(p, saved_hscbptr, SCBPTR);
- }
- else
- {
- /*
- * The scb isn't in the card at all and it is active and it isn't in
- * any of the queues, so it must be disconnected and paged out. Fall
- * through to the code below.
- */
- disconnected = 1;
- }
-
- p->flags |= AHC_ABORT_PENDING;
- scb->flags |= SCB_QUEUED_ABORT | SCB_ABORT | SCB_RECOVERY_SCB;
- scb->hscb->control |= MK_MESSAGE;
- if(disconnected)
- {
- if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
- printk(INFO_LEAD "SCB disconnected. Queueing Abort"
- " SCB.\n", p->host_no, CTL_OF_SCB(scb));
- p->qinfifo[p->qinfifonext++] = scb->hscb->tag;
- if (p->features & AHC_QUEUE_REGS)
- aic_outb(p, p->qinfifonext, HNSCB_QOFF);
- else
- aic_outb(p, p->qinfifonext, KERNEL_QINPOS);
- }
- unpause_sequencer(p, FALSE);
- spin_unlock_irq(p->host->host_lock);
- msleep(1000/4);
- spin_lock_irq(p->host->host_lock);
- if (p->flags & AHC_ABORT_PENDING)
- {
- if (aic7xxx_verbose & VERBOSE_ABORT_RETURN)
- printk(INFO_LEAD "Abort never delivered, returning FAILED\n", p->host_no,
- CTL_OF_CMD(cmd));
- p->flags &= ~AHC_ABORT_PENDING;
- return FAILED;
- }
- if (aic7xxx_verbose & VERBOSE_ABORT_RETURN)
- printk(INFO_LEAD "Abort successful.\n", p->host_no, CTL_OF_CMD(cmd));
- return SUCCESS;
-
-success:
- if (aic7xxx_verbose & VERBOSE_ABORT_RETURN)
- printk(INFO_LEAD "Abort successful.\n", p->host_no, CTL_OF_CMD(cmd));
- aic7xxx_run_done_queue(p, TRUE);
- unpause_sequencer(p, FALSE);
- return SUCCESS;
-}
-
-static int aic7xxx_abort(struct scsi_cmnd *cmd)
-{
- int rc;
-
- spin_lock_irq(cmd->device->host->host_lock);
- rc = __aic7xxx_abort(cmd);
- spin_unlock_irq(cmd->device->host->host_lock);
-
- return rc;
-}
-
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_reset
- *
- * Description:
- * Resetting the bus always succeeds - it has to, otherwise the
- * kernel will panic! Try a surgical technique - sending a BUS
- * DEVICE RESET message - on the offending target before pulling
- * the SCSI bus reset line.
- *-F*************************************************************************/
-static int aic7xxx_reset(struct scsi_cmnd *cmd)
-{
- struct aic7xxx_scb *scb;
- struct aic7xxx_host *p;
- struct aic_dev_data *aic_dev;
-
- p = (struct aic7xxx_host *) cmd->device->host->hostdata;
- spin_lock_irq(p->host->host_lock);
-
- aic_dev = AIC_DEV(cmd);
- if(aic7xxx_position(cmd) < p->scb_data->numscbs)
- {
- scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]);
- if (scb->cmd != cmd)
- scb = NULL;
- }
- else
- {
- scb = NULL;
- }
-
- /*
- * I added a new config option to the driver: "panic_on_abort" that will
- * cause the driver to panic and the machine to stop on the first abort
- * or reset call into the driver. At that point, it prints out a lot of
- * useful information for me which I can then use to try and debug the
- * problem. Simply enable the boot time prompt in order to activate this
- * code.
- */
- if (aic7xxx_panic_on_abort)
- aic7xxx_panic_abort(p, cmd);
-
- pause_sequencer(p);
-
- while((aic_inb(p, INTSTAT) & INT_PEND) && !(p->flags & AHC_IN_ISR))
- {
- aic7xxx_isr(p);
- pause_sequencer(p);
- }
- aic7xxx_done_cmds_complete(p);
-
- if(scb && (scb->cmd == NULL))
- {
- /*
- * We just completed the command when we ran the isr stuff, so we no
- * longer have it.
- */
- unpause_sequencer(p, FALSE);
- spin_unlock_irq(p->host->host_lock);
- return SUCCESS;
- }
-
-/*
- * By this point, we want to already know what we are going to do and
- * only have the following code implement our course of action.
- */
- aic7xxx_reset_channel(p, cmd->device->channel, TRUE);
- if (p->features & AHC_TWIN)
- {
- aic7xxx_reset_channel(p, cmd->device->channel ^ 0x01, TRUE);
- restart_sequencer(p);
- }
- aic_outb(p, aic_inb(p, SIMODE1) & ~(ENREQINIT|ENBUSFREE), SIMODE1);
- aic7xxx_clear_intstat(p);
- p->flags &= ~AHC_HANDLING_REQINITS;
- p->msg_type = MSG_TYPE_NONE;
- p->msg_index = 0;
- p->msg_len = 0;
- aic7xxx_run_done_queue(p, TRUE);
- unpause_sequencer(p, FALSE);
- spin_unlock_irq(p->host->host_lock);
- ssleep(2);
- return SUCCESS;
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_biosparam
- *
- * Description:
- * Return the disk geometry for the given SCSI device.
- *
- * Note:
- * This function is broken for today's really large drives and needs
- * to be fixed.
- *-F*************************************************************************/
-static int
-aic7xxx_biosparam(struct scsi_device *sdev, struct block_device *bdev,
- sector_t capacity, int geom[])
-{
- sector_t heads, sectors, cylinders;
- int ret;
- struct aic7xxx_host *p;
- unsigned char *buf;
-
- p = (struct aic7xxx_host *) sdev->host->hostdata;
- buf = scsi_bios_ptable(bdev);
-
- if ( buf )
- {
- ret = scsi_partsize(buf, capacity, &geom[2], &geom[0], &geom[1]);
- kfree(buf);
- if ( ret != -1 )
- return(ret);
- }
-
- heads = 64;
- sectors = 32;
- cylinders = capacity >> 11;
-
- if ((p->flags & AHC_EXTEND_TRANS_A) && (cylinders > 1024))
- {
- heads = 255;
- sectors = 63;
- cylinders = capacity >> 14;
- if(capacity > (65535 * heads * sectors))
- cylinders = 65535;
- else
- cylinders = ((unsigned int)capacity) / (unsigned int)(heads * sectors);
- }
-
- geom[0] = (int)heads;
- geom[1] = (int)sectors;
- geom[2] = (int)cylinders;
-
- return (0);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_release
- *
- * Description:
- * Free the passed in Scsi_Host memory structures prior to unloading the
- * module.
- *-F*************************************************************************/
-static int
-aic7xxx_release(struct Scsi_Host *host)
-{
- struct aic7xxx_host *p = (struct aic7xxx_host *) host->hostdata;
- struct aic7xxx_host *next, *prev;
-
- if(p->irq)
- free_irq(p->irq, p);
-#ifdef MMAPIO
- if(p->maddr)
- {
- iounmap(p->maddr);
- }
-#endif /* MMAPIO */
- if(!p->pdev)
- release_region(p->base, MAXREG - MINREG);
-#ifdef CONFIG_PCI
- else {
- pci_release_regions(p->pdev);
- pci_dev_put(p->pdev);
- }
-#endif
- prev = NULL;
- next = first_aic7xxx;
- while(next != NULL)
- {
- if(next == p)
- {
- if(prev == NULL)
- first_aic7xxx = next->next;
- else
- prev->next = next->next;
- }
- else
- {
- prev = next;
- }
- next = next->next;
- }
- aic7xxx_free(p);
- return(0);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_print_card
- *
- * Description:
- * Print out all of the control registers on the card
- *
- * NOTE: This function is not yet safe for use on the VLB and EISA
- * controllers, so it isn't used on those controllers at all.
- *-F*************************************************************************/
-static void
-aic7xxx_print_card(struct aic7xxx_host *p)
-{
- int i, j, k, chip;
- static struct register_ranges {
- int num_ranges;
- int range_val[32];
- } cards_ds[] = {
- { 0, {0,} }, /* none */
- {10, {0x00, 0x05, 0x08, 0x11, 0x18, 0x19, 0x1f, 0x1f, 0x60, 0x60, /*7771*/
- 0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9b, 0x9f} },
- { 9, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7850*/
- 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} },
- { 9, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7860*/
- 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} },
- {10, {0x00, 0x05, 0x08, 0x11, 0x18, 0x19, 0x1c, 0x1f, 0x60, 0x60, /*7870*/
- 0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} },
- {10, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1a, 0x1c, 0x1f, 0x60, 0x60, /*7880*/
- 0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} },
- {16, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7890*/
- 0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9f, 0x9f,
- 0xe0, 0xf1, 0xf4, 0xf4, 0xf6, 0xf6, 0xf8, 0xf8, 0xfa, 0xfc,
- 0xfe, 0xff} },
- {12, {0x00, 0x05, 0x08, 0x11, 0x18, 0x19, 0x1b, 0x1f, 0x60, 0x60, /*7895*/
- 0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a,
- 0x9f, 0x9f, 0xe0, 0xf1} },
- {16, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7896*/
- 0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9f, 0x9f,
- 0xe0, 0xf1, 0xf4, 0xf4, 0xf6, 0xf6, 0xf8, 0xf8, 0xfa, 0xfc,
- 0xfe, 0xff} },
- {12, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7892*/
- 0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9c, 0x9f,
- 0xe0, 0xf1, 0xf4, 0xfc} },
- {12, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7899*/
- 0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9c, 0x9f,
- 0xe0, 0xf1, 0xf4, 0xfc} },
- };
- chip = p->chip & AHC_CHIPID_MASK;
- printk("%s at ",
- board_names[p->board_name_index]);
- switch(p->chip & ~AHC_CHIPID_MASK)
- {
- case AHC_VL:
- printk("VLB Slot %d.\n", p->pci_device_fn);
- break;
- case AHC_EISA:
- printk("EISA Slot %d.\n", p->pci_device_fn);
- break;
- case AHC_PCI:
- default:
- printk("PCI %d/%d/%d.\n", p->pci_bus, PCI_SLOT(p->pci_device_fn),
- PCI_FUNC(p->pci_device_fn));
- break;
- }
-
- /*
- * the registers on the card....
- */
- printk("Card Dump:\n");
- k = 0;
- for(i=0; i<cards_ds[chip].num_ranges; i++)
- {
- for(j = cards_ds[chip].range_val[ i * 2 ];
- j <= cards_ds[chip].range_val[ i * 2 + 1 ] ;
- j++)
- {
- printk("%02x:%02x ", j, aic_inb(p, j));
- if(++k == 13)
- {
- printk("\n");
- k=0;
- }
- }
- }
- if(k != 0)
- printk("\n");
-
- /*
- * If this was an Ultra2 controller, then we just hosed the card in terms
- * of the QUEUE REGS. This function is only called at init time or by
- * the panic_abort function, so it's safe to assume a generic init time
- * setting here
- */
-
- if(p->features & AHC_QUEUE_REGS)
- {
- aic_outb(p, 0, SDSCB_QOFF);
- aic_outb(p, 0, SNSCB_QOFF);
- aic_outb(p, 0, HNSCB_QOFF);
- }
-
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_print_scratch_ram
- *
- * Description:
- * Print out the scratch RAM values on the card.
- *-F*************************************************************************/
-static void
-aic7xxx_print_scratch_ram(struct aic7xxx_host *p)
-{
- int i, k;
-
- k = 0;
- printk("Scratch RAM:\n");
- for(i = SRAM_BASE; i < SEQCTL; i++)
- {
- printk("%02x:%02x ", i, aic_inb(p, i));
- if(++k == 13)
- {
- printk("\n");
- k=0;
- }
- }
- if (p->features & AHC_MORE_SRAM)
- {
- for(i = TARG_OFFSET; i < 0x80; i++)
- {
- printk("%02x:%02x ", i, aic_inb(p, i));
- if(++k == 13)
- {
- printk("\n");
- k=0;
- }
- }
- }
- printk("\n");
-}
-
-
-#include "aic7xxx_old/aic7xxx_proc.c"
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(AIC7XXX_H_VERSION);
-
-
-static struct scsi_host_template driver_template = {
- .show_info = aic7xxx_show_info,
- .detect = aic7xxx_detect,
- .release = aic7xxx_release,
- .info = aic7xxx_info,
- .queuecommand = aic7xxx_queue,
- .slave_alloc = aic7xxx_slave_alloc,
- .slave_configure = aic7xxx_slave_configure,
- .slave_destroy = aic7xxx_slave_destroy,
- .bios_param = aic7xxx_biosparam,
- .eh_abort_handler = aic7xxx_abort,
- .eh_device_reset_handler = aic7xxx_bus_device_reset,
- .eh_host_reset_handler = aic7xxx_reset,
- .can_queue = 255,
- .this_id = -1,
- .max_sectors = 2048,
- .cmd_per_lun = 3,
- .use_clustering = ENABLE_CLUSTERING,
-};
-
-#include "scsi_module.c"
-
-/*
- * Overrides for Emacs so that we almost follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 2
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -2
- * c-argdecl-indent: 2
- * c-label-offset: -2
- * c-continued-statement-offset: 2
- * c-continued-brace-offset: 0
- * indent-tabs-mode: nil
- * tab-width: 8
- * End:
- */
diff --git a/drivers/scsi/aic7xxx_old/aic7xxx.h b/drivers/scsi/aic7xxx_old/aic7xxx.h
deleted file mode 100644
index 0116c8128a6b..000000000000
--- a/drivers/scsi/aic7xxx_old/aic7xxx.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*+M*************************************************************************
- * Adaptec AIC7xxx device driver for Linux.
- *
- * Copyright (c) 1994 John Aycock
- * The University of Calgary Department of Computer Science.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * $Id: aic7xxx.h,v 3.2 1996/07/23 03:37:26 deang Exp $
- *-M*************************************************************************/
-#ifndef _aic7xxx_h
-#define _aic7xxx_h
-
-#define AIC7XXX_H_VERSION "5.2.0"
-
-#endif /* _aic7xxx_h */
diff --git a/drivers/scsi/aic7xxx_old/aic7xxx.reg b/drivers/scsi/aic7xxx_old/aic7xxx.reg
deleted file mode 100644
index f67b4bced01c..000000000000
--- a/drivers/scsi/aic7xxx_old/aic7xxx.reg
+++ /dev/null
@@ -1,1401 +0,0 @@
-/*
- * Aic7xxx register and scratch ram definitions.
- *
- * Copyright (c) 1994-1998 Justin Gibbs.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Where this Software is combined with software released under the terms of
- * the GNU General Public License ("GPL") and the terms of the GPL would require the
- * combined work to also be released under the terms of the GPL, the terms
- * and conditions of this License will apply in addition to those of the
- * GPL with the exception of any terms or conditions of this License that
- * conflict with, or are expressly prohibited by, the GPL.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $Id: aic7xxx.reg,v 1.4 1997/06/27 19:38:39 gibbs Exp $
- */
-
-/*
- * This file is processed by the aic7xxx_asm utility for use in assembling
- * firmware for the aic7xxx family of SCSI host adapters as well as to generate
- * a C header file for use in the kernel portion of the Aic7xxx driver.
- *
- * All page numbers refer to the Adaptec AIC-7770 Data Book available from
- * Adaptec's Technical Documents Department 1-800-934-2766
- */
-
-/*
- * SCSI Sequence Control (p. 3-11).
- * Each bit, when set, starts a specific SCSI sequence on the bus
- */
-register SCSISEQ {
- address 0x000
- access_mode RW
- bit TEMODE 0x80
- bit ENSELO 0x40
- bit ENSELI 0x20
- bit ENRSELI 0x10
- bit ENAUTOATNO 0x08
- bit ENAUTOATNI 0x04
- bit ENAUTOATNP 0x02
- bit SCSIRSTO 0x01
-}
-
-/*
- * SCSI Transfer Control 0 Register (pp. 3-13).
- * Controls the SCSI module data path.
- */
-register SXFRCTL0 {
- address 0x001
- access_mode RW
- bit DFON 0x80
- bit DFPEXP 0x40
- bit FAST20 0x20
- bit CLRSTCNT 0x10
- bit SPIOEN 0x08
- bit SCAMEN 0x04
- bit CLRCHN 0x02
-}
-
-/*
- * SCSI Transfer Control 1 Register (pp. 3-14,15).
- * Controls the SCSI module data path.
- */
-register SXFRCTL1 {
- address 0x002
- access_mode RW
- bit BITBUCKET 0x80
- bit SWRAPEN 0x40
- bit ENSPCHK 0x20
- mask STIMESEL 0x18
- bit ENSTIMER 0x04
- bit ACTNEGEN 0x02
- bit STPWEN 0x01 /* Powered Termination */
-}
-
-/*
- * SCSI Control Signal Read Register (p. 3-15).
- * Reads the actual state of the SCSI bus pins
- */
-register SCSISIGI {
- address 0x003
- access_mode RO
- bit CDI 0x80
- bit IOI 0x40
- bit MSGI 0x20
- bit ATNI 0x10
- bit SELI 0x08
- bit BSYI 0x04
- bit REQI 0x02
- bit ACKI 0x01
-/*
- * Possible phases in SCSISIGI
- */
- mask PHASE_MASK CDI|IOI|MSGI
- mask P_DATAOUT 0x00
- mask P_DATAIN IOI
- mask P_COMMAND CDI
- mask P_MESGOUT CDI|MSGI
- mask P_STATUS CDI|IOI
- mask P_MESGIN CDI|IOI|MSGI
-}
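
For illustration, the PHASE_MASK grouping above means the current bus phase can be recovered from SCSISIGI by masking off everything except CDI, IOI and MSGI. A minimal C sketch, assuming the driver's aic_inb() accessor and the register/bit names that aic7xxx_asm generates from this file (the helper name itself is hypothetical):

static const char *
aic7xxx_phase_name(struct aic7xxx_host *p)
{
  /* Mask the latched control signals down to C/D, I/O and MSG. */
  switch (aic_inb(p, SCSISIGI) & PHASE_MASK)
  {
    case P_DATAOUT: return "DATA OUT";
    case P_DATAIN:  return "DATA IN";
    case P_COMMAND: return "COMMAND";
    case P_MESGOUT: return "MESSAGE OUT";
    case P_STATUS:  return "STATUS";
    case P_MESGIN:  return "MESSAGE IN";
    default:        return "unknown"; /* bus not in an information phase */
  }
}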
-
-/*
- * SCSI Control Signal Write Register (p. 3-16).
- * Writing to this register modifies the control signals on the bus. Only
- * those signals that are allowed in the current mode (Initiator/Target) are
- * asserted.
- */
-register SCSISIGO {
- address 0x003
- access_mode WO
- bit CDO 0x80
- bit IOO 0x40
- bit MSGO 0x20
- bit ATNO 0x10
- bit SELO 0x08
- bit BSYO 0x04
- bit REQO 0x02
- bit ACKO 0x01
-/*
- * Possible phases to write into SCSISIGO
- */
- mask PHASE_MASK CDI|IOI|MSGI
- mask P_DATAOUT 0x00
- mask P_DATAIN IOI
- mask P_COMMAND CDI
- mask P_MESGOUT CDI|MSGI
- mask P_STATUS CDI|IOI
- mask P_MESGIN CDI|IOI|MSGI
-}
-
-/*
- * SCSI Rate Control (p. 3-17).
- * Contents of this register determine the Synchronous SCSI data transfer
- * rate and the maximum synchronous Req/Ack offset. An offset of 0 in the
- * SOFS (3:0) bits disables synchronous data transfers. Any offset value
- * greater than 0 enables synchronous transfers.
- */
-register SCSIRATE {
- address 0x004
- access_mode RW
- bit WIDEXFER 0x80 /* Wide transfer control */
- mask SXFR 0x70 /* Sync transfer rate */
- mask SXFR_ULTRA2 0x7f /* Sync transfer rate */
- mask SOFS 0x0f /* Sync offset */
-}
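
A rate setting for a pre-Ultra2 chip is composed from the three fields above: the wide bit, the sync rate code and the sync offset, where a zero offset disables synchronous transfers. A hedged sketch (the function name is hypothetical; Ultra2 parts would use the SXFR_ULTRA2/SOFS_ULTRA2 masks instead):

static void
aic7xxx_set_scsirate(struct aic7xxx_host *p, unsigned char rate_code,
                     unsigned char offset, int wide)
{
  /* WIDEXFER | SXFR | SOFS; an offset of zero turns sync transfers off. */
  unsigned char rate = (wide ? WIDEXFER : 0) |
                       (rate_code & SXFR) | (offset & SOFS);

  aic_outb(p, rate, SCSIRATE);
}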
-
-/*
- * SCSI ID (p. 3-18).
- * Contains the ID of the board and the current target on the
- * selected channel.
- */
-register SCSIID {
- address 0x005
- access_mode RW
- mask TID 0xf0 /* Target ID mask */
- mask OID 0x0f /* Our ID mask */
- /*
- * SCSI Maximum Offset (p. 4-61 aic7890/91 Data Book)
- * The aic7890/91 allow an offset of up to 127 transfers in both wide
- * and narrow mode.
- */
- alias SCSIOFFSET
- mask SOFS_ULTRA2 0x7f /* Sync offset U2 chips */
-}
-
-/*
- * SCSI Latched Data (p. 3-19).
- * Read/Write latches used to transfer data on the SCSI bus during
- * Automatic or Manual PIO mode. SCSIDATH can be used for the
- * upper byte of a 16bit wide asynchronous data phase transfer.
- */
-register SCSIDATL {
- address 0x006
- access_mode RW
-}
-
-register SCSIDATH {
- address 0x007
- access_mode RW
-}
-
-/*
- * SCSI Transfer Count (pp. 3-19,20)
- * These registers count down the number of bytes transferred
- * across the SCSI bus. The counter is decremented only once
- * the data has been safely transferred. SDONE in SSTAT0 is
- * set when STCNT goes to 0
- */
-register STCNT {
- address 0x008
- size 3
- access_mode RW
-}
-
-/*
- * Option Mode Register (Alternate Mode) (p. 5-198)
- * This register is used to set certain options on Ultra3 based chips.
- * The chip must be in alternate mode (bit ALT_MODE in SFUNCT must be set)
- */
-register OPTIONMODE {
- address 0x008
- access_mode RW
- bit AUTORATEEN 0x80
- bit AUTOACKEN 0x40
- bit ATNMGMNTEN 0x20
- bit BUSFREEREV 0x10
- bit EXPPHASEDIS 0x08
- bit SCSIDATL_IMGEN 0x04
- bit AUTO_MSGOUT_DE 0x02
- bit DIS_MSGIN_DUALEDGE 0x01
-}
-
-
-/*
- * Clear SCSI Interrupt 0 (p. 3-20)
- * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT0.
- */
-register CLRSINT0 {
- address 0x00b
- access_mode WO
- bit CLRSELDO 0x40
- bit CLRSELDI 0x20
- bit CLRSELINGO 0x10
- bit CLRSWRAP 0x08
- bit CLRSPIORDY 0x02
-}
-
-/*
- * SCSI Status 0 (p. 3-21)
- * Contains one set of SCSI Interrupt codes
- * These are most likely of interest to the sequencer
- */
-register SSTAT0 {
- address 0x00b
- access_mode RO
- bit TARGET 0x80 /* Board acting as target */
- bit SELDO 0x40 /* Selection Done */
- bit SELDI 0x20 /* Board has been selected */
- bit SELINGO 0x10 /* Selection In Progress */
- bit SWRAP 0x08 /* 24bit counter wrap */
- bit IOERR 0x08 /* LVD Transceiver mode changed */
- bit SDONE 0x04 /* STCNT = 0x000000 */
- bit SPIORDY 0x02 /* SCSI PIO Ready */
- bit DMADONE 0x01 /* DMA transfer completed */
-}
-
-/*
- * Clear SCSI Interrupt 1 (p. 3-23)
- * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT1.
- */
-register CLRSINT1 {
- address 0x00c
- access_mode WO
- bit CLRSELTIMEO 0x80
- bit CLRATNO 0x40
- bit CLRSCSIRSTI 0x20
- bit CLRBUSFREE 0x08
- bit CLRSCSIPERR 0x04
- bit CLRPHASECHG 0x02
- bit CLRREQINIT 0x01
-}
-
-/*
- * SCSI Status 1 (p. 3-24)
- */
-register SSTAT1 {
- address 0x00c
- access_mode RO
- bit SELTO 0x80
- bit ATNTARG 0x40
- bit SCSIRSTI 0x20
- bit PHASEMIS 0x10
- bit BUSFREE 0x08
- bit SCSIPERR 0x04
- bit PHASECHG 0x02
- bit REQINIT 0x01
-}
-
-/*
- * SCSI Status 2 (pp. 3-25,26)
- */
-register SSTAT2 {
- address 0x00d
- access_mode RO
- bit OVERRUN 0x80
- bit SHVALID 0x40
- bit WIDE_RES 0x20
- bit EXP_ACTIVE 0x10 /* SCSI Expander Active */
- bit CRCVALERR 0x08 /* CRC Value Error */
- bit CRCENDERR 0x04 /* CRC End Error */
- bit CRCREQERR 0x02 /* CRC REQ Error */
- bit DUAL_EDGE_ERROR 0x01 /* Invalid pins for Dual Edge phase */
- mask SFCNT 0x1f
-}
-
-/*
- * SCSI Status 3 (p. 3-26)
- */
-register SSTAT3 {
- address 0x00e
- access_mode RO
- mask SCSICNT 0xf0
- mask OFFCNT 0x0f
-}
-
-/*
- * SCSI ID for the aic7890/91 chips
- */
-register SCSIID_ULTRA2 {
- address 0x00f
- access_mode RW
- mask TID 0xf0 /* Target ID mask */
- mask OID 0x0f /* Our ID mask */
-}
-
-/*
- * SCSI Interrupt Mode 0 (p. 3-28)
- * Setting any bit will enable the corresponding function
- * in SIMODE0 to interrupt via the IRQ pin.
- */
-register SIMODE0 {
- address 0x010
- access_mode RW
- bit ENSELDO 0x40
- bit ENSELDI 0x20
- bit ENSELINGO 0x10
- bit ENSWRAP 0x08
- bit ENIOERR 0x08 /* LVD Transceiver mode changes */
- bit ENSDONE 0x04
- bit ENSPIORDY 0x02
- bit ENDMADONE 0x01
-}
-
-/*
- * SCSI Interrupt Mode 1 (pp. 3-28,29)
- * Setting any bit will enable the corresponding function
- * in SIMODE1 to interrupt via the IRQ pin.
- */
-register SIMODE1 {
- address 0x011
- access_mode RW
- bit ENSELTIMO 0x80
- bit ENATNTARG 0x40
- bit ENSCSIRST 0x20
- bit ENPHASEMIS 0x10
- bit ENBUSFREE 0x08
- bit ENSCSIPERR 0x04
- bit ENPHASECHG 0x02
- bit ENREQINIT 0x01
-}
-
-/*
- * SCSI Data Bus (Low and High) (p. 3-29)
- * These registers read data on the SCSI Data bus directly.
- */
-register SCSIBUSL {
- address 0x012
- access_mode RO
-}
-
-register SCSIBUSH {
- address 0x013
- access_mode RO
-}
-
-/*
- * SCSI/Host Address (p. 3-30)
- * These registers hold the host address for the byte about to be
- * transferred on the SCSI bus. They are counted up in the same
- * manner as STCNT is counted down. SHADDR should always be used
- * to determine the address of the last byte transferred since HADDR
- * can be skewed by write ahead.
- */
-register SHADDR {
- address 0x014
- size 4
- access_mode RO
-}
-
-/*
- * Selection Timeout Timer (p. 3-30)
- */
-register SELTIMER {
- address 0x018
- access_mode RW
- bit STAGE6 0x20
- bit STAGE5 0x10
- bit STAGE4 0x08
- bit STAGE3 0x04
- bit STAGE2 0x02
- bit STAGE1 0x01
-}
-
-/*
- * Selection/Reselection ID (p. 3-31)
- * Upper four bits are the device id. The ONEBIT is set when the re/selecting
- * device did not set its own ID.
- */
-register SELID {
- address 0x019
- access_mode RW
- mask SELID_MASK 0xf0
- bit ONEBIT 0x08
-}
-
-/*
- * Serial Port I/O Capability register (p. 4-95 aic7860 Data Book)
- * Indicates if external logic has been attached to the chip to
- * perform the tasks of accessing a serial eeprom, testing termination
- * strength, and performing cable detection. On the aic7860, most of
- * these features are handled on chip, but on the aic7855 an attached
- * aic3800 does the grunt work.
- */
-register SPIOCAP {
- address 0x01b
- access_mode RW
- bit SOFT1 0x80
- bit SOFT0 0x40
- bit SOFTCMDEN 0x20
- bit HAS_BRDCTL 0x10 /* External Board control */
- bit SEEPROM 0x08 /* External serial eeprom logic */
- bit EEPROM 0x04 /* Writable external BIOS ROM */
- bit ROM 0x02 /* Logic for accessing external ROM */
- bit SSPIOCPS 0x01 /* Termination and cable detection */
-}
-
-/*
- * SCSI Block Control (p. 3-32)
- * Controls Bus type and channel selection. In a twin channel configuration
- * addresses 0x00-0x1e are gated to the appropriate channel based on this
- * register. SELWIDE allows for the coexistence of 8bit and 16bit devices
- * on a wide bus.
- */
-register SBLKCTL {
- address 0x01f
- access_mode RW
- bit DIAGLEDEN 0x80 /* Aic78X0 only */
- bit DIAGLEDON 0x40 /* Aic78X0 only */
- bit AUTOFLUSHDIS 0x20
- bit SELBUSB 0x08
- bit ENAB40 0x08 /* LVD transceiver active */
- bit ENAB20 0x04 /* SE/HVD transceiver active */
- bit SELWIDE 0x02
- bit XCVR 0x01 /* External transceiver active */
-}
-
-/*
- * Sequencer Control (p. 3-33)
- * Error detection mode and speed configuration
- */
-register SEQCTL {
- address 0x060
- access_mode RW
- bit PERRORDIS 0x80
- bit PAUSEDIS 0x40
- bit FAILDIS 0x20
- bit FASTMODE 0x10
- bit BRKADRINTEN 0x08
- bit STEP 0x04
- bit SEQRESET 0x02
- bit LOADRAM 0x01
-}
-
-/*
- * Sequencer RAM Data (p. 3-34)
- * Single byte window into the Scratch Ram area starting at the address
- * specified by SEQADDR0 and SEQADDR1. To write a full word, simply write
- * four bytes in succession. The SEQADDRs will increment after the most
- * significant byte is written
- */
-register SEQRAM {
- address 0x061
- access_mode RW
-}
-
-/*
- * Sequencer Address Registers (p. 3-35)
- * Only the first bit of SEQADDR1 holds addressing information
- */
-register SEQADDR0 {
- address 0x062
- access_mode RW
-}
-
-register SEQADDR1 {
- address 0x063
- access_mode RW
- mask SEQADDR1_MASK 0x01
-}
-
-/*
- * Accumulator
- * We cheat by passing arguments in the Accumulator up to the kernel driver
- */
-register ACCUM {
- address 0x064
- access_mode RW
- accumulator
-}
-
-register SINDEX {
- address 0x065
- access_mode RW
- sindex
-}
-
-register DINDEX {
- address 0x066
- access_mode RW
-}
-
-register ALLONES {
- address 0x069
- access_mode RO
- allones
-}
-
-register ALLZEROS {
- address 0x06a
- access_mode RO
- allzeros
-}
-
-register NONE {
- address 0x06a
- access_mode WO
- none
-}
-
-register FLAGS {
- address 0x06b
- access_mode RO
- bit ZERO 0x02
- bit CARRY 0x01
-}
-
-register SINDIR {
- address 0x06c
- access_mode RO
-}
-
-register DINDIR {
- address 0x06d
- access_mode WO
-}
-
-register FUNCTION1 {
- address 0x06e
- access_mode RW
-}
-
-register STACK {
- address 0x06f
- access_mode RO
-}
-
-/*
- * Board Control (p. 3-43)
- */
-register BCTL {
- address 0x084
- access_mode RW
- bit ACE 0x08
- bit ENABLE 0x01
-}
-
-register DSCOMMAND0 {
- address 0x084
- access_mode RW
- bit CACHETHEN 0x80
- bit DPARCKEN 0x40
- bit MPARCKEN 0x20
- bit EXTREQLCK 0x10
- bit INTSCBRAMSEL 0x08
- bit RAMPS 0x04
- bit USCBSIZE32 0x02
- bit CIOPARCKEN 0x01
-}
-
-/*
- * On the aic78X0 chips, Board Control is replaced by the DSCommand
- * register (p. 4-64)
- */
-register DSCOMMAND {
- address 0x084
- access_mode RW
- bit CACHETHEN 0x80 /* Cache Threshold enable */
- bit DPARCKEN 0x40 /* Data Parity Check Enable */
- bit MPARCKEN 0x20 /* Memory Parity Check Enable */
- bit EXTREQLCK 0x10 /* External Request Lock */
-}
-
-/*
- * Bus On/Off Time (p. 3-44)
- */
-register BUSTIME {
- address 0x085
- access_mode RW
- mask BOFF 0xf0
- mask BON 0x0f
-}
-
-/*
- * Bus Speed (p. 3-45)
- */
-register BUSSPD {
- address 0x086
- access_mode RW
- mask DFTHRSH 0xc0
- mask STBOFF 0x38
- mask STBON 0x07
- mask DFTHRSH_100 0xc0
-}
-
-/*
- * Host Control (p. 3-47) R/W
- * Overall host control of the device.
- */
-register HCNTRL {
- address 0x087
- access_mode RW
- bit POWRDN 0x40
- bit SWINT 0x10
- bit IRQMS 0x08
- bit PAUSE 0x04
- bit INTEN 0x02
- bit CHIPRST 0x01
- bit CHIPRSTACK 0x01
-}
-
-/*
- * Host Address (p. 3-48)
- * This register contains the address of the byte about
- * to be transferred across the host bus.
- */
-register HADDR {
- address 0x088
- size 4
- access_mode RW
-}
-
-register HCNT {
- address 0x08c
- size 3
- access_mode RW
-}
-
-/*
- * SCB Pointer (p. 3-49)
- * Gate one of the four SCBs into the SCBARRAY window.
- */
-register SCBPTR {
- address 0x090
- access_mode RW
-}
-
-/*
- * Interrupt Status (p. 3-50)
- * Status for system interrupts
- */
-register INTSTAT {
- address 0x091
- access_mode RW
- bit BRKADRINT 0x08
- bit SCSIINT 0x04
- bit CMDCMPLT 0x02
- bit SEQINT 0x01
- mask BAD_PHASE SEQINT /* unknown scsi bus phase */
- mask SEND_REJECT 0x10|SEQINT /* sending a message reject */
- mask NO_IDENT 0x20|SEQINT /* no IDENTIFY after reconnect*/
- mask NO_MATCH 0x30|SEQINT /* no cmd match for reconnect */
- mask EXTENDED_MSG 0x40|SEQINT /* Extended message received */
- mask WIDE_RESIDUE 0x50|SEQINT /* need kernel to back up */
- /* the SG array for us */
- mask REJECT_MSG 0x60|SEQINT /* Reject message received */
- mask BAD_STATUS 0x70|SEQINT /* Bad status from target */
- mask RESIDUAL 0x80|SEQINT /* Residual byte count != 0 */
- mask AWAITING_MSG 0xa0|SEQINT /*
- * Kernel requested to specify
- * a message to this target
- * (command was null), so tell
- * it that it can fill the
- * message buffer.
- */
- mask SEQ_SG_FIXUP 0xb0|SEQINT /* need help with fixing up
- * the sg array pointer after
- * a phasemis with no valid
- * sg elements in the shadow
- * pipeline.
- */
- mask TRACEPOINT2 0xc0|SEQINT
- mask MSGIN_PHASEMIS 0xd0|SEQINT /*
- * Target changed phase on us
- * when we were expecting
- * another msgin byte.
- */
- mask DATA_OVERRUN 0xe0|SEQINT /*
- * Target attempted to write
- * beyond the bounds of its
- * command.
- */
-
- mask SEQINT_MASK 0xf0|SEQINT /* SEQINT Status Codes */
- mask INT_PEND (BRKADRINT|SEQINT|SCSIINT|CMDCMPLT)
-}
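
Because every sequencer interrupt sets SEQINT and encodes its reason in the upper nibble, the kernel side can separate "which interrupt" from "why" with the two masks defined above. A hedged sketch of that decode (the helper name is hypothetical; the mask and code names come from this file):

static const char *
aic7xxx_seqint_reason(unsigned char intstat)
{
  if (!(intstat & SEQINT))
    return "not a sequencer interrupt";

  /* The upper nibble of a SEQINT carries the sequencer's reason code. */
  switch (intstat & SEQINT_MASK)
  {
    case SEND_REJECT:  return "sending a message reject";
    case NO_IDENT:     return "no IDENTIFY after reconnect";
    case NO_MATCH:     return "no command match for reconnect";
    case EXTENDED_MSG: return "extended message received";
    case REJECT_MSG:   return "reject message received";
    case BAD_STATUS:   return "bad status from target";
    case RESIDUAL:     return "residual byte count != 0";
    case DATA_OVERRUN: return "target overran its command";
    default:           return "other sequencer interrupt";
  }
}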
-
-/*
- * Hard Error (p. 3-53)
- * Reporting of catastrophic errors. You usually cannot recover from
- * these without a full board reset.
- */
-register ERROR {
- address 0x092
- access_mode RO
- bit CIOPARERR 0x80 /* Ultra2 only */
- bit PCIERRSTAT 0x40 /* PCI only */
- bit MPARERR 0x20 /* PCI only */
- bit DPARERR 0x10 /* PCI only */
- bit SQPARERR 0x08
- bit ILLOPCODE 0x04
- bit ILLSADDR 0x02
- bit DSCTMOUT 0x02 /* Ultra3 only */
- bit ILLHADDR 0x01
-}
-
-/*
- * Clear Interrupt Status (p. 3-52)
- */
-register CLRINT {
- address 0x092
- access_mode WO
- bit CLRPARERR 0x10 /* PCI only */
- bit CLRBRKADRINT 0x08
- bit CLRSCSIINT 0x04
- bit CLRCMDINT 0x02
- bit CLRSEQINT 0x01
-}
-
-register DFCNTRL {
- address 0x093
- access_mode RW
- bit PRELOADEN 0x80 /* aic7890 only */
- bit WIDEODD 0x40
- bit SCSIEN 0x20
- bit SDMAEN 0x10
- bit SDMAENACK 0x10
- bit HDMAEN 0x08
- bit HDMAENACK 0x08
- bit DIRECTION 0x04
- bit FIFOFLUSH 0x02
- bit FIFORESET 0x01
-}
-
-register DFSTATUS {
- address 0x094
- access_mode RO
- bit PRELOAD_AVAIL 0x80
- bit DWORDEMP 0x20
- bit MREQPEND 0x10
- bit HDONE 0x08
- bit DFTHRESH 0x04
- bit FIFOFULL 0x02
- bit FIFOEMP 0x01
-}
-
-register DFDAT {
- address 0x099
- access_mode RW
-}
-
-/*
- * SCB Auto Increment (p. 3-59)
- * Byte offset into the SCB Array and an optional bit to allow auto
- * incrementing of the address during download and upload operations
- */
-register SCBCNT {
- address 0x09a
- access_mode RW
- bit SCBAUTO 0x80
- mask SCBCNT_MASK 0x1f
-}
-
-/*
- * Queue In FIFO (p. 3-60)
- * Input queue for queued SCBs (commands that the sequencer has yet to start)
- */
-register QINFIFO {
- address 0x09b
- access_mode RW
-}
-
-/*
- * Queue In Count (p. 3-60)
- * Number of queued SCBs
- */
-register QINCNT {
- address 0x09c
- access_mode RO
-}
-
-/*
- * SCSIDATL IMAGE Register (p. 5-104)
- * Writes to this register also go to SCSIDATL, but this register will preserve
- * the data for later reading as long as the SCSIDATL_IMGEN bit in the
- * OPTIONMODE register is set.
- */
-register SCSIDATL_IMG {
- address 0x09c
- access_mode RW
-}
-
-/*
- * Queue Out FIFO (p. 3-61)
- * Queue of SCBs that have completed and await the host
- */
-register QOUTFIFO {
- address 0x09d
- access_mode WO
-}
-
-/*
- * CRC Control 1 Register (p. 5-105)
- * Control bits for the Ultra 160/m CRC facilities
- */
-register CRCCONTROL1 {
- address 0x09d
- access_mode RW
- bit CRCONSEEN 0x80 /* CRC ON Single Edge ENable */
- bit CRCVALCHKEN 0x40 /* CRC Value Check Enable */
- bit CRCENDCHKEN 0x20 /* CRC End Check Enable */
- bit CRCREQCHKEN 0x10
- bit TARGCRCENDEN 0x08 /* Enable End CRC transfer when target */
- bit TARGCRCCNTEN 0x04 /* Enable CRC transfer when target */
-}
-
-/*
- * Queue Out Count (p. 3-61)
- * Number of queued SCBs in the Out FIFO
- */
-register QOUTCNT {
- address 0x09e
- access_mode RO
-}
-
-/*
- * SCSI Phase Register (p. 5-106)
- * Current bus phase
- */
-register SCSIPHASE {
- address 0x09e
- access_mode RO
- bit SP_STATUS 0x20
- bit SP_COMMAND 0x10
- bit SP_MSG_IN 0x08
- bit SP_MSG_OUT 0x04
- bit SP_DATA_IN 0x02
- bit SP_DATA_OUT 0x01
-}
-
-/*
- * Special Function
- */
-register SFUNCT {
- address 0x09f
- access_mode RW
- bit ALT_MODE 0x80
-}
-
-/*
- * SCB Definition (p. 5-4)
- */
-scb {
- address 0x0a0
- SCB_CONTROL {
- size 1
- bit MK_MESSAGE 0x80
- bit DISCENB 0x40
- bit TAG_ENB 0x20
- bit DISCONNECTED 0x04
- mask SCB_TAG_TYPE 0x03
- }
- SCB_TCL {
- size 1
- bit SELBUSB 0x08
- mask TID 0xf0
- mask LID 0x07
- }
- SCB_TARGET_STATUS {
- size 1
- }
- SCB_SGCOUNT {
- size 1
- }
- SCB_SGPTR {
- size 4
- }
- SCB_RESID_SGCNT {
- size 1
- }
- SCB_RESID_DCNT {
- size 3
- }
- SCB_DATAPTR {
- size 4
- }
- SCB_DATACNT {
- /*
- * Really only 3 bytes, but padded to make
- * the kernel's job easier.
- */
- size 4
- }
- SCB_CMDPTR {
- size 4
- }
- SCB_CMDLEN {
- size 1
- }
- SCB_TAG {
- size 1
- }
- SCB_NEXT {
- size 1
- }
- SCB_PREV {
- size 1
- }
- SCB_BUSYTARGETS {
- size 4
- }
-}
-
-const SG_SIZEOF 0x08 /* sizeof(struct ahc_dma) */
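
The eight bytes counted by SG_SIZEOF correspond to one scatter-gather element as the data-phase code in aic7xxx.seq later describes it: a 32-bit host address followed by a 32-bit length, both little-endian. A sketch of that layout for reference (the struct name follows the comment in the sequencer source; it is not redefined here):

struct ahc_dma_seg {
  u_int32_t addr; /* host address of the segment, little-endian */
  u_int32_t len;  /* length of the segment in bytes, little-endian */
};                /* sizeof() == 8 == SG_SIZEOF */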
-
-/* --------------------- AHA-2840-only definitions -------------------- */
-
-register SEECTL_2840 {
- address 0x0c0
- access_mode RW
- bit CS_2840 0x04
- bit CK_2840 0x02
- bit DO_2840 0x01
-}
-
-register STATUS_2840 {
- address 0x0c1
- access_mode RW
- bit EEPROM_TF 0x80
- mask BIOS_SEL 0x60
- mask ADSEL 0x1e
- bit DI_2840 0x01
-}
-
-/* --------------------- AIC-7870-only definitions -------------------- */
-
-register DSPCISTATUS {
- address 0x086
- mask DFTHRSH_100 0xc0
-}
-
-register CCHADDR {
- address 0x0E0
- size 8
-}
-
-register CCHCNT {
- address 0x0E8
-}
-
-register CCSGRAM {
- address 0x0E9
-}
-
-register CCSGADDR {
- address 0x0EA
-}
-
-register CCSGCTL {
- address 0x0EB
- bit CCSGDONE 0x80
- bit CCSGEN 0x08
- bit FLAG 0x02
- bit CCSGRESET 0x01
-}
-
-register CCSCBCNT {
- address 0xEF
-}
-
-register CCSCBCTL {
- address 0x0EE
- bit CCSCBDONE 0x80
- bit ARRDONE 0x40 /* SCB Array prefetch done */
- bit CCARREN 0x10
- bit CCSCBEN 0x08
- bit CCSCBDIR 0x04
- bit CCSCBRESET 0x01
-}
-
-register CCSCBADDR {
- address 0x0ED
-}
-
-register CCSCBRAM {
- address 0xEC
-}
-
-register CCSCBPTR {
- address 0x0F1
-}
-
-register HNSCB_QOFF {
- address 0x0F4
-}
-
-register HESCB_QOFF {
- address 0x0F5
-}
-
-register SNSCB_QOFF {
- address 0x0F6
-}
-
-register SESCB_QOFF {
- address 0x0F7
-}
-
-register SDSCB_QOFF {
- address 0x0F8
-}
-
-register QOFF_CTLSTA {
- address 0x0FA
- bit ESTABLISH_SCB_AVAIL 0x80
- bit SCB_AVAIL 0x40
- bit SNSCB_ROLLOVER 0x20
- bit SDSCB_ROLLOVER 0x10
- bit SESCB_ROLLOVER 0x08
- mask SCB_QSIZE 0x07
- mask SCB_QSIZE_256 0x06
-}
-
-register DFF_THRSH {
- address 0x0FB
- mask WR_DFTHRSH 0x70
- mask RD_DFTHRSH 0x07
- mask RD_DFTHRSH_MIN 0x00
- mask RD_DFTHRSH_25 0x01
- mask RD_DFTHRSH_50 0x02
- mask RD_DFTHRSH_63 0x03
- mask RD_DFTHRSH_75 0x04
- mask RD_DFTHRSH_85 0x05
- mask RD_DFTHRSH_90 0x06
- mask RD_DFTHRSH_MAX 0x07
- mask WR_DFTHRSH_MIN 0x00
- mask WR_DFTHRSH_25 0x10
- mask WR_DFTHRSH_50 0x20
- mask WR_DFTHRSH_63 0x30
- mask WR_DFTHRSH_75 0x40
- mask WR_DFTHRSH_85 0x50
- mask WR_DFTHRSH_90 0x60
- mask WR_DFTHRSH_MAX 0x70
-}
-
-register SG_CACHEPTR {
- access_mode RW
- address 0x0fc
- mask SG_USER_DATA 0xfc
- bit LAST_SEG 0x02
- bit LAST_SEG_DONE 0x01
-}
-
-register BRDCTL {
- address 0x01d
- bit BRDDAT7 0x80
- bit BRDDAT6 0x40
- bit BRDDAT5 0x20
- bit BRDSTB 0x10
- bit BRDCS 0x08
- bit BRDRW 0x04
- bit BRDCTL1 0x02
- bit BRDCTL0 0x01
- /* 7890 Definitions */
- bit BRDDAT4 0x10
- bit BRDDAT3 0x08
- bit BRDDAT2 0x04
- bit BRDRW_ULTRA2 0x02
- bit BRDSTB_ULTRA2 0x01
-}
-
-/*
- * Serial EEPROM Control (p. 4-92 in 7870 Databook)
- * Controls the reading and writing of an external serial 1-bit
- * EEPROM Device. In order to access the serial EEPROM, you must
- * first set the SEEMS bit that generates a request to the memory
- * port for access to the serial EEPROM device. When the memory
- * port is not busy servicing another request, it reconfigures
- * to allow access to the serial EEPROM. When this happens, SEERDY
- * gets set high to verify that the memory port access has been
- * granted.
- *
- * After successful arbitration for the memory port, the SEECS bit of
- * the SEECTL register is connected to the chip select. The SEECK,
- * SEEDO, and SEEDI are connected to the clock, data out, and data in
- * lines respectively. The SEERDY bit of SEECTL is useful in that it
- * gives us an 800 nsec timer. After a write to the SEECTL register,
- * the SEERDY goes high 800 nsec later. The one exception to this is
- * when we first request access to the memory port. The SEERDY goes
- * high to signify that access has been granted and, for this case, has
- * no implied timing.
- *
- * See 93cx6.c for detailed information on the protocol necessary to
- * read the serial EEPROM.
- */
-register SEECTL {
- address 0x01e
- bit EXTARBACK 0x80
- bit EXTARBREQ 0x40
- bit SEEMS 0x20
- bit SEERDY 0x10
- bit SEECS 0x08
- bit SEECK 0x04
- bit SEEDO 0x02
- bit SEEDI 0x01
-}
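
The arbitration handshake described above boils down to raising SEEMS, waiting for SEERDY to confirm that the memory port has been granted, and clearing SEEMS when done; 93cx6.c carries the actual bit-level EEPROM protocol. A hedged sketch of the acquire/release steps (helper names and the poll limit are illustrative only):

static int
aic7xxx_acquire_seeprom(struct aic7xxx_host *p)
{
  int wait = 1000; /* arbitrary poll limit */

  /* Request the memory port for serial EEPROM access. */
  aic_outb(p, SEEMS, SEECTL);
  while (--wait && !(aic_inb(p, SEECTL) & SEERDY))
    udelay(1); /* SEERDY goes high once access has been granted */
  return (aic_inb(p, SEECTL) & SEERDY) != 0;
}

static void
aic7xxx_release_seeprom(struct aic7xxx_host *p)
{
  /* Dropping SEEMS hands the memory port back to its normal duties. */
  aic_outb(p, 0, SEECTL);
}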
-/* ---------------------- Scratch RAM Offsets ------------------------- */
-/* These offsets are either to values that are initialized by the board's
- * BIOS or are specified by the sequencer code.
- *
- * The host adapter card (at least the BIOS) uses 20-2f for SCSI
- * device information, 32-33 and 5a-5f as well. As it turns out, the
- * BIOS trashes 20-2f, writing the synchronous negotiation results
- * on top of the BIOS values, so we re-use those for our per-target
- * scratchspace (actually a value that can be copied directly into
- * SCSIRATE). The kernel driver will enable synchronous negotiation
- * for all targets that have a value other than 0 in the lower four
- * bits of the target scratch space. This should work regardless of
- * whether the bios has been installed.
- */
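
In other words, the byte kept per target at TARG_SCSIRATE can be copied straight into SCSIRATE, and its low four bits (the sync offset) decide whether the kernel driver negotiates synchronous transfers for that target. A hedged sketch of that check, with a hypothetical helper name:

static int
aic7xxx_target_wants_sync(struct aic7xxx_host *p, int target)
{
  /* One byte of scratch RAM per target, directly loadable into SCSIRATE. */
  unsigned char rate = aic_inb(p, TARG_SCSIRATE + target);

  /* A non-zero offset in the low four bits enables sync negotiation. */
  return (rate & 0x0f) != 0;
}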
-
-scratch_ram {
- address 0x020
-
- /*
- * 1 byte per target starting at this address for configuration values
- */
- TARG_SCSIRATE {
- size 16
- }
- /*
- * Bit vector of targets that have ULTRA enabled.
- */
- ULTRA_ENB {
- size 2
- }
- /*
- * Bit vector of targets that have disconnection disabled.
- */
- DISC_DSB {
- size 2
- }
- /*
- * Single byte buffer used to designate the type of message
- * to send to a target.
- */
- MSG_OUT {
- size 1
- }
- /* Parameters for DMA Logic */
- DMAPARAMS {
- size 1
- bit PRELOADEN 0x80
- bit WIDEODD 0x40
- bit SCSIEN 0x20
- bit SDMAEN 0x10
- bit SDMAENACK 0x10
- bit HDMAEN 0x08
- bit HDMAENACK 0x08
- bit DIRECTION 0x04
- bit FIFOFLUSH 0x02
- bit FIFORESET 0x01
- }
- SEQ_FLAGS {
- size 1
- bit IDENTIFY_SEEN 0x80
- bit SCBPTR_VALID 0x20
- bit DPHASE 0x10
- bit AMTARGET 0x08
- bit WIDE_BUS 0x02
- bit TWIN_BUS 0x01
- }
- /*
- * Temporary storage for the
- * target/channel/lun of a
- * reconnecting target
- */
- SAVED_TCL {
- size 1
- }
- /* Working value of the number of SG segments left */
- SG_COUNT {
- size 1
- }
- /* Working value of SG pointer */
- SG_NEXT {
- size 4
- }
- /*
- * The last bus phase as seen by the sequencer.
- */
- LASTPHASE {
- size 1
- bit CDI 0x80
- bit IOI 0x40
- bit MSGI 0x20
- mask PHASE_MASK CDI|IOI|MSGI
- mask P_DATAOUT 0x00
- mask P_DATAIN IOI
- mask P_COMMAND CDI
- mask P_MESGOUT CDI|MSGI
- mask P_STATUS CDI|IOI
- mask P_MESGIN CDI|IOI|MSGI
- mask P_BUSFREE 0x01
- }
- /*
- * head of list of SCBs awaiting
- * selection
- */
- WAITING_SCBH {
- size 1
- }
- /*
- * head of list of SCBs that are
- * disconnected. Used for SCB
- * paging.
- */
- DISCONNECTED_SCBH {
- size 1
- }
- /*
- * head of list of SCBs that are
- * not in use. Used for SCB paging.
- */
- FREE_SCBH {
- size 1
- }
- /*
- * Address of the hardware scb array in the host.
- */
- HSCB_ADDR {
- size 4
- }
- /*
- * Address of the 256 byte array storing the SCBID of outstanding
- * untagged SCBs indexed by TCL.
- */
- SCBID_ADDR {
- size 4
- }
- /*
- * Address of the array of command descriptors used to store
- * information about incoming selections.
- */
- TMODE_CMDADDR {
- size 4
- }
- KERNEL_QINPOS {
- size 1
- }
- QINPOS {
- size 1
- }
- QOUTPOS {
- size 1
- }
- /*
- * Offset into the command descriptor array for the next
- * available descriptor to use.
- */
- TMODE_CMDADDR_NEXT {
- size 1
- }
- ARG_1 {
- size 1
- mask SEND_MSG 0x80
- mask SEND_SENSE 0x40
- mask SEND_REJ 0x20
- mask MSGOUT_PHASEMIS 0x10
- alias RETURN_1
- }
- ARG_2 {
- size 1
- alias RETURN_2
- }
-
- /*
- * Snapshot of MSG_OUT taken after each message is sent.
- */
- LAST_MSG {
- size 1
- }
-
- /*
- * Number of times we have filled the CCSGRAM with prefetched
- * SG elements.
- */
- PREFETCH_CNT {
- size 1
- }
-
-
- /*
- * These are reserved registers in the card's scratch ram. Some of
- * the values are specified in the AHA2742 technical reference manual
- * and are initialized by the BIOS at boot time.
- */
- SCSICONF {
- address 0x05a
- size 1
- bit TERM_ENB 0x80
- bit RESET_SCSI 0x40
- mask HSCSIID 0x07 /* our SCSI ID */
- mask HWSCSIID 0x0f /* our SCSI ID if Wide Bus */
- }
- HOSTCONF {
- address 0x05d
- size 1
- }
- HA_274_BIOSCTRL {
- address 0x05f
- size 1
- mask BIOSMODE 0x30
- mask BIOSDISABLED 0x30
- bit CHANNEL_B_PRIMARY 0x08
- }
- /*
- * Per target SCSI offset values for Ultra2 controllers.
- */
- TARG_OFFSET {
- address 0x070
- size 16
- }
-}
-
-const SCB_LIST_NULL 0xff
-
-const CCSGADDR_MAX 0x80
-const CCSGRAM_MAXSEGS 16
-
-/* Offsets into the SCBID array where different data is stored */
-const UNTAGGEDSCB_OFFSET 0
-const QOUTFIFO_OFFSET 1
-const QINFIFO_OFFSET 2
-
-/* WDTR Message values */
-const BUS_8_BIT 0x00
-const BUS_16_BIT 0x01
-const BUS_32_BIT 0x02
-
-/* Offset maximums */
-const MAX_OFFSET_8BIT 0x0f
-const MAX_OFFSET_16BIT 0x08
-const MAX_OFFSET_ULTRA2 0x7f
-const HOST_MSG 0xff
-
-/* Target mode command processing constants */
-const CMD_GROUP_CODE_SHIFT 0x05
-const CMD_GROUP0_BYTE_DELTA -4
-const CMD_GROUP2_BYTE_DELTA -6
-const CMD_GROUP4_BYTE_DELTA 4
-const CMD_GROUP5_BYTE_DELTA 11
-
-/*
- * Downloaded (kernel inserted) constants
- */
-
-/*
- * Number of command descriptors in the command descriptor array.
- */
-const TMODE_NUMCMDS download
diff --git a/drivers/scsi/aic7xxx_old/aic7xxx.seq b/drivers/scsi/aic7xxx_old/aic7xxx.seq
deleted file mode 100644
index dc3bb81cff0c..000000000000
--- a/drivers/scsi/aic7xxx_old/aic7xxx.seq
+++ /dev/null
@@ -1,1539 +0,0 @@
-/*
- * Adaptec 274x/284x/294x device driver firmware for Linux and FreeBSD.
- *
- * Copyright (c) 1994-1999 Justin Gibbs.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Where this Software is combined with software released under the terms of
- * the GNU General Public License (GPL) and the terms of the GPL would require the
- * combined work to also be released under the terms of the GPL, the terms
- * and conditions of this License will apply in addition to those of the
- * GPL with the exception of any terms or conditions of this License that
- * conflict with, or are expressly prohibited by, the GPL.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $Id: aic7xxx.seq,v 1.77 1998/06/28 02:58:57 gibbs Exp $
- */
-
-#include "aic7xxx.reg"
-#include "scsi_message.h"
-
-/*
- * A few words on the waiting SCB list:
- * After starting the selection hardware, we check for reconnecting targets
- * as well as for our selection to complete just in case the reselection wins
- * bus arbitration. The problem with this is that we must keep track of the
- * SCB that we've already pulled from the QINFIFO and started the selection
- * on just in case the reselection wins so that we can retry the selection at
- * a later time. This problem cannot be resolved by holding a single entry
- * in scratch ram since a reconnecting target can request sense and this will
- * create yet another SCB waiting for selection. The solution used here is to
- * use byte 27 of the SCB as a pseudo-next pointer and to thread a list
- * of SCBs that are awaiting selection. Since 0-0xfe are valid SCB indexes,
- * SCB_LIST_NULL is 0xff which is out of range. An entry is also added to
- * this list every time a request sense occurs or after completing a non-tagged
- * command for which a second SCB has been queued. The sequencer will
- * automatically consume the entries.
- */
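
Concretely, WAITING_SCBH in scratch RAM holds the index of the first waiting SCB, each SCB's SCB_NEXT byte (byte 27) holds the index of the next one, and SCB_LIST_NULL (0xff) terminates the chain. A hedged kernel-side sketch of walking that list through the SCBPTR window (the helper name is hypothetical, and this is only safe while the sequencer is paused):

static int
aic7xxx_count_waiting_scbs(struct aic7xxx_host *p)
{
  unsigned char scb_index;
  int count = 0;

  /* Head of the list of SCBs awaiting selection lives in scratch RAM. */
  scb_index = aic_inb(p, WAITING_SCBH);
  while (scb_index != SCB_LIST_NULL)
  {
    /* Gate the SCB into the register window, then follow its next pointer. */
    aic_outb(p, scb_index, SCBPTR);
    scb_index = aic_inb(p, SCB_NEXT);
    count++;
  }
  return count;
}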
-
-reset:
- clr SCSISIGO; /* De-assert BSY */
- and SXFRCTL1, ~BITBUCKET;
- /* Always allow reselection */
- mvi SCSISEQ, ENRSELI|ENAUTOATNP;
-
- if ((p->features & AHC_CMD_CHAN) != 0) {
- /* Ensure that no DMA operations are in progress */
- clr CCSGCTL;
- clr CCSCBCTL;
- }
-
- call clear_target_state;
-poll_for_work:
- and SXFRCTL0, ~SPIOEN;
- if ((p->features & AHC_QUEUE_REGS) == 0) {
- mov A, QINPOS;
- }
-poll_for_work_loop:
- if ((p->features & AHC_QUEUE_REGS) == 0) {
- and SEQCTL, ~PAUSEDIS;
- }
- test SSTAT0, SELDO|SELDI jnz selection;
- test SCSISEQ, ENSELO jnz poll_for_work;
- if ((p->features & AHC_TWIN) != 0) {
- /*
- * Twin channel devices cannot handle things like SELTO
- * interrupts on the "background" channel. So, if we
- * are selecting, keep polling the current channel until
- * either a selection or reselection occurs.
- */
- xor SBLKCTL,SELBUSB; /* Toggle to the other bus */
- test SSTAT0, SELDO|SELDI jnz selection;
- test SCSISEQ, ENSELO jnz poll_for_work;
- xor SBLKCTL,SELBUSB; /* Toggle back */
- }
- cmp WAITING_SCBH,SCB_LIST_NULL jne start_waiting;
-test_queue:
- /* Has the driver posted any work for us? */
- if ((p->features & AHC_QUEUE_REGS) != 0) {
- test QOFF_CTLSTA, SCB_AVAIL jz poll_for_work_loop;
- mov NONE, SNSCB_QOFF;
- inc QINPOS;
- } else {
- or SEQCTL, PAUSEDIS;
- cmp KERNEL_QINPOS, A je poll_for_work_loop;
- inc QINPOS;
- and SEQCTL, ~PAUSEDIS;
- }
-
-/*
- * We have at least one queued SCB now and we don't have any
- * SCBs in the list of SCBs awaiting selection. If we have
- * any SCBs available for use, pull the tag from the QINFIFO
- * and get to work on it.
- */
- if ((p->flags & AHC_PAGESCBS) != 0) {
- mov ALLZEROS call get_free_or_disc_scb;
- }
-
-dequeue_scb:
- add A, -1, QINPOS;
- mvi QINFIFO_OFFSET call fetch_byte;
-
- if ((p->flags & AHC_PAGESCBS) == 0) {
- /* In the non-paging case, the SCBID == hardware SCB index */
- mov SCBPTR, RETURN_2;
- }
-dma_queued_scb:
-/*
- * DMA the SCB from host ram into the current SCB location.
- */
- mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET;
- mov RETURN_2 call dma_scb;
-
-/*
- * Preset the residual fields in case we never go through a data phase.
- * This isn't done by the host so we can avoid a DMA to clear these
- * fields for the normal case of I/O that completes without underrun
- * or overrun conditions.
- */
- if ((p->features & AHC_CMD_CHAN) != 0) {
- bmov SCB_RESID_DCNT, SCB_DATACNT, 3;
- } else {
- mov SCB_RESID_DCNT[0],SCB_DATACNT[0];
- mov SCB_RESID_DCNT[1],SCB_DATACNT[1];
- mov SCB_RESID_DCNT[2],SCB_DATACNT[2];
- }
- mov SCB_RESID_SGCNT, SCB_SGCOUNT;
-
-start_scb:
- /*
- * Place us on the waiting list in case our selection
- * doesn't win during bus arbitration.
- */
- mov SCB_NEXT,WAITING_SCBH;
- mov WAITING_SCBH, SCBPTR;
-start_waiting:
- /*
- * Pull the first entry off of the waiting SCB list.
- */
- mov SCBPTR, WAITING_SCBH;
- call start_selection;
- jmp poll_for_work;
-
-start_selection:
- if ((p->features & AHC_TWIN) != 0) {
- and SINDEX,~SELBUSB,SBLKCTL;/* Clear channel select bit */
- and A,SELBUSB,SCB_TCL; /* Get new channel bit */
- or SINDEX,A;
- mov SBLKCTL,SINDEX; /* select channel */
- }
-initialize_scsiid:
- if ((p->features & AHC_ULTRA2) != 0) {
- and A, TID, SCB_TCL; /* Get target ID */
- and SCSIID_ULTRA2, OID; /* Clear old target */
- or SCSIID_ULTRA2, A;
- } else {
- and A, TID, SCB_TCL; /* Get target ID */
- and SCSIID, OID; /* Clear old target */
- or SCSIID, A;
- }
- mov SCSIDATL, ALLZEROS; /* clear out the latched */
- /* data register, this */
- /* fixes a bug on some */
- /* controllers where the */
- /* last byte written to */
- /* this register can leak */
- /* onto the data bus at */
- /* bad times, such as during */
- /* selection timeouts */
- mvi SCSISEQ, ENSELO|ENAUTOATNO|ENRSELI|ENAUTOATNP ret;
-
-/*
- * Initialize Ultra mode setting and clear the SCSI channel.
- * SINDEX should contain any additional bit's the client wants
- * set in SXFRCTL0.
- */
-initialize_channel:
- or SXFRCTL0, CLRSTCNT|CLRCHN, SINDEX;
- if ((p->features & AHC_ULTRA) != 0) {
-ultra:
- mvi SINDEX, ULTRA_ENB+1;
- test SAVED_TCL, 0x80 jnz ultra_2; /* Target ID > 7 */
- dec SINDEX;
-ultra_2:
- mov FUNCTION1,SAVED_TCL;
- mov A,FUNCTION1;
- test SINDIR, A jz ndx_dtr;
- or SXFRCTL0, FAST20;
- }
-/*
- * Initialize SCSIRATE with the appropriate value for this target.
- * The SCSIRATE settings for each target are stored in an array
- * based at TARG_SCSIRATE.
- */
-ndx_dtr:
- shr A,4,SAVED_TCL;
- if ((p->features & AHC_TWIN) != 0) {
- test SBLKCTL,SELBUSB jz ndx_dtr_2;
- or SAVED_TCL, SELBUSB;
- or A,0x08; /* Channel B entries add 8 */
-ndx_dtr_2:
- }
-
- if ((p->features & AHC_ULTRA2) != 0) {
- add SINDEX, TARG_OFFSET, A;
- mov SCSIOFFSET, SINDIR;
- }
-
- add SINDEX,TARG_SCSIRATE,A;
- mov SCSIRATE,SINDIR ret;
-
-
-selection:
- test SSTAT0,SELDO jnz select_out;
-/*
- * Reselection has been initiated by a target. Make a note that we've been
- * reselected, but haven't seen an IDENTIFY message from the target yet.
- */
-initiator_reselect:
- mvi CLRSINT0, CLRSELDI;
- /* XXX test for and handle ONE BIT condition */
- and SAVED_TCL, SELID_MASK, SELID;
- mvi CLRSINT1,CLRBUSFREE;
- or SIMODE1, ENBUSFREE; /*
- * We aren't expecting a
- * bus free, so interrupt
- * the kernel driver if it
- * happens.
- */
- mvi SPIOEN call initialize_channel;
- mvi MSG_OUT, MSG_NOOP; /* No message to send */
- jmp ITloop;
-
-/*
- * After the selection, remove this SCB from the "waiting SCB"
- * list. This is achieved by simply moving our "next" pointer into
- * WAITING_SCBH. Our next pointer will be set to null the next time this
- * SCB is used, so don't bother with it now.
- */
-select_out:
- /* Turn off the selection hardware */
- mvi SCSISEQ, ENRSELI|ENAUTOATNP; /*
- * ATN on parity errors
- * for "in" phases
- */
- mvi CLRSINT0, CLRSELDO;
- mov SCBPTR, WAITING_SCBH;
- mov WAITING_SCBH,SCB_NEXT;
- mov SAVED_TCL, SCB_TCL;
- mvi CLRSINT1,CLRBUSFREE;
- or SIMODE1, ENBUSFREE; /*
- * We aren't expecting a
- * bus free, so interrupt
- * the kernel driver if it
- * happens.
- */
- mvi SPIOEN call initialize_channel;
-/*
- * As soon as we get a successful selection, the target should go
- * into the message out phase since we have ATN asserted.
- */
- mvi MSG_OUT, MSG_IDENTIFYFLAG;
- or SEQ_FLAGS, IDENTIFY_SEEN;
-
-/*
- * Main loop for information transfer phases. Wait for the target
- * to assert REQ before checking MSG, C/D and I/O for the bus phase.
- */
-ITloop:
- call phase_lock;
-
- mov A, LASTPHASE;
-
- test A, ~P_DATAIN jz p_data;
- cmp A,P_COMMAND je p_command;
- cmp A,P_MESGOUT je p_mesgout;
- cmp A,P_STATUS je p_status;
- cmp A,P_MESGIN je p_mesgin;
-
- mvi INTSTAT,BAD_PHASE; /* unknown phase - signal driver */
- jmp ITloop; /* Try reading the bus again. */
-
-await_busfree:
- and SIMODE1, ~ENBUSFREE;
- call clear_target_state;
- mov NONE, SCSIDATL; /* Ack the last byte */
- and SXFRCTL0, ~SPIOEN;
- test SSTAT1,REQINIT|BUSFREE jz .;
- test SSTAT1, BUSFREE jnz poll_for_work;
- mvi INTSTAT, BAD_PHASE;
-
-clear_target_state:
- /*
- * We assume that the kernel driver may reset us
- * at any time, even in the middle of a DMA, so
- * clear DFCNTRL too.
- */
- clr DFCNTRL;
-
- /*
- * We don't know the target we will connect to,
- * so default to narrow transfers to avoid
- * parity problems.
- */
- if ((p->features & AHC_ULTRA2) != 0) {
- bmov SCSIRATE, ALLZEROS, 2;
- } else {
- clr SCSIRATE;
- and SXFRCTL0, ~(FAST20);
- }
- mvi LASTPHASE, P_BUSFREE;
- /* clear target specific flags */
- clr SEQ_FLAGS ret;
-
-
-data_phase_reinit:
-/*
- * If we re-enter the data phase after going through another phase, the
- * STCNT may have been cleared, so restore it from the residual field.
- * On Ultra2, we have to put it into the HCNT field because we have to
- * drop the data down into the shadow layer via the preload ability.
- */
- if ((p->features & AHC_ULTRA2) != 0) {
- bmov HADDR, SHADDR, 4;
- bmov HCNT, SCB_RESID_DCNT, 3;
- }
- if ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895) {
- bmov STCNT, SCB_RESID_DCNT, 3;
- }
- if ((p->features & AHC_CMD_CHAN) == 0) {
- mvi DINDEX, STCNT;
- mvi SCB_RESID_DCNT call bcopy_3;
- }
- jmp data_phase_loop;
-p_data:
- if ((p->features & AHC_ULTRA2) != 0) {
- mvi DMAPARAMS, PRELOADEN|SCSIEN|HDMAEN;
- } else {
- mvi DMAPARAMS, WIDEODD|SCSIEN|SDMAEN|HDMAEN|FIFORESET;
- }
- test LASTPHASE, IOI jnz . + 2;
- or DMAPARAMS, DIRECTION;
- call assert; /*
- * Ensure entering a data
- * phase is okay - seen identify, etc.
- */
- if ((p->features & AHC_CMD_CHAN) != 0) {
- mvi CCSGADDR, CCSGADDR_MAX;
- }
-
- test SEQ_FLAGS, DPHASE jnz data_phase_reinit;
- or SEQ_FLAGS, DPHASE; /* we've seen a data phase */
- /*
- * Initialize the DMA address and counter from the SCB.
- * Also set SG_COUNT and SG_NEXT in memory since we cannot
- * modify the values in the SCB itself until we see a
- * save data pointers message.
- */
- if ((p->features & AHC_CMD_CHAN) != 0) {
- bmov HADDR, SCB_DATAPTR, 7;
- bmov SG_COUNT, SCB_SGCOUNT, 5;
- if ((p->features & AHC_ULTRA2) == 0) {
- bmov STCNT, HCNT, 3;
- }
- } else {
- mvi DINDEX, HADDR;
- mvi SCB_DATAPTR call bcopy_7;
- call set_stcnt_from_hcnt;
- mvi DINDEX, SG_COUNT;
- mvi SCB_SGCOUNT call bcopy_5;
- }
-data_phase_loop:
- /* Guard against overruns */
- test SG_COUNT, 0xff jnz data_phase_inbounds;
-/*
- * Turn on 'Bit Bucket' mode, set the transfer count to
- * 16meg and let the target run until it changes phase.
- * When the transfer completes, notify the host that we
- * had an overrun.
- */
- or SXFRCTL1,BITBUCKET;
- and DMAPARAMS, ~(HDMAEN|SDMAEN);
- if ((p->features & AHC_ULTRA2) != 0) {
- bmov HCNT, ALLONES, 3;
- }
- if ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895) {
- bmov STCNT, ALLONES, 3;
- }
- if ((p->features & AHC_CMD_CHAN) == 0) {
- mvi STCNT[0], 0xFF;
- mvi STCNT[1], 0xFF;
- mvi STCNT[2], 0xFF;
- }
-
-data_phase_inbounds:
-/* If we are the last SG block, tell the hardware. */
- if ((p->features & AHC_ULTRA2) != 0) {
- shl A, 2, SG_COUNT;
- cmp SG_COUNT,0x01 jne data_phase_wideodd;
- or A, LAST_SEG;
- } else {
- cmp SG_COUNT,0x01 jne data_phase_wideodd;
- and DMAPARAMS, ~WIDEODD;
- }
-data_phase_wideodd:
- if ((p->features & AHC_ULTRA2) != 0) {
- mov SG_CACHEPTR, A;
- mov DFCNTRL, DMAPARAMS; /* start the operation */
- test SXFRCTL1, BITBUCKET jnz data_phase_overrun;
-u2_preload_wait:
- test SSTAT1, PHASEMIS jnz u2_phasemis;
- test DFSTATUS, PRELOAD_AVAIL jz u2_preload_wait;
- } else {
- mov DMAPARAMS call dma;
-data_phase_dma_done:
-/* Go tell the host about any overruns */
- test SXFRCTL1,BITBUCKET jnz data_phase_overrun;
-
-/* Exit if we had an underrun. dma clears SINDEX in this case. */
- test SINDEX,0xff jz data_phase_finish;
- }
-/*
- * Advance the scatter-gather pointers
- */
-sg_advance:
- if ((p->features & AHC_ULTRA2) != 0) {
- cmp SG_COUNT, 0x01 je u2_data_phase_finish;
- } else {
- dec SG_COUNT;
- test SG_COUNT, 0xff jz data_phase_finish;
- }
-
- if ((p->features & AHC_CMD_CHAN) != 0) {
-
- /*
- * Do we have any prefetch left???
- */
- cmp CCSGADDR, CCSGADDR_MAX jne prefetch_avail;
-
- /*
- * Fetch MIN(CCSGADDR_MAX, (SG_COUNT * 8)) bytes.
- */
- add A, -(CCSGRAM_MAXSEGS + 1), SG_COUNT;
- mvi A, CCSGADDR_MAX;
- jc . + 2;
- shl A, 3, SG_COUNT;
- mov CCHCNT, A;
- bmov CCHADDR, SG_NEXT, 4;
- mvi CCSGCTL, CCSGEN|CCSGRESET;
- test CCSGCTL, CCSGDONE jz .;
- and CCSGCTL, ~CCSGEN;
- test CCSGCTL, CCSGEN jnz .;
- mvi CCSGCTL, CCSGRESET;
-prefetch_avail:
- bmov HADDR, CCSGRAM, 8;
- if ((p->features & AHC_ULTRA2) == 0) {
- bmov STCNT, HCNT, 3;
- } else {
- dec SG_COUNT;
- }
- } else {
- mvi DINDEX, HADDR;
- mvi SG_NEXT call bcopy_4;
-
- mvi HCNT[0],SG_SIZEOF;
- clr HCNT[1];
- clr HCNT[2];
-
- or DFCNTRL, HDMAEN|DIRECTION|FIFORESET;
-
- call dma_finish;
-
-/*
- * Copy data from FIFO into SCB data pointer and data count.
- * This assumes that the SG segments are of the form:
- * struct ahc_dma_seg {
- * u_int32_t addr; four bytes, little-endian order
- * u_int32_t len; four bytes, little endian order
- * };
- */
- mvi DINDEX, HADDR;
- call dfdat_in_7;
- call set_stcnt_from_hcnt;
- }
-/* Advance the SG pointer */
- clr A; /* add sizeof(struct scatter) */
- add SG_NEXT[0],SG_SIZEOF;
- adc SG_NEXT[1],A;
-
- if ((p->features & AHC_ULTRA2) != 0) {
- jmp data_phase_loop;
- } else {
- test SSTAT1, REQINIT jz .;
- test SSTAT1,PHASEMIS jz data_phase_loop;
- }
-
-
-/*
- * We've loaded all of our segments into the preload layer. Now, we simply
- * have to wait for it to finish or for us to get a phasemis. And, since
- * we'll get a phasemis if we do finish, all we really need to do is wait
- * for a phasemis then check if we did actually complete all the segments.
- */
- if ((p->features & AHC_ULTRA2) != 0) {
-u2_data_phase_finish:
- test SSTAT1, PHASEMIS jnz u2_phasemis;
- test SG_CACHEPTR, LAST_SEG_DONE jz u2_data_phase_finish;
- clr SG_COUNT;
- test SSTAT1, REQINIT jz .;
- test SSTAT1, PHASEMIS jz data_phase_loop;
-u2_phasemis:
- call ultra2_dmafinish;
- test SG_CACHEPTR, LAST_SEG_DONE jnz data_phase_finish;
- test SSTAT2, SHVALID jnz u2_fixup_residual;
- mvi INTSTAT, SEQ_SG_FIXUP;
- jmp data_phase_finish;
-u2_fixup_residual:
- shr ARG_1, 2, SG_CACHEPTR;
-u2_phasemis_loop:
- and A, 0x3f, SG_COUNT;
- cmp ARG_1, A je data_phase_finish;
-/*
- * Subtract SG_SIZEOF from the SG_NEXT pointer and add 1 to the SG_COUNT
- */
- clr A;
- add SG_NEXT[0], -SG_SIZEOF;
- adc SG_NEXT[1], 0xff;
- inc SG_COUNT;
- jmp u2_phasemis_loop;
- }
-
-data_phase_finish:
-/*
- * After a DMA finishes, save the SG and STCNT residuals back into the SCB
- * We use STCNT instead of HCNT, since it's a reflection of how many bytes
- * were transferred on the SCSI (as opposed to the host) bus.
- */
- if ((p->features & AHC_CMD_CHAN) != 0) {
- bmov SCB_RESID_DCNT, STCNT, 3;
- mov SCB_RESID_SGCNT, SG_COUNT;
- if ((p->features & AHC_ULTRA2) != 0) {
- or SXFRCTL0, CLRSTCNT|CLRCHN;
- }
- } else {
- mov SCB_RESID_DCNT[0],STCNT[0];
- mov SCB_RESID_DCNT[1],STCNT[1];
- mov SCB_RESID_DCNT[2],STCNT[2];
- mov SCB_RESID_SGCNT, SG_COUNT;
- }
-
- jmp ITloop;
-
-data_phase_overrun:
-/*
- * Turn off BITBUCKET mode and notify the host
- */
- if ((p->features & AHC_ULTRA2) != 0) {
-/*
- * Wait for the target to quit transferring data on the SCSI bus
- */
- test SSTAT1, PHASEMIS jz .;
- call ultra2_dmafinish;
- }
- and SXFRCTL1, ~BITBUCKET;
- mvi INTSTAT,DATA_OVERRUN;
- jmp ITloop;
-
-
-
-
-/*
- * Actually turn off the DMA hardware, save our current position into the
- * proper residual variables, wait for the next REQ signal, then jump to
- * the ITloop. Jumping to the ITloop ensures that if we happen to get
- * brought into the data phase again (or are still in it after our last
- * segment) that we will properly signal an overrun to the kernel.
- */
- if ((p->features & AHC_ULTRA2) != 0) {
-ultra2_dmafinish:
- test DFCNTRL, DIRECTION jnz ultra2_dmahalt;
- and DFCNTRL, ~SCSIEN;
- test DFCNTRL, SCSIEN jnz .;
- if ((p->bugs & AHC_BUG_AUTOFLUSH) != 0) {
- or DFCNTRL, FIFOFLUSH;
- }
-ultra2_dmafifoflush:
- if ((p->bugs & AHC_BUG_AUTOFLUSH) != 0) {
- /*
- * hardware bug alert! This needless set of jumps
- * works around a glitch in the silicon. When the
- * PCI DMA fifo goes empty, but there is still SCSI
- * data to be flushed into the PCI DMA fifo (and from
- * there on into main memory), the FIFOEMP bit will
- * come on between the time when the PCI DMA buffer
- * went empty and the next bit of data is copied from
- * the SCSI fifo into the PCI fifo. It should only
- * come on when both FIFOs (meaning the entire FIFO
- * chain) are empty. Since it can take up to 4 cycles
- * for new data to be copied from the SCSI fifo into
- * the PCI fifo, testing for FIFOEMP status for 4
- * extra times gives the needed time for any
- * remaining SCSI fifo data to be put in the PCI fifo
- * before we declare it *truly* empty.
- */
- test DFSTATUS, FIFOEMP jz ultra2_dmafifoflush;
- test DFSTATUS, FIFOEMP jz ultra2_dmafifoflush;
- test DFSTATUS, FIFOEMP jz ultra2_dmafifoflush;
- test DFSTATUS, FIFOEMP jz ultra2_dmafifoflush;
- }
- test DFSTATUS, FIFOEMP jz ultra2_dmafifoflush;
- test DFSTATUS, MREQPEND jnz .;
-ultra2_dmahalt:
- and DFCNTRL, ~(HDMAEN|SCSIEN);
- test DFCNTRL, (HDMAEN|SCSIEN) jnz .;
- ret;
- }
-
-/*
- * Command phase. Set up the DMA registers and let 'er rip.
- */
-p_command:
- call assert;
-
-/*
- * Load HADDR and HCNT.
- */
- if ((p->features & AHC_CMD_CHAN) != 0) {
- bmov HADDR, SCB_CMDPTR, 5;
- bmov HCNT[1], ALLZEROS, 2;
- if ((p->features & AHC_ULTRA2) == 0) {
- bmov STCNT, HCNT, 3;
- }
- } else {
- mvi DINDEX, HADDR;
- mvi SCB_CMDPTR call bcopy_5;
- clr HCNT[1];
- clr HCNT[2];
- call set_stcnt_from_hcnt;
- }
-
- if ((p->features & AHC_ULTRA2) == 0) {
- mvi (SCSIEN|SDMAEN|HDMAEN|DIRECTION|FIFORESET) call dma;
- } else {
- mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN|DIRECTION);
- test SSTAT0, SDONE jnz .;
-p_command_dma_loop:
- test SSTAT0, SDONE jnz p_command_ultra2_dma_done;
- test SSTAT1,PHASEMIS jz p_command_dma_loop; /* ie. underrun */
-p_command_ultra2_dma_done:
- test SCSISIGI, REQI jz p_command_ultra2_shutdown;
- test SSTAT1, (PHASEMIS|REQINIT) jz p_command_ultra2_dma_done;
-p_command_ultra2_shutdown:
- and DFCNTRL, ~(HDMAEN|SCSIEN);
- test DFCNTRL, (HDMAEN|SCSIEN) jnz .;
- or SXFRCTL0, CLRSTCNT|CLRCHN;
- }
- jmp ITloop;
-
-/*
- * Status phase. Wait for the data byte to appear, then read it
- * and store it into the SCB.
- */
-p_status:
- call assert;
-
- mov SCB_TARGET_STATUS, SCSIDATL;
- jmp ITloop;
-
-/*
- * Message out phase. If MSG_OUT is 0x80, build a full identify message
- * sequence and send it to the target. In addition, if the MK_MESSAGE bit
- * is set in the SCB_CONTROL byte, interrupt the host and allow it to send
- * its own message.
- *
- * If MSG_OUT is == HOST_MSG, also interrupt the host and take a message.
- * This is done to allow the host to send messages outside of an identify
- * sequence while protecting the sequencer from testing the MK_MESSAGE bit
- * on an SCB that might not be for the current nexus. (For example, a
- * BDR message in response to a bad reselection would leave us pointed to
- * an SCB that doesn't have anything to do with the current target).
- * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag,
- * bus device reset).
- *
- * When there are no messages to send, MSG_OUT should be set to MSG_NOOP,
- * in case the target decides to put us in this phase for some strange
- * reason.
- */
-p_mesgout_retry:
- or SCSISIGO,ATNO,LASTPHASE;/* turn on ATN for the retry */
-p_mesgout:
- mov SINDEX, MSG_OUT;
- cmp SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host;
-p_mesgout_identify:
- if ((p->features & AHC_WIDE) != 0) {
- and SINDEX,0xf,SCB_TCL; /* lun */
- } else {
- and SINDEX,0x7,SCB_TCL; /* lun */
- }
- and A,DISCENB,SCB_CONTROL; /* mask off disconnect privilege */
- or SINDEX,A; /* or in disconnect privilege */
- or SINDEX,MSG_IDENTIFYFLAG;
-p_mesgout_mk_message:
- test SCB_CONTROL,MK_MESSAGE jz p_mesgout_tag;
- mov SCSIDATL, SINDEX; /* Send the last byte */
- jmp p_mesgout_from_host + 1;/* Skip HOST_MSG test */
-/*
- * Send a tag message if TAG_ENB is set in the SCB control block.
- * Use SCB_TAG (the position in the kernel's SCB array) as the tag value.
- */
-p_mesgout_tag:
- test SCB_CONTROL,TAG_ENB jz p_mesgout_onebyte;
- mov SCSIDATL, SINDEX; /* Send the identify message */
- call phase_lock;
- cmp LASTPHASE, P_MESGOUT jne p_mesgout_done;
- and SCSIDATL,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL;
- call phase_lock;
- cmp LASTPHASE, P_MESGOUT jne p_mesgout_done;
- mov SCB_TAG jmp p_mesgout_onebyte;
-/*
- * Interrupt the driver, and allow it to send a message
- * if it asks.
- */
-p_mesgout_from_host:
- cmp SINDEX, HOST_MSG jne p_mesgout_onebyte;
- mvi INTSTAT,AWAITING_MSG;
- nop;
- /*
- * Did the host detect a phase change?
- */
- cmp RETURN_1, MSGOUT_PHASEMIS je p_mesgout_done;
-
-p_mesgout_onebyte:
- mvi CLRSINT1, CLRATNO;
- mov SCSIDATL, SINDEX;
-
-/*
- * If the next bus phase after ATN drops is a message out, it means
- * that the target is requesting that the last message(s) be resent.
- */
- call phase_lock;
- cmp LASTPHASE, P_MESGOUT je p_mesgout_retry;
-
-p_mesgout_done:
- mvi CLRSINT1,CLRATNO; /* Be sure to turn ATNO off */
- mov LAST_MSG, MSG_OUT;
- cmp MSG_OUT, MSG_IDENTIFYFLAG jne . + 2;
- and SCB_CONTROL, ~MK_MESSAGE;
- mvi MSG_OUT, MSG_NOOP; /* No message left */
- jmp ITloop;
-
-/*
- * Message in phase. Bytes are read using Automatic PIO mode.
- */
-p_mesgin:
- mvi ACCUM call inb_first; /* read the 1st message byte */
-
- test A,MSG_IDENTIFYFLAG jnz mesgin_identify;
- cmp A,MSG_DISCONNECT je mesgin_disconnect;
- cmp A,MSG_SAVEDATAPOINTER je mesgin_sdptrs;
- cmp ALLZEROS,A je mesgin_complete;
- cmp A,MSG_RESTOREPOINTERS je mesgin_rdptrs;
- cmp A,MSG_EXTENDED je mesgin_extended;
- cmp A,MSG_MESSAGE_REJECT je mesgin_reject;
- cmp A,MSG_NOOP je mesgin_done;
- cmp A,MSG_IGN_WIDE_RESIDUE je mesgin_wide_residue;
-
-rej_mesgin:
-/*
- * We have no idea what this message in is, so we issue a message reject
- * and hope for the best. In any case, rejection should be a rare
- * occurrence - signal the driver when it happens.
- */
- mvi INTSTAT,SEND_REJECT; /* let driver know */
-
- mvi MSG_MESSAGE_REJECT call mk_mesg;
-
-mesgin_done:
- mov NONE,SCSIDATL; /*dummy read from latch to ACK*/
- jmp ITloop;
-
-
-mesgin_complete:
-/*
- * We got a "command complete" message, so put the SCB_TAG into the QOUTFIFO,
- * and trigger a completion interrupt. Before doing so, check to see if there
- * is a residual or the status byte is something other than STATUS_GOOD (0).
- * In either of these conditions, we upload the SCB back to the host so it can
- * process this information. In the case of a non-zero status byte, we
- * additionally interrupt the kernel driver synchronously, allowing it to
- * decide if sense should be retrieved. If the kernel driver wishes to request
- * sense, it will fill the kernel SCB with a request sense command and set
- * RETURN_1 to SEND_SENSE. If RETURN_1 is set to SEND_SENSE we redownload
- * the SCB, and process it as the next command by adding it to the waiting list.
- * If the kernel driver does not wish to request sense, it need only clear
- * RETURN_1, and the command is allowed to complete normally. We don't bother
- * to post to the QOUTFIFO in the error cases since it would require extra
- * work in the kernel driver to ensure that the entry was removed before the
- * command complete code tried processing it.
- */
-
-/*
- * First check for residuals
- */
- test SCB_RESID_SGCNT,0xff jnz upload_scb;
- test SCB_TARGET_STATUS,0xff jz complete; /* Good Status? */
-upload_scb:
- mvi DMAPARAMS, FIFORESET;
- mov SCB_TAG call dma_scb;
-check_status:
- test SCB_TARGET_STATUS,0xff jz complete; /* Just a residual? */
- mvi INTSTAT,BAD_STATUS; /* let driver know */
- nop;
- cmp RETURN_1, SEND_SENSE jne complete;
- /* This SCB becomes the next to execute as it will retrieve sense */
- mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET;
- mov SCB_TAG call dma_scb;
-add_to_waiting_list:
- mov SCB_NEXT,WAITING_SCBH;
- mov WAITING_SCBH, SCBPTR;
- /*
- * Prepare our selection hardware before the busfree so we have a
- * high probability of winning arbitration.
- */
- call start_selection;
- jmp await_busfree;
-
-complete:
- /* If we are untagged, clear our address up in host ram */
- test SCB_CONTROL, TAG_ENB jnz complete_post;
- mov A, SAVED_TCL;
- mvi UNTAGGEDSCB_OFFSET call post_byte_setup;
- mvi SCB_LIST_NULL call post_byte;
-
-complete_post:
- /* Post the SCB and issue an interrupt */
- if ((p->features & AHC_QUEUE_REGS) != 0) {
- mov A, SDSCB_QOFF;
- } else {
- mov A, QOUTPOS;
- }
- mvi QOUTFIFO_OFFSET call post_byte_setup;
- mov SCB_TAG call post_byte;
- if ((p->features & AHC_QUEUE_REGS) == 0) {
- inc QOUTPOS;
- }
- mvi INTSTAT,CMDCMPLT;
-
-add_to_free_list:
- call add_scb_to_free_list;
- jmp await_busfree;
-
-/*
- * Is it an extended message? Copy the message to our message buffer and
- * notify the host. The host will tell us whether to reject this message,
- * respond to it with the message that the host placed in our message buffer,
- * or simply to do nothing.
- */
-mesgin_extended:
- mvi INTSTAT,EXTENDED_MSG; /* let driver know */
- jmp ITloop;
-
-/*
- * Is it a disconnect message? Set a flag in the SCB to remind us
- * and await the bus going free.
- */
-mesgin_disconnect:
- or SCB_CONTROL,DISCONNECTED;
- call add_scb_to_disc_list;
- jmp await_busfree;
-
-/*
- * Save data pointers message:
- * Copy the RAM values back into the SCB, but only if we've actually
- * been in a data phase to change them. This
- * protects against bogus data in scratch ram and the residual counts
- * since they are only initialized when we go into data_in or data_out.
- */
-mesgin_sdptrs:
- test SEQ_FLAGS, DPHASE jz mesgin_done;
- /*
- * The SCB SGPTR becomes the next one we'll download,
- * and the SCB DATAPTR becomes the current SHADDR.
- * Use the residual number since STCNT is corrupted by
- * any message transfer.
- */
- if ((p->features & AHC_CMD_CHAN) != 0) {
- bmov SCB_SGCOUNT, SG_COUNT, 5;
- bmov SCB_DATAPTR, SHADDR, 4;
- bmov SCB_DATACNT, SCB_RESID_DCNT, 3;
- } else {
- mvi DINDEX, SCB_SGCOUNT;
- mvi SG_COUNT call bcopy_5;
- mvi DINDEX, SCB_DATAPTR;
- mvi SHADDR call bcopy_4;
- mvi SCB_RESID_DCNT call bcopy_3;
- }
- jmp mesgin_done;
-
-/*
- * Restore pointers message? Data pointers are recopied from the
- * SCB anytime we enter a data phase for the first time, so all
- * we need to do is clear the DPHASE flag and let the data phase
- * code do the rest.
- */
-mesgin_rdptrs:
- and SEQ_FLAGS, ~DPHASE; /*
- * We'll reload them
- * the next time through
- * the dataphase.
- */
- jmp mesgin_done;
-
-/*
- * Identify message? For a reconnecting target, this tells us the lun
- * that the reconnection is for - find the correct SCB and switch to it,
- * clearing the "disconnected" bit so we don't "find" it by accident later.
- */
-mesgin_identify:
-
- if ((p->features & AHC_WIDE) != 0) {
- and A,0x0f; /* lun in lower four bits */
- } else {
- and A,0x07; /* lun in lower three bits */
- }
- or SAVED_TCL,A; /* SAVED_TCL should be complete now */
-
- mvi ARG_2, SCB_LIST_NULL; /* SCBID of prev SCB in disc List */
- call get_untagged_SCBID;
- cmp ARG_1, SCB_LIST_NULL je snoop_tag;
- if ((p->flags & AHC_PAGESCBS) != 0) {
- test SEQ_FLAGS, SCBPTR_VALID jz use_retrieveSCB;
- }
- /*
- * If the SCB was found in the disconnected list (as is
- * always the case in non-paging scenarios), SCBPTR is already
- * set to the correct SCB. So, simply setup the SCB and get
- * on with things.
- */
- mov SCBPTR call rem_scb_from_disc_list;
- jmp setup_SCB;
-/*
- * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message.
- * If we get one, we use the tag returned to find the proper
- * SCB. With SCB paging, this requires searching for both tagged
- * and non-tagged transactions since the SCB may exist in any slot.
- * If we're not using SCB paging, we can use the tag as the direct
- * index to the SCB.
- */
-snoop_tag:
- mov NONE,SCSIDATL; /* ACK Identify MSG */
-snoop_tag_loop:
- call phase_lock;
- cmp LASTPHASE, P_MESGIN jne not_found;
- cmp SCSIBUSL,MSG_SIMPLE_Q_TAG jne not_found;
-get_tag:
- mvi ARG_1 call inb_next; /* tag value */
-
-use_retrieveSCB:
- call retrieveSCB;
-setup_SCB:
- mov A, SAVED_TCL;
- cmp SCB_TCL, A jne not_found_cleanup_scb;
- test SCB_CONTROL,DISCONNECTED jz not_found_cleanup_scb;
- and SCB_CONTROL,~DISCONNECTED;
- or SEQ_FLAGS,IDENTIFY_SEEN; /* make note of IDENTIFY */
- /* See if the host wants to send a message upon reconnection */
- test SCB_CONTROL, MK_MESSAGE jz mesgin_done;
- and SCB_CONTROL, ~MK_MESSAGE;
- mvi HOST_MSG call mk_mesg;
- jmp mesgin_done;
-
-not_found_cleanup_scb:
- test SCB_CONTROL, DISCONNECTED jz . + 3;
- call add_scb_to_disc_list;
- jmp not_found;
- call add_scb_to_free_list;
-not_found:
- mvi INTSTAT, NO_MATCH;
- mvi MSG_BUS_DEV_RESET call mk_mesg;
- jmp mesgin_done;
-
-/*
- * Message reject? Let the kernel driver handle this. If we have an
- * outstanding WDTR or SDTR negotiation, assume that it's a response from
- * the target selecting 8bit or asynchronous transfer, otherwise just ignore
- * it since we have no clue what it pertains to.
- */
-mesgin_reject:
- mvi INTSTAT, REJECT_MSG;
- jmp mesgin_done;
-
-/*
- * Wide Residue. We handle the simple cases, but pass off the one hard case
- * to the kernel (when the residue byte happened to cause us to advance our
- * sg element array, so we now have to back that advance out).
- */
-mesgin_wide_residue:
- mvi ARG_1 call inb_next; /* ACK the wide_residue and get */
- /* the size byte */
-/*
- * In order for this to be reliable, we have to do all sorts of horrible
- * magic in terms of resetting the datafifo and reloading the shadow layer
- * with the correct new values (so that a subsequent save data pointers
- * message will do the right thing). We let the kernel do that work.
- */
- mvi INTSTAT, WIDE_RESIDUE;
- jmp mesgin_done;
-
-/*
- * [ ADD MORE MESSAGE HANDLING HERE ]
- */
-
-/*
- * Locking the driver out, build a one-byte message passed in SINDEX
- * if there is no active message already. SINDEX is returned intact.
- */
-mk_mesg:
- or SCSISIGO,ATNO,LASTPHASE;/* turn on ATNO */
- mov MSG_OUT,SINDEX ret;
-
-/*
- * Functions to read data in Automatic PIO mode.
- *
- * According to Adaptec's documentation, an ACK is not sent on input from
- * the target until SCSIDATL is read from. So we wait until SCSIDATL is
- * latched (the usual way), then read the data byte directly off the bus
- * using SCSIBUSL. When we have pulled the ATN line, or we just want to
- * acknowledge the byte, then we do a dummy read from SCSIDATL. The SCSI
- * spec guarantees that the target will hold the data byte on the bus until
- * we send our ACK.
- *
- * The assumption here is that these are called in a particular sequence,
- * and that REQ is already set when inb_first is called. inb_{first,next}
- * use the same calling convention as inb.
- */
-
-inb_next:
- mov NONE,SCSIDATL; /*dummy read from latch to ACK*/
-inb_next_wait:
- /*
- * If there is a parity error, wait for the kernel to
- * see the interrupt and prepare our message response
- * before continuing.
- */
- test SSTAT1, REQINIT jz inb_next_wait;
- test SSTAT1, SCSIPERR jnz .;
- and LASTPHASE, PHASE_MASK, SCSISIGI;
- cmp LASTPHASE, P_MESGIN jne mesgin_phasemis;
-inb_first:
- mov DINDEX,SINDEX;
- mov DINDIR,SCSIBUSL ret; /*read byte directly from bus*/
-inb_last:
- mov NONE,SCSIDATL ret; /*dummy read from latch to ACK*/
-
-
-mesgin_phasemis:
-/*
- * We expected to receive another byte, but the target changed phase
- */
- mvi INTSTAT, MSGIN_PHASEMIS;
- jmp ITloop;
-
-/*
- * DMA data transfer. HADDR and HCNT must be loaded first, and
- * SINDEX should contain the value to load DFCNTRL with - 0x3d for
- * host->scsi, or 0x39 for scsi->host. The SCSI channel is cleared
- * during initialization.
- */
-if ((p->features & AHC_ULTRA2) == 0) {
-dma:
- mov DFCNTRL,SINDEX;
-dma_loop:
- test SSTAT0,DMADONE jnz dma_dmadone;
- test SSTAT1,PHASEMIS jz dma_loop; /* ie. underrun */
-dma_phasemis:
- test SSTAT0,SDONE jnz dma_checkfifo;
- mov SINDEX,ALLZEROS; /* Notify caller of phasemiss */
-
-/*
- * We will be "done" DMAing when the transfer count goes to zero, or
- * the target changes the phase (in light of this, it makes sense that
- * the DMA circuitry doesn't ACK when PHASEMIS is active). If we are
- * doing a SCSI->Host transfer, the data FIFO should be flushed auto-
- * magically on STCNT=0 or a phase change, so just wait for FIFO empty
- * status.
- */
-dma_checkfifo:
- test DFCNTRL,DIRECTION jnz dma_fifoempty;
-dma_fifoflush:
- test DFSTATUS,FIFOEMP jz dma_fifoflush;
-
-dma_fifoempty:
- /* Don't clobber an inprogress host data transfer */
- test DFSTATUS, MREQPEND jnz dma_fifoempty;
-/*
- * Now shut the DMA enables off, and make sure that they are actually
- * off before we continue, lest we get an ILLSADDR.
- */
-dma_dmadone:
- cmp LASTPHASE, P_COMMAND je dma_await_nreq;
- test SCSIRATE, 0x0f jnz dma_shutdown;
-dma_await_nreq:
- test SCSISIGI, REQI jz dma_shutdown;
- test SSTAT1, (PHASEMIS|REQINIT) jz dma_await_nreq;
-dma_shutdown:
- and DFCNTRL, ~(SCSIEN|SDMAEN|HDMAEN);
-dma_halt:
- /*
- * Some revisions of the aic7880 have a problem where, if the
- * data fifo is full, but the PCI input latch is not empty,
- * HDMAEN cannot be cleared. The fix used here is to attempt
- * to drain the data fifo until there is space for the input
- * latch to drain and HDMAEN de-asserts.
- */
- if ((p->bugs & AHC_BUG_PCI_2_1_RETRY) != 0) {
- mov NONE, DFDAT;
- }
- test DFCNTRL, (SCSIEN|SDMAEN|HDMAEN) jnz dma_halt;
-}
-return:
- ret;
-
-/*
- * Assert that if we've been reselected, then we've seen an IDENTIFY
- * message.
- */
-assert:
- test SEQ_FLAGS,IDENTIFY_SEEN jnz return; /* seen IDENTIFY? */
-
- mvi INTSTAT,NO_IDENT ret; /* no - tell the kernel */
-
-/*
- * Locate a disconnected SCB either by SAVED_TCL (ARG_1 is SCB_LIST_NULL)
- * or by the SCBID ARG_1. The search begins at the SCB index passed in
- * via SINDEX which is an SCB that must be on the disconnected list. If
- * the SCB cannot be found, SINDEX will be SCB_LIST_NULL, otherwise, SCBPTR
- * is set to the proper SCB.
- */
-findSCB:
- mov SCBPTR,SINDEX; /* Initialize SCBPTR */
- cmp ARG_1, SCB_LIST_NULL jne findSCB_by_SCBID;
- mov A, SAVED_TCL;
- mvi SCB_TCL jmp findSCB_loop; /* &SCB_TCL -> SINDEX */
-findSCB_by_SCBID:
- mov A, ARG_1; /* Tag passed in ARG_1 */
- mvi SCB_TAG jmp findSCB_loop; /* &SCB_TAG -> SINDEX */
-findSCB_next:
- mov ARG_2, SCBPTR;
- cmp SCB_NEXT, SCB_LIST_NULL je notFound;
- mov SCBPTR,SCB_NEXT;
- dec SINDEX; /* Last comparison moved us too far */
-findSCB_loop:
- cmp SINDIR, A jne findSCB_next;
- mov SINDEX, SCBPTR ret;
-notFound:
- mvi SINDEX, SCB_LIST_NULL ret;
-
-/*
- * Retrieve an SCB by SCBID, first searching the disconnected list and
- * then falling back to DMA'ing the SCB down from the host. This routine
- * assumes that
- * ARG_1 is the SCBID of interest and that SINDEX is the position in the
- * disconnected list to start the search from. If SINDEX is SCB_LIST_NULL,
- * we go directly to the host for the SCB.
- */
-retrieveSCB:
- test SEQ_FLAGS, SCBPTR_VALID jz retrieve_from_host;
- mov SCBPTR call findSCB; /* Continue the search */
- cmp SINDEX, SCB_LIST_NULL je retrieve_from_host;
-
-/*
- * This routine expects SINDEX to contain the index of the SCB to be
- * removed, SCBPTR to be pointing to that SCB, and ARG_2 to be the
- * SCBID of the SCB just previous to this one in the list or SCB_LIST_NULL
- * if it is at the head.
- */
-rem_scb_from_disc_list:
-/* Remove this SCB from the disconnection list */
- cmp ARG_2, SCB_LIST_NULL je rHead;
- mov DINDEX, SCB_NEXT;
- mov SCBPTR, ARG_2;
- mov SCB_NEXT, DINDEX;
- mov SCBPTR, SINDEX ret;
-rHead:
- mov DISCONNECTED_SCBH,SCB_NEXT ret;
-
-retrieve_from_host:
-/*
- * We didn't find it. Pull an SCB and DMA down the one we want.
- * We should never get here in the non-paging case.
- */
- mov ALLZEROS call get_free_or_disc_scb;
- mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET;
- /* Jump instead of call as we want to return anyway */
- mov ARG_1 jmp dma_scb;
-
-/*
- * Determine whether a target is using tagged or non-tagged transactions
- * by first looking for a matching transaction based on the TCL and if
- * that fails, looking up this device in the host's untagged SCB array.
- * The TCL to search for is assumed to be in SAVED_TCL. The value is
- * returned in ARG_1 (SCB_LIST_NULL for tagged, SCBID for non-tagged).
- * The SCBPTR_VALID bit is set in SEQ_FLAGS if we found the information
- * in an SCB instead of having to go to the host.
- */
-get_untagged_SCBID:
- cmp DISCONNECTED_SCBH, SCB_LIST_NULL je get_SCBID_from_host;
- mvi ARG_1, SCB_LIST_NULL;
- mov DISCONNECTED_SCBH call findSCB;
- cmp SINDEX, SCB_LIST_NULL je get_SCBID_from_host;
- or SEQ_FLAGS, SCBPTR_VALID;/* Was in disconnected list */
- test SCB_CONTROL, TAG_ENB jnz . + 2;
- mov ARG_1, SCB_TAG ret;
- mvi ARG_1, SCB_LIST_NULL ret;
-
-/*
- * Fetch a byte from host memory given an index of (A + (256 * SINDEX))
- * and a base address of SCBID_ADDR. The byte is returned in RETURN_2.
- */
-fetch_byte:
- mov ARG_2, SINDEX;
- if ((p->features & AHC_CMD_CHAN) != 0) {
- mvi DINDEX, CCHADDR;
- mvi SCBID_ADDR call set_1byte_addr;
- mvi CCHCNT, 1;
- mvi CCSGCTL, CCSGEN|CCSGRESET;
- test CCSGCTL, CCSGDONE jz .;
- mvi CCSGCTL, CCSGRESET;
- bmov RETURN_2, CCSGRAM, 1 ret;
- } else {
- mvi DINDEX, HADDR;
- mvi SCBID_ADDR call set_1byte_addr;
- mvi HCNT[0], 1;
- clr HCNT[1];
- clr HCNT[2];
- mvi DFCNTRL, HDMAEN|DIRECTION|FIFORESET;
- call dma_finish;
- mov RETURN_2, DFDAT ret;
- }
-
-/*
- * Prepare the hardware to post a byte to host memory given an
- * index of (A + (256 * SINDEX)) and a base address of SCBID_ADDR.
- */
-post_byte_setup:
- mov ARG_2, SINDEX;
- if ((p->features & AHC_CMD_CHAN) != 0) {
- mvi DINDEX, CCHADDR;
- mvi SCBID_ADDR call set_1byte_addr;
- mvi CCHCNT, 1;
- mvi CCSCBCTL, CCSCBRESET ret;
- } else {
- mvi DINDEX, HADDR;
- mvi SCBID_ADDR call set_1byte_addr;
- mvi HCNT[0], 1;
- clr HCNT[1];
- clr HCNT[2];
- mvi DFCNTRL, FIFORESET ret;
- }
-
-post_byte:
- if ((p->features & AHC_CMD_CHAN) != 0) {
- bmov CCSCBRAM, SINDEX, 1;
- or CCSCBCTL, CCSCBEN|CCSCBRESET;
- test CCSCBCTL, CCSCBDONE jz .;
- clr CCSCBCTL ret;
- } else {
- mov DFDAT, SINDEX;
- or DFCNTRL, HDMAEN|FIFOFLUSH;
- jmp dma_finish;
- }
-
-get_SCBID_from_host:
- mov A, SAVED_TCL;
- mvi UNTAGGEDSCB_OFFSET call fetch_byte;
- mov RETURN_1, RETURN_2 ret;
-
-phase_lock:
- test SSTAT1, REQINIT jz phase_lock;
- test SSTAT1, SCSIPERR jnz phase_lock;
- and SCSISIGO, PHASE_MASK, SCSISIGI;
- and LASTPHASE, PHASE_MASK, SCSISIGI ret;
-
-if ((p->features & AHC_CMD_CHAN) == 0) {
-set_stcnt_from_hcnt:
- mov STCNT[0], HCNT[0];
- mov STCNT[1], HCNT[1];
- mov STCNT[2], HCNT[2] ret;
-
-bcopy_7:
- mov DINDIR, SINDIR;
- mov DINDIR, SINDIR;
-bcopy_5:
- mov DINDIR, SINDIR;
-bcopy_4:
- mov DINDIR, SINDIR;
-bcopy_3:
- mov DINDIR, SINDIR;
- mov DINDIR, SINDIR;
- mov DINDIR, SINDIR ret;
-}
-
-/*
- * Setup addr assuming that A is an index into
- * an array of 32-byte objects, SINDEX contains
- * the base address of that array, and DINDEX
- * contains the base address of the location
- * to store the indexed address.
- */
-set_32byte_addr:
- shr ARG_2, 3, A;
- shl A, 5;
-/*
- * Setup addr assuming that A + (ARG_2 * 256) is an
- * index into an array of 1-byte objects, SINDEX contains
- * the base address of that array, and DINDEX contains
- * the base address of the location to store the computed
- * address.
- */
-set_1byte_addr:
- add DINDIR, A, SINDIR;
- mov A, ARG_2;
- adc DINDIR, A, SINDIR;
- clr A;
- adc DINDIR, A, SINDIR;
- adc DINDIR, A, SINDIR ret;
-
-/*
- * Either post or fetch an SCB from host memory based on the
- * DIRECTION bit in DMAPARAMS. The host SCB index is in SINDEX.
- */
-dma_scb:
- mov A, SINDEX;
- if ((p->features & AHC_CMD_CHAN) != 0) {
- mvi DINDEX, CCHADDR;
- mvi HSCB_ADDR call set_32byte_addr;
- mov CCSCBPTR, SCBPTR;
- mvi CCHCNT, 32;
- test DMAPARAMS, DIRECTION jz dma_scb_tohost;
- mvi CCSCBCTL, CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET;
- cmp CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN|CCSCBDIR jne .;
- jmp dma_scb_finish;
-dma_scb_tohost:
- if ((p->features & AHC_ULTRA2) == 0) {
- mvi CCSCBCTL, CCSCBRESET;
- bmov CCSCBRAM, SCB_CONTROL, 32;
- or CCSCBCTL, CCSCBEN|CCSCBRESET;
- test CCSCBCTL, CCSCBDONE jz .;
- }
- if ((p->features & AHC_ULTRA2) != 0) {
- if ((p->bugs & AHC_BUG_SCBCHAN_UPLOAD) != 0) {
- mvi CCSCBCTL, CCARREN|CCSCBRESET;
- cmp CCSCBCTL, ARRDONE|CCARREN jne .;
- mvi CCHCNT, 32;
- mvi CCSCBCTL, CCSCBEN|CCSCBRESET;
- cmp CCSCBCTL, CCSCBDONE|CCSCBEN jne .;
- } else {
- mvi CCSCBCTL, CCARREN|CCSCBEN|CCSCBRESET;
- cmp CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN jne .;
- }
- }
-dma_scb_finish:
- clr CCSCBCTL;
- test CCSCBCTL, CCARREN|CCSCBEN jnz .;
- ret;
- }
- if ((p->features & AHC_CMD_CHAN) == 0) {
- mvi DINDEX, HADDR;
- mvi HSCB_ADDR call set_32byte_addr;
- mvi HCNT[0], 32;
- clr HCNT[1];
- clr HCNT[2];
- mov DFCNTRL, DMAPARAMS;
- test DMAPARAMS, DIRECTION jnz dma_scb_fromhost;
- /* Fill it with the SCB data */
-copy_scb_tofifo:
- mvi SINDEX, SCB_CONTROL;
- add A, 32, SINDEX;
-copy_scb_tofifo_loop:
- mov DFDAT,SINDIR;
- mov DFDAT,SINDIR;
- mov DFDAT,SINDIR;
- mov DFDAT,SINDIR;
- mov DFDAT,SINDIR;
- mov DFDAT,SINDIR;
- mov DFDAT,SINDIR;
- mov DFDAT,SINDIR;
- cmp SINDEX, A jne copy_scb_tofifo_loop;
- or DFCNTRL, HDMAEN|FIFOFLUSH;
- jmp dma_finish;
-dma_scb_fromhost:
- mvi DINDEX, SCB_CONTROL;
- if ((p->bugs & AHC_BUG_PCI_2_1_RETRY) != 0) {
- /*
-		 * Set A to -24.  If it hits 0, then we let
- * our code fall through to dfdat_in_8 to complete
- * the last of the copy.
- *
- * Also, things happen 8 bytes at a time in this
- * case, so we may need to drain the fifo at most
- * 3 times to keep things flowing
- */
- mvi A, -24;
-dma_scb_hang_fifo:
- /* Wait for the first bit of data to hit the fifo */
- test DFSTATUS, FIFOEMP jnz .;
-dma_scb_hang_wait:
- /* OK, now they've started to transfer into the fifo,
- * so wait for them to stop trying to transfer any
- * more data.
- */
- test DFSTATUS, MREQPEND jnz .;
- /*
- * OK, they started, then they stopped, now see if they
- * managed to complete the job before stopping. Try
- * it multiple times to give the chip a few cycles to
- * set the flag if it did complete.
- */
- test DFSTATUS, HDONE jnz dma_scb_hang_dma_done;
- test DFSTATUS, HDONE jnz dma_scb_hang_dma_done;
- test DFSTATUS, HDONE jnz dma_scb_hang_dma_done;
- /*
- * Too bad, the chip didn't complete the DMA, but there
- * aren't any more memory requests pending, so that
- * means it stopped part way through and hung. That's
- * our bug, so now we drain what data there is in the
- * fifo in order to get things going again.
- */
-dma_scb_hang_empty_fifo:
- call dfdat_in_8;
- add A, 8;
- add SINDEX, A, HCNT;
- /*
- * If there are another 8 bytes of data waiting in the
- * fifo, then the carry bit will be set as a result
- * of the above add command (unless A is non-negative,
- * in which case the carry bit won't be set).
- */
- jc dma_scb_hang_empty_fifo;
- /*
- * We've emptied the fifo now, but we wouldn't have got
- * here if the memory transfer hadn't stopped part way
- * through, so go back up to the beginning of the
- * loop and start over. When it succeeds in getting
- * all the data down, HDONE will be set and we'll
- * jump to the code just below here.
- */
- jmp dma_scb_hang_fifo;
-dma_scb_hang_dma_done:
- and DFCNTRL, ~HDMAEN;
- test DFCNTRL, HDMAEN jnz .;
- call dfdat_in_8;
- add A, 8;
- cmp A, 8 jne . - 2;
- ret;
- } else {
- call dma_finish;
- call dfdat_in_8;
- call dfdat_in_8;
- call dfdat_in_8;
- }
-dfdat_in_8:
- mov DINDIR,DFDAT;
-dfdat_in_7:
- mov DINDIR,DFDAT;
- mov DINDIR,DFDAT;
- mov DINDIR,DFDAT;
- mov DINDIR,DFDAT;
- mov DINDIR,DFDAT;
- mov DINDIR,DFDAT;
- mov DINDIR,DFDAT ret;
- }
-
-
-/*
- * Wait for DMA from host memory to data FIFO to complete, then disable
- * DMA and wait for it to acknowledge that it's off.
- */
-if ((p->features & AHC_CMD_CHAN) == 0) {
-dma_finish:
- test DFSTATUS,HDONE jz dma_finish;
- /* Turn off DMA */
- and DFCNTRL, ~HDMAEN;
- test DFCNTRL, HDMAEN jnz .;
- ret;
-}
-
-add_scb_to_free_list:
- if ((p->flags & AHC_PAGESCBS) != 0) {
- mov SCB_NEXT, FREE_SCBH;
- mov FREE_SCBH, SCBPTR;
- }
- mvi SCB_TAG, SCB_LIST_NULL ret;
-
-if ((p->flags & AHC_PAGESCBS) != 0) {
-get_free_or_disc_scb:
- cmp FREE_SCBH, SCB_LIST_NULL jne dequeue_free_scb;
- cmp DISCONNECTED_SCBH, SCB_LIST_NULL jne dequeue_disc_scb;
-return_error:
- mvi SINDEX, SCB_LIST_NULL ret;
-dequeue_disc_scb:
- mov SCBPTR, DISCONNECTED_SCBH;
-dma_up_scb:
- mvi DMAPARAMS, FIFORESET;
- mov SCB_TAG call dma_scb;
-unlink_disc_scb:
- mov DISCONNECTED_SCBH, SCB_NEXT ret;
-dequeue_free_scb:
- mov SCBPTR, FREE_SCBH;
- mov FREE_SCBH, SCB_NEXT ret;
-}
-
-add_scb_to_disc_list:
-/*
- * Link this SCB into the DISCONNECTED list. This list holds the
- * candidates for paging out an SCB if one is needed for a new command.
- * Modifying the disconnected list is a critical (pause disabled) section.
- */
- mov SCB_NEXT, DISCONNECTED_SCBH;
- mov DISCONNECTED_SCBH, SCBPTR ret;
diff --git a/drivers/scsi/aic7xxx_old/aic7xxx_proc.c b/drivers/scsi/aic7xxx_old/aic7xxx_proc.c
deleted file mode 100644
index 976f45ccf2cf..000000000000
--- a/drivers/scsi/aic7xxx_old/aic7xxx_proc.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*+M*************************************************************************
- * Adaptec AIC7xxx device driver proc support for Linux.
- *
- * Copyright (c) 1995, 1996 Dean W. Gehnert
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * ----------------------------------------------------------------
- * o Modified from the EATA-DMA /proc support.
- * o Additional support for device block statistics provided by
- * Matthew Jacob.
- * o Correction of overflow by Heinz Mauelshagen
- *  o Additional corrections by Doug Ledford
- *
- * Dean W. Gehnert, deang@teleport.com, 05/01/96
- *
- * $Id: aic7xxx_proc.c,v 4.1 1997/06/97 08:23:42 deang Exp $
- *-M*************************************************************************/
-
-
-#define HDRB \
-" 0 - 4K 4 - 16K 16 - 64K 64 - 256K 256K - 1M 1M+"
-
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_show_info
- *
- * Description:
- * Return information to handle /proc support for the driver.
- *-F*************************************************************************/
-int
-aic7xxx_show_info(struct seq_file *m, struct Scsi_Host *HBAptr)
-{
- struct aic7xxx_host *p;
- struct aic_dev_data *aic_dev;
- struct scsi_device *sdptr;
- unsigned char i;
- unsigned char tindex;
-
- for(p=first_aic7xxx; p && p->host != HBAptr; p=p->next)
- ;
-
- if (!p)
- {
- seq_printf(m, "Can't find adapter for host number %d\n", HBAptr->host_no);
- return 0;
- }
-
- p = (struct aic7xxx_host *) HBAptr->hostdata;
-
- seq_printf(m, "Adaptec AIC7xxx driver version: ");
- seq_printf(m, "%s/", AIC7XXX_C_VERSION);
- seq_printf(m, "%s", AIC7XXX_H_VERSION);
- seq_printf(m, "\n");
- seq_printf(m, "Adapter Configuration:\n");
- seq_printf(m, " SCSI Adapter: %s\n",
- board_names[p->board_name_index]);
- if (p->flags & AHC_TWIN)
- seq_printf(m, " Twin Channel Controller ");
- else
- {
- char *channel = "";
- char *ultra = "";
- char *wide = "Narrow ";
- if (p->flags & AHC_MULTI_CHANNEL)
- {
- channel = " Channel A";
- if (p->flags & (AHC_CHNLB|AHC_CHNLC))
- channel = (p->flags & AHC_CHNLB) ? " Channel B" : " Channel C";
- }
- if (p->features & AHC_WIDE)
- wide = "Wide ";
- if (p->features & AHC_ULTRA3)
- {
- switch(p->chip & AHC_CHIPID_MASK)
- {
- case AHC_AIC7892:
- case AHC_AIC7899:
- ultra = "Ultra-160/m LVD/SE ";
- break;
- default:
- ultra = "Ultra-3 LVD/SE ";
- break;
- }
- }
- else if (p->features & AHC_ULTRA2)
- ultra = "Ultra-2 LVD/SE ";
- else if (p->features & AHC_ULTRA)
- ultra = "Ultra ";
- seq_printf(m, " %s%sController%s ",
- ultra, wide, channel);
- }
- switch(p->chip & ~AHC_CHIPID_MASK)
- {
- case AHC_VL:
- seq_printf(m, "at VLB slot %d\n", p->pci_device_fn);
- break;
- case AHC_EISA:
- seq_printf(m, "at EISA slot %d\n", p->pci_device_fn);
- break;
- default:
- seq_printf(m, "at PCI %d/%d/%d\n", p->pci_bus,
- PCI_SLOT(p->pci_device_fn), PCI_FUNC(p->pci_device_fn));
- break;
- }
- if( !(p->maddr) )
- {
- seq_printf(m, " Programmed I/O Base: %lx\n", p->base);
- }
- else
- {
- seq_printf(m, " PCI MMAPed I/O Base: 0x%lx\n", p->mbase);
- }
- if( (p->chip & (AHC_VL | AHC_EISA)) )
- {
- seq_printf(m, " BIOS Memory Address: 0x%08x\n", p->bios_address);
- }
- seq_printf(m, " Adapter SEEPROM Config: %s\n",
- (p->flags & AHC_SEEPROM_FOUND) ? "SEEPROM found and used." :
- ((p->flags & AHC_USEDEFAULTS) ? "SEEPROM not found, using defaults." :
- "SEEPROM not found, using leftover BIOS values.") );
- seq_printf(m, " Adaptec SCSI BIOS: %s\n",
- (p->flags & AHC_BIOS_ENABLED) ? "Enabled" : "Disabled");
- seq_printf(m, " IRQ: %d\n", HBAptr->irq);
- seq_printf(m, " SCBs: Active %d, Max Active %d,\n",
- p->activescbs, p->max_activescbs);
- seq_printf(m, " Allocated %d, HW %d, "
- "Page %d\n", p->scb_data->numscbs, p->scb_data->maxhscbs,
- p->scb_data->maxscbs);
- if (p->flags & AHC_EXTERNAL_SRAM)
- seq_printf(m, " Using External SCB SRAM\n");
- seq_printf(m, " Interrupts: %ld", p->isr_count);
- if (p->chip & AHC_EISA)
- {
- seq_printf(m, " %s\n",
- (p->pause & IRQMS) ? "(Level Sensitive)" : "(Edge Triggered)");
- }
- else
- {
- seq_printf(m, "\n");
- }
- seq_printf(m, " BIOS Control Word: 0x%04x\n",
- p->bios_control);
- seq_printf(m, " Adapter Control Word: 0x%04x\n",
- p->adapter_control);
- seq_printf(m, " Extended Translation: %sabled\n",
- (p->flags & AHC_EXTEND_TRANS_A) ? "En" : "Dis");
- seq_printf(m, "Disconnect Enable Flags: 0x%04x\n", p->discenable);
- if (p->features & (AHC_ULTRA | AHC_ULTRA2))
- {
- seq_printf(m, " Ultra Enable Flags: 0x%04x\n", p->ultraenb);
- }
- seq_printf(m, "Default Tag Queue Depth: %d\n", aic7xxx_default_queue_depth);
- seq_printf(m, " Tagged Queue By Device array for aic7xxx host "
- "instance %d:\n", p->instance);
- seq_printf(m, " {");
- for(i=0; i < (MAX_TARGETS - 1); i++)
- seq_printf(m, "%d,",aic7xxx_tag_info[p->instance].tag_commands[i]);
- seq_printf(m, "%d}\n",aic7xxx_tag_info[p->instance].tag_commands[i]);
-
- seq_printf(m, "\n");
- seq_printf(m, "Statistics:\n\n");
- list_for_each_entry(aic_dev, &p->aic_devs, list)
- {
- sdptr = aic_dev->SDptr;
- tindex = sdptr->channel << 3 | sdptr->id;
- seq_printf(m, "(scsi%d:%d:%d:%d)\n",
- p->host_no, sdptr->channel, sdptr->id, sdptr->lun);
- seq_printf(m, " Device using %s/%s",
- (aic_dev->cur.width == MSG_EXT_WDTR_BUS_16_BIT) ?
- "Wide" : "Narrow",
- (aic_dev->cur.offset != 0) ?
- "Sync transfers at " : "Async transfers.\n" );
- if (aic_dev->cur.offset != 0)
- {
- struct aic7xxx_syncrate *sync_rate;
- unsigned char options = aic_dev->cur.options;
- int period = aic_dev->cur.period;
- int rate = (aic_dev->cur.width ==
- MSG_EXT_WDTR_BUS_16_BIT) ? 1 : 0;
-
- sync_rate = aic7xxx_find_syncrate(p, &period, 0, &options);
- if (sync_rate != NULL)
- {
- seq_printf(m, "%s MByte/sec, offset %d\n",
- sync_rate->rate[rate],
- aic_dev->cur.offset );
- }
- else
- {
- seq_printf(m, "3.3 MByte/sec, offset %d\n",
- aic_dev->cur.offset );
- }
- }
- seq_printf(m, " Transinfo settings: ");
- seq_printf(m, "current(%d/%d/%d/%d), ",
- aic_dev->cur.period,
- aic_dev->cur.offset,
- aic_dev->cur.width,
- aic_dev->cur.options);
- seq_printf(m, "goal(%d/%d/%d/%d), ",
- aic_dev->goal.period,
- aic_dev->goal.offset,
- aic_dev->goal.width,
- aic_dev->goal.options);
- seq_printf(m, "user(%d/%d/%d/%d)\n",
- p->user[tindex].period,
- p->user[tindex].offset,
- p->user[tindex].width,
- p->user[tindex].options);
- if(sdptr->simple_tags)
- {
- seq_printf(m, " Tagged Command Queueing Enabled, Ordered Tags %s, Depth %d/%d\n", sdptr->ordered_tags ? "Enabled" : "Disabled", sdptr->queue_depth, aic_dev->max_q_depth);
- }
- if(aic_dev->barrier_total)
- seq_printf(m, " Total transfers %ld:\n (%ld/%ld/%ld/%ld reads/writes/REQ_BARRIER/Ordered Tags)\n",
- aic_dev->r_total+aic_dev->w_total, aic_dev->r_total, aic_dev->w_total,
- aic_dev->barrier_total, aic_dev->ordered_total);
- else
- seq_printf(m, " Total transfers %ld:\n (%ld/%ld reads/writes)\n",
- aic_dev->r_total+aic_dev->w_total, aic_dev->r_total, aic_dev->w_total);
- seq_printf(m, "%s\n", HDRB);
- seq_printf(m, " Reads:");
- for (i = 0; i < ARRAY_SIZE(aic_dev->r_bins); i++)
- {
- seq_printf(m, " %10ld", aic_dev->r_bins[i]);
- }
- seq_printf(m, "\n");
- seq_printf(m, " Writes:");
- for (i = 0; i < ARRAY_SIZE(aic_dev->w_bins); i++)
- {
- seq_printf(m, " %10ld", aic_dev->w_bins[i]);
- }
- seq_printf(m, "\n");
- seq_printf(m, "\n\n");
- }
- return 0;
-}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 2
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -2
- * c-argdecl-indent: 2
- * c-label-offset: -2
- * c-continued-statement-offset: 2
- * c-continued-brace-offset: 0
- * indent-tabs-mode: nil
- * tab-width: 8
- * End:
- */
diff --git a/drivers/scsi/aic7xxx_old/aic7xxx_reg.h b/drivers/scsi/aic7xxx_old/aic7xxx_reg.h
deleted file mode 100644
index 27f2334abc71..000000000000
--- a/drivers/scsi/aic7xxx_old/aic7xxx_reg.h
+++ /dev/null
@@ -1,629 +0,0 @@
-/*
- * DO NOT EDIT - This file is automatically generated.
- */
-
-#define SCSISEQ 0x00
-#define TEMODE 0x80
-#define ENSELO 0x40
-#define ENSELI 0x20
-#define ENRSELI 0x10
-#define ENAUTOATNO 0x08
-#define ENAUTOATNI 0x04
-#define ENAUTOATNP 0x02
-#define SCSIRSTO 0x01
-
-#define SXFRCTL0 0x01
-#define DFON 0x80
-#define DFPEXP 0x40
-#define FAST20 0x20
-#define CLRSTCNT 0x10
-#define SPIOEN 0x08
-#define SCAMEN 0x04
-#define CLRCHN 0x02
-
-#define SXFRCTL1 0x02
-#define BITBUCKET 0x80
-#define SWRAPEN 0x40
-#define ENSPCHK 0x20
-#define STIMESEL 0x18
-#define ENSTIMER 0x04
-#define ACTNEGEN 0x02
-#define STPWEN 0x01
-
-#define SCSISIGO 0x03
-#define CDO 0x80
-#define IOO 0x40
-#define MSGO 0x20
-#define ATNO 0x10
-#define SELO 0x08
-#define BSYO 0x04
-#define REQO 0x02
-#define ACKO 0x01
-
-#define SCSISIGI 0x03
-#define ATNI 0x10
-#define SELI 0x08
-#define BSYI 0x04
-#define REQI 0x02
-#define ACKI 0x01
-
-#define SCSIRATE 0x04
-#define WIDEXFER 0x80
-#define SXFR_ULTRA2 0x7f
-#define SXFR 0x70
-#define SOFS 0x0f
-
-#define SCSIID 0x05
-#define SCSIOFFSET 0x05
-#define SOFS_ULTRA2 0x7f
-
-#define SCSIDATL 0x06
-
-#define SCSIDATH 0x07
-
-#define STCNT 0x08
-
-#define OPTIONMODE 0x08
-#define AUTORATEEN 0x80
-#define AUTOACKEN 0x40
-#define ATNMGMNTEN 0x20
-#define BUSFREEREV 0x10
-#define EXPPHASEDIS 0x08
-#define SCSIDATL_IMGEN 0x04
-#define AUTO_MSGOUT_DE 0x02
-#define DIS_MSGIN_DUALEDGE 0x01
-
-#define CLRSINT0 0x0b
-#define CLRSELDO 0x40
-#define CLRSELDI 0x20
-#define CLRSELINGO 0x10
-#define CLRSWRAP 0x08
-#define CLRSPIORDY 0x02
-
-#define SSTAT0 0x0b
-#define TARGET 0x80
-#define SELDO 0x40
-#define SELDI 0x20
-#define SELINGO 0x10
-#define IOERR 0x08
-#define SWRAP 0x08
-#define SDONE 0x04
-#define SPIORDY 0x02
-#define DMADONE 0x01
-
-#define CLRSINT1 0x0c
-#define CLRSELTIMEO 0x80
-#define CLRATNO 0x40
-#define CLRSCSIRSTI 0x20
-#define CLRBUSFREE 0x08
-#define CLRSCSIPERR 0x04
-#define CLRPHASECHG 0x02
-#define CLRREQINIT 0x01
-
-#define SSTAT1 0x0c
-#define SELTO 0x80
-#define ATNTARG 0x40
-#define SCSIRSTI 0x20
-#define PHASEMIS 0x10
-#define BUSFREE 0x08
-#define SCSIPERR 0x04
-#define PHASECHG 0x02
-#define REQINIT 0x01
-
-#define SSTAT2 0x0d
-#define OVERRUN 0x80
-#define SHVALID 0x40
-#define WIDE_RES 0x20
-#define SFCNT 0x1f
-#define EXP_ACTIVE 0x10
-#define CRCVALERR 0x08
-#define CRCENDERR 0x04
-#define CRCREQERR 0x02
-#define DUAL_EDGE_ERROR 0x01
-
-#define SSTAT3 0x0e
-#define SCSICNT 0xf0
-#define OFFCNT 0x0f
-
-#define SCSIID_ULTRA2 0x0f
-#define OID 0x0f
-
-#define SIMODE0 0x10
-#define ENSELDO 0x40
-#define ENSELDI 0x20
-#define ENSELINGO 0x10
-#define ENIOERR 0x08
-#define ENSWRAP 0x08
-#define ENSDONE 0x04
-#define ENSPIORDY 0x02
-#define ENDMADONE 0x01
-
-#define SIMODE1 0x11
-#define ENSELTIMO 0x80
-#define ENATNTARG 0x40
-#define ENSCSIRST 0x20
-#define ENPHASEMIS 0x10
-#define ENBUSFREE 0x08
-#define ENSCSIPERR 0x04
-#define ENPHASECHG 0x02
-#define ENREQINIT 0x01
-
-#define SCSIBUSL 0x12
-
-#define SCSIBUSH 0x13
-
-#define SHADDR 0x14
-
-#define SELTIMER 0x18
-#define STAGE6 0x20
-#define STAGE5 0x10
-#define STAGE4 0x08
-#define STAGE3 0x04
-#define STAGE2 0x02
-#define STAGE1 0x01
-
-#define SELID 0x19
-#define SELID_MASK 0xf0
-#define ONEBIT 0x08
-
-#define SPIOCAP 0x1b
-#define SOFT1 0x80
-#define SOFT0 0x40
-#define SOFTCMDEN 0x20
-#define HAS_BRDCTL 0x10
-#define SEEPROM 0x08
-#define EEPROM 0x04
-#define ROM 0x02
-#define SSPIOCPS 0x01
-
-#define BRDCTL 0x1d
-#define BRDDAT7 0x80
-#define BRDDAT6 0x40
-#define BRDDAT5 0x20
-#define BRDDAT4 0x10
-#define BRDSTB 0x10
-#define BRDCS 0x08
-#define BRDDAT3 0x08
-#define BRDDAT2 0x04
-#define BRDRW 0x04
-#define BRDRW_ULTRA2 0x02
-#define BRDCTL1 0x02
-#define BRDSTB_ULTRA2 0x01
-#define BRDCTL0 0x01
-
-#define SEECTL 0x1e
-#define EXTARBACK 0x80
-#define EXTARBREQ 0x40
-#define SEEMS 0x20
-#define SEERDY 0x10
-#define SEECS 0x08
-#define SEECK 0x04
-#define SEEDO 0x02
-#define SEEDI 0x01
-
-#define SBLKCTL 0x1f
-#define DIAGLEDEN 0x80
-#define DIAGLEDON 0x40
-#define AUTOFLUSHDIS 0x20
-#define ENAB40 0x08
-#define ENAB20 0x04
-#define SELWIDE 0x02
-#define XCVR 0x01
-
-#define SRAM_BASE 0x20
-
-#define TARG_SCSIRATE 0x20
-
-#define ULTRA_ENB 0x30
-
-#define DISC_DSB 0x32
-
-#define MSG_OUT 0x34
-
-#define DMAPARAMS 0x35
-#define PRELOADEN 0x80
-#define WIDEODD 0x40
-#define SCSIEN 0x20
-#define SDMAENACK 0x10
-#define SDMAEN 0x10
-#define HDMAEN 0x08
-#define HDMAENACK 0x08
-#define DIRECTION 0x04
-#define FIFOFLUSH 0x02
-#define FIFORESET 0x01
-
-#define SEQ_FLAGS 0x36
-#define IDENTIFY_SEEN 0x80
-#define SCBPTR_VALID 0x20
-#define DPHASE 0x10
-#define AMTARGET 0x08
-#define WIDE_BUS 0x02
-#define TWIN_BUS 0x01
-
-#define SAVED_TCL 0x37
-
-#define SG_COUNT 0x38
-
-#define SG_NEXT 0x39
-
-#define LASTPHASE 0x3d
-#define P_MESGIN 0xe0
-#define PHASE_MASK 0xe0
-#define P_STATUS 0xc0
-#define P_MESGOUT 0xa0
-#define P_COMMAND 0x80
-#define CDI 0x80
-#define IOI 0x40
-#define P_DATAIN 0x40
-#define MSGI 0x20
-#define P_BUSFREE 0x01
-#define P_DATAOUT 0x00
-
-#define WAITING_SCBH 0x3e
-
-#define DISCONNECTED_SCBH 0x3f
-
-#define FREE_SCBH 0x40
-
-#define HSCB_ADDR 0x41
-
-#define SCBID_ADDR 0x45
-
-#define TMODE_CMDADDR 0x49
-
-#define KERNEL_QINPOS 0x4d
-
-#define QINPOS 0x4e
-
-#define QOUTPOS 0x4f
-
-#define TMODE_CMDADDR_NEXT 0x50
-
-#define ARG_1 0x51
-#define RETURN_1 0x51
-#define SEND_MSG 0x80
-#define SEND_SENSE 0x40
-#define SEND_REJ 0x20
-#define MSGOUT_PHASEMIS 0x10
-
-#define ARG_2 0x52
-#define RETURN_2 0x52
-
-#define LAST_MSG 0x53
-
-#define PREFETCH_CNT 0x54
-
-#define SCSICONF 0x5a
-#define TERM_ENB 0x80
-#define RESET_SCSI 0x40
-#define HWSCSIID 0x0f
-#define HSCSIID 0x07
-
-#define HOSTCONF 0x5d
-
-#define HA_274_BIOSCTRL 0x5f
-#define BIOSMODE 0x30
-#define BIOSDISABLED 0x30
-#define CHANNEL_B_PRIMARY 0x08
-
-#define SEQCTL 0x60
-#define PERRORDIS 0x80
-#define PAUSEDIS 0x40
-#define FAILDIS 0x20
-#define FASTMODE 0x10
-#define BRKADRINTEN 0x08
-#define STEP 0x04
-#define SEQRESET 0x02
-#define LOADRAM 0x01
-
-#define SEQRAM 0x61
-
-#define SEQADDR0 0x62
-
-#define SEQADDR1 0x63
-#define SEQADDR1_MASK 0x01
-
-#define ACCUM 0x64
-
-#define SINDEX 0x65
-
-#define DINDEX 0x66
-
-#define ALLONES 0x69
-
-#define ALLZEROS 0x6a
-
-#define NONE 0x6a
-
-#define FLAGS 0x6b
-#define ZERO 0x02
-#define CARRY 0x01
-
-#define SINDIR 0x6c
-
-#define DINDIR 0x6d
-
-#define FUNCTION1 0x6e
-
-#define STACK 0x6f
-
-#define TARG_OFFSET 0x70
-
-#define BCTL 0x84
-#define ACE 0x08
-#define ENABLE 0x01
-
-#define DSCOMMAND0 0x84
-#define INTSCBRAMSEL 0x08
-#define RAMPS 0x04
-#define USCBSIZE32 0x02
-#define CIOPARCKEN 0x01
-
-#define DSCOMMAND 0x84
-#define CACHETHEN 0x80
-#define DPARCKEN 0x40
-#define MPARCKEN 0x20
-#define EXTREQLCK 0x10
-
-#define BUSTIME 0x85
-#define BOFF 0xf0
-#define BON 0x0f
-
-#define BUSSPD 0x86
-#define DFTHRSH 0xc0
-#define STBOFF 0x38
-#define STBON 0x07
-
-#define DSPCISTATUS 0x86
-#define DFTHRSH_100 0xc0
-
-#define HCNTRL 0x87
-#define POWRDN 0x40
-#define SWINT 0x10
-#define IRQMS 0x08
-#define PAUSE 0x04
-#define INTEN 0x02
-#define CHIPRST 0x01
-#define CHIPRSTACK 0x01
-
-#define HADDR 0x88
-
-#define HCNT 0x8c
-
-#define SCBPTR 0x90
-
-#define INTSTAT 0x91
-#define SEQINT_MASK 0xf1
-#define DATA_OVERRUN 0xe1
-#define MSGIN_PHASEMIS 0xd1
-#define TRACEPOINT2 0xc1
-#define SEQ_SG_FIXUP 0xb1
-#define AWAITING_MSG 0xa1
-#define RESIDUAL 0x81
-#define BAD_STATUS 0x71
-#define REJECT_MSG 0x61
-#define WIDE_RESIDUE 0x51
-#define EXTENDED_MSG 0x41
-#define NO_MATCH 0x31
-#define NO_IDENT 0x21
-#define SEND_REJECT 0x11
-#define INT_PEND 0x0f
-#define BRKADRINT 0x08
-#define SCSIINT 0x04
-#define CMDCMPLT 0x02
-#define BAD_PHASE 0x01
-#define SEQINT 0x01
-
-#define CLRINT 0x92
-#define CLRPARERR 0x10
-#define CLRBRKADRINT 0x08
-#define CLRSCSIINT 0x04
-#define CLRCMDINT 0x02
-#define CLRSEQINT 0x01
-
-#define ERROR 0x92
-#define CIOPARERR 0x80
-#define PCIERRSTAT 0x40
-#define MPARERR 0x20
-#define DPARERR 0x10
-#define SQPARERR 0x08
-#define ILLOPCODE 0x04
-#define DSCTMOUT 0x02
-#define ILLSADDR 0x02
-#define ILLHADDR 0x01
-
-#define DFCNTRL 0x93
-
-#define DFSTATUS 0x94
-#define PRELOAD_AVAIL 0x80
-#define DWORDEMP 0x20
-#define MREQPEND 0x10
-#define HDONE 0x08
-#define DFTHRESH 0x04
-#define FIFOFULL 0x02
-#define FIFOEMP 0x01
-
-#define DFDAT 0x99
-
-#define SCBCNT 0x9a
-#define SCBAUTO 0x80
-#define SCBCNT_MASK 0x1f
-
-#define QINFIFO 0x9b
-
-#define QINCNT 0x9c
-
-#define SCSIDATL_IMG 0x9c
-
-#define QOUTFIFO 0x9d
-
-#define CRCCONTROL1 0x9d
-#define CRCONSEEN 0x80
-#define CRCVALCHKEN 0x40
-#define CRCENDCHKEN 0x20
-#define CRCREQCHKEN 0x10
-#define TARGCRCENDEN 0x08
-#define TARGCRCCNTEN 0x04
-
-#define SCSIPHASE 0x9e
-#define SP_STATUS 0x20
-#define SP_COMMAND 0x10
-#define SP_MSG_IN 0x08
-#define SP_MSG_OUT 0x04
-#define SP_DATA_IN 0x02
-#define SP_DATA_OUT 0x01
-
-#define QOUTCNT 0x9e
-
-#define SFUNCT 0x9f
-#define ALT_MODE 0x80
-
-#define SCB_CONTROL 0xa0
-#define MK_MESSAGE 0x80
-#define DISCENB 0x40
-#define TAG_ENB 0x20
-#define DISCONNECTED 0x04
-#define SCB_TAG_TYPE 0x03
-
-#define SCB_BASE 0xa0
-
-#define SCB_TCL 0xa1
-#define TID 0xf0
-#define SELBUSB 0x08
-#define LID 0x07
-
-#define SCB_TARGET_STATUS 0xa2
-
-#define SCB_SGCOUNT 0xa3
-
-#define SCB_SGPTR 0xa4
-
-#define SCB_RESID_SGCNT 0xa8
-
-#define SCB_RESID_DCNT 0xa9
-
-#define SCB_DATAPTR 0xac
-
-#define SCB_DATACNT 0xb0
-
-#define SCB_CMDPTR 0xb4
-
-#define SCB_CMDLEN 0xb8
-
-#define SCB_TAG 0xb9
-
-#define SCB_NEXT 0xba
-
-#define SCB_PREV 0xbb
-
-#define SCB_BUSYTARGETS 0xbc
-
-#define SEECTL_2840 0xc0
-#define CS_2840 0x04
-#define CK_2840 0x02
-#define DO_2840 0x01
-
-#define STATUS_2840 0xc1
-#define EEPROM_TF 0x80
-#define BIOS_SEL 0x60
-#define ADSEL 0x1e
-#define DI_2840 0x01
-
-#define CCHADDR 0xe0
-
-#define CCHCNT 0xe8
-
-#define CCSGRAM 0xe9
-
-#define CCSGADDR 0xea
-
-#define CCSGCTL 0xeb
-#define CCSGDONE 0x80
-#define CCSGEN 0x08
-#define FLAG 0x02
-#define CCSGRESET 0x01
-
-#define CCSCBRAM 0xec
-
-#define CCSCBADDR 0xed
-
-#define CCSCBCTL 0xee
-#define CCSCBDONE 0x80
-#define ARRDONE 0x40
-#define CCARREN 0x10
-#define CCSCBEN 0x08
-#define CCSCBDIR 0x04
-#define CCSCBRESET 0x01
-
-#define CCSCBCNT 0xef
-
-#define CCSCBPTR 0xf1
-
-#define HNSCB_QOFF 0xf4
-
-#define HESCB_QOFF 0xf5
-
-#define SNSCB_QOFF 0xf6
-
-#define SESCB_QOFF 0xf7
-
-#define SDSCB_QOFF 0xf8
-
-#define QOFF_CTLSTA 0xfa
-#define ESTABLISH_SCB_AVAIL 0x80
-#define SCB_AVAIL 0x40
-#define SNSCB_ROLLOVER 0x20
-#define SDSCB_ROLLOVER 0x10
-#define SESCB_ROLLOVER 0x08
-#define SCB_QSIZE 0x07
-#define SCB_QSIZE_256 0x06
-
-#define DFF_THRSH 0xfb
-#define WR_DFTHRSH 0x70
-#define WR_DFTHRSH_MAX 0x70
-#define WR_DFTHRSH_90 0x60
-#define WR_DFTHRSH_85 0x50
-#define WR_DFTHRSH_75 0x40
-#define WR_DFTHRSH_63 0x30
-#define WR_DFTHRSH_50 0x20
-#define WR_DFTHRSH_25 0x10
-#define RD_DFTHRSH_MAX 0x07
-#define RD_DFTHRSH 0x07
-#define RD_DFTHRSH_90 0x06
-#define RD_DFTHRSH_85 0x05
-#define RD_DFTHRSH_75 0x04
-#define RD_DFTHRSH_63 0x03
-#define RD_DFTHRSH_50 0x02
-#define RD_DFTHRSH_25 0x01
-#define WR_DFTHRSH_MIN 0x00
-#define RD_DFTHRSH_MIN 0x00
-
-#define SG_CACHEPTR 0xfc
-#define SG_USER_DATA 0xfc
-#define LAST_SEG 0x02
-#define LAST_SEG_DONE 0x01
-
-
-#define CMD_GROUP2_BYTE_DELTA 0xfa
-#define MAX_OFFSET_8BIT 0x0f
-#define BUS_16_BIT 0x01
-#define QINFIFO_OFFSET 0x02
-#define CMD_GROUP5_BYTE_DELTA 0x0b
-#define CMD_GROUP_CODE_SHIFT 0x05
-#define MAX_OFFSET_ULTRA2 0x7f
-#define MAX_OFFSET_16BIT 0x08
-#define BUS_8_BIT 0x00
-#define QOUTFIFO_OFFSET 0x01
-#define UNTAGGEDSCB_OFFSET 0x00
-#define CCSGRAM_MAXSEGS 0x10
-#define SCB_LIST_NULL 0xff
-#define SG_SIZEOF 0x08
-#define CMD_GROUP4_BYTE_DELTA 0x04
-#define CMD_GROUP0_BYTE_DELTA 0xfc
-#define HOST_MSG 0xff
-#define BUS_32_BIT 0x02
-#define CCSGADDR_MAX 0x80
-
-
-/* Downloaded Constant Definitions */
-#define TMODE_NUMCMDS 0x00
diff --git a/drivers/scsi/aic7xxx_old/aic7xxx_seq.c b/drivers/scsi/aic7xxx_old/aic7xxx_seq.c
deleted file mode 100644
index e1bc140e9735..000000000000
--- a/drivers/scsi/aic7xxx_old/aic7xxx_seq.c
+++ /dev/null
@@ -1,817 +0,0 @@
-/*
- * DO NOT EDIT - This file is automatically generated.
- */
-static unsigned char seqprog[] = {
- 0xff, 0x6a, 0x06, 0x08,
- 0x7f, 0x02, 0x04, 0x08,
- 0x12, 0x6a, 0x00, 0x00,
- 0xff, 0x6a, 0xd6, 0x09,
- 0xff, 0x6a, 0xdc, 0x09,
- 0x00, 0x65, 0xca, 0x58,
- 0xf7, 0x01, 0x02, 0x08,
- 0xff, 0x4e, 0xc8, 0x08,
- 0xbf, 0x60, 0xc0, 0x08,
- 0x60, 0x0b, 0x86, 0x68,
- 0x40, 0x00, 0x0c, 0x68,
- 0x08, 0x1f, 0x3e, 0x10,
- 0x60, 0x0b, 0x86, 0x68,
- 0x40, 0x00, 0x0c, 0x68,
- 0x08, 0x1f, 0x3e, 0x10,
- 0xff, 0x3e, 0x48, 0x60,
- 0x40, 0xfa, 0x10, 0x78,
- 0xff, 0xf6, 0xd4, 0x08,
- 0x01, 0x4e, 0x9c, 0x18,
- 0x40, 0x60, 0xc0, 0x00,
- 0x00, 0x4d, 0x10, 0x70,
- 0x01, 0x4e, 0x9c, 0x18,
- 0xbf, 0x60, 0xc0, 0x08,
- 0x00, 0x6a, 0x86, 0x5c,
- 0xff, 0x4e, 0xc8, 0x18,
- 0x02, 0x6a, 0x70, 0x5b,
- 0xff, 0x52, 0x20, 0x09,
- 0x0d, 0x6a, 0x6a, 0x00,
- 0x00, 0x52, 0xe6, 0x5b,
- 0x03, 0xb0, 0x52, 0x31,
- 0xff, 0xb0, 0x52, 0x09,
- 0xff, 0xb1, 0x54, 0x09,
- 0xff, 0xb2, 0x56, 0x09,
- 0xff, 0xa3, 0x50, 0x09,
- 0xff, 0x3e, 0x74, 0x09,
- 0xff, 0x90, 0x7c, 0x08,
- 0xff, 0x3e, 0x20, 0x09,
- 0x00, 0x65, 0x4e, 0x58,
- 0x00, 0x65, 0x0c, 0x40,
- 0xf7, 0x1f, 0xca, 0x08,
- 0x08, 0xa1, 0xc8, 0x08,
- 0x00, 0x65, 0xca, 0x00,
- 0xff, 0x65, 0x3e, 0x08,
- 0xf0, 0xa1, 0xc8, 0x08,
- 0x0f, 0x0f, 0x1e, 0x08,
- 0x00, 0x0f, 0x1e, 0x00,
- 0xf0, 0xa1, 0xc8, 0x08,
- 0x0f, 0x05, 0x0a, 0x08,
- 0x00, 0x05, 0x0a, 0x00,
- 0xff, 0x6a, 0x0c, 0x08,
- 0x5a, 0x6a, 0x00, 0x04,
- 0x12, 0x65, 0x02, 0x00,
- 0x31, 0x6a, 0xca, 0x00,
- 0x80, 0x37, 0x6e, 0x68,
- 0xff, 0x65, 0xca, 0x18,
- 0xff, 0x37, 0xdc, 0x08,
- 0xff, 0x6e, 0xc8, 0x08,
- 0x00, 0x6c, 0x76, 0x78,
- 0x20, 0x01, 0x02, 0x00,
- 0x4c, 0x37, 0xc8, 0x28,
- 0x08, 0x1f, 0x7e, 0x78,
- 0x08, 0x37, 0x6e, 0x00,
- 0x08, 0x64, 0xc8, 0x00,
- 0x70, 0x64, 0xca, 0x18,
- 0xff, 0x6c, 0x0a, 0x08,
- 0x20, 0x64, 0xca, 0x18,
- 0xff, 0x6c, 0x08, 0x0c,
- 0x40, 0x0b, 0x96, 0x68,
- 0x20, 0x6a, 0x16, 0x00,
- 0xf0, 0x19, 0x6e, 0x08,
- 0x08, 0x6a, 0x18, 0x00,
- 0x08, 0x11, 0x22, 0x00,
- 0x08, 0x6a, 0x66, 0x58,
- 0x08, 0x6a, 0x68, 0x00,
- 0x00, 0x65, 0xaa, 0x40,
- 0x12, 0x6a, 0x00, 0x00,
- 0x40, 0x6a, 0x16, 0x00,
- 0xff, 0x3e, 0x20, 0x09,
- 0xff, 0xba, 0x7c, 0x08,
- 0xff, 0xa1, 0x6e, 0x08,
- 0x08, 0x6a, 0x18, 0x00,
- 0x08, 0x11, 0x22, 0x00,
- 0x08, 0x6a, 0x66, 0x58,
- 0x80, 0x6a, 0x68, 0x00,
- 0x80, 0x36, 0x6c, 0x00,
- 0x00, 0x65, 0xba, 0x5b,
- 0xff, 0x3d, 0xc8, 0x08,
- 0xbf, 0x64, 0xe2, 0x78,
- 0x80, 0x64, 0xc8, 0x71,
- 0xa0, 0x64, 0xf8, 0x71,
- 0xc0, 0x64, 0xf0, 0x71,
- 0xe0, 0x64, 0x38, 0x72,
- 0x01, 0x6a, 0x22, 0x01,
- 0x00, 0x65, 0xaa, 0x40,
- 0xf7, 0x11, 0x22, 0x08,
- 0x00, 0x65, 0xca, 0x58,
- 0xff, 0x06, 0xd4, 0x08,
- 0xf7, 0x01, 0x02, 0x08,
- 0x09, 0x0c, 0xc4, 0x78,
- 0x08, 0x0c, 0x0c, 0x68,
- 0x01, 0x6a, 0x22, 0x01,
- 0xff, 0x6a, 0x26, 0x09,
- 0x02, 0x6a, 0x08, 0x30,
- 0xff, 0x6a, 0x08, 0x08,
- 0xdf, 0x01, 0x02, 0x08,
- 0x01, 0x6a, 0x7a, 0x00,
- 0xff, 0x6a, 0x6c, 0x0c,
- 0x04, 0x14, 0x10, 0x31,
- 0x03, 0xa9, 0x18, 0x31,
- 0x03, 0xa9, 0x10, 0x30,
- 0x08, 0x6a, 0xcc, 0x00,
- 0xa9, 0x6a, 0xd0, 0x5b,
- 0x00, 0x65, 0x02, 0x41,
- 0xa8, 0x6a, 0x6a, 0x00,
- 0x79, 0x6a, 0x6a, 0x00,
- 0x40, 0x3d, 0xea, 0x68,
- 0x04, 0x35, 0x6a, 0x00,
- 0x00, 0x65, 0x2a, 0x5b,
- 0x80, 0x6a, 0xd4, 0x01,
- 0x10, 0x36, 0xd6, 0x68,
- 0x10, 0x36, 0x6c, 0x00,
- 0x07, 0xac, 0x10, 0x31,
- 0x05, 0xa3, 0x70, 0x30,
- 0x03, 0x8c, 0x10, 0x30,
- 0x88, 0x6a, 0xcc, 0x00,
- 0xac, 0x6a, 0xc8, 0x5b,
- 0x00, 0x65, 0xc2, 0x5b,
- 0x38, 0x6a, 0xcc, 0x00,
- 0xa3, 0x6a, 0xcc, 0x5b,
- 0xff, 0x38, 0x12, 0x69,
- 0x80, 0x02, 0x04, 0x00,
- 0xe7, 0x35, 0x6a, 0x08,
- 0x03, 0x69, 0x18, 0x31,
- 0x03, 0x69, 0x10, 0x30,
- 0xff, 0x6a, 0x10, 0x00,
- 0xff, 0x6a, 0x12, 0x00,
- 0xff, 0x6a, 0x14, 0x00,
- 0x22, 0x38, 0xc8, 0x28,
- 0x01, 0x38, 0x1c, 0x61,
- 0x02, 0x64, 0xc8, 0x00,
- 0x01, 0x38, 0x1c, 0x61,
- 0xbf, 0x35, 0x6a, 0x08,
- 0xff, 0x64, 0xf8, 0x09,
- 0xff, 0x35, 0x26, 0x09,
- 0x80, 0x02, 0xa4, 0x69,
- 0x10, 0x0c, 0x7a, 0x69,
- 0x80, 0x94, 0x22, 0x79,
- 0x00, 0x35, 0x0a, 0x5b,
- 0x80, 0x02, 0xa4, 0x69,
- 0xff, 0x65, 0x94, 0x79,
- 0x01, 0x38, 0x70, 0x71,
- 0xff, 0x38, 0x70, 0x18,
- 0xff, 0x38, 0x94, 0x79,
- 0x80, 0xea, 0x4a, 0x61,
- 0xef, 0x38, 0xc8, 0x18,
- 0x80, 0x6a, 0xc8, 0x00,
- 0x00, 0x65, 0x3c, 0x49,
- 0x33, 0x38, 0xc8, 0x28,
- 0xff, 0x64, 0xd0, 0x09,
- 0x04, 0x39, 0xc0, 0x31,
- 0x09, 0x6a, 0xd6, 0x01,
- 0x80, 0xeb, 0x42, 0x79,
- 0xf7, 0xeb, 0xd6, 0x09,
- 0x08, 0xeb, 0x46, 0x69,
- 0x01, 0x6a, 0xd6, 0x01,
- 0x08, 0xe9, 0x10, 0x31,
- 0x03, 0x8c, 0x10, 0x30,
- 0xff, 0x38, 0x70, 0x18,
- 0x88, 0x6a, 0xcc, 0x00,
- 0x39, 0x6a, 0xce, 0x5b,
- 0x08, 0x6a, 0x18, 0x01,
- 0xff, 0x6a, 0x1a, 0x09,
- 0xff, 0x6a, 0x1c, 0x09,
- 0x0d, 0x93, 0x26, 0x01,
- 0x00, 0x65, 0x78, 0x5c,
- 0x88, 0x6a, 0xcc, 0x00,
- 0x00, 0x65, 0x6a, 0x5c,
- 0x00, 0x65, 0xc2, 0x5b,
- 0xff, 0x6a, 0xc8, 0x08,
- 0x08, 0x39, 0x72, 0x18,
- 0x00, 0x3a, 0x74, 0x20,
- 0x00, 0x65, 0x02, 0x41,
- 0x01, 0x0c, 0x6c, 0x79,
- 0x10, 0x0c, 0x02, 0x79,
- 0x10, 0x0c, 0x7a, 0x69,
- 0x01, 0xfc, 0x70, 0x79,
- 0xff, 0x6a, 0x70, 0x08,
- 0x01, 0x0c, 0x76, 0x79,
- 0x10, 0x0c, 0x02, 0x79,
- 0x00, 0x65, 0xae, 0x59,
- 0x01, 0xfc, 0x94, 0x69,
- 0x40, 0x0d, 0x84, 0x69,
- 0xb1, 0x6a, 0x22, 0x01,
- 0x00, 0x65, 0x94, 0x41,
- 0x2e, 0xfc, 0xa2, 0x28,
- 0x3f, 0x38, 0xc8, 0x08,
- 0x00, 0x51, 0x94, 0x71,
- 0xff, 0x6a, 0xc8, 0x08,
- 0xf8, 0x39, 0x72, 0x18,
- 0xff, 0x3a, 0x74, 0x20,
- 0x01, 0x38, 0x70, 0x18,
- 0x00, 0x65, 0x86, 0x41,
- 0x03, 0x08, 0x52, 0x31,
- 0xff, 0x38, 0x50, 0x09,
- 0x12, 0x01, 0x02, 0x00,
- 0xff, 0x08, 0x52, 0x09,
- 0xff, 0x09, 0x54, 0x09,
- 0xff, 0x0a, 0x56, 0x09,
- 0xff, 0x38, 0x50, 0x09,
- 0x00, 0x65, 0xaa, 0x40,
- 0x10, 0x0c, 0xa4, 0x79,
- 0x00, 0x65, 0xae, 0x59,
- 0x7f, 0x02, 0x04, 0x08,
- 0xe1, 0x6a, 0x22, 0x01,
- 0x00, 0x65, 0xaa, 0x40,
- 0x04, 0x93, 0xc2, 0x69,
- 0xdf, 0x93, 0x26, 0x09,
- 0x20, 0x93, 0xb2, 0x69,
- 0x02, 0x93, 0x26, 0x01,
- 0x01, 0x94, 0xb6, 0x79,
- 0x01, 0x94, 0xb6, 0x79,
- 0x01, 0x94, 0xb6, 0x79,
- 0x01, 0x94, 0xb6, 0x79,
- 0x01, 0x94, 0xb6, 0x79,
- 0x10, 0x94, 0xc0, 0x69,
- 0xd7, 0x93, 0x26, 0x09,
- 0x28, 0x93, 0xc4, 0x69,
- 0xff, 0x6a, 0xd4, 0x0c,
- 0x00, 0x65, 0x2a, 0x5b,
- 0x05, 0xb4, 0x10, 0x31,
- 0x02, 0x6a, 0x1a, 0x31,
- 0x03, 0x8c, 0x10, 0x30,
- 0x88, 0x6a, 0xcc, 0x00,
- 0xb4, 0x6a, 0xcc, 0x5b,
- 0xff, 0x6a, 0x1a, 0x09,
- 0xff, 0x6a, 0x1c, 0x09,
- 0x00, 0x65, 0xc2, 0x5b,
- 0x3d, 0x6a, 0x0a, 0x5b,
- 0xac, 0x6a, 0x26, 0x01,
- 0x04, 0x0b, 0xde, 0x69,
- 0x04, 0x0b, 0xe4, 0x69,
- 0x10, 0x0c, 0xe0, 0x79,
- 0x02, 0x03, 0xe8, 0x79,
- 0x11, 0x0c, 0xe4, 0x79,
- 0xd7, 0x93, 0x26, 0x09,
- 0x28, 0x93, 0xea, 0x69,
- 0x12, 0x01, 0x02, 0x00,
- 0x00, 0x65, 0xaa, 0x40,
- 0x00, 0x65, 0x2a, 0x5b,
- 0xff, 0x06, 0x44, 0x09,
- 0x00, 0x65, 0xaa, 0x40,
- 0x10, 0x3d, 0x06, 0x00,
- 0xff, 0x34, 0xca, 0x08,
- 0x80, 0x65, 0x1c, 0x62,
- 0x0f, 0xa1, 0xca, 0x08,
- 0x07, 0xa1, 0xca, 0x08,
- 0x40, 0xa0, 0xc8, 0x08,
- 0x00, 0x65, 0xca, 0x00,
- 0x80, 0x65, 0xca, 0x00,
- 0x80, 0xa0, 0x0c, 0x7a,
- 0xff, 0x65, 0x0c, 0x08,
- 0x00, 0x65, 0x1e, 0x42,
- 0x20, 0xa0, 0x24, 0x7a,
- 0xff, 0x65, 0x0c, 0x08,
- 0x00, 0x65, 0xba, 0x5b,
- 0xa0, 0x3d, 0x2c, 0x62,
- 0x23, 0xa0, 0x0c, 0x08,
- 0x00, 0x65, 0xba, 0x5b,
- 0xa0, 0x3d, 0x2c, 0x62,
- 0x00, 0xb9, 0x24, 0x42,
- 0xff, 0x65, 0x24, 0x62,
- 0xa1, 0x6a, 0x22, 0x01,
- 0xff, 0x6a, 0xd4, 0x08,
- 0x10, 0x51, 0x2c, 0x72,
- 0x40, 0x6a, 0x18, 0x00,
- 0xff, 0x65, 0x0c, 0x08,
- 0x00, 0x65, 0xba, 0x5b,
- 0xa0, 0x3d, 0xf6, 0x71,
- 0x40, 0x6a, 0x18, 0x00,
- 0xff, 0x34, 0xa6, 0x08,
- 0x80, 0x34, 0x34, 0x62,
- 0x7f, 0xa0, 0x40, 0x09,
- 0x08, 0x6a, 0x68, 0x00,
- 0x00, 0x65, 0xaa, 0x40,
- 0x64, 0x6a, 0x00, 0x5b,
- 0x80, 0x64, 0xaa, 0x6a,
- 0x04, 0x64, 0x8c, 0x72,
- 0x02, 0x64, 0x92, 0x72,
- 0x00, 0x6a, 0x54, 0x72,
- 0x03, 0x64, 0xa6, 0x72,
- 0x01, 0x64, 0x88, 0x72,
- 0x07, 0x64, 0xe8, 0x72,
- 0x08, 0x64, 0x50, 0x72,
- 0x23, 0x64, 0xec, 0x72,
- 0x11, 0x6a, 0x22, 0x01,
- 0x07, 0x6a, 0xf2, 0x5a,
- 0xff, 0x06, 0xd4, 0x08,
- 0x00, 0x65, 0xaa, 0x40,
- 0xff, 0xa8, 0x58, 0x6a,
- 0xff, 0xa2, 0x70, 0x7a,
- 0x01, 0x6a, 0x6a, 0x00,
- 0x00, 0xb9, 0xe6, 0x5b,
- 0xff, 0xa2, 0x70, 0x7a,
- 0x71, 0x6a, 0x22, 0x01,
- 0xff, 0x6a, 0xd4, 0x08,
- 0x40, 0x51, 0x70, 0x62,
- 0x0d, 0x6a, 0x6a, 0x00,
- 0x00, 0xb9, 0xe6, 0x5b,
- 0xff, 0x3e, 0x74, 0x09,
- 0xff, 0x90, 0x7c, 0x08,
- 0x00, 0x65, 0x4e, 0x58,
- 0x00, 0x65, 0xbc, 0x40,
- 0x20, 0xa0, 0x78, 0x6a,
- 0xff, 0x37, 0xc8, 0x08,
- 0x00, 0x6a, 0x90, 0x5b,
- 0xff, 0x6a, 0xa6, 0x5b,
- 0xff, 0xf8, 0xc8, 0x08,
- 0xff, 0x4f, 0xc8, 0x08,
- 0x01, 0x6a, 0x90, 0x5b,
- 0x00, 0xb9, 0xa6, 0x5b,
- 0x01, 0x4f, 0x9e, 0x18,
- 0x02, 0x6a, 0x22, 0x01,
- 0x00, 0x65, 0x80, 0x5c,
- 0x00, 0x65, 0xbc, 0x40,
- 0x41, 0x6a, 0x22, 0x01,
- 0x00, 0x65, 0xaa, 0x40,
- 0x04, 0xa0, 0x40, 0x01,
- 0x00, 0x65, 0x98, 0x5c,
- 0x00, 0x65, 0xbc, 0x40,
- 0x10, 0x36, 0x50, 0x7a,
- 0x05, 0x38, 0x46, 0x31,
- 0x04, 0x14, 0x58, 0x31,
- 0x03, 0xa9, 0x60, 0x31,
- 0xa3, 0x6a, 0xcc, 0x00,
- 0x38, 0x6a, 0xcc, 0x5b,
- 0xac, 0x6a, 0xcc, 0x00,
- 0x14, 0x6a, 0xce, 0x5b,
- 0xa9, 0x6a, 0xd0, 0x5b,
- 0x00, 0x65, 0x50, 0x42,
- 0xef, 0x36, 0x6c, 0x08,
- 0x00, 0x65, 0x50, 0x42,
- 0x0f, 0x64, 0xc8, 0x08,
- 0x07, 0x64, 0xc8, 0x08,
- 0x00, 0x37, 0x6e, 0x00,
- 0xff, 0x6a, 0xa4, 0x00,
- 0x00, 0x65, 0x60, 0x5b,
- 0xff, 0x51, 0xbc, 0x72,
- 0x20, 0x36, 0xc6, 0x7a,
- 0x00, 0x90, 0x4e, 0x5b,
- 0x00, 0x65, 0xc8, 0x42,
- 0xff, 0x06, 0xd4, 0x08,
- 0x00, 0x65, 0xba, 0x5b,
- 0xe0, 0x3d, 0xe2, 0x62,
- 0x20, 0x12, 0xe2, 0x62,
- 0x51, 0x6a, 0xf6, 0x5a,
- 0x00, 0x65, 0x48, 0x5b,
- 0xff, 0x37, 0xc8, 0x08,
- 0x00, 0xa1, 0xda, 0x62,
- 0x04, 0xa0, 0xda, 0x7a,
- 0xfb, 0xa0, 0x40, 0x09,
- 0x80, 0x36, 0x6c, 0x00,
- 0x80, 0xa0, 0x50, 0x7a,
- 0x7f, 0xa0, 0x40, 0x09,
- 0xff, 0x6a, 0xf2, 0x5a,
- 0x00, 0x65, 0x50, 0x42,
- 0x04, 0xa0, 0xe0, 0x7a,
- 0x00, 0x65, 0x98, 0x5c,
- 0x00, 0x65, 0xe2, 0x42,
- 0x00, 0x65, 0x80, 0x5c,
- 0x31, 0x6a, 0x22, 0x01,
- 0x0c, 0x6a, 0xf2, 0x5a,
- 0x00, 0x65, 0x50, 0x42,
- 0x61, 0x6a, 0x22, 0x01,
- 0x00, 0x65, 0x50, 0x42,
- 0x51, 0x6a, 0xf6, 0x5a,
- 0x51, 0x6a, 0x22, 0x01,
- 0x00, 0x65, 0x50, 0x42,
- 0x10, 0x3d, 0x06, 0x00,
- 0xff, 0x65, 0x68, 0x0c,
- 0xff, 0x06, 0xd4, 0x08,
- 0x01, 0x0c, 0xf8, 0x7a,
- 0x04, 0x0c, 0xfa, 0x6a,
- 0xe0, 0x03, 0x7a, 0x08,
- 0xe0, 0x3d, 0x06, 0x63,
- 0xff, 0x65, 0xcc, 0x08,
- 0xff, 0x12, 0xda, 0x0c,
- 0xff, 0x06, 0xd4, 0x0c,
- 0xd1, 0x6a, 0x22, 0x01,
- 0x00, 0x65, 0xaa, 0x40,
- 0xff, 0x65, 0x26, 0x09,
- 0x01, 0x0b, 0x1a, 0x6b,
- 0x10, 0x0c, 0x0c, 0x7b,
- 0x04, 0x0b, 0x14, 0x6b,
- 0xff, 0x6a, 0xca, 0x08,
- 0x04, 0x93, 0x18, 0x6b,
- 0x01, 0x94, 0x16, 0x7b,
- 0x10, 0x94, 0x18, 0x6b,
- 0x80, 0x3d, 0x1e, 0x73,
- 0x0f, 0x04, 0x22, 0x6b,
- 0x02, 0x03, 0x22, 0x7b,
- 0x11, 0x0c, 0x1e, 0x7b,
- 0xc7, 0x93, 0x26, 0x09,
- 0xff, 0x99, 0xd4, 0x08,
- 0x38, 0x93, 0x24, 0x6b,
- 0xff, 0x6a, 0xd4, 0x0c,
- 0x80, 0x36, 0x28, 0x6b,
- 0x21, 0x6a, 0x22, 0x05,
- 0xff, 0x65, 0x20, 0x09,
- 0xff, 0x51, 0x36, 0x63,
- 0xff, 0x37, 0xc8, 0x08,
- 0xa1, 0x6a, 0x42, 0x43,
- 0xff, 0x51, 0xc8, 0x08,
- 0xb9, 0x6a, 0x42, 0x43,
- 0xff, 0x90, 0xa4, 0x08,
- 0xff, 0xba, 0x46, 0x73,
- 0xff, 0xba, 0x20, 0x09,
- 0xff, 0x65, 0xca, 0x18,
- 0x00, 0x6c, 0x3a, 0x63,
- 0xff, 0x90, 0xca, 0x0c,
- 0xff, 0x6a, 0xca, 0x04,
- 0x20, 0x36, 0x5a, 0x7b,
- 0x00, 0x90, 0x2e, 0x5b,
- 0xff, 0x65, 0x5a, 0x73,
- 0xff, 0x52, 0x58, 0x73,
- 0xff, 0xba, 0xcc, 0x08,
- 0xff, 0x52, 0x20, 0x09,
- 0xff, 0x66, 0x74, 0x09,
- 0xff, 0x65, 0x20, 0x0d,
- 0xff, 0xba, 0x7e, 0x0c,
- 0x00, 0x6a, 0x86, 0x5c,
- 0x0d, 0x6a, 0x6a, 0x00,
- 0x00, 0x51, 0xe6, 0x43,
- 0xff, 0x3f, 0xb4, 0x73,
- 0xff, 0x6a, 0xa2, 0x00,
- 0x00, 0x3f, 0x2e, 0x5b,
- 0xff, 0x65, 0xb4, 0x73,
- 0x20, 0x36, 0x6c, 0x00,
- 0x20, 0xa0, 0x6e, 0x6b,
- 0xff, 0xb9, 0xa2, 0x0c,
- 0xff, 0x6a, 0xa2, 0x04,
- 0xff, 0x65, 0xa4, 0x08,
- 0xe0, 0x6a, 0xcc, 0x00,
- 0x45, 0x6a, 0xda, 0x5b,
- 0x01, 0x6a, 0xd0, 0x01,
- 0x09, 0x6a, 0xd6, 0x01,
- 0x80, 0xeb, 0x7a, 0x7b,
- 0x01, 0x6a, 0xd6, 0x01,
- 0x01, 0xe9, 0xa4, 0x34,
- 0x88, 0x6a, 0xcc, 0x00,
- 0x45, 0x6a, 0xda, 0x5b,
- 0x01, 0x6a, 0x18, 0x01,
- 0xff, 0x6a, 0x1a, 0x09,
- 0xff, 0x6a, 0x1c, 0x09,
- 0x0d, 0x6a, 0x26, 0x01,
- 0x00, 0x65, 0x78, 0x5c,
- 0xff, 0x99, 0xa4, 0x0c,
- 0xff, 0x65, 0xa4, 0x08,
- 0xe0, 0x6a, 0xcc, 0x00,
- 0x45, 0x6a, 0xda, 0x5b,
- 0x01, 0x6a, 0xd0, 0x01,
- 0x01, 0x6a, 0xdc, 0x05,
- 0x88, 0x6a, 0xcc, 0x00,
- 0x45, 0x6a, 0xda, 0x5b,
- 0x01, 0x6a, 0x18, 0x01,
- 0xff, 0x6a, 0x1a, 0x09,
- 0xff, 0x6a, 0x1c, 0x09,
- 0x01, 0x6a, 0x26, 0x05,
- 0x01, 0x65, 0xd8, 0x31,
- 0x09, 0xee, 0xdc, 0x01,
- 0x80, 0xee, 0xaa, 0x7b,
- 0xff, 0x6a, 0xdc, 0x0d,
- 0xff, 0x65, 0x32, 0x09,
- 0x0a, 0x93, 0x26, 0x01,
- 0x00, 0x65, 0x78, 0x44,
- 0xff, 0x37, 0xc8, 0x08,
- 0x00, 0x6a, 0x70, 0x5b,
- 0xff, 0x52, 0xa2, 0x0c,
- 0x01, 0x0c, 0xba, 0x7b,
- 0x04, 0x0c, 0xba, 0x6b,
- 0xe0, 0x03, 0x06, 0x08,
- 0xe0, 0x03, 0x7a, 0x0c,
- 0xff, 0x8c, 0x10, 0x08,
- 0xff, 0x8d, 0x12, 0x08,
- 0xff, 0x8e, 0x14, 0x0c,
- 0xff, 0x6c, 0xda, 0x08,
- 0xff, 0x6c, 0xda, 0x08,
- 0xff, 0x6c, 0xda, 0x08,
- 0xff, 0x6c, 0xda, 0x08,
- 0xff, 0x6c, 0xda, 0x08,
- 0xff, 0x6c, 0xda, 0x08,
- 0xff, 0x6c, 0xda, 0x0c,
- 0x3d, 0x64, 0xa4, 0x28,
- 0x55, 0x64, 0xc8, 0x28,
- 0x00, 0x6c, 0xda, 0x18,
- 0xff, 0x52, 0xc8, 0x08,
- 0x00, 0x6c, 0xda, 0x20,
- 0xff, 0x6a, 0xc8, 0x08,
- 0x00, 0x6c, 0xda, 0x20,
- 0x00, 0x6c, 0xda, 0x24,
- 0xff, 0x65, 0xc8, 0x08,
- 0xe0, 0x6a, 0xcc, 0x00,
- 0x41, 0x6a, 0xd6, 0x5b,
- 0xff, 0x90, 0xe2, 0x09,
- 0x20, 0x6a, 0xd0, 0x01,
- 0x04, 0x35, 0xf8, 0x7b,
- 0x1d, 0x6a, 0xdc, 0x01,
- 0xdc, 0xee, 0xf4, 0x63,
- 0x00, 0x65, 0x0e, 0x44,
- 0x01, 0x6a, 0xdc, 0x01,
- 0x20, 0xa0, 0xd8, 0x31,
- 0x09, 0xee, 0xdc, 0x01,
- 0x80, 0xee, 0xfe, 0x7b,
- 0x11, 0x6a, 0xdc, 0x01,
- 0x50, 0xee, 0x02, 0x64,
- 0x20, 0x6a, 0xd0, 0x01,
- 0x09, 0x6a, 0xdc, 0x01,
- 0x88, 0xee, 0x08, 0x64,
- 0x19, 0x6a, 0xdc, 0x01,
- 0xd8, 0xee, 0x0c, 0x64,
- 0xff, 0x6a, 0xdc, 0x09,
- 0x18, 0xee, 0x10, 0x6c,
- 0xff, 0x6a, 0xd4, 0x0c,
- 0x88, 0x6a, 0xcc, 0x00,
- 0x41, 0x6a, 0xd6, 0x5b,
- 0x20, 0x6a, 0x18, 0x01,
- 0xff, 0x6a, 0x1a, 0x09,
- 0xff, 0x6a, 0x1c, 0x09,
- 0xff, 0x35, 0x26, 0x09,
- 0x04, 0x35, 0x3c, 0x6c,
- 0xa0, 0x6a, 0xca, 0x00,
- 0x20, 0x65, 0xc8, 0x18,
- 0xff, 0x6c, 0x32, 0x09,
- 0xff, 0x6c, 0x32, 0x09,
- 0xff, 0x6c, 0x32, 0x09,
- 0xff, 0x6c, 0x32, 0x09,
- 0xff, 0x6c, 0x32, 0x09,
- 0xff, 0x6c, 0x32, 0x09,
- 0xff, 0x6c, 0x32, 0x09,
- 0xff, 0x6c, 0x32, 0x09,
- 0x00, 0x65, 0x26, 0x64,
- 0x0a, 0x93, 0x26, 0x01,
- 0x00, 0x65, 0x78, 0x44,
- 0xa0, 0x6a, 0xcc, 0x00,
- 0xe8, 0x6a, 0xc8, 0x00,
- 0x01, 0x94, 0x40, 0x6c,
- 0x10, 0x94, 0x42, 0x6c,
- 0x08, 0x94, 0x54, 0x6c,
- 0x08, 0x94, 0x54, 0x6c,
- 0x08, 0x94, 0x54, 0x6c,
- 0x00, 0x65, 0x68, 0x5c,
- 0x08, 0x64, 0xc8, 0x18,
- 0x00, 0x8c, 0xca, 0x18,
- 0x00, 0x65, 0x4a, 0x4c,
- 0x00, 0x65, 0x40, 0x44,
- 0xf7, 0x93, 0x26, 0x09,
- 0x08, 0x93, 0x56, 0x6c,
- 0x00, 0x65, 0x68, 0x5c,
- 0x08, 0x64, 0xc8, 0x18,
- 0x08, 0x64, 0x58, 0x64,
- 0xff, 0x6a, 0xd4, 0x0c,
- 0x00, 0x65, 0x78, 0x5c,
- 0x00, 0x65, 0x68, 0x5c,
- 0x00, 0x65, 0x68, 0x5c,
- 0x00, 0x65, 0x68, 0x5c,
- 0xff, 0x99, 0xda, 0x08,
- 0xff, 0x99, 0xda, 0x08,
- 0xff, 0x99, 0xda, 0x08,
- 0xff, 0x99, 0xda, 0x08,
- 0xff, 0x99, 0xda, 0x08,
- 0xff, 0x99, 0xda, 0x08,
- 0xff, 0x99, 0xda, 0x08,
- 0xff, 0x99, 0xda, 0x0c,
- 0x08, 0x94, 0x78, 0x7c,
- 0xf7, 0x93, 0x26, 0x09,
- 0x08, 0x93, 0x7c, 0x6c,
- 0xff, 0x6a, 0xd4, 0x0c,
- 0xff, 0x40, 0x74, 0x09,
- 0xff, 0x90, 0x80, 0x08,
- 0xff, 0x6a, 0x72, 0x05,
- 0xff, 0x40, 0x94, 0x64,
- 0xff, 0x3f, 0x8c, 0x64,
- 0xff, 0x6a, 0xca, 0x04,
- 0xff, 0x3f, 0x20, 0x09,
- 0x01, 0x6a, 0x6a, 0x00,
- 0x00, 0xb9, 0xe6, 0x5b,
- 0xff, 0xba, 0x7e, 0x0c,
- 0xff, 0x40, 0x20, 0x09,
- 0xff, 0xba, 0x80, 0x0c,
- 0xff, 0x3f, 0x74, 0x09,
- 0xff, 0x90, 0x7e, 0x0c,
-};
-
-static int aic7xxx_patch15_func(struct aic7xxx_host *p);
-
-static int
-aic7xxx_patch15_func(struct aic7xxx_host *p)
-{
- return ((p->bugs & AHC_BUG_SCBCHAN_UPLOAD) != 0);
-}
-
-static int aic7xxx_patch14_func(struct aic7xxx_host *p);
-
-static int
-aic7xxx_patch14_func(struct aic7xxx_host *p)
-{
- return ((p->bugs & AHC_BUG_PCI_2_1_RETRY) != 0);
-}
-
-static int aic7xxx_patch13_func(struct aic7xxx_host *p);
-
-static int
-aic7xxx_patch13_func(struct aic7xxx_host *p)
-{
- return ((p->features & AHC_WIDE) != 0);
-}
-
-static int aic7xxx_patch12_func(struct aic7xxx_host *p);
-
-static int
-aic7xxx_patch12_func(struct aic7xxx_host *p)
-{
- return ((p->bugs & AHC_BUG_AUTOFLUSH) != 0);
-}
-
-static int aic7xxx_patch11_func(struct aic7xxx_host *p);
-
-static int
-aic7xxx_patch11_func(struct aic7xxx_host *p)
-{
- return ((p->features & AHC_ULTRA2) == 0);
-}
-
-static int aic7xxx_patch10_func(struct aic7xxx_host *p);
-
-static int
-aic7xxx_patch10_func(struct aic7xxx_host *p)
-{
- return ((p->features & AHC_CMD_CHAN) == 0);
-}
-
-static int aic7xxx_patch9_func(struct aic7xxx_host *p);
-
-static int
-aic7xxx_patch9_func(struct aic7xxx_host *p)
-{
- return ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895);
-}
-
-static int aic7xxx_patch8_func(struct aic7xxx_host *p);
-
-static int
-aic7xxx_patch8_func(struct aic7xxx_host *p)
-{
- return ((p->features & AHC_ULTRA) != 0);
-}
-
-static int aic7xxx_patch7_func(struct aic7xxx_host *p);
-
-static int
-aic7xxx_patch7_func(struct aic7xxx_host *p)
-{
- return ((p->features & AHC_ULTRA2) != 0);
-}
-
-static int aic7xxx_patch6_func(struct aic7xxx_host *p);
-
-static int
-aic7xxx_patch6_func(struct aic7xxx_host *p)
-{
- return ((p->flags & AHC_PAGESCBS) == 0);
-}
-
-static int aic7xxx_patch5_func(struct aic7xxx_host *p);
-
-static int
-aic7xxx_patch5_func(struct aic7xxx_host *p)
-{
- return ((p->flags & AHC_PAGESCBS) != 0);
-}
-
-static int aic7xxx_patch4_func(struct aic7xxx_host *p);
-
-static int
-aic7xxx_patch4_func(struct aic7xxx_host *p)
-{
- return ((p->features & AHC_QUEUE_REGS) != 0);
-}
-
-static int aic7xxx_patch3_func(struct aic7xxx_host *p);
-
-static int
-aic7xxx_patch3_func(struct aic7xxx_host *p)
-{
- return ((p->features & AHC_TWIN) != 0);
-}
-
-static int aic7xxx_patch2_func(struct aic7xxx_host *p);
-
-static int
-aic7xxx_patch2_func(struct aic7xxx_host *p)
-{
- return ((p->features & AHC_QUEUE_REGS) == 0);
-}
-
-static int aic7xxx_patch1_func(struct aic7xxx_host *p);
-
-static int
-aic7xxx_patch1_func(struct aic7xxx_host *p)
-{
- return ((p->features & AHC_CMD_CHAN) != 0);
-}
-
-static int aic7xxx_patch0_func(struct aic7xxx_host *p);
-
-static int
-aic7xxx_patch0_func(struct aic7xxx_host *p)
-{
- return (0);
-}
-
-struct sequencer_patch {
- int (*patch_func)(struct aic7xxx_host *);
- unsigned int begin :10,
- skip_instr :10,
- skip_patch :12;
-} sequencer_patches[] = {
- { aic7xxx_patch1_func, 3, 2, 1 },
- { aic7xxx_patch2_func, 7, 1, 1 },
- { aic7xxx_patch2_func, 8, 1, 1 },
- { aic7xxx_patch3_func, 11, 4, 1 },
- { aic7xxx_patch4_func, 16, 3, 2 },
- { aic7xxx_patch0_func, 19, 4, 1 },
- { aic7xxx_patch5_func, 23, 1, 1 },
- { aic7xxx_patch6_func, 26, 1, 1 },
- { aic7xxx_patch1_func, 29, 1, 2 },
- { aic7xxx_patch0_func, 30, 3, 1 },
- { aic7xxx_patch3_func, 39, 4, 1 },
- { aic7xxx_patch7_func, 43, 3, 2 },
- { aic7xxx_patch0_func, 46, 3, 1 },
- { aic7xxx_patch8_func, 52, 7, 1 },
- { aic7xxx_patch3_func, 60, 3, 1 },
- { aic7xxx_patch7_func, 63, 2, 1 },
- { aic7xxx_patch7_func, 102, 1, 2 },
- { aic7xxx_patch0_func, 103, 2, 1 },
- { aic7xxx_patch7_func, 107, 2, 1 },
- { aic7xxx_patch9_func, 109, 1, 1 },
- { aic7xxx_patch10_func, 110, 2, 1 },
- { aic7xxx_patch7_func, 113, 1, 2 },
- { aic7xxx_patch0_func, 114, 1, 1 },
- { aic7xxx_patch1_func, 118, 1, 1 },
- { aic7xxx_patch1_func, 121, 3, 3 },
- { aic7xxx_patch11_func, 123, 1, 1 },
- { aic7xxx_patch0_func, 124, 5, 1 },
- { aic7xxx_patch7_func, 132, 1, 1 },
- { aic7xxx_patch9_func, 133, 1, 1 },
- { aic7xxx_patch10_func, 134, 3, 1 },
- { aic7xxx_patch7_func, 137, 3, 2 },
- { aic7xxx_patch0_func, 140, 2, 1 },
- { aic7xxx_patch7_func, 142, 5, 2 },
- { aic7xxx_patch0_func, 147, 3, 1 },
- { aic7xxx_patch7_func, 150, 1, 2 },
- { aic7xxx_patch0_func, 151, 2, 1 },
- { aic7xxx_patch1_func, 153, 15, 4 },
- { aic7xxx_patch11_func, 166, 1, 2 },
- { aic7xxx_patch0_func, 167, 1, 1 },
- { aic7xxx_patch0_func, 168, 10, 1 },
- { aic7xxx_patch7_func, 181, 1, 2 },
- { aic7xxx_patch0_func, 182, 2, 1 },
- { aic7xxx_patch7_func, 184, 18, 1 },
- { aic7xxx_patch1_func, 202, 3, 3 },
- { aic7xxx_patch7_func, 204, 1, 1 },
- { aic7xxx_patch0_func, 205, 4, 1 },
- { aic7xxx_patch7_func, 210, 2, 1 },
- { aic7xxx_patch7_func, 215, 13, 3 },
- { aic7xxx_patch12_func, 218, 1, 1 },
- { aic7xxx_patch12_func, 219, 4, 1 },
- { aic7xxx_patch1_func, 229, 3, 3 },
- { aic7xxx_patch11_func, 231, 1, 1 },
- { aic7xxx_patch0_func, 232, 5, 1 },
- { aic7xxx_patch11_func, 237, 1, 2 },
- { aic7xxx_patch0_func, 238, 9, 1 },
- { aic7xxx_patch13_func, 254, 1, 2 },
- { aic7xxx_patch0_func, 255, 1, 1 },
- { aic7xxx_patch4_func, 316, 1, 2 },
- { aic7xxx_patch0_func, 317, 1, 1 },
- { aic7xxx_patch2_func, 320, 1, 1 },
- { aic7xxx_patch1_func, 330, 3, 2 },
- { aic7xxx_patch0_func, 333, 5, 1 },
- { aic7xxx_patch13_func, 341, 1, 2 },
- { aic7xxx_patch0_func, 342, 1, 1 },
- { aic7xxx_patch5_func, 347, 1, 1 },
- { aic7xxx_patch11_func, 389, 15, 2 },
- { aic7xxx_patch14_func, 402, 1, 1 },
- { aic7xxx_patch1_func, 441, 7, 2 },
- { aic7xxx_patch0_func, 448, 8, 1 },
- { aic7xxx_patch1_func, 457, 4, 2 },
- { aic7xxx_patch0_func, 461, 6, 1 },
- { aic7xxx_patch1_func, 467, 4, 2 },
- { aic7xxx_patch0_func, 471, 3, 1 },
- { aic7xxx_patch10_func, 481, 10, 1 },
- { aic7xxx_patch1_func, 500, 22, 5 },
- { aic7xxx_patch11_func, 508, 4, 1 },
- { aic7xxx_patch7_func, 512, 7, 3 },
- { aic7xxx_patch15_func, 512, 5, 2 },
- { aic7xxx_patch0_func, 517, 2, 1 },
- { aic7xxx_patch10_func, 522, 50, 3 },
- { aic7xxx_patch14_func, 543, 17, 2 },
- { aic7xxx_patch0_func, 560, 4, 1 },
- { aic7xxx_patch10_func, 572, 4, 1 },
- { aic7xxx_patch5_func, 576, 2, 1 },
- { aic7xxx_patch5_func, 579, 9, 1 },
-
-};
diff --git a/drivers/scsi/aic7xxx_old/scsi_message.h b/drivers/scsi/aic7xxx_old/scsi_message.h
deleted file mode 100644
index a79f89c65173..000000000000
--- a/drivers/scsi/aic7xxx_old/scsi_message.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* Messages (1 byte) */ /* I/T (M)andatory or (O)ptional */
-#define MSG_CMDCOMPLETE 0x00 /* M/M */
-#define MSG_EXTENDED 0x01 /* O/O */
-#define MSG_SAVEDATAPOINTER 0x02 /* O/O */
-#define MSG_RESTOREPOINTERS 0x03 /* O/O */
-#define MSG_DISCONNECT 0x04 /* O/O */
-#define MSG_INITIATOR_DET_ERR 0x05 /* M/M */
-#define MSG_ABORT 0x06 /* O/M */
-#define MSG_MESSAGE_REJECT 0x07 /* M/M */
-#define MSG_NOOP 0x08 /* M/M */
-#define MSG_PARITY_ERROR 0x09 /* M/M */
-#define MSG_LINK_CMD_COMPLETE 0x0a /* O/O */
-#define MSG_LINK_CMD_COMPLETEF 0x0b /* O/O */
-#define MSG_BUS_DEV_RESET 0x0c /* O/M */
-#define MSG_ABORT_TAG 0x0d /* O/O */
-#define MSG_CLEAR_QUEUE 0x0e /* O/O */
-#define MSG_INIT_RECOVERY 0x0f /* O/O */
-#define MSG_REL_RECOVERY 0x10 /* O/O */
-#define MSG_TERM_IO_PROC 0x11 /* O/O */
-
-/* Messages (2 byte) */
-#define MSG_SIMPLE_Q_TAG 0x20 /* O/O */
-#define MSG_HEAD_OF_Q_TAG 0x21 /* O/O */
-#define MSG_ORDERED_Q_TAG 0x22 /* O/O */
-#define MSG_IGN_WIDE_RESIDUE 0x23 /* O/O */
-
-/* Identify message */ /* M/M */
-#define MSG_IDENTIFYFLAG 0x80
-#define MSG_IDENTIFY_DISCFLAG 0x40
-#define MSG_IDENTIFY(lun, disc) (((disc) ? 0xc0 : MSG_IDENTIFYFLAG) | (lun))
-#define MSG_ISIDENTIFY(m) ((m) & MSG_IDENTIFYFLAG)
-
-/* Extended messages (opcode and length) */
-#define MSG_EXT_SDTR 0x01
-#define MSG_EXT_SDTR_LEN 0x03
-
-#define MSG_EXT_WDTR 0x03
-#define MSG_EXT_WDTR_LEN 0x02
-#define MSG_EXT_WDTR_BUS_8_BIT 0x00
-#define MSG_EXT_WDTR_BUS_16_BIT 0x01
-#define MSG_EXT_WDTR_BUS_32_BIT 0x02
-
-#define MSG_EXT_PPR 0x04
-#define MSG_EXT_PPR_LEN 0x06
-#define MSG_EXT_PPR_OPTION_ST 0x00
-#define MSG_EXT_PPR_OPTION_DT_CRC 0x02
-#define MSG_EXT_PPR_OPTION_DT_UNITS 0x03
-#define MSG_EXT_PPR_OPTION_DT_CRC_QUICK 0x04
-#define MSG_EXT_PPR_OPTION_DT_UNITS_QUICK 0x05
diff --git a/drivers/scsi/aic7xxx_old/sequencer.h b/drivers/scsi/aic7xxx_old/sequencer.h
deleted file mode 100644
index ee66855222b1..000000000000
--- a/drivers/scsi/aic7xxx_old/sequencer.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Instruction formats for the sequencer program downloaded to
- * Aic7xxx SCSI host adapters
- *
- * Copyright (c) 1997, 1998 Justin T. Gibbs.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Where this Software is combined with software released under the terms of
- * the GNU General Public License ("GPL") and the terms of the GPL would require the
- * combined work to also be released under the terms of the GPL, the terms
- * and conditions of this License will apply in addition to those of the
- * GPL with the exception of any terms or conditions of this License that
- * conflict with, or are expressly prohibited by, the GPL.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $Id: sequencer.h,v 1.3 1997/09/27 19:37:31 gibbs Exp $
- */
-
-#ifdef __LITTLE_ENDIAN_BITFIELD
-struct ins_format1 {
- unsigned int
- immediate : 8,
- source : 9,
- destination : 9,
- ret : 1,
- opcode : 4,
- parity : 1;
-};
-
-struct ins_format2 {
- unsigned int
- shift_control : 8,
- source : 9,
- destination : 9,
- ret : 1,
- opcode : 4,
- parity : 1;
-};
-
-struct ins_format3 {
- unsigned int
- immediate : 8,
- source : 9,
- address : 10,
- opcode : 4,
- parity : 1;
-};
-#elif defined(__BIG_ENDIAN_BITFIELD)
-struct ins_format1 {
- unsigned int
- parity : 1,
- opcode : 4,
- ret : 1,
- destination : 9,
- source : 9,
- immediate : 8;
-};
-
-struct ins_format2 {
- unsigned int
- parity : 1,
- opcode : 4,
- ret : 1,
- destination : 9,
- source : 9,
- shift_control : 8;
-};
-
-struct ins_format3 {
- unsigned int
- parity : 1,
- opcode : 4,
- address : 10,
- source : 9,
- immediate : 8;
-};
-#endif
-
-union ins_formats {
- struct ins_format1 format1;
- struct ins_format2 format2;
- struct ins_format3 format3;
- unsigned char bytes[4];
- unsigned int integer;
-};
-struct instruction {
- union ins_formats format;
- unsigned int srcline;
- struct symbol *patch_label;
- struct {
- struct instruction *stqe_next;
- } links;
-};
-
-#define AIC_OP_OR 0x0
-#define AIC_OP_AND 0x1
-#define AIC_OP_XOR 0x2
-#define AIC_OP_ADD 0x3
-#define AIC_OP_ADC 0x4
-#define AIC_OP_ROL 0x5
-#define AIC_OP_BMOV 0x6
-
-#define AIC_OP_JMP 0x8
-#define AIC_OP_JC 0x9
-#define AIC_OP_JNC 0xa
-#define AIC_OP_CALL 0xb
-#define AIC_OP_JNE 0xc
-#define AIC_OP_JNZ 0xd
-#define AIC_OP_JE 0xe
-#define AIC_OP_JZ 0xf
-
-/* Pseudo Ops */
-#define AIC_OP_SHL 0x10
-#define AIC_OP_SHR 0x20
-#define AIC_OP_ROR 0x30
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index ffadbee0b4d9..889066d9d6fb 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -541,10 +541,8 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
ip_type = BE2_IPV6;
len = mgmt_get_if_info(phba, ip_type, &if_info);
- if (len) {
- kfree(if_info);
+ if (len)
return len;
- }
switch (param) {
case ISCSI_NET_PARAM_IPV4_ADDR:
@@ -569,7 +567,7 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
break;
case ISCSI_NET_PARAM_VLAN_ID:
if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
- return -EINVAL;
+ len = -EINVAL;
else
len = sprintf(buf, "%d\n",
(if_info->vlan_priority &
@@ -577,7 +575,7 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
break;
case ISCSI_NET_PARAM_VLAN_PRIORITY:
if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
- return -EINVAL;
+ len = -EINVAL;
else
len = sprintf(buf, "%d\n",
((if_info->vlan_priority >> 13) &
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 520540a5fef6..e3f67b097a5c 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -1367,10 +1367,6 @@ bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
bfa_status_t status;
- iocfc->faa_args.faa_attr = attr;
- iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
- iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
-
status = bfa_faa_validate_request(bfa);
if (status != BFA_STATUS_OK)
return status;
@@ -1378,6 +1374,10 @@ bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
if (iocfc->faa_args.busy == BFA_TRUE)
return BFA_STATUS_DEVBUSY;
+ iocfc->faa_args.faa_attr = attr;
+ iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
+ iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
+
iocfc->faa_args.busy = BFA_TRUE;
memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index d40a79f5265f..877b86dd2837 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -132,6 +132,7 @@ enum bfa_status {
BFA_STATUS_ETIMER = 5, /* Timer expired - Retry, if persists,
* contact support */
BFA_STATUS_EPROTOCOL = 6, /* Protocol error */
+ BFA_STATUS_BADFLASH = 9, /* Flash is bad */
BFA_STATUS_SFP_UNSUPP = 10, /* Unsupported SFP - Replace SFP */
BFA_STATUS_UNKNOWN_VFID = 11, /* VF_ID not found */
BFA_STATUS_DATACORRUPTED = 12, /* Diag returned data corrupted */
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 562ef739b0bc..64069a0a3d0d 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -1026,7 +1026,7 @@ struct fc_alpabm_s {
#define FC_ED_TOV 2
#define FC_REC_TOV (FC_ED_TOV + 1)
#define FC_RA_TOV 10
-#define FC_ELS_TOV ((2 * FC_RA_TOV) + 1)
+#define FC_ELS_TOV (2 * FC_RA_TOV)
#define FC_FCCT_TOV (3 * FC_RA_TOV)
/*
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index f5e4e61a0fd7..ff75ef891755 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -773,7 +773,20 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
bfa_trc(lport->fcs, fchs->type);
if (!bfa_fcs_lport_is_online(lport)) {
- bfa_stats(lport, uf_recv_drops);
+ /*
+ * In direct attach topology, it is possible to get a PLOGI
+ * before the lport is online due to port feature
+ * (QoS/Trunk/FEC/CR), so send a rjt
+ */
+ if ((fchs->type == FC_TYPE_ELS) &&
+ (els_cmd->els_code == FC_ELS_PLOGI)) {
+ bfa_fcs_lport_send_ls_rjt(lport, fchs,
+ FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD,
+ FC_LS_RJT_EXP_NO_ADDL_INFO);
+ bfa_stats(lport, plogi_rcvd);
+ } else
+ bfa_stats(lport, uf_recv_drops);
+
return;
}
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index f78bcb6696b2..65180e15de6e 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -21,6 +21,7 @@
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"
+#include "bfi.h"
BFA_TRC_FILE(CNA, IOC);
@@ -45,6 +46,14 @@ BFA_TRC_FILE(CNA, IOC);
#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
+#define bfa_ioc_state_disabled(__sm) \
+ (((__sm) == BFI_IOC_UNINIT) || \
+ ((__sm) == BFI_IOC_INITING) || \
+ ((__sm) == BFI_IOC_HWINIT) || \
+ ((__sm) == BFI_IOC_DISABLED) || \
+ ((__sm) == BFI_IOC_FAIL) || \
+ ((__sm) == BFI_IOC_CFG_DISABLED))
+
/*
* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
*/
@@ -102,6 +111,12 @@ static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
+static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp(
+ struct bfi_ioc_image_hdr_s *base_fwhdr,
+ struct bfi_ioc_image_hdr_s *fwhdr_to_cmp);
+static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp(
+ struct bfa_ioc_s *ioc,
+ struct bfi_ioc_image_hdr_s *base_fwhdr);
/*
* IOC state machine definitions/declarations
@@ -1454,28 +1469,42 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
}
/*
- * Returns TRUE if same.
+ * Returns TRUE if driver is willing to work with current smem f/w version.
*/
bfa_boolean_t
-bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
+bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
+ struct bfi_ioc_image_hdr_s *smem_fwhdr)
{
struct bfi_ioc_image_hdr_s *drv_fwhdr;
- int i;
+ enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp;
drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
- for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
- if (fwhdr->md5sum[i] != cpu_to_le32(drv_fwhdr->md5sum[i])) {
- bfa_trc(ioc, i);
- bfa_trc(ioc, fwhdr->md5sum[i]);
- bfa_trc(ioc, drv_fwhdr->md5sum[i]);
- return BFA_FALSE;
- }
+ /*
+ * If smem is incompatible or old, driver should not work with it.
+ */
+ drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr);
+ if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
+ drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
+ return BFA_FALSE;
}
- bfa_trc(ioc, fwhdr->md5sum[0]);
- return BFA_TRUE;
+ /*
+	 * If flash has a better f/w than smem, do not work with smem.
+	 * If smem f/w == flash f/w, work with it (smem f/w is neither old nor incompatible here).
+	 * If flash is old or incompatible, work with smem only if smem f/w == drv f/w.
+ */
+ smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr);
+
+ if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) {
+ return BFA_FALSE;
+ } else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) {
+ return BFA_TRUE;
+ } else {
+ return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
+ BFA_TRUE : BFA_FALSE;
+ }
}
/*
@@ -1485,17 +1514,9 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
- struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
+ struct bfi_ioc_image_hdr_s fwhdr;
bfa_ioc_fwver_get(ioc, &fwhdr);
- drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
- bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
-
- if (fwhdr.signature != cpu_to_le32(drv_fwhdr->signature)) {
- bfa_trc(ioc, fwhdr.signature);
- bfa_trc(ioc, drv_fwhdr->signature);
- return BFA_FALSE;
- }
if (swab32(fwhdr.bootenv) != boot_env) {
bfa_trc(ioc, fwhdr.bootenv);
@@ -1506,6 +1527,168 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
+static bfa_boolean_t
+bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1,
+ struct bfi_ioc_image_hdr_s *fwhdr_2)
+{
+ int i;
+
+ for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++)
+ if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
+ return BFA_FALSE;
+
+ return BFA_TRUE;
+}
+
+/*
+ * Returns TRUE if major, minor and maintenance versions are the same.
+ * If patch versions are same, check for MD5 Checksum to be same.
+ */
+static bfa_boolean_t
+bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr,
+ struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
+{
+ if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
+ return BFA_FALSE;
+
+ if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
+ return BFA_FALSE;
+
+ if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
+ return BFA_FALSE;
+
+ if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
+ return BFA_FALSE;
+
+ if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
+ drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
+ drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) {
+ return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
+ }
+
+ return BFA_TRUE;
+}
+
+static bfa_boolean_t
+bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr)
+{
+ if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
+ return BFA_FALSE;
+
+ return BFA_TRUE;
+}
+
+static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr)
+{
+ if (fwhdr->fwver.phase == 0 &&
+ fwhdr->fwver.build == 0)
+ return BFA_TRUE;
+
+ return BFA_FALSE;
+}
+
+/*
+ * Compares fwhdr_to_cmp against base_fwhdr and returns whether it is incompatible, older, the same or better.
+ */
+static enum bfi_ioc_img_ver_cmp_e
+bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr,
+ struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
+{
+ if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE)
+ return BFI_IOC_IMG_VER_INCOMP;
+
+ if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
+ return BFI_IOC_IMG_VER_BETTER;
+
+ else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
+ return BFI_IOC_IMG_VER_OLD;
+
+ /*
+ * GA takes priority over internal builds of the same patch stream.
+ * At this point major minor maint and patch numbers are same.
+ */
+
+ if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) {
+ if (fwhdr_is_ga(fwhdr_to_cmp))
+ return BFI_IOC_IMG_VER_SAME;
+ else
+ return BFI_IOC_IMG_VER_OLD;
+ } else {
+ if (fwhdr_is_ga(fwhdr_to_cmp))
+ return BFI_IOC_IMG_VER_BETTER;
+ }
+
+ if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
+ return BFI_IOC_IMG_VER_OLD;
+
+ if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
+ return BFI_IOC_IMG_VER_OLD;
+
+ /*
+ * All Version Numbers are equal.
+ * Md5 check to be done as a part of compatibility check.
+ */
+ return BFI_IOC_IMG_VER_SAME;
+}
+
+#define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */
+
+bfa_status_t
+bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
+ u32 *fwimg)
+{
+ return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
+ BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
+ (char *)fwimg, BFI_FLASH_CHUNK_SZ);
+}
+
+static enum bfi_ioc_img_ver_cmp_e
+bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc,
+ struct bfi_ioc_image_hdr_s *base_fwhdr)
+{
+ struct bfi_ioc_image_hdr_s *flash_fwhdr;
+ bfa_status_t status;
+ u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
+
+ status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg);
+ if (status != BFA_STATUS_OK)
+ return BFI_IOC_IMG_VER_INCOMP;
+
+ flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg;
+ if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE)
+ return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
+ else
+ return BFI_IOC_IMG_VER_INCOMP;
+}
+
+
+/*
+ * Invalidate fwver signature
+ */
+bfa_status_t
+bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc)
+{
+
+ u32 pgnum, pgoff;
+ u32 loff = 0;
+ enum bfi_ioc_state ioc_fwstate;
+
+ ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
+ if (!bfa_ioc_state_disabled(ioc_fwstate))
+ return BFA_STATUS_ADAPTER_ENABLED;
+
+ pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+ pgoff = PSS_SMEM_PGOFF(loff);
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+ bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN);
+
+ return BFA_STATUS_OK;
+}
+
/*
* Conditionally flush any pending message from firmware at start.
*/
@@ -1544,8 +1727,8 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
if (!fwvalid) {
- bfa_ioc_boot(ioc, boot_type, boot_env);
- bfa_ioc_poll_fwinit(ioc);
+ if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
+ bfa_ioc_poll_fwinit(ioc);
return;
}
@@ -1580,8 +1763,8 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
/*
* Initialize the h/w for any other states.
*/
- bfa_ioc_boot(ioc, boot_type, boot_env);
- bfa_ioc_poll_fwinit(ioc);
+ if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
+ bfa_ioc_poll_fwinit(ioc);
}
static void
@@ -1684,7 +1867,7 @@ bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
/*
* Initiate a full firmware download.
*/
-static void
+static bfa_status_t
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
u32 boot_env)
{
@@ -1694,28 +1877,60 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
u32 chunkno = 0;
u32 i;
u32 asicmode;
+ u32 fwimg_size;
+ u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
+ bfa_status_t status;
+
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
+
+ status = bfa_ioc_flash_img_get_chnk(ioc,
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ fwimg = fwimg_buf;
+ } else {
+ fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
+ fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
+
+ bfa_trc(ioc, fwimg_size);
- bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
- fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
- for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
+ for (i = 0; i < fwimg_size; i++) {
if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
- fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ status = bfa_ioc_flash_img_get_chnk(ioc,
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
+ fwimg_buf);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ fwimg = fwimg_buf;
+ } else {
+ fwimg = bfa_cb_image_get_chunk(
+ bfa_ioc_asic_gen(ioc),
BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
}
/*
* write smem
*/
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
- cpu_to_le32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]));
+ fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
loff += sizeof(u32);
@@ -1733,8 +1948,12 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
ioc->ioc_regs.host_page_num_fn);
/*
- * Set boot type and device mode at the end.
+ * Set boot type, env and device mode at the end.
*/
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ boot_type = BFI_FWBOOT_TYPE_NORMAL;
+ }
asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
ioc->port0_mode, ioc->port1_mode);
bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
@@ -1743,6 +1962,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
swab32(boot_type));
bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
swab32(boot_env));
+ return BFA_STATUS_OK;
}
@@ -2002,13 +2222,30 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
* Interface used by diag module to do firmware boot with memory test
* as the entry vector.
*/
-void
+bfa_status_t
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
+ struct bfi_ioc_image_hdr_s *drv_fwhdr;
+ bfa_status_t status;
bfa_ioc_stats(ioc, ioc_boots);
if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
- return;
+ return BFA_STATUS_FAILED;
+
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_NORMAL) {
+
+ drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
+ bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
+
+ /*
+ * Work with Flash iff flash f/w is better than driver f/w.
+	 * Otherwise push the driver's firmware.
+ */
+ if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
+ BFI_IOC_IMG_VER_BETTER)
+ boot_type = BFI_FWBOOT_TYPE_FLASH;
+ }
/*
* Initialize IOC state of all functions on a chip reset.
@@ -2022,8 +2259,14 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
}
bfa_ioc_msgflush(ioc);
- bfa_ioc_download_fw(ioc, boot_type, boot_env);
- bfa_ioc_lpu_start(ioc);
+ status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
+ if (status == BFA_STATUS_OK)
+ bfa_ioc_lpu_start(ioc);
+ else {
+ WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST);
+ bfa_iocpf_timeout(ioc);
+ }
+ return status;
}
/*
@@ -2419,14 +2662,6 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
}
-#define bfa_ioc_state_disabled(__sm) \
- (((__sm) == BFI_IOC_UNINIT) || \
- ((__sm) == BFI_IOC_INITING) || \
- ((__sm) == BFI_IOC_HWINIT) || \
- ((__sm) == BFI_IOC_DISABLED) || \
- ((__sm) == BFI_IOC_FAIL) || \
- ((__sm) == BFI_IOC_CFG_DISABLED))
-
/*
* Check if adapter is disabled -- both IOCs should be in a disabled
* state.
@@ -6423,3 +6658,407 @@ bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
WARN_ON(1);
}
}
+
+/*
+ * register definitions
+ */
+#define FLI_CMD_REG 0x0001d000
+#define FLI_RDDATA_REG 0x0001d010
+#define FLI_ADDR_REG 0x0001d004
+#define FLI_DEV_STATUS_REG 0x0001d014
+
+#define BFA_FLASH_FIFO_SIZE 128 /* fifo size */
+#define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */
+#define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */
+#define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */
+
+enum bfa_flash_cmd {
+ BFA_FLASH_FAST_READ = 0x0b, /* fast read */
+ BFA_FLASH_READ_STATUS = 0x05, /* read status */
+};
+
+/**
+ * @brief hardware error definition
+ */
+enum bfa_flash_err {
+ BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */
+ BFA_FLASH_UNINIT = -2, /*!< flash not initialized */
+ BFA_FLASH_BAD = -3, /*!< flash bad */
+ BFA_FLASH_BUSY = -4, /*!< flash busy */
+ BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */
+ BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */
+ BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */
+ BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */
+ BFA_FLASH_ERR_LEN = -9, /*!< invalid length */
+};
+
+/**
+ * @brief flash command register data structure
+ */
+union bfa_flash_cmd_reg_u {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 act:1;
+ u32 rsv:1;
+ u32 write_cnt:9;
+ u32 read_cnt:9;
+ u32 addr_cnt:4;
+ u32 cmd:8;
+#else
+ u32 cmd:8;
+ u32 addr_cnt:4;
+ u32 read_cnt:9;
+ u32 write_cnt:9;
+ u32 rsv:1;
+ u32 act:1;
+#endif
+ } r;
+ u32 i;
+};
+
+/**
+ * @brief flash device status register data structure
+ */
+union bfa_flash_dev_status_reg_u {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 rsv:21;
+ u32 fifo_cnt:6;
+ u32 busy:1;
+ u32 init_status:1;
+ u32 present:1;
+ u32 bad:1;
+ u32 good:1;
+#else
+ u32 good:1;
+ u32 bad:1;
+ u32 present:1;
+ u32 init_status:1;
+ u32 busy:1;
+ u32 fifo_cnt:6;
+ u32 rsv:21;
+#endif
+ } r;
+ u32 i;
+};
+
+/**
+ * @brief flash address register data structure
+ */
+union bfa_flash_addr_reg_u {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 addr:24;
+ u32 dummy:8;
+#else
+ u32 dummy:8;
+ u32 addr:24;
+#endif
+ } r;
+ u32 i;
+};
+
+/**
+ * dg flash_raw_private Flash raw private functions
+ */
+static void
+bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
+ u8 rd_cnt, u8 ad_cnt, u8 op)
+{
+ union bfa_flash_cmd_reg_u cmd;
+
+ cmd.i = 0;
+ cmd.r.act = 1;
+ cmd.r.write_cnt = wr_cnt;
+ cmd.r.read_cnt = rd_cnt;
+ cmd.r.addr_cnt = ad_cnt;
+ cmd.r.cmd = op;
+ writel(cmd.i, (pci_bar + FLI_CMD_REG));
+}
+
+static void
+bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
+{
+ union bfa_flash_addr_reg_u addr;
+
+ addr.r.addr = address & 0x00ffffff;
+ addr.r.dummy = 0;
+ writel(addr.i, (pci_bar + FLI_ADDR_REG));
+}
+
+static int
+bfa_flash_cmd_act_check(void __iomem *pci_bar)
+{
+ union bfa_flash_cmd_reg_u cmd;
+
+ cmd.i = readl(pci_bar + FLI_CMD_REG);
+
+ if (cmd.r.act)
+ return BFA_FLASH_ERR_CMD_ACT;
+
+ return 0;
+}
+
+/**
+ * @brief
+ * Flush FLI data fifo.
+ *
+ * @param[in] pci_bar - pci bar address
+ * @param[in] dev_status - device status
+ *
+ * Return 0 on success, negative error number on error.
+ */
+static u32
+bfa_flash_fifo_flush(void __iomem *pci_bar)
+{
+ u32 i;
+ u32 t;
+ union bfa_flash_dev_status_reg_u dev_status;
+
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+
+ if (!dev_status.r.fifo_cnt)
+ return 0;
+
+ /* fifo counter in terms of words */
+ for (i = 0; i < dev_status.r.fifo_cnt; i++)
+ t = readl(pci_bar + FLI_RDDATA_REG);
+
+ /*
+ * Check the device status. It may take some time.
+ */
+ for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+ if (!dev_status.r.fifo_cnt)
+ break;
+ }
+
+ if (dev_status.r.fifo_cnt)
+ return BFA_FLASH_ERR_FIFO_CNT;
+
+ return 0;
+}
+
+/**
+ * @brief
+ * Read flash status.
+ *
+ * @param[in] pci_bar - pci bar address
+ *
+ * Return 0 on success, negative error number on error.
+*/
+static u32
+bfa_flash_status_read(void __iomem *pci_bar)
+{
+ union bfa_flash_dev_status_reg_u dev_status;
+ u32 status;
+ u32 ret_status;
+ int i;
+
+ status = bfa_flash_fifo_flush(pci_bar);
+ if (status < 0)
+ return status;
+
+ bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
+
+ for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+ status = bfa_flash_cmd_act_check(pci_bar);
+ if (!status)
+ break;
+ }
+
+ if (status)
+ return status;
+
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+ if (!dev_status.r.fifo_cnt)
+ return BFA_FLASH_BUSY;
+
+ ret_status = readl(pci_bar + FLI_RDDATA_REG);
+ ret_status >>= 24;
+
+ status = bfa_flash_fifo_flush(pci_bar);
+ if (status < 0)
+ return status;
+
+ return ret_status;
+}
+
+/**
+ * @brief
+ * Start flash read operation.
+ *
+ * @param[in] pci_bar - pci bar address
+ * @param[in] offset - flash address offset
+ * @param[in] len - read data length
+ * @param[in] buf - read data buffer
+ *
+ * Return 0 on success, negative error number on error.
+ */
+static u32
+bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
+ char *buf)
+{
+ u32 status;
+
+ /*
+	 * len must be a multiple of 4 and not exceed the fifo size
+ */
+ if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
+ return BFA_FLASH_ERR_LEN;
+
+ /*
+ * check status
+ */
+ status = bfa_flash_status_read(pci_bar);
+ if (status == BFA_FLASH_BUSY)
+ status = bfa_flash_status_read(pci_bar);
+
+ if (status < 0)
+ return status;
+
+ /*
+ * check if write-in-progress bit is cleared
+ */
+ if (status & BFA_FLASH_WIP_MASK)
+ return BFA_FLASH_ERR_WIP;
+
+ bfa_flash_set_addr(pci_bar, offset);
+
+ bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
+
+ return 0;
+}
+
+/**
+ * @brief
+ * Check flash read operation.
+ *
+ * @param[in] pci_bar - pci bar address
+ *
+ * Return flash device status, 1 if busy, 0 if not.
+ */
+static u32
+bfa_flash_read_check(void __iomem *pci_bar)
+{
+ if (bfa_flash_cmd_act_check(pci_bar))
+ return 1;
+
+ return 0;
+}
+/**
+ * @brief
+ * End flash read operation.
+ *
+ * @param[in] pci_bar - pci bar address
+ * @param[in] len - read data length
+ * @param[in] buf - read data buffer
+ *
+ */
+static void
+bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
+{
+
+ u32 i;
+
+ /*
+ * read data fifo up to 32 words
+ */
+ for (i = 0; i < len; i += 4) {
+ u32 w = readl(pci_bar + FLI_RDDATA_REG);
+ *((u32 *) (buf + i)) = swab32(w);
+ }
+
+ bfa_flash_fifo_flush(pci_bar);
+}
+
+/**
+ * @brief
+ * Perform flash raw read.
+ *
+ * @param[in] pci_bar - pci bar address
+ * @param[in] offset - flash partition address offset
+ * @param[in] buf - read data buffer
+ * @param[in] len - read data length
+ *
+ * Return status.
+ */
+
+
+#define FLASH_BLOCKING_OP_MAX 500
+#define FLASH_SEM_LOCK_REG 0x18820
+
+static int
+bfa_raw_sem_get(void __iomem *bar)
+{
+ int locked;
+
+ locked = readl((bar + FLASH_SEM_LOCK_REG));
+ return !locked;
+
+}
+
+bfa_status_t
+bfa_flash_sem_get(void __iomem *bar)
+{
+ u32 n = FLASH_BLOCKING_OP_MAX;
+
+ while (!bfa_raw_sem_get(bar)) {
+ if (--n <= 0)
+ return BFA_STATUS_BADFLASH;
+ udelay(10000);
+ }
+ return BFA_STATUS_OK;
+}
+
+void
+bfa_flash_sem_put(void __iomem *bar)
+{
+ writel(0, (bar + FLASH_SEM_LOCK_REG));
+}
+
+bfa_status_t
+bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
+ u32 len)
+{
+ u32 n, status;
+ u32 off, l, s, residue, fifo_sz;
+
+ residue = len;
+ off = 0;
+ fifo_sz = BFA_FLASH_FIFO_SIZE;
+ status = bfa_flash_sem_get(pci_bar);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ while (residue) {
+ s = offset + off;
+ n = s / fifo_sz;
+ l = (n + 1) * fifo_sz - s;
+ if (l > residue)
+ l = residue;
+
+ status = bfa_flash_read_start(pci_bar, offset + off, l,
+ &buf[off]);
+ if (status < 0) {
+ bfa_flash_sem_put(pci_bar);
+ return BFA_STATUS_FAILED;
+ }
+
+ n = BFA_FLASH_BLOCKING_OP_MAX;
+ while (bfa_flash_read_check(pci_bar)) {
+ if (--n <= 0) {
+ bfa_flash_sem_put(pci_bar);
+ return BFA_STATUS_FAILED;
+ }
+ }
+
+ bfa_flash_read_end(pci_bar, l, &buf[off]);
+
+ residue -= l;
+ off += l;
+ }
+ bfa_flash_sem_put(pci_bar);
+
+ return BFA_STATUS_OK;
+}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 90814fe85ac1..2e28392c2fb6 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -515,6 +515,8 @@ void bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc,
void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
void bfa_flash_memclaim(struct bfa_flash_s *flash,
u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
+bfa_status_t bfa_flash_raw_read(void __iomem *pci_bar_kva,
+ u32 offset, char *buf, u32 len);
/*
* DIAG module specific
@@ -888,7 +890,7 @@ void bfa_ioc_enable(struct bfa_ioc_s *ioc);
void bfa_ioc_disable(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
-void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type,
+bfa_status_t bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type,
u32 boot_env);
void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
@@ -919,6 +921,7 @@ bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
int *trclen);
bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
u32 *offset, int *buflen);
+bfa_status_t bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
struct bfi_ioc_image_hdr_s *fwhdr);
@@ -956,6 +959,8 @@ bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk,
bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk,
bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
+ u32 *fwimg);
/*
* bfa mfg wwn API functions
*/
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index e3b928746674..453c2f5b5561 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -81,6 +81,29 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
static bfa_boolean_t
bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
{
+ enum bfi_ioc_state alt_fwstate, cur_fwstate;
+ struct bfi_ioc_image_hdr_s fwhdr;
+
+ cur_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc);
+ bfa_trc(ioc, cur_fwstate);
+ alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc);
+ bfa_trc(ioc, alt_fwstate);
+
+ /*
+ * Uninit implies this is the only driver as of now.
+ */
+ if (cur_fwstate == BFI_IOC_UNINIT)
+ return BFA_TRUE;
+ /*
+ * Check if another driver with a different firmware is active
+ */
+ bfa_ioc_fwver_get(ioc, &fwhdr);
+ if (!bfa_ioc_fwver_cmp(ioc, &fwhdr) &&
+ alt_fwstate != BFI_IOC_DISABLED) {
+ bfa_trc(ioc, alt_fwstate);
+ return BFA_FALSE;
+ }
+
return BFA_TRUE;
}
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 6c41e57fd752..625225f31081 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -6758,7 +6758,7 @@ bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
dport->rp_pwwn = msg->info.teststart.pwwn;
dport->rp_nwwn = msg->info.teststart.nwwn;
dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm);
- bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO);
+ bfa_dport_result_start(dport, msg->info.teststart.mode);
break;
case BFI_DPORT_SCN_SUBTESTSTART:
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index fc80a325a1e6..cc0fbcdc5192 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -63,9 +63,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
-#define BFAD_FW_FILE_CB "cbfw-3.2.1.1.bin"
-#define BFAD_FW_FILE_CT "ctfw-3.2.1.1.bin"
-#define BFAD_FW_FILE_CT2 "ct2fw-3.2.1.1.bin"
+#define BFAD_FW_FILE_CB "cbfw-3.2.3.0.bin"
+#define BFAD_FW_FILE_CT "ctfw-3.2.3.0.bin"
+#define BFAD_FW_FILE_CT2 "ct2fw-3.2.3.0.bin"
static u32 *bfad_load_fwimg(struct pci_dev *pdev);
static void bfad_free_fwimg(void);
@@ -204,6 +204,7 @@ static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
{
unsigned long flags;
+ bfa_status_t ret;
bfa_trc(bfad, event);
@@ -217,7 +218,7 @@ bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
if (bfad_setup_intr(bfad)) {
printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
bfad->inst_no);
- bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
+ bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
break;
}
@@ -242,8 +243,26 @@ bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
printk(KERN_WARNING
"bfa %s: bfa init failed\n",
bfad->pci_name);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcs_init(&bfad->bfa_fcs);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ ret = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
+ if (ret != BFA_STATUS_OK) {
+ init_completion(&bfad->comp);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->pport.flags |= BFAD_PORT_DELETE;
+ bfa_fcs_exit(&bfad->bfa_fcs);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ wait_for_completion(&bfad->comp);
+
+ bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
+ break;
+ }
bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
- bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
+ bfa_sm_send_event(bfad, BFAD_E_HAL_INIT_FAILED);
}
break;
@@ -273,12 +292,14 @@ bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
retval = bfad_start_ops(bfad);
- if (retval != BFA_STATUS_OK)
+ if (retval != BFA_STATUS_OK) {
+ bfa_sm_set_state(bfad, bfad_sm_failed);
break;
+ }
bfa_sm_set_state(bfad, bfad_sm_operational);
break;
- case BFAD_E_INTR_INIT_FAILED:
+ case BFAD_E_INIT_FAILED:
bfa_sm_set_state(bfad, bfad_sm_uninit);
kthread_stop(bfad->bfad_tsk);
spin_lock_irqsave(&bfad->bfad_lock, flags);
@@ -286,7 +307,7 @@ bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
break;
- case BFAD_E_INIT_FAILED:
+ case BFAD_E_HAL_INIT_FAILED:
bfa_sm_set_state(bfad, bfad_sm_failed);
break;
default:
@@ -310,13 +331,8 @@ bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
break;
case BFAD_E_STOP:
- if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
- bfad_uncfg_pport(bfad);
- if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) {
- bfad_im_probe_undo(bfad);
- bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
- }
- bfad_stop(bfad);
+ bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
+ bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
break;
case BFAD_E_EXIT_COMP:
@@ -824,7 +840,7 @@ bfad_drv_init(struct bfad_s *bfad)
printk(KERN_WARNING
"Not enough memory to attach all Brocade HBA ports, %s",
"System may need more memory.\n");
- goto out_hal_mem_alloc_failure;
+ return BFA_STATUS_FAILED;
}
bfad->bfa.trcmod = bfad->trcmod;
@@ -841,31 +857,11 @@ bfad_drv_init(struct bfad_s *bfad)
bfad->bfa_fcs.trcmod = bfad->trcmod;
bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
- bfa_fcs_init(&bfad->bfa_fcs);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
- /* configure base port */
- rc = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
- if (rc != BFA_STATUS_OK)
- goto out_cfg_pport_fail;
-
return BFA_STATUS_OK;
-
-out_cfg_pport_fail:
- /* fcs exit - on cfg pport failure */
- spin_lock_irqsave(&bfad->bfad_lock, flags);
- init_completion(&bfad->comp);
- bfad->pport.flags |= BFAD_PORT_DELETE;
- bfa_fcs_exit(&bfad->bfa_fcs);
- spin_unlock_irqrestore(&bfad->bfad_lock, flags);
- wait_for_completion(&bfad->comp);
- /* bfa detach - free hal memory */
- bfa_detach(&bfad->bfa);
- bfad_hal_mem_release(bfad);
-out_hal_mem_alloc_failure:
- return BFA_STATUS_FAILED;
}
void
@@ -1009,13 +1005,19 @@ bfad_start_ops(struct bfad_s *bfad) {
/* FCS driver info init */
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
+
+ if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
+ bfa_fcs_update_cfg(&bfad->bfa_fcs);
+ else
+ bfa_fcs_init(&bfad->bfa_fcs);
+
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
- /*
- * FCS update cfg - reset the pwwn/nwwn of fabric base logical port
- * with values learned during bfa_init firmware GETATTR REQ.
- */
- bfa_fcs_update_cfg(&bfad->bfa_fcs);
+ if (!(bfad->bfad_flags & BFAD_CFG_PPORT_DONE)) {
+ retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
+ if (retval != BFA_STATUS_OK)
+ return BFA_STATUS_FAILED;
+ }
/* Setup fc host fixed attribute if the lk supports */
bfad_fc_host_init(bfad->pport.im_port);
@@ -1026,10 +1028,6 @@ bfad_start_ops(struct bfad_s *bfad) {
printk(KERN_WARNING "bfad_im_probe failed\n");
if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
bfa_sm_set_state(bfad, bfad_sm_failed);
- bfad_im_probe_undo(bfad);
- bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
- bfad_uncfg_pport(bfad);
- bfad_stop(bfad);
return BFA_STATUS_FAILED;
} else
bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
@@ -1399,7 +1397,6 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
return 0;
out_bfad_sm_failure:
- bfa_detach(&bfad->bfa);
bfad_hal_mem_release(bfad);
out_drv_init_failure:
/* Remove the debugfs node for this bfad */
@@ -1534,7 +1531,7 @@ restart_bfa(struct bfad_s *bfad)
if (bfad_setup_intr(bfad)) {
dev_printk(KERN_WARNING, &pdev->dev,
"%s: bfad_setup_intr failed\n", bfad->pci_name);
- bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
+ bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
return -1;
}
@@ -1802,7 +1799,7 @@ out:
static u32 *
bfad_load_fwimg(struct pci_dev *pdev)
{
- if (pdev->device == BFA_PCI_DEVICE_ID_CT2) {
+ if (bfa_asic_id_ct2(pdev->device)) {
if (bfi_image_ct2_size == 0)
bfad_read_firmware(pdev, &bfi_image_ct2,
&bfi_image_ct2_size, BFAD_FW_FILE_CT2);
@@ -1812,12 +1809,14 @@ bfad_load_fwimg(struct pci_dev *pdev)
bfad_read_firmware(pdev, &bfi_image_ct,
&bfi_image_ct_size, BFAD_FW_FILE_CT);
return bfi_image_ct;
- } else {
+ } else if (bfa_asic_id_cb(pdev->device)) {
if (bfi_image_cb_size == 0)
bfad_read_firmware(pdev, &bfi_image_cb,
&bfi_image_cb_size, BFAD_FW_FILE_CB);
return bfi_image_cb;
}
+
+ return NULL;
}
static void
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 0467c349251a..157f6044a9bb 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -229,6 +229,18 @@ bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
}
int
+bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
@@ -2893,6 +2905,9 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
case IOCMD_IOC_PCIFN_CFG:
rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
break;
+ case IOCMD_IOC_FW_SIG_INV:
+ rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd);
+ break;
case IOCMD_PCIFN_CREATE:
rc = bfad_iocmd_pcifn_create(bfad, iocmd);
break;
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
index 05f0fc9cf063..90abef691585 100644
--- a/drivers/scsi/bfa/bfad_bsg.h
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -34,6 +34,7 @@ enum {
IOCMD_IOC_RESET_FWSTATS,
IOCMD_IOC_SET_ADAPTER_NAME,
IOCMD_IOC_SET_PORT_NAME,
+ IOCMD_IOC_FW_SIG_INV,
IOCMD_IOCFC_GET_ATTR,
IOCMD_IOCFC_SET_INTR,
IOCMD_PORT_ENABLE,
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 78d3401bc16b..8b97877d42cf 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -57,7 +57,7 @@
#ifdef BFA_DRIVER_VERSION
#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
#else
-#define BFAD_DRIVER_VERSION "3.2.21.1"
+#define BFAD_DRIVER_VERSION "3.2.23.0"
#endif
#define BFAD_PROTO_NAME FCPI_NAME
@@ -240,8 +240,8 @@ enum bfad_sm_event {
BFAD_E_KTHREAD_CREATE_FAILED = 2,
BFAD_E_INIT = 3,
BFAD_E_INIT_SUCCESS = 4,
- BFAD_E_INIT_FAILED = 5,
- BFAD_E_INTR_INIT_FAILED = 6,
+ BFAD_E_HAL_INIT_FAILED = 5,
+ BFAD_E_INIT_FAILED = 6,
BFAD_E_FCS_EXIT_COMP = 7,
BFAD_E_EXIT_COMP = 8,
BFAD_E_STOP = 9
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 37bd2564e83b..9ef91f907dec 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -46,6 +46,7 @@
*/
#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
+#define BFI_FLASH_IMAGE_SZ 0x100000
/*
* Msg header common to all msgs
@@ -324,7 +325,29 @@ struct bfi_ioc_getattr_reply_s {
#define BFI_IOC_TRC_ENTS 256
#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
+#define BFA_IOC_FW_INV_SIGN (0xdeaddead)
#define BFI_IOC_MD5SUM_SZ 4
+
+struct bfi_ioc_fwver_s {
+#ifdef __BIG_ENDIAN
+ uint8_t patch;
+ uint8_t maint;
+ uint8_t minor;
+ uint8_t major;
+ uint8_t rsvd[2];
+ uint8_t build;
+ uint8_t phase;
+#else
+ uint8_t major;
+ uint8_t minor;
+ uint8_t maint;
+ uint8_t patch;
+ uint8_t phase;
+ uint8_t build;
+ uint8_t rsvd[2];
+#endif
+};
+
struct bfi_ioc_image_hdr_s {
u32 signature; /* constant signature */
u8 asic_gen; /* asic generation */
@@ -333,10 +356,18 @@ struct bfi_ioc_image_hdr_s {
u8 port1_mode; /* device mode for port 1 */
u32 exec; /* exec vector */
u32 bootenv; /* fimware boot env */
- u32 rsvd_b[4];
+ u32 rsvd_b[2];
+ struct bfi_ioc_fwver_s fwver;
u32 md5sum[BFI_IOC_MD5SUM_SZ];
};
+enum bfi_ioc_img_ver_cmp_e {
+ BFI_IOC_IMG_VER_INCOMP,
+ BFI_IOC_IMG_VER_OLD,
+ BFI_IOC_IMG_VER_SAME,
+ BFI_IOC_IMG_VER_BETTER
+};
+
#define BFI_FWBOOT_DEVMODE_OFF 4
#define BFI_FWBOOT_TYPE_OFF 8
#define BFI_FWBOOT_ENV_OFF 12
@@ -346,6 +377,12 @@ struct bfi_ioc_image_hdr_s {
((u32)(__p0_mode)) << 8 | \
((u32)(__p1_mode)))
+enum bfi_fwboot_type {
+ BFI_FWBOOT_TYPE_NORMAL = 0,
+ BFI_FWBOOT_TYPE_FLASH = 1,
+ BFI_FWBOOT_TYPE_MEMTEST = 2,
+};
+
#define BFI_FWBOOT_TYPE_NORMAL 0
#define BFI_FWBOOT_TYPE_MEMTEST 2
#define BFI_FWBOOT_ENV_OS 0
@@ -1107,7 +1144,8 @@ struct bfi_diag_dport_scn_teststart_s {
wwn_t pwwn; /* switch port wwn. 8 bytes */
wwn_t nwwn; /* switch node wwn. 8 bytes */
u8 type; /* bfa_diag_dport_test_type_e */
- u8 rsvd[3];
+ u8 mode; /* bfa_diag_dport_test_opmode */
+ u8 rsvd[2];
u32 numfrm; /* from switch uint in 1M */
};
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 2203ac281103..3b6f83ffddc4 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -310,7 +310,7 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
if (!request_mem_region(address, 256, "wd33c93"))
return -EBUSY;
- regs = (struct gvp11_scsiregs *)(ZTWO_VADDR(address));
+ regs = ZTWO_VADDR(address);
error = check_wd33c93(regs);
if (error)
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f2c5005f312a..f28ea070d3df 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -169,6 +169,7 @@ void scsi_remove_host(struct Scsi_Host *shost)
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_autopm_get_host(shost);
+ flush_workqueue(shost->tmf_work_q);
scsi_forget_host(shost);
mutex_unlock(&shost->scan_mutex);
scsi_proc_host_rm(shost);
@@ -294,6 +295,8 @@ static void scsi_host_dev_release(struct device *dev)
scsi_proc_hostdir_rm(shost->hostt);
+ if (shost->tmf_work_q)
+ destroy_workqueue(shost->tmf_work_q);
if (shost->ehandler)
kthread_stop(shost->ehandler);
if (shost->work_q)
@@ -316,11 +319,11 @@ static void scsi_host_dev_release(struct device *dev)
kfree(shost);
}
-static unsigned int shost_eh_deadline;
+static int shost_eh_deadline = -1;
-module_param_named(eh_deadline, shost_eh_deadline, uint, S_IRUGO|S_IWUSR);
+module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(eh_deadline,
- "SCSI EH timeout in seconds (should be between 1 and 2^32-1)");
+ "SCSI EH timeout in seconds (should be between 0 and 2^31-1)");
static struct device_type scsi_host_type = {
.name = "scsi_host",
@@ -360,7 +363,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
INIT_LIST_HEAD(&shost->eh_cmd_q);
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
-
mutex_init(&shost->scan_mutex);
/*
@@ -394,9 +396,18 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->unchecked_isa_dma = sht->unchecked_isa_dma;
shost->use_clustering = sht->use_clustering;
shost->ordered_tag = sht->ordered_tag;
- shost->eh_deadline = shost_eh_deadline * HZ;
shost->no_write_same = sht->no_write_same;
+ if (shost_eh_deadline == -1)
+ shost->eh_deadline = -1;
+ else if ((ulong) shost_eh_deadline * HZ > INT_MAX) {
+ shost_printk(KERN_WARNING, shost,
+ "eh_deadline %u too large, setting to %u\n",
+ shost_eh_deadline, INT_MAX / HZ);
+ shost->eh_deadline = INT_MAX;
+ } else
+ shost->eh_deadline = shost_eh_deadline * HZ;
+
if (sht->supported_mode == MODE_UNKNOWN)
/* means we didn't set it ... default to INITIATOR */
shost->active_mode = MODE_INITIATOR;
@@ -444,9 +455,19 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
goto fail_kfree;
}
+ shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
+ WQ_UNBOUND | WQ_MEM_RECLAIM,
+ 1, shost->host_no);
+ if (!shost->tmf_work_q) {
+ printk(KERN_WARNING "scsi%d: failed to create tmf workq\n",
+ shost->host_no);
+ goto fail_kthread;
+ }
scsi_proc_hostdir_add(shost->hostt);
return shost;
+ fail_kthread:
+ kthread_stop(shost->ehandler);
fail_kfree:
kfree(shost);
return NULL;
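
The eh_deadline module parameter is now a signed int so that -1 can mean "no deadline", and the seconds value is converted to jiffies only after checking that multiplying by HZ cannot overflow a signed int. The conversion, sketched as a standalone helper (hypothetical name, assuming HZ and INT_MAX from the usual kernel headers):

	/* Sketch of the seconds-to-jiffies clamp used in scsi_host_alloc(). */
	static int eh_deadline_to_jiffies(int seconds)
	{
		if (seconds == -1)
			return -1;			/* feature disabled */
		if ((unsigned long)seconds * HZ > INT_MAX)
			return INT_MAX;			/* > ~2147483 s at HZ=1000 */
		return seconds * HZ;
	}
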
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 20a5e6ecf945..868318a7067c 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -29,7 +29,6 @@
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
-#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
@@ -96,7 +95,6 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334D},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
@@ -143,7 +141,6 @@ static struct board_type products[] = {
{0x3351103C, "Smart Array P420", &SA5_access},
{0x3352103C, "Smart Array P421", &SA5_access},
{0x3353103C, "Smart Array P822", &SA5_access},
- {0x334D103C, "Smart Array P822se", &SA5_access},
{0x3354103C, "Smart Array P420i", &SA5_access},
{0x3355103C, "Smart Array P220i", &SA5_access},
{0x3356103C, "Smart Array P721m", &SA5_access},
@@ -171,10 +168,6 @@ static struct board_type products[] = {
static int number_of_controllers;
-static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list);
-static spinlock_t lockup_detector_lock;
-static struct task_struct *hpsa_lockup_detector;
-
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
@@ -1248,10 +1241,8 @@ static void complete_scsi_command(struct CommandList *cp)
}
if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
- if (check_for_unit_attention(h, cp)) {
- cmd->result = DID_SOFT_ERROR << 16;
+ if (check_for_unit_attention(h, cp))
break;
- }
if (sense_key == ILLEGAL_REQUEST) {
/*
* SCSI REPORT_LUNS is commonly unsupported on
@@ -1783,6 +1774,7 @@ static unsigned char *ext_target_model[] = {
"MSA2312",
"MSA2324",
"P2000 G3 SAS",
+ "MSA 2040 SAS",
NULL,
};
@@ -3171,7 +3163,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
hpsa_pci_unmap(h->pdev, c, i,
PCI_DMA_BIDIRECTIONAL);
status = -ENOMEM;
- goto cleanup1;
+ goto cleanup0;
}
c->SG[i].Addr.lower = temp64.val32.lower;
c->SG[i].Addr.upper = temp64.val32.upper;
@@ -3187,24 +3179,23 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
/* Copy the error information out */
memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
if (copy_to_user(argp, ioc, sizeof(*ioc))) {
- cmd_special_free(h, c);
status = -EFAULT;
- goto cleanup1;
+ goto cleanup0;
}
if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
/* Copy the data out of the buffer we created */
BYTE __user *ptr = ioc->buf;
for (i = 0; i < sg_used; i++) {
if (copy_to_user(ptr, buff[i], buff_size[i])) {
- cmd_special_free(h, c);
status = -EFAULT;
- goto cleanup1;
+ goto cleanup0;
}
ptr += buff_size[i];
}
}
- cmd_special_free(h, c);
status = 0;
+cleanup0:
+ cmd_special_free(h, c);
cleanup1:
if (buff) {
for (i = 0; i < sg_used; i++)
@@ -3223,6 +3214,36 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
(void) check_for_unit_attention(h, c);
}
+
+static int increment_passthru_count(struct ctlr_info *h)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&h->passthru_count_lock, flags);
+ if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) {
+ spin_unlock_irqrestore(&h->passthru_count_lock, flags);
+ return -1;
+ }
+ h->passthru_count++;
+ spin_unlock_irqrestore(&h->passthru_count_lock, flags);
+ return 0;
+}
+
+static void decrement_passthru_count(struct ctlr_info *h)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&h->passthru_count_lock, flags);
+ if (h->passthru_count <= 0) {
+ spin_unlock_irqrestore(&h->passthru_count_lock, flags);
+ /* not expecting to get here. */
+ dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n");
+ return;
+ }
+ h->passthru_count--;
+ spin_unlock_irqrestore(&h->passthru_count_lock, flags);
+}
+
/*
* ioctl
*/
@@ -3230,6 +3251,7 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
struct ctlr_info *h;
void __user *argp = (void __user *)arg;
+ int rc;
h = sdev_to_hba(dev);
@@ -3244,9 +3266,17 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
case CCISS_GETDRIVVER:
return hpsa_getdrivver_ioctl(h, argp);
case CCISS_PASSTHRU:
- return hpsa_passthru_ioctl(h, argp);
+ if (increment_passthru_count(h))
+ return -EAGAIN;
+ rc = hpsa_passthru_ioctl(h, argp);
+ decrement_passthru_count(h);
+ return rc;
case CCISS_BIG_PASSTHRU:
- return hpsa_big_passthru_ioctl(h, argp);
+ if (increment_passthru_count(h))
+ return -EAGAIN;
+ rc = hpsa_big_passthru_ioctl(h, argp);
+ decrement_passthru_count(h);
+ return rc;
default:
return -ENOTTY;
}
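
increment_passthru_count()/decrement_passthru_count() implement a spinlock-protected counter that caps concurrent passthru ioctls at HPSA_MAX_CONCURRENT_PASSTHRUS; a caller that cannot take a slot gets -EAGAIN and is expected to retry. The try/do/release pattern used for both ioctls, sketched in isolation (the wrapper below is hypothetical, the two counter helpers are the ones added above):

	/* Sketch of the throttled-ioctl pattern, not driver code as such. */
	static int do_throttled_passthru(struct ctlr_info *h, void __user *argp,
					 int (*handler)(struct ctlr_info *, void __user *))
	{
		int rc;

		if (increment_passthru_count(h))	/* too many in flight */
			return -EAGAIN;
		rc = handler(h, argp);			/* e.g. hpsa_passthru_ioctl */
		decrement_passthru_count(h);		/* always release the slot */
		return rc;
	}
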
@@ -3445,9 +3475,11 @@ static void start_io(struct ctlr_info *h)
c = list_entry(h->reqQ.next, struct CommandList, list);
/* can't do anything if fifo is full */
if ((h->access.fifo_full(h))) {
+ h->fifo_recently_full = 1;
dev_warn(&h->pdev->dev, "fifo full\n");
break;
}
+ h->fifo_recently_full = 0;
/* Get the first entry from the Request Q */
removeQ(c);
@@ -3501,15 +3533,41 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
static inline void finish_cmd(struct CommandList *c)
{
unsigned long flags;
+ int io_may_be_stalled = 0;
+ struct ctlr_info *h = c->h;
- spin_lock_irqsave(&c->h->lock, flags);
+ spin_lock_irqsave(&h->lock, flags);
removeQ(c);
- spin_unlock_irqrestore(&c->h->lock, flags);
+
+ /*
+ * Check for possibly stalled i/o.
+ *
+ * If a fifo_full condition is encountered, requests will back up
+ * in h->reqQ. This queue is only emptied out by start_io which is
+ * only called when a new i/o request comes in. If no i/o's are
+ * forthcoming, the i/o's in h->reqQ can get stuck. So we call
+ * start_io from here if we detect such a danger.
+ *
+ * Normally, we shouldn't hit this case, but pounding on the
+ * CCISS_PASSTHRU ioctl can provoke it. Only call start_io if
+ * commands_outstanding is low. We want to avoid calling
+ * start_io from in here as much as possible, and esp. don't
+ * want to get in a cycle where we call start_io every time
+ * through here.
+ */
+ if (unlikely(h->fifo_recently_full) &&
+ h->commands_outstanding < 5)
+ io_may_be_stalled = 1;
+
+ spin_unlock_irqrestore(&h->lock, flags);
+
dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
if (likely(c->cmd_type == CMD_SCSI))
complete_scsi_command(c);
else if (c->cmd_type == CMD_IOCTL_PEND)
complete(c->waiting);
+ if (unlikely(io_may_be_stalled))
+ start_io(h);
}
static inline u32 hpsa_tag_contains_index(u32 tag)
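
Note that finish_cmd() only samples h->fifo_recently_full and h->commands_outstanding while h->lock is held; the start_io() kick itself is deferred until after the completion has been delivered and the lock dropped. A condensed sketch of that ordering, using the names from the hunk above (complete_scsi_command stands in for whichever completion applies):

	/* Condensed sketch: decide under the lock, act after dropping it. */
	static void finish_and_maybe_kick(struct ctlr_info *h, struct CommandList *c)
	{
		unsigned long flags;
		int kick;

		spin_lock_irqsave(&h->lock, flags);
		removeQ(c);
		kick = h->fifo_recently_full && h->commands_outstanding < 5;
		spin_unlock_irqrestore(&h->lock, flags);

		complete_scsi_command(c);	/* or complete(c->waiting) for ioctls */
		if (kick)
			start_io(h);		/* drain requests stranded by a full fifo */
	}
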
@@ -3785,6 +3843,13 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
*/
dev_info(&pdev->dev, "using doorbell to reset controller\n");
writel(use_doorbell, vaddr + SA5_DOORBELL);
+
+ /* PMC hardware guys tell us we need a 5 second delay after
+ * doorbell reset and before any attempt to talk to the board
+ * at all to ensure that this actually works and doesn't fall
+ * over in some weird corner cases.
+ */
+ msleep(5000);
} else { /* Try to do it the PCI power state way */
/* Quoting from the Open CISS Specification: "The Power
@@ -3981,16 +4046,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
need a little pause here */
msleep(HPSA_POST_RESET_PAUSE_MSECS);
- /* Wait for board to become not ready, then ready. */
- dev_info(&pdev->dev, "Waiting for board to reset.\n");
- rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
- if (rc) {
- dev_warn(&pdev->dev,
- "failed waiting for board to reset."
- " Will try soft reset.\n");
- rc = -ENOTSUPP; /* Not expected, but try soft reset later */
- goto unmap_cfgtable;
- }
rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
if (rc) {
dev_warn(&pdev->dev,
@@ -4308,16 +4363,17 @@ static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
return true;
}
-/* Need to enable prefetch in the SCSI core for 6400 in x86 */
-static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
+static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
{
-#ifdef CONFIG_X86
- u32 prefetch;
+ u32 driver_support;
- prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
- prefetch |= 0x100;
- writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
+#ifdef CONFIG_X86
+ /* Need to enable prefetch in the SCSI core for 6400 in x86 */
+ driver_support = readl(&(h->cfgtable->driver_support));
+ driver_support |= ENABLE_SCSI_PREFETCH;
#endif
+ driver_support |= ENABLE_UNIT_ATTN;
+ writel(driver_support, &(h->cfgtable->driver_support));
}
/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
@@ -4427,7 +4483,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
err = -ENODEV;
goto err_out_free_res;
}
- hpsa_enable_scsi_prefetch(h);
+ hpsa_set_driver_support_bits(h);
hpsa_p600_dma_prefetch_quirk(h);
err = hpsa_enter_simple_mode(h);
if (err)
@@ -4638,16 +4694,6 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
kfree(h);
}
-static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h)
-{
- assert_spin_locked(&lockup_detector_lock);
- if (!hpsa_lockup_detector)
- return;
- if (h->lockup_detected)
- return; /* already stopped the lockup detector */
- list_del(&h->lockup_list);
-}
-
/* Called when controller lockup detected. */
static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
{
@@ -4666,8 +4712,6 @@ static void controller_lockup_detected(struct ctlr_info *h)
{
unsigned long flags;
- assert_spin_locked(&lockup_detector_lock);
- remove_ctlr_from_lockup_detector_list(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
spin_lock_irqsave(&h->lock, flags);
h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
@@ -4687,7 +4731,6 @@ static void detect_controller_lockup(struct ctlr_info *h)
u32 heartbeat;
unsigned long flags;
- assert_spin_locked(&lockup_detector_lock);
now = get_jiffies_64();
/* If we've received an interrupt recently, we're ok. */
if (time_after64(h->last_intr_timestamp +
@@ -4717,68 +4760,22 @@ static void detect_controller_lockup(struct ctlr_info *h)
h->last_heartbeat_timestamp = now;
}
-static int detect_controller_lockup_thread(void *notused)
+static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
- struct ctlr_info *h;
unsigned long flags;
-
- while (1) {
- struct list_head *this, *tmp;
-
- schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL);
- if (kthread_should_stop())
- break;
- spin_lock_irqsave(&lockup_detector_lock, flags);
- list_for_each_safe(this, tmp, &hpsa_ctlr_list) {
- h = list_entry(this, struct ctlr_info, lockup_list);
- detect_controller_lockup(h);
- }
- spin_unlock_irqrestore(&lockup_detector_lock, flags);
- }
- return 0;
-}
-
-static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
-{
- unsigned long flags;
-
- h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
- spin_lock_irqsave(&lockup_detector_lock, flags);
- list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
- spin_unlock_irqrestore(&lockup_detector_lock, flags);
-}
-
-static void start_controller_lockup_detector(struct ctlr_info *h)
-{
- /* Start the lockup detector thread if not already started */
- if (!hpsa_lockup_detector) {
- spin_lock_init(&lockup_detector_lock);
- hpsa_lockup_detector =
- kthread_run(detect_controller_lockup_thread,
- NULL, HPSA);
- }
- if (!hpsa_lockup_detector) {
- dev_warn(&h->pdev->dev,
- "Could not start lockup detector thread\n");
+ struct ctlr_info *h = container_of(to_delayed_work(work),
+ struct ctlr_info, monitor_ctlr_work);
+ detect_controller_lockup(h);
+ if (h->lockup_detected)
+ return;
+ spin_lock_irqsave(&h->lock, flags);
+ if (h->remove_in_progress) {
+ spin_unlock_irqrestore(&h->lock, flags);
return;
}
- add_ctlr_to_lockup_detector_list(h);
-}
-
-static void stop_controller_lockup_detector(struct ctlr_info *h)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&lockup_detector_lock, flags);
- remove_ctlr_from_lockup_detector_list(h);
- /* If the list of ctlr's to monitor is empty, stop the thread */
- if (list_empty(&hpsa_ctlr_list)) {
- spin_unlock_irqrestore(&lockup_detector_lock, flags);
- kthread_stop(hpsa_lockup_detector);
- spin_lock_irqsave(&lockup_detector_lock, flags);
- hpsa_lockup_detector = NULL;
- }
- spin_unlock_irqrestore(&lockup_detector_lock, flags);
+ schedule_delayed_work(&h->monitor_ctlr_work,
+ h->heartbeat_sample_interval);
+ spin_unlock_irqrestore(&h->lock, flags);
}
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
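
hpsa_monitor_ctlr_worker() replaces the global lockup-detector kthread with a per-controller delayed work item that re-queues itself every heartbeat interval and stops rearming once remove_in_progress is set. The same self-rescheduling pattern in miniature, with made-up names rather than hpsa structures:

	/* Generic self-rescheduling monitor (sketch). */
	struct monitor {
		struct delayed_work work;
		unsigned long interval;		/* in jiffies */
		bool stopping;
		spinlock_t lock;
	};

	static void monitor_fn(struct work_struct *w)
	{
		struct monitor *m = container_of(to_delayed_work(w),
						 struct monitor, work);
		unsigned long flags;

		/* ... poll the hardware here ... */

		spin_lock_irqsave(&m->lock, flags);
		if (!m->stopping)
			schedule_delayed_work(&m->work, m->interval);
		spin_unlock_irqrestore(&m->lock, flags);
	}

It is started with INIT_DELAYED_WORK() plus one schedule_delayed_work() call, and torn down by setting the stop flag under the lock and then cancelling the work, which is exactly the shape of the init/remove hunks below.
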
@@ -4822,6 +4819,7 @@ reinit_after_soft_reset:
INIT_LIST_HEAD(&h->reqQ);
spin_lock_init(&h->lock);
spin_lock_init(&h->scan_lock);
+ spin_lock_init(&h->passthru_count_lock);
rc = hpsa_pci_init(h);
if (rc != 0)
goto clean1;
@@ -4925,7 +4923,12 @@ reinit_after_soft_reset:
hpsa_hba_inquiry(h);
hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
- start_controller_lockup_detector(h);
+
+ /* Monitor the controller for firmware lockups */
+ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
+ INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
+ schedule_delayed_work(&h->monitor_ctlr_work,
+ h->heartbeat_sample_interval);
return 0;
clean4:
@@ -4942,6 +4945,15 @@ static void hpsa_flush_cache(struct ctlr_info *h)
{
char *flush_buf;
struct CommandList *c;
+ unsigned long flags;
+
+ /* Don't bother trying to flush the cache if locked up */
+ spin_lock_irqsave(&h->lock, flags);
+ if (unlikely(h->lockup_detected)) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&h->lock, flags);
flush_buf = kzalloc(4, GFP_KERNEL);
if (!flush_buf)
@@ -4991,13 +5003,20 @@ static void hpsa_free_device_info(struct ctlr_info *h)
static void hpsa_remove_one(struct pci_dev *pdev)
{
struct ctlr_info *h;
+ unsigned long flags;
if (pci_get_drvdata(pdev) == NULL) {
dev_err(&pdev->dev, "unable to remove device\n");
return;
}
h = pci_get_drvdata(pdev);
- stop_controller_lockup_detector(h);
+
+ /* Get rid of any controller monitoring work items */
+ spin_lock_irqsave(&h->lock, flags);
+ h->remove_in_progress = 1;
+ cancel_delayed_work(&h->monitor_ctlr_work);
+ spin_unlock_irqrestore(&h->lock, flags);
+
hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
hpsa_shutdown(pdev);
iounmap(h->vaddr);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index bc85e7244f40..01c328349c83 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -114,6 +114,11 @@ struct ctlr_info {
struct TransTable_struct *transtable;
unsigned long transMethod;
+ /* cap concurrent passthrus at some reasonable maximum */
+#define HPSA_MAX_CONCURRENT_PASSTHRUS (20)
+ spinlock_t passthru_count_lock; /* protects passthru_count */
+ int passthru_count;
+
/*
* Performant mode completion buffers
*/
@@ -130,7 +135,9 @@ struct ctlr_info {
u32 heartbeat_sample_interval;
atomic_t firmware_flash_in_progress;
u32 lockup_detected;
- struct list_head lockup_list;
+ struct delayed_work monitor_ctlr_work;
+ int remove_in_progress;
+ u32 fifo_recently_full;
/* Address of h->q[x] is passed to intr handler to know which queue */
u8 q[MAX_REPLY_QUEUES];
u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index a894f2eca7ac..bfc8c4ea66f8 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -356,7 +356,9 @@ struct CfgTable {
u32 TransMethodOffset;
u8 ServerName[16];
u32 HeartBeat;
- u32 SCSI_Prefetch;
+ u32 driver_support;
+#define ENABLE_SCSI_PREFETCH 0x100
+#define ENABLE_UNIT_ATTN 0x01
u32 MaxScatterGatherElements;
u32 MaxLogicalUnits;
u32 MaxPhysicalDevices;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 573f4128b6b6..3f5b56a99892 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -220,7 +220,7 @@ module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
"[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
-MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
+MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index cad1483f05da..9ce38a22647e 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -301,7 +301,7 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)
* Dump literals
*/
#define IPR_FMT2_MAX_IOA_DUMP_SIZE (4 * 1024 * 1024)
-#define IPR_FMT3_MAX_IOA_DUMP_SIZE (32 * 1024 * 1024)
+#define IPR_FMT3_MAX_IOA_DUMP_SIZE (80 * 1024 * 1024)
#define IPR_FMT2_NUM_SDT_ENTRIES 511
#define IPR_FMT3_NUM_SDT_ENTRIES 0xFFF
#define IPR_FMT2_MAX_NUM_DUMP_PAGES ((IPR_FMT2_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1)
@@ -311,7 +311,7 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)
* Misc literals
*/
#define IPR_NUM_IOADL_ENTRIES IPR_MAX_SGLIST
-#define IPR_MAX_MSIX_VECTORS 0x5
+#define IPR_MAX_MSIX_VECTORS 0x10
#define IPR_MAX_HRRQ_NUM 0x10
#define IPR_INIT_HRRQ 0x0
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index e3995612ea76..40462415291e 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2945,6 +2945,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
free_pages((unsigned long) conn->data,
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
kfree(conn->persistent_address);
+ kfree(conn->local_ipaddr);
kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
sizeof(void*));
if (session->leadconn == conn)
@@ -3269,6 +3270,8 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
sscanf(buf, "%d", &val);
session->discovery_sess = !!val;
break;
+ case ISCSI_PARAM_LOCAL_IPADDR:
+ return iscsi_switch_str_param(&conn->local_ipaddr, buf);
default:
return -ENOSYS;
}
@@ -3542,6 +3545,9 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
case ISCSI_PARAM_TCP_RECV_WSF:
len = sprintf(buf, "%u\n", conn->tcp_recv_wsf);
break;
+ case ISCSI_PARAM_LOCAL_IPADDR:
+ len = sprintf(buf, "%s\n", conn->local_ipaddr);
+ break;
default:
return -ENOSYS;
}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 446b85110a1f..0cac7d8fd0f7 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -2163,10 +2163,10 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
/* do we need to support multiple segments? */
- if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
- printk("%s: multiple segments req %u %u, rsp %u %u\n",
- __func__, bio_segments(req->bio), blk_rq_bytes(req),
- bio_segments(rsp->bio), blk_rq_bytes(rsp));
+ if (bio_multiple_segments(req->bio) ||
+ bio_multiple_segments(rsp->bio)) {
+ printk("%s: multiple segments req %u, rsp %u\n",
+ __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
return -EINVAL;
}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 60084e6ad2f2..b800cc952ca6 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -4001,7 +4001,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
} else
- phba->debug_dumpHBASlim = NULL;
+ phba->debug_dumpHostSlim = NULL;
/* Setup dumpData */
snprintf(name, sizeof(name), "dumpData");
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 858075723c87..f5cdc68cd5b6 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -260,6 +260,8 @@ int __init macscsi_detect(struct scsi_host_template * tpnt)
/* Once we support multiple 5380s (e.g. DuoDock) we'll do
something different here */
instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
+ if (instance == NULL)
+ return 0;
if (macintosh_config->ident == MAC_MODEL_IIFX) {
mac_scsi_regp = via1+0x8000;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index e9e543c58485..34452ea386ac 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1527,7 +1527,6 @@ struct megasas_instance {
u32 *reply_queue;
dma_addr_t reply_queue_h;
- unsigned long base_addr;
struct megasas_register_set __iomem *reg_set;
u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index c99812bf2a73..3b7ad10497fe 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3615,6 +3615,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
u32 max_sectors_1;
u32 max_sectors_2;
u32 tmp_sectors, msix_enable, scratch_pad_2;
+ resource_size_t base_addr;
struct megasas_register_set __iomem *reg_set;
struct megasas_ctrl_info *ctrl_info;
unsigned long bar_list;
@@ -3623,14 +3624,14 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Find first memory bar */
bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
- instance->base_addr = pci_resource_start(instance->pdev, instance->bar);
if (pci_request_selected_regions(instance->pdev, instance->bar,
"megasas: LSI")) {
printk(KERN_DEBUG "megasas: IO memory region busy!\n");
return -EBUSY;
}
- instance->reg_set = ioremap_nocache(instance->base_addr, 8192);
+ base_addr = pci_resource_start(instance->pdev, instance->bar);
+ instance->reg_set = ioremap_nocache(base_addr, 8192);
if (!instance->reg_set) {
printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 3901edc35812..bde63f7452bd 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -128,7 +128,7 @@ static int mpt2sas_remove_dead_ioc_func(void *arg)
pdev = ioc->pdev;
if ((pdev == NULL))
return -1;
- pci_stop_and_remove_bus_device(pdev);
+ pci_stop_and_remove_bus_device_locked(pdev);
return 0;
}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 9d26637308be..410f4a3e8888 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1901,7 +1901,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
Mpi2SmpPassthroughRequest_t *mpi_request;
Mpi2SmpPassthroughReply_t *mpi_reply;
- int rc, i;
+ int rc;
u16 smid;
u32 ioc_state;
unsigned long timeleft;
@@ -1916,7 +1916,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
void *pci_addr_out = NULL;
u16 wait_state_count;
struct request *rsp = req->next_rq;
- struct bio_vec *bvec = NULL;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
if (!rsp) {
printk(MPT2SAS_ERR_FMT "%s: the smp response space is "
@@ -1942,7 +1943,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
ioc->transport_cmds.status = MPT2_CMD_PENDING;
/* Check if the request is split across multiple segments */
- if (bio_segments(req->bio) > 1) {
+ if (bio_multiple_segments(req->bio)) {
u32 offset = 0;
/* Allocate memory and copy the request */
@@ -1955,11 +1956,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
goto out;
}
- bio_for_each_segment(bvec, req->bio, i) {
+ bio_for_each_segment(bvec, req->bio, iter) {
memcpy(pci_addr_out + offset,
- page_address(bvec->bv_page) + bvec->bv_offset,
- bvec->bv_len);
- offset += bvec->bv_len;
+ page_address(bvec.bv_page) + bvec.bv_offset,
+ bvec.bv_len);
+ offset += bvec.bv_len;
}
} else {
dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1974,7 +1975,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* Check if the response needs to be populated across
* multiple segments */
- if (bio_segments(rsp->bio) > 1) {
+ if (bio_multiple_segments(rsp->bio)) {
pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
&pci_dma_in);
if (!pci_addr_in) {
@@ -2041,7 +2042,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- if (bio_segments(req->bio) > 1) {
+ if (bio_multiple_segments(req->bio)) {
ioc->base_add_sg_single(psge, sgl_flags |
(blk_rq_bytes(req) - 4), pci_dma_out);
} else {
@@ -2057,7 +2058,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
MPI2_SGE_FLAGS_END_OF_LIST);
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- if (bio_segments(rsp->bio) > 1) {
+ if (bio_multiple_segments(rsp->bio)) {
ioc->base_add_sg_single(psge, sgl_flags |
(blk_rq_bytes(rsp) + 4), pci_dma_in);
} else {
@@ -2102,23 +2103,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
le16_to_cpu(mpi_reply->ResponseDataLength);
/* check if the resp needs to be copied from the allocated
* pci mem */
- if (bio_segments(rsp->bio) > 1) {
+ if (bio_multiple_segments(rsp->bio)) {
u32 offset = 0;
u32 bytes_to_copy =
le16_to_cpu(mpi_reply->ResponseDataLength);
- bio_for_each_segment(bvec, rsp->bio, i) {
- if (bytes_to_copy <= bvec->bv_len) {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
+ bio_for_each_segment(bvec, rsp->bio, iter) {
+ if (bytes_to_copy <= bvec.bv_len) {
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
offset, bytes_to_copy);
break;
} else {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
- offset, bvec->bv_len);
- bytes_to_copy -= bvec->bv_len;
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
+ offset, bvec.bv_len);
+ bytes_to_copy -= bvec.bv_len;
}
- offset += bvec->bv_len;
+ offset += bvec.bv_len;
}
}
} else {
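
These hunks convert the SMP passthrough path to the immutable-biovec iterators: bio_for_each_segment() now takes a struct bio_vec by value plus a struct bvec_iter cursor instead of a bio_vec pointer and an integer index, and bio_multiple_segments()/bi_iter.bi_size replace the direct bi_vcnt/bi_size accesses. The copy-in loop reduces to the following shape (a sketch, not the driver code):

	/* Sketch: flatten a bio's data into a contiguous bounce buffer. */
	static void copy_bio_to_buffer(struct bio *bio, void *buf)
	{
		struct bio_vec bvec;
		struct bvec_iter iter;
		u32 offset = 0;

		bio_for_each_segment(bvec, bio, iter) {
			memcpy(buf + offset,
			       page_address(bvec.bv_page) + bvec.bv_offset,
			       bvec.bv_len);
			offset += bvec.bv_len;
		}
	}
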
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index fa785062e97b..0cf4f7000f94 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -131,7 +131,7 @@ static int mpt3sas_remove_dead_ioc_func(void *arg)
pdev = ioc->pdev;
if ((pdev == NULL))
return -1;
- pci_stop_and_remove_bus_device(pdev);
+ pci_stop_and_remove_bus_device_locked(pdev);
return 0;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index e771a88c6a74..65170cb1a00f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1884,7 +1884,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
Mpi2SmpPassthroughRequest_t *mpi_request;
Mpi2SmpPassthroughReply_t *mpi_reply;
- int rc, i;
+ int rc;
u16 smid;
u32 ioc_state;
unsigned long timeleft;
@@ -1898,7 +1898,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
void *pci_addr_out = NULL;
u16 wait_state_count;
struct request *rsp = req->next_rq;
- struct bio_vec *bvec = NULL;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
if (!rsp) {
pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
@@ -1925,7 +1926,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
ioc->transport_cmds.status = MPT3_CMD_PENDING;
/* Check if the request is split across multiple segments */
- if (req->bio->bi_vcnt > 1) {
+ if (bio_multiple_segments(req->bio)) {
u32 offset = 0;
/* Allocate memory and copy the request */
@@ -1938,11 +1939,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
goto out;
}
- bio_for_each_segment(bvec, req->bio, i) {
+ bio_for_each_segment(bvec, req->bio, iter) {
memcpy(pci_addr_out + offset,
- page_address(bvec->bv_page) + bvec->bv_offset,
- bvec->bv_len);
- offset += bvec->bv_len;
+ page_address(bvec.bv_page) + bvec.bv_offset,
+ bvec.bv_len);
+ offset += bvec.bv_len;
}
} else {
dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1957,7 +1958,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* Check if the response needs to be populated across
* multiple segments */
- if (rsp->bio->bi_vcnt > 1) {
+ if (bio_multiple_segments(rsp->bio)) {
pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
&pci_dma_in);
if (!pci_addr_in) {
@@ -2018,7 +2019,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
psge = &mpi_request->SGL;
- if (req->bio->bi_vcnt > 1)
+ if (bio_multiple_segments(req->bio))
ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
pci_dma_in, (blk_rq_bytes(rsp) + 4));
else
@@ -2063,23 +2064,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* check if the resp needs to be copied from the allocated
* pci mem */
- if (rsp->bio->bi_vcnt > 1) {
+ if (bio_multiple_segments(rsp->bio)) {
u32 offset = 0;
u32 bytes_to_copy =
le16_to_cpu(mpi_reply->ResponseDataLength);
- bio_for_each_segment(bvec, rsp->bio, i) {
- if (bytes_to_copy <= bvec->bv_len) {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
+ bio_for_each_segment(bvec, rsp->bio, iter) {
+ if (bytes_to_copy <= bvec.bv_len) {
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
offset, bytes_to_copy);
break;
} else {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
- offset, bvec->bv_len);
- bytes_to_copy -= bvec->bv_len;
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
+ offset, bvec.bv_len);
+ bytes_to_copy -= bvec.bv_len;
}
- offset += bvec->bv_len;
+ offset += bvec.bv_len;
}
}
} else {
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index aa66361ed44b..bac04c2335aa 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -731,7 +731,7 @@ static int _osd_req_list_objects(struct osd_request *or,
bio->bi_rw &= ~REQ_WRITE;
or->in.bio = bio;
- or->in.total_bytes = bio->bi_size;
+ or->in.total_bytes = bio->bi_iter.bi_size;
return 0;
}
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 5a522c5bbd43..97dabd39b092 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2502,7 +2502,7 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
/* Issue set host interrupt command. */
/* set up a timer just in case we're really jammed */
- init_timer(&timer);
+ init_timer_on_stack(&timer);
timer.expires = jiffies + 20*HZ;
timer.data = (unsigned long)ha;
timer.function = qla1280_mailbox_timeout;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 5f174b83f56f..4a0d7c92181f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -862,7 +862,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
}
void
-qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
+qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
{
struct Scsi_Host *host = vha->host;
struct sysfs_entry *iter;
@@ -880,7 +880,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
iter->attr);
}
- if (ha->beacon_blink_led == 1)
+ if (stop_beacon && ha->beacon_blink_led == 1)
ha->isp_ops->beacon_off(vha);
}
@@ -890,7 +890,7 @@ static ssize_t
qla2x00_drvr_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
}
static ssize_t
@@ -901,7 +901,7 @@ qla2x00_fw_version_show(struct device *dev,
struct qla_hw_data *ha = vha->hw;
char fw_str[128];
- return snprintf(buf, PAGE_SIZE, "%s\n",
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
ha->isp_ops->fw_version_str(vha, fw_str));
}
@@ -914,15 +914,15 @@ qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
uint32_t sn;
if (IS_QLAFX00(vha->hw)) {
- return snprintf(buf, PAGE_SIZE, "%s\n",
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
vha->hw->mr.serial_num);
} else if (IS_FWI2_CAPABLE(ha)) {
- qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
- return snprintf(buf, PAGE_SIZE, "%s\n", buf);
+ qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
+ return strlen(strcat(buf, "\n"));
}
sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
- return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
+ return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
sn % 100000);
}
@@ -931,7 +931,7 @@ qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
+ return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
}
static ssize_t
@@ -942,10 +942,10 @@ qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
struct qla_hw_data *ha = vha->hw;
if (IS_QLAFX00(vha->hw))
- return snprintf(buf, PAGE_SIZE, "%s\n",
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
vha->hw->mr.hw_version);
- return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
+ return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
ha->product_id[0], ha->product_id[1], ha->product_id[2],
ha->product_id[3]);
}
@@ -956,11 +956,7 @@ qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- if (IS_QLAFX00(vha->hw))
- return snprintf(buf, PAGE_SIZE, "%s\n",
- vha->hw->mr.product_name);
-
- return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
}
static ssize_t
@@ -968,7 +964,7 @@ qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%s\n",
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
vha->hw->model_desc ? vha->hw->model_desc : "");
}
@@ -979,7 +975,7 @@ qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
char pci_info[30];
- return snprintf(buf, PAGE_SIZE, "%s\n",
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
vha->hw->isp_ops->pci_info_str(vha, pci_info));
}
@@ -994,29 +990,29 @@ qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
atomic_read(&vha->loop_state) == LOOP_DEAD ||
vha->device_flags & DFLG_NO_CABLE)
- len = snprintf(buf, PAGE_SIZE, "Link Down\n");
+ len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
else if (atomic_read(&vha->loop_state) != LOOP_READY ||
qla2x00_reset_active(vha))
- len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
+ len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
else {
- len = snprintf(buf, PAGE_SIZE, "Link Up - ");
+ len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
switch (ha->current_topology) {
case ISP_CFG_NL:
- len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
+ len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
break;
case ISP_CFG_FL:
- len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
+ len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
break;
case ISP_CFG_N:
- len += snprintf(buf + len, PAGE_SIZE-len,
+ len += scnprintf(buf + len, PAGE_SIZE-len,
"N_Port to N_Port\n");
break;
case ISP_CFG_F:
- len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
+ len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
break;
default:
- len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
+ len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
break;
}
}
@@ -1032,10 +1028,10 @@ qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
switch (vha->hw->zio_mode) {
case QLA_ZIO_MODE_6:
- len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
+ len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
break;
case QLA_ZIO_DISABLED:
- len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
+ len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
break;
}
return len;
@@ -1075,7 +1071,7 @@ qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
+ return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
}
static ssize_t
@@ -1105,9 +1101,9 @@ qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
int len = 0;
if (vha->hw->beacon_blink_led)
- len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
+ len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
else
- len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
+ len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
return len;
}
@@ -1149,7 +1145,7 @@ qla2x00_optrom_bios_version_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
ha->bios_revision[0]);
}
@@ -1159,7 +1155,7 @@ qla2x00_optrom_efi_version_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
ha->efi_revision[0]);
}
@@ -1169,7 +1165,7 @@ qla2x00_optrom_fcode_version_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
ha->fcode_revision[0]);
}
@@ -1179,7 +1175,7 @@ qla2x00_optrom_fw_version_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
ha->fw_revision[3]);
}
@@ -1192,9 +1188,9 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev,
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
- return snprintf(buf, PAGE_SIZE, "\n");
+ return scnprintf(buf, PAGE_SIZE, "\n");
- return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
ha->gold_fw_version[0], ha->gold_fw_version[1],
ha->gold_fw_version[2], ha->gold_fw_version[3]);
}
@@ -1204,7 +1200,7 @@ qla2x00_total_isp_aborts_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
vha->qla_stats.total_isp_aborts);
}
@@ -1218,16 +1214,16 @@ qla24xx_84xx_fw_version_show(struct device *dev,
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA84XX(ha))
- return snprintf(buf, PAGE_SIZE, "\n");
+ return scnprintf(buf, PAGE_SIZE, "\n");
if (ha->cs84xx->op_fw_version == 0)
rval = qla84xx_verify_chip(vha, status);
if ((rval == QLA_SUCCESS) && (status[0] == 0))
- return snprintf(buf, PAGE_SIZE, "%u\n",
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
(uint32_t)ha->cs84xx->op_fw_version);
- return snprintf(buf, PAGE_SIZE, "\n");
+ return scnprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t
@@ -1238,9 +1234,9 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
- return snprintf(buf, PAGE_SIZE, "\n");
+ return scnprintf(buf, PAGE_SIZE, "\n");
- return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
ha->mpi_capabilities);
}
@@ -1253,9 +1249,9 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
- return snprintf(buf, PAGE_SIZE, "\n");
+ return scnprintf(buf, PAGE_SIZE, "\n");
- return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
}
@@ -1266,7 +1262,7 @@ qla2x00_flash_block_size_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
}
static ssize_t
@@ -1276,9 +1272,9 @@ qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
if (!IS_CNA_CAPABLE(vha->hw))
- return snprintf(buf, PAGE_SIZE, "\n");
+ return scnprintf(buf, PAGE_SIZE, "\n");
- return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
}
static ssize_t
@@ -1288,9 +1284,9 @@ qla2x00_vn_port_mac_address_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
if (!IS_CNA_CAPABLE(vha->hw))
- return snprintf(buf, PAGE_SIZE, "\n");
+ return scnprintf(buf, PAGE_SIZE, "\n");
- return snprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
+ return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
}
static ssize_t
@@ -1299,7 +1295,7 @@ qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
}
static ssize_t
@@ -1320,10 +1316,10 @@ qla2x00_thermal_temp_show(struct device *dev,
}
if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
done:
- return snprintf(buf, PAGE_SIZE, "\n");
+ return scnprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t
@@ -1337,7 +1333,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
if (IS_QLAFX00(vha->hw)) {
pstate = qlafx00_fw_state_show(dev, attr, buf);
- return snprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
}
if (qla2x00_reset_active(vha))
@@ -1348,7 +1344,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
if (rval != QLA_SUCCESS)
memset(state, -1, sizeof(state));
- return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
+ return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
state[1], state[2], state[3], state[4]);
}
@@ -1359,9 +1355,9 @@ qla2x00_diag_requests_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
if (!IS_BIDI_CAPABLE(vha->hw))
- return snprintf(buf, PAGE_SIZE, "\n");
+ return scnprintf(buf, PAGE_SIZE, "\n");
- return snprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
}
static ssize_t
@@ -1371,9 +1367,9 @@ qla2x00_diag_megabytes_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
if (!IS_BIDI_CAPABLE(vha->hw))
- return snprintf(buf, PAGE_SIZE, "\n");
+ return scnprintf(buf, PAGE_SIZE, "\n");
- return snprintf(buf, PAGE_SIZE, "%llu\n",
+ return scnprintf(buf, PAGE_SIZE, "%llu\n",
vha->bidi_stats.transfer_bytes >> 20);
}
@@ -1392,7 +1388,7 @@ qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
else
size = ha->fw_dump_len;
- return snprintf(buf, PAGE_SIZE, "%d\n", size);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", size);
}
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
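
The wholesale snprintf() -> scnprintf() conversion in these show() handlers matters because a sysfs show routine must return the number of bytes actually placed in the buffer: snprintf() returns the length the output would have had, which can exceed PAGE_SIZE, while scnprintf() returns what was really written (at most size - 1). A two-line illustration, assuming a 16-byte buffer:

	char buf[16];
	int a = snprintf(buf, sizeof(buf), "%s", "a string longer than the buffer");
	int b = scnprintf(buf, sizeof(buf), "%s", "a string longer than the buffer");
	/* a == 31 (would-be length), b == 15 (bytes actually stored, NUL excluded) */
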
@@ -1994,6 +1990,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
vha->flags.delete_progress = 1;
+ qlt_remove_target(ha, vha);
+
fc_remove_host(vha->host);
scsi_remove_host(vha->host);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index aa57bf0af574..f15d03e6b7ee 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2022,6 +2022,46 @@ done:
}
static int
+qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ int rval = 0;
+ struct qla_serdes_reg sr;
+
+ memset(&sr, 0, sizeof(sr));
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
+
+ switch (sr.cmd) {
+ case INT_SC_SERDES_WRITE_REG:
+ rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ break;
+ case INT_SC_SERDES_READ_REG:
+ rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
+ bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
+ break;
+ default:
+ ql_log(ql_log_warn, vha, 0x708c,
+ "Unknown serdes cmd %x.\n", sr.cmd);
+ rval = -EDOM;
+ break;
+ }
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ rval ? EXT_STATUS_MAILBOX : 0;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+ return 0;
+}
+
+static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
@@ -2069,6 +2109,10 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
case QL_VND_FX00_MGMT_CMD:
return qlafx00_mgmt_cmd(bsg_job);
+
+ case QL_VND_SERDES_OP:
+ return qla26xx_serdes_op(bsg_job);
+
default:
return -ENOSYS;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 04f770332c2b..e5c2126221e9 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -23,6 +23,7 @@
#define QL_VND_WRITE_I2C 0x10
#define QL_VND_READ_I2C 0x11
#define QL_VND_FX00_MGMT_CMD 0x12
+#define QL_VND_SERDES_OP 0x13
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
@@ -212,4 +213,16 @@ struct qla_i2c_access {
uint8_t buffer[0x40];
} __packed;
+/* 26xx serdes register interface */
+
+/* serdes reg commands */
+#define INT_SC_SERDES_READ_REG 1
+#define INT_SC_SERDES_WRITE_REG 2
+
+struct qla_serdes_reg {
+ uint16_t cmd;
+ uint16_t addr;
+ uint16_t val;
+} __packed;
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index ee5c1833eb73..f6103f553bb1 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,8 +11,9 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0159 | 0x4b,0xba,0xfa |
- * | Mailbox commands | 0x1181 | 0x111a-0x111b |
+ * | Module Init and Probe | 0x015b | 0x4b,0xba,0xfa |
+ * | | | 0x015a |
+ * | Mailbox commands | 0x1187 | 0x111a-0x111b |
* | | | 0x1155-0x1158 |
* | | | 0x1018-0x1019 |
* | | | 0x1115-0x1116 |
@@ -26,7 +27,7 @@
* | | | 0x302d,0x3033 |
* | | | 0x3036,0x3038 |
* | | | 0x303a |
- * | DPC Thread | 0x4022 | 0x4002,0x4013 |
+ * | DPC Thread | 0x4023 | 0x4002,0x4013 |
* | Async Events | 0x5087 | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
* | | | 0x5084,0x5075 |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 93db74ef3461..e1fe95ef23e1 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -862,7 +862,6 @@ struct mbx_cmd_32 {
*/
#define MBC_LOAD_RAM 1 /* Load RAM. */
#define MBC_EXECUTE_FIRMWARE 2 /* Execute firmware. */
-#define MBC_WRITE_RAM_WORD 4 /* Write RAM word. */
#define MBC_READ_RAM_WORD 5 /* Read RAM word. */
#define MBC_MAILBOX_REGISTER_TEST 6 /* Wrap incoming mailboxes */
#define MBC_VERIFY_CHECKSUM 7 /* Verify checksum. */
@@ -937,6 +936,8 @@ struct mbx_cmd_32 {
/*
* ISP24xx mailbox commands
*/
+#define MBC_WRITE_SERDES 0x3 /* Write serdes word. */
+#define MBC_READ_SERDES 0x4 /* Read serdes word. */
#define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */
#define MBC_GET_IOCB_STATUS 0x12 /* Get IOCB status command. */
#define MBC_PORT_PARAMS 0x1A /* Port iDMA Parameters. */
@@ -2734,7 +2735,6 @@ struct req_que {
srb_t **outstanding_cmds;
uint32_t current_outstanding_cmd;
uint16_t num_outstanding_cmds;
-#define MAX_Q_DEPTH 32
int max_q_depth;
dma_addr_t dma_fx00;
@@ -2750,6 +2750,13 @@ struct qlfc_fw {
uint32_t len;
};
+struct scsi_qlt_host {
+ void *target_lport_ptr;
+ struct mutex tgt_mutex;
+ struct mutex tgt_host_action_mutex;
+ struct qla_tgt *qla_tgt;
+};
+
struct qlt_hw_data {
/* Protected by hw lock */
uint32_t enable_class_2:1;
@@ -2765,15 +2772,11 @@ struct qlt_hw_data {
uint32_t __iomem *atio_q_in;
uint32_t __iomem *atio_q_out;
- void *target_lport_ptr;
struct qla_tgt_func_tmpl *tgt_ops;
- struct qla_tgt *qla_tgt;
struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
uint16_t current_handle;
struct qla_tgt_vp_map *tgt_vp_map;
- struct mutex tgt_mutex;
- struct mutex tgt_host_action_mutex;
int saved_set;
uint16_t saved_exchange_count;
@@ -3302,12 +3305,7 @@ struct qla_hw_data {
struct work_struct nic_core_reset;
struct work_struct idc_state_handler;
struct work_struct nic_core_unrecoverable;
-
-#define HOST_QUEUE_RAMPDOWN_INTERVAL (60 * HZ)
-#define HOST_QUEUE_RAMPUP_INTERVAL (30 * HZ)
- unsigned long host_last_rampdown_time;
- unsigned long host_last_rampup_time;
- int cfg_lun_q_depth;
+ struct work_struct board_disable;
struct mr_data_fx00 mr;
@@ -3372,12 +3370,11 @@ typedef struct scsi_qla_host {
#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
#define SCR_PENDING 21 /* SCR in target mode */
-#define HOST_RAMP_DOWN_QUEUE_DEPTH 22
-#define HOST_RAMP_UP_QUEUE_DEPTH 23
-#define PORT_UPDATE_NEEDED 24
-#define FX00_RESET_RECOVERY 25
-#define FX00_TARGET_SCAN 26
-#define FX00_CRITEMP_RECOVERY 27
+#define PORT_UPDATE_NEEDED 22
+#define FX00_RESET_RECOVERY 23
+#define FX00_TARGET_SCAN 24
+#define FX00_CRITEMP_RECOVERY 25
+#define FX00_HOST_INFO_RESEND 26
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
@@ -3441,6 +3438,7 @@ typedef struct scsi_qla_host {
#define VP_ERR_FAB_LOGOUT 4
#define VP_ERR_ADAP_NORESOURCES 5
struct qla_hw_data *hw;
+ struct scsi_qlt_host vha_tgt;
struct req_que *req;
int fw_heartbeat_counter;
int seconds_since_last_heartbeat;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 4446bf5fe292..1f426628a0a5 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -98,7 +98,6 @@ extern int qlport_down_retry;
extern int ql2xplogiabsentdevice;
extern int ql2xloginretrycount;
extern int ql2xfdmienable;
-extern int ql2xmaxqdepth;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xiidmaenable;
@@ -160,6 +159,9 @@ extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
+extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
+extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
+
/*
* Global Functions in qla_mid.c source file.
*/
@@ -339,6 +341,11 @@ extern int
qla2x00_system_error(scsi_qla_host_t *);
extern int
+qla2x00_write_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t);
+extern int
+qla2x00_read_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t *);
+
+extern int
qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
extern int
@@ -455,6 +462,7 @@ extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
+bool qla2x00_check_reg_for_disconnect(scsi_qla_host_t *, uint32_t);
extern int qla2x00_beacon_on(struct scsi_qla_host *);
extern int qla2x00_beacon_off(struct scsi_qla_host *);
@@ -541,10 +549,9 @@ struct fc_function_template;
extern struct fc_function_template qla2xxx_transport_functions;
extern struct fc_function_template qla2xxx_transport_vport_functions;
extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
-extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
+extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
extern void qla2x00_init_host_attr(scsi_qla_host_t *);
extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
-extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
extern int qla2x00_echo_test(scsi_qla_host_t *,
struct msg_echo_lb *, uint16_t *);
@@ -725,7 +732,7 @@ extern inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha);
extern inline void qla8044_need_reset_handler(struct scsi_qla_host *vha);
extern int qla8044_device_state_handler(struct scsi_qla_host *vha);
extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha);
-extern void qla8044_clear_drv_active(struct scsi_qla_host *vha);
+extern void qla8044_clear_drv_active(struct qla_hw_data *);
void qla8044_get_minidump(struct scsi_qla_host *vha);
int qla8044_collect_md_data(struct scsi_qla_host *vha);
extern int qla8044_md_get_template(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 03f715e7591e..e7e5f4facf7f 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1694,6 +1694,8 @@ enable_82xx_npiv:
if (!fw_major_version && ql2xallocfwdump
&& !(IS_P3P_TYPE(ha)))
qla2x00_alloc_fw_dump(vha);
+ } else {
+ goto failed;
}
} else {
ql_log(ql_log_fatal, vha, 0x00cd,
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 957088b04611..ce8b5fb0f347 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -261,25 +261,6 @@ qla2x00_gid_list_size(struct qla_hw_data *ha)
}
static inline void
-qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
-{
- if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth)
- return;
-
- /* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up */
- if (time_before(jiffies, (vha->hw->host_last_rampdown_time +
- HOST_QUEUE_RAMPDOWN_INTERVAL)))
- return;
-
- /* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up */
- if (time_before(jiffies, (vha->hw->host_last_rampup_time +
- HOST_QUEUE_RAMPUP_INTERVAL)))
- return;
-
- set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
-}
-
-static inline void
qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
{
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ff9c86b1a0d8..9bc86b9e86b1 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -56,6 +56,16 @@ qla2100_intr_handler(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
hccr = RD_REG_WORD(&reg->hccr);
+ /* Check for PCI disconnection */
+ if (hccr == 0xffff) {
+ /*
+ * Schedule this on the default system workqueue so that
+ * all the adapter workqueues and the DPC thread can be
+ * shut down cleanly.
+ */
+ schedule_work(&ha->board_disable);
+ break;
+ }
if (hccr & HCCR_RISC_PAUSE) {
if (pci_channel_offline(ha->pdev))
break;
@@ -110,6 +120,22 @@ qla2100_intr_handler(int irq, void *dev_id)
return (IRQ_HANDLED);
}
+bool
+qla2x00_check_reg_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
+{
+ /* Check for PCI disconnection */
+ if (reg == 0xffffffff) {
+ /*
+ * Schedule this on the default system workqueue so that all the
+	 * adapter workqueues and the DPC thread can be shut down
+ * cleanly.
+ */
+ schedule_work(&vha->hw->board_disable);
+ return true;
+ } else
+ return false;
+}
+
/**
* qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
* @irq:
@@ -148,11 +174,14 @@ qla2300_intr_handler(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ if (qla2x00_check_reg_for_disconnect(vha, stat))
+ break;
if (stat & HSR_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
break;
hccr = RD_REG_WORD(&reg->hccr);
+
if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
ql_log(ql_log_warn, vha, 0x5026,
"Parity error -- HCCR=%x, Dumping "
@@ -269,11 +298,18 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{ "Complete", "Request Notification", "Time Extension" };
int rval;
struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
+ struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
uint16_t __iomem *wptr;
uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
/* Seed data -- mailbox1 -> mailbox7. */
- wptr = (uint16_t __iomem *)&reg24->mailbox1;
+ if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
+ wptr = (uint16_t __iomem *)&reg24->mailbox1;
+ else if (IS_QLA8044(vha->hw))
+ wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
+ else
+ return;
+
for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
mb[cnt] = RD_REG_WORD(wptr);
@@ -287,7 +323,7 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
case MBA_IDC_COMPLETE:
if (mb[1] >> 15) {
vha->hw->flags.idc_compl_status = 1;
- if (vha->hw->notify_dcbx_comp)
+ if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
complete(&vha->hw->dcbx_comp);
}
break;
@@ -758,7 +794,7 @@ skip_rio:
ql_dbg(ql_dbg_async, vha, 0x500d,
"DCBX Completed -- %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
- if (ha->notify_dcbx_comp)
+ if (ha->notify_dcbx_comp && !vha->vp_idx)
complete(&ha->dcbx_comp);
} else
@@ -1032,7 +1068,7 @@ skip_rio:
}
}
case MBA_IDC_COMPLETE:
- if (ha->notify_lb_portup_comp)
+ if (ha->notify_lb_portup_comp && !vha->vp_idx)
complete(&ha->lb_portup_comp);
/* Fallthru */
case MBA_IDC_TIME_EXT:
@@ -1991,7 +2027,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
/* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
- qla2x00_do_host_ramp_up(vha);
qla2x00_process_completed_request(vha, req, handle);
return;
@@ -2250,9 +2285,6 @@ out:
cp->cmnd, scsi_bufflen(cp), rsp_info_len,
resid_len, fw_resid_len);
- if (!res)
- qla2x00_do_host_ramp_up(vha);
-
if (rsp->status_srb == NULL)
sp->done(ha, sp, res);
}
@@ -2575,6 +2607,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->host_status);
+ if (qla2x00_check_reg_for_disconnect(vha, stat))
+ break;
if (stat & HSRX_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
break;
@@ -2644,6 +2678,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
struct device_reg_24xx __iomem *reg;
struct scsi_qla_host *vha;
unsigned long flags;
+ uint32_t stat = 0;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -2657,11 +2692,19 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
+ /*
+	 * Use host_status register to check for PCI disconnection before
+	 * we process the response queue.
+ */
+ stat = RD_REG_DWORD(&reg->host_status);
+ if (qla2x00_check_reg_for_disconnect(vha, stat))
+ goto out;
qla24xx_process_response_queue(vha, rsp);
if (!ha->flags.disable_msix_handshake) {
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
RD_REG_DWORD_RELAXED(&reg->hccr);
}
+out:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return IRQ_HANDLED;
@@ -2671,9 +2714,11 @@ static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
struct qla_hw_data *ha;
+ scsi_qla_host_t *vha;
struct rsp_que *rsp;
struct device_reg_24xx __iomem *reg;
unsigned long flags;
+ uint32_t hccr = 0;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -2682,17 +2727,21 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
return IRQ_NONE;
}
ha = rsp->hw;
+ vha = pci_get_drvdata(ha->pdev);
/* Clear the interrupt, if enabled, for this response queue */
if (!ha->flags.disable_msix_handshake) {
reg = &ha->iobase->isp24;
spin_lock_irqsave(&ha->hardware_lock, flags);
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD_RELAXED(&reg->hccr);
+ hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+ if (qla2x00_check_reg_for_disconnect(vha, hccr))
+ goto out;
queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
+out:
return IRQ_HANDLED;
}
@@ -2723,6 +2772,8 @@ qla24xx_msix_default(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
do {
stat = RD_REG_DWORD(&reg->host_status);
+ if (qla2x00_check_reg_for_disconnect(vha, stat))
+ break;
if (stat & HSRX_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
break;
@@ -2937,7 +2988,7 @@ msix_out:
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
- int ret;
+ int ret = QLA_FUNCTION_FAILED;
device_reg_t __iomem *reg = ha->iobase;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
@@ -2971,10 +3022,12 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
ha->chip_revision, ha->fw_attributes);
goto clear_risc_ints;
}
- ql_log(ql_log_info, vha, 0x0037,
- "MSI-X Falling back-to MSI mode -%d.\n", ret);
+
skip_msix:
+ ql_log(ql_log_info, vha, 0x0037,
+ "Falling back-to MSI mode -%d.\n", ret);
+
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
!IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha))
goto skip_msi;
@@ -2986,14 +3039,13 @@ skip_msix:
ha->flags.msi_enabled = 1;
} else
ql_log(ql_log_warn, vha, 0x0039,
- "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
+ "Falling back-to INTa mode -- %d.\n", ret);
+skip_msi:
/* Skip INTx on ISP82xx. */
if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
return QLA_FUNCTION_FAILED;
-skip_msi:
-
ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
ha->flags.msi_enabled ? 0 : IRQF_SHARED,
QLA2XXX_DRIVER_NAME, rsp);
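
The disconnect checks added to the interrupt handlers above rely on a property of PCIe surprise removal: MMIO reads from a function that has dropped off the bus complete with all bits set. A value of 0xffffffff (0xffff for the 16-bit HCCR read) can therefore never be a valid status and is taken as "board gone", and the actual teardown is deferred to the system workqueue because the handler runs in interrupt context and the driver's own workqueues are among the things being torn down. The sketch below is a minimal userspace model of that idiom, not driver code; schedule_board_disable() merely stands in for the schedule_work(&ha->board_disable) call made by the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for schedule_work(&ha->board_disable) in the patch. */
static void schedule_board_disable(void)
{
	puts("deferring adapter teardown to process context");
}

/* Userspace model of qla2x00_check_reg_for_disconnect(): an all-ones
 * register value means the PCIe function is gone. */
static bool check_reg_for_disconnect(uint32_t reg)
{
	if (reg == 0xffffffffu) {
		schedule_board_disable();
		return true;
	}
	return false;
}

int main(void)
{
	printf("healthy read:  disconnect=%d\n",
	       check_reg_for_disconnect(0x00008000u));
	printf("all-ones read: disconnect=%d\n",
	       check_reg_for_disconnect(0xffffffffu));
	return 0;
}
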
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index a9aae500e791..b94511ae0051 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -468,7 +468,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mcp->mb[1] = MSW(risc_addr);
mcp->mb[2] = LSW(risc_addr);
mcp->mb[3] = 0;
- if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) {
+ if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) {
struct nvram_81xx *nv = ha->nvram;
mcp->mb[4] = (nv->enhanced_features &
EXTENDED_BB_CREDITS);
@@ -1214,7 +1214,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- if ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) && ha->ex_init_cb->ex_version) {
+ if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
mcp->mb[1] = BIT_0;
mcp->mb[10] = MSW(ha->ex_init_cb_dma);
mcp->mb[11] = LSW(ha->ex_init_cb_dma);
@@ -2800,6 +2800,75 @@ qla2x00_system_error(scsi_qla_host_t *vha)
return rval;
}
+int
+qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA2031(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_WRITE_SERDES;
+ mcp->mb[1] = addr;
+ mcp->mb[2] = data & 0xff;
+ mcp->mb[3] = 0;
+ mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1183,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA2031(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_READ_SERDES;
+ mcp->mb[1] = addr;
+ mcp->mb[3] = 0;
+ mcp->out_mb = MBX_3|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ *data = mcp->mb[1] & 0xff;
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1186,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
/**
* qla2x00_set_serdes_params() -
* @ha: HA context
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 30d20e74e48a..ba6f8b139c98 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -1610,6 +1610,22 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
ha->mr.fw_critemp_timer_tick--;
}
}
+ if (ha->mr.host_info_resend) {
+ /*
+ * Incomplete host info might be sent to firmware
+		 * during system boot - info should be resent
+ */
+ if (ha->mr.hinfo_resend_timer_tick == 0) {
+ ha->mr.host_info_resend = false;
+ set_bit(FX00_HOST_INFO_RESEND, &vha->dpc_flags);
+ ha->mr.hinfo_resend_timer_tick =
+ QLAFX00_HINFO_RESEND_INTERVAL;
+ qla2xxx_wake_dpc(vha);
+ } else {
+ ha->mr.hinfo_resend_timer_tick--;
+ }
+ }
+
}
/*
@@ -1867,6 +1883,7 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
goto done_free_sp;
}
break;
+ case FXDISC_ABORT_IOCTL:
default:
break;
}
@@ -1888,6 +1905,8 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
p_sysid->sysname, SYSNAME_LENGTH);
strncpy(phost_info->nodename,
p_sysid->nodename, NODENAME_LENGTH);
+ if (!strcmp(phost_info->nodename, "(none)"))
+ ha->mr.host_info_resend = true;
strncpy(phost_info->release,
p_sysid->release, RELEASE_LENGTH);
strncpy(phost_info->version,
@@ -1948,8 +1967,8 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
if (fx_type == FXDISC_GET_CONFIG_INFO) {
struct config_info_data *pinfo =
(struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
- memcpy(&vha->hw->mr.product_name, pinfo->product_name,
- sizeof(vha->hw->mr.product_name));
+ strcpy(vha->hw->model_number, pinfo->model_num);
+ strcpy(vha->hw->model_desc, pinfo->model_description);
memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
sizeof(vha->hw->mr.symbolic_name));
memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
@@ -1993,7 +2012,11 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
(uint8_t *)pinfo, 16);
memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
- }
+ } else if (fx_type == FXDISC_ABORT_IOCTL)
+ fdisc->u.fxiocb.result =
+ (fdisc->u.fxiocb.result == cpu_to_le32(0x68)) ?
+ cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED);
+
rval = le32_to_cpu(fdisc->u.fxiocb.result);
done_unmap_dma:
@@ -2092,6 +2115,10 @@ qlafx00_abort_command(srb_t *sp)
/* Command not found. */
return QLA_FUNCTION_FAILED;
}
+ if (sp->type == SRB_FXIOCB_DCMD)
+ return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
+ FXDISC_ABORT_IOCTL);
+
return qlafx00_async_abt_cmd(sp);
}
@@ -2419,7 +2446,6 @@ qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
/* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
- qla2x00_do_host_ramp_up(vha);
qla2x00_process_completed_request(vha, req, handle);
return;
}
@@ -2630,9 +2656,6 @@ check_scsi_status:
rsp_info_len, resid_len, fw_resid_len, sense_len,
par_sense_len, rsp_info_len);
- if (!res)
- qla2x00_do_host_ramp_up(vha);
-
if (rsp->status_srb == NULL)
sp->done(ha, sp, res);
}
@@ -3021,6 +3044,8 @@ qlafx00_intr_handler(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; clr_intr = 0) {
stat = QLAFX00_RD_INTR_REG(ha);
+ if (qla2x00_check_reg_for_disconnect(vha, stat))
+ break;
if ((stat & QLAFX00_HST_INT_STS_BITS) == 0)
break;
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 79a93c52baec..6cd7072cc0ff 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -304,7 +304,9 @@ struct register_host_info {
#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32)
struct config_info_data {
- uint8_t product_name[256];
+ uint8_t model_num[16];
+ uint8_t model_description[80];
+ uint8_t reserved0[160];
uint8_t symbolic_name[64];
uint8_t serial_num[32];
uint8_t hw_version[16];
@@ -343,6 +345,7 @@ struct config_info_data {
#define FXDISC_GET_TGT_NODE_INFO 0x80
#define FXDISC_GET_TGT_NODE_LIST 0x81
#define FXDISC_REG_HOST_INFO 0x99
+#define FXDISC_ABORT_IOCTL 0xff
#define QLAFX00_HBA_ICNTRL_REG 0x20B08
#define QLAFX00_ICR_ENB_MASK 0x80000000
@@ -490,7 +493,6 @@ struct qla_mt_iocb_rsp_fx00 {
#define FX00_DEF_RATOV 10
struct mr_data_fx00 {
- uint8_t product_name[256];
uint8_t symbolic_name[64];
uint8_t serial_num[32];
uint8_t hw_version[16];
@@ -511,6 +513,8 @@ struct mr_data_fx00 {
uint32_t old_aenmbx0_state;
uint32_t critical_temperature;
bool extended_io_enabled;
+ bool host_info_resend;
+ uint8_t hinfo_resend_timer_tick;
};
#define QLAFX00_EXTENDED_IO_EN_MASK 0x20
@@ -537,7 +541,11 @@ struct mr_data_fx00 {
#define QLAFX00_RESET_INTERVAL 120 /* number of seconds */
#define QLAFX00_MAX_RESET_INTERVAL 600 /* number of seconds */
#define QLAFX00_CRITEMP_INTERVAL 60 /* number of seconds */
+#define QLAFX00_HINFO_RESEND_INTERVAL 60 /* number of seconds */
#define QLAFX00_CRITEMP_THRSHLD 80 /* Celsius degrees */
+/* Max concurrent IOs that can be queued */
+#define QLAFX00_MAX_CANQUEUE 1024
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 11ce53dcbe7e..1e6ba4a369e2 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2096,6 +2096,7 @@ qla82xx_msix_default(int irq, void *dev_id)
int status = 0;
unsigned long flags;
uint32_t stat = 0;
+ uint32_t host_int = 0;
uint16_t mb[4];
rsp = (struct rsp_que *) dev_id;
@@ -2111,7 +2112,10 @@ qla82xx_msix_default(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
do {
- if (RD_REG_DWORD(&reg->host_int)) {
+ host_int = RD_REG_DWORD(&reg->host_int);
+ if (qla2x00_check_reg_for_disconnect(vha, host_int))
+ break;
+ if (host_int) {
stat = RD_REG_DWORD(&reg->host_status);
switch (stat & 0xff) {
@@ -2156,6 +2160,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
struct rsp_que *rsp;
struct device_reg_82xx __iomem *reg;
unsigned long flags;
+ uint32_t host_int = 0;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -2168,8 +2173,12 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
reg = &ha->iobase->isp82;
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
+ host_int = RD_REG_DWORD(&reg->host_int);
+ if (qla2x00_check_reg_for_disconnect(vha, host_int))
+ goto out;
qla24xx_process_response_queue(vha, rsp);
WRT_REG_DWORD(&reg->host_int, 0);
+out:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return IRQ_HANDLED;
}
@@ -2183,6 +2192,7 @@ qla82xx_poll(int irq, void *dev_id)
struct device_reg_82xx __iomem *reg;
int status = 0;
uint32_t stat;
+ uint32_t host_int = 0;
uint16_t mb[4];
unsigned long flags;
@@ -2198,7 +2208,10 @@ qla82xx_poll(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
- if (RD_REG_DWORD(&reg->host_int)) {
+ host_int = RD_REG_DWORD(&reg->host_int);
+ if (qla2x00_check_reg_for_disconnect(vha, host_int))
+ goto out;
+ if (host_int) {
stat = RD_REG_DWORD(&reg->host_status);
switch (stat & 0xff) {
case 0x1:
@@ -2224,8 +2237,9 @@ qla82xx_poll(int irq, void *dev_id)
stat * 0xff);
break;
}
+ WRT_REG_DWORD(&reg->host_int, 0);
}
- WRT_REG_DWORD(&reg->host_int, 0);
+out:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -3003,7 +3017,7 @@ qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
qla82xx_clear_drv_active(ha);
qla82xx_idc_unlock(ha);
} else if (IS_QLA8044(ha)) {
- qla8044_clear_drv_active(vha);
+ qla8044_clear_drv_active(ha);
qla8044_idc_unlock(ha);
}
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 4f5d66b2168b..f60989d729a8 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1257,10 +1257,10 @@ exit_start_fw:
}
void
-qla8044_clear_drv_active(struct scsi_qla_host *vha)
+qla8044_clear_drv_active(struct qla_hw_data *ha)
{
uint32_t drv_active;
- struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
drv_active &= ~(1 << (ha->portnum));
@@ -1324,7 +1324,7 @@ qla8044_device_bootstrap(struct scsi_qla_host *vha)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_info, vha, 0xb0b3,
"%s: HW State: FAILED\n", __func__);
- qla8044_clear_drv_active(vha);
+ qla8044_clear_drv_active(ha);
qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
QLA8XXX_DEV_FAILED);
return rval;
@@ -1555,6 +1555,15 @@ qla8044_need_reset_handler(struct scsi_qla_host *vha)
qla8044_idc_lock(ha);
}
+ drv_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_STATE_INDEX);
+ drv_active = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+ ql_log(ql_log_info, vha, 0xb0c5,
+ "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
+ __func__, vha->host_no, drv_state, drv_active);
+
if (!ha->flags.nic_core_reset_owner) {
ql_dbg(ql_dbg_p3p, vha, 0xb0c3,
"%s(%ld): reset acknowledged\n",
@@ -1580,23 +1589,15 @@ qla8044_need_reset_handler(struct scsi_qla_host *vha)
dev_state = qla8044_rd_direct(vha,
QLA8044_CRB_DEV_STATE_INDEX);
- } while (dev_state == QLA8XXX_DEV_NEED_RESET);
+ } while (((drv_state & drv_active) != drv_active) &&
+ (dev_state == QLA8XXX_DEV_NEED_RESET));
} else {
qla8044_set_rst_ready(vha);
/* wait for 10 seconds for reset ack from all functions */
reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
- drv_state = qla8044_rd_direct(vha,
- QLA8044_CRB_DRV_STATE_INDEX);
- drv_active = qla8044_rd_direct(vha,
- QLA8044_CRB_DRV_ACTIVE_INDEX);
-
- ql_log(ql_log_info, vha, 0xb0c5,
- "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
- __func__, vha->host_no, drv_state, drv_active);
-
- while (drv_state != drv_active) {
+ while ((drv_state & drv_active) != drv_active) {
if (time_after_eq(jiffies, reset_timeout)) {
ql_log(ql_log_info, vha, 0xb0c6,
"%s: RESET TIMEOUT!"
@@ -1736,7 +1737,7 @@ qla8044_update_idc_reg(struct scsi_qla_host *vha)
rval = qla8044_set_idc_ver(vha);
if (rval == QLA_FUNCTION_FAILED)
- qla8044_clear_drv_active(vha);
+ qla8044_clear_drv_active(ha);
qla8044_idc_unlock(ha);
exit_update_idc_reg:
@@ -1859,7 +1860,7 @@ qla8044_device_state_handler(struct scsi_qla_host *vha)
goto exit;
case QLA8XXX_DEV_COLD:
rval = qla8044_device_bootstrap(vha);
- goto exit;
+ break;
case QLA8XXX_DEV_INITIALIZING:
qla8044_idc_unlock(ha);
msleep(1000);
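
The reset-ack loops reworked above stop comparing drv_state and drv_active for strict equality and instead wait until every bit set in drv_active is also set in drv_state. Each bit corresponds to one PCI function (the same per-port bit that qla8044_clear_drv_active() clears), so the masked test only requires acknowledgements from functions that are actually up and is not stalled by stale bits left behind by functions that have gone away. Below is a standalone model of the two tests with made-up bit values, not code from the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Old test: exact equality between ack bits and active bits. */
static bool all_acked_old(uint32_t drv_state, uint32_t drv_active)
{
	return drv_state == drv_active;
}

/* New test: every active function has acked; extra bits are ignored. */
static bool all_acked_new(uint32_t drv_state, uint32_t drv_active)
{
	return (drv_state & drv_active) == drv_active;
}

int main(void)
{
	uint32_t drv_active = 0x5;	/* functions 0 and 2 are up */
	uint32_t drv_state  = 0x7;	/* both acked, plus a stale bit 1 */

	printf("old test: %s\n",
	       all_acked_old(drv_state, drv_active) ? "done" : "still waiting");
	printf("new test: %s\n",
	       all_acked_new(drv_state, drv_active) ? "done" : "still waiting");
	return 0;
}
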
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 52be35e0300c..89a53002b585 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -110,7 +110,8 @@ MODULE_PARM_DESC(ql2xfdmienable,
"Enables FDMI registrations. "
"0 - no FDMI. Default is 1 - perform FDMI.");
-int ql2xmaxqdepth = MAX_Q_DEPTH;
+#define MAX_Q_DEPTH 32
+static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to set for each LUN. "
@@ -728,10 +729,8 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
}
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
- if (!sp) {
- set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);
+ if (!sp)
goto qc24_host_busy;
- }
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
@@ -744,7 +743,6 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
- set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);
goto qc24_host_busy_free_sp;
}
@@ -1474,81 +1472,6 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
return tag_type;
}
-static void
-qla2x00_host_ramp_down_queuedepth(scsi_qla_host_t *vha)
-{
- scsi_qla_host_t *vp;
- struct Scsi_Host *shost;
- struct scsi_device *sdev;
- struct qla_hw_data *ha = vha->hw;
- unsigned long flags;
-
- ha->host_last_rampdown_time = jiffies;
-
- if (ha->cfg_lun_q_depth <= vha->host->cmd_per_lun)
- return;
-
- if ((ha->cfg_lun_q_depth / 2) < vha->host->cmd_per_lun)
- ha->cfg_lun_q_depth = vha->host->cmd_per_lun;
- else
- ha->cfg_lun_q_depth = ha->cfg_lun_q_depth / 2;
-
- /*
- * Geometrically ramp down the queue depth for all devices on this
- * adapter
- */
- spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
- shost = vp->host;
- shost_for_each_device(sdev, shost) {
- if (sdev->queue_depth > shost->cmd_per_lun) {
- if (sdev->queue_depth < ha->cfg_lun_q_depth)
- continue;
- ql_dbg(ql_dbg_io, vp, 0x3031,
- "%ld:%d:%d: Ramping down queue depth to %d",
- vp->host_no, sdev->id, sdev->lun,
- ha->cfg_lun_q_depth);
- qla2x00_change_queue_depth(sdev,
- ha->cfg_lun_q_depth, SCSI_QDEPTH_DEFAULT);
- }
- }
- }
- spin_unlock_irqrestore(&ha->vport_slock, flags);
-
- return;
-}
-
-static void
-qla2x00_host_ramp_up_queuedepth(scsi_qla_host_t *vha)
-{
- scsi_qla_host_t *vp;
- struct Scsi_Host *shost;
- struct scsi_device *sdev;
- struct qla_hw_data *ha = vha->hw;
- unsigned long flags;
-
- ha->host_last_rampup_time = jiffies;
- ha->cfg_lun_q_depth++;
-
- /*
- * Linearly ramp up the queue depth for all devices on this
- * adapter
- */
- spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
- shost = vp->host;
- shost_for_each_device(sdev, shost) {
- if (sdev->queue_depth > ha->cfg_lun_q_depth)
- continue;
- qla2x00_change_queue_depth(sdev, ha->cfg_lun_q_depth,
- SCSI_QDEPTH_RAMP_UP);
- }
- }
- spin_unlock_irqrestore(&ha->vport_slock, flags);
-
- return;
-}
-
/**
* qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
* @ha: HA context
@@ -2424,7 +2347,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->init_cb_size = sizeof(init_cb_t);
ha->link_data_rate = PORT_SPEED_UNKNOWN;
ha->optrom_size = OPTROM_SIZE_2300;
- ha->cfg_lun_q_depth = ql2xmaxqdepth;
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -2573,6 +2495,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL;
ha->mr.fw_hbt_en = 1;
+ ha->mr.host_info_resend = false;
+ ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
}
ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
@@ -2638,7 +2562,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host = base_vha->host;
base_vha->req = req;
if (IS_QLAFX00(ha))
- host->can_queue = 1024;
+ host->can_queue = QLAFX00_MAX_CANQUEUE;
else
host->can_queue = req->length + 128;
if (IS_QLA2XXX_MIDTYPE(ha))
@@ -2816,6 +2740,8 @@ que_init:
*/
qla2xxx_wake_dpc(base_vha);
+ INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
+
if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
@@ -2955,7 +2881,7 @@ probe_hw_failed:
}
if (IS_QLA8044(ha)) {
qla8044_idc_lock(ha);
- qla8044_clear_drv_active(base_vha);
+ qla8044_clear_drv_active(ha);
qla8044_idc_unlock(ha);
}
iospace_config_failed:
@@ -2980,22 +2906,6 @@ probe_out:
}
static void
-qla2x00_stop_dpc_thread(scsi_qla_host_t *vha)
-{
- struct qla_hw_data *ha = vha->hw;
- struct task_struct *t = ha->dpc_thread;
-
- if (ha->dpc_thread == NULL)
- return;
- /*
- * qla2xxx_wake_dpc checks for ->dpc_thread
- * so we need to zero it out.
- */
- ha->dpc_thread = NULL;
- kthread_stop(t);
-}
-
-static void
qla2x00_shutdown(struct pci_dev *pdev)
{
scsi_qla_host_t *vha;
@@ -3038,29 +2948,14 @@ qla2x00_shutdown(struct pci_dev *pdev)
qla2x00_free_fw_dump(ha);
}
+/* Deletes all the virtual ports for a given ha */
static void
-qla2x00_remove_one(struct pci_dev *pdev)
+qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
{
- scsi_qla_host_t *base_vha, *vha;
- struct qla_hw_data *ha;
+ struct Scsi_Host *scsi_host;
+ scsi_qla_host_t *vha;
unsigned long flags;
- /*
- * If the PCI device is disabled that means that probe failed and any
- * resources should be have cleaned up on probe exit.
- */
- if (!atomic_read(&pdev->enable_cnt))
- return;
-
- base_vha = pci_get_drvdata(pdev);
- ha = base_vha->hw;
-
- ha->flags.host_shutting_down = 1;
-
- set_bit(UNLOADING, &base_vha->dpc_flags);
- if (IS_QLAFX00(ha))
- qlafx00_driver_shutdown(base_vha, 20);
-
mutex_lock(&ha->vport_lock);
while (ha->cur_vport_count) {
spin_lock_irqsave(&ha->vport_slock, flags);
@@ -3068,7 +2963,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
BUG_ON(base_vha->list.next == &ha->vp_list);
/* This assumes first entry in ha->vp_list is always base vha */
vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
- scsi_host_get(vha->host);
+ scsi_host = scsi_host_get(vha->host);
spin_unlock_irqrestore(&ha->vport_slock, flags);
mutex_unlock(&ha->vport_lock);
@@ -3079,27 +2974,12 @@ qla2x00_remove_one(struct pci_dev *pdev)
mutex_lock(&ha->vport_lock);
}
mutex_unlock(&ha->vport_lock);
+}
- if (IS_QLA8031(ha)) {
- ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
- "Clearing fcoe driver presence.\n");
- if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
- ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
- "Error while clearing DRV-Presence.\n");
- }
-
- qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
-
- qla2x00_dfs_remove(base_vha);
-
- qla84xx_put_chip(base_vha);
-
- /* Disable timer */
- if (base_vha->timer_active)
- qla2x00_stop_timer(base_vha);
-
- base_vha->flags.online = 0;
-
+/* Stops all deferred work threads */
+static void
+qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
+{
/* Flush the work queue and remove it */
if (ha->wq) {
flush_workqueue(ha->wq);
@@ -3133,27 +3013,12 @@ qla2x00_remove_one(struct pci_dev *pdev)
ha->dpc_thread = NULL;
kthread_stop(t);
}
- qlt_remove_target(ha, base_vha);
-
- qla2x00_free_sysfs_attr(base_vha);
-
- fc_remove_host(base_vha->host);
-
- scsi_remove_host(base_vha->host);
-
- qla2x00_free_device(base_vha);
-
- scsi_host_put(base_vha->host);
+}
- if (IS_QLA8044(ha)) {
- qla8044_idc_lock(ha);
- qla8044_clear_drv_active(base_vha);
- qla8044_idc_unlock(ha);
- }
+static void
+qla2x00_unmap_iobases(struct qla_hw_data *ha)
+{
if (IS_QLA82XX(ha)) {
- qla82xx_idc_lock(ha);
- qla82xx_clear_drv_active(ha);
- qla82xx_idc_unlock(ha);
iounmap((device_reg_t __iomem *)ha->nx_pcibase);
if (!ql2xdbwr)
@@ -3171,6 +3036,84 @@ qla2x00_remove_one(struct pci_dev *pdev)
if (IS_QLA83XX(ha) && ha->msixbase)
iounmap(ha->msixbase);
}
+}
+
+static void
+qla2x00_clear_drv_active(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ qla8044_clear_drv_active(ha);
+ qla8044_idc_unlock(ha);
+ } else if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_clear_drv_active(ha);
+ qla82xx_idc_unlock(ha);
+ }
+}
+
+static void
+qla2x00_remove_one(struct pci_dev *pdev)
+{
+ scsi_qla_host_t *base_vha;
+ struct qla_hw_data *ha;
+
+ /*
+ * If the PCI device is disabled that means that probe failed and any
+	 * resources should have been cleaned up on probe exit.
+ */
+ if (!atomic_read(&pdev->enable_cnt))
+ return;
+
+ base_vha = pci_get_drvdata(pdev);
+ ha = base_vha->hw;
+
+ set_bit(UNLOADING, &base_vha->dpc_flags);
+
+ if (IS_QLAFX00(ha))
+ qlafx00_driver_shutdown(base_vha, 20);
+
+ qla2x00_delete_all_vps(ha, base_vha);
+
+ if (IS_QLA8031(ha)) {
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
+ "Clearing fcoe driver presence.\n");
+ if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
+ "Error while clearing DRV-Presence.\n");
+ }
+
+ qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+
+ qla2x00_dfs_remove(base_vha);
+
+ qla84xx_put_chip(base_vha);
+
+ /* Disable timer */
+ if (base_vha->timer_active)
+ qla2x00_stop_timer(base_vha);
+
+ base_vha->flags.online = 0;
+
+ qla2x00_destroy_deferred_work(ha);
+
+ qlt_remove_target(ha, base_vha);
+
+ qla2x00_free_sysfs_attr(base_vha, true);
+
+ fc_remove_host(base_vha->host);
+
+ scsi_remove_host(base_vha->host);
+
+ qla2x00_free_device(base_vha);
+
+ scsi_host_put(base_vha->host);
+
+ qla2x00_clear_drv_active(base_vha);
+
+ qla2x00_unmap_iobases(ha);
pci_release_selected_regions(ha->pdev, ha->bars);
kfree(ha);
@@ -3192,9 +3135,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
if (vha->timer_active)
qla2x00_stop_timer(vha);
- qla2x00_stop_dpc_thread(vha);
-
qla25xx_delete_queues(vha);
+
if (ha->flags.fce_enabled)
qla2x00_disable_fce_trace(vha, NULL, NULL);
@@ -4731,6 +4673,66 @@ exit:
return rval;
}
+void
+qla2x00_disable_board_on_pci_error(struct work_struct *work)
+{
+ struct qla_hw_data *ha = container_of(work, struct qla_hw_data,
+ board_disable);
+ struct pci_dev *pdev = ha->pdev;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ ql_log(ql_log_warn, base_vha, 0x015b,
+ "Disabling adapter.\n");
+
+ set_bit(UNLOADING, &base_vha->dpc_flags);
+
+ qla2x00_delete_all_vps(ha, base_vha);
+
+ qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+
+ qla2x00_dfs_remove(base_vha);
+
+ qla84xx_put_chip(base_vha);
+
+ if (base_vha->timer_active)
+ qla2x00_stop_timer(base_vha);
+
+ base_vha->flags.online = 0;
+
+ qla2x00_destroy_deferred_work(ha);
+
+ /*
+ * Do not try to stop beacon blink as it will issue a mailbox
+ * command.
+ */
+ qla2x00_free_sysfs_attr(base_vha, false);
+
+ fc_remove_host(base_vha->host);
+
+ scsi_remove_host(base_vha->host);
+
+ base_vha->flags.init_done = 0;
+ qla25xx_delete_queues(base_vha);
+ qla2x00_free_irqs(base_vha);
+ qla2x00_free_fcports(base_vha);
+ qla2x00_mem_free(ha);
+ qla82xx_md_free(base_vha);
+ qla2x00_free_queues(ha);
+
+ scsi_host_put(base_vha->host);
+
+ qla2x00_unmap_iobases(ha);
+
+ pci_release_selected_regions(ha->pdev, ha->bars);
+ kfree(ha);
+ ha = NULL;
+
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+}
+
/**************************************************************************
* qla2x00_do_dpc
* This kernel thread is a task that is schedule by the interrupt handler
@@ -4863,6 +4865,14 @@ qla2x00_do_dpc(void *data)
ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
"ISPFx00 Target Scan End\n");
}
+ if (test_and_clear_bit(FX00_HOST_INFO_RESEND,
+ &base_vha->dpc_flags)) {
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4023,
+ "ISPFx00 Host Info resend scheduled\n");
+ qlafx00_fx_disc(base_vha,
+ &base_vha->hw->mr.fcport,
+ FXDISC_REG_HOST_INFO);
+ }
}
if (test_and_clear_bit(ISP_ABORT_NEEDED,
@@ -4990,17 +5000,6 @@ loop_resync_check:
qla2xxx_flash_npiv_conf(base_vha);
}
- if (test_and_clear_bit(HOST_RAMP_DOWN_QUEUE_DEPTH,
- &base_vha->dpc_flags)) {
- /* Prevents simultaneous ramp up and down */
- clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
- &base_vha->dpc_flags);
- qla2x00_host_ramp_down_queuedepth(base_vha);
- }
-
- if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
- &base_vha->dpc_flags))
- qla2x00_host_ramp_up_queuedepth(base_vha);
intr_on_check:
if (!ha->interrupts_on)
ha->isp_ops->enable_intrs(ha);
@@ -5095,9 +5094,20 @@ qla2x00_timer(scsi_qla_host_t *vha)
return;
}
- /* Hardware read to raise pending EEH errors during mailbox waits. */
- if (!pci_channel_offline(ha->pdev))
+ /*
+ * Hardware read to raise pending EEH errors during mailbox waits. If
+ * the read returns -1 then disable the board.
+ */
+ if (!pci_channel_offline(ha->pdev)) {
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
+ if (w == 0xffff)
+ /*
+ * Schedule this on the default system workqueue so that
+ * all the adapter workqueues and the DPC thread can be
+			 * shut down cleanly.
+ */
+ schedule_work(&ha->board_disable);
+ }
/* Make sure qla82xx_watchdog is run only for physical port */
if (!vha->vp_idx && IS_P3P_TYPE(ha)) {
@@ -5182,7 +5192,6 @@ qla2x00_timer(scsi_qla_host_t *vha)
"Loop down - seconds remaining %d.\n",
atomic_read(&vha->loop_down_timer));
}
-
/* Check if beacon LED needs to be blinked for physical host only */
if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
/* There is no beacon_blink function for ISP82xx */
@@ -5206,9 +5215,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
- test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags) ||
- test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags))) {
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
ql_dbg(ql_dbg_timer, vha, 0x600b,
"isp_abort_needed=%d loop_resync_needed=%d "
"fcport_update_needed=%d start_dpc=%d "
@@ -5221,15 +5228,12 @@ qla2x00_timer(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_timer, vha, 0x600c,
"beacon_blink_needed=%d isp_unrecoverable=%d "
"fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
- "relogin_needed=%d, host_ramp_down_needed=%d "
- "host_ramp_up_needed=%d.\n",
+ "relogin_needed=%d.\n",
test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
- test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags),
- test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags));
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
qla2xxx_wake_dpc(vha);
}
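
The timer hunk above turns the existing EEH-probing config read into a disconnect check as well: pci_read_config_word() on PCI_VENDOR_ID returns 0xffff when the function is no longer on the bus, and in that case the same board_disable work is scheduled as from the interrupt paths. The equivalent check can be reproduced from userspace through the sysfs config file; the device path below is only an example, not something taken from this patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example path; substitute the real domain:bus:device.function. */
	const char *cfg = "/sys/bus/pci/devices/0000:03:00.0/config";
	uint8_t buf[2];
	FILE *f = fopen(cfg, "rb");

	if (!f) {
		perror(cfg);
		return 1;
	}
	/* Vendor ID lives in config-space bytes 0-1, little endian. */
	if (fread(buf, 1, 2, f) != 2) {
		fclose(f);
		fprintf(stderr, "short read from %s\n", cfg);
		return 1;
	}
	fclose(f);

	uint16_t vendor = (uint16_t)(buf[0] | (buf[1] << 8));
	if (vendor == 0xffff)
		puts("all ones: the function has dropped off the bus");
	else
		printf("vendor id 0x%04x: device present\n", vendor);
	return 0;
}
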
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 596480022b0a..2eb97d7e8d12 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -471,7 +471,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
schedule_delayed_work(&tgt->sess_del_work, 0);
else
schedule_delayed_work(&tgt->sess_del_work,
- jiffies - sess->expires);
+ sess->expires - jiffies);
}
/* ha->hardware_lock supposed to be held on entry */
@@ -550,13 +550,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
struct scsi_qla_host *vha = tgt->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_sess *sess;
- unsigned long flags;
+ unsigned long flags, elapsed;
spin_lock_irqsave(&ha->hardware_lock, flags);
while (!list_empty(&tgt->del_sess_list)) {
sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
del_list_entry);
- if (time_after_eq(jiffies, sess->expires)) {
+ elapsed = jiffies;
+ if (time_after_eq(elapsed, sess->expires)) {
qlt_undelete_sess(sess);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
@@ -566,7 +567,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
ha->tgt.tgt_ops->put_sess(sess);
} else {
schedule_delayed_work(&tgt->sess_del_work,
- jiffies - sess->expires);
+ sess->expires - elapsed);
break;
}
}
@@ -589,7 +590,7 @@ static struct qla_tgt_sess *qlt_create_sess(
/* Check to avoid double sessions */
spin_lock_irqsave(&ha->hardware_lock, flags);
- list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
+ list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
sess_list_entry) {
if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
@@ -626,7 +627,7 @@ static struct qla_tgt_sess *qlt_create_sess(
return NULL;
}
- sess->tgt = ha->tgt.qla_tgt;
+ sess->tgt = vha->vha_tgt.qla_tgt;
sess->vha = vha;
sess->s_id = fcport->d_id;
sess->loop_id = fcport->loop_id;
@@ -634,7 +635,7 @@ static struct qla_tgt_sess *qlt_create_sess(
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
"Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
- sess, ha->tgt.qla_tgt);
+ sess, vha->vha_tgt.qla_tgt);
be_sid[0] = sess->s_id.b.domain;
be_sid[1] = sess->s_id.b.area;
@@ -661,8 +662,8 @@ static struct qla_tgt_sess *qlt_create_sess(
memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
spin_lock_irqsave(&ha->hardware_lock, flags);
- list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
- ha->tgt.qla_tgt->sess_count++;
+ list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
+ vha->vha_tgt.qla_tgt->sess_count++;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
@@ -681,7 +682,7 @@ static struct qla_tgt_sess *qlt_create_sess(
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_sess *sess;
unsigned long flags;
@@ -691,6 +692,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
if (!tgt || (fcport->port_type != FCT_INITIATOR))
return;
+ if (qla_ini_mode_enabled(vha))
+ return;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
if (tgt->tgt_stop) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -700,9 +704,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
if (!sess) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- mutex_lock(&ha->tgt.tgt_mutex);
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
sess = qlt_create_sess(vha, fcport, false);
- mutex_unlock(&ha->tgt.tgt_mutex);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
spin_lock_irqsave(&ha->hardware_lock, flags);
} else {
@@ -738,7 +742,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_sess *sess;
unsigned long flags;
@@ -805,12 +809,12 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
* Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
* Lock is needed, because we still can get an incoming packet.
*/
- mutex_lock(&ha->tgt.tgt_mutex);
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
spin_lock_irqsave(&ha->hardware_lock, flags);
tgt->tgt_stop = 1;
qlt_clear_tgt_db(tgt, true);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- mutex_unlock(&ha->tgt.tgt_mutex);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
flush_delayed_work(&tgt->sess_del_work);
@@ -844,20 +848,21 @@ EXPORT_SYMBOL(qlt_stop_phase1);
void qlt_stop_phase2(struct qla_tgt *tgt)
{
struct qla_hw_data *ha = tgt->ha;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
unsigned long flags;
if (tgt->tgt_stopped) {
- ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
"Already in tgt->tgt_stopped state\n");
dump_stack();
return;
}
- ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
"Waiting for %d IRQ commands to complete (tgt %p)",
tgt->irq_cmd_count, tgt);
- mutex_lock(&ha->tgt.tgt_mutex);
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
spin_lock_irqsave(&ha->hardware_lock, flags);
while (tgt->irq_cmd_count != 0) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -867,9 +872,9 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
tgt->tgt_stop = 0;
tgt->tgt_stopped = 1;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- mutex_unlock(&ha->tgt.tgt_mutex);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
- ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
@@ -877,14 +882,14 @@ EXPORT_SYMBOL(qlt_stop_phase2);
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
- struct qla_hw_data *ha = tgt->ha;
+ scsi_qla_host_t *vha = tgt->vha;
- if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
+ if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
qlt_stop_phase2(tgt);
- ha->tgt.qla_tgt = NULL;
+ vha->vha_tgt.qla_tgt = NULL;
- ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
"Release of tgt %p finished\n", tgt);
kfree(tgt);
@@ -948,8 +953,8 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
return;
}
- if (ha->tgt.qla_tgt != NULL)
- ha->tgt.qla_tgt->notify_ack_expected++;
+ if (vha->vha_tgt.qla_tgt != NULL)
+ vha->vha_tgt.qla_tgt->notify_ack_expected++;
pkt->entry_type = NOTIFY_ACK_TYPE;
pkt->entry_count = 1;
@@ -1053,7 +1058,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
/* Other bytes are zero */
}
- ha->tgt.qla_tgt->abts_resp_expected++;
+ vha->vha_tgt.qla_tgt->abts_resp_expected++;
qla2x00_start_iocbs(vha, vha->req);
}
@@ -1205,7 +1210,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
"qla_target(%d): task abort for non-existant session\n",
vha->vp_idx);
- rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
+ rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
if (rc != 0) {
qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
@@ -2156,8 +2161,7 @@ static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
struct qla_tgt_cmd *cmd, void *ctio)
{
struct qla_tgt_srr_ctio *sc;
- struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_srr_imm *imm;
tgt->ctio_srr_id++;
@@ -2473,7 +2477,7 @@ static void qlt_do_work(struct work_struct *work)
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
scsi_qla_host_t *vha = cmd->vha;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_sess *sess = NULL;
struct atio_from_isp *atio = &cmd->atio;
unsigned char *cdb;
@@ -2506,10 +2510,10 @@ static void qlt_do_work(struct work_struct *work)
goto out_term;
}
- mutex_lock(&ha->tgt.tgt_mutex);
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
sess = qlt_make_local_sess(vha, s_id);
/* sess has an extra creation ref. */
- mutex_unlock(&ha->tgt.tgt_mutex);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
if (!sess)
goto out_term;
@@ -2575,8 +2579,7 @@ out_term:
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
struct atio_from_isp *atio)
{
- struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_cmd *cmd;
if (unlikely(tgt->tgt_stop)) {
@@ -2592,11 +2595,9 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
return -ENOMEM;
}
- INIT_LIST_HEAD(&cmd->cmd_list);
-
memcpy(&cmd->atio, atio, sizeof(*atio));
cmd->state = QLA_TGT_STATE_NEW;
- cmd->tgt = ha->tgt.qla_tgt;
+ cmd->tgt = vha->vha_tgt.qla_tgt;
cmd->vha = vha;
INIT_WORK(&cmd->work, qlt_do_work);
@@ -2722,7 +2723,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
uint32_t lun, unpacked_lun;
int lun_size, fn;
- tgt = ha->tgt.qla_tgt;
+ tgt = vha->vha_tgt.qla_tgt;
lun = a->u.isp24.fcp_cmnd.lun;
lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
@@ -2796,7 +2797,7 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
"qla_target(%d): task abort for unexisting "
"session\n", vha->vp_idx);
- return qlt_sched_sess_work(ha->tgt.qla_tgt,
+ return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
}
@@ -2809,7 +2810,6 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *iocb)
{
- struct qla_hw_data *ha = vha->hw;
int res = 0;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
@@ -2827,7 +2827,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
case ELS_PDISC:
case ELS_ADISC:
{
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
if (tgt->link_reinit_iocb_pending) {
qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
0, 0, 0, 0, 0, 0);
@@ -3201,8 +3201,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *iocb)
{
struct qla_tgt_srr_imm *imm;
- struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_srr_ctio *sctio;
tgt->imm_srr_id++;
@@ -3312,7 +3311,7 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
case IMM_NTFY_LIP_LINK_REINIT:
{
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
"qla_target(%d): LINK REINIT (loop %#x, "
"subcode %x)\n", vha->vp_idx,
@@ -3488,7 +3487,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
struct atio_from_isp *atio)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
int rc;
if (unlikely(tgt == NULL)) {
@@ -3590,7 +3589,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
if (unlikely(tgt == NULL)) {
ql_dbg(ql_dbg_tgt, vha, 0xe05d,
@@ -3793,7 +3792,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
uint16_t *mailbox)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
int login_code;
ql_dbg(ql_dbg_tgt, vha, 0xe039,
@@ -3923,14 +3922,14 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
uint8_t *s_id)
{
- struct qla_hw_data *ha = vha->hw;
struct qla_tgt_sess *sess = NULL;
fc_port_t *fcport = NULL;
int rc, global_resets;
uint16_t loop_id = 0;
retry:
- global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+ global_resets =
+ atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
if (rc != 0) {
@@ -3957,12 +3956,13 @@ retry:
return NULL;
if (global_resets !=
- atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+ atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
"qla_target(%d): global reset during session discovery "
"(counter was %d, new %d), retrying", vha->vp_idx,
global_resets,
- atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+ atomic_read(&vha->vha_tgt.
+ qla_tgt->tgt_global_resets_count));
goto retry;
}
@@ -3997,10 +3997,10 @@ static void qlt_abort_work(struct qla_tgt *tgt,
if (!sess) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- mutex_lock(&ha->tgt.tgt_mutex);
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
sess = qlt_make_local_sess(vha, s_id);
/* sess has got an extra creation ref */
- mutex_unlock(&ha->tgt.tgt_mutex);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!sess)
@@ -4051,10 +4051,10 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
if (!sess) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- mutex_lock(&ha->tgt.tgt_mutex);
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
sess = qlt_make_local_sess(vha, s_id);
/* sess has got an extra creation ref */
- mutex_unlock(&ha->tgt.tgt_mutex);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!sess)
@@ -4140,9 +4140,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
}
ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
- "Registering target for host %ld(%p)", base_vha->host_no, ha);
+ "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
- BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));
+ BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
if (!tgt) {
@@ -4170,7 +4170,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
atomic_set(&tgt->tgt_global_resets_count, 0);
- ha->tgt.qla_tgt = tgt;
+ base_vha->vha_tgt.qla_tgt = tgt;
ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
"qla_target(%d): using 64 Bit PCI addressing",
@@ -4191,16 +4191,16 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
- if (!ha->tgt.qla_tgt)
+ if (!vha->vha_tgt.qla_tgt)
return 0;
mutex_lock(&qla_tgt_mutex);
- list_del(&ha->tgt.qla_tgt->tgt_list_entry);
+ list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
mutex_unlock(&qla_tgt_mutex);
ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
vha->host_no, ha);
- qlt_release(ha->tgt.qla_tgt);
+ qlt_release(vha->vha_tgt.qla_tgt);
return 0;
}
@@ -4234,8 +4234,9 @@ static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
* @callback: lport initialization callback for tcm_qla2xxx code
* @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
*/
-int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
- int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
+int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
+ u64 npiv_wwpn, u64 npiv_wwnn,
+ int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
struct qla_tgt *tgt;
struct scsi_qla_host *vha;
@@ -4254,14 +4255,11 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
if (!host)
continue;
- if (ha->tgt.tgt_ops != NULL)
- continue;
-
if (!(host->hostt->supported_mode & MODE_TARGET))
continue;
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (host->active_mode & MODE_TARGET) {
+ if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
host->host_no);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4275,23 +4273,18 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
" qla2xxx scsi_host\n");
continue;
}
- qlt_lport_dump(vha, wwpn, b);
+ qlt_lport_dump(vha, phys_wwpn, b);
if (memcmp(vha->port_name, b, WWN_SIZE)) {
scsi_host_put(host);
continue;
}
- /*
- * Setup passed parameters ahead of invoking callback
- */
- ha->tgt.tgt_ops = qla_tgt_ops;
- ha->tgt.target_lport_ptr = target_lport_ptr;
- rc = (*callback)(vha);
- if (rc != 0) {
- ha->tgt.tgt_ops = NULL;
- ha->tgt.target_lport_ptr = NULL;
- }
mutex_unlock(&qla_tgt_mutex);
+
+ rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
+ if (rc != 0)
+ scsi_host_put(host);
+
return rc;
}
mutex_unlock(&qla_tgt_mutex);
@@ -4312,7 +4305,7 @@ void qlt_lport_deregister(struct scsi_qla_host *vha)
/*
* Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
*/
- ha->tgt.target_lport_ptr = NULL;
+ vha->vha_tgt.target_lport_ptr = NULL;
ha->tgt.tgt_ops = NULL;
/*
* Release the Scsi_Host reference for the underlying qla2xxx host
@@ -4374,8 +4367,9 @@ void
qlt_enable_vha(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
unsigned long flags;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
if (!tgt) {
ql_dbg(ql_dbg_tgt, vha, 0xe069,
@@ -4390,9 +4384,14 @@ qlt_enable_vha(struct scsi_qla_host *vha)
qlt_set_mode(vha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
- qla2x00_wait_for_hba_online(vha);
+ if (vha->vp_idx) {
+ qla24xx_disable_vp(vha);
+ qla24xx_enable_vp(vha);
+ } else {
+ set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+ qla2xxx_wake_dpc(base_vha);
+ qla2x00_wait_for_hba_online(base_vha);
+ }
}
EXPORT_SYMBOL(qlt_enable_vha);
@@ -4405,7 +4404,7 @@ void
qlt_disable_vha(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
unsigned long flags;
if (!tgt) {
@@ -4436,8 +4435,10 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
if (!qla_tgt_mode_enabled(vha))
return;
- mutex_init(&ha->tgt.tgt_mutex);
- mutex_init(&ha->tgt.tgt_host_action_mutex);
+ vha->vha_tgt.qla_tgt = NULL;
+
+ mutex_init(&vha->vha_tgt.tgt_mutex);
+ mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
qlt_clear_mode(vha);
@@ -4448,6 +4449,8 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
* assigning the value appropriately.
*/
ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+
+ qlt_add_target(ha, vha);
}
void
@@ -4766,8 +4769,8 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
}
- mutex_init(&ha->tgt.tgt_mutex);
- mutex_init(&ha->tgt.tgt_host_action_mutex);
+ mutex_init(&base_vha->vha_tgt.tgt_mutex);
+ mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
qlt_clear_mode(base_vha);
}
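
Among the target-mode changes above, the delayed-work fix flips the operands when computing how long to wait before reaping a deleted session: jiffies and sess->expires are unsigned tick counters, so "jiffies - sess->expires" for an expiry still in the future wraps around to an enormous delay, whereas "sess->expires - jiffies" is the time actually remaining. The standalone sketch below shows the difference with made-up tick values standing in for jiffies and sess->expires.

#include <stdio.h>

int main(void)
{
	unsigned long now = 1000;	/* stand-in for jiffies */
	unsigned long expires = 1250;	/* session due for deletion in 250 ticks */

	printf("expires - now = %lu ticks (the intended delay)\n",
	       expires - now);
	printf("now - expires = %lu ticks (wraps to a near-infinite delay)\n",
	       now - expires);
	return 0;
}
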
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index b33e411f28a0..66e755cdde57 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -855,7 +855,6 @@ struct qla_tgt_cmd {
uint16_t loop_id; /* to save extra sess dereferences */
struct qla_tgt *tgt; /* to save extra sess dereferences */
struct scsi_qla_host *vha;
- struct list_head cmd_list;
struct atio_from_isp atio;
};
@@ -932,8 +931,8 @@ void qlt_disable_vha(struct scsi_qla_host *);
*/
extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
-extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64,
- int (*callback)(struct scsi_qla_host *), void *);
+extern int qlt_lport_register(void *, u64, u64, u64,
+ int (*callback)(struct scsi_qla_host *, void *, u64, u64));
extern void qlt_lport_deregister(struct scsi_qla_host *);
extern void qlt_unreg_sess(struct qla_tgt_sess *);
extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index a808e293dae0..31d19535b015 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.06.00.08-k"
+#define QLA2XXX_VERSION "8.06.00.12-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 6
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 7eb19be35d46..75a141bbe74d 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -53,16 +53,6 @@
struct workqueue_struct *tcm_qla2xxx_free_wq;
struct workqueue_struct *tcm_qla2xxx_cmd_wq;
-static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
-{
- return 0;
-}
-
/*
* Parse WWN.
* If strict, we require lower-case hex and colon separators to be sure
@@ -174,7 +164,7 @@ static int tcm_qla2xxx_npiv_parse_wwn(
*wwnn = 0;
/* count may include a LF at end of string */
- if (name[cnt-1] == '\n')
+ if (name[cnt-1] == '\n' || name[cnt-1] == 0)
cnt--;
/* validate we have enough characters for WWPN */
@@ -777,6 +767,9 @@ static void tcm_qla2xxx_put_session(struct se_session *se_sess)
static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
{
+ if (!sess)
+ return;
+
assert_spin_locked(&sess->vha->hw->hardware_lock);
kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
}
@@ -957,7 +950,6 @@ static ssize_t tcm_qla2xxx_tpg_store_enable(
struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
struct tcm_qla2xxx_lport, lport_wwn);
struct scsi_qla_host *vha = lport->qla_vha;
- struct qla_hw_data *ha = vha->hw;
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
unsigned long op;
@@ -977,12 +969,12 @@ static ssize_t tcm_qla2xxx_tpg_store_enable(
atomic_set(&tpg->lport_tpg_enabled, 1);
qlt_enable_vha(vha);
} else {
- if (!ha->tgt.qla_tgt) {
- pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
+ if (!vha->vha_tgt.qla_tgt) {
+ pr_err("struct qla_hw_data *vha->vha_tgt.qla_tgt is NULL\n");
return -ENODEV;
}
atomic_set(&tpg->lport_tpg_enabled, 0);
- qlt_stop_phase1(ha->tgt.qla_tgt);
+ qlt_stop_phase1(vha->vha_tgt.qla_tgt);
}
return count;
@@ -1011,7 +1003,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
return ERR_PTR(-EINVAL);
- if (!lport->qla_npiv_vp && (tpgt != 1)) {
+ if ((tpgt != 1)) {
pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
return ERR_PTR(-ENOSYS);
}
@@ -1038,11 +1030,8 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
kfree(tpg);
return NULL;
}
- /*
- * Setup local TPG=1 pointer for non NPIV mode.
- */
- if (lport->qla_npiv_vp == NULL)
- lport->tpg_1 = tpg;
+
+ lport->tpg_1 = tpg;
return &tpg->se_tpg;
}
@@ -1053,19 +1042,17 @@ static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
struct tcm_qla2xxx_tpg, se_tpg);
struct tcm_qla2xxx_lport *lport = tpg->lport;
struct scsi_qla_host *vha = lport->qla_vha;
- struct qla_hw_data *ha = vha->hw;
/*
* Call into qla2x_target.c LLD logic to shutdown the active
* FC Nexuses and disable target mode operation for this qla_hw_data
*/
- if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
- qlt_stop_phase1(ha->tgt.qla_tgt);
+ if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop)
+ qlt_stop_phase1(vha->vha_tgt.qla_tgt);
core_tpg_deregister(se_tpg);
/*
* Clear local TPG=1 pointer for non NPIV mode.
*/
- if (lport->qla_npiv_vp == NULL)
lport->tpg_1 = NULL;
kfree(tpg);
@@ -1095,12 +1082,22 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
tpg->lport = lport;
tpg->lport_tpgt = tpgt;
+ /*
+ * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
+ * NodeACLs
+ */
+ tpg->tpg_attrib.generate_node_acls = 1;
+ tpg->tpg_attrib.demo_mode_write_protect = 1;
+ tpg->tpg_attrib.cache_dynamic_acls = 1;
+ tpg->tpg_attrib.demo_mode_login_only = 1;
+
ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
return NULL;
}
+ lport->tpg_1 = tpg;
return &tpg->se_tpg;
}
@@ -1111,13 +1108,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
scsi_qla_host_t *vha,
const uint8_t *s_id)
{
- struct qla_hw_data *ha = vha->hw;
struct tcm_qla2xxx_lport *lport;
struct se_node_acl *se_nacl;
struct tcm_qla2xxx_nacl *nacl;
u32 key;
- lport = ha->tgt.target_lport_ptr;
+ lport = vha->vha_tgt.target_lport_ptr;
if (!lport) {
pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
dump_stack();
@@ -1221,13 +1217,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
scsi_qla_host_t *vha,
const uint16_t loop_id)
{
- struct qla_hw_data *ha = vha->hw;
struct tcm_qla2xxx_lport *lport;
struct se_node_acl *se_nacl;
struct tcm_qla2xxx_nacl *nacl;
struct tcm_qla2xxx_fc_loopid *fc_loopid;
- lport = ha->tgt.target_lport_ptr;
+ lport = vha->vha_tgt.target_lport_ptr;
if (!lport) {
pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
dump_stack();
@@ -1341,6 +1336,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
{
struct qla_tgt *tgt = sess->tgt;
struct qla_hw_data *ha = tgt->ha;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
struct se_session *se_sess;
struct se_node_acl *se_nacl;
struct tcm_qla2xxx_lport *lport;
@@ -1357,7 +1353,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
se_nacl = se_sess->se_node_acl;
nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
- lport = ha->tgt.target_lport_ptr;
+ lport = vha->vha_tgt.target_lport_ptr;
if (!lport) {
pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
dump_stack();
@@ -1391,7 +1387,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
unsigned char port_name[36];
unsigned long flags;
- lport = ha->tgt.target_lport_ptr;
+ lport = vha->vha_tgt.target_lport_ptr;
if (!lport) {
pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
dump_stack();
@@ -1455,7 +1451,8 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
{
struct qla_tgt *tgt = sess->tgt;
struct qla_hw_data *ha = tgt->ha;
- struct tcm_qla2xxx_lport *lport = ha->tgt.target_lport_ptr;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr;
struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
struct tcm_qla2xxx_nacl, se_node_acl);
@@ -1562,15 +1559,18 @@ static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
return 0;
}
-static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
+static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha,
+ void *target_lport_ptr,
+ u64 npiv_wwpn, u64 npiv_wwnn)
{
struct qla_hw_data *ha = vha->hw;
- struct tcm_qla2xxx_lport *lport;
+ struct tcm_qla2xxx_lport *lport =
+ (struct tcm_qla2xxx_lport *)target_lport_ptr;
/*
- * Setup local pointer to vha, NPIV VP pointer (if present) and
- * vha->tcm_lport pointer
+ * Setup tgt_ops, local pointer to vha and target_lport_ptr
*/
- lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
+ ha->tgt.tgt_ops = &tcm_qla2xxx_template;
+ vha->vha_tgt.target_lport_ptr = target_lport_ptr;
lport->qla_vha = vha;
return 0;
@@ -1602,8 +1602,8 @@ static struct se_wwn *tcm_qla2xxx_make_lport(
if (ret != 0)
goto out;
- ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
- tcm_qla2xxx_lport_register_cb, lport);
+ ret = qlt_lport_register(lport, wwpn, 0, 0,
+ tcm_qla2xxx_lport_register_cb);
if (ret != 0)
goto out_lport;
@@ -1621,7 +1621,6 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
struct tcm_qla2xxx_lport *lport = container_of(wwn,
struct tcm_qla2xxx_lport, lport_wwn);
struct scsi_qla_host *vha = lport->qla_vha;
- struct qla_hw_data *ha = vha->hw;
struct se_node_acl *node;
u32 key = 0;
@@ -1630,8 +1629,8 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
* shutdown of struct qla_tgt after the call to
* qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
*/
- if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
- qlt_stop_phase2(ha->tgt.qla_tgt);
+ if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped)
+ qlt_stop_phase2(vha->vha_tgt.qla_tgt);
qlt_lport_deregister(vha);
@@ -1642,17 +1641,70 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
kfree(lport);
}
+static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
+ void *target_lport_ptr,
+ u64 npiv_wwpn, u64 npiv_wwnn)
+{
+ struct fc_vport *vport;
+ struct Scsi_Host *sh = base_vha->host;
+ struct scsi_qla_host *npiv_vha;
+ struct tcm_qla2xxx_lport *lport =
+ (struct tcm_qla2xxx_lport *)target_lport_ptr;
+ struct fc_vport_identifiers vport_id;
+
+ if (!qla_tgt_mode_enabled(base_vha)) {
+ pr_err("qla2xxx base_vha not enabled for target mode\n");
+ return -EPERM;
+ }
+
+ memset(&vport_id, 0, sizeof(vport_id));
+ vport_id.port_name = npiv_wwpn;
+ vport_id.node_name = npiv_wwnn;
+ vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vport_id.vport_type = FC_PORTTYPE_NPIV;
+ vport_id.disable = false;
+
+ vport = fc_vport_create(sh, 0, &vport_id);
+ if (!vport) {
+ pr_err("fc_vport_create failed for qla2xxx_npiv\n");
+ return -ENODEV;
+ }
+ /*
+ * Setup local pointer to NPIV vhba + target_lport_ptr
+ */
+ npiv_vha = (struct scsi_qla_host *)vport->dd_data;
+ npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
+ lport->qla_vha = npiv_vha;
+
+ scsi_host_get(npiv_vha->host);
+ return 0;
+}
+
+
static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
struct tcm_qla2xxx_lport *lport;
- u64 npiv_wwpn, npiv_wwnn;
+ u64 phys_wwpn, npiv_wwpn, npiv_wwnn;
+ char *p, tmp[128];
int ret;
- if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
- &npiv_wwpn, &npiv_wwnn) < 0)
+ snprintf(tmp, 128, "%s", name);
+
+ p = strchr(tmp, '@');
+ if (!p) {
+ pr_err("Unable to locate NPIV '@' seperator\n");
+ return ERR_PTR(-EINVAL);
+ }
+ *p++ = '\0';
+
+ if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0)
+ return ERR_PTR(-EINVAL);
+
+ if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1,
+ &npiv_wwpn, &npiv_wwnn) < 0)
return ERR_PTR(-EINVAL);
lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
@@ -1666,12 +1718,19 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
-/* FIXME: tcm_qla2xxx_npiv_make_lport */
- ret = -ENOSYS;
+ ret = tcm_qla2xxx_init_lport(lport);
if (ret != 0)
goto out;
+ ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn,
+ tcm_qla2xxx_lport_register_npiv_cb);
+ if (ret != 0)
+ goto out_lport;
+
return &lport->lport_wwn;
+out_lport:
+ vfree(lport->lport_loopid_map);
+ btree_destroy32(&lport->lport_fcport_map);
out:
kfree(lport);
return ERR_PTR(ret);
@@ -1681,14 +1740,16 @@ static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
{
struct tcm_qla2xxx_lport *lport = container_of(wwn,
struct tcm_qla2xxx_lport, lport_wwn);
- struct scsi_qla_host *vha = lport->qla_vha;
- struct Scsi_Host *sh = vha->host;
+ struct scsi_qla_host *npiv_vha = lport->qla_vha;
+ struct qla_hw_data *ha = npiv_vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ scsi_host_put(npiv_vha->host);
/*
- * Notify libfc that we want to release the lport->npiv_vport
+ * Notify libfc that we want to release the vha->fc_vport
*/
- fc_vport_terminate(lport->npiv_vport);
-
- scsi_host_put(sh);
+ fc_vport_terminate(npiv_vha->fc_vport);
+ scsi_host_put(base_vha->host);
kfree(lport);
}
@@ -1769,14 +1830,16 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
.tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
.tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
- .tpg_check_demo_mode = tcm_qla2xxx_check_false,
- .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true,
- .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
- .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
+ .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
+ .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode,
+ .tpg_check_prod_mode_write_protect =
+ tcm_qla2xxx_check_prod_write_protect,
.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
.tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
.tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
+ .check_stop_free = tcm_qla2xxx_check_stop_free,
.release_cmd = tcm_qla2xxx_release_cmd,
.put_session = tcm_qla2xxx_put_session,
.shutdown_session = tcm_qla2xxx_shutdown_session,
@@ -1871,7 +1934,8 @@ static int tcm_qla2xxx_register_configfs(void)
* Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
*/
npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
+ tcm_qla2xxx_tpg_attrs;
npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 771f7b816443..275d8b9a7a34 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -70,12 +70,8 @@ struct tcm_qla2xxx_lport {
struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
/* Pointer to struct scsi_qla_host from qla2xxx LLD */
struct scsi_qla_host *qla_vha;
- /* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */
- struct scsi_qla_host *qla_npiv_vp;
/* Pointer to struct qla_tgt pointer */
struct qla_tgt lport_qla_tgt;
- /* Pointer to struct fc_vport for NPIV vport from libfc */
- struct fc_vport *npiv_vport;
/* Pointer to TPG=1 for non NPIV mode */
struct tcm_qla2xxx_tpg *tpg_1;
/* Returned by tcm_qla2xxx_make_lport() */
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c
index cf8fdf1d1257..04a0027dbca0 100644
--- a/drivers/scsi/qla4xxx/ql4_bsg.c
+++ b/drivers/scsi/qla4xxx/ql4_bsg.c
@@ -446,6 +446,363 @@ leave:
return rval;
}
+static void ql4xxx_execute_diag_cmd(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint8_t *rsp_ptr = NULL;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
+
+ if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+ ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
+ __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ goto exit_diag_mem_test;
+ }
+
+ bsg_reply->reply_payload_rcv_len = 0;
+ memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
+ sizeof(uint32_t) * MBOX_REG_COUNT);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
+ __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
+ mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
+ mbox_cmd[7]));
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
+ &mbox_sts[0]);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
+ __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
+ mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
+ mbox_sts[7]));
+
+ if (status == QLA_SUCCESS)
+ bsg_reply->result = DID_OK << 16;
+ else
+ bsg_reply->result = DID_ERROR << 16;
+
+ /* Send mbox_sts to application */
+ bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
+ rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
+ memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));
+
+exit_diag_mem_test:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: bsg_reply->result = x%x, status = %s\n",
+ __func__, bsg_reply->result, STATUS(status)));
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+}
+
+static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha,
+ int wait_for_link)
+{
+ int status = QLA_SUCCESS;
+
+ if (!wait_for_completion_timeout(&ha->idc_comp, (IDC_COMP_TOV * HZ))) {
+ ql4_printk(KERN_INFO, ha, "%s: IDC Complete notification not received, Waiting for another %d timeout",
+ __func__, ha->idc_extend_tmo);
+ if (ha->idc_extend_tmo) {
+ if (!wait_for_completion_timeout(&ha->idc_comp,
+ (ha->idc_extend_tmo * HZ))) {
+ ha->notify_idc_comp = 0;
+ ha->notify_link_up_comp = 0;
+ ql4_printk(KERN_WARNING, ha, "%s: IDC Complete notification not received",
+ __func__);
+ status = QLA_ERROR;
+ goto exit_wait;
+ } else {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: IDC Complete notification received\n",
+ __func__));
+ }
+ }
+ } else {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: IDC Complete notification received\n",
+ __func__));
+ }
+ ha->notify_idc_comp = 0;
+
+ if (wait_for_link) {
+ if (!wait_for_completion_timeout(&ha->link_up_comp,
+ (IDC_COMP_TOV * HZ))) {
+ ha->notify_link_up_comp = 0;
+ ql4_printk(KERN_WARNING, ha, "%s: LINK UP notification not received",
+ __func__);
+ status = QLA_ERROR;
+ goto exit_wait;
+ } else {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: LINK UP notification received\n",
+ __func__));
+ }
+ ha->notify_link_up_comp = 0;
+ }
+
+exit_wait:
+ return status;
+}
+
+static int qla4_83xx_pre_loopback_config(struct scsi_qla_host *ha,
+ uint32_t *mbox_cmd)
+{
+ uint32_t config = 0;
+ int status = QLA_SUCCESS;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
+
+ status = qla4_83xx_get_port_config(ha, &config);
+ if (status != QLA_SUCCESS)
+ goto exit_pre_loopback_config;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Default port config=%08X\n",
+ __func__, config));
+
+ if ((config & ENABLE_INTERNAL_LOOPBACK) ||
+ (config & ENABLE_EXTERNAL_LOOPBACK)) {
+ ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics already in progress. Invalid requiest\n",
+ __func__);
+ goto exit_pre_loopback_config;
+ }
+
+ if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
+ config |= ENABLE_INTERNAL_LOOPBACK;
+
+ if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
+ config |= ENABLE_EXTERNAL_LOOPBACK;
+
+ config &= ~ENABLE_DCBX;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: New port config=%08X\n",
+ __func__, config));
+
+ ha->notify_idc_comp = 1;
+ ha->notify_link_up_comp = 1;
+
+ /* get the link state */
+ qla4xxx_get_firmware_state(ha);
+
+ status = qla4_83xx_set_port_config(ha, &config);
+ if (status != QLA_SUCCESS) {
+ ha->notify_idc_comp = 0;
+ ha->notify_link_up_comp = 0;
+ goto exit_pre_loopback_config;
+ }
+exit_pre_loopback_config:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
+ STATUS(status)));
+ return status;
+}
+
+static int qla4_83xx_post_loopback_config(struct scsi_qla_host *ha,
+ uint32_t *mbox_cmd)
+{
+ int status = QLA_SUCCESS;
+ uint32_t config = 0;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
+
+ status = qla4_83xx_get_port_config(ha, &config);
+ if (status != QLA_SUCCESS)
+ goto exit_post_loopback_config;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: port config=%08X\n", __func__,
+ config));
+
+ if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
+ config &= ~ENABLE_INTERNAL_LOOPBACK;
+ else if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
+ config &= ~ENABLE_EXTERNAL_LOOPBACK;
+
+ config |= ENABLE_DCBX;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Restore default port config=%08X\n", __func__,
+ config));
+
+ ha->notify_idc_comp = 1;
+ if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP)
+ ha->notify_link_up_comp = 1;
+
+ status = qla4_83xx_set_port_config(ha, &config);
+ if (status != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "%s: Scheduling adapter reset\n",
+ __func__);
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ clear_bit(AF_LOOPBACK, &ha->flags);
+ goto exit_post_loopback_config;
+ }
+
+exit_post_loopback_config:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
+ STATUS(status)));
+ return status;
+}
+
+static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint8_t *rsp_ptr = NULL;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int wait_for_link = 1;
+ int status = QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (test_bit(AF_LOOPBACK, &ha->flags)) {
+ ql4_printk(KERN_INFO, ha, "%s: Loopback Diagnostics already in progress. Invalid Request\n",
+ __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ goto exit_loopback_cmd;
+ }
+
+ if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+ ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
+ __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ goto exit_loopback_cmd;
+ }
+
+ memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
+ sizeof(uint32_t) * MBOX_REG_COUNT);
+
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ status = qla4_83xx_pre_loopback_config(ha, mbox_cmd);
+ if (status != QLA_SUCCESS) {
+ bsg_reply->result = DID_ERROR << 16;
+ goto exit_loopback_cmd;
+ }
+
+ status = qla4_83xx_wait_for_loopback_config_comp(ha,
+ wait_for_link);
+ if (status != QLA_SUCCESS) {
+ bsg_reply->result = DID_TIME_OUT << 16;
+ goto restore;
+ }
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
+ __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
+ mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
+ mbox_cmd[7]));
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
+ &mbox_sts[0]);
+
+ if (status == QLA_SUCCESS)
+ bsg_reply->result = DID_OK << 16;
+ else
+ bsg_reply->result = DID_ERROR << 16;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
+ __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
+ mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
+ mbox_sts[7]));
+
+ /* Send mbox_sts to application */
+ bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
+ rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
+ memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));
+restore:
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ status = qla4_83xx_post_loopback_config(ha, mbox_cmd);
+ if (status != QLA_SUCCESS) {
+ bsg_reply->result = DID_ERROR << 16;
+ goto exit_loopback_cmd;
+ }
+
+ /* for pre_loopback_config() wait for LINK UP only
+ * if PHY LINK is UP */
+ if (!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP))
+ wait_for_link = 0;
+
+ status = qla4_83xx_wait_for_loopback_config_comp(ha,
+ wait_for_link);
+ if (status != QLA_SUCCESS) {
+ bsg_reply->result = DID_TIME_OUT << 16;
+ goto exit_loopback_cmd;
+ }
+ }
+exit_loopback_cmd:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: bsg_reply->result = x%x, status = %s\n",
+ __func__, bsg_reply->result, STATUS(status)));
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+}
+
+static int qla4xxx_execute_diag_test(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ uint32_t diag_cmd;
+ int rval = -EINVAL;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
+
+ diag_cmd = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ if (diag_cmd == MBOX_CMD_DIAG_TEST) {
+ switch (bsg_req->rqst_data.h_vendor.vendor_cmd[2]) {
+ case QL_DIAG_CMD_TEST_DDR_SIZE:
+ case QL_DIAG_CMD_TEST_DDR_RW:
+ case QL_DIAG_CMD_TEST_ONCHIP_MEM_RW:
+ case QL_DIAG_CMD_TEST_NVRAM:
+ case QL_DIAG_CMD_TEST_FLASH_ROM:
+ case QL_DIAG_CMD_TEST_DMA_XFER:
+ case QL_DIAG_CMD_SELF_DDR_RW:
+ case QL_DIAG_CMD_SELF_ONCHIP_MEM_RW:
+ /* Execute diag test for adapter RAM/FLASH */
+ ql4xxx_execute_diag_cmd(bsg_job);
+ /* Always return success as we want to send bsg_reply
+ * to the application */
+ rval = QLA_SUCCESS;
+ break;
+
+ case QL_DIAG_CMD_TEST_INT_LOOPBACK:
+ case QL_DIAG_CMD_TEST_EXT_LOOPBACK:
+ /* Execute diag test for Network */
+ qla4xxx_execute_diag_loopback_cmd(bsg_job);
+ /* Always return success as we want to send bsg_reply
+ * to the application */
+ rval = QLA_SUCCESS;
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha, "%s: Invalid diag test: 0x%x\n",
+ __func__,
+ bsg_req->rqst_data.h_vendor.vendor_cmd[2]);
+ }
+ } else if ((diag_cmd == MBOX_CMD_SET_LED_CONFIG) ||
+ (diag_cmd == MBOX_CMD_GET_LED_CONFIG)) {
+ ql4xxx_execute_diag_cmd(bsg_job);
+ rval = QLA_SUCCESS;
+ } else {
+ ql4_printk(KERN_ERR, ha, "%s: Invalid diag cmd: 0x%x\n",
+ __func__, diag_cmd);
+ }
+
+ return rval;
+}
+
/**
* qla4xxx_process_vendor_specific - handle vendor specific bsg request
* @job: iscsi_bsg_job to handle
@@ -479,6 +836,9 @@ int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
case QLISCSI_VND_GET_ACB:
return qla4xxx_bsg_get_acb(bsg_job);
+ case QLISCSI_VND_DIAG_TEST:
+ return qla4xxx_execute_diag_test(bsg_job);
+
default:
ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
"0x%x\n", __func__, bsg_req->msgcode);
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.h b/drivers/scsi/qla4xxx/ql4_bsg.h
index c6a0364509fd..88c2401910c0 100644
--- a/drivers/scsi/qla4xxx/ql4_bsg.h
+++ b/drivers/scsi/qla4xxx/ql4_bsg.h
@@ -15,5 +15,18 @@
#define QLISCSI_VND_UPDATE_NVRAM 5
#define QLISCSI_VND_RESTORE_DEFAULTS 6
#define QLISCSI_VND_GET_ACB 7
+#define QLISCSI_VND_DIAG_TEST 8
+
+/* QLISCSI_VND_DIAG_CMD sub code */
+#define QL_DIAG_CMD_TEST_DDR_SIZE 0x2
+#define QL_DIAG_CMD_TEST_DDR_RW 0x3
+#define QL_DIAG_CMD_TEST_ONCHIP_MEM_RW 0x4
+#define QL_DIAG_CMD_TEST_NVRAM 0x5 /* Only ISP4XXX */
+#define QL_DIAG_CMD_TEST_FLASH_ROM 0x6
+#define QL_DIAG_CMD_TEST_INT_LOOPBACK 0x7
+#define QL_DIAG_CMD_TEST_EXT_LOOPBACK 0x8
+#define QL_DIAG_CMD_TEST_DMA_XFER 0x9 /* Only ISP4XXX */
+#define QL_DIAG_CMD_SELF_DDR_RW 0xC
+#define QL_DIAG_CMD_SELF_ONCHIP_MEM_RW 0xD
#endif
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 084d1fd59c9e..aa67bb9a4426 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -73,6 +73,7 @@
#define QLA_SUCCESS 0
#define QLA_ERROR 1
+#define STATUS(status) status == QLA_ERROR ? "FAILED" : "SUCCEEDED"
/*
* Data bit definitions
@@ -179,6 +180,10 @@
n &= ~v; \
}
+#define OP_STATE(o, f, p) { \
+ p = (o & f) ? "enable" : "disable"; \
+}
+
/*
* Retry & Timeout Values
*/
@@ -206,6 +211,8 @@
#define MAX_RESET_HA_RETRIES 2
#define FW_ALIVE_WAIT_TOV 3
#define IDC_EXTEND_TOV 8
+#define IDC_COMP_TOV 5
+#define LINK_UP_COMP_TOV 30
#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
@@ -476,6 +483,34 @@ struct ipaddress_config {
uint16_t eth_mtu_size;
uint16_t ipv4_port;
uint16_t ipv6_port;
+ uint8_t control;
+ uint16_t ipv6_tcp_options;
+ uint8_t tcp_wsf;
+ uint8_t ipv6_tcp_wsf;
+ uint8_t ipv4_tos;
+ uint8_t ipv4_cache_id;
+ uint8_t ipv6_cache_id;
+ uint8_t ipv4_alt_cid_len;
+ uint8_t ipv4_alt_cid[11];
+ uint8_t ipv4_vid_len;
+ uint8_t ipv4_vid[11];
+ uint8_t ipv4_ttl;
+ uint16_t ipv6_flow_lbl;
+ uint8_t ipv6_traffic_class;
+ uint8_t ipv6_hop_limit;
+ uint32_t ipv6_nd_reach_time;
+ uint32_t ipv6_nd_rexmit_timer;
+ uint32_t ipv6_nd_stale_timeout;
+ uint8_t ipv6_dup_addr_detect_count;
+ uint32_t ipv6_gw_advrt_mtu;
+ uint16_t def_timeout;
+ uint8_t abort_timer;
+ uint16_t iscsi_options;
+ uint16_t iscsi_max_pdu_size;
+ uint16_t iscsi_first_burst_len;
+ uint16_t iscsi_max_outstnd_r2t;
+ uint16_t iscsi_max_burst_len;
+ uint8_t iscsi_name[224];
};
#define QL4_CHAP_MAX_NAME_LEN 256
@@ -790,6 +825,11 @@ struct scsi_qla_host {
uint32_t pf_bit;
struct qla4_83xx_idc_information idc_info;
struct addr_ctrl_blk *saved_acb;
+ int notify_idc_comp;
+ int notify_link_up_comp;
+ int idc_extend_tmo;
+ struct completion idc_comp;
+ struct completion link_up_comp;
};
struct ql4_task_data {
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 1243e5942b76..8d4092b33c07 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -410,6 +410,7 @@ struct qla_flt_region {
#define DDB_DS_LOGIN_IN_PROCESS 0x07
#define MBOX_CMD_GET_FW_STATE 0x0069
#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A
+#define MBOX_CMD_DIAG_TEST 0x0075
#define MBOX_CMD_GET_SYS_INFO 0x0078
#define MBOX_CMD_GET_NVRAM 0x0078 /* For 40xx */
#define MBOX_CMD_SET_NVRAM 0x0079 /* For 40xx */
@@ -425,8 +426,17 @@ struct qla_flt_region {
#define MBOX_CMD_GET_IP_ADDR_STATE 0x0091
#define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092
#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093
+#define MBOX_CMD_SET_PORT_CONFIG 0x0122
+#define MBOX_CMD_GET_PORT_CONFIG 0x0123
+#define MBOX_CMD_SET_LED_CONFIG 0x0125
+#define MBOX_CMD_GET_LED_CONFIG 0x0126
#define MBOX_CMD_MINIDUMP 0x0129
+/* Port Config */
+#define ENABLE_INTERNAL_LOOPBACK 0x04
+#define ENABLE_EXTERNAL_LOOPBACK 0x08
+#define ENABLE_DCBX 0x10
+
/* Minidump subcommand */
#define MINIDUMP_GET_SIZE_SUBCOMMAND 0x00
#define MINIDUMP_GET_TMPLT_SUBCOMMAND 0x01
@@ -535,10 +545,6 @@ struct qla_flt_region {
#define FLASH_OPT_COMMIT 2
#define FLASH_OPT_RMW_COMMIT 3
-/* Loopback type */
-#define ENABLE_INTERNAL_LOOPBACK 0x04
-#define ENABLE_EXTERNAL_LOOPBACK 0x08
-
/* generic defines to enable/disable params */
#define QL4_PARAM_DISABLE 0
#define QL4_PARAM_ENABLE 1
@@ -551,6 +557,7 @@ struct addr_ctrl_blk {
#define IFCB_VER_MIN 0x01
#define IFCB_VER_MAX 0x02
uint8_t control; /* 01 */
+#define CTRLOPT_NEW_CONN_DISABLE 0x0002
uint16_t fw_options; /* 02-03 */
#define FWOPT_HEARTBEAT_ENABLE 0x1000
@@ -582,11 +589,40 @@ struct addr_ctrl_blk {
uint32_t shdwreg_addr_hi; /* 2C-2F */
uint16_t iscsi_opts; /* 30-31 */
+#define ISCSIOPTS_HEADER_DIGEST_EN 0x2000
+#define ISCSIOPTS_DATA_DIGEST_EN 0x1000
+#define ISCSIOPTS_IMMEDIATE_DATA_EN 0x0800
+#define ISCSIOPTS_INITIAL_R2T_EN 0x0400
+#define ISCSIOPTS_DATA_SEQ_INORDER_EN 0x0200
+#define ISCSIOPTS_DATA_PDU_INORDER_EN 0x0100
+#define ISCSIOPTS_CHAP_AUTH_EN 0x0080
+#define ISCSIOPTS_SNACK_EN 0x0040
+#define ISCSIOPTS_DISCOVERY_LOGOUT_EN 0x0020
+#define ISCSIOPTS_BIDI_CHAP_EN 0x0010
+#define ISCSIOPTS_DISCOVERY_AUTH_EN 0x0008
+#define ISCSIOPTS_STRICT_LOGIN_COMP_EN 0x0004
+#define ISCSIOPTS_ERL 0x0003
uint16_t ipv4_tcp_opts; /* 32-33 */
+#define TCPOPT_DELAYED_ACK_DISABLE 0x8000
#define TCPOPT_DHCP_ENABLE 0x0200
+#define TCPOPT_DNS_SERVER_IP_EN 0x0100
+#define TCPOPT_SLP_DA_INFO_EN 0x0080
+#define TCPOPT_NAGLE_ALGO_DISABLE 0x0020
+#define TCPOPT_WINDOW_SCALE_DISABLE 0x0010
+#define TCPOPT_TIMER_SCALE 0x000E
+#define TCPOPT_TIMESTAMP_ENABLE 0x0001
uint16_t ipv4_ip_opts; /* 34-35 */
#define IPOPT_IPV4_PROTOCOL_ENABLE 0x8000
+#define IPOPT_IPV4_TOS_EN 0x4000
#define IPOPT_VLAN_TAGGING_ENABLE 0x2000
+#define IPOPT_GRAT_ARP_EN 0x1000
+#define IPOPT_ALT_CID_EN 0x0800
+#define IPOPT_REQ_VID_EN 0x0400
+#define IPOPT_USE_VID_EN 0x0200
+#define IPOPT_LEARN_IQN_EN 0x0100
+#define IPOPT_FRAGMENTATION_DISABLE 0x0010
+#define IPOPT_IN_FORWARD_EN 0x0008
+#define IPOPT_ARP_REDIRECT_EN 0x0004
uint16_t iscsi_max_pdu_size; /* 36-37 */
uint8_t ipv4_tos; /* 38 */
@@ -637,15 +673,24 @@ struct addr_ctrl_blk {
uint32_t cookie; /* 200-203 */
uint16_t ipv6_port; /* 204-205 */
uint16_t ipv6_opts; /* 206-207 */
-#define IPV6_OPT_IPV6_PROTOCOL_ENABLE 0x8000
-#define IPV6_OPT_VLAN_TAGGING_ENABLE 0x2000
+#define IPV6_OPT_IPV6_PROTOCOL_ENABLE 0x8000
+#define IPV6_OPT_VLAN_TAGGING_ENABLE 0x2000
+#define IPV6_OPT_GRAT_NEIGHBOR_ADV_EN 0x1000
+#define IPV6_OPT_REDIRECT_EN 0x0004
uint16_t ipv6_addtl_opts; /* 208-209 */
+#define IPV6_ADDOPT_IGNORE_ICMP_ECHO_REQ 0x0040
+#define IPV6_ADDOPT_MLD_EN 0x0004
#define IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE 0x0002 /* Pri ACB
Only */
#define IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR 0x0001
uint16_t ipv6_tcp_opts; /* 20A-20B */
+#define IPV6_TCPOPT_DELAYED_ACK_DISABLE 0x8000
+#define IPV6_TCPOPT_NAGLE_ALGO_DISABLE 0x0020
+#define IPV6_TCPOPT_WINDOW_SCALE_DISABLE 0x0010
+#define IPV6_TCPOPT_TIMER_SCALE 0x000E
+#define IPV6_TCPOPT_TIMESTAMP_EN 0x0001
uint8_t ipv6_tcp_wsf; /* 20C */
uint16_t ipv6_flow_lbl; /* 20D-20F */
uint8_t ipv6_dflt_rtr_addr[16]; /* 210-21F */
@@ -1252,7 +1297,88 @@ struct response {
};
struct ql_iscsi_stats {
- uint8_t reserved1[656]; /* 0000-028F */
+ uint64_t mac_tx_frames; /* 0000–0007 */
+ uint64_t mac_tx_bytes; /* 0008–000F */
+ uint64_t mac_tx_multicast_frames; /* 0010–0017 */
+ uint64_t mac_tx_broadcast_frames; /* 0018–001F */
+ uint64_t mac_tx_pause_frames; /* 0020–0027 */
+ uint64_t mac_tx_control_frames; /* 0028–002F */
+ uint64_t mac_tx_deferral; /* 0030–0037 */
+ uint64_t mac_tx_excess_deferral; /* 0038–003F */
+ uint64_t mac_tx_late_collision; /* 0040–0047 */
+ uint64_t mac_tx_abort; /* 0048–004F */
+ uint64_t mac_tx_single_collision; /* 0050–0057 */
+ uint64_t mac_tx_multiple_collision; /* 0058–005F */
+ uint64_t mac_tx_collision; /* 0060–0067 */
+ uint64_t mac_tx_frames_dropped; /* 0068–006F */
+ uint64_t mac_tx_jumbo_frames; /* 0070–0077 */
+ uint64_t mac_rx_frames; /* 0078–007F */
+ uint64_t mac_rx_bytes; /* 0080–0087 */
+ uint64_t mac_rx_unknown_control_frames; /* 0088–008F */
+ uint64_t mac_rx_pause_frames; /* 0090–0097 */
+ uint64_t mac_rx_control_frames; /* 0098–009F */
+ uint64_t mac_rx_dribble; /* 00A0–00A7 */
+ uint64_t mac_rx_frame_length_error; /* 00A8–00AF */
+ uint64_t mac_rx_jabber; /* 00B0–00B7 */
+ uint64_t mac_rx_carrier_sense_error; /* 00B8–00BF */
+ uint64_t mac_rx_frame_discarded; /* 00C0–00C7 */
+ uint64_t mac_rx_frames_dropped; /* 00C8–00CF */
+ uint64_t mac_crc_error; /* 00D0–00D7 */
+ uint64_t mac_encoding_error; /* 00D8–00DF */
+ uint64_t mac_rx_length_error_large; /* 00E0–00E7 */
+ uint64_t mac_rx_length_error_small; /* 00E8–00EF */
+ uint64_t mac_rx_multicast_frames; /* 00F0–00F7 */
+ uint64_t mac_rx_broadcast_frames; /* 00F8–00FF */
+ uint64_t ip_tx_packets; /* 0100–0107 */
+ uint64_t ip_tx_bytes; /* 0108–010F */
+ uint64_t ip_tx_fragments; /* 0110–0117 */
+ uint64_t ip_rx_packets; /* 0118–011F */
+ uint64_t ip_rx_bytes; /* 0120–0127 */
+ uint64_t ip_rx_fragments; /* 0128–012F */
+ uint64_t ip_datagram_reassembly; /* 0130–0137 */
+ uint64_t ip_invalid_address_error; /* 0138–013F */
+ uint64_t ip_error_packets; /* 0140–0147 */
+ uint64_t ip_fragrx_overlap; /* 0148–014F */
+ uint64_t ip_fragrx_outoforder; /* 0150–0157 */
+ uint64_t ip_datagram_reassembly_timeout; /* 0158–015F */
+ uint64_t ipv6_tx_packets; /* 0160–0167 */
+ uint64_t ipv6_tx_bytes; /* 0168–016F */
+ uint64_t ipv6_tx_fragments; /* 0170–0177 */
+ uint64_t ipv6_rx_packets; /* 0178–017F */
+ uint64_t ipv6_rx_bytes; /* 0180–0187 */
+ uint64_t ipv6_rx_fragments; /* 0188–018F */
+ uint64_t ipv6_datagram_reassembly; /* 0190–0197 */
+ uint64_t ipv6_invalid_address_error; /* 0198–019F */
+ uint64_t ipv6_error_packets; /* 01A0–01A7 */
+ uint64_t ipv6_fragrx_overlap; /* 01A8–01AF */
+ uint64_t ipv6_fragrx_outoforder; /* 01B0–01B7 */
+ uint64_t ipv6_datagram_reassembly_timeout; /* 01B8–01BF */
+ uint64_t tcp_tx_segments; /* 01C0–01C7 */
+ uint64_t tcp_tx_bytes; /* 01C8–01CF */
+ uint64_t tcp_rx_segments; /* 01D0–01D7 */
+ uint64_t tcp_rx_byte; /* 01D8–01DF */
+ uint64_t tcp_duplicate_ack_retx; /* 01E0–01E7 */
+ uint64_t tcp_retx_timer_expired; /* 01E8–01EF */
+ uint64_t tcp_rx_duplicate_ack; /* 01F0–01F7 */
+ uint64_t tcp_rx_pure_ackr; /* 01F8–01FF */
+ uint64_t tcp_tx_delayed_ack; /* 0200–0207 */
+ uint64_t tcp_tx_pure_ack; /* 0208–020F */
+ uint64_t tcp_rx_segment_error; /* 0210–0217 */
+ uint64_t tcp_rx_segment_outoforder; /* 0218–021F */
+ uint64_t tcp_rx_window_probe; /* 0220–0227 */
+ uint64_t tcp_rx_window_update; /* 0228–022F */
+ uint64_t tcp_tx_window_probe_persist; /* 0230–0237 */
+ uint64_t ecc_error_correction; /* 0238–023F */
+ uint64_t iscsi_pdu_tx; /* 0240-0247 */
+ uint64_t iscsi_data_bytes_tx; /* 0248-024F */
+ uint64_t iscsi_pdu_rx; /* 0250-0257 */
+ uint64_t iscsi_data_bytes_rx; /* 0258-025F */
+ uint64_t iscsi_io_completed; /* 0260-0267 */
+ uint64_t iscsi_unexpected_io_rx; /* 0268-026F */
+ uint64_t iscsi_format_error; /* 0270-0277 */
+ uint64_t iscsi_hdr_digest_error; /* 0278-027F */
+ uint64_t iscsi_data_digest_error; /* 0280-0287 */
+ uint64_t iscsi_sequence_error; /* 0288-028F */
uint32_t tx_cmd_pdu; /* 0290-0293 */
uint32_t tx_resp_pdu; /* 0294-0297 */
uint32_t rx_cmd_pdu; /* 0298-029B */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 5cef2527180a..d67c50e0b896 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -276,6 +276,9 @@ int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config);
int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha,
uint64_t addr, uint32_t *data, uint32_t count);
+uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state);
+int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config);
+int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 7dff09f09b71..a3c8bc7706c2 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -606,6 +606,36 @@ static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
return rval;
}
+static void qla4xxx_update_ipaddr_state(struct scsi_qla_host *ha,
+ uint32_t ipaddr_idx,
+ uint32_t ipaddr_fw_state)
+{
+ uint8_t ipaddr_state;
+ uint8_t ip_idx;
+
+ ip_idx = ipaddr_idx & 0xF;
+ ipaddr_state = qla4xxx_set_ipaddr_state((uint8_t)ipaddr_fw_state);
+
+ switch (ip_idx) {
+ case 0:
+ ha->ip_config.ipv4_addr_state = ipaddr_state;
+ break;
+ case 1:
+ ha->ip_config.ipv6_link_local_state = ipaddr_state;
+ break;
+ case 2:
+ ha->ip_config.ipv6_addr0_state = ipaddr_state;
+ break;
+ case 3:
+ ha->ip_config.ipv6_addr1_state = ipaddr_state;
+ break;
+ default:
+ ql4_printk(KERN_INFO, ha, "%s: Invalid IPADDR index %d\n",
+ __func__, ip_idx);
+ }
+}
+
+
/**
* qla4xxx_isr_decode_mailbox - decodes mailbox status
* @ha: Pointer to host adapter structure.
@@ -620,6 +650,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
int i;
uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
__le32 __iomem *mailbox_out;
+ uint32_t opcode = 0;
if (is_qla8032(ha) || is_qla8042(ha))
mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0];
@@ -698,6 +729,11 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP,
sizeof(mbox_sts),
(uint8_t *) mbox_sts);
+
+ if ((is_qla8032(ha) || is_qla8042(ha)) &&
+ ha->notify_link_up_comp)
+ complete(&ha->link_up_comp);
+
break;
case MBOX_ASTS_LINK_DOWN:
@@ -741,6 +777,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
"mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0],
mbox_sts[2], mbox_sts[3]);
+ qla4xxx_update_ipaddr_state(ha, mbox_sts[5],
+ mbox_sts[3]);
/* mbox_sts[2] = Old ACB state
* mbox_sts[3] = new ACB state */
if ((mbox_sts[3] == ACB_STATE_VALID) &&
@@ -841,8 +879,6 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
break;
case MBOX_ASTS_IDC_REQUEST_NOTIFICATION:
- {
- uint32_t opcode;
if (is_qla8032(ha) || is_qla8042(ha)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
@@ -862,7 +898,6 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
}
}
break;
- }
case MBOX_ASTS_IDC_COMPLETE:
if (is_qla8032(ha) || is_qla8042(ha)) {
@@ -875,6 +910,14 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
"scsi:%ld: AEN %04x IDC Complete notification\n",
ha->host_no, mbox_sts[0]));
+ opcode = mbox_sts[1] >> 16;
+ if (ha->notify_idc_comp)
+ complete(&ha->idc_comp);
+
+ if ((opcode == MBOX_CMD_SET_PORT_CONFIG) ||
+ (opcode == MBOX_CMD_PORT_RESET))
+ ha->idc_info.info2 = mbox_sts[3];
+
if (qla4_83xx_loopback_in_progress(ha)) {
set_bit(AF_LOOPBACK, &ha->flags);
} else {
@@ -907,6 +950,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: AEN %04x Received IDC Extend Timeout notification\n",
ha->host_no, mbox_sts[0]));
+ /* new IDC timeout */
+ ha->idc_extend_tmo = mbox_sts[1];
break;
case MBOX_ASTS_INITIALIZATION_FAILED:
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 22cbd005bdf4..9ae8ca3b69f9 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -418,6 +418,38 @@ qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
return QLA_SUCCESS;
}
+uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state)
+{
+ uint8_t ipaddr_state;
+
+ switch (fw_ipaddr_state) {
+ case IP_ADDRSTATE_UNCONFIGURED:
+ ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED;
+ break;
+ case IP_ADDRSTATE_INVALID:
+ ipaddr_state = ISCSI_IPDDRESS_STATE_INVALID;
+ break;
+ case IP_ADDRSTATE_ACQUIRING:
+ ipaddr_state = ISCSI_IPDDRESS_STATE_ACQUIRING;
+ break;
+ case IP_ADDRSTATE_TENTATIVE:
+ ipaddr_state = ISCSI_IPDDRESS_STATE_TENTATIVE;
+ break;
+ case IP_ADDRSTATE_DEPRICATED:
+ ipaddr_state = ISCSI_IPDDRESS_STATE_DEPRECATED;
+ break;
+ case IP_ADDRSTATE_PREFERRED:
+ ipaddr_state = ISCSI_IPDDRESS_STATE_VALID;
+ break;
+ case IP_ADDRSTATE_DISABLING:
+ ipaddr_state = ISCSI_IPDDRESS_STATE_DISABLING;
+ break;
+ default:
+ ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED;
+ }
+ return ipaddr_state;
+}
+
static void
qla4xxx_update_local_ip(struct scsi_qla_host *ha,
struct addr_ctrl_blk *init_fw_cb)
@@ -425,7 +457,7 @@ qla4xxx_update_local_ip(struct scsi_qla_host *ha,
ha->ip_config.tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);
ha->ip_config.ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);
ha->ip_config.ipv4_addr_state =
- le16_to_cpu(init_fw_cb->ipv4_addr_state);
+ qla4xxx_set_ipaddr_state(init_fw_cb->ipv4_addr_state);
ha->ip_config.eth_mtu_size =
le16_to_cpu(init_fw_cb->eth_mtu_size);
ha->ip_config.ipv4_port = le16_to_cpu(init_fw_cb->ipv4_port);
@@ -434,6 +466,8 @@ qla4xxx_update_local_ip(struct scsi_qla_host *ha,
ha->ip_config.ipv6_options = le16_to_cpu(init_fw_cb->ipv6_opts);
ha->ip_config.ipv6_addl_options =
le16_to_cpu(init_fw_cb->ipv6_addtl_opts);
+ ha->ip_config.ipv6_tcp_options =
+ le16_to_cpu(init_fw_cb->ipv6_tcp_opts);
}
/* Save IPv4 Address Info */
@@ -448,17 +482,65 @@ qla4xxx_update_local_ip(struct scsi_qla_host *ha,
sizeof(init_fw_cb->ipv4_gw_addr)));
ha->ip_config.ipv4_vlan_tag = be16_to_cpu(init_fw_cb->ipv4_vlan_tag);
+ ha->ip_config.control = init_fw_cb->control;
+ ha->ip_config.tcp_wsf = init_fw_cb->ipv4_tcp_wsf;
+ ha->ip_config.ipv4_tos = init_fw_cb->ipv4_tos;
+ ha->ip_config.ipv4_cache_id = init_fw_cb->ipv4_cacheid;
+ ha->ip_config.ipv4_alt_cid_len = init_fw_cb->ipv4_dhcp_alt_cid_len;
+ memcpy(ha->ip_config.ipv4_alt_cid, init_fw_cb->ipv4_dhcp_alt_cid,
+ min(sizeof(ha->ip_config.ipv4_alt_cid),
+ sizeof(init_fw_cb->ipv4_dhcp_alt_cid)));
+ ha->ip_config.ipv4_vid_len = init_fw_cb->ipv4_dhcp_vid_len;
+ memcpy(ha->ip_config.ipv4_vid, init_fw_cb->ipv4_dhcp_vid,
+ min(sizeof(ha->ip_config.ipv4_vid),
+ sizeof(init_fw_cb->ipv4_dhcp_vid)));
+ ha->ip_config.ipv4_ttl = init_fw_cb->ipv4_ttl;
+ ha->ip_config.def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
+ ha->ip_config.abort_timer = init_fw_cb->abort_timer;
+ ha->ip_config.iscsi_options = le16_to_cpu(init_fw_cb->iscsi_opts);
+ ha->ip_config.iscsi_max_pdu_size =
+ le16_to_cpu(init_fw_cb->iscsi_max_pdu_size);
+ ha->ip_config.iscsi_first_burst_len =
+ le16_to_cpu(init_fw_cb->iscsi_fburst_len);
+ ha->ip_config.iscsi_max_outstnd_r2t =
+ le16_to_cpu(init_fw_cb->iscsi_max_outstnd_r2t);
+ ha->ip_config.iscsi_max_burst_len =
+ le16_to_cpu(init_fw_cb->iscsi_max_burst_len);
+ memcpy(ha->ip_config.iscsi_name, init_fw_cb->iscsi_name,
+ min(sizeof(ha->ip_config.iscsi_name),
+ sizeof(init_fw_cb->iscsi_name)));
if (is_ipv6_enabled(ha)) {
/* Save IPv6 Address */
ha->ip_config.ipv6_link_local_state =
- le16_to_cpu(init_fw_cb->ipv6_lnk_lcl_addr_state);
+ qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_lnk_lcl_addr_state);
ha->ip_config.ipv6_addr0_state =
- le16_to_cpu(init_fw_cb->ipv6_addr0_state);
+ qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr0_state);
ha->ip_config.ipv6_addr1_state =
- le16_to_cpu(init_fw_cb->ipv6_addr1_state);
- ha->ip_config.ipv6_default_router_state =
- le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state);
+ qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr1_state);
+
+ switch (le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state)) {
+ case IPV6_RTRSTATE_UNKNOWN:
+ ha->ip_config.ipv6_default_router_state =
+ ISCSI_ROUTER_STATE_UNKNOWN;
+ break;
+ case IPV6_RTRSTATE_MANUAL:
+ ha->ip_config.ipv6_default_router_state =
+ ISCSI_ROUTER_STATE_MANUAL;
+ break;
+ case IPV6_RTRSTATE_ADVERTISED:
+ ha->ip_config.ipv6_default_router_state =
+ ISCSI_ROUTER_STATE_ADVERTISED;
+ break;
+ case IPV6_RTRSTATE_STALE:
+ ha->ip_config.ipv6_default_router_state =
+ ISCSI_ROUTER_STATE_STALE;
+ break;
+ default:
+ ha->ip_config.ipv6_default_router_state =
+ ISCSI_ROUTER_STATE_UNKNOWN;
+ }
+
ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;
ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80;
@@ -479,6 +561,23 @@ qla4xxx_update_local_ip(struct scsi_qla_host *ha,
ha->ip_config.ipv6_vlan_tag =
be16_to_cpu(init_fw_cb->ipv6_vlan_tag);
ha->ip_config.ipv6_port = le16_to_cpu(init_fw_cb->ipv6_port);
+ ha->ip_config.ipv6_cache_id = init_fw_cb->ipv6_cache_id;
+ ha->ip_config.ipv6_flow_lbl =
+ le16_to_cpu(init_fw_cb->ipv6_flow_lbl);
+ ha->ip_config.ipv6_traffic_class =
+ init_fw_cb->ipv6_traffic_class;
+ ha->ip_config.ipv6_hop_limit = init_fw_cb->ipv6_hop_limit;
+ ha->ip_config.ipv6_nd_reach_time =
+ le32_to_cpu(init_fw_cb->ipv6_nd_reach_time);
+ ha->ip_config.ipv6_nd_rexmit_timer =
+ le32_to_cpu(init_fw_cb->ipv6_nd_rexmit_timer);
+ ha->ip_config.ipv6_nd_stale_timeout =
+ le32_to_cpu(init_fw_cb->ipv6_nd_stale_timeout);
+ ha->ip_config.ipv6_dup_addr_detect_count =
+ init_fw_cb->ipv6_dup_addr_detect_count;
+ ha->ip_config.ipv6_gw_advrt_mtu =
+ le32_to_cpu(init_fw_cb->ipv6_gw_advrt_mtu);
+ ha->ip_config.ipv6_tcp_wsf = init_fw_cb->ipv6_tcp_wsf;
}
}
@@ -2317,3 +2416,46 @@ exit_config_acb:
rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
return rval;
}
+
+int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_GET_PORT_CONFIG;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+ mbox_cmd, mbox_sts);
+ if (status == QLA_SUCCESS)
+ *config = mbox_sts[1];
+ else
+ ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
+ mbox_sts[0]);
+
+ return status;
+}
+
+int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_SET_PORT_CONFIG;
+ mbox_cmd[1] = *config;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+ mbox_cmd, mbox_sts);
+ if (status != QLA_SUCCESS)
+ ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
+ mbox_sts[0]);
+
+ return status;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index a28d5e624aab..c21adc338cf1 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -151,6 +151,7 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data,
int len);
+static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len);
/*
* SCSI host template entry points
@@ -262,6 +263,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
.login_flashnode = qla4xxx_sysfs_ddb_login,
.logout_flashnode = qla4xxx_sysfs_ddb_logout,
.logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid,
+ .get_host_stats = qla4xxx_get_host_stats,
};
static struct scsi_transport_template *qla4xxx_scsi_transport;
@@ -419,6 +421,7 @@ static umode_t qla4_attr_is_visible(int param_type, int param)
case ISCSI_PARAM_EXP_STATSN:
case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
+ case ISCSI_PARAM_LOCAL_IPADDR:
return S_IRUGO;
default:
return 0;
@@ -440,6 +443,65 @@ static umode_t qla4_attr_is_visible(int param_type, int param)
case ISCSI_NET_PARAM_VLAN_ENABLED:
case ISCSI_NET_PARAM_MTU:
case ISCSI_NET_PARAM_PORT:
+ case ISCSI_NET_PARAM_IPADDR_STATE:
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
+ case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
+ case ISCSI_NET_PARAM_DELAYED_ACK_EN:
+ case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
+ case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
+ case ISCSI_NET_PARAM_TCP_WSF:
+ case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
+ case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
+ case ISCSI_NET_PARAM_CACHE_ID:
+ case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
+ case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
+ case ISCSI_NET_PARAM_IPV4_TOS_EN:
+ case ISCSI_NET_PARAM_IPV4_TOS:
+ case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
+ case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
+ case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
+ case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
+ case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
+ case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
+ case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
+ case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
+ case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
+ case ISCSI_NET_PARAM_REDIRECT_EN:
+ case ISCSI_NET_PARAM_IPV4_TTL:
+ case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
+ case ISCSI_NET_PARAM_IPV6_MLD_EN:
+ case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
+ case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
+ case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
+ case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
+ case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
+ case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
+ case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
+ case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_IFACE_PARAM:
+ switch (param) {
+ case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
+ case ISCSI_IFACE_PARAM_HDRDGST_EN:
+ case ISCSI_IFACE_PARAM_DATADGST_EN:
+ case ISCSI_IFACE_PARAM_IMM_DATA_EN:
+ case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
+ case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
+ case ISCSI_IFACE_PARAM_ERL:
+ case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_IFACE_PARAM_FIRST_BURST:
+ case ISCSI_IFACE_PARAM_MAX_R2T:
+ case ISCSI_IFACE_PARAM_MAX_BURST:
+ case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
+ case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
+ case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
+ case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
+ case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
+ case ISCSI_IFACE_PARAM_INITIATOR_NAME:
return S_IRUGO;
default:
return 0;
@@ -511,6 +573,65 @@ static umode_t qla4_attr_is_visible(int param_type, int param)
return 0;
}
+/**
+ * qla4xxx_create_chap_list - Create CHAP list from FLASH
+ * @ha: pointer to adapter structure
+ *
+ * Read flash and make a list of CHAP entries. During login, when a CHAP entry
+ * is received it is checked against this list. If the entry exists, the CHAP
+ * entry index is set in the DDB. If the CHAP entry does not exist in this
+ * list, a new entry is added to the CHAP table in FLASH and the index
+ * obtained is used in the DDB.
+ **/
+static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
+{
+ int rval = 0;
+ uint8_t *chap_flash_data = NULL;
+ uint32_t offset;
+ dma_addr_t chap_dma;
+ uint32_t chap_size = 0;
+
+ if (is_qla40XX(ha))
+ chap_size = MAX_CHAP_ENTRIES_40XX *
+ sizeof(struct ql4_chap_table);
+ else /* Single region contains CHAP info for both
+ * ports which is divided into half for each port.
+ */
+ chap_size = ha->hw.flt_chap_size / 2;
+
+ chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
+ &chap_dma, GFP_KERNEL);
+ if (!chap_flash_data) {
+ ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
+ return;
+ }
+
+ if (is_qla40XX(ha)) {
+ offset = FLASH_CHAP_OFFSET;
+ } else {
+ offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+ if (ha->port_num == 1)
+ offset += chap_size;
+ }
+
+ rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
+ if (rval != QLA_SUCCESS)
+ goto exit_chap_list;
+
+ if (ha->chap_list == NULL)
+ ha->chap_list = vmalloc(chap_size);
+ if (ha->chap_list == NULL) {
+ ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
+ goto exit_chap_list;
+ }
+
+ memset(ha->chap_list, 0, chap_size);
+ memcpy(ha->chap_list, chap_flash_data, chap_size);
+
+exit_chap_list:
+ dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma);
+}
+
static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
int16_t chap_index,
struct ql4_chap_table **chap_entry)
@@ -624,6 +745,8 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
goto exit_get_chap_list;
}
+ qla4xxx_create_chap_list(ha);
+
chap_rec = (struct iscsi_chap_rec *) buf;
mutex_lock(&ha->chap_sem);
for (i = chap_tbl_idx; i < max_chap_entries; i++) {
@@ -802,6 +925,7 @@ static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
int type;
int rem = len;
int rc = 0;
+ int size;
memset(&chap_rec, 0, sizeof(chap_rec));
@@ -816,12 +940,14 @@ static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
chap_rec.chap_type = param_info->value[0];
break;
case ISCSI_CHAP_PARAM_USERNAME:
- memcpy(chap_rec.username, param_info->value,
- param_info->len);
+ size = min_t(size_t, sizeof(chap_rec.username),
+ param_info->len);
+ memcpy(chap_rec.username, param_info->value, size);
break;
case ISCSI_CHAP_PARAM_PASSWORD:
- memcpy(chap_rec.password, param_info->value,
- param_info->len);
+ size = min_t(size_t, sizeof(chap_rec.password),
+ param_info->len);
+ memcpy(chap_rec.password, param_info->value, size);
break;
case ISCSI_CHAP_PARAM_PASSWORD_LEN:
chap_rec.password_length = param_info->value[0];
@@ -888,113 +1014,646 @@ exit_set_chap:
return rc;
}
+
+static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct iscsi_offload_host_stats *host_stats = NULL;
+ int host_stats_size;
+ int ret = 0;
+ int ddb_idx = 0;
+ struct ql_iscsi_stats *ql_iscsi_stats = NULL;
+ int stats_size;
+ dma_addr_t iscsi_stats_dma;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__));
+
+ host_stats_size = sizeof(struct iscsi_offload_host_stats);
+
+ if (host_stats_size != len) {
+ ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n",
+ __func__, len, host_stats_size);
+ ret = -EINVAL;
+ goto exit_host_stats;
+ }
+ host_stats = (struct iscsi_offload_host_stats *)buf;
+
+ if (!buf) {
+ ret = -ENOMEM;
+ goto exit_host_stats;
+ }
+
+ stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
+
+ ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
+ &iscsi_stats_dma, GFP_KERNEL);
+ if (!ql_iscsi_stats) {
+ ql4_printk(KERN_ERR, ha,
+ "Unable to allocate memory for iscsi stats\n");
+ goto exit_host_stats;
+ }
+
+ ret = qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size,
+ iscsi_stats_dma);
+ if (ret != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha,
+ "Unable to retrieve iscsi stats\n");
+ goto exit_host_stats;
+ }
+ host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames);
+ host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes);
+ host_stats->mactx_multicast_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames);
+ host_stats->mactx_broadcast_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames);
+ host_stats->mactx_pause_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames);
+ host_stats->mactx_control_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames);
+ host_stats->mactx_deferral =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_deferral);
+ host_stats->mactx_excess_deferral =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral);
+ host_stats->mactx_late_collision =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision);
+ host_stats->mactx_abort = le64_to_cpu(ql_iscsi_stats->mac_tx_abort);
+ host_stats->mactx_single_collision =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision);
+ host_stats->mactx_multiple_collision =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision);
+ host_stats->mactx_collision =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_collision);
+ host_stats->mactx_frames_dropped =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped);
+ host_stats->mactx_jumbo_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames);
+ host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames);
+ host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes);
+ host_stats->macrx_unknown_control_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames);
+ host_stats->macrx_pause_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames);
+ host_stats->macrx_control_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames);
+ host_stats->macrx_dribble =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_dribble);
+ host_stats->macrx_frame_length_error =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error);
+ host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber);
+ host_stats->macrx_carrier_sense_error =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error);
+ host_stats->macrx_frame_discarded =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded);
+ host_stats->macrx_frames_dropped =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped);
+ host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error);
+ host_stats->mac_encoding_error =
+ le64_to_cpu(ql_iscsi_stats->mac_encoding_error);
+ host_stats->macrx_length_error_large =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large);
+ host_stats->macrx_length_error_small =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small);
+ host_stats->macrx_multicast_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames);
+ host_stats->macrx_broadcast_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames);
+ host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets);
+ host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes);
+ host_stats->iptx_fragments =
+ le64_to_cpu(ql_iscsi_stats->ip_tx_fragments);
+ host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets);
+ host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes);
+ host_stats->iprx_fragments =
+ le64_to_cpu(ql_iscsi_stats->ip_rx_fragments);
+ host_stats->ip_datagram_reassembly =
+ le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly);
+ host_stats->ip_invalid_address_error =
+ le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error);
+ host_stats->ip_error_packets =
+ le64_to_cpu(ql_iscsi_stats->ip_error_packets);
+ host_stats->ip_fragrx_overlap =
+ le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap);
+ host_stats->ip_fragrx_outoforder =
+ le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder);
+ host_stats->ip_datagram_reassembly_timeout =
+ le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout);
+ host_stats->ipv6tx_packets =
+ le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets);
+ host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes);
+ host_stats->ipv6tx_fragments =
+ le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments);
+ host_stats->ipv6rx_packets =
+ le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets);
+ host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes);
+ host_stats->ipv6rx_fragments =
+ le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments);
+ host_stats->ipv6_datagram_reassembly =
+ le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly);
+ host_stats->ipv6_invalid_address_error =
+ le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error);
+ host_stats->ipv6_error_packets =
+ le64_to_cpu(ql_iscsi_stats->ipv6_error_packets);
+ host_stats->ipv6_fragrx_overlap =
+ le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap);
+ host_stats->ipv6_fragrx_outoforder =
+ le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder);
+ host_stats->ipv6_datagram_reassembly_timeout =
+ le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout);
+ host_stats->tcptx_segments =
+ le64_to_cpu(ql_iscsi_stats->tcp_tx_segments);
+ host_stats->tcptx_bytes = le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes);
+ host_stats->tcprx_segments =
+ le64_to_cpu(ql_iscsi_stats->tcp_rx_segments);
+ host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte);
+ host_stats->tcp_duplicate_ack_retx =
+ le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx);
+ host_stats->tcp_retx_timer_expired =
+ le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired);
+ host_stats->tcprx_duplicate_ack =
+ le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack);
+ host_stats->tcprx_pure_ackr =
+ le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr);
+ host_stats->tcptx_delayed_ack =
+ le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack);
+ host_stats->tcptx_pure_ack =
+ le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack);
+ host_stats->tcprx_segment_error =
+ le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error);
+ host_stats->tcprx_segment_outoforder =
+ le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder);
+ host_stats->tcprx_window_probe =
+ le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe);
+ host_stats->tcprx_window_update =
+ le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update);
+ host_stats->tcptx_window_probe_persist =
+ le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist);
+ host_stats->ecc_error_correction =
+ le64_to_cpu(ql_iscsi_stats->ecc_error_correction);
+ host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx);
+ host_stats->iscsi_data_bytes_tx =
+ le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx);
+ host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx);
+ host_stats->iscsi_data_bytes_rx =
+ le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx);
+ host_stats->iscsi_io_completed =
+ le64_to_cpu(ql_iscsi_stats->iscsi_io_completed);
+ host_stats->iscsi_unexpected_io_rx =
+ le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx);
+ host_stats->iscsi_format_error =
+ le64_to_cpu(ql_iscsi_stats->iscsi_format_error);
+ host_stats->iscsi_hdr_digest_error =
+ le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error);
+ host_stats->iscsi_data_digest_error =
+ le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error);
+ host_stats->iscsi_sequence_error =
+ le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
+exit_host_stats:
+ if (ql_iscsi_stats)
+ dma_free_coherent(&ha->pdev->dev, host_stats_size,
+ ql_iscsi_stats, iscsi_stats_dma);
+
+ ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
+ __func__);
+ return ret;
+}
+
static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
enum iscsi_param_type param_type,
int param, char *buf)
{
struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
struct scsi_qla_host *ha = to_qla_host(shost);
+ int ival;
+ char *pval = NULL;
int len = -ENOSYS;
- if (param_type != ISCSI_NET_PARAM)
- return -ENOSYS;
+ if (param_type == ISCSI_NET_PARAM) {
+ switch (param) {
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
+ break;
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ len = sprintf(buf, "%pI4\n",
+ &ha->ip_config.subnet_mask);
+ break;
+ case ISCSI_NET_PARAM_IPV4_GW:
+ len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
+ break;
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_IPV4_PROTOCOL_ENABLE, pval);
+ } else {
+ OP_STATE(ha->ip_config.ipv6_options,
+ IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval);
+ }
- switch (param) {
- case ISCSI_NET_PARAM_IPV4_ADDR:
- len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
- break;
- case ISCSI_NET_PARAM_IPV4_SUBNET:
- len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
- break;
- case ISCSI_NET_PARAM_IPV4_GW:
- len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
- break;
- case ISCSI_NET_PARAM_IFACE_ENABLE:
- if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
- len = sprintf(buf, "%s\n",
- (ha->ip_config.ipv4_options &
- IPOPT_IPV4_PROTOCOL_ENABLE) ?
- "enabled" : "disabled");
- else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
len = sprintf(buf, "%s\n",
- (ha->ip_config.ipv6_options &
- IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
- "enabled" : "disabled");
- break;
- case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
- len = sprintf(buf, "%s\n",
- (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
- "dhcp" : "static");
- break;
- case ISCSI_NET_PARAM_IPV6_ADDR:
- if (iface->iface_num == 0)
- len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
- if (iface->iface_num == 1)
- len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
- break;
- case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
- len = sprintf(buf, "%pI6\n",
- &ha->ip_config.ipv6_link_local_addr);
- break;
- case ISCSI_NET_PARAM_IPV6_ROUTER:
- len = sprintf(buf, "%pI6\n",
- &ha->ip_config.ipv6_default_router_addr);
- break;
- case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
- len = sprintf(buf, "%s\n",
- (ha->ip_config.ipv6_addl_options &
- IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
- "nd" : "static");
- break;
- case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
- len = sprintf(buf, "%s\n",
- (ha->ip_config.ipv6_addl_options &
- IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
- "auto" : "static");
- break;
- case ISCSI_NET_PARAM_VLAN_ID:
- if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ (ha->ip_config.tcp_options &
+ TCPOPT_DHCP_ENABLE) ?
+ "dhcp" : "static");
+ break;
+ case ISCSI_NET_PARAM_IPV6_ADDR:
+ if (iface->iface_num == 0)
+ len = sprintf(buf, "%pI6\n",
+ &ha->ip_config.ipv6_addr0);
+ if (iface->iface_num == 1)
+ len = sprintf(buf, "%pI6\n",
+ &ha->ip_config.ipv6_addr1);
+ break;
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
+ len = sprintf(buf, "%pI6\n",
+ &ha->ip_config.ipv6_link_local_addr);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ROUTER:
+ len = sprintf(buf, "%pI6\n",
+ &ha->ip_config.ipv6_default_router_addr);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
+ pval = (ha->ip_config.ipv6_addl_options &
+ IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
+ "nd" : "static";
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
+ pval = (ha->ip_config.ipv6_addl_options &
+ IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
+ "auto" : "static";
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_VLAN_ID:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ ival = ha->ip_config.ipv4_vlan_tag &
+ ISCSI_MAX_VLAN_ID;
+ else
+ ival = ha->ip_config.ipv6_vlan_tag &
+ ISCSI_MAX_VLAN_ID;
+
+ len = sprintf(buf, "%d\n", ival);
+ break;
+ case ISCSI_NET_PARAM_VLAN_PRIORITY:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ ival = (ha->ip_config.ipv4_vlan_tag >> 13) &
+ ISCSI_MAX_VLAN_PRIORITY;
+ else
+ ival = (ha->ip_config.ipv6_vlan_tag >> 13) &
+ ISCSI_MAX_VLAN_PRIORITY;
+
+ len = sprintf(buf, "%d\n", ival);
+ break;
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_VLAN_TAGGING_ENABLE, pval);
+ } else {
+ OP_STATE(ha->ip_config.ipv6_options,
+ IPV6_OPT_VLAN_TAGGING_ENABLE, pval);
+ }
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_MTU:
+ len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
+ break;
+ case ISCSI_NET_PARAM_PORT:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv4_port);
+ else
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv6_port);
+ break;
+ case ISCSI_NET_PARAM_IPADDR_STATE:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ pval = iscsi_get_ipaddress_state_name(
+ ha->ip_config.ipv4_addr_state);
+ } else {
+ if (iface->iface_num == 0)
+ pval = iscsi_get_ipaddress_state_name(
+ ha->ip_config.ipv6_addr0_state);
+ else if (iface->iface_num == 1)
+ pval = iscsi_get_ipaddress_state_name(
+ ha->ip_config.ipv6_addr1_state);
+ }
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
+ pval = iscsi_get_ipaddress_state_name(
+ ha->ip_config.ipv6_link_local_state);
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
+ pval = iscsi_get_router_state_name(
+ ha->ip_config.ipv6_default_router_state);
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_DELAYED_ACK_EN:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ OP_STATE(~ha->ip_config.tcp_options,
+ TCPOPT_DELAYED_ACK_DISABLE, pval);
+ } else {
+ OP_STATE(~ha->ip_config.ipv6_tcp_options,
+ IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval);
+ }
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ OP_STATE(~ha->ip_config.tcp_options,
+ TCPOPT_NAGLE_ALGO_DISABLE, pval);
+ } else {
+ OP_STATE(~ha->ip_config.ipv6_tcp_options,
+ IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval);
+ }
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ OP_STATE(~ha->ip_config.tcp_options,
+ TCPOPT_WINDOW_SCALE_DISABLE, pval);
+ } else {
+ OP_STATE(~ha->ip_config.ipv6_tcp_options,
+ IPV6_TCPOPT_WINDOW_SCALE_DISABLE,
+ pval);
+ }
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_TCP_WSF:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.tcp_wsf);
+ else
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv6_tcp_wsf);
+ break;
+ case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ ival = (ha->ip_config.tcp_options &
+ TCPOPT_TIMER_SCALE) >> 1;
+ else
+ ival = (ha->ip_config.ipv6_tcp_options &
+ IPV6_TCPOPT_TIMER_SCALE) >> 1;
+
+ len = sprintf(buf, "%d\n", ival);
+ break;
+ case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ OP_STATE(ha->ip_config.tcp_options,
+ TCPOPT_TIMESTAMP_ENABLE, pval);
+ } else {
+ OP_STATE(ha->ip_config.ipv6_tcp_options,
+ IPV6_TCPOPT_TIMESTAMP_EN, pval);
+ }
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_CACHE_ID:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv4_cache_id);
+ else
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv6_cache_id);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
+ OP_STATE(ha->ip_config.tcp_options,
+ TCPOPT_DNS_SERVER_IP_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
+ OP_STATE(ha->ip_config.tcp_options,
+ TCPOPT_SLP_DA_INFO_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_TOS_EN:
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_IPV4_TOS_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_TOS:
+ len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos);
+ break;
+ case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_GRAT_ARP_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
+ OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN,
+ pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
+ pval = (ha->ip_config.ipv4_alt_cid_len) ?
+ (char *)ha->ip_config.ipv4_alt_cid : "";
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_REQ_VID_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_USE_VID_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
+ pval = (ha->ip_config.ipv4_vid_len) ?
+ (char *)ha->ip_config.ipv4_vid : "";
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_LEARN_IQN_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
+ OP_STATE(~ha->ip_config.ipv4_options,
+ IPOPT_FRAGMENTATION_DISABLE, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_IN_FORWARD_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_REDIRECT_EN:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_ARP_REDIRECT_EN, pval);
+ } else {
+ OP_STATE(ha->ip_config.ipv6_options,
+ IPV6_OPT_REDIRECT_EN, pval);
+ }
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_TTL:
+ len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl);
+ break;
+ case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
+ OP_STATE(ha->ip_config.ipv6_options,
+ IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV6_MLD_EN:
+ OP_STATE(ha->ip_config.ipv6_addl_options,
+ IPV6_ADDOPT_MLD_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
+ len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl);
+ break;
+ case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
len = sprintf(buf, "%d\n",
- (ha->ip_config.ipv4_vlan_tag &
- ISCSI_MAX_VLAN_ID));
- else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
+ ha->ip_config.ipv6_traffic_class);
+ break;
+ case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
len = sprintf(buf, "%d\n",
- (ha->ip_config.ipv6_vlan_tag &
- ISCSI_MAX_VLAN_ID));
- break;
- case ISCSI_NET_PARAM_VLAN_PRIORITY:
- if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ ha->ip_config.ipv6_hop_limit);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
len = sprintf(buf, "%d\n",
- ((ha->ip_config.ipv4_vlan_tag >> 13) &
- ISCSI_MAX_VLAN_PRIORITY));
- else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
+ ha->ip_config.ipv6_nd_reach_time);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
len = sprintf(buf, "%d\n",
- ((ha->ip_config.ipv6_vlan_tag >> 13) &
- ISCSI_MAX_VLAN_PRIORITY));
- break;
- case ISCSI_NET_PARAM_VLAN_ENABLED:
- if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
- len = sprintf(buf, "%s\n",
- (ha->ip_config.ipv4_options &
- IPOPT_VLAN_TAGGING_ENABLE) ?
- "enabled" : "disabled");
- else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
- len = sprintf(buf, "%s\n",
- (ha->ip_config.ipv6_options &
- IPV6_OPT_VLAN_TAGGING_ENABLE) ?
- "enabled" : "disabled");
- break;
- case ISCSI_NET_PARAM_MTU:
- len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
- break;
- case ISCSI_NET_PARAM_PORT:
- if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
- len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
- else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
- len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
- break;
- default:
- len = -ENOSYS;
+ ha->ip_config.ipv6_nd_rexmit_timer);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv6_nd_stale_timeout);
+ break;
+ case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv6_dup_addr_detect_count);
+ break;
+ case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv6_gw_advrt_mtu);
+ break;
+ default:
+ len = -ENOSYS;
+ }
+ } else if (param_type == ISCSI_IFACE_PARAM) {
+ switch (param) {
+ case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
+ len = sprintf(buf, "%d\n", ha->ip_config.def_timeout);
+ break;
+ case ISCSI_IFACE_PARAM_HDRDGST_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_HEADER_DIGEST_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_DATADGST_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_DATA_DIGEST_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_IMM_DATA_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_IMMEDIATE_DATA_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_INITIAL_R2T_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_DATA_SEQ_INORDER_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_DATA_PDU_INORDER_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_ERL:
+ len = sprintf(buf, "%d\n",
+ (ha->ip_config.iscsi_options &
+ ISCSIOPTS_ERL));
+ break;
+ case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
+ len = sprintf(buf, "%u\n",
+ ha->ip_config.iscsi_max_pdu_size *
+ BYTE_UNITS);
+ break;
+ case ISCSI_IFACE_PARAM_FIRST_BURST:
+ len = sprintf(buf, "%u\n",
+ ha->ip_config.iscsi_first_burst_len *
+ BYTE_UNITS);
+ break;
+ case ISCSI_IFACE_PARAM_MAX_R2T:
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.iscsi_max_outstnd_r2t);
+ break;
+ case ISCSI_IFACE_PARAM_MAX_BURST:
+ len = sprintf(buf, "%u\n",
+ ha->ip_config.iscsi_max_burst_len *
+ BYTE_UNITS);
+ break;
+ case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_CHAP_AUTH_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_BIDI_CHAP_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_DISCOVERY_AUTH_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_INITIATOR_NAME:
+ len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name);
+ break;
+ default:
+ len = -ENOSYS;
+ }
}
return len;
@@ -1366,8 +2025,8 @@ static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
cpu_to_le16(
IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
else
- ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
- "IPv6 addr\n");
+ ql4_printk(KERN_ERR, ha,
+ "Invalid autocfg setting for IPv6 addr\n");
break;
case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
/* Autocfg applies to even interface */
@@ -1383,8 +2042,8 @@ static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
else
- ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
- "IPv6 linklocal addr\n");
+ ql4_printk(KERN_ERR, ha,
+ "Invalid autocfg setting for IPv6 linklocal addr\n");
break;
case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
/* Autocfg applies to even interface */
@@ -1433,6 +2092,135 @@ static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
init_fw_cb->ipv6_port =
cpu_to_le16(*(uint16_t *)iface_param->value);
break;
+ case ISCSI_NET_PARAM_DELAYED_ACK_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+ init_fw_cb->ipv6_tcp_opts |=
+ cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE);
+ else
+ init_fw_cb->ipv6_tcp_opts &=
+ cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE);
+ break;
+ case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+ init_fw_cb->ipv6_tcp_opts |=
+ cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE);
+ else
+ init_fw_cb->ipv6_tcp_opts &=
+ cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE);
+ break;
+ case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+ init_fw_cb->ipv6_tcp_opts |=
+ cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE);
+ else
+ init_fw_cb->ipv6_tcp_opts &=
+ cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE);
+ break;
+ case ISCSI_NET_PARAM_TCP_WSF:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_tcp_wsf = iface_param->value[0];
+ break;
+ case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_tcp_opts &=
+ cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE);
+ init_fw_cb->ipv6_tcp_opts |=
+ cpu_to_le16((iface_param->value[0] << 1) &
+ IPV6_TCPOPT_TIMER_SCALE);
+ break;
+ case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv6_tcp_opts |=
+ cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN);
+ else
+ init_fw_cb->ipv6_tcp_opts &=
+ cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv6_opts |=
+ cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN);
+ else
+ init_fw_cb->ipv6_opts &=
+ cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN);
+ break;
+ case ISCSI_NET_PARAM_REDIRECT_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv6_opts |=
+ cpu_to_le16(IPV6_OPT_REDIRECT_EN);
+ else
+ init_fw_cb->ipv6_opts &=
+ cpu_to_le16(~IPV6_OPT_REDIRECT_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV6_MLD_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv6_addtl_opts |=
+ cpu_to_le16(IPV6_ADDOPT_MLD_EN);
+ else
+ init_fw_cb->ipv6_addtl_opts &=
+ cpu_to_le16(~IPV6_ADDOPT_MLD_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_flow_lbl =
+ cpu_to_le16(*(uint16_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_traffic_class = iface_param->value[0];
+ break;
+ case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_hop_limit = iface_param->value[0];
+ break;
+ case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_nd_reach_time =
+ cpu_to_le32(*(uint32_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_nd_rexmit_timer =
+ cpu_to_le32(*(uint32_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_nd_stale_timeout =
+ cpu_to_le32(*(uint32_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0];
+ break;
+ case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_gw_advrt_mtu =
+ cpu_to_le32(*(uint32_t *)iface_param->value);
+ break;
default:
ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
iface_param->param);
@@ -1501,6 +2289,195 @@ static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
init_fw_cb->ipv4_port =
cpu_to_le16(*(uint16_t *)iface_param->value);
break;
+ case ISCSI_NET_PARAM_DELAYED_ACK_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+ init_fw_cb->ipv4_tcp_opts |=
+ cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE);
+ else
+ init_fw_cb->ipv4_tcp_opts &=
+ cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE);
+ break;
+ case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+ init_fw_cb->ipv4_tcp_opts |=
+ cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE);
+ else
+ init_fw_cb->ipv4_tcp_opts &=
+ cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE);
+ break;
+ case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+ init_fw_cb->ipv4_tcp_opts |=
+ cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE);
+ else
+ init_fw_cb->ipv4_tcp_opts &=
+ cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE);
+ break;
+ case ISCSI_NET_PARAM_TCP_WSF:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv4_tcp_wsf = iface_param->value[0];
+ break;
+ case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE);
+ init_fw_cb->ipv4_tcp_opts |=
+ cpu_to_le16((iface_param->value[0] << 1) &
+ TCPOPT_TIMER_SCALE);
+ break;
+ case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_tcp_opts |=
+ cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE);
+ else
+ init_fw_cb->ipv4_tcp_opts &=
+ cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_tcp_opts |=
+ cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN);
+ else
+ init_fw_cb->ipv4_tcp_opts &=
+ cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_tcp_opts |=
+ cpu_to_le16(TCPOPT_SLP_DA_INFO_EN);
+ else
+ init_fw_cb->ipv4_tcp_opts &=
+ cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_TOS_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_IPV4_TOS_EN);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_IPV4_TOS_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_TOS:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv4_tos = iface_param->value[0];
+ break;
+ case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_GRAT_ARP_EN);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_GRAT_ARP_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_ALT_CID_EN);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_ALT_CID_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
+ if (iface_param->iface_num & 0x1)
+ break;
+ memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value,
+ (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1));
+ init_fw_cb->ipv4_dhcp_alt_cid_len =
+ strlen(init_fw_cb->ipv4_dhcp_alt_cid);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_REQ_VID_EN);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_REQ_VID_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_USE_VID_EN);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_USE_VID_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
+ if (iface_param->iface_num & 0x1)
+ break;
+ memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value,
+ (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1));
+ init_fw_cb->ipv4_dhcp_vid_len =
+ strlen(init_fw_cb->ipv4_dhcp_vid);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_LEARN_IQN_EN);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_LEARN_IQN_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE);
+ break;
+ case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_IN_FORWARD_EN);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_IN_FORWARD_EN);
+ break;
+ case ISCSI_NET_PARAM_REDIRECT_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_ARP_REDIRECT_EN);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_ARP_REDIRECT_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_TTL:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv4_ttl = iface_param->value[0];
+ break;
default:
ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
iface_param->param);
@@ -1508,6 +2485,168 @@ static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
}
}
+static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha,
+ struct iscsi_iface_param_info *iface_param,
+ struct addr_ctrl_blk *init_fw_cb)
+{
+ switch (iface_param->param) {
+ case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->def_timeout =
+ cpu_to_le16(*(uint16_t *)iface_param->value);
+ break;
+ case ISCSI_IFACE_PARAM_HDRDGST_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN);
+ break;
+ case ISCSI_IFACE_PARAM_DATADGST_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN);
+ break;
+ case ISCSI_IFACE_PARAM_IMM_DATA_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN);
+ break;
+ case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN);
+ break;
+ case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN);
+ break;
+ case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN);
+ break;
+ case ISCSI_IFACE_PARAM_ERL:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL);
+ init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] &
+ ISCSIOPTS_ERL);
+ break;
+ case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->iscsi_max_pdu_size =
+ cpu_to_le32(*(uint32_t *)iface_param->value) /
+ BYTE_UNITS;
+ break;
+ case ISCSI_IFACE_PARAM_FIRST_BURST:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->iscsi_fburst_len =
+ cpu_to_le32(*(uint32_t *)iface_param->value) /
+ BYTE_UNITS;
+ break;
+ case ISCSI_IFACE_PARAM_MAX_R2T:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->iscsi_max_outstnd_r2t =
+ cpu_to_le16(*(uint16_t *)iface_param->value);
+ break;
+ case ISCSI_IFACE_PARAM_MAX_BURST:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->iscsi_max_burst_len =
+ cpu_to_le32(*(uint32_t *)iface_param->value) /
+ BYTE_UNITS;
+ break;
+ case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN);
+ break;
+ case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN);
+ break;
+ case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN);
+ break;
+ case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN);
+ break;
+ case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN);
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n",
+ iface_param->param);
+ break;
+ }
+}
+
static void
qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
{
@@ -1565,40 +2704,47 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
nla_for_each_attr(attr, data, len, rem) {
iface_param = nla_data(attr);
- if (iface_param->param_type != ISCSI_NET_PARAM)
- continue;
-
- switch (iface_param->iface_type) {
- case ISCSI_IFACE_TYPE_IPV4:
- switch (iface_param->iface_num) {
- case 0:
- qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
- break;
- default:
+ if (iface_param->param_type == ISCSI_NET_PARAM) {
+ switch (iface_param->iface_type) {
+ case ISCSI_IFACE_TYPE_IPV4:
+ switch (iface_param->iface_num) {
+ case 0:
+ qla4xxx_set_ipv4(ha, iface_param,
+ init_fw_cb);
+ break;
+ default:
/* Cannot have more than one IPv4 interface */
- ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
- "number = %d\n",
- iface_param->iface_num);
+ ql4_printk(KERN_ERR, ha,
+ "Invalid IPv4 iface number = %d\n",
+ iface_param->iface_num);
+ break;
+ }
break;
- }
- break;
- case ISCSI_IFACE_TYPE_IPV6:
- switch (iface_param->iface_num) {
- case 0:
- case 1:
- qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
+ case ISCSI_IFACE_TYPE_IPV6:
+ switch (iface_param->iface_num) {
+ case 0:
+ case 1:
+ qla4xxx_set_ipv6(ha, iface_param,
+ init_fw_cb);
+ break;
+ default:
+ /* Cannot have more than two IPv6 interfaces */
+ ql4_printk(KERN_ERR, ha,
+ "Invalid IPv6 iface number = %d\n",
+ iface_param->iface_num);
+ break;
+ }
break;
default:
- /* Cannot have more than two IPv6 interface */
- ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
- "number = %d\n",
- iface_param->iface_num);
+ ql4_printk(KERN_ERR, ha,
+ "Invalid iface type\n");
break;
}
- break;
- default:
- ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
- break;
+ } else if (iface_param->param_type == ISCSI_IFACE_PARAM) {
+ qla4xxx_set_iscsi_param(ha, iface_param,
+ init_fw_cb);
+ } else {
+ continue;
}
}
@@ -2538,6 +3684,7 @@ static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
unsigned long options = 0;
uint16_t ddb_link;
uint16_t disc_parent;
+ char ip_addr[DDB_IPADDR_LEN];
options = le16_to_cpu(fw_ddb_entry->options);
conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
@@ -2619,6 +3766,14 @@ static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS,
(char *)fw_ddb_entry->iscsi_alias, 0);
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE) {
+ memset(ip_addr, 0, sizeof(ip_addr));
+ sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr);
+ iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR,
+ (char *)ip_addr, 0);
+ }
}
static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
@@ -5030,64 +6185,6 @@ kset_free:
}
-/**
- * qla4xxx_create chap_list - Create CHAP list from FLASH
- * @ha: pointer to adapter structure
- *
- * Read flash and make a list of CHAP entries, during login when a CHAP entry
- * is received, it will be checked in this list. If entry exist then the CHAP
- * entry index is set in the DDB. If CHAP entry does not exist in this list
- * then a new entry is added in FLASH in CHAP table and the index obtained is
- * used in the DDB.
- **/
-static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
-{
- int rval = 0;
- uint8_t *chap_flash_data = NULL;
- uint32_t offset;
- dma_addr_t chap_dma;
- uint32_t chap_size = 0;
-
- if (is_qla40XX(ha))
- chap_size = MAX_CHAP_ENTRIES_40XX *
- sizeof(struct ql4_chap_table);
- else /* Single region contains CHAP info for both
- * ports which is divided into half for each port.
- */
- chap_size = ha->hw.flt_chap_size / 2;
-
- chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
- &chap_dma, GFP_KERNEL);
- if (!chap_flash_data) {
- ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
- return;
- }
- if (is_qla40XX(ha))
- offset = FLASH_CHAP_OFFSET;
- else {
- offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
- if (ha->port_num == 1)
- offset += chap_size;
- }
-
- rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
- if (rval != QLA_SUCCESS)
- goto exit_chap_list;
-
- if (ha->chap_list == NULL)
- ha->chap_list = vmalloc(chap_size);
- if (ha->chap_list == NULL) {
- ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
- goto exit_chap_list;
- }
-
- memcpy(ha->chap_list, chap_flash_data, chap_size);
-
-exit_chap_list:
- dma_free_coherent(&ha->pdev->dev, chap_size,
- chap_flash_data, chap_dma);
-}
-
static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
struct ql4_tuple_ddb *tddb)
{
@@ -7521,6 +8618,9 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
mutex_init(&ha->chap_sem);
init_completion(&ha->mbx_intr_comp);
init_completion(&ha->disable_acb_comp);
+ init_completion(&ha->idc_comp);
+ init_completion(&ha->link_up_comp);
spin_lock_init(&ha->hardware_lock);
spin_lock_init(&ha->work_lock);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index f4fef72c9bcd..9b2946658683 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.04.00-k1"
+#define QLA4XXX_DRIVER_VERSION "5.04.00-k3"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index fe0bcb18fb26..d8afec8317cf 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -297,6 +297,7 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
cmd->device = dev;
INIT_LIST_HEAD(&cmd->list);
+ INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
spin_lock_irqsave(&dev->list_lock, flags);
list_add_tail(&cmd->list, &dev->cmd_list);
spin_unlock_irqrestore(&dev->list_lock, flags);
@@ -353,6 +354,8 @@ void scsi_put_command(struct scsi_cmnd *cmd)
list_del_init(&cmd->list);
spin_unlock_irqrestore(&cmd->device->list_lock, flags);
+ cancel_delayed_work(&cmd->abort_work);
+
__scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_put_command);
@@ -742,15 +745,13 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
/**
- * scsi_done - Enqueue the finished SCSI command into the done queue.
+ * scsi_done - Invoke completion on finished SCSI command.
* @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
* ownership back to SCSI Core -- i.e. the LLDD has finished with it.
*
* Description: This function is the mid-level's (SCSI Core) interrupt routine,
* which regains ownership of the SCSI command (de facto) from a LLDD, and
- * enqueues the command to the done queue for further processing.
- *
- * This is the producer of the done queue who enqueues at the tail.
+ * calls blk_complete_request() for further processing.
*
* This function is interrupt context safe.
*/
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 80b8b10edf41..2decc6417518 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2873,13 +2873,13 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
return 0;
}
-static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
+static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
}
-static ssize_t sdebug_delay_store(struct device_driver * ddp,
- const char * buf, size_t count)
+static ssize_t delay_store(struct device_driver *ddp, const char *buf,
+ size_t count)
{
int delay;
char work[20];
@@ -2892,16 +2892,15 @@ static ssize_t sdebug_delay_store(struct device_driver * ddp,
}
return -EINVAL;
}
-DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
- sdebug_delay_store);
+static DRIVER_ATTR_RW(delay);
-static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
+static ssize_t opts_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
}
-static ssize_t sdebug_opts_store(struct device_driver * ddp,
- const char * buf, size_t count)
+static ssize_t opts_store(struct device_driver *ddp, const char *buf,
+ size_t count)
{
int opts;
char work[20];
@@ -2921,15 +2920,14 @@ opts_done:
scsi_debug_cmnd_count = 0;
return count;
}
-DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
- sdebug_opts_store);
+static DRIVER_ATTR_RW(opts);
-static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
+static ssize_t ptype_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
}
-static ssize_t sdebug_ptype_store(struct device_driver * ddp,
- const char * buf, size_t count)
+static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
+ size_t count)
{
int n;
@@ -2939,14 +2937,14 @@ static ssize_t sdebug_ptype_store(struct device_driver * ddp,
}
return -EINVAL;
}
-DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);
+static DRIVER_ATTR_RW(ptype);
-static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
+static ssize_t dsense_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
}
-static ssize_t sdebug_dsense_store(struct device_driver * ddp,
- const char * buf, size_t count)
+static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
+ size_t count)
{
int n;
@@ -2956,15 +2954,14 @@ static ssize_t sdebug_dsense_store(struct device_driver * ddp,
}
return -EINVAL;
}
-DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
- sdebug_dsense_store);
+static DRIVER_ATTR_RW(dsense);
-static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
+static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
}
-static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
- const char * buf, size_t count)
+static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
+ size_t count)
{
int n;
@@ -2974,15 +2971,14 @@ static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
}
return -EINVAL;
}
-DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
- sdebug_fake_rw_store);
+static DRIVER_ATTR_RW(fake_rw);
-static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
+static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
}
-static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
- const char * buf, size_t count)
+static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
+ size_t count)
{
int n;
@@ -2992,15 +2988,14 @@ static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
}
return -EINVAL;
}
-DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
- sdebug_no_lun_0_store);
+static DRIVER_ATTR_RW(no_lun_0);
-static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
+static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
}
-static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
- const char * buf, size_t count)
+static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
+ size_t count)
{
int n;
@@ -3011,27 +3006,26 @@ static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
}
return -EINVAL;
}
-DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
- sdebug_num_tgts_store);
+static DRIVER_ATTR_RW(num_tgts);
-static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
+static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
}
-DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);
+static DRIVER_ATTR_RO(dev_size_mb);
-static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
+static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
}
-DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
+static DRIVER_ATTR_RO(num_parts);
-static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
+static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
}
-static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
- const char * buf, size_t count)
+static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
+ size_t count)
{
int nth;
@@ -3042,15 +3036,14 @@ static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
}
return -EINVAL;
}
-DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
- sdebug_every_nth_store);
+static DRIVER_ATTR_RW(every_nth);
-static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
+static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
}
-static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
- const char * buf, size_t count)
+static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
+ size_t count)
{
int n;
@@ -3061,15 +3054,14 @@ static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
}
return -EINVAL;
}
-DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
- sdebug_max_luns_store);
+static DRIVER_ATTR_RW(max_luns);
-static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
+static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
}
-static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
- const char * buf, size_t count)
+static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
+ size_t count)
{
int n;
@@ -3080,27 +3072,26 @@ static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
}
return -EINVAL;
}
-DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
- sdebug_max_queue_store);
+static DRIVER_ATTR_RW(max_queue);
-static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
+static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
}
-DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
+static DRIVER_ATTR_RO(no_uld);
-static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
+static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
}
-DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
+static DRIVER_ATTR_RO(scsi_level);
-static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
+static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
}
-static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
- const char * buf, size_t count)
+static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
+ size_t count)
{
int n;
@@ -3113,16 +3104,15 @@ static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
}
return -EINVAL;
}
-DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
- sdebug_virtual_gb_store);
+static DRIVER_ATTR_RW(virtual_gb);
-static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
+static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
}
-static ssize_t sdebug_add_host_store(struct device_driver * ddp,
- const char * buf, size_t count)
+static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
+ size_t count)
{
int delta_hosts;
@@ -3139,16 +3129,14 @@ static ssize_t sdebug_add_host_store(struct device_driver * ddp,
}
return count;
}
-DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
- sdebug_add_host_store);
+static DRIVER_ATTR_RW(add_host);
-static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
- char * buf)
+static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
}
-static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
- const char * buf, size_t count)
+static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
+ size_t count)
{
int n;
@@ -3158,40 +3146,39 @@ static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
}
return -EINVAL;
}
-DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
- sdebug_vpd_use_hostno_store);
+static DRIVER_ATTR_RW(vpd_use_hostno);
-static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
+static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
}
-DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
+static DRIVER_ATTR_RO(sector_size);
-static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
+static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
}
-DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);
+static DRIVER_ATTR_RO(dix);
-static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
+static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
}
-DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
+static DRIVER_ATTR_RO(dif);
-static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
+static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
}
-DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
+static DRIVER_ATTR_RO(guard);
-static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
+static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
}
-DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
+static DRIVER_ATTR_RO(ato);
-static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
+static ssize_t map_show(struct device_driver *ddp, char *buf)
{
ssize_t count;
@@ -3206,15 +3193,14 @@ static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
return count;
}
-DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
+static DRIVER_ATTR_RO(map);
-static ssize_t sdebug_removable_show(struct device_driver *ddp,
- char *buf)
+static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
}
-static ssize_t sdebug_removable_store(struct device_driver *ddp,
- const char *buf, size_t count)
+static ssize_t removable_store(struct device_driver *ddp, const char *buf,
+ size_t count)
{
int n;
@@ -3224,74 +3210,43 @@ static ssize_t sdebug_removable_store(struct device_driver *ddp,
}
return -EINVAL;
}
-DRIVER_ATTR(removable, S_IRUGO | S_IWUSR, sdebug_removable_show,
- sdebug_removable_store);
+static DRIVER_ATTR_RW(removable);
-
-/* Note: The following function creates attribute files in the
+/* Note: The following array creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
files (over those found in the /sys/module/scsi_debug/parameters
directory) is that auxiliary actions can be triggered when an attribute
is changed. For example see: sdebug_add_host_store() above.
*/
-static int do_create_driverfs_files(void)
-{
- int ret;
- ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_removable);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
- ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
- return ret;
-}
-
-static void do_remove_driverfs_files(void)
-{
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_removable);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
- driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
-}
+static struct attribute *sdebug_drv_attrs[] = {
+ &driver_attr_delay.attr,
+ &driver_attr_opts.attr,
+ &driver_attr_ptype.attr,
+ &driver_attr_dsense.attr,
+ &driver_attr_fake_rw.attr,
+ &driver_attr_no_lun_0.attr,
+ &driver_attr_num_tgts.attr,
+ &driver_attr_dev_size_mb.attr,
+ &driver_attr_num_parts.attr,
+ &driver_attr_every_nth.attr,
+ &driver_attr_max_luns.attr,
+ &driver_attr_max_queue.attr,
+ &driver_attr_no_uld.attr,
+ &driver_attr_scsi_level.attr,
+ &driver_attr_virtual_gb.attr,
+ &driver_attr_add_host.attr,
+ &driver_attr_vpd_use_hostno.attr,
+ &driver_attr_sector_size.attr,
+ &driver_attr_dix.attr,
+ &driver_attr_dif.attr,
+ &driver_attr_guard.attr,
+ &driver_attr_ato.attr,
+ &driver_attr_map.attr,
+ &driver_attr_removable.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(sdebug_drv);
struct device *pseudo_primary;
@@ -3456,12 +3411,6 @@ static int __init scsi_debug_init(void)
ret);
goto bus_unreg;
}
- ret = do_create_driverfs_files();
- if (ret < 0) {
- printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
- ret);
- goto del_files;
- }
init_all_queued();
@@ -3482,9 +3431,6 @@ static int __init scsi_debug_init(void)
}
return 0;
-del_files:
- do_remove_driverfs_files();
- driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
bus_unregister(&pseudo_lld_bus);
dev_unreg:
@@ -3506,7 +3452,6 @@ static void __exit scsi_debug_exit(void)
stop_all_queued();
for (; k; k--)
sdebug_remove_adapter();
- do_remove_driverfs_files();
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
@@ -4096,4 +4041,5 @@ static struct bus_type pseudo_lld_bus = {
.match = pseudo_lld_bus_match,
.probe = sdebug_driver_probe,
.remove = sdebug_driver_remove,
+ .drv_groups = sdebug_drv_groups,
};
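
For reference, a minimal sketch of the attribute-group pattern the scsi_debug hunk above switches to: declare the driver attributes, gather them in a NULL-terminated array, wrap it with ATTRIBUTE_GROUPS(), and point the bus_type's .drv_groups at the generated list so the driver core creates and removes the sysfs files itself. All "foo" names below are hypothetical; only the macros and struct fields are taken from the hunk.

#include <linux/device.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static int foo_delay = 1;			/* made-up module state */

static ssize_t foo_delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", foo_delay);
}
static DRIVER_ATTR(delay, S_IRUGO, foo_delay_show, NULL);

/* NULL-terminated attribute array, as in sdebug_drv_attrs[] above */
static struct attribute *foo_drv_attrs[] = {
	&driver_attr_delay.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo_drv);			/* emits foo_drv_groups */

/* The driver core now creates/removes the files for every driver on this
 * bus, which is what lets do_create/remove_driverfs_files() go away. */
static struct bus_type foo_bus = {
	.name		= "foo",
	.drv_groups	= foo_drv_groups,
};
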
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index e8bee9f0ad0f..78b004da2885 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -53,6 +53,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd);
#define HOST_RESET_SETTLE_TIME (10)
static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
+static int scsi_try_to_abort_cmd(struct scsi_host_template *,
+ struct scsi_cmnd *);
/* called with shost->host_lock held */
void scsi_eh_wakeup(struct Scsi_Host *shost)
@@ -89,17 +91,138 @@ EXPORT_SYMBOL_GPL(scsi_schedule_eh);
static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
{
- if (!shost->last_reset || !shost->eh_deadline)
+ if (!shost->last_reset || shost->eh_deadline == -1)
return 0;
- if (time_before(jiffies,
- shost->last_reset + shost->eh_deadline))
+ /*
+ * 32bit accesses are guaranteed to be atomic
+ * (on all supported architectures), so instead
+ * of using a spinlock we can as well double check
+ * if eh_deadline has been set to 'off' during the
+ * time_before call.
+ */
+ if (time_before(jiffies, shost->last_reset + shost->eh_deadline) &&
+ shost->eh_deadline > -1)
return 0;
return 1;
}
/**
+ * scmd_eh_abort_handler - Handle command aborts
+ * @work: command to be aborted.
+ */
+void
+scmd_eh_abort_handler(struct work_struct *work)
+{
+ struct scsi_cmnd *scmd =
+ container_of(work, struct scsi_cmnd, abort_work.work);
+ struct scsi_device *sdev = scmd->device;
+ int rtn;
+
+ if (scsi_host_eh_past_deadline(sdev->host)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "scmd %p eh timeout, not aborting\n",
+ scmd));
+ } else {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "aborting command %p\n", scmd));
+ rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
+ if (rtn == SUCCESS) {
+ scmd->result |= DID_TIME_OUT << 16;
+ if (scsi_host_eh_past_deadline(sdev->host)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "scmd %p eh timeout, "
+ "not retrying aborted "
+ "command\n", scmd));
+ } else if (!scsi_noretry_cmd(scmd) &&
+ (++scmd->retries <= scmd->allowed)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_WARNING, scmd,
+ "scmd %p retry "
+ "aborted command\n", scmd));
+ scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
+ return;
+ } else {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_WARNING, scmd,
+ "scmd %p finish "
+ "aborted command\n", scmd));
+ scsi_finish_command(scmd);
+ return;
+ }
+ } else {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "scmd %p abort failed, rtn %d\n",
+ scmd, rtn));
+ }
+ }
+
+ if (!scsi_eh_scmd_add(scmd, 0)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_WARNING, scmd,
+ "scmd %p terminate "
+ "aborted command\n", scmd));
+ scmd->result |= DID_TIME_OUT << 16;
+ scsi_finish_command(scmd);
+ }
+}
+
+/**
+ * scsi_abort_command - schedule a command abort
+ * @scmd: scmd to abort.
+ *
+ * We only need to abort commands after a command timeout
+ */
+static int
+scsi_abort_command(struct scsi_cmnd *scmd)
+{
+ struct scsi_device *sdev = scmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ unsigned long flags;
+
+ if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
+ /*
+ * Retry after abort failed, escalate to next level.
+ */
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "scmd %p previous abort failed\n", scmd));
+ cancel_delayed_work(&scmd->abort_work);
+ return FAILED;
+ }
+
+ /*
+ * Do not try a command abort if
+ * SCSI EH has already started.
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_in_recovery(shost)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "scmd %p not aborting, host in recovery\n",
+ scmd));
+ return FAILED;
+ }
+
+ if (shost->eh_deadline != -1 && !shost->last_reset)
+ shost->last_reset = jiffies;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED;
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "scmd %p abort scheduled\n", scmd));
+ queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100);
+ return SUCCESS;
+}
+
+/**
* scsi_eh_scmd_add - add scsi cmd to error handling.
* @scmd: scmd to run eh on.
* @eh_flag: optional SCSI_EH flag.
@@ -121,10 +244,12 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
goto out_unlock;
- if (shost->eh_deadline && !shost->last_reset)
+ if (shost->eh_deadline != -1 && !shost->last_reset)
shost->last_reset = jiffies;
ret = 1;
+ if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED)
+ eh_flag &= ~SCSI_EH_CANCEL_CMD;
scmd->eh_eflags |= eh_flag;
list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
shost->host_failed++;
@@ -153,7 +278,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
trace_scsi_dispatch_cmd_timeout(scmd);
scsi_log_completion(scmd, TIMEOUT_ERROR);
- if (host->eh_deadline && !host->last_reset)
+ if (host->eh_deadline != -1 && !host->last_reset)
host->last_reset = jiffies;
if (host->transportt->eh_timed_out)
@@ -161,6 +286,10 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
else if (host->hostt->eh_timed_out)
rtn = host->hostt->eh_timed_out(scmd);
+ if (rtn == BLK_EH_NOT_HANDLED && !host->hostt->no_async_abort)
+ if (scsi_abort_command(scmd) == SUCCESS)
+ return BLK_EH_NOT_HANDLED;
+
scmd->result |= DID_TIME_OUT << 16;
if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
@@ -941,12 +1070,6 @@ retry:
scsi_eh_restore_cmnd(scmd, &ses);
- if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
- struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
- if (sdrv->eh_action)
- rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn);
- }
-
return rtn;
}
@@ -964,6 +1087,16 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
return scsi_send_eh_cmnd(scmd, NULL, 0, scmd->device->eh_timeout, ~0);
}
+static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
+{
+ if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+ struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
+ if (sdrv->eh_action)
+ rtn = sdrv->eh_action(scmd, rtn);
+ }
+ return rtn;
+}
+
/**
* scsi_eh_finish_cmd - Handle a cmd that eh is finished with.
* @scmd: Original SCSI cmd that eh has finished.
@@ -1010,7 +1143,6 @@ int scsi_eh_get_sense(struct list_head *work_q,
struct scsi_cmnd *scmd, *next;
struct Scsi_Host *shost;
int rtn;
- unsigned long flags;
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
@@ -1018,16 +1150,13 @@ int scsi_eh_get_sense(struct list_head *work_q,
continue;
shost = scmd->device->host;
- spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_eh_past_deadline(shost)) {
- spin_unlock_irqrestore(shost->host_lock, flags);
SCSI_LOG_ERROR_RECOVERY(3,
shost_printk(KERN_INFO, shost,
"skip %s, past eh deadline\n",
__func__));
break;
}
- spin_unlock_irqrestore(shost->host_lock, flags);
SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
"%s: requesting sense\n",
current->comm));
@@ -1113,26 +1242,21 @@ static int scsi_eh_test_devices(struct list_head *cmd_list,
struct scsi_cmnd *scmd, *next;
struct scsi_device *sdev;
int finish_cmds;
- unsigned long flags;
while (!list_empty(cmd_list)) {
scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
sdev = scmd->device;
if (!try_stu) {
- spin_lock_irqsave(sdev->host->host_lock, flags);
if (scsi_host_eh_past_deadline(sdev->host)) {
/* Push items back onto work_q */
list_splice_init(cmd_list, work_q);
- spin_unlock_irqrestore(sdev->host->host_lock,
- flags);
SCSI_LOG_ERROR_RECOVERY(3,
shost_printk(KERN_INFO, sdev->host,
"skip %s, past eh deadline",
__func__));
break;
}
- spin_unlock_irqrestore(sdev->host->host_lock, flags);
}
finish_cmds = !scsi_device_online(scmd->device) ||
@@ -1142,7 +1266,9 @@ static int scsi_eh_test_devices(struct list_head *cmd_list,
list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
if (scmd->device == sdev) {
- if (finish_cmds)
+ if (finish_cmds &&
+ (try_stu ||
+ scsi_eh_action(scmd, SUCCESS) == SUCCESS))
scsi_eh_finish_cmd(scmd, done_q);
else
list_move_tail(&scmd->eh_entry, work_q);
@@ -1171,15 +1297,12 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
LIST_HEAD(check_list);
int rtn;
struct Scsi_Host *shost;
- unsigned long flags;
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
continue;
shost = scmd->device->host;
- spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_eh_past_deadline(shost)) {
- spin_unlock_irqrestore(shost->host_lock, flags);
list_splice_init(&check_list, work_q);
SCSI_LOG_ERROR_RECOVERY(3,
shost_printk(KERN_INFO, shost,
@@ -1187,7 +1310,6 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
__func__));
return list_empty(work_q);
}
- spin_unlock_irqrestore(shost->host_lock, flags);
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
"0x%p\n", current->comm,
scmd));
@@ -1251,19 +1373,15 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
{
struct scsi_cmnd *scmd, *stu_scmd, *next;
struct scsi_device *sdev;
- unsigned long flags;
shost_for_each_device(sdev, shost) {
- spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_eh_past_deadline(shost)) {
- spin_unlock_irqrestore(shost->host_lock, flags);
SCSI_LOG_ERROR_RECOVERY(3,
shost_printk(KERN_INFO, shost,
"skip %s, past eh deadline\n",
__func__));
break;
}
- spin_unlock_irqrestore(shost->host_lock, flags);
stu_scmd = NULL;
list_for_each_entry(scmd, work_q, eh_entry)
if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
@@ -1283,7 +1401,8 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
!scsi_eh_tur(stu_scmd)) {
list_for_each_entry_safe(scmd, next,
work_q, eh_entry) {
- if (scmd->device == sdev)
+ if (scmd->device == sdev &&
+ scsi_eh_action(scmd, SUCCESS) == SUCCESS)
scsi_eh_finish_cmd(scmd, done_q);
}
}
@@ -1316,20 +1435,16 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
{
struct scsi_cmnd *scmd, *bdr_scmd, *next;
struct scsi_device *sdev;
- unsigned long flags;
int rtn;
shost_for_each_device(sdev, shost) {
- spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_eh_past_deadline(shost)) {
- spin_unlock_irqrestore(shost->host_lock, flags);
SCSI_LOG_ERROR_RECOVERY(3,
shost_printk(KERN_INFO, shost,
"skip %s, past eh deadline\n",
__func__));
break;
}
- spin_unlock_irqrestore(shost->host_lock, flags);
bdr_scmd = NULL;
list_for_each_entry(scmd, work_q, eh_entry)
if (scmd->device == sdev) {
@@ -1350,7 +1465,8 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
!scsi_eh_tur(bdr_scmd)) {
list_for_each_entry_safe(scmd, next,
work_q, eh_entry) {
- if (scmd->device == sdev)
+ if (scmd->device == sdev &&
+ scsi_eh_action(scmd, rtn) != FAILED)
scsi_eh_finish_cmd(scmd,
done_q);
}
@@ -1389,11 +1505,8 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
struct scsi_cmnd *next, *scmd;
int rtn;
unsigned int id;
- unsigned long flags;
- spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_eh_past_deadline(shost)) {
- spin_unlock_irqrestore(shost->host_lock, flags);
/* push back on work queue for further processing */
list_splice_init(&check_list, work_q);
list_splice_init(&tmp_list, work_q);
@@ -1403,7 +1516,6 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
__func__));
return list_empty(work_q);
}
- spin_unlock_irqrestore(shost->host_lock, flags);
scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
id = scmd_id(scmd);
@@ -1448,7 +1560,6 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
LIST_HEAD(check_list);
unsigned int channel;
int rtn;
- unsigned long flags;
/*
* we really want to loop over the various channels, and do this on
@@ -1458,9 +1569,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
*/
for (channel = 0; channel <= shost->max_channel; channel++) {
- spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_eh_past_deadline(shost)) {
- spin_unlock_irqrestore(shost->host_lock, flags);
list_splice_init(&check_list, work_q);
SCSI_LOG_ERROR_RECOVERY(3,
shost_printk(KERN_INFO, shost,
@@ -1468,7 +1577,6 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
__func__));
return list_empty(work_q);
}
- spin_unlock_irqrestore(shost->host_lock, flags);
chan_scmd = NULL;
list_for_each_entry(scmd, work_q, eh_entry) {
@@ -1569,7 +1677,7 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
}
/**
- * scsi_noretry_cmd - determinte if command should be failed fast
+ * scsi_noretry_cmd - determine if command should be failed fast
* @scmd: SCSI cmd to examine.
*/
int scsi_noretry_cmd(struct scsi_cmnd *scmd)
@@ -1577,6 +1685,8 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd)
switch (host_byte(scmd->result)) {
case DID_OK:
break;
+ case DID_TIME_OUT:
+ goto check_type;
case DID_BUS_BUSY:
return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);
case DID_PARITY:
@@ -1590,18 +1700,19 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd)
return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
}
- switch (status_byte(scmd->result)) {
- case CHECK_CONDITION:
- /*
- * assume caller has checked sense and determinted
- * the check condition was retryable.
- */
- if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
- scmd->request->cmd_type == REQ_TYPE_BLOCK_PC)
- return 1;
- }
+ if (status_byte(scmd->result) != CHECK_CONDITION)
+ return 0;
- return 0;
+check_type:
+ /*
+ * assume caller has checked sense and determined
+ * the check condition was retryable.
+ */
+ if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
+ scmd->request->cmd_type == REQ_TYPE_BLOCK_PC)
+ return 1;
+ else
+ return 0;
}
/**
@@ -1651,9 +1762,13 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
* looks good. drop through, and check the next byte.
*/
break;
+ case DID_ABORT:
+ if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
+ scmd->result |= DID_TIME_OUT << 16;
+ return SUCCESS;
+ }
case DID_NO_CONNECT:
case DID_BAD_TARGET:
- case DID_ABORT:
/*
* note - this means that we just report the status back
* to the top level driver, not that we actually think
@@ -1999,7 +2114,7 @@ static void scsi_unjam_host(struct Scsi_Host *shost)
scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
spin_lock_irqsave(shost->host_lock, flags);
- if (shost->eh_deadline)
+ if (shost->eh_deadline != -1)
shost->last_reset = 0;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_eh_flush_done_q(&eh_done_q);
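
The scsi_error.c hunks above add an asynchronous abort path: scsi_times_out() now calls scsi_abort_command(), which queues abort_work on shost->tmf_work_q unless the host template sets no_async_abort, and eh_deadline uses -1 rather than 0 as its "off" sentinel. A hypothetical host template that opts out of the new path might look like the sketch below; the "foo" names and stub behaviour are illustrative, only the no_async_abort flag and the callback signatures come from the code above.

#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int foo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	scmd->result = DID_NO_CONNECT << 16;	/* stub: fail every command */
	scmd->scsi_done(scmd);
	return 0;
}

static int foo_eh_abort(struct scsi_cmnd *scmd)
{
	return SUCCESS;				/* stub abort handler */
}

static struct scsi_host_template foo_sht = {
	.module		  = THIS_MODULE,
	.name		  = "foo",
	.queuecommand	  = foo_queuecommand,
	.eh_abort_handler = foo_eh_abort,
	.can_queue	  = 1,
	.this_id	  = -1,
	.no_async_abort	  = 1,	/* timeouts skip scsi_abort_command() and
				 * escalate straight to the classic EH thread */
};
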
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index af4c050ce6e4..001e9ceda4c3 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -16,6 +16,8 @@
#include "scsi_priv.h"
+#ifdef CONFIG_PM_SLEEP
+
static int scsi_dev_type_suspend(struct device *dev, int (*cb)(struct device *))
{
int err;
@@ -43,8 +45,6 @@ static int scsi_dev_type_resume(struct device *dev, int (*cb)(struct device *))
return err;
}
-#ifdef CONFIG_PM_SLEEP
-
static int
scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))
{
@@ -145,38 +145,22 @@ static int scsi_bus_restore(struct device *dev)
#ifdef CONFIG_PM_RUNTIME
-static int sdev_blk_runtime_suspend(struct scsi_device *sdev,
- int (*cb)(struct device *))
+static int sdev_runtime_suspend(struct device *dev)
{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ struct scsi_device *sdev = to_scsi_device(dev);
int err;
err = blk_pre_runtime_suspend(sdev->request_queue);
if (err)
return err;
- if (cb)
- err = cb(&sdev->sdev_gendev);
+ if (pm && pm->runtime_suspend)
+ err = pm->runtime_suspend(dev);
blk_post_runtime_suspend(sdev->request_queue, err);
return err;
}
-static int sdev_runtime_suspend(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int (*cb)(struct device *) = pm ? pm->runtime_suspend : NULL;
- struct scsi_device *sdev = to_scsi_device(dev);
- int err;
-
- if (sdev->request_queue->dev)
- return sdev_blk_runtime_suspend(sdev, cb);
-
- err = scsi_dev_type_suspend(dev, cb);
- if (err == -EAGAIN)
- pm_schedule_suspend(dev, jiffies_to_msecs(
- round_jiffies_up_relative(HZ/10)));
- return err;
-}
-
static int scsi_runtime_suspend(struct device *dev)
{
int err = 0;
@@ -190,31 +174,20 @@ static int scsi_runtime_suspend(struct device *dev)
return err;
}
-static int sdev_blk_runtime_resume(struct scsi_device *sdev,
- int (*cb)(struct device *))
+static int sdev_runtime_resume(struct device *dev)
{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int err = 0;
blk_pre_runtime_resume(sdev->request_queue);
- if (cb)
- err = cb(&sdev->sdev_gendev);
+ if (pm && pm->runtime_resume)
+ err = pm->runtime_resume(dev);
blk_post_runtime_resume(sdev->request_queue, err);
return err;
}
-static int sdev_runtime_resume(struct device *dev)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int (*cb)(struct device *) = pm ? pm->runtime_resume : NULL;
-
- if (sdev->request_queue->dev)
- return sdev_blk_runtime_resume(sdev, cb);
- else
- return scsi_dev_type_resume(dev, cb);
-}
-
static int scsi_runtime_resume(struct device *dev)
{
int err = 0;
@@ -235,14 +208,11 @@ static int scsi_runtime_idle(struct device *dev)
/* Insert hooks here for targets, hosts, and transport classes */
if (scsi_is_sdev_device(dev)) {
- struct scsi_device *sdev = to_scsi_device(dev);
-
- if (sdev->request_queue->dev) {
- pm_runtime_mark_last_busy(dev);
- pm_runtime_autosuspend(dev);
- return -EBUSY;
- }
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_autosuspend(dev);
+ return -EBUSY;
}
+
return 0;
}
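
With the scsi_pm.c consolidation above, sdev runtime suspend/resume always brackets the driver's dev_pm_ops callbacks with blk_pre/post_runtime_suspend() and blk_pre/post_runtime_resume(), since every scsi_device queue is expected to use block-layer runtime PM (see the blk_pm_runtime_init() call added to sr.c further down). A hypothetical upper-level driver therefore only supplies ordinary runtime PM callbacks, roughly as sketched below ("foo" names are illustrative).

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* the request queue is already idle when this is called */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
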
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 8f9a0cadc296..f079a598bed4 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -19,6 +19,7 @@ struct scsi_nl_hdr;
* Scsi Error Handler Flags
*/
#define SCSI_EH_CANCEL_CMD 0x0001 /* Cancel this cmd */
+#define SCSI_EH_ABORT_SCHEDULED 0x0002 /* Abort has been scheduled */
#define SCSI_SENSE_VALID(scmd) \
(((scmd)->sense_buffer[0] & 0x70) == 0x70)
@@ -66,6 +67,7 @@ extern int __init scsi_init_devinfo(void);
extern void scsi_exit_devinfo(void);
/* scsi_error.c */
+extern void scmd_eh_abort_handler(struct work_struct *work);
extern enum blk_eh_timer_return scsi_times_out(struct request *req);
extern int scsi_error_handler(void *host);
extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 8ff62c26a41c..9117d0bf408e 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -287,7 +287,9 @@ show_shost_eh_deadline(struct device *dev,
{
struct Scsi_Host *shost = class_to_shost(dev);
- return sprintf(buf, "%d\n", shost->eh_deadline / HZ);
+ if (shost->eh_deadline == -1)
+ return snprintf(buf, strlen("off") + 2, "off\n");
+ return sprintf(buf, "%u\n", shost->eh_deadline / HZ);
}
static ssize_t
@@ -296,22 +298,34 @@ store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
{
struct Scsi_Host *shost = class_to_shost(dev);
int ret = -EINVAL;
- int deadline;
- unsigned long flags;
+ unsigned long deadline, flags;
if (shost->transportt && shost->transportt->eh_strategy_handler)
return ret;
- if (sscanf(buf, "%d\n", &deadline) == 1) {
- spin_lock_irqsave(shost->host_lock, flags);
- if (scsi_host_in_recovery(shost))
- ret = -EBUSY;
- else {
+ if (!strncmp(buf, "off", strlen("off")))
+ deadline = -1;
+ else {
+ ret = kstrtoul(buf, 10, &deadline);
+ if (ret)
+ return ret;
+ if (deadline * HZ > UINT_MAX)
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_in_recovery(shost))
+ ret = -EBUSY;
+ else {
+ if (deadline == -1)
+ shost->eh_deadline = -1;
+ else
shost->eh_deadline = deadline * HZ;
- ret = count;
- }
- spin_unlock_irqrestore(shost->host_lock, flags);
+
+ ret = count;
}
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
return ret;
}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index de5b4d9bb022..0102a2d70dd8 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -305,20 +305,71 @@ show_##type##_##name(struct device *dev, struct device_attribute *attr, \
iscsi_iface_attr_show(type, name, ISCSI_NET_PARAM, param) \
static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL);
-/* generic read only ipvi4 attribute */
+#define iscsi_iface_attr(type, name, param) \
+ iscsi_iface_attr_show(type, name, ISCSI_IFACE_PARAM, param) \
+static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL);
+
+/* generic read only ipv4 attribute */
iscsi_iface_net_attr(ipv4_iface, ipaddress, ISCSI_NET_PARAM_IPV4_ADDR);
iscsi_iface_net_attr(ipv4_iface, gateway, ISCSI_NET_PARAM_IPV4_GW);
iscsi_iface_net_attr(ipv4_iface, subnet, ISCSI_NET_PARAM_IPV4_SUBNET);
iscsi_iface_net_attr(ipv4_iface, bootproto, ISCSI_NET_PARAM_IPV4_BOOTPROTO);
+iscsi_iface_net_attr(ipv4_iface, dhcp_dns_address_en,
+ ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN);
+iscsi_iface_net_attr(ipv4_iface, dhcp_slp_da_info_en,
+ ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN);
+iscsi_iface_net_attr(ipv4_iface, tos_en, ISCSI_NET_PARAM_IPV4_TOS_EN);
+iscsi_iface_net_attr(ipv4_iface, tos, ISCSI_NET_PARAM_IPV4_TOS);
+iscsi_iface_net_attr(ipv4_iface, grat_arp_en,
+ ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN);
+iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id_en,
+ ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN);
+iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id,
+ ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID);
+iscsi_iface_net_attr(ipv4_iface, dhcp_req_vendor_id_en,
+ ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN);
+iscsi_iface_net_attr(ipv4_iface, dhcp_use_vendor_id_en,
+ ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN);
+iscsi_iface_net_attr(ipv4_iface, dhcp_vendor_id,
+ ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID);
+iscsi_iface_net_attr(ipv4_iface, dhcp_learn_iqn_en,
+ ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN);
+iscsi_iface_net_attr(ipv4_iface, fragment_disable,
+ ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE);
+iscsi_iface_net_attr(ipv4_iface, incoming_forwarding_en,
+ ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN);
+iscsi_iface_net_attr(ipv4_iface, ttl, ISCSI_NET_PARAM_IPV4_TTL);
/* generic read only ipv6 attribute */
iscsi_iface_net_attr(ipv6_iface, ipaddress, ISCSI_NET_PARAM_IPV6_ADDR);
-iscsi_iface_net_attr(ipv6_iface, link_local_addr, ISCSI_NET_PARAM_IPV6_LINKLOCAL);
+iscsi_iface_net_attr(ipv6_iface, link_local_addr,
+ ISCSI_NET_PARAM_IPV6_LINKLOCAL);
iscsi_iface_net_attr(ipv6_iface, router_addr, ISCSI_NET_PARAM_IPV6_ROUTER);
iscsi_iface_net_attr(ipv6_iface, ipaddr_autocfg,
ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG);
iscsi_iface_net_attr(ipv6_iface, link_local_autocfg,
ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG);
+iscsi_iface_net_attr(ipv6_iface, link_local_state,
+ ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE);
+iscsi_iface_net_attr(ipv6_iface, router_state,
+ ISCSI_NET_PARAM_IPV6_ROUTER_STATE);
+iscsi_iface_net_attr(ipv6_iface, grat_neighbor_adv_en,
+ ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN);
+iscsi_iface_net_attr(ipv6_iface, mld_en, ISCSI_NET_PARAM_IPV6_MLD_EN);
+iscsi_iface_net_attr(ipv6_iface, flow_label, ISCSI_NET_PARAM_IPV6_FLOW_LABEL);
+iscsi_iface_net_attr(ipv6_iface, traffic_class,
+ ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS);
+iscsi_iface_net_attr(ipv6_iface, hop_limit, ISCSI_NET_PARAM_IPV6_HOP_LIMIT);
+iscsi_iface_net_attr(ipv6_iface, nd_reachable_tmo,
+ ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO);
+iscsi_iface_net_attr(ipv6_iface, nd_rexmit_time,
+ ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME);
+iscsi_iface_net_attr(ipv6_iface, nd_stale_tmo,
+ ISCSI_NET_PARAM_IPV6_ND_STALE_TMO);
+iscsi_iface_net_attr(ipv6_iface, dup_addr_detect_cnt,
+ ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT);
+iscsi_iface_net_attr(ipv6_iface, router_adv_link_mtu,
+ ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU);
/* common read only iface attribute */
iscsi_iface_net_attr(iface, enabled, ISCSI_NET_PARAM_IFACE_ENABLE);
@@ -327,6 +378,40 @@ iscsi_iface_net_attr(iface, vlan_priority, ISCSI_NET_PARAM_VLAN_PRIORITY);
iscsi_iface_net_attr(iface, vlan_enabled, ISCSI_NET_PARAM_VLAN_ENABLED);
iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU);
iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT);
+iscsi_iface_net_attr(iface, ipaddress_state, ISCSI_NET_PARAM_IPADDR_STATE);
+iscsi_iface_net_attr(iface, delayed_ack_en, ISCSI_NET_PARAM_DELAYED_ACK_EN);
+iscsi_iface_net_attr(iface, tcp_nagle_disable,
+ ISCSI_NET_PARAM_TCP_NAGLE_DISABLE);
+iscsi_iface_net_attr(iface, tcp_wsf_disable, ISCSI_NET_PARAM_TCP_WSF_DISABLE);
+iscsi_iface_net_attr(iface, tcp_wsf, ISCSI_NET_PARAM_TCP_WSF);
+iscsi_iface_net_attr(iface, tcp_timer_scale, ISCSI_NET_PARAM_TCP_TIMER_SCALE);
+iscsi_iface_net_attr(iface, tcp_timestamp_en, ISCSI_NET_PARAM_TCP_TIMESTAMP_EN);
+iscsi_iface_net_attr(iface, cache_id, ISCSI_NET_PARAM_CACHE_ID);
+iscsi_iface_net_attr(iface, redirect_en, ISCSI_NET_PARAM_REDIRECT_EN);
+
+/* common iscsi specific settings attributes */
+iscsi_iface_attr(iface, def_taskmgmt_tmo, ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO);
+iscsi_iface_attr(iface, header_digest, ISCSI_IFACE_PARAM_HDRDGST_EN);
+iscsi_iface_attr(iface, data_digest, ISCSI_IFACE_PARAM_DATADGST_EN);
+iscsi_iface_attr(iface, immediate_data, ISCSI_IFACE_PARAM_IMM_DATA_EN);
+iscsi_iface_attr(iface, initial_r2t, ISCSI_IFACE_PARAM_INITIAL_R2T_EN);
+iscsi_iface_attr(iface, data_seq_in_order,
+ ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN);
+iscsi_iface_attr(iface, data_pdu_in_order, ISCSI_IFACE_PARAM_PDU_INORDER_EN);
+iscsi_iface_attr(iface, erl, ISCSI_IFACE_PARAM_ERL);
+iscsi_iface_attr(iface, max_recv_dlength, ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH);
+iscsi_iface_attr(iface, first_burst_len, ISCSI_IFACE_PARAM_FIRST_BURST);
+iscsi_iface_attr(iface, max_outstanding_r2t, ISCSI_IFACE_PARAM_MAX_R2T);
+iscsi_iface_attr(iface, max_burst_len, ISCSI_IFACE_PARAM_MAX_BURST);
+iscsi_iface_attr(iface, chap_auth, ISCSI_IFACE_PARAM_CHAP_AUTH_EN);
+iscsi_iface_attr(iface, bidi_chap, ISCSI_IFACE_PARAM_BIDI_CHAP_EN);
+iscsi_iface_attr(iface, discovery_auth_optional,
+ ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL);
+iscsi_iface_attr(iface, discovery_logout,
+ ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN);
+iscsi_iface_attr(iface, strict_login_comp_en,
+ ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN);
+iscsi_iface_attr(iface, initiator_name, ISCSI_IFACE_PARAM_INITIATOR_NAME);
static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
@@ -335,6 +420,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
struct iscsi_transport *t = iface->transport;
int param;
+ int param_type;
if (attr == &dev_attr_iface_enabled.attr)
param = ISCSI_NET_PARAM_IFACE_ENABLE;
@@ -348,6 +434,60 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
param = ISCSI_NET_PARAM_MTU;
else if (attr == &dev_attr_iface_port.attr)
param = ISCSI_NET_PARAM_PORT;
+ else if (attr == &dev_attr_iface_ipaddress_state.attr)
+ param = ISCSI_NET_PARAM_IPADDR_STATE;
+ else if (attr == &dev_attr_iface_delayed_ack_en.attr)
+ param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
+ else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
+ param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
+ else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
+ param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
+ else if (attr == &dev_attr_iface_tcp_wsf.attr)
+ param = ISCSI_NET_PARAM_TCP_WSF;
+ else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
+ param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
+ else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
+ param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
+ else if (attr == &dev_attr_iface_cache_id.attr)
+ param = ISCSI_NET_PARAM_CACHE_ID;
+ else if (attr == &dev_attr_iface_redirect_en.attr)
+ param = ISCSI_NET_PARAM_REDIRECT_EN;
+ else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
+ param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
+ else if (attr == &dev_attr_iface_header_digest.attr)
+ param = ISCSI_IFACE_PARAM_HDRDGST_EN;
+ else if (attr == &dev_attr_iface_data_digest.attr)
+ param = ISCSI_IFACE_PARAM_DATADGST_EN;
+ else if (attr == &dev_attr_iface_immediate_data.attr)
+ param = ISCSI_IFACE_PARAM_IMM_DATA_EN;
+ else if (attr == &dev_attr_iface_initial_r2t.attr)
+ param = ISCSI_IFACE_PARAM_INITIAL_R2T_EN;
+ else if (attr == &dev_attr_iface_data_seq_in_order.attr)
+ param = ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN;
+ else if (attr == &dev_attr_iface_data_pdu_in_order.attr)
+ param = ISCSI_IFACE_PARAM_PDU_INORDER_EN;
+ else if (attr == &dev_attr_iface_erl.attr)
+ param = ISCSI_IFACE_PARAM_ERL;
+ else if (attr == &dev_attr_iface_max_recv_dlength.attr)
+ param = ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH;
+ else if (attr == &dev_attr_iface_first_burst_len.attr)
+ param = ISCSI_IFACE_PARAM_FIRST_BURST;
+ else if (attr == &dev_attr_iface_max_outstanding_r2t.attr)
+ param = ISCSI_IFACE_PARAM_MAX_R2T;
+ else if (attr == &dev_attr_iface_max_burst_len.attr)
+ param = ISCSI_IFACE_PARAM_MAX_BURST;
+ else if (attr == &dev_attr_iface_chap_auth.attr)
+ param = ISCSI_IFACE_PARAM_CHAP_AUTH_EN;
+ else if (attr == &dev_attr_iface_bidi_chap.attr)
+ param = ISCSI_IFACE_PARAM_BIDI_CHAP_EN;
+ else if (attr == &dev_attr_iface_discovery_auth_optional.attr)
+ param = ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL;
+ else if (attr == &dev_attr_iface_discovery_logout.attr)
+ param = ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN;
+ else if (attr == &dev_attr_iface_strict_login_comp_en.attr)
+ param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN;
+ else if (attr == &dev_attr_iface_initiator_name.attr)
+ param = ISCSI_IFACE_PARAM_INITIATOR_NAME;
else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
if (attr == &dev_attr_ipv4_iface_ipaddress.attr)
param = ISCSI_NET_PARAM_IPV4_ADDR;
@@ -357,6 +497,42 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
param = ISCSI_NET_PARAM_IPV4_SUBNET;
else if (attr == &dev_attr_ipv4_iface_bootproto.attr)
param = ISCSI_NET_PARAM_IPV4_BOOTPROTO;
+ else if (attr ==
+ &dev_attr_ipv4_iface_dhcp_dns_address_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN;
+ else if (attr ==
+ &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN;
+ else if (attr == &dev_attr_ipv4_iface_tos_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_TOS_EN;
+ else if (attr == &dev_attr_ipv4_iface_tos.attr)
+ param = ISCSI_NET_PARAM_IPV4_TOS;
+ else if (attr == &dev_attr_ipv4_iface_grat_arp_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN;
+ else if (attr ==
+ &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN;
+ else if (attr == &dev_attr_ipv4_iface_dhcp_alt_client_id.attr)
+ param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID;
+ else if (attr ==
+ &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN;
+ else if (attr ==
+ &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN;
+ else if (attr == &dev_attr_ipv4_iface_dhcp_vendor_id.attr)
+ param = ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID;
+ else if (attr ==
+ &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN;
+ else if (attr ==
+ &dev_attr_ipv4_iface_fragment_disable.attr)
+ param = ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE;
+ else if (attr ==
+ &dev_attr_ipv4_iface_incoming_forwarding_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN;
+ else if (attr == &dev_attr_ipv4_iface_ttl.attr)
+ param = ISCSI_NET_PARAM_IPV4_TTL;
else
return 0;
} else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) {
@@ -370,6 +546,31 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
param = ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG;
else if (attr == &dev_attr_ipv6_iface_link_local_autocfg.attr)
param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG;
+ else if (attr == &dev_attr_ipv6_iface_link_local_state.attr)
+ param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE;
+ else if (attr == &dev_attr_ipv6_iface_router_state.attr)
+ param = ISCSI_NET_PARAM_IPV6_ROUTER_STATE;
+ else if (attr ==
+ &dev_attr_ipv6_iface_grat_neighbor_adv_en.attr)
+ param = ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN;
+ else if (attr == &dev_attr_ipv6_iface_mld_en.attr)
+ param = ISCSI_NET_PARAM_IPV6_MLD_EN;
+ else if (attr == &dev_attr_ipv6_iface_flow_label.attr)
+ param = ISCSI_NET_PARAM_IPV6_FLOW_LABEL;
+ else if (attr == &dev_attr_ipv6_iface_traffic_class.attr)
+ param = ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS;
+ else if (attr == &dev_attr_ipv6_iface_hop_limit.attr)
+ param = ISCSI_NET_PARAM_IPV6_HOP_LIMIT;
+ else if (attr == &dev_attr_ipv6_iface_nd_reachable_tmo.attr)
+ param = ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO;
+ else if (attr == &dev_attr_ipv6_iface_nd_rexmit_time.attr)
+ param = ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME;
+ else if (attr == &dev_attr_ipv6_iface_nd_stale_tmo.attr)
+ param = ISCSI_NET_PARAM_IPV6_ND_STALE_TMO;
+ else if (attr == &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr)
+ param = ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT;
+ else if (attr == &dev_attr_ipv6_iface_router_adv_link_mtu.attr)
+ param = ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU;
else
return 0;
} else {
@@ -377,7 +578,32 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
return 0;
}
- return t->attr_is_visible(ISCSI_NET_PARAM, param);
+ switch (param) {
+ case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
+ case ISCSI_IFACE_PARAM_HDRDGST_EN:
+ case ISCSI_IFACE_PARAM_DATADGST_EN:
+ case ISCSI_IFACE_PARAM_IMM_DATA_EN:
+ case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
+ case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
+ case ISCSI_IFACE_PARAM_ERL:
+ case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_IFACE_PARAM_FIRST_BURST:
+ case ISCSI_IFACE_PARAM_MAX_R2T:
+ case ISCSI_IFACE_PARAM_MAX_BURST:
+ case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
+ case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
+ case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
+ case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
+ case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
+ case ISCSI_IFACE_PARAM_INITIATOR_NAME:
+ param_type = ISCSI_IFACE_PARAM;
+ break;
+ default:
+ param_type = ISCSI_NET_PARAM;
+ }
+
+ return t->attr_is_visible(param_type, param);
}
static struct attribute *iscsi_iface_attrs[] = {
@@ -396,6 +622,59 @@ static struct attribute *iscsi_iface_attrs[] = {
&dev_attr_ipv6_iface_link_local_autocfg.attr,
&dev_attr_iface_mtu.attr,
&dev_attr_iface_port.attr,
+ &dev_attr_iface_ipaddress_state.attr,
+ &dev_attr_iface_delayed_ack_en.attr,
+ &dev_attr_iface_tcp_nagle_disable.attr,
+ &dev_attr_iface_tcp_wsf_disable.attr,
+ &dev_attr_iface_tcp_wsf.attr,
+ &dev_attr_iface_tcp_timer_scale.attr,
+ &dev_attr_iface_tcp_timestamp_en.attr,
+ &dev_attr_iface_cache_id.attr,
+ &dev_attr_iface_redirect_en.attr,
+ &dev_attr_iface_def_taskmgmt_tmo.attr,
+ &dev_attr_iface_header_digest.attr,
+ &dev_attr_iface_data_digest.attr,
+ &dev_attr_iface_immediate_data.attr,
+ &dev_attr_iface_initial_r2t.attr,
+ &dev_attr_iface_data_seq_in_order.attr,
+ &dev_attr_iface_data_pdu_in_order.attr,
+ &dev_attr_iface_erl.attr,
+ &dev_attr_iface_max_recv_dlength.attr,
+ &dev_attr_iface_first_burst_len.attr,
+ &dev_attr_iface_max_outstanding_r2t.attr,
+ &dev_attr_iface_max_burst_len.attr,
+ &dev_attr_iface_chap_auth.attr,
+ &dev_attr_iface_bidi_chap.attr,
+ &dev_attr_iface_discovery_auth_optional.attr,
+ &dev_attr_iface_discovery_logout.attr,
+ &dev_attr_iface_strict_login_comp_en.attr,
+ &dev_attr_iface_initiator_name.attr,
+ &dev_attr_ipv4_iface_dhcp_dns_address_en.attr,
+ &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr,
+ &dev_attr_ipv4_iface_tos_en.attr,
+ &dev_attr_ipv4_iface_tos.attr,
+ &dev_attr_ipv4_iface_grat_arp_en.attr,
+ &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr,
+ &dev_attr_ipv4_iface_dhcp_alt_client_id.attr,
+ &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr,
+ &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr,
+ &dev_attr_ipv4_iface_dhcp_vendor_id.attr,
+ &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr,
+ &dev_attr_ipv4_iface_fragment_disable.attr,
+ &dev_attr_ipv4_iface_incoming_forwarding_en.attr,
+ &dev_attr_ipv4_iface_ttl.attr,
+ &dev_attr_ipv6_iface_link_local_state.attr,
+ &dev_attr_ipv6_iface_router_state.attr,
+ &dev_attr_ipv6_iface_grat_neighbor_adv_en.attr,
+ &dev_attr_ipv6_iface_mld_en.attr,
+ &dev_attr_ipv6_iface_flow_label.attr,
+ &dev_attr_ipv6_iface_traffic_class.attr,
+ &dev_attr_ipv6_iface_hop_limit.attr,
+ &dev_attr_ipv6_iface_nd_reachable_tmo.attr,
+ &dev_attr_ipv6_iface_nd_rexmit_time.attr,
+ &dev_attr_ipv6_iface_nd_stale_tmo.attr,
+ &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr,
+ &dev_attr_ipv6_iface_router_adv_link_mtu.attr,
NULL,
};
@@ -404,6 +683,61 @@ static struct attribute_group iscsi_iface_group = {
.is_visible = iscsi_iface_attr_is_visible,
};
+/* convert iscsi_ipaddress_state values to ascii string name */
+static const struct {
+ enum iscsi_ipaddress_state value;
+ char *name;
+} iscsi_ipaddress_state_names[] = {
+ {ISCSI_IPDDRESS_STATE_UNCONFIGURED, "Unconfigured" },
+ {ISCSI_IPDDRESS_STATE_ACQUIRING, "Acquiring" },
+ {ISCSI_IPDDRESS_STATE_TENTATIVE, "Tentative" },
+ {ISCSI_IPDDRESS_STATE_VALID, "Valid" },
+ {ISCSI_IPDDRESS_STATE_DISABLING, "Disabling" },
+ {ISCSI_IPDDRESS_STATE_INVALID, "Invalid" },
+ {ISCSI_IPDDRESS_STATE_DEPRECATED, "Deprecated" },
+};
+
+char *iscsi_get_ipaddress_state_name(enum iscsi_ipaddress_state port_state)
+{
+ int i;
+ char *state = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(iscsi_ipaddress_state_names); i++) {
+ if (iscsi_ipaddress_state_names[i].value == port_state) {
+ state = iscsi_ipaddress_state_names[i].name;
+ break;
+ }
+ }
+ return state;
+}
+EXPORT_SYMBOL_GPL(iscsi_get_ipaddress_state_name);
+
+/* convert iscsi_router_state values to ascii string name */
+static const struct {
+ enum iscsi_router_state value;
+ char *name;
+} iscsi_router_state_names[] = {
+ {ISCSI_ROUTER_STATE_UNKNOWN, "Unknown" },
+ {ISCSI_ROUTER_STATE_ADVERTISED, "Advertised" },
+ {ISCSI_ROUTER_STATE_MANUAL, "Manual" },
+ {ISCSI_ROUTER_STATE_STALE, "Stale" },
+};
+
+char *iscsi_get_router_state_name(enum iscsi_router_state router_state)
+{
+ int i;
+ char *state = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(iscsi_router_state_names); i++) {
+ if (iscsi_router_state_names[i].value == router_state) {
+ state = iscsi_router_state_names[i].name;
+ break;
+ }
+ }
+ return state;
+}
+EXPORT_SYMBOL_GPL(iscsi_get_router_state_name);
+
struct iscsi_iface *
iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport,
uint32_t iface_type, uint32_t iface_num, int dd_size)
@@ -3082,6 +3416,73 @@ exit_logout_sid:
}
static int
+iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
+{
+ struct iscsi_uevent *ev = nlmsg_data(nlh);
+ struct Scsi_Host *shost = NULL;
+ struct iscsi_internal *priv;
+ struct sk_buff *skbhost_stats;
+ struct nlmsghdr *nlhhost_stats;
+ struct iscsi_uevent *evhost_stats;
+ int host_stats_size = 0;
+ int len, err = 0;
+ char *buf;
+
+ if (!transport->get_host_stats)
+ return -EINVAL;
+
+ priv = iscsi_if_transport_lookup(transport);
+ if (!priv)
+ return -EINVAL;
+
+ host_stats_size = sizeof(struct iscsi_offload_host_stats);
+ len = nlmsg_total_size(sizeof(*ev) + host_stats_size);
+
+ shost = scsi_host_lookup(ev->u.get_host_stats.host_no);
+ if (!shost) {
+ pr_err("%s: failed. Could not find host no %u\n",
+ __func__, ev->u.get_host_stats.host_no);
+ return -ENODEV;
+ }
+
+ do {
+ int actual_size;
+
+ skbhost_stats = alloc_skb(len, GFP_KERNEL);
+ if (!skbhost_stats) {
+ pr_err("cannot deliver host stats: OOM\n");
+ err = -ENOMEM;
+ goto exit_host_stats;
+ }
+
+ nlhhost_stats = __nlmsg_put(skbhost_stats, 0, 0, 0,
+ (len - sizeof(*nlhhost_stats)), 0);
+ evhost_stats = nlmsg_data(nlhhost_stats);
+ memset(evhost_stats, 0, sizeof(*evhost_stats));
+ evhost_stats->transport_handle = iscsi_handle(transport);
+ evhost_stats->type = nlh->nlmsg_type;
+ evhost_stats->u.get_host_stats.host_no =
+ ev->u.get_host_stats.host_no;
+ buf = (char *)((char *)evhost_stats + sizeof(*evhost_stats));
+ memset(buf, 0, host_stats_size);
+
+ err = transport->get_host_stats(shost, buf, host_stats_size);
+
+ actual_size = nlmsg_total_size(sizeof(*ev) + host_stats_size);
+ skb_trim(skbhost_stats, NLMSG_ALIGN(actual_size));
+ nlhhost_stats->nlmsg_len = actual_size;
+
+ err = iscsi_multicast_skb(skbhost_stats, ISCSI_NL_GRP_ISCSID,
+ GFP_KERNEL);
+ } while (err < 0 && err != -ECONNREFUSED);
+
+exit_host_stats:
+ scsi_host_put(shost);
+ return err;
+}
+
+
+static int
iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
{
int err = 0;
@@ -3260,6 +3661,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
err = iscsi_set_chap(transport, ev,
nlmsg_attrlen(nlh, sizeof(*ev)));
break;
+ case ISCSI_UEVENT_GET_HOST_STATS:
+ err = iscsi_get_host_stats(transport, nlh);
+ break;
default:
err = -ENOSYS;
break;
@@ -3368,6 +3772,7 @@ iscsi_conn_attr(ipv6_flow_label, ISCSI_PARAM_IPV6_FLOW_LABEL);
iscsi_conn_attr(is_fw_assigned_ipv6, ISCSI_PARAM_IS_FW_ASSIGNED_IPV6);
iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF);
iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF);
+iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR);
#define iscsi_conn_ep_attr_show(param) \
@@ -3437,6 +3842,7 @@ static struct attribute *iscsi_conn_attrs[] = {
&dev_attr_conn_is_fw_assigned_ipv6.attr,
&dev_attr_conn_tcp_xmit_wsf.attr,
&dev_attr_conn_tcp_recv_wsf.attr,
+ &dev_attr_conn_local_ipaddr.attr,
NULL,
};
@@ -3506,6 +3912,8 @@ static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
param = ISCSI_PARAM_TCP_XMIT_WSF;
else if (attr == &dev_attr_conn_tcp_recv_wsf.attr)
param = ISCSI_PARAM_TCP_RECV_WSF;
+ else if (attr == &dev_attr_conn_local_ipaddr.attr)
+ param = ISCSI_PARAM_LOCAL_IPADDR;
else {
WARN_ONCE(1, "Invalid conn attr");
return 0;
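
Since the transport class above now passes either ISCSI_NET_PARAM or ISCSI_IFACE_PARAM as the first argument of attr_is_visible(), an iSCSI LLD has to dispatch on both parameter types. A rough sketch of such a callback is shown below; the "foo" transport and the particular parameters it exposes are hypothetical, only the callback signature and the enum names come from the hunks above.

#include <linux/module.h>
#include <linux/stat.h>
#include <scsi/iscsi_if.h>
#include <scsi/scsi_transport_iscsi.h>

static umode_t foo_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_NET_PARAM:
		switch (param) {
		case ISCSI_NET_PARAM_IPV4_ADDR:
		case ISCSI_NET_PARAM_IPV4_GW:
			return S_IRUGO;
		}
		return 0;
	case ISCSI_IFACE_PARAM:
		switch (param) {
		case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
		case ISCSI_IFACE_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		}
		return 0;
	}
	return 0;
}

static struct iscsi_transport foo_iscsi_transport = {
	.owner		 = THIS_MODULE,
	.name		 = "foo",
	.attr_is_visible = foo_attr_is_visible,
};
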
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 2700a5a09bd4..d47ffc8d3e43 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -64,10 +64,14 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
/**
* srp_tmo_valid() - check timeout combination validity
+ * @reconnect_delay: Reconnect delay in seconds.
+ * @fast_io_fail_tmo: Fast I/O fail timeout in seconds.
+ * @dev_loss_tmo: Device loss timeout in seconds.
*
* The combination of the timeout parameters must be such that SCSI commands
* are finished in a reasonable time. Hence do not allow the fast I/O fail
- * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT. Furthermore, these
+ * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT nor allow dev_loss_tmo to
+ * exceed that limit if failing I/O fast has been disabled. Furthermore, these
* parameters must be such that multipath can detect failed paths timely.
* Hence do not allow all three parameters to be disabled simultaneously.
*/
@@ -79,6 +83,9 @@ int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo)
return -EINVAL;
if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
return -EINVAL;
+ if (fast_io_fail_tmo < 0 &&
+ dev_loss_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
+ return -EINVAL;
if (dev_loss_tmo >= LONG_MAX / HZ)
return -EINVAL;
if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
@@ -368,6 +375,7 @@ invalid:
/**
* srp_reconnect_work() - reconnect and schedule a new attempt if necessary
+ * @work: Work structure used for scheduling this operation.
*/
static void srp_reconnect_work(struct work_struct *work)
{
@@ -408,6 +416,7 @@ static void __rport_fail_io_fast(struct srp_rport *rport)
/**
* rport_fast_io_fail_timedout() - fast I/O failure timeout handler
+ * @work: Work structure used for scheduling this operation.
*/
static void rport_fast_io_fail_timedout(struct work_struct *work)
{
@@ -426,6 +435,7 @@ static void rport_fast_io_fail_timedout(struct work_struct *work)
/**
* rport_dev_loss_timedout() - device loss timeout handler
+ * @work: Work structure used for scheduling this operation.
*/
static void rport_dev_loss_timedout(struct work_struct *work)
{
@@ -452,42 +462,35 @@ static void __srp_start_tl_fail_timers(struct srp_rport *rport)
lockdep_assert_held(&rport->mutex);
- if (!rport->deleted) {
- delay = rport->reconnect_delay;
- fast_io_fail_tmo = rport->fast_io_fail_tmo;
- dev_loss_tmo = rport->dev_loss_tmo;
- pr_debug("%s current state: %d\n",
- dev_name(&shost->shost_gendev), rport->state);
+ delay = rport->reconnect_delay;
+ fast_io_fail_tmo = rport->fast_io_fail_tmo;
+ dev_loss_tmo = rport->dev_loss_tmo;
+ pr_debug("%s current state: %d\n", dev_name(&shost->shost_gendev),
+ rport->state);
- if (delay > 0)
- queue_delayed_work(system_long_wq,
- &rport->reconnect_work,
- 1UL * delay * HZ);
- if (fast_io_fail_tmo >= 0 &&
- srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
- pr_debug("%s new state: %d\n",
- dev_name(&shost->shost_gendev),
- rport->state);
- scsi_target_block(&shost->shost_gendev);
+ if (rport->state == SRP_RPORT_LOST)
+ return;
+ if (delay > 0)
+ queue_delayed_work(system_long_wq, &rport->reconnect_work,
+ 1UL * delay * HZ);
+ if (srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
+ pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
+ rport->state);
+ scsi_target_block(&shost->shost_gendev);
+ if (fast_io_fail_tmo >= 0)
queue_delayed_work(system_long_wq,
&rport->fast_io_fail_work,
1UL * fast_io_fail_tmo * HZ);
- }
if (dev_loss_tmo >= 0)
queue_delayed_work(system_long_wq,
&rport->dev_loss_work,
1UL * dev_loss_tmo * HZ);
- } else {
- pr_debug("%s has already been deleted\n",
- dev_name(&shost->shost_gendev));
- srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST);
- scsi_target_unblock(&shost->shost_gendev,
- SDEV_TRANSPORT_OFFLINE);
}
}
/**
* srp_start_tl_fail_timers() - start the transport layer failure timers
+ * @rport: SRP target port.
*
* Start the transport layer fast I/O failure and device loss timers. Do not
* modify a timer that was already started.
@@ -502,6 +505,7 @@ EXPORT_SYMBOL(srp_start_tl_fail_timers);
/**
* scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
+ * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
*/
static int scsi_request_fn_active(struct Scsi_Host *shost)
{
@@ -522,6 +526,7 @@ static int scsi_request_fn_active(struct Scsi_Host *shost)
/**
* srp_reconnect_rport() - reconnect to an SRP target port
+ * @rport: SRP target port.
*
* Blocks SCSI command queueing before invoking reconnect() such that
* queuecommand() won't be invoked concurrently with reconnect() from outside
@@ -556,7 +561,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
scsi_target_block(&shost->shost_gendev);
while (scsi_request_fn_active(shost))
msleep(20);
- res = i->f->reconnect(rport);
+ res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res);
if (res == 0) {
@@ -578,9 +583,9 @@ int srp_reconnect_rport(struct srp_rport *rport)
spin_unlock_irq(shost->host_lock);
} else if (rport->state == SRP_RPORT_RUNNING) {
/*
- * srp_reconnect_rport() was invoked with fast_io_fail
- * off. Mark the port as failed and start the TL failure
- * timers if these had not yet been started.
+ * srp_reconnect_rport() has been invoked with fast_io_fail
+ * and dev_loss off. Mark the port as failed and start the TL
+ * failure timers if these had not yet been started.
*/
__rport_fail_io_fast(rport);
scsi_target_unblock(&shost->shost_gendev,
@@ -599,6 +604,7 @@ EXPORT_SYMBOL(srp_reconnect_rport);
/**
* srp_timed_out() - SRP transport intercept of the SCSI timeout EH
+ * @scmd: SCSI command.
*
* If a timeout occurs while an rport is in the blocked state, ask the SCSI
* EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core
@@ -622,10 +628,6 @@ static void srp_rport_release(struct device *dev)
{
struct srp_rport *rport = dev_to_rport(dev);
- cancel_delayed_work_sync(&rport->reconnect_work);
- cancel_delayed_work_sync(&rport->fast_io_fail_work);
- cancel_delayed_work_sync(&rport->dev_loss_work);
-
put_device(dev->parent);
kfree(rport);
}
@@ -674,6 +676,7 @@ static int srp_host_match(struct attribute_container *cont, struct device *dev)
/**
* srp_rport_get() - increment rport reference count
+ * @rport: SRP target port.
*/
void srp_rport_get(struct srp_rport *rport)
{
@@ -683,6 +686,7 @@ EXPORT_SYMBOL(srp_rport_get);
/**
* srp_rport_put() - decrement rport reference count
+ * @rport: SRP target port.
*/
void srp_rport_put(struct srp_rport *rport)
{
@@ -780,12 +784,6 @@ void srp_rport_del(struct srp_rport *rport)
device_del(dev);
transport_destroy_device(dev);
- mutex_lock(&rport->mutex);
- if (rport->state == SRP_RPORT_BLOCKED)
- __rport_fail_io_fast(rport);
- rport->deleted = true;
- mutex_unlock(&rport->mutex);
-
put_device(dev);
}
EXPORT_SYMBOL_GPL(srp_rport_del);
@@ -810,6 +808,27 @@ void srp_remove_host(struct Scsi_Host *shost)
}
EXPORT_SYMBOL_GPL(srp_remove_host);
+/**
+ * srp_stop_rport_timers - stop the transport layer recovery timers
+ * @rport: SRP target port.
+ *
+ * Must be called after srp_remove_host() and scsi_remove_host(). The caller
+ * must hold a reference on the rport (rport->dev) and on the SCSI host
+ * (rport->dev.parent).
+ */
+void srp_stop_rport_timers(struct srp_rport *rport)
+{
+ mutex_lock(&rport->mutex);
+ if (rport->state == SRP_RPORT_BLOCKED)
+ __rport_fail_io_fast(rport);
+ srp_rport_set_state(rport, SRP_RPORT_LOST);
+ mutex_unlock(&rport->mutex);
+
+ cancel_delayed_work_sync(&rport->reconnect_work);
+ cancel_delayed_work_sync(&rport->fast_io_fail_work);
+ cancel_delayed_work_sync(&rport->dev_loss_work);
+}
+EXPORT_SYMBOL_GPL(srp_stop_rport_timers);
+
static int srp_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
int result)
{
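
The srp_stop_rport_timers() export above replaces the cancel_delayed_work_sync() calls that used to live in srp_rport_release(), so an SRP initiator is now expected to stop the recovery timers itself during teardown, after removing the host but while it still holds references on the rport and the Scsi_Host. A hypothetical removal path could look like this; the "foo" helper is illustrative, the call order follows the kernel-doc above.

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_srp.h>

static void foo_remove_target(struct Scsi_Host *shost, struct srp_rport *rport)
{
	srp_remove_host(shost);		/* detach from the SRP transport class */
	scsi_remove_host(shost);	/* tear down the SCSI host */
	srp_stop_rport_timers(rport);	/* cancel reconnect/fast-fail/dev-loss work */
	scsi_host_put(shost);		/* drop the caller's host reference */
}
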
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 69725f7c32c1..470954aba728 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -110,7 +110,7 @@ static int sd_suspend_runtime(struct device *);
static int sd_resume(struct device *);
static void sd_rescan(struct device *);
static int sd_done(struct scsi_cmnd *);
-static int sd_eh_action(struct scsi_cmnd *, unsigned char *, int, int);
+static int sd_eh_action(struct scsi_cmnd *, int);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);
static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
@@ -801,7 +801,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
if (sdkp->device->no_write_same)
return BLKPREP_KILL;
- BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size);
+ BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
sector >>= ilog2(sdp->sector_size) - 9;
nr_sectors >>= ilog2(sdp->sector_size) - 9;
@@ -1551,23 +1551,23 @@ static const struct block_device_operations sd_fops = {
/**
* sd_eh_action - error handling callback
* @scmd: sd-issued command that has failed
- * @eh_cmnd: The command that was sent during error handling
- * @eh_cmnd_len: Length of eh_cmnd in bytes
* @eh_disp: The recovery disposition suggested by the midlayer
*
- * This function is called by the SCSI midlayer upon completion of
- * an error handling command (TEST UNIT READY, START STOP UNIT,
- * etc.) The command sent to the device by the error handler is
- * stored in eh_cmnd. The result of sending the eh command is
- * passed in eh_disp.
+ * This function is called by the SCSI midlayer upon completion of an
+ * error test command (currently TEST UNIT READY). The result of sending
+ * the eh command is passed in eh_disp. We're looking for devices that
+ * fail medium access commands but are OK with non-access commands like
+ * TEST UNIT READY (and so would wrongly appear to have recovered
+ * successfully).
**/
-static int sd_eh_action(struct scsi_cmnd *scmd, unsigned char *eh_cmnd,
- int eh_cmnd_len, int eh_disp)
+static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
{
struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
if (!scsi_device_online(scmd->device) ||
- !scsi_medium_access_command(scmd))
+ !scsi_medium_access_command(scmd) ||
+ host_byte(scmd->result) != DID_TIME_OUT ||
+ eh_disp != SUCCESS)
return eh_disp;
/*
@@ -1577,9 +1577,7 @@ static int sd_eh_action(struct scsi_cmnd *scmd, unsigned char *eh_cmnd,
* process of recovering or has it suffered an internal failure
* that prevents access to the storage medium.
*/
- if (host_byte(scmd->result) == DID_TIME_OUT && eh_disp == SUCCESS &&
- eh_cmnd_len && eh_cmnd[0] == TEST_UNIT_READY)
- sdkp->medium_access_timed_out++;
+ sdkp->medium_access_timed_out++;
/*
* If the device keeps failing read/write commands but TEST UNIT
@@ -1628,7 +1626,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
end_lba <<= 1;
} else {
/* be careful ... don't want any overflows */
- u64 factor = scmd->device->sector_size / 512;
+ unsigned int factor = scmd->device->sector_size / 512;
do_div(start_lba, factor);
do_div(end_lba, factor);
}
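
The sd.c change narrows the ->eh_action() callback to (scmd, eh_disp); the command sent by error handling is no longer passed in, since the new scsi_eh_action() helper in scsi_error.c decides when the hook is called. The hook is still registered through struct scsi_driver, roughly as in the hypothetical sketch below (sd.c wires up sd_eh_action the same way in its sd_template, which lies outside this hunk).

#include <linux/module.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_driver.h>

static int foo_eh_action(struct scsi_cmnd *scmd, int eh_disp)
{
	/* e.g. track devices that keep timing out on medium access commands */
	return eh_disp;
}

static struct scsi_driver foo_template = {
	.owner	= THIS_MODULE,
	.gendrv	= {
		.name	= "foo",
	},
	.eh_action = foo_eh_action,
};
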
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 6174ca4ea275..a7a691d0af7d 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -365,7 +365,6 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
struct bio *bio;
struct scsi_disk *sdkp;
struct sd_dif_tuple *sdt;
- unsigned int i, j;
u32 phys, virt;
sdkp = rq->bio->bi_bdev->bd_disk->private_data;
@@ -376,19 +375,21 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
phys = hw_sector & 0xffffffff;
__rq_for_each_bio(bio, rq) {
- struct bio_vec *iv;
+ struct bio_vec iv;
+ struct bvec_iter iter;
+ unsigned int j;
/* Already remapped? */
if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))
break;
- virt = bio->bi_integrity->bip_sector & 0xffffffff;
+ virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
- bip_for_each_vec(iv, bio->bi_integrity, i) {
- sdt = kmap_atomic(iv->bv_page)
- + iv->bv_offset;
+ bip_for_each_vec(iv, bio->bi_integrity, iter) {
+ sdt = kmap_atomic(iv.bv_page)
+ + iv.bv_offset;
- for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+ for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
if (be32_to_cpu(sdt->ref_tag) == virt)
sdt->ref_tag = cpu_to_be32(phys);
@@ -414,7 +415,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
struct scsi_disk *sdkp;
struct bio *bio;
struct sd_dif_tuple *sdt;
- unsigned int i, j, sectors, sector_sz;
+ unsigned int j, sectors, sector_sz;
u32 phys, virt;
sdkp = scsi_disk(scmd->request->rq_disk);
@@ -430,15 +431,16 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
phys >>= 3;
__rq_for_each_bio(bio, scmd->request) {
- struct bio_vec *iv;
+ struct bio_vec iv;
+ struct bvec_iter iter;
- virt = bio->bi_integrity->bip_sector & 0xffffffff;
+ virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
- bip_for_each_vec(iv, bio->bi_integrity, i) {
- sdt = kmap_atomic(iv->bv_page)
- + iv->bv_offset;
+ bip_for_each_vec(iv, bio->bi_integrity, iter) {
+ sdt = kmap_atomic(iv.bv_page)
+ + iv.bv_offset;
- for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+ for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
if (sectors == 0) {
kunmap_atomic(sdt);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 119d67f9c47e..40d85929aefe 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -161,14 +161,10 @@ static inline struct scsi_cd *scsi_cd_get(struct gendisk *disk)
goto out;
cd = scsi_cd(disk);
kref_get(&cd->kref);
- if (scsi_device_get(cd->device))
- goto out_put;
- if (!scsi_autopm_get_device(cd->device))
- goto out;
-
- out_put:
- kref_put(&cd->kref, sr_kref_release);
- cd = NULL;
+ if (scsi_device_get(cd->device)) {
+ kref_put(&cd->kref, sr_kref_release);
+ cd = NULL;
+ }
out:
mutex_unlock(&sr_ref_mutex);
return cd;
@@ -180,7 +176,6 @@ static void scsi_cd_put(struct scsi_cd *cd)
mutex_lock(&sr_ref_mutex);
kref_put(&cd->kref, sr_kref_release);
- scsi_autopm_put_device(sdev);
scsi_device_put(sdev);
mutex_unlock(&sr_ref_mutex);
}
@@ -558,8 +553,6 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
void __user *argp = (void __user *)arg;
int ret;
- scsi_autopm_get_device(cd->device);
-
mutex_lock(&sr_mutex);
/*
@@ -591,7 +584,6 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
out:
mutex_unlock(&sr_mutex);
- scsi_autopm_put_device(cd->device);
return ret;
}
@@ -599,17 +591,11 @@ static unsigned int sr_block_check_events(struct gendisk *disk,
unsigned int clearing)
{
struct scsi_cd *cd = scsi_cd(disk);
- unsigned int ret;
- if (atomic_read(&cd->device->disk_events_disable_depth) == 0) {
- scsi_autopm_get_device(cd->device);
- ret = cdrom_check_events(&cd->cdi, clearing);
- scsi_autopm_put_device(cd->device);
- } else {
- ret = 0;
- }
+ if (atomic_read(&cd->device->disk_events_disable_depth))
+ return 0;
- return ret;
+ return cdrom_check_events(&cd->cdi, clearing);
}
static int sr_block_revalidate_disk(struct gendisk *disk)
@@ -617,8 +603,6 @@ static int sr_block_revalidate_disk(struct gendisk *disk)
struct scsi_cd *cd = scsi_cd(disk);
struct scsi_sense_hdr sshdr;
- scsi_autopm_get_device(cd->device);
-
/* if the unit is not ready, nothing more to do */
if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
goto out;
@@ -626,7 +610,6 @@ static int sr_block_revalidate_disk(struct gendisk *disk)
sr_cd_check(&cd->cdi);
get_sectorsize(cd);
out:
- scsi_autopm_put_device(cd->device);
return 0;
}
@@ -747,6 +730,12 @@ static int sr_probe(struct device *dev)
if (register_cdrom(&cd->cdi))
goto fail_put;
+ /*
+	 * Initialize block layer runtime PM before the
+ * periodic event checking request gets started in add_disk.
+ */
+ blk_pm_runtime_init(sdev->request_queue, dev);
+
dev_set_drvdata(dev, cd);
disk->flags |= GENHD_FL_REMOVABLE;
add_disk(disk);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index ff44b3c2cff2..a1d6986261a3 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3719,7 +3719,7 @@ static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma)
{
- int segs, nbr, max_segs, b_size, order, got;
+ int segs, max_segs, b_size, order, got;
gfp_t priority;
if (new_size <= STbuffer->buffer_size)
@@ -3729,9 +3729,6 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
normalize_buffer(STbuffer); /* Avoid extra segment */
max_segs = STbuffer->use_sg;
- nbr = max_segs - STbuffer->frp_segs;
- if (nbr <= 0)
- return 0;
priority = GFP_KERNEL | __GFP_NOWARN;
if (need_dma)
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index bac55f7f69f9..6d3ee1ab6362 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1531,7 +1531,7 @@ static int sym_iomap_device(struct sym_device *device)
struct pci_bus_region bus_addr;
int i = 2;
- pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[1]);
+ pcibios_resource_to_bus(pdev->bus, &bus_addr, &pdev->resource[1]);
device->mmio_base = bus_addr.start;
if (device->chip.features & FE_RAM) {
@@ -1541,7 +1541,8 @@ static int sym_iomap_device(struct sym_device *device)
*/
if (!pdev->resource[i].flags)
i++;
- pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[i]);
+ pcibios_resource_to_bus(pdev->bus, &bus_addr,
+ &pdev->resource[i]);
device->ram_base = bus_addr.start;
}
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index c3173dced870..16bfd50cd3fe 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -956,6 +956,10 @@ static void virtscsi_remove(struct virtio_device *vdev)
#ifdef CONFIG_PM_SLEEP
static int virtscsi_freeze(struct virtio_device *vdev)
{
+ struct Scsi_Host *sh = virtio_scsi_host(vdev);
+ struct virtio_scsi *vscsi = shost_priv(sh);
+
+ unregister_hotcpu_notifier(&vscsi->nb);
virtscsi_remove_vqs(vdev);
return 0;
}
@@ -964,8 +968,17 @@ static int virtscsi_restore(struct virtio_device *vdev)
{
struct Scsi_Host *sh = virtio_scsi_host(vdev);
struct virtio_scsi *vscsi = shost_priv(sh);
+ int err;
+
+ err = virtscsi_init(vdev, vscsi);
+ if (err)
+ return err;
+
+ err = register_hotcpu_notifier(&vscsi->nb);
+ if (err)
+ vdev->config->del_vqs(vdev);
- return virtscsi_init(vdev, vscsi);
+ return err;
}
#endif
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index cbf3476c68cd..aff31991aea9 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -104,7 +104,7 @@ static int zorro7xx_init_one(struct zorro_dev *z,
if (ioaddr > 0x01000000)
hostdata->base = ioremap(ioaddr, zorro_resource_len(z));
else
- hostdata->base = (void __iomem *)ZTWO_VADDR(ioaddr);
+ hostdata->base = ZTWO_VADDR(ioaddr);
hostdata->clock = 50;
hostdata->chip710 = 1;
diff --git a/drivers/sfi/sfi_acpi.c b/drivers/sfi/sfi_acpi.c
index f5b4ca581541..d277b36eb389 100644
--- a/drivers/sfi/sfi_acpi.c
+++ b/drivers/sfi/sfi_acpi.c
@@ -60,9 +60,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
-#include <acpi/acpi.h>
-
-#include <linux/sfi.h>
+#include <linux/sfi_acpi.h>
#include "sfi_core.h"
/*
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index eb1f1ef5fa2e..581ee2a8856b 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -118,6 +118,13 @@ config SPI_BCM63XX
help
Enable support for the SPI controller on the Broadcom BCM63xx SoCs.
+config SPI_BCM63XX_HSSPI
+ tristate "Broadcom BCM63XX HS SPI controller driver"
+ depends on BCM63XX || COMPILE_TEST
+ help
+ This enables support for the High Speed SPI controller present on
+ newer Broadcom BCM63XX SoCs.
+
config SPI_BITBANG
tristate "Utilities for Bitbanging SPI masters"
help
@@ -159,7 +166,6 @@ config SPI_DAVINCI
tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
depends on ARCH_DAVINCI || ARCH_KEYSTONE
select SPI_BITBANG
- select TI_EDMA
help
SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
@@ -301,6 +307,7 @@ config SPI_OMAP_UWIRE
config SPI_OMAP24XX
tristate "McSPI driver for OMAP"
+ depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SH
depends on ARCH_OMAP2PLUS || COMPILE_TEST
help
SPI master controller for OMAP24XX and later Multichannel SPI
@@ -369,10 +376,10 @@ config SPI_PXA2XX_PCI
def_tristate SPI_PXA2XX && PCI
config SPI_RSPI
- tristate "Renesas RSPI controller"
- depends on (SUPERH || ARCH_SHMOBILE) && SH_DMAE_BASE
+ tristate "Renesas RSPI/QSPI controller"
+ depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE
help
- SPI driver for Renesas RSPI blocks.
+ SPI driver for Renesas RSPI and QSPI blocks.
config SPI_S3C24XX
tristate "Samsung S3C24XX series SPI"
@@ -395,7 +402,7 @@ config SPI_S3C24XX_FIQ
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
depends on PLAT_SAMSUNG
- select S3C64XX_DMA if ARCH_S3C64XX
+ select S3C64XX_PL080 if ARCH_S3C64XX
help
SPI driver for Samsung S3C64XX and newer SoCs.
@@ -407,7 +414,8 @@ config SPI_SC18IS602
config SPI_SH_MSIOF
tristate "SuperH MSIOF SPI controller"
- depends on (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
+ depends on HAVE_CLK
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
select SPI_BITBANG
help
SPI driver for SuperH and SH Mobile MSIOF blocks.
@@ -448,6 +456,7 @@ config SPI_MXS
config SPI_TEGRA114
tristate "NVIDIA Tegra114 SPI Controller"
depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
+ depends on RESET_CONTROLLER
help
SPI driver for NVIDIA Tegra114 SPI Controller interface. This controller
is different than the older SoCs SPI controller and also register interface
@@ -456,6 +465,7 @@ config SPI_TEGRA114
config SPI_TEGRA20_SFLASH
tristate "Nvidia Tegra20 Serial flash Controller"
depends on ARCH_TEGRA || COMPILE_TEST
+ depends on RESET_CONTROLLER
help
SPI driver for Nvidia Tegra20 Serial flash Controller interface.
The main usecase of this controller is to use spi flash as boot
@@ -464,6 +474,7 @@ config SPI_TEGRA20_SFLASH
config SPI_TEGRA20_SLINK
tristate "Nvidia Tegra20/Tegra30 SLINK Controller"
depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
+ depends on RESET_CONTROLLER
help
SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index ab8d8644af0e..95af48d2d360 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
+obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o
obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o
obj-$(CONFIG_SPI_BFIN_V3) += spi-bfin-v3.o
obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 595b62cb545d..5d7deaf62867 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -220,8 +220,6 @@ static int altera_spi_probe(struct platform_device *pdev)
/* setup the state for the bitbang driver */
hw->bitbang.master = master;
- if (!hw->bitbang.master)
- return err;
hw->bitbang.chipselect = altera_spi_chipsel;
hw->bitbang.txrx_bufs = altera_spi_txrx;
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 821bf7ac218d..31534b51715a 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -243,21 +243,21 @@ static int ath79_spi_probe(struct platform_device *pdev)
goto err_put_master;
}
- sp->base = ioremap(r->start, resource_size(r));
+ sp->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!sp->base) {
ret = -ENXIO;
goto err_put_master;
}
- sp->clk = clk_get(&pdev->dev, "ahb");
+ sp->clk = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(sp->clk)) {
ret = PTR_ERR(sp->clk);
- goto err_unmap;
+ goto err_put_master;
}
ret = clk_enable(sp->clk);
if (ret)
- goto err_clk_put;
+ goto err_put_master;
rate = DIV_ROUND_UP(clk_get_rate(sp->clk), MHZ);
if (!rate) {
@@ -280,10 +280,6 @@ err_disable:
ath79_spi_disable(sp);
err_clk_disable:
clk_disable(sp->clk);
-err_clk_put:
- clk_put(sp->clk);
-err_unmap:
- iounmap(sp->base);
err_put_master:
spi_master_put(sp->bitbang.master);
@@ -297,8 +293,6 @@ static int ath79_spi_remove(struct platform_device *pdev)
spi_bitbang_stop(&sp->bitbang);
ath79_spi_disable(sp);
clk_disable(sp->clk);
- clk_put(sp->clk);
- iounmap(sp->base);
spi_master_put(sp->bitbang.master);
return 0;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 273db0beb2b8..b0842f751016 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -189,6 +189,8 @@
*/
#define DMA_MIN_BYTES 16
+#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
struct atmel_spi_dma {
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
@@ -220,17 +222,13 @@ struct atmel_spi {
int irq;
struct clk *clk;
struct platform_device *pdev;
- struct spi_device *stay;
- u8 stopping;
- struct list_head queue;
- struct tasklet_struct tasklet;
struct spi_transfer *current_transfer;
unsigned long current_remaining_bytes;
- struct spi_transfer *next_transfer;
- unsigned long next_remaining_bytes;
int done_status;
+ struct completion xfer_completion;
+
/* scratch buffer */
void *buffer;
dma_addr_t buffer_dma;
@@ -241,6 +239,9 @@ struct atmel_spi {
bool use_pdc;
/* dmaengine data */
struct atmel_spi_dma dma;
+
+ bool keep_cs;
+ bool cs_active;
};
/* Controller-specific per-slave state */
@@ -376,17 +377,6 @@ static inline bool atmel_spi_use_dma(struct atmel_spi *as,
return as->use_dma && xfer->len >= DMA_MIN_BYTES;
}
-static inline int atmel_spi_xfer_is_last(struct spi_message *msg,
- struct spi_transfer *xfer)
-{
- return msg->transfers.prev == &xfer->transfer_list;
-}
-
-static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer)
-{
- return xfer->delay_usecs == 0 && !xfer->cs_change;
-}
-
static int atmel_spi_dma_slave_config(struct atmel_spi *as,
struct dma_slave_config *slave_config,
u8 bits_per_word)
@@ -513,23 +503,20 @@ static void dma_callback(void *data)
struct spi_master *master = data;
struct atmel_spi *as = spi_master_get_devdata(master);
- /* trigger SPI tasklet */
- tasklet_schedule(&as->tasklet);
+ complete(&as->xfer_completion);
}
/*
* Next transfer using PIO.
- * lock is held, spi tasklet is blocked
*/
static void atmel_spi_next_xfer_pio(struct spi_master *master,
struct spi_transfer *xfer)
{
struct atmel_spi *as = spi_master_get_devdata(master);
+ unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");
- as->current_remaining_bytes = xfer->len;
-
/* Make sure data is not remaining in RDR */
spi_readl(as, RDR);
while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
@@ -537,13 +524,14 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
cpu_relax();
}
- if (xfer->tx_buf)
+ if (xfer->tx_buf) {
if (xfer->bits_per_word > 8)
- spi_writel(as, TDR, *(u16 *)(xfer->tx_buf));
+ spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
else
- spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
- else
+ spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
+ } else {
spi_writel(as, TDR, 0);
+ }
dev_dbg(master->dev.parent,
" start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
@@ -556,7 +544,6 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
/*
* Submit next transfer for DMA.
- * lock is held, spi tasklet is blocked
*/
static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
struct spi_transfer *xfer,
@@ -694,74 +681,90 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
*plen = len;
}
+static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ u32 scbr, csr;
+ unsigned long bus_hz;
+
+ /* v1 chips start out at half the peripheral bus speed. */
+ bus_hz = clk_get_rate(as->clk);
+ if (!atmel_spi_is_v2(as))
+ bus_hz /= 2;
+
+ /*
+ * Calculate the lowest divider that satisfies the
+ * constraint, assuming div32/fdiv/mbz == 0.
+ */
+ if (xfer->speed_hz)
+ scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);
+ else
+ /*
+		 * This can happen if max_speed is null.
+ * In this case, we set the lowest possible speed
+ */
+ scbr = 0xff;
+
+ /*
+ * If the resulting divider doesn't fit into the
+ * register bitfield, we can't satisfy the constraint.
+ */
+ if (scbr >= (1 << SPI_SCBR_SIZE)) {
+ dev_err(&spi->dev,
+ "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
+ xfer->speed_hz, scbr, bus_hz/255);
+ return -EINVAL;
+ }
+ if (scbr == 0) {
+ dev_err(&spi->dev,
+ "setup: %d Hz too high, scbr %u; max %ld Hz\n",
+ xfer->speed_hz, scbr, bus_hz);
+ return -EINVAL;
+ }
+ csr = spi_readl(as, CSR0 + 4 * spi->chip_select);
+ csr = SPI_BFINS(SCBR, scbr, csr);
+ spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
+
+ return 0;
+}
+
/*
* Submit next transfer for PDC.
* lock is held, spi irq is blocked
*/
static void atmel_spi_pdc_next_xfer(struct spi_master *master,
- struct spi_message *msg)
+ struct spi_message *msg,
+ struct spi_transfer *xfer)
{
struct atmel_spi *as = spi_master_get_devdata(master);
- struct spi_transfer *xfer;
- u32 len, remaining;
- u32 ieval;
+ u32 len;
dma_addr_t tx_dma, rx_dma;
- if (!as->current_transfer)
- xfer = list_entry(msg->transfers.next,
- struct spi_transfer, transfer_list);
- else if (!as->next_transfer)
- xfer = list_entry(as->current_transfer->transfer_list.next,
- struct spi_transfer, transfer_list);
- else
- xfer = NULL;
-
- if (xfer) {
- spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
-
- len = xfer->len;
- atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
- remaining = xfer->len - len;
-
- spi_writel(as, RPR, rx_dma);
- spi_writel(as, TPR, tx_dma);
-
- if (msg->spi->bits_per_word > 8)
- len >>= 1;
- spi_writel(as, RCR, len);
- spi_writel(as, TCR, len);
-
- dev_dbg(&msg->spi->dev,
- " start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
- xfer, xfer->len, xfer->tx_buf,
- (unsigned long long)xfer->tx_dma, xfer->rx_buf,
- (unsigned long long)xfer->rx_dma);
- } else {
- xfer = as->next_transfer;
- remaining = as->next_remaining_bytes;
- }
+ spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
- as->current_transfer = xfer;
- as->current_remaining_bytes = remaining;
+ len = as->current_remaining_bytes;
+ atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+ as->current_remaining_bytes -= len;
- if (remaining > 0)
- len = remaining;
- else if (!atmel_spi_xfer_is_last(msg, xfer)
- && atmel_spi_xfer_can_be_chained(xfer)) {
- xfer = list_entry(xfer->transfer_list.next,
- struct spi_transfer, transfer_list);
- len = xfer->len;
- } else
- xfer = NULL;
+ spi_writel(as, RPR, rx_dma);
+ spi_writel(as, TPR, tx_dma);
- as->next_transfer = xfer;
+ if (msg->spi->bits_per_word > 8)
+ len >>= 1;
+ spi_writel(as, RCR, len);
+ spi_writel(as, TCR, len);
- if (xfer) {
- u32 total;
+ dev_dbg(&msg->spi->dev,
+ " start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
+ xfer, xfer->len, xfer->tx_buf,
+ (unsigned long long)xfer->tx_dma, xfer->rx_buf,
+ (unsigned long long)xfer->rx_dma);
- total = len;
+ if (as->current_remaining_bytes) {
+ len = as->current_remaining_bytes;
atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
- as->next_remaining_bytes = total - len;
+ as->current_remaining_bytes -= len;
spi_writel(as, RNPR, rx_dma);
spi_writel(as, TNPR, tx_dma);
@@ -776,11 +779,6 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
xfer, xfer->len, xfer->tx_buf,
(unsigned long long)xfer->tx_dma, xfer->rx_buf,
(unsigned long long)xfer->rx_dma);
- ieval = SPI_BIT(ENDRX) | SPI_BIT(OVRES);
- } else {
- spi_writel(as, RNCR, 0);
- spi_writel(as, TNCR, 0);
- ieval = SPI_BIT(RXBUFF) | SPI_BIT(ENDRX) | SPI_BIT(OVRES);
}
/* REVISIT: We're waiting for ENDRX before we start the next
@@ -793,83 +791,11 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
*
* It should be doable, though. Just not now...
*/
- spi_writel(as, IER, ieval);
+ spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}
/*
- * Choose way to submit next transfer and start it.
- * lock is held, spi tasklet is blocked
- */
-static void atmel_spi_dma_next_xfer(struct spi_master *master,
- struct spi_message *msg)
-{
- struct atmel_spi *as = spi_master_get_devdata(master);
- struct spi_transfer *xfer;
- u32 remaining, len;
-
- remaining = as->current_remaining_bytes;
- if (remaining) {
- xfer = as->current_transfer;
- len = remaining;
- } else {
- if (!as->current_transfer)
- xfer = list_entry(msg->transfers.next,
- struct spi_transfer, transfer_list);
- else
- xfer = list_entry(
- as->current_transfer->transfer_list.next,
- struct spi_transfer, transfer_list);
-
- as->current_transfer = xfer;
- len = xfer->len;
- }
-
- if (atmel_spi_use_dma(as, xfer)) {
- u32 total = len;
- if (!atmel_spi_next_xfer_dma_submit(master, xfer, &len)) {
- as->current_remaining_bytes = total - len;
- return;
- } else {
- dev_err(&msg->spi->dev, "unable to use DMA, fallback to PIO\n");
- }
- }
-
- /* use PIO if error appened using DMA */
- atmel_spi_next_xfer_pio(master, xfer);
-}
-
-static void atmel_spi_next_message(struct spi_master *master)
-{
- struct atmel_spi *as = spi_master_get_devdata(master);
- struct spi_message *msg;
- struct spi_device *spi;
-
- BUG_ON(as->current_transfer);
-
- msg = list_entry(as->queue.next, struct spi_message, queue);
- spi = msg->spi;
-
- dev_dbg(master->dev.parent, "start message %p for %s\n",
- msg, dev_name(&spi->dev));
-
- /* select chip if it's not still active */
- if (as->stay) {
- if (as->stay != spi) {
- cs_deactivate(as, as->stay);
- cs_activate(as, spi);
- }
- as->stay = NULL;
- } else
- cs_activate(as, spi);
-
- if (as->use_pdc)
- atmel_spi_pdc_next_xfer(master, msg);
- else
- atmel_spi_dma_next_xfer(master, msg);
-}
-
-/*
* For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
* - The buffer is either valid for CPU access, else NULL
* - If the buffer is valid, so is its DMA address
@@ -924,41 +850,7 @@ static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
}
-static void
-atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
- struct spi_message *msg, int stay)
-{
- if (!stay || as->done_status < 0)
- cs_deactivate(as, msg->spi);
- else
- as->stay = msg->spi;
-
- list_del(&msg->queue);
- msg->status = as->done_status;
-
- dev_dbg(master->dev.parent,
- "xfer complete: %u bytes transferred\n",
- msg->actual_length);
-
- atmel_spi_unlock(as);
- msg->complete(msg->context);
- atmel_spi_lock(as);
-
- as->current_transfer = NULL;
- as->next_transfer = NULL;
- as->done_status = 0;
-
- /* continue if needed */
- if (list_empty(&as->queue) || as->stopping) {
- if (as->use_pdc)
- atmel_spi_disable_pdc_transfer(as);
- } else {
- atmel_spi_next_message(master);
- }
-}
-
/* Called from IRQ
- * lock is held
*
* Must update "current_remaining_bytes" to keep track of data
* to transfer.
@@ -966,9 +858,7 @@ atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
static void
atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
{
- u8 *txp;
u8 *rxp;
- u16 *txp16;
u16 *rxp16;
unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
@@ -990,96 +880,12 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
} else {
as->current_remaining_bytes--;
}
-
- if (as->current_remaining_bytes) {
- if (xfer->tx_buf) {
- if (xfer->bits_per_word > 8) {
- txp16 = (u16 *)(((u8 *)xfer->tx_buf)
- + xfer_pos + 2);
- spi_writel(as, TDR, *txp16);
- } else {
- txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
- spi_writel(as, TDR, *txp);
- }
- } else {
- spi_writel(as, TDR, 0);
- }
- }
-}
-
-/* Tasklet
- * Called from DMA callback + pio transfer and overrun IRQ.
- */
-static void atmel_spi_tasklet_func(unsigned long data)
-{
- struct spi_master *master = (struct spi_master *)data;
- struct atmel_spi *as = spi_master_get_devdata(master);
- struct spi_message *msg;
- struct spi_transfer *xfer;
-
- dev_vdbg(master->dev.parent, "atmel_spi_tasklet_func\n");
-
- atmel_spi_lock(as);
-
- xfer = as->current_transfer;
-
- if (xfer == NULL)
- /* already been there */
- goto tasklet_out;
-
- msg = list_entry(as->queue.next, struct spi_message, queue);
-
- if (as->current_remaining_bytes == 0) {
- if (as->done_status < 0) {
- /* error happened (overrun) */
- if (atmel_spi_use_dma(as, xfer))
- atmel_spi_stop_dma(as);
- } else {
- /* only update length if no error */
- msg->actual_length += xfer->len;
- }
-
- if (atmel_spi_use_dma(as, xfer))
- if (!msg->is_dma_mapped)
- atmel_spi_dma_unmap_xfer(master, xfer);
-
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
-
- if (atmel_spi_xfer_is_last(msg, xfer) || as->done_status < 0) {
- /* report completed (or erroneous) message */
- atmel_spi_msg_done(master, as, msg, xfer->cs_change);
- } else {
- if (xfer->cs_change) {
- cs_deactivate(as, msg->spi);
- udelay(1);
- cs_activate(as, msg->spi);
- }
-
- /*
- * Not done yet. Submit the next transfer.
- *
- * FIXME handle protocol options for xfer
- */
- atmel_spi_dma_next_xfer(master, msg);
- }
- } else {
- /*
- * Keep going, we still have data to send in
- * the current transfer.
- */
- atmel_spi_dma_next_xfer(master, msg);
- }
-
-tasklet_out:
- atmel_spi_unlock(as);
}
/* Interrupt
*
* No need for locking in this Interrupt handler: done_status is the
- * only information modified. What we need is the update of this field
- * before tasklet runs. This is ensured by using barrier.
+ * only information modified.
*/
static irqreturn_t
atmel_spi_pio_interrupt(int irq, void *dev_id)
@@ -1107,8 +913,6 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
*
 * We will also not process any remaining transfers in
* the message.
- *
- * All actions are done in tasklet with done_status indication
*/
as->done_status = -EIO;
smp_wmb();
@@ -1116,7 +920,7 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
/* Clear any overrun happening while cleaning up */
spi_readl(as, SR);
- tasklet_schedule(&as->tasklet);
+ complete(&as->xfer_completion);
} else if (pending & SPI_BIT(RDRF)) {
atmel_spi_lock(as);
@@ -1125,11 +929,10 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
ret = IRQ_HANDLED;
xfer = as->current_transfer;
atmel_spi_pump_pio_data(as, xfer);
- if (!as->current_remaining_bytes) {
- /* no more data to xfer, kick tasklet */
+ if (!as->current_remaining_bytes)
spi_writel(as, IDR, pending);
- tasklet_schedule(&as->tasklet);
- }
+
+ complete(&as->xfer_completion);
}
atmel_spi_unlock(as);
@@ -1147,116 +950,35 @@ atmel_spi_pdc_interrupt(int irq, void *dev_id)
{
struct spi_master *master = dev_id;
struct atmel_spi *as = spi_master_get_devdata(master);
- struct spi_message *msg;
- struct spi_transfer *xfer;
u32 status, pending, imr;
int ret = IRQ_NONE;
- atmel_spi_lock(as);
-
- xfer = as->current_transfer;
- msg = list_entry(as->queue.next, struct spi_message, queue);
-
imr = spi_readl(as, IMR);
status = spi_readl(as, SR);
pending = status & imr;
if (pending & SPI_BIT(OVRES)) {
- int timeout;
ret = IRQ_HANDLED;
spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
| SPI_BIT(OVRES)));
- /*
- * When we get an overrun, we disregard the current
- * transfer. Data will not be copied back from any
- * bounce buffer and msg->actual_len will not be
- * updated with the last xfer.
- *
- * We will also not process any remaning transfers in
- * the message.
- *
- * First, stop the transfer and unmap the DMA buffers.
- */
- spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
- if (!msg->is_dma_mapped)
- atmel_spi_dma_unmap_xfer(master, xfer);
-
- /* REVISIT: udelay in irq is unfriendly */
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
-
- dev_warn(master->dev.parent, "overrun (%u/%u remaining)\n",
- spi_readl(as, TCR), spi_readl(as, RCR));
-
- /*
- * Clean up DMA registers and make sure the data
- * registers are empty.
- */
- spi_writel(as, RNCR, 0);
- spi_writel(as, TNCR, 0);
- spi_writel(as, RCR, 0);
- spi_writel(as, TCR, 0);
- for (timeout = 1000; timeout; timeout--)
- if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
- break;
- if (!timeout)
- dev_warn(master->dev.parent,
- "timeout waiting for TXEMPTY");
- while (spi_readl(as, SR) & SPI_BIT(RDRF))
- spi_readl(as, RDR);
-
/* Clear any overrun happening while cleaning up */
spi_readl(as, SR);
as->done_status = -EIO;
- atmel_spi_msg_done(master, as, msg, 0);
+
+ complete(&as->xfer_completion);
+
} else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
ret = IRQ_HANDLED;
spi_writel(as, IDR, pending);
- if (as->current_remaining_bytes == 0) {
- msg->actual_length += xfer->len;
-
- if (!msg->is_dma_mapped)
- atmel_spi_dma_unmap_xfer(master, xfer);
-
- /* REVISIT: udelay in irq is unfriendly */
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
-
- if (atmel_spi_xfer_is_last(msg, xfer)) {
- /* report completed message */
- atmel_spi_msg_done(master, as, msg,
- xfer->cs_change);
- } else {
- if (xfer->cs_change) {
- cs_deactivate(as, msg->spi);
- udelay(1);
- cs_activate(as, msg->spi);
- }
-
- /*
- * Not done yet. Submit the next transfer.
- *
- * FIXME handle protocol options for xfer
- */
- atmel_spi_pdc_next_xfer(master, msg);
- }
- } else {
- /*
- * Keep going, we still have data to send in
- * the current transfer.
- */
- atmel_spi_pdc_next_xfer(master, msg);
- }
+ complete(&as->xfer_completion);
}
- atmel_spi_unlock(as);
-
return ret;
}
@@ -1264,17 +986,13 @@ static int atmel_spi_setup(struct spi_device *spi)
{
struct atmel_spi *as;
struct atmel_spi_device *asd;
- u32 scbr, csr;
+ u32 csr;
unsigned int bits = spi->bits_per_word;
- unsigned long bus_hz;
unsigned int npcs_pin;
int ret;
as = spi_master_get_devdata(spi->master);
- if (as->stopping)
- return -ESHUTDOWN;
-
if (spi->chip_select > spi->master->num_chipselect) {
dev_dbg(&spi->dev,
"setup: invalid chipselect %u (%u defined)\n",
@@ -1290,33 +1008,7 @@ static int atmel_spi_setup(struct spi_device *spi)
return -EINVAL;
}
- /* v1 chips start out at half the peripheral bus speed. */
- bus_hz = clk_get_rate(as->clk);
- if (!atmel_spi_is_v2(as))
- bus_hz /= 2;
-
- if (spi->max_speed_hz) {
- /*
- * Calculate the lowest divider that satisfies the
- * constraint, assuming div32/fdiv/mbz == 0.
- */
- scbr = DIV_ROUND_UP(bus_hz, spi->max_speed_hz);
-
- /*
- * If the resulting divider doesn't fit into the
- * register bitfield, we can't satisfy the constraint.
- */
- if (scbr >= (1 << SPI_SCBR_SIZE)) {
- dev_dbg(&spi->dev,
- "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
- spi->max_speed_hz, scbr, bus_hz/255);
- return -EINVAL;
- }
- } else
- /* speed zero means "as slow as possible" */
- scbr = 0xff;
-
- csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8);
+ csr = SPI_BF(BITS, bits - 8);
if (spi->mode & SPI_CPOL)
csr |= SPI_BIT(CPOL);
if (!(spi->mode & SPI_CPHA))
@@ -1352,19 +1044,13 @@ static int atmel_spi_setup(struct spi_device *spi)
asd->npcs_pin = npcs_pin;
spi->controller_state = asd;
gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
- } else {
- atmel_spi_lock(as);
- if (as->stay == spi)
- as->stay = NULL;
- cs_deactivate(as, spi);
- atmel_spi_unlock(as);
}
asd->csr = csr;
dev_dbg(&spi->dev,
- "setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n",
- bus_hz / scbr, bits, spi->mode, spi->chip_select, csr);
+ "setup: bpw %u mode 0x%x -> csr%d %08x\n",
+ bits, spi->mode, spi->chip_select, csr);
if (!atmel_spi_is_v2(as))
spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
@@ -1372,103 +1058,218 @@ static int atmel_spi_setup(struct spi_device *spi)
return 0;
}
-static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
+static int atmel_spi_one_transfer(struct spi_master *master,
+ struct spi_message *msg,
+ struct spi_transfer *xfer)
{
struct atmel_spi *as;
- struct spi_transfer *xfer;
- struct device *controller = spi->master->dev.parent;
+ struct spi_device *spi = msg->spi;
u8 bits;
+ u32 len;
struct atmel_spi_device *asd;
+ int timeout;
+ int ret;
- as = spi_master_get_devdata(spi->master);
-
- dev_dbg(controller, "new message %p submitted for %s\n",
- msg, dev_name(&spi->dev));
+ as = spi_master_get_devdata(master);
- if (unlikely(list_empty(&msg->transfers)))
+ if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
+ dev_dbg(&spi->dev, "missing rx or tx buf\n");
return -EINVAL;
+ }
- if (as->stopping)
- return -ESHUTDOWN;
+ if (xfer->bits_per_word) {
+ asd = spi->controller_state;
+ bits = (asd->csr >> 4) & 0xf;
+ if (bits != xfer->bits_per_word - 8) {
+ dev_dbg(&spi->dev,
+ "you can't yet change bits_per_word in transfers\n");
+ return -ENOPROTOOPT;
+ }
+ }
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
- dev_dbg(&spi->dev, "missing rx or tx buf\n");
+ if (xfer->bits_per_word > 8) {
+ if (xfer->len % 2) {
+ dev_dbg(&spi->dev,
+ "buffer len should be 16 bits aligned\n");
return -EINVAL;
}
+ }
- if (xfer->bits_per_word) {
- asd = spi->controller_state;
- bits = (asd->csr >> 4) & 0xf;
- if (bits != xfer->bits_per_word - 8) {
- dev_dbg(&spi->dev,
- "you can't yet change bits_per_word in transfers\n");
- return -ENOPROTOOPT;
+ /*
+ * DMA map early, for performance (empties dcache ASAP) and
+ * better fault reporting.
+ */
+ if ((!msg->is_dma_mapped)
+ && (atmel_spi_use_dma(as, xfer) || as->use_pdc)) {
+ if (atmel_spi_dma_map_xfer(as, xfer) < 0)
+ return -ENOMEM;
+ }
+
+ atmel_spi_set_xfer_speed(as, msg->spi, xfer);
+
+ as->done_status = 0;
+ as->current_transfer = xfer;
+ as->current_remaining_bytes = xfer->len;
+ while (as->current_remaining_bytes) {
+ reinit_completion(&as->xfer_completion);
+
+ if (as->use_pdc) {
+ atmel_spi_pdc_next_xfer(master, msg, xfer);
+ } else if (atmel_spi_use_dma(as, xfer)) {
+ len = as->current_remaining_bytes;
+ ret = atmel_spi_next_xfer_dma_submit(master,
+ xfer, &len);
+ if (ret) {
+ dev_err(&spi->dev,
+ "unable to use DMA, fallback to PIO\n");
+ atmel_spi_next_xfer_pio(master, xfer);
+ } else {
+ as->current_remaining_bytes -= len;
}
+ } else {
+ atmel_spi_next_xfer_pio(master, xfer);
}
- if (xfer->bits_per_word > 8) {
- if (xfer->len % 2) {
- dev_dbg(&spi->dev, "buffer len should be 16 bits aligned\n");
- return -EINVAL;
- }
+ ret = wait_for_completion_timeout(&as->xfer_completion,
+ SPI_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(&spi->dev,
+				"spi transfer timeout, err %d\n", ret);
+ as->done_status = -EIO;
+ } else {
+ ret = 0;
}
- /* FIXME implement these protocol options!! */
- if (xfer->speed_hz < spi->max_speed_hz) {
- dev_dbg(&spi->dev, "can't change speed in transfer\n");
- return -ENOPROTOOPT;
+ if (as->done_status)
+ break;
+ }
+
+ if (as->done_status) {
+ if (as->use_pdc) {
+ dev_warn(master->dev.parent,
+ "overrun (%u/%u remaining)\n",
+ spi_readl(as, TCR), spi_readl(as, RCR));
+
+ /*
+ * Clean up DMA registers and make sure the data
+ * registers are empty.
+ */
+ spi_writel(as, RNCR, 0);
+ spi_writel(as, TNCR, 0);
+ spi_writel(as, RCR, 0);
+ spi_writel(as, TCR, 0);
+ for (timeout = 1000; timeout; timeout--)
+ if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
+ break;
+ if (!timeout)
+ dev_warn(master->dev.parent,
+					"timeout waiting for TXEMPTY\n");
+ while (spi_readl(as, SR) & SPI_BIT(RDRF))
+ spi_readl(as, RDR);
+
+ /* Clear any overrun happening while cleaning up */
+ spi_readl(as, SR);
+
+ } else if (atmel_spi_use_dma(as, xfer)) {
+ atmel_spi_stop_dma(as);
}
- /*
- * DMA map early, for performance (empties dcache ASAP) and
- * better fault reporting.
- */
- if ((!msg->is_dma_mapped) && (atmel_spi_use_dma(as, xfer)
- || as->use_pdc)) {
- if (atmel_spi_dma_map_xfer(as, xfer) < 0)
- return -ENOMEM;
+ if (!msg->is_dma_mapped
+ && (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+ atmel_spi_dma_unmap_xfer(master, xfer);
+
+ return 0;
+
+ } else {
+ /* only update length if no error */
+ msg->actual_length += xfer->len;
+ }
+
+ if (!msg->is_dma_mapped
+ && (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+ atmel_spi_dma_unmap_xfer(master, xfer);
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ if (xfer->cs_change) {
+ if (list_is_last(&xfer->transfer_list,
+ &msg->transfers)) {
+ as->keep_cs = true;
+ } else {
+ as->cs_active = !as->cs_active;
+ if (as->cs_active)
+ cs_activate(as, msg->spi);
+ else
+ cs_deactivate(as, msg->spi);
}
}
-#ifdef VERBOSE
+ return 0;
+}
+
+static int atmel_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct atmel_spi *as;
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ int ret = 0;
+
+ as = spi_master_get_devdata(master);
+
+ dev_dbg(&spi->dev, "new message %p submitted for %s\n",
+ msg, dev_name(&spi->dev));
+
+ if (unlikely(list_empty(&msg->transfers)))
+ return -EINVAL;
+
+ atmel_spi_lock(as);
+ cs_activate(as, spi);
+
+ as->cs_active = true;
+ as->keep_cs = false;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- dev_dbg(controller,
+ ret = atmel_spi_one_transfer(master, msg, xfer);
+ if (ret)
+ goto msg_done;
+ }
+
+ if (as->use_pdc)
+ atmel_spi_disable_pdc_transfer(as);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ dev_dbg(&spi->dev,
" xfer %p: len %u tx %p/%08x rx %p/%08x\n",
xfer, xfer->len,
xfer->tx_buf, xfer->tx_dma,
xfer->rx_buf, xfer->rx_dma);
}
-#endif
- msg->status = -EINPROGRESS;
- msg->actual_length = 0;
+msg_done:
+ if (!as->keep_cs)
+ cs_deactivate(as, msg->spi);
- atmel_spi_lock(as);
- list_add_tail(&msg->queue, &as->queue);
- if (!as->current_transfer)
- atmel_spi_next_message(spi->master);
atmel_spi_unlock(as);
- return 0;
+ msg->status = as->done_status;
+ spi_finalize_current_message(spi->master);
+
+ return ret;
}
static void atmel_spi_cleanup(struct spi_device *spi)
{
- struct atmel_spi *as = spi_master_get_devdata(spi->master);
struct atmel_spi_device *asd = spi->controller_state;
unsigned gpio = (unsigned) spi->controller_data;
if (!asd)
return;
- atmel_spi_lock(as);
- if (as->stay == spi) {
- as->stay = NULL;
- cs_deactivate(as, spi);
- }
- atmel_spi_unlock(as);
-
spi->controller_state = NULL;
gpio_free(gpio);
kfree(asd);
@@ -1510,7 +1311,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- clk = clk_get(&pdev->dev, "spi_clk");
+ clk = devm_clk_get(&pdev->dev, "spi_clk");
if (IS_ERR(clk))
return PTR_ERR(clk);
@@ -1527,7 +1328,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
master->num_chipselect = master->dev.of_node ? 0 : 4;
master->setup = atmel_spi_setup;
- master->transfer = atmel_spi_transfer;
+ master->transfer_one_message = atmel_spi_transfer_one_message;
master->cleanup = atmel_spi_cleanup;
platform_set_drvdata(pdev, master);
@@ -1543,7 +1344,6 @@ static int atmel_spi_probe(struct platform_device *pdev)
goto out_free;
spin_lock_init(&as->lock);
- INIT_LIST_HEAD(&as->queue);
as->pdev = pdev;
as->regs = devm_ioremap_resource(&pdev->dev, regs);
@@ -1555,6 +1355,8 @@ static int atmel_spi_probe(struct platform_device *pdev)
as->irq = irq;
as->clk = clk;
+ init_completion(&as->xfer_completion);
+
atmel_get_caps(as);
as->use_dma = false;
@@ -1570,14 +1372,11 @@ static int atmel_spi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
if (as->use_pdc) {
- ret = request_irq(irq, atmel_spi_pdc_interrupt, 0,
- dev_name(&pdev->dev), master);
+ ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pdc_interrupt,
+ 0, dev_name(&pdev->dev), master);
} else {
- tasklet_init(&as->tasklet, atmel_spi_tasklet_func,
- (unsigned long)master);
-
- ret = request_irq(irq, atmel_spi_pio_interrupt, 0,
- dev_name(&pdev->dev), master);
+ ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pio_interrupt,
+ 0, dev_name(&pdev->dev), master);
}
if (ret)
goto out_unmap_regs;
@@ -1603,7 +1402,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
(unsigned long)regs->start, irq);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
goto out_free_dma;
@@ -1617,15 +1416,11 @@ out_free_dma:
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
clk_disable_unprepare(clk);
out_free_irq:
- free_irq(irq, master);
out_unmap_regs:
out_free_buffer:
- if (!as->use_pdc)
- tasklet_kill(&as->tasklet);
dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
as->buffer_dma);
out_free:
- clk_put(clk);
spi_master_put(master);
return ret;
}
@@ -1634,12 +1429,9 @@ static int atmel_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct atmel_spi *as = spi_master_get_devdata(master);
- struct spi_message *msg;
- struct spi_transfer *xfer;
/* reset the hardware and block queue progress */
spin_lock_irq(&as->lock);
- as->stopping = 1;
if (as->use_dma) {
atmel_spi_stop_dma(as);
atmel_spi_release_dma(as);
@@ -1650,28 +1442,10 @@ static int atmel_spi_remove(struct platform_device *pdev)
spi_readl(as, SR);
spin_unlock_irq(&as->lock);
- /* Terminate remaining queued transfers */
- list_for_each_entry(msg, &as->queue, queue) {
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (!msg->is_dma_mapped
- && (atmel_spi_use_dma(as, xfer)
- || as->use_pdc))
- atmel_spi_dma_unmap_xfer(master, xfer);
- }
- msg->status = -ESHUTDOWN;
- msg->complete(msg->context);
- }
-
- if (!as->use_pdc)
- tasklet_kill(&as->tasklet);
dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
as->buffer_dma);
clk_disable_unprepare(as->clk);
- clk_put(as->clk);
- free_irq(as->irq, master);
-
- spi_unregister_master(master);
return 0;
}
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 9025edd7dc45..8a89dd1f2654 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -347,8 +347,8 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
clk_prepare_enable(bs->clk);
- err = request_irq(bs->irq, bcm2835_spi_interrupt, 0,
- dev_name(&pdev->dev), master);
+ err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
+ dev_name(&pdev->dev), master);
if (err) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
goto out_clk_disable;
@@ -361,13 +361,11 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
err = devm_spi_register_master(&pdev->dev, master);
if (err) {
dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
- goto out_free_irq;
+ goto out_clk_disable;
}
return 0;
-out_free_irq:
- free_irq(bs->irq, master);
out_clk_disable:
clk_disable_unprepare(bs->clk);
out_master_put:
@@ -380,8 +378,6 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct bcm2835_spi *bs = spi_master_get_devdata(master);
- free_irq(bs->irq, master);
-
/* Clear FIFOs, and disable the HW block */
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
new file mode 100644
index 000000000000..b528f9fc8bc0
--- /dev/null
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -0,0 +1,475 @@
+/*
+ * Broadcom BCM63XX High Speed SPI Controller driver
+ *
+ * Copyright 2000-2010 Broadcom Corporation
+ * Copyright 2012-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+
+#define HSSPI_GLOBAL_CTRL_REG 0x0
+#define GLOBAL_CTRL_CS_POLARITY_SHIFT 0
+#define GLOBAL_CTRL_CS_POLARITY_MASK 0x000000ff
+#define GLOBAL_CTRL_PLL_CLK_CTRL_SHIFT 8
+#define GLOBAL_CTRL_PLL_CLK_CTRL_MASK 0x0000ff00
+#define GLOBAL_CTRL_CLK_GATE_SSOFF BIT(16)
+#define GLOBAL_CTRL_CLK_POLARITY BIT(17)
+#define GLOBAL_CTRL_MOSI_IDLE BIT(18)
+
+#define HSSPI_GLOBAL_EXT_TRIGGER_REG 0x4
+
+#define HSSPI_INT_STATUS_REG 0x8
+#define HSSPI_INT_STATUS_MASKED_REG 0xc
+#define HSSPI_INT_MASK_REG 0x10
+
+#define HSSPI_PINGx_CMD_DONE(i) BIT((i * 8) + 0)
+#define HSSPI_PINGx_RX_OVER(i) BIT((i * 8) + 1)
+#define HSSPI_PINGx_TX_UNDER(i) BIT((i * 8) + 2)
+#define HSSPI_PINGx_POLL_TIMEOUT(i) BIT((i * 8) + 3)
+#define HSSPI_PINGx_CTRL_INVAL(i) BIT((i * 8) + 4)
+
+#define HSSPI_INT_CLEAR_ALL 0xff001f1f
+
+#define HSSPI_PINGPONG_COMMAND_REG(x) (0x80 + (x) * 0x40)
+#define PINGPONG_CMD_COMMAND_MASK 0xf
+#define PINGPONG_COMMAND_NOOP 0
+#define PINGPONG_COMMAND_START_NOW 1
+#define PINGPONG_COMMAND_START_TRIGGER 2
+#define PINGPONG_COMMAND_HALT 3
+#define PINGPONG_COMMAND_FLUSH 4
+#define PINGPONG_CMD_PROFILE_SHIFT 8
+#define PINGPONG_CMD_SS_SHIFT 12
+
+#define HSSPI_PINGPONG_STATUS_REG(x) (0x84 + (x) * 0x40)
+
+#define HSSPI_PROFILE_CLK_CTRL_REG(x) (0x100 + (x) * 0x20)
+#define CLK_CTRL_FREQ_CTRL_MASK 0x0000ffff
+#define CLK_CTRL_SPI_CLK_2X_SEL BIT(14)
+#define CLK_CTRL_ACCUM_RST_ON_LOOP BIT(15)
+
+#define HSSPI_PROFILE_SIGNAL_CTRL_REG(x) (0x104 + (x) * 0x20)
+#define SIGNAL_CTRL_LATCH_RISING BIT(12)
+#define SIGNAL_CTRL_LAUNCH_RISING BIT(13)
+#define SIGNAL_CTRL_ASYNC_INPUT_PATH BIT(16)
+
+#define HSSPI_PROFILE_MODE_CTRL_REG(x) (0x108 + (x) * 0x20)
+#define MODE_CTRL_MULTIDATA_RD_STRT_SHIFT 8
+#define MODE_CTRL_MULTIDATA_WR_STRT_SHIFT 12
+#define MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT 16
+#define MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT 18
+#define MODE_CTRL_MODE_3WIRE BIT(20)
+#define MODE_CTRL_PREPENDBYTE_CNT_SHIFT 24
+
+#define HSSPI_FIFO_REG(x) (0x200 + (x) * 0x200)
+
+
+#define HSSPI_OP_CODE_SHIFT 13
+#define HSSPI_OP_SLEEP (0 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_READ_WRITE (1 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_WRITE (2 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_READ (3 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_SETIRQ (4 << HSSPI_OP_CODE_SHIFT)
+
+#define HSSPI_BUFFER_LEN 512
+#define HSSPI_OPCODE_LEN 2
+
+#define HSSPI_MAX_PREPEND_LEN 15
+
+#define HSSPI_MAX_SYNC_CLOCK 30000000
+
+#define HSSPI_BUS_NUM 1 /* 0 is legacy SPI */
+
+struct bcm63xx_hsspi {
+ struct completion done;
+ struct mutex bus_mutex;
+
+ struct platform_device *pdev;
+ struct clk *clk;
+ void __iomem *regs;
+ u8 __iomem *fifo;
+
+ u32 speed_hz;
+ u8 cs_polarity;
+};
+
+static void bcm63xx_hsspi_set_cs(struct bcm63xx_hsspi *bs, unsigned cs,
+ bool active)
+{
+ u32 reg;
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ reg &= ~BIT(cs);
+ if (active == !(bs->cs_polarity & BIT(cs)))
+ reg |= BIT(cs);
+
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ mutex_unlock(&bs->bus_mutex);
+}
+
+static void bcm63xx_hsspi_set_clk(struct bcm63xx_hsspi *bs,
+ struct spi_device *spi, int hz)
+{
+ unsigned profile = spi->chip_select;
+ u32 reg;
+
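+	/*
+	 * FREQ_CTRL word: roughly 2048 * hz / bs->speed_hz, i.e. the
+	 * requested rate expressed as a /2048 fraction of the PLL clock.
+	 */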
+ reg = DIV_ROUND_UP(2048, DIV_ROUND_UP(bs->speed_hz, hz));
+ __raw_writel(CLK_CTRL_ACCUM_RST_ON_LOOP | reg,
+ bs->regs + HSSPI_PROFILE_CLK_CTRL_REG(profile));
+
+ reg = __raw_readl(bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
+ if (hz > HSSPI_MAX_SYNC_CLOCK)
+ reg |= SIGNAL_CTRL_ASYNC_INPUT_PATH;
+ else
+ reg &= ~SIGNAL_CTRL_ASYNC_INPUT_PATH;
+ __raw_writel(reg, bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
+
+ mutex_lock(&bs->bus_mutex);
+ /* setup clock polarity */
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ reg &= ~GLOBAL_CTRL_CLK_POLARITY;
+ if (spi->mode & SPI_CPOL)
+ reg |= GLOBAL_CTRL_CLK_POLARITY;
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ mutex_unlock(&bs->bus_mutex);
+}
+
+static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+ unsigned chip_select = spi->chip_select;
+ u16 opcode = 0;
+ int pending = t->len;
+ int step_size = HSSPI_BUFFER_LEN;
+ const u8 *tx = t->tx_buf;
+ u8 *rx = t->rx_buf;
+
+ bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, true);
+
+ if (tx && rx)
+ opcode = HSSPI_OP_READ_WRITE;
+ else if (tx)
+ opcode = HSSPI_OP_WRITE;
+ else if (rx)
+ opcode = HSSPI_OP_READ;
+
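+	/*
+	 * Tx data shares the FIFO with the 2-byte opcode word, so
+	 * non-read steps carry HSSPI_OPCODE_LEN fewer payload bytes.
+	 */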
+ if (opcode != HSSPI_OP_READ)
+ step_size -= HSSPI_OPCODE_LEN;
+
+ __raw_writel(0 << MODE_CTRL_PREPENDBYTE_CNT_SHIFT |
+ 2 << MODE_CTRL_MULTIDATA_WR_STRT_SHIFT |
+ 2 << MODE_CTRL_MULTIDATA_RD_STRT_SHIFT | 0xff,
+ bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
+
+ while (pending > 0) {
+ int curr_step = min_t(int, step_size, pending);
+
+ init_completion(&bs->done);
+ if (tx) {
+ memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN, tx, curr_step);
+ tx += curr_step;
+ }
+
+ __raw_writew(opcode | curr_step, bs->fifo);
+
+ /* enable interrupt */
+ __raw_writel(HSSPI_PINGx_CMD_DONE(0),
+ bs->regs + HSSPI_INT_MASK_REG);
+
+ /* start the transfer */
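+		/* (the SS field carries the dummy slave select, !chip_select) */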
+ __raw_writel(!chip_select << PINGPONG_CMD_SS_SHIFT |
+ chip_select << PINGPONG_CMD_PROFILE_SHIFT |
+ PINGPONG_COMMAND_START_NOW,
+ bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
+
+ if (wait_for_completion_timeout(&bs->done, HZ) == 0) {
+ dev_err(&bs->pdev->dev, "transfer timed out!\n");
+ return -ETIMEDOUT;
+ }
+
+ if (rx) {
+ memcpy_fromio(rx, bs->fifo, curr_step);
+ rx += curr_step;
+ }
+
+ pending -= curr_step;
+ }
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_setup(struct spi_device *spi)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = __raw_readl(bs->regs +
+ HSSPI_PROFILE_SIGNAL_CTRL_REG(spi->chip_select));
+ reg &= ~(SIGNAL_CTRL_LAUNCH_RISING | SIGNAL_CTRL_LATCH_RISING);
+ if (spi->mode & SPI_CPHA)
+ reg |= SIGNAL_CTRL_LAUNCH_RISING;
+ else
+ reg |= SIGNAL_CTRL_LATCH_RISING;
+ __raw_writel(reg, bs->regs +
+ HSSPI_PROFILE_SIGNAL_CTRL_REG(spi->chip_select));
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ /* only change actual polarities if there is no transfer */
+ if ((reg & GLOBAL_CTRL_CS_POLARITY_MASK) == bs->cs_polarity) {
+ if (spi->mode & SPI_CS_HIGH)
+ reg |= BIT(spi->chip_select);
+ else
+ reg &= ~BIT(spi->chip_select);
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ }
+
+ if (spi->mode & SPI_CS_HIGH)
+ bs->cs_polarity |= BIT(spi->chip_select);
+ else
+ bs->cs_polarity &= ~BIT(spi->chip_select);
+
+ mutex_unlock(&bs->bus_mutex);
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ struct spi_device *spi = msg->spi;
+ int status = -EINVAL;
+ int dummy_cs;
+ u32 reg;
+
+ /* This controller does not support keeping CS active during idle.
+ * To work around this, we use the following ugly hack:
+ *
+ * a. Invert the target chip select's polarity so it will be active.
+ * b. Select a "dummy" chip select to use as the hardware target.
+ * c. Invert the dummy chip select's polarity so it will be inactive
+ * during the actual transfers.
+ * d. Tell the hardware to send to the dummy chip select. Thanks to
+ * the multiplexed nature of SPI the actual target will receive
+ * the transfer and we see its response.
+ *
+ * e. At the end restore the polarities again to their default values.
+ */
+
+ dummy_cs = !spi->chip_select;
+ bcm63xx_hsspi_set_cs(bs, dummy_cs, true);
+
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ status = bcm63xx_hsspi_do_txrx(spi, t);
+ if (status)
+ break;
+
+ msg->actual_length += t->len;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (t->cs_change)
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, false);
+ }
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ reg &= ~GLOBAL_CTRL_CS_POLARITY_MASK;
+ reg |= bs->cs_polarity;
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ mutex_unlock(&bs->bus_mutex);
+
+ msg->status = status;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static irqreturn_t bcm63xx_hsspi_interrupt(int irq, void *dev_id)
+{
+ struct bcm63xx_hsspi *bs = (struct bcm63xx_hsspi *)dev_id;
+
+ if (__raw_readl(bs->regs + HSSPI_INT_STATUS_MASKED_REG) == 0)
+ return IRQ_NONE;
+
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+
+ complete(&bs->done);
+
+ return IRQ_HANDLED;
+}
+
+static int bcm63xx_hsspi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct bcm63xx_hsspi *bs;
+ struct resource *res_mem;
+ void __iomem *regs;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ int irq, ret;
+ u32 reg, rate;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no irq\n");
+ return -ENXIO;
+ }
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res_mem);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ clk = devm_clk_get(dev, "hsspi");
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rate = clk_get_rate(clk);
+ if (!rate)
+ return -EINVAL;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ if (!master) {
+ ret = -ENOMEM;
+ goto out_disable_clk;
+ }
+
+ bs = spi_master_get_devdata(master);
+ bs->pdev = pdev;
+ bs->clk = clk;
+ bs->regs = regs;
+ bs->speed_hz = rate;
+ bs->fifo = (u8 __iomem *)(bs->regs + HSSPI_FIFO_REG(0));
+
+ mutex_init(&bs->bus_mutex);
+
+ master->bus_num = HSSPI_BUS_NUM;
+ master->num_chipselect = 8;
+ master->setup = bcm63xx_hsspi_setup;
+ master->transfer_one_message = bcm63xx_hsspi_transfer_one;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->auto_runtime_pm = true;
+
+ platform_set_drvdata(pdev, master);
+
+ /* Initialize the hardware */
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+
+ /* clean up any pending interrupts */
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+
+ /* read out default CS polarities */
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ bs->cs_polarity = reg & GLOBAL_CTRL_CS_POLARITY_MASK;
+ __raw_writel(reg | GLOBAL_CTRL_CLK_GATE_SSOFF,
+ bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ ret = devm_request_irq(dev, irq, bcm63xx_hsspi_interrupt, IRQF_SHARED,
+ pdev->name, bs);
+
+ if (ret)
+ goto out_put_master;
+
+ /* register and we are done */
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto out_put_master;
+
+ return 0;
+
+out_put_master:
+ spi_master_put(master);
+out_disable_clk:
+ clk_disable_unprepare(clk);
+ return ret;
+}
+
+
+static int bcm63xx_hsspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+
+ /* reset the hardware and block queue progress */
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bcm63xx_hsspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ return ret;
+
+ spi_master_resume(master);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops bcm63xx_hsspi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(bcm63xx_hsspi_suspend, bcm63xx_hsspi_resume)
+};
+
+static struct platform_driver bcm63xx_hsspi_driver = {
+ .driver = {
+ .name = "bcm63xx-hsspi",
+ .owner = THIS_MODULE,
+ .pm = &bcm63xx_hsspi_pm_ops,
+ },
+ .probe = bcm63xx_hsspi_probe,
+ .remove = bcm63xx_hsspi_remove,
+};
+
+module_platform_driver(bcm63xx_hsspi_driver);
+
+MODULE_ALIAS("platform:bcm63xx_hsspi");
+MODULE_DESCRIPTION("Broadcom BCM63xx High Speed SPI Controller driver");
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 469ecd876358..77286aef2adf 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -169,8 +169,6 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
transfer_list);
}
- len -= prepend_len;
-
init_completion(&bs->done);
/* Fill in the Message control register */
@@ -205,13 +203,7 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
if (!timeout)
return -ETIMEDOUT;
- /* read out all data */
- rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
-
- if (do_rx && rx_tail != len)
- return -EIO;
-
- if (!rx_tail)
+ if (!do_rx)
return 0;
len = 0;
@@ -345,22 +337,19 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "no irq\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
- clk = clk_get(dev, "spi");
+ clk = devm_clk_get(dev, "spi");
if (IS_ERR(clk)) {
dev_err(dev, "no clock for device\n");
- ret = PTR_ERR(clk);
- goto out;
+ return PTR_ERR(clk);
}
master = spi_alloc_master(dev, sizeof(*bs));
if (!master) {
dev_err(dev, "out of memory\n");
- ret = -ENOMEM;
- goto out_clk;
+ return -ENOMEM;
}
bs = spi_master_get_devdata(master);
@@ -408,7 +397,10 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
}
/* Initialize hardware */
- clk_prepare_enable(bs->clk);
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ goto out_err;
+
bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
/* register and we are done */
@@ -427,9 +419,6 @@ out_clk_disable:
clk_disable_unprepare(clk);
out_err:
spi_master_put(master);
-out_clk:
- clk_put(clk);
-out:
return ret;
}
@@ -443,12 +432,11 @@ static int bcm63xx_spi_remove(struct platform_device *pdev)
/* HW shutdown */
clk_disable_unprepare(bs->clk);
- clk_put(bs->clk);
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int bcm63xx_spi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
@@ -465,29 +453,27 @@ static int bcm63xx_spi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ int ret;
- clk_prepare_enable(bs->clk);
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ return ret;
spi_master_resume(master);
return 0;
}
+#endif
static const struct dev_pm_ops bcm63xx_spi_pm_ops = {
- .suspend = bcm63xx_spi_suspend,
- .resume = bcm63xx_spi_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(bcm63xx_spi_suspend, bcm63xx_spi_resume)
};
-#define BCM63XX_SPI_PM_OPS (&bcm63xx_spi_pm_ops)
-#else
-#define BCM63XX_SPI_PM_OPS NULL
-#endif
-
static struct platform_driver bcm63xx_spi_driver = {
.driver = {
.name = "bcm63xx-spi",
.owner = THIS_MODULE,
- .pm = BCM63XX_SPI_PM_OPS,
+ .pm = &bcm63xx_spi_pm_ops,
},
.probe = bcm63xx_spi_probe,
.remove = bcm63xx_spi_remove,
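/*
 * Aside, not from this patch: the CONFIG_PM -> CONFIG_PM_SLEEP conversions
 * above lean on SET_SYSTEM_SLEEP_PM_OPS(), which lets the dev_pm_ops
 * structure be referenced unconditionally and retires the old
 * "#define FOO_PM_OPS NULL" fallback.  Rough sketch of what the macro does
 * (see include/linux/pm.h for the authoritative definition):
 *
 *	#ifdef CONFIG_PM_SLEEP
 *	#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
 *		.suspend = suspend_fn, .resume = resume_fn, \
 *		.freeze = suspend_fn, .thaw = resume_fn, \
 *		.poweroff = suspend_fn, .restore = resume_fn,
 *	#else
 *	#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
 *	#endif
 */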
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
index c16bf853c3eb..c616e41521be 100644
--- a/drivers/spi/spi-bitbang-txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -38,7 +38,7 @@
*
* Since this is software, the timings may not be exactly what your board's
* chips need ... there may be several reasons you'd need to tweak timings
- * in these routines, not just make to make it faster or slower to match a
+ * in these routines, not just to make it faster or slower to match a
* particular CPU clock rate.
*/
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 6f03d7e6435d..374ba4a48a9e 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -1,7 +1,7 @@
/*
* CLPS711X SPI bus driver
*
- * Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
+ * Copyright (C) 2012-2014 Alexander Shiyan <shc_work@mail.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -198,7 +198,7 @@ static int spi_clps711x_probe(struct platform_device *pdev)
ret = -EINVAL;
goto err_out;
}
- if (gpio_request(hw->chipselect[i], DRIVER_NAME)) {
+ if (devm_gpio_request(&pdev->dev, hw->chipselect[i], NULL)) {
dev_err(&pdev->dev, "Can't get CS GPIO %i\n", i);
ret = -EINVAL;
goto err_out;
@@ -240,38 +240,21 @@ static int spi_clps711x_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to register master\n");
err_out:
- while (--i >= 0)
- if (gpio_is_valid(hw->chipselect[i]))
- gpio_free(hw->chipselect[i]);
-
spi_master_put(master);
return ret;
}
-static int spi_clps711x_remove(struct platform_device *pdev)
-{
- int i;
- struct spi_master *master = platform_get_drvdata(pdev);
- struct spi_clps711x_data *hw = spi_master_get_devdata(master);
-
- for (i = 0; i < master->num_chipselect; i++)
- if (gpio_is_valid(hw->chipselect[i]))
- gpio_free(hw->chipselect[i]);
-
- return 0;
-}
-
static struct platform_driver clps711x_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
.probe = spi_clps711x_probe,
- .remove = spi_clps711x_remove,
};
module_platform_driver(clps711x_spi_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
MODULE_DESCRIPTION("CLPS711X SPI bus driver");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index cc5b75d10c38..cabed8f9119e 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -397,44 +397,31 @@ static int mcfqspi_probe(struct platform_device *pdev)
mcfqspi = spi_master_get_devdata(master);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_dbg(&pdev->dev, "platform_get_resource failed\n");
- status = -ENXIO;
+ mcfqspi->iobase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcfqspi->iobase)) {
+ status = PTR_ERR(mcfqspi->iobase);
goto fail0;
}
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_dbg(&pdev->dev, "request_mem_region failed\n");
- status = -EBUSY;
- goto fail0;
- }
-
- mcfqspi->iobase = ioremap(res->start, resource_size(res));
- if (!mcfqspi->iobase) {
- dev_dbg(&pdev->dev, "ioremap failed\n");
- status = -ENOMEM;
- goto fail1;
- }
-
mcfqspi->irq = platform_get_irq(pdev, 0);
if (mcfqspi->irq < 0) {
dev_dbg(&pdev->dev, "platform_get_irq failed\n");
status = -ENXIO;
- goto fail2;
+ goto fail0;
}
- status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, 0,
- pdev->name, mcfqspi);
+ status = devm_request_irq(&pdev->dev, mcfqspi->irq, mcfqspi_irq_handler,
+ 0, pdev->name, mcfqspi);
if (status) {
dev_dbg(&pdev->dev, "request_irq failed\n");
- goto fail2;
+ goto fail0;
}
- mcfqspi->clk = clk_get(&pdev->dev, "qspi_clk");
+ mcfqspi->clk = devm_clk_get(&pdev->dev, "qspi_clk");
if (IS_ERR(mcfqspi->clk)) {
dev_dbg(&pdev->dev, "clk_get failed\n");
status = PTR_ERR(mcfqspi->clk);
- goto fail3;
+ goto fail0;
}
clk_enable(mcfqspi->clk);
@@ -445,7 +432,7 @@ static int mcfqspi_probe(struct platform_device *pdev)
status = mcfqspi_cs_setup(mcfqspi);
if (status) {
dev_dbg(&pdev->dev, "error initializing cs_control\n");
- goto fail4;
+ goto fail1;
}
init_waitqueue_head(&mcfqspi->waitq);
@@ -459,10 +446,10 @@ static int mcfqspi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
- status = spi_register_master(master);
+ status = devm_spi_register_master(&pdev->dev, master);
if (status) {
dev_dbg(&pdev->dev, "spi_register_master failed\n");
- goto fail5;
+ goto fail2;
}
pm_runtime_enable(mcfqspi->dev);
@@ -470,17 +457,10 @@ static int mcfqspi_probe(struct platform_device *pdev)
return 0;
-fail5:
- mcfqspi_cs_teardown(mcfqspi);
-fail4:
- clk_disable(mcfqspi->clk);
- clk_put(mcfqspi->clk);
-fail3:
- free_irq(mcfqspi->irq, mcfqspi);
fail2:
- iounmap(mcfqspi->iobase);
+ mcfqspi_cs_teardown(mcfqspi);
fail1:
- release_mem_region(res->start, resource_size(res));
+ clk_disable(mcfqspi->clk);
fail0:
spi_master_put(master);
@@ -501,11 +481,6 @@ static int mcfqspi_remove(struct platform_device *pdev)
mcfqspi_cs_teardown(mcfqspi);
clk_disable(mcfqspi->clk);
- clk_put(mcfqspi->clk);
- free_irq(mcfqspi->irq, mcfqspi);
- iounmap(mcfqspi->iobase);
- release_mem_region(res->start, resource_size(res));
- spi_unregister_master(master);
return 0;
}
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 50b2d88c8190..5e7389faa2a0 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -396,10 +396,6 @@ static int davinci_spi_setup(struct spi_device *spi)
dspi = spi_master_get_devdata(spi->master);
pdata = &dspi->pdata;
- /* if bits per word length is zero then set it default 8 */
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
if (!(spi->mode & SPI_NO_CS)) {
if ((pdata->chip_sel == NULL) ||
(pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS))
@@ -853,7 +849,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct davinci_spi *dspi;
struct davinci_spi_platform_data *pdata;
- struct resource *r, *mem;
+ struct resource *r;
resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
int i = 0, ret = 0;
@@ -894,39 +890,33 @@ static int davinci_spi_probe(struct platform_device *pdev)
dspi->pbase = r->start;
- mem = request_mem_region(r->start, resource_size(r), pdev->name);
- if (mem == NULL) {
- ret = -EBUSY;
+ dspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(dspi->base)) {
+ ret = PTR_ERR(dspi->base);
goto free_master;
}
- dspi->base = ioremap(r->start, resource_size(r));
- if (dspi->base == NULL) {
- ret = -ENOMEM;
- goto release_region;
- }
-
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq <= 0) {
ret = -EINVAL;
- goto unmap_io;
+ goto free_master;
}
- ret = request_threaded_irq(dspi->irq, davinci_spi_irq, dummy_thread_fn,
- 0, dev_name(&pdev->dev), dspi);
+ ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
+ dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
if (ret)
- goto unmap_io;
+ goto free_master;
dspi->bitbang.master = master;
if (dspi->bitbang.master == NULL) {
ret = -ENODEV;
- goto irq_free;
+ goto free_master;
}
- dspi->clk = clk_get(&pdev->dev, NULL);
+ dspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dspi->clk)) {
ret = -ENODEV;
- goto irq_free;
+ goto free_master;
}
clk_prepare_enable(dspi->clk);
@@ -963,8 +953,8 @@ static int davinci_spi_probe(struct platform_device *pdev)
goto free_clk;
dev_info(&pdev->dev, "DMA: supported\n");
- dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, "
- "event queue: %d\n", dma_rx_chan, dma_tx_chan,
+ dev_info(&pdev->dev, "DMA: RX channel: %pa, TX channel: %pa, "
+ "event queue: %d\n", &dma_rx_chan, &dma_tx_chan,
pdata->dma_event_q);
}
@@ -1015,13 +1005,6 @@ free_dma:
dma_release_channel(dspi->dma_tx);
free_clk:
clk_disable_unprepare(dspi->clk);
- clk_put(dspi->clk);
-irq_free:
- free_irq(dspi->irq, dspi);
-unmap_io:
- iounmap(dspi->base);
-release_region:
- release_mem_region(dspi->pbase, resource_size(r));
free_master:
spi_master_put(master);
err:
@@ -1041,7 +1024,6 @@ static int davinci_spi_remove(struct platform_device *pdev)
{
struct davinci_spi *dspi;
struct spi_master *master;
- struct resource *r;
master = platform_get_drvdata(pdev);
dspi = spi_master_get_devdata(master);
@@ -1049,11 +1031,6 @@ static int davinci_spi_remove(struct platform_device *pdev)
spi_bitbang_stop(&dspi->bitbang);
clk_disable_unprepare(dspi->clk);
- clk_put(dspi->clk);
- free_irq(dspi->irq, dspi);
- iounmap(dspi->base);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(dspi->pbase, resource_size(r));
spi_master_put(master);
return 0;
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 168c620947f4..9af56cdf1540 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -30,14 +30,13 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
{
struct dw_spi_mmio *dwsmmio;
struct dw_spi *dws;
- struct resource *mem, *ioarea;
+ struct resource *mem;
int ret;
- dwsmmio = kzalloc(sizeof(struct dw_spi_mmio), GFP_KERNEL);
- if (!dwsmmio) {
- ret = -ENOMEM;
- goto err_end;
- }
+ dwsmmio = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_mmio),
+ GFP_KERNEL);
+ if (!dwsmmio)
+ return -ENOMEM;
dws = &dwsmmio->dws;
@@ -45,80 +44,51 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "no mem resource?\n");
- ret = -EINVAL;
- goto err_kfree;
+ return -EINVAL;
}
- ioarea = request_mem_region(mem->start, resource_size(mem),
- pdev->name);
- if (!ioarea) {
- dev_err(&pdev->dev, "SPI region already claimed\n");
- ret = -EBUSY;
- goto err_kfree;
- }
-
- dws->regs = ioremap_nocache(mem->start, resource_size(mem));
- if (!dws->regs) {
- dev_err(&pdev->dev, "SPI region already mapped\n");
- ret = -ENOMEM;
- goto err_release_reg;
+ dws->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dws->regs)) {
+ dev_err(&pdev->dev, "SPI region map failed\n");
+ return PTR_ERR(dws->regs);
}
dws->irq = platform_get_irq(pdev, 0);
if (dws->irq < 0) {
dev_err(&pdev->dev, "no irq resource?\n");
- ret = dws->irq; /* -ENXIO */
- goto err_unmap;
+ return dws->irq; /* -ENXIO */
}
- dwsmmio->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(dwsmmio->clk)) {
- ret = PTR_ERR(dwsmmio->clk);
- goto err_unmap;
- }
- clk_enable(dwsmmio->clk);
+ dwsmmio->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dwsmmio->clk))
+ return PTR_ERR(dwsmmio->clk);
+ ret = clk_prepare_enable(dwsmmio->clk);
+ if (ret)
+ return ret;
- dws->parent_dev = &pdev->dev;
dws->bus_num = 0;
dws->num_cs = 4;
dws->max_freq = clk_get_rate(dwsmmio->clk);
- ret = dw_spi_add_host(dws);
+ ret = dw_spi_add_host(&pdev->dev, dws);
if (ret)
- goto err_clk;
+ goto out;
platform_set_drvdata(pdev, dwsmmio);
return 0;
-err_clk:
- clk_disable(dwsmmio->clk);
- clk_put(dwsmmio->clk);
- dwsmmio->clk = NULL;
-err_unmap:
- iounmap(dws->regs);
-err_release_reg:
- release_mem_region(mem->start, resource_size(mem));
-err_kfree:
- kfree(dwsmmio);
-err_end:
+out:
+ clk_disable_unprepare(dwsmmio->clk);
return ret;
}
static int dw_spi_mmio_remove(struct platform_device *pdev)
{
struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
- struct resource *mem;
-
- clk_disable(dwsmmio->clk);
- clk_put(dwsmmio->clk);
- dwsmmio->clk = NULL;
+ clk_disable_unprepare(dwsmmio->clk);
dw_spi_remove_host(&dwsmmio->dws);
- iounmap(dwsmmio->dws.regs);
- kfree(dwsmmio);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(mem->start, resource_size(mem));
return 0;
}
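/*
 * A minimal sketch, not taken from this patch, of the devm-managed probe
 * shape these conversions move toward.  All "foo" identifiers are
 * placeholders; only the devm_* helpers themselves are real kernel APIs.
 * With managed resources the error path collapses to plain returns and
 * remove() keeps only the teardown that devm cannot do (here: the clock
 * enable).
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
	void __iomem *regs;
	struct clk *clk;
	int irq;
};

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;
	struct resource *mem;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* claims the region and maps it; both are released automatically */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	ret = devm_request_irq(&pdev->dev, priv->irq, foo_irq, 0,
			       dev_name(&pdev->dev), priv);
	if (ret)
		return ret;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	/* clock *enabling* is not devm-managed: undo it in remove() */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, priv);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	clk_disable_unprepare(priv->clk);	/* the only manual teardown left */
	return 0;
}

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo-spi",
		.owner = THIS_MODULE,
	},
	.probe = foo_probe,
	.remove = foo_remove,
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");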
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 66fa9955ea14..3f3dc1226edf 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -43,35 +43,25 @@ static int spi_pci_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n",
pdev->vendor, pdev->device);
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
return ret;
- dwpci = kzalloc(sizeof(struct dw_spi_pci), GFP_KERNEL);
- if (!dwpci) {
- ret = -ENOMEM;
- goto err_disable;
- }
+ dwpci = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_pci),
+ GFP_KERNEL);
+ if (!dwpci)
+ return -ENOMEM;
dwpci->pdev = pdev;
dws = &dwpci->dws;
/* Get basic io resource and map it */
dws->paddr = pci_resource_start(pdev, pci_bar);
- dws->iolen = pci_resource_len(pdev, pci_bar);
- ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
+ ret = pcim_iomap_regions(pdev, 1, dev_name(&pdev->dev));
if (ret)
- goto err_kfree;
-
- dws->regs = ioremap_nocache((unsigned long)dws->paddr,
- pci_resource_len(pdev, pci_bar));
- if (!dws->regs) {
- ret = -ENOMEM;
- goto err_release_reg;
- }
+ return ret;
- dws->parent_dev = &pdev->dev;
dws->bus_num = 0;
dws->num_cs = 4;
dws->irq = pdev->irq;
@@ -83,26 +73,17 @@ static int spi_pci_probe(struct pci_dev *pdev,
if (pdev->device == 0x0800) {
ret = dw_spi_mid_init(dws);
if (ret)
- goto err_unmap;
+ return ret;
}
- ret = dw_spi_add_host(dws);
+ ret = dw_spi_add_host(&pdev->dev, dws);
if (ret)
- goto err_unmap;
+ return ret;
/* PCI hook and SPI hook use the same drv data */
pci_set_drvdata(pdev, dwpci);
- return 0;
-err_unmap:
- iounmap(dws->regs);
-err_release_reg:
- pci_release_region(pdev, pci_bar);
-err_kfree:
- kfree(dwpci);
-err_disable:
- pci_disable_device(pdev);
- return ret;
+ return 0;
}
static void spi_pci_remove(struct pci_dev *pdev)
@@ -110,10 +91,6 @@ static void spi_pci_remove(struct pci_dev *pdev)
struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
dw_spi_remove_host(&dwpci->dws);
- iounmap(dwpci->dws.regs);
- pci_release_region(pdev, 0);
- kfree(dwpci);
- pci_disable_device(pdev);
}
#ifdef CONFIG_PM
@@ -148,7 +125,7 @@ static int spi_resume(struct pci_dev *pdev)
#define spi_resume NULL
#endif
-static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+static const struct pci_device_id pci_ids[] = {
/* Intel MID platform SPI controller 0 */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
{},
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index b897c4adb39d..bf98d63d92b3 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -427,7 +427,6 @@ static void pump_transfers(unsigned long data)
dws->tx_end = dws->tx + transfer->len;
dws->rx = transfer->rx_buf;
dws->rx_end = dws->rx + transfer->len;
- dws->cs_change = transfer->cs_change;
dws->len = dws->cur_transfer->len;
if (chip != dws->prev_chip)
cs_change = 1;
@@ -620,9 +619,11 @@ static int dw_spi_setup(struct spi_device *spi)
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (!chip) {
- chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+ chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data),
+ GFP_KERNEL);
if (!chip)
return -ENOMEM;
+ spi_set_ctldata(spi, chip);
}
/*
@@ -667,7 +668,6 @@ static int dw_spi_setup(struct spi_device *spi)
| (spi->mode << SPI_MODE_OFFSET)
| (chip->tmode << SPI_TMOD_OFFSET);
- spi_set_ctldata(spi, chip);
return 0;
}
@@ -776,18 +776,16 @@ static void spi_hw_init(struct dw_spi *dws)
}
}
-int dw_spi_add_host(struct dw_spi *dws)
+int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
struct spi_master *master;
int ret;
BUG_ON(dws == NULL);
- master = spi_alloc_master(dws->parent_dev, 0);
- if (!master) {
- ret = -ENOMEM;
- goto exit;
- }
+ master = spi_alloc_master(dev, 0);
+ if (!master)
+ return -ENOMEM;
dws->master = master;
dws->type = SSI_MOTO_SPI;
@@ -797,7 +795,7 @@ int dw_spi_add_host(struct dw_spi *dws)
snprintf(dws->name, sizeof(dws->name), "dw_spi%d",
dws->bus_num);
- ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
+ ret = devm_request_irq(dev, dws->irq, dw_spi_irq, IRQF_SHARED,
dws->name, dws);
if (ret < 0) {
dev_err(&master->dev, "can not get IRQ\n");
@@ -836,7 +834,7 @@ int dw_spi_add_host(struct dw_spi *dws)
}
spi_master_set_devdata(master, dws);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(dev, master);
if (ret) {
dev_err(&master->dev, "problem registering spi master\n");
goto err_queue_alloc;
@@ -851,10 +849,8 @@ err_queue_alloc:
dws->dma_ops->dma_exit(dws);
err_diable_hw:
spi_enable_chip(dws, 0);
- free_irq(dws->irq, dws);
err_free_master:
spi_master_put(master);
-exit:
return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_add_host);
@@ -878,10 +874,6 @@ void dw_spi_remove_host(struct dw_spi *dws)
spi_enable_chip(dws, 0);
/* Disable clk */
spi_set_clk(dws, 0);
- free_irq(dws->irq, dws);
-
- /* Disconnect from the SPI framework */
- spi_unregister_master(dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 9c57c078031e..587643dae11e 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -92,13 +92,11 @@ struct dw_spi_dma_ops {
struct dw_spi {
struct spi_master *master;
struct spi_device *cur_dev;
- struct device *parent_dev;
enum dw_ssi_type type;
char name[16];
void __iomem *regs;
unsigned long paddr;
- u32 iolen;
int irq;
u32 fifo_len; /* depth of the FIFO buffer */
u32 max_freq; /* max bus freq supported */
@@ -135,7 +133,6 @@ struct dw_spi {
u8 n_bytes; /* current is a 1/2 bytes op */
u8 max_bits_per_word; /* maxim is 16b */
u32 dma_width;
- int cs_change;
irqreturn_t (*transfer_handler)(struct dw_spi *dws);
void (*cs_control)(u32 command);
@@ -231,7 +228,7 @@ struct dw_spi_chip {
void (*cs_control)(u32 command);
};
-extern int dw_spi_add_host(struct dw_spi *dws);
+extern int dw_spi_add_host(struct device *dev, struct dw_spi *dws);
extern void dw_spi_remove_host(struct dw_spi *dws);
extern int dw_spi_suspend_host(struct dw_spi *dws);
extern int dw_spi_resume_host(struct dw_spi *dws);
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index c7a74f0ef892..dd5bd468e962 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -433,21 +433,12 @@ static int falcon_sflash_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
spi_master_put(master);
return ret;
}
-static int falcon_sflash_remove(struct platform_device *pdev)
-{
- struct falcon_sflash *priv = platform_get_drvdata(pdev);
-
- spi_unregister_master(priv->master);
-
- return 0;
-}
-
static const struct of_device_id falcon_sflash_match[] = {
{ .compatible = "lantiq,sflash-falcon" },
{},
@@ -456,7 +447,6 @@ MODULE_DEVICE_TABLE(of, falcon_sflash_match);
static struct platform_driver falcon_sflash_driver = {
.probe = falcon_sflash_probe,
- .remove = falcon_sflash_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 8641b03bdd7a..ec79f726672a 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -320,8 +320,10 @@ static void dspi_chipselect(struct spi_device *spi, int value)
switch (value) {
case BITBANG_CS_ACTIVE:
pushr |= SPI_PUSHR_CONT;
+ break;
case BITBANG_CS_INACTIVE:
pushr &= ~SPI_PUSHR_CONT;
+ break;
}
writel(pushr, dspi->base + SPI_PUSHR);
@@ -373,9 +375,6 @@ static int dspi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
return -EINVAL;
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
return dspi_setup_transfer(spi, NULL);
}
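/*
 * Aside, not from this patch: the two added break statements above matter
 * because the switch previously fell through, so the BITBANG_CS_ACTIVE case
 * set SPI_PUSHR_CONT and the BITBANG_CS_INACTIVE case immediately cleared it
 * again -- the continuous-selection bit ended up cleared on every path.
 * With the breaks, each case now takes effect on its own.
 */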
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 80d8f40f7e05..428dc7a6b62e 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -705,7 +705,7 @@ static int of_fsl_espi_probe(struct platform_device *ofdev)
goto err;
irq = irq_of_parse_and_map(np, 0);
- if (!ret) {
+ if (!irq) {
ret = -EINVAL;
goto err;
}
@@ -727,6 +727,66 @@ static int of_fsl_espi_remove(struct platform_device *dev)
return mpc8xxx_spi_remove(&dev->dev);
}
+#ifdef CONFIG_PM_SLEEP
+static int of_fsl_espi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_espi_reg *reg_base;
+ u32 regval;
+ int ret;
+
+ mpc8xxx_spi = spi_master_get_devdata(master);
+ reg_base = mpc8xxx_spi->reg_base;
+
+ ret = spi_master_suspend(master);
+ if (ret) {
+ dev_warn(dev, "cannot suspend master\n");
+ return ret;
+ }
+
+ regval = mpc8xxx_spi_read_reg(&reg_base->mode);
+ regval &= ~SPMODE_ENABLE;
+ mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+
+ return 0;
+}
+
+static int of_fsl_espi_resume(struct device *dev)
+{
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_espi_reg *reg_base;
+ u32 regval;
+ int i;
+
+ mpc8xxx_spi = spi_master_get_devdata(master);
+ reg_base = mpc8xxx_spi->reg_base;
+
+ /* SPI controller initializations */
+ mpc8xxx_spi_write_reg(&reg_base->mode, 0);
+ mpc8xxx_spi_write_reg(&reg_base->mask, 0);
+ mpc8xxx_spi_write_reg(&reg_base->command, 0);
+ mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
+
+ /* Init eSPI CS mode register */
+ for (i = 0; i < pdata->max_chipselect; i++)
+ mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL);
+
+ /* Enable SPI interface */
+ regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
+
+ mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops espi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(of_fsl_espi_suspend, of_fsl_espi_resume)
+};
+
static const struct of_device_id of_fsl_espi_match[] = {
{ .compatible = "fsl,mpc8536-espi" },
{}
@@ -738,6 +798,7 @@ static struct platform_driver fsl_espi_driver = {
.name = "fsl_espi",
.owner = THIS_MODULE,
.of_match_table = of_fsl_espi_match,
+ .pm = &espi_pm,
},
.probe = of_fsl_espi_probe,
.remove = of_fsl_espi_remove,
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 3fb09f981980..7beeb29472ac 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -115,17 +115,17 @@ spi_to_pdata(const struct spi_device *spi)
static inline void setsck(const struct spi_device *spi, int is_on)
{
- gpio_set_value(SPI_SCK_GPIO, is_on);
+ gpio_set_value_cansleep(SPI_SCK_GPIO, is_on);
}
static inline void setmosi(const struct spi_device *spi, int is_on)
{
- gpio_set_value(SPI_MOSI_GPIO, is_on);
+ gpio_set_value_cansleep(SPI_MOSI_GPIO, is_on);
}
static inline int getmiso(const struct spi_device *spi)
{
- return !!gpio_get_value(SPI_MISO_GPIO);
+ return !!gpio_get_value_cansleep(SPI_MISO_GPIO);
}
#undef pdata
@@ -229,7 +229,7 @@ static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
if (cs != SPI_GPIO_NO_CHIPSELECT) {
/* SPI is normally active-low */
- gpio_set_value(cs, (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
+ gpio_set_value_cansleep(cs, (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
}
}
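/*
 * Aside, not from this patch: spi-gpio bitbangs from a context that is
 * allowed to sleep, so the plain gpio_set_value()/gpio_get_value() calls
 * above become the *_cansleep() variants.  Those accept GPIOs whose
 * controllers may themselves sleep (e.g. I2C or SPI GPIO expanders), which
 * the non-sleeping accessors would warn about.
 */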
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index b80f2f70fef7..a5474ef9d2a0 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -206,7 +206,8 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
#define MX51_ECSPI_STAT_RR (1 << 3)
/* MX51 eCSPI */
-static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi)
+static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi,
+ unsigned int *fres)
{
/*
* there are two 4-bit dividers, the pre-divider divides by
@@ -234,6 +235,10 @@ static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi)
pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
__func__, fin, fspi, post, pre);
+
+ /* Resulting frequency for the SCLK line. */
+ *fres = (fin / (pre + 1)) >> post;
+
return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
@@ -264,6 +269,7 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
struct spi_imx_config *config)
{
u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0;
+ u32 clk = config->speed_hz, delay;
/*
* The hardware seems to have a race condition when changing modes. The
@@ -275,7 +281,7 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
/* set clock speed */
- ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz);
+ ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz, &clk);
/* set chip select to use */
ctrl |= MX51_ECSPI_CTRL_CS(config->cs);
@@ -297,6 +303,23 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
+ /*
+ * Wait until the changes in the configuration register CONFIGREG
+ * propagate into the hardware. It takes exactly one tick of the
+ * SCLK clock, but we will wait two SCLK cycles just to be sure. The
+ * effect of the delay it takes for the hardware to apply changes
+ * is noticeable if the SCLK clock runs very slowly. In such a case, if
+ * the polarity of SCLK should be inverted, the GPIO ChipSelect might
+ * be asserted before the SCLK polarity changes, which would disrupt
+ * the SPI communication as the device on the other end would consider
+ * the change of SCLK polarity as a clock tick already.
+ */
+ delay = (2 * 1000000) / clk;
+ if (likely(delay < 10)) /* SCLK is faster than 100 kHz */
+ udelay(delay);
+ else /* SCLK is _very_ slow */
+ usleep_range(delay, delay + 10);
+
return 0;
}
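/*
 * Aside, not from this patch: the delay computed above is simply two SCLK
 * periods expressed in microseconds.  A hypothetical helper and two worked
 * values, assuming the same formula as the code:
 */
#include <linux/kernel.h>

static inline unsigned int ecspi_cfg_delay_us(unsigned int sclk_hz)
{
	/* two ticks of SCLK, in microseconds: 2 * 1e6 / f_sclk */
	return (2 * 1000000) / sclk_hz;
}

/*
 * ecspi_cfg_delay_us(1000000) ==  2  -> udelay(2)            (fast-SCLK path)
 * ecspi_cfg_delay_us(50000)   == 40  -> usleep_range(40, 50) (slow-SCLK path)
 */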
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 87676587d783..5032141eeeec 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -40,6 +40,7 @@ struct mpc512x_psc_spi {
unsigned int irq;
u8 bits_per_word;
struct clk *clk_mclk;
+ struct clk *clk_ipg;
u32 mclk_rate;
struct completion txisrdone;
@@ -475,8 +476,6 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
struct spi_master *master;
int ret;
void *tempp;
- int psc_num;
- char clk_name[16];
struct clk *clk;
master = spi_alloc_master(dev, sizeof *mps);
@@ -504,7 +503,7 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
master->cleanup = mpc512x_psc_spi_cleanup;
master->dev.of_node = dev->of_node;
- tempp = ioremap(regaddr, size);
+ tempp = devm_ioremap(dev, regaddr, size);
if (!tempp) {
dev_err(dev, "could not ioremap I/O port range\n");
ret = -EFAULT;
@@ -513,43 +512,48 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
mps->psc = tempp;
mps->fifo =
(struct mpc512x_psc_fifo *)(tempp + sizeof(struct mpc52xx_psc));
-
- ret = request_irq(mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED,
- "mpc512x-psc-spi", mps);
+ ret = devm_request_irq(dev, mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED,
+ "mpc512x-psc-spi", mps);
if (ret)
goto free_master;
init_completion(&mps->txisrdone);
- psc_num = master->bus_num;
- snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num);
- clk = devm_clk_get(dev, clk_name);
+ clk = devm_clk_get(dev, "mclk");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- goto free_irq;
+ goto free_master;
}
ret = clk_prepare_enable(clk);
if (ret)
- goto free_irq;
+ goto free_master;
mps->clk_mclk = clk;
mps->mclk_rate = clk_get_rate(clk);
+ clk = devm_clk_get(dev, "ipg");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto free_mclk_clock;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto free_mclk_clock;
+ mps->clk_ipg = clk;
+
ret = mpc512x_psc_spi_port_config(master, mps);
if (ret < 0)
- goto free_clock;
+ goto free_ipg_clock;
ret = devm_spi_register_master(dev, master);
if (ret < 0)
- goto free_clock;
+ goto free_ipg_clock;
return ret;
-free_clock:
+free_ipg_clock:
+ clk_disable_unprepare(mps->clk_ipg);
+free_mclk_clock:
clk_disable_unprepare(mps->clk_mclk);
-free_irq:
- free_irq(mps->irq, mps);
free_master:
- if (mps->psc)
- iounmap(mps->psc);
spi_master_put(master);
return ret;
@@ -561,9 +565,7 @@ static int mpc512x_psc_spi_do_remove(struct device *dev)
struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
clk_disable_unprepare(mps->clk_mclk);
- free_irq(mps->irq, mps);
- if (mps->psc)
- iounmap(mps->psc);
+ clk_disable_unprepare(mps->clk_ipg);
return 0;
}
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 3adebfa22e3d..79e5aa2250c8 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -111,14 +111,6 @@ static int mxs_spi_setup_transfer(struct spi_device *dev,
return 0;
}
-static int mxs_spi_setup(struct spi_device *dev)
-{
- if (!dev->bits_per_word)
- dev->bits_per_word = 8;
-
- return 0;
-}
-
static u32 mxs_spi_cs_to_reg(unsigned cs)
{
u32 select = 0;
@@ -502,7 +494,6 @@ static int mxs_spi_probe(struct platform_device *pdev)
return -ENOMEM;
master->transfer_one_message = mxs_spi_transfer_one;
- master->setup = mxs_spi_setup;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->num_chipselect = 3;
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index e0c32bc69ee2..bae97ffec4b9 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -57,7 +57,6 @@ struct nuc900_spi {
const unsigned char *tx;
unsigned char *rx;
struct clk *clk;
- struct resource *ioarea;
struct spi_master *master;
struct spi_device *curdev;
struct device *dev;
@@ -344,8 +343,7 @@ static int nuc900_spi_probe(struct platform_device *pdev)
master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi));
if (master == NULL) {
dev_err(&pdev->dev, "No memory for spi_master\n");
- err = -ENOMEM;
- goto err_nomem;
+ return -ENOMEM;
}
hw = spi_master_get_devdata(master);
@@ -363,6 +361,8 @@ static int nuc900_spi_probe(struct platform_device *pdev)
init_completion(&hw->done);
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ if (hw->pdata->lsb)
+ master->mode_bits |= SPI_LSB_FIRST;
master->num_chipselect = hw->pdata->num_cs;
master->bus_num = hw->pdata->bus_num;
hw->bitbang.master = hw->master;
@@ -370,46 +370,31 @@ static int nuc900_spi_probe(struct platform_device *pdev)
hw->bitbang.txrx_bufs = nuc900_spi_txrx;
hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (hw->res == NULL) {
- dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
- err = -ENOENT;
- goto err_pdata;
- }
-
- hw->ioarea = request_mem_region(hw->res->start,
- resource_size(hw->res), pdev->name);
-
- if (hw->ioarea == NULL) {
- dev_err(&pdev->dev, "Cannot reserve region\n");
- err = -ENXIO;
+ hw->regs = devm_ioremap_resource(&pdev->dev, hw->res);
+ if (IS_ERR(hw->regs)) {
+ err = PTR_ERR(hw->regs);
goto err_pdata;
}
- hw->regs = ioremap(hw->res->start, resource_size(hw->res));
- if (hw->regs == NULL) {
- dev_err(&pdev->dev, "Cannot map IO\n");
- err = -ENXIO;
- goto err_iomap;
- }
-
hw->irq = platform_get_irq(pdev, 0);
if (hw->irq < 0) {
dev_err(&pdev->dev, "No IRQ specified\n");
err = -ENOENT;
- goto err_irq;
+ goto err_pdata;
}
- err = request_irq(hw->irq, nuc900_spi_irq, 0, pdev->name, hw);
+ err = devm_request_irq(&pdev->dev, hw->irq, nuc900_spi_irq, 0,
+ pdev->name, hw);
if (err) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- goto err_irq;
+ goto err_pdata;
}
- hw->clk = clk_get(&pdev->dev, "spi");
+ hw->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(hw->clk)) {
dev_err(&pdev->dev, "No clock for device\n");
err = PTR_ERR(hw->clk);
- goto err_clk;
+ goto err_pdata;
}
mfp_set_groupg(&pdev->dev, NULL);
@@ -425,17 +410,8 @@ static int nuc900_spi_probe(struct platform_device *pdev)
err_register:
clk_disable(hw->clk);
- clk_put(hw->clk);
-err_clk:
- free_irq(hw->irq, hw);
-err_irq:
- iounmap(hw->regs);
-err_iomap:
- release_mem_region(hw->res->start, resource_size(hw->res));
- kfree(hw->ioarea);
err_pdata:
spi_master_put(hw->master);
-err_nomem:
return err;
}
@@ -443,18 +419,8 @@ static int nuc900_spi_remove(struct platform_device *dev)
{
struct nuc900_spi *hw = platform_get_drvdata(dev);
- free_irq(hw->irq, hw);
-
spi_bitbang_stop(&hw->bitbang);
-
clk_disable(hw->clk);
- clk_put(hw->clk);
-
- iounmap(hw->regs);
-
- release_mem_region(hw->res->start, resource_size(hw->res));
- kfree(hw->ioarea);
-
spi_master_put(hw->master);
return 0;
}
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index 91c668596202..f7c896e2981e 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -153,62 +153,22 @@ static int tiny_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
}
wait_for_completion(&hw->done);
- } else if (txp && rxp) {
- /* we need to tighten the transfer loop */
- writeb(*txp++, hw->base + TINY_SPI_TXDATA);
- if (t->len > 1) {
- writeb(*txp++, hw->base + TINY_SPI_TXDATA);
- for (i = 2; i < t->len; i++) {
- u8 rx, tx = *txp++;
- tiny_spi_wait_txr(hw);
- rx = readb(hw->base + TINY_SPI_TXDATA);
- writeb(tx, hw->base + TINY_SPI_TXDATA);
- *rxp++ = rx;
- }
- tiny_spi_wait_txr(hw);
- *rxp++ = readb(hw->base + TINY_SPI_TXDATA);
- }
- tiny_spi_wait_txe(hw);
- *rxp++ = readb(hw->base + TINY_SPI_RXDATA);
- } else if (rxp) {
- writeb(0, hw->base + TINY_SPI_TXDATA);
- if (t->len > 1) {
- writeb(0,
- hw->base + TINY_SPI_TXDATA);
- for (i = 2; i < t->len; i++) {
- u8 rx;
- tiny_spi_wait_txr(hw);
- rx = readb(hw->base + TINY_SPI_TXDATA);
- writeb(0, hw->base + TINY_SPI_TXDATA);
- *rxp++ = rx;
- }
- tiny_spi_wait_txr(hw);
- *rxp++ = readb(hw->base + TINY_SPI_TXDATA);
- }
- tiny_spi_wait_txe(hw);
- *rxp++ = readb(hw->base + TINY_SPI_RXDATA);
- } else if (txp) {
- writeb(*txp++, hw->base + TINY_SPI_TXDATA);
- if (t->len > 1) {
- writeb(*txp++, hw->base + TINY_SPI_TXDATA);
- for (i = 2; i < t->len; i++) {
- u8 tx = *txp++;
- tiny_spi_wait_txr(hw);
- writeb(tx, hw->base + TINY_SPI_TXDATA);
- }
- }
- tiny_spi_wait_txe(hw);
} else {
- writeb(0, hw->base + TINY_SPI_TXDATA);
- if (t->len > 1) {
- writeb(0, hw->base + TINY_SPI_TXDATA);
- for (i = 2; i < t->len; i++) {
+ /* we need to tighten the transfer loop */
+ writeb(txp ? *txp++ : 0, hw->base + TINY_SPI_TXDATA);
+ for (i = 1; i < t->len; i++) {
+ writeb(txp ? *txp++ : 0, hw->base + TINY_SPI_TXDATA);
+
+ if (rxp || (i != t->len - 1))
tiny_spi_wait_txr(hw);
- writeb(0, hw->base + TINY_SPI_TXDATA);
- }
+ if (rxp)
+ *rxp++ = readb(hw->base + TINY_SPI_TXDATA);
}
tiny_spi_wait_txe(hw);
+ if (rxp)
+ *rxp++ = readb(hw->base + TINY_SPI_RXDATA);
}
+
return t->len;
}
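/*
 * A sketch, not taken from this patch, of the consolidation idea above: one
 * full-duplex loop replaces four near-identical tx/rx variants by writing a
 * zero dummy byte when there is no TX buffer and dropping the read-back byte
 * when there is no RX buffer.  xchg_byte() is a placeholder for the
 * register-level transfer, and this ignores the two-entry pipelining the
 * real driver performs.
 */
#include <linux/types.h>

static void xfer_bytes(const u8 *txp, u8 *rxp, unsigned int len,
		       u8 (*xchg_byte)(u8 out))
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		u8 in = xchg_byte(txp ? *txp++ : 0);	/* 0 = dummy TX byte */

		if (rxp)
			*rxp++ = in;	/* discard when RX is not requested */
	}
}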
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index b6ed82beb01d..0d32054bfc0d 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -470,31 +470,12 @@ err:
return status;
}
-static int omap1_spi100k_remove(struct platform_device *pdev)
-{
- struct spi_master *master;
- struct omap1_spi100k *spi100k;
- struct resource *r;
- int status = 0;
-
- master = platform_get_drvdata(pdev);
- spi100k = spi_master_get_devdata(master);
-
- if (status != 0)
- return status;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- return 0;
-}
-
static struct platform_driver omap1_spi100k_driver = {
.driver = {
.name = "omap1_spi100k",
.owner = THIS_MODULE,
},
.probe = omap1_spi100k_probe,
- .remove = omap1_spi100k_remove,
};
module_platform_driver(omap1_spi100k_driver);
@@ -502,4 +483,3 @@ module_platform_driver(omap1_spi100k_driver);
MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver");
MODULE_AUTHOR("Fabrice Crohas <fcrohas@gmail.com>");
MODULE_LICENSE("GPL");
-
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 443df39840bc..a72127f08e39 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -157,14 +157,14 @@ static inline void mcspi_write_reg(struct spi_master *master,
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
- __raw_writel(val, mcspi->base + idx);
+ writel_relaxed(val, mcspi->base + idx);
}
static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
- return __raw_readl(mcspi->base + idx);
+ return readl_relaxed(mcspi->base + idx);
}
static inline void mcspi_write_cs_reg(const struct spi_device *spi,
@@ -172,14 +172,14 @@ static inline void mcspi_write_cs_reg(const struct spi_device *spi,
{
struct omap2_mcspi_cs *cs = spi->controller_state;
- __raw_writel(val, cs->base + idx);
+ writel_relaxed(val, cs->base + idx);
}
static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
{
struct omap2_mcspi_cs *cs = spi->controller_state;
- return __raw_readl(cs->base + idx);
+ return readl_relaxed(cs->base + idx);
}
static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
@@ -338,7 +338,7 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
list_for_each_entry(cs, &ctx->cs, node)
- __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
+ writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
}
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
@@ -346,9 +346,9 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(1000);
- while (!(__raw_readl(reg) & bit)) {
+ while (!(readl_relaxed(reg) & bit)) {
if (time_after(jiffies, timeout)) {
- if (!(__raw_readl(reg) & bit))
+ if (!(readl_relaxed(reg) & bit))
return -ETIMEDOUT;
else
return 0;
@@ -675,7 +675,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
}
dev_vdbg(&spi->dev, "write-%d %02x\n",
word_len, *tx);
- __raw_writel(*tx++, tx_reg);
+ writel_relaxed(*tx++, tx_reg);
}
if (rx != NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -687,7 +687,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
if (c == 1 && tx == NULL &&
(l & OMAP2_MCSPI_CHCONF_TURBO)) {
omap2_mcspi_set_enable(spi, 0);
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %02x\n",
word_len, *(rx - 1));
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -701,7 +701,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
omap2_mcspi_set_enable(spi, 0);
}
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %02x\n",
word_len, *(rx - 1));
}
@@ -722,7 +722,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
}
dev_vdbg(&spi->dev, "write-%d %04x\n",
word_len, *tx);
- __raw_writel(*tx++, tx_reg);
+ writel_relaxed(*tx++, tx_reg);
}
if (rx != NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -734,7 +734,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
if (c == 2 && tx == NULL &&
(l & OMAP2_MCSPI_CHCONF_TURBO)) {
omap2_mcspi_set_enable(spi, 0);
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %04x\n",
word_len, *(rx - 1));
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -748,7 +748,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
omap2_mcspi_set_enable(spi, 0);
}
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %04x\n",
word_len, *(rx - 1));
}
@@ -769,7 +769,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
}
dev_vdbg(&spi->dev, "write-%d %08x\n",
word_len, *tx);
- __raw_writel(*tx++, tx_reg);
+ writel_relaxed(*tx++, tx_reg);
}
if (rx != NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -781,7 +781,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
if (c == 4 && tx == NULL &&
(l & OMAP2_MCSPI_CHCONF_TURBO)) {
omap2_mcspi_set_enable(spi, 0);
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %08x\n",
word_len, *(rx - 1));
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -795,7 +795,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
omap2_mcspi_set_enable(spi, 0);
}
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %08x\n",
word_len, *(rx - 1));
}
@@ -1107,7 +1107,7 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
/* RX_ONLY mode needs dummy data in TX reg */
if (t->tx_buf == NULL)
- __raw_writel(0, cs->base
+ writel_relaxed(0, cs->base
+ OMAP2_MCSPI_TX0);
if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
@@ -1470,9 +1470,9 @@ static int omap2_mcspi_resume(struct device *dev)
* change in account.
*/
cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
- __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
+ writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
- __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
+ writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
}
}
pm_runtime_mark_last_busy(mcspi->dev);
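/*
 * Aside, not from this patch: the __raw_writel()/__raw_readl() ->
 * writel_relaxed()/readl_relaxed() conversion above keeps the barrier-free
 * behaviour of the raw accessors, but unlike the __raw_* forms the _relaxed
 * variants perform the little-endian byte swapping expected for MMIO, which
 * matters on big-endian builds.
 */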
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 744841e095e4..7f2121fe2622 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -434,7 +434,7 @@ static int orion_spi_probe(struct platform_device *pdev)
spi = spi_master_get_devdata(master);
spi->master = master;
- spi->clk = clk_get(&pdev->dev, NULL);
+ spi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(spi->clk)) {
status = PTR_ERR(spi->clk);
goto out;
@@ -465,7 +465,6 @@ static int orion_spi_probe(struct platform_device *pdev)
out_rel_clk:
clk_disable_unprepare(spi->clk);
- clk_put(spi->clk);
out:
spi_master_put(master);
return status;
@@ -481,7 +480,6 @@ static int orion_spi_remove(struct platform_device *pdev)
spi = spi_master_get_devdata(master);
clk_disable_unprepare(spi->clk);
- clk_put(spi->clk);
return 0;
}
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 74bc18775658..3f006d3ed2a8 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -62,7 +62,7 @@ static void ce4100_spi_remove(struct pci_dev *dev)
platform_device_unregister(pdev);
}
-static DEFINE_PCI_DEVICE_TABLE(ce4100_spi_devices) = {
+static const struct pci_device_id ce4100_spi_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
{ },
};
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 7765b1999537..c702fc536a77 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1066,6 +1066,8 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
pdata->num_chipselect = 1;
pdata->enable_dma = true;
+ pdata->tx_chan_id = -1;
+ pdata->rx_chan_id = -1;
return pdata;
}
@@ -1266,7 +1268,7 @@ static void pxa2xx_spi_shutdown(struct platform_device *pdev)
dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int pxa2xx_spi_suspend(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 9e829cee7357..28987d9fcfe5 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -37,117 +37,145 @@
#include <linux/spi/spi.h>
#include <linux/spi/rspi.h>
-#define RSPI_SPCR 0x00
-#define RSPI_SSLP 0x01
-#define RSPI_SPPCR 0x02
-#define RSPI_SPSR 0x03
-#define RSPI_SPDR 0x04
-#define RSPI_SPSCR 0x08
-#define RSPI_SPSSR 0x09
-#define RSPI_SPBR 0x0a
-#define RSPI_SPDCR 0x0b
-#define RSPI_SPCKD 0x0c
-#define RSPI_SSLND 0x0d
-#define RSPI_SPND 0x0e
-#define RSPI_SPCR2 0x0f
-#define RSPI_SPCMD0 0x10
-#define RSPI_SPCMD1 0x12
-#define RSPI_SPCMD2 0x14
-#define RSPI_SPCMD3 0x16
-#define RSPI_SPCMD4 0x18
-#define RSPI_SPCMD5 0x1a
-#define RSPI_SPCMD6 0x1c
-#define RSPI_SPCMD7 0x1e
+#define RSPI_SPCR 0x00 /* Control Register */
+#define RSPI_SSLP 0x01 /* Slave Select Polarity Register */
+#define RSPI_SPPCR 0x02 /* Pin Control Register */
+#define RSPI_SPSR 0x03 /* Status Register */
+#define RSPI_SPDR 0x04 /* Data Register */
+#define RSPI_SPSCR 0x08 /* Sequence Control Register */
+#define RSPI_SPSSR 0x09 /* Sequence Status Register */
+#define RSPI_SPBR 0x0a /* Bit Rate Register */
+#define RSPI_SPDCR 0x0b /* Data Control Register */
+#define RSPI_SPCKD 0x0c /* Clock Delay Register */
+#define RSPI_SSLND 0x0d /* Slave Select Negation Delay Register */
+#define RSPI_SPND 0x0e /* Next-Access Delay Register */
+#define RSPI_SPCR2 0x0f /* Control Register 2 */
+#define RSPI_SPCMD0 0x10 /* Command Register 0 */
+#define RSPI_SPCMD1 0x12 /* Command Register 1 */
+#define RSPI_SPCMD2 0x14 /* Command Register 2 */
+#define RSPI_SPCMD3 0x16 /* Command Register 3 */
+#define RSPI_SPCMD4 0x18 /* Command Register 4 */
+#define RSPI_SPCMD5 0x1a /* Command Register 5 */
+#define RSPI_SPCMD6 0x1c /* Command Register 6 */
+#define RSPI_SPCMD7 0x1e /* Command Register 7 */
+#define RSPI_SPBFCR 0x20 /* Buffer Control Register */
+#define RSPI_SPBFDR 0x22 /* Buffer Data Count Setting Register */
/*qspi only */
-#define QSPI_SPBFCR 0x18
-#define QSPI_SPBDCR 0x1a
-#define QSPI_SPBMUL0 0x1c
-#define QSPI_SPBMUL1 0x20
-#define QSPI_SPBMUL2 0x24
-#define QSPI_SPBMUL3 0x28
-
-/* SPCR */
-#define SPCR_SPRIE 0x80
-#define SPCR_SPE 0x40
-#define SPCR_SPTIE 0x20
-#define SPCR_SPEIE 0x10
-#define SPCR_MSTR 0x08
-#define SPCR_MODFEN 0x04
-#define SPCR_TXMD 0x02
-#define SPCR_SPMS 0x01
-
-/* SSLP */
-#define SSLP_SSL1P 0x02
-#define SSLP_SSL0P 0x01
-
-/* SPPCR */
-#define SPPCR_MOIFE 0x20
-#define SPPCR_MOIFV 0x10
+#define QSPI_SPBFCR 0x18 /* Buffer Control Register */
+#define QSPI_SPBDCR 0x1a /* Buffer Data Count Register */
+#define QSPI_SPBMUL0 0x1c /* Transfer Data Length Multiplier Setting Register 0 */
+#define QSPI_SPBMUL1 0x20 /* Transfer Data Length Multiplier Setting Register 1 */
+#define QSPI_SPBMUL2 0x24 /* Transfer Data Length Multiplier Setting Register 2 */
+#define QSPI_SPBMUL3 0x28 /* Transfer Data Length Multiplier Setting Register 3 */
+
+/* SPCR - Control Register */
+#define SPCR_SPRIE 0x80 /* Receive Interrupt Enable */
+#define SPCR_SPE 0x40 /* Function Enable */
+#define SPCR_SPTIE 0x20 /* Transmit Interrupt Enable */
+#define SPCR_SPEIE 0x10 /* Error Interrupt Enable */
+#define SPCR_MSTR 0x08 /* Master/Slave Mode Select */
+#define SPCR_MODFEN 0x04 /* Mode Fault Error Detection Enable */
+/* RSPI on SH only */
+#define SPCR_TXMD 0x02 /* TX Only Mode (vs. Full Duplex) */
+#define SPCR_SPMS 0x01 /* 3-wire Mode (vs. 4-wire) */
+/* QSPI on R-Car M2 only */
+#define SPCR_WSWAP 0x02 /* Word Swap of read-data for DMAC */
+#define SPCR_BSWAP 0x01 /* Byte Swap of read-data for DMAC */
+
+/* SSLP - Slave Select Polarity Register */
+#define SSLP_SSL1P 0x02 /* SSL1 Signal Polarity Setting */
+#define SSLP_SSL0P 0x01 /* SSL0 Signal Polarity Setting */
+
+/* SPPCR - Pin Control Register */
+#define SPPCR_MOIFE 0x20 /* MOSI Idle Value Fixing Enable */
+#define SPPCR_MOIFV 0x10 /* MOSI Idle Fixed Value */
#define SPPCR_SPOM 0x04
-#define SPPCR_SPLP2 0x02
-#define SPPCR_SPLP 0x01
-
-/* SPSR */
-#define SPSR_SPRF 0x80
-#define SPSR_SPTEF 0x20
-#define SPSR_PERF 0x08
-#define SPSR_MODF 0x04
-#define SPSR_IDLNF 0x02
-#define SPSR_OVRF 0x01
-
-/* SPSCR */
-#define SPSCR_SPSLN_MASK 0x07
-
-/* SPSSR */
-#define SPSSR_SPECM_MASK 0x70
-#define SPSSR_SPCP_MASK 0x07
-
-/* SPDCR */
-#define SPDCR_SPLW 0x20
-#define SPDCR_SPRDTD 0x10
+#define SPPCR_SPLP2 0x02 /* Loopback Mode 2 (non-inverting) */
+#define SPPCR_SPLP 0x01 /* Loopback Mode (inverting) */
+
+#define SPPCR_IO3FV 0x04 /* Single-/Dual-SPI Mode IO3 Output Fixed Value */
+#define SPPCR_IO2FV 0x02 /* Single-/Dual-SPI Mode IO2 Output Fixed Value */
+
+/* SPSR - Status Register */
+#define SPSR_SPRF 0x80 /* Receive Buffer Full Flag */
+#define SPSR_TEND 0x40 /* Transmit End */
+#define SPSR_SPTEF 0x20 /* Transmit Buffer Empty Flag */
+#define SPSR_PERF 0x08 /* Parity Error Flag */
+#define SPSR_MODF 0x04 /* Mode Fault Error Flag */
+#define SPSR_IDLNF 0x02 /* RSPI Idle Flag */
+#define SPSR_OVRF 0x01 /* Overrun Error Flag */
+
+/* SPSCR - Sequence Control Register */
+#define SPSCR_SPSLN_MASK 0x07 /* Sequence Length Specification */
+
+/* SPSSR - Sequence Status Register */
+#define SPSSR_SPECM_MASK 0x70 /* Command Error Mask */
+#define SPSSR_SPCP_MASK 0x07 /* Command Pointer Mask */
+
+/* SPDCR - Data Control Register */
+#define SPDCR_TXDMY 0x80 /* Dummy Data Transmission Enable */
+#define SPDCR_SPLW1 0x40 /* Access Width Specification (RZ) */
+#define SPDCR_SPLW0 0x20 /* Access Width Specification (RZ) */
+#define SPDCR_SPLLWORD (SPDCR_SPLW1 | SPDCR_SPLW0)
+#define SPDCR_SPLWORD SPDCR_SPLW1
+#define SPDCR_SPLBYTE SPDCR_SPLW0
+#define SPDCR_SPLW 0x20 /* Access Width Specification (SH) */
+#define SPDCR_SPRDTD 0x10 /* Receive Transmit Data Select */
#define SPDCR_SLSEL1 0x08
#define SPDCR_SLSEL0 0x04
-#define SPDCR_SLSEL_MASK 0x0c
+#define SPDCR_SLSEL_MASK 0x0c /* SSL1 Output Select */
#define SPDCR_SPFC1 0x02
#define SPDCR_SPFC0 0x01
+#define SPDCR_SPFC_MASK 0x03 /* Frame Count Setting (1-4) */
-/* SPCKD */
-#define SPCKD_SCKDL_MASK 0x07
+/* SPCKD - Clock Delay Register */
+#define SPCKD_SCKDL_MASK 0x07 /* Clock Delay Setting (1-8) */
-/* SSLND */
-#define SSLND_SLNDL_MASK 0x07
+/* SSLND - Slave Select Negation Delay Register */
+#define SSLND_SLNDL_MASK 0x07 /* SSL Negation Delay Setting (1-8) */
-/* SPND */
-#define SPND_SPNDL_MASK 0x07
+/* SPND - Next-Access Delay Register */
+#define SPND_SPNDL_MASK 0x07 /* Next-Access Delay Setting (1-8) */
-/* SPCR2 */
-#define SPCR2_PTE 0x08
-#define SPCR2_SPIE 0x04
-#define SPCR2_SPOE 0x02
-#define SPCR2_SPPE 0x01
+/* SPCR2 - Control Register 2 */
+#define SPCR2_PTE 0x08 /* Parity Self-Test Enable */
+#define SPCR2_SPIE 0x04 /* Idle Interrupt Enable */
+#define SPCR2_SPOE 0x02 /* Odd Parity Enable (vs. Even) */
+#define SPCR2_SPPE 0x01 /* Parity Enable */
-/* SPCMDn */
-#define SPCMD_SCKDEN 0x8000
-#define SPCMD_SLNDEN 0x4000
-#define SPCMD_SPNDEN 0x2000
-#define SPCMD_LSBF 0x1000
-#define SPCMD_SPB_MASK 0x0f00
+/* SPCMDn - Command Registers */
+#define SPCMD_SCKDEN 0x8000 /* Clock Delay Setting Enable */
+#define SPCMD_SLNDEN 0x4000 /* SSL Negation Delay Setting Enable */
+#define SPCMD_SPNDEN 0x2000 /* Next-Access Delay Enable */
+#define SPCMD_LSBF 0x1000 /* LSB First */
+#define SPCMD_SPB_MASK 0x0f00 /* Data Length Setting */
#define SPCMD_SPB_8_TO_16(bit) (((bit - 1) << 8) & SPCMD_SPB_MASK)
#define SPCMD_SPB_8BIT 0x0000 /* qspi only */
#define SPCMD_SPB_16BIT 0x0100
#define SPCMD_SPB_20BIT 0x0000
#define SPCMD_SPB_24BIT 0x0100
#define SPCMD_SPB_32BIT 0x0200
-#define SPCMD_SSLKP 0x0080
-#define SPCMD_SSLA_MASK 0x0030
-#define SPCMD_BRDV_MASK 0x000c
-#define SPCMD_CPOL 0x0002
-#define SPCMD_CPHA 0x0001
-
-/* SPBFCR */
-#define SPBFCR_TXRST 0x80 /* qspi only */
-#define SPBFCR_RXRST 0x40 /* qspi only */
+#define SPCMD_SSLKP 0x0080 /* SSL Signal Level Keeping */
+#define SPCMD_SPIMOD_MASK 0x0060 /* SPI Operating Mode (QSPI only) */
+#define SPCMD_SPIMOD1 0x0040
+#define SPCMD_SPIMOD0 0x0020
+#define SPCMD_SPIMOD_SINGLE 0
+#define SPCMD_SPIMOD_DUAL SPCMD_SPIMOD0
+#define SPCMD_SPIMOD_QUAD SPCMD_SPIMOD1
+#define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */
+#define SPCMD_SSLA_MASK 0x0030 /* SSL Assert Signal Setting (RSPI) */
+#define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */
+#define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */
+#define SPCMD_CPHA 0x0001 /* Clock Phase Setting */
+
+/* SPBFCR - Buffer Control Register */
+#define SPBFCR_TXRST 0x80 /* Transmit Buffer Data Reset (qspi only) */
+#define SPBFCR_RXRST 0x40 /* Receive Buffer Data Reset (qspi only) */
+#define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */
+#define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */
+
+#define DUMMY_DATA 0x00
struct rspi_data {
void __iomem *addr;
@@ -158,7 +186,8 @@ struct rspi_data {
wait_queue_head_t wait;
spinlock_t lock;
struct clk *clk;
- unsigned char spsr;
+ u8 spsr;
+ u16 spcmd;
const struct spi_ops *ops;
/* for dmaengine */
@@ -170,34 +199,35 @@ struct rspi_data {
unsigned dma_callbacked:1;
};
-static void rspi_write8(struct rspi_data *rspi, u8 data, u16 offset)
+static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
{
iowrite8(data, rspi->addr + offset);
}
-static void rspi_write16(struct rspi_data *rspi, u16 data, u16 offset)
+static void rspi_write16(const struct rspi_data *rspi, u16 data, u16 offset)
{
iowrite16(data, rspi->addr + offset);
}
-static void rspi_write32(struct rspi_data *rspi, u32 data, u16 offset)
+static void rspi_write32(const struct rspi_data *rspi, u32 data, u16 offset)
{
iowrite32(data, rspi->addr + offset);
}
-static u8 rspi_read8(struct rspi_data *rspi, u16 offset)
+static u8 rspi_read8(const struct rspi_data *rspi, u16 offset)
{
return ioread8(rspi->addr + offset);
}
-static u16 rspi_read16(struct rspi_data *rspi, u16 offset)
+static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
{
return ioread16(rspi->addr + offset);
}
/* optional functions */
struct spi_ops {
- int (*set_config_register)(struct rspi_data *rspi, int access_size);
+ int (*set_config_register)(const struct rspi_data *rspi,
+ int access_size);
int (*send_pio)(struct rspi_data *rspi, struct spi_message *mesg,
struct spi_transfer *t);
int (*receive_pio)(struct rspi_data *rspi, struct spi_message *mesg,
@@ -208,7 +238,8 @@ struct spi_ops {
/*
* functions for RSPI
*/
-static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
+static int rspi_set_config_register(const struct rspi_data *rspi,
+ int access_size)
{
int spbr;
@@ -231,7 +262,7 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
rspi_write8(rspi, 0x00, RSPI_SPCR2);
/* Sets SPCMD */
- rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP,
+ rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | rspi->spcmd,
RSPI_SPCMD0);
/* Sets RSPI mode */
@@ -243,7 +274,8 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
/*
* functions for QSPI
*/
-static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
+static int qspi_set_config_register(const struct rspi_data *rspi,
+ int access_size)
{
u16 spcmd;
int spbr;
@@ -268,10 +300,10 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
spcmd = SPCMD_SPB_8BIT;
else if (access_size == 16)
spcmd = SPCMD_SPB_16BIT;
- else if (access_size == 32)
+ else
spcmd = SPCMD_SPB_32BIT;
- spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SSLKP | SPCMD_SPNDEN;
+ spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | rspi->spcmd | SPCMD_SPNDEN;
/* Resets transfer data length */
rspi_write32(rspi, 0, QSPI_SPBMUL0);
@@ -292,12 +324,12 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
-static void rspi_enable_irq(struct rspi_data *rspi, u8 enable)
+static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
{
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
}
-static void rspi_disable_irq(struct rspi_data *rspi, u8 disable)
+static void rspi_disable_irq(const struct rspi_data *rspi, u8 disable)
{
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
}
@@ -316,12 +348,12 @@ static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
return 0;
}
-static void rspi_assert_ssl(struct rspi_data *rspi)
+static void rspi_assert_ssl(const struct rspi_data *rspi)
{
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
}
-static void rspi_negate_ssl(struct rspi_data *rspi)
+static void rspi_negate_ssl(const struct rspi_data *rspi)
{
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
}
@@ -330,9 +362,7 @@ static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
struct spi_transfer *t)
{
int remain = t->len;
- u8 *data;
-
- data = (u8 *)t->tx_buf;
+ const u8 *data = t->tx_buf;
while (remain > 0) {
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
RSPI_SPCR);
@@ -348,7 +378,7 @@ static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
remain--;
}
- /* Waiting for the last transmition */
+ /* Waiting for the last transmission */
rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
return 0;
@@ -358,12 +388,11 @@ static int qspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
struct spi_transfer *t)
{
int remain = t->len;
- u8 *data;
+ const u8 *data = t->tx_buf;
rspi_write8(rspi, SPBFCR_TXRST, QSPI_SPBFCR);
rspi_write8(rspi, 0x00, QSPI_SPBFCR);
- data = (u8 *)t->tx_buf;
while (remain > 0) {
if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
@@ -383,7 +412,7 @@ static int qspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
remain--;
}
- /* Waiting for the last transmition */
+ /* Waiting for the last transmission */
rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
return 0;
@@ -399,8 +428,8 @@ static void rspi_dma_complete(void *arg)
wake_up_interruptible(&rspi->wait);
}
-static int rspi_dma_map_sg(struct scatterlist *sg, void *buf, unsigned len,
- struct dma_chan *chan,
+static int rspi_dma_map_sg(struct scatterlist *sg, const void *buf,
+ unsigned len, struct dma_chan *chan,
enum dma_transfer_direction dir)
{
sg_init_table(sg, 1);
@@ -440,12 +469,13 @@ static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len)
static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
struct scatterlist sg;
- void *buf = NULL;
+ const void *buf = NULL;
struct dma_async_tx_descriptor *desc;
unsigned len;
int ret = 0;
if (rspi->dma_width_16bit) {
+ void *tmp;
/*
* If DMAC bus width is 16-bit, the driver allocates a dummy
* buffer. And, the driver converts original data into the
@@ -454,13 +484,14 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
* DMAC data: 1st byte, dummy, 2nd byte, dummy ...
*/
len = t->len * 2;
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
+ tmp = kmalloc(len, GFP_KERNEL);
+ if (!tmp)
return -ENOMEM;
- rspi_memory_to_8bit(buf, t->tx_buf, t->len);
+ rspi_memory_to_8bit(tmp, t->tx_buf, t->len);
+ buf = tmp;
} else {
len = t->len;
- buf = (void *)t->tx_buf;
+ buf = t->tx_buf;
}
if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) {
@@ -508,16 +539,16 @@ end_nomap:
return ret;
}
-static void rspi_receive_init(struct rspi_data *rspi)
+static void rspi_receive_init(const struct rspi_data *rspi)
{
- unsigned char spsr;
+ u8 spsr;
spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPRF)
rspi_read16(rspi, RSPI_SPDR); /* dummy read */
if (spsr & SPSR_OVRF)
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
- RSPI_SPCR);
+ RSPI_SPSR);
}
static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
@@ -528,7 +559,7 @@ static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
rspi_receive_init(rspi);
- data = (u8 *)t->rx_buf;
+ data = t->rx_buf;
while (remain > 0) {
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD,
RSPI_SPCR);
@@ -539,7 +570,7 @@ static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
return -ETIMEDOUT;
}
/* dummy write for generate clock */
- rspi_write16(rspi, 0x00, RSPI_SPDR);
+ rspi_write16(rspi, DUMMY_DATA, RSPI_SPDR);
if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
dev_err(&rspi->master->dev,
@@ -556,9 +587,9 @@ static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
return 0;
}
-static void qspi_receive_init(struct rspi_data *rspi)
+static void qspi_receive_init(const struct rspi_data *rspi)
{
- unsigned char spsr;
+ u8 spsr;
spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPRF)
@@ -575,7 +606,7 @@ static int qspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
qspi_receive_init(rspi);
- data = (u8 *)t->rx_buf;
+ data = t->rx_buf;
while (remain > 0) {
if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
@@ -584,7 +615,7 @@ static int qspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
return -ETIMEDOUT;
}
/* dummy write for generate clock */
- rspi_write8(rspi, 0x00, RSPI_SPDR);
+ rspi_write8(rspi, DUMMY_DATA, RSPI_SPDR);
if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
dev_err(&rspi->master->dev,
@@ -704,7 +735,7 @@ end_nomap:
return ret;
}
-static int rspi_is_dma(struct rspi_data *rspi, struct spi_transfer *t)
+static int rspi_is_dma(const struct rspi_data *rspi, struct spi_transfer *t)
{
if (t->tx_buf && rspi->chan_tx)
return 1;
@@ -771,10 +802,14 @@ static int rspi_setup(struct spi_device *spi)
{
struct rspi_data *rspi = spi_master_get_devdata(spi->master);
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
rspi->max_speed_hz = spi->max_speed_hz;
+ rspi->spcmd = SPCMD_SSLKP;
+ if (spi->mode & SPI_CPOL)
+ rspi->spcmd |= SPCMD_CPOL;
+ if (spi->mode & SPI_CPHA)
+ rspi->spcmd |= SPCMD_CPHA;
+
set_config_register(rspi, 8);
return 0;
@@ -802,10 +837,10 @@ static void rspi_cleanup(struct spi_device *spi)
static irqreturn_t rspi_irq(int irq, void *_sr)
{
- struct rspi_data *rspi = (struct rspi_data *)_sr;
- unsigned long spsr;
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
irqreturn_t ret = IRQ_NONE;
- unsigned char disable_irq = 0;
+ u8 disable_irq = 0;
rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPRF)
@@ -825,7 +860,7 @@ static irqreturn_t rspi_irq(int irq, void *_sr)
static int rspi_request_dma(struct rspi_data *rspi,
struct platform_device *pdev)
{
- struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
+ const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dma_cap_mask_t mask;
struct dma_slave_config cfg;
@@ -887,11 +922,8 @@ static int rspi_remove(struct platform_device *pdev)
{
struct rspi_data *rspi = platform_get_drvdata(pdev);
- spi_unregister_master(rspi->master);
rspi_release_dma(rspi);
- free_irq(platform_get_irq(pdev, 0), rspi);
- clk_put(rspi->clk);
- iounmap(rspi->addr);
+ clk_disable(rspi->clk);
return 0;
}
@@ -903,7 +935,7 @@ static int rspi_probe(struct platform_device *pdev)
struct rspi_data *rspi;
int ret, irq;
char clk_name[16];
- struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
+ const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
const struct spi_ops *ops;
const struct platform_device_id *id_entry = pdev->id_entry;
@@ -913,12 +945,6 @@ static int rspi_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "there is no set_config_register\n");
return -ENODEV;
}
- /* get base addr */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(res == NULL)) {
- dev_err(&pdev->dev, "invalid resource\n");
- return -EINVAL;
- }
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -936,19 +962,20 @@ static int rspi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rspi);
rspi->ops = ops;
rspi->master = master;
- rspi->addr = ioremap(res->start, resource_size(res));
- if (rspi->addr == NULL) {
- dev_err(&pdev->dev, "ioremap error.\n");
- ret = -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rspi->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rspi->addr)) {
+ ret = PTR_ERR(rspi->addr);
goto error1;
}
snprintf(clk_name, sizeof(clk_name), "%s%d", id_entry->name, pdev->id);
- rspi->clk = clk_get(&pdev->dev, clk_name);
+ rspi->clk = devm_clk_get(&pdev->dev, clk_name);
if (IS_ERR(rspi->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = PTR_ERR(rspi->clk);
- goto error2;
+ goto error1;
}
clk_enable(rspi->clk);
@@ -957,45 +984,45 @@ static int rspi_probe(struct platform_device *pdev)
INIT_WORK(&rspi->ws, rspi_work);
init_waitqueue_head(&rspi->wait);
- master->num_chipselect = rspi_pd->num_chipselect;
- if (!master->num_chipselect)
+ if (rspi_pd && rspi_pd->num_chipselect)
+ master->num_chipselect = rspi_pd->num_chipselect;
+ else
master->num_chipselect = 2; /* default */
master->bus_num = pdev->id;
master->setup = rspi_setup;
master->transfer = rspi_transfer;
master->cleanup = rspi_cleanup;
+ master->mode_bits = SPI_CPHA | SPI_CPOL;
- ret = request_irq(irq, rspi_irq, 0, dev_name(&pdev->dev), rspi);
+ ret = devm_request_irq(&pdev->dev, irq, rspi_irq, 0,
+ dev_name(&pdev->dev), rspi);
if (ret < 0) {
dev_err(&pdev->dev, "request_irq error\n");
- goto error3;
+ goto error2;
}
rspi->irq = irq;
ret = rspi_request_dma(rspi, pdev);
if (ret < 0) {
dev_err(&pdev->dev, "rspi_request_dma failed.\n");
- goto error4;
+ goto error3;
}
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
- goto error4;
+ goto error3;
}
dev_info(&pdev->dev, "probed\n");
return 0;
-error4:
- rspi_release_dma(rspi);
- free_irq(irq, rspi);
error3:
- clk_put(rspi->clk);
+ rspi_release_dma(rspi);
error2:
- iounmap(rspi->addr);
+ clk_disable(rspi->clk);
error1:
spi_master_put(master);
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index 0dc32a11bd3c..746424aa5353 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -29,7 +29,6 @@
#include <plat/regs-spi.h>
-#include <plat/fiq.h>
#include <asm/fiq.h>
#include "spi-s3c24xx-fiq.h"
@@ -78,14 +77,12 @@ struct s3c24xx_spi {
unsigned char *rx;
struct clk *clk;
- struct resource *ioarea;
struct spi_master *master;
struct spi_device *curdev;
struct device *dev;
struct s3c2410_spi_info *pdata;
};
-
#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT)
#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP)
@@ -517,8 +514,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
if (master == NULL) {
dev_err(&pdev->dev, "No memory for spi_master\n");
- err = -ENOMEM;
- goto err_nomem;
+ return -ENOMEM;
}
hw = spi_master_get_devdata(master);
@@ -562,48 +558,32 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang);
/* find and map our resources */
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
- err = -ENOENT;
- goto err_no_iores;
- }
-
- hw->ioarea = request_mem_region(res->start, resource_size(res),
- pdev->name);
-
- if (hw->ioarea == NULL) {
- dev_err(&pdev->dev, "Cannot reserve region\n");
- err = -ENXIO;
- goto err_no_iores;
- }
-
- hw->regs = ioremap(res->start, resource_size(res));
- if (hw->regs == NULL) {
- dev_err(&pdev->dev, "Cannot map IO\n");
- err = -ENXIO;
- goto err_no_iomap;
+ hw->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->regs)) {
+ err = PTR_ERR(hw->regs);
+ goto err_no_pdata;
}
hw->irq = platform_get_irq(pdev, 0);
if (hw->irq < 0) {
dev_err(&pdev->dev, "No IRQ specified\n");
err = -ENOENT;
- goto err_no_irq;
+ goto err_no_pdata;
}
- err = request_irq(hw->irq, s3c24xx_spi_irq, 0, pdev->name, hw);
+ err = devm_request_irq(&pdev->dev, hw->irq, s3c24xx_spi_irq, 0,
+ pdev->name, hw);
if (err) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- goto err_no_irq;
+ goto err_no_pdata;
}
- hw->clk = clk_get(&pdev->dev, "spi");
+ hw->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(hw->clk)) {
dev_err(&pdev->dev, "No clock for device\n");
err = PTR_ERR(hw->clk);
- goto err_no_clk;
+ goto err_no_pdata;
}
/* setup any gpio we can */
@@ -615,7 +595,8 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
goto err_register;
}
- err = gpio_request(pdata->pin_cs, dev_name(&pdev->dev));
+ err = devm_gpio_request(&pdev->dev, pdata->pin_cs,
+ dev_name(&pdev->dev));
if (err) {
dev_err(&pdev->dev, "Failed to get gpio for cs\n");
goto err_register;
@@ -639,27 +620,10 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
return 0;
err_register:
- if (hw->set_cs == s3c24xx_spi_gpiocs)
- gpio_free(pdata->pin_cs);
-
clk_disable(hw->clk);
- clk_put(hw->clk);
-
- err_no_clk:
- free_irq(hw->irq, hw);
- err_no_irq:
- iounmap(hw->regs);
-
- err_no_iomap:
- release_resource(hw->ioarea);
- kfree(hw->ioarea);
-
- err_no_iores:
err_no_pdata:
spi_master_put(hw->master);
-
- err_nomem:
return err;
}
@@ -668,19 +632,7 @@ static int s3c24xx_spi_remove(struct platform_device *dev)
struct s3c24xx_spi *hw = platform_get_drvdata(dev);
spi_bitbang_stop(&hw->bitbang);
-
clk_disable(hw->clk);
- clk_put(hw->clk);
-
- free_irq(hw->irq, hw);
- iounmap(hw->regs);
-
- if (hw->set_cs == s3c24xx_spi_gpiocs)
- gpio_free(hw->pdata->pin_cs);
-
- release_resource(hw->ioarea);
- kfree(hw->ioarea);
-
spi_master_put(hw->master);
return 0;
}
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 4c4b0a1219a7..ae907dde1371 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -890,7 +890,7 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
unsigned long flags;
int use_dma;
- reinit_completion(&sdd->xfer_completion);
+ reinit_completion(&sdd->xfer_completion);
/* Only BPW and Speed may change across transfers */
bpw = xfer->bits_per_word;
@@ -927,9 +927,6 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
/* Start the signals */
writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
- /* Start the signals */
- writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-
spin_unlock_irqrestore(&sdd->lock, flags);
status = wait_for_xfer(sdd, xfer, use_dma);
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 9eda21d739c6..121c2e1dea36 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -183,18 +183,11 @@ static int sc18is602_setup_transfer(struct sc18is602 *hw, u32 hz, u8 mode)
static int sc18is602_check_transfer(struct spi_device *spi,
struct spi_transfer *t, int tlen)
{
- int bpw;
uint32_t hz;
if (t && t->len + tlen > SC18IS602_BUFSIZ)
return -EINVAL;
- bpw = spi->bits_per_word;
- if (t && t->bits_per_word)
- bpw = t->bits_per_word;
- if (bpw != 8)
- return -EINVAL;
-
hz = spi->max_speed_hz;
if (t && t->speed_hz)
hz = t->speed_hz;
@@ -254,9 +247,6 @@ error:
static int sc18is602_setup(struct spi_device *spi)
{
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
if (spi->mode & ~(SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST))
return -EINVAL;
@@ -315,11 +305,12 @@ static int sc18is602_probe(struct i2c_client *client,
}
master->bus_num = client->adapter->nr;
master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
master->setup = sc18is602_setup;
master->transfer_one_message = sc18is602_transfer_one;
master->dev.of_node = np;
- error = spi_register_master(master);
+ error = devm_spi_register_master(dev, master);
if (error)
goto error_reg;
@@ -330,16 +321,6 @@ error_reg:
return error;
}
-static int sc18is602_remove(struct i2c_client *client)
-{
- struct sc18is602 *hw = i2c_get_clientdata(client);
- struct spi_master *master = hw->master;
-
- spi_unregister_master(master);
-
- return 0;
-}
-
static const struct i2c_device_id sc18is602_id[] = {
{ "sc18is602", sc18is602 },
{ "sc18is602b", sc18is602b },
@@ -353,7 +334,6 @@ static struct i2c_driver sc18is602_driver = {
.name = "sc18is602",
},
.probe = sc18is602_probe,
- .remove = sc18is602_remove,
.id_table = sc18is602_id,
};
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 292567ab4c6c..82d2f922ffa0 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -197,7 +197,7 @@ static int hspi_transfer_one_message(struct spi_master *master,
hspi_write(hspi, SPTBR, tx);
- /* wait recive */
+ /* wait receive */
ret = hspi_status_check_timeout(hspi, 0x4, 0x4);
if (ret < 0)
break;
@@ -353,4 +353,4 @@ module_platform_driver(hspi_driver);
MODULE_DESCRIPTION("SuperH HSPI bus driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
-MODULE_ALIAS("platform:sh_spi");
+MODULE_ALIAS("platform:sh-hspi");
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index c74298cf70e2..81cc02f5f9b0 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -152,7 +152,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
size_t k;
if (!WARN_ON(!spi_hz || !parent_rate))
- div = parent_rate / spi_hz;
+ div = DIV_ROUND_UP(parent_rate, spi_hz);
/* TODO: make more fine grained */
@@ -169,7 +169,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
u32 cpol, u32 cpha,
- u32 tx_hi_z, u32 lsb_first)
+ u32 tx_hi_z, u32 lsb_first, u32 cs_high)
{
u32 tmp;
int edge;
@@ -182,8 +182,12 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
* 1 1 11 11 1 1
*/
sh_msiof_write(p, FCTR, 0);
- sh_msiof_write(p, TMDR1, 0xe2000005 | (lsb_first << 24));
- sh_msiof_write(p, RMDR1, 0x22000005 | (lsb_first << 24));
+
+ tmp = 0;
+ tmp |= !cs_high << 25;
+ tmp |= lsb_first << 24;
+ sh_msiof_write(p, TMDR1, 0xe0000005 | tmp);
+ sh_msiof_write(p, RMDR1, 0x20000005 | tmp);
tmp = 0xa0000000;
tmp |= cpol << 30; /* TSCKIZ */
@@ -417,11 +421,12 @@ static void sh_msiof_spi_chipselect(struct spi_device *spi, int is_on)
sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
!!(spi->mode & SPI_CPHA),
!!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST));
+ !!(spi->mode & SPI_LSB_FIRST),
+ !!(spi->mode & SPI_CS_HIGH));
}
/* use spi->controller data for CS (same strategy as spi_gpio) */
- gpio_set_value((unsigned)spi->controller_data, value);
+ gpio_set_value((uintptr_t)spi->controller_data, value);
if (is_on == BITBANG_CS_INACTIVE) {
if (test_and_clear_bit(0, &p->flags)) {
@@ -635,8 +640,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
master = spi_alloc_master(&pdev->dev, sizeof(struct sh_msiof_spi_priv));
if (master == NULL) {
dev_err(&pdev->dev, "failed to allocate spi master\n");
- ret = -ENOMEM;
- goto err0;
+ return -ENOMEM;
}
p = spi_master_get_devdata(master);
@@ -655,32 +659,38 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
init_completion(&p->done);
- p->clk = clk_get(&pdev->dev, NULL);
+ p->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(p->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = PTR_ERR(p->clk);
goto err1;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i = platform_get_irq(pdev, 0);
- if (!r || i < 0) {
- dev_err(&pdev->dev, "cannot get platform resources\n");
+ if (i < 0) {
+ dev_err(&pdev->dev, "cannot get platform IRQ\n");
ret = -ENOENT;
- goto err2;
+ goto err1;
}
- p->mapbase = ioremap_nocache(r->start, resource_size(r));
- if (!p->mapbase) {
- dev_err(&pdev->dev, "unable to ioremap\n");
- ret = -ENXIO;
- goto err2;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ p->mapbase = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(p->mapbase)) {
+ ret = PTR_ERR(p->mapbase);
+ goto err1;
}
- ret = request_irq(i, sh_msiof_spi_irq, 0,
- dev_name(&pdev->dev), p);
+ ret = devm_request_irq(&pdev->dev, i, sh_msiof_spi_irq, 0,
+ dev_name(&pdev->dev), p);
if (ret) {
dev_err(&pdev->dev, "unable to request irq\n");
- goto err3;
+ goto err1;
+ }
+
+ ret = clk_prepare(p->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to prepare clock\n");
+ goto err1;
}
p->pdev = pdev;
@@ -719,13 +729,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
return 0;
pm_runtime_disable(&pdev->dev);
- err3:
- iounmap(p->mapbase);
- err2:
- clk_put(p->clk);
+ clk_unprepare(p->clk);
err1:
spi_master_put(master);
- err0:
return ret;
}
@@ -737,9 +743,7 @@ static int sh_msiof_spi_remove(struct platform_device *pdev)
ret = spi_bitbang_stop(&p->bitbang);
if (!ret) {
pm_runtime_disable(&pdev->dev);
- free_irq(platform_get_irq(pdev, 0), p);
- iounmap(p->mapbase);
- clk_put(p->clk);
+ clk_unprepare(p->clk);
spi_master_put(p->bitbang.master);
}
return ret;
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index c120a70094f2..f6f2c7010177 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -171,7 +171,6 @@ static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
int remain = t->len;
int cur_len;
unsigned char *data;
- unsigned long tmp;
long ret;
if (t->len)
@@ -213,9 +212,7 @@ static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
}
if (list_is_last(&t->transfer_list, &mesg->transfers)) {
- tmp = spi_sh_read(ss, SPI_SH_CR1);
- tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB);
- spi_sh_write(ss, tmp, SPI_SH_CR1);
+ spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
ss->cr1 &= ~SPI_SH_TBE;
@@ -239,7 +236,6 @@ static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
int remain = t->len;
int cur_len;
unsigned char *data;
- unsigned long tmp;
long ret;
if (t->len > SPI_SH_MAX_BYTE)
@@ -247,9 +243,7 @@ static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
else
spi_sh_write(ss, t->len, SPI_SH_CR3);
- tmp = spi_sh_read(ss, SPI_SH_CR1);
- tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB);
- spi_sh_write(ss, tmp, SPI_SH_CR1);
+ spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
spi_sh_wait_write_buffer_empty(ss);
@@ -358,9 +352,6 @@ static int spi_sh_setup(struct spi_device *spi)
{
struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
pr_debug("%s: enter\n", __func__);
spi_sh_write(ss, 0xfe, SPI_SH_CR1); /* SPI sycle stop */
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index ed5e501c4652..e430689c3837 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -536,16 +536,9 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
static int spi_sirfsoc_setup(struct spi_device *spi)
{
- struct sirfsoc_spi *sspi;
-
if (!spi->max_speed_hz)
return -EINVAL;
- sspi = spi_master_get_devdata(spi->master);
-
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
return spi_sirfsoc_setup_transfer(spi, NULL);
}
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index aaecfb3ebf58..413c71843492 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -17,7 +17,6 @@
*/
#include <linux/clk.h>
-#include <linux/clk/tegra.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
@@ -34,6 +33,7 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/reset.h>
#include <linux/spi/spi.h>
#define SPI_COMMAND1 0x000
@@ -54,11 +54,8 @@
#define SPI_CS_SS_VAL (1 << 20)
#define SPI_CS_SW_HW (1 << 21)
/* SPI_CS_POL_INACTIVE bits are default high */
-#define SPI_CS_POL_INACTIVE 22
-#define SPI_CS_POL_INACTIVE_0 (1 << 22)
-#define SPI_CS_POL_INACTIVE_1 (1 << 23)
-#define SPI_CS_POL_INACTIVE_2 (1 << 24)
-#define SPI_CS_POL_INACTIVE_3 (1 << 25)
+ /* n from 0 to 3 */
+#define SPI_CS_POL_INACTIVE(n) (1 << (22 + (n)))
#define SPI_CS_POL_INACTIVE_MASK (0xF << 22)
#define SPI_CS_SEL_0 (0 << 26)
@@ -165,26 +162,22 @@
#define MAX_HOLD_CYCLES 16
#define SPI_DEFAULT_SPEED 25000000
-#define MAX_CHIP_SELECT 4
-#define SPI_FIFO_DEPTH 64
-
struct tegra_spi_data {
struct device *dev;
struct spi_master *master;
spinlock_t lock;
struct clk *clk;
+ struct reset_control *rst;
void __iomem *base;
phys_addr_t phys;
unsigned irq;
- int dma_req_sel;
u32 spi_max_frequency;
u32 cur_speed;
struct spi_device *cur_spi;
struct spi_device *cs_control;
unsigned cur_pos;
- unsigned cur_len;
unsigned words_per_32bit;
unsigned bytes_per_word;
unsigned curr_dma_words;
@@ -204,12 +197,10 @@ struct tegra_spi_data {
u32 rx_status;
u32 status_reg;
bool is_packed;
- unsigned long packed_size;
u32 command1_reg;
u32 dma_control_reg;
u32 def_command1_reg;
- u32 spi_cs_timing;
struct completion xfer_completion;
struct spi_transfer *curr_xfer;
@@ -227,14 +218,14 @@ struct tegra_spi_data {
static int tegra_spi_runtime_suspend(struct device *dev);
static int tegra_spi_runtime_resume(struct device *dev);
-static inline unsigned long tegra_spi_readl(struct tegra_spi_data *tspi,
+static inline u32 tegra_spi_readl(struct tegra_spi_data *tspi,
unsigned long reg)
{
return readl(tspi->base + reg);
}
static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
- unsigned long val, unsigned long reg)
+ u32 val, unsigned long reg)
{
writel(val, tspi->base + reg);
@@ -245,7 +236,7 @@ static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
static void tegra_spi_clear_status(struct tegra_spi_data *tspi)
{
- unsigned long val;
+ u32 val;
/* Write 1 to clear status register */
val = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
@@ -296,10 +287,9 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
{
unsigned nbytes;
unsigned tx_empty_count;
- unsigned long fifo_status;
+ u32 fifo_status;
unsigned max_n_32bit;
unsigned i, count;
- unsigned long x;
unsigned int written_words;
unsigned fifo_words_left;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
@@ -313,9 +303,9 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
nbytes = written_words * tspi->bytes_per_word;
max_n_32bit = DIV_ROUND_UP(nbytes, 4);
for (count = 0; count < max_n_32bit; count++) {
- x = 0;
+ u32 x = 0;
for (i = 0; (i < 4) && nbytes; i++, nbytes--)
- x |= (*tx_buf++) << (i*8);
+ x |= (u32)(*tx_buf++) << (i * 8);
tegra_spi_writel(tspi, x, SPI_TX_FIFO);
}
} else {
@@ -323,10 +313,10 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
written_words = max_n_32bit;
nbytes = written_words * tspi->bytes_per_word;
for (count = 0; count < max_n_32bit; count++) {
- x = 0;
+ u32 x = 0;
for (i = 0; nbytes && (i < tspi->bytes_per_word);
i++, nbytes--)
- x |= ((*tx_buf++) << i*8);
+ x |= (u32)(*tx_buf++) << (i * 8);
tegra_spi_writel(tspi, x, SPI_TX_FIFO);
}
}
@@ -338,9 +328,8 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
unsigned rx_full_count;
- unsigned long fifo_status;
+ u32 fifo_status;
unsigned i, count;
- unsigned long x;
unsigned int read_words = 0;
unsigned len;
u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
@@ -350,20 +339,16 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
if (tspi->is_packed) {
len = tspi->curr_dma_words * tspi->bytes_per_word;
for (count = 0; count < rx_full_count; count++) {
- x = tegra_spi_readl(tspi, SPI_RX_FIFO);
+ u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO);
for (i = 0; len && (i < 4); i++, len--)
*rx_buf++ = (x >> i*8) & 0xFF;
}
tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
read_words += tspi->curr_dma_words;
} else {
- unsigned int rx_mask;
- unsigned int bits_per_word = t->bits_per_word;
-
- rx_mask = (1 << bits_per_word) - 1;
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
for (count = 0; count < rx_full_count; count++) {
- x = tegra_spi_readl(tspi, SPI_RX_FIFO);
- x &= rx_mask;
+ u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;
for (i = 0; (i < tspi->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
@@ -376,27 +361,24 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
- unsigned len;
-
/* Make the dma buffer to read by cpu */
dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
tspi->dma_buf_size, DMA_TO_DEVICE);
if (tspi->is_packed) {
- len = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
} else {
unsigned int i;
unsigned int count;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
- unsigned int x;
for (count = 0; count < tspi->curr_dma_words; count++) {
- x = 0;
+ u32 x = 0;
for (i = 0; consume && (i < tspi->bytes_per_word);
i++, consume--)
- x |= ((*tx_buf++) << i * 8);
+ x |= (u32)(*tx_buf++) << (i * 8);
tspi->tx_dma_buf[count] = x;
}
}
@@ -410,27 +392,21 @@ static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
- unsigned len;
-
/* Make the dma buffer to read by cpu */
dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
tspi->dma_buf_size, DMA_FROM_DEVICE);
if (tspi->is_packed) {
- len = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
} else {
unsigned int i;
unsigned int count;
unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
- unsigned int x;
- unsigned int rx_mask;
- unsigned int bits_per_word = t->bits_per_word;
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
- rx_mask = (1 << bits_per_word) - 1;
for (count = 0; count < tspi->curr_dma_words; count++) {
- x = tspi->rx_dma_buf[count];
- x &= rx_mask;
+ u32 x = tspi->rx_dma_buf[count] & rx_mask;
for (i = 0; (i < tspi->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
@@ -490,16 +466,16 @@ static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
static int tegra_spi_start_dma_based_transfer(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
- unsigned long val;
+ u32 val;
unsigned int len;
int ret = 0;
- unsigned long status;
+ u32 status;
/* Make sure that Rx and Tx fifo are empty */
status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
- dev_err(tspi->dev,
- "Rx/Tx fifo are not empty status 0x%08lx\n", status);
+ dev_err(tspi->dev, "Rx/Tx fifo are not empty status 0x%08x\n",
+ (unsigned)status);
return -EIO;
}
@@ -564,7 +540,7 @@ static int tegra_spi_start_dma_based_transfer(
static int tegra_spi_start_cpu_based_transfer(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
- unsigned long val;
+ u32 val;
unsigned cur_words;
if (tspi->cur_direction & DATA_DIR_TX)
@@ -600,15 +576,15 @@ static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
dma_addr_t dma_phys;
int ret;
struct dma_slave_config dma_sconfig;
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- dma_chan = dma_request_channel(mask, NULL, NULL);
- if (!dma_chan) {
- dev_err(tspi->dev,
- "Dma channel is not available, will try later\n");
- return -EPROBE_DEFER;
+
+ dma_chan = dma_request_slave_channel_reason(tspi->dev,
+ dma_to_memory ? "rx" : "tx");
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(tspi->dev,
+ "Dma channel is not available: %d\n", ret);
+ return ret;
}
dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
@@ -619,7 +595,6 @@ static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
return -ENOMEM;
}
- dma_sconfig.slave_id = tspi->dma_req_sel;
if (dma_to_memory) {
dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -677,13 +652,13 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
dma_release_channel(dma_chan);
}
-static unsigned long tegra_spi_setup_transfer_one(struct spi_device *spi,
+static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
struct spi_transfer *t, bool is_first_of_msg)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
u32 speed = t->speed_hz;
u8 bits_per_word = t->bits_per_word;
- unsigned long command1;
+ u32 command1;
int req_mode;
if (speed != tspi->cur_speed) {
@@ -738,7 +713,7 @@ static unsigned long tegra_spi_setup_transfer_one(struct spi_device *spi,
}
static int tegra_spi_start_transfer_one(struct spi_device *spi,
- struct spi_transfer *t, unsigned long command1)
+ struct spi_transfer *t, u32 command1)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
unsigned total_fifo_words;
@@ -763,8 +738,8 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
tegra_spi_writel(tspi, command1, SPI_COMMAND1);
tspi->command1_reg = command1;
- dev_dbg(tspi->dev, "The def 0x%x and written 0x%lx\n",
- tspi->def_command1_reg, command1);
+ dev_dbg(tspi->dev, "The def 0x%x and written 0x%x\n",
+ tspi->def_command1_reg, (unsigned)command1);
if (total_fifo_words > SPI_FIFO_DEPTH)
ret = tegra_spi_start_dma_based_transfer(tspi, t);
@@ -776,15 +751,9 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
static int tegra_spi_setup(struct spi_device *spi)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
- unsigned long val;
+ u32 val;
unsigned long flags;
int ret;
- unsigned int cs_pol_bit[MAX_CHIP_SELECT] = {
- SPI_CS_POL_INACTIVE_0,
- SPI_CS_POL_INACTIVE_1,
- SPI_CS_POL_INACTIVE_2,
- SPI_CS_POL_INACTIVE_3,
- };
dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
spi->bits_per_word,
@@ -806,9 +775,9 @@ static int tegra_spi_setup(struct spi_device *spi)
spin_lock_irqsave(&tspi->lock, flags);
val = tspi->def_command1_reg;
if (spi->mode & SPI_CS_HIGH)
- val &= ~cs_pol_bit[spi->chip_select];
+ val &= ~SPI_CS_POL_INACTIVE(spi->chip_select);
else
- val |= cs_pol_bit[spi->chip_select];
+ val |= SPI_CS_POL_INACTIVE(spi->chip_select);
tspi->def_command1_reg = val;
tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
spin_unlock_irqrestore(&tspi->lock, flags);
@@ -842,7 +811,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
msg->actual_length = 0;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- unsigned long cmd1;
+ u32 cmd1;
reinit_completion(&tspi->xfer_completion);
@@ -918,9 +887,9 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
tspi->status_reg);
dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
tspi->command1_reg, tspi->dma_control_reg);
- tegra_periph_reset_assert(tspi->clk);
+ reset_control_assert(tspi->rst);
udelay(2);
- tegra_periph_reset_deassert(tspi->clk);
+ reset_control_deassert(tspi->rst);
complete(&tspi->xfer_completion);
goto exit;
}
@@ -990,9 +959,9 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
tspi->status_reg);
dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
tspi->command1_reg, tspi->dma_control_reg);
- tegra_periph_reset_assert(tspi->clk);
+ reset_control_assert(tspi->rst);
udelay(2);
- tegra_periph_reset_deassert(tspi->clk);
+ reset_control_deassert(tspi->rst);
complete(&tspi->xfer_completion);
spin_unlock_irqrestore(&tspi->lock, flags);
return IRQ_HANDLED;
@@ -1054,11 +1023,6 @@ static void tegra_spi_parse_dt(struct platform_device *pdev,
struct tegra_spi_data *tspi)
{
struct device_node *np = pdev->dev.of_node;
- u32 of_dma[2];
-
- if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
- of_dma, 2) >= 0)
- tspi->dma_req_sel = of_dma[1];
if (of_property_read_u32(np, "spi-max-frequency",
&tspi->spi_max_frequency))
@@ -1127,25 +1091,25 @@ static int tegra_spi_probe(struct platform_device *pdev)
goto exit_free_irq;
}
+ tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
+ if (IS_ERR(tspi->rst)) {
+ dev_err(&pdev->dev, "can not get reset\n");
+ ret = PTR_ERR(tspi->rst);
+ goto exit_free_irq;
+ }
+
tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
- if (tspi->dma_req_sel) {
- ret = tegra_spi_init_dma_param(tspi, true);
- if (ret < 0) {
- dev_err(&pdev->dev, "RxDma Init failed, err %d\n", ret);
- goto exit_free_irq;
- }
-
- ret = tegra_spi_init_dma_param(tspi, false);
- if (ret < 0) {
- dev_err(&pdev->dev, "TxDma Init failed, err %d\n", ret);
- goto exit_rx_dma_free;
- }
- tspi->max_buf_size = tspi->dma_buf_size;
- init_completion(&tspi->tx_dma_complete);
- init_completion(&tspi->rx_dma_complete);
- }
+ ret = tegra_spi_init_dma_param(tspi, true);
+ if (ret < 0)
+ goto exit_free_irq;
+ ret = tegra_spi_init_dma_param(tspi, false);
+ if (ret < 0)
+ goto exit_rx_dma_free;
+ tspi->max_buf_size = tspi->dma_buf_size;
+ init_completion(&tspi->tx_dma_complete);
+ init_completion(&tspi->rx_dma_complete);
init_completion(&tspi->xfer_completion);
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 4dc8e8129459..08794977f21a 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -32,8 +32,8 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/reset.h>
#include <linux/spi/spi.h>
-#include <linux/clk/tegra.h>
#define SPI_COMMAND 0x000
#define SPI_GO BIT(30)
@@ -118,6 +118,7 @@ struct tegra_sflash_data {
spinlock_t lock;
struct clk *clk;
+ struct reset_control *rst;
void __iomem *base;
unsigned irq;
u32 spi_max_frequency;
@@ -148,14 +149,14 @@ struct tegra_sflash_data {
static int tegra_sflash_runtime_suspend(struct device *dev);
static int tegra_sflash_runtime_resume(struct device *dev);
-static inline unsigned long tegra_sflash_readl(struct tegra_sflash_data *tsd,
+static inline u32 tegra_sflash_readl(struct tegra_sflash_data *tsd,
unsigned long reg)
{
return readl(tsd->base + reg);
}
static inline void tegra_sflash_writel(struct tegra_sflash_data *tsd,
- unsigned long val, unsigned long reg)
+ u32 val, unsigned long reg)
{
writel(val, tsd->base + reg);
}
@@ -185,7 +186,7 @@ static unsigned tegra_sflash_fill_tx_fifo_from_client_txbuf(
struct tegra_sflash_data *tsd, struct spi_transfer *t)
{
unsigned nbytes;
- unsigned long status;
+ u32 status;
unsigned max_n_32bit = tsd->curr_xfer_words;
u8 *tx_buf = (u8 *)t->tx_buf + tsd->cur_tx_pos;
@@ -196,11 +197,11 @@ static unsigned tegra_sflash_fill_tx_fifo_from_client_txbuf(
status = tegra_sflash_readl(tsd, SPI_STATUS);
while (!(status & SPI_TXF_FULL)) {
int i;
- unsigned int x = 0;
+ u32 x = 0;
for (i = 0; nbytes && (i < tsd->bytes_per_word);
i++, nbytes--)
- x |= ((*tx_buf++) << i*8);
+ x |= (u32)(*tx_buf++) << (i * 8);
tegra_sflash_writel(tsd, x, SPI_TX_FIFO);
if (!nbytes)
break;
@@ -214,16 +215,14 @@ static unsigned tegra_sflash_fill_tx_fifo_from_client_txbuf(
static int tegra_sflash_read_rx_fifo_to_client_rxbuf(
struct tegra_sflash_data *tsd, struct spi_transfer *t)
{
- unsigned long status;
+ u32 status;
unsigned int read_words = 0;
u8 *rx_buf = (u8 *)t->rx_buf + tsd->cur_rx_pos;
status = tegra_sflash_readl(tsd, SPI_STATUS);
while (!(status & SPI_RXF_EMPTY)) {
int i;
- unsigned long x;
-
- x = tegra_sflash_readl(tsd, SPI_RX_FIFO);
+ u32 x = tegra_sflash_readl(tsd, SPI_RX_FIFO);
for (i = 0; (i < tsd->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
read_words++;
@@ -236,7 +235,7 @@ static int tegra_sflash_read_rx_fifo_to_client_rxbuf(
static int tegra_sflash_start_cpu_based_transfer(
struct tegra_sflash_data *tsd, struct spi_transfer *t)
{
- unsigned long val = 0;
+ u32 val = 0;
unsigned cur_words;
if (tsd->cur_direction & DATA_DIR_TX)
@@ -266,7 +265,7 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi,
{
struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
u32 speed;
- unsigned long command;
+ u32 command;
speed = t->speed_hz;
if (speed != tsd->cur_speed) {
@@ -313,7 +312,7 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi,
tegra_sflash_writel(tsd, command, SPI_COMMAND);
tsd->command_reg = command;
- return tegra_sflash_start_cpu_based_transfer(tsd, t);
+ return tegra_sflash_start_cpu_based_transfer(tsd, t);
}
static int tegra_sflash_setup(struct spi_device *spi)
@@ -389,9 +388,9 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_sflash_data *tsd)
dev_err(tsd->dev,
"CpuXfer 0x%08x:0x%08x\n", tsd->command_reg,
tsd->dma_control_reg);
- tegra_periph_reset_assert(tsd->clk);
+ reset_control_assert(tsd->rst);
udelay(2);
- tegra_periph_reset_deassert(tsd->clk);
+ reset_control_deassert(tsd->rst);
complete(&tsd->xfer_completion);
goto exit;
}
@@ -505,6 +504,13 @@ static int tegra_sflash_probe(struct platform_device *pdev)
goto exit_free_irq;
}
+ tsd->rst = devm_reset_control_get(&pdev->dev, "spi");
+ if (IS_ERR(tsd->rst)) {
+ dev_err(&pdev->dev, "can not get reset\n");
+ ret = PTR_ERR(tsd->rst);
+ goto exit_free_irq;
+ }
+
init_completion(&tsd->xfer_completion);
pm_runtime_enable(&pdev->dev);
if (!pm_runtime_enabled(&pdev->dev)) {
@@ -520,9 +526,9 @@ static int tegra_sflash_probe(struct platform_device *pdev)
}
/* Reset controller */
- tegra_periph_reset_assert(tsd->clk);
+ reset_control_assert(tsd->rst);
udelay(2);
- tegra_periph_reset_deassert(tsd->clk);
+ reset_control_deassert(tsd->rst);
tsd->def_command_reg = SPI_M_S | SPI_CS_SW;
tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index e66715ba37ed..be3a069879c3 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -33,8 +33,8 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/reset.h>
#include <linux/spi/spi.h>
-#include <linux/clk/tegra.h>
#define SLINK_COMMAND 0x000
#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
@@ -167,10 +167,10 @@ struct tegra_slink_data {
spinlock_t lock;
struct clk *clk;
+ struct reset_control *rst;
void __iomem *base;
phys_addr_t phys;
unsigned irq;
- int dma_req_sel;
u32 spi_max_frequency;
u32 cur_speed;
@@ -196,7 +196,7 @@ struct tegra_slink_data {
u32 rx_status;
u32 status_reg;
bool is_packed;
- unsigned long packed_size;
+ u32 packed_size;
u32 command_reg;
u32 command2_reg;
@@ -220,14 +220,14 @@ struct tegra_slink_data {
static int tegra_slink_runtime_suspend(struct device *dev);
static int tegra_slink_runtime_resume(struct device *dev);
-static inline unsigned long tegra_slink_readl(struct tegra_slink_data *tspi,
+static inline u32 tegra_slink_readl(struct tegra_slink_data *tspi,
unsigned long reg)
{
return readl(tspi->base + reg);
}
static inline void tegra_slink_writel(struct tegra_slink_data *tspi,
- unsigned long val, unsigned long reg)
+ u32 val, unsigned long reg)
{
writel(val, tspi->base + reg);
@@ -238,38 +238,30 @@ static inline void tegra_slink_writel(struct tegra_slink_data *tspi,
static void tegra_slink_clear_status(struct tegra_slink_data *tspi)
{
- unsigned long val;
- unsigned long val_write = 0;
+ u32 val_write;
- val = tegra_slink_readl(tspi, SLINK_STATUS);
+ tegra_slink_readl(tspi, SLINK_STATUS);
/* Write 1 to clear status register */
val_write = SLINK_RDY | SLINK_FIFO_ERROR;
tegra_slink_writel(tspi, val_write, SLINK_STATUS);
}
-static unsigned long tegra_slink_get_packed_size(struct tegra_slink_data *tspi,
+static u32 tegra_slink_get_packed_size(struct tegra_slink_data *tspi,
struct spi_transfer *t)
{
- unsigned long val;
-
switch (tspi->bytes_per_word) {
case 0:
- val = SLINK_PACK_SIZE_4;
- break;
+ return SLINK_PACK_SIZE_4;
case 1:
- val = SLINK_PACK_SIZE_8;
- break;
+ return SLINK_PACK_SIZE_8;
case 2:
- val = SLINK_PACK_SIZE_16;
- break;
+ return SLINK_PACK_SIZE_16;
case 4:
- val = SLINK_PACK_SIZE_32;
- break;
+ return SLINK_PACK_SIZE_32;
default:
- val = 0;
+ return 0;
}
- return val;
}
static unsigned tegra_slink_calculate_curr_xfer_param(
@@ -312,10 +304,9 @@ static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
{
unsigned nbytes;
unsigned tx_empty_count;
- unsigned long fifo_status;
+ u32 fifo_status;
unsigned max_n_32bit;
unsigned i, count;
- unsigned long x;
unsigned int written_words;
unsigned fifo_words_left;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
@@ -329,9 +320,9 @@ static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
nbytes = written_words * tspi->bytes_per_word;
max_n_32bit = DIV_ROUND_UP(nbytes, 4);
for (count = 0; count < max_n_32bit; count++) {
- x = 0;
+ u32 x = 0;
for (i = 0; (i < 4) && nbytes; i++, nbytes--)
- x |= (*tx_buf++) << (i*8);
+ x |= (u32)(*tx_buf++) << (i * 8);
tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
}
} else {
@@ -339,10 +330,10 @@ static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
written_words = max_n_32bit;
nbytes = written_words * tspi->bytes_per_word;
for (count = 0; count < max_n_32bit; count++) {
- x = 0;
+ u32 x = 0;
for (i = 0; nbytes && (i < tspi->bytes_per_word);
i++, nbytes--)
- x |= ((*tx_buf++) << i*8);
+ x |= (u32)(*tx_buf++) << (i * 8);
tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
}
}
@@ -354,9 +345,8 @@ static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
unsigned rx_full_count;
- unsigned long fifo_status;
+ u32 fifo_status;
unsigned i, count;
- unsigned long x;
unsigned int read_words = 0;
unsigned len;
u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
@@ -366,7 +356,7 @@ static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
if (tspi->is_packed) {
len = tspi->curr_dma_words * tspi->bytes_per_word;
for (count = 0; count < rx_full_count; count++) {
- x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
+ u32 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
for (i = 0; len && (i < 4); i++, len--)
*rx_buf++ = (x >> i*8) & 0xFF;
}
@@ -374,7 +364,7 @@ static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
read_words += tspi->curr_dma_words;
} else {
for (count = 0; count < rx_full_count; count++) {
- x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
+ u32 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
for (i = 0; (i < tspi->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
@@ -387,27 +377,24 @@ static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
static void tegra_slink_copy_client_txbuf_to_spi_txbuf(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
- unsigned len;
-
/* Make the dma buffer to read by cpu */
dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
tspi->dma_buf_size, DMA_TO_DEVICE);
if (tspi->is_packed) {
- len = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
} else {
unsigned int i;
unsigned int count;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
- unsigned int x;
for (count = 0; count < tspi->curr_dma_words; count++) {
- x = 0;
+ u32 x = 0;
for (i = 0; consume && (i < tspi->bytes_per_word);
i++, consume--)
- x |= ((*tx_buf++) << i * 8);
+ x |= (u32)(*tx_buf++) << (i * 8);
tspi->tx_dma_buf[count] = x;
}
}
@@ -434,14 +421,10 @@ static void tegra_slink_copy_spi_rxbuf_to_client_rxbuf(
unsigned int i;
unsigned int count;
unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
- unsigned int x;
- unsigned int rx_mask, bits_per_word;
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
- bits_per_word = t->bits_per_word;
- rx_mask = (1 << bits_per_word) - 1;
for (count = 0; count < tspi->curr_dma_words; count++) {
- x = tspi->rx_dma_buf[count];
- x &= rx_mask;
+ u32 x = tspi->rx_dma_buf[count] & rx_mask;
for (i = 0; (i < tspi->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
@@ -501,17 +484,16 @@ static int tegra_slink_start_rx_dma(struct tegra_slink_data *tspi, int len)
static int tegra_slink_start_dma_based_transfer(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
- unsigned long val;
- unsigned long test_val;
+ u32 val;
unsigned int len;
int ret = 0;
- unsigned long status;
+ u32 status;
/* Make sure that Rx and Tx fifo are empty */
status = tegra_slink_readl(tspi, SLINK_STATUS);
if ((status & SLINK_FIFO_EMPTY) != SLINK_FIFO_EMPTY) {
- dev_err(tspi->dev,
- "Rx/Tx fifo are not empty status 0x%08lx\n", status);
+ dev_err(tspi->dev, "Rx/Tx fifo are not empty status 0x%08x\n",
+ (unsigned)status);
return -EIO;
}
@@ -551,9 +533,9 @@ static int tegra_slink_start_dma_based_transfer(
}
/* Wait for tx fifo to be fill before starting slink */
- test_val = tegra_slink_readl(tspi, SLINK_STATUS);
- while (!(test_val & SLINK_TX_FULL))
- test_val = tegra_slink_readl(tspi, SLINK_STATUS);
+ status = tegra_slink_readl(tspi, SLINK_STATUS);
+ while (!(status & SLINK_TX_FULL))
+ status = tegra_slink_readl(tspi, SLINK_STATUS);
}
if (tspi->cur_direction & DATA_DIR_RX) {
@@ -587,7 +569,7 @@ static int tegra_slink_start_dma_based_transfer(
static int tegra_slink_start_cpu_based_transfer(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
- unsigned long val;
+ u32 val;
unsigned cur_words;
val = tspi->packed_size;
@@ -629,15 +611,15 @@ static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi,
dma_addr_t dma_phys;
int ret;
struct dma_slave_config dma_sconfig;
- dma_cap_mask_t mask;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- dma_chan = dma_request_channel(mask, NULL, NULL);
- if (!dma_chan) {
- dev_err(tspi->dev,
- "Dma channel is not available, will try later\n");
- return -EPROBE_DEFER;
+ dma_chan = dma_request_slave_channel_reason(tspi->dev,
+ dma_to_memory ? "rx" : "tx");
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(tspi->dev,
+ "Dma channel is not available: %d\n", ret);
+ return ret;
}
dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
@@ -648,7 +630,6 @@ static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi,
return -ENOMEM;
}
- dma_sconfig.slave_id = tspi->dma_req_sel;
if (dma_to_memory) {
dma_sconfig.src_addr = tspi->phys + SLINK_RX_FIFO;
dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -714,8 +695,8 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
u8 bits_per_word;
unsigned total_fifo_words;
int ret;
- unsigned long command;
- unsigned long command2;
+ u32 command;
+ u32 command2;
bits_per_word = t->bits_per_word;
speed = t->speed_hz;
@@ -762,17 +743,18 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
static int tegra_slink_setup(struct spi_device *spi)
{
- struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
- unsigned long val;
- unsigned long flags;
- int ret;
- unsigned int cs_pol_bit[MAX_CHIP_SELECT] = {
+ static const u32 cs_pol_bit[MAX_CHIP_SELECT] = {
SLINK_CS_POLARITY,
SLINK_CS_POLARITY1,
SLINK_CS_POLARITY2,
SLINK_CS_POLARITY3,
};
+ struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
+ u32 val;
+ unsigned long flags;
+ int ret;
+
dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
spi->bits_per_word,
spi->mode & SPI_CPOL ? "" : "~",
@@ -884,9 +866,9 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_slink_data *tspi)
dev_err(tspi->dev,
"CpuXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
tspi->command2_reg, tspi->dma_control_reg);
- tegra_periph_reset_assert(tspi->clk);
+ reset_control_assert(tspi->rst);
udelay(2);
- tegra_periph_reset_deassert(tspi->clk);
+ reset_control_deassert(tspi->rst);
complete(&tspi->xfer_completion);
goto exit;
}
@@ -957,9 +939,9 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_slink_data *tspi)
dev_err(tspi->dev,
"DmaXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
tspi->command2_reg, tspi->dma_control_reg);
- tegra_periph_reset_assert(tspi->clk);
+ reset_control_assert(tspi->rst);
udelay(2);
- tegra_periph_reset_deassert(tspi->clk);
+ reset_control_deassert(tspi->rst);
complete(&tspi->xfer_completion);
spin_unlock_irqrestore(&tspi->lock, flags);
return IRQ_HANDLED;
@@ -1020,11 +1002,6 @@ static irqreturn_t tegra_slink_isr(int irq, void *context_data)
static void tegra_slink_parse_dt(struct tegra_slink_data *tspi)
{
struct device_node *np = tspi->dev->of_node;
- u32 of_dma[2];
-
- if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
- of_dma, 2) >= 0)
- tspi->dma_req_sel = of_dma[1];
if (of_property_read_u32(np, "spi-max-frequency",
&tspi->spi_max_frequency))
@@ -1118,25 +1095,25 @@ static int tegra_slink_probe(struct platform_device *pdev)
goto exit_free_irq;
}
+ tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
+ if (IS_ERR(tspi->rst)) {
+ dev_err(&pdev->dev, "can not get reset\n");
+ ret = PTR_ERR(tspi->rst);
+ goto exit_free_irq;
+ }
+
tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
- if (tspi->dma_req_sel) {
- ret = tegra_slink_init_dma_param(tspi, true);
- if (ret < 0) {
- dev_err(&pdev->dev, "RxDma Init failed, err %d\n", ret);
- goto exit_free_irq;
- }
-
- ret = tegra_slink_init_dma_param(tspi, false);
- if (ret < 0) {
- dev_err(&pdev->dev, "TxDma Init failed, err %d\n", ret);
- goto exit_rx_dma_free;
- }
- tspi->max_buf_size = tspi->dma_buf_size;
- init_completion(&tspi->tx_dma_complete);
- init_completion(&tspi->rx_dma_complete);
- }
+ ret = tegra_slink_init_dma_param(tspi, true);
+ if (ret < 0)
+ goto exit_free_irq;
+ ret = tegra_slink_init_dma_param(tspi, false);
+ if (ret < 0)
+ goto exit_rx_dma_free;
+ tspi->max_buf_size = tspi->dma_buf_size;
+ init_completion(&tspi->tx_dma_complete);
+ init_completion(&tspi->rx_dma_complete);
init_completion(&tspi->xfer_completion);
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 4396bd448540..3d09265b5133 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -46,6 +46,8 @@ struct ti_qspi {
struct spi_master *master;
void __iomem *base;
+ void __iomem *ctrl_base;
+ void __iomem *mmap_base;
struct clk *fclk;
struct device *dev;
@@ -54,6 +56,8 @@ struct ti_qspi {
u32 spi_max_frequency;
u32 cmd;
u32 dc;
+
+ bool ctrl_mod;
};
#define QSPI_PID (0x0)
@@ -204,53 +208,36 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
txbuf = t->tx_buf;
cmd = qspi->cmd | QSPI_WR_SNGL;
count = t->len;
- wlen = t->bits_per_word;
+ wlen = t->bits_per_word >> 3; /* in bytes */
while (count) {
switch (wlen) {
- case 8:
+ case 1:
dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
cmd, qspi->dc, *txbuf);
writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
- ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
- ret = wait_for_completion_timeout(&qspi->transfer_complete,
- QSPI_COMPLETION_TIMEOUT);
- if (ret == 0) {
- dev_err(qspi->dev, "write timed out\n");
- return -ETIMEDOUT;
- }
- txbuf += 1;
- count -= 1;
break;
- case 16:
+ case 2:
dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
cmd, qspi->dc, *txbuf);
writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
- ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
- ret = wait_for_completion_timeout(&qspi->transfer_complete,
- QSPI_COMPLETION_TIMEOUT);
- if (ret == 0) {
- dev_err(qspi->dev, "write timed out\n");
- return -ETIMEDOUT;
- }
- txbuf += 2;
- count -= 2;
break;
- case 32:
+ case 4:
dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n",
cmd, qspi->dc, *txbuf);
writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
- ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
- ret = wait_for_completion_timeout(&qspi->transfer_complete,
- QSPI_COMPLETION_TIMEOUT);
- if (ret == 0) {
- dev_err(qspi->dev, "write timed out\n");
- return -ETIMEDOUT;
- }
- txbuf += 4;
- count -= 4;
break;
}
+
+ ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
+ ret = wait_for_completion_timeout(&qspi->transfer_complete,
+ QSPI_COMPLETION_TIMEOUT);
+ if (ret == 0) {
+ dev_err(qspi->dev, "write timed out\n");
+ return -ETIMEDOUT;
+ }
+ txbuf += wlen;
+ count -= wlen;
}
return 0;
@@ -276,7 +263,7 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
break;
}
count = t->len;
- wlen = t->bits_per_word;
+ wlen = t->bits_per_word >> 3; /* in bytes */
while (count) {
dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
@@ -288,22 +275,18 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
return -ETIMEDOUT;
}
switch (wlen) {
- case 8:
+ case 1:
*rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG);
- rxbuf += 1;
- count -= 1;
break;
- case 16:
+ case 2:
*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
- rxbuf += 2;
- count -= 2;
break;
- case 32:
+ case 4:
*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
- rxbuf += 4;
- count -= 4;
break;
}
+ rxbuf += wlen;
+ count -= wlen;
}
return 0;
@@ -417,10 +400,8 @@ out:
static int ti_qspi_runtime_resume(struct device *dev)
{
struct ti_qspi *qspi;
- struct spi_master *master;
- master = dev_get_drvdata(dev);
- qspi = spi_master_get_devdata(master);
+ qspi = dev_get_drvdata(dev);
ti_qspi_restore_ctx(qspi);
return 0;
@@ -437,7 +418,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
{
struct ti_qspi *qspi;
struct spi_master *master;
- struct resource *r;
+ struct resource *r, *res_ctrl, *res_mmap;
struct device_node *np = pdev->dev.of_node;
u32 max_freq;
int ret = 0, num_cs, irq;
@@ -464,7 +445,35 @@ static int ti_qspi_probe(struct platform_device *pdev)
qspi->dev = &pdev->dev;
platform_set_drvdata(pdev, qspi);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
+ if (r == NULL) {
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+ }
+
+ res_mmap = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "qspi_mmap");
+ if (res_mmap == NULL) {
+ res_mmap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res_mmap == NULL) {
+ dev_err(&pdev->dev,
+ "memory mapped resource not required\n");
+ return -ENODEV;
+ }
+ }
+
+ res_ctrl = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "qspi_ctrlmod");
+ if (res_ctrl == NULL) {
+ res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (res_ctrl == NULL) {
+ dev_dbg(&pdev->dev,
+ "control module resources not required\n");
+ }
+ }
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -480,6 +489,23 @@ static int ti_qspi_probe(struct platform_device *pdev)
goto free_master;
}
+ if (res_ctrl) {
+ qspi->ctrl_mod = true;
+ qspi->ctrl_base = devm_ioremap_resource(&pdev->dev, res_ctrl);
+ if (IS_ERR(qspi->ctrl_base)) {
+ ret = PTR_ERR(qspi->ctrl_base);
+ goto free_master;
+ }
+ }
+
+ if (res_mmap) {
+ qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
+ if (IS_ERR(qspi->mmap_base)) {
+ ret = PTR_ERR(qspi->mmap_base);
+ goto free_master;
+ }
+ }
+
ret = devm_request_irq(&pdev->dev, irq, ti_qspi_isr, 0,
dev_name(&pdev->dev), qspi);
if (ret < 0) {
@@ -516,13 +542,9 @@ free_master:
static int ti_qspi_remove(struct platform_device *pdev)
{
- struct spi_master *master;
- struct ti_qspi *qspi;
+ struct ti_qspi *qspi = platform_get_drvdata(pdev);
int ret;
- master = platform_get_drvdata(pdev);
- qspi = spi_master_get_devdata(master);
-
ret = pm_runtime_get_sync(qspi->dev);
if (ret < 0) {
dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
@@ -534,8 +556,6 @@ static int ti_qspi_remove(struct platform_device *pdev)
pm_runtime_put(qspi->dev);
pm_runtime_disable(&pdev->dev);
- spi_unregister_master(master);
-
return 0;
}
@@ -547,7 +567,7 @@ static struct platform_driver ti_qspi_driver = {
.probe = ti_qspi_probe,
.remove = ti_qspi_remove,
.driver = {
- .name = "ti,dra7xxx-qspi",
+ .name = "ti-qspi",
.owner = THIS_MODULE,
.pm = &ti_qspi_pm_ops,
.of_match_table = ti_qspi_match,
@@ -559,3 +579,4 @@ module_platform_driver(ti_qspi_driver);
MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QSPI controller driver");
+MODULE_ALIAS("platform:ti-qspi");
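The probe changes above look each memory region up by name first and only fall back to a fixed index for older device trees. A rough sketch of that lookup pattern, with an illustrative helper name that is not part of the driver itself:

	static struct resource *ti_qspi_get_mem(struct platform_device *pdev,
						const char *name, unsigned int index)
	{
		struct resource *res;

		/* Prefer the named reg entry; fall back to positional lookup. */
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
		if (!res)
			res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return res;
	}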
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 446131308acb..2e7f38c7a961 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -217,7 +217,7 @@ struct pch_pd_dev_save {
struct pch_spi_board_data *board_dat;
};
-static DEFINE_PCI_DEVICE_TABLE(pch_spi_pcidev_id) = {
+static const struct pci_device_id pch_spi_pcidev_id[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
@@ -466,12 +466,6 @@ static void pch_spi_reset(struct spi_master *master)
static int pch_spi_setup(struct spi_device *pspi)
{
- /* check bits per word */
- if (pspi->bits_per_word == 0) {
- pspi->bits_per_word = 8;
- dev_dbg(&pspi->dev, "%s 8 bits per word\n", __func__);
- }
-
/* Check baud rate setting */
/* if baud rate of chip is greater than
max we can support, return error */
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index 18c9bb2b5f39..6191ced514b2 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -348,7 +348,7 @@ static int txx9spi_probe(struct platform_device *dev)
INIT_LIST_HEAD(&c->queue);
init_waitqueue_head(&c->waitq);
- c->clk = clk_get(&dev->dev, "spi-baseclk");
+ c->clk = devm_clk_get(&dev->dev, "spi-baseclk");
if (IS_ERR(c->clk)) {
ret = PTR_ERR(c->clk);
c->clk = NULL;
@@ -356,7 +356,6 @@ static int txx9spi_probe(struct platform_device *dev)
}
ret = clk_enable(c->clk);
if (ret) {
- clk_put(c->clk);
c->clk = NULL;
goto exit;
}
@@ -415,10 +414,8 @@ exit_busy:
exit:
if (c->workqueue)
destroy_workqueue(c->workqueue);
- if (c->clk) {
+ if (c->clk)
clk_disable(c->clk);
- clk_put(c->clk);
- }
spi_master_put(master);
return ret;
}
@@ -430,7 +427,6 @@ static int txx9spi_remove(struct platform_device *dev)
destroy_workqueue(c->workqueue);
clk_disable(c->clk);
- clk_put(c->clk);
return 0;
}
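Switching to devm_clk_get() ties the clock reference to the device's lifetime, which is why the explicit clk_put() calls can be dropped from both the probe error path and txx9spi_remove(); clk_disable() still has to be balanced by hand, as the remaining calls above show.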
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index 4258c712ad3c..24c40b13dab1 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -231,22 +231,13 @@ static int spi_xcomm_probe(struct i2c_client *i2c,
master->dev.of_node = i2c->dev.of_node;
i2c_set_clientdata(i2c, master);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&i2c->dev, master);
if (ret < 0)
spi_master_put(master);
return ret;
}
-static int spi_xcomm_remove(struct i2c_client *i2c)
-{
- struct spi_master *master = i2c_get_clientdata(i2c);
-
- spi_unregister_master(master);
-
- return 0;
-}
-
static const struct i2c_device_id spi_xcomm_ids[] = {
{ "spi-xcomm" },
{ },
@@ -259,7 +250,6 @@ static struct i2c_driver spi_xcomm_driver = {
},
.id_table = spi_xcomm_ids,
.probe = spi_xcomm_probe,
- .remove = spi_xcomm_remove,
};
module_i2c_driver(spi_xcomm_driver);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 349ebba4b199..d0b28bba38be 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -58,6 +58,11 @@ static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
const struct spi_device *spi = to_spi_device(dev);
+ int len;
+
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
@@ -114,6 +119,11 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
const struct spi_device *spi = to_spi_device(dev);
+ int rc;
+
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
return 0;
@@ -370,6 +380,17 @@ static void spi_dev_set_name(struct spi_device *spi)
spi->chip_select);
}
+static int spi_dev_check(struct device *dev, void *data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_device *new_spi = data;
+
+ if (spi->master == new_spi->master &&
+ spi->chip_select == new_spi->chip_select)
+ return -EBUSY;
+ return 0;
+}
+
/**
* spi_add_device - Add spi_device allocated with spi_alloc_device
* @spi: spi_device to register
@@ -384,7 +405,6 @@ int spi_add_device(struct spi_device *spi)
static DEFINE_MUTEX(spi_add_lock);
struct spi_master *master = spi->master;
struct device *dev = master->dev.parent;
- struct device *d;
int status;
/* Chipselects are numbered 0..max; validate. */
@@ -404,12 +424,10 @@ int spi_add_device(struct spi_device *spi)
*/
mutex_lock(&spi_add_lock);
- d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev));
- if (d != NULL) {
+ status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
+ if (status) {
dev_err(dev, "chipselect %d already in use\n",
spi->chip_select);
- put_device(d);
- status = -EBUSY;
goto done;
}
@@ -591,8 +609,10 @@ static int spi_transfer_one_message(struct spi_master *master,
goto out;
}
- if (ret > 0)
+ if (ret > 0) {
+ ret = 0;
wait_for_completion(&master->xfer_completion);
+ }
trace_spi_transfer_stop(msg, xfer);
@@ -632,7 +652,7 @@ out:
*
* Called by SPI drivers using the core transfer_one_message()
* implementation to notify it that the current interrupt driven
- * transfer has finised and the next one may be scheduled.
+ * transfer has finished and the next one may be scheduled.
*/
void spi_finalize_current_transfer(struct spi_master *master)
{
@@ -685,7 +705,7 @@ static void spi_pump_messages(struct kthread_work *work)
}
/* Extract head of queue */
master->cur_msg =
- list_entry(master->queue.next, struct spi_message, queue);
+ list_first_entry(&master->queue, struct spi_message, queue);
list_del_init(&master->cur_msg->queue);
if (master->busy)
@@ -791,11 +811,8 @@ struct spi_message *spi_get_next_queued_message(struct spi_master *master)
/* get a pointer to the next message, if any */
spin_lock_irqsave(&master->queue_lock, flags);
- if (list_empty(&master->queue))
- next = NULL;
- else
- next = list_entry(master->queue.next,
- struct spi_message, queue);
+ next = list_first_entry_or_null(&master->queue, struct spi_message,
+ queue);
spin_unlock_irqrestore(&master->queue_lock, flags);
return next;
@@ -1596,15 +1613,11 @@ int spi_setup(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_setup);
-static int __spi_async(struct spi_device *spi, struct spi_message *message)
+static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
struct spi_master *master = spi->master;
struct spi_transfer *xfer;
- message->spi = spi;
-
- trace_spi_message_submit(message);
-
if (list_empty(&message->transfers))
return -EINVAL;
if (!message->complete)
@@ -1667,9 +1680,8 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
if (xfer->rx_buf && !xfer->rx_nbits)
xfer->rx_nbits = SPI_NBITS_SINGLE;
/* check transfer tx/rx_nbits:
- * 1. keep the value is not out of single, dual and quad
- * 2. keep tx/rx_nbits is contained by mode in spi_device
- * 3. if SPI_3WIRE, tx/rx_nbits should be in single
+ * 1. check the value matches one of single, dual and quad
+ * 2. check tx/rx_nbits match the mode in spi_device
*/
if (xfer->tx_buf) {
if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
@@ -1682,9 +1694,6 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
!(spi->mode & SPI_TX_QUAD))
return -EINVAL;
- if ((spi->mode & SPI_3WIRE) &&
- (xfer->tx_nbits != SPI_NBITS_SINGLE))
- return -EINVAL;
}
/* check transfer rx_nbits */
if (xfer->rx_buf) {
@@ -1698,13 +1707,22 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
!(spi->mode & SPI_RX_QUAD))
return -EINVAL;
- if ((spi->mode & SPI_3WIRE) &&
- (xfer->rx_nbits != SPI_NBITS_SINGLE))
- return -EINVAL;
}
}
message->status = -EINPROGRESS;
+
+ return 0;
+}
+
+static int __spi_async(struct spi_device *spi, struct spi_message *message)
+{
+ struct spi_master *master = spi->master;
+
+ message->spi = spi;
+
+ trace_spi_message_submit(message);
+
return master->transfer(spi, message);
}
@@ -1743,6 +1761,10 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
int ret;
unsigned long flags;
+ ret = __spi_validate(spi, message);
+ if (ret != 0)
+ return ret;
+
spin_lock_irqsave(&master->bus_lock_spinlock, flags);
if (master->bus_lock_flag)
@@ -1791,6 +1813,10 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message)
int ret;
unsigned long flags;
+ ret = __spi_validate(spi, message);
+ if (ret != 0)
+ return ret;
+
spin_lock_irqsave(&master->bus_lock_spinlock, flags);
ret = __spi_async(spi, message);
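With the __spi_validate()/__spi_async() split above, the per-transfer sanity checks run before bus_lock_spinlock is taken, and the message->spi assignment plus tracing happen only for messages that are actually submitted. The resulting submission path, condensed into a sketch rather than verbatim driver code:

	/*
	 * spi_async() / spi_async_locked()
	 *   -> __spi_validate(spi, message)    argument checks, no locks held
	 *   -> spin_lock_irqsave(&master->bus_lock_spinlock, flags)
	 *   -> __spi_async(spi, message)       trace_spi_message_submit() and
	 *                                      master->transfer(spi, message)
	 */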
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 2cd9b0e44a41..75b3603906c1 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -168,6 +168,7 @@ config SSB_DRIVER_GIGE
config SSB_DRIVER_GPIO
bool "SSB GPIO driver"
depends on SSB && GPIOLIB
+ select IRQ_DOMAIN if SSB_EMBEDDED
help
Driver to provide access to the GPIO pins on the bus.
diff --git a/drivers/ssb/driver_chipcommon_sflash.c b/drivers/ssb/driver_chipcommon_sflash.c
index 50328de712fa..937fc31971a7 100644
--- a/drivers/ssb/driver_chipcommon_sflash.c
+++ b/drivers/ssb/driver_chipcommon_sflash.c
@@ -37,7 +37,7 @@ static const struct ssb_sflash_tbl_e ssb_sflash_st_tbl[] = {
{ "M25P32", 0x15, 0x10000, 64, },
{ "M25P64", 0x16, 0x10000, 128, },
{ "M25FL128", 0x17, 0x10000, 256, },
- { 0 },
+ { NULL },
};
static const struct ssb_sflash_tbl_e ssb_sflash_sst_tbl[] = {
@@ -55,7 +55,7 @@ static const struct ssb_sflash_tbl_e ssb_sflash_sst_tbl[] = {
{ "SST25VF016", 0x41, 0x1000, 512, },
{ "SST25VF032", 0x4a, 0x1000, 1024, },
{ "SST25VF064", 0x4b, 0x1000, 2048, },
- { 0 },
+ { NULL },
};
static const struct ssb_sflash_tbl_e ssb_sflash_at_tbl[] = {
@@ -66,7 +66,7 @@ static const struct ssb_sflash_tbl_e ssb_sflash_at_tbl[] = {
{ "AT45DB161", 0x2c, 512, 4096, },
{ "AT45DB321", 0x34, 512, 8192, },
{ "AT45DB642", 0x3c, 1024, 8192, },
- { 0 },
+ { NULL },
};
static void ssb_sflash_cmd(struct ssb_chipcommon *cc, u32 opcode)
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index dc109de228c6..ba350d2035c0 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -9,16 +9,40 @@
*/
#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/ssb/ssb.h>
#include "ssb_private.h"
+
+/**************************************************
+ * Shared
+ **************************************************/
+
static struct ssb_bus *ssb_gpio_get_bus(struct gpio_chip *chip)
{
return container_of(chip, struct ssb_bus, gpio);
}
+#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
+static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ if (bus->bustype == SSB_BUSTYPE_SSB)
+ return irq_find_mapping(bus->irq_domain, gpio);
+ else
+ return -EINVAL;
+}
+#endif
+
+/**************************************************
+ * ChipCommon
+ **************************************************/
+
static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned gpio)
{
struct ssb_bus *bus = ssb_gpio_get_bus(chip);
@@ -74,19 +98,129 @@ static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned gpio)
ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 0);
}
-static int ssb_gpio_chipco_to_irq(struct gpio_chip *chip, unsigned gpio)
+#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
+static void ssb_gpio_irq_chipco_mask(struct irq_data *d)
{
- struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+ struct ssb_bus *bus = irq_data_get_irq_chip_data(d);
+ int gpio = irqd_to_hwirq(d);
- if (bus->bustype == SSB_BUSTYPE_SSB)
- return ssb_mips_irq(bus->chipco.dev) + 2;
- else
- return -EINVAL;
+ ssb_chipco_gpio_intmask(&bus->chipco, BIT(gpio), 0);
+}
+
+static void ssb_gpio_irq_chipco_unmask(struct irq_data *d)
+{
+ struct ssb_bus *bus = irq_data_get_irq_chip_data(d);
+ int gpio = irqd_to_hwirq(d);
+ u32 val = ssb_chipco_gpio_in(&bus->chipco, BIT(gpio));
+
+ ssb_chipco_gpio_polarity(&bus->chipco, BIT(gpio), val);
+ ssb_chipco_gpio_intmask(&bus->chipco, BIT(gpio), BIT(gpio));
+}
+
+static struct irq_chip ssb_gpio_irq_chipco_chip = {
+ .name = "SSB-GPIO-CC",
+ .irq_mask = ssb_gpio_irq_chipco_mask,
+ .irq_unmask = ssb_gpio_irq_chipco_unmask,
+};
+
+static irqreturn_t ssb_gpio_irq_chipco_handler(int irq, void *dev_id)
+{
+ struct ssb_bus *bus = dev_id;
+ struct ssb_chipcommon *chipco = &bus->chipco;
+ u32 val = chipco_read32(chipco, SSB_CHIPCO_GPIOIN);
+ u32 mask = chipco_read32(chipco, SSB_CHIPCO_GPIOIRQ);
+ u32 pol = chipco_read32(chipco, SSB_CHIPCO_GPIOPOL);
+ unsigned long irqs = (val ^ pol) & mask;
+ int gpio;
+
+ if (!irqs)
+ return IRQ_NONE;
+
+ for_each_set_bit(gpio, &irqs, bus->gpio.ngpio)
+ generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio));
+ ssb_chipco_gpio_polarity(chipco, irqs, val & irqs);
+
+ return IRQ_HANDLED;
+}
+
+static int ssb_gpio_irq_chipco_domain_init(struct ssb_bus *bus)
+{
+ struct ssb_chipcommon *chipco = &bus->chipco;
+ struct gpio_chip *chip = &bus->gpio;
+ int gpio, hwirq, err;
+
+ if (bus->bustype != SSB_BUSTYPE_SSB)
+ return 0;
+
+ bus->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
+ &irq_domain_simple_ops, chipco);
+ if (!bus->irq_domain) {
+ err = -ENODEV;
+ goto err_irq_domain;
+ }
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_create_mapping(bus->irq_domain, gpio);
+
+ irq_set_chip_data(irq, bus);
+ irq_set_chip_and_handler(irq, &ssb_gpio_irq_chipco_chip,
+ handle_simple_irq);
+ }
+
+ hwirq = ssb_mips_irq(bus->chipco.dev) + 2;
+ err = request_irq(hwirq, ssb_gpio_irq_chipco_handler, IRQF_SHARED,
+ "gpio", bus);
+ if (err)
+ goto err_req_irq;
+
+ ssb_chipco_gpio_intmask(&bus->chipco, ~0, 0);
+ chipco_set32(chipco, SSB_CHIPCO_IRQMASK, SSB_CHIPCO_IRQ_GPIO);
+
+ return 0;
+
+err_req_irq:
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_find_mapping(bus->irq_domain, gpio);
+
+ irq_dispose_mapping(irq);
+ }
+ irq_domain_remove(bus->irq_domain);
+err_irq_domain:
+ return err;
+}
+
+static void ssb_gpio_irq_chipco_domain_exit(struct ssb_bus *bus)
+{
+ struct ssb_chipcommon *chipco = &bus->chipco;
+ struct gpio_chip *chip = &bus->gpio;
+ int gpio;
+
+ if (bus->bustype != SSB_BUSTYPE_SSB)
+ return;
+
+ chipco_mask32(chipco, SSB_CHIPCO_IRQMASK, ~SSB_CHIPCO_IRQ_GPIO);
+ free_irq(ssb_mips_irq(bus->chipco.dev) + 2, chipco);
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_find_mapping(bus->irq_domain, gpio);
+
+ irq_dispose_mapping(irq);
+ }
+ irq_domain_remove(bus->irq_domain);
+}
+#else
+static int ssb_gpio_irq_chipco_domain_init(struct ssb_bus *bus)
+{
+ return 0;
}
+static void ssb_gpio_irq_chipco_domain_exit(struct ssb_bus *bus)
+{
+}
+#endif
+
static int ssb_gpio_chipco_init(struct ssb_bus *bus)
{
struct gpio_chip *chip = &bus->gpio;
+ int err;
chip->label = "ssb_chipco_gpio";
chip->owner = THIS_MODULE;
@@ -96,7 +230,9 @@ static int ssb_gpio_chipco_init(struct ssb_bus *bus)
chip->set = ssb_gpio_chipco_set_value;
chip->direction_input = ssb_gpio_chipco_direction_input;
chip->direction_output = ssb_gpio_chipco_direction_output;
- chip->to_irq = ssb_gpio_chipco_to_irq;
+#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
+ chip->to_irq = ssb_gpio_to_irq;
+#endif
chip->ngpio = 16;
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
@@ -106,9 +242,23 @@ static int ssb_gpio_chipco_init(struct ssb_bus *bus)
else
chip->base = -1;
- return gpiochip_add(chip);
+ err = ssb_gpio_irq_chipco_domain_init(bus);
+ if (err)
+ return err;
+
+ err = gpiochip_add(chip);
+ if (err) {
+ ssb_gpio_irq_chipco_domain_exit(bus);
+ return err;
+ }
+
+ return 0;
}
+/**************************************************
+ * EXTIF
+ **************************************************/
+
#ifdef CONFIG_SSB_DRIVER_EXTIF
static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned gpio)
@@ -145,19 +295,127 @@ static int ssb_gpio_extif_direction_output(struct gpio_chip *chip,
return 0;
}
-static int ssb_gpio_extif_to_irq(struct gpio_chip *chip, unsigned gpio)
+#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
+static void ssb_gpio_irq_extif_mask(struct irq_data *d)
{
- struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+ struct ssb_bus *bus = irq_data_get_irq_chip_data(d);
+ int gpio = irqd_to_hwirq(d);
- if (bus->bustype == SSB_BUSTYPE_SSB)
- return ssb_mips_irq(bus->extif.dev) + 2;
- else
- return -EINVAL;
+ ssb_extif_gpio_intmask(&bus->extif, BIT(gpio), 0);
+}
+
+static void ssb_gpio_irq_extif_unmask(struct irq_data *d)
+{
+ struct ssb_bus *bus = irq_data_get_irq_chip_data(d);
+ int gpio = irqd_to_hwirq(d);
+ u32 val = ssb_extif_gpio_in(&bus->extif, BIT(gpio));
+
+ ssb_extif_gpio_polarity(&bus->extif, BIT(gpio), val);
+ ssb_extif_gpio_intmask(&bus->extif, BIT(gpio), BIT(gpio));
+}
+
+static struct irq_chip ssb_gpio_irq_extif_chip = {
+ .name = "SSB-GPIO-EXTIF",
+ .irq_mask = ssb_gpio_irq_extif_mask,
+ .irq_unmask = ssb_gpio_irq_extif_unmask,
+};
+
+static irqreturn_t ssb_gpio_irq_extif_handler(int irq, void *dev_id)
+{
+ struct ssb_bus *bus = dev_id;
+ struct ssb_extif *extif = &bus->extif;
+ u32 val = ssb_read32(extif->dev, SSB_EXTIF_GPIO_IN);
+ u32 mask = ssb_read32(extif->dev, SSB_EXTIF_GPIO_INTMASK);
+ u32 pol = ssb_read32(extif->dev, SSB_EXTIF_GPIO_INTPOL);
+ unsigned long irqs = (val ^ pol) & mask;
+ int gpio;
+
+ if (!irqs)
+ return IRQ_NONE;
+
+ for_each_set_bit(gpio, &irqs, bus->gpio.ngpio)
+ generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio));
+ ssb_extif_gpio_polarity(extif, irqs, val & irqs);
+
+ return IRQ_HANDLED;
+}
+
+static int ssb_gpio_irq_extif_domain_init(struct ssb_bus *bus)
+{
+ struct ssb_extif *extif = &bus->extif;
+ struct gpio_chip *chip = &bus->gpio;
+ int gpio, hwirq, err;
+
+ if (bus->bustype != SSB_BUSTYPE_SSB)
+ return 0;
+
+ bus->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
+ &irq_domain_simple_ops, extif);
+ if (!bus->irq_domain) {
+ err = -ENODEV;
+ goto err_irq_domain;
+ }
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_create_mapping(bus->irq_domain, gpio);
+
+ irq_set_chip_data(irq, bus);
+ irq_set_chip_and_handler(irq, &ssb_gpio_irq_extif_chip,
+ handle_simple_irq);
+ }
+
+ hwirq = ssb_mips_irq(bus->extif.dev) + 2;
+ err = request_irq(hwirq, ssb_gpio_irq_extif_handler, IRQF_SHARED,
+ "gpio", bus);
+ if (err)
+ goto err_req_irq;
+
+ ssb_extif_gpio_intmask(&bus->extif, ~0, 0);
+
+ return 0;
+
+err_req_irq:
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_find_mapping(bus->irq_domain, gpio);
+
+ irq_dispose_mapping(irq);
+ }
+ irq_domain_remove(bus->irq_domain);
+err_irq_domain:
+ return err;
+}
+
+static void ssb_gpio_irq_extif_domain_exit(struct ssb_bus *bus)
+{
+ struct ssb_extif *extif = &bus->extif;
+ struct gpio_chip *chip = &bus->gpio;
+ int gpio;
+
+ if (bus->bustype != SSB_BUSTYPE_SSB)
+ return;
+
+ free_irq(ssb_mips_irq(bus->extif.dev) + 2, extif);
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_find_mapping(bus->irq_domain, gpio);
+
+ irq_dispose_mapping(irq);
+ }
+ irq_domain_remove(bus->irq_domain);
}
+#else
+static int ssb_gpio_irq_extif_domain_init(struct ssb_bus *bus)
+{
+ return 0;
+}
+
+static void ssb_gpio_irq_extif_domain_exit(struct ssb_bus *bus)
+{
+}
+#endif
static int ssb_gpio_extif_init(struct ssb_bus *bus)
{
struct gpio_chip *chip = &bus->gpio;
+ int err;
chip->label = "ssb_extif_gpio";
chip->owner = THIS_MODULE;
@@ -165,7 +423,9 @@ static int ssb_gpio_extif_init(struct ssb_bus *bus)
chip->set = ssb_gpio_extif_set_value;
chip->direction_input = ssb_gpio_extif_direction_input;
chip->direction_output = ssb_gpio_extif_direction_output;
- chip->to_irq = ssb_gpio_extif_to_irq;
+#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
+ chip->to_irq = ssb_gpio_to_irq;
+#endif
chip->ngpio = 5;
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
@@ -175,7 +435,17 @@ static int ssb_gpio_extif_init(struct ssb_bus *bus)
else
chip->base = -1;
- return gpiochip_add(chip);
+ err = ssb_gpio_irq_extif_domain_init(bus);
+ if (err)
+ return err;
+
+ err = gpiochip_add(chip);
+ if (err) {
+ ssb_gpio_irq_extif_domain_exit(bus);
+ return err;
+ }
+
+ return 0;
}
#else
@@ -185,6 +455,10 @@ static int ssb_gpio_extif_init(struct ssb_bus *bus)
}
#endif
+/**************************************************
+ * Init
+ **************************************************/
+
int ssb_gpio_init(struct ssb_bus *bus)
{
if (ssb_chipco_available(&bus->chipco))
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 32a811d11c25..2fead3820849 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -593,6 +593,13 @@ static int ssb_attach_queued_buses(void)
ssb_pcicore_init(&bus->pcicore);
if (bus->bustype == SSB_BUSTYPE_SSB)
ssb_watchdog_register(bus);
+
+ err = ssb_gpio_init(bus);
+ if (err == -ENOTSUPP)
+ ssb_dbg("GPIO driver not activated\n");
+ else if (err)
+ ssb_dbg("Error registering GPIO driver: %i\n", err);
+
ssb_bus_may_powerdown(bus);
err = ssb_devices_register(bus);
@@ -830,11 +837,6 @@ static int ssb_bus_register(struct ssb_bus *bus,
ssb_chipcommon_init(&bus->chipco);
ssb_extif_init(&bus->extif);
ssb_mipscore_init(&bus->mipscore);
- err = ssb_gpio_init(bus);
- if (err == -ENOTSUPP)
- ssb_dbg("GPIO driver not activated\n");
- else if (err)
- ssb_dbg("Error registering GPIO driver: %i\n", err);
err = ssb_fetch_invariants(bus, get_invariants);
if (err) {
ssb_bus_may_powerdown(bus);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 3bfdaa8d80a9..99375f0a9440 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -52,8 +52,12 @@ source "drivers/staging/rtl8712/Kconfig"
source "drivers/staging/rtl8188eu/Kconfig"
+source "drivers/staging/rtl8821ae/Kconfig"
+
source "drivers/staging/rts5139/Kconfig"
+source "drivers/staging/rts5208/Kconfig"
+
source "drivers/staging/frontier/Kconfig"
source "drivers/staging/phison/Kconfig"
@@ -74,10 +78,6 @@ source "drivers/staging/sep/Kconfig"
source "drivers/staging/iio/Kconfig"
-source "drivers/staging/zsmalloc/Kconfig"
-
-source "drivers/staging/zram/Kconfig"
-
source "drivers/staging/wlags49_h2/Kconfig"
source "drivers/staging/wlags49_h25/Kconfig"
@@ -138,12 +138,8 @@ source "drivers/staging/netlogic/Kconfig"
source "drivers/staging/mt29f_spinand/Kconfig"
-source "drivers/staging/dwc2/Kconfig"
-
source "drivers/staging/lustre/Kconfig"
-source "drivers/staging/btmtk_usb/Kconfig"
-
source "drivers/staging/xillybus/Kconfig"
source "drivers/staging/dgnc/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index b0d3303b4680..ddc3c4a5d39d 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -18,7 +18,9 @@ obj-$(CONFIG_RTL8192U) += rtl8192u/
obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_R8188EU) += rtl8188eu/
+obj-$(CONFIG_R8821AE) += rtl8821ae/
obj-$(CONFIG_RTS5139) += rts5139/
+obj-$(CONFIG_RTS5208) += rts5208/
obj-$(CONFIG_TRANZPORT) += frontier/
obj-$(CONFIG_IDE_PHISON) += phison/
obj-$(CONFIG_LINE6_USB) += line6/
@@ -31,8 +33,6 @@ obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
-obj-$(CONFIG_ZRAM) += zram/
-obj-$(CONFIG_ZSMALLOC) += zsmalloc/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/
obj-$(CONFIG_FB_SM7XX) += sm7xxfb/
@@ -60,9 +60,7 @@ obj-$(CONFIG_DGRP) += dgrp/
obj-$(CONFIG_SB105X) += sb105x/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
obj-$(CONFIG_GOLDFISH) += goldfish/
-obj-$(CONFIG_USB_DWC2) += dwc2/
obj-$(CONFIG_LUSTRE_FS) += lustre/
-obj-$(CONFIG_USB_BTMTK) += btmtk_usb/
obj-$(CONFIG_XILLYBUS) += xillybus/
obj-$(CONFIG_DGNC) += dgnc/
obj-$(CONFIG_DGAP) += dgap/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 22cf17dcb7da..b6b869261f32 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -98,6 +98,8 @@ config SW_SYNC_USER
*WARNING* improper use of this can result in deadlocking kernel
drivers from userspace.
+source "drivers/staging/android/ion/Kconfig"
+
endif # if ANDROID
endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index c136299e05af..0a01e1914905 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -1,5 +1,7 @@
ccflags-y += -I$(src) # needed for trace events
+obj-y += ion/
+
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_LOGGER) += logger.o
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
index 647694f43dcf..2fc7cdd4c4e3 100644
--- a/drivers/staging/android/alarm-dev.c
+++ b/drivers/staging/android/alarm-dev.c
@@ -68,11 +68,10 @@ static struct devalarm alarms[ANDROID_ALARM_TYPE_COUNT];
*/
static int is_wakeup(enum android_alarm_type type)
{
- return (type == ANDROID_ALARM_RTC_WAKEUP ||
- type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP);
+ return type == ANDROID_ALARM_RTC_WAKEUP ||
+ type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP;
}
-
static void devalarm_start(struct devalarm *alrm, ktime_t exp)
{
if (is_wakeup(alrm->type))
@@ -111,7 +110,6 @@ static void alarm_clear(enum android_alarm_type alarm_type)
}
alarm_enabled &= ~alarm_type_mask;
spin_unlock_irqrestore(&alarm_slock, flags);
-
}
static void alarm_set(enum android_alarm_type alarm_type,
@@ -280,6 +278,7 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
+
#ifdef CONFIG_COMPAT
static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
@@ -371,7 +370,6 @@ static void devalarm_triggered(struct devalarm *alarm)
spin_unlock_irqrestore(&alarm_slock, flags);
}
-
static enum hrtimer_restart devalarm_hrthandler(struct hrtimer *hrt)
{
struct devalarm *devalrm = container_of(hrt, struct devalarm, u.hrt);
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 23948f167012..713a97226787 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -295,21 +295,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,
/* If size is not set, or set to 0, always return EOF. */
if (asma->size == 0)
- goto out;
+ goto out_unlock;
if (!asma->file) {
ret = -EBADF;
- goto out;
+ goto out_unlock;
}
- ret = asma->file->f_op->read(asma->file, buf, len, pos);
- if (ret < 0)
- goto out;
+ mutex_unlock(&ashmem_mutex);
- /** Update backing file pos, since f_ops->read() doesn't */
- asma->file->f_pos = *pos;
+ /*
+ * asma and asma->file are used outside the lock here. We assume
+ * once asma->file is set it will never be changed, and will not
+ * be destroyed until all references to the file are dropped and
+ * ashmem_release is called.
+ */
+ ret = asma->file->f_op->read(asma->file, buf, len, pos);
+ if (ret >= 0) {
+ /** Update backing file pos, since f_ops->read() doesn't */
+ asma->file->f_pos = *pos;
+ }
+ return ret;
-out:
+out_unlock:
mutex_unlock(&ashmem_mutex);
return ret;
}
@@ -498,6 +506,7 @@ out:
static int set_name(struct ashmem_area *asma, void __user *name)
{
+ int len;
int ret = 0;
char local_name[ASHMEM_NAME_LEN];
@@ -510,21 +519,19 @@ static int set_name(struct ashmem_area *asma, void __user *name)
* variable that does not need protection and later copy the local
* variable to the structure member with lock held.
*/
- if (copy_from_user(local_name, name, ASHMEM_NAME_LEN))
- return -EFAULT;
-
+ len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
+ if (len < 0)
+ return len;
+ if (len == ASHMEM_NAME_LEN)
+ local_name[ASHMEM_NAME_LEN - 1] = '\0';
mutex_lock(&ashmem_mutex);
/* cannot change an existing mapping's name */
- if (unlikely(asma->file)) {
+ if (unlikely(asma->file))
ret = -EINVAL;
- goto out;
- }
- memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN,
- local_name, ASHMEM_NAME_LEN);
- asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
-out:
- mutex_unlock(&ashmem_mutex);
+ else
+ strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
+ mutex_unlock(&ashmem_mutex);
return ret;
}
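strncpy_from_user() returns the length of the copied string excluding the terminating NUL (or a negative error), and when it returns the full buffer size the destination is not guaranteed to be NUL-terminated; that is what the len == ASHMEM_NAME_LEN branch above guards against. A minimal sketch of the idiom, where user_ptr stands in for the __user pointer being copied from:

	char name[ASHMEM_NAME_LEN];
	long len;

	len = strncpy_from_user(name, user_ptr, sizeof(name));
	if (len < 0)
		return len;			/* e.g. -EFAULT */
	if (len == sizeof(name))
		name[sizeof(name) - 1] = '\0';	/* source was longer, force termination */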
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
new file mode 100644
index 000000000000..0f8fec1f84e5
--- /dev/null
+++ b/drivers/staging/android/ion/Kconfig
@@ -0,0 +1,35 @@
+menuconfig ION
+ bool "Ion Memory Manager"
+ depends on HAVE_MEMBLOCK
+ select GENERIC_ALLOCATOR
+ select DMA_SHARED_BUFFER
+ ---help---
+ Choose this option to enable the ION Memory Manager,
+ used by Android to efficiently allocate buffers
+ from userspace that can be shared between drivers.
+ If you're not using Android it's probably safe to
+ say N here.
+
+config ION_TEST
+ tristate "Ion Test Device"
+ depends on ION
+ help
+ Choose this option to create a device that can be used to test the
+ kernel and device side ION functions.
+
+config ION_DUMMY
+ bool "Dummy Ion driver"
+ depends on ION
+ help
+ Provides a dummy ION driver that registers the
+ /dev/ion device and some basic heaps. This can
+ be used for testing the ION infrastructure if
+ one doesn't have access to hardware drivers that
+ use ION.
+
+config ION_TEGRA
+ tristate "Ion for Tegra"
+ depends on ARCH_TEGRA && ION
+ help
+ Choose this option if you wish to use ion on an nVidia Tegra.
+
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
new file mode 100644
index 000000000000..b56fd2bf2b4f
--- /dev/null
+++ b/drivers/staging/android/ion/Makefile
@@ -0,0 +1,10 @@
+obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
+ ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
+obj-$(CONFIG_ION_TEST) += ion_test.o
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_ION) += compat_ion.o
+endif
+
+obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
+obj-$(CONFIG_ION_TEGRA) += tegra/
+
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
new file mode 100644
index 000000000000..ee3a7380e53b
--- /dev/null
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -0,0 +1,195 @@
+/*
+ * drivers/staging/android/ion/compat_ion.c
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "ion.h"
+#include "compat_ion.h"
+
+/* See drivers/staging/android/uapi/ion.h for the definition of these structs */
+struct compat_ion_allocation_data {
+ compat_size_t len;
+ compat_size_t align;
+ compat_uint_t heap_id_mask;
+ compat_uint_t flags;
+ compat_int_t handle;
+};
+
+struct compat_ion_custom_data {
+ compat_uint_t cmd;
+ compat_ulong_t arg;
+};
+
+struct compat_ion_handle_data {
+ compat_int_t handle;
+};
+
+#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
+ struct compat_ion_allocation_data)
+#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \
+ struct compat_ion_handle_data)
+#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
+ struct compat_ion_custom_data)
+
+static int compat_get_ion_allocation_data(
+ struct compat_ion_allocation_data __user *data32,
+ struct ion_allocation_data __user *data)
+{
+ compat_size_t s;
+ compat_uint_t u;
+ compat_int_t i;
+ int err;
+
+ err = get_user(s, &data32->len);
+ err |= put_user(s, &data->len);
+ err |= get_user(s, &data32->align);
+ err |= put_user(s, &data->align);
+ err |= get_user(u, &data32->heap_id_mask);
+ err |= put_user(u, &data->heap_id_mask);
+ err |= get_user(u, &data32->flags);
+ err |= put_user(u, &data->flags);
+ err |= get_user(i, &data32->handle);
+ err |= put_user(i, &data->handle);
+
+ return err;
+}
+
+static int compat_get_ion_handle_data(
+ struct compat_ion_handle_data __user *data32,
+ struct ion_handle_data __user *data)
+{
+ compat_int_t i;
+ int err;
+
+ err = get_user(i, &data32->handle);
+ err |= put_user(i, &data->handle);
+
+ return err;
+}
+
+static int compat_put_ion_allocation_data(
+ struct compat_ion_allocation_data __user *data32,
+ struct ion_allocation_data __user *data)
+{
+ compat_size_t s;
+ compat_uint_t u;
+ compat_int_t i;
+ int err;
+
+ err = get_user(s, &data->len);
+ err |= put_user(s, &data32->len);
+ err |= get_user(s, &data->align);
+ err |= put_user(s, &data32->align);
+ err |= get_user(u, &data->heap_id_mask);
+ err |= put_user(u, &data32->heap_id_mask);
+ err |= get_user(u, &data->flags);
+ err |= put_user(u, &data32->flags);
+ err |= get_user(i, &data->handle);
+ err |= put_user(i, &data32->handle);
+
+ return err;
+}
+
+static int compat_get_ion_custom_data(
+ struct compat_ion_custom_data __user *data32,
+ struct ion_custom_data __user *data)
+{
+ compat_uint_t cmd;
+ compat_ulong_t arg;
+ int err;
+
+ err = get_user(cmd, &data32->cmd);
+ err |= put_user(cmd, &data->cmd);
+ err |= get_user(arg, &data32->arg);
+ err |= put_user(arg, &data->arg);
+
+ return err;
+};
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case COMPAT_ION_IOC_ALLOC:
+ {
+ struct compat_ion_allocation_data __user *data32;
+ struct ion_allocation_data __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_ion_allocation_data(data32, data);
+ if (err)
+ return err;
+ ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
+ (unsigned long)data);
+ err = compat_put_ion_allocation_data(data32, data);
+ return ret ? ret : err;
+ }
+ case COMPAT_ION_IOC_FREE:
+ {
+ struct compat_ion_handle_data __user *data32;
+ struct ion_handle_data __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_ion_handle_data(data32, data);
+ if (err)
+ return err;
+
+ return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
+ (unsigned long)data);
+ }
+ case COMPAT_ION_IOC_CUSTOM: {
+ struct compat_ion_custom_data __user *data32;
+ struct ion_custom_data __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_ion_custom_data(data32, data);
+ if (err)
+ return err;
+
+ return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
+ (unsigned long)data);
+ }
+ case ION_IOC_SHARE:
+ case ION_IOC_MAP:
+ case ION_IOC_IMPORT:
+ case ION_IOC_SYNC:
+ return filp->f_op->unlocked_ioctl(filp, cmd,
+ (unsigned long)compat_ptr(arg));
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
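Each 32-bit ioctl above follows the same three steps: copy the compat-layout structure in from userspace, repack it field by field into a native-layout copy obtained with compat_alloc_user_space(), then forward to the driver's own unlocked_ioctl with the new pointer (ION_IOC_ALLOC additionally copies the result back). In outline:

	/*
	 * data32 = compat_ptr(arg);                        32-bit layout from user
	 * data   = compat_alloc_user_space(sizeof(*data)); native layout, user stack
	 * compat_get_ion_*(data32, data);                   field-by-field repack
	 * filp->f_op->unlocked_ioctl(filp, CMD, data);      reuse the native handler
	 */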
diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h
new file mode 100644
index 000000000000..c2ad5893dfda
--- /dev/null
+++ b/drivers/staging/android/ion/compat_ion.h
@@ -0,0 +1,30 @@
+/*
+
+ * drivers/staging/android/ion/compat_ion.h
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_COMPAT_ION_H
+#define _LINUX_COMPAT_ION_H
+
+#if IS_ENABLED(CONFIG_COMPAT)
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#else
+
+#define compat_ion_ioctl NULL
+
+#endif /* CONFIG_COMPAT */
+#endif /* _LINUX_COMPAT_ION_H */
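Defining compat_ion_ioctl as NULL when CONFIG_COMPAT is disabled lets it be assigned unconditionally in the driver's file_operations. A sketch of that wiring, assuming the ion_ioctl/ion_open/ion_release handler names that ion.c uses outside this excerpt:

	static const struct file_operations ion_fops = {
		.owner		= THIS_MODULE,
		.open		= ion_open,
		.release	= ion_release,
		.unlocked_ioctl	= ion_ioctl,
		.compat_ioctl	= compat_ion_ioctl,	/* NULL without CONFIG_COMPAT */
	};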
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
new file mode 100644
index 000000000000..574066ff73f8
--- /dev/null
+++ b/drivers/staging/android/ion/ion.c
@@ -0,0 +1,1549 @@
+/*
+
+ * drivers/staging/android/ion/ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/miscdevice.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/idr.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+#include "compat_ion.h"
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev: the actual misc device
+ * @buffers: an rb tree of all the existing buffers
+ * @buffer_lock: lock protecting the tree of buffers
+ * @lock: rwsem protecting the tree of heaps and clients
+ * @heaps: list of all the heaps in the system
+ * @user_clients: list of all the clients created from userspace
+ */
+struct ion_device {
+ struct miscdevice dev;
+ struct rb_root buffers;
+ struct mutex buffer_lock;
+ struct rw_semaphore lock;
+ struct plist_head heaps;
+ long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
+ unsigned long arg);
+ struct rb_root clients;
+ struct dentry *debug_root;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node: node in the tree of all clients
+ * @dev: backpointer to ion device
+ * @handles: an rb tree of all the handles in this client
+ * @idr: an idr space for allocating handle ids
+ * @lock: lock protecting the tree of handles
+ * @name: used for debugging
+ * @task: used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both handles tree
+ * as well as the handles themselves, and should be held while modifying either.
+ */
+struct ion_client {
+ struct rb_node node;
+ struct ion_device *dev;
+ struct rb_root handles;
+ struct idr idr;
+ struct mutex lock;
+ const char *name;
+ struct task_struct *task;
+ pid_t pid;
+ struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref: reference count
+ * @client: back pointer to the client the buffer resides in
+ * @buffer: pointer to the buffer
+ * @node: node in the client's handle rbtree
+ * @kmap_cnt: count of times this client has mapped to kernel
+ * @id: client-unique id allocated by client->idr
+ *
+ * Modifications to node, map_cnt or mapping should be protected by the
+ * lock in the client. Other fields are never changed after initialization.
+ */
+struct ion_handle {
+ struct kref ref;
+ struct ion_client *client;
+ struct ion_buffer *buffer;
+ struct rb_node node;
+ unsigned int kmap_cnt;
+ int id;
+};
+
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
+{
+ return (buffer->flags & ION_FLAG_CACHED) &&
+ !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
+}
+
+bool ion_buffer_cached(struct ion_buffer *buffer)
+{
+ return !!(buffer->flags & ION_FLAG_CACHED);
+}
+
+static inline struct page *ion_buffer_page(struct page *page)
+{
+ return (struct page *)((unsigned long)page & ~(1UL));
+}
+
+static inline bool ion_buffer_page_is_dirty(struct page *page)
+{
+ return !!((unsigned long)page & 1UL);
+}
+
+static inline void ion_buffer_page_dirty(struct page **page)
+{
+ *page = (struct page *)((unsigned long)(*page) | 1UL);
+}
+
+static inline void ion_buffer_page_clean(struct page **page)
+{
+ *page = (struct page *)((unsigned long)(*page) & ~(1UL));
+}
+
+/* this function should only be called while dev->lock is held */
+static void ion_buffer_add(struct ion_device *dev,
+ struct ion_buffer *buffer)
+{
+ struct rb_node **p = &dev->buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_buffer *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_buffer, node);
+
+ if (buffer < entry) {
+ p = &(*p)->rb_left;
+ } else if (buffer > entry) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: buffer already found.", __func__);
+ BUG();
+ }
+ }
+
+ rb_link_node(&buffer->node, parent, p);
+ rb_insert_color(&buffer->node, &dev->buffers);
+}
+
+/* this function should only be called while dev->lock is held */
+static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+ struct ion_device *dev,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags)
+{
+ struct ion_buffer *buffer;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int i, ret;
+
+ buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+
+ buffer->heap = heap;
+ buffer->flags = flags;
+ kref_init(&buffer->ref);
+
+ ret = heap->ops->allocate(heap, buffer, len, align, flags);
+
+ if (ret) {
+ if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
+ goto err2;
+
+ ion_heap_freelist_drain(heap, 0);
+ ret = heap->ops->allocate(heap, buffer, len, align,
+ flags);
+ if (ret)
+ goto err2;
+ }
+
+ buffer->dev = dev;
+ buffer->size = len;
+
+ table = heap->ops->map_dma(heap, buffer);
+ if (WARN_ONCE(table == NULL,
+ "heap->ops->map_dma should return ERR_PTR on error"))
+ table = ERR_PTR(-EINVAL);
+ if (IS_ERR(table)) {
+ heap->ops->free(buffer);
+ kfree(buffer);
+ return ERR_PTR(PTR_ERR(table));
+ }
+ buffer->sg_table = table;
+ if (ion_buffer_fault_user_mappings(buffer)) {
+ int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ struct scatterlist *sg;
+ int i, j, k = 0;
+
+ buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
+ if (!buffer->pages) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+
+ for (j = 0; j < sg->length / PAGE_SIZE; j++)
+ buffer->pages[k++] = page++;
+ }
+
+ if (ret)
+ goto err;
+ }
+
+ buffer->dev = dev;
+ buffer->size = len;
+ INIT_LIST_HEAD(&buffer->vmas);
+ mutex_init(&buffer->lock);
+ /* this will set up dma addresses for the sglist -- it is not
+ technically correct as per the dma api -- a specific
+ device isn't really taking ownership here. However, in practice on
+ our systems the only dma_address space is physical addresses.
+ Additionally, we can't afford the overhead of invalidating every
+ allocation via dma_map_sg. The implicit contract here is that
+ memory coming from the heaps is ready for dma, i.e. if it has a
+ cached mapping that mapping has been invalidated */
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
+ sg_dma_address(sg) = sg_phys(sg);
+ mutex_lock(&dev->buffer_lock);
+ ion_buffer_add(dev, buffer);
+ mutex_unlock(&dev->buffer_lock);
+ return buffer;
+
+err:
+ heap->ops->unmap_dma(heap, buffer);
+ heap->ops->free(buffer);
+err1:
+ if (buffer->pages)
+ vfree(buffer->pages);
+err2:
+ kfree(buffer);
+ return ERR_PTR(ret);
+}
+
+void ion_buffer_destroy(struct ion_buffer *buffer)
+{
+ if (WARN_ON(buffer->kmap_cnt > 0))
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+ buffer->heap->ops->free(buffer);
+ if (buffer->pages)
+ vfree(buffer->pages);
+ kfree(buffer);
+}
+
+static void _ion_buffer_destroy(struct kref *kref)
+{
+ struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+ struct ion_heap *heap = buffer->heap;
+ struct ion_device *dev = buffer->dev;
+
+ mutex_lock(&dev->buffer_lock);
+ rb_erase(&buffer->node, &dev->buffers);
+ mutex_unlock(&dev->buffer_lock);
+
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ ion_heap_freelist_add(heap, buffer);
+ else
+ ion_buffer_destroy(buffer);
+}
+
+static void ion_buffer_get(struct ion_buffer *buffer)
+{
+ kref_get(&buffer->ref);
+}
+
+static int ion_buffer_put(struct ion_buffer *buffer)
+{
+ return kref_put(&buffer->ref, _ion_buffer_destroy);
+}
+
+static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
+{
+ mutex_lock(&buffer->lock);
+ buffer->handle_count++;
+ mutex_unlock(&buffer->lock);
+}
+
+static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
+{
+ /*
+ * when a buffer is removed from a handle, if it is not in
+ * any other handles, copy the taskcomm and the pid of the
+ * process it's being removed from into the buffer. At this
+ * point there will be no way to track what processes this buffer is
+ * being used by, it only exists as a dma_buf file descriptor.
+ * The taskcomm and pid can provide a debug hint as to where this fd
+ * is in the system
+ */
+ mutex_lock(&buffer->lock);
+ buffer->handle_count--;
+ BUG_ON(buffer->handle_count < 0);
+ if (!buffer->handle_count) {
+ struct task_struct *task;
+
+ task = current->group_leader;
+ get_task_comm(buffer->task_comm, task);
+ buffer->pid = task_pid_nr(task);
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+static struct ion_handle *ion_handle_create(struct ion_client *client,
+ struct ion_buffer *buffer)
+{
+ struct ion_handle *handle;
+
+ handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+ kref_init(&handle->ref);
+ RB_CLEAR_NODE(&handle->node);
+ handle->client = client;
+ ion_buffer_get(buffer);
+ ion_buffer_add_to_handle(buffer);
+ handle->buffer = buffer;
+
+ return handle;
+}
+
+static void ion_handle_kmap_put(struct ion_handle *);
+
+static void ion_handle_destroy(struct kref *kref)
+{
+ struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
+ struct ion_client *client = handle->client;
+ struct ion_buffer *buffer = handle->buffer;
+
+ mutex_lock(&buffer->lock);
+ while (handle->kmap_cnt)
+ ion_handle_kmap_put(handle);
+ mutex_unlock(&buffer->lock);
+
+ idr_remove(&client->idr, handle->id);
+ if (!RB_EMPTY_NODE(&handle->node))
+ rb_erase(&handle->node, &client->handles);
+
+ ion_buffer_remove_from_handle(buffer);
+ ion_buffer_put(buffer);
+
+ kfree(handle);
+}
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+ return handle->buffer;
+}
+
+static void ion_handle_get(struct ion_handle *handle)
+{
+ kref_get(&handle->ref);
+}
+
+static int ion_handle_put(struct ion_handle *handle)
+{
+ struct ion_client *client = handle->client;
+ int ret;
+
+ mutex_lock(&client->lock);
+ ret = kref_put(&handle->ref, ion_handle_destroy);
+ mutex_unlock(&client->lock);
+
+ return ret;
+}
+
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+ struct ion_buffer *buffer)
+{
+ struct rb_node *n = client->handles.rb_node;
+
+ while (n) {
+ struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
+ if (buffer < entry->buffer)
+ n = n->rb_left;
+ else if (buffer > entry->buffer)
+ n = n->rb_right;
+ else
+ return entry;
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+ int id)
+{
+ struct ion_handle *handle;
+
+ mutex_lock(&client->lock);
+ handle = idr_find(&client->idr, id);
+ if (handle)
+ ion_handle_get(handle);
+ mutex_unlock(&client->lock);
+
+ return handle ? handle : ERR_PTR(-EINVAL);
+}
+
+static bool ion_handle_validate(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ WARN_ON(!mutex_is_locked(&client->lock));
+ return (idr_find(&client->idr, handle->id) == handle);
+}
+
+static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+ int id;
+ struct rb_node **p = &client->handles.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_handle *entry;
+
+ id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ handle->id = id;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_handle, node);
+
+ if (handle->buffer < entry->buffer)
+ p = &(*p)->rb_left;
+ else if (handle->buffer > entry->buffer)
+ p = &(*p)->rb_right;
+ else
+ WARN(1, "%s: buffer already found.", __func__);
+ }
+
+ rb_link_node(&handle->node, parent, p);
+ rb_insert_color(&handle->node, &client->handles);
+
+ return 0;
+}
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ size_t align, unsigned int heap_id_mask,
+ unsigned int flags)
+{
+ struct ion_handle *handle;
+ struct ion_device *dev = client->dev;
+ struct ion_buffer *buffer = NULL;
+ struct ion_heap *heap;
+ int ret;
+
+ pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
+ len, align, heap_id_mask, flags);
+ /*
+ * traverse the list of heaps available in this system in priority
+ * order. If the heap type is supported by the client, and matches the
+ * request of the caller allocate from it. Repeat until allocate has
+ * succeeded or all heaps have been tried
+ */
+ len = PAGE_ALIGN(len);
+
+ if (!len)
+ return ERR_PTR(-EINVAL);
+
+ down_read(&dev->lock);
+ plist_for_each_entry(heap, &dev->heaps, node) {
+ /* if the caller didn't specify this heap id */
+ if (!((1 << heap->id) & heap_id_mask))
+ continue;
+ buffer = ion_buffer_create(heap, dev, len, align, flags);
+ if (!IS_ERR(buffer))
+ break;
+ }
+ up_read(&dev->lock);
+
+ if (buffer == NULL)
+ return ERR_PTR(-ENODEV);
+
+ if (IS_ERR(buffer))
+ return ERR_PTR(PTR_ERR(buffer));
+
+ handle = ion_handle_create(client, buffer);
+
+ /*
+ * ion_buffer_create will create a buffer with a ref_cnt of 1,
+ * and ion_handle_create will take a second reference, drop one here
+ */
+ ion_buffer_put(buffer);
+
+ if (IS_ERR(handle))
+ return handle;
+
+ mutex_lock(&client->lock);
+ ret = ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ if (ret) {
+ ion_handle_put(handle);
+ handle = ERR_PTR(ret);
+ }
+
+ return handle;
+}
+EXPORT_SYMBOL(ion_alloc);
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+ bool valid_handle;
+
+ BUG_ON(client != handle->client);
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to free.\n", __func__);
+ mutex_unlock(&client->lock);
+ return;
+ }
+ mutex_unlock(&client->lock);
+ ion_handle_put(handle);
+}
+EXPORT_SYMBOL(ion_free);
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct ion_buffer *buffer;
+ int ret;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+
+ if (!buffer->heap->ops->phys) {
+ pr_err("%s: ion_phys is not implemented by this heap.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return -ENODEV;
+ }
+ mutex_unlock(&client->lock);
+ ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+ return ret;
+}
+EXPORT_SYMBOL(ion_phys);
+
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
+{
+ void *vaddr;
+
+ if (buffer->kmap_cnt) {
+ buffer->kmap_cnt++;
+ return buffer->vaddr;
+ }
+ vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+ if (WARN_ONCE(vaddr == NULL,
+ "heap->ops->map_kernel should return ERR_PTR on error"))
+ return ERR_PTR(-EINVAL);
+ if (IS_ERR(vaddr))
+ return vaddr;
+ buffer->vaddr = vaddr;
+ buffer->kmap_cnt++;
+ return vaddr;
+}
+
+static void *ion_handle_kmap_get(struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+ void *vaddr;
+
+ if (handle->kmap_cnt) {
+ handle->kmap_cnt++;
+ return buffer->vaddr;
+ }
+ vaddr = ion_buffer_kmap_get(buffer);
+ if (IS_ERR(vaddr))
+ return vaddr;
+ handle->kmap_cnt++;
+ return vaddr;
+}
+
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+ buffer->kmap_cnt--;
+ if (!buffer->kmap_cnt) {
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->vaddr = NULL;
+ }
+}
+
+static void ion_handle_kmap_put(struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+
+ handle->kmap_cnt--;
+ if (!handle->kmap_cnt)
+ ion_buffer_kmap_put(buffer);
+}
+
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ void *vaddr;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_kernel.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+
+ buffer = handle->buffer;
+
+ if (!handle->buffer->heap->ops->map_kernel) {
+ pr_err("%s: map_kernel is not implemented by this heap.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&buffer->lock);
+ vaddr = ion_handle_kmap_get(handle);
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+ return vaddr;
+}
+EXPORT_SYMBOL(ion_map_kernel);
+
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+
+ mutex_lock(&client->lock);
+ buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+ ion_handle_kmap_put(handle);
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_unmap_kernel);
+
+static int ion_debug_client_show(struct seq_file *s, void *unused)
+{
+ struct ion_client *client = s->private;
+ struct rb_node *n;
+ size_t sizes[ION_NUM_HEAP_IDS] = {0};
+ const char *names[ION_NUM_HEAP_IDS] = {NULL};
+ int i;
+
+ mutex_lock(&client->lock);
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ struct ion_handle *handle = rb_entry(n, struct ion_handle,
+ node);
+ unsigned int id = handle->buffer->heap->id;
+
+ if (!names[id])
+ names[id] = handle->buffer->heap->name;
+ sizes[id] += handle->buffer->size;
+ }
+ mutex_unlock(&client->lock);
+
+ seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
+ for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
+ if (!names[i])
+ continue;
+ seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
+ }
+ return 0;
+}
+
+static int ion_debug_client_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ion_debug_client_show, inode->i_private);
+}
+
+static const struct file_operations debug_client_fops = {
+ .open = ion_debug_client_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+struct ion_client *ion_client_create(struct ion_device *dev,
+ const char *name)
+{
+ struct ion_client *client;
+ struct task_struct *task;
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct ion_client *entry;
+ char debug_name[64];
+ pid_t pid;
+
+ get_task_struct(current->group_leader);
+ task_lock(current->group_leader);
+ pid = task_pid_nr(current->group_leader);
+ /* don't bother to store task struct for kernel threads,
+ they can't be killed anyway */
+ if (current->group_leader->flags & PF_KTHREAD) {
+ put_task_struct(current->group_leader);
+ task = NULL;
+ } else {
+ task = current->group_leader;
+ }
+ task_unlock(current->group_leader);
+
+ client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
+ if (!client) {
+ if (task)
+ put_task_struct(current->group_leader);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ client->dev = dev;
+ client->handles = RB_ROOT;
+ idr_init(&client->idr);
+ mutex_init(&client->lock);
+ client->name = name;
+ client->task = task;
+ client->pid = pid;
+
+ down_write(&dev->lock);
+ p = &dev->clients.rb_node;
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_client, node);
+
+ if (client < entry)
+ p = &(*p)->rb_left;
+ else if (client > entry)
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&client->node, parent, p);
+ rb_insert_color(&client->node, &dev->clients);
+
+ snprintf(debug_name, 64, "%u", client->pid);
+ client->debug_root = debugfs_create_file(debug_name, 0664,
+ dev->debug_root, client,
+ &debug_client_fops);
+ up_write(&dev->lock);
+
+ return client;
+}
+EXPORT_SYMBOL(ion_client_create);
+
+void ion_client_destroy(struct ion_client *client)
+{
+ struct ion_device *dev = client->dev;
+ struct rb_node *n;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ while ((n = rb_first(&client->handles))) {
+ struct ion_handle *handle = rb_entry(n, struct ion_handle,
+ node);
+ ion_handle_destroy(&handle->ref);
+ }
+
+ idr_destroy(&client->idr);
+
+ down_write(&dev->lock);
+ if (client->task)
+ put_task_struct(client->task);
+ rb_erase(&client->node, &dev->clients);
+ debugfs_remove_recursive(client->debug_root);
+ up_write(&dev->lock);
+
+ kfree(client);
+}
+EXPORT_SYMBOL(ion_client_destroy);
+
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ struct sg_table *table;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_dma.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = handle->buffer;
+ table = buffer->sg_table;
+ mutex_unlock(&client->lock);
+ return table;
+}
+EXPORT_SYMBOL(ion_sg_table);
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+ struct device *dev,
+ enum dma_data_direction direction);
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_buf *dmabuf = attachment->dmabuf;
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ ion_buffer_sync_for_device(buffer, attachment->dev, direction);
+ return buffer->sg_table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+}
+
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+ size_t size, enum dma_data_direction dir)
+{
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, size, 0);
+ /*
+ * This is not correct - sg_dma_address needs a dma_addr_t that is valid
+ * for the targeted device, but this works on the currently targeted
+ * hardware.
+ */
+ sg_dma_address(&sg) = page_to_phys(page);
+ dma_sync_sg_for_device(dev, &sg, 1, dir);
+}
+
+struct ion_vma_list {
+ struct list_head list;
+ struct vm_area_struct *vma;
+};
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+ struct device *dev,
+ enum dma_data_direction dir)
+{
+ struct ion_vma_list *vma_list;
+ int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ int i;
+
+ pr_debug("%s: syncing for device %s\n", __func__,
+ dev ? dev_name(dev) : "null");
+
+ if (!ion_buffer_fault_user_mappings(buffer))
+ return;
+
+ mutex_lock(&buffer->lock);
+ for (i = 0; i < pages; i++) {
+ struct page *page = buffer->pages[i];
+
+ if (ion_buffer_page_is_dirty(page))
+ ion_pages_sync_for_device(dev, ion_buffer_page(page),
+ PAGE_SIZE, dir);
+
+ ion_buffer_page_clean(buffer->pages + i);
+ }
+ list_for_each_entry(vma_list, &buffer->vmas, list) {
+ struct vm_area_struct *vma = vma_list->vma;
+
+ zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
+ NULL);
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ unsigned long pfn;
+ int ret;
+
+ mutex_lock(&buffer->lock);
+ ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
+ BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
+
+ pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
+ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ mutex_unlock(&buffer->lock);
+ if (ret)
+ return VM_FAULT_ERROR;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static void ion_vm_open(struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct ion_vma_list *vma_list;
+
+ vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
+ if (!vma_list)
+ return;
+ vma_list->vma = vma;
+ mutex_lock(&buffer->lock);
+ list_add(&vma_list->list, &buffer->vmas);
+ mutex_unlock(&buffer->lock);
+ pr_debug("%s: adding %p\n", __func__, vma);
+}
+
+static void ion_vm_close(struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct ion_vma_list *vma_list, *tmp;
+
+ pr_debug("%s\n", __func__);
+ mutex_lock(&buffer->lock);
+ list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
+ if (vma_list->vma != vma)
+ continue;
+ list_del(&vma_list->list);
+ kfree(vma_list);
+ pr_debug("%s: deleting %p\n", __func__, vma);
+ break;
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+static struct vm_operations_struct ion_vma_ops = {
+ .open = ion_vm_open,
+ .close = ion_vm_close,
+ .fault = ion_vm_fault,
+};
+
+static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ int ret = 0;
+
+ if (!buffer->heap->ops->map_user) {
+ pr_err("%s: this heap does not define a method for mapping "
+ "to userspace\n", __func__);
+ return -EINVAL;
+ }
+
+ if (ion_buffer_fault_user_mappings(buffer)) {
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
+ VM_DONTDUMP;
+ vma->vm_private_data = buffer;
+ vma->vm_ops = &ion_vma_ops;
+ ion_vm_open(vma);
+ return 0;
+ }
+
+ if (!(buffer->flags & ION_FLAG_CACHED))
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ mutex_lock(&buffer->lock);
+ /* now map it to userspace */
+ ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+ mutex_unlock(&buffer->lock);
+
+ if (ret)
+ pr_err("%s: failure mapping buffer to userspace\n",
+ __func__);
+
+ return ret;
+}
+
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ ion_buffer_put(buffer);
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ return buffer->vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+ void *ptr)
+{
+ return;
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+
+ if (!buffer->heap->ops->map_kernel) {
+ pr_err("%s: map kernel is not implemented by this heap.\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&buffer->lock);
+ vaddr = ion_buffer_kmap_get(buffer);
+ mutex_unlock(&buffer->lock);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+ return 0;
+}
+
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ ion_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+}
+
+static struct dma_buf_ops dma_buf_ops = {
+ .map_dma_buf = ion_map_dma_buf,
+ .unmap_dma_buf = ion_unmap_dma_buf,
+ .mmap = ion_mmap,
+ .release = ion_dma_buf_release,
+ .begin_cpu_access = ion_dma_buf_begin_cpu_access,
+ .end_cpu_access = ion_dma_buf_end_cpu_access,
+ .kmap_atomic = ion_dma_buf_kmap,
+ .kunmap_atomic = ion_dma_buf_kunmap,
+ .kmap = ion_dma_buf_kmap,
+ .kunmap = ion_dma_buf_kunmap,
+};
+
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ struct dma_buf *dmabuf;
+ bool valid_handle;
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to share.\n", __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = handle->buffer;
+ ion_buffer_get(buffer);
+ mutex_unlock(&client->lock);
+
+ dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
+ if (IS_ERR(dmabuf)) {
+ ion_buffer_put(buffer);
+ return dmabuf;
+ }
+
+ return dmabuf;
+}
+EXPORT_SYMBOL(ion_share_dma_buf);
+
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
+{
+ struct dma_buf *dmabuf;
+ int fd;
+
+ dmabuf = ion_share_dma_buf(client, handle);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+ if (fd < 0)
+ dma_buf_put(dmabuf);
+
+ return fd;
+}
+EXPORT_SYMBOL(ion_share_dma_buf_fd);
+
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+ struct dma_buf *dmabuf;
+ struct ion_buffer *buffer;
+ struct ion_handle *handle;
+ int ret;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return ERR_PTR(PTR_ERR(dmabuf));
+ /* if this memory came from ion */
+
+ if (dmabuf->ops != &dma_buf_ops) {
+ pr_err("%s: can not import dmabuf from another exporter\n",
+ __func__);
+ dma_buf_put(dmabuf);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = dmabuf->priv;
+
+ mutex_lock(&client->lock);
+ /* if a handle exists for this buffer just take a reference to it */
+ handle = ion_handle_lookup(client, buffer);
+ if (!IS_ERR(handle)) {
+ ion_handle_get(handle);
+ mutex_unlock(&client->lock);
+ goto end;
+ }
+ mutex_unlock(&client->lock);
+
+ handle = ion_handle_create(client, buffer);
+ if (IS_ERR(handle))
+ goto end;
+
+ mutex_lock(&client->lock);
+ ret = ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ if (ret) {
+ ion_handle_put(handle);
+ handle = ERR_PTR(ret);
+ }
+
+end:
+ dma_buf_put(dmabuf);
+ return handle;
+}
+EXPORT_SYMBOL(ion_import_dma_buf);
+
+static int ion_sync_for_device(struct ion_client *client, int fd)
+{
+ struct dma_buf *dmabuf;
+ struct ion_buffer *buffer;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ /* if this memory came from ion */
+ if (dmabuf->ops != &dma_buf_ops) {
+ pr_err("%s: can not sync dmabuf from another exporter\n",
+ __func__);
+ dma_buf_put(dmabuf);
+ return -EINVAL;
+ }
+ buffer = dmabuf->priv;
+
+ dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+ dma_buf_put(dmabuf);
+ return 0;
+}
+
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
+{
+ switch (cmd) {
+ case ION_IOC_SYNC:
+ case ION_IOC_FREE:
+ case ION_IOC_CUSTOM:
+ return _IOC_WRITE;
+ default:
+ return _IOC_DIR(cmd);
+ }
+}
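+
+/*
+ * Editor's note (illustrative, not part of the original patch): the uapi
+ * presumably encodes commands such as ION_IOC_FREE as _IOWR() even though
+ * data only flows into the kernel; ion_ioctl_dir() narrows them to
+ * _IOC_WRITE so the copy_to_user() in ion_ioctl() below is skipped for them.
+ */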
+
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct ion_client *client = filp->private_data;
+ struct ion_device *dev = client->dev;
+ struct ion_handle *cleanup_handle = NULL;
+ int ret = 0;
+ unsigned int dir;
+
+ union {
+ struct ion_fd_data fd;
+ struct ion_allocation_data allocation;
+ struct ion_handle_data handle;
+ struct ion_custom_data custom;
+ } data;
+
+ dir = ion_ioctl_dir(cmd);
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ if (dir & _IOC_WRITE)
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ION_IOC_ALLOC:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_alloc(client, data.allocation.len,
+ data.allocation.align,
+ data.allocation.heap_id_mask,
+ data.allocation.flags);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ data.allocation.handle = handle->id;
+
+ cleanup_handle = handle;
+ break;
+ }
+ case ION_IOC_FREE:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_handle_get_by_id(client, data.handle.handle);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ ion_free(client, handle);
+ ion_handle_put(handle);
+ break;
+ }
+ case ION_IOC_SHARE:
+ case ION_IOC_MAP:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_handle_get_by_id(client, data.handle.handle);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ data.fd.fd = ion_share_dma_buf_fd(client, handle);
+ ion_handle_put(handle);
+ if (data.fd.fd < 0)
+ ret = data.fd.fd;
+ break;
+ }
+ case ION_IOC_IMPORT:
+ {
+ struct ion_handle *handle;
+ handle = ion_import_dma_buf(client, data.fd.fd);
+ if (IS_ERR(handle))
+ ret = PTR_ERR(handle);
+ else
+ data.handle.handle = handle->id;
+ break;
+ }
+ case ION_IOC_SYNC:
+ {
+ ret = ion_sync_for_device(client, data.fd.fd);
+ break;
+ }
+ case ION_IOC_CUSTOM:
+ {
+ if (!dev->custom_ioctl)
+ return -ENOTTY;
+ ret = dev->custom_ioctl(client, data.custom.cmd,
+ data.custom.arg);
+ break;
+ }
+ default:
+ return -ENOTTY;
+ }
+
+ if (dir & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+ if (cleanup_handle)
+ ion_free(client, cleanup_handle);
+ return -EFAULT;
+ }
+ }
+ return ret;
+}
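+
+/*
+ * Editor's sketch (not part of the original patch): a minimal userspace
+ * caller of the ioctl interface above, assuming the command numbers and
+ * struct layouts from ../uapi/ion.h; error handling omitted.
+ *
+ *	int ion_fd = open("/dev/ion", O_RDWR);
+ *	struct ion_allocation_data alloc = {
+ *		.len = 4096,
+ *		.align = 4096,
+ *		.heap_id_mask = 1 << my_heap_id,  // hypothetical heap id
+ *		.flags = ION_FLAG_CACHED,
+ *	};
+ *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);    // alloc.handle now holds the handle
+ *
+ *	struct ion_fd_data share = { .handle = alloc.handle };
+ *	ioctl(ion_fd, ION_IOC_SHARE, &share);    // share.fd is a dma-buf fd
+ *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *		       share.fd, 0);
+ */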
+
+static int ion_release(struct inode *inode, struct file *file)
+{
+ struct ion_client *client = file->private_data;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ ion_client_destroy(client);
+ return 0;
+}
+
+static int ion_open(struct inode *inode, struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+ struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
+ struct ion_client *client;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ client = ion_client_create(dev, "user");
+ if (IS_ERR(client))
+ return PTR_ERR(client);
+ file->private_data = client;
+
+ return 0;
+}
+
+static const struct file_operations ion_fops = {
+ .owner = THIS_MODULE,
+ .open = ion_open,
+ .release = ion_release,
+ .unlocked_ioctl = ion_ioctl,
+ .compat_ioctl = compat_ion_ioctl,
+};
+
+static size_t ion_debug_heap_total(struct ion_client *client,
+ unsigned int id)
+{
+ size_t size = 0;
+ struct rb_node *n;
+
+ mutex_lock(&client->lock);
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ struct ion_handle *handle = rb_entry(n,
+ struct ion_handle,
+ node);
+ if (handle->buffer->heap->id == id)
+ size += handle->buffer->size;
+ }
+ mutex_unlock(&client->lock);
+ return size;
+}
+
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+ struct ion_heap *heap = s->private;
+ struct ion_device *dev = heap->dev;
+ struct rb_node *n;
+ size_t total_size = 0;
+ size_t total_orphaned_size = 0;
+
+ seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
+ seq_printf(s, "----------------------------------------------------\n");
+
+ for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+ struct ion_client *client = rb_entry(n, struct ion_client,
+ node);
+ size_t size = ion_debug_heap_total(client, heap->id);
+ if (!size)
+ continue;
+ if (client->task) {
+ char task_comm[TASK_COMM_LEN];
+
+ get_task_comm(task_comm, client->task);
+ seq_printf(s, "%16.s %16u %16zu\n", task_comm,
+ client->pid, size);
+ } else {
+ seq_printf(s, "%16.s %16u %16zu\n", client->name,
+ client->pid, size);
+ }
+ }
+ seq_printf(s, "----------------------------------------------------\n");
+ seq_printf(s, "orphaned allocations (info is from last known client):"
+ "\n");
+ mutex_lock(&dev->buffer_lock);
+ for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+ struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+ node);
+ if (buffer->heap->id != heap->id)
+ continue;
+ total_size += buffer->size;
+ if (!buffer->handle_count) {
+ seq_printf(s, "%16.s %16u %16zu %d %d\n",
+ buffer->task_comm, buffer->pid,
+ buffer->size, buffer->kmap_cnt,
+ atomic_read(&buffer->ref.refcount));
+ total_orphaned_size += buffer->size;
+ }
+ }
+ mutex_unlock(&dev->buffer_lock);
+ seq_printf(s, "----------------------------------------------------\n");
+ seq_printf(s, "%16.s %16zu\n", "total orphaned",
+ total_orphaned_size);
+ seq_printf(s, "%16.s %16zu\n", "total ", total_size);
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ seq_printf(s, "%16.s %16zu\n", "deferred free",
+ heap->free_list_size);
+ seq_printf(s, "----------------------------------------------------\n");
+
+ if (heap->debug_show)
+ heap->debug_show(heap, s, unused);
+
+ return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+ .open = ion_debug_heap_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#ifdef DEBUG_HEAP_SHRINKER
+static int debug_shrink_set(void *data, u64 val)
+{
+ struct ion_heap *heap = data;
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ if (!val)
+ return 0;
+
+ objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+ sc.nr_to_scan = objs;
+
+ heap->shrinker.shrink(&heap->shrinker, &sc);
+ return 0;
+}
+
+static int debug_shrink_get(void *data, u64 *val)
+{
+ struct ion_heap *heap = data;
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+ *val = objs;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
+ debug_shrink_set, "%llu\n");
+#endif
+
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+{
+ if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+ !heap->ops->unmap_dma)
+ pr_err("%s: can not add heap with invalid ops struct.\n",
+ __func__);
+
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ ion_heap_init_deferred_free(heap);
+
+ heap->dev = dev;
+ down_write(&dev->lock);
+ /* use negative heap->id to reverse the priority -- when traversing
+ the list later attempt higher id numbers first */
+ plist_node_init(&heap->node, -heap->id);
+ plist_add(&heap->node, &dev->heaps);
+ debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
+ &debug_heap_fops);
+#ifdef DEBUG_HEAP_SHRINKER
+ if (heap->shrinker.shrink) {
+ char debug_name[64];
+
+ snprintf(debug_name, 64, "%s_shrink", heap->name);
+ debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
+ &debug_shrink_fops);
+ }
+#endif
+ up_write(&dev->lock);
+}
+
+struct ion_device *ion_device_create(long (*custom_ioctl)
+ (struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg))
+{
+ struct ion_device *idev;
+ int ret;
+
+ idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
+ if (!idev)
+ return ERR_PTR(-ENOMEM);
+
+ idev->dev.minor = MISC_DYNAMIC_MINOR;
+ idev->dev.name = "ion";
+ idev->dev.fops = &ion_fops;
+ idev->dev.parent = NULL;
+ ret = misc_register(&idev->dev);
+ if (ret) {
+ pr_err("ion: failed to register misc device.\n");
+ kfree(idev);
+ return ERR_PTR(ret);
+ }
+
+ idev->debug_root = debugfs_create_dir("ion", NULL);
+ if (!idev->debug_root)
+ pr_err("ion: failed to create debug files.\n");
+
+ idev->custom_ioctl = custom_ioctl;
+ idev->buffers = RB_ROOT;
+ mutex_init(&idev->buffer_lock);
+ init_rwsem(&idev->lock);
+ plist_head_init(&idev->heaps);
+ idev->clients = RB_ROOT;
+ return idev;
+}
+
+void ion_device_destroy(struct ion_device *dev)
+{
+ misc_deregister(&dev->dev);
+ /* XXX need to free the heaps and clients ? */
+ kfree(dev);
+}
+
+void __init ion_reserve(struct ion_platform_data *data)
+{
+ int i;
+
+ for (i = 0; i < data->nr; i++) {
+ if (data->heaps[i].size == 0)
+ continue;
+
+ if (data->heaps[i].base == 0) {
+ phys_addr_t paddr;
+ paddr = memblock_alloc_base(data->heaps[i].size,
+ data->heaps[i].align,
+ MEMBLOCK_ALLOC_ANYWHERE);
+ if (!paddr) {
+ pr_err("%s: error allocating memblock for "
+ "heap %d\n",
+ __func__, i);
+ continue;
+ }
+ data->heaps[i].base = paddr;
+ } else {
+ int ret = memblock_reserve(data->heaps[i].base,
+ data->heaps[i].size);
+ if (ret)
+ pr_err("memblock reserve of %zx@%lx failed\n",
+ data->heaps[i].size,
+ data->heaps[i].base);
+ }
+ pr_info("%s: %s reserved base %lx size %zu\n", __func__,
+ data->heaps[i].name,
+ data->heaps[i].base,
+ data->heaps[i].size);
+ }
+}
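+
+/*
+ * Editor's sketch (not part of the original patch): how a board file might
+ * feed ion_reserve(). The names (my_ion_heaps, MY_CARVEOUT_BASE/SIZE) are
+ * hypothetical; a zero base asks ion_reserve() to memblock_alloc a region,
+ * a non-zero base asks it to memblock_reserve the given range.
+ *
+ *	static struct ion_platform_heap my_ion_heaps[] = {
+ *		{
+ *			.type = ION_HEAP_TYPE_CARVEOUT,
+ *			.id = 1,
+ *			.name = "carveout",
+ *			.base = MY_CARVEOUT_BASE,
+ *			.size = MY_CARVEOUT_SIZE,
+ *		},
+ *	};
+ *	static struct ion_platform_data my_ion_pdata = {
+ *		.nr = ARRAY_SIZE(my_ion_heaps),
+ *		.heaps = my_ion_heaps,
+ *	};
+ *
+ *	// called from an early boot / machine reserve hook:
+ *	ion_reserve(&my_ion_pdata);
+ */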
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
new file mode 100644
index 000000000000..dcd2a0cdb192
--- /dev/null
+++ b/drivers/staging/android/ion/ion.h
@@ -0,0 +1,204 @@
+/*
+ * drivers/staging/android/ion/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ION_H
+#define _LINUX_ION_H
+
+#include <linux/types.h>
+
+#include "../uapi/ion.h"
+
+struct ion_handle;
+struct ion_device;
+struct ion_heap;
+struct ion_mapper;
+struct ion_client;
+struct ion_buffer;
+
+/* This should be removed some day when phys_addr_t's are fully
+ plumbed in the kernel, and all instances of ion_phys_addr_t should
+ be converted to phys_addr_t. For the time being many kernel interfaces
+ do not accept phys_addr_t's and would have to be converted first. */
+#define ion_phys_addr_t unsigned long
+
+/**
+ * struct ion_platform_heap - defines a heap in the given platform
+ * @type: type of the heap from ion_heap_type enum
+ * @id: unique identifier for heap. When allocating, higher id
+ * numbers will be tried first. At allocation these are passed
+ * as a bit mask and therefore cannot exceed ION_NUM_HEAP_IDS.
+ * @name: used for debug purposes
+ * @base: base address of heap in physical memory if applicable
+ * @size: size of the heap in bytes if applicable
+ * @align: required alignment in physical memory if applicable
+ * @priv: private info passed from the board file
+ *
+ * Provided by the board file.
+ */
+struct ion_platform_heap {
+ enum ion_heap_type type;
+ unsigned int id;
+ const char *name;
+ ion_phys_addr_t base;
+ size_t size;
+ ion_phys_addr_t align;
+ void *priv;
+};
+
+/**
+ * struct ion_platform_data - array of platform heaps passed from board file
+ * @nr: number of structures in the array
+ * @heaps: array of platform_heap structures
+ *
+ * Provided by the board file in the form of platform data to a platform device.
+ */
+struct ion_platform_data {
+ int nr;
+ struct ion_platform_heap *heaps;
+};
+
+/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data: platform data specifying starting physical address and
+ * size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specific sizes not
+ * managed by the kernel.
+ */
+void ion_reserve(struct ion_platform_data *data);
+
+/**
+ * ion_client_create() - allocate a client and return it
+ * @dev: the global ion device
+ * @name: used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+ const char *name);
+
+/**
+ * ion_client_destroy() - frees a client and all its handles
+ * @client: the client
+ *
+ * Free the provided client and all its resources including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client: the client
+ * @len: size of the allocation
+ * @align: requested allocation alignment, lots of hardware blocks
+ * have alignment requirements of some kind
+ * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set
+ * heaps will be tried in order from highest to lowest
+ * id
+ * @flags: heap flags, the low 16 bits are consumed by ion, the
+ * high 16 bits are passed on to the respective heap and
+ * can be heap-specific
+ *
+ * Allocate memory in one of the heaps provided in heap mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ size_t align, unsigned int heap_id_mask,
+ unsigned int flags);
+
+/**
+ * ion_free - free a handle
+ * @client: the client
+ * @handle: the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client: the client
+ * @handle: the handle
+ * @addr: a pointer to put the address in
+ * @len: a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address. Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_sg_table should be used
+ * instead. Returns -EINVAL if the handle is invalid. This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_sg_table - return an sg_table describing a handle
+ * @client: the client
+ * @handle: the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client: the client
+ * @handle: handle to map
+ *
+ * Map the given handle into the kernel and return a kernel address that
+ * can be used to access the buffer's contents.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client: the client
+ * @handle: handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
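+
+/*
+ * Example (editor's illustration, not part of the original patch): a typical
+ * in-kernel lifecycle built from the calls declared above; "my_idev" and
+ * "my_heap_id" are hypothetical and error handling is omitted.
+ *
+ *	struct ion_client *client = ion_client_create(my_idev, "example");
+ *	struct ion_handle *handle = ion_alloc(client, SZ_4K, PAGE_SIZE,
+ *					      1 << my_heap_id, ION_FLAG_CACHED);
+ *	void *vaddr = ion_map_kernel(client, handle);
+ *	// ... read/write the buffer through vaddr ...
+ *	ion_unmap_kernel(client, handle);
+ *	ion_free(client, handle);
+ *	ion_client_destroy(client);
+ */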
+
+/**
+ * ion_share_dma_buf() - share buffer as dma-buf
+ * @client: the client
+ * @handle: the handle
+ */
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
+ * @client: the client
+ * @handle: the handle
+ */
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_import_dma_buf() - given a dma-buf fd from the ion exporter get handle
+ * @client: the client
+ * @fd: the dma-buf fd
+ *
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf,
+ * import that fd and return a handle representing it. If a dma-buf from
+ * another exporter is passed in, this function will return ERR_PTR(-EINVAL).
+ */
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
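+
+/*
+ * Example (editor's illustration, not part of the original patch): handing a
+ * buffer from one client to another through a dma-buf fd; error handling
+ * omitted.
+ *
+ *	int fd = ion_share_dma_buf_fd(producer_client, handle);
+ *	// ... pass fd to another subsystem or process ...
+ *	struct ion_handle *imported = ion_import_dma_buf(consumer_client, fd);
+ */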
+
+#endif /* _LINUX_ION_H */
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
new file mode 100644
index 000000000000..3cb05b9b0e93
--- /dev/null
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -0,0 +1,194 @@
+/*
+ * drivers/staging/android/ion/ion_carveout_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_carveout_heap {
+ struct ion_heap heap;
+ struct gen_pool *pool;
+ ion_phys_addr_t base;
+};
+
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+ unsigned long size,
+ unsigned long align)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+ unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);
+
+ if (!offset)
+ return ION_CARVEOUT_ALLOCATE_FAIL;
+
+ return offset;
+}
+
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+
+ if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+ return;
+ gen_pool_free(carveout_heap->pool, addr, size);
+}
+
+static int ion_carveout_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ *addr = paddr;
+ *len = buffer->size;
+ return 0;
+}
+
+static int ion_carveout_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct sg_table *table;
+ ion_phys_addr_t paddr;
+ int ret;
+
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto err_free;
+
+ paddr = ion_carveout_allocate(heap, size, align);
+ if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
+ ret = -ENOMEM;
+ goto err_free_table;
+ }
+
+ sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
+ buffer->priv_virt = table;
+
+ return 0;
+
+err_free_table:
+ sg_free_table(table);
+err_free:
+ kfree(table);
+ return ret;
+}
+
+static void ion_carveout_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ ion_heap_buffer_zero(buffer);
+
+ if (ion_buffer_cached(buffer))
+ dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+ DMA_BIDIRECTIONAL);
+
+ ion_carveout_free(heap, paddr, buffer->size);
+ sg_free_table(table);
+ kfree(table);
+}
+
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
+static struct ion_heap_ops carveout_heap_ops = {
+ .allocate = ion_carveout_heap_allocate,
+ .free = ion_carveout_heap_free,
+ .phys = ion_carveout_heap_phys,
+ .map_dma = ion_carveout_heap_map_dma,
+ .unmap_dma = ion_carveout_heap_unmap_dma,
+ .map_user = ion_heap_map_user,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_carveout_heap *carveout_heap;
+ int ret;
+
+ struct page *page;
+ size_t size;
+
+ page = pfn_to_page(PFN_DOWN(heap_data->base));
+ size = heap_data->size;
+
+ ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+ ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+ if (ret)
+ return ERR_PTR(ret);
+
+ carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
+ if (!carveout_heap)
+ return ERR_PTR(-ENOMEM);
+
+ carveout_heap->pool = gen_pool_create(12, -1);
+ if (!carveout_heap->pool) {
+ kfree(carveout_heap);
+ return ERR_PTR(-ENOMEM);
+ }
+ carveout_heap->base = heap_data->base;
+ gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
+ -1);
+ carveout_heap->heap.ops = &carveout_heap_ops;
+ carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
+ carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+
+ return &carveout_heap->heap;
+}
+
+void ion_carveout_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+
+ gen_pool_destroy(carveout_heap->pool);
+ kfree(carveout_heap);
+ carveout_heap = NULL;
+}
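+
+/*
+ * Editor's sketch (not part of the original patch): registering a carveout
+ * heap at driver init. The platform heap's base/size are expected to cover
+ * memory set aside earlier (e.g. via ion_reserve()); the names are
+ * hypothetical.
+ *
+ *	struct ion_platform_heap pheap = {
+ *		.type = ION_HEAP_TYPE_CARVEOUT,
+ *		.id = 1,
+ *		.name = "carveout",
+ *		.base = my_reserved_base,
+ *		.size = my_reserved_size,
+ *	};
+ *	struct ion_heap *heap = ion_carveout_heap_create(&pheap);
+ *	if (!IS_ERR(heap))
+ *		ion_device_add_heap(my_idev, heap);
+ */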
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
new file mode 100644
index 000000000000..d40f5f831808
--- /dev/null
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -0,0 +1,195 @@
+/*
+ * drivers/staging/android/ion/ion_chunk_heap.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_chunk_heap {
+ struct ion_heap heap;
+ struct gen_pool *pool;
+ ion_phys_addr_t base;
+ unsigned long chunk_size;
+ unsigned long size;
+ unsigned long allocated;
+};
+
+static int ion_chunk_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int ret, i;
+ unsigned long num_chunks;
+ unsigned long allocated_size;
+
+ if (align > chunk_heap->chunk_size)
+ return -EINVAL;
+
+ allocated_size = ALIGN(size, chunk_heap->chunk_size);
+ num_chunks = allocated_size / chunk_heap->chunk_size;
+
+ if (allocated_size > chunk_heap->size - chunk_heap->allocated)
+ return -ENOMEM;
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+ ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
+ if (ret) {
+ kfree(table);
+ return ret;
+ }
+
+ sg = table->sgl;
+ for (i = 0; i < num_chunks; i++) {
+ unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
+ chunk_heap->chunk_size);
+ if (!paddr)
+ goto err;
+ sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
+ chunk_heap->chunk_size, 0);
+ sg = sg_next(sg);
+ }
+
+ buffer->priv_virt = table;
+ chunk_heap->allocated += allocated_size;
+ return 0;
+err:
+ sg = table->sgl;
+ for (i -= 1; i >= 0; i--) {
+ gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+ sg->length);
+ sg = sg_next(sg);
+ }
+ sg_free_table(table);
+ kfree(table);
+ return -ENOMEM;
+}
+
+static void ion_chunk_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+ struct sg_table *table = buffer->priv_virt;
+ struct scatterlist *sg;
+ int i;
+ unsigned long allocated_size;
+
+ allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);
+
+ ion_heap_buffer_zero(buffer);
+
+ if (ion_buffer_cached(buffer))
+ dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+ DMA_BIDIRECTIONAL);
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+ sg->length);
+ }
+ chunk_heap->allocated -= allocated_size;
+ sg_free_table(table);
+ kfree(table);
+}
+
+static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
+static struct ion_heap_ops chunk_heap_ops = {
+ .allocate = ion_chunk_heap_allocate,
+ .free = ion_chunk_heap_free,
+ .map_dma = ion_chunk_heap_map_dma,
+ .unmap_dma = ion_chunk_heap_unmap_dma,
+ .map_user = ion_heap_map_user,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_chunk_heap *chunk_heap;
+ int ret;
+ struct page *page;
+ size_t size;
+
+ page = pfn_to_page(PFN_DOWN(heap_data->base));
+ size = heap_data->size;
+
+ ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+ ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+ if (ret)
+ return ERR_PTR(ret);
+
+ chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
+ if (!chunk_heap)
+ return ERR_PTR(-ENOMEM);
+
+ chunk_heap->chunk_size = (unsigned long)heap_data->priv;
+ chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
+ PAGE_SHIFT, -1);
+ if (!chunk_heap->pool) {
+ ret = -ENOMEM;
+ goto error_gen_pool_create;
+ }
+ chunk_heap->base = heap_data->base;
+ chunk_heap->size = heap_data->size;
+ chunk_heap->allocated = 0;
+
+ gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
+ chunk_heap->heap.ops = &chunk_heap_ops;
+ chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
+ chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+ pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
+ heap_data->size, heap_data->align);
+
+ return &chunk_heap->heap;
+
+error_gen_pool_create:
+ kfree(chunk_heap);
+ return ERR_PTR(ret);
+}
+
+void ion_chunk_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+
+ gen_pool_destroy(chunk_heap->pool);
+ kfree(chunk_heap);
+ chunk_heap = NULL;
+}
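+
+/*
+ * Editor's note (illustrative, not part of the original patch): the chunk
+ * size is passed in through ion_platform_heap.priv, as the dummy driver
+ * later in this patch does, e.g.:
+ *
+ *	struct ion_platform_heap pheap = {
+ *		.type = ION_HEAP_TYPE_CHUNK,
+ *		.id = 2,			// hypothetical
+ *		.name = "chunk",
+ *		.base = my_reserved_base,
+ *		.size = SZ_4M,
+ *		.priv = (void *)SZ_64K,		// chunk size in bytes
+ *	};
+ */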
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
new file mode 100644
index 000000000000..f0f98897e4b9
--- /dev/null
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -0,0 +1,218 @@
+/*
+ * drivers/staging/android/ion/ion_cma_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+
+#define ION_CMA_ALLOCATE_FAILED -1
+
+struct ion_cma_heap {
+ struct ion_heap heap;
+ struct device *dev;
+};
+
+#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
+
+struct ion_cma_buffer_info {
+ void *cpu_addr;
+ dma_addr_t handle;
+ struct sg_table *table;
+};
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * as soon as it becomes available.
+ */
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size)
+{
+ struct page *page = virt_to_page(cpu_addr);
+ int ret;
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (unlikely(ret))
+ return ret;
+
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ return 0;
+}
+
+/* ION CMA heap operations functions */
+static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
+ unsigned long len, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info;
+
+ dev_dbg(dev, "Request buffer allocation len %ld\n", len);
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ return -EINVAL;
+
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
+ info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
+ if (!info) {
+ dev_err(dev, "Can't allocate buffer info\n");
+ return ION_CMA_ALLOCATE_FAILED;
+ }
+
+ info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
+ GFP_HIGHUSER | __GFP_ZERO);
+
+ if (!info->cpu_addr) {
+ dev_err(dev, "Fail to allocate buffer\n");
+ goto err;
+ }
+
+ info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!info->table) {
+ dev_err(dev, "Fail to allocate sg table\n");
+ goto free_mem;
+ }
+
+ if (ion_cma_get_sgtable(dev, info->table, info->cpu_addr,
+ info->handle, len))
+ goto free_table;
+ /* keep this for memory release */
+ buffer->priv_virt = info;
+ dev_dbg(dev, "Allocate buffer %p\n", buffer);
+ return 0;
+
+free_table:
+ kfree(info->table);
+free_mem:
+ dma_free_coherent(dev, len, info->cpu_addr, info->handle);
+err:
+ kfree(info);
+ return ION_CMA_ALLOCATE_FAILED;
+}
+
+static void ion_cma_free(struct ion_buffer *buffer)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ dev_dbg(dev, "Release buffer %p\n", buffer);
+ /* release memory */
+ dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+ /* release sg table */
+ sg_free_table(info->table);
+ kfree(info->table);
+ kfree(info);
+}
+
+/* return physical address in addr */
+static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+ &info->handle);
+
+ *addr = info->handle;
+ *len = buffer->size;
+
+ return 0;
+}
+
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ return info->table;
+}
+
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
+static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
+ buffer->size);
+}
+
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+ /* kernel memory mapping has been done at allocation time */
+ return info->cpu_addr;
+}
+
+static void ion_cma_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops ion_cma_ops = {
+ .allocate = ion_cma_allocate,
+ .free = ion_cma_free,
+ .map_dma = ion_cma_heap_map_dma,
+ .unmap_dma = ion_cma_heap_unmap_dma,
+ .phys = ion_cma_phys,
+ .map_user = ion_cma_mmap,
+ .map_kernel = ion_cma_map_kernel,
+ .unmap_kernel = ion_cma_unmap_kernel,
+};
+
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
+{
+ struct ion_cma_heap *cma_heap;
+
+ cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
+
+ if (!cma_heap)
+ return ERR_PTR(-ENOMEM);
+
+ cma_heap->heap.ops = &ion_cma_ops;
+ /* get device from the private heap data; later it will be
+ * used to make the link with reserved CMA memory */
+ cma_heap->dev = data->priv;
+ cma_heap->heap.type = ION_HEAP_TYPE_DMA;
+ return &cma_heap->heap;
+}
+
+void ion_cma_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+
+ kfree(cma_heap);
+}
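+
+/*
+ * Editor's note (illustrative, not part of the original patch): for the CMA
+ * heap, ion_platform_heap.priv must carry the struct device whose CMA region
+ * backs the allocations, e.g.:
+ *
+ *	struct ion_platform_heap pheap = {
+ *		.type = ION_HEAP_TYPE_DMA,
+ *		.id = 3,			// hypothetical
+ *		.name = "cma",
+ *		.priv = &my_pdev->dev,		// device with a CMA area attached
+ *	};
+ *	struct ion_heap *heap = ion_cma_heap_create(&pheap);
+ */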
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
new file mode 100644
index 000000000000..01cdc8aee898
--- /dev/null
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -0,0 +1,158 @@
+/*
+ * drivers/staging/android/ion/ion_dummy_driver.c
+ *
+ * Copyright (C) 2013 Linaro, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/sizes.h>
+#include <linux/io.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_device *idev;
+struct ion_heap **heaps;
+
+void *carveout_ptr;
+void *chunk_ptr;
+
+struct ion_platform_heap dummy_heaps[] = {
+ {
+ .id = ION_HEAP_TYPE_SYSTEM,
+ .type = ION_HEAP_TYPE_SYSTEM,
+ .name = "system",
+ },
+ {
+ .id = ION_HEAP_TYPE_SYSTEM_CONTIG,
+ .type = ION_HEAP_TYPE_SYSTEM_CONTIG,
+ .name = "system contig",
+ },
+ {
+ .id = ION_HEAP_TYPE_CARVEOUT,
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .name = "carveout",
+ .size = SZ_4M,
+ },
+ {
+ .id = ION_HEAP_TYPE_CHUNK,
+ .type = ION_HEAP_TYPE_CHUNK,
+ .name = "chunk",
+ .size = SZ_4M,
+ .align = SZ_16K,
+ .priv = (void *)(SZ_16K),
+ },
+};
+
+struct ion_platform_data dummy_ion_pdata = {
+ .nr = ARRAY_SIZE(dummy_heaps),
+ .heaps = dummy_heaps,
+};
+
+static int __init ion_dummy_init(void)
+{
+ int i, err;
+
+ idev = ion_device_create(NULL);
+ if (IS_ERR(idev))
+ return PTR_ERR(idev);
+
+ heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr,
+ GFP_KERNEL);
+ if (!heaps)
+ return -ENOMEM;
+
+
+ /* Allocate a dummy carveout heap */
+ carveout_ptr = alloc_pages_exact(
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size,
+ GFP_KERNEL);
+ if (carveout_ptr)
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base =
+ virt_to_phys(carveout_ptr);
+ else
+ pr_err("ion_dummy: Could not allocate carveout\n");
+
+ /* Allocate a dummy chunk heap */
+ chunk_ptr = alloc_pages_exact(
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size,
+ GFP_KERNEL);
+ if (chunk_ptr)
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr);
+ else
+ pr_err("ion_dummy: Could not allocate chunk\n");
+
+ for (i = 0; i < dummy_ion_pdata.nr; i++) {
+ struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
+
+ if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
+ !heap_data->base)
+ continue;
+
+ if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
+ continue;
+
+ heaps[i] = ion_heap_create(heap_data);
+ if (IS_ERR_OR_NULL(heaps[i])) {
+ err = PTR_ERR(heaps[i]);
+ goto err;
+ }
+ ion_device_add_heap(idev, heaps[i]);
+ }
+ return 0;
+err:
+ for (i = 0; i < dummy_ion_pdata.nr; i++) {
+ if (heaps[i])
+ ion_heap_destroy(heaps[i]);
+ }
+ kfree(heaps);
+
+ if (carveout_ptr) {
+ free_pages_exact(carveout_ptr,
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+ carveout_ptr = NULL;
+ }
+ if (chunk_ptr) {
+ free_pages_exact(chunk_ptr,
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+ chunk_ptr = NULL;
+ }
+ return err;
+}
+device_initcall(ion_dummy_init);
+
+static void __exit ion_dummy_exit(void)
+{
+ int i;
+
+ ion_device_destroy(idev);
+
+ for (i = 0; i < dummy_ion_pdata.nr; i++)
+ ion_heap_destroy(heaps[i]);
+ kfree(heaps);
+
+ if (carveout_ptr) {
+ free_pages_exact(carveout_ptr,
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+ carveout_ptr = NULL;
+ }
+ if (chunk_ptr) {
+ free_pages_exact(chunk_ptr,
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+ chunk_ptr = NULL;
+ }
+
+ return;
+}
+__exitcall(ion_dummy_exit);
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
new file mode 100644
index 000000000000..37e64d51394c
--- /dev/null
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -0,0 +1,318 @@
+/*
+ * drivers/staging/android/ion/ion_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+void *ion_heap_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct scatterlist *sg;
+ int i, j;
+ void *vaddr;
+ pgprot_t pgprot;
+ struct sg_table *table = buffer->sg_table;
+ int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ struct page **pages = vmalloc(sizeof(struct page *) * npages);
+ struct page **tmp = pages;
+
+ if (!pages)
+ return NULL;
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
+ struct page *page = sg_page(sg);
+ BUG_ON(i >= npages);
+ for (j = 0; j < npages_this_entry; j++)
+ *(tmp++) = page++;
+ }
+ vaddr = vmap(pages, npages, VM_MAP, pgprot);
+ vfree(pages);
+
+ if (vaddr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+
+void ion_heap_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ vunmap(buffer->vaddr);
+}
+
+int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct sg_table *table = buffer->sg_table;
+ unsigned long addr = vma->vm_start;
+ unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+ struct scatterlist *sg;
+ int i;
+ int ret;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+ unsigned long remainder = vma->vm_end - addr;
+ unsigned long len = sg->length;
+
+ if (offset >= sg->length) {
+ offset -= sg->length;
+ continue;
+ } else if (offset) {
+ page += offset / PAGE_SIZE;
+ len = sg->length - offset;
+ offset = 0;
+ }
+ len = min(len, remainder);
+ ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
+ vma->vm_page_prot);
+ if (ret)
+ return ret;
+ addr += len;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+ return 0;
+}
+
+static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
+{
+ void *addr = vm_map_ram(pages, num, -1, pgprot);
+ if (!addr)
+ return -ENOMEM;
+ memset(addr, 0, PAGE_SIZE * num);
+ vm_unmap_ram(addr, num);
+
+ return 0;
+}
+
+static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
+ pgprot_t pgprot)
+{
+ int p = 0;
+ int ret = 0;
+ struct sg_page_iter piter;
+ struct page *pages[32];
+
+ for_each_sg_page(sgl, &piter, nents, 0) {
+ pages[p++] = sg_page_iter_page(&piter);
+ if (p == ARRAY_SIZE(pages)) {
+ ret = ion_heap_clear_pages(pages, p, pgprot);
+ if (ret)
+ return ret;
+ p = 0;
+ }
+ }
+ if (p)
+ ret = ion_heap_clear_pages(pages, p, pgprot);
+
+ return ret;
+}
+
+int ion_heap_buffer_zero(struct ion_buffer *buffer)
+{
+ struct sg_table *table = buffer->sg_table;
+ pgprot_t pgprot;
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
+}
+
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
+{
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, size, 0);
+ return ion_heap_sglist_zero(&sg, 1, pgprot);
+}
+
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
+{
+ spin_lock(&heap->free_lock);
+ list_add(&buffer->list, &heap->free_list);
+ heap->free_list_size += buffer->size;
+ spin_unlock(&heap->free_lock);
+ wake_up(&heap->waitqueue);
+}
+
+size_t ion_heap_freelist_size(struct ion_heap *heap)
+{
+ size_t size;
+
+ spin_lock(&heap->free_lock);
+ size = heap->free_list_size;
+ spin_unlock(&heap->free_lock);
+
+ return size;
+}
+
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+{
+ struct ion_buffer *buffer;
+ size_t total_drained = 0;
+
+ if (ion_heap_freelist_size(heap) == 0)
+ return 0;
+
+ spin_lock(&heap->free_lock);
+ if (size == 0)
+ size = heap->free_list_size;
+
+ while (!list_empty(&heap->free_list)) {
+ if (total_drained >= size)
+ break;
+ buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+ list);
+ list_del(&buffer->list);
+ heap->free_list_size -= buffer->size;
+ total_drained += buffer->size;
+ spin_unlock(&heap->free_lock);
+ ion_buffer_destroy(buffer);
+ spin_lock(&heap->free_lock);
+ }
+ spin_unlock(&heap->free_lock);
+
+ return total_drained;
+}
+
+static int ion_heap_deferred_free(void *data)
+{
+ struct ion_heap *heap = data;
+
+ while (true) {
+ struct ion_buffer *buffer;
+
+ wait_event_freezable(heap->waitqueue,
+ ion_heap_freelist_size(heap) > 0);
+
+ spin_lock(&heap->free_lock);
+ if (list_empty(&heap->free_list)) {
+ spin_unlock(&heap->free_lock);
+ continue;
+ }
+ buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+ list);
+ list_del(&buffer->list);
+ heap->free_list_size -= buffer->size;
+ spin_unlock(&heap->free_lock);
+ ion_buffer_destroy(buffer);
+ }
+
+ return 0;
+}
+
+int ion_heap_init_deferred_free(struct ion_heap *heap)
+{
+ struct sched_param param = { .sched_priority = 0 };
+
+ INIT_LIST_HEAD(&heap->free_list);
+ heap->free_list_size = 0;
+ spin_lock_init(&heap->free_lock);
+ init_waitqueue_head(&heap->waitqueue);
+ heap->task = kthread_run(ion_heap_deferred_free, heap,
+ "%s", heap->name);
+ if (IS_ERR(heap->task)) {
+ pr_err("%s: creating thread for deferred free failed\n",
+ __func__);
+ return PTR_RET(heap->task);
+ }
+ sched_setscheduler(heap->task, SCHED_IDLE, &param);
+ return 0;
+}
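+
+/*
+ * Editor's note (illustrative, not part of the original patch): a heap opts
+ * into deferred freeing by setting ION_HEAP_FLAG_DEFER_FREE before it is
+ * registered; ion_device_add_heap() then calls ion_heap_init_deferred_free(),
+ * freed buffers are queued with ion_heap_freelist_add(), and the low-priority
+ * kthread created above (named after the heap) destroys them, e.g.:
+ *
+ *	heap->flags |= ION_HEAP_FLAG_DEFER_FREE;
+ *	ion_device_add_heap(my_idev, heap);
+ */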
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_heap *heap = NULL;
+
+ switch (heap_data->type) {
+ case ION_HEAP_TYPE_SYSTEM_CONTIG:
+ heap = ion_system_contig_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_SYSTEM:
+ heap = ion_system_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_CARVEOUT:
+ heap = ion_carveout_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_CHUNK:
+ heap = ion_chunk_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_DMA:
+ heap = ion_cma_heap_create(heap_data);
+ break;
+ default:
+ pr_err("%s: Invalid heap type %d\n", __func__,
+ heap_data->type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (IS_ERR_OR_NULL(heap)) {
+ pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
+ __func__, heap_data->name, heap_data->type,
+ heap_data->base, heap_data->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ heap->name = heap_data->name;
+ heap->id = heap_data->id;
+ return heap;
+}
+
+void ion_heap_destroy(struct ion_heap *heap)
+{
+ if (!heap)
+ return;
+
+ switch (heap->type) {
+ case ION_HEAP_TYPE_SYSTEM_CONTIG:
+ ion_system_contig_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_SYSTEM:
+ ion_system_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_CARVEOUT:
+ ion_carveout_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_CHUNK:
+ ion_chunk_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_DMA:
+ ion_cma_heap_destroy(heap);
+ break;
+ default:
+ pr_err("%s: Invalid heap type %d\n", __func__,
+ heap->type);
+ }
+}
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
new file mode 100644
index 000000000000..fa693c23681a
--- /dev/null
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -0,0 +1,195 @@
+/*
+ * drivers/staging/android/ion/ion_page_pool.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "ion_priv.h"
+
+struct ion_page_pool_item {
+ struct page *page;
+ struct list_head list;
+};
+
+static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+{
+ struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+
+ if (!page)
+ return NULL;
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+ DMA_BIDIRECTIONAL);
+ return page;
+}
+
+static void ion_page_pool_free_pages(struct ion_page_pool *pool,
+ struct page *page)
+{
+ __free_pages(page, pool->order);
+}
+
+static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
+{
+ struct ion_page_pool_item *item;
+
+ item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ mutex_lock(&pool->mutex);
+ item->page = page;
+ if (PageHighMem(page)) {
+ list_add_tail(&item->list, &pool->high_items);
+ pool->high_count++;
+ } else {
+ list_add_tail(&item->list, &pool->low_items);
+ pool->low_count++;
+ }
+ mutex_unlock(&pool->mutex);
+ return 0;
+}
+
+static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
+{
+ struct ion_page_pool_item *item;
+ struct page *page;
+
+ if (high) {
+ BUG_ON(!pool->high_count);
+ item = list_first_entry(&pool->high_items,
+ struct ion_page_pool_item, list);
+ pool->high_count--;
+ } else {
+ BUG_ON(!pool->low_count);
+ item = list_first_entry(&pool->low_items,
+ struct ion_page_pool_item, list);
+ pool->low_count--;
+ }
+
+ list_del(&item->list);
+ page = item->page;
+ kfree(item);
+ return page;
+}
+
+void *ion_page_pool_alloc(struct ion_page_pool *pool)
+{
+ struct page *page = NULL;
+
+ BUG_ON(!pool);
+
+ mutex_lock(&pool->mutex);
+ if (pool->high_count)
+ page = ion_page_pool_remove(pool, true);
+ else if (pool->low_count)
+ page = ion_page_pool_remove(pool, false);
+ mutex_unlock(&pool->mutex);
+
+ if (!page)
+ page = ion_page_pool_alloc_pages(pool);
+
+ return page;
+}
+
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
+{
+ int ret;
+
+ ret = ion_page_pool_add(pool, page);
+ if (ret)
+ ion_page_pool_free_pages(pool, page);
+}
+
+static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
+{
+ int total = 0;
+
+ total += high ? (pool->high_count + pool->low_count) *
+ (1 << pool->order) :
+ pool->low_count * (1 << pool->order);
+ return total;
+}
+
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+ int nr_to_scan)
+{
+ int nr_freed = 0;
+ int i;
+ bool high;
+
+ high = !!(gfp_mask & __GFP_HIGHMEM);
+
+ if (nr_to_scan == 0)
+ return ion_page_pool_total(pool, high);
+
+ for (i = 0; i < nr_to_scan; i++) {
+ struct page *page;
+
+ mutex_lock(&pool->mutex);
+ if (pool->low_count) {
+ page = ion_page_pool_remove(pool, false);
+ } else if (high && pool->high_count) {
+ page = ion_page_pool_remove(pool, true);
+ } else {
+ mutex_unlock(&pool->mutex);
+ break;
+ }
+ mutex_unlock(&pool->mutex);
+ ion_page_pool_free_pages(pool, page);
+ nr_freed += (1 << pool->order);
+ }
+
+ return nr_freed;
+}
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+{
+ struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
+ GFP_KERNEL);
+ if (!pool)
+ return NULL;
+ pool->high_count = 0;
+ pool->low_count = 0;
+ INIT_LIST_HEAD(&pool->low_items);
+ INIT_LIST_HEAD(&pool->high_items);
+ pool->gfp_mask = gfp_mask;
+ pool->order = order;
+ mutex_init(&pool->mutex);
+ plist_node_init(&pool->list, order);
+
+ return pool;
+}
+
+void ion_page_pool_destroy(struct ion_page_pool *pool)
+{
+ kfree(pool);
+}
+
+static int __init ion_page_pool_init(void)
+{
+ return 0;
+}
+
+static void __exit ion_page_pool_exit(void)
+{
+}
+
+module_init(ion_page_pool_init);
+module_exit(ion_page_pool_exit);
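A minimal sketch of how a heap might drive the pool API above for its uncached allocations; the example_* names, the order-4 pool and the gfp flags are illustrative assumptions, not part of this patch, and the snippet assumes ion_priv.h plus the usual mm headers are available.

/* Sketch only: a single pool of zeroed order-4 chunks for one heap. */
static struct ion_page_pool *example_pool;

static int example_pool_setup(void)
{
        example_pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 4);
        return example_pool ? 0 : -ENOMEM;
}

static struct page *example_get_page(void)
{
        /* reuses a pooled page when possible, otherwise allocates a new one */
        return ion_page_pool_alloc(example_pool);
}

static void example_put_page(struct page *page)
{
        /* returned pages stay cached until ion_page_pool_shrink() reclaims them */
        ion_page_pool_free(example_pool, page);
}

static void example_pool_teardown(void)
{
        ion_page_pool_destroy(example_pool);
}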
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
new file mode 100644
index 000000000000..fc2e4fccf69d
--- /dev/null
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -0,0 +1,361 @@
+/*
+ * drivers/staging/android/ion/ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ION_PRIV_H
+#define _ION_PRIV_H
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
+
+#include "ion.h"
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+
+/**
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref: reference count
+ * @node: node in the ion_device buffers tree
+ * @dev: back pointer to the ion_device
+ * @heap: back pointer to the heap the buffer came from
+ * @flags: buffer specific flags
+ * @size: size of the buffer
+ * @priv_virt: private data to the buffer representable as
+ * a void *
+ * @priv_phys: private data to the buffer representable as
+ * an ion_phys_addr_t (and someday a phys_addr_t)
+ * @lock: protects the buffers cnt fields
+ * @kmap_cnt: number of times the buffer is mapped to the kernel
+ * @vaddr: the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt: number of times the buffer is mapped for dma
+ * @sg_table: the sg table for the buffer if dmap_cnt is not zero
+ * @pages: flat array of pages in the buffer -- used by fault
+ * handler and only valid for buffers that are faulted in
+ * @vmas: list of vma's mapping this buffer
+ * @handle_count: count of handles referencing this buffer
+ * @task_comm: taskcomm of last client to reference this buffer in a
+ * handle, used for debugging
+ * @pid: pid of last client to reference this buffer in a
+ * handle, used for debugging
+*/
+struct ion_buffer {
+ struct kref ref;
+ union {
+ struct rb_node node;
+ struct list_head list;
+ };
+ struct ion_device *dev;
+ struct ion_heap *heap;
+ unsigned long flags;
+ size_t size;
+ union {
+ void *priv_virt;
+ ion_phys_addr_t priv_phys;
+ };
+ struct mutex lock;
+ int kmap_cnt;
+ void *vaddr;
+ int dmap_cnt;
+ struct sg_table *sg_table;
+ struct page **pages;
+ struct list_head vmas;
+ /* used to track orphaned buffers */
+ int handle_count;
+ char task_comm[TASK_COMM_LEN];
+ pid_t pid;
+};
+void ion_buffer_destroy(struct ion_buffer *buffer);
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate: allocate memory
+ * @free: free memory
+ * @phys get physical address of a buffer (only defined on
+ * physically contiguous heaps)
+ * @map_dma map the memory for dma to a scatterlist
+ * @unmap_dma unmap the memory for dma
+ * @map_kernel map memory to the kernel
+ * @unmap_kernel unmap memory to the kernel
+ * @map_user map memory to userspace
+ *
+ * allocate, phys, and map_user return 0 on success, -errno on error.
+ * map_dma and map_kernel return pointer on success, ERR_PTR on error.
+ */
+struct ion_heap_ops {
+ int (*allocate) (struct ion_heap *heap,
+ struct ion_buffer *buffer, unsigned long len,
+ unsigned long align, unsigned long flags);
+ void (*free) (struct ion_buffer *buffer);
+ int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len);
+ struct sg_table *(*map_dma) (struct ion_heap *heap,
+ struct ion_buffer *buffer);
+ void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
+ void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+ void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+ int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
+ struct vm_area_struct *vma);
+};
+
+/**
+ * heap flags - flags between the heaps and core ion code
+ */
+#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node: rb node to put the heap on the device's tree of heaps
+ * @dev: back pointer to the ion_device
+ * @type: type of heap
+ * @ops: ops struct as above
+ * @flags: flags
+ * @id: id of heap, also indicates priority of this heap when
+ * allocating. These are specified by platform data and
+ * MUST be unique
+ * @name: used for debugging
+ * @shrinker: a shrinker for the heap. If the heap caches system
+ * memory, it must define a shrinker to return it on low
+ * memory conditions; this includes system memory cached
+ * in the deferred free lists for heaps that support it
+ * @free_list: free list head if deferred free is used
+ * @free_list_size: size of the deferred free list in bytes
+ * @lock: protects the free list
+ * @waitqueue: queue to wait on from deferred free thread
+ * @task: task struct of deferred free thread
+ * @debug_show: called when heap debug file is read to add any
+ * heap specific debug info to output
+ *
+ * Represents a pool of memory from which buffers can be made. In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
+ */
+struct ion_heap {
+ struct plist_node node;
+ struct ion_device *dev;
+ enum ion_heap_type type;
+ struct ion_heap_ops *ops;
+ unsigned long flags;
+ unsigned int id;
+ const char *name;
+ struct shrinker shrinker;
+ struct list_head free_list;
+ size_t free_list_size;
+ spinlock_t free_lock;
+ wait_queue_head_t waitqueue;
+ struct task_struct *task;
+ int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+};
+
+/**
+ * ion_buffer_cached - this ion buffer is cached
+ * @buffer: buffer
+ *
+ * indicates whether this ion buffer is cached
+ */
+bool ion_buffer_cached(struct ion_buffer *buffer);
+
+/**
+ * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
+ * @buffer: buffer
+ *
+ * indicates whether userspace mappings of this buffer will be faulted
+ * in, this can affect how buffers are allocated from the heap.
+ */
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
+
+/**
+ * ion_device_create - allocates and returns an ion device
+ * @custom_ioctl: arch specific ioctl function if applicable
+ *
+ * returns a valid device or -PTR_ERR
+ */
+struct ion_device *ion_device_create(long (*custom_ioctl)
+ (struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg));
+
+/**
+ * ion_device_destroy - frees a device and its resources
+ * @dev: the device
+ */
+void ion_device_destroy(struct ion_device *dev);
+
+/**
+ * ion_device_add_heap - adds a heap to the ion device
+ * @dev: the device
+ * @heap: the heap to add
+ */
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
+
+/**
+ * some helpers for common operations on buffers using the sg_table
+ * and vaddr fields
+ */
+void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
+void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
+int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
+ struct vm_area_struct *);
+int ion_heap_buffer_zero(struct ion_buffer *buffer);
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
+
+/**
+ * ion_heap_init_deferred_free -- initialize deferred free functionality
+ * @heap: the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
+ * be called to set up deferred frees. Calls to free the buffer will
+ * return immediately and the actual free will occur some time later.
+ */
+int ion_heap_init_deferred_free(struct ion_heap *heap);
+
+/**
+ * ion_heap_freelist_add - add a buffer to the deferred free list
+ * @heap: the heap
+ * @buffer: the buffer
+ *
+ * Adds an item to the deferred freelist.
+ */
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
+
+/**
+ * ion_heap_freelist_drain - drain the deferred free list
+ * @heap: the heap
+ * @size: amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ */
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
+
+/**
+ * ion_heap_freelist_size - returns the size of the freelist in bytes
+ * @heap: the heap
+ */
+size_t ion_heap_freelist_size(struct ion_heap *heap);
+
+
+/**
+ * functions for creating and destroying the built-in ion heaps.
+ * Architectures can add their own custom architecture-specific
+ * heaps as appropriate.
+ */
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *);
+void ion_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
+void ion_system_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
+void ion_system_contig_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
+void ion_carveout_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
+void ion_chunk_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
+void ion_cma_heap_destroy(struct ion_heap *);
+
+/**
+ * kernel API to allocate/free from the carveout heap -- used when the
+ * carveout is used to back an architecture-specific custom heap
+ */
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
+ unsigned long align);
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size);
+/**
+ * The carveout heap returns physical addresses. Since 0 may be a valid
+ * physical address, this value is used to indicate that allocation failed.
+ */
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
+
+/**
+ * functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of pre-allocated memory to use from your heap. Keeping
+ * a pool of memory that is ready for dma, i.e. any cached mappings have been
+ * invalidated from the cache, provides a significant performance benefit on
+ * many systems */
+
+/**
+ * struct ion_page_pool - pagepool struct
+ * @high_count: number of highmem items in the pool
+ * @low_count: number of lowmem items in the pool
+ * @high_items: list of highmem items
+ * @low_items: list of lowmem items
+ * @shrinker: a shrinker for the items
+ * @mutex: lock protecting this struct and especially the count
+ * and item lists
+ * @alloc: function to be used to allocate pages when the pool
+ * is empty
+ * @free: function to be used to free pages back to the system
+ * when the shrinker fires
+ * @gfp_mask: gfp_mask to use from alloc
+ * @order: order of pages in the pool
+ * @list: plist node for list of pools
+ *
+ * Allows you to keep a pool of pre-allocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for dma, i.e. any cached mappings have
+ * been invalidated from the cache, provides a significant performance benefit
+ * on many systems
+ */
+struct ion_page_pool {
+ int high_count;
+ int low_count;
+ struct list_head high_items;
+ struct list_head low_items;
+ struct mutex mutex;
+ gfp_t gfp_mask;
+ unsigned int order;
+ struct plist_node list;
+};
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+void ion_page_pool_destroy(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *);
+void ion_page_pool_free(struct ion_page_pool *, struct page *);
+
+/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
+ * @pool: the pool
+ * @gfp_mask: the memory type to reclaim
+ * @nr_to_scan: number of items to shrink in pages
+ *
+ * returns the number of items freed in pages
+ */
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+ int nr_to_scan);
+
+/**
+ * ion_pages_sync_for_device - cache flush pages for use with the specified
+ * device
+ * @dev: the device the pages will be used with
+ * @page: the first page to be flushed
+ * @size: size in bytes of region to be flushed
+ * @dir: direction of dma transfer
+ */
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+ size_t size, enum dma_data_direction dir);
+
+#endif /* _ION_PRIV_H */
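To make the ion_heap_ops contract documented above concrete, a hedged sketch of a toy heap that hands out single zeroed pages and reuses the generic mapping helpers declared in this header; the toy_* names are hypothetical, the registration step (ion_device_add_heap()) is omitted, and the usual linux/slab.h and linux/scatterlist.h includes are assumed.

static int toy_heap_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
                             unsigned long len, unsigned long align,
                             unsigned long flags)
{
        struct sg_table *table;
        struct page *page;

        if (len > PAGE_SIZE || align > PAGE_SIZE)
                return -EINVAL;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return -ENOMEM;

        table = kzalloc(sizeof(*table), GFP_KERNEL);
        if (!table || sg_alloc_table(table, 1, GFP_KERNEL)) {
                __free_page(page);
                kfree(table);
                return -ENOMEM;
        }
        sg_set_page(table->sgl, page, PAGE_SIZE, 0);
        buffer->priv_virt = table;      /* handed back verbatim by map_dma */
        return 0;
}

static void toy_heap_free(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->priv_virt;

        __free_page(sg_page(table->sgl));
        sg_free_table(table);
        kfree(table);
}

static struct sg_table *toy_heap_map_dma(struct ion_heap *heap,
                                         struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

static void toy_heap_unmap_dma(struct ion_heap *heap, struct ion_buffer *buffer)
{
}

static struct ion_heap_ops toy_heap_ops = {
        .allocate     = toy_heap_allocate,
        .free         = toy_heap_free,
        .map_dma      = toy_heap_map_dma,
        .unmap_dma    = toy_heap_unmap_dma,
        /* generic helpers declared in this header cover the mappings */
        .map_kernel   = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user     = ion_heap_map_user,
};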
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
new file mode 100644
index 000000000000..9849f3963e75
--- /dev/null
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -0,0 +1,492 @@
+/*
+ * drivers/staging/android/ion/ion_system_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+ __GFP_NORETRY) & ~__GFP_WAIT;
+static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
+static const unsigned int orders[] = {8, 4, 0};
+static const int num_orders = ARRAY_SIZE(orders);
+static int order_to_index(unsigned int order)
+{
+ int i;
+ for (i = 0; i < num_orders; i++)
+ if (order == orders[i])
+ return i;
+ BUG();
+ return -1;
+}
+
+static unsigned int order_to_size(int order)
+{
+ return PAGE_SIZE << order;
+}
+
+struct ion_system_heap {
+ struct ion_heap heap;
+ struct ion_page_pool **pools;
+};
+
+struct page_info {
+ struct page *page;
+ unsigned int order;
+ struct list_head list;
+};
+
+static struct page *alloc_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long order)
+{
+ bool cached = ion_buffer_cached(buffer);
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ struct page *page;
+
+ if (!cached) {
+ page = ion_page_pool_alloc(pool);
+ } else {
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (order > 4)
+ gfp_flags = high_order_gfp_flags;
+ page = alloc_pages(gfp_flags, order);
+ if (!page)
+ return NULL;
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
+ }
+ if (!page)
+ return NULL;
+
+ return page;
+}
+
+static void free_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer, struct page *page,
+ unsigned int order)
+{
+ bool cached = ion_buffer_cached(buffer);
+
+ if (!cached) {
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ ion_page_pool_free(pool, page);
+ } else {
+ __free_pages(page, order);
+ }
+}
+
+
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size,
+ unsigned int max_order)
+{
+ struct page *page;
+ struct page_info *info;
+ int i;
+
+ info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ for (i = 0; i < num_orders; i++) {
+ if (size < order_to_size(orders[i]))
+ continue;
+ if (max_order < orders[i])
+ continue;
+
+ page = alloc_buffer_page(heap, buffer, orders[i]);
+ if (!page)
+ continue;
+
+ info->page = page;
+ info->order = orders[i];
+ INIT_LIST_HEAD(&info->list);
+ return info;
+ }
+ kfree(info);
+
+ return NULL;
+}
+
+static int ion_system_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int ret;
+ struct list_head pages;
+ struct page_info *info, *tmp_info;
+ int i = 0;
+ unsigned long size_remaining = PAGE_ALIGN(size);
+ unsigned int max_order = orders[0];
+
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
+ if (size / PAGE_SIZE > totalram_pages / 2)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pages);
+ while (size_remaining > 0) {
+ info = alloc_largest_available(sys_heap, buffer, size_remaining,
+ max_order);
+ if (!info)
+ goto err;
+ list_add_tail(&info->list, &pages);
+ size_remaining -= (1 << info->order) * PAGE_SIZE;
+ max_order = info->order;
+ i++;
+ }
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ goto err;
+
+ ret = sg_alloc_table(table, i, GFP_KERNEL);
+ if (ret)
+ goto err1;
+
+ sg = table->sgl;
+ list_for_each_entry_safe(info, tmp_info, &pages, list) {
+ struct page *page = info->page;
+ sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
+ sg = sg_next(sg);
+ list_del(&info->list);
+ kfree(info);
+ }
+
+ buffer->priv_virt = table;
+ return 0;
+err1:
+ kfree(table);
+err:
+ list_for_each_entry_safe(info, tmp_info, &pages, list) {
+ free_buffer_page(sys_heap, buffer, info->page, info->order);
+ kfree(info);
+ }
+ return -ENOMEM;
+}
+
+static void ion_system_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ struct sg_table *table = buffer->sg_table;
+ bool cached = ion_buffer_cached(buffer);
+ struct scatterlist *sg;
+ LIST_HEAD(pages);
+ int i;
+
+ /* uncached pages come from the page pools, zero them before returning
+ for security purposes (other allocations are zeroed at alloc time) */
+ if (!cached)
+ ion_heap_buffer_zero(buffer);
+
+ for_each_sg(table->sgl, sg, table->nents, i)
+ free_buffer_page(sys_heap, buffer, sg_page(sg),
+ get_order(sg->length));
+ sg_free_table(table);
+ kfree(table);
+}
+
+static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_system_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
+static struct ion_heap_ops system_heap_ops = {
+ .allocate = ion_system_heap_allocate,
+ .free = ion_system_heap_free,
+ .map_dma = ion_system_heap_map_dma,
+ .unmap_dma = ion_system_heap_unmap_dma,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+ .map_user = ion_heap_map_user,
+};
+
+static unsigned long ion_system_heap_shrink_count(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+ shrinker);
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int nr_total = 0;
+ int i;
+
+ /* total number of items is whatever the page pools are holding
+ plus whatever's in the freelist */
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->pools[i];
+ nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
+ }
+ nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
+ return nr_total;
+
+}
+
+static unsigned long ion_system_heap_shrink_scan(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+
+ struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+ shrinker);
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int nr_freed = 0;
+ int i;
+
+ if (sc->nr_to_scan == 0)
+ goto end;
+
+ /* shrink the free list first, no point in zeroing the memory if
+ we're just going to reclaim it */
+ nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
+ PAGE_SIZE;
+
+ if (nr_freed >= sc->nr_to_scan)
+ goto end;
+
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->pools[i];
+
+ nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
+ sc->nr_to_scan);
+ if (nr_freed >= sc->nr_to_scan)
+ break;
+ }
+
+end:
+ return nr_freed;
+
+}
+
+static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
+ void *unused)
+{
+
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int i;
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->pools[i];
+ seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
+ pool->high_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->high_count);
+ seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
+ pool->low_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->low_count);
+ }
+ return 0;
+}
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+{
+ struct ion_system_heap *heap;
+ int i;
+
+ heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+ heap->heap.ops = &system_heap_ops;
+ heap->heap.type = ION_HEAP_TYPE_SYSTEM;
+ heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+ heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
+ GFP_KERNEL);
+ if (!heap->pools)
+ goto err_alloc_pools;
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool;
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (orders[i] > 4)
+ gfp_flags = high_order_gfp_flags;
+ pool = ion_page_pool_create(gfp_flags, orders[i]);
+ if (!pool)
+ goto err_create_pool;
+ heap->pools[i] = pool;
+ }
+
+ heap->heap.shrinker.scan_objects = ion_system_heap_shrink_scan;
+ heap->heap.shrinker.count_objects = ion_system_heap_shrink_count;
+ heap->heap.shrinker.seeks = DEFAULT_SEEKS;
+ heap->heap.shrinker.batch = 0;
+ register_shrinker(&heap->heap.shrinker);
+ heap->heap.debug_show = ion_system_heap_debug_show;
+ return &heap->heap;
+err_create_pool:
+ for (i = 0; i < num_orders; i++)
+ if (heap->pools[i])
+ ion_page_pool_destroy(heap->pools[i]);
+ kfree(heap->pools);
+err_alloc_pools:
+ kfree(heap);
+ return ERR_PTR(-ENOMEM);
+}
+
+void ion_system_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int i;
+
+ for (i = 0; i < num_orders; i++)
+ ion_page_pool_destroy(sys_heap->pools[i]);
+ kfree(sys_heap->pools);
+ kfree(sys_heap);
+}
+
+static int ion_system_contig_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags)
+{
+ int order = get_order(len);
+ struct page *page;
+ struct sg_table *table;
+ unsigned long i;
+ int ret;
+
+ if (align > (PAGE_SIZE << order))
+ return -EINVAL;
+
+ page = alloc_pages(low_order_gfp_flags, order);
+ if (!page)
+ return -ENOMEM;
+
+ split_page(page, order);
+
+ len = PAGE_ALIGN(len);
+ for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
+ __free_page(page + i);
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ sg_set_page(table->sgl, page, len, 0);
+
+ buffer->priv_virt = table;
+
+ ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
+
+ return 0;
+
+out:
+ for (i = 0; i < len >> PAGE_SHIFT; i++)
+ __free_page(page + i);
+ kfree(table);
+ return ret;
+}
+
+static void ion_system_contig_heap_free(struct ion_buffer *buffer)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+ unsigned long i;
+
+ for (i = 0; i < pages; i++)
+ __free_page(page + i);
+ sg_free_table(table);
+ kfree(table);
+}
+
+static int ion_system_contig_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ *addr = page_to_phys(page);
+ *len = buffer->size;
+ return 0;
+}
+
+static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops kmalloc_ops = {
+ .allocate = ion_system_contig_heap_allocate,
+ .free = ion_system_contig_heap_free,
+ .phys = ion_system_contig_heap_phys,
+ .map_dma = ion_system_contig_heap_map_dma,
+ .unmap_dma = ion_system_contig_heap_unmap_dma,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+ .map_user = ion_heap_map_user,
+};
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
+{
+ struct ion_heap *heap;
+
+ heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+ heap->ops = &kmalloc_ops;
+ heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+ return heap;
+}
+
+void ion_system_contig_heap_destroy(struct ion_heap *heap)
+{
+ kfree(heap);
+}
+
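As a rough illustration of the descending-order walk in alloc_largest_available(): with 4 KiB pages and orders {8, 4, 0}, a 1104 KiB request comes out as one order-8 chunk (1024 KiB), one order-4 chunk (64 KiB) and four order-0 pages, and max_order only ever decreases so later chunks never exceed earlier ones. A hedged standalone sketch of the same order-selection step (the example_* names are illustrative only):

static const unsigned int example_orders[] = { 8, 4, 0 };

/* Pick the biggest configured order that still fits in what is left and
 * does not exceed the previous chunk's order. */
static unsigned int example_pick_order(unsigned long remaining,
                                       unsigned int max_order)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(example_orders); i++) {
                if (example_orders[i] > max_order)
                        continue;
                if (remaining >= (PAGE_SIZE << example_orders[i]))
                        return example_orders[i];
        }
        return 0;       /* remaining is page aligned, so order 0 always fits */
}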
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
new file mode 100644
index 000000000000..654acb5c8eba
--- /dev/null
+++ b/drivers/staging/android/ion/ion_test.c
@@ -0,0 +1,282 @@
+/*
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "ion-test: " fmt
+
+#include <linux/dma-buf.h>
+#include <linux/dma-direction.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "ion.h"
+#include "../uapi/ion_test.h"
+
+#define u64_to_uptr(x) ((void __user *)(unsigned long)(x))
+
+struct ion_test_device {
+ struct miscdevice misc;
+};
+
+struct ion_test_data {
+ struct dma_buf *dma_buf;
+ struct device *dev;
+};
+
+static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf,
+ void __user *ptr, size_t offset, size_t size, bool write)
+{
+ int ret = 0;
+ struct dma_buf_attachment *attach;
+ struct sg_table *table;
+ pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
+ enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ struct sg_page_iter sg_iter;
+ unsigned long offset_page;
+
+ attach = dma_buf_attach(dma_buf, dev);
+ if (IS_ERR(attach))
+ return PTR_ERR(attach);
+
+ table = dma_buf_map_attachment(attach, dir);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ offset_page = offset >> PAGE_SHIFT;
+ offset %= PAGE_SIZE;
+
+ for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+ void *vaddr = vmap(&page, 1, VM_MAP, pgprot);
+ size_t to_copy = PAGE_SIZE - offset;
+
+ to_copy = min(to_copy, size);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (write)
+ ret = copy_from_user(vaddr + offset, ptr, to_copy);
+ else
+ ret = copy_to_user(ptr, vaddr + offset, to_copy);
+
+ vunmap(vaddr);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+ size -= to_copy;
+ if (!size)
+ break;
+ ptr += to_copy;
+ offset = 0;
+ }
+
+err:
+ dma_buf_unmap_attachment(attach, table, dir);
+ dma_buf_detach(dma_buf, attach);
+ return ret;
+}
+
+static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
+ size_t offset, size_t size, bool write)
+{
+ int ret;
+ unsigned long page_offset = offset >> PAGE_SHIFT;
+ size_t copy_offset = offset % PAGE_SIZE;
+ size_t copy_size = size;
+ enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ if (offset > dma_buf->size || size > dma_buf->size - offset)
+ return -EINVAL;
+
+ ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir);
+ if (ret)
+ return ret;
+
+ while (copy_size > 0) {
+ size_t to_copy;
+ void *vaddr = dma_buf_kmap(dma_buf, page_offset);
+
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size);
+
+ if (write)
+ ret = copy_from_user(vaddr + copy_offset, ptr, to_copy);
+ else
+ ret = copy_to_user(ptr, vaddr + copy_offset, to_copy);
+
+ dma_buf_kunmap(dma_buf, page_offset, vaddr);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ copy_size -= to_copy;
+ ptr += to_copy;
+ page_offset++;
+ copy_offset = 0;
+ }
+err:
+ dma_buf_end_cpu_access(dma_buf, offset, size, dir);
+ return ret;
+}
+
+static long ion_test_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct ion_test_data *test_data = filp->private_data;
+ int ret = 0;
+
+ union {
+ struct ion_test_rw_data test_rw;
+ } data;
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ION_IOC_TEST_SET_FD:
+ {
+ struct dma_buf *dma_buf = NULL;
+ int fd = arg;
+
+ if (fd >= 0) {
+ dma_buf = dma_buf_get((int)arg);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+ }
+ if (test_data->dma_buf)
+ dma_buf_put(test_data->dma_buf);
+ test_data->dma_buf = dma_buf;
+ break;
+ }
+ case ION_IOC_TEST_DMA_MAPPING:
+ {
+ ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf,
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset, data.test_rw.size,
+ data.test_rw.write);
+ break;
+ }
+ case ION_IOC_TEST_KERNEL_MAPPING:
+ {
+ ret = ion_handle_test_kernel(test_data->dma_buf,
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset, data.test_rw.size,
+ data.test_rw.write);
+ break;
+ }
+ default:
+ return -ENOTTY;
+ }
+
+ if (_IOC_DIR(cmd) & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ return -EFAULT;
+ }
+ return ret;
+}
+
+static int ion_test_open(struct inode *inode, struct file *file)
+{
+ struct ion_test_data *data;
+ struct miscdevice *miscdev = file->private_data;
+
+ data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = miscdev->parent;
+
+ file->private_data = data;
+
+ return 0;
+}
+
+static int ion_test_release(struct inode *inode, struct file *file)
+{
+ struct ion_test_data *data = file->private_data;
+
+ kfree(data);
+
+ return 0;
+}
+
+static const struct file_operations ion_test_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ion_test_ioctl,
+ .compat_ioctl = ion_test_ioctl,
+ .open = ion_test_open,
+ .release = ion_test_release,
+};
+
+static int __init ion_test_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct ion_test_device *testdev;
+
+ testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device),
+ GFP_KERNEL);
+ if (!testdev)
+ return -ENOMEM;
+
+ testdev->misc.minor = MISC_DYNAMIC_MINOR;
+ testdev->misc.name = "ion-test";
+ testdev->misc.fops = &ion_test_fops;
+ testdev->misc.parent = &pdev->dev;
+ ret = misc_register(&testdev->misc);
+ if (ret) {
+ pr_err("failed to register misc device.\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, testdev);
+
+ return 0;
+}
+
+static struct platform_driver ion_test_platform_driver = {
+ .driver = {
+ .name = "ion-test",
+ },
+};
+
+static int __init ion_test_init(void)
+{
+ platform_device_register_simple("ion-test", -1, NULL, 0);
+ return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
+}
+
+static void __exit ion_test_exit(void)
+{
+ platform_driver_unregister(&ion_test_platform_driver);
+}
+
+module_init(ion_test_init);
+module_exit(ion_test_exit);
diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile
new file mode 100644
index 000000000000..11cd003fb08f
--- /dev/null
+++ b/drivers/staging/android/ion/tegra/Makefile
@@ -0,0 +1 @@
+obj-y += tegra_ion.o
diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
new file mode 100644
index 000000000000..3474c65f87fa
--- /dev/null
+++ b/drivers/staging/android/ion/tegra/tegra_ion.c
@@ -0,0 +1,84 @@
+/*
+ * drivers/staging/android/ion/tegra/tegra_ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "../ion.h"
+#include "../ion_priv.h"
+
+static struct ion_device *idev;
+static int num_heaps;
+static struct ion_heap **heaps;
+
+static int tegra_ion_probe(struct platform_device *pdev)
+{
+ struct ion_platform_data *pdata = pdev->dev.platform_data;
+ int err;
+ int i;
+
+ num_heaps = pdata->nr;
+
+ heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+ if (!heaps)
+ return -ENOMEM;
+
+ idev = ion_device_create(NULL);
+ if (IS_ERR_OR_NULL(idev)) {
+ kfree(heaps);
+ return PTR_ERR(idev);
+ }
+
+ /* create the heaps as specified in the board file */
+ for (i = 0; i < num_heaps; i++) {
+ struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+ heaps[i] = ion_heap_create(heap_data);
+ if (IS_ERR_OR_NULL(heaps[i])) {
+ err = PTR_ERR(heaps[i]);
+ goto err;
+ }
+ ion_device_add_heap(idev, heaps[i]);
+ }
+ platform_set_drvdata(pdev, idev);
+ return 0;
+err:
+ for (i = 0; i < num_heaps; i++) {
+ if (heaps[i])
+ ion_heap_destroy(heaps[i]);
+ }
+ kfree(heaps);
+ return err;
+}
+
+static int tegra_ion_remove(struct platform_device *pdev)
+{
+ struct ion_device *idev = platform_get_drvdata(pdev);
+ int i;
+
+ ion_device_destroy(idev);
+ for (i = 0; i < num_heaps; i++)
+ ion_heap_destroy(heaps[i]);
+ kfree(heaps);
+ return 0;
+}
+
+static struct platform_driver ion_driver = {
+ .probe = tegra_ion_probe,
+ .remove = tegra_ion_remove,
+ .driver = { .name = "ion-tegra" }
+};
+
+module_platform_driver(ion_driver);
+
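A hedged sketch of the board-file side that tegra_ion_probe() consumes; the heap list, the carveout address and the exact set of struct ion_platform_heap fields beyond those referenced in this patch (type, id, name, base, size) are assumptions for illustration only.

/* Hypothetical board data: one system heap plus one 4 MiB carveout. */
static struct ion_platform_heap example_heaps[] = {
        {
                .type = ION_HEAP_TYPE_SYSTEM,
                .id   = 0,
                .name = "system",
        },
        {
                .type = ION_HEAP_TYPE_CARVEOUT,
                .id   = 1,
                .name = "carveout",
                .base = 0x80000000,     /* illustrative reserved range */
                .size = 4 * 1024 * 1024,
        },
};

static struct ion_platform_data example_ion_pdata = {
        .nr    = ARRAY_SIZE(example_heaps),
        .heaps = example_heaps,
};

static struct platform_device example_ion_device = {
        .name = "ion-tegra",            /* matches the driver name above */
        .id   = -1,
        .dev  = { .platform_data = &example_ion_pdata },
};

/* board init code would then call platform_device_register(&example_ion_device) */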
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
index 585040be5f18..5aaf71d6974b 100644
--- a/drivers/staging/android/sw_sync.h
+++ b/drivers/staging/android/sw_sync.h
@@ -35,10 +35,27 @@ struct sw_sync_pt {
u32 value;
};
+#if IS_ENABLED(CONFIG_SW_SYNC)
struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
+#else
+static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+ return NULL;
+}
+
+static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+}
+
+static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
+ u32 value)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
#endif /* __KERNEL __ */
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 38e5d3b5ed9b..3d05f662110b 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -79,27 +79,27 @@ static void sync_timeline_free(struct kref *kref)
container_of(kref, struct sync_timeline, kref);
unsigned long flags;
- if (obj->ops->release_obj)
- obj->ops->release_obj(obj);
-
spin_lock_irqsave(&sync_timeline_list_lock, flags);
list_del(&obj->sync_timeline_list);
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+ if (obj->ops->release_obj)
+ obj->ops->release_obj(obj);
+
kfree(obj);
}
void sync_timeline_destroy(struct sync_timeline *obj)
{
obj->destroyed = true;
+ smp_wmb();
/*
- * If this is not the last reference, signal any children
- * that their parent is going away.
+ * signal any children that their parent is going away.
*/
+ sync_timeline_signal(obj);
- if (!kref_put(&obj->kref, sync_timeline_free))
- sync_timeline_signal(obj);
+ kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index 38ea986dc70f..62e2255b1c1e 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -28,7 +28,7 @@ struct sync_fence;
/**
* struct sync_timeline_ops - sync object implementation ops
- * @driver_name: name of the implentation
+ * @driver_name: name of the implementation
* @dup: duplicate a sync_pt
* @has_signaled: returns:
* 1 if pt has signaled
@@ -37,12 +37,12 @@ struct sync_fence;
* @compare: returns:
* 1 if b will signal before a
* 0 if a and b will signal at the same time
- * -1 if a will signabl before b
+ * -1 if a will signal before b
* @free_pt: called before sync_pt is freed
* @release_obj: called before sync_timeline is freed
* @print_obj: deprecated
* @print_pt: deprecated
- * @fill_driver_data: write implmentation specific driver data to data.
+ * @fill_driver_data: write implementation specific driver data to data.
* should return an error if there is not enough room
* as specified by size. This information is returned
* to userspace by SYNC_IOC_FENCE_INFO.
@@ -88,9 +88,9 @@ struct sync_timeline_ops {
/**
* struct sync_timeline - sync object
* @kref: reference count on fence.
- * @ops: ops that define the implementaiton of the sync_timeline
+ * @ops: ops that define the implementation of the sync_timeline
* @name: name of the sync_timeline. Useful for debugging
- * @destoryed: set when sync_timeline is destroyed
+ * @destroyed: set when sync_timeline is destroyed
* @child_list_head: list of children sync_pts for this sync_timeline
* @child_list_lock: lock protecting @child_list_head, destroyed, and
* sync_pt.status
@@ -119,12 +119,12 @@ struct sync_timeline {
* @parent: sync_timeline to which this sync_pt belongs
* @child_list: membership in sync_timeline.child_list_head
* @active_list: membership in sync_timeline.active_list_head
- * @signaled_list: membership in temorary signaled_list on stack
+ * @signaled_list: membership in temporary signaled_list on stack
* @fence: sync_fence to which the sync_pt belongs
* @pt_list: membership in sync_fence.pt_list_head
* @status: 1: signaled, 0:active, <0: error
* @timestamp: time which sync_pt status transitioned from active to
- * singaled or error.
+ * signaled or error.
*/
struct sync_pt {
struct sync_timeline *parent;
@@ -145,9 +145,9 @@ struct sync_pt {
/**
* struct sync_fence - sync fence
* @file: file representing this fence
- * @kref: referenace count on fence.
+ * @kref: reference count on fence.
* @name: name of sync_fence. Useful for debugging
- * @pt_list_head: list of sync_pts in ths fence. immutable once fence
+ * @pt_list_head: list of sync_pts in the fence. immutable once fence
* is created
* @waiter_list_head: list of asynchronous waiters on this fence
* @waiter_list_lock: lock protecting @waiter_list_head and @status
@@ -201,23 +201,23 @@ static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
/**
* sync_timeline_create() - creates a sync object
- * @ops: specifies the implemention ops for the object
+ * @ops: specifies the implementation ops for the object
* @size: size to allocate for this obj
* @name: sync_timeline name
*
- * Creates a new sync_timeline which will use the implemetation specified by
- * @ops. @size bytes will be allocated allowing for implemntation specific
- * data to be kept after the generic sync_timeline stuct.
+ * Creates a new sync_timeline which will use the implementation specified by
+ * @ops. @size bytes will be allocated allowing for implementation specific
+ * data to be kept after the generic sync_timeline struct.
*/
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
int size, const char *name);
/**
- * sync_timeline_destory() - destorys a sync object
+ * sync_timeline_destroy() - destroys a sync object
* @obj: sync_timeline to destroy
*
- * A sync implemntation should call this when the @obj is going away
- * (i.e. module unload.) @obj won't actually be freed until all its childern
+ * A sync implementation should call this when the @obj is going away
+ * (i.e. module unload.) @obj won't actually be freed until all its children
* sync_pts are freed.
*/
void sync_timeline_destroy(struct sync_timeline *obj);
@@ -226,7 +226,7 @@ void sync_timeline_destroy(struct sync_timeline *obj);
* sync_timeline_signal() - signal a status change on a sync_timeline
* @obj: sync_timeline to signal
*
- * A sync implemntation should call this any time one of it's sync_pts
+ * A sync implementation should call this any time one of its sync_pts
* has signaled or has an error condition.
*/
void sync_timeline_signal(struct sync_timeline *obj);
@@ -236,8 +236,8 @@ void sync_timeline_signal(struct sync_timeline *obj);
* @parent: sync_pt's parent sync_timeline
* @size: size to allocate for this pt
*
- * Creates a new sync_pt as a chiled of @parent. @size bytes will be
- * allocated allowing for implemntation specific data to be kept after
+ * Creates a new sync_pt as a child of @parent. @size bytes will be
+ * allocated allowing for implementation specific data to be kept after
* the generic sync_timeline struct.
*/
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size);
@@ -287,7 +287,7 @@ struct sync_fence *sync_fence_merge(const char *name,
struct sync_fence *sync_fence_fdget(int fd);
/**
- * sync_fence_put() - puts a refernnce of a sync fence
+ * sync_fence_put() - puts a reference of a sync fence
* @fence: fence to put
*
* Puts a reference on @fence. If this is the last reference, the fence and
@@ -297,7 +297,7 @@ void sync_fence_put(struct sync_fence *fence);
/**
* sync_fence_install() - installs a fence into a file descriptor
- * @fence: fence to instal
+ * @fence: fence to install
* @fd: file descriptor in which to install the fence
*
* Installs @fence into @fd. @fd's should be acquired through get_unused_fd().
@@ -359,10 +359,10 @@ struct sync_merge_data {
* struct sync_pt_info - detailed sync_pt information
* @len: length of sync_pt_info including any driver_data
* @obj_name: name of parent sync_timeline
- * @driver_name: name of driver implmenting the parent
+ * @driver_name: name of driver implementing the parent
* @status: status of the sync_pt 0:active 1:signaled <0:error
* @timestamp_ns: timestamp of status change in nanoseconds
- * @driver_data: any driver dependant data
+ * @driver_data: any driver dependent data
*/
struct sync_pt_info {
__u32 len;
@@ -377,7 +377,7 @@ struct sync_pt_info {
/**
* struct sync_fence_info_data - data returned from fence info ioctl
* @len: ioctl caller writes the size of the buffer its passing in.
- * ioctl returns length of sync_fence_data reutnred to userspace
+ * ioctl returns length of sync_fence_data returned to userspace
* including pt_info.
* @name: name of fence
* @status: status of fence. 1: signaled 0:active <0:error
@@ -418,7 +418,7 @@ struct sync_fence_info_data {
* pt_info.
*
* pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To itterate over the sync_pt_infos, use the sync_pt_info.len field.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
*/
#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
struct sync_fence_info_data)
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
new file mode 100644
index 000000000000..f09e7c154d69
--- /dev/null
+++ b/drivers/staging/android/uapi/ion.h
@@ -0,0 +1,196 @@
+/*
+ * drivers/staging/android/uapi/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_H
+#define _UAPI_LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+typedef int ion_user_handle_t;
+
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved
+ * carveout heap, allocations are physically
+ * contiguous
+ * @ION_HEAP_TYPE_DMA: memory allocated via DMA API
+ * @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask
+ * is used to identify the heaps, so only 32
+ * total heap types are supported
+ */
+enum ion_heap_type {
+ ION_HEAP_TYPE_SYSTEM,
+ ION_HEAP_TYPE_SYSTEM_CONTIG,
+ ION_HEAP_TYPE_CARVEOUT,
+ ION_HEAP_TYPE_CHUNK,
+ ION_HEAP_TYPE_DMA,
+ ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
+ are at the end of this enum */
+ ION_NUM_HEAPS = 16,
+};
+
+#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
+
+#define ION_NUM_HEAP_IDS sizeof(unsigned int) * 8
+
+/**
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+#define ION_FLAG_CACHED 1 /* mappings of this buffer should be
+ cached, ion will do cache
+ maintenance when the buffer is
+ mapped for dma */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will be created
+ at mmap time, if this is set
+ caches must be managed manually */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * create a client by opening /dev/ion
+ * most operations handled via following ioctls
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len: size of the allocation
+ * @align: required alignment of the allocation
+ * @heap_id_mask: mask of heap ids to allocate from
+ * @flags: flags passed to heap
+ * @handle: pointer that will be populated with a cookie to use to
+ * refer to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+ size_t len;
+ size_t align;
+ unsigned int heap_id_mask;
+ unsigned int flags;
+ ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle: a handle
+ * @fd: a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+ ion_user_handle_t handle;
+ int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle: a handle
+ */
+struct ion_handle_data {
+ ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd: the custom ioctl function to call
+ * @arg: additional data to pass to the custom ioctl, typically a user
+ * pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+ unsigned int cmd;
+ unsigned long arg;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
+ struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be passed to another process. The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * field set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
+ *
+ * Deprecated in favor of using the dma_buf APIs correctly (syncing
+ * will happen automatically when the buffer is mapped to a device).
+ * If necessary, it should be used after touching a cached buffer from the cpu;
+ * this will make the buffer in memory coherent.
+ */
+#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _UAPI_LINUX_ION_H */
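A hedged userspace sketch of the flow the DOC blocks above describe: allocate, obtain an fd with ION_IOC_MAP, mmap it, then free the handle. Error paths are collapsed, and the example assumes the system heap was registered with an id matching ION_HEAP_TYPE_SYSTEM so that ION_HEAP_SYSTEM_MASK selects it.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "ion.h"                        /* this uapi header */

int main(void)
{
        struct ion_allocation_data alloc = {
                .len          = 1024 * 1024,
                .align        = 4096,
                .heap_id_mask = ION_HEAP_SYSTEM_MASK,
                .flags        = ION_FLAG_CACHED,
        };
        struct ion_fd_data map;
        struct ion_handle_data to_free;
        void *buf;
        int ion_fd;

        ion_fd = open("/dev/ion", O_RDWR);      /* each open() is a client */
        if (ion_fd < 0)
                return 1;
        if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
                return 1;

        /* turn the opaque handle into an fd that can be mmap()ed */
        map.handle = alloc.handle;
        if (ioctl(ion_fd, ION_IOC_MAP, &map) < 0)
                return 1;

        buf = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED,
                   map.fd, 0);
        if (buf == MAP_FAILED)
                return 1;
        memset(buf, 0, alloc.len);

        munmap(buf, alloc.len);
        close(map.fd);

        /* release the allocation itself */
        to_free.handle = alloc.handle;
        ioctl(ion_fd, ION_IOC_FREE, &to_free);
        close(ion_fd);
        return 0;
}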
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
new file mode 100644
index 000000000000..ffef06f63133
--- /dev/null
+++ b/drivers/staging/android/uapi/ion_test.h
@@ -0,0 +1,70 @@
+/*
+ * drivers/staging/android/uapi/ion_test.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_TEST_H
+#define _UAPI_LINUX_ION_TEST_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct ion_test_rw_data - metadata passed to the kernel to read/write a handle
+ * @ptr: a pointer to an area at least as large as size
+ * @offset: offset into the ion buffer to start reading
+ * @size: size to read or write
+ * @write: 1 to write, 0 to read
+ */
+struct ion_test_rw_data {
+ __u64 ptr;
+ __u64 offset;
+ __u64 size;
+ int write;
+ int __padding;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_TEST_SET_FD - attach a dma buf to the test driver
+ *
+ * Attaches a dma buf fd to the test driver. Passing a second fd or -1 will
+ * release the first fd.
+ */
+#define ION_IOC_TEST_SET_FD \
+ _IO(ION_IOC_MAGIC, 0xf0)
+
+/**
+ * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA
+ *
+ * Reads or writes the memory from a handle using an uncached mapping. Can be
+ * used by unit tests to emulate a DMA engine as close as possible. Only
+ * expected to be used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_DMA_MAPPING \
+ _IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
+
+/**
+ * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle
+ *
+ * Reads or writes the memory from a handle using a kernel mapping. Can be
+ * used by unit tests to test heap map_kernel functions. Only expected to be
+ * used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_KERNEL_MAPPING \
+ _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
+
+
+#endif /* _UAPI_LINUX_ION_TEST_H */
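A hedged userspace sketch of exercising the test driver (drivers/staging/android/ion/ion_test.c) through these ioctls; it assumes a dma-buf fd was obtained elsewhere (for example via ION_IOC_SHARE on an ion buffer) and that the pattern size does not exceed the buffer size.

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ion_test.h"                   /* this uapi header */

/* Fill an existing dma-buf through the test driver's kernel mapping path. */
int ion_test_fill(int dmabuf_fd, size_t size)
{
        struct ion_test_rw_data rw;
        unsigned char *pattern;
        int test_fd, ret;

        test_fd = open("/dev/ion-test", O_RDWR);
        if (test_fd < 0)
                return -1;

        /* attach the dma-buf to the test device */
        ret = ioctl(test_fd, ION_IOC_TEST_SET_FD, dmabuf_fd);
        if (ret < 0)
                goto out;

        pattern = malloc(size);
        if (!pattern) {
                ret = -1;
                goto out_detach;
        }
        memset(pattern, 0xaa, size);

        memset(&rw, 0, sizeof(rw));
        rw.ptr    = (unsigned long)pattern;
        rw.offset = 0;
        rw.size   = size;
        rw.write  = 1;                  /* copy user data into the buffer */
        ret = ioctl(test_fd, ION_IOC_TEST_KERNEL_MAPPING, &rw);

        free(pattern);
out_detach:
        ioctl(test_fd, ION_IOC_TEST_SET_FD, -1);        /* drop the dma-buf */
out:
        close(test_fd);
        return ret;
}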
diff --git a/drivers/staging/bcm/Adapter.h b/drivers/staging/bcm/Adapter.h
index 9cd59871adb2..f0d6f0c38207 100644
--- a/drivers/staging/bcm/Adapter.h
+++ b/drivers/staging/bcm/Adapter.h
@@ -378,7 +378,7 @@ struct bcm_mini_adapter {
UINT uiFlashLayoutMinorVersion;
bool bAllDSDWriteAllow;
bool bSigCorrupted;
- /* this should be set who so ever want to change the Headers. after Wrtie it should be reset immediately. */
+ /* This should be set by whoever wants to change the Headers; after the write it should be reset immediately. */
bool bHeaderChangeAllowed;
int SelectedChip;
bool bEndPointHalted;
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 87b74ca84c42..f1b6de0293c8 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -160,7 +160,9 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
struct bcm_ioctl_buffer IoBuffer;
int bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX", cmd, arg);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX",
+ cmd, arg);
if (_IOC_TYPE(cmd) != BCM_IOCTL)
return -EFAULT;
@@ -266,7 +268,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(uiTempVar == EEPROM_REJECT_REG_3) ||
(uiTempVar == EEPROM_REJECT_REG_4))) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "EEPROM Access Denied, not in VSG Mode\n");
return -EFAULT;
}
@@ -274,9 +277,11 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(PUINT)sWrmBuffer.Data, sizeof(ULONG));
if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Done\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "WRM Done\n");
} else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Failed\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "WRM Failed\n");
Status = -EFAULT;
}
break;
@@ -291,7 +296,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Rdms\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Device in Idle Mode, Blocking Rdms\n");
return -EACCES;
}
@@ -317,7 +323,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if ((((ULONG)sRdmBuffer.Register & 0x0F000000) != 0x0F000000) ||
((ULONG)sRdmBuffer.Register & 0x3)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM Done On invalid Address : %x Access Denied.\n",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "RDM Done On invalid Address : %x Access Denied.\n",
(int)sRdmBuffer.Register);
kfree(temp_buff);
@@ -325,7 +332,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
uiTempVar = sRdmBuffer.Register & EEPROM_REJECT_MASK;
- bytes = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register, (PUINT)temp_buff, IoBuffer.OutputLength);
+ bytes = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register,
+ (PUINT)temp_buff, IoBuffer.OutputLength);
if (bytes > 0) {
Status = STATUS_SUCCESS;
@@ -349,7 +357,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Wrms\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Device in Idle Mode, Blocking Wrms\n");
return -EACCES;
}
@@ -367,7 +376,9 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if ((((ULONG)sWrmBuffer.Register & 0x0F000000) != 0x0F000000) ||
((ULONG)sWrmBuffer.Register & 0x3)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Done On invalid Address : %x Access Denied.\n", (int)sWrmBuffer.Register);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "WRM Done On invalid Address : %x Access Denied.\n",
+ (int)sWrmBuffer.Register);
return -EINVAL;
}
@@ -379,17 +390,21 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(uiTempVar == EEPROM_REJECT_REG_4)) &&
(cmd == IOCTL_BCM_REGISTER_WRITE)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "EEPROM Access Denied, not in VSG Mode\n");
return -EFAULT;
}
Status = wrmaltWithLock(Adapter, (UINT)sWrmBuffer.Register,
- (PUINT)sWrmBuffer.Data, sWrmBuffer.Length);
+ (PUINT)sWrmBuffer.Data,
+ sWrmBuffer.Length);
if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "WRM Done\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG,
+ DBG_LVL_ALL, "WRM Done\n");
} else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Failed\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "WRM Failed\n");
Status = -EFAULT;
}
break;
@@ -405,7 +420,9 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "GPIO Can't be set/clear in Low power Mode");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "GPIO Can't be set/clear in Low power Mode");
return -EACCES;
}
@@ -423,7 +440,10 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
value = (1<<uiBit);
if (IsReqGpioIsLedInNVM(Adapter, value) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Sorry, Requested GPIO<0x%X> is not correspond to LED !!!", value);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "Sorry, Requested GPIO<0x%X> is not correspond to LED !!!",
+ value);
Status = -EINVAL;
break;
}
@@ -431,27 +451,42 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
/* Set - setting 1 */
if (uiOperation) {
/* Set the gpio output register */
- Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, (PUINT)(&value), sizeof(UINT));
+ Status = wrmaltWithLock(Adapter,
+ BCM_GPIO_OUTPUT_SET_REG,
+ (PUINT)(&value), sizeof(UINT));
if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO bit\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Set the GPIO bit\n");
} else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to set the %dth GPIO\n", uiBit);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Failed to set the %dth GPIO\n",
+ uiBit);
break;
}
} else {
/* Set the gpio output register */
- Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, (PUINT)(&value), sizeof(UINT));
+ Status = wrmaltWithLock(Adapter,
+ BCM_GPIO_OUTPUT_CLR_REG,
+ (PUINT)(&value), sizeof(UINT));
if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO bit\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Set the GPIO bit\n");
} else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to clear the %dth GPIO\n", uiBit);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Failed to clear the %dth GPIO\n",
+ uiBit);
break;
}
}
- bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER,
+ (PUINT)ucResetValue, sizeof(UINT));
if (bytes < 0) {
Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
@@ -467,9 +502,13 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(PUINT)ucResetValue, sizeof(UINT));
if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO to output Mode\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "Set the GPIO to output Mode\n");
} else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to put GPIO in Output Mode\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "Failed to put GPIO in Output Mode\n");
break;
}
}
@@ -477,13 +516,16 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
case BCM_LED_THREAD_STATE_CHANGE_REQ: {
struct bcm_user_thread_req threadReq = {0};
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "User made LED thread InActive");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "User made LED thread InActive");
if ((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus == TRUE) ||
(Adapter->bPreparingForLowPowerMode == TRUE)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "GPIO Can't be set/clear in Low power Mode");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "GPIO Can't be set/clear in Low power Mode");
Status = -EACCES;
break;
}
@@ -500,10 +542,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
/* if LED thread is running(Actively or Inactively) set it state to make inactive */
if (Adapter->LEDInfo.led_thread_running) {
if (threadReq.ThreadState == LED_THREAD_ACTIVATION_REQ) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Activating thread req");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Activating thread req");
Adapter->DriverState = LED_THREAD_ACTIVE;
} else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "DeActivating Thread req.....");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "DeActivating Thread req.....");
Adapter->DriverState = LED_THREAD_INACTIVE;
}
@@ -540,7 +586,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (bytes < 0) {
Status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM Failed\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "RDM Failed\n");
return Status;
} else {
Status = STATUS_SUCCESS;
@@ -570,9 +617,11 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
return -EFAULT;
if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_info[WIMAX_IDX].uiGPIOMask) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
"Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
- pgpio_multi_info[WIMAX_IDX].uiGPIOMask, Adapter->gpioBitMap);
+ pgpio_multi_info[WIMAX_IDX].uiGPIOMask,
+ Adapter->gpioBitMap);
Status = -EINVAL;
break;
}
@@ -590,7 +639,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(PUINT)ucResetValue, sizeof(ULONG));
if (Status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to BCM_GPIO_OUTPUT_SET_REG Failed.");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "WRM to BCM_GPIO_OUTPUT_SET_REG Failed.");
return Status;
}
@@ -603,7 +653,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, (PUINT)ucResetValue, sizeof(ULONG));
if (Status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to BCM_GPIO_OUTPUT_CLR_REG Failed.");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "WRM to BCM_GPIO_OUTPUT_CLR_REG Failed.");
return Status;
}
}
@@ -613,7 +664,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (bytes < 0) {
Status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM to GPIO_PIN_STATE_REGISTER Failed.");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "RDM to GPIO_PIN_STATE_REGISTER Failed.");
return Status;
} else {
Status = STATUS_SUCCESS;
@@ -1190,7 +1242,7 @@ cntrlEnd:
break;
case IOCTL_BCM_CAL_INIT: {
- UINT uiSectorSize = 0 ;
+ UINT uiSectorSize = 0;
if (Adapter->eNVMType == NVM_FLASH) {
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
@@ -1403,7 +1455,7 @@ cntrlEnd:
case IOCTL_BCM_FLASH2X_SECTION_READ: {
struct bcm_flash2x_readwrite sFlash2xRead = {0};
- PUCHAR pReadBuff = NULL ;
+ PUCHAR pReadBuff = NULL;
UINT NOB = 0;
UINT BuffSize = 0;
UINT ReadBytes = 0;
@@ -1438,7 +1490,7 @@ cntrlEnd:
else
BuffSize = NOB;
- ReadOffset = sFlash2xRead.offset ;
+ ReadOffset = sFlash2xRead.offset;
OutPutBuff = IoBuffer.OutputBuffer;
pReadBuff = (PCHAR)kzalloc(BuffSize , GFP_KERNEL);
@@ -1483,7 +1535,7 @@ cntrlEnd:
NOB = NOB - ReadBytes;
if (NOB) {
ReadOffset = ReadOffset + ReadBytes;
- OutPutBuff = OutPutBuff + ReadBytes ;
+ OutPutBuff = OutPutBuff + ReadBytes;
}
}
@@ -1538,7 +1590,7 @@ cntrlEnd:
if (NOB > Adapter->uiSectorSize)
BuffSize = Adapter->uiSectorSize;
else
- BuffSize = NOB ;
+ BuffSize = NOB;
pWriteBuff = kmalloc(BuffSize, GFP_KERNEL);
@@ -1718,12 +1770,12 @@ cntrlEnd:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "NOB :%x", sCopySectStrut.numOfBytes);
if (IsSectionExistInFlash(Adapter, sCopySectStrut.SrcSection) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source Section<%x> does not exixt in Flash ", sCopySectStrut.SrcSection);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source Section<%x> does not exist in Flash ", sCopySectStrut.SrcSection);
return -EINVAL;
}
if (IsSectionExistInFlash(Adapter, sCopySectStrut.DstSection) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Destinatio Section<%x> does not exixt in Flash ", sCopySectStrut.DstSection);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Destinatio Section<%x> does not exist in Flash ", sCopySectStrut.DstSection);
return -EINVAL;
}
@@ -1828,7 +1880,7 @@ cntrlEnd:
SectOfset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal);
if (SectOfset == INVALID_OFFSET) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Provided Section val <%d> does not exixt in Flash 2.x", eFlash2xSectionVal);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Provided Section val <%d> does not exist in Flash 2.x", eFlash2xSectionVal);
return -EINVAL;
}
@@ -1841,10 +1893,10 @@ cntrlEnd:
case IOCTL_BCM_NVM_RAW_READ: {
struct bcm_nvm_readwrite stNVMRead;
- INT NOB ;
- INT BuffSize ;
+ INT NOB;
+ INT BuffSize;
INT ReadOffset = 0;
- UINT ReadBytes = 0 ;
+ UINT ReadBytes = 0;
PUCHAR pReadBuff;
void __user *OutPutBuff;
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index 53fee2f9a498..8dfdd2732bdc 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -39,7 +39,8 @@ static INT bcm_close(struct net_device *dev)
return 0;
}
-static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
return ClassifyPacket(netdev_priv(dev), skb);
}
diff --git a/drivers/staging/bcm/DDRInit.c b/drivers/staging/bcm/DDRInit.c
index 9f7e30f637ea..ed285b2d892d 100644
--- a/drivers/staging/bcm/DDRInit.c
+++ b/drivers/staging/bcm/DDRInit.c
@@ -5,882 +5,865 @@
#define DDR_DUMP_INTERNAL_DEVICE_MEMORY 0xBFC02B00
#define MIPS_CLOCK_REG 0x0f000820
- //DDR INIT-133Mhz
-#define T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 12 //index for 0x0F007000
-static struct bcm_ddr_setting asT3_DDRSetting133MHz[]= {// # DPLL Clock Setting
- {0x0F000800,0x00007212},
- {0x0f000820,0x07F13FFF},
- {0x0f000810,0x00000F95},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- // Changed source for X-bar and MIPS clock to APLL
- {0x0f000840,0x0FFF1B00},
- {0x0f000870,0x00000002},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- {0x0F00a04C,0x0000000C},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01010100},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020001},// POP - 0x00020001 Normal 0x01020001
- {0x0F007020,0x04030107}, //Normal - 0x04030107 POP - 0x05030107
- {0x0F007024,0x02000007},
- {0x0F007028,0x02020202},
- {0x0F00702c,0x0206060a},//ROB- 0x0205050a,//0x0206060a
- {0x0F007030,0x05000000},
- {0x0F007034,0x00000003},
- {0x0F007038,0x110a0200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200
- {0x0F00703C,0x02101010},//ROB - 0x02101010,//0x02101018},
- {0x0F007040,0x45751200},//ROB - 0x45751200,//0x450f1200},
- {0x0F007044,0x110a0d00},//ROB - 0x110a0d00//0x111f0d00
- {0x0F007048,0x081b0306},
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0000001c},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x0010246c},
- {0x0F007064,0x00000010},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00007000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- //# Enable BW improvement within memory controller
- {0x0F007094,0x00000104},
- //# Enable 2 ports within X-bar
- {0x0F00A000,0x00000016},
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
- };
-//80Mhz
-#define T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 10 //index for 0x0F007000
-static struct bcm_ddr_setting asT3_DDRSetting80MHz[]= {// # DPLL Clock Setting
- {0x0f000810,0x00000F95},
- {0x0f000820,0x07f1ffff},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- {0x0F00a000,0x00000016},
- {0x0F00a04C,0x0000000C},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01000000},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020000},
- {0x0F007020,0x04020107},
- {0x0F007024,0x00000007},
- {0x0F007028,0x02020201},
- {0x0F00702c,0x0204040a},
- {0x0F007030,0x04000000},
- {0x0F007034,0x00000002},
- {0x0F007038,0x1F060200},
- {0x0F00703C,0x1C22221F},
- {0x0F007040,0x8A006600},
- {0x0F007044,0x221a0800},
- {0x0F007048,0x02690204},
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0000001c},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x000A15D6},
- {0x0F007064,0x0000000A},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00004000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- {0x0F007094,0x00000104},
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
- };
-//100Mhz
-#define T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 13 //index for 0x0F007000
-static struct bcm_ddr_setting asT3_DDRSetting100MHz[]= {// # DPLL Clock Setting
- {0x0F000800,0x00007008},
- {0x0f000810,0x00000F95},
- {0x0f000820,0x07F13E3F},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- // Changed source for X-bar and MIPS clock to APLL
- //0x0f000840,0x0FFF1800,
- {0x0f000840,0x0FFF1B00},
- {0x0f000870,0x00000002},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- {0x0F00a04C,0x0000000C},
- //# Enable 2 ports within X-bar
- {0x0F00A000,0x00000016},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01010100},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020001}, // POP - 0x00020000 Normal 0x01020000
- {0x0F007020,0x04020107},//Normal - 0x04030107 POP - 0x05030107
- {0x0F007024,0x00000007},
- {0x0F007028,0x01020201},
- {0x0F00702c,0x0204040A},
- {0x0F007030,0x06000000},
- {0x0F007034,0x00000004},
- {0x0F007038,0x20080200},
- {0x0F00703C,0x02030320},
- {0x0F007040,0x6E7F1200},
- {0x0F007044,0x01190A00},
- {0x0F007048,0x06120305},//0x02690204 // 0x06120305
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0000001C},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x00082ED6},
- {0x0F007064,0x0000000A},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00005000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- //# Enable BW improvement within memory controller
- {0x0F007094,0x00000104},
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
- };
+/* DDR INIT-133Mhz */
+#define T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 12 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3_DDRSetting133MHz[] = { /* DPLL Clock Setting */
+ {0x0F000800, 0x00007212},
+ {0x0f000820, 0x07F13FFF},
+ {0x0f000810, 0x00000F95},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0f000840, 0x0FFF1B00},
+ {0x0f000870, 0x00000002},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ {0x0F00a04C, 0x0000000C},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01010100},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020001},
+ {0x0F007020, 0x04030107},
+ {0x0F007024, 0x02000007},
+ {0x0F007028, 0x02020202},
+ {0x0F00702c, 0x0206060a},
+ {0x0F007030, 0x05000000},
+ {0x0F007034, 0x00000003},
+ {0x0F007038, 0x110a0200},
+ {0x0F00703C, 0x02101010},
+ {0x0F007040, 0x45751200},
+ {0x0F007044, 0x110a0d00},
+ {0x0F007048, 0x081b0306},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0000001c},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x0010246c},
+ {0x0F007064, 0x00000010},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00007000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ /* Enable BW improvement within memory controller */
+ {0x0F007094, 0x00000104},
+ /* Enable 2 ports within X-bar */
+ {0x0F00A000, 0x00000016},
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
+};
+/* 80Mhz */
+#define T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 10 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3_DDRSetting80MHz[] = { /* DPLL Clock Setting */
+ {0x0f000810, 0x00000F95},
+ {0x0f000820, 0x07f1ffff},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ {0x0F00a000, 0x00000016},
+ {0x0F00a04C, 0x0000000C},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01000000},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020000},
+ {0x0F007020, 0x04020107},
+ {0x0F007024, 0x00000007},
+ {0x0F007028, 0x02020201},
+ {0x0F00702c, 0x0204040a},
+ {0x0F007030, 0x04000000},
+ {0x0F007034, 0x00000002},
+ {0x0F007038, 0x1F060200},
+ {0x0F00703C, 0x1C22221F},
+ {0x0F007040, 0x8A006600},
+ {0x0F007044, 0x221a0800},
+ {0x0F007048, 0x02690204},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0000001c},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x000A15D6},
+ {0x0F007064, 0x0000000A},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00004000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ {0x0F007094, 0x00000104},
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
+};
+/* 100Mhz */
+#define T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 13 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3_DDRSetting100MHz[] = { /* DPLL Clock Setting */
+ {0x0F000800, 0x00007008},
+ {0x0f000810, 0x00000F95},
+ {0x0f000820, 0x07F13E3F},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0f000840, 0x0FFF1B00},
+ {0x0f000870, 0x00000002},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ {0x0F00a04C, 0x0000000C},
+ /* Enable 2 ports within X-bar */
+ {0x0F00A000, 0x00000016},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01010100},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020001},
+ {0x0F007020, 0x04020107},
+ {0x0F007024, 0x00000007},
+ {0x0F007028, 0x01020201},
+ {0x0F00702c, 0x0204040A},
+ {0x0F007030, 0x06000000},
+ {0x0F007034, 0x00000004},
+ {0x0F007038, 0x20080200},
+ {0x0F00703C, 0x02030320},
+ {0x0F007040, 0x6E7F1200},
+ {0x0F007044, 0x01190A00},
+ {0x0F007048, 0x06120305},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0000001C},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x00082ED6},
+ {0x0F007064, 0x0000000A},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00005000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ /* Enable BW improvement within memory controller */
+ {0x0F007094, 0x00000104},
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
+};
-//Net T3B DDR Settings
-//DDR INIT-133Mhz
+/* Net T3B DDR Settings
+ * DDR INIT-133Mhz
+ */
static struct bcm_ddr_setting asDPLL_266MHZ[] = {
- {0x0F000800,0x00007212},
- {0x0f000820,0x07F13FFF},
- {0x0f000810,0x00000F95},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- // Changed source for X-bar and MIPS clock to APLL
- {0x0f000840,0x0FFF1B00},
- {0x0f000870,0x00000002}
- };
+ {0x0F000800, 0x00007212},
+ {0x0f000820, 0x07F13FFF},
+ {0x0f000810, 0x00000F95},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0f000840, 0x0FFF1B00},
+ {0x0f000870, 0x00000002}
+};
-#define T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 11 //index for 0x0F007000
-static struct bcm_ddr_setting asT3B_DDRSetting133MHz[] = {// # DPLL Clock Setting
- {0x0f000810,0x00000F95},
- {0x0f000810,0x00000F95},
- {0x0f000810,0x00000F95},
- {0x0f000820,0x07F13652},
- {0x0f000840,0x0FFF0800},
- // Changed source for X-bar and MIPS clock to APLL
- {0x0f000880,0x000003DD},
- {0x0f000860,0x00000000},
- // Changed source for X-bar and MIPS clock to APLL
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- //# Enable 2 ports within X-bar
- {0x0F00A000,0x00000016},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01010100},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020001},// POP - 0x00020001 Normal 0x01020001
- {0x0F007020,0x04030107}, //Normal - 0x04030107 POP - 0x05030107
- {0x0F007024,0x02000007},
- {0x0F007028,0x02020202},
- {0x0F00702c,0x0206060a},//ROB- 0x0205050a,//0x0206060a
- {0x0F007030,0x05000000},
- {0x0F007034,0x00000003},
- {0x0F007038,0x130a0200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200
- {0x0F00703C,0x02101012},//ROB - 0x02101010,//0x02101018},
- {0x0F007040,0x457D1200},//ROB - 0x45751200,//0x450f1200},
- {0x0F007044,0x11130d00},//ROB - 0x110a0d00//0x111f0d00
- {0x0F007048,0x040D0306},
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0000001c},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x0010246c},
- {0x0F007064,0x00000012},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00007000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- //# Enable BW improvement within memory controller
- {0x0F007094,0x00000104},
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000},
- };
+#define T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 11 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3B_DDRSetting133MHz[] = { /* DPLL Clock Setting */
+ {0x0f000810, 0x00000F95},
+ {0x0f000810, 0x00000F95},
+ {0x0f000810, 0x00000F95},
+ {0x0f000820, 0x07F13652},
+ {0x0f000840, 0x0FFF0800},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0f000880, 0x000003DD},
+ {0x0f000860, 0x00000000},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ /* Enable 2 ports within X-bar */
+ {0x0F00A000, 0x00000016},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01010100},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020001},
+ {0x0F007020, 0x04030107},
+ {0x0F007024, 0x02000007},
+ {0x0F007028, 0x02020202},
+ {0x0F00702c, 0x0206060a},
+ {0x0F007030, 0x05000000},
+ {0x0F007034, 0x00000003},
+ {0x0F007038, 0x130a0200},
+ {0x0F00703C, 0x02101012},
+ {0x0F007040, 0x457D1200},
+ {0x0F007044, 0x11130d00},
+ {0x0F007048, 0x040D0306},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0000001c},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x0010246c},
+ {0x0F007064, 0x00000012},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00007000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ /* Enable BW improvement within memory controller */
+ {0x0F007094, 0x00000104},
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000},
+ };
-#define T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 //index for 0x0F007000
-static struct bcm_ddr_setting asT3B_DDRSetting80MHz[] = {// # DPLL Clock Setting
- {0x0f000810,0x00000F95},
- {0x0f000820,0x07F13FFF},
- {0x0f000840,0x0FFF1F00},
- {0x0f000880,0x000003DD},
- {0x0f000860,0x00000000},
+#define T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3B_DDRSetting80MHz[] = { /* DPLL Clock Setting */
+ {0x0f000810, 0x00000F95},
+ {0x0f000820, 0x07F13FFF},
+ {0x0f000840, 0x0FFF1F00},
+ {0x0f000880, 0x000003DD},
+ {0x0f000860, 0x00000000},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- {0x0F00a000,0x00000016},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01000000},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020000},
- {0x0F007020,0x04020107},
- {0x0F007024,0x00000007},
- {0x0F007028,0x02020201},
- {0x0F00702c,0x0204040a},
- {0x0F007030,0x04000000},
- {0x0F007034,0x02000002},
- {0x0F007038,0x1F060202},
- {0x0F00703C,0x1C22221F},
- {0x0F007040,0x8A006600},
- {0x0F007044,0x221a0800},
- {0x0F007048,0x02690204},
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0100001c},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x000A15D6},
- {0x0F007064,0x0000000A},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00004000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- {0x0F007094,0x00000104},
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
- };
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ {0x0F00a000, 0x00000016},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01000000},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020000},
+ {0x0F007020, 0x04020107},
+ {0x0F007024, 0x00000007},
+ {0x0F007028, 0x02020201},
+ {0x0F00702c, 0x0204040a},
+ {0x0F007030, 0x04000000},
+ {0x0F007034, 0x02000002},
+ {0x0F007038, 0x1F060202},
+ {0x0F00703C, 0x1C22221F},
+ {0x0F007040, 0x8A006600},
+ {0x0F007044, 0x221a0800},
+ {0x0F007048, 0x02690204},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0100001c},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x000A15D6},
+ {0x0F007064, 0x0000000A},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00004000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ {0x0F007094, 0x00000104},
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
+};
-//100Mhz
-#define T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 9 //index for 0x0F007000
-static struct bcm_ddr_setting asT3B_DDRSetting100MHz[] = {// # DPLL Clock Setting
- {0x0f000810,0x00000F95},
- {0x0f000820,0x07F1369B},
- {0x0f000840,0x0FFF0800},
- {0x0f000880,0x000003DD},
- {0x0f000860,0x00000000},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- //# Enable 2 ports within X-bar
- {0x0F00A000,0x00000016},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01010100},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020000}, // POP - 0x00020000 Normal 0x01020000
- {0x0F007020,0x04020107},//Normal - 0x04030107 POP - 0x05030107
- {0x0F007024,0x00000007},
- {0x0F007028,0x01020201},
- {0x0F00702c,0x0204040A},
- {0x0F007030,0x06000000},
- {0x0F007034,0x02000004},
- {0x0F007038,0x20080200},
- {0x0F00703C,0x02030320},
- {0x0F007040,0x6E7F1200},
- {0x0F007044,0x01190A00},
- {0x0F007048,0x06120305},//0x02690204 // 0x06120305
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0100001C},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x00082ED6},
- {0x0F007064,0x0000000A},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00005000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- //# Enable BW improvement within memory controller
- {0x0F007094,0x00000104},
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
- };
+/* 100Mhz */
+#define T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 9 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3B_DDRSetting100MHz[] = { /* DPLL Clock Setting */
+ {0x0f000810, 0x00000F95},
+ {0x0f000820, 0x07F1369B},
+ {0x0f000840, 0x0FFF0800},
+ {0x0f000880, 0x000003DD},
+ {0x0f000860, 0x00000000},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ /* Enable 2 ports within X-bar */
+ {0x0F00A000, 0x00000016},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01010100},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020000},
+ {0x0F007020, 0x04020107},
+ {0x0F007024, 0x00000007},
+ {0x0F007028, 0x01020201},
+ {0x0F00702c, 0x0204040A},
+ {0x0F007030, 0x06000000},
+ {0x0F007034, 0x02000004},
+ {0x0F007038, 0x20080200},
+ {0x0F00703C, 0x02030320},
+ {0x0F007040, 0x6E7F1200},
+ {0x0F007044, 0x01190A00},
+ {0x0F007048, 0x06120305},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0100001C},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x00082ED6},
+ {0x0F007064, 0x0000000A},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00005000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ /* Enable BW improvement within memory controller */
+ {0x0F007094, 0x00000104},
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
+};
-#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 9 //index for 0x0F007000
-static struct bcm_ddr_setting asT3LP_DDRSetting133MHz[]= {// # DPLL Clock Setting
- {0x0f000820,0x03F1365B},
- {0x0f000810,0x00002F95},
- {0x0f000880,0x000003DD},
- // Changed source for X-bar and MIPS clock to APLL
- {0x0f000840,0x0FFF0000},
- {0x0f000860,0x00000000},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- {0x0F00A000,0x00000016},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01010100},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020001},// POP - 0x00020001 Normal 0x01020001
- {0x0F007020,0x04030107}, //Normal - 0x04030107 POP - 0x05030107
- {0x0F007024,0x02000007},
- {0x0F007028,0x02020200},
- {0x0F00702c,0x0206060a},//ROB- 0x0205050a,//0x0206060a
- {0x0F007030,0x05000000},
- {0x0F007034,0x00000003},
- {0x0F007038,0x200a0200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200
- {0x0F00703C,0x02101020},//ROB - 0x02101010,//0x02101018,
- {0x0F007040,0x45711200},//ROB - 0x45751200,//0x450f1200,
- {0x0F007044,0x110D0D00},//ROB - 0x110a0d00//0x111f0d00
- {0x0F007048,0x04080306},
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0100001c},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x0010245F},
- {0x0F007064,0x00000010},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00007000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- {0x0F007088,0x01000001},
- {0x0F00708c,0x00000101},
- {0x0F007090,0x00000000},
- //# Enable BW improvement within memory controller
- {0x0F007094,0x00040000},
- {0x0F007098,0x00000000},
- {0x0F0070c8,0x00000104},
- //# Enable 2 ports within X-bar
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
+#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 9 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3LP_DDRSetting133MHz[] = { /* DPLL Clock Setting */
+ {0x0f000820, 0x03F1365B},
+ {0x0f000810, 0x00002F95},
+ {0x0f000880, 0x000003DD},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0f000840, 0x0FFF0000},
+ {0x0f000860, 0x00000000},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ {0x0F00A000, 0x00000016},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01010100},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020001},
+ {0x0F007020, 0x04030107},
+ {0x0F007024, 0x02000007},
+ {0x0F007028, 0x02020200},
+ {0x0F00702c, 0x0206060a},
+ {0x0F007030, 0x05000000},
+ {0x0F007034, 0x00000003},
+ {0x0F007038, 0x200a0200},
+ {0x0F00703C, 0x02101020},
+ {0x0F007040, 0x45711200},
+ {0x0F007044, 0x110D0D00},
+ {0x0F007048, 0x04080306},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0100001c},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x0010245F},
+ {0x0F007064, 0x00000010},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00007000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ {0x0F007088, 0x01000001},
+ {0x0F00708c, 0x00000101},
+ {0x0F007090, 0x00000000},
+ /* Enable BW improvement within memory controller */
+ {0x0F007094, 0x00040000},
+ {0x0F007098, 0x00000000},
+ {0x0F0070c8, 0x00000104},
+ /* Enable 2 ports within X-bar */
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
};
-#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 11 //index for 0x0F007000
-static struct bcm_ddr_setting asT3LP_DDRSetting100MHz[]= {// # DPLL Clock Setting
- {0x0f000810,0x00002F95},
- {0x0f000820,0x03F1369B},
- {0x0f000840,0x0fff0000},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- // Changed source for X-bar and MIPS clock to APLL
- {0x0f000840,0x0FFF0000},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01010100},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020000},// POP - 0x00020001 Normal 0x01020001
- {0x0F007020,0x04020107}, //Normal - 0x04030107 POP - 0x05030107
- {0x0F007024,0x00000007},
- {0x0F007028,0x01020200},
- {0x0F00702c,0x0204040a},//ROB- 0x0205050a,//0x0206060a
- {0x0F007030,0x06000000},
- {0x0F007034,0x00000004},
- {0x0F007038,0x1F080200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200
- {0x0F00703C,0x0203031F},//ROB - 0x02101010,//0x02101018,
- {0x0F007040,0x6e001200},//ROB - 0x45751200,//0x450f1200,
- {0x0F007044,0x011a0a00},//ROB - 0x110a0d00//0x111f0d00
- {0x0F007048,0x03000305},
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0100001c},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x00082ED6},
- {0x0F007064,0x0000000A},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00005000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- {0x0F007088,0x01000001},
- {0x0F00708c,0x00000101},
- {0x0F007090,0x00000000},
- {0x0F007094,0x00010000},
- {0x0F007098,0x00000000},
- {0x0F0070C8,0x00000104},
- //# Enable 2 ports within X-bar
- {0x0F00A000,0x00000016},
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
+#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 11 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3LP_DDRSetting100MHz[] = { /* DPLL Clock Setting */
+ {0x0f000810, 0x00002F95},
+ {0x0f000820, 0x03F1369B},
+ {0x0f000840, 0x0fff0000},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0f000840, 0x0FFF0000},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01010100},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020000},
+ {0x0F007020, 0x04020107},
+ {0x0F007024, 0x00000007},
+ {0x0F007028, 0x01020200},
+ {0x0F00702c, 0x0204040a},
+ {0x0F007030, 0x06000000},
+ {0x0F007034, 0x00000004},
+ {0x0F007038, 0x1F080200},
+ {0x0F00703C, 0x0203031F},
+ {0x0F007040, 0x6e001200},
+ {0x0F007044, 0x011a0a00},
+ {0x0F007048, 0x03000305},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0100001c},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x00082ED6},
+ {0x0F007064, 0x0000000A},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00005000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ {0x0F007088, 0x01000001},
+ {0x0F00708c, 0x00000101},
+ {0x0F007090, 0x00000000},
+ {0x0F007094, 0x00010000},
+ {0x0F007098, 0x00000000},
+ {0x0F0070C8, 0x00000104},
+ /* Enable 2 ports within X-bar */
+ {0x0F00A000, 0x00000016},
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
};
-#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 //index for 0x0F007000
-static struct bcm_ddr_setting asT3LP_DDRSetting80MHz[]= {// # DPLL Clock Setting
- {0x0f000820,0x07F13FFF},
- {0x0f000810,0x00002F95},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- {0x0f000840,0x0FFF1F00},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0F00a084,0x1Cffffff},
- {0x0F00a080,0x1C000000},
- {0x0F00A000,0x00000016},
- {0x0f007000,0x00010001},
- {0x0f007004,0x01000000},
- {0x0f007008,0x01000001},
- {0x0f00700c,0x00000000},
- {0x0f007010,0x01000000},
- {0x0f007014,0x01000100},
- {0x0f007018,0x01000000},
- {0x0f00701c,0x01020000},
- {0x0f007020,0x04020107},
- {0x0f007024,0x00000007},
- {0x0f007028,0x02020200},
- {0x0f00702c,0x0204040a},
- {0x0f007030,0x04000000},
- {0x0f007034,0x00000002},
- {0x0f007038,0x1d060200},
- {0x0f00703c,0x1c22221d},
- {0x0f007040,0x8A116600},
- {0x0f007044,0x222d0800},
- {0x0f007048,0x02690204},
- {0x0f00704c,0x00000000},
- {0x0f007050,0x0100001c},
- {0x0f007054,0x00000000},
- {0x0f007058,0x00000000},
- {0x0f00705c,0x00000000},
- {0x0f007060,0x000A15D6},
- {0x0f007064,0x0000000A},
- {0x0f007068,0x00000000},
- {0x0f00706c,0x00000001},
- {0x0f007070,0x00004000},
- {0x0f007074,0x00000000},
- {0x0f007078,0x00000000},
- {0x0f00707c,0x00000000},
- {0x0f007080,0x00000000},
- {0x0f007084,0x00000000},
- {0x0f007088,0x01000001},
- {0x0f00708c,0x00000101},
- {0x0f007090,0x00000000},
- {0x0f007094,0x00010000},
- {0x0f007098,0x00000000},
- {0x0F0070C8,0x00000104},
- {0x0F007018,0x01010000}
+#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3LP_DDRSetting80MHz[] = { /* DPLL Clock Setting */
+ {0x0f000820, 0x07F13FFF},
+ {0x0f000810, 0x00002F95},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ {0x0f000840, 0x0FFF1F00},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0F00a084, 0x1Cffffff},
+ {0x0F00a080, 0x1C000000},
+ {0x0F00A000, 0x00000016},
+ {0x0f007000, 0x00010001},
+ {0x0f007004, 0x01000000},
+ {0x0f007008, 0x01000001},
+ {0x0f00700c, 0x00000000},
+ {0x0f007010, 0x01000000},
+ {0x0f007014, 0x01000100},
+ {0x0f007018, 0x01000000},
+ {0x0f00701c, 0x01020000},
+ {0x0f007020, 0x04020107},
+ {0x0f007024, 0x00000007},
+ {0x0f007028, 0x02020200},
+ {0x0f00702c, 0x0204040a},
+ {0x0f007030, 0x04000000},
+ {0x0f007034, 0x00000002},
+ {0x0f007038, 0x1d060200},
+ {0x0f00703c, 0x1c22221d},
+ {0x0f007040, 0x8A116600},
+ {0x0f007044, 0x222d0800},
+ {0x0f007048, 0x02690204},
+ {0x0f00704c, 0x00000000},
+ {0x0f007050, 0x0100001c},
+ {0x0f007054, 0x00000000},
+ {0x0f007058, 0x00000000},
+ {0x0f00705c, 0x00000000},
+ {0x0f007060, 0x000A15D6},
+ {0x0f007064, 0x0000000A},
+ {0x0f007068, 0x00000000},
+ {0x0f00706c, 0x00000001},
+ {0x0f007070, 0x00004000},
+ {0x0f007074, 0x00000000},
+ {0x0f007078, 0x00000000},
+ {0x0f00707c, 0x00000000},
+ {0x0f007080, 0x00000000},
+ {0x0f007084, 0x00000000},
+ {0x0f007088, 0x01000001},
+ {0x0f00708c, 0x00000101},
+ {0x0f007090, 0x00000000},
+ {0x0f007094, 0x00010000},
+ {0x0f007098, 0x00000000},
+ {0x0F0070C8, 0x00000104},
+ {0x0F007018, 0x01010000}
};
-///T3 LP-B (UMA-B)
+/* T3 LP-B (UMA-B) */
-#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ 7 //index for 0x0F007000
-static struct bcm_ddr_setting asT3LPB_DDRSetting160MHz[]= {// # DPLL Clock Setting
-
- {0x0f000820,0x03F137DB},
- {0x0f000810,0x01842795},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- {0x0f000840,0x0FFF0400},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0f003050,0x00000021},//this is flash/eeprom clock divisor which set the flash clock to 20 MHz
- {0x0F00a084,0x1Cffffff},//Now dump from her in internal memory
- {0x0F00a080,0x1C000000},
- {0x0F00A000,0x00000016},
- {0x0f007000,0x00010001},
- {0x0f007004,0x01000001},
- {0x0f007008,0x01000101},
- {0x0f00700c,0x00000000},
- {0x0f007010,0x01000100},
- {0x0f007014,0x01000100},
- {0x0f007018,0x01000000},
- {0x0f00701c,0x01020000},
- {0x0f007020,0x04030107},
- {0x0f007024,0x02000007},
- {0x0f007028,0x02020200},
- {0x0f00702c,0x0206060a},
- {0x0f007030,0x050d0d00},
- {0x0f007034,0x00000003},
- {0x0f007038,0x170a0200},
- {0x0f00703c,0x02101012},
- {0x0f007040,0x45161200},
- {0x0f007044,0x11250c00},
- {0x0f007048,0x04da0307},
- {0x0f00704c,0x00000000},
- {0x0f007050,0x0000001c},
- {0x0f007054,0x00000000},
- {0x0f007058,0x00000000},
- {0x0f00705c,0x00000000},
- {0x0f007060,0x00142bb6},
- {0x0f007064,0x20430014},
- {0x0f007068,0x00000000},
- {0x0f00706c,0x00000001},
- {0x0f007070,0x00009000},
- {0x0f007074,0x00000000},
- {0x0f007078,0x00000000},
- {0x0f00707c,0x00000000},
- {0x0f007080,0x00000000},
- {0x0f007084,0x00000000},
- {0x0f007088,0x01000001},
- {0x0f00708c,0x00000101},
- {0x0f007090,0x00000000},
- {0x0f007094,0x00040000},
- {0x0f007098,0x00000000},
- {0x0F0070C8,0x00000104},
- {0x0F007018,0x01010000}
+#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ 7 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3LPB_DDRSetting160MHz[] = { /* DPLL Clock Setting */
+ {0x0f000820, 0x03F137DB},
+ {0x0f000810, 0x01842795},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ {0x0f000840, 0x0FFF0400},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0f003050, 0x00000021}, /* this is flash/eeprom clock divisor which set the flash clock to 20 MHz */
+ {0x0F00a084, 0x1Cffffff}, /* Now dump from her in internal memory */
+ {0x0F00a080, 0x1C000000},
+ {0x0F00A000, 0x00000016},
+ {0x0f007000, 0x00010001},
+ {0x0f007004, 0x01000001},
+ {0x0f007008, 0x01000101},
+ {0x0f00700c, 0x00000000},
+ {0x0f007010, 0x01000100},
+ {0x0f007014, 0x01000100},
+ {0x0f007018, 0x01000000},
+ {0x0f00701c, 0x01020000},
+ {0x0f007020, 0x04030107},
+ {0x0f007024, 0x02000007},
+ {0x0f007028, 0x02020200},
+ {0x0f00702c, 0x0206060a},
+ {0x0f007030, 0x050d0d00},
+ {0x0f007034, 0x00000003},
+ {0x0f007038, 0x170a0200},
+ {0x0f00703c, 0x02101012},
+ {0x0f007040, 0x45161200},
+ {0x0f007044, 0x11250c00},
+ {0x0f007048, 0x04da0307},
+ {0x0f00704c, 0x00000000},
+ {0x0f007050, 0x0000001c},
+ {0x0f007054, 0x00000000},
+ {0x0f007058, 0x00000000},
+ {0x0f00705c, 0x00000000},
+ {0x0f007060, 0x00142bb6},
+ {0x0f007064, 0x20430014},
+ {0x0f007068, 0x00000000},
+ {0x0f00706c, 0x00000001},
+ {0x0f007070, 0x00009000},
+ {0x0f007074, 0x00000000},
+ {0x0f007078, 0x00000000},
+ {0x0f00707c, 0x00000000},
+ {0x0f007080, 0x00000000},
+ {0x0f007084, 0x00000000},
+ {0x0f007088, 0x01000001},
+ {0x0f00708c, 0x00000101},
+ {0x0f007090, 0x00000000},
+ {0x0f007094, 0x00040000},
+ {0x0f007098, 0x00000000},
+ {0x0F0070C8, 0x00000104},
+ {0x0F007018, 0x01010000}
};
-#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 7 //index for 0x0F007000
-static struct bcm_ddr_setting asT3LPB_DDRSetting133MHz[]= {// # DPLL Clock Setting
- {0x0f000820,0x03F1365B},
- {0x0f000810,0x00002F95},
- {0x0f000880,0x000003DD},
- // Changed source for X-bar and MIPS clock to APLL
- {0x0f000840,0x0FFF0000},
- {0x0f000860,0x00000000},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0f003050,0x00000021},//flash/eeprom clock divisor which set the flash clock to 20 MHz
- {0x0F00a084,0x1Cffffff},//dump from here in internal memory
- {0x0F00a080,0x1C000000},
- {0x0F00A000,0x00000016},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01010100},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020001},// POP - 0x00020001 Normal 0x01020001
- {0x0F007020,0x04030107}, //Normal - 0x04030107 POP - 0x05030107
- {0x0F007024,0x02000007},
- {0x0F007028,0x02020200},
- {0x0F00702c,0x0206060a},//ROB- 0x0205050a,//0x0206060a
- {0x0F007030,0x05000000},
- {0x0F007034,0x00000003},
- {0x0F007038,0x190a0200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200
- {0x0F00703C,0x02101017},//ROB - 0x02101010,//0x02101018,
- {0x0F007040,0x45171200},//ROB - 0x45751200,//0x450f1200,
- {0x0F007044,0x11290D00},//ROB - 0x110a0d00//0x111f0d00
- {0x0F007048,0x04080306},
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0100001c},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x0010245F},
- {0x0F007064,0x00000010},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00007000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- {0x0F007088,0x01000001},
- {0x0F00708c,0x00000101},
- {0x0F007090,0x00000000},
- //# Enable BW improvement within memory controller
- {0x0F007094,0x00040000},
- {0x0F007098,0x00000000},
- {0x0F0070c8,0x00000104},
- //# Enable 2 ports within X-bar
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
+#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 7 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3LPB_DDRSetting133MHz[] = { /* DPLL Clock Setting */
+ {0x0f000820, 0x03F1365B},
+ {0x0f000810, 0x00002F95},
+ {0x0f000880, 0x000003DD},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0f000840, 0x0FFF0000},
+ {0x0f000860, 0x00000000},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0f003050, 0x00000021}, /* flash/eeprom clock divisor which sets the flash clock to 20 MHz */
+ {0x0F00a084, 0x1Cffffff}, /* dump from here in internal memory */
+ {0x0F00a080, 0x1C000000},
+ {0x0F00A000, 0x00000016},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01010100},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020001},
+ {0x0F007020, 0x04030107},
+ {0x0F007024, 0x02000007},
+ {0x0F007028, 0x02020200},
+ {0x0F00702c, 0x0206060a},
+ {0x0F007030, 0x05000000},
+ {0x0F007034, 0x00000003},
+ {0x0F007038, 0x190a0200},
+ {0x0F00703C, 0x02101017},
+ {0x0F007040, 0x45171200},
+ {0x0F007044, 0x11290D00},
+ {0x0F007048, 0x04080306},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0100001c},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x0010245F},
+ {0x0F007064, 0x00000010},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00007000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ {0x0F007088, 0x01000001},
+ {0x0F00708c, 0x00000101},
+ {0x0F007090, 0x00000000},
+ /* Enable BW improvement within memory controller */
+ {0x0F007094, 0x00040000},
+ {0x0F007098, 0x00000000},
+ {0x0F0070c8, 0x00000104},
+ /* Enable 2 ports within X-bar */
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
};
-#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 8 //index for 0x0F007000
-static struct bcm_ddr_setting asT3LPB_DDRSetting100MHz[]= {// # DPLL Clock Setting
- {0x0f000810,0x00002F95},
- {0x0f000820,0x03F1369B},
- {0x0f000840,0x0fff0000},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- // Changed source for X-bar and MIPS clock to APLL
- {0x0f000840,0x0FFF0000},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0f003050,0x00000021},//flash/eeprom clock divisor which set the flash clock to 20 MHz
- {0x0F00a084,0x1Cffffff}, //dump from here in internal memory
- {0x0F00a080,0x1C000000},
- //Memcontroller Default values
- {0x0F007000,0x00010001},
- {0x0F007004,0x01010100},
- {0x0F007008,0x01000001},
- {0x0F00700c,0x00000000},
- {0x0F007010,0x01000000},
- {0x0F007014,0x01000100},
- {0x0F007018,0x01000000},
- {0x0F00701c,0x01020000},// POP - 0x00020001 Normal 0x01020001
- {0x0F007020,0x04020107}, //Normal - 0x04030107 POP - 0x05030107
- {0x0F007024,0x00000007},
- {0x0F007028,0x01020200},
- {0x0F00702c,0x0204040a},//ROB- 0x0205050a,//0x0206060a
- {0x0F007030,0x06000000},
- {0x0F007034,0x00000004},
- {0x0F007038,0x1F080200},//ROB - 0x110a0200,//0x180a0200,// 0x1f0a0200
- {0x0F00703C,0x0203031F},//ROB - 0x02101010,//0x02101018,
- {0x0F007040,0x6e001200},//ROB - 0x45751200,//0x450f1200,
- {0x0F007044,0x011a0a00},//ROB - 0x110a0d00//0x111f0d00
- {0x0F007048,0x03000305},
- {0x0F00704c,0x00000000},
- {0x0F007050,0x0100001c},
- {0x0F007054,0x00000000},
- {0x0F007058,0x00000000},
- {0x0F00705c,0x00000000},
- {0x0F007060,0x00082ED6},
- {0x0F007064,0x0000000A},
- {0x0F007068,0x00000000},
- {0x0F00706c,0x00000001},
- {0x0F007070,0x00005000},
- {0x0F007074,0x00000000},
- {0x0F007078,0x00000000},
- {0x0F00707C,0x00000000},
- {0x0F007080,0x00000000},
- {0x0F007084,0x00000000},
- {0x0F007088,0x01000001},
- {0x0F00708c,0x00000101},
- {0x0F007090,0x00000000},
- {0x0F007094,0x00010000},
- {0x0F007098,0x00000000},
- {0x0F0070C8,0x00000104},
- //# Enable 2 ports within X-bar
- {0x0F00A000,0x00000016},
- //# Enable start bit within memory controller
- {0x0F007018,0x01010000}
+#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 8 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3LPB_DDRSetting100MHz[] = { /* DPLL Clock Setting */
+ {0x0f000810, 0x00002F95},
+ {0x0f000820, 0x03F1369B},
+ {0x0f000840, 0x0fff0000},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ /* Changed source for X-bar and MIPS clock to APLL */
+ {0x0f000840, 0x0FFF0000},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+ {0x0f003050, 0x00000021}, /* flash/eeprom clock divisor which sets the flash clock to 20 MHz */
+ {0x0F00a084, 0x1Cffffff}, /* dump from here in internal memory */
+ {0x0F00a080, 0x1C000000},
+ /* Memcontroller Default values */
+ {0x0F007000, 0x00010001},
+ {0x0F007004, 0x01010100},
+ {0x0F007008, 0x01000001},
+ {0x0F00700c, 0x00000000},
+ {0x0F007010, 0x01000000},
+ {0x0F007014, 0x01000100},
+ {0x0F007018, 0x01000000},
+ {0x0F00701c, 0x01020000},
+ {0x0F007020, 0x04020107},
+ {0x0F007024, 0x00000007},
+ {0x0F007028, 0x01020200},
+ {0x0F00702c, 0x0204040a},
+ {0x0F007030, 0x06000000},
+ {0x0F007034, 0x00000004},
+ {0x0F007038, 0x1F080200},
+ {0x0F00703C, 0x0203031F},
+ {0x0F007040, 0x6e001200},
+ {0x0F007044, 0x011a0a00},
+ {0x0F007048, 0x03000305},
+ {0x0F00704c, 0x00000000},
+ {0x0F007050, 0x0100001c},
+ {0x0F007054, 0x00000000},
+ {0x0F007058, 0x00000000},
+ {0x0F00705c, 0x00000000},
+ {0x0F007060, 0x00082ED6},
+ {0x0F007064, 0x0000000A},
+ {0x0F007068, 0x00000000},
+ {0x0F00706c, 0x00000001},
+ {0x0F007070, 0x00005000},
+ {0x0F007074, 0x00000000},
+ {0x0F007078, 0x00000000},
+ {0x0F00707C, 0x00000000},
+ {0x0F007080, 0x00000000},
+ {0x0F007084, 0x00000000},
+ {0x0F007088, 0x01000001},
+ {0x0F00708c, 0x00000101},
+ {0x0F007090, 0x00000000},
+ {0x0F007094, 0x00010000},
+ {0x0F007098, 0x00000000},
+ {0x0F0070C8, 0x00000104},
+ /* Enable 2 ports within X-bar */
+ {0x0F00A000, 0x00000016},
+ /* Enable start bit within memory controller */
+ {0x0F007018, 0x01010000}
};
-#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 7 //index for 0x0F007000
-static struct bcm_ddr_setting asT3LPB_DDRSetting80MHz[]= {// # DPLL Clock Setting
- {0x0f000820,0x07F13FFF},
- {0x0f000810,0x00002F95},
- {0x0f000860,0x00000000},
- {0x0f000880,0x000003DD},
- {0x0f000840,0x0FFF1F00},
- {0x0F00a044,0x1fffffff},
- {0x0F00a040,0x1f000000},
- {0x0f003050,0x00000021},//flash/eeprom clock divisor which set the flash clock to 20 MHz
- {0x0F00a084,0x1Cffffff},// dump from here in internal memory
- {0x0F00a080,0x1C000000},
- {0x0F00A000,0x00000016},
- {0x0f007000,0x00010001},
- {0x0f007004,0x01000000},
- {0x0f007008,0x01000001},
- {0x0f00700c,0x00000000},
- {0x0f007010,0x01000000},
- {0x0f007014,0x01000100},
- {0x0f007018,0x01000000},
- {0x0f00701c,0x01020000},
- {0x0f007020,0x04020107},
- {0x0f007024,0x00000007},
- {0x0f007028,0x02020200},
- {0x0f00702c,0x0204040a},
- {0x0f007030,0x04000000},
- {0x0f007034,0x00000002},
- {0x0f007038,0x1d060200},
- {0x0f00703c,0x1c22221d},
- {0x0f007040,0x8A116600},
- {0x0f007044,0x222d0800},
- {0x0f007048,0x02690204},
- {0x0f00704c,0x00000000},
- {0x0f007050,0x0100001c},
- {0x0f007054,0x00000000},
- {0x0f007058,0x00000000},
- {0x0f00705c,0x00000000},
- {0x0f007060,0x000A15D6},
- {0x0f007064,0x0000000A},
- {0x0f007068,0x00000000},
- {0x0f00706c,0x00000001},
- {0x0f007070,0x00004000},
- {0x0f007074,0x00000000},
- {0x0f007078,0x00000000},
- {0x0f00707c,0x00000000},
- {0x0f007080,0x00000000},
- {0x0f007084,0x00000000},
- {0x0f007088,0x01000001},
- {0x0f00708c,0x00000101},
- {0x0f007090,0x00000000},
- {0x0f007094,0x00010000},
- {0x0f007098,0x00000000},
- {0x0F0070C8,0x00000104},
- {0x0F007018,0x01010000}
+#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 7 /* index for 0x0F007000 */
+static struct bcm_ddr_setting asT3LPB_DDRSetting80MHz[] = { /* DPLL Clock Setting */
+ {0x0f000820, 0x07F13FFF},
+ {0x0f000810, 0x00002F95},
+ {0x0f000860, 0x00000000},
+ {0x0f000880, 0x000003DD},
+ {0x0f000840, 0x0FFF1F00},
+ {0x0F00a044, 0x1fffffff},
+ {0x0F00a040, 0x1f000000},
+	{0x0f003050, 0x00000021}, /* flash/eeprom clock divisor which sets the flash clock to 20 MHz */
+ {0x0F00a084, 0x1Cffffff}, /* dump from here in internal memory */
+ {0x0F00a080, 0x1C000000},
+ {0x0F00A000, 0x00000016},
+ {0x0f007000, 0x00010001},
+ {0x0f007004, 0x01000000},
+ {0x0f007008, 0x01000001},
+ {0x0f00700c, 0x00000000},
+ {0x0f007010, 0x01000000},
+ {0x0f007014, 0x01000100},
+ {0x0f007018, 0x01000000},
+ {0x0f00701c, 0x01020000},
+ {0x0f007020, 0x04020107},
+ {0x0f007024, 0x00000007},
+ {0x0f007028, 0x02020200},
+ {0x0f00702c, 0x0204040a},
+ {0x0f007030, 0x04000000},
+ {0x0f007034, 0x00000002},
+ {0x0f007038, 0x1d060200},
+ {0x0f00703c, 0x1c22221d},
+ {0x0f007040, 0x8A116600},
+ {0x0f007044, 0x222d0800},
+ {0x0f007048, 0x02690204},
+ {0x0f00704c, 0x00000000},
+ {0x0f007050, 0x0100001c},
+ {0x0f007054, 0x00000000},
+ {0x0f007058, 0x00000000},
+ {0x0f00705c, 0x00000000},
+ {0x0f007060, 0x000A15D6},
+ {0x0f007064, 0x0000000A},
+ {0x0f007068, 0x00000000},
+ {0x0f00706c, 0x00000001},
+ {0x0f007070, 0x00004000},
+ {0x0f007074, 0x00000000},
+ {0x0f007078, 0x00000000},
+ {0x0f00707c, 0x00000000},
+ {0x0f007080, 0x00000000},
+ {0x0f007084, 0x00000000},
+ {0x0f007088, 0x01000001},
+ {0x0f00708c, 0x00000101},
+ {0x0f007090, 0x00000000},
+ {0x0f007094, 0x00010000},
+ {0x0f007098, 0x00000000},
+ {0x0F0070C8, 0x00000104},
+ {0x0F007018, 0x01010000}
};
int ddr_init(struct bcm_mini_adapter *Adapter)
{
- struct bcm_ddr_setting *psDDRSetting=NULL;
- ULONG RegCount=0;
+ struct bcm_ddr_setting *psDDRSetting = NULL;
+ ULONG RegCount = 0;
UINT value = 0;
UINT uiResetValue = 0;
UINT uiClockSetting = 0;
int retval = STATUS_SUCCESS;
- switch (Adapter->chip_id)
- {
+ switch (Adapter->chip_id) {
case 0xbece3200:
- switch (Adapter->DDRSetting)
- {
- case DDR_80_MHZ:
- psDDRSetting=asT3LP_DDRSetting80MHz;
- RegCount=(sizeof(asT3LP_DDRSetting80MHz)/
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_100_MHZ:
- psDDRSetting=asT3LP_DDRSetting100MHz;
- RegCount=(sizeof(asT3LP_DDRSetting100MHz)/
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_133_MHZ:
- psDDRSetting=asT3LP_DDRSetting133MHz;
- RegCount=(sizeof(asT3LP_DDRSetting133MHz)/
- sizeof(struct bcm_ddr_setting));
- if(Adapter->bMipsConfig == MIPS_200_MHZ)
- {
- uiClockSetting = 0x03F13652;
- }
- else
- {
- uiClockSetting = 0x03F1365B;
- }
- break;
- default:
- return -EINVAL;
- }
+ switch (Adapter->DDRSetting) {
+ case DDR_80_MHZ:
+ psDDRSetting = asT3LP_DDRSetting80MHz;
+ RegCount = (sizeof(asT3LP_DDRSetting80MHz)/
+ sizeof(struct bcm_ddr_setting));
+ break;
+ case DDR_100_MHZ:
+ psDDRSetting = asT3LP_DDRSetting100MHz;
+ RegCount = (sizeof(asT3LP_DDRSetting100MHz)/
+ sizeof(struct bcm_ddr_setting));
+ break;
+ case DDR_133_MHZ:
+ psDDRSetting = asT3LP_DDRSetting133MHz;
+ RegCount = (sizeof(asT3LP_DDRSetting133MHz)/
+ sizeof(struct bcm_ddr_setting));
+ if (Adapter->bMipsConfig == MIPS_200_MHZ)
+ uiClockSetting = 0x03F13652;
+ else
+ uiClockSetting = 0x03F1365B;
+ break;
+ default:
+ return -EINVAL;
+ }
break;
case T3LPB:
case BCS220_2:
case BCS220_2BC:
case BCS250_BC:
- case BCS220_3 :
+ case BCS220_3:
/* Set bit 2 and bit 6 to 1 for BBIC 2mA drive
* (please check current value and additionally set these bits)
*/
- if( (Adapter->chip_id != BCS220_2) &&
- (Adapter->chip_id != BCS220_2BC) &&
- (Adapter->chip_id != BCS220_3) )
- {
- retval= rdmalt(Adapter,(UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
- return retval;
- }
- uiResetValue |= 0x44;
- retval = wrmalt(Adapter,(UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
- return retval;
- }
+ if ((Adapter->chip_id != BCS220_2) &&
+ (Adapter->chip_id != BCS220_2BC) &&
+ (Adapter->chip_id != BCS220_3)) {
+			retval = rdmalt(Adapter, (UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
+ if (retval < 0) {
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
+ return retval;
}
- switch(Adapter->DDRSetting)
- {
+ uiResetValue |= 0x44;
+			retval = wrmalt(Adapter, (UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
+ if (retval < 0) {
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
+ return retval;
+ }
+ }
+ switch (Adapter->DDRSetting) {
- case DDR_80_MHZ:
- psDDRSetting = asT3LPB_DDRSetting80MHz;
- RegCount=(sizeof(asT3B_DDRSetting80MHz)/
- sizeof(struct bcm_ddr_setting));
+ case DDR_80_MHZ:
+ psDDRSetting = asT3LPB_DDRSetting80MHz;
+ RegCount = (sizeof(asT3B_DDRSetting80MHz)/
+ sizeof(struct bcm_ddr_setting));
break;
- case DDR_100_MHZ:
- psDDRSetting=asT3LPB_DDRSetting100MHz;
- RegCount=(sizeof(asT3B_DDRSetting100MHz)/
- sizeof(struct bcm_ddr_setting));
+ case DDR_100_MHZ:
+ psDDRSetting = asT3LPB_DDRSetting100MHz;
+ RegCount = (sizeof(asT3B_DDRSetting100MHz)/
+ sizeof(struct bcm_ddr_setting));
break;
- case DDR_133_MHZ:
- psDDRSetting = asT3LPB_DDRSetting133MHz;
- RegCount=(sizeof(asT3B_DDRSetting133MHz)/
- sizeof(struct bcm_ddr_setting));
+ case DDR_133_MHZ:
+ psDDRSetting = asT3LPB_DDRSetting133MHz;
+ RegCount = (sizeof(asT3B_DDRSetting133MHz)/
+ sizeof(struct bcm_ddr_setting));
- if(Adapter->bMipsConfig == MIPS_200_MHZ)
- {
- uiClockSetting = 0x03F13652;
- }
- else
- {
- uiClockSetting = 0x03F1365B;
- }
+ if (Adapter->bMipsConfig == MIPS_200_MHZ)
+ uiClockSetting = 0x03F13652;
+ else
+ uiClockSetting = 0x03F1365B;
break;
- case DDR_160_MHZ:
- psDDRSetting = asT3LPB_DDRSetting160MHz;
- RegCount = sizeof(asT3LPB_DDRSetting160MHz)/sizeof(struct bcm_ddr_setting);
+ case DDR_160_MHZ:
+ psDDRSetting = asT3LPB_DDRSetting160MHz;
+ RegCount = sizeof(asT3LPB_DDRSetting160MHz)/sizeof(struct bcm_ddr_setting);
- if(Adapter->bMipsConfig == MIPS_200_MHZ)
- {
- uiClockSetting = 0x03F137D2;
- }
- else
- {
- uiClockSetting = 0x03F137DB;
- }
- }
+ if (Adapter->bMipsConfig == MIPS_200_MHZ)
+ uiClockSetting = 0x03F137D2;
+ else
+ uiClockSetting = 0x03F137DB;
+ }
break;
case 0xbece0110:
@@ -888,68 +871,59 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
case 0xbece0121:
case 0xbece0130:
case 0xbece0300:
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "DDR Setting: %x\n", Adapter->DDRSetting);
- switch (Adapter->DDRSetting)
- {
- case DDR_80_MHZ:
- psDDRSetting = asT3_DDRSetting80MHz;
- RegCount = (sizeof(asT3_DDRSetting80MHz)/
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3_DDRSetting100MHz;
- RegCount = (sizeof(asT3_DDRSetting100MHz)/
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_133_MHZ:
- psDDRSetting = asT3_DDRSetting133MHz;
- RegCount = (sizeof(asT3_DDRSetting133MHz)/
- sizeof(struct bcm_ddr_setting));
- break;
- default:
- return -EINVAL;
- }
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "DDR Setting: %x\n", Adapter->DDRSetting);
+ switch (Adapter->DDRSetting) {
+ case DDR_80_MHZ:
+ psDDRSetting = asT3_DDRSetting80MHz;
+ RegCount = (sizeof(asT3_DDRSetting80MHz)/
+ sizeof(struct bcm_ddr_setting));
+ break;
+ case DDR_100_MHZ:
+ psDDRSetting = asT3_DDRSetting100MHz;
+ RegCount = (sizeof(asT3_DDRSetting100MHz)/
+ sizeof(struct bcm_ddr_setting));
+ break;
+ case DDR_133_MHZ:
+ psDDRSetting = asT3_DDRSetting133MHz;
+ RegCount = (sizeof(asT3_DDRSetting133MHz)/
+ sizeof(struct bcm_ddr_setting));
+ break;
+ default:
+ return -EINVAL;
+ }
case 0xbece0310:
{
- switch (Adapter->DDRSetting)
- {
- case DDR_80_MHZ:
- psDDRSetting = asT3B_DDRSetting80MHz;
- RegCount=(sizeof(asT3B_DDRSetting80MHz)/
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_100_MHZ:
- psDDRSetting=asT3B_DDRSetting100MHz;
- RegCount=(sizeof(asT3B_DDRSetting100MHz)/
- sizeof(struct bcm_ddr_setting));
+ switch (Adapter->DDRSetting) {
+ case DDR_80_MHZ:
+ psDDRSetting = asT3B_DDRSetting80MHz;
+ RegCount = (sizeof(asT3B_DDRSetting80MHz)/
+ sizeof(struct bcm_ddr_setting));
+ break;
+ case DDR_100_MHZ:
+ psDDRSetting = asT3B_DDRSetting100MHz;
+ RegCount = (sizeof(asT3B_DDRSetting100MHz)/
+ sizeof(struct bcm_ddr_setting));
break;
- case DDR_133_MHZ:
+ case DDR_133_MHZ:
- if(Adapter->bDPLLConfig == PLL_266_MHZ)//266Mhz PLL selected.
- {
- memcpy(asT3B_DDRSetting133MHz, asDPLL_266MHZ,
- sizeof(asDPLL_266MHZ));
- psDDRSetting = asT3B_DDRSetting133MHz;
- RegCount=(sizeof(asT3B_DDRSetting133MHz)/
- sizeof(struct bcm_ddr_setting));
- }
+ if (Adapter->bDPLLConfig == PLL_266_MHZ) { /* 266Mhz PLL selected. */
+ memcpy(asT3B_DDRSetting133MHz, asDPLL_266MHZ,
+ sizeof(asDPLL_266MHZ));
+ psDDRSetting = asT3B_DDRSetting133MHz;
+ RegCount = (sizeof(asT3B_DDRSetting133MHz)/
+ sizeof(struct bcm_ddr_setting));
+ } else {
+ psDDRSetting = asT3B_DDRSetting133MHz;
+ RegCount = (sizeof(asT3B_DDRSetting133MHz)/
+ sizeof(struct bcm_ddr_setting));
+ if (Adapter->bMipsConfig == MIPS_200_MHZ)
+ uiClockSetting = 0x07F13652;
else
- {
- psDDRSetting = asT3B_DDRSetting133MHz;
- RegCount=(sizeof(asT3B_DDRSetting133MHz)/
- sizeof(struct bcm_ddr_setting));
- if(Adapter->bMipsConfig == MIPS_200_MHZ)
- {
- uiClockSetting = 0x07F13652;
- }
- else
- {
- uiClockSetting = 0x07F1365B;
- }
- }
- break;
- default:
- return -EINVAL;
+ uiClockSetting = 0x07F1365B;
+ }
+ break;
+ default:
+ return -EINVAL;
}
break;
@@ -958,20 +932,15 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
return -EINVAL;
}
- value=0;
+ value = 0;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Register Count is =%lu\n", RegCount);
- while(RegCount && !retval)
- {
- if(uiClockSetting && psDDRSetting->ulRegAddress == MIPS_CLOCK_REG)
- {
+ while (RegCount && !retval) {
+ if (uiClockSetting && psDDRSetting->ulRegAddress == MIPS_CLOCK_REG)
value = uiClockSetting;
- }
else
- {
value = psDDRSetting->ulRegValue;
- }
retval = wrmalt(Adapter, psDDRSetting->ulRegAddress, &value, sizeof(value));
- if(STATUS_SUCCESS != retval) {
+ if (STATUS_SUCCESS != retval) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
break;
}
@@ -980,36 +949,34 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
psDDRSetting++;
}
- if(Adapter->chip_id >= 0xbece3300 )
- {
+ if (Adapter->chip_id >= 0xbece3300) {
mdelay(3);
- if( (Adapter->chip_id != BCS220_2)&&
- (Adapter->chip_id != BCS220_2BC)&&
- (Adapter->chip_id != BCS220_3))
- {
+ if ((Adapter->chip_id != BCS220_2) &&
+ (Adapter->chip_id != BCS220_2BC) &&
+ (Adapter->chip_id != BCS220_3)) {
/* drive MDDR to half in case of UMA-B: */
uiResetValue = 0x01010001;
retval = wrmalt(Adapter, (UINT)0x0F007018, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x00040020;
retval = wrmalt(Adapter, (UINT)0x0F007094, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x01020101;
retval = wrmalt(Adapter, (UINT)0x0F00701c, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x01010000;
retval = wrmalt(Adapter, (UINT)0x0F007018, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
@@ -1020,75 +987,72 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
* This is to be done only for Hybrid PMU mode.
* with the current h/w there is no way to detect this.
* and since we dont have internal PMU lets do it under UMA-B chip id.
- * we will change this when we will have internal PMU.
- */
- if(Adapter->PmuMode == HYBRID_MODE_7C)
- {
+ * we will change this when we will have internal PMU.
+ */
+ if (Adapter->PmuMode == HYBRID_MODE_7C) {
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x1322a8;
retval = wrmalt(Adapter, (UINT)0x0f000d1c, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x132296;
retval = wrmalt(Adapter, (UINT)0x0f000d14, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
- }
- else if(Adapter->PmuMode == HYBRID_MODE_6 )
- {
+ } else if (Adapter->PmuMode == HYBRID_MODE_6) {
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x6003229a;
retval = wrmalt(Adapter, (UINT)0x0f000d14, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x1322a8;
retval = wrmalt(Adapter, (UINT)0x0f000d1c, &uiResetValue, sizeof(uiResetValue));
- if(retval < 0) {
+ if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
@@ -1101,179 +1065,167 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
int download_ddr_settings(struct bcm_mini_adapter *Adapter)
{
- struct bcm_ddr_setting *psDDRSetting=NULL;
- ULONG RegCount=0;
+ struct bcm_ddr_setting *psDDRSetting = NULL;
+ ULONG RegCount = 0;
unsigned long ul_ddr_setting_load_addr = DDR_DUMP_INTERNAL_DEVICE_MEMORY;
UINT value = 0;
int retval = STATUS_SUCCESS;
bool bOverrideSelfRefresh = false;
- switch (Adapter->chip_id)
- {
+ switch (Adapter->chip_id) {
case 0xbece3200:
- switch (Adapter->DDRSetting)
- {
- case DDR_80_MHZ:
- psDDRSetting = asT3LP_DDRSetting80MHz;
- RegCount = ARRAY_SIZE(asT3LP_DDRSetting80MHz);
- RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ ;
- psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ switch (Adapter->DDRSetting) {
+ case DDR_80_MHZ:
+ psDDRSetting = asT3LP_DDRSetting80MHz;
+ RegCount = ARRAY_SIZE(asT3LP_DDRSetting80MHz);
+ RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ break;
+ case DDR_100_MHZ:
+ psDDRSetting = asT3LP_DDRSetting100MHz;
+ RegCount = ARRAY_SIZE(asT3LP_DDRSetting100MHz);
+ RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
+ psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
+ break;
+ case DDR_133_MHZ:
+ bOverrideSelfRefresh = TRUE;
+ psDDRSetting = asT3LP_DDRSetting133MHz;
+ RegCount = ARRAY_SIZE(asT3LP_DDRSetting133MHz);
+ RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
+ psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
break;
- case DDR_100_MHZ:
- psDDRSetting = asT3LP_DDRSetting100MHz;
- RegCount = ARRAY_SIZE(asT3LP_DDRSetting100MHz);
- RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ ;
- psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- break;
- case DDR_133_MHZ:
- bOverrideSelfRefresh = TRUE;
- psDDRSetting = asT3LP_DDRSetting133MHz;
- RegCount = ARRAY_SIZE(asT3LP_DDRSetting133MHz);
- RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ ;
- psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- break;
default:
return -EINVAL;
- }
+ }
break;
case T3LPB:
case BCS220_2:
case BCS220_2BC:
case BCS250_BC:
- case BCS220_3 :
- switch (Adapter->DDRSetting)
- {
- case DDR_80_MHZ:
- psDDRSetting = asT3LPB_DDRSetting80MHz;
- RegCount=ARRAY_SIZE(asT3LPB_DDRSetting80MHz);
- RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ ;
- psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ case BCS220_3:
+ switch (Adapter->DDRSetting) {
+ case DDR_80_MHZ:
+ psDDRSetting = asT3LPB_DDRSetting80MHz;
+ RegCount = ARRAY_SIZE(asT3LPB_DDRSetting80MHz);
+ RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ break;
+ case DDR_100_MHZ:
+ psDDRSetting = asT3LPB_DDRSetting100MHz;
+ RegCount = ARRAY_SIZE(asT3LPB_DDRSetting100MHz);
+ RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
+ psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
+ break;
+ case DDR_133_MHZ:
+ bOverrideSelfRefresh = TRUE;
+ psDDRSetting = asT3LPB_DDRSetting133MHz;
+ RegCount = ARRAY_SIZE(asT3LPB_DDRSetting133MHz);
+ RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
+ psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
break;
- case DDR_100_MHZ:
- psDDRSetting = asT3LPB_DDRSetting100MHz;
- RegCount = ARRAY_SIZE(asT3LPB_DDRSetting100MHz);
- RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ ;
- psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- break;
- case DDR_133_MHZ:
- bOverrideSelfRefresh = TRUE;
- psDDRSetting = asT3LPB_DDRSetting133MHz;
- RegCount = ARRAY_SIZE(asT3LPB_DDRSetting133MHz);
- RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ ;
- psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- break;
- case DDR_160_MHZ:
- bOverrideSelfRefresh = TRUE;
- psDDRSetting = asT3LPB_DDRSetting160MHz;
- RegCount = ARRAY_SIZE(asT3LPB_DDRSetting160MHz);
- RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ;
- psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ;
+ case DDR_160_MHZ:
+ bOverrideSelfRefresh = TRUE;
+ psDDRSetting = asT3LPB_DDRSetting160MHz;
+ RegCount = ARRAY_SIZE(asT3LPB_DDRSetting160MHz);
+ RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ;
+ psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ;
- break;
- default:
- return -EINVAL;
- }
+ break;
+ default:
+ return -EINVAL;
+ }
break;
case 0xbece0300:
- switch (Adapter->DDRSetting)
- {
- case DDR_80_MHZ:
- psDDRSetting = asT3_DDRSetting80MHz;
- RegCount = ARRAY_SIZE(asT3_DDRSetting80MHz);
- RegCount-=T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ ;
- psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ switch (Adapter->DDRSetting) {
+ case DDR_80_MHZ:
+ psDDRSetting = asT3_DDRSetting80MHz;
+ RegCount = ARRAY_SIZE(asT3_DDRSetting80MHz);
+ RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
break;
- case DDR_100_MHZ:
- psDDRSetting = asT3_DDRSetting100MHz;
- RegCount = ARRAY_SIZE(asT3_DDRSetting100MHz);
- RegCount-=T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ ;
- psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- break;
- case DDR_133_MHZ:
- psDDRSetting = asT3_DDRSetting133MHz;
- RegCount = ARRAY_SIZE(asT3_DDRSetting133MHz);
- RegCount-=T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ ;
- psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ ;
- break;
- default:
- return -EINVAL;
- }
+ case DDR_100_MHZ:
+ psDDRSetting = asT3_DDRSetting100MHz;
+ RegCount = ARRAY_SIZE(asT3_DDRSetting100MHz);
+ RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
+ psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
+ break;
+ case DDR_133_MHZ:
+ psDDRSetting = asT3_DDRSetting133MHz;
+ RegCount = ARRAY_SIZE(asT3_DDRSetting133MHz);
+ RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
+ psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
+ break;
+ default:
+ return -EINVAL;
+ }
break;
case 0xbece0310:
{
- switch (Adapter->DDRSetting)
- {
- case DDR_80_MHZ:
- psDDRSetting = asT3B_DDRSetting80MHz;
- RegCount = ARRAY_SIZE(asT3B_DDRSetting80MHz);
- RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ ;
- psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3B_DDRSetting100MHz;
- RegCount = ARRAY_SIZE(asT3B_DDRSetting100MHz);
- RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ ;
- psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- break;
- case DDR_133_MHZ:
- bOverrideSelfRefresh = TRUE;
- psDDRSetting = asT3B_DDRSetting133MHz;
- RegCount = ARRAY_SIZE(asT3B_DDRSetting133MHz);
- RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ ;
- psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- break;
- }
- break;
+ switch (Adapter->DDRSetting) {
+ case DDR_80_MHZ:
+ psDDRSetting = asT3B_DDRSetting80MHz;
+ RegCount = ARRAY_SIZE(asT3B_DDRSetting80MHz);
+ RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
+ break;
+ case DDR_100_MHZ:
+ psDDRSetting = asT3B_DDRSetting100MHz;
+ RegCount = ARRAY_SIZE(asT3B_DDRSetting100MHz);
+ RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
+ psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
+ break;
+ case DDR_133_MHZ:
+ bOverrideSelfRefresh = TRUE;
+ psDDRSetting = asT3B_DDRSetting133MHz;
+ RegCount = ARRAY_SIZE(asT3B_DDRSetting133MHz);
+ RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
+ psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
+ break;
+ }
+ break;
}
default:
return -EINVAL;
}
- //total number of Register that has to be dumped
- value =RegCount ;
+	/* total number of registers to be dumped */
+ value = RegCount;
retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value, sizeof(value));
- if(retval)
- {
+ if (retval) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
return retval;
}
ul_ddr_setting_load_addr += sizeof(ULONG);
- /*signature */
- value =(0x1d1e0dd0);
+ /* signature */
+ value = (0x1d1e0dd0);
retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value, sizeof(value));
- if(retval)
- {
+ if (retval) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
return retval;
}
ul_ddr_setting_load_addr += sizeof(ULONG);
- RegCount*=(sizeof(struct bcm_ddr_setting)/sizeof(ULONG));
+ RegCount *= (sizeof(struct bcm_ddr_setting)/sizeof(ULONG));
- while(RegCount && !retval)
- {
- value = psDDRSetting->ulRegAddress ;
- retval = wrmalt( Adapter, ul_ddr_setting_load_addr, &value, sizeof(value));
+ while (RegCount && !retval) {
+ value = psDDRSetting->ulRegAddress;
+ retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value, sizeof(value));
ul_ddr_setting_load_addr += sizeof(ULONG);
- if(!retval)
- {
- if(bOverrideSelfRefresh && (psDDRSetting->ulRegAddress == 0x0F007018))
- {
+ if (!retval) {
+ if (bOverrideSelfRefresh && (psDDRSetting->ulRegAddress == 0x0F007018)) {
value = (psDDRSetting->ulRegValue |(1<<8));
- if(STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr,
- &value, sizeof(value))){
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
- break;
- }
+ if (STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr,
+ &value, sizeof(value))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
+ break;
}
- else
- {
+ } else {
value = psDDRSetting->ulRegValue;
- if(STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr ,
- &value, sizeof(value))){
+				if (STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr,
+ &value, sizeof(value))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
break;
}
@@ -1283,7 +1235,5 @@ int download_ddr_settings(struct bcm_mini_adapter *Adapter)
RegCount--;
psDDRSetting++;
}
- return retval;
+ return retval;
}
-
-
diff --git a/drivers/staging/bcm/InterfaceDld.c b/drivers/staging/bcm/InterfaceDld.c
index 463bdee9dfca..005e4607b260 100644
--- a/drivers/staging/bcm/InterfaceDld.c
+++ b/drivers/staging/bcm/InterfaceDld.c
@@ -20,18 +20,10 @@ int InterfaceFileDownload(PVOID arg, struct file *flp, unsigned int on_chip_loc)
MAX_TRANSFER_CTRL_BYTE_USB, &pos);
set_fs(oldfs);
if (len <= 0) {
- if (len < 0) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_INITEXIT, MP_INIT,
- DBG_LVL_ALL, "len < 0");
+ if (len < 0)
errno = len;
- } else {
+ else
errno = 0;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_INITEXIT, MP_INIT,
- DBG_LVL_ALL,
- "Got end of file!");
- }
break;
}
/* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_INITEXIT, MP_INIT,
@@ -39,12 +31,8 @@ int InterfaceFileDownload(PVOID arg, struct file *flp, unsigned int on_chip_loc)
* MAX_TRANSFER_CTRL_BYTE_USB);
*/
errno = InterfaceWRM(psIntfAdapter, on_chip_loc, buff, len);
- if (errno) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_PRINTK, 0, 0,
- "WRM Failed! status: %d", errno);
+ if (errno)
break;
- }
on_chip_loc += MAX_TRANSFER_CTRL_BYTE_USB;
}
@@ -52,7 +40,8 @@ int InterfaceFileDownload(PVOID arg, struct file *flp, unsigned int on_chip_loc)
return errno;
}
-int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_chip_loc)
+int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp,
+ unsigned int on_chip_loc)
{
char *buff, *buff_readback;
unsigned int reg = 0;
@@ -80,32 +69,28 @@ int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_c
while (1) {
oldfs = get_fs();
set_fs(get_ds());
- len = vfs_read(flp, (void __force __user *)buff, MAX_TRANSFER_CTRL_BYTE_USB, &pos);
+ len = vfs_read(flp, (void __force __user *)buff,
+ MAX_TRANSFER_CTRL_BYTE_USB, &pos);
set_fs(oldfs);
fw_down++;
if (len <= 0) {
- if (len < 0) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len < 0");
+ if (len < 0)
errno = len;
- } else {
+ else
errno = 0;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Got end of file!");
- }
break;
}
- bytes = InterfaceRDM(psIntfAdapter, on_chip_loc, buff_readback, len);
+ bytes = InterfaceRDM(psIntfAdapter, on_chip_loc,
+ buff_readback, len);
if (bytes < 0) {
Status = bytes;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "RDM of len %d Failed! %d", len, reg);
goto exit;
}
reg++;
if ((len-sizeof(unsigned int)) < 4) {
if (memcmp(buff_readback, buff, len)) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Firmware Download is not proper %d", fw_down);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Length is: %d", len);
Status = -EIO;
goto exit;
}
@@ -113,10 +98,8 @@ int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_c
len -= 4;
while (len) {
- if (*(unsigned int *)&buff_readback[len] != *(unsigned int *)&buff[len]) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Firmware Download is not proper %d", fw_down);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Val from Binary %x, Val From Read Back %x ", *(unsigned int *)&buff[len], *(unsigned int*)&buff_readback[len]);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "len =%x!!!", len);
+ if (*(unsigned int *)&buff_readback[len] !=
+ *(unsigned int *)&buff[len]) {
Status = -EIO;
goto exit;
}
@@ -132,13 +115,15 @@ exit:
return Status;
}
-static int bcm_download_config_file(struct bcm_mini_adapter *Adapter, struct bcm_firmware_info *psFwInfo)
+static int bcm_download_config_file(struct bcm_mini_adapter *Adapter,
+ struct bcm_firmware_info *psFwInfo)
{
int retval = STATUS_SUCCESS;
B_UINT32 value = 0;
if (Adapter->pstargetparams == NULL) {
- Adapter->pstargetparams = kmalloc(sizeof(struct bcm_target_params), GFP_KERNEL);
+ Adapter->pstargetparams =
+ kmalloc(sizeof(struct bcm_target_params), GFP_KERNEL);
if (Adapter->pstargetparams == NULL)
return -ENOMEM;
}
@@ -146,7 +131,9 @@ static int bcm_download_config_file(struct bcm_mini_adapter *Adapter, struct bcm
if (psFwInfo->u32FirmwareLength != sizeof(struct bcm_target_params))
return -EIO;
- retval = copy_from_user(Adapter->pstargetparams, psFwInfo->pvMappedFirmwareAddress, psFwInfo->u32FirmwareLength);
+ retval = copy_from_user(Adapter->pstargetparams,
+ psFwInfo->pvMappedFirmwareAddress,
+ psFwInfo->u32FirmwareLength);
if (retval) {
kfree(Adapter->pstargetparams);
Adapter->pstargetparams = NULL;
@@ -160,52 +147,54 @@ static int bcm_download_config_file(struct bcm_mini_adapter *Adapter, struct bcm
BcmInitNVM(Adapter);
retval = InitLedSettings(Adapter);
- if (retval) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "INIT LED Failed\n");
+ if (retval)
return retval;
- }
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ if (Adapter->LEDInfo.led_thread_running &
+ BCM_LED_THREAD_RUNNING_ACTIVELY) {
Adapter->LEDInfo.bLedInitDone = false;
Adapter->DriverState = DRIVER_INIT;
wake_up(&Adapter->LEDInfo.notify_led_event);
}
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ if (Adapter->LEDInfo.led_thread_running &
+ BCM_LED_THREAD_RUNNING_ACTIVELY) {
Adapter->DriverState = FW_DOWNLOAD;
wake_up(&Adapter->LEDInfo.notify_led_event);
}
/* Initialize the DDR Controller */
retval = ddr_init(Adapter);
- if (retval) {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "DDR Init Failed\n");
+ if (retval)
return retval;
- }
value = 0;
- wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value));
- wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value));
+ wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4,
+ &value, sizeof(value));
+ wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8,
+ &value, sizeof(value));
if (Adapter->eNVMType == NVM_FLASH) {
retval = PropagateCalParamsFromFlashToMemory(Adapter);
- if (retval) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "propagaion of cal param failed with status :%d", retval);
+ if (retval)
return retval;
- }
}
- retval = buffDnldVerify(Adapter, (PUCHAR)Adapter->pstargetparams, sizeof(struct bcm_target_params), CONFIG_BEGIN_ADDR);
+ retval = buffDnldVerify(Adapter, (PUCHAR)Adapter->pstargetparams,
+ sizeof(struct bcm_target_params), CONFIG_BEGIN_ADDR);
if (retval)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "configuration file not downloaded properly");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT,
+ MP_INIT, DBG_LVL_ALL,
+ "configuration file not downloaded properly");
else
Adapter->bCfgDownloaded = TRUE;
return retval;
}
-int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, struct bcm_firmware_info *psFwInfo)
+int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter,
+ struct bcm_firmware_info *psFwInfo)
{
int retval = STATUS_SUCCESS;
PUCHAR buff = NULL;
@@ -215,9 +204,9 @@ int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, struct bcm_firmware_
* Application
*/
atomic_set(&Adapter->uiMBupdate, false);
- if (!Adapter->bCfgDownloaded && psFwInfo->u32StartingAddress != CONFIG_BEGIN_ADDR) {
+ if (!Adapter->bCfgDownloaded &&
+ psFwInfo->u32StartingAddress != CONFIG_BEGIN_ADDR) {
/* Can't Download Firmware. */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Download the config File first\n");
return -EINVAL;
}
@@ -226,14 +215,13 @@ int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, struct bcm_firmware_
retval = bcm_download_config_file(Adapter, psFwInfo);
} else {
buff = kzalloc(psFwInfo->u32FirmwareLength, GFP_KERNEL);
- if (buff == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed in allocation memory");
+ if (buff == NULL)
return -ENOMEM;
- }
- retval = copy_from_user(buff, psFwInfo->pvMappedFirmwareAddress, psFwInfo->u32FirmwareLength);
+ retval = copy_from_user(buff,
+ psFwInfo->pvMappedFirmwareAddress,
+ psFwInfo->u32FirmwareLength);
if (retval != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "copying buffer from user space failed");
retval = -EFAULT;
goto error;
}
@@ -243,10 +231,8 @@ int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, struct bcm_firmware_
psFwInfo->u32FirmwareLength,
psFwInfo->u32StartingAddress);
- if (retval != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "f/w download failed status :%d", retval);
+ if (retval != STATUS_SUCCESS)
goto error;
- }
}
error:
@@ -254,7 +240,9 @@ error:
return retval;
}
-static INT buffDnld(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer, UINT u32FirmwareLength, ULONG u32StartingAddress)
+static INT buffDnld(struct bcm_mini_adapter *Adapter,
+ PUCHAR mappedbuffer, UINT u32FirmwareLength,
+ ULONG u32StartingAddress)
{
unsigned int len = 0;
int retval = STATUS_SUCCESS;
@@ -264,10 +252,8 @@ static INT buffDnld(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer, UINT
len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
retval = wrm(Adapter, u32StartingAddress, mappedbuffer, len);
- if (retval) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "wrm failed with status :%d", retval);
+ if (retval)
break;
- }
u32StartingAddress += len;
u32FirmwareLength -= len;
mappedbuffer += len;
@@ -275,17 +261,17 @@ static INT buffDnld(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer, UINT
return retval;
}
-static INT buffRdbkVerify(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer, UINT u32FirmwareLength, ULONG u32StartingAddress)
+static INT buffRdbkVerify(struct bcm_mini_adapter *Adapter,
+ PUCHAR mappedbuffer, UINT u32FirmwareLength,
+ ULONG u32StartingAddress)
{
UINT len = u32FirmwareLength;
INT retval = STATUS_SUCCESS;
PUCHAR readbackbuff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
int bytes;
- if (NULL == readbackbuff) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "MEMORY ALLOCATION FAILED");
+ if (NULL == readbackbuff)
return -ENOMEM;
- }
while (u32FirmwareLength && !retval) {
len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
@@ -293,7 +279,6 @@ static INT buffRdbkVerify(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer,
if (bytes < 0) {
retval = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "rdm failed with status %d", retval);
break;
}
@@ -312,21 +297,22 @@ static INT buffRdbkVerify(struct bcm_mini_adapter *Adapter, PUCHAR mappedbuffer,
return retval;
}
-INT buffDnldVerify(struct bcm_mini_adapter *Adapter, unsigned char *mappedbuffer, unsigned int u32FirmwareLength, unsigned long u32StartingAddress)
+INT buffDnldVerify(struct bcm_mini_adapter *Adapter,
+ unsigned char *mappedbuffer,
+ unsigned int u32FirmwareLength,
+ unsigned long u32StartingAddress)
{
INT status = STATUS_SUCCESS;
- status = buffDnld(Adapter, mappedbuffer, u32FirmwareLength, u32StartingAddress);
- if (status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Buffer download failed");
+ status = buffDnld(Adapter, mappedbuffer,
+ u32FirmwareLength, u32StartingAddress);
+ if (status != STATUS_SUCCESS)
goto error;
- }
- status = buffRdbkVerify(Adapter, mappedbuffer, u32FirmwareLength, u32StartingAddress);
- if (status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Buffer readback verifier failed");
+ status = buffRdbkVerify(Adapter, mappedbuffer,
+ u32FirmwareLength, u32StartingAddress);
+ if (status != STATUS_SUCCESS)
goto error;
- }
error:
return status;
}
diff --git a/drivers/staging/bcm/InterfaceIdleMode.c b/drivers/staging/bcm/InterfaceIdleMode.c
index 5959fbdcd1be..fecf81ffe066 100644
--- a/drivers/staging/bcm/InterfaceIdleMode.c
+++ b/drivers/staging/bcm/InterfaceIdleMode.c
@@ -1,32 +1,37 @@
#include "headers.h"
/*
-Function: InterfaceIdleModeWakeup
+Function: InterfaceIdleModeWakeup
-Description: This is the hardware specific Function for waking up HW device from Idle mode.
- A software abort pattern is written to the device to wake it and necessary power state
- transitions from host are performed here.
+Description: This is the hardware specific Function for
+ waking up HW device from Idle mode.
+ A software abort pattern is written to the
+ device to wake it and necessary power state
+ transitions from host are performed here.
-Input parameters: IN struct bcm_mini_adapter *Adapter - Miniport Adapter Context
+Input parameters: IN struct bcm_mini_adapter *Adapter
+ - Miniport Adapter Context
-
-Return: BCM_STATUS_SUCCESS - If Wakeup of the HW Interface was successful.
- Other - If an error occurred.
+Return: BCM_STATUS_SUCCESS - If Wakeup of the HW Interface
+ was successful.
+ Other - If an error occurred.
*/
-
/*
-Function: InterfaceIdleModeRespond
+Function: InterfaceIdleModeRespond
-Description: This is the hardware specific Function for responding to Idle mode request from target.
- Necessary power state transitions from host for idle mode or other device specific
- initializations are performed here.
+Description: This is the hardware specific Function for
+ responding to Idle mode request from target.
+ Necessary power state transitions from host for
+ idle mode or other device specific initializations
+ are performed here.
-Input parameters: IN struct bcm_mini_adapter * Adapter - Miniport Adapter Context
+Input parameters: IN struct bcm_mini_adapter * Adapter
+ - Miniport Adapter Context
-
-Return: BCM_STATUS_SUCCESS - If Idle mode response related HW configuration was successful.
- Other - If an error occurred.
+Return: BCM_STATUS_SUCCESS - If Idle mode response related
+ HW configuration was successful.
+ Other - If an error occurred.
*/
/*
@@ -36,59 +41,59 @@ this value will be at address bfc02fa4.just before value d0ea1dle.
Set time value by writing at bfc02f98 7d0
checking the Ack timer expire on kannon by running command
-d qcslog .. if it shows e means host has not send response to f/w with in 200 ms. Response should be
+d qcslog .. if it shows e means host has not sent response
+to f/w within 200 ms. Response should be
send to f/w with in 200 ms after the Idle/Shutdown req issued
*/
-int InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter, unsigned int *puiBuffer)
+int InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter,
+ unsigned int *puiBuffer)
{
int status = STATUS_SUCCESS;
unsigned int uiRegRead = 0;
int bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "SubType of Message :0x%X", ntohl(*puiBuffer));
-
if (ntohl(*puiBuffer) == GO_TO_IDLE_MODE_PAYLOAD) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, " Got GO_TO_IDLE_MODE_PAYLOAD(210) Msg Subtype");
- if (ntohl(*(puiBuffer+1)) == 0 ) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Got IDLE MODE WAKE UP Response From F/W");
+ if (ntohl(*(puiBuffer+1)) == 0) {
- status = wrmalt (Adapter, SW_ABORT_IDLEMODE_LOC, &uiRegRead, sizeof(uiRegRead));
- if (status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "wrm failed while clearing Idle Mode Reg");
+ status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC,
+ &uiRegRead, sizeof(uiRegRead));
+ if (status)
return status;
- }
- if (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
- uiRegRead = 0x00000000 ;
- status = wrmalt (Adapter, DEBUG_INTERRUPT_GENERATOR_REGISTOR, &uiRegRead, sizeof(uiRegRead));
- if (status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "wrm failed while clearing Idle Mode Reg");
+ if (Adapter->ulPowerSaveMode ==
+ DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
+ uiRegRead = 0x00000000;
+ status = wrmalt(Adapter,
+ DEBUG_INTERRUPT_GENERATOR_REGISTOR,
+ &uiRegRead, sizeof(uiRegRead));
+ if (status)
return status;
- }
}
- /* Below Register should not br read in case of Manual and Protocol Idle mode */
- else if (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
+			/* Below register should not be read in case of
+			 * Manual and Protocol Idle mode */
+ else if (Adapter->ulPowerSaveMode !=
+ DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
/* clear on read Register */
- bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, &uiRegRead, sizeof(uiRegRead));
+ bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0,
+ &uiRegRead, sizeof(uiRegRead));
if (bytes < 0) {
status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm failed while clearing H/W Abort Reg0");
return status;
}
/* clear on read Register */
- bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1, &uiRegRead, sizeof(uiRegRead));
+ bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1,
+ &uiRegRead, sizeof(uiRegRead));
if (bytes < 0) {
status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm failed while clearing H/W Abort Reg1");
return status;
}
}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Device Up from Idle Mode");
- /* Set Idle Mode Flag to False and Clear IdleMode reg. */
+ /* Set Idle Mode Flag to False and
+ * Clear IdleMode reg. */
Adapter->IdleMode = false;
Adapter->bTriedToWakeUpFromlowPowerMode = false;
@@ -96,124 +101,123 @@ int InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter, unsigned int *pui
} else {
if (TRUE == Adapter->IdleMode)
- {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Device is already in Idle mode....");
- return status ;
- }
+ return status;
uiRegRead = 0;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Got Req from F/W to go in IDLE mode \n");
if (Adapter->chip_id == BCS220_2 ||
Adapter->chip_id == BCS220_2BC ||
Adapter->chip_id == BCS250_BC ||
Adapter->chip_id == BCS220_3) {
- bytes = rdmalt(Adapter, HPM_CONFIG_MSW, &uiRegRead, sizeof(uiRegRead));
+ bytes = rdmalt(Adapter, HPM_CONFIG_MSW,
+ &uiRegRead, sizeof(uiRegRead));
if (bytes < 0) {
status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "rdm failed while Reading HPM_CONFIG_LDO145 Reg 0\n");
return status;
}
uiRegRead |= (1<<17);
- status = wrmalt (Adapter, HPM_CONFIG_MSW, &uiRegRead, sizeof(uiRegRead));
- if (status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "wrm failed while clearing Idle Mode Reg\n");
+ status = wrmalt(Adapter, HPM_CONFIG_MSW,
+ &uiRegRead, sizeof(uiRegRead));
+ if (status)
return status;
- }
-
}
SendIdleModeResponse(Adapter);
}
} else if (ntohl(*puiBuffer) == IDLE_MODE_SF_UPDATE_MSG) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "OverRiding Service Flow Params");
OverrideServiceFlowParams(Adapter, puiBuffer);
}
return status;
}
-static int InterfaceAbortIdlemode(struct bcm_mini_adapter *Adapter, unsigned int Pattern)
+static int InterfaceAbortIdlemode(struct bcm_mini_adapter *Adapter,
+ unsigned int Pattern)
{
- int status = STATUS_SUCCESS;
+ int status = STATUS_SUCCESS;
unsigned int value;
- unsigned int chip_id ;
+ unsigned int chip_id;
unsigned long timeout = 0, itr = 0;
- int lenwritten = 0;
- unsigned char aucAbortPattern[8] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
- struct bcm_interface_adapter *psInterfaceAdapter = Adapter->pvInterfaceAdapter;
+ int lenwritten = 0;
+ unsigned char aucAbortPattern[8] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF};
+ struct bcm_interface_adapter *psInterfaceAdapter =
+ Adapter->pvInterfaceAdapter;
/* Abort Bus suspend if its already suspended */
- if ((TRUE == psInterfaceAdapter->bSuspended) && (TRUE == Adapter->bDoSuspend)) {
- status = usb_autopm_get_interface(psInterfaceAdapter->interface);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Bus got wakeup..Aborting Idle mode... status:%d \n", status);
-
- }
-
- if ((Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING)
- ||
- (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)) {
+ if ((TRUE == psInterfaceAdapter->bSuspended) &&
+ (TRUE == Adapter->bDoSuspend))
+ status = usb_autopm_get_interface(
+ psInterfaceAdapter->interface);
+
+ if ((Adapter->ulPowerSaveMode ==
+ DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) ||
+ (Adapter->ulPowerSaveMode ==
+ DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)) {
/* write the SW abort pattern. */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Writing pattern<%d> to SW_ABORT_IDLEMODE_LOC\n", Pattern);
- status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, &Pattern, sizeof(Pattern));
- if (status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "WRM to Register SW_ABORT_IDLEMODE_LOC failed..");
- return status;
- }
+ status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC,
+ &Pattern, sizeof(Pattern));
+ if (status)
+ return status;
}
- if (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
+ if (Adapter->ulPowerSaveMode ==
+ DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
value = 0x80000000;
- status = wrmalt(Adapter, DEBUG_INTERRUPT_GENERATOR_REGISTOR, &value, sizeof(value));
+ status = wrmalt(Adapter,
+ DEBUG_INTERRUPT_GENERATOR_REGISTOR,
+ &value, sizeof(value));
if (status)
- {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "WRM to DEBUG_INTERRUPT_GENERATOR_REGISTOR Register failed");
return status;
- }
- } else if (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
+ } else if (Adapter->ulPowerSaveMode !=
+ DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
/*
* Get a Interrupt Out URB and send 8 Bytes Down
* To be Done in Thread Context.
* Not using Asynchronous Mechanism.
*/
- status = usb_interrupt_msg (psInterfaceAdapter->udev,
+ status = usb_interrupt_msg(psInterfaceAdapter->udev,
usb_sndintpipe(psInterfaceAdapter->udev,
psInterfaceAdapter->sIntrOut.int_out_endpointAddr),
aucAbortPattern,
8,
&lenwritten,
5000);
- if (status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Sending Abort pattern down fails with status:%d..\n", status);
+ if (status)
return status;
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "NOB Sent down :%d", lenwritten);
- }
+ else
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ IDLE_MODE, DBG_LVL_ALL,
+ "NOB Sent down :%d", lenwritten);
/* mdelay(25); */
- timeout = jiffies + msecs_to_jiffies(50) ;
- while ( timeout > jiffies ) {
- itr++ ;
+ timeout = jiffies + msecs_to_jiffies(50);
+ while (time_after(timeout, jiffies)) {
+ itr++;
rdmalt(Adapter, CHIP_ID_REG, &chip_id, sizeof(UINT));
if (0xbece3200 == (chip_id&~(0xF0)))
chip_id = chip_id&~(0xF0);
if (chip_id == Adapter->chip_id)
break;
}
- if (timeout < jiffies )
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Not able to read chip-id even after 25 msec");
+ if (time_before(timeout, jiffies))
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ IDLE_MODE, DBG_LVL_ALL,
+ "Not able to read chip-id even after 25 msec");
else
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Number of completed iteration to read chip-id :%lu", itr);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ IDLE_MODE, DBG_LVL_ALL,
+				       "Number of completed iterations to"
+				       " read chip-id :%lu", itr);
- status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, &Pattern, sizeof(status));
- if (status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to Register SW_ABORT_IDLEMODE_LOC failed..");
+ status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC,
+ &Pattern, sizeof(status));
+ if (status)
return status;
- }
}
return status;
}
@@ -221,9 +225,10 @@ int InterfaceIdleModeWakeup(struct bcm_mini_adapter *Adapter)
{
ULONG Status = 0;
if (Adapter->bTriedToWakeUpFromlowPowerMode) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Wake up already attempted.. ignoring\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ IDLE_MODE, DBG_LVL_ALL,
+ "Wake up already attempted.. ignoring\n");
} else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Writing Low Power Mode Abort pattern to the Device\n");
Adapter->bTriedToWakeUpFromlowPowerMode = TRUE;
InterfaceAbortIdlemode(Adapter, Adapter->usIdleModePattern);
@@ -237,30 +242,33 @@ void InterfaceHandleShutdownModeWakeup(struct bcm_mini_adapter *Adapter)
INT Status = 0;
int bytes;
- if (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
+ if (Adapter->ulPowerSaveMode ==
+ DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
/* clear idlemode interrupt. */
uiRegVal = 0;
- Status = wrmalt(Adapter, DEBUG_INTERRUPT_GENERATOR_REGISTOR, &uiRegVal, sizeof(uiRegVal));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,"WRM to DEBUG_INTERRUPT_GENERATOR_REGISTOR Failed with err :%d", Status);
+ Status = wrmalt(Adapter,
+ DEBUG_INTERRUPT_GENERATOR_REGISTOR,
+ &uiRegVal, sizeof(uiRegVal));
+ if (Status)
return;
- }
}
- else {
+	else {
- /* clear Interrupt EP registers. */
- bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, &uiRegVal, sizeof(uiRegVal));
+		/* clear Interrupt EP registers. */
+ bytes = rdmalt(Adapter,
+ DEVICE_INT_OUT_EP_REG0,
+ &uiRegVal, sizeof(uiRegVal));
if (bytes < 0) {
Status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM of DEVICE_INT_OUT_EP_REG0 failed with Err :%d", Status);
return;
}
- bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1, &uiRegVal, sizeof(uiRegVal));
+ bytes = rdmalt(Adapter,
+ DEVICE_INT_OUT_EP_REG1,
+ &uiRegVal, sizeof(uiRegVal));
if (bytes < 0) {
Status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM of DEVICE_INT_OUT_EP_REG1 failed with Err :%d", Status);
return;
}
}
diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
index 3acdb58a10f5..94f32728f7c8 100644
--- a/drivers/staging/bcm/InterfaceInit.c
+++ b/drivers/staging/bcm/InterfaceInit.c
@@ -69,7 +69,7 @@ static void InterfaceAdapterFree(struct bcm_interface_adapter *psIntfAdapter)
static void ConfigureEndPointTypesThroughEEPROM(struct bcm_mini_adapter *Adapter)
{
- unsigned long ulReg = 0;
+ u32 ulReg;
int bytes;
/* Program EP2 MAX_PKT_SIZE */
@@ -96,7 +96,7 @@ static void ConfigureEndPointTypesThroughEEPROM(struct bcm_mini_adapter *Adapter
BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x140, 4, TRUE);
/* Program TX EP as interrupt(Alternate Setting) */
- bytes = rdmalt(Adapter, 0x0F0110F8, (u32 *)&ulReg, sizeof(u32));
+ bytes = rdmalt(Adapter, 0x0F0110F8, &ulReg, sizeof(u32));
if (bytes < 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
"reading of Tx EP failed\n");
@@ -119,18 +119,18 @@ static void ConfigureEndPointTypesThroughEEPROM(struct bcm_mini_adapter *Adapter
* Update EEPROM Version.
* Read 4 bytes from 508 and modify 511 and 510.
*/
- ReadBeceemEEPROM(Adapter, 0x1FC, (PUINT)&ulReg);
+ ReadBeceemEEPROM(Adapter, 0x1FC, &ulReg);
ulReg &= 0x0101FFFF;
BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1FC, 4, TRUE);
/* Update length field if required. Also make the string NULL terminated. */
- ReadBeceemEEPROM(Adapter, 0xA8, (PUINT)&ulReg);
+ ReadBeceemEEPROM(Adapter, 0xA8, &ulReg);
if ((ulReg&0x00FF0000)>>16 > 0x30) {
ulReg = (ulReg&0xFF00FFFF)|(0x30<<16);
BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0xA8, 4, TRUE);
}
- ReadBeceemEEPROM(Adapter, 0x148, (PUINT)&ulReg);
+ ReadBeceemEEPROM(Adapter, 0x148, &ulReg);
if ((ulReg&0x00FF0000)>>16 > 0x30) {
ulReg = (ulReg&0xFF00FFFF)|(0x30<<16);
BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x148, 4, TRUE);
diff --git a/drivers/staging/bcm/InterfaceRx.c b/drivers/staging/bcm/InterfaceRx.c
index f2973f5e503a..11008173f915 100644
--- a/drivers/staging/bcm/InterfaceRx.c
+++ b/drivers/staging/bcm/InterfaceRx.c
@@ -1,11 +1,11 @@
#include "headers.h"
-static int SearchVcid(struct bcm_mini_adapter *Adapter,unsigned short usVcid)
+static int SearchVcid(struct bcm_mini_adapter *Adapter, unsigned short usVcid)
{
- int iIndex=0;
+ int iIndex = 0;
- for(iIndex=(NO_OF_QUEUES-1);iIndex>=0;iIndex--)
- if(Adapter->PackInfo[iIndex].usVCID_Value == usVcid)
+ for (iIndex = (NO_OF_QUEUES-1); iIndex >= 0; iIndex--)
+ if (Adapter->PackInfo[iIndex].usVCID_Value == usVcid)
return iIndex;
return NO_OF_QUEUES+1;
@@ -18,15 +18,14 @@ GetBulkInRcb(struct bcm_interface_adapter *psIntfAdapter)
struct bcm_usb_rcb *pRcb = NULL;
UINT index = 0;
- if((atomic_read(&psIntfAdapter->uNumRcbUsed) < MAXIMUM_USB_RCB) &&
- (psIntfAdapter->psAdapter->StopAllXaction == false))
- {
+ if ((atomic_read(&psIntfAdapter->uNumRcbUsed) < MAXIMUM_USB_RCB) &&
+ (psIntfAdapter->psAdapter->StopAllXaction == false)) {
index = atomic_read(&psIntfAdapter->uCurrRcb);
pRcb = &psIntfAdapter->asUsbRcb[index];
pRcb->bUsed = TRUE;
- pRcb->psIntfAdapter= psIntfAdapter;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Got Rx desc %d used %d",
- index, atomic_read(&psIntfAdapter->uNumRcbUsed));
+ pRcb->psIntfAdapter = psIntfAdapter;
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Got Rx desc %d used %d",
+ index, atomic_read(&psIntfAdapter->uNumRcbUsed));
index = (index + 1) % MAXIMUM_USB_RCB;
atomic_set(&psIntfAdapter->uCurrRcb, index);
atomic_inc(&psIntfAdapter->uNumRcbUsed);
@@ -40,9 +39,8 @@ static void read_bulk_callback(struct urb *urb)
struct sk_buff *skb = NULL;
bool bHeaderSupressionEnabled = false;
int QueueIndex = NO_OF_QUEUES + 1;
- UINT uiIndex=0;
+ UINT uiIndex = 0;
int process_done = 1;
- //int idleflag = 0 ;
struct bcm_usb_rcb *pRcb = (struct bcm_usb_rcb *)urb->context;
struct bcm_interface_adapter *psIntfAdapter = pRcb->psIntfAdapter;
struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter;
@@ -52,49 +50,40 @@ static void read_bulk_callback(struct urb *urb)
pr_info(PFX "%s: rx urb status %d length %d\n",
Adapter->dev->name, urb->status, urb->actual_length);
- if((Adapter->device_removed == TRUE) ||
- (TRUE == Adapter->bEndPointHalted) ||
- (0 == urb->actual_length)
- )
- {
- pRcb->bUsed = false;
- atomic_dec(&psIntfAdapter->uNumRcbUsed);
+ if ((Adapter->device_removed == TRUE) ||
+ (TRUE == Adapter->bEndPointHalted) ||
+ (0 == urb->actual_length)) {
+ pRcb->bUsed = false;
+ atomic_dec(&psIntfAdapter->uNumRcbUsed);
return;
}
- if(urb->status != STATUS_SUCCESS)
- {
- if(urb->status == -EPIPE)
- {
- Adapter->bEndPointHalted = TRUE ;
+ if (urb->status != STATUS_SUCCESS) {
+ if (urb->status == -EPIPE) {
+ Adapter->bEndPointHalted = TRUE;
wake_up(&Adapter->tx_packet_wait_queue);
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,"Rx URB has got cancelled. status :%d", urb->status);
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Rx URB has got cancelled. status :%d", urb->status);
}
pRcb->bUsed = false;
- atomic_dec(&psIntfAdapter->uNumRcbUsed);
- urb->status = STATUS_SUCCESS ;
- return ;
+ atomic_dec(&psIntfAdapter->uNumRcbUsed);
+ urb->status = STATUS_SUCCESS;
+ return;
}
- if(Adapter->bDoSuspend && (Adapter->bPreparingForLowPowerMode))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,"device is going in low power mode while PMU option selected..hence rx packet should not be process");
- return ;
+ if (Adapter->bDoSuspend && (Adapter->bPreparingForLowPowerMode)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "device is going in low power mode while PMU option selected..hence rx packet should not be process");
+ return;
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Read back done len %d\n", pLeader->PLength);
- if(!pLeader->PLength)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Length 0");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Read back done len %d\n", pLeader->PLength);
+ if (!pLeader->PLength) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Length 0");
atomic_dec(&psIntfAdapter->uNumRcbUsed);
return;
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Status:0x%hX, Length:0x%hX, VCID:0x%hX", pLeader->Status,pLeader->PLength,pLeader->Vcid);
- if(MAX_CNTL_PKT_SIZE < pLeader->PLength)
- {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Status:0x%hX, Length:0x%hX, VCID:0x%hX", pLeader->Status, pLeader->PLength, pLeader->Vcid);
+ if (MAX_CNTL_PKT_SIZE < pLeader->PLength) {
if (netif_msg_rx_err(Adapter))
pr_info(PFX "%s: corrupted leader length...%d\n",
Adapter->dev->name, pLeader->PLength);
@@ -103,65 +92,58 @@ static void read_bulk_callback(struct urb *urb)
return;
}
- QueueIndex = SearchVcid( Adapter,pLeader->Vcid);
- if(QueueIndex < NO_OF_QUEUES)
- {
+ QueueIndex = SearchVcid(Adapter, pLeader->Vcid);
+ if (QueueIndex < NO_OF_QUEUES) {
bHeaderSupressionEnabled =
Adapter->PackInfo[QueueIndex].bHeaderSuppressionEnabled;
bHeaderSupressionEnabled =
bHeaderSupressionEnabled & Adapter->bPHSEnabled;
}
- skb = dev_alloc_skb (pLeader->PLength + SKB_RESERVE_PHS_BYTES + SKB_RESERVE_ETHERNET_HEADER);//2 //2 for allignment
- if(!skb)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "NO SKBUFF!!! Dropping the Packet");
+ skb = dev_alloc_skb(pLeader->PLength + SKB_RESERVE_PHS_BYTES + SKB_RESERVE_ETHERNET_HEADER);
+ if (!skb) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "NO SKBUFF!!! Dropping the Packet");
atomic_dec(&psIntfAdapter->uNumRcbUsed);
return;
}
- /* If it is a control Packet, then call handle_bcm_packet ()*/
- if((ntohs(pLeader->Vcid) == VCID_CONTROL_PACKET) ||
- (!(pLeader->Status >= 0x20 && pLeader->Status <= 0x3F)))
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_CTRL, DBG_LVL_ALL, "Received control pkt...");
+ /* If it is a control Packet, then call handle_bcm_packet ()*/
+ if ((ntohs(pLeader->Vcid) == VCID_CONTROL_PACKET) ||
+ (!(pLeader->Status >= 0x20 && pLeader->Status <= 0x3F))) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_CTRL, DBG_LVL_ALL, "Received control pkt...");
*(PUSHORT)skb->data = pLeader->Status;
- memcpy(skb->data+sizeof(USHORT), urb->transfer_buffer +
- (sizeof(struct bcm_leader)), pLeader->PLength);
+ memcpy(skb->data+sizeof(USHORT), urb->transfer_buffer +
+ (sizeof(struct bcm_leader)), pLeader->PLength);
skb->len = pLeader->PLength + sizeof(USHORT);
spin_lock(&Adapter->control_queue_lock);
- ENQUEUEPACKET(Adapter->RxControlHead,Adapter->RxControlTail,skb);
+ ENQUEUEPACKET(Adapter->RxControlHead, Adapter->RxControlTail, skb);
spin_unlock(&Adapter->control_queue_lock);
atomic_inc(&Adapter->cntrlpktCnt);
wake_up(&Adapter->process_rx_cntrlpkt);
- }
- else
- {
+ } else {
/*
- * Data Packet, Format a proper Ethernet Header
- * and give it to the stack
- */
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Received Data pkt...");
+ * Data Packet, Format a proper Ethernet Header
+ * and give it to the stack
+ */
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Received Data pkt...");
skb_reserve(skb, 2 + SKB_RESERVE_PHS_BYTES);
memcpy(skb->data+ETH_HLEN, (PUCHAR)urb->transfer_buffer + sizeof(struct bcm_leader), pLeader->PLength);
skb->dev = Adapter->dev;
/* currently skb->len has extra ETH_HLEN bytes in the beginning */
- skb_put (skb, pLeader->PLength + ETH_HLEN);
- Adapter->PackInfo[QueueIndex].uiTotalRxBytes+=pLeader->PLength;
- Adapter->PackInfo[QueueIndex].uiThisPeriodRxBytes+= pLeader->PLength;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Received Data pkt of len :0x%X", pLeader->PLength);
+ skb_put(skb, pLeader->PLength + ETH_HLEN);
+ Adapter->PackInfo[QueueIndex].uiTotalRxBytes += pLeader->PLength;
+ Adapter->PackInfo[QueueIndex].uiThisPeriodRxBytes += pLeader->PLength;
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Received Data pkt of len :0x%X", pLeader->PLength);
- if(netif_running(Adapter->dev))
- {
+ if (netif_running(Adapter->dev)) {
/* Moving ahead by ETH_HLEN to the data ptr as received from FW */
skb_pull(skb, ETH_HLEN);
PHSReceive(Adapter, pLeader->Vcid, skb, &skb->len,
- NULL,bHeaderSupressionEnabled);
+ NULL, bHeaderSupressionEnabled);
- if(!Adapter->PackInfo[QueueIndex].bEthCSSupport)
- {
+ if (!Adapter->PackInfo[QueueIndex].bEthCSSupport) {
skb_push(skb, ETH_HLEN);
memcpy(skb->data, skb->dev->dev_addr, 6);
@@ -169,29 +151,26 @@ static void read_bulk_callback(struct urb *urb)
(*(skb->data+11))++;
*(skb->data+12) = 0x08;
*(skb->data+13) = 0x00;
- pLeader->PLength+=ETH_HLEN;
+ pLeader->PLength += ETH_HLEN;
}
skb->protocol = eth_type_trans(skb, Adapter->dev);
process_done = netif_rx(skb);
- }
- else
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "i/f not up hance freeing SKB...");
+ } else {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "i/f not up hance freeing SKB...");
dev_kfree_skb(skb);
}
++Adapter->dev->stats.rx_packets;
Adapter->dev->stats.rx_bytes += pLeader->PLength;
- for(uiIndex = 0 ; uiIndex < MIBS_MAX_HIST_ENTRIES ; uiIndex++)
- {
- if((pLeader->PLength <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1))
- && (pLeader->PLength > MIBS_PKTSIZEHIST_RANGE*(uiIndex)))
+ for (uiIndex = 0; uiIndex < MIBS_MAX_HIST_ENTRIES; uiIndex++) {
+ if ((pLeader->PLength <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1)) &&
+ (pLeader->PLength > MIBS_PKTSIZEHIST_RANGE*(uiIndex)))
Adapter->aRxPktSizeHist[uiIndex]++;
}
}
- Adapter->PrevNumRecvDescs++;
+ Adapter->PrevNumRecvDescs++;
pRcb->bUsed = false;
atomic_dec(&psIntfAdapter->uNumRcbUsed);
}
@@ -201,23 +180,18 @@ static int ReceiveRcb(struct bcm_interface_adapter *psIntfAdapter, struct bcm_us
struct urb *urb = pRcb->urb;
int retval = 0;
- usb_fill_bulk_urb(urb, psIntfAdapter->udev, usb_rcvbulkpipe(
- psIntfAdapter->udev, psIntfAdapter->sBulkIn.bulk_in_endpointAddr),
- urb->transfer_buffer, BCM_USB_MAX_READ_LENGTH, read_bulk_callback,
- pRcb);
- if(false == psIntfAdapter->psAdapter->device_removed &&
- false == psIntfAdapter->psAdapter->bEndPointHalted &&
- false == psIntfAdapter->bSuspended &&
- false == psIntfAdapter->bPreparingForBusSuspend)
- {
+ usb_fill_bulk_urb(urb, psIntfAdapter->udev, usb_rcvbulkpipe(psIntfAdapter->udev, psIntfAdapter->sBulkIn.bulk_in_endpointAddr),
+ urb->transfer_buffer, BCM_USB_MAX_READ_LENGTH, read_bulk_callback, pRcb);
+ if (false == psIntfAdapter->psAdapter->device_removed &&
+ false == psIntfAdapter->psAdapter->bEndPointHalted &&
+ false == psIntfAdapter->bSuspended &&
+ false == psIntfAdapter->bPreparingForBusSuspend) {
retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "failed submitting read urb, error %d", retval);
- //if this return value is because of pipe halt. need to clear this.
- if(retval == -EPIPE)
- {
- psIntfAdapter->psAdapter->bEndPointHalted = TRUE ;
+ if (retval) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "failed submitting read urb, error %d", retval);
+ /* if this return value is because of pipe halt. need to clear this. */
+ if (retval == -EPIPE) {
+ psIntfAdapter->psAdapter->bEndPointHalted = TRUE;
wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
}
@@ -240,25 +214,20 @@ Return: TRUE - If Rx was successful.
Other - If an error occurred.
*/
-bool InterfaceRx (struct bcm_interface_adapter *psIntfAdapter)
+bool InterfaceRx(struct bcm_interface_adapter *psIntfAdapter)
{
USHORT RxDescCount = NUM_RX_DESC - atomic_read(&psIntfAdapter->uNumRcbUsed);
struct bcm_usb_rcb *pRcb = NULL;
-// RxDescCount = psIntfAdapter->psAdapter->CurrNumRecvDescs -
-// psIntfAdapter->psAdapter->PrevNumRecvDescs;
- while(RxDescCount)
- {
+ while (RxDescCount) {
pRcb = GetBulkInRcb(psIntfAdapter);
- if(pRcb == NULL)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Unable to get Rcb pointer");
+ if (pRcb == NULL) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "Unable to get Rcb pointer");
return false;
}
- //atomic_inc(&psIntfAdapter->uNumRcbUsed);
ReceiveRcb(psIntfAdapter, pRcb);
RxDescCount--;
- }
+ }
return TRUE;
}
diff --git a/drivers/staging/bcm/InterfaceTx.c b/drivers/staging/bcm/InterfaceTx.c
index b9c2784e9811..ea7707b8e60e 100644
--- a/drivers/staging/bcm/InterfaceTx.c
+++ b/drivers/staging/bcm/InterfaceTx.c
@@ -3,26 +3,22 @@
/*this is transmit call-back(BULK OUT)*/
static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
{
- struct bcm_usb_tcb *pTcb= (struct bcm_usb_tcb *)urb->context;
+ struct bcm_usb_tcb *pTcb = (struct bcm_usb_tcb *)urb->context;
struct bcm_interface_adapter *psIntfAdapter = pTcb->psIntfAdapter;
struct bcm_link_request *pControlMsg = (struct bcm_link_request *)urb->transfer_buffer;
- struct bcm_mini_adapter *psAdapter = psIntfAdapter->psAdapter ;
- bool bpowerDownMsg = false ;
+ struct bcm_mini_adapter *psAdapter = psIntfAdapter->psAdapter;
+ bool bpowerDownMsg = false;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if (unlikely(netif_msg_tx_done(Adapter)))
- pr_info(PFX "%s: transmit status %d\n", Adapter->dev->name, urb->status);
+ if (unlikely(netif_msg_tx_done(Adapter)))
+ pr_info(PFX "%s: transmit status %d\n", Adapter->dev->name, urb->status);
- if(urb->status != STATUS_SUCCESS)
- {
- if(urb->status == -EPIPE)
- {
- psIntfAdapter->psAdapter->bEndPointHalted = TRUE ;
+ if (urb->status != STATUS_SUCCESS) {
+ if (urb->status == -EPIPE) {
+ psIntfAdapter->psAdapter->bEndPointHalted = TRUE;
wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Tx URB has got cancelled. status :%d", urb->status);
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Tx URB has got cancelled. status :%d", urb->status);
}
}
@@ -31,69 +27,59 @@ static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
- if(TRUE == psAdapter->bPreparingForLowPowerMode)
- {
-
- if(((pControlMsg->szData[0] == GO_TO_IDLE_MODE_PAYLOAD) &&
- (pControlMsg->szData[1] == TARGET_CAN_GO_TO_IDLE_MODE)))
+ if (TRUE == psAdapter->bPreparingForLowPowerMode) {
- {
- bpowerDownMsg = TRUE ;
- //This covers the bus err while Idle Request msg sent down.
- if(urb->status != STATUS_SUCCESS)
- {
- psAdapter->bPreparingForLowPowerMode = false ;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Idle Mode Request msg failed to reach to Modem");
- //Signalling the cntrl pkt path in Ioctl
+ if (((pControlMsg->szData[0] == GO_TO_IDLE_MODE_PAYLOAD) &&
+ (pControlMsg->szData[1] == TARGET_CAN_GO_TO_IDLE_MODE))) {
+ bpowerDownMsg = TRUE;
+ /* This covers the bus err while Idle Request msg sent down. */
+ if (urb->status != STATUS_SUCCESS) {
+ psAdapter->bPreparingForLowPowerMode = false;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Idle Mode Request msg failed to reach to Modem");
+ /* Signalling the cntrl pkt path in Ioctl */
wake_up(&psAdapter->lowpower_mode_wait_queue);
StartInterruptUrb(psIntfAdapter);
goto err_exit;
}
- if(psAdapter->bDoSuspend == false)
- {
+ if (psAdapter->bDoSuspend == false) {
psAdapter->IdleMode = TRUE;
- //since going in Idle mode completed hence making this var false;
- psAdapter->bPreparingForLowPowerMode = false ;
+ /* since going in Idle mode completed hence making this var false */
+ psAdapter->bPreparingForLowPowerMode = false;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Host Entered in Idle Mode State...");
- //Signalling the cntrl pkt path in Ioctl
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Host Entered in Idle Mode State...");
+ /* Signalling the cntrl pkt path in Ioctl*/
wake_up(&psAdapter->lowpower_mode_wait_queue);
}
- }
- else if((pControlMsg->Leader.Status == LINK_UP_CONTROL_REQ) &&
+ } else if ((pControlMsg->Leader.Status == LINK_UP_CONTROL_REQ) &&
(pControlMsg->szData[0] == LINK_UP_ACK) &&
(pControlMsg->szData[1] == LINK_SHUTDOWN_REQ_FROM_FIRMWARE) &&
- (pControlMsg->szData[2] == SHUTDOWN_ACK_FROM_DRIVER))
- {
- //This covers the bus err while shutdown Request msg sent down.
- if(urb->status != STATUS_SUCCESS)
- {
- psAdapter->bPreparingForLowPowerMode = false ;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Shutdown Request Msg failed to reach to Modem");
- //Signalling the cntrl pkt path in Ioctl
+ (pControlMsg->szData[2] == SHUTDOWN_ACK_FROM_DRIVER)) {
+ /* This covers the bus err while shutdown Request msg sent down. */
+ if (urb->status != STATUS_SUCCESS) {
+ psAdapter->bPreparingForLowPowerMode = false;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Shutdown Request Msg failed to reach to Modem");
+ /* Signalling the cntrl pkt path in Ioctl */
wake_up(&psAdapter->lowpower_mode_wait_queue);
StartInterruptUrb(psIntfAdapter);
goto err_exit;
}
- bpowerDownMsg = TRUE ;
- if(psAdapter->bDoSuspend == false)
- {
+ bpowerDownMsg = TRUE;
+ if (psAdapter->bDoSuspend == false) {
psAdapter->bShutStatus = TRUE;
- //since going in shutdown mode completed hence making this var false;
- psAdapter->bPreparingForLowPowerMode = false ;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Host Entered in shutdown Mode State...");
- //Signalling the cntrl pkt path in Ioctl
+ /* since going in shutdown mode completed hence making this var false */
+ psAdapter->bPreparingForLowPowerMode = false;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Host Entered in shutdown Mode State...");
+ /* Signalling the cntrl pkt path in Ioctl */
wake_up(&psAdapter->lowpower_mode_wait_queue);
}
}
- if(psAdapter->bDoSuspend && bpowerDownMsg)
- {
- //issuing bus suspend request
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"Issuing the Bus suspend request to USB stack");
+ if (psAdapter->bDoSuspend && bpowerDownMsg) {
+ /* issuing bus suspend request */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Issuing the Bus suspend request to USB stack");
psIntfAdapter->bPreparingForBusSuspend = TRUE;
schedule_work(&psIntfAdapter->usbSuspendWork);
@@ -101,9 +87,9 @@ static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
}
-err_exit :
+err_exit:
usb_free_coherent(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ urb->transfer_buffer, urb->transfer_dma);
}
@@ -112,14 +98,13 @@ static struct bcm_usb_tcb *GetBulkOutTcb(struct bcm_interface_adapter *psIntfAda
struct bcm_usb_tcb *pTcb = NULL;
UINT index = 0;
- if((atomic_read(&psIntfAdapter->uNumTcbUsed) < MAXIMUM_USB_TCB) &&
- (psIntfAdapter->psAdapter->StopAllXaction ==false))
- {
+ if ((atomic_read(&psIntfAdapter->uNumTcbUsed) < MAXIMUM_USB_TCB) &&
+ (psIntfAdapter->psAdapter->StopAllXaction == false)) {
index = atomic_read(&psIntfAdapter->uCurrTcb);
pTcb = &psIntfAdapter->asUsbTcb[index];
pTcb->bUsed = TRUE;
- pTcb->psIntfAdapter= psIntfAdapter;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Got Tx desc %d used %d",
+ pTcb->psIntfAdapter = psIntfAdapter;
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Got Tx desc %d used %d",
index, atomic_read(&psIntfAdapter->uNumTcbUsed));
index = (index + 1) % MAXIMUM_USB_TCB;
atomic_set(&psIntfAdapter->uCurrTcb, index);
@@ -135,44 +120,37 @@ static int TransmitTcb(struct bcm_interface_adapter *psIntfAdapter, struct bcm_u
int retval = 0;
urb->transfer_buffer = usb_alloc_coherent(psIntfAdapter->udev, len,
- GFP_ATOMIC, &urb->transfer_dma);
- if (!urb->transfer_buffer)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Error allocating memory\n");
+ GFP_ATOMIC, &urb->transfer_dma);
+ if (!urb->transfer_buffer) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "Error allocating memory\n");
return -ENOMEM;
}
memcpy(urb->transfer_buffer, data, len);
urb->transfer_buffer_length = len;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Sending Bulk out packet\n");
- //For T3B,INT OUT end point will be used as bulk out end point
- if((psIntfAdapter->psAdapter->chip_id == T3B) && (psIntfAdapter->bHighSpeedDevice == TRUE))
- {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Sending Bulk out packet\n");
+ /* For T3B,INT OUT end point will be used as bulk out end point */
+ if ((psIntfAdapter->psAdapter->chip_id == T3B) && (psIntfAdapter->bHighSpeedDevice == TRUE)) {
usb_fill_int_urb(urb, psIntfAdapter->udev,
- psIntfAdapter->sBulkOut.bulk_out_pipe,
+ psIntfAdapter->sBulkOut.bulk_out_pipe,
urb->transfer_buffer, len, write_bulk_callback, pTcb,
psIntfAdapter->sBulkOut.int_out_interval);
- }
- else
- {
+ } else {
usb_fill_bulk_urb(urb, psIntfAdapter->udev,
psIntfAdapter->sBulkOut.bulk_out_pipe,
urb->transfer_buffer, len, write_bulk_callback, pTcb);
}
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* For DMA transfer */
- if(false == psIntfAdapter->psAdapter->device_removed &&
+ if (false == psIntfAdapter->psAdapter->device_removed &&
false == psIntfAdapter->psAdapter->bEndPointHalted &&
false == psIntfAdapter->bSuspended &&
- false == psIntfAdapter->bPreparingForBusSuspend)
- {
+ false == psIntfAdapter->bPreparingForBusSuspend) {
retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "failed submitting write urb, error %d", retval);
- if(retval == -EPIPE)
- {
- psIntfAdapter->psAdapter->bEndPointHalted = TRUE ;
+ if (retval) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "failed submitting write urb, error %d", retval);
+ if (retval == -EPIPE) {
+ psIntfAdapter->psAdapter->bEndPointHalted = TRUE;
wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
}
}
@@ -182,13 +160,12 @@ static int TransmitTcb(struct bcm_interface_adapter *psIntfAdapter, struct bcm_u
int InterfaceTransmitPacket(PVOID arg, PVOID data, UINT len)
{
- struct bcm_usb_tcb *pTcb= NULL;
+ struct bcm_usb_tcb *pTcb = NULL;
struct bcm_interface_adapter *psIntfAdapter = arg;
- pTcb= GetBulkOutTcb(psIntfAdapter);
- if(pTcb == NULL)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "No URB to transmit packet, dropping packet");
+ pTcb = GetBulkOutTcb(psIntfAdapter);
+ if (pTcb == NULL) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "No URB to transmit packet, dropping packet");
return -EFAULT;
}
return TransmitTcb(psIntfAdapter, pTcb, data, len);
diff --git a/drivers/staging/bcm/PHSModule.c b/drivers/staging/bcm/PHSModule.c
index 892ebc65cdd3..afc7bcc3e54b 100644
--- a/drivers/staging/bcm/PHSModule.c
+++ b/drivers/staging/bcm/PHSModule.c
@@ -1280,11 +1280,11 @@ static int phs_decompress(unsigned char *in_buf,
if (bit == SUPPRESS) {
*out_buf = *phsf;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "\nDECOMP:In phss %d phsf %d ouput %d",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "\nDECOMP:In phss %d phsf %d output %d",
phss, *phsf, *out_buf);
} else {
*out_buf = *in_buf;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "\nDECOMP:In phss %d input %d ouput %d",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "\nDECOMP:In phss %d input %d output %d",
phss, *in_buf, *out_buf);
in_buf++;
size++;
diff --git a/drivers/staging/bcm/Qos.c b/drivers/staging/bcm/Qos.c
index 1609a2bdc522..0727599bf5fa 100644
--- a/drivers/staging/bcm/Qos.c
+++ b/drivers/staging/bcm/Qos.c
@@ -24,7 +24,7 @@ static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex);
*
* Returns - TRUE(If address matches) else FAIL .
*********************************************************************/
-bool MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulSrcIP)
+static bool MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulSrcIP)
{
UCHAR ucLoopIndex = 0;
@@ -58,7 +58,7 @@ bool MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulSr
*
* Returns - TRUE(If address matches) else FAIL .
*********************************************************************/
-bool MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulDestIP)
+static bool MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulDestIP)
{
UCHAR ucLoopIndex = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -91,7 +91,7 @@ bool MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulD
*
* Returns - TRUE(If address matches) else FAIL.
**************************************************************************/
-bool MatchTos(struct bcm_classifier_rule *pstClassifierRule, UCHAR ucTypeOfService)
+static bool MatchTos(struct bcm_classifier_rule *pstClassifierRule, UCHAR ucTypeOfService)
{
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c
index 9e5f955a1a08..fca164f51f4b 100644
--- a/drivers/staging/bcm/nvm.c
+++ b/drivers/staging/bcm/nvm.c
@@ -1355,67 +1355,6 @@ BeceemFlashBulkWriteStatus_EXIT:
}
/*
- * Procedure: PropagateCalParamsFromEEPROMToMemory
- *
- * Description: Dumps the calibration section of EEPROM to DDR.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-int PropagateCalParamsFromEEPROMToMemory(struct bcm_mini_adapter *Adapter)
-{
- PCHAR pBuff = kmalloc(BUFFER_4K, GFP_KERNEL);
- unsigned int uiEepromSize = 0;
- unsigned int uiIndex = 0;
- unsigned int uiBytesToCopy = 0;
- unsigned int uiCalStartAddr = EEPROM_CALPARAM_START;
- unsigned int uiMemoryLoc = EEPROM_CAL_DATA_INTERNAL_LOC;
- unsigned int value;
- int Status = 0;
-
- if (!pBuff)
- return -ENOMEM;
-
- if (0 != BeceemEEPROMBulkRead(Adapter, &uiEepromSize, EEPROM_SIZE_OFFSET, 4)) {
- kfree(pBuff);
- return -1;
- }
-
- uiEepromSize >>= 16;
- if (uiEepromSize > 1024 * 1024) {
- kfree(pBuff);
- return -1;
- }
-
- uiBytesToCopy = MIN(BUFFER_4K, uiEepromSize);
-
- while (uiBytesToCopy) {
- if (0 != BeceemEEPROMBulkRead(Adapter, (PUINT)pBuff, uiCalStartAddr, uiBytesToCopy)) {
- Status = -1;
- break;
- }
- wrm(Adapter, uiMemoryLoc, (PCHAR)(((PULONG)pBuff) + uiIndex), uiBytesToCopy);
- uiMemoryLoc += uiBytesToCopy;
- uiEepromSize -= uiBytesToCopy;
- uiCalStartAddr += uiBytesToCopy;
- uiIndex += uiBytesToCopy / 4;
- uiBytesToCopy = MIN(BUFFER_4K, uiEepromSize);
-
- }
- value = 0xbeadbead;
- wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value));
- value = 0xbeadbead;
- wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value));
- kfree(pBuff);
-
- return Status;
-}
-
-/*
* Procedure: PropagateCalParamsFromFlashToMemory
*
* Description: Dumps the calibration section of EEPROM to DDR.
@@ -2873,7 +2812,7 @@ int BcmFlash2xBulkRead(struct bcm_mini_adapter *Adapter,
SectionStartOffset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal);
if (SectionStartOffset == STATUS_FAILURE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exixt in Flash 2.x Map ", eFlash2xSectionVal);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exist in Flash 2.x Map ", eFlash2xSectionVal);
return -EINVAL;
}
@@ -2936,7 +2875,7 @@ int BcmFlash2xBulkWrite(struct bcm_mini_adapter *Adapter,
FlashSectValStartOffset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectVal);
if (FlashSectValStartOffset == STATUS_FAILURE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exixt in Flash Map 2.x", eFlash2xSectVal);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exist in Flash Map 2.x", eFlash2xSectVal);
return -EINVAL;
}
@@ -3911,7 +3850,7 @@ int validateFlash2xReadWrite(struct bcm_mini_adapter *Adapter, struct bcm_flash2
uiNumOfBytes = psFlash2xReadWrite->numOfBytes;
if (IsSectionExistInFlash(Adapter, psFlash2xReadWrite->Section) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%x> does not exixt in Flash", psFlash2xReadWrite->Section);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%x> does not exist in Flash", psFlash2xReadWrite->Section);
return false;
}
uiSectStartOffset = BcmGetSectionValStartOffset(Adapter, psFlash2xReadWrite->Section);
@@ -3944,6 +3883,15 @@ int validateFlash2xReadWrite(struct bcm_mini_adapter *Adapter, struct bcm_flash2
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "End offset :%x\n", uiSectEndOffset);
+ /* psFlash2xReadWrite->offset and uiNumOfBytes are user controlled and can lead to integer overflows */
+ if (psFlash2xReadWrite->offset > uiSectEndOffset) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid Request....");
+ return false;
+ }
+ if (uiNumOfBytes > uiSectEndOffset) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid Request....");
+ return false;
+ }
/* Checking the boundary condition */
if ((uiSectStartOffset + psFlash2xReadWrite->offset + uiNumOfBytes) <= uiSectEndOffset)
return TRUE;
@@ -4530,13 +4478,13 @@ int IsSectionWritable(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section
int Status = false;
if (IsSectionExistInFlash(Adapter, Section) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section <%d> does not exixt", Section);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section <%d> does not exist", Section);
return false;
}
offset = BcmGetSectionValStartOffset(Adapter, Section);
if (offset == INVALID_OFFSET) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%d> does not exixt", Section);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%d> does not exist", Section);
return false;
}
diff --git a/drivers/staging/btmtk_usb/Kconfig b/drivers/staging/btmtk_usb/Kconfig
deleted file mode 100644
index a425ebda6c7a..000000000000
--- a/drivers/staging/btmtk_usb/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-config USB_BTMTK
- tristate "Mediatek Bluetooth support"
- depends on USB && BT && m
- ---help---
- Say Y here if you wish to control a MTK USB Bluetooth.
-
- This option depends on 'USB' support being enabled
-
- To compile this driver as a module, choose M here: the
- module will be called btmtk_usb.
-
diff --git a/drivers/staging/btmtk_usb/Makefile b/drivers/staging/btmtk_usb/Makefile
deleted file mode 100644
index 4d6c9d764621..000000000000
--- a/drivers/staging/btmtk_usb/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_USB_BTMTK) += btmtk_usb.o
diff --git a/drivers/staging/btmtk_usb/README b/drivers/staging/btmtk_usb/README
deleted file mode 100644
index c046c8e96b2d..000000000000
--- a/drivers/staging/btmtk_usb/README
+++ /dev/null
@@ -1,14 +0,0 @@
--build driver modules
- make
-
--install driver modules
- make install
-
--remove driver modules
- make clean
-
--dynamic debug message
- turn on CONFIG_DYNAMIC_DEBUG compiler flag for current kernel
- mount -t debugfs none /sys/kernel/debug/
- echo "module module_name +p" > /sys/kernel/debug/dynamic_debug/control(turn on debug messages, module name such as btmtk_usb)
- echo "module module_name -p" > /sys/kernel/debug/dynamic_debug/control(turn off debug messages, module name such as btmtk_usb)
diff --git a/drivers/staging/btmtk_usb/TODO b/drivers/staging/btmtk_usb/TODO
deleted file mode 100644
index a71d1297942d..000000000000
--- a/drivers/staging/btmtk_usb/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-TODO:
- - checkpatch.pl clean
- - determine if the driver should not be using a duplicate
- version of the usb-bluetooth interface code, but should
- be merged into the drivers/bluetooth/ directory and
- infrastructure instead.
- - review by the bluetooth developer community
-
-Please send any patches for this driver to Yu-Chen, Cho <acho@suse.com> and
-jay.hung@mediatek.com
diff --git a/drivers/staging/btmtk_usb/btmtk_usb.c b/drivers/staging/btmtk_usb/btmtk_usb.c
deleted file mode 100644
index 9a5ebd6cc512..000000000000
--- a/drivers/staging/btmtk_usb/btmtk_usb.c
+++ /dev/null
@@ -1,1810 +0,0 @@
-/*
- * MediaTek Bluetooth USB Driver
- *
- * Copyright (C) 2013, MediaTek co.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- * or on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-#include <linux/completion.h>
-#include <linux/firmware.h>
-#include <linux/usb.h>
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-
-#include "btmtk_usb.h"
-
-#define VERSION "1.0.4"
-#define MT7650_FIRMWARE "mt7650.bin"
-#define MT7662_FIRMWARE "mt7662.bin"
-
-static struct usb_driver btmtk_usb_driver;
-
-
-static int btmtk_usb_load_rom_patch(struct btmtk_usb_data *);
-static int btmtk_usb_load_fw(struct btmtk_usb_data *);
-
-static void hex_dump(char *str, u8 *src_buf, u32 src_buf_len)
-{
- unsigned char *pt;
- int x;
-
- pt = src_buf;
-
- BT_DBG("%s: %p, len = %d\n", str, src_buf, src_buf_len);
-
- for (x = 0; x < src_buf_len; x++) {
- if (x % 16 == 0)
- BT_DBG("0x%04x : ", x);
- BT_DBG("%02x ", ((unsigned char)pt[x]));
- if (x % 16 == 15)
- BT_DBG("\n");
- }
-
- BT_DBG("\n");
-}
-
-static int btmtk_usb_reset(struct usb_device *udev)
-{
- int ret;
-
- BT_DBG("%s\n", __func__);
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01,
- DEVICE_VENDOR_REQUEST_OUT, 0x01, 0x00,
- NULL, 0x00, CONTROL_TIMEOUT_JIFFIES);
-
- if (ret < 0) {
- BT_ERR("%s error(%d)\n", __func__, ret);
- return ret;
- }
-
- if (ret > 0)
- ret = 0;
-
- return ret;
-}
-
-static int btmtk_usb_io_read32(struct btmtk_usb_data *data, u32 reg, u32 *val)
-{
- u8 request = data->r_request;
- struct usb_device *udev = data->udev;
- int ret;
- __le32 val_le;
-
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
- DEVICE_VENDOR_REQUEST_IN, 0x0, reg, data->io_buf,
- 4, CONTROL_TIMEOUT_JIFFIES);
-
- if (ret < 0) {
- *val = 0xffffffff;
- BT_ERR("%s error(%d), reg=%x, value=%x\n",
- __func__, ret, reg, *val);
- return ret;
- }
-
- memmove(&val_le, data->io_buf, 4);
-
- *val = le32_to_cpu(val_le);
-
- if (ret > 0)
- ret = 0;
-
- return ret;
-}
-
-static int btmtk_usb_io_write32(struct btmtk_usb_data *data, u32 reg, u32 val)
-{
- u16 value, index;
- u8 request = data->w_request;
- struct usb_device *udev = data->udev;
- int ret;
-
- index = (u16)reg;
- value = val & 0x0000ffff;
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
- DEVICE_VENDOR_REQUEST_OUT, value, index,
- NULL, 0, CONTROL_TIMEOUT_JIFFIES);
-
- if (ret < 0) {
- BT_ERR("%s error(%d), reg=%x, value=%x\n",
- __func__, ret, reg, val);
- return ret;
- }
-
- index = (u16)(reg + 2);
- value = (val & 0xffff0000) >> 16;
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- request, DEVICE_VENDOR_REQUEST_OUT,
- value, index, NULL, 0, CONTROL_TIMEOUT_JIFFIES);
-
- if (ret < 0) {
- BT_ERR("%s error(%d), reg=%x, value=%x\n",
- __func__, ret, reg, val);
- return ret;
- }
-
- if (ret > 0)
- ret = 0;
-
- return ret;
-}
-
-static int btmtk_usb_switch_iobase(struct btmtk_usb_data *data, int base)
-{
- int ret = 0;
-
- switch (base) {
- case SYSCTL:
- data->w_request = 0x42;
- data->r_request = 0x47;
- break;
- case WLAN:
- data->w_request = 0x02;
- data->r_request = 0x07;
- break;
-
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static void btmtk_usb_cap_init(struct btmtk_usb_data *data)
-{
- const struct firmware *firmware;
- struct usb_device *udev = data->udev;
- int ret;
-
- btmtk_usb_io_read32(data, 0x00, &data->chip_id);
-
- BT_DBG("chip id = %x\n", data->chip_id);
-
- if (is_mt7630(data) || is_mt7650(data)) {
- data->need_load_fw = 1;
- data->need_load_rom_patch = 0;
- ret = request_firmware(&firmware, MT7650_FIRMWARE, &udev->dev);
- if (ret < 0) {
- if (ret == -ENOENT) {
- BT_ERR("Firmware file \"%s\" not found\n",
- MT7650_FIRMWARE);
- } else {
- BT_ERR("Firmware file \"%s\" request failed (err=%d)\n",
- MT7650_FIRMWARE, ret);
- }
- } else {
- BT_DBG("Firmware file \"%s\" Found\n",
- MT7650_FIRMWARE);
- /* load firmware here */
- data->firmware = firmware;
- btmtk_usb_load_fw(data);
- }
- release_firmware(firmware);
- } else if (is_mt7632(data) || is_mt7662(data)) {
- data->need_load_fw = 0;
- data->need_load_rom_patch = 1;
- data->rom_patch_offset = 0x90000;
- ret = request_firmware(&firmware, MT7662_FIRMWARE, &udev->dev);
- if (ret < 0) {
- if (ret == -ENOENT) {
- BT_ERR("Firmware file \"%s\" not found\n",
- MT7662_FIRMWARE);
- } else {
- BT_ERR("Firmware file \"%s\" request failed (err=%d)\n",
- MT7662_FIRMWARE, ret);
- }
- } else {
- BT_DBG("Firmware file \"%s\" Found\n", MT7662_FIRMWARE);
- /* load rom patch here */
- data->firmware = firmware;
- data->rom_patch_len = firmware->size;
- btmtk_usb_load_rom_patch(data);
- }
- release_firmware(firmware);
- } else {
- BT_ERR("unknow chip(%x)\n", data->chip_id);
- }
-}
-
-static u16 checksume16(u8 *pData, int len)
-{
- int sum = 0;
-
- while (len > 1) {
- sum += *((u16 *)pData);
-
- pData = pData + 2;
-
- if (sum & 0x80000000)
- sum = (sum & 0xFFFF) + (sum >> 16);
-
- len -= 2;
- }
-
- if (len)
- sum += *((u8 *)pData);
-
- while (sum >> 16)
- sum = (sum & 0xFFFF) + (sum >> 16);
-
- return ~sum;
-}
-
-static int btmtk_usb_chk_crc(struct btmtk_usb_data *data, u32 checksum_len)
-{
- int ret = 0;
- struct usb_device *udev = data->udev;
-
- BT_DBG("%s\n", __func__);
-
- memmove(data->io_buf, &data->rom_patch_offset, 4);
- memmove(&data->io_buf[4], &checksum_len, 4);
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x1,
- DEVICE_VENDOR_REQUEST_IN, 0x20, 0x00, data->io_buf,
- 8, CONTROL_TIMEOUT_JIFFIES);
-
- if (ret < 0)
- BT_ERR("%s error(%d)\n", __func__, ret);
-
- return ret;
-}
-
-static u16 btmtk_usb_get_crc(struct btmtk_usb_data *data)
-{
- int ret = 0;
- struct usb_device *udev = data->udev;
- u16 crc, count = 0;
- __le16 crc_le;
-
- BT_DBG("%s\n", __func__);
-
- while (1) {
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- 0x01, DEVICE_VENDOR_REQUEST_IN,
- 0x21, 0x00, data->io_buf, 2,
- CONTROL_TIMEOUT_JIFFIES);
-
- if (ret < 0) {
- crc = 0xFFFF;
- BT_ERR("%s error(%d)\n", __func__, ret);
- }
-
- memmove(&crc_le, data->io_buf, 2);
-
- crc = le16_to_cpu(crc_le);
-
- if (crc != 0xFFFF)
- break;
-
- mdelay(100);
-
- if (count++ > 100) {
- BT_ERR("Query CRC over %d times\n", count);
- break;
- }
- }
-
- return crc;
-}
-
-static int btmtk_usb_reset_wmt(struct btmtk_usb_data *data)
-{
- int ret = 0;
-
- /* reset command */
- u8 cmd[8] = {0x6F, 0xFC, 0x05, 0x01, 0x07, 0x01, 0x00, 0x04};
-
- memmove(data->io_buf, cmd, 8);
-
- BT_DBG("%s\n", __func__);
-
- ret = usb_control_msg(data->udev, usb_sndctrlpipe(data->udev, 0), 0x01,
- DEVICE_CLASS_REQUEST_OUT, 0x12, 0x00,
- data->io_buf, 8, CONTROL_TIMEOUT_JIFFIES);
-
- if (ret)
- BT_ERR("%s:(%d)\n", __func__, ret);
-
- return ret;
-}
-
-static void load_rom_patch_complete(struct urb *urb)
-{
-
- struct completion *sent_to_mcu_done = (struct completion *)urb->context;
-
- complete(sent_to_mcu_done);
-}
-
-static int btmtk_usb_load_rom_patch(struct btmtk_usb_data *data)
-{
- u32 loop = 0;
- u32 value;
- s32 sent_len;
- int ret = 0, total_checksum = 0;
- struct urb *urb;
- u32 patch_len = 0;
- u32 cur_len = 0;
- dma_addr_t data_dma;
- struct completion sent_to_mcu_done;
- int first_block = 1;
- unsigned char phase;
- void *buf;
- char *pos;
- unsigned int pipe;
- pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress);
-
- if (!data->firmware) {
- BT_ERR("%s:please assign a rom patch\n", __func__);
- return -1;
- }
-
-load_patch_protect:
- btmtk_usb_switch_iobase(data, WLAN);
- btmtk_usb_io_read32(data, SEMAPHORE_03, &value);
- loop++;
-
- if (((value & 0x01) == 0x00) && (loop < 600)) {
- mdelay(1);
- goto load_patch_protect;
- }
-
- btmtk_usb_io_write32(data, 0x1004, 0x2c);
-
- btmtk_usb_switch_iobase(data, SYSCTL);
-
- btmtk_usb_io_write32(data, 0x1c, 0x30);
-
- /* Enable USB_DMA_CFG */
- btmtk_usb_io_write32(data, 0x9018, 0x00c00020);
-
- btmtk_usb_switch_iobase(data, WLAN);
-
- /* check ROM patch if upgrade */
- btmtk_usb_io_read32(data, COM_REG0, &value);
-
- if ((value & 0x02) == 0x02)
- goto error0;
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
-
- if (!urb) {
- ret = -ENOMEM;
- goto error0;
- }
-
- buf = usb_alloc_coherent(data->udev, UPLOAD_PATCH_UNIT,
- GFP_ATOMIC, &data_dma);
-
- if (!buf) {
- ret = -ENOMEM;
- goto error1;
- }
-
- pos = buf;
- BT_DBG("loading rom patch");
-
- init_completion(&sent_to_mcu_done);
-
- cur_len = 0x00;
- patch_len = data->rom_patch_len - PATCH_INFO_SIZE;
-
- /* loading rom patch */
- while (1) {
- s32 sent_len_max = UPLOAD_PATCH_UNIT - PATCH_HEADER_SIZE;
- sent_len = min_t(s32, (patch_len - cur_len), sent_len_max);
-
- BT_DBG("patch_len = %d\n", patch_len);
- BT_DBG("cur_len = %d\n", cur_len);
- BT_DBG("sent_len = %d\n", sent_len);
-
- if (sent_len <= 0)
- break;
-
- if (first_block == 1) {
- if (sent_len < sent_len_max)
- phase = PATCH_PHASE3;
- else
- phase = PATCH_PHASE1;
- first_block = 0;
- } else if (sent_len == sent_len_max) {
- phase = PATCH_PHASE2;
- } else {
- phase = PATCH_PHASE3;
- }
-
- /* prepare HCI header */
- pos[0] = 0x6F;
- pos[1] = 0xFC;
- pos[2] = (sent_len + 5) & 0xFF;
- pos[3] = ((sent_len + 5) >> 8) & 0xFF;
-
- /* prepare WMT header */
- pos[4] = 0x01;
- pos[5] = 0x01;
- pos[6] = (sent_len + 1) & 0xFF;
- pos[7] = ((sent_len + 1) >> 8) & 0xFF;
-
- pos[8] = phase;
-
- memcpy(&pos[9],
- data->firmware->data + PATCH_INFO_SIZE + cur_len,
- sent_len);
-
- BT_DBG("sent_len + PATCH_HEADER_SIZE = %d, phase = %d\n",
- sent_len + PATCH_HEADER_SIZE, phase);
-
- usb_fill_bulk_urb(urb,
- data->udev,
- pipe,
- buf,
- sent_len + PATCH_HEADER_SIZE,
- load_rom_patch_complete,
- &sent_to_mcu_done);
-
- urb->transfer_dma = data_dma;
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
- ret = usb_submit_urb(urb, GFP_ATOMIC);
-
- if (ret)
- goto error2;
-
- if (!wait_for_completion_timeout(&sent_to_mcu_done,
- msecs_to_jiffies(1000))) {
- usb_kill_urb(urb);
- BT_ERR("upload rom_patch timeout\n");
- goto error2;
- }
-
- BT_DBG(".");
-
- mdelay(200);
-
- cur_len += sent_len;
-
- }
-
- total_checksum = checksume16(
- (u8 *)data->firmware->data + PATCH_INFO_SIZE,
- patch_len);
-
- BT_DBG("Send checksum req..\n");
-
- btmtk_usb_chk_crc(data, patch_len);
-
- mdelay(20);
-
- if (total_checksum != btmtk_usb_get_crc(data)) {
- BT_ERR("checksum fail!, local(0x%x) <> fw(0x%x)\n",
- total_checksum, btmtk_usb_get_crc(data));
- ret = -1;
- goto error2;
- }
-
- mdelay(20);
-
- ret = btmtk_usb_reset_wmt(data);
-
- mdelay(20);
-
-error2:
- usb_free_coherent(data->udev, UPLOAD_PATCH_UNIT, buf, data_dma);
-error1:
- usb_free_urb(urb);
-error0:
- btmtk_usb_io_write32(data, SEMAPHORE_03, 0x1);
- return ret;
-}
-
-
-static int load_fw_iv(struct btmtk_usb_data *data)
-{
- int ret;
- struct usb_device *udev = data->udev;
- char *buf = kmalloc(64, GFP_ATOMIC);
-
- memmove(buf, data->firmware->data + 32, 64);
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01,
- DEVICE_VENDOR_REQUEST_OUT, 0x12, 0x0, buf, 64,
- CONTROL_TIMEOUT_JIFFIES);
-
- if (ret < 0) {
- BT_ERR("%s error(%d) step4\n", __func__, ret);
- kfree(buf);
- return ret;
- }
-
- if (ret > 0)
- ret = 0;
-
- kfree(buf);
-
- return ret;
-}
-
-static void load_fw_complete(struct urb *urb)
-{
-
- struct completion *sent_to_mcu_done = (struct completion *)urb->context;
-
- complete(sent_to_mcu_done);
-}
-
-static int btmtk_usb_load_fw(struct btmtk_usb_data *data)
-{
- struct usb_device *udev = data->udev;
- struct urb *urb;
- void *buf;
- u32 cur_len = 0;
- u32 packet_header = 0;
- __le32 packet_header_le;
- u32 value;
- u32 ilm_len = 0, dlm_len = 0;
- u16 fw_ver, build_ver;
- u32 loop = 0;
- dma_addr_t data_dma;
- int ret = 0, sent_len;
- struct completion sent_to_mcu_done;
- unsigned int pipe;
- pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress);
-
- if (!data->firmware) {
- BT_ERR("%s:please assign a fw\n", __func__);
- return -1;
- }
-
- BT_DBG("bulk_tx_ep = %x\n", data->bulk_tx_ep->bEndpointAddress);
-
-loadfw_protect:
- btmtk_usb_switch_iobase(data, WLAN);
- btmtk_usb_io_read32(data, SEMAPHORE_00, &value);
- loop++;
-
- if (((value & 0x1) == 0) && (loop < 10000))
- goto loadfw_protect;
-
- /* check MCU if ready */
- btmtk_usb_io_read32(data, COM_REG0, &value);
-
- if ((value & 0x01) == 0x01)
- goto error0;
-
- /* Enable MPDMA TX and EP2 load FW mode */
- btmtk_usb_io_write32(data, 0x238, 0x1c000000);
-
- btmtk_usb_reset(udev);
- mdelay(100);
-
- ilm_len = (*(data->firmware->data + 3) << 24)
- | (*(data->firmware->data + 2) << 16)
- | (*(data->firmware->data + 1) << 8)
- | (*data->firmware->data);
-
- dlm_len = (*(data->firmware->data + 7) << 24)
- | (*(data->firmware->data + 6) << 16)
- | (*(data->firmware->data + 5) << 8)
- | (*(data->firmware->data + 4));
-
- fw_ver = (*(data->firmware->data + 11) << 8) |
- (*(data->firmware->data + 10));
-
- build_ver = (*(data->firmware->data + 9) << 8) |
- (*(data->firmware->data + 8));
-
- BT_DBG("fw version:%d.%d.%02d ",
- (fw_ver & 0xf000) >> 8,
- (fw_ver & 0x0f00) >> 8,
- (fw_ver & 0x00ff));
-
- BT_DBG("build:%x\n", build_ver);
-
- BT_DBG("build Time =");
-
- for (loop = 0; loop < 16; loop++)
- BT_DBG("%c", *(data->firmware->data + 16 + loop));
-
- BT_DBG("\n");
-
- BT_DBG("ILM length = %d(bytes)\n", ilm_len);
- BT_DBG("DLM length = %d(bytes)\n", dlm_len);
-
- btmtk_usb_switch_iobase(data, SYSCTL);
-
- /* U2M_PDMA rx_ring_base_ptr */
- btmtk_usb_io_write32(data, 0x790, 0x400230);
-
- /* U2M_PDMA rx_ring_max_cnt */
- btmtk_usb_io_write32(data, 0x794, 0x1);
-
- /* U2M_PDMA cpu_idx */
- btmtk_usb_io_write32(data, 0x798, 0x1);
-
- /* U2M_PDMA enable */
- btmtk_usb_io_write32(data, 0x704, 0x44);
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
-
- if (!urb) {
- ret = -ENOMEM;
- goto error1;
- }
-
- buf = usb_alloc_coherent(udev, 14592, GFP_ATOMIC, &data_dma);
-
- if (!buf) {
- ret = -ENOMEM;
- goto error2;
- }
-
- BT_DBG("loading fw");
-
- init_completion(&sent_to_mcu_done);
-
- btmtk_usb_switch_iobase(data, SYSCTL);
-
- cur_len = 0x40;
-
- /* Loading ILM */
- while (1) {
- sent_len = min_t(s32, (ilm_len - cur_len), 14336);
-
- if (sent_len > 0) {
- packet_header &= ~(0xffffffff);
- packet_header |= (sent_len << 16);
- packet_header_le = cpu_to_le32(packet_header);
-
- memmove(buf, &packet_header_le, 4);
- memmove(buf + 4, data->firmware->data + 32 + cur_len,
- sent_len);
-
- /* U2M_PDMA descriptor */
- btmtk_usb_io_write32(data, 0x230, cur_len);
-
- while ((sent_len % 4) != 0)
- sent_len++;
-
- /* U2M_PDMA length */
- btmtk_usb_io_write32(data, 0x234, sent_len << 16);
-
- usb_fill_bulk_urb(urb,
- udev,
- pipe,
- buf,
- sent_len + 4,
- load_fw_complete,
- &sent_to_mcu_done);
-
- urb->transfer_dma = data_dma;
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
- ret = usb_submit_urb(urb, GFP_ATOMIC);
-
- if (ret)
- goto error3;
-
- if (!wait_for_completion_timeout(&sent_to_mcu_done,
- msecs_to_jiffies(1000))) {
- usb_kill_urb(urb);
- BT_ERR("upload ilm fw timeout\n");
- goto error3;
- }
-
- BT_DBG(".");
-
- mdelay(200);
-
- cur_len += sent_len;
- } else {
- break;
- }
- }
-
- init_completion(&sent_to_mcu_done);
- cur_len = 0x00;
-
- /* Loading DLM */
- while (1) {
- sent_len = min_t(s32, (dlm_len - cur_len), 14336);
-
- if (sent_len <= 0)
- break;
-
- packet_header &= ~(0xffffffff);
- packet_header |= (sent_len << 16);
- packet_header_le = cpu_to_le32(packet_header);
-
- memmove(buf, &packet_header_le, 4);
- memmove(buf + 4,
- data->firmware->data + 32 + ilm_len + cur_len,
- sent_len);
-
- /* U2M_PDMA descriptor */
- btmtk_usb_io_write32(data, 0x230, 0x80000 + cur_len);
-
- while ((sent_len % 4) != 0) {
- BT_DBG("sent_len is not divided by 4\n");
- sent_len++;
- }
-
- /* U2M_PDMA length */
- btmtk_usb_io_write32(data, 0x234, sent_len << 16);
-
- usb_fill_bulk_urb(urb,
- udev,
- pipe,
- buf,
- sent_len + 4,
- load_fw_complete,
- &sent_to_mcu_done);
-
- urb->transfer_dma = data_dma;
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
- ret = usb_submit_urb(urb, GFP_ATOMIC);
-
- if (ret)
- goto error3;
-
- if (!wait_for_completion_timeout(&sent_to_mcu_done,
- msecs_to_jiffies(1000))) {
- usb_kill_urb(urb);
- BT_ERR("upload dlm fw timeout\n");
- goto error3;
- }
-
- BT_DBG(".");
-
- mdelay(500);
-
- cur_len += sent_len;
-
- }
-
- /* upload 64bytes interrupt vector */
- ret = load_fw_iv(data);
- mdelay(100);
-
- btmtk_usb_switch_iobase(data, WLAN);
-
- /* check MCU if ready */
- loop = 0;
-
- do {
- btmtk_usb_io_read32(data, COM_REG0, &value);
-
- if (value == 0x01)
- break;
-
- mdelay(10);
- loop++;
- } while (loop <= 100);
-
- if (loop > 1000) {
- BT_ERR("wait for 100 times\n");
- ret = -ENODEV;
- }
-
-error3:
- usb_free_coherent(udev, 14592, buf, data_dma);
-error2:
- usb_free_urb(urb);
-error1:
- /* Disbale load fw mode */
- btmtk_usb_io_read32(data, 0x238, &value);
- value = value & ~(0x10000000);
- btmtk_usb_io_write32(data, 0x238, value);
-error0:
- btmtk_usb_io_write32(data, SEMAPHORE_00, 0x1);
- return ret;
-}
-
-static int inc_tx(struct btmtk_usb_data *data)
-{
- unsigned long flags;
- int rv;
-
- spin_lock_irqsave(&data->txlock, flags);
- rv = test_bit(BTUSB_SUSPENDING, &data->flags);
- if (!rv)
- data->tx_in_flight++;
- spin_unlock_irqrestore(&data->txlock, flags);
-
- return rv;
-}
-
-static void btmtk_usb_intr_complete(struct urb *urb)
-{
- struct hci_dev *hdev = urb->context;
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- int err;
-
- BT_DBG("%s: %s urb %p status %d count %d\n", __func__, hdev->name,
- urb, urb->status, urb->actual_length);
-
- if (!test_bit(HCI_RUNNING, &hdev->flags))
- return;
-
- if (urb->status == 0) {
- hdev->stat.byte_rx += urb->actual_length;
-
- hex_dump("hci event", urb->transfer_buffer, urb->actual_length);
-
- if (hci_recv_fragment(hdev, HCI_EVENT_PKT,
- urb->transfer_buffer,
- urb->actual_length) < 0) {
- BT_ERR("%s corrupted event packet", hdev->name);
- hdev->stat.err_rx++;
- }
- }
-
- if (!test_bit(BTUSB_INTR_RUNNING, &data->flags))
- return;
-
- usb_mark_last_busy(data->udev);
- usb_anchor_urb(urb, &data->intr_anchor);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
-
- if (err < 0) {
- /* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
- if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p failed to resubmit (%d)",
- hdev->name, urb, -err);
- usb_unanchor_urb(urb);
- }
-}
-
-static int btmtk_usb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- struct urb *urb;
- unsigned char *buf;
- unsigned int pipe;
- int err, size;
-
- BT_DBG("%s\n", __func__);
-
- if (!data->intr_ep)
- return -ENODEV;
-
- urb = usb_alloc_urb(0, mem_flags);
- if (!urb)
- return -ENOMEM;
-
- size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
-
- buf = kmalloc(size, mem_flags);
- if (!buf) {
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- pipe = usb_rcvintpipe(data->udev, data->intr_ep->bEndpointAddress);
-
- usb_fill_int_urb(urb, data->udev, pipe, buf, size,
- btmtk_usb_intr_complete, hdev,
- data->intr_ep->bInterval);
-
- urb->transfer_flags |= URB_FREE_BUFFER;
-
- usb_anchor_urb(urb, &data->intr_anchor);
-
- err = usb_submit_urb(urb, mem_flags);
- if (err < 0) {
- if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p submission failed (%d)",
- hdev->name, urb, -err);
- usb_unanchor_urb(urb);
- }
-
- usb_free_urb(urb);
-
- return err;
-
-}
-
-static void btmtk_usb_bulk_in_complete(struct urb *urb)
-{
- struct hci_dev *hdev = urb->context;
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- int err;
-
- BT_DBG("%s:%s urb %p status %d count %d", __func__, hdev->name,
- urb, urb->status, urb->actual_length);
-
- if (!test_bit(HCI_RUNNING, &hdev->flags))
- return;
-
- if (urb->status == 0) {
- hdev->stat.byte_rx += urb->actual_length;
-
- if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT,
- urb->transfer_buffer,
- urb->actual_length) < 0) {
- BT_ERR("%s corrupted ACL packet", hdev->name);
- hdev->stat.err_rx++;
- }
- }
-
- if (!test_bit(BTUSB_BULK_RUNNING, &data->flags))
- return;
-
- usb_anchor_urb(urb, &data->bulk_anchor);
- usb_mark_last_busy(data->udev);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0) {
- /* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
- if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p failed to resubmit (%d)",
- hdev->name, urb, -err);
- usb_unanchor_urb(urb);
- }
-}
-
-static int btmtk_usb_submit_bulk_in_urb(struct hci_dev *hdev, gfp_t mem_flags)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- struct urb *urb;
- unsigned char *buf;
- unsigned int pipe;
- int err, size = HCI_MAX_FRAME_SIZE;
-
- BT_DBG("%s:%s\n", __func__, hdev->name);
-
- if (!data->bulk_rx_ep)
- return -ENODEV;
-
- urb = usb_alloc_urb(0, mem_flags);
- if (!urb)
- return -ENOMEM;
-
- buf = kmalloc(size, mem_flags);
- if (!buf) {
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- pipe = usb_rcvbulkpipe(data->udev, data->bulk_rx_ep->bEndpointAddress);
-
- usb_fill_bulk_urb(urb, data->udev, pipe, buf, size,
- btmtk_usb_bulk_in_complete, hdev);
-
- urb->transfer_flags |= URB_FREE_BUFFER;
-
- usb_mark_last_busy(data->udev);
- usb_anchor_urb(urb, &data->bulk_anchor);
-
- err = usb_submit_urb(urb, mem_flags);
- if (err < 0) {
- if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p submission failed (%d)",
- hdev->name, urb, -err);
- usb_unanchor_urb(urb);
- }
-
- usb_free_urb(urb);
-
- return err;
-}
-
-static void btmtk_usb_isoc_in_complete(struct urb *urb)
-
-{
- struct hci_dev *hdev = urb->context;
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- int i, err;
-
- BT_DBG("%s: %s urb %p status %d count %d", __func__, hdev->name,
- urb, urb->status, urb->actual_length);
-
- if (!test_bit(HCI_RUNNING, &hdev->flags))
- return;
-
- if (urb->status == 0) {
- for (i = 0; i < urb->number_of_packets; i++) {
- unsigned int offset = urb->iso_frame_desc[i].offset;
- unsigned int length;
- length = urb->iso_frame_desc[i].actual_length;
-
- if (urb->iso_frame_desc[i].status)
- continue;
-
- hdev->stat.byte_rx += length;
-
- if (hci_recv_fragment(hdev, HCI_SCODATA_PKT,
- urb->transfer_buffer + offset,
- length) < 0) {
- BT_ERR("%s corrupted SCO packet", hdev->name);
- hdev->stat.err_rx++;
- }
- }
- }
-
- if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags))
- return;
-
- usb_anchor_urb(urb, &data->isoc_anchor);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0) {
- /* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
- if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p failed to resubmit (%d)",
- hdev->name, urb, -err);
- usb_unanchor_urb(urb);
- }
-}
-
-static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
-{
- int i, offset = 0;
-
- BT_DBG("len %d mtu %d", len, mtu);
-
- for (i = 0; i < BTUSB_MAX_ISOC_FRAMES && len >= mtu;
- i++, offset += mtu, len -= mtu) {
- urb->iso_frame_desc[i].offset = offset;
- urb->iso_frame_desc[i].length = mtu;
- }
-
- if (len && i < BTUSB_MAX_ISOC_FRAMES) {
- urb->iso_frame_desc[i].offset = offset;
- urb->iso_frame_desc[i].length = len;
- i++;
- }
-
- urb->number_of_packets = i;
-}
-
-static int btmtk_usb_submit_isoc_in_urb(struct hci_dev *hdev, gfp_t mem_flags)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- struct urb *urb;
- unsigned char *buf;
- unsigned int pipe;
- int err, size;
-
- BT_DBG("%s\n", __func__);
-
- if (!data->isoc_rx_ep)
- return -ENODEV;
-
- urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, mem_flags);
- if (!urb)
- return -ENOMEM;
-
- size = le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize) *
- BTUSB_MAX_ISOC_FRAMES;
-
- buf = kmalloc(size, mem_flags);
- if (!buf) {
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress);
-
- usb_fill_int_urb(urb, data->udev, pipe, buf, size,
- btmtk_usb_isoc_in_complete, hdev,
- data->isoc_rx_ep->bInterval);
-
- urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP;
-
- __fill_isoc_descriptor(urb, size,
- le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize));
-
- usb_anchor_urb(urb, &data->isoc_anchor);
-
- err = usb_submit_urb(urb, mem_flags);
- if (err < 0) {
- if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p submission failed (%d)",
- hdev->name, urb, -err);
- usb_unanchor_urb(urb);
- }
-
- usb_free_urb(urb);
-
- return err;
-}
-
-static int btmtk_usb_open(struct hci_dev *hdev)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- int err;
-
- BT_DBG("%s\n", __func__);
-
- err = usb_autopm_get_interface(data->intf);
- if (err < 0)
- return err;
-
- data->intf->needs_remote_wakeup = 1;
-
- if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
- goto done;
-
- if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
- goto done;
-
- err = btmtk_usb_submit_intr_urb(hdev, GFP_KERNEL);
- if (err < 0)
- goto failed;
-
- err = btmtk_usb_submit_bulk_in_urb(hdev, GFP_KERNEL);
- if (err < 0) {
- usb_kill_anchored_urbs(&data->intr_anchor);
- goto failed;
- }
-
- set_bit(BTUSB_BULK_RUNNING, &data->flags);
- btmtk_usb_submit_bulk_in_urb(hdev, GFP_KERNEL);
-
-done:
- usb_autopm_put_interface(data->intf);
- return 0;
-
-failed:
- clear_bit(BTUSB_INTR_RUNNING, &data->flags);
- clear_bit(HCI_RUNNING, &hdev->flags);
- usb_autopm_put_interface(data->intf);
- return err;
-}
-
-static void btmtk_usb_stop_traffic(struct btmtk_usb_data *data)
-{
- BT_DBG("%s\n", __func__);
-
- usb_kill_anchored_urbs(&data->intr_anchor);
- usb_kill_anchored_urbs(&data->bulk_anchor);
- usb_kill_anchored_urbs(&data->isoc_anchor);
-}
-
-static int btmtk_usb_close(struct hci_dev *hdev)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- int err;
-
- BT_DBG("%s\n", __func__);
-
- if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
- return 0;
-
- cancel_work_sync(&data->work);
- cancel_work_sync(&data->waker);
-
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
- clear_bit(BTUSB_BULK_RUNNING, &data->flags);
- clear_bit(BTUSB_INTR_RUNNING, &data->flags);
-
- btmtk_usb_stop_traffic(data);
-
- err = usb_autopm_get_interface(data->intf);
- if (err < 0)
- goto failed;
-
- data->intf->needs_remote_wakeup = 0;
- usb_autopm_put_interface(data->intf);
-
-failed:
- usb_scuttle_anchored_urbs(&data->deferred);
- return 0;
-}
-
-static int btmtk_usb_flush(struct hci_dev *hdev)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
-
- BT_DBG("%s\n", __func__);
-
- usb_kill_anchored_urbs(&data->tx_anchor);
-
- return 0;
-}
-
-static void btmtk_usb_tx_complete(struct urb *urb)
-{
- struct sk_buff *skb = urb->context;
- struct hci_dev *hdev = (struct hci_dev *)skb->dev;
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
-
- BT_DBG("%s: %s urb %p status %d count %d\n", __func__, hdev->name,
- urb, urb->status, urb->actual_length);
-
- if (!test_bit(HCI_RUNNING, &hdev->flags))
- goto done;
-
- if (!urb->status)
- hdev->stat.byte_tx += urb->transfer_buffer_length;
- else
- hdev->stat.err_tx++;
-
-done:
- spin_lock(&data->txlock);
- data->tx_in_flight--;
- spin_unlock(&data->txlock);
-
- kfree(urb->setup_packet);
-
- kfree_skb(skb);
-}
-
-static void btmtk_usb_isoc_tx_complete(struct urb *urb)
-{
- struct sk_buff *skb = urb->context;
- struct hci_dev *hdev = (struct hci_dev *) skb->dev;
-
- BT_DBG("%s: %s urb %p status %d count %d", __func__, hdev->name,
- urb, urb->status, urb->actual_length);
-
- if (!test_bit(HCI_RUNNING, &hdev->flags))
- goto done;
-
- if (!urb->status)
- hdev->stat.byte_tx += urb->transfer_buffer_length;
- else
- hdev->stat.err_tx++;
-
-done:
- kfree(urb->setup_packet);
-
- kfree_skb(skb);
-}
-
-static int btmtk_usb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- struct usb_ctrlrequest *dr;
- struct urb *urb;
- unsigned int pipe;
- int err;
-
- BT_DBG("%s\n", __func__);
-
- if (!test_bit(HCI_RUNNING, &hdev->flags))
- return -EBUSY;
-
- switch (bt_cb(skb)->pkt_type) {
- case HCI_COMMAND_PKT:
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb)
- return -ENOMEM;
-
- dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
- if (!dr) {
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- dr->bRequestType = data->cmdreq_type;
- dr->bRequest = 0;
- dr->wIndex = 0;
- dr->wValue = 0;
- dr->wLength = __cpu_to_le16(skb->len);
-
- pipe = usb_sndctrlpipe(data->udev, 0x00);
-
- if (test_bit(HCI_RUNNING, &hdev->flags)) {
- u16 op_code;
- memcpy(&op_code, skb->data, 2);
- BT_DBG("ogf = %x\n", (op_code & 0xfc00) >> 10);
- BT_DBG("ocf = %x\n", op_code & 0x03ff);
- hex_dump("hci command", skb->data, skb->len);
-
- }
-
- usb_fill_control_urb(urb, data->udev, pipe, (void *) dr,
- skb->data, skb->len,
- btmtk_usb_tx_complete, skb);
-
- hdev->stat.cmd_tx++;
- break;
-
- case HCI_ACLDATA_PKT:
- if (!data->bulk_tx_ep)
- return -ENODEV;
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb)
- return -ENOMEM;
-
- pipe = usb_sndbulkpipe(data->udev,
- data->bulk_tx_ep->bEndpointAddress);
-
- usb_fill_bulk_urb(urb, data->udev, pipe, skb->data,
- skb->len, btmtk_usb_tx_complete, skb);
-
- hdev->stat.acl_tx++;
- BT_DBG("HCI_ACLDATA_PKT:\n");
- break;
-
- case HCI_SCODATA_PKT:
- if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1)
- return -ENODEV;
-
- urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC);
- if (!urb)
- return -ENOMEM;
-
- pipe = usb_sndisocpipe(data->udev,
- data->isoc_tx_ep->bEndpointAddress);
-
- usb_fill_int_urb(urb, data->udev, pipe,
- skb->data, skb->len, btmtk_usb_isoc_tx_complete,
- skb, data->isoc_tx_ep->bInterval);
-
- urb->transfer_flags = URB_ISO_ASAP;
-
- __fill_isoc_descriptor(urb, skb->len,
- le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));
-
- hdev->stat.sco_tx++;
- BT_DBG("HCI_SCODATA_PKT:\n");
- goto skip_waking;
-
- default:
- return -EILSEQ;
- }
-
- err = inc_tx(data);
-
- if (err) {
- usb_anchor_urb(urb, &data->deferred);
- schedule_work(&data->waker);
- err = 0;
- goto done;
- }
-
-skip_waking:
- usb_anchor_urb(urb, &data->tx_anchor);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0) {
- if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p submission failed (%d)",
- hdev->name, urb, -err);
- kfree(urb->setup_packet);
- usb_unanchor_urb(urb);
- } else {
- usb_mark_last_busy(data->udev);
- }
-
-done:
- usb_free_urb(urb);
- return err;
-}
-
-static void btmtk_usb_notify(struct hci_dev *hdev, unsigned int evt)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
-
- BT_DBG("%s evt %d", hdev->name, evt);
-
- if (hdev->conn_hash.sco_num != data->sco_num) {
- data->sco_num = hdev->conn_hash.sco_num;
- schedule_work(&data->work);
- }
-}
-
-static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting)
-{
- struct btmtk_usb_data *data = hci_get_drvdata(hdev);
- struct usb_interface *intf = data->isoc;
- struct usb_endpoint_descriptor *ep_desc;
- int i, err;
-
- if (!data->isoc)
- return -ENODEV;
-
- err = usb_set_interface(data->udev, 1, altsetting);
- if (err < 0) {
- BT_ERR("%s setting interface failed (%d)", hdev->name, -err);
- return err;
- }
-
- data->isoc_altsetting = altsetting;
-
- data->isoc_tx_ep = NULL;
- data->isoc_rx_ep = NULL;
-
- for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
- ep_desc = &intf->cur_altsetting->endpoint[i].desc;
-
- if (!data->isoc_tx_ep && usb_endpoint_is_isoc_out(ep_desc)) {
- data->isoc_tx_ep = ep_desc;
- continue;
- }
-
- if (!data->isoc_rx_ep && usb_endpoint_is_isoc_in(ep_desc)) {
- data->isoc_rx_ep = ep_desc;
- continue;
- }
- }
-
- if (!data->isoc_tx_ep || !data->isoc_rx_ep) {
- BT_ERR("%s invalid SCO descriptors", hdev->name);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void btmtk_usb_work(struct work_struct *work)
-{
- struct btmtk_usb_data *data = container_of(work, struct btmtk_usb_data,
- work);
- struct hci_dev *hdev = data->hdev;
- int new_alts;
- int err;
-
- BT_DBG("%s\n", __func__);
-
- if (hdev->conn_hash.sco_num > 0) {
- if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
- err = usb_autopm_get_interface(data->isoc ?
- data->isoc : data->intf);
- if (err < 0) {
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
- usb_kill_anchored_urbs(&data->isoc_anchor);
- return;
- }
-
- set_bit(BTUSB_DID_ISO_RESUME, &data->flags);
- }
-
- if (hdev->voice_setting & 0x0020) {
- static const int alts[3] = { 2, 4, 5 };
- new_alts = alts[hdev->conn_hash.sco_num - 1];
- } else {
- new_alts = hdev->conn_hash.sco_num;
- }
-
- if (data->isoc_altsetting != new_alts) {
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
- usb_kill_anchored_urbs(&data->isoc_anchor);
-
- if (__set_isoc_interface(hdev, new_alts) < 0)
- return;
- }
-
- if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) {
- if (btmtk_usb_submit_isoc_in_urb(hdev, GFP_KERNEL) < 0)
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
- else
- btmtk_usb_submit_isoc_in_urb(hdev, GFP_KERNEL);
- }
- } else {
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
- usb_kill_anchored_urbs(&data->isoc_anchor);
-
- __set_isoc_interface(hdev, 0);
-
- if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags))
- usb_autopm_put_interface(data->isoc ?
- data->isoc : data->intf);
- }
-}
-
-static void btmtk_usb_waker(struct work_struct *work)
-{
- struct btmtk_usb_data *data = container_of(work, struct btmtk_usb_data,
- waker);
- int err;
-
- err = usb_autopm_get_interface(data->intf);
-
- if (err < 0)
- return;
-
- usb_autopm_put_interface(data->intf);
-}
-
-static int btmtk_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct btmtk_usb_data *data;
- struct usb_endpoint_descriptor *ep_desc;
- int i, err;
- struct hci_dev *hdev;
-
- /* interface numbers are hardcoded in the spec */
- if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
- return -ENODEV;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
-
- if (!data)
- return -ENOMEM;
-
- for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
- ep_desc = &intf->cur_altsetting->endpoint[i].desc;
-
- if (!data->intr_ep && usb_endpoint_is_int_in(ep_desc)) {
- data->intr_ep = ep_desc;
- continue;
- }
-
- if (!data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) {
- data->bulk_tx_ep = ep_desc;
- continue;
- }
-
- if (!data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) {
- data->bulk_rx_ep = ep_desc;
- continue;
- }
- }
-
- if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) {
- kfree(data);
- return -ENODEV;
- }
-
- data->cmdreq_type = USB_TYPE_CLASS;
-
- data->udev = interface_to_usbdev(intf);
- data->intf = intf;
-
- spin_lock_init(&data->lock);
- INIT_WORK(&data->work, btmtk_usb_work);
- INIT_WORK(&data->waker, btmtk_usb_waker);
- spin_lock_init(&data->txlock);
-
- init_usb_anchor(&data->tx_anchor);
- init_usb_anchor(&data->intr_anchor);
- init_usb_anchor(&data->bulk_anchor);
- init_usb_anchor(&data->isoc_anchor);
- init_usb_anchor(&data->deferred);
-
- hdev = hci_alloc_dev();
- if (!hdev) {
- kfree(data);
- return -ENOMEM;
- }
-
- hdev->bus = HCI_USB;
-
- hci_set_drvdata(hdev, data);
-
- data->hdev = hdev;
-
- SET_HCIDEV_DEV(hdev, &intf->dev);
-
- hdev->open = btmtk_usb_open;
- hdev->close = btmtk_usb_close;
- hdev->flush = btmtk_usb_flush;
- hdev->send = btmtk_usb_send_frame;
- hdev->notify = btmtk_usb_notify;
-
- /* Interface numbers are hardcoded in the specification */
- data->isoc = usb_ifnum_to_if(data->udev, 1);
-
- if (data->isoc) {
- err = usb_driver_claim_interface(&btmtk_usb_driver,
- data->isoc, data);
- if (err < 0) {
- hci_free_dev(hdev);
- kfree(data);
- return err;
- }
- }
-
- data->io_buf = kmalloc(256, GFP_KERNEL);
- if (!data->io_buf) {
- hci_free_dev(hdev);
- kfree(data);
- return -ENOMEM;
- }
-
- btmtk_usb_switch_iobase(data, WLAN);
-
- btmtk_usb_cap_init(data);
-
- err = hci_register_dev(hdev);
- if (err < 0) {
- hci_free_dev(hdev);
- kfree(data);
- return err;
- }
-
- usb_set_intfdata(intf, data);
-
- return 0;
-}
-
-static void btmtk_usb_disconnect(struct usb_interface *intf)
-{
- struct btmtk_usb_data *data = usb_get_intfdata(intf);
- struct hci_dev *hdev;
-
- BT_DBG("%s\n", __func__);
-
- if (!data)
- return;
-
- hdev = data->hdev;
- usb_set_intfdata(data->intf, NULL);
-
- if (data->isoc)
- usb_set_intfdata(data->isoc, NULL);
-
- hci_unregister_dev(hdev);
-
- if (intf == data->isoc)
- usb_driver_release_interface(&btmtk_usb_driver, data->intf);
- else if (data->isoc)
- usb_driver_release_interface(&btmtk_usb_driver, data->isoc);
-
- hci_free_dev(hdev);
-
- kfree(data->io_buf);
-
- kfree(data);
-}
-
-#ifdef CONFIG_PM
-static int btmtk_usb_suspend(struct usb_interface *intf, pm_message_t message)
-{
- struct btmtk_usb_data *data = usb_get_intfdata(intf);
-
- BT_DBG("%s\n", __func__);
-
- if (data->suspend_count++)
- return 0;
-
- spin_lock_irq(&data->txlock);
- if (!(PMSG_IS_AUTO(message) && data->tx_in_flight)) {
- set_bit(BTUSB_SUSPENDING, &data->flags);
- spin_unlock_irq(&data->txlock);
- } else {
- spin_unlock_irq(&data->txlock);
- data->suspend_count--;
- return -EBUSY;
- }
-
- cancel_work_sync(&data->work);
-
- btmtk_usb_stop_traffic(data);
- usb_kill_anchored_urbs(&data->tx_anchor);
-
- return 0;
-}
-
-static void play_deferred(struct btmtk_usb_data *data)
-{
- struct urb *urb;
- int err;
-
- while ((urb = usb_get_from_anchor(&data->deferred))) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0)
- break;
-
- data->tx_in_flight++;
- }
-
- usb_scuttle_anchored_urbs(&data->deferred);
-}
-
-static int btmtk_usb_resume(struct usb_interface *intf)
-{
- struct btmtk_usb_data *data = usb_get_intfdata(intf);
- struct hci_dev *hdev = data->hdev;
- int err = 0;
-
- BT_DBG("%s\n", __func__);
-
- if (--data->suspend_count)
- return 0;
-
- if (!test_bit(HCI_RUNNING, &hdev->flags))
- goto done;
-
- if (test_bit(BTUSB_INTR_RUNNING, &data->flags)) {
- err = btmtk_usb_submit_intr_urb(hdev, GFP_NOIO);
- if (err < 0) {
- clear_bit(BTUSB_INTR_RUNNING, &data->flags);
- goto failed;
- }
- }
-
- if (test_bit(BTUSB_BULK_RUNNING, &data->flags)) {
- err = btmtk_usb_submit_bulk_in_urb(hdev, GFP_NOIO);
- if (err < 0) {
- clear_bit(BTUSB_BULK_RUNNING, &data->flags);
- goto failed;
- }
-
- btmtk_usb_submit_bulk_in_urb(hdev, GFP_NOIO);
- }
-
- if (test_bit(BTUSB_ISOC_RUNNING, &data->flags)) {
- if (btmtk_usb_submit_isoc_in_urb(hdev, GFP_NOIO) < 0)
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
- else
- btmtk_usb_submit_isoc_in_urb(hdev, GFP_NOIO);
- }
-
- spin_lock_irq(&data->txlock);
- play_deferred(data);
- clear_bit(BTUSB_SUSPENDING, &data->flags);
- spin_unlock_irq(&data->txlock);
- schedule_work(&data->work);
-
- return 0;
-
-failed:
- usb_scuttle_anchored_urbs(&data->deferred);
-done:
- spin_lock_irq(&data->txlock);
- clear_bit(BTUSB_SUSPENDING, &data->flags);
- spin_unlock_irq(&data->txlock);
-
- return err;
-}
-#endif
-
-static struct usb_device_id btmtk_usb_table[] = {
- /* Mediatek MT7650 */
- { USB_DEVICE(0x0e8d, 0x7650) },
- { USB_DEVICE(0x0e8d, 0x7630) },
- { USB_DEVICE(0x0e8d, 0x763e) },
- /* Mediatek MT662 */
- { USB_DEVICE(0x0e8d, 0x7662) },
- { USB_DEVICE(0x0e8d, 0x7632) },
- { } /* Terminating entry */
-};
-
-static struct usb_driver btmtk_usb_driver = {
- .name = "btmtk_usb",
- .probe = btmtk_usb_probe,
- .disconnect = btmtk_usb_disconnect,
-#ifdef CONFIG_PM
- .suspend = btmtk_usb_suspend,
- .resume = btmtk_usb_resume,
-#endif
- .id_table = btmtk_usb_table,
- .supports_autosuspend = 1,
- .disable_hub_initiated_lpm = 1,
-};
-
-module_usb_driver(btmtk_usb_driver);
-
-MODULE_DESCRIPTION("Mediatek Bluetooth USB driver ver " VERSION);
-MODULE_VERSION(VERSION);
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(MT7650_FIRMWARE);
-MODULE_FIRMWARE(MT7662_FIRMWARE);
diff --git a/drivers/staging/btmtk_usb/btmtk_usb.h b/drivers/staging/btmtk_usb/btmtk_usb.h
deleted file mode 100644
index 12f0d3b27bfe..000000000000
--- a/drivers/staging/btmtk_usb/btmtk_usb.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * MediaTek Bluetooth USB Driver
- *
- * Copyright (C) 2013, MediaTek co.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- * or on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- */
-
-#ifndef __BTMTK_USB_H__
-#define __BTMTK_USB_H_
-
-/* Memory map for MTK BT */
-
-/* SYS Control */
-#define SYSCTL 0x400000
-
-/* WLAN */
-#define WLAN 0x410000
-
-/* MCUCTL */
-#define INT_LEVEL 0x0718
-#define COM_REG0 0x0730
-#define SEMAPHORE_00 0x07B0
-#define SEMAPHORE_01 0x07B4
-#define SEMAPHORE_02 0x07B8
-#define SEMAPHORE_03 0x07BC
-
-/* Chip definition */
-
-#define CONTROL_TIMEOUT_JIFFIES ((300 * HZ) / 100)
-#define DEVICE_VENDOR_REQUEST_OUT 0x40
-#define DEVICE_VENDOR_REQUEST_IN 0xc0
-#define DEVICE_CLASS_REQUEST_OUT 0x20
-
-#define BTUSB_MAX_ISOC_FRAMES 10
-#define BTUSB_INTR_RUNNING 0
-#define BTUSB_BULK_RUNNING 1
-#define BTUSB_ISOC_RUNNING 2
-#define BTUSB_SUSPENDING 3
-#define BTUSB_DID_ISO_RESUME 4
-
-/* ROM Patch */
-#define PATCH_HCI_HEADER_SIZE 4
-#define PATCH_WMT_HEADER_SIZE 5
-#define PATCH_HEADER_SIZE (PATCH_HCI_HEADER_SIZE + PATCH_WMT_HEADER_SIZE)
-#define UPLOAD_PATCH_UNIT 2048
-#define PATCH_INFO_SIZE 30
-#define PATCH_PHASE1 1
-#define PATCH_PHASE2 2
-#define PATCH_PHASE3 3
-
-struct btmtk_usb_data {
- struct hci_dev *hdev;
- struct usb_device *udev;
- struct usb_interface *intf;
- struct usb_interface *isoc;
-
- spinlock_t lock;
-
- unsigned long flags;
- struct work_struct work;
- struct work_struct waker;
-
- struct usb_anchor tx_anchor;
- struct usb_anchor intr_anchor;
- struct usb_anchor bulk_anchor;
- struct usb_anchor isoc_anchor;
- struct usb_anchor deferred;
- int tx_in_flight;
- spinlock_t txlock;
-
- struct usb_endpoint_descriptor *intr_ep;
- struct usb_endpoint_descriptor *bulk_tx_ep;
- struct usb_endpoint_descriptor *bulk_rx_ep;
- struct usb_endpoint_descriptor *isoc_tx_ep;
- struct usb_endpoint_descriptor *isoc_rx_ep;
-
- __u8 cmdreq_type;
-
- unsigned int sco_num;
- int isoc_altsetting;
- int suspend_count;
-
- /* request for different io operation */
- u8 w_request;
- u8 r_request;
-
- /* io buffer for usb control transfer */
- char *io_buf;
-
- struct semaphore fw_upload_sem;
-
- /* unsigned char *fw_image; */
- /* unsigned char *rom_patch; */
- const struct firmware *firmware;
- u32 chip_id;
- u8 need_load_fw;
- u8 need_load_rom_patch;
- u32 rom_patch_offset;
- u32 rom_patch_len;
-};
-
-static inline int is_mt7630(struct btmtk_usb_data *data)
-{
- return ((data->chip_id & 0xffff0000) == 0x76300000);
-}
-
-static inline int is_mt7650(struct btmtk_usb_data *data)
-{
- return ((data->chip_id & 0xffff0000) == 0x76500000);
-}
-
-static inline int is_mt7632(struct btmtk_usb_data *data)
-{
- return ((data->chip_id & 0xffff0000) == 0x76320000);
-}
-
-static inline int is_mt7662(struct btmtk_usb_data *data)
-{
- return ((data->chip_id & 0xffff0000) == 0x76620000);
-}
-
-#endif
diff --git a/drivers/staging/ced1401/ced_ioc.c b/drivers/staging/ced1401/ced_ioc.c
index 62efd74b8c04..bf532b1bd345 100644
--- a/drivers/staging/ced1401/ced_ioc.c
+++ b/drivers/staging/ced1401/ced_ioc.c
@@ -19,7 +19,6 @@
*/
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
@@ -630,7 +629,7 @@ int ClearArea(DEVICE_EXTENSION *pdx, int nArea)
}
spin_unlock_irq(&pdx->stagedLock);
- if (pPages) { /* if we decided to release the memory */
+ if (pPages) { /* if we decided to release the memory */
/* Now we must undo the pinning down of the pages. We will assume the worst and mark */
/* all the pages as dirty. Don't be tempted to move this up above as you must not be */
/* holding a spin lock to do this stuff as it is not atomic. */
diff --git a/drivers/staging/ced1401/usb1401.c b/drivers/staging/ced1401/usb1401.c
index 97c55f9e5151..efc310ca789e 100644
--- a/drivers/staging/ced1401/usb1401.c
+++ b/drivers/staging/ced1401/usb1401.c
@@ -89,7 +89,6 @@ synchronous non-Urb based transfers.
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/highmem.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index bfa27e7fc016..89e25b4203ad 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -884,6 +884,12 @@ config COMEDI_GSC_HPDI
To compile this driver as a module, choose M here: the module will be
called gsc_hpdi.
+config COMEDI_MF6X4
+ tristate "Humusoft MF634 and MF624 DAQ Card support"
+ ---help---
+ This driver supports both Humusoft MF634 and MF624 Data acquisition
+ cards. The legacy Humusoft MF614 card is not supported.
+
config COMEDI_ICP_MULTI
tristate "Inova ICP_MULTI support"
---help---
diff --git a/drivers/staging/comedi/Makefile b/drivers/staging/comedi/Makefile
index e6dfc98f8c8e..fae2d9090006 100644
--- a/drivers/staging/comedi/Makefile
+++ b/drivers/staging/comedi/Makefile
@@ -1,3 +1,5 @@
+ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG
+
comedi-y := comedi_fops.o range.o drivers.o \
comedi_buf.o
comedi-$(CONFIG_COMEDI_PCI_DRIVERS) += comedi_pci.o
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index 4e26bd7fc84f..924fce977985 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -16,6 +16,7 @@
*/
#include <linux/vmalloc.h>
+#include <linux/slab.h>
#include "comedidev.h"
#include "comedi_internal.h"
@@ -26,31 +27,21 @@
#define COMEDI_PAGE_PROTECTION PAGE_KERNEL
#endif
-static void __comedi_buf_free(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned n_pages)
+static void comedi_buf_map_kref_release(struct kref *kref)
{
- struct comedi_async *async = s->async;
+ struct comedi_buf_map *bm =
+ container_of(kref, struct comedi_buf_map, refcount);
struct comedi_buf_page *buf;
- unsigned i;
-
- if (async->prealloc_buf) {
- vunmap(async->prealloc_buf);
- async->prealloc_buf = NULL;
- async->prealloc_bufsz = 0;
- }
+ unsigned int i;
- if (!async->buf_page_list)
- return;
-
- for (i = 0; i < n_pages; ++i) {
- buf = &async->buf_page_list[i];
- if (buf->virt_addr) {
+ if (bm->page_list) {
+ for (i = 0; i < bm->n_pages; i++) {
+ buf = &bm->page_list[i];
clear_bit(PG_reserved,
&(virt_to_page(buf->virt_addr)->flags));
- if (s->async_dma_dir != DMA_NONE) {
+ if (bm->dma_dir != DMA_NONE) {
#ifdef CONFIG_HAS_DMA
- dma_free_coherent(dev->hw_dev,
+ dma_free_coherent(bm->dma_hw_dev,
PAGE_SIZE,
buf->virt_addr,
buf->dma_addr);
@@ -59,10 +50,26 @@ static void __comedi_buf_free(struct comedi_device *dev,
free_page((unsigned long)buf->virt_addr);
}
}
+ vfree(bm->page_list);
}
- vfree(async->buf_page_list);
- async->buf_page_list = NULL;
- async->n_buf_pages = 0;
+ if (bm->dma_dir != DMA_NONE)
+ put_device(bm->dma_hw_dev);
+ kfree(bm);
+}
+
+static void __comedi_buf_free(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct comedi_async *async = s->async;
+
+ if (async->prealloc_buf) {
+ vunmap(async->prealloc_buf);
+ async->prealloc_buf = NULL;
+ async->prealloc_bufsz = 0;
+ }
+
+ comedi_buf_map_put(async->buf_map);
+ async->buf_map = NULL;
}
static void __comedi_buf_alloc(struct comedi_device *dev,
@@ -71,6 +78,7 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
{
struct comedi_async *async = s->async;
struct page **pages = NULL;
+ struct comedi_buf_map *bm;
struct comedi_buf_page *buf;
unsigned i;
@@ -80,18 +88,29 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
return;
}
- async->buf_page_list = vzalloc(sizeof(*buf) * n_pages);
- if (async->buf_page_list)
+ bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL);
+ if (!bm)
+ return;
+
+ async->buf_map = bm;
+ kref_init(&bm->refcount);
+ bm->dma_dir = s->async_dma_dir;
+ if (bm->dma_dir != DMA_NONE)
+ /* Need ref to hardware device to free buffer later. */
+ bm->dma_hw_dev = get_device(dev->hw_dev);
+
+ bm->page_list = vzalloc(sizeof(*buf) * n_pages);
+ if (bm->page_list)
pages = vmalloc(sizeof(struct page *) * n_pages);
if (!pages)
return;
for (i = 0; i < n_pages; i++) {
- buf = &async->buf_page_list[i];
- if (s->async_dma_dir != DMA_NONE)
+ buf = &bm->page_list[i];
+ if (bm->dma_dir != DMA_NONE)
#ifdef CONFIG_HAS_DMA
- buf->virt_addr = dma_alloc_coherent(dev->hw_dev,
+ buf->virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
PAGE_SIZE,
&buf->dma_addr,
GFP_KERNEL |
@@ -108,6 +127,7 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
pages[i] = virt_to_page(buf->virt_addr);
}
+ bm->n_pages = i;
/* vmap the prealloc_buf if all the pages were allocated */
if (i == n_pages)
@@ -117,6 +137,26 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
vfree(pages);
}
+void comedi_buf_map_get(struct comedi_buf_map *bm)
+{
+ if (bm)
+ kref_get(&bm->refcount);
+}
+
+int comedi_buf_map_put(struct comedi_buf_map *bm)
+{
+ if (bm)
+ return kref_put(&bm->refcount, comedi_buf_map_kref_release);
+ return 1;
+}
+
+bool comedi_buf_is_mmapped(struct comedi_async *async)
+{
+ struct comedi_buf_map *bm = async->buf_map;
+
+ return bm && (atomic_read(&bm->refcount.refcount) > 1);
+}
+
int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long new_size)
{
@@ -130,7 +170,7 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
return 0;
/* deallocate old buffer */
- __comedi_buf_free(dev, s, async->n_buf_pages);
+ __comedi_buf_free(dev, s);
/* allocate new buffer */
if (new_size) {
@@ -140,10 +180,9 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
if (!async->prealloc_buf) {
/* allocation failed */
- __comedi_buf_free(dev, s, n_pages);
+ __comedi_buf_free(dev, s);
return -ENOMEM;
}
- async->n_buf_pages = n_pages;
}
async->prealloc_bufsz = new_size;
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index f3d59e2a1152..c22c617b0da1 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -16,8 +16,6 @@
GNU General Public License for more details.
*/
-#undef DEBUG
-
#include "comedi_compat32.h"
#include <linux/module.h>
@@ -47,15 +45,6 @@
#define COMEDI_NUM_SUBDEVICE_MINORS \
(COMEDI_NUM_MINORS - COMEDI_NUM_BOARD_MINORS)
-#ifdef CONFIG_COMEDI_DEBUG
-int comedi_debug;
-EXPORT_SYMBOL_GPL(comedi_debug);
-module_param(comedi_debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(comedi_debug,
- "enable comedi core and driver debugging if non-zero (default 0)"
- );
-#endif
-
static int comedi_num_legacy_minors;
module_param(comedi_num_legacy_minors, int, S_IRUGO);
MODULE_PARM_DESC(comedi_num_legacy_minors,
@@ -89,11 +78,38 @@ static struct cdev comedi_cdev;
static void comedi_device_init(struct comedi_device *dev)
{
+ kref_init(&dev->refcount);
spin_lock_init(&dev->spinlock);
mutex_init(&dev->mutex);
+ init_rwsem(&dev->attach_lock);
dev->minor = -1;
}
+static void comedi_dev_kref_release(struct kref *kref)
+{
+ struct comedi_device *dev =
+ container_of(kref, struct comedi_device, refcount);
+
+ mutex_destroy(&dev->mutex);
+ put_device(dev->class_dev);
+ kfree(dev);
+}
+
+int comedi_dev_put(struct comedi_device *dev)
+{
+ if (dev)
+ return kref_put(&dev->refcount, comedi_dev_kref_release);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(comedi_dev_put);
+
+static struct comedi_device *comedi_dev_get(struct comedi_device *dev)
+{
+ if (dev)
+ kref_get(&dev->refcount);
+ return dev;
+}
+
static void comedi_device_cleanup(struct comedi_device *dev)
{
struct module *driver_module = NULL;
@@ -104,14 +120,9 @@ static void comedi_device_cleanup(struct comedi_device *dev)
if (dev->attached)
driver_module = dev->driver->module;
comedi_device_detach(dev);
- while (dev->use_count > 0) {
- if (driver_module)
- module_put(driver_module);
- module_put(THIS_MODULE);
- dev->use_count--;
- }
+ if (driver_module && dev->use_count)
+ module_put(driver_module);
mutex_unlock(&dev->mutex);
- mutex_destroy(&dev->mutex);
}
static bool comedi_clear_board_dev(struct comedi_device *dev)
@@ -142,17 +153,17 @@ static struct comedi_device *comedi_clear_board_minor(unsigned minor)
static void comedi_free_board_dev(struct comedi_device *dev)
{
if (dev) {
+ comedi_device_cleanup(dev);
if (dev->class_dev) {
device_destroy(comedi_class,
MKDEV(COMEDI_MAJOR, dev->minor));
}
- comedi_device_cleanup(dev);
- kfree(dev);
+ comedi_dev_put(dev);
}
}
static struct comedi_subdevice
-*comedi_subdevice_from_minor(unsigned minor)
+*comedi_subdevice_from_minor(const struct comedi_device *dev, unsigned minor)
{
struct comedi_subdevice *s;
unsigned int i = minor - COMEDI_NUM_BOARD_MINORS;
@@ -160,37 +171,45 @@ static struct comedi_subdevice
BUG_ON(i >= COMEDI_NUM_SUBDEVICE_MINORS);
mutex_lock(&comedi_subdevice_minor_table_lock);
s = comedi_subdevice_minor_table[i];
+ if (s && s->device != dev)
+ s = NULL;
mutex_unlock(&comedi_subdevice_minor_table_lock);
return s;
}
-static struct comedi_device *comedi_dev_from_board_minor(unsigned minor)
+static struct comedi_device *comedi_dev_get_from_board_minor(unsigned minor)
{
struct comedi_device *dev;
BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS);
mutex_lock(&comedi_board_minor_table_lock);
- dev = comedi_board_minor_table[minor];
+ dev = comedi_dev_get(comedi_board_minor_table[minor]);
mutex_unlock(&comedi_board_minor_table_lock);
return dev;
}
-static struct comedi_device *comedi_dev_from_subdevice_minor(unsigned minor)
+static struct comedi_device *comedi_dev_get_from_subdevice_minor(unsigned minor)
{
+ struct comedi_device *dev;
struct comedi_subdevice *s;
+ unsigned int i = minor - COMEDI_NUM_BOARD_MINORS;
- s = comedi_subdevice_from_minor(minor);
- return s ? s->device : NULL;
+ BUG_ON(i >= COMEDI_NUM_SUBDEVICE_MINORS);
+ mutex_lock(&comedi_subdevice_minor_table_lock);
+ s = comedi_subdevice_minor_table[i];
+ dev = comedi_dev_get(s ? s->device : NULL);
+ mutex_unlock(&comedi_subdevice_minor_table_lock);
+ return dev;
}
-struct comedi_device *comedi_dev_from_minor(unsigned minor)
+struct comedi_device *comedi_dev_get_from_minor(unsigned minor)
{
if (minor < COMEDI_NUM_BOARD_MINORS)
- return comedi_dev_from_board_minor(minor);
+ return comedi_dev_get_from_board_minor(minor);
else
- return comedi_dev_from_subdevice_minor(minor);
+ return comedi_dev_get_from_subdevice_minor(minor);
}
-EXPORT_SYMBOL_GPL(comedi_dev_from_minor);
+EXPORT_SYMBOL_GPL(comedi_dev_get_from_minor);
static struct comedi_subdevice *
comedi_read_subdevice(const struct comedi_device *dev, unsigned int minor)
@@ -198,10 +217,8 @@ comedi_read_subdevice(const struct comedi_device *dev, unsigned int minor)
struct comedi_subdevice *s;
if (minor >= COMEDI_NUM_BOARD_MINORS) {
- s = comedi_subdevice_from_minor(minor);
- if (!s || s->device != dev)
- return NULL;
- if (s->subdev_flags & SDF_CMD_READ)
+ s = comedi_subdevice_from_minor(dev, minor);
+ if (s == NULL || (s->subdev_flags & SDF_CMD_READ))
return s;
}
return dev->read_subdev;
@@ -213,10 +230,8 @@ comedi_write_subdevice(const struct comedi_device *dev, unsigned int minor)
struct comedi_subdevice *s;
if (minor >= COMEDI_NUM_BOARD_MINORS) {
- s = comedi_subdevice_from_minor(minor);
- if (!s || s->device != dev)
- return NULL;
- if (s->subdev_flags & SDF_CMD_WRITE)
+ s = comedi_subdevice_from_minor(dev, minor);
+ if (s == NULL || (s->subdev_flags & SDF_CMD_WRITE))
return s;
}
return dev->write_subdev;
@@ -232,11 +247,13 @@ static int resize_async_buffer(struct comedi_device *dev,
return -EPERM;
if (s->busy) {
- DPRINTK("subdevice is busy, cannot resize buffer\n");
+ dev_dbg(dev->class_dev,
+ "subdevice is busy, cannot resize buffer\n");
return -EBUSY;
}
- if (async->mmap_count) {
- DPRINTK("subdevice is mmapped, cannot resize buffer\n");
+ if (comedi_buf_is_mmapped(async)) {
+ dev_dbg(dev->class_dev,
+ "subdevice is mmapped, cannot resize buffer\n");
return -EBUSY;
}
@@ -254,8 +271,8 @@ static int resize_async_buffer(struct comedi_device *dev,
return retval;
}
- DPRINTK("comedi%i subd %d buffer resized to %i bytes\n",
- dev->minor, s->index, async->prealloc_bufsz);
+ dev_dbg(dev->class_dev, "subd %d buffer resized to %i bytes\n",
+ s->index, async->prealloc_bufsz);
return 0;
}
@@ -269,7 +286,7 @@ static ssize_t max_read_buffer_kb_show(struct device *csdev,
struct comedi_subdevice *s;
unsigned int size = 0;
- dev = comedi_dev_from_minor(minor);
+ dev = comedi_dev_get_from_minor(minor);
if (!dev)
return -ENODEV;
@@ -279,6 +296,7 @@ static ssize_t max_read_buffer_kb_show(struct device *csdev,
size = s->async->max_bufsize / 1024;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return snprintf(buf, PAGE_SIZE, "%i\n", size);
}
@@ -299,7 +317,7 @@ static ssize_t max_read_buffer_kb_store(struct device *csdev,
return -EINVAL;
size *= 1024;
- dev = comedi_dev_from_minor(minor);
+ dev = comedi_dev_get_from_minor(minor);
if (!dev)
return -ENODEV;
@@ -311,6 +329,7 @@ static ssize_t max_read_buffer_kb_store(struct device *csdev,
err = -EINVAL;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return err ? err : count;
}
static DEVICE_ATTR_RW(max_read_buffer_kb);
@@ -323,7 +342,7 @@ static ssize_t read_buffer_kb_show(struct device *csdev,
struct comedi_subdevice *s;
unsigned int size = 0;
- dev = comedi_dev_from_minor(minor);
+ dev = comedi_dev_get_from_minor(minor);
if (!dev)
return -ENODEV;
@@ -333,6 +352,7 @@ static ssize_t read_buffer_kb_show(struct device *csdev,
size = s->async->prealloc_bufsz / 1024;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return snprintf(buf, PAGE_SIZE, "%i\n", size);
}
@@ -353,7 +373,7 @@ static ssize_t read_buffer_kb_store(struct device *csdev,
return -EINVAL;
size *= 1024;
- dev = comedi_dev_from_minor(minor);
+ dev = comedi_dev_get_from_minor(minor);
if (!dev)
return -ENODEV;
@@ -365,6 +385,7 @@ static ssize_t read_buffer_kb_store(struct device *csdev,
err = -EINVAL;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return err ? err : count;
}
static DEVICE_ATTR_RW(read_buffer_kb);
@@ -378,7 +399,7 @@ static ssize_t max_write_buffer_kb_show(struct device *csdev,
struct comedi_subdevice *s;
unsigned int size = 0;
- dev = comedi_dev_from_minor(minor);
+ dev = comedi_dev_get_from_minor(minor);
if (!dev)
return -ENODEV;
@@ -388,6 +409,7 @@ static ssize_t max_write_buffer_kb_show(struct device *csdev,
size = s->async->max_bufsize / 1024;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return snprintf(buf, PAGE_SIZE, "%i\n", size);
}
@@ -408,7 +430,7 @@ static ssize_t max_write_buffer_kb_store(struct device *csdev,
return -EINVAL;
size *= 1024;
- dev = comedi_dev_from_minor(minor);
+ dev = comedi_dev_get_from_minor(minor);
if (!dev)
return -ENODEV;
@@ -420,6 +442,7 @@ static ssize_t max_write_buffer_kb_store(struct device *csdev,
err = -EINVAL;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return err ? err : count;
}
static DEVICE_ATTR_RW(max_write_buffer_kb);
@@ -432,7 +455,7 @@ static ssize_t write_buffer_kb_show(struct device *csdev,
struct comedi_subdevice *s;
unsigned int size = 0;
- dev = comedi_dev_from_minor(minor);
+ dev = comedi_dev_get_from_minor(minor);
if (!dev)
return -ENODEV;
@@ -442,6 +465,7 @@ static ssize_t write_buffer_kb_show(struct device *csdev,
size = s->async->prealloc_bufsz / 1024;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return snprintf(buf, PAGE_SIZE, "%i\n", size);
}
@@ -462,7 +486,7 @@ static ssize_t write_buffer_kb_store(struct device *csdev,
return -EINVAL;
size *= 1024;
- dev = comedi_dev_from_minor(minor);
+ dev = comedi_dev_get_from_minor(minor);
if (!dev)
return -ENODEV;
@@ -474,6 +498,7 @@ static ssize_t write_buffer_kb_store(struct device *csdev,
err = -EINVAL;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return err ? err : count;
}
static DEVICE_ATTR_RW(write_buffer_kb);
@@ -562,12 +587,13 @@ static void do_become_nonbusy(struct comedi_device *dev,
async->inttrig = NULL;
kfree(async->cmd.chanlist);
async->cmd.chanlist = NULL;
+ s->busy = NULL;
+ wake_up_interruptible_all(&s->async->wait_head);
} else {
dev_err(dev->class_dev,
"BUG: (?) do_become_nonbusy called with async=NULL\n");
+ s->busy = NULL;
}
-
- s->busy = NULL;
}
static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
@@ -582,6 +608,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return ret;
}
+void comedi_device_cancel_all(struct comedi_device *dev)
+{
+ struct comedi_subdevice *s;
+ int i;
+
+ if (!dev->attached)
+ return;
+
+ for (i = 0; i < dev->n_subdevices; i++) {
+ s = &dev->subdevices[i];
+ if (s->async)
+ do_cancel(dev, s);
+ }
+}
+
static int is_device_busy(struct comedi_device *dev)
{
struct comedi_subdevice *s;
@@ -594,7 +635,7 @@ static int is_device_busy(struct comedi_device *dev)
s = &dev->subdevices[i];
if (s->busy)
return 1;
- if (s->async && s->async->mmap_count)
+ if (s->async && comedi_buf_is_mmapped(s->async))
return 1;
}
@@ -684,7 +725,8 @@ static int do_bufconfig_ioctl(struct comedi_device *dev,
async = s->async;
if (!async) {
- DPRINTK("subdevice does not have async capability\n");
+ dev_dbg(dev->class_dev,
+ "subdevice does not have async capability\n");
bc.size = 0;
bc.maximum_size = 0;
goto copyback;
@@ -931,7 +973,8 @@ static int do_bufinfo_ioctl(struct comedi_device *dev,
async = s->async;
if (!async) {
- DPRINTK("subdevice does not have async capability\n");
+ dev_dbg(dev->class_dev,
+ "subdevice does not have async capability\n");
bi.buf_write_ptr = 0;
bi.buf_read_ptr = 0;
bi.buf_write_count = 0;
@@ -1083,19 +1126,20 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
break;
}
if (insn->subdev >= dev->n_subdevices) {
- DPRINTK("%d not usable subdevice\n",
+ dev_dbg(dev->class_dev,
+ "%d not usable subdevice\n",
insn->subdev);
ret = -EINVAL;
break;
}
s = &dev->subdevices[insn->subdev];
if (!s->async) {
- DPRINTK("no async\n");
+ dev_dbg(dev->class_dev, "no async\n");
ret = -EINVAL;
break;
}
if (!s->async->inttrig) {
- DPRINTK("no inttrig\n");
+ dev_dbg(dev->class_dev, "no inttrig\n");
ret = -EAGAIN;
break;
}
@@ -1104,7 +1148,7 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
ret = 1;
break;
default:
- DPRINTK("invalid insn\n");
+ dev_dbg(dev->class_dev, "invalid insn\n");
ret = -EINVAL;
break;
}
@@ -1113,21 +1157,23 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
unsigned int maxdata;
if (insn->subdev >= dev->n_subdevices) {
- DPRINTK("subdevice %d out of range\n", insn->subdev);
+ dev_dbg(dev->class_dev, "subdevice %d out of range\n",
+ insn->subdev);
ret = -EINVAL;
goto out;
}
s = &dev->subdevices[insn->subdev];
if (s->type == COMEDI_SUBD_UNUSED) {
- DPRINTK("%d not usable subdevice\n", insn->subdev);
+ dev_dbg(dev->class_dev, "%d not usable subdevice\n",
+ insn->subdev);
ret = -EIO;
goto out;
}
/* are we locked? (ioctl lock) */
if (s->lock && s->lock != file) {
- DPRINTK("device locked\n");
+ dev_dbg(dev->class_dev, "device locked\n");
ret = -EACCES;
goto out;
}
@@ -1135,7 +1181,7 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
ret = comedi_check_chanlist(s, 1, &insn->chanspec);
if (ret < 0) {
ret = -EINVAL;
- DPRINTK("bad chanspec\n");
+ dev_dbg(dev->class_dev, "bad chanspec\n");
goto out;
}
@@ -1156,7 +1202,8 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
for (i = 0; i < insn->n; ++i) {
if (data[i] > maxdata) {
ret = -EINVAL;
- DPRINTK("bad data value(s)\n");
+ dev_dbg(dev->class_dev,
+ "bad data value(s)\n");
break;
}
}
@@ -1238,35 +1285,35 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
data = kmalloc(sizeof(unsigned int) * MAX_SAMPLES, GFP_KERNEL);
if (!data) {
- DPRINTK("kmalloc failed\n");
ret = -ENOMEM;
goto error;
}
insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL);
if (!insns) {
- DPRINTK("kmalloc failed\n");
ret = -ENOMEM;
goto error;
}
if (copy_from_user(insns, insnlist.insns,
sizeof(*insns) * insnlist.n_insns)) {
- DPRINTK("copy_from_user failed\n");
+ dev_dbg(dev->class_dev, "copy_from_user failed\n");
ret = -EFAULT;
goto error;
}
for (i = 0; i < insnlist.n_insns; i++) {
if (insns[i].n > MAX_SAMPLES) {
- DPRINTK("number of samples too large\n");
+ dev_dbg(dev->class_dev,
+ "number of samples too large\n");
ret = -EINVAL;
goto error;
}
if (insns[i].insn & INSN_MASK_WRITE) {
if (copy_from_user(data, insns[i].data,
insns[i].n * sizeof(unsigned int))) {
- DPRINTK("copy_from_user failed\n");
+ dev_dbg(dev->class_dev,
+ "copy_from_user failed\n");
ret = -EFAULT;
goto error;
}
@@ -1277,7 +1324,8 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
if (insns[i].insn & INSN_MASK_READ) {
if (copy_to_user(insns[i].data, data,
insns[i].n * sizeof(unsigned int))) {
- DPRINTK("copy_to_user failed\n");
+ dev_dbg(dev->class_dev,
+ "copy_to_user failed\n");
ret = -EFAULT;
goto error;
}
@@ -1367,14 +1415,14 @@ static int do_cmd_ioctl(struct comedi_device *dev,
unsigned int __user *user_chanlist;
if (copy_from_user(&cmd, arg, sizeof(cmd))) {
- DPRINTK("bad cmd address\n");
+ dev_dbg(dev->class_dev, "bad cmd address\n");
return -EFAULT;
}
/* save user's chanlist pointer so it can be restored later */
user_chanlist = (unsigned int __user *)cmd.chanlist;
if (cmd.subdev >= dev->n_subdevices) {
- DPRINTK("%d no such subdevice\n", cmd.subdev);
+ dev_dbg(dev->class_dev, "%d no such subdevice\n", cmd.subdev);
return -ENODEV;
}
@@ -1382,38 +1430,38 @@ static int do_cmd_ioctl(struct comedi_device *dev,
async = s->async;
if (s->type == COMEDI_SUBD_UNUSED) {
- DPRINTK("%d not valid subdevice\n", cmd.subdev);
+ dev_dbg(dev->class_dev, "%d not valid subdevice\n", cmd.subdev);
return -EIO;
}
if (!s->do_cmd || !s->do_cmdtest || !s->async) {
- DPRINTK("subdevice %i does not support commands\n",
- cmd.subdev);
+ dev_dbg(dev->class_dev,
+ "subdevice %i does not support commands\n", cmd.subdev);
return -EIO;
}
/* are we locked? (ioctl lock) */
if (s->lock && s->lock != file) {
- DPRINTK("subdevice locked\n");
+ dev_dbg(dev->class_dev, "subdevice locked\n");
return -EACCES;
}
/* are we busy? */
if (s->busy) {
- DPRINTK("subdevice busy\n");
+ dev_dbg(dev->class_dev, "subdevice busy\n");
return -EBUSY;
}
/* make sure channel/gain list isn't too long */
if (cmd.chanlist_len > s->len_chanlist) {
- DPRINTK("channel/gain list too long %u > %d\n",
+ dev_dbg(dev->class_dev, "channel/gain list too long %u > %d\n",
cmd.chanlist_len, s->len_chanlist);
return -EINVAL;
}
/* make sure channel/gain list isn't too short */
if (cmd.chanlist_len < 1) {
- DPRINTK("channel/gain list too short %u < 1\n",
+ dev_dbg(dev->class_dev, "channel/gain list too short %u < 1\n",
cmd.chanlist_len);
return -EINVAL;
}
@@ -1425,7 +1473,9 @@ static int do_cmd_ioctl(struct comedi_device *dev,
async->cmd.chanlist_len * sizeof(int));
if (IS_ERR(async->cmd.chanlist)) {
ret = PTR_ERR(async->cmd.chanlist);
- DPRINTK("memdup_user failed with code %d\n", ret);
+ async->cmd.chanlist = NULL;
+ dev_dbg(dev->class_dev, "memdup_user failed with code %d\n",
+ ret);
goto cleanup;
}
@@ -1434,20 +1484,20 @@ static int do_cmd_ioctl(struct comedi_device *dev,
async->cmd.chanlist_len,
async->cmd.chanlist);
if (ret < 0) {
- DPRINTK("bad chanlist\n");
+ dev_dbg(dev->class_dev, "bad chanlist\n");
goto cleanup;
}
ret = s->do_cmdtest(dev, s, &async->cmd);
if (async->cmd.flags & TRIG_BOGUS || ret) {
- DPRINTK("test returned %d\n", ret);
+ dev_dbg(dev->class_dev, "test returned %d\n", ret);
cmd = async->cmd;
/* restore chanlist pointer before copying back */
cmd.chanlist = (unsigned int __force *)user_chanlist;
cmd.data = NULL;
if (copy_to_user(arg, &cmd, sizeof(cmd))) {
- DPRINTK("fault writing cmd\n");
+ dev_dbg(dev->class_dev, "fault writing cmd\n");
ret = -EFAULT;
goto cleanup;
}
@@ -1457,7 +1507,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
if (!async->prealloc_bufsz) {
ret = -ENOMEM;
- DPRINTK("no buffer (?)\n");
+ dev_dbg(dev->class_dev, "no buffer (?)\n");
goto cleanup;
}
@@ -1469,8 +1519,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
if (async->cmd.flags & TRIG_WAKE_EOS)
async->cb_mask |= COMEDI_CB_EOS;
- comedi_set_subdevice_runflags(s, SRF_USER | SRF_ERROR | SRF_RUNNING,
- SRF_USER | SRF_RUNNING);
+ comedi_set_subdevice_runflags(s, SRF_ERROR | SRF_RUNNING, SRF_RUNNING);
/* set s->busy _after_ setting SRF_RUNNING flag to avoid race with
* comedi_read() or comedi_write() */
@@ -1510,32 +1559,32 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
unsigned int __user *user_chanlist;
if (copy_from_user(&cmd, arg, sizeof(cmd))) {
- DPRINTK("bad cmd address\n");
+ dev_dbg(dev->class_dev, "bad cmd address\n");
return -EFAULT;
}
/* save user's chanlist pointer so it can be restored later */
user_chanlist = (unsigned int __user *)cmd.chanlist;
if (cmd.subdev >= dev->n_subdevices) {
- DPRINTK("%d no such subdevice\n", cmd.subdev);
+ dev_dbg(dev->class_dev, "%d no such subdevice\n", cmd.subdev);
return -ENODEV;
}
s = &dev->subdevices[cmd.subdev];
if (s->type == COMEDI_SUBD_UNUSED) {
- DPRINTK("%d not valid subdevice\n", cmd.subdev);
+ dev_dbg(dev->class_dev, "%d not valid subdevice\n", cmd.subdev);
return -EIO;
}
if (!s->do_cmd || !s->do_cmdtest) {
- DPRINTK("subdevice %i does not support commands\n",
- cmd.subdev);
+ dev_dbg(dev->class_dev,
+ "subdevice %i does not support commands\n", cmd.subdev);
return -EIO;
}
/* make sure channel/gain list isn't too long */
if (cmd.chanlist_len > s->len_chanlist) {
- DPRINTK("channel/gain list too long %d > %d\n",
+ dev_dbg(dev->class_dev, "channel/gain list too long %d > %d\n",
cmd.chanlist_len, s->len_chanlist);
ret = -EINVAL;
goto cleanup;
@@ -1547,14 +1596,16 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
cmd.chanlist_len * sizeof(int));
if (IS_ERR(chanlist)) {
ret = PTR_ERR(chanlist);
- DPRINTK("memdup_user exited with code %d", ret);
+ chanlist = NULL;
+ dev_dbg(dev->class_dev,
+ "memdup_user exited with code %d", ret);
goto cleanup;
}
/* make sure each element in channel/gain list is valid */
ret = comedi_check_chanlist(s, cmd.chanlist_len, chanlist);
if (ret < 0) {
- DPRINTK("bad chanlist\n");
+ dev_dbg(dev->class_dev, "bad chanlist\n");
goto cleanup;
}
@@ -1567,7 +1618,7 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
cmd.chanlist = (unsigned int __force *)user_chanlist;
if (copy_to_user(arg, &cmd, sizeof(cmd))) {
- DPRINTK("bad cmd address\n");
+ dev_dbg(dev->class_dev, "bad cmd address\n");
ret = -EFAULT;
goto cleanup;
}
@@ -1700,8 +1751,6 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
return -EBUSY;
ret = do_cancel(dev, s);
- if (comedi_get_subdevice_runflags(s) & SRF_USER)
- wake_up_interruptible(&s->async->wait_head);
return ret;
}
@@ -1748,12 +1797,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = comedi_dev_from_minor(minor);
+ struct comedi_device *dev = file->private_data;
int rc;
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->mutex);
/* Device config is special, because it must work on
@@ -1782,7 +1828,7 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
}
if (!dev->attached) {
- DPRINTK("no driver configured on /dev/comedi%i\n", dev->minor);
+ dev_dbg(dev->class_dev, "no driver attached\n");
rc = -ENODEV;
goto done;
}
@@ -1852,28 +1898,18 @@ done:
static void comedi_vm_open(struct vm_area_struct *area)
{
- struct comedi_async *async;
- struct comedi_device *dev;
-
- async = area->vm_private_data;
- dev = async->subdevice->device;
+ struct comedi_buf_map *bm;
- mutex_lock(&dev->mutex);
- async->mmap_count++;
- mutex_unlock(&dev->mutex);
+ bm = area->vm_private_data;
+ comedi_buf_map_get(bm);
}
static void comedi_vm_close(struct vm_area_struct *area)
{
- struct comedi_async *async;
- struct comedi_device *dev;
-
- async = area->vm_private_data;
- dev = async->subdevice->device;
+ struct comedi_buf_map *bm;
- mutex_lock(&dev->mutex);
- async->mmap_count--;
- mutex_unlock(&dev->mutex);
+ bm = area->vm_private_data;
+ comedi_buf_map_put(bm);
}
static struct vm_operations_struct comedi_vm_ops = {
@@ -1884,22 +1920,20 @@ static struct vm_operations_struct comedi_vm_ops = {
static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
{
const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = comedi_dev_from_minor(minor);
+ struct comedi_device *dev = file->private_data;
struct comedi_subdevice *s;
struct comedi_async *async;
+ struct comedi_buf_map *bm;
unsigned long start = vma->vm_start;
unsigned long size;
int n_pages;
int i;
int retval;
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->mutex);
if (!dev->attached) {
- DPRINTK("no driver configured on comedi%i\n", dev->minor);
+ dev_dbg(dev->class_dev, "no driver attached\n");
retval = -ENODEV;
goto done;
}
@@ -1920,7 +1954,7 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
}
if (vma->vm_pgoff != 0) {
- DPRINTK("comedi: mmap() offset must be 0.\n");
+ dev_dbg(dev->class_dev, "mmap() offset must be 0.\n");
retval = -EINVAL;
goto done;
}
@@ -1936,8 +1970,13 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
}
n_pages = size >> PAGE_SHIFT;
+ bm = async->buf_map;
+ if (!bm || n_pages > bm->n_pages) {
+ retval = -EINVAL;
+ goto done;
+ }
for (i = 0; i < n_pages; ++i) {
- struct comedi_buf_page *buf = &async->buf_page_list[i];
+ struct comedi_buf_page *buf = &bm->page_list[i];
if (remap_pfn_range(vma, start,
page_to_pfn(virt_to_page(buf->virt_addr)),
@@ -1949,9 +1988,9 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
}
vma->vm_ops = &comedi_vm_ops;
- vma->vm_private_data = async;
+ vma->vm_private_data = bm;
- async->mmap_count++;
+ vma->vm_ops->open(vma);
retval = 0;
done:
@@ -1963,16 +2002,13 @@ static unsigned int comedi_poll(struct file *file, poll_table *wait)
{
unsigned int mask = 0;
const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = comedi_dev_from_minor(minor);
+ struct comedi_device *dev = file->private_data;
struct comedi_subdevice *s;
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->mutex);
if (!dev->attached) {
- DPRINTK("no driver configured on comedi%i\n", dev->minor);
+ dev_dbg(dev->class_dev, "no driver attached\n");
goto done;
}
@@ -2008,39 +2044,75 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = comedi_dev_from_minor(minor);
+ struct comedi_device *dev = file->private_data;
+ bool on_wait_queue = false;
+ bool attach_locked;
+ unsigned int old_detach_count;
- if (!dev)
- return -ENODEV;
+ /* Protect against device detachment during operation. */
+ down_read(&dev->attach_lock);
+ attach_locked = true;
+ old_detach_count = dev->detach_count;
if (!dev->attached) {
- DPRINTK("no driver configured on comedi%i\n", dev->minor);
- return -ENODEV;
+ dev_dbg(dev->class_dev, "no driver attached\n");
+ retval = -ENODEV;
+ goto out;
}
s = comedi_write_subdevice(dev, minor);
- if (!s || !s->async)
- return -EIO;
+ if (!s || !s->async) {
+ retval = -EIO;
+ goto out;
+ }
async = s->async;
if (!s->busy || !nbytes)
- return 0;
- if (s->busy != file)
- return -EACCES;
+ goto out;
+ if (s->busy != file) {
+ retval = -EACCES;
+ goto out;
+ }
add_wait_queue(&async->wait_head, &wait);
+ on_wait_queue = true;
while (nbytes > 0 && !retval) {
set_current_state(TASK_INTERRUPTIBLE);
if (!comedi_is_subdevice_running(s)) {
if (count == 0) {
- mutex_lock(&dev->mutex);
+ struct comedi_subdevice *new_s;
+
if (comedi_is_subdevice_in_error(s))
retval = -EPIPE;
else
retval = 0;
- do_become_nonbusy(dev, s);
+ /*
+ * To avoid deadlock, cannot acquire dev->mutex
+ * while dev->attach_lock is held. Need to
+ * remove task from the async wait queue before
+ * releasing dev->attach_lock, as it might not
+ * be valid afterwards.
+ */
+ remove_wait_queue(&async->wait_head, &wait);
+ on_wait_queue = false;
+ up_read(&dev->attach_lock);
+ attach_locked = false;
+ mutex_lock(&dev->mutex);
+ /*
+ * Become non-busy unless things have changed
+ * behind our back. Checking dev->detach_count
+ * is unchanged ought to be sufficient (unless
+ * there have been 2**32 detaches in the
+ * meantime!), but check the subdevice pointer
+ * as well just in case.
+ */
+ new_s = comedi_write_subdevice(dev, minor);
+ if (dev->attached &&
+ old_detach_count == dev->detach_count &&
+ s == new_s && new_s->async == async)
+ do_become_nonbusy(dev, s);
mutex_unlock(&dev->mutex);
}
break;
@@ -2090,8 +2162,12 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
buf += n;
break; /* makes device work like a pipe */
}
+out:
+ if (on_wait_queue)
+ remove_wait_queue(&async->wait_head, &wait);
set_current_state(TASK_RUNNING);
- remove_wait_queue(&async->wait_head, &wait);
+ if (attach_locked)
+ up_read(&dev->attach_lock);
return count ? count : retval;
}
@@ -2104,25 +2180,35 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = comedi_dev_from_minor(minor);
+ struct comedi_device *dev = file->private_data;
+ unsigned int old_detach_count;
+ bool become_nonbusy = false;
+ bool attach_locked;
- if (!dev)
- return -ENODEV;
+ /* Protect against device detachment during operation. */
+ down_read(&dev->attach_lock);
+ attach_locked = true;
+ old_detach_count = dev->detach_count;
if (!dev->attached) {
- DPRINTK("no driver configured on comedi%i\n", dev->minor);
- return -ENODEV;
+ dev_dbg(dev->class_dev, "no driver attached\n");
+ retval = -ENODEV;
+ goto out;
}
s = comedi_read_subdevice(dev, minor);
- if (!s || !s->async)
- return -EIO;
+ if (!s || !s->async) {
+ retval = -EIO;
+ goto out;
+ }
async = s->async;
if (!s->busy || !nbytes)
- return 0;
- if (s->busy != file)
- return -EACCES;
+ goto out;
+ if (s->busy != file) {
+ retval = -EACCES;
+ goto out;
+ }
add_wait_queue(&async->wait_head, &wait);
while (nbytes > 0 && !retval) {
@@ -2140,13 +2226,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
if (n == 0) {
if (!comedi_is_subdevice_running(s)) {
- mutex_lock(&dev->mutex);
- do_become_nonbusy(dev, s);
if (comedi_is_subdevice_in_error(s))
retval = -EPIPE;
else
retval = 0;
- mutex_unlock(&dev->mutex);
+ become_nonbusy = true;
break;
}
if (file->f_flags & O_NONBLOCK) {
@@ -2184,14 +2268,37 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
buf += n;
break; /* makes device work like a pipe */
}
- if (comedi_is_subdevice_idle(s)) {
+ remove_wait_queue(&async->wait_head, &wait);
+ set_current_state(TASK_RUNNING);
+ if (become_nonbusy || comedi_is_subdevice_idle(s)) {
+ struct comedi_subdevice *new_s;
+
+ /*
+ * To avoid deadlock, cannot acquire dev->mutex
+ * while dev->attach_lock is held.
+ */
+ up_read(&dev->attach_lock);
+ attach_locked = false;
mutex_lock(&dev->mutex);
- if (async->buf_read_count - async->buf_write_count == 0)
- do_become_nonbusy(dev, s);
+ /*
+ * Check device hasn't become detached behind our back.
+ * Checking dev->detach_count is unchanged ought to be
+ * sufficient (unless there have been 2**32 detaches in the
+ * meantime!), but check the subdevice pointer as well just in
+ * case.
+ */
+ new_s = comedi_read_subdevice(dev, minor);
+ if (dev->attached && old_detach_count == dev->detach_count &&
+ s == new_s && new_s->async == async) {
+ if (become_nonbusy ||
+ async->buf_read_count - async->buf_write_count == 0)
+ do_become_nonbusy(dev, s);
+ }
mutex_unlock(&dev->mutex);
}
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&async->wait_head, &wait);
+out:
+ if (attach_locked)
+ up_read(&dev->attach_lock);
return count ? count : retval;
}
@@ -2199,10 +2306,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
static int comedi_open(struct inode *inode, struct file *file)
{
const unsigned minor = iminor(inode);
- struct comedi_device *dev = comedi_dev_from_minor(minor);
+ struct comedi_device *dev = comedi_dev_get_from_minor(minor);
+ int rc;
if (!dev) {
- DPRINTK("invalid minor number\n");
+ pr_debug("invalid minor number\n");
return -ENODEV;
}
@@ -2223,9 +2331,9 @@ static int comedi_open(struct inode *inode, struct file *file)
if (dev->attached)
goto ok;
if (!capable(CAP_NET_ADMIN) && dev->in_request_module) {
- DPRINTK("in request module\n");
- mutex_unlock(&dev->mutex);
- return -ENODEV;
+ dev_dbg(dev->class_dev, "in request module\n");
+ rc = -ENODEV;
+ goto out;
}
if (capable(CAP_NET_ADMIN) && dev->in_request_module)
goto ok;
@@ -2241,59 +2349,49 @@ static int comedi_open(struct inode *inode, struct file *file)
dev->in_request_module = false;
if (!dev->attached && !capable(CAP_NET_ADMIN)) {
- DPRINTK("not attached and not CAP_NET_ADMIN\n");
- mutex_unlock(&dev->mutex);
- return -ENODEV;
+ dev_dbg(dev->class_dev, "not attached and not CAP_NET_ADMIN\n");
+ rc = -ENODEV;
+ goto out;
}
ok:
- __module_get(THIS_MODULE);
-
- if (dev->attached) {
+ if (dev->attached && dev->use_count == 0) {
if (!try_module_get(dev->driver->module)) {
- module_put(THIS_MODULE);
- mutex_unlock(&dev->mutex);
- return -ENOSYS;
+ rc = -ENOSYS;
+ goto out;
}
- }
-
- if (dev->attached && dev->use_count == 0 && dev->open) {
- int rc = dev->open(dev);
- if (rc < 0) {
- module_put(dev->driver->module);
- module_put(THIS_MODULE);
- mutex_unlock(&dev->mutex);
- return rc;
+ if (dev->open) {
+ rc = dev->open(dev);
+ if (rc < 0) {
+ module_put(dev->driver->module);
+ goto out;
+ }
}
}
dev->use_count++;
+ file->private_data = dev;
+ rc = 0;
+out:
mutex_unlock(&dev->mutex);
-
- return 0;
+ if (rc)
+ comedi_dev_put(dev);
+ return rc;
}
static int comedi_fasync(int fd, struct file *file, int on)
{
- const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = comedi_dev_from_minor(minor);
-
- if (!dev)
- return -ENODEV;
+ struct comedi_device *dev = file->private_data;
return fasync_helper(fd, file, on, &dev->async_queue);
}
static int comedi_close(struct inode *inode, struct file *file)
{
- const unsigned minor = iminor(inode);
- struct comedi_device *dev = comedi_dev_from_minor(minor);
+ struct comedi_device *dev = file->private_data;
struct comedi_subdevice *s = NULL;
int i;
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->mutex);
if (dev->subdevices) {
@@ -2306,16 +2404,16 @@ static int comedi_close(struct inode *inode, struct file *file)
s->lock = NULL;
}
}
- if (dev->attached && dev->use_count == 1 && dev->close)
- dev->close(dev);
-
- module_put(THIS_MODULE);
- if (dev->attached)
+ if (dev->attached && dev->use_count == 1) {
+ if (dev->close)
+ dev->close(dev);
module_put(dev->driver->module);
+ }
dev->use_count--;
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
return 0;
}
@@ -2346,8 +2444,6 @@ void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
unsigned runflags = 0;
unsigned runflags_mask = 0;
- /* DPRINTK("comedi_event 0x%x\n",mask); */
-
if (!comedi_is_subdevice_running(s))
return;
@@ -2368,16 +2464,11 @@ void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
}
if (async->cb_mask & s->async->events) {
- if (comedi_get_subdevice_runflags(s) & SRF_USER) {
- wake_up_interruptible(&async->wait_head);
- if (s->subdev_flags & SDF_CMD_READ)
- kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
- if (s->subdev_flags & SDF_CMD_WRITE)
- kill_fasync(&dev->async_queue, SIGIO, POLL_OUT);
- } else {
- if (async->cb_func)
- async->cb_func(s->async->events, async->cb_arg);
- }
+ wake_up_interruptible(&async->wait_head);
+ if (s->subdev_flags & SDF_CMD_READ)
+ kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
+ if (s->subdev_flags & SDF_CMD_WRITE)
+ kill_fasync(&dev->async_queue, SIGIO, POLL_OUT);
}
s->async->events = 0;
}
@@ -2408,7 +2499,7 @@ struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device)
if (i == COMEDI_NUM_BOARD_MINORS) {
mutex_unlock(&dev->mutex);
comedi_device_cleanup(dev);
- kfree(dev);
+ comedi_dev_put(dev);
pr_err("comedi: error: ran out of minor numbers for board device files.\n");
return ERR_PTR(-EBUSY);
}
@@ -2416,7 +2507,7 @@ struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device)
csdev = device_create(comedi_class, hardware_device,
MKDEV(COMEDI_MAJOR, i), NULL, "comedi%i", i);
if (!IS_ERR(csdev))
- dev->class_dev = csdev;
+ dev->class_dev = get_device(csdev);
/* Note: dev->mutex needs to be unlocked by the caller. */
return dev;
diff --git a/drivers/staging/comedi/comedi_internal.h b/drivers/staging/comedi/comedi_internal.h
index fda1a7ba0e16..9a746570f161 100644
--- a/drivers/staging/comedi/comedi_internal.h
+++ b/drivers/staging/comedi/comedi_internal.h
@@ -16,7 +16,11 @@ void comedi_free_subdevice_minor(struct comedi_subdevice *s);
int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long new_size);
void comedi_buf_reset(struct comedi_async *async);
+bool comedi_buf_is_mmapped(struct comedi_async *async);
+void comedi_buf_map_get(struct comedi_buf_map *bm);
+int comedi_buf_map_put(struct comedi_buf_map *bm);
unsigned int comedi_buf_write_n_allocated(struct comedi_async *async);
+void comedi_device_cancel_all(struct comedi_device *dev);
extern unsigned int comedi_default_buf_size_kb;
extern unsigned int comedi_default_buf_maxsize_kb;
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index 143be8076a2e..f82bd4256d51 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -20,14 +20,13 @@
#define _COMEDIDEV_H
#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
+#include <linux/rwsem.h>
+#include <linux/kref.h>
#include "comedi.h"
-#define DPRINTK(format, args...) do { \
- if (comedi_debug) \
- pr_debug("comedi: " format, ## args); \
-} while (0)
-
#define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
#define COMEDI_VERSION_CODE COMEDI_VERSION(COMEDI_MAJORVERSION, \
COMEDI_MINORVERSION, COMEDI_MICROVERSION)
@@ -100,18 +99,22 @@ struct comedi_buf_page {
dma_addr_t dma_addr;
};
+struct comedi_buf_map {
+ struct device *dma_hw_dev;
+ struct comedi_buf_page *page_list;
+ unsigned int n_pages;
+ enum dma_data_direction dma_dir;
+ struct kref refcount;
+};
+
struct comedi_async {
struct comedi_subdevice *subdevice;
void *prealloc_buf; /* pre-allocated buffer */
unsigned int prealloc_bufsz; /* buffer size, in bytes */
- /* virtual and dma address of each page */
- struct comedi_buf_page *buf_page_list;
- unsigned n_buf_pages; /* num elements in buf_page_list */
+ struct comedi_buf_map *buf_map; /* map of buffer pages */
unsigned int max_bufsize; /* maximum buffer size, bytes */
- /* current number of mmaps of prealloc_buf */
- unsigned int mmap_count;
/* byte count for writer (write completed) */
unsigned int buf_write_count;
@@ -141,10 +144,7 @@ struct comedi_async {
wait_queue_head_t wait_head;
- /* callback stuff */
unsigned int cb_mask;
- int (*cb_func) (unsigned int flags, void *);
- void *cb_arg;
int (*inttrig) (struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int x);
@@ -173,6 +173,7 @@ struct comedi_device {
struct device *class_dev;
int minor;
+ unsigned int detach_count;
/* hw_dev is passed to dma_alloc_coherent when allocating async buffers
* for subdevices that have async_dma_dir set to something other than
* DMA_NONE */
@@ -185,6 +186,8 @@ struct comedi_device {
bool ioenabled:1;
spinlock_t spinlock;
struct mutex mutex;
+ struct rw_semaphore attach_lock;
+ struct kref refcount;
int n_subdevices;
struct comedi_subdevice *subdevices;
@@ -208,12 +211,6 @@ static inline const void *comedi_board(const struct comedi_device *dev)
return dev->board_ptr;
}
-#ifdef CONFIG_COMEDI_DEBUG
-extern int comedi_debug;
-#else
-static const int comedi_debug;
-#endif
-
/*
* function prototypes
*/
@@ -231,7 +228,8 @@ enum comedi_minor_bits {
static const unsigned COMEDI_SUBDEVICE_MINOR_SHIFT = 4;
static const unsigned COMEDI_SUBDEVICE_MINOR_OFFSET = 1;
-struct comedi_device *comedi_dev_from_minor(unsigned minor);
+struct comedi_device *comedi_dev_get_from_minor(unsigned minor);
+int comedi_dev_put(struct comedi_device *dev);
void init_polling(void);
void cleanup_polling(void);
@@ -240,7 +238,6 @@ void stop_polling(struct comedi_device *);
/* subdevice runflags */
enum subdevice_runflags {
- SRF_USER = 0x00000001,
SRF_RT = 0x00000002,
/* indicates an COMEDI_CB_ERROR event has occurred since the last
* command was started */
@@ -410,6 +407,7 @@ void comedi_driver_unregister(struct comedi_driver *);
#define PCI_VENDOR_ID_IOTECH 0x1616
#define PCI_VENDOR_ID_CONTEC 0x1221
#define PCI_VENDOR_ID_RTD 0x1435
+#define PCI_VENDOR_ID_HUMUSOFT 0x186c
struct pci_dev;
struct pci_driver;
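The new struct comedi_buf_map pairs the buffer page list with a kref so the DMA pages can outlive a device detach while they are still mmap'ed. A minimal sketch, assuming the usual kernel kref idiom, of how get/put helpers matching the comedi_buf_map_get()/comedi_buf_map_put() prototypes added above are typically written — the release body here is illustrative, not the driver's actual page cleanup:

#include <linux/kref.h>
#include <linux/slab.h>
/* struct comedi_buf_map is the one declared in comedidev.h above */

static void comedi_buf_map_release(struct kref *kref)
{
	struct comedi_buf_map *bm =
		container_of(kref, struct comedi_buf_map, refcount);

	/* free bm->page_list entries here, then the map itself */
	kfree(bm);
}

void comedi_buf_map_get(struct comedi_buf_map *bm)
{
	kref_get(&bm->refcount);
}

int comedi_buf_map_put(struct comedi_buf_map *bm)
{
	/* returns nonzero when the last reference is dropped */
	return kref_put(&bm->refcount, comedi_buf_map_release);
}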
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 8f02bf66e20b..5b15033a94bf 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -95,7 +95,7 @@ int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices)
}
EXPORT_SYMBOL_GPL(comedi_alloc_subdevices);
-static void cleanup_device(struct comedi_device *dev)
+static void comedi_device_detach_cleanup(struct comedi_device *dev)
{
int i;
struct comedi_subdevice *s;
@@ -133,10 +133,14 @@ static void cleanup_device(struct comedi_device *dev)
void comedi_device_detach(struct comedi_device *dev)
{
+ comedi_device_cancel_all(dev);
+ down_write(&dev->attach_lock);
dev->attached = false;
+ dev->detach_count++;
if (dev->driver)
dev->driver->detach(dev);
- cleanup_device(dev);
+ comedi_device_detach_cleanup(dev);
+ up_write(&dev->attach_lock);
}
static int poll_invalid(struct comedi_device *dev, struct comedi_subdevice *s)
@@ -355,8 +359,9 @@ static int comedi_device_postconfig(struct comedi_device *dev)
ret = __comedi_device_postconfig(dev);
if (ret < 0)
return ret;
- smp_wmb();
+ down_write(&dev->attach_lock);
dev->attached = true;
+ up_write(&dev->attach_lock);
return 0;
}
@@ -446,7 +451,7 @@ int comedi_load_firmware(struct comedi_device *dev,
release_firmware(fw);
}
- return ret;
+ return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(comedi_load_firmware);
@@ -598,8 +603,12 @@ int comedi_auto_config(struct device *hardware_device,
}
dev = comedi_alloc_board_minor(hardware_device);
- if (IS_ERR(dev))
+ if (IS_ERR(dev)) {
+ dev_warn(hardware_device,
+ "driver '%s' could not create device.\n",
+ driver->driver_name);
return PTR_ERR(dev);
+ }
/* Note: comedi_alloc_board_minor() locked dev->mutex. */
dev->driver = driver;
@@ -607,12 +616,22 @@ int comedi_auto_config(struct device *hardware_device,
ret = driver->auto_attach(dev, context);
if (ret >= 0)
ret = comedi_device_postconfig(dev);
- if (ret < 0)
- comedi_device_detach(dev);
mutex_unlock(&dev->mutex);
- if (ret < 0)
+ if (ret < 0) {
+ dev_warn(hardware_device,
+ "driver '%s' failed to auto-configure device.\n",
+ driver->driver_name);
comedi_release_hardware_device(hardware_device);
+ } else {
+ /*
+ * class_dev should be set properly here
+ * after a successful auto config
+ */
+ dev_info(dev->class_dev,
+ "driver '%s' has successfully auto-configured '%s'.\n",
+ driver->driver_name, dev->board_name);
+ }
return ret;
}
EXPORT_SYMBOL_GPL(comedi_auto_config);
@@ -657,7 +676,7 @@ void comedi_driver_unregister(struct comedi_driver *driver)
/* check for devices using this driver */
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
- struct comedi_device *dev = comedi_dev_from_minor(i);
+ struct comedi_device *dev = comedi_dev_get_from_minor(i);
if (!dev)
continue;
@@ -671,6 +690,7 @@ void comedi_driver_unregister(struct comedi_driver *driver)
comedi_device_detach(dev);
}
mutex_unlock(&dev->mutex);
+ comedi_dev_put(dev);
}
}
EXPORT_SYMBOL_GPL(comedi_driver_unregister);
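comedi_device_detach() now clears dev->attached under the new attach_lock rw-semaphore (write side), and comedi_device_postconfig() sets it the same way. A hedged sketch of the matching read-side pattern a file-operation path could use to sample the flag without racing a detach — the helper name is hypothetical:

#include <linux/rwsem.h>

static bool comedi_dev_is_attached(struct comedi_device *dev)
{
	bool attached;

	down_read(&dev->attach_lock);
	attached = dev->attached;
	up_read(&dev->attach_lock);

	return attached;
}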
diff --git a/drivers/staging/comedi/drivers/8255.c b/drivers/staging/comedi/drivers/8255.c
index b4009e863414..48817f087d97 100644
--- a/drivers/staging/comedi/drivers/8255.c
+++ b/drivers/staging/comedi/drivers/8255.c
@@ -94,7 +94,7 @@ I/O port base address can be found in the output of 'lspci -v'.
struct subdev_8255_private {
unsigned long iobase;
- int (*io) (int, int, int, unsigned long);
+ int (*io)(int, int, int, unsigned long);
};
static int subdev_8255_io(int dir, int port, int data, unsigned long iobase)
@@ -262,7 +262,7 @@ static int subdev_8255_cancel(struct comedi_device *dev,
}
int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io) (int, int, int, unsigned long),
+ int (*io)(int, int, int, unsigned long),
unsigned long iobase)
{
struct subdev_8255_private *spriv;
@@ -289,7 +289,7 @@ int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
EXPORT_SYMBOL_GPL(subdev_8255_init);
int subdev_8255_init_irq(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io) (int, int, int, unsigned long),
+ int (*io)(int, int, int, unsigned long),
unsigned long iobase)
{
int ret;
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index 432e3f9c3301..8a57c3c1ade0 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -63,7 +63,8 @@ enum pci_8255_boardid {
BOARD_ADLINK_PCI7296,
BOARD_CB_PCIDIO24,
BOARD_CB_PCIDIO24H,
- BOARD_CB_PCIDIO48H,
+ BOARD_CB_PCIDIO48H_OLD,
+ BOARD_CB_PCIDIO48H_NEW,
BOARD_CB_PCIDIO96H,
BOARD_NI_PCIDIO96,
BOARD_NI_PCIDIO96B,
@@ -106,11 +107,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
.dio_badr = 2,
.n_8255 = 1,
},
- [BOARD_CB_PCIDIO48H] = {
+ [BOARD_CB_PCIDIO48H_OLD] = {
.name = "cb_pci-dio48h",
.dio_badr = 1,
.n_8255 = 2,
},
+ [BOARD_CB_PCIDIO48H_NEW] = {
+ .name = "cb_pci-dio48h",
+ .dio_badr = 2,
+ .n_8255 = 2,
+ },
[BOARD_CB_PCIDIO96H] = {
.name = "cb_pci-dio96h",
.dio_badr = 2,
@@ -257,13 +263,16 @@ static int pci_8255_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &pci_8255_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = {
+static const struct pci_device_id pci_8255_pci_table[] = {
{ PCI_VDEVICE(ADLINK, 0x7224), BOARD_ADLINK_PCI7224 },
{ PCI_VDEVICE(ADLINK, 0x7248), BOARD_ADLINK_PCI7248 },
{ PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
{ PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 },
{ PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H },
- { PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000),
+ .driver_data = BOARD_CB_PCIDIO48H_OLD },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
+ .driver_data = BOARD_CB_PCIDIO48H_NEW },
{ PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
{ PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
{ PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },
diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile
index 94cbd2618fc8..2706f583d8f0 100644
--- a/drivers/staging/comedi/drivers/Makefile
+++ b/drivers/staging/comedi/drivers/Makefile
@@ -1,5 +1,6 @@
# Makefile for individual comedi drivers
#
+ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG
# Comedi "helper" modules
@@ -110,6 +111,7 @@ obj-$(CONFIG_COMEDI_NI_PCIMIO) += ni_pcimio.o
obj-$(CONFIG_COMEDI_RTD520) += rtd520.o
obj-$(CONFIG_COMEDI_S626) += s626.o
obj-$(CONFIG_COMEDI_SSV_DNP) += ssv_dnp.o
+obj-$(CONFIG_COMEDI_MF6X4) += mf6x4.o
# Comedi PCMCIA drivers
obj-$(CONFIG_COMEDI_CB_DAS16_CS) += cb_das16_cs.o
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
index 3c9eec84f0eb..bd05857b82f2 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
@@ -1414,7 +1414,7 @@ static void v_APCI3120_InterruptDma(int irq, void *d)
{
struct comedi_device *dev = d;
struct addi_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned int next_dma_buf, samplesinbuf;
unsigned long low_word, high_word, var;
unsigned int ui_Tmp;
@@ -1568,8 +1568,8 @@ static void v_APCI3120_InterruptDma(int irq, void *d)
static int i_APCI3120_InterruptHandleEos(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
int n_chan, i;
- struct comedi_subdevice *s = &dev->subdevices[0];
int err = 1;
n_chan = devpriv->ui_AiNbrofChannels;
@@ -1593,11 +1593,11 @@ static void v_APCI3120_Interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct addi_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned short int_daq;
unsigned int int_amcc, ui_Check, i;
unsigned short us_TmpValue;
unsigned char b_DummyRead;
- struct comedi_subdevice *s = &dev->subdevices[0];
ui_Check = 1;
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
index dc73d4d348ed..8c85a09d1c66 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
@@ -2590,8 +2590,8 @@ static int i_APCI3200_CommandAnalogInput(struct comedi_device *dev,
static int i_APCI3200_InterruptHandleEos(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned int ui_StatusRegister = 0;
- struct comedi_subdevice *s = &dev->subdevices[0];
/* BEGIN JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
/* comedi_async *async = s->async; */
diff --git a/drivers/staging/comedi/drivers/addi_apci_035.c b/drivers/staging/comedi/drivers/addi_apci_035.c
index 8d229b2f0973..ccd49211ea17 100644
--- a/drivers/staging/comedi/drivers/addi_apci_035.c
+++ b/drivers/staging/comedi/drivers/addi_apci_035.c
@@ -58,7 +58,7 @@ static int apci035_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci035_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci035_pci_table) = {
+static const struct pci_device_id apci035_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x0300) },
{ 0 }
};
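The bulk of the per-driver hunks that follow repeat one mechanical conversion: the deprecated DEFINE_PCI_DEVICE_TABLE() macro is replaced by an open-coded const struct pci_device_id array. A generic sketch of the resulting pattern (the table name is illustrative; each driver keeps its own vendor/device IDs):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x0300) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, example_pci_table);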
diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
index 34ab0679e992..0daa0ea63b5e 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1032.c
@@ -325,8 +325,8 @@ static int apci1032_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[1];
if (dev->irq) {
dev->read_subdev = s;
- s->type = COMEDI_SUBD_DI | SDF_CMD_READ;
- s->subdev_flags = SDF_READABLE;
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
s->n_chan = 1;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -364,7 +364,7 @@ static int apci1032_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci1032_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci1032_pci_table) = {
+static const struct pci_device_id apci1032_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1003) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
index ae9ded63dcec..74f7ace8adbc 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
@@ -57,7 +57,7 @@ static int apci1500_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci1500_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci1500_pci_table) = {
+static const struct pci_device_id apci1500_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMCC, 0x80fc) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_1516.c b/drivers/staging/comedi/drivers/addi_apci_1516.c
index 9d1b1425c60b..e9c5291c77cd 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1516.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1516.c
@@ -206,7 +206,7 @@ static int apci1516_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci1516_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci1516_pci_table) = {
+static const struct pci_device_id apci1516_pci_table[] = {
{ PCI_VDEVICE(ADDIDATA, 0x1000), BOARD_APCI1016 },
{ PCI_VDEVICE(ADDIDATA, 0x1001), BOARD_APCI1516 },
{ PCI_VDEVICE(ADDIDATA, 0x1002), BOARD_APCI2016 },
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index c5717d63e16a..6248284caaf5 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -55,7 +55,7 @@ static int apci1564_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci1564_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci1564_pci_table) = {
+static const struct pci_device_id apci1564_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1006) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_16xx.c b/drivers/staging/comedi/drivers/addi_apci_16xx.c
index 5ee204bcbeef..28df4b50b87a 100644
--- a/drivers/staging/comedi/drivers/addi_apci_16xx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_16xx.c
@@ -168,7 +168,7 @@ static int apci16xx_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci16xx_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci16xx_pci_table) = {
+static const struct pci_device_id apci16xx_pci_table[] = {
{ PCI_VDEVICE(ADDIDATA, 0x1009), BOARD_APCI1648 },
{ PCI_VDEVICE(ADDIDATA, 0x100a), BOARD_APCI1696 },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/addi_apci_2032.c b/drivers/staging/comedi/drivers/addi_apci_2032.c
index c77ee8732d38..c9b933cb5987 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2032.c
@@ -359,7 +359,7 @@ static int apci2032_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci2032_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci2032_pci_table) = {
+static const struct pci_device_id apci2032_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1004) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_2200.c b/drivers/staging/comedi/drivers/addi_apci_2200.c
index 7fb32e778d8b..e1a916546d18 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2200.c
@@ -134,7 +134,7 @@ static int apci2200_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci2200_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci2200_pci_table) = {
+static const struct pci_device_id apci2200_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1005) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_3120.c b/drivers/staging/comedi/drivers/addi_apci_3120.c
index 67d09e8afb2e..1e6fa56c516e 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3120.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3120.c
@@ -230,7 +230,7 @@ static int apci3120_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci3120_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci3120_pci_table) = {
+static const struct pci_device_id apci3120_pci_table[] = {
{ PCI_VDEVICE(AMCC, 0x818d), BOARD_APCI3120 },
{ PCI_VDEVICE(AMCC, 0x828d), BOARD_APCI3001 },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/addi_apci_3200.c b/drivers/staging/comedi/drivers/addi_apci_3200.c
index 1213d5aa6bea..9868a5369aff 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3200.c
@@ -109,7 +109,7 @@ static int apci3200_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci3200_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci3200_pci_table) = {
+static const struct pci_device_id apci3200_pci_table[] = {
{ PCI_VDEVICE(ADDIDATA, 0x3000), BOARD_APCI3200 },
{ PCI_VDEVICE(ADDIDATA, 0x3007), BOARD_APCI3300 },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
index 6138440b919e..4cb69ec54c9b 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3501.c
@@ -428,7 +428,7 @@ static int apci3501_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci3501_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci3501_pci_table) = {
+static const struct pci_device_id apci3501_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3001) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
index 761cbf8f964b..ceadf8eff47f 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
@@ -915,7 +915,7 @@ static int apci3xxx_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &apci3xxx_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(apci3xxx_pci_table) = {
+static const struct pci_device_id apci3xxx_pci_table[] = {
{ PCI_VDEVICE(ADDIDATA, 0x3010), BOARD_APCI3000_16 },
{ PCI_VDEVICE(ADDIDATA, 0x300f), BOARD_APCI3000_8 },
{ PCI_VDEVICE(ADDIDATA, 0x300e), BOARD_APCI3000_4 },
diff --git a/drivers/staging/comedi/drivers/adl_pci6208.c b/drivers/staging/comedi/drivers/adl_pci6208.c
index dd092c7954a9..5c1413abc52d 100644
--- a/drivers/staging/comedi/drivers/adl_pci6208.c
+++ b/drivers/staging/comedi/drivers/adl_pci6208.c
@@ -242,7 +242,7 @@ static int adl_pci6208_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(adl_pci6208_pci_table) = {
+static const struct pci_device_id adl_pci6208_pci_table[] = {
{ PCI_VDEVICE(ADLINK, 0x6208), BOARD_PCI6208 },
{ PCI_VDEVICE(ADLINK, 0x6216), BOARD_PCI6216 },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
index 5617f5ca384a..6f622b4de698 100644
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
+++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
@@ -259,7 +259,7 @@ static int adl_pci7x3x_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(adl_pci7x3x_pci_table) = {
+static const struct pci_device_id adl_pci7x3x_pci_table[] = {
{ PCI_VDEVICE(ADLINK, 0x7230), BOARD_PCI7230 },
{ PCI_VDEVICE(ADLINK, 0x7233), BOARD_PCI7233 },
{ PCI_VDEVICE(ADLINK, 0x7234), BOARD_PCI7234 },
diff --git a/drivers/staging/comedi/drivers/adl_pci8164.c b/drivers/staging/comedi/drivers/adl_pci8164.c
index b3d009285ed4..300df55a2802 100644
--- a/drivers/staging/comedi/drivers/adl_pci8164.c
+++ b/drivers/staging/comedi/drivers/adl_pci8164.c
@@ -145,7 +145,7 @@ static int adl_pci8164_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(adl_pci8164_pci_table) = {
+static const struct pci_device_id adl_pci8164_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, 0x8164) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index eab8da2c3d66..363f2e42a27f 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -125,8 +125,7 @@ TODO:
PLX9052_INTCSR_LI2STAT)
static const struct comedi_lrange pci9111_ai_range = {
- 5,
- {
+ 5, {
BIP_RANGE(10),
BIP_RANGE(5),
BIP_RANGE(2.5),
@@ -470,11 +469,6 @@ static int pci9111_ai_do_cmd(struct comedi_device *dev,
struct pci9111_private_data *dev_private = dev->private;
struct comedi_cmd *async_cmd = &s->async->cmd;
- if (!dev->irq) {
- comedi_error(dev,
- "no irq assigned for PCI9111, cannot do hardware conversion");
- return -1;
- }
/* Set channel scan limit */
/* PCI9111 allows only scanning from channel 0 to channel n */
/* TODO: handle the case of an external multiplexer */
@@ -858,12 +852,11 @@ static int pci9111_auto_attach(struct comedi_device *dev,
pci9111_reset(dev);
- if (pcidev->irq > 0) {
- ret = request_irq(dev->irq, pci9111_interrupt,
+ if (pcidev->irq) {
+ ret = request_irq(pcidev->irq, pci9111_interrupt,
IRQF_SHARED, dev->board_name, dev);
- if (ret)
- return ret;
- dev->irq = pcidev->irq;
+ if (ret == 0)
+ dev->irq = pcidev->irq;
}
ret = comedi_alloc_subdevices(dev, 4);
@@ -871,18 +864,21 @@ static int pci9111_auto_attach(struct comedi_device *dev,
return ret;
s = &dev->subdevices[0];
- dev->read_subdev = s;
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_COMMON;
s->n_chan = 16;
s->maxdata = 0xffff;
- s->len_chanlist = 16;
s->range_table = &pci9111_ai_range;
- s->cancel = pci9111_ai_cancel;
s->insn_read = pci9111_ai_insn_read;
- s->do_cmdtest = pci9111_ai_do_cmd_test;
- s->do_cmd = pci9111_ai_do_cmd;
- s->munge = pci9111_ai_munge;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmdtest = pci9111_ai_do_cmd_test;
+ s->do_cmd = pci9111_ai_do_cmd;
+ s->cancel = pci9111_ai_cancel;
+ s->munge = pci9111_ai_munge;
+ }
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_AO;
@@ -938,7 +934,7 @@ static int pci9111_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(pci9111_pci_table) = {
+static const struct pci_device_id pci9111_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI9111_HR_DEVICE_ID) },
/* { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI9111_HG_DEVICE_ID) }, */
{ 0 }
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index 986489641ed7..4bdd9720e9eb 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -194,28 +194,30 @@ Configuration options:
#define EXTTRG_AI 0 /* ext trg is used by AI */
-static const struct comedi_lrange range_pci9118dg_hr = { 8, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
+static const struct comedi_lrange range_pci9118dg_hr = {
+ 8, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_pci9118hg = { 8, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01)
- }
+static const struct comedi_lrange range_pci9118hg = {
+ 8, {
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.005),
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01)
+ }
};
#define PCI9118_BIPOLAR_RANGES 4 /*
@@ -1126,7 +1128,7 @@ static irqreturn_t interrupt_pci9118(int irq, void *d)
}
}
- (devpriv->int_ai_func) (dev, &dev->subdevices[0], int_adstat,
+ (devpriv->int_ai_func) (dev, dev->read_subdev, int_adstat,
int_amcc, int_daq);
}
@@ -1965,7 +1967,6 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq,
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct comedi_subdevice *s;
int ret, pages, i;
- unsigned int irq;
u16 u16w;
dev->board_name = this_board->name;
@@ -2036,12 +2037,18 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq,
pci_write_config_word(pcidev, PCI_COMMAND, u16w | 64);
/* Enable parity check for parity error */
+ if (!disable_irq && pcidev->irq) {
+ ret = request_irq(pcidev->irq, interrupt_pci9118, IRQF_SHARED,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = pcidev->irq;
+ }
+
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
s = &dev->subdevices[0];
- dev->read_subdev = s;
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF;
if (devpriv->usemux)
@@ -2050,11 +2057,17 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq,
s->n_chan = this_board->n_aichan;
s->maxdata = this_board->ai_maxdata;
- s->len_chanlist = this_board->n_aichanlist;
s->range_table = this_board->rangelist_ai;
- s->cancel = pci9118_ai_cancel;
s->insn_read = pci9118_insn_read_ai;
- s->munge = pci9118_ai_munge;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = this_board->n_aichanlist;
+ s->do_cmdtest = pci9118_ai_cmdtest;
+ s->do_cmd = pci9118_ai_cmd;
+ s->cancel = pci9118_ai_cancel;
+ s->munge = pci9118_ai_munge;
+ }
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_AO;
@@ -2100,27 +2113,7 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq,
break;
}
- if (disable_irq)
- irq = 0;
- else
- irq = pcidev->irq;
- if (irq > 0) {
- if (request_irq(irq, interrupt_pci9118, IRQF_SHARED,
- dev->board_name, dev)) {
- dev_warn(dev->class_dev,
- "unable to allocate IRQ %u, DISABLING IT\n",
- irq);
- } else {
- dev->irq = irq;
- /* Enable AI commands */
- s = &dev->subdevices[0];
- s->subdev_flags |= SDF_CMD_READ;
- s->do_cmdtest = pci9118_ai_cmdtest;
- s->do_cmd = pci9118_ai_cmd;
- }
- }
-
- pci9118_report_attach(dev, irq);
+ pci9118_report_attach(dev, dev->irq);
return 0;
}
@@ -2217,7 +2210,7 @@ static int adl_pci9118_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(adl_pci9118_pci_table) = {
+static const struct pci_device_id adl_pci9118_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMCC, 0x80d9) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/adq12b.c b/drivers/staging/comedi/drivers/adq12b.c
index 8150a67cd1fb..3190ef7d285e 100644
--- a/drivers/staging/comedi/drivers/adq12b.c
+++ b/drivers/staging/comedi/drivers/adq12b.c
@@ -97,21 +97,22 @@ If you do not specify any options, they will default to
#define TIMEOUT 20
/* available ranges through the PGA gains */
-static const struct comedi_lrange range_adq12b_ai_bipolar = { 4, {
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- BIP_RANGE(0.5)
- }
+static const struct comedi_lrange range_adq12b_ai_bipolar = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(2),
+ BIP_RANGE(1),
+ BIP_RANGE(0.5)
+ }
};
-static const struct comedi_lrange range_adq12b_ai_unipolar = { 4, {
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1),
- UNI_RANGE
- (0.5)
- }
+static const struct comedi_lrange range_adq12b_ai_unipolar = {
+ 4, {
+ UNI_RANGE(5),
+ UNI_RANGE(2),
+ UNI_RANGE(1),
+ UNI_RANGE(0.5)
+ }
};
struct adq12b_private {
@@ -162,8 +163,6 @@ static int adq12b_ai_rinsn(struct comedi_device *dev,
hi = inb(dev->iobase + ADQ12B_ADHIG);
lo = inb(dev->iobase + ADQ12B_ADLOW);
- /* printk("debug: chan=%d range=%d status=%d hi=%d lo=%d\n",
- channel, range, status, hi, lo); */
data[n] = (hi << 8) | lo;
}
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index c3fdcabe9aec..d9ad2c0fdda2 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -115,65 +115,70 @@ Configuration options:
/* D/A synchronized control (PCI1720_SYNCONT) */
#define Syncont_SC0 1 /* set synchronous output mode */
-static const struct comedi_lrange range_pci1710_3 = { 9, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- BIP_RANGE(10),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
+static const struct comedi_lrange range_pci1710_3 = {
+ 9, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ BIP_RANGE(10),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
static const char range_codes_pci1710_3[] = { 0x00, 0x01, 0x02, 0x03, 0x04,
0x10, 0x11, 0x12, 0x13 };
-static const struct comedi_lrange range_pci1710hg = { 12, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005),
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01)
- }
+static const struct comedi_lrange range_pci1710hg = {
+ 12, {
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.005),
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.01),
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01)
+ }
};
static const char range_codes_pci1710hg[] = { 0x00, 0x01, 0x02, 0x03, 0x04,
0x05, 0x06, 0x07, 0x10, 0x11,
0x12, 0x13 };
-static const struct comedi_lrange range_pci17x1 = { 5, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625)
- }
+static const struct comedi_lrange range_pci17x1 = {
+ 5, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625)
+ }
};
static const char range_codes_pci17x1[] = { 0x00, 0x01, 0x02, 0x03, 0x04 };
-static const struct comedi_lrange range_pci1720 = { 4, {
- UNI_RANGE(5),
- UNI_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(10)
- }
+static const struct comedi_lrange range_pci1720 = {
+ 4, {
+ UNI_RANGE(5),
+ UNI_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(10)
+ }
};
-static const struct comedi_lrange range_pci171x_da = { 2, {
- UNI_RANGE(5),
- UNI_RANGE(10),
- }
+static const struct comedi_lrange range_pci171x_da = {
+ 2, {
+ UNI_RANGE(5),
+ UNI_RANGE(10)
+ }
};
enum pci1710_boardid {
@@ -489,6 +494,7 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
+ unsigned int val;
int n, chan, range, ofs;
chan = CR_CHAN(insn->chanspec);
@@ -504,11 +510,14 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF);
ofs = PCI171x_DA1;
}
+ val = devpriv->ao_data[chan];
- for (n = 0; n < insn->n; n++)
- outw(data[n], dev->iobase + ofs);
+ for (n = 0; n < insn->n; n++) {
+ val = data[n];
+ outw(val, dev->iobase + ofs);
+ }
- devpriv->ao_data[chan] = data[n];
+ devpriv->ao_data[chan] = val;
return n;
@@ -674,6 +683,7 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
+ unsigned int val;
int n, rangereg, chan;
chan = CR_CHAN(insn->chanspec);
@@ -683,13 +693,15 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
outb(rangereg, dev->iobase + PCI1720_RANGE);
devpriv->da_ranges = rangereg;
}
+ val = devpriv->ao_data[chan];
for (n = 0; n < insn->n; n++) {
- outw(data[n], dev->iobase + PCI1720_DA0 + (chan << 1));
+ val = data[n];
+ outw(val, dev->iobase + PCI1720_DA0 + (chan << 1));
outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */
}
- devpriv->ao_data[chan] = data[n];
+ devpriv->ao_data[chan] = val;
return n;
}
@@ -731,7 +743,7 @@ static void interrupt_pci1710_every_sample(void *d)
{
struct comedi_device *dev = d;
struct pci1710_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
int m;
#ifdef PCI171x_PARANOIDCHECK
const struct boardtype *this_board = comedi_board(dev);
@@ -740,16 +752,15 @@ static void interrupt_pci1710_every_sample(void *d)
m = inw(dev->iobase + PCI171x_STATUS);
if (m & Status_FE) {
- printk("comedi%d: A/D FIFO empty (%4x)\n", dev->minor, m);
+ dev_dbg(dev->class_dev, "A/D FIFO empty (%4x)\n", m);
pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
return;
}
if (m & Status_FF) {
- printk
- ("comedi%d: A/D FIFO Full status (Fatal Error!) (%4x)\n",
- dev->minor, m);
+ dev_dbg(dev->class_dev,
+ "A/D FIFO Full status (Fatal Error!) (%4x)\n", m);
pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
@@ -825,12 +836,12 @@ static int move_block_from_fifo(struct comedi_device *dev,
sampl = inw(dev->iobase + PCI171x_AD_DATA);
if (this_board->cardtype != TYPE_PCI1713)
if ((sampl & 0xf000) != devpriv->act_chanlist[j]) {
- printk
- ("comedi%d: A/D FIFO data dropout: received data from channel %d, expected %d! (%d/%d/%d/%d/%d/%4x)\n",
- dev->minor, (sampl & 0xf000) >> 12,
- (devpriv->act_chanlist[j] & 0xf000) >> 12,
- i, j, devpriv->ai_act_scan, n, turn,
- sampl);
+ dev_dbg(dev->class_dev,
+ "A/D FIFO data dropout: received data from channel %d, expected %d! (%d/%d/%d/%d/%d/%4x)\n",
+ (sampl & 0xf000) >> 12,
+ (devpriv->act_chanlist[j] & 0xf000) >> 12,
+ i, j, devpriv->ai_act_scan, n, turn,
+ sampl);
pci171x_ai_cancel(dev, s);
s->async->events |=
COMEDI_CB_EOA | COMEDI_CB_ERROR;
@@ -860,22 +871,20 @@ static void interrupt_pci1710_half_fifo(void *d)
struct comedi_device *dev = d;
const struct boardtype *this_board = comedi_board(dev);
struct pci1710_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
int m, samplesinbuf;
m = inw(dev->iobase + PCI171x_STATUS);
if (!(m & Status_FH)) {
- printk("comedi%d: A/D FIFO not half full! (%4x)\n",
- dev->minor, m);
+ dev_dbg(dev->class_dev, "A/D FIFO not half full! (%4x)\n", m);
pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
return;
}
if (m & Status_FF) {
- printk
- ("comedi%d: A/D FIFO Full status (Fatal Error!) (%4x)\n",
- dev->minor, m);
+ dev_dbg(dev->class_dev,
+ "A/D FIFO Full status (Fatal Error!) (%4x)\n", m);
pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
@@ -1267,21 +1276,21 @@ static int pci1710_auto_attach(struct comedi_device *dev,
if (this_board->n_aichan) {
s = &dev->subdevices[subdev];
- dev->read_subdev = s;
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND;
if (this_board->n_aichand)
s->subdev_flags |= SDF_DIFF;
s->n_chan = this_board->n_aichan;
s->maxdata = this_board->ai_maxdata;
- s->len_chanlist = this_board->n_aichan;
s->range_table = this_board->rangelist_ai;
- s->cancel = pci171x_ai_cancel;
s->insn_read = pci171x_insn_read_ai;
if (dev->irq) {
+ dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
s->do_cmdtest = pci171x_ai_cmdtest;
s->do_cmd = pci171x_ai_cmd;
+ s->cancel = pci171x_ai_cancel;
}
devpriv->i8254_osc_base = I8254_OSC_BASE_10MHZ;
subdev++;
@@ -1374,7 +1383,7 @@ static int adv_pci1710_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(adv_pci1710_pci_table) = {
+static const struct pci_device_id adv_pci1710_pci_table[] = {
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADVANTECH, 0x1710,
PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050),
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index bd4f781b4b24..72394267ddfe 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -306,7 +306,7 @@ static int adv_pci1723_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(adv_pci1723_pci_table) = {
+static const struct pci_device_id adv_pci1723_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1723) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/adv_pci1724.c b/drivers/staging/comedi/drivers/adv_pci1724.c
index 009a3039fc4f..af670acb03d8 100644
--- a/drivers/staging/comedi/drivers/adv_pci1724.c
+++ b/drivers/staging/comedi/drivers/adv_pci1724.c
@@ -116,8 +116,8 @@ enum board_id_contents {
BOARD_ID_MASK = 0xf
};
-static const struct comedi_lrange ao_ranges_1724 = { 4,
- {
+static const struct comedi_lrange ao_ranges_1724 = {
+ 4, {
BIP_RANGE(10),
RANGE_mA(0, 20),
RANGE_mA(4, 20),
@@ -381,7 +381,7 @@ static int adv_pci1724_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(adv_pci1724_pci_table) = {
+static const struct pci_device_id adv_pci1724_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1724) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 6bac665261f8..d4bd61d84daf 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -1188,7 +1188,7 @@ static int adv_pci_dio_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &adv_pci_dio_driver, cardtype);
}
-static DEFINE_PCI_DEVICE_TABLE(adv_pci_dio_pci_table) = {
+static const struct pci_device_id adv_pci_dio_pci_table[] = {
{ PCI_VDEVICE(ADVANTECH, 0x1730), TYPE_PCI1730 },
{ PCI_VDEVICE(ADVANTECH, 0x1733), TYPE_PCI1733 },
{ PCI_VDEVICE(ADVANTECH, 0x1734), TYPE_PCI1734 },
diff --git a/drivers/staging/comedi/drivers/aio_aio12_8.c b/drivers/staging/comedi/drivers/aio_aio12_8.c
index abb28498b58c..68c3fb3226ca 100644
--- a/drivers/staging/comedi/drivers/aio_aio12_8.c
+++ b/drivers/staging/comedi/drivers/aio_aio12_8.c
@@ -181,13 +181,12 @@ static int aio_aio12_8_ao_write(struct comedi_device *dev,
}
static const struct comedi_lrange range_aio_aio12_8 = {
- 4,
- {
- UNI_RANGE(5),
- BIP_RANGE(5),
- UNI_RANGE(10),
- BIP_RANGE(10),
- }
+ 4, {
+ UNI_RANGE(5),
+ BIP_RANGE(5),
+ UNI_RANGE(10),
+ BIP_RANGE(10)
+ }
};
static int aio_aio12_8_attach(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/amcc_s5933.h b/drivers/staging/comedi/drivers/amcc_s5933.h
index b810d5f3d971..2ba736444610 100644
--- a/drivers/staging/comedi/drivers/amcc_s5933.h
+++ b/drivers/staging/comedi/drivers/amcc_s5933.h
@@ -145,12 +145,12 @@
#define AINT_READ_COMPL 0x00008000
#define AINT_WRITE_COMPL 0x00004000
-#define AINT_OMB_ENABLE 0x00001000
-#define AINT_OMB_SELECT 0x00000c00
+#define AINT_OMB_ENABLE 0x00001000
+#define AINT_OMB_SELECT 0x00000c00
#define AINT_OMB_BYTE 0x00000300
-#define AINT_IMB_ENABLE 0x00000010
-#define AINT_IMB_SELECT 0x0000000c
+#define AINT_IMB_ENABLE 0x00000010
+#define AINT_IMB_SELECT 0x0000000c
#define AINT_IMB_BYTE 0x00000003
/* these are bits from various different registers, needs cleanup XXX */
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c
index 2e4bf284d52c..9c9559ffc643 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_common.c
@@ -513,7 +513,7 @@ static int dio200_subdev_intr_cmd(struct comedi_device *dev,
int event = 0;
spin_lock_irqsave(&subpriv->spinlock, flags);
- subpriv->active = 1;
+ subpriv->active = true;
/* Set up end of acquisition. */
switch (cmd->stop_src) {
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_pci.c b/drivers/staging/comedi/drivers/amplc_dio200_pci.c
index a810a2416443..e0367380b37a 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_pci.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_pci.c
@@ -439,7 +439,7 @@ static struct comedi_driver dio200_pci_comedi_driver = {
.detach = dio200_pci_detach,
};
-static DEFINE_PCI_DEVICE_TABLE(dio200_pci_table) = {
+static const struct pci_device_id dio200_pci_table[] = {
{
PCI_VDEVICE(AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI215),
pci215_model
diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c
index 98075f999c9f..31734e1142fd 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236.c
+++ b/drivers/staging/comedi/drivers/amplc_pc236.c
@@ -356,7 +356,7 @@ static int pc236_intr_cancel(struct comedi_device *dev,
static irqreturn_t pc236_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
- struct comedi_subdevice *s = &dev->subdevices[1];
+ struct comedi_subdevice *s = dev->read_subdev;
int handled;
handled = pc236_intr_check(dev);
@@ -567,7 +567,7 @@ static struct comedi_driver amplc_pc236_driver = {
};
#if DO_PCI
-static DEFINE_PCI_DEVICE_TABLE(pc236_pci_table) = {
+static const struct pci_device_id pc236_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI236) },
{0}
};
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index 810e397d8fd7..ae4048a624fa 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -267,17 +267,16 @@ Caveats:
/* The software selectable internal ranges for PCI224 (option[2] == 0). */
static const struct comedi_lrange range_pci224_internal = {
- 8,
- {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- }
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
static const unsigned short hwrange_pci224_internal[8] = {
@@ -293,11 +292,10 @@ static const unsigned short hwrange_pci224_internal[8] = {
/* The software selectable external ranges for PCI224 (option[2] == 1). */
static const struct comedi_lrange range_pci224_external = {
- 2,
- {
- RANGE_ext(-1, 1), /* bipolar [-Vref,+Vref] */
- RANGE_ext(0, 1), /* unipolar [0,+Vref] */
- }
+ 2, {
+ RANGE_ext(-1, 1), /* bipolar [-Vref,+Vref] */
+ RANGE_ext(0, 1) /* unipolar [0,+Vref] */
+ }
};
static const unsigned short hwrange_pci224_external[2] = {
@@ -308,19 +306,17 @@ static const unsigned short hwrange_pci224_external[2] = {
/* The hardware selectable Vref*2 external range for PCI234
* (option[2] == 1, option[3+n] == 0). */
static const struct comedi_lrange range_pci234_ext2 = {
- 1,
- {
- RANGE_ext(-2, 2),
- }
+ 1, {
+ RANGE_ext(-2, 2)
+ }
};
/* The hardware selectable Vref external range for PCI234
* (option[2] == 1, option[3+n] == 1). */
static const struct comedi_lrange range_pci234_ext = {
- 1,
- {
- RANGE_ext(-1, 1),
- }
+ 1, {
+ RANGE_ext(-1, 1)
+ }
};
/* This serves for all the PCI234 ranges. */
@@ -909,16 +905,14 @@ pci224_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
}
if (errors) {
if (errors & dupchan_err) {
- DPRINTK("comedi%d: " DRIVER_NAME
- ": ao_cmdtest: "
- "entries in chanlist must contain no "
- "duplicate channels\n", dev->minor);
+ dev_dbg(dev->class_dev,
+ "%s: entries in chanlist must contain no duplicate channels\n",
+ __func__);
}
if (errors & range_err) {
- DPRINTK("comedi%d: " DRIVER_NAME
- ": ao_cmdtest: "
- "entries in chanlist must all have "
- "the same range index\n", dev->minor);
+ dev_dbg(dev->class_dev,
+ "%s: entries in chanlist must all have the same range index\n",
+ __func__);
}
err++;
}
@@ -1142,7 +1136,7 @@ static irqreturn_t pci224_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct pci224_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->write_subdev;
struct comedi_cmd *cmd;
unsigned char intstat, valid_intstat;
unsigned char curenab;
@@ -1498,7 +1492,7 @@ static int amplc_pci224_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(amplc_pci224_pci_table) = {
+static const struct pci_device_id amplc_pci224_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI224) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI234) },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index a97bbd6ca3db..c08dfbbe4062 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -546,15 +546,16 @@ static const unsigned int pci230_timebase[8] = {
};
/* PCI230 analogue input range table */
-static const struct comedi_lrange pci230_ai_range = { 7, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5)
- }
+static const struct comedi_lrange pci230_ai_range = {
+ 7, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5)
+ }
};
/* PCI230 analogue gain bits for each input range. */
@@ -564,10 +565,11 @@ static const unsigned char pci230_ai_gain[7] = { 0, 1, 2, 3, 1, 2, 3 };
static const unsigned char pci230_ai_bipolar[7] = { 1, 1, 1, 1, 0, 0, 0 };
/* PCI230 analogue output range table */
-static const struct comedi_lrange pci230_ao_range = { 2, {
- UNI_RANGE(10),
- BIP_RANGE(10)
- }
+static const struct comedi_lrange pci230_ao_range = {
+ 2, {
+ UNI_RANGE(10),
+ BIP_RANGE(10)
+ }
};
/* PCI230 daccon bipolar flag for each analogue output range. */
@@ -818,9 +820,9 @@ static int pci230_ai_rinsn(struct comedi_device *dev,
if (aref == AREF_DIFF) {
/* Differential. */
if (chan >= s->n_chan / 2) {
- DPRINTK("comedi%d: amplc_pci230: ai_rinsn: "
- "differential channel number out of range "
- "0 to %u\n", dev->minor, (s->n_chan / 2) - 1);
+ dev_dbg(dev->class_dev,
+ "%s: differential channel number out of range 0 to %u\n",
+ __func__, (s->n_chan / 2) - 1);
return -EINVAL;
}
}
@@ -1092,14 +1094,14 @@ static int pci230_ao_cmdtest(struct comedi_device *dev,
if (errors != 0) {
err++;
if ((errors & seq_err) != 0) {
- DPRINTK("comedi%d: amplc_pci230: ao_cmdtest: "
- "channel numbers must increase\n",
- dev->minor);
+ dev_dbg(dev->class_dev,
+ "%s: channel numbers must increase\n",
+ __func__);
}
if ((errors & range_err) != 0) {
- DPRINTK("comedi%d: amplc_pci230: ao_cmdtest: "
- "channels must have the same range\n",
- dev->minor);
+ dev_dbg(dev->class_dev,
+ "%s: channels must have the same range\n",
+ __func__);
}
}
}
@@ -1835,33 +1837,29 @@ static int pci230_ai_cmdtest(struct comedi_device *dev,
if (errors != 0) {
err++;
if ((errors & seq_err) != 0) {
- DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: "
- "channel numbers must increase or "
- "sequence must repeat exactly\n",
- dev->minor);
+ dev_dbg(dev->class_dev,
+ "%s: channel numbers must increase or sequence must repeat exactly\n",
+ __func__);
}
if ((errors & rangepair_err) != 0) {
- DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: "
- "single-ended channel pairs must "
- "have the same range\n", dev->minor);
+ dev_dbg(dev->class_dev,
+ "%s: single-ended channel pairs must have the same range\n",
+ __func__);
}
if ((errors & polarity_err) != 0) {
- DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: "
- "channel sequence ranges must be all "
- "bipolar or all unipolar\n",
- dev->minor);
+ dev_dbg(dev->class_dev,
+ "%s: channel sequence ranges must be all bipolar or all unipolar\n",
+ __func__);
}
if ((errors & aref_err) != 0) {
- DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: "
- "channel sequence analogue references "
- "must be all the same (single-ended "
- "or differential)\n", dev->minor);
+ dev_dbg(dev->class_dev,
+ "%s: channel sequence analogue references must be all the same (single-ended or differential)\n",
+ __func__);
}
if ((errors & diffchan_err) != 0) {
- DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: "
- "differential channel number out of "
- "range 0 to %u\n", dev->minor,
- (s->n_chan / 2) - 1);
+ dev_dbg(dev->class_dev,
+ "%s: differential channel number out of range 0 to %u\n",
+ __func__, (s->n_chan / 2) - 1);
}
if ((errors & buggy_chan0_err) != 0) {
dev_info(dev->class_dev,
@@ -2637,7 +2635,7 @@ static int pci230_attach_common(struct comedi_device *dev,
struct comedi_subdevice *s;
unsigned long iobase1, iobase2;
/* PCI230's I/O spaces 1 and 2 respectively. */
- int irq_hdl, rc;
+ int rc;
comedi_set_hw_dev(dev, &pci_dev->dev);
@@ -2709,16 +2707,12 @@ static int pci230_attach_common(struct comedi_device *dev,
outw(devpriv->adcg, dev->iobase + PCI230_ADCG);
outw(devpriv->adccon | PCI230_ADC_FIFO_RESET,
dev->iobase + PCI230_ADCCON);
- /* Register the interrupt handler. */
- irq_hdl = request_irq(pci_dev->irq, pci230_interrupt,
- IRQF_SHARED, "amplc_pci230", dev);
- if (irq_hdl < 0) {
- dev_warn(dev->class_dev,
- "unable to register irq %u, commands will not be available\n",
- pci_dev->irq);
- } else {
- dev->irq = pci_dev->irq;
- dev_dbg(dev->class_dev, "registered irq %u\n", pci_dev->irq);
+
+ if (pci_dev->irq) {
+ rc = request_irq(pci_dev->irq, pci230_interrupt, IRQF_SHARED,
+ dev->board_name, dev);
+ if (rc == 0)
+ dev->irq = pci_dev->irq;
}
rc = comedi_alloc_subdevices(dev, 3);
@@ -2734,14 +2728,14 @@ static int pci230_attach_common(struct comedi_device *dev,
s->range_table = &pci230_ai_range;
s->insn_read = &pci230_ai_rinsn;
s->len_chanlist = 256; /* but there are restrictions. */
- /* Only register commands if the interrupt handler is installed. */
- if (irq_hdl == 0) {
+ if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->do_cmd = &pci230_ai_cmd;
s->do_cmdtest = &pci230_ai_cmdtest;
s->cancel = pci230_ai_cancel;
}
+
s = &dev->subdevices[1];
/* analog output subdevice */
if (thisboard->ao_chans > 0) {
@@ -2753,9 +2747,7 @@ static int pci230_attach_common(struct comedi_device *dev,
s->insn_write = &pci230_ao_winsn;
s->insn_read = &pci230_ao_rinsn;
s->len_chanlist = thisboard->ao_chans;
- /* Only register commands if the interrupt handler is
- * installed. */
- if (irq_hdl == 0) {
+ if (dev->irq) {
dev->write_subdev = s;
s->subdev_flags |= SDF_CMD_WRITE;
s->do_cmd = &pci230_ao_cmd;
@@ -2765,6 +2757,7 @@ static int pci230_attach_common(struct comedi_device *dev,
} else {
s->type = COMEDI_SUBD_UNUSED;
}
+
s = &dev->subdevices[2];
/* digital i/o subdevice */
if (thisboard->have_dio) {
@@ -2856,7 +2849,7 @@ static int amplc_pci230_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(amplc_pci230_pci_table) = {
+static const struct pci_device_id amplc_pci230_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI230) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI260) },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
index 4bd4ef8e88cd..be28e6cbea20 100644
--- a/drivers/staging/comedi/drivers/amplc_pci263.c
+++ b/drivers/staging/comedi/drivers/amplc_pci263.c
@@ -96,7 +96,7 @@ static struct comedi_driver amplc_pci263_driver = {
.detach = comedi_pci_disable,
};
-static DEFINE_PCI_DEVICE_TABLE(pci263_pci_table) = {
+static const struct pci_device_id pci263_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI263) },
{0}
};
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
index 217aa19cdc32..5034f663eec9 100644
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ b/drivers/staging/comedi/drivers/c6xdigio.c
@@ -94,8 +94,6 @@ static void C6X_pwmInit(unsigned long baseAddr)
{
int timeout = 0;
-/* printk("Inside C6X_pwmInit\n"); */
-
WriteByteToHwPort(baseAddr, 0x70);
while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0)
&& (timeout < C6XDIGIO_TIME_OUT)) {
@@ -132,8 +130,6 @@ static void C6X_pwmOutput(unsigned long baseAddr, unsigned channel, int value)
int timeout = 0;
unsigned tmp;
- /* printk("Inside C6X_pwmOutput\n"); */
-
pwm.cmd = value;
if (pwm.cmd > 498)
pwm.cmd = 498;
@@ -200,8 +196,6 @@ static int C6X_encInput(unsigned long baseAddr, unsigned channel)
int timeout = 0;
int tmp;
- /* printk("Inside C6X_encInput\n"); */
-
enc.value = 0;
if (channel == 0)
ppcmd = 0x48;
@@ -295,8 +289,6 @@ static void C6X_encResetAll(unsigned long baseAddr)
{
unsigned timeout = 0;
-/* printk("Inside C6X_encResetAll\n"); */
-
WriteByteToHwPort(baseAddr, 0x68);
while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0)
&& (timeout < C6XDIGIO_TIME_OUT)) {
@@ -322,14 +314,6 @@ static void C6X_encResetAll(unsigned long baseAddr)
}
}
-static int c6xdigio_pwmo_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- printk(KERN_DEBUG "c6xdigio_pwmo_insn_read %x\n", insn->n);
- return insn->n;
-}
-
static int c6xdigio_pwmo_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -338,7 +322,6 @@ static int c6xdigio_pwmo_insn_write(struct comedi_device *dev,
int i;
int chan = CR_CHAN(insn->chanspec);
- /* printk("c6xdigio_pwmo_insn_write %x\n", insn->n); */
for (i = 0; i < insn->n; i++) {
C6X_pwmOutput(dev->iobase, chan, data[i]);
/* devpriv->ao_readback[chan] = data[i]; */
@@ -346,31 +329,10 @@ static int c6xdigio_pwmo_insn_write(struct comedi_device *dev,
return i;
}
-/* static int c6xdigio_ei_init_insn_read(struct comedi_device *dev, */
-/* struct comedi_subdevice *s, */
-/* struct comedi_insn *insn, */
-/* unsigned int *data) */
-/* { */
-/* printk("c6xdigio_ei_init_insn_read %x\n", insn->n); */
-/* return insn->n; */
-/* } */
-
-/* static int c6xdigio_ei_init_insn_write(struct comedi_device *dev, */
-/* struct comedi_subdevice *s, */
-/* struct comedi_insn *insn, */
-/* unsigned int *data) */
-/* { */
-/* int i; */
-/* int chan = CR_CHAN(insn->chanspec); */
- /* *//* C6X_encResetAll( dev->iobase ); */
- /* *//* return insn->n; */
-/* } */
-
static int c6xdigio_ei_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
- /* printk("c6xdigio_ei__insn_read %x\n", insn->n); */
int n;
int chan = CR_CHAN(insn->chanspec);
@@ -382,12 +344,8 @@ static int c6xdigio_ei_insn_read(struct comedi_device *dev,
static void board_init(struct comedi_device *dev)
{
-
- /* printk("Inside board_init\n"); */
-
C6X_pwmInit(dev->iobase);
C6X_encResetAll(dev->iobase);
-
}
static const struct pnp_device_id c6xdigio_pnp_tbl[] = {
@@ -426,7 +384,6 @@ static int c6xdigio_attach(struct comedi_device *dev,
s->subdev_flags = SDF_WRITEABLE;
s->n_chan = 2;
/* s->trig[0] = c6xdigio_pwmo; */
- s->insn_read = c6xdigio_pwmo_insn_read;
s->insn_write = c6xdigio_pwmo_insn_write;
s->maxdata = 500;
s->range_table = &range_bipolar10; /* A suitable lie */
@@ -441,17 +398,6 @@ static int c6xdigio_attach(struct comedi_device *dev,
s->maxdata = 0xffffff;
s->range_table = &range_unknown;
- /* s = &dev->subdevices[2]; */
- /* pwm output subdevice */
- /* s->type = COMEDI_SUBD_COUNTER; // Not sure what to put here */
- /* s->subdev_flags = SDF_WRITEABLE; */
- /* s->n_chan = 1; */
- /* s->trig[0] = c6xdigio_ei_init; */
- /* s->insn_read = c6xdigio_ei_init_insn_read; */
- /* s->insn_write = c6xdigio_ei_init_insn_write; */
- /* s->maxdata = 0xFFFF; // Really just a don't care */
- /* s->range_table = &range_unknown; // Not sure what to put here */
-
/* I will call this init anyway but more than likely the DSP board */
/* will not be connected when device driver is loaded. */
board_init(dev);
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index e72a403db17c..9819be092f8d 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -181,43 +181,40 @@ static inline unsigned int DAC_DATA_REG(unsigned int channel)
/* analog input ranges for most boards */
static const struct comedi_lrange cb_pcidas_ranges = {
- 8,
- {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
/* pci-das1001 input ranges */
static const struct comedi_lrange cb_pcidas_alt_ranges = {
- 8,
- {
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01)
- }
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.01),
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01)
+ }
};
/* analog output ranges */
static const struct comedi_lrange cb_pcidas_ao_ranges = {
- 4,
- {
- BIP_RANGE(5),
- BIP_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(10),
- }
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(10)
+ }
};
enum trimpot_model {
@@ -1614,7 +1611,7 @@ static int cb_pcidas_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(cb_pcidas_pci_table) = {
+static const struct pci_device_id cb_pcidas_pci_table[] = {
{ PCI_VDEVICE(CB, 0x0001), BOARD_PCIDAS1602_16 },
{ PCI_VDEVICE(CB, 0x000f), BOARD_PCIDAS1200 },
{ PCI_VDEVICE(CB, 0x0010), BOARD_PCIDAS1602_12 },
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index ff5206536be3..4fff1738e3f8 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -94,15 +94,6 @@ TODO:
#include "plx9080.h"
#include "comedi_fc.h"
-#undef PCIDAS64_DEBUG /* disable debugging code */
-/* #define PCIDAS64_DEBUG enable debugging code */
-
-#ifdef PCIDAS64_DEBUG
-#define DEBUG_PRINT(format, args...) pr_debug(format, ## args)
-#else
-#define DEBUG_PRINT(format, args...) no_printk(format, ## args)
-#endif
-
#define TIMER_BASE 25 /* 40MHz master clock */
/* 100kHz 'prescaled' clock for slow acquisition,
* maybe I'll support this someday */
@@ -438,91 +429,85 @@ static inline uint8_t attenuate_bit(unsigned int channel)
/* analog input ranges for 64xx boards */
static const struct comedi_lrange ai_ranges_64xx = {
- 8,
- {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
/* analog input ranges for 60xx boards */
static const struct comedi_lrange ai_ranges_60xx = {
- 4,
- {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- }
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05)
+ }
};
/* analog input ranges for 6030, etc boards */
static const struct comedi_lrange ai_ranges_6030 = {
- 14,
- {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- BIP_RANGE(0.5),
- BIP_RANGE(0.2),
- BIP_RANGE(0.1),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1),
- UNI_RANGE(0.5),
- UNI_RANGE(0.2),
- UNI_RANGE(0.1),
- }
+ 14, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2),
+ BIP_RANGE(1),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.2),
+ BIP_RANGE(0.1),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2),
+ UNI_RANGE(1),
+ UNI_RANGE(0.5),
+ UNI_RANGE(0.2),
+ UNI_RANGE(0.1)
+ }
};
/* analog input ranges for 6052, etc boards */
static const struct comedi_lrange ai_ranges_6052 = {
- 15,
- {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1),
- BIP_RANGE(0.5),
- BIP_RANGE(0.25),
- BIP_RANGE(0.1),
- BIP_RANGE(0.05),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1),
- UNI_RANGE(0.5),
- UNI_RANGE(0.2),
- UNI_RANGE(0.1),
- }
+ 15, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.25),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.05),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2),
+ UNI_RANGE(1),
+ UNI_RANGE(0.5),
+ UNI_RANGE(0.2),
+ UNI_RANGE(0.1)
+ }
};
/* analog input ranges for 4020 board */
static const struct comedi_lrange ai_ranges_4020 = {
- 2,
- {
- BIP_RANGE(5),
- BIP_RANGE(1),
- }
+ 2, {
+ BIP_RANGE(5),
+ BIP_RANGE(1)
+ }
};
/* analog output ranges */
static const struct comedi_lrange ao_ranges_64xx = {
- 4,
- {
- BIP_RANGE(5),
- BIP_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(10),
- }
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(10)
+ }
};
static const int ao_range_code_64xx[] = {
@@ -537,11 +522,10 @@ static const int ao_range_code_60xx[] = {
};
static const struct comedi_lrange ao_ranges_6030 = {
- 2,
- {
- BIP_RANGE(10),
- UNI_RANGE(10),
- }
+ 2, {
+ BIP_RANGE(10),
+ UNI_RANGE(10)
+ }
};
static const int ao_range_code_6030[] = {
@@ -550,11 +534,10 @@ static const int ao_range_code_6030[] = {
};
static const struct comedi_lrange ao_ranges_4020 = {
- 2,
- {
- BIP_RANGE(5),
- BIP_RANGE(10),
- }
+ 2, {
+ BIP_RANGE(5),
+ BIP_RANGE(10)
+ }
};
static const int ao_range_code_4020[] = {
@@ -1252,8 +1235,6 @@ static void disable_ai_interrupts(struct comedi_device *dev)
writew(devpriv->intr_enable_bits,
devpriv->main_iobase + INTR_ENABLE_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
-
- DEBUG_PRINT("intr enable bits 0x%x\n", devpriv->intr_enable_bits);
}
static void enable_ai_interrupts(struct comedi_device *dev,
@@ -1277,7 +1258,6 @@ static void enable_ai_interrupts(struct comedi_device *dev,
devpriv->intr_enable_bits |= bits;
writew(devpriv->intr_enable_bits,
devpriv->main_iobase + INTR_ENABLE_REG);
- DEBUG_PRINT("intr enable bits 0x%x\n", devpriv->intr_enable_bits);
spin_unlock_irqrestore(&dev->spinlock, flags);
}
@@ -1292,38 +1272,6 @@ static void init_plx9080(struct comedi_device *dev)
devpriv->plx_control_bits =
readl(devpriv->plx9080_iobase + PLX_CONTROL_REG);
- /* plx9080 dump */
- DEBUG_PRINT(" plx interrupt status 0x%x\n",
- readl(plx_iobase + PLX_INTRCS_REG));
- DEBUG_PRINT(" plx id bits 0x%x\n", readl(plx_iobase + PLX_ID_REG));
- DEBUG_PRINT(" plx control reg 0x%x\n", devpriv->plx_control_bits);
- DEBUG_PRINT(" plx mode/arbitration reg 0x%x\n",
- readl(plx_iobase + PLX_MARB_REG));
- DEBUG_PRINT(" plx region0 reg 0x%x\n",
- readl(plx_iobase + PLX_REGION0_REG));
- DEBUG_PRINT(" plx region1 reg 0x%x\n",
- readl(plx_iobase + PLX_REGION1_REG));
-
- DEBUG_PRINT(" plx revision 0x%x\n",
- readl(plx_iobase + PLX_REVISION_REG));
- DEBUG_PRINT(" plx dma channel 0 mode 0x%x\n",
- readl(plx_iobase + PLX_DMA0_MODE_REG));
- DEBUG_PRINT(" plx dma channel 1 mode 0x%x\n",
- readl(plx_iobase + PLX_DMA1_MODE_REG));
- DEBUG_PRINT(" plx dma channel 0 pci address 0x%x\n",
- readl(plx_iobase + PLX_DMA0_PCI_ADDRESS_REG));
- DEBUG_PRINT(" plx dma channel 0 local address 0x%x\n",
- readl(plx_iobase + PLX_DMA0_LOCAL_ADDRESS_REG));
- DEBUG_PRINT(" plx dma channel 0 transfer size 0x%x\n",
- readl(plx_iobase + PLX_DMA0_TRANSFER_SIZE_REG));
- DEBUG_PRINT(" plx dma channel 0 descriptor 0x%x\n",
- readl(plx_iobase + PLX_DMA0_DESCRIPTOR_REG));
- DEBUG_PRINT(" plx dma channel 0 command status 0x%x\n",
- readb(plx_iobase + PLX_DMA0_CS_REG));
- DEBUG_PRINT(" plx dma channel 0 threshold 0x%x\n",
- readl(plx_iobase + PLX_DMA0_THRESHOLD_REG));
- DEBUG_PRINT(" plx bigend 0x%x\n", readl(plx_iobase + PLX_BIGEND_REG));
-
#ifdef __BIG_ENDIAN
bits = BIGEND_DMA0 | BIGEND_DMA1;
#else
@@ -1417,9 +1365,6 @@ static int set_ai_fifo_segment_length(struct comedi_device *dev,
devpriv->ai_fifo_segment_length = num_increments * increment_size;
- DEBUG_PRINT("set hardware fifo segment length to %i\n",
- devpriv->ai_fifo_segment_length);
-
return devpriv->ai_fifo_segment_length;
}
@@ -1441,8 +1386,6 @@ static int set_ai_fifo_size(struct comedi_device *dev, unsigned int num_samples)
num_samples = retval * fifo->num_segments * fifo->sample_packing_ratio;
- DEBUG_PRINT("set hardware fifo size to %i\n", num_samples);
-
return num_samples;
}
@@ -1538,8 +1481,6 @@ static int alloc_and_init_dma_members(struct comedi_device *dev)
if (devpriv->ai_dma_desc == NULL)
return -ENOMEM;
- DEBUG_PRINT("ai dma descriptors start at bus addr 0x%llx\n",
- (unsigned long long)devpriv->ai_dma_desc_bus_addr);
if (ao_cmd_is_supported(thisboard)) {
devpriv->ao_dma_desc =
pci_alloc_consistent(pcidev,
@@ -1548,9 +1489,6 @@ static int alloc_and_init_dma_members(struct comedi_device *dev)
&devpriv->ao_dma_desc_bus_addr);
if (devpriv->ao_dma_desc == NULL)
return -ENOMEM;
-
- DEBUG_PRINT("ao dma descriptors start at bus addr 0x%llx\n",
- (unsigned long long)devpriv->ao_dma_desc_bus_addr);
}
/* initialize dma descriptors */
for (i = 0; i < ai_dma_ring_count(thisboard); i++) {
@@ -1650,8 +1588,6 @@ static void i2c_write_byte(struct comedi_device *dev, uint8_t byte)
uint8_t bit;
unsigned int num_bits = 8;
- DEBUG_PRINT("writing to i2c byte 0x%x\n", byte);
-
for (bit = 1 << (num_bits - 1); bit; bit >>= 1) {
i2c_set_scl(dev, 0);
if ((byte & bit))
@@ -1738,7 +1674,6 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long flags;
static const int timeout = 100;
- DEBUG_PRINT("chanspec 0x%x\n", insn->chanspec);
channel = CR_CHAN(insn->chanspec);
range = CR_RANGE(insn->chanspec);
aref = CR_AREF(insn->chanspec);
@@ -1766,7 +1701,6 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
if (insn->chanspec & CR_ALT_SOURCE) {
unsigned int cal_en_bit;
- DEBUG_PRINT("reading calibration source\n");
if (thisboard->layout == LAYOUT_60XX)
cal_en_bit = CAL_EN_60XX_BIT;
else
@@ -1800,7 +1734,6 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
devpriv->i2c_cal_range_bits &= ~ADC_SRC_4020_MASK;
if (insn->chanspec & CR_ALT_SOURCE) {
- DEBUG_PRINT("reading calibration source\n");
devpriv->i2c_cal_range_bits |=
adc_src_4020_bits(devpriv->calibration_source);
} else { /* select BNC inputs */
@@ -1839,7 +1772,6 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
/* wait for data */
for (i = 0; i < timeout; i++) {
bits = readw(devpriv->main_iobase + HW_STATUS_REG);
- DEBUG_PRINT(" pipe bits 0x%x\n", pipe_full_bits(bits));
if (thisboard->layout == LAYOUT_4020) {
if (readw(devpriv->main_iobase +
ADC_WRITE_PNTR_REG))
@@ -1850,7 +1782,6 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
}
udelay(1);
}
- DEBUG_PRINT(" looped %i times waiting for data\n", i);
if (i == timeout) {
comedi_error(dev, " analog input read insn timed out");
dev_info(dev->class_dev, "status 0x%x\n", bits);
@@ -1884,7 +1815,6 @@ static int ai_config_calibration_source(struct comedi_device *dev,
return -EINVAL;
}
- DEBUG_PRINT("setting calibration source to %i\n", source);
devpriv->calibration_source = source;
return 2;
@@ -2368,7 +2298,6 @@ static void set_ai_pacing(struct comedi_device *dev, struct comedi_cmd *cmd)
/* load lower 16 bits of convert interval */
writew(convert_counter & 0xffff,
devpriv->main_iobase + ADC_SAMPLE_INTERVAL_LOWER_REG);
- DEBUG_PRINT("convert counter 0x%x\n", convert_counter);
/* load upper 8 bits of convert interval */
writew((convert_counter >> 16) & 0xff,
devpriv->main_iobase + ADC_SAMPLE_INTERVAL_UPPER_REG);
@@ -2378,7 +2307,6 @@ static void set_ai_pacing(struct comedi_device *dev, struct comedi_cmd *cmd)
/* load upper 8 bits of scan delay */
writew((scan_counter >> 16) & 0xff,
devpriv->main_iobase + ADC_DELAY_INTERVAL_UPPER_REG);
- DEBUG_PRINT("scan counter 0x%x\n", scan_counter);
}
static int use_internal_queue_6xxx(const struct comedi_cmd *cmd)
@@ -2469,9 +2397,6 @@ static int setup_channel_queue(struct comedi_device *dev,
writew(bits,
devpriv->main_iobase +
ADC_QUEUE_FIFO_REG);
- DEBUG_PRINT(
- "wrote 0x%x to external channel queue\n",
- bits);
}
/* doing a queue clear is not specified in board docs,
* but required for reliable operation */
@@ -2593,7 +2518,6 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
}
writew(devpriv->adc_control1_bits,
devpriv->main_iobase + ADC_CONTROL1_REG);
- DEBUG_PRINT("control1 bits 0x%x\n", devpriv->adc_control1_bits);
spin_unlock_irqrestore(&dev->spinlock, flags);
/* clear adc buffer */
@@ -2645,17 +2569,14 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (use_hw_sample_counter(cmd))
bits |= ADC_SAMPLE_COUNTER_EN_BIT;
writew(bits, devpriv->main_iobase + ADC_CONTROL0_REG);
- DEBUG_PRINT("control0 bits 0x%x\n", bits);
devpriv->ai_cmd_running = 1;
spin_unlock_irqrestore(&dev->spinlock, flags);
/* start acquisition */
- if (cmd->start_src == TRIG_NOW) {
+ if (cmd->start_src == TRIG_NOW)
writew(0, devpriv->main_iobase + ADC_START_REG);
- DEBUG_PRINT("soft trig\n");
- }
return 0;
}
@@ -2690,10 +2611,6 @@ static void pio_drain_ai_fifo_16(struct comedi_device *dev)
read_segment = adc_upper_read_ptr_code(prepost_bits);
write_segment = adc_upper_write_ptr_code(prepost_bits);
- DEBUG_PRINT(" rd seg %i, wrt seg %i, rd idx %i, wrt idx %i\n",
- read_segment, write_segment, read_index,
- write_index);
-
if (read_segment != write_segment)
num_samples =
devpriv->ai_fifo_segment_length - read_index;
@@ -2715,8 +2632,6 @@ static void pio_drain_ai_fifo_16(struct comedi_device *dev)
break;
}
- DEBUG_PRINT(" read %i samples from fifo\n", num_samples);
-
for (i = 0; i < num_samples; i++) {
cfc_write_to_buffer(s,
readw(devpriv->main_iobase +
@@ -2812,11 +2727,6 @@ static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
num_samples * sizeof(uint16_t));
devpriv->ai_dma_index = (devpriv->ai_dma_index + 1) %
ai_dma_ring_count(thisboard);
-
- DEBUG_PRINT("next buffer addr 0x%lx\n",
- (unsigned long)devpriv->
- ai_buffer_bus_addr[devpriv->ai_dma_index]);
- DEBUG_PRINT("pci addr reg 0x%x\n", next_transfer_addr);
}
/* XXX check for dma ring buffer overrun
* (use end-of-chain bit to mark last unused buffer) */
@@ -2845,24 +2755,17 @@ static void handle_ai_interrupt(struct comedi_device *dev,
if (plx_status & ICS_DMA1_A) { /* dma chan 1 interrupt */
writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
- DEBUG_PRINT("dma1 status 0x%x\n", dma1_status);
if (dma1_status & PLX_DMA_EN_BIT)
drain_dma_buffers(dev, 1);
-
- DEBUG_PRINT(" cleared dma ch1 interrupt\n");
}
spin_unlock_irqrestore(&dev->spinlock, flags);
- if (status & ADC_DONE_BIT)
- DEBUG_PRINT("adc done interrupt\n");
-
/* drain fifo with pio */
if ((status & ADC_DONE_BIT) ||
((cmd->flags & TRIG_WAKE_EOS) &&
(status & ADC_INTR_PENDING_BIT) &&
(thisboard->layout != LAYOUT_4020))) {
- DEBUG_PRINT("pio fifo drain\n");
spin_lock_irqsave(&dev->spinlock, flags);
if (devpriv->ai_cmd_running) {
spin_unlock_irqrestore(&dev->spinlock, flags);
@@ -2947,7 +2850,6 @@ static void restart_ao_dma(struct comedi_device *dev)
dma_desc_bits =
readl(devpriv->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG);
dma_desc_bits &= ~PLX_END_OF_CHAIN_BIT;
- DEBUG_PRINT("restarting ao dma, descriptor reg 0x%x\n", dma_desc_bits);
load_first_dma_descriptor(dev, 0, dma_desc_bits);
dma_start_sync(dev, 0);
@@ -2963,10 +2865,6 @@ static unsigned int load_ao_dma_buffer(struct comedi_device *dev,
buffer_index = devpriv->ao_dma_index;
prev_buffer_index = prev_ao_dma_index(dev);
- DEBUG_PRINT("attempting to load ao buffer %i (0x%llx)\n", buffer_index,
- (unsigned long long)devpriv->ao_buffer_bus_addr[
- buffer_index]);
-
num_bytes = comedi_buf_read_n_available(dev->write_subdev->async);
if (num_bytes > DMA_BUFFER_SIZE)
num_bytes = DMA_BUFFER_SIZE;
@@ -2977,8 +2875,6 @@ static unsigned int load_ao_dma_buffer(struct comedi_device *dev,
if (num_bytes == 0)
return 0;
- DEBUG_PRINT("loading %i bytes\n", num_bytes);
-
num_bytes = cfc_read_array_from_buffer(dev->write_subdev,
devpriv->
ao_buffer[buffer_index],
@@ -3052,14 +2948,12 @@ static void handle_ao_interrupt(struct comedi_device *dev,
writeb(PLX_CLEAR_DMA_INTR_BIT,
devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
- DEBUG_PRINT("dma0 status 0x%x\n", dma0_status);
if (dma0_status & PLX_DMA_EN_BIT) {
load_ao_dma(dev, cmd);
/* try to recover from dma end-of-chain event */
if (ao_dma_needs_restart(dev, dma0_status))
restart_ao_dma(dev);
}
- DEBUG_PRINT(" cleared dma ch0 interrupt\n");
} else {
spin_unlock_irqrestore(&dev->spinlock, flags);
}
@@ -3068,12 +2962,6 @@ static void handle_ao_interrupt(struct comedi_device *dev,
async->events |= COMEDI_CB_EOA;
if (ao_stopped_by_error(dev, cmd))
async->events |= COMEDI_CB_ERROR;
- DEBUG_PRINT("plx dma0 desc reg 0x%x\n",
- readl(devpriv->plx9080_iobase +
- PLX_DMA0_DESCRIPTOR_REG));
- DEBUG_PRINT("plx dma0 address reg 0x%x\n",
- readl(devpriv->plx9080_iobase +
- PLX_DMA0_PCI_ADDRESS_REG));
}
cfc_handle_events(dev, s);
}
@@ -3089,15 +2977,12 @@ static irqreturn_t handle_interrupt(int irq, void *d)
plx_status = readl(devpriv->plx9080_iobase + PLX_INTRCS_REG);
status = readw(devpriv->main_iobase + HW_STATUS_REG);
- DEBUG_PRINT("hw status 0x%x, plx status 0x%x\n", status, plx_status);
-
/* an interrupt before all the postconfig stuff gets done could
* cause a NULL dereference if we continue through the
* interrupt handler */
- if (!dev->attached) {
- DEBUG_PRINT("premature interrupt, ignoring\n");
+ if (!dev->attached)
return IRQ_HANDLED;
- }
+
handle_ai_interrupt(dev, status, plx_status);
handle_ao_interrupt(dev, status, plx_status);
@@ -3105,11 +2990,8 @@ static irqreturn_t handle_interrupt(int irq, void *d)
if (plx_status & ICS_LDIA) { /* clear local doorbell interrupt */
plx_bits = readl(devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
writel(plx_bits, devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
- DEBUG_PRINT(" cleared local doorbell bits 0x%x\n", plx_bits);
}
- DEBUG_PRINT("exiting handler\n");
-
return IRQ_HANDLED;
}
@@ -3130,7 +3012,6 @@ static int ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
abort_dma(dev, 1);
- DEBUG_PRINT("ai canceled\n");
return 0;
}
@@ -3458,7 +3339,6 @@ static int dio_callback(int dir, int port, int data, unsigned long arg)
void __iomem *iobase = (void __iomem *)arg;
if (dir) {
writeb(data, iobase + port);
- DEBUG_PRINT("wrote 0x%x to port %i\n", data, port);
return 0;
} else {
return readb(iobase + port);
@@ -4046,11 +3926,6 @@ static int auto_attach(struct comedi_device *dev,
return -ENOMEM;
}
- DEBUG_PRINT(" plx9080 remapped to 0x%p\n", devpriv->plx9080_iobase);
- DEBUG_PRINT(" main remapped to 0x%p\n", devpriv->main_iobase);
- DEBUG_PRINT(" diocounter remapped to 0x%p\n",
- devpriv->dio_counter_iobase);
-
/* figure out what local addresses are */
local_range = readl(devpriv->plx9080_iobase + PLX_LAS0RNG_REG) &
LRNG_MEM_MASK;
@@ -4065,9 +3940,6 @@ static int auto_attach(struct comedi_device *dev,
devpriv->local1_iobase = ((uint32_t)devpriv->dio_counter_phys_iobase &
~local_range) | local_decode;
- DEBUG_PRINT(" local 0 io addr 0x%x\n", devpriv->local0_iobase);
- DEBUG_PRINT(" local 1 io addr 0x%x\n", devpriv->local1_iobase);
-
retval = alloc_and_init_dma_members(dev);
if (retval < 0)
return retval;
@@ -4161,7 +4033,7 @@ static int cb_pcidas64_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(cb_pcidas64_pci_table) = {
+static const struct pci_device_id cb_pcidas64_pci_table[] = {
{ PCI_VDEVICE(CB, 0x001d), BOARD_PCIDAS6402_16 },
{ PCI_VDEVICE(CB, 0x001e), BOARD_PCIDAS6402_12 },
{ PCI_VDEVICE(CB, 0x0035), BOARD_PCIDAS64_M1_16 },
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index 94f115820279..8cca0518cfda 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -407,7 +407,7 @@ static int cb_pcidda_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(cb_pcidda_pci_table) = {
+static const struct pci_device_id cb_pcidda_pci_table[] = {
{ PCI_VDEVICE(CB, 0x0020), BOARD_DDA02_12 },
{ PCI_VDEVICE(CB, 0x0021), BOARD_DDA04_12 },
{ PCI_VDEVICE(CB, 0x0022), BOARD_DDA08_12 },
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index 30520d4c16a6..57295d189ff6 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -44,9 +44,6 @@ See http://www.mccdaq.com/PDFs/Manuals/pcim-das1602-16.pdf for more details.
#include "plx9052.h"
#include "8255.h"
-/* #define CBPCIMDAS_DEBUG */
-#undef CBPCIMDAS_DEBUG
-
/* Registers for the PCIM-DAS1602/16 */
/* sizes of io regions (bytes) */
@@ -145,10 +142,9 @@ static int cb_pcimdas_ai_rinsn(struct comedi_device *dev,
if (!busy)
break;
}
- if (i == TIMEOUT) {
- printk("timeout\n");
+ if (i == TIMEOUT)
return -ETIMEDOUT;
- }
+
/* read data */
data[n] = inw(dev->iobase + 0);
}
@@ -222,15 +218,6 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev,
devpriv->BADR3 = pci_resource_start(pcidev, 3);
iobase_8255 = pci_resource_start(pcidev, 4);
-/* Dont support IRQ yet */
-/* get irq */
-/* if(request_irq(pcidev->irq, cb_pcimdas_interrupt, IRQF_SHARED, "cb_pcimdas", dev )) */
-/* { */
-/* printk(" unable to allocate irq %u\n", pcidev->irq); */
-/* return -EINVAL; */
-/* } */
-/* dev->irq = pcidev->irq; */
-
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
return ret;
@@ -288,7 +275,7 @@ static int cb_pcimdas_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(cb_pcimdas_pci_table) = {
+static const struct pci_device_id cb_pcimdas_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0056) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c
index edf17b63096f..43a86630a66e 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdda.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdda.c
@@ -206,7 +206,7 @@ static int cb_pcimdda_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(cb_pcimdda_pci_table) = {
+static const struct pci_device_id cb_pcimdda_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CB, PCI_ID_PCIM_DDA06_16) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index 16c07802107f..d539eaf53b63 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -74,11 +74,10 @@ static const int nano_per_micro = 1000;
/* fake analog input ranges */
static const struct comedi_lrange waveform_ai_ranges = {
- 2,
- {
- BIP_RANGE(10),
- BIP_RANGE(5),
- }
+ 2, {
+ BIP_RANGE(10),
+ BIP_RANGE(5)
+ }
};
static unsigned short fake_sawtooth(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c
index 89836c0828d9..323a7f39cd97 100644
--- a/drivers/staging/comedi/drivers/contec_pci_dio.c
+++ b/drivers/staging/comedi/drivers/contec_pci_dio.c
@@ -111,7 +111,7 @@ static int contec_pci_dio_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(contec_pci_dio_pci_table) = {
+static const struct pci_device_id contec_pci_dio_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CONTEC, PCI_DEVICE_ID_PIO1616L) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index de920ccff400..ce153fcb8b2a 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -772,7 +772,7 @@ static int daqboard2000_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(daqboard2000_pci_table) = {
+static const struct pci_device_id daqboard2000_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_IOTECH, 0x0409) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index 15dd33e3e1c7..e5c0ee9a09c2 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -120,46 +120,49 @@
/* gainlist same as _pgx_ below */
-static const struct comedi_lrange range_das08_pgl = { 9, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
+static const struct comedi_lrange range_das08_pgl = {
+ 9, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_das08_pgh = { 12, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(1),
- BIP_RANGE(0.5),
- BIP_RANGE(0.1),
- BIP_RANGE(0.05),
- BIP_RANGE(0.01),
- BIP_RANGE(0.005),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01),
- }
+static const struct comedi_lrange range_das08_pgh = {
+ 12, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(1),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.01),
+ BIP_RANGE(0.005),
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01)
+ }
};
-static const struct comedi_lrange range_das08_pgm = { 9, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.01),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01)
- }
+static const struct comedi_lrange range_das08_pgm = {
+ 9, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.01),
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01)
+ }
}; /*
cio-das08jr.pdf
diff --git a/drivers/staging/comedi/drivers/das08_pci.c b/drivers/staging/comedi/drivers/das08_pci.c
index 3a6d3725b25f..d94af09151b0 100644
--- a/drivers/staging/comedi/drivers/das08_pci.c
+++ b/drivers/staging/comedi/drivers/das08_pci.c
@@ -89,7 +89,7 @@ static int das08_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(das08_pci_table) = {
+static const struct pci_device_id das08_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CB, PCI_DEVICE_ID_PCIDAS08) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index fce9acfe8084..fee5facff8dd 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -110,18 +110,18 @@ irq can be omitted, although the cmd interface will not work without it.
#define DAS16M1_82C55 0x400
#define DAS16M1_8254_THIRD 0x404
-static const struct comedi_lrange range_das16m1 = { 9,
- {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- BIP_RANGE(10),
- }
+static const struct comedi_lrange range_das16m1 = {
+ 9, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25),
+ BIP_RANGE(10)
+ }
};
struct das16m1_private_struct {
@@ -269,11 +269,6 @@ static int das16m1_cmd_exec(struct comedi_device *dev,
struct comedi_cmd *cmd = &async->cmd;
unsigned int byte, i;
- if (dev->irq == 0) {
- comedi_error(dev, "irq required to execute comedi_cmd");
- return -1;
- }
-
/* disable interrupts and internal pacer */
devpriv->control_state &= ~INTE & ~PACER_MASK;
outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
@@ -508,38 +503,26 @@ static irqreturn_t das16m1_interrupt(int irq, void *d)
static int das16m1_irq_bits(unsigned int irq)
{
- int ret;
-
switch (irq) {
case 10:
- ret = 0x0;
- break;
+ return 0x0;
case 11:
- ret = 0x1;
- break;
+ return 0x1;
case 12:
- ret = 0x2;
- break;
+ return 0x2;
case 15:
- ret = 0x3;
- break;
+ return 0x3;
case 2:
- ret = 0x4;
- break;
+ return 0x4;
case 3:
- ret = 0x5;
- break;
+ return 0x5;
case 5:
- ret = 0x6;
- break;
+ return 0x6;
case 7:
- ret = 0x7;
- break;
+ return 0x7;
default:
- return -1;
- break;
+ return 0x0;
}
- return ret << 4;
}
/*
@@ -553,7 +536,6 @@ static int das16m1_attach(struct comedi_device *dev,
struct das16m1_private_struct *devpriv;
struct comedi_subdevice *s;
int ret;
- unsigned int irq;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
@@ -569,24 +551,12 @@ static int das16m1_attach(struct comedi_device *dev,
return ret;
devpriv->extra_iobase = dev->iobase + DAS16M1_82C55;
- /* now for the irq */
- irq = it->options[1];
- /* make sure it is valid */
- if (das16m1_irq_bits(irq) >= 0) {
- ret = request_irq(irq, das16m1_interrupt, 0,
- dev->driver->driver_name, dev);
- if (ret < 0)
- return ret;
- dev->irq = irq;
- printk
- ("irq %u\n", irq);
- } else if (irq == 0) {
- printk
- (", no irq\n");
- } else {
- comedi_error(dev, "invalid irq\n"
- " valid irqs are 2, 3, 5, 7, 10, 11, 12, or 15\n");
- return -EINVAL;
+ /* only irqs 2, 3, 4, 5, 6, 7, 10, 11, 12, 14, and 15 are valid */
+ if ((1 << it->options[1]) & 0xdcfc) {
+ ret = request_irq(it->options[1], das16m1_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
}
ret = comedi_alloc_subdevices(dev, 4);
@@ -594,20 +564,22 @@ static int das16m1_attach(struct comedi_device *dev,
return ret;
s = &dev->subdevices[0];
- dev->read_subdev = s;
/* ai */
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_DIFF;
s->n_chan = 8;
- s->subdev_flags = SDF_DIFF;
- s->len_chanlist = 256;
s->maxdata = (1 << 12) - 1;
s->range_table = &range_das16m1;
s->insn_read = das16m1_ai_rinsn;
- s->do_cmdtest = das16m1_cmd_test;
- s->do_cmd = das16m1_cmd_exec;
- s->cancel = das16m1_cancel;
- s->poll = das16m1_poll;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 256;
+ s->do_cmdtest = das16m1_cmd_test;
+ s->do_cmd = das16m1_cmd_exec;
+ s->cancel = das16m1_cancel;
+ s->poll = das16m1_poll;
+ }
s = &dev->subdevices[1];
/* di */
@@ -640,10 +612,7 @@ static int das16m1_attach(struct comedi_device *dev,
outb(0, dev->iobase + DAS16M1_DIO);
/* set the interrupt level */
- if (dev->irq)
- devpriv->control_state = das16m1_irq_bits(dev->irq);
- else
- devpriv->control_state = 0;
+ devpriv->control_state = das16m1_irq_bits(dev->irq) << 4;
outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
return 0;
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index 1880038956d0..320d95a5f47b 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -178,31 +178,29 @@ enum {
/* analog input ranges */
static const struct comedi_lrange range_ai_das1801 = {
- 8,
- {
- RANGE(-5, 5),
- RANGE(-1, 1),
- RANGE(-0.1, 0.1),
- RANGE(-0.02, 0.02),
- RANGE(0, 5),
- RANGE(0, 1),
- RANGE(0, 0.1),
- RANGE(0, 0.02),
- }
+ 8, {
+ BIP_RANGE(5),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.02),
+ UNI_RANGE(5),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.02)
+ }
};
static const struct comedi_lrange range_ai_das1802 = {
- 8,
- {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-2.5, 2.5),
- RANGE(-1.25, 1.25),
- RANGE(0, 10),
- RANGE(0, 5),
- RANGE(0, 2.5),
- RANGE(0, 1.25),
- }
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
struct das1800_board {
@@ -445,10 +443,9 @@ struct das1800_private {
/* analog out range for 'ao' boards */
/*
static const struct comedi_lrange range_ao_2 = {
- 2,
- {
- RANGE(-10, 10),
- RANGE(-5, 5),
+ 2, {
+ BIP_RANGE(10),
+ BIP_RANGE(5)
}
};
*/
@@ -462,7 +459,7 @@ static inline uint16_t munge_bipolar_sample(const struct comedi_device *dev,
return sample;
}
-static void munge_data(struct comedi_device *dev, uint16_t * array,
+static void munge_data(struct comedi_device *dev, uint16_t *array,
unsigned int num_elements)
{
unsigned int i;
@@ -644,7 +641,7 @@ static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
static void das1800_ai_handler(struct comedi_device *dev)
{
struct das1800_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int status = inb(dev->iobase + DAS1800_STATUS);
@@ -1150,12 +1147,6 @@ static int das1800_ai_do_cmd(struct comedi_device *dev,
struct comedi_async *async = s->async;
const struct comedi_cmd *cmd = &async->cmd;
- if (!dev->irq) {
- comedi_error(dev,
- "no irq assigned for das-1800, cannot do hardware conversions");
- return -1;
- }
-
/* disable dma on TRIG_WAKE_EOS, or TRIG_RT
* (because dma in handler is unsafe at hard real-time priority) */
if (cmd->flags & (TRIG_WAKE_EOS | TRIG_RT))
@@ -1522,43 +1513,34 @@ static int das1800_attach(struct comedi_device *dev,
devpriv->iobase2 = iobase2;
}
- /* grab our IRQ */
- if (irq) {
- if (request_irq(irq, das1800_interrupt, 0,
- dev->driver->driver_name, dev)) {
- dev_dbg(dev->class_dev, "unable to allocate irq %u\n",
- irq);
- return -EINVAL;
- }
- }
- dev->irq = irq;
+ if (irq == 3 || irq == 5 || irq == 7 || irq == 10 || irq == 11 ||
+ irq == 15) {
+ ret = request_irq(irq, das1800_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0) {
+ dev->irq = irq;
- /* set bits that tell card which irq to use */
- switch (irq) {
- case 0:
- break;
- case 3:
- devpriv->irq_dma_bits |= 0x8;
- break;
- case 5:
- devpriv->irq_dma_bits |= 0x10;
- break;
- case 7:
- devpriv->irq_dma_bits |= 0x18;
- break;
- case 10:
- devpriv->irq_dma_bits |= 0x28;
- break;
- case 11:
- devpriv->irq_dma_bits |= 0x30;
- break;
- case 15:
- devpriv->irq_dma_bits |= 0x38;
- break;
- default:
- dev_err(dev->class_dev, "irq out of range\n");
- return -EINVAL;
- break;
+ switch (irq) {
+ case 3:
+ devpriv->irq_dma_bits |= 0x8;
+ break;
+ case 5:
+ devpriv->irq_dma_bits |= 0x10;
+ break;
+ case 7:
+ devpriv->irq_dma_bits |= 0x18;
+ break;
+ case 10:
+ devpriv->irq_dma_bits |= 0x28;
+ break;
+ case 11:
+ devpriv->irq_dma_bits |= 0x30;
+ break;
+ case 15:
+ devpriv->irq_dma_bits |= 0x38;
+ break;
+ }
+ }
}
ret = das1800_init_dma(dev, dma0, dma1);
@@ -1578,20 +1560,23 @@ static int das1800_attach(struct comedi_device *dev,
/* analog input subdevice */
s = &dev->subdevices[0];
- dev->read_subdev = s;
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND;
if (thisboard->common)
s->subdev_flags |= SDF_COMMON;
s->n_chan = thisboard->qram_len;
- s->len_chanlist = thisboard->qram_len;
s->maxdata = (1 << thisboard->resolution) - 1;
s->range_table = thisboard->range_ai;
- s->do_cmd = das1800_ai_do_cmd;
- s->do_cmdtest = das1800_ai_do_cmdtest;
s->insn_read = das1800_ai_rinsn;
- s->poll = das1800_ai_poll;
- s->cancel = das1800_cancel;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmd = das1800_ai_do_cmd;
+ s->do_cmdtest = das1800_ai_do_cmdtest;
+ s->poll = das1800_ai_poll;
+ s->cancel = das1800_cancel;
+ }
/* analog out */
s = &dev->subdevices[1];
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index fb25cb847032..43027ee500e3 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -152,21 +152,12 @@ static irqreturn_t intr_handler(int irq, void *d)
dev_warn(dev->class_dev, "BUG: spurious interrupt\n");
return IRQ_HANDLED;
}
-#ifdef DEBUG
- printk("das6402: interrupt! das6402_irqcount=%i\n",
- devpriv->das6402_irqcount);
- printk("das6402: iobase+2=%i\n", inw_p(dev->iobase + 2));
-#endif
das6402_ai_fifo_dregs(dev, s);
if (s->async->buf_write_count >= devpriv->ai_bytes_to_read) {
outw_p(SCANL, dev->iobase + 2); /* clears the fifo */
outb(0x07, dev->iobase + 8); /* clears all flip-flops */
-#ifdef DEBUG
- printk("das6402: Got %i samples\n\n",
- devpriv->das6402_wordsread - diff);
-#endif
s->async->events |= COMEDI_CB_EOA;
comedi_event(dev, s);
}
@@ -211,7 +202,7 @@ static int das6402_ai_cancel(struct comedi_device *dev,
#ifdef unused
static int das6402_ai_mode2(struct comedi_device *dev,
- struct comedi_subdevice *s, comedi_trig * it)
+ struct comedi_subdevice *s, comedi_trig *it)
{
struct das6402_private *devpriv = dev->private;
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index b04a5633f754..78a19629ff56 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -124,13 +124,12 @@ Configuration Options:
/* board AI ranges in comedi structure */
static const struct comedi_lrange dmm32at_airanges = {
- 4,
- {
- UNI_RANGE(10),
- UNI_RANGE(5),
- BIP_RANGE(10),
- BIP_RANGE(5),
- }
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ BIP_RANGE(10),
+ BIP_RANGE(5)
+ }
};
/* register values for above ranges */
@@ -145,13 +144,12 @@ static const unsigned char dmm32at_rangebits[] = {
* board. The application should only use the range set by the jumper
*/
static const struct comedi_lrange dmm32at_aoranges = {
- 4,
- {
- UNI_RANGE(10),
- UNI_RANGE(5),
- BIP_RANGE(10),
- BIP_RANGE(5),
- }
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ BIP_RANGE(10),
+ BIP_RANGE(5)
+ }
};
struct dmm32at_private {
@@ -182,8 +180,6 @@ static int dmm32at_ai_rinsn(struct comedi_device *dev,
chan = CR_CHAN(insn->chanspec) & (s->n_chan - 1);
range = CR_RANGE(insn->chanspec);
- /* printk("channel=0x%02x, range=%d\n",chan,range); */
-
/* zero scan and fifo control and reset fifo */
outb(DMM32AT_FIFORESET, dev->iobase + DMM32AT_FIFOCNTRL);
@@ -199,10 +195,8 @@ static int dmm32at_ai_rinsn(struct comedi_device *dev,
if ((status & DMM32AT_STATUS) == 0)
break;
}
- if (i == 40000) {
- printk(KERN_WARNING "dmm32at: timeout\n");
+ if (i == 40000)
return -ETIMEDOUT;
- }
/* convert n samples */
for (n = 0; n < insn->n; n++) {
@@ -214,10 +208,8 @@ static int dmm32at_ai_rinsn(struct comedi_device *dev,
if ((status & DMM32AT_STATUS) == 0)
break;
}
- if (i == 40000) {
- printk(KERN_WARNING "dmm32at: timeout\n");
+ if (i == 40000)
return -ETIMEDOUT;
- }
/* read data */
lsb = inb(dev->iobase + DMM32AT_AILSB);
@@ -453,10 +445,8 @@ static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if ((status & DMM32AT_STATUS) == 0)
break;
}
- if (i == 40000) {
- printk(KERN_WARNING "dmm32at: timeout\n");
+ if (i == 40000)
return -ETIMEDOUT;
- }
if (devpriv->ai_scans_left > 1) {
/* start the clock and enable the interrupts */
@@ -467,8 +457,6 @@ static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
outb(0xff, dev->iobase + DMM32AT_CONV);
}
-/* printk("dmmat32 in command\n"); */
-
/* for(i=0;i<cmd->chanlist_len;i++) */
/* comedi_buf_put(s->async,i*100); */
@@ -556,7 +544,6 @@ static int dmm32at_ao_winsn(struct comedi_device *dev,
lo = data[i] & 0x00ff;
/* high byte also contains channel number */
hi = (data[i] >> 8) + chan * (1 << 6);
- /* printk("writing 0x%02x 0x%02x\n",hi,lo); */
/* write the low and high values to the board */
outb(lo, dev->iobase + DMM32AT_DACLSB);
outb(hi, dev->iobase + DMM32AT_DACMSB);
@@ -567,10 +554,9 @@ static int dmm32at_ao_winsn(struct comedi_device *dev,
if ((status & DMM32AT_DACBUSY) == 0)
break;
}
- if (i == 40000) {
- printk(KERN_WARNING "dmm32at: timeout\n");
+ if (i == 40000)
return -ETIMEDOUT;
- }
+
/* dummy read to update trigger the output */
status = inb(dev->iobase + DMM32AT_DACMSB);
@@ -682,9 +668,6 @@ static int dmm32at_attach(struct comedi_device *dev,
int ret;
struct comedi_subdevice *s;
unsigned char aihi, ailo, fifostat, aistat, intstat, airback;
- unsigned int irq;
-
- irq = it->options[1];
ret = comedi_request_region(dev, it->options[0], DMM32AT_MEMSIZE);
if (ret)
@@ -723,26 +706,17 @@ static int dmm32at_attach(struct comedi_device *dev,
intstat = inb(dev->iobase + DMM32AT_INTCLOCK);
airback = inb(dev->iobase + DMM32AT_AIRBACK);
- printk(KERN_DEBUG "dmm32at: lo=0x%02x hi=0x%02x fifostat=0x%02x\n",
- ailo, aihi, fifostat);
- printk(KERN_DEBUG
- "dmm32at: aistat=0x%02x intstat=0x%02x airback=0x%02x\n",
- aistat, intstat, airback);
-
if ((ailo != 0x00) || (aihi != 0x1f) || (fifostat != 0x80) ||
(aistat != 0x60 || (intstat != 0x00) || airback != 0x0c)) {
- printk(KERN_ERR "dmmat32: board detection failed\n");
+ dev_err(dev->class_dev, "board detection failed\n");
return -EIO;
}
- /* board is there, register interrupt */
- if (irq) {
- ret = request_irq(irq, dmm32at_isr, 0, dev->board_name, dev);
- if (ret < 0) {
- printk(KERN_ERR "dmm32at: irq conflict\n");
- return ret;
- }
- dev->irq = irq;
+ if (it->options[1]) {
+ ret = request_irq(it->options[1], dmm32at_isr, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
}
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
@@ -754,20 +728,22 @@ static int dmm32at_attach(struct comedi_device *dev,
return ret;
s = &dev->subdevices[0];
- dev->read_subdev = s;
/* analog input subdevice */
s->type = COMEDI_SUBD_AI;
/* we support single-ended (ground) and differential */
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
s->n_chan = 32;
s->maxdata = 0xffff;
s->range_table = &dmm32at_airanges;
- s->len_chanlist = 32; /* This is the maximum chanlist length that
- the board can handle */
s->insn_read = dmm32at_ai_rinsn;
- s->do_cmd = dmm32at_ai_cmd;
- s->do_cmdtest = dmm32at_ai_cmdtest;
- s->cancel = dmm32at_ai_cancel;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 32;
+ s->do_cmd = dmm32at_ai_cmd;
+ s->do_cmdtest = dmm32at_ai_cmdtest;
+ s->cancel = dmm32at_ai_cancel;
+ }
s = &dev->subdevices[1];
/* analog output subdevice */
@@ -799,11 +775,7 @@ static int dmm32at_attach(struct comedi_device *dev,
s->insn_bits = dmm32at_dio_insn_bits;
s->insn_config = dmm32at_dio_insn_config;
- /* success */
- printk(KERN_INFO "comedi%d: dmm32at: attached\n", dev->minor);
-
- return 1;
-
+ return 0;
}
static struct comedi_driver dmm32at_driver = {
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index 811c8c59c017..d4d4e4b497dc 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -90,58 +90,42 @@ Configuration options:
#if 0
/* ignore 'defined but not used' warning */
-static const struct comedi_lrange range_dt2801_ai_pgh_bipolar = { 4, {
- RANGE(-10,
- 10),
- RANGE(-5,
- 5),
- RANGE
- (-2.5,
- 2.5),
- RANGE
- (-1.25,
- 1.25),
- }
+static const struct comedi_lrange range_dt2801_ai_pgh_bipolar = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25)
+ }
};
#endif
-static const struct comedi_lrange range_dt2801_ai_pgl_bipolar = { 4, {
- RANGE(-10,
- 10),
- RANGE(-1,
- 1),
- RANGE
- (-0.1,
- 0.1),
- RANGE
- (-0.02,
- 0.02),
- }
+static const struct comedi_lrange range_dt2801_ai_pgl_bipolar = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.02)
+ }
};
#if 0
/* ignore 'defined but not used' warning */
-static const struct comedi_lrange range_dt2801_ai_pgh_unipolar = { 4, {
- RANGE(0,
- 10),
- RANGE(0,
- 5),
- RANGE(0,
- 2.5),
- RANGE(0,
- 1.25),
- }
+static const struct comedi_lrange range_dt2801_ai_pgh_unipolar = {
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
#endif
-static const struct comedi_lrange range_dt2801_ai_pgl_unipolar = { 4, {
- RANGE(0,
- 10),
- RANGE(0,
- 1),
- RANGE(0,
- 0.1),
- RANGE(0,
- 0.02),
- }
+static const struct comedi_lrange range_dt2801_ai_pgl_unipolar = {
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.02)
+ }
};
struct dt2801_board {
@@ -289,13 +273,6 @@ static int dt2801_writedata(struct comedi_device *dev, unsigned int data)
outb_p(data & 0xff, dev->iobase + DT2801_DATA);
return 0;
}
-#if 0
- if (stat & DT_S_READY) {
- printk
- ("dt2801: ready flag set (bad!) in dt2801_writedata()\n");
- return -EIO;
- }
-#endif
} while (--timeout > 0);
return -ETIME;
@@ -343,11 +320,11 @@ static int dt2801_writecmd(struct comedi_device *dev, int command)
stat = inb_p(dev->iobase + DT2801_STATUS);
if (stat & DT_S_COMPOSITE_ERROR) {
- printk
- ("dt2801: composite-error in dt2801_writecmd(), ignoring\n");
+ dev_dbg(dev->class_dev,
+ "composite-error in %s, ignoring\n", __func__);
}
if (!(stat & DT_S_READY))
- printk("dt2801: !ready in dt2801_writecmd(), ignoring\n");
+ dev_dbg(dev->class_dev, "!ready in %s, ignoring\n", __func__);
outb_p(command, dev->iobase + DT2801_CMD);
return 0;
@@ -359,17 +336,12 @@ static int dt2801_reset(struct comedi_device *dev)
unsigned int stat;
int timeout;
- DPRINTK("dt2801: resetting board...\n");
- DPRINTK("fingerprint: 0x%02x 0x%02x\n", inb_p(dev->iobase),
- inb_p(dev->iobase + 1));
-
/* pull random data from data port */
inb_p(dev->iobase + DT2801_DATA);
inb_p(dev->iobase + DT2801_DATA);
inb_p(dev->iobase + DT2801_DATA);
inb_p(dev->iobase + DT2801_DATA);
- DPRINTK("dt2801: stop\n");
/* dt2801_writecmd(dev,DT_C_STOP); */
outb_p(DT_C_STOP, dev->iobase + DT2801_CMD);
@@ -382,12 +354,10 @@ static int dt2801_reset(struct comedi_device *dev)
break;
} while (timeout--);
if (!timeout)
- printk("dt2801: timeout 1 status=0x%02x\n", stat);
+ dev_dbg(dev->class_dev, "timeout 1 status=0x%02x\n", stat);
- /* printk("dt2801: reading dummy\n"); */
/* dt2801_readdata(dev,&board_code); */
- DPRINTK("dt2801: reset\n");
outb_p(DT_C_RESET, dev->iobase + DT2801_CMD);
/* dt2801_writecmd(dev,DT_C_RESET); */
@@ -399,13 +369,10 @@ static int dt2801_reset(struct comedi_device *dev)
break;
} while (timeout--);
if (!timeout)
- printk("dt2801: timeout 2 status=0x%02x\n", stat);
+ dev_dbg(dev->class_dev, "timeout 2 status=0x%02x\n", stat);
- DPRINTK("dt2801: reading code\n");
dt2801_readdata(dev, &board_code);
- DPRINTK("dt2801: ok. code=0x%02x\n", board_code);
-
return board_code;
}
@@ -465,12 +432,12 @@ static int dt2801_error(struct comedi_device *dev, int stat)
{
if (stat < 0) {
if (stat == -ETIME)
- printk("dt2801: timeout\n");
+ dev_dbg(dev->class_dev, "timeout\n");
else
- printk("dt2801: error %d\n", stat);
+ dev_dbg(dev->class_dev, "error %d\n", stat);
return stat;
}
- printk("dt2801: error status 0x%02x, resetting...\n", stat);
+ dev_dbg(dev->class_dev, "error status 0x%02x, resetting...\n", stat);
dt2801_reset(dev);
dt2801_reset(dev);
@@ -601,8 +568,8 @@ static int dt2801_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (boardtypes[type].boardcode == board_code)
goto havetype;
}
- printk("dt2801: unrecognized board code=0x%02x, contact author\n",
- board_code);
+ dev_dbg(dev->class_dev,
+ "unrecognized board code=0x%02x, contact author\n", board_code);
type = 0;
havetype:
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index 0ca02fa7ba1b..4271903facd7 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -42,60 +42,59 @@ Configuration options:
*/
#include <linux/module.h>
-#include <linux/interrupt.h>
#include "../comedidev.h"
static const struct comedi_lrange range_dt2811_pgh_ai_5_unipolar = {
4, {
- RANGE(0, 5),
- RANGE(0, 2.5),
- RANGE(0, 1.25),
- RANGE(0, 0.625)
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25),
+ UNI_RANGE(0.625)
}
};
static const struct comedi_lrange range_dt2811_pgh_ai_2_5_bipolar = {
4, {
- RANGE(-2.5, 2.5),
- RANGE(-1.25, 1.25),
- RANGE(-0.625, 0.625),
- RANGE(-0.3125, 0.3125)
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ BIP_RANGE(0.3125)
}
};
static const struct comedi_lrange range_dt2811_pgh_ai_5_bipolar = {
4, {
- RANGE(-5, 5),
- RANGE(-2.5, 2.5),
- RANGE(-1.25, 1.25),
- RANGE(-0.625, 0.625)
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625)
}
};
static const struct comedi_lrange range_dt2811_pgl_ai_5_unipolar = {
4, {
- RANGE(0, 5),
- RANGE(0, 0.5),
- RANGE(0, 0.05),
- RANGE(0, 0.01)
+ UNI_RANGE(5),
+ UNI_RANGE(0.5),
+ UNI_RANGE(0.05),
+ UNI_RANGE(0.01)
}
};
static const struct comedi_lrange range_dt2811_pgl_ai_2_5_bipolar = {
4, {
- RANGE(-2.5, 2.5),
- RANGE(-0.25, 0.25),
- RANGE(-0.025, 0.025),
- RANGE(-0.005, 0.005)
+ BIP_RANGE(2.5),
+ BIP_RANGE(0.25),
+ BIP_RANGE(0.025),
+ BIP_RANGE(0.005)
}
};
static const struct comedi_lrange range_dt2811_pgl_ai_5_bipolar = {
4, {
- RANGE(-5, 5),
- RANGE(-0.5, 0.5),
- RANGE(-0.05, 0.05),
- RANGE(-0.01, 0.01)
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.01)
}
};
@@ -227,33 +226,6 @@ static const struct comedi_lrange *dac_range_types[] = {
#define DT2811_TIMEOUT 5
-#if 0
-static irqreturn_t dt2811_interrupt(int irq, void *d)
-{
- int lo, hi;
- int data;
- struct comedi_device *dev = d;
- struct dt2811_private *devpriv = dev->private;
-
- if (!dev->attached) {
- comedi_error(dev, "spurious interrupt");
- return IRQ_HANDLED;
- }
-
- lo = inb(dev->iobase + DT2811_ADDATLO);
- hi = inb(dev->iobase + DT2811_ADDATHI);
-
- data = lo + (hi << 8);
-
- if (!(--devpriv->ntrig)) {
- /* how to turn off acquisition */
- s->async->events |= COMEDI_SB_EOA;
- }
- comedi_event(dev, s);
- return IRQ_HANDLED;
-}
-#endif
-
static int dt2811_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
@@ -278,35 +250,6 @@ static int dt2811_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
return i;
}
-#if 0
-/* Wow. This is code from the Comedi stone age. But it hasn't been
- * replaced, so I'll let it stay. */
-int dt2811_adtrig(kdev_t minor, comedi_adtrig *adtrig)
-{
- struct comedi_device *dev = comedi_devices + minor;
-
- if (adtrig->n < 1)
- return 0;
- dev->curadchan = adtrig->chan;
- switch (dev->i_admode) {
- case COMEDI_MDEMAND:
- dev->ntrig = adtrig->n - 1;
- /* not necessary */
- /*printk("dt2811: AD soft trigger\n"); */
- /*outb(DT2811_CLRERROR|DT2811_INTENB,
- dev->iobase+DT2811_ADCSR); */
- outb(dev->curadchan, dev->iobase + DT2811_ADGCR);
- do_gettimeofday(&trigtime);
- break;
- case COMEDI_MCONTS:
- dev->ntrig = adtrig->n;
- break;
- }
-
- return 0;
-}
-#endif
-
static int dt2811_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
@@ -386,10 +329,7 @@ static int dt2811_do_insn_bits(struct comedi_device *dev,
*/
static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
- /* int i, irq; */
- /* unsigned long irqs; */
- /* long flags; */
-
+ /* int i; */
const struct dt2811_board *board = comedi_board(dev);
struct dt2811_private *devpriv;
int ret;
@@ -406,45 +346,6 @@ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
i = inb(dev->iobase + DT2811_ADDATHI);
#endif
-#if 0
- irq = it->options[1];
- if (irq < 0) {
- save_flags(flags);
- sti();
- irqs = probe_irq_on();
-
- outb(DT2811_CLRERROR | DT2811_INTENB,
- dev->iobase + DT2811_ADCSR);
- outb(0, dev->iobase + DT2811_ADGCR);
-
- udelay(100);
-
- irq = probe_irq_off(irqs);
- restore_flags(flags);
-
- /*outb(DT2811_CLRERROR|DT2811_INTENB,
- dev->iobase+DT2811_ADCSR);*/
-
- if (inb(dev->iobase + DT2811_ADCSR) & DT2811_ADERROR)
- printk(KERN_ERR "error probing irq (bad)\n");
- dev->irq = 0;
- if (irq > 0) {
- i = inb(dev->iobase + DT2811_ADDATLO);
- i = inb(dev->iobase + DT2811_ADDATHI);
- printk(KERN_INFO "(irq = %d)\n", irq);
- ret = request_irq(irq, dt2811_interrupt, 0,
- dev->board_name, dev);
- if (ret < 0)
- return -EIO;
- dev->irq = irq;
- } else if (irq == 0) {
- printk(KERN_INFO "(no irq)\n");
- } else {
- printk(KERN_ERR "( multiple irq's -- this is bad! )\n");
- }
- }
-#endif
-
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/dt2814.c b/drivers/staging/comedi/drivers/dt2814.c
index 6514b9e00552..abad6e49c1c1 100644
--- a/drivers/staging/comedi/drivers/dt2814.c
+++ b/drivers/staging/comedi/drivers/dt2814.c
@@ -80,15 +80,12 @@ static int dt2814_ai_insn_read(struct comedi_device *dev,
outb(chan, dev->iobase + DT2814_CSR);
for (i = 0; i < DT2814_TIMEOUT; i++) {
status = inb(dev->iobase + DT2814_CSR);
- printk(KERN_INFO "dt2814: status: %02x\n", status);
udelay(10);
if (status & DT2814_FINISH)
break;
}
- if (i >= DT2814_TIMEOUT) {
- printk(KERN_INFO "dt2814: status: %02x\n", status);
+ if (i >= DT2814_TIMEOUT)
return -ETIMEDOUT;
- }
hi = inb(dev->iobase + DT2814_DATA);
lo = inb(dev->iobase + DT2814_DATA);
@@ -200,7 +197,7 @@ static irqreturn_t dt2814_interrupt(int irq, void *d)
int lo, hi;
struct comedi_device *dev = d;
struct dt2814_private *devpriv = dev->private;
- struct comedi_subdevice *s;
+ struct comedi_subdevice *s = dev->read_subdev;
int data;
if (!dev->attached) {
@@ -208,8 +205,6 @@ static irqreturn_t dt2814_interrupt(int irq, void *d)
return IRQ_HANDLED;
}
- s = &dev->subdevices[0];
-
hi = inb(dev->iobase + DT2814_DATA);
lo = inb(dev->iobase + DT2814_DATA);
@@ -238,9 +233,9 @@ static irqreturn_t dt2814_interrupt(int irq, void *d)
static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct dt2814_private *devpriv;
- int i, irq;
- int ret;
struct comedi_subdevice *s;
+ int ret;
+ int i;
ret = comedi_request_region(dev, it->options[0], DT2814_SIZE);
if (ret)
@@ -249,49 +244,17 @@ static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
outb(0, dev->iobase + DT2814_CSR);
udelay(100);
if (inb(dev->iobase + DT2814_CSR) & DT2814_ERR) {
- printk(KERN_ERR "reset error (fatal)\n");
+ dev_err(dev->class_dev, "reset error (fatal)\n");
return -EIO;
}
i = inb(dev->iobase + DT2814_DATA);
i = inb(dev->iobase + DT2814_DATA);
- irq = it->options[1];
-#if 0
- if (irq < 0) {
- save_flags(flags);
- sti();
- irqs = probe_irq_on();
-
- outb(0, dev->iobase + DT2814_CSR);
-
- udelay(100);
-
- irq = probe_irq_off(irqs);
- restore_flags(flags);
- if (inb(dev->iobase + DT2814_CSR) & DT2814_ERR)
- printk(KERN_DEBUG "error probing irq (bad)\n");
-
-
- i = inb(dev->iobase + DT2814_DATA);
- i = inb(dev->iobase + DT2814_DATA);
- }
-#endif
- dev->irq = 0;
- if (irq > 0) {
- if (request_irq(irq, dt2814_interrupt, 0, "dt2814", dev)) {
- printk(KERN_WARNING "(irq %d unavailable)\n", irq);
- } else {
- printk(KERN_INFO "( irq = %d )\n", irq);
- dev->irq = irq;
- }
- } else if (irq == 0) {
- printk(KERN_WARNING "(no irq)\n");
- } else {
-#if 0
- printk(KERN_DEBUG "(probe returned multiple irqs--bad)\n");
-#else
- printk(KERN_WARNING "(irq probe not implemented)\n");
-#endif
+ if (it->options[1]) {
+ ret = request_irq(it->options[1], dt2814_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
}
ret = comedi_alloc_subdevices(dev, 1);
@@ -303,16 +266,19 @@ static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -ENOMEM;
s = &dev->subdevices[0];
- dev->read_subdev = s;
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = 16; /* XXX */
- s->len_chanlist = 1;
s->insn_read = dt2814_ai_insn_read;
- s->do_cmd = dt2814_ai_cmd;
- s->do_cmdtest = dt2814_ai_cmdtest;
s->maxdata = 0xfff;
s->range_table = &range_unknown; /* XXX */
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 1;
+ s->do_cmd = dt2814_ai_cmd;
+ s->do_cmdtest = dt2814_ai_cmdtest;
+ }
return 0;
}
diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c
index 34040f0175e8..ee24717821e1 100644
--- a/drivers/staging/comedi/drivers/dt2815.c
+++ b/drivers/staging/comedi/drivers/dt2815.c
@@ -107,8 +107,9 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
status = dt2815_wait_for_status(dev, 0x00);
if (status != 0) {
- printk(KERN_WARNING "dt2815: failed to write low byte "
- "on %d reason %x\n", chan, status);
+ dev_dbg(dev->class_dev,
+ "failed to write low byte on %d reason %x\n",
+ chan, status);
return -EBUSY;
}
@@ -116,8 +117,9 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
status = dt2815_wait_for_status(dev, 0x10);
if (status != 0x10) {
- printk(KERN_WARNING "dt2815: failed to write high byte "
- "on %d reason %x\n", chan, status);
+ dev_dbg(dev->class_dev,
+ "failed to write high byte on %d reason %x\n",
+ chan, status);
return -EBUSY;
}
devpriv->ao_readback[chan] = data[i];
@@ -200,12 +202,13 @@ static int dt2815_attach(struct comedi_device *dev, struct comedi_devconfig *it)
unsigned int program;
program = (it->options[4] & 0x3) << 3 | 0x7;
outb(program, dev->iobase + DT2815_DATA);
- printk(KERN_INFO ", program: 0x%x (@t=%d)\n",
- program, i);
+ dev_dbg(dev->class_dev, "program: 0x%x (@t=%d)\n",
+ program, i);
break;
} else if (status != 0x00) {
- printk(KERN_WARNING "dt2815: unexpected status 0x%x "
- "(@t=%d)\n", status, i);
+ dev_dbg(dev->class_dev,
+ "unexpected status 0x%x (@t=%d)\n",
+ status, i);
if (status & 0x60)
outb(0x00, dev->iobase + DT2815_STATUS);
}
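
The dt2815 changes are the standard comedi logging cleanup: printk() calls that hard-code a "dt2815:" prefix become dev_dbg() against dev->class_dev, so the core supplies the device prefix and the messages also move to debug level (visible only with dynamic debug or a DEBUG build). The idiom, with an illustrative message:

	/* before: driver tag spelled out by hand, always logged */
	printk(KERN_WARNING "dt2815: failed to write low byte on %d\n", chan);

	/* after: prefix comes from the comedi class device, debug level */
	dev_dbg(dev->class_dev, "failed to write low byte on %d\n", chan);
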
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index a01e6b553887..895f73a19023 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -63,8 +63,6 @@ Notes:
#include "comedi_fc.h"
-#define DEBUG
-
#define DT2821_TIMEOUT 100 /* 500 us */
#define DT2821_SIZE 0x10
@@ -156,55 +154,55 @@ Notes:
static const struct comedi_lrange range_dt282x_ai_lo_bipolar = {
4, {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-2.5, 2.5),
- RANGE(-1.25, 1.25)
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25)
}
};
static const struct comedi_lrange range_dt282x_ai_lo_unipolar = {
4, {
- RANGE(0, 10),
- RANGE(0, 5),
- RANGE(0, 2.5),
- RANGE(0, 1.25)
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
}
};
static const struct comedi_lrange range_dt282x_ai_5_bipolar = {
4, {
- RANGE(-5, 5),
- RANGE(-2.5, 2.5),
- RANGE(-1.25, 1.25),
- RANGE(-0.625, 0.625)
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625)
}
};
static const struct comedi_lrange range_dt282x_ai_5_unipolar = {
4, {
- RANGE(0, 5),
- RANGE(0, 2.5),
- RANGE(0, 1.25),
- RANGE(0, 0.625),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25),
+ UNI_RANGE(0.625)
}
};
static const struct comedi_lrange range_dt282x_ai_hi_bipolar = {
4, {
- RANGE(-10, 10),
- RANGE(-1, 1),
- RANGE(-0.1, 0.1),
- RANGE(-0.02, 0.02)
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.02)
}
};
static const struct comedi_lrange range_dt282x_ai_hi_unipolar = {
4, {
- RANGE(0, 10),
- RANGE(0, 1),
- RANGE(0, 0.1),
- RANGE(0, 0.02)
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.02)
}
};
@@ -308,15 +306,15 @@ static void dt282x_munge(struct comedi_device *dev, unsigned short *buf,
static void dt282x_ao_dma_interrupt(struct comedi_device *dev)
{
struct dt282x_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->write_subdev;
void *ptr;
int size;
int i;
- struct comedi_subdevice *s = &dev->subdevices[1];
outw(devpriv->supcsr | DT2821_CLRDMADNE, dev->iobase + DT2821_SUPCSR);
if (!s->async->prealloc_buf) {
- printk(KERN_ERR "async->data disappeared. dang!\n");
+ dev_err(dev->class_dev, "no buffer in %s\n", __func__);
return;
}
@@ -329,7 +327,7 @@ static void dt282x_ao_dma_interrupt(struct comedi_device *dev)
size = cfc_read_array_from_buffer(s, ptr, devpriv->dma_maxsize);
if (size == 0) {
- printk(KERN_ERR "dt282x: AO underrun\n");
+ dev_err(dev->class_dev, "AO underrun\n");
dt282x_ao_cancel(dev, s);
s->async->events |= COMEDI_CB_OVERFLOW;
return;
@@ -341,16 +339,16 @@ static void dt282x_ao_dma_interrupt(struct comedi_device *dev)
static void dt282x_ai_dma_interrupt(struct comedi_device *dev)
{
struct dt282x_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
void *ptr;
int size;
int i;
int ret;
- struct comedi_subdevice *s = &dev->subdevices[0];
outw(devpriv->supcsr | DT2821_CLRDMADNE, dev->iobase + DT2821_SUPCSR);
if (!s->async->prealloc_buf) {
- printk(KERN_ERR "async->data disappeared. dang!\n");
+ dev_err(dev->class_dev, "no buffer in %s\n", __func__);
return;
}
@@ -371,7 +369,7 @@ static void dt282x_ai_dma_interrupt(struct comedi_device *dev)
devpriv->nread -= size / 2;
if (devpriv->nread < 0) {
- printk(KERN_INFO "dt282x: off by one\n");
+ dev_info(dev->class_dev, "nread off by one\n");
devpriv->nread = 0;
}
if (!devpriv->nread) {
@@ -450,8 +448,8 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct dt282x_private *devpriv = dev->private;
- struct comedi_subdevice *s;
- struct comedi_subdevice *s_ao;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_subdevice *s_ao = dev->write_subdev;
unsigned int supcsr, adcsr, dacsr;
int handled = 0;
@@ -460,8 +458,6 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
return IRQ_HANDLED;
}
- s = &dev->subdevices[0];
- s_ao = &dev->subdevices[1];
adcsr = inw(dev->iobase + DT2821_ADCSR);
dacsr = inw(dev->iobase + DT2821_DACSR);
supcsr = inw(dev->iobase + DT2821_SUPCSR);
@@ -481,13 +477,6 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
handled = 1;
}
if (dacsr & DT2821_DAERR) {
-#if 0
- static int warn = 5;
- if (--warn <= 0) {
- disable_irq(dev->irq);
- printk(KERN_INFO "disabling irq\n");
- }
-#endif
comedi_error(dev, "D/A error");
dt282x_ao_cancel(dev, s_ao);
s->async->events |= COMEDI_CB_ERROR;
@@ -520,8 +509,7 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
}
#endif
comedi_event(dev, s);
- /* printk("adcsr=0x%02x dacsr-0x%02x supcsr=0x%02x\n",
- adcsr, dacsr, supcsr); */
+
return IRQ_RETVAL(handled);
}
@@ -894,7 +882,7 @@ static int dt282x_ao_inttrig(struct comedi_device *dev,
size = cfc_read_array_from_buffer(s, devpriv->dma[0].buf,
devpriv->dma_maxsize);
if (size == 0) {
- printk(KERN_ERR "dt282x: AO underrun\n");
+ dev_err(dev->class_dev, "AO underrun\n");
return -EPIPE;
}
prep_ao_dma(dev, 0, size);
@@ -902,7 +890,7 @@ static int dt282x_ao_inttrig(struct comedi_device *dev,
size = cfc_read_array_from_buffer(s, devpriv->dma[1].buf,
devpriv->dma_maxsize);
if (size == 0) {
- printk(KERN_ERR "dt282x: AO underrun\n");
+ dev_err(dev->class_dev, "AO underrun\n");
return -EPIPE;
}
prep_ao_dma(dev, 1, size);
@@ -1062,10 +1050,8 @@ static int dt282x_grab_dma(struct comedi_device *dev, int dma1, int dma2)
devpriv->usedma = 0;
- if (!dma1 && !dma2) {
- printk(KERN_ERR " (no dma)");
+ if (!dma1 && !dma2)
return 0;
- }
if (dma1 == dma2 || dma1 < 5 || dma2 < 5 || dma1 > 7 || dma2 > 7)
return -EINVAL;
@@ -1090,12 +1076,8 @@ static int dt282x_grab_dma(struct comedi_device *dev, int dma1, int dma2)
devpriv->dma_maxsize = PAGE_SIZE;
devpriv->dma[0].buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
devpriv->dma[1].buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
- if (!devpriv->dma[0].buf || !devpriv->dma[1].buf) {
- printk(KERN_ERR " can't get DMA memory");
+ if (!devpriv->dma[0].buf || !devpriv->dma[1].buf)
return -ENOMEM;
- }
-
- printk(KERN_INFO " (dma=%d,%d)", dma1, dma2);
devpriv->usedma = 1;
@@ -1120,9 +1102,9 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct dt282x_board *board = comedi_board(dev);
struct dt282x_private *devpriv;
- int i, irq;
- int ret;
struct comedi_subdevice *s;
+ int ret;
+ int i;
ret = comedi_request_region(dev, it->options[0], DT2821_SIZE);
if (ret)
@@ -1130,14 +1112,6 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
outw(DT2821_BDINIT, dev->iobase + DT2821_SUPCSR);
i = inw(dev->iobase + DT2821_ADCSR);
-#ifdef DEBUG
- printk(KERN_DEBUG " fingerprint=%x,%x,%x,%x,%x",
- inw(dev->iobase + DT2821_ADCSR),
- inw(dev->iobase + DT2821_CHANCSR),
- inw(dev->iobase + DT2821_DACSR),
- inw(dev->iobase + DT2821_SUPCSR),
- inw(dev->iobase + DT2821_TMRCTR));
-#endif
if (((inw(dev->iobase + DT2821_ADCSR) & DT2821_ADCSR_MASK)
!= DT2821_ADCSR_VAL) ||
@@ -1149,58 +1123,28 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
!= DT2821_SUPCSR_VAL) ||
((inw(dev->iobase + DT2821_TMRCTR) & DT2821_TMRCTR_MASK)
!= DT2821_TMRCTR_VAL)) {
- printk(KERN_ERR " board not found");
+ dev_err(dev->class_dev, "board not found\n");
return -EIO;
}
/* should do board test */
- irq = it->options[opt_irq];
-#if 0
- if (irq < 0) {
- unsigned long flags;
- int irqs;
-
- save_flags(flags);
- sti();
- irqs = probe_irq_on();
-
- /* trigger interrupt */
-
- udelay(100);
-
- irq = probe_irq_off(irqs);
- restore_flags(flags);
- if (0 /* error */)
- printk(KERN_ERR " error probing irq (bad)");
- }
-#endif
- if (irq > 0) {
- printk(KERN_INFO " ( irq = %d )", irq);
- ret = request_irq(irq, dt282x_interrupt, 0,
+ if (it->options[opt_irq] > 0) {
+ ret = request_irq(it->options[opt_irq], dt282x_interrupt, 0,
dev->board_name, dev);
- if (ret < 0) {
- printk(KERN_ERR " failed to get irq\n");
- return -EIO;
- }
- dev->irq = irq;
- } else if (irq == 0) {
- printk(KERN_INFO " (no irq)");
- } else {
-#if 0
- printk(KERN_INFO " (probe returned multiple irqs--bad)");
-#else
- printk(KERN_INFO " (irq probe not implemented)");
-#endif
+ if (ret == 0)
+ dev->irq = it->options[opt_irq];
}
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- ret = dt282x_grab_dma(dev, it->options[opt_dma1],
- it->options[opt_dma2]);
- if (ret < 0)
- return ret;
+ if (dev->irq) {
+ ret = dt282x_grab_dma(dev, it->options[opt_dma1],
+ it->options[opt_dma2]);
+ if (ret < 0)
+ return ret;
+ }
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
@@ -1208,22 +1152,25 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s = &dev->subdevices[0];
- dev->read_subdev = s;
/* ai subdevice */
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ |
+ s->subdev_flags = SDF_READABLE |
((it->options[opt_diff]) ? SDF_DIFF : SDF_COMMON);
s->n_chan =
(it->options[opt_diff]) ? board->adchan_di : board->adchan_se;
s->insn_read = dt282x_ai_insn_read;
- s->do_cmdtest = dt282x_ai_cmdtest;
- s->do_cmd = dt282x_ai_cmd;
- s->cancel = dt282x_ai_cancel;
s->maxdata = (1 << board->adbits) - 1;
- s->len_chanlist = 16;
s->range_table =
opt_ai_range_lkup(board->ispgl, it->options[opt_ai_range]);
devpriv->ad_2scomp = it->options[opt_ai_twos];
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 16;
+ s->do_cmdtest = dt282x_ai_cmdtest;
+ s->do_cmd = dt282x_ai_cmd;
+ s->cancel = dt282x_ai_cancel;
+ }
s = &dev->subdevices[1];
@@ -1231,15 +1178,10 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (s->n_chan) {
/* ao subsystem */
s->type = COMEDI_SUBD_AO;
- dev->write_subdev = s;
- s->subdev_flags = SDF_WRITABLE | SDF_CMD_WRITE;
+ s->subdev_flags = SDF_WRITABLE;
s->insn_read = dt282x_ao_insn_read;
s->insn_write = dt282x_ao_insn_write;
- s->do_cmdtest = dt282x_ao_cmdtest;
- s->do_cmd = dt282x_ao_cmd;
- s->cancel = dt282x_ao_cancel;
s->maxdata = (1 << board->dabits) - 1;
- s->len_chanlist = 2;
s->range_table_list = devpriv->darangelist;
devpriv->darangelist[0] =
opt_ao_range_lkup(it->options[opt_ao0_range]);
@@ -1247,6 +1189,14 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
opt_ao_range_lkup(it->options[opt_ao1_range]);
devpriv->da0_2scomp = it->options[opt_ao0_twos];
devpriv->da1_2scomp = it->options[opt_ao1_twos];
+ if (dev->irq) {
+ dev->write_subdev = s;
+ s->subdev_flags |= SDF_CMD_WRITE;
+ s->len_chanlist = 2;
+ s->do_cmdtest = dt282x_ao_cmdtest;
+ s->do_cmd = dt282x_ao_cmd;
+ s->cancel = dt282x_ao_cancel;
+ }
} else {
s->type = COMEDI_SUBD_UNUSED;
}
@@ -1261,8 +1211,6 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 1;
s->range_table = &range_digital;
- printk(KERN_INFO "\n");
-
return 0;
}
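
The range-table hunks above replace open-coded RANGE(min, max) entries with the BIP_RANGE()/UNI_RANGE() helpers, which expand to the same min/max pairs but make the polarity readable at a glance. A sketch of how such a table is declared and hooked up (the particular ranges are illustrative):

	static const struct comedi_lrange my_ai_range = {
		4, {
			BIP_RANGE(10),	/* -10 V .. +10 V */
			BIP_RANGE(5),	/*  -5 V ..  +5 V */
			UNI_RANGE(10),	/*   0 V .. +10 V */
			UNI_RANGE(5)	/*   0 V ..  +5 V */
		}
	};

	/* in the attach routine */
	s->range_table = &my_ai_range;
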
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 292226eeff92..f52a4476cb73 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -48,8 +48,6 @@ AO commands are not supported.
you the docs without one, also.
*/
-#define DEBUG 1
-
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -253,24 +251,6 @@ struct dt3k_private {
unsigned int ai_rear;
};
-#ifdef DEBUG
-static char *intr_flags[] = {
- "AdFull", "AdSwError", "AdHwError", "DaEmpty",
- "DaSwError", "DaHwError", "CtDone", "CmDone",
-};
-
-static void debug_intr_flags(unsigned int flags)
-{
- int i;
- printk(KERN_DEBUG "dt3k: intr_flags:");
- for (i = 0; i < 8; i++) {
- if (flags & (1 << i))
- printk(KERN_CONT " %s", intr_flags[i]);
- }
- printk(KERN_CONT "\n");
-}
-#endif
-
#define TIMEOUT 100
static void dt3k_send_cmd(struct comedi_device *dev, unsigned int cmd)
@@ -372,17 +352,13 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct dt3k_private *devpriv = dev->private;
- struct comedi_subdevice *s;
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned int status;
if (!dev->attached)
return IRQ_NONE;
- s = &dev->subdevices[0];
status = readw(devpriv->io_addr + DPR_Intr_Flag);
-#ifdef DEBUG
- debug_intr_flags(status);
-#endif
if (status & DT3000_ADFULL) {
dt3k_ai_empty_fifo(dev, s);
@@ -725,29 +701,33 @@ static int dt3000_auto_attach(struct comedi_device *dev,
if (!devpriv->io_addr)
return -ENOMEM;
- ret = request_irq(pcidev->irq, dt3k_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret)
- return ret;
- dev->irq = pcidev->irq;
+ if (pcidev->irq) {
+ ret = request_irq(pcidev->irq, dt3k_interrupt, IRQF_SHARED,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = pcidev->irq;
+ }
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
s = &dev->subdevices[0];
- dev->read_subdev = s;
/* ai subdevice */
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
s->n_chan = this_board->adchan;
s->insn_read = dt3k_ai_insn;
s->maxdata = (1 << this_board->adbits) - 1;
- s->len_chanlist = 512;
s->range_table = &range_dt3000_ai; /* XXX */
- s->do_cmd = dt3k_ai_cmd;
- s->do_cmdtest = dt3k_ai_cmdtest;
- s->cancel = dt3k_ai_cancel;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 512;
+ s->do_cmd = dt3k_ai_cmd;
+ s->do_cmdtest = dt3k_ai_cmdtest;
+ s->cancel = dt3k_ai_cancel;
+ }
s = &dev->subdevices[1];
/* ao subsystem */
@@ -818,7 +798,7 @@ static int dt3000_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &dt3000_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(dt3000_pci_table) = {
+static const struct pci_device_id dt3000_pci_table[] = {
{ PCI_VDEVICE(DT, 0x0022), BOARD_DT3001 },
{ PCI_VDEVICE(DT, 0x0023), BOARD_DT3002 },
{ PCI_VDEVICE(DT, 0x0024), BOARD_DT3003 },
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 73af600c1725..b3aeb6fb2ad0 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -41,7 +41,6 @@ for my needs.
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
diff --git a/drivers/staging/comedi/drivers/dyna_pci10xx.c b/drivers/staging/comedi/drivers/dyna_pci10xx.c
index f2a9f1c2f3b6..f224825830ba 100644
--- a/drivers/staging/comedi/drivers/dyna_pci10xx.c
+++ b/drivers/staging/comedi/drivers/dyna_pci10xx.c
@@ -42,11 +42,12 @@
#define READ_TIMEOUT 50
-static const struct comedi_lrange range_pci1050_ai = { 3, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- UNI_RANGE(10)
- }
+static const struct comedi_lrange range_pci1050_ai = {
+ 3, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ UNI_RANGE(10)
+ }
};
static const char range_codes_pci1050_ai[] = { 0x00, 0x10, 0x30 };
@@ -90,8 +91,7 @@ static int dyna_pci10xx_insn_read_ai(struct comedi_device *dev,
goto conv_finish;
}
data[n] = 0;
- printk(KERN_DEBUG "comedi: dyna_pci10xx: "
- "timeout reading analog input\n");
+ dev_dbg(dev->class_dev, "timeout reading analog input\n");
continue;
conv_finish:
/* mask the first 4 bits - EOC bits */
@@ -260,7 +260,7 @@ static int dyna_pci10xx_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(dyna_pci10xx_pci_table) = {
+static const struct pci_device_id dyna_pci10xx_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PLX, 0x1050) },
{ 0 }
};
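
dt3000 and dyna_pci10xx above (and most of the PCI drivers later in this series) also switch from the deprecated DEFINE_PCI_DEVICE_TABLE() macro to an open-coded const struct pci_device_id array; the two forms are equivalent, the explicit one is simply the preferred kernel style now. Shown with a placeholder table name:

	/* old, deprecated */
	static DEFINE_PCI_DEVICE_TABLE(my_pci_table) = {
		{ PCI_DEVICE(PCI_VENDOR_ID_PLX, 0x1050) },
		{ 0 }
	};

	/* preferred */
	static const struct pci_device_id my_pci_table[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_PLX, 0x1050) },
		{ 0 }
	};
	MODULE_DEVICE_TABLE(pci, my_pci_table);
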
diff --git a/drivers/staging/comedi/drivers/fl512.c b/drivers/staging/comedi/drivers/fl512.c
index e3ff4c438979..a99ddf00ddc4 100644
--- a/drivers/staging/comedi/drivers/fl512.c
+++ b/drivers/staging/comedi/drivers/fl512.c
@@ -16,8 +16,6 @@ Configuration options:
[0] - I/O port base address
*/
-#define DEBUG 0
-
#include <linux/module.h>
#include "../comedidev.h"
@@ -28,15 +26,16 @@ struct fl512_private {
unsigned short ao_readback[2];
};
-static const struct comedi_lrange range_fl512 = { 4, {
- BIP_RANGE(0.5),
- BIP_RANGE(1),
- BIP_RANGE(5),
- BIP_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(5),
- UNI_RANGE(10),
- }
+static const struct comedi_lrange range_fl512 = {
+ 4, {
+ BIP_RANGE(0.5),
+ BIP_RANGE(1),
+ BIP_RANGE(5),
+ BIP_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(5),
+ UNI_RANGE(10)
+ }
};
/*
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index 559bf5583530..de60a2871d70 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -60,15 +60,6 @@ static int hpdi_cancel(struct comedi_device *dev, struct comedi_subdevice *s);
static irqreturn_t handle_interrupt(int irq, void *d);
static int dio_config_block_size(struct comedi_device *dev, unsigned int *data);
-#undef HPDI_DEBUG /* disable debugging messages */
-/* #define HPDI_DEBUG enable debugging code */
-
-#ifdef HPDI_DEBUG
-#define DEBUG_PRINT(format, args...) pr_debug(format , ## args)
-#else
-#define DEBUG_PRINT(format, args...) no_printk(pr_fmt(format), ## args)
-#endif
-
#define TIMER_BASE 50 /* 20MHz master clock */
#define DMA_BUFFER_SIZE 0x10000
#define NUM_DMA_BUFFERS 4
@@ -260,32 +251,6 @@ static void init_plx9080(struct comedi_device *dev)
uint32_t bits;
void __iomem *plx_iobase = devpriv->plx9080_iobase;
- /* plx9080 dump */
- DEBUG_PRINT(" plx interrupt status 0x%x\n",
- readl(plx_iobase + PLX_INTRCS_REG));
- DEBUG_PRINT(" plx id bits 0x%x\n", readl(plx_iobase + PLX_ID_REG));
- DEBUG_PRINT(" plx control reg 0x%x\n",
- readl(devpriv->plx9080_iobase + PLX_CONTROL_REG));
-
- DEBUG_PRINT(" plx revision 0x%x\n",
- readl(plx_iobase + PLX_REVISION_REG));
- DEBUG_PRINT(" plx dma channel 0 mode 0x%x\n",
- readl(plx_iobase + PLX_DMA0_MODE_REG));
- DEBUG_PRINT(" plx dma channel 1 mode 0x%x\n",
- readl(plx_iobase + PLX_DMA1_MODE_REG));
- DEBUG_PRINT(" plx dma channel 0 pci address 0x%x\n",
- readl(plx_iobase + PLX_DMA0_PCI_ADDRESS_REG));
- DEBUG_PRINT(" plx dma channel 0 local address 0x%x\n",
- readl(plx_iobase + PLX_DMA0_LOCAL_ADDRESS_REG));
- DEBUG_PRINT(" plx dma channel 0 transfer size 0x%x\n",
- readl(plx_iobase + PLX_DMA0_TRANSFER_SIZE_REG));
- DEBUG_PRINT(" plx dma channel 0 descriptor 0x%x\n",
- readl(plx_iobase + PLX_DMA0_DESCRIPTOR_REG));
- DEBUG_PRINT(" plx dma channel 0 command status 0x%x\n",
- readb(plx_iobase + PLX_DMA0_CS_REG));
- DEBUG_PRINT(" plx dma channel 0 threshold 0x%x\n",
- readl(plx_iobase + PLX_DMA0_THRESHOLD_REG));
- DEBUG_PRINT(" plx bigend 0x%x\n", readl(plx_iobase + PLX_BIGEND_REG));
#ifdef __BIG_ENDIAN
bits = BIGEND_DMA0 | BIGEND_DMA1;
#else
@@ -395,10 +360,6 @@ static int setup_dma_descriptors(struct comedi_device *dev,
if (transfer_size == 0)
return -1;
- DEBUG_PRINT(" transfer_size %i\n", transfer_size);
- DEBUG_PRINT(" descriptors at 0x%lx\n",
- (unsigned long)devpriv->dma_desc_phys_addr);
-
buffer_offset = 0;
buffer_index = 0;
for (i = 0; i < NUM_DMA_DESCRIPTORS &&
@@ -423,21 +384,11 @@ static int setup_dma_descriptors(struct comedi_device *dev,
buffer_offset = 0;
buffer_index++;
}
-
- DEBUG_PRINT(" desc %i\n", i);
- DEBUG_PRINT(" start addr virt 0x%p, phys 0x%lx\n",
- devpriv->desc_dio_buffer[i],
- (unsigned long)devpriv->dma_desc[i].
- pci_start_addr);
- DEBUG_PRINT(" next 0x%lx\n",
- (unsigned long)devpriv->dma_desc[i].next);
}
devpriv->num_dma_descriptors = i;
/* fix last descriptor to point back to first */
devpriv->dma_desc[i - 1].next =
cpu_to_le32(devpriv->dma_desc_phys_addr | next_bits);
- DEBUG_PRINT(" desc %i next fixup 0x%lx\n", i - 1,
- (unsigned long)devpriv->dma_desc[i - 1].next);
devpriv->block_size = transfer_size;
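
Most of the gsc_hpdi churn is removal of the driver-local DEBUG_PRINT() wrapper, but the hunk above also leaves visible how the PLX DMA descriptors are chained: each descriptor's next field holds the physical address of the following descriptor, and the last one is patched to point back at the first so the engine cycles through the buffers as a ring. A simplified sketch of that fixup, with desc[], desc_phys and NEXT_BITS as placeholders for the driver's devpriv fields:

	/* inside the loop: link descriptor i to descriptor i + 1 */
	desc[i].next = cpu_to_le32((desc_phys +
				    (i + 1) * sizeof(desc[0])) | NEXT_BITS);

	/* after the loop: close the chain into a ring */
	desc[n_desc - 1].next = cpu_to_le32(desc_phys | NEXT_BITS);
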
@@ -489,9 +440,6 @@ static int hpdi_auto_attach(struct comedi_device *dev,
return -ENOMEM;
}
- DEBUG_PRINT(" plx9080 remapped to 0x%p\n", devpriv->plx9080_iobase);
- DEBUG_PRINT(" hpdi remapped to 0x%p\n", devpriv->hpdi_iobase);
-
init_plx9080(dev);
/* get irq */
@@ -510,9 +458,6 @@ static int hpdi_auto_attach(struct comedi_device *dev,
devpriv->dio_buffer[i] =
pci_alloc_consistent(pcidev, DMA_BUFFER_SIZE,
&devpriv->dio_buffer_phys_addr[i]);
- DEBUG_PRINT("dio_buffer at virt 0x%p, phys 0x%lx\n",
- devpriv->dio_buffer[i],
- (unsigned long)devpriv->dio_buffer_phys_addr[i]);
}
/* allocate dma descriptors */
devpriv->dma_desc = pci_alloc_consistent(pcidev,
@@ -687,8 +632,6 @@ static int di_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
hpdi_writel(dev, RX_FIFO_RESET_BIT, BOARD_CONTROL_REG);
- DEBUG_PRINT("hpdi: in di_cmd\n");
-
abort_dma(dev, 0);
devpriv->dma_desc_index = 0;
@@ -725,7 +668,6 @@ static int di_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
writel(intr_bit(RX_FULL_INTR),
devpriv->hpdi_iobase + INTERRUPT_CONTROL_REG);
- DEBUG_PRINT("hpdi: starting rx\n");
hpdi_writel(dev, RX_ENABLE_BIT, BOARD_CONTROL_REG);
return 0;
@@ -778,11 +720,6 @@ static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
num_samples * sizeof(uint32_t));
devpriv->dma_desc_index++;
devpriv->dma_desc_index %= devpriv->num_dma_descriptors;
-
- DEBUG_PRINT("next desc addr 0x%lx\n", (unsigned long)
- devpriv->dma_desc[devpriv->dma_desc_index].
- next);
- DEBUG_PRINT("pci addr reg 0x%x\n", next_transfer_addr);
}
/* XXX check for buffer overrun somehow */
}
@@ -812,7 +749,6 @@ static irqreturn_t handle_interrupt(int irq, void *d)
async->events = 0;
if (hpdi_intr_status) {
- DEBUG_PRINT("hpdi: intr status 0x%x, ", hpdi_intr_status);
writel(hpdi_intr_status,
devpriv->hpdi_iobase + INTERRUPT_STATUS_REG);
}
@@ -823,10 +759,8 @@ static irqreturn_t handle_interrupt(int irq, void *d)
writeb((dma0_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- DEBUG_PRINT("dma0 status 0x%x\n", dma0_status);
if (dma0_status & PLX_DMA_EN_BIT)
drain_dma_buffers(dev, 0);
- DEBUG_PRINT(" cleared dma ch0 interrupt\n");
}
spin_unlock_irqrestore(&dev->spinlock, flags);
@@ -836,9 +770,6 @@ static irqreturn_t handle_interrupt(int irq, void *d)
if (plx_status & ICS_DMA1_A) { /* XXX *//* dma chan 1 interrupt */
writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
- DEBUG_PRINT("dma1 status 0x%x\n", dma1_status);
-
- DEBUG_PRINT(" cleared dma ch1 interrupt\n");
}
spin_unlock_irqrestore(&dev->spinlock, flags);
@@ -846,15 +777,11 @@ static irqreturn_t handle_interrupt(int irq, void *d)
if (plx_status & ICS_LDIA) { /* clear local doorbell interrupt */
plx_bits = readl(devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
writel(plx_bits, devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
- DEBUG_PRINT(" cleared local doorbell bits 0x%x\n", plx_bits);
}
if (hpdi_board_status & RX_OVERRUN_BIT) {
comedi_error(dev, "rx fifo overrun");
async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- DEBUG_PRINT("dma0_status 0x%x\n",
- (int)readb(devpriv->plx9080_iobase +
- PLX_DMA0_CS_REG));
}
if (hpdi_board_status & RX_UNDERRUN_BIT) {
@@ -865,11 +792,6 @@ static irqreturn_t handle_interrupt(int irq, void *d)
if (devpriv->dio_count == 0)
async->events |= COMEDI_CB_EOA;
- DEBUG_PRINT("board status 0x%x, ", hpdi_board_status);
- DEBUG_PRINT("plx status 0x%x\n", plx_status);
- if (async->events)
- DEBUG_PRINT(" events 0x%x\n", async->events);
-
cfc_handle_events(dev, s);
return IRQ_HANDLED;
@@ -914,7 +836,7 @@ static int gsc_hpdi_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &gsc_hpdi_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(gsc_hpdi_pci_table) = {
+static const struct pci_device_id gsc_hpdi_pci_table[] = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9080, PCI_VENDOR_ID_PLX,
0x2400, 0, 0, 0},
{ 0 }
diff --git a/drivers/staging/comedi/drivers/icp_multi.c b/drivers/staging/comedi/drivers/icp_multi.c
index 1e16641ec52d..80539b2bea1a 100644
--- a/drivers/staging/comedi/drivers/icp_multi.c
+++ b/drivers/staging/comedi/drivers/icp_multi.c
@@ -91,12 +91,13 @@ Configuration options: not applicable, uses PCI auto config
#define Status_IRQ 0x00ff /* All interrupts */
/* Define analogue range */
-static const struct comedi_lrange range_analog = { 4, {
- UNI_RANGE(5),
- UNI_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(10)
- }
+static const struct comedi_lrange range_analog = {
+ 4, {
+ UNI_RANGE(5),
+ UNI_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(10)
+ }
};
static const char range_codes_analog[] = { 0x00, 0x20, 0x10, 0x30 };
@@ -597,7 +598,7 @@ static int icp_multi_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &icp_multi_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(icp_multi_pci_table) = {
+static const struct pci_device_id icp_multi_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ICP, PCI_DEVICE_ID_ICP_MULTI) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index b52d58e5de27..6100c12c164f 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -807,7 +807,7 @@ static int jr3_pci_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &jr3_pci_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(jr3_pci_pci_table) = {
+static const struct pci_device_id jr3_pci_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL) },
{ PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW) },
{ PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL) },
diff --git a/drivers/staging/comedi/drivers/ke_counter.c b/drivers/staging/comedi/drivers/ke_counter.c
index 15589f62a619..6b9846fd8c48 100644
--- a/drivers/staging/comedi/drivers/ke_counter.c
+++ b/drivers/staging/comedi/drivers/ke_counter.c
@@ -139,7 +139,7 @@ static int ke_counter_pci_probe(struct pci_dev *dev,
id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(ke_counter_pci_table) = {
+static const struct pci_device_id ke_counter_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_KOLTER, CNT_CARD_DEVICE_ID) },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index 3d12e9135926..e739bcd66a04 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -328,13 +328,12 @@ static const struct me4000_board me4000_boards[] = {
};
static const struct comedi_lrange me4000_ai_range = {
- 4,
- {
- UNI_RANGE(2.5),
- UNI_RANGE(10),
- BIP_RANGE(2.5),
- BIP_RANGE(10),
- }
+ 4, {
+ UNI_RANGE(2.5),
+ UNI_RANGE(10),
+ BIP_RANGE(2.5),
+ BIP_RANGE(10)
+ }
};
#define FIRMWARE_NOT_AVAILABLE 1
@@ -1105,7 +1104,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
{
unsigned int tmp;
struct comedi_device *dev = dev_id;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
int i;
int c = 0;
unsigned int lval;
@@ -1116,12 +1115,6 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
/* Reset all events */
s->async->events = 0;
- /* Check if irq number is right */
- if (irq != dev->irq) {
- dev_err(dev->class_dev, "Incorrect interrupt num: %d\n", irq);
- return IRQ_HANDLED;
- }
-
if (inl(dev->iobase + ME4000_IRQ_STATUS_REG) &
ME4000_IRQ_STATUS_BIT_AI_HF) {
/* Read status register to find out what happened */
@@ -1505,6 +1498,13 @@ static int me4000_auto_attach(struct comedi_device *dev,
me4000_reset(dev);
+ if (pcidev->irq > 0) {
+ result = request_irq(pcidev->irq, me4000_ai_isr, IRQF_SHARED,
+ dev->board_name, dev);
+ if (result == 0)
+ dev->irq = pcidev->irq;
+ }
+
result = comedi_alloc_subdevices(dev, 4);
if (result)
return result;
@@ -1525,22 +1525,12 @@ static int me4000_auto_attach(struct comedi_device *dev,
s->range_table = &me4000_ai_range;
s->insn_read = me4000_ai_insn_read;
- if (pcidev->irq > 0) {
- if (request_irq(pcidev->irq, me4000_ai_isr,
- IRQF_SHARED, dev->board_name, dev)) {
- dev_warn(dev->class_dev,
- "request_irq failed\n");
- } else {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->cancel = me4000_ai_cancel;
- s->do_cmdtest = me4000_ai_do_cmd_test;
- s->do_cmd = me4000_ai_do_cmd;
-
- dev->irq = pcidev->irq;
- }
- } else {
- dev_warn(dev->class_dev, "No interrupt available\n");
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->cancel = me4000_ai_cancel;
+ s->do_cmdtest = me4000_ai_do_cmd_test;
+ s->do_cmd = me4000_ai_do_cmd;
}
} else {
s->type = COMEDI_SUBD_UNUSED;
@@ -1635,7 +1625,7 @@ static int me4000_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &me4000_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(me4000_pci_table) = {
+static const struct pci_device_id me4000_pci_table[] = {
{ PCI_VDEVICE(MEILHAUS, 0x4650), BOARD_ME4650 },
{ PCI_VDEVICE(MEILHAUS, 0x4660), BOARD_ME4660 },
{ PCI_VDEVICE(MEILHAUS, 0x4661), BOARD_ME4660I },
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index 24ec9ef9b1a0..7f6687896401 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -576,7 +576,7 @@ static int me_daq_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &me_daq_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(me_daq_pci_table) = {
+static const struct pci_device_id me_daq_pci_table[] = {
{ PCI_VDEVICE(MEILHAUS, 0x2600), BOARD_ME2600 },
{ PCI_VDEVICE(MEILHAUS, 0x2000), BOARD_ME2000 },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c
new file mode 100644
index 000000000000..81b78e053f4e
--- /dev/null
+++ b/drivers/staging/comedi/drivers/mf6x4.c
@@ -0,0 +1,354 @@
+/*
+ * comedi/drivers/mf6x4.c
+ * Driver for Humusoft MF634 and MF624 Data acquisition cards
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Driver: mf6x4
+ * Description: Humusoft MF634 and MF624 Data acquisition card driver
+ * Devices: Humusoft MF634, Humusoft MF624
+ * Author: Rostislav Lisovy <lisovy@gmail.com>
+ * Status: works
+ * Updated:
+ * Configuration Options: none
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "../comedidev.h"
+
+/* Registers present in BAR0 memory region */
+#define MF624_GPIOC_R 0x54
+
+#define MF6X4_GPIOC_EOLC /* End Of Last Conversion */ (1 << 17)
+#define MF6X4_GPIOC_LDAC /* Load DACs */ (1 << 23)
+#define MF6X4_GPIOC_DACEN (1 << 26)
+
+/* BAR1 registers */
+#define MF6X4_DIN_R 0x10
+#define MF6X4_DIN_M 0xff
+#define MF6X4_DOUT_R 0x10
+#define MF6X4_DOUT_M 0xff
+
+#define MF6X4_ADSTART_R 0x20
+#define MF6X4_ADDATA_R 0x00
+#define MF6X4_ADCTRL_R 0x00
+#define MF6X4_ADCTRL_M 0xff
+
+#define MF6X4_DA0_R 0x20
+#define MF6X4_DA1_R 0x22
+#define MF6X4_DA2_R 0x24
+#define MF6X4_DA3_R 0x26
+#define MF6X4_DA4_R 0x28
+#define MF6X4_DA5_R 0x2a
+#define MF6X4_DA6_R 0x2c
+#define MF6X4_DA7_R 0x2e
+/* Map DAC channel id to real HW-dependent offset value */
+#define MF6X4_DAC_R(x) (0x20 + ((x) * 2))
+#define MF6X4_DA_M 0x3fff
+
+/* BAR2 registers */
+#define MF634_GPIOC_R 0x68
+
+enum mf6x4_boardid {
+ BOARD_MF634,
+ BOARD_MF624,
+};
+
+struct mf6x4_board {
+ const char *name;
+ unsigned int bar_nums[3]; /* We need to keep track of the
+ order of BARs used by the cards */
+};
+
+static const struct mf6x4_board mf6x4_boards[] = {
+ [BOARD_MF634] = {
+ .name = "mf634",
+ .bar_nums = {0, 2, 3},
+ },
+ [BOARD_MF624] = {
+ .name = "mf624",
+ .bar_nums = {0, 2, 4},
+ },
+};
+
+struct mf6x4_private {
+ /*
+ * Documentation for both MF634 and MF624 describes registers
+ * present in BAR0, 1 and 2 regions.
+ * The real (i.e. in HW) BAR numbers are different for MF624
+ * and MF634 yet we will call them 0, 1, 2
+ */
+ void __iomem *bar0_mem;
+ void __iomem *bar1_mem;
+ void __iomem *bar2_mem;
+
+ /*
+ * This configuration register has the same function and fields
+ * for both cards however it lies in different BARs on different
+ * offsets -- this variable makes the access easier
+ */
+ void __iomem *gpioc_R;
+
+ /* DAC value cache -- used for insn_read function */
+ int ao_readback[8];
+};
+
+static int mf6x4_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ struct mf6x4_private *devpriv = dev->private;
+
+ data[1] = ioread16(devpriv->bar1_mem + MF6X4_DIN_R) & MF6X4_DIN_M;
+
+ return insn->n;
+}
+
+static int mf6x4_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ struct mf6x4_private *devpriv = dev->private;
+
+ if (comedi_dio_update_state(s, data))
+ iowrite16(s->state & MF6X4_DOUT_M,
+ devpriv->bar1_mem + MF6X4_DOUT_R);
+
+ data[1] = s->state;
+
+ return insn->n;
+}
+
+static int mf6x4_ai_wait_for_eoc(struct comedi_device *dev,
+ unsigned int timeout)
+{
+ struct mf6x4_private *devpriv = dev->private;
+ unsigned int eolc;
+
+ while (timeout--) {
+ eolc = ioread32(devpriv->gpioc_R) & MF6X4_GPIOC_EOLC;
+ if (eolc)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIME;
+}
+
+static int mf6x4_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ struct mf6x4_private *devpriv = dev->private;
+ int chan = CR_CHAN(insn->chanspec);
+ int ret;
+ int i;
+ int d;
+
+ /* Set the ADC channel number in the scan list */
+ iowrite16((1 << chan) & MF6X4_ADCTRL_M,
+ devpriv->bar1_mem + MF6X4_ADCTRL_R);
+
+ for (i = 0; i < insn->n; i++) {
+ /* Trigger ADC conversion by reading ADSTART */
+ ioread16(devpriv->bar1_mem + MF6X4_ADSTART_R);
+
+ ret = mf6x4_ai_wait_for_eoc(dev, 100);
+ if (ret)
+ return ret;
+
+ /* Read the actual value */
+ d = ioread16(devpriv->bar1_mem + MF6X4_ADDATA_R);
+ d &= s->maxdata;
+ data[i] = d;
+ }
+
+ iowrite16(0x0, devpriv->bar1_mem + MF6X4_ADCTRL_R);
+
+ return insn->n;
+}
+
+static int mf6x4_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ struct mf6x4_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ uint32_t gpioc;
+ int i;
+
+ /* Enable instantaneous update of converters outputs + Enable DACs */
+ gpioc = ioread32(devpriv->gpioc_R);
+ iowrite32((gpioc & ~MF6X4_GPIOC_LDAC) | MF6X4_GPIOC_DACEN,
+ devpriv->gpioc_R);
+
+ for (i = 0; i < insn->n; i++) {
+ iowrite16(data[i] & MF6X4_DA_M,
+ devpriv->bar1_mem + MF6X4_DAC_R(chan));
+
+ devpriv->ao_readback[chan] = data[i];
+ }
+
+ return insn->n;
+}
+
+static int mf6x4_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ struct mf6x4_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
+
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_readback[chan];
+
+ return insn->n;
+}
+
+static int mf6x4_auto_attach(struct comedi_device *dev, unsigned long context)
+{
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+ const struct mf6x4_board *board = NULL;
+ struct mf6x4_private *devpriv;
+ struct comedi_subdevice *s;
+ int ret;
+
+ if (context < ARRAY_SIZE(mf6x4_boards))
+ board = &mf6x4_boards[context];
+ else
+ return -ENODEV;
+
+ dev->board_ptr = board;
+ dev->board_name = board->name;
+
+ ret = comedi_pci_enable(dev);
+ if (ret)
+ return ret;
+
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
+ return -ENOMEM;
+
+ devpriv->bar0_mem = pci_ioremap_bar(pcidev, board->bar_nums[0]);
+ if (!devpriv->bar0_mem)
+ return -ENODEV;
+
+ devpriv->bar1_mem = pci_ioremap_bar(pcidev, board->bar_nums[1]);
+ if (!devpriv->bar1_mem)
+ return -ENODEV;
+
+ devpriv->bar2_mem = pci_ioremap_bar(pcidev, board->bar_nums[2]);
+ if (!devpriv->bar2_mem)
+ return -ENODEV;
+
+ if (board == &mf6x4_boards[BOARD_MF634])
+ devpriv->gpioc_R = devpriv->bar2_mem + MF634_GPIOC_R;
+ else
+ devpriv->gpioc_R = devpriv->bar0_mem + MF624_GPIOC_R;
+
+
+ ret = comedi_alloc_subdevices(dev, 4);
+ if (ret)
+ return ret;
+
+ /* ADC */
+ s = &dev->subdevices[0];
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND;
+ s->n_chan = 8;
+ s->maxdata = 0x3fff; /* 14 bits ADC */
+ s->range_table = &range_bipolar10;
+ s->insn_read = mf6x4_ai_insn_read;
+
+ /* DAC */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 8;
+ s->maxdata = 0x3fff; /* 14 bits DAC */
+ s->range_table = &range_bipolar10;
+ s->insn_write = mf6x4_ao_insn_write;
+ s->insn_read = mf6x4_ao_insn_read;
+
+ /* DIN */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = mf6x4_di_insn_bits;
+
+ /* DOUT */
+ s = &dev->subdevices[3];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = mf6x4_do_insn_bits;
+
+ return 0;
+}
+
+static void mf6x4_detach(struct comedi_device *dev)
+{
+ struct mf6x4_private *devpriv = dev->private;
+
+ if (devpriv->bar0_mem)
+ iounmap(devpriv->bar0_mem);
+ if (devpriv->bar1_mem)
+ iounmap(devpriv->bar1_mem);
+ if (devpriv->bar2_mem)
+ iounmap(devpriv->bar2_mem);
+
+ comedi_pci_disable(dev);
+}
+
+static struct comedi_driver mf6x4_driver = {
+ .driver_name = "mf6x4",
+ .module = THIS_MODULE,
+ .auto_attach = mf6x4_auto_attach,
+ .detach = mf6x4_detach,
+};
+
+static int mf6x4_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ return comedi_pci_auto_config(dev, &mf6x4_driver, id->driver_data);
+}
+
+static const struct pci_device_id mf6x4_pci_table[] = {
+ { PCI_VDEVICE(HUMUSOFT, 0x0634), BOARD_MF634 },
+ { PCI_VDEVICE(HUMUSOFT, 0x0624), BOARD_MF624 },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, mf6x4_pci_table);
+
+static struct pci_driver mf6x4_pci_driver = {
+ .name = "mf6x4",
+ .id_table = mf6x4_pci_table,
+ .probe = mf6x4_pci_probe,
+ .remove = comedi_pci_auto_unconfig,
+};
+
+module_comedi_pci_driver(mf6x4_driver, mf6x4_pci_driver);
+
+MODULE_AUTHOR("Rostislav Lisovy <lisovy@gmail.com>");
+MODULE_DESCRIPTION("Comedi MF634 and MF624 DAQ cards driver");
+MODULE_LICENSE("GPL");
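
mf6x4.c is a new driver rather than a cleanup: it registers plain insn-based AI, AO, DI and DO subdevices with no command (streaming) support. From user space the AI subdevice can be exercised through comedilib in the usual way; a hedged sketch (device path, subdevice and channel numbers are examples, error handling trimmed):

	#include <stdio.h>
	#include <comedilib.h>

	int main(void)
	{
		comedi_t *dev = comedi_open("/dev/comedi0");
		lsampl_t sample;

		if (!dev)
			return 1;
		/* subdevice 0 = AI, channel 0, range 0 (+/-10 V), ground ref */
		if (comedi_data_read(dev, 0, 0, 0, AREF_GROUND, &sample) < 0)
			return 1;
		printf("raw: %u of %u\n", sample,
		       comedi_get_maxdata(dev, 0, 0));
		comedi_close(dev);
		return 0;
	}
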
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 35cb4ace7970..9c9a0ee432cf 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -288,7 +288,6 @@ void mite_dma_arm(struct mite_channel *mite_chan)
int chor;
unsigned long flags;
- MDPRINTK("mite_dma_arm ch%i\n", mite_chan->channel);
/*
* memory barrier is intended to insure any twiddling with the buffer
* is done before writing to the mite to arm dma transfer
@@ -329,8 +328,6 @@ int mite_buf_change(struct mite_dma_descriptor_ring *ring,
n_links = async->prealloc_bufsz >> PAGE_SHIFT;
- MDPRINTK("ring->hw_dev=%p, n_links=0x%04x\n", ring->hw_dev, n_links);
-
ring->descriptors =
dma_alloc_coherent(ring->hw_dev,
n_links * sizeof(struct mite_dma_descriptor),
@@ -345,7 +342,7 @@ int mite_buf_change(struct mite_dma_descriptor_ring *ring,
for (i = 0; i < n_links; i++) {
ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
ring->descriptors[i].addr =
- cpu_to_le32(async->buf_page_list[i].dma_addr);
+ cpu_to_le32(async->buf_map->page_list[i].dma_addr);
ring->descriptors[i].next =
cpu_to_le32(ring->descriptors_dma_addr + (i +
1) *
@@ -368,8 +365,6 @@ void mite_prep_dma(struct mite_channel *mite_chan,
unsigned int chor, chcr, mcr, dcr, lkcr;
struct mite_struct *mite = mite_chan->mite;
- MDPRINTK("mite_prep_dma ch%i\n", mite_chan->channel);
-
/* reset DMA and FIFO */
chor = CHOR_DMARESET | CHOR_FRESET;
writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
@@ -448,8 +443,6 @@ void mite_prep_dma(struct mite_channel *mite_chan,
/* starting address for link chaining */
writel(mite_chan->ring->descriptors_dma_addr,
mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
-
- MDPRINTK("exit mite_prep_dma\n");
}
EXPORT_SYMBOL_GPL(mite_prep_dma);
@@ -515,8 +508,6 @@ unsigned mite_dma_tcr(struct mite_channel *mite_chan)
lkar = readl(mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
tcr = readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel));
- MDPRINTK("mite_dma_tcr ch%i, lkar=0x%08x tcr=%d\n", mite_chan->channel,
- lkar, tcr);
return tcr;
}
@@ -642,140 +633,6 @@ int mite_done(struct mite_channel *mite_chan)
}
EXPORT_SYMBOL_GPL(mite_done);
-#ifdef DEBUG_MITE
-
-/* names of bits in mite registers */
-
-static const char *const mite_CHOR_strings[] = {
- "start", "cont", "stop", "abort",
- "freset", "clrlc", "clrrb", "clrdone",
- "clr_lpause", "set_lpause", "clr_send_tc",
- "set_send_tc", "12", "13", "14",
- "15", "16", "17", "18",
- "19", "20", "21", "22",
- "23", "24", "25", "26",
- "27", "28", "29", "30",
- "dmareset",
-};
-
-static const char *const mite_CHCR_strings[] = {
- "continue", "ringbuff", "2", "3",
- "4", "5", "6", "7",
- "8", "9", "10", "11",
- "12", "13", "bursten", "fifodis",
- "clr_cont_rb_ie", "set_cont_rb_ie", "clr_lc_ie", "set_lc_ie",
- "clr_drdy_ie", "set_drdy_ie", "clr_mrdy_ie", "set_mrdy_ie",
- "clr_done_ie", "set_done_ie", "clr_sar_ie", "set_sar_ie",
- "clr_linkp_ie", "set_linkp_ie", "clr_dma_ie", "set_dma_ie",
-};
-
-static const char *const mite_MCR_strings[] = {
- "amdevice", "1", "2", "3",
- "4", "5", "portio", "portvxi",
- "psizebyte", "psizehalf (byte & half = word)", "aseqxp1", "11",
- "12", "13", "blocken", "berhand",
- "reqsintlim/reqs0", "reqs1", "reqs2", "rd32",
- "rd512", "rl1", "rl2", "rl8",
- "24", "25", "26", "27",
- "28", "29", "30", "stopen",
-};
-
-static const char *const mite_DCR_strings[] = {
- "amdevice", "1", "2", "3",
- "4", "5", "portio", "portvxi",
- "psizebyte", "psizehalf (byte & half = word)", "aseqxp1", "aseqxp2",
- "aseqxp8", "13", "blocken", "berhand",
- "reqsintlim", "reqs1", "reqs2", "rd32",
- "rd512", "rl1", "rl2", "rl8",
- "23", "24", "25", "27",
- "28", "wsdevc", "wsdevs", "rwdevpack",
-};
-
-static const char *const mite_LKCR_strings[] = {
- "amdevice", "1", "2", "3",
- "4", "5", "portio", "portvxi",
- "psizebyte", "psizehalf (byte & half = word)", "asequp", "aseqdown",
- "12", "13", "14", "berhand",
- "16", "17", "18", "rd32",
- "rd512", "rl1", "rl2", "rl8",
- "24", "25", "26", "27",
- "28", "29", "30", "chngend",
-};
-
-static const char *const mite_CHSR_strings[] = {
- "d.err0", "d.err1", "m.err0", "m.err1",
- "l.err0", "l.err1", "drq0", "drq1",
- "end", "xferr", "operr0", "operr1",
- "stops", "habort", "sabort", "error",
- "16", "conts_rb", "18", "linkc",
- "20", "drdy", "22", "mrdy",
- "24", "done", "26", "sars",
- "28", "lpauses", "30", "int",
-};
-
-static void mite_decode(const char *const *bit_str, unsigned int bits)
-{
- int i;
-
- for (i = 31; i >= 0; i--) {
- if (bits & (1 << i))
- pr_debug(" %s\n", bit_str[i]);
- }
-}
-
-void mite_dump_regs(struct mite_channel *mite_chan)
-{
- void __iomem *mite_io_addr = mite_chan->mite->mite_io_addr;
- unsigned int offset;
- unsigned int value;
- int channel = mite_chan->channel;
-
- pr_debug("mite_dump_regs ch%i\n", channel);
- pr_debug("mite address is =%p\n", mite_io_addr);
-
- offset = MITE_CHOR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[CHOR] at 0x%08x =0x%08x\n", offset, value);
- mite_decode(mite_CHOR_strings, value);
- offset = MITE_CHCR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[CHCR] at 0x%08x =0x%08x\n", offset, value);
- mite_decode(mite_CHCR_strings, value);
- offset = MITE_TCR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[TCR] at 0x%08x =0x%08x\n", offset, value);
- offset = MITE_MCR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[MCR] at 0x%08x =0x%08x\n", offset, value);
- mite_decode(mite_MCR_strings, value);
- offset = MITE_MAR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[MAR] at 0x%08x =0x%08x\n", offset, value);
- offset = MITE_DCR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[DCR] at 0x%08x =0x%08x\n", offset, value);
- mite_decode(mite_DCR_strings, value);
- offset = MITE_DAR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[DAR] at 0x%08x =0x%08x\n", offset, value);
- offset = MITE_LKCR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[LKCR] at 0x%08x =0x%08x\n", offset, value);
- mite_decode(mite_LKCR_strings, value);
- offset = MITE_LKAR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[LKAR] at 0x%08x =0x%08x\n", offset, value);
- offset = MITE_CHSR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[CHSR] at 0x%08x =0x%08x\n", offset, value);
- mite_decode(mite_CHSR_strings, value);
- offset = MITE_FCR(channel);
- value = readl(mite_io_addr + offset);
- pr_debug("mite status[FCR] at 0x%08x =0x%08x\n", offset, value);
-}
-EXPORT_SYMBOL_GPL(mite_dump_regs);
-#endif
-
static int __init mite_module_init(void)
{
return 0;
diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h
index 8423b8bf3384..bcf2f972376e 100644
--- a/drivers/staging/comedi/drivers/mite.h
+++ b/drivers/staging/comedi/drivers/mite.h
@@ -24,15 +24,8 @@
#include <linux/slab.h>
#include "../comedidev.h"
-/* #define DEBUG_MITE */
#define PCIMIO_COMPAT
-#ifdef DEBUG_MITE
-#define MDPRINTK(format, args...) pr_debug(format , ## args)
-#else
-#define MDPRINTK(format, args...) do { } while (0)
-#endif
-
#define MAX_MITE_DMA_CHANNELS 8
struct mite_dma_descriptor {
@@ -129,11 +122,6 @@ void mite_prep_dma(struct mite_channel *mite_chan,
int mite_buf_change(struct mite_dma_descriptor_ring *ring,
struct comedi_async *async);
-#ifdef DEBUG_MITE
-void mite_print_chsr(unsigned int chsr);
-void mite_dump_regs(struct mite_channel *mite_chan);
-#endif
-
static inline int CHAN_OFFSET(int channel)
{
return 0x500 + 0x100 * channel;
diff --git a/drivers/staging/comedi/drivers/mpc624.c b/drivers/staging/comedi/drivers/mpc624.c
index acbaeee6250c..fe4621ea65c3 100644
--- a/drivers/staging/comedi/drivers/mpc624.c
+++ b/drivers/staging/comedi/drivers/mpc624.c
@@ -159,11 +159,6 @@ static int mpc624_ai_rinsn(struct comedi_device *dev,
* We always write 0 to GNSWA bit, so the channel range is +-/10.1Vdc
*/
outb(insn->chanspec, dev->iobase + MPC624_GNMUXCH);
-/* printk("Channel %d:\n", insn->chanspec); */
- if (!insn->n) {
- printk(KERN_INFO "MPC624: Warning, no data to acquire\n");
- return 0;
- }
for (n = 0; n < insn->n; n++) {
/* Trigger the conversion */
@@ -182,11 +177,9 @@ static int mpc624_ai_rinsn(struct comedi_device *dev,
else
break;
}
- if (i == TIMEOUT) {
- printk(KERN_ERR "MPC624: timeout (%dms)\n", TIMEOUT);
- data[n] = 0;
+ if (i == TIMEOUT)
return -ETIMEDOUT;
- }
+
/* Start reading data */
data_in = 0;
data_out = devpriv->ulConvertionRate;
@@ -245,11 +238,11 @@ static int mpc624_ai_rinsn(struct comedi_device *dev,
*/
if (data_in & MPC624_EOC_BIT)
- printk(KERN_INFO "MPC624:EOC bit is set (data_in=%lu)!",
- data_in);
+ dev_dbg(dev->class_dev,
+ "EOC bit is set (data_in=%lu)!", data_in);
if (data_in & MPC624_DMY_BIT)
- printk(KERN_INFO "MPC624:DMY bit is set (data_in=%lu)!",
- data_in);
+ dev_dbg(dev->class_dev,
+ "DMY bit is set (data_in=%lu)!", data_in);
if (data_in & MPC624_SGN_BIT) { /* Voltage is positive */
/*
* comedi operates on unsigned numbers, so mask off EOC
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 85aa9609d6a2..860fc81fb11c 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -455,7 +455,7 @@ static int ni6527_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni6527_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(ni6527_pci_table) = {
+static const struct pci_device_id ni6527_pci_table[] = {
{ PCI_VDEVICE(NI, 0x2b10), BOARD_PXI6527 },
{ PCI_VDEVICE(NI, 0x2b20), BOARD_PCI6527 },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 853f62b2b1a9..6e42001f686e 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -43,9 +43,6 @@ except maybe the 6514.
*/
-#define DEBUG 1
-#define DEBUG_FLAGS
-
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -430,7 +427,7 @@ static irqreturn_t ni_65xx_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct ni_65xx_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[2];
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned int status;
status = readb(devpriv->mite->daq_io_addr + Change_Status);
@@ -741,7 +738,7 @@ static int ni_65xx_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni_65xx_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(ni_65xx_pci_table) = {
+static const struct pci_device_id ni_65xx_pci_table[] = {
{ PCI_VDEVICE(NI, 0x1710), BOARD_PXI6509 },
{ PCI_VDEVICE(NI, 0x7085), BOARD_PCI6509 },
{ PCI_VDEVICE(NI, 0x7086), BOARD_PXI6528 },
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 8a991dcab24a..df42e3906171 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -55,112 +55,112 @@ for 4 */
#define MAX_DMA_CHANNEL 4
/* See Register-Level Programmer Manual page 3.1 */
-enum NI_660x_Register {
- G0InterruptAcknowledge,
- G0StatusRegister,
- G1InterruptAcknowledge,
- G1StatusRegister,
- G01StatusRegister,
- G0CommandRegister,
- STCDIOParallelInput,
- G1CommandRegister,
- G0HWSaveRegister,
- G1HWSaveRegister,
- STCDIOOutput,
- STCDIOControl,
- G0SWSaveRegister,
- G1SWSaveRegister,
- G0ModeRegister,
- G01JointStatus1Register,
- G1ModeRegister,
- STCDIOSerialInput,
- G0LoadARegister,
- G01JointStatus2Register,
- G0LoadBRegister,
- G1LoadARegister,
- G1LoadBRegister,
- G0InputSelectRegister,
- G1InputSelectRegister,
- G0AutoincrementRegister,
- G1AutoincrementRegister,
- G01JointResetRegister,
- G0InterruptEnable,
- G1InterruptEnable,
- G0CountingModeRegister,
- G1CountingModeRegister,
- G0SecondGateRegister,
- G1SecondGateRegister,
- G0DMAConfigRegister,
- G0DMAStatusRegister,
- G1DMAConfigRegister,
- G1DMAStatusRegister,
- G2InterruptAcknowledge,
- G2StatusRegister,
- G3InterruptAcknowledge,
- G3StatusRegister,
- G23StatusRegister,
- G2CommandRegister,
- G3CommandRegister,
- G2HWSaveRegister,
- G3HWSaveRegister,
- G2SWSaveRegister,
- G3SWSaveRegister,
- G2ModeRegister,
- G23JointStatus1Register,
- G3ModeRegister,
- G2LoadARegister,
- G23JointStatus2Register,
- G2LoadBRegister,
- G3LoadARegister,
- G3LoadBRegister,
- G2InputSelectRegister,
- G3InputSelectRegister,
- G2AutoincrementRegister,
- G3AutoincrementRegister,
- G23JointResetRegister,
- G2InterruptEnable,
- G3InterruptEnable,
- G2CountingModeRegister,
- G3CountingModeRegister,
- G3SecondGateRegister,
- G2SecondGateRegister,
- G2DMAConfigRegister,
- G2DMAStatusRegister,
- G3DMAConfigRegister,
- G3DMAStatusRegister,
- DIO32Input,
- DIO32Output,
- ClockConfigRegister,
- GlobalInterruptStatusRegister,
- DMAConfigRegister,
- GlobalInterruptConfigRegister,
- IOConfigReg0_1,
- IOConfigReg2_3,
- IOConfigReg4_5,
- IOConfigReg6_7,
- IOConfigReg8_9,
- IOConfigReg10_11,
- IOConfigReg12_13,
- IOConfigReg14_15,
- IOConfigReg16_17,
- IOConfigReg18_19,
- IOConfigReg20_21,
- IOConfigReg22_23,
- IOConfigReg24_25,
- IOConfigReg26_27,
- IOConfigReg28_29,
- IOConfigReg30_31,
- IOConfigReg32_33,
- IOConfigReg34_35,
- IOConfigReg36_37,
- IOConfigReg38_39,
- NumRegisters,
+enum ni_660x_register {
+ NI660X_G0_INT_ACK,
+ NI660X_G0_STATUS,
+ NI660X_G1_INT_ACK,
+ NI660X_G1_STATUS,
+ NI660X_G01_STATUS,
+ NI660X_G0_CMD,
+ NI660X_STC_DIO_PARALLEL_INPUT,
+ NI660X_G1_CMD,
+ NI660X_G0_HW_SAVE,
+ NI660X_G1_HW_SAVE,
+ NI660X_STC_DIO_OUTPUT,
+ NI660X_STC_DIO_CONTROL,
+ NI660X_G0_SW_SAVE,
+ NI660X_G1_SW_SAVE,
+ NI660X_G0_MODE,
+ NI660X_G01_STATUS1,
+ NI660X_G1_MODE,
+ NI660X_STC_DIO_SERIAL_INPUT,
+ NI660X_G0_LOADA,
+ NI660X_G01_STATUS2,
+ NI660X_G0_LOADB,
+ NI660X_G1_LOADA,
+ NI660X_G1_LOADB,
+ NI660X_G0_INPUT_SEL,
+ NI660X_G1_INPUT_SEL,
+ NI660X_G0_AUTO_INC,
+ NI660X_G1_AUTO_INC,
+ NI660X_G01_RESET,
+ NI660X_G0_INT_ENA,
+ NI660X_G1_INT_ENA,
+ NI660X_G0_CNT_MODE,
+ NI660X_G1_CNT_MODE,
+ NI660X_G0_GATE2,
+ NI660X_G1_GATE2,
+ NI660X_G0_DMA_CFG,
+ NI660X_G0_DMA_STATUS,
+ NI660X_G1_DMA_CFG,
+ NI660X_G1_DMA_STATUS,
+ NI660X_G2_INT_ACK,
+ NI660X_G2_STATUS,
+ NI660X_G3_INT_ACK,
+ NI660X_G3_STATUS,
+ NI660X_G23_STATUS,
+ NI660X_G2_CMD,
+ NI660X_G3_CMD,
+ NI660X_G2_HW_SAVE,
+ NI660X_G3_HW_SAVE,
+ NI660X_G2_SW_SAVE,
+ NI660X_G3_SW_SAVE,
+ NI660X_G2_MODE,
+ NI660X_G23_STATUS1,
+ NI660X_G3_MODE,
+ NI660X_G2_LOADA,
+ NI660X_G23_STATUS2,
+ NI660X_G2_LOADB,
+ NI660X_G3_LOADA,
+ NI660X_G3_LOADB,
+ NI660X_G2_INPUT_SEL,
+ NI660X_G3_INPUT_SEL,
+ NI660X_G2_AUTO_INC,
+ NI660X_G3_AUTO_INC,
+ NI660X_G23_RESET,
+ NI660X_G2_INT_ENA,
+ NI660X_G3_INT_ENA,
+ NI660X_G2_CNT_MODE,
+ NI660X_G3_CNT_MODE,
+ NI660X_G3_GATE2,
+ NI660X_G2_GATE2,
+ NI660X_G2_DMA_CFG,
+ NI660X_G2_DMA_STATUS,
+ NI660X_G3_DMA_CFG,
+ NI660X_G3_DMA_STATUS,
+ NI660X_DIO32_INPUT,
+ NI660X_DIO32_OUTPUT,
+ NI660X_CLK_CFG,
+ NI660X_GLOBAL_INT_STATUS,
+ NI660X_DMA_CFG,
+ NI660X_GLOBAL_INT_CFG,
+ NI660X_IO_CFG_0_1,
+ NI660X_IO_CFG_2_3,
+ NI660X_IO_CFG_4_5,
+ NI660X_IO_CFG_6_7,
+ NI660X_IO_CFG_8_9,
+ NI660X_IO_CFG_10_11,
+ NI660X_IO_CFG_12_13,
+ NI660X_IO_CFG_14_15,
+ NI660X_IO_CFG_16_17,
+ NI660X_IO_CFG_18_19,
+ NI660X_IO_CFG_20_21,
+ NI660X_IO_CFG_22_23,
+ NI660X_IO_CFG_24_25,
+ NI660X_IO_CFG_26_27,
+ NI660X_IO_CFG_28_29,
+ NI660X_IO_CFG_30_31,
+ NI660X_IO_CFG_32_33,
+ NI660X_IO_CFG_34_35,
+ NI660X_IO_CFG_36_37,
+ NI660X_IO_CFG_38_39,
+ NI660X_NUM_REGS,
};
static inline unsigned IOConfigReg(unsigned pfi_channel)
{
- unsigned reg = IOConfigReg0_1 + pfi_channel / 2;
- BUG_ON(reg > IOConfigReg38_39);
+ unsigned reg = NI660X_IO_CFG_0_1 + pfi_channel / 2;
+ BUG_ON(reg > NI660X_IO_CFG_38_39);
return reg;
}
@@ -200,7 +200,7 @@ struct NI_660xRegisterData {
enum ni_660x_register_width size; /* 1 byte, 2 bytes, or 4 bytes */
};
-static const struct NI_660xRegisterData registerData[NumRegisters] = {
+static const struct NI_660xRegisterData registerData[NI660X_NUM_REGS] = {
{"G0 Interrupt Acknowledge", 0x004, NI_660x_WRITE, DATA_2B},
{"G0 Status Register", 0x004, NI_660x_READ, DATA_2B},
{"G1 Interrupt Acknowledge", 0x006, NI_660x_WRITE, DATA_2B},
@@ -347,11 +347,6 @@ static inline unsigned dma_select_mask(unsigned dma_channel)
enum dma_selection {
dma_selection_none = 0x1f,
};
-static inline unsigned dma_selection_counter(unsigned counter_index)
-{
- BUG_ON(counter_index >= counters_per_chip);
- return counter_index;
-}
static inline unsigned dma_select_bits(unsigned dma_channel, unsigned selection)
{
@@ -444,229 +439,158 @@ static inline unsigned ni_660x_num_counters(struct comedi_device *dev)
return board->n_chips * counters_per_chip;
}
-static enum NI_660x_Register ni_gpct_to_660x_register(enum ni_gpct_register reg)
+static enum ni_660x_register ni_gpct_to_660x_register(enum ni_gpct_register reg)
{
- enum NI_660x_Register ni_660x_register;
switch (reg) {
- case NITIO_G0_Autoincrement_Reg:
- ni_660x_register = G0AutoincrementRegister;
- break;
- case NITIO_G1_Autoincrement_Reg:
- ni_660x_register = G1AutoincrementRegister;
- break;
- case NITIO_G2_Autoincrement_Reg:
- ni_660x_register = G2AutoincrementRegister;
- break;
- case NITIO_G3_Autoincrement_Reg:
- ni_660x_register = G3AutoincrementRegister;
- break;
- case NITIO_G0_Command_Reg:
- ni_660x_register = G0CommandRegister;
- break;
- case NITIO_G1_Command_Reg:
- ni_660x_register = G1CommandRegister;
- break;
- case NITIO_G2_Command_Reg:
- ni_660x_register = G2CommandRegister;
- break;
- case NITIO_G3_Command_Reg:
- ni_660x_register = G3CommandRegister;
- break;
- case NITIO_G0_HW_Save_Reg:
- ni_660x_register = G0HWSaveRegister;
- break;
- case NITIO_G1_HW_Save_Reg:
- ni_660x_register = G1HWSaveRegister;
- break;
- case NITIO_G2_HW_Save_Reg:
- ni_660x_register = G2HWSaveRegister;
- break;
- case NITIO_G3_HW_Save_Reg:
- ni_660x_register = G3HWSaveRegister;
- break;
- case NITIO_G0_SW_Save_Reg:
- ni_660x_register = G0SWSaveRegister;
- break;
- case NITIO_G1_SW_Save_Reg:
- ni_660x_register = G1SWSaveRegister;
- break;
- case NITIO_G2_SW_Save_Reg:
- ni_660x_register = G2SWSaveRegister;
- break;
- case NITIO_G3_SW_Save_Reg:
- ni_660x_register = G3SWSaveRegister;
- break;
- case NITIO_G0_Mode_Reg:
- ni_660x_register = G0ModeRegister;
- break;
- case NITIO_G1_Mode_Reg:
- ni_660x_register = G1ModeRegister;
- break;
- case NITIO_G2_Mode_Reg:
- ni_660x_register = G2ModeRegister;
- break;
- case NITIO_G3_Mode_Reg:
- ni_660x_register = G3ModeRegister;
- break;
- case NITIO_G0_LoadA_Reg:
- ni_660x_register = G0LoadARegister;
- break;
- case NITIO_G1_LoadA_Reg:
- ni_660x_register = G1LoadARegister;
- break;
- case NITIO_G2_LoadA_Reg:
- ni_660x_register = G2LoadARegister;
- break;
- case NITIO_G3_LoadA_Reg:
- ni_660x_register = G3LoadARegister;
- break;
- case NITIO_G0_LoadB_Reg:
- ni_660x_register = G0LoadBRegister;
- break;
- case NITIO_G1_LoadB_Reg:
- ni_660x_register = G1LoadBRegister;
- break;
- case NITIO_G2_LoadB_Reg:
- ni_660x_register = G2LoadBRegister;
- break;
- case NITIO_G3_LoadB_Reg:
- ni_660x_register = G3LoadBRegister;
- break;
- case NITIO_G0_Input_Select_Reg:
- ni_660x_register = G0InputSelectRegister;
- break;
- case NITIO_G1_Input_Select_Reg:
- ni_660x_register = G1InputSelectRegister;
- break;
- case NITIO_G2_Input_Select_Reg:
- ni_660x_register = G2InputSelectRegister;
- break;
- case NITIO_G3_Input_Select_Reg:
- ni_660x_register = G3InputSelectRegister;
- break;
- case NITIO_G01_Status_Reg:
- ni_660x_register = G01StatusRegister;
- break;
- case NITIO_G23_Status_Reg:
- ni_660x_register = G23StatusRegister;
- break;
- case NITIO_G01_Joint_Reset_Reg:
- ni_660x_register = G01JointResetRegister;
- break;
- case NITIO_G23_Joint_Reset_Reg:
- ni_660x_register = G23JointResetRegister;
- break;
- case NITIO_G01_Joint_Status1_Reg:
- ni_660x_register = G01JointStatus1Register;
- break;
- case NITIO_G23_Joint_Status1_Reg:
- ni_660x_register = G23JointStatus1Register;
- break;
- case NITIO_G01_Joint_Status2_Reg:
- ni_660x_register = G01JointStatus2Register;
- break;
- case NITIO_G23_Joint_Status2_Reg:
- ni_660x_register = G23JointStatus2Register;
- break;
- case NITIO_G0_Counting_Mode_Reg:
- ni_660x_register = G0CountingModeRegister;
- break;
- case NITIO_G1_Counting_Mode_Reg:
- ni_660x_register = G1CountingModeRegister;
- break;
- case NITIO_G2_Counting_Mode_Reg:
- ni_660x_register = G2CountingModeRegister;
- break;
- case NITIO_G3_Counting_Mode_Reg:
- ni_660x_register = G3CountingModeRegister;
- break;
- case NITIO_G0_Second_Gate_Reg:
- ni_660x_register = G0SecondGateRegister;
- break;
- case NITIO_G1_Second_Gate_Reg:
- ni_660x_register = G1SecondGateRegister;
- break;
- case NITIO_G2_Second_Gate_Reg:
- ni_660x_register = G2SecondGateRegister;
- break;
- case NITIO_G3_Second_Gate_Reg:
- ni_660x_register = G3SecondGateRegister;
- break;
- case NITIO_G0_DMA_Config_Reg:
- ni_660x_register = G0DMAConfigRegister;
- break;
- case NITIO_G0_DMA_Status_Reg:
- ni_660x_register = G0DMAStatusRegister;
- break;
- case NITIO_G1_DMA_Config_Reg:
- ni_660x_register = G1DMAConfigRegister;
- break;
- case NITIO_G1_DMA_Status_Reg:
- ni_660x_register = G1DMAStatusRegister;
- break;
- case NITIO_G2_DMA_Config_Reg:
- ni_660x_register = G2DMAConfigRegister;
- break;
- case NITIO_G2_DMA_Status_Reg:
- ni_660x_register = G2DMAStatusRegister;
- break;
- case NITIO_G3_DMA_Config_Reg:
- ni_660x_register = G3DMAConfigRegister;
- break;
- case NITIO_G3_DMA_Status_Reg:
- ni_660x_register = G3DMAStatusRegister;
- break;
- case NITIO_G0_Interrupt_Acknowledge_Reg:
- ni_660x_register = G0InterruptAcknowledge;
- break;
- case NITIO_G1_Interrupt_Acknowledge_Reg:
- ni_660x_register = G1InterruptAcknowledge;
- break;
- case NITIO_G2_Interrupt_Acknowledge_Reg:
- ni_660x_register = G2InterruptAcknowledge;
- break;
- case NITIO_G3_Interrupt_Acknowledge_Reg:
- ni_660x_register = G3InterruptAcknowledge;
- break;
- case NITIO_G0_Status_Reg:
- ni_660x_register = G0StatusRegister;
- break;
- case NITIO_G1_Status_Reg:
- ni_660x_register = G1StatusRegister;
- break;
- case NITIO_G2_Status_Reg:
- ni_660x_register = G2StatusRegister;
- break;
- case NITIO_G3_Status_Reg:
- ni_660x_register = G3StatusRegister;
- break;
- case NITIO_G0_Interrupt_Enable_Reg:
- ni_660x_register = G0InterruptEnable;
- break;
- case NITIO_G1_Interrupt_Enable_Reg:
- ni_660x_register = G1InterruptEnable;
- break;
- case NITIO_G2_Interrupt_Enable_Reg:
- ni_660x_register = G2InterruptEnable;
- break;
- case NITIO_G3_Interrupt_Enable_Reg:
- ni_660x_register = G3InterruptEnable;
- break;
+ case NITIO_G0_AUTO_INC:
+ return NI660X_G0_AUTO_INC;
+ case NITIO_G1_AUTO_INC:
+ return NI660X_G1_AUTO_INC;
+ case NITIO_G2_AUTO_INC:
+ return NI660X_G2_AUTO_INC;
+ case NITIO_G3_AUTO_INC:
+ return NI660X_G3_AUTO_INC;
+ case NITIO_G0_CMD:
+ return NI660X_G0_CMD;
+ case NITIO_G1_CMD:
+ return NI660X_G1_CMD;
+ case NITIO_G2_CMD:
+ return NI660X_G2_CMD;
+ case NITIO_G3_CMD:
+ return NI660X_G3_CMD;
+ case NITIO_G0_HW_SAVE:
+ return NI660X_G0_HW_SAVE;
+ case NITIO_G1_HW_SAVE:
+ return NI660X_G1_HW_SAVE;
+ case NITIO_G2_HW_SAVE:
+ return NI660X_G2_HW_SAVE;
+ case NITIO_G3_HW_SAVE:
+ return NI660X_G3_HW_SAVE;
+ case NITIO_G0_SW_SAVE:
+ return NI660X_G0_SW_SAVE;
+ case NITIO_G1_SW_SAVE:
+ return NI660X_G1_SW_SAVE;
+ case NITIO_G2_SW_SAVE:
+ return NI660X_G2_SW_SAVE;
+ case NITIO_G3_SW_SAVE:
+ return NI660X_G3_SW_SAVE;
+ case NITIO_G0_MODE:
+ return NI660X_G0_MODE;
+ case NITIO_G1_MODE:
+ return NI660X_G1_MODE;
+ case NITIO_G2_MODE:
+ return NI660X_G2_MODE;
+ case NITIO_G3_MODE:
+ return NI660X_G3_MODE;
+ case NITIO_G0_LOADA:
+ return NI660X_G0_LOADA;
+ case NITIO_G1_LOADA:
+ return NI660X_G1_LOADA;
+ case NITIO_G2_LOADA:
+ return NI660X_G2_LOADA;
+ case NITIO_G3_LOADA:
+ return NI660X_G3_LOADA;
+ case NITIO_G0_LOADB:
+ return NI660X_G0_LOADB;
+ case NITIO_G1_LOADB:
+ return NI660X_G1_LOADB;
+ case NITIO_G2_LOADB:
+ return NI660X_G2_LOADB;
+ case NITIO_G3_LOADB:
+ return NI660X_G3_LOADB;
+ case NITIO_G0_INPUT_SEL:
+ return NI660X_G0_INPUT_SEL;
+ case NITIO_G1_INPUT_SEL:
+ return NI660X_G1_INPUT_SEL;
+ case NITIO_G2_INPUT_SEL:
+ return NI660X_G2_INPUT_SEL;
+ case NITIO_G3_INPUT_SEL:
+ return NI660X_G3_INPUT_SEL;
+ case NITIO_G01_STATUS:
+ return NI660X_G01_STATUS;
+ case NITIO_G23_STATUS:
+ return NI660X_G23_STATUS;
+ case NITIO_G01_RESET:
+ return NI660X_G01_RESET;
+ case NITIO_G23_RESET:
+ return NI660X_G23_RESET;
+ case NITIO_G01_STATUS1:
+ return NI660X_G01_STATUS1;
+ case NITIO_G23_STATUS1:
+ return NI660X_G23_STATUS1;
+ case NITIO_G01_STATUS2:
+ return NI660X_G01_STATUS2;
+ case NITIO_G23_STATUS2:
+ return NI660X_G23_STATUS2;
+ case NITIO_G0_CNT_MODE:
+ return NI660X_G0_CNT_MODE;
+ case NITIO_G1_CNT_MODE:
+ return NI660X_G1_CNT_MODE;
+ case NITIO_G2_CNT_MODE:
+ return NI660X_G2_CNT_MODE;
+ case NITIO_G3_CNT_MODE:
+ return NI660X_G3_CNT_MODE;
+ case NITIO_G0_GATE2:
+ return NI660X_G0_GATE2;
+ case NITIO_G1_GATE2:
+ return NI660X_G1_GATE2;
+ case NITIO_G2_GATE2:
+ return NI660X_G2_GATE2;
+ case NITIO_G3_GATE2:
+ return NI660X_G3_GATE2;
+ case NITIO_G0_DMA_CFG:
+ return NI660X_G0_DMA_CFG;
+ case NITIO_G0_DMA_STATUS:
+ return NI660X_G0_DMA_STATUS;
+ case NITIO_G1_DMA_CFG:
+ return NI660X_G1_DMA_CFG;
+ case NITIO_G1_DMA_STATUS:
+ return NI660X_G1_DMA_STATUS;
+ case NITIO_G2_DMA_CFG:
+ return NI660X_G2_DMA_CFG;
+ case NITIO_G2_DMA_STATUS:
+ return NI660X_G2_DMA_STATUS;
+ case NITIO_G3_DMA_CFG:
+ return NI660X_G3_DMA_CFG;
+ case NITIO_G3_DMA_STATUS:
+ return NI660X_G3_DMA_STATUS;
+ case NITIO_G0_INT_ACK:
+ return NI660X_G0_INT_ACK;
+ case NITIO_G1_INT_ACK:
+ return NI660X_G1_INT_ACK;
+ case NITIO_G2_INT_ACK:
+ return NI660X_G2_INT_ACK;
+ case NITIO_G3_INT_ACK:
+ return NI660X_G3_INT_ACK;
+ case NITIO_G0_STATUS:
+ return NI660X_G0_STATUS;
+ case NITIO_G1_STATUS:
+ return NI660X_G1_STATUS;
+ case NITIO_G2_STATUS:
+ return NI660X_G2_STATUS;
+ case NITIO_G3_STATUS:
+ return NI660X_G3_STATUS;
+ case NITIO_G0_INT_ENA:
+ return NI660X_G0_INT_ENA;
+ case NITIO_G1_INT_ENA:
+ return NI660X_G1_INT_ENA;
+ case NITIO_G2_INT_ENA:
+ return NI660X_G2_INT_ENA;
+ case NITIO_G3_INT_ENA:
+ return NI660X_G3_INT_ENA;
default:
BUG();
return 0;
- break;
}
- return ni_660x_register;
}
static inline void ni_660x_write_register(struct comedi_device *dev,
- unsigned chip_index, unsigned bits,
- enum NI_660x_Register reg)
+ unsigned chip, unsigned bits,
+ enum ni_660x_register reg)
{
struct ni_660x_private *devpriv = dev->private;
void __iomem *write_address =
- devpriv->mite->daq_io_addr + GPCT_OFFSET[chip_index] +
+ devpriv->mite->daq_io_addr + GPCT_OFFSET[chip] +
registerData[reg].offset;
switch (registerData[reg].size) {
@@ -683,12 +607,12 @@ static inline void ni_660x_write_register(struct comedi_device *dev,
}
static inline unsigned ni_660x_read_register(struct comedi_device *dev,
- unsigned chip_index,
- enum NI_660x_Register reg)
+ unsigned chip,
+ enum ni_660x_register reg)
{
struct ni_660x_private *devpriv = dev->private;
void __iomem *read_address =
- devpriv->mite->daq_io_addr + GPCT_OFFSET[chip_index] +
+ devpriv->mite->daq_io_addr + GPCT_OFFSET[chip] +
registerData[reg].offset;
switch (registerData[reg].size) {
@@ -709,18 +633,20 @@ static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
- enum NI_660x_Register ni_660x_register = ni_gpct_to_660x_register(reg);
- ni_660x_write_register(dev, counter->chip_index, bits,
- ni_660x_register);
+ enum ni_660x_register ni_660x_register = ni_gpct_to_660x_register(reg);
+ unsigned chip = counter->chip_index;
+
+ ni_660x_write_register(dev, chip, bits, ni_660x_register);
}
static unsigned ni_gpct_read_register(struct ni_gpct *counter,
enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
- enum NI_660x_Register ni_660x_register = ni_gpct_to_660x_register(reg);
- return ni_660x_read_register(dev, counter->chip_index,
- ni_660x_register);
+ enum ni_660x_register ni_660x_register = ni_gpct_to_660x_register(reg);
+ unsigned chip = counter->chip_index;
+
+ return ni_660x_read_register(dev, chip, ni_660x_register);
}
static inline struct mite_dma_descriptor_ring *mite_ring(struct ni_660x_private
@@ -728,7 +654,9 @@ static inline struct mite_dma_descriptor_ring *mite_ring(struct ni_660x_private
struct ni_gpct
*counter)
{
- return priv->mite_rings[counter->chip_index][counter->counter_index];
+ unsigned chip = counter->chip_index;
+
+ return priv->mite_rings[chip][counter->counter_index];
}
static inline void ni_660x_set_dma_channel(struct comedi_device *dev,
@@ -736,18 +664,17 @@ static inline void ni_660x_set_dma_channel(struct comedi_device *dev,
struct ni_gpct *counter)
{
struct ni_660x_private *devpriv = dev->private;
+ unsigned chip = counter->chip_index;
unsigned long flags;
spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->dma_configuration_soft_copies[counter->chip_index] &=
- ~dma_select_mask(mite_channel);
- devpriv->dma_configuration_soft_copies[counter->chip_index] |=
- dma_select_bits(mite_channel,
- dma_selection_counter(counter->counter_index));
- ni_660x_write_register(dev, counter->chip_index,
- devpriv->dma_configuration_soft_copies
- [counter->chip_index] |
- dma_reset_bit(mite_channel), DMAConfigRegister);
+ devpriv->dma_configuration_soft_copies[chip] &=
+ ~dma_select_mask(mite_channel);
+ devpriv->dma_configuration_soft_copies[chip] |=
+ dma_select_bits(mite_channel, counter->counter_index);
+ ni_660x_write_register(dev, chip,
+ devpriv->dma_configuration_soft_copies[chip] |
+ dma_reset_bit(mite_channel), NI660X_DMA_CFG);
mmiowb();
spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
}
@@ -757,16 +684,17 @@ static inline void ni_660x_unset_dma_channel(struct comedi_device *dev,
struct ni_gpct *counter)
{
struct ni_660x_private *devpriv = dev->private;
+ unsigned chip = counter->chip_index;
unsigned long flags;
spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->dma_configuration_soft_copies[counter->chip_index] &=
+ devpriv->dma_configuration_soft_copies[chip] &=
~dma_select_mask(mite_channel);
- devpriv->dma_configuration_soft_copies[counter->chip_index] |=
+ devpriv->dma_configuration_soft_copies[chip] |=
dma_select_bits(mite_channel, dma_selection_none);
- ni_660x_write_register(dev, counter->chip_index,
- devpriv->dma_configuration_soft_copies
- [counter->chip_index], DMAConfigRegister);
+ ni_660x_write_register(dev, chip,
+ devpriv->dma_configuration_soft_copies[chip],
+ NI660X_DMA_CFG);
mmiowb();
spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
}
@@ -815,11 +743,9 @@ static void ni_660x_release_mite_channel(struct comedi_device *dev,
static int ni_660x_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
+ struct ni_gpct *counter = s->private;
int retval;
- struct ni_gpct *counter = subdev_to_counter(s);
-/* const struct comedi_cmd *cmd = &s->async->cmd; */
-
retval = ni_660x_request_mite_channel(dev, counter, COMEDI_INPUT);
if (retval) {
comedi_error(dev,
@@ -827,22 +753,13 @@ static int ni_660x_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return retval;
}
ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL);
- retval = ni_tio_cmd(counter, s->async);
-
- return retval;
-}
-
-static int ni_660x_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- struct ni_gpct *counter = subdev_to_counter(s);
- return ni_tio_cmdtest(counter, cmd);
+ return ni_tio_cmd(dev, s);
}
static int ni_660x_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct ni_gpct *counter = subdev_to_counter(s);
+ struct ni_gpct *counter = s->private;
int retval;
retval = ni_tio_cancel(counter);
@@ -850,23 +767,28 @@ static int ni_660x_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return retval;
}
-static void set_tio_counterswap(struct comedi_device *dev, int chipset)
+static void set_tio_counterswap(struct comedi_device *dev, int chip)
{
- /* See P. 3.5 of the Register-Level Programming manual. The
- CounterSwap bit has to be set on the second chip, otherwise
- it will try to use the same pins as the first chip.
+ unsigned bits = 0;
+
+ /*
+ * See P. 3.5 of the Register-Level Programming manual.
+ * The CounterSwap bit has to be set on the second chip,
+ * otherwise it will try to use the same pins as the
+ * first chip.
*/
- if (chipset)
- ni_660x_write_register(dev, chipset, CounterSwap,
- ClockConfigRegister);
- else
- ni_660x_write_register(dev, chipset, 0, ClockConfigRegister);
+ if (chip)
+ bits = CounterSwap;
+
+ ni_660x_write_register(dev, chip, bits, NI660X_CLK_CFG);
}
static void ni_660x_handle_gpct_interrupt(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- ni_tio_handle_interrupt(subdev_to_counter(s), s);
+ struct ni_gpct *counter = s->private;
+
+ ni_tio_handle_interrupt(counter, s);
if (s->async->events) {
if (s->async->events & (COMEDI_CB_EOA | COMEDI_CB_ERROR |
COMEDI_CB_OVERFLOW)) {
@@ -901,11 +823,12 @@ static int ni_660x_input_poll(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct ni_660x_private *devpriv = dev->private;
+ struct ni_gpct *counter = s->private;
unsigned long flags;
/* lock to avoid race with comedi_poll */
spin_lock_irqsave(&devpriv->interrupt_lock, flags);
- mite_sync_input_dma(subdev_to_counter(s)->mite_chan, s->async);
+ mite_sync_input_dma(counter->mite_chan, s->async);
spin_unlock_irqrestore(&devpriv->interrupt_lock, flags);
return comedi_buf_read_n_available(s->async);
}
@@ -915,10 +838,10 @@ static int ni_660x_buf_change(struct comedi_device *dev,
unsigned long new_size)
{
struct ni_660x_private *devpriv = dev->private;
+ struct ni_gpct *counter = s->private;
int ret;
- ret = mite_buf_change(mite_ring(devpriv, subdev_to_counter(s)),
- s->async);
+ ret = mite_buf_change(mite_ring(devpriv, counter), s->async);
if (ret < 0)
return ret;
@@ -974,13 +897,6 @@ static void ni_660x_free_mite_rings(struct comedi_device *dev)
}
}
-static int
-ni_660x_GPCT_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- return ni_tio_rinsn(subdev_to_counter(s), insn, data);
-}
-
static void init_tio_chip(struct comedi_device *dev, int chipset)
{
struct ni_660x_private *devpriv = dev->private;
@@ -994,25 +910,11 @@ static void init_tio_chip(struct comedi_device *dev, int chipset)
}
ni_660x_write_register(dev, chipset,
devpriv->dma_configuration_soft_copies[chipset],
- DMAConfigRegister);
+ NI660X_DMA_CFG);
for (i = 0; i < NUM_PFI_CHANNELS; ++i)
ni_660x_write_register(dev, chipset, 0, IOConfigReg(i));
}
-static int
-ni_660x_GPCT_insn_config(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- return ni_tio_insn_config(subdev_to_counter(s), insn, data);
-}
-
-static int ni_660x_GPCT_winsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- return ni_tio_winsn(subdev_to_counter(s), insn, data);
-}
-
static int ni_660x_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -1024,13 +926,13 @@ static int ni_660x_dio_insn_bits(struct comedi_device *dev,
s->state &= ~(data[0] << base_bitfield_channel);
s->state |= (data[0] & data[1]) << base_bitfield_channel;
/* Write out the new digital output lines */
- ni_660x_write_register(dev, 0, s->state, DIO32Output);
+ ni_660x_write_register(dev, 0, s->state, NI660X_DIO32_OUTPUT);
}
/* on return, data[1] contains the value of the digital
* input and output lines. */
- data[1] =
- (ni_660x_read_register(dev, 0,
- DIO32Input) >> base_bitfield_channel);
+ data[1] = (ni_660x_read_register(dev, 0, NI660X_DIO32_INPUT) >>
+ base_bitfield_channel);
+
return insn->n;
}
@@ -1215,7 +1117,7 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
s->insn_config = ni_660x_dio_insn_config;
/* we use the ioconfig registers to control dio direction, so zero
output enables in stc dio control reg */
- ni_660x_write_register(dev, 0, 0, STCDIOControl);
+ ni_660x_write_register(dev, 0, 0, NI660X_STC_DIO_CONTROL);
devpriv->counter_dev = ni_gpct_device_construct(dev,
&ni_gpct_write_register,
@@ -1234,12 +1136,12 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
SDF_CMD_READ /* | SDF_CMD_WRITE */ ;
s->n_chan = 3;
s->maxdata = 0xffffffff;
- s->insn_read = ni_660x_GPCT_rinsn;
- s->insn_write = ni_660x_GPCT_winsn;
- s->insn_config = ni_660x_GPCT_insn_config;
+ s->insn_read = ni_tio_insn_read;
+ s->insn_write = ni_tio_insn_write;
+ s->insn_config = ni_tio_insn_config;
s->do_cmd = &ni_660x_cmd;
s->len_chanlist = 1;
- s->do_cmdtest = &ni_660x_cmdtest;
+ s->do_cmdtest = ni_tio_cmdtest;
s->cancel = &ni_660x_cancel;
s->poll = &ni_660x_input_poll;
s->async_dma_dir = DMA_BIDIRECTIONAL;
@@ -1284,7 +1186,7 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
if (board->n_chips > 1)
global_interrupt_config_bits |= Cascade_Int_Enable_Bit;
ni_660x_write_register(dev, 0, global_interrupt_config_bits,
- GlobalInterruptConfigRegister);
+ NI660X_GLOBAL_INT_CFG);
dev_info(dev->class_dev, "ni_660x: %s attached\n", dev->board_name);
return 0;
}
@@ -1320,7 +1222,7 @@ static int ni_660x_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni_660x_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(ni_660x_pci_table) = {
+static const struct pci_device_id ni_660x_pci_table[] = {
{ PCI_VDEVICE(NI, 0x1310), BOARD_PCI6602 },
{ PCI_VDEVICE(NI, 0x1360), BOARD_PXI6602 },
{ PCI_VDEVICE(NI, 0x2c60), BOARD_PCI6601 },
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index e4414cf110e7..8550fdc4ccd3 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -282,7 +282,7 @@ static int ni_670x_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni_670x_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(ni_670x_pci_table) = {
+static const struct pci_device_id ni_670x_pci_table[] = {
{ PCI_VDEVICE(NI, 0x1290), BOARD_PCI6704 },
{ PCI_VDEVICE(NI, 0x1920), BOARD_PXI6704 },
{ PCI_VDEVICE(NI, 0x2c90), BOARD_PCI6703 },
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index 63c847932eb8..f83eb9ebe278 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -74,9 +74,6 @@ TRIG_WAKE_EOS
#define A2150_SIZE 28
#define A2150_DMA_BUFFER_SIZE 0xff00 /* size in bytes of dma buffer */
-/* #define A2150_DEBUG enable debugging code */
-#undef A2150_DEBUG /* disable debugging code */
-
/* Registers and bits */
#define CONFIG_REG 0x0
#define CHANNEL_BITS(x) ((x) & 0x7)
@@ -127,10 +124,9 @@ struct a2150_board {
/* analog input range */
static const struct comedi_lrange range_a2150 = {
- 1,
- {
- RANGE(-2.828, 2.828),
- }
+ 1, {
+ BIP_RANGE(2.828)
+ }
};
/* enum must match board indices */
@@ -167,19 +163,6 @@ static int a2150_get_timing(struct comedi_device *dev, unsigned int *period,
static int a2150_set_chanlist(struct comedi_device *dev,
unsigned int start_channel,
unsigned int num_channels);
-#ifdef A2150_DEBUG
-
-static void ni_dump_regs(struct comedi_device *dev)
-{
- struct a2150_private *devpriv = dev->private;
-
- printk("config bits 0x%x\n", devpriv->config_bits);
- printk("irq dma bits 0x%x\n", devpriv->irq_dma_bits);
- printk("status bits 0x%x\n", inw(dev->iobase + STATUS_REG));
-}
-
-#endif
-
/* interrupt service routine */
static irqreturn_t a2150_interrupt(int irq, void *d)
{
@@ -411,11 +394,6 @@ static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
unsigned int old_config_bits = devpriv->config_bits;
unsigned int trigger_bits;
- if (!dev->irq || !devpriv->dma) {
- comedi_error(dev,
- " irq and dma required, cannot do hardware conversions");
- return -1;
- }
if (cmd->flags & TRIG_RT) {
comedi_error(dev,
" dma incompatible with hard real-time interrupt (TRIG_RT), aborting");
@@ -506,9 +484,6 @@ static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* start acquisition for soft trigger */
if (cmd->start_src == TRIG_NOW)
outw(0, dev->iobase + FIFO_START_REG);
-#ifdef A2150_DEBUG
- ni_dump_regs(dev);
-#endif
return 0;
}
@@ -573,13 +548,7 @@ static int a2150_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
comedi_error(dev, "timeout");
return -ETIME;
}
-#ifdef A2150_DEBUG
- ni_dump_regs(dev);
-#endif
data[n] = inw(dev->iobase + FIFO_DATA_REG);
-#ifdef A2150_DEBUG
- printk(" data is %i\n", data[n]);
-#endif
data[n] ^= 0x8000;
}
@@ -728,46 +697,35 @@ static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- /* grab our IRQ */
- if (irq) {
- /* check that irq is supported */
- if (irq < 3 || irq == 8 || irq == 13 || irq > 15) {
- printk(" invalid irq line %u\n", irq);
- return -EINVAL;
- }
- if (request_irq(irq, a2150_interrupt, 0,
- dev->driver->driver_name, dev)) {
- printk("unable to allocate irq %u\n", irq);
- return -EINVAL;
+ dev->board_ptr = a2150_boards + a2150_probe(dev);
+ thisboard = comedi_board(dev);
+ dev->board_name = thisboard->name;
+
+ if ((irq >= 3 && irq <= 7) || (irq >= 9 && irq <= 12) ||
+ irq == 14 || irq == 15) {
+ ret = request_irq(irq, a2150_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0) {
+ devpriv->irq_dma_bits |= IRQ_LVL_BITS(irq);
+ dev->irq = irq;
}
- devpriv->irq_dma_bits |= IRQ_LVL_BITS(irq);
- dev->irq = irq;
}
- /* initialize dma */
- if (dma) {
- if (dma == 4 || dma > 7) {
- printk(" invalid dma channel %u\n", dma);
- return -EINVAL;
- }
- if (request_dma(dma, dev->driver->driver_name)) {
- printk(" failed to allocate dma channel %u\n", dma);
- return -EINVAL;
- }
- devpriv->dma = dma;
- devpriv->dma_buffer =
- kmalloc(A2150_DMA_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
- if (devpriv->dma_buffer == NULL)
- return -ENOMEM;
- disable_dma(dma);
- set_dma_mode(dma, DMA_MODE_READ);
+ if (dev->irq && dma <= 7 && dma != 4) {
+ ret = request_dma(dma, dev->board_name);
+ if (ret == 0) {
+ devpriv->dma = dma;
+ devpriv->dma_buffer = kmalloc(A2150_DMA_BUFFER_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!devpriv->dma_buffer)
+ return -ENOMEM;
- devpriv->irq_dma_bits |= DMA_CHAN_BITS(dma);
- }
+ disable_dma(dma);
+ set_dma_mode(dma, DMA_MODE_READ);
- dev->board_ptr = a2150_boards + a2150_probe(dev);
- thisboard = comedi_board(dev);
- dev->board_name = thisboard->name;
+ devpriv->irq_dma_bits |= DMA_CHAN_BITS(dma);
+ }
+ }
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
@@ -775,17 +733,20 @@ static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* analog input subdevice */
s = &dev->subdevices[0];
- dev->read_subdev = s;
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_OTHER | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_OTHER;
s->n_chan = 4;
- s->len_chanlist = 4;
s->maxdata = 0xffff;
s->range_table = &range_a2150;
- s->do_cmd = a2150_ai_cmd;
- s->do_cmdtest = a2150_ai_cmdtest;
s->insn_read = a2150_ai_rinsn;
- s->cancel = a2150_cancel;
+ if (dev->irq && devpriv->dma) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmd = a2150_ai_cmd;
+ s->do_cmdtest = a2150_ai_cmdtest;
+ s->cancel = a2150_cancel;
+ }
/* need to do this for software counting of completed conversions, to
* prevent hardware count from stopping acquisition */
diff --git a/drivers/staging/comedi/drivers/ni_atmio.c b/drivers/staging/comedi/drivers/ni_atmio.c
index 856c73d8b7cd..d03935257b97 100644
--- a/drivers/staging/comedi/drivers/ni_atmio.c
+++ b/drivers/staging/comedi/drivers/ni_atmio.c
@@ -98,8 +98,6 @@ are not supported.
#include "ni_stc.h"
#include "8255.h"
-#undef DEBUG
-
#define ATMIO 1
#undef PCIMIO
@@ -437,19 +435,6 @@ static int ni_atmio_attach(struct comedi_device *dev,
if (ret)
return ret;
-#ifdef DEBUG
- /* board existence sanity check */
- {
- int i;
-
- printk(" board fingerprint:");
- for (i = 0; i < 16; i += 2) {
- printk(" %04x %02x", inw(dev->iobase + i),
- inb(dev->iobase + i + 1));
- }
- }
-#endif
-
/* get board type */
board = ni_getboardtype(dev);
diff --git a/drivers/staging/comedi/drivers/ni_atmio16d.c b/drivers/staging/comedi/drivers/ni_atmio16d.c
index a9f7d40d6db2..e8cd5ddb85c5 100644
--- a/drivers/staging/comedi/drivers/ni_atmio16d.c
+++ b/drivers/staging/comedi/drivers/ni_atmio16d.c
@@ -105,40 +105,31 @@ struct atmio16_board_t {
};
/* range structs */
-static const struct comedi_lrange range_atmio16d_ai_10_bipolar = { 4, {
- BIP_RANGE
- (10),
- BIP_RANGE
- (1),
- BIP_RANGE
- (0.1),
- BIP_RANGE
- (0.02)
- }
+static const struct comedi_lrange range_atmio16d_ai_10_bipolar = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.02)
+ }
};
-static const struct comedi_lrange range_atmio16d_ai_5_bipolar = { 4, {
- BIP_RANGE
- (5),
- BIP_RANGE
- (0.5),
- BIP_RANGE
- (0.05),
- BIP_RANGE
- (0.01)
- }
+static const struct comedi_lrange range_atmio16d_ai_5_bipolar = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.01)
+ }
};
-static const struct comedi_lrange range_atmio16d_ai_unipolar = { 4, {
- UNI_RANGE
- (10),
- UNI_RANGE
- (1),
- UNI_RANGE
- (0.1),
- UNI_RANGE
- (0.02)
- }
+static const struct comedi_lrange range_atmio16d_ai_unipolar = {
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.02)
+ }
};
/* private data struct */
@@ -229,7 +220,7 @@ static void reset_atmio16d(struct comedi_device *dev)
static irqreturn_t atmio16d_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
comedi_buf_put(s->async, inw(dev->iobase + AD_FIFO_REG));
@@ -495,18 +486,13 @@ static int atmio16d_ai_insn_read(struct comedi_device *dev,
break;
}
if (status & STAT_AD_OVERFLOW) {
- printk(KERN_INFO "atmio16d: a/d FIFO overflow\n");
outw(0, dev->iobase + AD_CLEAR_REG);
-
return -ETIME;
}
}
/* end waiting, now check if it timed out */
- if (t == ATMIO16D_TIMEOUT) {
- printk(KERN_INFO "atmio16d: timeout\n");
-
+ if (t == ATMIO16D_TIMEOUT)
return -ETIME;
- }
}
return i;
@@ -636,7 +622,6 @@ static int atmio16d_attach(struct comedi_device *dev,
const struct atmio16_board_t *board = comedi_board(dev);
struct atmio16d_private *devpriv;
struct comedi_subdevice *s;
- unsigned int irq;
int ret;
ret = comedi_request_region(dev, it->options[0], ATMIO16D_SIZE);
@@ -654,19 +639,11 @@ static int atmio16d_attach(struct comedi_device *dev,
/* reset the atmio16d hardware */
reset_atmio16d(dev);
- /* check if our interrupt is available and get it */
- irq = it->options[1];
- if (irq) {
-
- ret = request_irq(irq, atmio16d_interrupt, 0, "atmio16d", dev);
- if (ret < 0) {
- printk(KERN_INFO "failed to allocate irq %u\n", irq);
- return ret;
- }
- dev->irq = irq;
- printk(KERN_INFO "( irq = %u )\n", irq);
- } else {
- printk(KERN_INFO "( no irq )");
+ if (it->options[1]) {
+ ret = request_irq(it->options[1], atmio16d_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
}
/* set device options */
@@ -682,16 +659,11 @@ static int atmio16d_attach(struct comedi_device *dev,
/* setup sub-devices */
s = &dev->subdevices[0];
- dev->read_subdev = s;
/* ai subdevice */
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = (devpriv->adc_mux ? 16 : 8);
- s->len_chanlist = 16;
s->insn_read = atmio16d_ai_insn_read;
- s->do_cmdtest = atmio16d_ai_cmdtest;
- s->do_cmd = atmio16d_ai_cmd;
- s->cancel = atmio16d_ai_cancel;
s->maxdata = 0xfff; /* 4095 decimal */
switch (devpriv->adc_range) {
case adc_bipolar10:
@@ -704,6 +676,14 @@ static int atmio16d_attach(struct comedi_device *dev,
s->range_table = &range_atmio16d_ai_unipolar;
break;
}
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 16;
+ s->do_cmdtest = atmio16d_ai_cmdtest;
+ s->do_cmd = atmio16d_ai_cmd;
+ s->cancel = atmio16d_ai_cancel;
+ }
/* ao subdevice */
s = &dev->subdevices[1];
@@ -756,7 +736,6 @@ static int atmio16d_attach(struct comedi_device *dev,
s->n_chan = 0;
s->maxdata = 0
#endif
- printk("\n");
return 0;
}
diff --git a/drivers/staging/comedi/drivers/ni_labpc_pci.c b/drivers/staging/comedi/drivers/ni_labpc_pci.c
index 8be681fca907..739597068297 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_pci.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_pci.c
@@ -107,7 +107,7 @@ static struct comedi_driver labpc_pci_comedi_driver = {
.detach = labpc_pci_detach,
};
-static DEFINE_PCI_DEVICE_TABLE(labpc_pci_table) = {
+static const struct pci_device_id labpc_pci_table[] = {
{ PCI_VDEVICE(NI, 0x161), BOARD_NI_PCI1200 },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 5113397bfecf..457b88481db0 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -52,10 +52,6 @@
fully tested as yet. Terry Barnaby, BEAM Ltd.
*/
-/* #define DEBUG_INTERRUPT */
-/* #define DEBUG_STATUS_A */
-/* #define DEBUG_STATUS_B */
-
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/delay.h>
@@ -63,10 +59,6 @@
#include "mite.h"
#include "comedi_fc.h"
-#ifndef MDPRINTK
-#define MDPRINTK(format, args...)
-#endif
-
/* A timeout count */
#define NI_TIMEOUT 1000
static const unsigned old_RTSI_clock_channel = 7;
@@ -86,111 +78,109 @@ static const short ni_gainlkup[][16] = {
[ai_gain_6143] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
};
-static const struct comedi_lrange range_ni_E_ai = { 16, {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-2.5, 2.5),
- RANGE(-1, 1),
- RANGE(-0.5, 0.5),
- RANGE(-0.25, 0.25),
- RANGE(-0.1, 0.1),
- RANGE(-0.05, 0.05),
- RANGE(0, 20),
- RANGE(0, 10),
- RANGE(0, 5),
- RANGE(0, 2),
- RANGE(0, 1),
- RANGE(0, 0.5),
- RANGE(0, 0.2),
- RANGE(0, 0.1),
- }
+static const struct comedi_lrange range_ni_E_ai = {
+ 16, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.25),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.05),
+ UNI_RANGE(20),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2),
+ UNI_RANGE(1),
+ UNI_RANGE(0.5),
+ UNI_RANGE(0.2),
+ UNI_RANGE(0.1)
+ }
};
-static const struct comedi_lrange range_ni_E_ai_limited = { 8, {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-1, 1),
- RANGE(-0.1,
- 0.1),
- RANGE(0, 10),
- RANGE(0, 5),
- RANGE(0, 1),
- RANGE(0, 0.1),
- }
+static const struct comedi_lrange range_ni_E_ai_limited = {
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1)
+ }
};
-static const struct comedi_lrange range_ni_E_ai_limited14 = { 14, {
- RANGE(-10,
- 10),
- RANGE(-5, 5),
- RANGE(-2, 2),
- RANGE(-1, 1),
- RANGE(-0.5,
- 0.5),
- RANGE(-0.2,
- 0.2),
- RANGE(-0.1,
- 0.1),
- RANGE(0, 10),
- RANGE(0, 5),
- RANGE(0, 2),
- RANGE(0, 1),
- RANGE(0,
- 0.5),
- RANGE(0,
- 0.2),
- RANGE(0,
- 0.1),
- }
+static const struct comedi_lrange range_ni_E_ai_limited14 = {
+ 14, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2),
+ BIP_RANGE(1),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.2),
+ BIP_RANGE(0.1),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2),
+ UNI_RANGE(1),
+ UNI_RANGE(0.5),
+ UNI_RANGE(0.2),
+ UNI_RANGE(0.1)
+ }
};
-static const struct comedi_lrange range_ni_E_ai_bipolar4 = { 4, {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-0.5,
- 0.5),
- RANGE(-0.05,
- 0.05),
- }
+static const struct comedi_lrange range_ni_E_ai_bipolar4 = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05)
+ }
};
-static const struct comedi_lrange range_ni_E_ai_611x = { 8, {
- RANGE(-50, 50),
- RANGE(-20, 20),
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-2, 2),
- RANGE(-1, 1),
- RANGE(-0.5, 0.5),
- RANGE(-0.2, 0.2),
- }
+static const struct comedi_lrange range_ni_E_ai_611x = {
+ 8, {
+ BIP_RANGE(50),
+ BIP_RANGE(20),
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2),
+ BIP_RANGE(1),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.2)
+ }
};
-static const struct comedi_lrange range_ni_M_ai_622x = { 4, {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-1, 1),
- RANGE(-0.2, 0.2),
- }
+static const struct comedi_lrange range_ni_M_ai_622x = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(1),
+ BIP_RANGE(0.2)
+ }
};
-static const struct comedi_lrange range_ni_M_ai_628x = { 7, {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-2, 2),
- RANGE(-1, 1),
- RANGE(-0.5, 0.5),
- RANGE(-0.2, 0.2),
- RANGE(-0.1, 0.1),
- }
+static const struct comedi_lrange range_ni_M_ai_628x = {
+ 7, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2),
+ BIP_RANGE(1),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.2),
+ BIP_RANGE(0.1)
+ }
};
-static const struct comedi_lrange range_ni_E_ao_ext = { 4, {
- RANGE(-10, 10),
- RANGE(0, 10),
- RANGE_ext(-1, 1),
- RANGE_ext(0, 1),
- }
+static const struct comedi_lrange range_ni_E_ao_ext = {
+ 4, {
+ BIP_RANGE(10),
+ UNI_RANGE(10),
+ RANGE_ext(-1, 1),
+ RANGE_ext(0, 1)
+ }
};
static const struct comedi_lrange *const ni_range_lkup[] = {
@@ -266,17 +256,6 @@ static int ni_rtsi_insn_config(struct comedi_device *dev,
static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s);
static int ni_read_eeprom(struct comedi_device *dev, int addr);
-#ifdef DEBUG_STATUS_A
-static void ni_mio_print_status_a(int status);
-#else
-#define ni_mio_print_status_a(a)
-#endif
-#ifdef DEBUG_STATUS_B
-static void ni_mio_print_status_b(int status);
-#else
-#define ni_mio_print_status_b(a)
-#endif
-
static int ni_ai_reset(struct comedi_device *dev, struct comedi_subdevice *s);
#ifndef PCIDMA
static void ni_handle_fifo_half_full(struct comedi_device *dev);
@@ -297,19 +276,8 @@ static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s);
static int ni_8255_callback(int dir, int port, int data, unsigned long arg);
-static int ni_gpct_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
-static int ni_gpct_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
-static int ni_gpct_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
#ifdef PCIDMA
static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
-static int ni_gpct_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd);
#endif
static int ni_gpct_cancel(struct comedi_device *dev,
struct comedi_subdevice *s);
@@ -322,10 +290,6 @@ static int cs5529_do_conversion(struct comedi_device *dev,
static int cs5529_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
-#ifdef NI_CS5529_DEBUG
-static unsigned int cs5529_config_read(struct comedi_device *dev,
- unsigned int reg_select_bits);
-#endif
static void cs5529_config_write(struct comedi_device *dev, unsigned int value,
unsigned int reg_select_bits);
@@ -487,11 +451,10 @@ static inline void ni_set_gpct_dma_channel(struct comedi_device *dev,
{
unsigned bitfield;
- if (mite_channel >= 0) {
+ if (mite_channel >= 0)
bitfield = GPCT_DMA_Select_Bits(gpct_index, mite_channel);
- } else {
+ else
bitfield = 0;
- }
ni_set_bitfield(dev, G0_G1_Select, GPCT_DMA_Select_Mask(gpct_index),
bitfield);
}
@@ -907,9 +870,8 @@ static void mite_handle_b_linkc(struct mite_struct *mite,
unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->ao_mite_chan) {
+ if (devpriv->ao_mite_chan)
mite_sync_output_dma(devpriv->ao_mite_chan, s->async);
- }
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
@@ -957,9 +919,8 @@ static void ni_handle_eos(struct comedi_device *dev, struct comedi_subdevice *s)
#endif
}
/* handle special case of single scan using AI_End_On_End_Of_Scan */
- if ((devpriv->ai_cmd2 & AI_End_On_End_Of_Scan)) {
+ if ((devpriv->ai_cmd2 & AI_End_On_End_Of_Scan))
shutdown_ai_command(dev);
- }
}
static void shutdown_ai_command(struct comedi_device *dev)
@@ -1023,19 +984,15 @@ static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status)
struct ni_private *devpriv = dev->private;
unsigned short ack = 0;
- if (a_status & AI_SC_TC_St) {
+ if (a_status & AI_SC_TC_St)
ack |= AI_SC_TC_Interrupt_Ack;
- }
- if (a_status & AI_START1_St) {
+ if (a_status & AI_START1_St)
ack |= AI_START1_Interrupt_Ack;
- }
- if (a_status & AI_START_St) {
+ if (a_status & AI_START_St)
ack |= AI_START_Interrupt_Ack;
- }
- if (a_status & AI_STOP_St) {
+ if (a_status & AI_STOP_St)
/* not sure why we used to ack the START here also, instead of doing it independently. Frank Hess 2007-07-06 */
- ack |= AI_STOP_Interrupt_Ack /*| AI_START_Interrupt_Ack */ ;
- }
+ ack |= AI_STOP_Interrupt_Ack /*| AI_START_Interrupt_Ack */;
if (ack)
devpriv->stc_writew(dev, ack, Interrupt_A_Ack_Register);
}
@@ -1050,16 +1007,9 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if (s->type == COMEDI_SUBD_UNUSED)
return;
-#ifdef DEBUG_INTERRUPT
- printk
- ("ni_mio_common: interrupt: a_status=%04x ai_mite_status=%08x\n",
- status, ai_mite_status);
- ni_mio_print_status_a(status);
-#endif
#ifdef PCIDMA
- if (ai_mite_status & CHSR_LINKC) {
+ if (ai_mite_status & CHSR_LINKC)
ni_sync_ai_dma(dev);
- }
if (ai_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
@@ -1067,7 +1017,6 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
printk
("unknown mite interrupt, ack! (ai_mite_status=%08x)\n",
ai_mite_status);
- /* mite_print_chsr(ai_mite_status); */
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
/* disable_irq(dev->irq); */
}
@@ -1092,7 +1041,6 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
AI_SC_TC_Error_St)) {
printk("ni_mio_common: ai error a_status=%04x\n",
status);
- ni_mio_print_status_a(status);
shutdown_ai_command(dev);
@@ -1105,12 +1053,8 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
return;
}
if (status & AI_SC_TC_St) {
-#ifdef DEBUG_INTERRUPT
- printk("ni_mio_common: SC_TC interrupt\n");
-#endif
- if (!devpriv->ai_continuous) {
+ if (!devpriv->ai_continuous)
shutdown_ai_command(dev);
- }
}
}
#ifndef PCIDMA
@@ -1129,20 +1073,10 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
}
#endif /* !PCIDMA */
- if ((status & AI_STOP_St)) {
+ if ((status & AI_STOP_St))
ni_handle_eos(dev, s);
- }
ni_event(dev, s);
-
-#ifdef DEBUG_INTERRUPT
- status = devpriv->stc_readw(dev, AI_Status_1_Register);
- if (status & Interrupt_A_St) {
- printk
- ("handle_a_interrupt: didn't clear interrupt? status=0x%x\n",
- status);
- }
-#endif
}
static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
@@ -1150,27 +1084,20 @@ static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
struct ni_private *devpriv = dev->private;
unsigned short ack = 0;
- if (b_status & AO_BC_TC_St) {
+ if (b_status & AO_BC_TC_St)
ack |= AO_BC_TC_Interrupt_Ack;
- }
- if (b_status & AO_Overrun_St) {
+ if (b_status & AO_Overrun_St)
ack |= AO_Error_Interrupt_Ack;
- }
- if (b_status & AO_START_St) {
+ if (b_status & AO_START_St)
ack |= AO_START_Interrupt_Ack;
- }
- if (b_status & AO_START1_St) {
+ if (b_status & AO_START1_St)
ack |= AO_START1_Interrupt_Ack;
- }
- if (b_status & AO_UC_TC_St) {
+ if (b_status & AO_UC_TC_St)
ack |= AO_UC_TC_Interrupt_Ack;
- }
- if (b_status & AO_UI2_TC_St) {
+ if (b_status & AO_UI2_TC_St)
ack |= AO_UI2_TC_Interrupt_Ack;
- }
- if (b_status & AO_UPDATE_St) {
+ if (b_status & AO_UPDATE_St)
ack |= AO_UPDATE_Interrupt_Ack;
- }
if (ack)
devpriv->stc_writew(dev, ack, Interrupt_B_Ack_Register);
}
@@ -1182,17 +1109,10 @@ static void handle_b_interrupt(struct comedi_device *dev,
struct comedi_subdevice *s = &dev->subdevices[NI_AO_SUBDEV];
/* unsigned short ack=0; */
-#ifdef DEBUG_INTERRUPT
- printk("ni_mio_common: interrupt: b_status=%04x m1_status=%08x\n",
- b_status, ao_mite_status);
- ni_mio_print_status_b(b_status);
-#endif
-
#ifdef PCIDMA
/* Currently, mite.c requires us to handle LINKC */
- if (ao_mite_status & CHSR_LINKC) {
+ if (ao_mite_status & CHSR_LINKC)
mite_handle_b_linkc(devpriv->mite, dev);
- }
if (ao_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
@@ -1200,7 +1120,6 @@ static void handle_b_interrupt(struct comedi_device *dev,
printk
("unknown mite interrupt, ack! (ao_mite_status=%08x)\n",
ao_mite_status);
- /* mite_print_chsr(ao_mite_status); */
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
}
#endif
@@ -1214,12 +1133,9 @@ static void handle_b_interrupt(struct comedi_device *dev,
s->async->events |= COMEDI_CB_OVERFLOW;
}
- if (b_status & AO_BC_TC_St) {
- MDPRINTK
- ("ni_mio_common: AO BC_TC status=0x%04x status2=0x%04x\n",
- b_status, devpriv->stc_readw(dev, AO_Status_2_Register));
+ if (b_status & AO_BC_TC_St)
s->async->events |= COMEDI_CB_EOA;
- }
+
#ifndef PCIDMA
if (b_status & AO_FIFO_Request_St) {
int ret;
@@ -1238,50 +1154,6 @@ static void handle_b_interrupt(struct comedi_device *dev,
ni_event(dev, s);
}
-#ifdef DEBUG_STATUS_A
-static const char *const status_a_strings[] = {
- "passthru0", "fifo", "G0_gate", "G0_TC",
- "stop", "start", "sc_tc", "start1",
- "start2", "sc_tc_error", "overflow", "overrun",
- "fifo_empty", "fifo_half_full", "fifo_full", "interrupt_a"
-};
-
-static void ni_mio_print_status_a(int status)
-{
- int i;
-
- printk("A status:");
- for (i = 15; i >= 0; i--) {
- if (status & (1 << i)) {
- printk(" %s", status_a_strings[i]);
- }
- }
- printk("\n");
-}
-#endif
-
-#ifdef DEBUG_STATUS_B
-static const char *const status_b_strings[] = {
- "passthru1", "fifo", "G1_gate", "G1_TC",
- "UI2_TC", "UPDATE", "UC_TC", "BC_TC",
- "start1", "overrun", "start", "bc_tc_error",
- "fifo_empty", "fifo_half_full", "fifo_full", "interrupt_b"
-};
-
-static void ni_mio_print_status_b(int status)
-{
- int i;
-
- printk("B status:");
- for (i = 15; i >= 0; i--) {
- if (status & (1 << i)) {
- printk(" %s", status_b_strings[i]);
- }
- }
- printk("\n");
-}
-#endif
-
#ifndef PCIDMA
static void ni_ao_fifo_load(struct comedi_device *dev,
@@ -1324,9 +1196,8 @@ static void ni_ao_fifo_load(struct comedi_device *dev,
chan %= cmd->chanlist_len;
}
async->cur_chan = chan;
- if (err == 0) {
+ if (err == 0)
async->events |= COMEDI_CB_OVERFLOW;
- }
}
/*
@@ -2392,7 +2263,6 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
unsigned int stop_count;
int interrupt_a_enable = 0;
- MDPRINTK("ni_ai_cmd\n");
if (dev->irq == 0) {
comedi_error(dev, "cannot run command without an irq");
return -EIO;
@@ -2630,15 +2500,11 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
ni_set_bits(dev, Interrupt_A_Enable_Register,
interrupt_a_enable, 1);
-
- MDPRINTK("Interrupt_A_Enable_Register = 0x%04x\n",
- devpriv->int_a_enable_reg);
} else {
/* interrupt on nothing */
ni_set_bits(dev, Interrupt_A_Enable_Register, ~0, 0);
/* XXX start polling if necessary */
- MDPRINTK("interrupting on nothing\n");
}
/* end configuration */
@@ -2664,7 +2530,6 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (retval)
return retval;
}
- /* mite_dump_regs(devpriv->mite); */
#endif
switch (cmd->start_src) {
@@ -2682,8 +2547,6 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
break;
}
- MDPRINTK("exit ni_ai_cmd\n");
-
return 0;
}
@@ -3248,11 +3111,10 @@ static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
devpriv->ao_mode2 &= ~AO_BC_Initial_Load_Source;
devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
- if (cmd->stop_src == TRIG_NONE) {
+ if (cmd->stop_src == TRIG_NONE)
devpriv->stc_writel(dev, 0xffffff, AO_BC_Load_A_Register);
- } else {
+ else
devpriv->stc_writel(dev, 0, AO_BC_Load_A_Register);
- }
devpriv->stc_writew(dev, AO_BC_Load, AO_Command_1_Register);
devpriv->ao_mode2 &= ~AO_UC_Initial_Load_Source;
devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
@@ -3513,9 +3375,8 @@ static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s)
if (board->reg_type & ni_reg_6xxx_mask) {
unsigned immediate_bits = 0;
unsigned i;
- for (i = 0; i < s->n_chan; ++i) {
+ for (i = 0; i < s->n_chan; ++i)
immediate_bits |= 1 << i;
- }
ao_win_out(immediate_bits, AO_Immediate_671x);
ao_win_out(CLEAR_WG, AO_Misc_611x);
}
@@ -3689,9 +3550,8 @@ static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return -EIO;
}
retval = ni_request_cdo_mite_channel(dev);
- if (retval < 0) {
+ if (retval < 0)
return retval;
- }
s->async->inttrig = &ni_cdo_inttrig;
return 0;
}
@@ -3773,9 +3633,8 @@ static void handle_cdio_interrupt(struct comedi_device *dev)
unsigned long flags;
#endif
- if ((board->reg_type & ni_reg_m_series_mask) == 0) {
+ if ((board->reg_type & ni_reg_m_series_mask) == 0)
return;
- }
#ifdef PCIDMA
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->cdo_mite_chan) {
@@ -3793,15 +3652,15 @@ static void handle_cdio_interrupt(struct comedi_device *dev)
cdio_status = ni_readl(M_Offset_CDIO_Status);
if (cdio_status & (CDO_Overrun_Bit | CDO_Underflow_Bit)) {
-/* printk("cdio error: statux=0x%x\n", cdio_status); */
+ /* printk("cdio error: statux=0x%x\n", cdio_status); */
ni_writel(CDO_Error_Interrupt_Confirm_Bit, M_Offset_CDIO_Command); /* XXX just guessing this is needed and does something useful */
s->async->events |= COMEDI_CB_OVERFLOW;
}
if (cdio_status & CDO_FIFO_Empty_Bit) {
-/* printk("cdio fifo empty\n"); */
+ /* printk("cdio fifo empty\n"); */
ni_writel(CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit,
M_Offset_CDIO_Command);
-/* s->async->events |= COMEDI_CB_EOA; */
+ /* s->async->events |= COMEDI_CB_EOA; */
}
ni_event(dev, s);
}
@@ -3819,10 +3678,6 @@ static int ni_serial_insn_config(struct comedi_device *dev,
switch (data[0]) {
case INSN_CONFIG_SERIAL_CLOCK:
-
-#ifdef DEBUG_DIO
- printk("SPI serial clock Config cd\n", data[1]);
-#endif
devpriv->serial_hw_mode = 1;
devpriv->dio_control |= DIO_HW_Serial_Enable;
@@ -3874,9 +3729,8 @@ static int ni_serial_insn_config(struct comedi_device *dev,
case INSN_CONFIG_BIDIRECTIONAL_DATA:
- if (devpriv->serial_interval_ns == 0) {
+ if (devpriv->serial_interval_ns == 0)
return -EINVAL;
- }
byte_out = data[1] & 0xFF;
@@ -3911,10 +3765,6 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev,
unsigned int status1;
int err = 0, count = 20;
-#ifdef DEBUG_DIO
- printk("ni_serial_hw_readwrite8: outputting 0x%x\n", data_out);
-#endif
-
devpriv->dio_output &= ~DIO_Serial_Data_Mask;
devpriv->dio_output |= DIO_Serial_Data_Out(data_out);
devpriv->stc_writew(dev, devpriv->dio_output, DIO_Output_Register);
@@ -3948,12 +3798,8 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev,
DIO_Serial_IO_In_Progress_St goes high one bit too early. */
udelay((devpriv->serial_interval_ns + 999) / 1000);
- if (data_in != NULL) {
+ if (data_in != NULL)
*data_in = devpriv->stc_readw(dev, DIO_Serial_Input_Register);
-#ifdef DEBUG_DIO
- printk("ni_serial_hw_readwrite8: inputted 0x%x\n", *data_in);
-#endif
- }
Error:
devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register);
@@ -3969,10 +3815,6 @@ static int ni_serial_sw_readwrite8(struct comedi_device *dev,
struct ni_private *devpriv = dev->private;
unsigned char mask, input = 0;
-#ifdef DEBUG_DIO
- printk("ni_serial_sw_readwrite8: outputting 0x%x\n", data_out);
-#endif
-
/* Wait for one bit before transfer */
udelay((devpriv->serial_interval_ns + 999) / 1000);
@@ -3981,9 +3823,8 @@ static int ni_serial_sw_readwrite8(struct comedi_device *dev,
because it is a per-subdevice field, and serial is
a separate subdevice from DIO. */
devpriv->dio_output &= ~DIO_SDOUT;
- if (data_out & mask) {
+ if (data_out & mask)
devpriv->dio_output |= DIO_SDOUT;
- }
devpriv->stc_writew(dev, devpriv->dio_output,
DIO_Output_Register);
@@ -4003,15 +3844,12 @@ static int ni_serial_sw_readwrite8(struct comedi_device *dev,
/* Input current bit */
if (devpriv->stc_readw(dev,
- DIO_Parallel_Input_Register) & DIO_SDIN)
- {
-/* printk("DIO_P_I_R: 0x%x\n", devpriv->stc_readw(dev, DIO_Parallel_Input_Register)); */
+ DIO_Parallel_Input_Register) & DIO_SDIN) {
+ /* printk("DIO_P_I_R: 0x%x\n", devpriv->stc_readw(dev, DIO_Parallel_Input_Register)); */
input |= mask;
}
}
-#ifdef DEBUG_DIO
- printk("ni_serial_sw_readwrite8: inputted 0x%x\n", input);
-#endif
+
if (data_in)
*data_in = input;
@@ -4023,9 +3861,8 @@ static void mio_common_detach(struct comedi_device *dev)
struct ni_private *devpriv = dev->private;
if (devpriv) {
- if (devpriv->counter_dev) {
+ if (devpriv->counter_dev)
ni_gpct_device_destroy(devpriv->counter_dev);
- }
}
}
@@ -4044,82 +3881,82 @@ static unsigned ni_gpct_to_stc_register(enum ni_gpct_register reg)
{
unsigned stc_register;
switch (reg) {
- case NITIO_G0_Autoincrement_Reg:
+ case NITIO_G0_AUTO_INC:
stc_register = G_Autoincrement_Register(0);
break;
- case NITIO_G1_Autoincrement_Reg:
+ case NITIO_G1_AUTO_INC:
stc_register = G_Autoincrement_Register(1);
break;
- case NITIO_G0_Command_Reg:
+ case NITIO_G0_CMD:
stc_register = G_Command_Register(0);
break;
- case NITIO_G1_Command_Reg:
+ case NITIO_G1_CMD:
stc_register = G_Command_Register(1);
break;
- case NITIO_G0_HW_Save_Reg:
+ case NITIO_G0_HW_SAVE:
stc_register = G_HW_Save_Register(0);
break;
- case NITIO_G1_HW_Save_Reg:
+ case NITIO_G1_HW_SAVE:
stc_register = G_HW_Save_Register(1);
break;
- case NITIO_G0_SW_Save_Reg:
+ case NITIO_G0_SW_SAVE:
stc_register = G_Save_Register(0);
break;
- case NITIO_G1_SW_Save_Reg:
+ case NITIO_G1_SW_SAVE:
stc_register = G_Save_Register(1);
break;
- case NITIO_G0_Mode_Reg:
+ case NITIO_G0_MODE:
stc_register = G_Mode_Register(0);
break;
- case NITIO_G1_Mode_Reg:
+ case NITIO_G1_MODE:
stc_register = G_Mode_Register(1);
break;
- case NITIO_G0_LoadA_Reg:
+ case NITIO_G0_LOADA:
stc_register = G_Load_A_Register(0);
break;
- case NITIO_G1_LoadA_Reg:
+ case NITIO_G1_LOADA:
stc_register = G_Load_A_Register(1);
break;
- case NITIO_G0_LoadB_Reg:
+ case NITIO_G0_LOADB:
stc_register = G_Load_B_Register(0);
break;
- case NITIO_G1_LoadB_Reg:
+ case NITIO_G1_LOADB:
stc_register = G_Load_B_Register(1);
break;
- case NITIO_G0_Input_Select_Reg:
+ case NITIO_G0_INPUT_SEL:
stc_register = G_Input_Select_Register(0);
break;
- case NITIO_G1_Input_Select_Reg:
+ case NITIO_G1_INPUT_SEL:
stc_register = G_Input_Select_Register(1);
break;
- case NITIO_G01_Status_Reg:
+ case NITIO_G01_STATUS:
stc_register = G_Status_Register;
break;
- case NITIO_G01_Joint_Reset_Reg:
+ case NITIO_G01_RESET:
stc_register = Joint_Reset_Register;
break;
- case NITIO_G01_Joint_Status1_Reg:
+ case NITIO_G01_STATUS1:
stc_register = Joint_Status_1_Register;
break;
- case NITIO_G01_Joint_Status2_Reg:
+ case NITIO_G01_STATUS2:
stc_register = Joint_Status_2_Register;
break;
- case NITIO_G0_Interrupt_Acknowledge_Reg:
+ case NITIO_G0_INT_ACK:
stc_register = Interrupt_A_Ack_Register;
break;
- case NITIO_G1_Interrupt_Acknowledge_Reg:
+ case NITIO_G1_INT_ACK:
stc_register = Interrupt_B_Ack_Register;
break;
- case NITIO_G0_Status_Reg:
+ case NITIO_G0_STATUS:
stc_register = AI_Status_1_Register;
break;
- case NITIO_G1_Status_Reg:
+ case NITIO_G1_STATUS:
stc_register = AO_Status_1_Register;
break;
- case NITIO_G0_Interrupt_Enable_Reg:
+ case NITIO_G0_INT_ENA:
stc_register = Interrupt_A_Enable_Register;
break;
- case NITIO_G1_Interrupt_Enable_Reg:
+ case NITIO_G1_INT_ENA:
stc_register = Interrupt_B_Enable_Register;
break;
default:
@@ -4147,52 +3984,52 @@ static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
switch (reg) {
/* m-series-only registers */
- case NITIO_G0_Counting_Mode_Reg:
+ case NITIO_G0_CNT_MODE:
ni_writew(bits, M_Offset_G0_Counting_Mode);
break;
- case NITIO_G1_Counting_Mode_Reg:
+ case NITIO_G1_CNT_MODE:
ni_writew(bits, M_Offset_G1_Counting_Mode);
break;
- case NITIO_G0_Second_Gate_Reg:
+ case NITIO_G0_GATE2:
ni_writew(bits, M_Offset_G0_Second_Gate);
break;
- case NITIO_G1_Second_Gate_Reg:
+ case NITIO_G1_GATE2:
ni_writew(bits, M_Offset_G1_Second_Gate);
break;
- case NITIO_G0_DMA_Config_Reg:
+ case NITIO_G0_DMA_CFG:
ni_writew(bits, M_Offset_G0_DMA_Config);
break;
- case NITIO_G1_DMA_Config_Reg:
+ case NITIO_G1_DMA_CFG:
ni_writew(bits, M_Offset_G1_DMA_Config);
break;
- case NITIO_G0_ABZ_Reg:
+ case NITIO_G0_ABZ:
ni_writew(bits, M_Offset_G0_MSeries_ABZ);
break;
- case NITIO_G1_ABZ_Reg:
+ case NITIO_G1_ABZ:
ni_writew(bits, M_Offset_G1_MSeries_ABZ);
break;
/* 32 bit registers */
- case NITIO_G0_LoadA_Reg:
- case NITIO_G1_LoadA_Reg:
- case NITIO_G0_LoadB_Reg:
- case NITIO_G1_LoadB_Reg:
+ case NITIO_G0_LOADA:
+ case NITIO_G1_LOADA:
+ case NITIO_G0_LOADB:
+ case NITIO_G1_LOADB:
stc_register = ni_gpct_to_stc_register(reg);
devpriv->stc_writel(dev, bits, stc_register);
break;
/* 16 bit registers */
- case NITIO_G0_Interrupt_Enable_Reg:
+ case NITIO_G0_INT_ENA:
BUG_ON(bits & ~gpct_interrupt_a_enable_mask);
ni_set_bitfield(dev, Interrupt_A_Enable_Register,
gpct_interrupt_a_enable_mask, bits);
break;
- case NITIO_G1_Interrupt_Enable_Reg:
+ case NITIO_G1_INT_ENA:
BUG_ON(bits & ~gpct_interrupt_b_enable_mask);
ni_set_bitfield(dev, Interrupt_B_Enable_Register,
gpct_interrupt_b_enable_mask, bits);
break;
- case NITIO_G01_Joint_Reset_Reg:
+ case NITIO_G01_RESET:
BUG_ON(bits & ~gpct_joint_reset_mask);
/* fall-through */
default:
@@ -4210,21 +4047,18 @@ static unsigned ni_gpct_read_register(struct ni_gpct *counter,
switch (reg) {
/* m-series only registers */
- case NITIO_G0_DMA_Status_Reg:
+ case NITIO_G0_DMA_STATUS:
return ni_readw(M_Offset_G0_DMA_Status);
- break;
- case NITIO_G1_DMA_Status_Reg:
+ case NITIO_G1_DMA_STATUS:
return ni_readw(M_Offset_G1_DMA_Status);
- break;
/* 32 bit registers */
- case NITIO_G0_HW_Save_Reg:
- case NITIO_G1_HW_Save_Reg:
- case NITIO_G0_SW_Save_Reg:
- case NITIO_G1_SW_Save_Reg:
+ case NITIO_G0_HW_SAVE:
+ case NITIO_G1_HW_SAVE:
+ case NITIO_G0_SW_SAVE:
+ case NITIO_G1_SW_SAVE:
stc_register = ni_gpct_to_stc_register(reg);
return devpriv->stc_readl(dev, stc_register);
- break;
/* 16 bit registers */
default:
@@ -4391,11 +4225,10 @@ static int ni_E_init(struct comedi_device *dev)
s->maxdata = (1 << board->aobits) - 1;
s->range_table = board->ao_range_table;
s->insn_read = &ni_ao_insn_read;
- if (board->reg_type & ni_reg_6xxx_mask) {
+ if (board->reg_type & ni_reg_6xxx_mask)
s->insn_write = &ni_ao_insn_write_671x;
- } else {
+ else
s->insn_write = &ni_ao_insn_write;
- }
s->insn_config = &ni_ao_insn_config;
#ifdef PCIDMA
if (board->n_aochan) {
@@ -4429,7 +4262,7 @@ static int ni_E_init(struct comedi_device *dev)
s->n_chan = board->num_p0_dio_channels;
if (board->reg_type & ni_reg_m_series_mask) {
s->subdev_flags |=
- SDF_LSAMPL | SDF_CMD_WRITE /* | SDF_CMD_READ */ ;
+ SDF_LSAMPL | SDF_CMD_WRITE /* | SDF_CMD_READ */;
s->insn_bits = &ni_m_series_dio_insn_bits;
s->insn_config = &ni_m_series_dio_insn_config;
s->do_cmd = &ni_cdio_cmd;
@@ -4449,11 +4282,10 @@ static int ni_E_init(struct comedi_device *dev)
/* 8255 device */
s = &dev->subdevices[NI_8255_DIO_SUBDEV];
- if (board->has_8255) {
+ if (board->has_8255)
subdev_8255_init(dev, s, ni_8255_callback, (unsigned long)dev);
- } else {
+ else
s->type = COMEDI_SUBD_UNUSED;
- }
/* formerly general purpose counter/timer device, but no longer used */
s = &dev->subdevices[NI_UNUSED_SUBDEV];
@@ -4511,9 +4343,8 @@ static int ni_E_init(struct comedi_device *dev)
s->n_chan = 10;
}
s->maxdata = 1;
- if (board->reg_type & ni_reg_m_series_mask) {
+ if (board->reg_type & ni_reg_m_series_mask)
s->insn_bits = &ni_pfi_insn_bits;
- }
s->insn_config = &ni_pfi_insn_config;
ni_set_bits(dev, IO_Bidirection_Pin_Register, ~0, 0);
@@ -4553,11 +4384,10 @@ static int ni_E_init(struct comedi_device *dev)
s->insn_config = ni_rtsi_insn_config;
ni_rtsi_init(dev);
- if (board->reg_type & ni_reg_m_series_mask) {
+ if (board->reg_type & ni_reg_m_series_mask)
counter_variant = ni_gpct_variant_m_series;
- } else {
+ else
counter_variant = ni_gpct_variant_e_series;
- }
devpriv->counter_dev = ni_gpct_device_construct(dev,
&ni_gpct_write_register,
&ni_gpct_read_register,
@@ -4573,14 +4403,14 @@ static int ni_E_init(struct comedi_device *dev)
s->maxdata = 0xffffffff;
else
s->maxdata = 0xffffff;
- s->insn_read = &ni_gpct_insn_read;
- s->insn_write = &ni_gpct_insn_write;
- s->insn_config = &ni_gpct_insn_config;
+ s->insn_read = ni_tio_insn_read;
+ s->insn_write = ni_tio_insn_write;
+ s->insn_config = ni_tio_insn_config;
#ifdef PCIDMA
s->subdev_flags |= SDF_CMD_READ /* | SDF_CMD_WRITE */;
s->do_cmd = &ni_gpct_cmd;
s->len_chanlist = 1;
- s->do_cmdtest = &ni_gpct_cmdtest;
+ s->do_cmdtest = ni_tio_cmdtest;
s->cancel = &ni_gpct_cancel;
s->async_dma_dir = DMA_BIDIRECTIONAL;
#endif
@@ -4901,7 +4731,7 @@ static int pack_ad8842(int addr, int val, int *bitstring);
struct caldac_struct {
int n_chans;
int n_bits;
- int (*packbits) (int, int, int *);
+ int (*packbits)(int, int, int *);
};
static struct caldac_struct caldacs[] = {
@@ -4944,9 +4774,8 @@ static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s)
if (diffbits) {
unsigned int *maxdata_list;
- if (n_chans > MAX_N_CALDACS) {
+ if (n_chans > MAX_N_CALDACS)
printk("BUG! MAX_N_CALDACS too small\n");
- }
s->maxdata_list = maxdata_list = devpriv->caldac_maxdata_list;
chan = 0;
for (i = 0; i < n_dacs; i++) {
@@ -5143,36 +4972,11 @@ static void GPCT_Reset(struct comedi_device *dev, int chan)
#endif
-static int ni_gpct_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct ni_gpct *counter = s->private;
- return ni_tio_insn_config(counter, insn, data);
-}
-
-static int ni_gpct_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct ni_gpct *counter = s->private;
- return ni_tio_rinsn(counter, insn, data);
-}
-
-static int ni_gpct_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct ni_gpct *counter = s->private;
- return ni_tio_winsn(counter, insn, data);
-}
-
#ifdef PCIDMA
static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
- int retval;
struct ni_gpct *counter = s->private;
-/* const struct comedi_cmd *cmd = &s->async->cmd; */
+ int retval;
retval = ni_request_gpct_mite_channel(dev, counter->counter_index,
COMEDI_INPUT);
@@ -5183,19 +4987,8 @@ static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
}
ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL);
ni_e_series_enable_second_irq(dev, counter->counter_index, 1);
- retval = ni_tio_cmd(counter, s->async);
- return retval;
-}
-#endif
-#ifdef PCIDMA
-static int ni_gpct_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- struct ni_gpct *counter = s->private;
-
- return ni_tio_cmdtest(counter, cmd);
- return -ENOTSUPP;
+ return ni_tio_cmd(dev, s);
}
#endif
@@ -5330,9 +5123,8 @@ static int ni_config_filter(struct comedi_device *dev, unsigned pfi_channel,
struct ni_private *devpriv __maybe_unused = dev->private;
unsigned bits;
- if ((board->reg_type & ni_reg_m_series_mask) == 0) {
+ if ((board->reg_type & ni_reg_m_series_mask) == 0)
return -ENOTSUPP;
- }
bits = ni_readl(M_Offset_PFI_Filter);
bits &= ~MSeries_PFI_Filter_Select_Mask(pfi_channel);
bits |= MSeries_PFI_Filter_Select_Bits(pfi_channel, filter);
@@ -5413,9 +5205,8 @@ static void ni_rtsi_init(struct comedi_device *dev)
/* Set clock mode to internal */
devpriv->clock_and_fout2 = MSeries_RTSI_10MHz_Bit;
- if (ni_set_master_clock(dev, NI_MIO_INTERNAL_CLOCK, 0) < 0) {
+ if (ni_set_master_clock(dev, NI_MIO_INTERNAL_CLOCK, 0) < 0)
printk("ni_set_master_clock failed, bug?");
- }
/* default internal lines routing to RTSI bus lines */
devpriv->rtsi_trig_a_output_reg =
RTSI_Trig_Output_Bits(0,
@@ -5598,9 +5389,8 @@ static int ni_mseries_set_pll_master_clock(struct comedi_device *dev,
devpriv->clock_source = source;
/* it seems to typically take a few hundred microseconds for PLL to lock */
for (i = 0; i < timeout; ++i) {
- if (ni_readw(M_Offset_PLL_Status) & MSeries_PLL_Locked_Bit) {
+ if (ni_readw(M_Offset_PLL_Status) & MSeries_PLL_Locked_Bit)
break;
- }
udelay(1);
}
if (i == timeout) {
@@ -5822,13 +5612,11 @@ static int cs5529_wait_for_idle(struct comedi_device *dev)
for (i = 0; i < timeout; i++) {
status = ni_ao_win_inw(dev, CAL_ADC_Status_67xx);
- if ((status & CSS_ADC_BUSY) == 0) {
+ if ((status & CSS_ADC_BUSY) == 0)
break;
- }
set_current_state(TASK_INTERRUPTIBLE);
- if (schedule_timeout(1)) {
+ if (schedule_timeout(1))
return -EIO;
- }
}
/* printk("looped %i times waiting for idle\n", i); */
if (i == timeout) {
@@ -5854,9 +5642,8 @@ static void cs5529_command(struct comedi_device *dev, unsigned short value)
udelay(1);
}
/* printk("looped %i times writing command to cs5529\n", i); */
- if (i == timeout) {
+ if (i == timeout)
comedi_error(dev, "possible problem - never saw adc go busy?");
- }
}
/* write to cs5529 register */
@@ -5873,25 +5660,6 @@ static void cs5529_config_write(struct comedi_device *dev, unsigned int value,
comedi_error(dev, "time or signal in cs5529_config_write()");
}
-#ifdef NI_CS5529_DEBUG
-/* read from cs5529 register */
-static unsigned int cs5529_config_read(struct comedi_device *dev,
- unsigned int reg_select_bits)
-{
- unsigned int value;
-
- reg_select_bits &= CSCMD_REGISTER_SELECT_MASK;
- cs5529_command(dev, CSCMD_COMMAND | CSCMD_READ | reg_select_bits);
- if (cs5529_wait_for_idle(dev))
- comedi_error(dev, "timeout or signal in cs5529_config_read()");
- value = (ni_ao_win_inw(dev,
- CAL_ADC_Config_Data_High_Word_67xx) << 16) &
- 0xff0000;
- value |= ni_ao_win_inw(dev, CAL_ADC_Config_Data_Low_Word_67xx) & 0xffff;
- return value;
-}
-#endif
-
static int cs5529_do_conversion(struct comedi_device *dev, unsigned short *data)
{
int retval;
@@ -5968,12 +5736,5 @@ static int init_cs5529(struct comedi_device *dev)
if (cs5529_wait_for_idle(dev))
comedi_error(dev, "timeout or signal in init_cs5529()\n");
#endif
-#ifdef NI_CS5529_DEBUG
- printk("config: 0x%x\n", cs5529_config_read(dev,
- CSCMD_CONFIG_REGISTER));
- printk("gain: 0x%x\n", cs5529_config_read(dev, CSCMD_GAIN_REGISTER));
- printk("offset: 0x%x\n", cs5529_config_read(dev,
- CSCMD_OFFSET_REGISTER));
-#endif
return 0;
}
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index 229a273f2016..de421486b758 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -47,8 +47,6 @@ See the notes in the ni_atmio.o driver.
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
-#undef DEBUG
-
#define ATMIO 1
#undef PCIMIO
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index e3a8fa96d9b3..30c46a3c1767 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -47,8 +47,6 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org
*/
#define USE_DMA
-/* #define DEBUG 1 */
-/* #define DEBUG_FLAGS */
#include <linux/module.h>
#include <linux/delay.h>
@@ -60,13 +58,6 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org
#include "comedi_fc.h"
#include "mite.h"
-#undef DPRINTK
-#ifdef DEBUG
-#define DPRINTK(format, args...) pr_debug(format, ## args)
-#else
-#define DPRINTK(format, args...) do { } while (0)
-#endif
-
#define PCI_DIO_SIZE 4096
#define PCI_MITE_SIZE 4096
@@ -319,14 +310,6 @@ static int ni_pcidio_ns_to_timer(int *nanosec, int round_mode);
static int setup_mite_dma(struct comedi_device *dev,
struct comedi_subdevice *s);
-#ifdef DEBUG_FLAGS
-static void ni_pcidio_print_flags(unsigned int flags);
-static void ni_pcidio_print_status(unsigned int status);
-#else
-#define ni_pcidio_print_flags(x)
-#define ni_pcidio_print_status(x)
-#endif
-
static int ni_pcidio_request_di_mite_channel(struct comedi_device *dev)
{
struct nidio96_private *devpriv = dev->private;
@@ -401,7 +384,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct nidio96_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct mite_struct *mite = devpriv->mite;
@@ -427,19 +410,10 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
Interrupt_And_Window_Status);
flags = readb(devpriv->mite->daq_io_addr + Group_1_Flags);
- DPRINTK("ni_pcidio_interrupt: status=0x%02x,flags=0x%02x\n",
- status, flags);
- ni_pcidio_print_flags(flags);
- ni_pcidio_print_status(status);
-
spin_lock(&devpriv->mite_channel_lock);
if (devpriv->di_mite_chan)
m_status = mite_get_status(devpriv->di_mite_chan);
-#ifdef MITE_DEBUG
- mite_print_chsr(m_status);
-#endif
- /* mite_dump_regs(mite); */
if (m_status & CHSR_INT) {
if (m_status & CHSR_LINKC) {
writel(CHOR_CLRLC,
@@ -450,7 +424,8 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
}
if (m_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_DRDY |
CHSR_DRQ1 | CHSR_MRDY)) {
- DPRINTK("unknown mite interrupt, disabling IRQ\n");
+ dev_dbg(dev->class_dev,
+ "unknown mite interrupt, disabling IRQ\n");
async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
disable_irq(dev->irq);
}
@@ -460,7 +435,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
while (status & DataLeft) {
work++;
if (work > 20) {
- DPRINTK("too much work in interrupt\n");
+ dev_dbg(dev->class_dev, "too much work in interrupt\n");
writeb(0x00,
devpriv->mite->daq_io_addr +
Master_DMA_And_Interrupt_Control);
@@ -470,11 +445,11 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
flags &= IntEn;
if (flags & TransferReady) {
- /* DPRINTK("TransferReady\n"); */
while (flags & TransferReady) {
work++;
if (work > 100) {
- DPRINTK("too much work in interrupt\n");
+ dev_dbg(dev->class_dev,
+ "too much work in interrupt\n");
writeb(0x00,
devpriv->mite->daq_io_addr +
Master_DMA_And_Interrupt_Control
@@ -488,21 +463,13 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
data2 = (auxdata & 0xffff0000) >> 16;
comedi_buf_put(async, data1);
comedi_buf_put(async, data2);
- /* DPRINTK("read:%d, %d\n",data1,data2); */
flags = readb(devpriv->mite->daq_io_addr +
Group_1_Flags);
}
- /* DPRINTK("buf_int_count: %d\n",
- async->buf_int_count); */
- /* DPRINTK("1) IntEn=%d,flags=%d,status=%d\n",
- IntEn,flags,status); */
- /* ni_pcidio_print_flags(flags); */
- /* ni_pcidio_print_status(status); */
async->events |= COMEDI_CB_BLOCK;
}
if (flags & CountExpired) {
- DPRINTK("CountExpired\n");
writeb(ClearExpired,
devpriv->mite->daq_io_addr +
Group_1_Second_Clear);
@@ -511,41 +478,26 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
writeb(0x00, devpriv->mite->daq_io_addr + OpMode);
break;
} else if (flags & Waited) {
- DPRINTK("Waited\n");
writeb(ClearWaited,
devpriv->mite->daq_io_addr +
Group_1_First_Clear);
async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
break;
} else if (flags & PrimaryTC) {
- DPRINTK("PrimaryTC\n");
writeb(ClearPrimaryTC,
devpriv->mite->daq_io_addr +
Group_1_First_Clear);
async->events |= COMEDI_CB_EOA;
} else if (flags & SecondaryTC) {
- DPRINTK("SecondaryTC\n");
writeb(ClearSecondaryTC,
devpriv->mite->daq_io_addr +
Group_1_First_Clear);
async->events |= COMEDI_CB_EOA;
}
-#if 0
- else {
- DPRINTK("ni_pcidio: unknown interrupt\n");
- async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- writeb(0x00,
- devpriv->mite->daq_io_addr +
- Master_DMA_And_Interrupt_Control);
- }
-#endif
+
flags = readb(devpriv->mite->daq_io_addr + Group_1_Flags);
status = readb(devpriv->mite->daq_io_addr +
Interrupt_And_Window_Status);
- /* DPRINTK("loop end: IntEn=0x%02x,flags=0x%02x,"
- "status=0x%02x\n", IntEn, flags, status); */
- /* ni_pcidio_print_flags(flags); */
- /* ni_pcidio_print_status(status); */
}
out:
@@ -562,82 +514,6 @@ out:
return IRQ_HANDLED;
}
-#ifdef DEBUG_FLAGS
-static const char *bit_set_string(unsigned int bits, unsigned int bit,
- const char *const strings[])
-{
- return (bits & (1U << bit)) ? strings[bit] : "";
-}
-
-static const char *const flags_strings[] = {
- " TransferReady", " CountExpired", " 2", " 3",
- " 4", " Waited", " PrimaryTC", " SecondaryTC",
-};
-
-
-static void ni_pcidio_print_flags(unsigned int flags)
-{
- pr_debug("group_1_flags:%s%s%s%s%s%s%s%s\n",
- bit_set_string(flags, 7, flags_strings),
- bit_set_string(flags, 6, flags_strings),
- bit_set_string(flags, 5, flags_strings),
- bit_set_string(flags, 4, flags_strings),
- bit_set_string(flags, 3, flags_strings),
- bit_set_string(flags, 2, flags_strings),
- bit_set_string(flags, 1, flags_strings),
- bit_set_string(flags, 0, flags_strings));
-}
-
-static const char *const status_strings[] = {
- " DataLeft1", " Reserved1", " Req1", " StopTrig1",
- " DataLeft2", " Reserved2", " Req2", " StopTrig2",
-};
-
-static void ni_pcidio_print_status(unsigned int flags)
-{
- pr_debug("group_status:%s%s%s%s%s%s%s%s\n",
- bit_set_string(flags, 7, status_strings),
- bit_set_string(flags, 6, status_strings),
- bit_set_string(flags, 5, status_strings),
- bit_set_string(flags, 4, status_strings),
- bit_set_string(flags, 3, status_strings),
- bit_set_string(flags, 2, status_strings),
- bit_set_string(flags, 1, status_strings),
- bit_set_string(flags, 0, status_strings));
-}
-#endif
-
-#ifdef unused
-static void debug_int(struct comedi_device *dev)
-{
- struct nidio96_private *devpriv = dev->private;
- int a, b;
- static int n_int;
- struct timeval tv;
-
- do_gettimeofday(&tv);
- a = readb(devpriv->mite->daq_io_addr + Group_Status);
- b = readb(devpriv->mite->daq_io_addr + Group_1_Flags);
-
- if (n_int < 10) {
- DPRINTK("status 0x%02x flags 0x%02x time %06d\n", a, b,
- (int)tv.tv_usec);
- }
-
- while (b & 1) {
- writew(0xff, devpriv->mite->daq_io_addr + Group_1_FIFO);
- b = readb(devpriv->mite->daq_io_addr + Group_1_Flags);
- }
-
- b = readb(devpriv->mite->daq_io_addr + Group_1_Flags);
-
- if (n_int < 10) {
- DPRINTK("new status 0x%02x\n", b);
- n_int++;
- }
-}
-#endif
-
static int ni_pcidio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -883,7 +759,6 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
s->async->inttrig = ni_pcidio_inttrig;
}
- DPRINTK("ni_pcidio: command started\n");
return 0;
}
@@ -1074,6 +949,19 @@ static int pci_6534_upload_firmware(struct comedi_device *dev)
return ret;
}
+static void nidio_reset_board(struct comedi_device *dev)
+{
+ struct nidio96_private *devpriv = dev->private;
+ void __iomem *daq_mmio = devpriv->mite->daq_io_addr;
+
+ writel(0, daq_mmio + Port_IO(0));
+ writel(0, daq_mmio + Port_Pin_Directions(0));
+ writel(0, daq_mmio + Port_Pin_Mask(0));
+
+ /* disable interrupts on board */
+ writeb(0, daq_mmio + Master_DMA_And_Interrupt_Control);
+}
+
static int nidio_auto_attach(struct comedi_device *dev,
unsigned long context)
{
@@ -1115,13 +1003,14 @@ static int nidio_auto_attach(struct comedi_device *dev,
if (devpriv->di_mite_ring == NULL)
return -ENOMEM;
- irq = mite_irq(devpriv->mite);
if (board->uses_firmware) {
ret = pci_6534_upload_firmware(dev);
if (ret < 0)
return ret;
}
+ nidio_reset_board(dev);
+
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
return ret;
@@ -1149,21 +1038,13 @@ static int nidio_auto_attach(struct comedi_device *dev,
s->async_dma_dir = DMA_BIDIRECTIONAL;
s->poll = &ni_pcidio_poll;
- writel(0, devpriv->mite->daq_io_addr + Port_IO(0));
- writel(0, devpriv->mite->daq_io_addr + Port_Pin_Directions(0));
- writel(0, devpriv->mite->daq_io_addr + Port_Pin_Mask(0));
-
- /* disable interrupts on board */
- writeb(0x00,
- devpriv->mite->daq_io_addr +
- Master_DMA_And_Interrupt_Control);
-
- ret = request_irq(irq, nidio_interrupt, IRQF_SHARED,
- "ni_pcidio", dev);
- if (ret < 0)
- dev_warn(dev->class_dev, "irq not available\n");
-
- dev->irq = irq;
+ irq = mite_irq(devpriv->mite);
+ if (irq) {
+ ret = request_irq(irq, nidio_interrupt, IRQF_SHARED,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = irq;
+ }
return 0;
}
@@ -1200,7 +1081,7 @@ static int ni_pcidio_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni_pcidio_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(ni_pcidio_pci_table) = {
+static const struct pci_device_id ni_pcidio_pci_table[] = {
{ PCI_VDEVICE(NI, 0x1150), BOARD_PCIDIO_32HS },
{ PCI_VDEVICE(NI, 0x12b0), BOARD_PCI6534 },
{ PCI_VDEVICE(NI, 0x1320), BOARD_PXI6533 },
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 536be83af549..0ed980455875 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -116,8 +116,6 @@ Bugs:
#include "ni_stc.h"
#include "mite.h"
-/* #define PCI_DEBUG */
-
#define PCIDMA
#define PCIMIO 1
@@ -134,24 +132,26 @@ Bugs:
63 different possibilities. An AO channel
cannot act as its own OFFSET or REFERENCE.
*/
-static const struct comedi_lrange range_ni_M_628x_ao = { 8, {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE(-2, 2),
- RANGE(-1, 1),
- RANGE(-5, 15),
- RANGE(0, 10),
- RANGE(3, 7),
- RANGE(4, 6),
- RANGE_ext(-1, 1)
- }
+static const struct comedi_lrange range_ni_M_628x_ao = {
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2),
+ BIP_RANGE(1),
+ RANGE(-5, 15),
+ UNI_RANGE(10),
+ RANGE(3, 7),
+ RANGE(4, 6),
+ RANGE_ext(-1, 1)
+ }
};
-static const struct comedi_lrange range_ni_M_625x_ao = { 3, {
- RANGE(-10, 10),
- RANGE(-5, 5),
- RANGE_ext(-1, 1)
- }
+static const struct comedi_lrange range_ni_M_625x_ao = {
+ 3, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ RANGE_ext(-1, 1)
+ }
};
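(The rewritten tables above swap explicit RANGE(-x, x) / RANGE(0, x) entries for the bipolar/unipolar shorthands while keeping the same voltage limits. A small standalone illustration of that equivalence follows; the DEMO_* macros are hypothetical stand-ins, not comedi's own definitions, which also carry unit flags.)

#include <assert.h>

struct demo_range { int min_mv; int max_mv; };

#define DEMO_RANGE(a, b)	{ (a) * 1000, (b) * 1000 }
#define DEMO_BIP_RANGE(a)	DEMO_RANGE(-(a), (a))	/* bipolar: +/- a volts */
#define DEMO_UNI_RANGE(a)	DEMO_RANGE(0, (a))	/* unipolar: 0 .. a volts */

int main(void)
{
	/* DEMO_BIP_RANGE(10) and DEMO_UNI_RANGE(10) expand to the same
	 * limits as the explicit DEMO_RANGE(-10, 10) / DEMO_RANGE(0, 10) */
	struct demo_range bip = DEMO_BIP_RANGE(10);
	struct demo_range uni = DEMO_UNI_RANGE(10);

	assert(bip.min_mv == -10000 && bip.max_mv == 10000);
	assert(uni.min_mv == 0 && uni.max_mv == 10000);
	return 0;
}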
enum ni_pcimio_boardid {
@@ -1178,9 +1178,9 @@ static void m_series_stc_writew(struct comedi_device *dev, uint16_t data,
offset = M_Offset_AO_FIFO_Clear;
break;
case DIO_Control_Register:
- printk
- ("%s: FIXME: register 0x%x does not map cleanly on to m-series boards.\n",
- __func__, reg);
+ dev_dbg(dev->class_dev,
+ "%s: FIXME: register 0x%x does not map cleanly on to m-series boards.\n",
+ __func__, reg);
return;
break;
case G_Autoincrement_Register(0):
@@ -1471,6 +1471,7 @@ static int pcimio_auto_attach(struct comedi_device *dev,
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct ni_board_struct *board = NULL;
struct ni_private *devpriv;
+ unsigned int irq;
int ret;
if (context < ARRAY_SIZE(ni_boards))
@@ -1532,18 +1533,12 @@ static int pcimio_auto_attach(struct comedi_device *dev,
if (board->reg_type == ni_reg_6143)
init_6143(dev);
- dev->irq = mite_irq(devpriv->mite);
-
- if (dev->irq == 0) {
- pr_warn("unknown irq (bad)\n");
- } else {
- pr_debug("( irq = %u )\n", dev->irq);
- ret = request_irq(dev->irq, ni_E_interrupt, NI_E_IRQ_FLAGS,
- DRV_NAME, dev);
- if (ret < 0) {
- pr_warn("irq not available\n");
- dev->irq = 0;
- }
+ irq = mite_irq(devpriv->mite);
+ if (irq) {
+ ret = request_irq(irq, ni_E_interrupt, NI_E_IRQ_FLAGS,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = irq;
}
ret = ni_E_init(dev);
@@ -1639,7 +1634,7 @@ static int ni_pcimio_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &ni_pcimio_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(ni_pcimio_pci_table) = {
+static const struct pci_device_id ni_pcimio_pci_table[] = {
{ PCI_VDEVICE(NI, 0x0162), BOARD_PCIMIO_16XE_50 }, /* 0x1620? */
{ PCI_VDEVICE(NI, 0x1170), BOARD_PCIMIO_16XE_10 },
{ PCI_VDEVICE(NI, 0x1180), BOARD_PCIMIO_16E_1 },
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 9b120c77d83a..92691b491c24 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -53,10 +53,6 @@ static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
unsigned generic_clock_source);
static unsigned ni_tio_generic_clock_src_select(const struct ni_gpct *counter);
-MODULE_AUTHOR("Comedi <comedi@comedi.org>");
-MODULE_DESCRIPTION("Comedi support for NI general-purpose counters");
-MODULE_LICENSE("GPL");
-
static inline enum Gi_Counting_Mode_Reg_Bits Gi_Alternate_Sync_Bit(enum
ni_gpct_variant
variant)
@@ -277,19 +273,6 @@ static inline unsigned NI_660x_RTSI_Second_Gate_Select(unsigned n)
static const unsigned int counter_status_mask =
COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING;
-static int __init ni_tio_init_module(void)
-{
- return 0;
-}
-
-module_init(ni_tio_init_module);
-
-static void __exit ni_tio_cleanup_module(void)
-{
-}
-
-module_exit(ni_tio_cleanup_module);
-
struct ni_gpct_device *ni_gpct_device_construct(struct comedi_device *dev,
void (*write_register) (struct
ni_gpct
@@ -362,74 +345,64 @@ static int ni_tio_second_gate_registers_present(const struct ni_gpct_device
static void ni_tio_reset_count_and_disarm(struct ni_gpct *counter)
{
- write_register(counter, Gi_Reset_Bit(counter->counter_index),
- NITIO_Gxx_Joint_Reset_Reg(counter->counter_index));
+ unsigned cidx = counter->counter_index;
+
+ write_register(counter, Gi_Reset_Bit(cidx), NITIO_RESET_REG(cidx));
}
void ni_tio_init_counter(struct ni_gpct *counter)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned cidx = counter->counter_index;
ni_tio_reset_count_and_disarm(counter);
+
/* initialize counter registers */
- counter_dev->regs[NITIO_Gi_Autoincrement_Reg(counter->counter_index)] =
- 0x0;
- write_register(counter,
- counter_dev->
- regs[NITIO_Gi_Autoincrement_Reg(counter->counter_index)],
- NITIO_Gi_Autoincrement_Reg(counter->counter_index));
- ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index),
+ counter_dev->regs[NITIO_AUTO_INC_REG(cidx)] = 0x0;
+ write_register(counter, counter_dev->regs[NITIO_AUTO_INC_REG(cidx)],
+ NITIO_AUTO_INC_REG(cidx));
+
+ ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
~0, Gi_Synchronize_Gate_Bit);
- ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index), ~0,
- 0);
- counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)] = 0x0;
- write_register(counter,
- counter_dev->
- regs[NITIO_Gi_LoadA_Reg(counter->counter_index)],
- NITIO_Gi_LoadA_Reg(counter->counter_index));
- counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)] = 0x0;
- write_register(counter,
- counter_dev->
- regs[NITIO_Gi_LoadB_Reg(counter->counter_index)],
- NITIO_Gi_LoadB_Reg(counter->counter_index));
- ni_tio_set_bits(counter,
- NITIO_Gi_Input_Select_Reg(counter->counter_index), ~0,
- 0);
- if (ni_tio_counting_mode_registers_present(counter_dev)) {
- ni_tio_set_bits(counter,
- NITIO_Gi_Counting_Mode_Reg(counter->
- counter_index), ~0,
- 0);
- }
+
+ ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), ~0, 0);
+
+ counter_dev->regs[NITIO_LOADA_REG(cidx)] = 0x0;
+ write_register(counter, counter_dev->regs[NITIO_LOADA_REG(cidx)],
+ NITIO_LOADA_REG(cidx));
+
+ counter_dev->regs[NITIO_LOADB_REG(cidx)] = 0x0;
+ write_register(counter, counter_dev->regs[NITIO_LOADB_REG(cidx)],
+ NITIO_LOADB_REG(cidx));
+
+ ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), ~0, 0);
+
+ if (ni_tio_counting_mode_registers_present(counter_dev))
+ ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx), ~0, 0);
+
if (ni_tio_second_gate_registers_present(counter_dev)) {
- counter_dev->
- regs[NITIO_Gi_Second_Gate_Reg(counter->counter_index)] =
- 0x0;
+ counter_dev->regs[NITIO_GATE2_REG(cidx)] = 0x0;
write_register(counter,
- counter_dev->
- regs[NITIO_Gi_Second_Gate_Reg
- (counter->counter_index)],
- NITIO_Gi_Second_Gate_Reg(counter->
- counter_index));
+ counter_dev->regs[NITIO_GATE2_REG(cidx)],
+ NITIO_GATE2_REG(cidx));
}
- ni_tio_set_bits(counter,
- NITIO_Gi_DMA_Config_Reg(counter->counter_index), ~0,
- 0x0);
- ni_tio_set_bits(counter,
- NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index),
- ~0, 0x0);
+
+ ni_tio_set_bits(counter, NITIO_DMA_CFG_REG(cidx), ~0, 0x0);
+
+ ni_tio_set_bits(counter, NITIO_INT_ENA_REG(cidx), ~0, 0x0);
}
EXPORT_SYMBOL_GPL(ni_tio_init_counter);
static unsigned int ni_tio_counter_status(struct ni_gpct *counter)
{
- unsigned int status = 0;
+ unsigned cidx = counter->counter_index;
const unsigned bits = read_register(counter,
- NITIO_Gxx_Status_Reg(counter->
- counter_index));
- if (bits & Gi_Armed_Bit(counter->counter_index)) {
+ NITIO_SHARED_STATUS_REG(cidx));
+ unsigned int status = 0;
+
+ if (bits & Gi_Armed_Bit(cidx)) {
status |= COMEDI_COUNTER_ARMED;
- if (bits & Gi_Counting_Bit(counter->counter_index))
+ if (bits & Gi_Counting_Bit(cidx))
status |= COMEDI_COUNTER_COUNTING;
}
return status;
@@ -438,8 +411,8 @@ static unsigned int ni_tio_counter_status(struct ni_gpct *counter)
static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned counting_mode_reg =
- NITIO_Gi_Counting_Mode_Reg(counter->counter_index);
+ unsigned cidx = counter->counter_index;
+ const unsigned counting_mode_reg = NITIO_CNT_MODE_REG(cidx);
static const uint64_t min_normal_sync_period_ps = 25000;
const uint64_t clock_period_ps = ni_tio_clock_period_ps(counter,
ni_tio_generic_clock_src_select
@@ -476,6 +449,7 @@ static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync)
static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned cidx = counter->counter_index;
unsigned mode_reg_mask;
unsigned mode_reg_values;
unsigned input_select_bits = 0;
@@ -502,7 +476,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
default:
break;
}
- ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_MODE_REG(cidx),
mode_reg_mask, mode_reg_values);
if (ni_tio_counting_mode_registers_present(counter_dev)) {
@@ -515,15 +489,13 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
Gi_Index_Phase_Bitshift) & Gi_Index_Phase_Mask;
if (mode & NI_GPCT_INDEX_ENABLE_BIT)
counting_mode_bits |= Gi_Index_Mode_Bit;
- ni_tio_set_bits(counter,
- NITIO_Gi_Counting_Mode_Reg(counter->
- counter_index),
+ ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
Gi_Counting_Mode_Mask | Gi_Index_Phase_Mask |
Gi_Index_Mode_Bit, counting_mode_bits);
ni_tio_set_sync_mode(counter, 0);
}
- ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
Gi_Up_Down_Mask,
(mode >> NI_GPCT_COUNTING_DIRECTION_SHIFT) <<
Gi_Up_Down_Shift);
@@ -532,8 +504,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
input_select_bits |= Gi_Or_Gate_Bit;
if (mode & NI_GPCT_INVERT_OUTPUT_BIT)
input_select_bits |= Gi_Output_Polarity_Bit;
- ni_tio_set_bits(counter,
- NITIO_Gi_Input_Select_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
Gi_Gate_Select_Load_Source_Bit | Gi_Or_Gate_Bit |
Gi_Output_Polarity_Bit, input_select_bits);
@@ -543,7 +514,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
-
+ unsigned cidx = counter->counter_index;
unsigned command_transient_bits = 0;
if (arm) {
@@ -581,9 +552,7 @@ int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger)
}
break;
}
- ni_tio_set_bits(counter,
- NITIO_Gi_Counting_Mode_Reg
- (counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
Gi_HW_Arm_Select_Mask
(counter_dev->variant) |
Gi_HW_Arm_Enable_Bit,
@@ -592,8 +561,7 @@ int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger)
} else {
command_transient_bits |= Gi_Disarm_Bit;
}
- ni_tio_set_bits_transient(counter,
- NITIO_Gi_Command_Reg(counter->counter_index),
+ ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
0, 0, command_transient_bits);
return 0;
}
@@ -717,8 +685,8 @@ static void ni_tio_set_source_subselect(struct ni_gpct *counter,
unsigned int clock_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned second_gate_reg =
- NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+ unsigned cidx = counter->counter_index;
+ const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
if (counter_dev->variant != ni_gpct_variant_m_series)
return;
@@ -747,6 +715,7 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter,
unsigned int period_ns)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned cidx = counter->counter_index;
unsigned input_select_bits = 0;
static const uint64_t pico_per_nano = 1000;
@@ -766,8 +735,7 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter,
}
if (clock_source & NI_GPCT_INVERT_CLOCK_SRC_BIT)
input_select_bits |= Gi_Source_Polarity_Bit;
- ni_tio_set_bits(counter,
- NITIO_Gi_Input_Select_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
Gi_Source_Select_Mask | Gi_Source_Polarity_Bit,
input_select_bits);
ni_tio_set_source_subselect(counter, clock_source);
@@ -791,9 +759,7 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter,
return -EINVAL;
break;
}
- ni_tio_set_bits(counter,
- NITIO_Gi_Counting_Mode_Reg(counter->
- counter_index),
+ ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
Gi_Prescale_X2_Bit(counter_dev->variant) |
Gi_Prescale_X8_Bit(counter_dev->variant),
counting_mode_bits);
@@ -806,15 +772,12 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter,
static unsigned ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned counting_mode_bits = ni_tio_get_soft_copy(counter,
- NITIO_Gi_Counting_Mode_Reg
- (counter->
- counter_index));
+ unsigned cidx = counter->counter_index;
+ const unsigned counting_mode_bits =
+ ni_tio_get_soft_copy(counter, NITIO_CNT_MODE_REG(cidx));
unsigned bits = 0;
- if (ni_tio_get_soft_copy(counter,
- NITIO_Gi_Input_Select_Reg
- (counter->counter_index)) &
+ if (ni_tio_get_soft_copy(counter, NITIO_INPUT_SEL_REG(cidx)) &
Gi_Source_Polarity_Bit)
bits |= NI_GPCT_INVERT_CLOCK_SRC_BIT;
if (counting_mode_bits & Gi_Prescale_X2_Bit(counter_dev->variant))
@@ -827,15 +790,13 @@ static unsigned ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
static unsigned ni_m_series_clock_src_select(const struct ni_gpct *counter)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned second_gate_reg =
- NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+ unsigned cidx = counter->counter_index;
+ const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
unsigned clock_source = 0;
unsigned i;
- const unsigned input_select = (ni_tio_get_soft_copy(counter,
- NITIO_Gi_Input_Select_Reg
- (counter->counter_index))
- & Gi_Source_Select_Mask) >>
- Gi_Source_Select_Shift;
+ const unsigned input_select =
+ (ni_tio_get_soft_copy(counter, NITIO_INPUT_SEL_REG(cidx)) &
+ Gi_Source_Select_Mask) >> Gi_Source_Select_Shift;
switch (input_select) {
case NI_M_Series_Timebase_1_Clock:
@@ -895,12 +856,11 @@ static unsigned ni_m_series_clock_src_select(const struct ni_gpct *counter)
static unsigned ni_660x_clock_src_select(const struct ni_gpct *counter)
{
unsigned clock_source = 0;
+ unsigned cidx = counter->counter_index;
+ const unsigned input_select =
+ (ni_tio_get_soft_copy(counter, NITIO_INPUT_SEL_REG(cidx)) &
+ Gi_Source_Select_Mask) >> Gi_Source_Select_Shift;
unsigned i;
- const unsigned input_select = (ni_tio_get_soft_copy(counter,
- NITIO_Gi_Input_Select_Reg
- (counter->counter_index))
- & Gi_Source_Select_Mask) >>
- Gi_Source_Select_Shift;
switch (input_select) {
case NI_660x_Timebase_1_Clock:
@@ -1022,6 +982,7 @@ static void ni_tio_set_first_gate_modifiers(struct ni_gpct *counter,
unsigned int gate_source)
{
const unsigned mode_mask = Gi_Gate_Polarity_Bit | Gi_Gating_Mode_Mask;
+ unsigned cidx = counter->counter_index;
unsigned mode_values = 0;
if (gate_source & CR_INVERT)
@@ -1030,7 +991,7 @@ static void ni_tio_set_first_gate_modifiers(struct ni_gpct *counter,
mode_values |= Gi_Rising_Edge_Gating_Bits;
else
mode_values |= Gi_Level_Gating_Bits;
- ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_MODE_REG(cidx),
mode_mask, mode_values);
}
@@ -1038,6 +999,7 @@ static int ni_660x_set_first_gate(struct ni_gpct *counter,
unsigned int gate_source)
{
const unsigned selected_gate = CR_CHAN(gate_source);
+ unsigned cidx = counter->counter_index;
/* bits of selected_gate that may be meaningful to input select register */
const unsigned selected_gate_mask = 0x1f;
unsigned ni_660x_gate_select;
@@ -1075,8 +1037,7 @@ static int ni_660x_set_first_gate(struct ni_gpct *counter,
return -EINVAL;
break;
}
- ni_tio_set_bits(counter,
- NITIO_Gi_Input_Select_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
Gi_Gate_Select_Mask,
Gi_Gate_Select_Bits(ni_660x_gate_select));
return 0;
@@ -1086,6 +1047,7 @@ static int ni_m_series_set_first_gate(struct ni_gpct *counter,
unsigned int gate_source)
{
const unsigned selected_gate = CR_CHAN(gate_source);
+ unsigned cidx = counter->counter_index;
/* bits of selected_gate that may be meaningful to input select register */
const unsigned selected_gate_mask = 0x1f;
unsigned ni_m_series_gate_select;
@@ -1124,8 +1086,7 @@ static int ni_m_series_set_first_gate(struct ni_gpct *counter,
return -EINVAL;
break;
}
- ni_tio_set_bits(counter,
- NITIO_Gi_Input_Select_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
Gi_Gate_Select_Mask,
Gi_Gate_Select_Bits(ni_m_series_gate_select));
return 0;
@@ -1135,8 +1096,8 @@ static int ni_660x_set_second_gate(struct ni_gpct *counter,
unsigned int gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned second_gate_reg =
- NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+ unsigned cidx = counter->counter_index;
+ const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
const unsigned selected_second_gate = CR_CHAN(gate_source);
/* bits of second_gate that may be meaningful to second gate register */
static const unsigned selected_second_gate_mask = 0x1f;
@@ -1194,8 +1155,8 @@ static int ni_m_series_set_second_gate(struct ni_gpct *counter,
unsigned int gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned second_gate_reg =
- NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+ unsigned cidx = counter->counter_index;
+ const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
const unsigned selected_second_gate = CR_CHAN(gate_source);
/* bits of second_gate that may be meaningful to second gate register */
static const unsigned selected_second_gate_mask = 0x1f;
@@ -1222,15 +1183,13 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
unsigned int gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned second_gate_reg =
- NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+ unsigned cidx = counter->counter_index;
+ const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
switch (gate_index) {
case 0:
if (CR_CHAN(gate_source) == NI_GPCT_DISABLED_GATE_SELECT) {
- ni_tio_set_bits(counter,
- NITIO_Gi_Mode_Reg(counter->
- counter_index),
+ ni_tio_set_bits(counter, NITIO_MODE_REG(cidx),
Gi_Gating_Mode_Mask,
Gi_Gating_Disabled_Bits);
return 0;
@@ -1292,11 +1251,12 @@ static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index,
unsigned int source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned cidx = counter->counter_index;
if (counter_dev->variant == ni_gpct_variant_m_series) {
unsigned int abz_reg, shift, mask;
- abz_reg = NITIO_Gi_ABZ_Reg(counter->counter_index);
+ abz_reg = NITIO_ABZ_REG(cidx);
switch (index) {
case NI_GPCT_SOURCE_ENCODER_A:
shift = 10;
@@ -1319,7 +1279,6 @@ static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index,
counter_dev->regs[abz_reg] &= ~mask;
counter_dev->regs[abz_reg] |= (source << shift) & mask;
write_register(counter, counter_dev->regs[abz_reg], abz_reg);
-/* printk("%s %x %d %d\n", __func__, counter_dev->regs[abz_reg], index, source); */
return 0;
}
return -EINVAL;
@@ -1491,12 +1450,10 @@ static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index,
unsigned int *gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned mode_bits = ni_tio_get_soft_copy(counter,
- NITIO_Gi_Mode_Reg
- (counter->
- counter_index));
- const unsigned second_gate_reg =
- NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+ unsigned cidx = counter->counter_index;
+ const unsigned mode_bits =
+ ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx));
+ const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
unsigned gate_select_bits;
switch (gate_index) {
@@ -1508,8 +1465,7 @@ static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index,
} else {
gate_select_bits =
(ni_tio_get_soft_copy(counter,
- NITIO_Gi_Input_Select_Reg
- (counter->counter_index)) &
+ NITIO_INPUT_SEL_REG(cidx)) &
Gi_Gate_Select_Mask) >> Gi_Gate_Select_Shift;
}
switch (counter_dev->variant) {
@@ -1577,9 +1533,13 @@ static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index,
return 0;
}
-int ni_tio_insn_config(struct ni_gpct *counter,
- struct comedi_insn *insn, unsigned int *data)
+int ni_tio_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ struct ni_gpct *counter = s->private;
+
switch (data[0]) {
case INSN_CONFIG_SET_COUNTER_MODE:
return ni_tio_set_counter_mode(counter, data[1]);
@@ -1623,11 +1583,15 @@ int ni_tio_insn_config(struct ni_gpct *counter,
}
EXPORT_SYMBOL_GPL(ni_tio_insn_config);
-int ni_tio_rinsn(struct ni_gpct *counter, struct comedi_insn *insn,
- unsigned int *data)
+int ni_tio_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ struct ni_gpct *counter = s->private;
struct ni_gpct_device *counter_dev = counter->counter_dev;
const unsigned channel = CR_CHAN(insn->chanspec);
+ unsigned cidx = counter->counter_index;
unsigned first_read;
unsigned second_read;
unsigned correct_read;
@@ -1636,65 +1600,57 @@ int ni_tio_rinsn(struct ni_gpct *counter, struct comedi_insn *insn,
return 0;
switch (channel) {
case 0:
- ni_tio_set_bits(counter,
- NITIO_Gi_Command_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
Gi_Save_Trace_Bit, 0);
- ni_tio_set_bits(counter,
- NITIO_Gi_Command_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
Gi_Save_Trace_Bit, Gi_Save_Trace_Bit);
/* The count doesn't get latched until the next clock edge, so it is possible the count
may change (once) while we are reading. Since the read of the SW_Save_Reg isn't
atomic (apparently even when it's a 32 bit register according to 660x docs),
we need to read twice and make sure the reading hasn't changed. If it has,
a third read will be correct since the count value will definitely have latched by then. */
- first_read =
- read_register(counter,
- NITIO_Gi_SW_Save_Reg(counter->counter_index));
- second_read =
- read_register(counter,
- NITIO_Gi_SW_Save_Reg(counter->counter_index));
+ first_read = read_register(counter, NITIO_SW_SAVE_REG(cidx));
+ second_read = read_register(counter, NITIO_SW_SAVE_REG(cidx));
if (first_read != second_read)
correct_read =
- read_register(counter,
- NITIO_Gi_SW_Save_Reg(counter->
- counter_index));
+ read_register(counter, NITIO_SW_SAVE_REG(cidx));
else
correct_read = first_read;
data[0] = correct_read;
return 0;
break;
case 1:
- data[0] =
- counter_dev->
- regs[NITIO_Gi_LoadA_Reg(counter->counter_index)];
+ data[0] = counter_dev->regs[NITIO_LOADA_REG(cidx)];
break;
case 2:
- data[0] =
- counter_dev->
- regs[NITIO_Gi_LoadB_Reg(counter->counter_index)];
+ data[0] = counter_dev->regs[NITIO_LOADB_REG(cidx)];
break;
}
return 0;
}
-EXPORT_SYMBOL_GPL(ni_tio_rinsn);
+EXPORT_SYMBOL_GPL(ni_tio_insn_read);
static unsigned ni_tio_next_load_register(struct ni_gpct *counter)
{
- const unsigned bits = read_register(counter,
- NITIO_Gxx_Status_Reg(counter->
- counter_index));
+ unsigned cidx = counter->counter_index;
+ const unsigned bits =
+ read_register(counter, NITIO_SHARED_STATUS_REG(cidx));
- if (bits & Gi_Next_Load_Source_Bit(counter->counter_index))
- return NITIO_Gi_LoadB_Reg(counter->counter_index);
+ if (bits & Gi_Next_Load_Source_Bit(cidx))
+ return NITIO_LOADB_REG(cidx);
else
- return NITIO_Gi_LoadA_Reg(counter->counter_index);
+ return NITIO_LOADA_REG(cidx);
}
-int ni_tio_winsn(struct ni_gpct *counter, struct comedi_insn *insn,
- unsigned int *data)
+int ni_tio_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ struct ni_gpct *counter = s->private;
struct ni_gpct_device *counter_dev = counter->counter_dev;
const unsigned channel = CR_CHAN(insn->chanspec);
+ unsigned cidx = counter->counter_index;
unsigned load_reg;
if (insn->n < 1)
@@ -1705,24 +1661,18 @@ int ni_tio_winsn(struct ni_gpct *counter, struct comedi_insn *insn,
/* Don't disturb load source select, just use whichever load register is already selected. */
load_reg = ni_tio_next_load_register(counter);
write_register(counter, data[0], load_reg);
- ni_tio_set_bits_transient(counter,
- NITIO_Gi_Command_Reg(counter->
- counter_index),
+ ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
0, 0, Gi_Load_Bit);
/* restore state of load reg to whatever the user set last set it to */
write_register(counter, counter_dev->regs[load_reg], load_reg);
break;
case 1:
- counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)] =
- data[0];
- write_register(counter, data[0],
- NITIO_Gi_LoadA_Reg(counter->counter_index));
+ counter_dev->regs[NITIO_LOADA_REG(cidx)] = data[0];
+ write_register(counter, data[0], NITIO_LOADA_REG(cidx));
break;
case 2:
- counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)] =
- data[0];
- write_register(counter, data[0],
- NITIO_Gi_LoadB_Reg(counter->counter_index));
+ counter_dev->regs[NITIO_LOADB_REG(cidx)] = data[0];
+ write_register(counter, data[0], NITIO_LOADB_REG(cidx));
break;
default:
return -EINVAL;
@@ -1730,4 +1680,19 @@ int ni_tio_winsn(struct ni_gpct *counter, struct comedi_insn *insn,
}
return 0;
}
-EXPORT_SYMBOL_GPL(ni_tio_winsn);
+EXPORT_SYMBOL_GPL(ni_tio_insn_write);
+
+static int __init ni_tio_init_module(void)
+{
+ return 0;
+}
+module_init(ni_tio_init_module);
+
+static void __exit ni_tio_cleanup_module(void)
+{
+}
+module_exit(ni_tio_cleanup_module);
+
+MODULE_AUTHOR("Comedi <comedi@comedi.org>");
+MODULE_DESCRIPTION("Comedi support for NI general-purpose counters");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_tio.h b/drivers/staging/comedi/drivers/ni_tio.h
index 7e13697b3254..68378dab4e70 100644
--- a/drivers/staging/comedi/drivers/ni_tio.h
+++ b/drivers/staging/comedi/drivers/ni_tio.h
@@ -25,77 +25,77 @@ struct mite_struct;
struct ni_gpct_device;
enum ni_gpct_register {
- NITIO_G0_Autoincrement_Reg,
- NITIO_G1_Autoincrement_Reg,
- NITIO_G2_Autoincrement_Reg,
- NITIO_G3_Autoincrement_Reg,
- NITIO_G0_Command_Reg,
- NITIO_G1_Command_Reg,
- NITIO_G2_Command_Reg,
- NITIO_G3_Command_Reg,
- NITIO_G0_HW_Save_Reg,
- NITIO_G1_HW_Save_Reg,
- NITIO_G2_HW_Save_Reg,
- NITIO_G3_HW_Save_Reg,
- NITIO_G0_SW_Save_Reg,
- NITIO_G1_SW_Save_Reg,
- NITIO_G2_SW_Save_Reg,
- NITIO_G3_SW_Save_Reg,
- NITIO_G0_Mode_Reg,
- NITIO_G1_Mode_Reg,
- NITIO_G2_Mode_Reg,
- NITIO_G3_Mode_Reg,
- NITIO_G0_LoadA_Reg,
- NITIO_G1_LoadA_Reg,
- NITIO_G2_LoadA_Reg,
- NITIO_G3_LoadA_Reg,
- NITIO_G0_LoadB_Reg,
- NITIO_G1_LoadB_Reg,
- NITIO_G2_LoadB_Reg,
- NITIO_G3_LoadB_Reg,
- NITIO_G0_Input_Select_Reg,
- NITIO_G1_Input_Select_Reg,
- NITIO_G2_Input_Select_Reg,
- NITIO_G3_Input_Select_Reg,
- NITIO_G0_Counting_Mode_Reg,
- NITIO_G1_Counting_Mode_Reg,
- NITIO_G2_Counting_Mode_Reg,
- NITIO_G3_Counting_Mode_Reg,
- NITIO_G0_Second_Gate_Reg,
- NITIO_G1_Second_Gate_Reg,
- NITIO_G2_Second_Gate_Reg,
- NITIO_G3_Second_Gate_Reg,
- NITIO_G01_Status_Reg,
- NITIO_G23_Status_Reg,
- NITIO_G01_Joint_Reset_Reg,
- NITIO_G23_Joint_Reset_Reg,
- NITIO_G01_Joint_Status1_Reg,
- NITIO_G23_Joint_Status1_Reg,
- NITIO_G01_Joint_Status2_Reg,
- NITIO_G23_Joint_Status2_Reg,
- NITIO_G0_DMA_Config_Reg,
- NITIO_G1_DMA_Config_Reg,
- NITIO_G2_DMA_Config_Reg,
- NITIO_G3_DMA_Config_Reg,
- NITIO_G0_DMA_Status_Reg,
- NITIO_G1_DMA_Status_Reg,
- NITIO_G2_DMA_Status_Reg,
- NITIO_G3_DMA_Status_Reg,
- NITIO_G0_ABZ_Reg,
- NITIO_G1_ABZ_Reg,
- NITIO_G0_Interrupt_Acknowledge_Reg,
- NITIO_G1_Interrupt_Acknowledge_Reg,
- NITIO_G2_Interrupt_Acknowledge_Reg,
- NITIO_G3_Interrupt_Acknowledge_Reg,
- NITIO_G0_Status_Reg,
- NITIO_G1_Status_Reg,
- NITIO_G2_Status_Reg,
- NITIO_G3_Status_Reg,
- NITIO_G0_Interrupt_Enable_Reg,
- NITIO_G1_Interrupt_Enable_Reg,
- NITIO_G2_Interrupt_Enable_Reg,
- NITIO_G3_Interrupt_Enable_Reg,
- NITIO_Num_Registers,
+ NITIO_G0_AUTO_INC,
+ NITIO_G1_AUTO_INC,
+ NITIO_G2_AUTO_INC,
+ NITIO_G3_AUTO_INC,
+ NITIO_G0_CMD,
+ NITIO_G1_CMD,
+ NITIO_G2_CMD,
+ NITIO_G3_CMD,
+ NITIO_G0_HW_SAVE,
+ NITIO_G1_HW_SAVE,
+ NITIO_G2_HW_SAVE,
+ NITIO_G3_HW_SAVE,
+ NITIO_G0_SW_SAVE,
+ NITIO_G1_SW_SAVE,
+ NITIO_G2_SW_SAVE,
+ NITIO_G3_SW_SAVE,
+ NITIO_G0_MODE,
+ NITIO_G1_MODE,
+ NITIO_G2_MODE,
+ NITIO_G3_MODE,
+ NITIO_G0_LOADA,
+ NITIO_G1_LOADA,
+ NITIO_G2_LOADA,
+ NITIO_G3_LOADA,
+ NITIO_G0_LOADB,
+ NITIO_G1_LOADB,
+ NITIO_G2_LOADB,
+ NITIO_G3_LOADB,
+ NITIO_G0_INPUT_SEL,
+ NITIO_G1_INPUT_SEL,
+ NITIO_G2_INPUT_SEL,
+ NITIO_G3_INPUT_SEL,
+ NITIO_G0_CNT_MODE,
+ NITIO_G1_CNT_MODE,
+ NITIO_G2_CNT_MODE,
+ NITIO_G3_CNT_MODE,
+ NITIO_G0_GATE2,
+ NITIO_G1_GATE2,
+ NITIO_G2_GATE2,
+ NITIO_G3_GATE2,
+ NITIO_G01_STATUS,
+ NITIO_G23_STATUS,
+ NITIO_G01_RESET,
+ NITIO_G23_RESET,
+ NITIO_G01_STATUS1,
+ NITIO_G23_STATUS1,
+ NITIO_G01_STATUS2,
+ NITIO_G23_STATUS2,
+ NITIO_G0_DMA_CFG,
+ NITIO_G1_DMA_CFG,
+ NITIO_G2_DMA_CFG,
+ NITIO_G3_DMA_CFG,
+ NITIO_G0_DMA_STATUS,
+ NITIO_G1_DMA_STATUS,
+ NITIO_G2_DMA_STATUS,
+ NITIO_G3_DMA_STATUS,
+ NITIO_G0_ABZ,
+ NITIO_G1_ABZ,
+ NITIO_G0_INT_ACK,
+ NITIO_G1_INT_ACK,
+ NITIO_G2_INT_ACK,
+ NITIO_G3_INT_ACK,
+ NITIO_G0_STATUS,
+ NITIO_G1_STATUS,
+ NITIO_G2_STATUS,
+ NITIO_G3_STATUS,
+ NITIO_G0_INT_ENA,
+ NITIO_G1_INT_ENA,
+ NITIO_G2_INT_ENA,
+ NITIO_G3_INT_ENA,
+ NITIO_NUM_REGS,
};
enum ni_gpct_variant {
@@ -122,48 +122,35 @@ struct ni_gpct_device {
enum ni_gpct_variant variant;
struct ni_gpct *counters;
unsigned num_counters;
- unsigned regs[NITIO_Num_Registers];
+ unsigned regs[NITIO_NUM_REGS];
spinlock_t regs_lock;
};
-extern struct ni_gpct_device *ni_gpct_device_construct(struct comedi_device
- *dev,
- void (*write_register)
- (struct ni_gpct *
- counter, unsigned bits,
- enum ni_gpct_register
- reg),
- unsigned (*read_register)
- (struct ni_gpct *
- counter,
- enum ni_gpct_register
- reg),
- enum ni_gpct_variant
- variant,
- unsigned num_counters);
-extern void ni_gpct_device_destroy(struct ni_gpct_device *counter_dev);
-extern void ni_tio_init_counter(struct ni_gpct *counter);
-extern int ni_tio_rinsn(struct ni_gpct *counter,
- struct comedi_insn *insn, unsigned int *data);
-extern int ni_tio_insn_config(struct ni_gpct *counter,
- struct comedi_insn *insn, unsigned int *data);
-extern int ni_tio_winsn(struct ni_gpct *counter,
- struct comedi_insn *insn, unsigned int *data);
-extern int ni_tio_cmd(struct ni_gpct *counter, struct comedi_async *async);
-extern int ni_tio_cmdtest(struct ni_gpct *counter, struct comedi_cmd *cmd);
-extern int ni_tio_cancel(struct ni_gpct *counter);
-extern void ni_tio_handle_interrupt(struct ni_gpct *counter,
- struct comedi_subdevice *s);
-extern void ni_tio_set_mite_channel(struct ni_gpct *counter,
- struct mite_channel *mite_chan);
-extern void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
- int *gate_error, int *tc_error,
- int *perm_stale_data,
- int *stale_data);
-
-static inline struct ni_gpct *subdev_to_counter(struct comedi_subdevice *s)
-{
- return s->private;
-}
+struct ni_gpct_device *
+ni_gpct_device_construct(struct comedi_device *,
+ void (*write_register)(struct ni_gpct *,
+ unsigned bits,
+ enum ni_gpct_register),
+ unsigned (*read_register)(struct ni_gpct *,
+ enum ni_gpct_register),
+ enum ni_gpct_variant,
+ unsigned num_counters);
+void ni_gpct_device_destroy(struct ni_gpct_device *);
+void ni_tio_init_counter(struct ni_gpct *);
+int ni_tio_insn_read(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *, unsigned int *data);
+int ni_tio_insn_config(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *, unsigned int *data);
+int ni_tio_insn_write(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *, unsigned int *data);
+int ni_tio_cmd(struct comedi_device *, struct comedi_subdevice *);
+int ni_tio_cmdtest(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_cmd *);
+int ni_tio_cancel(struct ni_gpct *);
+void ni_tio_handle_interrupt(struct ni_gpct *, struct comedi_subdevice *);
+void ni_tio_set_mite_channel(struct ni_gpct *, struct mite_channel *);
+void ni_tio_acknowledge_and_confirm(struct ni_gpct *,
+ int *gate_error, int *tc_error,
+ int *perm_stale_data, int *stale_data);
#endif /* _COMEDI_NI_TIO_H */
diff --git a/drivers/staging/comedi/drivers/ni_tio_internal.h b/drivers/staging/comedi/drivers/ni_tio_internal.h
index b009876754a8..15b81b8fc5c4 100644
--- a/drivers/staging/comedi/drivers/ni_tio_internal.h
+++ b/drivers/staging/comedi/drivers/ni_tio_internal.h
@@ -21,409 +21,26 @@
#include "ni_tio.h"
-static inline enum ni_gpct_register NITIO_Gi_Autoincrement_Reg(unsigned
- counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Autoincrement_Reg;
- break;
- case 1:
- return NITIO_G1_Autoincrement_Reg;
- break;
- case 2:
- return NITIO_G2_Autoincrement_Reg;
- break;
- case 3:
- return NITIO_G3_Autoincrement_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_Command_Reg(unsigned counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Command_Reg;
- break;
- case 1:
- return NITIO_G1_Command_Reg;
- break;
- case 2:
- return NITIO_G2_Command_Reg;
- break;
- case 3:
- return NITIO_G3_Command_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_Counting_Mode_Reg(unsigned
- counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Counting_Mode_Reg;
- break;
- case 1:
- return NITIO_G1_Counting_Mode_Reg;
- break;
- case 2:
- return NITIO_G2_Counting_Mode_Reg;
- break;
- case 3:
- return NITIO_G3_Counting_Mode_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_Input_Select_Reg(unsigned
- counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Input_Select_Reg;
- break;
- case 1:
- return NITIO_G1_Input_Select_Reg;
- break;
- case 2:
- return NITIO_G2_Input_Select_Reg;
- break;
- case 3:
- return NITIO_G3_Input_Select_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gxx_Joint_Reset_Reg(unsigned
- counter_index)
-{
- switch (counter_index) {
- case 0:
- case 1:
- return NITIO_G01_Joint_Reset_Reg;
- break;
- case 2:
- case 3:
- return NITIO_G23_Joint_Reset_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gxx_Joint_Status1_Reg(unsigned
- counter_index)
-{
- switch (counter_index) {
- case 0:
- case 1:
- return NITIO_G01_Joint_Status1_Reg;
- break;
- case 2:
- case 3:
- return NITIO_G23_Joint_Status1_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gxx_Joint_Status2_Reg(unsigned
- counter_index)
-{
- switch (counter_index) {
- case 0:
- case 1:
- return NITIO_G01_Joint_Status2_Reg;
- break;
- case 2:
- case 3:
- return NITIO_G23_Joint_Status2_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gxx_Status_Reg(unsigned counter_index)
-{
- switch (counter_index) {
- case 0:
- case 1:
- return NITIO_G01_Status_Reg;
- break;
- case 2:
- case 3:
- return NITIO_G23_Status_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_LoadA_Reg(unsigned counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_LoadA_Reg;
- break;
- case 1:
- return NITIO_G1_LoadA_Reg;
- break;
- case 2:
- return NITIO_G2_LoadA_Reg;
- break;
- case 3:
- return NITIO_G3_LoadA_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_LoadB_Reg(unsigned counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_LoadB_Reg;
- break;
- case 1:
- return NITIO_G1_LoadB_Reg;
- break;
- case 2:
- return NITIO_G2_LoadB_Reg;
- break;
- case 3:
- return NITIO_G3_LoadB_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_Mode_Reg(unsigned counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Mode_Reg;
- break;
- case 1:
- return NITIO_G1_Mode_Reg;
- break;
- case 2:
- return NITIO_G2_Mode_Reg;
- break;
- case 3:
- return NITIO_G3_Mode_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_SW_Save_Reg(int counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_SW_Save_Reg;
- break;
- case 1:
- return NITIO_G1_SW_Save_Reg;
- break;
- case 2:
- return NITIO_G2_SW_Save_Reg;
- break;
- case 3:
- return NITIO_G3_SW_Save_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_Second_Gate_Reg(int counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Second_Gate_Reg;
- break;
- case 1:
- return NITIO_G1_Second_Gate_Reg;
- break;
- case 2:
- return NITIO_G2_Second_Gate_Reg;
- break;
- case 3:
- return NITIO_G3_Second_Gate_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_DMA_Config_Reg(int counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_DMA_Config_Reg;
- break;
- case 1:
- return NITIO_G1_DMA_Config_Reg;
- break;
- case 2:
- return NITIO_G2_DMA_Config_Reg;
- break;
- case 3:
- return NITIO_G3_DMA_Config_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_DMA_Status_Reg(int counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_DMA_Status_Reg;
- break;
- case 1:
- return NITIO_G1_DMA_Status_Reg;
- break;
- case 2:
- return NITIO_G2_DMA_Status_Reg;
- break;
- case 3:
- return NITIO_G3_DMA_Status_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_ABZ_Reg(int counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_ABZ_Reg;
- break;
- case 1:
- return NITIO_G1_ABZ_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_Interrupt_Acknowledge_Reg(
- int counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Interrupt_Acknowledge_Reg;
- break;
- case 1:
- return NITIO_G1_Interrupt_Acknowledge_Reg;
- break;
- case 2:
- return NITIO_G2_Interrupt_Acknowledge_Reg;
- break;
- case 3:
- return NITIO_G3_Interrupt_Acknowledge_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_Status_Reg(int counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Status_Reg;
- break;
- case 1:
- return NITIO_G1_Status_Reg;
- break;
- case 2:
- return NITIO_G2_Status_Reg;
- break;
- case 3:
- return NITIO_G3_Status_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline enum ni_gpct_register NITIO_Gi_Interrupt_Enable_Reg(
- int counter_index)
-{
- switch (counter_index) {
- case 0:
- return NITIO_G0_Interrupt_Enable_Reg;
- break;
- case 1:
- return NITIO_G1_Interrupt_Enable_Reg;
- break;
- case 2:
- return NITIO_G2_Interrupt_Enable_Reg;
- break;
- case 3:
- return NITIO_G3_Interrupt_Enable_Reg;
- break;
- default:
- BUG();
- break;
- }
- return 0;
-}
+#define NITIO_AUTO_INC_REG(x) (NITIO_G0_AUTO_INC + (x))
+#define NITIO_CMD_REG(x) (NITIO_G0_CMD + (x))
+#define NITIO_HW_SAVE_REG(x) (NITIO_G0_HW_SAVE + (x))
+#define NITIO_SW_SAVE_REG(x) (NITIO_G0_SW_SAVE + (x))
+#define NITIO_MODE_REG(x) (NITIO_G0_MODE + (x))
+#define NITIO_LOADA_REG(x) (NITIO_G0_LOADA + (x))
+#define NITIO_LOADB_REG(x) (NITIO_G0_LOADB + (x))
+#define NITIO_INPUT_SEL_REG(x) (NITIO_G0_INPUT_SEL + (x))
+#define NITIO_CNT_MODE_REG(x) (NITIO_G0_CNT_MODE + (x))
+#define NITIO_GATE2_REG(x) (NITIO_G0_GATE2 + (x))
+#define NITIO_SHARED_STATUS_REG(x) (NITIO_G01_STATUS + ((x) / 2))
+#define NITIO_RESET_REG(x) (NITIO_G01_RESET + ((x) / 2))
+#define NITIO_STATUS1_REG(x) (NITIO_G01_STATUS1 + ((x) / 2))
+#define NITIO_STATUS2_REG(x) (NITIO_G01_STATUS2 + ((x) / 2))
+#define NITIO_DMA_CFG_REG(x) (NITIO_G0_DMA_CFG + (x))
+#define NITIO_DMA_STATUS_REG(x) (NITIO_G0_DMA_STATUS + (x))
+#define NITIO_ABZ_REG(x) (NITIO_G0_ABZ + (x))
+#define NITIO_INT_ACK_REG(x) (NITIO_G0_INT_ACK + (x))
+#define NITIO_STATUS_REG(x) (NITIO_G0_STATUS + (x))
+#define NITIO_INT_ENA_REG(x) (NITIO_G0_INT_ENA + (x))
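(Editor's note, not part of the patch: the macros above replace the old per-counter switch helpers by leaning on the layout of the ni_gpct_register enum. A minimal sketch of how they resolve, assuming the renamed enum lists each counter's registers consecutively (G0, G1, G2, G3) and keeps the shared G01/G23 entries adjacent:)

	unsigned cidx = 2;	/* third counter, illustrative value */
	/* per-counter register: the G0 value plus the counter index */
	enum ni_gpct_register mode = NITIO_MODE_REG(cidx);	/* NITIO_G0_MODE + 2, i.e. NITIO_G2_MODE */
	/* registers shared by a counter pair: one entry per two counters */
	enum ni_gpct_register stat = NITIO_SHARED_STATUS_REG(cidx);	/* NITIO_G01_STATUS + 1, i.e. the G2/G3 status register */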
enum Gi_Auto_Increment_Reg_Bits {
Gi_Auto_Increment_Mask = 0xff
@@ -699,14 +316,14 @@ static inline unsigned Gi_Gate_Interrupt_Enable_Bit(unsigned counter_index)
static inline void write_register(struct ni_gpct *counter, unsigned bits,
enum ni_gpct_register reg)
{
- BUG_ON(reg >= NITIO_Num_Registers);
+ BUG_ON(reg >= NITIO_NUM_REGS);
counter->counter_dev->write_register(counter, bits, reg);
}
static inline unsigned read_register(struct ni_gpct *counter,
enum ni_gpct_register reg)
{
- BUG_ON(reg >= NITIO_Num_Registers);
+ BUG_ON(reg >= NITIO_NUM_REGS);
return counter->counter_dev->read_register(counter, reg);
}
@@ -738,7 +355,7 @@ static inline void ni_tio_set_bits_transient(struct ni_gpct *counter,
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned long flags;
- BUG_ON(register_index >= NITIO_Num_Registers);
+ BUG_ON(register_index >= NITIO_NUM_REGS);
spin_lock_irqsave(&counter_dev->regs_lock, flags);
counter_dev->regs[register_index] &= ~bit_mask;
counter_dev->regs[register_index] |= (bit_values & bit_mask);
@@ -773,7 +390,7 @@ static inline unsigned ni_tio_get_soft_copy(const struct ni_gpct *counter,
unsigned long flags;
unsigned value;
- BUG_ON(register_index >= NITIO_Num_Registers);
+ BUG_ON(register_index >= NITIO_NUM_REGS);
spin_lock_irqsave(&counter_dev->regs_lock, flags);
value = counter_dev->regs[register_index];
spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 45691efefd05..7d64f8892f08 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -49,14 +49,11 @@ TODO:
#include "ni_tio_internal.h"
#include "mite.h"
-MODULE_AUTHOR("Comedi <comedi@comedi.org>");
-MODULE_DESCRIPTION("Comedi command support for NI general-purpose counters");
-MODULE_LICENSE("GPL");
-
static void ni_tio_configure_dma(struct ni_gpct *counter, short enable,
short read_not_write)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned cidx = counter->counter_index;
unsigned input_select_bits = 0;
if (enable) {
@@ -65,8 +62,7 @@ static void ni_tio_configure_dma(struct ni_gpct *counter, short enable,
else
input_select_bits |= Gi_Write_Acknowledges_Irq;
}
- ni_tio_set_bits(counter,
- NITIO_Gi_Input_Select_Reg(counter->counter_index),
+ ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
Gi_Read_Acknowledges_Irq | Gi_Write_Acknowledges_Irq,
input_select_bits);
switch (counter_dev->variant) {
@@ -83,9 +79,7 @@ static void ni_tio_configure_dma(struct ni_gpct *counter, short enable,
}
if (read_not_write == 0)
gi_dma_config_bits |= Gi_DMA_Write_Bit;
- ni_tio_set_bits(counter,
- NITIO_Gi_DMA_Config_Reg(counter->
- counter_index),
+ ni_tio_set_bits(counter, NITIO_DMA_CFG_REG(cidx),
Gi_DMA_Enable_Bit | Gi_DMA_Int_Bit |
Gi_DMA_Write_Bit, gi_dma_config_bits);
}
@@ -122,6 +116,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev,
static int ni_tio_input_cmd(struct ni_gpct *counter, struct comedi_async *async)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned cidx = counter->counter_index;
struct comedi_cmd *cmd = &async->cmd;
int retval = 0;
@@ -140,8 +135,7 @@ static int ni_tio_input_cmd(struct ni_gpct *counter, struct comedi_async *async)
BUG();
break;
}
- ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index),
- Gi_Save_Trace_Bit, 0);
+ ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), Gi_Save_Trace_Bit, 0);
ni_tio_configure_dma(counter, 1, 1);
switch (cmd->start_src) {
case TRIG_NOW:
@@ -185,6 +179,7 @@ static int ni_tio_output_cmd(struct ni_gpct *counter,
static int ni_tio_cmd_setup(struct ni_gpct *counter, struct comedi_async *async)
{
struct comedi_cmd *cmd = &async->cmd;
+ unsigned cidx = counter->counter_index;
int set_gate_source = 0;
unsigned gate_source;
int retval = 0;
@@ -199,19 +194,17 @@ static int ni_tio_cmd_setup(struct ni_gpct *counter, struct comedi_async *async)
if (set_gate_source)
retval = ni_tio_set_gate_src(counter, 0, gate_source);
if (cmd->flags & TRIG_WAKE_EOS) {
- ni_tio_set_bits(counter,
- NITIO_Gi_Interrupt_Enable_Reg(counter->
- counter_index),
- Gi_Gate_Interrupt_Enable_Bit(counter->
- counter_index),
- Gi_Gate_Interrupt_Enable_Bit(counter->
- counter_index));
+ ni_tio_set_bits(counter, NITIO_INT_ENA_REG(cidx),
+ Gi_Gate_Interrupt_Enable_Bit(cidx),
+ Gi_Gate_Interrupt_Enable_Bit(cidx));
}
return retval;
}
-int ni_tio_cmd(struct ni_gpct *counter, struct comedi_async *async)
+int ni_tio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
+ struct ni_gpct *counter = s->private;
+ struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
int retval = 0;
unsigned long flags;
@@ -237,8 +230,11 @@ int ni_tio_cmd(struct ni_gpct *counter, struct comedi_async *async)
}
EXPORT_SYMBOL_GPL(ni_tio_cmd);
-int ni_tio_cmdtest(struct ni_gpct *counter, struct comedi_cmd *cmd)
+int ni_tio_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
+ struct ni_gpct *counter = s->private;
int err = 0;
unsigned int sources;
@@ -301,6 +297,7 @@ EXPORT_SYMBOL_GPL(ni_tio_cmdtest);
int ni_tio_cancel(struct ni_gpct *counter)
{
+ unsigned cidx = counter->counter_index;
unsigned long flags;
ni_tio_arm(counter, 0, 0);
@@ -310,10 +307,8 @@ int ni_tio_cancel(struct ni_gpct *counter)
spin_unlock_irqrestore(&counter->lock, flags);
ni_tio_configure_dma(counter, 0, 0);
- ni_tio_set_bits(counter,
- NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index),
- Gi_Gate_Interrupt_Enable_Bit(counter->counter_index),
- 0x0);
+ ni_tio_set_bits(counter, NITIO_INT_ENA_REG(cidx),
+ Gi_Gate_Interrupt_Enable_Bit(cidx), 0x0);
return 0;
}
EXPORT_SYMBOL_GPL(ni_tio_cancel);
@@ -353,14 +348,11 @@ void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, int *gate_error,
int *tc_error, int *perm_stale_data,
int *stale_data)
{
+ unsigned cidx = counter->counter_index;
const unsigned short gxx_status = read_register(counter,
- NITIO_Gxx_Status_Reg
- (counter->
- counter_index));
+ NITIO_SHARED_STATUS_REG(cidx));
const unsigned short gi_status = read_register(counter,
- NITIO_Gi_Status_Reg
- (counter->
- counter_index));
+ NITIO_STATUS_REG(cidx));
unsigned ack = 0;
if (gate_error)
@@ -372,8 +364,8 @@ void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, int *gate_error,
if (stale_data)
*stale_data = 0;
- if (gxx_status & Gi_Gate_Error_Bit(counter->counter_index)) {
- ack |= Gi_Gate_Error_Confirm_Bit(counter->counter_index);
+ if (gxx_status & Gi_Gate_Error_Bit(cidx)) {
+ ack |= Gi_Gate_Error_Confirm_Bit(cidx);
if (gate_error) {
/*660x don't support automatic acknowledgement
of gate interrupt via dma read/write
@@ -384,8 +376,8 @@ void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, int *gate_error,
}
}
}
- if (gxx_status & Gi_TC_Error_Bit(counter->counter_index)) {
- ack |= Gi_TC_Error_Confirm_Bit(counter->counter_index);
+ if (gxx_status & Gi_TC_Error_Bit(cidx)) {
+ ack |= Gi_TC_Error_Confirm_Bit(cidx);
if (tc_error)
*tc_error = 1;
}
@@ -396,21 +388,15 @@ void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, int *gate_error,
ack |= Gi_Gate_Interrupt_Ack_Bit;
}
if (ack)
- write_register(counter, ack,
- NITIO_Gi_Interrupt_Acknowledge_Reg
- (counter->counter_index));
- if (ni_tio_get_soft_copy
- (counter,
- NITIO_Gi_Mode_Reg(counter->counter_index)) &
+ write_register(counter, ack, NITIO_INT_ACK_REG(cidx));
+ if (ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx)) &
Gi_Loading_On_Gate_Bit) {
- if (gxx_status & Gi_Stale_Data_Bit(counter->counter_index)) {
+ if (gxx_status & Gi_Stale_Data_Bit(cidx)) {
if (stale_data)
*stale_data = 1;
}
- if (read_register(counter,
- NITIO_Gxx_Joint_Status2_Reg
- (counter->counter_index)) &
- Gi_Permanent_Stale_Bit(counter->counter_index)) {
+ if (read_register(counter, NITIO_STATUS2_REG(cidx)) &
+ Gi_Permanent_Stale_Bit(cidx)) {
dev_info(counter->counter_dev->dev->class_dev,
"%s: Gi_Permanent_Stale_Data detected.\n",
__func__);
@@ -424,6 +410,7 @@ EXPORT_SYMBOL_GPL(ni_tio_acknowledge_and_confirm);
void ni_tio_handle_interrupt(struct ni_gpct *counter,
struct comedi_subdevice *s)
{
+ unsigned cidx = counter->counter_index;
unsigned gpct_mite_status;
unsigned long flags;
int gate_error;
@@ -442,9 +429,8 @@ void ni_tio_handle_interrupt(struct ni_gpct *counter,
switch (counter->counter_dev->variant) {
case ni_gpct_variant_m_series:
case ni_gpct_variant_660x:
- if (read_register(counter,
- NITIO_Gi_DMA_Status_Reg
- (counter->counter_index)) & Gi_DRQ_Error_Bit) {
+ if (read_register(counter, NITIO_DMA_STATUS_REG(cidx)) &
+ Gi_DRQ_Error_Bit) {
dev_notice(counter->counter_dev->dev->class_dev,
"%s: Gi_DRQ_Error detected.\n", __func__);
s->async->events |= COMEDI_CB_OVERFLOW;
@@ -484,11 +470,13 @@ static int __init ni_tiocmd_init_module(void)
{
return 0;
}
-
module_init(ni_tiocmd_init_module);
static void __exit ni_tiocmd_cleanup_module(void)
{
}
-
module_exit(ni_tiocmd_cleanup_module);
+
+MODULE_AUTHOR("Comedi <comedi@comedi.org>");
+MODULE_DESCRIPTION("Comedi command support for NI general-purpose counters");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl812.c b/drivers/staging/comedi/drivers/pcl812.c
index 03315abcca19..53613b385f35 100644
--- a/drivers/staging/comedi/drivers/pcl812.c
+++ b/drivers/staging/comedi/drivers/pcl812.c
@@ -162,156 +162,172 @@
#define MAX_CHANLIST_LEN 256 /* length of scan list */
-static const struct comedi_lrange range_pcl812pg_ai = { 5, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- BIP_RANGE(0.3125),
- }
+static const struct comedi_lrange range_pcl812pg_ai = {
+ 5, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ BIP_RANGE(0.3125)
+ }
};
-static const struct comedi_lrange range_pcl812pg2_ai = { 5, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- }
+static const struct comedi_lrange range_pcl812pg2_ai = {
+ 5, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625)
+ }
};
-static const struct comedi_lrange range812_bipolar1_25 = { 1, {
- BIP_RANGE(1.25),
- }
+static const struct comedi_lrange range812_bipolar1_25 = {
+ 1, {
+ BIP_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range812_bipolar0_625 = { 1, {
- BIP_RANGE
- (0.625),
- }
+static const struct comedi_lrange range812_bipolar0_625 = {
+ 1, {
+ BIP_RANGE(0.625)
+ }
};
-static const struct comedi_lrange range812_bipolar0_3125 = { 1, {
- BIP_RANGE
- (0.3125),
- }
+static const struct comedi_lrange range812_bipolar0_3125 = {
+ 1, {
+ BIP_RANGE(0.3125)
+ }
};
-static const struct comedi_lrange range_pcl813b_ai = { 4, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- }
+static const struct comedi_lrange range_pcl813b_ai = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625)
+ }
};
-static const struct comedi_lrange range_pcl813b2_ai = { 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- }
+static const struct comedi_lrange range_pcl813b2_ai = {
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_iso813_1_ai = { 5, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- BIP_RANGE(0.3125),
- }
+static const struct comedi_lrange range_iso813_1_ai = {
+ 5, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ BIP_RANGE(0.3125)
+ }
};
-static const struct comedi_lrange range_iso813_1_2_ai = { 5, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- UNI_RANGE(0.625),
- }
+static const struct comedi_lrange range_iso813_1_2_ai = {
+ 5, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25),
+ UNI_RANGE(0.625)
+ }
};
-static const struct comedi_lrange range_iso813_2_ai = { 4, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- }
+static const struct comedi_lrange range_iso813_2_ai = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625)
+ }
};
-static const struct comedi_lrange range_iso813_2_2_ai = { 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- }
+static const struct comedi_lrange range_iso813_2_2_ai = {
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_acl8113_1_ai = { 4, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- }
+static const struct comedi_lrange range_acl8113_1_ai = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625)
+ }
};
-static const struct comedi_lrange range_acl8113_1_2_ai = { 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- }
+static const struct comedi_lrange range_acl8113_1_2_ai = {
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_acl8113_2_ai = { 3, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- }
+static const struct comedi_lrange range_acl8113_2_ai = {
+ 3, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_acl8113_2_2_ai = { 3, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- }
+static const struct comedi_lrange range_acl8113_2_2_ai = {
+ 3, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5)
+ }
};
-static const struct comedi_lrange range_acl8112dg_ai = { 9, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- BIP_RANGE(10),
- }
+static const struct comedi_lrange range_acl8112dg_ai = {
+ 9, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25),
+ BIP_RANGE(10)
+ }
};
-static const struct comedi_lrange range_acl8112hg_ai = { 12, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01),
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01),
- }
+static const struct comedi_lrange range_acl8112hg_ai = {
+ 12, {
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.005),
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01),
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.01)
+ }
};
-static const struct comedi_lrange range_a821pgh_ai = { 4, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005),
- }
+static const struct comedi_lrange range_a821pgh_ai = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.005)
+ }
};
struct pcl812_board {
@@ -404,9 +420,7 @@ static int pcl812_ai_insn_read(struct comedi_device *dev,
goto conv_finish;
udelay(1);
}
- printk
- ("comedi%d: pcl812: (%s at 0x%lx) A/D insn read timeout\n",
- dev->minor, dev->board_name, dev->iobase);
+ dev_dbg(dev->class_dev, "A/D insn read timeout\n");
outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE);
return -ETIME;
@@ -441,9 +455,7 @@ static int acl8216_ai_insn_read(struct comedi_device *dev,
goto conv_finish;
udelay(1);
}
- printk
- ("comedi%d: pcl812: (%s at 0x%lx) A/D insn read timeout\n",
- dev->minor, dev->board_name, dev->iobase);
+ dev_dbg(dev->class_dev, "A/D insn read timeout\n");
outb(0, dev->iobase + PCL812_MODE);
return -ETIME;
@@ -759,7 +771,7 @@ static irqreturn_t interrupt_pcl812_ai_int(int irq, void *d)
unsigned int mask, timeout;
struct comedi_device *dev = d;
struct pcl812_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned int next_chan;
s->async->events = 0;
@@ -786,10 +798,7 @@ static irqreturn_t interrupt_pcl812_ai_int(int irq, void *d)
}
if (err) {
- printk
- ("comedi%d: pcl812: (%s at 0x%lx) "
- "A/D cmd IRQ without DRDY!\n",
- dev->minor, dev->board_name, dev->iobase);
+ dev_dbg(dev->class_dev, "A/D cmd IRQ without DRDY!\n");
pcl812_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
@@ -865,7 +874,7 @@ static irqreturn_t interrupt_pcl812_ai_dma(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcl812_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned long dma_flags;
int len, bufptr;
unsigned short *ptr;
@@ -1095,7 +1104,6 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
const struct pcl812_board *board = comedi_board(dev);
struct pcl812_private *devpriv;
int ret, subdev;
- unsigned int irq;
unsigned int dma;
unsigned long pages;
struct comedi_subdevice *s;
@@ -1109,31 +1117,13 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!devpriv)
return -ENOMEM;
- irq = 0;
- if (board->IRQbits != 0) { /* board support IRQ */
- irq = it->options[1];
- if (irq) { /* we want to use IRQ */
- if (((1 << irq) & board->IRQbits) == 0) {
- printk
- (", IRQ %u is out of allowed range, "
- "DISABLING IT", irq);
- irq = 0; /* Bad IRQ */
- } else {
- if (request_irq(irq, interrupt_pcl812, 0,
- dev->board_name, dev)) {
- printk
- (", unable to allocate IRQ %u, "
- "DISABLING IT", irq);
- irq = 0; /* Can't use IRQ */
- } else {
- printk(KERN_INFO ", irq=%u", irq);
- }
- }
- }
+ if ((1 << it->options[1]) & board->IRQbits) {
+ ret = request_irq(it->options[1], interrupt_pcl812, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
}
- dev->irq = irq;
-
dma = 0;
devpriv->dma = dma;
if (!dev->irq)
@@ -1141,21 +1131,22 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (board->DMAbits != 0) { /* board support DMA */
dma = it->options[2];
if (((1 << dma) & board->DMAbits) == 0) {
- printk(", DMA is out of allowed range, FAIL!\n");
+ dev_err(dev->class_dev,
+ "DMA is out of allowed range, FAIL!\n");
return -EINVAL; /* Bad DMA */
}
ret = request_dma(dma, dev->board_name);
if (ret) {
- printk(KERN_ERR ", unable to allocate DMA %u, FAIL!\n",
- dma);
+ dev_err(dev->class_dev,
+ "unable to allocate DMA %u, FAIL!\n", dma);
return -EBUSY; /* DMA isn't free */
}
devpriv->dma = dma;
- printk(KERN_INFO ", dma=%u", dma);
pages = 1; /* we want 8KB */
devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[0]) {
- printk(", unable to allocate DMA buffer, FAIL!\n");
+ dev_err(dev->class_dev,
+ "unable to allocate DMA buffer, FAIL!\n");
/*
* maybe experiment with try_to_free_pages()
* will help ....
@@ -1167,7 +1158,8 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->hwdmasize[0] = PAGE_SIZE * (1 << pages);
devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[1]) {
- printk(KERN_ERR ", unable to allocate DMA buffer, FAIL!\n");
+ dev_err(dev->class_dev,
+ "unable to allocate DMA buffer, FAIL!\n");
return -EBUSY;
}
devpriv->dmapages[1] = pages;
@@ -1225,7 +1217,6 @@ no_dma:
break;
}
s->maxdata = board->ai_maxdata;
- s->len_chanlist = MAX_CHANLIST_LEN;
s->range_table = board->rangelist_ai;
if (board->board_type == boardACL8216)
s->insn_read = acl8216_ai_insn_read;
@@ -1233,13 +1224,14 @@ no_dma:
s->insn_read = pcl812_ai_insn_read;
devpriv->use_MPC = board->haveMPC508;
- s->cancel = pcl812_ai_cancel;
if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = MAX_CHANLIST_LEN;
s->do_cmdtest = pcl812_ai_cmdtest;
s->do_cmd = pcl812_ai_cmd;
s->poll = pcl812_ai_poll;
+ s->cancel = pcl812_ai_cancel;
}
switch (board->board_type) {
case boardPCL812PG:
@@ -1269,10 +1261,6 @@ no_dma:
default:
s->range_table = &range_bipolar10;
break;
- printk
- (", incorrect range number %d, changing "
- "to 0 (+/-10V)", it->options[4]);
- break;
}
break;
break;
@@ -1299,10 +1287,6 @@ no_dma:
default:
s->range_table = &range_iso813_1_ai;
break;
- printk
- (", incorrect range number %d, "
- "changing to 0 ", it->options[1]);
- break;
}
break;
case boardACL8113:
@@ -1324,10 +1308,6 @@ no_dma:
default:
s->range_table = &range_acl8113_1_ai;
break;
- printk
- (", incorrect range number %d, "
- "changing to 0 ", it->options[1]);
- break;
}
break;
}
@@ -1341,7 +1321,6 @@ no_dma:
s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
s->n_chan = board->n_aochan;
s->maxdata = 0xfff;
- s->len_chanlist = 1;
s->range_table = board->rangelist_ao;
s->insn_read = pcl812_ao_insn_read;
s->insn_write = pcl812_ao_insn_write;
@@ -1370,7 +1349,6 @@ no_dma:
s->subdev_flags = SDF_READABLE;
s->n_chan = board->n_dichan;
s->maxdata = 1;
- s->len_chanlist = board->n_dichan;
s->range_table = &range_digital;
s->insn_bits = pcl812_di_insn_bits;
subdev++;
@@ -1383,7 +1361,6 @@ no_dma:
s->subdev_flags = SDF_WRITABLE;
s->n_chan = board->n_dochan;
s->maxdata = 1;
- s->len_chanlist = board->n_dochan;
s->range_table = &range_digital;
s->insn_bits = pcl812_do_insn_bits;
subdev++;
@@ -1402,7 +1379,7 @@ no_dma:
break;
case boardA821:
devpriv->max_812_ai_mode0_rangewait = 1;
- devpriv->mode_reg_int = (irq << 4) & 0xf0;
+ devpriv->mode_reg_int = (dev->irq << 4) & 0xf0;
break;
case boardPCL813B:
case boardPCL813:
@@ -1413,7 +1390,6 @@ no_dma:
break;
}
- printk(KERN_INFO "\n");
devpriv->valid = 1;
pcl812_reset(dev);
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index ab9d2bd26a20..e9d470459933 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -44,14 +44,10 @@ Configuration Options:
#include "comedi_fc.h"
#include "8253.h"
-#define DEBUG(x) x
-
/* boards constants */
/* IO space len */
#define PCLx1x_RANGE 16
-/* #define outb(x,y) printk("OUTB(%x, 200+%d)\n", x,y-0x200); outb(x,y) */
-
/* INTEL 8254 counters */
#define PCL816_CTR0 4
#define PCL816_CTR1 5
@@ -85,16 +81,17 @@ Configuration Options:
#define MAGIC_DMA_WORD 0x5a5a
-static const struct comedi_lrange range_pcl816 = { 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- }
+static const struct comedi_lrange range_pcl816 = {
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
struct pcl816_board {
@@ -132,7 +129,6 @@ struct pcl816_private {
unsigned int ai_scans; /* len of scanlist */
unsigned char ai_neverending; /* if=1, then we do neverending record (you must use cancel()) */
- int irq_free; /* 1=have allocated IRQ */
int irq_blocked; /* 1=IRQ now uses any subdev */
int irq_was_now_closed; /* when IRQ finish, there's stored int816_mode for last interrupt */
int int816_mode; /* who now uses IRQ - 1=AI1 int, 2=AI1 dma, 3=AI3 int, 4AI3 dma */
@@ -143,7 +139,6 @@ struct pcl816_private {
unsigned int ai_act_chanlist_pos; /* actual position in MUX list */
unsigned int ai_n_chan; /* how many channels per scan */
 unsigned int ai_poll_ptr; /* how many samples to transfer on poll */
- struct comedi_subdevice *sub_ai; /* ptr to AI subdevice */
};
/*
@@ -176,7 +171,6 @@ static int pcl816_ai_insn_read(struct comedi_device *dev,
int n;
int timeout;
- DPRINTK("mode 0 analog input\n");
/* software trigger, DMA and INT off */
outb(0, dev->iobase + PCL816_CONTROL);
/* clear INT (conversion end) flag */
@@ -228,7 +222,7 @@ static irqreturn_t interrupt_pcl816_ai_mode13_int(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcl816_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned char low, hi;
int timeout = 50; /* wait max 50us */
@@ -322,7 +316,7 @@ static irqreturn_t interrupt_pcl816_ai_mode13_dma(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcl816_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
int len, bufptr, this_dma_buf;
unsigned long dma_flags;
unsigned short *ptr;
@@ -372,8 +366,6 @@ static irqreturn_t interrupt_pcl816(int irq, void *d)
struct comedi_device *dev = d;
struct pcl816_private *devpriv = dev->private;
- DPRINTK("<I>");
-
if (!dev->attached) {
comedi_error(dev, "premature interrupt");
return IRQ_HANDLED;
@@ -389,8 +381,7 @@ static irqreturn_t interrupt_pcl816(int irq, void *d)
}
outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */
- if (!dev->irq || !devpriv->irq_free || !devpriv->irq_blocked ||
- !devpriv->int816_mode) {
+ if (!dev->irq || !devpriv->irq_blocked || !devpriv->int816_mode) {
if (devpriv->irq_was_now_closed) {
devpriv->irq_was_now_closed = 0;
/* comedi_error(dev,"last IRQ.."); */
@@ -405,22 +396,6 @@ static irqreturn_t interrupt_pcl816(int irq, void *d)
/*
==============================================================================
- COMMAND MODE
-*/
-static void pcl816_cmdtest_out(int e, struct comedi_cmd *cmd)
-{
- printk(KERN_INFO "pcl816 e=%d startsrc=%x scansrc=%x convsrc=%x\n", e,
- cmd->start_src, cmd->scan_begin_src, cmd->convert_src);
- printk(KERN_INFO "pcl816 e=%d startarg=%d scanarg=%d convarg=%d\n", e,
- cmd->start_arg, cmd->scan_begin_arg, cmd->convert_arg);
- printk(KERN_INFO "pcl816 e=%d stopsrc=%x scanend=%x\n", e,
- cmd->stop_src, cmd->scan_end_src);
- printk(KERN_INFO "pcl816 e=%d stoparg=%d scanendarg=%d chanlistlen=%d\n",
- e, cmd->stop_arg, cmd->scan_end_arg, cmd->chanlist_len);
-}
-
-/*
-==============================================================================
*/
static int pcl816_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
@@ -429,10 +404,6 @@ static int pcl816_ai_cmdtest(struct comedi_device *dev,
int err = 0;
int tmp, divisor1 = 0, divisor2 = 0;
- DEBUG(printk(KERN_INFO "pcl816 pcl812_ai_cmdtest\n");
- pcl816_cmdtest_out(-1, cmd);
- );
-
/* Step 1 : check if triggers are trivially valid */
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
@@ -566,15 +537,6 @@ static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_neverending = 1;
}
- /* don't we want wake up every scan? */
- if ((cmd->flags & TRIG_WAKE_EOS)) {
- printk(KERN_INFO
- "pl816: You wankt WAKE_EOS but I dont want handle it");
- /* devpriv->ai_eos=1; */
- /* if (devpriv->ai_n_chan==1) */
- /* devpriv->dma=0; // DMA is useless for this situation */
- }
-
if (devpriv->dma) {
bytes = devpriv->hwdmasize[0];
if (!devpriv->ai_neverending) {
@@ -630,7 +592,6 @@ static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
break;
}
- DPRINTK("pcl816 END: pcl812_ai_cmd()\n");
return 0;
}
@@ -685,8 +646,6 @@ static int pcl816_ai_cancel(struct comedi_device *dev,
{
struct pcl816_private *devpriv = dev->private;
-/* DEBUG(printk("pcl816_ai_cancel()\n");) */
-
if (devpriv->irq_blocked > 0) {
switch (devpriv->int816_mode) {
case INT_TYPE_AI1_DMA:
@@ -719,9 +678,7 @@ static int pcl816_ai_cancel(struct comedi_device *dev,
break;
}
}
-
- DEBUG(printk("comedi: pcl816_ai_cancel() successful\n");)
- return 0;
+ return 0;
}
/*
@@ -788,8 +745,8 @@ start_pacer(struct comedi_device *dev, int mode, unsigned int divisor1,
udelay(1);
if (mode == 1) {
- DPRINTK("mode %d, divisor1 %d, divisor2 %d\n", mode, divisor1,
- divisor2);
+ dev_dbg(dev->class_dev, "mode %d, divisor1 %d, divisor2 %d\n",
+ mode, divisor1, divisor2);
outb(divisor2 & 0xff, dev->iobase + PCL816_CTR2);
outb((divisor2 >> 8) & 0xff, dev->iobase + PCL816_CTR2);
outb(divisor1 & 0xff, dev->iobase + PCL816_CTR1);
@@ -823,11 +780,6 @@ check_channel_list(struct comedi_device *dev,
/* first channel is every time ok */
chansegment[0] = chanlist[0];
for (i = 1, seglen = 1; i < chanlen; i++, seglen++) {
- /* build part of chanlist */
- DEBUG(printk(KERN_INFO "%d. %d %d\n", i,
- CR_CHAN(chanlist[i]),
- CR_RANGE(chanlist[i]));)
-
 /* we detect a loop, this must finish */
if (chanlist[0] == chanlist[i])
break;
@@ -835,12 +787,10 @@ check_channel_list(struct comedi_device *dev,
(CR_CHAN(chansegment[i - 1]) + 1) % chanlen;
if (nowmustbechan != CR_CHAN(chanlist[i])) {
/* channel list isn't continuous :-( */
- printk(KERN_WARNING
- "comedi%d: pcl816: channel list must "
- "be continuous! chanlist[%i]=%d but "
- "must be %d or %d!\n", dev->minor,
- i, CR_CHAN(chanlist[i]), nowmustbechan,
- CR_CHAN(chanlist[0]));
+ dev_dbg(dev->class_dev,
+ "channel list must be continuous! chanlist[%i]=%d but must be %d or %d!\n",
+ i, CR_CHAN(chanlist[i]), nowmustbechan,
+ CR_CHAN(chanlist[0]));
return 0;
}
/* well, this is next correct channel in list */
@@ -849,22 +799,15 @@ check_channel_list(struct comedi_device *dev,
/* check whole chanlist */
for (i = 0, segpos = 0; i < chanlen; i++) {
- DEBUG(printk("%d %d=%d %d\n",
- CR_CHAN(chansegment[i % seglen]),
- CR_RANGE(chansegment[i % seglen]),
- CR_CHAN(chanlist[i]),
- CR_RANGE(chanlist[i]));)
if (chanlist[i] != chansegment[i % seglen]) {
- printk(KERN_WARNING
- "comedi%d: pcl816: bad channel or range"
- " number! chanlist[%i]=%d,%d,%d and not"
- " %d,%d,%d!\n", dev->minor, i,
- CR_CHAN(chansegment[i]),
- CR_RANGE(chansegment[i]),
- CR_AREF(chansegment[i]),
- CR_CHAN(chanlist[i % seglen]),
- CR_RANGE(chanlist[i % seglen]),
- CR_AREF(chansegment[i % seglen]));
+ dev_dbg(dev->class_dev,
+ "bad channel or range number! chanlist[%i]=%d,%d,%d and not %d,%d,%d!\n",
+ i, CR_CHAN(chansegment[i]),
+ CR_RANGE(chansegment[i]),
+ CR_AREF(chansegment[i]),
+ CR_CHAN(chanlist[i % seglen]),
+ CR_RANGE(chanlist[i % seglen]),
+ CR_AREF(chansegment[i % seglen]));
return 0; /* chan/gain list is strange */
}
}
@@ -909,7 +852,7 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
const struct pcl816_board *board = comedi_board(dev);
struct pcl816_private *devpriv;
int ret;
- unsigned int irq, dma;
+ unsigned int dma;
unsigned long pages;
/* int i; */
struct comedi_subdevice *s;
@@ -919,7 +862,7 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return ret;
if (pcl816_check(dev->iobase)) {
- printk(KERN_ERR ", I cann't detect board. FAIL!\n");
+ dev_err(dev->class_dev, "I can't detect board. FAIL!\n");
return -EIO;
}
@@ -927,43 +870,20 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!devpriv)
return -ENOMEM;
- /* grab our IRQ */
- irq = 0;
- if (board->IRQbits != 0) { /* board support IRQ */
- irq = it->options[1];
- if (irq) { /* we want to use IRQ */
- if (((1 << irq) & board->IRQbits) == 0) {
- printk
- (", IRQ %u is out of allowed range, "
- "DISABLING IT", irq);
- irq = 0; /* Bad IRQ */
- } else {
- if (request_irq(irq, interrupt_pcl816, 0,
- dev->board_name, dev)) {
- printk
- (", unable to allocate IRQ %u, "
- "DISABLING IT", irq);
- irq = 0; /* Can't use IRQ */
- } else {
- printk(KERN_INFO ", irq=%u", irq);
- }
- }
- }
+ if ((1 << it->options[1]) & board->IRQbits) {
+ ret = request_irq(it->options[1], interrupt_pcl816, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
}
- dev->irq = irq;
- if (irq) /* 1=we have allocated irq */
- devpriv->irq_free = 1;
- else
- devpriv->irq_free = 0;
-
devpriv->irq_blocked = 0; /* number of subdevice which use IRQ */
devpriv->int816_mode = 0; /* mode of irq */
/* grab our DMA */
dma = 0;
devpriv->dma = dma;
- if (!devpriv->irq_free)
+ if (!dev->irq)
goto no_dma; /* if we haven't IRQ, we can't use DMA */
if (board->DMAbits != 0) { /* board support DMA */
@@ -972,23 +892,24 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
goto no_dma; /* DMA disabled */
if (((1 << dma) & board->DMAbits) == 0) {
- printk(", DMA is out of allowed range, FAIL!\n");
+ dev_err(dev->class_dev,
+ "DMA is out of allowed range, FAIL!\n");
return -EINVAL; /* Bad DMA */
}
ret = request_dma(dma, dev->board_name);
if (ret) {
- printk(KERN_ERR
- ", unable to allocate DMA %u, FAIL!\n", dma);
+ dev_err(dev->class_dev,
+ "unable to allocate DMA %u, FAIL!\n", dma);
return -EBUSY; /* DMA isn't free */
}
devpriv->dma = dma;
- printk(KERN_INFO ", dma=%u", dma);
pages = 2; /* we need 16KB */
devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[0]) {
- printk(", unable to allocate DMA buffer, FAIL!\n");
+ dev_err(dev->class_dev,
+ "unable to allocate DMA buffer, FAIL!\n");
/*
* maybe experiment with try_to_free_pages()
* will help ....
@@ -998,13 +919,11 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->dmapages[0] = pages;
devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]);
devpriv->hwdmasize[0] = (1 << pages) * PAGE_SIZE;
- /* printk("%d %d %ld, ",devpriv->dmapages[0],devpriv->hwdmasize[0],PAGE_SIZE); */
devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[1]) {
- printk(KERN_ERR
- ", unable to allocate DMA buffer, "
- "FAIL!\n");
+ dev_err(dev->class_dev,
+ "unable to allocate DMA buffer, FAIL!\n");
return -EBUSY;
}
devpriv->dmapages[1] = pages;
@@ -1029,20 +948,20 @@ no_dma:
s = &dev->subdevices[0];
if (board->n_aichan > 0) {
s->type = COMEDI_SUBD_AI;
- devpriv->sub_ai = s;
- dev->read_subdev = s;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->subdev_flags = SDF_CMD_READ | SDF_DIFF;
s->n_chan = board->n_aichan;
- s->subdev_flags |= SDF_DIFF;
- /* printk (", %dchans DIFF DAC - %d", s->n_chan, i); */
s->maxdata = board->ai_maxdata;
- s->len_chanlist = board->ai_chanlist;
s->range_table = board->ai_range_type;
- s->cancel = pcl816_ai_cancel;
- s->do_cmdtest = pcl816_ai_cmdtest;
- s->do_cmd = pcl816_ai_cmd;
- s->poll = pcl816_ai_poll;
s->insn_read = pcl816_ai_insn_read;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = board->ai_chanlist;
+ s->do_cmdtest = pcl816_ai_cmdtest;
+ s->do_cmd = pcl816_ai_cmd;
+ s->poll = pcl816_ai_poll;
+ s->cancel = pcl816_ai_cancel;
+ }
} else {
s->type = COMEDI_SUBD_UNUSED;
}
@@ -1075,8 +994,6 @@ case COMEDI_SUBD_DO:
pcl816_reset(dev);
- printk("\n");
-
return 0;
}
@@ -1085,7 +1002,7 @@ static void pcl816_detach(struct comedi_device *dev)
struct pcl816_private *devpriv = dev->private;
if (dev->private) {
- pcl816_ai_cancel(dev, devpriv->sub_ai);
+ pcl816_ai_cancel(dev, dev->read_subdev);
pcl816_reset(dev);
if (devpriv->dma)
free_dma(devpriv->dma);
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index 9e4d7e860509..fa1758ad49d5 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -188,56 +188,78 @@ A word or two about DMA. Driver support DMA operations at two ways:
#define MAGIC_DMA_WORD 0x5a5a
-static const struct comedi_lrange range_pcl818h_ai = { 9, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- BIP_RANGE(10),
- }
+static const struct comedi_lrange range_pcl818h_ai = {
+ 9, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25),
+ BIP_RANGE(10)
+ }
};
-static const struct comedi_lrange range_pcl818hg_ai = { 10, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01),
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01),
- }
+static const struct comedi_lrange range_pcl818hg_ai = {
+ 10, {
+ BIP_RANGE(5),
+ BIP_RANGE(0.5),
+ BIP_RANGE(0.05),
+ BIP_RANGE(0.005),
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01),
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.01)
+ }
};
-static const struct comedi_lrange range_pcl818l_l_ai = { 4, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- }
+static const struct comedi_lrange range_pcl818l_l_ai = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625)
+ }
};
-static const struct comedi_lrange range_pcl818l_h_ai = { 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- }
+static const struct comedi_lrange range_pcl818l_h_ai = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25)
+ }
+};
+
+static const struct comedi_lrange range718_bipolar1 = {
+ 1, {
+ BIP_RANGE(1)
+ }
};
-static const struct comedi_lrange range718_bipolar1 = { 1, {BIP_RANGE(1),} };
static const struct comedi_lrange range718_bipolar0_5 = {
- 1, {BIP_RANGE(0.5),} };
-static const struct comedi_lrange range718_unipolar2 = { 1, {UNI_RANGE(2),} };
-static const struct comedi_lrange range718_unipolar1 = { 1, {BIP_RANGE(1),} };
+ 1, {
+ BIP_RANGE(0.5)
+ }
+};
+
+static const struct comedi_lrange range718_unipolar2 = {
+ 1, {
+ UNI_RANGE(2)
+ }
+};
+
+static const struct comedi_lrange range718_unipolar1 = {
+ 1, {
+ BIP_RANGE(1)
+ }
+};
struct pcl818_board {
@@ -274,7 +296,6 @@ struct pcl818_private {
unsigned char neverending_ai; /* if=1, then we do neverending record (you must use cancel()) */
 unsigned int ns_min; /* minimal allowed delay between samples (in us) for actual card */
 int i8253_osc_base; /* 1/frequency of on board oscillator in ns */
- int irq_free; /* 1=have allocated IRQ */
int irq_blocked; /* 1=IRQ now uses any subdev */
int irq_was_now_closed; /* when IRQ finish, there's stored int818_mode for last interrupt */
int ai_mode; /* who now uses IRQ - 1=AI1 int, 2=AI1 dma, 3=AI3 int, 4AI3 dma */
@@ -291,7 +312,6 @@ struct pcl818_private {
unsigned int ai_data_len; /* len of data buffer */
unsigned int ai_timer1; /* timers */
unsigned int ai_timer2;
- struct comedi_subdevice *sub_ai; /* ptr to AI subdevice */
unsigned char usefifo; /* 1=use fifo */
unsigned int ao_readback[2];
};
@@ -441,7 +461,7 @@ static irqreturn_t interrupt_pcl818_ai_mode13_int(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcl818_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned char low;
int timeout = 50; /* wait max 50us */
@@ -463,10 +483,10 @@ conv_finish:
outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
if ((low & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! */
- printk
- ("comedi: A/D mode1/3 IRQ - channel dropout %x!=%x !\n",
- (low & 0xf),
- devpriv->act_chanlist[devpriv->act_chanlist_pos]);
+ dev_dbg(dev->class_dev,
+ "A/D mode1/3 IRQ - channel dropout %x!=%x !\n",
+ (low & 0xf),
+ devpriv->act_chanlist[devpriv->act_chanlist_pos]);
pcl818_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
@@ -478,7 +498,6 @@ conv_finish:
s->async->cur_chan++;
if (s->async->cur_chan >= devpriv->ai_n_chan) {
- /* printk("E"); */
s->async->cur_chan = 0;
devpriv->ai_act_scan--;
}
@@ -501,7 +520,7 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcl818_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
int i, len, bufptr;
unsigned long flags;
unsigned short *ptr;
@@ -523,7 +542,6 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d)
release_dma_lock(flags);
enable_dma(devpriv->dma);
}
- printk("comedi: A/D mode1/3 IRQ \n");
devpriv->dma_runs_to_end--;
outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
@@ -534,11 +552,11 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d)
for (i = 0; i < len; i++) {
if ((ptr[bufptr] & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! */
- printk
- ("comedi: A/D mode1/3 DMA - channel dropout %d(card)!=%d(chanlist) at %d !\n",
- (ptr[bufptr] & 0xf),
- devpriv->act_chanlist[devpriv->act_chanlist_pos],
- devpriv->act_chanlist_pos);
+ dev_dbg(dev->class_dev,
+ "A/D mode1/3 DMA - channel dropout %d(card)!=%d(chanlist) at %d !\n",
+ (ptr[bufptr] & 0xf),
+ devpriv->act_chanlist[devpriv->act_chanlist_pos],
+ devpriv->act_chanlist_pos);
pcl818_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
@@ -562,7 +580,6 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d)
pcl818_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
comedi_event(dev, s);
- /* printk("done int ai13 dma\n"); */
return IRQ_HANDLED;
}
}
@@ -580,7 +597,7 @@ static irqreturn_t interrupt_pcl818_ai_mode13_fifo(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcl818_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
int i, len;
unsigned char lo;
@@ -612,10 +629,10 @@ static irqreturn_t interrupt_pcl818_ai_mode13_fifo(int irq, void *d)
for (i = 0; i < len; i++) {
lo = inb(dev->iobase + PCL818_FI_DATALO);
if ((lo & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! */
- printk
- ("comedi: A/D mode1/3 FIFO - channel dropout %d!=%d !\n",
- (lo & 0xf),
- devpriv->act_chanlist[devpriv->act_chanlist_pos]);
+ dev_dbg(dev->class_dev,
+ "A/D mode1/3 FIFO - channel dropout %d!=%d !\n",
+ (lo & 0xf),
+ devpriv->act_chanlist[devpriv->act_chanlist_pos]);
pcl818_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
@@ -661,7 +678,6 @@ static irqreturn_t interrupt_pcl818(int irq, void *d)
comedi_error(dev, "premature interrupt");
return IRQ_HANDLED;
}
- /* printk("I\n"); */
if (devpriv->irq_blocked && devpriv->irq_was_now_closed) {
if ((devpriv->neverending_ai || (!devpriv->neverending_ai &&
@@ -673,10 +689,9 @@ static irqreturn_t interrupt_pcl818(int irq, void *d)
being reprogrammed while a DMA transfer is in
progress.
*/
- struct comedi_subdevice *s = &dev->subdevices[0];
devpriv->ai_act_scan = 0;
devpriv->neverending_ai = 0;
- pcl818_ai_cancel(dev, s);
+ pcl818_ai_cancel(dev, dev->read_subdev);
}
outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
@@ -705,8 +720,7 @@ static irqreturn_t interrupt_pcl818(int irq, void *d)
outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
- if ((!dev->irq) || (!devpriv->irq_free) || (!devpriv->irq_blocked)
- || (!devpriv->ai_mode)) {
+ if (!devpriv->irq_blocked || !devpriv->ai_mode) {
comedi_error(dev, "bad IRQ!");
return IRQ_NONE;
}
@@ -726,7 +740,6 @@ static void pcl818_ai_mode13dma_int(int mode, struct comedi_device *dev,
unsigned int flags;
unsigned int bytes;
- printk("mode13dma_int, mode: %d\n", mode);
disable_dma(devpriv->dma); /* disable dma */
bytes = devpriv->hwdmasize[0];
if (!devpriv->neverending_ai) {
@@ -753,7 +766,7 @@ static void pcl818_ai_mode13dma_int(int mode, struct comedi_device *dev,
} else {
devpriv->ai_mode = INT_TYPE_AI3_DMA;
outb(0x86 | (dev->irq << 4), dev->iobase + PCL818_CONTROL); /* Ext trig+IRQ+DMA */
- };
+ }
}
/*
@@ -768,12 +781,6 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
int divisor1 = 0, divisor2 = 0;
unsigned int seglen;
- dev_dbg(dev->class_dev, "pcl818_ai_cmd_mode()\n");
- if (!dev->irq) {
- comedi_error(dev, "IRQ not defined!");
- return -EINVAL;
- }
-
if (devpriv->irq_blocked)
return -EBUSY;
@@ -824,7 +831,6 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
case 0:
if (!devpriv->usefifo) {
/* IRQ */
- /* printk("IRQ\n"); */
if (mode == 1) {
devpriv->ai_mode = INT_TYPE_AI1_INT;
/* Pacer+IRQ */
@@ -853,7 +859,6 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
start_pacer(dev, mode, divisor1, divisor2);
- dev_dbg(dev->class_dev, "pcl818_ai_cmd_mode() end\n");
return 0;
}
@@ -899,10 +904,6 @@ static int check_channel_list(struct comedi_device *dev,
chansegment[0] = chanlist[0];
/* build part of chanlist */
for (i = 1, seglen = 1; i < n_chan; i++, seglen++) {
-
- /* printk("%d. %d * %d\n",i,
- * CR_CHAN(it->chanlist[i]),CR_RANGE(it->chanlist[i]));*/
-
 /* we detect a loop, this must finish */
if (chanlist[0] == chanlist[i])
@@ -910,10 +911,10 @@ static int check_channel_list(struct comedi_device *dev,
nowmustbechan =
(CR_CHAN(chansegment[i - 1]) + 1) % s->n_chan;
if (nowmustbechan != CR_CHAN(chanlist[i])) { /* channel list isn't continuous :-( */
- printk
- ("comedi%d: pcl818: channel list must be continuous! chanlist[%i]=%d but must be %d or %d!\n",
- dev->minor, i, CR_CHAN(chanlist[i]),
- nowmustbechan, CR_CHAN(chanlist[0]));
+ dev_dbg(dev->class_dev,
+ "channel list must be continuous! chanlist[%i]=%d but must be %d or %d!\n",
+ i, CR_CHAN(chanlist[i]), nowmustbechan,
+ CR_CHAN(chanlist[0]));
return 0;
}
/* well, this is next correct channel in list */
@@ -922,23 +923,21 @@ static int check_channel_list(struct comedi_device *dev,
/* check whole chanlist */
for (i = 0, segpos = 0; i < n_chan; i++) {
- /* printk("%d %d=%d %d\n",CR_CHAN(chansegment[i%seglen]),CR_RANGE(chansegment[i%seglen]),CR_CHAN(it->chanlist[i]),CR_RANGE(it->chanlist[i])); */
if (chanlist[i] != chansegment[i % seglen]) {
- printk
- ("comedi%d: pcl818: bad channel or range number! chanlist[%i]=%d,%d,%d and not %d,%d,%d!\n",
- dev->minor, i, CR_CHAN(chansegment[i]),
- CR_RANGE(chansegment[i]),
- CR_AREF(chansegment[i]),
- CR_CHAN(chanlist[i % seglen]),
- CR_RANGE(chanlist[i % seglen]),
- CR_AREF(chansegment[i % seglen]));
+ dev_dbg(dev->class_dev,
+ "bad channel or range number! chanlist[%i]=%d,%d,%d and not %d,%d,%d!\n",
+ i, CR_CHAN(chansegment[i]),
+ CR_RANGE(chansegment[i]),
+ CR_AREF(chansegment[i]),
+ CR_CHAN(chanlist[i % seglen]),
+ CR_RANGE(chanlist[i % seglen]),
+ CR_AREF(chansegment[i % seglen]));
return 0; /* chan/gain list is strange */
}
}
} else {
seglen = 1;
}
- printk("check_channel_list: seglen %d\n", seglen);
return seglen;
}
@@ -1067,7 +1066,6 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
struct comedi_cmd *cmd = &s->async->cmd;
int retval;
- dev_dbg(dev->class_dev, "pcl818_ai_cmd()\n");
devpriv->ai_n_chan = cmd->chanlist_len;
devpriv->ai_chanlist = cmd->chanlist;
devpriv->ai_flags = cmd->flags;
@@ -1084,7 +1082,6 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (cmd->convert_src == TRIG_TIMER) { /* mode 1 */
devpriv->ai_timer1 = cmd->convert_arg;
retval = pcl818_ai_cmd_mode(1, dev, s);
- dev_dbg(dev->class_dev, "pcl818_ai_cmd() end\n");
return retval;
}
if (cmd->convert_src == TRIG_EXT) { /* mode 3 */
@@ -1105,7 +1102,6 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
struct pcl818_private *devpriv = dev->private;
if (devpriv->irq_blocked > 0) {
- dev_dbg(dev->class_dev, "pcl818_ai_cancel()\n");
devpriv->irq_was_now_closed = 1;
switch (devpriv->ai_mode) {
@@ -1149,7 +1145,6 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
}
end:
- dev_dbg(dev->class_dev, "pcl818_ai_cancel() end\n");
return 0;
}
@@ -1216,7 +1211,6 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
const struct pcl818_board *board = comedi_board(dev);
struct pcl818_private *devpriv;
int ret;
- unsigned int irq;
int dma;
unsigned long pages;
struct comedi_subdevice *s;
@@ -1240,50 +1234,28 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -EIO;
}
- /* grab our IRQ */
- irq = 0;
- if (board->IRQbits != 0) { /* board support IRQ */
- irq = it->options[1];
- if (irq) { /* we want to use IRQ */
- if (((1 << irq) & board->IRQbits) == 0) {
- printk
- (", IRQ %u is out of allowed range, DISABLING IT",
- irq);
- irq = 0; /* Bad IRQ */
- } else {
- if (request_irq(irq, interrupt_pcl818, 0,
- dev->board_name, dev)) {
- printk
- (", unable to allocate IRQ %u, DISABLING IT",
- irq);
- irq = 0; /* Can't use IRQ */
- } else {
- printk(KERN_DEBUG "irq=%u", irq);
- }
- }
- }
+ if ((1 << it->options[1]) & board->IRQbits) {
+ ret = request_irq(it->options[1], interrupt_pcl818, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
}
- dev->irq = irq;
- if (irq)
- devpriv->irq_free = 1; /* 1=we have allocated irq */
- else
- devpriv->irq_free = 0;
-
devpriv->irq_blocked = 0; /* number of subdevice which use IRQ */
devpriv->ai_mode = 0; /* mode of irq */
/* grab our DMA */
dma = 0;
devpriv->dma = dma;
- if (!devpriv->irq_free)
+ if (!dev->irq)
goto no_dma; /* if we haven't IRQ, we can't use DMA */
if (board->DMAbits != 0) { /* board support DMA */
dma = it->options[2];
if (dma < 1)
goto no_dma; /* DMA disabled */
if (((1 << dma) & board->DMAbits) == 0) {
- printk(KERN_ERR "DMA is out of allowed range, FAIL!\n");
+ dev_err(dev->class_dev,
+ "DMA is out of allowed range, FAIL!\n");
return -EINVAL; /* Bad DMA */
}
ret = request_dma(dma, dev->board_name);
@@ -1298,7 +1270,6 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->dmapages[0] = pages;
devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]);
devpriv->hwdmasize[0] = (1 << pages) * PAGE_SIZE;
- /* printk("%d %d %ld, ",devpriv->dmapages[0],devpriv->hwdmasize[0],PAGE_SIZE); */
devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[1])
return -EBUSY;
@@ -1318,27 +1289,24 @@ no_dma:
s->type = COMEDI_SUBD_UNUSED;
} else {
s->type = COMEDI_SUBD_AI;
- devpriv->sub_ai = s;
s->subdev_flags = SDF_READABLE;
if (check_single_ended(dev->iobase)) {
s->n_chan = board->n_aichan_se;
s->subdev_flags |= SDF_COMMON | SDF_GROUND;
- printk(", %dchans S.E. DAC", s->n_chan);
} else {
s->n_chan = board->n_aichan_diff;
s->subdev_flags |= SDF_DIFF;
- printk(", %dchans DIFF DAC", s->n_chan);
}
s->maxdata = board->ai_maxdata;
- s->len_chanlist = s->n_chan;
s->range_table = board->ai_range_type;
- s->cancel = pcl818_ai_cancel;
s->insn_read = pcl818_ai_insn_read;
- if (irq) {
+ if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
s->do_cmdtest = ai_cmdtest;
s->do_cmd = ai_cmd;
+ s->cancel = pcl818_ai_cancel;
}
if (board->is_818) {
if ((it->options[4] == 1) || (it->options[4] == 10))
@@ -1387,7 +1355,6 @@ no_dma:
s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
s->n_chan = board->n_aochan;
s->maxdata = board->ao_maxdata;
- s->len_chanlist = board->n_aochan;
s->range_table = board->ao_range_type;
s->insn_read = pcl818_ao_insn_read;
s->insn_write = pcl818_ao_insn_write;
@@ -1412,7 +1379,6 @@ no_dma:
s->subdev_flags = SDF_READABLE;
s->n_chan = board->n_dichan;
s->maxdata = 1;
- s->len_chanlist = board->n_dichan;
s->range_table = &range_digital;
s->insn_bits = pcl818_di_insn_bits;
}
@@ -1425,7 +1391,6 @@ no_dma:
s->subdev_flags = SDF_WRITABLE;
s->n_chan = board->n_dochan;
s->maxdata = 1;
- s->len_chanlist = board->n_dochan;
s->range_table = &range_digital;
s->insn_bits = pcl818_do_insn_bits;
}
@@ -1446,8 +1411,6 @@ no_dma:
pcl818_reset(dev);
- printk("\n");
-
return 0;
}
@@ -1456,7 +1419,7 @@ static void pcl818_detach(struct comedi_device *dev)
struct pcl818_private *devpriv = dev->private;
if (devpriv) {
- pcl818_ai_cancel(dev, devpriv->sub_ai);
+ pcl818_ai_cancel(dev, dev->read_subdev);
pcl818_reset(dev);
if (devpriv->dma)
free_dma(devpriv->dma);
diff --git a/drivers/staging/comedi/drivers/pcm3724.c b/drivers/staging/comedi/drivers/pcm3724.c
index cc1dc7f66e5b..f4a49bd649f0 100644
--- a/drivers/staging/comedi/drivers/pcm3724.c
+++ b/drivers/staging/comedi/drivers/pcm3724.c
@@ -70,14 +70,11 @@ static int subdev_8255_cb(int dir, int port, int data, unsigned long arg)
{
unsigned long iobase = arg;
unsigned char inbres;
- /* printk("8255cb %d %d %d %lx\n", dir,port,data,arg); */
if (dir) {
- /* printk("8255 cb outb(%x, %lx)\n", data, iobase+port); */
outb(data, iobase + port);
return 0;
} else {
inbres = inb(iobase + port);
- /* printk("8255 cb inb(%lx) = %x\n", iobase+port, inbres); */
return inbres;
}
}
@@ -137,8 +134,6 @@ static void do_3724_config(struct comedi_device *dev,
port_8255_cfg = dev->iobase + SIZE_8255 + _8255_CR;
outb(buffer_config, dev->iobase + 8); /* update buffer register */
- /* printk("pcm3724 buffer_config (%lx) %d, %x\n",
- dev->iobase + _8255_CR, chanspec, buffer_config); */
outb(config, port_8255_cfg);
}
@@ -177,7 +172,6 @@ static void enable_chan(struct comedi_device *dev, struct comedi_subdevice *s,
if (priv->dio_2 & 0xff)
gatecfg |= GATE_A1;
- /* printk("gate control %x\n", gatecfg); */
outb(gatecfg, dev->iobase + 9);
}
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index 14cee3ac92c5..c388f7f32227 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -1,76 +1,76 @@
/*
- comedi/drivers/pcmmio.c
- Driver for Winsystems PC-104 based multifunction IO board.
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2007 Calin A. Culianu <calin@ajvar.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * pcmmio.c
+ * Driver for Winsystems PC-104 based multifunction IO board.
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2007 Calin A. Culianu <calin@ajvar.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
/*
-Driver: pcmmio
-Description: A driver for the PCM-MIO multifunction board
-Devices: [Winsystems] PCM-MIO (pcmmio)
-Author: Calin Culianu <calin@ajvar.org>
-Updated: Wed, May 16 2007 16:21:10 -0500
-Status: works
-
-A driver for the relatively new PCM-MIO multifunction board from
-Winsystems. This board is a PC-104 based I/O board. It contains
-four subdevices:
- subdevice 0 - 16 channels of 16-bit AI
- subdevice 1 - 8 channels of 16-bit AO
- subdevice 2 - first 24 channels of the 48 channel of DIO
- (with edge-triggered interrupt support)
- subdevice 3 - last 24 channels of the 48 channel DIO
- (no interrupt support for this bank of channels)
-
- Some notes:
-
- Synchronous reads and writes are the only things implemented for AI and AO,
- even though the hardware itself can do streaming acquisition, etc. Anyone
- want to add asynchronous I/O for AI/AO as a feature? Be my guest...
-
- Asynchronous I/O for the DIO subdevices *is* implemented, however! They are
- basically edge-triggered interrupts for any configuration of the first
- 24 DIO-lines.
-
- Also note that this interrupt support is untested.
-
- A few words about edge-detection IRQ support (commands on DIO):
-
- * To use edge-detection IRQ support for the DIO subdevice, pass the IRQ
- of the board to the comedi_config command. The board IRQ is not jumpered
- but rather configured through software, so any IRQ from 1-15 is OK.
-
- * Due to the genericity of the comedi API, you need to create a special
- comedi_command in order to use edge-triggered interrupts for DIO.
-
- * Use comedi_commands with TRIG_NOW. Your callback will be called each
- time an edge is detected on the specified DIO line(s), and the data
- values will be two sample_t's, which should be concatenated to form
- one 32-bit unsigned int. This value is the mask of channels that had
- edges detected from your channel list. Note that the bits positions
- in the mask correspond to positions in your chanlist when you
- specified the command and *not* channel id's!
-
- * To set the polarity of the edge-detection interrupts pass a nonzero value
- for either CR_RANGE or CR_AREF for edge-up polarity, or a zero
- value for both CR_RANGE and CR_AREF if you want edge-down polarity.
-
-Configuration Options:
- [0] - I/O port base address
- [1] - IRQ (optional -- for edge-detect interrupt support only,
- leave out if you don't need this feature)
-*/
+ * Driver: pcmmio
+ * Description: A driver for the PCM-MIO multifunction board
+ * Devices: [Winsystems] PCM-MIO (pcmmio)
+ * Author: Calin Culianu <calin@ajvar.org>
+ * Updated: Wed, May 16 2007 16:21:10 -0500
+ * Status: works
+ *
+ * A driver for the PCM-MIO multifunction board from Winsystems. This
+ * is a PC-104 based I/O board. It contains four subdevices:
+ *
+ * subdevice 0 - 16 channels of 16-bit AI
+ * subdevice 1 - 8 channels of 16-bit AO
+ * subdevice 2 - first 24 channels of the 48-channel DIO
+ * (with edge-triggered interrupt support)
+ * subdevice 3 - last 24 channels of the 48-channel DIO
+ * (no interrupt support for this bank of channels)
+ *
+ * Some notes:
+ *
+ * Synchronous reads and writes are the only things implemented for analog
+ * input and output. The hardware itself can do streaming acquisition, etc.
+ *
+ * Asynchronous I/O for the DIO subdevices *is* implemented, however! They
+ * are basically edge-triggered interrupts for any configuration of the
+ * channels in subdevice 2.
+ *
+ * Also note that this interrupt support is untested.
+ *
+ * A few words about edge-detection IRQ support (commands on DIO):
+ *
+ * To use edge-detection IRQ support for the DIO subdevice, pass the IRQ
+ * of the board to the comedi_config command. The board IRQ is not jumpered
+ * but rather configured through software, so any IRQ from 1-15 is OK.
+ *
+ * Due to the genericity of the comedi API, you need to create a special
+ * comedi_command in order to use edge-triggered interrupts for DIO.
+ *
+ * Use comedi_commands with TRIG_NOW. Your callback will be called each
+ * time an edge is detected on the specified DIO line(s), and the data
+ * values will be two sample_t's, which should be concatenated to form
+ * one 32-bit unsigned int. This value is the mask of channels that had
+ * edges detected from your channel list. Note that the bit positions
+ * in the mask correspond to positions in your chanlist when you
+ * specified the command and *not* channel id's!
+ *
+ * To set the polarity of the edge-detection interrupts pass a nonzero value
+ * for either CR_RANGE or CR_AREF for edge-up polarity, or a zero
+ * value for both CR_RANGE and CR_AREF if you want edge-down polarity.
+ *
+ * Configuration Options:
+ * [0] - I/O port base address
+ * [1] - IRQ (optional -- for edge-detect interrupt support only,
+ * leave out if you don't need this feature)
+ */
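
For readers new to the command interface described above, a rough user-space sketch (using comedilib) of setting up such an edge-detect acquisition follows. Only the use of subdevice 2, TRIG_NOW and a nonzero CR_RANGE for edge-up polarity come from the comment above; the device path, the remaining trigger sources and the two-channel chanlist are assumptions of this sketch, not part of the patch.

	#include <stdio.h>
	#include <string.h>
	#include <comedilib.h>

	int main(void)
	{
		comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */
		unsigned int chanlist[2];
		comedi_cmd cmd;

		if (!dev)
			return 1;

		/* watch DIO channels 0 and 1 of subdevice 2, edge-up polarity */
		chanlist[0] = CR_PACK(0, 1, 0);	/* nonzero range -> rising edge */
		chanlist[1] = CR_PACK(1, 1, 0);

		memset(&cmd, 0, sizeof(cmd));
		cmd.subdev = 2;			/* first DIO subdevice (IRQ capable) */
		cmd.start_src = TRIG_NOW;
		cmd.scan_begin_src = TRIG_EXT;	/* one scan per detected edge */
		cmd.convert_src = TRIG_NOW;
		cmd.scan_end_src = TRIG_COUNT;
		cmd.scan_end_arg = 2;
		cmd.stop_src = TRIG_NONE;	/* run until cancelled */
		cmd.chanlist = chanlist;
		cmd.chanlist_len = 2;

		if (comedi_command(dev, &cmd) < 0)
			comedi_perror("comedi_command");

		comedi_close(dev);
		return 0;
	}

Each scan read back from the device is the edge mask, with bit positions matching chanlist positions as described above.
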
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -80,232 +80,211 @@ Configuration Options:
#include "comedi_fc.h"
-/* This stuff is all from pcmuio.c -- it refers to the DIO subdevices only */
-#define CHANS_PER_PORT 8
-#define PORTS_PER_ASIC 6
-#define INTR_PORTS_PER_ASIC 3
-#define MAX_CHANS_PER_SUBDEV 24 /* number of channels per comedi subdevice */
-#define PORTS_PER_SUBDEV (MAX_CHANS_PER_SUBDEV/CHANS_PER_PORT)
-#define CHANS_PER_ASIC (CHANS_PER_PORT*PORTS_PER_ASIC)
-#define INTR_CHANS_PER_ASIC 24
-#define INTR_PORTS_PER_SUBDEV (INTR_CHANS_PER_ASIC/CHANS_PER_PORT)
-#define MAX_DIO_CHANS (PORTS_PER_ASIC*1*CHANS_PER_PORT)
-#define MAX_ASICS (MAX_DIO_CHANS/CHANS_PER_ASIC)
-#define CALC_N_DIO_SUBDEVS(nchans) ((nchans)/MAX_CHANS_PER_SUBDEV + (!!((nchans)%MAX_CHANS_PER_SUBDEV)) /*+ (nchans > INTR_CHANS_PER_ASIC ? 2 : 1)*/)
-/* IO Memory sizes */
-#define ASIC_IOSIZE (0x0B)
-#define PCMMIO48_IOSIZE ASIC_IOSIZE
-
-/* Some offsets - these are all in the 16byte IO memory offset from
- the base address. Note that there is a paging scheme to swap out
- offsets 0x8-0xA using the PAGELOCK register. See the table below.
-
- Register(s) Pages R/W? Description
- --------------------------------------------------------------
- REG_PORTx All R/W Read/Write/Configure IO
- REG_INT_PENDING All ReadOnly Quickly see which INT_IDx has int.
- REG_PAGELOCK All WriteOnly Select a page
- REG_POLx Pg. 1 only WriteOnly Select edge-detection polarity
- REG_ENABx Pg. 2 only WriteOnly Enable/Disable edge-detect. int.
- REG_INT_IDx Pg. 3 only R/W See which ports/bits have ints.
+/*
+ * Register I/O map
*/
-#define REG_PORT0 0x0
-#define REG_PORT1 0x1
-#define REG_PORT2 0x2
-#define REG_PORT3 0x3
-#define REG_PORT4 0x4
-#define REG_PORT5 0x5
-#define REG_INT_PENDING 0x6
-#define REG_PAGELOCK 0x7 /*
- * page selector register, upper 2 bits select
- * a page and bits 0-5 are used to 'lock down'
- * a particular port above to make it readonly.
- */
-#define REG_POL0 0x8
-#define REG_POL1 0x9
-#define REG_POL2 0xA
-#define REG_ENAB0 0x8
-#define REG_ENAB1 0x9
-#define REG_ENAB2 0xA
-#define REG_INT_ID0 0x8
-#define REG_INT_ID1 0x9
-#define REG_INT_ID2 0xA
-
-#define NUM_PAGED_REGS 3
-#define NUM_PAGES 4
-#define FIRST_PAGED_REG 0x8
-#define REG_PAGE_BITOFFSET 6
-#define REG_LOCK_BITOFFSET 0
-#define REG_PAGE_MASK (~((0x1<<REG_PAGE_BITOFFSET)-1))
-#define REG_LOCK_MASK (~(REG_PAGE_MASK))
-#define PAGE_POL 1
-#define PAGE_ENAB 2
-#define PAGE_INT_ID 3
-
-static const struct comedi_lrange ranges_ai = {
- 4, {RANGE(-5., 5.), RANGE(-10., 10.), RANGE(0., 5.), RANGE(0., 10.)}
-};
+#define PCMMIO_AI_LSB_REG 0x00
+#define PCMMIO_AI_MSB_REG 0x01
+#define PCMMIO_AI_CMD_REG 0x02
+#define PCMMIO_AI_CMD_SE (1 << 7)
+#define PCMMIO_AI_CMD_ODD_CHAN (1 << 6)
+#define PCMMIO_AI_CMD_CHAN_SEL(x) (((x) & 0x3) << 4)
+#define PCMMIO_AI_CMD_RANGE(x) (((x) & 0x3) << 2)
+#define PCMMIO_RESOURCE_REG 0x02
+#define PCMMIO_RESOURCE_IRQ(x) (((x) & 0xf) << 0)
+#define PCMMIO_AI_STATUS_REG 0x03
+#define PCMMIO_AI_STATUS_DATA_READY (1 << 7)
+#define PCMMIO_AI_STATUS_DATA_DMA_PEND (1 << 6)
+#define PCMMIO_AI_STATUS_CMD_DMA_PEND (1 << 5)
+#define PCMMIO_AI_STATUS_IRQ_PEND (1 << 4)
+#define PCMMIO_AI_STATUS_DATA_DRQ_ENA (1 << 2)
+#define PCMMIO_AI_STATUS_REG_SEL (1 << 3)
+#define PCMMIO_AI_STATUS_CMD_DRQ_ENA (1 << 1)
+#define PCMMIO_AI_STATUS_IRQ_ENA (1 << 0)
+#define PCMMIO_AI_RES_ENA_REG 0x03
+#define PCMMIO_AI_RES_ENA_CMD_REG_ACCESS (0 << 3)
+#define PCMMIO_AI_RES_ENA_AI_RES_ACCESS (1 << 3)
+#define PCMMIO_AI_RES_ENA_DIO_RES_ACCESS (1 << 4)
+#define PCMMIO_AI_2ND_ADC_OFFSET 0x04
+
+#define PCMMIO_AO_LSB_REG 0x08
+#define PCMMIO_AO_LSB_SPAN(x) (((x) & 0xf) << 0)
+#define PCMMIO_AO_MSB_REG 0x09
+#define PCMMIO_AO_CMD_REG 0x0a
+#define PCMMIO_AO_CMD_WR_SPAN (0x2 << 4)
+#define PCMMIO_AO_CMD_WR_CODE (0x3 << 4)
+#define PCMMIO_AO_CMD_UPDATE (0x4 << 4)
+#define PCMMIO_AO_CMD_UPDATE_ALL (0x5 << 4)
+#define PCMMIO_AO_CMD_WR_SPAN_UPDATE (0x6 << 4)
+#define PCMMIO_AO_CMD_WR_CODE_UPDATE (0x7 << 4)
+#define PCMMIO_AO_CMD_WR_SPAN_UPDATE_ALL (0x8 << 4)
+#define PCMMIO_AO_CMD_WR_CODE_UPDATE_ALL (0x9 << 4)
+#define PCMMIO_AO_CMD_RD_B1_SPAN (0xa << 4)
+#define PCMMIO_AO_CMD_RD_B1_CODE (0xb << 4)
+#define PCMMIO_AO_CMD_RD_B2_SPAN (0xc << 4)
+#define PCMMIO_AO_CMD_RD_B2_CODE (0xd << 4)
+#define PCMMIO_AO_CMD_NOP (0xf << 4)
+#define PCMMIO_AO_CMD_CHAN_SEL(x) (((x) & 0x03) << 1)
+#define PCMMIO_AO_CMD_CHAN_SEL_ALL (0x0f << 0)
+#define PCMMIO_AO_STATUS_REG 0x0b
+#define PCMMIO_AO_STATUS_DATA_READY (1 << 7)
+#define PCMMIO_AO_STATUS_DATA_DMA_PEND (1 << 6)
+#define PCMMIO_AO_STATUS_CMD_DMA_PEND (1 << 5)
+#define PCMMIO_AO_STATUS_IRQ_PEND (1 << 4)
+#define PCMMIO_AO_STATUS_DATA_DRQ_ENA (1 << 2)
+#define PCMMIO_AO_STATUS_REG_SEL (1 << 3)
+#define PCMMIO_AO_STATUS_CMD_DRQ_ENA (1 << 1)
+#define PCMMIO_AO_STATUS_IRQ_ENA (1 << 0)
+#define PCMMIO_AO_RESOURCE_ENA_REG 0x0b
+#define PCMMIO_AO_2ND_DAC_OFFSET 0x04
-static const struct comedi_lrange ranges_ao = {
- 6, {RANGE(0., 5.), RANGE(0., 10.), RANGE(-5., 5.), RANGE(-10., 10.),
- RANGE(-2.5, 2.5), RANGE(-2.5, 7.5)}
+/*
+ * WinSystems WS16C48
+ *
+ * Offset Page 0 Page 1 Page 2 Page 3
+ * ------ ----------- ----------- ----------- -----------
+ * 0x10 Port 0 I/O Port 0 I/O Port 0 I/O Port 0 I/O
+ * 0x11 Port 1 I/O Port 1 I/O Port 1 I/O Port 1 I/O
+ * 0x12 Port 2 I/O Port 2 I/O Port 2 I/O Port 2 I/O
+ * 0x13 Port 3 I/O Port 3 I/O Port 3 I/O Port 3 I/O
+ * 0x14 Port 4 I/O Port 4 I/O Port 4 I/O Port 4 I/O
+ * 0x15 Port 5 I/O Port 5 I/O Port 5 I/O Port 5 I/O
+ * 0x16 INT_PENDING INT_PENDING INT_PENDING INT_PENDING
+ * 0x17 Page/Lock Page/Lock Page/Lock Page/Lock
+ * 0x18 N/A POL_0 ENAB_0 INT_ID0
+ * 0x19 N/A POL_1 ENAB_1 INT_ID1
+ * 0x1a N/A POL_2 ENAB_2 INT_ID2
+ */
+#define PCMMIO_PORT_REG(x) (0x10 + (x))
+#define PCMMIO_INT_PENDING_REG 0x16
+#define PCMMIO_PAGE_LOCK_REG 0x17
+#define PCMMIO_LOCK_PORT(x) ((1 << (x)) & 0x3f)
+#define PCMMIO_PAGE(x) (((x) & 0x3) << 6)
+#define PCMMIO_PAGE_MASK PCMMIO_PAGE(3)
+#define PCMMIO_PAGE_POL 1
+#define PCMMIO_PAGE_ENAB 2
+#define PCMMIO_PAGE_INT_ID 3
+#define PCMMIO_PAGE_REG(x) (0x18 + (x))
+
+static const struct comedi_lrange pcmmio_ai_ranges = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(10)
+ }
};
-/* this structure is for data unique to this subdevice. */
-struct pcmmio_subdev_private {
-
- union {
- /* for DIO: mapping of halfwords (bytes)
- in port/chanarray to iobase */
- unsigned long iobases[PORTS_PER_SUBDEV];
-
- /* for AI/AO */
- unsigned long iobase;
- };
- union {
- struct {
-
- /* The below is only used for intr subdevices */
- struct {
- /*
- * if non-negative, this subdev has an
- * interrupt asic
- */
- int asic;
- /*
- * if nonnegative, the first channel id for
- * interrupts.
- */
- int first_chan;
- /*
- * the number of asic channels in this subdev
- * that have interrutps
- */
- int num_asic_chans;
- /*
- * if nonnegative, the first channel id with
- * respect to the asic that has interrupts
- */
- int asic_chan;
- /*
- * subdev-relative channel mask for channels
- * we are interested in
- */
- int enabled_mask;
- int active;
- int stop_count;
- int continuous;
- spinlock_t spinlock;
- } intr;
- } dio;
- struct {
- /* the last unsigned int data written */
- unsigned int shadow_samples[8];
- } ao;
- };
+static const struct comedi_lrange pcmmio_ao_ranges = {
+ 6, {
+ UNI_RANGE(5),
+ UNI_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(10),
+ BIP_RANGE(2.5),
+ RANGE(-2.5, 7.5)
+ }
};
-/*
- * this structure is for data unique to this hardware driver. If
- * several hardware drivers keep similar information in this structure,
- * feel free to suggest moving the variable to the struct comedi_device struct.
- */
struct pcmmio_private {
- /* stuff for DIO */
- struct {
- unsigned char pagelock; /* current page and lock */
- /* shadow of POLx registers */
- unsigned char pol[NUM_PAGED_REGS];
- /* shadow of ENABx registers */
- unsigned char enab[NUM_PAGED_REGS];
- int num;
- unsigned long iobase;
- unsigned int irq;
- spinlock_t spinlock;
- } asics[MAX_ASICS];
- struct pcmmio_subdev_private *sprivs;
+ spinlock_t pagelock; /* protects the page registers */
+ spinlock_t spinlock; /* protects the member variables */
+ unsigned int enabled_mask;
+ unsigned int stop_count;
+ unsigned int active:1;
+ unsigned int continuous:1;
+
+ unsigned int ao_readback[8];
};
-#define subpriv ((struct pcmmio_subdev_private *)s->private)
+static void pcmmio_dio_write(struct comedi_device *dev, unsigned int val,
+ int page, int port)
+{
+ struct pcmmio_private *devpriv = dev->private;
+ unsigned long iobase = dev->iobase;
+ unsigned long flags;
+
+ spin_lock_irqsave(&devpriv->pagelock, flags);
+ if (page == 0) {
+ /* Port registers are valid for any page */
+ outb(val & 0xff, iobase + PCMMIO_PORT_REG(port + 0));
+ outb((val >> 8) & 0xff, iobase + PCMMIO_PORT_REG(port + 1));
+ outb((val >> 16) & 0xff, iobase + PCMMIO_PORT_REG(port + 2));
+ } else {
+ outb(PCMMIO_PAGE(page), iobase + PCMMIO_PAGE_LOCK_REG);
+ outb(val & 0xff, iobase + PCMMIO_PAGE_REG(0));
+ outb((val >> 8) & 0xff, iobase + PCMMIO_PAGE_REG(1));
+ outb((val >> 16) & 0xff, iobase + PCMMIO_PAGE_REG(2));
+ }
+ spin_unlock_irqrestore(&devpriv->pagelock, flags);
+}
+
+static unsigned int pcmmio_dio_read(struct comedi_device *dev,
+ int page, int port)
+{
+ struct pcmmio_private *devpriv = dev->private;
+ unsigned long iobase = dev->iobase;
+ unsigned long flags;
+ unsigned int val;
+
+ spin_lock_irqsave(&devpriv->pagelock, flags);
+ if (page == 0) {
+ /* Port registers are valid for any page */
+ val = inb(iobase + PCMMIO_PORT_REG(port + 0));
+ val |= (inb(iobase + PCMMIO_PORT_REG(port + 1)) << 8);
+ val |= (inb(iobase + PCMMIO_PORT_REG(port + 2)) << 16);
+ } else {
+ outb(PCMMIO_PAGE(page), iobase + PCMMIO_PAGE_LOCK_REG);
+ val = inb(iobase + PCMMIO_PAGE_REG(0));
+ val |= (inb(iobase + PCMMIO_PAGE_REG(1)) << 8);
+ val |= (inb(iobase + PCMMIO_PAGE_REG(2)) << 16);
+ }
+ spin_unlock_irqrestore(&devpriv->pagelock, flags);
+
+ return val;
+}
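
A side note on the page/lock register used by the helpers above: the top two bits select the register page, while the low six bits can lock individual ports read-only (per the register description in the removed comment further up). The driver never sets a lock bit; a hypothetical sketch of selecting the ENAB page while locking port 0 would be:

	/* illustration only -- not part of this patch */
	outb(PCMMIO_PAGE(PCMMIO_PAGE_ENAB) | PCMMIO_LOCK_PORT(0),
	     dev->iobase + PCMMIO_PAGE_LOCK_REG);
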
-/* DIO devices are slightly special. Although it is possible to
- * implement the insn_read/insn_write interface, it is much more
- * useful to applications if you implement the insn_bits interface.
- * This allows packed reading/writing of the DIO channels. The
- * comedi core can convert between insn_bits and insn_read/write */
+/*
+ * Each channel can be individually programmed for input or output.
+ * Writing a '0' to a channel causes the corresponding output pin
+ * to go to a high-z state (pulled high by an external 10K resistor).
+ * This allows it to be used as an input. When used in the input mode,
+ * a read reflects the inverted state of the I/O pin, such that a
+ * high on the pin will read as a '0' in the register. Writing a '1'
+ * to a bit position causes the pin to sink current (up to 12mA),
+ * effectively pulling it low.
+ */
static int pcmmio_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int byte_no;
-
- /* NOTE:
- reading a 0 means this channel was high
- writine a 0 sets the channel high
- reading a 1 means this channel was low
- writing a 1 means set this channel low
-
- Therefore everything is always inverted. */
-
- /* The insn data is a mask in data[0] and the new data
- * in data[1], each channel cooresponding to a bit. */
-
-#ifdef DAMMIT_ITS_BROKEN
- /* DEBUG */
- printk(KERN_DEBUG "write mask: %08x data: %08x\n", data[0], data[1]);
-#endif
-
- s->state = 0;
-
- for (byte_no = 0; byte_no < s->n_chan / CHANS_PER_PORT; ++byte_no) {
- /* address of 8-bit port */
- unsigned long ioaddr = subpriv->iobases[byte_no],
- /* bit offset of port in 32-bit doubleword */
- offset = byte_no * 8;
- /* this 8-bit port's data */
- unsigned char byte = 0,
- /* The write mask for this port (if any) */
- write_mask_byte = (data[0] >> offset) & 0xff,
- /* The data byte for this port */
- data_byte = (data[1] >> offset) & 0xff;
-
- byte = inb(ioaddr); /* read all 8-bits for this port */
-
-#ifdef DAMMIT_ITS_BROKEN
- /* DEBUG */
- printk
- (KERN_DEBUG "byte %d wmb %02x db %02x offset %02d io %04x,"
- " data_in %02x ", byte_no, (unsigned)write_mask_byte,
- (unsigned)data_byte, offset, ioaddr, (unsigned)byte);
-#endif
-
- if (write_mask_byte) {
- /*
- * this byte has some write_bits
- * -- so set the output lines
- */
- /* clear bits for write mask */
- byte &= ~write_mask_byte;
- /* set to inverted data_byte */
- byte |= ~data_byte & write_mask_byte;
- /* Write out the new digital output state */
- outb(byte, ioaddr);
- }
-#ifdef DAMMIT_ITS_BROKEN
- /* DEBUG */
- printk(KERN_DEBUG "data_out_byte %02x\n", (unsigned)byte);
-#endif
- /* save the digital input lines for this byte.. */
- s->state |= ((unsigned int)byte) << offset;
+ /* subdevice 2 uses ports 0-2, subdevice 3 uses ports 3-5 */
+ int port = s->index == 2 ? 0 : 3;
+ unsigned int chanmask = (1 << s->n_chan) - 1;
+ unsigned int mask;
+ unsigned int val;
+
+ mask = comedi_dio_update_state(s, data);
+ if (mask) {
+ /*
+ * Outputs are inverted, so invert the state and
+ * update the channels.
+ *
+ * The s->io_bits mask makes sure the input channels
+ * are '0' so that the output pins stay in a
+ * high-Z state.
+ */
+ val = ~s->state & chanmask;
+ val &= s->io_bits;
+ pcmmio_dio_write(dev, val, 0, port);
}
- /* now return the DIO lines to data[1] - note they came inverted! */
- data[1] = ~s->state;
+ /* get inverted state of the channels from the port */
+ val = pcmmio_dio_read(dev, 0, port);
-#ifdef DAMMIT_ITS_BROKEN
- /* DEBUG */
- printk(KERN_DEBUG "s->state %08x data_out %08x\n", s->state, data[1]);
-#endif
+ /* return the true state of the channels */
+ data[1] = ~val & chanmask;
return insn->n;
}
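
To make the inversion handled by pcmmio_dio_insn_bits() concrete, consider a small example (values chosen purely for illustration): if the first eight channels are outputs (low byte of s->io_bits is 0xff) and the core sets the low byte of s->state to 0x05, the write path computes ~0x05 & 0xff = 0xfa for port 0, so channels 1 and 3-7 sink current (driven low) while channels 0 and 2 are released to the pulled-up high-Z state. Reading the port back then returns 0xfa (pins read inverted), and ~0xfa & 0xff = 0x05 is reported in data[1], i.e. the true pin state.
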
@@ -315,376 +294,172 @@ static int pcmmio_dio_insn_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int chan = CR_CHAN(insn->chanspec);
- int byte_no = chan / 8;
- int bit_no = chan % 8;
+ /* subdevice 2 uses ports 0-2, subdevice 3 uses ports 3-5 */
+ int port = s->index == 2 ? 0 : 3;
int ret;
ret = comedi_dio_insn_config(dev, s, insn, data, 0);
if (ret)
return ret;
- if (data[0] == INSN_CONFIG_DIO_INPUT) {
- unsigned long ioaddr = subpriv->iobases[byte_no];
- unsigned char val;
-
- val = inb(ioaddr);
- val &= ~(1 << bit_no);
- outb(val, ioaddr);
- }
+ if (data[0] == INSN_CONFIG_DIO_INPUT)
+ pcmmio_dio_write(dev, s->io_bits, 0, port);
return insn->n;
}
-static void switch_page(struct comedi_device *dev, int asic, int page)
+static void pcmmio_reset(struct comedi_device *dev)
{
- struct pcmmio_private *devpriv = dev->private;
-
- if (asic < 0 || asic >= 1)
- return; /* paranoia */
- if (page < 0 || page >= NUM_PAGES)
- return; /* more paranoia */
-
- devpriv->asics[asic].pagelock &= ~REG_PAGE_MASK;
- devpriv->asics[asic].pagelock |= page << REG_PAGE_BITOFFSET;
-
- /* now write out the shadow register */
- outb(devpriv->asics[asic].pagelock,
- devpriv->asics[asic].iobase + REG_PAGELOCK);
+ /* Clear all the DIO port bits */
+ pcmmio_dio_write(dev, 0, 0, 0);
+ pcmmio_dio_write(dev, 0, 0, 3);
+
+ /* Clear all the paged registers */
+ pcmmio_dio_write(dev, 0, PCMMIO_PAGE_POL, 0);
+ pcmmio_dio_write(dev, 0, PCMMIO_PAGE_ENAB, 0);
+ pcmmio_dio_write(dev, 0, PCMMIO_PAGE_INT_ID, 0);
}
-static void init_asics(struct comedi_device *dev)
-{ /* sets up an
- ASIC chip to defaults */
+/* devpriv->spinlock is already locked */
+static void pcmmio_stop_intr(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
struct pcmmio_private *devpriv = dev->private;
- int asic;
-
- for (asic = 0; asic < 1; ++asic) {
- int port, page;
- unsigned long baseaddr = devpriv->asics[asic].iobase;
-
- switch_page(dev, asic, 0); /* switch back to page 0 */
-
- /* first, clear all the DIO port bits */
- for (port = 0; port < PORTS_PER_ASIC; ++port)
- outb(0, baseaddr + REG_PORT0 + port);
-
- /* Next, clear all the paged registers for each page */
- for (page = 1; page < NUM_PAGES; ++page) {
- int reg;
- /* now clear all the paged registers */
- switch_page(dev, asic, page);
- for (reg = FIRST_PAGED_REG;
- reg < FIRST_PAGED_REG + NUM_PAGED_REGS; ++reg)
- outb(0, baseaddr + reg);
- }
- /* DEBUG set rising edge interrupts on port0 of both asics */
- /*switch_page(dev, asic, PAGE_POL);
- outb(0xff, baseaddr + REG_POL0);
- switch_page(dev, asic, PAGE_ENAB);
- outb(0xff, baseaddr + REG_ENAB0); */
- /* END DEBUG */
+ devpriv->enabled_mask = 0;
+ devpriv->active = 0;
+ s->async->inttrig = NULL;
- /* switch back to default page 0 */
- switch_page(dev, asic, 0);
- }
+ /* disable all dio interrupts */
+ pcmmio_dio_write(dev, 0, PCMMIO_PAGE_ENAB, 0);
}
-#ifdef notused
-static void lock_port(struct comedi_device *dev, int asic, int port)
+static void pcmmio_handle_dio_intr(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int triggered)
{
struct pcmmio_private *devpriv = dev->private;
+ unsigned int oldevents = s->async->events;
+ unsigned int len = s->async->cmd.chanlist_len;
+ unsigned int val = 0;
+ unsigned long flags;
+ int i;
- if (asic < 0 || asic >= 1)
- return; /* paranoia */
- if (port < 0 || port >= PORTS_PER_ASIC)
- return; /* more paranoia */
+ spin_lock_irqsave(&devpriv->spinlock, flags);
- devpriv->asics[asic].pagelock |= 0x1 << port;
- /* now write out the shadow register */
- outb(devpriv->asics[asic].pagelock,
- devpriv->asics[asic].iobase + REG_PAGELOCK);
- return;
-}
+ if (!devpriv->active)
+ goto done;
-static void unlock_port(struct comedi_device *dev, int asic, int port)
-{
- struct pcmmio_private *devpriv = dev->private;
+ if (!(triggered & devpriv->enabled_mask))
+ goto done;
- if (asic < 0 || asic >= 1)
- return; /* paranoia */
- if (port < 0 || port >= PORTS_PER_ASIC)
- return; /* more paranoia */
- devpriv->asics[asic].pagelock &= ~(0x1 << port) | REG_LOCK_MASK;
- /* now write out the shadow register */
- outb(devpriv->asics[asic].pagelock,
- devpriv->asics[asic].iobase + REG_PAGELOCK);
-}
-#endif /* notused */
+ for (i = 0; i < len; i++) {
+ unsigned int chan = CR_CHAN(s->async->cmd.chanlist[i]);
-static void pcmmio_stop_intr(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcmmio_private *devpriv = dev->private;
- int nports, firstport, asic, port;
+ if (triggered & (1 << chan))
+ val |= (1 << i);
+ }
- asic = subpriv->dio.intr.asic;
- if (asic < 0)
- return; /* not an interrupt subdev */
+ /* Write the scan to the buffer. */
+ if (comedi_buf_put(s->async, val) &&
+ comedi_buf_put(s->async, val >> 16)) {
+ s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
+ } else {
+ /* Overflow! Stop acquisition!! */
+ /* TODO: STOP_ACQUISITION_CALL_HERE!! */
+ pcmmio_stop_intr(dev, s);
+ }
- subpriv->dio.intr.enabled_mask = 0;
- subpriv->dio.intr.active = 0;
- s->async->inttrig = NULL;
- nports = subpriv->dio.intr.num_asic_chans / CHANS_PER_PORT;
- firstport = subpriv->dio.intr.asic_chan / CHANS_PER_PORT;
- switch_page(dev, asic, PAGE_ENAB);
- for (port = firstport; port < firstport + nports; ++port) {
- /* disable all intrs for this subdev.. */
- outb(0, devpriv->asics[asic].iobase + REG_ENAB0 + port);
+ /* Check for end of acquisition. */
+ if (!devpriv->continuous) {
+ /* stop_src == TRIG_COUNT */
+ if (devpriv->stop_count > 0) {
+ devpriv->stop_count--;
+ if (devpriv->stop_count == 0) {
+ s->async->events |= COMEDI_CB_EOA;
+ /* TODO: STOP_ACQUISITION_CALL_HERE!! */
+ pcmmio_stop_intr(dev, s);
+ }
+ }
}
+
+done:
+ spin_unlock_irqrestore(&devpriv->spinlock, flags);
+
+ if (oldevents != s->async->events)
+ comedi_event(dev, s);
}
static irqreturn_t interrupt_pcmmio(int irq, void *d)
{
- int asic, got1 = 0;
- struct comedi_device *dev = (struct comedi_device *)d;
- struct pcmmio_private *devpriv = dev->private;
- int i;
+ struct comedi_device *dev = d;
+ struct comedi_subdevice *s = dev->read_subdev;
+ unsigned int triggered;
+ unsigned char int_pend;
- for (asic = 0; asic < MAX_ASICS; ++asic) {
- if (irq == devpriv->asics[asic].irq) {
- unsigned long flags;
- unsigned triggered = 0;
- unsigned long iobase = devpriv->asics[asic].iobase;
- /* it is an interrupt for ASIC #asic */
- unsigned char int_pend;
-
- spin_lock_irqsave(&devpriv->asics[asic].spinlock,
- flags);
-
- int_pend = inb(iobase + REG_INT_PENDING) & 0x07;
-
- if (int_pend) {
- int port;
- for (port = 0; port < INTR_PORTS_PER_ASIC;
- ++port) {
- if (int_pend & (0x1 << port)) {
- unsigned char
- io_lines_with_edges = 0;
- switch_page(dev, asic,
- PAGE_INT_ID);
- io_lines_with_edges =
- inb(iobase +
- REG_INT_ID0 + port);
-
- if (io_lines_with_edges)
- /*
- * clear pending
- * interrupt
- */
- outb(0, iobase +
- REG_INT_ID0 +
- port);
-
- triggered |=
- io_lines_with_edges <<
- port * 8;
- }
- }
-
- ++got1;
- }
+ /* are there any interrupts pending */
+ int_pend = inb(dev->iobase + PCMMIO_INT_PENDING_REG) & 0x07;
+ if (!int_pend)
+ return IRQ_NONE;
- spin_unlock_irqrestore(&devpriv->asics[asic].spinlock,
- flags);
-
- if (triggered) {
- struct comedi_subdevice *s;
- /*
- * TODO here: dispatch io lines to subdevs
- * with commands..
- */
- printk
- (KERN_DEBUG "got edge detect interrupt %d asic %d which_chans: %06x\n",
- irq, asic, triggered);
- for (i = 2; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- /*
- * this is an interrupt subdev,
- * and it matches this asic!
- */
- if (subpriv->dio.intr.asic == asic) {
- unsigned long flags;
- unsigned oldevents;
-
- spin_lock_irqsave(&subpriv->dio.
- intr.spinlock,
- flags);
-
- oldevents = s->async->events;
-
- if (subpriv->dio.intr.active) {
- unsigned mytrig =
- ((triggered >>
- subpriv->dio.intr.asic_chan)
- &
- ((0x1 << subpriv->
- dio.intr.
- num_asic_chans) -
- 1)) << subpriv->
- dio.intr.first_chan;
- if (mytrig &
- subpriv->dio.
- intr.enabled_mask) {
- unsigned int val
- = 0;
- unsigned int n,
- ch, len;
-
- len =
- s->
- async->cmd.chanlist_len;
- for (n = 0;
- n < len;
- n++) {
- ch = CR_CHAN(s->async->cmd.chanlist[n]);
- if (mytrig & (1U << ch))
- val |= (1U << n);
- }
- /* Write the scan to the buffer. */
- if (comedi_buf_put(s->async, val)
- &&
- comedi_buf_put
- (s->async,
- val >> 16)) {
- s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
- } else {
- /* Overflow! Stop acquisition!! */
- /* TODO: STOP_ACQUISITION_CALL_HERE!! */
- pcmmio_stop_intr
- (dev,
- s);
- }
-
- /* Check for end of acquisition. */
- if (!subpriv->dio.intr.continuous) {
- /* stop_src == TRIG_COUNT */
- if (subpriv->dio.intr.stop_count > 0) {
- subpriv->dio.intr.stop_count--;
- if (subpriv->dio.intr.stop_count == 0) {
- s->async->events |= COMEDI_CB_EOA;
- /* TODO: STOP_ACQUISITION_CALL_HERE!! */
- pcmmio_stop_intr
- (dev,
- s);
- }
- }
- }
- }
- }
-
- spin_unlock_irqrestore
- (&subpriv->dio.intr.
- spinlock, flags);
-
- if (oldevents !=
- s->async->events) {
- comedi_event(dev, s);
- }
-
- }
-
- }
- }
+ /* get, and clear, the pending interrupts */
+ triggered = pcmmio_dio_read(dev, PCMMIO_PAGE_INT_ID, 0);
+ pcmmio_dio_write(dev, 0, PCMMIO_PAGE_INT_ID, 0);
+
+ pcmmio_handle_dio_intr(dev, s, triggered);
- }
- }
- if (!got1)
- return IRQ_NONE; /* interrupt from other source */
return IRQ_HANDLED;
}
+/* devpriv->spinlock is already locked */
static int pcmmio_start_intr(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct pcmmio_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int bits = 0;
+ unsigned int pol_bits = 0;
+ int i;
- if (!subpriv->dio.intr.continuous && subpriv->dio.intr.stop_count == 0) {
+ if (!devpriv->continuous && devpriv->stop_count == 0) {
/* An empty acquisition! */
s->async->events |= COMEDI_CB_EOA;
- subpriv->dio.intr.active = 0;
+ devpriv->active = 0;
return 1;
- } else {
- unsigned bits = 0, pol_bits = 0, n;
- int nports, firstport, asic, port;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- asic = subpriv->dio.intr.asic;
- if (asic < 0)
- return 1; /* not an interrupt
- subdev */
- subpriv->dio.intr.enabled_mask = 0;
- subpriv->dio.intr.active = 1;
- nports = subpriv->dio.intr.num_asic_chans / CHANS_PER_PORT;
- firstport = subpriv->dio.intr.asic_chan / CHANS_PER_PORT;
- if (cmd->chanlist) {
- for (n = 0; n < cmd->chanlist_len; n++) {
- bits |= (1U << CR_CHAN(cmd->chanlist[n]));
- pol_bits |= (CR_AREF(cmd->chanlist[n])
- || CR_RANGE(cmd->
- chanlist[n]) ? 1U : 0U)
- << CR_CHAN(cmd->chanlist[n]);
- }
- }
- bits &= ((0x1 << subpriv->dio.intr.num_asic_chans) -
- 1) << subpriv->dio.intr.first_chan;
- subpriv->dio.intr.enabled_mask = bits;
-
- {
- /*
- * the below code configures the board
- * to use a specific IRQ from 0-15.
- */
- unsigned char b;
- /*
- * set resource enable register
- * to enable IRQ operation
- */
- outb(1 << 4, dev->iobase + 3);
- /* set bits 0-3 of b to the irq number from 0-15 */
- b = dev->irq & ((1 << 4) - 1);
- outb(b, dev->iobase + 2);
- /* done, we told the board what irq to use */
- }
+ }
- switch_page(dev, asic, PAGE_ENAB);
- for (port = firstport; port < firstport + nports; ++port) {
- unsigned enab =
- bits >> (subpriv->dio.intr.first_chan + (port -
- firstport)
- * 8) & 0xff, pol =
- pol_bits >> (subpriv->dio.intr.first_chan +
- (port - firstport) * 8) & 0xff;
- /* set enab intrs for this subdev.. */
- outb(enab,
- devpriv->asics[asic].iobase + REG_ENAB0 + port);
- switch_page(dev, asic, PAGE_POL);
- outb(pol,
- devpriv->asics[asic].iobase + REG_ENAB0 + port);
+ devpriv->enabled_mask = 0;
+ devpriv->active = 1;
+ if (cmd->chanlist) {
+ for (i = 0; i < cmd->chanlist_len; i++) {
+ unsigned int chanspec = cmd->chanlist[i];
+ unsigned int chan = CR_CHAN(chanspec);
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned int aref = CR_AREF(chanspec);
+
+ bits |= (1 << chan);
+ pol_bits |= (((aref || range) ? 1 : 0) << chan);
}
}
+ bits &= ((1 << s->n_chan) - 1);
+ devpriv->enabled_mask = bits;
+
+ /* set polarity and enable interrupts */
+ pcmmio_dio_write(dev, pol_bits, PCMMIO_PAGE_POL, 0);
+ pcmmio_dio_write(dev, bits, PCMMIO_PAGE_ENAB, 0);
+
return 0;
}
static int pcmmio_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
+ struct pcmmio_private *devpriv = dev->private;
unsigned long flags;
- spin_lock_irqsave(&subpriv->dio.intr.spinlock, flags);
- if (subpriv->dio.intr.active)
+ spin_lock_irqsave(&devpriv->spinlock, flags);
+ if (devpriv->active)
pcmmio_stop_intr(dev, s);
- spin_unlock_irqrestore(&subpriv->dio.intr.spinlock, flags);
+ spin_unlock_irqrestore(&devpriv->spinlock, flags);
return 0;
}
@@ -696,17 +471,18 @@ static int
pcmmio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int trignum)
{
+ struct pcmmio_private *devpriv = dev->private;
unsigned long flags;
int event = 0;
if (trignum != 0)
return -EINVAL;
- spin_lock_irqsave(&subpriv->dio.intr.spinlock, flags);
+ spin_lock_irqsave(&devpriv->spinlock, flags);
s->async->inttrig = NULL;
- if (subpriv->dio.intr.active)
+ if (devpriv->active)
event = pcmmio_start_intr(dev, s);
- spin_unlock_irqrestore(&subpriv->dio.intr.spinlock, flags);
+ spin_unlock_irqrestore(&devpriv->spinlock, flags);
if (event)
comedi_event(dev, s);
@@ -719,23 +495,24 @@ pcmmio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s,
*/
static int pcmmio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
+ struct pcmmio_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned long flags;
int event = 0;
- spin_lock_irqsave(&subpriv->dio.intr.spinlock, flags);
- subpriv->dio.intr.active = 1;
+ spin_lock_irqsave(&devpriv->spinlock, flags);
+ devpriv->active = 1;
/* Set up end of acquisition. */
switch (cmd->stop_src) {
case TRIG_COUNT:
- subpriv->dio.intr.continuous = 0;
- subpriv->dio.intr.stop_count = cmd->stop_arg;
+ devpriv->continuous = 0;
+ devpriv->stop_count = cmd->stop_arg;
break;
default:
/* TRIG_NONE */
- subpriv->dio.intr.continuous = 1;
- subpriv->dio.intr.stop_count = 0;
+ devpriv->continuous = 1;
+ devpriv->stop_count = 0;
break;
}
@@ -749,7 +526,7 @@ static int pcmmio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
event = pcmmio_start_intr(dev, s);
break;
}
- spin_unlock_irqrestore(&subpriv->dio.intr.spinlock, flags);
+ spin_unlock_irqrestore(&devpriv->spinlock, flags);
if (event)
comedi_event(dev, s);
@@ -812,188 +589,171 @@ static int pcmmio_cmdtest(struct comedi_device *dev,
return 0;
}
-static int adc_wait_ready(unsigned long iobase)
+static int pcmmio_ai_wait_for_eoc(unsigned long iobase, unsigned int timeout)
{
- unsigned long retry = 100000;
- while (retry--)
- if (inb(iobase + 3) & 0x80)
+ unsigned char status;
+
+ while (timeout--) {
+ status = inb(iobase + PCMMIO_AI_STATUS_REG);
+ if (status & PCMMIO_AI_STATUS_DATA_READY)
return 0;
- return 1;
+ }
+ return -ETIME;
}
-/* All this is for AI and AO */
-static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int pcmmio_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int n;
- unsigned long iobase = subpriv->iobase;
+ unsigned long iobase = dev->iobase;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int aref = CR_AREF(insn->chanspec);
+ unsigned char cmd = 0;
+ unsigned int val;
+ int ret;
+ int i;
/*
- 1. write the CMD byte (to BASE+2)
- 2. read junk lo byte (BASE+0)
- 3. read junk hi byte (BASE+1)
- 4. (mux settled so) write CMD byte again (BASE+2)
- 5. read valid lo byte(BASE+0)
- 6. read valid hi byte(BASE+1)
-
- Additionally note that the BASE += 4 if the channel >= 8
+ * The PCM-MIO uses two Linear Tech LTC1859CG 8-channel A/D converters.
+ * The devices use a full duplex serial interface which transmits and
+ * receives data simultaneously. An 8-bit command is shifted into the
+ * ADC interface to configure it for the next conversion. At the same
+ * time, the data from the previous conversion is shifted out of the
+ * device. Consequently, the conversion result is delayed by one
+ * conversion from the command word.
+ *
+ * Setup the cmd for the conversions then do a dummy conversion to
+ * flush the junk data. Then do each conversion requested by the
+ * comedi_insn. Note that the last conversion will leave junk data
+ * in ADC which will get flushed on the next comedi_insn.
*/
- /* convert n samples */
- for (n = 0; n < insn->n; n++) {
- unsigned chan = CR_CHAN(insn->chanspec), range =
- CR_RANGE(insn->chanspec), aref = CR_AREF(insn->chanspec);
- unsigned char command_byte = 0;
- unsigned iooffset = 0;
- unsigned short sample, adc_adjust = 0;
-
- if (chan > 7)
- chan -= 8, iooffset = 4; /*
- * use the second dword
- * for channels > 7
- */
-
- if (aref != AREF_DIFF) {
- aref = AREF_GROUND;
- command_byte |= 1 << 7; /*
- * set bit 7 to indicate
- * single-ended
- */
- }
- if (range < 2)
- adc_adjust = 0x8000; /*
- * bipolar ranges
- * (-5,5 .. -10,10 need to be
- * adjusted -- that is.. they
- * need to wrap around by
- * adding 0x8000
- */
-
- if (chan % 2) {
- command_byte |= 1 << 6; /*
- * odd-numbered channels
- * have bit 6 set
- */
- }
-
- /* select the channel, bits 4-5 == chan/2 */
- command_byte |= ((chan / 2) & 0x3) << 4;
+ if (chan > 7) {
+ chan -= 8;
+ iobase += PCMMIO_AI_2ND_ADC_OFFSET;
+ }
- /* set the range, bits 2-3 */
- command_byte |= (range & 0x3) << 2;
+ if (aref == AREF_GROUND)
+ cmd |= PCMMIO_AI_CMD_SE;
+ if (chan % 2)
+ cmd |= PCMMIO_AI_CMD_ODD_CHAN;
+ cmd |= PCMMIO_AI_CMD_CHAN_SEL(chan / 2);
+ cmd |= PCMMIO_AI_CMD_RANGE(range);
- /* need to do this twice to make sure mux settled */
- /* chan/range/aref select */
- outb(command_byte, iobase + iooffset + 2);
+ outb(cmd, iobase + PCMMIO_AI_CMD_REG);
+ ret = pcmmio_ai_wait_for_eoc(iobase, 100000);
+ if (ret)
+ return ret;
- /* wait for the adc to say it finised the conversion */
- adc_wait_ready(iobase + iooffset);
+ val = inb(iobase + PCMMIO_AI_LSB_REG);
+ val |= inb(iobase + PCMMIO_AI_MSB_REG) << 8;
- /* select the chan/range/aref AGAIN */
- outb(command_byte, iobase + iooffset + 2);
+ for (i = 0; i < insn->n; i++) {
+ outb(cmd, iobase + PCMMIO_AI_CMD_REG);
+ ret = pcmmio_ai_wait_for_eoc(iobase, 100000);
+ if (ret)
+ return ret;
- adc_wait_ready(iobase + iooffset);
+ val = inb(iobase + PCMMIO_AI_LSB_REG);
+ val |= inb(iobase + PCMMIO_AI_MSB_REG) << 8;
- /* read data lo byte */
- sample = inb(iobase + iooffset + 0);
+ /* bipolar data is two's complement */
+ if (comedi_range_is_bipolar(s, range))
+ val = comedi_offset_munge(s, val);
- /* read data hi byte */
- sample |= inb(iobase + iooffset + 1) << 8;
- sample += adc_adjust; /* adjustment .. munge data */
- data[n] = sample;
+ data[i] = val;
}
- /* return the number of samples read/written */
- return n;
+
+ return insn->n;
}
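
As a worked example of the command-byte encoding above (illustration only): a single-ended read of channel 9 on range 1 lands on the second ADC, so the base address is bumped by PCMMIO_AI_2ND_ADC_OFFSET and the channel becomes 1; the command byte is then PCMMIO_AI_CMD_SE | PCMMIO_AI_CMD_ODD_CHAN | PCMMIO_AI_CMD_CHAN_SEL(0) | PCMMIO_AI_CMD_RANGE(1), i.e. 0x80 | 0x40 | 0x00 | 0x04 = 0xc4.
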
-static int ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int pcmmio_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int n;
- for (n = 0; n < insn->n; n++) {
- unsigned chan = CR_CHAN(insn->chanspec);
- if (chan < s->n_chan)
- data[n] = subpriv->ao.shadow_samples[chan];
- }
- return n;
+ struct pcmmio_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
+
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_readback[chan];
+
+ return insn->n;
}
-static int wait_dac_ready(unsigned long iobase)
+static int pcmmio_ao_wait_for_eoc(unsigned long iobase, unsigned int timeout)
{
- unsigned long retry = 100000L;
+ unsigned char status;
- /* This may seem like an absurd way to handle waiting and violates the
- "no busy waiting" policy. The fact is that the hardware is
- normally so fast that we usually only need one time through the loop
- anyway. The longer timeout is for rare occasions and for detecting
- non-existent hardware. */
-
- while (retry--) {
- if (inb(iobase + 3) & 0x80)
+ while (timeout--) {
+ status = inb(iobase + PCMMIO_AO_STATUS_REG);
+ if (status & PCMMIO_AO_STATUS_DATA_READY)
return 0;
-
}
- return 1;
+ return -ETIME;
}
-static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int pcmmio_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int n;
- unsigned iobase = subpriv->iobase, iooffset = 0;
-
- for (n = 0; n < insn->n; n++) {
- unsigned chan = CR_CHAN(insn->chanspec), range =
- CR_RANGE(insn->chanspec);
- if (chan < s->n_chan) {
- unsigned char command_byte = 0, range_byte =
- range & ((1 << 4) - 1);
- if (chan >= 4)
- chan -= 4, iooffset += 4;
- /* set the range.. */
- outb(range_byte, iobase + iooffset + 0);
- outb(0, iobase + iooffset + 1);
-
- /* tell it to begin */
- command_byte = (chan << 1) | 0x60;
- outb(command_byte, iobase + iooffset + 2);
-
- wait_dac_ready(iobase + iooffset);
-
- /* low order byte */
- outb(data[n] & 0xff, iobase + iooffset + 0);
-
- /* high order byte */
- outb((data[n] >> 8) & 0xff, iobase + iooffset + 1);
-
- /*
- * set bit 4 of command byte to indicate
- * data is loaded and trigger conversion
- */
- command_byte = 0x70 | (chan << 1);
- /* trigger converion */
- outb(command_byte, iobase + iooffset + 2);
-
- wait_dac_ready(iobase + iooffset);
-
- /* save to shadow register for ao_rinsn */
- subpriv->ao.shadow_samples[chan] = data[n];
- }
+ struct pcmmio_private *devpriv = dev->private;
+ unsigned long iobase = dev->iobase;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int val = devpriv->ao_readback[chan];
+ unsigned char cmd = 0;
+ int ret;
+ int i;
+
+ /*
+ * The PCM-MIO has two Linear Tech LTC2704 DAC devices. Each device
+ * is a 4-channel converter with software-selectable output range.
+ */
+
+ if (chan > 3) {
+ cmd |= PCMMIO_AO_CMD_CHAN_SEL(chan - 4);
+ iobase += PCMMIO_AO_2ND_DAC_OFFSET;
+ } else {
+ cmd |= PCMMIO_AO_CMD_CHAN_SEL(chan);
}
- return n;
+
+ /* set the range for the channel */
+ outb(PCMMIO_AO_LSB_SPAN(range), iobase + PCMMIO_AO_LSB_REG);
+ outb(0, iobase + PCMMIO_AO_MSB_REG);
+ outb(cmd | PCMMIO_AO_CMD_WR_SPAN_UPDATE, iobase + PCMMIO_AO_CMD_REG);
+ ret = pcmmio_ao_wait_for_eoc(iobase, 100000);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
+
+ /* write the data to the channel */
+ outb(val & 0xff, iobase + PCMMIO_AO_LSB_REG);
+ outb((val >> 8) & 0xff, iobase + PCMMIO_AO_MSB_REG);
+ outb(cmd | PCMMIO_AO_CMD_WR_CODE_UPDATE,
+ iobase + PCMMIO_AO_CMD_REG);
+ ret = pcmmio_ao_wait_for_eoc(iobase, 100000);
+ if (ret)
+ return ret;
+
+ devpriv->ao_readback[chan] = val;
+ }
+
+ return insn->n;
}
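
Similarly for the AO path (again an illustration only): writing to channel 5 selects the second DAC, so the base address is bumped by PCMMIO_AO_2ND_DAC_OFFSET and the channel select becomes PCMMIO_AO_CMD_CHAN_SEL(1) = 0x02; the span write then issues command 0x62 (PCMMIO_AO_CMD_WR_SPAN_UPDATE | 0x02) and each sample is written out with command 0x72 (PCMMIO_AO_CMD_WR_CODE_UPDATE | 0x02).
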
static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct pcmmio_private *devpriv;
struct comedi_subdevice *s;
- int sdev_no, chans_left, n_dio_subdevs, n_subdevs, port, asic,
- thisasic_chanct = 0;
- unsigned int irq[MAX_ASICS];
int ret;
- irq[0] = it->options[1];
-
ret = comedi_request_region(dev, it->options[0], 32);
if (ret)
return ret;
@@ -1002,177 +762,99 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!devpriv)
return -ENOMEM;
- for (asic = 0; asic < MAX_ASICS; ++asic) {
- devpriv->asics[asic].num = asic;
- devpriv->asics[asic].iobase =
- dev->iobase + 16 + asic * ASIC_IOSIZE;
- /*
- * this gets actually set at the end of this function when we
- * request_irqs
- */
- devpriv->asics[asic].irq = 0;
- spin_lock_init(&devpriv->asics[asic].spinlock);
- }
+ spin_lock_init(&devpriv->pagelock);
+ spin_lock_init(&devpriv->spinlock);
- chans_left = CHANS_PER_ASIC * 1;
- n_dio_subdevs = CALC_N_DIO_SUBDEVS(chans_left);
- n_subdevs = n_dio_subdevs + 2;
- devpriv->sprivs =
- kcalloc(n_subdevs, sizeof(struct pcmmio_subdev_private),
- GFP_KERNEL);
- if (!devpriv->sprivs) {
- printk(KERN_ERR "comedi%d: cannot allocate subdevice private data structures\n",
- dev->minor);
- return -ENOMEM;
+ pcmmio_reset(dev);
+
+ if (it->options[1]) {
+ ret = request_irq(it->options[1], interrupt_pcmmio, 0,
+ dev->board_name, dev);
+ if (ret == 0) {
+ dev->irq = it->options[1];
+
+ /* configure the interrupt routing on the board */
+ outb(PCMMIO_AI_RES_ENA_DIO_RES_ACCESS,
+ dev->iobase + PCMMIO_AI_RES_ENA_REG);
+ outb(PCMMIO_RESOURCE_IRQ(dev->irq),
+ dev->iobase + PCMMIO_RESOURCE_REG);
+ }
}
- ret = comedi_alloc_subdevices(dev, n_subdevs);
+ ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
- /* First, AI */
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- s->private = &devpriv->sprivs[0];
- s->maxdata = 0xffff;
- s->range_table = &ranges_ai;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
- s->type = COMEDI_SUBD_AI;
- s->n_chan = 16;
- s->len_chanlist = s->n_chan;
- s->insn_read = ai_rinsn;
- subpriv->iobase = dev->iobase + 0;
- /* initialize the resource enable register by clearing it */
- outb(0, subpriv->iobase + 3);
- outb(0, subpriv->iobase + 4 + 3);
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
+ s->n_chan = 16;
+ s->maxdata = 0xffff;
+ s->range_table = &pcmmio_ai_ranges;
+ s->insn_read = pcmmio_ai_insn_read;
- /* Next, AO */
- s = &dev->subdevices[1];
- s->private = &devpriv->sprivs[1];
- s->maxdata = 0xffff;
- s->range_table = &ranges_ao;
- s->subdev_flags = SDF_READABLE;
- s->type = COMEDI_SUBD_AO;
- s->n_chan = 8;
- s->len_chanlist = s->n_chan;
- s->insn_read = ao_rinsn;
- s->insn_write = ao_winsn;
- subpriv->iobase = dev->iobase + 8;
/* initialize the resource enable register by clearing it */
- outb(0, subpriv->iobase + 3);
- outb(0, subpriv->iobase + 4 + 3);
-
- port = 0;
- asic = 0;
- for (sdev_no = 2; sdev_no < dev->n_subdevices; ++sdev_no) {
- int byte_no;
-
- s = &dev->subdevices[sdev_no];
- s->private = &devpriv->sprivs[sdev_no];
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->type = COMEDI_SUBD_DIO;
- s->insn_bits = pcmmio_dio_insn_bits;
- s->insn_config = pcmmio_dio_insn_config;
- s->n_chan = min(chans_left, MAX_CHANS_PER_SUBDEV);
- subpriv->dio.intr.asic = -1;
- subpriv->dio.intr.first_chan = -1;
- subpriv->dio.intr.asic_chan = -1;
- subpriv->dio.intr.num_asic_chans = -1;
- subpriv->dio.intr.active = 0;
- s->len_chanlist = 1;
-
- /* save the ioport address for each 'port' of 8 channels in the
- subdevice */
- for (byte_no = 0; byte_no < PORTS_PER_SUBDEV; ++byte_no, ++port) {
- if (port >= PORTS_PER_ASIC) {
- port = 0;
- ++asic;
- thisasic_chanct = 0;
- }
- subpriv->iobases[byte_no] =
- devpriv->asics[asic].iobase + port;
-
- if (thisasic_chanct <
- CHANS_PER_PORT * INTR_PORTS_PER_ASIC
- && subpriv->dio.intr.asic < 0) {
- /*
- * this is an interrupt subdevice,
- * so setup the struct
- */
- subpriv->dio.intr.asic = asic;
- subpriv->dio.intr.active = 0;
- subpriv->dio.intr.stop_count = 0;
- subpriv->dio.intr.first_chan = byte_no * 8;
- subpriv->dio.intr.asic_chan = thisasic_chanct;
- subpriv->dio.intr.num_asic_chans =
- s->n_chan - subpriv->dio.intr.first_chan;
- s->cancel = pcmmio_cancel;
- s->do_cmd = pcmmio_cmd;
- s->do_cmdtest = pcmmio_cmdtest;
- s->len_chanlist =
- subpriv->dio.intr.num_asic_chans;
- }
- thisasic_chanct += CHANS_PER_PORT;
- }
- spin_lock_init(&subpriv->dio.intr.spinlock);
-
- chans_left -= s->n_chan;
+ outb(PCMMIO_AI_RES_ENA_CMD_REG_ACCESS,
+ dev->iobase + PCMMIO_AI_RES_ENA_REG);
+ outb(PCMMIO_AI_RES_ENA_CMD_REG_ACCESS,
+ dev->iobase + PCMMIO_AI_RES_ENA_REG + PCMMIO_AI_2ND_ADC_OFFSET);
- if (!chans_left) {
- /*
- * reset the asic to our first asic,
- * to do intr subdevs
- */
- asic = 0;
- port = 0;
- }
+ /* Analog Output subdevice */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 8;
+ s->maxdata = 0xffff;
+ s->range_table = &pcmmio_ao_ranges;
+ s->insn_read = pcmmio_ao_insn_read;
+ s->insn_write = pcmmio_ao_insn_write;
+ /* initialize the resource enable register by clearing it */
+ outb(0, dev->iobase + PCMMIO_AO_RESOURCE_ENA_REG);
+ outb(0, dev->iobase + PCMMIO_AO_2ND_DAC_OFFSET +
+ PCMMIO_AO_RESOURCE_ENA_REG);
+
+ /* Digital I/O subdevice with interrupt support */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 24;
+ s->maxdata = 1;
+ s->len_chanlist = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcmmio_dio_insn_bits;
+ s->insn_config = pcmmio_dio_insn_config;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->cancel = pcmmio_cancel;
+ s->do_cmd = pcmmio_cmd;
+ s->do_cmdtest = pcmmio_cmdtest;
}
- init_asics(dev); /* clear out all the registers, basically */
-
- for (asic = 0; irq[0] && asic < MAX_ASICS; ++asic) {
- if (irq[asic]
- && request_irq(irq[asic], interrupt_pcmmio,
- IRQF_SHARED, dev->board_name, dev)) {
- int i;
- /* unroll the allocated irqs.. */
- for (i = asic - 1; i >= 0; --i) {
- free_irq(irq[i], dev);
- devpriv->asics[i].irq = irq[i] = 0;
- }
- irq[asic] = 0;
- }
- devpriv->asics[asic].irq = irq[asic];
- }
+ /* Digital I/O subdevice */
+ s = &dev->subdevices[3];
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 24;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcmmio_dio_insn_bits;
+ s->insn_config = pcmmio_dio_insn_config;
- return 1;
-}
-
-static void pcmmio_detach(struct comedi_device *dev)
-{
- struct pcmmio_private *devpriv = dev->private;
- int i;
-
- if (devpriv) {
- for (i = 0; i < MAX_ASICS; ++i) {
- if (devpriv->asics[i].irq)
- free_irq(devpriv->asics[i].irq, dev);
- }
- kfree(devpriv->sprivs);
- }
- comedi_legacy_detach(dev);
+ return 0;
}
static struct comedi_driver pcmmio_driver = {
.driver_name = "pcmmio",
.module = THIS_MODULE,
.attach = pcmmio_attach,
- .detach = pcmmio_detach,
+ .detach = comedi_legacy_detach,
};
module_comedi_driver(pcmmio_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for Winsystems PCM-MIO PC/104 board");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index 954fa96a50ac..a8f390f7a874 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -75,7 +75,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include "../comedidev.h"
@@ -127,36 +126,53 @@ static const struct pcmuio_board pcmuio_boards[] = {
},
};
-struct pcmuio_subdev_private {
- /* The below is only used for intr subdevices */
- struct {
- /* if non-negative, this subdev has an interrupt asic */
- int asic;
- /*
- * subdev-relative channel mask for channels
- * we are interested in
- */
- int enabled_mask;
- int active;
- int stop_count;
- int continuous;
- spinlock_t spinlock;
- } intr;
+struct pcmuio_asic {
+ spinlock_t pagelock; /* protects the page registers */
+ spinlock_t spinlock; /* protects member variables */
+ unsigned int enabled_mask;
+ unsigned int stop_count;
+ unsigned int active:1;
+ unsigned int continuous:1;
};
struct pcmuio_private {
- struct {
- unsigned int irq;
- spinlock_t spinlock;
- } asics[PCMUIO_MAX_ASICS];
- struct pcmuio_subdev_private *sprivs;
+ struct pcmuio_asic asics[PCMUIO_MAX_ASICS];
+ unsigned int irq2;
};
+static inline unsigned long pcmuio_asic_iobase(struct comedi_device *dev,
+ int asic)
+{
+ return dev->iobase + (asic * PCMUIO_ASIC_IOSIZE);
+}
+
+static inline int pcmuio_subdevice_to_asic(struct comedi_subdevice *s)
+{
+ /*
+ * subdevice 0 and 1 are handled by the first asic
+ * subdevice 2 and 3 are handled by the second asic
+ */
+ return s->index / 2;
+}
+
+static inline int pcmuio_subdevice_to_port(struct comedi_subdevice *s)
+{
+ /*
+ * subdevice 0 and 2 use port registers 0-2
+ * subdevice 1 and 3 use port registers 3-5
+ */
+ return (s->index % 2) ? 3 : 0;
+}
+
static void pcmuio_write(struct comedi_device *dev, unsigned int val,
int asic, int page, int port)
{
- unsigned long iobase = dev->iobase + (asic * PCMUIO_ASIC_IOSIZE);
+ struct pcmuio_private *devpriv = dev->private;
+ struct pcmuio_asic *chip = &devpriv->asics[asic];
+ unsigned long iobase = pcmuio_asic_iobase(dev, asic);
+ unsigned long flags;
+ spin_lock_irqsave(&chip->pagelock, flags);
if (page == 0) {
/* Port registers are valid for any page */
outb(val & 0xff, iobase + PCMUIO_PORT_REG(port + 0));
@@ -168,14 +184,19 @@ static void pcmuio_write(struct comedi_device *dev, unsigned int val,
outb((val >> 8) & 0xff, iobase + PCMUIO_PAGE_REG(1));
outb((val >> 16) & 0xff, iobase + PCMUIO_PAGE_REG(2));
}
+ spin_unlock_irqrestore(&chip->pagelock, flags);
}
static unsigned int pcmuio_read(struct comedi_device *dev,
int asic, int page, int port)
{
- unsigned long iobase = dev->iobase + (asic * PCMUIO_ASIC_IOSIZE);
+ struct pcmuio_private *devpriv = dev->private;
+ struct pcmuio_asic *chip = &devpriv->asics[asic];
+ unsigned long iobase = pcmuio_asic_iobase(dev, asic);
+ unsigned long flags;
unsigned int val;
+ spin_lock_irqsave(&chip->pagelock, flags);
if (page == 0) {
/* Port registers are valid for any page */
val = inb(iobase + PCMUIO_PORT_REG(port + 0));
@@ -187,6 +208,7 @@ static unsigned int pcmuio_read(struct comedi_device *dev,
val |= (inb(iobase + PCMUIO_PAGE_REG(1)) << 8);
val |= (inb(iobase + PCMUIO_PAGE_REG(2)) << 16);
}
+ spin_unlock_irqrestore(&chip->pagelock, flags);
return val;
}
@@ -203,30 +225,35 @@ static unsigned int pcmuio_read(struct comedi_device *dev,
*/
static int pcmuio_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned int mask = data[0] & s->io_bits; /* outputs only */
- unsigned int bits = data[1];
- int asic = s->index / 2;
- int port = (s->index % 2) ? 3 : 0;
+ int asic = pcmuio_subdevice_to_asic(s);
+ int port = pcmuio_subdevice_to_port(s);
+ unsigned int chanmask = (1 << s->n_chan) - 1;
+ unsigned int mask;
unsigned int val;
- /* get inverted state of the channels from the port */
- val = pcmuio_read(dev, asic, 0, port);
-
- /* get the true state of the channels */
- s->state = val ^ ((0x1 << s->n_chan) - 1);
-
+ mask = comedi_dio_update_state(s, data);
if (mask) {
- s->state &= ~mask;
- s->state |= (mask & bits);
-
- /* invert the state and update the channels */
- val = s->state ^ ((0x1 << s->n_chan) - 1);
+ /*
+ * Outputs are inverted, so invert the state and
+ * update the channels.
+ *
+ * The s->io_bits mask makes sure the input channels
+ * are '0' so that the output pins stay in a
+ * high-Z state.
+ */
+ val = ~s->state & chanmask;
+ val &= s->io_bits;
pcmuio_write(dev, val, asic, 0, port);
}
- data[1] = s->state;
+ /* get inverted state of the channels from the port */
+ val = pcmuio_read(dev, asic, 0, port);
+
+ /* return the true state of the channels */
+ data[1] = ~val & chanmask;
return insn->n;
}
@@ -236,8 +263,8 @@ static int pcmuio_dio_insn_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- int asic = s->index / 2;
- int port = (s->index % 2) ? 3 : 0;
+ int asic = pcmuio_subdevice_to_asic(s);
+ int port = pcmuio_subdevice_to_port(s);
int ret;
ret = comedi_dio_insn_config(dev, s, insn, data, 0);
@@ -267,18 +294,16 @@ static void pcmuio_reset(struct comedi_device *dev)
}
}
+/* chip->spinlock is already locked */
static void pcmuio_stop_intr(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct pcmuio_subdev_private *subpriv = s->private;
- int asic;
-
- asic = subpriv->intr.asic;
- if (asic < 0)
- return; /* not an interrupt subdev */
+ struct pcmuio_private *devpriv = dev->private;
+ int asic = pcmuio_subdevice_to_asic(s);
+ struct pcmuio_asic *chip = &devpriv->asics[asic];
- subpriv->intr.enabled_mask = 0;
- subpriv->intr.active = 0;
+ chip->enabled_mask = 0;
+ chip->active = 0;
s->async->inttrig = NULL;
/* disable all intrs for this subdev.. */
@@ -289,29 +314,27 @@ static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned triggered)
{
- struct pcmuio_subdev_private *subpriv = s->private;
+ struct pcmuio_private *devpriv = dev->private;
+ int asic = pcmuio_subdevice_to_asic(s);
+ struct pcmuio_asic *chip = &devpriv->asics[asic];
unsigned int len = s->async->cmd.chanlist_len;
unsigned oldevents = s->async->events;
unsigned int val = 0;
unsigned long flags;
- unsigned mytrig;
unsigned int i;
- spin_lock_irqsave(&subpriv->intr.spinlock, flags);
+ spin_lock_irqsave(&chip->spinlock, flags);
- if (!subpriv->intr.active)
+ if (!chip->active)
goto done;
- mytrig = triggered;
- mytrig &= ((0x1 << s->n_chan) - 1);
-
- if (!(mytrig & subpriv->intr.enabled_mask))
+ if (!(triggered & chip->enabled_mask))
goto done;
for (i = 0; i < len; i++) {
unsigned int chan = CR_CHAN(s->async->cmd.chanlist[i]);
- if (mytrig & (1U << chan))
- val |= (1U << i);
+ if (triggered & (1 << chan))
+ val |= (1 << i);
}
/* Write the scan to the buffer. */
@@ -325,11 +348,11 @@ static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
}
/* Check for end of acquisition. */
- if (!subpriv->intr.continuous) {
+ if (!chip->continuous) {
/* stop_src == TRIG_COUNT */
- if (subpriv->intr.stop_count > 0) {
- subpriv->intr.stop_count--;
- if (subpriv->intr.stop_count == 0) {
+ if (chip->stop_count > 0) {
+ chip->stop_count--;
+ if (chip->stop_count == 0) {
s->async->events |= COMEDI_CB_EOA;
/* TODO: STOP_ACQUISITION_CALL_HERE!! */
pcmuio_stop_intr(dev, s);
@@ -338,7 +361,7 @@ static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
}
done:
- spin_unlock_irqrestore(&subpriv->intr.spinlock, flags);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
if (oldevents != s->async->events)
comedi_event(dev, s);
@@ -346,114 +369,93 @@ done:
static int pcmuio_handle_asic_interrupt(struct comedi_device *dev, int asic)
{
- struct pcmuio_private *devpriv = dev->private;
- struct pcmuio_subdev_private *subpriv;
- unsigned long iobase = dev->iobase + (asic * PCMUIO_ASIC_IOSIZE);
- unsigned int triggered = 0;
- int got1 = 0;
- unsigned long flags;
- unsigned char int_pend;
- int i;
+ /* there could be two asics, so we can't use dev->read_subdev */
+ struct comedi_subdevice *s = &dev->subdevices[asic * 2];
+ unsigned long iobase = pcmuio_asic_iobase(dev, asic);
+ unsigned int val;
- spin_lock_irqsave(&devpriv->asics[asic].spinlock, flags);
+ /* are there any interrupts pending */
+ val = inb(iobase + PCMUIO_INT_PENDING_REG) & 0x07;
+ if (!val)
+ return 0;
- int_pend = inb(iobase + PCMUIO_INT_PENDING_REG) & 0x07;
- if (int_pend) {
- triggered = pcmuio_read(dev, asic, PCMUIO_PAGE_INT_ID, 0);
- pcmuio_write(dev, 0, asic, PCMUIO_PAGE_INT_ID, 0);
+ /* get, and clear, the pending interrupts */
+ val = pcmuio_read(dev, asic, PCMUIO_PAGE_INT_ID, 0);
+ pcmuio_write(dev, 0, asic, PCMUIO_PAGE_INT_ID, 0);
- ++got1;
- }
+ /* handle the pending interrupts */
+ pcmuio_handle_intr_subdev(dev, s, val);
- spin_unlock_irqrestore(&devpriv->asics[asic].spinlock, flags);
-
- if (triggered) {
- struct comedi_subdevice *s;
- /* TODO here: dispatch io lines to subdevs with commands.. */
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- subpriv = s->private;
- if (subpriv->intr.asic == asic) {
- /*
- * This is an interrupt subdev, and it
- * matches this asic!
- */
- pcmuio_handle_intr_subdev(dev, s,
- triggered);
- }
- }
- }
- return got1;
+ return 1;
}
static irqreturn_t pcmuio_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcmuio_private *devpriv = dev->private;
- int got1 = 0;
- int asic;
+ int handled = 0;
- for (asic = 0; asic < PCMUIO_MAX_ASICS; ++asic) {
- if (irq == devpriv->asics[asic].irq) {
- /* it is an interrupt for ASIC #asic */
- if (pcmuio_handle_asic_interrupt(dev, asic))
- got1++;
- }
- }
- if (!got1)
- return IRQ_NONE; /* interrupt from other source */
- return IRQ_HANDLED;
+ if (irq == dev->irq)
+ handled += pcmuio_handle_asic_interrupt(dev, 0);
+ if (irq == devpriv->irq2)
+ handled += pcmuio_handle_asic_interrupt(dev, 1);
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
}
+/* chip->spinlock is already locked */
static int pcmuio_start_intr(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct pcmuio_subdev_private *subpriv = s->private;
+ struct pcmuio_private *devpriv = dev->private;
+ int asic = pcmuio_subdevice_to_asic(s);
+ struct pcmuio_asic *chip = &devpriv->asics[asic];
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int bits = 0;
+ unsigned int pol_bits = 0;
+ int i;
- if (!subpriv->intr.continuous && subpriv->intr.stop_count == 0) {
+ if (!chip->continuous && chip->stop_count == 0) {
/* An empty acquisition! */
s->async->events |= COMEDI_CB_EOA;
- subpriv->intr.active = 0;
+ chip->active = 0;
return 1;
- } else {
- unsigned bits = 0, pol_bits = 0, n;
- int asic;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- asic = subpriv->intr.asic;
- if (asic < 0)
- return 1; /* not an interrupt
- subdev */
- subpriv->intr.enabled_mask = 0;
- subpriv->intr.active = 1;
- if (cmd->chanlist) {
- for (n = 0; n < cmd->chanlist_len; n++) {
- bits |= (1U << CR_CHAN(cmd->chanlist[n]));
- pol_bits |= (CR_AREF(cmd->chanlist[n])
- || CR_RANGE(cmd->
- chanlist[n]) ? 1U : 0U)
- << CR_CHAN(cmd->chanlist[n]);
- }
- }
- bits &= ((0x1 << s->n_chan) - 1);
- subpriv->intr.enabled_mask = bits;
+ }
- /* set pol and enab intrs for this subdev.. */
- pcmuio_write(dev, pol_bits, asic, PCMUIO_PAGE_POL, 0);
- pcmuio_write(dev, bits, asic, PCMUIO_PAGE_ENAB, 0);
+ chip->enabled_mask = 0;
+ chip->active = 1;
+ if (cmd->chanlist) {
+ for (i = 0; i < cmd->chanlist_len; i++) {
+ unsigned int chanspec = cmd->chanlist[i];
+ unsigned int chan = CR_CHAN(chanspec);
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned int aref = CR_AREF(chanspec);
+
+ bits |= (1 << chan);
+ pol_bits |= ((aref || range) ? 1 : 0) << chan;
+ }
}
+ bits &= ((1 << s->n_chan) - 1);
+ chip->enabled_mask = bits;
+
+ /* set pol and enab intrs for this subdev.. */
+ pcmuio_write(dev, pol_bits, asic, PCMUIO_PAGE_POL, 0);
+ pcmuio_write(dev, bits, asic, PCMUIO_PAGE_ENAB, 0);
+
return 0;
}
static int pcmuio_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct pcmuio_subdev_private *subpriv = s->private;
+ struct pcmuio_private *devpriv = dev->private;
+ int asic = pcmuio_subdevice_to_asic(s);
+ struct pcmuio_asic *chip = &devpriv->asics[asic];
unsigned long flags;
- spin_lock_irqsave(&subpriv->intr.spinlock, flags);
- if (subpriv->intr.active)
+ spin_lock_irqsave(&chip->spinlock, flags);
+ if (chip->active)
pcmuio_stop_intr(dev, s);
- spin_unlock_irqrestore(&subpriv->intr.spinlock, flags);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
return 0;
}
@@ -465,19 +467,21 @@ static int
pcmuio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int trignum)
{
- struct pcmuio_subdev_private *subpriv = s->private;
+ struct pcmuio_private *devpriv = dev->private;
+ int asic = pcmuio_subdevice_to_asic(s);
+ struct pcmuio_asic *chip = &devpriv->asics[asic];
unsigned long flags;
int event = 0;
if (trignum != 0)
return -EINVAL;
- spin_lock_irqsave(&subpriv->intr.spinlock, flags);
+ spin_lock_irqsave(&chip->spinlock, flags);
s->async->inttrig = NULL;
- if (subpriv->intr.active)
+ if (chip->active)
event = pcmuio_start_intr(dev, s);
- spin_unlock_irqrestore(&subpriv->intr.spinlock, flags);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
if (event)
comedi_event(dev, s);
@@ -490,24 +494,26 @@ pcmuio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s,
*/
static int pcmuio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct pcmuio_subdev_private *subpriv = s->private;
+ struct pcmuio_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
+ int asic = pcmuio_subdevice_to_asic(s);
+ struct pcmuio_asic *chip = &devpriv->asics[asic];
unsigned long flags;
int event = 0;
- spin_lock_irqsave(&subpriv->intr.spinlock, flags);
- subpriv->intr.active = 1;
+ spin_lock_irqsave(&chip->spinlock, flags);
+ chip->active = 1;
/* Set up end of acquisition. */
switch (cmd->stop_src) {
case TRIG_COUNT:
- subpriv->intr.continuous = 0;
- subpriv->intr.stop_count = cmd->stop_arg;
+ chip->continuous = 0;
+ chip->stop_count = cmd->stop_arg;
break;
default:
/* TRIG_NONE */
- subpriv->intr.continuous = 1;
- subpriv->intr.stop_count = 0;
+ chip->continuous = 1;
+ chip->stop_count = 0;
break;
}
@@ -521,7 +527,7 @@ static int pcmuio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
event = pcmuio_start_intr(dev, s);
break;
}
- spin_unlock_irqrestore(&subpriv->intr.spinlock, flags);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
if (event)
comedi_event(dev, s);
@@ -589,13 +595,8 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
const struct pcmuio_board *board = comedi_board(dev);
struct comedi_subdevice *s;
struct pcmuio_private *devpriv;
- struct pcmuio_subdev_private *subpriv;
- int sdev_no, n_subdevs, asic;
- unsigned int irq[PCMUIO_MAX_ASICS];
int ret;
-
- irq[0] = it->options[1];
- irq[1] = it->options[2];
+ int i;
ret = comedi_request_region(dev, it->options[0],
board->num_asics * PCMUIO_ASIC_IOSIZE);
@@ -606,62 +607,60 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!devpriv)
return -ENOMEM;
- for (asic = 0; asic < PCMUIO_MAX_ASICS; ++asic)
- spin_lock_init(&devpriv->asics[asic].spinlock);
+ for (i = 0; i < PCMUIO_MAX_ASICS; ++i) {
+ struct pcmuio_asic *chip = &devpriv->asics[i];
- n_subdevs = board->num_asics * 2;
- devpriv->sprivs = kcalloc(n_subdevs, sizeof(*subpriv), GFP_KERNEL);
- if (!devpriv->sprivs)
- return -ENOMEM;
+ spin_lock_init(&chip->pagelock);
+ spin_lock_init(&chip->spinlock);
+ }
- ret = comedi_alloc_subdevices(dev, n_subdevs);
- if (ret)
- return ret;
+ pcmuio_reset(dev);
- for (sdev_no = 0; sdev_no < (int)dev->n_subdevices; ++sdev_no) {
- s = &dev->subdevices[sdev_no];
- subpriv = &devpriv->sprivs[sdev_no];
- s->private = subpriv;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->type = COMEDI_SUBD_DIO;
- s->insn_bits = pcmuio_dio_insn_bits;
- s->insn_config = pcmuio_dio_insn_config;
- s->n_chan = 24;
-
- /* subdevices 0 and 2 suppport interrupts */
- if ((sdev_no % 2) == 0) {
- /* setup the interrupt subdevice */
- subpriv->intr.asic = sdev_no / 2;
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->cancel = pcmuio_cancel;
- s->do_cmd = pcmuio_cmd;
- s->do_cmdtest = pcmuio_cmdtest;
- s->len_chanlist = s->n_chan;
- } else {
- subpriv->intr.asic = -1;
- s->len_chanlist = 1;
+ if (it->options[1]) {
+ /* request the irq for the 1st asic */
+ ret = request_irq(it->options[1], pcmuio_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
+ }
+
+ if (board->num_asics == 2) {
+ if (it->options[2] == dev->irq) {
+ /* the same irq (or none) is used by both asics */
+ devpriv->irq2 = it->options[2];
+ } else if (it->options[2]) {
+ /* request the irq for the 2nd asic */
+ ret = request_irq(it->options[2], pcmuio_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ devpriv->irq2 = it->options[2];
}
- spin_lock_init(&subpriv->intr.spinlock);
}
- pcmuio_reset(dev);
+ ret = comedi_alloc_subdevices(dev, board->num_asics * 2);
+ if (ret)
+ return ret;
- for (asic = 0; irq[0] && asic < PCMUIO_MAX_ASICS; ++asic) {
- if (irq[asic]
- && request_irq(irq[asic], pcmuio_interrupt,
- IRQF_SHARED, board->name, dev)) {
- int i;
- /* unroll the allocated irqs.. */
- for (i = asic - 1; i >= 0; --i) {
- free_irq(irq[i], dev);
- devpriv->asics[i].irq = irq[i] = 0;
- }
- irq[asic] = 0;
+ for (i = 0; i < dev->n_subdevices; ++i) {
+ s = &dev->subdevices[i];
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 24;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcmuio_dio_insn_bits;
+ s->insn_config = pcmuio_dio_insn_config;
+
+		/* subdevices 0 and 2 can support interrupts */
+ if ((i == 0 && dev->irq) || (i == 2 && devpriv->irq2)) {
+ /* setup the interrupt subdevice */
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->cancel = pcmuio_cancel;
+ s->do_cmd = pcmuio_cmd;
+ s->do_cmdtest = pcmuio_cmdtest;
}
- devpriv->asics[asic].irq = irq[asic];
}
return 0;
@@ -670,14 +669,13 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static void pcmuio_detach(struct comedi_device *dev)
{
struct pcmuio_private *devpriv = dev->private;
- int i;
if (devpriv) {
- for (i = 0; i < PCMUIO_MAX_ASICS; ++i) {
- if (devpriv->asics[i].irq)
- free_irq(devpriv->asics[i].irq, dev);
- }
- kfree(devpriv->sprivs);
+ pcmuio_reset(dev);
+
+ /* free the 2nd irq if used, the core will free the 1st one */
+ if (devpriv->irq2 && devpriv->irq2 != dev->irq)
+ free_irq(devpriv->irq2, dev);
}
comedi_legacy_detach(dev);
}
diff --git a/drivers/staging/comedi/drivers/plx9080.h b/drivers/staging/comedi/drivers/plx9080.h
index 0d254a1b78a7..55e3c2e2bc52 100644
--- a/drivers/staging/comedi/drivers/plx9080.h
+++ b/drivers/staging/comedi/drivers/plx9080.h
@@ -402,12 +402,9 @@ static inline int plx9080_abort_dma(void __iomem *iobase, unsigned int channel)
udelay(1);
dma_status = readb(dma_cs_addr);
}
- if (i == timeout) {
- printk
- ("plx9080: cancel() timed out waiting for dma %i done clear\n",
- channel);
+ if (i == timeout)
return -ETIMEDOUT;
- }
+
/* disable and abort channel */
writeb(PLX_DMA_ABORT_BIT, dma_cs_addr);
/* wait for dma done bit */
@@ -416,12 +413,8 @@ static inline int plx9080_abort_dma(void __iomem *iobase, unsigned int channel)
udelay(1);
dma_status = readb(dma_cs_addr);
}
- if (i == timeout) {
- printk
- ("plx9080: cancel() timed out waiting for dma %i done set\n",
- channel);
+ if (i == timeout)
return -ETIMEDOUT;
- }
return 0;
}
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 44c8712ed9e0..0f026afde9be 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -495,8 +495,6 @@ static unsigned short rtd_convert_chan_gain(struct comedi_device *dev,
case AREF_OTHER: /* ??? */
break;
}
- /*printk ("chan=%d r=%d a=%d -> 0x%x\n",
- chan, range, aref, r); */
return r;
}
@@ -606,7 +604,6 @@ static int rtd_ai_rinsn(struct comedi_device *dev,
/* read data */
d = readw(devpriv->las1 + LAS1_ADC_FIFO);
- /*printk ("rtd520: Got 0x%x after %d usec\n", d, ii+1); */
d = d >> 3; /* low 3 bits are marker lines */
if (test_bit(0, devpriv->chan_is_bipolar))
/* convert to comedi unsigned data */
@@ -692,7 +689,7 @@ static int ai_read_dregs(struct comedi_device *dev, struct comedi_subdevice *s)
static irqreturn_t rtd_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ struct comedi_subdevice *s = dev->read_subdev;
struct rtd_private *devpriv = dev->private;
u32 overrun;
u16 status;
@@ -1427,7 +1424,7 @@ static int rtd520_pci_probe(struct pci_dev *dev,
return comedi_pci_auto_config(dev, &rtd520_driver, id->driver_data);
}
-static DEFINE_PCI_DEVICE_TABLE(rtd520_pci_table) = {
+static const struct pci_device_id rtd520_pci_table[] = {
{ PCI_VDEVICE(RTD, 0x7520), BOARD_DM7520 },
{ PCI_VDEVICE(RTD, 0x4520), BOARD_PCI4520 },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index b486099b543d..19da1dbea494 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -199,7 +199,7 @@ static bool s626_mc_test(struct comedi_device *dev,
static const struct comedi_lrange s626_range_table = {
2, {
BIP_RANGE(5),
- BIP_RANGE(10),
+ BIP_RANGE(10)
}
};
@@ -1614,12 +1614,13 @@ static irqreturn_t s626_irq_handler(int irq, void *d)
static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
{
struct s626_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_cmd *cmd = &s->async->cmd;
uint32_t *rps;
uint32_t jmp_adrs;
uint16_t i;
uint16_t n;
uint32_t local_ppl;
- struct comedi_cmd *cmd = &dev->subdevices->async->cmd;
/* Stop RPS program in case it is currently running */
s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1);
@@ -2079,12 +2080,6 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (cmd == NULL)
return -EINVAL;
- if (dev->irq == 0) {
- comedi_error(dev,
- "s626_ai_cmd: cannot run command without an irq");
- return -EIO;
- }
-
s626_ai_load_polllist(ppl, cmd);
devpriv->ai_cmd_running = 1;
devpriv->ai_convert_count = 0;
@@ -2645,6 +2640,7 @@ static void s626_initialize(struct comedi_device *dev)
* a defined state after a PCI reset.
*/
{
+ struct comedi_subdevice *s = dev->read_subdev;
uint8_t poll_list;
uint16_t adc_data;
uint16_t start_val;
@@ -2656,7 +2652,7 @@ static void s626_initialize(struct comedi_device *dev)
s626_reset_adc(dev, &poll_list);
/* Get initial ADC value */
- s626_ai_rinsn(dev, dev->subdevices, NULL, data);
+ s626_ai_rinsn(dev, s, NULL, data);
start_val = data[0];
/*
@@ -2670,7 +2666,7 @@ static void s626_initialize(struct comedi_device *dev)
* being unusually quiet or at the rail.
*/
for (index = 0; index < 500; index++) {
- s626_ai_rinsn(dev, dev->subdevices, NULL, data);
+ s626_ai_rinsn(dev, s, NULL, data);
adc_data = data[0];
if (adc_data != start_val)
break;
@@ -2833,7 +2829,7 @@ static int s626_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[0];
/* analog input subdevice */
s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_DIFF;
s->n_chan = S626_ADC_CHANNELS;
s->maxdata = 0x3fff;
s->range_table = &s626_range_table;
@@ -2841,6 +2837,7 @@ static int s626_auto_attach(struct comedi_device *dev,
s->insn_read = s626_ai_insn_read;
if (dev->irq) {
dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
s->do_cmd = s626_ai_cmd;
s->do_cmdtest = s626_ai_cmdtest;
s->cancel = s626_ai_cancel;
@@ -2965,7 +2962,7 @@ static int s626_pci_probe(struct pci_dev *dev,
* also subvendor:subdevice ids, because otherwise it will conflict with
* Philips SAA7146 media/dvb based cards.
*/
-static DEFINE_PCI_DEVICE_TABLE(s626_pci_table) = {
+static const struct pci_device_id s626_pci_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_PHILIPS, PCI_DEVICE_ID_PHILIPS_SAA7146,
0x6000, 0x0272) },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/skel.c b/drivers/staging/comedi/drivers/skel.c
index daee2f42bde0..77e2059ff62e 100644
--- a/drivers/staging/comedi/drivers/skel.c
+++ b/drivers/staging/comedi/drivers/skel.c
@@ -698,7 +698,7 @@ static int skel_pci_probe(struct pci_dev *dev,
* This is used by modprobe to translate PCI IDs to drivers.
* Should only be used for PCI and ISA-PnP devices
*/
-static DEFINE_PCI_DEVICE_TABLE(skel_pci_table) = {
+static const struct pci_device_id skel_pci_table[] = {
{ PCI_VDEVICE(SKEL, 0x0100), BOARD_SKEL100 },
{ PCI_VDEVICE(SKEL, 0x0200), BOARD_SKEL200 },
{ 0 }
diff --git a/drivers/staging/comedi/drivers/unioxx5.c b/drivers/staging/comedi/drivers/unioxx5.c
index 93eec2fc254c..adf7cb7086cc 100644
--- a/drivers/staging/comedi/drivers/unioxx5.c
+++ b/drivers/staging/comedi/drivers/unioxx5.c
@@ -38,7 +38,6 @@ Devices: [Fastwel] UNIOxx-5 (unioxx5),
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/delay.h>
@@ -91,12 +90,14 @@ static int __unioxx5_define_chan_offset(int chan_num)
}
#if 0 /* not used? */
-static void __unioxx5_digital_config(struct unioxx5_subd_priv *usp, int mode)
+static void __unioxx5_digital_config(struct comedi_subdevice *s, int mode)
{
+ struct unioxx5_subd_priv *usp = s->private;
+ struct device *csdev = s->device->class_dev;
int i, mask;
mask = (mode == ALL_2_OUTPUT) ? 0xFF : 0x00;
- printk("COMEDI: mode = %d\n", mask);
+ dev_dbg(csdev, "mode = %d\n", mask);
outb(1, usp->usp_iobase + 0);
@@ -135,15 +136,18 @@ static void __unioxx5_analog_config(struct unioxx5_subd_priv *usp, int channel)
usp->usp_prev_cn_val[channel_offset - 1] = conf;
}
-static int __unioxx5_digital_read(struct unioxx5_subd_priv *usp,
+static int __unioxx5_digital_read(struct comedi_subdevice *s,
unsigned int *data, int channel, int minor)
{
+ struct unioxx5_subd_priv *usp = s->private;
+ struct device *csdev = s->device->class_dev;
int channel_offset, mask = 1 << (channel & 0x07);
channel_offset = __unioxx5_define_chan_offset(channel);
if (channel_offset < 0) {
- pr_err("comedi%d: undefined channel %d. channel range is 0 .. 23\n",
- minor, channel);
+ dev_err(csdev,
+ "undefined channel %d. channel range is 0 .. 23\n",
+ channel);
return 0;
}
@@ -157,9 +161,11 @@ static int __unioxx5_digital_read(struct unioxx5_subd_priv *usp,
return 1;
}
-static int __unioxx5_analog_read(struct unioxx5_subd_priv *usp,
+static int __unioxx5_analog_read(struct comedi_subdevice *s,
unsigned int *data, int channel, int minor)
{
+ struct unioxx5_subd_priv *usp = s->private;
+ struct device *csdev = s->device->class_dev;
int module_no, read_ch;
char control;
@@ -168,8 +174,9 @@ static int __unioxx5_analog_read(struct unioxx5_subd_priv *usp,
/* defining if given module can work on input */
if (usp->usp_module_type[module_no] & MODULE_OUTPUT_MASK) {
- pr_err("comedi%d: module in position %d with id 0x%02x is for output only",
- minor, module_no, usp->usp_module_type[module_no]);
+ dev_err(csdev,
+ "module in position %d with id 0x%02x is for output only",
+ module_no, usp->usp_module_type[module_no]);
return 0;
}
@@ -185,7 +192,7 @@ static int __unioxx5_analog_read(struct unioxx5_subd_priv *usp,
/* if four bytes readding error occurs - return 0(false) */
if ((control & Rx4CA_ERR_MASK)) {
- printk("COMEDI: 4 bytes error\n");
+ dev_err(csdev, "4 bytes error\n");
return 0;
}
@@ -197,16 +204,19 @@ static int __unioxx5_analog_read(struct unioxx5_subd_priv *usp,
return 1;
}
-static int __unioxx5_digital_write(struct unioxx5_subd_priv *usp,
+static int __unioxx5_digital_write(struct comedi_subdevice *s,
unsigned int *data, int channel, int minor)
{
+ struct unioxx5_subd_priv *usp = s->private;
+ struct device *csdev = s->device->class_dev;
int channel_offset, val;
int mask = 1 << (channel & 0x07);
channel_offset = __unioxx5_define_chan_offset(channel);
if (channel_offset < 0) {
- pr_err("comedi%d: undefined channel %d. channel range is 0 .. 23\n",
- minor, channel);
+ dev_err(csdev,
+ "undefined channel %d. channel range is 0 .. 23\n",
+ channel);
return 0;
}
@@ -225,9 +235,11 @@ static int __unioxx5_digital_write(struct unioxx5_subd_priv *usp,
return 1;
}
-static int __unioxx5_analog_write(struct unioxx5_subd_priv *usp,
+static int __unioxx5_analog_write(struct comedi_subdevice *s,
unsigned int *data, int channel, int minor)
{
+ struct unioxx5_subd_priv *usp = s->private;
+ struct device *csdev = s->device->class_dev;
int module, i;
module = channel / 2; /* definig module number(0 .. 11) */
@@ -235,8 +247,9 @@ static int __unioxx5_analog_write(struct unioxx5_subd_priv *usp,
/* defining if given module can work on output */
if (!(usp->usp_module_type[module] & MODULE_OUTPUT_MASK)) {
- pr_err("comedi%d: module in position %d with id 0x%0x is for input only!\n",
- minor, module, usp->usp_module_type[module]);
+ dev_err(csdev,
+ "module in position %d with id 0x%0x is for input only!\n",
+ module, usp->usp_module_type[module]);
return 0;
}
@@ -273,10 +286,10 @@ static int unioxx5_subdev_read(struct comedi_device *dev,
type = usp->usp_module_type[channel / 2];
if (type == MODULE_DIGITAL) {
- if (!__unioxx5_digital_read(usp, data, channel, dev->minor))
+ if (!__unioxx5_digital_read(subdev, data, channel, dev->minor))
return -1;
} else {
- if (!__unioxx5_analog_read(usp, data, channel, dev->minor))
+ if (!__unioxx5_analog_read(subdev, data, channel, dev->minor))
return -1;
}
@@ -295,10 +308,10 @@ static int unioxx5_subdev_write(struct comedi_device *dev,
type = usp->usp_module_type[channel / 2];
if (type == MODULE_DIGITAL) {
- if (!__unioxx5_digital_write(usp, data, channel, dev->minor))
+ if (!__unioxx5_digital_write(subdev, data, channel, dev->minor))
return -1;
} else {
- if (!__unioxx5_analog_write(usp, data, channel, dev->minor))
+ if (!__unioxx5_analog_write(subdev, data, channel, dev->minor))
return -1;
}
@@ -318,16 +331,15 @@ static int unioxx5_insn_config(struct comedi_device *dev,
if (type != MODULE_DIGITAL) {
dev_err(dev->class_dev,
- "comedi%d: channel configuration accessible only for digital modules\n",
- dev->minor);
+ "channel configuration accessible only for digital modules\n");
return -1;
}
channel_offset = __unioxx5_define_chan_offset(channel);
if (channel_offset < 0) {
dev_err(dev->class_dev,
- "comedi%d: undefined channel %d. channel range is 0 .. 23\n",
- dev->minor, channel);
+ "undefined channel %d. channel range is 0 .. 23\n",
+ channel);
return -1;
}
@@ -342,8 +354,7 @@ static int unioxx5_insn_config(struct comedi_device *dev,
flags |= mask;
break;
default:
- dev_err(dev->class_dev,
- "comedi%d: unknown flag\n", dev->minor);
+ dev_err(dev->class_dev, "unknown flag\n");
return -1;
}
@@ -435,9 +446,10 @@ static int unioxx5_attach(struct comedi_device *dev,
dev->iobase = iobase;
iobase += UNIOXX5_SUBDEV_BASE;
+ n_subd = 0;
- /* defining number of subdevices and getting they types (it must be 'g01') */
- for (i = n_subd = 0, ba = iobase; i < 4; i++, ba += UNIOXX5_SUBDEV_ODDS) {
+	/* get the number of subdevices with type 'g01' */
+ for (i = 0, ba = iobase; i < 4; i++, ba += UNIOXX5_SUBDEV_ODDS) {
id = inb(ba + 0xE);
num = inb(ba + 0xF);
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index da1d501d9e4e..71db683098d6 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -80,7 +80,6 @@ sampling rate. If you sample two channels you get 4kHz and so on.
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/usb.h>
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 9707dd1239c4..5f85c55c1109 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -37,7 +37,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/usb.h>
@@ -138,7 +137,10 @@
* comedi constants
*/
static const struct comedi_lrange range_usbduxfast_ai_range = {
- 2, {BIP_RANGE(0.75), BIP_RANGE(0.5)}
+ 2, {
+ BIP_RANGE(0.75),
+ BIP_RANGE(0.5)
+ }
};
/*
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index a5363ded3668..88c60b6020c4 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -43,12 +43,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/usb.h>
#include <linux/fcntl.h>
#include <linux/compiler.h>
+#include <asm/unaligned.h>
#include "comedi_fc.h"
#include "../comedidev.h"
@@ -793,7 +793,8 @@ static int usbduxsigma_ai_insn_read(struct comedi_device *dev,
}
/* 32 bits big endian from the A/D converter */
- val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf) + 1)));
+ val = be32_to_cpu(get_unaligned((uint32_t
+ *)(devpriv->insn_buf + 1)));
val &= 0x00ffffff; /* strip status byte */
val ^= 0x00800000; /* convert to unsigned */
@@ -1358,7 +1359,7 @@ static int usbduxsigma_getstatusinfo(struct comedi_device *dev, int chan)
return ret;
/* 32 bits big endian from the A/D converter */
- val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf)+1)));
+ val = be32_to_cpu(get_unaligned((uint32_t *)(devpriv->insn_buf + 1)));
val &= 0x00ffffff; /* strip status byte */
val ^= 0x00800000; /* convert to unsigned */
@@ -1656,11 +1657,13 @@ static int usbduxsigma_auto_attach(struct comedi_device *dev,
}
offset = usbduxsigma_getstatusinfo(dev, 0);
- if (offset < 0)
+ if (offset < 0) {
dev_err(dev->class_dev,
- "Communication to USBDUXSIGMA failed! Check firmware and cabling\n");
+ "Communication to USBDUXSIGMA failed! Check firmware and cabling.\n");
+ return offset;
+ }
- dev_info(dev->class_dev, "attached, ADC_zero = %x\n", offset);
+ dev_info(dev->class_dev, "ADC_zero = %x\n", offset);
return 0;
}
diff --git a/drivers/staging/comedi/kcomedilib/Makefile b/drivers/staging/comedi/kcomedilib/Makefile
index 18ee99bdde08..3aff8ed08e2d 100644
--- a/drivers/staging/comedi/kcomedilib/Makefile
+++ b/drivers/staging/comedi/kcomedilib/Makefile
@@ -1,3 +1,5 @@
+ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG
+
obj-$(CONFIG_COMEDI_KCOMEDILIB) += kcomedilib.o
kcomedilib-objs := kcomedilib_main.o
diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
index cd60677a3ed2..7dc5a18e69d4 100644
--- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
@@ -35,7 +35,7 @@ MODULE_LICENSE("GPL");
struct comedi_device *comedi_open(const char *filename)
{
- struct comedi_device *dev;
+ struct comedi_device *dev, *retval = NULL;
unsigned int minor;
if (strncmp(filename, "/dev/comedi", 11) != 0)
@@ -46,24 +46,27 @@ struct comedi_device *comedi_open(const char *filename)
if (minor >= COMEDI_NUM_BOARD_MINORS)
return NULL;
- dev = comedi_dev_from_minor(minor);
-
- if (!dev || !dev->attached)
+ dev = comedi_dev_get_from_minor(minor);
+ if (!dev)
return NULL;
- if (!try_module_get(dev->driver->module))
- return NULL;
+ down_read(&dev->attach_lock);
+ if (dev->attached)
+ retval = dev;
+ else
+ retval = NULL;
+ up_read(&dev->attach_lock);
+
+ if (retval == NULL)
+ comedi_dev_put(dev);
- return dev;
+ return retval;
}
EXPORT_SYMBOL_GPL(comedi_open);
-int comedi_close(struct comedi_device *d)
+int comedi_close(struct comedi_device *dev)
{
- struct comedi_device *dev = (struct comedi_device *)d;
-
- module_put(dev->driver->module);
-
+ comedi_dev_put(dev);
return 0;
}
EXPORT_SYMBOL_GPL(comedi_close);
@@ -73,7 +76,14 @@ static int comedi_do_insn(struct comedi_device *dev,
unsigned int *data)
{
struct comedi_subdevice *s;
- int ret = 0;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+
+ if (!dev->attached) {
+ ret = -EINVAL;
+ goto error;
+ }
/* a subdevice instruction */
if (insn->subdev >= dev->n_subdevices) {
@@ -120,6 +130,7 @@ static int comedi_do_insn(struct comedi_device *dev,
s->busy = NULL;
error:
+ mutex_unlock(&dev->mutex);
return ret;
}
@@ -169,9 +180,6 @@ int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev,
unsigned int shift;
int ret;
- if (subdev >= dev->n_subdevices)
- return -EINVAL;
-
base_channel = CR_CHAN(base_channel);
n_chan = comedi_get_n_channels(dev, subdev);
if (base_channel >= n_chan)
@@ -211,23 +219,33 @@ int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
unsigned int subd)
{
struct comedi_subdevice *s;
-
- if (subd > dev->n_subdevices)
- return -ENODEV;
-
- for (; subd < dev->n_subdevices; subd++) {
- s = &dev->subdevices[subd];
- if (s->type == type)
- return subd;
- }
- return -1;
+ int ret = -ENODEV;
+
+ down_read(&dev->attach_lock);
+ if (dev->attached)
+ for (; subd < dev->n_subdevices; subd++) {
+ s = &dev->subdevices[subd];
+ if (s->type == type) {
+ ret = subd;
+ break;
+ }
+ }
+ up_read(&dev->attach_lock);
+ return ret;
}
EXPORT_SYMBOL_GPL(comedi_find_subdevice_by_type);
int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice)
{
- struct comedi_subdevice *s = &dev->subdevices[subdevice];
+ int n;
+
+ down_read(&dev->attach_lock);
+ if (!dev->attached || subdevice >= dev->n_subdevices)
+ n = 0;
+ else
+ n = dev->subdevices[subdevice].n_chan;
+ up_read(&dev->attach_lock);
- return s->n_chan;
+ return n;
}
EXPORT_SYMBOL_GPL(comedi_get_n_channels);
diff --git a/drivers/staging/comedi/proc.c b/drivers/staging/comedi/proc.c
index ade00035d3bb..da6bc5855ebc 100644
--- a/drivers/staging/comedi/proc.c
+++ b/drivers/staging/comedi/proc.c
@@ -41,16 +41,20 @@ static int comedi_read(struct seq_file *m, void *v)
"driver_name, board_name, n_subdevices");
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
- struct comedi_device *dev = comedi_dev_from_minor(i);
+ struct comedi_device *dev = comedi_dev_get_from_minor(i);
+
if (!dev)
continue;
+ down_read(&dev->attach_lock);
if (dev->attached) {
devices_q = 1;
seq_printf(m, "%2d: %-20s %-20s %4d\n",
i, dev->driver->driver_name,
dev->board_name, dev->n_subdevices);
}
+ up_read(&dev->attach_lock);
+ comedi_dev_put(dev);
}
if (!devices_q)
seq_puts(m, "no devices\n");
diff --git a/drivers/staging/comedi/range.c b/drivers/staging/comedi/range.c
index 8fde55495d34..46b3da686384 100644
--- a/drivers/staging/comedi/range.c
+++ b/drivers/staging/comedi/range.c
@@ -83,8 +83,10 @@ int do_rangeinfo_ioctl(struct comedi_device *dev,
}
if (RANGE_LENGTH(it.range_type) != lr->length) {
- DPRINTK("wrong length %d should be %d (0x%08x)\n",
- RANGE_LENGTH(it.range_type), lr->length, it.range_type);
+ dev_dbg(dev->class_dev,
+ "wrong length %d should be %d (0x%08x)\n",
+ RANGE_LENGTH(it.range_type),
+ lr->length, it.range_type);
return -EINVAL;
}
@@ -123,7 +125,8 @@ static int aref_invalid(struct comedi_subdevice *s, unsigned int chanspec)
default:
break;
}
- DPRINTK("subdevice does not support aref %i", aref);
+ dev_dbg(s->device->class_dev, "subdevice does not support aref %i",
+ aref);
return 1;
}
diff --git a/drivers/staging/crystalhd/bc_dts_glob_lnx.h b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
index 981708f3ee39..92b0cff248cb 100644
--- a/drivers/staging/crystalhd/bc_dts_glob_lnx.h
+++ b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
@@ -229,7 +229,7 @@ enum BC_DRV_CMD {
DRV_CMD_REG_RD, /* Read Device Register */
DRV_CMD_REG_WR, /* Write Device Register */
DRV_CMD_FPGA_RD, /* Read FPGA Register */
- DRV_CMD_FPGA_WR, /* Wrtie FPGA Reister */
+ DRV_CMD_FPGA_WR, /* Write FPGA Register */
DRV_CMD_MEM_RD, /* Read Device Memory */
DRV_CMD_MEM_WR, /* Write Device Memory */
DRV_CMD_RD_PCI_CFG, /* Read PCI Config Space */
diff --git a/drivers/staging/crystalhd/crystalhd_cmds.c b/drivers/staging/crystalhd/crystalhd_cmds.c
index 07a2f24d0d47..642f438793c3 100644
--- a/drivers/staging/crystalhd/crystalhd_cmds.c
+++ b/drivers/staging/crystalhd/crystalhd_cmds.c
@@ -798,7 +798,7 @@ static const struct crystalhd_cmd_tbl g_crystalhd_cproc_tbl[] = {
*
* Current gstreamer frame work does not provide any power management
* related notification to user mode decoder plug-in. As a work-around
- * we pass on the power mangement notification to our plug-in by completing
+ * we pass on the power management notification to our plug-in by completing
* all outstanding requests with BC_STS_IO_USER_ABORT return code.
*/
enum BC_STATUS crystalhd_suspend(struct crystalhd_cmd *ctx,
@@ -1059,7 +1059,7 @@ bool crystalhd_cmd_interrupt(struct crystalhd_cmd *ctx)
{
if (!ctx) {
BCMLOG_ERR("Invalid arg..\n");
- return 0;
+ return false;
}
return crystalhd_hw_interrupt(ctx->adp, &ctx->hw_ctx);
diff --git a/drivers/staging/crystalhd/crystalhd_cmds.h b/drivers/staging/crystalhd/crystalhd_cmds.h
index 377cd9d68b08..b5bf59dbcde9 100644
--- a/drivers/staging/crystalhd/crystalhd_cmds.h
+++ b/drivers/staging/crystalhd/crystalhd_cmds.h
@@ -29,7 +29,7 @@
/*
* NOTE:: This is the main interface file between the Linux layer
- * and the harware layer. This file will use the definitions
+ * and the hardware layer. This file will use the definitions
* from _dts_glob and dts_defs etc.. which are defined for
* windows.
*/
diff --git a/drivers/staging/crystalhd/crystalhd_fw_if.h b/drivers/staging/crystalhd/crystalhd_fw_if.h
index 4b363a5069d7..05615e2a231a 100644
--- a/drivers/staging/crystalhd/crystalhd_fw_if.h
+++ b/drivers/staging/crystalhd/crystalhd_fw_if.h
@@ -115,7 +115,7 @@ struct fgt_sei {
unsigned char model_id; /* Model id. */
/* +unused SE based on Thomson spec */
- unsigned char color_desc_flag; /* Separate color descrition flag. */
+ unsigned char color_desc_flag; /* Separate color description flag. */
unsigned char bit_depth_luma; /* Bit depth luma minus 8. */
unsigned char bit_depth_chroma; /* Bit depth chroma minus 8. */
unsigned char full_range_flag; /* Full range flag. */
diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c
index 043bd49843ff..8d0680d93684 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.c
+++ b/drivers/staging/crystalhd/crystalhd_hw.c
@@ -398,7 +398,7 @@ static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw,
* Call back from TX - IOQ deletion.
*
* This routine will release the TX DMA rings allocated
- * druing setup_dma rings interface.
+ * during setup_dma rings interface.
*
* Memory is allocated per DMA ring basis. This is just
* a place holder to be able to create the dio queues.
diff --git a/drivers/staging/crystalhd/crystalhd_hw.h b/drivers/staging/crystalhd/crystalhd_hw.h
index 37809442c553..d5cb68dfe695 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.h
+++ b/drivers/staging/crystalhd/crystalhd_hw.h
@@ -46,7 +46,7 @@
#define Cpu2HstMbx1 0x00100F04
#define MbxStat1 0x00100F08
#define Stream2Host_Intr_Sts 0x00100F24
-#define C011_RET_SUCCESS 0x0 /* Reutrn status of firmware command. */
+#define C011_RET_SUCCESS 0x0 /* Return status of firmware command. */
/* TS input status register */
#define TS_StreamAFIFOStatus 0x0010044C
@@ -141,7 +141,7 @@ union link_misc_perst_deco_ctrl {
uint32_t reserved0:3; /* Reserved.No Effect*/
uint32_t stop_bcm_7412_clk:1; /* 1 ->Stops branch of
27MHz clk used to clk BCM7412*/
- uint32_t reserved1:27; /* Reseved. No Effect*/
+ uint32_t reserved1:27; /* Reserved. No Effect*/
};
uint32_t whole_reg;
@@ -176,7 +176,7 @@ union link_misc_perst_decoder_ctrl {
uint32_t res0:3; /* Reserved.No Effect*/
uint32_t stop_7412_clk:1; /* 1 ->Stops branch of 27MHz
clk used to clk BCM7412*/
- uint32_t res1:27; /* Reseved. No Effect */
+ uint32_t res1:27; /* Reserved. No Effect */
};
uint32_t whole_reg;
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c
index 190b9b924368..99eefd0291c3 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.c
+++ b/drivers/staging/crystalhd/crystalhd_lnx.c
@@ -703,7 +703,7 @@ static int chd_dec_pci_resume(struct pci_dev *pdev)
}
#endif
-static DEFINE_PCI_DEVICE_TABLE(chd_dec_pci_id_table) = {
+static const struct pci_device_id chd_dec_pci_id_table[] = {
{ PCI_VDEVICE(BROADCOM, 0x1612), 8 },
{ 0, },
};
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.h b/drivers/staging/crystalhd/crystalhd_lnx.h
index bac572a8bc2e..816e1cd5db62 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.h
+++ b/drivers/staging/crystalhd/crystalhd_lnx.h
@@ -37,7 +37,6 @@
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
@@ -53,7 +52,7 @@
/* OS specific PCI information structure and adapter information. */
struct crystalhd_adp {
- /* Hardware borad/PCI specifics */
+ /* Hardware board/PCI specifics */
char name[32];
struct pci_dev *pdev;
diff --git a/drivers/staging/crystalhd/crystalhd_misc.c b/drivers/staging/crystalhd/crystalhd_misc.c
index 51f698052aff..c3d024406337 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.c
+++ b/drivers/staging/crystalhd/crystalhd_misc.c
@@ -389,7 +389,7 @@ void *bc_kern_dma_alloc(struct crystalhd_adp *adp, uint32_t sz,
void *temp = NULL;
if (!adp || !sz || !phy_addr) {
- BCMLOG_ERR("Invalide Arg..\n");
+ BCMLOG_ERR("Invalid Arg..\n");
return temp;
}
@@ -415,7 +415,7 @@ void bc_kern_dma_free(struct crystalhd_adp *adp, uint32_t sz, void *ka,
dma_addr_t phy_addr)
{
if (!adp || !ka || !sz || !phy_addr) {
- BCMLOG_ERR("Invalide Arg..\n");
+ BCMLOG_ERR("Invalid Arg..\n");
return;
}
diff --git a/drivers/staging/crystalhd/crystalhd_misc.h b/drivers/staging/crystalhd/crystalhd_misc.h
index aa736c8855de..77ab72a2a061 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.h
+++ b/drivers/staging/crystalhd/crystalhd_misc.h
@@ -206,7 +206,7 @@ extern void crystalhd_show_buffer(uint32_t off, uint8_t *buff,
enum _chd_log_levels {
BCMLOG_ERROR = 0x80000000, /* Don't disable this option */
BCMLOG_DATA = 0x40000000, /* Data, enable by default */
- BCMLOG_SPINLOCK = 0x20000000, /* Spcial case for Spin locks*/
+ BCMLOG_SPINLOCK = 0x20000000, /* Special case for Spin locks*/
/* Following are allowed only in debug mode */
BCMLOG_INFO = 0x00000001, /* Generic informational */
diff --git a/drivers/staging/cxt1e1/comet.c b/drivers/staging/cxt1e1/comet.c
index 46a0d92173e0..c4c8c0f9c959 100644
--- a/drivers/staging/cxt1e1/comet.c
+++ b/drivers/staging/cxt1e1/comet.c
@@ -28,9 +28,9 @@ extern int cxt1e1_log_level;
#define COMET_NUM_UNITS 5 /* Number of points per entry in table */
/* forward references */
-static void SetPwrLevel(comet_t *comet);
-static void WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table);
-static void WrtXmtWaveformTbl(ci_t *ci, comet_t *comet, u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS]);
+static void SetPwrLevel(struct s_comet_reg *comet);
+static void WrtRcvEqualizerTbl(ci_t *ci, struct s_comet_reg *comet, u_int32_t *table);
+static void WrtXmtWaveformTbl(ci_t *ci, struct s_comet_reg *comet, u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS]);
void *TWV_table[12] = {
@@ -58,7 +58,7 @@ lbo_tbl_lkup(int t1, int lbo) {
return lbo - 1;
}
-void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
+void init_comet(void *ci, struct s_comet_reg *comet, u_int32_t port_mode, int clockmaster,
u_int8_t moreParams)
{
u_int8_t isT1mode;
@@ -159,8 +159,7 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
/* 60: t1 ALMI cfg */
/* Configure Line Coding */
- switch (port_mode)
- {
+ switch (port_mode) {
/* 1 - T1 B8ZS */
case CFG_FRAME_SF:
pci_write_32((u_int32_t *) &comet->cdrc_cfg, 0);
@@ -286,8 +285,7 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
/* 0x30: "BRIF cfg"; 0x20 is 'CMODE', 0x03 is (bit) rate */
/* note "rate bits can only be set once after reset" */
- if (clockmaster)
- {
+ if (clockmaster) {
/* CMODE == clockMode, 0=clock master (so all 3 others should be slave) */
/* rate = 1.544 Mb/s */
if (isT1mode)
@@ -302,16 +300,17 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
/* Master Mode i.e.FPMODE=0 (@0x20) */
pci_write_32((u_int32_t *) &comet->brif_fpcfg, 0x00);
- if ((moreParams & CFG_CLK_PORT_MASK) == CFG_CLK_PORT_INTERNAL)
- {
+ if ((moreParams & CFG_CLK_PORT_MASK) == CFG_CLK_PORT_INTERNAL) {
if (cxt1e1_log_level >= LOG_SBEBUG12)
- pr_info(">> %s: clockmaster internal clock\n", __func__);
+ pr_info(">> %s: clockmaster internal clock\n",
+ __func__);
/* internal oscillator */
pci_write_32((u_int32_t *) &comet->tx_time, 0x0d);
} else {
/* external clock source */
if (cxt1e1_log_level >= LOG_SBEBUG12)
- pr_info(">> %s: clockmaster external clock\n", __func__);
+ pr_info(">> %s: clockmaster external clock\n",
+ __func__);
/* loop timing(external) */
pci_write_32((u_int32_t *) &comet->tx_time, 0x09);
}
@@ -399,7 +398,7 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
** Returns: Nothing
*/
static void
-WrtXmtWaveform(ci_t *ci, comet_t *comet, u_int32_t sample, u_int32_t unit, u_int8_t data)
+WrtXmtWaveform(ci_t *ci, struct s_comet_reg *comet, u_int32_t sample, u_int32_t unit, u_int8_t data)
{
u_int8_t WaveformAddr;
@@ -417,19 +416,20 @@ WrtXmtWaveform(ci_t *ci, comet_t *comet, u_int32_t sample, u_int32_t unit, u_int
** Returns: Nothing
*/
static void
-WrtXmtWaveformTbl(ci_t *ci, comet_t *comet,
+WrtXmtWaveformTbl(ci_t *ci, struct s_comet_reg *comet,
u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS])
{
u_int32_t sample, unit;
- for (sample = 0; sample < COMET_NUM_SAMPLES; sample++)
- {
+ for (sample = 0; sample < COMET_NUM_SAMPLES; sample++) {
for (unit = 0; unit < COMET_NUM_UNITS; unit++)
- WrtXmtWaveform(ci, comet, sample, unit, table[sample][unit]);
+ WrtXmtWaveform(ci, comet, sample, unit,
+ table[sample][unit]);
}
/* Enable transmitter and set output amplitude */
- pci_write_32((u_int32_t *) &comet->xlpg_cfg, table[COMET_NUM_SAMPLES][0]);
+ pci_write_32((u_int32_t *) &comet->xlpg_cfg,
+ table[COMET_NUM_SAMPLES][0]);
}
@@ -444,7 +444,7 @@ WrtXmtWaveformTbl(ci_t *ci, comet_t *comet,
*/
static void
-WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
+WrtRcvEqualizerTbl(ci_t *ci, struct s_comet_reg *comet, u_int32_t *table)
{
u_int32_t ramaddr;
volatile u_int32_t value;
@@ -457,7 +457,8 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
/* for write order preservation when Optimizing driver */
pci_flush_write(ci);
/* write the addr, initiate a read */
- pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr, (u_int8_t) ramaddr);
+ pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr,
+ (u_int8_t) ramaddr);
/* for write order preservation when Optimizing driver */
pci_flush_write(ci);
/*
@@ -470,9 +471,12 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
}
value = *table++;
- pci_write_32((u_int32_t *) &comet->rlps_idata3, (u_int8_t) (value >> 24));
- pci_write_32((u_int32_t *) &comet->rlps_idata2, (u_int8_t) (value >> 16));
- pci_write_32((u_int32_t *) &comet->rlps_idata1, (u_int8_t) (value >> 8));
+ pci_write_32((u_int32_t *) &comet->rlps_idata3,
+ (u_int8_t) (value >> 24));
+ pci_write_32((u_int32_t *) &comet->rlps_idata2,
+ (u_int8_t) (value >> 16));
+ pci_write_32((u_int32_t *) &comet->rlps_idata1,
+ (u_int8_t) (value >> 8));
pci_write_32((u_int32_t *) &comet->rlps_idata0, (u_int8_t) value);
/* for write order preservation when Optimizing driver */
pci_flush_write(ci);
@@ -484,7 +488,8 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
/* for write order preservation when optimizing driver */
pci_flush_write(ci);
/* write the addr, initiate a read */
- pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr, (u_int8_t) ramaddr);
+ pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr,
+ (u_int8_t) ramaddr);
/* for write order preservation when optimizing driver */
pci_flush_write(ci);
@@ -508,7 +513,7 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
*/
static void
-SetPwrLevel(comet_t *comet)
+SetPwrLevel(struct s_comet_reg *comet)
{
volatile u_int32_t temp;
@@ -550,12 +555,11 @@ SetPwrLevel(comet_t *comet)
*/
#if 0
static void
-SetCometOps(comet_t *comet)
+SetCometOps(struct s_comet_reg *comet)
{
volatile u_int8_t rd_value;
- if (comet == mConfig.C4Func1Base + (COMET0_OFFSET >> 2))
- {
+ if (comet == mConfig.C4Func1Base + (COMET0_OFFSET >> 2)) {
/* read the BRIF Configuration */
rd_value = (u_int8_t) pci_read_32((u_int32_t *) &comet->brif_cfg);
rd_value &= ~0x20;
diff --git a/drivers/staging/cxt1e1/comet.h b/drivers/staging/cxt1e1/comet.h
index 03b9bb77a809..d5d286e47a4b 100644
--- a/drivers/staging/cxt1e1/comet.h
+++ b/drivers/staging/cxt1e1/comet.h
@@ -25,304 +25,313 @@
#define VINT32 volatile u_int32_t
-struct s_comet_reg
-{
- VINT32 gbl_cfg; /* 00 Global Cfg */
- VINT32 clkmon; /* 01 Clk Monitor */
- VINT32 rx_opt; /* 02 RX Options */
- VINT32 rx_line_cfg; /* 03 RX Line Interface Cfg */
- VINT32 tx_line_cfg; /* 04 TX Line Interface Cfg */
- VINT32 tx_frpass; /* 05 TX Framing & Bypass Options */
- VINT32 tx_time; /* 06 TX Timing Options */
- VINT32 intr_1; /* 07 Intr Source #1 */
- VINT32 intr_2; /* 08 Intr Source #2 */
- VINT32 intr_3; /* 09 Intr Source #3 */
- VINT32 mdiag; /* 0A Master Diagnostics */
- VINT32 mtest; /* 0B Master Test */
- VINT32 adiag; /* 0C Analog Diagnostics */
- VINT32 rev_id; /* 0D Rev/Chip Id/Global PMON Update */
+struct s_comet_reg {
+ VINT32 gbl_cfg; /* 00 Global Cfg */
+ VINT32 clkmon; /* 01 Clk Monitor */
+ VINT32 rx_opt; /* 02 RX Options */
+ VINT32 rx_line_cfg; /* 03 RX Line Interface Cfg */
+ VINT32 tx_line_cfg; /* 04 TX Line Interface Cfg */
+ VINT32 tx_frpass; /* 05 TX Framing & Bypass Options */
+ VINT32 tx_time; /* 06 TX Timing Options */
+ VINT32 intr_1; /* 07 Intr Source #1 */
+ VINT32 intr_2; /* 08 Intr Source #2 */
+ VINT32 intr_3; /* 09 Intr Source #3 */
+ VINT32 mdiag; /* 0A Master Diagnostics */
+ VINT32 mtest; /* 0B Master Test */
+ VINT32 adiag; /* 0C Analog Diagnostics */
+ VINT32 rev_id; /* 0D Rev/Chip Id/Global PMON Update */
#define pmon rev_id
- VINT32 reset; /* 0E Reset */
- VINT32 prgd_phctl; /* 0F PRGD Positioning/Ctl & HDLC Ctl */
- VINT32 cdrc_cfg; /* 10 CDRC Cfg */
- VINT32 cdrc_ien; /* 11 CDRC Intr Enable */
- VINT32 cdrc_ists; /* 12 CDRC Intr Sts */
- VINT32 cdrc_alos; /* 13 CDRC Alternate Loss of Signal */
-
- VINT32 rjat_ists; /* 14 RJAT Intr Sts */
- VINT32 rjat_n1clk; /* 15 RJAT Reference Clk Divisor (N1) Ctl */
- VINT32 rjat_n2clk; /* 16 RJAT Output Clk Divisor (N2) Ctl */
- VINT32 rjat_cfg; /* 17 RJAT Cfg */
-
- VINT32 tjat_ists; /* 18 TJAT Intr Sts */
- VINT32 tjat_n1clk; /* 19 TJAT Reference Clk Divisor (N1) Ctl */
- VINT32 tjat_n2clk; /* 1A TJAT Output Clk Divisor (N2) Ctl */
- VINT32 tjat_cfg; /* 1B TJAT Cfg */
-
- VINT32 rx_elst_cfg; /* 1C RX-ELST Cfg */
- VINT32 rx_elst_ists; /* 1D RX-ELST Intr Sts */
- VINT32 rx_elst_idle; /* 1E RX-ELST Idle Code */
- VINT32 _rx_elst_res1f; /* 1F RX-ELST Reserved */
-
- VINT32 tx_elst_cfg; /* 20 TX-ELST Cfg */
- VINT32 tx_elst_ists; /* 21 TX-ELST Intr Sts */
- VINT32 _tx_elst_res22; /* 22 TX-ELST Reserved */
- VINT32 _tx_elst_res23; /* 23 TX-ELST Reserved */
- VINT32 __res24; /* 24 Reserved */
- VINT32 __res25; /* 25 Reserved */
- VINT32 __res26; /* 26 Reserved */
- VINT32 __res27; /* 27 Reserved */
-
- VINT32 rxce1_ctl; /* 28 RXCE RX Data Link 1 Ctl */
- VINT32 rxce1_bits; /* 29 RXCE RX Data Link 1 Bit Select */
- VINT32 rxce2_ctl; /* 2A RXCE RX Data Link 2 Ctl */
- VINT32 rxce2_bits; /* 2B RXCE RX Data Link 2 Bit Select */
- VINT32 rxce3_ctl; /* 2C RXCE RX Data Link 3 Ctl */
- VINT32 rxce3_bits; /* 2D RXCE RX Data Link 3 Bit Select */
- VINT32 _rxce_res2E; /* 2E RXCE Reserved */
- VINT32 _rxce_res2F; /* 2F RXCE Reserved */
-
- VINT32 brif_cfg; /* 30 BRIF RX Backplane Cfg */
- VINT32 brif_fpcfg; /* 31 BRIF RX Backplane Frame Pulse Cfg */
- VINT32 brif_pfcfg; /* 32 BRIF RX Backplane Parity/F-Bit Cfg */
- VINT32 brif_tsoff; /* 33 BRIF RX Backplane Time Slot Offset */
- VINT32 brif_boff; /* 34 BRIF RX Backplane Bit Offset */
- VINT32 _brif_res35; /* 35 BRIF RX Backplane Reserved */
- VINT32 _brif_res36; /* 36 BRIF RX Backplane Reserved */
- VINT32 _brif_res37; /* 37 BRIF RX Backplane Reserved */
-
- VINT32 txci1_ctl; /* 38 TXCI TX Data Link 1 Ctl */
- VINT32 txci1_bits; /* 39 TXCI TX Data Link 2 Bit Select */
- VINT32 txci2_ctl; /* 3A TXCI TX Data Link 1 Ctl */
- VINT32 txci2_bits; /* 3B TXCI TX Data Link 2 Bit Select */
- VINT32 txci3_ctl; /* 3C TXCI TX Data Link 1 Ctl */
- VINT32 txci3_bits; /* 3D TXCI TX Data Link 2 Bit Select */
- VINT32 _txci_res3E; /* 3E TXCI Reserved */
- VINT32 _txci_res3F; /* 3F TXCI Reserved */
-
- VINT32 btif_cfg; /* 40 BTIF TX Backplane Cfg */
- VINT32 btif_fpcfg; /* 41 BTIF TX Backplane Frame Pulse Cfg */
- VINT32 btif_pcfgsts; /* 42 BTIF TX Backplane Parity Cfg & Sts */
- VINT32 btif_tsoff; /* 43 BTIF TX Backplane Time Slot Offset */
- VINT32 btif_boff; /* 44 BTIF TX Backplane Bit Offset */
- VINT32 _btif_res45; /* 45 BTIF TX Backplane Reserved */
- VINT32 _btif_res46; /* 46 BTIF TX Backplane Reserved */
- VINT32 _btif_res47; /* 47 BTIF TX Backplane Reserved */
- VINT32 t1_frmr_cfg; /* 48 T1 FRMR Cfg */
- VINT32 t1_frmr_ien; /* 49 T1 FRMR Intr Enable */
- VINT32 t1_frmr_ists; /* 4A T1 FRMR Intr Sts */
- VINT32 __res_4B; /* 4B Reserved */
- VINT32 ibcd_cfg; /* 4C IBCD Cfg */
- VINT32 ibcd_ies; /* 4D IBCD Intr Enable/Sts */
- VINT32 ibcd_act; /* 4E IBCD Activate Code */
- VINT32 ibcd_deact; /* 4F IBCD Deactivate Code */
-
- VINT32 sigx_cfg; /* 50 SIGX Cfg/Change of Signaling State */
- VINT32 sigx_acc_cos; /* 51 SIGX uP Access Sts/Change of Signaling State */
- VINT32 sigx_iac_cos; /* 52 SIGX Channel Indirect
- * Addr/Ctl/Change of Signaling State */
- VINT32 sigx_idb_cos; /* 53 SIGX Channel Indirect Data
- * Buffer/Change of Signaling State */
-
- VINT32 t1_xbas_cfg; /* 54 T1 XBAS Cfg */
- VINT32 t1_xbas_altx; /* 55 T1 XBAS Alarm TX */
- VINT32 t1_xibc_ctl; /* 56 T1 XIBC Ctl */
- VINT32 t1_xibc_lbcode; /* 57 T1 XIBC Loopback Code */
-
- VINT32 pmon_ies; /* 58 PMON Intr Enable/Sts */
- VINT32 pmon_fberr; /* 59 PMON Framing Bit Err Cnt */
- VINT32 pmon_feb_lsb; /* 5A PMON OFF/COFA/Far End Block Err Cnt (LSB) */
- VINT32 pmon_feb_msb; /* 5B PMON OFF/COFA/Far End Block Err Cnt (MSB) */
- VINT32 pmon_bed_lsb; /* 5C PMON Bit/Err/CRCE Cnt (LSB) */
- VINT32 pmon_bed_msb; /* 5D PMON Bit/Err/CRCE Cnt (MSB) */
- VINT32 pmon_lvc_lsb; /* 5E PMON LVC Cnt (LSB) */
- VINT32 pmon_lvc_msb; /* 5F PMON LVC Cnt (MSB) */
-
- VINT32 t1_almi_cfg; /* 60 T1 ALMI Cfg */
- VINT32 t1_almi_ien; /* 61 T1 ALMI Intr Enable */
- VINT32 t1_almi_ists; /* 62 T1 ALMI Intr Sts */
- VINT32 t1_almi_detsts; /* 63 T1 ALMI Alarm Detection Sts */
-
- VINT32 _t1_pdvd_res64; /* 64 T1 PDVD Reserved */
- VINT32 t1_pdvd_ies; /* 65 T1 PDVD Intr Enable/Sts */
- VINT32 _t1_xboc_res66; /* 66 T1 XBOC Reserved */
- VINT32 t1_xboc_code; /* 67 T1 XBOC Code */
- VINT32 _t1_xpde_res68; /* 68 T1 XPDE Reserved */
- VINT32 t1_xpde_ies; /* 69 T1 XPDE Intr Enable/Sts */
-
- VINT32 t1_rboc_ena; /* 6A T1 RBOC Enable */
- VINT32 t1_rboc_sts; /* 6B T1 RBOC Code Sts */
-
- VINT32 t1_tpsc_cfg; /* 6C TPSC Cfg */
- VINT32 t1_tpsc_sts; /* 6D TPSC uP Access Sts */
- VINT32 t1_tpsc_ciaddr; /* 6E TPSC Channel Indirect
- * Addr/Ctl */
- VINT32 t1_tpsc_cidata; /* 6F TPSC Channel Indirect Data
- * Buffer */
- VINT32 t1_rpsc_cfg; /* 70 RPSC Cfg */
- VINT32 t1_rpsc_sts; /* 71 RPSC uP Access Sts */
- VINT32 t1_rpsc_ciaddr; /* 72 RPSC Channel Indirect
- * Addr/Ctl */
- VINT32 t1_rpsc_cidata; /* 73 RPSC Channel Indirect Data
- * Buffer */
- VINT32 __res74; /* 74 Reserved */
- VINT32 __res75; /* 75 Reserved */
- VINT32 __res76; /* 76 Reserved */
- VINT32 __res77; /* 77 Reserved */
-
- VINT32 t1_aprm_cfg; /* 78 T1 APRM Cfg/Ctl */
- VINT32 t1_aprm_load; /* 79 T1 APRM Manual Load */
- VINT32 t1_aprm_ists; /* 7A T1 APRM Intr Sts */
- VINT32 t1_aprm_1sec_2; /* 7B T1 APRM One Second Content Octet 2 */
- VINT32 t1_aprm_1sec_3; /* 7C T1 APRM One Second Content Octet 3 */
- VINT32 t1_aprm_1sec_4; /* 7D T1 APRM One Second Content Octet 4 */
- VINT32 t1_aprm_1sec_5; /* 7E T1 APRM One Second Content MSB (Octect 5) */
- VINT32 t1_aprm_1sec_6; /* 7F T1 APRM One Second Content MSB (Octect 6) */
-
- VINT32 e1_tran_cfg; /* 80 E1 TRAN Cfg */
- VINT32 e1_tran_txalarm; /* 81 E1 TRAN TX Alarm/Diagnostic Ctl */
- VINT32 e1_tran_intctl; /* 82 E1 TRAN International Ctl */
- VINT32 e1_tran_extrab; /* 83 E1 TRAN Extra Bits Ctl */
- VINT32 e1_tran_ien; /* 84 E1 TRAN Intr Enable */
- VINT32 e1_tran_ists; /* 85 E1 TRAN Intr Sts */
- VINT32 e1_tran_nats; /* 86 E1 TRAN National Bit Codeword
- * Select */
- VINT32 e1_tran_nat; /* 87 E1 TRAN National Bit Codeword */
- VINT32 __res88; /* 88 Reserved */
- VINT32 __res89; /* 89 Reserved */
- VINT32 __res8A; /* 8A Reserved */
- VINT32 __res8B; /* 8B Reserved */
-
- VINT32 _t1_frmr_res8C; /* 8C T1 FRMR Reserved */
- VINT32 _t1_frmr_res8D; /* 8D T1 FRMR Reserved */
- VINT32 __res8E; /* 8E Reserved */
- VINT32 __res8F; /* 8F Reserved */
-
- VINT32 e1_frmr_aopts; /* 90 E1 FRMR Frame Alignment Options */
- VINT32 e1_frmr_mopts; /* 91 E1 FRMR Maintenance Mode Options */
- VINT32 e1_frmr_ien; /* 92 E1 FRMR Framing Sts Intr Enable */
- VINT32 e1_frmr_mien; /* 93 E1 FRMR Maintenance/Alarm Sts Intr Enable */
- VINT32 e1_frmr_ists; /* 94 E1 FRMR Framing Sts Intr Indication */
- VINT32 e1_frmr_mists; /* 95 E1 FRMR Maintenance/Alarm Sts Indication Enable */
- VINT32 e1_frmr_sts; /* 96 E1 FRMR Framing Sts */
- VINT32 e1_frmr_masts; /* 97 E1 FRMR Maintenance/Alarm Sts */
- VINT32 e1_frmr_nat_bits; /* 98 E1 FRMR International/National Bits */
- VINT32 e1_frmr_crc_lsb; /* 99 E1 FRMR CRC Err Cnt - LSB */
- VINT32 e1_frmr_crc_msb; /* 9A E1 FRMR CRC Err Cnt - MSB */
- VINT32 e1_frmr_nat_ien; /* 9B E1 FRMR National Bit Codeword Intr Enables */
- VINT32 e1_frmr_nat_ists; /* 9C E1 FRMR National Bit Codeword Intr/Sts */
- VINT32 e1_frmr_nat; /* 9D E1 FRMR National Bit Codewords */
- VINT32 e1_frmr_fp_ien; /* 9E E1 FRMR Frame Pulse/Alarm Intr Enables */
- VINT32 e1_frmr_fp_ists; /* 9F E1 FRMR Frame Pulse/Alarm Intr/Sts */
-
- VINT32 __resA0; /* A0 Reserved */
- VINT32 __resA1; /* A1 Reserved */
- VINT32 __resA2; /* A2 Reserved */
- VINT32 __resA3; /* A3 Reserved */
- VINT32 __resA4; /* A4 Reserved */
- VINT32 __resA5; /* A5 Reserved */
- VINT32 __resA6; /* A6 Reserved */
- VINT32 __resA7; /* A7 Reserved */
-
- VINT32 tdpr1_cfg; /* A8 TDPR #1 Cfg */
- VINT32 tdpr1_utl; /* A9 TDPR #1 Upper TX Threshold */
- VINT32 tdpr1_ltl; /* AA TDPR #1 Lower TX Threshold */
- VINT32 tdpr1_ien; /* AB TDPR #1 Intr Enable */
- VINT32 tdpr1_ists; /* AC TDPR #1 Intr Sts/UDR Clear */
- VINT32 tdpr1_data; /* AD TDPR #1 TX Data */
- VINT32 __resAE; /* AE Reserved */
- VINT32 __resAF; /* AF Reserved */
- VINT32 tdpr2_cfg; /* B0 TDPR #2 Cfg */
- VINT32 tdpr2_utl; /* B1 TDPR #2 Upper TX Threshold */
- VINT32 tdpr2_ltl; /* B2 TDPR #2 Lower TX Threshold */
- VINT32 tdpr2_ien; /* B3 TDPR #2 Intr Enable */
- VINT32 tdpr2_ists; /* B4 TDPR #2 Intr Sts/UDR Clear */
- VINT32 tdpr2_data; /* B5 TDPR #2 TX Data */
- VINT32 __resB6; /* B6 Reserved */
- VINT32 __resB7; /* B7 Reserved1 */
- VINT32 tdpr3_cfg; /* B8 TDPR #3 Cfg */
- VINT32 tdpr3_utl; /* B9 TDPR #3 Upper TX Threshold */
- VINT32 tdpr3_ltl; /* BA TDPR #3 Lower TX Threshold */
- VINT32 tdpr3_ien; /* BB TDPR #3 Intr Enable */
- VINT32 tdpr3_ists; /* BC TDPR #3 Intr Sts/UDR Clear */
- VINT32 tdpr3_data; /* BD TDPR #3 TX Data */
- VINT32 __resBE; /* BE Reserved */
- VINT32 __resBF; /* BF Reserved */
-
- VINT32 rdlc1_cfg; /* C0 RDLC #1 Cfg */
- VINT32 rdlc1_intctl; /* C1 RDLC #1 Intr Ctl */
- VINT32 rdlc1_sts; /* C2 RDLC #1 Sts */
- VINT32 rdlc1_data; /* C3 RDLC #1 Data */
- VINT32 rdlc1_paddr; /* C4 RDLC #1 Primary Addr Match */
- VINT32 rdlc1_saddr; /* C5 RDLC #1 Secondary Addr Match */
- VINT32 __resC6; /* C6 Reserved */
- VINT32 __resC7; /* C7 Reserved */
- VINT32 rdlc2_cfg; /* C8 RDLC #2 Cfg */
- VINT32 rdlc2_intctl; /* C9 RDLC #2 Intr Ctl */
- VINT32 rdlc2_sts; /* CA RDLC #2 Sts */
- VINT32 rdlc2_data; /* CB RDLC #2 Data */
- VINT32 rdlc2_paddr; /* CC RDLC #2 Primary Addr Match */
- VINT32 rdlc2_saddr; /* CD RDLC #2 Secondary Addr Match */
- VINT32 __resCE; /* CE Reserved */
- VINT32 __resCF; /* CF Reserved */
- VINT32 rdlc3_cfg; /* D0 RDLC #3 Cfg */
- VINT32 rdlc3_intctl; /* D1 RDLC #3 Intr Ctl */
- VINT32 rdlc3_sts; /* D2 RDLC #3 Sts */
- VINT32 rdlc3_data; /* D3 RDLC #3 Data */
- VINT32 rdlc3_paddr; /* D4 RDLC #3 Primary Addr Match */
- VINT32 rdlc3_saddr; /* D5 RDLC #3 Secondary Addr Match */
-
- VINT32 csu_cfg; /* D6 CSU Cfg */
- VINT32 _csu_resD7; /* D7 CSU Reserved */
-
- VINT32 rlps_idata3; /* D8 RLPS Indirect Data, 24-31 */
- VINT32 rlps_idata2; /* D9 RLPS Indirect Data, 16-23 */
- VINT32 rlps_idata1; /* DA RLPS Indirect Data, 8-15 */
- VINT32 rlps_idata0; /* DB RLPS Indirect Data, 0-7 */
- VINT32 rlps_eqvr; /* DC RLPS Equalizer Voltage Reference
- * (E1 missing) */
- VINT32 _rlps_resDD; /* DD RLPS Reserved */
- VINT32 _rlps_resDE; /* DE RLPS Reserved */
- VINT32 _rlps_resDF; /* DF RLPS Reserved */
-
- VINT32 prgd_ctl; /* E0 PRGD Ctl */
- VINT32 prgd_ies; /* E1 PRGD Intr Enable/Sts */
- VINT32 prgd_shift_len; /* E2 PRGD Shift Length */
- VINT32 prgd_tap; /* E3 PRGD Tap */
- VINT32 prgd_errin; /* E4 PRGD Err Insertion */
- VINT32 _prgd_resE5; /* E5 PRGD Reserved */
- VINT32 _prgd_resE6; /* E6 PRGD Reserved */
- VINT32 _prgd_resE7; /* E7 PRGD Reserved */
- VINT32 prgd_patin1; /* E8 PRGD Pattern Insertion #1 */
- VINT32 prgd_patin2; /* E9 PRGD Pattern Insertion #2 */
- VINT32 prgd_patin3; /* EA PRGD Pattern Insertion #3 */
- VINT32 prgd_patin4; /* EB PRGD Pattern Insertion #4 */
- VINT32 prgd_patdet1; /* EC PRGD Pattern Detector #1 */
- VINT32 prgd_patdet2; /* ED PRGD Pattern Detector #2 */
- VINT32 prgd_patdet3; /* EE PRGD Pattern Detector #3 */
- VINT32 prgd_patdet4; /* EF PRGD Pattern Detector #4 */
-
- VINT32 xlpg_cfg; /* F0 XLPG Line Driver Cfg */
- VINT32 xlpg_ctlsts; /* F1 XLPG Ctl/Sts */
- VINT32 xlpg_pwave_addr; /* F2 XLPG Pulse Waveform Storage Write Addr */
- VINT32 xlpg_pwave_data; /* F3 XLPG Pulse Waveform Storage Data */
- VINT32 xlpg_atest_pctl; /* F4 XLPG Analog Test Positive Ctl */
- VINT32 xlpg_atest_nctl; /* F5 XLPG Analog Test Negative Ctl */
- VINT32 xlpg_fdata_sel; /* F6 XLPG Fuse Data Select */
- VINT32 _xlpg_resF7; /* F7 XLPG Reserved */
-
- VINT32 rlps_cfgsts; /* F8 RLPS Cfg & Sts */
- VINT32 rlps_alos_thresh; /* F9 RLPS ALOS Detection/Clearance Threshold */
- VINT32 rlps_alos_dper; /* FA RLPS ALOS Detection Period */
- VINT32 rlps_alos_cper; /* FB RLPS ALOS Clearance Period */
- VINT32 rlps_eq_iaddr; /* FC RLPS Equalization Indirect Addr */
- VINT32 rlps_eq_rwsel; /* FD RLPS Equalization Read/WriteB Select */
- VINT32 rlps_eq_ctlsts; /* FE RLPS Equalizer Loop Sts & Ctl */
- VINT32 rlps_eq_cfg; /* FF RLPS Equalizer Cfg */
+ VINT32 reset; /* 0E Reset */
+ VINT32 prgd_phctl; /* 0F PRGD Positioning/Ctl & HDLC Ctl */
+ VINT32 cdrc_cfg; /* 10 CDRC Cfg */
+ VINT32 cdrc_ien; /* 11 CDRC Intr Enable */
+ VINT32 cdrc_ists; /* 12 CDRC Intr Sts */
+ VINT32 cdrc_alos; /* 13 CDRC Alternate Loss of Signal */
+
+ VINT32 rjat_ists; /* 14 RJAT Intr Sts */
+ VINT32 rjat_n1clk; /* 15 RJAT Reference Clk Divisor (N1) Ctl */
+ VINT32 rjat_n2clk; /* 16 RJAT Output Clk Divisor (N2) Ctl */
+ VINT32 rjat_cfg; /* 17 RJAT Cfg */
+
+ VINT32 tjat_ists; /* 18 TJAT Intr Sts */
+ VINT32 tjat_n1clk; /* 19 TJAT Reference Clk Divisor (N1) Ctl */
+ VINT32 tjat_n2clk; /* 1A TJAT Output Clk Divisor (N2) Ctl */
+ VINT32 tjat_cfg; /* 1B TJAT Cfg */
+
+ VINT32 rx_elst_cfg; /* 1C RX-ELST Cfg */
+ VINT32 rx_elst_ists; /* 1D RX-ELST Intr Sts */
+ VINT32 rx_elst_idle; /* 1E RX-ELST Idle Code */
+ VINT32 _rx_elst_res1f; /* 1F RX-ELST Reserved */
+
+ VINT32 tx_elst_cfg; /* 20 TX-ELST Cfg */
+ VINT32 tx_elst_ists; /* 21 TX-ELST Intr Sts */
+ VINT32 _tx_elst_res22; /* 22 TX-ELST Reserved */
+ VINT32 _tx_elst_res23; /* 23 TX-ELST Reserved */
+ VINT32 __res24; /* 24 Reserved */
+ VINT32 __res25; /* 25 Reserved */
+ VINT32 __res26; /* 26 Reserved */
+ VINT32 __res27; /* 27 Reserved */
+
+ VINT32 rxce1_ctl; /* 28 RXCE RX Data Link 1 Ctl */
+ VINT32 rxce1_bits; /* 29 RXCE RX Data Link 1 Bit Select */
+ VINT32 rxce2_ctl; /* 2A RXCE RX Data Link 2 Ctl */
+ VINT32 rxce2_bits; /* 2B RXCE RX Data Link 2 Bit Select */
+ VINT32 rxce3_ctl; /* 2C RXCE RX Data Link 3 Ctl */
+ VINT32 rxce3_bits; /* 2D RXCE RX Data Link 3 Bit Select */
+ VINT32 _rxce_res2E; /* 2E RXCE Reserved */
+ VINT32 _rxce_res2F; /* 2F RXCE Reserved */
+
+ VINT32 brif_cfg; /* 30 BRIF RX Backplane Cfg */
+ VINT32 brif_fpcfg; /* 31 BRIF RX Backplane Frame Pulse Cfg */
+ VINT32 brif_pfcfg; /* 32 BRIF RX Backplane Parity/F-Bit Cfg */
+ VINT32 brif_tsoff; /* 33 BRIF RX Backplane Time Slot Offset */
+ VINT32 brif_boff; /* 34 BRIF RX Backplane Bit Offset */
+ VINT32 _brif_res35; /* 35 BRIF RX Backplane Reserved */
+ VINT32 _brif_res36; /* 36 BRIF RX Backplane Reserved */
+ VINT32 _brif_res37; /* 37 BRIF RX Backplane Reserved */
+
+ VINT32 txci1_ctl; /* 38 TXCI TX Data Link 1 Ctl */
+ VINT32 txci1_bits; /* 39 TXCI TX Data Link 2 Bit Select */
+ VINT32 txci2_ctl; /* 3A TXCI TX Data Link 1 Ctl */
+ VINT32 txci2_bits; /* 3B TXCI TX Data Link 2 Bit Select */
+ VINT32 txci3_ctl; /* 3C TXCI TX Data Link 1 Ctl */
+ VINT32 txci3_bits; /* 3D TXCI TX Data Link 2 Bit Select */
+ VINT32 _txci_res3E; /* 3E TXCI Reserved */
+ VINT32 _txci_res3F; /* 3F TXCI Reserved */
+
+ VINT32 btif_cfg; /* 40 BTIF TX Backplane Cfg */
+ VINT32 btif_fpcfg; /* 41 BTIF TX Backplane Frame Pulse Cfg */
+ VINT32 btif_pcfgsts; /* 42 BTIF TX Backplane Parity Cfg & Sts */
+ VINT32 btif_tsoff; /* 43 BTIF TX Backplane Time Slot Offset */
+ VINT32 btif_boff; /* 44 BTIF TX Backplane Bit Offset */
+ VINT32 _btif_res45; /* 45 BTIF TX Backplane Reserved */
+ VINT32 _btif_res46; /* 46 BTIF TX Backplane Reserved */
+ VINT32 _btif_res47; /* 47 BTIF TX Backplane Reserved */
+ VINT32 t1_frmr_cfg; /* 48 T1 FRMR Cfg */
+ VINT32 t1_frmr_ien; /* 49 T1 FRMR Intr Enable */
+ VINT32 t1_frmr_ists; /* 4A T1 FRMR Intr Sts */
+ VINT32 __res_4B; /* 4B Reserved */
+ VINT32 ibcd_cfg; /* 4C IBCD Cfg */
+ VINT32 ibcd_ies; /* 4D IBCD Intr Enable/Sts */
+ VINT32 ibcd_act; /* 4E IBCD Activate Code */
+ VINT32 ibcd_deact; /* 4F IBCD Deactivate Code */
+
+ VINT32 sigx_cfg; /* 50 SIGX Cfg/Change of Signaling State */
+ VINT32 sigx_acc_cos; /* 51 SIGX
+ * uP Access Sts/Change of Signaling State */
+ VINT32 sigx_iac_cos; /* 52 SIGX Channel Indirect
+ * Addr/Ctl/Change of Signaling State */
+ VINT32 sigx_idb_cos; /* 53 SIGX Channel Indirect Data
+ * Buffer/Change of Signaling State */
+
+ VINT32 t1_xbas_cfg; /* 54 T1 XBAS Cfg */
+ VINT32 t1_xbas_altx; /* 55 T1 XBAS Alarm TX */
+ VINT32 t1_xibc_ctl; /* 56 T1 XIBC Ctl */
+ VINT32 t1_xibc_lbcode; /* 57 T1 XIBC Loopback Code */
+
+ VINT32 pmon_ies; /* 58 PMON Intr Enable/Sts */
+ VINT32 pmon_fberr; /* 59 PMON Framing Bit Err Cnt */
+ VINT32 pmon_feb_lsb; /* 5A PMON
+ * OFF/COFA/Far End Block Err Cnt (LSB) */
+ VINT32 pmon_feb_msb; /* 5B PMON
+ * OFF/COFA/Far End Block Err Cnt (MSB) */
+ VINT32 pmon_bed_lsb; /* 5C PMON Bit/Err/CRCE Cnt (LSB) */
+ VINT32 pmon_bed_msb; /* 5D PMON Bit/Err/CRCE Cnt (MSB) */
+ VINT32 pmon_lvc_lsb; /* 5E PMON LVC Cnt (LSB) */
+ VINT32 pmon_lvc_msb; /* 5F PMON LVC Cnt (MSB) */
+
+ VINT32 t1_almi_cfg; /* 60 T1 ALMI Cfg */
+ VINT32 t1_almi_ien; /* 61 T1 ALMI Intr Enable */
+ VINT32 t1_almi_ists; /* 62 T1 ALMI Intr Sts */
+ VINT32 t1_almi_detsts; /* 63 T1 ALMI Alarm Detection Sts */
+
+ VINT32 _t1_pdvd_res64; /* 64 T1 PDVD Reserved */
+ VINT32 t1_pdvd_ies; /* 65 T1 PDVD Intr Enable/Sts */
+ VINT32 _t1_xboc_res66; /* 66 T1 XBOC Reserved */
+ VINT32 t1_xboc_code; /* 67 T1 XBOC Code */
+ VINT32 _t1_xpde_res68; /* 68 T1 XPDE Reserved */
+ VINT32 t1_xpde_ies; /* 69 T1 XPDE Intr Enable/Sts */
+
+ VINT32 t1_rboc_ena; /* 6A T1 RBOC Enable */
+ VINT32 t1_rboc_sts; /* 6B T1 RBOC Code Sts */
+
+ VINT32 t1_tpsc_cfg; /* 6C TPSC Cfg */
+ VINT32 t1_tpsc_sts; /* 6D TPSC uP Access Sts */
+ VINT32 t1_tpsc_ciaddr; /* 6E TPSC Channel Indirect
+ * Addr/Ctl */
+ VINT32 t1_tpsc_cidata; /* 6F TPSC Channel Indirect Data
+ * Buffer */
+ VINT32 t1_rpsc_cfg; /* 70 RPSC Cfg */
+ VINT32 t1_rpsc_sts; /* 71 RPSC uP Access Sts */
+ VINT32 t1_rpsc_ciaddr; /* 72 RPSC Channel Indirect
+ * Addr/Ctl */
+ VINT32 t1_rpsc_cidata; /* 73 RPSC Channel Indirect Data
+ * Buffer */
+ VINT32 __res74; /* 74 Reserved */
+ VINT32 __res75; /* 75 Reserved */
+ VINT32 __res76; /* 76 Reserved */
+ VINT32 __res77; /* 77 Reserved */
+
+ VINT32 t1_aprm_cfg; /* 78 T1 APRM Cfg/Ctl */
+ VINT32 t1_aprm_load; /* 79 T1 APRM Manual Load */
+ VINT32 t1_aprm_ists; /* 7A T1 APRM Intr Sts */
+ VINT32 t1_aprm_1sec_2; /* 7B T1 APRM One Second Content Octet 2 */
+ VINT32 t1_aprm_1sec_3; /* 7C T1 APRM One Second Content Octet 3 */
+ VINT32 t1_aprm_1sec_4; /* 7D T1 APRM One Second Content Octet 4 */
+ VINT32 t1_aprm_1sec_5; /* 7E T1 APRM
+					 * One Second Content MSB (Octet 5) */
+ VINT32 t1_aprm_1sec_6; /* 7F T1 APRM
+					 * One Second Content MSB (Octet 6) */
+
+ VINT32 e1_tran_cfg; /* 80 E1 TRAN Cfg */
+ VINT32 e1_tran_txalarm; /* 81 E1 TRAN TX Alarm/Diagnostic Ctl */
+ VINT32 e1_tran_intctl; /* 82 E1 TRAN International Ctl */
+ VINT32 e1_tran_extrab; /* 83 E1 TRAN Extra Bits Ctl */
+ VINT32 e1_tran_ien; /* 84 E1 TRAN Intr Enable */
+ VINT32 e1_tran_ists; /* 85 E1 TRAN Intr Sts */
+ VINT32 e1_tran_nats; /* 86 E1 TRAN National Bit Codeword
+ * Select */
+ VINT32 e1_tran_nat; /* 87 E1 TRAN National Bit Codeword */
+ VINT32 __res88; /* 88 Reserved */
+ VINT32 __res89; /* 89 Reserved */
+ VINT32 __res8A; /* 8A Reserved */
+ VINT32 __res8B; /* 8B Reserved */
+
+ VINT32 _t1_frmr_res8C; /* 8C T1 FRMR Reserved */
+ VINT32 _t1_frmr_res8D; /* 8D T1 FRMR Reserved */
+ VINT32 __res8E; /* 8E Reserved */
+ VINT32 __res8F; /* 8F Reserved */
+
+ VINT32 e1_frmr_aopts; /* 90 E1 FRMR Frame Alignment Options */
+ VINT32 e1_frmr_mopts; /* 91 E1 FRMR Maintenance Mode Options */
+ VINT32 e1_frmr_ien; /* 92 E1 FRMR Framing Sts Intr Enable */
+ VINT32 e1_frmr_mien; /* 93 E1 FRMR
+ * Maintenance/Alarm Sts Intr Enable */
+ VINT32 e1_frmr_ists; /* 94 E1 FRMR Framing Sts Intr Indication */
+ VINT32 e1_frmr_mists; /* 95 E1 FRMR
+ * Maintenance/Alarm Sts Indication Enable */
+ VINT32 e1_frmr_sts; /* 96 E1 FRMR Framing Sts */
+ VINT32 e1_frmr_masts; /* 97 E1 FRMR Maintenance/Alarm Sts */
+ VINT32 e1_frmr_nat_bits; /* 98 E1 FRMR International/National Bits */
+ VINT32 e1_frmr_crc_lsb; /* 99 E1 FRMR CRC Err Cnt - LSB */
+ VINT32 e1_frmr_crc_msb; /* 9A E1 FRMR CRC Err Cnt - MSB */
+ VINT32 e1_frmr_nat_ien; /* 9B E1 FRMR
+ * National Bit Codeword Intr Enables */
+ VINT32 e1_frmr_nat_ists; /* 9C E1 FRMR
+ * National Bit Codeword Intr/Sts */
+ VINT32 e1_frmr_nat; /* 9D E1 FRMR National Bit Codewords */
+ VINT32 e1_frmr_fp_ien; /* 9E E1 FRMR
+ * Frame Pulse/Alarm Intr Enables */
+ VINT32 e1_frmr_fp_ists; /* 9F E1 FRMR Frame Pulse/Alarm Intr/Sts */
+
+ VINT32 __resA0; /* A0 Reserved */
+ VINT32 __resA1; /* A1 Reserved */
+ VINT32 __resA2; /* A2 Reserved */
+ VINT32 __resA3; /* A3 Reserved */
+ VINT32 __resA4; /* A4 Reserved */
+ VINT32 __resA5; /* A5 Reserved */
+ VINT32 __resA6; /* A6 Reserved */
+ VINT32 __resA7; /* A7 Reserved */
+
+ VINT32 tdpr1_cfg; /* A8 TDPR #1 Cfg */
+ VINT32 tdpr1_utl; /* A9 TDPR #1 Upper TX Threshold */
+ VINT32 tdpr1_ltl; /* AA TDPR #1 Lower TX Threshold */
+ VINT32 tdpr1_ien; /* AB TDPR #1 Intr Enable */
+ VINT32 tdpr1_ists; /* AC TDPR #1 Intr Sts/UDR Clear */
+ VINT32 tdpr1_data; /* AD TDPR #1 TX Data */
+ VINT32 __resAE; /* AE Reserved */
+ VINT32 __resAF; /* AF Reserved */
+ VINT32 tdpr2_cfg; /* B0 TDPR #2 Cfg */
+ VINT32 tdpr2_utl; /* B1 TDPR #2 Upper TX Threshold */
+ VINT32 tdpr2_ltl; /* B2 TDPR #2 Lower TX Threshold */
+ VINT32 tdpr2_ien; /* B3 TDPR #2 Intr Enable */
+ VINT32 tdpr2_ists; /* B4 TDPR #2 Intr Sts/UDR Clear */
+ VINT32 tdpr2_data; /* B5 TDPR #2 TX Data */
+ VINT32 __resB6; /* B6 Reserved */
+ VINT32 __resB7; /* B7 Reserved1 */
+ VINT32 tdpr3_cfg; /* B8 TDPR #3 Cfg */
+ VINT32 tdpr3_utl; /* B9 TDPR #3 Upper TX Threshold */
+ VINT32 tdpr3_ltl; /* BA TDPR #3 Lower TX Threshold */
+ VINT32 tdpr3_ien; /* BB TDPR #3 Intr Enable */
+ VINT32 tdpr3_ists; /* BC TDPR #3 Intr Sts/UDR Clear */
+ VINT32 tdpr3_data; /* BD TDPR #3 TX Data */
+ VINT32 __resBE; /* BE Reserved */
+ VINT32 __resBF; /* BF Reserved */
+
+ VINT32 rdlc1_cfg; /* C0 RDLC #1 Cfg */
+ VINT32 rdlc1_intctl; /* C1 RDLC #1 Intr Ctl */
+ VINT32 rdlc1_sts; /* C2 RDLC #1 Sts */
+ VINT32 rdlc1_data; /* C3 RDLC #1 Data */
+ VINT32 rdlc1_paddr; /* C4 RDLC #1 Primary Addr Match */
+ VINT32 rdlc1_saddr; /* C5 RDLC #1 Secondary Addr Match */
+ VINT32 __resC6; /* C6 Reserved */
+ VINT32 __resC7; /* C7 Reserved */
+ VINT32 rdlc2_cfg; /* C8 RDLC #2 Cfg */
+ VINT32 rdlc2_intctl; /* C9 RDLC #2 Intr Ctl */
+ VINT32 rdlc2_sts; /* CA RDLC #2 Sts */
+ VINT32 rdlc2_data; /* CB RDLC #2 Data */
+ VINT32 rdlc2_paddr; /* CC RDLC #2 Primary Addr Match */
+ VINT32 rdlc2_saddr; /* CD RDLC #2 Secondary Addr Match */
+ VINT32 __resCE; /* CE Reserved */
+ VINT32 __resCF; /* CF Reserved */
+ VINT32 rdlc3_cfg; /* D0 RDLC #3 Cfg */
+ VINT32 rdlc3_intctl; /* D1 RDLC #3 Intr Ctl */
+ VINT32 rdlc3_sts; /* D2 RDLC #3 Sts */
+ VINT32 rdlc3_data; /* D3 RDLC #3 Data */
+ VINT32 rdlc3_paddr; /* D4 RDLC #3 Primary Addr Match */
+ VINT32 rdlc3_saddr; /* D5 RDLC #3 Secondary Addr Match */
+
+ VINT32 csu_cfg; /* D6 CSU Cfg */
+ VINT32 _csu_resD7; /* D7 CSU Reserved */
+
+ VINT32 rlps_idata3; /* D8 RLPS Indirect Data, 24-31 */
+ VINT32 rlps_idata2; /* D9 RLPS Indirect Data, 16-23 */
+ VINT32 rlps_idata1; /* DA RLPS Indirect Data, 8-15 */
+ VINT32 rlps_idata0; /* DB RLPS Indirect Data, 0-7 */
+ VINT32 rlps_eqvr; /* DC RLPS Equalizer Voltage Reference
+ * (E1 missing) */
+ VINT32 _rlps_resDD; /* DD RLPS Reserved */
+ VINT32 _rlps_resDE; /* DE RLPS Reserved */
+ VINT32 _rlps_resDF; /* DF RLPS Reserved */
+
+ VINT32 prgd_ctl; /* E0 PRGD Ctl */
+ VINT32 prgd_ies; /* E1 PRGD Intr Enable/Sts */
+ VINT32 prgd_shift_len; /* E2 PRGD Shift Length */
+ VINT32 prgd_tap; /* E3 PRGD Tap */
+ VINT32 prgd_errin; /* E4 PRGD Err Insertion */
+ VINT32 _prgd_resE5; /* E5 PRGD Reserved */
+ VINT32 _prgd_resE6; /* E6 PRGD Reserved */
+ VINT32 _prgd_resE7; /* E7 PRGD Reserved */
+ VINT32 prgd_patin1; /* E8 PRGD Pattern Insertion #1 */
+ VINT32 prgd_patin2; /* E9 PRGD Pattern Insertion #2 */
+ VINT32 prgd_patin3; /* EA PRGD Pattern Insertion #3 */
+ VINT32 prgd_patin4; /* EB PRGD Pattern Insertion #4 */
+ VINT32 prgd_patdet1; /* EC PRGD Pattern Detector #1 */
+ VINT32 prgd_patdet2; /* ED PRGD Pattern Detector #2 */
+ VINT32 prgd_patdet3; /* EE PRGD Pattern Detector #3 */
+ VINT32 prgd_patdet4; /* EF PRGD Pattern Detector #4 */
+
+ VINT32 xlpg_cfg; /* F0 XLPG Line Driver Cfg */
+ VINT32 xlpg_ctlsts; /* F1 XLPG Ctl/Sts */
+ VINT32 xlpg_pwave_addr; /* F2 XLPG
+ * Pulse Waveform Storage Write Addr */
+ VINT32 xlpg_pwave_data; /* F3 XLPG Pulse Waveform Storage Data */
+ VINT32 xlpg_atest_pctl; /* F4 XLPG Analog Test Positive Ctl */
+ VINT32 xlpg_atest_nctl; /* F5 XLPG Analog Test Negative Ctl */
+ VINT32 xlpg_fdata_sel; /* F6 XLPG Fuse Data Select */
+ VINT32 _xlpg_resF7; /* F7 XLPG Reserved */
+
+ VINT32 rlps_cfgsts; /* F8 RLPS Cfg & Sts */
+ VINT32 rlps_alos_thresh; /* F9 RLPS
+ * ALOS Detection/Clearance Threshold */
+ VINT32 rlps_alos_dper; /* FA RLPS ALOS Detection Period */
+ VINT32 rlps_alos_cper; /* FB RLPS ALOS Clearance Period */
+ VINT32 rlps_eq_iaddr; /* FC RLPS Equalization Indirect Addr */
+ VINT32 rlps_eq_rwsel; /* FD RLPS Equalization Read/WriteB Select */
+ VINT32 rlps_eq_ctlsts; /* FE RLPS Equalizer Loop Sts & Ctl */
+ VINT32 rlps_eq_cfg; /* FF RLPS Equalizer Cfg */
};
-typedef struct s_comet_reg comet_t;
-
/* 00AH: MDIAG Register bit definitions */
#define COMET_MDIAG_ID5 0x40
#define COMET_MDIAG_LBMASK 0x3F
@@ -338,7 +347,7 @@ typedef struct s_comet_reg comet_t;
#ifdef __KERNEL__
extern void
-init_comet(void *, comet_t *, u_int32_t, int, u_int8_t);
+init_comet(void *, struct s_comet_reg *, u_int32_t, int, u_int8_t);
#endif
#endif /* _INC_COMET_H_ */
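
The comet.h hunk above drops the "typedef struct s_comet_reg comet_t;" alias, so every user now spells out struct s_comet_reg, matching the usual kernel preference for bare struct tags over typedefs. A minimal before/after sketch of the pattern (the two-field layout is illustrative, not the real COMET register map):

    /* Sketch only: dropping a struct typedef.  The fields shown are
     * illustrative; the real struct s_comet_reg is the full register
     * map defined in comet.h. */
    #include <linux/types.h>

    struct s_comet_reg {
            u32 cfg;        /* 00 device configuration (illustrative) */
            u32 ists;       /* 01 interrupt status (illustrative) */
    };

    /* Old style, removed by this patch:
     *     typedef struct s_comet_reg comet_t;
     *     void init_comet(void *ci, comet_t *comet, ...);
     * New style: callers pass struct s_comet_reg * directly, so the
     * struct nature of the argument is visible at every call site. */
    static void comet_clear_cfg(struct s_comet_reg *comet)
    {
            comet->cfg = 0;         /* illustrative register poke */
    }
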
diff --git a/drivers/staging/cxt1e1/functions.c b/drivers/staging/cxt1e1/functions.c
index d021b312ffa2..95218e283966 100644
--- a/drivers/staging/cxt1e1/functions.c
+++ b/drivers/staging/cxt1e1/functions.c
@@ -274,7 +274,7 @@ VMETRO_TRACE (void *x)
void
VMETRO_TRIGGER (ci_t *ci, int x)
{
- comet_t *comet;
+ struct s_comet_reg *comet;
volatile u_int32_t data;
comet = ci->port[0].cometbase; /* default to COMET # 0 */
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index 9b483739881a..4a08e16e42f7 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -770,9 +770,9 @@ do_del_chan (struct net_device *musycc_dev, void *data)
if (cp.channum > 999)
return -EINVAL;
snprintf (buf, sizeof(buf), CHANNAME "%d", cp.channum);
- if (!(dev = dev_get_by_name (&init_net, buf)))
- return -ENOENT;
- dev_put (dev);
+ dev = __dev_get_by_name(&init_net, buf);
+ if (!dev)
+ return -ENODEV;
ret = do_deluser (dev, 1);
if (ret)
return ret;
@@ -792,19 +792,18 @@ do_reset (struct net_device *musycc_dev, void *data)
char buf[sizeof (CHANNAME) + 3];
sprintf (buf, CHANNAME "%d", i);
- if (!(ndev = dev_get_by_name(&init_net, buf)))
- continue;
+ ndev = __dev_get_by_name(&init_net, buf);
+ if (!ndev)
+ continue;
priv = dev_to_hdlc (ndev)->priv;
if ((unsigned long) (priv->ci) ==
(unsigned long) (netdev_priv(musycc_dev)))
{
ndev->flags &= ~IFF_UP;
- dev_put (ndev);
netif_stop_queue (ndev);
do_deluser (ndev, 1);
- } else
- dev_put (ndev);
+ }
}
return 0;
}
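
The two linux.c hunks above replace dev_get_by_name(), which takes a reference on the returned net_device and therefore needs a matching dev_put(), with __dev_get_by_name(), which returns the device without touching its refcount and relies on the caller holding the RTNL lock for the duration of use, which these private-ioctl paths are normally entered under. The error code for a missing device also changes from -ENOENT to -ENODEV. A minimal sketch of the two lookup styles (the netif_stop_queue() call stands in for "use the device"):

    #include <linux/errno.h>
    #include <linux/netdevice.h>

    /* Reference-counted lookup: safe without RTNL; the reference must
     * be dropped with dev_put() once the device is no longer needed. */
    static int poke_with_ref(const char *name)
    {
            struct net_device *dev = dev_get_by_name(&init_net, name);

            if (!dev)
                    return -ENODEV;
            netif_stop_queue(dev);          /* example use of the device */
            dev_put(dev);                   /* balance dev_get_by_name() */
            return 0;
    }

    /* Unreferenced lookup: no dev_put() needed, but the caller must
     * hold rtnl_lock() while the pointer is in use. */
    static int poke_under_rtnl(const char *name)
    {
            struct net_device *dev = __dev_get_by_name(&init_net, name);

            if (!dev)
                    return -ENODEV;
            netif_stop_queue(dev);          /* same use, no refcounting */
            return 0;
    }
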
diff --git a/drivers/staging/cxt1e1/musycc.c b/drivers/staging/cxt1e1/musycc.c
index 0ba8c3ae673b..7a3a30cd0f7f 100644
--- a/drivers/staging/cxt1e1/musycc.c
+++ b/drivers/staging/cxt1e1/musycc.c
@@ -1,5 +1,5 @@
-unsigned int max_intcnt = 0;
-unsigned int max_bh = 0;
+static unsigned int max_intcnt = 0;
+static unsigned int max_bh = 0;
/*-----------------------------------------------------------------------------
* musycc.c -
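
The musycc.c hunk marks max_intcnt and max_bh static because nothing outside this file references them; they leave the kernel's global namespace and the compiler can warn if they ever become unused. (Static storage is zero-initialized anyway, so the explicit "= 0" is redundant but harmless.) A minimal sketch of the pattern:

    /* File-scope counters used only inside this translation unit. */
    static unsigned int max_intcnt;     /* highest interrupt count observed */
    static unsigned int max_bh;         /* longest bottom-half pass observed */

    static void note_intcnt(unsigned int cnt)
    {
            if (cnt > max_intcnt)
                    max_intcnt = cnt;
    }

    static void note_bh(unsigned int pass_len)
    {
            if (pass_len > max_bh)
                    max_bh = pass_len;
    }
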
diff --git a/drivers/staging/cxt1e1/pmcc4_drv.c b/drivers/staging/cxt1e1/pmcc4_drv.c
index 4028ea11c442..a9d95753be20 100644
--- a/drivers/staging/cxt1e1/pmcc4_drv.c
+++ b/drivers/staging/cxt1e1/pmcc4_drv.c
@@ -194,7 +194,7 @@ checkPorts (ci_t *ci)
* alarms conflicts with NCOMM's interrupt servicing implementation.
*/
- comet_t *comet;
+ struct s_comet_reg *comet;
volatile u_int32_t value;
u_int32_t copyVal, LEDval;
@@ -507,7 +507,7 @@ c4_cleanup (void)
int
c4_get_portcfg (ci_t *ci)
{
- comet_t *comet;
+ struct s_comet_reg *comet;
int portnum, mask;
u_int32_t wdata, rdata;
@@ -561,7 +561,7 @@ c4_init (ci_t *ci, u_char *func0, u_char *func1)
for (portnum = 0; portnum < MUSYCC_NPORTS; portnum++)
{
pi = &ci->port[portnum];
- pi->cometbase = (comet_t *) ((u_int32_t *) (func1 + COMET_OFFSET (portnum)));
+ pi->cometbase = (struct s_comet_reg *) ((u_int32_t *) (func1 + COMET_OFFSET (portnum)));
pi->reg = (struct musycc_globalr *) ((u_char *) ci->reg + (portnum * 0x800));
pi->portnum = portnum;
pi->p.portnum = portnum;
@@ -693,7 +693,7 @@ c4_init2 (ci_t *ci)
int
c4_loop_port (ci_t *ci, int portnum, u_int8_t cmd)
{
- comet_t *comet;
+ struct s_comet_reg *comet;
volatile u_int32_t loopValue;
comet = ci->port[portnum].cometbase;
@@ -752,7 +752,7 @@ c4_loop_port (ci_t *ci, int portnum, u_int8_t cmd)
status_t
c4_frame_rw (ci_t *ci, struct sbecom_port_param *pp)
{
- comet_t *comet;
+ struct s_comet_reg *comet;
volatile u_int32_t data;
if (pp->portnum >= ci->max_port)/* sanity check */
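
In the c4_init() hunk above, each port's cometbase is computed from the function-1 BAR mapping plus a per-port offset; the casts change only the pointer type, not the address. A sketch of that computation, with COMET_OFFSET() assumed to be a simple stride macro (the real definition lives in the cxt1e1 headers and may differ):

    /* Hypothetical stride; the driver's real COMET_OFFSET() may differ. */
    #define COMET_STRIDE    0x1000

    static struct s_comet_reg *comet_base(u_char *func1_base, int portnum)
    {
            /* byte arithmetic on the mapped BAR, then retype the result */
            return (struct s_comet_reg *)(func1_base + portnum * COMET_STRIDE);
    }
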
diff --git a/drivers/staging/cxt1e1/pmcc4_private.h b/drivers/staging/cxt1e1/pmcc4_private.h
index b2b6e3702630..7edbd4e492e3 100644
--- a/drivers/staging/cxt1e1/pmcc4_private.h
+++ b/drivers/staging/cxt1e1/pmcc4_private.h
@@ -133,7 +133,7 @@ struct c4_port_info
void *regram_saved; /* Original malloc value may have non-2KB
* boundary. Need to save for use when
* freeing. */
- comet_t *cometbase;
+ struct s_comet_reg *cometbase;
struct sbe_card_info *up;
/*
diff --git a/drivers/staging/cxt1e1/sbeid.c b/drivers/staging/cxt1e1/sbeid.c
index 6ec51bccceb1..97c5c6e7e299 100644
--- a/drivers/staging/cxt1e1/sbeid.c
+++ b/drivers/staging/cxt1e1/sbeid.c
@@ -20,190 +20,185 @@
#include "sbe_bid.h"
char *
-sbeid_get_bdname (ci_t *ci)
+sbeid_get_bdname(ci_t *ci)
{
- char *np = NULL;
+ char *np = NULL;
- switch (ci->brd_id)
- {
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1):
- np = "wanPTMC-256T3 <E1>";
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1):
- np = "wanPTMC-256T3 <T1>";
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1):
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L):
- np = "wanPMC-C4T1E1";
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1):
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L):
- np = "wanPMC-C2T1E1";
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1):
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L):
- np = "wanPMC-C1T1E1";
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1):
- np = "wanPCI-C4T1E1";
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1):
- np = "wanPCI-C2T1E1";
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1):
- np = "wanPCI-C1T1E1";
- break;
- default:
- /*** np = "<unknown>"; ***/
- np = "wanPCI-CxT1E1";
- break;
- }
+ switch (ci->brd_id) {
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1):
+ np = "wanPTMC-256T3 <E1>";
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1):
+ np = "wanPTMC-256T3 <T1>";
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1):
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L):
+ np = "wanPMC-C4T1E1";
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1):
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L):
+ np = "wanPMC-C2T1E1";
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1):
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L):
+ np = "wanPMC-C1T1E1";
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1):
+ np = "wanPCI-C4T1E1";
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1):
+ np = "wanPCI-C2T1E1";
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1):
+ np = "wanPCI-C1T1E1";
+ break;
+ default:
+ /*** np = "<unknown>"; ***/
+ np = "wanPCI-CxT1E1";
+ break;
+ }
- return np;
+ return np;
}
/* given the presetting of brd_id, set the corresponding hdw_id */
void
-sbeid_set_hdwbid (ci_t *ci)
+sbeid_set_hdwbid(ci_t *ci)
{
- /*
- * set SBE's unique hardware identification (for legacy boards might not
- * have this register implemented)
- */
+ /*
+ * set SBE's unique hardware identification (for legacy boards might not
+ * have this register implemented)
+ */
- switch (ci->brd_id)
- {
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1):
- ci->hdw_bid = SBE_BID_256T3_E1; /* 0x46 - SBE wanPTMC-256T3 (E1
- * Version) */
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1):
- ci->hdw_bid = SBE_BID_256T3_T1; /* 0x42 - SBE wanPTMC-256T3 (T1
- * Version) */
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1):
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L):
- /*
- * This Board ID is a generic identification. Use the found number
- * of ports to further define this hardware.
- */
- switch (ci->max_port)
- {
- default: /* shouldn't need a default, but have one
- * anyway */
- case 4:
- ci->hdw_bid = SBE_BID_PMC_C4T1E1; /* 0xC4 - SBE wanPMC-C4T1E1 */
- break;
- case 2:
- ci->hdw_bid = SBE_BID_PMC_C2T1E1; /* 0xC2 - SBE wanPMC-C2T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1);
- break;
- case 1:
- ci->hdw_bid = SBE_BID_PMC_C1T1E1; /* 0xC1 - SBE wanPMC-C1T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1);
- break;
- }
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1):
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L):
- ci->hdw_bid = SBE_BID_PMC_C2T1E1; /* 0xC2 - SBE wanPMC-C2T1E1 */
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1):
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L):
- ci->hdw_bid = SBE_BID_PMC_C1T1E1; /* 0xC1 - SBE wanPMC-C1T1E1 */
- break;
+ switch (ci->brd_id) {
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1):
+ ci->hdw_bid = SBE_BID_256T3_E1; /* 0x46 - SBE wanPTMC-256T3 (E1
+ * Version) */
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1):
+ ci->hdw_bid = SBE_BID_256T3_T1; /* 0x42 - SBE wanPTMC-256T3 (T1
+ * Version) */
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1):
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L):
+ /*
+ * This Board ID is a generic identification. Use the found number
+ * of ports to further define this hardware.
+ */
+ switch (ci->max_port) {
+ default: /* shouldn't need a default, but have one
+ * anyway */
+ case 4:
+ ci->hdw_bid = SBE_BID_PMC_C4T1E1; /* 0xC4 - SBE wanPMC-C4T1E1 */
+ break;
+ case 2:
+ ci->hdw_bid = SBE_BID_PMC_C2T1E1; /* 0xC2 - SBE wanPMC-C2T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1);
+ break;
+ case 1:
+ ci->hdw_bid = SBE_BID_PMC_C1T1E1; /* 0xC1 - SBE wanPMC-C1T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1);
+ break;
+ }
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1):
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L):
+ ci->hdw_bid = SBE_BID_PMC_C2T1E1; /* 0xC2 - SBE wanPMC-C2T1E1 */
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1):
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L):
+ ci->hdw_bid = SBE_BID_PMC_C1T1E1; /* 0xC1 - SBE wanPMC-C1T1E1 */
+ break;
#ifdef SBE_PMCC4_ENABLE
- /*
- * This case is entered as a result of the inability to obtain the
- * <bid> from the board's EEPROM. Assume a PCI board and set
- * <hdsbid> according to the number ofr found ports.
- */
- case 0:
- /* start by assuming 4-port for ZERO casing */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1);
- /* drop thru to set hdw_bid and alternate PCI CxT1E1 settings */
+ /*
+ * This case is entered as a result of the inability to obtain the
+ * <bid> from the board's EEPROM. Assume a PCI board and set
+	 * <hdsbid> according to the number of found ports.

+ */
+ case 0:
+ /* start by assuming 4-port for ZERO casing */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1);
+ /* drop thru to set hdw_bid and alternate PCI CxT1E1 settings */
#endif
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1):
- /*
- * This Board ID is a generic identification. Use the number of
- * found ports to further define this hardware.
- */
- switch (ci->max_port)
- {
- default: /* shouldn't need a default, but have one
- * anyway */
- case 4:
- ci->hdw_bid = SBE_BID_PCI_C4T1E1; /* 0x04 - SBE wanPCI-C4T1E1 */
- break;
- case 2:
- ci->hdw_bid = SBE_BID_PCI_C2T1E1; /* 0x02 - SBE wanPCI-C2T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1);
- break;
- case 1:
- ci->hdw_bid = SBE_BID_PCI_C1T1E1; /* 0x01 - SBE wanPCI-C1T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1);
- break;
- }
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1):
- ci->hdw_bid = SBE_BID_PCI_C2T1E1; /* 0x02 - SBE wanPCI-C2T1E1 */
- break;
- case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1):
- ci->hdw_bid = SBE_BID_PCI_C1T1E1; /* 0x01 - SBE wanPCI-C1T1E1 */
- break;
- default:
- /*** bid = "<unknown>"; ***/
- ci->hdw_bid = SBE_BID_PMC_C4T1E1; /* 0x41 - SBE wanPTMC-C4T1E1 */
- break;
- }
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1):
+ /*
+ * This Board ID is a generic identification. Use the number of
+ * found ports to further define this hardware.
+ */
+ switch (ci->max_port) {
+ default: /* shouldn't need a default, but have one
+ * anyway */
+ case 4:
+ ci->hdw_bid = SBE_BID_PCI_C4T1E1; /* 0x04 - SBE wanPCI-C4T1E1 */
+ break;
+ case 2:
+ ci->hdw_bid = SBE_BID_PCI_C2T1E1; /* 0x02 - SBE wanPCI-C2T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1);
+ break;
+ case 1:
+ ci->hdw_bid = SBE_BID_PCI_C1T1E1; /* 0x01 - SBE wanPCI-C1T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1);
+ break;
+ }
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1):
+ ci->hdw_bid = SBE_BID_PCI_C2T1E1; /* 0x02 - SBE wanPCI-C2T1E1 */
+ break;
+ case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1):
+ ci->hdw_bid = SBE_BID_PCI_C1T1E1; /* 0x01 - SBE wanPCI-C1T1E1 */
+ break;
+ default:
+ /*** bid = "<unknown>"; ***/
+ ci->hdw_bid = SBE_BID_PMC_C4T1E1; /* 0x41 - SBE wanPTMC-C4T1E1 */
+ break;
+ }
}
/* given the presetting of hdw_bid, set the corresponding brd_id */
void
-sbeid_set_bdtype (ci_t *ci)
+sbeid_set_bdtype(ci_t *ci)
{
- /* set SBE's unique PCI VENDOR/DEVID */
- switch (ci->hdw_bid)
- {
- case SBE_BID_C1T3: /* SBE wanPMC-C1T3 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T3);
- break;
- case SBE_BID_C24TE1: /* SBE wanPTMC-C24TE1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_C24TE1);
- break;
- case SBE_BID_256T3_E1: /* SBE wanPTMC-256T3 E1 Version */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1);
- break;
- case SBE_BID_256T3_T1: /* SBE wanPTMC-256T3 T1 Version */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1);
- break;
- case SBE_BID_PMC_C4T1E1: /* 0xC4 - SBE wanPMC-C4T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1);
- break;
- case SBE_BID_PMC_C2T1E1: /* 0xC2 - SBE wanPMC-C2T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1);
- break;
- case SBE_BID_PMC_C1T1E1: /* 0xC1 - SBE wanPMC-C1T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1);
- break;
- case SBE_BID_PCI_C4T1E1: /* 0x04 - SBE wanPCI-C4T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1);
- break;
- case SBE_BID_PCI_C2T1E1: /* 0x02 - SBE wanPCI-C2T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1);
- break;
- case SBE_BID_PCI_C1T1E1: /* 0x01 - SBE wanPCI-C1T1E1 */
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1);
- break;
+ /* set SBE's unique PCI VENDOR/DEVID */
+ switch (ci->hdw_bid) {
+ case SBE_BID_C1T3: /* SBE wanPMC-C1T3 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T3);
+ break;
+ case SBE_BID_C24TE1: /* SBE wanPTMC-C24TE1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_C24TE1);
+ break;
+ case SBE_BID_256T3_E1: /* SBE wanPTMC-256T3 E1 Version */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1);
+ break;
+ case SBE_BID_256T3_T1: /* SBE wanPTMC-256T3 T1 Version */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1);
+ break;
+ case SBE_BID_PMC_C4T1E1: /* 0xC4 - SBE wanPMC-C4T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1);
+ break;
+ case SBE_BID_PMC_C2T1E1: /* 0xC2 - SBE wanPMC-C2T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1);
+ break;
+ case SBE_BID_PMC_C1T1E1: /* 0xC1 - SBE wanPMC-C1T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1);
+ break;
+ case SBE_BID_PCI_C4T1E1: /* 0x04 - SBE wanPCI-C4T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1);
+ break;
+ case SBE_BID_PCI_C2T1E1: /* 0x02 - SBE wanPCI-C2T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1);
+ break;
+ case SBE_BID_PCI_C1T1E1: /* 0x01 - SBE wanPCI-C1T1E1 */
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1);
+ break;
- default:
- /*** hdw_bid = "<unknown>"; ***/
- ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1);
- break;
- }
+ default:
+ /*** hdw_bid = "<unknown>"; ***/
+ ci->brd_id = SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1);
+ break;
+ }
}
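
The sbeid.c changes above are re-indentation only (kernel-style tabs and brace placement); the logic mapping PCI vendor/device pairs to board names and hardware ids is unchanged. For orientation, SBE_BOARD_ID() presumably packs the two PCI ids into one 32-bit key; a sketch of the lookup under that assumption (the macro body shown here is assumed, not taken from the driver headers):

    /* Assumed packing; check the sbe/pmcc4 headers for the real macro. */
    #define SBE_BOARD_ID(vendor, device)    (((vendor) << 16) | (device))

    static const char *board_name(u_int32_t brd_id)
    {
            switch (brd_id) {
            case SBE_BOARD_ID(PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1):
                    return "wanPCI-C4T1E1";
            default:
                    return "wanPCI-CxT1E1";     /* unknown boards fall back */
            }
    }
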
diff --git a/drivers/staging/dgap/Kconfig b/drivers/staging/dgap/Kconfig
index 31f1d7533eec..3bbe9e122365 100644
--- a/drivers/staging/dgap/Kconfig
+++ b/drivers/staging/dgap/Kconfig
@@ -1,6 +1,6 @@
config DGAP
tristate "Digi EPCA PCI products"
default n
- depends on TTY
+ depends on TTY && HAS_IOMEM
---help---
Driver for the Digi International EPCA PCI based product line
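
The Kconfig hunk adds HAS_IOMEM to the dependency line so the driver cannot be enabled on architectures without memory-mapped I/O support, where its ioremap()-based setup would otherwise break randconfig/allmodconfig builds (UML is the usual such target). The resulting entry reads:

    config DGAP
            tristate "Digi EPCA PCI products"
            default n
            depends on TTY && HAS_IOMEM
            ---help---
              Driver for the Digi International EPCA PCI based product line
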
diff --git a/drivers/staging/dgap/dgap_conf.h b/drivers/staging/dgap/dgap_conf.h
index 88097013ed04..484ed726a4d6 100644
--- a/drivers/staging/dgap/dgap_conf.h
+++ b/drivers/staging/dgap/dgap_conf.h
@@ -138,7 +138,7 @@
#define CU 91
#define PRINT 92
#define XPRINT 93
-#define CMAJOR 94
+#define CMAJOR 94
#define ALTPIN 95
#define STARTO 96
#define USEINTR 97
@@ -262,9 +262,9 @@ struct cnode {
} module;
char *ttyname;
-
+
char *cuname;
-
+
char *printname;
int majornumber;
diff --git a/drivers/staging/dgap/dgap_driver.c b/drivers/staging/dgap/dgap_driver.c
index 4c1515ee56e5..089d017fc291 100644
--- a/drivers/staging/dgap/dgap_driver.c
+++ b/drivers/staging/dgap/dgap_driver.c
@@ -506,7 +506,7 @@ static int dgap_found_board(struct pci_dev *pdev, int id)
/* get the board structure and prep it */
brd = dgap_Board[dgap_NumBoards] =
- (struct board_t *) dgap_driver_kzmalloc(sizeof(struct board_t), GFP_KERNEL);
+ (struct board_t *) kzalloc(sizeof(struct board_t), GFP_KERNEL);
if (!brd) {
APR(("memory allocation for board structure failed\n"));
return(-ENOMEM);
@@ -514,7 +514,7 @@ static int dgap_found_board(struct pci_dev *pdev, int id)
/* make a temporary message buffer for the boot messages */
brd->msgbuf = brd->msgbuf_head =
- (char *) dgap_driver_kzmalloc(sizeof(char) * 8192, GFP_KERNEL);
+ (char *) kzalloc(sizeof(char) * 8192, GFP_KERNEL);
if(!brd->msgbuf) {
kfree(brd);
APR(("memory allocation for board msgbuf failed\n"));
@@ -925,20 +925,6 @@ static void dgap_init_globals(void)
/*
- * dgap_driver_kzmalloc()
- *
- * Malloc and clear memory,
- */
-void *dgap_driver_kzmalloc(size_t size, int priority)
-{
- void *p = kmalloc(size, priority);
- if(p)
- memset(p, 0, size);
- return(p);
-}
-
-
-/*
* dgap_mbuf()
*
* Used to print to the message buffer during board init.
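
The dgap_driver.c hunks drop the home-grown dgap_driver_kzmalloc(), which was just kmalloc() followed by memset(), in favour of the standard kzalloc(); callers keep passing the same GFP_KERNEL or GFP_ATOMIC flags. A minimal sketch of the substitution:

    #include <linux/slab.h>
    #include <linux/string.h>

    /* Removed helper: open-coded zeroing allocation. */
    static void *old_zmalloc(size_t size, gfp_t flags)
    {
            void *p = kmalloc(size, flags);

            if (p)
                    memset(p, 0, size);
            return p;
    }

    /* Preferred: let the slab allocator hand back zeroed memory. */
    static void *new_zmalloc(size_t size, gfp_t flags)
    {
            return kzalloc(size, flags);
    }
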
diff --git a/drivers/staging/dgap/dgap_driver.h b/drivers/staging/dgap/dgap_driver.h
index 7d631e80c00e..2f7a55a7e40d 100644
--- a/drivers/staging/dgap/dgap_driver.h
+++ b/drivers/staging/dgap/dgap_driver.h
@@ -578,7 +578,6 @@ struct channel_t {
*************************************************************************/
extern int dgap_ms_sleep(ulong ms);
-extern void *dgap_driver_kzmalloc(size_t size, int priority);
extern char *dgap_ioctl_name(int cmd);
extern void dgap_do_bios_load(struct board_t *brd, uchar __user *ubios, int len);
extern void dgap_do_fep_load(struct board_t *brd, uchar __user *ufep, int len);
diff --git a/drivers/staging/dgap/dgap_fep5.c b/drivers/staging/dgap/dgap_fep5.c
index 794cf9db8b83..f75831a422e8 100644
--- a/drivers/staging/dgap/dgap_fep5.c
+++ b/drivers/staging/dgap/dgap_fep5.c
@@ -6,16 +6,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*
* NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
*
@@ -39,6 +29,7 @@
#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
#include <linux/tty.h>
#include <linux/tty_flip.h> /* For tty_schedule_flip */
+#include <linux/slab.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
#include <linux/sched.h>
@@ -75,7 +66,7 @@ void dgap_do_config_load(uchar __user *uaddr, int len)
char buf[U2BSIZE];
int n;
- to_addr = dgap_config_buf = dgap_driver_kzmalloc(len + 1, GFP_ATOMIC);
+ to_addr = dgap_config_buf = kzalloc(len + 1, GFP_ATOMIC);
if (!dgap_config_buf) {
DPR_INIT(("dgap_do_config_load - unable to allocate memory for file\n"));
dgap_driver_state = DRIVER_NEED_CONFIG_LOAD;
@@ -99,7 +90,7 @@ void dgap_do_config_load(uchar __user *uaddr, int len)
to_addr += n;
from_addr += n;
n = U2BSIZE;
- }
+ }
dgap_config_buf[orig_len] = '\0';
@@ -130,8 +121,8 @@ int dgap_after_config_loaded(void)
/*
* allocate flip buffer for board.
*/
- dgap_Board[i]->flipbuf = dgap_driver_kzmalloc(MYFLIPLEN, GFP_ATOMIC);
- dgap_Board[i]->flipflagbuf = dgap_driver_kzmalloc(MYFLIPLEN, GFP_ATOMIC);
+ dgap_Board[i]->flipbuf = kzalloc(MYFLIPLEN, GFP_ATOMIC);
+ dgap_Board[i]->flipflagbuf = kzalloc(MYFLIPLEN, GFP_ATOMIC);
}
return rc;
@@ -166,9 +157,9 @@ static int dgap_usertoboard(struct board_t *brd, char *to_addr, char __user *fro
/* increment counts */
len -= n;
to_addr += n;
- from_addr += n;
+ from_addr += n;
n = U2BSIZE;
- }
+ }
return 0;
}
@@ -195,7 +186,7 @@ void dgap_do_bios_load(struct board_t *brd, uchar __user *ubios, int len)
*/
for (i = 0; i < 16; i++)
writeb(0, addr + POSTAREA + i);
-
+
/*
* Download bios
*/
@@ -364,7 +355,7 @@ static void dgap_do_reset_board(struct board_t *brd)
int i = 0;
if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase || !brd->re_map_port) {
- DPR_INIT(("dgap_do_reset_board() start. bad values. brd: %p mem: %p io: %p\n",
+ DPR_INIT(("dgap_do_reset_board() start. bad values. brd: %p mem: %p io: %p\n",
brd, brd ? brd->re_map_membase : 0, brd ? brd->re_map_port : 0));
return;
}
@@ -470,7 +461,7 @@ static void dgap_get_vpd(struct board_t *brd)
/*
* To get to the OTPROM memory, we have to send the boards base
- * address or'ed with 1 into the PCI Rom Address location.
+ * address or'ed with 1 into the PCI Rom Address location.
*/
magic = brd->membase | 0x01;
pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
@@ -492,7 +483,7 @@ static void dgap_get_vpd(struct board_t *brd)
* for the VPD offset.
*/
while (base_offset <= EXPANSION_ROM_SIZE) {
-
+
/*
* Lots of magic numbers here.
*
@@ -551,7 +542,7 @@ static void dgap_get_vpd(struct board_t *brd)
*/
void dgap_poll_tasklet(unsigned long data)
{
- struct board_t *bd = (struct board_t *) data;
+ struct board_t *bd = (struct board_t *) data;
ulong lock_flags;
ulong lock_flags2;
char *vaddr;
@@ -816,13 +807,13 @@ out:
*
*=======================================================================*/
void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint ncmds)
-{
+{
char *vaddr = NULL;
struct cm_t *cm_addr = NULL;
uint count;
uint n;
u16 head;
- u16 tail;
+ u16 tail;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return;
@@ -833,7 +824,7 @@ void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint n
if (ch->ch_bd->state == BOARD_FAILED) {
DPR_CORE(("%s:%d board is in failed state.\n", __FILE__, __LINE__));
return;
- }
+ }
/*
* Make sure the pointers are in range before
@@ -847,13 +838,13 @@ void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint n
cm_addr = (struct cm_t *) (vaddr + CMDBUF);
head = readw(&(cm_addr->cm_head));
- /*
+ /*
* Forget it if pointers out of range.
*/
if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
DPR_CORE(("%s:%d pointers out of range, failing board!\n", __FILE__, __LINE__));
ch->ch_bd->state = BOARD_FAILED;
- return;
+ return;
}
/*
@@ -869,7 +860,7 @@ void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint n
writew(head, &(cm_addr->cm_head));
/*
- * Wait if necessary before updating the head
+ * Wait if necessary before updating the head
* pointer to limit the number of outstanding
* commands to the FEP. If the time spent waiting
* is outlandish, declare the FEP dead.
@@ -890,14 +881,14 @@ void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint n
return;
}
udelay(10);
- }
+ }
}
/*=======================================================================
*
* dgap_cmdw - Sends a 1 word command to the FEP.
- *
+ *
* ch - Pointer to channel structure.
* cmd - Command to be sent.
* word - Integer containing word to be sent.
@@ -936,7 +927,7 @@ void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds)
cm_addr = (struct cm_t *) (vaddr + CMDBUF);
head = readw(&(cm_addr->cm_head));
- /*
+ /*
* Forget it if pointers out of range.
*/
if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
@@ -958,7 +949,7 @@ void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds)
/*
* Wait if necessary before updating the head
- * pointer to limit the number of outstanding
+ * pointer to limit the number of outstanding
* commands to the FEP. If the time spent waiting
* is outlandish, declare the FEP dead.
*/
@@ -978,7 +969,7 @@ void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds)
return;
}
udelay(10);
- }
+ }
}
@@ -986,7 +977,7 @@ void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds)
/*=======================================================================
*
* dgap_cmdw_ext - Sends a extended word command to the FEP.
- *
+ *
* ch - Pointer to channel structure.
* cmd - Command to be sent.
* word - Integer containing word to be sent.
@@ -1025,7 +1016,7 @@ static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
cm_addr = (struct cm_t *) (vaddr + CMDBUF);
head = readw(&(cm_addr->cm_head));
- /*
+ /*
* Forget it if pointers out of range.
*/
if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
@@ -1060,7 +1051,7 @@ static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
/*
* Wait if necessary before updating the head
- * pointer to limit the number of outstanding
+ * pointer to limit the number of outstanding
* commands to the FEP. If the time spent waiting
* is outlandish, declare the FEP dead.
*/
@@ -1080,7 +1071,7 @@ static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
return;
}
udelay(10);
- }
+ }
}
@@ -1102,7 +1093,7 @@ void dgap_wmove(struct channel_t *ch, char *buf, uint cnt)
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return;
-
+
/*
* Check parameters.
*/
@@ -1172,9 +1163,9 @@ uint dgap_get_custom_baud(struct channel_t *ch)
/*
* Go get from fep mem, what the fep
- * believes the custom baud rate is.
+ * believes the custom baud rate is.
*/
- offset = ((((*(unsigned short *)(vaddr + ECS_SEG)) << 4) +
+ offset = ((((*(unsigned short *)(vaddr + ECS_SEG)) << 4) +
(ch->ch_portnum * 0x28) + LINE_SPEED));
value = readw(vaddr + offset);
@@ -1210,7 +1201,7 @@ void dgap_firmware_reset_port(struct channel_t *ch)
/*=======================================================================
- *
+ *
* dgap_param - Set Digi parameters.
*
* struct tty_struct * - TTY for port.
@@ -1244,7 +1235,7 @@ int dgap_param(struct tty_struct *tty)
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return -ENXIO;
- bs = ch->ch_bs;
+ bs = ch->ch_bs;
if (!bs)
return -ENXIO;
@@ -1284,13 +1275,13 @@ int dgap_param(struct tty_struct *tty)
/*
* Now go get from fep mem, what the fep
- * believes the custom baud rate is.
+ * believes the custom baud rate is.
*/
ch->ch_baud_info = ch->ch_custom_speed = dgap_get_custom_baud(ch);
DPR_PARAM(("param: Got %d speed\n", ch->ch_custom_speed));
- /* Handle transition from B0 */
+ /* Handle transition from B0 */
if (ch->ch_flags & CH_BAUD0) {
ch->ch_flags &= ~(CH_BAUD0);
ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
@@ -1352,7 +1343,7 @@ int dgap_param(struct tty_struct *tty)
baud = 0;
}
- if (baud == 0)
+ if (baud == 0)
baud = 9600;
ch->ch_baud_info = baud;
@@ -1425,7 +1416,7 @@ int dgap_param(struct tty_struct *tty)
dgap_cmdw(ch, SCFLAG, (u16) cflag, 0);
}
- /* Handle transition from B0 */
+ /* Handle transition from B0 */
if (ch->ch_flags & CH_BAUD0) {
ch->ch_flags &= ~(CH_BAUD0);
ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
@@ -1475,7 +1466,7 @@ int dgap_param(struct tty_struct *tty)
if (ch->ch_digi.digi_flags & RTSPACE)
hflow |= D_RTS(ch);
if (ch->ch_digi.digi_flags & DTRPACE)
- hflow |= D_DTR(ch);
+ hflow |= D_DTR(ch);
if (ch->ch_digi.digi_flags & CTSPACE)
hflow |= D_CTS(ch);
if (ch->ch_digi.digi_flags & DSRPACE)
@@ -1488,7 +1479,7 @@ int dgap_param(struct tty_struct *tty)
/* Okay to have channel and board locks held calling this */
dgap_cmdb(ch, SHFLOW, (uchar) hflow, 0xff, 0);
- }
+ }
/*
@@ -1507,7 +1498,7 @@ int dgap_param(struct tty_struct *tty)
}
/*
- * Set modem control lines.
+ * Set modem control lines.
*/
mval ^= ch->ch_mforce & (mval ^ ch->ch_mval);
@@ -1524,12 +1515,12 @@ int dgap_param(struct tty_struct *tty)
}
/*
- * Read modem signals, and then call carrier function.
+ * Read modem signals, and then call carrier function.
*/
ch->ch_mistat = readb(&(bs->m_stat));
dgap_carrier(ch);
- /*
+ /*
* Set the start and stop characters.
*/
if (ch->ch_startc != ch->ch_fepstartc || ch->ch_stopc != ch->ch_fepstopc) {
@@ -1542,7 +1533,7 @@ int dgap_param(struct tty_struct *tty)
/*
* Set the Auxiliary start and stop characters.
- */
+ */
if (ch->ch_astartc != ch->ch_fepastartc || ch->ch_astopc != ch->ch_fepastopc) {
ch->ch_fepastartc = ch->ch_astartc;
ch->ch_fepastopc = ch->ch_astopc;
@@ -1609,7 +1600,7 @@ void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf, unsigned char *
} else {
/* save value examination in next state */
ch->pscan_savechar = c;
- ch->pscan_state = 2;
+ ch->pscan_state = 2;
}
break;
@@ -1637,7 +1628,7 @@ void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf, unsigned char *
count += 1;
ch->pscan_state = 0;
- }
+ }
}
*len = count;
DPR_PSCAN(("dgap_parity_scan finish\n"));
@@ -1721,9 +1712,8 @@ static int dgap_event(struct board_t *bd)
/*
* Make sure the interrupt is valid.
*/
- if ( port >= bd->nasync) {
+ if (port >= bd->nasync)
goto next;
- }
if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA))) {
goto next;
@@ -1779,7 +1769,7 @@ static int dgap_event(struct board_t *bd)
}
/*
- * Process Modem change signals.
+ * Process Modem change signals.
*/
if (reason & IFMODEM) {
ch->ch_mistat = modem;
@@ -1813,7 +1803,7 @@ static int dgap_event(struct board_t *bd)
ch->ch_tun.un_flags &= ~UN_LOW;
if (ch->ch_tun.un_flags & UN_ISOPEN) {
- if ((ch->ch_tun.un_tty->flags &
+ if ((ch->ch_tun.un_tty->flags &
(1 << TTY_DO_WRITE_WAKEUP)) &&
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
ch->ch_tun.un_tty->ldisc->ops->write_wakeup)
@@ -1841,7 +1831,7 @@ static int dgap_event(struct board_t *bd)
if (ch->ch_pun.un_flags & UN_LOW) {
ch->ch_pun.un_flags &= ~UN_LOW;
if (ch->ch_pun.un_flags & UN_ISOPEN) {
- if ((ch->ch_pun.un_tty->flags &
+ if ((ch->ch_pun.un_tty->flags &
(1 << TTY_DO_WRITE_WAKEUP)) &&
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
ch->ch_pun.un_tty->ldisc->ops->write_wakeup)
@@ -1879,7 +1869,7 @@ static int dgap_event(struct board_t *bd)
if (ch->ch_tun.un_flags & UN_EMPTY) {
ch->ch_tun.un_flags &= ~UN_EMPTY;
if (ch->ch_tun.un_flags & UN_ISOPEN) {
- if ((ch->ch_tun.un_tty->flags &
+ if ((ch->ch_tun.un_tty->flags &
(1 << TTY_DO_WRITE_WAKEUP)) &&
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
ch->ch_tun.un_tty->ldisc->ops->write_wakeup)
@@ -1905,7 +1895,7 @@ static int dgap_event(struct board_t *bd)
if (ch->ch_pun.un_flags & UN_EMPTY) {
ch->ch_pun.un_flags &= ~UN_EMPTY;
if (ch->ch_pun.un_flags & UN_ISOPEN) {
- if ((ch->ch_pun.un_tty->flags &
+ if ((ch->ch_pun.un_tty->flags &
(1 << TTY_DO_WRITE_WAKEUP)) &&
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
ch->ch_pun.un_tty->ldisc->ops->write_wakeup)
@@ -1945,4 +1935,4 @@ next:
DGAP_UNLOCK(bd->bd_lock, lock_flags);
return 0;
-}
+}
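
Most of the dgap_fep5.c hunks are whitespace, plus the linux/slab.h include needed once kzalloc() is called directly. The functions being touched (dgap_cmdb/dgap_cmdw/dgap_cmdw_ext) queue commands into a ring in board memory and, as the context comments describe, wait before advancing the head pointer so that only a few commands are outstanding, declaring the FEP dead if the wait takes too long. A rough sketch of such a bounded wait; the ring geometry, 4-byte command size, poll step and limit are all made up for illustration, and the real driver reads cm_head/cm_tail from FEP memory:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define RING_BYTES      0x200   /* illustrative ring size (power of two) */
    #define CMD_BYTES       4       /* illustrative per-command size */
    #define MAX_POLLS       100000  /* give up after ~1s of 10us polls */

    static int wait_for_ring_space(u16 head, const volatile u16 *tail,
                                   unsigned int max_outstanding)
    {
            unsigned int polls = 0;

            /* outstanding = bytes between producer head and consumer tail */
            while (((head - *tail) & (RING_BYTES - 1)) / CMD_BYTES >
                   max_outstanding) {
                    if (++polls > MAX_POLLS)
                            return -EIO;    /* firmware looks dead */
                    udelay(10);
            }
            return 0;
    }
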
diff --git a/drivers/staging/dgap/dgap_parse.c b/drivers/staging/dgap/dgap_parse.c
index ff9d19449b43..36fd93d3f5f6 100644
--- a/drivers/staging/dgap/dgap_parse.c
+++ b/drivers/staging/dgap/dgap_parse.c
@@ -42,6 +42,7 @@
#include "dgap_types.h"
#include "dgap_fep5.h"
#include "dgap_driver.h"
+#include "dgap_parse.h"
#include "dgap_conf.h"
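
dgap_parse.c now includes its own header, dgap_parse.h (dgap_trace.c gets the same treatment below). A .c file that includes the header declaring its exported functions lets the compiler verify each definition against the prototype other files compile with, turning a silent mismatch into a compile error. A small sketch of the idea (the names here are hypothetical, not the real dgap API):

    /* foo.h -- the prototype every other file sees (hypothetical name) */
    int foo_parse(const char *cfg);

    /* foo.c -- includes foo.h, so a definition that drifts away from the
     * prototype above becomes a compile error in this very file. */
    #include "foo.h"

    int foo_parse(const char *cfg)
    {
            return cfg ? 0 : -1;    /* illustration only */
    }
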
diff --git a/drivers/staging/dgap/dgap_trace.c b/drivers/staging/dgap/dgap_trace.c
index 0f9a9569ea27..a53db9e0a577 100644
--- a/drivers/staging/dgap/dgap_trace.c
+++ b/drivers/staging/dgap/dgap_trace.c
@@ -17,15 +17,15 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
- * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
*
* This is shared code between Digi's CVS archive and the
* Linux Kernel sources.
* Changing the source just for reformatting needlessly breaks
* our CVS diff history.
*
- * Send any bug fixes/changes to: Eng.Linux at digi dot com.
- * Thank you.
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
*
*/
@@ -37,6 +37,7 @@
#include <linux/vmalloc.h>
#include "dgap_driver.h"
+#include "dgap_trace.h"
#define TRC_TO_CONSOLE 1
@@ -107,16 +108,16 @@ void dgap_tracef(const char *fmt, ...)
dgap_trcbufi = 0;
initd++;
- printk("dgap: tracing enabled - " TRC_DTRC
+ printk("dgap: tracing enabled - " TRC_DTRC
" 0x%lx 0x%x\n",
- (unsigned long)dgap_trcbuf,
+ (unsigned long)dgap_trcbuf,
dgap_trcbuf_size);
}
# if defined(TRC_ON_OVERFLOW_WRAP_AROUND)
/*
* This is the less CPU-intensive way to do things. We simply
- * wrap around before we fall off the end of the buffer. A
+ * wrap around before we fall off the end of the buffer. A
* tilde (~) demarcates the current end of the trace.
*
* This method should be used if you are concerned about race
@@ -131,14 +132,14 @@ void dgap_tracef(const char *fmt, ...)
dgap_trcbufi = 0;
}
- strcpy(&dgap_trcbuf[dgap_trcbufi], buf);
+ strcpy(&dgap_trcbuf[dgap_trcbufi], buf);
dgap_trcbufi += lenbuf;
dgap_trcbuf[dgap_trcbufi] = '~';
# elif defined(TRC_ON_OVERFLOW_SHIFT_BUFFER)
/*
* This is the more CPU-intensive way to do things. If we
- * venture into the last 1/8 of the buffer, we shift the
+ * venture into the last 1/8 of the buffer, we shift the
* last 7/8 of the buffer forward, wiping out the first 1/8.
* Advantage: No wrap-around, only truncation from the
* beginning.
diff --git a/drivers/staging/dgap/dgap_tty.c b/drivers/staging/dgap/dgap_tty.c
index 2a7a37298da4..39fb4dfb8b7e 100644
--- a/drivers/staging/dgap/dgap_tty.c
+++ b/drivers/staging/dgap/dgap_tty.c
@@ -17,22 +17,22 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
- * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
*
* This is shared code between Digi's CVS archive and the
* Linux Kernel sources.
* Changing the source just for reformatting needlessly breaks
* our CVS diff history.
*
- * Send any bug fixes/changes to: Eng.Linux at digi dot com.
- * Thank you.
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
*/
/************************************************************************
- *
+ *
* This file implements the tty driver functionality for the
* FEP5 based product lines.
- *
+ *
************************************************************************
*
* $Id: dgap_tty.c,v 1.3 2011/06/23 12:11:31 markh Exp $
@@ -155,7 +155,7 @@ static const struct tty_operations dgap_tty_ops = {
.flush_chars = dgap_tty_flush_chars,
.ioctl = dgap_tty_ioctl,
.set_termios = dgap_tty_set_termios,
- .stop = dgap_tty_stop,
+ .stop = dgap_tty_stop,
.start = dgap_tty_start,
.throttle = dgap_tty_throttle,
.unthrottle = dgap_tty_unthrottle,
@@ -173,11 +173,11 @@ static const struct tty_operations dgap_tty_ops = {
/************************************************************************
- *
+ *
* TTY Initialization/Cleanup Functions
- *
+ *
************************************************************************/
-
+
/*
* dgap_tty_preinit()
*
@@ -187,7 +187,7 @@ int dgap_tty_preinit(void)
{
unsigned long flags;
- DGAP_LOCK(dgap_global_lock, flags);
+ DGAP_LOCK(dgap_global_lock, flags);
/*
* Allocate a buffer for doing the copy from user space to
@@ -202,7 +202,7 @@ int dgap_tty_preinit(void)
DPR_INIT(("unable to allocate tmp write buf"));
return (-ENOMEM);
}
-
+
DGAP_UNLOCK(dgap_global_lock, flags);
return(0);
}
@@ -226,14 +226,14 @@ int dgap_tty_register(struct board_t *brd)
brd->SerialDriver->name_base = 0;
brd->SerialDriver->major = 0;
brd->SerialDriver->minor_start = 0;
- brd->SerialDriver->type = TTY_DRIVER_TYPE_SERIAL;
- brd->SerialDriver->subtype = SERIAL_TYPE_NORMAL;
+ brd->SerialDriver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->SerialDriver->subtype = SERIAL_TYPE_NORMAL;
brd->SerialDriver->init_termios = DgapDefaultTermios;
brd->SerialDriver->driver_name = DRVSTR;
brd->SerialDriver->flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
/* The kernel wants space to store pointers to tty_structs */
- brd->SerialDriver->ttys = dgap_driver_kzmalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
+ brd->SerialDriver->ttys = kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
if (!brd->SerialDriver->ttys)
return(-ENOMEM);
@@ -259,14 +259,14 @@ int dgap_tty_register(struct board_t *brd)
brd->PrintDriver->name_base = 0;
brd->PrintDriver->major = 0;
brd->PrintDriver->minor_start = 0;
- brd->PrintDriver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->PrintDriver->type = TTY_DRIVER_TYPE_SERIAL;
brd->PrintDriver->subtype = SERIAL_TYPE_NORMAL;
brd->PrintDriver->init_termios = DgapDefaultTermios;
brd->PrintDriver->driver_name = DRVSTR;
brd->PrintDriver->flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
/* The kernel wants space to store pointers to tty_structs */
- brd->PrintDriver->ttys = dgap_driver_kzmalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
+ brd->PrintDriver->ttys = kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
if (!brd->PrintDriver->ttys)
return(-ENOMEM);
@@ -380,7 +380,7 @@ int dgap_tty_init(struct board_t *brd)
*/
for (i = 0; i < brd->nasync; i++) {
if (!brd->channels[i]) {
- brd->channels[i] = dgap_driver_kzmalloc(sizeof(struct channel_t), GFP_ATOMIC);
+ brd->channels[i] = kzalloc(sizeof(struct channel_t), GFP_ATOMIC);
if (!brd->channels[i]) {
DPR_CORE(("%s:%d Unable to allocate memory for channel struct\n",
__FILE__, __LINE__));
@@ -450,7 +450,7 @@ int dgap_tty_init(struct board_t *brd)
/*
* Set queue water marks, interrupt mask,
- * and general tty parameters.
+ * and general tty parameters.
*/
ch->ch_tlw = tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) : ch->ch_tsize / 2;
@@ -479,7 +479,7 @@ int dgap_tty_init(struct board_t *brd)
writew(0, &(ch->ch_bs->edelay));
else
writew(100, &(ch->ch_bs->edelay));
-
+
writeb(1, &(ch->ch_bs->idata));
}
@@ -506,7 +506,7 @@ void dgap_tty_post_uninit(void)
* dgap_tty_uninit()
*
* Uninitialize the TTY portion of this driver. Free all memory and
- * resources.
+ * resources.
*/
void dgap_tty_uninit(struct board_t *brd)
{
@@ -611,7 +611,7 @@ static void dgap_sniff_nowait_nolock(struct channel_t *ch, uchar *text, uchar *b
if (n == 0) {
return;
}
-
+
/*
* Copy as much data as will fit.
*/
@@ -661,9 +661,9 @@ static void dgap_sniff_nowait_nolock(struct channel_t *ch, uchar *text, uchar *b
/*=======================================================================
*
* dgap_input - Process received data.
- *
+ *
* ch - Pointer to channel structure.
- *
+ *
*=======================================================================*/
void dgap_input(struct channel_t *ch)
@@ -704,8 +704,8 @@ void dgap_input(struct channel_t *ch)
DGAP_LOCK(bd->bd_lock, lock_flags);
DGAP_LOCK(ch->ch_lock, lock_flags2);
- /*
- * Figure the number of characters in the buffer.
+ /*
+ * Figure the number of characters in the buffer.
* Exit immediately if none.
*/
@@ -775,13 +775,13 @@ void dgap_input(struct channel_t *ch)
len = min(len, (N_TTY_BUF_SIZE - 1));
ld = tty_ldisc_ref(tp);
-
+
#ifdef TTY_DONT_FLIP
/*
* If the DONT_FLIP flag is on, don't flush our buffer, and act
- * like the ld doesn't have any space to put the data right now.
+ * like the ld doesn't have any space to put the data right now.
*/
- if (test_bit(TTY_DONT_FLIP, &tp->flags))
+ if (test_bit(TTY_DONT_FLIP, &tp->flags))
len = 0;
#endif
@@ -879,9 +879,9 @@ void dgap_input(struct channel_t *ch)
}
-/************************************************************************
+/************************************************************************
* Determines when CARRIER changes state and takes appropriate
- * action.
+ * action.
************************************************************************/
void dgap_carrier(struct channel_t *ch)
{
@@ -889,7 +889,7 @@ void dgap_carrier(struct channel_t *ch)
int virt_carrier = 0;
int phys_carrier = 0;
-
+
DPR_CARR(("dgap_carrier called...\n"));
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
@@ -917,11 +917,11 @@ void dgap_carrier(struct channel_t *ch)
if (ch->ch_digi.digi_flags & DIGI_FORCEDCD) {
virt_carrier = 1;
- }
+ }
if (ch->ch_c_cflag & CLOCAL) {
virt_carrier = 1;
- }
+ }
DPR_CARR(("DCD: physical: %d virt: %d\n", phys_carrier, virt_carrier));
@@ -968,7 +968,7 @@ void dgap_carrier(struct channel_t *ch)
* "make pretend that carrier is there".
*/
if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) &&
- (phys_carrier == 0))
+ (phys_carrier == 0))
{
/*
@@ -991,7 +991,7 @@ void dgap_carrier(struct channel_t *ch)
tty_hangup(ch->ch_tun.un_tty);
}
- if (ch->ch_pun.un_open_count > 0) {
+ if (ch->ch_pun.un_open_count > 0) {
DPR_CARR(("Sending pr hangup\n"));
tty_hangup(ch->ch_pun.un_tty);
}
@@ -1002,7 +1002,7 @@ void dgap_carrier(struct channel_t *ch)
*/
if (virt_carrier == 1)
ch->ch_flags |= CH_FCAR;
- else
+ else
ch->ch_flags &= ~CH_FCAR;
if (phys_carrier == 1)
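
The dgap_carrier() hunks above are whitespace-only, but the surrounding context shows the model: a virtual carrier is asserted when DIGI_FORCEDCD or CLOCAL is set, the physical carrier comes from the modem status byte, and a transition from carrier-present to both views absent hangs up any open tty units. A condensed sketch of that decision, using the channel fields visible in the context:

    /* Sketch: react to a carrier change on one channel. */
    static void carrier_check(struct channel_t *ch, int dcd_up)
    {
            int virt_carrier = 0;
            int phys_carrier = dcd_up;

            if (ch->ch_digi.digi_flags & DIGI_FORCEDCD)
                    virt_carrier = 1;
            if (ch->ch_c_cflag & CLOCAL)
                    virt_carrier = 1;

            /* carrier was up and both views have dropped: hang up users */
            if ((ch->ch_flags & CH_CD) && !virt_carrier && !phys_carrier) {
                    if (ch->ch_tun.un_open_count > 0)
                            tty_hangup(ch->ch_tun.un_tty);
                    if (ch->ch_pun.un_open_count > 0)
                            tty_hangup(ch->ch_pun.un_tty);
            }
    }
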
@@ -1013,9 +1013,9 @@ void dgap_carrier(struct channel_t *ch)
/************************************************************************
- *
+ *
* TTY Entry points and helper functions
- *
+ *
************************************************************************/
/*
@@ -1165,7 +1165,7 @@ static int dgap_tty_open(struct tty_struct *tty, struct file *file)
*/
dgap_param(tty);
- /*
+ /*
* follow protocol for opening port
*/
@@ -1195,13 +1195,13 @@ static int dgap_tty_open(struct tty_struct *tty, struct file *file)
}
-/*
+/*
* dgap_block_til_ready()
*
* Wait for DCD, if needed.
*/
static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch)
-{
+{
int retval = 0;
struct un_t *un = NULL;
ulong lock_flags;
@@ -1246,7 +1246,7 @@ static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struc
* If either unit is in the middle of the fragile part of close,
* we just cannot touch the channel safely.
* Go back to sleep, knowing that when the channel can be
- * touched safely, the close routine will signal the
+ * touched safely, the close routine will signal the
* ch_wait_flags to wake us back up.
*/
if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING)) {
@@ -1354,7 +1354,7 @@ static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struc
* dgap_tty_hangup()
*
* Hangup the port. Like a close, but don't wait for output to drain.
- */
+ */
static void dgap_tty_hangup(struct tty_struct *tty)
{
struct board_t *bd;
@@ -1436,7 +1436,7 @@ static void dgap_tty_close(struct tty_struct *tty, struct file *file)
*/
APR(("tty->count is 1, un open count is %d\n", un->un_open_count));
un->un_open_count = 1;
- }
+ }
if (--un->un_open_count < 0) {
APR(("bad serial port open count of %d\n", un->un_open_count));
@@ -1497,7 +1497,7 @@ static void dgap_tty_close(struct tty_struct *tty, struct file *file)
dgap_cmdb( ch, SMODEM, 0, D_DTR(ch)|D_RTS(ch), 0 );
/*
- * Go to sleep to ensure RTS/DTR
+ * Go to sleep to ensure RTS/DTR
* have been dropped for modems to see it.
*/
if (ch->ch_close_delay) {
@@ -1535,7 +1535,7 @@ static void dgap_tty_close(struct tty_struct *tty, struct file *file)
wake_up_interruptible(&un->un_flags_wait);
DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
+
DPR_BASIC(("dgap_tty_close - complete\n"));
}
@@ -1637,7 +1637,7 @@ static int dgap_tty_chars_in_buffer(struct tty_struct *tty)
}
}
- DPR_WRITE(("dgap_tty_chars_in_buffer. Port: %x - %d (head: %d tail: %d tsize: %d)\n",
+ DPR_WRITE(("dgap_tty_chars_in_buffer. Port: %x - %d (head: %d tail: %d tsize: %d)\n",
ch->ch_portnum, chars, thead, ttail, ch->ch_tsize));
return(chars);
}
@@ -1702,14 +1702,14 @@ static int dgap_wait_for_drain(struct tty_struct *tty)
}
-/*
+/*
* dgap_maxcps_room
*
* Reduces bytes_available to the max number of characters
* that can be sent currently given the maxcps value, and
* returns the new bytes_available. This only affects printer
* output.
- */
+ */
static int dgap_maxcps_room(struct tty_struct *tty, int bytes_available)
{
struct channel_t *ch = NULL;
@@ -1750,7 +1750,7 @@ static int dgap_maxcps_room(struct tty_struct *tty, int bytes_available)
}
else {
/* no room in the buffer */
- cps_limit = 0;
+ cps_limit = 0;
}
bytes_available = min(cps_limit, bytes_available);
@@ -1793,7 +1793,7 @@ static inline void dgap_set_firmware_event(struct un_t *un, unsigned int event)
* dgap_tty_write_room()
*
* Return space available in Tx buffer
- */
+ */
static int dgap_tty_write_room(struct tty_struct *tty)
{
struct channel_t *ch = NULL;
@@ -1831,7 +1831,7 @@ static int dgap_tty_write_room(struct tty_struct *tty)
ret = dgap_maxcps_room(tty, ret);
/*
- * If we are printer device, leave space for
+ * If we are printer device, leave space for
* possibly both the on and off strings.
*/
if (un->un_type == DGAP_PRINT) {
@@ -1856,7 +1856,7 @@ static int dgap_tty_write_room(struct tty_struct *tty)
*/
dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
+
DPR_WRITE(("dgap_tty_write_room - %d tail: %d head: %d\n", ret, tail, head));
return(ret);
@@ -1867,7 +1867,7 @@ static int dgap_tty_write_room(struct tty_struct *tty)
* dgap_tty_put_char()
*
* Put a character into ch->ch_buf
- *
+ *
* - used by the line discipline for OPOST processing
*/
static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c)
@@ -2094,7 +2094,7 @@ static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf, int
if (from_user) {
DGAP_UNLOCK(ch->ch_lock, lock_flags);
up(&dgap_TmpWriteSem);
- }
+ }
else {
DGAP_UNLOCK(ch->ch_lock, lock_flags);
}
@@ -2206,12 +2206,12 @@ static int dgap_tty_tiocmset(struct tty_struct *tty, struct file *file,
if (set & TIOCM_RTS) {
ch->ch_mforce |= D_RTS(ch);
ch->ch_mval |= D_RTS(ch);
- }
+ }
if (set & TIOCM_DTR) {
ch->ch_mforce |= D_DTR(ch);
ch->ch_mval |= D_DTR(ch);
- }
+ }
if (clear & TIOCM_RTS) {
ch->ch_mforce |= D_RTS(ch);
@@ -2316,7 +2316,7 @@ static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout)
/*
* dgap_send_xchar()
- *
+ *
* send a high priority character, called by ld.
*/
static void dgap_tty_send_xchar(struct tty_struct *tty, char c)
@@ -2529,7 +2529,7 @@ static int dgap_set_modem_info(struct tty_struct *tty, unsigned int command, uns
/*
- * dgap_tty_digigeta()
+ * dgap_tty_digigeta()
*
* Ioctl to get the information for ditty.
*
@@ -2571,7 +2571,7 @@ static int dgap_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retin
/*
- * dgap_tty_digiseta()
+ * dgap_tty_digiseta()
*
* Ioctl to set the information for ditty.
*
@@ -2614,10 +2614,10 @@ static int dgap_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_i
memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t));
- if (ch->ch_digi.digi_maxcps < 1)
+ if (ch->ch_digi.digi_maxcps < 1)
ch->ch_digi.digi_maxcps = 1;
- if (ch->ch_digi.digi_maxcps > 10000)
+ if (ch->ch_digi.digi_maxcps > 10000)
ch->ch_digi.digi_maxcps = 10000;
if (ch->ch_digi.digi_bufsize < 10)
@@ -2647,7 +2647,7 @@ static int dgap_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_i
/*
- * dgap_tty_digigetedelay()
+ * dgap_tty_digigetedelay()
*
* Ioctl to get the current edelay setting.
*
@@ -2689,7 +2689,7 @@ static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo)
/*
- * dgap_tty_digisetedelay()
+ * dgap_tty_digisetedelay()
*
* Ioctl to set the EDELAY setting
*
@@ -2783,7 +2783,7 @@ static int dgap_tty_digigetcustombaud(struct tty_struct *tty, int __user *retinf
/*
- * dgap_tty_digisetcustombaud()
+ * dgap_tty_digisetcustombaud()
*
* Ioctl to set the custom baud rate setting
*/
@@ -2898,7 +2898,7 @@ static void dgap_tty_throttle(struct tty_struct *tty)
un = tty->driver_data;
if (!un || un->magic != DGAP_UNIT_MAGIC)
return;
-
+
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return;
@@ -2938,7 +2938,7 @@ static void dgap_tty_unthrottle(struct tty_struct *tty)
un = tty->driver_data;
if (!un || un->magic != DGAP_UNIT_MAGIC)
return;
-
+
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return;
@@ -2979,7 +2979,7 @@ static void dgap_tty_start(struct tty_struct *tty)
un = tty->driver_data;
if (!un || un->magic != DGAP_UNIT_MAGIC)
return;
-
+
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return;
@@ -3016,7 +3016,7 @@ static void dgap_tty_stop(struct tty_struct *tty)
un = tty->driver_data;
if (!un || un->magic != DGAP_UNIT_MAGIC)
return;
-
+
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return;
@@ -3039,7 +3039,7 @@ static void dgap_tty_stop(struct tty_struct *tty)
}
-/*
+/*
* dgap_tty_flush_chars()
*
* Flush the cook buffer
@@ -3066,7 +3066,7 @@ static void dgap_tty_flush_chars(struct tty_struct *tty)
un = tty->driver_data;
if (!un || un->magic != DGAP_UNIT_MAGIC)
return;
-
+
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return;
@@ -3092,7 +3092,7 @@ static void dgap_tty_flush_chars(struct tty_struct *tty)
/*
* dgap_tty_flush_buffer()
- *
+ *
* Flush Tx buffer (make in == out)
*/
static void dgap_tty_flush_buffer(struct tty_struct *tty)
@@ -3110,7 +3110,7 @@ static void dgap_tty_flush_buffer(struct tty_struct *tty)
un = tty->driver_data;
if (!un || un->magic != DGAP_UNIT_MAGIC)
return;
-
+
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return;
@@ -3153,7 +3153,7 @@ static void dgap_tty_flush_buffer(struct tty_struct *tty)
* The IOCTL function and all of its helpers
*
*****************************************************************************/
-
+
/*
* dgap_tty_ioctl()
*
@@ -3186,7 +3186,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return (-ENODEV);
- DPR_IOCTL(("dgap_tty_ioctl start on port %d - cmd %s (%x), arg %lx\n",
+ DPR_IOCTL(("dgap_tty_ioctl start on port %d - cmd %s (%x), arg %lx\n",
ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
DGAP_LOCK(bd->bd_lock, lock_flags);
@@ -3205,7 +3205,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
case TCSBRK:
/*
- * TCSBRK is SVID version: non-zero arg --> no break
+ * TCSBRK is SVID version: non-zero arg --> no break
* this behaviour is exploited by tcdrain().
*
* According to POSIX.1 spec (7.2.2.1.2) breaks should be
@@ -3236,7 +3236,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGAP_UNLOCK(ch->ch_lock, lock_flags2);
DGAP_UNLOCK(bd->bd_lock, lock_flags);
- DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
return(0);
@@ -3270,7 +3270,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGAP_UNLOCK(ch->ch_lock, lock_flags2);
DGAP_UNLOCK(bd->bd_lock, lock_flags);
- DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
return(0);
@@ -3303,11 +3303,11 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGAP_UNLOCK(ch->ch_lock, lock_flags2);
DGAP_UNLOCK(bd->bd_lock, lock_flags);
- DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
return 0;
-
+
case TIOCCBRK:
/*
* FEP5 doesn't support turning off a break unconditionally.
@@ -3343,7 +3343,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGAP_UNLOCK(bd->bd_lock, lock_flags);
return(0);
-
+
case TIOCMGET:
DGAP_UNLOCK(ch->ch_lock, lock_flags2);
DGAP_UNLOCK(bd->bd_lock, lock_flags);
@@ -3359,8 +3359,8 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/*
* Here are any additional ioctl's that we want to implement
*/
-
- case TCFLSH:
+
+ case TCFLSH:
/*
* The linux tty driver doesn't have a flush
* input routine for the driver, assuming all backed
@@ -3369,7 +3369,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
* act on the ioctl, but then lie and say we didn't
* so the line discipline will process the flush
* also.
- */
+ */
rc = tty_check_change(tty);
if (rc) {
DGAP_UNLOCK(ch->ch_lock, lock_flags2);
@@ -3407,13 +3407,13 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
tty_wakeup(tty);
DGAP_LOCK(bd->bd_lock, lock_flags);
DGAP_LOCK(ch->ch_lock, lock_flags2);
- }
+ }
- /* pretend we didn't recognize this IOCTL */
+ /* pretend we didn't recognize this IOCTL */
DGAP_UNLOCK(ch->ch_lock, lock_flags2);
DGAP_UNLOCK(bd->bd_lock, lock_flags);
- DPR_IOCTL(("dgap_tty_ioctl (LINE:%d) finish on port %d - cmd %s (%x), arg %lx\n",
+ DPR_IOCTL(("dgap_tty_ioctl (LINE:%d) finish on port %d - cmd %s (%x), arg %lx\n",
__LINE__, ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
return(-ENOIOCTLCMD);
@@ -3445,7 +3445,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
return(-EINTR);
}
- DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
/* pretend we didn't recognize this */
@@ -3462,7 +3462,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
}
/* pretend we didn't recognize this */
- return(-ENOIOCTLCMD);
+ return(-ENOIOCTLCMD);
case TCXONC:
/*
@@ -3572,7 +3572,7 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
DGAP_UNLOCK(bd->bd_lock, lock_flags);
DPR_IOCTL(("dgap_tty_ioctl - in default\n"));
- DPR_IOCTL(("dgap_tty_ioctl end - cmd %s (%x), arg %lx\n",
+ DPR_IOCTL(("dgap_tty_ioctl end - cmd %s (%x), arg %lx\n",
dgap_ioctl_name(cmd), cmd, arg));
return(-ENOIOCTLCMD);
diff --git a/drivers/staging/dgap/downld.c b/drivers/staging/dgap/downld.c
index 638c5da43c85..1f4aa2eca437 100644
--- a/drivers/staging/dgap/downld.c
+++ b/drivers/staging/dgap/downld.c
@@ -24,7 +24,7 @@
**
** This is the daemon that sends the fep, bios, and concentrator images
** from user space to the driver.
-** BUGS:
+** BUGS:
** If the file changes in the middle of the download, you probably
** will get what you deserve.
**
@@ -121,7 +121,7 @@ struct downld_t *dp; /* conc. download */
/*
- * The same for either the FEP or the BIOS.
+ * The same for either the FEP or the BIOS.
* Append the downldio header, issue the ioctl, then free
* the buffer. Not horribly CPU efficient, but quite RAM efficient.
*/
@@ -136,7 +136,7 @@ void squirt(int req_type, int bdid, struct image_info *ii)
/*
* If this binary comes from a file, stat it to see how
* large it is. Yes, we intentionally do this each
- * time for the binary may change between loads.
+ * time for the binary may change between loads.
*/
if (ii->pathname) {
@@ -144,7 +144,7 @@ void squirt(int req_type, int bdid, struct image_info *ii)
if (sfd < 0 ) {
myperror(ii->pathname);
- goto squirt_end;
+ goto squirt_end;
}
if (fstat(sfd, &sb) == -1 ) {
@@ -152,7 +152,7 @@ void squirt(int req_type, int bdid, struct image_info *ii)
goto squirt_end;
}
- ii->len = sb.st_size ;
+ ii->len = sb.st_size;
}
size_buf = ii->len + sizeof(struct downldio);
@@ -165,7 +165,7 @@ void squirt(int req_type, int bdid, struct image_info *ii)
dliop = (struct downldio *) malloc(size_buf);
if (dliop == NULL) {
- fprintf(stderr,"%s: can't get %d bytes of memory; aborting\n",
+ fprintf(stderr,"%s: can't get %d bytes of memory; aborting\n",
pgm, size_buf);
exit (1);
}
@@ -185,7 +185,7 @@ void squirt(int req_type, int bdid, struct image_info *ii)
if (debugflag)
printf("sending %d bytes of %s %s from %s\n",
- ii->len,
+ ii->len,
(ii->type == IFEP) ? "FEP" : (ii->type == IBIOS) ? "BIOS" : "CONFIG",
ii->name ? ii->name : "",
(ii->pathname) ? ii->pathname : "internal image" );
@@ -209,13 +209,13 @@ squirt_end:
/*
- * See if we need to reload the download image in core
- *
+ * See if we need to reload the download image in core
+ *
*/
void consider_file_rescan(struct image_info *ii)
{
- int sfd ;
- int len ;
+ int sfd;
+ int len;
struct stat sb;
/* This operation only makes sense when we're working from a file */
@@ -232,14 +232,14 @@ void consider_file_rescan(struct image_info *ii)
myperror(ii->pathname);
exit(1);
}
-
- /* If the file hasn't changed since we last did this,
- * and we have not done a free() on the image, bail
+
+ /* If the file hasn't changed since we last did this,
+ * and we have not done a free() on the image, bail
*/
if (ii->image && (sb.st_mtime == ii->mtime))
goto end_rescan;
- ii->len = len = sb.st_size ;
+ ii->len = len = sb.st_size;
/* Record the timestamp of the file */
ii->mtime = sb.st_mtime;
@@ -249,12 +249,12 @@ void consider_file_rescan(struct image_info *ii)
* have a memory leak.
*/
if ( ii->image ) {
- free( ii->image );
+ free( ii->image );
/* ii->image = NULL; */ /* not necessary */
}
- /* This image will be kept only long enough for the
- * download to happen. After sending the last block,
+ /* This image will be kept only long enough for the
+ * download to happen. After sending the last block,
* it will be freed
*/
ii->image = malloc(len) ;
@@ -267,14 +267,14 @@ void consider_file_rescan(struct image_info *ii)
}
if (read(sfd, ii->image, len) < len) {
- fprintf(stderr,"%s: read error on %s; aborting\n",
+ fprintf(stderr,"%s: read error on %s; aborting\n",
pgm, ii->pathname);
exit (1);
}
end_rescan:
close(sfd);
-
+
}
}
@@ -284,12 +284,12 @@ end_rescan:
struct image_info * find_conc_image()
{
- int x ;
- struct image_info *i = NULL ;
+ int x;
+ struct image_info *i = NULL;
for ( x = 0; x < nimages; x++ ) {
i=&image_list[x];
-
+
if(i->type != ICONC)
continue;
@@ -305,8 +305,8 @@ struct image_info * find_conc_image()
*/
if ((dp->dl_type != 'P' ) && ( ip->dl_srev == dp->dl_srev ))
return i;
- }
- return NULL ;
+ }
+ return NULL;
}
@@ -378,7 +378,7 @@ int main(int argc, char **argv)
** the list before built in images so that the command line images
** can override the built in ones.
*/
-
+
/* allocate space for the list */
nimages = argc - 2;
@@ -390,15 +390,15 @@ int main(int argc, char **argv)
nimages += count;
/* Really should just remove the variable "image_list".... robertl */
- image_list = images ;
-
+ image_list = images;
+
/* get the images from the command line */
for(x = 2; x < argc; x++) {
- int xx;
+ int xx;
/*
- * strip off any leading path information for
- * determining file type
+ * strip off any leading path information for
+ * determining file type
*/
if( (fname = strrchr(argv[x],'/')) == NULL)
fname = argv[x];
@@ -406,18 +406,18 @@ int main(int argc, char **argv)
fname++; /* skip the slash */
for (xx = 0; xx < count; xx++) {
- if (strcmp(fname, images[xx].fname) == 0 ) {
+ if (strcmp(fname, images[xx].fname) == 0 ) {
images[xx].pathname = argv[x];
/* image should be NULL until */
/* space is malloced */
- images[xx].image = NULL ;
+ images[xx].image = NULL;
}
}
}
sleep(3);
-
+
/*
** Endless loop: get a request from the fep, and service that request.
*/
@@ -425,7 +425,7 @@ int main(int argc, char **argv)
/* get the request */
if (debugflag)
printf("b4 get ioctl...");
-
+
if (ioctl(fd,DIGI_DLREQ_GET, &dlio) == -1 ) {
if (errorprint) {
fprintf(stderr,
@@ -438,7 +438,7 @@ int main(int argc, char **argv)
if (debugflag)
printf("dlio.req_type is %d bd %d\n",
dlio.req_type,dlio.bdid);
-
+
switch(dlio.req_type) {
case DLREQ_BIOS:
/*
@@ -447,18 +447,18 @@ int main(int argc, char **argv)
for ( x = 0; x < nimages; x++ ) {
if(image_list[x].type != IBIOS)
continue;
-
- if ((dlio.image.fi.type & FAMILY) ==
+
+ if ((dlio.image.fi.type & FAMILY) ==
image_list[x].family) {
-
- if ( image_list[x].family == T_CX ) {
- if ((dlio.image.fi.type & BUSTYPE)
+
+ if ( image_list[x].family == T_CX ) {
+ if ((dlio.image.fi.type & BUSTYPE)
== T_PCIBUS ) {
- if ( image_list[x].subtype
+ if ( image_list[x].subtype
== T_PCIBUS )
break;
}
- else {
+ else {
break;
}
}
@@ -466,15 +466,15 @@ int main(int argc, char **argv)
/* If subtype of image is T_PCIBUS, it is */
/* a PCI EPC image, so the board must */
/* have bus type T_PCIBUS to match */
- if ((dlio.image.fi.type & BUSTYPE)
+ if ((dlio.image.fi.type & BUSTYPE)
== T_PCIBUS ) {
- if ( image_list[x].subtype
+ if ( image_list[x].subtype
== T_PCIBUS )
break;
}
- else {
+ else {
/* NON PCI EPC doesn't use PCI image */
- if ( image_list[x].subtype
+ if ( image_list[x].subtype
!= T_PCIBUS )
break;
}
@@ -484,12 +484,12 @@ int main(int argc, char **argv)
}
else if ((dlio.image.fi.type & SUBTYPE) == image_list[x].subtype) {
/* PCXR board will break out of the loop here */
- if ( image_list[x].subtype == T_PCXR ) {
+ if ( image_list[x].subtype == T_PCXR ) {
break;
}
}
}
-
+
if ( x >= nimages) {
/*
** no valid images exist
@@ -514,7 +514,7 @@ int main(int argc, char **argv)
}
squirt(dlio.req_type, dlio.bdid, &image_list[x]);
break ;
-
+
case DLREQ_FEP:
/*
** find the fep image for this type
@@ -522,17 +522,17 @@ int main(int argc, char **argv)
for ( x = 0; x < nimages; x++ ) {
if(image_list[x].type != IFEP)
continue;
- if( (dlio.image.fi.type & FAMILY) ==
+ if( (dlio.image.fi.type & FAMILY) ==
image_list[x].family ) {
- if ( image_list[x].family == T_CX ) {
+ if ( image_list[x].family == T_CX ) {
/* C/X PCI board */
- if ((dlio.image.fi.type & BUSTYPE)
+ if ((dlio.image.fi.type & BUSTYPE)
== T_PCIBUS ) {
if ( image_list[x].subtype
== T_PCIBUS )
break;
}
- else {
+ else {
/* Regular CX */
break;
}
@@ -541,15 +541,15 @@ int main(int argc, char **argv)
/* If subtype of image is T_PCIBUS, it is */
/* a PCI EPC image, so the board must */
/* have bus type T_PCIBUS to match */
- if ((dlio.image.fi.type & BUSTYPE)
+ if ((dlio.image.fi.type & BUSTYPE)
== T_PCIBUS ) {
- if ( image_list[x].subtype
+ if ( image_list[x].subtype
== T_PCIBUS )
break;
}
- else {
+ else {
/* NON PCI EPC doesn't use PCI image */
- if ( image_list[x].subtype
+ if ( image_list[x].subtype
!= T_PCIBUS )
break;
}
@@ -559,12 +559,12 @@ int main(int argc, char **argv)
}
else if ((dlio.image.fi.type & SUBTYPE) == image_list[x].subtype) {
/* PCXR board will break out of the loop here */
- if ( image_list[x].subtype == T_PCXR ) {
+ if ( image_list[x].subtype == T_PCXR ) {
break;
}
}
}
-
+
if ( x >= nimages) {
/*
** no valid images exist
@@ -613,7 +613,7 @@ int main(int argc, char **argv)
}
break;
-
+
case DLREQ_CONFIG:
for ( x = 0; x < nimages; x++ ) {
if(image_list[x].type != ICONFIG)
@@ -658,15 +658,15 @@ int main(int argc, char **argv)
*/
for ( x = 0; x < nimages; x++ ) {
ii=&image_list[x];
-
+
if(image_list[x].type != ICONC)
continue;
-
+
consider_file_rescan(ii) ;
-
+
ip = (struct downld_t *) image_list[x].image;
if (ip == NULL) continue;
-
+
/*
* When I removed Clusterport, I kept only the
* code that I was SURE wasn't ClusterPort.
@@ -674,11 +674,11 @@ int main(int argc, char **argv)
*/
if ((dp->dl_type != 'P' ) &&
- (ip->dl_lrev <= dp->dl_lrev ) &&
+ (ip->dl_lrev <= dp->dl_lrev ) &&
( dp->dl_lrev <= ip->dl_hrev))
break;
}
-
+
if ( x >= nimages ) {
/*
** No valid images exist
@@ -691,7 +691,7 @@ int main(int argc, char **argv)
}
continue;
}
-
+
} else {
/*
** find image version required
@@ -706,40 +706,40 @@ int main(int argc, char **argv)
continue;
}
}
-
+
/*
** download block of image
*/
-
+
offset = 1024 * dp->dl_seq;
-
+
/*
** test if block requested within image
*/
- if ( offset < ii->len ) {
-
+ if ( offset < ii->len ) {
+
/*
** if it is, determine block size, set segment,
** set size, set pointers, and copy block
*/
if (( bsize = ii->len - offset ) > 1024 )
bsize = 1024;
-
+
/*
** copy image version info to download area
*/
dp->dl_srev = ip->dl_srev;
dp->dl_lrev = ip->dl_lrev;
dp->dl_hrev = ip->dl_hrev;
-
+
dp->dl_seg = (64 * dp->dl_seq) + ip->dl_seg;
dp->dl_size = bsize;
-
+
down = (char *)&dp->dl_data[0];
image = (char *)((char *)ip + offset);
-
+
memcpy(down, image, bsize);
- }
+ }
else {
/*
** Image has been downloaded, set segment and
@@ -747,24 +747,24 @@ int main(int argc, char **argv)
*/
dp->dl_seg = ip->dl_seg;
dp->dl_size = 0;
-
+
/* Now, we can release the concentrator */
/* image from memory if we're running */
/* from filesystem images */
-
+
if (ii->pathname)
if (ii->image) {
free(ii->image);
- ii->image = NULL ;
- }
+ ii->image = NULL;
+ }
}
-
+
if (debugflag)
printf(
"sending conc dl section %d to %s from %s\n",
dp->dl_seq, ii->name,
ii->pathname ? ii->pathname : "Internal Image");
-
+
if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1 ) {
if (errorprint) {
fprintf(stderr,
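
[Editor's note — not part of the commit.] The downld.c hunks above are mostly whitespace, but they also touch the concentrator-download block math, which is easy to lose in the churn: each DIGI_DLREQ_GET request names a 1 KB block by sequence number, and the daemon answers with that slice of the image (or a zero-length block once the image is exhausted). A minimal sketch of that arithmetic, reusing the field names visible in the hunks; the helper itself and its signature are illustrative only:

    #include <string.h>

    /* Illustrative: fill one 1 KB download block for dp->dl_seq. */
    static void example_fill_conc_block(struct downld_t *dp, struct image_info *ii)
    {
            struct downld_t *ip = (struct downld_t *)ii->image;
            int offset = 1024 * dp->dl_seq;     /* byte offset of requested block */

            if (offset < ii->len) {
                    int bsize = ii->len - offset;

                    if (bsize > 1024)
                            bsize = 1024;

                    /* copy image version info to the download area */
                    dp->dl_srev = ip->dl_srev;
                    dp->dl_lrev = ip->dl_lrev;
                    dp->dl_hrev = ip->dl_hrev;

                    dp->dl_seg  = (64 * dp->dl_seq) + ip->dl_seg;
                    dp->dl_size = bsize;
                    memcpy((char *)&dp->dl_data[0], (char *)ip + offset, bsize);
            } else {
                    /* whole image sent: a zero-length block signals completion */
                    dp->dl_seg  = ip->dl_seg;
                    dp->dl_size = 0;
            }
    }
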
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index fdc1aabc7fde..708adbbcedbd 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -119,7 +119,10 @@ static inline void cls_set_cts_flow_control(struct channel_t *ch)
/* Write old LCR value back out, which turns enhanced access off */
writeb(lcrb, &ch->ch_cls_uart->lcr);
- /* Enable interrupts for CTS flow, turn off interrupts for received XOFF chars */
+ /*
+ * Enable interrupts for CTS flow, turn off interrupts for
+ * received XOFF chars
+ */
ier |= (UART_EXAR654_IER_CTSDSR);
ier &= ~(UART_EXAR654_IER_XOFF);
writeb(ier, &ch->ch_cls_uart->ier);
@@ -167,7 +170,10 @@ static inline void cls_set_ixon_flow_control(struct channel_t *ch)
/* Write old LCR value back out, which turns enhanced access off */
writeb(lcrb, &ch->ch_cls_uart->lcr);
- /* Disable interrupts for CTS flow, turn on interrupts for received XOFF chars */
+ /*
+ * Disable interrupts for CTS flow, turn on interrupts for
+ * received XOFF chars
+ */
ier &= ~(UART_EXAR654_IER_CTSDSR);
ier |= (UART_EXAR654_IER_XOFF);
writeb(ier, &ch->ch_cls_uart->ier);
@@ -207,7 +213,10 @@ static inline void cls_set_no_output_flow_control(struct channel_t *ch)
/* Write old LCR value back out, which turns enhanced access off */
writeb(lcrb, &ch->ch_cls_uart->lcr);
- /* Disable interrupts for CTS flow, turn off interrupts for received XOFF chars */
+ /*
+ * Disable interrupts for CTS flow, turn off interrupts for
+ * received XOFF chars
+ */
ier &= ~(UART_EXAR654_IER_CTSDSR);
ier &= ~(UART_EXAR654_IER_XOFF);
writeb(ier, &ch->ch_cls_uart->ier);
@@ -220,8 +229,8 @@ static inline void cls_set_no_output_flow_control(struct channel_t *ch)
&ch->ch_cls_uart->isr_fcr);
ch->ch_r_watermark = 0;
- ch->ch_t_tlevel = 16;
- ch->ch_r_tlevel = 16;
+ ch->ch_t_tlevel = 16;
+ ch->ch_r_tlevel = 16;
}
@@ -350,8 +359,8 @@ static inline void cls_set_no_input_flow_control(struct channel_t *ch)
UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
&ch->ch_cls_uart->isr_fcr);
- ch->ch_t_tlevel = 16;
- ch->ch_r_tlevel = 16;
+ ch->ch_t_tlevel = 16;
+ ch->ch_r_tlevel = 16;
}
@@ -380,12 +389,13 @@ static inline void cls_clear_break(struct channel_t *ch, int force)
/* Turn break off, and unset some variables */
if (ch->ch_flags & CH_BREAK_SENDING) {
- if ((jiffies >= ch->ch_stop_sending_break) || force) {
+ if (time_after(jiffies, ch->ch_stop_sending_break) || force) {
uchar temp = readb(&ch->ch_cls_uart->lcr);
- writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr);
+ writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr);
ch->ch_flags &= ~(CH_BREAK_SENDING);
ch->ch_stop_sending_break = 0;
- DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n", jiffies));
+ DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n",
+ jiffies));
}
}
DGNC_UNLOCK(ch->ch_lock, lock_flags);
@@ -420,7 +430,8 @@ static inline void cls_parse_isr(struct dgnc_board *brd, uint port)
if (isr & UART_IIR_NO_INT)
break;
- DPR_INTR(("%s:%d port: %x isr: %x\n", __FILE__, __LINE__, port, isr));
+ DPR_INTR(("%s:%d port: %x isr: %x\n", __FILE__, __LINE__,
+ port, isr));
/* Receive Interrupt pending */
if (isr & (UART_IIR_RDI | UART_IIR_RDI_TIMEOUT)) {
@@ -473,11 +484,11 @@ static void cls_param(struct tty_struct *tty)
uchar uart_lcr = 0;
uchar ier = 0;
uchar uart_ier = 0;
- uint baud = 9600;
+ uint baud = 9600;
int quot = 0;
- struct dgnc_board *bd;
+ struct dgnc_board *bd;
struct channel_t *ch;
- struct un_t *un;
+ struct un_t *un;
if (!tty || tty->magic != TTY_MAGIC)
return;
@@ -495,7 +506,8 @@ static void cls_param(struct tty_struct *tty)
return;
DPR_PARAM(("param start: tdev: %x cflags: %x oflags: %x iflags: %x\n",
- ch->ch_tun.un_dev, ch->ch_c_cflag, ch->ch_c_oflag, ch->ch_c_iflag));
+ ch->ch_tun.un_dev, ch->ch_c_cflag, ch->ch_c_oflag,
+ ch->ch_c_iflag));
/*
* If baud rate is zero, flush queues, and set mval to drop DTR.
@@ -506,7 +518,7 @@ static void cls_param(struct tty_struct *tty)
ch->ch_w_head = ch->ch_w_tail = 0;
cls_flush_uart_write(ch);
- cls_flush_uart_read(ch);
+ cls_flush_uart_read(ch);
/* The baudrate is B0 so all modem lines are to be dropped. */
ch->ch_flags |= (CH_BAUD0);
@@ -558,8 +570,12 @@ static void cls_param(struct tty_struct *tty)
4800, 9600, 19200, 38400 }
};
- /* Only use the TXPrint baud rate if the terminal unit is NOT open */
- if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGNC_PRINT))
+ /*
+ * Only use the TXPrint baud rate if the terminal
+ * unit is NOT open
+ */
+ if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
+ (un->un_type == DGNC_PRINT))
baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
else
baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
@@ -572,7 +588,8 @@ static void cls_param(struct tty_struct *tty)
jindex = baud;
- if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16)) {
+ if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) &&
+ (jindex < 16)) {
baud = bauds[iindex][jindex];
} else {
DPR_IOCTL(("baud indices were out of range (%d)(%d)",
@@ -598,13 +615,11 @@ static void cls_param(struct tty_struct *tty)
}
}
- if (ch->ch_c_cflag & PARENB) {
+ if (ch->ch_c_cflag & PARENB)
lcr |= UART_LCR_PARITY;
- }
- if (!(ch->ch_c_cflag & PARODD)) {
+ if (!(ch->ch_c_cflag & PARODD))
lcr |= UART_LCR_EPAR;
- }
/*
* Not all platforms support mark/space parity,
@@ -648,31 +663,28 @@ static void cls_param(struct tty_struct *tty)
writeb((quot & 0xff), &ch->ch_cls_uart->txrx);
writeb((quot >> 8), &ch->ch_cls_uart->ier);
writeb(lcr, &ch->ch_cls_uart->lcr);
- }
+ }
if (uart_lcr != lcr)
writeb(lcr, &ch->ch_cls_uart->lcr);
- if (ch->ch_c_cflag & CREAD) {
+ if (ch->ch_c_cflag & CREAD)
ier |= (UART_IER_RDI | UART_IER_RLSI);
- }
- else {
+ else
ier &= ~(UART_IER_RDI | UART_IER_RLSI);
- }
/*
* Have the UART interrupt on modem signal changes ONLY when
* we are in hardware flow control mode, or CLOCAL/FORCEDCD is not set.
*/
- if ((ch->ch_digi.digi_flags & CTSPACE) || (ch->ch_digi.digi_flags & RTSPACE) ||
- (ch->ch_c_cflag & CRTSCTS) || !(ch->ch_digi.digi_flags & DIGI_FORCEDCD) ||
+ if ((ch->ch_digi.digi_flags & CTSPACE) ||
+ (ch->ch_digi.digi_flags & RTSPACE) ||
+ (ch->ch_c_cflag & CRTSCTS) ||
+ !(ch->ch_digi.digi_flags & DIGI_FORCEDCD) ||
!(ch->ch_c_cflag & CLOCAL))
- {
- ier |= UART_IER_MSI;
- }
- else {
- ier &= ~UART_IER_MSI;
- }
+ ier |= UART_IER_MSI;
+ else
+ ier &= ~UART_IER_MSI;
ier |= UART_IER_THRI;
@@ -681,29 +693,33 @@ static void cls_param(struct tty_struct *tty)
if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
cls_set_cts_flow_control(ch);
- }
- else if (ch->ch_c_iflag & IXON) {
- /* If start/stop is set to disable, then we should disable flow control */
- if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ } else if (ch->ch_c_iflag & IXON) {
+ /*
+ * If start/stop is set to disable, then we should
+ * disable flow control
+ */
+ if ((ch->ch_startc == _POSIX_VDISABLE) ||
+ (ch->ch_stopc == _POSIX_VDISABLE))
cls_set_no_output_flow_control(ch);
else
cls_set_ixon_flow_control(ch);
- }
- else {
+ } else {
cls_set_no_output_flow_control(ch);
}
if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
cls_set_rts_flow_control(ch);
- }
- else if (ch->ch_c_iflag & IXOFF) {
- /* If start/stop is set to disable, then we should disable flow control */
- if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ } else if (ch->ch_c_iflag & IXOFF) {
+ /*
+ * If start/stop is set to disable, then we should disable
+ * flow control
+ */
+ if ((ch->ch_startc == _POSIX_VDISABLE) ||
+ (ch->ch_stopc == _POSIX_VDISABLE))
cls_set_no_input_flow_control(ch);
else
cls_set_ixoff_flow_control(ch);
- }
- else {
+ } else {
cls_set_no_input_flow_control(ch);
}
@@ -719,7 +735,7 @@ static void cls_param(struct tty_struct *tty)
*/
static void cls_tasklet(unsigned long data)
{
- struct dgnc_board *bd = (struct dgnc_board *) data;
+ struct dgnc_board *bd = (struct dgnc_board *) data;
struct channel_t *ch;
ulong lock_flags;
int i;
@@ -802,7 +818,8 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
unsigned long lock_flags;
if (!brd) {
- APR(("Received interrupt (%d) with null board associated\n", irq));
+ APR(("Received interrupt (%d) with null board associated\n",
+ irq));
return IRQ_NONE;
}
@@ -810,7 +827,9 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
* Check to make sure its for us.
*/
if (brd->magic != DGNC_BOARD_MAGIC) {
- APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n", irq));
+ APR((
+ "Received interrupt (%d) with a board pointer "
+ "that wasn't ours!\n", irq));
return IRQ_NONE;
}
@@ -826,7 +845,9 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
/* If 0, no interrupts pending */
if (!poll_reg) {
- DPR_INTR(("Kernel interrupted to me, but no pending interrupts...\n"));
+ DPR_INTR((
+ "Kernel interrupted to me, but no pending "
+ "interrupts...\n"));
DGNC_UNLOCK(brd->bd_intr_lock, lock_flags);
return IRQ_NONE;
}
@@ -834,9 +855,8 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
DPR_INTR(("%s:%d poll_reg: %x\n", __FILE__, __LINE__, poll_reg));
/* Parse each port to find out what caused the interrupt */
- for (i = 0; i < brd->nasync; i++) {
+ for (i = 0; i < brd->nasync; i++)
cls_parse_isr(brd, i);
- }
/*
* Schedule tasklet to more in-depth servicing at a better time.
@@ -868,8 +888,8 @@ static void cls_enable_receiver(struct channel_t *ch)
static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
{
- int qleft = 0;
- uchar linestatus = 0;
+ int qleft = 0;
+ uchar linestatus = 0;
uchar error_mask = 0;
ushort head;
ushort tail;
@@ -885,7 +905,8 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
tail = ch->ch_r_tail;
/* Store how much space we have left in the queue */
- if ((qleft = tail - head - 1) < 0)
+ qleft = (tail - head - 1);
+ if (qleft < 0)
qleft += RQUEUEMASK + 1;
/*
@@ -912,9 +933,9 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
}
/*
- * If our queue is full, we have no choice but to drop some data.
- * The assumption is that HWFLOW or SWFLOW should have stopped
- * things way way before we got to this point.
+ * If our queue is full, we have no choice but to drop some
+ * data. The assumption is that HWFLOW or SWFLOW should have
+ * stopped things way way before we got to this point.
*
* I decided that I wanted to ditch the oldest data first,
* I hope thats okay with everyone? Yes? Good.
@@ -928,13 +949,16 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
qleft++;
}
- ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE | UART_LSR_FE);
+ ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE
+ | UART_LSR_FE);
ch->ch_rqueue[head] = readb(&ch->ch_cls_uart->txrx);
- dgnc_sniff_nowait_nolock(ch, "UART READ", ch->ch_rqueue + head, 1);
+ dgnc_sniff_nowait_nolock(ch, "UART READ",
+ ch->ch_rqueue + head, 1);
qleft--;
- DPR_READ(("DATA/LSR pair: %x %x\n", ch->ch_rqueue[head], ch->ch_equeue[head]));
+ DPR_READ(("DATA/LSR pair: %x %x\n", ch->ch_rqueue[head],
+ ch->ch_equeue[head]));
if (ch->ch_equeue[head] & UART_LSR_PE)
ch->ch_err_parity++;
@@ -966,22 +990,19 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
{
ulong lock_flags;
struct channel_t *ch;
- struct un_t *un;
+ struct un_t *un;
int rc = 0;
- if (!tty || tty->magic != TTY_MAGIC) {
+ if (!tty || tty->magic != TTY_MAGIC)
return -ENXIO;
- }
un = (struct un_t *) tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC) {
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
return -ENXIO;
- }
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return -ENXIO;
- }
DGNC_LOCK(ch->ch_lock, lock_flags);
un->un_flags |= UN_EMPTY;
@@ -990,24 +1011,25 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
/*
* NOTE: Do something with time passed in.
*/
- rc = wait_event_interruptible(un->un_flags_wait, ((un->un_flags & UN_EMPTY) == 0));
+ rc = wait_event_interruptible(un->un_flags_wait,
+ ((un->un_flags & UN_EMPTY) == 0));
/* If ret is non-zero, user ctrl-c'ed us */
if (rc)
DPR_IOCTL(("%d Drain - User ctrl c'ed\n", __LINE__));
- return rc;
+ return rc;
}
/* Channel lock MUST be held before calling this function! */
static void cls_flush_uart_write(struct channel_t *ch)
{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
- }
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_cls_uart->isr_fcr);
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT),
+ &ch->ch_cls_uart->isr_fcr);
udelay(10);
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
@@ -1017,9 +1039,8 @@ static void cls_flush_uart_write(struct channel_t *ch)
/* Channel lock MUST be held before calling this function! */
static void cls_flush_uart_read(struct channel_t *ch)
{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
- }
/*
* For complete POSIX compatibility, we should be purging the
@@ -1032,7 +1053,8 @@ static void cls_flush_uart_read(struct channel_t *ch)
* So for now, we will leave the code #ifdef'ed out...
*/
#if 0
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_cls_uart->isr_fcr);
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
#endif
udelay(10);
}
@@ -1059,7 +1081,8 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
}
/* If port is "stopped", don't send any data to the UART */
- if ((ch->ch_flags & CH_FORCED_STOP) || (ch->ch_flags & CH_BREAK_SENDING)) {
+ if ((ch->ch_flags & CH_FORCED_STOP) ||
+ (ch->ch_flags & CH_BREAK_SENDING)) {
DGNC_UNLOCK(ch->ch_lock, lock_flags);
return;
}
@@ -1071,10 +1094,10 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
n = 32;
- /* cache head and tail of queue */
- head = ch->ch_w_head & WQUEUEMASK;
- tail = ch->ch_w_tail & WQUEUEMASK;
- qlen = (head - tail) & WQUEUEMASK;
+ /* cache head and tail of queue */
+ head = ch->ch_w_head & WQUEUEMASK;
+ tail = ch->ch_w_tail & WQUEUEMASK;
+ qlen = (head - tail) & WQUEUEMASK;
/* Find minimum of the FIFO space, versus queue length */
n = min(n, qlen);
@@ -1083,7 +1106,8 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
/*
* If RTS Toggle mode is on, turn on RTS now if not already set,
- * and make sure we get an event when the data transfer has completed.
+ * and make sure we get an event when the data transfer has
+ * completed.
*/
if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
if (!(ch->ch_mostat & UART_MCR_RTS)) {
@@ -1095,7 +1119,8 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
/*
* If DTR Toggle mode is on, turn on DTR now if not already set,
- * and make sure we get an event when the data transfer has completed.
+ * and make sure we get an event when the data transfer has
+ * completed.
*/
if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
if (!(ch->ch_mostat & UART_MCR_DTR)) {
@@ -1105,7 +1130,8 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
ch->ch_tun.un_flags |= (UN_EMPTY);
}
writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_cls_uart->txrx);
- dgnc_sniff_nowait_nolock(ch, "UART WRITE", ch->ch_wqueue + ch->ch_w_tail, 1);
+ dgnc_sniff_nowait_nolock(ch, "UART WRITE",
+ ch->ch_wqueue + ch->ch_w_tail, 1);
DPR_WRITE(("Tx data: %x\n", ch->ch_wqueue[ch->ch_w_tail]));
ch->ch_w_tail++;
ch->ch_w_tail &= WQUEUEMASK;
@@ -1125,17 +1151,20 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
static void cls_parse_modem(struct channel_t *ch, uchar signals)
{
- volatile uchar msignals = signals;
+ uchar msignals = signals;
+ ulong lock_flags;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
- DPR_MSIGS(("cls_parse_modem: port: %d signals: %d\n", ch->ch_portnum, msignals));
+ DPR_MSIGS(("cls_parse_modem: port: %d signals: %d\n",
+ ch->ch_portnum, msignals));
/*
* Do altpin switching. Altpin switches DCD and DSR.
* This prolly breaks DSRPACE, so we should be more clever here.
*/
+ DGNC_LOCK(ch->ch_lock, lock_flags);
if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
uchar mswap = signals;
if (mswap & UART_MSR_DDCD) {
@@ -1155,10 +1184,15 @@ static void cls_parse_modem(struct channel_t *ch, uchar signals)
msignals |= UART_MSR_DCD;
}
}
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
- /* Scrub off lower bits. They signify delta's, which I don't care about */
+ /*
+ * Scrub off lower bits. They signify delta's, which I don't
+ * care about
+ */
signals &= 0xf0;
+ DGNC_LOCK(ch->ch_lock, lock_flags);
if (msignals & UART_MSR_DCD)
ch->ch_mistat |= UART_MSR_DCD;
else
@@ -1178,9 +1212,11 @@ static void cls_parse_modem(struct channel_t *ch, uchar signals)
ch->ch_mistat |= UART_MSR_CTS;
else
ch->ch_mistat &= ~UART_MSR_CTS;
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
- DPR_MSIGS(("Port: %d DTR: %d RTS: %d CTS: %d DSR: %d " "RI: %d CD: %d\n",
+ DPR_MSIGS((
+ "Port: %d DTR: %d RTS: %d CTS: %d DSR: %d " "RI: %d CD: %d\n",
ch->ch_portnum,
!!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_DTR),
!!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_RTS),
@@ -1204,7 +1240,7 @@ static void cls_assert_modem_signals(struct channel_t *ch)
if (ch->ch_flags & CH_LOOPBACK)
out |= UART_MCR_LOOP;
- writeb(out, &ch->ch_cls_uart->mcr);
+ writeb(out, &ch->ch_cls_uart->mcr);
/* Give time for the UART to actually drop the signals */
udelay(10);
@@ -1219,7 +1255,7 @@ static void cls_send_start_character(struct channel_t *ch)
if (ch->ch_startc != _POSIX_VDISABLE) {
ch->ch_xon_sends++;
writeb(ch->ch_startc, &ch->ch_cls_uart->txrx);
- }
+ }
}
@@ -1231,7 +1267,7 @@ static void cls_send_stop_character(struct channel_t *ch)
if (ch->ch_stopc != _POSIX_VDISABLE) {
ch->ch_xoff_sends++;
writeb(ch->ch_stopc, &ch->ch_cls_uart->txrx);
- }
+ }
}
@@ -1259,10 +1295,11 @@ static void cls_uart_init(struct channel_t *ch)
/* Write old LCR value back out, which turns enhanced access off */
writeb(lcrb, &ch->ch_cls_uart->lcr);
- /* Clear out UART and FIFO */
+ /* Clear out UART and FIFO */
readb(&ch->ch_cls_uart->txrx);
- writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT), &ch->ch_cls_uart->isr_fcr);
+ writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT),
+ &ch->ch_cls_uart->isr_fcr);
udelay(10);
ch->ch_flags |= (CH_FIFO_ENABLED | CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
@@ -1302,8 +1339,7 @@ static uint cls_get_uart_bytes_left(struct channel_t *ch)
if (ch->ch_flags & CH_TX_FIFO_EMPTY)
tasklet_schedule(&ch->ch_bd->helper_tasklet);
left = 1;
- }
- else {
+ } else {
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
left = 0;
}
@@ -1333,10 +1369,11 @@ static void cls_send_break(struct channel_t *ch, int msecs)
writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr);
ch->ch_flags &= ~(CH_BREAK_SENDING);
ch->ch_stop_sending_break = 0;
- DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n", jiffies));
+ DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n",
+ jiffies));
}
return;
- }
+ }
/*
* Set the time we should stop sending the break.
@@ -1350,7 +1387,9 @@ static void cls_send_break(struct channel_t *ch, int msecs)
uchar temp = readb(&ch->ch_cls_uart->lcr);
writeb((temp | UART_LCR_SBC), &ch->ch_cls_uart->lcr);
ch->ch_flags |= (CH_BREAK_SENDING);
- DPR_IOCTL(("Port %d. Starting UART_LCR_SBC! start: %lx should end: %lx\n",
+ DPR_IOCTL((
+ "Port %d. Starting UART_LCR_SBC! start: %lx "
+ "should end: %lx\n",
ch->ch_portnum, jiffies, ch->ch_stop_sending_break));
}
}
@@ -1373,8 +1412,8 @@ static void cls_send_immediate_char(struct channel_t *ch, unsigned char c)
static void cls_vpd(struct dgnc_board *brd)
{
- ulong vpdbase; /* Start of io base of the card */
- u8 __iomem *re_map_vpdbase;/* Remapped memory of the card */
+ ulong vpdbase; /* Start of io base of the card */
+ u8 __iomem *re_map_vpdbase;/* Remapped memory of the card */
int i = 0;
@@ -1389,12 +1428,12 @@ static void cls_vpd(struct dgnc_board *brd)
if (!re_map_vpdbase)
return;
- /* Store the VPD into our buffer */
- for (i = 0; i < 0x40; i++) {
+ /* Store the VPD into our buffer */
+ for (i = 0; i < 0x40; i++) {
brd->vpd[i] = readb(re_map_vpdbase + i);
- printk("%x ", brd->vpd[i]);
- }
- printk("\n");
+ pr_info("%x ", brd->vpd[i]);
+ }
+ pr_info("\n");
if (re_map_vpdbase)
iounmap(re_map_vpdbase);
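
[Editor's note — not part of the commit.] Among the dgnc_cls.c cleanups above, the one behavioural change is in the break handling: the plain `jiffies >= ch->ch_stop_sending_break` test becomes `time_after()`, which compares jiffies values with wrap-safe signed arithmetic. A minimal sketch of the pattern, assuming only <linux/jiffies.h>; the channel fields come from the hunks, the wrapper function is illustrative:

    #include <linux/jiffies.h>

    /* Illustrative: stop a timed break once its deadline has passed.
     * time_after() stays correct across a jiffies wraparound, where a
     * direct ">=" comparison would not.
     */
    static void example_maybe_stop_break(struct channel_t *ch, int force)
    {
            if (!(ch->ch_flags & CH_BREAK_SENDING))
                    return;

            if (time_after(jiffies, ch->ch_stop_sending_break) || force) {
                    ch->ch_flags &= ~CH_BREAK_SENDING;
                    ch->ch_stop_sending_break = 0;
            }
    }
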
diff --git a/drivers/staging/dgnc/dgnc_trace.c b/drivers/staging/dgnc/dgnc_trace.c
index a98b7d4255c8..2f62f2a43542 100644
--- a/drivers/staging/dgnc/dgnc_trace.c
+++ b/drivers/staging/dgnc/dgnc_trace.c
@@ -35,6 +35,7 @@
#include <linux/vmalloc.h>
#include "dgnc_driver.h"
+#include "dgnc_trace.h"
#define TRC_TO_CONSOLE 1
@@ -63,16 +64,16 @@ void dgnc_tracef(const char *fmt, ...)
void dgnc_tracef(const char *fmt, ...)
{
- va_list ap;
- char buf[TRC_MAXMSG+1];
- size_t lenbuf;
- int i;
- static int failed = FALSE;
+ va_list ap;
+ char buf[TRC_MAXMSG+1];
+ size_t lenbuf;
+ int i;
+ static int failed = FALSE;
# if defined(TRC_TO_KMEM)
unsigned long flags;
#endif
- if(failed)
+ if (failed)
return;
# if defined(TRC_TO_KMEM)
DGNC_LOCK(dgnc_tracef_lock, flags);
@@ -86,7 +87,7 @@ void dgnc_tracef(const char *fmt, ...)
# if defined(TRC_TO_KMEM)
{
- static int initd=0;
+ static int initd = 0;
/*
* Now, in addition to (or instead of) printing this stuff out
@@ -95,7 +96,7 @@ void dgnc_tracef(const char *fmt, ...)
*/
if (!initd) {
dgnc_trcbuf = (char *) vmalloc(dgnc_trcbuf_size);
- if(!dgnc_trcbuf) {
+ if (!dgnc_trcbuf) {
failed = TRUE;
printk("dgnc: tracing init failed!\n");
return;
@@ -179,6 +180,6 @@ void dgnc_tracef(const char *fmt, ...)
*/
void dgnc_tracer_free(void)
{
- if(dgnc_trcbuf)
+ if (dgnc_trcbuf)
vfree(dgnc_trcbuf);
}
diff --git a/drivers/staging/dgrp/dgrp_driver.c b/drivers/staging/dgrp/dgrp_driver.c
index 08eedf0867e6..b60a8da6350a 100644
--- a/drivers/staging/dgrp/dgrp_driver.c
+++ b/drivers/staging/dgrp/dgrp_driver.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/tty.h>
-#include <linux/init.h>
/*
* PortServer includes
diff --git a/drivers/staging/dgrp/dgrp_tty.c b/drivers/staging/dgrp/dgrp_tty.c
index 0d52de3729c6..7a9694c1d9c4 100644
--- a/drivers/staging/dgrp/dgrp_tty.c
+++ b/drivers/staging/dgrp/dgrp_tty.c
@@ -371,7 +371,7 @@ static void drp_param(struct ch_struct *ch)
ch->ch_flag |= CH_BAUD0;
}
} else if (ch->ch_custom_speed) {
- ch->ch_brate = PORTSERVER_DIVIDEND / ch->ch_custom_speed ;
+ ch->ch_brate = PORTSERVER_DIVIDEND / ch->ch_custom_speed;
if (ch->ch_flag & CH_BAUD0) {
ch->ch_mout |= DM_DTR | DM_RTS;
@@ -752,7 +752,7 @@ static int dgrp_tty_open(struct tty_struct *tty, struct file *file)
if (ch->ch_open_error != 0 && otype == ch->ch_otype) {
retval = (ch->ch_open_error <= 2) ?
- delay_error : -ENXIO ;
+ delay_error : -ENXIO;
goto unlock;
}
diff --git a/drivers/staging/dwc2/TODO b/drivers/staging/dwc2/TODO
deleted file mode 100644
index 282470d55315..000000000000
--- a/drivers/staging/dwc2/TODO
+++ /dev/null
@@ -1,33 +0,0 @@
-TODO:
- - Dan Carpenter would like to see some cleanups to the microframe
- scheduler code:
- http://www.mail-archive.com/linux-usb@vger.kernel.org/msg26650.html
-
- - Should merge the NAK holdoff patch from Raspberry Pi
- (http://marc.info/?l=linux-usb&m=137625067103833). But as it stands
- that patch is incomplete, it needs more investigation to see if it
- can be made to work for non-Raspberry Pi platforms that lack the
- special FIQ interrupt that the Pi has. Without this patch, the driver
- has a high interrupt rate (8K/sec).
-
- - The Raspberry Pi platform needs to have support for its FIQ interrupt
- added, to get the same level of functionality as the downstream
- driver. The raspberrypi.org developers have indicated they are
- willing to help with that.
-
- - Some of the default driver parameters (see 'struct dwc2_core_params'
- in core.h) won't work for many platforms. So DT attributes will need
- to be added for some of these. But that can be done as-needed as new
- platforms are added.
-
- - Eventually the driver should be merged with the s3c-hsotg peripheral
- mode driver, so that both modes of operation can be supported with a
- single driver. But I think that can wait till after the driver has
- been moved to mainline.
-
- - After that, OTG support can be added. I'm not sure how much demand
- there is for that, though, so I have that as a low priority.
-
-Please send any patches for this driver to Paul Zimmerman <paulz@synopsys.com>
-and Greg Kroah-Hartman <gregkh@linuxfoundation.org>. And please CC linux-usb
-<linux-usb@vger.kernel.org> too.
diff --git a/drivers/staging/et131x/README b/drivers/staging/et131x/README
index 8da96a6d2c92..3befc45fab8a 100644
--- a/drivers/staging/et131x/README
+++ b/drivers/staging/et131x/README
@@ -13,10 +13,6 @@ TODO:
- Implement NAPI support
- In et131x_tx(), don't return NETDEV_TX_BUSY, just drop the packet with kfree_skb().
- Reduce the number of split lines by careful consideration of variable names etc.
- - Do this in et131x.c:
- struct fbr_lookup *fbr;
- fbr = rx_local->fbr[id];
- Then replace all the instances of "rx_local->fbr[id]" with fbr.
Please send patches to:
Greg Kroah-Hartman <gregkh@linuxfoundation.org>
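
[Editor's note — not part of the commit.] The TODO item deleted from the README above asks for exactly the refactor the following et131x.c hunks perform: look up `rx_local->fbr[id]` once, keep it in a local pointer, and use the alias everywhere. A minimal sketch of the pattern, using the struct and field names from the hunks; the loop body is lifted from the diff, the enclosing function is illustrative:

    /* Illustrative: initialise one free-buffer ring via a local alias. */
    static void example_init_fbr_ring(struct rx_ring *rx_local, int id)
    {
            struct fbr_lookup *fbr = rx_local->fbr[id];     /* single lookup */
            struct fbr_desc *fbr_entry = fbr->ring_virtaddr;
            unsigned int entry;

            for (entry = 0; entry < fbr->num_entries; entry++) {
                    fbr_entry->addr_hi = fbr->bus_high[entry];
                    fbr_entry->addr_lo = fbr->bus_low[entry];
                    fbr_entry->word2 = entry;
                    fbr_entry++;
            }
    }

Besides reading better, the alias shortens the lines enough that the later register writes (base_hi, base_lo, num_des) fit without continuation, which is most of what the hunks below change.
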
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index ab8b29d2cb26..e516bb69f3b4 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -54,7 +54,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -813,20 +812,21 @@ static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
/* Setup the receive dma configuration register for normal operation */
u32 csr = ET_RXDMA_CSR_FBR1_ENABLE;
+ struct rx_ring *rx_ring = &adapter->rx_ring;
- if (adapter->rx_ring.fbr[1]->buffsize == 4096)
+ if (rx_ring->fbr[1]->buffsize == 4096)
csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
- else if (adapter->rx_ring.fbr[1]->buffsize == 8192)
+ else if (rx_ring->fbr[1]->buffsize == 8192)
csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
- else if (adapter->rx_ring.fbr[1]->buffsize == 16384)
+ else if (rx_ring->fbr[1]->buffsize == 16384)
csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;
csr |= ET_RXDMA_CSR_FBR0_ENABLE;
- if (adapter->rx_ring.fbr[0]->buffsize == 256)
+ if (rx_ring->fbr[0]->buffsize == 256)
csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
- else if (adapter->rx_ring.fbr[0]->buffsize == 512)
+ else if (rx_ring->fbr[0]->buffsize == 512)
csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
- else if (adapter->rx_ring.fbr[0]->buffsize == 1024)
+ else if (rx_ring->fbr[0]->buffsize == 1024)
csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
writel(csr, &adapter->regs->rxdma.csr);
@@ -968,7 +968,7 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
/* Set up the if mode bits */
cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
- if (phydev && phydev->speed == SPEED_1000) {
+ if (phydev->speed == SPEED_1000) {
cfg2 |= ET_MAC_CFG2_IFMODE_1000;
/* Phy mode bit */
ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
@@ -999,11 +999,11 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;
/* Turn on duplex if needed */
- if (phydev && phydev->duplex == DUPLEX_FULL)
+ if (phydev->duplex == DUPLEX_FULL)
cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;
ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
- if (phydev && phydev->duplex == DUPLEX_HALF)
+ if (phydev->duplex == DUPLEX_HALF)
ifctrl |= ET_MAC_IFCTRL_GHDMODE;
writel(ifctrl, &mac->if_ctrl);
@@ -1039,9 +1039,7 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
*/
static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
- u32 pmcsr;
-
- pmcsr = readl(&adapter->regs->global.pm_csr);
+ u32 pmcsr = readl(&adapter->regs->global.pm_csr);
return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}
@@ -1351,8 +1349,6 @@ static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
* @addr: the address of the transceiver
* @reg: the register to read
* @value: pointer to a 16-bit value in which the value will be stored
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
*/
static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
u8 reg, u16 *value)
@@ -1425,10 +1421,6 @@ static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
* @adapter: pointer to our private adapter structure
* @reg: the register to read
* @value: 16-bit value to write
- *
- * FIXME: one caller in netdev still
- *
- * Return 0 on success, errno on failure (as defined in errno.h)
*/
static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
{
@@ -1494,10 +1486,10 @@ static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
return status;
}
-/* Still used from _mac for BIT_READ */
-static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
- u16 action, u16 regnum, u16 bitnum,
- u8 *value)
+static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
+ u16 regnum,
+ u16 bitnum,
+ u8 *value)
{
u16 reg;
u16 mask = 1 << bitnum;
@@ -1505,22 +1497,7 @@ static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
/* Read the requested register */
et131x_mii_read(adapter, regnum, &reg);
- switch (action) {
- case TRUEPHY_BIT_READ:
- *value = (reg & mask) >> bitnum;
- break;
-
- case TRUEPHY_BIT_SET:
- et131x_mii_write(adapter, regnum, reg | mask);
- break;
-
- case TRUEPHY_BIT_CLEAR:
- et131x_mii_write(adapter, regnum, reg & ~mask);
- break;
-
- default:
- break;
- }
+ *value = (reg & mask) >> bitnum;
}
static void et1310_config_flow_control(struct et131x_adapter *adapter)
@@ -1532,27 +1509,19 @@ static void et1310_config_flow_control(struct et131x_adapter *adapter)
} else {
char remote_pause, remote_async_pause;
- et1310_phy_access_mii_bit(adapter,
- TRUEPHY_BIT_READ, 5, 10, &remote_pause);
- et1310_phy_access_mii_bit(adapter,
- TRUEPHY_BIT_READ, 5, 11,
- &remote_async_pause);
+ et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause);
+ et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause);
- if ((remote_pause == TRUEPHY_BIT_SET) &&
- (remote_async_pause == TRUEPHY_BIT_SET)) {
+ if (remote_pause && remote_async_pause) {
adapter->flowcontrol = adapter->wanted_flow;
- } else if ((remote_pause == TRUEPHY_BIT_SET) &&
- (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
+ } else if (remote_pause && !remote_async_pause) {
if (adapter->wanted_flow == FLOW_BOTH)
adapter->flowcontrol = FLOW_BOTH;
else
adapter->flowcontrol = FLOW_NONE;
- } else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
- (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
+ } else if (!remote_pause && !remote_async_pause) {
adapter->flowcontrol = FLOW_NONE;
- } else {/* if (remote_pause == TRUEPHY_CLEAR_BIT &&
- * remote_async_pause == TRUEPHY_SET_BIT)
- */
+ } else {
if (adapter->wanted_flow == FLOW_BOTH)
adapter->flowcontrol = FLOW_RXONLY;
else
@@ -1561,9 +1530,7 @@ static void et1310_config_flow_control(struct et131x_adapter *adapter)
}
}
-/* et1310_update_macstat_host_counters - Update the local copy of the statistics
- * @adapter: pointer to the adapter structure
- */
+/* et1310_update_macstat_host_counters - Update local copy of the statistics */
static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
struct ce_stats *stats = &adapter->stats;
@@ -1589,7 +1556,6 @@ static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
}
/* et1310_handle_macstat_interrupt
- * @adapter: pointer to the adapter structure
*
* One of the MACSTAT counters has wrapped. Update the local copy of
* the statistics held in the adapter structure, checking the "wrap"
@@ -1679,7 +1645,7 @@ static int et131x_mdio_reset(struct mii_bus *bus)
return 0;
}
-/* et1310_phy_power_down - PHY power control
+/* et1310_phy_power_switch - PHY power control
* @adapter: device to control
* @down: true for off/false for back on
*
@@ -1688,7 +1654,7 @@ static int et131x_mdio_reset(struct mii_bus *bus)
* Can't you see that this code processed
* Phy power, phy power..
*/
-static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
+static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
{
u16 data;
@@ -1699,10 +1665,7 @@ static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
et131x_mii_write(adapter, MII_BMCR, data);
}
-/* et131x_xcvr_init - Init the phy if we are setting it into force mode
- * @adapter: pointer to our private adapter structure
- *
- */
+/* et131x_xcvr_init - Init the phy if we are setting it into force mode */
static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
u16 lcr2;
@@ -1731,7 +1694,6 @@ static void et131x_xcvr_init(struct et131x_adapter *adapter)
}
/* et131x_configure_global_regs - configure JAGCore global regs
- * @adapter: pointer to our adapter structure
*
* Used to configure the global registers on the JAGCore
*/
@@ -1776,9 +1738,7 @@ static void et131x_configure_global_regs(struct et131x_adapter *adapter)
writel(0, &regs->watchdog_timer);
}
-/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
- * @adapter: pointer to our adapter structure
- */
+/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence */
static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
{
struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
@@ -1821,6 +1781,7 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
u32 __iomem *min_des;
u32 __iomem *base_hi;
u32 __iomem *base_lo;
+ struct fbr_lookup *fbr = rx_local->fbr[id];
if (id == 0) {
num_des = &rx_dma->fbr0_num_des;
@@ -1837,12 +1798,10 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
}
/* Now's the best time to initialize FBR contents */
- fbr_entry =
- (struct fbr_desc *) rx_local->fbr[id]->ring_virtaddr;
- for (entry = 0;
- entry < rx_local->fbr[id]->num_entries; entry++) {
- fbr_entry->addr_hi = rx_local->fbr[id]->bus_high[entry];
- fbr_entry->addr_lo = rx_local->fbr[id]->bus_low[entry];
+ fbr_entry = fbr->ring_virtaddr;
+ for (entry = 0; entry < fbr->num_entries; entry++) {
+ fbr_entry->addr_hi = fbr->bus_high[entry];
+ fbr_entry->addr_lo = fbr->bus_low[entry];
fbr_entry->word2 = entry;
fbr_entry++;
}
@@ -1850,19 +1809,16 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
/* Set the address and parameters of Free buffer ring 1 and 0
* into the 1310's registers
*/
- writel(upper_32_bits(rx_local->fbr[id]->ring_physaddr),
- base_hi);
- writel(lower_32_bits(rx_local->fbr[id]->ring_physaddr),
- base_lo);
- writel(rx_local->fbr[id]->num_entries - 1, num_des);
+ writel(upper_32_bits(fbr->ring_physaddr), base_hi);
+ writel(lower_32_bits(fbr->ring_physaddr), base_lo);
+ writel(fbr->num_entries - 1, num_des);
writel(ET_DMA10_WRAP, full_offset);
/* This variable tracks the free buffer ring 1 full position,
* so it has to match the above.
*/
- rx_local->fbr[id]->local_full = ET_DMA10_WRAP;
- writel(((rx_local->fbr[id]->num_entries *
- LO_MARK_PERCENT_FOR_RX) / 100) - 1,
+ fbr->local_full = ET_DMA10_WRAP;
+ writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
min_des);
}
@@ -1884,7 +1840,6 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
}
/* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
- * @adapter: pointer to our private adapter structure
*
* Configure the transmit engine with the ring buffers we have created
* and prepare it for use.
@@ -1892,33 +1847,26 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
/* Load the hardware with the start of the transmit descriptor ring. */
- writel(upper_32_bits(adapter->tx_ring.tx_desc_ring_pa),
- &txdma->pr_base_hi);
- writel(lower_32_bits(adapter->tx_ring.tx_desc_ring_pa),
- &txdma->pr_base_lo);
+ writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
+ writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);
/* Initialise the transmit DMA engine */
writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
/* Load the completion writeback physical address */
- writel(upper_32_bits(adapter->tx_ring.tx_status_pa),
- &txdma->dma_wb_base_hi);
- writel(lower_32_bits(adapter->tx_ring.tx_status_pa),
- &txdma->dma_wb_base_lo);
+ writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);
+ writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);
- *adapter->tx_ring.tx_status = 0;
+ *tx_ring->tx_status = 0;
writel(0, &txdma->service_request);
- adapter->tx_ring.send_idx = 0;
+ tx_ring->send_idx = 0;
}
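
A note on the pattern above: the descriptor-ring and writeback addresses are 64-bit bus addresses split across paired 32-bit registers. A minimal sketch of that split (the helper name is hypothetical, not part of the driver):

#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/kernel.h>

/* Write a 64-bit DMA/bus address into a hi/lo pair of 32-bit MMIO registers. */
static void ex_write_dma_addr(dma_addr_t addr, u32 __iomem *hi, u32 __iomem *lo)
{
	writel(upper_32_bits(addr), hi);
	writel(lower_32_bits(addr), lo);
}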
-/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
+/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation */
static void et131x_adapter_setup(struct et131x_adapter *adapter)
{
/* Configure the JAGCore */
@@ -1938,13 +1886,11 @@ static void et131x_adapter_setup(struct et131x_adapter *adapter)
et1310_config_macstat_regs(adapter);
- et1310_phy_power_down(adapter, 0);
+ et1310_phy_power_switch(adapter, 0);
et131x_xcvr_init(adapter);
}
-/* et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
- * @adapter: pointer to our private adapter structure
- */
+/* et131x_soft_reset - Issue soft reset to the hardware, complete for ET1310 */
static void et131x_soft_reset(struct et131x_adapter *adapter)
{
u32 reg;
@@ -1965,7 +1911,6 @@ static void et131x_soft_reset(struct et131x_adapter *adapter)
}
/* et131x_enable_interrupts - enable interrupt
- * @adapter: et131x device
*
* Enable the appropriate interrupts on the ET131x according to our
* configuration
@@ -1976,7 +1921,7 @@ static void et131x_enable_interrupts(struct et131x_adapter *adapter)
/* Enable all global interrupts */
if (adapter->flowcontrol == FLOW_TXONLY ||
- adapter->flowcontrol == FLOW_BOTH)
+ adapter->flowcontrol == FLOW_BOTH)
mask = INT_MASK_ENABLE;
else
mask = INT_MASK_ENABLE_NO_FLOW;
@@ -1985,7 +1930,6 @@ static void et131x_enable_interrupts(struct et131x_adapter *adapter)
}
/* et131x_disable_interrupts - interrupt disable
- * @adapter: et131x device
*
* Block all interrupts from the et131x device at the device itself
*/
@@ -1995,9 +1939,7 @@ static void et131x_disable_interrupts(struct et131x_adapter *adapter)
writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}
-/* et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310
- * @adapter: pointer to our adapter structure
- */
+/* et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310 */
static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register */
@@ -2005,9 +1947,7 @@ static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
&adapter->regs->txdma.csr);
}
-/* et131x_enable_txrx - Enable tx/rx queues
- * @netdev: device to be enabled
- */
+/* et131x_enable_txrx - Enable tx/rx queues */
static void et131x_enable_txrx(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -2024,9 +1964,7 @@ static void et131x_enable_txrx(struct net_device *netdev)
netif_start_queue(netdev);
}
-/* et131x_disable_txrx - Disable tx/rx queues
- * @netdev: device to be disabled
- */
+/* et131x_disable_txrx - Disable tx/rx queues */
static void et131x_disable_txrx(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -2042,18 +1980,12 @@ static void et131x_disable_txrx(struct net_device *netdev)
et131x_disable_interrupts(adapter);
}
-/* et131x_init_send - Initialize send data structures
- * @adapter: pointer to our private adapter structure
- */
+/* et131x_init_send - Initialize send data structures */
static void et131x_init_send(struct et131x_adapter *adapter)
{
- struct tcb *tcb;
u32 ct;
- struct tx_ring *tx_ring;
-
- /* Setup some convenience pointers */
- tx_ring = &adapter->tx_ring;
- tcb = adapter->tx_ring.tcb_ring;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
+ struct tcb *tcb = tx_ring->tcb_ring;
tx_ring->tcb_qhead = tcb;
@@ -2076,7 +2008,6 @@ static void et131x_init_send(struct et131x_adapter *adapter)
}
/* et1310_enable_phy_coma - called when network cable is unplugged
- * @adapter: pointer to our adapter structure
*
 * driver receives a phy status change interrupt while in D0 and checks that
* phy_status is down.
@@ -2104,11 +2035,6 @@ static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
/* Save the GbE PHY speed and duplex modes. Need to restore this
* when cable is plugged back in
*/
- /* TODO - when PM is re-enabled, check if we need to
- * perform a similar task as this -
- * adapter->pdown_speed = adapter->ai_force_speed;
- * adapter->pdown_duplex = adapter->ai_force_duplex;
- */
/* Stop sending packets. */
spin_lock_irqsave(&adapter->send_hw_lock, flags);
@@ -2128,9 +2054,7 @@ static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
writel(pmcsr, &adapter->regs->global.pm_csr);
}
-/* et1310_disable_phy_coma - Disable the Phy Coma Mode
- * @adapter: pointer to our adapter structure
- */
+/* et1310_disable_phy_coma - Disable the Phy Coma Mode */
static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
u32 pmcsr;
@@ -2145,11 +2069,6 @@ static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
/* Restore the GbE PHY speed and duplex modes;
* Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
*/
- /* TODO - when PM is re-enabled, check if we need to
- * perform a similar task as this -
- * adapter->ai_force_speed = adapter->pdown_speed;
- * adapter->ai_force_duplex = adapter->pdown_duplex;
- */
/* Re-initialize the send structures */
et131x_init_send(adapter);
@@ -2183,15 +2102,12 @@ static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
tmp_free_buff_ring ^= ET_DMA10_WRAP;
}
/* For the 1023 case */
- tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
+ tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP);
*free_buff_ring = tmp_free_buff_ring;
return tmp_free_buff_ring;
}
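
For context, bump_free_buff_ring() advances a ring index that keeps a 10-bit position in the low bits and toggles a wrap flag each time the ring rolls over. An illustrative stand-alone version, assuming ET_DMA10_MASK is the 10-bit position mask and ET_DMA10_WRAP the single wrap bit above it (EX_ names and values are assumptions, not driver code):

#include <linux/types.h>

#define EX_DMA10_MASK 0x03FF	/* bits 9:0 hold the ring position */
#define EX_DMA10_WRAP 0x0400	/* toggles every time the ring wraps */

static u32 ex_bump_ring_index(u32 index, u32 limit)
{
	if ((index & EX_DMA10_MASK) < limit) {
		index++;			/* normal advance */
	} else {
		index &= ~EX_DMA10_MASK;	/* back to slot 0 ... */
		index ^= EX_DMA10_WRAP;		/* ... and flip the wrap bit */
	}
	return index & (EX_DMA10_MASK | EX_DMA10_WRAP);
}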
/* et131x_rx_dma_memory_alloc
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success and errno on failure (as defined in errno.h)
*
* Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
* and the Packet Status Ring.
@@ -2203,10 +2119,8 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
u32 bufsize;
u32 pktstat_ringsize;
u32 fbr_chunksize;
- struct rx_ring *rx_ring;
-
- /* Setup some convenience pointers */
- rx_ring = &adapter->rx_ring;
+ struct rx_ring *rx_ring = &adapter->rx_ring;
+ struct fbr_lookup *fbr;
/* Alloc memory for the lookup table */
rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
@@ -2247,20 +2161,18 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
rx_ring->fbr[1]->num_entries = 128;
}
- adapter->rx_ring.psr_num_entries =
- adapter->rx_ring.fbr[0]->num_entries +
- adapter->rx_ring.fbr[1]->num_entries;
+ rx_ring->psr_num_entries = rx_ring->fbr[0]->num_entries +
+ rx_ring->fbr[1]->num_entries;
for (id = 0; id < NUM_FBRS; id++) {
+ fbr = rx_ring->fbr[id];
/* Allocate an area of memory for Free Buffer Ring */
- bufsize =
- (sizeof(struct fbr_desc) * rx_ring->fbr[id]->num_entries);
- rx_ring->fbr[id]->ring_virtaddr =
- dma_alloc_coherent(&adapter->pdev->dev,
- bufsize,
- &rx_ring->fbr[id]->ring_physaddr,
- GFP_KERNEL);
- if (!rx_ring->fbr[id]->ring_virtaddr) {
+ bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
+ fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
+ bufsize,
+ &fbr->ring_physaddr,
+ GFP_KERNEL);
+ if (!fbr->ring_virtaddr) {
dev_err(&adapter->pdev->dev,
"Cannot alloc memory for Free Buffer Ring %d\n", id);
return -ENOMEM;
@@ -2268,25 +2180,25 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
}
for (id = 0; id < NUM_FBRS; id++) {
- fbr_chunksize = (FBR_CHUNKS * rx_ring->fbr[id]->buffsize);
+ fbr = rx_ring->fbr[id];
+ fbr_chunksize = (FBR_CHUNKS * fbr->buffsize);
- for (i = 0;
- i < (rx_ring->fbr[id]->num_entries / FBR_CHUNKS); i++) {
+ for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) {
dma_addr_t fbr_tmp_physaddr;
- rx_ring->fbr[id]->mem_virtaddrs[i] = dma_alloc_coherent(
+ fbr->mem_virtaddrs[i] = dma_alloc_coherent(
&adapter->pdev->dev, fbr_chunksize,
- &rx_ring->fbr[id]->mem_physaddrs[i],
+ &fbr->mem_physaddrs[i],
GFP_KERNEL);
- if (!rx_ring->fbr[id]->mem_virtaddrs[i]) {
+ if (!fbr->mem_virtaddrs[i]) {
dev_err(&adapter->pdev->dev,
"Could not alloc memory\n");
return -ENOMEM;
}
/* See NOTE in "Save Physical Address" comment above */
- fbr_tmp_physaddr = rx_ring->fbr[id]->mem_physaddrs[i];
+ fbr_tmp_physaddr = fbr->mem_physaddrs[i];
for (j = 0; j < FBR_CHUNKS; j++) {
u32 index = (i * FBR_CHUNKS) + j;
@@ -2294,26 +2206,25 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
/* Save the Virtual address of this index for
* quick access later
*/
- rx_ring->fbr[id]->virt[index] =
- (u8 *) rx_ring->fbr[id]->mem_virtaddrs[i] +
- (j * rx_ring->fbr[id]->buffsize);
+ fbr->virt[index] = (u8 *)fbr->mem_virtaddrs[i] +
+ (j * fbr->buffsize);
/* now store the physical address in the
* descriptor so the device can access it
*/
- rx_ring->fbr[id]->bus_high[index] =
+ fbr->bus_high[index] =
upper_32_bits(fbr_tmp_physaddr);
- rx_ring->fbr[id]->bus_low[index] =
+ fbr->bus_low[index] =
lower_32_bits(fbr_tmp_physaddr);
- fbr_tmp_physaddr += rx_ring->fbr[id]->buffsize;
+ fbr_tmp_physaddr += fbr->buffsize;
}
}
}
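
The loop above carves each coherent chunk into FBR_CHUNKS fixed-size buffers, recording a virtual pointer plus the split bus address for every table slot. A simplified sketch of that carving, with made-up names for illustration:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

/* Record lookup-table entries for one DMA chunk holding 'nbufs' buffers of
 * 'bufsize' bytes each, starting at table slot 'base'.
 */
static void ex_fill_fbr_chunk(void *chunk_va, dma_addr_t chunk_pa,
			      u32 bufsize, u32 nbufs, u32 base,
			      u8 **virt, u32 *bus_hi, u32 *bus_lo)
{
	u32 j;

	for (j = 0; j < nbufs; j++) {
		virt[base + j]   = (u8 *)chunk_va + j * bufsize;
		bus_hi[base + j] = upper_32_bits(chunk_pa);
		bus_lo[base + j] = lower_32_bits(chunk_pa);
		chunk_pa += bufsize;
	}
}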
/* Allocate an area of memory for FIFO of Packet Status ring entries */
pktstat_ringsize =
- sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;
+ sizeof(struct pkt_stat_desc) * rx_ring->psr_num_entries;
rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
pktstat_ringsize,
@@ -2325,8 +2236,6 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
"Cannot alloc memory for Packet Status Ring\n");
return -ENOMEM;
}
- pr_info("Packet Status Ring %llx\n",
- (unsigned long long) rx_ring->ps_ring_physaddr);
/* NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
* ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
@@ -2345,7 +2254,6 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
return -ENOMEM;
}
rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
- pr_info("PRS %llx\n", (unsigned long long)rx_ring->rx_status_bus);
/* The RFDs are going to be put on lists later on, so initialize the
* lists now.
@@ -2354,9 +2262,7 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
return 0;
}
-/* et131x_rx_dma_memory_free - Free all memory allocated within this module.
- * @adapter: pointer to our private adapter structure
- */
+/* et131x_rx_dma_memory_free - Free all memory allocated within this module */
static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
u8 id;
@@ -2364,17 +2270,15 @@ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
u32 bufsize;
u32 pktstat_ringsize;
struct rfd *rfd;
- struct rx_ring *rx_ring;
-
- /* Setup some convenience pointers */
- rx_ring = &adapter->rx_ring;
+ struct rx_ring *rx_ring = &adapter->rx_ring;
+ struct fbr_lookup *fbr;
/* Free RFDs and associated packet descriptors */
WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
while (!list_empty(&rx_ring->recv_list)) {
- rfd = (struct rfd *) list_entry(rx_ring->recv_list.next,
- struct rfd, list_node);
+ rfd = list_entry(rx_ring->recv_list.next,
+ struct rfd, list_node);
list_del(&rfd->list_node);
rfd->skb = NULL;
@@ -2383,40 +2287,41 @@ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
/* Free Free Buffer Rings */
for (id = 0; id < NUM_FBRS; id++) {
- if (!rx_ring->fbr[id]->ring_virtaddr)
+ fbr = rx_ring->fbr[id];
+
+ if (!fbr->ring_virtaddr)
continue;
/* First the packet memory */
for (index = 0;
- index < (rx_ring->fbr[id]->num_entries / FBR_CHUNKS);
+ index < fbr->num_entries / FBR_CHUNKS;
index++) {
- if (rx_ring->fbr[id]->mem_virtaddrs[index]) {
- bufsize =
- rx_ring->fbr[id]->buffsize * FBR_CHUNKS;
+ if (fbr->mem_virtaddrs[index]) {
+ bufsize = fbr->buffsize * FBR_CHUNKS;
dma_free_coherent(&adapter->pdev->dev,
- bufsize,
- rx_ring->fbr[id]->mem_virtaddrs[index],
- rx_ring->fbr[id]->mem_physaddrs[index]);
+ bufsize,
+ fbr->mem_virtaddrs[index],
+ fbr->mem_physaddrs[index]);
- rx_ring->fbr[id]->mem_virtaddrs[index] = NULL;
+ fbr->mem_virtaddrs[index] = NULL;
}
}
- bufsize =
- sizeof(struct fbr_desc) * rx_ring->fbr[id]->num_entries;
+ bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
- dma_free_coherent(&adapter->pdev->dev, bufsize,
- rx_ring->fbr[id]->ring_virtaddr,
- rx_ring->fbr[id]->ring_physaddr);
+ dma_free_coherent(&adapter->pdev->dev,
+ bufsize,
+ fbr->ring_virtaddr,
+ fbr->ring_physaddr);
- rx_ring->fbr[id]->ring_virtaddr = NULL;
+ fbr->ring_virtaddr = NULL;
}
/* Free Packet Status Ring */
if (rx_ring->ps_ring_virtaddr) {
pktstat_ringsize = sizeof(struct pkt_stat_desc) *
- adapter->rx_ring.psr_num_entries;
+ rx_ring->psr_num_entries;
dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
rx_ring->ps_ring_virtaddr,
@@ -2441,20 +2346,12 @@ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
rx_ring->num_ready_recv = 0;
}
-/* et131x_init_recv - Initialize receive data structures.
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success and errno on failure (as defined in errno.h)
- */
+/* et131x_init_recv - Initialize receive data structures */
static int et131x_init_recv(struct et131x_adapter *adapter)
{
struct rfd *rfd;
u32 rfdct;
- u32 numrfd = 0;
- struct rx_ring *rx_ring;
-
- /* Setup some convenience pointers */
- rx_ring = &adapter->rx_ring;
+ struct rx_ring *rx_ring = &adapter->rx_ring;
/* Setup each RFD */
for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
@@ -2467,24 +2364,18 @@ static int et131x_init_recv(struct et131x_adapter *adapter)
/* Add this RFD to the recv_list */
list_add_tail(&rfd->list_node, &rx_ring->recv_list);
- /* Increment both the available RFD's, and the total RFD's. */
+ /* Increment the available RFD's */
rx_ring->num_ready_recv++;
- numrfd++;
}
return 0;
}
-/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
- * @adapter: pointer to our adapter structure
- */
+/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */
static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
struct phy_device *phydev = adapter->phydev;
- if (!phydev)
- return;
-
/* For version B silicon, we do not use the RxDMA timer for 10 and 100
 * Mbits/s line rates. We do not enable RxDMA interrupt coalescing.
*/
@@ -2505,11 +2396,13 @@ static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
u16 buff_index = rfd->bufferindex;
u8 ring_index = rfd->ringindex;
unsigned long flags;
+ struct fbr_lookup *fbr = rx_local->fbr[ring_index];
/* We don't use any of the OOB data besides status. Otherwise, we
* need to clean up OOB data
*/
- if (buff_index < rx_local->fbr[ring_index]->num_entries) {
+ if (buff_index < fbr->num_entries) {
+ u32 free_buff_ring;
u32 __iomem *offset;
struct fbr_desc *next;
@@ -2520,22 +2413,20 @@ static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
else
offset = &rx_dma->fbr1_full_offset;
- next = (struct fbr_desc *)
- (rx_local->fbr[ring_index]->ring_virtaddr) +
- INDEX10(rx_local->fbr[ring_index]->local_full);
+ next = (struct fbr_desc *)(fbr->ring_virtaddr) +
+ INDEX10(fbr->local_full);
/* Handle the Free Buffer Ring advancement here. Write
* the PA / Buffer Index for the returned buffer into
 * the oldest (next to be freed) FBR entry
*/
- next->addr_hi = rx_local->fbr[ring_index]->bus_high[buff_index];
- next->addr_lo = rx_local->fbr[ring_index]->bus_low[buff_index];
+ next->addr_hi = fbr->bus_high[buff_index];
+ next->addr_lo = fbr->bus_low[buff_index];
next->word2 = buff_index;
- writel(bump_free_buff_ring(
- &rx_local->fbr[ring_index]->local_full,
- rx_local->fbr[ring_index]->num_entries - 1),
- offset);
+ free_buff_ring = bump_free_buff_ring(&fbr->local_full,
+ fbr->num_entries - 1);
+ writel(free_buff_ring, offset);
spin_unlock_irqrestore(&adapter->fbr_lock, flags);
} else {
@@ -2555,7 +2446,6 @@ static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
}
/* nic_rx_pkts - Checks the hardware for available packets
- * @adapter: pointer to our adapter
*
* Returns rfd, a pointer to our MPRFD.
*
@@ -2580,6 +2470,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
u32 word0;
u32 word1;
struct sk_buff *skb;
+ struct fbr_lookup *fbr;
/* RX Status block is written by the DMA engine prior to every
* interrupt. It contains the next to be used entry in the Packet
@@ -2601,6 +2492,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
*/
len = psr->word1 & 0xFFFF;
ring_index = (psr->word1 >> 26) & 0x03;
+ fbr = rx_local->fbr[ring_index];
buff_index = (psr->word1 >> 16) & 0x3FF;
word0 = psr->word0;
@@ -2616,8 +2508,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset);
- if (ring_index > 1 ||
- buff_index > rx_local->fbr[ring_index]->num_entries - 1) {
+ if (ring_index > 1 || buff_index > fbr->num_entries - 1) {
		/* Illegal buffer or ring index cannot be used by S/W */
dev_err(&adapter->pdev->dev,
"NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
@@ -2629,7 +2520,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
spin_lock_irqsave(&adapter->rcv_lock, flags);
element = rx_local->recv_list.next;
- rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
+ rfd = list_entry(element, struct rfd, list_node);
if (!rfd) {
spin_unlock_irqrestore(&adapter->rcv_lock, flags);
@@ -2670,7 +2561,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
&& !(adapter->packet_filter & ET131X_PACKET_TYPE_PROMISCUOUS)
&& !(adapter->packet_filter &
ET131X_PACKET_TYPE_ALL_MULTICAST)) {
- buf = rx_local->fbr[ring_index]->virt[buff_index];
+ buf = fbr->virt[buff_index];
/* Loop through our list to see if the destination
* address of this packet matches one in our list.
@@ -2708,7 +2599,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
adapter->stats.unicast_pkts_rcvd++;
}
- if (len == 0) {
+ if (!len) {
rfd->len = 0;
goto out;
}
@@ -2723,9 +2614,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
adapter->net_stats.rx_bytes += rfd->len;
- memcpy(skb_put(skb, rfd->len),
- rx_local->fbr[ring_index]->virt[buff_index],
- rfd->len);
+ memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len);
skb->protocol = eth_type_trans(skb, adapter->netdev);
skb->ip_summed = CHECKSUM_NONE;
@@ -2737,7 +2626,6 @@ out:
}
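
For reference, the packet status word1 parsed above packs the frame length, buffer index and ring number into one 32-bit value; a hedged decode of that layout, inferred only from the masks and shifts used here (helper name is illustrative):

#include <linux/types.h>

/* Assumed PSR word1 layout (from the shifts/masks above):
 *   bits 15:0  - frame length
 *   bits 25:16 - free-buffer index (10 bits)
 *   bits 27:26 - free-buffer ring number
 */
static void ex_decode_psr_word1(u32 word1, u32 *len, u32 *buff, u32 *ring)
{
	*len  = word1 & 0xFFFF;
	*buff = (word1 >> 16) & 0x3FF;
	*ring = (word1 >> 26) & 0x03;
}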
/* et131x_handle_recv_interrupt - Interrupt handler for receive processing
- * @adapter: pointer to our adapter
*
* Assumption, Rcv spinlock has been acquired.
*/
@@ -2746,11 +2634,12 @@ static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
struct rfd *rfd = NULL;
u32 count = 0;
bool done = true;
+ struct rx_ring *rx_ring = &adapter->rx_ring;
/* Process up to available RFD's */
while (count < NUM_PACKETS_HANDLED) {
- if (list_empty(&adapter->rx_ring.recv_list)) {
- WARN_ON(adapter->rx_ring.num_ready_recv != 0);
+ if (list_empty(&rx_ring->recv_list)) {
+ WARN_ON(rx_ring->num_ready_recv != 0);
done = false;
break;
}
@@ -2774,25 +2663,22 @@ static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
adapter->net_stats.rx_packets++;
/* Set the status on the packet, either resources or success */
- if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK)
+ if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK)
dev_warn(&adapter->pdev->dev, "RFD's are running out\n");
count++;
}
if (count == NUM_PACKETS_HANDLED || !done) {
- adapter->rx_ring.unfinished_receives = true;
+ rx_ring->unfinished_receives = true;
writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
&adapter->regs->global.watchdog_timer);
} else
/* Watchdog timer will disable itself if appropriate. */
- adapter->rx_ring.unfinished_receives = false;
+ rx_ring->unfinished_receives = false;
}
/* et131x_tx_dma_memory_alloc
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success and errno on failure (as defined in errno.h).
*
* Allocates memory that will be visible both to the device and to the CPU.
* The OS will pass us packets, pointers to which we will insert in the Tx
@@ -2806,18 +2692,17 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
struct tx_ring *tx_ring = &adapter->tx_ring;
/* Allocate memory for the TCB's (Transmit Control Block) */
- adapter->tx_ring.tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
- GFP_ATOMIC | GFP_DMA);
- if (!adapter->tx_ring.tcb_ring)
+ tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
+ GFP_ATOMIC | GFP_DMA);
+ if (!tx_ring->tcb_ring)
return -ENOMEM;
desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
- tx_ring->tx_desc_ring =
- (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev,
- desc_size,
- &tx_ring->tx_desc_ring_pa,
- GFP_KERNEL);
- if (!adapter->tx_ring.tx_desc_ring) {
+ tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
+ desc_size,
+ &tx_ring->tx_desc_ring_pa,
+ GFP_KERNEL);
+ if (!tx_ring->tx_desc_ring) {
dev_err(&adapter->pdev->dev,
"Cannot alloc memory for Tx Ring\n");
return -ENOMEM;
@@ -2835,51 +2720,46 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
sizeof(u32),
&tx_ring->tx_status_pa,
GFP_KERNEL);
- if (!adapter->tx_ring.tx_status_pa) {
+ if (!tx_ring->tx_status_pa) {
dev_err(&adapter->pdev->dev,
- "Cannot alloc memory for Tx status block\n");
+ "Cannot alloc memory for Tx status block\n");
return -ENOMEM;
}
return 0;
}
-/* et131x_tx_dma_memory_free - Free all memory allocated within this module
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success and errno on failure (as defined in errno.h).
- */
+/* et131x_tx_dma_memory_free - Free all memory allocated within this module */
static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
int desc_size = 0;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
- if (adapter->tx_ring.tx_desc_ring) {
+ if (tx_ring->tx_desc_ring) {
/* Free memory relating to Tx rings here */
desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
dma_free_coherent(&adapter->pdev->dev,
- desc_size,
- adapter->tx_ring.tx_desc_ring,
- adapter->tx_ring.tx_desc_ring_pa);
- adapter->tx_ring.tx_desc_ring = NULL;
+ desc_size,
+ tx_ring->tx_desc_ring,
+ tx_ring->tx_desc_ring_pa);
+ tx_ring->tx_desc_ring = NULL;
}
/* Free memory for the Tx status block */
- if (adapter->tx_ring.tx_status) {
+ if (tx_ring->tx_status) {
dma_free_coherent(&adapter->pdev->dev,
- sizeof(u32),
- adapter->tx_ring.tx_status,
- adapter->tx_ring.tx_status_pa);
+ sizeof(u32),
+ tx_ring->tx_status,
+ tx_ring->tx_status_pa);
- adapter->tx_ring.tx_status = NULL;
+ tx_ring->tx_status = NULL;
}
/* Free the memory for the tcb structures */
- kfree(adapter->tx_ring.tcb_ring);
+ kfree(tx_ring->tcb_ring);
}
/* nic_send_packet - NIC specific send handler for version B silicon.
* @adapter: pointer to our adapter
* @tcb: pointer to struct tcb
- *
- * Returns 0 or errno.
*/
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
@@ -2893,6 +2773,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
unsigned long flags;
struct phy_device *phydev = adapter->phydev;
dma_addr_t dma_addr;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
/* Part of the optimizations of this send routine restrict us to
* sending 24 fragments at a pass. In practice we should never see
@@ -2968,11 +2849,11 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
}
if (phydev && phydev->speed == SPEED_1000) {
- if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
+ if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) {
/* Last element & Interrupt flag */
desc[frag - 1].flags =
TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
- adapter->tx_ring.since_irq = 0;
+ tx_ring->since_irq = 0;
} else { /* Last element */
desc[frag - 1].flags = TXDESC_FLAG_LASTPKT;
}
@@ -2982,12 +2863,12 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
desc[0].flags |= TXDESC_FLAG_FIRSTPKT;
- tcb->index_start = adapter->tx_ring.send_idx;
+ tcb->index_start = tx_ring->send_idx;
tcb->stale = 0;
spin_lock_irqsave(&adapter->send_hw_lock, flags);
- thiscopy = NUM_DESC_PER_RING_TX - INDEX10(adapter->tx_ring.send_idx);
+ thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx);
if (thiscopy >= frag) {
remainder = 0;
@@ -2996,52 +2877,51 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
remainder = frag - thiscopy;
}
- memcpy(adapter->tx_ring.tx_desc_ring +
- INDEX10(adapter->tx_ring.send_idx), desc,
+ memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx),
+ desc,
sizeof(struct tx_desc) * thiscopy);
- add_10bit(&adapter->tx_ring.send_idx, thiscopy);
+ add_10bit(&tx_ring->send_idx, thiscopy);
- if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
- INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
- adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
- adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
+ if (INDEX10(tx_ring->send_idx) == 0 ||
+ INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) {
+ tx_ring->send_idx &= ~ET_DMA10_MASK;
+ tx_ring->send_idx ^= ET_DMA10_WRAP;
}
if (remainder) {
- memcpy(adapter->tx_ring.tx_desc_ring,
+ memcpy(tx_ring->tx_desc_ring,
desc + thiscopy,
sizeof(struct tx_desc) * remainder);
- add_10bit(&adapter->tx_ring.send_idx, remainder);
+ add_10bit(&tx_ring->send_idx, remainder);
}
- if (INDEX10(adapter->tx_ring.send_idx) == 0) {
- if (adapter->tx_ring.send_idx)
+ if (INDEX10(tx_ring->send_idx) == 0) {
+ if (tx_ring->send_idx)
tcb->index = NUM_DESC_PER_RING_TX - 1;
else
tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
} else
- tcb->index = adapter->tx_ring.send_idx - 1;
+ tcb->index = tx_ring->send_idx - 1;
spin_lock(&adapter->tcb_send_qlock);
- if (adapter->tx_ring.send_tail)
- adapter->tx_ring.send_tail->next = tcb;
+ if (tx_ring->send_tail)
+ tx_ring->send_tail->next = tcb;
else
- adapter->tx_ring.send_head = tcb;
+ tx_ring->send_head = tcb;
- adapter->tx_ring.send_tail = tcb;
+ tx_ring->send_tail = tcb;
WARN_ON(tcb->next != NULL);
- adapter->tx_ring.used++;
+ tx_ring->used++;
spin_unlock(&adapter->tcb_send_qlock);
/* Write the new write pointer back to the device. */
- writel(adapter->tx_ring.send_idx,
- &adapter->regs->txdma.service_request);
+ writel(tx_ring->send_idx, &adapter->regs->txdma.service_request);
/* For Gig only, we use Tx Interrupt coalescing. Enable the software
* timer to wake us up if this packet isn't followed by N more.
@@ -3056,19 +2936,16 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
}
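
The thiscopy/remainder logic above copies the staged descriptors into the ring in at most two memcpy() calls when the block crosses the end of the ring. A compact sketch of that split (ex_ names are illustrative, not the driver's):

#include <linux/kernel.h>
#include <linux/string.h>

/* Copy 'count' descriptors into a ring of 'ring_size' entries starting at
 * position 'start', wrapping to slot 0 for any remainder.
 */
static void ex_copy_descs(struct tx_desc *ring, u32 ring_size, u32 start,
			  const struct tx_desc *src, u32 count)
{
	u32 first = min(count, ring_size - start);

	memcpy(ring + start, src, first * sizeof(*src));
	if (count > first)
		memcpy(ring, src + first, (count - first) * sizeof(*src));
}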
/* send_packet - Do the work to send a packet
- * @skb: the packet(s) to send
- * @adapter: a pointer to the device's private adapter structure
- *
- * Return 0 in almost all cases; non-zero value in extreme hard failure only.
*
* Assumption: Send spinlock has been acquired
*/
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
{
int status;
- struct tcb *tcb = NULL;
+ struct tcb *tcb;
u16 *shbufva;
unsigned long flags;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
/* All packets must have at least a MAC address and a protocol type */
if (skb->len < ETH_HLEN)
@@ -3077,17 +2954,17 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
/* Get a TCB for this packet */
spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
- tcb = adapter->tx_ring.tcb_qhead;
+ tcb = tx_ring->tcb_qhead;
if (tcb == NULL) {
spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
return -ENOMEM;
}
- adapter->tx_ring.tcb_qhead = tcb->next;
+ tx_ring->tcb_qhead = tcb->next;
- if (adapter->tx_ring.tcb_qhead == NULL)
- adapter->tx_ring.tcb_qtail = NULL;
+ if (tx_ring->tcb_qhead == NULL)
+ tx_ring->tcb_qtail = NULL;
spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
@@ -3111,30 +2988,26 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
if (status != 0) {
spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
- if (adapter->tx_ring.tcb_qtail)
- adapter->tx_ring.tcb_qtail->next = tcb;
+ if (tx_ring->tcb_qtail)
+ tx_ring->tcb_qtail->next = tcb;
else
/* Apparently ready Q is empty. */
- adapter->tx_ring.tcb_qhead = tcb;
+ tx_ring->tcb_qhead = tcb;
- adapter->tx_ring.tcb_qtail = tcb;
+ tx_ring->tcb_qtail = tcb;
spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
return status;
}
- WARN_ON(adapter->tx_ring.used > NUM_TCB);
+ WARN_ON(tx_ring->used > NUM_TCB);
return 0;
}
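
send_packet() above pops a TCB from a head/tail singly linked ready queue and pushes it back on failure. Reduced to its essentials, the list handling looks like the following sketch (hypothetical ex_tcb type, payload fields elided):

struct ex_tcb {
	struct ex_tcb *next;
	/* ... payload fields elided ... */
};

/* Pop from the head of the ready queue; returns NULL when empty. */
static struct ex_tcb *ex_tcb_pop(struct ex_tcb **head, struct ex_tcb **tail)
{
	struct ex_tcb *tcb = *head;

	if (tcb) {
		*head = tcb->next;
		if (!*head)
			*tail = NULL;
	}
	return tcb;
}

/* Push onto the tail of the ready queue. */
static void ex_tcb_push(struct ex_tcb **head, struct ex_tcb **tail,
			struct ex_tcb *tcb)
{
	tcb->next = NULL;
	if (*tail)
		(*tail)->next = tcb;
	else
		*head = tcb;
	*tail = tcb;
}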
-/* et131x_send_packets - This function is called by the OS to send packets
- * @skb: the packet(s) to send
- * @netdev:device on which to TX the above packet(s)
- *
- * Return 0 in almost all cases; non-zero value in extreme hard failure only
- */
+/* et131x_send_packets - This function is called by the OS to send packets */
static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
int status = 0;
struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct tx_ring *tx_ring = &adapter->tx_ring;
/* Send these packets
*
@@ -3143,7 +3016,7 @@ static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
*/
/* TCB is not available */
- if (adapter->tx_ring.used >= NUM_TCB) {
+ if (tx_ring->used >= NUM_TCB) {
/* NOTE: If there's an error on send, no need to queue the
* packet under Linux; if we just send an error up to the
* netif layer, it will resend the skb to us.
@@ -3187,6 +3060,7 @@ static inline void free_send_packet(struct et131x_adapter *adapter,
unsigned long flags;
struct tx_desc *desc = NULL;
struct net_device_stats *stats = &adapter->net_stats;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
u64 dma_addr;
if (tcb->flags & FMP_DEST_BROAD)
@@ -3204,9 +3078,8 @@ static inline void free_send_packet(struct et131x_adapter *adapter,
* they point to
*/
do {
- desc = (struct tx_desc *)
- (adapter->tx_ring.tx_desc_ring +
- INDEX10(tcb->index_start));
+ desc = tx_ring->tx_desc_ring +
+ INDEX10(tcb->index_start);
dma_addr = desc->addr_lo;
dma_addr |= (u64)desc->addr_hi << 32;
@@ -3221,8 +3094,7 @@ static inline void free_send_packet(struct et131x_adapter *adapter,
tcb->index_start &= ~ET_DMA10_MASK;
tcb->index_start ^= ET_DMA10_WRAP;
}
- } while (desc != (adapter->tx_ring.tx_desc_ring +
- INDEX10(tcb->index)));
+ } while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index));
dev_kfree_skb_any(tcb->skb);
}
@@ -3234,20 +3106,19 @@ static inline void free_send_packet(struct et131x_adapter *adapter,
adapter->net_stats.tx_packets++;
- if (adapter->tx_ring.tcb_qtail)
- adapter->tx_ring.tcb_qtail->next = tcb;
+ if (tx_ring->tcb_qtail)
+ tx_ring->tcb_qtail->next = tcb;
else
/* Apparently ready Q is empty. */
- adapter->tx_ring.tcb_qhead = tcb;
+ tx_ring->tcb_qhead = tcb;
- adapter->tx_ring.tcb_qtail = tcb;
+ tx_ring->tcb_qtail = tcb;
spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
- WARN_ON(adapter->tx_ring.used < 0);
+ WARN_ON(tx_ring->used < 0);
}
/* et131x_free_busy_send_packets - Free and complete the stopped active sends
- * @adapter: pointer to our adapter
*
* Assumption - Send spinlock has been acquired
*/
@@ -3256,21 +3127,22 @@ static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
struct tcb *tcb;
unsigned long flags;
u32 freed = 0;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
/* Any packets being sent? Check the first TCB on the send list */
spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
- tcb = adapter->tx_ring.send_head;
+ tcb = tx_ring->send_head;
while (tcb != NULL && freed < NUM_TCB) {
struct tcb *next = tcb->next;
- adapter->tx_ring.send_head = next;
+ tx_ring->send_head = next;
if (next == NULL)
- adapter->tx_ring.send_tail = NULL;
+ tx_ring->send_tail = NULL;
- adapter->tx_ring.used--;
+ tx_ring->used--;
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
@@ -3279,18 +3151,17 @@ static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
- tcb = adapter->tx_ring.send_head;
+ tcb = tx_ring->send_head;
}
WARN_ON(freed == NUM_TCB);
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
- adapter->tx_ring.used = 0;
+ tx_ring->used = 0;
}
/* et131x_handle_send_interrupt - Interrupt handler for sending processing
- * @adapter: pointer to our adapter
*
* Re-claim the send resources, complete sends and get more to send from
* the send wait queue.
@@ -3303,6 +3174,7 @@ static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
u32 serviced;
struct tcb *tcb;
u32 index;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
serviced = readl(&adapter->regs->txdma.new_service_complete);
index = INDEX10(serviced);
@@ -3312,41 +3184,41 @@ static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
*/
spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
- tcb = adapter->tx_ring.send_head;
+ tcb = tx_ring->send_head;
while (tcb &&
((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
index < INDEX10(tcb->index)) {
- adapter->tx_ring.used--;
- adapter->tx_ring.send_head = tcb->next;
+ tx_ring->used--;
+ tx_ring->send_head = tcb->next;
if (tcb->next == NULL)
- adapter->tx_ring.send_tail = NULL;
+ tx_ring->send_tail = NULL;
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
free_send_packet(adapter, tcb);
spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
/* Goto the next packet */
- tcb = adapter->tx_ring.send_head;
+ tcb = tx_ring->send_head;
}
while (tcb &&
!((serviced ^ tcb->index) & ET_DMA10_WRAP)
&& index > (tcb->index & ET_DMA10_MASK)) {
- adapter->tx_ring.used--;
- adapter->tx_ring.send_head = tcb->next;
+ tx_ring->used--;
+ tx_ring->send_head = tcb->next;
if (tcb->next == NULL)
- adapter->tx_ring.send_tail = NULL;
+ tx_ring->send_tail = NULL;
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
free_send_packet(adapter, tcb);
spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
/* Goto the next packet */
- tcb = adapter->tx_ring.send_head;
+ tcb = tx_ring->send_head;
}
/* Wake up the queue when we hit a low-water mark */
- if (adapter->tx_ring.used <= NUM_TCB / 3)
+ if (tx_ring->used <= NUM_TCB / 3)
netif_wake_queue(adapter->netdev);
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
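
Both reclaim loops above decide whether a TCB has completed by comparing the hardware's serviced index with the TCB's index, using the wrap bit to tell which pass over the ring each one is on. Condensed into a single hedged predicate (0x3FF/0x400 are assumed values matching the driver's ET_DMA10_MASK/ET_DMA10_WRAP usage):

#include <linux/types.h>

static bool ex_tcb_done(u32 serviced, u32 tcb_index)
{
	u32 spos = serviced & 0x3FF;
	u32 tpos = tcb_index & 0x3FF;
	bool other_pass = (serviced ^ tcb_index) & 0x400;

	/* Different wrap bits: hardware is one pass ahead, so the TCB is done
	 * once its position lies above the serviced position.  Same wrap bit:
	 * it is done once the serviced position has moved past it.
	 */
	return other_pass ? spos < tpos : spos > tpos;
}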
@@ -3548,9 +3420,7 @@ static struct ethtool_ops et131x_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
-/* et131x_hwaddr_init - set up the MAC Address on the ET1310
- * @adapter: pointer to our private adapter structure
- */
+/* et131x_hwaddr_init - set up the MAC Address on the ET1310 */
static void et131x_hwaddr_init(struct et131x_adapter *adapter)
{
/* If have our default mac from init and no mac address from
@@ -3580,14 +3450,12 @@ static void et131x_hwaddr_init(struct et131x_adapter *adapter)
}
/* et131x_pci_init - initial PCI setup
- * @adapter: pointer to our private adapter structure
- * @pdev: our PCI device
*
* Perform the initial setup of PCI registers and if possible initialise
* the MAC address. At this point the I/O registers have yet to be mapped
*/
static int et131x_pci_init(struct et131x_adapter *adapter,
- struct pci_dev *pdev)
+ struct pci_dev *pdev)
{
u16 max_payload;
int i, rc;
@@ -3704,21 +3572,14 @@ static void et131x_error_timer_handler(unsigned long data)
mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000);
}
-/* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
- * @adapter: pointer to our private adapter structure
- */
+/* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx */
static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
{
- /* Free DMA memory */
et131x_tx_dma_memory_free(adapter);
et131x_rx_dma_memory_free(adapter);
}
/* et131x_adapter_memory_alloc
- * @adapter: pointer to our private adapter structure
- *
- * Returns 0 on success, errno on failure (as defined in errno.h).
- *
* Allocate all the memory blocks for send, receive and others.
*/
static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
@@ -3727,14 +3588,14 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
/* Allocate memory for the Tx Ring */
status = et131x_tx_dma_memory_alloc(adapter);
- if (status != 0) {
+ if (status) {
dev_err(&adapter->pdev->dev,
"et131x_tx_dma_memory_alloc FAILED\n");
return status;
}
/* Receive buffer memory allocation */
status = et131x_rx_dma_memory_alloc(adapter);
- if (status != 0) {
+ if (status) {
dev_err(&adapter->pdev->dev,
"et131x_rx_dma_memory_alloc FAILED\n");
et131x_tx_dma_memory_free(adapter);
@@ -3744,8 +3605,7 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
/* Init receive data structures */
status = et131x_init_recv(adapter);
if (status) {
- dev_err(&adapter->pdev->dev,
- "et131x_init_recv FAILED\n");
+ dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
et131x_adapter_memory_free(adapter);
}
return status;
@@ -3756,97 +3616,89 @@ static void et131x_adjust_link(struct net_device *netdev)
struct et131x_adapter *adapter = netdev_priv(netdev);
struct phy_device *phydev = adapter->phydev;
- if (phydev && phydev->link != adapter->link) {
- /* Check to see if we are in coma mode and if
- * so, disable it because we will not be able
- * to read PHY values until we are out.
- */
- if (et1310_in_phy_coma(adapter))
- et1310_disable_phy_coma(adapter);
-
- adapter->link = phydev->link;
- phy_print_status(phydev);
-
- if (phydev->link) {
- adapter->boot_coma = 20;
- if (phydev && phydev->speed == SPEED_10) {
- /* NOTE - Is there a way to query this without
- * TruePHY?
- * && TRU_QueryCoreType(adapter->hTruePhy, 0)==
- * EMI_TRUEPHY_A13O) {
- */
- u16 register18;
-
- et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
- &register18);
- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
- register18 | 0x4);
- et131x_mii_write(adapter, PHY_INDEX_REG,
- register18 | 0x8402);
- et131x_mii_write(adapter, PHY_DATA_REG,
- register18 | 511);
- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
- register18);
- }
+ if (!phydev)
+ return;
+ if (phydev->link == adapter->link)
+ return;
- et1310_config_flow_control(adapter);
+ /* Check to see if we are in coma mode and if
+ * so, disable it because we will not be able
+ * to read PHY values until we are out.
+ */
+ if (et1310_in_phy_coma(adapter))
+ et1310_disable_phy_coma(adapter);
- if (phydev && phydev->speed == SPEED_1000 &&
- adapter->registry_jumbo_packet > 2048) {
- u16 reg;
+ adapter->link = phydev->link;
+ phy_print_status(phydev);
- et131x_mii_read(adapter, PHY_CONFIG, &reg);
- reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
- reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
- et131x_mii_write(adapter, PHY_CONFIG, reg);
- }
+ if (phydev->link) {
+ adapter->boot_coma = 20;
+ if (phydev->speed == SPEED_10) {
+ u16 register18;
+
+ et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
+ &register18);
+ et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+ register18 | 0x4);
+ et131x_mii_write(adapter, PHY_INDEX_REG,
+ register18 | 0x8402);
+ et131x_mii_write(adapter, PHY_DATA_REG,
+ register18 | 511);
+ et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+ register18);
+ }
- et131x_set_rx_dma_timer(adapter);
- et1310_config_mac_regs2(adapter);
- } else {
- adapter->boot_coma = 0;
+ et1310_config_flow_control(adapter);
- if (phydev->speed == SPEED_10) {
- /* NOTE - Is there a way to query this without
- * TruePHY?
- * && TRU_QueryCoreType(adapter->hTruePhy, 0) ==
- * EMI_TRUEPHY_A13O)
- */
- u16 register18;
-
- et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
- &register18);
- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
- register18 | 0x4);
- et131x_mii_write(adapter, PHY_INDEX_REG,
- register18 | 0x8402);
- et131x_mii_write(adapter, PHY_DATA_REG,
- register18 | 511);
- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
- register18);
- }
+ if (phydev->speed == SPEED_1000 &&
+ adapter->registry_jumbo_packet > 2048) {
+ u16 reg;
- /* Free the packets being actively sent & stopped */
- et131x_free_busy_send_packets(adapter);
+ et131x_mii_read(adapter, PHY_CONFIG, &reg);
+ reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
+ reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
+ et131x_mii_write(adapter, PHY_CONFIG, reg);
+ }
- /* Re-initialize the send structures */
- et131x_init_send(adapter);
+ et131x_set_rx_dma_timer(adapter);
+ et1310_config_mac_regs2(adapter);
+ } else {
+ adapter->boot_coma = 0;
+
+ if (phydev->speed == SPEED_10) {
+ u16 register18;
+
+ et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
+ &register18);
+ et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+ register18 | 0x4);
+ et131x_mii_write(adapter, PHY_INDEX_REG,
+ register18 | 0x8402);
+ et131x_mii_write(adapter, PHY_DATA_REG,
+ register18 | 511);
+ et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+ register18);
+ }
- /* Bring the device back to the state it was during
- * init prior to autonegotiation being complete. This
- * way, when we get the auto-neg complete interrupt,
- * we can complete init by calling config_mac_regs2.
- */
- et131x_soft_reset(adapter);
+ /* Free the packets being actively sent & stopped */
+ et131x_free_busy_send_packets(adapter);
- /* Setup ET1310 as per the documentation */
- et131x_adapter_setup(adapter);
+ /* Re-initialize the send structures */
+ et131x_init_send(adapter);
- /* perform reset of tx/rx */
- et131x_disable_txrx(netdev);
- et131x_enable_txrx(netdev);
- }
+ /* Bring the device back to the state it was during
+ * init prior to autonegotiation being complete. This
+ * way, when we get the auto-neg complete interrupt,
+ * we can complete init by calling config_mac_regs2.
+ */
+ et131x_soft_reset(adapter);
+
+ /* Setup ET1310 as per the documentation */
+ et131x_adapter_setup(adapter);
+ /* perform reset of tx/rx */
+ et131x_disable_txrx(netdev);
+ et131x_enable_txrx(netdev);
}
}
@@ -3883,21 +3735,20 @@ static int et131x_mii_probe(struct net_device *netdev)
phydev->advertising = phydev->supported;
adapter->phydev = phydev;
- dev_info(&adapter->pdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+ dev_info(&adapter->pdev->dev,
+ "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
phydev->drv->name, dev_name(&phydev->dev));
return 0;
}
/* et131x_adapter_init
- * @adapter: pointer to the private adapter struct
- * @pdev: pointer to the PCI device
*
* Initialize the data structures for the et131x_adapter object and link
* them together with the platform provided device structures.
*/
static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
- struct pci_dev *pdev)
+ struct pci_dev *pdev)
{
static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
@@ -3925,7 +3776,6 @@ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
}
/* et131x_pci_remove
- * @pdev: a pointer to the device's pci_dev structure
*
* Registered in the pci_driver structure, this function is called when the
* PCI subsystem detects that a PCI device which matches the information
@@ -3952,9 +3802,7 @@ static void et131x_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-/* et131x_up - Bring up a device for use.
- * @netdev: device to be opened
- */
+/* et131x_up - Bring up a device for use. */
static void et131x_up(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -3963,9 +3811,7 @@ static void et131x_up(struct net_device *netdev)
phy_start(adapter->phydev);
}
-/* et131x_down - Bring down the device
- * @netdev: device to be brought down
- */
+/* et131x_down - Bring down the device */
static void et131x_down(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4022,7 +3868,9 @@ static irqreturn_t et131x_isr(int irq, void *dev_id)
{
bool handled = true;
struct net_device *netdev = (struct net_device *)dev_id;
- struct et131x_adapter *adapter = NULL;
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct rx_ring *rx_ring = &adapter->rx_ring;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
u32 status;
if (!netif_device_present(netdev)) {
@@ -4030,8 +3878,6 @@ static irqreturn_t et131x_isr(int irq, void *dev_id)
goto out;
}
- adapter = netdev_priv(netdev);
-
/* If the adapter is in low power state, then it should not
* recognize any interrupt
*/
@@ -4061,13 +3907,13 @@ static irqreturn_t et131x_isr(int irq, void *dev_id)
/* This is our interrupt, so process accordingly */
if (status & ET_INTR_WATCHDOG) {
- struct tcb *tcb = adapter->tx_ring.send_head;
+ struct tcb *tcb = tx_ring->send_head;
if (tcb)
if (++tcb->stale > 1)
status |= ET_INTR_TXDMA_ISR;
- if (adapter->rx_ring.unfinished_receives)
+ if (rx_ring->unfinished_receives)
status |= ET_INTR_RXDMA_XFR_DONE;
else if (tcb == NULL)
writel(0, &adapter->regs->global.watchdog_timer);
@@ -4075,7 +3921,7 @@ static irqreturn_t et131x_isr(int irq, void *dev_id)
status &= ~ET_INTR_WATCHDOG;
}
- if (status == 0) {
+ if (!status) {
/* This interrupt has in some way been "handled" by
* the ISR. Either it was a spurious Rx interrupt, or
* it was a Tx interrupt that has been filtered by
@@ -4101,7 +3947,6 @@ out:
}
/* et131x_isr_handler - The ISR handler
- * @p_adapter, a pointer to the device's private adapter structure
*
* scheduled to run in a deferred context by the ISR. This is where the ISR's
* work actually gets done.
@@ -4125,17 +3970,15 @@ static void et131x_isr_handler(struct work_struct *work)
if (status & ET_INTR_RXDMA_XFR_DONE)
et131x_handle_recv_interrupt(adapter);
- status &= 0xffffffd7;
+ status &= ~(ET_INTR_TXDMA_ERR | ET_INTR_RXDMA_XFR_DONE);
if (!status)
goto out;
/* Handle the TXDMA Error interrupt */
if (status & ET_INTR_TXDMA_ERR) {
- u32 txdma_err;
-
/* Following read also clears the register (COR) */
- txdma_err = readl(&iomem->txdma.tx_dma_error);
+ u32 txdma_err = readl(&iomem->txdma.tx_dma_error);
dev_warn(&adapter->pdev->dev,
"TXDMA_ERR interrupt, error = %d\n",
@@ -4281,11 +4124,7 @@ out:
et131x_enable_interrupts(adapter);
}
-/* et131x_stats - Return the current device statistics.
- * @netdev: device whose stats are being queried
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
+/* et131x_stats - Return the current device statistics */
static struct net_device_stats *et131x_stats(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4327,11 +4166,7 @@ static struct net_device_stats *et131x_stats(struct net_device *netdev)
return stats;
}
-/* et131x_open - Open the device for use.
- * @netdev: device to be opened
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
+/* et131x_open - Open the device for use. */
static int et131x_open(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4360,11 +4195,7 @@ static int et131x_open(struct net_device *netdev)
return result;
}
-/* et131x_close - Close the device
- * @netdev: device to be closed
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
+/* et131x_close - Close the device */
static int et131x_close(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4382,8 +4213,6 @@ static int et131x_close(struct net_device *netdev)
* @netdev: device on which the control request is being made
* @reqbuf: a pointer to the IOCTL request buffer
* @cmd: the IOCTL command code
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
*/
static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
int cmd)
@@ -4400,8 +4229,6 @@ static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
* @adapter: pointer to our private adapter structure
*
* FIXME: lot of dups with MAC code
- *
- * Returns 0 on success, errno on failure
*/
static int et131x_set_packet_filter(struct et131x_adapter *adapter)
{
@@ -4460,9 +4287,7 @@ static int et131x_set_packet_filter(struct et131x_adapter *adapter)
return status;
}
-/* et131x_multicast - The handler to configure multicasting on the interface
- * @netdev: a pointer to a net_device struct representing the device
- */
+/* et131x_multicast - The handler to configure multicasting on the interface */
static void et131x_multicast(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4522,27 +4347,21 @@ static void et131x_multicast(struct net_device *netdev)
* NOTE - This block will always update the multicast_list with the
* hardware, even if the addresses aren't the same.
*/
- if (packet_filter != adapter->packet_filter) {
- /* Call the device's filter function */
+ if (packet_filter != adapter->packet_filter)
et131x_set_packet_filter(adapter);
- }
+
spin_unlock_irqrestore(&adapter->lock, flags);
}
-/* et131x_tx - The handler to tx a packet on the device
- * @skb: data to be Tx'd
- * @netdev: device on which data is to be Tx'd
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
+/* et131x_tx - The handler to tx a packet on the device */
static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
{
int status = 0;
struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct tx_ring *tx_ring = &adapter->tx_ring;
/* stop the queue if it's getting full */
- if (adapter->tx_ring.used >= NUM_TCB - 1 &&
- !netif_queue_stopped(netdev))
+ if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
netif_stop_queue(netdev);
/* Save the timestamp for the TX timeout watchdog */
@@ -4562,7 +4381,6 @@ static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
}
/* et131x_tx_timeout - Timeout handler
- * @netdev: a pointer to a net_device struct representing the device
*
* The handler called when a Tx request times out. The timeout period is
* specified by the 'tx_timeo" element in the net_device structure (see
@@ -4571,6 +4389,7 @@ static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
static void et131x_tx_timeout(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct tx_ring *tx_ring = &adapter->tx_ring;
struct tcb *tcb;
unsigned long flags;
@@ -4593,7 +4412,7 @@ static void et131x_tx_timeout(struct net_device *netdev)
/* Is send stuck? */
spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
- tcb = adapter->tx_ring.send_head;
+ tcb = tx_ring->send_head;
if (tcb != NULL) {
tcb->count++;
@@ -4619,12 +4438,7 @@ static void et131x_tx_timeout(struct net_device *netdev)
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
-/* et131x_change_mtu - The handler called to change the MTU for the device
- * @netdev: device whose MTU is to be changed
- * @new_mtu: the desired MTU
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- */
+/* et131x_change_mtu - The handler called to change the MTU for the device */
static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
{
int result = 0;
@@ -4669,22 +4483,13 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
return result;
}
-/* et131x_set_mac_addr - handler to change the MAC address for the device
- * @netdev: device whose MAC is to be changed
- * @new_mac: the desired MAC address
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
- *
- * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14
- */
+/* et131x_set_mac_addr - handler to change the MAC address for the device */
static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
{
int result = 0;
struct et131x_adapter *adapter = netdev_priv(netdev);
struct sockaddr *address = new_mac;
- /* begin blux */
-
if (adapter == NULL)
return -ENODEV;
@@ -4746,15 +4551,13 @@ static const struct net_device_ops et131x_netdev_ops = {
* @pdev: a pointer to the device's pci_dev structure
* @ent: this device's entry in the pci_device_id table
*
- * Returns 0 on success, errno on failure (as defined in errno.h)
- *
* Registered in the pci_driver structure, this function is called when the
* PCI subsystem finds a new PCI device which matches the information
* contained in the pci_device_id table. This routine is the equivalent to
* a device insertion routine.
*/
static int et131x_pci_setup(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
struct net_device *netdev;
struct et131x_adapter *adapter;
@@ -4930,7 +4733,7 @@ err_disable:
goto out;
}
-static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
+static const struct pci_device_id et131x_pci_table[] = {
{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
{0,}
diff --git a/drivers/staging/et131x/et131x.h b/drivers/staging/et131x/et131x.h
index bbe78a703a23..2ac6e9980117 100644
--- a/drivers/staging/et131x/et131x.h
+++ b/drivers/staging/et131x/et131x.h
@@ -1668,43 +1668,3 @@ struct address_map {
#define LED_100TX_SHIFT 4
/* MI Register 29 - 31: Reserved Reg(0x1D - 0x1E) */
-
-/* Defines for PHY access routines */
-
-/* Define bit operation flags */
-#define TRUEPHY_BIT_CLEAR 0
-#define TRUEPHY_BIT_SET 1
-#define TRUEPHY_BIT_READ 2
-
-/* Define read/write operation flags */
-#ifndef TRUEPHY_READ
-#define TRUEPHY_READ 0
-#define TRUEPHY_WRITE 1
-#define TRUEPHY_MASK 2
-#endif
-
-/* Define master/slave configuration values */
-#define TRUEPHY_CFG_SLAVE 0
-#define TRUEPHY_CFG_MASTER 1
-
-/* Define MDI/MDI-X settings */
-#define TRUEPHY_MDI 0
-#define TRUEPHY_MDIX 1
-#define TRUEPHY_AUTO_MDI_MDIX 2
-
-/* Define 10Base-T link polarities */
-#define TRUEPHY_POLARITY_NORMAL 0
-#define TRUEPHY_POLARITY_INVERTED 1
-
-/* Define auto-negotiation results */
-#define TRUEPHY_ANEG_NOT_COMPLETE 0
-#define TRUEPHY_ANEG_COMPLETE 1
-#define TRUEPHY_ANEG_DISABLED 2
-
-/* Define duplex advertisement flags */
-#define TRUEPHY_ADV_DUPLEX_NONE 0x00
-#define TRUEPHY_ADV_DUPLEX_FULL 0x01
-#define TRUEPHY_ADV_DUPLEX_HALF 0x02
-#define TRUEPHY_ADV_DUPLEX_BOTH \
- (TRUEPHY_ADV_DUPLEX_FULL | TRUEPHY_ADV_DUPLEX_HALF)
-
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index 817f837b240d..edd5cef300d0 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -35,7 +35,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kobject.h>
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index 074b0e5bcc68..0e499ce5f0d7 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -34,7 +34,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/boot.h b/drivers/staging/ft1000/ft1000-pcmcia/boot.h
index 9dce54eae1cf..60c015c1c28a 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/boot.h
+++ b/drivers/staging/ft1000/ft1000-pcmcia/boot.h
@@ -1,158 +1,158 @@
-//---------------------------------------------------------------------------
-// FT1000 driver for Flarion Flash OFDM NIC Device
-//
-// Copyright (C) 2002 Flarion Technologies, All rights reserved.
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License as published by the Free
-// Software Foundation; either version 2 of the License, or (at your option) any
-// later version. This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-// more details. You should have received a copy of the GNU General Public
-// License along with this program; if not, write to the
-// Free Software Foundation, Inc., 59 Temple Place -
-// Suite 330, Boston, MA 02111-1307, USA.
-//---------------------------------------------------------------------------
-//
-// File: boot.h
-//
-// Description: boatloader
-//
-// History:
-// 1/11/05 Whc Ported to Linux.
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+ FT1000 driver for Flarion Flash OFDM NIC Device
+
+ Copyright (C) 2002 Flarion Technologies, All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option) any
+ later version. This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details. You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the
+ Free Software Foundation, Inc., 59 Temple Place -
+ Suite 330, Boston, MA 02111-1307, USA.
+ ---------------------------------------------------------------------------
+
+ File: boot.h
+
+ Description: bootloader
+
+ History:
+ 1/11/05 Whc Ported to Linux.
+
+---------------------------------------------------------------------------*/
#ifndef _BOOTH_
#define _BOOTH_
-// Official bootloader
-static unsigned char bootimage [] = {
-0x00,0x00,0x01,0x5E,0x00,0x00
-,0x00,0x00,0x00,0x00,0x02,0xD7
-,0x00,0x00,0x01,0x5E,0x46,0xB3
-,0xE6,0x02,0x00,0x98,0xE6,0x8C
-,0x00,0x98,0xFB,0x92,0xFF,0xFF
-,0x98,0xFB,0x94,0xFF,0xFF,0x98
-,0xFB,0x06,0x08,0x00,0x98,0xFB
-,0x96,0x84,0x00,0x98,0xFB,0x08
-,0x1C,0x00,0x98,0xFB,0x51,0x25
-,0x10,0x1C,0x00,0xE6,0x51,0x01
-,0x07,0xFD,0x4C,0xFF,0x20,0xF5
-,0x51,0x02,0x20,0x08,0x00,0x4C
-,0xFF,0x20,0x3C,0x00,0xC0,0x64
-,0x98,0xC0,0x66,0x98,0xC0,0x68
-,0x98,0xC0,0x6A,0x98,0xC0,0x6C
-,0x98,0x90,0x08,0x90,0x09,0x90
-,0x0A,0x90,0x0B,0x90,0x0C,0x90
-,0x0D,0x90,0x0E,0x90,0x0F,0x90
-,0x04,0x90,0x06,0xFB,0x51,0x22
-,0x16,0x08,0x03,0xFB,0x51,0x52
-,0x16,0x08,0x04,0xFB,0x51,0x24
-,0x2B,0x08,0x06,0xFB,0x51,0x54
-,0x2B,0x08,0x07,0xFB,0x51,0x24
-,0x2B,0x08,0x09,0xFB,0x51,0x54
-,0x2B,0x08,0x0A,0xFB,0x51,0x12
-,0x16,0x08,0x0C,0xFB,0x51,0x52
-,0x16,0x08,0x0D,0x78,0x00,0x00
-,0x00,0x16,0x00,0x00,0xEC,0x31
-,0xAE,0x00,0x00,0x81,0x4C,0x0F
-,0xE6,0x43,0xFF,0xEC,0x31,0x4E
-,0x00,0x00,0x91,0xEC,0x31,0xAE
-,0x00,0x00,0x91,0x4C,0x0F,0xE6
-,0x43,0xFF,0xEC,0x31,0x5E,0x00
-,0x00,0xA1,0xEB,0x31,0x08,0x00
-,0x00,0xA6,0xEB,0x31,0x08,0x00
-,0x00,0xAC,0x3C,0x00,0xEB,0x31
-,0x08,0x00,0x00,0xA8,0x76,0xFE
-,0xFE,0x08,0xEB,0x31,0x08,0x20
-,0x00,0x00,0x76,0xFF,0xFF,0x18
-,0xED,0x31,0x08,0x20,0x00,0x00
-,0x26,0x10,0x04,0x10,0xF5,0x3C
-,0x01,0x3C,0x00,0x08,0x01,0x12
-,0x3C,0x11,0x3C,0x00,0x08,0x01
-,0x0B,0x08,0x00,0x6D,0xEC,0x31
-,0xAE,0x20,0x00,0x06,0xED,0x4D
-,0x08,0x00,0x00,0x67,0x80,0x6F
-,0x00,0x01,0x0B,0x6F,0x00,0x02
-,0x2E,0x76,0xEE,0x01,0x48,0x06
-,0x01,0x39,0xED,0x4D,0x18,0x00
-,0x02,0xED,0x4D,0x08,0x00,0x04
-,0x14,0x06,0xA4,0xED,0x31,0x22
-,0x00,0x00,0xAC,0x76,0xEE,0x07
-,0x48,0x6D,0x22,0x01,0x1E,0x08
-,0x01,0x58,0xEB,0x31,0x08,0x00
-,0x00,0xAC,0x06,0xFF,0xBA,0x3C
-,0x00,0xEB,0x31,0x08,0x20,0x00
-,0x04,0x3C,0x30,0xEB,0x31,0x08
-,0x20,0x00,0x02,0x3C,0x10,0xEB
-,0x31,0x08,0x20,0x00,0x00,0xED
-,0x31,0x08,0x20,0x00,0x00,0x04
-,0x10,0xF7,0xED,0x31,0x08,0x00
-,0x00,0xA2,0x91,0x00,0x9C,0x3C
-,0x80,0xEB,0x31,0x08,0x20,0x00
-,0x04,0x3C,0x20,0xEB,0x31,0x08
-,0x20,0x00,0x02,0x3C,0x10,0xEB
-,0x31,0x08,0x20,0x00,0x00,0xED
-,0x31,0x08,0x20,0x00,0x00,0x04
-,0x10,0xF7,0xED,0x31,0x08,0x20
-,0x00,0x04,0x42,0x10,0x90,0x08
-,0xEC,0x31,0xAE,0x20,0x00,0x06
-,0xA4,0x41,0x08,0x00,0xB6,0xED
-,0x41,0x28,0x7D,0xFF,0xFF,0x22
-,0xB3,0x40,0x98,0x2A,0x32,0xEB
-,0x41,0x28,0xB4,0x43,0xFC,0x05
-,0xFF,0xE6,0xA0,0x31,0x20,0x00
-,0x06,0xEB,0x31,0x08,0x20,0x00
-,0x04,0x3C,0x20,0xEB,0x31,0x08
-,0x20,0x00,0x02,0x3C,0x10,0xEB
-,0x31,0x08,0x20,0x00,0x00,0xED
-,0x31,0x08,0x20,0x00,0x00,0x04
-,0x10,0xF7,0xED,0x31,0x08,0x20
-,0x00,0x04,0x42,0x10,0x90,0x08
-,0xEC,0x31,0xAE,0x20,0x00,0x06
-,0xA4,0x41,0x08,0x00,0x68,0xED
-,0x41,0x28,0x7D,0xFF,0xFF,0x22
-,0xB3,0x40,0x98,0x2A,0x32,0xEB
-,0x41,0x28,0xB4,0x43,0xFC,0x05
-,0xFF,0xE6,0x48,0x04,0xEB,0x31
-,0x08,0x20,0x00,0x04,0xEB,0x31
-,0x18,0x20,0x00,0x02,0x3C,0x11
-,0xEB,0x31,0x18,0x20,0x00,0x00
-,0xED,0x31,0x08,0x20,0x00,0x00
-,0x04,0x10,0xF7,0xED,0x31,0x08
-,0x20,0x00,0x02,0x66,0x00,0x6F
-,0x00,0x01,0x16,0x76,0xEE,0x06
-,0x48,0x4A,0x1E,0x48,0x04,0xED
-,0x31,0x08,0x20,0x00,0x04,0xEB
-,0x31,0x08,0x00,0x00,0xA4,0x48
-,0x04,0xED,0x31,0x08,0x20,0x00
-,0x04,0xEB,0x31,0x08,0x00,0x00
-,0xA2,0x48,0x04,0x20,0x20,0x4A
-,0x7C,0x46,0x82,0x50,0x05,0x50
-,0x15,0xB5,0x1E,0x98,0xED,0x31
-,0x08,0x00,0x00,0xA8,0x10,0x47
-,0x3B,0x2C,0x01,0xDB,0x40,0x11
-,0x98,0xC1,0x1E,0x98,0x10,0x07
-,0x30,0xF9,0x40,0x07,0x18,0x98
-,0x2A,0x10,0xEB,0x31,0x08,0x00
-,0x00,0xA8,0xA4,0x1E,0x98,0xBB
-,0x1E,0x98,0x50,0x14,0x50,0x04
-,0x46,0x83,0x48,0x04,0x02,0x01
-,0x00,0x50,0x05,0x50,0x15,0x10
-,0x87,0x3F,0x90,0x2B,0x18,0x01
-,0x00,0xC0,0x31,0x00,0x00,0xAE
-,0xDF,0x41,0x00,0x08,0x00,0x1A
-,0x42,0x11,0x67,0x01,0xDF,0x41
-,0x02,0x08,0x00,0x10,0x42,0x11
-,0x62,0x01,0xB4,0x43,0x4A,0x68
-,0x50,0x14,0x50,0x04,0x24,0x10
-,0x48,0x04,0xF2,0x31,0x00,0x01
-,0x00,0x00,0xAE,0xF6,0x31,0x00
-,0x01,0x00,0x00,0xAE,0x62,0xE4
-,0xE5,0x61,0x04,0x48,0x04,0xE5
-,0x63,0x05,0x48,0x04,0x20,0x20
-,0x00,0x00,0x00,0x00
+/* Official bootloader */
+static unsigned char bootimage[] = {
+ 0x00, 0x00, 0x01, 0x5E, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0xD7,
+ 0x00, 0x00, 0x01, 0x5E, 0x46, 0xB3,
+ 0xE6, 0x02, 0x00, 0x98, 0xE6, 0x8C,
+ 0x00, 0x98, 0xFB, 0x92, 0xFF, 0xFF,
+ 0x98, 0xFB, 0x94, 0xFF, 0xFF, 0x98,
+ 0xFB, 0x06, 0x08, 0x00, 0x98, 0xFB,
+ 0x96, 0x84, 0x00, 0x98, 0xFB, 0x08,
+ 0x1C, 0x00, 0x98, 0xFB, 0x51, 0x25,
+ 0x10, 0x1C, 0x00, 0xE6, 0x51, 0x01,
+ 0x07, 0xFD, 0x4C, 0xFF, 0x20, 0xF5,
+ 0x51, 0x02, 0x20, 0x08, 0x00, 0x4C,
+ 0xFF, 0x20, 0x3C, 0x00, 0xC0, 0x64,
+ 0x98, 0xC0, 0x66, 0x98, 0xC0, 0x68,
+ 0x98, 0xC0, 0x6A, 0x98, 0xC0, 0x6C,
+ 0x98, 0x90, 0x08, 0x90, 0x09, 0x90,
+ 0x0A, 0x90, 0x0B, 0x90, 0x0C, 0x90,
+ 0x0D, 0x90, 0x0E, 0x90, 0x0F, 0x90,
+ 0x04, 0x90, 0x06, 0xFB, 0x51, 0x22,
+ 0x16, 0x08, 0x03, 0xFB, 0x51, 0x52,
+ 0x16, 0x08, 0x04, 0xFB, 0x51, 0x24,
+ 0x2B, 0x08, 0x06, 0xFB, 0x51, 0x54,
+ 0x2B, 0x08, 0x07, 0xFB, 0x51, 0x24,
+ 0x2B, 0x08, 0x09, 0xFB, 0x51, 0x54,
+ 0x2B, 0x08, 0x0A, 0xFB, 0x51, 0x12,
+ 0x16, 0x08, 0x0C, 0xFB, 0x51, 0x52,
+ 0x16, 0x08, 0x0D, 0x78, 0x00, 0x00,
+ 0x00, 0x16, 0x00, 0x00, 0xEC, 0x31,
+ 0xAE, 0x00, 0x00, 0x81, 0x4C, 0x0F,
+ 0xE6, 0x43, 0xFF, 0xEC, 0x31, 0x4E,
+ 0x00, 0x00, 0x91, 0xEC, 0x31, 0xAE,
+ 0x00, 0x00, 0x91, 0x4C, 0x0F, 0xE6,
+ 0x43, 0xFF, 0xEC, 0x31, 0x5E, 0x00,
+ 0x00, 0xA1, 0xEB, 0x31, 0x08, 0x00,
+ 0x00, 0xA6, 0xEB, 0x31, 0x08, 0x00,
+ 0x00, 0xAC, 0x3C, 0x00, 0xEB, 0x31,
+ 0x08, 0x00, 0x00, 0xA8, 0x76, 0xFE,
+ 0xFE, 0x08, 0xEB, 0x31, 0x08, 0x20,
+ 0x00, 0x00, 0x76, 0xFF, 0xFF, 0x18,
+ 0xED, 0x31, 0x08, 0x20, 0x00, 0x00,
+ 0x26, 0x10, 0x04, 0x10, 0xF5, 0x3C,
+ 0x01, 0x3C, 0x00, 0x08, 0x01, 0x12,
+ 0x3C, 0x11, 0x3C, 0x00, 0x08, 0x01,
+ 0x0B, 0x08, 0x00, 0x6D, 0xEC, 0x31,
+ 0xAE, 0x20, 0x00, 0x06, 0xED, 0x4D,
+ 0x08, 0x00, 0x00, 0x67, 0x80, 0x6F,
+ 0x00, 0x01, 0x0B, 0x6F, 0x00, 0x02,
+ 0x2E, 0x76, 0xEE, 0x01, 0x48, 0x06,
+ 0x01, 0x39, 0xED, 0x4D, 0x18, 0x00,
+ 0x02, 0xED, 0x4D, 0x08, 0x00, 0x04,
+ 0x14, 0x06, 0xA4, 0xED, 0x31, 0x22,
+ 0x00, 0x00, 0xAC, 0x76, 0xEE, 0x07,
+ 0x48, 0x6D, 0x22, 0x01, 0x1E, 0x08,
+ 0x01, 0x58, 0xEB, 0x31, 0x08, 0x00,
+ 0x00, 0xAC, 0x06, 0xFF, 0xBA, 0x3C,
+ 0x00, 0xEB, 0x31, 0x08, 0x20, 0x00,
+ 0x04, 0x3C, 0x30, 0xEB, 0x31, 0x08,
+ 0x20, 0x00, 0x02, 0x3C, 0x10, 0xEB,
+ 0x31, 0x08, 0x20, 0x00, 0x00, 0xED,
+ 0x31, 0x08, 0x20, 0x00, 0x00, 0x04,
+ 0x10, 0xF7, 0xED, 0x31, 0x08, 0x00,
+ 0x00, 0xA2, 0x91, 0x00, 0x9C, 0x3C,
+ 0x80, 0xEB, 0x31, 0x08, 0x20, 0x00,
+ 0x04, 0x3C, 0x20, 0xEB, 0x31, 0x08,
+ 0x20, 0x00, 0x02, 0x3C, 0x10, 0xEB,
+ 0x31, 0x08, 0x20, 0x00, 0x00, 0xED,
+ 0x31, 0x08, 0x20, 0x00, 0x00, 0x04,
+ 0x10, 0xF7, 0xED, 0x31, 0x08, 0x20,
+ 0x00, 0x04, 0x42, 0x10, 0x90, 0x08,
+ 0xEC, 0x31, 0xAE, 0x20, 0x00, 0x06,
+ 0xA4, 0x41, 0x08, 0x00, 0xB6, 0xED,
+ 0x41, 0x28, 0x7D, 0xFF, 0xFF, 0x22,
+ 0xB3, 0x40, 0x98, 0x2A, 0x32, 0xEB,
+ 0x41, 0x28, 0xB4, 0x43, 0xFC, 0x05,
+ 0xFF, 0xE6, 0xA0, 0x31, 0x20, 0x00,
+ 0x06, 0xEB, 0x31, 0x08, 0x20, 0x00,
+ 0x04, 0x3C, 0x20, 0xEB, 0x31, 0x08,
+ 0x20, 0x00, 0x02, 0x3C, 0x10, 0xEB,
+ 0x31, 0x08, 0x20, 0x00, 0x00, 0xED,
+ 0x31, 0x08, 0x20, 0x00, 0x00, 0x04,
+ 0x10, 0xF7, 0xED, 0x31, 0x08, 0x20,
+ 0x00, 0x04, 0x42, 0x10, 0x90, 0x08,
+ 0xEC, 0x31, 0xAE, 0x20, 0x00, 0x06,
+ 0xA4, 0x41, 0x08, 0x00, 0x68, 0xED,
+ 0x41, 0x28, 0x7D, 0xFF, 0xFF, 0x22,
+ 0xB3, 0x40, 0x98, 0x2A, 0x32, 0xEB,
+ 0x41, 0x28, 0xB4, 0x43, 0xFC, 0x05,
+ 0xFF, 0xE6, 0x48, 0x04, 0xEB, 0x31,
+ 0x08, 0x20, 0x00, 0x04, 0xEB, 0x31,
+ 0x18, 0x20, 0x00, 0x02, 0x3C, 0x11,
+ 0xEB, 0x31, 0x18, 0x20, 0x00, 0x00,
+ 0xED, 0x31, 0x08, 0x20, 0x00, 0x00,
+ 0x04, 0x10, 0xF7, 0xED, 0x31, 0x08,
+ 0x20, 0x00, 0x02, 0x66, 0x00, 0x6F,
+ 0x00, 0x01, 0x16, 0x76, 0xEE, 0x06,
+ 0x48, 0x4A, 0x1E, 0x48, 0x04, 0xED,
+ 0x31, 0x08, 0x20, 0x00, 0x04, 0xEB,
+ 0x31, 0x08, 0x00, 0x00, 0xA4, 0x48,
+ 0x04, 0xED, 0x31, 0x08, 0x20, 0x00,
+ 0x04, 0xEB, 0x31, 0x08, 0x00, 0x00,
+ 0xA2, 0x48, 0x04, 0x20, 0x20, 0x4A,
+ 0x7C, 0x46, 0x82, 0x50, 0x05, 0x50,
+ 0x15, 0xB5, 0x1E, 0x98, 0xED, 0x31,
+ 0x08, 0x00, 0x00, 0xA8, 0x10, 0x47,
+ 0x3B, 0x2C, 0x01, 0xDB, 0x40, 0x11,
+ 0x98, 0xC1, 0x1E, 0x98, 0x10, 0x07,
+ 0x30, 0xF9, 0x40, 0x07, 0x18, 0x98,
+ 0x2A, 0x10, 0xEB, 0x31, 0x08, 0x00,
+ 0x00, 0xA8, 0xA4, 0x1E, 0x98, 0xBB,
+ 0x1E, 0x98, 0x50, 0x14, 0x50, 0x04,
+ 0x46, 0x83, 0x48, 0x04, 0x02, 0x01,
+ 0x00, 0x50, 0x05, 0x50, 0x15, 0x10,
+ 0x87, 0x3F, 0x90, 0x2B, 0x18, 0x01,
+ 0x00, 0xC0, 0x31, 0x00, 0x00, 0xAE,
+ 0xDF, 0x41, 0x00, 0x08, 0x00, 0x1A,
+ 0x42, 0x11, 0x67, 0x01, 0xDF, 0x41,
+ 0x02, 0x08, 0x00, 0x10, 0x42, 0x11,
+ 0x62, 0x01, 0xB4, 0x43, 0x4A, 0x68,
+ 0x50, 0x14, 0x50, 0x04, 0x24, 0x10,
+ 0x48, 0x04, 0xF2, 0x31, 0x00, 0x01,
+ 0x00, 0x00, 0xAE, 0xF6, 0x31, 0x00,
+ 0x01, 0x00, 0x00, 0xAE, 0x62, 0xE4,
+ 0xE5, 0x61, 0x04, 0x48, 0x04, 0xE5,
+ 0x63, 0x05, 0x48, 0x04, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0x00
};
#endif
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index 29d0a72f0d65..d6421b9b5981 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -15,7 +15,7 @@
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place -
Suite 330, Boston, MA 02111-1307, USA.
------------------------------------------------------------------------------*/
+ -------------------------------------------------------------------------*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -80,19 +80,19 @@ MODULE_SUPPORTED_DEVICE("FT1000");
#define MAX_RCV_LOOP 100
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_read_fifo_len
-// Description: This function will read the ASIC Uplink FIFO status register
-// which will return the number of bytes remaining in the Uplink FIFO.
-// Sixteen bytes are subtracted to make sure that the ASIC does not
-// reach its threshold.
-// Input:
-// dev - network device structure
-// Output:
-// value - number of bytes available in the ASIC Uplink FIFO.
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_read_fifo_len
+ Description: This function will read the ASIC Uplink FIFO status register
+ which will return the number of bytes remaining in the Uplink FIFO.
+ Sixteen bytes are subtracted to make sure that the ASIC does not
+ reach its threshold.
+ Input:
+ dev - network device structure
+ Output:
+ value - number of bytes available in the ASIC Uplink FIFO.
+
+ -------------------------------------------------------------------------*/
static inline u16 ft1000_read_fifo_len(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -103,25 +103,25 @@ static inline u16 ft1000_read_fifo_len(struct net_device *dev)
return (ft1000_read_reg(dev, FT1000_REG_MAG_UFSR) - 16);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_read_dpram
-// Description: This function will read the specific area of dpram
-// (Electrabuzz ASIC only)
-// Input:
-// dev - device structure
-// offset - index of dpram
-// Output:
-// value - value of dpram
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_read_dpram
+ Description: This function will read the specific area of dpram
+ (Electrabuzz ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ Output:
+ value - value of dpram
+
+ -------------------------------------------------------------------------*/
u16 ft1000_read_dpram(struct net_device *dev, int offset)
{
struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
u16 data;
- // Provide mutual exclusive access while reading ASIC registers.
+ /* Provide mutually exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock, flags);
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
data = ft1000_read_reg(dev, FT1000_REG_DPRAM_DATA);
@@ -130,54 +130,54 @@ u16 ft1000_read_dpram(struct net_device *dev, int offset)
return (data);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_write_dpram
-// Description: This function will write to a specific area of dpram
-// (Electrabuzz ASIC only)
-// Input:
-// dev - device structure
-// offset - index of dpram
-// value - value to write
-// Output:
-// none.
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_write_dpram
+ Description: This function will write to a specific area of dpram
+ (Electrabuzz ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ value - value to write
+ Output:
+ none.
+
+ -------------------------------------------------------------------------*/
static inline void ft1000_write_dpram(struct net_device *dev,
int offset, u16 value)
{
struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
- // Provide mutual exclusive access while reading ASIC registers.
+ /* Provide mutually exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock, flags);
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, value);
spin_unlock_irqrestore(&info->dpram_lock, flags);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_read_dpram_mag_16
-// Description: This function will read the specific area of dpram
-// (Magnemite ASIC only)
-// Input:
-// dev - device structure
-// offset - index of dpram
-// Output:
-// value - value of dpram
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_read_dpram_mag_16
+ Description: This function will read the specific area of dpram
+ (Magnemite ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ Output:
+ value - value of dpram
+
+ -------------------------------------------------------------------------*/
u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index)
{
struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
u16 data;
- // Provide mutual exclusive access while reading ASIC registers.
+ /* Provide mutually exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock, flags);
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
- // check if we want to read upper or lower 32-bit word
+ /* check if we want to read upper or lower 32-bit word */
if (Index) {
data = ft1000_read_reg(dev, FT1000_REG_MAG_DPDATAL);
} else {
@@ -188,26 +188,26 @@ u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index)
return (data);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_write_dpram_mag_16
-// Description: This function will write to a specific area of dpram
-// (Magnemite ASIC only)
-// Input:
-// dev - device structure
-// offset - index of dpram
-// value - value to write
-// Output:
-// none.
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_write_dpram_mag_16
+ Description: This function will write to a specific area of dpram
+ (Magnemite ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ value - value to write
+ Output:
+ none.
+
+ -------------------------------------------------------------------------*/
static inline void ft1000_write_dpram_mag_16(struct net_device *dev,
int offset, u16 value, int Index)
{
struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
- // Provide mutual exclusive access while reading ASIC registers.
+ /* Provide mutually exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock, flags);
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
if (Index) {
@@ -218,25 +218,25 @@ static inline void ft1000_write_dpram_mag_16(struct net_device *dev,
spin_unlock_irqrestore(&info->dpram_lock, flags);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_read_dpram_mag_32
-// Description: This function will read the specific area of dpram
-// (Magnemite ASIC only)
-// Input:
-// dev - device structure
-// offset - index of dpram
-// Output:
-// value - value of dpram
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_read_dpram_mag_32
+ Description: This function will read the specific area of dpram
+ (Magnemite ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ Output:
+ value - value of dpram
+
+ -------------------------------------------------------------------------*/
u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset)
{
struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
u32 data;
- // Provide mutual exclusive access while reading ASIC registers.
+ /* Provide mutually exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock, flags);
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
data = inl(dev->base_addr + FT1000_REG_MAG_DPDATAL);
@@ -245,41 +245,41 @@ u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset)
return (data);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_write_dpram_mag_32
-// Description: This function will write to a specific area of dpram
-// (Magnemite ASIC only)
-// Input:
-// dev - device structure
-// offset - index of dpram
-// value - value to write
-// Output:
-// none.
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_write_dpram_mag_32
+ Description: This function will write to a specific area of dpram
+ (Magnemite ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ value - value to write
+ Output:
+ none.
+
+ -------------------------------------------------------------------------*/
void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value)
{
struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
- // Provide mutual exclusive access while reading ASIC registers.
+ /* Provide mutually exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock, flags);
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
outl(value, dev->base_addr + FT1000_REG_MAG_DPDATAL);
spin_unlock_irqrestore(&info->dpram_lock, flags);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_enable_interrupts
-// Description: This function will enable interrupts base on the current interrupt mask.
-// Input:
-// dev - device structure
-// Output:
-// None.
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_enable_interrupts
+ Description: This function will enable interrupts based on the current interrupt mask.
+ Input:
+ dev - device structure
+ Output:
+ None.
+
+ -------------------------------------------------------------------------*/
static void ft1000_enable_interrupts(struct net_device *dev)
{
u16 tempword;
@@ -292,16 +292,16 @@ static void ft1000_enable_interrupts(struct net_device *dev)
tempword);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_disable_interrupts
-// Description: This function will disable all interrupts.
-// Input:
-// dev - device structure
-// Output:
-// None.
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_disable_interrupts
+ Description: This function will disable all interrupts.
+ Input:
+ dev - device structure
+ Output:
+ None.
+
+ -------------------------------------------------------------------------*/
static void ft1000_disable_interrupts(struct net_device *dev)
{
u16 tempword;
@@ -314,17 +314,17 @@ static void ft1000_disable_interrupts(struct net_device *dev)
tempword);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_reset_asic
-// Description: This function will call the Card Service function to reset the
-// ASIC.
-// Input:
-// dev - device structure
-// Output:
-// none
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_reset_asic
+ Description: This function will call the Card Service function to reset the
+ ASIC.
+ Input:
+ dev - device structure
+ Output:
+ none
+
+ -------------------------------------------------------------------------*/
static void ft1000_reset_asic(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -335,21 +335,23 @@ static void ft1000_reset_asic(struct net_device *dev)
(*info->ft1000_reset) (pcmcia->link);
- // Let's use the register provided by the Magnemite ASIC to reset the
- // ASIC and DSP.
+ /*
+ * Let's use the register provided by the Magnemite ASIC to reset the
+ * ASIC and DSP.
+ */
if (info->AsicID == MAGNEMITE_ID) {
ft1000_write_reg(dev, FT1000_REG_RESET,
(DSP_RESET_BIT | ASIC_RESET_BIT));
}
mdelay(1);
if (info->AsicID == ELECTRABUZZ_ID) {
- // set watermark to -1 in order to not generate an interrupt
+ /* set watermark to -1 in order to not generate an interrupt */
ft1000_write_reg(dev, FT1000_REG_WATERMARK, 0xffff);
} else {
- // set watermark to -1 in order to not generate an interrupt
+ /* set watermark to -1 in order to not generate an interrupt */
ft1000_write_reg(dev, FT1000_REG_MAG_WATERMARK, 0xffff);
}
- // clear interrupts
+ /* clear interrupts */
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
DEBUG(1, "ft1000_hw: interrupt status register = 0x%x\n", tempword);
ft1000_write_reg(dev, FT1000_REG_SUP_ISR, tempword);
@@ -358,17 +360,17 @@ static void ft1000_reset_asic(struct net_device *dev)
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_reset_card
-// Description: This function will reset the card
-// Input:
-// dev - device structure
-// Output:
-// status - false (card reset fail)
-// true (card reset successful)
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_reset_card
+ Description: This function will reset the card
+ Input:
+ dev - device structure
+ Output:
+ status - false (card reset fail)
+ true (card reset successful)
+
+ -------------------------------------------------------------------------*/
static int ft1000_reset_card(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -384,9 +386,9 @@ static int ft1000_reset_card(struct net_device *dev)
info->squeseqnum = 0;
ft1000_disable_interrupts(dev);
-// del_timer(&poll_timer);
+ /* del_timer(&poll_timer); */
- // Make sure we free any memory reserve for provisioning
+ /* Make sure we free any memory reserved for provisioning */
while (list_empty(&info->prov_list) == 0) {
DEBUG(0,
"ft1000_hw:ft1000_reset_card:deleting provisioning record\n");
@@ -406,7 +408,7 @@ static int ft1000_reset_card(struct net_device *dev)
(DSP_RESET_BIT | ASIC_RESET_BIT));
}
- // Copy DSP session record into info block if this is not a coldstart
+ /* Copy DSP session record into info block if this is not a coldstart */
if (ft1000_card_present == 1) {
spin_lock_irqsave(&info->dpram_lock, flags);
if (info->AsicID == ELECTRABUZZ_ID) {
@@ -430,29 +432,29 @@ static int ft1000_reset_card(struct net_device *dev)
DEBUG(1, "ft1000_hw:ft1000_reset_card:resetting ASIC\n");
mdelay(10);
- //reset ASIC
+ /* reset ASIC */
ft1000_reset_asic(dev);
DEBUG(1, "ft1000_hw:ft1000_reset_card:downloading dsp image\n");
if (info->AsicID == MAGNEMITE_ID) {
- // Put dsp in reset and take ASIC out of reset
+ /* Put dsp in reset and take ASIC out of reset */
DEBUG(0,
"ft1000_hw:ft1000_reset_card:Put DSP in reset and take ASIC out of reset\n");
ft1000_write_reg(dev, FT1000_REG_RESET, DSP_RESET_BIT);
- // Setting MAGNEMITE ASIC to big endian mode
+ /* Setting MAGNEMITE ASIC to big endian mode */
ft1000_write_reg(dev, FT1000_REG_SUP_CTRL, HOST_INTF_BE);
- // Download bootloader
+ /* Download bootloader */
card_bootload(dev);
- // Take DSP out of reset
+ /* Take DSP out of reset */
ft1000_write_reg(dev, FT1000_REG_RESET, 0);
- // FLARION_DSP_ACTIVE;
+ /* FLARION_DSP_ACTIVE; */
mdelay(10);
DEBUG(0, "ft1000_hw:ft1000_reset_card:Take DSP out of reset\n");
- // Wait for 0xfefe indicating dsp ready before starting download
+ /* Wait for 0xfefe indicating dsp ready before starting download */
for (i = 0; i < 50; i++) {
tempword =
ft1000_read_dpram_mag_16(dev, FT1000_MAG_DPRAM_FEFE,
@@ -470,7 +472,7 @@ static int ft1000_reset_card(struct net_device *dev)
}
} else {
- // Take DSP out of reset
+ /* Take DSP out of reset */
ft1000_write_reg(dev, FT1000_REG_RESET, ~DSP_RESET_BIT);
mdelay(10);
}
@@ -485,17 +487,19 @@ static int ft1000_reset_card(struct net_device *dev)
mdelay(10);
if (info->AsicID == ELECTRABUZZ_ID) {
- // Need to initialize the FIFO length counter to zero in order to sync up
- // with the DSP
+ /*
+ * Need to initialize the FIFO length counter to zero in order to sync up
+ * with the DSP
+ */
info->fifo_cnt = 0;
ft1000_write_dpram(dev, FT1000_FIFO_LEN, info->fifo_cnt);
- // Initialize DSP heartbeat area to ho
+ /* Initialize DSP heartbeat area to ho */
ft1000_write_dpram(dev, FT1000_HI_HO, ho);
tempword = ft1000_read_dpram(dev, FT1000_HI_HO);
DEBUG(1, "ft1000_hw:ft1000_reset_asic:hi_ho value = 0x%x\n",
tempword);
} else {
- // Initialize DSP heartbeat area to ho
+ /* Initialize DSP heartbeat area to ho */
ft1000_write_dpram_mag_16(dev, FT1000_MAG_HI_HO, ho_mag,
FT1000_MAG_HI_HO_INDX);
tempword =
@@ -509,40 +513,44 @@ static int ft1000_reset_card(struct net_device *dev)
ft1000_enable_interrupts(dev);
/* Schedule heartbeat process to run every 2 seconds */
-// poll_timer.expires = jiffies + (2*HZ);
-// poll_timer.data = (u_long)dev;
-// add_timer(&poll_timer);
+ /* poll_timer.expires = jiffies + (2*HZ); */
+ /* poll_timer.data = (u_long)dev; */
+ /* add_timer(&poll_timer); */
return true;
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_chkcard
-// Description: This function will check if the device is presently available on
-// the system.
-// Input:
-// dev - device structure
-// Output:
-// status - false (device is not present)
-// true (device is present)
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_chkcard
+ Description: This function will check if the device is presently available on
+ the system.
+ Input:
+ dev - device structure
+ Output:
+ status - false (device is not present)
+ true (device is present)
+
+ -------------------------------------------------------------------------*/
static int ft1000_chkcard(struct net_device *dev)
{
u16 tempword;
- // Mask register is used to check for device presence since it is never
- // set to zero.
+ /*
+ * Mask register is used to check for device presence since it is never
+ * set to zero.
+ */
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_IMASK);
if (tempword == 0) {
DEBUG(1,
"ft1000_hw:ft1000_chkcard: IMASK = 0 Card not detected\n");
return false;
}
- // The system will return the value of 0xffff for the version register
- // if the device is not present.
+ /*
+ * The system will return the value of 0xffff for the version register
+ * if the device is not present.
+ */
tempword = ft1000_read_reg(dev, FT1000_REG_ASIC_ID);
if (tempword == 0xffff) {
DEBUG(1,
@@ -553,17 +561,17 @@ static int ft1000_chkcard(struct net_device *dev)
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_hbchk
-// Description: This function will perform the heart beat check of the DSP as
-// well as the ASIC.
-// Input:
-// dev - device structure
-// Output:
-// none
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_hbchk
+ Description: This function will perform the heart beat check of the DSP as
+ well as the ASIC.
+ Input:
+ dev - device structure
+ Output:
+ none
+
+ -------------------------------------------------------------------------*/
static void ft1000_hbchk(u_long data)
{
struct net_device *dev = (struct net_device *)data;
@@ -574,7 +582,7 @@ static void ft1000_hbchk(u_long data)
info = netdev_priv(dev);
if (info->CardReady == 1) {
- // Perform dsp heartbeat check
+ /* Perform dsp heartbeat check */
if (info->AsicID == ELECTRABUZZ_ID) {
tempword = ft1000_read_dpram(dev, FT1000_HI_HO);
} else {
@@ -585,7 +593,7 @@ static void ft1000_hbchk(u_long data)
}
DEBUG(1, "ft1000_hw:ft1000_hbchk:hi_ho value = 0x%x\n",
tempword);
- // Let's perform another check if ho is not detected
+ /* Let's perform another check if ho is not detected */
if (tempword != ho) {
if (info->AsicID == ELECTRABUZZ_ID) {
tempword = ft1000_read_dpram(dev, FT1000_HI_HO);
@@ -639,7 +647,7 @@ static void ft1000_hbchk(u_long data)
}
tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- // Let's check doorbell again if fail
+ /* Let's check doorbell again if fail */
if (tempword & FT1000_DB_HB) {
tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
}
@@ -686,8 +694,10 @@ static void ft1000_hbchk(u_long data)
add_timer(&poll_timer);
return;
}
- // Set dedicated area to hi and ring appropriate doorbell according
- // to hi/ho heartbeat protocol
+ /*
+ * Set dedicated area to hi and ring appropriate doorbell according
+ * to hi/ho heartbeat protocol
+ */
if (info->AsicID == ELECTRABUZZ_ID) {
ft1000_write_dpram(dev, FT1000_HI_HO, hi);
} else {
@@ -703,7 +713,7 @@ static void ft1000_hbchk(u_long data)
(dev, FT1000_MAG_HI_HO,
FT1000_MAG_HI_HO_INDX));
}
- // Let's write hi again if fail
+ /* Let's write hi again if fail */
if (tempword != hi) {
if (info->AsicID == ELECTRABUZZ_ID) {
ft1000_write_dpram(dev, FT1000_HI_HO, hi);
@@ -774,14 +784,14 @@ static void ft1000_hbchk(u_long data)
add_timer(&poll_timer);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_send_cmd
-// Description:
-// Input:
-// Output:
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_send_cmd
+ Description:
+ Input:
+ Output:
+
+ -------------------------------------------------------------------------*/
static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, u16 qtype)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -790,17 +800,19 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size,
unsigned long flags;
size += sizeof(struct pseudo_hdr);
- // check for odd byte and increment to 16-bit word align value
+ /* check for odd byte and increment to 16-bit word align value */
if ((size & 0x0001)) {
size++;
}
DEBUG(1, "FT1000:ft1000_send_cmd:total length = %d\n", size);
DEBUG(1, "FT1000:ft1000_send_cmd:length = %d\n", ntohs(*ptempbuffer));
- // put message into slow queue area
- // All messages are in the form total_len + pseudo header + message body
+ /*
+ * put message into slow queue area
+ * All messages are in the form total_len + pseudo header + message body
+ */
spin_lock_irqsave(&info->dpram_lock, flags);
- // Make sure SLOWQ doorbell is clear
+ /* Make sure SLOWQ doorbell is clear */
tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
i=0;
while (tempword & FT1000_DB_DPRAM_TX) {
@@ -816,9 +828,9 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size,
if (info->AsicID == ELECTRABUZZ_ID) {
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
FT1000_DPRAM_TX_BASE);
- // Write total length to dpram
+ /* Write total length to dpram */
ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, size);
- // Write pseudo header and messgae body
+ /* Write pseudo header and message body */
for (i = 0; i < (size >> 1); i++) {
DEBUG(1, "FT1000:ft1000_send_cmd:data %d = 0x%x\n", i,
*ptempbuffer);
@@ -828,9 +840,9 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size,
} else {
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
FT1000_DPRAM_MAG_TX_BASE);
- // Write total length to dpram
+ /* Write total length to dpram */
ft1000_write_reg(dev, FT1000_REG_MAG_DPDATAH, htons(size));
- // Write pseudo header and messgae body
+ /* Write pseudo header and message body */
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
FT1000_DPRAM_MAG_TX_BASE + 1);
for (i = 0; i < (size >> 2); i++) {
@@ -850,23 +862,23 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size,
}
spin_unlock_irqrestore(&info->dpram_lock, flags);
- // ring doorbell to notify DSP that we have a message ready
+ /* ring doorbell to notify DSP that we have a message ready */
ft1000_write_reg(dev, FT1000_REG_DOORBELL, FT1000_DB_DPRAM_TX);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_receive_cmd
-// Description: This function will read a message from the dpram area.
-// Input:
-// dev - network device structure
-// pbuffer - caller supply address to buffer
-// pnxtph - pointer to next pseudo header
-// Output:
-// Status = 0 (unsuccessful)
-// = 1 (successful)
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_receive_cmd
+ Description: This function will read a message from the dpram area.
+ Input:
+ dev - network device structure
+ pbuffer - caller-supplied address to buffer
+ pnxtph - pointer to next pseudo header
+ Output:
+ Status = 0 (unsuccessful)
+ = 1 (successful)
+
+ -------------------------------------------------------------------------*/
static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
int maxsz, u16 *pnxtph)
{
@@ -919,7 +931,7 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
FT1000_REG_MAG_DPDATAH);
pbuffer++;
}
- //copy odd aligned word
+ /* copy odd aligned word */
*pbuffer = inw(dev->base_addr + FT1000_REG_MAG_DPDATAL);
DEBUG(1, "ft1000_hw:received data = 0x%x\n", *pbuffer);
pbuffer++;
@@ -928,14 +940,16 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
pbuffer++;
}
if (size & 0x0001) {
- //copy odd byte from fifo
+ /* copy odd byte from fifo */
tempword = ft1000_read_reg(dev, FT1000_REG_DPRAM_DATA);
*pbuffer = ntohs(tempword);
}
spin_unlock_irqrestore(&info->dpram_lock, flags);
- // Check if pseudo header checksum is good
- // Calculate pseudo header checksum
+ /*
+ * Check if pseudo header checksum is good
+ * Calculate pseudo header checksum
+ */
tempword = *ppseudohdr++;
for (i = 1; i < 7; i++) {
tempword ^= *ppseudohdr++;
@@ -943,24 +957,24 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
if ((tempword != *ppseudohdr)) {
DEBUG(1,
"FT1000:ft1000_receive_cmd:Pseudo header checksum mismatch\n");
- // Drop this message
+ /* Drop this message */
return false;
}
return true;
}
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_proc_drvmsg
-// Description: This function will process the various driver messages.
-// Input:
-// dev - device structure
-// pnxtph - pointer to next pseudo header
-// Output:
-// none
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_proc_drvmsg
+ Description: This function will process the various driver messages.
+ Input:
+ dev - device structure
+ pnxtph - pointer to next pseudo header
+ Output:
+ none
+
+ -------------------------------------------------------------------------*/
static void ft1000_proc_drvmsg(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -988,7 +1002,7 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
}
if ( ft1000_receive_cmd(dev, &cmdbuffer[0], MAX_CMD_SQSIZE, &tempword) ) {
- // Get the message type which is total_len + PSEUDO header + msgtype + message body
+ /* Get the message type which is total_len + PSEUDO header + msgtype + message body */
pdrvmsg = (struct drv_msg *) & cmdbuffer[0];
msgtype = ntohs(pdrvmsg->type);
DEBUG(1, "Command message type = 0x%x\n", msgtype);
@@ -999,7 +1013,7 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
mdelay(25);
while (list_empty(&info->prov_list) == 0) {
DEBUG(0, "Sending a provisioning message\n");
- // Make sure SLOWQ doorbell is clear
+ /* Make sure SLOWQ doorbell is clear */
tempword =
ft1000_read_reg(dev, FT1000_REG_DOORBELL);
i = 0;
@@ -1018,10 +1032,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
pmsg = (u16 *) ptr->pprov_data;
ppseudo_hdr = (struct pseudo_hdr *) pmsg;
- // Insert slow queue sequence number
+ /* Insert slow queue sequence number */
ppseudo_hdr->seq_num = info->squeseqnum++;
ppseudo_hdr->portsrc = 0;
- // Calculate new checksum
+ /* Calculate new checksum */
ppseudo_hdr->checksum = *pmsg++;
DEBUG(1, "checksum = 0x%x\n",
ppseudo_hdr->checksum);
@@ -1036,8 +1050,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
kfree(ptr->pprov_data);
kfree(ptr);
}
- // Indicate adapter is ready to take application messages after all
- // provisioning messages are sent
+ /*
+ * Indicate adapter is ready to take application messages after all
+ * provisioning messages are sent
+ */
info->CardReady = 1;
break;
case MEDIA_STATE:
@@ -1118,8 +1134,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
break;
case DSP_GET_INFO:
DEBUG(1, "FT1000:drivermsg:Got DSP_GET_INFO\n");
- // copy dsp info block to dsp
- // allow any outstanding ioctl to finish
+ /*
+ * copy dsp info block to dsp
+ * allow any outstanding ioctl to finish
+ */
mdelay(10);
tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
@@ -1132,8 +1150,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
}
if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
- // Put message into Slow Queue
- // Form Pseudo header
+ /*
+ * Put message into Slow Queue
+ * Form Pseudo header
+ */
pmsg = (u16 *) info->DSPInfoBlk;
ppseudo_hdr = (struct pseudo_hdr *) pmsg;
ppseudo_hdr->length =
@@ -1147,11 +1167,11 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
ppseudo_hdr->rsvd1 = 0;
ppseudo_hdr->rsvd2 = 0;
ppseudo_hdr->qos_class = 0;
- // Insert slow queue sequence number
+ /* Insert slow queue sequence number */
ppseudo_hdr->seq_num = info->squeseqnum++;
- // Insert application id
+ /* Insert application id */
ppseudo_hdr->portsrc = 0;
- // Calculate new checksum
+ /* Calculate new checksum */
ppseudo_hdr->checksum = *pmsg++;
for (i = 1; i < 7; i++) {
ppseudo_hdr->checksum ^= *pmsg++;
@@ -1165,8 +1185,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
break;
case GET_DRV_ERR_RPT_MSG:
DEBUG(1, "FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n");
- // copy driver error message to dsp
- // allow any outstanding ioctl to finish
+ /*
+ * copy driver error message to dsp
+ * allow any outstanding ioctl to finish
+ */
mdelay(10);
tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
@@ -1179,8 +1201,10 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
}
if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
- // Put message into Slow Queue
- // Form Pseudo header
+ /*
+ * Put message into Slow Queue
+ * Form Pseudo header
+ */
pmsg = (u16 *) & tempbuffer[0];
ppseudo_hdr = (struct pseudo_hdr *) pmsg;
ppseudo_hdr->length = htons(0x0012);
@@ -1193,11 +1217,11 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
ppseudo_hdr->rsvd1 = 0;
ppseudo_hdr->rsvd2 = 0;
ppseudo_hdr->qos_class = 0;
- // Insert slow queue sequence number
+ /* Insert slow queue sequence number */
ppseudo_hdr->seq_num = info->squeseqnum++;
- // Insert application id
+ /* Insert application id */
ppseudo_hdr->portsrc = 0;
- // Calculate new checksum
+ /* Calculate new checksum */
ppseudo_hdr->checksum = *pmsg++;
for (i=1; i<7; i++) {
ppseudo_hdr->checksum ^= *pmsg++;
@@ -1228,18 +1252,18 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
}
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_parse_dpram_msg
-// Description: This function will parse the message received from the DSP
-// via the DPRAM interface.
-// Input:
-// dev - device structure
-// Output:
-// status - FAILURE
-// SUCCESS
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_parse_dpram_msg
+ Description: This function will parse the message received from the DSP
+ via the DPRAM interface.
+ Input:
+ dev - device structure
+ Output:
+ status - FAILURE
+ SUCCESS
+
+ -------------------------------------------------------------------------*/
static int ft1000_parse_dpram_msg(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -1255,7 +1279,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
DEBUG(1, "Doorbell = 0x%x\n", doorbell);
if (doorbell & FT1000_ASIC_RESET_REQ) {
- // Copy DSP session record from info block
+ /* Copy DSP session record from info block */
spin_lock_irqsave(&info->dpram_lock, flags);
if (info->AsicID == ELECTRABUZZ_ID) {
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
@@ -1274,7 +1298,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
}
spin_unlock_irqrestore(&info->dpram_lock, flags);
- // clear ASIC RESET request
+ /* clear ASIC RESET request */
ft1000_write_reg(dev, FT1000_REG_DOORBELL,
FT1000_ASIC_RESET_REQ);
DEBUG(1, "Got an ASIC RESET Request\n");
@@ -1282,7 +1306,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
FT1000_ASIC_RESET_DSP);
if (info->AsicID == MAGNEMITE_ID) {
- // Setting MAGNEMITE ASIC to big endian mode
+ /* Setting MAGNEMITE ASIC to big endian mode */
ft1000_write_reg(dev, FT1000_REG_SUP_CTRL,
HOST_INTF_BE);
}
@@ -1315,8 +1339,10 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
if ((total_len < MAX_CMD_SQSIZE) && (total_len > sizeof(struct pseudo_hdr))) {
total_len += nxtph;
cnt = 0;
- // ft1000_read_reg will return a value that needs to be byteswap
- // in order to get DSP_QID_OFFSET.
+ /*
+ * ft1000_read_reg will return a value that needs to be byteswapped
+ * in order to get DSP_QID_OFFSET.
+ */
if (info->AsicID == ELECTRABUZZ_ID) {
portid =
(ft1000_read_dpram
@@ -1332,7 +1358,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
DEBUG(1, "DSP_QID = 0x%x\n", portid);
if (portid == DRIVERID) {
- // We are assumming one driver message from the DSP at a time.
+ /* We are assuming one driver message from the DSP at a time. */
ft1000_proc_drvmsg(dev);
}
}
@@ -1340,7 +1366,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
}
if (doorbell & FT1000_DB_COND_RESET) {
- // Reset ASIC and DSP
+ /* Reset ASIC and DSP */
if (info->AsicID == ELECTRABUZZ_ID) {
info->DSP_TIME[0] =
ft1000_read_dpram(dev, FT1000_DSP_TIMER0);
@@ -1370,7 +1396,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
ft1000_write_reg(dev, FT1000_REG_DOORBELL,
FT1000_DB_COND_RESET);
}
- // let's clear any unexpected doorbells from DSP
+ /* let's clear any unexpected doorbells from DSP */
doorbell =
doorbell & ~(FT1000_DB_DPRAM_RX | FT1000_ASIC_RESET_REQ |
FT1000_DB_COND_RESET | 0xff00);
@@ -1383,18 +1409,18 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_flush_fifo
-// Description: This function will flush one packet from the downlink
-// FIFO.
-// Input:
-// dev - device structure
-// drv_err - driver error causing the flush fifo
-// Output:
-// None.
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_flush_fifo
+ Description: This function will flush one packet from the downlink
+ FIFO.
+ Input:
+ dev - device structure
+ drv_err - driver error causing the flush fifo
+ Output:
+ None.
+
+ -------------------------------------------------------------------------*/
static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -1432,7 +1458,7 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
ft1000_reset_card(dev);
return;
} else {
- // Flush corrupted pkt from FIFO
+ /* Flush corrupted pkt from FIFO */
i = 0;
do {
if (info->AsicID == ELECTRABUZZ_ID) {
@@ -1447,8 +1473,10 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
inw(dev->base_addr + FT1000_REG_MAG_DFSR);
}
i++;
- // This should never happen unless the ASIC is broken.
- // We must reset to recover.
+ /*
+ * This should never happen unless the ASIC is broken.
+ * We must reset to recover.
+ */
if ((i > 2048) || (tempword == 0)) {
if (info->AsicID == ELECTRABUZZ_ID) {
info->DSP_TIME[0] =
@@ -1482,17 +1510,19 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
FT1000_MAG_DSP_TIMER3_INDX);
}
if (tempword == 0) {
- // Let's check if ASIC reads are still ok by reading the Mask register
- // which is never zero at this point of the code.
+ /*
+ * Let's check if ASIC reads are still ok by reading the Mask register
+ * which is never zero at this point of the code.
+ */
tempword =
inw(dev->base_addr +
FT1000_REG_SUP_IMASK);
if (tempword == 0) {
- // This indicates that we can not communicate with the ASIC
+ /* This indicates that we can not communicate with the ASIC */
info->DrvErrNum =
FIFO_FLUSH_BADCNT;
} else {
- // Let's assume that we really flush the FIFO
+ /* Let's assume that we really flush the FIFO */
pcmcia->PktIntfErr++;
return;
}
@@ -1506,9 +1536,9 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
if (info->AsicID == ELECTRABUZZ_ID) {
i++;
DEBUG(0, "Flushing FIFO complete = %x\n", tempword);
- // Flush last word in FIFO.
+ /* Flush last word in FIFO. */
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
- // Update FIFO counter for DSP
+ /* Update FIFO counter for DSP */
i = i * 2;
DEBUG(0, "Flush Data byte count to dsp = %d\n", i);
info->fifo_cnt += i;
@@ -1516,7 +1546,7 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
info->fifo_cnt);
} else {
DEBUG(0, "Flushing FIFO complete = %x\n", tempword);
- // Flush last word in FIFO
+ /* Flush last word in FIFO */
templong = inl(dev->base_addr + FT1000_REG_MAG_DFR);
tempword = inw(dev->base_addr + FT1000_REG_SUP_STAT);
DEBUG(0, "FT1000_REG_SUP_STAT = 0x%x\n", tempword);
@@ -1529,19 +1559,19 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
}
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_copy_up_pkt
-// Description: This function will pull Flarion packets out of the Downlink
-// FIFO and convert it to an ethernet packet. The ethernet packet will
-// then be deliver to the TCP/IP stack.
-// Input:
-// dev - device structure
-// Output:
-// status - FAILURE
-// SUCCESS
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_copy_up_pkt
+ Description: This function will pull Flarion packets out of the Downlink
+ FIFO and convert it to an ethernet packet. The ethernet packet will
+ then be delivered to the TCP/IP stack.
+ Input:
+ dev - device structure
+ Output:
+ status - FAILURE
+ SUCCESS
+
+ -------------------------------------------------------------------------*/
static int ft1000_copy_up_pkt(struct net_device *dev)
{
u16 tempword;
@@ -1556,7 +1586,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
u32 templong;
DEBUG(1, "ft1000_copy_up_pkt\n");
- // Read length
+ /* Read length */
if (info->AsicID == ELECTRABUZZ_ID) {
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
len = tempword;
@@ -1570,7 +1600,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
if (len > ENET_MAX_SIZE) {
DEBUG(0, "size of ethernet packet invalid\n");
if (info->AsicID == MAGNEMITE_ID) {
- // Read High word to complete 32 bit access
+ /* Read High word to complete 32 bit access */
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
}
ft1000_flush_fifo(dev, DSP_PKTLEN_INFO);
@@ -1582,7 +1612,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
if (skb == NULL) {
DEBUG(0, "No Network buffers available\n");
- // Read High word to complete 32 bit access
+ /* Read High word to complete 32 bit access */
if (info->AsicID == MAGNEMITE_ID) {
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
}
@@ -1592,13 +1622,13 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
}
pbuffer = (u8 *) skb_put(skb, len + 12);
- // Pseudo header
+ /* Pseudo header */
if (info->AsicID == ELECTRABUZZ_ID) {
for (i = 1; i < 7; i++) {
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
chksum ^= tempword;
}
- // read checksum value
+ /* read checksum value */
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
} else {
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
@@ -1625,7 +1655,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
DEBUG(1, "Pseudo = 0x%x\n", tempword);
chksum ^= tempword;
- // read checksum value
+ /* read checksum value */
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
DEBUG(1, "Pseudo = 0x%x\n", tempword);
}
@@ -1638,10 +1668,10 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
kfree_skb(skb);
return FAILURE;
}
- //subtract the number of bytes read already
+ /* subtract the number of bytes read already */
ptemp = pbuffer;
- // fake MAC address
+ /* fake MAC address */
*pbuffer++ = dev->dev_addr[0];
*pbuffer++ = dev->dev_addr[1];
*pbuffer++ = dev->dev_addr[2];
@@ -1666,7 +1696,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
}
}
- // Need to read one more word if odd byte
+ /* Need to read one more word if odd byte */
if (len & 0x0001) {
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
*pbuffer++ = (u8) (tempword >> 8);
@@ -1679,7 +1709,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
*ptemplong++ = templong;
}
- // Need to read one more word if odd align.
+ /* Need to read one more word if odd align. */
if (len & 0x0003) {
templong = inl(dev->base_addr + FT1000_REG_MAG_DFR);
DEBUG(1, "Data = 0x%8x\n", templong);
@@ -1699,11 +1729,11 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
netif_rx(skb);
info->stats.rx_packets++;
- // Add on 12 bytes for MAC address which was removed
+ /* Add on 12 bytes for MAC address which was removed */
info->stats.rx_bytes += (len + 12);
if (info->AsicID == ELECTRABUZZ_ID) {
- // track how many bytes have been read from FIFO - round up to 16 bit word
+ /* track how many bytes have been read from FIFO - round up to 16 bit word */
tempword = len + 16;
if (tempword & 0x01)
tempword++;
@@ -1715,21 +1745,21 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
return SUCCESS;
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_copy_down_pkt
-// Description: This function will take an ethernet packet and convert it to
-// a Flarion packet prior to sending it to the ASIC Downlink
-// FIFO.
-// Input:
-// dev - device structure
-// packet - address of ethernet packet
-// len - length of IP packet
-// Output:
-// status - FAILURE
-// SUCCESS
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+
+ Function: ft1000_copy_down_pkt
+ Description: This function will take an ethernet packet and convert it to
+ a Flarion packet prior to sending it to the ASIC Downlink
+ FIFO.
+ Input:
+ dev - device structure
+ packet - address of ethernet packet
+ len - length of IP packet
+ Output:
+ status - FAILURE
+ SUCCESS
+
+ -------------------------------------------------------------------------*/
static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -1744,7 +1774,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
DEBUG(1, "ft1000_hw: copy_down_pkt()\n");
- // Check if there is room on the FIFO
+ /* Check if there is room on the FIFO */
if (len > ft1000_read_fifo_len(dev)) {
udelay(10);
if (len > ft1000_read_fifo_len(dev)) {
@@ -1769,15 +1799,15 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
return SUCCESS;
}
}
- // Create pseudo header and send pseudo/ip to hardware
+ /* Create pseudo header and send pseudo/ip to hardware */
if (info->AsicID == ELECTRABUZZ_ID) {
pseudo.blk.length = len;
} else {
pseudo.blk.length = ntohs(len);
}
- pseudo.blk.source = DSPID; // Need to swap to get in correct order
+ pseudo.blk.source = DSPID; /* Need to swap to get in correct order */
pseudo.blk.destination = HOSTID;
- pseudo.blk.portdest = NETWORKID; // Need to swap to get in correct order
+ pseudo.blk.portdest = NETWORKID; /* Need to swap to get in correct order */
pseudo.blk.portsrc = DSPAIRID;
pseudo.blk.sh_str_id = 0;
pseudo.blk.control = 0;
@@ -1791,14 +1821,14 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
pseudo.blk.checksum ^= pseudo.buff[i];
}
- // Production Mode
+ /* Production Mode */
if (info->AsicID == ELECTRABUZZ_ID) {
- // copy first word to UFIFO_BEG reg
+ /* copy first word to UFIFO_BEG reg */
ft1000_write_reg(dev, FT1000_REG_UFIFO_BEG, pseudo.buff[0]);
DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 0 BEG = 0x%04x\n",
pseudo.buff[0]);
- // copy subsequent words to UFIFO_MID reg
+ /* copy subsequent words to UFIFO_MID reg */
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[1]);
DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 1 MID = 0x%04x\n",
pseudo.buff[1]);
@@ -1821,7 +1851,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 7 MID = 0x%04x\n",
pseudo.buff[7]);
- // Write PPP type + IP Packet into Downlink FIFO
+ /* Write PPP type + IP Packet into Downlink FIFO */
for (i = 0; i < (len >> 1) - 1; i++) {
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID,
htons(*packet));
@@ -1831,7 +1861,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
packet++;
}
- // Check for odd byte
+ /* Check for odd byte */
if (len & 0x0001) {
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID,
htons(*packet));
@@ -1870,12 +1900,12 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
*(u32 *) & pseudo.buff[6]);
plong = (u32 *) packet;
- // Write PPP type + IP Packet into Downlink FIFO
+ /* Write PPP type + IP Packet into Downlink FIFO */
for (i = 0; i < (len >> 2); i++) {
outl(*plong++, dev->base_addr + FT1000_REG_MAG_UFDR);
}
- // Check for odd alignment
+ /* Check for odd alignment */
if (len & 0x0003) {
DEBUG(1,
"ft1000_hw:ft1000_copy_down_pkt:data = 0x%8x\n",
@@ -1886,7 +1916,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
}
info->stats.tx_packets++;
- // Add 14 bytes for MAC address plus ethernet type
+ /* Add 14 bytes for MAC address plus ethernet type */
info->stats.tx_bytes += (len + 14);
return SUCCESS;
}
@@ -1931,7 +1961,7 @@ static int ft1000_close(struct net_device *dev)
ft1000_disable_interrupts(dev);
ft1000_write_reg(dev, FT1000_REG_RESET, DSP_RESET_BIT);
- //reset ASIC
+ /* reset ASIC */
ft1000_reset_asic(dev);
}
return 0;
@@ -1995,10 +2025,10 @@ static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
ft1000_disable_interrupts(dev);
- // Read interrupt type
+ /* Read interrupt type */
inttype = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
- // Make sure we process all interrupt before leaving the ISR due to the edge trigger interrupt type
+ /* Make sure we process all interrupts before leaving the ISR due to the edge-triggered interrupt type */
while (inttype) {
if (inttype & ISR_DOORBELL_PEND)
ft1000_parse_dpram_msg(dev);
@@ -2008,7 +2038,7 @@ static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
cnt = 0;
do {
- // Check if we have packets in the Downlink FIFO
+ /* Check if we have packets in the Downlink FIFO */
if (info->AsicID == ELECTRABUZZ_ID) {
tempword =
ft1000_read_reg(dev,
@@ -2027,12 +2057,12 @@ static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
} while (cnt < MAX_RCV_LOOP);
}
- // clear interrupts
+ /* clear interrupts */
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
DEBUG(1, "ft1000_hw: interrupt status register = 0x%x\n", tempword);
ft1000_write_reg(dev, FT1000_REG_SUP_ISR, tempword);
- // Read interrupt type
+ /* Read interrupt type */
inttype = ft1000_read_reg (dev, FT1000_REG_SUP_ISR);
DEBUG(1,"ft1000_hw: interrupt status register after clear = 0x%x\n",inttype);
}
@@ -2044,7 +2074,7 @@ void stop_ft1000_card(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
struct prov_record *ptr;
-// int cnt;
+ /* int cnt; */
DEBUG(0, "ft1000_hw: stop_ft1000_card()\n");
@@ -2053,7 +2083,7 @@ void stop_ft1000_card(struct net_device *dev)
netif_stop_queue(dev);
ft1000_disable_interrupts(dev);
- // Make sure we free any memory reserve for provisioning
+ /* Make sure we free any memory reserved for provisioning */
while (list_empty(&info->prov_list) == 0) {
ptr = list_entry(info->prov_list.next, struct prov_record, list);
list_del(&ptr->list);
@@ -2109,7 +2139,7 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
struct ft1000_pcmcia *pcmcia;
struct net_device *dev;
- static const struct net_device_ops ft1000ops = // Slavius 21.10.2009 due to kernel changes
+ static const struct net_device_ops ft1000ops = /* Slavius 21.10.2009 due to kernel changes */
{
.ndo_open = &ft1000_open,
.ndo_stop = &ft1000_close,
@@ -2169,12 +2199,12 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
info->squeseqnum = 0;
-// dev->hard_start_xmit = &ft1000_start_xmit;
-// dev->get_stats = &ft1000_stats;
-// dev->open = &ft1000_open;
-// dev->stop = &ft1000_close;
+ /* dev->hard_start_xmit = &ft1000_start_xmit; */
+ /* dev->get_stats = &ft1000_stats; */
+ /* dev->open = &ft1000_open; */
+ /* dev->stop = &ft1000_close; */
- dev->netdev_ops = &ft1000ops; // Slavius 21.10.2009 due to kernel changes
+ dev->netdev_ops = &ft1000ops; /* Slavius 21.10.2009 due to kernel changes */
DEBUG(0, "device name = %s\n", dev->name);
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
index 68a55ce69200..ffdc7f597a96 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
@@ -560,6 +560,8 @@ static long ft1000_ioctl(struct file *file, unsigned int command,
/* Get the length field to see how many bytes to copy */
result = get_user(msgsz, (__u16 __user *)argp);
+ if (result)
+ break;
msgsz = ntohs(msgsz);
/* DEBUG("FT1000:ft1000_ioctl: length of message = %d\n", msgsz); */
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
index 12f333fa59b5..cab9cdf6273e 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
@@ -4,7 +4,6 @@
* This file is part of Express Card USB Driver
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -55,7 +54,7 @@
#define MAX_LENGTH 0x7f0
-// Temporary download mechanism for Magnemite
+/* Temporary download mechanism for Magnemite */
#define DWNLD_MAG_TYPE_LOC 0x00
#define DWNLD_MAG_LEN_LOC 0x01
#define DWNLD_MAG_ADDR_LOC 0x02
@@ -74,36 +73,36 @@
#define HANDSHAKE_MAG_TIMEOUT_VALUE 0xF1F1
-// New Magnemite downloader
+/* New Magnemite downloader */
#define DWNLD_MAG1_HANDSHAKE_LOC 0x00
#define DWNLD_MAG1_TYPE_LOC 0x01
#define DWNLD_MAG1_SIZE_LOC 0x02
#define DWNLD_MAG1_PS_HDR_LOC 0x03
struct dsp_file_hdr {
- long version_id; // Version ID of this image format.
- long package_id; // Package ID of code release.
- long build_date; // Date/time stamp when file was built.
- long commands_offset; // Offset to attached commands in Pseudo Hdr format.
- long loader_offset; // Offset to bootloader code.
- long loader_code_address; // Start address of bootloader.
- long loader_code_end; // Where bootloader code ends.
- long loader_code_size;
- long version_data_offset; // Offset were scrambled version data begins.
- long version_data_size; // Size, in words, of scrambled version data.
- long nDspImages; // Number of DSP images in file.
+ long version_id; /* Version ID of this image format. */
+ long package_id; /* Package ID of code release. */
+ long build_date; /* Date/time stamp when file was built. */
+ long commands_offset; /* Offset to attached commands in Pseudo Hdr format. */
+ long loader_offset; /* Offset to bootloader code. */
+ long loader_code_address; /* Start address of bootloader. */
+ long loader_code_end; /* Where bootloader code ends. */
+ long loader_code_size;
+ long version_data_offset; /* Offset where scrambled version data begins. */
+ long version_data_size; /* Size, in words, of scrambled version data. */
+ long nDspImages; /* Number of DSP images in file. */
};
#pragma pack(1)
struct dsp_image_info {
- long coff_date; // Date/time when DSP Coff image was built.
- long begin_offset; // Offset in file where image begins.
- long end_offset; // Offset in file where image begins.
- long run_address; // On chip Start address of DSP code.
- long image_size; // Size of image.
- long version; // Embedded version # of DSP code.
- unsigned short checksum; // DSP File checksum
- unsigned short pad1;
+ long coff_date; /* Date/time when DSP Coff image was built. */
+ long begin_offset; /* Offset in file where image begins. */
+ long end_offset; /* Offset in file where image ends. */
+ long run_address; /* On chip Start address of DSP code. */
+ long image_size; /* Size of image. */
+ long version; /* Embedded version # of DSP code. */
+ unsigned short checksum; /* DSP File checksum */
+ unsigned short pad1;
};
@@ -151,7 +150,7 @@ static int check_usb_db(struct ft1000_usb *ft1000dev)
}
}
- return HANDSHAKE_MAG_TIMEOUT_VALUE;
+ return -1;
}
/* gets the handshake and compares it with the expected value */
@@ -172,9 +171,8 @@ static u16 get_handshake(struct ft1000_usb *ft1000dev, u16 expected_value)
ft1000dev->fcodeldr);
ft1000dev->fcodeldr = 0;
status = check_usb_db(ft1000dev);
- if (status != STATUS_SUCCESS) {
+ if (status != 0) {
DEBUG("get_handshake: check_usb_db failed\n");
- status = STATUS_FAILURE;
break;
}
status = ft1000_write_register(ft1000dev,
@@ -202,7 +200,7 @@ static u16 get_handshake(struct ft1000_usb *ft1000dev, u16 expected_value)
}
/* write the handshake value to the handshake location */
-static void put_handshake(struct ft1000_usb *ft1000dev,u16 handshake_value)
+static void put_handshake(struct ft1000_usb *ft1000dev, u16 handshake_value)
{
u32 tempx;
u16 tempword;
@@ -268,11 +266,12 @@ static u16 get_handshake_usb(struct ft1000_usb *ft1000dev, u16 expected_value)
return HANDSHAKE_TIMEOUT_VALUE;
}
-static void put_handshake_usb(struct ft1000_usb *ft1000dev,u16 handshake_value)
+static void put_handshake_usb(struct ft1000_usb *ft1000dev, u16 handshake_value)
{
int i;
- for (i=0; i<1000; i++);
+ for (i = 0; i < 1000; i++)
+ ;
}
static u16 get_request_type(struct ft1000_usb *ft1000dev)
@@ -450,7 +449,7 @@ static int write_dpram32_and_check(struct ft1000_usb *ft1000dev,
static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile,
long word_length)
{
- int status = STATUS_SUCCESS;
+ int status = 0;
u16 dpram;
int loopcnt, i;
u16 tempword;
@@ -499,7 +498,7 @@ static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile,
} else {
status = write_dpram32_and_check(ft1000dev, tempbuffer,
dpram);
- if (status != STATUS_SUCCESS) {
+ if (status != 0) {
DEBUG("FT1000:download:Write failed tempbuffer[31] = 0x%x\n", tempbuffer[31]);
break;
}
@@ -509,9 +508,9 @@ static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile,
return status;
}
-static void usb_dnld_complete (struct urb *urb)
+static void usb_dnld_complete(struct urb *urb)
{
- //DEBUG("****** usb_dnld_complete\n");
+ /* DEBUG("****** usb_dnld_complete\n"); */
}
/* writes a block of DSP image to DPRAM
@@ -523,7 +522,7 @@ static void usb_dnld_complete (struct urb *urb)
static int write_blk_fifo(struct ft1000_usb *ft1000dev, u16 **pUsFile,
u8 **pUcFile, long word_length)
{
- int Status = STATUS_SUCCESS;
+ int Status = 0;
int byte_length;
byte_length = word_length * 4;
@@ -586,12 +585,12 @@ static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file,
/*NdisMSleep (100); */
if (word_length > MAX_LENGTH) {
DEBUG("FT1000:download:Download error: Max length exceeded\n");
- return STATUS_FAILURE;
+ return -1;
}
if ((word_length * 2 + (long)c_file) > (long)endpoint) {
/* Error, beyond boot code range.*/
DEBUG("FT1000:download:Download error: Requested len=%d exceeds BOOT code boundary.\n", (int)word_length);
- return STATUS_FAILURE;
+ return -1;
}
if (word_length & 0x1)
word_length++;
@@ -601,11 +600,11 @@ static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file,
status = write_blk(ft1000dev, s_file, c_file, word_length);
/*DEBUG("write_blk returned %d\n", status); */
} else {
- write_blk_fifo(ft1000dev, s_file, c_file, word_length);
+ status = write_blk_fifo(ft1000dev, s_file, c_file, word_length);
if (ft1000dev->usbboot == 0)
ft1000dev->usbboot++;
if (ft1000dev->usbboot == 1)
- ft1000_write_dpram16(ft1000dev,
+ status |= ft1000_write_dpram16(ft1000dev,
DWNLD_MAG1_PS_HDR_LOC, 0, 0);
}
return status;
@@ -615,7 +614,7 @@ static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file,
int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
u32 FileLength)
{
- int status = STATUS_SUCCESS;
+ int status = 0;
u32 state;
u16 handshake;
struct pseudo_hdr *pseudo_header;
@@ -651,9 +650,9 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
ft1000dev->usbboot = 0;
ft1000dev->dspalive = 0xffff;
- //
- // Get version id of file, at first 4 bytes of file, for newer files.
- //
+ /*
+ * Get version id of file, at first 4 bytes of file, for newer files.
+ */
state = STATE_START_DWNLD;
@@ -670,7 +669,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
loader_code_size = file_hdr->loader_code_size;
correct_version = false;
- while ((status == STATUS_SUCCESS) && (state != STATE_DONE_FILE)) {
+ while ((status == 0) && (state != STATE_DONE_FILE)) {
switch (state) {
case STATE_START_DWNLD:
status = scram_start_dwnld(ft1000dev, &handshake,
@@ -702,8 +701,8 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
/* Reposition ptrs to beginning of code section */
s_file = (u16 *) (boot_end);
c_file = (u8 *) (boot_end);
- //DEBUG("FT1000:download:s_file = 0x%8x\n", (int)s_file);
- //DEBUG("FT1000:download:c_file = 0x%8x\n", (int)c_file);
+ /* DEBUG("FT1000:download:s_file = 0x%8x\n", (int)s_file); */
+ /* DEBUG("FT1000:download:c_file = 0x%8x\n", (int)c_file); */
state = STATE_CODE_DWNLD;
ft1000dev->fcodeldr = 1;
break;
@@ -717,7 +716,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
DEBUG
("FT1000:download:Download error: Bad request type=%d in BOOT download state.\n",
request);
- status = STATUS_FAILURE;
+ status = -1;
break;
}
if (ft1000dev->usbboot)
@@ -729,13 +728,13 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
} else {
DEBUG
("FT1000:download:Download error: Handshake failed\n");
- status = STATUS_FAILURE;
+ status = -1;
}
break;
case STATE_CODE_DWNLD:
- //DEBUG("FT1000:STATE_CODE_DWNLD\n");
+ /* DEBUG("FT1000:STATE_CODE_DWNLD\n"); */
ft1000dev->bootmode = 0;
if (ft1000dev->usbboot)
handshake =
@@ -773,7 +772,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
} else {
DEBUG
("FT1000:download:Download error: Got Run address request before image offset request.\n");
- status = STATUS_FAILURE;
+ status = -1;
break;
}
break;
@@ -789,7 +788,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
} else {
DEBUG
("FT1000:download:Download error: Got Size request before image offset request.\n");
- status = STATUS_FAILURE;
+ status = -1;
break;
}
break;
@@ -805,11 +804,11 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
state = STATE_DONE_DWNLD;
break;
case REQUEST_CODE_SEGMENT:
- //DEBUG("FT1000:download: REQUEST_CODE_SEGMENT - CODELOADER\n");
+ /* DEBUG("FT1000:download: REQUEST_CODE_SEGMENT - CODELOADER\n"); */
if (!correct_version) {
DEBUG
("FT1000:download:Download error: Got Code Segment request before image offset request.\n");
- status = STATUS_FAILURE;
+ status = -1;
break;
}
@@ -823,7 +822,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
case REQUEST_MAILBOX_DATA:
DEBUG
("FT1000:download: REQUEST_MAILBOX_DATA\n");
- // Convert length from byte count to word count. Make sure we round up.
+ /* Convert length from byte count to word count. Make sure we round up. */
word_length =
(long)(pft1000info->DSPInfoBlklen +
1) / 2;
@@ -836,7 +835,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
* Position ASIC DPRAM auto-increment pointer.
*/
- data = (u16 *) & mailbox_data->data[0];
+ data = (u16 *) &mailbox_data->data[0];
dpram = (u16) DWNLD_MAG1_PS_HDR_LOC;
if (word_length & 0x1)
word_length++;
@@ -850,7 +849,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
status =
fix_ft1000_write_dpram32
(ft1000dev, dpram++,
- (u8 *) & templong);
+ (u8 *) &templong);
}
break;
@@ -939,7 +938,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
}
dsp_img_info++;
- } //end of for
+ } /* end of for */
if (!correct_version) {
/*
@@ -948,7 +947,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
DEBUG
("FT1000:download:Download error: Bad Version Request = 0x%x.\n",
(int)requested_version);
- status = STATUS_FAILURE;
+ status = -1;
break;
}
break;
@@ -957,7 +956,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
DEBUG
("FT1000:download:Download error: Bad request type=%d in CODE download state.\n",
request);
- status = STATUS_FAILURE;
+ status = -1;
break;
}
if (ft1000dev->usbboot)
@@ -969,7 +968,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
} else {
DEBUG
("FT1000:download:Download error: Handshake failed\n");
- status = STATUS_FAILURE;
+ status = -1;
}
break;
@@ -1002,7 +1001,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
(u32) (pseudo_header_len +
sizeof(struct
pseudo_hdr)));
- // link provisioning data
+ /* link provisioning data */
pprov_record =
kmalloc(sizeof(struct prov_record),
GFP_ATOMIC);
@@ -1013,7 +1012,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
list,
&pft1000info->
prov_list);
- // Move to next entry if available
+ /* Move to next entry if available */
c_file =
(u8 *) ((unsigned long)
c_file +
@@ -1026,14 +1025,14 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
}
} else {
kfree(pbuffer);
- status = STATUS_FAILURE;
+ status = -1;
}
} else {
- status = STATUS_FAILURE;
+ status = -1;
}
} else {
/* Checksum did not compute */
- status = STATUS_FAILURE;
+ status = -1;
}
DEBUG
("ft1000:download: after STATE_SECTION_PROV, state = %d, status= %d\n",
@@ -1046,23 +1045,23 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
break;
default:
- status = STATUS_FAILURE;
+ status = -1;
break;
} /* End Switch */
- if (status != STATUS_SUCCESS)
+ if (status != 0)
break;
/****
// Check if Card is present
status = Harley_Read_Register(&temp, FT1000_REG_SUP_IMASK);
if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0x0000) ) {
- break;
+ break;
}
status = Harley_Read_Register(&temp, FT1000_REG_ASIC_ID);
if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0xffff) ) {
- break;
+ break;
}
****/
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index 0d4931b2c2e2..a433e33049b5 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -1,12 +1,9 @@
-//=====================================================
-// CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved.
-//
-//
-// This file is part of Express Card USB Driver
-//
-// $Id:
-//====================================================
-#include <linux/init.h>
+/* CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved.
+*
+*
+* This file is part of Express Card USB Driver
+*/
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -27,7 +24,9 @@
#define HARLEY_READ_OPERATION 0xc1
#define HARLEY_WRITE_OPERATION 0x41
-//#define JDEBUG
+#if 0
+#define JDEBUG
+#endif
static int ft1000_submit_rx_urb(struct ft1000_info *info);
@@ -35,32 +34,22 @@ static u8 tempbuffer[1600];
#define MAX_RCV_LOOP 100
-//---------------------------------------------------------------------------
-// Function: ft1000_control
-//
-// Parameters: ft1000_usb - device structure
-// pipe - usb control message pipe
-// request - control request
-// requesttype - control message request type
-// value - value to be written or 0
-// index - register index
-// data - data buffer to hold the read/write values
-// size - data size
-// timeout - control message time out value
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function sends a control message via USB interface synchronously
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* send a control message via USB interface synchronously
+* Parameters: ft1000_usb - device structure
+* pipe - usb control message pipe
+* request - control request
+* requesttype - control message request type
+* value - value to be written or 0
+* index - register index
+* data - data buffer to hold the read/write values
+* size - data size
+* timeout - control message time out value
+*/
static int ft1000_control(struct ft1000_usb *ft1000dev, unsigned int pipe,
u8 request, u8 requesttype, u16 value, u16 index,
void *data, u16 size, int timeout)
{
- u16 ret;
+ int ret;
if ((ft1000dev == NULL) || (ft1000dev->dev == NULL)) {
DEBUG("ft1000dev or ft1000dev->dev == NULL, failure\n");
@@ -76,26 +65,11 @@ static int ft1000_control(struct ft1000_usb *ft1000dev, unsigned int pipe,
return ret;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_read_register
-//
-// Parameters: ft1000_usb - device structure
-// Data - data buffer to hold the value read
-// nRegIndex - register index
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function returns the value in a register
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-
-int ft1000_read_register(struct ft1000_usb *ft1000dev, u16* Data,
+/* returns the value in a register */
+int ft1000_read_register(struct ft1000_usb *ft1000dev, u16 *Data,
u16 nRegIndx)
{
- int ret = STATUS_SUCCESS;
+ int ret = 0;
ret = ft1000_control(ft1000dev,
usb_rcvctrlpipe(ft1000dev->dev, 0),
@@ -110,25 +84,11 @@ int ft1000_read_register(struct ft1000_usb *ft1000dev, u16* Data,
return ret;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_write_register
-//
-// Parameters: ft1000_usb - device structure
-// value - value to write into a register
-// nRegIndex - register index
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function writes the value in a register
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* writes the value in a register */
int ft1000_write_register(struct ft1000_usb *ft1000dev, u16 value,
u16 nRegIndx)
{
- int ret = STATUS_SUCCESS;
+ int ret = 0;
ret = ft1000_control(ft1000dev,
usb_sndctrlpipe(ft1000dev->dev, 0),
@@ -143,27 +103,11 @@ int ft1000_write_register(struct ft1000_usb *ft1000dev, u16 value,
return ret;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_read_dpram32
-//
-// Parameters: ft1000_usb - device structure
-// indx - starting address to read
-// buffer - data buffer to hold the data read
-// cnt - number of byte read from DPRAM
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function read a number of bytes from DPRAM
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-
+/* read a number of bytes from DPRAM */
int ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
u16 cnt)
{
- int ret = STATUS_SUCCESS;
+ int ret = 0;
ret = ft1000_control(ft1000dev,
usb_rcvctrlpipe(ft1000dev->dev, 0),
@@ -178,26 +122,11 @@ int ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
return ret;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_write_dpram32
-//
-// Parameters: ft1000_usb - device structure
-// indx - starting address to write the data
-// buffer - data buffer to write into DPRAM
-// cnt - number of bytes to write
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function writes into DPRAM a number of bytes
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* writes into DPRAM a number of bytes */
int ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
u16 cnt)
{
- int ret = STATUS_SUCCESS;
+ int ret = 0;
if (cnt % 4)
cnt += cnt - (cnt % 4);
@@ -215,26 +144,11 @@ int ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
return ret;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_read_dpram16
-//
-// Parameters: ft1000_usb - device structure
-// indx - starting address to read
-// buffer - data buffer to hold the data read
-// hightlow - high or low 16 bit word
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function read 16 bits from DPRAM
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* read 16 bits from DPRAM */
int ft1000_read_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
u8 highlow)
{
- int ret = STATUS_SUCCESS;
+ int ret = 0;
u8 request;
if (highlow == 0)
@@ -255,25 +169,11 @@ int ft1000_read_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
return ret;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_write_dpram16
-//
-// Parameters: ft1000_usb - device structure
-// indx - starting address to write the data
-// value - 16bits value to write
-// hightlow - high or low 16 bit word
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function writes into DPRAM a number of bytes
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-int ft1000_write_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u16 value, u8 highlow)
+/* write 16 bits into DPRAM */
+int ft1000_write_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u16 value,
+ u8 highlow)
{
- int ret = STATUS_SUCCESS;
+ int ret = 0;
u8 request;
if (highlow == 0)
@@ -294,33 +194,18 @@ int ft1000_write_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u16 value, u8 h
return ret;
}
-//---------------------------------------------------------------------------
-// Function: fix_ft1000_read_dpram32
-//
-// Parameters: ft1000_usb - device structure
-// indx - starting address to read
-// buffer - data buffer to hold the data read
-//
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function read DPRAM 4 words at a time
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* read DPRAM 4 words at a time */
int fix_ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx,
u8 *buffer)
{
u8 buf[16];
u16 pos;
- int ret = STATUS_SUCCESS;
+ int ret = 0;
pos = (indx / 4) * 4;
ret = ft1000_read_dpram32(ft1000dev, pos, buf, 16);
- if (ret == STATUS_SUCCESS) {
+ if (ret == 0) {
pos = (indx % 4) * 4;
*buffer++ = buf[pos++];
*buffer++ = buf[pos++];
@@ -338,22 +223,7 @@ int fix_ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx,
}
-//---------------------------------------------------------------------------
-// Function: fix_ft1000_write_dpram32
-//
-// Parameters: ft1000_usb - device structure
-// indx - starting address to write
-// buffer - data buffer to write
-//
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function write to DPRAM 4 words at a time
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* write to DPRAM 4 words at a time */
int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer)
{
u16 pos1;
@@ -362,13 +232,13 @@ int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer)
u8 buf[32];
u8 resultbuffer[32];
u8 *pdata;
- int ret = STATUS_SUCCESS;
+ int ret = 0;
pos1 = (indx / 4) * 4;
pdata = buffer;
ret = ft1000_read_dpram32(ft1000dev, pos1, buf, 16);
- if (ret == STATUS_SUCCESS) {
+ if (ret == 0) {
pos2 = (indx % 4)*4;
buf[pos2++] = *buffer++;
buf[pos2++] = *buffer++;
@@ -382,24 +252,24 @@ int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer)
ret = ft1000_read_dpram32(ft1000dev, pos1, (u8 *)&resultbuffer[0], 16);
- if (ret == STATUS_SUCCESS) {
+ if (ret == 0) {
buffer = pdata;
for (i = 0; i < 16; i++) {
if (buf[i] != resultbuffer[i])
- ret = STATUS_FAILURE;
+ ret = -1;
}
}
- if (ret == STATUS_FAILURE) {
+ if (ret == -1) {
ret = ft1000_write_dpram32(ft1000dev, pos1,
(u8 *)&tempbuffer[0], 16);
ret = ft1000_read_dpram32(ft1000dev, pos1,
(u8 *)&resultbuffer[0], 16);
- if (ret == STATUS_SUCCESS) {
+ if (ret == 0) {
buffer = pdata;
for (i = 0; i < 16; i++) {
if (tempbuffer[i] != resultbuffer[i]) {
- ret = STATUS_FAILURE;
+ ret = -1;
DEBUG("%s Failed to write\n",
__func__);
}
@@ -410,20 +280,10 @@ int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer)
return ret;
}
-
-//------------------------------------------------------------------------
-//
-// Function: card_reset_dsp
-//
-// Synopsis: This function is called to reset or activate the DSP
-//
-// Arguments: value - reset or activate
-//
-// Returns: None
-//-----------------------------------------------------------------------
+/* reset or activate the DSP */
static void card_reset_dsp(struct ft1000_usb *ft1000dev, bool value)
{
- u16 status = STATUS_SUCCESS;
+ int status = 0;
u16 tempword;
status = ft1000_write_register(ft1000dev, HOST_INTF_BE,
@@ -457,21 +317,11 @@ static void card_reset_dsp(struct ft1000_usb *ft1000dev, bool value)
}
}
-//---------------------------------------------------------------------------
-// Function: card_send_command
-//
-// Parameters: ft1000_usb - device structure
-// ptempbuffer - command buffer
-// size - command buffer size
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function sends a command to ASIC
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* send a command to ASIC
+* Parameters: ft1000_usb - device structure
+* ptempbuffer - command buffer
+* size - command buffer size
+*/
void card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer,
int size)
{
@@ -486,7 +336,7 @@ void card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer,
ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL);
if (temp & 0x0100)
- msleep(10);
+ usleep_range(900, 1100);
/* check for odd word */
size = size + 2;
@@ -496,29 +346,21 @@ void card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer,
size += 4 - (size % 4);
ft1000_write_dpram32(ft1000dev, 0, commandbuf, size);
- msleep(1);
+ usleep_range(900, 1100);
ft1000_write_register(ft1000dev, FT1000_DB_DPRAM_TX,
FT1000_REG_DOORBELL);
- msleep(1);
+ usleep_range(900, 1100);
ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL);
- if ((temp & 0x0100) == 0) {
- //DEBUG("card_send_command: Message sent\n");
- }
+#if 0
+ if ((temp & 0x0100) == 0)
+ DEBUG("card_send_command: Message sent\n");
+#endif
}
-//--------------------------------------------------------------------------
-//
-// Function: dsp_reload
-//
-// Synopsis: This function is called to load or reload the DSP
-//
-// Arguments: ft1000dev - device structure
-//
-// Returns: None
-//-----------------------------------------------------------------------
+/* load or reload the DSP */
int dsp_reload(struct ft1000_usb *ft1000dev)
{
int status;
@@ -559,7 +401,7 @@ int dsp_reload(struct ft1000_usb *ft1000dev)
/* call codeloader */
status = scram_dnldr(ft1000dev, pFileStart, FileLength);
- if (status != STATUS_SUCCESS)
+ if (status != 0)
return -EIO;
msleep(1000);
@@ -569,17 +411,7 @@ int dsp_reload(struct ft1000_usb *ft1000dev)
return 0;
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_reset_asic
-// Description: This function will call the Card Service function to reset the
-// ASIC.
-// Input:
-// dev - device structure
-// Output:
-// none
-//
-//---------------------------------------------------------------------------
+/* call the Card Service function to reset the ASIC. */
static void ft1000_reset_asic(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -607,18 +439,6 @@ static void ft1000_reset_asic(struct net_device *dev)
DEBUG("ft1000_hw: interrupt status register = 0x%x\n", tempword);
}
-
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_reset_card
-// Description: This function will reset the card
-// Input:
-// dev - device structure
-// Output:
-// status - FALSE (card reset fail)
-// TRUE (card reset successful)
-//
-//---------------------------------------------------------------------------
static int ft1000_reset_card(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
@@ -666,19 +486,7 @@ static int ft1000_reset_card(struct net_device *dev)
return TRUE;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_usb_transmit_complete
-//
-// Parameters: urb - transmitted usb urb
-//
-//
-// Returns: none
-//
-// Description: This is the callback function when a urb is transmitted
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* callback function when a urb is transmitted */
static void ft1000_usb_transmit_complete(struct urb *urb)
{
@@ -690,22 +498,10 @@ static void ft1000_usb_transmit_complete(struct urb *urb)
netif_wake_queue(ft1000dev->net);
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_copy_down_pkt
-// Description: This function will take an ethernet packet and convert it to
-// a Flarion packet prior to sending it to the ASIC Downlink
-// FIFO.
-// Input:
-// dev - device structure
-// packet - address of ethernet packet
-// len - length of IP packet
-// Output:
-// status - FAILURE
-// SUCCESS
-//
-//---------------------------------------------------------------------------
-static int ft1000_copy_down_pkt(struct net_device *netdev, u8 * packet, u16 len)
+/* take an ethernet packet and convert it to a Flarion
+* packet prior to sending it to the ASIC Downlink FIFO.
+*/
+static int ft1000_copy_down_pkt(struct net_device *netdev, u8 *packet, u16 len)
{
struct ft1000_info *pInfo = netdev_priv(netdev);
struct ft1000_usb *pFt1000Dev = pInfo->priv;
@@ -769,20 +565,10 @@ static int ft1000_copy_down_pkt(struct net_device *netdev, u8 * packet, u16 len)
return 0;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_start_xmit
-//
-// Parameters: skb - socket buffer to be sent
-// dev - network device
-//
-//
-// Returns: none
-//
-// Description: transmit a ethernet packet
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* transmit an ethernet packet
+* Parameters: skb - socket buffer to be sent
+* dev - network device
+*/
static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ft1000_info *pInfo = netdev_priv(dev);
@@ -827,20 +613,7 @@ err:
return NETDEV_TX_OK;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_open
-//
-// Parameters:
-// dev - network device
-//
-//
-// Returns: none
-//
-// Description: open the network driver
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* open the network driver */
static int ft1000_open(struct net_device *dev)
{
struct ft1000_info *pInfo = netdev_priv(dev);
@@ -871,29 +644,14 @@ static struct net_device_stats *ft1000_netdev_stats(struct net_device *dev)
return &(info->stats);
}
-static const struct net_device_ops ftnet_ops =
-{
+static const struct net_device_ops ftnet_ops = {
.ndo_open = &ft1000_open,
.ndo_stop = &ft1000_close,
.ndo_start_xmit = &ft1000_start_xmit,
.ndo_get_stats = &ft1000_netdev_stats,
};
-//---------------------------------------------------------------------------
-// Function: init_ft1000_netdev
-//
-// Parameters: ft1000dev - device structure
-//
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function initialize the network device
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-
+/* initialize the network device */
static int ft1000_reset(void *dev)
{
ft1000_reset_card(dev);
@@ -931,14 +689,14 @@ int init_ft1000_netdev(struct ft1000_usb *ft1000dev)
card_nr[1] = '\0';
ret_val = kstrtou8(card_nr, 10, &gCardIndex);
if (ret_val) {
- printk(KERN_ERR "Can't parse netdev\n");
+ netdev_err(ft1000dev->net, "Can't parse netdev\n");
goto err_net;
}
ft1000dev->CardNumber = gCardIndex;
DEBUG("card number = %d\n", ft1000dev->CardNumber);
} else {
- printk(KERN_ERR "ft1000: Invalid device name\n");
+ netdev_err(ft1000dev->net, "ft1000: Invalid device name\n");
ret_val = -ENXIO;
goto err_net;
}
@@ -1014,20 +772,7 @@ err_net:
return ret_val;
}
-//---------------------------------------------------------------------------
-// Function: reg_ft1000_netdev
-//
-// Parameters: ft1000dev - device structure
-//
-//
-// Returns: STATUS_SUCCESS - success
-// STATUS_FAILURE - failure
-//
-// Description: This function register the network driver
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* register the network driver */
int reg_ft1000_netdev(struct ft1000_usb *ft1000dev,
struct usb_interface *intf)
{
@@ -1060,19 +805,9 @@ int reg_ft1000_netdev(struct ft1000_usb *ft1000dev,
return 0;
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_copy_up_pkt
-// Description: This function will take a packet from the FIFO up link and
-// convert it into an ethernet packet and deliver it to the IP stack
-// Input:
-// urb - the receiving usb urb
-//
-// Output:
-// status - FAILURE
-// SUCCESS
-//
-//---------------------------------------------------------------------------
+/* take a packet from the FIFO up link and
+* convert it into an ethernet packet and deliver it to the IP stack
+*/
static int ft1000_copy_up_pkt(struct urb *urb)
{
struct ft1000_info *info = urb->context;
@@ -1090,9 +825,9 @@ static int ft1000_copy_up_pkt(struct urb *urb)
if (ft1000dev->status & FT1000_STATUS_CLOSING) {
DEBUG("network driver is closed, return\n");
- return STATUS_SUCCESS;
+ return 0;
}
- // Read length
+ /* Read length */
len = urb->transfer_buffer_length;
lena = urb->actual_length;
@@ -1105,7 +840,7 @@ static int ft1000_copy_up_pkt(struct urb *urb)
if (tempword != *chksum) {
info->stats.rx_errors++;
ft1000_submit_rx_urb(info);
- return STATUS_FAILURE;
+ return -1;
}
skb = dev_alloc_skb(len + 12 + 2);
@@ -1114,7 +849,7 @@ static int ft1000_copy_up_pkt(struct urb *urb)
DEBUG("ft1000_copy_up_pkt: No Network buffers available\n");
info->stats.rx_errors++;
ft1000_submit_rx_urb(info);
- return STATUS_FAILURE;
+ return -1;
}
pbuffer = (u8 *) skb_put(skb, len + 12);
@@ -1151,23 +886,11 @@ static int ft1000_copy_up_pkt(struct urb *urb)
ft1000_submit_rx_urb(info);
- return SUCCESS;
+ return 0;
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_submit_rx_urb
-// Description: the receiving function of the network driver
-//
-// Input:
-// info - a private structure contains the device information
-//
-// Output:
-// status - FAILURE
-// SUCCESS
-//
-//---------------------------------------------------------------------------
+/* the receiving function of the network driver */
static int ft1000_submit_rx_urb(struct ft1000_info *info)
{
int result;
@@ -1196,20 +919,7 @@ static int ft1000_submit_rx_urb(struct ft1000_info *info)
return 0;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_close
-//
-// Parameters:
-// net - network device
-//
-//
-// Returns: none
-//
-// Description: close the network driver
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/* close the network driver */
int ft1000_close(struct net_device *net)
{
struct ft1000_info *pInfo = netdev_priv(net);
@@ -1227,26 +937,14 @@ int ft1000_close(struct net_device *net)
return 0;
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_chkcard
-// Description: This function will check if the device is presently available on
-// the system.
-// Input:
-// dev - device structure
-// Output:
-// status - FALSE (device is not present)
-// TRUE (device is present)
-//
-//---------------------------------------------------------------------------
+/* check if the device is presently available on the system. */
static int ft1000_chkcard(struct ft1000_usb *dev)
{
u16 tempword;
- u16 status;
+ int status;
if (dev->fCondResetPend) {
- DEBUG
- ("ft1000_hw:ft1000_chkcard:Card is being reset, return FALSE\n");
+ DEBUG("ft1000_hw:ft1000_chkcard:Card is being reset, return FALSE\n");
return TRUE;
}
/* Mask register is used to check for device presence since it is never
@@ -1254,8 +952,7 @@ static int ft1000_chkcard(struct ft1000_usb *dev)
*/
status = ft1000_read_register(dev, &tempword, FT1000_REG_SUP_IMASK);
if (tempword == 0) {
- DEBUG
- ("ft1000_hw:ft1000_chkcard: IMASK = 0 Card not detected\n");
+ DEBUG("ft1000_hw:ft1000_chkcard: IMASK = 0 Card not detected\n");
return FALSE;
}
/* The system will return the value of 0xffff for the version register
@@ -1264,30 +961,22 @@ static int ft1000_chkcard(struct ft1000_usb *dev)
status = ft1000_read_register(dev, &tempword, FT1000_REG_ASIC_ID);
if (tempword != 0x1b01) {
dev->status |= FT1000_STATUS_CLOSING;
- DEBUG
- ("ft1000_hw:ft1000_chkcard: Version = 0xffff Card not detected\n");
+ DEBUG("ft1000_hw:ft1000_chkcard: Version = 0xffff Card not detected\n");
return FALSE;
}
return TRUE;
}
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_receive_cmd
-// Description: This function will read a message from the dpram area.
-// Input:
-// dev - network device structure
-// pbuffer - caller supply address to buffer
-// pnxtph - pointer to next pseudo header
-// Output:
-// Status = 0 (unsuccessful)
-// = 1 (successful)
-//
-//---------------------------------------------------------------------------
+/* read a message from the dpram area.
+* Input:
+* dev - network device structure
+* pbuffer - caller-supplied buffer address
+*/
static bool ft1000_receive_cmd(struct ft1000_usb *dev, u16 *pbuffer,
- int maxsz, u16 *pnxtph)
+ int maxsz)
{
- u16 size, ret;
+ u16 size;
+ int ret;
u16 *ppseudohdr;
int i;
u16 tempword;
@@ -1359,7 +1048,7 @@ static int ft1000_dsp_prov(void *arg)
struct prov_record *ptr;
struct pseudo_hdr *ppseudo_hdr;
u16 *pmsg;
- u16 status;
+ int status;
u16 TempShortBuf[256];
DEBUG("*** DspProv Entered\n");
@@ -1381,7 +1070,7 @@ static int ft1000_dsp_prov(void *arg)
i++;
if (i == 10) {
DEBUG("FT1000:ft1000_dsp_prov:message drop\n");
- return STATUS_FAILURE;
+ return -1;
}
ft1000_read_register(dev, &tempword,
FT1000_REG_DOORBELL);
@@ -1405,9 +1094,8 @@ static int ft1000_dsp_prov(void *arg)
ppseudo_hdr->portsrc = 0;
/* Calculate new checksum */
ppseudo_hdr->checksum = *pmsg++;
- for (i = 1; i < 7; i++) {
+ for (i = 1; i < 7; i++)
ppseudo_hdr->checksum ^= *pmsg++;
- }
TempShortBuf[0] = 0;
TempShortBuf[1] = htons(len);
@@ -1425,7 +1113,7 @@ static int ft1000_dsp_prov(void *arg)
kfree(ptr->pprov_data);
kfree(ptr);
}
- msleep(10);
+ usleep_range(9000, 11000);
}
DEBUG("DSP Provisioning List Entry finished\n");
@@ -1435,7 +1123,7 @@ static int ft1000_dsp_prov(void *arg)
dev->fProvComplete = true;
info->CardReady = 1;
- return STATUS_SUCCESS;
+ return 0;
}
static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
@@ -1449,7 +1137,7 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
u16 i;
struct pseudo_hdr *ppseudo_hdr;
u16 *pmsg;
- u16 status;
+ int status;
union {
u8 byte[2];
u16 wrd;
@@ -1457,7 +1145,7 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
char *cmdbuffer = kmalloc(1600, GFP_KERNEL);
if (!cmdbuffer)
- return STATUS_FAILURE;
+ return -1;
status = ft1000_read_dpram32(dev, 0x200, cmdbuffer, size);
@@ -1481,154 +1169,179 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
DEBUG("ft1000_proc_drvmsg:Command message type = 0x%x\n", msgtype);
switch (msgtype) {
case MEDIA_STATE:{
- DEBUG
- ("ft1000_proc_drvmsg:Command message type = MEDIA_STATE");
-
- pmediamsg = (struct media_msg *)&cmdbuffer[0];
- if (info->ProgConStat != 0xFF) {
- if (pmediamsg->state) {
- DEBUG("Media is up\n");
- if (info->mediastate == 0) {
- if (dev->NetDevRegDone) {
- netif_wake_queue(dev->
- net);
- }
- info->mediastate = 1;
- }
- } else {
- DEBUG("Media is down\n");
- if (info->mediastate == 1) {
- info->mediastate = 0;
- if (dev->NetDevRegDone) {
- }
- info->ConTm = 0;
- }
+ DEBUG("ft1000_proc_drvmsg:Command message type = MEDIA_STATE");
+ pmediamsg = (struct media_msg *)&cmdbuffer[0];
+ if (info->ProgConStat != 0xFF) {
+ if (pmediamsg->state) {
+ DEBUG("Media is up\n");
+ if (info->mediastate == 0) {
+ if (dev->NetDevRegDone)
+ netif_wake_queue(dev->net);
+ info->mediastate = 1;
}
} else {
DEBUG("Media is down\n");
if (info->mediastate == 1) {
info->mediastate = 0;
- info->ConTm = 0;
+ if (dev->NetDevRegDone)
+ info->ConTm = 0;
}
}
- break;
+ } else {
+ DEBUG("Media is down\n");
+ if (info->mediastate == 1) {
+ info->mediastate = 0;
+ info->ConTm = 0;
+ }
}
+ break;
+ }
case DSP_INIT_MSG:{
- DEBUG
- ("ft1000_proc_drvmsg:Command message type = DSP_INIT_MSG");
-
- pdspinitmsg = (struct dsp_init_msg *)&cmdbuffer[2];
- memcpy(info->DspVer, pdspinitmsg->DspVer, DSPVERSZ);
- DEBUG("DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n",
- info->DspVer[0], info->DspVer[1], info->DspVer[2],
- info->DspVer[3]);
- memcpy(info->HwSerNum, pdspinitmsg->HwSerNum,
- HWSERNUMSZ);
- memcpy(info->Sku, pdspinitmsg->Sku, SKUSZ);
- memcpy(info->eui64, pdspinitmsg->eui64, EUISZ);
- DEBUG("EUI64=%2x.%2x.%2x.%2x.%2x.%2x.%2x.%2x\n",
- info->eui64[0], info->eui64[1], info->eui64[2],
- info->eui64[3], info->eui64[4], info->eui64[5],
- info->eui64[6], info->eui64[7]);
- dev->net->dev_addr[0] = info->eui64[0];
- dev->net->dev_addr[1] = info->eui64[1];
- dev->net->dev_addr[2] = info->eui64[2];
- dev->net->dev_addr[3] = info->eui64[5];
- dev->net->dev_addr[4] = info->eui64[6];
- dev->net->dev_addr[5] = info->eui64[7];
-
- if (ntohs(pdspinitmsg->length) ==
- (sizeof(struct dsp_init_msg) - 20)) {
- memcpy(info->ProductMode,
- pdspinitmsg->ProductMode, MODESZ);
- memcpy(info->RfCalVer, pdspinitmsg->RfCalVer,
- CALVERSZ);
- memcpy(info->RfCalDate, pdspinitmsg->RfCalDate,
- CALDATESZ);
- DEBUG("RFCalVer = 0x%2x 0x%2x\n",
- info->RfCalVer[0], info->RfCalVer[1]);
- }
- break;
+ DEBUG("ft1000_proc_drvmsg:Command message type = DSP_INIT_MSG");
+ pdspinitmsg = (struct dsp_init_msg *)&cmdbuffer[2];
+ memcpy(info->DspVer, pdspinitmsg->DspVer, DSPVERSZ);
+ DEBUG("DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n",
+ info->DspVer[0], info->DspVer[1], info->DspVer[2],
+ info->DspVer[3]);
+ memcpy(info->HwSerNum, pdspinitmsg->HwSerNum,
+ HWSERNUMSZ);
+ memcpy(info->Sku, pdspinitmsg->Sku, SKUSZ);
+ memcpy(info->eui64, pdspinitmsg->eui64, EUISZ);
+ DEBUG("EUI64=%2x.%2x.%2x.%2x.%2x.%2x.%2x.%2x\n",
+ info->eui64[0], info->eui64[1], info->eui64[2],
+ info->eui64[3], info->eui64[4], info->eui64[5],
+ info->eui64[6], info->eui64[7]);
+ dev->net->dev_addr[0] = info->eui64[0];
+ dev->net->dev_addr[1] = info->eui64[1];
+ dev->net->dev_addr[2] = info->eui64[2];
+ dev->net->dev_addr[3] = info->eui64[5];
+ dev->net->dev_addr[4] = info->eui64[6];
+ dev->net->dev_addr[5] = info->eui64[7];
+
+ if (ntohs(pdspinitmsg->length) ==
+ (sizeof(struct dsp_init_msg) - 20)) {
+ memcpy(info->ProductMode, pdspinitmsg->ProductMode,
+ MODESZ);
+ memcpy(info->RfCalVer, pdspinitmsg->RfCalVer, CALVERSZ);
+ memcpy(info->RfCalDate, pdspinitmsg->RfCalDate,
+ CALDATESZ);
+ DEBUG("RFCalVer = 0x%2x 0x%2x\n", info->RfCalVer[0],
+ info->RfCalVer[1]);
}
+ break;
+ }
case DSP_PROVISION:{
- DEBUG
- ("ft1000_proc_drvmsg:Command message type = DSP_PROVISION\n");
+ DEBUG("ft1000_proc_drvmsg:Command message type = DSP_PROVISION\n");
- /* kick off dspprov routine to start provisioning
- * Send provisioning data to DSP
- */
- if (list_empty(&info->prov_list) == 0) {
- dev->fProvComplete = false;
- status = ft1000_dsp_prov(dev);
- if (status != STATUS_SUCCESS)
- goto out;
- } else {
- dev->fProvComplete = true;
- status =
- ft1000_write_register(dev, FT1000_DB_HB,
- FT1000_REG_DOORBELL);
- DEBUG
- ("FT1000:drivermsg:No more DSP provisioning data in dsp image\n");
- }
- DEBUG("ft1000_proc_drvmsg:DSP PROVISION is done\n");
- break;
+ /* kick off dspprov routine to start provisioning
+ * Send provisioning data to DSP
+ */
+ if (list_empty(&info->prov_list) == 0) {
+ dev->fProvComplete = false;
+ status = ft1000_dsp_prov(dev);
+ if (status != 0)
+ goto out;
+ } else {
+ dev->fProvComplete = true;
+ status = ft1000_write_register(dev, FT1000_DB_HB,
+ FT1000_REG_DOORBELL);
+ DEBUG("FT1000:drivermsg:No more DSP provisioning data in dsp image\n");
}
+ DEBUG("ft1000_proc_drvmsg:DSP PROVISION is done\n");
+ break;
+ }
case DSP_STORE_INFO:{
- DEBUG
- ("ft1000_proc_drvmsg:Command message type = DSP_STORE_INFO");
-
- DEBUG("FT1000:drivermsg:Got DSP_STORE_INFO\n");
- tempword = ntohs(pdrvmsg->length);
- info->DSPInfoBlklen = tempword;
- if (tempword < (MAX_DSP_SESS_REC - 4)) {
- pmsg = (u16 *) &pdrvmsg->data[0];
- for (i = 0; i < ((tempword + 1) / 2); i++) {
- DEBUG
- ("FT1000:drivermsg:dsp info data = 0x%x\n",
- *pmsg);
- info->DSPInfoBlk[i + 10] = *pmsg++;
- }
- } else {
- info->DSPInfoBlklen = 0;
+ DEBUG("ft1000_proc_drvmsg:Command message type = DSP_STORE_INFO");
+ DEBUG("FT1000:drivermsg:Got DSP_STORE_INFO\n");
+ tempword = ntohs(pdrvmsg->length);
+ info->DSPInfoBlklen = tempword;
+ if (tempword < (MAX_DSP_SESS_REC - 4)) {
+ pmsg = (u16 *) &pdrvmsg->data[0];
+ for (i = 0; i < ((tempword + 1) / 2); i++) {
+ DEBUG("FT1000:drivermsg:dsp info data = 0x%x\n", *pmsg);
+ info->DSPInfoBlk[i + 10] = *pmsg++;
}
- break;
+ } else {
+ info->DSPInfoBlklen = 0;
}
+ break;
+ }
case DSP_GET_INFO:{
- DEBUG("FT1000:drivermsg:Got DSP_GET_INFO\n");
- /* copy dsp info block to dsp */
- dev->DrvMsgPend = 1;
- /* allow any outstanding ioctl to finish */
+ DEBUG("FT1000:drivermsg:Got DSP_GET_INFO\n");
+ /* copy dsp info block to dsp */
+ dev->DrvMsgPend = 1;
+ /* allow any outstanding ioctl to finish */
+ mdelay(10);
+ status = ft1000_read_register(dev, &tempword,
+ FT1000_REG_DOORBELL);
+ if (tempword & FT1000_DB_DPRAM_TX) {
mdelay(10);
- status =
- ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
+ status = ft1000_read_register(dev, &tempword,
+ FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
mdelay(10);
- status =
- ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- mdelay(10);
- status =
- ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX)
- break;
- }
+ status = ft1000_read_register(dev, &tempword,
+ FT1000_REG_DOORBELL);
+ if (tempword & FT1000_DB_DPRAM_TX)
+ break;
}
- /* Put message into Slow Queue
- * Form Pseudo header
- */
- pmsg = (u16 *) info->DSPInfoBlk;
- *pmsg++ = 0;
- *pmsg++ =
- htons(info->DSPInfoBlklen + 20 +
- info->DSPInfoBlklen);
- ppseudo_hdr =
- (struct pseudo_hdr *)(u16 *) &info->DSPInfoBlk[2];
- ppseudo_hdr->length =
- htons(info->DSPInfoBlklen + 4 +
- info->DSPInfoBlklen);
+ }
+ /* Put message into Slow Queue. Form Pseudo header */
+ pmsg = (u16 *) info->DSPInfoBlk;
+ *pmsg++ = 0;
+ *pmsg++ = htons(info->DSPInfoBlklen + 20 + info->DSPInfoBlklen);
+ ppseudo_hdr =
+ (struct pseudo_hdr *)(u16 *) &info->DSPInfoBlk[2];
+ ppseudo_hdr->length = htons(info->DSPInfoBlklen + 4
+ + info->DSPInfoBlklen);
+ ppseudo_hdr->source = 0x10;
+ ppseudo_hdr->destination = 0x20;
+ ppseudo_hdr->portdest = 0;
+ ppseudo_hdr->portsrc = 0;
+ ppseudo_hdr->sh_str_id = 0;
+ ppseudo_hdr->control = 0;
+ ppseudo_hdr->rsvd1 = 0;
+ ppseudo_hdr->rsvd2 = 0;
+ ppseudo_hdr->qos_class = 0;
+ /* Insert slow queue sequence number */
+ ppseudo_hdr->seq_num = info->squeseqnum++;
+ /* Insert application id */
+ ppseudo_hdr->portsrc = 0;
+ /* Calculate new checksum */
+ ppseudo_hdr->checksum = *pmsg++;
+ for (i = 1; i < 7; i++)
+ ppseudo_hdr->checksum ^= *pmsg++;
+
+ info->DSPInfoBlk[10] = 0x7200;
+ info->DSPInfoBlk[11] = htons(info->DSPInfoBlklen);
+ status = ft1000_write_dpram32(dev, 0,
+ (u8 *)&info->DSPInfoBlk[0],
+ (unsigned short)(info->DSPInfoBlklen + 22));
+ status = ft1000_write_register(dev, FT1000_DB_DPRAM_TX,
+ FT1000_REG_DOORBELL);
+ dev->DrvMsgPend = 0;
+ break;
+ }
+ case GET_DRV_ERR_RPT_MSG:{
+ DEBUG("FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n");
+ /* copy driver error message to dsp */
+ dev->DrvMsgPend = 1;
+ /* allow any outstanding ioctl to finish */
+ mdelay(10);
+ status = ft1000_read_register(dev, &tempword,
+ FT1000_REG_DOORBELL);
+ if (tempword & FT1000_DB_DPRAM_TX) {
+ mdelay(10);
+ status = ft1000_read_register(dev, &tempword,
+ FT1000_REG_DOORBELL);
+ if (tempword & FT1000_DB_DPRAM_TX)
+ mdelay(10);
+ }
+ if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
+ /* Put message into Slow Queue. Form Pseudo header */
+ pmsg = (u16 *) &tempbuffer[0];
+ ppseudo_hdr = (struct pseudo_hdr *)pmsg;
+ ppseudo_hdr->length = htons(0x0012);
ppseudo_hdr->source = 0x10;
ppseudo_hdr->destination = 0x20;
ppseudo_hdr->portdest = 0;
@@ -1647,293 +1360,245 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
for (i = 1; i < 7; i++)
ppseudo_hdr->checksum ^= *pmsg++;
- info->DSPInfoBlk[10] = 0x7200;
- info->DSPInfoBlk[11] = htons(info->DSPInfoBlklen);
- status =
- ft1000_write_dpram32(dev, 0,
- (u8 *) &info->DSPInfoBlk[0],
- (unsigned short)(info->
- DSPInfoBlklen
- + 22));
- status =
- ft1000_write_register(dev, FT1000_DB_DPRAM_TX,
- FT1000_REG_DOORBELL);
- dev->DrvMsgPend = 0;
-
- break;
+ pmsg = (u16 *) &tempbuffer[16];
+ *pmsg++ = htons(RSP_DRV_ERR_RPT_MSG);
+ *pmsg++ = htons(0x000e);
+ *pmsg++ = htons(info->DSP_TIME[0]);
+ *pmsg++ = htons(info->DSP_TIME[1]);
+ *pmsg++ = htons(info->DSP_TIME[2]);
+ *pmsg++ = htons(info->DSP_TIME[3]);
+ convert.byte[0] = info->DspVer[0];
+ convert.byte[1] = info->DspVer[1];
+ *pmsg++ = convert.wrd;
+ convert.byte[0] = info->DspVer[2];
+ convert.byte[1] = info->DspVer[3];
+ *pmsg++ = convert.wrd;
+ *pmsg++ = htons(info->DrvErrNum);
+
+ card_send_command(dev, (unsigned char *)&tempbuffer[0],
+ (u16)(0x0012 + PSEUDOSZ));
+ info->DrvErrNum = 0;
}
-
- case GET_DRV_ERR_RPT_MSG:{
- DEBUG("FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n");
- /* copy driver error message to dsp */
- dev->DrvMsgPend = 1;
- /* allow any outstanding ioctl to finish */
- mdelay(10);
- status =
- ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- mdelay(10);
- status =
- ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX)
- mdelay(10);
- }
-
- if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
- /* Put message into Slow Queue
- * Form Pseudo header
- */
- pmsg = (u16 *) &tempbuffer[0];
- ppseudo_hdr = (struct pseudo_hdr *)pmsg;
- ppseudo_hdr->length = htons(0x0012);
- ppseudo_hdr->source = 0x10;
- ppseudo_hdr->destination = 0x20;
- ppseudo_hdr->portdest = 0;
- ppseudo_hdr->portsrc = 0;
- ppseudo_hdr->sh_str_id = 0;
- ppseudo_hdr->control = 0;
- ppseudo_hdr->rsvd1 = 0;
- ppseudo_hdr->rsvd2 = 0;
- ppseudo_hdr->qos_class = 0;
- /* Insert slow queue sequence number */
- ppseudo_hdr->seq_num = info->squeseqnum++;
- /* Insert application id */
- ppseudo_hdr->portsrc = 0;
- /* Calculate new checksum */
- ppseudo_hdr->checksum = *pmsg++;
- for (i = 1; i < 7; i++)
- ppseudo_hdr->checksum ^= *pmsg++;
-
- pmsg = (u16 *) &tempbuffer[16];
- *pmsg++ = htons(RSP_DRV_ERR_RPT_MSG);
- *pmsg++ = htons(0x000e);
- *pmsg++ = htons(info->DSP_TIME[0]);
- *pmsg++ = htons(info->DSP_TIME[1]);
- *pmsg++ = htons(info->DSP_TIME[2]);
- *pmsg++ = htons(info->DSP_TIME[3]);
- convert.byte[0] = info->DspVer[0];
- convert.byte[1] = info->DspVer[1];
- *pmsg++ = convert.wrd;
- convert.byte[0] = info->DspVer[2];
- convert.byte[1] = info->DspVer[3];
- *pmsg++ = convert.wrd;
- *pmsg++ = htons(info->DrvErrNum);
-
- card_send_command(dev,
- (unsigned char *)&tempbuffer[0],
- (u16) (0x0012 + PSEUDOSZ));
- info->DrvErrNum = 0;
- }
- dev->DrvMsgPend = 0;
-
- break;
- }
-
+ dev->DrvMsgPend = 0;
+ break;
+ }
default:
break;
}
- status = STATUS_SUCCESS;
+ status = 0;
out:
kfree(cmdbuffer);
DEBUG("return from ft1000_proc_drvmsg\n");
return status;
}
-int ft1000_poll(void* dev_id)
+/* Check which application has registered for dsp broadcast messages */
+static int dsp_broadcast_msg_id(struct ft1000_usb *dev)
{
- struct ft1000_usb *dev = (struct ft1000_usb *)dev_id;
- struct ft1000_info *info = netdev_priv(dev->net);
+ struct dpram_blk *pdpram_blk;
+ unsigned long flags;
+ int i;
- u16 tempword;
- u16 status;
- u16 size;
- int i;
- u16 data;
- u16 modulo;
- u16 portid;
- u16 nxtph;
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ if ((dev->app_info[i].DspBCMsgFlag)
+ && (dev->app_info[i].fileobject)
+ && (dev->app_info[i].NumOfMsg
+ < MAX_MSG_LIMIT)) {
+ pdpram_blk = ft1000_get_buffer(&freercvpool);
+ if (pdpram_blk == NULL) {
+ DEBUG("Out of memory in free receive command pool\n");
+ dev->app_info[i].nRxMsgMiss++;
+ return -1;
+ }
+ if (ft1000_receive_cmd(dev, pdpram_blk->pbuffer,
+ MAX_CMD_SQSIZE)) {
+ /* Put message into the
+ * appropriate application block
+ */
+ dev->app_info[i].nRxMsg++;
+ spin_lock_irqsave(&free_buff_lock, flags);
+ list_add_tail(&pdpram_blk->list,
+ &dev->app_info[i] .app_sqlist);
+ dev->app_info[i].NumOfMsg++;
+ spin_unlock_irqrestore(&free_buff_lock, flags);
+ wake_up_interruptible(&dev->app_info[i]
+ .wait_dpram_msg);
+ } else {
+ dev->app_info[i].nRxMsgMiss++;
+ ft1000_free_buffer(pdpram_blk, &freercvpool);
+ DEBUG("pdpram_blk::ft1000_get_buffer NULL\n");
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
+
+static int handle_misc_portid(struct ft1000_usb *dev)
+{
struct dpram_blk *pdpram_blk;
- struct pseudo_hdr *ppseudo_hdr;
- unsigned long flags;
-
- if (ft1000_chkcard(dev) == FALSE) {
- DEBUG("ft1000_poll::ft1000_chkcard: failed\n");
- return STATUS_FAILURE;
- }
-
- status = ft1000_read_register (dev, &tempword, FT1000_REG_DOORBELL);
-
- if ( !status )
- {
-
- if (tempword & FT1000_DB_DPRAM_RX) {
-
- status = ft1000_read_dpram16(dev, 0x200, (u8 *)&data, 0);
- size = ntohs(data) + 16 + 2;
- if (size % 4) {
- modulo = 4 - (size % 4);
- size = size + modulo;
- }
- status = ft1000_read_dpram16(dev, 0x201, (u8 *)&portid, 1);
- portid &= 0xff;
-
- if (size < MAX_CMD_SQSIZE) {
- switch (portid)
- {
- case DRIVERID:
- DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_DPRAM_RX : portid DRIVERID\n");
-
- status = ft1000_proc_drvmsg (dev, size);
- if (status != STATUS_SUCCESS )
- return status;
- break;
- case DSPBCMSGID:
- // This is a dsp broadcast message
- // Check which application has registered for dsp broadcast messages
-
- for (i=0; i<MAX_NUM_APP; i++) {
- if ( (dev->app_info[i].DspBCMsgFlag) && (dev->app_info[i].fileobject) &&
- (dev->app_info[i].NumOfMsg < MAX_MSG_LIMIT) )
- {
- nxtph = FT1000_DPRAM_RX_BASE + 2;
- pdpram_blk = ft1000_get_buffer (&freercvpool);
- if (pdpram_blk != NULL) {
- if ( ft1000_receive_cmd(dev, pdpram_blk->pbuffer, MAX_CMD_SQSIZE, &nxtph) ) {
- ppseudo_hdr = (struct pseudo_hdr *)pdpram_blk->pbuffer;
- // Put message into the appropriate application block
- dev->app_info[i].nRxMsg++;
- spin_lock_irqsave(&free_buff_lock, flags);
- list_add_tail(&pdpram_blk->list, &dev->app_info[i].app_sqlist);
- dev->app_info[i].NumOfMsg++;
- spin_unlock_irqrestore(&free_buff_lock, flags);
- wake_up_interruptible(&dev->app_info[i].wait_dpram_msg);
- }
- else {
- dev->app_info[i].nRxMsgMiss++;
- // Put memory back to free pool
- ft1000_free_buffer(pdpram_blk, &freercvpool);
- DEBUG("pdpram_blk::ft1000_get_buffer NULL\n");
- }
- }
- else {
- DEBUG("Out of memory in free receive command pool\n");
- dev->app_info[i].nRxMsgMiss++;
- }
- }
- }
- break;
- default:
- pdpram_blk = ft1000_get_buffer (&freercvpool);
-
- if (pdpram_blk != NULL) {
- if ( ft1000_receive_cmd(dev, pdpram_blk->pbuffer, MAX_CMD_SQSIZE, &nxtph) ) {
- ppseudo_hdr = (struct pseudo_hdr *)pdpram_blk->pbuffer;
- // Search for correct application block
- for (i=0; i<MAX_NUM_APP; i++) {
- if (dev->app_info[i].app_id == ppseudo_hdr->portdest) {
- break;
- }
- }
-
- if (i == MAX_NUM_APP) {
- DEBUG("FT1000:ft1000_parse_dpram_msg: No application matching id = %d\n", ppseudo_hdr->portdest);
- // Put memory back to free pool
- ft1000_free_buffer(pdpram_blk, &freercvpool);
- }
- else {
- if (dev->app_info[i].NumOfMsg > MAX_MSG_LIMIT) {
- // Put memory back to free pool
- ft1000_free_buffer(pdpram_blk, &freercvpool);
- }
- else {
- dev->app_info[i].nRxMsg++;
- // Put message into the appropriate application block
- list_add_tail(&pdpram_blk->list, &dev->app_info[i].app_sqlist);
- dev->app_info[i].NumOfMsg++;
- }
- }
- }
- else {
- // Put memory back to free pool
- ft1000_free_buffer(pdpram_blk, &freercvpool);
- }
- }
- else {
- DEBUG("Out of memory in free receive command pool\n");
- }
- break;
- }
- }
- else {
- DEBUG("FT1000:dpc:Invalid total length for SlowQ = %d\n", size);
- }
- status = ft1000_write_register (dev, FT1000_DB_DPRAM_RX, FT1000_REG_DOORBELL);
- }
- else if (tempword & FT1000_DSP_ASIC_RESET) {
-
- // Let's reset the ASIC from the Host side as well
- status = ft1000_write_register (dev, ASIC_RESET_BIT, FT1000_REG_RESET);
- status = ft1000_read_register (dev, &tempword, FT1000_REG_RESET);
- i = 0;
- while (tempword & ASIC_RESET_BIT) {
- status = ft1000_read_register (dev, &tempword, FT1000_REG_RESET);
- msleep(10);
- i++;
- if (i==100)
- break;
- }
- if (i==100) {
- DEBUG("Unable to reset ASIC\n");
- return STATUS_SUCCESS;
- }
- msleep(10);
- // Program WMARK register
- status = ft1000_write_register (dev, 0x600, FT1000_REG_MAG_WATERMARK);
- // clear ASIC reset doorbell
- status = ft1000_write_register (dev, FT1000_DSP_ASIC_RESET, FT1000_REG_DOORBELL);
- msleep(10);
- }
- else if (tempword & FT1000_ASIC_RESET_REQ) {
- DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_ASIC_RESET_REQ\n");
-
- // clear ASIC reset request from DSP
- status = ft1000_write_register (dev, FT1000_ASIC_RESET_REQ, FT1000_REG_DOORBELL);
- status = ft1000_write_register (dev, HOST_INTF_BE, FT1000_REG_SUP_CTRL);
- // copy dsp session record from Adapter block
- status = ft1000_write_dpram32 (dev, 0, (u8 *)&info->DSPSess.Rec[0], 1024);
- // Program WMARK register
- status = ft1000_write_register (dev, 0x600, FT1000_REG_MAG_WATERMARK);
- // ring doorbell to tell DSP that ASIC is out of reset
- status = ft1000_write_register (dev, FT1000_ASIC_RESET_DSP, FT1000_REG_DOORBELL);
- }
- else if (tempword & FT1000_DB_COND_RESET) {
- DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_COND_RESET\n");
-
- if (!dev->fAppMsgPend) {
- // Reset ASIC and DSP
-
- status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER0, (u8 *)&(info->DSP_TIME[0]), FT1000_MAG_DSP_TIMER0_INDX);
- status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER1, (u8 *)&(info->DSP_TIME[1]), FT1000_MAG_DSP_TIMER1_INDX);
- status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER2, (u8 *)&(info->DSP_TIME[2]), FT1000_MAG_DSP_TIMER2_INDX);
- status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER3, (u8 *)&(info->DSP_TIME[3]), FT1000_MAG_DSP_TIMER3_INDX);
- info->CardReady = 0;
- info->DrvErrNum = DSP_CONDRESET_INFO;
- DEBUG("ft1000_hw:DSP conditional reset requested\n");
- info->ft1000_reset(dev->net);
- }
- else {
- dev->fProvComplete = false;
- dev->fCondResetPend = true;
- }
-
- ft1000_write_register(dev, FT1000_DB_COND_RESET, FT1000_REG_DOORBELL);
- }
-
- }
-
- return STATUS_SUCCESS;
+ int i;
+
+ pdpram_blk = ft1000_get_buffer(&freercvpool);
+ if (pdpram_blk == NULL) {
+ DEBUG("Out of memory in free receive command pool\n");
+ return -1;
+ }
+ if (!ft1000_receive_cmd(dev, pdpram_blk->pbuffer, MAX_CMD_SQSIZE))
+ goto exit_failure;
+ /* Search for correct application block */
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ if (dev->app_info[i].app_id == ((struct pseudo_hdr *)
+ pdpram_blk->pbuffer)->portdest)
+ break;
+ }
+ if (i == MAX_NUM_APP) {
+ DEBUG("FT1000:ft1000_parse_dpram_msg: No application matching id = %d\n", ((struct pseudo_hdr *)pdpram_blk->pbuffer)->portdest);
+ goto exit_failure;
+ } else if (dev->app_info[i].NumOfMsg > MAX_MSG_LIMIT) {
+ goto exit_failure;
+ } else {
+ dev->app_info[i].nRxMsg++;
+ /* Put message into the appropriate application block */
+ list_add_tail(&pdpram_blk->list, &dev->app_info[i].app_sqlist);
+ dev->app_info[i].NumOfMsg++;
+ }
+ return 0;
+
+exit_failure:
+ ft1000_free_buffer(pdpram_blk, &freercvpool);
+ return -1;
+}
+
+int ft1000_poll(void *dev_id)
+{
+ struct ft1000_usb *dev = (struct ft1000_usb *)dev_id;
+ struct ft1000_info *info = netdev_priv(dev->net);
+ u16 tempword;
+ int status;
+ u16 size;
+ int i;
+ u16 data;
+ u16 modulo;
+ u16 portid;
+
+ if (ft1000_chkcard(dev) == FALSE) {
+ DEBUG("ft1000_poll::ft1000_chkcard: failed\n");
+ return -1;
+ }
+ status = ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL);
+ if (!status) {
+ if (tempword & FT1000_DB_DPRAM_RX) {
+ status = ft1000_read_dpram16(dev,
+ 0x200, (u8 *)&data, 0);
+ size = ntohs(data) + 16 + 2;
+ if (size % 4) {
+ modulo = 4 - (size % 4);
+ size = size + modulo;
+ }
+ status = ft1000_read_dpram16(dev, 0x201,
+ (u8 *)&portid, 1);
+ portid &= 0xff;
+ if (size < MAX_CMD_SQSIZE) {
+ switch (portid) {
+ case DRIVERID:
+ DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_DPRAM_RX : portid DRIVERID\n");
+ status = ft1000_proc_drvmsg(dev, size);
+ if (status != 0)
+ return status;
+ break;
+ case DSPBCMSGID:
+ status = dsp_broadcast_msg_id(dev);
+ break;
+ default:
+ status = handle_misc_portid(dev);
+ break;
+ }
+ } else
+ DEBUG("FT1000:dpc:Invalid total length for SlowQ = %d\n", size);
+ status = ft1000_write_register(dev,
+ FT1000_DB_DPRAM_RX,
+ FT1000_REG_DOORBELL);
+ } else if (tempword & FT1000_DSP_ASIC_RESET) {
+ /* Let's reset the ASIC from the Host side as well */
+ status = ft1000_write_register(dev, ASIC_RESET_BIT,
+ FT1000_REG_RESET);
+ status = ft1000_read_register(dev, &tempword,
+ FT1000_REG_RESET);
+ i = 0;
+ while (tempword & ASIC_RESET_BIT) {
+ status = ft1000_read_register(dev, &tempword,
+ FT1000_REG_RESET);
+ usleep_range(9000, 11000);
+ i++;
+ if (i == 100)
+ break;
+ }
+ if (i == 100) {
+ DEBUG("Unable to reset ASIC\n");
+ return 0;
+ }
+ usleep_range(9000, 11000);
+ /* Program WMARK register */
+ status = ft1000_write_register(dev, 0x600,
+ FT1000_REG_MAG_WATERMARK);
+ /* clear ASIC reset doorbell */
+ status = ft1000_write_register(dev,
+ FT1000_DSP_ASIC_RESET,
+ FT1000_REG_DOORBELL);
+ usleep_range(9000, 11000);
+ } else if (tempword & FT1000_ASIC_RESET_REQ) {
+ DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_ASIC_RESET_REQ\n");
+ /* clear ASIC reset request from DSP */
+ status = ft1000_write_register(dev,
+ FT1000_ASIC_RESET_REQ,
+ FT1000_REG_DOORBELL);
+ status = ft1000_write_register(dev, HOST_INTF_BE,
+ FT1000_REG_SUP_CTRL);
+ /* copy dsp session record from Adapter block */
+ status = ft1000_write_dpram32(dev, 0,
+ (u8 *)&info->DSPSess.Rec[0], 1024);
+ status = ft1000_write_register(dev, 0x600,
+ FT1000_REG_MAG_WATERMARK);
+ /* ring doorbell to tell DSP that
+ * ASIC is out of reset
+			 */
+ status = ft1000_write_register(dev,
+ FT1000_ASIC_RESET_DSP,
+ FT1000_REG_DOORBELL);
+ } else if (tempword & FT1000_DB_COND_RESET) {
+ DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_COND_RESET\n");
+ if (!dev->fAppMsgPend) {
+ /* Reset ASIC and DSP */
+ status = ft1000_read_dpram16(dev,
+ FT1000_MAG_DSP_TIMER0,
+ (u8 *)&(info->DSP_TIME[0]),
+ FT1000_MAG_DSP_TIMER0_INDX);
+ status = ft1000_read_dpram16(dev,
+ FT1000_MAG_DSP_TIMER1,
+ (u8 *)&(info->DSP_TIME[1]),
+ FT1000_MAG_DSP_TIMER1_INDX);
+ status = ft1000_read_dpram16(dev,
+ FT1000_MAG_DSP_TIMER2,
+ (u8 *)&(info->DSP_TIME[2]),
+ FT1000_MAG_DSP_TIMER2_INDX);
+ status = ft1000_read_dpram16(dev,
+ FT1000_MAG_DSP_TIMER3,
+ (u8 *)&(info->DSP_TIME[3]),
+ FT1000_MAG_DSP_TIMER3_INDX);
+ info->CardReady = 0;
+ info->DrvErrNum = DSP_CONDRESET_INFO;
+ DEBUG("ft1000_hw:DSP conditional reset requested\n");
+ info->ft1000_reset(dev->net);
+ } else {
+ dev->fProvComplete = false;
+ dev->fCondResetPend = true;
+ }
+ ft1000_write_register(dev, FT1000_DB_COND_RESET,
+ FT1000_REG_DOORBELL);
+ }
+ }
+ return 0;
}
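The reset-wait loops above trade msleep(10) for usleep_range(9000, 11000): msleep() is jiffy-based and can overshoot short delays by a full tick or more, while usleep_range() is hrtimer-based and gives the scheduler an explicit window in which to coalesce the wakeup. A minimal sketch of the same polling pattern, factored into a hypothetical helper (not part of the patch):

	/* Sketch only: poll the reset register until ASIC_RESET_BIT clears,
	 * sleeping roughly 10 ms per iteration via usleep_range(). */
	static int wait_for_asic_reset(struct ft1000_usb *dev)
	{
		u16 val;
		int i;

		for (i = 0; i < 100; i++) {
			if (ft1000_read_register(dev, &val, FT1000_REG_RESET))
				return -EIO;
			if (!(val & ASIC_RESET_BIT))
				return 0;		/* reset completed */
			usleep_range(9000, 11000);	/* 9-11 ms window */
		}
		return -ETIMEDOUT;
	}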
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
index 5ead942be680..2575d0d6bff3 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
@@ -33,13 +33,13 @@
#define seq_putx(m, message, size, var) \
seq_printf(m, message); \
- for(i = 0; i < (size - 1); i++) \
+ for (i = 0; i < (size - 1); i++) \
seq_printf(m, "%02x:", var[i]); \
seq_printf(m, "%02x\n", var[i])
#define seq_putd(m, message, size, var) \
seq_printf(m, message); \
- for(i = 0; i < (size - 1); i++) \
+ for (i = 0; i < (size - 1); i++) \
seq_printf(m, "%d.", var[i]); \
seq_printf(m, "%d\n", var[i])
@@ -47,14 +47,14 @@
#define FTNET_PROC init_net.proc_net
-int ft1000_read_dpram16 (struct ft1000_usb *ft1000dev, u16 indx,
+int ft1000_read_dpram16(struct ft1000_usb *ft1000dev, u16 indx,
u8 *buffer, u8 highlow);
static int ft1000ReadProc(struct seq_file *m, void *v)
{
- static const char *status[] = {
- "Idle (Disconnect)",
+ static const char *status[] = {
+ "Idle (Disconnect)",
"Searching",
"Active (Connected)",
"Waiting for L2",
@@ -127,10 +127,10 @@ static int ft1000ReadProc(struct seq_file *m, void *v)
}
seq_printf(m, "Connection Time: %02ld:%02ld:%02ld\n",
- ((delta / 3600) % 24), ((delta / 60) % 60), (delta % 60));
+ ((delta / 3600) % 24), ((delta / 60) % 60), (delta % 60));
seq_printf(m, "Connection Time[s]: %ld\n", delta);
seq_printf(m, "Asic ID: %s\n",
- (info->AsicID) == ELECTRABUZZ_ID ? "ELECTRABUZZ ASIC" : "MAGNEMITE ASIC");
+ (info->AsicID) == ELECTRABUZZ_ID ? "ELECTRABUZZ ASIC" : "MAGNEMITE ASIC");
seq_putx(m, "SKU: ", SKUSZ, info->Sku);
seq_putx(m, "EUI64: ", EUISZ, info->eui64);
seq_putd(m, "DSP version number: ", DSPVERSZ, info->DspVer);
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
index a8dd1e54878c..e40763e60dbd 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
@@ -7,7 +7,6 @@
* $Id:
*====================================================
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
@@ -45,13 +44,13 @@ static int ft1000_poll_thread(void *arg)
msleep(10);
if (!gPollingfailed) {
ret = ft1000_poll(arg);
- if (ret != STATUS_SUCCESS) {
+ if (ret != 0) {
DEBUG("ft1000_poll_thread: polling failed\n");
gPollingfailed = true;
}
}
}
- return STATUS_SUCCESS;
+ return 0;
}
static int ft1000_probe(struct usb_interface *interface,
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
index e8d00a930dc6..a6fdd524ee6f 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
@@ -11,8 +11,6 @@
#define PSEUDOSZ 16
-#define SUCCESS 0x00
-
struct app_info_block {
u32 nTxMsg; /* DPRAM msg sent to DSP with app_id */
u32 nRxMsg; /* DPRAM msg rcv from dsp with app_id */
@@ -31,9 +29,6 @@ struct app_info_block {
#define FALSE 0
#define TRUE 1
-#define STATUS_SUCCESS 0
-#define STATUS_FAILURE 0x1001
-
#define FT1000_STATUS_CLOSING 0x01
#define DSPBCMSGID 0x10
diff --git a/drivers/staging/fwserial/Kconfig b/drivers/staging/fwserial/Kconfig
index a0812d99136f..9c7c9267d52c 100644
--- a/drivers/staging/fwserial/Kconfig
+++ b/drivers/staging/fwserial/Kconfig
@@ -9,3 +9,23 @@ config FIREWIRE_SERIAL
To compile this driver as a module, say M here: the module will
be called firewire-serial.
+
+if FIREWIRE_SERIAL
+
+config FWTTY_MAX_TOTAL_PORTS
+ int "Maximum number of serial ports supported"
+ default "64"
+ help
+ Set this to the maximum number of serial ports you want the
+ firewire-serial driver to support.
+
+config FWTTY_MAX_CARD_PORTS
+ int "Maximum number of serial ports supported per adapter"
+ range 0 FWTTY_MAX_TOTAL_PORTS
+ default "32"
+ help
+ Set this to the maximum number of serial ports each firewire
+ adapter supports. The actual number of serial ports registered
+ is set with the module parameter "ttys".
+
+endif
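An int Kconfig symbol such as the two added above is emitted as an integer CONFIG_* macro in the generated configuration; that is how the fwserial.h hunk further down replaces the hard-coded 32/64 port limits. A minimal sketch of the consumption (the array is only an illustrative user):

	/* Sketch: CONFIG_* values come from the generated kernel configuration. */
	#define MAX_TOTAL_PORTS	CONFIG_FWTTY_MAX_TOTAL_PORTS	/* default 64 */
	#define MAX_CARD_PORTS	CONFIG_FWTTY_MAX_CARD_PORTS	/* default 32 */

	static struct fwtty_port *port_table[MAX_TOTAL_PORTS];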
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 62df009e5ac7..8af136e9c9dc 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -136,14 +136,14 @@ static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card,
#ifdef FWTTY_PROFILING
-static void profile_fifo_avail(struct fwtty_port *port, unsigned *stat)
+static void fwtty_profile_fifo(struct fwtty_port *port, unsigned *stat)
{
spin_lock_bh(&port->lock);
- profile_size_distrib(stat, dma_fifo_avail(&port->tx_fifo));
+ fwtty_profile_data(stat, dma_fifo_avail(&port->tx_fifo));
spin_unlock_bh(&port->lock);
}
-static void dump_profile(struct seq_file *m, struct stats *stats)
+static void fwtty_dump_profile(struct seq_file *m, struct stats *stats)
{
/* for each stat, print sum of 0 to 2^k, then individually */
int k = 4;
@@ -183,8 +183,8 @@ static void dump_profile(struct seq_file *m, struct stats *stats)
}
#else
-#define profile_fifo_avail(port, stat)
-#define dump_profile(m, stats)
+#define fwtty_profile_fifo(port, stat)
+#define fwtty_dump_profile(m, stats)
#endif
/*
@@ -456,16 +456,27 @@ static int fwtty_write_port_status(struct fwtty_port *port)
return err;
}
-static void __fwtty_throttle(struct fwtty_port *port, struct tty_struct *tty)
+static void fwtty_throttle_port(struct fwtty_port *port)
{
+ struct tty_struct *tty;
unsigned old;
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
+
+ spin_lock_bh(&port->lock);
+
old = port->mctrl;
port->mctrl |= OOB_RX_THROTTLE;
if (C_CRTSCTS(tty))
port->mctrl &= ~TIOCM_RTS;
if (~old & OOB_RX_THROTTLE)
__fwtty_write_port_status(port);
+
+ spin_unlock_bh(&port->lock);
+
+ tty_kref_put(tty);
}
/**
@@ -532,80 +543,14 @@ static void fwtty_emit_breaks(struct work_struct *work)
port->icount.brk += brk;
}
-static void fwtty_pushrx(struct work_struct *work)
-{
- struct fwtty_port *port = to_port(work, push);
- struct tty_struct *tty;
- struct buffered_rx *buf, *next;
- int n, c = 0;
-
- spin_lock_bh(&port->lock);
- list_for_each_entry_safe(buf, next, &port->buf_list, list) {
- n = tty_insert_flip_string_fixed_flag(&port->port, buf->data,
- TTY_NORMAL, buf->n);
- c += n;
- port->buffered -= n;
- if (n < buf->n) {
- if (n > 0) {
- memmove(buf->data, buf->data + n, buf->n - n);
- buf->n -= n;
- }
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- __fwtty_throttle(port, tty);
- tty_kref_put(tty);
- }
- break;
- } else {
- list_del(&buf->list);
- kfree(buf);
- }
- }
- if (c > 0)
- tty_flip_buffer_push(&port->port);
-
- if (list_empty(&port->buf_list))
- clear_bit(BUFFERING_RX, &port->flags);
- spin_unlock_bh(&port->lock);
-}
-
-static int fwtty_buffer_rx(struct fwtty_port *port, unsigned char *d, size_t n)
-{
- struct buffered_rx *buf;
- size_t size = (n + sizeof(struct buffered_rx) + 0xFF) & ~0xFF;
-
- if (port->buffered + n > HIGH_WATERMARK) {
- fwtty_err_ratelimited(port, "overflowed rx buffer: buffered: %d new: %zu wtrmk: %d\n",
- port->buffered, n, HIGH_WATERMARK);
- return 0;
- }
- buf = kmalloc(size, GFP_ATOMIC);
- if (!buf)
- return 0;
- INIT_LIST_HEAD(&buf->list);
- buf->n = n;
- memcpy(buf->data, d, n);
-
- spin_lock_bh(&port->lock);
- list_add_tail(&buf->list, &port->buf_list);
- port->buffered += n;
- if (port->buffered > port->stats.watermark)
- port->stats.watermark = port->buffered;
- set_bit(BUFFERING_RX, &port->flags);
- spin_unlock_bh(&port->lock);
-
- return n;
-}
-
static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
{
- struct tty_struct *tty;
int c, n = len;
unsigned lsr;
int err = 0;
fwtty_dbg(port, "%d\n", n);
- profile_size_distrib(port->stats.reads, n);
+ fwtty_profile_data(port->stats.reads, n);
if (port->write_only) {
n = 0;
@@ -636,31 +581,24 @@ static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
goto out;
}
- if (!test_bit(BUFFERING_RX, &port->flags)) {
- c = tty_insert_flip_string_fixed_flag(&port->port, data,
- TTY_NORMAL, n);
- if (c > 0)
- tty_flip_buffer_push(&port->port);
- n -= c;
-
- if (n) {
- /* start buffering and throttling */
- n -= fwtty_buffer_rx(port, &data[c], n);
-
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- spin_lock_bh(&port->lock);
- __fwtty_throttle(port, tty);
- spin_unlock_bh(&port->lock);
- tty_kref_put(tty);
- }
- }
- } else
- n -= fwtty_buffer_rx(port, data, n);
+ c = tty_insert_flip_string_fixed_flag(&port->port, data, TTY_NORMAL, n);
+ if (c > 0)
+ tty_flip_buffer_push(&port->port);
+ n -= c;
if (n) {
port->overrun = true;
err = -EIO;
+ fwtty_err_ratelimited(port, "flip buffer overrun\n");
+
+ } else {
+ /* throttle the sender if remaining flip buffer space has
+ * reached high watermark to avoid losing data which may be
+ * in-flight. Since the AR request context is 32k, that much
+ * data may have _already_ been acked.
+ */
+ if (tty_buffer_space_avail(&port->port) < HIGH_WATERMARK)
+ fwtty_throttle_port(port);
}
out:
@@ -821,7 +759,7 @@ static int fwtty_tx(struct fwtty_port *port, bool drain)
if (n == -EAGAIN)
++port->stats.tx_stall;
else if (n == -ENODATA)
- profile_size_distrib(port->stats.txns, 0);
+ fwtty_profile_data(port->stats.txns, 0);
else {
++port->stats.fifo_errs;
fwtty_err_ratelimited(port, "fifo err: %d\n",
@@ -830,7 +768,7 @@ static int fwtty_tx(struct fwtty_port *port, bool drain)
break;
}
- profile_size_distrib(port->stats.txns, txn->dma_pended.len);
+ fwtty_profile_data(port->stats.txns, txn->dma_pended.len);
fwtty_send_txn_async(peer, txn, TCODE_WRITE_BLOCK_REQUEST,
peer->fifo_addr, txn->dma_pended.data,
@@ -1101,20 +1039,13 @@ static int fwtty_port_activate(struct tty_port *tty_port,
static void fwtty_port_shutdown(struct tty_port *tty_port)
{
struct fwtty_port *port = to_port(tty_port, port);
- struct buffered_rx *buf, *next;
/* TODO: cancel outstanding transactions */
cancel_delayed_work_sync(&port->emit_breaks);
cancel_delayed_work_sync(&port->drain);
- cancel_work_sync(&port->push);
spin_lock_bh(&port->lock);
- list_for_each_entry_safe(buf, next, &port->buf_list, list) {
- list_del(&buf->list);
- kfree(buf);
- }
- port->buffered = 0;
port->flags = 0;
port->break_ctl = 0;
port->overrun = 0;
@@ -1184,7 +1115,7 @@ static int fwtty_write(struct tty_struct *tty, const unsigned char *buf, int c)
int n, len;
fwtty_dbg(port, "%d\n", c);
- profile_size_distrib(port->stats.writes, c);
+ fwtty_profile_data(port->stats.writes, c);
spin_lock_bh(&port->lock);
n = dma_fifo_in(&port->tx_fifo, buf, c);
@@ -1262,9 +1193,7 @@ static void fwtty_unthrottle(struct tty_struct *tty)
fwtty_dbg(port, "CRTSCTS: %d\n", (C_CRTSCTS(tty) != 0));
- profile_fifo_avail(port, port->stats.unthrottle);
-
- schedule_work(&port->push);
+ fwtty_profile_fifo(port, port->stats.unthrottle);
spin_lock_bh(&port->lock);
port->mctrl &= ~OOB_RX_THROTTLE;
@@ -1523,15 +1452,14 @@ static void fwtty_debugfs_show_port(struct seq_file *m, struct fwtty_port *port)
seq_printf(m, " dr:%d st:%d err:%d lost:%d", stats.dropped,
stats.tx_stall, stats.fifo_errs, stats.lost);
- seq_printf(m, " pkts:%d thr:%d wtrmk:%d", stats.sent, stats.throttled,
- stats.watermark);
+ seq_printf(m, " pkts:%d thr:%d", stats.sent, stats.throttled);
if (port->port.console) {
seq_puts(m, "\n ");
(*port->fwcon_ops->proc_show)(m, port->con_data);
}
- dump_profile(m, &port->stats);
+ fwtty_dump_profile(m, &port->stats);
}
static void fwtty_debugfs_show_peer(struct seq_file *m, struct fwtty_peer *peer)
@@ -2297,13 +2225,12 @@ static int fwserial_create(struct fw_unit *unit)
port->index = FWTTY_INVALID_INDEX;
port->port.ops = &fwtty_port_ops;
port->serial = serial;
+ tty_buffer_set_limit(&port->port, 128 * 1024);
spin_lock_init(&port->lock);
INIT_DELAYED_WORK(&port->drain, fwtty_drain_tx);
INIT_DELAYED_WORK(&port->emit_breaks, fwtty_emit_breaks);
INIT_WORK(&port->hangup, fwtty_do_hangup);
- INIT_WORK(&port->push, fwtty_pushrx);
- INIT_LIST_HEAD(&port->buf_list);
init_waitqueue_head(&port->wait_tx);
port->max_payload = link_speed_to_max_payload(SCODE_100);
dma_fifo_init(&port->tx_fifo);
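With the driver-private buffered_rx list gone, rx backpressure in fwserial now leans on the tty flip buffers themselves: fwserial_create() raises the per-port buffer limit to 128 KiB via tty_buffer_set_limit(), and fwtty_rx() throttles the remote sender once tty_buffer_space_avail() drops below HIGH_WATERMARK. A condensed sketch of the resulting receive path (names taken from the hunks above):

	/* Sketch of the new rx flow: push into the flip buffer, then throttle
	 * the sender while less than HIGH_WATERMARK of space remains. */
	c = tty_insert_flip_string_fixed_flag(&port->port, data, TTY_NORMAL, n);
	if (c > 0)
		tty_flip_buffer_push(&port->port);
	if (n - c) {			/* flip buffer overran despite the limit */
		port->overrun = true;
		err = -EIO;
	} else if (tty_buffer_space_avail(&port->port) < HIGH_WATERMARK) {
		fwtty_throttle_port(port);	/* sets OOB_RX_THROTTLE, drops RTS */
	}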
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
index 24635014a2ac..54f7f9b9b212 100644
--- a/drivers/staging/fwserial/fwserial.h
+++ b/drivers/staging/fwserial/fwserial.h
@@ -22,14 +22,14 @@
#ifdef FWTTY_PROFILING
#define DISTRIBUTION_MAX_SIZE 8192
#define DISTRIBUTION_MAX_INDEX (ilog2(DISTRIBUTION_MAX_SIZE) + 1)
-static inline void profile_size_distrib(unsigned stat[], unsigned val)
+static inline void fwtty_profile_data(unsigned stat[], unsigned val)
{
int n = (val) ? min(ilog2(val) + 1, DISTRIBUTION_MAX_INDEX) : 0;
++stat[n];
}
#else
#define DISTRIBUTION_MAX_INDEX 0
-#define profile_size_distrib(st, n)
+#define fwtty_profile_data(st, n)
#endif
/* Parameters for both VIRT_CABLE_PLUG & VIRT_CABLE_PLUG_RSP mgmt codes */
@@ -166,7 +166,6 @@ struct stats {
unsigned sent;
unsigned lost;
unsigned throttled;
- unsigned watermark;
unsigned reads[DISTRIBUTION_MAX_INDEX + 1];
unsigned writes[DISTRIBUTION_MAX_INDEX + 1];
unsigned txns[DISTRIBUTION_MAX_INDEX + 1];
@@ -183,12 +182,6 @@ struct fwconsole_ops {
#define FWCON_NOTIFY_ATTACH 1
#define FWCON_NOTIFY_DETACH 2
-struct buffered_rx {
- struct list_head list;
- size_t n;
- unsigned char data[0];
-};
-
/**
* fwtty_port: structure used to track/represent underlying tty_port
* @port: underlying tty_port
@@ -223,11 +216,6 @@ struct buffered_rx {
* The work can race with the writer but concurrent sending is
* prevented with the IN_TX flag. Scheduled under lock to
* limit scheduling when fifo has just been drained.
- * @push: work responsible for pushing buffered rx to the ldisc.
- * rx can become buffered if the tty buffer is filled before the
- * ldisc throttles the sender.
- * @buf_list: list of buffered rx yet to be sent to ldisc
- * @buffered: byte count of buffered rx
* @tx_fifo: fifo used to store & block-up writes for dma to remote
* @max_payload: max bytes transmissible per dma (based on peer's max_payload)
* @status_mask: UART_LSR_* bitmask significant to rx (based on termios)
@@ -267,9 +255,6 @@ struct fwtty_port {
spinlock_t lock;
unsigned mctrl;
struct delayed_work drain;
- struct work_struct push;
- struct list_head buf_list;
- int buffered;
struct dma_fifo tx_fifo;
int max_payload;
unsigned status_mask;
@@ -291,7 +276,6 @@ struct fwtty_port {
/* bit #s for flags field */
#define IN_TX 0
#define STOP_TX 1
-#define BUFFERING_RX 2
/* bitmasks for special mctrl/mstatus bits */
#define OOB_RX_THROTTLE 0x00010000
@@ -307,8 +291,8 @@ struct fwtty_port {
#define FREQ_BREAKS (HZ / 50)
/* Ports are allocated in blocks of num_ports for each fw_card */
-#define MAX_CARD_PORTS 32 /* max # of ports per card */
-#define MAX_TOTAL_PORTS 64 /* max # of ports total */
+#define MAX_CARD_PORTS CONFIG_FWTTY_MAX_CARD_PORTS
+#define MAX_TOTAL_PORTS CONFIG_FWTTY_MAX_TOTAL_PORTS
/* tuning parameters */
#define FWTTY_PORT_TXFIFO_LEN 4096
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index c57a6ba5d010..74a03608b2dd 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -44,18 +44,6 @@
*/
#define DEFAULT_MTU_SIZE 1500
-#define gdm_dev_endian(n) (\
- n->phy_dev->get_endian(n->phy_dev->priv_dev))
-
-#define gdm_lte_hci_send(n, d, l) (\
- n->phy_dev->send_hci_func(n->phy_dev->priv_dev, d, l, NULL, NULL))
-
-#define gdm_lte_sdu_send(n, d, l, c, b, i, t) (\
- n->phy_dev->send_sdu_func(n->phy_dev->priv_dev, d, l, n->pdn_table.dft_eps_id, 0, c, b, i, t))
-
-#define gdm_lte_rcv_with_cb(n, c, b, e) (\
- n->rcv_func(n->priv_dev, c, b, e))
-
#define IP_VERSION_4 4
#define IP_VERSION_6 6
@@ -458,13 +446,11 @@ static int gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
sscanf(dev->name, "lte%d", &idx);
- ret = gdm_lte_sdu_send(nic,
- data_buf,
- data_len,
- tx_complete,
- nic,
- idx,
- nic_type);
+ ret = nic->phy_dev->send_sdu_func(nic->phy_dev->priv_dev,
+ data_buf, data_len,
+ nic->pdn_table.dft_eps_id, 0,
+ tx_complete, nic, idx,
+ nic_type);
if (ret == TX_NO_BUFFER || ret == TX_NO_SPC) {
netif_stop_queue(dev);
@@ -503,14 +489,18 @@ static int gdm_lte_event_send(struct net_device *dev, char *buf, int len)
sscanf(dev->name, "lte%d", &idx);
return netlink_send(lte_event.sock, idx, 0, buf,
- gdm_dev16_to_cpu(gdm_dev_endian(nic), hci->len) + HCI_HEADER_SIZE);
+ gdm_dev16_to_cpu(
+ nic->phy_dev->get_endian(
+ nic->phy_dev->priv_dev), hci->len)
+ + HCI_HEADER_SIZE);
}
static void gdm_lte_event_rcv(struct net_device *dev, u16 type, void *msg, int len)
{
struct nic *nic = netdev_priv(dev);
- gdm_lte_hci_send(nic, msg, len);
+ nic->phy_dev->send_hci_func(nic->phy_dev->priv_dev, msg, len, NULL,
+ NULL);
}
int gdm_lte_event_init(void)
@@ -688,8 +678,14 @@ static void gdm_lte_pdn_table(struct net_device *dev, char *buf, int len)
if (pdn_table->activate) {
nic->pdn_table.activate = pdn_table->activate;
- nic->pdn_table.dft_eps_id = gdm_dev32_to_cpu(gdm_dev_endian(nic), pdn_table->dft_eps_id);
- nic->pdn_table.nic_type = gdm_dev32_to_cpu(gdm_dev_endian(nic), pdn_table->nic_type);
+ nic->pdn_table.dft_eps_id = gdm_dev32_to_cpu(
+ nic->phy_dev->get_endian(
+ nic->phy_dev->priv_dev),
+ pdn_table->dft_eps_id);
+ nic->pdn_table.nic_type = gdm_dev32_to_cpu(
+ nic->phy_dev->get_endian(
+ nic->phy_dev->priv_dev),
+ pdn_table->nic_type);
netdev_info(dev, "pdn activated, nic_type=0x%x\n",
nic->pdn_table.nic_type);
@@ -762,7 +758,7 @@ void start_rx_proc(struct phy_dev *phy_dev)
int i;
for (i = 0; i < MAX_RX_SUBMIT_COUNT; i++)
- gdm_lte_rcv_with_cb(phy_dev, rx_complete, phy_dev, USB_COMPLETE);
+ phy_dev->rcv_func(phy_dev->priv_dev, rx_complete, phy_dev, USB_COMPLETE);
}
static struct net_device_ops gdm_netdev_ops = {
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 62163673976c..2fa3a5a6580f 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -158,7 +158,6 @@ static int up_to_host(struct mux_rx *r)
unsigned int start_flag;
unsigned int payload_size;
unsigned short packet_type;
- int remain;
int dummy_cnt;
u32 packet_size_sum = r->offset;
int index;
@@ -176,8 +175,7 @@ static int up_to_host(struct mux_rx *r)
break;
}
- remain = (MUX_HEADER_SIZE + payload_size) % 4;
- dummy_cnt = remain ? (4-remain) : 0;
+ dummy_cnt = ALIGN(MUX_HEADER_SIZE + payload_size, 4);
if (len - packet_size_sum <
MUX_HEADER_SIZE + payload_size + dummy_cnt) {
@@ -361,7 +359,6 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
struct mux_pkt_header *mux_header;
struct mux_tx *t = NULL;
static u32 seq_num = 1;
- int remain;
int dummy_cnt;
int total_len;
int ret;
@@ -375,8 +372,7 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
spin_lock_irqsave(&mux_dev->write_lock, flags);
- remain = (MUX_HEADER_SIZE + len) % 4;
- dummy_cnt = remain ? (4 - remain) : 0;
+ dummy_cnt = ALIGN(MUX_HEADER_SIZE + len, 4);
total_len = len + MUX_HEADER_SIZE + dummy_cnt;
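ALIGN(x, a) rounds x up to the next multiple of the power-of-two a, i.e. it yields the padded total rather than the number of padding bytes, so the two hunks above leave dummy_cnt holding the aligned length instead of the pad count the old modulo arithmetic produced. The padding form of the idiom would look like this sketch:

	/* Sketch: deriving a pad count from ALIGN(), which returns the
	 * rounded-up size (e.g. ALIGN(14, 4) == 16). */
	size_t total     = MUX_HEADER_SIZE + payload_size;
	size_t dummy_cnt = ALIGN(total, 4) - total;	/* 0..3 padding bytes */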
diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
index c0f7cd75116b..fe47cd3eb2ed 100644
--- a/drivers/staging/gdm724x/gdm_tty.c
+++ b/drivers/staging/gdm724x/gdm_tty.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
index 781134af69d1..33458a583142 100644
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -830,24 +830,19 @@ static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id
if (bInterfaceNumber > NETWORK_INTERFACE) {
pr_info("not a network device\n");
- return -1;
+ return -ENODEV;
}
- phy_dev = kmalloc(sizeof(struct phy_dev), GFP_ATOMIC);
- if (!phy_dev) {
- ret = -ENOMEM;
- goto out;
- }
+ phy_dev = kzalloc(sizeof(struct phy_dev), GFP_KERNEL);
+ if (!phy_dev)
+ return -ENOMEM;
- udev = kmalloc(sizeof(struct lte_udev), GFP_ATOMIC);
+ udev = kzalloc(sizeof(struct lte_udev), GFP_KERNEL);
if (!udev) {
ret = -ENOMEM;
- goto out;
+ goto err_udev;
}
- memset(phy_dev, 0, sizeof(struct phy_dev));
- memset(udev, 0, sizeof(struct lte_udev));
-
phy_dev->priv_dev = (void *)udev;
phy_dev->send_hci_func = gdm_usb_hci_send;
phy_dev->send_sdu_func = gdm_usb_sdu_send;
@@ -858,7 +853,7 @@ static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id
ret = init_usb(udev);
if (ret < 0) {
pr_err("init_usb func failed\n");
- goto out;
+ goto err_init_usb;
}
udev->intf = intf;
@@ -875,23 +870,22 @@ static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id
ret = request_mac_address(udev);
if (ret < 0) {
pr_err("request Mac address failed\n");
- goto out;
+ goto err_mac_address;
}
start_rx_proc(phy_dev);
-out:
-
- if (ret < 0) {
- kfree(phy_dev);
- if (udev) {
- release_usb(udev);
- kfree(udev);
- }
- }
-
usb_get_dev(usbdev);
usb_set_intfdata(intf, phy_dev);
+ return 0;
+
+err_mac_address:
+ release_usb(udev);
+err_init_usb:
+ kfree(udev);
+err_udev:
+ kfree(phy_dev);
+
return ret;
}
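The probe rewrite above also switches the two allocations to kzalloc() with GFP_KERNEL (probe context may sleep, and kzalloc folds in the memset) and converts the single out: label into the usual unwinding-goto idiom, where each label releases exactly what was acquired before the corresponding failure point. Skeleton of that idiom, condensed from the hunks above:

	phy_dev = kzalloc(sizeof(*phy_dev), GFP_KERNEL);
	if (!phy_dev)
		return -ENOMEM;
	udev = kzalloc(sizeof(*udev), GFP_KERNEL);
	if (!udev) {
		ret = -ENOMEM;
		goto err_udev;
	}
	ret = init_usb(udev);
	if (ret < 0)
		goto err_init_usb;
	ret = request_mac_address(udev);
	if (ret < 0)
		goto err_mac_address;
	/* ... success path ... */
	return 0;

	err_mac_address:
		release_usb(udev);
	err_init_usb:
		kfree(udev);
	err_udev:
		kfree(phy_dev);
		return ret;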
diff --git a/drivers/staging/gdm72xx/gdm_qos.c b/drivers/staging/gdm72xx/gdm_qos.c
index cc3692439a5c..50d43ada0936 100644
--- a/drivers/staging/gdm72xx/gdm_qos.c
+++ b/drivers/staging/gdm72xx/gdm_qos.c
@@ -97,7 +97,7 @@ void gdm_qos_init(void *nic_ptr)
struct qos_cb_s *qcb = &nic->qos;
int i;
- for (i = 0 ; i < QOS_MAX; i++) {
+ for (i = 0; i < QOS_MAX; i++) {
INIT_LIST_HEAD(&qcb->qos_list[i]);
qcb->csr[i].qos_buf_count = 0;
qcb->csr[i].enabled = 0;
diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c
index e0cb2ffb41be..cdeffe75496b 100644
--- a/drivers/staging/gdm72xx/gdm_usb.c
+++ b/drivers/staging/gdm72xx/gdm_usb.c
@@ -635,11 +635,14 @@ static int gdm_usb_probe(struct usb_interface *intf,
#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
ret = register_wimax_device(phy_dev, &intf->dev);
+ if (ret)
+ release_usb(udev);
out:
if (ret) {
kfree(phy_dev);
kfree(udev);
+ usb_put_dev(usbdev);
} else {
usb_set_intfdata(intf, phy_dev);
}
@@ -780,9 +783,10 @@ static int k_mode_thread(void *arg)
spin_lock_irqsave(&k_lock, flags2);
}
+ wait_event_interruptible_lock_irq(k_wait,
+ !list_empty(&k_list) || k_mode_stop,
+ k_lock);
spin_unlock_irqrestore(&k_lock, flags2);
-
- interruptible_sleep_on(&k_wait);
}
return 0;
}
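interruptible_sleep_on() is inherently racy, since the wake-up condition can change between the caller's check and the actual sleep, which is why it was being removed from the kernel; wait_event_interruptible_lock_irq() instead evaluates the condition with the supplied spinlock held, dropping and re-taking the lock only around the sleep itself. Sketch of the pattern, using the names from the hunk above:

	/* Sketch: sleep until work is queued or the thread is asked to stop.
	 * The macro is entered and left with k_lock held. */
	spin_lock_irqsave(&k_lock, flags);
	wait_event_interruptible_lock_irq(k_wait,
					  !list_empty(&k_list) || k_mode_stop,
					  k_lock);
	/* dequeue from k_list here, still under k_lock */
	spin_unlock_irqrestore(&k_lock, flags);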
diff --git a/drivers/staging/gdm72xx/sdio_boot.c b/drivers/staging/gdm72xx/sdio_boot.c
index 4302fcbdfdc3..cbe5dcfc2ac9 100644
--- a/drivers/staging/gdm72xx/sdio_boot.c
+++ b/drivers/staging/gdm72xx/sdio_boot.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
diff --git a/drivers/staging/goldfish/goldfish_nand.c b/drivers/staging/goldfish/goldfish_nand.c
index 81e2ad4038fe..eca0873098cd 100644
--- a/drivers/staging/goldfish/goldfish_nand.c
+++ b/drivers/staging/goldfish/goldfish_nand.c
@@ -22,7 +22,6 @@
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/vmalloc.h>
-#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/platform_device.h>
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index 35154d60faf6..c9fedb79e3a2 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -77,7 +77,6 @@ struct iio_channel_info {
uint64_t mask;
unsigned be;
unsigned is_signed;
- unsigned enabled;
unsigned location;
};
@@ -335,6 +334,7 @@ inline int build_channel_array(const char *device_dir,
while (ent = readdir(dp), ent != NULL) {
if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
"_en") == 0) {
+ int current_enabled = 0;
current = &(*ci_array)[count++];
ret = asprintf(&filename,
"%s/%s", scan_el_dir, ent->d_name);
@@ -350,10 +350,10 @@ inline int build_channel_array(const char *device_dir,
ret = -errno;
goto error_cleanup_array;
}
- fscanf(sysfsfp, "%u", &current->enabled);
+ fscanf(sysfsfp, "%u", &current_enabled);
fclose(sysfsfp);
- if (!current->enabled) {
+ if (!current_enabled) {
free(filename);
count--;
continue;
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
index 4c9364b63c77..6f38ca95f9bb 100644
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ b/drivers/staging/iio/accel/adis16220_core.c
@@ -439,13 +439,13 @@ static int adis16220_probe(struct spi_device *spi)
indio_dev->channels = adis16220_channels;
indio_dev->num_channels = ARRAY_SIZE(adis16220_channels);
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret)
return ret;
ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &accel_bin);
if (ret)
- goto error_unregister_dev;
+ return ret;
ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc1_bin);
if (ret)
@@ -470,8 +470,6 @@ error_rm_adc1_bin:
sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
error_rm_accel_bin:
sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
-error_unregister_dev:
- iio_device_unregister(indio_dev);
return ret;
}
@@ -482,7 +480,6 @@ static int adis16220_remove(struct spi_device *spi)
sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin);
sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
- iio_device_unregister(indio_dev);
return 0;
}
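devm_iio_device_register() ties the IIO unregistration to the lifetime of the owning struct device, so drivers whose remove path did nothing else can drop the explicit iio_device_unregister() call and, as in several hunks below, the whole .remove callback. Minimal sketch of a fully devm-managed probe (the foo names are hypothetical):

	static int foo_probe(struct spi_device *spi)
	{
		struct iio_dev *indio_dev;

		indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(struct foo_state));
		if (!indio_dev)
			return -ENOMEM;

		indio_dev->name = spi_get_device_id(spi)->name;
		indio_dev->info = &foo_info;
		indio_dev->modes = INDIO_DIRECT_MODE;

		/* automatically unregistered when the SPI device is unbound */
		return devm_iio_device_register(&spi->dev, indio_dev);
	}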
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index 735c0a34fa93..898653c09279 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -676,10 +676,10 @@ static const struct attribute_group lis3l02dq_attribute_group = {
static const struct iio_info lis3l02dq_info = {
.read_raw = &lis3l02dq_read_raw,
.write_raw = &lis3l02dq_write_raw,
- .read_event_value_new = &lis3l02dq_read_thresh,
- .write_event_value_new = &lis3l02dq_write_thresh,
- .write_event_config_new = &lis3l02dq_write_event_config,
- .read_event_config_new = &lis3l02dq_read_event_config,
+ .read_event_value = &lis3l02dq_read_thresh,
+ .write_event_value = &lis3l02dq_write_thresh,
+ .write_event_config = &lis3l02dq_write_event_config,
+ .read_event_config = &lis3l02dq_read_event_config,
.driver_module = THIS_MODULE,
.attrs = &lis3l02dq_attribute_group,
};
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index c49e6ef9d05f..7f6ccdfaf168 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -1126,20 +1126,20 @@ static const struct iio_info sca3000_info = {
.attrs = &sca3000_attribute_group,
.read_raw = &sca3000_read_raw,
.event_attrs = &sca3000_event_attribute_group,
- .read_event_value_new = &sca3000_read_thresh,
- .write_event_value_new = &sca3000_write_thresh,
- .read_event_config_new = &sca3000_read_event_config,
- .write_event_config_new = &sca3000_write_event_config,
+ .read_event_value = &sca3000_read_thresh,
+ .write_event_value = &sca3000_write_thresh,
+ .read_event_config = &sca3000_read_event_config,
+ .write_event_config = &sca3000_write_event_config,
.driver_module = THIS_MODULE,
};
static const struct iio_info sca3000_info_with_temp = {
.attrs = &sca3000_attribute_group_with_temp,
.read_raw = &sca3000_read_raw,
- .read_event_value_new = &sca3000_read_thresh,
- .write_event_value_new = &sca3000_write_thresh,
- .read_event_config_new = &sca3000_read_event_config,
- .write_event_config_new = &sca3000_write_event_config,
+ .read_event_value = &sca3000_read_thresh,
+ .write_event_value = &sca3000_write_thresh,
+ .read_event_config = &sca3000_read_event_config,
+ .write_event_config = &sca3000_write_event_config,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index e3d643001952..363329808a4f 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -103,6 +103,7 @@ config AD7280
config LPC32XX_ADC
tristate "NXP LPC32XX ADC"
depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on HAS_IOMEM
help
Say yes here to build support for the integrated ADC inside the
LPC32XX SoC. Note that this feature uses the same hardware as the
@@ -128,6 +129,7 @@ config MXS_LRADC
config SPEAR_ADC
tristate "ST SPEAr ADC"
depends on PLAT_SPEAR || COMPILE_TEST
+ depends on HAS_IOMEM
help
Say yes here to build support for the integrated ADC inside the
ST SPEAr SoC. Provides direct access via sysfs.
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index 8209fa542a8a..1ac11f64827c 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -134,6 +134,8 @@ struct ad7280_state {
unsigned char aux_threshhigh;
unsigned char aux_threshlow;
unsigned char cb_mask[AD7280A_MAX_CHAIN];
+
+ __be32 buf[2] ____cacheline_aligned;
};
static void ad7280_crc8_build_table(unsigned char *crc_tab)
@@ -189,22 +191,22 @@ static void ad7280_delay(struct ad7280_state *st)
msleep(1);
}
-static int __ad7280_read32(struct spi_device *spi, unsigned *val)
+static int __ad7280_read32(struct ad7280_state *st, unsigned *val)
{
- unsigned rx_buf, tx_buf = cpu_to_be32(AD7280A_READ_TXVAL);
int ret;
-
struct spi_transfer t = {
- .tx_buf = &tx_buf,
- .rx_buf = &rx_buf,
+ .tx_buf = &st->buf[0],
+ .rx_buf = &st->buf[1],
.len = 4,
};
- ret = spi_sync_transfer(spi, &t, 1);
+ st->buf[0] = cpu_to_be32(AD7280A_READ_TXVAL);
+
+ ret = spi_sync_transfer(st->spi, &t, 1);
if (ret)
return ret;
- *val = be32_to_cpu(rx_buf);
+ *val = be32_to_cpu(st->buf[1]);
return 0;
}
@@ -216,9 +218,9 @@ static int ad7280_write(struct ad7280_state *st, unsigned devaddr,
(val & 0xFF) << 13 | all << 12);
reg |= ad7280_calc_crc8(st->crc_tab, reg >> 11) << 3 | 0x2;
- reg = cpu_to_be32(reg);
+ st->buf[0] = cpu_to_be32(reg);
- return spi_write(st->spi, &reg, 4);
+ return spi_write(st->spi, &st->buf[0], 4);
}
static int ad7280_read(struct ad7280_state *st, unsigned devaddr,
@@ -248,7 +250,7 @@ static int ad7280_read(struct ad7280_state *st, unsigned devaddr,
if (ret)
return ret;
- __ad7280_read32(st->spi, &tmp);
+ __ad7280_read32(st, &tmp);
if (ad7280_check_crc(st, tmp))
return -EIO;
@@ -286,7 +288,7 @@ static int ad7280_read_channel(struct ad7280_state *st, unsigned devaddr,
ad7280_delay(st);
- __ad7280_read32(st->spi, &tmp);
+ __ad7280_read32(st, &tmp);
if (ad7280_check_crc(st, tmp))
return -EIO;
@@ -319,7 +321,7 @@ static int ad7280_read_all_channels(struct ad7280_state *st, unsigned cnt,
ad7280_delay(st);
for (i = 0; i < cnt; i++) {
- __ad7280_read32(st->spi, &tmp);
+ __ad7280_read32(st, &tmp);
if (ad7280_check_crc(st, tmp))
return -EIO;
@@ -362,7 +364,7 @@ static int ad7280_chain_setup(struct ad7280_state *st)
return ret;
for (n = 0; n <= AD7280A_MAX_CHAIN; n++) {
- __ad7280_read32(st->spi, &val);
+ __ad7280_read32(st, &val);
if (val == 0)
return n - 1;
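The ad7280a change above follows the common IIO/SPI convention for DMA-safe transfer buffers: spi_sync()-style transfers may be backed by DMA, so the buffers must not live on the stack and should not share a cache line with other driver state; placing them at the end of the kmalloc'd state structure with ____cacheline_aligned satisfies both. Sketch of the convention with a hypothetical driver:

	struct foo_state {
		struct spi_device	*spi;
		struct mutex		lock;
		/* DMA-safe, in their own cache line, last in the struct */
		__be32			buf[2] ____cacheline_aligned;
	};

	static int foo_read32(struct foo_state *st, u32 *val)
	{
		struct spi_transfer t = {
			.tx_buf = &st->buf[0],
			.rx_buf = &st->buf[1],
			.len = 4,
		};
		int ret;

		st->buf[0] = cpu_to_be32(FOO_READ_CMD);	/* hypothetical command */
		ret = spi_sync_transfer(st->spi, &t, 1);
		if (ret)
			return ret;
		*val = be32_to_cpu(st->buf[1]);
		return 0;
	}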
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
index d13f8aeeb62f..357cef2a6f4c 100644
--- a/drivers/staging/iio/adc/ad7291.c
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -452,10 +452,10 @@ static const struct iio_chan_spec ad7291_channels[] = {
static const struct iio_info ad7291_info = {
.read_raw = &ad7291_read_raw,
- .read_event_config_new = &ad7291_read_event_config,
- .write_event_config_new = &ad7291_write_event_config,
- .read_event_value_new = &ad7291_read_event_value,
- .write_event_value_new = &ad7291_write_event_value,
+ .read_event_config = &ad7291_read_event_config,
+ .write_event_config = &ad7291_write_event_config,
+ .read_event_value = &ad7291_read_event_value,
+ .write_event_value = &ad7291_write_event_value,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index 2083673a79ca..f0f05f195d2c 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -239,7 +239,12 @@ static const struct attribute_group ad7606_attribute_group_range = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
.scan_index = num, \
- .scan_type = IIO_ST('s', 16, 16, 0), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
}
static const struct iio_chan_spec ad7606_8_channels[] = {
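The scan_type conversions in this and the following IIO hunks expand the legacy IIO_ST() helper into designated initializers; IIO_ST(sign, realbits, storagebits, shift) filled the same four fields, and the expanded form can additionally name .endianness (IIO_CPU is the default when it is left unset). Equivalence sketch:

	/* Old form and its expansion for a signed 16-bit, unshifted channel. */
	.scan_type = IIO_ST('s', 16, 16, 0),

	.scan_type = {
		.sign = 's',
		.realbits = 16,
		.storagebits = 16,
		.shift = 0,
		.endianness = IIO_CPU,
	},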
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 9f48e5c74eed..2369cf28412e 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -412,7 +412,7 @@ static int ad7816_probe(struct spi_device *spi_dev)
return ret;
}
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(&spi_dev->dev, indio_dev);
if (ret)
return ret;
@@ -422,15 +422,6 @@ static int ad7816_probe(struct spi_device *spi_dev)
return 0;
}
-static int ad7816_remove(struct spi_device *spi_dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(&spi_dev->dev);
-
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-
static const struct spi_device_id ad7816_id[] = {
{ "ad7816", 0 },
{ "ad7817", 0 },
@@ -446,7 +437,6 @@ static struct spi_driver ad7816_driver = {
.owner = THIS_MODULE,
},
.probe = ad7816_probe,
- .remove = ad7816_remove,
.id_table = ad7816_id,
};
module_spi_driver(ad7816_driver);
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 9428be82b655..5708ffc62aec 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -377,9 +377,9 @@ static const struct iio_info ad7991_info = {
static const struct iio_info ad7993_4_7_8_info = {
.read_raw = &ad799x_read_raw,
.event_attrs = &ad799x_event_attrs_group,
- .read_event_config_new = &ad799x_read_event_config,
- .read_event_value_new = &ad799x_read_event_value,
- .write_event_value_new = &ad799x_write_event_value,
+ .read_event_config = &ad799x_read_event_config,
+ .read_event_value = &ad799x_read_event_value,
+ .write_event_value = &ad799x_write_event_value,
.driver_module = THIS_MODULE,
.update_scan_mode = ad7997_8_update_scan_mode,
};
@@ -393,7 +393,7 @@ static const struct iio_event_spec ad799x_events[] = {
}, {
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_FALLING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE),
}, {
.type = IIO_EV_TYPE_THRESH,
@@ -409,7 +409,13 @@ static const struct iio_event_spec ad799x_events[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.scan_index = (_index), \
- .scan_type = IIO_ST('u', _realbits, 16, 12 - (_realbits)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_realbits), \
+ .storagebits = 16, \
+ .shift = 12 - (_realbits), \
+ .endianness = IIO_BE, \
+ }, \
.event_spec = _ev_spec, \
.num_event_specs = _num_ev_spec, \
}
@@ -588,7 +594,8 @@ static int ad799x_probe(struct i2c_client *client,
return 0;
error_free_irq:
- free_irq(client->irq, indio_dev);
+ if (client->irq > 0)
+ free_irq(client->irq, indio_dev);
error_cleanup_ring:
ad799x_ring_cleanup(indio_dev);
error_disable_reg:
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
index ef0a21d8ce15..a876ce755351 100644
--- a/drivers/staging/iio/adc/lpc32xx_adc.c
+++ b/drivers/staging/iio/adc/lpc32xx_adc.c
@@ -183,7 +183,7 @@ static int lpc32xx_adc_probe(struct platform_device *pdev)
iodev->channels = lpc32xx_adc_iio_channels;
iodev->num_channels = ARRAY_SIZE(lpc32xx_adc_iio_channels);
- retval = iio_device_register(iodev);
+ retval = devm_iio_device_register(&pdev->dev, iodev);
if (retval)
return retval;
@@ -192,15 +192,6 @@ static int lpc32xx_adc_probe(struct platform_device *pdev)
return 0;
}
-static int lpc32xx_adc_remove(struct platform_device *pdev)
-{
- struct iio_dev *iodev = platform_get_drvdata(pdev);
-
- iio_device_unregister(iodev);
-
- return 0;
-}
-
#ifdef CONFIG_OF
static const struct of_device_id lpc32xx_adc_match[] = {
{ .compatible = "nxp,lpc3220-adc" },
@@ -211,7 +202,6 @@ MODULE_DEVICE_TABLE(of, lpc32xx_adc_match);
static struct platform_driver lpc32xx_adc_driver = {
.probe = lpc32xx_adc_probe,
- .remove = lpc32xx_adc_remove,
.driver = {
.name = MOD_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index e2dd7830b320..7fc66a6a6e36 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -38,6 +38,7 @@
#include <linux/clk.h>
#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
@@ -111,16 +112,59 @@ static const char * const mx28_lradc_irq_names[] = {
struct mxs_lradc_of_config {
const int irq_count;
const char * const *irq_name;
+ const uint32_t *vref_mv;
+};
+
+#define VREF_MV_BASE 1850
+
+static const uint32_t mx23_vref_mv[LRADC_MAX_TOTAL_CHANS] = {
+ VREF_MV_BASE, /* CH0 */
+ VREF_MV_BASE, /* CH1 */
+ VREF_MV_BASE, /* CH2 */
+ VREF_MV_BASE, /* CH3 */
+ VREF_MV_BASE, /* CH4 */
+ VREF_MV_BASE, /* CH5 */
+ VREF_MV_BASE * 2, /* CH6 VDDIO */
+ VREF_MV_BASE * 4, /* CH7 VBATT */
+ VREF_MV_BASE, /* CH8 Temp sense 0 */
+ VREF_MV_BASE, /* CH9 Temp sense 1 */
+ VREF_MV_BASE, /* CH10 */
+ VREF_MV_BASE, /* CH11 */
+ VREF_MV_BASE, /* CH12 USB_DP */
+ VREF_MV_BASE, /* CH13 USB_DN */
+ VREF_MV_BASE, /* CH14 VBG */
+ VREF_MV_BASE * 4, /* CH15 VDD5V */
+};
+
+static const uint32_t mx28_vref_mv[LRADC_MAX_TOTAL_CHANS] = {
+ VREF_MV_BASE, /* CH0 */
+ VREF_MV_BASE, /* CH1 */
+ VREF_MV_BASE, /* CH2 */
+ VREF_MV_BASE, /* CH3 */
+ VREF_MV_BASE, /* CH4 */
+ VREF_MV_BASE, /* CH5 */
+ VREF_MV_BASE, /* CH6 */
+ VREF_MV_BASE * 4, /* CH7 VBATT */
+ VREF_MV_BASE, /* CH8 Temp sense 0 */
+ VREF_MV_BASE, /* CH9 Temp sense 1 */
+ VREF_MV_BASE * 2, /* CH10 VDDIO */
+ VREF_MV_BASE, /* CH11 VTH */
+ VREF_MV_BASE * 2, /* CH12 VDDA */
+ VREF_MV_BASE, /* CH13 VDDD */
+ VREF_MV_BASE, /* CH14 VBG */
+ VREF_MV_BASE * 4, /* CH15 VDD5V */
};
static const struct mxs_lradc_of_config mxs_lradc_of_config[] = {
[IMX23_LRADC] = {
.irq_count = ARRAY_SIZE(mx23_lradc_irq_names),
.irq_name = mx23_lradc_irq_names,
+ .vref_mv = mx23_vref_mv,
},
[IMX28_LRADC] = {
.irq_count = ARRAY_SIZE(mx28_lradc_irq_names),
.irq_name = mx28_lradc_irq_names,
+ .vref_mv = mx28_vref_mv,
},
};
@@ -141,6 +185,16 @@ enum lradc_ts_plate {
LRADC_SAMPLE_VALID,
};
+enum mxs_lradc_divbytwo {
+ MXS_LRADC_DIV_DISABLED = 0,
+ MXS_LRADC_DIV_ENABLED,
+};
+
+struct mxs_lradc_scale {
+ unsigned int integer;
+ unsigned int nano;
+};
+
struct mxs_lradc {
struct device *dev;
void __iomem *base;
@@ -155,6 +209,10 @@ struct mxs_lradc {
struct completion completion;
+ const uint32_t *vref_mv;
+ struct mxs_lradc_scale scale_avail[LRADC_MAX_TOTAL_CHANS][2];
+ unsigned long is_divided;
+
/*
* Touchscreen LRADC channels receive a private slot in the CTRL4
* register, slot #7. Therefore only 7 slots instead of 8 in the
@@ -243,6 +301,7 @@ struct mxs_lradc {
#define LRADC_CTRL1_LRADC_IRQ_OFFSET 0
#define LRADC_CTRL2 0x20
+#define LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET 24
#define LRADC_CTRL2_TEMPSENSE_PWD (1 << 15)
#define LRADC_STATUS 0x40
@@ -759,20 +818,11 @@ static void mxs_lradc_handle_touch(struct mxs_lradc *lradc)
/*
* Raw I/O operations
*/
-static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
- const struct iio_chan_spec *chan,
- int *val, int *val2, long m)
+static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
{
struct mxs_lradc *lradc = iio_priv(iio_dev);
int ret;
- if (m != IIO_CHAN_INFO_RAW)
- return -EINVAL;
-
- /* Check for invalid channel */
- if (chan->channel > LRADC_MAX_TOTAL_CHANS)
- return -EINVAL;
-
/*
* See if there is no buffered operation in progess. If there is, simply
* bail out. This can be improved to support both buffered and raw IO at
@@ -797,7 +847,7 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
/* Clean the slot's previous content, then set new one. */
mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0), LRADC_CTRL4);
- mxs_lradc_reg_set(lradc, chan->channel, LRADC_CTRL4);
+ mxs_lradc_reg_set(lradc, chan, LRADC_CTRL4);
mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(0));
@@ -824,9 +874,202 @@ err:
return ret;
}
+static int mxs_lradc_read_temp(struct iio_dev *iio_dev, int *val)
+{
+ int ret, min, max;
+
+ ret = mxs_lradc_read_single(iio_dev, 8, &min);
+ if (ret != IIO_VAL_INT)
+ return ret;
+
+ ret = mxs_lradc_read_single(iio_dev, 9, &max);
+ if (ret != IIO_VAL_INT)
+ return ret;
+
+ *val = max - min;
+
+ return IIO_VAL_INT;
+}
+
+static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long m)
+{
+ struct mxs_lradc *lradc = iio_priv(iio_dev);
+
+ /* Check for invalid channel */
+ if (chan->channel > LRADC_MAX_TOTAL_CHANS)
+ return -EINVAL;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_TEMP)
+ return mxs_lradc_read_temp(iio_dev, val);
+
+ return mxs_lradc_read_single(iio_dev, chan->channel, val);
+
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_TEMP) {
+ /* From the datasheet, we have to multiply by 1.012 and
+ * divide by 4
+ */
+ *val = 0;
+ *val2 = 253000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ *val = lradc->vref_mv[chan->channel];
+ *val2 = chan->scan_type.realbits -
+ test_bit(chan->channel, &lradc->is_divided);
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_OFFSET:
+ if (chan->type == IIO_TEMP) {
+ /* The calculated value from the ADC is in Kelvin, we
+ * want Celsius for hwmon so the offset is
+ * -272.15 * scale
+ */
+ *val = -1075;
+ *val2 = 691699;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return -EINVAL;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int mxs_lradc_write_raw(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long m)
+{
+ struct mxs_lradc *lradc = iio_priv(iio_dev);
+ struct mxs_lradc_scale *scale_avail =
+ lradc->scale_avail[chan->channel];
+ int ret;
+
+ ret = mutex_trylock(&lradc->lock);
+ if (!ret)
+ return -EBUSY;
+
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
+ ret = -EINVAL;
+ if (val == scale_avail[MXS_LRADC_DIV_DISABLED].integer &&
+ val2 == scale_avail[MXS_LRADC_DIV_DISABLED].nano) {
+ /* divider by two disabled */
+ writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
+ lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_CLR);
+ clear_bit(chan->channel, &lradc->is_divided);
+ ret = 0;
+ } else if (val == scale_avail[MXS_LRADC_DIV_ENABLED].integer &&
+ val2 == scale_avail[MXS_LRADC_DIV_ENABLED].nano) {
+ /* divider by two enabled */
+ writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
+ lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_SET);
+ set_bit(chan->channel, &lradc->is_divided);
+ ret = 0;
+ }
+
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&lradc->lock);
+
+ return ret;
+}
+
+static int mxs_lradc_write_raw_get_fmt(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ long m)
+{
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static ssize_t mxs_lradc_show_scale_available_ch(struct device *dev,
+ struct device_attribute *attr,
+ char *buf,
+ int ch)
+{
+ struct iio_dev *iio = dev_to_iio_dev(dev);
+ struct mxs_lradc *lradc = iio_priv(iio);
+ int i, len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(lradc->scale_avail[ch]); i++)
+ len += sprintf(buf + len, "%d.%09u ",
+ lradc->scale_avail[ch][i].integer,
+ lradc->scale_avail[ch][i].nano);
+
+ len += sprintf(buf + len, "\n");
+
+ return len;
+}
+
+static ssize_t mxs_lradc_show_scale_available(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev_attr *iio_attr = to_iio_dev_attr(attr);
+
+ return mxs_lradc_show_scale_available_ch(dev, attr, buf,
+ iio_attr->address);
+}
+
+#define SHOW_SCALE_AVAILABLE_ATTR(ch) \
+static IIO_DEVICE_ATTR(in_voltage##ch##_scale_available, S_IRUGO, \
+ mxs_lradc_show_scale_available, NULL, ch)
+
+SHOW_SCALE_AVAILABLE_ATTR(0);
+SHOW_SCALE_AVAILABLE_ATTR(1);
+SHOW_SCALE_AVAILABLE_ATTR(2);
+SHOW_SCALE_AVAILABLE_ATTR(3);
+SHOW_SCALE_AVAILABLE_ATTR(4);
+SHOW_SCALE_AVAILABLE_ATTR(5);
+SHOW_SCALE_AVAILABLE_ATTR(6);
+SHOW_SCALE_AVAILABLE_ATTR(7);
+SHOW_SCALE_AVAILABLE_ATTR(10);
+SHOW_SCALE_AVAILABLE_ATTR(11);
+SHOW_SCALE_AVAILABLE_ATTR(12);
+SHOW_SCALE_AVAILABLE_ATTR(13);
+SHOW_SCALE_AVAILABLE_ATTR(14);
+SHOW_SCALE_AVAILABLE_ATTR(15);
+
+static struct attribute *mxs_lradc_attributes[] = {
+ &iio_dev_attr_in_voltage0_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage1_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage2_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage3_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage4_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage5_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage6_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage7_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage10_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage11_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage12_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage13_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage14_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage15_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group mxs_lradc_attribute_group = {
+ .attrs = mxs_lradc_attributes,
+};
+
static const struct iio_info mxs_lradc_iio_info = {
.driver_module = THIS_MODULE,
.read_raw = mxs_lradc_read_raw,
+ .write_raw = mxs_lradc_write_raw,
+ .write_raw_get_fmt = mxs_lradc_write_raw_get_fmt,
+ .attrs = &mxs_lradc_attribute_group,
};
static int mxs_lradc_ts_open(struct input_dev *dev)
@@ -1133,8 +1376,10 @@ static const struct iio_buffer_setup_ops mxs_lradc_buffer_ops = {
.type = (chan_type), \
.indexed = 1, \
.scan_index = (idx), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
.channel = (idx), \
+ .address = (idx), \
.scan_type = { \
.sign = 'u', \
.realbits = LRADC_RESOLUTION, \
@@ -1151,8 +1396,17 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
MXS_ADC_CHAN(5, IIO_VOLTAGE),
MXS_ADC_CHAN(6, IIO_VOLTAGE),
MXS_ADC_CHAN(7, IIO_VOLTAGE), /* VBATT */
- MXS_ADC_CHAN(8, IIO_TEMP), /* Temp sense 0 */
- MXS_ADC_CHAN(9, IIO_TEMP), /* Temp sense 1 */
+ /* Combined Temperature sensors */
+ {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .scan_index = 8,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .channel = 8,
+ .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
+ },
MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */
MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */
MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */
@@ -1271,7 +1525,8 @@ static int mxs_lradc_probe(struct platform_device *pdev)
struct iio_dev *iio;
struct resource *iores;
int ret = 0, touch_ret;
- int i;
+ int i, s;
+ unsigned int scale_uv;
/* Allocate the IIO device. */
iio = devm_iio_device_alloc(dev, sizeof(*lradc));
@@ -1316,6 +1571,8 @@ static int mxs_lradc_probe(struct platform_device *pdev)
return ret;
}
+ lradc->vref_mv = of_cfg->vref_mv;
+
platform_set_drvdata(pdev, iio);
init_completion(&lradc->completion);
@@ -1339,6 +1596,26 @@ static int mxs_lradc_probe(struct platform_device *pdev)
if (ret)
goto err_trig;
+ /* Populate available ADC input ranges */
+ for (i = 0; i < LRADC_MAX_TOTAL_CHANS; i++) {
+ for (s = 0; s < ARRAY_SIZE(lradc->scale_avail[i]); s++) {
+ /*
+ * [s=0] = optional divider by two disabled (default)
+ * [s=1] = optional divider by two enabled
+ *
+ * The scale is calculated as:
+ * Vref >> (realbits - s)
+ * which doubles the scale for the second entry
+ * (s = 1, divider enabled) of the array.
+ */
+ scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >>
+ (LRADC_RESOLUTION - s);
+ lradc->scale_avail[i][s].nano =
+ do_div(scale_uv, 100000000) * 10;
+ lradc->scale_avail[i][s].integer = scale_uv;
+ }
+ }
+
/* Configure the hardware. */
ret = mxs_lradc_hw_init(lradc);
if (ret)
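To make the integer/nano split in the scale_avail loop above concrete, here is a standalone user-space sketch of the same arithmetic. The 1850 mV reference and 12-bit resolution are assumptions chosen for illustration, not values taken from this patch:

#include <stdint.h>
#include <stdio.h>

/* Standalone illustration of the scale_avail computation above. */
int main(void)
{
	unsigned int vref_mv = 1850;	/* assumed reference voltage, mV */
	unsigned int realbits = 12;	/* assumed LRADC_RESOLUTION */
	int s;

	for (s = 0; s < 2; s++) {
		/* s = 0: divider by two disabled, s = 1: divider enabled */
		uint64_t scale = ((uint64_t)vref_mv * 100000000ULL) >>
				 (realbits - s);
		unsigned int integer = scale / 100000000ULL;
		unsigned int nano = (scale % 100000000ULL) * 10;

		printf("%u.%09u\n", integer, nano);
	}
	return 0;
}

With those assumed inputs this prints roughly 0.451660150 and 0.903320310, i.e. the millivolts per LSB without and with the optional divider by two.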
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 0feea5541d02..75ddd4f801a3 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -108,11 +108,6 @@ static int adt7316_i2c_probe(struct i2c_client *client,
return adt7316_probe(&client->dev, &bus, id->name);
}
-static int adt7316_i2c_remove(struct i2c_client *client)
-{
- return adt7316_remove(&client->dev);
-}
-
static const struct i2c_device_id adt7316_i2c_id[] = {
{ "adt7316", 0 },
{ "adt7317", 0 },
@@ -132,7 +127,6 @@ static struct i2c_driver adt7316_driver = {
.owner = THIS_MODULE,
},
.probe = adt7316_i2c_probe,
- .remove = adt7316_i2c_remove,
.id_table = adt7316_i2c_id,
};
module_i2c_driver(adt7316_driver);
diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c
index 7f4f0a8245b4..e480abb72e4a 100644
--- a/drivers/staging/iio/addac/adt7316-spi.c
+++ b/drivers/staging/iio/addac/adt7316-spi.c
@@ -116,11 +116,6 @@ static int adt7316_spi_probe(struct spi_device *spi_dev)
return adt7316_probe(&spi_dev->dev, &bus, spi_dev->modalias);
}
-static int adt7316_spi_remove(struct spi_device *spi_dev)
-{
- return adt7316_remove(&spi_dev->dev);
-}
-
static const struct spi_device_id adt7316_spi_id[] = {
{ "adt7316", 0 },
{ "adt7317", 0 },
@@ -140,7 +135,6 @@ static struct spi_driver adt7316_driver = {
.owner = THIS_MODULE,
},
.probe = adt7316_spi_probe,
- .remove = adt7316_spi_remove,
.id_table = adt7316_spi_id,
};
module_spi_driver(adt7316_driver);
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 80266e801d56..16a8201228ff 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -2166,7 +2166,7 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
if (ret)
return -EIO;
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret)
return ret;
@@ -2177,16 +2177,6 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
}
EXPORT_SYMBOL(adt7316_probe);
-int adt7316_remove(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
-
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-EXPORT_SYMBOL(adt7316_remove);
-
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Analog Devices ADT7316/7/8 and ADT7516/7/9 digital"
" temperature sensor, ADC and DAC driver");
diff --git a/drivers/staging/iio/addac/adt7316.h b/drivers/staging/iio/addac/adt7316.h
index 4d3efff46ae7..2dbfb499528d 100644
--- a/drivers/staging/iio/addac/adt7316.h
+++ b/drivers/staging/iio/addac/adt7316.h
@@ -31,6 +31,5 @@ extern const struct dev_pm_ops adt7316_pm_ops;
#define ADT7316_PM_OPS NULL
#endif
int adt7316_probe(struct device *dev, struct adt7316_bus *bus, const char *name);
-int adt7316_remove(struct device *dev);
#endif
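Several of the removals above, and in the isl29018, adis16060 and ad2s1200 hunks below, follow the same pattern: once registration is switched to devm_iio_device_register(), the IIO core unregisters the device automatically when the driver is unbound, so the now-empty .remove callbacks can be dropped. A minimal probe skeleton using this pattern looks roughly like this (an illustrative sketch with made-up names, not code from any of these drivers):

struct example_state {
	struct i2c_client *client;
	/* ... driver private data ... */
};

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct iio_dev *indio_dev;
	struct example_state *st;

	/* Device-managed allocation: freed automatically on unbind. */
	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	st = iio_priv(indio_dev);
	st->client = client;

	/* ... set indio_dev->info, ->channels, ->name, ->modes ... */

	/*
	 * Device-managed registration: the core unregisters the device
	 * when the driver is detached, so no .remove callback is needed.
	 */
	return devm_iio_device_register(&client->dev, indio_dev);
}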
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index 7e7f9890a642..047af2376300 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -576,10 +576,10 @@ static const struct iio_info ad7150_info = {
.event_attrs = &ad7150_event_attribute_group,
.driver_module = THIS_MODULE,
.read_raw = &ad7150_read_raw,
- .read_event_config_new = &ad7150_read_event_config,
- .write_event_config_new = &ad7150_write_event_config,
- .read_event_value_new = &ad7150_read_event_value,
- .write_event_value_new = &ad7150_write_event_value,
+ .read_event_config = &ad7150_read_event_config,
+ .write_event_config = &ad7150_write_event_config,
+ .read_event_value = &ad7150_read_event_value,
+ .write_event_value = &ad7150_write_event_value,
};
/*
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 862d68d99630..cbb1588d591f 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -105,6 +105,11 @@ struct ad7746_chip_info {
u8 vt_setup;
u8 capdac[2][2];
s8 capdac_set;
+
+ union {
+ __be32 d32;
+ u8 d8[4];
+ } data ____cacheline_aligned;
};
enum ad7746_chan {
@@ -566,11 +571,6 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
int ret, delay;
u8 regval, reg;
- union {
- u32 d32;
- u8 d8[4];
- } data;
-
mutex_lock(&indio_dev->mlock);
switch (mask) {
@@ -591,12 +591,12 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
/* Now read the actual register */
ret = i2c_smbus_read_i2c_block_data(chip->client,
- chan->address >> 8, 3, &data.d8[1]);
+ chan->address >> 8, 3, &chip->data.d8[1]);
if (ret < 0)
goto out;
- *val = (be32_to_cpu(data.d32) & 0xFFFFFF) - 0x800000;
+ *val = (be32_to_cpu(chip->data.d32) & 0xFFFFFF) - 0x800000;
switch (chan->type) {
case IIO_TEMP:
diff --git a/drivers/staging/iio/frequency/ad9832.h b/drivers/staging/iio/frequency/ad9832.h
index c5b701f8aabb..386f4dc8c9a1 100644
--- a/drivers/staging/iio/frequency/ad9832.h
+++ b/drivers/staging/iio/frequency/ad9832.h
@@ -92,9 +92,9 @@ struct ad9832_state {
* transfer buffers to live in their own cache lines.
*/
union {
- unsigned short freq_data[4]____cacheline_aligned;
- unsigned short phase_data[2];
- unsigned short data;
+ __be16 freq_data[4]____cacheline_aligned;
+ __be16 phase_data[2];
+ __be16 data;
};
};
diff --git a/drivers/staging/iio/frequency/ad9834.h b/drivers/staging/iio/frequency/ad9834.h
index ed5ed8d0007f..8ca6e52bae6b 100644
--- a/drivers/staging/iio/frequency/ad9834.h
+++ b/drivers/staging/iio/frequency/ad9834.h
@@ -65,8 +65,8 @@ struct ad9834_state {
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
*/
- unsigned short data ____cacheline_aligned;
- unsigned short freq_data[2] ;
+ __be16 data ____cacheline_aligned;
+ __be16 freq_data[2];
};
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
index 6d3d771154f3..d5d395c2e3e4 100644
--- a/drivers/staging/iio/gyro/adis16060_core.c
+++ b/drivers/staging/iio/gyro/adis16060_core.c
@@ -167,7 +167,7 @@ static int adis16060_r_probe(struct spi_device *spi)
indio_dev->channels = adis16060_channels;
indio_dev->num_channels = ARRAY_SIZE(adis16060_channels);
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret)
return ret;
@@ -175,13 +175,6 @@ static int adis16060_r_probe(struct spi_device *spi)
return 0;
}
-/* fixme, confirm ordering in this function */
-static int adis16060_r_remove(struct spi_device *spi)
-{
- iio_device_unregister(spi_get_drvdata(spi));
- return 0;
-}
-
static int adis16060_w_probe(struct spi_device *spi)
{
int ret;
@@ -211,7 +204,6 @@ static struct spi_driver adis16060_r_driver = {
.owner = THIS_MODULE,
},
.probe = adis16060_r_probe,
- .remove = adis16060_r_remove,
};
static struct spi_driver adis16060_w_driver = {
diff --git a/drivers/staging/iio/iio_simple_dummy.c b/drivers/staging/iio/iio_simple_dummy.c
index 1fac9894b18c..fd334a03a49a 100644
--- a/drivers/staging/iio/iio_simple_dummy.c
+++ b/drivers/staging/iio/iio_simple_dummy.c
@@ -370,10 +370,10 @@ static const struct iio_info iio_dummy_info = {
.read_raw = &iio_dummy_read_raw,
.write_raw = &iio_dummy_write_raw,
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
- .read_event_config_new = &iio_simple_dummy_read_event_config,
- .write_event_config_new = &iio_simple_dummy_write_event_config,
- .read_event_value_new = &iio_simple_dummy_read_event_value,
- .write_event_value_new = &iio_simple_dummy_write_event_value,
+ .read_event_config = &iio_simple_dummy_read_event_config,
+ .write_event_config = &iio_simple_dummy_write_event_config,
+ .read_event_value = &iio_simple_dummy_read_event_value,
+ .write_event_value = &iio_simple_dummy_write_event_value,
#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
};
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 0a4298b744e6..2b96665da8a2 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -629,7 +629,7 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
struct iio_buffer *buffer;
buffer = iio_kfifo_allocate(indio_dev);
- if (buffer)
+ if (!buffer)
return -ENOMEM;
iio_device_attach_buffer(indio_dev, buffer);
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 488e690388c9..3660a43b5f08 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -585,7 +585,7 @@ static int isl29018_probe(struct i2c_client *client,
indio_dev->name = id->name;
indio_dev->dev.parent = &client->dev;
indio_dev->modes = INDIO_DIRECT_MODE;
- err = iio_device_register(indio_dev);
+ err = devm_iio_device_register(&client->dev, indio_dev);
if (err) {
dev_err(&client->dev, "iio registration fails\n");
return err;
@@ -594,16 +594,6 @@ static int isl29018_probe(struct i2c_client *client,
return 0;
}
-static int isl29018_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- dev_dbg(&client->dev, "%s()\n", __func__);
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int isl29018_suspend(struct device *dev)
{
@@ -664,7 +654,6 @@ static struct i2c_driver isl29018_driver = {
.of_match_table = isl29018_of_match,
},
.probe = isl29018_probe,
- .remove = isl29018_remove,
.id_table = isl29018_id,
};
module_i2c_driver(isl29018_driver);
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index 18805029d2a9..1e538086d48b 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -1672,10 +1672,10 @@ static const struct iio_info tsl2X7X_device_info[] = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
- .read_event_value_new = &tsl2x7x_read_thresh,
- .write_event_value_new = &tsl2x7x_write_thresh,
- .read_event_config_new = &tsl2x7x_read_interrupt_config,
- .write_event_config_new = &tsl2x7x_write_interrupt_config,
+ .read_event_value = &tsl2x7x_read_thresh,
+ .write_event_value = &tsl2x7x_write_thresh,
+ .read_event_config = &tsl2x7x_read_interrupt_config,
+ .write_event_config = &tsl2x7x_write_interrupt_config,
},
[PRX] = {
.attrs = &tsl2X7X_device_attr_group_tbl[PRX],
@@ -1683,10 +1683,10 @@ static const struct iio_info tsl2X7X_device_info[] = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
- .read_event_value_new = &tsl2x7x_read_thresh,
- .write_event_value_new = &tsl2x7x_write_thresh,
- .read_event_config_new = &tsl2x7x_read_interrupt_config,
- .write_event_config_new = &tsl2x7x_write_interrupt_config,
+ .read_event_value = &tsl2x7x_read_thresh,
+ .write_event_value = &tsl2x7x_write_thresh,
+ .read_event_config = &tsl2x7x_read_interrupt_config,
+ .write_event_config = &tsl2x7x_write_interrupt_config,
},
[ALSPRX] = {
.attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX],
@@ -1694,10 +1694,10 @@ static const struct iio_info tsl2X7X_device_info[] = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
- .read_event_value_new = &tsl2x7x_read_thresh,
- .write_event_value_new = &tsl2x7x_write_thresh,
- .read_event_config_new = &tsl2x7x_read_interrupt_config,
- .write_event_config_new = &tsl2x7x_write_interrupt_config,
+ .read_event_value = &tsl2x7x_read_thresh,
+ .write_event_value = &tsl2x7x_write_thresh,
+ .read_event_config = &tsl2x7x_read_interrupt_config,
+ .write_event_config = &tsl2x7x_write_interrupt_config,
},
[PRX2] = {
.attrs = &tsl2X7X_device_attr_group_tbl[PRX2],
@@ -1705,10 +1705,10 @@ static const struct iio_info tsl2X7X_device_info[] = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
- .read_event_value_new = &tsl2x7x_read_thresh,
- .write_event_value_new = &tsl2x7x_write_thresh,
- .read_event_config_new = &tsl2x7x_read_interrupt_config,
- .write_event_config_new = &tsl2x7x_write_interrupt_config,
+ .read_event_value = &tsl2x7x_read_thresh,
+ .write_event_value = &tsl2x7x_write_thresh,
+ .read_event_config = &tsl2x7x_read_interrupt_config,
+ .write_event_config = &tsl2x7x_write_interrupt_config,
},
[ALSPRX2] = {
.attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX2],
@@ -1716,10 +1716,10 @@ static const struct iio_info tsl2X7X_device_info[] = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
- .read_event_value_new = &tsl2x7x_read_thresh,
- .write_event_value_new = &tsl2x7x_write_thresh,
- .read_event_config_new = &tsl2x7x_read_interrupt_config,
- .write_event_config_new = &tsl2x7x_write_interrupt_config,
+ .read_event_value = &tsl2x7x_read_thresh,
+ .write_event_value = &tsl2x7x_write_thresh,
+ .read_event_config = &tsl2x7x_read_interrupt_config,
+ .write_event_config = &tsl2x7x_write_interrupt_config,
},
};
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index 99421f90d189..d4f4dd90c699 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -451,7 +451,12 @@ done:
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
.scan_index = idx, \
- .scan_type = IIO_ST('s', 16, 16, IIO_BE), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
}
static const struct iio_chan_spec hmc5843_channels[] = {
@@ -624,10 +629,17 @@ static const struct i2c_device_id hmc5843_id[] = {
};
MODULE_DEVICE_TABLE(i2c, hmc5843_id);
+static const struct of_device_id hmc5843_of_match[] = {
+ { .compatible = "honeywell,hmc5843" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, hmc5843_of_match);
+
static struct i2c_driver hmc5843_driver = {
.driver = {
.name = "hmc5843",
.pm = HMC5843_PM_OPS,
+ .of_match_table = hmc5843_of_match,
},
.id_table = hmc5843_id,
.probe = hmc5843_probe,
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index 62d30179301f..36eedd8a0ea9 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -131,7 +131,7 @@ static int ad2s1200_probe(struct spi_device *spi)
indio_dev->num_channels = ARRAY_SIZE(ad2s1200_channels);
indio_dev->name = spi_get_device_id(spi)->name;
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret)
return ret;
@@ -142,13 +142,6 @@ static int ad2s1200_probe(struct spi_device *spi)
return 0;
}
-static int ad2s1200_remove(struct spi_device *spi)
-{
- iio_device_unregister(spi_get_drvdata(spi));
-
- return 0;
-}
-
static const struct spi_device_id ad2s1200_id[] = {
{ "ad2s1200" },
{ "ad2s1205" },
@@ -162,7 +155,6 @@ static struct spi_driver ad2s1200_driver = {
.owner = THIS_MODULE,
},
.probe = ad2s1200_probe,
- .remove = ad2s1200_remove,
.id_table = ad2s1200_id,
};
module_spi_driver(ad2s1200_driver);
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index 5032ff7c2259..78319ad176cd 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -53,3 +53,9 @@ config DRM_IMX_IPUV3
depends on DRM_IMX_IPUV3_CORE
help
Choose this if you have a i.MX5 or i.MX6 processor.
+
+config DRM_IMX_HDMI
+ tristate "Freescale i.MX DRM HDMI"
+ depends on DRM_IMX
+ help
+ Choose this if you want to use HDMI on i.MX6.
diff --git a/drivers/staging/imx-drm/Makefile b/drivers/staging/imx-drm/Makefile
index 8742432d7b01..4677585b5ad5 100644
--- a/drivers/staging/imx-drm/Makefile
+++ b/drivers/staging/imx-drm/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += ipu-v3/
imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
+obj-$(CONFIG_DRM_IMX_HDMI) += imx-hdmi.o
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 6bd015ac9d68..236ed66f116a 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -88,8 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm)
imx_drm_device_put();
- drm_mode_config_cleanup(imxdrm->drm);
- drm_kms_helper_poll_fini(imxdrm->drm);
+ drm_vblank_cleanup(drm);
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
return 0;
}
@@ -141,19 +142,19 @@ EXPORT_SYMBOL_GPL(imx_drm_crtc_panel_format);
int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
{
- return drm_vblank_get(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ return drm_vblank_get(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get);
void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc)
{
- drm_vblank_put(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ drm_vblank_put(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put);
void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc)
{
- drm_handle_vblank(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ drm_handle_vblank(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
@@ -199,8 +200,8 @@ static void imx_drm_driver_preclose(struct drm_device *drm,
if (!file->is_master)
return;
- for (i = 0; i < 4; i++)
- imx_drm_disable_vblank(drm , i);
+ for (i = 0; i < MAX_CRTC; i++)
+ imx_drm_disable_vblank(drm, i);
}
static const struct file_operations imx_drm_driver_fops = {
@@ -369,28 +370,6 @@ static void imx_drm_connector_unregister(
}
/*
- * register a crtc to the drm core
- */
-static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- int ret;
-
- drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
- imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
- ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
- if (ret)
- return ret;
-
- drm_crtc_helper_add(imx_drm_crtc->crtc,
- imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
-
- drm_mode_group_reinit(imxdrm->drm);
-
- return 0;
-}
-
-/*
* Called by the CRTC driver when all CRTCs are registered. This
* puts all the pieces together and initializes the driver.
* Once this is called no more CRTCs can be registered since
@@ -422,31 +401,39 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
mutex_lock(&imxdrm->mutex);
- drm_kms_helper_poll_init(imxdrm->drm);
+ drm_kms_helper_poll_init(drm);
/* setup the grouping for the legacy output */
- ret = drm_mode_group_init_legacy_group(imxdrm->drm,
- &imxdrm->drm->primary->mode_group);
+ ret = drm_mode_group_init_legacy_group(drm,
+ &drm->primary->mode_group);
if (ret)
- goto err_init;
+ goto err_kms;
- ret = drm_vblank_init(imxdrm->drm, MAX_CRTC);
+ ret = drm_vblank_init(drm, MAX_CRTC);
if (ret)
- goto err_init;
+ goto err_kms;
/*
* with vblank_disable_allowed = true, vblank interrupt will be disabled
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
- imxdrm->drm->vblank_disable_allowed = true;
+ drm->vblank_disable_allowed = true;
- if (!imx_drm_device_get())
+ if (!imx_drm_device_get()) {
ret = -EINVAL;
+ goto err_vblank;
+ }
- ret = 0;
+ platform_set_drvdata(drm->platformdev, drm);
+ mutex_unlock(&imxdrm->mutex);
+ return 0;
-err_init:
+err_vblank:
+ drm_vblank_cleanup(drm);
+err_kms:
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
mutex_unlock(&imxdrm->mutex);
return ret;
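The reworked error path above unwinds in the reverse order of initialization: vblank support is torn down first, then KMS polling, then the mode configuration. A generic sketch of this unwind pattern (example_bind_components() is a hypothetical placeholder, not an imx-drm function):

static int example_load(struct drm_device *drm)
{
	int ret;

	drm_mode_config_init(drm);
	drm_kms_helper_poll_init(drm);

	ret = drm_vblank_init(drm, MAX_CRTC);
	if (ret)
		goto err_kms;

	ret = example_bind_components(drm);	/* hypothetical helper */
	if (ret)
		goto err_vblank;

	return 0;

err_vblank:
	drm_vblank_cleanup(drm);	/* undo drm_vblank_init() */
err_kms:
	drm_kms_helper_poll_fini(drm);	/* undo drm_kms_helper_poll_init() */
	drm_mode_config_cleanup(drm);	/* undo drm_mode_config_init() */
	return ret;
}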
@@ -492,6 +479,15 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
mutex_lock(&imxdrm->mutex);
+ /*
+ * The vblank arrays are dimensioned by MAX_CRTC - we can't
+ * pass IDs greater than this to those functions.
+ */
+ if (imxdrm->pipes >= MAX_CRTC) {
+ ret = -EINVAL;
+ goto err_busy;
+ }
+
if (imxdrm->drm->open_count) {
ret = -EBUSY;
goto err_busy;
@@ -517,10 +513,18 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
*new_crtc = imx_drm_crtc;
- ret = imx_drm_crtc_register(imx_drm_crtc);
+ ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
if (ret)
goto err_register;
+ drm_crtc_helper_add(crtc,
+ imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
+
+ drm_crtc_init(imxdrm->drm, crtc,
+ imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
+
+ drm_mode_group_reinit(imxdrm->drm);
+
imx_drm_update_possible_crtcs();
mutex_unlock(&imxdrm->mutex);
@@ -528,6 +532,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
return 0;
err_register:
+ list_del(&imx_drm_crtc->list);
kfree(imx_drm_crtc);
err_alloc:
err_busy:
@@ -829,7 +834,7 @@ static int imx_drm_platform_probe(struct platform_device *pdev)
static int imx_drm_platform_remove(struct platform_device *pdev)
{
- drm_platform_exit(&imx_drm_driver, pdev);
+ drm_put_dev(platform_get_drvdata(pdev));
return 0;
}
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c
new file mode 100644
index 000000000000..62ce0e86f14b
--- /dev/null
+++ b/drivers/staging/imx-drm/imx-hdmi.c
@@ -0,0 +1,1912 @@
+/*
+ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * SH-Mobile High-Definition Multimedia Interface (HDMI) driver
+ * for SLISHDMI13T and SLIPHDMIT IP cores
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ */
+
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/hdmi.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/of_device.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+
+#include "ipu-v3/imx-ipu-v3.h"
+#include "imx-hdmi.h"
+#include "imx-drm.h"
+
+#define HDMI_EDID_LEN 512
+
+#define RGB 0
+#define YCBCR444 1
+#define YCBCR422_16BITS 2
+#define YCBCR422_8BITS 3
+#define XVYCC444 4
+
+enum hdmi_datamap {
+ RGB444_8B = 0x01,
+ RGB444_10B = 0x03,
+ RGB444_12B = 0x05,
+ RGB444_16B = 0x07,
+ YCbCr444_8B = 0x09,
+ YCbCr444_10B = 0x0B,
+ YCbCr444_12B = 0x0D,
+ YCbCr444_16B = 0x0F,
+ YCbCr422_8B = 0x16,
+ YCbCr422_10B = 0x14,
+ YCbCr422_12B = 0x12,
+};
+
+enum imx_hdmi_devtype {
+ IMX6Q_HDMI,
+ IMX6DL_HDMI,
+};
+
+static const u16 csc_coeff_default[3][4] = {
+ { 0x2000, 0x0000, 0x0000, 0x0000 },
+ { 0x0000, 0x2000, 0x0000, 0x0000 },
+ { 0x0000, 0x0000, 0x2000, 0x0000 }
+};
+
+static const u16 csc_coeff_rgb_out_eitu601[3][4] = {
+ { 0x2000, 0x6926, 0x74fd, 0x010e },
+ { 0x2000, 0x2cdd, 0x0000, 0x7e9a },
+ { 0x2000, 0x0000, 0x38b4, 0x7e3b }
+};
+
+static const u16 csc_coeff_rgb_out_eitu709[3][4] = {
+ { 0x2000, 0x7106, 0x7a02, 0x00a7 },
+ { 0x2000, 0x3264, 0x0000, 0x7e6d },
+ { 0x2000, 0x0000, 0x3b61, 0x7e25 }
+};
+
+static const u16 csc_coeff_rgb_in_eitu601[3][4] = {
+ { 0x2591, 0x1322, 0x074b, 0x0000 },
+ { 0x6535, 0x2000, 0x7acc, 0x0200 },
+ { 0x6acd, 0x7534, 0x2000, 0x0200 }
+};
+
+static const u16 csc_coeff_rgb_in_eitu709[3][4] = {
+ { 0x2dc5, 0x0d9b, 0x049e, 0x0000 },
+ { 0x62f0, 0x2000, 0x7d11, 0x0200 },
+ { 0x6756, 0x78ab, 0x2000, 0x0200 }
+};
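The coefficient tables above appear to use a fixed-point encoding in which 0x2000 represents 1.0 (13 fractional bits) and negative values are stored as 15-bit two's complement. That is an inference from the values themselves (for instance 0x2cdd is about 1.402, the BT.601 Cr-to-R gain, and 0x6926 is about -0.714), not something stated in the patch. A small helper that decodes a coefficient under that assumption:

/*
 * Decode one CSC coefficient, ASSUMING a 15-bit two's complement value
 * with 13 fractional bits (0x2000 == 1.0). Illustrative only.
 */
static double csc_coeff_to_double(unsigned short c)
{
	long v = c & 0x7fff;

	if (v & 0x4000)		/* sign bit of the 15-bit value */
		v -= 0x8000;

	return (double)v / 8192.0;
}

/* csc_coeff_to_double(0x2000) == 1.0, csc_coeff_to_double(0x6926) is about -0.714 */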
+
+struct hdmi_vmode {
+ bool mdvi;
+ bool mhsyncpolarity;
+ bool mvsyncpolarity;
+ bool minterlaced;
+ bool mdataenablepolarity;
+
+ unsigned int mpixelclock;
+ unsigned int mpixelrepetitioninput;
+ unsigned int mpixelrepetitionoutput;
+};
+
+struct hdmi_data_info {
+ unsigned int enc_in_format;
+ unsigned int enc_out_format;
+ unsigned int enc_color_depth;
+ unsigned int colorimetry;
+ unsigned int pix_repet_factor;
+ unsigned int hdcp_enable;
+ struct hdmi_vmode video_mode;
+};
+
+struct imx_hdmi {
+ struct drm_connector connector;
+ struct imx_drm_connector *imx_drm_connector;
+ struct drm_encoder encoder;
+ struct imx_drm_encoder *imx_drm_encoder;
+
+ enum imx_hdmi_devtype dev_type;
+ struct device *dev;
+ struct clk *isfr_clk;
+ struct clk *iahb_clk;
+
+ struct hdmi_data_info hdmi_data;
+ int vic;
+
+ u8 edid[HDMI_EDID_LEN];
+ bool cable_plugin;
+
+ bool phy_enabled;
+ struct drm_display_mode previous_mode;
+
+ struct regmap *regmap;
+ struct i2c_adapter *ddc;
+ void __iomem *regs;
+
+ unsigned long pixel_clk_rate;
+ unsigned int sample_rate;
+ int ratio;
+};
+
+static void imx_hdmi_set_ipu_di_mux(struct imx_hdmi *hdmi, int ipu_di)
+{
+ regmap_update_bits(hdmi->regmap, IOMUXC_GPR3,
+ IMX6Q_GPR3_HDMI_MUX_CTL_MASK,
+ ipu_di << IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT);
+}
+
+static inline void hdmi_writeb(struct imx_hdmi *hdmi, u8 val, int offset)
+{
+ writeb(val, hdmi->regs + offset);
+}
+
+static inline u8 hdmi_readb(struct imx_hdmi *hdmi, int offset)
+{
+ return readb(hdmi->regs + offset);
+}
+
+static void hdmi_mask_writeb(struct imx_hdmi *hdmi, u8 data, unsigned int reg,
+ u8 shift, u8 mask)
+{
+ u8 value = hdmi_readb(hdmi, reg) & ~mask;
+ value |= (data << shift) & mask;
+ hdmi_writeb(hdmi, value, reg);
+}
+
+static void hdmi_set_clock_regenerator_n(struct imx_hdmi *hdmi,
+ unsigned int value)
+{
+ u8 val;
+
+ hdmi_writeb(hdmi, value & 0xff, HDMI_AUD_N1);
+ hdmi_writeb(hdmi, (value >> 8) & 0xff, HDMI_AUD_N2);
+ hdmi_writeb(hdmi, (value >> 16) & 0x0f, HDMI_AUD_N3);
+
+ /* nshift factor = 0 */
+ val = hdmi_readb(hdmi, HDMI_AUD_CTS3);
+ val &= ~HDMI_AUD_CTS3_N_SHIFT_MASK;
+ hdmi_writeb(hdmi, val, HDMI_AUD_CTS3);
+}
+
+static void hdmi_regenerate_cts(struct imx_hdmi *hdmi, unsigned int cts)
+{
+ u8 val;
+
+ /* Must be set/cleared first */
+ val = hdmi_readb(hdmi, HDMI_AUD_CTS3);
+ val &= ~HDMI_AUD_CTS3_CTS_MANUAL;
+ hdmi_writeb(hdmi, val, HDMI_AUD_CTS3);
+
+ hdmi_writeb(hdmi, cts & 0xff, HDMI_AUD_CTS1);
+ hdmi_writeb(hdmi, (cts >> 8) & 0xff, HDMI_AUD_CTS2);
+ hdmi_writeb(hdmi, ((cts >> 16) & HDMI_AUD_CTS3_AUDCTS19_16_MASK) |
+ HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
+}
+
+static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk,
+ unsigned int ratio)
+{
+ unsigned int n = (128 * freq) / 1000;
+
+ switch (freq) {
+ case 32000:
+ if (pixel_clk == 25170000)
+ n = (ratio == 150) ? 9152 : 4576;
+ else if (pixel_clk == 27020000)
+ n = (ratio == 150) ? 8192 : 4096;
+ else if (pixel_clk == 74170000 || pixel_clk == 148350000)
+ n = 11648;
+ else
+ n = 4096;
+ break;
+
+ case 44100:
+ if (pixel_clk == 25170000)
+ n = 7007;
+ else if (pixel_clk == 74170000)
+ n = 17836;
+ else if (pixel_clk == 148350000)
+ n = (ratio == 150) ? 17836 : 8918;
+ else
+ n = 6272;
+ break;
+
+ case 48000:
+ if (pixel_clk == 25170000)
+ n = (ratio == 150) ? 9152 : 6864;
+ else if (pixel_clk == 27020000)
+ n = (ratio == 150) ? 8192 : 6144;
+ else if (pixel_clk == 74170000)
+ n = 11648;
+ else if (pixel_clk == 148350000)
+ n = (ratio == 150) ? 11648 : 5824;
+ else
+ n = 6144;
+ break;
+
+ case 88200:
+ n = hdmi_compute_n(44100, pixel_clk, ratio) * 2;
+ break;
+
+ case 96000:
+ n = hdmi_compute_n(48000, pixel_clk, ratio) * 2;
+ break;
+
+ case 176400:
+ n = hdmi_compute_n(44100, pixel_clk, ratio) * 4;
+ break;
+
+ case 192000:
+ n = hdmi_compute_n(48000, pixel_clk, ratio) * 4;
+ break;
+
+ default:
+ break;
+ }
+
+ return n;
+}
+
+static unsigned int hdmi_compute_cts(unsigned int freq, unsigned long pixel_clk,
+ unsigned int ratio)
+{
+ unsigned int cts = 0;
+
+ pr_debug("%s: freq: %d pixel_clk: %ld ratio: %d\n", __func__, freq,
+ pixel_clk, ratio);
+
+ switch (freq) {
+ case 32000:
+ if (pixel_clk == 297000000) {
+ cts = 222750;
+ break;
+ }
+ case 48000:
+ case 96000:
+ case 192000:
+ switch (pixel_clk) {
+ case 25200000:
+ case 27000000:
+ case 54000000:
+ case 74250000:
+ case 148500000:
+ cts = pixel_clk / 1000;
+ break;
+ case 297000000:
+ cts = 247500;
+ break;
+ /*
+ * All other TMDS clocks are not supported by
+ * DWC_hdmi_tx. TMDS clocks multiplied or
+ * divided by the 1.001 factor are not
+ * supported either.
+ */
+ default:
+ break;
+ }
+ break;
+ case 44100:
+ case 88200:
+ case 176400:
+ switch (pixel_clk) {
+ case 25200000:
+ cts = 28000;
+ break;
+ case 27000000:
+ cts = 30000;
+ break;
+ case 54000000:
+ cts = 60000;
+ break;
+ case 74250000:
+ cts = 82500;
+ break;
+ case 148500000:
+ cts = 165000;
+ break;
+ case 297000000:
+ cts = 247500;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (ratio == 100)
+ return cts;
+ else
+ return (cts * ratio) / 100;
+}
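The N and CTS values computed above are related by the HDMI audio clock regeneration equation 128 * fs = ftmds * N / CTS. Checking one entry of the tables, 48 kHz audio at a 74.25 MHz TMDS clock (ratio 100) gives N = 6144 and CTS = 74250, and indeed 74250000 * 6144 / 74250 = 6144000 = 128 * 48000. A small cross-check helper in the same spirit (illustrative, not part of the driver):

/*
 * Illustrative cross-check: derive CTS from N via 128 * fs = ftmds * N / CTS.
 */
static unsigned int cts_from_n(unsigned long ftmds, unsigned int fs,
			       unsigned int n)
{
	return (unsigned long long)ftmds * n / (128ULL * fs);
}

/* cts_from_n(74250000, 48000, 6144) == 74250, matching the tables above. */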
+
+static void hdmi_get_pixel_clk(struct imx_hdmi *hdmi)
+{
+ unsigned long rate;
+
+ rate = 65000000; /* FIXME */
+
+ if (rate)
+ hdmi->pixel_clk_rate = rate;
+}
+
+static void hdmi_set_clk_regenerator(struct imx_hdmi *hdmi)
+{
+ unsigned int clk_n, clk_cts;
+
+ clk_n = hdmi_compute_n(hdmi->sample_rate, hdmi->pixel_clk_rate,
+ hdmi->ratio);
+ clk_cts = hdmi_compute_cts(hdmi->sample_rate, hdmi->pixel_clk_rate,
+ hdmi->ratio);
+
+ if (!clk_cts) {
+ dev_dbg(hdmi->dev, "%s: pixel clock not supported: %lu\n",
+ __func__, hdmi->pixel_clk_rate);
+ return;
+ }
+
+ dev_dbg(hdmi->dev, "%s: samplerate=%d ratio=%d pixelclk=%lu N=%d cts=%d\n",
+ __func__, hdmi->sample_rate, hdmi->ratio,
+ hdmi->pixel_clk_rate, clk_n, clk_cts);
+
+ hdmi_set_clock_regenerator_n(hdmi, clk_n);
+ hdmi_regenerate_cts(hdmi, clk_cts);
+}
+
+static void hdmi_init_clk_regenerator(struct imx_hdmi *hdmi)
+{
+ unsigned int clk_n, clk_cts;
+
+ clk_n = hdmi_compute_n(hdmi->sample_rate, hdmi->pixel_clk_rate,
+ hdmi->ratio);
+ clk_cts = hdmi_compute_cts(hdmi->sample_rate, hdmi->pixel_clk_rate,
+ hdmi->ratio);
+
+ if (!clk_cts) {
+ dev_dbg(hdmi->dev, "%s: pixel clock not supported: %lu\n",
+ __func__, hdmi->pixel_clk_rate);
+ return;
+ }
+
+ dev_dbg(hdmi->dev, "%s: samplerate=%d ratio=%d pixelclk=%lu N=%d cts=%d\n",
+ __func__, hdmi->sample_rate, hdmi->ratio,
+ hdmi->pixel_clk_rate, clk_n, clk_cts);
+
+ hdmi_set_clock_regenerator_n(hdmi, clk_n);
+ hdmi_regenerate_cts(hdmi, clk_cts);
+}
+
+static void hdmi_clk_regenerator_update_pixel_clock(struct imx_hdmi *hdmi)
+{
+ /* Get pixel clock from ipu */
+ hdmi_get_pixel_clk(hdmi);
+ hdmi_set_clk_regenerator(hdmi);
+}
+
+/*
+ * This submodule is responsible for the video data synchronization.
+ * For example, for RGB 4:4:4 input, the data map is defined as
+ * pin{47~40} <==> R[7:0]
+ * pin{31~24} <==> G[7:0]
+ * pin{15~8} <==> B[7:0]
+ */
+static void hdmi_video_sample(struct imx_hdmi *hdmi)
+{
+ int color_format = 0;
+ u8 val;
+
+ if (hdmi->hdmi_data.enc_in_format == RGB) {
+ if (hdmi->hdmi_data.enc_color_depth == 8)
+ color_format = 0x01;
+ else if (hdmi->hdmi_data.enc_color_depth == 10)
+ color_format = 0x03;
+ else if (hdmi->hdmi_data.enc_color_depth == 12)
+ color_format = 0x05;
+ else if (hdmi->hdmi_data.enc_color_depth == 16)
+ color_format = 0x07;
+ else
+ return;
+ } else if (hdmi->hdmi_data.enc_in_format == YCBCR444) {
+ if (hdmi->hdmi_data.enc_color_depth == 8)
+ color_format = 0x09;
+ else if (hdmi->hdmi_data.enc_color_depth == 10)
+ color_format = 0x0B;
+ else if (hdmi->hdmi_data.enc_color_depth == 12)
+ color_format = 0x0D;
+ else if (hdmi->hdmi_data.enc_color_depth == 16)
+ color_format = 0x0F;
+ else
+ return;
+ } else if (hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) {
+ if (hdmi->hdmi_data.enc_color_depth == 8)
+ color_format = 0x16;
+ else if (hdmi->hdmi_data.enc_color_depth == 10)
+ color_format = 0x14;
+ else if (hdmi->hdmi_data.enc_color_depth == 12)
+ color_format = 0x12;
+ else
+ return;
+ }
+
+ val = HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE |
+ ((color_format << HDMI_TX_INVID0_VIDEO_MAPPING_OFFSET) &
+ HDMI_TX_INVID0_VIDEO_MAPPING_MASK);
+ hdmi_writeb(hdmi, val, HDMI_TX_INVID0);
+
+ /* Enable TX stuffing: When DE is inactive, fix the output data to 0 */
+ val = HDMI_TX_INSTUFFING_BDBDATA_STUFFING_ENABLE |
+ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_ENABLE |
+ HDMI_TX_INSTUFFING_GYDATA_STUFFING_ENABLE;
+ hdmi_writeb(hdmi, val, HDMI_TX_INSTUFFING);
+ hdmi_writeb(hdmi, 0x0, HDMI_TX_GYDATA0);
+ hdmi_writeb(hdmi, 0x0, HDMI_TX_GYDATA1);
+ hdmi_writeb(hdmi, 0x0, HDMI_TX_RCRDATA0);
+ hdmi_writeb(hdmi, 0x0, HDMI_TX_RCRDATA1);
+ hdmi_writeb(hdmi, 0x0, HDMI_TX_BCBDATA0);
+ hdmi_writeb(hdmi, 0x0, HDMI_TX_BCBDATA1);
+}
+
+static int is_color_space_conversion(struct imx_hdmi *hdmi)
+{
+ return (hdmi->hdmi_data.enc_in_format !=
+ hdmi->hdmi_data.enc_out_format);
+}
+
+static int is_color_space_decimation(struct imx_hdmi *hdmi)
+{
+ return ((hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS) &&
+ (hdmi->hdmi_data.enc_in_format == RGB ||
+ hdmi->hdmi_data.enc_in_format == YCBCR444));
+}
+
+static int is_color_space_interpolation(struct imx_hdmi *hdmi)
+{
+ return ((hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) &&
+ (hdmi->hdmi_data.enc_out_format == RGB ||
+ hdmi->hdmi_data.enc_out_format == YCBCR444));
+}
+
+static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi)
+{
+ const u16 (*csc_coeff)[3][4] = &csc_coeff_default;
+ u32 csc_scale = 1;
+ u8 val;
+
+ if (is_color_space_conversion(hdmi)) {
+ if (hdmi->hdmi_data.enc_out_format == RGB) {
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
+ csc_coeff = &csc_coeff_rgb_out_eitu601;
+ else
+ csc_coeff = &csc_coeff_rgb_out_eitu709;
+ } else if (hdmi->hdmi_data.enc_in_format == RGB) {
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
+ csc_coeff = &csc_coeff_rgb_in_eitu601;
+ else
+ csc_coeff = &csc_coeff_rgb_in_eitu709;
+ csc_scale = 0;
+ }
+ }
+
+ hdmi_writeb(hdmi, ((*csc_coeff)[0][0] & 0xff), HDMI_CSC_COEF_A1_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[0][0] >> 8), HDMI_CSC_COEF_A1_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[0][1] & 0xff), HDMI_CSC_COEF_A2_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[0][1] >> 8), HDMI_CSC_COEF_A2_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[0][2] & 0xff), HDMI_CSC_COEF_A3_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[0][2] >> 8), HDMI_CSC_COEF_A3_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[0][3] & 0xff), HDMI_CSC_COEF_A4_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[0][3] >> 8), HDMI_CSC_COEF_A4_MSB);
+
+ hdmi_writeb(hdmi, ((*csc_coeff)[1][0] & 0xff), HDMI_CSC_COEF_B1_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[1][0] >> 8), HDMI_CSC_COEF_B1_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[1][1] & 0xff), HDMI_CSC_COEF_B2_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[1][1] >> 8), HDMI_CSC_COEF_B2_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[1][2] & 0xff), HDMI_CSC_COEF_B3_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[1][2] >> 8), HDMI_CSC_COEF_B3_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[1][3] & 0xff), HDMI_CSC_COEF_B4_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[1][3] >> 8), HDMI_CSC_COEF_B4_MSB);
+
+ hdmi_writeb(hdmi, ((*csc_coeff)[2][0] & 0xff), HDMI_CSC_COEF_C1_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[2][0] >> 8), HDMI_CSC_COEF_C1_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[2][1] & 0xff), HDMI_CSC_COEF_C2_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[2][1] >> 8), HDMI_CSC_COEF_C2_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[2][2] & 0xff), HDMI_CSC_COEF_C3_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[2][2] >> 8), HDMI_CSC_COEF_C3_MSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[2][3] & 0xff), HDMI_CSC_COEF_C4_LSB);
+ hdmi_writeb(hdmi, ((*csc_coeff)[2][3] >> 8), HDMI_CSC_COEF_C4_MSB);
+
+ val = hdmi_readb(hdmi, HDMI_CSC_SCALE);
+ val &= ~HDMI_CSC_SCALE_CSCSCALE_MASK;
+ val |= csc_scale & HDMI_CSC_SCALE_CSCSCALE_MASK;
+ hdmi_writeb(hdmi, val, HDMI_CSC_SCALE);
+}
+
+static void hdmi_video_csc(struct imx_hdmi *hdmi)
+{
+ int color_depth = 0;
+ int interpolation = HDMI_CSC_CFG_INTMODE_DISABLE;
+ int decimation = 0;
+ u8 val;
+
+ /* YCC422 interpolation to 444 mode */
+ if (is_color_space_interpolation(hdmi))
+ interpolation = HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA1;
+ else if (is_color_space_decimation(hdmi))
+ decimation = HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3;
+
+ if (hdmi->hdmi_data.enc_color_depth == 8)
+ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP;
+ else if (hdmi->hdmi_data.enc_color_depth == 10)
+ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP;
+ else if (hdmi->hdmi_data.enc_color_depth == 12)
+ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP;
+ else if (hdmi->hdmi_data.enc_color_depth == 16)
+ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP;
+ else
+ return;
+
+ /* Configure the CSC registers */
+ hdmi_writeb(hdmi, interpolation | decimation, HDMI_CSC_CFG);
+ val = hdmi_readb(hdmi, HDMI_CSC_SCALE);
+ val &= ~HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK;
+ val |= color_depth;
+ hdmi_writeb(hdmi, val, HDMI_CSC_SCALE);
+
+ imx_hdmi_update_csc_coeffs(hdmi);
+}
+
+/*
+ * The HDMI video packetizer packetizes the video data.
+ * For example, if the input is in YCC422 mode or a repeater is used,
+ * the data must be repacked; otherwise this module can be bypassed.
+ */
+static void hdmi_video_packetize(struct imx_hdmi *hdmi)
+{
+ unsigned int color_depth = 0;
+ unsigned int remap_size = HDMI_VP_REMAP_YCC422_16bit;
+ unsigned int output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_PP;
+ struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
+ u8 val;
+
+ if (hdmi_data->enc_out_format == RGB
+ || hdmi_data->enc_out_format == YCBCR444) {
+ if (!hdmi_data->enc_color_depth)
+ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
+ else if (hdmi_data->enc_color_depth == 8) {
+ color_depth = 4;
+ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
+ } else if (hdmi_data->enc_color_depth == 10)
+ color_depth = 5;
+ else if (hdmi_data->enc_color_depth == 12)
+ color_depth = 6;
+ else if (hdmi_data->enc_color_depth == 16)
+ color_depth = 7;
+ else
+ return;
+ } else if (hdmi_data->enc_out_format == YCBCR422_8BITS) {
+ if (!hdmi_data->enc_color_depth ||
+ hdmi_data->enc_color_depth == 8)
+ remap_size = HDMI_VP_REMAP_YCC422_16bit;
+ else if (hdmi_data->enc_color_depth == 10)
+ remap_size = HDMI_VP_REMAP_YCC422_20bit;
+ else if (hdmi_data->enc_color_depth == 12)
+ remap_size = HDMI_VP_REMAP_YCC422_24bit;
+ else
+ return;
+ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422;
+ } else
+ return;
+
+ /* set the packetizer registers */
+ val = ((color_depth << HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET) &
+ HDMI_VP_PR_CD_COLOR_DEPTH_MASK) |
+ ((hdmi_data->pix_repet_factor <<
+ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET) &
+ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK);
+ hdmi_writeb(hdmi, val, HDMI_VP_PR_CD);
+
+ val = hdmi_readb(hdmi, HDMI_VP_STUFF);
+ val &= ~HDMI_VP_STUFF_PR_STUFFING_MASK;
+ val |= HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE;
+ hdmi_writeb(hdmi, val, HDMI_VP_STUFF);
+
+ /* Data from pixel repeater block */
+ if (hdmi_data->pix_repet_factor > 1) {
+ val = hdmi_readb(hdmi, HDMI_VP_CONF);
+ val &= ~(HDMI_VP_CONF_PR_EN_MASK |
+ HDMI_VP_CONF_BYPASS_SELECT_MASK);
+ val |= HDMI_VP_CONF_PR_EN_ENABLE |
+ HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER;
+ hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ } else { /* data from packetizer block */
+ val = hdmi_readb(hdmi, HDMI_VP_CONF);
+ val &= ~(HDMI_VP_CONF_PR_EN_MASK |
+ HDMI_VP_CONF_BYPASS_SELECT_MASK);
+ val |= HDMI_VP_CONF_PR_EN_DISABLE |
+ HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER;
+ hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ }
+
+ val = hdmi_readb(hdmi, HDMI_VP_STUFF);
+ val &= ~HDMI_VP_STUFF_IDEFAULT_PHASE_MASK;
+ val |= 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET;
+ hdmi_writeb(hdmi, val, HDMI_VP_STUFF);
+
+ hdmi_writeb(hdmi, remap_size, HDMI_VP_REMAP);
+
+ if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_PP) {
+ val = hdmi_readb(hdmi, HDMI_VP_CONF);
+ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
+ HDMI_VP_CONF_PP_EN_ENMASK |
+ HDMI_VP_CONF_YCC422_EN_MASK);
+ val |= HDMI_VP_CONF_BYPASS_EN_DISABLE |
+ HDMI_VP_CONF_PP_EN_ENABLE |
+ HDMI_VP_CONF_YCC422_EN_DISABLE;
+ hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422) {
+ val = hdmi_readb(hdmi, HDMI_VP_CONF);
+ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
+ HDMI_VP_CONF_PP_EN_ENMASK |
+ HDMI_VP_CONF_YCC422_EN_MASK);
+ val |= HDMI_VP_CONF_BYPASS_EN_DISABLE |
+ HDMI_VP_CONF_PP_EN_DISABLE |
+ HDMI_VP_CONF_YCC422_EN_ENABLE;
+ hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS) {
+ val = hdmi_readb(hdmi, HDMI_VP_CONF);
+ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
+ HDMI_VP_CONF_PP_EN_ENMASK |
+ HDMI_VP_CONF_YCC422_EN_MASK);
+ val |= HDMI_VP_CONF_BYPASS_EN_ENABLE |
+ HDMI_VP_CONF_PP_EN_DISABLE |
+ HDMI_VP_CONF_YCC422_EN_DISABLE;
+ hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ } else {
+ return;
+ }
+
+ val = hdmi_readb(hdmi, HDMI_VP_STUFF);
+ val &= ~(HDMI_VP_STUFF_PP_STUFFING_MASK |
+ HDMI_VP_STUFF_YCC422_STUFFING_MASK);
+ val |= HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE |
+ HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE;
+ hdmi_writeb(hdmi, val, HDMI_VP_STUFF);
+
+ val = hdmi_readb(hdmi, HDMI_VP_CONF);
+ val &= ~HDMI_VP_CONF_OUTPUT_SELECTOR_MASK;
+ val |= output_select;
+ hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+}
+
+static inline void hdmi_phy_test_clear(struct imx_hdmi *hdmi,
+ unsigned char bit)
+{
+ u8 val = hdmi_readb(hdmi, HDMI_PHY_TST0);
+ val &= ~HDMI_PHY_TST0_TSTCLR_MASK;
+ val |= (bit << HDMI_PHY_TST0_TSTCLR_OFFSET) &
+ HDMI_PHY_TST0_TSTCLR_MASK;
+ hdmi_writeb(hdmi, val, HDMI_PHY_TST0);
+}
+
+static inline void hdmi_phy_test_enable(struct imx_hdmi *hdmi,
+ unsigned char bit)
+{
+ u8 val = hdmi_readb(hdmi, HDMI_PHY_TST0);
+ val &= ~HDMI_PHY_TST0_TSTEN_MASK;
+ val |= (bit << HDMI_PHY_TST0_TSTEN_OFFSET) &
+ HDMI_PHY_TST0_TSTEN_MASK;
+ hdmi_writeb(hdmi, val, HDMI_PHY_TST0);
+}
+
+static inline void hdmi_phy_test_clock(struct imx_hdmi *hdmi,
+ unsigned char bit)
+{
+ u8 val = hdmi_readb(hdmi, HDMI_PHY_TST0);
+ val &= ~HDMI_PHY_TST0_TSTCLK_MASK;
+ val |= (bit << HDMI_PHY_TST0_TSTCLK_OFFSET) &
+ HDMI_PHY_TST0_TSTCLK_MASK;
+ hdmi_writeb(hdmi, val, HDMI_PHY_TST0);
+}
+
+static inline void hdmi_phy_test_din(struct imx_hdmi *hdmi,
+ unsigned char bit)
+{
+ hdmi_writeb(hdmi, bit, HDMI_PHY_TST1);
+}
+
+static inline void hdmi_phy_test_dout(struct imx_hdmi *hdmi,
+ unsigned char bit)
+{
+ hdmi_writeb(hdmi, bit, HDMI_PHY_TST2);
+}
+
+static bool hdmi_phy_wait_i2c_done(struct imx_hdmi *hdmi, int msec)
+{
+ unsigned char val = 0;
+ val = hdmi_readb(hdmi, HDMI_IH_I2CMPHY_STAT0) & 0x3;
+ while (!val) {
+ udelay(1000);
+ if (msec-- == 0)
+ return false;
+ val = hdmi_readb(hdmi, HDMI_IH_I2CMPHY_STAT0) & 0x3;
+ }
+ return true;
+}
+
+static void __hdmi_phy_i2c_write(struct imx_hdmi *hdmi, unsigned short data,
+ unsigned char addr)
+{
+ hdmi_writeb(hdmi, 0xFF, HDMI_IH_I2CMPHY_STAT0);
+ hdmi_writeb(hdmi, addr, HDMI_PHY_I2CM_ADDRESS_ADDR);
+ hdmi_writeb(hdmi, (unsigned char)(data >> 8),
+ HDMI_PHY_I2CM_DATAO_1_ADDR);
+ hdmi_writeb(hdmi, (unsigned char)(data >> 0),
+ HDMI_PHY_I2CM_DATAO_0_ADDR);
+ hdmi_writeb(hdmi, HDMI_PHY_I2CM_OPERATION_ADDR_WRITE,
+ HDMI_PHY_I2CM_OPERATION_ADDR);
+ hdmi_phy_wait_i2c_done(hdmi, 1000);
+}
+
+static int hdmi_phy_i2c_write(struct imx_hdmi *hdmi, unsigned short data,
+ unsigned char addr)
+{
+ __hdmi_phy_i2c_write(hdmi, data, addr);
+ return 0;
+}
+
+static void imx_hdmi_phy_enable_power(struct imx_hdmi *hdmi, u8 enable)
+{
+ hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_PDZ_OFFSET,
+ HDMI_PHY_CONF0_PDZ_MASK);
+}
+
+static void imx_hdmi_phy_enable_tmds(struct imx_hdmi *hdmi, u8 enable)
+{
+ hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_ENTMDS_OFFSET,
+ HDMI_PHY_CONF0_ENTMDS_MASK);
+}
+
+static void imx_hdmi_phy_gen2_pddq(struct imx_hdmi *hdmi, u8 enable)
+{
+ hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET,
+ HDMI_PHY_CONF0_GEN2_PDDQ_MASK);
+}
+
+static void imx_hdmi_phy_gen2_txpwron(struct imx_hdmi *hdmi, u8 enable)
+{
+ hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET,
+ HDMI_PHY_CONF0_GEN2_TXPWRON_MASK);
+}
+
+static void imx_hdmi_phy_sel_data_en_pol(struct imx_hdmi *hdmi, u8 enable)
+{
+ hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_SELDATAENPOL_OFFSET,
+ HDMI_PHY_CONF0_SELDATAENPOL_MASK);
+}
+
+static void imx_hdmi_phy_sel_interface_control(struct imx_hdmi *hdmi, u8 enable)
+{
+ hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+ HDMI_PHY_CONF0_SELDIPIF_OFFSET,
+ HDMI_PHY_CONF0_SELDIPIF_MASK);
+}
+
+static int hdmi_phy_configure(struct imx_hdmi *hdmi, unsigned char prep,
+ unsigned char res, int cscon)
+{
+ u8 val, msec;
+
+ /* color resolution 0 is 8 bit colour depth */
+ if (!res)
+ res = 8;
+
+ if (prep)
+ return -EINVAL;
+ else if (res != 8 && res != 12)
+ return -EINVAL;
+
+ /* Enable csc path */
+ if (cscon)
+ val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH;
+ else
+ val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS;
+
+ hdmi_writeb(hdmi, val, HDMI_MC_FLOWCTRL);
+
+ /* gen2 tx power off */
+ imx_hdmi_phy_gen2_txpwron(hdmi, 0);
+
+ /* gen2 pddq */
+ imx_hdmi_phy_gen2_pddq(hdmi, 1);
+
+ /* PHY reset */
+ hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_DEASSERT, HDMI_MC_PHYRSTZ);
+ hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_ASSERT, HDMI_MC_PHYRSTZ);
+
+ hdmi_writeb(hdmi, HDMI_MC_HEACPHY_RST_ASSERT, HDMI_MC_HEACPHY_RST);
+
+ hdmi_phy_test_clear(hdmi, 1);
+ hdmi_writeb(hdmi, HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2,
+ HDMI_PHY_I2CM_SLAVE_ADDR);
+ hdmi_phy_test_clear(hdmi, 0);
+
+ if (hdmi->hdmi_data.video_mode.mpixelclock <= 45250000) {
+ switch (res) {
+ case 8:
+ /* PLL/MPLL Cfg */
+ hdmi_phy_i2c_write(hdmi, 0x01e0, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15); /* GMPCTRL */
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x21e1, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x41e2, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 92500000) {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x0140, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x2141, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x4142, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 148500000) {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x20a1, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x40a2, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x2001, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x4002, 0x06);
+ hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (hdmi->hdmi_data.video_mode.mpixelclock <= 54000000) {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10); /* CURRCTRL */
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 58400000) {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 72000000) {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 74250000) {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 118800000) {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 216000000) {
+ switch (res) {
+ case 8:
+ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
+ break;
+ case 10:
+ hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10);
+ break;
+ case 12:
+ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ dev_err(hdmi->dev,
+ "Pixel clock %d - unsupported by HDMI\n",
+ hdmi->hdmi_data.video_mode.mpixelclock);
+ return -EINVAL;
+ }
+
+ hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */
+ hdmi_phy_i2c_write(hdmi, 0x0006, 0x17);
+ /* RESISTANCE TERM 133Ohm Cfg */
+ hdmi_phy_i2c_write(hdmi, 0x0005, 0x19); /* TXTERM */
+ /* PREEMP Cfg 0.00 */
+ hdmi_phy_i2c_write(hdmi, 0x800d, 0x09); /* CKSYMTXCTRL */
+ /* TX/CK LVL 10 */
+ hdmi_phy_i2c_write(hdmi, 0x01ad, 0x0E); /* VLEVCTRL */
+ /* REMOVE CLK TERM */
+ hdmi_phy_i2c_write(hdmi, 0x8000, 0x05); /* CKCALCTRL */
+
+ imx_hdmi_phy_enable_power(hdmi, 1);
+
+ /* toggle TMDS enable */
+ imx_hdmi_phy_enable_tmds(hdmi, 0);
+ imx_hdmi_phy_enable_tmds(hdmi, 1);
+
+ /* gen2 tx power on */
+ imx_hdmi_phy_gen2_txpwron(hdmi, 1);
+ imx_hdmi_phy_gen2_pddq(hdmi, 0);
+
+ /* Wait for PHY PLL lock */
+ msec = 5;
+ do {
+ val = hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_TX_PHY_LOCK;
+ if (!val)
+ break;
+
+ if (msec == 0) {
+ dev_err(hdmi->dev, "PHY PLL not locked\n");
+ return -ETIMEDOUT;
+ }
+
+ udelay(1000);
+ msec--;
+ } while (1);
+
+ return 0;
+}
+
+static int imx_hdmi_phy_init(struct imx_hdmi *hdmi)
+{
+ int i, ret;
+ bool cscon = false;
+
+ /* Check whether the CSC needs to be activated in HDMI mode */
+ cscon = (is_color_space_conversion(hdmi) &&
+ !hdmi->hdmi_data.video_mode.mdvi);
+
+ /* HDMI Phy spec says to do the phy initialization sequence twice */
+ for (i = 0; i < 2; i++) {
+ imx_hdmi_phy_sel_data_en_pol(hdmi, 1);
+ imx_hdmi_phy_sel_interface_control(hdmi, 0);
+ imx_hdmi_phy_enable_tmds(hdmi, 0);
+ imx_hdmi_phy_enable_power(hdmi, 0);
+
+ /* Enable CSC */
+ ret = hdmi_phy_configure(hdmi, 0, 8, cscon);
+ if (ret)
+ return ret;
+ }
+
+ hdmi->phy_enabled = true;
+ return 0;
+}
+
+static void hdmi_tx_hdcp_config(struct imx_hdmi *hdmi)
+{
+ u8 de, val;
+
+ if (hdmi->hdmi_data.video_mode.mdataenablepolarity)
+ de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_HIGH;
+ else
+ de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_LOW;
+
+ /* disable rx detect */
+ val = hdmi_readb(hdmi, HDMI_A_HDCPCFG0);
+ val &= HDMI_A_HDCPCFG0_RXDETECT_MASK;
+ val |= HDMI_A_HDCPCFG0_RXDETECT_DISABLE;
+ hdmi_writeb(hdmi, val, HDMI_A_HDCPCFG0);
+
+ val = hdmi_readb(hdmi, HDMI_A_VIDPOLCFG);
+ val &= HDMI_A_VIDPOLCFG_DATAENPOL_MASK;
+ val |= de;
+ hdmi_writeb(hdmi, val, HDMI_A_VIDPOLCFG);
+
+ val = hdmi_readb(hdmi, HDMI_A_HDCPCFG1);
+ val &= HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK;
+ val |= HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_DISABLE;
+ hdmi_writeb(hdmi, val, HDMI_A_HDCPCFG1);
+}
+
+static void hdmi_config_AVI(struct imx_hdmi *hdmi)
+{
+ u8 val, pix_fmt, under_scan;
+ u8 act_ratio, coded_ratio, colorimetry, ext_colorimetry;
+ bool aspect_16_9;
+
+ aspect_16_9 = false; /* FIXME */
+
+ /* AVI Data Byte 1 */
+ if (hdmi->hdmi_data.enc_out_format == YCBCR444)
+ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR444;
+ else if (hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS)
+ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR422;
+ else
+ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_RGB;
+
+ under_scan = HDMI_FC_AVICONF0_SCAN_INFO_NODATA;
+
+ /*
+ * Active format identification data is present in the AVI InfoFrame.
+ * Under scan info, no bar data
+ */
+ val = pix_fmt | under_scan |
+ HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT |
+ HDMI_FC_AVICONF0_BAR_DATA_NO_DATA;
+
+ hdmi_writeb(hdmi, val, HDMI_FC_AVICONF0);
+
+ /* AVI Data Byte 2 -Set the Aspect Ratio */
+ if (aspect_16_9) {
+ act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9;
+ coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9;
+ } else {
+ act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3;
+ coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3;
+ }
+
+ /* Set up colorimetry */
+ if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
+ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
+ ext_colorimetry =
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+ else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
+ ext_colorimetry =
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
+ } else if (hdmi->hdmi_data.enc_out_format != RGB) {
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
+ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
+ else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
+ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
+ ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+ } else { /* Carries no data */
+ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA;
+ ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+ }
+
+ val = colorimetry | coded_ratio | act_ratio;
+ hdmi_writeb(hdmi, val, HDMI_FC_AVICONF1);
+
+ /* AVI Data Byte 3 */
+ val = HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA | ext_colorimetry |
+ HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT |
+ HDMI_FC_AVICONF2_SCALING_NONE;
+ hdmi_writeb(hdmi, val, HDMI_FC_AVICONF2);
+
+ /* AVI Data Byte 4 */
+ hdmi_writeb(hdmi, hdmi->vic, HDMI_FC_AVIVID);
+
+ /* AVI Data Byte 5- set up input and output pixel repetition */
+ val = (((hdmi->hdmi_data.video_mode.mpixelrepetitioninput + 1) <<
+ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_OFFSET) &
+ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_MASK) |
+ ((hdmi->hdmi_data.video_mode.mpixelrepetitionoutput <<
+ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET) &
+ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK);
+ hdmi_writeb(hdmi, val, HDMI_FC_PRCONF);
+
+ /* IT Content and quantization range = don't care */
+ val = HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS |
+ HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED;
+ hdmi_writeb(hdmi, val, HDMI_FC_AVICONF3);
+
+ /* AVI Data Bytes 6-13 */
+ hdmi_writeb(hdmi, 0, HDMI_FC_AVIETB0);
+ hdmi_writeb(hdmi, 0, HDMI_FC_AVIETB1);
+ hdmi_writeb(hdmi, 0, HDMI_FC_AVISBB0);
+ hdmi_writeb(hdmi, 0, HDMI_FC_AVISBB1);
+ hdmi_writeb(hdmi, 0, HDMI_FC_AVIELB0);
+ hdmi_writeb(hdmi, 0, HDMI_FC_AVIELB1);
+ hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB0);
+ hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB1);
+}
+
+static void hdmi_av_composer(struct imx_hdmi *hdmi,
+ const struct drm_display_mode *mode)
+{
+ u8 inv_val;
+ struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
+ int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
+
+ vmode->mhsyncpolarity = !!(mode->flags & DRM_MODE_FLAG_PHSYNC);
+ vmode->mvsyncpolarity = !!(mode->flags & DRM_MODE_FLAG_PVSYNC);
+ vmode->minterlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ vmode->mpixelclock = mode->clock * 1000;
+
+ dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
+
+ /* Set up HDMI_FC_INVIDCONF */
+ inv_val = (hdmi->hdmi_data.hdcp_enable ?
+ HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE :
+ HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE);
+
+ inv_val |= (vmode->mvsyncpolarity ?
+ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH :
+ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW);
+
+ inv_val |= (vmode->mhsyncpolarity ?
+ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH :
+ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW);
+
+ inv_val |= (vmode->mdataenablepolarity ?
+ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH :
+ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_LOW);
+
+ if (hdmi->vic == 39)
+ inv_val |= HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH;
+ else
+ inv_val |= (vmode->minterlaced ?
+ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH :
+ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW);
+
+ inv_val |= (vmode->minterlaced ?
+ HDMI_FC_INVIDCONF_IN_I_P_INTERLACED :
+ HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE);
+
+ inv_val |= (vmode->mdvi ?
+ HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE :
+ HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE);
+
+ hdmi_writeb(hdmi, inv_val, HDMI_FC_INVIDCONF);
+
+ /* Set up horizontal active pixel width */
+ hdmi_writeb(hdmi, mode->hdisplay >> 8, HDMI_FC_INHACTV1);
+ hdmi_writeb(hdmi, mode->hdisplay, HDMI_FC_INHACTV0);
+
+ /* Set up vertical active lines */
+ hdmi_writeb(hdmi, mode->vdisplay >> 8, HDMI_FC_INVACTV1);
+ hdmi_writeb(hdmi, mode->vdisplay, HDMI_FC_INVACTV0);
+
+ /* Set up horizontal blanking pixel region width */
+ hblank = mode->htotal - mode->hdisplay;
+ hdmi_writeb(hdmi, hblank >> 8, HDMI_FC_INHBLANK1);
+ hdmi_writeb(hdmi, hblank, HDMI_FC_INHBLANK0);
+
+ /* Set up vertical blanking region height (in lines) */
+ vblank = mode->vtotal - mode->vdisplay;
+ hdmi_writeb(hdmi, vblank, HDMI_FC_INVBLANK);
+
+ /* Set up HSYNC active edge delay width (in pixel clks) */
+ h_de_hs = mode->hsync_start - mode->hdisplay;
+ hdmi_writeb(hdmi, h_de_hs >> 8, HDMI_FC_HSYNCINDELAY1);
+ hdmi_writeb(hdmi, h_de_hs, HDMI_FC_HSYNCINDELAY0);
+
+ /* Set up VSYNC active edge delay (in lines) */
+ v_de_vs = mode->vsync_start - mode->vdisplay;
+ hdmi_writeb(hdmi, v_de_vs, HDMI_FC_VSYNCINDELAY);
+
+ /* Set up HSYNC active pulse width (in pixel clks) */
+ hsync_len = mode->hsync_end - mode->hsync_start;
+ hdmi_writeb(hdmi, hsync_len >> 8, HDMI_FC_HSYNCINWIDTH1);
+ hdmi_writeb(hdmi, hsync_len, HDMI_FC_HSYNCINWIDTH0);
+
+ /* Set up VSYNC active pulse width (in lines) */
+ vsync_len = mode->vsync_end - mode->vsync_start;
+ hdmi_writeb(hdmi, vsync_len, HDMI_FC_VSYNCINWIDTH);
+}
+
+static void imx_hdmi_phy_disable(struct imx_hdmi *hdmi)
+{
+ if (!hdmi->phy_enabled)
+ return;
+
+ imx_hdmi_phy_enable_tmds(hdmi, 0);
+ imx_hdmi_phy_enable_power(hdmi, 0);
+
+ hdmi->phy_enabled = false;
+}
+
+/* HDMI Initialization Step B.4 */
+static void imx_hdmi_enable_video_path(struct imx_hdmi *hdmi)
+{
+ u8 clkdis;
+
+ /* control period minimum duration */
+ hdmi_writeb(hdmi, 12, HDMI_FC_CTRLDUR);
+ hdmi_writeb(hdmi, 32, HDMI_FC_EXCTRLDUR);
+ hdmi_writeb(hdmi, 1, HDMI_FC_EXCTRLSPAC);
+
+ /* Set to fill TMDS data channels */
+ hdmi_writeb(hdmi, 0x0B, HDMI_FC_CH0PREAM);
+ hdmi_writeb(hdmi, 0x16, HDMI_FC_CH1PREAM);
+ hdmi_writeb(hdmi, 0x21, HDMI_FC_CH2PREAM);
+
+ /* Enable pixel clock and tmds data path */
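+ /* Start fully gated: 0x7F sets every HDMI_MC_CLKDIS_*_DISABLE bit */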
+ clkdis = 0x7F;
+ clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE;
+ hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
+
+ clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
+ hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
+
+ /* Enable csc path */
+ if (is_color_space_conversion(hdmi)) {
+ clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
+ hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
+ }
+}
+
+static void hdmi_enable_audio_clk(struct imx_hdmi *hdmi)
+{
+ u8 clkdis;
+
+ clkdis = hdmi_readb(hdmi, HDMI_MC_CLKDIS);
+ clkdis &= ~HDMI_MC_CLKDIS_AUDCLK_DISABLE;
+ hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
+}
+
+/* Workaround to clear the overflow condition */
+static void imx_hdmi_clear_overflow(struct imx_hdmi *hdmi)
+{
+ int count;
+ u8 val;
+
+ /* TMDS software reset */
+ hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, HDMI_MC_SWRSTZ);
+
+ val = hdmi_readb(hdmi, HDMI_FC_INVIDCONF);
+ if (hdmi->dev_type == IMX6DL_HDMI) {
+ hdmi_writeb(hdmi, val, HDMI_FC_INVIDCONF);
+ return;
+ }
+
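+ /*
+  * On i.MX6Q the FC_INVIDCONF write-back has to be repeated to clear
+  * the overflow; i.MX6DL only needs the single write above.
+  */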
+ for (count = 0; count < 4; count++)
+ hdmi_writeb(hdmi, val, HDMI_FC_INVIDCONF);
+}
+
+static void hdmi_enable_overflow_interrupts(struct imx_hdmi *hdmi)
+{
+ hdmi_writeb(hdmi, 0, HDMI_FC_MASK2);
+ hdmi_writeb(hdmi, 0, HDMI_IH_MUTE_FC_STAT2);
+}
+
+static void hdmi_disable_overflow_interrupts(struct imx_hdmi *hdmi)
+{
+ hdmi_writeb(hdmi, HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK,
+ HDMI_IH_MUTE_FC_STAT2);
+}
+
+static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode)
+{
+ int ret;
+
+ hdmi_disable_overflow_interrupts(hdmi);
+
+ hdmi->vic = drm_match_cea_mode(mode);
+
+ if (!hdmi->vic) {
+ dev_dbg(hdmi->dev, "Non-CEA mode used in HDMI\n");
+ hdmi->hdmi_data.video_mode.mdvi = true;
+ } else {
+ dev_dbg(hdmi->dev, "CEA mode used vic=%d\n", hdmi->vic);
+ hdmi->hdmi_data.video_mode.mdvi = false;
+ }
+
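+ /*
+  * The SD CEA modes (480i/p and 576i/p) use ITU-R BT.601 colorimetry,
+  * everything else uses BT.709.
+  */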
+ if ((hdmi->vic == 6) || (hdmi->vic == 7) ||
+ (hdmi->vic == 21) || (hdmi->vic == 22) ||
+ (hdmi->vic == 2) || (hdmi->vic == 3) ||
+ (hdmi->vic == 17) || (hdmi->vic == 18))
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
+ else
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
+
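+ /*
+  * The 1440/2880 pixel wide CEA modes carry pixel-repeated video and
+  * need output pixel repetition enabled.
+  */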
+ if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
+ (hdmi->vic == 12) || (hdmi->vic == 13) ||
+ (hdmi->vic == 14) || (hdmi->vic == 15) ||
+ (hdmi->vic == 25) || (hdmi->vic == 26) ||
+ (hdmi->vic == 27) || (hdmi->vic == 28) ||
+ (hdmi->vic == 29) || (hdmi->vic == 30) ||
+ (hdmi->vic == 35) || (hdmi->vic == 36) ||
+ (hdmi->vic == 37) || (hdmi->vic == 38))
+ hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 1;
+ else
+ hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
+
+ hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0;
+
+ /* TODO: Get input format from IPU (via FB driver interface) */
+ hdmi->hdmi_data.enc_in_format = RGB;
+
+ hdmi->hdmi_data.enc_out_format = RGB;
+
+ hdmi->hdmi_data.enc_color_depth = 8;
+ hdmi->hdmi_data.pix_repet_factor = 0;
+ hdmi->hdmi_data.hdcp_enable = 0;
+ hdmi->hdmi_data.video_mode.mdataenablepolarity = true;
+
+ /* HDMI Initialization Step B.1 */
+ hdmi_av_composer(hdmi, mode);
+
+ /* HDMI Initialization Step B.2 */
+ ret = imx_hdmi_phy_init(hdmi);
+ if (ret)
+ return ret;
+
+ /* HDMI Initialization Step B.3 */
+ imx_hdmi_enable_video_path(hdmi);
+
+ /* not for DVI mode */
+ if (hdmi->hdmi_data.video_mode.mdvi) {
+ dev_dbg(hdmi->dev, "%s DVI mode\n", __func__);
+ } else {
+ dev_dbg(hdmi->dev, "%s CEA mode\n", __func__);
+
+ /* HDMI Initialization Step E - Configure audio */
+ hdmi_clk_regenerator_update_pixel_clock(hdmi);
+ hdmi_enable_audio_clk(hdmi);
+
+ /* HDMI Initialization Step F - Configure AVI InfoFrame */
+ hdmi_config_AVI(hdmi);
+ }
+
+ hdmi_video_packetize(hdmi);
+ hdmi_video_csc(hdmi);
+ hdmi_video_sample(hdmi);
+ hdmi_tx_hdcp_config(hdmi);
+
+ imx_hdmi_clear_overflow(hdmi);
+ if (hdmi->cable_plugin && !hdmi->hdmi_data.video_mode.mdvi)
+ hdmi_enable_overflow_interrupts(hdmi);
+
+ return 0;
+}
+
+/* Wait until we are registered to enable interrupts */
+static int imx_hdmi_fb_registered(struct imx_hdmi *hdmi)
+{
+ hdmi_writeb(hdmi, HDMI_PHY_I2CM_INT_ADDR_DONE_POL,
+ HDMI_PHY_I2CM_INT_ADDR);
+
+ hdmi_writeb(hdmi, HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL |
+ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL,
+ HDMI_PHY_I2CM_CTLINT_ADDR);
+
+ /* enable cable hot plug irq */
+ hdmi_writeb(hdmi, (u8)~HDMI_PHY_HPD, HDMI_PHY_MASK0);
+
+ /* Clear Hotplug interrupts */
+ hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0);
+
+ /* Unmute only the HPD interrupt; all other PHY interrupts stay muted */
+ hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);
+
+ return 0;
+}
+
+static void initialize_hdmi_ih_mutes(struct imx_hdmi *hdmi)
+{
+ u8 ih_mute;
+
+ /*
+ * Boot up defaults are:
+ * HDMI_IH_MUTE = 0x03 (disabled)
+ * HDMI_IH_MUTE_* = 0x00 (enabled)
+ *
+ * Disable top level interrupt bits in HDMI block
+ */
+ ih_mute = hdmi_readb(hdmi, HDMI_IH_MUTE) |
+ HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT |
+ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT;
+
+ hdmi_writeb(hdmi, ih_mute, HDMI_IH_MUTE);
+
+ /* by default mask all interrupts */
+ hdmi_writeb(hdmi, 0xff, HDMI_VP_MASK);
+ hdmi_writeb(hdmi, 0xff, HDMI_FC_MASK0);
+ hdmi_writeb(hdmi, 0xff, HDMI_FC_MASK1);
+ hdmi_writeb(hdmi, 0xff, HDMI_FC_MASK2);
+ hdmi_writeb(hdmi, 0xff, HDMI_PHY_MASK0);
+ hdmi_writeb(hdmi, 0xff, HDMI_PHY_I2CM_INT_ADDR);
+ hdmi_writeb(hdmi, 0xff, HDMI_PHY_I2CM_CTLINT_ADDR);
+ hdmi_writeb(hdmi, 0xff, HDMI_AUD_INT);
+ hdmi_writeb(hdmi, 0xff, HDMI_AUD_SPDIFINT);
+ hdmi_writeb(hdmi, 0xff, HDMI_AUD_HBR_MASK);
+ hdmi_writeb(hdmi, 0xff, HDMI_GP_MASK);
+ hdmi_writeb(hdmi, 0xff, HDMI_A_APIINTMSK);
+ hdmi_writeb(hdmi, 0xff, HDMI_CEC_MASK);
+ hdmi_writeb(hdmi, 0xff, HDMI_I2CM_INT);
+ hdmi_writeb(hdmi, 0xff, HDMI_I2CM_CTLINT);
+
+ /* Disable interrupts in the IH_MUTE_* registers */
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_FC_STAT0);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_FC_STAT1);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_FC_STAT2);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_AS_STAT0);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_PHY_STAT0);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_I2CM_STAT0);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_CEC_STAT0);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_VP_STAT0);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_I2CMPHY_STAT0);
+ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_AHBDMAAUD_STAT0);
+
+ /* Enable top level interrupt bits in HDMI block */
+ ih_mute &= ~(HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT |
+ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT);
+ hdmi_writeb(hdmi, ih_mute, HDMI_IH_MUTE);
+}
+
+static void imx_hdmi_poweron(struct imx_hdmi *hdmi)
+{
+ imx_hdmi_setup(hdmi, &hdmi->previous_mode);
+}
+
+static void imx_hdmi_poweroff(struct imx_hdmi *hdmi)
+{
+ imx_hdmi_phy_disable(hdmi);
+}
+
+static enum drm_connector_status imx_hdmi_connector_detect(struct drm_connector
+ *connector, bool force)
+{
+ /* FIXME */
+ return connector_status_connected;
+}
+
+static void imx_hdmi_connector_destroy(struct drm_connector *connector)
+{
+}
+
+static int imx_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi,
+ connector);
+ struct edid *edid;
+ int ret;
+
+ if (!hdmi->ddc)
+ return 0;
+
+ edid = drm_get_edid(connector, hdmi->ddc);
+ if (edid) {
+ dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n",
+ edid->width_cm, edid->height_cm);
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ } else {
+ dev_dbg(hdmi->dev, "failed to get edid\n");
+ }
+
+ return 0;
+}
+
+static int imx_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_encoder *imx_hdmi_connector_best_encoder(struct drm_connector
+ *connector)
+{
+ struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi,
+ connector);
+
+ return &hdmi->encoder;
+}
+
+static void imx_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
+
+ imx_hdmi_setup(hdmi, mode);
+
+ /* Store the display mode for plugin/DPMS poweron events */
+ memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode));
+}
+
+static bool imx_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void imx_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+}
+
+static void imx_hdmi_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
+
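+ /* DRM_MODE_DPMS_ON is 0; any other DPMS state powers the PHY down */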
+ if (mode)
+ imx_hdmi_poweroff(hdmi);
+ else
+ imx_hdmi_poweron(hdmi);
+}
+
+static void imx_hdmi_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
+
+ imx_hdmi_poweroff(hdmi);
+ imx_drm_crtc_panel_format(encoder->crtc, DRM_MODE_ENCODER_NONE,
+ V4L2_PIX_FMT_RGB24);
+}
+
+static void imx_hdmi_encoder_commit(struct drm_encoder *encoder)
+{
+ struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
+ int mux = imx_drm_encoder_get_mux_id(hdmi->imx_drm_encoder,
+ encoder->crtc);
+
+ imx_hdmi_set_ipu_di_mux(hdmi, mux);
+
+ imx_hdmi_poweron(hdmi);
+}
+
+static void imx_hdmi_encoder_destroy(struct drm_encoder *encoder)
+{
+}
+
+static struct drm_encoder_funcs imx_hdmi_encoder_funcs = {
+ .destroy = imx_hdmi_encoder_destroy,
+};
+
+static struct drm_encoder_helper_funcs imx_hdmi_encoder_helper_funcs = {
+ .dpms = imx_hdmi_encoder_dpms,
+ .prepare = imx_hdmi_encoder_prepare,
+ .commit = imx_hdmi_encoder_commit,
+ .mode_set = imx_hdmi_encoder_mode_set,
+ .mode_fixup = imx_hdmi_encoder_mode_fixup,
+ .disable = imx_hdmi_encoder_disable,
+};
+
+static struct drm_connector_funcs imx_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = imx_hdmi_connector_detect,
+ .destroy = imx_hdmi_connector_destroy,
+};
+
+static struct drm_connector_helper_funcs imx_hdmi_connector_helper_funcs = {
+ .get_modes = imx_hdmi_connector_get_modes,
+ .mode_valid = imx_hdmi_connector_mode_valid,
+ .best_encoder = imx_hdmi_connector_best_encoder,
+};
+
+static irqreturn_t imx_hdmi_irq(int irq, void *dev_id)
+{
+ struct imx_hdmi *hdmi = dev_id;
+ u8 intr_stat;
+ u8 phy_int_pol;
+ u8 val;
+
+ intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
+
+ phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0);
+
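+ /*
+  * HDMI_PHY_POL0 selects which HPD level raises the interrupt; flip it
+  * after each event so the opposite transition is caught next time.
+  */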
+ if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
+ if (phy_int_pol & HDMI_PHY_HPD) {
+ dev_dbg(hdmi->dev, "EVENT=plugin\n");
+
+ val = hdmi_readb(hdmi, HDMI_PHY_POL0);
+ val &= ~HDMI_PHY_HPD;
+ hdmi_writeb(hdmi, val, HDMI_PHY_POL0);
+
+ imx_hdmi_poweron(hdmi);
+ } else {
+ dev_dbg(hdmi->dev, "EVENT=plugout\n");
+
+ val = hdmi_readb(hdmi, HDMI_PHY_POL0);
+ val |= HDMI_PHY_HPD;
+ hdmi_writeb(hdmi, val, HDMI_PHY_POL0);
+
+ imx_hdmi_poweroff(hdmi);
+ }
+ }
+
+ hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0);
+
+ return IRQ_HANDLED;
+}
+
+static int imx_hdmi_register(struct imx_hdmi *hdmi)
+{
+ int ret;
+
+ hdmi->connector.funcs = &imx_hdmi_connector_funcs;
+ hdmi->encoder.funcs = &imx_hdmi_encoder_funcs;
+
+ hdmi->encoder.encoder_type = DRM_MODE_ENCODER_TMDS;
+ hdmi->connector.connector_type = DRM_MODE_CONNECTOR_HDMIA;
+
+ drm_encoder_helper_add(&hdmi->encoder, &imx_hdmi_encoder_helper_funcs);
+ ret = imx_drm_add_encoder(&hdmi->encoder, &hdmi->imx_drm_encoder,
+ THIS_MODULE);
+ if (ret) {
+ dev_err(hdmi->dev, "adding encoder failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_connector_helper_add(&hdmi->connector,
+ &imx_hdmi_connector_helper_funcs);
+
+ ret = imx_drm_add_connector(&hdmi->connector,
+ &hdmi->imx_drm_connector, THIS_MODULE);
+ if (ret) {
+ imx_drm_remove_encoder(hdmi->imx_drm_encoder);
+ dev_err(hdmi->dev, "adding connector failed: %d\n", ret);
+ return ret;
+ }
+
+ hdmi->connector.encoder = &hdmi->encoder;
+
+ drm_mode_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
+
+ return 0;
+}
+
+static struct platform_device_id imx_hdmi_devtype[] = {
+ {
+ .name = "imx6q-hdmi",
+ .driver_data = IMX6Q_HDMI,
+ }, {
+ .name = "imx6dl-hdmi",
+ .driver_data = IMX6DL_HDMI,
+ }, { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, imx_hdmi_devtype);
+
+static const struct of_device_id imx_hdmi_dt_ids[] = {
+{ .compatible = "fsl,imx6q-hdmi", .data = &imx_hdmi_devtype[IMX6Q_HDMI], },
+{ .compatible = "fsl,imx6dl-hdmi", .data = &imx_hdmi_devtype[IMX6DL_HDMI], },
+{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_hdmi_dt_ids);
+
+static int imx_hdmi_platform_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(imx_hdmi_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *ddc_node;
+ struct imx_hdmi *hdmi;
+ struct resource *iores;
+ int ret, irq;
+
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = &pdev->dev;
+
+ if (of_id) {
+ const struct platform_device_id *device_id = of_id->data;
+ hdmi->dev_type = device_id->driver_data;
+ }
+
+ ddc_node = of_parse_phandle(np, "ddc", 0);
+ if (ddc_node) {
+ hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
+ if (!hdmi->ddc)
+ dev_dbg(hdmi->dev, "failed to read ddc node\n");
+
+ of_node_put(ddc_node);
+ } else {
+ dev_dbg(hdmi->dev, "no ddc property found\n");
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ ret = devm_request_irq(&pdev->dev, irq, imx_hdmi_irq, 0,
+ dev_name(&pdev->dev), hdmi);
+ if (ret)
+ return ret;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdmi->regs = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(hdmi->regs))
+ return PTR_ERR(hdmi->regs);
+
+ hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
+ if (IS_ERR(hdmi->regmap))
+ return PTR_ERR(hdmi->regmap);
+
+ hdmi->isfr_clk = devm_clk_get(hdmi->dev, "isfr");
+ if (IS_ERR(hdmi->isfr_clk)) {
+ ret = PTR_ERR(hdmi->isfr_clk);
+ dev_err(hdmi->dev,
+ "Unable to get HDMI isfr clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(hdmi->isfr_clk);
+ if (ret) {
+ dev_err(hdmi->dev,
+ "Cannot enable HDMI isfr clock: %d\n", ret);
+ return ret;
+ }
+
+ hdmi->iahb_clk = devm_clk_get(hdmi->dev, "iahb");
+ if (IS_ERR(hdmi->iahb_clk)) {
+ ret = PTR_ERR(hdmi->iahb_clk);
+ dev_err(hdmi->dev,
+ "Unable to get HDMI iahb clk: %d\n", ret);
+ goto err_isfr;
+ }
+
+ ret = clk_prepare_enable(hdmi->iahb_clk);
+ if (ret) {
+ dev_err(hdmi->dev,
+ "Cannot enable HDMI iahb clock: %d\n", ret);
+ goto err_isfr;
+ }
+
+ /* Product and revision IDs */
+ dev_info(&pdev->dev,
+ "Detected HDMI controller 0x%x:0x%x:0x%x:0x%x\n",
+ hdmi_readb(hdmi, HDMI_DESIGN_ID),
+ hdmi_readb(hdmi, HDMI_REVISION_ID),
+ hdmi_readb(hdmi, HDMI_PRODUCT_ID0),
+ hdmi_readb(hdmi, HDMI_PRODUCT_ID1));
+
+ initialize_hdmi_ih_mutes(hdmi);
+
+ /*
+ * To prevent overflows in HDMI_IH_FC_STAT2, set the clk regenerator
+ * N and cts values before enabling phy
+ */
+ hdmi_init_clk_regenerator(hdmi);
+
+ /*
+ * Configure registers related to HDMI interrupt
+ * generation before registering IRQ.
+ */
+ hdmi_writeb(hdmi, HDMI_PHY_HPD, HDMI_PHY_POL0);
+
+ /* Clear Hotplug interrupts */
+ hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0);
+
+ ret = imx_hdmi_fb_registered(hdmi);
+ if (ret)
+ goto err_iahb;
+
+ ret = imx_hdmi_register(hdmi);
+ if (ret)
+ goto err_iahb;
+
+ imx_drm_encoder_add_possible_crtcs(hdmi->imx_drm_encoder, np);
+
+ platform_set_drvdata(pdev, hdmi);
+
+ return 0;
+
+err_iahb:
+ clk_disable_unprepare(hdmi->iahb_clk);
+err_isfr:
+ clk_disable_unprepare(hdmi->isfr_clk);
+
+ return ret;
+}
+
+static int imx_hdmi_platform_remove(struct platform_device *pdev)
+{
+ struct imx_hdmi *hdmi = platform_get_drvdata(pdev);
+ struct drm_connector *connector = &hdmi->connector;
+ struct drm_encoder *encoder = &hdmi->encoder;
+
+ drm_mode_connector_detach_encoder(connector, encoder);
+ imx_drm_remove_connector(hdmi->imx_drm_connector);
+ imx_drm_remove_encoder(hdmi->imx_drm_encoder);
+
+ clk_disable_unprepare(hdmi->iahb_clk);
+ clk_disable_unprepare(hdmi->isfr_clk);
+ i2c_put_adapter(hdmi->ddc);
+
+ return 0;
+}
+
+static struct platform_driver imx_hdmi_driver = {
+ .probe = imx_hdmi_platform_probe,
+ .remove = imx_hdmi_platform_remove,
+ .driver = {
+ .name = "imx-hdmi",
+ .owner = THIS_MODULE,
+ .of_match_table = imx_hdmi_dt_ids,
+ },
+};
+
+module_platform_driver(imx_hdmi_driver);
+
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("i.MX6 HDMI transmitter driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-hdmi");
diff --git a/drivers/staging/imx-drm/imx-hdmi.h b/drivers/staging/imx-drm/imx-hdmi.h
new file mode 100644
index 000000000000..39b677689db6
--- /dev/null
+++ b/drivers/staging/imx-drm/imx-hdmi.h
@@ -0,0 +1,1032 @@
+/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __IMX_HDMI_H__
+#define __IMX_HDMI_H__
+
+/* Identification Registers */
+#define HDMI_DESIGN_ID 0x0000
+#define HDMI_REVISION_ID 0x0001
+#define HDMI_PRODUCT_ID0 0x0002
+#define HDMI_PRODUCT_ID1 0x0003
+#define HDMI_CONFIG0_ID 0x0004
+#define HDMI_CONFIG1_ID 0x0005
+#define HDMI_CONFIG2_ID 0x0006
+#define HDMI_CONFIG3_ID 0x0007
+
+/* Interrupt Registers */
+#define HDMI_IH_FC_STAT0 0x0100
+#define HDMI_IH_FC_STAT1 0x0101
+#define HDMI_IH_FC_STAT2 0x0102
+#define HDMI_IH_AS_STAT0 0x0103
+#define HDMI_IH_PHY_STAT0 0x0104
+#define HDMI_IH_I2CM_STAT0 0x0105
+#define HDMI_IH_CEC_STAT0 0x0106
+#define HDMI_IH_VP_STAT0 0x0107
+#define HDMI_IH_I2CMPHY_STAT0 0x0108
+#define HDMI_IH_AHBDMAAUD_STAT0 0x0109
+
+#define HDMI_IH_MUTE_FC_STAT0 0x0180
+#define HDMI_IH_MUTE_FC_STAT1 0x0181
+#define HDMI_IH_MUTE_FC_STAT2 0x0182
+#define HDMI_IH_MUTE_AS_STAT0 0x0183
+#define HDMI_IH_MUTE_PHY_STAT0 0x0184
+#define HDMI_IH_MUTE_I2CM_STAT0 0x0185
+#define HDMI_IH_MUTE_CEC_STAT0 0x0186
+#define HDMI_IH_MUTE_VP_STAT0 0x0187
+#define HDMI_IH_MUTE_I2CMPHY_STAT0 0x0188
+#define HDMI_IH_MUTE_AHBDMAAUD_STAT0 0x0189
+#define HDMI_IH_MUTE 0x01FF
+
+/* Video Sample Registers */
+#define HDMI_TX_INVID0 0x0200
+#define HDMI_TX_INSTUFFING 0x0201
+#define HDMI_TX_GYDATA0 0x0202
+#define HDMI_TX_GYDATA1 0x0203
+#define HDMI_TX_RCRDATA0 0x0204
+#define HDMI_TX_RCRDATA1 0x0205
+#define HDMI_TX_BCBDATA0 0x0206
+#define HDMI_TX_BCBDATA1 0x0207
+
+/* Video Packetizer Registers */
+#define HDMI_VP_STATUS 0x0800
+#define HDMI_VP_PR_CD 0x0801
+#define HDMI_VP_STUFF 0x0802
+#define HDMI_VP_REMAP 0x0803
+#define HDMI_VP_CONF 0x0804
+#define HDMI_VP_STAT 0x0805
+#define HDMI_VP_INT 0x0806
+#define HDMI_VP_MASK 0x0807
+#define HDMI_VP_POL 0x0808
+
+/* Frame Composer Registers */
+#define HDMI_FC_INVIDCONF 0x1000
+#define HDMI_FC_INHACTV0 0x1001
+#define HDMI_FC_INHACTV1 0x1002
+#define HDMI_FC_INHBLANK0 0x1003
+#define HDMI_FC_INHBLANK1 0x1004
+#define HDMI_FC_INVACTV0 0x1005
+#define HDMI_FC_INVACTV1 0x1006
+#define HDMI_FC_INVBLANK 0x1007
+#define HDMI_FC_HSYNCINDELAY0 0x1008
+#define HDMI_FC_HSYNCINDELAY1 0x1009
+#define HDMI_FC_HSYNCINWIDTH0 0x100A
+#define HDMI_FC_HSYNCINWIDTH1 0x100B
+#define HDMI_FC_VSYNCINDELAY 0x100C
+#define HDMI_FC_VSYNCINWIDTH 0x100D
+#define HDMI_FC_INFREQ0 0x100E
+#define HDMI_FC_INFREQ1 0x100F
+#define HDMI_FC_INFREQ2 0x1010
+#define HDMI_FC_CTRLDUR 0x1011
+#define HDMI_FC_EXCTRLDUR 0x1012
+#define HDMI_FC_EXCTRLSPAC 0x1013
+#define HDMI_FC_CH0PREAM 0x1014
+#define HDMI_FC_CH1PREAM 0x1015
+#define HDMI_FC_CH2PREAM 0x1016
+#define HDMI_FC_AVICONF3 0x1017
+#define HDMI_FC_GCP 0x1018
+#define HDMI_FC_AVICONF0 0x1019
+#define HDMI_FC_AVICONF1 0x101A
+#define HDMI_FC_AVICONF2 0x101B
+#define HDMI_FC_AVIVID 0x101C
+#define HDMI_FC_AVIETB0 0x101D
+#define HDMI_FC_AVIETB1 0x101E
+#define HDMI_FC_AVISBB0 0x101F
+#define HDMI_FC_AVISBB1 0x1020
+#define HDMI_FC_AVIELB0 0x1021
+#define HDMI_FC_AVIELB1 0x1022
+#define HDMI_FC_AVISRB0 0x1023
+#define HDMI_FC_AVISRB1 0x1024
+#define HDMI_FC_AUDICONF0 0x1025
+#define HDMI_FC_AUDICONF1 0x1026
+#define HDMI_FC_AUDICONF2 0x1027
+#define HDMI_FC_AUDICONF3 0x1028
+#define HDMI_FC_VSDIEEEID0 0x1029
+#define HDMI_FC_VSDSIZE 0x102A
+#define HDMI_FC_VSDIEEEID1 0x1030
+#define HDMI_FC_VSDIEEEID2 0x1031
+#define HDMI_FC_VSDPAYLOAD0 0x1032
+#define HDMI_FC_VSDPAYLOAD1 0x1033
+#define HDMI_FC_VSDPAYLOAD2 0x1034
+#define HDMI_FC_VSDPAYLOAD3 0x1035
+#define HDMI_FC_VSDPAYLOAD4 0x1036
+#define HDMI_FC_VSDPAYLOAD5 0x1037
+#define HDMI_FC_VSDPAYLOAD6 0x1038
+#define HDMI_FC_VSDPAYLOAD7 0x1039
+#define HDMI_FC_VSDPAYLOAD8 0x103A
+#define HDMI_FC_VSDPAYLOAD9 0x103B
+#define HDMI_FC_VSDPAYLOAD10 0x103C
+#define HDMI_FC_VSDPAYLOAD11 0x103D
+#define HDMI_FC_VSDPAYLOAD12 0x103E
+#define HDMI_FC_VSDPAYLOAD13 0x103F
+#define HDMI_FC_VSDPAYLOAD14 0x1040
+#define HDMI_FC_VSDPAYLOAD15 0x1041
+#define HDMI_FC_VSDPAYLOAD16 0x1042
+#define HDMI_FC_VSDPAYLOAD17 0x1043
+#define HDMI_FC_VSDPAYLOAD18 0x1044
+#define HDMI_FC_VSDPAYLOAD19 0x1045
+#define HDMI_FC_VSDPAYLOAD20 0x1046
+#define HDMI_FC_VSDPAYLOAD21 0x1047
+#define HDMI_FC_VSDPAYLOAD22 0x1048
+#define HDMI_FC_VSDPAYLOAD23 0x1049
+#define HDMI_FC_SPDVENDORNAME0 0x104A
+#define HDMI_FC_SPDVENDORNAME1 0x104B
+#define HDMI_FC_SPDVENDORNAME2 0x104C
+#define HDMI_FC_SPDVENDORNAME3 0x104D
+#define HDMI_FC_SPDVENDORNAME4 0x104E
+#define HDMI_FC_SPDVENDORNAME5 0x104F
+#define HDMI_FC_SPDVENDORNAME6 0x1050
+#define HDMI_FC_SPDVENDORNAME7 0x1051
+#define HDMI_FC_SDPPRODUCTNAME0 0x1052
+#define HDMI_FC_SDPPRODUCTNAME1 0x1053
+#define HDMI_FC_SDPPRODUCTNAME2 0x1054
+#define HDMI_FC_SDPPRODUCTNAME3 0x1055
+#define HDMI_FC_SDPPRODUCTNAME4 0x1056
+#define HDMI_FC_SDPPRODUCTNAME5 0x1057
+#define HDMI_FC_SDPPRODUCTNAME6 0x1058
+#define HDMI_FC_SDPPRODUCTNAME7 0x1059
+#define HDMI_FC_SDPPRODUCTNAME8 0x105A
+#define HDMI_FC_SDPPRODUCTNAME9 0x105B
+#define HDMI_FC_SDPPRODUCTNAME10 0x105C
+#define HDMI_FC_SDPPRODUCTNAME11 0x105D
+#define HDMI_FC_SDPPRODUCTNAME12 0x105E
+#define HDMI_FC_SDPPRODUCTNAME13 0x105F
+#define HDMI_FC_SDPPRODUCTNAME14 0x1060
+#define HDMI_FC_SPDPRODUCTNAME15 0x1061
+#define HDMI_FC_SPDDEVICEINF 0x1062
+#define HDMI_FC_AUDSCONF 0x1063
+#define HDMI_FC_AUDSSTAT 0x1064
+#define HDMI_FC_DATACH0FILL 0x1070
+#define HDMI_FC_DATACH1FILL 0x1071
+#define HDMI_FC_DATACH2FILL 0x1072
+#define HDMI_FC_CTRLQHIGH 0x1073
+#define HDMI_FC_CTRLQLOW 0x1074
+#define HDMI_FC_ACP0 0x1075
+#define HDMI_FC_ACP28 0x1076
+#define HDMI_FC_ACP27 0x1077
+#define HDMI_FC_ACP26 0x1078
+#define HDMI_FC_ACP25 0x1079
+#define HDMI_FC_ACP24 0x107A
+#define HDMI_FC_ACP23 0x107B
+#define HDMI_FC_ACP22 0x107C
+#define HDMI_FC_ACP21 0x107D
+#define HDMI_FC_ACP20 0x107E
+#define HDMI_FC_ACP19 0x107F
+#define HDMI_FC_ACP18 0x1080
+#define HDMI_FC_ACP17 0x1081
+#define HDMI_FC_ACP16 0x1082
+#define HDMI_FC_ACP15 0x1083
+#define HDMI_FC_ACP14 0x1084
+#define HDMI_FC_ACP13 0x1085
+#define HDMI_FC_ACP12 0x1086
+#define HDMI_FC_ACP11 0x1087
+#define HDMI_FC_ACP10 0x1088
+#define HDMI_FC_ACP9 0x1089
+#define HDMI_FC_ACP8 0x108A
+#define HDMI_FC_ACP7 0x108B
+#define HDMI_FC_ACP6 0x108C
+#define HDMI_FC_ACP5 0x108D
+#define HDMI_FC_ACP4 0x108E
+#define HDMI_FC_ACP3 0x108F
+#define HDMI_FC_ACP2 0x1090
+#define HDMI_FC_ACP1 0x1091
+#define HDMI_FC_ISCR1_0 0x1092
+#define HDMI_FC_ISCR1_16 0x1093
+#define HDMI_FC_ISCR1_15 0x1094
+#define HDMI_FC_ISCR1_14 0x1095
+#define HDMI_FC_ISCR1_13 0x1096
+#define HDMI_FC_ISCR1_12 0x1097
+#define HDMI_FC_ISCR1_11 0x1098
+#define HDMI_FC_ISCR1_10 0x1099
+#define HDMI_FC_ISCR1_9 0x109A
+#define HDMI_FC_ISCR1_8 0x109B
+#define HDMI_FC_ISCR1_7 0x109C
+#define HDMI_FC_ISCR1_6 0x109D
+#define HDMI_FC_ISCR1_5 0x109E
+#define HDMI_FC_ISCR1_4 0x109F
+#define HDMI_FC_ISCR1_3 0x10A0
+#define HDMI_FC_ISCR1_2 0x10A1
+#define HDMI_FC_ISCR1_1 0x10A2
+#define HDMI_FC_ISCR2_15 0x10A3
+#define HDMI_FC_ISCR2_14 0x10A4
+#define HDMI_FC_ISCR2_13 0x10A5
+#define HDMI_FC_ISCR2_12 0x10A6
+#define HDMI_FC_ISCR2_11 0x10A7
+#define HDMI_FC_ISCR2_10 0x10A8
+#define HDMI_FC_ISCR2_9 0x10A9
+#define HDMI_FC_ISCR2_8 0x10AA
+#define HDMI_FC_ISCR2_7 0x10AB
+#define HDMI_FC_ISCR2_6 0x10AC
+#define HDMI_FC_ISCR2_5 0x10AD
+#define HDMI_FC_ISCR2_4 0x10AE
+#define HDMI_FC_ISCR2_3 0x10AF
+#define HDMI_FC_ISCR2_2 0x10B0
+#define HDMI_FC_ISCR2_1 0x10B1
+#define HDMI_FC_ISCR2_0 0x10B2
+#define HDMI_FC_DATAUTO0 0x10B3
+#define HDMI_FC_DATAUTO1 0x10B4
+#define HDMI_FC_DATAUTO2 0x10B5
+#define HDMI_FC_DATMAN 0x10B6
+#define HDMI_FC_DATAUTO3 0x10B7
+#define HDMI_FC_RDRB0 0x10B8
+#define HDMI_FC_RDRB1 0x10B9
+#define HDMI_FC_RDRB2 0x10BA
+#define HDMI_FC_RDRB3 0x10BB
+#define HDMI_FC_RDRB4 0x10BC
+#define HDMI_FC_RDRB5 0x10BD
+#define HDMI_FC_RDRB6 0x10BE
+#define HDMI_FC_RDRB7 0x10BF
+#define HDMI_FC_STAT0 0x10D0
+#define HDMI_FC_INT0 0x10D1
+#define HDMI_FC_MASK0 0x10D2
+#define HDMI_FC_POL0 0x10D3
+#define HDMI_FC_STAT1 0x10D4
+#define HDMI_FC_INT1 0x10D5
+#define HDMI_FC_MASK1 0x10D6
+#define HDMI_FC_POL1 0x10D7
+#define HDMI_FC_STAT2 0x10D8
+#define HDMI_FC_INT2 0x10D9
+#define HDMI_FC_MASK2 0x10DA
+#define HDMI_FC_POL2 0x10DB
+#define HDMI_FC_PRCONF 0x10E0
+
+#define HDMI_FC_GMD_STAT 0x1100
+#define HDMI_FC_GMD_EN 0x1101
+#define HDMI_FC_GMD_UP 0x1102
+#define HDMI_FC_GMD_CONF 0x1103
+#define HDMI_FC_GMD_HB 0x1104
+#define HDMI_FC_GMD_PB0 0x1105
+#define HDMI_FC_GMD_PB1 0x1106
+#define HDMI_FC_GMD_PB2 0x1107
+#define HDMI_FC_GMD_PB3 0x1108
+#define HDMI_FC_GMD_PB4 0x1109
+#define HDMI_FC_GMD_PB5 0x110A
+#define HDMI_FC_GMD_PB6 0x110B
+#define HDMI_FC_GMD_PB7 0x110C
+#define HDMI_FC_GMD_PB8 0x110D
+#define HDMI_FC_GMD_PB9 0x110E
+#define HDMI_FC_GMD_PB10 0x110F
+#define HDMI_FC_GMD_PB11 0x1110
+#define HDMI_FC_GMD_PB12 0x1111
+#define HDMI_FC_GMD_PB13 0x1112
+#define HDMI_FC_GMD_PB14 0x1113
+#define HDMI_FC_GMD_PB15 0x1114
+#define HDMI_FC_GMD_PB16 0x1115
+#define HDMI_FC_GMD_PB17 0x1116
+#define HDMI_FC_GMD_PB18 0x1117
+#define HDMI_FC_GMD_PB19 0x1118
+#define HDMI_FC_GMD_PB20 0x1119
+#define HDMI_FC_GMD_PB21 0x111A
+#define HDMI_FC_GMD_PB22 0x111B
+#define HDMI_FC_GMD_PB23 0x111C
+#define HDMI_FC_GMD_PB24 0x111D
+#define HDMI_FC_GMD_PB25 0x111E
+#define HDMI_FC_GMD_PB26 0x111F
+#define HDMI_FC_GMD_PB27 0x1120
+
+#define HDMI_FC_DBGFORCE 0x1200
+#define HDMI_FC_DBGAUD0CH0 0x1201
+#define HDMI_FC_DBGAUD1CH0 0x1202
+#define HDMI_FC_DBGAUD2CH0 0x1203
+#define HDMI_FC_DBGAUD0CH1 0x1204
+#define HDMI_FC_DBGAUD1CH1 0x1205
+#define HDMI_FC_DBGAUD2CH1 0x1206
+#define HDMI_FC_DBGAUD0CH2 0x1207
+#define HDMI_FC_DBGAUD1CH2 0x1208
+#define HDMI_FC_DBGAUD2CH2 0x1209
+#define HDMI_FC_DBGAUD0CH3 0x120A
+#define HDMI_FC_DBGAUD1CH3 0x120B
+#define HDMI_FC_DBGAUD2CH3 0x120C
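+ /* VIC 39 (1920x1080i 50Hz, 1250 lines total) is special-cased: always active-high */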
+#define HDMI_FC_DBGAUD0CH4 0x120D
+#define HDMI_FC_DBGAUD1CH4 0x120E
+#define HDMI_FC_DBGAUD2CH4 0x120F
+#define HDMI_FC_DBGAUD0CH5 0x1210
+#define HDMI_FC_DBGAUD1CH5 0x1211
+#define HDMI_FC_DBGAUD2CH5 0x1212
+#define HDMI_FC_DBGAUD0CH6 0x1213
+#define HDMI_FC_DBGAUD1CH6 0x1214
+#define HDMI_FC_DBGAUD2CH6 0x1215
+#define HDMI_FC_DBGAUD0CH7 0x1216
+#define HDMI_FC_DBGAUD1CH7 0x1217
+#define HDMI_FC_DBGAUD2CH7 0x1218
+#define HDMI_FC_DBGTMDS0 0x1219
+#define HDMI_FC_DBGTMDS1 0x121A
+#define HDMI_FC_DBGTMDS2 0x121B
+
+/* HDMI Source PHY Registers */
+#define HDMI_PHY_CONF0 0x3000
+#define HDMI_PHY_TST0 0x3001
+#define HDMI_PHY_TST1 0x3002
+#define HDMI_PHY_TST2 0x3003
+#define HDMI_PHY_STAT0 0x3004
+#define HDMI_PHY_INT0 0x3005
+#define HDMI_PHY_MASK0 0x3006
+#define HDMI_PHY_POL0 0x3007
+
+/* HDMI Master PHY Registers */
+#define HDMI_PHY_I2CM_SLAVE_ADDR 0x3020
+#define HDMI_PHY_I2CM_ADDRESS_ADDR 0x3021
+#define HDMI_PHY_I2CM_DATAO_1_ADDR 0x3022
+#define HDMI_PHY_I2CM_DATAO_0_ADDR 0x3023
+#define HDMI_PHY_I2CM_DATAI_1_ADDR 0x3024
+#define HDMI_PHY_I2CM_DATAI_0_ADDR 0x3025
+#define HDMI_PHY_I2CM_OPERATION_ADDR 0x3026
+#define HDMI_PHY_I2CM_INT_ADDR 0x3027
+#define HDMI_PHY_I2CM_CTLINT_ADDR 0x3028
+#define HDMI_PHY_I2CM_DIV_ADDR 0x3029
+#define HDMI_PHY_I2CM_SOFTRSTZ_ADDR 0x302a
+#define HDMI_PHY_I2CM_SS_SCL_HCNT_1_ADDR 0x302b
+#define HDMI_PHY_I2CM_SS_SCL_HCNT_0_ADDR 0x302c
+#define HDMI_PHY_I2CM_SS_SCL_LCNT_1_ADDR 0x302d
+#define HDMI_PHY_I2CM_SS_SCL_LCNT_0_ADDR 0x302e
+#define HDMI_PHY_I2CM_FS_SCL_HCNT_1_ADDR 0x302f
+#define HDMI_PHY_I2CM_FS_SCL_HCNT_0_ADDR 0x3030
+#define HDMI_PHY_I2CM_FS_SCL_LCNT_1_ADDR 0x3031
+#define HDMI_PHY_I2CM_FS_SCL_LCNT_0_ADDR 0x3032
+
+/* Audio Sampler Registers */
+#define HDMI_AUD_CONF0 0x3100
+#define HDMI_AUD_CONF1 0x3101
+#define HDMI_AUD_INT 0x3102
+#define HDMI_AUD_CONF2 0x3103
+#define HDMI_AUD_N1 0x3200
+#define HDMI_AUD_N2 0x3201
+#define HDMI_AUD_N3 0x3202
+#define HDMI_AUD_CTS1 0x3203
+#define HDMI_AUD_CTS2 0x3204
+#define HDMI_AUD_CTS3 0x3205
+#define HDMI_AUD_INPUTCLKFS 0x3206
+#define HDMI_AUD_SPDIFINT 0x3302
+#define HDMI_AUD_CONF0_HBR 0x3400
+#define HDMI_AUD_HBR_STATUS 0x3401
+#define HDMI_AUD_HBR_INT 0x3402
+#define HDMI_AUD_HBR_POL 0x3403
+#define HDMI_AUD_HBR_MASK 0x3404
+
+/*
+ * Generic Parallel Audio Interface Registers
+ * Not used as GPAUD interface is not enabled in hw
+ */
+#define HDMI_GP_CONF0 0x3500
+#define HDMI_GP_CONF1 0x3501
+#define HDMI_GP_CONF2 0x3502
+#define HDMI_GP_STAT 0x3503
+#define HDMI_GP_INT 0x3504
+#define HDMI_GP_MASK 0x3505
+#define HDMI_GP_POL 0x3506
+
+/* Audio DMA Registers */
+#define HDMI_AHB_DMA_CONF0 0x3600
+#define HDMI_AHB_DMA_START 0x3601
+#define HDMI_AHB_DMA_STOP 0x3602
+#define HDMI_AHB_DMA_THRSLD 0x3603
+#define HDMI_AHB_DMA_STRADDR0 0x3604
+#define HDMI_AHB_DMA_STRADDR1 0x3605
+#define HDMI_AHB_DMA_STRADDR2 0x3606
+#define HDMI_AHB_DMA_STRADDR3 0x3607
+#define HDMI_AHB_DMA_STPADDR0 0x3608
+#define HDMI_AHB_DMA_STPADDR1 0x3609
+#define HDMI_AHB_DMA_STPADDR2 0x360a
+#define HDMI_AHB_DMA_STPADDR3 0x360b
+#define HDMI_AHB_DMA_BSTADDR0 0x360c
+#define HDMI_AHB_DMA_BSTADDR1 0x360d
+#define HDMI_AHB_DMA_BSTADDR2 0x360e
+#define HDMI_AHB_DMA_BSTADDR3 0x360f
+#define HDMI_AHB_DMA_MBLENGTH0 0x3610
+#define HDMI_AHB_DMA_MBLENGTH1 0x3611
+#define HDMI_AHB_DMA_STAT 0x3612
+#define HDMI_AHB_DMA_INT 0x3613
+#define HDMI_AHB_DMA_MASK 0x3614
+#define HDMI_AHB_DMA_POL 0x3615
+#define HDMI_AHB_DMA_CONF1 0x3616
+#define HDMI_AHB_DMA_BUFFSTAT 0x3617
+#define HDMI_AHB_DMA_BUFFINT 0x3618
+#define HDMI_AHB_DMA_BUFFMASK 0x3619
+#define HDMI_AHB_DMA_BUFFPOL 0x361a
+
+/* Main Controller Registers */
+#define HDMI_MC_SFRDIV 0x4000
+#define HDMI_MC_CLKDIS 0x4001
+#define HDMI_MC_SWRSTZ 0x4002
+#define HDMI_MC_OPCTRL 0x4003
+#define HDMI_MC_FLOWCTRL 0x4004
+#define HDMI_MC_PHYRSTZ 0x4005
+#define HDMI_MC_LOCKONCLOCK 0x4006
+#define HDMI_MC_HEACPHY_RST 0x4007
+
+/* Color Space Converter Registers */
+#define HDMI_CSC_CFG 0x4100
+#define HDMI_CSC_SCALE 0x4101
+#define HDMI_CSC_COEF_A1_MSB 0x4102
+#define HDMI_CSC_COEF_A1_LSB 0x4103
+#define HDMI_CSC_COEF_A2_MSB 0x4104
+#define HDMI_CSC_COEF_A2_LSB 0x4105
+#define HDMI_CSC_COEF_A3_MSB 0x4106
+#define HDMI_CSC_COEF_A3_LSB 0x4107
+#define HDMI_CSC_COEF_A4_MSB 0x4108
+#define HDMI_CSC_COEF_A4_LSB 0x4109
+#define HDMI_CSC_COEF_B1_MSB 0x410A
+#define HDMI_CSC_COEF_B1_LSB 0x410B
+#define HDMI_CSC_COEF_B2_MSB 0x410C
+#define HDMI_CSC_COEF_B2_LSB 0x410D
+#define HDMI_CSC_COEF_B3_MSB 0x410E
+#define HDMI_CSC_COEF_B3_LSB 0x410F
+#define HDMI_CSC_COEF_B4_MSB 0x4110
+#define HDMI_CSC_COEF_B4_LSB 0x4111
+#define HDMI_CSC_COEF_C1_MSB 0x4112
+#define HDMI_CSC_COEF_C1_LSB 0x4113
+#define HDMI_CSC_COEF_C2_MSB 0x4114
+#define HDMI_CSC_COEF_C2_LSB 0x4115
+#define HDMI_CSC_COEF_C3_MSB 0x4116
+#define HDMI_CSC_COEF_C3_LSB 0x4117
+#define HDMI_CSC_COEF_C4_MSB 0x4118
+#define HDMI_CSC_COEF_C4_LSB 0x4119
+
+/* HDCP Encryption Engine Registers */
+#define HDMI_A_HDCPCFG0 0x5000
+#define HDMI_A_HDCPCFG1 0x5001
+#define HDMI_A_HDCPOBS0 0x5002
+#define HDMI_A_HDCPOBS1 0x5003
+#define HDMI_A_HDCPOBS2 0x5004
+#define HDMI_A_HDCPOBS3 0x5005
+#define HDMI_A_APIINTCLR 0x5006
+#define HDMI_A_APIINTSTAT 0x5007
+#define HDMI_A_APIINTMSK 0x5008
+#define HDMI_A_VIDPOLCFG 0x5009
+#define HDMI_A_OESSWCFG 0x500A
+#define HDMI_A_TIMER1SETUP0 0x500B
+#define HDMI_A_TIMER1SETUP1 0x500C
+#define HDMI_A_TIMER2SETUP0 0x500D
+#define HDMI_A_TIMER2SETUP1 0x500E
+#define HDMI_A_100MSCFG 0x500F
+#define HDMI_A_2SCFG0 0x5010
+#define HDMI_A_2SCFG1 0x5011
+#define HDMI_A_5SCFG0 0x5012
+#define HDMI_A_5SCFG1 0x5013
+#define HDMI_A_SRMVERLSB 0x5014
+#define HDMI_A_SRMVERMSB 0x5015
+#define HDMI_A_SRMCTRL 0x5016
+#define HDMI_A_SFRSETUP 0x5017
+#define HDMI_A_I2CHSETUP 0x5018
+#define HDMI_A_INTSETUP 0x5019
+#define HDMI_A_PRESETUP 0x501A
+#define HDMI_A_SRM_BASE 0x5020
+
+/* CEC Engine Registers */
+#define HDMI_CEC_CTRL 0x7D00
+#define HDMI_CEC_STAT 0x7D01
+#define HDMI_CEC_MASK 0x7D02
+#define HDMI_CEC_POLARITY 0x7D03
+#define HDMI_CEC_INT 0x7D04
+#define HDMI_CEC_ADDR_L 0x7D05
+#define HDMI_CEC_ADDR_H 0x7D06
+#define HDMI_CEC_TX_CNT 0x7D07
+#define HDMI_CEC_RX_CNT 0x7D08
+#define HDMI_CEC_TX_DATA0 0x7D10
+#define HDMI_CEC_TX_DATA1 0x7D11
+#define HDMI_CEC_TX_DATA2 0x7D12
+#define HDMI_CEC_TX_DATA3 0x7D13
+#define HDMI_CEC_TX_DATA4 0x7D14
+#define HDMI_CEC_TX_DATA5 0x7D15
+#define HDMI_CEC_TX_DATA6 0x7D16
+#define HDMI_CEC_TX_DATA7 0x7D17
+#define HDMI_CEC_TX_DATA8 0x7D18
+#define HDMI_CEC_TX_DATA9 0x7D19
+#define HDMI_CEC_TX_DATA10 0x7D1a
+#define HDMI_CEC_TX_DATA11 0x7D1b
+#define HDMI_CEC_TX_DATA12 0x7D1c
+#define HDMI_CEC_TX_DATA13 0x7D1d
+#define HDMI_CEC_TX_DATA14 0x7D1e
+#define HDMI_CEC_TX_DATA15 0x7D1f
+#define HDMI_CEC_RX_DATA0 0x7D20
+#define HDMI_CEC_RX_DATA1 0x7D21
+#define HDMI_CEC_RX_DATA2 0x7D22
+#define HDMI_CEC_RX_DATA3 0x7D23
+#define HDMI_CEC_RX_DATA4 0x7D24
+#define HDMI_CEC_RX_DATA5 0x7D25
+#define HDMI_CEC_RX_DATA6 0x7D26
+#define HDMI_CEC_RX_DATA7 0x7D27
+#define HDMI_CEC_RX_DATA8 0x7D28
+#define HDMI_CEC_RX_DATA9 0x7D29
+#define HDMI_CEC_RX_DATA10 0x7D2a
+#define HDMI_CEC_RX_DATA11 0x7D2b
+#define HDMI_CEC_RX_DATA12 0x7D2c
+#define HDMI_CEC_RX_DATA13 0x7D2d
+#define HDMI_CEC_RX_DATA14 0x7D2e
+#define HDMI_CEC_RX_DATA15 0x7D2f
+#define HDMI_CEC_LOCK 0x7D30
+#define HDMI_CEC_WKUPCTRL 0x7D31
+
+/* I2C Master Registers (E-DDC) */
+#define HDMI_I2CM_SLAVE 0x7E00
+#define HDMI_I2CMESS 0x7E01
+#define HDMI_I2CM_DATAO 0x7E02
+#define HDMI_I2CM_DATAI 0x7E03
+#define HDMI_I2CM_OPERATION 0x7E04
+#define HDMI_I2CM_INT 0x7E05
+#define HDMI_I2CM_CTLINT 0x7E06
+#define HDMI_I2CM_DIV 0x7E07
+#define HDMI_I2CM_SEGADDR 0x7E08
+#define HDMI_I2CM_SOFTRSTZ 0x7E09
+#define HDMI_I2CM_SEGPTR 0x7E0A
+#define HDMI_I2CM_SS_SCL_HCNT_1_ADDR 0x7E0B
+#define HDMI_I2CM_SS_SCL_HCNT_0_ADDR 0x7E0C
+#define HDMI_I2CM_SS_SCL_LCNT_1_ADDR 0x7E0D
+#define HDMI_I2CM_SS_SCL_LCNT_0_ADDR 0x7E0E
+#define HDMI_I2CM_FS_SCL_HCNT_1_ADDR 0x7E0F
+#define HDMI_I2CM_FS_SCL_HCNT_0_ADDR 0x7E10
+#define HDMI_I2CM_FS_SCL_LCNT_1_ADDR 0x7E11
+#define HDMI_I2CM_FS_SCL_LCNT_0_ADDR 0x7E12
+
+enum {
+/* IH_FC_INT2 field values */
+ HDMI_IH_FC_INT2_OVERFLOW_MASK = 0x03,
+ HDMI_IH_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02,
+ HDMI_IH_FC_INT2_HIGH_PRIORITY_OVERFLOW = 0x01,
+
+/* IH_FC_STAT2 field values */
+ HDMI_IH_FC_STAT2_OVERFLOW_MASK = 0x03,
+ HDMI_IH_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02,
+ HDMI_IH_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01,
+
+/* IH_PHY_STAT0 field values */
+ HDMI_IH_PHY_STAT0_RX_SENSE3 = 0x20,
+ HDMI_IH_PHY_STAT0_RX_SENSE2 = 0x10,
+ HDMI_IH_PHY_STAT0_RX_SENSE1 = 0x8,
+ HDMI_IH_PHY_STAT0_RX_SENSE0 = 0x4,
+ HDMI_IH_PHY_STAT0_TX_PHY_LOCK = 0x2,
+ HDMI_IH_PHY_STAT0_HPD = 0x1,
+
+/* IH_MUTE_I2CMPHY_STAT0 field values */
+ HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYDONE = 0x2,
+ HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYERROR = 0x1,
+
+/* IH_AHBDMAAUD_STAT0 field values */
+ HDMI_IH_AHBDMAAUD_STAT0_ERROR = 0x20,
+ HDMI_IH_AHBDMAAUD_STAT0_LOST = 0x10,
+ HDMI_IH_AHBDMAAUD_STAT0_RETRY = 0x08,
+ HDMI_IH_AHBDMAAUD_STAT0_DONE = 0x04,
+ HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL = 0x02,
+ HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY = 0x01,
+
+/* IH_MUTE_FC_STAT2 field values */
+ HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK = 0x03,
+ HDMI_IH_MUTE_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02,
+ HDMI_IH_MUTE_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01,
+
+/* IH_MUTE_AHBDMAAUD_STAT0 field values */
+ HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR = 0x20,
+ HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST = 0x10,
+ HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY = 0x08,
+ HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE = 0x04,
+ HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL = 0x02,
+ HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY = 0x01,
+
+/* IH_MUTE field values */
+ HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT = 0x2,
+ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT = 0x1,
+
+/* TX_INVID0 field values */
+ HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_MASK = 0x80,
+ HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_ENABLE = 0x80,
+ HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE = 0x00,
+ HDMI_TX_INVID0_VIDEO_MAPPING_MASK = 0x1F,
+ HDMI_TX_INVID0_VIDEO_MAPPING_OFFSET = 0,
+
+/* TX_INSTUFFING field values */
+ HDMI_TX_INSTUFFING_BDBDATA_STUFFING_MASK = 0x4,
+ HDMI_TX_INSTUFFING_BDBDATA_STUFFING_ENABLE = 0x4,
+ HDMI_TX_INSTUFFING_BDBDATA_STUFFING_DISABLE = 0x0,
+ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_MASK = 0x2,
+ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_ENABLE = 0x2,
+ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_DISABLE = 0x0,
+ HDMI_TX_INSTUFFING_GYDATA_STUFFING_MASK = 0x1,
+ HDMI_TX_INSTUFFING_GYDATA_STUFFING_ENABLE = 0x1,
+ HDMI_TX_INSTUFFING_GYDATA_STUFFING_DISABLE = 0x0,
+
+/* VP_PR_CD field values */
+ HDMI_VP_PR_CD_COLOR_DEPTH_MASK = 0xF0,
+ HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET = 4,
+ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK = 0x0F,
+ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET = 0,
+
+/* VP_STUFF field values */
+ HDMI_VP_STUFF_IDEFAULT_PHASE_MASK = 0x20,
+ HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET = 5,
+ HDMI_VP_STUFF_IFIX_PP_TO_LAST_MASK = 0x10,
+ HDMI_VP_STUFF_IFIX_PP_TO_LAST_OFFSET = 4,
+ HDMI_VP_STUFF_ICX_GOTO_P0_ST_MASK = 0x8,
+ HDMI_VP_STUFF_ICX_GOTO_P0_ST_OFFSET = 3,
+ HDMI_VP_STUFF_YCC422_STUFFING_MASK = 0x4,
+ HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE = 0x4,
+ HDMI_VP_STUFF_YCC422_STUFFING_DIRECT_MODE = 0x0,
+ HDMI_VP_STUFF_PP_STUFFING_MASK = 0x2,
+ HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE = 0x2,
+ HDMI_VP_STUFF_PP_STUFFING_DIRECT_MODE = 0x0,
+ HDMI_VP_STUFF_PR_STUFFING_MASK = 0x1,
+ HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE = 0x1,
+ HDMI_VP_STUFF_PR_STUFFING_DIRECT_MODE = 0x0,
+
+/* VP_CONF field values */
+ HDMI_VP_CONF_BYPASS_EN_MASK = 0x40,
+ HDMI_VP_CONF_BYPASS_EN_ENABLE = 0x40,
+ HDMI_VP_CONF_BYPASS_EN_DISABLE = 0x00,
+ HDMI_VP_CONF_PP_EN_ENMASK = 0x20,
+ HDMI_VP_CONF_PP_EN_ENABLE = 0x20,
+ HDMI_VP_CONF_PP_EN_DISABLE = 0x00,
+ HDMI_VP_CONF_PR_EN_MASK = 0x10,
+ HDMI_VP_CONF_PR_EN_ENABLE = 0x10,
+ HDMI_VP_CONF_PR_EN_DISABLE = 0x00,
+ HDMI_VP_CONF_YCC422_EN_MASK = 0x8,
+ HDMI_VP_CONF_YCC422_EN_ENABLE = 0x8,
+ HDMI_VP_CONF_YCC422_EN_DISABLE = 0x0,
+ HDMI_VP_CONF_BYPASS_SELECT_MASK = 0x4,
+ HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER = 0x4,
+ HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER = 0x0,
+ HDMI_VP_CONF_OUTPUT_SELECTOR_MASK = 0x3,
+ HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS = 0x3,
+ HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422 = 0x1,
+ HDMI_VP_CONF_OUTPUT_SELECTOR_PP = 0x0,
+
+/* VP_REMAP field values */
+ HDMI_VP_REMAP_MASK = 0x3,
+ HDMI_VP_REMAP_YCC422_24bit = 0x2,
+ HDMI_VP_REMAP_YCC422_20bit = 0x1,
+ HDMI_VP_REMAP_YCC422_16bit = 0x0,
+
+/* FC_INVIDCONF field values */
+ HDMI_FC_INVIDCONF_HDCP_KEEPOUT_MASK = 0x80,
+ HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE = 0x80,
+ HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE = 0x00,
+ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_MASK = 0x40,
+ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH = 0x40,
+ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW = 0x00,
+ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_MASK = 0x20,
+ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH = 0x20,
+ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW = 0x00,
+ HDMI_FC_INVIDCONF_DE_IN_POLARITY_MASK = 0x10,
+ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH = 0x10,
+ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_LOW = 0x00,
+ HDMI_FC_INVIDCONF_DVI_MODEZ_MASK = 0x8,
+ HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE = 0x8,
+ HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE = 0x0,
+ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_MASK = 0x2,
+ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH = 0x2,
+ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW = 0x0,
+ HDMI_FC_INVIDCONF_IN_I_P_MASK = 0x1,
+ HDMI_FC_INVIDCONF_IN_I_P_INTERLACED = 0x1,
+ HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE = 0x0,
+
+/* FC_AUDICONF0 field values */
+ HDMI_FC_AUDICONF0_CC_OFFSET = 4,
+ HDMI_FC_AUDICONF0_CC_MASK = 0x70,
+ HDMI_FC_AUDICONF0_CT_OFFSET = 0,
+ HDMI_FC_AUDICONF0_CT_MASK = 0xF,
+
+/* FC_AUDICONF1 field values */
+ HDMI_FC_AUDICONF1_SS_OFFSET = 3,
+ HDMI_FC_AUDICONF1_SS_MASK = 0x18,
+ HDMI_FC_AUDICONF1_SF_OFFSET = 0,
+ HDMI_FC_AUDICONF1_SF_MASK = 0x7,
+
+/* FC_AUDICONF3 field values */
+ HDMI_FC_AUDICONF3_LFEPBL_OFFSET = 5,
+ HDMI_FC_AUDICONF3_LFEPBL_MASK = 0x60,
+ HDMI_FC_AUDICONF3_DM_INH_OFFSET = 4,
+ HDMI_FC_AUDICONF3_DM_INH_MASK = 0x10,
+ HDMI_FC_AUDICONF3_LSV_OFFSET = 0,
+ HDMI_FC_AUDICONF3_LSV_MASK = 0xF,
+
+/* FC_AUDSCHNLS0 field values */
+ HDMI_FC_AUDSCHNLS0_CGMSA_OFFSET = 4,
+ HDMI_FC_AUDSCHNLS0_CGMSA_MASK = 0x30,
+ HDMI_FC_AUDSCHNLS0_COPYRIGHT_OFFSET = 0,
+ HDMI_FC_AUDSCHNLS0_COPYRIGHT_MASK = 0x01,
+
+/* FC_AUDSCHNLS3-6 field values */
+ HDMI_FC_AUDSCHNLS3_OIEC_CH0_OFFSET = 0,
+ HDMI_FC_AUDSCHNLS3_OIEC_CH0_MASK = 0x0f,
+ HDMI_FC_AUDSCHNLS3_OIEC_CH1_OFFSET = 4,
+ HDMI_FC_AUDSCHNLS3_OIEC_CH1_MASK = 0xf0,
+ HDMI_FC_AUDSCHNLS4_OIEC_CH2_OFFSET = 0,
+ HDMI_FC_AUDSCHNLS4_OIEC_CH2_MASK = 0x0f,
+ HDMI_FC_AUDSCHNLS4_OIEC_CH3_OFFSET = 4,
+ HDMI_FC_AUDSCHNLS4_OIEC_CH3_MASK = 0xf0,
+
+ HDMI_FC_AUDSCHNLS5_OIEC_CH0_OFFSET = 0,
+ HDMI_FC_AUDSCHNLS5_OIEC_CH0_MASK = 0x0f,
+ HDMI_FC_AUDSCHNLS5_OIEC_CH1_OFFSET = 4,
+ HDMI_FC_AUDSCHNLS5_OIEC_CH1_MASK = 0xf0,
+ HDMI_FC_AUDSCHNLS6_OIEC_CH2_OFFSET = 0,
+ HDMI_FC_AUDSCHNLS6_OIEC_CH2_MASK = 0x0f,
+ HDMI_FC_AUDSCHNLS6_OIEC_CH3_OFFSET = 4,
+ HDMI_FC_AUDSCHNLS6_OIEC_CH3_MASK = 0xf0,
+
+/* HDMI_FC_AUDSCHNLS7 field values */
+ HDMI_FC_AUDSCHNLS7_ACCURACY_OFFSET = 4,
+ HDMI_FC_AUDSCHNLS7_ACCURACY_MASK = 0x30,
+
+/* HDMI_FC_AUDSCHNLS8 field values */
+ HDMI_FC_AUDSCHNLS8_ORIGSAMPFREQ_MASK = 0xf0,
+ HDMI_FC_AUDSCHNLS8_ORIGSAMPFREQ_OFFSET = 4,
+ HDMI_FC_AUDSCHNLS8_WORDLEGNTH_MASK = 0x0f,
+ HDMI_FC_AUDSCHNLS8_WORDLEGNTH_OFFSET = 0,
+
+/* FC_AUDSCONF field values */
+ HDMI_FC_AUDSCONF_AUD_PACKET_SAMPFIT_MASK = 0xF0,
+ HDMI_FC_AUDSCONF_AUD_PACKET_SAMPFIT_OFFSET = 4,
+ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_MASK = 0x1,
+ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_OFFSET = 0,
+ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT1 = 0x1,
+ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT0 = 0x0,
+
+/* FC_STAT2 field values */
+ HDMI_FC_STAT2_OVERFLOW_MASK = 0x03,
+ HDMI_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02,
+ HDMI_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01,
+
+/* FC_INT2 field values */
+ HDMI_FC_INT2_OVERFLOW_MASK = 0x03,
+ HDMI_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02,
+ HDMI_FC_INT2_HIGH_PRIORITY_OVERFLOW = 0x01,
+
+/* FC_MASK2 field values */
+ HDMI_FC_MASK2_OVERFLOW_MASK = 0x03,
+ HDMI_FC_MASK2_LOW_PRIORITY_OVERFLOW = 0x02,
+ HDMI_FC_MASK2_HIGH_PRIORITY_OVERFLOW = 0x01,
+
+/* FC_PRCONF field values */
+ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_MASK = 0xF0,
+ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_OFFSET = 4,
+ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK = 0x0F,
+ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET = 0,
+
+/* FC_AVICONF0-FC_AVICONF3 field values */
+ HDMI_FC_AVICONF0_PIX_FMT_MASK = 0x03,
+ HDMI_FC_AVICONF0_PIX_FMT_RGB = 0x00,
+ HDMI_FC_AVICONF0_PIX_FMT_YCBCR422 = 0x01,
+ HDMI_FC_AVICONF0_PIX_FMT_YCBCR444 = 0x02,
+ HDMI_FC_AVICONF0_ACTIVE_FMT_MASK = 0x40,
+ HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT = 0x40,
+ HDMI_FC_AVICONF0_ACTIVE_FMT_NO_INFO = 0x00,
+ HDMI_FC_AVICONF0_BAR_DATA_MASK = 0x0C,
+ HDMI_FC_AVICONF0_BAR_DATA_NO_DATA = 0x00,
+ HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR = 0x04,
+ HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR = 0x08,
+ HDMI_FC_AVICONF0_BAR_DATA_VERT_HORIZ_BAR = 0x0C,
+ HDMI_FC_AVICONF0_SCAN_INFO_MASK = 0x30,
+ HDMI_FC_AVICONF0_SCAN_INFO_OVERSCAN = 0x10,
+ HDMI_FC_AVICONF0_SCAN_INFO_UNDERSCAN = 0x20,
+ HDMI_FC_AVICONF0_SCAN_INFO_NODATA = 0x00,
+
+ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_MASK = 0x0F,
+ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_USE_CODED = 0x08,
+ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3 = 0x09,
+ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9 = 0x0A,
+ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_14_9 = 0x0B,
+ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_MASK = 0x30,
+ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_NO_DATA = 0x00,
+ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3 = 0x10,
+ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9 = 0x20,
+ HDMI_FC_AVICONF1_COLORIMETRY_MASK = 0xC0,
+ HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA = 0x00,
+ HDMI_FC_AVICONF1_COLORIMETRY_SMPTE = 0x40,
+ HDMI_FC_AVICONF1_COLORIMETRY_ITUR = 0x80,
+ HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO = 0xC0,
+
+ HDMI_FC_AVICONF2_SCALING_MASK = 0x03,
+ HDMI_FC_AVICONF2_SCALING_NONE = 0x00,
+ HDMI_FC_AVICONF2_SCALING_HORIZ = 0x01,
+ HDMI_FC_AVICONF2_SCALING_VERT = 0x02,
+ HDMI_FC_AVICONF2_SCALING_HORIZ_VERT = 0x03,
+ HDMI_FC_AVICONF2_RGB_QUANT_MASK = 0x0C,
+ HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT = 0x00,
+ HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE = 0x04,
+ HDMI_FC_AVICONF2_RGB_QUANT_FULL_RANGE = 0x08,
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_MASK = 0x70,
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601 = 0x00,
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709 = 0x10,
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_SYCC601 = 0x20,
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_ADOBE_YCC601 = 0x30,
+ HDMI_FC_AVICONF2_EXT_COLORIMETRY_ADOBE_RGB = 0x40,
+ HDMI_FC_AVICONF2_IT_CONTENT_MASK = 0x80,
+ HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA = 0x00,
+ HDMI_FC_AVICONF2_IT_CONTENT_VALID = 0x80,
+
+ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_MASK = 0x03,
+ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS = 0x00,
+ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_PHOTO = 0x01,
+ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_CINEMA = 0x02,
+ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GAME = 0x03,
+ HDMI_FC_AVICONF3_QUANT_RANGE_MASK = 0x0C,
+ HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED = 0x00,
+ HDMI_FC_AVICONF3_QUANT_RANGE_FULL = 0x04,
+
+/* FC_DBGFORCE field values */
+ HDMI_FC_DBGFORCE_FORCEAUDIO = 0x10,
+ HDMI_FC_DBGFORCE_FORCEVIDEO = 0x1,
+
+/* PHY_CONF0 field values */
+ HDMI_PHY_CONF0_PDZ_MASK = 0x80,
+ HDMI_PHY_CONF0_PDZ_OFFSET = 7,
+ HDMI_PHY_CONF0_ENTMDS_MASK = 0x40,
+ HDMI_PHY_CONF0_ENTMDS_OFFSET = 6,
+ HDMI_PHY_CONF0_SPARECTRL = 0x20,
+ HDMI_PHY_CONF0_GEN2_PDDQ_MASK = 0x10,
+ HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET = 4,
+ HDMI_PHY_CONF0_GEN2_TXPWRON_MASK = 0x8,
+ HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET = 3,
+ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_MASK = 0x4,
+ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_OFFSET = 2,
+ HDMI_PHY_CONF0_SELDATAENPOL_MASK = 0x2,
+ HDMI_PHY_CONF0_SELDATAENPOL_OFFSET = 1,
+ HDMI_PHY_CONF0_SELDIPIF_MASK = 0x1,
+ HDMI_PHY_CONF0_SELDIPIF_OFFSET = 0,
+
+/* PHY_TST0 field values */
+ HDMI_PHY_TST0_TSTCLR_MASK = 0x20,
+ HDMI_PHY_TST0_TSTCLR_OFFSET = 5,
+ HDMI_PHY_TST0_TSTEN_MASK = 0x10,
+ HDMI_PHY_TST0_TSTEN_OFFSET = 4,
+ HDMI_PHY_TST0_TSTCLK_MASK = 0x1,
+ HDMI_PHY_TST0_TSTCLK_OFFSET = 0,
+
+/* PHY_STAT0 field values */
+ HDMI_PHY_RX_SENSE3 = 0x80,
+ HDMI_PHY_RX_SENSE2 = 0x40,
+ HDMI_PHY_RX_SENSE1 = 0x20,
+ HDMI_PHY_RX_SENSE0 = 0x10,
+ HDMI_PHY_HPD = 0x02,
+ HDMI_PHY_TX_PHY_LOCK = 0x01,
+
+/* PHY_I2CM_SLAVE_ADDR field values */
+ HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2 = 0x69,
+ HDMI_PHY_I2CM_SLAVE_ADDR_HEAC_PHY = 0x49,
+
+/* PHY_I2CM_OPERATION_ADDR field values */
+ HDMI_PHY_I2CM_OPERATION_ADDR_WRITE = 0x10,
+ HDMI_PHY_I2CM_OPERATION_ADDR_READ = 0x1,
+
+/* HDMI_PHY_I2CM_INT_ADDR */
+ HDMI_PHY_I2CM_INT_ADDR_DONE_POL = 0x08,
+ HDMI_PHY_I2CM_INT_ADDR_DONE_MASK = 0x04,
+
+/* HDMI_PHY_I2CM_CTLINT_ADDR */
+ HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL = 0x80,
+ HDMI_PHY_I2CM_CTLINT_ADDR_NAC_MASK = 0x40,
+ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL = 0x08,
+ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_MASK = 0x04,
+
+/* AUD_CTS3 field values */
+ HDMI_AUD_CTS3_N_SHIFT_OFFSET = 5,
+ HDMI_AUD_CTS3_N_SHIFT_MASK = 0xe0,
+ HDMI_AUD_CTS3_N_SHIFT_1 = 0,
+ HDMI_AUD_CTS3_N_SHIFT_16 = 0x20,
+ HDMI_AUD_CTS3_N_SHIFT_32 = 0x40,
+ HDMI_AUD_CTS3_N_SHIFT_64 = 0x60,
+ HDMI_AUD_CTS3_N_SHIFT_128 = 0x80,
+ HDMI_AUD_CTS3_N_SHIFT_256 = 0xa0,
+ /*
+  * Note that the CTS3 MANUAL bit has been removed
+  * from our part. Can't set it, will read as 0.
+  */
+ HDMI_AUD_CTS3_CTS_MANUAL = 0x10,
+ HDMI_AUD_CTS3_AUDCTS19_16_MASK = 0x0f,
+
+/* AHB_DMA_CONF0 field values */
+ HDMI_AHB_DMA_CONF0_SW_FIFO_RST_OFFSET = 7,
+ HDMI_AHB_DMA_CONF0_SW_FIFO_RST_MASK = 0x80,
+ HDMI_AHB_DMA_CONF0_HBR = 0x10,
+ HDMI_AHB_DMA_CONF0_EN_HLOCK_OFFSET = 3,
+ HDMI_AHB_DMA_CONF0_EN_HLOCK_MASK = 0x08,
+ HDMI_AHB_DMA_CONF0_INCR_TYPE_OFFSET = 1,
+ HDMI_AHB_DMA_CONF0_INCR_TYPE_MASK = 0x06,
+ HDMI_AHB_DMA_CONF0_INCR4 = 0x0,
+ HDMI_AHB_DMA_CONF0_INCR8 = 0x2,
+ HDMI_AHB_DMA_CONF0_INCR16 = 0x4,
+ HDMI_AHB_DMA_CONF0_BURST_MODE = 0x1,
+
+/* HDMI_AHB_DMA_START field values */
+ HDMI_AHB_DMA_START_START_OFFSET = 0,
+ HDMI_AHB_DMA_START_START_MASK = 0x01,
+
+/* HDMI_AHB_DMA_STOP field values */
+ HDMI_AHB_DMA_STOP_STOP_OFFSET = 0,
+ HDMI_AHB_DMA_STOP_STOP_MASK = 0x01,
+
+/* AHB_DMA_STAT, AHB_DMA_INT, AHB_DMA_MASK, AHB_DMA_POL field values */
+ HDMI_AHB_DMA_DONE = 0x80,
+ HDMI_AHB_DMA_RETRY_SPLIT = 0x40,
+ HDMI_AHB_DMA_LOSTOWNERSHIP = 0x20,
+ HDMI_AHB_DMA_ERROR = 0x10,
+ HDMI_AHB_DMA_FIFO_THREMPTY = 0x04,
+ HDMI_AHB_DMA_FIFO_FULL = 0x02,
+ HDMI_AHB_DMA_FIFO_EMPTY = 0x01,
+
+/* AHB_DMA_BUFFSTAT, AHB_DMA_BUFFINT,AHB_DMA_BUFFMASK,AHB_DMA_BUFFPOL values */
+ HDMI_AHB_DMA_BUFFSTAT_FULL = 0x02,
+ HDMI_AHB_DMA_BUFFSTAT_EMPTY = 0x01,
+
+/* MC_CLKDIS field values */
+ HDMI_MC_CLKDIS_HDCPCLK_DISABLE = 0x40,
+ HDMI_MC_CLKDIS_CECCLK_DISABLE = 0x20,
+ HDMI_MC_CLKDIS_CSCCLK_DISABLE = 0x10,
+ HDMI_MC_CLKDIS_AUDCLK_DISABLE = 0x8,
+ HDMI_MC_CLKDIS_PREPCLK_DISABLE = 0x4,
+ HDMI_MC_CLKDIS_TMDSCLK_DISABLE = 0x2,
+ HDMI_MC_CLKDIS_PIXELCLK_DISABLE = 0x1,
+
+/* MC_SWRSTZ field values */
+ HDMI_MC_SWRSTZ_TMDSSWRST_REQ = 0x02,
+
+/* MC_FLOWCTRL field values */
+ HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_MASK = 0x1,
+ HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH = 0x1,
+ HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS = 0x0,
+
+/* MC_PHYRSTZ field values */
+ HDMI_MC_PHYRSTZ_ASSERT = 0x0,
+ HDMI_MC_PHYRSTZ_DEASSERT = 0x1,
+
+/* MC_HEACPHY_RST field values */
+ HDMI_MC_HEACPHY_RST_ASSERT = 0x1,
+ HDMI_MC_HEACPHY_RST_DEASSERT = 0x0,
+
+/* CSC_CFG field values */
+ HDMI_CSC_CFG_INTMODE_MASK = 0x30,
+ HDMI_CSC_CFG_INTMODE_OFFSET = 4,
+ HDMI_CSC_CFG_INTMODE_DISABLE = 0x00,
+ HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA1 = 0x10,
+ HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA2 = 0x20,
+ HDMI_CSC_CFG_DECMODE_MASK = 0x3,
+ HDMI_CSC_CFG_DECMODE_OFFSET = 0,
+ HDMI_CSC_CFG_DECMODE_DISABLE = 0x0,
+ HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA1 = 0x1,
+ HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA2 = 0x2,
+ HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3 = 0x3,
+
+/* CSC_SCALE field values */
+ HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK = 0xF0,
+ HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP = 0x00,
+ HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP = 0x50,
+ HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP = 0x60,
+ HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP = 0x70,
+ HDMI_CSC_SCALE_CSCSCALE_MASK = 0x03,
+
+/* A_HDCPCFG0 field values */
+ HDMI_A_HDCPCFG0_ELVENA_MASK = 0x80,
+ HDMI_A_HDCPCFG0_ELVENA_ENABLE = 0x80,
+ HDMI_A_HDCPCFG0_ELVENA_DISABLE = 0x00,
+ HDMI_A_HDCPCFG0_I2CFASTMODE_MASK = 0x40,
+ HDMI_A_HDCPCFG0_I2CFASTMODE_ENABLE = 0x40,
+ HDMI_A_HDCPCFG0_I2CFASTMODE_DISABLE = 0x00,
+ HDMI_A_HDCPCFG0_BYPENCRYPTION_MASK = 0x20,
+ HDMI_A_HDCPCFG0_BYPENCRYPTION_ENABLE = 0x20,
+ HDMI_A_HDCPCFG0_BYPENCRYPTION_DISABLE = 0x00,
+ HDMI_A_HDCPCFG0_SYNCRICHECK_MASK = 0x10,
+ HDMI_A_HDCPCFG0_SYNCRICHECK_ENABLE = 0x10,
+ HDMI_A_HDCPCFG0_SYNCRICHECK_DISABLE = 0x00,
+ HDMI_A_HDCPCFG0_AVMUTE_MASK = 0x8,
+ HDMI_A_HDCPCFG0_AVMUTE_ENABLE = 0x8,
+ HDMI_A_HDCPCFG0_AVMUTE_DISABLE = 0x0,
+ HDMI_A_HDCPCFG0_RXDETECT_MASK = 0x4,
+ HDMI_A_HDCPCFG0_RXDETECT_ENABLE = 0x4,
+ HDMI_A_HDCPCFG0_RXDETECT_DISABLE = 0x0,
+ HDMI_A_HDCPCFG0_EN11FEATURE_MASK = 0x2,
+ HDMI_A_HDCPCFG0_EN11FEATURE_ENABLE = 0x2,
+ HDMI_A_HDCPCFG0_EN11FEATURE_DISABLE = 0x0,
+ HDMI_A_HDCPCFG0_HDMIDVI_MASK = 0x1,
+ HDMI_A_HDCPCFG0_HDMIDVI_HDMI = 0x1,
+ HDMI_A_HDCPCFG0_HDMIDVI_DVI = 0x0,
+
+/* A_HDCPCFG1 field values */
+ HDMI_A_HDCPCFG1_DISSHA1CHECK_MASK = 0x8,
+ HDMI_A_HDCPCFG1_DISSHA1CHECK_DISABLE = 0x8,
+ HDMI_A_HDCPCFG1_DISSHA1CHECK_ENABLE = 0x0,
+ HDMI_A_HDCPCFG1_PH2UPSHFTENC_MASK = 0x4,
+ HDMI_A_HDCPCFG1_PH2UPSHFTENC_ENABLE = 0x4,
+ HDMI_A_HDCPCFG1_PH2UPSHFTENC_DISABLE = 0x0,
+ HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK = 0x2,
+ HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_DISABLE = 0x2,
+ HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_ENABLE = 0x0,
+ HDMI_A_HDCPCFG1_SWRESET_MASK = 0x1,
+ HDMI_A_HDCPCFG1_SWRESET_ASSERT = 0x0,
+
+/* A_VIDPOLCFG field values */
+ HDMI_A_VIDPOLCFG_UNENCRYPTCONF_MASK = 0x60,
+ HDMI_A_VIDPOLCFG_UNENCRYPTCONF_OFFSET = 5,
+ HDMI_A_VIDPOLCFG_DATAENPOL_MASK = 0x10,
+ HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_HIGH = 0x10,
+ HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_LOW = 0x0,
+ HDMI_A_VIDPOLCFG_VSYNCPOL_MASK = 0x8,
+ HDMI_A_VIDPOLCFG_VSYNCPOL_ACTIVE_HIGH = 0x8,
+ HDMI_A_VIDPOLCFG_VSYNCPOL_ACTIVE_LOW = 0x0,
+ HDMI_A_VIDPOLCFG_HSYNCPOL_MASK = 0x2,
+ HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_HIGH = 0x2,
+ HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_LOW = 0x0,
+};
+#endif /* __IMX_HDMI_H__ */
diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/staging/imx-drm/imx-ldb.c
index 654bf03e05ff..7e593296ac47 100644
--- a/drivers/staging/imx-drm/imx-ldb.c
+++ b/drivers/staging/imx-drm/imx-ldb.c
@@ -167,9 +167,8 @@ static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
/* set display clock mux to LDB input clock */
ret = clk_set_parent(ldb->clk_sel[mux], ldb->clk[chno]);
- if (ret) {
+ if (ret)
dev_err(ldb->dev, "unable to set di%d parent clock to ldb_di%d\n", mux, chno);
- }
}
static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
@@ -414,7 +413,7 @@ enum {
LVDS_BIT_MAP_JEIDA
};
-static const char *imx_ldb_bit_mappings[] = {
+static const char * const imx_ldb_bit_mappings[] = {
[LVDS_BIT_MAP_SPWG] = "spwg",
[LVDS_BIT_MAP_JEIDA] = "jeida",
};
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index 680f4c8fa081..9abc7ca8b6cf 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -114,7 +114,6 @@ struct imx_tve {
struct drm_encoder encoder;
struct imx_drm_encoder *imx_drm_encoder;
struct device *dev;
- spinlock_t enable_lock; /* serializes tve_enable/disable */
spinlock_t lock; /* register lock */
bool enabled;
int mode;
@@ -146,10 +145,8 @@ __releases(&tve->lock)
static void tve_enable(struct imx_tve *tve)
{
- unsigned long flags;
int ret;
- spin_lock_irqsave(&tve->enable_lock, flags);
if (!tve->enabled) {
tve->enabled = true;
clk_prepare_enable(tve->clk);
@@ -169,23 +166,18 @@ static void tve_enable(struct imx_tve *tve)
TVE_CD_SM_IEN |
TVE_CD_LM_IEN |
TVE_CD_MON_END_IEN);
-
- spin_unlock_irqrestore(&tve->enable_lock, flags);
}
static void tve_disable(struct imx_tve *tve)
{
- unsigned long flags;
int ret;
- spin_lock_irqsave(&tve->enable_lock, flags);
if (tve->enabled) {
tve->enabled = false;
ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
TVE_IPU_CLK_EN | TVE_EN, 0);
clk_disable_unprepare(tve->clk);
}
- spin_unlock_irqrestore(&tve->enable_lock, flags);
}
static int tve_setup_tvout(struct imx_tve *tve)
@@ -568,7 +560,7 @@ static const char *imx_tve_modes[] = {
[TVE_MODE_VGA] = "vga",
};
-const int of_get_tve_mode(struct device_node *np)
+static const int of_get_tve_mode(struct device_node *np)
{
const char *bm;
int ret, i;
@@ -601,7 +593,6 @@ static int imx_tve_probe(struct platform_device *pdev)
tve->dev = &pdev->dev;
spin_lock_init(&tve->lock);
- spin_lock_init(&tve->enable_lock);
ddc_node = of_parse_phandle(np, "ddc", 0);
if (ddc_node) {
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index 7a22ce619ed2..ca85d3d70ae3 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/export.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
#include <linux/err.h>
@@ -996,35 +995,35 @@ static const struct ipu_platform_reg client_reg[] = {
},
};
+static DEFINE_MUTEX(ipu_client_id_mutex);
static int ipu_client_id;
-static int ipu_add_subdevice_pdata(struct device *dev,
- const struct ipu_platform_reg *reg)
-{
- struct platform_device *pdev;
-
- pdev = platform_device_register_data(dev, reg->name, ipu_client_id++,
- &reg->pdata, sizeof(struct ipu_platform_reg));
-
- return PTR_ERR_OR_ZERO(pdev);
-}
-
static int ipu_add_client_devices(struct ipu_soc *ipu)
{
- int ret;
- int i;
+ struct device *dev = ipu->dev;
+ unsigned i;
+ int id, ret;
+
+ mutex_lock(&ipu_client_id_mutex);
+ id = ipu_client_id;
+ ipu_client_id += ARRAY_SIZE(client_reg);
+ mutex_unlock(&ipu_client_id_mutex);
for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
const struct ipu_platform_reg *reg = &client_reg[i];
- ret = ipu_add_subdevice_pdata(ipu->dev, reg);
- if (ret)
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_data(dev, reg->name,
+ id++, &reg->pdata, sizeof(reg->pdata));
+
+ if (IS_ERR(pdev))
goto err_register;
}
return 0;
err_register:
- platform_device_unregister_children(to_platform_device(ipu->dev));
+ platform_device_unregister_children(to_platform_device(dev));
return ret;
}
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index ce6ba987ec91..22be104fbda9 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -218,7 +218,8 @@ static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
if (ipu_crtc->newfb) {
ipu_crtc->newfb = NULL;
- ipu_plane_set_base(ipu_crtc->plane[0], ipu_crtc->base.fb, 0, 0);
+ ipu_plane_set_base(ipu_crtc->plane[0], ipu_crtc->base.fb,
+ ipu_crtc->plane[0]->x, ipu_crtc->plane[0]->y);
ipu_crtc_handle_pageflip(ipu_crtc);
}
diff --git a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/staging/imx-drm/ipuv3-plane.c
index d97454a0dffd..34b642a12f8b 100644
--- a/drivers/staging/imx-drm/ipuv3-plane.c
+++ b/drivers/staging/imx-drm/ipuv3-plane.c
@@ -64,6 +64,7 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
{
struct ipu_ch_param __iomem *cpmem;
struct drm_gem_cma_object *cma_obj;
+ unsigned long eba;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
if (!cma_obj) {
@@ -76,8 +77,15 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
cpmem = ipu_get_cpmem(ipu_plane->ipu_ch);
ipu_cpmem_set_stride(cpmem, fb->pitches[0]);
- ipu_cpmem_set_buffer(cpmem, 0, cma_obj->paddr + fb->offsets[0] +
- fb->pitches[0] * y + x);
+
+ eba = cma_obj->paddr + fb->offsets[0] +
+ fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
+ ipu_cpmem_set_buffer(cpmem, 0, eba);
+ ipu_cpmem_set_buffer(cpmem, 1, eba);
+
+ /* cache offsets for subsequent pageflips */
+ ipu_plane->x = x;
+ ipu_plane->y = y;
return 0;
}
diff --git a/drivers/staging/imx-drm/parallel-display.c b/drivers/staging/imx-drm/parallel-display.c
index 24aa9beedcfb..351d61dede00 100644
--- a/drivers/staging/imx-drm/parallel-display.c
+++ b/drivers/staging/imx-drm/parallel-display.c
@@ -23,6 +23,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <linux/videodev2.h>
+#include <video/of_display_timing.h>
#include "imx-drm.h"
@@ -74,7 +75,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
if (np) {
struct drm_display_mode *mode = drm_mode_create(connector->dev);
- of_get_drm_display_mode(np, &imxpd->mode, 0);
+ of_get_drm_display_mode(np, &imxpd->mode, OF_USE_NATIVE_MODE);
drm_mode_copy(mode, &imxpd->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
drm_mode_probed_add(connector, mode);
diff --git a/drivers/staging/keucr/smcommon.h b/drivers/staging/keucr/smcommon.h
index 4d57203b64d8..1d2752a1d5c4 100644
--- a/drivers/staging/keucr/smcommon.h
+++ b/drivers/staging/keucr/smcommon.h
@@ -4,7 +4,7 @@
/***************************************************************************
-Define Difinetion
+Define Definition
***************************************************************************/
#define SMSUCCESS 0x0000 /* SUCCESS */
#define ERROR 0xFFFF /* ERROR */
diff --git a/drivers/staging/keucr/smil.h b/drivers/staging/keucr/smil.h
index 1538d7bd600f..9136e9447261 100644
--- a/drivers/staging/keucr/smil.h
+++ b/drivers/staging/keucr/smil.h
@@ -45,7 +45,7 @@ Retry Counter Definition
Hardware ECC Definition
***************************************************************************/
#define HW_ECC_SUPPORTED 1 /* Hardware ECC Supported */
-/* No difinition for Software ECC */
+/* No definition for Software ECC */
/***************************************************************************
SmartMedia Command & Status Definition
@@ -189,12 +189,6 @@ struct keucr_media_area {
WORD PhyBlock; /* Physical Block Number on Zone 0 */
};
-
-extern BYTE IsSSFDCCompliance;
-extern BYTE IsXDCompliance;
-
-extern DWORD ErrXDCode;
-extern DWORD ErrCode;
extern WORD ReadBlock;
extern WORD WriteBlock;
extern DWORD MediaChange;
diff --git a/drivers/staging/keucr/smilecc.c b/drivers/staging/keucr/smilecc.c
index 3085f1d4a4eb..6b8f7d7a7436 100644
--- a/drivers/staging/keucr/smilecc.c
+++ b/drivers/staging/keucr/smilecc.c
@@ -139,7 +139,7 @@ BYTE correct_data(BYTE *data, BYTE *eccdata, BYTE ecc1, BYTE ecc2, BYTE ecc3)
BYTE bit; /* Bit address of cor. DATA */
d1 = ecc1^eccdata[1]; d2 = ecc2^eccdata[0]; /* Compare LP's */
- d3 = ecc3^eccdata[2]; /* Comapre CP's */
+ d3 = ecc3^eccdata[2]; /* Compare CP's */
d = ((DWORD)d1<<16) /* Result of comparison */
+((DWORD)d2<<8)
+(DWORD)d3;
diff --git a/drivers/staging/keucr/smilmain.c b/drivers/staging/keucr/smilmain.c
index 2786808fde9f..09d07e05102f 100644
--- a/drivers/staging/keucr/smilmain.c
+++ b/drivers/staging/keucr/smilmain.c
@@ -4,49 +4,28 @@
#include "smcommon.h"
#include "smil.h"
-int Check_D_LogCHS(WORD *, BYTE *, BYTE *);
-void Initialize_D_Media(void);
-void PowerOff_D_Media(void);
-int Check_D_MediaPower(void);
-int Check_D_MediaExist(void);
-int Check_D_MediaWP(void);
-int Check_D_MediaFmt(struct us_data *);
-int Check_D_MediaFmtForEraseAll(struct us_data *);
-int Conv_D_MediaAddr(struct us_data *, DWORD);
-int Inc_D_MediaAddr(struct us_data *);
-int Check_D_FirstSect(void);
-int Check_D_LastSect(void);
-int Media_D_ReadOneSect(struct us_data *, WORD, BYTE *);
-int Media_D_WriteOneSect(struct us_data *, WORD, BYTE *);
-int Media_D_CopyBlockHead(struct us_data *);
-int Media_D_CopyBlockTail(struct us_data *);
-int Media_D_EraseOneBlock(void);
-int Media_D_EraseAllBlock(void);
-
-int Copy_D_BlockAll(struct us_data *, DWORD);
-int Copy_D_BlockHead(struct us_data *);
-int Copy_D_BlockTail(struct us_data *);
-int Reassign_D_BlockHead(struct us_data *);
-
-int Assign_D_WriteBlock(void);
-int Release_D_ReadBlock(struct us_data *);
-int Release_D_WriteBlock(struct us_data *);
-int Release_D_CopySector(struct us_data *);
-
-int Copy_D_PhyOneSect(struct us_data *);
-int Read_D_PhyOneSect(struct us_data *, WORD, BYTE *);
-int Write_D_PhyOneSect(struct us_data *, WORD, BYTE *);
-int Erase_D_PhyOneBlock(struct us_data *);
-
-int Set_D_PhyFmtValue(struct us_data *);
-int Search_D_CIS(struct us_data *);
-int Make_D_LogTable(struct us_data *);
-void Check_D_BlockIsFull(void);
-
-int MarkFail_D_PhyOneBlock(struct us_data *);
-
-DWORD ErrXDCode;
-DWORD ErrCode;
+static int Conv_D_MediaAddr(struct us_data *, DWORD);
+static int Inc_D_MediaAddr(struct us_data *);
+static int Media_D_ReadOneSect(struct us_data *, WORD, BYTE *);
+
+static int Copy_D_BlockAll(struct us_data *, DWORD);
+
+static int Assign_D_WriteBlock(void);
+static int Release_D_ReadBlock(struct us_data *);
+static int Release_D_WriteBlock(struct us_data *);
+static int Release_D_CopySector(struct us_data *);
+
+static int Copy_D_PhyOneSect(struct us_data *);
+static int Read_D_PhyOneSect(struct us_data *, WORD, BYTE *);
+static int Erase_D_PhyOneBlock(struct us_data *);
+
+static int Set_D_PhyFmtValue(struct us_data *);
+static int Search_D_CIS(struct us_data *);
+static int Make_D_LogTable(struct us_data *);
+
+static int MarkFail_D_PhyOneBlock(struct us_data *);
+
+static DWORD ErrCode;
static BYTE WorkBuf[SECTSIZE];
static BYTE Redundant[REDTSIZE];
static BYTE WorkRedund[REDTSIZE];
@@ -65,10 +44,6 @@ static BYTE BitData[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
#define Clr_D_Bit(a, b) (a[(BYTE)((b) / 8)] &= ~BitData[(b) % 8])
#define Chk_D_Bit(a, b) (a[(BYTE)((b) / 8)] & BitData[(b) % 8])
-BYTE IsSSFDCCompliance;
-BYTE IsXDCompliance;
-
-
/* ----- SM_FreeMem() ------------------------------------------------- */
int SM_FreeMem(void)
{
@@ -167,7 +142,7 @@ int Media_D_CopySector(struct us_data *us, DWORD start, WORD count, BYTE *buf)
}
/* ----- Release_D_CopySector() ------------------------------------------ */
-int Release_D_CopySector(struct us_data *us)
+static int Release_D_CopySector(struct us_data *us)
{
Log2Phy[Media.Zone][Media.LogBlock] = WriteBlock;
Media.PhyBlock = ReadBlock;
@@ -211,7 +186,7 @@ int Check_D_MediaFmt(struct us_data *us)
/* SmartMedia Physical Address Control Subroutine */
/* ----- Conv_D_MediaAddr() --------------------------------------------- */
-int Conv_D_MediaAddr(struct us_data *us, DWORD addr)
+static int Conv_D_MediaAddr(struct us_data *us, DWORD addr)
{
DWORD temp;
@@ -240,7 +215,7 @@ int Conv_D_MediaAddr(struct us_data *us, DWORD addr)
}
/* ----- Inc_D_MediaAddr() ---------------------------------------------- */
-int Inc_D_MediaAddr(struct us_data *us)
+static int Inc_D_MediaAddr(struct us_data *us)
{
WORD LogBlock = Media.LogBlock;
@@ -290,7 +265,7 @@ int Inc_D_MediaAddr(struct us_data *us)
/* SmartMedia Read/Write Subroutine with Retry */
/* ----- Media_D_ReadOneSect() ------------------------------------------ */
-int Media_D_ReadOneSect(struct us_data *us, WORD count, BYTE *buf)
+static int Media_D_ReadOneSect(struct us_data *us, WORD count, BYTE *buf)
{
DWORD err, retry;
@@ -334,7 +309,7 @@ int Media_D_ReadOneSect(struct us_data *us, WORD count, BYTE *buf)
/* SmartMedia Physical Sector Data Copy Subroutine */
/* ----- Copy_D_BlockAll() ---------------------------------------------- */
-int Copy_D_BlockAll(struct us_data *us, DWORD mode)
+static int Copy_D_BlockAll(struct us_data *us, DWORD mode)
{
BYTE sect;
@@ -371,7 +346,7 @@ int Copy_D_BlockAll(struct us_data *us, DWORD mode)
/* SmartMedia Physical Block Assign/Release Subroutine */
/* ----- Assign_D_WriteBlock() ------------------------------------------ */
-int Assign_D_WriteBlock(void)
+static int Assign_D_WriteBlock(void)
{
ReadBlock = Media.PhyBlock;
@@ -404,7 +379,7 @@ int Assign_D_WriteBlock(void)
}
/* ----- Release_D_ReadBlock() ------------------------------------------ */
-int Release_D_ReadBlock(struct us_data *us)
+static int Release_D_ReadBlock(struct us_data *us)
{
DWORD mode;
@@ -438,7 +413,7 @@ int Release_D_ReadBlock(struct us_data *us)
}
/* ----- Release_D_WriteBlock() ----------------------------------------- */
-int Release_D_WriteBlock(struct us_data *us)
+static int Release_D_WriteBlock(struct us_data *us)
{
SectCopyMode = COMPLETED;
Media.PhyBlock = WriteBlock;
@@ -452,12 +427,12 @@ int Release_D_WriteBlock(struct us_data *us)
/* SmartMedia Physical Sector Data Copy Subroutine */
/* ----- Copy_D_PhyOneSect() -------------------------------------------- */
-int Copy_D_PhyOneSect(struct us_data *us)
+static int Copy_D_PhyOneSect(struct us_data *us)
{
int i;
DWORD err, retry;
- /* pr_info("Copy_D_PhyOneSect --- Secotr = %x\n", Media.Sector); */
+ /* pr_info("Copy_D_PhyOneSect --- Sector = %x\n", Media.Sector); */
if (ReadBlock != NO_ASSIGN) {
Media.PhyBlock = ReadBlock;
for (retry = 0; retry < 2; retry++) {
@@ -529,7 +504,7 @@ int Copy_D_PhyOneSect(struct us_data *us)
/* SmartMedia Physical Sector Read/Write/Erase Subroutine */
/* ----- Read_D_PhyOneSect() -------------------------------------------- */
-int Read_D_PhyOneSect(struct us_data *us, WORD count, BYTE *buf)
+static int Read_D_PhyOneSect(struct us_data *us, WORD count, BYTE *buf)
{
int i;
DWORD retry;
@@ -580,7 +555,7 @@ int Read_D_PhyOneSect(struct us_data *us, WORD count, BYTE *buf)
}
/* ----- Erase_D_PhyOneBlock() ------------------------------------------ */
-int Erase_D_PhyOneBlock(struct us_data *us)
+static int Erase_D_PhyOneBlock(struct us_data *us)
{
if (Ssfdc_D_EraseBlock(us)) {
ErrCode = ERR_HwError;
@@ -597,7 +572,7 @@ int Erase_D_PhyOneBlock(struct us_data *us)
/* SmartMedia Physical Format Check Local Subroutine */
/* ----- Set_D_PhyFmtValue() -------------------------------------------- */
-int Set_D_PhyFmtValue(struct us_data *us)
+static int Set_D_PhyFmtValue(struct us_data *us)
{
if (Set_D_SsfdcModel(us->SM_DeviceID))
return ERROR;
@@ -606,7 +581,7 @@ int Set_D_PhyFmtValue(struct us_data *us)
}
/* ----- Search_D_CIS() ------------------------------------------------- */
-int Search_D_CIS(struct us_data *us)
+static int Search_D_CIS(struct us_data *us)
{
Media.Zone = 0;
Media.Sector = 0;
@@ -660,7 +635,7 @@ int Search_D_CIS(struct us_data *us)
}
/* ----- Make_D_LogTable() ---------------------------------------------- */
-int Make_D_LogTable(struct us_data *us)
+static int Make_D_LogTable(struct us_data *us)
{
WORD phyblock, logblock;
@@ -761,7 +736,7 @@ int Make_D_LogTable(struct us_data *us)
}
/* ----- MarkFail_D_PhyOneBlock() --------------------------------------- */
-int MarkFail_D_PhyOneBlock(struct us_data *us)
+static int MarkFail_D_PhyOneBlock(struct us_data *us)
{
BYTE sect;
diff --git a/drivers/staging/keucr/smilsub.c b/drivers/staging/keucr/smilsub.c
index 346c5702f411..16da9a9b4033 100644
--- a/drivers/staging/keucr/smilsub.c
+++ b/drivers/staging/keucr/smilsub.c
@@ -6,45 +6,16 @@
#include "smcommon.h"
#include "smil.h"
-void _Set_D_SsfdcRdCmd(BYTE);
-void _Set_D_SsfdcRdAddr(BYTE);
-void _Set_D_SsfdcRdChip(void);
-void _Set_D_SsfdcRdStandby(void);
-void _Start_D_SsfdcRdHwECC(void);
-void _Stop_D_SsfdcRdHwECC(void);
-void _Load_D_SsfdcRdHwECC(BYTE);
-void _Set_D_SsfdcWrCmd(BYTE);
-void _Set_D_SsfdcWrAddr(BYTE);
-void _Set_D_SsfdcWrBlock(void);
-void _Set_D_SsfdcWrStandby(void);
-void _Start_D_SsfdcWrHwECC(void);
-void _Load_D_SsfdcWrHwECC(BYTE);
-int _Check_D_SsfdcBusy(WORD);
-int _Check_D_SsfdcStatus(void);
-void _Reset_D_SsfdcErr(void);
-void _Read_D_SsfdcBuf(BYTE *);
-void _Write_D_SsfdcBuf(BYTE *);
-void _Read_D_SsfdcByte(BYTE *);
-void _ReadRedt_D_SsfdcBuf(BYTE *);
-void _WriteRedt_D_SsfdcBuf(BYTE *);
-BYTE _Check_D_DevCode(BYTE);
-
-void _Set_D_ECCdata(BYTE, BYTE *);
-void _Calc_D_ECCdata(BYTE *);
-
+static BYTE _Check_D_DevCode(BYTE);
+static DWORD ErrXDCode;
+static BYTE IsSSFDCCompliance;
+static BYTE IsXDCompliance;
struct keucr_media_info Ssfdc;
struct keucr_media_address Media;
struct keucr_media_area CisArea;
static BYTE EccBuf[6];
-extern PBYTE SMHostAddr;
-extern DWORD ErrXDCode;
-
-extern WORD ReadBlock;
-extern WORD WriteBlock;
-
-
#define EVEN 0 /* Even Page for 256byte/page */
#define ODD 1 /* Odd Page for 256byte/page */
diff --git a/drivers/staging/keucr/smscsi.c b/drivers/staging/keucr/smscsi.c
index 572d6489b66b..5c03eca4dba8 100644
--- a/drivers/staging/keucr/smscsi.c
+++ b/drivers/staging/keucr/smscsi.c
@@ -11,16 +11,12 @@
#include "transport.h"
#include "smil.h"
-int SM_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Start_Stop(struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb);
-int SM_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb);
-
-extern PBYTE SMHostAddr;
-extern DWORD ErrXDCode;
+static int SM_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb);
+static int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb);
+static int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb);
+static int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb);
+static int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb);
+static int SM_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb);
/* ----- SM_SCSIIrp() -------------------------------------------------- */
int SM_SCSIIrp(struct us_data *us, struct scsi_cmnd *srb)
@@ -57,7 +53,7 @@ int SM_SCSIIrp(struct us_data *us, struct scsi_cmnd *srb)
}
/* ----- SM_SCSI_Test_Unit_Ready() ------------------------------------- */
-int SM_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb)
+static int SM_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb)
{
if (us->SM_Status.Insert && us->SM_Status.Ready)
return USB_STOR_TRANSPORT_GOOD;
@@ -70,7 +66,7 @@ int SM_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb)
}
/* ----- SM_SCSI_Inquiry() --------------------------------------------- */
-int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb)
+static int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb)
{
BYTE data_ptr[36] = {0x00, 0x80, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00,
0x55, 0x53, 0x42, 0x32, 0x2E, 0x30, 0x20,
@@ -84,7 +80,7 @@ int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb)
/* ----- SM_SCSI_Mode_Sense() ------------------------------------------ */
-int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb)
+static int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb)
{
BYTE mediaNoWP[12] = {0x0b, 0x00, 0x00, 0x08, 0x00, 0x00,
0x71, 0xc0, 0x00, 0x00, 0x02, 0x00};
@@ -101,7 +97,7 @@ int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb)
}
/* ----- SM_SCSI_Read_Capacity() --------------------------------------- */
-int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb)
+static int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb)
{
unsigned int offset = 0;
struct scatterlist *sg = NULL;
@@ -133,7 +129,7 @@ int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb)
}
/* ----- SM_SCSI_Read() -------------------------------------------------- */
-int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
+static int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
{
int result = 0;
PBYTE Cdb = srb->cmnd;
@@ -165,7 +161,7 @@ int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
}
/* ----- SM_SCSI_Write() -------------------------------------------------- */
-int SM_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
+static int SM_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
{
int result = 0;
PBYTE Cdb = srb->cmnd;
diff --git a/drivers/staging/keucr/usb.c b/drivers/staging/keucr/usb.c
index a84ee6303368..3e3ca6365fbc 100644
--- a/drivers/staging/keucr/usb.c
+++ b/drivers/staging/keucr/usb.c
@@ -2,7 +2,6 @@
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index cc5d62d2b01f..7a6d85ebb29b 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -38,6 +38,7 @@ static const struct usb_device_id line6_id_table[] = {
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_GUITARPORT)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_POCKETPOD)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODHD300)},
+ {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODHD400)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODHD500)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODSTUDIO_GX)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODSTUDIO_UX1)},
@@ -64,6 +65,7 @@ static struct line6_properties line6_properties_table[] = {
{ LINE6_BIT_GUITARPORT, "GuitarPort", "GuitarPort", LINE6_BIT_PCM },
{ LINE6_BIT_POCKETPOD, "PocketPOD", "Pocket POD", LINE6_BIT_CONTROL },
{ LINE6_BIT_PODHD300, "PODHD300", "POD HD300", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_PODHD400, "PODHD400", "POD HD400", LINE6_BIT_CONTROL_PCM_HWMON },
{ LINE6_BIT_PODHD500, "PODHD500", "POD HD500", LINE6_BIT_CONTROL_PCM_HWMON },
{ LINE6_BIT_PODSTUDIO_GX, "PODStudioGX", "POD Studio GX", LINE6_BIT_PCM },
{ LINE6_BIT_PODSTUDIO_UX1, "PODStudioUX1", "POD Studio UX1", LINE6_BIT_PCM },
@@ -352,6 +354,7 @@ static void line6_data_received(struct urb *urb)
break;
case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD400:
case LINE6_DEVID_PODHD500:
break; /* let userspace handle MIDI */
@@ -684,6 +687,7 @@ static int line6_probe(struct usb_interface *interface,
case LINE6_DEVID_PODXT:
case LINE6_DEVID_PODXTPRO:
case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD400:
alternate = 5;
break;
@@ -738,6 +742,7 @@ static int line6_probe(struct usb_interface *interface,
break;
case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD400:
size = sizeof(struct usb_line6_podhd);
ep_read = 0x84;
ep_write = 0x03;
@@ -896,6 +901,7 @@ static int line6_probe(struct usb_interface *interface,
break;
case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD400:
case LINE6_DEVID_PODHD500:
ret = line6_podhd_init(interface,
(struct usb_line6_podhd *)line6);
@@ -1023,6 +1029,7 @@ static void line6_disconnect(struct usb_interface *interface)
break;
case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD400:
case LINE6_DEVID_PODHD500:
line6_podhd_disconnect(interface);
break;
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index 6a0648cd03a7..df8331bce175 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -436,6 +436,7 @@ int line6_init_pcm(struct usb_line6 *line6,
case LINE6_DEVID_PODXTLIVE:
case LINE6_DEVID_PODXTPRO:
case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD400:
ep_read = 0x82;
ep_write = 0x01;
break;
diff --git a/drivers/staging/line6/usbdefs.h b/drivers/staging/line6/usbdefs.h
index 43eb54008a2b..90cadddec56e 100644
--- a/drivers/staging/line6/usbdefs.h
+++ b/drivers/staging/line6/usbdefs.h
@@ -25,6 +25,7 @@
#define LINE6_DEVID_GUITARPORT 0x4750
#define LINE6_DEVID_POCKETPOD 0x5051
#define LINE6_DEVID_PODHD300 0x5057
+#define LINE6_DEVID_PODHD400 0x5058
#define LINE6_DEVID_PODHD500 0x414D
#define LINE6_DEVID_PODSTUDIO_GX 0x4153
#define LINE6_DEVID_PODSTUDIO_UX1 0x4150
@@ -48,6 +49,7 @@ enum {
LINE6_INDEX_GUITARPORT,
LINE6_INDEX_POCKETPOD,
LINE6_INDEX_PODHD300,
+ LINE6_INDEX_PODHD400,
LINE6_INDEX_PODHD500,
LINE6_INDEX_PODSTUDIO_GX,
LINE6_INDEX_PODSTUDIO_UX1,
@@ -68,6 +70,7 @@ enum {
LINE6_BIT(GUITARPORT),
LINE6_BIT(POCKETPOD),
LINE6_BIT(PODHD300),
+ LINE6_BIT(PODHD400),
LINE6_BIT(PODHD500),
LINE6_BIT(PODSTUDIO_GX),
LINE6_BIT(PODSTUDIO_UX1),
@@ -88,7 +91,9 @@ enum {
LINE6_BITS_PODXTALL = LINE6_BIT_PODXT | LINE6_BIT_PODXTLIVE |
LINE6_BIT_PODXTPRO,
LINE6_BITS_PODX3ALL = LINE6_BIT_PODX3 | LINE6_BIT_PODX3LIVE,
- LINE6_BITS_PODHDALL = LINE6_BIT_PODHD300 | LINE6_BIT_PODHD500,
+ LINE6_BITS_PODHDALL = LINE6_BIT_PODHD300 |
+ LINE6_BIT_PODHD400 |
+ LINE6_BIT_PODHD500,
LINE6_BITS_BASSPODXTALL = LINE6_BIT_BASSPODXT |
LINE6_BIT_BASSPODXTLIVE |
LINE6_BIT_BASSPODXTPRO
diff --git a/drivers/staging/lustre/TODO b/drivers/staging/lustre/TODO
index 22742d6d62a8..0a2b6cb3775e 100644
--- a/drivers/staging/lustre/TODO
+++ b/drivers/staging/lustre/TODO
@@ -9,5 +9,6 @@
* Other minor misc cleanups...
Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger
-<andreas.dilger@intel.com> and Peng Tao <tao.peng@emc.com>. CCing
-hpdd-discuss <hpdd-discuss@lists.01.org> would be great too.
+<andreas.dilger@intel.com>, Oleg Drokin <oleg.drokin@intel.com> and
+Peng Tao <tao.peng@emc.com>. CCing hpdd-discuss <hpdd-discuss@lists.01.org>
+would be great too.
diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h
index de8e35b796ab..507d16b9213c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/curproc.h
+++ b/drivers/staging/lustre/include/linux/libcfs/curproc.h
@@ -61,7 +61,6 @@ int cfs_curproc_groups_nr(void);
*/
/* check if task is running in compat mode.*/
-int current_is_32bit(void);
#define current_pid() (current->pid)
#define current_comm() (current->comm)
int cfs_get_environ(const char *key, char *value, int *val_len);
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 687dbab2c4ec..4a6c7da72174 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -181,8 +181,6 @@ static inline void *__container_of(void *ptr, unsigned long shift)
#define container_of0(ptr, type, member) \
((type *)__container_of((void *)(ptr), offsetof(type, member)))
-#define SET_BUT_UNUSED(a) do { } while(sizeof(a) - sizeof(a))
-
#define _LIBCFS_H
#endif /* _LIBCFS_H */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index 40282b70bd1b..2bd4885ce06c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -82,76 +82,75 @@ struct ptldebug_header {
__u32 ph_line_num;
} __attribute__((packed));
-
#define PH_FLAG_FIRST_RECORD 1
/* Debugging subsystems (32 bits, non-overlapping) */
/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
-#define S_UNDEFINED 0x00000001
-#define S_MDC 0x00000002
-#define S_MDS 0x00000004
-#define S_OSC 0x00000008
-#define S_OST 0x00000010
-#define S_CLASS 0x00000020
-#define S_LOG 0x00000040
-#define S_LLITE 0x00000080
-#define S_RPC 0x00000100
-#define S_MGMT 0x00000200
-#define S_LNET 0x00000400
-#define S_LND 0x00000800 /* ALL LNDs */
-#define S_PINGER 0x00001000
-#define S_FILTER 0x00002000
+#define S_UNDEFINED 0x00000001
+#define S_MDC 0x00000002
+#define S_MDS 0x00000004
+#define S_OSC 0x00000008
+#define S_OST 0x00000010
+#define S_CLASS 0x00000020
+#define S_LOG 0x00000040
+#define S_LLITE 0x00000080
+#define S_RPC 0x00000100
+#define S_MGMT 0x00000200
+#define S_LNET 0x00000400
+#define S_LND 0x00000800 /* ALL LNDs */
+#define S_PINGER 0x00001000
+#define S_FILTER 0x00002000
/* unused */
-#define S_ECHO 0x00008000
-#define S_LDLM 0x00010000
-#define S_LOV 0x00020000
-#define S_LQUOTA 0x00040000
+#define S_ECHO 0x00008000
+#define S_LDLM 0x00010000
+#define S_LOV 0x00020000
+#define S_LQUOTA 0x00040000
#define S_OSD 0x00080000
/* unused */
/* unused */
/* unused */
-#define S_LMV 0x00800000 /* b_new_cmd */
+#define S_LMV 0x00800000 /* b_new_cmd */
/* unused */
-#define S_SEC 0x02000000 /* upcall cache */
-#define S_GSS 0x04000000 /* b_new_cmd */
+#define S_SEC 0x02000000 /* upcall cache */
+#define S_GSS 0x04000000 /* b_new_cmd */
/* unused */
-#define S_MGC 0x10000000
-#define S_MGS 0x20000000
-#define S_FID 0x40000000 /* b_new_cmd */
-#define S_FLD 0x80000000 /* b_new_cmd */
+#define S_MGC 0x10000000
+#define S_MGS 0x20000000
+#define S_FID 0x40000000 /* b_new_cmd */
+#define S_FLD 0x80000000 /* b_new_cmd */
/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
/* Debugging masks (32 bits, non-overlapping) */
/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
-#define D_TRACE 0x00000001 /* ENTRY/EXIT markers */
-#define D_INODE 0x00000002
-#define D_SUPER 0x00000004
-#define D_EXT2 0x00000008 /* anything from ext2_debug */
-#define D_MALLOC 0x00000010 /* print malloc, free information */
-#define D_CACHE 0x00000020 /* cache-related items */
-#define D_INFO 0x00000040 /* general information */
-#define D_IOCTL 0x00000080 /* ioctl related information */
-#define D_NETERROR 0x00000100 /* network errors */
-#define D_NET 0x00000200 /* network communications */
-#define D_WARNING 0x00000400 /* CWARN(...) == CDEBUG (D_WARNING, ...) */
-#define D_BUFFS 0x00000800
-#define D_OTHER 0x00001000
-#define D_DENTRY 0x00002000
-#define D_NETTRACE 0x00004000
-#define D_PAGE 0x00008000 /* bulk page handling */
-#define D_DLMTRACE 0x00010000
-#define D_ERROR 0x00020000 /* CERROR(...) == CDEBUG (D_ERROR, ...) */
-#define D_EMERG 0x00040000 /* CEMERG(...) == CDEBUG (D_EMERG, ...) */
-#define D_HA 0x00080000 /* recovery and failover */
-#define D_RPCTRACE 0x00100000 /* for distributed debugging */
-#define D_VFSTRACE 0x00200000
-#define D_READA 0x00400000 /* read-ahead */
-#define D_MMAP 0x00800000
-#define D_CONFIG 0x01000000
-#define D_CONSOLE 0x02000000
-#define D_QUOTA 0x04000000
-#define D_SEC 0x08000000
-#define D_LFSCK 0x10000000 /* For both OI scrub and LFSCK */
+#define D_TRACE 0x00000001 /* ENTRY/EXIT markers */
+#define D_INODE 0x00000002
+#define D_SUPER 0x00000004
+#define D_EXT2 0x00000008 /* anything from ext2_debug */
+#define D_MALLOC 0x00000010 /* print malloc, free information */
+#define D_CACHE 0x00000020 /* cache-related items */
+#define D_INFO 0x00000040 /* general information */
+#define D_IOCTL 0x00000080 /* ioctl related information */
+#define D_NETERROR 0x00000100 /* network errors */
+#define D_NET 0x00000200 /* network communications */
+#define D_WARNING 0x00000400 /* CWARN(...) == CDEBUG (D_WARNING, ...) */
+#define D_BUFFS 0x00000800
+#define D_OTHER 0x00001000
+#define D_DENTRY 0x00002000
+#define D_NETTRACE 0x00004000
+#define D_PAGE 0x00008000 /* bulk page handling */
+#define D_DLMTRACE 0x00010000
+#define D_ERROR 0x00020000 /* CERROR(...) == CDEBUG (D_ERROR, ...) */
+#define D_EMERG 0x00040000 /* CEMERG(...) == CDEBUG (D_EMERG, ...) */
+#define D_HA 0x00080000 /* recovery and failover */
+#define D_RPCTRACE 0x00100000 /* for distributed debugging */
+#define D_VFSTRACE 0x00200000
+#define D_READA 0x00400000 /* read-ahead */
+#define D_MMAP 0x00800000
+#define D_CONFIG 0x01000000
+#define D_CONSOLE 0x02000000
+#define D_QUOTA 0x04000000
+#define D_SEC 0x08000000
+#define D_LFSCK 0x10000000 /* For both OI scrub and LFSCK */
/* keep these in sync with lnet/{utils,libcfs}/debug.c */
#define D_HSM D_TRACE
@@ -166,41 +165,39 @@ struct ptldebug_header {
#define CDEBUG_DEFAULT_MIN_DELAY ((cfs_time_seconds(1) + 1) / 2) /* jiffies */
#define CDEBUG_DEFAULT_BACKOFF 2
struct cfs_debug_limit_state {
- cfs_time_t cdls_next;
- unsigned int cdls_delay;
+ cfs_time_t cdls_next;
+ unsigned int cdls_delay;
int cdls_count;
};
struct libcfs_debug_msg_data {
- const char *msg_file;
- const char *msg_fn;
- int msg_subsys;
- int msg_line;
- int msg_mask;
- struct cfs_debug_limit_state *msg_cdls;
+ const char *msg_file;
+ const char *msg_fn;
+ int msg_subsys;
+ int msg_line;
+ int msg_mask;
+ struct cfs_debug_limit_state *msg_cdls;
};
-#define LIBCFS_DEBUG_MSG_DATA_INIT(data, mask, cdls) \
-do { \
- (data)->msg_subsys = DEBUG_SUBSYSTEM; \
- (data)->msg_file = __FILE__; \
- (data)->msg_fn = __FUNCTION__; \
- (data)->msg_line = __LINE__; \
- (data)->msg_cdls = (cdls); \
- (data)->msg_mask = (mask); \
+#define LIBCFS_DEBUG_MSG_DATA_INIT(data, mask, cdls) \
+do { \
+ (data)->msg_subsys = DEBUG_SUBSYSTEM; \
+ (data)->msg_file = __FILE__; \
+ (data)->msg_fn = __FUNCTION__; \
+ (data)->msg_line = __LINE__; \
+ (data)->msg_cdls = (cdls); \
+ (data)->msg_mask = (mask); \
} while (0)
-#define LIBCFS_DEBUG_MSG_DATA_DECL(dataname, mask, cdls) \
- static struct libcfs_debug_msg_data dataname = { \
- .msg_subsys = DEBUG_SUBSYSTEM, \
- .msg_file = __FILE__, \
- .msg_fn = __FUNCTION__, \
- .msg_line = __LINE__, \
- .msg_cdls = (cdls) }; \
+#define LIBCFS_DEBUG_MSG_DATA_DECL(dataname, mask, cdls) \
+ static struct libcfs_debug_msg_data dataname = { \
+ .msg_subsys = DEBUG_SUBSYSTEM, \
+ .msg_file = __FILE__, \
+ .msg_fn = __FUNCTION__, \
+ .msg_line = __LINE__, \
+ .msg_cdls = (cdls) }; \
dataname.msg_mask = (mask);
-
-
/**
* Filters out logging messages based on mask and subsystem.
*/
@@ -210,34 +207,31 @@ static inline int cfs_cdebug_show(unsigned int mask, unsigned int subsystem)
((libcfs_debug & mask) && (libcfs_subsystem_debug & subsystem));
}
-#define __CDEBUG(cdls, mask, format, ...) \
-do { \
- static struct libcfs_debug_msg_data msgdata; \
+#define __CDEBUG(cdls, mask, format, ...) \
+do { \
+ static struct libcfs_debug_msg_data msgdata; \
\
- CFS_CHECK_STACK(&msgdata, mask, cdls); \
+ CFS_CHECK_STACK(&msgdata, mask, cdls); \
\
- if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- LIBCFS_DEBUG_MSG_DATA_INIT(&msgdata, mask, cdls); \
- libcfs_debug_msg(&msgdata, format, ## __VA_ARGS__); \
- } \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_INIT(&msgdata, mask, cdls); \
+ libcfs_debug_msg(&msgdata, format, ## __VA_ARGS__); \
+ } \
} while (0)
#define CDEBUG(mask, format, ...) __CDEBUG(NULL, mask, format, ## __VA_ARGS__)
-#define CDEBUG_LIMIT(mask, format, ...) \
-do { \
- static struct cfs_debug_limit_state cdls; \
- \
- __CDEBUG(&cdls, mask, format, ## __VA_ARGS__);\
+#define CDEBUG_LIMIT(mask, format, ...) \
+do { \
+ static struct cfs_debug_limit_state cdls; \
+ \
+ __CDEBUG(&cdls, mask, format, ## __VA_ARGS__); \
} while (0)
-
-
-
-#define CWARN(format, ...) CDEBUG_LIMIT(D_WARNING, format, ## __VA_ARGS__)
-#define CERROR(format, ...) CDEBUG_LIMIT(D_ERROR, format, ## __VA_ARGS__)
-#define CNETERR(format, a...) CDEBUG_LIMIT(D_NETERROR, format, ## a)
-#define CEMERG(format, ...) CDEBUG_LIMIT(D_EMERG, format, ## __VA_ARGS__)
+#define CWARN(format, ...) CDEBUG_LIMIT(D_WARNING, format, ## __VA_ARGS__)
+#define CERROR(format, ...) CDEBUG_LIMIT(D_ERROR, format, ## __VA_ARGS__)
+#define CNETERR(format, a...) CDEBUG_LIMIT(D_NETERROR, format, ## a)
+#define CEMERG(format, ...) CDEBUG_LIMIT(D_EMERG, format, ## __VA_ARGS__)
#define LCONSOLE(mask, format, ...) CDEBUG(D_CONSOLE | (mask), format, ## __VA_ARGS__)
#define LCONSOLE_INFO(format, ...) CDEBUG_LIMIT(D_CONSOLE, format, ## __VA_ARGS__)
@@ -248,20 +242,18 @@ do { \
#define LCONSOLE_EMERG(format, ...) CDEBUG(D_CONSOLE | D_EMERG, format, ## __VA_ARGS__)
-
void libcfs_log_goto(struct libcfs_debug_msg_data *, const char *, long_ptr_t);
-#define GOTO(label, rc) \
-do { \
+#define GOTO(label, rc) \
+do { \
if (cfs_cdebug_show(D_TRACE, DEBUG_SUBSYSTEM)) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_TRACE, NULL); \
- libcfs_log_goto(&msgdata, #label, (long_ptr_t)(rc)); \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_TRACE, NULL); \
+ libcfs_log_goto(&msgdata, #label, (long_ptr_t)(rc)); \
} else { \
- (void)(rc); \
- } \
- goto label; \
+ (void)(rc); \
+ } \
+ goto label; \
} while (0)
-
extern int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
const char *format1, ...)
__attribute__ ((format (printf, 2, 3)));
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index 5be367973508..74dda57b98a8 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -69,6 +69,7 @@ struct libcfs_ioctl_data {
char ioc_bulk[0];
};
+#define ioc_priority ioc_u32[0]
struct libcfs_ioctl_hdr {
__u32 ioc_len;
@@ -110,41 +111,38 @@ struct libcfs_ioctl_handler {
#define IOC_LIBCFS_TYPE 'e'
#define IOC_LIBCFS_MIN_NR 30
/* libcfs ioctls */
-#define IOC_LIBCFS_PANIC _IOWR('e', 30, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_LWT_CONTROL _IOWR('e', 33, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_LWT_SNAPSHOT _IOWR('e', 34, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_LWT_LOOKUP_STRING _IOWR('e', 35, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_MEMHOG _IOWR('e', 36, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_PING_TEST _IOWR('e', 37, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_PANIC _IOWR('e', 30, long)
+#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, long)
+#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, long)
+#define IOC_LIBCFS_MEMHOG _IOWR('e', 36, long)
+#define IOC_LIBCFS_PING_TEST _IOWR('e', 37, long)
/* lnet ioctls */
-#define IOC_LIBCFS_GET_NI _IOWR('e', 50, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_ADD_ROUTE _IOWR('e', 52, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_DEL_ROUTE _IOWR('e', 53, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_GET_ROUTE _IOWR('e', 54, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_PING _IOWR('e', 61, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_LNETST _IOWR('e', 63, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_GET_NI _IOWR('e', 50, long)
+#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long)
+#define IOC_LIBCFS_ADD_ROUTE _IOWR('e', 52, long)
+#define IOC_LIBCFS_DEL_ROUTE _IOWR('e', 53, long)
+#define IOC_LIBCFS_GET_ROUTE _IOWR('e', 54, long)
+#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, long)
+#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, long)
+#define IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, long)
+#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, long)
+#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, long)
+#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, long)
+#define IOC_LIBCFS_PING _IOWR('e', 61, long)
+#define IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, long)
+#define IOC_LIBCFS_LNETST _IOWR('e', 63, long)
/* lnd ioctls */
-#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_PUSH_CONNECTION _IOWR('e', 72, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_GET_CONN _IOWR('e', 73, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_DEL_PEER _IOWR('e', 74, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_ADD_PEER _IOWR('e', 75, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_GET_PEER _IOWR('e', 76, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_GET_TXDESC _IOWR('e', 77, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_ADD_INTERFACE _IOWR('e', 78, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, IOCTL_LIBCFS_TYPE)
-#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, long)
+#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, long)
+#define IOC_LIBCFS_PUSH_CONNECTION _IOWR('e', 72, long)
+#define IOC_LIBCFS_GET_CONN _IOWR('e', 73, long)
+#define IOC_LIBCFS_DEL_PEER _IOWR('e', 74, long)
+#define IOC_LIBCFS_ADD_PEER _IOWR('e', 75, long)
+#define IOC_LIBCFS_GET_PEER _IOWR('e', 76, long)
+#define IOC_LIBCFS_GET_TXDESC _IOWR('e', 77, long)
+#define IOC_LIBCFS_ADD_INTERFACE _IOWR('e', 78, long)
+#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, long)
+#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, long)
#define IOC_LIBCFS_MAX_NR 80
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
index 596a15fc8996..037ae8a6d531 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
@@ -61,6 +61,8 @@ struct kuc_hdr {
__u16 kuc_msglen; /* Including header */
} __attribute__((aligned(sizeof(__u64))));
+#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr)+CR_MAXSIZE)
+
#define KUC_MAGIC 0x191C /*Lustre9etLinC */
#define KUC_FL_BLOCK 0x01 /* Wait for send */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index d0d942ced01a..dddccca120c9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -120,7 +120,7 @@ do { \
do { \
LASSERT(!in_interrupt() || \
((size) <= LIBCFS_VMALLOC_SIZE && \
- ((mask) & GFP_ATOMIC)) != 0); \
+ ((mask) & __GFP_WAIT) == 0)); \
} while (0)
#define LIBCFS_ALLOC_POST(ptr, size) \
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/kp30.h b/drivers/staging/lustre/include/linux/libcfs/linux/kp30.h
index c204b677796f..a09fed3c6ea8 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/kp30.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/kp30.h
@@ -42,7 +42,6 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
@@ -63,142 +62,15 @@
#include <linux/smp.h>
#include <linux/ctype.h>
#include <linux/compiler.h>
-#ifdef HAVE_MM_INLINE
-# include <linux/mm_inline.h>
-#endif
+#include <linux/mm_inline.h>
#include <linux/kallsyms.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/libcfs/linux/portals_compat25.h>
-
-/******************************************************************************/
-/* Module parameter support */
-#define CFS_MODULE_PARM(name, t, type, perm, desc) \
- module_param(name, type, perm);\
- MODULE_PARM_DESC(name, desc)
-
-#define CFS_SYSFS_MODULE_PARM 1 /* module parameters accessible via sysfs */
-
-/******************************************************************************/
-/* Light-weight trace
- * Support for temporary event tracing with minimal Heisenberg effect. */
-#define LWT_SUPPORT 0
-
-#define LWT_MEMORY (16<<20)
-
-#ifndef KLWT_SUPPORT
-# if !defined(BITS_PER_LONG)
-# error "BITS_PER_LONG not defined"
-# endif
-
-/* kernel hasn't defined this? */
-typedef struct {
- long long lwte_when;
- char *lwte_where;
- void *lwte_task;
- long lwte_p1;
- long lwte_p2;
- long lwte_p3;
- long lwte_p4;
-# if BITS_PER_LONG > 32
- long lwte_pad;
-# endif
-} lwt_event_t;
-#endif /* !KLWT_SUPPORT */
-
-#if LWT_SUPPORT
-# if !KLWT_SUPPORT
-
-typedef struct _lwt_page {
- struct list_head lwtp_list;
- struct page *lwtp_page;
- lwt_event_t *lwtp_events;
-} lwt_page_t;
-
-typedef struct {
- int lwtc_current_index;
- lwt_page_t *lwtc_current_page;
-} lwt_cpu_t;
-
-extern int lwt_enabled;
-extern lwt_cpu_t lwt_cpus[];
-
-/* Note that we _don't_ define LWT_EVENT at all if LWT_SUPPORT isn't set.
- * This stuff is meant for finding specific problems; it never stays in
- * production code... */
-
-#define LWTSTR(n) #n
-#define LWTWHERE(f,l) f ":" LWTSTR(l)
-#define LWT_EVENTS_PER_PAGE (PAGE_CACHE_SIZE / sizeof (lwt_event_t))
-
-#define LWT_EVENT(p1, p2, p3, p4) \
-do { \
- unsigned long flags; \
- lwt_cpu_t *cpu; \
- lwt_page_t *p; \
- lwt_event_t *e; \
- \
- if (lwt_enabled) { \
- local_irq_save (flags); \
- \
- cpu = &lwt_cpus[smp_processor_id()]; \
- p = cpu->lwtc_current_page; \
- e = &p->lwtp_events[cpu->lwtc_current_index++]; \
- \
- if (cpu->lwtc_current_index >= LWT_EVENTS_PER_PAGE) { \
- cpu->lwtc_current_page = \
- list_entry (p->lwtp_list.next, \
- lwt_page_t, lwtp_list); \
- cpu->lwtc_current_index = 0; \
- } \
- \
- e->lwte_when = get_cycles(); \
- e->lwte_where = LWTWHERE(__FILE__,__LINE__); \
- e->lwte_task = current; \
- e->lwte_p1 = (long)(p1); \
- e->lwte_p2 = (long)(p2); \
- e->lwte_p3 = (long)(p3); \
- e->lwte_p4 = (long)(p4); \
- \
- local_irq_restore (flags); \
- } \
-} while (0)
-
-#endif /* !KLWT_SUPPORT */
-
-extern int lwt_init (void);
-extern void lwt_fini (void);
-extern int lwt_lookup_string (int *size, char *knlptr,
- char *usrptr, int usrsize);
-extern int lwt_control (int enable, int clear);
-extern int lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
- void *user_ptr, int user_size);
-#endif /* LWT_SUPPORT */
-
-/* ------------------------------------------------------------------ */
-
-#define IOCTL_LIBCFS_TYPE long
-
-#ifdef __CYGWIN__
-# ifndef BITS_PER_LONG
-# define BITS_PER_LONG 64
-# endif
-#endif
-
-# define LI_POISON 0x5a5a5a5a
-#if BITS_PER_LONG > 32
-# define LL_POISON 0x5a5a5a5a5a5a5a5aL
-#else
-# define LL_POISON 0x5a5a5a5aL
-#endif
-# define LP_POISON ((void *)LL_POISON)
-
/* this is a bit chunky */
-#define _LWORDSIZE BITS_PER_LONG
-
# define LPU64 "%llu"
# define LPD64 "%lld"
# define LPX64 "%#llx"
@@ -218,24 +90,4 @@ extern int lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
*/
# define LPPID "%d"
-
-#undef _LWORDSIZE
-
-/* compat macroses */
-
-
-#ifndef get_cpu
-# ifdef CONFIG_PREEMPT
-# define get_cpu() ({ preempt_disable(); smp_processor_id(); })
-# define put_cpu() preempt_enable()
-# else
-# define get_cpu() smp_processor_id()
-# define put_cpu()
-# endif
-#else
-#endif /* get_cpu & put_cpu */
-
-#define INIT_CTL_NAME(a)
-#define INIT_STRATEGY(a)
-
#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
index 60ecaf63f9fb..a7bca40e9fb7 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
@@ -49,7 +49,6 @@
#include <linux/libcfs/linux/linux-mem.h>
#include <linux/libcfs/linux/linux-prim.h>
#include <linux/libcfs/linux/linux-lock.h>
-#include <linux/libcfs/linux/linux-fs.h>
#include <linux/libcfs/linux/linux-tcpip.h>
#include <linux/libcfs/linux/linux-bitops.h>
#include <linux/libcfs/linux/linux-types.h>
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-fs.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-fs.h
deleted file mode 100644
index eebf138f21e5..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-fs.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/linux/linux-fs.h
- *
- * Basic library routines.
- */
-
-#ifndef __LIBCFS_LINUX_CFS_FS_H__
-#define __LIBCFS_LINUX_CFS_FS_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
-#endif
-
-
-#include <linux/fs.h>
-#include <linux/stat.h>
-#include <linux/mount.h>
-#include <linux/backing-dev.h>
-#include <linux/posix_acl_xattr.h>
-
-#define filp_size(f) \
- (i_size_read((f)->f_dentry->d_inode))
-#define filp_poff(f) \
- (&(f)->f_pos)
-
-# define do_fsync(fp, flag) \
- ((fp)->f_op->fsync(fp, 0, LLONG_MAX, flag))
-
-#define filp_read(fp, buf, size, pos) \
- ((fp)->f_op->read((fp), (buf), (size), pos))
-
-#define filp_write(fp, buf, size, pos) \
- ((fp)->f_op->write((fp), (buf), (size), pos))
-
-#define filp_fsync(fp) \
- do_fsync(fp, 1)
-
-#define flock_type(fl) ((fl)->fl_type)
-#define flock_set_type(fl, type) do { (fl)->fl_type = (type); } while (0)
-#define flock_pid(fl) ((fl)->fl_pid)
-#define flock_set_pid(fl, pid) do { (fl)->fl_pid = (pid); } while (0)
-#define flock_start(fl) ((fl)->fl_start)
-#define flock_set_start(fl, st) do { (fl)->fl_start = (st); } while (0)
-#define flock_end(fl) ((fl)->fl_end)
-#define flock_set_end(fl, end) do { (fl)->fl_end = (end); } while (0)
-
-#ifndef IFSHIFT
-#define IFSHIFT 12
-#endif
-
-#ifndef IFTODT
-#define IFTODT(type) (((type) & S_IFMT) >> IFSHIFT)
-#endif
-#ifndef DTTOIF
-#define DTTOIF(dirtype) ((dirtype) << IFSHIFT)
-#endif
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h
index 1ec4ca1a6e32..2aeff27b1641 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h
@@ -47,7 +47,6 @@
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index bf301048c7ab..3ac2bb5fd2db 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -650,12 +650,13 @@ extern lnet_ni_t *lnet_net2ni(__u32 net);
int lnet_notify(lnet_ni_t *ni, lnet_nid_t peer, int alive, cfs_time_t when);
void lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, cfs_time_t when);
-int lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway_nid);
+int lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway_nid,
+ unsigned int priority);
int lnet_check_routes(void);
int lnet_del_route(__u32 net, lnet_nid_t gw_nid);
void lnet_destroy_routes(void);
int lnet_get_route(int idx, __u32 *net, __u32 *hops,
- lnet_nid_t *gateway, __u32 *alive);
+ lnet_nid_t *gateway, __u32 *alive, __u32 *priority);
void lnet_proc_init(void);
void lnet_proc_fini(void);
int lnet_rtrpools_alloc(int im_a_router);
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index e579e7ed5070..dd8edcf1b5c0 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -478,7 +478,6 @@ typedef struct lnet_peer {
lnet_rc_data_t *lp_rcd; /* router checker state */
} lnet_peer_t;
-
/* peer hash size */
#define LNET_PEER_HASH_BITS 9
#define LNET_PEER_HASH_SIZE (1 << LNET_PEER_HASH_BITS)
@@ -504,6 +503,7 @@ typedef struct {
int lr_seq; /* sequence for round-robin */
unsigned int lr_downis; /* number of down NIs */
unsigned int lr_hops; /* how far I am */
+ unsigned int lr_priority; /* route priority */
} lnet_route_t;
#define LNET_REMOTE_NETS_HASH_DEFAULT (1U << 7)
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index 4f63b7acb9d7..c833ce8544d3 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -383,14 +383,6 @@ typedef enum {
typedef unsigned LNET_SEQ_BASETYPE lnet_seq_t;
#define LNET_SEQ_GT(a,b) (((signed LNET_SEQ_BASETYPE)((a) - (b))) > 0)
-/* XXX
- * cygwin need the pragma line, not clear if it's needed in other places.
- * checking!!!
- */
-#ifdef __CYGWIN__
-#pragma pack(push, 4)
-#endif
-
/**
* Information about an event on a MD.
*/
@@ -462,9 +454,6 @@ typedef struct {
*/
volatile lnet_seq_t sequence;
} lnet_event_t;
-#ifdef __CYGWIN__
-#pragma pop
-#endif
/**
* Event queue handler function type.
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 86397f96b033..644a0000130a 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -3230,7 +3230,6 @@ void __exit
kiblnd_module_fini (void)
{
lnet_unregister_lnd(&the_o2iblnd);
- kiblnd_tunables_fini();
}
int __init
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index 938df0cf8c64..ce05d558b223 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -50,7 +50,6 @@
#include <asm/uaccess.h>
#include <asm/io.h>
-#include <linux/init.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/list.h>
@@ -106,9 +105,6 @@ typedef struct
int *kib_fmr_pool_size; /* # FMRs in pool */
int *kib_fmr_flush_trigger; /* When to trigger FMR flush */
int *kib_fmr_cache; /* enable FMR pool cache? */
-#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
- ctl_table_header_t *kib_sysctl; /* sysctl interface */
-#endif
int *kib_require_priv_port;/* accept only privileged ports */
int *kib_use_priv_port; /* use privileged port for active connect */
/* # threads on each CPT */
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 26b49a24b3df..6f58ead20393 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -529,8 +529,7 @@ kiblnd_kvaddr_to_page (unsigned long vaddr)
{
struct page *page;
- if (vaddr >= VMALLOC_START &&
- vaddr < VMALLOC_END) {
+ if (is_vmalloc_addr((void *)vaddr)) {
page = vmalloc_to_page ((void *)vaddr);
LASSERT (page != NULL);
return page;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index 92dc5672e2dd..cefdfb6b1bec 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -41,95 +41,95 @@
#include "o2iblnd.h"
static int service = 987;
-CFS_MODULE_PARM(service, "i", int, 0444,
- "service number (within RDMA_PS_TCP)");
+module_param(service, int, 0444);
+MODULE_PARM_DESC(service, "service number (within RDMA_PS_TCP)");
static int cksum = 0;
-CFS_MODULE_PARM(cksum, "i", int, 0644,
- "set non-zero to enable message (not RDMA) checksums");
+module_param(cksum, int, 0644);
+MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums");
static int timeout = 50;
-CFS_MODULE_PARM(timeout, "i", int, 0644,
- "timeout (seconds)");
+module_param(timeout, int, 0644);
+MODULE_PARM_DESC(timeout, "timeout (seconds)");
/* Number of threads in each scheduler pool which is percpt,
* we will estimate reasonable value based on CPUs if it's set to zero. */
static int nscheds;
-CFS_MODULE_PARM(nscheds, "i", int, 0444,
- "number of threads in each scheduler pool");
+module_param(nscheds, int, 0444);
+MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");
/* NB: this value is shared by all CPTs, it can grow at runtime */
static int ntx = 512;
-CFS_MODULE_PARM(ntx, "i", int, 0444,
- "# of message descriptors allocated for each pool");
+module_param(ntx, int, 0444);
+MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool");
/* NB: this value is shared by all CPTs */
static int credits = 256;
-CFS_MODULE_PARM(credits, "i", int, 0444,
- "# concurrent sends");
+module_param(credits, int, 0444);
+MODULE_PARM_DESC(credits, "# concurrent sends");
static int peer_credits = 8;
-CFS_MODULE_PARM(peer_credits, "i", int, 0444,
- "# concurrent sends to 1 peer");
+module_param(peer_credits, int, 0444);
+MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");
static int peer_credits_hiw = 0;
-CFS_MODULE_PARM(peer_credits_hiw, "i", int, 0444,
- "when eagerly to return credits");
+module_param(peer_credits_hiw, int, 0444);
+MODULE_PARM_DESC(peer_credits_hiw, "when eagerly to return credits");
static int peer_buffer_credits = 0;
-CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
- "# per-peer router buffer credits");
+module_param(peer_buffer_credits, int, 0444);
+MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");
static int peer_timeout = 180;
-CFS_MODULE_PARM(peer_timeout, "i", int, 0444,
- "Seconds without aliveness news to declare peer dead (<=0 to disable)");
+module_param(peer_timeout, int, 0444);
+MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");
static char *ipif_name = "ib0";
-CFS_MODULE_PARM(ipif_name, "s", charp, 0444,
- "IPoIB interface name");
+module_param(ipif_name, charp, 0444);
+MODULE_PARM_DESC(ipif_name, "IPoIB interface name");
static int retry_count = 5;
-CFS_MODULE_PARM(retry_count, "i", int, 0644,
- "Retransmissions when no ACK received");
+module_param(retry_count, int, 0644);
+MODULE_PARM_DESC(retry_count, "Retransmissions when no ACK received");
static int rnr_retry_count = 6;
-CFS_MODULE_PARM(rnr_retry_count, "i", int, 0644,
- "RNR retransmissions");
+module_param(rnr_retry_count, int, 0644);
+MODULE_PARM_DESC(rnr_retry_count, "RNR retransmissions");
static int keepalive = 100;
-CFS_MODULE_PARM(keepalive, "i", int, 0644,
- "Idle time in seconds before sending a keepalive");
+module_param(keepalive, int, 0644);
+MODULE_PARM_DESC(keepalive, "Idle time in seconds before sending a keepalive");
static int ib_mtu = 0;
-CFS_MODULE_PARM(ib_mtu, "i", int, 0444,
- "IB MTU 256/512/1024/2048/4096");
+module_param(ib_mtu, int, 0444);
+MODULE_PARM_DESC(ib_mtu, "IB MTU 256/512/1024/2048/4096");
static int concurrent_sends = 0;
-CFS_MODULE_PARM(concurrent_sends, "i", int, 0444,
- "send work-queue sizing");
+module_param(concurrent_sends, int, 0444);
+MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing");
static int map_on_demand = 0;
-CFS_MODULE_PARM(map_on_demand, "i", int, 0444,
- "map on demand");
+module_param(map_on_demand, int, 0444);
+MODULE_PARM_DESC(map_on_demand, "map on demand");
/* NB: this value is shared by all CPTs, it can grow at runtime */
static int fmr_pool_size = 512;
-CFS_MODULE_PARM(fmr_pool_size, "i", int, 0444,
- "size of fmr pool on each CPT (>= ntx / 4)");
+module_param(fmr_pool_size, int, 0444);
+MODULE_PARM_DESC(fmr_pool_size, "size of fmr pool on each CPT (>= ntx / 4)");
/* NB: this value is shared by all CPTs, it can grow at runtime */
static int fmr_flush_trigger = 384;
-CFS_MODULE_PARM(fmr_flush_trigger, "i", int, 0444,
- "# dirty FMRs that triggers pool flush");
+module_param(fmr_flush_trigger, int, 0444);
+MODULE_PARM_DESC(fmr_flush_trigger, "# dirty FMRs that triggers pool flush");
static int fmr_cache = 1;
-CFS_MODULE_PARM(fmr_cache, "i", int, 0444,
- "non-zero to enable FMR caching");
+module_param(fmr_cache, int, 0444);
+MODULE_PARM_DESC(fmr_cache, "non-zero to enable FMR caching");
/* NB: this value is shared by all CPTs, it can grow at runtime */
static int pmr_pool_size = 512;
-CFS_MODULE_PARM(pmr_pool_size, "i", int, 0444,
- "size of MR cache pmr pool on each CPT");
+module_param(pmr_pool_size, int, 0444);
+MODULE_PARM_DESC(pmr_pool_size, "size of MR cache pmr pool on each CPT");
/*
* 0: disable failover
@@ -137,17 +137,17 @@ CFS_MODULE_PARM(pmr_pool_size, "i", int, 0444,
* 2: force to failover (for debug)
*/
static int dev_failover = 0;
-CFS_MODULE_PARM(dev_failover, "i", int, 0444,
- "HCA failover for bonding (0 off, 1 on, other values reserved)");
+module_param(dev_failover, int, 0444);
+MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)");
static int require_privileged_port = 0;
-CFS_MODULE_PARM(require_privileged_port, "i", int, 0644,
- "require privileged port when accepting connection");
+module_param(require_privileged_port, int, 0644);
+MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection");
static int use_privileged_port = 1;
-CFS_MODULE_PARM(use_privileged_port, "i", int, 0644,
- "use privileged port when initiating connection");
+module_param(use_privileged_port, int, 0644);
+MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");
kib_tunables_t kiblnd_tunables = {
.kib_dev_failover = &dev_failover,
@@ -176,261 +176,6 @@ kib_tunables_t kiblnd_tunables = {
.kib_nscheds = &nscheds
};
-#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
-
-static char ipif_basename_space[32];
-
-
-enum {
- O2IBLND_SERVICE = 1,
- O2IBLND_CKSUM,
- O2IBLND_TIMEOUT,
- O2IBLND_NTX,
- O2IBLND_CREDITS,
- O2IBLND_PEER_TXCREDITS,
- O2IBLND_PEER_CREDITS_HIW,
- O2IBLND_PEER_RTRCREDITS,
- O2IBLND_PEER_TIMEOUT,
- O2IBLND_IPIF_BASENAME,
- O2IBLND_RETRY_COUNT,
- O2IBLND_RNR_RETRY_COUNT,
- O2IBLND_KEEPALIVE,
- O2IBLND_CONCURRENT_SENDS,
- O2IBLND_IB_MTU,
- O2IBLND_MAP_ON_DEMAND,
- O2IBLND_FMR_POOL_SIZE,
- O2IBLND_FMR_FLUSH_TRIGGER,
- O2IBLND_FMR_CACHE,
- O2IBLND_PMR_POOL_SIZE,
- O2IBLND_DEV_FAILOVER
-};
-
-static ctl_table_t kiblnd_ctl_table[] = {
- {
- .ctl_name = O2IBLND_SERVICE,
- .procname = "service",
- .data = &service,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_CKSUM,
- .procname = "cksum",
- .data = &cksum,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_TIMEOUT,
- .procname = "timeout",
- .data = &timeout,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_NTX,
- .procname = "ntx",
- .data = &ntx,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_CREDITS,
- .procname = "credits",
- .data = &credits,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_PEER_TXCREDITS,
- .procname = "peer_credits",
- .data = &peer_credits,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_PEER_CREDITS_HIW,
- .procname = "peer_credits_hiw",
- .data = &peer_credits_hiw,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_PEER_RTRCREDITS,
- .procname = "peer_buffer_credits",
- .data = &peer_buffer_credits,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_PEER_TIMEOUT,
- .procname = "peer_timeout",
- .data = &peer_timeout,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_IPIF_BASENAME,
- .procname = "ipif_name",
- .data = ipif_basename_space,
- .maxlen = sizeof(ipif_basename_space),
- .mode = 0444,
- .proc_handler = &proc_dostring
- },
- {
- .ctl_name = O2IBLND_RETRY_COUNT,
- .procname = "retry_count",
- .data = &retry_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_RNR_RETRY_COUNT,
- .procname = "rnr_retry_count",
- .data = &rnr_retry_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_KEEPALIVE,
- .procname = "keepalive",
- .data = &keepalive,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_CONCURRENT_SENDS,
- .procname = "concurrent_sends",
- .data = &concurrent_sends,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_IB_MTU,
- .procname = "ib_mtu",
- .data = &ib_mtu,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_MAP_ON_DEMAND,
- .procname = "map_on_demand",
- .data = &map_on_demand,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
-
- {
- .ctl_name = O2IBLND_FMR_POOL_SIZE,
- .procname = "fmr_pool_size",
- .data = &fmr_pool_size,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_FMR_FLUSH_TRIGGER,
- .procname = "fmr_flush_trigger",
- .data = &fmr_flush_trigger,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_FMR_CACHE,
- .procname = "fmr_cache",
- .data = &fmr_cache,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_PMR_POOL_SIZE,
- .procname = "pmr_pool_size",
- .data = &pmr_pool_size,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = O2IBLND_DEV_FAILOVER,
- .procname = "dev_failover",
- .data = &dev_failover,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {0}
-};
-
-static ctl_table_t kiblnd_top_ctl_table[] = {
- {
- .ctl_name = CTL_O2IBLND,
- .procname = "o2iblnd",
- .data = NULL,
- .maxlen = 0,
- .mode = 0555,
- .child = kiblnd_ctl_table
- },
- {0}
-};
-
-void
-kiblnd_initstrtunable(char *space, char *str, int size)
-{
- strncpy(space, str, size);
- space[size-1] = 0;
-}
-
-void
-kiblnd_sysctl_init (void)
-{
- kiblnd_initstrtunable(ipif_basename_space, ipif_name,
- sizeof(ipif_basename_space));
-
- kiblnd_tunables.kib_sysctl =
- register_sysctl_table(kiblnd_top_ctl_table);
-
- if (kiblnd_tunables.kib_sysctl == NULL)
- CWARN("Can't setup /proc tunables\n");
-}
-
-void
-kiblnd_sysctl_fini (void)
-{
- if (kiblnd_tunables.kib_sysctl != NULL)
- unregister_sysctl_table(kiblnd_tunables.kib_sysctl);
-}
-
-#else
-
-void
-kiblnd_sysctl_init (void)
-{
-}
-
-void
-kiblnd_sysctl_fini (void)
-{
-}
-
-#endif
-
int
kiblnd_tunables_init (void)
{
@@ -482,12 +227,5 @@ kiblnd_tunables_init (void)
*kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits);
}
- kiblnd_sysctl_init();
return 0;
}
-
-void
-kiblnd_tunables_fini (void)
-{
- kiblnd_sysctl_fini();
-}
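Every conversion in this file follows the same mechanical pattern: CFS_MODULE_PARM(name, "i"/"s", type, perm, "desc") becomes the stock module_param(name, type, perm) plus MODULE_PARM_DESC(name, "desc"), and the hand-rolled sysctl mirror of the same tunables (the ctl_table block and kiblnd_sysctl_init/fini removed above) is dropped, since parameters declared with a non-zero permission already show up under /sys/module/<module>/parameters/. A minimal sketch of the resulting idiom with made-up parameter names:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* Hypothetical tunables; 0444 exposes them read-only in
     * /sys/module/<module>/parameters/. */
    static int demo_timeout = 50;
    module_param(demo_timeout, int, 0444);
    MODULE_PARM_DESC(demo_timeout, "demo timeout (seconds)");

    static char *demo_ifname = "ib0";
    module_param(demo_ifname, charp, 0444);
    MODULE_PARM_DESC(demo_ifname, "demo interface name");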
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 2ddc3aadb8d6..8f74d0be32f1 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -2866,7 +2866,6 @@ void __exit
ksocknal_module_fini (void)
{
lnet_unregister_lnd(&the_ksocklnd);
- ksocknal_tunables_fini();
}
int __init
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index b483e0c3a69a..df2be7a7f46e 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -124,9 +124,6 @@ typedef struct
unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload size */
int *ksnd_zc_recv; /* enable ZC receive (for Chelsio TOE) */
int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to enable ZC receive */
-#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
- ctl_table_header_t *ksnd_sysctl; /* sysctl interface */
-#endif
} ksock_tunables_t;
typedef struct
@@ -592,9 +589,6 @@ extern int ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem,
int *rxmem, int *nagle);
extern int ksocknal_tunables_init(void);
-extern void ksocknal_tunables_fini(void);
-extern int ksocknal_lib_tunables_init(void);
-extern void ksocknal_lib_tunables_fini(void);
extern void ksocknal_lib_csum_tx(ksock_tx_t *tx);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 68a4f52ec998..b7b53b579c85 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -924,7 +924,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
int
ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
- int mpflag = 0;
+ int mpflag = 1;
int type = lntmsg->msg_type;
lnet_process_id_t target = lntmsg->msg_target;
unsigned int payload_niov = lntmsg->msg_niov;
@@ -993,8 +993,9 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
/* The first fragment will be set later in pro_pack */
rc = ksocknal_launch_packet(ni, tx, target);
- if (lntmsg->msg_vmflush)
+ if (!mpflag)
cfs_memory_pressure_restore(mpflag);
+
if (rc == 0)
return (0);
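The ksocknal_send() change above turns mpflag into a sentinel: it starts at 1, the companion call that records the previous memory-pressure state (outside this hunk, taken only when lntmsg->msg_vmflush is set) overwrites it, and the exit path now restores only when a cleared state was actually saved instead of re-testing the message flag. A generic sketch of that save-with-sentinel idiom, using stand-in helpers rather than the libcfs ones:

    #include <stdbool.h>

    static bool pressure_state;                  /* stand-in global flag */

    static int pressure_get_and_set(void)        /* returns previous state */
    {
            int old = pressure_state;

            pressure_state = true;
            return old;
    }

    static void pressure_restore(int old)
    {
            pressure_state = old;
    }

    static void demo_send(bool vmflush)
    {
            int saved = 1;          /* sentinel: nothing recorded, skip restore */

            if (vmflush)
                    saved = pressure_get_and_set();

            /* ... submission work, possibly with early exits ... */

            if (!saved)             /* restore only a state we changed */
                    pressure_restore(saved);
    }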
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
index a1c6a519bf5b..80141aa32c21 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
@@ -36,313 +36,6 @@
#include "socklnd.h"
-# if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
-
-
-enum {
- SOCKLND_TIMEOUT = 1,
- SOCKLND_CREDITS,
- SOCKLND_PEER_TXCREDITS,
- SOCKLND_PEER_RTRCREDITS,
- SOCKLND_PEER_TIMEOUT,
- SOCKLND_NCONNDS,
- SOCKLND_RECONNECTS_MIN,
- SOCKLND_RECONNECTS_MAX,
- SOCKLND_EAGER_ACK,
- SOCKLND_ZERO_COPY,
- SOCKLND_TYPED,
- SOCKLND_BULK_MIN,
- SOCKLND_RX_BUFFER_SIZE,
- SOCKLND_TX_BUFFER_SIZE,
- SOCKLND_NAGLE,
- SOCKLND_IRQ_AFFINITY,
- SOCKLND_ROUND_ROBIN,
- SOCKLND_KEEPALIVE,
- SOCKLND_KEEPALIVE_IDLE,
- SOCKLND_KEEPALIVE_COUNT,
- SOCKLND_KEEPALIVE_INTVL,
- SOCKLND_BACKOFF_INIT,
- SOCKLND_BACKOFF_MAX,
- SOCKLND_PROTOCOL,
- SOCKLND_ZERO_COPY_RECV,
- SOCKLND_ZERO_COPY_RECV_MIN_NFRAGS
-};
-
-static ctl_table_t ksocknal_ctl_table[] = {
- {
- .ctl_name = SOCKLND_TIMEOUT,
- .procname = "timeout",
- .data = &ksocknal_tunables.ksnd_timeout,
- .maxlen = sizeof (int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_CREDITS,
- .procname = "credits",
- .data = &ksocknal_tunables.ksnd_credits,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_PEER_TXCREDITS,
- .procname = "peer_credits",
- .data = &ksocknal_tunables.ksnd_peertxcredits,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_PEER_RTRCREDITS,
- .procname = "peer_buffer_credits",
- .data = &ksocknal_tunables.ksnd_peerrtrcredits,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_PEER_TIMEOUT,
- .procname = "peer_timeout",
- .data = &ksocknal_tunables.ksnd_peertimeout,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_NCONNDS,
- .procname = "nconnds",
- .data = &ksocknal_tunables.ksnd_nconnds,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_RECONNECTS_MIN,
- .procname = "min_reconnectms",
- .data = &ksocknal_tunables.ksnd_min_reconnectms,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_RECONNECTS_MAX,
- .procname = "max_reconnectms",
- .data = &ksocknal_tunables.ksnd_max_reconnectms,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_EAGER_ACK,
- .procname = "eager_ack",
- .data = &ksocknal_tunables.ksnd_eager_ack,
- .maxlen = sizeof (int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_ZERO_COPY,
- .procname = "zero_copy",
- .data = &ksocknal_tunables.ksnd_zc_min_payload,
- .maxlen = sizeof (int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_ZERO_COPY_RECV,
- .procname = "zero_copy_recv",
- .data = &ksocknal_tunables.ksnd_zc_recv,
- .maxlen = sizeof (int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
-
- {
- .ctl_name = SOCKLND_ZERO_COPY_RECV_MIN_NFRAGS,
- .procname = "zero_copy_recv",
- .data = &ksocknal_tunables.ksnd_zc_recv_min_nfrags,
- .maxlen = sizeof (int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_TYPED,
- .procname = "typed",
- .data = &ksocknal_tunables.ksnd_typed_conns,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_BULK_MIN,
- .procname = "min_bulk",
- .data = &ksocknal_tunables.ksnd_min_bulk,
- .maxlen = sizeof (int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_RX_BUFFER_SIZE,
- .procname = "rx_buffer_size",
- .data = &ksocknal_tunables.ksnd_rx_buffer_size,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_TX_BUFFER_SIZE,
- .procname = "tx_buffer_size",
- .data = &ksocknal_tunables.ksnd_tx_buffer_size,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_NAGLE,
- .procname = "nagle",
- .data = &ksocknal_tunables.ksnd_nagle,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_ROUND_ROBIN,
- .procname = "round_robin",
- .data = &ksocknal_tunables.ksnd_round_robin,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_KEEPALIVE,
- .procname = "keepalive",
- .data = &ksocknal_tunables.ksnd_keepalive,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_KEEPALIVE_IDLE,
- .procname = "keepalive_idle",
- .data = &ksocknal_tunables.ksnd_keepalive_idle,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_KEEPALIVE_COUNT,
- .procname = "keepalive_count",
- .data = &ksocknal_tunables.ksnd_keepalive_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
- {
- .ctl_name = SOCKLND_KEEPALIVE_INTVL,
- .procname = "keepalive_intvl",
- .data = &ksocknal_tunables.ksnd_keepalive_intvl,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
-#if SOCKNAL_VERSION_DEBUG
- {
- .ctl_name = SOCKLND_PROTOCOL,
- .procname = "protocol",
- .data = &ksocknal_tunables.ksnd_protocol,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- },
-#endif
- {0}
-};
-
-
-ctl_table_t ksocknal_top_ctl_table[] = {
- {
- .ctl_name = CTL_SOCKLND,
- .procname = "socknal",
- .data = NULL,
- .maxlen = 0,
- .mode = 0555,
- .child = ksocknal_ctl_table
- },
- { 0 }
-};
-
-int
-ksocknal_lib_tunables_init ()
-{
- if (!*ksocknal_tunables.ksnd_typed_conns) {
- int rc = -EINVAL;
-#if SOCKNAL_VERSION_DEBUG
- if (*ksocknal_tunables.ksnd_protocol < 3)
- rc = 0;
-#endif
- if (rc != 0) {
- CERROR("Protocol V3.x MUST have typed connections\n");
- return rc;
- }
- }
-
- if (*ksocknal_tunables.ksnd_zc_recv_min_nfrags < 2)
- *ksocknal_tunables.ksnd_zc_recv_min_nfrags = 2;
- if (*ksocknal_tunables.ksnd_zc_recv_min_nfrags > LNET_MAX_IOV)
- *ksocknal_tunables.ksnd_zc_recv_min_nfrags = LNET_MAX_IOV;
-
- ksocknal_tunables.ksnd_sysctl =
- register_sysctl_table(ksocknal_top_ctl_table);
-
- if (ksocknal_tunables.ksnd_sysctl == NULL)
- CWARN("Can't setup /proc tunables\n");
-
- return 0;
-}
-
-void
-ksocknal_lib_tunables_fini(void)
-{
- if (ksocknal_tunables.ksnd_sysctl != NULL)
- unregister_sysctl_table(ksocknal_tunables.ksnd_sysctl);
-}
-#else
-int
-ksocknal_lib_tunables_init(void)
-{
- return 0;
-}
-
-void
-ksocknal_lib_tunables_fini(void)
-{
-}
-#endif /* # if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM */
-
int
ksocknal_lib_get_conn_addrs (ksock_conn_t *conn)
{
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
index 1cfc1b168bed..025cb65ddc70 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
@@ -54,7 +54,6 @@
#include <asm/uaccess.h>
#include <asm/irq.h>
-#include <linux/init.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/list.h>
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
index 8a474f64abbe..54c0019904ff 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
@@ -22,123 +22,123 @@
#include "socklnd.h"
static int sock_timeout = 50;
-CFS_MODULE_PARM(sock_timeout, "i", int, 0644,
- "dead socket timeout (seconds)");
+module_param(sock_timeout, int, 0644);
+MODULE_PARM_DESC(sock_timeout, "dead socket timeout (seconds)");
static int credits = 256;
-CFS_MODULE_PARM(credits, "i", int, 0444,
- "# concurrent sends");
+module_param(credits, int, 0444);
+MODULE_PARM_DESC(credits, "# concurrent sends");
static int peer_credits = 8;
-CFS_MODULE_PARM(peer_credits, "i", int, 0444,
- "# concurrent sends to 1 peer");
+module_param(peer_credits, int, 0444);
+MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");
static int peer_buffer_credits = 0;
-CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
- "# per-peer router buffer credits");
+module_param(peer_buffer_credits, int, 0444);
+MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");
static int peer_timeout = 180;
-CFS_MODULE_PARM(peer_timeout, "i", int, 0444,
- "Seconds without aliveness news to declare peer dead (<=0 to disable)");
+module_param(peer_timeout, int, 0444);
+MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");
/* Number of daemons in each thread pool which is percpt,
* we will estimate reasonable value based on CPUs if it's not set. */
static unsigned int nscheds;
-CFS_MODULE_PARM(nscheds, "i", int, 0444,
- "# scheduler daemons in each pool while starting");
+module_param(nscheds, int, 0444);
+MODULE_PARM_DESC(nscheds, "# scheduler daemons in each pool while starting");
static int nconnds = 4;
-CFS_MODULE_PARM(nconnds, "i", int, 0444,
- "# connection daemons while starting");
+module_param(nconnds, int, 0444);
+MODULE_PARM_DESC(nconnds, "# connection daemons while starting");
static int nconnds_max = 64;
-CFS_MODULE_PARM(nconnds_max, "i", int, 0444,
- "max # connection daemons");
+module_param(nconnds_max, int, 0444);
+MODULE_PARM_DESC(nconnds_max, "max # connection daemons");
static int min_reconnectms = 1000;
-CFS_MODULE_PARM(min_reconnectms, "i", int, 0644,
- "min connection retry interval (mS)");
+module_param(min_reconnectms, int, 0644);
+MODULE_PARM_DESC(min_reconnectms, "min connection retry interval (mS)");
static int max_reconnectms = 60000;
-CFS_MODULE_PARM(max_reconnectms, "i", int, 0644,
- "max connection retry interval (mS)");
+module_param(max_reconnectms, int, 0644);
+MODULE_PARM_DESC(max_reconnectms, "max connection retry interval (mS)");
# define DEFAULT_EAGER_ACK 0
static int eager_ack = DEFAULT_EAGER_ACK;
-CFS_MODULE_PARM(eager_ack, "i", int, 0644,
- "send tcp ack packets eagerly");
+module_param(eager_ack, int, 0644);
+MODULE_PARM_DESC(eager_ack, "send tcp ack packets eagerly");
static int typed_conns = 1;
-CFS_MODULE_PARM(typed_conns, "i", int, 0444,
- "use different sockets for bulk");
+module_param(typed_conns, int, 0444);
+MODULE_PARM_DESC(typed_conns, "use different sockets for bulk");
static int min_bulk = (1<<10);
-CFS_MODULE_PARM(min_bulk, "i", int, 0644,
- "smallest 'large' message");
+module_param(min_bulk, int, 0644);
+MODULE_PARM_DESC(min_bulk, "smallest 'large' message");
# define DEFAULT_BUFFER_SIZE 0
static int tx_buffer_size = DEFAULT_BUFFER_SIZE;
-CFS_MODULE_PARM(tx_buffer_size, "i", int, 0644,
- "socket tx buffer size (0 for system default)");
+module_param(tx_buffer_size, int, 0644);
+MODULE_PARM_DESC(tx_buffer_size, "socket tx buffer size (0 for system default)");
static int rx_buffer_size = DEFAULT_BUFFER_SIZE;
-CFS_MODULE_PARM(rx_buffer_size, "i", int, 0644,
- "socket rx buffer size (0 for system default)");
+module_param(rx_buffer_size, int, 0644);
+MODULE_PARM_DESC(rx_buffer_size, "socket rx buffer size (0 for system default)");
static int nagle = 0;
-CFS_MODULE_PARM(nagle, "i", int, 0644,
- "enable NAGLE?");
+module_param(nagle, int, 0644);
+MODULE_PARM_DESC(nagle, "enable NAGLE?");
static int round_robin = 1;
-CFS_MODULE_PARM(round_robin, "i", int, 0644,
- "Round robin for multiple interfaces");
+module_param(round_robin, int, 0644);
+MODULE_PARM_DESC(round_robin, "Round robin for multiple interfaces");
static int keepalive = 30;
-CFS_MODULE_PARM(keepalive, "i", int, 0644,
- "# seconds before send keepalive");
+module_param(keepalive, int, 0644);
+MODULE_PARM_DESC(keepalive, "# seconds before send keepalive");
static int keepalive_idle = 30;
-CFS_MODULE_PARM(keepalive_idle, "i", int, 0644,
- "# idle seconds before probe");
+module_param(keepalive_idle, int, 0644);
+MODULE_PARM_DESC(keepalive_idle, "# idle seconds before probe");
#define DEFAULT_KEEPALIVE_COUNT 5
static int keepalive_count = DEFAULT_KEEPALIVE_COUNT;
-CFS_MODULE_PARM(keepalive_count, "i", int, 0644,
- "# missed probes == dead");
+module_param(keepalive_count, int, 0644);
+MODULE_PARM_DESC(keepalive_count, "# missed probes == dead");
static int keepalive_intvl = 5;
-CFS_MODULE_PARM(keepalive_intvl, "i", int, 0644,
- "seconds between probes");
+module_param(keepalive_intvl, int, 0644);
+MODULE_PARM_DESC(keepalive_intvl, "seconds between probes");
static int enable_csum = 0;
-CFS_MODULE_PARM(enable_csum, "i", int, 0644,
- "enable check sum");
+module_param(enable_csum, int, 0644);
+MODULE_PARM_DESC(enable_csum, "enable check sum");
static int inject_csum_error = 0;
-CFS_MODULE_PARM(inject_csum_error, "i", int, 0644,
- "set non-zero to inject a checksum error");
+module_param(inject_csum_error, int, 0644);
+MODULE_PARM_DESC(inject_csum_error, "set non-zero to inject a checksum error");
static int nonblk_zcack = 1;
-CFS_MODULE_PARM(nonblk_zcack, "i", int, 0644,
- "always send ZC-ACK on non-blocking connection");
+module_param(nonblk_zcack, int, 0644);
+MODULE_PARM_DESC(nonblk_zcack, "always send ZC-ACK on non-blocking connection");
static unsigned int zc_min_payload = (16 << 10);
-CFS_MODULE_PARM(zc_min_payload, "i", int, 0644,
- "minimum payload size to zero copy");
+module_param(zc_min_payload, int, 0644);
+MODULE_PARM_DESC(zc_min_payload, "minimum payload size to zero copy");
static unsigned int zc_recv = 0;
-CFS_MODULE_PARM(zc_recv, "i", int, 0644,
- "enable ZC recv for Chelsio driver");
+module_param(zc_recv, int, 0644);
+MODULE_PARM_DESC(zc_recv, "enable ZC recv for Chelsio driver");
static unsigned int zc_recv_min_nfrags = 16;
-CFS_MODULE_PARM(zc_recv_min_nfrags, "i", int, 0644,
- "minimum # of fragments to enable ZC recv");
+module_param(zc_recv_min_nfrags, int, 0644);
+MODULE_PARM_DESC(zc_recv_min_nfrags, "minimum # of fragments to enable ZC recv");
#if SOCKNAL_VERSION_DEBUG
static int protocol = 3;
-CFS_MODULE_PARM(protocol, "i", int, 0644,
- "protocol version");
+module_param(protocol, int, 0644);
+MODULE_PARM_DESC(protocol, "protocol version");
#endif
ksock_tunables_t ksocknal_tunables;
@@ -181,18 +181,8 @@ int ksocknal_tunables_init(void)
ksocknal_tunables.ksnd_protocol = &protocol;
#endif
-#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
- ksocknal_tunables.ksnd_sysctl = NULL;
-#endif
-
if (*ksocknal_tunables.ksnd_zc_min_payload < (2 << 10))
*ksocknal_tunables.ksnd_zc_min_payload = (2 << 10);
- /* initialize platform-sepcific tunables */
- return ksocknal_lib_tunables_init();
+ return 0;
};
-
-void ksocknal_tunables_fini(void)
-{
- ksocknal_lib_tunables_fini();
-}
diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index 92c60a756644..cb2ecd717714 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -64,14 +64,14 @@ lnet_accept_magic(__u32 magic, __u32 constant)
static char *accept = "secure";
-CFS_MODULE_PARM(accept, "s", charp, 0444,
- "Accept connections (secure|all|none)");
-CFS_MODULE_PARM(accept_port, "i", int, 0444,
- "Acceptor's port (same on all nodes)");
-CFS_MODULE_PARM(accept_backlog, "i", int, 0444,
- "Acceptor's listen backlog");
-CFS_MODULE_PARM(accept_timeout, "i", int, 0644,
- "Acceptor's timeout (seconds)");
+module_param(accept, charp, 0444);
+MODULE_PARM_DESC(accept, "Accept connections (secure|all|none)");
+module_param(accept_port, int, 0444);
+MODULE_PARM_DESC(accept_port, "Acceptor's port (same on all nodes)");
+module_param(accept_backlog, int, 0444);
+MODULE_PARM_DESC(accept_backlog, "Acceptor's listen backlog");
+module_param(accept_timeout, int, 0644);
+MODULE_PARM_DESC(accept_timeout, "Acceptor's timeout (seconds)");
static char *accept_type;
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 160a4292c6ce..c562ff3e9283 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -45,20 +45,20 @@ EXPORT_SYMBOL(the_lnet);
static char *ip2nets = "";
-CFS_MODULE_PARM(ip2nets, "s", charp, 0444,
- "LNET network <- IP table");
+module_param(ip2nets, charp, 0444);
+MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
static char *networks = "";
-CFS_MODULE_PARM(networks, "s", charp, 0444,
- "local networks");
+module_param(networks, charp, 0444);
+MODULE_PARM_DESC(networks, "local networks");
static char *routes = "";
-CFS_MODULE_PARM(routes, "s", charp, 0444,
- "routes to non-local networks");
+module_param(routes, charp, 0444);
+MODULE_PARM_DESC(routes, "routes to non-local networks");
static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
-CFS_MODULE_PARM(rnet_htable_size, "i", int, 0444,
- "size of remote network hash table");
+module_param(rnet_htable_size, int, 0444);
+MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
char *
lnet_get_routes(void)
@@ -1436,7 +1436,7 @@ LNetCtl(unsigned int cmd, void *arg)
case IOC_LIBCFS_ADD_ROUTE:
rc = lnet_add_route(data->ioc_net, data->ioc_count,
- data->ioc_nid);
+ data->ioc_nid, data->ioc_priority);
return (rc != 0) ? rc : lnet_check_routes();
case IOC_LIBCFS_DEL_ROUTE:
@@ -1445,7 +1445,8 @@ LNetCtl(unsigned int cmd, void *arg)
case IOC_LIBCFS_GET_ROUTE:
return lnet_get_route(data->ioc_count,
&data->ioc_net, &data->ioc_count,
- &data->ioc_nid, &data->ioc_flags);
+ &data->ioc_nid, &data->ioc_flags,
+ &data->ioc_priority);
case IOC_LIBCFS_NOTIFY_ROUTER:
return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
cfs_time_current() -
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index de323f779db8..6a07b0a65d12 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -603,6 +603,37 @@ lnet_parse_hops(char *str, unsigned int *hops)
*hops > 0 && *hops < 256);
}
+#define LNET_PRIORITY_SEPARATOR (':')
+
+int
+lnet_parse_priority(char *str, unsigned int *priority, char **token)
+{
+ int nob;
+ char *sep;
+ int len;
+
+ sep = strchr(str, LNET_PRIORITY_SEPARATOR);
+ if (sep == NULL) {
+ *priority = 0;
+ return 0;
+ }
+ len = strlen(sep + 1);
+
+ if ((sscanf((sep+1), "%u%n", priority, &nob) < 1) || (len != nob)) {
+ /* Update the caller's token pointer so it treats the found
+ priority as the token to report in the error message. */
+ *token += sep - str + 1;
+ return -1;
+ }
+
+ CDEBUG(D_NET, "gateway %s, priority %d, nob %d\n", str, *priority, nob);
+
+ /*
+ * Change priority separator to \0 to be able to parse NID
+ */
+ *sep = '\0';
+ return 0;
+}
int
lnet_parse_route(char *str, int *im_a_router)
@@ -624,6 +655,7 @@ lnet_parse_route(char *str, int *im_a_router)
int myrc = -1;
unsigned int hops;
int got_hops = 0;
+ unsigned int priority = 0;
INIT_LIST_HEAD(&gateways);
INIT_LIST_HEAD(&nets);
@@ -691,6 +723,11 @@ lnet_parse_route(char *str, int *im_a_router)
LNET_NETTYP(net) == LOLND)
goto token_error;
} else {
+ rc = lnet_parse_priority(ltb->ltb_text,
+ &priority, &token);
+ if (rc < 0)
+ goto token_error;
+
nid = libcfs_str2nid(ltb->ltb_text);
if (nid == LNET_NID_ANY ||
LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
@@ -720,7 +757,7 @@ lnet_parse_route(char *str, int *im_a_router)
continue;
}
- rc = lnet_add_route(net, hops, nid);
+ rc = lnet_add_route(net, hops, nid, priority);
if (rc != 0) {
CERROR("Can't create route to %s via %s\n",
libcfs_net2str(net),
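lnet_parse_priority() above accepts an optional ":<priority>" suffix on a gateway NID: sscanf's %n count is compared against the remaining string length to reject trailing junk, the caller's token pointer is advanced so any error message points at the offending text, and the separator is overwritten with '\0' so the NID parser only sees the address. A standalone sketch of the same parsing step (plain C, names are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Parse "addr[:priority]"; on success the ':' is replaced with '\0'
     * so str holds just the address, and *priority holds the suffix
     * (0 if absent). Returns -1 when the suffix is not a clean number. */
    static int parse_priority(char *str, unsigned int *priority)
    {
            char *sep = strchr(str, ':');
            int nob = 0;

            if (sep == NULL) {
                    *priority = 0;
                    return 0;
            }

            if (sscanf(sep + 1, "%u%n", priority, &nob) < 1 ||
                nob != (int)strlen(sep + 1))
                    return -1;      /* junk after (or instead of) the number */

            *sep = '\0';
            return 0;
    }

    int main(void)
    {
            char buf[] = "10.10.1.1@tcp:3";
            unsigned int prio;

            if (parse_priority(buf, &prio) == 0)
                    printf("gateway=%s priority=%u\n", buf, prio);
            return 0;
    }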
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index b6f8ad38628b..bbf43ae04ed0 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -43,8 +43,8 @@
#include <linux/lnet/lib-lnet.h>
static int local_nid_dist_zero = 1;
-CFS_MODULE_PARM(local_nid_dist_zero, "i", int, 0444,
- "Reserved");
+module_param(local_nid_dist_zero, int, 0444);
+MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
int
lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
@@ -1074,6 +1074,12 @@ lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
lnet_peer_t *p1 = r1->lr_gateway;
lnet_peer_t *p2 = r2->lr_gateway;
+ if (r1->lr_priority < r2->lr_priority)
+ return 1;
+
+ if (r1->lr_priority > r2->lr_priority)
+ return -1;
+
if (r1->lr_hops < r2->lr_hops)
return 1;
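With the two checks added above, lnet_compare_routes() ranks candidate routes first by the new lr_priority value (lower number preferred) and falls back to hop count only on a tie, so priority acts as a tier above the existing metrics. A minimal comparator sketch of that ordering (struct and names are illustrative):

    struct demo_route {
            unsigned int priority;  /* lower value wins */
            unsigned int hops;      /* tie-breaker */
    };

    /* >0 means r1 is preferred, <0 means r2 is preferred, 0 means equal. */
    static int demo_compare_routes(const struct demo_route *r1,
                                   const struct demo_route *r2)
    {
            if (r1->priority != r2->priority)
                    return r1->priority < r2->priority ? 1 : -1;

            if (r1->hops != r2->hops)
                    return r1->hops < r2->hops ? 1 : -1;

            return 0;
    }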
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index 61ae88be6f02..761f1e12f847 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -43,7 +43,7 @@
#include <linux/lnet/lib-lnet.h>
void
-lnet_build_unlink_event (lnet_libmd_t *md, lnet_event_t *ev)
+lnet_build_unlink_event(lnet_libmd_t *md, lnet_event_t *ev)
{
memset(ev, 0, sizeof(*ev));
@@ -362,7 +362,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
int rc;
int status = msg->msg_ev.status;
- LASSERT (msg->msg_onactivelist);
+ LASSERT(msg->msg_onactivelist);
if (status == 0 && msg->msg_ack) {
/* Only send an ACK if the PUT completed successfully */
@@ -432,7 +432,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
}
void
-lnet_finalize (lnet_ni_t *ni, lnet_msg_t *msg, int status)
+lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
{
struct lnet_msg_container *container;
int my_slot;
@@ -440,7 +440,7 @@ lnet_finalize (lnet_ni_t *ni, lnet_msg_t *msg, int status)
int rc;
int i;
- LASSERT (!in_interrupt ());
+ LASSERT(!in_interrupt());
if (msg == NULL)
return;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index 9b9e7d3139b0..6fffd5e96f9c 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -40,8 +40,8 @@
/* NB: add /proc interfaces in upcoming patches */
int portal_rotor = LNET_PTL_ROTOR_HASH_RT;
-CFS_MODULE_PARM(portal_rotor, "i", int, 0644,
- "redirect PUTs to different cpu-partitions");
+module_param(portal_rotor, int, 0644);
+MODULE_PARM_DESC(portal_rotor, "redirect PUTs to different cpu-partitions");
static int
lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id,
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index 6db8774ff7b7..3bd42a485a32 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -38,8 +38,8 @@
#include <linux/lnet/lib-lnet.h>
static int config_on_load;
-CFS_MODULE_PARM(config_on_load, "i", int, 0444,
- "configure network at module load");
+module_param(config_on_load, int, 0444);
+MODULE_PARM_DESC(config_on_load, "configure network at module load");
static struct mutex lnet_config_mutex;
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index a326ce06bc76..d1ee44232eef 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -34,25 +34,25 @@
#define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
static char *forwarding = "";
-CFS_MODULE_PARM(forwarding, "s", charp, 0444,
- "Explicitly enable/disable forwarding between networks");
+module_param(forwarding, charp, 0444);
+MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");
static int tiny_router_buffers;
-CFS_MODULE_PARM(tiny_router_buffers, "i", int, 0444,
- "# of 0 payload messages to buffer in the router");
+module_param(tiny_router_buffers, int, 0444);
+MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");
static int small_router_buffers;
-CFS_MODULE_PARM(small_router_buffers, "i", int, 0444,
- "# of small (1 page) messages to buffer in the router");
+module_param(small_router_buffers, int, 0444);
+MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");
static int large_router_buffers;
-CFS_MODULE_PARM(large_router_buffers, "i", int, 0444,
- "# of large messages to buffer in the router");
+module_param(large_router_buffers, int, 0444);
+MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
static int peer_buffer_credits = 0;
-CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
- "# router buffer credits per peer");
+module_param(peer_buffer_credits, int, 0444);
+MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");
static int auto_down = 1;
-CFS_MODULE_PARM(auto_down, "i", int, 0444,
- "Automatically mark peers down on comms error");
+module_param(auto_down, int, 0444);
+MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");
int
lnet_peer_buffer_credits(lnet_ni_t *ni)
@@ -81,24 +81,24 @@ lnet_peer_buffer_credits(lnet_ni_t *ni)
#endif
static int check_routers_before_use = 0;
-CFS_MODULE_PARM(check_routers_before_use, "i", int, 0444,
- "Assume routers are down and ping them before use");
+module_param(check_routers_before_use, int, 0444);
+MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");
static int avoid_asym_router_failure = 1;
-CFS_MODULE_PARM(avoid_asym_router_failure, "i", int, 0644,
- "Avoid asymmetrical router failures (0 to disable)");
+module_param(avoid_asym_router_failure, int, 0644);
+MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");
static int dead_router_check_interval = 60;
-CFS_MODULE_PARM(dead_router_check_interval, "i", int, 0644,
- "Seconds between dead router health checks (<= 0 to disable)");
+module_param(dead_router_check_interval, int, 0644);
+MODULE_PARM_DESC(dead_router_check_interval, "Seconds between dead router health checks (<= 0 to disable)");
static int live_router_check_interval = 60;
-CFS_MODULE_PARM(live_router_check_interval, "i", int, 0644,
- "Seconds between live router health checks (<= 0 to disable)");
+module_param(live_router_check_interval, int, 0644);
+MODULE_PARM_DESC(live_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");
static int router_ping_timeout = 50;
-CFS_MODULE_PARM(router_ping_timeout, "i", int, 0644,
- "Seconds to wait for the reply to a router health query");
+module_param(router_ping_timeout, int, 0644);
+MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");
int
lnet_peers_start_down(void)
@@ -301,7 +301,8 @@ lnet_add_route_to_rnet (lnet_remotenet_t *rnet, lnet_route_t *route)
}
int
-lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
+lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
+ unsigned int priority)
{
struct list_head *e;
lnet_remotenet_t *rnet;
@@ -311,8 +312,8 @@ lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
int add_route;
int rc;
- CDEBUG(D_NET, "Add route: net %s hops %u gw %s\n",
- libcfs_net2str(net), hops, libcfs_nid2str(gateway));
+ CDEBUG(D_NET, "Add route: net %s hops %u priority %u gw %s\n",
+ libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));
if (gateway == LNET_NID_ANY ||
LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
@@ -342,6 +343,7 @@ lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
rnet->lrn_net = net;
route->lr_hops = hops;
route->lr_net = net;
+ route->lr_priority = priority;
lnet_net_lock(LNET_LOCK_EX);
@@ -552,7 +554,7 @@ lnet_destroy_routes (void)
int
lnet_get_route(int idx, __u32 *net, __u32 *hops,
- lnet_nid_t *gateway, __u32 *alive)
+ lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
{
struct list_head *e1;
struct list_head *e2;
@@ -574,10 +576,11 @@ lnet_get_route(int idx, __u32 *net, __u32 *hops,
lr_list);
if (idx-- == 0) {
- *net = rnet->lrn_net;
- *hops = route->lr_hops;
- *gateway = route->lr_gateway->lp_nid;
- *alive = route->lr_gateway->lp_alive;
+ *net = rnet->lrn_net;
+ *hops = route->lr_hops;
+ *priority = route->lr_priority;
+ *gateway = route->lr_gateway->lp_nid;
+ *alive = route->lr_gateway->lp_alive;
lnet_net_unlock(cpt);
return 0;
}
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index 5e47de36c184..20d53e08705e 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -174,8 +174,8 @@ int LL_PROC_PROTO(proc_lnet_routes)
the_lnet.ln_routing ? "enabled" : "disabled");
LASSERT(tmpstr + tmpsiz - s > 0);
- s += snprintf(s, tmpstr + tmpsiz - s, "%-8s %4s %7s %s\n",
- "net", "hops", "state", "router");
+ s += snprintf(s, tmpstr + tmpsiz - s, "%-8s %4s %8s %7s %s\n",
+ "net", "hops", "priority", "state", "router");
LASSERT(tmpstr + tmpsiz - s > 0);
lnet_net_lock(0);
@@ -229,14 +229,16 @@ int LL_PROC_PROTO(proc_lnet_routes)
}
if (route != NULL) {
- __u32 net = rnet->lrn_net;
- unsigned int hops = route->lr_hops;
- lnet_nid_t nid = route->lr_gateway->lp_nid;
- int alive = route->lr_gateway->lp_alive;
+ __u32 net = rnet->lrn_net;
+ unsigned int hops = route->lr_hops;
+ unsigned int priority = route->lr_priority;
+ lnet_nid_t nid = route->lr_gateway->lp_nid;
+ int alive = route->lr_gateway->lp_alive;
s += snprintf(s, tmpstr + tmpsiz - s,
- "%-8s %4u %7s %s\n",
+ "%-8s %4u %8u %7s %s\n",
libcfs_net2str(net), hops,
+ priority,
alive ? "up" : "down",
libcfs_nid2str(nid));
LASSERT(tmpstr + tmpsiz - s > 0);
@@ -855,55 +857,46 @@ static ctl_table_t lnet_table[] = {
* to go via /proc for portability.
*/
{
- INIT_CTL_NAME(PSDEV_LNET_STATS)
.procname = "stats",
.mode = 0644,
.proc_handler = &proc_lnet_stats,
},
{
- INIT_CTL_NAME(PSDEV_LNET_ROUTES)
.procname = "routes",
.mode = 0444,
.proc_handler = &proc_lnet_routes,
},
{
- INIT_CTL_NAME(PSDEV_LNET_ROUTERS)
.procname = "routers",
.mode = 0444,
.proc_handler = &proc_lnet_routers,
},
{
- INIT_CTL_NAME(PSDEV_LNET_PEERS)
.procname = "peers",
.mode = 0444,
.proc_handler = &proc_lnet_peers,
},
{
- INIT_CTL_NAME(PSDEV_LNET_PEERS)
.procname = "buffers",
.mode = 0444,
.proc_handler = &proc_lnet_buffers,
},
{
- INIT_CTL_NAME(PSDEV_LNET_NIS)
.procname = "nis",
.mode = 0444,
.proc_handler = &proc_lnet_nis,
},
{
- INIT_CTL_NAME(PSDEV_LNET_PTL_ROTOR)
.procname = "portal_rotor",
.mode = 0644,
.proc_handler = &proc_lnet_portal_rotor,
},
{
- INIT_CTL_NAME(0)
}
};
static ctl_table_t top_table[] = {
{
- INIT_CTL_NAME(CTL_LNET)
.procname = "lnet",
.mode = 0555,
.data = NULL,
@@ -911,28 +904,23 @@ static ctl_table_t top_table[] = {
.child = lnet_table,
},
{
- INIT_CTL_NAME(0)
}
};
void
lnet_proc_init(void)
{
-#ifdef CONFIG_SYSCTL
if (lnet_table_header == NULL)
lnet_table_header = register_sysctl_table(top_table);
-#endif
}
void
lnet_proc_fini(void)
{
-#ifdef CONFIG_SYSCTL
if (lnet_table_header != NULL)
unregister_sysctl_table(lnet_table_header);
lnet_table_header = NULL;
-#endif
}
#else
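Two things happen in router_proc.c: the /proc routes table grows an 8-character "priority" column between hops and state, and the INIT_CTL_NAME() initializers disappear from the ctl_table entries, since binary sysctl numbers are no longer used. A tiny sketch of the widened row format with made-up values, just to show how the columns line up:

    #include <stdio.h>

    int main(void)
    {
            /* Same field widths as the routes table above. */
            printf("%-8s %4s %8s %7s %s\n",
                   "net", "hops", "priority", "state", "router");
            printf("%-8s %4u %8u %7s %s\n",
                   "o2ib", 1U, 0U, "up", "192.168.1.1@o2ib");
            return 0;
    }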
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index b7613c828e76..3f8020cb93e6 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -41,11 +41,12 @@
#include "selftest.h"
static int brw_srv_workitems = SFW_TEST_WI_MAX;
-CFS_MODULE_PARM(brw_srv_workitems, "i", int, 0644, "# BRW server workitems");
+module_param(brw_srv_workitems, int, 0644);
+MODULE_PARM_DESC(brw_srv_workitems, "# BRW server workitems");
static int brw_inject_errors;
-CFS_MODULE_PARM(brw_inject_errors, "i", int, 0644,
- "# data errors to inject randomly, zero by default");
+module_param(brw_inject_errors, int, 0644);
+MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default");
static void
brw_client_fini(sfw_test_instance_t *tsi)
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index bce3d3bde6b2..68e1a171209c 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -96,11 +96,11 @@ lst_session_info_ioctl(lstio_session_info_args_t *args)
{
/* no checking of key */
- if (args->lstio_ses_idp == NULL || /* address for ouput sid */
- args->lstio_ses_keyp == NULL || /* address for ouput key */
- args->lstio_ses_featp == NULL || /* address for ouput features */
+ if (args->lstio_ses_idp == NULL || /* address for output sid */
+ args->lstio_ses_keyp == NULL || /* address for output key */
+ args->lstio_ses_featp == NULL || /* address for output features */
args->lstio_ses_ndinfo == NULL || /* address for output ndinfo */
- args->lstio_ses_namep == NULL || /* address for ouput name */
+ args->lstio_ses_namep == NULL || /* address for output name */
args->lstio_ses_nmlen <= 0 ||
args->lstio_ses_nmlen > LST_NAME_SIZE)
return -EINVAL;
@@ -723,12 +723,12 @@ lst_stat_query_ioctl(lstio_stat_args_t *args)
int lst_test_add_ioctl(lstio_test_args_t *args)
{
- char *name;
- char *srcgrp = NULL;
- char *dstgrp = NULL;
- void *param = NULL;
- int ret = 0;
- int rc = -ENOMEM;
+ char *batch_name;
+ char *src_name = NULL;
+ char *dst_name = NULL;
+ void *param = NULL;
+ int ret = 0;
+ int rc = -ENOMEM;
if (args->lstio_tes_resultp == NULL ||
args->lstio_tes_retp == NULL ||
@@ -755,16 +755,16 @@ int lst_test_add_ioctl(lstio_test_args_t *args)
args->lstio_tes_param_len > PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
return -EINVAL;
- LIBCFS_ALLOC(name, args->lstio_tes_bat_nmlen + 1);
- if (name == NULL)
+ LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
+ if (batch_name == NULL)
return rc;
- LIBCFS_ALLOC(srcgrp, args->lstio_tes_sgrp_nmlen + 1);
- if (srcgrp == NULL)
+ LIBCFS_ALLOC(src_name, args->lstio_tes_sgrp_nmlen + 1);
+ if (src_name == NULL)
goto out;
- LIBCFS_ALLOC(dstgrp, args->lstio_tes_dgrp_nmlen + 1);
- if (dstgrp == NULL)
+ LIBCFS_ALLOC(dst_name, args->lstio_tes_dgrp_nmlen + 1);
+ if (dst_name == NULL)
goto out;
if (args->lstio_tes_param != NULL) {
@@ -774,39 +774,37 @@ int lst_test_add_ioctl(lstio_test_args_t *args)
}
rc = -EFAULT;
- if (copy_from_user(name,
- args->lstio_tes_bat_name,
- args->lstio_tes_bat_nmlen) ||
- copy_from_user(srcgrp,
- args->lstio_tes_sgrp_name,
- args->lstio_tes_sgrp_nmlen) ||
- copy_from_user(dstgrp,
- args->lstio_tes_dgrp_name,
- args->lstio_tes_dgrp_nmlen) ||
+ if (copy_from_user(batch_name, args->lstio_tes_bat_name,
+ args->lstio_tes_bat_nmlen) ||
+ copy_from_user(src_name, args->lstio_tes_sgrp_name,
+ args->lstio_tes_sgrp_nmlen) ||
+ copy_from_user(dst_name, args->lstio_tes_dgrp_name,
+ args->lstio_tes_dgrp_nmlen) ||
copy_from_user(param, args->lstio_tes_param,
args->lstio_tes_param_len))
goto out;
- rc = lstcon_test_add(name,
+ rc = lstcon_test_add(batch_name,
args->lstio_tes_type,
args->lstio_tes_loop,
args->lstio_tes_concur,
args->lstio_tes_dist, args->lstio_tes_span,
- srcgrp, dstgrp, param, args->lstio_tes_param_len,
+ src_name, dst_name, param,
+ args->lstio_tes_param_len,
&ret, args->lstio_tes_resultp);
if (ret != 0)
rc = (copy_to_user(args->lstio_tes_retp, &ret,
sizeof(ret))) ? -EFAULT : 0;
out:
- if (name != NULL)
- LIBCFS_FREE(name, args->lstio_tes_bat_nmlen + 1);
+ if (batch_name != NULL)
+ LIBCFS_FREE(batch_name, args->lstio_tes_bat_nmlen + 1);
- if (srcgrp != NULL)
- LIBCFS_FREE(srcgrp, args->lstio_tes_sgrp_nmlen + 1);
+ if (src_name != NULL)
+ LIBCFS_FREE(src_name, args->lstio_tes_sgrp_nmlen + 1);
- if (dstgrp != NULL)
- LIBCFS_FREE(dstgrp, args->lstio_tes_dgrp_nmlen + 1);
+ if (dst_name != NULL)
+ LIBCFS_FREE(dst_name, args->lstio_tes_dgrp_nmlen + 1);
if (param != NULL)
LIBCFS_FREE(param, args->lstio_tes_param_len);
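The lst_test_add_ioctl() changes above are mostly renames (name/srcgrp/dstgrp become batch_name/src_name/dst_name) plus re-wrapped copy_from_user() calls; the underlying idiom is to allocate a bounded kernel buffer per user-supplied name, copy it in, and release everything through the shared out: label. A hedged sketch of that allocate-and-copy step, using plain kzalloc/ERR_PTR for illustration where the Lustre code uses its LIBCFS_ALLOC/LIBCFS_FREE wrappers:

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>

    /* Copy a user-supplied name of known, bounded length into a freshly
     * allocated, NUL-terminated kernel buffer. */
    static char *demo_copy_name(const char __user *uname, int nmlen)
    {
            char *name;

            if (nmlen <= 0 || nmlen > 128)
                    return ERR_PTR(-EINVAL);

            name = kzalloc(nmlen + 1, GFP_KERNEL);
            if (name == NULL)
                    return ERR_PTR(-ENOMEM);

            if (copy_from_user(name, uname, nmlen)) {
                    kfree(name);
                    return ERR_PTR(-EFAULT);
            }

            return name;    /* kzalloc guarantees the trailing NUL */
    }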
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 9a52f25b72e9..53d58924737b 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -311,7 +311,7 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
sfw_abort_rpc(rpc);
- if (error != ETIMEDOUT)
+ if (error != ETIMEDOUT)
continue;
nd = crpc->crp_node;
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index f1152e4fbcc4..2a8eddc7db52 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -265,7 +265,7 @@ lstcon_group_decref(lstcon_group_t *grp)
}
static int
-lstcon_group_find(char *name, lstcon_group_t **grpp)
+lstcon_group_find(const char *name, lstcon_group_t **grpp)
{
lstcon_group_t *grp;
@@ -614,7 +614,7 @@ lstcon_group_del(char *name)
lstcon_group_put(grp);
/* -ref for session, it's destroyed,
- * status can't be rolled back, destroy group anway */
+ * status can't be rolled back, destroy group anyway */
lstcon_group_put(grp);
return rc;
@@ -831,7 +831,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
}
int
-lstcon_batch_find(char *name, lstcon_batch_t **batpp)
+lstcon_batch_find(const char *name, lstcon_batch_t **batpp)
{
lstcon_batch_t *bat;
@@ -1237,41 +1237,77 @@ again:
goto again;
}
-int
-lstcon_test_add(char *name, int type, int loop, int concur,
- int dist, int span, char *src_name, char * dst_name,
- void *param, int paramlen, int *retp,
- struct list_head *result_up)
+static int
+lstcon_verify_batch(const char *name, lstcon_batch_t **batch)
{
- lstcon_group_t *src_grp = NULL;
- lstcon_group_t *dst_grp = NULL;
- lstcon_test_t *test = NULL;
- lstcon_batch_t *batch;
- int rc;
+ int rc;
- rc = lstcon_batch_find(name, &batch);
+ rc = lstcon_batch_find(name, batch);
if (rc != 0) {
CDEBUG(D_NET, "Can't find batch %s\n", name);
return rc;
}
- if (batch->bat_state != LST_BATCH_IDLE) {
+ if ((*batch)->bat_state != LST_BATCH_IDLE) {
CDEBUG(D_NET, "Can't change running batch %s\n", name);
- return rc;
+ return -EINVAL;
}
- rc = lstcon_group_find(src_name, &src_grp);
+ return 0;
+}
+
+static int
+lstcon_verify_group(const char *name, lstcon_group_t **grp)
+{
+ int rc;
+ lstcon_ndlink_t *ndl;
+
+ rc = lstcon_group_find(name, grp);
if (rc != 0) {
- CDEBUG(D_NET, "Can't find group %s\n", src_name);
- goto out;
+ CDEBUG(D_NET, "can't find group %s\n", name);
+ return rc;
}
- rc = lstcon_group_find(dst_name, &dst_grp);
- if (rc != 0) {
- CDEBUG(D_NET, "Can't find group %s\n", dst_name);
- goto out;
+ list_for_each_entry(ndl, &(*grp)->grp_ndl_list, ndl_link) {
+ if (ndl->ndl_node->nd_state == LST_NODE_ACTIVE)
+ return 0;
}
+ CDEBUG(D_NET, "Group %s has no ACTIVE nodes\n", name);
+
+ return -EINVAL;
+}
+
+int
+lstcon_test_add(char *batch_name, int type, int loop,
+ int concur, int dist, int span,
+ char *src_name, char *dst_name,
+ void *param, int paramlen, int *retp,
+ struct list_head *result_up)
+{
+ lstcon_test_t *test = NULL;
+ int rc;
+ lstcon_group_t *src_grp = NULL;
+ lstcon_group_t *dst_grp = NULL;
+ lstcon_batch_t *batch = NULL;
+
+ /*
+ * verify that a batch of the given name exists, and the groups
+ * that will be part of the batch exist and have at least one
+ * active node
+ */
+ rc = lstcon_verify_batch(batch_name, &batch);
+ if (rc != 0)
+ goto out;
+
+ rc = lstcon_verify_group(src_name, &src_grp);
+ if (rc != 0)
+ goto out;
+
+ rc = lstcon_verify_group(dst_name, &dst_grp);
+ if (rc != 0)
+ goto out;
+
if (dst_grp->grp_userland)
*retp = 1;
@@ -1284,18 +1320,18 @@ lstcon_test_add(char *name, int type, int loop, int concur,
}
memset(test, 0, offsetof(lstcon_test_t, tes_param[paramlen]));
- test->tes_hdr.tsb_id = batch->bat_hdr.tsb_id;
- test->tes_batch = batch;
- test->tes_type = type;
- test->tes_oneside = 0; /* TODO */
- test->tes_loop = loop;
+ test->tes_hdr.tsb_id = batch->bat_hdr.tsb_id;
+ test->tes_batch = batch;
+ test->tes_type = type;
+ test->tes_oneside = 0; /* TODO */
+ test->tes_loop = loop;
test->tes_concur = concur;
- test->tes_stop_onerr = 1; /* TODO */
- test->tes_span = span;
- test->tes_dist = dist;
+ test->tes_stop_onerr = 1; /* TODO */
+ test->tes_span = span;
+ test->tes_dist = dist;
test->tes_cliidx = 0; /* just used for creating RPC */
- test->tes_src_grp = src_grp;
- test->tes_dst_grp = dst_grp;
+ test->tes_src_grp = src_grp;
+ test->tes_dst_grp = dst_grp;
INIT_LIST_HEAD(&test->tes_trans_list);
if (param != NULL) {
@@ -1310,7 +1346,8 @@ lstcon_test_add(char *name, int type, int loop, int concur,
if (lstcon_trans_stat()->trs_rpc_errno != 0 ||
lstcon_trans_stat()->trs_fwk_errno != 0)
- CDEBUG(D_NET, "Failed to add test %d to batch %s\n", type, name);
+ CDEBUG(D_NET, "Failed to add test %d to batch %s\n", type,
+ batch_name);
/* add to test list anyway, so user can check what's going on */
list_add_tail(&test->tes_link, &batch->bat_test_list);
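The lstcon_test_add() refactor above pulls the pre-flight checks into lstcon_verify_batch() and lstcon_verify_group(): the batch must exist and be idle, and each group must exist and contain at least one node in the ACTIVE state before the test is allocated. The group scan uses the ordinary list_for_each_entry() walk; a generic sketch of that shape (types and names are illustrative):

    #include <linux/errno.h>
    #include <linux/list.h>

    struct demo_node {
            struct list_head link;
            int active;
    };

    /* 0 if at least one node in the list is active, -EINVAL otherwise. */
    static int demo_group_has_active_node(struct list_head *nodes)
    {
            struct demo_node *nd;

            list_for_each_entry(nd, nodes, link) {
                    if (nd->active)
                            return 0;
            }

            return -EINVAL;
    }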
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index e61b26687dbb..393dc0f64109 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -100,7 +100,7 @@ typedef struct {
struct list_head *bat_cli_hash; /* hash table of client nodes */
struct list_head bat_srv_list; /* list head of server nodes */
struct list_head *bat_srv_hash; /* hash table of server nodes */
-} lstcon_batch_t; /*** (tests ) batch descritptor */
+} lstcon_batch_t; /*** (tests ) batch descriptor */
typedef struct lstcon_test {
lstcon_tsb_hdr_t tes_hdr; /* test batch header */
@@ -224,9 +224,9 @@ extern int lstcon_group_stat(char *grp_name, int timeout,
struct list_head *result_up);
extern int lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
int timeout, struct list_head *result_up);
-extern int lstcon_test_add(char *name, int type, int loop, int concur,
- int dist, int span, char *src_name, char * dst_name,
+extern int lstcon_test_add(char *batch_name, int type, int loop,
+ int concur, int dist, int span,
+ char *src_name, char *dst_name,
void *param, int paramlen, int *retp,
struct list_head *result_up);
-
#endif
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 483c78564dae..050723a0243a 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -46,12 +46,12 @@
lst_sid_t LST_INVALID_SID = {LNET_NID_ANY, -1};
static int session_timeout = 100;
-CFS_MODULE_PARM(session_timeout, "i", int, 0444,
- "test session timeout in seconds (100 by default, 0 == never)");
+module_param(session_timeout, int, 0444);
+MODULE_PARM_DESC(session_timeout, "test session timeout in seconds (100 by default, 0 == never)");
static int rpc_timeout = 64;
-CFS_MODULE_PARM(rpc_timeout, "i", int, 0644,
- "rpc timeout in seconds (64 by default, 0 == never)");
+module_param(rpc_timeout, int, 0644);
+MODULE_PARM_DESC(rpc_timeout, "rpc timeout in seconds (64 by default, 0 == never)");
#define sfw_unpack_id(id) \
do { \
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index f0f919482b56..750cac4afbb2 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -45,7 +45,8 @@
#define LST_PING_TEST_MAGIC 0xbabeface
int ping_srv_workitems = SFW_TEST_WI_MAX;
-CFS_MODULE_PARM(ping_srv_workitems, "i", int, 0644, "# PING server workitems");
+module_param(ping_srv_workitems, int, 0644);
+MODULE_PARM_DESC(ping_srv_workitems, "# PING server workitems");
typedef struct {
spinlock_t pnd_lock; /* serialize */
@@ -189,7 +190,7 @@ ping_server_handle(struct srpc_server_rpc *rpc)
LASSERT (reqstmsg->msg_type == srpc_service2request(sv->sv_id));
if (req->pnr_magic != LST_PING_TEST_MAGIC) {
- CERROR ("Unexpect magic %08x from %s\n",
+ CERROR ("Unexpected magic %08x from %s\n",
req->pnr_magic, libcfs_id2str(rpc->srpc_peer));
return -EINVAL;
}
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 7659a26676bb..d838985f51cb 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -124,7 +124,6 @@ srpc_bulk_t *
srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
{
srpc_bulk_t *bk;
- struct page **pages;
int i;
LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
@@ -140,7 +139,6 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
bk->bk_sink = sink;
bk->bk_len = bulk_len;
bk->bk_niov = bulk_npg;
- UNUSED(pages);
for (i = 0; i < bulk_npg; i++) {
struct page *pg;
@@ -718,7 +716,7 @@ srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
if (scd->scd_buf_adjust < 0 &&
scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) {
CDEBUG(D_INFO,
- "Try to recyle %d buffers but nothing left\n",
+ "Try to recycle %d buffers but nothing left\n",
scd->scd_buf_adjust);
scd->scd_buf_adjust = 0;
}
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 8053b0563ff3..228927e0f962 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -350,7 +350,7 @@ typedef struct {
} sfw_batch_t;
typedef struct {
- int (*tso_init)(struct sfw_test_instance *tsi); /* intialize test client */
+ int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test client */
void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test client */
int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
lnet_process_id_t dest,
@@ -572,9 +572,6 @@ swi_state2str (int state)
#undef STATE2STR
}
-#define UNUSED(x) ( (void)(x) )
-
-
#define selftest_wait_events() cfs_pause(cfs_time_seconds(1) / 10)
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index 82fd363679cb..b8e50ef0bb4e 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -171,19 +171,14 @@ stt_check_timers(cfs_time_t *last)
int
stt_timer_main(void *arg)
{
- int rc = 0;
- UNUSED(arg);
-
- SET_BUT_UNUSED(rc);
-
cfs_block_allsigs();
while (!stt_data.stt_shuttingdown) {
stt_check_timers(&stt_data.stt_prev_slot);
- rc = wait_event_timeout(stt_data.stt_waitq,
- stt_data.stt_shuttingdown,
- cfs_time_seconds(STTIMER_SLOTTIME));
+ wait_event_timeout(stt_data.stt_waitq,
+ stt_data.stt_shuttingdown,
+ cfs_time_seconds(STTIMER_SLOTTIME));
}
spin_lock(&stt_data.stt_lock);
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
index 93d59b6a60da..209e4c7e6f8a 100644
--- a/drivers/staging/lustre/lustre/Kconfig
+++ b/drivers/staging/lustre/lustre/Kconfig
@@ -55,6 +55,6 @@ config LUSTRE_TRANSLATE_ERRNOS
default y
config LUSTRE_LLITE_LLOOP
- bool "Lustre virtual block device"
+ tristate "Lustre virtual block device"
depends on LUSTRE_FS && BLOCK
default m
diff --git a/drivers/staging/lustre/lustre/fid/Makefile b/drivers/staging/lustre/lustre/fid/Makefile
index ed21bea162ba..d24f2df7c0af 100644
--- a/drivers/staging/lustre/lustre/fid/Makefile
+++ b/drivers/staging/lustre/lustre/fid/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_LUSTRE_FS) += fid.o
-fid-y := fid_request.o lproc_fid.o fid_lib.o
+fid-y := fid_request.o fid_lib.o
+fid-$(CONFIG_PROC_FS) += lproc_fid.o
ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
index 294070da9d43..ddd813cab501 100644
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c
@@ -54,7 +54,6 @@
#include <lustre_fid.h>
#include "fid_internal.h"
-#ifdef LPROCFS
/*
 * Note: this function is only used for testing, it is not safe for production
* use.
@@ -209,4 +208,3 @@ struct lprocfs_vars seq_client_proc_list[] = {
{ "fid", &lprocfs_fid_fid_fops },
{ NULL }
};
-#endif
diff --git a/drivers/staging/lustre/lustre/fld/Makefile b/drivers/staging/lustre/lustre/fld/Makefile
index 90d46d84fbbb..640fba4b827d 100644
--- a/drivers/staging/lustre/lustre/fld/Makefile
+++ b/drivers/staging/lustre/lustre/fld/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_LUSTRE_FS) += fld.o
-fld-y := fld_request.o fld_cache.o lproc_fld.o
+fld-y := fld_request.o fld_cache.o
+fld-$(CONFIG_PROC_FS) += lproc_fld.o
ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 45315101848c..6c379301df82 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -307,7 +307,7 @@ static void fld_cache_overlap_handle(struct fld_cache *cache,
const mdsno_t mdt = range->lsr_index;
 /* this is overlap case, these cases are checking overlapping with
- * prev range only. fixup will handle overlaping with next range. */
+ * prev range only. fixup will handle overlapping with next range. */
if (f_curr->fce_range.lsr_index == mdt) {
f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index 56686b138ac1..5f3935cc0fd7 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -190,5 +190,4 @@ fld_target_name(struct lu_fld_target *tar)
return (const char *)tar->ft_exp->exp_obd->obd_name;
}
-extern struct proc_dir_entry *fld_type_proc_dir;
#endif /* __FLD_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index e47fd50b2a2e..896f9fe83ffd 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -274,9 +274,9 @@ int fld_client_del_target(struct lu_client_fld *fld, __u64 idx)
}
EXPORT_SYMBOL(fld_client_del_target);
-#ifdef LPROCFS
struct proc_dir_entry *fld_type_proc_dir = NULL;
+#ifdef LPROCFS
static int fld_client_proc_init(struct lu_client_fld *fld)
{
int rc;
@@ -504,10 +504,7 @@ static int __init fld_mod_init(void)
fld_type_proc_dir = lprocfs_register(LUSTRE_FLD_NAME,
proc_lustre_root,
NULL, NULL);
- if (IS_ERR(fld_type_proc_dir))
- return PTR_ERR(fld_type_proc_dir);
-
- return 0;
+ return PTR_ERR_OR_ZERO(fld_type_proc_dir);
}
static void __exit fld_mod_exit(void)
diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c
index 052f7d51a07c..530adde46963 100644
--- a/drivers/staging/lustre/lustre/fld/lproc_fld.c
+++ b/drivers/staging/lustre/lustre/fld/lproc_fld.c
@@ -56,7 +56,6 @@
#include <lustre_fid.h>
#include "fld_internal.h"
-#ifdef LPROCFS
static int
fld_proc_targets_seq_show(struct seq_file *m, void *unused)
{
@@ -162,5 +161,3 @@ struct lprocfs_vars fld_client_proc_list[] = {
{ "hash", &fld_proc_hash_fops },
{ "cache_flush", &fld_proc_cache_flush_fops },
{ NULL }};
-
-#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index c485206fc6c2..4d692dcd96cf 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -2388,7 +2388,11 @@ struct cl_io {
 * Right now, only two operations need to verify layout: glimpse
* and setattr.
*/
- ci_verify_layout:1;
+ ci_verify_layout:1,
+ /**
+ * file is released, restore has to be triggered by vvp layer
+ */
+ ci_restore_needed:1;
/**
* Number of pages owned by this IO. For invariant checking.
*/
diff --git a/drivers/staging/lustre/lustre/include/dt_object.h b/drivers/staging/lustre/lustre/include/dt_object.h
index e116bb21b529..9304c269afa9 100644
--- a/drivers/staging/lustre/lustre/include/dt_object.h
+++ b/drivers/staging/lustre/lustre/include/dt_object.h
@@ -692,7 +692,7 @@ struct local_oid_storage {
struct dt_object *los_obj;
/* data used to generate new fids */
- struct mutex los_id_lock;
+ struct mutex los_id_lock;
__u64 los_seq;
__u32 los_last_oid;
};
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_acl.h b/drivers/staging/lustre/lustre/include/linux/lustre_acl.h
index ff4fc4ff2894..778b123ce31e 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_acl.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_acl.h
@@ -47,17 +47,17 @@
#error Should not include directly. Use #include <lustre_acl.h> instead
#endif
-# include <linux/fs.h>
-# include <linux/dcache.h>
-# ifdef CONFIG_FS_POSIX_ACL
-# include <linux/posix_acl_xattr.h>
-# define LUSTRE_POSIX_ACL_MAX_ENTRIES 32
-# define LUSTRE_POSIX_ACL_MAX_SIZE \
+#include <linux/fs.h>
+#include <linux/dcache.h>
+
+#include <linux/posix_acl_xattr.h>
+#define LUSTRE_POSIX_ACL_MAX_ENTRIES 32
+#define LUSTRE_POSIX_ACL_MAX_SIZE \
(sizeof(posix_acl_xattr_header) + \
LUSTRE_POSIX_ACL_MAX_ENTRIES * sizeof(posix_acl_xattr_entry))
-# endif /* CONFIG_FS_POSIX_ACL */
-# include <linux/lustre_intent.h>
-# include <linux/xattr.h> /* XATTR_{REPLACE,CREATE} */
+
+#include <linux/lustre_intent.h>
+#include <linux/xattr.h> /* XATTR_{REPLACE,CREATE} */
#ifndef LUSTRE_POSIX_ACL_MAX_SIZE
# define LUSTRE_POSIX_ACL_MAX_SIZE 0
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_debug.h b/drivers/staging/lustre/lustre/include/linux/lustre_debug.h
deleted file mode 100644
index 11deac7248ae..000000000000
--- a/drivers/staging/lustre/lustre/include/linux/lustre_debug.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LINUX_LUSTRE_DEBUG_H
-#define _LINUX_LUSTRE_DEBUG_H
-
-#ifndef _LUSTRE_DEBUG_H
-#error Do not #include this file directly. #include <lprocfs_status.h> instead
-#endif
-
-#define LL_CDEBUG_PAGE(mask, page, fmt, arg...) \
- CDEBUG(mask, "page %p map %p index %lu flags %lx count %u priv %0lx: "\
- fmt, page, page->mapping, page->index, (long)page->flags, \
- page_count(page), page_private(page), ## arg)
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_intent.h b/drivers/staging/lustre/lustre/include/linux/lustre_intent.h
index b10ddfa7df29..c491d52d86a2 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_intent.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_intent.h
@@ -52,8 +52,8 @@ struct lustre_intent_data {
struct lookup_intent {
int it_op;
- int it_flags;
int it_create_mode;
+ __u64 it_flags;
union {
struct lustre_intent_data lustre;
} d;
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_lite.h b/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
index 9e5df8dabe80..df9391275617 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
@@ -88,6 +88,7 @@ enum {
LPROC_LL_ALLOC_INODE,
LPROC_LL_SETXATTR,
LPROC_LL_GETXATTR,
+ LPROC_LL_GETXATTR_HITS,
LPROC_LL_LISTXATTR,
LPROC_LL_REMOVEXATTR,
LPROC_LL_INODE_PERM,
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index 56b05728f611..428e3e4ce05c 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -370,6 +370,10 @@ static inline void s2dhms(struct dhms *ts, time_t secs)
#define JOBSTATS_DISABLE "disable"
#define JOBSTATS_PROCNAME_UID "procname_uid"
+extern int lprocfs_write_frac_helper(const char *buffer, unsigned long count,
+ int *val, int mult);
+extern int lprocfs_read_frac_helper(char *buffer, unsigned long count,
+ long val, int mult);
#ifdef LPROCFS
extern int lprocfs_stats_alloc_one(struct lprocfs_stats *stats,
@@ -641,11 +645,7 @@ extern int lprocfs_rd_filesfree(struct seq_file *m, void *data);
extern int lprocfs_write_helper(const char *buffer, unsigned long count,
int *val);
-extern int lprocfs_write_frac_helper(const char *buffer, unsigned long count,
- int *val, int mult);
extern int lprocfs_seq_read_frac_helper(struct seq_file *m, long val, int mult);
-extern int lprocfs_read_frac_helper(char *buffer, unsigned long count,
- long val, int mult);
extern int lprocfs_write_u64_helper(const char *buffer, unsigned long count,
__u64 *val);
extern int lprocfs_write_frac_u64_helper(const char *buffer,
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index d5b8225ef1a7..6773bca1e0d8 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -398,17 +398,6 @@ static inline int lu_device_is_md(const struct lu_device *d)
}
/**
- * Flags for the object layers.
- */
-enum lu_object_flags {
- /**
- * this flags is set if lu_object_operations::loo_object_init() has
- * been called for this layer. Used by lu_object_alloc().
- */
- LU_OBJECT_ALLOCATED = (1 << 0)
-};
-
-/**
* Common object attributes.
*/
struct lu_attr {
@@ -486,14 +475,6 @@ struct lu_object {
*/
struct list_head lo_linkage;
/**
- * Depth. Top level layer depth is 0.
- */
- int lo_depth;
- /**
- * Flags from enum lu_object_flags.
- */
- __u32 lo_flags;
- /**
* Link to the device, for debugging.
*/
struct lu_ref_link lo_dev_ref;
diff --git a/drivers/staging/lustre/lustre/include/lu_target.h b/drivers/staging/lustre/lustre/include/lu_target.h
deleted file mode 100644
index 8d48cf4e27ee..000000000000
--- a/drivers/staging/lustre/lustre/include/lu_target.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LUSTRE_LU_TARGET_H
-#define _LUSTRE_LU_TARGET_H
-
-#include <dt_object.h>
-#include <lustre_disk.h>
-
-struct lu_target {
- struct obd_device *lut_obd;
- struct dt_device *lut_bottom;
- /** last_rcvd file */
- struct dt_object *lut_last_rcvd;
- /* transaction callbacks */
- struct dt_txn_callback lut_txn_cb;
- /** server data in last_rcvd file */
- struct lr_server_data lut_lsd;
- /** Server last transaction number */
- __u64 lut_last_transno;
- /** Lock protecting last transaction number */
- spinlock_t lut_translock;
- /** Lock protecting client bitmap */
- spinlock_t lut_client_bitmap_lock;
- /** Bitmap of known clients */
- unsigned long *lut_client_bitmap;
-};
-
-typedef void (*tgt_cb_t)(struct lu_target *lut, __u64 transno,
- void *data, int err);
-struct tgt_commit_cb {
- tgt_cb_t tgt_cb_func;
- void *tgt_cb_data;
-};
-
-void tgt_boot_epoch_update(struct lu_target *lut);
-int tgt_last_commit_cb_add(struct thandle *th, struct lu_target *lut,
- struct obd_export *exp, __u64 transno);
-int tgt_new_client_cb_add(struct thandle *th, struct obd_export *exp);
-int tgt_init(const struct lu_env *env, struct lu_target *lut,
- struct obd_device *obd, struct dt_device *dt);
-void tgt_fini(const struct lu_env *env, struct lu_target *lut);
-int tgt_client_alloc(struct obd_export *exp);
-void tgt_client_free(struct obd_export *exp);
-int tgt_client_del(const struct lu_env *env, struct obd_export *exp);
-int tgt_client_add(const struct lu_env *env, struct obd_export *exp, int);
-int tgt_client_new(const struct lu_env *env, struct obd_export *exp);
-int tgt_client_data_read(const struct lu_env *env, struct lu_target *tg,
- struct lsd_client_data *lcd, loff_t *off, int index);
-int tgt_client_data_write(const struct lu_env *env, struct lu_target *tg,
- struct lsd_client_data *lcd, loff_t *off, struct thandle *th);
-int tgt_server_data_read(const struct lu_env *env, struct lu_target *tg);
-int tgt_server_data_write(const struct lu_env *env, struct lu_target *tg,
- struct thandle *th);
-int tgt_server_data_update(const struct lu_env *env, struct lu_target *tg, int sync);
-int tgt_truncate_last_rcvd(const struct lu_env *env, struct lu_target *tg, loff_t off);
-
-#endif /* __LUSTRE_LU_TARGET_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre/liblustreapi.h b/drivers/staging/lustre/lustre/include/lustre/liblustreapi.h
deleted file mode 100644
index 707eb74fdf68..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre/liblustreapi.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-/*
- * NOTE: This file is DEPRECATED! Please include lustreapi.h directly
- * instead of this file. This file will be removed from a future version
- * of lustre!
- */
-
-#ifndef _LIBLUSTREAPI_H_
-#define _LIBLUSTREAPI_H_
-
-#include <lustre/lustreapi.h>
-#warning "Including liblustreapi.h is deprecated. Include lustreapi.h directly."
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 5ca18d016014..5da31c54924a 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -321,8 +321,11 @@ static inline int range_compare_loc(const struct lu_seq_range *r1,
* xattr.
*/
enum lma_compat {
- LMAC_HSM = 0x00000001,
- LMAC_SOM = 0x00000002,
+ LMAC_HSM = 0x00000001,
+ LMAC_SOM = 0x00000002,
+ LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */
+ LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
+ * under /O/<seq>/d<x>. */
};
/**
@@ -331,10 +334,10 @@ enum lma_compat {
* This information is stored in lustre_mdt_attrs::lma_incompat.
*/
enum lma_incompat {
- LMAI_RELEASED = 0x0000001, /* file is released */
- LMAI_AGENT = 0x00000002, /* agent inode */
- LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object
- is on the remote MDT */
+ LMAI_RELEASED = 0x00000001, /* file is released */
+ LMAI_AGENT = 0x00000002, /* agent inode */
+ LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object
+ is on the remote MDT */
};
#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT)
@@ -1025,6 +1028,18 @@ struct luda_type {
__u16 lt_type;
};
+#ifndef IFSHIFT
+#define IFSHIFT 12
+#endif
+
+#ifndef IFTODT
+#define IFTODT(type) (((type) & S_IFMT) >> IFSHIFT)
+#endif
+#ifndef DTTOIF
+#define DTTOIF(dirtype) ((dirtype) << IFSHIFT)
+#endif
+
+
struct lu_dirpage {
__u64 ldp_hash_start;
__u64 ldp_hash_end;
@@ -1353,7 +1368,7 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
OBD_CONNECT_EINPROGRESS | \
OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \
OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\
- OBD_CONNECT_PINGLESS)
+ OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE)
#define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
@@ -1725,10 +1740,7 @@ static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */
#define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */
#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */
-
-/* OBD_MD_MDTIDX is used to get MDT index, but it is never been used overwire,
- * and it is already obsolete since 2.3 */
-/* #define OBD_MD_MDTIDX (0x0000000800000000ULL) */
+#define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */
#define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */
#define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */
@@ -1740,7 +1752,9 @@ static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
#define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */
#define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */
#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
- * under lock */
+ * under lock; for xattr
+ * requests means the
+ * client holds the lock */
#define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */
#define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
@@ -1749,6 +1763,7 @@ static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
#define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */
#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
+#define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */
#define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \
@@ -1756,6 +1771,9 @@ static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP)
+#define OBD_MD_FLXATTRLOCKED OBD_MD_FLGETATTRLOCK
+#define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
+
/* don't forget obdo_fid which is way down at the bottom so it can
* come after the definition of llog_cookie */
@@ -2120,6 +2138,7 @@ extern void lustre_swab_generic_32s (__u32 *val);
#define DISP_ENQ_OPEN_REF 0x00800000
#define DISP_ENQ_CREATE_REF 0x01000000
#define DISP_OPEN_LOCK 0x02000000
+#define DISP_OPEN_LEASE 0x04000000
/* INODE LOCK PARTS */
#define MDS_INODELOCK_LOOKUP 0x000001 /* dentry, mode, owner, group */
@@ -2127,8 +2146,9 @@ extern void lustre_swab_generic_32s (__u32 *val);
#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
#define MDS_INODELOCK_PERM 0x000010 /* for permission */
+#define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
-#define MDS_INODELOCK_MAXSHIFT 4
+#define MDS_INODELOCK_MAXSHIFT 5
/* This FULL lock is useful to take on unlink sort of operations */
#define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
@@ -2207,6 +2227,11 @@ static inline int ll_inode_to_ext_flags(int iflags)
((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
}
+/* 64 possible states */
+enum md_transient_state {
+ MS_RESTORE = (1 << 0), /* restore is running */
+};
+
struct mdt_body {
struct lu_fid fid1;
struct lu_fid fid2;
@@ -2218,7 +2243,9 @@ struct mdt_body {
obd_time ctime;
__u64 blocks; /* XID, in the case of MDS_READPAGE */
__u64 ioepoch;
- __u64 unused1; /* was "ino" until 2.4.0 */
+ __u64 t_state; /* transient file state defined in
+ * enum md_transient_state
+ * was "ino" until 2.4.0 */
__u32 fsuid;
__u32 fsgid;
__u32 capability;
@@ -2373,6 +2400,11 @@ extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa);
* hsm restore) */
#define MDS_OPEN_VOLATILE 0400000000000ULL /* File is volatile = created
unlinked */
+#define MDS_OPEN_LEASE 01000000000000ULL /* Open the file and grant lease
+ * delegation, succeed if it's not
+ * being opened with conflict mode.
+ */
+#define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */
/* permission for create non-directory file */
#define MAY_CREATE (1 << 7)
@@ -2391,7 +2423,7 @@ extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa);
/* lfs rgetfacl permission check */
#define MAY_RGETFACL (1 << 14)
-enum {
+enum mds_op_bias {
MDS_CHECK_SPLIT = 1 << 0,
MDS_CROSS_REF = 1 << 1,
MDS_VTX_BYPASS = 1 << 2,
@@ -2404,6 +2436,7 @@ enum {
MDS_DATA_MODIFIED = 1 << 9,
MDS_CREATE_VOLATILE = 1 << 10,
MDS_OWNEROVERRIDE = 1 << 11,
+ MDS_HSM_RELEASE = 1 << 12,
};
/* instance of mdt_reint_rec */
@@ -3727,5 +3760,14 @@ struct mdc_swap_layouts {
void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
+struct close_data {
+ struct lustre_handle cd_handle;
+ struct lu_fid cd_fid;
+ __u64 cd_data_version;
+ __u64 cd_reserved[8];
+};
+
+void lustre_swab_close_data(struct close_data *data);
+
#endif
/** @} lustreidl */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index c7bd4473a1d0..7893d83e131f 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -244,6 +244,9 @@ struct ost_id {
#define LL_IOC_LMV_SETSTRIPE _IOWR('f', 240, struct lmv_user_md)
#define LL_IOC_LMV_GETSTRIPE _IOWR('f', 241, struct lmv_user_md)
#define LL_IOC_REMOVE_ENTRY _IOWR('f', 242, __u64)
+#define LL_IOC_SET_LEASE _IOWR('f', 243, long)
+#define LL_IOC_GET_LEASE _IO('f', 244)
+#define LL_IOC_HSM_IMPORT _IOWR('f', 245, struct hsm_user_import)
#define LL_STATFS_LMV 1
#define LL_STATFS_LOV 2
@@ -425,8 +428,8 @@ struct obd_uuid {
char uuid[UUID_MAX];
};
-static inline int obd_uuid_equals(const struct obd_uuid *u1,
- const struct obd_uuid *u2)
+static inline bool obd_uuid_equals(const struct obd_uuid *u1,
+ const struct obd_uuid *u2)
{
return strcmp((char *)u1->uuid, (char *)u2->uuid) == 0;
}
@@ -443,7 +446,7 @@ static inline void obd_str2uuid(struct obd_uuid *uuid, const char *tmp)
}
/* For printf's only, make sure uuid is terminated */
-static inline char *obd_uuid2str(struct obd_uuid *uuid)
+static inline char *obd_uuid2str(const struct obd_uuid *uuid)
{
if (uuid->uuid[sizeof(*uuid) - 1] != '\0') {
/* Obviously not safe, but for printfs, no real harm done...
@@ -620,10 +623,13 @@ struct if_quotactl {
};
/* swap layout flags */
-#define SWAP_LAYOUTS_CHECK_DV1 (1 << 0)
-#define SWAP_LAYOUTS_CHECK_DV2 (1 << 1)
-#define SWAP_LAYOUTS_KEEP_MTIME (1 << 2)
-#define SWAP_LAYOUTS_KEEP_ATIME (1 << 3)
+#define SWAP_LAYOUTS_CHECK_DV1 (1 << 0)
+#define SWAP_LAYOUTS_CHECK_DV2 (1 << 1)
+#define SWAP_LAYOUTS_KEEP_MTIME (1 << 2)
+#define SWAP_LAYOUTS_KEEP_ATIME (1 << 3)
+
+/* Swap XATTR_NAME_HSM as well, only on the MDT so far */
+#define SWAP_LAYOUTS_MDS_HSM (1 << 31)
struct lustre_swap_layouts {
__u64 sl_flags;
__u32 sl_fd;
@@ -754,7 +760,8 @@ static inline void hsm_set_cl_error(int *flags, int error)
*flags |= (error << CLF_HSM_ERR_L);
}
-#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + sizeof(struct changelog_rec))
+#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + \
+ sizeof(struct changelog_ext_rec))
struct changelog_rec {
__u16 cr_namelen;
@@ -1118,13 +1125,27 @@ static inline int hal_size(struct hsm_action_list *hal)
sz = sizeof(*hal) + cfs_size_round(strlen(hal->hal_fsname));
hai = hai_zero(hal);
- for (i = 0 ; i < hal->hal_count ; i++) {
+ for (i = 0; i < hal->hal_count; i++, hai = hai_next(hai))
sz += cfs_size_round(hai->hai_len);
- hai = hai_next(hai);
- }
- return(sz);
+
+ return sz;
}
+/* HSM file import
+ * describe the attributes to be set on imported file
+ */
+struct hsm_user_import {
+ __u64 hui_size;
+ __u64 hui_atime;
+ __u64 hui_mtime;
+ __u32 hui_atime_ns;
+ __u32 hui_mtime_ns;
+ __u32 hui_uid;
+ __u32 hui_gid;
+ __u32 hui_mode;
+ __u32 hui_archive_id;
+};
+
/* Copytool progress reporting */
#define HP_FLAG_COMPLETED 0x01
#define HP_FLAG_RETRY 0x02
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustreapi.h b/drivers/staging/lustre/lustre/include/lustre/lustreapi.h
deleted file mode 100644
index 63da66506639..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre/lustreapi.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LUSTREAPI_H_
-#define _LUSTREAPI_H_
-
-/** \defgroup llapi llapi
- *
- * @{
- */
-
-#include <lustre/lustre_user.h>
-
-typedef void (*llapi_cb_t)(char *obd_type_name, char *obd_name, char *obd_uuid, void *args);
-
-/* lustreapi message severity level */
-enum llapi_message_level {
- LLAPI_MSG_OFF = 0,
- LLAPI_MSG_FATAL = 1,
- LLAPI_MSG_ERROR = 2,
- LLAPI_MSG_WARN = 3,
- LLAPI_MSG_NORMAL = 4,
- LLAPI_MSG_INFO = 5,
- LLAPI_MSG_DEBUG = 6,
- LLAPI_MSG_MAX
-};
-
-/* the bottom three bits reserved for llapi_message_level */
-#define LLAPI_MSG_MASK 0x00000007
-#define LLAPI_MSG_NO_ERRNO 0x00000010
-
-extern void llapi_msg_set_level(int level);
-extern void llapi_error(int level, int rc, char *fmt, ...);
-#define llapi_err_noerrno(level, fmt, a...) \
- llapi_error((level) | LLAPI_MSG_NO_ERRNO, 0, fmt, ## a)
-extern void llapi_printf(int level, char *fmt, ...);
-extern int llapi_file_create(const char *name, unsigned long long stripe_size,
- int stripe_offset, int stripe_count,
- int stripe_pattern);
-extern int llapi_file_open(const char *name, int flags, int mode,
- unsigned long long stripe_size, int stripe_offset,
- int stripe_count, int stripe_pattern);
-extern int llapi_file_create_pool(const char *name,
- unsigned long long stripe_size,
- int stripe_offset, int stripe_count,
- int stripe_pattern, char *pool_name);
-extern int llapi_file_open_pool(const char *name, int flags, int mode,
- unsigned long long stripe_size,
- int stripe_offset, int stripe_count,
- int stripe_pattern, char *pool_name);
-extern int llapi_poollist(const char *name);
-extern int llapi_get_poollist(const char *name, char **poollist, int list_size,
- char *buffer, int buffer_size);
-extern int llapi_get_poolmembers(const char *poolname, char **members,
- int list_size, char *buffer, int buffer_size);
-extern int llapi_file_get_stripe(const char *path, struct lov_user_md *lum);
-#define HAVE_LLAPI_FILE_LOOKUP
-extern int llapi_file_lookup(int dirfd, const char *name);
-
-#define VERBOSE_COUNT 0x1
-#define VERBOSE_SIZE 0x2
-#define VERBOSE_OFFSET 0x4
-#define VERBOSE_POOL 0x8
-#define VERBOSE_DETAIL 0x10
-#define VERBOSE_OBJID 0x20
-#define VERBOSE_GENERATION 0x40
-#define VERBOSE_MDTINDEX 0x80
-#define VERBOSE_ALL (VERBOSE_COUNT | VERBOSE_SIZE | VERBOSE_OFFSET | \
- VERBOSE_POOL | VERBOSE_OBJID | VERBOSE_GENERATION)
-
-struct find_param {
- unsigned int maxdepth;
- time_t atime;
- time_t mtime;
- time_t ctime;
- int asign; /* cannot be bitfields due to using pointers to */
- int csign; /* access them during argument parsing. */
- int msign;
- int type;
- int size_sign:2, /* these need to be signed values */
- stripesize_sign:2,
- stripecount_sign:2;
- unsigned long long size;
- unsigned long long size_units;
- uid_t uid;
- gid_t gid;
-
- unsigned long zeroend:1,
- recursive:1,
- exclude_pattern:1,
- exclude_type:1,
- exclude_obd:1,
- exclude_mdt:1,
- exclude_gid:1,
- exclude_uid:1,
- check_gid:1, /* group ID */
- check_uid:1, /* user ID */
- check_pool:1, /* LOV pool name */
- check_size:1, /* file size */
- exclude_pool:1,
- exclude_size:1,
- exclude_atime:1,
- exclude_mtime:1,
- exclude_ctime:1,
- get_lmv:1, /* get MDT list from LMV */
- raw:1, /* do not fill in defaults */
- check_stripesize:1, /* LOV stripe size */
- exclude_stripesize:1,
- check_stripecount:1, /* LOV stripe count */
- exclude_stripecount:1;
-
- int verbose;
- int quiet;
-
- /* regular expression */
- char *pattern;
-
- char *print_fmt;
-
- struct obd_uuid *obduuid;
- int num_obds;
- int num_alloc_obds;
- int obdindex;
- int *obdindexes;
-
- struct obd_uuid *mdtuuid;
- int num_mdts;
- int num_alloc_mdts;
- int mdtindex;
- int *mdtindexes;
- int file_mdtindex;
-
- int lumlen;
- struct lov_user_mds_data *lmd;
-
- char poolname[LOV_MAXPOOLNAME + 1];
-
- int fp_lmv_count;
- struct lmv_user_md *fp_lmv_md;
-
- unsigned long long stripesize;
- unsigned long long stripesize_units;
- unsigned long long stripecount;
-
- /* In-process parameters. */
- unsigned long got_uuids:1,
- obds_printed:1,
- have_fileinfo:1; /* file attrs and LOV xattr */
- unsigned int depth;
- dev_t st_dev;
-};
-
-extern int llapi_ostlist(char *path, struct find_param *param);
-extern int llapi_uuid_match(char *real_uuid, char *search_uuid);
-extern int llapi_getstripe(char *path, struct find_param *param);
-extern int llapi_find(char *path, struct find_param *param);
-
-extern int llapi_file_fget_mdtidx(int fd, int *mdtidx);
-extern int llapi_dir_create_pool(const char *name, int flags, int stripe_offset,
- int stripe_count, int stripe_pattern,
- char *poolname);
-int llapi_direntry_remove(char *dname);
-extern int llapi_obd_statfs(char *path, __u32 type, __u32 index,
- struct obd_statfs *stat_buf,
- struct obd_uuid *uuid_buf);
-extern int llapi_ping(char *obd_type, char *obd_name);
-extern int llapi_target_check(int num_types, char **obd_types, char *dir);
-extern int llapi_file_get_lov_uuid(const char *path, struct obd_uuid *lov_uuid);
-extern int llapi_file_get_lmv_uuid(const char *path, struct obd_uuid *lmv_uuid);
-extern int llapi_file_fget_lov_uuid(int fd, struct obd_uuid *lov_uuid);
-extern int llapi_lov_get_uuids(int fd, struct obd_uuid *uuidp, int *ost_count);
-extern int llapi_lmv_get_uuids(int fd, struct obd_uuid *uuidp, int *mdt_count);
-extern int llapi_is_lustre_mnttype(const char *type);
-extern int llapi_search_ost(char *fsname, char *poolname, char *ostname);
-extern int llapi_get_obd_count(char *mnt, int *count, int is_mdt);
-extern int parse_size(char *optarg, unsigned long long *size,
- unsigned long long *size_units, int bytes_spec);
-extern int llapi_search_mounts(const char *pathname, int index,
- char *mntdir, char *fsname);
-extern int llapi_search_fsname(const char *pathname, char *fsname);
-extern int llapi_getname(const char *path, char *buf, size_t size);
-
-extern void llapi_ping_target(char *obd_type, char *obd_name,
- char *obd_uuid, void *args);
-
-extern int llapi_search_rootpath(char *pathname, const char *fsname);
-
-struct mntent;
-#define HAVE_LLAPI_IS_LUSTRE_MNT
-extern int llapi_is_lustre_mnt(struct mntent *mnt);
-extern int llapi_quotachown(char *path, int flag);
-extern int llapi_quotacheck(char *mnt, int check_type);
-extern int llapi_poll_quotacheck(char *mnt, struct if_quotacheck *qchk);
-extern int llapi_quotactl(char *mnt, struct if_quotactl *qctl);
-extern int llapi_target_iterate(int type_num, char **obd_type, void *args,
- llapi_cb_t cb);
-extern int llapi_get_connect_flags(const char *mnt, __u64 *flags);
-extern int llapi_lsetfacl(int argc, char *argv[]);
-extern int llapi_lgetfacl(int argc, char *argv[]);
-extern int llapi_rsetfacl(int argc, char *argv[]);
-extern int llapi_rgetfacl(int argc, char *argv[]);
-extern int llapi_cp(int argc, char *argv[]);
-extern int llapi_ls(int argc, char *argv[]);
-extern int llapi_fid2path(const char *device, const char *fidstr, char *path,
- int pathlen, long long *recno, int *linkno);
-extern int llapi_path2fid(const char *path, lustre_fid *fid);
-extern int llapi_fd2fid(const int fd, lustre_fid *fid);
-
-extern int llapi_get_version(char *buffer, int buffer_size, char **version);
-extern int llapi_get_data_version(int fd, __u64 *data_version, __u64 flags);
-extern int llapi_hsm_state_get(const char *path, struct hsm_user_state *hus);
-extern int llapi_hsm_state_set(const char *path, __u64 setmask, __u64 clearmask,
- __u32 archive_id);
-
-extern int llapi_create_volatile_idx(char *directory, int idx, int mode);
-static inline int llapi_create_volatile(char *directory, int mode)
-{
- return llapi_create_volatile_idx(directory, -1, mode);
-}
-
-
-extern int llapi_fswap_layouts(const int fd1, const int fd2,
- __u64 dv1, __u64 dv2, __u64 flags);
-extern int llapi_swap_layouts(const char *path1, const char *path2,
- __u64 dv1, __u64 dv2, __u64 flags);
-
-/* Changelog interface. priv is private state, managed internally
- by these functions */
-#define CHANGELOG_FLAG_FOLLOW 0x01 /* Not yet implemented */
-#define CHANGELOG_FLAG_BLOCK 0x02 /* Blocking IO makes sense in case of
- slow user parsing of the records, but it also prevents us from cleaning
- up if the records are not consumed. */
-
-/* Records received are in extentded format now, though most of them are still
- * written in disk in changelog_rec format (to save space and time), it's
- * converted to extented format in the lustre api to ease changelog analysis. */
-#define HAVE_CHANGELOG_EXTEND_REC 1
-
-extern int llapi_changelog_start(void **priv, int flags, const char *mdtname,
- long long startrec);
-extern int llapi_changelog_fini(void **priv);
-extern int llapi_changelog_recv(void *priv, struct changelog_ext_rec **rech);
-extern int llapi_changelog_free(struct changelog_ext_rec **rech);
-/* Allow records up to endrec to be destroyed; requires registered id. */
-extern int llapi_changelog_clear(const char *mdtname, const char *idstr,
- long long endrec);
-
-/* HSM copytool interface.
- * priv is private state, managed internally by these functions
- */
-struct hsm_copytool_private;
-extern int llapi_hsm_copytool_start(struct hsm_copytool_private **priv,
- char *fsname, int flags,
- int archive_count, int *archives);
-extern int llapi_hsm_copytool_fini(struct hsm_copytool_private **priv);
-extern int llapi_hsm_copytool_recv(struct hsm_copytool_private *priv,
- struct hsm_action_list **hal, int *msgsize);
-extern int llapi_hsm_copytool_free(struct hsm_action_list **hal);
-extern int llapi_hsm_copy_start(char *mnt, struct hsm_copy *copy,
- const struct hsm_action_item *hai);
-extern int llapi_hsm_copy_end(char *mnt, struct hsm_copy *copy,
- const struct hsm_progress *hp);
-extern int llapi_hsm_progress(char *mnt, struct hsm_progress *hp);
-extern int llapi_hsm_import(const char *dst, int archive, struct stat *st,
- unsigned long long stripe_size, int stripe_offset,
- int stripe_count, int stripe_pattern,
- char *pool_name, lustre_fid *newfid);
-
-/* HSM user interface */
-extern struct hsm_user_request *llapi_hsm_user_request_alloc(int itemcount,
- int data_len);
-extern int llapi_hsm_request(char *mnt, struct hsm_user_request *request);
-extern int llapi_hsm_current_action(const char *path,
- struct hsm_current_action *hca);
-/** @} llapi */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_debug.h b/drivers/staging/lustre/lustre/include/lustre_debug.h
index 3d9e4462af43..7ec91edd6800 100644
--- a/drivers/staging/lustre/lustre/include/lustre_debug.h
+++ b/drivers/staging/lustre/lustre/include/lustre_debug.h
@@ -45,25 +45,6 @@
#include <lustre_net.h>
#include <obd.h>
-#include <linux/lustre_debug.h>
-
-#define ASSERT_MAX_SIZE_MB 60000ULL
-#define ASSERT_PAGE_INDEX(index, OP) \
-do { if (index > ASSERT_MAX_SIZE_MB << (20 - PAGE_CACHE_SHIFT)) { \
- CERROR("bad page index %lu > %llu\n", index, \
- ASSERT_MAX_SIZE_MB << (20 - PAGE_CACHE_SHIFT)); \
- libcfs_debug = ~0UL; \
- OP; \
-}} while(0)
-
-#define ASSERT_FILE_OFFSET(offset, OP) \
-do { if (offset > ASSERT_MAX_SIZE_MB << 20) { \
- CERROR("bad file offset %llu > %llu\n", offset, \
- ASSERT_MAX_SIZE_MB << 20); \
- libcfs_debug = ~0UL; \
- OP; \
-}} while(0)
-
/* lib/debug.c */
void dump_lniobuf(struct niobuf_local *lnb);
int dump_req(struct ptlrpc_request *req);
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 9228b165b258..1de9a8bed497 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -50,6 +50,7 @@
#include <linux/libcfs/libcfs.h>
#include <linux/lnet/types.h>
+#include <linux/backing-dev.h>
/****************** on-disk files *********************/
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index bc2b82ffae92..ec4bb5e3c13e 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -1285,10 +1285,11 @@ void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_get(struct ldlm_namespace *ns);
void ldlm_namespace_put(struct ldlm_namespace *ns);
-int ldlm_proc_setup(void);
#ifdef LPROCFS
+int ldlm_proc_setup(void);
void ldlm_proc_cleanup(void);
#else
+static inline int ldlm_proc_setup(void) { return 0; }
static inline void ldlm_proc_cleanup(void) {}
#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
index 8c34d9d4d258..75716f17f64b 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
@@ -35,7 +35,7 @@
#ifndef LDLM_ALL_FLAGS_MASK
/** l_flags bits marked as "all_flags" bits */
-#define LDLM_FL_ALL_FLAGS_MASK 0x007FFFFFC08F132FULL
+#define LDLM_FL_ALL_FLAGS_MASK 0x00FFFFFFC08F132FULL
/** l_flags bits marked as "ast" bits */
#define LDLM_FL_AST_MASK 0x0000000080000000ULL
@@ -53,7 +53,7 @@
#define LDLM_FL_INHERIT_MASK 0x0000000000800000ULL
/** l_flags bits marked as "local_only" bits */
-#define LDLM_FL_LOCAL_ONLY_MASK 0x007FFFFF00000000ULL
+#define LDLM_FL_LOCAL_ONLY_MASK 0x00FFFFFF00000000ULL
/** l_flags bits marked as "on_wire" bits */
#define LDLM_FL_ON_WIRE_MASK 0x00000000C08F132FULL
@@ -358,6 +358,12 @@
#define ldlm_set_ns_srv(_l) LDLM_SET_FLAG(( _l), 1ULL << 54)
#define ldlm_clear_ns_srv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 54)
+/** Flag whether this lock can be reused. Used by exclusive open. */
+#define LDLM_FL_EXCL 0x0080000000000000ULL /* bit 55 */
+#define ldlm_is_excl(_l) LDLM_TEST_FLAG((_l), 1ULL << 55)
+#define ldlm_set_excl(_l) LDLM_SET_FLAG((_l), 1ULL << 55)
+#define ldlm_clear_excl(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 55)
+
/** test for ldlm_lock flag bit set */
#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
@@ -414,47 +420,49 @@ static int hf_lustre_ldlm_fl_server_lock = -1;
static int hf_lustre_ldlm_fl_res_locked = -1;
static int hf_lustre_ldlm_fl_waited = -1;
static int hf_lustre_ldlm_fl_ns_srv = -1;
+static int hf_lustre_ldlm_fl_excl = -1;
const value_string lustre_ldlm_flags_vals[] = {
- {LDLM_FL_LOCK_CHANGED, "LDLM_FL_LOCK_CHANGED"},
- {LDLM_FL_BLOCK_GRANTED, "LDLM_FL_BLOCK_GRANTED"},
- {LDLM_FL_BLOCK_CONV, "LDLM_FL_BLOCK_CONV"},
- {LDLM_FL_BLOCK_WAIT, "LDLM_FL_BLOCK_WAIT"},
- {LDLM_FL_AST_SENT, "LDLM_FL_AST_SENT"},
- {LDLM_FL_REPLAY, "LDLM_FL_REPLAY"},
- {LDLM_FL_INTENT_ONLY, "LDLM_FL_INTENT_ONLY"},
- {LDLM_FL_HAS_INTENT, "LDLM_FL_HAS_INTENT"},
- {LDLM_FL_DISCARD_DATA, "LDLM_FL_DISCARD_DATA"},
- {LDLM_FL_NO_TIMEOUT, "LDLM_FL_NO_TIMEOUT"},
- {LDLM_FL_BLOCK_NOWAIT, "LDLM_FL_BLOCK_NOWAIT"},
- {LDLM_FL_TEST_LOCK, "LDLM_FL_TEST_LOCK"},
- {LDLM_FL_CANCEL_ON_BLOCK, "LDLM_FL_CANCEL_ON_BLOCK"},
- {LDLM_FL_DENY_ON_CONTENTION, "LDLM_FL_DENY_ON_CONTENTION"},
- {LDLM_FL_AST_DISCARD_DATA, "LDLM_FL_AST_DISCARD_DATA"},
- {LDLM_FL_FAIL_LOC, "LDLM_FL_FAIL_LOC"},
- {LDLM_FL_SKIPPED, "LDLM_FL_SKIPPED"},
- {LDLM_FL_CBPENDING, "LDLM_FL_CBPENDING"},
- {LDLM_FL_WAIT_NOREPROC, "LDLM_FL_WAIT_NOREPROC"},
- {LDLM_FL_CANCEL, "LDLM_FL_CANCEL"},
- {LDLM_FL_LOCAL_ONLY, "LDLM_FL_LOCAL_ONLY"},
- {LDLM_FL_FAILED, "LDLM_FL_FAILED"},
- {LDLM_FL_CANCELING, "LDLM_FL_CANCELING"},
- {LDLM_FL_LOCAL, "LDLM_FL_LOCAL"},
- {LDLM_FL_LVB_READY, "LDLM_FL_LVB_READY"},
- {LDLM_FL_KMS_IGNORE, "LDLM_FL_KMS_IGNORE"},
- {LDLM_FL_CP_REQD, "LDLM_FL_CP_REQD"},
- {LDLM_FL_CLEANED, "LDLM_FL_CLEANED"},
- {LDLM_FL_ATOMIC_CB, "LDLM_FL_ATOMIC_CB"},
- {LDLM_FL_BL_AST, "LDLM_FL_BL_AST"},
- {LDLM_FL_BL_DONE, "LDLM_FL_BL_DONE"},
- {LDLM_FL_NO_LRU, "LDLM_FL_NO_LRU"},
- {LDLM_FL_FAIL_NOTIFIED, "LDLM_FL_FAIL_NOTIFIED"},
- {LDLM_FL_DESTROYED, "LDLM_FL_DESTROYED"},
- {LDLM_FL_SERVER_LOCK, "LDLM_FL_SERVER_LOCK"},
- {LDLM_FL_RES_LOCKED, "LDLM_FL_RES_LOCKED"},
- {LDLM_FL_WAITED, "LDLM_FL_WAITED"},
- {LDLM_FL_NS_SRV, "LDLM_FL_NS_SRV"},
- { 0, NULL }
+ {LDLM_FL_LOCK_CHANGED, "LDLM_FL_LOCK_CHANGED"},
+ {LDLM_FL_BLOCK_GRANTED, "LDLM_FL_BLOCK_GRANTED"},
+ {LDLM_FL_BLOCK_CONV, "LDLM_FL_BLOCK_CONV"},
+ {LDLM_FL_BLOCK_WAIT, "LDLM_FL_BLOCK_WAIT"},
+ {LDLM_FL_AST_SENT, "LDLM_FL_AST_SENT"},
+ {LDLM_FL_REPLAY, "LDLM_FL_REPLAY"},
+ {LDLM_FL_INTENT_ONLY, "LDLM_FL_INTENT_ONLY"},
+ {LDLM_FL_HAS_INTENT, "LDLM_FL_HAS_INTENT"},
+ {LDLM_FL_DISCARD_DATA, "LDLM_FL_DISCARD_DATA"},
+ {LDLM_FL_NO_TIMEOUT, "LDLM_FL_NO_TIMEOUT"},
+ {LDLM_FL_BLOCK_NOWAIT, "LDLM_FL_BLOCK_NOWAIT"},
+ {LDLM_FL_TEST_LOCK, "LDLM_FL_TEST_LOCK"},
+ {LDLM_FL_CANCEL_ON_BLOCK, "LDLM_FL_CANCEL_ON_BLOCK"},
+ {LDLM_FL_DENY_ON_CONTENTION, "LDLM_FL_DENY_ON_CONTENTION"},
+ {LDLM_FL_AST_DISCARD_DATA, "LDLM_FL_AST_DISCARD_DATA"},
+ {LDLM_FL_FAIL_LOC, "LDLM_FL_FAIL_LOC"},
+ {LDLM_FL_SKIPPED, "LDLM_FL_SKIPPED"},
+ {LDLM_FL_CBPENDING, "LDLM_FL_CBPENDING"},
+ {LDLM_FL_WAIT_NOREPROC, "LDLM_FL_WAIT_NOREPROC"},
+ {LDLM_FL_CANCEL, "LDLM_FL_CANCEL"},
+ {LDLM_FL_LOCAL_ONLY, "LDLM_FL_LOCAL_ONLY"},
+ {LDLM_FL_FAILED, "LDLM_FL_FAILED"},
+ {LDLM_FL_CANCELING, "LDLM_FL_CANCELING"},
+ {LDLM_FL_LOCAL, "LDLM_FL_LOCAL"},
+ {LDLM_FL_LVB_READY, "LDLM_FL_LVB_READY"},
+ {LDLM_FL_KMS_IGNORE, "LDLM_FL_KMS_IGNORE"},
+ {LDLM_FL_CP_REQD, "LDLM_FL_CP_REQD"},
+ {LDLM_FL_CLEANED, "LDLM_FL_CLEANED"},
+ {LDLM_FL_ATOMIC_CB, "LDLM_FL_ATOMIC_CB"},
+ {LDLM_FL_BL_AST, "LDLM_FL_BL_AST"},
+ {LDLM_FL_BL_DONE, "LDLM_FL_BL_DONE"},
+ {LDLM_FL_NO_LRU, "LDLM_FL_NO_LRU"},
+ {LDLM_FL_FAIL_NOTIFIED, "LDLM_FL_FAIL_NOTIFIED"},
+ {LDLM_FL_DESTROYED, "LDLM_FL_DESTROYED"},
+ {LDLM_FL_SERVER_LOCK, "LDLM_FL_SERVER_LOCK"},
+ {LDLM_FL_RES_LOCKED, "LDLM_FL_RES_LOCKED"},
+ {LDLM_FL_WAITED, "LDLM_FL_WAITED"},
+ {LDLM_FL_NS_SRV, "LDLM_FL_NS_SRV"},
+ {LDLM_FL_EXCL, "LDLM_FL_EXCL"},
+ { 0, NULL }
};
#endif /* WIRESHARK_COMPILE */
#endif /* LDLM_ALL_FLAGS_MASK */
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index ff119532dafb..84a897eed1df 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -431,12 +431,6 @@ struct lu_server_seq {
struct seq_server_site *lss_site;
};
-struct com_thread_info;
-int seq_query(struct com_thread_info *info);
-
-struct ptlrpc_request;
-int seq_handle(struct ptlrpc_request *req);
-
/* Server methods */
int seq_server_init(struct lu_server_seq *seq,
diff --git a/drivers/staging/lustre/lustre/include/lustre_ha.h b/drivers/staging/lustre/lustre/include/lustre_ha.h
index 105f6d61eef0..f3ae02b3eef3 100644
--- a/drivers/staging/lustre/lustre/include/lustre_ha.h
+++ b/drivers/staging/lustre/lustre/include/lustre_ha.h
@@ -58,9 +58,6 @@ void ptlrpc_activate_import(struct obd_import *imp);
void ptlrpc_deactivate_import(struct obd_import *imp);
void ptlrpc_invalidate_import(struct obd_import *imp);
void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt);
-int ptlrpc_check_suspend(void);
-void ptlrpc_activate_timeouts(struct obd_import *imp);
-void ptlrpc_deactivate_timeouts(struct obd_import *imp);
/** @} ha */
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 5e11107d4c66..609a090484a6 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -81,11 +81,12 @@ struct client_obd *client_conn2cli(struct lustre_handle *conn);
struct md_open_data;
struct obd_client_handle {
- struct lustre_handle och_fh;
- struct lu_fid och_fid;
- struct md_open_data *och_mod;
- __u32 och_magic;
- int och_flags;
+ struct lustre_handle och_fh;
+ struct lu_fid och_fid;
+ struct md_open_data *och_mod;
+ struct lustre_handle och_lease_handle; /* open lock for lease */
+ __u32 och_magic;
+ fmode_t och_flags;
};
#define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
index 721aa05dff3b..896c7576aa0f 100644
--- a/drivers/staging/lustre/lustre/include/lustre_log.h
+++ b/drivers/staging/lustre/lustre/include/lustre_log.h
@@ -136,7 +136,11 @@ int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt,
struct llog_handle **lgh, struct llog_logid *logid,
char *name, enum llog_open_param open_param);
int llog_close(const struct lu_env *env, struct llog_handle *cathandle);
-int llog_get_size(struct llog_handle *loghandle);
+int llog_is_empty(const struct lu_env *env, struct llog_ctxt *ctxt,
+ char *name);
+int llog_backup(const struct lu_env *env, struct obd_device *obd,
+ struct llog_ctxt *ctxt, struct llog_ctxt *bak_ctxt,
+ char *name, char *backup);
/* llog_process flags */
#define LLOG_FLAG_NODEAMON 0x0001
@@ -382,6 +386,13 @@ static inline int llog_data_len(int len)
return cfs_size_round(len);
}
+static inline int llog_get_size(struct llog_handle *loghandle)
+{
+ if (loghandle && loghandle->lgh_hdr)
+ return loghandle->lgh_hdr->llh_count;
+ return 0;
+}
+
static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt)
{
atomic_inc(&ctxt->loc_refcount);
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index 19000259a5e4..c1e02702b931 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -48,12 +48,9 @@
* @{
*/
-# include <linux/fs.h>
-# include <linux/dcache.h>
-# ifdef CONFIG_FS_POSIX_ACL
-# include <linux/posix_acl_xattr.h>
-# endif /* CONFIG_FS_POSIX_ACL */
-# include <linux/lustre_intent.h>
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/lustre_intent.h>
#include <lustre_handles.h>
#include <linux/libcfs/libcfs.h>
#include <obd_class.h>
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 72edf01b58a2..d8d088035428 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -264,241 +264,7 @@
#define LDLM_MAXREQSIZE (5 * 1024)
#define LDLM_MAXREPSIZE (1024)
- /*
- * MDS threads constants:
- *
- * Please see examples in "Thread Constants", MDS threads number will be at
- * the comparable level of old versions, unless the server has many cores.
- */
-#ifndef MDS_MAX_THREADS
-#define MDS_MAX_THREADS 1024
-#define MDS_MAX_OTHR_THREADS 256
-
-#else /* MDS_MAX_THREADS */
-#if MDS_MAX_THREADS < PTLRPC_NTHRS_INIT
-#undef MDS_MAX_THREADS
-#define MDS_MAX_THREADS PTLRPC_NTHRS_INIT
-#endif
-#define MDS_MAX_OTHR_THREADS max(PTLRPC_NTHRS_INIT, MDS_MAX_THREADS / 2)
-#endif
-
-/* default service */
-#define MDS_THR_FACTOR 8
-#define MDS_NTHRS_INIT PTLRPC_NTHRS_INIT
-#define MDS_NTHRS_MAX MDS_MAX_THREADS
-#define MDS_NTHRS_BASE min(64, MDS_NTHRS_MAX)
-
-/* read-page service */
-#define MDS_RDPG_THR_FACTOR 4
-#define MDS_RDPG_NTHRS_INIT PTLRPC_NTHRS_INIT
-#define MDS_RDPG_NTHRS_MAX MDS_MAX_OTHR_THREADS
-#define MDS_RDPG_NTHRS_BASE min(48, MDS_RDPG_NTHRS_MAX)
-
-/* these should be removed when we remove setattr service in the future */
-#define MDS_SETA_THR_FACTOR 4
-#define MDS_SETA_NTHRS_INIT PTLRPC_NTHRS_INIT
-#define MDS_SETA_NTHRS_MAX MDS_MAX_OTHR_THREADS
-#define MDS_SETA_NTHRS_BASE min(48, MDS_SETA_NTHRS_MAX)
-
-/* non-affinity threads */
-#define MDS_OTHR_NTHRS_INIT PTLRPC_NTHRS_INIT
-#define MDS_OTHR_NTHRS_MAX MDS_MAX_OTHR_THREADS
-
-#define MDS_NBUFS 64
-
-/**
- * Assume file name length = FNAME_MAX = 256 (true for ext3).
- * path name length = PATH_MAX = 4096
- * LOV MD size max = EA_MAX = 24 * 2000
- * (NB: 24 is size of lov_ost_data)
- * LOV LOGCOOKIE size max = 32 * 2000
- * (NB: 32 is size of llog_cookie)
- * symlink: FNAME_MAX + PATH_MAX <- largest
- * link: FNAME_MAX + PATH_MAX (mds_rec_link < mds_rec_create)
- * rename: FNAME_MAX + FNAME_MAX
- * open: FNAME_MAX + EA_MAX
- *
- * MDS_MAXREQSIZE ~= 4736 bytes =
- * lustre_msg + ldlm_request + mdt_body + mds_rec_create + FNAME_MAX + PATH_MAX
- * MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header
- *
- * Realistic size is about 512 bytes (20 character name + 128 char symlink),
- * except in the open case where there are a large number of OSTs in a LOV.
- */
-#define MDS_MAXREQSIZE (5 * 1024) /* >= 4736 */
-#define MDS_MAXREPSIZE (9 * 1024) /* >= 8300 */
-
-/**
- * MDS incoming request with LOV EA
- * 24 = sizeof(struct lov_ost_data), i.e: replay of opencreate
- */
-#define MDS_LOV_MAXREQSIZE max(MDS_MAXREQSIZE, \
- 362 + LOV_MAX_STRIPE_COUNT * 24)
-/**
- * MDS outgoing reply with LOV EA
- *
- * NB: max reply size Lustre 2.4+ client can get from old MDS is:
- * LOV_MAX_STRIPE_COUNT * (llog_cookie + lov_ost_data) + extra bytes
- *
- * but 2.4 or later MDS will never send reply with llog_cookie to any
- * version client. This macro is defined for server side reply buffer size.
- */
-#define MDS_LOV_MAXREPSIZE MDS_LOV_MAXREQSIZE
-
-/**
- * This is the size of a maximum REINT_SETXATTR request:
- *
- * lustre_msg 56 (32 + 4 x 5 + 4)
- * ptlrpc_body 184
- * mdt_rec_setxattr 136
- * lustre_capa 120
- * name 256 (XATTR_NAME_MAX)
- * value 65536 (XATTR_SIZE_MAX)
- */
-#define MDS_EA_MAXREQSIZE 66288
-
-/**
- * These are the maximum request and reply sizes (rounded up to 1 KB
- * boundaries) for the "regular" MDS_REQUEST_PORTAL and MDS_REPLY_PORTAL.
- */
-#define MDS_REG_MAXREQSIZE (((max(MDS_EA_MAXREQSIZE, \
- MDS_LOV_MAXREQSIZE) + 1023) >> 10) << 10)
-#define MDS_REG_MAXREPSIZE MDS_REG_MAXREQSIZE
-
-/**
- * The update request includes all of updates from the create, which might
- * include linkea (4K maxim), together with other updates, we set it to 9K:
- * lustre_msg + ptlrpc_body + UPDATE_BUF_SIZE (8K)
- */
-#define MDS_OUT_MAXREQSIZE (9 * 1024)
-#define MDS_OUT_MAXREPSIZE MDS_MAXREPSIZE
-
-/** MDS_BUFSIZE = max_reqsize (w/o LOV EA) + max sptlrpc payload size */
-#define MDS_BUFSIZE max(MDS_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
- 8 * 1024)
-
-/**
- * MDS_REG_BUFSIZE should at least be MDS_REG_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD.
- * However, we need to allocate a much larger buffer for it because LNet
- * requires each MD(rqbd) has at least MDS_REQ_MAXREQSIZE bytes left to avoid
- * dropping of maximum-sized incoming request. So if MDS_REG_BUFSIZE is only a
- * little larger than MDS_REG_MAXREQSIZE, then it can only fit in one request
- * even there are about MDS_REG_MAX_REQSIZE bytes left in a rqbd, and memory
- * utilization is very low.
- *
- * In the meanwhile, size of rqbd can't be too large, because rqbd can't be
- * reused until all requests fit in it have been processed and released,
- * which means one long blocked request can prevent the rqbd be reused.
- * Now we set request buffer size to 160 KB, so even each rqbd is unlinked
- * from LNet with unused 65 KB, buffer utilization will be about 59%.
- * Please check LU-2432 for details.
- */
-#define MDS_REG_BUFSIZE max(MDS_REG_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
- 160 * 1024)
-
-/**
- * MDS_OUT_BUFSIZE = max_out_reqsize + max sptlrpc payload (~1K) which is
- * about 10K, for the same reason as MDS_REG_BUFSIZE, we also give some
- * extra bytes to each request buffer to improve buffer utilization rate.
- */
-#define MDS_OUT_BUFSIZE max(MDS_OUT_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
- 24 * 1024)
-
-/** FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc */
-#define FLD_MAXREQSIZE (160)
-
-/** FLD_MAXREPSIZE == lustre_msg + ptlrpc_body */
-#define FLD_MAXREPSIZE (152)
-#define FLD_BUFSIZE (1 << 12)
-
-/**
- * SEQ_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + lu_range +
- * __u32 padding */
-#define SEQ_MAXREQSIZE (160)
-
-/** SEQ_MAXREPSIZE == lustre_msg + ptlrpc_body + lu_range */
-#define SEQ_MAXREPSIZE (152)
-#define SEQ_BUFSIZE (1 << 12)
-
-/** MGS threads must be >= 3, see bug 22458 comment #28 */
-#define MGS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1)
-#define MGS_NTHRS_MAX 32
-
-#define MGS_NBUFS 64
-#define MGS_BUFSIZE (8 * 1024)
-#define MGS_MAXREQSIZE (7 * 1024)
-#define MGS_MAXREPSIZE (9 * 1024)
-
- /*
- * OSS threads constants:
- *
- * Given 8 as factor and 64 as base threads number
- *
- * example 1):
- * On 8-core server configured to 2 partitions, we will have
- * 64 + 8 * 4 = 96 threads for each partition, 192 total threads.
- *
- * example 2):
- * On 32-core machine configured to 4 partitions, we will have
- * 64 + 8 * 8 = 112 threads for each partition, so total threads number
- * will be 112 * 4 = 448.
- *
- * example 3):
- * On 64-core machine configured to 4 partitions, we will have
- * 64 + 16 * 8 = 192 threads for each partition, so total threads number
- * will be 192 * 4 = 768 which is above limit OSS_NTHRS_MAX(512), so we
- * cut off the value to OSS_NTHRS_MAX(512) / 4 which is 128 threads
- * for each partition.
- *
- * So we can see that with these constants, threads number wil be at the
- * similar level of old versions, unless the server has many cores.
- */
- /* depress threads factor for VM with small memory size */
-#define OSS_THR_FACTOR min_t(int, 8, \
- NUM_CACHEPAGES >> (28 - PAGE_CACHE_SHIFT))
-#define OSS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1)
-#define OSS_NTHRS_BASE 64
-#define OSS_NTHRS_MAX 512
-
-/* threads for handling "create" request */
-#define OSS_CR_THR_FACTOR 1
-#define OSS_CR_NTHRS_INIT PTLRPC_NTHRS_INIT
-#define OSS_CR_NTHRS_BASE 8
-#define OSS_CR_NTHRS_MAX 64
-
-/**
- * OST_IO_MAXREQSIZE ~=
- * lustre_msg + ptlrpc_body + obdo + obd_ioobj +
- * DT_MAX_BRW_PAGES * niobuf_remote
- *
- * - single object with 16 pages is 512 bytes
- * - OST_IO_MAXREQSIZE must be at least 1 page of cookies plus some spillover
- * - Must be a multiple of 1024
- * - actual size is about 18K
- */
-#define _OST_MAXREQSIZE_SUM (sizeof(struct lustre_msg) + \
- sizeof(struct ptlrpc_body) + \
- sizeof(struct obdo) + \
- sizeof(struct obd_ioobj) + \
- sizeof(struct niobuf_remote) * DT_MAX_BRW_PAGES)
-/**
- * FIEMAP request can be 4K+ for now
- */
#define OST_MAXREQSIZE (5 * 1024)
-#define OST_IO_MAXREQSIZE max_t(int, OST_MAXREQSIZE, \
- (((_OST_MAXREQSIZE_SUM - 1) | (1024 - 1)) + 1))
-
-#define OST_MAXREPSIZE (9 * 1024)
-#define OST_IO_MAXREPSIZE OST_MAXREPSIZE
-
-#define OST_NBUFS 64
-/** OST_BUFSIZE = max_reqsize + max sptlrpc payload size */
-#define OST_BUFSIZE max_t(int, OST_MAXREQSIZE + 1024, 16 * 1024)
-/**
- * OST_IO_MAXREQSIZE is 18K, giving extra 46K can increase buffer utilization
- * rate of request buffer, please check comment of MDS_LOV_BUFSIZE for details.
- */
-#define OST_IO_BUFSIZE max_t(int, OST_IO_MAXREQSIZE + 1024, 64 * 1024)
/* Macro to hide a typecast. */
#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
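
The removed OSS thread-count comment above walks through the sizing arithmetic by example. A minimal stand-alone sketch of that rule, assuming the formula is simply base + factor x cores-per-partition, clamped so all partitions together stay under OSS_NTHRS_MAX (the in-kernel ptlrpc code computes this per CPT with further adjustments, and the real OSS_THR_FACTOR is scaled down on small-memory VMs), might look like:

	#include <stdio.h>

	/* Illustrative copies of the removed constants; values only, not the kernel macros. */
	#define OSS_THR_FACTOR    8
	#define OSS_NTHRS_BASE   64
	#define OSS_NTHRS_MAX   512

	/* Hypothetical helper: thread count for one partition of an ncores server. */
	static int oss_threads_per_partition(int ncores, int npartitions)
	{
		int nthrs = OSS_NTHRS_BASE + OSS_THR_FACTOR * (ncores / npartitions);
		int limit = OSS_NTHRS_MAX / npartitions; /* keep the grand total under the cap */

		return nthrs < limit ? nthrs : limit;
	}

	int main(void)
	{
		/* example 1: 8 cores, 2 partitions -> 96 threads each, 192 total */
		printf("8 cores / 2 CPTs : %d\n", oss_threads_per_partition(8, 2));
		/* example 3: 64 cores, 4 partitions -> capped at 512 / 4 = 128 each */
		printf("64 cores / 4 CPTs: %d\n", oss_threads_per_partition(64, 4));
		return 0;
	}
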
@@ -3403,10 +3169,8 @@ int ptlrpc_del_timeout_client(struct list_head *obd_list,
enum timeout_event event);
struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
int ptlrpc_obd_ping(struct obd_device *obd);
-cfs_time_t ptlrpc_suspend_wakeup_time(void);
void ping_evictor_start(void);
void ping_evictor_stop(void);
-int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req);
void ptlrpc_pinger_ir_up(void);
void ptlrpc_pinger_ir_down(void);
/** @} */
@@ -3470,15 +3234,6 @@ static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {}
#endif
/** @} */
-/* ptlrpc/llog_server.c */
-int llog_origin_handle_open(struct ptlrpc_request *req);
-int llog_origin_handle_destroy(struct ptlrpc_request *req);
-int llog_origin_handle_prev_block(struct ptlrpc_request *req);
-int llog_origin_handle_next_block(struct ptlrpc_request *req);
-int llog_origin_handle_read_header(struct ptlrpc_request *req);
-int llog_origin_handle_close(struct ptlrpc_request *req);
-int llog_origin_handle_cancel(struct ptlrpc_request *req);
-
/* ptlrpc/llog_client.c */
extern struct llog_operations llog_client_ops;
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index f4d3820865f1..a83db61a30be 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -164,6 +164,7 @@ extern struct req_format RQF_UPDATE_OBJ;
*/
extern struct req_format RQF_MDS_GETATTR_NAME;
extern struct req_format RQF_MDS_CLOSE;
+extern struct req_format RQF_MDS_RELEASE_CLOSE;
extern struct req_format RQF_MDS_PIN;
extern struct req_format RQF_MDS_UNPIN;
extern struct req_format RQF_MDS_CONNECT;
@@ -229,6 +230,7 @@ extern struct req_format RQF_LDLM_INTENT_GETATTR;
extern struct req_format RQF_LDLM_INTENT_OPEN;
extern struct req_format RQF_LDLM_INTENT_CREATE;
extern struct req_format RQF_LDLM_INTENT_UNLINK;
+extern struct req_format RQF_LDLM_INTENT_GETXATTR;
extern struct req_format RQF_LDLM_INTENT_QUOTA;
extern struct req_format RQF_LDLM_CANCEL;
extern struct req_format RQF_LDLM_CALLBACK;
@@ -245,6 +247,8 @@ extern struct req_format RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK;
extern struct req_format RQF_LLOG_ORIGIN_HANDLE_READ_HEADER;
extern struct req_format RQF_LLOG_ORIGIN_CONNECT;
+extern struct req_format RQF_CONNECT;
+
extern struct req_msg_field RMF_GENERIC_DATA;
extern struct req_msg_field RMF_PTLRPC_BODY;
extern struct req_msg_field RMF_MDT_BODY;
@@ -260,6 +264,7 @@ extern struct req_msg_field RMF_GETINFO_VAL;
extern struct req_msg_field RMF_GETINFO_VALLEN;
extern struct req_msg_field RMF_GETINFO_KEY;
extern struct req_msg_field RMF_IDX_INFO;
+extern struct req_msg_field RMF_CLOSE_DATA;
/*
* connection handle received in MDS_CONNECT request.
@@ -275,6 +280,8 @@ extern struct req_msg_field RMF_LAYOUT_INTENT;
extern struct req_msg_field RMF_MDT_MD;
extern struct req_msg_field RMF_REC_REINT;
extern struct req_msg_field RMF_EADATA;
+extern struct req_msg_field RMF_EAVALS;
+extern struct req_msg_field RMF_EAVALS_LENS;
extern struct req_msg_field RMF_ACL;
extern struct req_msg_field RMF_LOGCOOKIES;
extern struct req_msg_field RMF_CAPA1;
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
index 70b8b133a5c3..885247d28b3a 100644
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ b/drivers/staging/lustre/lustre/include/lustre_sec.h
@@ -903,12 +903,6 @@ struct ptlrpc_bulk_sec_desc {
/*
- * lprocfs
- */
-struct proc_dir_entry;
-extern struct proc_dir_entry *sptlrpc_proc_root;
-
-/*
* round size up to next power of 2, for slab allocation.
* @size must be sane (can't overflow after round up)
*/
@@ -1067,7 +1061,18 @@ void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx);
/* misc */
const char * sec2target_str(struct ptlrpc_sec *sec);
+/*
+ * lprocfs
+ */
+#ifdef LPROCFS
+struct proc_dir_entry;
+extern struct proc_dir_entry *sptlrpc_proc_root;
int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev);
+#else
+#define sptlrpc_proc_root NULL
+static inline int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev)
+{ return 0; }
+#endif
/*
* server side
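
The lustre_sec.h hunk above (and the matching one in ldlm_pool.c further down) relies on the standard header-stub idiom: real lprocfs declarations when LPROCFS is defined, static inline no-ops otherwise, so callers such as ldlm_setup() can drop their own #ifdef guards. A generic sketch of the idiom, with made-up names, under the assumption that the real implementation lives in a separate .c file:

	/* foo_stats.h - hypothetical header showing the stub idiom in miniature */
	struct foo_dev;

	#ifdef CONFIG_FOO_STATS
	/* Real declarations; the implementation is compiled elsewhere. */
	int  foo_stats_attach(struct foo_dev *dev);
	void foo_stats_detach(struct foo_dev *dev);
	#else
	/* Stats compiled out: callers still build and need no #ifdefs of their own. */
	static inline int  foo_stats_attach(struct foo_dev *dev) { return 0; }
	static inline void foo_stats_detach(struct foo_dev *dev) { }
	#endif
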
diff --git a/drivers/staging/lustre/lustre/include/md_object.h b/drivers/staging/lustre/lustre/include/md_object.h
index daf93afe3feb..7b45b47b48f9 100644
--- a/drivers/staging/lustre/lustre/include/md_object.h
+++ b/drivers/staging/lustre/lustre/include/md_object.h
@@ -352,8 +352,8 @@ struct md_device_operations {
int (*mdo_root_get)(const struct lu_env *env, struct md_device *m,
struct lu_fid *f);
- int (*mdo_maxsize_get)(const struct lu_env *env, struct md_device *m,
- int *md_size, int *cookie_size);
+ int (*mdo_maxeasize_get)(const struct lu_env *env, struct md_device *m,
+ int *easize);
int (*mdo_statfs)(const struct lu_env *env, struct md_device *m,
struct obd_statfs *sfs);
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index d0aea15b7c39..c3470ce62cff 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -399,8 +399,8 @@ struct client_obd {
/* mgc datastruct */
struct semaphore cl_mgc_sem;
- struct vfsmount *cl_mgc_vfsmnt;
- struct dentry *cl_mgc_configs_dir;
+ struct local_oid_storage *cl_mgc_los;
+ struct dt_object *cl_mgc_configs_dir;
atomic_t cl_mgc_refcount;
struct obd_export *cl_mgc_mgsexp;
@@ -1022,6 +1022,7 @@ struct lu_context;
#define IT_LAYOUT (1 << 10)
#define IT_QUOTA_DQACQ (1 << 11)
#define IT_QUOTA_CONN (1 << 12)
+#define IT_SETXATTR (1 << 13)
static inline int it_to_lock_mode(struct lookup_intent *it)
{
@@ -1031,6 +1032,10 @@ static inline int it_to_lock_mode(struct lookup_intent *it)
else if (it->it_op & (IT_READDIR | IT_GETATTR | IT_OPEN | IT_LOOKUP |
IT_LAYOUT))
return LCK_CR;
+ else if (it->it_op & IT_GETXATTR)
+ return LCK_PR;
+ else if (it->it_op & IT_SETXATTR)
+ return LCK_PW;
LASSERTF(0, "Invalid it_op: %d\n", it->it_op);
return -EINVAL;
@@ -1070,7 +1075,7 @@ struct md_op_data {
struct obd_capa *op_capa2;
/* Various operation flags. */
- __u32 op_bias;
+ enum mds_op_bias op_bias;
/* Operation type */
__u32 op_opc;
@@ -1084,6 +1089,10 @@ struct md_op_data {
/* used to transfer info between the stacks of MD client
* see enum op_cli_flags */
__u32 op_cli_flags;
+
+ /* File object data version for HSM release, on client */
+ __u64 op_data_version;
+ struct lustre_handle op_lease_handle;
};
enum op_cli_flags {
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index 9697e7faff2f..977bc231df9d 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -256,6 +256,7 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
#define OBD_FAIL_OSD_SCRUB_FATAL 0x192
#define OBD_FAIL_OSD_FID_MAPPING 0x193
#define OBD_FAIL_OSD_LMA_INCOMPAT 0x194
+#define OBD_FAIL_OSD_COMPAT_INVALID_ENTRY 0x195
#define OBD_FAIL_OST 0x200
#define OBD_FAIL_OST_CONNECT_NET 0x201
@@ -416,6 +417,13 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
#define OBD_FAIL_MGC_PAUSE_PROCESS_LOG 0x903
#define OBD_FAIL_MGS_PAUSE_REQ 0x904
#define OBD_FAIL_MGS_PAUSE_TARGET_REG 0x905
+#define OBD_FAIL_MGS_CONNECT_NET 0x906
+#define OBD_FAIL_MGS_DISCONNECT_NET 0x907
+#define OBD_FAIL_MGS_SET_INFO_NET 0x908
+#define OBD_FAIL_MGS_EXCEPTION_NET 0x909
+#define OBD_FAIL_MGS_TARGET_REG_NET 0x90a
+#define OBD_FAIL_MGS_TARGET_DEL_NET 0x90b
+#define OBD_FAIL_MGS_CONFIG_READ_NET 0x90c
#define OBD_FAIL_QUOTA_DQACQ_NET 0xA01
#define OBD_FAIL_QUOTA_EDQUOT 0xA02
@@ -457,6 +465,7 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
#define OBD_FAIL_LOCK_STATE_WAIT_INTR 0x1402
#define OBD_FAIL_LOV_INIT 0x1403
#define OBD_FAIL_GLIMPSE_DELAY 0x1404
+#define OBD_FAIL_LLITE_XATTR_ENOMEM 0x1405
#define OBD_FAIL_FID_INDIR 0x1501
#define OBD_FAIL_FID_INLMA 0x1502
@@ -498,6 +507,8 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
extern atomic_t libcfs_kmemory;
+extern void obd_update_maxusage(void);
+
#ifdef LPROCFS
#define obd_memory_add(size) \
lprocfs_counter_add(obd_memory, OBD_MEMORY_STAT, (long)(size))
@@ -516,7 +527,6 @@ extern atomic_t libcfs_kmemory;
lprocfs_stats_collector(obd_memory, OBD_MEMORY_PAGES_STAT, \
LPROCFS_FIELDS_FLAGS_SUM)
-extern void obd_update_maxusage(void);
extern __u64 obd_memory_max(void);
extern __u64 obd_pages_max(void);
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index e60c04d5393a..94b164127e0c 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -79,27 +79,27 @@ static struct lu_kmem_descr ccc_caches[] = {
{
.ckd_cache = &ccc_lock_kmem,
.ckd_name = "ccc_lock_kmem",
- .ckd_size = sizeof (struct ccc_lock)
+ .ckd_size = sizeof(struct ccc_lock)
},
{
.ckd_cache = &ccc_object_kmem,
.ckd_name = "ccc_object_kmem",
- .ckd_size = sizeof (struct ccc_object)
+ .ckd_size = sizeof(struct ccc_object)
},
{
.ckd_cache = &ccc_thread_kmem,
.ckd_name = "ccc_thread_kmem",
- .ckd_size = sizeof (struct ccc_thread_info),
+ .ckd_size = sizeof(struct ccc_thread_info),
},
{
.ckd_cache = &ccc_session_kmem,
.ckd_name = "ccc_session_kmem",
- .ckd_size = sizeof (struct ccc_session)
+ .ckd_size = sizeof(struct ccc_session)
},
{
.ckd_cache = &ccc_req_kmem,
.ckd_name = "ccc_req_kmem",
- .ckd_size = sizeof (struct ccc_req)
+ .ckd_size = sizeof(struct ccc_req)
},
{
.ckd_cache = NULL
@@ -162,7 +162,7 @@ struct lu_context_key ccc_session_key = {
/* type constructor/destructor: ccc_type_{init,fini,start,stop}(). */
-// LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key);
+/* LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key); */
int ccc_device_init(const struct lu_env *env, struct lu_device *d,
const char *name, struct lu_device *next)
@@ -1006,6 +1006,12 @@ again:
cl_io_fini(env, io);
if (unlikely(io->ci_need_restart))
goto again;
+ /* HSM import case: file is released, cannot be restored
+ * no need to fail except if restore registration failed
+ * with -ENODATA */
+ if (result == -ENODATA && io->ci_restore_needed &&
+ io->ci_result != -ENODATA)
+ result = 0;
cl_env_put(env, &refcheck);
return result;
}
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
index 2b4dbeebcd5d..e04c2d37c249 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
@@ -140,7 +140,9 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
if (rc) {
- LASSERT(rc < 0);
+ /* Does not make sense to take GL for released layout */
+ if (rc > 0)
+ rc = -ENOTSUPP;
cl_env_put(env, &refcheck);
return rc;
}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index 39fcdacc51ed..c9aae132f98a 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -95,20 +95,12 @@ ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
lock->l_policy_data.l_flock.start));
}
-static inline int ldlm_flock_blocking_link(struct ldlm_lock *req,
- struct ldlm_lock *lock)
+static inline void ldlm_flock_blocking_link(struct ldlm_lock *req,
+ struct ldlm_lock *lock)
{
- int rc = 0;
-
/* For server only */
if (req->l_export == NULL)
- return 0;
-
- if (unlikely(req->l_export->exp_flock_hash == NULL)) {
- rc = ldlm_init_flock_export(req->l_export);
- if (rc)
- goto error;
- }
+ return;
LASSERT(hlist_unhashed(&req->l_exp_flock_hash));
@@ -121,8 +113,6 @@ static inline int ldlm_flock_blocking_link(struct ldlm_lock *req,
cfs_hash_add(req->l_export->exp_flock_hash,
&req->l_policy_data.l_flock.owner,
&req->l_exp_flock_hash);
-error:
- return rc;
}
static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
@@ -250,7 +240,6 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
int overlaps = 0;
int splitted = 0;
const struct ldlm_callback_suite null_cbs = { NULL };
- int rc;
CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
LPU64" end "LPU64"\n", *flags,
@@ -328,12 +317,8 @@ reprocess:
/* add lock to blocking list before deadlock
* check to prevent race */
- rc = ldlm_flock_blocking_link(req, lock);
- if (rc) {
- ldlm_flock_destroy(req, mode, *flags);
- *err = rc;
- return LDLM_ITER_STOP;
- }
+ ldlm_flock_blocking_link(req, lock);
+
if (ldlm_flock_deadlock(req, lock)) {
ldlm_flock_blocking_unlink(req);
ldlm_flock_destroy(req, mode, *flags);
@@ -665,23 +650,20 @@ granted:
/* fcntl(F_GETLK) request */
/* The old mode was saved in getlk->fl_type so that if the mode
* in the lock changes we can decref the appropriate refcount.*/
- ldlm_flock_destroy(lock, flock_type(getlk),
- LDLM_FL_WAIT_NOREPROC);
+ ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
switch (lock->l_granted_mode) {
case LCK_PR:
- flock_set_type(getlk, F_RDLCK);
+ getlk->fl_type = F_RDLCK;
break;
case LCK_PW:
- flock_set_type(getlk, F_WRLCK);
+ getlk->fl_type = F_WRLCK;
break;
default:
- flock_set_type(getlk, F_UNLCK);
+ getlk->fl_type = F_UNLCK;
}
- flock_set_pid(getlk, (pid_t)lock->l_policy_data.l_flock.pid);
- flock_set_start(getlk,
- (loff_t)lock->l_policy_data.l_flock.start);
- flock_set_end(getlk,
- (loff_t)lock->l_policy_data.l_flock.end);
+ getlk->fl_pid = (pid_t)lock->l_policy_data.l_flock.pid;
+ getlk->fl_start = (loff_t)lock->l_policy_data.l_flock.start;
+ getlk->fl_end = (loff_t)lock->l_policy_data.l_flock.end;
} else {
__u64 noreproc = LDLM_FL_WAIT_NOREPROC;
@@ -816,6 +798,9 @@ static cfs_hash_ops_t ldlm_export_flock_ops = {
int ldlm_init_flock_export(struct obd_export *exp)
{
+ if (strcmp(exp->exp_obd->obd_type->typ_name, LUSTRE_MDT_NAME) != 0)
+ return 0;
+
exp->exp_flock_hash =
cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
HASH_EXP_LOCK_CUR_BITS,
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 3900a69742fc..692623beee12 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -145,6 +145,8 @@ char *ldlm_it2str(int it)
return "getxattr";
case IT_LAYOUT:
return "layout";
+ case IT_SETXATTR:
+ return "setxattr";
default:
CERROR("Unknown intent %d\n", it);
return "UNKNOWN";
@@ -799,7 +801,7 @@ void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
* Removes reader/writer reference for LDLM lock \a lock.
* Assumes LDLM lock is already locked.
* only called in ldlm_flock_destroy and for local locks.
- * Does NOT add lock to LRU if no r/w references left to accomodate flock locks
+ * Does NOT add lock to LRU if no r/w references left to accommodate flock locks
* that cannot be placed in LRU.
*/
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
@@ -1129,6 +1131,11 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
if (lock == old_lock)
break;
+ /* Check if this lock can be matched.
+ * Used by LU-2919(exclusive open) for open lease lock */
+ if (ldlm_is_excl(lock))
+ continue;
+
/* llite sometimes wants to match locks that will be
* canceled when their users drop, but we allow it to match
* if it passes in CBPENDING and the lock still has users.
@@ -1247,7 +1254,7 @@ EXPORT_SYMBOL(ldlm_lock_allow_match);
* list will be considered
* If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
* to be canceled can still be matched as long as they still have reader
- * or writer refernces
+ * or writer references
* If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
* just tell us if we would have matched.
*
@@ -2090,8 +2097,8 @@ void ldlm_cancel_locks_for_export(struct obd_export *exp)
/**
* Downgrade an exclusive lock.
*
- * A fast variant of ldlm_lock_convert for convertion of exclusive
- * locks. The convertion is always successful.
+ * A fast variant of ldlm_lock_convert for conversion of exclusive
+ * locks. The conversion is always successful.
* Used by Commit on Sharing (COS) code.
*
* \param lock A lock to convert
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index fde9bcd1d48d..3ed020eb89c0 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -49,12 +49,12 @@
#include "ldlm_internal.h"
static int ldlm_num_threads;
-CFS_MODULE_PARM(ldlm_num_threads, "i", int, 0444,
- "number of DLM service threads to start");
+module_param(ldlm_num_threads, int, 0444);
+MODULE_PARM_DESC(ldlm_num_threads, "number of DLM service threads to start");
static char *ldlm_cpts;
-CFS_MODULE_PARM(ldlm_cpts, "s", charp, 0444,
- "CPU partitions ldlm threads should run on");
+module_param(ldlm_cpts, charp, 0444);
+MODULE_PARM_DESC(ldlm_cpts, "CPU partitions ldlm threads should run on");
extern struct kmem_cache *ldlm_resource_slab;
extern struct kmem_cache *ldlm_lock_slab;
@@ -597,45 +597,6 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
rc = ldlm_handle_setinfo(req);
ldlm_callback_reply(req, rc);
return 0;
- case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
- CERROR("shouldn't be handling OBD_LOG_CANCEL on DLM thread\n");
- req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
- return 0;
- rc = llog_origin_handle_cancel(req);
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
- return 0;
- ldlm_callback_reply(req, rc);
- return 0;
- case LLOG_ORIGIN_HANDLE_CREATE:
- req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
- return 0;
- rc = llog_origin_handle_open(req);
- ldlm_callback_reply(req, rc);
- return 0;
- case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
- req_capsule_set(&req->rq_pill,
- &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
- return 0;
- rc = llog_origin_handle_next_block(req);
- ldlm_callback_reply(req, rc);
- return 0;
- case LLOG_ORIGIN_HANDLE_READ_HEADER:
- req_capsule_set(&req->rq_pill,
- &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
- return 0;
- rc = llog_origin_handle_read_header(req);
- ldlm_callback_reply(req, rc);
- return 0;
- case LLOG_ORIGIN_HANDLE_CLOSE:
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
- return 0;
- rc = llog_origin_handle_close(req);
- ldlm_callback_reply(req, rc);
- return 0;
case OBD_QC_CALLBACK:
req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
@@ -1003,6 +964,7 @@ static cfs_hash_ops_t ldlm_export_lock_ops = {
int ldlm_init_export(struct obd_export *exp)
{
+ int rc;
exp->exp_lock_hash =
cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
HASH_EXP_LOCK_CUR_BITS,
@@ -1016,7 +978,14 @@ int ldlm_init_export(struct obd_export *exp)
if (!exp->exp_lock_hash)
return -ENOMEM;
+ rc = ldlm_init_flock_export(exp);
+ if (rc)
+ GOTO(err, rc);
+
return 0;
+err:
+ ldlm_destroy_export(exp);
+ return rc;
}
EXPORT_SYMBOL(ldlm_init_export);
@@ -1043,11 +1012,9 @@ static int ldlm_setup(void)
if (ldlm_state == NULL)
return -ENOMEM;
-#ifdef LPROCFS
rc = ldlm_proc_setup();
if (rc != 0)
GOTO(out, rc);
-#endif
memset(&conf, 0, sizeof(conf));
conf = (typeof(conf)) {
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 0025ee6356da..6758646f575f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -638,6 +638,7 @@ int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
}
EXPORT_SYMBOL(ldlm_pool_setup);
+#ifdef LPROCFS
static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
int granted, grant_rate, cancel_rate, grant_step;
@@ -822,6 +823,14 @@ static void ldlm_pool_proc_fini(struct ldlm_pool *pl)
pl->pl_proc_dir = NULL;
}
}
+#else /* !LPROCFS */
+static int ldlm_pool_proc_init(struct ldlm_pool *pl)
+{
+ return 0;
+}
+
+static void ldlm_pool_proc_fini(struct ldlm_pool *pl) {}
+#endif /* LPROCFS */
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
int idx, ldlm_side_t client)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index dcc278403136..c0e54aead2ce 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -68,8 +68,8 @@
#include "ldlm_internal.h"
int ldlm_enqueue_min = OBD_TIMEOUT_DEFAULT;
-CFS_MODULE_PARM(ldlm_enqueue_min, "i", int, 0644,
- "lock enqueue timeout minimum");
+module_param(ldlm_enqueue_min, int, 0644);
+MODULE_PARM_DESC(ldlm_enqueue_min, "lock enqueue timeout minimum");
/* in client side, whether the cached locks will be canceled before replay */
unsigned int ldlm_cancel_unused_locks_before_replay = 1;
@@ -97,9 +97,6 @@ int ldlm_expired_completion_wait(void *data)
if (lock->l_conn_export == NULL) {
static cfs_time_t next_dump = 0, last_dump = 0;
- if (ptlrpc_check_suspend())
- return 0;
-
LCONSOLE_WARN("lock timed out (enqueued at "CFS_TIME_T", "
CFS_DURATION_T"s ago)\n",
lock->l_last_activity,
@@ -610,18 +607,12 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
lock->l_req_mode = newmode;
}
- if (memcmp(reply->lock_desc.l_resource.lr_name.name,
- lock->l_resource->lr_name.name,
- sizeof(struct ldlm_res_id))) {
- CDEBUG(D_INFO, "remote intent success, locking "
- "(%ld,%ld,%ld) instead of "
- "(%ld,%ld,%ld)\n",
- (long)reply->lock_desc.l_resource.lr_name.name[0],
- (long)reply->lock_desc.l_resource.lr_name.name[1],
- (long)reply->lock_desc.l_resource.lr_name.name[2],
- (long)lock->l_resource->lr_name.name[0],
- (long)lock->l_resource->lr_name.name[1],
- (long)lock->l_resource->lr_name.name[2]);
+ if (!ldlm_res_eq(&reply->lock_desc.l_resource.lr_name,
+ &lock->l_resource->lr_name)) {
+ CDEBUG(D_INFO, "remote intent success, locking "DLDLMRES
+ " instead of "DLDLMRES"\n",
+ PLDLMRES(&reply->lock_desc.l_resource),
+ PLDLMRES(lock->l_resource));
rc = ldlm_lock_change_resource(ns, lock,
&reply->lock_desc.l_resource.lr_name);
@@ -790,7 +781,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
dlm = req_capsule_client_get(pill, &RMF_DLM_REQ);
LASSERT(dlm);
/* Skip first lock handler in ldlm_request_pack(),
- * this method will incrment @lock_count according
+ * this method will increment @lock_count according
* to the lock handle amount actually written to
* the buffer. */
dlm->lock_count = canceloff;
@@ -910,7 +901,7 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
lock->l_conn_export = exp;
lock->l_export = NULL;
lock->l_blocking_ast = einfo->ei_cb_bl;
- lock->l_flags |= (*flags & LDLM_FL_NO_LRU);
+ lock->l_flags |= (*flags & (LDLM_FL_NO_LRU | LDLM_FL_EXCL));
/* lock not sent to server yet */
@@ -1333,7 +1324,7 @@ int ldlm_cli_cancel(struct lustre_handle *lockh,
}
rc = ldlm_cli_cancel_local(lock);
- if (rc == LDLM_FL_LOCAL_ONLY) {
+ if (rc == LDLM_FL_LOCAL_ONLY || cancel_flags & LCF_LOCAL) {
LDLM_LOCK_RELEASE(lock);
return 0;
}
@@ -1593,7 +1584,7 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
* the beginning of LRU list);
*
* flags & LDLM_CANCEL_SHRINK - cancel not more than \a count locks according to
- * memory pressre policy function;
+ * memory pressure policy function;
*
* flags & LDLM_CANCEL_AGED - cancel \a count locks according to "aged policy".
*
@@ -1912,7 +1903,8 @@ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
0, flags | LCF_BL_AST, opaque);
rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
if (rc != ELDLM_OK)
- CERROR("ldlm_cli_cancel_unused_resource: %d\n", rc);
+ CERROR("canceling unused lock "DLDLMRES": rc = %d\n",
+ PLDLMRES(res), rc);
LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
@@ -1930,15 +1922,10 @@ static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs, struct cfs_hash_bd *
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
struct ldlm_cli_cancel_arg *lc = arg;
- int rc;
- rc = ldlm_cli_cancel_unused_resource(ldlm_res_to_ns(res), &res->lr_name,
- NULL, LCK_MINMODE,
- lc->lc_flags, lc->lc_opaque);
- if (rc != 0) {
- CERROR("ldlm_cli_cancel_unused ("LPU64"): %d\n",
- res->lr_name.name[0], rc);
- }
+ ldlm_cli_cancel_unused_resource(ldlm_res_to_ns(res), &res->lr_name,
+ NULL, LCK_MINMODE,
+ lc->lc_flags, lc->lc_opaque);
/* must return 0 for hash iteration */
return 0;
}
@@ -2089,7 +2076,7 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
lock, &lock->l_pending_chain.next,&lock->l_pending_chain.prev);
/* bug 9573: don't replay locks left after eviction, or
* bug 17614: locks being actively cancelled. Get a reference
- * on a lock so that it does not disapear under us (e.g. due to cancel)
+ * on a lock so that it does not disappear under us (e.g. due to cancel)
*/
if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
list_add(&lock->l_pending_chain, list);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 77e022bf8bcc..5f89864cda14 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -162,7 +162,7 @@ static int lprocfs_ns_resources_seq_show(struct seq_file *m, void *v)
struct cfs_hash_bd bd;
int i;
- /* result is not strictly consistant */
+ /* result is not strictly consistent */
cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
res += cfs_hash_bd_count_get(&bd);
return lprocfs_rd_u64(m, &res);
@@ -762,16 +762,9 @@ static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
lock_res(res);
- CERROR("Namespace %s resource refcount nonzero "
- "(%d) after lock cleanup; forcing "
- "cleanup.\n",
- ldlm_ns_name(ldlm_res_to_ns(res)),
- atomic_read(&res->lr_refcount) - 1);
-
- CERROR("Resource: %p ("LPU64"/"LPU64"/"LPU64"/"
- LPU64") (rc: %d)\n", res,
- res->lr_name.name[0], res->lr_name.name[1],
- res->lr_name.name[2], res->lr_name.name[3],
+ CERROR("%s: namespace resource "DLDLMRES
+ " (%p) refcount nonzero (%d) after lock cleanup; forcing cleanup.\n",
+ ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
atomic_read(&res->lr_refcount) - 1);
ldlm_resource_dump(D_ERROR, res);
@@ -881,7 +874,7 @@ void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
/*
* With all requests dropped and the import inactive
- * we are gaurenteed all reference will be dropped.
+ * we are guaranteed all reference will be dropped.
*/
rc = __ldlm_namespace_free(ns, 1);
LASSERT(rc == 0);
@@ -1403,10 +1396,8 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
if (!((libcfs_debug | D_ERROR) & level))
return;
- CDEBUG(level, "--- Resource: %p ("LPU64"/"LPU64"/"LPU64"/"LPU64
- ") (rc: %d)\n", res, res->lr_name.name[0], res->lr_name.name[1],
- res->lr_name.name[2], res->lr_name.name[3],
- atomic_read(&res->lr_refcount));
+ CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
+ PLDLMRES(res), res, atomic_read(&res->lr_refcount));
if (!list_empty(&res->lr_granted)) {
CDEBUG(level, "Granted locks (in reverse order):\n");
diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c
index 9b9c45116eee..f30c84f195aa 100644
--- a/drivers/staging/lustre/lustre/libcfs/debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/debug.c
@@ -47,44 +47,44 @@
static char debug_file_name[1024];
unsigned int libcfs_subsystem_debug = ~0;
-CFS_MODULE_PARM(libcfs_subsystem_debug, "i", int, 0644,
- "Lustre kernel debug subsystem mask");
+module_param(libcfs_subsystem_debug, int, 0644);
+MODULE_PARM_DESC(libcfs_subsystem_debug, "Lustre kernel debug subsystem mask");
EXPORT_SYMBOL(libcfs_subsystem_debug);
unsigned int libcfs_debug = (D_CANTMASK |
D_NETERROR | D_HA | D_CONFIG | D_IOCTL);
-CFS_MODULE_PARM(libcfs_debug, "i", int, 0644,
- "Lustre kernel debug mask");
+module_param(libcfs_debug, int, 0644);
+MODULE_PARM_DESC(libcfs_debug, "Lustre kernel debug mask");
EXPORT_SYMBOL(libcfs_debug);
unsigned int libcfs_debug_mb = 0;
-CFS_MODULE_PARM(libcfs_debug_mb, "i", uint, 0644,
- "Total debug buffer size.");
+module_param(libcfs_debug_mb, uint, 0644);
+MODULE_PARM_DESC(libcfs_debug_mb, "Total debug buffer size.");
EXPORT_SYMBOL(libcfs_debug_mb);
unsigned int libcfs_printk = D_CANTMASK;
-CFS_MODULE_PARM(libcfs_printk, "i", uint, 0644,
- "Lustre kernel debug console mask");
+module_param(libcfs_printk, uint, 0644);
+MODULE_PARM_DESC(libcfs_printk, "Lustre kernel debug console mask");
EXPORT_SYMBOL(libcfs_printk);
unsigned int libcfs_console_ratelimit = 1;
-CFS_MODULE_PARM(libcfs_console_ratelimit, "i", uint, 0644,
- "Lustre kernel debug console ratelimit (0 to disable)");
+module_param(libcfs_console_ratelimit, uint, 0644);
+MODULE_PARM_DESC(libcfs_console_ratelimit, "Lustre kernel debug console ratelimit (0 to disable)");
EXPORT_SYMBOL(libcfs_console_ratelimit);
unsigned int libcfs_console_max_delay;
-CFS_MODULE_PARM(libcfs_console_max_delay, "l", uint, 0644,
- "Lustre kernel debug console max delay (jiffies)");
+module_param(libcfs_console_max_delay, uint, 0644);
+MODULE_PARM_DESC(libcfs_console_max_delay, "Lustre kernel debug console max delay (jiffies)");
EXPORT_SYMBOL(libcfs_console_max_delay);
unsigned int libcfs_console_min_delay;
-CFS_MODULE_PARM(libcfs_console_min_delay, "l", uint, 0644,
- "Lustre kernel debug console min delay (jiffies)");
+module_param(libcfs_console_min_delay, uint, 0644);
+MODULE_PARM_DESC(libcfs_console_min_delay, "Lustre kernel debug console min delay (jiffies)");
EXPORT_SYMBOL(libcfs_console_min_delay);
unsigned int libcfs_console_backoff = CDEBUG_DEFAULT_BACKOFF;
-CFS_MODULE_PARM(libcfs_console_backoff, "i", uint, 0644,
- "Lustre kernel debug console backoff factor");
+module_param(libcfs_console_backoff, uint, 0644);
+MODULE_PARM_DESC(libcfs_console_backoff, "Lustre kernel debug console backoff factor");
EXPORT_SYMBOL(libcfs_console_backoff);
unsigned int libcfs_debug_binary = 1;
@@ -103,8 +103,8 @@ unsigned int libcfs_watchdog_ratelimit = 300;
EXPORT_SYMBOL(libcfs_watchdog_ratelimit);
unsigned int libcfs_panic_on_lbug = 1;
-CFS_MODULE_PARM(libcfs_panic_on_lbug, "i", uint, 0644,
- "Lustre kernel panic on LBUG");
+module_param(libcfs_panic_on_lbug, uint, 0644);
+MODULE_PARM_DESC(libcfs_panic_on_lbug, "Lustre kernel panic on LBUG");
EXPORT_SYMBOL(libcfs_panic_on_lbug);
atomic_t libcfs_kmemory = ATOMIC_INIT(0);
@@ -116,9 +116,9 @@ char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT;
/* We need to pass a pointer here, but elsewhere this must be a const */
char *libcfs_debug_file_path;
-CFS_MODULE_PARM(libcfs_debug_file_path, "s", charp, 0644,
- "Path for dumping debug logs, "
- "set 'NONE' to prevent log dumping");
+module_param(libcfs_debug_file_path, charp, 0644);
+MODULE_PARM_DESC(libcfs_debug_file_path,
+ "Path for dumping debug logs, set 'NONE' to prevent log dumping");
int libcfs_panic_in_progress;
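
The debug.c hunks above all follow the same mechanical conversion: the private CFS_MODULE_PARM(var, fmt, type, perm, desc) wrapper becomes the stock module_param(var, type, perm) plus MODULE_PARM_DESC(var, desc) pair. A minimal, self-contained module using the resulting idiom (hypothetical parameter name, not taken from the patch):

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/moduleparam.h>

	/* Hypothetical tunable, exposed as
	 * /sys/module/parm_demo/parameters/demo_verbose with mode 0644. */
	static unsigned int demo_verbose = 1;
	module_param(demo_verbose, uint, 0644);
	MODULE_PARM_DESC(demo_verbose, "enable verbose logging (0 to disable)");

	static int __init parm_demo_init(void)
	{
		pr_info("parm_demo: verbose=%u\n", demo_verbose);
		return 0;
	}

	static void __exit parm_demo_exit(void)
	{
	}

	module_init(parm_demo_init);
	module_exit(parm_demo_exit);
	MODULE_LICENSE("GPL");
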
diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c
index e3e0578b27f9..6d2b455d1be4 100644
--- a/drivers/staging/lustre/lustre/libcfs/hash.c
+++ b/drivers/staging/lustre/lustre/libcfs/hash.c
@@ -51,11 +51,11 @@
* - move all stuff to libcfs
* - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
* - ignore hs_rwlock if without CFS_HASH_REHASH setting
- * - buckets are allocated one by one(intead of contiguous memory),
+ * - buckets are allocated one by one(instead of contiguous memory),
* to avoid unnecessary cacheline conflict
*
* 2010-03-01: Liang Zhen <zhen.liang@sun.com>
- * - "bucket" is a group of hlist_head now, user can speicify bucket size
+ * - "bucket" is a group of hlist_head now, user can specify bucket size
* by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
* one lock for reducing memory overhead.
*
@@ -112,8 +112,8 @@
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static unsigned int warn_on_depth = 8;
-CFS_MODULE_PARM(warn_on_depth, "i", uint, 0644,
- "warning when hash depth is high.");
+module_param(warn_on_depth, uint, 0644);
+MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
#endif
struct cfs_wi_sched *cfs_sched_rehash;
@@ -1386,7 +1386,7 @@ cfs_hash_for_each_enter(struct cfs_hash *hs)
/*
* NB: it's race on cfs_has_t::hs_iterating, but doesn't matter
* because it's just an unreliable signal to rehash-thread,
- * rehash-thread will try to finsih rehash ASAP when seeing this.
+ * rehash-thread will try to finish rehash ASAP when seeing this.
*/
hs->hs_iterating = 1;
@@ -1394,7 +1394,7 @@ cfs_hash_for_each_enter(struct cfs_hash *hs)
hs->hs_iterators++;
/* NB: iteration is mostly called by service thread,
- * we tend to cancel pending rehash-requst, instead of
+ * we tend to cancel pending rehash-request, instead of
* blocking service thread, we will relaunch rehash request
* after iteration */
if (cfs_hash_is_rehashing(hs))
diff --git a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c b/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
index 74a0db5c154a..7b2c31599327 100644
--- a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
+++ b/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
@@ -193,7 +193,7 @@ EXPORT_SYMBOL(libcfs_kkuc_msg_put);
/* Broadcast groups are global across all mounted filesystems;
* i.e. registering for a group on 1 fs will get messages for that
* group from any fs */
-/** A single group reigstration has a uid and a file pointer */
+/** A single group registration has a uid and a file pointer */
struct kkuc_reg {
struct list_head kr_chain;
int kr_uid;
@@ -206,7 +206,7 @@ static DECLARE_RWSEM(kg_sem);
/** Add a receiver to a broadcast group
* @param filp pipe to write into
- * @param uid identidier for this receiver
+ * @param uid identifier for this receiver
* @param group group number
*/
int libcfs_kkuc_group_add(struct file *filp, int uid, int group, __u32 data)
@@ -330,9 +330,8 @@ int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
down_read(&kg_sem);
list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
- if (reg->kr_fp != NULL) {
+ if (reg->kr_fp != NULL)
rc = cb_func(reg->kr_data, cb_arg);
- }
}
up_read(&kg_sem);
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
index 00ab8fdc1053..58bb256ee047 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
@@ -47,7 +47,8 @@
* >1 : specify number of partitions
*/
static int cpu_npartitions;
-CFS_MODULE_PARM(cpu_npartitions, "i", int, 0444, "# of CPU partitions");
+module_param(cpu_npartitions, int, 0444);
+MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");
/**
* modparam for setting CPU partitions patterns:
@@ -61,7 +62,8 @@ CFS_MODULE_PARM(cpu_npartitions, "i", int, 0444, "# of CPU partitions");
* NB: If user specified cpu_pattern, cpu_npartitions will be ignored
*/
static char *cpu_pattern = "";
-CFS_MODULE_PARM(cpu_pattern, "s", charp, 0444, "CPU partitions pattern");
+module_param(cpu_pattern, charp, 0444);
+MODULE_PARM_DESC(cpu_pattern, "CPU partitions pattern");
struct cfs_cpt_data {
/* serialize hotplug etc */
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
index 0bf8e5d87f1a..a2ef64c3403d 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
@@ -140,18 +140,6 @@ int cfs_capable(cfs_cap_t cap)
return capable(cfs_cap_unpack(cap));
}
-/* Check if task is running in 32-bit API mode, for the purpose of
- * userspace binary interfaces. On 32-bit Linux this is (unfortunately)
- * always true, even if the application is using LARGEFILE64 and 64-bit
- * APIs, because Linux provides no way for the filesystem to know if it
- * is called via 32-bit or 64-bit APIs. Other clients may vary. On
- * 64-bit systems, this will only be true if the binary is calling a
- * 32-bit system call. */
-int current_is_32bit(void)
-{
- return is_compat_task();
-}
-
static int cfs_access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, int write)
{
@@ -311,7 +299,6 @@ EXPORT_SYMBOL(cfs_cap_raised);
EXPORT_SYMBOL(cfs_curproc_cap_pack);
EXPORT_SYMBOL(cfs_curproc_cap_unpack);
EXPORT_SYMBOL(cfs_capable);
-EXPORT_SYMBOL(current_is_32bit);
/*
* Local variables:
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
index cc9829ffbdcb..c7bc7fcccb8e 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
@@ -46,13 +46,10 @@
#include <asm/kgdb.h>
#endif
-#define LINUX_WAITQ(w) ((wait_queue_t *) w)
-#define LINUX_WAITQ_HEAD(w) ((wait_queue_head_t *) w)
-
void
init_waitqueue_entry_current(wait_queue_t *link)
{
- init_waitqueue_entry(LINUX_WAITQ(link), current);
+ init_waitqueue_entry(link, current);
}
EXPORT_SYMBOL(init_waitqueue_entry_current);
@@ -74,9 +71,9 @@ add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link)
{
unsigned long flags;
- spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
- __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
- spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
+ spin_lock_irqsave(&waitq->lock, flags);
+ __add_wait_queue_exclusive(waitq, link);
+ spin_unlock_irqrestore(&waitq->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive_head);
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
index fc6c97749487..e947b9128c58 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
@@ -65,9 +65,7 @@
#include <asm/div64.h>
#include "tracefile.h"
-#ifdef CONFIG_SYSCTL
static ctl_table_header_t *lnet_table_header = NULL;
-#endif
extern char lnet_upcall[1024];
/**
* The path of debug log dump upcall script.
@@ -371,7 +369,6 @@ static ctl_table_t lnet_table[] = {
* to go via /proc for portability.
*/
{
- INIT_CTL_NAME(PSDEV_DEBUG)
.procname = "debug",
.data = &libcfs_debug,
.maxlen = sizeof(int),
@@ -379,7 +376,6 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &proc_dobitmasks,
},
{
- INIT_CTL_NAME(PSDEV_SUBSYSTEM_DEBUG)
.procname = "subsystem_debug",
.data = &libcfs_subsystem_debug,
.maxlen = sizeof(int),
@@ -387,7 +383,6 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &proc_dobitmasks,
},
{
- INIT_CTL_NAME(PSDEV_PRINTK)
.procname = "printk",
.data = &libcfs_printk,
.maxlen = sizeof(int),
@@ -395,7 +390,6 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &proc_dobitmasks,
},
{
- INIT_CTL_NAME(PSDEV_CONSOLE_RATELIMIT)
.procname = "console_ratelimit",
.data = &libcfs_console_ratelimit,
.maxlen = sizeof(int),
@@ -403,21 +397,18 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &proc_dointvec
},
{
- INIT_CTL_NAME(PSDEV_CONSOLE_MAX_DELAY_CS)
.procname = "console_max_delay_centisecs",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_console_max_delay_cs
},
{
- INIT_CTL_NAME(PSDEV_CONSOLE_MIN_DELAY_CS)
.procname = "console_min_delay_centisecs",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_console_min_delay_cs
},
{
- INIT_CTL_NAME(PSDEV_CONSOLE_BACKOFF)
.procname = "console_backoff",
.maxlen = sizeof(int),
.mode = 0644,
@@ -425,7 +416,6 @@ static ctl_table_t lnet_table[] = {
},
{
- INIT_CTL_NAME(PSDEV_DEBUG_PATH)
.procname = "debug_path",
.data = libcfs_debug_file_path_arr,
.maxlen = sizeof(libcfs_debug_file_path_arr),
@@ -434,7 +424,6 @@ static ctl_table_t lnet_table[] = {
},
{
- INIT_CTL_NAME(PSDEV_CPT_TABLE)
.procname = "cpu_partition_table",
.maxlen = 128,
.mode = 0444,
@@ -442,7 +431,6 @@ static ctl_table_t lnet_table[] = {
},
{
- INIT_CTL_NAME(PSDEV_LNET_UPCALL)
.procname = "upcall",
.data = lnet_upcall,
.maxlen = sizeof(lnet_upcall),
@@ -450,7 +438,6 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &proc_dostring,
},
{
- INIT_CTL_NAME(PSDEV_LNET_DEBUG_LOG_UPCALL)
.procname = "debug_log_upcall",
.data = lnet_debug_log_upcall,
.maxlen = sizeof(lnet_debug_log_upcall),
@@ -458,54 +445,44 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &proc_dostring,
},
{
- INIT_CTL_NAME(PSDEV_LNET_MEMUSED)
.procname = "lnet_memused",
.data = (int *)&libcfs_kmemory.counter,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = &proc_dointvec,
- INIT_STRATEGY(&sysctl_intvec)
},
{
- INIT_CTL_NAME(PSDEV_LNET_CATASTROPHE)
.procname = "catastrophe",
.data = &libcfs_catastrophe,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = &proc_dointvec,
- INIT_STRATEGY(&sysctl_intvec)
},
{
- INIT_CTL_NAME(PSDEV_LNET_PANIC_ON_LBUG)
.procname = "panic_on_lbug",
.data = &libcfs_panic_on_lbug,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
- INIT_STRATEGY(&sysctl_intvec)
},
{
- INIT_CTL_NAME(PSDEV_LNET_DUMP_KERNEL)
.procname = "dump_kernel",
.maxlen = 256,
.mode = 0200,
.proc_handler = &proc_dump_kernel,
},
{
- INIT_CTL_NAME(PSDEV_LNET_DAEMON_FILE)
.procname = "daemon_file",
.mode = 0644,
.maxlen = 256,
.proc_handler = &proc_daemon_file,
},
{
- INIT_CTL_NAME(PSDEV_LNET_DEBUG_MB)
.procname = "debug_mb",
.mode = 0644,
.proc_handler = &proc_debug_mb,
},
{
- INIT_CTL_NAME(PSDEV_LNET_WATCHDOG_RATELIMIT)
.procname = "watchdog_ratelimit",
.data = &libcfs_watchdog_ratelimit,
.maxlen = sizeof(int),
@@ -514,7 +491,7 @@ static ctl_table_t lnet_table[] = {
.extra1 = &min_watchdog_ratelimit,
.extra2 = &max_watchdog_ratelimit,
},
- { INIT_CTL_NAME(PSDEV_LNET_FORCE_LBUG)
+ {
.procname = "force_lbug",
.data = NULL,
.maxlen = 0,
@@ -522,7 +499,6 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &libcfs_force_lbug
},
{
- INIT_CTL_NAME(PSDEV_LNET_FAIL_LOC)
.procname = "fail_loc",
.data = &cfs_fail_loc,
.maxlen = sizeof(cfs_fail_loc),
@@ -530,7 +506,6 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &proc_fail_loc
},
{
- INIT_CTL_NAME(PSDEV_LNET_FAIL_VAL)
.procname = "fail_val",
.data = &cfs_fail_val,
.maxlen = sizeof(int),
@@ -538,14 +513,11 @@ static ctl_table_t lnet_table[] = {
.proc_handler = &proc_dointvec
},
{
- INIT_CTL_NAME(0)
}
};
-#ifdef CONFIG_SYSCTL
static ctl_table_t top_table[] = {
{
- INIT_CTL_NAME(CTL_LNET)
.procname = "lnet",
.mode = 0555,
.data = NULL,
@@ -553,26 +525,20 @@ static ctl_table_t top_table[] = {
.child = lnet_table,
},
{
- INIT_CTL_NAME(0)
}
};
-#endif
int insert_proc(void)
{
-#ifdef CONFIG_SYSCTL
if (lnet_table_header == NULL)
lnet_table_header = register_sysctl_table(top_table);
-#endif
return 0;
}
void remove_proc(void)
{
-#ifdef CONFIG_SYSCTL
if (lnet_table_header != NULL)
unregister_sysctl_table(lnet_table_header);
lnet_table_header = NULL;
-#endif
}
diff --git a/drivers/staging/lustre/lustre/libcfs/lwt.c b/drivers/staging/lustre/lustre/libcfs/lwt.c
deleted file mode 100644
index b631f7dde8e7..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/lwt.c
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/lwt.c
- *
- * Author: Eric Barton <eeb@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/libcfs/libcfs.h>
-
-#if LWT_SUPPORT
-
-#if !KLWT_SUPPORT
-int lwt_enabled;
-lwt_cpu_t lwt_cpus[NR_CPUS];
-#endif
-
-int lwt_pages_per_cpu;
-
-/* NB only root is allowed to retrieve LWT info; it's an open door into the
- * kernel... */
-
-int
-lwt_lookup_string (int *size, char *knl_ptr,
- char *user_ptr, int user_size)
-{
- int maxsize = 128;
-
- /* knl_ptr was retrieved from an LWT snapshot and the caller wants to
- * turn it into a string. NB we can crash with an access violation
- * trying to determine the string length, so we're trusting our
- * caller... */
-
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- return (-EPERM);
-
- if (user_size > 0 &&
- maxsize > user_size)
- maxsize = user_size;
-
- *size = strnlen (knl_ptr, maxsize - 1) + 1;
-
- if (user_ptr != NULL) {
- if (user_size < 4)
- return (-EINVAL);
-
- if (copy_to_user (user_ptr, knl_ptr, *size))
- return (-EFAULT);
-
- /* Did I truncate the string? */
- if (knl_ptr[*size - 1] != 0)
- copy_to_user (user_ptr + *size - 4, "...", 4);
- }
-
- return (0);
-}
-
-int
-lwt_control (int enable, int clear)
-{
- lwt_page_t *p;
- int i;
- int j;
-
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- return (-EPERM);
-
- if (!enable) {
- LWT_EVENT(0,0,0,0);
- lwt_enabled = 0;
- mb();
- /* give people some time to stop adding traces */
- schedule_timeout(10);
- }
-
- for (i = 0; i < num_online_cpus(); i++) {
- p = lwt_cpus[i].lwtc_current_page;
-
- if (p == NULL)
- return (-ENODATA);
-
- if (!clear)
- continue;
-
- for (j = 0; j < lwt_pages_per_cpu; j++) {
- memset (p->lwtp_events, 0, PAGE_CACHE_SIZE);
-
- p = list_entry (p->lwtp_list.next,
- lwt_page_t, lwtp_list);
- }
- }
-
- if (enable) {
- lwt_enabled = 1;
- mb();
- LWT_EVENT(0,0,0,0);
- }
-
- return (0);
-}
-
-int
-lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
- void *user_ptr, int user_size)
-{
- const int events_per_page = PAGE_CACHE_SIZE / sizeof(lwt_event_t);
- const int bytes_per_page = events_per_page * sizeof(lwt_event_t);
- lwt_page_t *p;
- int i;
- int j;
-
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- return (-EPERM);
-
- *ncpu = num_online_cpus();
- *total_size = num_online_cpus() * lwt_pages_per_cpu *
- bytes_per_page;
- *now = get_cycles();
-
- if (user_ptr == NULL)
- return (0);
-
- for (i = 0; i < num_online_cpus(); i++) {
- p = lwt_cpus[i].lwtc_current_page;
-
- if (p == NULL)
- return (-ENODATA);
-
- for (j = 0; j < lwt_pages_per_cpu; j++) {
- if (copy_to_user(user_ptr, p->lwtp_events,
- bytes_per_page))
- return (-EFAULT);
-
- user_ptr = ((char *)user_ptr) + bytes_per_page;
- p = list_entry(p->lwtp_list.next,
- lwt_page_t, lwtp_list);
- }
- }
-
- return (0);
-}
-
-int
-lwt_init ()
-{
- int i;
- int j;
-
- for (i = 0; i < num_online_cpus(); i++)
- if (lwt_cpus[i].lwtc_current_page != NULL)
- return (-EALREADY);
-
- LASSERT (!lwt_enabled);
-
- /* NULL pointers, zero scalars */
- memset (lwt_cpus, 0, sizeof (lwt_cpus));
- lwt_pages_per_cpu =
- LWT_MEMORY / (num_online_cpus() * PAGE_CACHE_SIZE);
-
- for (i = 0; i < num_online_cpus(); i++)
- for (j = 0; j < lwt_pages_per_cpu; j++) {
- struct page *page = alloc_page (GFP_KERNEL);
- lwt_page_t *lwtp;
-
- if (page == NULL) {
- CERROR ("Can't allocate page\n");
- lwt_fini ();
- return (-ENOMEM);
- }
-
- LIBCFS_ALLOC(lwtp, sizeof (*lwtp));
- if (lwtp == NULL) {
- CERROR ("Can't allocate lwtp\n");
- __free_page(page);
- lwt_fini ();
- return (-ENOMEM);
- }
-
- lwtp->lwtp_page = page;
- lwtp->lwtp_events = page_address(page);
- memset (lwtp->lwtp_events, 0, PAGE_CACHE_SIZE);
-
- if (j == 0) {
- INIT_LIST_HEAD (&lwtp->lwtp_list);
- lwt_cpus[i].lwtc_current_page = lwtp;
- } else {
- list_add (&lwtp->lwtp_list,
- &lwt_cpus[i].lwtc_current_page->lwtp_list);
- }
- }
-
- lwt_enabled = 1;
- mb();
-
- LWT_EVENT(0,0,0,0);
-
- return (0);
-}
-
-void
-lwt_fini ()
-{
- int i;
-
- lwt_control(0, 0);
-
- for (i = 0; i < num_online_cpus(); i++)
- while (lwt_cpus[i].lwtc_current_page != NULL) {
- lwt_page_t *lwtp = lwt_cpus[i].lwtc_current_page;
-
- if (list_empty (&lwtp->lwtp_list)) {
- lwt_cpus[i].lwtc_current_page = NULL;
- } else {
- lwt_cpus[i].lwtc_current_page =
- list_entry (lwtp->lwtp_list.next,
- lwt_page_t, lwtp_list);
-
- list_del (&lwtp->lwtp_list);
- }
-
- __free_page (lwtp->lwtp_page);
- LIBCFS_FREE (lwtp, sizeof (*lwtp));
- }
-}
-
-EXPORT_SYMBOL(lwt_enabled);
-EXPORT_SYMBOL(lwt_cpus);
-
-EXPORT_SYMBOL(lwt_init);
-EXPORT_SYMBOL(lwt_fini);
-EXPORT_SYMBOL(lwt_lookup_string);
-EXPORT_SYMBOL(lwt_control);
-EXPORT_SYMBOL(lwt_snapshot);
-#endif
diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
index f3108c7f818e..24ae26d5def3 100644
--- a/drivers/staging/lustre/lustre/libcfs/module.c
+++ b/drivers/staging/lustre/lustre/libcfs/module.c
@@ -235,41 +235,6 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
return -EINVAL;
libcfs_debug_mark_buffer(data->ioc_inlbuf1);
return 0;
-#if LWT_SUPPORT
- case IOC_LIBCFS_LWT_CONTROL:
- err = lwt_control ((data->ioc_flags & 1) != 0,
- (data->ioc_flags & 2) != 0);
- break;
-
- case IOC_LIBCFS_LWT_SNAPSHOT: {
- cfs_cycles_t now;
- int ncpu;
- int total_size;
-
- err = lwt_snapshot (&now, &ncpu, &total_size,
- data->ioc_pbuf1, data->ioc_plen1);
- data->ioc_u64[0] = now;
- data->ioc_u32[0] = ncpu;
- data->ioc_u32[1] = total_size;
-
- /* Hedge against broken user/kernel typedefs (e.g. cycles_t) */
- data->ioc_u32[2] = sizeof(lwt_event_t);
- data->ioc_u32[3] = offsetof(lwt_event_t, lwte_where);
-
- if (err == 0 &&
- libcfs_ioctl_popdata(arg, data, sizeof (*data)))
- err = -EFAULT;
- break;
- }
-
- case IOC_LIBCFS_LWT_LOOKUP_STRING:
- err = lwt_lookup_string (&data->ioc_count, data->ioc_pbuf1,
- data->ioc_pbuf2, data->ioc_plen2);
- if (err == 0 &&
- libcfs_ioctl_popdata(arg, data, sizeof (*data)))
- err = -EFAULT;
- break;
-#endif
case IOC_LIBCFS_MEMHOG:
if (pfile->private_data == NULL) {
err = -EINVAL;
@@ -392,17 +357,10 @@ static int init_libcfs_module(void)
if (rc != 0)
goto cleanup_debug;
-#if LWT_SUPPORT
- rc = lwt_init();
- if (rc != 0) {
- CERROR("lwt_init: error %d\n", rc);
- goto cleanup_debug;
- }
-#endif
rc = misc_register(&libcfs_dev);
if (rc) {
CERROR("misc_register: error %d\n", rc);
- goto cleanup_lwt;
+ goto cleanup_cpu;
}
rc = cfs_wi_startup();
@@ -422,7 +380,7 @@ static int init_libcfs_module(void)
rc = cfs_crypto_register();
if (rc) {
- CERROR("cfs_crypto_regster: error %d\n", rc);
+ CERROR("cfs_crypto_register: error %d\n", rc);
goto cleanup_wi;
}
@@ -441,10 +399,8 @@ static int init_libcfs_module(void)
cfs_wi_shutdown();
cleanup_deregister:
misc_deregister(&libcfs_dev);
- cleanup_lwt:
-#if LWT_SUPPORT
- lwt_fini();
-#endif
+cleanup_cpu:
+ cfs_cpu_fini();
cleanup_debug:
libcfs_debug_cleanup();
return rc;
@@ -471,9 +427,6 @@ static void exit_libcfs_module(void)
if (rc)
CERROR("misc_deregister error %d\n", rc);
-#if LWT_SUPPORT
- lwt_fini();
-#endif
cfs_cpu_fini();
if (atomic_read(&libcfs_kmemory) != 0)
diff --git a/drivers/staging/lustre/lustre/libcfs/nidstrings.c b/drivers/staging/lustre/lustre/libcfs/nidstrings.c
index 99c9e9d2493f..732ae5540bf4 100644
--- a/drivers/staging/lustre/lustre/libcfs/nidstrings.c
+++ b/drivers/staging/lustre/lustre/libcfs/nidstrings.c
@@ -56,11 +56,11 @@
*/
static char libcfs_nidstrings[LNET_NIDSTR_COUNT][LNET_NIDSTR_SIZE];
-static int libcfs_nidstring_idx = 0;
+static int libcfs_nidstring_idx;
static spinlock_t libcfs_nidstring_lock;
-void libcfs_init_nidstrings (void)
+void libcfs_init_nidstrings(void)
{
spin_lock_init(&libcfs_nidstring_lock);
}
@@ -69,7 +69,7 @@ void libcfs_init_nidstrings (void)
# define NIDSTR_UNLOCK(f) spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
static char *
-libcfs_next_nidstring (void)
+libcfs_next_nidstring(void)
{
char *str;
unsigned long flags;
@@ -326,6 +326,7 @@ libcfs_isknown_lnd(int type)
{
return libcfs_lnd2netstrfns(type) != NULL;
}
+EXPORT_SYMBOL(libcfs_isknown_lnd);
char *
libcfs_lnd2modname(int lnd)
@@ -334,6 +335,7 @@ libcfs_lnd2modname(int lnd)
return (nf == NULL) ? NULL : nf->nf_modname;
}
+EXPORT_SYMBOL(libcfs_lnd2modname);
char *
libcfs_lnd2str(int lnd)
@@ -348,6 +350,7 @@ libcfs_lnd2str(int lnd)
snprintf(str, LNET_NIDSTR_SIZE, "?%u?", lnd);
return str;
}
+EXPORT_SYMBOL(libcfs_lnd2str);
int
libcfs_str2lnd(const char *str)
@@ -359,6 +362,7 @@ libcfs_str2lnd(const char *str)
return -1;
}
+EXPORT_SYMBOL(libcfs_str2lnd);
char *
libcfs_net2str(__u32 net)
@@ -377,6 +381,7 @@ libcfs_net2str(__u32 net)
return str;
}
+EXPORT_SYMBOL(libcfs_net2str);
char *
libcfs_nid2str(lnet_nid_t nid)
@@ -410,6 +415,7 @@ libcfs_nid2str(lnet_nid_t nid)
return str;
}
+EXPORT_SYMBOL(libcfs_nid2str);
static struct netstrfns *
libcfs_str2net_internal(const char *str, __u32 *net)
@@ -458,6 +464,7 @@ libcfs_str2net(const char *str)
return LNET_NIDNET(LNET_NID_ANY);
}
+EXPORT_SYMBOL(libcfs_str2net);
lnet_nid_t
libcfs_str2nid(const char *str)
@@ -475,7 +482,7 @@ libcfs_str2nid(const char *str)
sep = str + strlen(str);
net = LNET_MKNET(SOCKLND, 0);
nf = libcfs_lnd2netstrfns(SOCKLND);
- LASSERT (nf != NULL);
+ LASSERT(nf != NULL);
}
if (!nf->nf_str2addr(str, (int)(sep - str), &addr))
@@ -483,6 +490,7 @@ libcfs_str2nid(const char *str)
return LNET_MKNID(net, addr);
}
+EXPORT_SYMBOL(libcfs_str2nid);
char *
libcfs_id2str(lnet_process_id_t id)
@@ -500,6 +508,7 @@ libcfs_id2str(lnet_process_id_t id)
(id.pid & ~LNET_PID_USERFLAG), libcfs_nid2str(id.nid));
return str;
}
+EXPORT_SYMBOL(libcfs_id2str);
int
libcfs_str2anynid(lnet_nid_t *nidp, const char *str)
@@ -512,6 +521,7 @@ libcfs_str2anynid(lnet_nid_t *nidp, const char *str)
*nidp = libcfs_str2nid(str);
return *nidp != LNET_NID_ANY;
}
+EXPORT_SYMBOL(libcfs_str2anynid);
/**
* Nid range list syntax.
@@ -765,6 +775,7 @@ cfs_free_nidlist(struct list_head *list)
LIBCFS_FREE(nr, sizeof(struct nidrange));
}
}
+EXPORT_SYMBOL(cfs_free_nidlist);
/**
* Parses nid range list.
@@ -803,6 +814,7 @@ cfs_parse_nidlist(char *str, int len, struct list_head *nidlist)
}
return 1;
}
+EXPORT_SYMBOL(cfs_parse_nidlist);
/*
* Nf_match_addr method for networks using numeric addresses
@@ -848,18 +860,4 @@ int cfs_match_nid(lnet_nid_t nid, struct list_head *nidlist)
}
return 0;
}
-
-
-EXPORT_SYMBOL(libcfs_isknown_lnd);
-EXPORT_SYMBOL(libcfs_lnd2modname);
-EXPORT_SYMBOL(libcfs_lnd2str);
-EXPORT_SYMBOL(libcfs_str2lnd);
-EXPORT_SYMBOL(libcfs_net2str);
-EXPORT_SYMBOL(libcfs_nid2str);
-EXPORT_SYMBOL(libcfs_str2net);
-EXPORT_SYMBOL(libcfs_str2nid);
-EXPORT_SYMBOL(libcfs_id2str);
-EXPORT_SYMBOL(libcfs_str2anynid);
-EXPORT_SYMBOL(cfs_free_nidlist);
-EXPORT_SYMBOL(cfs_parse_nidlist);
EXPORT_SYMBOL(cfs_match_nid);
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c
index f71a3cc63ad8..54290ce6bb43 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.c
@@ -678,6 +678,7 @@ int cfs_tracefile_dump_all_pages(char *filename)
struct file *filp;
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
+ char *buf;
int rc;
DECL_MMSPACE;
@@ -708,8 +709,11 @@ int cfs_tracefile_dump_all_pages(char *filename)
__LASSERT_TAGE_INVARIANT(tage);
- rc = filp_write(filp, page_address(tage->page),
- tage->used, filp_poff(filp));
+ buf = kmap(tage->page);
+ rc = vfs_write(filp, (__force const char __user *)buf,
+ tage->used, &filp->f_pos);
+ kunmap(tage->page);
+
if (rc != (int)tage->used) {
printk(KERN_WARNING "wanted to write %u but wrote "
"%d\n", tage->used, rc);
@@ -721,7 +725,7 @@ int cfs_tracefile_dump_all_pages(char *filename)
cfs_tage_free(tage);
}
MMSPACE_CLOSE;
- rc = filp_fsync(filp);
+ rc = vfs_fsync(filp, 1);
if (rc)
printk(KERN_ERR "sync returns %d\n", rc);
close:
@@ -971,6 +975,7 @@ static int tracefiled(void *arg)
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
struct file *filp;
+ char *buf;
int last_loop = 0;
int rc;
@@ -1020,11 +1025,14 @@ static int tracefiled(void *arg)
if (f_pos >= (off_t)cfs_tracefile_size)
f_pos = 0;
- else if (f_pos > (off_t)filp_size(filp))
- f_pos = filp_size(filp);
+ else if (f_pos > i_size_read(filp->f_dentry->d_inode))
+ f_pos = i_size_read(filp->f_dentry->d_inode);
+
+ buf = kmap(tage->page);
+ rc = vfs_write(filp, (__force const char __user *)buf,
+ tage->used, &f_pos);
+ kunmap(tage->page);
- rc = filp_write(filp, page_address(tage->page),
- tage->used, &f_pos);
if (rc != (int)tage->used) {
printk(KERN_WARNING "wanted to write %u "
"but wrote %d\n", tage->used, rc);
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
index f493e0740004..c76f3cfedab0 100644
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ b/drivers/staging/lustre/lustre/llite/Makefile
@@ -1,12 +1,13 @@
obj-$(CONFIG_LUSTRE_FS) += lustre.o
obj-$(CONFIG_LUSTRE_LLITE_LLOOP) += llite_lloop.o
lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \
- rw.o lproc_llite.o namei.o symlink.o llite_mmap.o \
- xattr.o remote_perm.o llite_rmtacl.o llite_capa.o \
+ rw.o namei.o symlink.o llite_mmap.o \
+ xattr.o xattr_cache.o remote_perm.o llite_rmtacl.o llite_capa.o \
rw26.o super25.o statahead.o \
../lclient/glimpse.o ../lclient/lcommon_cl.o ../lclient/lcommon_misc.o \
vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o
+lustre-$(CONFIG_PROC_FS) += lproc_llite.o
llite_lloop-y := lloop.o
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index e7629be39739..cbd663ed030c 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -404,7 +404,6 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
struct inode *inode = de->d_inode;
struct ll_inode_info *lli = ll_i2info(inode);
struct obd_client_handle **och_p;
- __u64 *och_usecount;
__u64 ibits;
/*
@@ -418,37 +417,32 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
*/
- if (it->it_flags & FMODE_WRITE) {
+ if (it->it_flags & FMODE_WRITE)
och_p = &lli->lli_mds_write_och;
- och_usecount = &lli->lli_open_fd_write_count;
- } else if (it->it_flags & FMODE_EXEC) {
+ else if (it->it_flags & FMODE_EXEC)
och_p = &lli->lli_mds_exec_och;
- och_usecount = &lli->lli_open_fd_exec_count;
- } else {
+ else
och_p = &lli->lli_mds_read_och;
- och_usecount = &lli->lli_open_fd_read_count;
- }
+
/* Check for the proper lock. */
ibits = MDS_INODELOCK_LOOKUP;
if (!ll_have_md_lock(inode, &ibits, LCK_MINMODE))
goto do_lock;
mutex_lock(&lli->lli_och_mutex);
if (*och_p) { /* Everything is open already, do nothing */
- /*(*och_usecount)++; Do not let them steal our open
- handle from under us */
- SET_BUT_UNUSED(och_usecount);
- /* XXX The code above was my original idea, but in case
- we have the handle, but we cannot use it due to later
- checks (e.g. O_CREAT|O_EXCL flags set), nobody
- would decrement counter increased here. So we just
- hope the lock won't be invalidated in between. But
- if it would be, we'll reopen the open request to
- MDS later during file open path */
+ /* The original idea was to keep them from stealing our
+ * open handle from under us by doing (*och_usecount)++ here.
+ * But in case we have the handle yet cannot use it due to
+ * later checks (e.g. O_CREAT|O_EXCL flags set), nobody
+ * would decrement the counter increased here. So we just
+ * hope the lock won't be invalidated in between. But if it
+ * is, we'll reopen the open request to the MDS later,
+ * during the file open path.
+ */
mutex_unlock(&lli->lli_och_mutex);
return 1;
- } else {
- mutex_unlock(&lli->lli_och_mutex);
}
+ mutex_unlock(&lli->lli_och_mutex);
}
if (it->it_op == IT_GETATTR) {
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 1f079034bd8f..52b7731bcc38 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -1086,7 +1086,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
break;
case Q_GETQUOTA:
if (((type == USRQUOTA &&
- uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
+ !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
(type == GRPQUOTA &&
!in_egroup_p(make_kgid(&init_user_ns, id)))) &&
(!cfs_capable(CFS_CAP_SYS_ADMIN) ||
@@ -1809,8 +1809,28 @@ out_rmdir:
return -EFAULT;
}
- rc = obd_iocontrol(cmd, ll_i2mdexp(inode), totalsize,
- hur, NULL);
+ if (hur->hur_request.hr_action == HUA_RELEASE) {
+ const struct lu_fid *fid;
+ struct inode *f;
+ int i;
+
+ for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
+ fid = &hur->hur_user_item[i].hui_fid;
+ f = search_inode_for_lustre(inode->i_sb, fid);
+ if (IS_ERR(f)) {
+ rc = PTR_ERR(f);
+ break;
+ }
+
+ rc = ll_hsm_release(f);
+ iput(f);
+ if (rc != 0)
+ break;
+ }
+ } else {
+ rc = obd_iocontrol(cmd, ll_i2mdexp(inode), totalsize,
+ hur, NULL);
+ }
OBD_FREE_LARGE(hur, totalsize);
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index fb85a58db058..c12821aedc2f 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -115,7 +115,8 @@ out:
static int ll_close_inode_openhandle(struct obd_export *md_exp,
struct inode *inode,
- struct obd_client_handle *och)
+ struct obd_client_handle *och,
+ const __u64 *data_version)
{
struct obd_export *exp = ll_i2mdexp(inode);
struct md_op_data *op_data;
@@ -139,6 +140,13 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
GOTO(out, rc = -ENOMEM); // XXX We leak openhandle and request here.
ll_prepare_close(inode, op_data, och);
+ if (data_version != NULL) {
+ /* Pass in data_version implies release. */
+ op_data->op_bias |= MDS_HSM_RELEASE;
+ op_data->op_data_version = *data_version;
+ op_data->op_lease_handle = och->och_lease_handle;
+ op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
+ }
epoch_close = (op_data->op_flags & MF_EPOCH_CLOSE);
rc = md_close(md_exp, op_data, och->och_mod, &req);
if (rc == -EAGAIN) {
@@ -167,14 +175,20 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
spin_unlock(&lli->lli_lock);
}
- ll_finish_md_op_data(op_data);
-
if (rc == 0) {
rc = ll_objects_destroy(req, inode);
if (rc)
CERROR("inode %lu ll_objects destroy: rc = %d\n",
inode->i_ino, rc);
}
+ if (rc == 0 && op_data->op_bias & MDS_HSM_RELEASE) {
+ struct mdt_body *body;
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ if (!(body->valid & OBD_MD_FLRELEASED))
+ rc = -EBUSY;
+ }
+
+ ll_finish_md_op_data(op_data);
out:
if (exp_connect_som(exp) && !epoch_close &&
@@ -224,7 +238,7 @@ int ll_md_real_close(struct inode *inode, int flags)
if (och) { /* There might be a race and somebody have freed this och
already */
rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
- inode, och);
+ inode, och, NULL);
}
return rc;
@@ -241,6 +255,24 @@ int ll_md_close(struct obd_export *md_exp, struct inode *inode,
if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
ll_put_grouplock(inode, file, fd->fd_grouplock.cg_gid);
+ if (fd->fd_lease_och != NULL) {
+ bool lease_broken;
+
+ /* Usually the lease is not released when the
+ * application crashes; we need to release it here. */
+ rc = ll_lease_close(fd->fd_lease_och, inode, &lease_broken);
+ CDEBUG(rc ? D_ERROR : D_INODE, "Clean up lease "DFID" %d/%d\n",
+ PFID(&lli->lli_fid), rc, lease_broken);
+
+ fd->fd_lease_och = NULL;
+ }
+
+ if (fd->fd_och != NULL) {
+ rc = ll_close_inode_openhandle(md_exp, inode, fd->fd_och, NULL);
+ fd->fd_och = NULL;
+ GOTO(out, rc);
+ }
+
/* Let's see if we have good enough OPEN lock on the file and if
we can skip talking to MDS */
if (file->f_dentry->d_inode) { /* Can this ever be false? */
@@ -277,6 +309,7 @@ int ll_md_close(struct obd_export *md_exp, struct inode *inode,
file, file->f_dentry, file->f_dentry->d_name.name);
}
+out:
LUSTRE_FPRIVATE(file) = NULL;
ll_file_data_put(fd);
ll_capa_close(inode);
@@ -431,22 +464,18 @@ void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch)
}
}
-static int ll_och_fill(struct obd_export *md_exp, struct ll_inode_info *lli,
- struct lookup_intent *it, struct obd_client_handle *och)
+static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
+ struct obd_client_handle *och)
{
struct ptlrpc_request *req = it->d.lustre.it_data;
struct mdt_body *body;
- LASSERT(och);
-
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body != NULL); /* reply already checked out */
-
- memcpy(&och->och_fh, &body->handle, sizeof(body->handle));
+ och->och_fh = body->handle;
+ och->och_fid = body->fid1;
+ och->och_lease_handle.cookie = it->d.lustre.it_lock_handle;
och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
- och->och_fid = lli->lli_fid;
och->och_flags = it->it_flags;
- ll_ioepoch_open(lli, body->ioepoch);
return md_set_open_replay_data(md_exp, och, req);
}
@@ -466,20 +495,17 @@ int ll_local_open(struct file *file, struct lookup_intent *it,
struct mdt_body *body;
int rc;
- rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, lli, it, och);
- if (rc)
+ rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
+ if (rc != 0)
return rc;
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if ((it->it_flags & FMODE_WRITE) &&
- (body->valid & OBD_MD_FLSIZE))
- CDEBUG(D_INODE, "Epoch "LPU64" opened on "DFID"\n",
- lli->lli_ioepoch, PFID(&lli->lli_fid));
+ ll_ioepoch_open(lli, body->ioepoch);
}
LUSTRE_FPRIVATE(file) = fd;
ll_readahead_init(inode, &fd->fd_ras);
- fd->fd_omode = it->it_flags;
+ fd->fd_omode = it->it_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
return 0;
}
@@ -681,6 +707,198 @@ out_openerr:
return rc;
}
+static int ll_md_blocking_lease_ast(struct ldlm_lock *lock,
+ struct ldlm_lock_desc *desc, void *data, int flag)
+{
+ int rc;
+ struct lustre_handle lockh;
+
+ switch (flag) {
+ case LDLM_CB_BLOCKING:
+ ldlm_lock2handle(lock, &lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
+ if (rc < 0) {
+ CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
+ return rc;
+ }
+ break;
+ case LDLM_CB_CANCELING:
+ /* do nothing */
+ break;
+ }
+ return 0;
+}
+
+/**
+ * Acquire a lease and open the file.
+ */
+struct obd_client_handle *ll_lease_open(struct inode *inode, struct file *file,
+ fmode_t fmode, __u64 open_flags)
+{
+ struct lookup_intent it = { .it_op = IT_OPEN };
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct md_op_data *op_data;
+ struct ptlrpc_request *req;
+ struct lustre_handle old_handle = { 0 };
+ struct obd_client_handle *och = NULL;
+ int rc;
+ int rc2;
+
+ if (fmode != FMODE_WRITE && fmode != FMODE_READ)
+ return ERR_PTR(-EINVAL);
+
+ if (file != NULL) {
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct obd_client_handle **och_p;
+ __u64 *och_usecount;
+
+ if (!(fmode & file->f_mode) || (file->f_mode & FMODE_EXEC))
+ return ERR_PTR(-EPERM);
+
+ /* Get the openhandle of the file */
+ rc = -EBUSY;
+ mutex_lock(&lli->lli_och_mutex);
+ if (fd->fd_lease_och != NULL) {
+ mutex_unlock(&lli->lli_och_mutex);
+ return ERR_PTR(rc);
+ }
+
+ if (fd->fd_och == NULL) {
+ if (file->f_mode & FMODE_WRITE) {
+ LASSERT(lli->lli_mds_write_och != NULL);
+ och_p = &lli->lli_mds_write_och;
+ och_usecount = &lli->lli_open_fd_write_count;
+ } else {
+ LASSERT(lli->lli_mds_read_och != NULL);
+ och_p = &lli->lli_mds_read_och;
+ och_usecount = &lli->lli_open_fd_read_count;
+ }
+ if (*och_usecount == 1) {
+ fd->fd_och = *och_p;
+ *och_p = NULL;
+ *och_usecount = 0;
+ rc = 0;
+ }
+ }
+ mutex_unlock(&lli->lli_och_mutex);
+ if (rc < 0) /* more than 1 opener */
+ return ERR_PTR(rc);
+
+ LASSERT(fd->fd_och != NULL);
+ old_handle = fd->fd_och->och_fh;
+ }
+
+ OBD_ALLOC_PTR(och);
+ if (och == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ GOTO(out, rc = PTR_ERR(op_data));
+
+ /* To tell the MDT this openhandle is from the same owner */
+ op_data->op_handle = old_handle;
+
+ it.it_flags = fmode | open_flags;
+ it.it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE;
+ rc = md_intent_lock(sbi->ll_md_exp, op_data, NULL, 0, &it, 0, &req,
+ ll_md_blocking_lease_ast,
+ /* LDLM_FL_NO_LRU: do not put the lease lock into the LRU list,
+ * otherwise it can be cancelled, which may mislead applications
+ * into thinking the lease is broken;
+ * LDLM_FL_EXCL: set this flag so the lock won't be matched by a
+ * normal open in ll_md_blocking_ast(). Otherwise, since
+ * ll_md_blocking_lease_ast doesn't deal with the openhandle, a
+ * normal openhandle would be leaked. */
+ LDLM_FL_NO_LRU | LDLM_FL_EXCL);
+ ll_finish_md_op_data(op_data);
+ if (req != NULL) {
+ ptlrpc_req_finished(req);
+ it_clear_disposition(&it, DISP_ENQ_COMPLETE);
+ }
+ if (rc < 0)
+ GOTO(out_release_it, rc);
+
+ if (it_disposition(&it, DISP_LOOKUP_NEG))
+ GOTO(out_release_it, rc = -ENOENT);
+
+ rc = it_open_error(DISP_OPEN_OPEN, &it);
+ if (rc)
+ GOTO(out_release_it, rc);
+
+ LASSERT(it_disposition(&it, DISP_ENQ_OPEN_REF));
+ ll_och_fill(sbi->ll_md_exp, &it, och);
+
+ if (!it_disposition(&it, DISP_OPEN_LEASE)) /* old server? */
+ GOTO(out_close, rc = -EOPNOTSUPP);
+
+ /* lease already acquired; handle the lease lock */
+ ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
+ if (it.d.lustre.it_lock_mode == 0 ||
+ it.d.lustre.it_lock_bits != MDS_INODELOCK_OPEN) {
+ /* open lock must return for lease */
+ CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
+ PFID(ll_inode2fid(inode)), it.d.lustre.it_lock_mode,
+ it.d.lustre.it_lock_bits);
+ GOTO(out_close, rc = -EPROTO);
+ }
+
+ ll_intent_release(&it);
+ return och;
+
+out_close:
+ rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
+ if (rc2)
+ CERROR("Close openhandle returned %d\n", rc2);
+
+ /* cancel open lock */
+ if (it.d.lustre.it_lock_mode != 0) {
+ ldlm_lock_decref_and_cancel(&och->och_lease_handle,
+ it.d.lustre.it_lock_mode);
+ it.d.lustre.it_lock_mode = 0;
+ }
+out_release_it:
+ ll_intent_release(&it);
+out:
+ OBD_FREE_PTR(och);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL(ll_lease_open);
+
+/**
+ * Release lease and close the file.
+ * It will check whether the lease was ever broken.
+ */
+int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
+ bool *lease_broken)
+{
+ struct ldlm_lock *lock;
+ bool cancelled = true;
+ int rc;
+
+ lock = ldlm_handle2lock(&och->och_lease_handle);
+ if (lock != NULL) {
+ lock_res_and_lock(lock);
+ cancelled = ldlm_is_cancel(lock);
+ unlock_res_and_lock(lock);
+ ldlm_lock_put(lock);
+ }
+
+ CDEBUG(D_INODE, "lease for "DFID" broken? %d\n",
+ PFID(&ll_i2info(inode)->lli_fid), cancelled);
+
+ if (!cancelled)
+ ldlm_cli_cancel(&och->och_lease_handle, 0);
+ if (lease_broken != NULL)
+ *lease_broken = cancelled;
+
+ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och,
+ NULL);
+ return rc;
+}
+EXPORT_SYMBOL(ll_lease_close);
+
/* Fills the obdo with the attributes for the lsm */
static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
struct obd_capa *capa, struct obdo *obdo,
@@ -905,7 +1123,7 @@ out:
cl_io_fini(env, io);
/* If any bit been read/written (result != 0), we just return
* short read/write instead of restart io. */
- if (result == 0 && io->ci_need_restart) {
+ if ((result == 0 || result == -ENODATA) && io->ci_need_restart) {
CDEBUG(D_VFSTRACE, "Restart %s on %s from %lld, count:%zd\n",
iot == CIT_READ ? "read" : "write",
file->f_dentry->d_name.name, *ppos, count);
@@ -930,48 +1148,16 @@ out:
return result;
}
-
-/*
- * XXX: exact copy from kernel code (__generic_file_aio_write_nolock)
- */
-static int ll_file_get_iov_count(const struct iovec *iov,
- unsigned long *nr_segs, size_t *count)
-{
- size_t cnt = 0;
- unsigned long seg;
-
- for (seg = 0; seg < *nr_segs; seg++) {
- const struct iovec *iv = &iov[seg];
-
- /*
- * If any segment has a negative length, or the cumulative
- * length ever wraps negative then return -EINVAL.
- */
- cnt += iv->iov_len;
- if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
- return -EINVAL;
- if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
- continue;
- if (seg == 0)
- return -EFAULT;
- *nr_segs = seg;
- cnt -= iv->iov_len; /* This segment is no good */
- break;
- }
- *count = cnt;
- return 0;
-}
-
static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct lu_env *env;
struct vvp_io_args *args;
- size_t count;
+ size_t count = 0;
ssize_t result;
int refcheck;
- result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ result = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
if (result)
return result;
@@ -1026,11 +1212,11 @@ static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
{
struct lu_env *env;
struct vvp_io_args *args;
- size_t count;
+ size_t count = 0;
ssize_t result;
int refcheck;
- result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ result = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
if (result)
return result;
@@ -1482,12 +1668,11 @@ int ll_release_openhandle(struct dentry *dentry, struct lookup_intent *it)
if (!och)
GOTO(out, rc = -ENOMEM);
- ll_och_fill(ll_i2sbi(inode)->ll_md_exp,
- ll_i2info(inode), it, och);
+ ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
- inode, och);
- out:
+ inode, och, NULL);
+out:
/* this one is in place of ll_file_open */
if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
ptlrpc_req_finished(it->d.lustre.it_data);
@@ -1692,6 +1877,53 @@ out:
return rc;
}
+/*
+ * Trigger an HSM release request for the provided inode.
+ */
+int ll_hsm_release(struct inode *inode)
+{
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ struct obd_client_handle *och = NULL;
+ __u64 data_version = 0;
+ int rc;
+
+
+ CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(&ll_i2info(inode)->lli_fid));
+
+ och = ll_lease_open(inode, NULL, FMODE_WRITE, MDS_OPEN_RELEASE);
+ if (IS_ERR(och))
+ GOTO(out, rc = PTR_ERR(och));
+
+ /* Grab latest data_version and [am]time values */
+ rc = ll_data_version(inode, &data_version, 1);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ GOTO(out, rc = PTR_ERR(env));
+
+ ll_merge_lvb(env, inode);
+ cl_env_nested_put(&nest, env);
+
+ /* Release the file.
+ * NB: lease lock handle is released in mdc_hsm_release_pack() because
+ * we still need it to pack l_remote_handle to MDT. */
+ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och,
+ &data_version);
+ och = NULL;
+
+
+out:
+ if (och != NULL && !IS_ERR(och)) /* close the file */
+ ll_lease_close(och, inode, NULL);
+
+ return rc;
+}
+
struct ll_swap_stack {
struct iattr ia1, ia2;
__u64 dv1, dv2;
@@ -1853,6 +2085,86 @@ free:
return rc;
}
+static int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss)
+{
+ struct md_op_data *op_data;
+ int rc;
+
+ /* Non-root users are forbidden to set or clear flags which are
+ * NOT defined in HSM_USER_MASK. */
+ if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
+ !cfs_capable(CFS_CAP_SYS_ADMIN))
+ return -EPERM;
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, hss);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ rc = obd_iocontrol(LL_IOC_HSM_STATE_SET, ll_i2mdexp(inode),
+ sizeof(*op_data), op_data, NULL);
+
+ ll_finish_md_op_data(op_data);
+
+ return rc;
+}
+
+static int ll_hsm_import(struct inode *inode, struct file *file,
+ struct hsm_user_import *hui)
+{
+ struct hsm_state_set *hss = NULL;
+ struct iattr *attr = NULL;
+ int rc;
+
+
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+ /* set HSM flags */
+ OBD_ALLOC_PTR(hss);
+ if (hss == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ hss->hss_valid = HSS_SETMASK | HSS_ARCHIVE_ID;
+ hss->hss_archive_id = hui->hui_archive_id;
+ hss->hss_setmask = HS_ARCHIVED | HS_EXISTS | HS_RELEASED;
+ rc = ll_hsm_state_set(inode, hss);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ OBD_ALLOC_PTR(attr);
+ if (attr == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ attr->ia_mode = hui->hui_mode & (S_IRWXU | S_IRWXG | S_IRWXO);
+ attr->ia_mode |= S_IFREG;
+ attr->ia_uid = make_kuid(&init_user_ns, hui->hui_uid);
+ attr->ia_gid = make_kgid(&init_user_ns, hui->hui_gid);
+ attr->ia_size = hui->hui_size;
+ attr->ia_mtime.tv_sec = hui->hui_mtime;
+ attr->ia_mtime.tv_nsec = hui->hui_mtime_ns;
+ attr->ia_atime.tv_sec = hui->hui_atime;
+ attr->ia_atime.tv_nsec = hui->hui_atime_ns;
+
+ attr->ia_valid = ATTR_SIZE | ATTR_MODE | ATTR_FORCE |
+ ATTR_UID | ATTR_GID |
+ ATTR_MTIME | ATTR_MTIME_SET |
+ ATTR_ATIME | ATTR_ATIME_SET;
+
+ rc = ll_setattr_raw(file->f_dentry, attr, true);
+ if (rc == -ENODATA)
+ rc = 0;
+
+out:
+ if (hss != NULL)
+ OBD_FREE_PTR(hss);
+
+ if (attr != NULL)
+ OBD_FREE_PTR(attr);
+
+ return rc;
+}
+
long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file->f_dentry->d_inode;
@@ -2014,37 +2326,19 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return rc;
}
case LL_IOC_HSM_STATE_SET: {
- struct md_op_data *op_data;
struct hsm_state_set *hss;
int rc;
OBD_ALLOC_PTR(hss);
if (hss == NULL)
return -ENOMEM;
+
if (copy_from_user(hss, (char *)arg, sizeof(*hss))) {
OBD_FREE_PTR(hss);
return -EFAULT;
}
- /* Non-root users are forbidden to set or clear flags which are
- * NOT defined in HSM_USER_MASK. */
- if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK)
- && !cfs_capable(CFS_CAP_SYS_ADMIN)) {
- OBD_FREE_PTR(hss);
- return -EPERM;
- }
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, hss);
- if (IS_ERR(op_data)) {
- OBD_FREE_PTR(hss);
- return PTR_ERR(op_data);
- }
-
- rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
- op_data, NULL);
-
- ll_finish_md_op_data(op_data);
+ rc = ll_hsm_state_set(inode, hss);
OBD_FREE_PTR(hss);
return rc;
@@ -2075,6 +2369,107 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
OBD_FREE_PTR(hca);
return rc;
}
+ case LL_IOC_SET_LEASE: {
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_client_handle *och = NULL;
+ bool lease_broken;
+ fmode_t mode = 0;
+
+ switch (arg) {
+ case F_WRLCK:
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EPERM;
+ mode = FMODE_WRITE;
+ break;
+ case F_RDLCK:
+ if (!(file->f_mode & FMODE_READ))
+ return -EPERM;
+ mode = FMODE_READ;
+ break;
+ case F_UNLCK:
+ mutex_lock(&lli->lli_och_mutex);
+ if (fd->fd_lease_och != NULL) {
+ och = fd->fd_lease_och;
+ fd->fd_lease_och = NULL;
+ }
+ mutex_unlock(&lli->lli_och_mutex);
+
+ if (och != NULL) {
+ mode = och->och_flags &
+ (FMODE_READ|FMODE_WRITE);
+ rc = ll_lease_close(och, inode, &lease_broken);
+ if (rc == 0 && lease_broken)
+ mode = 0;
+ } else {
+ rc = -ENOLCK;
+ }
+
+ /* return the type of lease or error */
+ return rc < 0 ? rc : (int)mode;
+ default:
+ return -EINVAL;
+ }
+
+ CDEBUG(D_INODE, "Set lease with mode %d\n", mode);
+
+ /* apply for lease */
+ och = ll_lease_open(inode, file, mode, 0);
+ if (IS_ERR(och))
+ return PTR_ERR(och);
+
+ rc = 0;
+ mutex_lock(&lli->lli_och_mutex);
+ if (fd->fd_lease_och == NULL) {
+ fd->fd_lease_och = och;
+ och = NULL;
+ }
+ mutex_unlock(&lli->lli_och_mutex);
+ if (och != NULL) {
+ /* should not happen, since only exclusive leases are supported for now */
+ ll_lease_close(och, inode, &lease_broken);
+ rc = -EBUSY;
+ }
+ return rc;
+ }
+ case LL_IOC_GET_LEASE: {
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ldlm_lock *lock = NULL;
+
+ rc = 0;
+ mutex_lock(&lli->lli_och_mutex);
+ if (fd->fd_lease_och != NULL) {
+ struct obd_client_handle *och = fd->fd_lease_och;
+
+ lock = ldlm_handle2lock(&och->och_lease_handle);
+ if (lock != NULL) {
+ lock_res_and_lock(lock);
+ if (!ldlm_is_cancel(lock))
+ rc = och->och_flags &
+ (FMODE_READ | FMODE_WRITE);
+ unlock_res_and_lock(lock);
+ ldlm_lock_put(lock);
+ }
+ }
+ mutex_unlock(&lli->lli_och_mutex);
+ return rc;
+ }
+ case LL_IOC_HSM_IMPORT: {
+ struct hsm_user_import *hui;
+
+ OBD_ALLOC_PTR(hui);
+ if (hui == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(hui, (void *)arg, sizeof(*hui))) {
+ OBD_FREE_PTR(hui);
+ return -EFAULT;
+ }
+
+ rc = ll_hsm_import(inode, file, hui);
+
+ OBD_FREE_PTR(hui);
+ return rc;
+ }
default: {
int err;
@@ -2435,7 +2830,8 @@ int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode)
}
ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
- struct lustre_handle *lockh, __u64 flags)
+ struct lustre_handle *lockh, __u64 flags,
+ ldlm_mode_t mode)
{
ldlm_policy_data_t policy = { .l_inodebits = {bits}};
struct lu_fid *fid;
@@ -2445,8 +2841,8 @@ ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
rc = md_lock_match(ll_i2mdexp(inode), LDLM_FL_BLOCK_GRANTED|flags,
- fid, LDLM_IBITS, &policy,
- LCK_CR|LCK_CW|LCK_PR|LCK_PW, lockh);
+ fid, LDLM_IBITS, &policy, mode, lockh);
+
return rc;
}
@@ -2581,7 +2977,15 @@ int ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_lvb.lvb_mtime;
LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_lvb.lvb_ctime;
} else {
- rc = ll_glimpse_size(inode);
+ /* In case of restore, the MDT has the right size and has
+ * already sent it back without granting the layout lock;
+ * the inode is up to date, so a glimpse is useless.
+ * Also, to glimpse we need the layout: while a restore is
+ * running the MDT holds the layout lock, so the glimpse
+ * would block until the end of the restore (getattr will block)
+ */
+ if (!(ll_i2info(inode)->lli_flags & LLIF_FILE_RESTORING))
+ rc = ll_glimpse_size(inode);
}
return rc;
}
@@ -2628,6 +3032,38 @@ int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
return ll_getattr_it(mnt, de, &it, stat);
}
+int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ __u64 start, __u64 len)
+{
+ int rc;
+ size_t num_bytes;
+ struct ll_user_fiemap *fiemap;
+ unsigned int extent_count = fieinfo->fi_extents_max;
+
+ num_bytes = sizeof(*fiemap) + (extent_count *
+ sizeof(struct ll_fiemap_extent));
+ OBD_ALLOC_LARGE(fiemap, num_bytes);
+
+ if (fiemap == NULL)
+ return -ENOMEM;
+
+ fiemap->fm_flags = fieinfo->fi_flags;
+ fiemap->fm_extent_count = fieinfo->fi_extents_max;
+ fiemap->fm_start = start;
+ fiemap->fm_length = len;
+ memcpy(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
+ sizeof(struct ll_fiemap_extent));
+
+ rc = ll_do_fiemap(inode, fiemap, num_bytes);
+
+ fieinfo->fi_flags = fiemap->fm_flags;
+ fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents;
+ memcpy(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
+ fiemap->fm_mapped_extents * sizeof(struct ll_fiemap_extent));
+
+ OBD_FREE_LARGE(fiemap, num_bytes);
+ return rc;
+}
struct posix_acl * ll_get_acl(struct inode *inode, int type)
{
@@ -2676,17 +3112,12 @@ int ll_inode_permission(struct inode *inode, int mask)
return rc;
}
-#define READ_METHOD aio_read
-#define READ_FUNCTION ll_file_aio_read
-#define WRITE_METHOD aio_write
-#define WRITE_FUNCTION ll_file_aio_write
-
/* -o localflock - only provides locally consistent flock locks */
struct file_operations ll_file_operations = {
.read = ll_file_read,
- .READ_METHOD = READ_FUNCTION,
+ .aio_read = ll_file_aio_read,
.write = ll_file_write,
- .WRITE_METHOD = WRITE_FUNCTION,
+ .aio_write = ll_file_aio_write,
.unlocked_ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
@@ -2699,9 +3130,9 @@ struct file_operations ll_file_operations = {
struct file_operations ll_file_operations_flock = {
.read = ll_file_read,
- .READ_METHOD = READ_FUNCTION,
+ .aio_read = ll_file_aio_read,
.write = ll_file_write,
- .WRITE_METHOD = WRITE_FUNCTION,
+ .aio_write = ll_file_aio_write,
.unlocked_ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
@@ -2717,9 +3148,9 @@ struct file_operations ll_file_operations_flock = {
/* These are for -o noflock - to return ENOSYS on flock calls */
struct file_operations ll_file_operations_noflock = {
.read = ll_file_read,
- .READ_METHOD = READ_FUNCTION,
+ .aio_read = ll_file_aio_read,
.write = ll_file_write,
- .WRITE_METHOD = WRITE_FUNCTION,
+ .aio_write = ll_file_aio_write,
.unlocked_ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
@@ -2740,6 +3171,7 @@ struct inode_operations ll_file_inode_operations = {
.getxattr = ll_getxattr,
.listxattr = ll_listxattr,
.removexattr = ll_removexattr,
+ .fiemap = ll_fiemap,
.get_acl = ll_get_acl,
};
@@ -3086,7 +3518,8 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
/* mostly layout lock is caching on the local side, so try to match
* it before grabbing layout lock mutex. */
- mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0);
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
+ LCK_CR | LCK_CW | LCK_PR | LCK_PW);
if (mode != 0) { /* hit cached lock */
rc = ll_layout_lock_set(&lockh, mode, inode, gen, false);
if (rc == 0)
@@ -3101,7 +3534,8 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
again:
/* try again. Maybe somebody else has done this. */
- mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0);
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
+ LCK_CR | LCK_CW | LCK_PR | LCK_PW);
if (mode != 0) { /* hit cached lock */
rc = ll_layout_lock_set(&lockh, mode, inode, gen, true);
if (rc == -EAGAIN)
@@ -3150,3 +3584,30 @@ again:
return rc;
}
+
+/**
+ * This function sends a restore request to the MDT.
+ */
+int ll_layout_restore(struct inode *inode)
+{
+ struct hsm_user_request *hur;
+ int len, rc;
+
+ len = sizeof(struct hsm_user_request) +
+ sizeof(struct hsm_user_item);
+ OBD_ALLOC(hur, len);
+ if (hur == NULL)
+ return -ENOMEM;
+
+ hur->hur_request.hr_action = HUA_RESTORE;
+ hur->hur_request.hr_archive_id = 0;
+ hur->hur_request.hr_flags = 0;
+ memcpy(&hur->hur_user_item[0].hui_fid, &ll_i2info(inode)->lli_fid,
+ sizeof(hur->hur_user_item[0].hui_fid));
+ hur->hur_user_item[0].hui_extent.length = -1;
+ hur->hur_request.hr_itemcount = 1;
+ rc = obd_iocontrol(LL_IOC_HSM_REQUEST, cl_i2sbi(inode)->ll_md_exp,
+ len, hur, NULL);
+ OBD_FREE(hur, len);
+ return rc;
+}
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 47e443d90fe1..7ee5c02783f9 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -46,6 +46,8 @@
#include <lclient.h>
#include <lustre_mdc.h>
#include <linux/lustre_intent.h>
+#include <linux/compat.h>
+#include <linux/posix_acl_xattr.h>
#ifndef FMODE_EXEC
#define FMODE_EXEC 0
@@ -124,6 +126,10 @@ enum lli_flags {
LLIF_SRVLOCK = (1 << 5),
/* File data is modified. */
LLIF_DATA_MODIFIED = (1 << 6),
+ /* File is being restored */
+ LLIF_FILE_RESTORING = (1 << 7),
+ /* Xattr cache is attached to the file */
+ LLIF_XATTR_CACHE = (1 << 8),
};
struct ll_inode_info {
@@ -276,8 +282,27 @@ struct ll_inode_info {
struct mutex lli_layout_mutex;
/* valid only inside LAYOUT ibits lock, protected by lli_layout_mutex */
__u32 lli_layout_gen;
+
+ struct rw_semaphore lli_xattrs_list_rwsem;
+ struct mutex lli_xattrs_enq_lock;
+ struct list_head lli_xattrs;/* ll_xattr_entry->xe_list */
};
+int ll_xattr_cache_destroy(struct inode *inode);
+
+int ll_xattr_cache_get(struct inode *inode,
+ const char *name,
+ char *buffer,
+ size_t size,
+ __u64 valid);
+
+int ll_xattr_cache_update(struct inode *inode,
+ const char *name,
+ const char *newval,
+ size_t size,
+ __u64 valid,
+ int flags);
+
/*
* Locking to guarantee consistency of non-atomic updates to long long i_size,
* consistency between file size and KMS.
@@ -399,6 +424,7 @@ enum stats_track_type {
#define LL_SBI_VERBOSE 0x10000 /* verbose mount/umount */
#define LL_SBI_LAYOUT_LOCK 0x20000 /* layout lock support */
#define LL_SBI_USER_FID2PATH 0x40000 /* allow fid2path by unprivileged users */
+#define LL_SBI_XATTR_CACHE 0x80000 /* support for xattr cache */
#define LL_SBI_FLAGS { \
"nolck", \
@@ -406,6 +432,7 @@ enum stats_track_type {
"flock", \
"xattr", \
"acl", \
+ "???", \
"rmt_client", \
"mds_capa", \
"oss_capa", \
@@ -418,7 +445,9 @@ enum stats_track_type {
"agl", \
"verbose", \
"layout", \
- "user_fid2path" }
+ "user_fid2path",\
+ "xattr", \
+}
/* default value for ll_sb_info->contention_time */
#define SBI_DEFAULT_CONTENTION_SECONDS 60
@@ -458,7 +487,8 @@ struct ll_sb_info {
struct lu_fid ll_root_fid; /* root object fid */
int ll_flags;
- int ll_umounting:1;
+ unsigned int ll_umounting:1,
+ ll_xattr_cache_enabled:1;
struct list_head ll_conn_chain; /* per-conn chain of SBs */
struct lustre_client_ocd ll_lco;
@@ -607,10 +637,14 @@ extern struct kmem_cache *ll_file_data_slab;
struct lustre_handle;
struct ll_file_data {
struct ll_readahead_state fd_ras;
- int fd_omode;
struct ccc_grouplock fd_grouplock;
__u64 lfd_pos;
__u32 fd_flags;
+ fmode_t fd_omode;
+ /* openhandle if a lease exists for this file.
+ * lli->lli_och_mutex is borrowed to protect the assignment. */
+ struct obd_client_handle *fd_lease_och;
+ struct obd_client_handle *fd_och;
struct file *fd_file;
/* Indicate whether need to report failure when close.
* true: failure is known, not report again.
@@ -643,7 +677,12 @@ static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
#if BITS_PER_LONG == 32
return 1;
#else
- return unlikely(current_is_32bit() || (sbi->ll_flags & LL_SBI_32BIT_API));
+ return unlikely(
+#ifdef CONFIG_COMPAT
+ is_compat_task() ||
+#endif
+ (sbi->ll_flags & LL_SBI_32BIT_API)
+ );
#endif
}
@@ -663,15 +702,22 @@ int lprocfs_register_mountpoint(struct proc_dir_entry *parent,
void lprocfs_unregister_mountpoint(struct ll_sb_info *sbi);
void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count);
void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars);
+void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
+ struct ll_file_data *file, loff_t pos,
+ size_t count, int rw);
#else
static inline int lprocfs_register_mountpoint(struct proc_dir_entry *parent,
struct super_block *sb, char *osc, char *mdc){return 0;}
static inline void lprocfs_unregister_mountpoint(struct ll_sb_info *sbi) {}
-static void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count) {}
-static void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
+static inline
+void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count) {}
+static inline void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
{
memset(lvars, 0, sizeof(*lvars));
}
+static inline void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
+ struct ll_file_data *file, loff_t pos,
+ size_t count, int rw) {}
#endif
@@ -720,7 +766,8 @@ extern int ll_inode_revalidate_it(struct dentry *, struct lookup_intent *,
extern int ll_have_md_lock(struct inode *inode, __u64 *bits,
ldlm_mode_t l_req_mode);
extern ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
- struct lustre_handle *lockh, __u64 flags);
+ struct lustre_handle *lockh, __u64 flags,
+ ldlm_mode_t mode);
int __ll_inode_revalidate_it(struct dentry *, struct lookup_intent *,
__u64 bits);
int ll_revalidate_nd(struct dentry *dentry, unsigned int flags);
@@ -746,9 +793,6 @@ int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
struct md_open_data **mod);
void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
struct lustre_handle *fh);
-extern void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
- struct ll_file_data *file, loff_t pos,
- size_t count, int rw);
int ll_getattr_it(struct vfsmount *mnt, struct dentry *de,
struct lookup_intent *it, struct kstat *stat);
int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat);
@@ -775,6 +819,12 @@ int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg);
int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg);
int ll_fid2path(struct inode *inode, void *arg);
int ll_data_version(struct inode *inode, __u64 *data_version, int extent_lock);
+int ll_hsm_release(struct inode *inode);
+
+struct obd_client_handle *ll_lease_open(struct inode *inode, struct file *file,
+ fmode_t mode, __u64 flags);
+int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
+ bool *lease_broken);
/* llite/dcache.c */
@@ -801,7 +851,7 @@ void ll_kill_super(struct super_block *sb);
struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
struct inode *ll_inode_from_lock(struct ldlm_lock *lock);
void ll_clear_inode(struct inode *inode);
-int ll_setattr_raw(struct dentry *dentry, struct iattr *attr);
+int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import);
int ll_setattr(struct dentry *de, struct iattr *attr);
int ll_statfs(struct dentry *de, struct kstatfs *sfs);
int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
@@ -1578,5 +1628,9 @@ enum {
int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
int ll_layout_refresh(struct inode *inode, __u32 *gen);
+int ll_layout_restore(struct inode *inode);
+
+int ll_xattr_init(void);
+void ll_xattr_fini(void);
#endif /* LLITE_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index fd584ff7e2df..6cfdb9e4b74b 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -56,6 +56,7 @@
#include "llite_internal.h"
struct kmem_cache *ll_file_data_slab;
+struct proc_dir_entry *proc_lustre_fs_root;
LIST_HEAD(ll_super_blocks);
DEFINE_SPINLOCK(ll_sb_lock);
@@ -209,7 +210,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH|
OBD_CONNECT_EINPROGRESS |
OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
- OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
+ OBD_CONNECT_LAYOUTLOCK |
+ OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE;
if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
data->ocd_connect_flags |= OBD_CONNECT_SOM;
@@ -383,6 +385,17 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
}
+ if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
+ if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
+ LCONSOLE_INFO(
+ "%s: disabling xattr cache due to unknown maximum xattr size.\n",
+ dt);
+ } else {
+ sbi->ll_flags |= LL_SBI_XATTR_CACHE;
+ sbi->ll_xattr_cache_enabled = 1;
+ }
+ }
+
obd = class_name2obd(dt);
if (!obd) {
CERROR("DT %s: not setup or attached\n", dt);
@@ -922,6 +935,9 @@ void ll_lli_init(struct ll_inode_info *lli)
lli->lli_layout_gen = LL_LAYOUT_GEN_NONE;
lli->lli_clob = NULL;
+ init_rwsem(&lli->lli_xattrs_list_rwsem);
+ mutex_init(&lli->lli_xattrs_enq_lock);
+
LASSERT(lli->lli_vfs_inode.i_mode != 0);
if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
mutex_init(&lli->lli_readdir_mutex);
@@ -1194,6 +1210,8 @@ void ll_clear_inode(struct inode *inode)
lli->lli_symlink_name = NULL;
}
+ ll_xattr_cache_destroy(inode);
+
if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
LASSERT(lli->lli_posix_acl == NULL);
if (lli->lli_remote_perms) {
@@ -1346,19 +1364,24 @@ static int ll_setattr_ost(struct inode *inode, struct iattr *attr)
* to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
* I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
* at the same time.
+ *
+ * In case of HSM import, we only set attributes on the MDS.
*/
-int ll_setattr_raw(struct dentry *dentry, struct iattr *attr)
+int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
{
struct inode *inode = dentry->d_inode;
struct ll_inode_info *lli = ll_i2info(inode);
struct md_op_data *op_data = NULL;
struct md_open_data *mod = NULL;
+ bool file_is_released = false;
int rc = 0, rc1 = 0;
- CDEBUG(D_VFSTRACE, "%s: setattr inode %p/fid:"DFID" from %llu to %llu, "
- "valid %x\n", ll_get_fsname(inode->i_sb, NULL, 0), inode,
+ CDEBUG(D_VFSTRACE,
+ "%s: setattr inode %p/fid:"DFID
+ " from %llu to %llu, valid %x, hsm_import %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), inode,
PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size,
- attr->ia_valid);
+ attr->ia_valid, hsm_import);
if (attr->ia_valid & ATTR_SIZE) {
/* Check new size against VFS/VM file size limit and rlimit */
@@ -1436,10 +1459,40 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr)
(attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
op_data->op_flags = MF_EPOCH_OPEN;
+ /* truncate on a released file must fail with -ENODATA,
+ * so the size must not be set on the MDS for a released file,
+ * but other attributes must still be set
+ */
+ if (S_ISREG(inode->i_mode)) {
+ struct lov_stripe_md *lsm;
+ __u32 gen;
+
+ ll_layout_refresh(inode, &gen);
+ lsm = ccc_inode_lsm_get(inode);
+ if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED)
+ file_is_released = true;
+ ccc_inode_lsm_put(inode, lsm);
+ }
+
+ /* if not in HSM import mode, clear the size attribute for a
+ * released file: we clear the attribute sent to the MDT in
+ * op_data, not the original received from the caller in attr,
+ * which is used later to decide the return code */
+ if (file_is_released && (attr->ia_valid & ATTR_SIZE) && !hsm_import)
+ op_data->op_attr.ia_valid &= ~ATTR_SIZE;
+
rc = ll_md_setattr(dentry, op_data, &mod);
if (rc)
GOTO(out, rc);
+ /* truncate fails (only when not an HSM import); other changes succeed */
+ if (file_is_released) {
+ if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
+ GOTO(out, rc = -ENODATA);
+ else
+ GOTO(out, rc = 0);
+ }
+
/* RPC to MDT is sent, cancel data modification flag */
if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
spin_lock(&lli->lli_lock);
@@ -1473,7 +1526,7 @@ out:
if (!S_ISDIR(inode->i_mode)) {
up_write(&lli->lli_trunc_sem);
mutex_lock(&inode->i_mutex);
- if (attr->ia_valid & ATTR_SIZE)
+ if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
inode_dio_wait(inode);
}
@@ -1508,7 +1561,7 @@ int ll_setattr(struct dentry *de, struct iattr *attr)
!(attr->ia_valid & ATTR_KILL_SGID))
attr->ia_valid |= ATTR_KILL_SGID;
- return ll_setattr_raw(de, attr);
+ return ll_setattr_raw(de, attr, false);
}
int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
@@ -1721,7 +1774,9 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
* lock on the client and set LLIF_MDS_SIZE_LOCK holding
* it. */
mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
- &lockh, LDLM_FL_CBPENDING);
+ &lockh, LDLM_FL_CBPENDING,
+ LCK_CR | LCK_CW |
+ LCK_PR | LCK_PW);
if (mode) {
if (lli->lli_flags & (LLIF_DONE_WRITING |
LLIF_EPOCH_PENDING |
@@ -1761,6 +1816,11 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
LASSERT(md->oss_capa);
ll_add_capa(inode, md->oss_capa);
}
+
+ if (body->valid & OBD_MD_TSTATE) {
+ if (body->t_state & MS_RESTORE)
+ lli->lli_flags |= LLIF_FILE_RESTORING;
+ }
}
void ll_read_inode2(struct inode *inode, void *opaque)
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index e2421ea61352..0718905adeb2 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -194,10 +194,10 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
struct cl_object *obj = ll_i2info(inode)->lli_clob;
pgoff_t offset;
int ret;
- int i;
int rw;
obd_count page_count = 0;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
struct bio *bio;
ssize_t bytes;
@@ -220,15 +220,15 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
for (bio = head; bio != NULL; bio = bio->bi_next) {
LASSERT(rw == bio->bi_rw);
- offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
- bio_for_each_segment(bvec, bio, i) {
- BUG_ON(bvec->bv_offset != 0);
- BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
+ offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
+ bio_for_each_segment(bvec, bio, iter) {
+ BUG_ON(bvec.bv_offset != 0);
+ BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
- pages[page_count] = bvec->bv_page;
+ pages[page_count] = bvec.bv_page;
offsets[page_count] = offset;
page_count++;
- offset += bvec->bv_len;
+ offset += bvec.bv_len;
}
LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
}
@@ -313,7 +313,8 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
bio = &lo->lo_bio;
while (*bio && (*bio)->bi_rw == rw) {
CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
- (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
+ (unsigned long long)(*bio)->bi_iter.bi_sector,
+ (*bio)->bi_iter.bi_size,
page_count, (*bio)->bi_vcnt);
if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
break;
@@ -347,7 +348,8 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
goto err;
CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
- (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
+ (unsigned long long)old_bio->bi_iter.bi_sector,
+ old_bio->bi_iter.bi_size);
spin_lock_irq(&lo->lo_lock);
inactive = (lo->lo_state != LLOOP_BOUND);
@@ -367,7 +369,7 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
loop_add_bio(lo, old_bio);
return;
err:
- cfs_bio_io_error(old_bio, old_bio->bi_size);
+ cfs_bio_io_error(old_bio, old_bio->bi_iter.bi_size);
}
@@ -378,7 +380,7 @@ static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
while (bio) {
struct bio *tmp = bio->bi_next;
bio->bi_next = NULL;
- cfs_bio_endio(bio, bio->bi_size, ret);
+ cfs_bio_endio(bio, bio->bi_iter.bi_size, ret);
bio = tmp;
}
}
@@ -856,7 +858,8 @@ static void lloop_exit(void)
module_init(lloop_init);
module_exit(lloop_exit);
-CFS_MODULE_PARM(max_loop, "i", int, 0444, "maximum of lloop_device");
+module_param(max_loop, int, 0444);
+MODULE_PARM_DESC(max_loop, "maximum of lloop_device");
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre virtual block device");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 4bf09c4a0c9d..a9a104a6a4ee 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -42,9 +42,6 @@
#include "llite_internal.h"
-struct proc_dir_entry *proc_lustre_fs_root;
-
-#ifdef LPROCFS
/* /proc/lustre/llite mount point registration */
extern struct file_operations vvp_dump_pgcache_file_ops;
struct file_operations ll_rw_extents_stats_fops;
@@ -723,6 +720,41 @@ static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
}
LPROC_SEQ_FOPS_RO(ll_sbi_flags);
+static int ll_xattr_cache_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ int rc;
+
+ rc = seq_printf(m, "%u\n", sbi->ll_xattr_cache_enabled);
+
+ return rc;
+}
+
+static ssize_t ll_xattr_cache_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct super_block *sb = seq->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ int val, rc;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ if (val != 0 && val != 1)
+ return -ERANGE;
+
+ if (val == 1 && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
+ return -ENOTSUPP;
+
+ sbi->ll_xattr_cache_enabled = val;
+
+ return count;
+}
+LPROC_SEQ_FOPS(ll_xattr_cache);
+
static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
{ "uuid", &ll_sb_uuid_fops, 0, 0 },
//{ "mntpt_path", ll_rd_path, 0, 0 },
@@ -751,6 +783,7 @@ static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
{ "lazystatfs", &ll_lazystatfs_fops, 0 },
{ "max_easize", &ll_maxea_size_fops, 0, 0 },
{ "sbi_flags", &ll_sbi_flags_fops, 0, 0 },
+ { "xattr_cache", &ll_xattr_cache_fops, 0, 0 },
{ 0 }
};
@@ -802,6 +835,7 @@ struct llite_file_opcode {
{ LPROC_LL_ALLOC_INODE, LPROCFS_TYPE_REGS, "alloc_inode" },
{ LPROC_LL_SETXATTR, LPROCFS_TYPE_REGS, "setxattr" },
{ LPROC_LL_GETXATTR, LPROCFS_TYPE_REGS, "getxattr" },
+ { LPROC_LL_GETXATTR_HITS, LPROCFS_TYPE_REGS, "getxattr_hits" },
{ LPROC_LL_LISTXATTR, LPROCFS_TYPE_REGS, "listxattr" },
{ LPROC_LL_REMOVEXATTR, LPROCFS_TYPE_REGS, "removexattr" },
{ LPROC_LL_INODE_PERM, LPROCFS_TYPE_REGS, "inode_permission" },
@@ -1367,4 +1401,3 @@ void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
lvars->module_vars = NULL;
lvars->obd_vars = lprocfs_llite_obd_vars;
}
-#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 90bbdae824ac..fc8d264f6c9a 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -223,6 +223,10 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
break;
LASSERT(lock->l_flags & LDLM_FL_CANCELING);
+
+ if (bits & MDS_INODELOCK_XATTR)
+ ll_xattr_cache_destroy(inode);
+
/* For OPEN locks we differentiate between lock modes
* LCK_CR, LCK_CW, LCK_PR - bug 22891 */
if (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
@@ -233,12 +237,9 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
ll_have_md_lock(inode, &bits, mode);
fid = ll_inode2fid(inode);
- if (lock->l_resource->lr_name.name[0] != fid_seq(fid) ||
- lock->l_resource->lr_name.name[1] != fid_oid(fid) ||
- lock->l_resource->lr_name.name[2] != fid_ver(fid)) {
+ if (!fid_res_name_eq(fid, &lock->l_resource->lr_name))
LDLM_ERROR(lock, "data mismatch with object "
DFID" (%p)", PFID(fid), inode);
- }
if (bits & MDS_INODELOCK_OPEN) {
int flags = 0;
@@ -526,8 +527,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
icbd.icbd_childp = &dentry;
icbd.icbd_parent = parent;
- if (it->it_op & IT_CREAT ||
- (it->it_op & IT_OPEN && it->it_create_mode & O_CREAT))
+ if (it->it_op & IT_CREAT)
opc = LUSTRE_OPC_CREATE;
else
opc = LUSTRE_OPC_ANY;
@@ -626,7 +626,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
return -ENOMEM;
it->it_op = IT_OPEN;
- if (mode) {
+ if (open_flags & O_CREAT) {
it->it_op |= IT_CREAT;
lookup_flags |= LOOKUP_CREATE;
}
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 0beaf4e76b4b..e21e1c760a8e 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -187,11 +187,15 @@ static int __init init_lustre_lite(void)
if (rc == 0)
rc = vvp_global_init();
+ if (rc == 0)
+ rc = ll_xattr_init();
+
return rc;
}
static void __exit exit_lustre_lite(void)
{
+ ll_xattr_fini();
vvp_global_fini();
del_timer(&ll_capa_timer);
ll_capa_thread_stop();
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 3ff664ce7503..93cbfbb7e7f7 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -121,8 +121,38 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- CDEBUG(D_VFSTRACE, "ignore/verify layout %d/%d, layout version %d.\n",
- io->ci_ignore_layout, io->ci_verify_layout, cio->cui_layout_gen);
+ CDEBUG(D_VFSTRACE, DFID
+ " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
+ PFID(lu_object_fid(&obj->co_lu)),
+ io->ci_ignore_layout, io->ci_verify_layout,
+ cio->cui_layout_gen, io->ci_restore_needed);
+
+ if (io->ci_restore_needed == 1) {
+ int rc;
+
+ /* the file was detected as released; we need to restore it
+ * before finishing the I/O
+ */
+ rc = ll_layout_restore(ccc_object_inode(obj));
+ /* if restore registration failed, do not restart;
+ * we will return -ENODATA */
+ /* The layout will change after restore, so we need to block
+ * on the layout lock held by the MDT; as the MDT will not
+ * send the new layout in the lvb (see LU-3124), we have to
+ * fetch it explicitly. All of this is done by
+ * ll_layout_refresh()
+ */
+ if (rc == 0) {
+ io->ci_restore_needed = 0;
+ io->ci_need_restart = 1;
+ io->ci_verify_layout = 1;
+ } else {
+ io->ci_restore_needed = 1;
+ io->ci_need_restart = 0;
+ io->ci_verify_layout = 0;
+ io->ci_result = rc;
+ }
+ }
if (!io->ci_ignore_layout && io->ci_verify_layout) {
__u32 gen = 0;
@@ -130,9 +160,17 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
/* check layout version */
ll_layout_refresh(ccc_object_inode(obj), &gen);
io->ci_need_restart = cio->cui_layout_gen != gen;
- if (io->ci_need_restart)
- CDEBUG(D_VFSTRACE, "layout changed from %d to %d.\n",
- cio->cui_layout_gen, gen);
+ if (io->ci_need_restart) {
+ CDEBUG(D_VFSTRACE,
+ DFID" layout changed from %d to %d.\n",
+ PFID(lu_object_fid(&obj->co_lu)),
+ cio->cui_layout_gen, gen);
+ /* today a successful restore is the only
+ * possible case */
+ /* restore was done, so clear the restoring state */
+ ll_i2info(ccc_object_inode(obj))->lli_flags &=
+ ~LLIF_FILE_RESTORING;
+ }
}
}
@@ -590,8 +628,11 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
if (vmf->page) {
- LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
- vmf->virtual_address);
+ CDEBUG(D_PAGE,
+ "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n",
+ vmf->page, vmf->page->mapping, vmf->page->index,
+ (long)vmf->page->flags, page_count(vmf->page),
+ page_private(vmf->page), vmf->virtual_address);
if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
lock_page(vmf->page);
cfio->fault.ft_flags &= VM_FAULT_LOCKED;
@@ -1111,6 +1152,12 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CDEBUG(D_VFSTRACE, DFID
+ " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
+ PFID(lu_object_fid(&obj->co_lu)),
+ io->ci_ignore_layout, io->ci_verify_layout,
+ cio->cui_layout_gen, io->ci_restore_needed);
+
CL_IO_SLICE_CLEAN(cio, cui_cl);
cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
vio->cui_ra_window_set = 0;
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index 33173fce478f..25973dedd9a2 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -138,7 +138,7 @@ int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
lli->lli_layout_gen,
conf->u.coc_md->lsm->lsm_layout_gen);
- lli->lli_has_smd = true;
+ lli->lli_has_smd = lsm_has_objects(conf->u.coc_md->lsm);
lli->lli_layout_gen = conf->u.coc_md->lsm->lsm_layout_gen;
} else {
CDEBUG(D_VFSTRACE, "layout lock destroyed: %u.\n",
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index bcf86bac30a9..3a7d03c12dd9 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -109,12 +109,12 @@ int ll_setxattr_common(struct inode *inode, const char *name,
int flags, __u64 valid)
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req;
+ struct ptlrpc_request *req = NULL;
int xattr_type, rc;
struct obd_capa *oc;
+ struct rmtacl_ctl_entry *rce = NULL;
#ifdef CONFIG_FS_POSIX_ACL
posix_acl_xattr_header *new_value = NULL;
- struct rmtacl_ctl_entry *rce = NULL;
ext_acl_xattr_header *acl = NULL;
#endif
const char *pv = value;
@@ -183,11 +183,17 @@ int ll_setxattr_common(struct inode *inode, const char *name,
valid |= rce_ops2valid(rce->rce_ops);
}
#endif
- oc = ll_mdscapa_get(inode);
- rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
- valid, name, pv, size, 0, flags, ll_i2suppgid(inode),
- &req);
- capa_put(oc);
+ if (sbi->ll_xattr_cache_enabled &&
+ (rce == NULL || rce->rce_ops == RMT_LSETFACL)) {
+ rc = ll_xattr_cache_update(inode, name, pv, size, valid, flags);
+ } else {
+ oc = ll_mdscapa_get(inode);
+ rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
+ valid, name, pv, size, 0, flags,
+ ll_i2suppgid(inode), &req);
+ capa_put(oc);
+ }
+
#ifdef CONFIG_FS_POSIX_ACL
if (new_value != NULL)
lustre_posix_acl_xattr_free(new_value, size);
@@ -352,48 +358,54 @@ int ll_getxattr_common(struct inode *inode, const char *name,
#endif
do_getxattr:
- oc = ll_mdscapa_get(inode);
- rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
- valid | (rce ? rce_ops2valid(rce->rce_ops) : 0),
- name, NULL, 0, size, 0, &req);
- capa_put(oc);
- if (rc) {
- if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
- LCONSOLE_INFO("Disabling user_xattr feature because "
- "it is not supported on the server\n");
- sbi->ll_flags &= ~LL_SBI_USER_XATTR;
- }
- return rc;
- }
+ if (sbi->ll_xattr_cache_enabled && (rce == NULL ||
+ rce->rce_ops == RMT_LGETFACL ||
+ rce->rce_ops == RMT_LSETFACL)) {
+ rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
+ if (rc < 0)
+ GOTO(out_xattr, rc);
+ } else {
+ oc = ll_mdscapa_get(inode);
+ rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
+ valid | (rce ? rce_ops2valid(rce->rce_ops) : 0),
+ name, NULL, 0, size, 0, &req);
+ capa_put(oc);
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body);
+ if (rc < 0)
+ GOTO(out_xattr, rc);
- /* only detect the xattr size */
- if (size == 0)
- GOTO(out, rc = body->eadatasize);
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ LASSERT(body);
- if (size < body->eadatasize) {
- CERROR("server bug: replied size %u > %u\n",
- body->eadatasize, (int)size);
- GOTO(out, rc = -ERANGE);
- }
+ /* only detect the xattr size */
+ if (size == 0)
+ GOTO(out, rc = body->eadatasize);
+
+ if (size < body->eadatasize) {
+ CERROR("server bug: replied size %u > %u\n",
+ body->eadatasize, (int)size);
+ GOTO(out, rc = -ERANGE);
+ }
- if (body->eadatasize == 0)
- GOTO(out, rc = -ENODATA);
+ if (body->eadatasize == 0)
+ GOTO(out, rc = -ENODATA);
- /* do not need swab xattr data */
- xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
- body->eadatasize);
- if (!xdata)
- GOTO(out, rc = -EFAULT);
+ /* do not need swab xattr data */
+ xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
+ body->eadatasize);
+ if (!xdata)
+ GOTO(out, rc = -EFAULT);
+
+ memcpy(buffer, xdata, body->eadatasize);
+ rc = body->eadatasize;
+ }
#ifdef CONFIG_FS_POSIX_ACL
- if (body->eadatasize >= 0 && rce && rce->rce_ops == RMT_LSETFACL) {
+ if (rce && rce->rce_ops == RMT_LSETFACL) {
ext_acl_xattr_header *acl;
- acl = lustre_posix_acl_xattr_2ext((posix_acl_xattr_header *)xdata,
- body->eadatasize);
+ acl = lustre_posix_acl_xattr_2ext(
+ (posix_acl_xattr_header *)buffer, rc);
if (IS_ERR(acl))
GOTO(out, rc = PTR_ERR(acl));
@@ -406,12 +418,12 @@ do_getxattr:
}
#endif
- if (body->eadatasize == 0) {
- rc = -ENODATA;
- } else {
- LASSERT(buffer);
- memcpy(buffer, xdata, body->eadatasize);
- rc = body->eadatasize;
+out_xattr:
+ if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
+ LCONSOLE_INFO(
+ "%s: disabling user_xattr feature because it is not supported on the server: rc = %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), rc);
+ sbi->ll_flags &= ~LL_SBI_USER_XATTR;
}
out:
ptlrpc_req_finished(req);
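With the cache in place, ll_setxattr_common() and ll_getxattr_common() now choose between the write-through cache and a direct MDS RPC. The predicate is the one visible in the hunks above: the cache must be enabled on the superblock, and remote-ACL operations other than the local get/set FACL cases still bypass it. A small sketch of just that decision follows (plain C; the enum and helper names are illustrative and not part of the Lustre API):

#include <stdbool.h>

/* illustrative stand-in for rce->rce_ops; RCE_NONE models rce == NULL */
enum rce_op { RCE_NONE, RMT_LGETFACL, RMT_LSETFACL, RMT_RGETFACL, RMT_RSETFACL };

/* getxattr path: cache when enabled and the op is plain or a local FACL op */
static bool use_cache_for_getxattr(bool cache_enabled, enum rce_op op)
{
        return cache_enabled &&
               (op == RCE_NONE || op == RMT_LGETFACL || op == RMT_LSETFACL);
}

/* setxattr path: cache when enabled and the op is plain or a local set-FACL */
static bool use_cache_for_setxattr(bool cache_enabled, enum rce_op op)
{
        return cache_enabled && (op == RCE_NONE || op == RMT_LSETFACL);
}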
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
new file mode 100644
index 000000000000..3e3be1f13502
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -0,0 +1,617 @@
+/*
+ * Copyright 2012 Xyratex Technology Limited
+ *
+ * Author: Andrew Perepechko <Andrew_Perepechko@xyratex.com>
+ *
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <obd_support.h>
+#include <lustre_lite.h>
+#include <lustre_dlm.h>
+#include <lustre_ver.h>
+#include "llite_internal.h"
+
+/* If we ever have hundreds of extended attributes, we might want to consider
+ * using a hash or a tree structure instead of a list for faster lookups.
+ */
+struct ll_xattr_entry {
+ struct list_head xe_list; /* protected with
+ * lli_xattrs_list_rwsem */
+ char *xe_name; /* xattr name, \0-terminated */
+ char *xe_value; /* xattr value */
+ unsigned xe_namelen; /* strlen(xe_name) + 1 */
+ unsigned xe_vallen; /* xattr value length */
+};
+
+static struct kmem_cache *xattr_kmem;
+static struct lu_kmem_descr xattr_caches[] = {
+ {
+ .ckd_cache = &xattr_kmem,
+ .ckd_name = "xattr_kmem",
+ .ckd_size = sizeof(struct ll_xattr_entry)
+ },
+ {
+ .ckd_cache = NULL
+ }
+};
+
+int ll_xattr_init(void)
+{
+ return lu_kmem_init(xattr_caches);
+}
+
+void ll_xattr_fini(void)
+{
+ lu_kmem_fini(xattr_caches);
+}
+
+/**
+ * Initializes xattr cache for an inode.
+ *
+ * This initializes the xattr list and marks cache presence.
+ */
+static void ll_xattr_cache_init(struct ll_inode_info *lli)
+{
+
+
+ LASSERT(lli != NULL);
+
+ INIT_LIST_HEAD(&lli->lli_xattrs);
+ lli->lli_flags |= LLIF_XATTR_CACHE;
+}
+
+/**
+ * This looks for a specific extended attribute.
+ *
+ * Find in @cache and return @xattr_name attribute in @xattr,
+ * for the NULL @xattr_name return the first cached @xattr.
+ *
+ * \retval 0 success
+ * \retval -ENODATA if not found
+ */
+static int ll_xattr_cache_find(struct list_head *cache,
+ const char *xattr_name,
+ struct ll_xattr_entry **xattr)
+{
+ struct ll_xattr_entry *entry;
+
+
+
+ list_for_each_entry(entry, cache, xe_list) {
+ /* xattr_name == NULL means look for any entry */
+ if (xattr_name == NULL ||
+ strcmp(xattr_name, entry->xe_name) == 0) {
+ *xattr = entry;
+ CDEBUG(D_CACHE, "find: [%s]=%.*s\n",
+ entry->xe_name, entry->xe_vallen,
+ entry->xe_value);
+ return 0;
+ }
+ }
+
+ return -ENODATA;
+}
+
+/**
+ * This adds or updates an xattr.
+ *
+ * Add @xattr_name attr with @xattr_val value and @xattr_val_len length,
+ * if the attribute already exists, then update its value.
+ *
+ * \retval 0 success
+ * \retval -ENOMEM if no memory could be allocated for the cached attr
+ */
+static int ll_xattr_cache_add(struct list_head *cache,
+ const char *xattr_name,
+ const char *xattr_val,
+ unsigned xattr_val_len)
+{
+ struct ll_xattr_entry *xattr;
+
+
+
+ if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
+ /* Found a cached EA, update it */
+
+ if (xattr_val_len != xattr->xe_vallen) {
+ char *val;
+ OBD_ALLOC(val, xattr_val_len);
+ if (val == NULL) {
+ CDEBUG(D_CACHE,
+ "failed to allocate %u bytes for xattr %s update\n",
+ xattr_val_len, xattr_name);
+ return -ENOMEM;
+ }
+ OBD_FREE(xattr->xe_value, xattr->xe_vallen);
+ xattr->xe_value = val;
+ xattr->xe_vallen = xattr_val_len;
+ }
+ memcpy(xattr->xe_value, xattr_val, xattr_val_len);
+
+ CDEBUG(D_CACHE, "update: [%s]=%.*s\n", xattr_name,
+ xattr_val_len, xattr_val);
+
+ return 0;
+ }
+
+ OBD_SLAB_ALLOC_PTR_GFP(xattr, xattr_kmem, __GFP_IO);
+ if (xattr == NULL) {
+ CDEBUG(D_CACHE, "failed to allocate xattr\n");
+ return -ENOMEM;
+ }
+
+ xattr->xe_namelen = strlen(xattr_name) + 1;
+
+ OBD_ALLOC(xattr->xe_name, xattr->xe_namelen);
+ if (!xattr->xe_name) {
+ CDEBUG(D_CACHE, "failed to alloc xattr name %u\n",
+ xattr->xe_namelen);
+ goto err_name;
+ }
+ OBD_ALLOC(xattr->xe_value, xattr_val_len);
+ if (!xattr->xe_value) {
+ CDEBUG(D_CACHE, "failed to alloc xattr value %d\n",
+ xattr_val_len);
+ goto err_value;
+ }
+
+ memcpy(xattr->xe_name, xattr_name, xattr->xe_namelen);
+ memcpy(xattr->xe_value, xattr_val, xattr_val_len);
+ xattr->xe_vallen = xattr_val_len;
+ list_add(&xattr->xe_list, cache);
+
+ CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name,
+ xattr_val_len, xattr_val);
+
+ return 0;
+err_value:
+ OBD_FREE(xattr->xe_name, xattr->xe_namelen);
+err_name:
+ OBD_SLAB_FREE_PTR(xattr, xattr_kmem);
+
+ return -ENOMEM;
+}
+
+/**
+ * This removes an extended attribute from cache.
+ *
+ * Remove @xattr_name attribute from @cache.
+ *
+ * \retval 0 success
+ * \retval -ENODATA if @xattr_name is not cached
+ */
+static int ll_xattr_cache_del(struct list_head *cache,
+ const char *xattr_name)
+{
+ struct ll_xattr_entry *xattr;
+
+
+
+ CDEBUG(D_CACHE, "del xattr: %s\n", xattr_name);
+
+ if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
+ list_del(&xattr->xe_list);
+ OBD_FREE(xattr->xe_name, xattr->xe_namelen);
+ OBD_FREE(xattr->xe_value, xattr->xe_vallen);
+ OBD_SLAB_FREE_PTR(xattr, xattr_kmem);
+
+ return 0;
+ }
+
+ return -ENODATA;
+}
+
+/**
+ * This iterates cached extended attributes.
+ *
+ * Walk over cached attributes in @cache and
+ * fill in @xld_buffer or only calculate buffer
+ * size if @xld_buffer is NULL.
+ *
+ * \retval >= 0 buffer list size
+ * \retval -ERANGE if the list does not fit into the @xld_size buffer
+ */
+static int ll_xattr_cache_list(struct list_head *cache,
+ char *xld_buffer,
+ int xld_size)
+{
+ struct ll_xattr_entry *xattr, *tmp;
+ int xld_tail = 0;
+
+
+
+ list_for_each_entry_safe(xattr, tmp, cache, xe_list) {
+ CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n",
+ xld_buffer, xld_tail, xattr->xe_name);
+
+ if (xld_buffer) {
+ xld_size -= xattr->xe_namelen;
+ if (xld_size < 0)
+ break;
+ memcpy(&xld_buffer[xld_tail],
+ xattr->xe_name, xattr->xe_namelen);
+ }
+ xld_tail += xattr->xe_namelen;
+ }
+
+ if (xld_size < 0)
+ return -ERANGE;
+
+ return xld_tail;
+}
+
+/**
+ * Check if the xattr cache is initialized (filled).
+ *
+ * \retval 0 @cache is not initialized
+ * \retval 1 @cache is initialized
+ */
+int ll_xattr_cache_valid(struct ll_inode_info *lli)
+{
+ return !!(lli->lli_flags & LLIF_XATTR_CACHE);
+}
+
+/**
+ * This finalizes the xattr cache.
+ *
+ * Free all xattr memory. @lli is the inode info pointer.
+ *
+ * \retval 0 no error occurred
+ */
+static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
+{
+
+
+ if (!ll_xattr_cache_valid(lli))
+ return 0;
+
+ while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
+ ; /* empty loop */
+ lli->lli_flags &= ~LLIF_XATTR_CACHE;
+
+ return 0;
+}
+
+int ll_xattr_cache_destroy(struct inode *inode)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ int rc;
+
+
+
+ down_write(&lli->lli_xattrs_list_rwsem);
+ rc = ll_xattr_cache_destroy_locked(lli);
+ up_write(&lli->lli_xattrs_list_rwsem);
+
+ return rc;
+}
+
+/**
+ * Match or enqueue a PR or PW LDLM lock.
+ *
+ * Find or request an LDLM lock with xattr data.
+ * Since LDLM does not provide an API for atomic match_or_enqueue,
+ * the function handles it with a separate enq lock.
+ * If successful, the function exits with the list lock held.
+ *
+ * \retval 0 no error occurred
+ * \retval -ENOMEM not enough memory
+ */
+static int ll_xattr_find_get_lock(struct inode *inode,
+ struct lookup_intent *oit,
+ struct ptlrpc_request **req)
+{
+ ldlm_mode_t mode;
+ struct lustre_handle lockh = { 0 };
+ struct md_op_data *op_data;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ldlm_enqueue_info einfo = { .ei_type = LDLM_IBITS,
+ .ei_mode = it_to_lock_mode(oit),
+ .ei_cb_bl = ll_md_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast };
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct obd_export *exp = sbi->ll_md_exp;
+ int rc;
+
+
+
+ mutex_lock(&lli->lli_xattrs_enq_lock);
+ /* Try matching first. */
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
+ oit->it_op == IT_SETXATTR ? LCK_PW :
+ (LCK_PR | LCK_PW));
+ if (mode != 0) {
+ /* fake oit in mdc_revalidate_lock() manner */
+ oit->d.lustre.it_lock_handle = lockh.cookie;
+ oit->d.lustre.it_lock_mode = mode;
+ goto out;
+ }
+
+ /* Enqueue if the lock isn't cached locally. */
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data)) {
+ mutex_unlock(&lli->lli_xattrs_enq_lock);
+ return PTR_ERR(op_data);
+ }
+
+ op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS |
+ OBD_MD_FLXATTRLOCKED;
+#ifdef CONFIG_FS_POSIX_ACL
+ /* If working with ACLs, we would like to cache local ACLs */
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
+ op_data->op_valid |= OBD_MD_FLRMTLGETFACL;
+#endif
+
+ rc = md_enqueue(exp, &einfo, oit, op_data, &lockh, NULL, 0, NULL, 0);
+ ll_finish_md_op_data(op_data);
+
+ if (rc < 0) {
+ CDEBUG(D_CACHE,
+ "md_intent_lock failed with %d for fid "DFID"\n",
+ rc, PFID(ll_inode2fid(inode)));
+ mutex_unlock(&lli->lli_xattrs_enq_lock);
+ return rc;
+ }
+
+ *req = (struct ptlrpc_request *)oit->d.lustre.it_data;
+out:
+ down_write(&lli->lli_xattrs_list_rwsem);
+ mutex_unlock(&lli->lli_xattrs_enq_lock);
+
+ return 0;
+}
+
+/**
+ * Refill the xattr cache.
+ *
+ * Fetch and cache the whole of xattrs for @inode, acquiring
+ * a read or a write xattr lock depending on operation in @oit.
+ * Intent is dropped on exit unless the operation is setxattr.
+ *
+ * \retval 0 no error occurred
+ * \retval -EPROTO network protocol error
+ * \retval -ENOMEM not enough memory for the cache
+ */
+static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ptlrpc_request *req = NULL;
+ const char *xdata, *xval, *xtail, *xvtail;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct mdt_body *body;
+ __u32 *xsizes;
+ int rc = 0, i;
+
+
+
+ rc = ll_xattr_find_get_lock(inode, oit, &req);
+ if (rc)
+ GOTO(out_no_unlock, rc);
+
+ /* Do we have the data at this point? */
+ if (ll_xattr_cache_valid(lli)) {
+ ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1);
+ GOTO(out_maybe_drop, rc = 0);
+ }
+
+ /* Matched but no cache? Cancelled on error by a parallel refill. */
+ if (unlikely(req == NULL)) {
+ CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n");
+ GOTO(out_maybe_drop, rc = -EIO);
+ }
+
+ if (oit->d.lustre.it_status < 0) {
+ CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
+ oit->d.lustre.it_status, PFID(ll_inode2fid(inode)));
+ GOTO(out_destroy, rc = oit->d.lustre.it_status);
+ }
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ if (body == NULL) {
+ CERROR("no MDT BODY in the refill xattr reply\n");
+ GOTO(out_destroy, rc = -EPROTO);
+ }
+ /* do not need swab xattr data */
+ xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
+ body->eadatasize);
+ xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS,
+ body->aclsize);
+ xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
+ body->max_mdsize * sizeof(__u32));
+ if (xdata == NULL || xval == NULL || xsizes == NULL) {
+ CERROR("wrong setxattr reply\n");
+ GOTO(out_destroy, rc = -EPROTO);
+ }
+
+ xtail = xdata + body->eadatasize;
+ xvtail = xval + body->aclsize;
+
+ CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail);
+
+ ll_xattr_cache_init(lli);
+
+ for (i = 0; i < body->max_mdsize; i++) {
+ CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
+ /* Perform consistency checks: attr names and vals in pill */
+ if (memchr(xdata, 0, xtail - xdata) == NULL) {
+ CERROR("xattr protocol violation (names are broken)\n");
+ rc = -EPROTO;
+ } else if (xval + *xsizes > xvtail) {
+ CERROR("xattr protocol violation (vals are broken)\n");
+ rc = -EPROTO;
+ } else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) {
+ rc = -ENOMEM;
+ } else {
+ rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
+ *xsizes);
+ }
+ if (rc < 0) {
+ ll_xattr_cache_destroy_locked(lli);
+ GOTO(out_destroy, rc);
+ }
+ xdata += strlen(xdata) + 1;
+ xval += *xsizes;
+ xsizes++;
+ }
+
+ if (xdata != xtail || xval != xvtail)
+ CERROR("a hole in xattr data\n");
+
+ ll_set_lock_data(sbi->ll_md_exp, inode, oit, NULL);
+
+ GOTO(out_maybe_drop, rc);
+out_maybe_drop:
+ /* drop lock on error or getxattr */
+ if (rc != 0 || oit->it_op != IT_SETXATTR)
+ ll_intent_drop_lock(oit);
+
+ if (rc != 0)
+ up_write(&lli->lli_xattrs_list_rwsem);
+out_no_unlock:
+ ptlrpc_req_finished(req);
+
+ return rc;
+
+out_destroy:
+ up_write(&lli->lli_xattrs_list_rwsem);
+
+ ldlm_lock_decref_and_cancel((struct lustre_handle *)
+ &oit->d.lustre.it_lock_handle,
+ oit->d.lustre.it_lock_mode);
+
+ goto out_no_unlock;
+}
+
+/**
+ * Get an xattr value or list xattrs using the write-through cache.
+ *
+ * Get the xattr value (@valid has OBD_MD_FLXATTR set) of @name or
+ * list xattr names (@valid has OBD_MD_FLXATTRLS set) for @inode.
+ * The resulting value/list is stored in @buffer if the former
+ * is not larger than @size.
+ *
+ * \retval 0 no error occurred
+ * \retval -EPROTO network protocol error
+ * \retval -ENOMEM not enough memory for the cache
+ * \retval -ERANGE the buffer is not large enough
+ * \retval -ENODATA no such attr or the list is empty
+ */
+int ll_xattr_cache_get(struct inode *inode,
+ const char *name,
+ char *buffer,
+ size_t size,
+ __u64 valid)
+{
+ struct lookup_intent oit = { .it_op = IT_GETXATTR };
+ struct ll_inode_info *lli = ll_i2info(inode);
+ int rc = 0;
+
+
+
+ LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRLS));
+
+ down_read(&lli->lli_xattrs_list_rwsem);
+ if (!ll_xattr_cache_valid(lli)) {
+ up_read(&lli->lli_xattrs_list_rwsem);
+ rc = ll_xattr_cache_refill(inode, &oit);
+ if (rc)
+ return rc;
+ downgrade_write(&lli->lli_xattrs_list_rwsem);
+ } else {
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR_HITS, 1);
+ }
+
+ if (valid & OBD_MD_FLXATTR) {
+ struct ll_xattr_entry *xattr;
+
+ rc = ll_xattr_cache_find(&lli->lli_xattrs, name, &xattr);
+ if (rc == 0) {
+ rc = xattr->xe_vallen;
+ /* zero size means only the value size is requested, returned in rc */
+ if (size != 0) {
+ if (size >= xattr->xe_vallen)
+ memcpy(buffer, xattr->xe_value,
+ xattr->xe_vallen);
+ else
+ rc = -ERANGE;
+ }
+ }
+ } else if (valid & OBD_MD_FLXATTRLS) {
+ rc = ll_xattr_cache_list(&lli->lli_xattrs,
+ size ? buffer : NULL, size);
+ }
+
+ GOTO(out, rc);
+out:
+ up_read(&lli->lli_xattrs_list_rwsem);
+
+ return rc;
+}
+
+
+/**
+ * Set/update an xattr value or remove xattr using the write-through cache.
+ *
+ * Set/update the xattr value (if @valid has OBD_MD_FLXATTR) of @name to @newval
+ * or
+ * remove the xattr @name (@valid has OBD_MD_FLXATTRRM set) from @inode.
+ * @flags is either XATTR_CREATE or XATTR_REPLACE as defined by setxattr(2)
+ *
+ * \retval 0 no error occurred
+ * \retval -EPROTO network protocol error
+ * \retval -ENOMEM not enough memory for the cache
+ * \retval -ERANGE the buffer is not large enough
+ * \retval -ENODATA no such attr (in the removal case)
+ */
+int ll_xattr_cache_update(struct inode *inode,
+ const char *name,
+ const char *newval,
+ size_t size,
+ __u64 valid,
+ int flags)
+{
+ struct lookup_intent oit = { .it_op = IT_SETXATTR };
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ptlrpc_request *req = NULL;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_capa *oc;
+ int rc;
+
+
+
+ LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRRM));
+
+ rc = ll_xattr_cache_refill(inode, &oit);
+ if (rc)
+ return rc;
+
+ oc = ll_mdscapa_get(inode);
+ rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
+ valid | OBD_MD_FLXATTRLOCKED, name, newval,
+ size, 0, flags, ll_i2suppgid(inode), &req);
+ capa_put(oc);
+
+ if (rc) {
+ ll_intent_drop_lock(&oit);
+ GOTO(out, rc);
+ }
+
+ if (valid & OBD_MD_FLXATTR)
+ rc = ll_xattr_cache_add(&lli->lli_xattrs, name, newval, size);
+ else if (valid & OBD_MD_FLXATTRRM)
+ rc = ll_xattr_cache_del(&lli->lli_xattrs, name);
+
+ ll_intent_drop_lock(&oit);
+ GOTO(out, rc);
+out:
+ up_write(&lli->lli_xattrs_list_rwsem);
+ ptlrpc_req_finished(req);
+
+ return rc;
+}
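The new xattr_cache.c keeps all attributes of an inode on a plain linked list, as its header comment notes, trading lookup speed for simplicity while the number of xattrs per file stays small. Below is a userspace sketch of the same find-then-update-or-insert pattern used by ll_xattr_cache_find()/ll_xattr_cache_add(); it is standard C with a singly linked list instead of the kernel's list_head, and all names are illustrative.

#include <stdlib.h>
#include <string.h>

struct xattr_entry {
        struct xattr_entry *next;
        char *name;                  /* NUL-terminated xattr name */
        char *value;                 /* raw value bytes */
        size_t vallen;
};

/* linear lookup; NULL corresponds to the kernel's -ENODATA result */
static struct xattr_entry *cache_find(struct xattr_entry *head, const char *name)
{
        for (struct xattr_entry *e = head; e != NULL; e = e->next)
                if (strcmp(e->name, name) == 0)
                        return e;
        return NULL;
}

/* add or update; returns 0 on success, -1 on allocation failure (-ENOMEM) */
static int cache_add(struct xattr_entry **head, const char *name,
                     const char *val, size_t vallen)
{
        struct xattr_entry *e = cache_find(*head, name);
        char *copy = malloc(vallen);

        if (copy == NULL)
                return -1;
        memcpy(copy, val, vallen);

        if (e != NULL) {             /* existing entry: replace the value */
                free(e->value);
                e->value = copy;
                e->vallen = vallen;
                return 0;
        }

        e = calloc(1, sizeof(*e));   /* new entry: allocate name and value */
        if (e == NULL) {
                free(copy);
                return -1;
        }
        e->name = strdup(name);
        if (e->name == NULL) {
                free(copy);
                free(e);
                return -1;
        }
        e->value = copy;
        e->vallen = vallen;
        e->next = *head;
        *head = e;
        return 0;
}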
diff --git a/drivers/staging/lustre/lustre/lmv/Makefile b/drivers/staging/lustre/lustre/lmv/Makefile
index 8cc81ade126c..9162ef724aea 100644
--- a/drivers/staging/lustre/lustre/lmv/Makefile
+++ b/drivers/staging/lustre/lustre/lmv/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_LUSTRE_FS) += lmv.o
-lmv-y := lmv_obd.o lmv_intent.o lmv_fld.o lproc_lmv.o
-
+lmv-y := lmv_obd.o lmv_intent.o lmv_fld.o
+lmv-$(CONFIG_PROC_FS) += lproc_lmv.o
ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
index 0b2d38d1362b..fd6b5ec61d8a 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
@@ -37,7 +37,6 @@
#define DEBUG_SUBSYSTEM S_LMV
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pagemap.h>
#include <asm/div64.h>
#include <linux/seq_file.h>
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index 511b3b4b699b..56dedceaf0a0 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -37,7 +37,6 @@
#define DEBUG_SUBSYSTEM S_LMV
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pagemap.h>
#include <asm/div64.h>
#include <linux/seq_file.h>
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index c2866046fc38..1bddd8f62fbf 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -628,7 +628,7 @@ static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
rc = obd_fid_fini(tgt->ltd_exp->exp_obd);
if (rc)
- CERROR("Can't finanize fids factory\n");
+ CERROR("Can't finalize fids factory\n");
CDEBUG(D_INFO, "Disconnected from %s(%s) successfully\n",
tgt->ltd_exp->exp_obd->obd_name,
@@ -712,7 +712,7 @@ repeat_fid2path:
GOTO(out_fid2path, rc);
/* If remote_gf != NULL, it means just building the
- * path on the remote MDT, copy this path segement to gf */
+ * path on the remote MDT, copy this path segment to gf */
if (remote_gf != NULL) {
struct getinfo_fid2path *ori_gf;
char *ptr;
@@ -1212,7 +1212,7 @@ static int lmv_placement_policy(struct obd_device *obd,
/**
* If stripe_offset is provided during setdirstripe
- * (setdirstripe -i xx), xx MDS will be choosen.
+ * (setdirstripe -i xx), xx MDS will be chosen.
*/
if (op_data->op_cli_flags & CLI_SET_MEA) {
struct lmv_user_md *lum;
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
index edb5a3a99d57..b355d01410e4 100644
--- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
+++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
@@ -41,10 +41,6 @@
#include <lprocfs_status.h>
#include <obd_class.h>
-#ifndef LPROCFS
-static struct lprocfs_vars lprocfs_module_vars[] = { {0} };
-static struct lprocfs_vars lprocfs_obd_vars[] = { {0} };
-#else
static int lmv_numobd_seq_show(struct seq_file *m, void *v)
{
struct obd_device *dev = (struct obd_device *)m->private;
@@ -226,7 +222,6 @@ struct file_operations lmv_proc_target_fops = {
.release = seq_release,
};
-#endif /* LPROCFS */
void lprocfs_lmv_init_vars(struct lprocfs_static_vars *lvars)
{
lvars->module_vars = lprocfs_lmv_module_vars;
diff --git a/drivers/staging/lustre/lustre/lov/Makefile b/drivers/staging/lustre/lustre/lov/Makefile
index 67eaec29bef1..9a5f26d5558d 100644
--- a/drivers/staging/lustre/lustre/lov/Makefile
+++ b/drivers/staging/lustre/lustre/lov/Makefile
@@ -1,8 +1,9 @@
obj-$(CONFIG_LUSTRE_FS) += lov.o
-lov-y := lov_log.o lov_obd.o lov_pack.o lproc_lov.o lov_offset.o lov_merge.o \
+lov-y := lov_log.o lov_obd.o lov_pack.o lov_offset.o lov_merge.o \
lov_request.o lov_ea.o lov_dev.o lov_object.o lov_page.o \
lov_lock.o lov_io.o lovsub_dev.o lovsub_object.o lovsub_page.o \
lovsub_lock.o lovsub_io.o lov_pool.o
+lov-$(CONFIG_PROC_FS) += lproc_lov.o
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 4276124d92e9..3965d5e4e725 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -168,6 +168,22 @@ enum lov_layout_type {
LLT_NR
};
+static inline char *llt2str(enum lov_layout_type llt)
+{
+ switch (llt) {
+ case LLT_EMPTY:
+ return "EMPTY";
+ case LLT_RAID0:
+ return "RAID0";
+ case LLT_RELEASED:
+ return "RELEASED";
+ case LLT_NR:
+ LBUG();
+ }
+ LBUG();
+ return "";
+}
+
/**
* lov-specific file state.
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index 796da8930876..2b22a03c038e 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -283,8 +283,8 @@ void lsm_free_plain(struct lov_stripe_md *lsm);
int lovea_destroy_object(struct lov_obd *lov, struct lov_stripe_md *lsm,
struct obdo *oa, void *data);
/* lproc_lov.c */
-extern struct file_operations lov_proc_target_fops;
#ifdef LPROCFS
+extern const struct file_operations lov_proc_target_fops;
void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars);
#else
static inline void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars)
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 2792fa5c4be2..5a6ab70ed0a1 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -947,14 +947,23 @@ int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
LASSERTF(0, "invalid type %d\n", io->ci_type);
case CIT_MISC:
case CIT_FSYNC:
- result = +1;
+ result = 1;
break;
case CIT_SETATTR:
+ /* the truncate to 0 is managed by MDT:
+ * - in open, for open O_TRUNC
+ * - in setattr, for truncate
+ */
+ /* the truncate is for size > 0 so triggers a restore */
+ if (cl_io_is_trunc(io))
+ io->ci_restore_needed = 1;
+ result = -ENODATA;
+ break;
case CIT_READ:
case CIT_WRITE:
case CIT_FAULT:
- /* TODO: need to restore the file. */
- result = -EBADF;
+ io->ci_restore_needed = 1;
+ result = -ENODATA;
break;
}
if (result == 0) {
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
index 26bc719b6dc8..ed2726e523e8 100644
--- a/drivers/staging/lustre/lustre/lov/lov_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lov_lock.c
@@ -71,7 +71,7 @@ static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
/*
* FIXME: We tend to use the subio's env & io to call the sublock
* lock operations because osc lock sometimes stores some control
- * variables in thread's IO infomation(Now only lockless information).
+ * variables in the thread's IO information (now only lockless information).
* However, if the lock's host(object) is different from the object
* for current IO, we have no way to get the subenv and subio because
* they are not initialized at all. As a temp fix, in this case,
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index d204fedea348..9defa55d9919 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -156,7 +156,7 @@ int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
kms = lov_size_to_stripe(lsm, size, stripe);
CDEBUG(D_INODE,
"stripe %d KMS %sing "LPU64"->"LPU64"\n",
- stripe, kms > loi->loi_kms ? "increas":"shrink",
+ stripe, kms > loi->loi_kms ? "increase":"shrink",
loi->loi_kms, kms);
loi_kms_set(loi, loi->loi_lvb.lvb_size = kms);
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 4783450774cd..50a77c5ef69a 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -1174,7 +1174,7 @@ static int lov_getattr_interpret(struct ptlrpc_request_set *rqset,
struct lov_request_set *lovset = (struct lov_request_set *)data;
int err;
- /* don't do attribute merge if this aysnc op failed */
+ /* don't do attribute merge if this async op failed */
if (rc)
atomic_set(&lovset->set_completes, 0);
err = lov_fini_getattr_set(lovset);
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index cf2fa8abfb1d..df8b5b5b7cf4 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -393,13 +393,13 @@ static int lov_print_empty(const struct lu_env *env, void *cookie,
static int lov_print_raid0(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o)
{
- struct lov_object *lov = lu2lov(o);
- struct lov_layout_raid0 *r0 = lov_r0(lov);
- struct lov_stripe_md *lsm = lov->lo_lsm;
- int i;
+ struct lov_object *lov = lu2lov(o);
+ struct lov_layout_raid0 *r0 = lov_r0(lov);
+ struct lov_stripe_md *lsm = lov->lo_lsm;
+ int i;
- (*p)(env, cookie, "stripes: %d, %svalid, lsm{%p 0x%08X %d %u %u}: \n",
- r0->lo_nr, lov->lo_layout_invalid ? "in" : "", lsm,
+ (*p)(env, cookie, "stripes: %d, %s, lsm{%p 0x%08X %d %u %u}:\n",
+ r0->lo_nr, lov->lo_layout_invalid ? "invalid" : "valid", lsm,
lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
lsm->lsm_stripe_count, lsm->lsm_layout_gen);
for (i = 0; i < r0->lo_nr; ++i) {
@@ -408,8 +408,9 @@ static int lov_print_raid0(const struct lu_env *env, void *cookie,
if (r0->lo_sub[i] != NULL) {
sub = lovsub2lu(r0->lo_sub[i]);
lu_object_print(env, cookie, p, sub);
- } else
+ } else {
(*p)(env, cookie, "sub %d absent\n", i);
+ }
}
return 0;
}
@@ -417,7 +418,14 @@ static int lov_print_raid0(const struct lu_env *env, void *cookie,
static int lov_print_released(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o)
{
- (*p)(env, cookie, "released\n");
+ struct lov_object *lov = lu2lov(o);
+ struct lov_stripe_md *lsm = lov->lo_lsm;
+
+ (*p)(env, cookie,
+ "released: %s, lsm{%p 0x%08X %d %u %u}:\n",
+ lov->lo_layout_invalid ? "invalid" : "valid", lsm,
+ lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
+ lsm->lsm_stripe_count, lsm->lsm_layout_gen);
return 0;
}
@@ -662,6 +670,10 @@ static int lov_layout_change(const struct lu_env *unused,
return PTR_ERR(env);
}
+ CDEBUG(D_INODE, DFID" from %s to %s\n",
+ PFID(lu_object_fid(lov2lu(lov))),
+ llt2str(lov->lo_type), llt2str(llt));
+
old_ops = &lov_dispatch[lov->lo_type];
new_ops = &lov_dispatch[llt];
@@ -750,8 +762,9 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
if (conf->u.coc_md != NULL)
lsm = conf->u.coc_md->lsm;
if ((lsm == NULL && lov->lo_lsm == NULL) ||
- (lsm != NULL && lov->lo_lsm != NULL &&
- lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen)) {
+ ((lsm != NULL && lov->lo_lsm != NULL) &&
+ (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
+ (lov->lo_lsm->lsm_pattern == lsm->lsm_pattern))) {
/* same version of layout */
lov->lo_layout_invalid = false;
GOTO(out, result = 0);
@@ -767,6 +780,8 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
out:
lov_conf_unlock(lov);
+ CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
+ PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
return result;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index ec6f6e0572ae..27ed27e6fa6a 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -105,24 +105,22 @@ void lov_dump_lmm(int level, void *lmm)
{
int magic;
- magic = ((struct lov_mds_md_v1 *)(lmm))->lmm_magic;
+ magic = le32_to_cpu(((struct lov_mds_md *)lmm)->lmm_magic);
switch (magic) {
case LOV_MAGIC_V1:
- return lov_dump_lmm_v1(level, (struct lov_mds_md_v1 *)(lmm));
+ lov_dump_lmm_v1(level, (struct lov_mds_md_v1 *)lmm);
+ break;
case LOV_MAGIC_V3:
- return lov_dump_lmm_v3(level, (struct lov_mds_md_v3 *)(lmm));
+ lov_dump_lmm_v3(level, (struct lov_mds_md_v3 *)lmm);
+ break;
default:
- CERROR("Cannot recognize lmm_magic %x", magic);
+ CDEBUG(level, "unrecognized lmm_magic %x, assuming %x\n",
+ magic, LOV_MAGIC_V1);
+ lov_dump_lmm_common(level, lmm);
+ break;
}
- return;
}
-#define LMM_ASSERT(test) \
-do { \
- if (!(test)) lov_dump_lmm(D_ERROR, lmm); \
- LASSERT(test); /* so we know what assertion failed */ \
-} while (0)
-
/* Pack LOV object metadata for disk storage. It is packed in LE byte
* order and is opaque to the networking layer.
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index a1701dfe4083..3bda0c1f3c19 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -453,7 +453,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
INIT_HLIST_NODE(&new_pool->pool_hash);
#ifdef LPROCFS
- /* we need this assert seq_file is not implementated for liblustre */
+ /* we need this assert seq_file is not implemented for liblustre */
/* get ref for /proc file */
lov_pool_getref(new_pool);
new_pool->pool_proc_entry = lprocfs_add_simple(lov->lov_pool_proc_entry,
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index bf324ae2946f..ca81cac9041c 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -835,7 +835,7 @@ int lov_fini_getattr_set(struct lov_request_set *set)
return rc;
}
-/* The callback for osc_getattr_async that finilizes a request info when a
+/* The callback for osc_getattr_async that finalizes a request info when a
* response is received. */
static int cb_getattr_update(void *cookie, int rc)
{
@@ -1017,7 +1017,7 @@ int lov_update_setattr_set(struct lov_request_set *set,
return rc;
}
-/* The callback for osc_setattr_async that finilizes a request info when a
+/* The callback for osc_setattr_async that finalizes a request info when a
* response is received. */
static int cb_setattr_update(void *cookie, int rc)
{
@@ -1140,7 +1140,7 @@ int lov_update_punch_set(struct lov_request_set *set,
return rc;
}
-/* The callback for osc_punch that finilizes a request info when a response
+/* The callback for osc_punch that finalizes a request info when a response
* is received. */
static int cb_update_punch(void *cookie, int rc)
{
@@ -1236,8 +1236,8 @@ int lov_fini_sync_set(struct lov_request_set *set)
return rc;
}
-/* The callback for osc_sync that finilizes a request info when a
- * response is recieved. */
+/* The callback for osc_sync that finalizes a request info when a
+ * response is received. */
static int cb_sync_update(void *cookie, int rc)
{
struct obd_info *oinfo = cookie;
@@ -1407,7 +1407,7 @@ void lov_update_statfs(struct obd_statfs *osfs, struct obd_statfs *lov_sfs,
}
}
-/* The callback for osc_statfs_async that finilizes a request info when a
+/* The callback for osc_statfs_async that finalizes a request info when a
* response is received. */
static int cb_statfs_update(void *cookie, int rc)
{
@@ -1485,7 +1485,7 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
continue;
}
- /* skip targets that have been explicitely disabled by the
+ /* skip targets that have been explicitly disabled by the
* administrator */
if (!lov->lov_tgts[i]->ltd_exp) {
CDEBUG(D_HA, "lov idx %d administratively disabled\n", i);
diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c
index 15744e13a3f2..bd7da56b0713 100644
--- a/drivers/staging/lustre/lustre/lov/lproc_lov.c
+++ b/drivers/staging/lustre/lustre/lov/lproc_lov.c
@@ -41,7 +41,6 @@
#include <linux/seq_file.h>
#include "lov_internal.h"
-#ifdef LPROCFS
static int lov_stripesize_seq_show(struct seq_file *m, void *v)
{
struct obd_device *dev = (struct obd_device *)m->private;
@@ -260,29 +259,29 @@ LPROC_SEQ_FOPS_RO_TYPE(lov, kbytesfree);
LPROC_SEQ_FOPS_RO_TYPE(lov, kbytesavail);
struct lprocfs_vars lprocfs_lov_obd_vars[] = {
- { "uuid", &lov_uuid_fops, 0, 0 },
- { "stripesize", &lov_stripesize_fops, 0 },
- { "stripeoffset", &lov_stripeoffset_fops, 0 },
- { "stripecount", &lov_stripecount_fops, 0 },
- { "stripetype", &lov_stripetype_fops, 0 },
- { "numobd", &lov_numobd_fops, 0, 0 },
- { "activeobd", &lov_activeobd_fops, 0, 0 },
- { "filestotal", &lov_filestotal_fops, 0, 0 },
- { "filesfree", &lov_filesfree_fops, 0, 0 },
- /*{ "filegroups", lprocfs_rd_filegroups, 0, 0 },*/
- { "blocksize", &lov_blksize_fops, 0, 0 },
- { "kbytestotal", &lov_kbytestotal_fops, 0, 0 },
- { "kbytesfree", &lov_kbytesfree_fops, 0, 0 },
- { "kbytesavail", &lov_kbytesavail_fops, 0, 0 },
- { "desc_uuid", &lov_desc_uuid_fops, 0, 0 },
- { 0 }
+ { "uuid", &lov_uuid_fops, NULL, 0 },
+ { "stripesize", &lov_stripesize_fops, NULL },
+ { "stripeoffset", &lov_stripeoffset_fops, NULL },
+ { "stripecount", &lov_stripecount_fops, NULL },
+ { "stripetype", &lov_stripetype_fops, NULL },
+ { "numobd", &lov_numobd_fops, NULL, 0 },
+ { "activeobd", &lov_activeobd_fops, NULL, 0 },
+ { "filestotal", &lov_filestotal_fops, NULL, 0 },
+ { "filesfree", &lov_filesfree_fops, NULL, 0 },
+ /*{ "filegroups", lprocfs_rd_filegroups, NULL, 0 },*/
+ { "blocksize", &lov_blksize_fops, NULL, 0 },
+ { "kbytestotal", &lov_kbytestotal_fops, NULL, 0 },
+ { "kbytesfree", &lov_kbytesfree_fops, NULL, 0 },
+ { "kbytesavail", &lov_kbytesavail_fops, NULL, 0 },
+ { "desc_uuid", &lov_desc_uuid_fops, NULL, 0 },
+ { NULL }
};
LPROC_SEQ_FOPS_RO_TYPE(lov, numrefs);
static struct lprocfs_vars lprocfs_lov_module_vars[] = {
- { "num_refs", &lov_numrefs_fops, 0, 0 },
- { 0 }
+ { "num_refs", &lov_numrefs_fops, NULL, 0 },
+ { NULL }
};
void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars)
@@ -291,11 +290,10 @@ void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars)
lvars->obd_vars = lprocfs_lov_obd_vars;
}
-struct file_operations lov_proc_target_fops = {
+const struct file_operations lov_proc_target_fops = {
.owner = THIS_MODULE,
.open = lov_target_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = lprocfs_seq_release,
};
-#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/lvfs/Makefile b/drivers/staging/lustre/lustre/lvfs/Makefile
index f50b1c574385..e0367c3fc416 100644
--- a/drivers/staging/lustre/lustre/lvfs/Makefile
+++ b/drivers/staging/lustre/lustre/lvfs/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_LUSTRE_FS) += lvfs.o
-lvfs-y := lvfs_linux.o fsfilt.o lvfs_lib.o
+lvfs-y := lvfs_linux.o fsfilt.o
+lvfs-$(CONFIG_PROC_FS) += lvfs_lib.o
ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c b/drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c
deleted file mode 100644
index ee75994003e1..000000000000
--- a/drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c
+++ /dev/null
@@ -1,760 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/lvfs/fsfilt_ext3.c
- *
- * Author: Andreas Dilger <adilger@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FILTER
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/pagemap.h>
-#include <ldiskfs/ldiskfs_config.h>
-#include <ext4/ext4.h>
-#include <ext4/ext4_jbd2.h>
-#include <linux/bitops.h>
-#include <linux/quota.h>
-
-#include <linux/libcfs/libcfs.h>
-#include <lustre_fsfilt.h>
-#include <obd.h>
-#include <linux/lustre_compat25.h>
-#include <linux/lprocfs_status.h>
-
-#include <ext4/ext4_extents.h>
-
-#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
-#define ext3_ext_pblock(ex) ext_pblock((ex))
-#endif
-
-/* for kernels 2.6.18 and later */
-#define FSFILT_SINGLEDATA_TRANS_BLOCKS(sb) EXT3_SINGLEDATA_TRANS_BLOCKS(sb)
-
-#define fsfilt_ext3_ext_insert_extent(handle, inode, path, newext, flag) \
- ext3_ext_insert_extent(handle, inode, path, newext, flag)
-
-#define ext3_mb_discard_inode_preallocations(inode) \
- ext3_discard_preallocations(inode)
-
-#define fsfilt_log_start_commit(journal, tid) jbd2_log_start_commit(journal, tid)
-#define fsfilt_log_wait_commit(journal, tid) jbd2_log_wait_commit(journal, tid)
-
-static struct kmem_cache *fcb_cache;
-
-struct fsfilt_cb_data {
- struct ext4_journal_cb_entry cb_jcb; /* private data - MUST BE FIRST */
- fsfilt_cb_t cb_func; /* MDS/OBD completion function */
- struct obd_device *cb_obd; /* MDS/OBD completion device */
- __u64 cb_last_rcvd; /* MDS/OST last committed operation */
- void *cb_data; /* MDS/OST completion function data */
-};
-
-static char *fsfilt_ext3_get_label(struct super_block *sb)
-{
- return EXT3_SB(sb)->s_es->s_volume_name;
-}
-
-/* kernel has ext4_blocks_for_truncate since linux-3.1.1 */
-# include <ext4/truncate.h>
-
-/*
- * We don't currently need any additional blocks for rmdir and
- * unlink transactions because we are storing the OST oa_id inside
- * the inode (which we will be changing anyways as part of this
- * transaction).
- */
-static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
- int logs)
-{
- /* For updates to the last received file */
- int nblocks = FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb);
- journal_t *journal;
- void *handle;
-
- if (current->journal_info) {
- CDEBUG(D_INODE, "increasing refcount on %p\n",
- current->journal_info);
- goto journal_start;
- }
-
- switch(op) {
- case FSFILT_OP_UNLINK:
- /* delete one file + create/update logs for each stripe */
- nblocks += EXT3_DELETE_TRANS_BLOCKS(inode->i_sb);
- nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
- FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb)) * logs;
- break;
- case FSFILT_OP_CANCEL_UNLINK:
- LASSERT(logs == 1);
-
- /* blocks for log header bitmap update OR
- * blocks for catalog header bitmap update + unlink of logs +
- * blocks for delete the inode (include blocks truncating). */
- nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
- EXT3_DELETE_TRANS_BLOCKS(inode->i_sb) +
- ext4_blocks_for_truncate(inode) + 3;
- break;
- default: CERROR("unknown transaction start op %d\n", op);
- LBUG();
- }
-
- LASSERT(current->journal_info == desc_private);
- journal = EXT3_SB(inode->i_sb)->s_journal;
- if (nblocks > journal->j_max_transaction_buffers) {
- CWARN("too many credits %d for op %ux%u using %d instead\n",
- nblocks, op, logs, journal->j_max_transaction_buffers);
- nblocks = journal->j_max_transaction_buffers;
- }
-
- journal_start:
- LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
- handle = ext3_journal_start(inode, nblocks);
-
- if (!IS_ERR(handle))
- LASSERT(current->journal_info == handle);
- else
- CERROR("error starting handle for op %u (%u credits): rc %ld\n",
- op, nblocks, PTR_ERR(handle));
- return handle;
-}
-
-static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
-{
- int rc;
- handle_t *handle = h;
-
- LASSERT(current->journal_info == handle);
- if (force_sync)
- handle->h_sync = 1; /* recovery likes this */
-
- rc = ext3_journal_stop(handle);
-
- return rc;
-}
-
-#ifndef EXT3_EXTENTS_FL
-#define EXT3_EXTENTS_FL 0x00080000 /* Inode uses extents */
-#endif
-
-#ifndef EXT_ASSERT
-#define EXT_ASSERT(cond) BUG_ON(!(cond))
-#endif
-
-#define EXT_GENERATION(inode) (EXT4_I(inode)->i_ext_generation)
-#define ext3_ext_base inode
-#define ext3_ext_base2inode(inode) (inode)
-#define EXT_DEPTH(inode) ext_depth(inode)
-#define fsfilt_ext3_ext_walk_space(inode, block, num, cb, cbdata) \
- ext3_ext_walk_space(inode, block, num, cb, cbdata);
-
-struct bpointers {
- unsigned long *blocks;
- unsigned long start;
- int num;
- int init_num;
- int create;
-};
-
-static long ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
- unsigned long block, int *aflags)
-{
- struct ext3_inode_info *ei = EXT3_I(inode);
- unsigned long bg_start;
- unsigned long colour;
- int depth;
-
- if (path) {
- struct ext3_extent *ex;
- depth = path->p_depth;
-
- /* try to predict block placement */
- if ((ex = path[depth].p_ext))
- return ext4_ext_pblock(ex) + (block - le32_to_cpu(ex->ee_block));
-
- /* it looks index is empty
- * try to find starting from index itself */
- if (path[depth].p_bh)
- return path[depth].p_bh->b_blocknr;
- }
-
- /* OK. use inode's group */
- bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
- le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
- colour = (current->pid % 16) *
- (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
- return bg_start + colour + block;
-}
-
-#define ll_unmap_underlying_metadata(sb, blocknr) \
- unmap_underlying_metadata((sb)->s_bdev, blocknr)
-
-#ifndef EXT3_MB_HINT_GROUP_ALLOC
-static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
- struct ext3_ext_path *path, unsigned long block,
- unsigned long *count, int *err)
-{
- unsigned long pblock, goal;
- int aflags = 0;
- struct inode *inode = ext3_ext_base2inode(base);
-
- goal = ext3_ext_find_goal(inode, path, block, &aflags);
- aflags |= 2; /* block have been already reserved */
- pblock = ext3_mb_new_blocks(handle, inode, goal, count, aflags, err);
- return pblock;
-
-}
-#else
-static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
- struct ext3_ext_path *path, unsigned long block,
- unsigned long *count, int *err)
-{
- struct inode *inode = ext3_ext_base2inode(base);
- struct ext3_allocation_request ar;
- unsigned long pblock;
- int aflags;
-
- /* find neighbour allocated blocks */
- ar.lleft = block;
- *err = ext3_ext_search_left(base, path, &ar.lleft, &ar.pleft);
- if (*err)
- return 0;
- ar.lright = block;
- *err = ext3_ext_search_right(base, path, &ar.lright, &ar.pright);
- if (*err)
- return 0;
-
- /* allocate new block */
- ar.goal = ext3_ext_find_goal(inode, path, block, &aflags);
- ar.inode = inode;
- ar.logical = block;
- ar.len = *count;
- ar.flags = EXT3_MB_HINT_DATA;
- pblock = ext3_mb_new_blocks(handle, &ar, err);
- *count = ar.len;
- return pblock;
-}
-#endif
-
-static int ext3_ext_new_extent_cb(struct ext3_ext_base *base,
- struct ext3_ext_path *path,
- struct ext3_ext_cache *cex,
-#ifdef HAVE_EXT_PREPARE_CB_EXTENT
- struct ext3_extent *ex,
-#endif
- void *cbdata)
-{
- struct bpointers *bp = cbdata;
- struct inode *inode = ext3_ext_base2inode(base);
- struct ext3_extent nex;
- unsigned long pblock;
- unsigned long tgen;
- int err, i;
- unsigned long count;
- handle_t *handle;
-
-#ifdef EXT3_EXT_CACHE_EXTENT
- if (cex->ec_type == EXT3_EXT_CACHE_EXTENT)
-#else
- if ((cex->ec_len != 0) && (cex->ec_start != 0))
-#endif
- {
- err = EXT_CONTINUE;
- goto map;
- }
-
- if (bp->create == 0) {
- i = 0;
- if (cex->ec_block < bp->start)
- i = bp->start - cex->ec_block;
- if (i >= cex->ec_len)
- CERROR("nothing to do?! i = %d, e_num = %u\n",
- i, cex->ec_len);
- for (; i < cex->ec_len && bp->num; i++) {
- *(bp->blocks) = 0;
- bp->blocks++;
- bp->num--;
- bp->start++;
- }
-
- return EXT_CONTINUE;
- }
-
- tgen = EXT_GENERATION(base);
- count = ext3_ext_calc_credits_for_insert(base, path);
-
- handle = ext3_journal_start(inode, count+EXT3_ALLOC_NEEDED+1);
- if (IS_ERR(handle)) {
- return PTR_ERR(handle);
- }
-
- if (tgen != EXT_GENERATION(base)) {
- /* the tree has changed. so path can be invalid at moment */
- ext3_journal_stop(handle);
- return EXT_REPEAT;
- }
-
- /* In 2.6.32 kernel, ext4_ext_walk_space()'s callback func is not
- * protected by i_data_sem as whole. so we patch it to store
- * generation to path and now verify the tree hasn't changed */
- down_write((&EXT4_I(inode)->i_data_sem));
-
- /* validate extent, make sure the extent tree does not changed */
- if (EXT_GENERATION(base) != path[0].p_generation) {
- /* cex is invalid, try again */
- up_write(&EXT4_I(inode)->i_data_sem);
- ext3_journal_stop(handle);
- return EXT_REPEAT;
- }
-
- count = cex->ec_len;
- pblock = new_blocks(handle, base, path, cex->ec_block, &count, &err);
- if (!pblock)
- goto out;
- EXT_ASSERT(count <= cex->ec_len);
-
- /* insert new extent */
- nex.ee_block = cpu_to_le32(cex->ec_block);
- ext3_ext_store_pblock(&nex, pblock);
- nex.ee_len = cpu_to_le16(count);
- err = fsfilt_ext3_ext_insert_extent(handle, base, path, &nex, 0);
- if (err) {
- /* free data blocks we just allocated */
- /* not a good idea to call discard here directly,
- * but otherwise we'd need to call it every free() */
-#ifdef EXT3_MB_HINT_GROUP_ALLOC
- ext3_mb_discard_inode_preallocations(inode);
-#endif
-#ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
- ext3_free_blocks(handle, inode, NULL, ext4_ext_pblock(&nex),
- cpu_to_le16(nex.ee_len), 0);
-#else
- ext3_free_blocks(handle, inode, ext4_ext_pblock(&nex),
- cpu_to_le16(nex.ee_len), 0);
-#endif
- goto out;
- }
-
- /*
- * Putting len of the actual extent we just inserted,
- * we are asking ext3_ext_walk_space() to continue
- * scaning after that block
- */
- cex->ec_len = le16_to_cpu(nex.ee_len);
- cex->ec_start = ext4_ext_pblock(&nex);
- BUG_ON(le16_to_cpu(nex.ee_len) == 0);
- BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);
-
-out:
- up_write((&EXT4_I(inode)->i_data_sem));
- ext3_journal_stop(handle);
-map:
- if (err >= 0) {
- /* map blocks */
- if (bp->num == 0) {
- CERROR("hmm. why do we find this extent?\n");
- CERROR("initial space: %lu:%u\n",
- bp->start, bp->init_num);
-#ifdef EXT3_EXT_CACHE_EXTENT
- CERROR("current extent: %u/%u/%llu %d\n",
- cex->ec_block, cex->ec_len,
- (unsigned long long)cex->ec_start,
- cex->ec_type);
-#else
- CERROR("current extent: %u/%u/%llu\n",
- cex->ec_block, cex->ec_len,
- (unsigned long long)cex->ec_start);
-#endif
- }
- i = 0;
- if (cex->ec_block < bp->start)
- i = bp->start - cex->ec_block;
- if (i >= cex->ec_len)
- CERROR("nothing to do?! i = %d, e_num = %u\n",
- i, cex->ec_len);
- for (; i < cex->ec_len && bp->num; i++) {
- *(bp->blocks) = cex->ec_start + i;
-#ifdef EXT3_EXT_CACHE_EXTENT
- if (cex->ec_type != EXT3_EXT_CACHE_EXTENT)
-#else
- if ((cex->ec_len == 0) || (cex->ec_start == 0))
-#endif
- {
- /* unmap any possible underlying metadata from
- * the block device mapping. bug 6998. */
- ll_unmap_underlying_metadata(inode->i_sb,
- *(bp->blocks));
- }
- bp->blocks++;
- bp->num--;
- bp->start++;
- }
- }
- return err;
-}
-
-int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
- unsigned long num, unsigned long *blocks,
- int create)
-{
- struct ext3_ext_base *base = inode;
- struct bpointers bp;
- int err;
-
- CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
- block, block + num - 1, (unsigned) inode->i_ino);
-
- bp.blocks = blocks;
- bp.start = block;
- bp.init_num = bp.num = num;
- bp.create = create;
-
- err = fsfilt_ext3_ext_walk_space(base, block, num,
- ext3_ext_new_extent_cb, &bp);
- ext3_ext_invalidate_cache(base);
-
- return err;
-}
-
-int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
- int pages, unsigned long *blocks,
- int create)
-{
- int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
- int rc = 0, i = 0;
- struct page *fp = NULL;
- int clen = 0;
-
- CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
- inode->i_ino, pages, (*page)->index);
-
- /* pages are sorted already. so, we just have to find
- * contig. space and process them properly */
- while (i < pages) {
- if (fp == NULL) {
- /* start new extent */
- fp = *page++;
- clen = 1;
- i++;
- continue;
- } else if (fp->index + clen == (*page)->index) {
- /* continue the extent */
- page++;
- clen++;
- i++;
- continue;
- }
-
- /* process found extent */
- rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
- clen * blocks_per_page, blocks,
- create);
- if (rc)
- GOTO(cleanup, rc);
-
- /* look for next extent */
- fp = NULL;
- blocks += blocks_per_page * clen;
- }
-
- if (fp)
- rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
- clen * blocks_per_page, blocks,
- create);
-cleanup:
- return rc;
-}
-
-int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
- int pages, unsigned long *blocks,
- int create)
-{
- int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
- unsigned long *b;
- int rc = 0, i;
-
- for (i = 0, b = blocks; i < pages; i++, page++) {
- rc = ext3_map_inode_page(inode, *page, b, create);
- if (rc) {
- CERROR("ino %lu, blk %lu create %d: rc %d\n",
- inode->i_ino, *b, create, rc);
- break;
- }
-
- b += blocks_per_page;
- }
- return rc;
-}
-
-int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
- int pages, unsigned long *blocks,
- int create, struct mutex *optional_mutex)
-{
- int rc;
-
- if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
- rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
- blocks, create);
- return rc;
- }
- if (optional_mutex != NULL)
- mutex_lock(optional_mutex);
- rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks, create);
- if (optional_mutex != NULL)
- mutex_unlock(optional_mutex);
-
- return rc;
-}
-
-int fsfilt_ext3_read(struct inode *inode, void *buf, int size, loff_t *offs)
-{
- unsigned long block;
- struct buffer_head *bh;
- int err, blocksize, csize, boffs, osize = size;
-
- /* prevent reading after eof */
- spin_lock(&inode->i_lock);
- if (i_size_read(inode) < *offs + size) {
- size = i_size_read(inode) - *offs;
- spin_unlock(&inode->i_lock);
- if (size < 0) {
- CDEBUG(D_EXT2, "size %llu is too short for read @%llu\n",
- i_size_read(inode), *offs);
- return -EBADR;
- } else if (size == 0) {
- return 0;
- }
- } else {
- spin_unlock(&inode->i_lock);
- }
-
- blocksize = 1 << inode->i_blkbits;
-
- while (size > 0) {
- block = *offs >> inode->i_blkbits;
- boffs = *offs & (blocksize - 1);
- csize = min(blocksize - boffs, size);
- bh = ext3_bread(NULL, inode, block, 0, &err);
- if (!bh) {
- CERROR("can't read block: %d\n", err);
- return err;
- }
-
- memcpy(buf, bh->b_data + boffs, csize);
- brelse(bh);
-
- *offs += csize;
- buf += csize;
- size -= csize;
- }
- return osize;
-}
-EXPORT_SYMBOL(fsfilt_ext3_read);
-
-static int fsfilt_ext3_read_record(struct file * file, void *buf,
- int size, loff_t *offs)
-{
- int rc;
- rc = fsfilt_ext3_read(file->f_dentry->d_inode, buf, size, offs);
- if (rc > 0)
- rc = 0;
- return rc;
-}
-
-int fsfilt_ext3_write_handle(struct inode *inode, void *buf, int bufsize,
- loff_t *offs, handle_t *handle)
-{
- struct buffer_head *bh = NULL;
- loff_t old_size = i_size_read(inode), offset = *offs;
- loff_t new_size = i_size_read(inode);
- unsigned long block;
- int err = 0, blocksize = 1 << inode->i_blkbits, size, boffs;
-
- while (bufsize > 0) {
- if (bh != NULL)
- brelse(bh);
-
- block = offset >> inode->i_blkbits;
- boffs = offset & (blocksize - 1);
- size = min(blocksize - boffs, bufsize);
- bh = ext3_bread(handle, inode, block, 1, &err);
- if (!bh) {
- CERROR("can't read/create block: %d\n", err);
- break;
- }
-
- err = ext3_journal_get_write_access(handle, bh);
- if (err) {
- CERROR("journal_get_write_access() returned error %d\n",
- err);
- break;
- }
- LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
- memcpy(bh->b_data + boffs, buf, size);
- err = ext3_journal_dirty_metadata(handle, bh);
- if (err) {
- CERROR("journal_dirty_metadata() returned error %d\n",
- err);
- break;
- }
- if (offset + size > new_size)
- new_size = offset + size;
- offset += size;
- bufsize -= size;
- buf += size;
- }
- if (bh)
- brelse(bh);
-
- /* correct in-core and on-disk sizes */
- if (new_size > i_size_read(inode)) {
- spin_lock(&inode->i_lock);
- if (new_size > i_size_read(inode))
- i_size_write(inode, new_size);
- if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
- EXT3_I(inode)->i_disksize = i_size_read(inode);
- if (i_size_read(inode) > old_size) {
- spin_unlock(&inode->i_lock);
- mark_inode_dirty(inode);
- } else {
- spin_unlock(&inode->i_lock);
- }
- }
-
- if (err == 0)
- *offs = offset;
- return err;
-}
-EXPORT_SYMBOL(fsfilt_ext3_write_handle);
-
-static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
- loff_t *offs, int force_sync)
-{
- struct inode *inode = file->f_dentry->d_inode;
- handle_t *handle;
- int err, block_count = 0, blocksize;
-
- /* Determine how many transaction credits are needed */
- blocksize = 1 << inode->i_blkbits;
- block_count = (*offs & (blocksize - 1)) + bufsize;
- block_count = (block_count + blocksize - 1) >> inode->i_blkbits;
-
- handle = ext3_journal_start(inode,
- block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2);
- if (IS_ERR(handle)) {
- CERROR("can't start transaction for %d blocks (%d bytes)\n",
- block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2,
- bufsize);
- return PTR_ERR(handle);
- }
-
- err = fsfilt_ext3_write_handle(inode, buf, bufsize, offs, handle);
-
- if (!err && force_sync)
- handle->h_sync = 1; /* recovery likes this */
-
- ext3_journal_stop(handle);
-
- return err;
-}
-
-static int fsfilt_ext3_setup(struct super_block *sb)
-{
- if (!EXT3_HAS_COMPAT_FEATURE(sb,
- EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
- CERROR("ext3 mounted without journal\n");
- return -EINVAL;
- }
-
-#ifdef S_PDIROPS
- CWARN("Enabling PDIROPS\n");
- set_opt(EXT3_SB(sb)->s_mount_opt, PDIROPS);
- sb->s_flags |= S_PDIROPS;
-#endif
- if (!EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
- CWARN("filesystem doesn't have dir_index feature enabled\n");
- return 0;
-}
-static struct fsfilt_operations fsfilt_ext3_ops = {
- .fs_type = "ext3",
- .fs_owner = THIS_MODULE,
- .fs_getlabel = fsfilt_ext3_get_label,
- .fs_start = fsfilt_ext3_start,
- .fs_commit = fsfilt_ext3_commit,
- .fs_map_inode_pages = fsfilt_ext3_map_inode_pages,
- .fs_write_record = fsfilt_ext3_write_record,
- .fs_read_record = fsfilt_ext3_read_record,
- .fs_setup = fsfilt_ext3_setup,
-};
-
-static int __init fsfilt_ext3_init(void)
-{
- int rc;
-
- fcb_cache = kmem_cache_create("fsfilt_ext3_fcb",
- sizeof(struct fsfilt_cb_data), 0, 0);
- if (!fcb_cache) {
- CERROR("error allocating fsfilt journal callback cache\n");
- GOTO(out, rc = -ENOMEM);
- }
-
- rc = fsfilt_register_ops(&fsfilt_ext3_ops);
-
- if (rc) {
- int err = kmem_cache_destroy(fcb_cache);
- LASSERTF(err == 0, "error destroying new cache: rc %d\n", err);
- }
-out:
- return rc;
-}
-
-static void __exit fsfilt_ext3_exit(void)
-{
- int rc;
-
- fsfilt_unregister_ops(&fsfilt_ext3_ops);
- rc = kmem_cache_destroy(fcb_cache);
- LASSERTF(rc == 0, "couldn't destroy fcb_cache slab\n");
-}
-
-module_init(fsfilt_ext3_init);
-module_exit(fsfilt_ext3_exit);
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/lustre/lustre/lvfs/lvfs_lib.c b/drivers/staging/lustre/lustre/lvfs/lvfs_lib.c
index b21e40cdacab..7e47fc4a7e4e 100644
--- a/drivers/staging/lustre/lustre/lvfs/lvfs_lib.c
+++ b/drivers/staging/lustre/lustre/lvfs/lvfs_lib.c
@@ -43,7 +43,6 @@
#include <lustre_lib.h>
#include <lprocfs_status.h>
-#ifdef LPROCFS
void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount)
{
struct lprocfs_counter *percpu_cntr;
@@ -169,4 +168,3 @@ int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid)
return rc;
}
EXPORT_SYMBOL(lprocfs_stats_alloc_one);
-#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c b/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
index 09474e7553dd..428ffd8c37b7 100644
--- a/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
+++ b/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
@@ -47,7 +47,6 @@
#include <linux/quotaops.h>
#include <linux/libcfs/libcfs.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/lustre_compat25.h>
#include <lvfs.h>
diff --git a/drivers/staging/lustre/lustre/mdc/Makefile b/drivers/staging/lustre/lustre/mdc/Makefile
index 93bae242e761..4c0bed14de80 100644
--- a/drivers/staging/lustre/lustre/mdc/Makefile
+++ b/drivers/staging/lustre/lustre/mdc/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_LUSTRE_FS) += mdc.o
-mdc-y := mdc_request.o mdc_reint.o lproc_mdc.o mdc_lib.o mdc_locks.o
+mdc-y := mdc_request.o mdc_reint.o mdc_lib.o mdc_locks.o
+mdc-$(CONFIG_PROC_FS) += lproc_mdc.o
ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index e0b8f1866253..2663480a68c5 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -39,8 +39,6 @@
#include <obd_class.h>
#include <lprocfs_status.h>
-#ifdef LPROCFS
-
static int mdc_max_rpcs_in_flight_seq_show(struct seq_file *m, void *v)
{
struct obd_device *dev = m->private;
@@ -214,4 +212,3 @@ void lprocfs_mdc_init_vars(struct lprocfs_static_vars *lvars)
lvars->module_vars = lprocfs_mdc_module_vars;
lvars->obd_vars = lprocfs_mdc_obd_vars;
}
-#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
index 2aeff0ecec34..506982996c0e 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
@@ -69,9 +69,10 @@ void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
const void *data, int datalen, __u32 mode, __u32 uid,
__u32 gid, cfs_cap_t capability, __u64 rdev);
void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- __u32 mode, __u64 rdev, __u32 flags, const void *data,
+ __u32 mode, __u64 rdev, __u64 flags, const void *data,
int datalen);
void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
+void mdc_getxattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
const char *old, int oldlen, const char *new, int newlen);
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index b2de47803679..91f6876dac3f 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -174,12 +174,13 @@ void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
}
}
-static __u64 mds_pack_open_flags(__u32 flags, __u32 mode)
+static __u64 mds_pack_open_flags(__u64 flags, __u32 mode)
{
__u64 cr_flags = (flags & (FMODE_READ | FMODE_WRITE |
MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS |
MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK |
- MDS_OPEN_BY_FID));
+ MDS_OPEN_BY_FID | MDS_OPEN_LEASE |
+ MDS_OPEN_RELEASE));
if (flags & O_CREAT)
cr_flags |= MDS_OPEN_CREAT;
if (flags & O_EXCL)
@@ -207,7 +208,7 @@ static __u64 mds_pack_open_flags(__u32 flags, __u32 mode)
/* packing of MDS records */
void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- __u32 mode, __u64 rdev, __u32 flags, const void *lmm,
+ __u32 mode, __u64 rdev, __u64 flags, const void *lmm,
int lmmlen)
{
struct mdt_rec_create *rec;
@@ -234,6 +235,7 @@ void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
rec->cr_suppgid2 = op_data->op_suppgids[1];
rec->cr_bias = op_data->op_bias;
rec->cr_umask = current_umask();
+ rec->cr_old_handle = op_data->op_handle;
mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
/* the next buffer is child capa, which is used for replay,
@@ -489,6 +491,28 @@ void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags,
}
}
+static void mdc_hsm_release_pack(struct ptlrpc_request *req,
+ struct md_op_data *op_data)
+{
+ if (op_data->op_bias & MDS_HSM_RELEASE) {
+ struct close_data *data;
+ struct ldlm_lock *lock;
+
+ data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
+ LASSERT(data != NULL);
+
+ lock = ldlm_handle2lock(&op_data->op_lease_handle);
+ if (lock != NULL) {
+ data->cd_handle = lock->l_remote_handle;
+ ldlm_lock_put(lock);
+ }
+ ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
+
+ data->cd_data_version = op_data->op_data_version;
+ data->cd_fid = op_data->op_fid2;
+ }
+}
+
void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
{
struct mdt_ioepoch *epoch;
@@ -500,6 +524,7 @@ void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
mdc_setattr_pack_rec(rec, op_data);
mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
mdc_ioepoch_pack(epoch, op_data);
+ mdc_hsm_release_pack(req, op_data);
}
static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index fb5a9959bf7a..8aa7c80c2002 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -39,7 +39,6 @@
# include <linux/module.h>
# include <linux/pagemap.h>
# include <linux/miscdevice.h>
-# include <linux/init.h>
#include <lustre_acl.h>
#include <obd_class.h>
@@ -75,6 +74,12 @@ EXPORT_SYMBOL(it_clear_disposition);
int it_open_error(int phase, struct lookup_intent *it)
{
+ if (it_disposition(it, DISP_OPEN_LEASE)) {
+ if (phase >= DISP_OPEN_LEASE)
+ return it->d.lustre.it_status;
+ else
+ return 0;
+ }
if (it_disposition(it, DISP_OPEN_OPEN)) {
if (phase >= DISP_OPEN_OPEN)
return it->d.lustre.it_status;
@@ -281,14 +286,21 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
/* XXX: openlock is not cancelled for cross-refs. */
/* If inode is known, cancel conflicting OPEN locks. */
if (fid_is_sane(&op_data->op_fid2)) {
- if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
- mode = LCK_CW;
+ if (it->it_flags & MDS_OPEN_LEASE) { /* try to get lease */
+ if (it->it_flags & FMODE_WRITE)
+ mode = LCK_EX;
+ else
+ mode = LCK_PR;
+ } else {
+ if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
+ mode = LCK_CW;
#ifdef FMODE_EXEC
- else if (it->it_flags & FMODE_EXEC)
- mode = LCK_PR;
+ else if (it->it_flags & FMODE_EXEC)
+ mode = LCK_PR;
#endif
- else
- mode = LCK_CR;
+ else
+ mode = LCK_CR;
+ }
count = mdc_resource_get_unused(exp, &op_data->op_fid2,
&cancels, mode,
MDS_INODELOCK_OPEN);
@@ -347,6 +359,62 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
return req;
}
+static struct ptlrpc_request *
+mdc_intent_getxattr_pack(struct obd_export *exp,
+ struct lookup_intent *it,
+ struct md_op_data *op_data)
+{
+ struct ptlrpc_request *req;
+ struct ldlm_intent *lit;
+ int rc, count = 0, maxdata;
+ LIST_HEAD(cancels);
+
+
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_LDLM_INTENT_GETXATTR);
+ if (req == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+
+ if (it->it_op == IT_SETXATTR)
+ /* If we want to upgrade to LCK_PW, let's cancel LCK_PR
+ * locks now. This avoids unnecessary ASTs. */
+ count = mdc_resource_get_unused(exp, &op_data->op_fid1,
+ &cancels, LCK_PW,
+ MDS_INODELOCK_XATTR);
+
+ rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return ERR_PTR(rc);
+ }
+
+ /* pack the intent */
+ lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
+ lit->opc = IT_GETXATTR;
+
+ maxdata = class_exp2cliimp(exp)->imp_connect_data.ocd_max_easize;
+
+ /* pack the intended request */
+ mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
+ op_data->op_valid, maxdata, -1, 0);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
+ RCL_SERVER, maxdata);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_EAVALS,
+ RCL_SERVER, maxdata);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_EAVALS_LENS,
+ RCL_SERVER, maxdata);
+
+ ptlrpc_request_set_replen(req);
+
+ return req;
+}
+
static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp,
struct lookup_intent *it,
struct md_op_data *op_data)
@@ -722,6 +790,8 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
{ .l_inodebits = { MDS_INODELOCK_UPDATE } };
static const ldlm_policy_data_t layout_policy =
{ .l_inodebits = { MDS_INODELOCK_LAYOUT } };
+ static const ldlm_policy_data_t getxattr_policy = {
+ .l_inodebits = { MDS_INODELOCK_XATTR } };
ldlm_policy_data_t const *policy = &lookup_policy;
int generation, resends = 0;
struct ldlm_reply *lockrep;
@@ -738,6 +808,8 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
policy = &update_policy;
else if (it->it_op & IT_LAYOUT)
policy = &layout_policy;
+ else if (it->it_op & (IT_GETXATTR | IT_SETXATTR))
+ policy = &getxattr_policy;
}
LASSERT(reqp == NULL);
@@ -768,9 +840,10 @@ resend:
} else if (it->it_op & IT_LAYOUT) {
if (!imp_connect_lvb_type(class_exp2cliimp(exp)))
return -EOPNOTSUPP;
-
req = mdc_intent_layout_pack(exp, it, op_data);
lvb_type = LVB_T_LAYOUT;
+ } else if (it->it_op & (IT_GETXATTR | IT_SETXATTR)) {
+ req = mdc_intent_getxattr_pack(exp, it, op_data);
} else {
LBUG();
return -EINVAL;
@@ -958,13 +1031,8 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
LASSERTF(fid_res_name_eq(&mdt_body->fid1,
&lock->l_resource->lr_name),
- "Lock res_id: %lu/%lu/%lu, fid: %lu/%lu/%lu.\n",
- (unsigned long)lock->l_resource->lr_name.name[0],
- (unsigned long)lock->l_resource->lr_name.name[1],
- (unsigned long)lock->l_resource->lr_name.name[2],
- (unsigned long)fid_seq(&mdt_body->fid1),
- (unsigned long)fid_oid(&mdt_body->fid1),
- (unsigned long)fid_ver(&mdt_body->fid1));
+ "Lock res_id: "DLDLMRES", fid: "DFID"\n",
+ PLDLMRES(lock->l_resource), PFID(&mdt_body->fid1));
LDLM_LOCK_PUT(lock);
memcpy(&old_lock, lockh, sizeof(*lockh));
@@ -1065,10 +1133,10 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
LASSERT(it);
CDEBUG(D_DLMTRACE, "(name: %.*s,"DFID") in obj "DFID
- ", intent: %s flags %#o\n", op_data->op_namelen,
- op_data->op_name, PFID(&op_data->op_fid2),
- PFID(&op_data->op_fid1), ldlm_it2str(it->it_op),
- it->it_flags);
+ ", intent: %s flags %#Lo\n", op_data->op_namelen,
+ op_data->op_name, PFID(&op_data->op_fid2),
+ PFID(&op_data->op_fid1), ldlm_it2str(it->it_op),
+ it->it_flags);
lockh.cookie = 0;
if (fid_is_sane(&op_data->op_fid2) &&
@@ -1194,9 +1262,10 @@ int mdc_intent_getattr_async(struct obd_export *exp,
int rc = 0;
__u64 flags = LDLM_FL_HAS_INTENT;
- CDEBUG(D_DLMTRACE,"name: %.*s in inode "DFID", intent: %s flags %#o\n",
- op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
- ldlm_it2str(it->it_op), it->it_flags);
+ CDEBUG(D_DLMTRACE,
+ "name: %.*s in inode "DFID", intent: %s flags %#Lo\n",
+ op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
+ ldlm_it2str(it->it_op), it->it_flags);
fid_build_reg_res_name(&op_data->op_fid1, &res_id);
req = mdc_intent_getattr_pack(exp, it, op_data);
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index ed3a7a05557f..83013927e131 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -800,10 +800,27 @@ int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
{
struct obd_device *obd = class_exp2obd(exp);
struct ptlrpc_request *req;
- int rc;
+ struct req_format *req_fmt;
+ int rc;
+ int saved_rc = 0;
+
+
+ req_fmt = &RQF_MDS_CLOSE;
+ if (op_data->op_bias & MDS_HSM_RELEASE) {
+ req_fmt = &RQF_MDS_RELEASE_CLOSE;
+
+ /* allocate a FID for volatile file */
+ rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
+ if (rc < 0) {
+ CERROR("%s: "DFID" failed to allocate FID: %d\n",
+ obd->obd_name, PFID(&op_data->op_fid1), rc);
+ /* save the errcode and proceed to close */
+ saved_rc = rc;
+ }
+ }
*request = NULL;
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_CLOSE);
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt);
if (req == NULL)
return -ENOMEM;
@@ -893,7 +910,7 @@ int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
}
*request = req;
mdc_close_handle_reply(req, op_data, rc);
- return rc;
+ return rc < 0 ? rc : saved_rc;
}
int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
@@ -1413,7 +1430,7 @@ static struct kuc_hdr *changelog_kuc_hdr(char *buf, int len, int flags)
{
struct kuc_hdr *lh = (struct kuc_hdr *)buf;
- LASSERT(len <= CR_MAXSIZE);
+ LASSERT(len <= KUC_CHANGELOG_MSG_MAXSIZE);
lh->kuc_magic = KUC_MAGIC;
lh->kuc_transport = KUC_TRANSPORT_CHANGELOG;
@@ -1486,7 +1503,7 @@ static int mdc_changelog_send_thread(void *csdata)
CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n",
cs->cs_fp, cs->cs_startrec);
- OBD_ALLOC(cs->cs_buf, CR_MAXSIZE);
+ OBD_ALLOC(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
if (cs->cs_buf == NULL)
GOTO(out, rc = -ENOMEM);
@@ -1523,7 +1540,7 @@ out:
if (ctxt)
llog_ctxt_put(ctxt);
if (cs->cs_buf)
- OBD_FREE(cs->cs_buf, CR_MAXSIZE);
+ OBD_FREE(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
OBD_FREE_PTR(cs);
return rc;
}
@@ -1743,6 +1760,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
GOTO(out, rc);
case LL_IOC_HSM_STATE_SET:
rc = mdc_ioc_hsm_state_set(exp, karg);
+ GOTO(out, rc);
case LL_IOC_HSM_ACTION:
rc = mdc_ioc_hsm_current_action(exp, karg);
GOTO(out, rc);
@@ -1814,8 +1832,8 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
struct obd_quotactl *oqctl;
OBD_ALLOC_PTR(oqctl);
- if (!oqctl)
- return -ENOMEM;
+ if (oqctl == NULL)
+ GOTO(out, rc = -ENOMEM);
QCTL_COPY(oqctl, qctl);
rc = obd_quotactl(exp, oqctl);
@@ -1824,23 +1842,21 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
qctl->qc_valid = QC_MDTIDX;
qctl->obd_uuid = obd->u.cli.cl_target_uuid;
}
+
OBD_FREE_PTR(oqctl);
- break;
+ GOTO(out, rc);
}
- case LL_IOC_GET_CONNECT_FLAGS: {
- if (copy_to_user(uarg,
- exp_connect_flags_ptr(exp),
- sizeof(__u64)))
+ case LL_IOC_GET_CONNECT_FLAGS:
+ if (copy_to_user(uarg, exp_connect_flags_ptr(exp),
+ sizeof(*exp_connect_flags_ptr(exp))))
GOTO(out, rc = -EFAULT);
- else
- GOTO(out, rc = 0);
- }
- case LL_IOC_LOV_SWAP_LAYOUTS: {
+
+ GOTO(out, rc = 0);
+ case LL_IOC_LOV_SWAP_LAYOUTS:
rc = mdc_ioc_swap_layouts(exp, karg);
- break;
- }
+ GOTO(out, rc);
default:
- CERROR("mdc_ioctl(): unrecognised ioctl %#x\n", cmd);
+ CERROR("unrecognised ioctl: cmd = %#x\n", cmd);
GOTO(out, rc = -ENOTTY);
}
out:
@@ -1920,10 +1936,8 @@ static void lustre_swab_hal(struct hsm_action_list *h)
__swab32s(&h->hal_archive_id);
__swab64s(&h->hal_flags);
hai = hai_zero(h);
- for (i = 0; i < h->hal_count; i++) {
+ for (i = 0; i < h->hal_count; i++, hai = hai_next(hai))
lustre_swab_hai(hai);
- hai = hai_next(hai);
- }
}
static void lustre_swab_kuch(struct kuc_hdr *l)
@@ -2062,15 +2076,6 @@ int mdc_set_info_async(const struct lu_env *env,
sptlrpc_import_flush_my_ctx(imp);
return 0;
}
- if (KEY_IS(KEY_MDS_CONN)) {
- /* mds-mds import */
- spin_lock(&imp->imp_lock);
- imp->imp_server_timeout = 1;
- spin_unlock(&imp->imp_lock);
- imp->imp_client->cli_request_portal = MDS_MDS_PORTAL;
- CDEBUG(D_OTHER, "%s: timeout / 2\n", exp->exp_obd->obd_name);
- return 0;
- }
if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
keylen, key, vallen, val, set);
@@ -2580,27 +2585,6 @@ static int mdc_renew_capa(struct obd_export *exp, struct obd_capa *oc,
return 0;
}
-static int mdc_connect(const struct lu_env *env,
- struct obd_export **exp,
- struct obd_device *obd, struct obd_uuid *cluuid,
- struct obd_connect_data *data,
- void *localdata)
-{
- struct obd_import *imp = obd->u.cli.cl_import;
-
- /* mds-mds import features */
- if (data && (data->ocd_connect_flags & OBD_CONNECT_MDS_MDS)) {
- spin_lock(&imp->imp_lock);
- imp->imp_server_timeout = 1;
- spin_unlock(&imp->imp_lock);
- imp->imp_client->cli_request_portal = MDS_MDS_PORTAL;
- CDEBUG(D_OTHER, "%s: Set 'mds' portal and timeout\n",
- obd->obd_name);
- }
-
- return client_connect_import(env, exp, obd, cluuid, data, NULL);
-}
-
struct obd_ops mdc_obd_ops = {
.o_owner = THIS_MODULE,
.o_setup = mdc_setup,
@@ -2608,7 +2592,7 @@ struct obd_ops mdc_obd_ops = {
.o_cleanup = mdc_cleanup,
.o_add_conn = client_import_add_conn,
.o_del_conn = client_import_del_conn,
- .o_connect = mdc_connect,
+ .o_connect = client_connect_import,
.o_disconnect = client_disconnect_export,
.o_iocontrol = mdc_iocontrol,
.o_set_info_async = mdc_set_info_async,
diff --git a/drivers/staging/lustre/lustre/mgc/Makefile b/drivers/staging/lustre/lustre/mgc/Makefile
index 267246344e1c..2f5ee649456d 100644
--- a/drivers/staging/lustre/lustre/mgc/Makefile
+++ b/drivers/staging/lustre/lustre/mgc/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_LUSTRE_FS) += mgc.o
-mgc-y := mgc_request.o lproc_mgc.o
+mgc-y := mgc_request.o
+mgc-$(CONFIG_PROC_FS) += lproc_mgc.o
ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/mgc/libmgc.c b/drivers/staging/lustre/lustre/mgc/libmgc.c
index 7b4947cec3a8..9b40c57d3cd4 100644
--- a/drivers/staging/lustre/lustre/mgc/libmgc.c
+++ b/drivers/staging/lustre/lustre/mgc/libmgc.c
@@ -99,11 +99,8 @@ static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
static int mgc_cleanup(struct obd_device *obd)
{
- struct client_obd *cli = &obd->u.cli;
int rc;
- LASSERT(cli->cl_mgc_vfsmnt == NULL);
-
ptlrpcd_decref();
rc = client_obd_cleanup(obd);
diff --git a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
index ebecec2b0078..1506af13f1bf 100644
--- a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
+++ b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
@@ -40,8 +40,6 @@
#include <lprocfs_status.h>
#include "mgc_internal.h"
-#ifdef LPROCFS
-
LPROC_SEQ_FOPS_RO_TYPE(mgc, uuid);
LPROC_SEQ_FOPS_RO_TYPE(mgc, connect_flags);
LPROC_SEQ_FOPS_RO_TYPE(mgc, server_uuid);
@@ -80,4 +78,3 @@ void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars)
lvars->module_vars = lprocfs_mgc_module_vars;
lvars->obd_vars = lprocfs_mgc_obd_vars;
}
-#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_internal.h b/drivers/staging/lustre/lustre/mgc/mgc_internal.h
index dbd698272a84..73b454898844 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_internal.h
+++ b/drivers/staging/lustre/lustre/mgc/mgc_internal.h
@@ -48,7 +48,7 @@
void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars);
int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data);
#else
-static void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars)
+static inline void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars)
{
memset(lvars, 0, sizeof(*lvars));
}
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 12a9ede21a85..3bdbb94e020f 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -41,17 +41,14 @@
#define DEBUG_SUBSYSTEM S_MGC
#define D_MGC D_CONFIG /*|D_WARNING*/
-# include <linux/module.h>
-# include <linux/pagemap.h>
-# include <linux/miscdevice.h>
-# include <linux/init.h>
-
+#include <linux/module.h>
#include <obd_class.h>
#include <lustre_dlm.h>
#include <lprocfs_status.h>
#include <lustre_log.h>
-#include <lustre_fsfilt.h>
#include <lustre_disk.h>
+#include <dt_object.h>
+
#include "mgc_internal.h"
static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id,
@@ -73,7 +70,7 @@ static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id,
memset(res_id, 0, sizeof(*res_id));
res_id->name[0] = cpu_to_le64(resname);
/* XXX: unfortunately, sptlprc and config llog share one lock */
- switch(type) {
+ switch (type) {
case CONFIG_T_CONFIG:
case CONFIG_T_SPTLRPC:
resname = 0;
@@ -400,6 +397,7 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg)
return rc;
}
+#ifdef LPROCFS
int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
{
struct obd_device *obd = data;
@@ -423,6 +421,7 @@ int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
return 0;
}
+#endif
/* reenqueue any lost locks */
#define RQ_RUNNING 0x1
@@ -578,97 +577,175 @@ static void mgc_requeue_add(struct config_llog_data *cld)
}
/********************** class fns **********************/
+static int mgc_local_llog_init(const struct lu_env *env,
+ struct obd_device *obd,
+ struct obd_device *disk)
+{
+ struct llog_ctxt *ctxt;
+ int rc;
+
+ rc = llog_setup(env, obd, &obd->obd_olg, LLOG_CONFIG_ORIG_CTXT, disk,
+ &llog_osd_ops);
+ if (rc)
+ return rc;
+
+ ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
+ LASSERT(ctxt);
+ ctxt->loc_dir = obd->u.cli.cl_mgc_configs_dir;
+ llog_ctxt_put(ctxt);
-static int mgc_fs_setup(struct obd_device *obd, struct super_block *sb,
- struct vfsmount *mnt)
+ return 0;
+}
+
+static int mgc_local_llog_fini(const struct lu_env *env,
+ struct obd_device *obd)
{
- struct lvfs_run_ctxt saved;
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct client_obd *cli = &obd->u.cli;
- struct dentry *dentry;
- char *label;
- int err = 0;
+ struct llog_ctxt *ctxt;
+
+ ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
+ llog_cleanup(env, ctxt);
+
+ return 0;
+}
+
+static int mgc_fs_setup(struct obd_device *obd, struct super_block *sb)
+{
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct client_obd *cli = &obd->u.cli;
+ struct lu_fid rfid, fid;
+ struct dt_object *root, *dto;
+ struct lu_env *env;
+ int rc = 0;
LASSERT(lsi);
- LASSERT(lsi->lsi_srv_mnt == mnt);
+ LASSERT(lsi->lsi_dt_dev);
+
+ OBD_ALLOC_PTR(env);
+ if (env == NULL)
+ return -ENOMEM;
/* The mgc fs exclusion sem. Only one fs can be setup at a time. */
down(&cli->cl_mgc_sem);
cfs_cleanup_group_info();
- obd->obd_fsops = fsfilt_get_ops(lsi->lsi_fstype);
- if (IS_ERR(obd->obd_fsops)) {
- up(&cli->cl_mgc_sem);
- CERROR("%s: No fstype %s: rc = %ld\n", lsi->lsi_fstype,
- obd->obd_name, PTR_ERR(obd->obd_fsops));
- return PTR_ERR(obd->obd_fsops);
- }
+ /* Setup the configs dir */
+ rc = lu_env_init(env, LCT_MG_THREAD);
+ if (rc)
+ GOTO(out_err, rc);
- cli->cl_mgc_vfsmnt = mnt;
- err = fsfilt_setup(obd, mnt->mnt_sb);
- if (err)
- GOTO(err_ops, err);
-
- OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
- obd->obd_lvfs_ctxt.pwdmnt = mnt;
- obd->obd_lvfs_ctxt.pwd = mnt->mnt_root;
- obd->obd_lvfs_ctxt.fs = get_ds();
-
- push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- dentry = ll_lookup_one_len(MOUNT_CONFIGS_DIR, cfs_fs_pwd(current->fs),
- strlen(MOUNT_CONFIGS_DIR));
- pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- if (IS_ERR(dentry)) {
- err = PTR_ERR(dentry);
- CERROR("cannot lookup %s directory: rc = %d\n",
- MOUNT_CONFIGS_DIR, err);
- GOTO(err_ops, err);
- }
- cli->cl_mgc_configs_dir = dentry;
+ fid.f_seq = FID_SEQ_LOCAL_NAME;
+ fid.f_oid = 1;
+ fid.f_ver = 0;
+ rc = local_oid_storage_init(env, lsi->lsi_dt_dev, &fid,
+ &cli->cl_mgc_los);
+ if (rc)
+ GOTO(out_env, rc);
+
+ rc = dt_root_get(env, lsi->lsi_dt_dev, &rfid);
+ if (rc)
+ GOTO(out_env, rc);
+
+ root = dt_locate_at(env, lsi->lsi_dt_dev, &rfid,
+ &cli->cl_mgc_los->los_dev->dd_lu_dev);
+ if (unlikely(IS_ERR(root)))
+ GOTO(out_los, rc = PTR_ERR(root));
+
+ dto = local_file_find_or_create(env, cli->cl_mgc_los, root,
+ MOUNT_CONFIGS_DIR,
+ S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO);
+ lu_object_put_nocache(env, &root->do_lu);
+ if (IS_ERR(dto))
+ GOTO(out_los, rc = PTR_ERR(dto));
+
+ cli->cl_mgc_configs_dir = dto;
+
+ LASSERT(lsi->lsi_osd_exp->exp_obd->obd_lvfs_ctxt.dt);
+ rc = mgc_local_llog_init(env, obd, lsi->lsi_osd_exp->exp_obd);
+ if (rc)
+ GOTO(out_llog, rc);
/* We take an obd ref to insure that we can't get to mgc_cleanup
- without calling mgc_fs_cleanup first. */
+ * without calling mgc_fs_cleanup first. */
class_incref(obd, "mgc_fs", obd);
- label = fsfilt_get_label(obd, mnt->mnt_sb);
- if (label)
- CDEBUG(D_MGC, "MGC using disk labelled=%s\n", label);
-
/* We keep the cl_mgc_sem until mgc_fs_cleanup */
- return 0;
-
-err_ops:
- fsfilt_put_ops(obd->obd_fsops);
- obd->obd_fsops = NULL;
- cli->cl_mgc_vfsmnt = NULL;
- up(&cli->cl_mgc_sem);
- return err;
+out_llog:
+ if (rc) {
+ lu_object_put(env, &cli->cl_mgc_configs_dir->do_lu);
+ cli->cl_mgc_configs_dir = NULL;
+ }
+out_los:
+ if (rc < 0) {
+ local_oid_storage_fini(env, cli->cl_mgc_los);
+ cli->cl_mgc_los = NULL;
+ up(&cli->cl_mgc_sem);
+ }
+out_env:
+ lu_env_fini(env);
+out_err:
+ OBD_FREE_PTR(env);
+ return rc;
}
static int mgc_fs_cleanup(struct obd_device *obd)
{
- struct client_obd *cli = &obd->u.cli;
- int rc = 0;
+ struct lu_env env;
+ struct client_obd *cli = &obd->u.cli;
+ int rc;
- LASSERT(cli->cl_mgc_vfsmnt != NULL);
+ LASSERT(cli->cl_mgc_los != NULL);
- if (cli->cl_mgc_configs_dir != NULL) {
- struct lvfs_run_ctxt saved;
- push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- l_dput(cli->cl_mgc_configs_dir);
- cli->cl_mgc_configs_dir = NULL;
- pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- class_decref(obd, "mgc_fs", obd);
- }
+ rc = lu_env_init(&env, LCT_MG_THREAD);
+ if (rc)
+ GOTO(unlock, rc);
+
+ mgc_local_llog_fini(&env, obd);
+
+ lu_object_put_nocache(&env, &cli->cl_mgc_configs_dir->do_lu);
+ cli->cl_mgc_configs_dir = NULL;
- cli->cl_mgc_vfsmnt = NULL;
- if (obd->obd_fsops)
- fsfilt_put_ops(obd->obd_fsops);
+ local_oid_storage_fini(&env, cli->cl_mgc_los);
+ cli->cl_mgc_los = NULL;
+ lu_env_fini(&env);
+unlock:
+ class_decref(obd, "mgc_fs", obd);
up(&cli->cl_mgc_sem);
- return rc;
+ return 0;
+}
+
+static int mgc_llog_init(const struct lu_env *env, struct obd_device *obd)
+{
+ struct llog_ctxt *ctxt;
+ int rc;
+
+ /* Set up only the remote ctxt; the local disk context is switched
+ * per filesystem during mgc_fs_setup() */
+ rc = llog_setup(env, obd, &obd->obd_olg, LLOG_CONFIG_REPL_CTXT, obd,
+ &llog_client_ops);
+ if (rc)
+ return rc;
+
+ ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
+ LASSERT(ctxt);
+
+ llog_initiator_connect(ctxt);
+ llog_ctxt_put(ctxt);
+
+ return 0;
+}
+
+static int mgc_llog_fini(const struct lu_env *env, struct obd_device *obd)
+{
+ struct llog_ctxt *ctxt;
+
+ ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
+ if (ctxt)
+ llog_cleanup(env, ctxt);
+
+ return 0;
}
static atomic_t mgc_count = ATOMIC_INIT(0);
@@ -694,7 +771,7 @@ static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
}
}
obd_cleanup_client_import(obd);
- rc = obd_llog_finish(obd, 0);
+ rc = mgc_llog_fini(NULL, obd);
if (rc != 0)
CERROR("failed to cleanup llogging subsystems\n");
break;
@@ -704,11 +781,8 @@ static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
static int mgc_cleanup(struct obd_device *obd)
{
- struct client_obd *cli = &obd->u.cli;
int rc;
- LASSERT(cli->cl_mgc_vfsmnt == NULL);
-
/* COMPAT_146 - old config logs may have added profiles we don't
know about */
if (obd->obd_type->typ_refcnt <= 1)
@@ -733,7 +807,7 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
if (rc)
GOTO(err_decref, rc);
- rc = obd_llog_init(obd, &obd->obd_olg, obd, NULL);
+ rc = mgc_llog_init(NULL, obd);
if (rc) {
CERROR("failed to setup llogging subsystems\n");
GOTO(err_cleanup, rc);
@@ -788,8 +862,8 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
/* We've given up the lock, prepare ourselves to update. */
LDLM_DEBUG(lock, "MGC cancel CB");
- CDEBUG(D_MGC, "Lock res "LPX64" (%.8s)\n",
- lock->l_resource->lr_name.name[0],
+ CDEBUG(D_MGC, "Lock res "DLDLMRES" (%.8s)\n",
+ PLDLMRES(lock->l_resource),
(char *)&lock->l_resource->lr_name.name[0]);
if (!cld) {
@@ -1011,23 +1085,23 @@ int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
}
if (KEY_IS(KEY_SET_FS)) {
struct super_block *sb = (struct super_block *)val;
- struct lustre_sb_info *lsi;
+
if (vallen != sizeof(struct super_block))
return -EINVAL;
- lsi = s2lsi(sb);
- rc = mgc_fs_setup(exp->exp_obd, sb, lsi->lsi_srv_mnt);
- if (rc) {
+
+ rc = mgc_fs_setup(exp->exp_obd, sb);
+ if (rc)
CERROR("set_fs got %d\n", rc);
- }
+
return rc;
}
if (KEY_IS(KEY_CLEAR_FS)) {
if (vallen != 0)
return -EINVAL;
rc = mgc_fs_cleanup(exp->exp_obd);
- if (rc) {
+ if (rc)
CERROR("clear_fs got %d\n", rc);
- }
+
return rc;
}
if (KEY_IS(KEY_SET_INFO)) {
@@ -1145,49 +1219,6 @@ static int mgc_import_event(struct obd_device *obd,
return rc;
}
-static int mgc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
- struct obd_device *tgt, int *index)
-{
- struct llog_ctxt *ctxt;
- int rc;
-
- LASSERT(olg == &obd->obd_olg);
-
-
- rc = llog_setup(NULL, obd, olg, LLOG_CONFIG_REPL_CTXT, tgt,
- &llog_client_ops);
- if (rc)
- GOTO(out, rc);
-
- ctxt = llog_group_get_ctxt(olg, LLOG_CONFIG_REPL_CTXT);
- if (!ctxt)
- GOTO(out, rc = -ENODEV);
-
- llog_initiator_connect(ctxt);
- llog_ctxt_put(ctxt);
-
- return 0;
-out:
- ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
- if (ctxt)
- llog_cleanup(NULL, ctxt);
- return rc;
-}
-
-static int mgc_llog_finish(struct obd_device *obd, int count)
-{
- struct llog_ctxt *ctxt;
-
- ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
- if (ctxt)
- llog_cleanup(NULL, ctxt);
-
- ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
- if (ctxt)
- llog_cleanup(NULL, ctxt);
- return 0;
-}
-
enum {
CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
CONFIG_READ_NRPAGES = 4
@@ -1540,17 +1571,58 @@ out:
return rc;
}
+static int mgc_llog_local_copy(const struct lu_env *env,
+ struct obd_device *obd,
+ struct llog_ctxt *rctxt,
+ struct llog_ctxt *lctxt, char *logname)
+{
+ char *temp_log;
+ int rc;
+
+
+
+ /*
+ * - copy it to backup using llog_backup()
+ * - copy remote llog to logname using llog_backup()
+ * - if failed then move backup to logname again
+ */
+
+ OBD_ALLOC(temp_log, strlen(logname) + 1);
+ if (!temp_log)
+ return -ENOMEM;
+ sprintf(temp_log, "%sT", logname);
+
+ /* make a copy of local llog at first */
+ rc = llog_backup(env, obd, lctxt, lctxt, logname, temp_log);
+ if (rc < 0 && rc != -ENOENT)
+ GOTO(out, rc);
+ /* copy remote llog to the local copy */
+ rc = llog_backup(env, obd, rctxt, lctxt, logname, logname);
+ if (rc == -ENOENT) {
+ /* no remote llog, delete local one too */
+ llog_erase(env, lctxt, NULL, logname);
+ } else if (rc < 0) {
+ /* error during backup, get local one back from the copy */
+ llog_backup(env, obd, lctxt, lctxt, temp_log, logname);
+out:
+ CERROR("%s: failed to copy remote log %s: rc = %d\n",
+ obd->obd_name, logname, rc);
+ }
+ llog_erase(env, lctxt, NULL, temp_log);
+ OBD_FREE(temp_log, strlen(logname) + 1);
+ return rc;
+}
/* local_only means it cannot get remote llogs */
static int mgc_process_cfg_log(struct obd_device *mgc,
- struct config_llog_data *cld,
- int local_only)
+ struct config_llog_data *cld, int local_only)
{
- struct llog_ctxt *ctxt, *lctxt = NULL;
- struct lvfs_run_ctxt *saved_ctxt;
- struct lustre_sb_info *lsi = NULL;
- int rc = 0, must_pop = 0;
- bool sptlrpc_started = false;
+ struct llog_ctxt *ctxt, *lctxt = NULL;
+ struct dt_object *cl_mgc_dir = mgc->u.cli.cl_mgc_configs_dir;
+ struct lustre_sb_info *lsi = NULL;
+ int rc = 0;
+ bool sptlrpc_started = false;
+ struct lu_env *env;
LASSERT(cld);
LASSERT(mutex_is_locked(&cld->cld_lock));
@@ -1565,20 +1637,48 @@ static int mgc_process_cfg_log(struct obd_device *mgc,
if (cld->cld_cfg.cfg_sb)
lsi = s2lsi(cld->cld_cfg.cfg_sb);
- ctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT);
- if (!ctxt) {
- CERROR("missing llog context\n");
- return -EINVAL;
- }
-
- OBD_ALLOC_PTR(saved_ctxt);
- if (saved_ctxt == NULL)
+ OBD_ALLOC_PTR(env);
+ if (env == NULL)
return -ENOMEM;
+ rc = lu_env_init(env, LCT_MG_THREAD);
+ if (rc)
+ GOTO(out_free, rc);
+
+ ctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT);
+ LASSERT(ctxt);
+
lctxt = llog_get_context(mgc, LLOG_CONFIG_ORIG_CTXT);
- if (local_only) { /* no local log at client side */
- GOTO(out_pop, rc = -EIO);
+ /* Copy the setup log locally if we can. Don't mess around if we're
+ * running an MGS though (logs are already local). */
+ if (lctxt && lsi && IS_SERVER(lsi) && !IS_MGS(lsi) &&
+ cl_mgc_dir != NULL &&
+ lu2dt_dev(cl_mgc_dir->do_lu.lo_dev) == lsi->lsi_dt_dev) {
+ if (!local_only)
+ /* Only try to copy log if we have the lock. */
+ rc = mgc_llog_local_copy(env, mgc, ctxt, lctxt,
+ cld->cld_logname);
+ if (local_only || rc) {
+ if (llog_is_empty(env, lctxt, cld->cld_logname)) {
+ LCONSOLE_ERROR_MSG(0x13a,
+ "Failed to get MGS log %s and no local copy.\n",
+ cld->cld_logname);
+ GOTO(out_pop, rc = -ENOTCONN);
+ }
+ CDEBUG(D_MGC,
+ "Failed to get MGS log %s, using local copy for now, will try to update later.\n",
+ cld->cld_logname);
+ }
+ /* Now, whether we copied or not, start using the local llog.
+ * If we failed to copy, we'll start using whatever the old
+ * log has. */
+ llog_ctxt_put(ctxt);
+ ctxt = lctxt;
+ lctxt = NULL;
+ } else {
+ if (local_only) /* no local log at client side */
+ GOTO(out_pop, rc = -EIO);
}
if (cld_is_sptlrpc(cld)) {
@@ -1587,19 +1687,16 @@ static int mgc_process_cfg_log(struct obd_device *mgc,
}
/* logname and instance info should be the same, so use our
- copy of the instance for the update. The cfg_last_idx will
- be updated here. */
- rc = class_config_parse_llog(NULL, ctxt, cld->cld_logname,
+ * copy of the instance for the update. The cfg_last_idx will
+ * be updated here. */
+ rc = class_config_parse_llog(env, ctxt, cld->cld_logname,
&cld->cld_cfg);
out_pop:
- llog_ctxt_put(ctxt);
+ __llog_ctxt_put(env, ctxt);
if (lctxt)
- llog_ctxt_put(lctxt);
- if (must_pop)
- pop_ctxt(saved_ctxt, &mgc->obd_lvfs_ctxt, NULL);
+ __llog_ctxt_put(env, lctxt);
- OBD_FREE_PTR(saved_ctxt);
/*
* update settings on existing OBDs. doing it inside
* of llog_process_lock so no device is attaching/detaching
@@ -1614,6 +1711,9 @@ out_pop:
strlen("-sptlrpc"));
}
+ lu_env_fini(env);
+out_free:
+ OBD_FREE_PTR(env);
return rc;
}
@@ -1700,7 +1800,7 @@ static int mgc_process_config(struct obd_device *obd, obd_count len, void *buf)
char *logname;
int rc = 0;
- switch(lcfg->lcfg_command) {
+ switch (lcfg->lcfg_command) {
case LCFG_LOV_ADD_OBD: {
/* Overloading this cfg command: register a new target */
struct mgs_target_info *mti;
@@ -1795,14 +1895,12 @@ struct obd_ops mgc_obd_ops = {
.o_del_conn = client_import_del_conn,
.o_connect = client_connect_import,
.o_disconnect = client_disconnect_export,
- //.o_enqueue = mgc_enqueue,
+ /* .o_enqueue = mgc_enqueue, */
.o_cancel = mgc_cancel,
- //.o_iocontrol = mgc_iocontrol,
+ /* .o_iocontrol = mgc_iocontrol, */
.o_set_info_async = mgc_set_info_async,
.o_get_info = mgc_get_info,
.o_import_event = mgc_import_event,
- .o_llog_init = mgc_llog_init,
- .o_llog_finish = mgc_llog_finish,
.o_process_config = mgc_process_config,
};
diff --git a/drivers/staging/lustre/lustre/obdclass/capa.c b/drivers/staging/lustre/lustre/obdclass/capa.c
index 68d797ba8ae4..be1c613383a6 100644
--- a/drivers/staging/lustre/lustre/obdclass/capa.c
+++ b/drivers/staging/lustre/lustre/obdclass/capa.c
@@ -46,7 +46,6 @@
#include <asm/unistd.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/crypto.h>
#include <obd_class.h>
@@ -273,10 +272,10 @@ int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
alg = &capa_hmac_algs[capa_alg(capa)];
tfm = crypto_alloc_hash(alg->ha_name, 0, 0);
- if (!tfm) {
+ if (IS_ERR(tfm)) {
CERROR("crypto_alloc_tfm failed, check whether your kernel"
"has crypto support!\n");
- return -ENOMEM;
+ return PTR_ERR(tfm);
}
keylen = alg->ha_keylen;
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 4afd962cdb64..c93131e0d2da 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -54,7 +54,13 @@ struct list_head obd_types;
DEFINE_RWLOCK(obd_dev_lock);
__u64 obd_max_pages = 0;
+EXPORT_SYMBOL(obd_max_pages);
__u64 obd_max_alloc = 0;
+EXPORT_SYMBOL(obd_max_alloc);
+__u64 obd_alloc;
+EXPORT_SYMBOL(obd_alloc);
+__u64 obd_pages;
+EXPORT_SYMBOL(obd_pages);
DEFINE_SPINLOCK(obd_updatemax_lock);
/* The following are visible and mutable through /proc/sys/lustre/. */
@@ -501,8 +507,15 @@ int obd_init_checks(void)
}
extern spinlock_t obd_types_lock;
+#ifdef LPROCFS
extern int class_procfs_init(void);
extern int class_procfs_clean(void);
+#else
+static inline int class_procfs_init(void)
+{ return 0; }
+static inline int class_procfs_clean(void)
+{ return 0; }
+#endif
static int __init init_obdclass(void)
{
@@ -516,7 +529,7 @@ static int __init init_obdclass(void)
spin_lock_init(&obd_types_lock);
obd_zombie_impexp_init();
-#ifdef LPROCFS
+
obd_memory = lprocfs_alloc_stats(OBD_STATS_NUM,
LPROCFS_STATS_FLAG_NONE |
LPROCFS_STATS_FLAG_IRQ_SAFE);
@@ -531,7 +544,7 @@ static int __init init_obdclass(void)
lprocfs_counter_init(obd_memory, OBD_MEMORY_PAGES_STAT,
LPROCFS_CNTR_AVGMINMAX,
"pagesused", "pages");
-#endif
+
err = obd_init_checks();
if (err == -EOVERFLOW)
return err;
@@ -564,6 +577,9 @@ static int __init init_obdclass(void)
err = obd_init_caches();
if (err)
return err;
+
+ obd_sysctl_init();
+
err = class_procfs_init();
if (err)
return err;
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index f6fae16fc7f7..d9f750d42c80 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -193,7 +193,6 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
strcpy(type->typ_name, name);
spin_lock_init(&type->obd_type_lock);
-#ifdef LPROCFS
type->typ_procroot = lprocfs_register(type->typ_name, proc_lustre_root,
vars, type);
if (IS_ERR(type->typ_procroot)) {
@@ -201,7 +200,7 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
type->typ_procroot = NULL;
GOTO (failed, rc);
}
-#endif
+
if (ldt != NULL) {
type->typ_lu = ldt;
rc = lu_device_type_init(ldt);
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index d1a57ebfda95..121a856d5052 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -56,7 +56,6 @@
#include <linux/proc_fs.h>
#include <linux/fs.h>
#include <linux/poll.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <asm/io.h>
@@ -295,9 +294,6 @@ struct lprocfs_vars lprocfs_base[] = {
{ "jobid_var", &obd_proc_jobid_var_fops },
{ 0 }
};
-#else
-#define lprocfs_base NULL
-#endif /* LPROCFS */
static void *obd_device_list_seq_start(struct seq_file *p, loff_t *pos)
{
@@ -380,7 +376,6 @@ int class_procfs_init(void)
{
int rc = 0;
- obd_sysctl_init();
proc_lustre_root = lprocfs_register("fs/lustre", NULL,
lprocfs_base, NULL);
if (IS_ERR(proc_lustre_root)) {
@@ -404,3 +399,4 @@ int class_procfs_clean(void)
}
return 0;
}
+#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index acd2619227df..c1ef0c9b5a1a 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -282,7 +282,6 @@ int LL_PROC_PROTO(proc_at_history)
#ifdef CONFIG_SYSCTL
static ctl_table_t obd_table[] = {
{
- INIT_CTL_NAME(OBD_TIMEOUT)
.procname = "timeout",
.data = &obd_timeout,
.maxlen = sizeof(int),
@@ -290,7 +289,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_set_timeout
},
{
- INIT_CTL_NAME(OBD_DEBUG_PEER_ON_TIMEOUT)
.procname = "debug_peer_on_timeout",
.data = &obd_debug_peer_on_timeout,
.maxlen = sizeof(int),
@@ -298,7 +296,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_dointvec
},
{
- INIT_CTL_NAME(OBD_DUMP_ON_TIMEOUT)
.procname = "dump_on_timeout",
.data = &obd_dump_on_timeout,
.maxlen = sizeof(int),
@@ -306,7 +303,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_dointvec
},
{
- INIT_CTL_NAME(OBD_DUMP_ON_EVICTION)
.procname = "dump_on_eviction",
.data = &obd_dump_on_eviction,
.maxlen = sizeof(int),
@@ -314,7 +310,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_dointvec
},
{
- INIT_CTL_NAME(OBD_MEMUSED)
.procname = "memused",
.data = NULL,
.maxlen = 0,
@@ -322,7 +317,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_memory_alloc
},
{
- INIT_CTL_NAME(OBD_PAGESUSED)
.procname = "pagesused",
.data = NULL,
.maxlen = 0,
@@ -330,7 +324,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_pages_alloc
},
{
- INIT_CTL_NAME(OBD_MAXMEMUSED)
.procname = "memused_max",
.data = NULL,
.maxlen = 0,
@@ -338,7 +331,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_mem_max
},
{
- INIT_CTL_NAME(OBD_MAXPAGESUSED)
.procname = "pagesused_max",
.data = NULL,
.maxlen = 0,
@@ -346,7 +338,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_pages_max
},
{
- INIT_CTL_NAME(OBD_LDLM_TIMEOUT)
.procname = "ldlm_timeout",
.data = &ldlm_timeout,
.maxlen = sizeof(int),
@@ -354,7 +345,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_set_timeout
},
{
- INIT_CTL_NAME(OBD_ALLOC_FAIL_RATE)
.procname = "alloc_fail_rate",
.data = &obd_alloc_fail_rate,
.maxlen = sizeof(int),
@@ -362,7 +352,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_alloc_fail_rate
},
{
- INIT_CTL_NAME(OBD_MAX_DIRTY_PAGES)
.procname = "max_dirty_mb",
.data = &obd_max_dirty_pages,
.maxlen = sizeof(int),
@@ -370,7 +359,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_max_dirty_pages_in_mb
},
{
- INIT_CTL_NAME(OBD_AT_MIN)
.procname = "at_min",
.data = &at_min,
.maxlen = sizeof(int),
@@ -378,7 +366,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_at_min
},
{
- INIT_CTL_NAME(OBD_AT_MAX)
.procname = "at_max",
.data = &at_max,
.maxlen = sizeof(int),
@@ -386,7 +373,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_at_max
},
{
- INIT_CTL_NAME(OBD_AT_EXTRA)
.procname = "at_extra",
.data = &at_extra,
.maxlen = sizeof(int),
@@ -394,7 +380,6 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_at_extra
},
{
- INIT_CTL_NAME(OBD_AT_EARLY_MARGIN)
.procname = "at_early_margin",
.data = &at_early_margin,
.maxlen = sizeof(int),
@@ -402,26 +387,24 @@ static ctl_table_t obd_table[] = {
.proc_handler = &proc_at_early_margin
},
{
- INIT_CTL_NAME(OBD_AT_HISTORY)
.procname = "at_history",
.data = &at_history,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_at_history
},
- { INIT_CTL_NAME(0) }
+ {}
};
static ctl_table_t parent_table[] = {
{
- INIT_CTL_NAME(OBD_SYSCTL)
.procname = "lustre",
.data = NULL,
.maxlen = 0,
.mode = 0555,
.child = obd_table
},
- { INIT_CTL_NAME(0) }
+ {}
};
#endif
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 0cb44287502b..e0dfb089dd90 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -62,7 +62,7 @@ struct llog_handle *llog_alloc_handle(void)
OBD_ALLOC_PTR(loghandle);
if (loghandle == NULL)
- return ERR_PTR(-ENOMEM);
+ return NULL;
init_rwsem(&loghandle->lgh_lock);
spin_lock_init(&loghandle->lgh_hdr_lock);
@@ -265,31 +265,6 @@ out:
}
EXPORT_SYMBOL(llog_init_handle);
-int llog_copy_handler(const struct lu_env *env,
- struct llog_handle *llh,
- struct llog_rec_hdr *rec,
- void *data)
-{
- struct llog_rec_hdr local_rec = *rec;
- struct llog_handle *local_llh = (struct llog_handle *)data;
- char *cfg_buf = (char*) (rec + 1);
- struct lustre_cfg *lcfg;
- int rc = 0;
-
- /* Append all records */
- local_rec.lrh_len -= sizeof(*rec) + sizeof(struct llog_rec_tail);
- rc = llog_write(env, local_llh, &local_rec, NULL, 0,
- (void *)cfg_buf, -1);
-
- lcfg = (struct lustre_cfg *)cfg_buf;
- CDEBUG(D_INFO, "idx=%d, rc=%d, len=%d, cmd %x %s %s\n",
- rec->lrh_index, rc, rec->lrh_len, lcfg->lcfg_command,
- lustre_cfg_string(lcfg, 0), lustre_cfg_string(lcfg, 1));
-
- return rc;
-}
-EXPORT_SYMBOL(llog_copy_handler);
-
static int llog_process_thread(void *arg)
{
struct llog_process_info *lpi = arg;
@@ -493,14 +468,6 @@ int llog_process(const struct lu_env *env, struct llog_handle *loghandle,
}
EXPORT_SYMBOL(llog_process);
-inline int llog_get_size(struct llog_handle *loghandle)
-{
- if (loghandle && loghandle->lgh_hdr)
- return loghandle->lgh_hdr->llh_count;
- return 0;
-}
-EXPORT_SYMBOL(llog_get_size);
-
int llog_reverse_process(const struct lu_env *env,
struct llog_handle *loghandle, llog_cb_t cb,
void *data, void *catdata)
@@ -767,8 +734,9 @@ int llog_open_create(const struct lu_env *env, struct llog_ctxt *ctxt,
struct llog_handle **res, struct llog_logid *logid,
char *name)
{
- struct thandle *th;
- int rc;
+ struct dt_device *d;
+ struct thandle *th;
+ int rc;
rc = llog_open(env, ctxt, res, logid, name, LLOG_OPEN_NEW);
if (rc)
@@ -777,27 +745,21 @@ int llog_open_create(const struct lu_env *env, struct llog_ctxt *ctxt,
if (llog_exist(*res))
return 0;
- if ((*res)->lgh_obj != NULL) {
- struct dt_device *d;
+ LASSERT((*res)->lgh_obj != NULL);
- d = lu2dt_dev((*res)->lgh_obj->do_lu.lo_dev);
+ d = lu2dt_dev((*res)->lgh_obj->do_lu.lo_dev);
- th = dt_trans_create(env, d);
- if (IS_ERR(th))
- GOTO(out, rc = PTR_ERR(th));
+ th = dt_trans_create(env, d);
+ if (IS_ERR(th))
+ GOTO(out, rc = PTR_ERR(th));
- rc = llog_declare_create(env, *res, th);
- if (rc == 0) {
- rc = dt_trans_start_local(env, d, th);
- if (rc == 0)
- rc = llog_create(env, *res, th);
- }
- dt_trans_stop(env, d, th);
- } else {
- /* lvfs compat code */
- LASSERT((*res)->lgh_file == NULL);
- rc = llog_create(env, *res, NULL);
+ rc = llog_declare_create(env, *res, th);
+ if (rc == 0) {
+ rc = dt_trans_start_local(env, d, th);
+ if (rc == 0)
+ rc = llog_create(env, *res, th);
}
+ dt_trans_stop(env, d, th);
out:
if (rc)
llog_close(env, *res);
@@ -842,41 +804,34 @@ int llog_write(const struct lu_env *env, struct llog_handle *loghandle,
struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
int cookiecount, void *buf, int idx)
{
- int rc;
+ struct dt_device *dt;
+ struct thandle *th;
+ int rc;
LASSERT(loghandle);
LASSERT(loghandle->lgh_ctxt);
+ LASSERT(loghandle->lgh_obj != NULL);
- if (loghandle->lgh_obj != NULL) {
- struct dt_device *dt;
- struct thandle *th;
-
- dt = lu2dt_dev(loghandle->lgh_obj->do_lu.lo_dev);
+ dt = lu2dt_dev(loghandle->lgh_obj->do_lu.lo_dev);
- th = dt_trans_create(env, dt);
- if (IS_ERR(th))
- return PTR_ERR(th);
+ th = dt_trans_create(env, dt);
+ if (IS_ERR(th))
+ return PTR_ERR(th);
- rc = llog_declare_write_rec(env, loghandle, rec, idx, th);
- if (rc)
- GOTO(out_trans, rc);
+ rc = llog_declare_write_rec(env, loghandle, rec, idx, th);
+ if (rc)
+ GOTO(out_trans, rc);
- rc = dt_trans_start_local(env, dt, th);
- if (rc)
- GOTO(out_trans, rc);
+ rc = dt_trans_start_local(env, dt, th);
+ if (rc)
+ GOTO(out_trans, rc);
- down_write(&loghandle->lgh_lock);
- rc = llog_write_rec(env, loghandle, rec, reccookie,
- cookiecount, buf, idx, th);
- up_write(&loghandle->lgh_lock);
+ down_write(&loghandle->lgh_lock);
+ rc = llog_write_rec(env, loghandle, rec, reccookie,
+ cookiecount, buf, idx, th);
+ up_write(&loghandle->lgh_lock);
out_trans:
- dt_trans_stop(env, dt, th);
- } else { /* lvfs compatibility */
- down_write(&loghandle->lgh_lock);
- rc = llog_write_rec(env, loghandle, rec, reccookie,
- cookiecount, buf, idx, NULL);
- up_write(&loghandle->lgh_lock);
- }
+ dt_trans_stop(env, dt, th);
return rc;
}
EXPORT_SYMBOL(llog_write);
@@ -932,3 +887,104 @@ out:
return rc;
}
EXPORT_SYMBOL(llog_close);
+
+int llog_is_empty(const struct lu_env *env, struct llog_ctxt *ctxt,
+ char *name)
+{
+ struct llog_handle *llh;
+ int rc = 0;
+
+ rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
+ if (rc < 0) {
+ if (likely(rc == -ENOENT))
+ rc = 0;
+ GOTO(out, rc);
+ }
+
+ rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, NULL);
+ if (rc)
+ GOTO(out_close, rc);
+ rc = llog_get_size(llh);
+
+out_close:
+ llog_close(env, llh);
+out:
+ /* header is record 1 */
+ return rc <= 1;
+}
+EXPORT_SYMBOL(llog_is_empty);
+
+int llog_copy_handler(const struct lu_env *env, struct llog_handle *llh,
+ struct llog_rec_hdr *rec, void *data)
+{
+ struct llog_handle *copy_llh = data;
+
+ /* Append all records */
+ return llog_write(env, copy_llh, rec, NULL, 0, NULL, -1);
+}
+EXPORT_SYMBOL(llog_copy_handler);
+
+/* backup plain llog */
+int llog_backup(const struct lu_env *env, struct obd_device *obd,
+ struct llog_ctxt *ctxt, struct llog_ctxt *bctxt,
+ char *name, char *backup)
+{
+ struct llog_handle *llh, *bllh;
+ int rc;
+
+
+
+ /* open original log */
+ rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
+ if (rc < 0) {
+ /* the -ENOENT case is also reported to the caller
+ * but silently so it should handle that if needed.
+ */
+ if (rc != -ENOENT)
+ CERROR("%s: failed to open log %s: rc = %d\n",
+ obd->obd_name, name, rc);
+ return rc;
+ }
+
+ rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, NULL);
+ if (rc)
+ GOTO(out_close, rc);
+
+ /* Make sure there's no old backup log */
+ rc = llog_erase(env, bctxt, NULL, backup);
+ if (rc < 0 && rc != -ENOENT)
+ GOTO(out_close, rc);
+
+ /* open backup log */
+ rc = llog_open_create(env, bctxt, &bllh, NULL, backup);
+ if (rc) {
+ CERROR("%s: failed to open backup logfile %s: rc = %d\n",
+ obd->obd_name, backup, rc);
+ GOTO(out_close, rc);
+ }
+
+ /* check that backup llog is not the same object as original one */
+ if (llh->lgh_obj == bllh->lgh_obj) {
+ CERROR("%s: backup llog %s to itself (%s), objects %p/%p\n",
+ obd->obd_name, name, backup, llh->lgh_obj,
+ bllh->lgh_obj);
+ GOTO(out_backup, rc = -EEXIST);
+ }
+
+ rc = llog_init_handle(env, bllh, LLOG_F_IS_PLAIN, NULL);
+ if (rc)
+ GOTO(out_backup, rc);
+
+ /* Copy log record by record */
+ rc = llog_process_or_fork(env, llh, llog_copy_handler, (void *)bllh,
+ NULL, false);
+ if (rc)
+ CERROR("%s: failed to backup log %s: rc = %d\n",
+ obd->obd_name, name, rc);
+out_backup:
+ llog_close(env, bllh);
+out_close:
+ llog_close(env, llh);
+ return rc;
+}
+EXPORT_SYMBOL(llog_backup);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_test.c b/drivers/staging/lustre/lustre/obdclass/llog_test.c
index 178f89eccab1..764068fc4ef7 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_test.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_test.c
@@ -947,6 +947,10 @@ static void lprocfs_llog_test_init_vars(struct lprocfs_static_vars *lvars)
lvars->module_vars = lprocfs_llog_test_module_vars;
lvars->obd_vars = lprocfs_llog_test_obd_vars;
}
+#else
+static void lprocfs_llog_test_init_vars(struct lprocfs_static_vars *lvars)
+{
+}
#endif
static int llog_test_cleanup(struct obd_device *obd)
@@ -1048,7 +1052,7 @@ static struct obd_ops llog_obd_ops = {
static int __init llog_test_init(void)
{
- struct lprocfs_static_vars lvars;
+ struct lprocfs_static_vars uninitialized_var(lvars);
lprocfs_llog_test_init_vars(&lvars);
return class_register_type(&llog_obd_ops, NULL,
diff --git a/drivers/staging/lustre/lustre/obdclass/local_storage.c b/drivers/staging/lustre/lustre/obdclass/local_storage.c
index cc19fbab0207..e79e4beb3628 100644
--- a/drivers/staging/lustre/lustre/obdclass/local_storage.c
+++ b/drivers/staging/lustre/lustre/obdclass/local_storage.c
@@ -246,7 +246,7 @@ int local_object_create(const struct lu_env *env,
struct dt_object_format *dof, struct thandle *th)
{
struct dt_thread_info *dti = dt_info(env);
- obd_id lastid;
+ __le64 lastid;
int rc;
rc = dt_create(env, o, attr, NULL, dof, th);
@@ -855,9 +855,12 @@ out_los:
(*los)->los_seq = fid_seq(first_fid);
(*los)->los_last_oid = le64_to_cpu(lastid);
(*los)->los_obj = o;
- /* read value should not be less than initial one */
- LASSERTF((*los)->los_last_oid >= first_oid, "%u < %u\n",
- (*los)->los_last_oid, first_oid);
+ /* The read value should not be less than the initial one, but that
+ * is possible after an upgrade from an older fs.
+ * In this case just switch to first_oid in memory; it will be
+ * updated on disk with the first object generated */
+ if ((*los)->los_last_oid < first_oid)
+ (*los)->los_last_oid = first_oid;
}
out:
mutex_unlock(&ls->ls_los_mutex);
diff --git a/drivers/staging/lustre/lustre/obdclass/local_storage.h b/drivers/staging/lustre/lustre/obdclass/local_storage.h
index d553c3752703..0f63b8c073b4 100644
--- a/drivers/staging/lustre/lustre/obdclass/local_storage.h
+++ b/drivers/staging/lustre/lustre/obdclass/local_storage.h
@@ -29,6 +29,8 @@
*
* Author: Mikhail Pershin <mike.pershin@intel.com>
*/
+#ifndef __LOCAL_STORAGE_H
+#define __LOCAL_STORAGE_H
#include <dt_object.h>
#include <obd.h>
@@ -86,3 +88,4 @@ struct los_ondisk {
};
#define LOS_MAGIC 0xdecafbee
+#endif
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 02d76f8dbcb9..ec3b605dae6b 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -46,11 +46,183 @@
#include <lustre/lustre_idl.h>
#include <linux/seq_file.h>
-#if defined(LPROCFS)
+static const char * const obd_connect_names[] = {
+ "read_only",
+ "lov_index",
+ "unused",
+ "write_grant",
+ "server_lock",
+ "version",
+ "request_portal",
+ "acl",
+ "xattr",
+ "create_on_write",
+ "truncate_lock",
+ "initial_transno",
+ "inode_bit_locks",
+ "join_file(obsolete)",
+ "getattr_by_fid",
+ "no_oh_for_devices",
+ "remote_client",
+ "remote_client_by_force",
+ "max_byte_per_rpc",
+ "64bit_qdata",
+ "mds_capability",
+ "oss_capability",
+ "early_lock_cancel",
+ "som",
+ "adaptive_timeouts",
+ "lru_resize",
+ "mds_mds_connection",
+ "real_conn",
+ "change_qunit_size",
+ "alt_checksum_algorithm",
+ "fid_is_enabled",
+ "version_recovery",
+ "pools",
+ "grant_shrink",
+ "skip_orphan",
+ "large_ea",
+ "full20",
+ "layout_lock",
+ "64bithash",
+ "object_max_bytes",
+ "imp_recov",
+ "jobstats",
+ "umask",
+ "einprogress",
+ "grant_param",
+ "flock_owner",
+ "lvb_type",
+ "nanoseconds_times",
+ "lightweight_conn",
+ "short_io",
+ "pingless",
+ "unknown",
+ NULL
+};
+
+int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep)
+{
+ __u64 mask = 1;
+ int i, ret = 0;
+
+ for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) {
+ if (flags & mask)
+ ret += snprintf(page + ret, count - ret, "%s%s",
+ ret ? sep : "", obd_connect_names[i]);
+ }
+ if (flags & ~(mask - 1))
+ ret += snprintf(page + ret, count - ret,
+ "%sunknown flags "LPX64,
+ ret ? sep : "", flags & ~(mask - 1));
+ return ret;
+}
+EXPORT_SYMBOL(obd_connect_flags2str);
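
obd_connect_flags2str(), moved above the LPROCFS-only region so it is built unconditionally, walks a NULL-terminated name table in lockstep with a shifting bit mask, appends the name of every set flag, and reports any bits past the end of the table as unknown. Here is a self-contained sketch of the same bitmask-to-string technique in plain C; the flag names are illustrative and %llx stands in for the kernel's LPX64.

#include <stdio.h>
#include <stdint.h>

static const char *const flag_names[] = {
        "read_only", "acl", "xattr", "grant_shrink", NULL
};

/* Append the name of every set bit to buf, separated by sep;
 * bits with no table entry are summarized at the end. */
static int flags2str(char *buf, int size, uint64_t flags, const char *sep)
{
        uint64_t mask = 1;
        int i, len = 0;

        for (i = 0; flag_names[i] != NULL; i++, mask <<= 1)
                if (flags & mask)
                        len += snprintf(buf + len, size - len, "%s%s",
                                        len ? sep : "", flag_names[i]);
        if (flags & ~(mask - 1))
                len += snprintf(buf + len, size - len, "%sunknown 0x%llx",
                                len ? sep : "",
                                (unsigned long long)(flags & ~(mask - 1)));
        return len;
}

int main(void)
{
        char buf[128];

        flags2str(buf, sizeof(buf), 0x1 | 0x4 | (1ULL << 40), ", ");
        printf("%s\n", buf);    /* read_only, xattr, unknown 0x10000000000 */
        return 0;
}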
+
+int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
+ int mult)
+{
+ long decimal_val, frac_val;
+ int prtn;
+
+ if (count < 10)
+ return -EINVAL;
+
+ decimal_val = val / mult;
+ prtn = snprintf(buffer, count, "%ld", decimal_val);
+ frac_val = val % mult;
+
+ if (prtn < (count - 4) && frac_val > 0) {
+ long temp_frac;
+ int i, temp_mult = 1, frac_bits = 0;
+
+ temp_frac = frac_val * 10;
+ buffer[prtn++] = '.';
+ while (frac_bits < 2 && (temp_frac / mult) < 1) {
+ /* only reserved 2 bits fraction */
+ buffer[prtn++] = '0';
+ temp_frac *= 10;
+ frac_bits++;
+ }
+ /*
+ * Need to think these cases :
+ * 1. #echo x.00 > /proc/xxx output result : x
+ * 2. #echo x.0x > /proc/xxx output result : x.0x
+ * 3. #echo x.x0 > /proc/xxx output result : x.x
+ * 4. #echo x.xx > /proc/xxx output result : x.xx
+ * Only reserved 2 bits fraction.
+ */
+ for (i = 0; i < (5 - prtn); i++)
+ temp_mult *= 10;
+
+ frac_bits = min((int)count - prtn, 3 - frac_bits);
+ prtn += snprintf(buffer + prtn, frac_bits, "%ld",
+ frac_val * temp_mult / mult);
+
+ prtn--;
+ while (buffer[prtn] < '1' || buffer[prtn] > '9') {
+ prtn--;
+ if (buffer[prtn] == '.') {
+ prtn--;
+ break;
+ }
+ }
+ prtn++;
+ }
+ buffer[prtn++] = '\n';
+ return prtn;
+}
+EXPORT_SYMBOL(lprocfs_read_frac_helper);
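
lprocfs_read_frac_helper(), likewise hoisted out of the LPROCFS-only block, prints val/mult as a decimal with at most two fractional digits and strips trailing zeros, so 2500 with mult 1000 reads back as "2.5". The sketch below reproduces that integer-only formatting in userspace; it assumes non-negative values and its rounding differs slightly from the kernel helper.

#include <stdio.h>

/* Print val/mult with up to two fractional digits, trailing zeros
 * trimmed -- the effect lprocfs_read_frac_helper() achieves with
 * snprintf and manual digit trimming. Integer math only. */
static int frac_print(char *buf, int size, long val, int mult)
{
        long whole = val / mult;
        long frac2 = (val % mult) * 100 / mult; /* two decimal places */

        if (frac2 == 0)
                return snprintf(buf, size, "%ld\n", whole);
        if (frac2 % 10 == 0)
                return snprintf(buf, size, "%ld.%ld\n", whole, frac2 / 10);
        return snprintf(buf, size, "%ld.%02ld\n", whole, frac2);
}

int main(void)
{
        char buf[32];

        frac_print(buf, sizeof(buf), 2500, 1000);       /* "2.5"  */
        printf("%s", buf);
        frac_print(buf, sizeof(buf), 2345, 1000);       /* "2.34" */
        printf("%s", buf);
        return 0;
}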
+
+int lprocfs_write_frac_helper(const char *buffer, unsigned long count,
+ int *val, int mult)
+{
+ char kernbuf[20], *end, *pbuf;
+
+ if (count > (sizeof(kernbuf) - 1))
+ return -EINVAL;
+
+ if (copy_from_user(kernbuf, buffer, count))
+ return -EFAULT;
+
+ kernbuf[count] = '\0';
+ pbuf = kernbuf;
+ if (*pbuf == '-') {
+ mult = -mult;
+ pbuf++;
+ }
+
+ *val = (int)simple_strtoul(pbuf, &end, 10) * mult;
+ if (pbuf == end)
+ return -EINVAL;
+
+ if (end != NULL && *end == '.') {
+ int temp_val, pow = 1;
+ int i;
+
+ pbuf = end + 1;
+ if (strlen(pbuf) > 5)
+ pbuf[5] = '\0'; /*only allow 5bits fractional*/
+
+ temp_val = (int)simple_strtoul(pbuf, &end, 10) * mult;
+
+ if (pbuf < end) {
+ for (i = 0; i < (end - pbuf); i++)
+ pow *= 10;
+
+ *val += temp_val / pow;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(lprocfs_write_frac_helper);
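
lprocfs_write_frac_helper() is the inverse: it parses strings such as "1.25" written to /proc and folds the fractional digits into an integer scaled by mult, so "1.25" with mult 1000 becomes 1250. A standalone sketch of that parse, using strtol in place of the kernel's simple_strtoul, with sign and overflow handling omitted:

#include <stdio.h>
#include <stdlib.h>

/* Parse "d[.frac]" into an integer scaled by mult, e.g. "1.25" with
 * mult == 1000 yields 1250, the same folding done by the helper above. */
static int parse_frac(const char *s, int mult, int *out)
{
        char *end;
        long val = strtol(s, &end, 10) * mult;

        if (end == s)
                return -1;                      /* no digits at all */
        if (*end == '.') {
                const char *p = end + 1;
                long frac = strtol(p, &end, 10) * mult;
                long pow = 1;

                while (p++ < end)
                        pow *= 10;              /* 10^(number of fractional digits) */
                val += frac / pow;
        }
        *out = (int)val;
        return 0;
}

int main(void)
{
        int v;

        if (parse_frac("1.25", 1000, &v) == 0)
                printf("%d\n", v);              /* 1250 */
        return 0;
}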
+
+#ifdef LPROCFS
static int lprocfs_no_percpu_stats = 0;
-CFS_MODULE_PARM(lprocfs_no_percpu_stats, "i", int, 0644,
- "Do not alloc percpu data for lprocfs stats");
+module_param(lprocfs_no_percpu_stats, int, 0644);
+MODULE_PARM_DESC(lprocfs_no_percpu_stats, "Do not alloc percpu data for lprocfs stats");
#define MAX_STRING_SIZE 128
@@ -420,7 +592,6 @@ void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
{
unsigned int num_entry;
struct lprocfs_counter *percpu_cntr;
- struct lprocfs_counter_header *cntr_header;
int i;
unsigned long flags = 0;
@@ -439,7 +610,6 @@ void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
for (i = 0; i < num_entry; i++) {
if (stats->ls_percpu[i] == NULL)
continue;
- cntr_header = &stats->ls_cnt_header[idx];
percpu_cntr = lprocfs_stats_counter_get(stats, i, idx);
cnt->lc_count += percpu_cntr->lc_count;
@@ -481,62 +651,6 @@ static int obd_import_flags2str(struct obd_import *imp, struct seq_file *m)
}
#undef flags2str
-static const char *obd_connect_names[] = {
- "read_only",
- "lov_index",
- "unused",
- "write_grant",
- "server_lock",
- "version",
- "request_portal",
- "acl",
- "xattr",
- "create_on_write",
- "truncate_lock",
- "initial_transno",
- "inode_bit_locks",
- "join_file(obsolete)",
- "getattr_by_fid",
- "no_oh_for_devices",
- "remote_client",
- "remote_client_by_force",
- "max_byte_per_rpc",
- "64bit_qdata",
- "mds_capability",
- "oss_capability",
- "early_lock_cancel",
- "som",
- "adaptive_timeouts",
- "lru_resize",
- "mds_mds_connection",
- "real_conn",
- "change_qunit_size",
- "alt_checksum_algorithm",
- "fid_is_enabled",
- "version_recovery",
- "pools",
- "grant_shrink",
- "skip_orphan",
- "large_ea",
- "full20",
- "layout_lock",
- "64bithash",
- "object_max_bytes",
- "imp_recov",
- "jobstats",
- "umask",
- "einprogress",
- "grant_param",
- "flock_owner",
- "lvb_type",
- "nanoseconds_times",
- "lightweight_conn",
- "short_io",
- "pingless",
- "unknown",
- NULL
-};
-
static void obd_connect_seq_flags2str(struct seq_file *m, __u64 flags, char *sep)
{
__u64 mask = 1;
@@ -555,24 +669,6 @@ static void obd_connect_seq_flags2str(struct seq_file *m, __u64 flags, char *sep
first ? sep : "", flags & ~(mask - 1));
}
-int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep)
-{
- __u64 mask = 1;
- int i, ret = 0;
-
- for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) {
- if (flags & mask)
- ret += snprintf(page + ret, count - ret, "%s%s",
- ret ? sep : "", obd_connect_names[i]);
- }
- if (flags & ~(mask - 1))
- ret += snprintf(page + ret, count - ret,
- "%sunknown flags "LPX64,
- ret ? sep : "", flags & ~(mask - 1));
- return ret;
-}
-EXPORT_SYMBOL(obd_connect_flags2str);
-
int lprocfs_rd_import(struct seq_file *m, void *data)
{
struct lprocfs_counter ret;
@@ -999,7 +1095,6 @@ EXPORT_SYMBOL(lprocfs_free_stats);
void lprocfs_clear_stats(struct lprocfs_stats *stats)
{
struct lprocfs_counter *percpu_cntr;
- struct lprocfs_counter_header *header;
int i;
int j;
unsigned int num_entry;
@@ -1011,7 +1106,6 @@ void lprocfs_clear_stats(struct lprocfs_stats *stats)
if (stats->ls_percpu[i] == NULL)
continue;
for (j = 0; j < stats->ls_num; j++) {
- header = &stats->ls_cnt_header[j];
percpu_cntr = lprocfs_stats_counter_get(stats, i, j);
percpu_cntr->lc_count = 0;
percpu_cntr->lc_min = LC_MIN_INIT;
@@ -1662,104 +1756,6 @@ int lprocfs_write_helper(const char *buffer, unsigned long count,
}
EXPORT_SYMBOL(lprocfs_write_helper);
-int lprocfs_write_frac_helper(const char *buffer, unsigned long count,
- int *val, int mult)
-{
- char kernbuf[20], *end, *pbuf;
-
- if (count > (sizeof(kernbuf) - 1))
- return -EINVAL;
-
- if (copy_from_user(kernbuf, buffer, count))
- return -EFAULT;
-
- kernbuf[count] = '\0';
- pbuf = kernbuf;
- if (*pbuf == '-') {
- mult = -mult;
- pbuf++;
- }
-
- *val = (int)simple_strtoul(pbuf, &end, 10) * mult;
- if (pbuf == end)
- return -EINVAL;
-
- if (end != NULL && *end == '.') {
- int temp_val, pow = 1;
- int i;
-
- pbuf = end + 1;
- if (strlen(pbuf) > 5)
- pbuf[5] = '\0'; /*only allow 5bits fractional*/
-
- temp_val = (int)simple_strtoul(pbuf, &end, 10) * mult;
-
- if (pbuf < end) {
- for (i = 0; i < (end - pbuf); i++)
- pow *= 10;
-
- *val += temp_val / pow;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_write_frac_helper);
-
-int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
- int mult)
-{
- long decimal_val, frac_val;
- int prtn;
-
- if (count < 10)
- return -EINVAL;
-
- decimal_val = val / mult;
- prtn = snprintf(buffer, count, "%ld", decimal_val);
- frac_val = val % mult;
-
- if (prtn < (count - 4) && frac_val > 0) {
- long temp_frac;
- int i, temp_mult = 1, frac_bits = 0;
-
- temp_frac = frac_val * 10;
- buffer[prtn++] = '.';
- while (frac_bits < 2 && (temp_frac / mult) < 1 ) {
- /* only reserved 2 bits fraction */
- buffer[prtn++] ='0';
- temp_frac *= 10;
- frac_bits++;
- }
- /*
- * Need to think these cases :
- * 1. #echo x.00 > /proc/xxx output result : x
- * 2. #echo x.0x > /proc/xxx output result : x.0x
- * 3. #echo x.x0 > /proc/xxx output result : x.x
- * 4. #echo x.xx > /proc/xxx output result : x.xx
- * Only reserved 2 bits fraction.
- */
- for (i = 0; i < (5 - prtn); i++)
- temp_mult *= 10;
-
- frac_bits = min((int)count - prtn, 3 - frac_bits);
- prtn += snprintf(buffer + prtn, frac_bits, "%ld",
- frac_val * temp_mult / mult);
-
- prtn--;
- while(buffer[prtn] < '1' || buffer[prtn] > '9') {
- prtn--;
- if (buffer[prtn] == '.') {
- prtn--;
- break;
- }
- }
- prtn++;
- }
- buffer[prtn++] ='\n';
- return prtn;
-}
-EXPORT_SYMBOL(lprocfs_read_frac_helper);
-
int lprocfs_seq_read_frac_helper(struct seq_file *m, long val, int mult)
{
long decimal_val, frac_val;
@@ -1983,4 +1979,4 @@ int lprocfs_obd_rd_max_pages_per_rpc(struct seq_file *m, void *data)
}
EXPORT_SYMBOL(lprocfs_obd_rd_max_pages_per_rpc);
-#endif /* LPROCFS*/
+#endif
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 212823ab937b..9887d8fffb6e 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -200,6 +200,8 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
struct lu_object *scan;
struct lu_object *top;
struct list_head *layers;
+ unsigned int init_mask = 0;
+ unsigned int init_flag;
int clean;
int result;
@@ -218,15 +220,17 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
*/
top->lo_header->loh_fid = *f;
layers = &top->lo_header->loh_layers;
+
do {
/*
* Call ->loo_object_init() repeatedly, until no more new
* object slices are created.
*/
clean = 1;
+ init_flag = 1;
list_for_each_entry(scan, layers, lo_linkage) {
- if (scan->lo_flags & LU_OBJECT_ALLOCATED)
- continue;
+ if (init_mask & init_flag)
+ goto next;
clean = 0;
scan->lo_header = top->lo_header;
result = scan->lo_ops->loo_object_init(env, scan, conf);
@@ -234,7 +238,9 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
lu_object_free(env, top);
return ERR_PTR(result);
}
- scan->lo_flags |= LU_OBJECT_ALLOCATED;
+ init_mask |= init_flag;
+next:
+ init_flag <<= 1;
}
} while (!clean);
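
The hunk above replaces the per-slice LU_OBJECT_ALLOCATED flag with a local bitmask: each pass over the layer list skips positions already recorded in init_mask, and the loop repeats until a pass initializes nothing new, since an ->loo_object_init() call may append further slices. A small standalone sketch of that repeat-until-clean bitmask walk, using an array in place of the linked list and illustrative names only:

#include <stdio.h>

#define MAX_LAYERS 8

struct layer { const char *name; int adds_one_more; };

/* One pass may append new layers; repeat until a pass initializes
 * nothing new. A local bitmask records which positions were already
 * handled, replacing the per-object ALLOCATED flag from the diff. */
static void init_all(struct layer *layers, int *count)
{
        unsigned int init_mask = 0;
        int clean;

        do {
                unsigned int init_flag = 1;
                int i;

                clean = 1;
                for (i = 0; i < *count; i++, init_flag <<= 1) {
                        if (init_mask & init_flag)
                                continue;       /* already done on an earlier pass */
                        clean = 0;
                        printf("init %s\n", layers[i].name);
                        if (layers[i].adds_one_more && *count < MAX_LAYERS)
                                layers[(*count)++] = (struct layer){ "extra", 0 };
                        init_mask |= init_flag;
                }
        } while (!clean);
}

int main(void)
{
        struct layer layers[MAX_LAYERS] = { { "top", 1 }, { "mid", 0 } };
        int count = 2;

        init_all(layers, &count);
        return 0;
}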
@@ -423,7 +429,7 @@ LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
*/
struct lu_context_key lu_global_key = {
.lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
- LCT_MG_THREAD | LCT_CL_THREAD,
+ LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL,
.lct_init = lu_global_key_init,
.lct_fini = lu_global_key_fini
};
@@ -487,23 +493,25 @@ void lu_object_print(const struct lu_env *env, void *cookie,
{
static const char ruler[] = "........................................";
struct lu_object_header *top;
- int depth;
+ int depth = 4;
top = o->lo_header;
lu_object_header_print(env, cookie, printer, top);
- (*printer)(env, cookie, "{ \n");
- list_for_each_entry(o, &top->loh_layers, lo_linkage) {
- depth = o->lo_depth + 4;
+ (*printer)(env, cookie, "{\n");
+ list_for_each_entry(o, &top->loh_layers, lo_linkage) {
/*
* print `.' \a depth times followed by type name and address
*/
(*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
o->lo_dev->ld_type->ldt_name, o);
+
if (o->lo_ops->loo_object_print != NULL)
- o->lo_ops->loo_object_print(env, cookie, printer, o);
+ (*o->lo_ops->loo_object_print)(env, cookie, printer, o);
+
(*printer)(env, cookie, "\n");
}
+
(*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);
@@ -830,8 +838,8 @@ enum {
};
static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
-CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
- "Percentage of memory to be used as lu_object cache");
+module_param(lu_cache_percent, int, 0644);
+MODULE_PARM_DESC(lu_cache_percent, "Percentage of memory to be used as lu_object cache");
/**
* Return desired hash table order.
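
Here, as in lprocfs_status.c earlier, the private CFS_MODULE_PARM() wrapper gives way to the standard module_param()/MODULE_PARM_DESC() pair; with mode 0644 the value becomes a writable file under /sys/module/<module>/parameters/. For reference, the conventional form in a minimal module looks as follows; this fragment is a sketch for illustration, not part of the patch.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int lu_cache_percent = 20;
module_param(lu_cache_percent, int, 0644);
MODULE_PARM_DESC(lu_cache_percent,
                 "Percentage of memory to be used as lu_object cache");

static int __init demo_init(void)
{
        return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");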
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index 68a4d6a0eb03..a69a630b596e 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -631,6 +631,9 @@ int lustre_put_lsi(struct super_block *sb)
CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts));
if (atomic_dec_and_test(&lsi->lsi_mounts)) {
if (IS_SERVER(lsi) && lsi->lsi_osd_exp) {
+ lu_device_put(&lsi->lsi_dt_dev->dd_lu_dev);
+ lsi->lsi_osd_exp->exp_obd->obd_lvfs_ctxt.dt = NULL;
+ lsi->lsi_dt_dev = NULL;
obd_disconnect(lsi->lsi_osd_exp);
/* wait till OSD is gone */
obd_zombie_barrier();
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 1fb0ac4e920d..9b2dea292363 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -1106,7 +1106,7 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d,
/* coverity[overrun-buffer-val] */
obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl);
if (IS_ERR(obj))
- GOTO(out, eco = (void*)obj);
+ GOTO(out, eco = (void *)obj);
eco = cl2echo_obj(obj);
if (eco->eo_deleted) {
diff --git a/drivers/staging/lustre/lustre/osc/Makefile b/drivers/staging/lustre/lustre/osc/Makefile
index bbd2f7707e9f..4488162d228a 100644
--- a/drivers/staging/lustre/lustre/osc/Makefile
+++ b/drivers/staging/lustre/lustre/osc/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_LUSTRE_FS) += osc.o
-osc-y := osc_request.o lproc_osc.o osc_dev.o osc_object.o \
+osc-y := osc_request.o osc_dev.o osc_object.o \
osc_page.o osc_lock.o osc_io.o osc_quota.o osc_cache.o
+osc-$(CONFIG_PROC_FS) += lproc_osc.o
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index ef10e2af787f..0b59fc16c50c 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -42,7 +42,6 @@
#include <linux/seq_file.h>
#include "osc_internal.h"
-#ifdef LPROCFS
static int osc_active_seq_show(struct seq_file *m, void *v)
{
struct obd_device *dev = m->private;
@@ -724,4 +723,3 @@ void lprocfs_osc_init_vars(struct lprocfs_static_vars *lvars)
lvars->module_vars = lprocfs_osc_module_vars;
lvars->obd_vars = lprocfs_osc_obd_vars;
}
-#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 00295da4ab3d..be4511e78c04 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -1703,7 +1703,7 @@ static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
return is_ready;
}
-/* this is trying to propogate async writeback errors back up to the
+/* this is trying to propagate async writeback errors back up to the
* application. As an async write fails we record the error code for later if
* the app does an fsync. As long as errors persist we force future rpcs to be
* sync so that the app can get a sync error and break the cycle of queueing
@@ -2006,7 +2006,7 @@ static struct osc_object *osc_next_obj(struct client_obd *cli)
/* then if we have cache waiters, return all objects with queued
* writes. This is especially important when many small files
* have filled up the cache and not been fired into rpcs because
- * they don't pass the nr_pending/object threshhold */
+ * they don't pass the nr_pending/object threshold */
if (!list_empty(&cli->cl_cache_waiters) &&
!list_empty(&cli->cl_loi_write_list))
return list_to_obj(&cli->cl_loi_write_list, write_item);
@@ -2226,7 +2226,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
/* Add this page into extent by the following steps:
* 1. if there exists an active extent for this IO, mostly this page
* can be added to the active extent and sometimes we need to
- * expand extent to accomodate this page;
+ * expand extent to accommodate this page;
* 2. otherwise, a new extent will be allocated. */
ext = oio->oi_active;
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index a3aa9b6596ef..9e7899fa4cc4 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -299,7 +299,7 @@ struct osc_lock {
ols_flush:1,
/**
* if set, the osc_lock is a glimpse lock. For glimpse locks, we treat
- * the EVAVAIL error as torerable, this will make upper logic happy
+ * the EVAVAIL error as tolerable, this will make upper logic happy
* to wait all glimpse locks to each OSTs to be completed.
* Glimpse lock converts to normal lock if the server lock is
* granted.
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index c90abfbb1d7a..ef7b9c2b208e 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -929,7 +929,7 @@ static void osc_lock_build_einfo(const struct lu_env *env,
* Determine if the lock should be converted into a lockless lock.
*
* Steps to check:
- * - if the lock has an explicite requirment for a non-lockless lock;
+ * - if the lock has an explicit requirement for a non-lockless lock;
* - if the io lock request type ci_lockreq;
* - send the enqueue rpc to ost to make the further decision;
* - special treat to truncate lockless lock
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index 6c20b8ecfb82..4909e486dc5c 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -587,7 +587,7 @@ static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. OSC should at least free this
* number of pages to avoid running out of LRU budget, and.. */
static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
-/* free this number at most otherwise it will take too long time to finsih. */
+/* free this number at most otherwise it will take too long time to finish. */
static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
@@ -606,7 +606,7 @@ static int osc_cache_too_much(struct client_obd *cli)
return min(pages, lru_shrink_max);
/* if it's going to run out LRU slots, we should free some, but not
- * too much to maintain faireness among OSCs. */
+ * too much to maintain fairness among OSCs. */
if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
unsigned long tmp;
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index cb197782d9a3..ee6953ac7353 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -46,10 +46,6 @@
#include <obd_ost.h>
#include <obd_lov.h>
-#ifdef __CYGWIN__
-# include <ctype.h>
-#endif
-
#include <lustre_ha.h>
#include <lprocfs_status.h>
#include <lustre_log.h>
@@ -777,7 +773,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
osc_pack_capa(req, body, (struct obd_capa *)capa);
ptlrpc_request_set_replen(req);
- /* If osc_destory is for destroying the unlink orphan,
+ /* If osc_destroy is for destroying the unlink orphan,
* sent from MDT to OST, which should not be blocked here,
* because the process might be triggered by ptlrpcd, and
* it is not good to block ptlrpcd thread (b=16006)*/
@@ -1197,8 +1193,12 @@ static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
pga[i]->off & ~CFS_PAGE_MASK,
count);
- LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
- (int)(pga[i]->off & ~CFS_PAGE_MASK));
+ CDEBUG(D_PAGE,
+ "page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n",
+ pga[i]->pg, pga[i]->pg->mapping, pga[i]->pg->index,
+ (long)pga[i]->pg->flags, page_count(pga[i]->pg),
+ page_private(pga[i]->pg),
+ (int)(pga[i]->off & ~CFS_PAGE_MASK));
nob -= pga[i]->count;
pg_count--;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/Makefile b/drivers/staging/lustre/lustre/ptlrpc/Makefile
index 6d78b80487f2..1c338aaf18a6 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/Makefile
+++ b/drivers/staging/lustre/lustre/ptlrpc/Makefile
@@ -10,12 +10,13 @@ ldlm_objs += $(LDLM)ldlm_pool.o
ldlm_objs += $(LDLM)interval_tree.o
ptlrpc_objs := client.o recover.o connection.o niobuf.o pack_generic.o
ptlrpc_objs += events.o ptlrpc_module.o service.o pinger.o
-ptlrpc_objs += llog_net.o llog_client.o llog_server.o import.o ptlrpcd.o
+ptlrpc_objs += llog_net.o llog_client.o import.o ptlrpcd.o
ptlrpc_objs += pers.o lproc_ptlrpc.o wiretest.o layout.o
-ptlrpc_objs += sec.o sec_bulk.o sec_gc.o sec_config.o sec_lproc.o
+ptlrpc_objs += sec.o sec_bulk.o sec_gc.o sec_config.o
ptlrpc_objs += sec_null.o sec_plain.o nrs.o nrs_fifo.o
ptlrpc-y := $(ldlm_objs) $(ptlrpc_objs)
+ptlrpc-$(CONFIG_PROC_FS) += sec_lproc.o
ptlrpc-$(CONFIG_LUSTRE_TRANSLATE_ERRNOS) += errno.o
obj-$(CONFIG_PTLRPC_GSS) += gss/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index c2ab0c8c4d42..d90efe408414 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -200,7 +200,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
class_import_put(desc->bd_import);
if (unpin) {
- for (i = 0; i < desc->bd_iov_count ; i++)
+ for (i = 0; i < desc->bd_iov_count; i++)
page_cache_release(desc->bd_iov[i].kiov_page);
}
@@ -459,7 +459,7 @@ ptlrpc_init_rq_pool(int num_rq, int msgsize,
{
struct ptlrpc_request_pool *pool;
- OBD_ALLOC(pool, sizeof (struct ptlrpc_request_pool));
+ OBD_ALLOC(pool, sizeof(struct ptlrpc_request_pool));
if (!pool)
return NULL;
@@ -475,7 +475,7 @@ ptlrpc_init_rq_pool(int num_rq, int msgsize,
if (list_empty(&pool->prp_req_list)) {
/* have not allocated a single request for the pool */
- OBD_FREE(pool, sizeof (struct ptlrpc_request_pool));
+ OBD_FREE(pool, sizeof(struct ptlrpc_request_pool));
pool = NULL;
}
return pool;
@@ -881,7 +881,7 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
/* Requests on the set should either all be completed, or all be new */
expected_phase = (atomic_read(&set->set_remaining) == 0) ?
RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
- list_for_each (tmp, &set->set_requests) {
+ list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
list_entry(tmp, struct ptlrpc_request,
rq_set_chain);
@@ -912,7 +912,7 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
req->rq_invalid_rqset = 0;
spin_unlock(&req->rq_lock);
- ptlrpc_req_finished (req);
+ ptlrpc_req_finished(req);
}
LASSERT(atomic_read(&set->set_remaining) == 0);
@@ -1020,7 +1020,7 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
{
int delay = 0;
- LASSERT (status != NULL);
+ LASSERT(status != NULL);
*status = 0;
if (req->rq_ctx_init || req->rq_ctx_fini) {
@@ -1039,7 +1039,7 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
*status = -EIO;
} else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
imp->imp_state == LUSTRE_IMP_CONNECTING) {
- /* allow CONNECT even if import is invalid */ ;
+ /* allow CONNECT even if import is invalid */
if (atomic_read(&imp->imp_inval_count) != 0) {
DEBUG_REQ(D_ERROR, req, "invalidate in flight");
*status = -EIO;
@@ -1596,7 +1596,8 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
continue;
spin_lock(&imp->imp_lock);
- if (ptlrpc_import_delay_req(imp, req, &status)){
+ if (ptlrpc_import_delay_req(imp, req,
+ &status)) {
/* put on delay list - only if we wait
* recovery finished - before send */
list_del_init(&req->rq_list);
@@ -1752,7 +1753,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- interpret:
+interpret:
LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
/* This moves to "unregistering" phase we need to wait for
@@ -1907,7 +1908,7 @@ int ptlrpc_expired_set(void *data)
/*
* A timeout expired. See which reqs it applies to...
*/
- list_for_each (tmp, &set->set_requests) {
+ list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
list_entry(tmp, struct ptlrpc_request,
rq_set_chain);
@@ -2688,7 +2689,7 @@ int ptlrpc_replay_req(struct ptlrpc_request *req)
LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
- LASSERT (sizeof (*aa) <= sizeof (req->rq_async_args));
+ LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
memset(aa, 0, sizeof(*aa));
@@ -2962,7 +2963,7 @@ void *ptlrpcd_alloc_work(struct obd_import *imp,
init_waitqueue_head(&req->rq_set_waitq);
atomic_set(&req->rq_refcount, 1);
- CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args));
+ CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
args = ptlrpc_req_async_args(req);
args->magic = PTLRPC_WORK_MAGIC;
args->cb = cb;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 58d089c3fef4..f66cfea87acf 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -56,9 +56,9 @@ void request_out_callback(lnet_event_t *ev)
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_request *req = cbid->cbid_arg;
- LASSERT (ev->type == LNET_EVENT_SEND ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT (ev->unlinked);
+ LASSERT(ev->type == LNET_EVENT_SEND ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT(ev->unlinked);
DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
@@ -90,9 +90,9 @@ void reply_in_callback(lnet_event_t *ev)
DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
- LASSERT (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
- LASSERT (ev->md.start == req->rq_repbuf);
- LASSERT (ev->offset + ev->mlength <= req->rq_repbuf_len);
+ LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
+ LASSERT(ev->md.start == req->rq_repbuf);
+ LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
/* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
for adaptive timeouts' early reply. */
LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
@@ -113,7 +113,7 @@ void reply_in_callback(lnet_event_t *ev)
goto out_wake;
}
- if (ev->mlength < ev->rlength ) {
+ if (ev->mlength < ev->rlength) {
CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
req->rq_replen, ev->rlength, ev->offset);
req->rq_reply_truncate = 1;
@@ -167,18 +167,18 @@ out_wake:
/*
* Client's bulk has been written/read
*/
-void client_bulk_callback (lnet_event_t *ev)
+void client_bulk_callback(lnet_event_t *ev)
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
struct ptlrpc_request *req;
- LASSERT ((desc->bd_type == BULK_PUT_SINK &&
- ev->type == LNET_EVENT_PUT) ||
- (desc->bd_type == BULK_GET_SOURCE &&
- ev->type == LNET_EVENT_GET) ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT (ev->unlinked);
+ LASSERT((desc->bd_type == BULK_PUT_SINK &&
+ ev->type == LNET_EVENT_PUT) ||
+ (desc->bd_type == BULK_GET_SOURCE &&
+ ev->type == LNET_EVENT_GET) ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT(ev->unlinked);
if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
ev->status = -EIO;
@@ -283,11 +283,11 @@ void request_in_callback(lnet_event_t *ev)
struct ptlrpc_service *service = svcpt->scp_service;
struct ptlrpc_request *req;
- LASSERT (ev->type == LNET_EVENT_PUT ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT ((char *)ev->md.start >= rqbd->rqbd_buffer);
- LASSERT ((char *)ev->md.start + ev->offset + ev->mlength <=
- rqbd->rqbd_buffer + service->srv_buf_size);
+ LASSERT(ev->type == LNET_EVENT_PUT ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT((char *)ev->md.start >= rqbd->rqbd_buffer);
+ LASSERT((char *)ev->md.start + ev->offset + ev->mlength <=
+ rqbd->rqbd_buffer + service->srv_buf_size);
CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
"event type %d, status %d, service %s\n",
@@ -300,9 +300,9 @@ void request_in_callback(lnet_event_t *ev)
* we'd have to re-post the rqbd, which we can't do in this
* context. */
req = &rqbd->rqbd_req;
- memset(req, 0, sizeof (*req));
+ memset(req, 0, sizeof(*req));
} else {
- LASSERT (ev->type == LNET_EVENT_PUT);
+ LASSERT(ev->type == LNET_EVENT_PUT);
if (ev->status != 0) {
/* We moaned above already... */
return;
@@ -381,19 +381,19 @@ void reply_out_callback(lnet_event_t *ev)
struct ptlrpc_reply_state *rs = cbid->cbid_arg;
struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
- LASSERT (ev->type == LNET_EVENT_SEND ||
- ev->type == LNET_EVENT_ACK ||
- ev->type == LNET_EVENT_UNLINK);
+ LASSERT(ev->type == LNET_EVENT_SEND ||
+ ev->type == LNET_EVENT_ACK ||
+ ev->type == LNET_EVENT_UNLINK);
if (!rs->rs_difficult) {
/* 'Easy' replies have no further processing so I drop the
* net's ref on 'rs' */
- LASSERT (ev->unlinked);
+ LASSERT(ev->unlinked);
ptlrpc_rs_decref(rs);
return;
}
- LASSERT (rs->rs_on_net);
+ LASSERT(rs->rs_on_net);
if (ev->unlinked) {
/* Last network callback. The net's ref on 'rs' stays put
@@ -419,18 +419,17 @@ static void ptlrpc_master_callback(lnet_event_t *ev)
void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;
/* Honestly, it's best to find out early. */
- LASSERT (cbid->cbid_arg != LP_POISON);
- LASSERT (callback == request_out_callback ||
- callback == reply_in_callback ||
- callback == client_bulk_callback ||
- callback == request_in_callback ||
- callback == reply_out_callback
- );
-
- callback (ev);
+ LASSERT(cbid->cbid_arg != LP_POISON);
+ LASSERT(callback == request_out_callback ||
+ callback == reply_in_callback ||
+ callback == client_bulk_callback ||
+ callback == request_in_callback ||
+ callback == reply_out_callback);
+
+ callback(ev);
}
-int ptlrpc_uuid_to_peer (struct obd_uuid *uuid,
+int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
lnet_process_id_t *peer, lnet_nid_t *self)
{
int best_dist = 0;
@@ -538,7 +537,7 @@ int ptlrpc_ni_init(void)
/* We're not passing any limits yet... */
rc = LNetNIInit(pid);
if (rc < 0) {
- CDEBUG (D_NET, "Can't init network interface: %d\n", rc);
+ CDEBUG(D_NET, "Can't init network interface: %d\n", rc);
return (-ENOENT);
}
@@ -552,7 +551,7 @@ int ptlrpc_ni_init(void)
if (rc == 0)
return 0;
- CERROR ("Failed to allocate event queue: %d\n", rc);
+ CERROR("Failed to allocate event queue: %d\n", rc);
LNetNIFini();
return (-ENOMEM);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_asn1.h b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_asn1.h
index c70eb00796f9..bdfd83880422 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_asn1.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_asn1.h
@@ -64,9 +64,9 @@
#define G_REFLECT (-2045022961L)
#define G_WRONG_TOKID (-2045022960L)
-#define g_OID_equal(o1,o2) \
- (((o1)->len == (o2)->len) && \
- (memcmp((o1)->data,(o2)->data,(int) (o1)->len) == 0))
+#define g_OID_equal(o1, o2) \
+ (((o1)->len == (o2)->len) && \
+ (memcmp((o1)->data, (o2)->data, (int) (o1)->len) == 0))
__u32 g_verify_token_header(rawobj_t *mech,
int *body_size,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c
index b518d8a0aaba..7852bf30a3a0 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c
@@ -37,7 +37,6 @@
*/
#define DEBUG_SUBSYSTEM S_SEC
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_err.h b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_err.h
index 13425796fa33..37ec101e14e5 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_err.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_err.h
@@ -106,14 +106,14 @@ typedef unsigned int OM_uint32;
* evaluates its argument only once.
*/
#define GSS_CALLING_ERROR(x) \
- ((x) & (GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET))
+ ((x) & (GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET))
#define GSS_ROUTINE_ERROR(x) \
- ((x) & (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET))
+ ((x) & (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET))
#define GSS_SUPPLEMENTARY_INFO(x) \
- ((x) & (GSS_C_SUPPLEMENTARY_MASK << GSS_C_SUPPLEMENTARY_OFFSET))
+ ((x) & (GSS_C_SUPPLEMENTARY_MASK << GSS_C_SUPPLEMENTARY_OFFSET))
#define GSS_ERROR(x) \
- ((x) & ((GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET) | \
- (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET)))
+ ((x) & ((GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET) | \
+ (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET)))
/*
* Now the actual status code definitions
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c
index 20b1638e7255..56c28286c9c1 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c
@@ -42,7 +42,6 @@
*/
#define DEBUG_SUBSYSTEM S_SEC
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
index 188dbbfbd2f4..d43a13c69669 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
@@ -165,7 +165,7 @@ void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, long timeout)
init_timer(timer);
timer->expires = timeout;
- timer->data = (unsigned long ) ctx;
+ timer->data = (unsigned long) ctx;
timer->function = ctx_upcall_timeout_kr;
add_timer(timer);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
index c106a9e049a7..b9fa3b4a40db 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -1276,7 +1276,7 @@ arc4_out_tfm:
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
- do {} while(0); /* just to avoid compile warning */
+ do {} while (0); /* just to avoid compile warning */
} else {
rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
3, data_desc, &cipher, 1);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_mech_switch.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_mech_switch.c
index 8cdad800382d..99462e085da7 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_mech_switch.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_mech_switch.c
@@ -44,7 +44,6 @@
*/
#define DEBUG_SUBSYSTEM S_SEC
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c b/drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c
index de100a14ab52..a0a74e5542ed 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c
@@ -35,7 +35,6 @@
*/
#define DEBUG_SUBSYSTEM S_SEC
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c b/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
index b42ddda9ee25..8ce6271a5daa 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
@@ -483,7 +483,7 @@ int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
memset(window, 0, win_size / 8);
*max_seq = seq_num;
} else {
- while(*max_seq < seq_num) {
+ while (*max_seq < seq_num) {
(*max_seq)++;
__clear_bit((*max_seq) % win_size, window);
}
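
The loop touched above (a whitespace-only change) belongs to gss_do_check_seq(), which keeps a bitmap window of recently seen GSS sequence numbers: advancing max_seq clears the slots the window slides over, and anything already marked is treated as a replay. A simplified standalone sketch of such a sliding replay window follows, using a single 64-bit word; it is illustrative only, the Lustre implementation sizes the window from win_size and handles large jumps separately.

#include <stdio.h>
#include <stdint.h>

#define WIN_SIZE 64

/* Returns 0 if seq is fresh, -1 if it is a replay or too old to track. */
static int check_seq(uint64_t *window, uint32_t *max_seq, uint32_t seq)
{
        if (seq > *max_seq) {
                while (*max_seq < seq) {
                        (*max_seq)++;
                        /* clear the slot the window just slid over */
                        *window &= ~(1ULL << (*max_seq % WIN_SIZE));
                }
        } else if (*max_seq - seq >= WIN_SIZE) {
                return -1;              /* fell off the left edge of the window */
        }
        if (*window & (1ULL << (seq % WIN_SIZE)))
                return -1;              /* already seen: replay */
        *window |= 1ULL << (seq % WIN_SIZE);
        return 0;
}

int main(void)
{
        uint64_t win = 0;
        uint32_t max = 0;

        printf("%d\n", check_seq(&win, &max, 5));       /* 0: fresh */
        printf("%d\n", check_seq(&win, &max, 5));       /* -1: replayed */
        printf("%d\n", check_seq(&win, &max, 3));       /* 0: still inside the window */
        return 0;
}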
@@ -804,7 +804,8 @@ int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
case PTLRPC_GSS_PROC_DATA:
pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;
- if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)){
+ if (!req->rq_early &&
+ !equi(req->rq_pack_bulk == 1, pack_bulk)) {
CERROR("%s bulk flag in reply\n",
req->rq_pack_bulk ? "missing" : "unexpected");
return -EPROTO;
@@ -1009,7 +1010,8 @@ int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
case PTLRPC_GSS_PROC_DATA:
pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;
- if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)){
+ if (!req->rq_early &&
+ !equi(req->rq_pack_bulk == 1, pack_bulk)) {
CERROR("%s bulk flag in reply\n",
req->rq_pack_bulk ? "missing" : "unexpected");
return -EPROTO;
@@ -1979,7 +1981,7 @@ int gss_svc_handle_init(struct ptlrpc_request *req,
return SECSVC_DROP;
}
- if (reqbuf->lm_bufcount < 3 || reqbuf->lm_bufcount > 4){
+ if (reqbuf->lm_bufcount < 3 || reqbuf->lm_bufcount > 4) {
CERROR("Invalid bufcount %d\n", reqbuf->lm_bufcount);
return SECSVC_DROP;
}
@@ -2369,7 +2371,7 @@ int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
if (swabbed)
gss_header_swabber(ghdr);
- switch(ghdr->gh_proc) {
+ switch (ghdr->gh_proc) {
case PTLRPC_GSS_PROC_INIT:
case PTLRPC_GSS_PROC_CONTINUE_INIT:
rc = gss_svc_handle_init(req, gw);
@@ -2388,7 +2390,7 @@ int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
switch (rc) {
case SECSVC_OK:
- LASSERT (grctx->src_ctx);
+ LASSERT(grctx->src_ctx);
req->rq_auth_gss = 1;
req->rq_auth_remote = grctx->src_ctx->gsc_remote;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index 7b96a0e88cdb..f465547eb95e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -72,23 +72,23 @@ static void __import_set_state(struct obd_import *imp,
}
/* A CLOSED import should remain so. */
-#define IMPORT_SET_STATE_NOLOCK(imp, state) \
-do { \
- if (imp->imp_state != LUSTRE_IMP_CLOSED) { \
- CDEBUG(D_HA, "%p %s: changing import state from %s to %s\n", \
- imp, obd2cli_tgt(imp->imp_obd), \
- ptlrpc_import_state_name(imp->imp_state), \
- ptlrpc_import_state_name(state)); \
- __import_set_state(imp, state); \
- } \
-} while(0)
+#define IMPORT_SET_STATE_NOLOCK(imp, state) \
+do { \
+ if (imp->imp_state != LUSTRE_IMP_CLOSED) { \
+ CDEBUG(D_HA, "%p %s: changing import state from %s to %s\n", \
+ imp, obd2cli_tgt(imp->imp_obd), \
+ ptlrpc_import_state_name(imp->imp_state), \
+ ptlrpc_import_state_name(state)); \
+ __import_set_state(imp, state); \
+ } \
+} while (0)
#define IMPORT_SET_STATE(imp, state) \
do { \
spin_lock(&imp->imp_lock); \
IMPORT_SET_STATE_NOLOCK(imp, state); \
spin_unlock(&imp->imp_lock); \
-} while(0)
+} while (0)
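
The IMPORT_SET_STATE_NOLOCK()/IMPORT_SET_STATE() macros above only gain checkpatch-friendly "while (0)" spacing, but they rely on the usual do { ... } while (0) idiom: wrapping a multi-statement macro so it expands to a single statement that stays safe after an unbraced if/else. A tiny standalone illustration:

#include <stdio.h>

/* Without the do/while(0) wrapper, only the first statement would be
 * guarded by the if below and the second would always execute. */
#define SET_STATE(obj, s)                       \
do {                                            \
        (obj)->prev = (obj)->state;             \
        (obj)->state = (s);                     \
} while (0)

struct conn { int prev, state; };

int main(void)
{
        struct conn c = { 0, 0 };
        int connected = 1;

        if (connected)
                SET_STATE(&c, 2);       /* expands to one statement; else-safe */
        else
                SET_STATE(&c, 3);

        printf("prev=%d state=%d\n", c.prev, c.state);  /* prev=0 state=2 */
        return 0;
}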
static int ptlrpc_connect_interpret(const struct lu_env *env,
@@ -170,7 +170,6 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
target_len, target_start,
libcfs_nid2str(imp->imp_connection->c_peer.nid));
}
- ptlrpc_deactivate_timeouts(imp);
IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
spin_unlock(&imp->imp_lock);
@@ -383,7 +382,6 @@ void ptlrpc_activate_import(struct obd_import *imp)
spin_lock(&imp->imp_lock);
imp->imp_invalid = 0;
- ptlrpc_activate_timeouts(imp);
spin_unlock(&imp->imp_lock);
obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
}
@@ -680,7 +678,7 @@ int ptlrpc_connect_import(struct obd_import *imp)
ptlrpc_request_set_replen(request);
request->rq_interpret_reply = ptlrpc_connect_interpret;
- CLASSERT(sizeof (*aa) <= sizeof (request->rq_async_args));
+ CLASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
aa = ptlrpc_req_async_args(request);
memset(aa, 0, sizeof(*aa));
@@ -859,7 +857,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
if (MSG_CONNECT_RECONNECT & msg_flags) {
memset(&old_hdl, 0, sizeof(old_hdl));
if (!memcmp(&old_hdl, lustre_msg_get_handle(request->rq_repmsg),
- sizeof (old_hdl))) {
+ sizeof(old_hdl))) {
LCONSOLE_WARN("Reconnect to %s (at @%s) failed due "
"bad handle "LPX64"\n",
obd2cli_tgt(imp->imp_obd),
@@ -1135,9 +1133,11 @@ out:
if (ocd &&
(ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
(ocd->ocd_version != LUSTRE_VERSION_CODE)) {
- /* Actually servers are only supposed to refuse
- connection from liblustre clients, so we should
- never see this from VFS context */
+ /*
+ * Actually servers are only supposed to refuse
+ * connection from liblustre clients, so we
+ * should never see this from VFS context
+ */
LCONSOLE_ERROR_MSG(0x16a, "Server %s version "
"(%d.%d.%d.%d)"
" refused connection from this client "
@@ -1507,7 +1507,7 @@ int at_measured(struct adaptive_timeout *at, unsigned int val)
at->at_worst_time = now;
at->at_hist[0] = val;
at->at_binstart = now;
- } else if (now - at->at_binstart < binlimit ) {
+ } else if (now - at->at_binstart < binlimit) {
/* in bin 0 */
at->at_hist[0] = max(val, at->at_hist[0]);
at->at_current = max(val, at->at_current);
@@ -1517,7 +1517,7 @@ int at_measured(struct adaptive_timeout *at, unsigned int val)
/* move bins over */
shift = (now - at->at_binstart) / binlimit;
LASSERT(shift > 0);
- for(i = AT_BINS - 1; i >= 0; i--) {
+ for (i = AT_BINS - 1; i >= 0; i--) {
if (i >= shift) {
at->at_hist[i] = at->at_hist[i - shift];
maxv = max(maxv, at->at_hist[i]);
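
at_measured() above, again only reformatted here, maintains the adaptive-timeout history as a small array of time bins: a new sample either raises the current bin's maximum or, once enough time has passed, shifts the history over by whole bins and recomputes the running maximum. The standalone sketch below captures that binned-maximum idea in simplified form; the bin count and span are illustrative, not the AT_BINS values used by ptlrpc.

#include <stdio.h>
#include <time.h>

#define BINS    4
#define BINSPAN 150     /* seconds covered by one bin */

struct at {
        unsigned int hist[BINS];
        time_t binstart;
        unsigned int current;
};

static void at_sample(struct at *at, time_t now, unsigned int val)
{
        int shift = (int)((now - at->binstart) / BINSPAN);
        unsigned int maxv = val;
        int i;

        if (shift > 0) {                /* slide history over by 'shift' bins */
                for (i = BINS - 1; i >= 0; i--) {
                        at->hist[i] = (i >= shift) ? at->hist[i - shift] : 0;
                        if (at->hist[i] > maxv)
                                maxv = at->hist[i];
                }
                at->hist[0] = val;
                at->binstart += (time_t)shift * BINSPAN;
                at->current = maxv;
        } else {                        /* still inside the current bin */
                if (val > at->hist[0])
                        at->hist[0] = val;
                if (val > at->current)
                        at->current = val;
        }
}

int main(void)
{
        struct at at = { { 0 }, 0, 0 };

        at_sample(&at, 10, 7);
        at_sample(&at, 200, 3);         /* one bin later: window max is still 7 */
        printf("current estimate: %u\n", at.current);
        return 0;
}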
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index d0a6e5689227..dfcb410fe485 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -145,6 +145,14 @@ static const struct req_msg_field *mdt_close_client[] = {
&RMF_CAPA1
};
+static const struct req_msg_field *mdt_release_close_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_EPOCH,
+ &RMF_REC_REINT,
+ &RMF_CAPA1,
+ &RMF_CLOSE_DATA
+};
+
static const struct req_msg_field *obd_statfs_server[] = {
&RMF_PTLRPC_BODY,
&RMF_OBD_STATFS
@@ -454,6 +462,25 @@ static const struct req_msg_field *ldlm_intent_unlink_client[] = {
&RMF_NAME
};
+static const struct req_msg_field *ldlm_intent_getxattr_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REQ,
+ &RMF_LDLM_INTENT,
+ &RMF_MDT_BODY,
+ &RMF_CAPA1,
+};
+
+static const struct req_msg_field *ldlm_intent_getxattr_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REP,
+ &RMF_MDT_BODY,
+ &RMF_MDT_MD,
+ &RMF_ACL, /* for req_capsule_extend/mdt_intent_policy */
+ &RMF_EADATA,
+ &RMF_EAVALS,
+ &RMF_EAVALS_LENS
+};
+
static const struct req_msg_field *mds_getxattr_client[] = {
&RMF_PTLRPC_BODY,
&RMF_MDT_BODY,
@@ -666,6 +693,7 @@ static struct req_format *req_formats[] = {
&RQF_MDS_GETXATTR,
&RQF_MDS_SYNC,
&RQF_MDS_CLOSE,
+ &RQF_MDS_RELEASE_CLOSE,
&RQF_MDS_PIN,
&RQF_MDS_UNPIN,
&RQF_MDS_READPAGE,
@@ -730,6 +758,7 @@ static struct req_format *req_formats[] = {
&RQF_LDLM_INTENT_OPEN,
&RQF_LDLM_INTENT_CREATE,
&RQF_LDLM_INTENT_UNLINK,
+ &RQF_LDLM_INTENT_GETXATTR,
&RQF_LDLM_INTENT_QUOTA,
&RQF_QUOTA_DQACQ,
&RQF_LOG_CANCEL,
@@ -738,7 +767,8 @@ static struct req_format *req_formats[] = {
&RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK,
&RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK,
&RQF_LLOG_ORIGIN_HANDLE_READ_HEADER,
- &RQF_LLOG_ORIGIN_CONNECT
+ &RQF_LLOG_ORIGIN_CONNECT,
+ &RQF_CONNECT,
};
struct req_msg_field {
@@ -884,6 +914,11 @@ struct req_msg_field RMF_PTLRPC_BODY =
sizeof(struct ptlrpc_body), lustre_swab_ptlrpc_body, NULL);
EXPORT_SYMBOL(RMF_PTLRPC_BODY);
+struct req_msg_field RMF_CLOSE_DATA =
+ DEFINE_MSGF("data_version", 0,
+ sizeof(struct close_data), lustre_swab_close_data, NULL);
+EXPORT_SYMBOL(RMF_CLOSE_DATA);
+
struct req_msg_field RMF_OBD_STATFS =
DEFINE_MSGF("obd_statfs", 0,
sizeof(struct obd_statfs), lustre_swab_obd_statfs, NULL);
@@ -998,6 +1033,9 @@ struct req_msg_field RMF_EADATA = DEFINE_MSGF("eadata", 0, -1,
NULL, NULL);
EXPORT_SYMBOL(RMF_EADATA);
+struct req_msg_field RMF_EAVALS = DEFINE_MSGF("eavals", 0, -1, NULL, NULL);
+EXPORT_SYMBOL(RMF_EAVALS);
+
struct req_msg_field RMF_ACL =
DEFINE_MSGF("acl", RMF_F_NO_SIZE_CHECK,
LUSTRE_POSIX_ACL_MAX_SIZE, NULL, NULL);
@@ -1049,6 +1087,11 @@ struct req_msg_field RMF_RCS =
lustre_swab_generic_32s, dump_rcs);
EXPORT_SYMBOL(RMF_RCS);
+struct req_msg_field RMF_EAVALS_LENS =
+ DEFINE_MSGF("eavals_lens", RMF_F_STRUCT_ARRAY, sizeof(__u32),
+ lustre_swab_generic_32s, NULL);
+EXPORT_SYMBOL(RMF_EAVALS_LENS);
+
struct req_msg_field RMF_OBD_ID =
DEFINE_MSGF("obd_id", 0,
sizeof(obd_id), lustre_swab_ost_last_id, NULL);
@@ -1406,11 +1449,22 @@ struct req_format RQF_LDLM_INTENT_UNLINK =
ldlm_intent_unlink_client, ldlm_intent_server);
EXPORT_SYMBOL(RQF_LDLM_INTENT_UNLINK);
+struct req_format RQF_LDLM_INTENT_GETXATTR =
+ DEFINE_REQ_FMT0("LDLM_INTENT_GETXATTR",
+ ldlm_intent_getxattr_client,
+ ldlm_intent_getxattr_server);
+EXPORT_SYMBOL(RQF_LDLM_INTENT_GETXATTR);
+
struct req_format RQF_MDS_CLOSE =
DEFINE_REQ_FMT0("MDS_CLOSE",
mdt_close_client, mds_last_unlink_server);
EXPORT_SYMBOL(RQF_MDS_CLOSE);
+struct req_format RQF_MDS_RELEASE_CLOSE =
+ DEFINE_REQ_FMT0("MDS_CLOSE",
+ mdt_release_close_client, mds_last_unlink_server);
+EXPORT_SYMBOL(RQF_MDS_RELEASE_CLOSE);
+
struct req_format RQF_MDS_PIN =
DEFINE_REQ_FMT0("MDS_PIN",
mdt_body_capa, mdt_body_only);
@@ -1504,6 +1558,10 @@ struct req_format RQF_LLOG_ORIGIN_CONNECT =
DEFINE_REQ_FMT0("LLOG_ORIGIN_CONNECT", llogd_conn_body_only, empty);
EXPORT_SYMBOL(RQF_LLOG_ORIGIN_CONNECT);
+struct req_format RQF_CONNECT =
+ DEFINE_REQ_FMT0("CONNECT", obd_connect_client, obd_connect_server);
+EXPORT_SYMBOL(RQF_CONNECT);
+
struct req_format RQF_OST_CONNECT =
DEFINE_REQ_FMT0("OST_CONNECT",
obd_connect_client, obd_connect_server);
@@ -1808,7 +1866,7 @@ swabber_dumper_helper(struct req_capsule *pill,
const struct req_msg_field *field,
enum req_location loc,
int offset,
- void *value, int len, int dump, void (*swabber)( void *))
+ void *value, int len, int dump, void (*swabber)(void *))
{
void *p;
int i;
@@ -1824,8 +1882,11 @@ swabber_dumper_helper(struct req_capsule *pill,
else
do_swab = 0;
+ if (!field->rmf_dumper)
+ dump = 0;
+
if (!(field->rmf_flags & RMF_F_STRUCT_ARRAY)) {
- if (dump && field->rmf_dumper) {
+ if (dump) {
CDEBUG(D_RPCTRACE, "Dump of %sfield %s follows\n",
do_swab ? "unswabbed " : "", field->rmf_name);
field->rmf_dumper(value);
@@ -1851,7 +1912,7 @@ swabber_dumper_helper(struct req_capsule *pill,
for (p = value, i = 0, n = len / field->rmf_size;
i < n;
i++, p += field->rmf_size) {
- if (dump && field->rmf_dumper) {
+ if (dump) {
CDEBUG(D_RPCTRACE, "Dump of %sarray field %s, "
"element %d follows\n",
do_swab ? "unswabbed " : "", field->rmf_name, i);
@@ -1860,7 +1921,7 @@ swabber_dumper_helper(struct req_capsule *pill,
if (!do_swab)
continue;
swabber(p);
- if (dump && field->rmf_dumper) {
+ if (dump) {
CDEBUG(D_RPCTRACE, "Dump of swabbed array field %s, "
"element %d follows\n", field->rmf_name, i);
field->rmf_dumper(value);
@@ -1883,7 +1944,7 @@ swabber_dumper_helper(struct req_capsule *pill,
static void *__req_capsule_get(struct req_capsule *pill,
const struct req_msg_field *field,
enum req_location loc,
- void (*swabber)( void *),
+ void (*swabber)(void *),
int dump)
{
const struct req_format *fmt;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
index 379e59477ea2..ab084541fddb 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
@@ -63,7 +63,7 @@
return (-EINVAL); \
} \
mutex_unlock(&ctxt->loc_mutex); \
-} while(0)
+} while (0)
#define LLOG_CLIENT_EXIT(ctxt, imp) do { \
mutex_lock(&ctxt->loc_mutex); \
@@ -72,7 +72,7 @@
ctxt->loc_imp, imp); \
class_import_put(imp); \
mutex_unlock(&ctxt->loc_mutex); \
-} while(0)
+} while (0)
/* This is a callback from the llog_* functions.
* Assumes caller has already pushed us into the kernel context. */
@@ -302,7 +302,7 @@ static int llog_client_read_header(const struct lu_env *env,
if (hdr == NULL)
GOTO(out, rc =-EFAULT);
- memcpy(handle->lgh_hdr, hdr, sizeof (*hdr));
+ memcpy(handle->lgh_hdr, hdr, sizeof(*hdr));
handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index;
/* sanity checks */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_server.c b/drivers/staging/lustre/lustre/ptlrpc/llog_server.c
deleted file mode 100644
index af9d2ac391ef..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_server.c
+++ /dev/null
@@ -1,450 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/llog_server.c
- *
- * remote api for llog - server side
- *
- * Author: Andreas Dilger <adilger@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOG
-
-
-#include <obd_class.h>
-#include <lustre_log.h>
-#include <lustre_net.h>
-#include <lustre_fsfilt.h>
-
-#if defined(LUSTRE_LOG_SERVER)
-static int llog_origin_close(const struct lu_env *env, struct llog_handle *lgh)
-{
- if (lgh->lgh_hdr != NULL && lgh->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
- return llog_cat_close(env, lgh);
- else
- return llog_close(env, lgh);
-}
-
-/* Only open is supported, no new llog can be created remotely */
-int llog_origin_handle_open(struct ptlrpc_request *req)
-{
- struct obd_export *exp = req->rq_export;
- struct obd_device *obd = exp->exp_obd;
- struct obd_device *disk_obd;
- struct lvfs_run_ctxt saved;
- struct llog_handle *loghandle;
- struct llogd_body *body;
- struct llog_logid *logid = NULL;
- struct llog_ctxt *ctxt;
- char *name = NULL;
- int rc;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (body == NULL)
- return -EFAULT;
-
- if (ostid_id(&body->lgd_logid.lgl_oi) > 0)
- logid = &body->lgd_logid;
-
- if (req_capsule_field_present(&req->rq_pill, &RMF_NAME, RCL_CLIENT)) {
- name = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
- if (name == NULL)
- return -EFAULT;
- CDEBUG(D_INFO, "%s: opening log %s\n", obd->obd_name, name);
- }
-
- ctxt = llog_get_context(obd, body->lgd_ctxt_idx);
- if (ctxt == NULL) {
- CDEBUG(D_WARNING, "%s: no ctxt. group=%p idx=%d name=%s\n",
- obd->obd_name, &obd->obd_olg, body->lgd_ctxt_idx, name);
- return -ENODEV;
- }
- disk_obd = ctxt->loc_exp->exp_obd;
- push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
-
- rc = llog_open(req->rq_svc_thread->t_env, ctxt, &loghandle, logid,
- name, LLOG_OPEN_EXISTS);
- if (rc)
- GOTO(out_pop, rc);
-
- rc = req_capsule_server_pack(&req->rq_pill);
- if (rc)
- GOTO(out_close, rc = -ENOMEM);
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
- body->lgd_logid = loghandle->lgh_id;
-
-out_close:
- llog_origin_close(req->rq_svc_thread->t_env, loghandle);
-out_pop:
- pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
- llog_ctxt_put(ctxt);
- return rc;
-}
-EXPORT_SYMBOL(llog_origin_handle_open);
-
-int llog_origin_handle_destroy(struct ptlrpc_request *req)
-{
- struct obd_device *disk_obd;
- struct lvfs_run_ctxt saved;
- struct llogd_body *body;
- struct llog_logid *logid = NULL;
- struct llog_ctxt *ctxt;
- int rc;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (body == NULL)
- return -EFAULT;
-
- if (ostid_id(&body->lgd_logid.lgl_oi) > 0)
- logid = &body->lgd_logid;
-
- if (!(body->lgd_llh_flags & LLOG_F_IS_PLAIN))
- CERROR("%s: wrong llog flags %x\n",
- req->rq_export->exp_obd->obd_name, body->lgd_llh_flags);
-
- ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
- if (ctxt == NULL)
- return -ENODEV;
-
- disk_obd = ctxt->loc_exp->exp_obd;
- push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
-
- rc = req_capsule_server_pack(&req->rq_pill);
- /* erase only if no error and logid is valid */
- if (rc == 0)
- rc = llog_erase(req->rq_svc_thread->t_env, ctxt, logid, NULL);
- pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
- llog_ctxt_put(ctxt);
- return rc;
-}
-EXPORT_SYMBOL(llog_origin_handle_destroy);
-
-int llog_origin_handle_next_block(struct ptlrpc_request *req)
-{
- struct obd_device *disk_obd;
- struct llog_handle *loghandle;
- struct llogd_body *body;
- struct llogd_body *repbody;
- struct lvfs_run_ctxt saved;
- struct llog_ctxt *ctxt;
- __u32 flags;
- void *ptr;
- int rc;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (body == NULL)
- return -EFAULT;
-
- ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
- if (ctxt == NULL)
- return -ENODEV;
-
- disk_obd = ctxt->loc_exp->exp_obd;
- push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
-
- rc = llog_open(req->rq_svc_thread->t_env, ctxt, &loghandle,
- &body->lgd_logid, NULL, LLOG_OPEN_EXISTS);
- if (rc)
- GOTO(out_pop, rc);
-
- flags = body->lgd_llh_flags;
- rc = llog_init_handle(req->rq_svc_thread->t_env, loghandle, flags,
- NULL);
- if (rc)
- GOTO(out_close, rc);
-
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER,
- LLOG_CHUNK_SIZE);
- rc = req_capsule_server_pack(&req->rq_pill);
- if (rc)
- GOTO(out_close, rc = -ENOMEM);
-
- repbody = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
- *repbody = *body;
-
- ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
- rc = llog_next_block(req->rq_svc_thread->t_env, loghandle,
- &repbody->lgd_saved_index, repbody->lgd_index,
- &repbody->lgd_cur_offset, ptr, LLOG_CHUNK_SIZE);
- if (rc)
- GOTO(out_close, rc);
-out_close:
- llog_origin_close(req->rq_svc_thread->t_env, loghandle);
-out_pop:
- pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
- llog_ctxt_put(ctxt);
- return rc;
-}
-EXPORT_SYMBOL(llog_origin_handle_next_block);
-
-int llog_origin_handle_prev_block(struct ptlrpc_request *req)
-{
- struct llog_handle *loghandle;
- struct llogd_body *body;
- struct llogd_body *repbody;
- struct obd_device *disk_obd;
- struct lvfs_run_ctxt saved;
- struct llog_ctxt *ctxt;
- __u32 flags;
- void *ptr;
- int rc;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (body == NULL)
- return -EFAULT;
-
- ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
- if (ctxt == NULL)
- return -ENODEV;
-
- disk_obd = ctxt->loc_exp->exp_obd;
- push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
-
- rc = llog_open(req->rq_svc_thread->t_env, ctxt, &loghandle,
- &body->lgd_logid, NULL, LLOG_OPEN_EXISTS);
- if (rc)
- GOTO(out_pop, rc);
-
- flags = body->lgd_llh_flags;
- rc = llog_init_handle(req->rq_svc_thread->t_env, loghandle, flags,
- NULL);
- if (rc)
- GOTO(out_close, rc);
-
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER,
- LLOG_CHUNK_SIZE);
- rc = req_capsule_server_pack(&req->rq_pill);
- if (rc)
- GOTO(out_close, rc = -ENOMEM);
-
- repbody = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
- *repbody = *body;
-
- ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
- rc = llog_prev_block(req->rq_svc_thread->t_env, loghandle,
- body->lgd_index, ptr, LLOG_CHUNK_SIZE);
- if (rc)
- GOTO(out_close, rc);
-
-out_close:
- llog_origin_close(req->rq_svc_thread->t_env, loghandle);
-out_pop:
- pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
- llog_ctxt_put(ctxt);
- return rc;
-}
-EXPORT_SYMBOL(llog_origin_handle_prev_block);
-
-int llog_origin_handle_read_header(struct ptlrpc_request *req)
-{
- struct obd_device *disk_obd;
- struct llog_handle *loghandle;
- struct llogd_body *body;
- struct llog_log_hdr *hdr;
- struct lvfs_run_ctxt saved;
- struct llog_ctxt *ctxt;
- __u32 flags;
- int rc;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (body == NULL)
- return -EFAULT;
-
- ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
- if (ctxt == NULL)
- return -ENODEV;
-
- disk_obd = ctxt->loc_exp->exp_obd;
- push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
-
- rc = llog_open(req->rq_svc_thread->t_env, ctxt, &loghandle,
- &body->lgd_logid, NULL, LLOG_OPEN_EXISTS);
- if (rc)
- GOTO(out_pop, rc);
-
- /*
- * llog_init_handle() reads the llog header
- */
- flags = body->lgd_llh_flags;
- rc = llog_init_handle(req->rq_svc_thread->t_env, loghandle, flags,
- NULL);
- if (rc)
- GOTO(out_close, rc);
- flags = loghandle->lgh_hdr->llh_flags;
-
- rc = req_capsule_server_pack(&req->rq_pill);
- if (rc)
- GOTO(out_close, rc = -ENOMEM);
-
- hdr = req_capsule_server_get(&req->rq_pill, &RMF_LLOG_LOG_HDR);
- *hdr = *loghandle->lgh_hdr;
-out_close:
- llog_origin_close(req->rq_svc_thread->t_env, loghandle);
-out_pop:
- pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
- llog_ctxt_put(ctxt);
- return rc;
-}
-EXPORT_SYMBOL(llog_origin_handle_read_header);
-
-int llog_origin_handle_close(struct ptlrpc_request *req)
-{
- /* Nothing to do */
- return 0;
-}
-EXPORT_SYMBOL(llog_origin_handle_close);
-
-int llog_origin_handle_cancel(struct ptlrpc_request *req)
-{
- int num_cookies, rc = 0, err, i, failed = 0;
- struct obd_device *disk_obd;
- struct llog_cookie *logcookies;
- struct llog_ctxt *ctxt = NULL;
- struct lvfs_run_ctxt saved;
- struct llog_handle *cathandle;
- struct inode *inode;
- void *handle;
-
- logcookies = req_capsule_client_get(&req->rq_pill, &RMF_LOGCOOKIES);
- num_cookies = req_capsule_get_size(&req->rq_pill, &RMF_LOGCOOKIES,
- RCL_CLIENT) / sizeof(*logcookies);
- if (logcookies == NULL || num_cookies == 0) {
- DEBUG_REQ(D_HA, req, "No llog cookies sent");
- return -EFAULT;
- }
-
- ctxt = llog_get_context(req->rq_export->exp_obd,
- logcookies->lgc_subsys);
- if (ctxt == NULL)
- return -ENODEV;
-
- disk_obd = ctxt->loc_exp->exp_obd;
- push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
- for (i = 0; i < num_cookies; i++, logcookies++) {
- cathandle = ctxt->loc_handle;
- LASSERT(cathandle != NULL);
- inode = cathandle->lgh_file->f_dentry->d_inode;
-
- handle = fsfilt_start_log(disk_obd, inode,
- FSFILT_OP_CANCEL_UNLINK, NULL, 1);
- if (IS_ERR(handle)) {
- CERROR("fsfilt_start_log() failed: %ld\n",
- PTR_ERR(handle));
- GOTO(pop_ctxt, rc = PTR_ERR(handle));
- }
-
- rc = llog_cat_cancel_records(req->rq_svc_thread->t_env,
- cathandle, 1, logcookies);
-
- /*
- * Do not raise -ENOENT errors for resent rpcs. This rec already
- * might be killed.
- */
- if (rc == -ENOENT &&
- (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT)) {
- /*
- * Do not change this message, reply-single.sh test_59b
- * expects to find this in log.
- */
- CDEBUG(D_RPCTRACE, "RESENT cancel req %p - ignored\n",
- req);
- rc = 0;
- } else if (rc == 0) {
- CDEBUG(D_RPCTRACE, "Canceled %d llog-records\n",
- num_cookies);
- }
-
- err = fsfilt_commit(disk_obd, inode, handle, 0);
- if (err) {
- CERROR("Error committing transaction: %d\n", err);
- if (!rc)
- rc = err;
- failed++;
- GOTO(pop_ctxt, rc);
- } else if (rc)
- failed++;
- }
- GOTO(pop_ctxt, rc);
-pop_ctxt:
- pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
- if (rc)
- CERROR("Cancel %d of %d llog-records failed: %d\n",
- failed, num_cookies, rc);
-
- llog_ctxt_put(ctxt);
- return rc;
-}
-EXPORT_SYMBOL(llog_origin_handle_cancel);
-
-#else /* !__KERNEL__ */
-int llog_origin_handle_open(struct ptlrpc_request *req)
-{
- LBUG();
- return 0;
-}
-
-int llog_origin_handle_destroy(struct ptlrpc_request *req)
-{
- LBUG();
- return 0;
-}
-
-int llog_origin_handle_next_block(struct ptlrpc_request *req)
-{
- LBUG();
- return 0;
-}
-int llog_origin_handle_prev_block(struct ptlrpc_request *req)
-{
- LBUG();
- return 0;
-}
-int llog_origin_handle_read_header(struct ptlrpc_request *req)
-{
- LBUG();
- return 0;
-}
-int llog_origin_handle_close(struct ptlrpc_request *req)
-{
- LBUG();
- return 0;
-}
-int llog_origin_handle_cancel(struct ptlrpc_request *req)
-{
- LBUG();
- return 0;
-}
-#endif
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index bea44a3d4a2f..1be978609c59 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -46,8 +46,8 @@
struct ll_rpc_opcode {
- __u32 opcode;
- const char *opname;
+ __u32 opcode;
+ const char *opname;
} ll_rpc_opcode_table[LUSTRE_MAX_OPCODES] = {
{ OST_REPLY, "ost_reply" },
{ OST_GETATTR, "ost_getattr" },
@@ -114,10 +114,10 @@ struct ll_rpc_opcode {
{ MGS_SET_INFO, "mgs_set_info" },
{ MGS_CONFIG_READ, "mgs_config_read" },
{ OBD_PING, "obd_ping" },
- { OBD_LOG_CANCEL, "llog_origin_handle_cancel" },
+ { OBD_LOG_CANCEL, "llog_cancel" },
{ OBD_QC_CALLBACK, "obd_quota_callback" },
{ OBD_IDX_READ, "dt_index_read" },
- { LLOG_ORIGIN_HANDLE_CREATE, "llog_origin_handle_create" },
+ { LLOG_ORIGIN_HANDLE_CREATE, "llog_origin_handle_open" },
{ LLOG_ORIGIN_HANDLE_NEXT_BLOCK, "llog_origin_handle_next_block" },
{ LLOG_ORIGIN_HANDLE_READ_HEADER,"llog_origin_handle_read_header" },
{ LLOG_ORIGIN_HANDLE_WRITE_REC, "llog_origin_handle_write_rec" },
@@ -137,8 +137,8 @@ struct ll_rpc_opcode {
};
struct ll_eopcode {
- __u32 opcode;
- const char *opname;
+ __u32 opcode;
+ const char *opname;
} ll_eopcode_table[EXTRA_LAST_OPC] = {
{ LDLM_GLIMPSE_ENQUEUE, "ldlm_glimpse_enqueue" },
{ LDLM_PLAIN_ENQUEUE, "ldlm_plain_enqueue" },
@@ -221,7 +221,7 @@ void ptlrpc_lprocfs_register(struct proc_dir_entry *root, char *dir,
for (i = 0; i < EXTRA_LAST_OPC; i++) {
char *units;
- switch(i) {
+ switch (i) {
case BRW_WRITE_BYTES:
case BRW_READ_BYTES:
units = "bytes";
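For reference, the opcode tables touched in lproc_ptlrpc.c above map RPC opcodes to printable names for /proc statistics. Below is a minimal, self-contained sketch of that table-plus-lookup pattern in plain C; the opcode values, names, and helper functions here are invented for illustration and are not part of the Lustre code.

/* Hedged sketch of an opcode -> name table in the style of
 * ll_rpc_opcode_table above; all values and names are made up. */
#include <stdio.h>

struct demo_opcode {
	unsigned int opcode;
	const char  *opname;
};

static const struct demo_opcode demo_opcode_table[] = {
	{ 0, "demo_reply"   },
	{ 1, "demo_getattr" },
	{ 2, "demo_setattr" },
};

static const char *demo_opcode_name(unsigned int opc)
{
	size_t i;

	for (i = 0; i < sizeof(demo_opcode_table) / sizeof(demo_opcode_table[0]); i++)
		if (demo_opcode_table[i].opcode == opc)
			return demo_opcode_table[i].opname;
	return "unknown";
}

int main(void)
{
	printf("%s\n", demo_opcode_name(1));	/* prints "demo_getattr" */
	return 0;
}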
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index a0e009717a5a..3c6bf23415f9 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -47,17 +47,17 @@
* over \a conn connection to portal \a portal.
* Returns 0 on success or error code.
*/
-static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
- lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
- struct ptlrpc_connection *conn, int portal, __u64 xid,
- unsigned int offset)
+static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len,
+ lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
+ struct ptlrpc_connection *conn, int portal, __u64 xid,
+ unsigned int offset)
{
int rc;
lnet_md_t md;
- LASSERT (portal != 0);
- LASSERT (conn != NULL);
- CDEBUG (D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
+ LASSERT(portal != 0);
+ LASSERT(conn != NULL);
+ CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
md.start = base;
md.length = len;
md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
@@ -66,23 +66,24 @@ static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
md.eq_handle = ptlrpc_eq_h;
if (unlikely(ack == LNET_ACK_REQ &&
- OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK, OBD_FAIL_ONCE))){
+ OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK,
+ OBD_FAIL_ONCE))) {
/* don't ask for the ack to simulate failing client */
ack = LNET_NOACK_REQ;
}
- rc = LNetMDBind (md, LNET_UNLINK, mdh);
+ rc = LNetMDBind(md, LNET_UNLINK, mdh);
if (unlikely(rc != 0)) {
- CERROR ("LNetMDBind failed: %d\n", rc);
- LASSERT (rc == -ENOMEM);
+ CERROR("LNetMDBind failed: %d\n", rc);
+ LASSERT(rc == -ENOMEM);
return -ENOMEM;
}
CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64", offset %u\n",
len, portal, xid, offset);
- rc = LNetPut (conn->c_self, *mdh, ack,
- conn->c_peer, portal, xid, offset, 0);
+ rc = LNetPut(conn->c_self, *mdh, ack,
+ conn->c_peer, portal, xid, offset, 0);
if (unlikely(rc != 0)) {
int rc2;
/* We're going to get an UNLINK event when I unlink below,
@@ -179,7 +180,7 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
LNET_UNLINK, LNET_INS_AFTER, &me_h);
if (rc != 0) {
CERROR("%s: LNetMEAttach failed x"LPU64"/%d: rc = %d\n",
- desc->bd_export->exp_obd->obd_name, xid,
+ desc->bd_import->imp_obd->obd_name, xid,
posted_md, rc);
break;
}
@@ -189,7 +190,7 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
&desc->bd_mds[posted_md]);
if (rc != 0) {
CERROR("%s: LNetMDAttach failed x"LPU64"/%d: rc = %d\n",
- desc->bd_export->exp_obd->obd_name, xid,
+ desc->bd_import->imp_obd->obd_name, xid,
posted_md, rc);
rc2 = LNetMEUnlink(me_h);
LASSERT(rc2 == 0);
@@ -219,7 +220,7 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
/* Holler if peer manages to touch buffers before he knows the xid */
if (desc->bd_md_count != total_md)
CWARN("%s: Peer %s touched %d buffers while I registered\n",
- desc->bd_export->exp_obd->obd_name, libcfs_id2str(peer),
+ desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
total_md - desc->bd_md_count);
spin_unlock(&desc->bd_lock);
@@ -363,14 +364,14 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
* request, or a saved copy if this is a req saved in
* target_queue_final_reply().
*/
- LASSERT (req->rq_no_reply == 0);
- LASSERT (req->rq_reqbuf != NULL);
- LASSERT (rs != NULL);
- LASSERT ((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
- LASSERT (req->rq_repmsg != NULL);
- LASSERT (req->rq_repmsg == rs->rs_msg);
- LASSERT (rs->rs_cb_id.cbid_fn == reply_out_callback);
- LASSERT (rs->rs_cb_id.cbid_arg == rs);
+ LASSERT(req->rq_no_reply == 0);
+ LASSERT(req->rq_reqbuf != NULL);
+ LASSERT(rs != NULL);
+ LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
+ LASSERT(req->rq_repmsg != NULL);
+ LASSERT(req->rq_repmsg == rs->rs_msg);
+ LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
+ LASSERT(rs->rs_cb_id.cbid_arg == rs);
/* There may be no rq_export during failover */
@@ -423,12 +424,12 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
req->rq_sent = cfs_time_current_sec();
- rc = ptl_send_buf (&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
- (rs->rs_difficult && !rs->rs_no_ack) ?
- LNET_ACK_REQ : LNET_NOACK_REQ,
- &rs->rs_cb_id, conn,
- ptlrpc_req2svc(req)->srv_rep_portal,
- req->rq_xid, req->rq_reply_off);
+ rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
+ (rs->rs_difficult && !rs->rs_no_ack) ?
+ LNET_ACK_REQ : LNET_NOACK_REQ,
+ &rs->rs_cb_id, conn,
+ ptlrpc_req2svc(req)->srv_rep_portal,
+ req->rq_xid, req->rq_reply_off);
out:
if (unlikely(rc != 0))
ptlrpc_req_drop_rs(req);
@@ -437,7 +438,7 @@ out:
}
EXPORT_SYMBOL(ptlrpc_send_reply);
-int ptlrpc_reply (struct ptlrpc_request *req)
+int ptlrpc_reply(struct ptlrpc_request *req)
{
if (req->rq_no_reply)
return 0;
@@ -537,13 +538,13 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
/* bulk register should be done after wrap_request() */
if (request->rq_bulk != NULL) {
- rc = ptlrpc_register_bulk (request);
+ rc = ptlrpc_register_bulk(request);
if (rc != 0)
GOTO(out, rc);
}
if (!noreply) {
- LASSERT (request->rq_replen != 0);
+ LASSERT(request->rq_replen != 0);
if (request->rq_repbuf == NULL) {
LASSERT(request->rq_repdata == NULL);
LASSERT(request->rq_repmsg == NULL);
@@ -566,7 +567,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
if (rc != 0) {
CERROR("LNetMEAttach failed: %d\n", rc);
- LASSERT (rc == -ENOMEM);
+ LASSERT(rc == -ENOMEM);
GOTO(cleanup_bulk, rc = -ENOMEM);
}
}
@@ -604,7 +605,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
&request->rq_reply_md_h);
if (rc != 0) {
CERROR("LNetMDAttach failed: %d\n", rc);
- LASSERT (rc == -ENOMEM);
+ LASSERT(rc == -ENOMEM);
spin_lock(&request->rq_lock);
/* ...but the MD attach didn't succeed... */
request->rq_receiving_reply = 0;
@@ -655,7 +656,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
* nobody apart from the PUT's target has the right nid+XID to
* access the reply buffer. */
rc2 = LNetMEUnlink(reply_me_h);
- LASSERT (rc2 == 0);
+ LASSERT(rc2 == 0);
/* UNLINKED callback called synchronously */
LASSERT(!request->rq_receiving_reply);
@@ -714,10 +715,10 @@ int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
if (rc == 0)
return (0);
- CERROR("LNetMDAttach failed: %d; \n", rc);
- LASSERT (rc == -ENOMEM);
- rc = LNetMEUnlink (me_h);
- LASSERT (rc == 0);
+ CERROR("LNetMDAttach failed: %d;\n", rc);
+ LASSERT(rc == -ENOMEM);
+ rc = LNetMEUnlink(me_h);
+ LASSERT(rc == 0);
rqbd->rqbd_refcount = 0;
return (-ENOMEM);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index cd2611a3b53d..464479c0f00b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -274,8 +274,8 @@ do { \
spin_unlock(&ptlrpc_rs_debug_lock); \
} while (0)
#else
-# define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while(0)
-# define PTLRPC_RS_DEBUG_LRU_DEL(rs) do {} while(0)
+# define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while (0)
+# define PTLRPC_RS_DEBUG_LRU_DEL(rs) do {} while (0)
#endif
struct ptlrpc_reply_state *
@@ -507,14 +507,14 @@ void lustre_free_reply_state(struct ptlrpc_reply_state *rs)
{
PTLRPC_RS_DEBUG_LRU_DEL(rs);
- LASSERT (atomic_read(&rs->rs_refcount) == 0);
- LASSERT (!rs->rs_difficult || rs->rs_handled);
- LASSERT (!rs->rs_on_net);
- LASSERT (!rs->rs_scheduled);
- LASSERT (rs->rs_export == NULL);
- LASSERT (rs->rs_nlocks == 0);
- LASSERT (list_empty(&rs->rs_exp_list));
- LASSERT (list_empty(&rs->rs_obd_list));
+ LASSERT(atomic_read(&rs->rs_refcount) == 0);
+ LASSERT(!rs->rs_difficult || rs->rs_handled);
+ LASSERT(!rs->rs_on_net);
+ LASSERT(!rs->rs_scheduled);
+ LASSERT(rs->rs_export == NULL);
+ LASSERT(rs->rs_nlocks == 0);
+ LASSERT(list_empty(&rs->rs_exp_list));
+ LASSERT(list_empty(&rs->rs_obd_list));
sptlrpc_svc_free_rs(rs);
}
@@ -548,8 +548,8 @@ static int lustre_unpack_msg_v2(struct lustre_msg_v2 *m, int len)
required_len = lustre_msg_hdr_size_v2(m->lm_bufcount);
if (len < required_len) {
/* didn't receive all the buffer lengths */
- CERROR ("message length %d too small for %d buflens\n",
- len, m->lm_bufcount);
+ CERROR("message length %d too small for %d buflens\n",
+ len, m->lm_bufcount);
return -EINVAL;
}
@@ -636,8 +636,8 @@ static inline int lustre_unpack_ptlrpc_body_v2(struct ptlrpc_request *req,
}
if ((pb->pb_version & ~LUSTRE_VERSION_MASK) != PTLRPC_MSG_VERSION) {
- CERROR("wrong lustre_msg version %08x\n", pb->pb_version);
- return -EINVAL;
+ CERROR("wrong lustre_msg version %08x\n", pb->pb_version);
+ return -EINVAL;
}
if (!inout)
@@ -749,7 +749,7 @@ char *lustre_msg_string(struct lustre_msg *m, int index, int max_len)
}
if (str == NULL) {
- CERROR ("can't unpack string in msg %p buffer[%d]\n", m, index);
+ CERROR("can't unpack string in msg %p buffer[%d]\n", m, index);
return NULL;
}
@@ -1653,25 +1653,25 @@ EXPORT_SYMBOL(do_set_info_async);
*/
void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
{
- __swab32s (&b->pb_type);
- __swab32s (&b->pb_version);
- __swab32s (&b->pb_opc);
- __swab32s (&b->pb_status);
- __swab64s (&b->pb_last_xid);
- __swab64s (&b->pb_last_seen);
- __swab64s (&b->pb_last_committed);
- __swab64s (&b->pb_transno);
- __swab32s (&b->pb_flags);
- __swab32s (&b->pb_op_flags);
- __swab32s (&b->pb_conn_cnt);
- __swab32s (&b->pb_timeout);
- __swab32s (&b->pb_service_time);
- __swab32s (&b->pb_limit);
- __swab64s (&b->pb_slv);
- __swab64s (&b->pb_pre_versions[0]);
- __swab64s (&b->pb_pre_versions[1]);
- __swab64s (&b->pb_pre_versions[2]);
- __swab64s (&b->pb_pre_versions[3]);
+ __swab32s(&b->pb_type);
+ __swab32s(&b->pb_version);
+ __swab32s(&b->pb_opc);
+ __swab32s(&b->pb_status);
+ __swab64s(&b->pb_last_xid);
+ __swab64s(&b->pb_last_seen);
+ __swab64s(&b->pb_last_committed);
+ __swab64s(&b->pb_transno);
+ __swab32s(&b->pb_flags);
+ __swab32s(&b->pb_op_flags);
+ __swab32s(&b->pb_conn_cnt);
+ __swab32s(&b->pb_timeout);
+ __swab32s(&b->pb_service_time);
+ __swab32s(&b->pb_limit);
+ __swab64s(&b->pb_slv);
+ __swab64s(&b->pb_pre_versions[0]);
+ __swab64s(&b->pb_pre_versions[1]);
+ __swab64s(&b->pb_pre_versions[2]);
+ __swab64s(&b->pb_pre_versions[3]);
CLASSERT(offsetof(typeof(*b), pb_padding) != 0);
/* While we need to maintain compatibility between
* clients and servers without ptlrpc_body_v2 (< 2.3)
@@ -1723,33 +1723,33 @@ void lustre_swab_connect(struct obd_connect_data *ocd)
CLASSERT(offsetof(typeof(*ocd), paddingF) != 0);
}
-void lustre_swab_obdo (struct obdo *o)
+void lustre_swab_obdo(struct obdo *o)
{
- __swab64s (&o->o_valid);
+ __swab64s(&o->o_valid);
lustre_swab_ost_id(&o->o_oi);
- __swab64s (&o->o_parent_seq);
- __swab64s (&o->o_size);
- __swab64s (&o->o_mtime);
- __swab64s (&o->o_atime);
- __swab64s (&o->o_ctime);
- __swab64s (&o->o_blocks);
- __swab64s (&o->o_grant);
- __swab32s (&o->o_blksize);
- __swab32s (&o->o_mode);
- __swab32s (&o->o_uid);
- __swab32s (&o->o_gid);
- __swab32s (&o->o_flags);
- __swab32s (&o->o_nlink);
- __swab32s (&o->o_parent_oid);
- __swab32s (&o->o_misc);
- __swab64s (&o->o_ioepoch);
- __swab32s (&o->o_stripe_idx);
- __swab32s (&o->o_parent_ver);
+ __swab64s(&o->o_parent_seq);
+ __swab64s(&o->o_size);
+ __swab64s(&o->o_mtime);
+ __swab64s(&o->o_atime);
+ __swab64s(&o->o_ctime);
+ __swab64s(&o->o_blocks);
+ __swab64s(&o->o_grant);
+ __swab32s(&o->o_blksize);
+ __swab32s(&o->o_mode);
+ __swab32s(&o->o_uid);
+ __swab32s(&o->o_gid);
+ __swab32s(&o->o_flags);
+ __swab32s(&o->o_nlink);
+ __swab32s(&o->o_parent_oid);
+ __swab32s(&o->o_misc);
+ __swab64s(&o->o_ioepoch);
+ __swab32s(&o->o_stripe_idx);
+ __swab32s(&o->o_parent_ver);
/* o_handle is opaque */
/* o_lcookie is swabbed elsewhere */
- __swab32s (&o->o_uid_h);
- __swab32s (&o->o_gid_h);
- __swab64s (&o->o_data_version);
+ __swab32s(&o->o_uid_h);
+ __swab32s(&o->o_gid_h);
+ __swab64s(&o->o_data_version);
CLASSERT(offsetof(typeof(*o), o_padding_4) != 0);
CLASSERT(offsetof(typeof(*o), o_padding_5) != 0);
CLASSERT(offsetof(typeof(*o), o_padding_6) != 0);
@@ -1757,19 +1757,19 @@ void lustre_swab_obdo (struct obdo *o)
}
EXPORT_SYMBOL(lustre_swab_obdo);
-void lustre_swab_obd_statfs (struct obd_statfs *os)
+void lustre_swab_obd_statfs(struct obd_statfs *os)
{
- __swab64s (&os->os_type);
- __swab64s (&os->os_blocks);
- __swab64s (&os->os_bfree);
- __swab64s (&os->os_bavail);
- __swab64s (&os->os_files);
- __swab64s (&os->os_ffree);
+ __swab64s(&os->os_type);
+ __swab64s(&os->os_blocks);
+ __swab64s(&os->os_bfree);
+ __swab64s(&os->os_bavail);
+ __swab64s(&os->os_files);
+ __swab64s(&os->os_ffree);
/* no need to swab os_fsid */
- __swab32s (&os->os_bsize);
- __swab32s (&os->os_namelen);
- __swab64s (&os->os_maxbytes);
- __swab32s (&os->os_state);
+ __swab32s(&os->os_bsize);
+ __swab32s(&os->os_namelen);
+ __swab64s(&os->os_maxbytes);
+ __swab32s(&os->os_state);
CLASSERT(offsetof(typeof(*os), os_fprecreated) != 0);
CLASSERT(offsetof(typeof(*os), os_spare2) != 0);
CLASSERT(offsetof(typeof(*os), os_spare3) != 0);
@@ -1790,17 +1790,17 @@ void lustre_swab_obd_ioobj(struct obd_ioobj *ioo)
}
EXPORT_SYMBOL(lustre_swab_obd_ioobj);
-void lustre_swab_niobuf_remote (struct niobuf_remote *nbr)
+void lustre_swab_niobuf_remote(struct niobuf_remote *nbr)
{
- __swab64s (&nbr->offset);
- __swab32s (&nbr->len);
- __swab32s (&nbr->flags);
+ __swab64s(&nbr->offset);
+ __swab32s(&nbr->len);
+ __swab32s(&nbr->flags);
}
EXPORT_SYMBOL(lustre_swab_niobuf_remote);
-void lustre_swab_ost_body (struct ost_body *b)
+void lustre_swab_ost_body(struct ost_body *b)
{
- lustre_swab_obdo (&b->oa);
+ lustre_swab_obdo(&b->oa);
}
EXPORT_SYMBOL(lustre_swab_ost_body);
@@ -1861,45 +1861,45 @@ void lustre_swab_lquota_lvb(struct lquota_lvb *lvb)
}
EXPORT_SYMBOL(lustre_swab_lquota_lvb);
-void lustre_swab_mdt_body (struct mdt_body *b)
+void lustre_swab_mdt_body(struct mdt_body *b)
{
- lustre_swab_lu_fid (&b->fid1);
- lustre_swab_lu_fid (&b->fid2);
+ lustre_swab_lu_fid(&b->fid1);
+ lustre_swab_lu_fid(&b->fid2);
/* handle is opaque */
- __swab64s (&b->valid);
- __swab64s (&b->size);
- __swab64s (&b->mtime);
- __swab64s (&b->atime);
- __swab64s (&b->ctime);
- __swab64s (&b->blocks);
- __swab64s (&b->ioepoch);
- CLASSERT(offsetof(typeof(*b), unused1) != 0);
- __swab32s (&b->fsuid);
- __swab32s (&b->fsgid);
- __swab32s (&b->capability);
- __swab32s (&b->mode);
- __swab32s (&b->uid);
- __swab32s (&b->gid);
- __swab32s (&b->flags);
- __swab32s (&b->rdev);
- __swab32s (&b->nlink);
+ __swab64s(&b->valid);
+ __swab64s(&b->size);
+ __swab64s(&b->mtime);
+ __swab64s(&b->atime);
+ __swab64s(&b->ctime);
+ __swab64s(&b->blocks);
+ __swab64s(&b->ioepoch);
+ __swab64s(&b->t_state);
+ __swab32s(&b->fsuid);
+ __swab32s(&b->fsgid);
+ __swab32s(&b->capability);
+ __swab32s(&b->mode);
+ __swab32s(&b->uid);
+ __swab32s(&b->gid);
+ __swab32s(&b->flags);
+ __swab32s(&b->rdev);
+ __swab32s(&b->nlink);
CLASSERT(offsetof(typeof(*b), unused2) != 0);
- __swab32s (&b->suppgid);
- __swab32s (&b->eadatasize);
- __swab32s (&b->aclsize);
- __swab32s (&b->max_mdsize);
- __swab32s (&b->max_cookiesize);
- __swab32s (&b->uid_h);
- __swab32s (&b->gid_h);
+ __swab32s(&b->suppgid);
+ __swab32s(&b->eadatasize);
+ __swab32s(&b->aclsize);
+ __swab32s(&b->max_mdsize);
+ __swab32s(&b->max_cookiesize);
+ __swab32s(&b->uid_h);
+ __swab32s(&b->gid_h);
CLASSERT(offsetof(typeof(*b), padding_5) != 0);
}
EXPORT_SYMBOL(lustre_swab_mdt_body);
-void lustre_swab_mdt_ioepoch (struct mdt_ioepoch *b)
+void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
{
/* handle is opaque */
- __swab64s (&b->ioepoch);
- __swab32s (&b->flags);
+ __swab64s(&b->ioepoch);
+ __swab32s(&b->flags);
CLASSERT(offsetof(typeof(*b), padding) != 0);
}
EXPORT_SYMBOL(lustre_swab_mdt_ioepoch);
@@ -1957,49 +1957,49 @@ void lustre_swab_mgs_config_res(struct mgs_config_res *body)
}
EXPORT_SYMBOL(lustre_swab_mgs_config_res);
-static void lustre_swab_obd_dqinfo (struct obd_dqinfo *i)
+static void lustre_swab_obd_dqinfo(struct obd_dqinfo *i)
{
- __swab64s (&i->dqi_bgrace);
- __swab64s (&i->dqi_igrace);
- __swab32s (&i->dqi_flags);
- __swab32s (&i->dqi_valid);
+ __swab64s(&i->dqi_bgrace);
+ __swab64s(&i->dqi_igrace);
+ __swab32s(&i->dqi_flags);
+ __swab32s(&i->dqi_valid);
}
-static void lustre_swab_obd_dqblk (struct obd_dqblk *b)
+static void lustre_swab_obd_dqblk(struct obd_dqblk *b)
{
- __swab64s (&b->dqb_ihardlimit);
- __swab64s (&b->dqb_isoftlimit);
- __swab64s (&b->dqb_curinodes);
- __swab64s (&b->dqb_bhardlimit);
- __swab64s (&b->dqb_bsoftlimit);
- __swab64s (&b->dqb_curspace);
- __swab64s (&b->dqb_btime);
- __swab64s (&b->dqb_itime);
- __swab32s (&b->dqb_valid);
+ __swab64s(&b->dqb_ihardlimit);
+ __swab64s(&b->dqb_isoftlimit);
+ __swab64s(&b->dqb_curinodes);
+ __swab64s(&b->dqb_bhardlimit);
+ __swab64s(&b->dqb_bsoftlimit);
+ __swab64s(&b->dqb_curspace);
+ __swab64s(&b->dqb_btime);
+ __swab64s(&b->dqb_itime);
+ __swab32s(&b->dqb_valid);
CLASSERT(offsetof(typeof(*b), dqb_padding) != 0);
}
-void lustre_swab_obd_quotactl (struct obd_quotactl *q)
+void lustre_swab_obd_quotactl(struct obd_quotactl *q)
{
- __swab32s (&q->qc_cmd);
- __swab32s (&q->qc_type);
- __swab32s (&q->qc_id);
- __swab32s (&q->qc_stat);
- lustre_swab_obd_dqinfo (&q->qc_dqinfo);
- lustre_swab_obd_dqblk (&q->qc_dqblk);
+ __swab32s(&q->qc_cmd);
+ __swab32s(&q->qc_type);
+ __swab32s(&q->qc_id);
+ __swab32s(&q->qc_stat);
+ lustre_swab_obd_dqinfo(&q->qc_dqinfo);
+ lustre_swab_obd_dqblk(&q->qc_dqblk);
}
EXPORT_SYMBOL(lustre_swab_obd_quotactl);
-void lustre_swab_mdt_remote_perm (struct mdt_remote_perm *p)
+void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p)
{
- __swab32s (&p->rp_uid);
- __swab32s (&p->rp_gid);
- __swab32s (&p->rp_fsuid);
- __swab32s (&p->rp_fsuid_h);
- __swab32s (&p->rp_fsgid);
- __swab32s (&p->rp_fsgid_h);
- __swab32s (&p->rp_access_perm);
- __swab32s (&p->rp_padding);
+ __swab32s(&p->rp_uid);
+ __swab32s(&p->rp_gid);
+ __swab32s(&p->rp_fsuid);
+ __swab32s(&p->rp_fsuid_h);
+ __swab32s(&p->rp_fsgid);
+ __swab32s(&p->rp_fsgid_h);
+ __swab32s(&p->rp_access_perm);
+ __swab32s(&p->rp_padding);
};
EXPORT_SYMBOL(lustre_swab_mdt_remote_perm);
@@ -2089,31 +2089,31 @@ void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr)
};
EXPORT_SYMBOL(lustre_swab_mdt_rec_reint);
-void lustre_swab_lov_desc (struct lov_desc *ld)
+void lustre_swab_lov_desc(struct lov_desc *ld)
{
- __swab32s (&ld->ld_tgt_count);
- __swab32s (&ld->ld_active_tgt_count);
- __swab32s (&ld->ld_default_stripe_count);
- __swab32s (&ld->ld_pattern);
- __swab64s (&ld->ld_default_stripe_size);
- __swab64s (&ld->ld_default_stripe_offset);
- __swab32s (&ld->ld_qos_maxage);
+ __swab32s(&ld->ld_tgt_count);
+ __swab32s(&ld->ld_active_tgt_count);
+ __swab32s(&ld->ld_default_stripe_count);
+ __swab32s(&ld->ld_pattern);
+ __swab64s(&ld->ld_default_stripe_size);
+ __swab64s(&ld->ld_default_stripe_offset);
+ __swab32s(&ld->ld_qos_maxage);
/* uuid endian insensitive */
}
EXPORT_SYMBOL(lustre_swab_lov_desc);
-void lustre_swab_lmv_desc (struct lmv_desc *ld)
+void lustre_swab_lmv_desc(struct lmv_desc *ld)
{
- __swab32s (&ld->ld_tgt_count);
- __swab32s (&ld->ld_active_tgt_count);
- __swab32s (&ld->ld_default_stripe_count);
- __swab32s (&ld->ld_pattern);
- __swab64s (&ld->ld_default_hash_size);
- __swab32s (&ld->ld_qos_maxage);
+ __swab32s(&ld->ld_tgt_count);
+ __swab32s(&ld->ld_active_tgt_count);
+ __swab32s(&ld->ld_default_stripe_count);
+ __swab32s(&ld->ld_pattern);
+ __swab64s(&ld->ld_default_hash_size);
+ __swab32s(&ld->ld_qos_maxage);
/* uuid endian insensitive */
}
-void lustre_swab_lmv_stripe_md (struct lmv_stripe_md *mea)
+void lustre_swab_lmv_stripe_md(struct lmv_stripe_md *mea)
{
__swab32s(&mea->mea_magic);
__swab32s(&mea->mea_count);
@@ -2142,7 +2142,7 @@ void lustre_swab_lmv_user_md(struct lmv_user_md *lum)
}
EXPORT_SYMBOL(lustre_swab_lmv_user_md);
-static void print_lum (struct lov_user_md *lum)
+static void print_lum(struct lov_user_md *lum)
{
CDEBUG(D_OTHER, "lov_user_md %p:\n", lum);
CDEBUG(D_OTHER, "\tlmm_magic: %#x\n", lum->lmm_magic);
@@ -2212,16 +2212,16 @@ void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
}
EXPORT_SYMBOL(lustre_swab_lov_user_md_objects);
-void lustre_swab_ldlm_res_id (struct ldlm_res_id *id)
+void lustre_swab_ldlm_res_id(struct ldlm_res_id *id)
{
int i;
for (i = 0; i < RES_NAME_SIZE; i++)
- __swab64s (&id->name[i]);
+ __swab64s(&id->name[i]);
}
EXPORT_SYMBOL(lustre_swab_ldlm_res_id);
-void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d)
+void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d)
{
/* the lock data is a union and the first two fields are always an
* extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock
@@ -2234,46 +2234,46 @@ void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d)
}
EXPORT_SYMBOL(lustre_swab_ldlm_policy_data);
-void lustre_swab_ldlm_intent (struct ldlm_intent *i)
+void lustre_swab_ldlm_intent(struct ldlm_intent *i)
{
- __swab64s (&i->opc);
+ __swab64s(&i->opc);
}
EXPORT_SYMBOL(lustre_swab_ldlm_intent);
-void lustre_swab_ldlm_resource_desc (struct ldlm_resource_desc *r)
+void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r)
{
- __swab32s (&r->lr_type);
+ __swab32s(&r->lr_type);
CLASSERT(offsetof(typeof(*r), lr_padding) != 0);
- lustre_swab_ldlm_res_id (&r->lr_name);
+ lustre_swab_ldlm_res_id(&r->lr_name);
}
EXPORT_SYMBOL(lustre_swab_ldlm_resource_desc);
-void lustre_swab_ldlm_lock_desc (struct ldlm_lock_desc *l)
+void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l)
{
- lustre_swab_ldlm_resource_desc (&l->l_resource);
- __swab32s (&l->l_req_mode);
- __swab32s (&l->l_granted_mode);
- lustre_swab_ldlm_policy_data (&l->l_policy_data);
+ lustre_swab_ldlm_resource_desc(&l->l_resource);
+ __swab32s(&l->l_req_mode);
+ __swab32s(&l->l_granted_mode);
+ lustre_swab_ldlm_policy_data(&l->l_policy_data);
}
EXPORT_SYMBOL(lustre_swab_ldlm_lock_desc);
-void lustre_swab_ldlm_request (struct ldlm_request *rq)
+void lustre_swab_ldlm_request(struct ldlm_request *rq)
{
- __swab32s (&rq->lock_flags);
- lustre_swab_ldlm_lock_desc (&rq->lock_desc);
- __swab32s (&rq->lock_count);
+ __swab32s(&rq->lock_flags);
+ lustre_swab_ldlm_lock_desc(&rq->lock_desc);
+ __swab32s(&rq->lock_count);
/* lock_handle[] opaque */
}
EXPORT_SYMBOL(lustre_swab_ldlm_request);
-void lustre_swab_ldlm_reply (struct ldlm_reply *r)
+void lustre_swab_ldlm_reply(struct ldlm_reply *r)
{
- __swab32s (&r->lock_flags);
+ __swab32s(&r->lock_flags);
CLASSERT(offsetof(typeof(*r), lock_padding) != 0);
- lustre_swab_ldlm_lock_desc (&r->lock_desc);
+ lustre_swab_ldlm_lock_desc(&r->lock_desc);
/* lock_handle opaque */
- __swab64s (&r->lock_policy_res1);
- __swab64s (&r->lock_policy_res2);
+ __swab64s(&r->lock_policy_res1);
+ __swab64s(&r->lock_policy_res2);
}
EXPORT_SYMBOL(lustre_swab_ldlm_reply);
@@ -2409,7 +2409,7 @@ static inline int rep_ptlrpc_body_swabbed(struct ptlrpc_request *req)
void _debug_req(struct ptlrpc_request *req,
struct libcfs_debug_msg_data *msgdata,
- const char *fmt, ... )
+ const char *fmt, ...)
{
int req_ok = req->rq_reqmsg != NULL;
int rep_ok = req->rq_repmsg != NULL;
@@ -2457,20 +2457,20 @@ EXPORT_SYMBOL(_debug_req);
void lustre_swab_lustre_capa(struct lustre_capa *c)
{
lustre_swab_lu_fid(&c->lc_fid);
- __swab64s (&c->lc_opc);
- __swab64s (&c->lc_uid);
- __swab64s (&c->lc_gid);
- __swab32s (&c->lc_flags);
- __swab32s (&c->lc_keyid);
- __swab32s (&c->lc_timeout);
- __swab32s (&c->lc_expiry);
+ __swab64s(&c->lc_opc);
+ __swab64s(&c->lc_uid);
+ __swab64s(&c->lc_gid);
+ __swab32s(&c->lc_flags);
+ __swab32s(&c->lc_keyid);
+ __swab32s(&c->lc_timeout);
+ __swab32s(&c->lc_expiry);
}
EXPORT_SYMBOL(lustre_swab_lustre_capa);
void lustre_swab_lustre_capa_key(struct lustre_capa_key *k)
{
- __swab64s (&k->lk_seq);
- __swab32s (&k->lk_keyid);
+ __swab64s(&k->lk_seq);
+ __swab32s(&k->lk_keyid);
CLASSERT(offsetof(typeof(*k), lk_padding) != 0);
}
EXPORT_SYMBOL(lustre_swab_lustre_capa_key);
@@ -2565,3 +2565,10 @@ void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl)
__swab64s(&msl->msl_flags);
}
EXPORT_SYMBOL(lustre_swab_swap_layouts);
+
+void lustre_swab_close_data(struct close_data *cd)
+{
+ lustre_swab_lu_fid(&cd->cd_fid);
+ __swab64s(&cd->cd_data_version);
+}
+EXPORT_SYMBOL(lustre_swab_close_data);
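The pack_generic.c hunks above are whitespace cleanups around Lustre's in-place "swab" helpers, which fix the endianness of on-wire structures field by field. As background only, here is a hedged sketch of how such helpers can be built on the kernel byte-swap primitives from <linux/swab.h>; the struct, field names, and demo_* helpers are hypothetical, not the actual Lustre definitions.

/* Hedged sketch: in-place endianness fixups for an on-wire struct,
 * analogous to the lustre_swab_*() helpers above.  swab32()/swab64()
 * are the kernel byte-swap primitives; demo_* names are invented. */
#include <linux/swab.h>
#include <linux/types.h>

struct demo_wire_body {
	__u32 dw_opcode;
	__u64 dw_transno;
};

static inline void demo_swab32s(__u32 *v) { *v = swab32(*v); }
static inline void demo_swab64s(__u64 *v) { *v = swab64(*v); }

/* Called only when the peer's endianness differs from ours;
 * opaque handles would be left untouched, as in the code above. */
static void demo_swab_wire_body(struct demo_wire_body *b)
{
	demo_swab32s(&b->dw_opcode);
	demo_swab64s(&b->dw_transno);
}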
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index 4d340f4a2198..6dff502ce23e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -45,7 +45,8 @@
#include "ptlrpc_internal.h"
static int suppress_pings;
-CFS_MODULE_PARM(suppress_pings, "i", int, 0644, "Suppress pings");
+module_param(suppress_pings, int, 0644);
+MODULE_PARM_DESC(suppress_pings, "Suppress pings");
struct mutex pinger_mutex;
static LIST_HEAD(pinger_imports);
@@ -140,9 +141,6 @@ static inline int ptlrpc_next_reconnect(struct obd_import *imp)
return cfs_time_shift(obd_timeout);
}
-static atomic_t suspend_timeouts = ATOMIC_INIT(0);
-static cfs_time_t suspend_wakeup_time = 0;
-
cfs_duration_t pinger_check_timeout(cfs_time_t time)
{
struct timeout_item *item;
@@ -162,67 +160,6 @@ cfs_duration_t pinger_check_timeout(cfs_time_t time)
cfs_time_current());
}
-static wait_queue_head_t suspend_timeouts_waitq;
-
-cfs_time_t ptlrpc_suspend_wakeup_time(void)
-{
- return suspend_wakeup_time;
-}
-
-void ptlrpc_deactivate_timeouts(struct obd_import *imp)
-{
- /*XXX: disabled for now, will be replaced by adaptive timeouts */
-#if 0
- if (imp->imp_no_timeout)
- return;
- imp->imp_no_timeout = 1;
- atomic_inc(&suspend_timeouts);
- CDEBUG(D_HA|D_WARNING, "deactivate timeouts %u\n",
- atomic_read(&suspend_timeouts));
-#endif
-}
-
-void ptlrpc_activate_timeouts(struct obd_import *imp)
-{
- /*XXX: disabled for now, will be replaced by adaptive timeouts */
-#if 0
- if (!imp->imp_no_timeout)
- return;
- imp->imp_no_timeout = 0;
- LASSERT(atomic_read(&suspend_timeouts) > 0);
- if (atomic_dec_and_test(&suspend_timeouts)) {
- suspend_wakeup_time = cfs_time_current();
- wake_up(&suspend_timeouts_waitq);
- }
- CDEBUG(D_HA|D_WARNING, "activate timeouts %u\n",
- atomic_read(&suspend_timeouts));
-#endif
-}
-
-int ptlrpc_check_suspend(void)
-{
- if (atomic_read(&suspend_timeouts))
- return 1;
- return 0;
-}
-
-int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req)
-{
- struct l_wait_info lwi;
-
- if (atomic_read(&suspend_timeouts)) {
- DEBUG_REQ(D_NET, req, "-- suspend %d regular timeout",
- atomic_read(&suspend_timeouts));
- lwi = LWI_INTR(NULL, NULL);
- l_wait_event(suspend_timeouts_waitq,
- atomic_read(&suspend_timeouts) == 0, &lwi);
- DEBUG_REQ(D_NET, req, "-- recharge regular timeout");
- return 1;
- }
- return 0;
-}
-
-
static bool ir_up;
void ptlrpc_pinger_ir_up(void)
@@ -377,7 +314,6 @@ int ptlrpc_start_pinger(void)
return -EALREADY;
init_waitqueue_head(&pinger_thread.t_ctl_waitq);
- init_waitqueue_head(&suspend_timeouts_waitq);
strcpy(pinger_thread.t_name, "ll_ping");
@@ -576,7 +512,7 @@ int ptlrpc_del_timeout_client(struct list_head *obd_list,
break;
}
}
- LASSERTF(ti != NULL, "ti is NULL ! \n");
+ LASSERTF(ti != NULL, "ti is NULL !\n");
if (list_empty(&ti->ti_obd_list)) {
list_del(&ti->ti_chain);
OBD_FREE_PTR(ti);
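The CFS_MODULE_PARM conversions in pinger.c above (and in ptlrpcd.c and service.c further down) move to the standard kernel module-parameter idiom. A minimal sketch of that idiom follows; "demo_interval" is a made-up parameter name used only for illustration.

/* Hedged sketch of the plain module_param()/MODULE_PARM_DESC() idiom
 * the patches above convert to; the parameter itself is hypothetical. */
#include <linux/module.h>
#include <linux/moduleparam.h>

static int demo_interval = 25;
module_param(demo_interval, int, 0644);	/* int, root read/write via sysfs */
MODULE_PARM_DESC(demo_interval, "Demo interval in seconds");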
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
index ab363477151d..e3b5a920bca2 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
@@ -77,13 +77,13 @@ void ptlrpc_lprocfs_register_service(struct proc_dir_entry *proc_entry,
struct ptlrpc_service *svc);
void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc);
void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount);
-void ptlrpc_lprocfs_do_request_stat (struct ptlrpc_request *req,
+void ptlrpc_lprocfs_do_request_stat(struct ptlrpc_request *req,
long q_usec, long work_usec);
#else
-#define ptlrpc_lprocfs_register_service(params...) do{}while(0)
-#define ptlrpc_lprocfs_unregister_service(params...) do{}while(0)
-#define ptlrpc_lprocfs_rpc_sent(params...) do{}while(0)
-#define ptlrpc_lprocfs_do_request_stat(params...) do{}while(0)
+#define ptlrpc_lprocfs_register_service(params...) do {} while (0)
+#define ptlrpc_lprocfs_unregister_service(params...) do {} while (0)
+#define ptlrpc_lprocfs_rpc_sent(params...) do {} while (0)
+#define ptlrpc_lprocfs_do_request_stat(params...) do {} while (0)
#endif /* LPROCFS */
/* NRS */
@@ -259,8 +259,14 @@ void sptlrpc_enc_pool_fini(void);
int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v);
/* sec_lproc.c */
+#ifdef LPROCFS
int sptlrpc_lproc_init(void);
void sptlrpc_lproc_fini(void);
+#else
+static inline int sptlrpc_lproc_init(void)
+{ return 0; }
+static inline void sptlrpc_lproc_fini(void) {}
+#endif
/* sec_gc.c */
int sptlrpc_gc_init(void);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
index 419e634854df..0efd35887a15 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
@@ -112,7 +112,7 @@ __init int ptlrpc_init(void)
return 0;
cleanup:
- switch(cleanup_phase) {
+ switch (cleanup_phase) {
case 8:
ptlrpc_nrs_fini();
case 7:
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index 89c9be96f454..2d26fd543d46 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -77,12 +77,12 @@ struct ptlrpcd {
};
static int max_ptlrpcds;
-CFS_MODULE_PARM(max_ptlrpcds, "i", int, 0644,
- "Max ptlrpcd thread count to be started.");
+module_param(max_ptlrpcds, int, 0644);
+MODULE_PARM_DESC(max_ptlrpcds, "Max ptlrpcd thread count to be started.");
static int ptlrpcd_bind_policy = PDB_POLICY_PAIR;
-CFS_MODULE_PARM(ptlrpcd_bind_policy, "i", int, 0644,
- "Ptlrpcd threads binding mode.");
+module_param(ptlrpcd_bind_policy, int, 0644);
+MODULE_PARM_DESC(ptlrpcd_bind_policy, "Ptlrpcd threads binding mode.");
static struct ptlrpcd *ptlrpcds;
struct mutex ptlrpcd_mutex;
@@ -600,7 +600,6 @@ static int ptlrpcd_bind(int index, int max)
int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
{
int rc;
- int env = 0;
/*
* Do not allow start second thread for one pc.
@@ -619,6 +618,7 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
pc->pc_set = ptlrpc_prep_set();
if (pc->pc_set == NULL)
GOTO(out, rc = -ENOMEM);
+
/*
* So far only "client" ptlrpcd uses an environment. In the future,
* ptlrpcd thread (or a thread-set) has to be given an argument,
@@ -626,40 +626,40 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
*/
rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
if (rc != 0)
- GOTO(out, rc);
+ GOTO(out_set, rc);
- env = 1;
{
struct task_struct *task;
-
if (index >= 0) {
rc = ptlrpcd_bind(index, max);
if (rc < 0)
- GOTO(out, rc);
+ GOTO(out_env, rc);
}
task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name);
if (IS_ERR(task))
- GOTO(out, rc = PTR_ERR(task));
+ GOTO(out_env, rc = PTR_ERR(task));
- rc = 0;
wait_for_completion(&pc->pc_starting);
}
-out:
- if (rc) {
- if (pc->pc_set != NULL) {
- struct ptlrpc_request_set *set = pc->pc_set;
-
- spin_lock(&pc->pc_lock);
- pc->pc_set = NULL;
- spin_unlock(&pc->pc_lock);
- ptlrpc_set_destroy(set);
- }
- if (env != 0)
- lu_context_fini(&pc->pc_env.le_ctx);
- clear_bit(LIOD_BIND, &pc->pc_flags);
- clear_bit(LIOD_START, &pc->pc_flags);
+ return 0;
+
+out_env:
+ lu_context_fini(&pc->pc_env.le_ctx);
+
+out_set:
+ if (pc->pc_set != NULL) {
+ struct ptlrpc_request_set *set = pc->pc_set;
+
+ spin_lock(&pc->pc_lock);
+ pc->pc_set = NULL;
+ spin_unlock(&pc->pc_lock);
+ ptlrpc_set_destroy(set);
}
+ clear_bit(LIOD_BIND, &pc->pc_flags);
+
+out:
+ clear_bit(LIOD_START, &pc->pc_flags);
return rc;
}
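The restructured ptlrpcd_start() error path above replaces a single "out:" label plus flag variables with ordered cleanup labels, so each acquired resource is released exactly once in reverse order. The sketch below shows that general goto-unwind pattern in plain C; the resources and helpers are placeholders, not the actual ptlrpcd structures.

/* Hedged sketch of the ordered goto-unwind pattern used above;
 * the acquire/release helpers stand in for pc_set / pc_env setup. */
static int demo_a_ready, demo_b_ready;

static int  acquire_a(void)      { demo_a_ready = 1; return 0; }
static void release_a(void)      { demo_a_ready = 0; }
static int  acquire_b(void)      { demo_b_ready = 1; return 0; }
static void release_b(void)      { demo_b_ready = 0; }
static int  launch_thread(void)  { return 0; }

static int demo_start(void)
{
	int rc;

	rc = acquire_a();
	if (rc)
		goto out;
	rc = acquire_b();
	if (rc)
		goto out_a;
	rc = launch_thread();
	if (rc)
		goto out_b;
	return 0;		/* success: keep both resources */

out_b:
	release_b();
out_a:
	release_a();
out:
	return rc;
}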
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 21de868da522..590fa8df8b7f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -44,19 +44,19 @@
/* The following are visible and mutable through /sys/module/ptlrpc */
int test_req_buffer_pressure = 0;
-CFS_MODULE_PARM(test_req_buffer_pressure, "i", int, 0444,
- "set non-zero to put pressure on request buffer pools");
-CFS_MODULE_PARM(at_min, "i", int, 0644,
- "Adaptive timeout minimum (sec)");
-CFS_MODULE_PARM(at_max, "i", int, 0644,
- "Adaptive timeout maximum (sec)");
-CFS_MODULE_PARM(at_history, "i", int, 0644,
- "Adaptive timeouts remember the slowest event that took place "
- "within this period (sec)");
-CFS_MODULE_PARM(at_early_margin, "i", int, 0644,
- "How soon before an RPC deadline to send an early reply");
-CFS_MODULE_PARM(at_extra, "i", int, 0644,
- "How much extra time to give with each early reply");
+module_param(test_req_buffer_pressure, int, 0444);
+MODULE_PARM_DESC(test_req_buffer_pressure, "set non-zero to put pressure on request buffer pools");
+module_param(at_min, int, 0644);
+MODULE_PARM_DESC(at_min, "Adaptive timeout minimum (sec)");
+module_param(at_max, int, 0644);
+MODULE_PARM_DESC(at_max, "Adaptive timeout maximum (sec)");
+module_param(at_history, int, 0644);
+MODULE_PARM_DESC(at_history,
+ "Adaptive timeouts remember the slowest event that took place within this period (sec)");
+module_param(at_early_margin, int, 0644);
+MODULE_PARM_DESC(at_early_margin, "How soon before an RPC deadline to send an early reply");
+module_param(at_extra, int, 0644);
+MODULE_PARM_DESC(at_extra, "How much extra time to give with each early reply");
/* forward ref */
@@ -386,7 +386,7 @@ ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
{
LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
LASSERT(spin_is_locked(&rs->rs_lock));
- LASSERT (rs->rs_difficult);
+ LASSERT(rs->rs_difficult);
rs->rs_scheduled_ever = 1; /* flag any notification attempt */
if (rs->rs_scheduled) { /* being set up or already notified */
@@ -412,7 +412,7 @@ void ptlrpc_commit_replies(struct obd_export *exp)
spin_lock(&exp->exp_uncommitted_replies_lock);
list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
rs_obd_list) {
- LASSERT (rs->rs_difficult);
+ LASSERT(rs->rs_difficult);
/* VBR: per-export last_committed */
LASSERT(rs->rs_export);
if (rs->rs_transno <= exp->exp_last_committed) {
@@ -796,7 +796,7 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
LASSERT(rc == 0);
mutex_lock(&ptlrpc_all_services_mutex);
- list_add (&service->srv_list, &ptlrpc_all_services);
+ list_add(&service->srv_list, &ptlrpc_all_services);
mutex_unlock(&ptlrpc_all_services_mutex);
if (proc_entry != NULL)
@@ -1115,8 +1115,10 @@ static int ptlrpc_check_req(struct ptlrpc_request *req)
}
if (unlikely(req->rq_export->exp_obd &&
req->rq_export->exp_obd->obd_fail)) {
- /* Failing over, don't handle any more reqs, send
- error response instead. */
+ /*
+ * Failing over, don't handle any more reqs, send
+ * error response instead.
+ */
CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
req, req->rq_export->exp_obd->obd_name);
rc = -ENODEV;
@@ -1268,7 +1270,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
return -ETIMEDOUT;
}
- if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0){
+ if (!(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, "
"but no AT support");
return -ENOSYS;
@@ -1777,9 +1779,9 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
if (rc) {
- CERROR ("error unpacking ptlrpc body: ptl %d from %s x"
- LPU64"\n", svc->srv_req_portal,
- libcfs_id2str(req->rq_peer), req->rq_xid);
+ CERROR("error unpacking ptlrpc body: ptl %d from %s x"
+ LPU64"\n", svc->srv_req_portal,
+ libcfs_id2str(req->rq_peer), req->rq_xid);
goto err_req;
}
@@ -1798,7 +1800,7 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
goto err_req;
}
- switch(lustre_msg_get_opc(req->rq_reqmsg)) {
+ switch (lustre_msg_get_opc(req->rq_reqmsg)) {
case MDS_WRITEPAGE:
case OST_WRITE:
req->rq_bulk_write = 1;
@@ -1895,7 +1897,7 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
- if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
libcfs_debug_dumplog();
do_gettimeofday(&work_start);
@@ -1967,13 +1969,14 @@ put_conn:
lu_context_fini(&request->rq_session);
if (unlikely(cfs_time_current_sec() > request->rq_deadline)) {
- DEBUG_REQ(D_WARNING, request, "Request took longer "
- "than estimated ("CFS_DURATION_T":"CFS_DURATION_T"s);"
- " client may timeout.",
- cfs_time_sub(request->rq_deadline,
- request->rq_arrival_time.tv_sec),
- cfs_time_sub(cfs_time_current_sec(),
- request->rq_deadline));
+ DEBUG_REQ(D_WARNING, request,
+ "Request took longer than estimated ("
+ CFS_DURATION_T":"CFS_DURATION_T
+ "s); client may timeout.",
+ cfs_time_sub(request->rq_deadline,
+ request->rq_arrival_time.tv_sec),
+ cfs_time_sub(cfs_time_current_sec(),
+ request->rq_deadline));
}
do_gettimeofday(&work_end);
@@ -2037,13 +2040,13 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
exp = rs->rs_export;
- LASSERT (rs->rs_difficult);
- LASSERT (rs->rs_scheduled);
- LASSERT (list_empty(&rs->rs_list));
+ LASSERT(rs->rs_difficult);
+ LASSERT(rs->rs_scheduled);
+ LASSERT(list_empty(&rs->rs_list));
spin_lock(&exp->exp_lock);
/* Noop if removed already */
- list_del_init (&rs->rs_exp_list);
+ list_del_init(&rs->rs_exp_list);
spin_unlock(&exp->exp_lock);
/* The disk commit callback holds exp_uncommitted_replies_lock while it
@@ -2113,9 +2116,9 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
/* Off the net */
spin_unlock(&rs->rs_lock);
- class_export_put (exp);
+ class_export_put(exp);
rs->rs_export = NULL;
- ptlrpc_rs_decref (rs);
+ ptlrpc_rs_decref(rs);
if (atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
svc->srv_is_stopping)
wake_up_all(&svcpt->scp_waitq);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wirehdr.c b/drivers/staging/lustre/lustre/ptlrpc/wirehdr.c
deleted file mode 100644
index 93bc40b422ee..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/wirehdr.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-# ifdef CONFIG_FS_POSIX_ACL
-# include <linux/fs.h>
-# include <linux/posix_acl_xattr.h>
-# endif
-
-#include <obd_support.h>
-#include <obd_class.h>
-#include <lustre_net.h>
-#include <lustre_disk.h>
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index 9890bd9cfb93..3aa445952024 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -36,10 +36,8 @@
#define DEBUG_SUBSYSTEM S_RPC
-# ifdef CONFIG_FS_POSIX_ACL
-# include <linux/fs.h>
-# include <linux/posix_acl_xattr.h>
-# endif
+#include <linux/fs.h>
+#include <linux/posix_acl_xattr.h>
#include <obd_support.h>
#include <obd_class.h>
@@ -49,9 +47,10 @@ void lustre_assert_wire_constants(void)
{
/* Wire protocol assertions generated by 'wirecheck'
* (make -C lustre/utils newwiretest)
- * running on Linux deva 2.6.32.279.lustre #5 SMP Tue Apr 9 22:52:17 CST 2013 x86_64 x86_64 x
- * with gcc version 4.4.4 20100726 (Red Hat 4.4.4-13) (GCC) */
-
+ * running on Linux centos6-bis 2.6.32-358.0.1.el6-head
+ * #3 SMP Wed Apr 17 17:37:43 CEST 2013
+ * with gcc version 4.4.6 20110731 (Red Hat 4.4.6-3) (GCC)
+ */
/* Constants... */
LASSERTF(PTL_RPC_MSG_REQUEST == 4711, "found %lld\n",
@@ -432,6 +431,10 @@ void lustre_assert_wire_constants(void)
(unsigned)LMAC_HSM);
LASSERTF(LMAC_SOM == 0x00000002UL, "found 0x%.8xUL\n",
(unsigned)LMAC_SOM);
+ LASSERTF(LMAC_NOT_IN_OI == 0x00000004UL, "found 0x%.8xUL\n",
+ (unsigned)LMAC_NOT_IN_OI);
+ LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n",
+ (unsigned)LMAC_FID_ON_OST);
LASSERTF(OBJ_CREATE == 1, "found %lld\n",
(long long)OBJ_CREATE);
LASSERTF(OBJ_DESTROY == 2, "found %lld\n",
@@ -1335,6 +1338,8 @@ void lustre_assert_wire_constants(void)
OBD_MD_REINT);
LASSERTF(OBD_MD_MEA == (0x0000000400000000ULL), "found 0x%.16llxULL\n",
OBD_MD_MEA);
+ LASSERTF(OBD_MD_TSTATE == (0x0000000800000000ULL),
+ "found 0x%.16llxULL\n", OBD_MD_TSTATE);
LASSERTF(OBD_MD_FLXATTR == (0x0000001000000000ULL), "found 0x%.16llxULL\n",
OBD_MD_FLXATTR);
LASSERTF(OBD_MD_FLXATTRLS == (0x0000002000000000ULL), "found 0x%.16llxULL\n",
@@ -1918,10 +1923,11 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct mdt_body, blocks));
LASSERTF((int)sizeof(((struct mdt_body *)0)->blocks) == 8, "found %lld\n",
(long long)(int)sizeof(((struct mdt_body *)0)->blocks));
- LASSERTF((int)offsetof(struct mdt_body, unused1) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, unused1));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->unused1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->unused1));
+ LASSERTF((int)offsetof(struct mdt_body, t_state) == 96, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, t_state));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->t_state) == 8,
+ "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->t_state));
LASSERTF((int)offsetof(struct mdt_body, fsuid) == 104, "found %lld\n",
(long long)(int)offsetof(struct mdt_body, fsuid));
LASSERTF((int)sizeof(((struct mdt_body *)0)->fsuid) == 4, "found %lld\n",
@@ -4416,6 +4422,64 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct hsm_user_request *)0)->hur_user_item) == 0, "found %lld\n",
(long long)(int)sizeof(((struct hsm_user_request *)0)->hur_user_item));
+ /* Checks for struct hsm_user_import */
+ LASSERTF(sizeof(struct hsm_user_import) == 48, "found %lld\n",
+ (long long)sizeof(struct hsm_user_import));
+ LASSERTF(offsetof(struct hsm_user_import, hui_size) == 0,
+ "found %lld\n",
+ (long long)offsetof(struct hsm_user_import, hui_size));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_size) == 8,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_size));
+ LASSERTF(offsetof(struct hsm_user_import, hui_uid) == 32,
+ "found %lld\n",
+ (long long)offsetof(struct hsm_user_import, hui_uid));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_uid) == 4,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_uid));
+ LASSERTF(offsetof(struct hsm_user_import, hui_gid) == 36,
+ "found %lld\n",
+ (long long)offsetof(struct hsm_user_import, hui_gid));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_gid) == 4,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_gid));
+ LASSERTF(offsetof(struct hsm_user_import, hui_mode) == 40,
+ "found %lld\n",
+ (long long)offsetof(struct hsm_user_import, hui_mode));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_mode) == 4,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_mode));
+ LASSERTF(offsetof(struct hsm_user_import, hui_atime) == 8,
+ "found %lld\n",
+ (long long)offsetof(struct hsm_user_import, hui_atime));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_atime) == 8,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_atime));
+ LASSERTF(offsetof(struct hsm_user_import, hui_atime_ns) == 24,
+ "found %lld\n",
+ (long long)(int)offsetof(struct hsm_user_import, hui_atime_ns));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_atime_ns) == 4,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_atime_ns));
+ LASSERTF(offsetof(struct hsm_user_import, hui_mtime) == 16,
+ "found %lld\n",
+ (long long)offsetof(struct hsm_user_import, hui_mtime));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_mtime) == 8,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_mtime));
+ LASSERTF(offsetof(struct hsm_user_import, hui_mtime_ns) == 28,
+ "found %lld\n",
+ (long long)offsetof(struct hsm_user_import, hui_mtime_ns));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_mtime_ns) == 4,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_mtime_ns));
+ LASSERTF(offsetof(struct hsm_user_import, hui_archive_id) == 44,
+ "found %lld\n",
+ (long long)offsetof(struct hsm_user_import, hui_archive_id));
+ LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_archive_id) == 4,
+ "found %lld\n",
+ (long long)sizeof(((struct hsm_user_import *)0)->hui_archive_id));
+
/* Checks for struct update_buf */
LASSERTF((int)sizeof(struct update_buf) == 8, "found %lld\n",
(long long)(int)sizeof(struct update_buf));
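The generated lustre_assert_wire_constants() checks above pin struct offsets and sizes so that any change to the on-wire layout fails loudly. As a standalone illustration of the same idea, here is a hedged sketch using standard C11 static assertions; the struct and the expected numbers are invented and hold on common LP64 ABIs.

/* Hedged sketch: compile-time wire-layout checks, analogous to the
 * LASSERTF() offset/size checks above.  The struct is hypothetical. */
#include <stddef.h>
#include <stdint.h>

struct demo_wire_rec {
	uint64_t dr_id;		/* expected offset 0  */
	uint32_t dr_flags;	/* expected offset 8  */
	uint32_t dr_pad;	/* keeps the size a multiple of 8 */
};

_Static_assert(offsetof(struct demo_wire_rec, dr_id) == 0,
	       "dr_id moved");
_Static_assert(offsetof(struct demo_wire_rec, dr_flags) == 8,
	       "dr_flags moved");
_Static_assert(sizeof(struct demo_wire_rec) == 16,
	       "demo_wire_rec changed size");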
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 46f1e619cbd8..22b0c9d6f046 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -21,6 +21,8 @@ if STAGING_MEDIA
# Please keep them in alphabetic order
source "drivers/staging/media/as102/Kconfig"
+source "drivers/staging/media/bcm2048/Kconfig"
+
source "drivers/staging/media/cxd2099/Kconfig"
source "drivers/staging/media/davinci_vpfe/Kconfig"
@@ -31,8 +33,14 @@ source "drivers/staging/media/go7007/Kconfig"
source "drivers/staging/media/msi3101/Kconfig"
+source "drivers/staging/media/omap24xx/Kconfig"
+
+source "drivers/staging/media/sn9c102/Kconfig"
+
source "drivers/staging/media/solo6x10/Kconfig"
+source "drivers/staging/media/omap4iss/Kconfig"
+
# Keep LIRC at the end, as it has sub-menus
source "drivers/staging/media/lirc/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index eb7f30b1ccd8..bedc62aaede6 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_DVB_AS102) += as102/
+obj-$(CONFIG_I2C_BCM2048) += bcm2048/
obj-$(CONFIG_DVB_CXD2099) += cxd2099/
obj-$(CONFIG_LIRC_STAGING) += lirc/
obj-$(CONFIG_SOLO6X10) += solo6x10/
@@ -6,3 +7,7 @@ obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
obj-$(CONFIG_USB_MSI3101) += msi3101/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
+obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
+obj-$(CONFIG_USB_SN9C102) += sn9c102/
+obj-$(CONFIG_VIDEO_OMAP2) += omap24xx/
+obj-$(CONFIG_VIDEO_TCM825X) += omap24xx/
diff --git a/drivers/staging/media/as102/as102_drv.c b/drivers/staging/media/as102/as102_drv.c
index ac92eaf6c74b..09d64cd67502 100644
--- a/drivers/staging/media/as102/as102_drv.c
+++ b/drivers/staging/media/as102/as102_drv.c
@@ -19,7 +19,6 @@
*/
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mm.h>
@@ -112,8 +111,6 @@ static int as10x_pid_filter(struct as102_dev_t *dev,
struct as10x_bus_adapter_t *bus_adap = &dev->bus_adap;
int ret = -EFAULT;
- ENTER();
-
if (mutex_lock_interruptible(&dev->bus_adap.lock)) {
dprintk(debug, "mutex_lock_interruptible(lock) failed !\n");
return -EBUSY;
@@ -134,15 +131,14 @@ static int as10x_pid_filter(struct as102_dev_t *dev,
filter.pid = pid;
ret = as10x_cmd_add_PID_filter(bus_adap, &filter);
- dprintk(debug, "ADD_PID_FILTER([%02d -> %02d], 0x%04x) ret = %d\n",
+ dprintk(debug,
+ "ADD_PID_FILTER([%02d -> %02d], 0x%04x) ret = %d\n",
index, filter.idx, filter.pid, ret);
break;
}
}
mutex_unlock(&dev->bus_adap.lock);
-
- LEAVE();
return ret;
}
@@ -152,8 +148,6 @@ static int as102_dvb_dmx_start_feed(struct dvb_demux_feed *dvbdmxfeed)
struct dvb_demux *demux = dvbdmxfeed->demux;
struct as102_dev_t *as102_dev = demux->priv;
- ENTER();
-
if (mutex_lock_interruptible(&as102_dev->sem))
return -ERESTARTSYS;
@@ -165,7 +159,6 @@ static int as102_dvb_dmx_start_feed(struct dvb_demux_feed *dvbdmxfeed)
ret = as102_start_stream(as102_dev);
mutex_unlock(&as102_dev->sem);
- LEAVE();
return ret;
}
@@ -174,8 +167,6 @@ static int as102_dvb_dmx_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
struct dvb_demux *demux = dvbdmxfeed->demux;
struct as102_dev_t *as102_dev = demux->priv;
- ENTER();
-
if (mutex_lock_interruptible(&as102_dev->sem))
return -ERESTARTSYS;
@@ -187,7 +178,6 @@ static int as102_dvb_dmx_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
dvbdmxfeed->pid, 0);
mutex_unlock(&as102_dev->sem);
- LEAVE();
return 0;
}
diff --git a/drivers/staging/media/as102/as102_drv.h b/drivers/staging/media/as102/as102_drv.h
index b0e5a23bd532..a06837dcc05d 100644
--- a/drivers/staging/media/as102/as102_drv.h
+++ b/drivers/staging/media/as102/as102_drv.h
@@ -38,14 +38,6 @@ extern int elna_enable;
printk(args); \
} } while (0)
-#ifdef TRACE
-#define ENTER() pr_debug(">> enter %s\n", __func__)
-#define LEAVE() pr_debug("<< leave %s\n", __func__)
-#else
-#define ENTER()
-#define LEAVE()
-#endif
-
#define AS102_DEVICE_MAJOR 192
#define AS102_USB_BUF_SIZE 512
diff --git a/drivers/staging/media/as102/as102_fe.c b/drivers/staging/media/as102/as102_fe.c
index 9ce8c9daa2e7..b686b7617cdc 100644
--- a/drivers/staging/media/as102/as102_fe.c
+++ b/drivers/staging/media/as102/as102_fe.c
@@ -34,8 +34,6 @@ static int as102_fe_set_frontend(struct dvb_frontend *fe)
struct as102_dev_t *dev;
struct as10x_tune_args tune_args = { 0 };
- ENTER();
-
dev = (struct as102_dev_t *) fe->tuner_priv;
if (dev == NULL)
return -ENODEV;
@@ -52,7 +50,6 @@ static int as102_fe_set_frontend(struct dvb_frontend *fe)
mutex_unlock(&dev->bus_adap.lock);
- LEAVE();
return (ret < 0) ? -EINVAL : 0;
}
@@ -63,8 +60,6 @@ static int as102_fe_get_frontend(struct dvb_frontend *fe)
struct as102_dev_t *dev;
struct as10x_tps tps = { 0 };
- ENTER();
-
dev = (struct as102_dev_t *) fe->tuner_priv;
if (dev == NULL)
return -EINVAL;
@@ -80,13 +75,11 @@ static int as102_fe_get_frontend(struct dvb_frontend *fe)
mutex_unlock(&dev->bus_adap.lock);
- LEAVE();
return (ret < 0) ? -EINVAL : 0;
}
static int as102_fe_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *settings) {
- ENTER();
#if 0
dprintk(debug, "step_size = %d\n", settings->step_size);
@@ -97,7 +90,6 @@ static int as102_fe_get_tune_settings(struct dvb_frontend *fe,
settings->min_delay_ms = 1000;
- LEAVE();
return 0;
}
@@ -108,8 +100,6 @@ static int as102_fe_read_status(struct dvb_frontend *fe, fe_status_t *status)
struct as102_dev_t *dev;
struct as10x_tune_status tstate = { 0 };
- ENTER();
-
dev = (struct as102_dev_t *) fe->tuner_priv;
if (dev == NULL)
return -ENODEV;
@@ -151,8 +141,8 @@ static int as102_fe_read_status(struct dvb_frontend *fe, fe_status_t *status)
if (as10x_cmd_get_demod_stats(&dev->bus_adap,
(struct as10x_demod_stats *) &dev->demod_stats) < 0) {
memset(&dev->demod_stats, 0, sizeof(dev->demod_stats));
- dprintk(debug, "as10x_cmd_get_demod_stats failed "
- "(probably not tuned)\n");
+ dprintk(debug,
+ "as10x_cmd_get_demod_stats failed (probably not tuned)\n");
} else {
dprintk(debug,
"demod status: fc: 0x%08x, bad fc: 0x%08x, "
@@ -168,7 +158,6 @@ static int as102_fe_read_status(struct dvb_frontend *fe, fe_status_t *status)
out:
mutex_unlock(&dev->bus_adap.lock);
- LEAVE();
return ret;
}
@@ -183,15 +172,12 @@ static int as102_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct as102_dev_t *dev;
- ENTER();
-
dev = (struct as102_dev_t *) fe->tuner_priv;
if (dev == NULL)
return -ENODEV;
*snr = dev->demod_stats.mer;
- LEAVE();
return 0;
}
@@ -199,15 +185,12 @@ static int as102_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct as102_dev_t *dev;
- ENTER();
-
dev = (struct as102_dev_t *) fe->tuner_priv;
if (dev == NULL)
return -ENODEV;
*ber = dev->ber;
- LEAVE();
return 0;
}
@@ -216,15 +199,12 @@ static int as102_fe_read_signal_strength(struct dvb_frontend *fe,
{
struct as102_dev_t *dev;
- ENTER();
-
dev = (struct as102_dev_t *) fe->tuner_priv;
if (dev == NULL)
return -ENODEV;
*strength = (((0xffff * 400) * dev->signal_strength + 41000) * 2);
- LEAVE();
return 0;
}
@@ -232,8 +212,6 @@ static int as102_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
struct as102_dev_t *dev;
- ENTER();
-
dev = (struct as102_dev_t *) fe->tuner_priv;
if (dev == NULL)
return -ENODEV;
@@ -243,7 +221,6 @@ static int as102_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
else
*ucblocks = 0;
- LEAVE();
return 0;
}
@@ -252,8 +229,6 @@ static int as102_fe_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
struct as102_dev_t *dev;
int ret;
- ENTER();
-
dev = (struct as102_dev_t *) fe->tuner_priv;
if (dev == NULL)
return -ENODEV;
@@ -263,7 +238,8 @@ static int as102_fe_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
if (acquire) {
if (elna_enable)
- as10x_cmd_set_context(&dev->bus_adap, CONTEXT_LNA, dev->elna_cfg);
+ as10x_cmd_set_context(&dev->bus_adap,
+ CONTEXT_LNA, dev->elna_cfg);
ret = as10x_cmd_turn_on(&dev->bus_adap);
} else {
@@ -272,7 +248,6 @@ static int as102_fe_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
mutex_unlock(&dev->bus_adap.lock);
- LEAVE();
return ret;
}
@@ -581,8 +556,8 @@ static void as102_fe_copy_tune_parameters(struct as10x_tune_args *tune_args,
as102_fe_get_code_rate(params->code_rate_LP);
}
- dprintk(debug, "\thierarchy: 0x%02x "
- "selected: %s code_rate_%s: 0x%02x\n",
+ dprintk(debug,
+ "\thierarchy: 0x%02x selected: %s code_rate_%s: 0x%02x\n",
tune_args->hierarchy,
tune_args->hier_select == HIER_HIGH_PRIORITY ?
"HP" : "LP",
diff --git a/drivers/staging/media/as102/as102_fw.c b/drivers/staging/media/as102/as102_fw.c
index b9670ee41b4e..f33f752c0aad 100644
--- a/drivers/staging/media/as102/as102_fw.c
+++ b/drivers/staging/media/as102/as102_fw.c
@@ -26,10 +26,10 @@
#include "as102_drv.h"
#include "as102_fw.h"
-char as102_st_fw1[] = "as102_data1_st.hex";
-char as102_st_fw2[] = "as102_data2_st.hex";
-char as102_dt_fw1[] = "as102_data1_dt.hex";
-char as102_dt_fw2[] = "as102_data2_dt.hex";
+static const char as102_st_fw1[] = "as102_data1_st.hex";
+static const char as102_st_fw2[] = "as102_data2_st.hex";
+static const char as102_dt_fw1[] = "as102_data1_dt.hex";
+static const char as102_dt_fw2[] = "as102_data2_dt.hex";
static unsigned char atohx(unsigned char *dst, char *src)
{
@@ -109,8 +109,6 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
int total_read_bytes = 0, errno = 0;
unsigned char addr_has_changed = 0;
- ENTER();
-
for (total_read_bytes = 0; total_read_bytes < firmware->size; ) {
int read_bytes = 0, data_len = 0;
@@ -158,7 +156,6 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
}
}
error:
- LEAVE();
return (errno == 0) ? total_read_bytes : errno;
}
@@ -167,11 +164,9 @@ int as102_fw_upload(struct as10x_bus_adapter_t *bus_adap)
int errno = -EFAULT;
const struct firmware *firmware = NULL;
unsigned char *cmd_buf = NULL;
- char *fw1, *fw2;
+ const char *fw1, *fw2;
struct usb_device *dev = bus_adap->usb_dev;
- ENTER();
-
/* select fw file to upload */
if (dual_tuner) {
fw1 = as102_dt_fw1;
@@ -233,6 +228,5 @@ error:
kfree(cmd_buf);
release_firmware(firmware);
- LEAVE();
return errno;
}
diff --git a/drivers/staging/media/as102/as102_usb_drv.c b/drivers/staging/media/as102/as102_usb_drv.c
index 9f275f020150..e4a69454ebeb 100644
--- a/drivers/staging/media/as102/as102_usb_drv.c
+++ b/drivers/staging/media/as102/as102_usb_drv.c
@@ -92,7 +92,6 @@ static int as102_usb_xfer_cmd(struct as10x_bus_adapter_t *bus_adap,
unsigned char *recv_buf, int recv_buf_len)
{
int ret = 0;
- ENTER();
if (send_buf != NULL) {
ret = usb_control_msg(bus_adap->usb_dev,
@@ -140,7 +139,6 @@ static int as102_usb_xfer_cmd(struct as10x_bus_adapter_t *bus_adap,
#endif
}
- LEAVE();
return ret;
}
@@ -191,7 +189,7 @@ static int as102_read_ep2(struct as10x_bus_adapter_t *bus_adap,
return ret ? ret : actual_len;
}
-struct as102_priv_ops_t as102_priv_ops = {
+static struct as102_priv_ops_t as102_priv_ops = {
.upload_fw_pkt = as102_send_ep1,
.xfer_cmd = as102_usb_xfer_cmd,
.as102_read_ep2 = as102_read_ep2,
@@ -240,8 +238,6 @@ static void as102_free_usb_stream_buffer(struct as102_dev_t *dev)
{
int i;
- ENTER();
-
for (i = 0; i < MAX_STREAM_URB; i++)
usb_free_urb(dev->stream_urb[i]);
@@ -249,15 +245,12 @@ static void as102_free_usb_stream_buffer(struct as102_dev_t *dev)
MAX_STREAM_URB * AS102_USB_BUF_SIZE,
dev->stream,
dev->dma_addr);
- LEAVE();
}
static int as102_alloc_usb_stream_buffer(struct as102_dev_t *dev)
{
int i, ret = 0;
- ENTER();
-
dev->stream = usb_alloc_coherent(dev->bus_adap.usb_dev,
MAX_STREAM_URB * AS102_USB_BUF_SIZE,
GFP_KERNEL,
@@ -287,7 +280,6 @@ static int as102_alloc_usb_stream_buffer(struct as102_dev_t *dev)
dev->stream_urb[i] = urb;
}
- LEAVE();
return ret;
}
@@ -318,23 +310,17 @@ static void as102_usb_release(struct kref *kref)
{
struct as102_dev_t *as102_dev;
- ENTER();
-
as102_dev = container_of(kref, struct as102_dev_t, kref);
if (as102_dev != NULL) {
usb_put_dev(as102_dev->bus_adap.usb_dev);
kfree(as102_dev);
}
-
- LEAVE();
}
static void as102_usb_disconnect(struct usb_interface *intf)
{
struct as102_dev_t *as102_dev;
- ENTER();
-
/* extract as102_dev_t from usb_device private data */
as102_dev = usb_get_intfdata(intf);
@@ -353,8 +339,6 @@ static void as102_usb_disconnect(struct usb_interface *intf)
kref_put(&as102_dev->kref, as102_usb_release);
pr_info("%s: device has been disconnected\n", DRIVER_NAME);
-
- LEAVE();
}
static int as102_usb_probe(struct usb_interface *intf,
@@ -364,8 +348,6 @@ static int as102_usb_probe(struct usb_interface *intf,
struct as102_dev_t *as102_dev;
int i;
- ENTER();
-
/* This should never actually happen */
if (ARRAY_SIZE(as102_usb_id_table) !=
(sizeof(as102_device_names) / sizeof(const char *))) {
@@ -419,15 +401,21 @@ static int as102_usb_probe(struct usb_interface *intf,
/* request buffer allocation for streaming */
ret = as102_alloc_usb_stream_buffer(as102_dev);
if (ret != 0)
- goto failed;
+ goto failed_stream;
/* register dvb layer */
ret = as102_dvb_register(as102_dev);
+ if (ret != 0)
+ goto failed_dvb;
- LEAVE();
return ret;
+failed_dvb:
+ as102_free_usb_stream_buffer(as102_dev);
+failed_stream:
+ usb_deregister_dev(intf, &as102_usb_class_driver);
failed:
+ usb_put_dev(as102_dev->bus_adap.usb_dev);
usb_set_intfdata(intf, NULL);
kfree(as102_dev);
return ret;
@@ -439,8 +427,6 @@ static int as102_open(struct inode *inode, struct file *file)
struct usb_interface *intf = NULL;
struct as102_dev_t *dev = NULL;
- ENTER();
-
/* read minor from inode */
minor = iminor(inode);
@@ -467,7 +453,6 @@ static int as102_open(struct inode *inode, struct file *file)
kref_get(&dev->kref);
exit:
- LEAVE();
return ret;
}
@@ -476,15 +461,12 @@ static int as102_release(struct inode *inode, struct file *file)
int ret = 0;
struct as102_dev_t *dev = NULL;
- ENTER();
-
dev = file->private_data;
if (dev != NULL) {
/* decrement the count on our device */
kref_put(&dev->kref, as102_usb_release);
}
- LEAVE();
return ret;
}
diff --git a/drivers/staging/media/as102/as10x_cmd.c b/drivers/staging/media/as102/as10x_cmd.c
index a73df10982d0..9e49f15a7c9f 100644
--- a/drivers/staging/media/as102/as10x_cmd.c
+++ b/drivers/staging/media/as102/as10x_cmd.c
@@ -34,8 +34,6 @@ int as10x_cmd_turn_on(struct as10x_bus_adapter_t *adap)
int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -63,7 +61,6 @@ int as10x_cmd_turn_on(struct as10x_bus_adapter_t *adap)
error = as10x_rsp_parse(prsp, CONTROL_PROC_TURNON_RSP);
out:
- LEAVE();
return error;
}
@@ -78,8 +75,6 @@ int as10x_cmd_turn_off(struct as10x_bus_adapter_t *adap)
int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -106,7 +101,6 @@ int as10x_cmd_turn_off(struct as10x_bus_adapter_t *adap)
error = as10x_rsp_parse(prsp, CONTROL_PROC_TURNOFF_RSP);
out:
- LEAVE();
return error;
}
@@ -123,8 +117,6 @@ int as10x_cmd_set_tune(struct as10x_bus_adapter_t *adap,
int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *preq, *prsp;
- ENTER();
-
preq = adap->cmd;
prsp = adap->rsp;
@@ -164,7 +156,6 @@ int as10x_cmd_set_tune(struct as10x_bus_adapter_t *adap,
error = as10x_rsp_parse(prsp, CONTROL_PROC_SETTUNE_RSP);
out:
- LEAVE();
return error;
}
@@ -181,8 +172,6 @@ int as10x_cmd_get_tune_status(struct as10x_bus_adapter_t *adap,
int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *preq, *prsp;
- ENTER();
-
preq = adap->cmd;
prsp = adap->rsp;
@@ -220,7 +209,6 @@ int as10x_cmd_get_tune_status(struct as10x_bus_adapter_t *adap,
pstatus->BER = le16_to_cpu(prsp->body.get_tune_status.rsp.sts.BER);
out:
- LEAVE();
return error;
}
@@ -236,8 +224,6 @@ int as10x_cmd_get_tps(struct as10x_bus_adapter_t *adap, struct as10x_tps *ptps)
int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -281,7 +267,6 @@ int as10x_cmd_get_tps(struct as10x_bus_adapter_t *adap, struct as10x_tps *ptps)
ptps->cell_ID = le16_to_cpu(prsp->body.get_tps.rsp.tps.cell_ID);
out:
- LEAVE();
return error;
}
@@ -298,8 +283,6 @@ int as10x_cmd_get_demod_stats(struct as10x_bus_adapter_t *adap,
int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -343,7 +326,6 @@ int as10x_cmd_get_demod_stats(struct as10x_bus_adapter_t *adap,
prsp->body.get_demod_stats.rsp.stats.has_started;
out:
- LEAVE();
return error;
}
@@ -361,8 +343,6 @@ int as10x_cmd_get_impulse_resp(struct as10x_bus_adapter_t *adap,
int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -397,7 +377,6 @@ int as10x_cmd_get_impulse_resp(struct as10x_bus_adapter_t *adap,
*is_ready = prsp->body.get_impulse_rsp.rsp.is_ready;
out:
- LEAVE();
return error;
}
diff --git a/drivers/staging/media/as102/as10x_cmd_cfg.c b/drivers/staging/media/as102/as10x_cmd_cfg.c
index 4a2bbd766655..b1e300d88753 100644
--- a/drivers/staging/media/as102/as10x_cmd_cfg.c
+++ b/drivers/staging/media/as102/as10x_cmd_cfg.c
@@ -40,8 +40,6 @@ int as10x_cmd_get_context(struct as10x_bus_adapter_t *adap, uint16_t tag,
int error;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -81,7 +79,6 @@ int as10x_cmd_get_context(struct as10x_bus_adapter_t *adap, uint16_t tag,
}
out:
- LEAVE();
return error;
}
@@ -99,8 +96,6 @@ int as10x_cmd_set_context(struct as10x_bus_adapter_t *adap, uint16_t tag,
int error;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -136,7 +131,6 @@ int as10x_cmd_set_context(struct as10x_bus_adapter_t *adap, uint16_t tag,
error = as10x_context_rsp_parse(prsp, CONTROL_PROC_CONTEXT_RSP);
out:
- LEAVE();
return error;
}
@@ -156,8 +150,6 @@ int as10x_cmd_eLNA_change_mode(struct as10x_bus_adapter_t *adap, uint8_t mode)
int error;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -188,7 +180,6 @@ int as10x_cmd_eLNA_change_mode(struct as10x_bus_adapter_t *adap, uint8_t mode)
error = as10x_rsp_parse(prsp, CONTROL_PROC_ELNA_CHANGE_MODE_RSP);
out:
- LEAVE();
return error;
}
diff --git a/drivers/staging/media/as102/as10x_cmd_stream.c b/drivers/staging/media/as102/as10x_cmd_stream.c
index 6d000f60fb0e..1088ca1fe92f 100644
--- a/drivers/staging/media/as102/as10x_cmd_stream.c
+++ b/drivers/staging/media/as102/as10x_cmd_stream.c
@@ -34,8 +34,6 @@ int as10x_cmd_add_PID_filter(struct as10x_bus_adapter_t *adap,
int error;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -77,7 +75,6 @@ int as10x_cmd_add_PID_filter(struct as10x_bus_adapter_t *adap,
}
out:
- LEAVE();
return error;
}
@@ -94,8 +91,6 @@ int as10x_cmd_del_PID_filter(struct as10x_bus_adapter_t *adap,
int error;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -126,7 +121,6 @@ int as10x_cmd_del_PID_filter(struct as10x_bus_adapter_t *adap,
error = as10x_rsp_parse(prsp, CONTROL_PROC_REMOVEFILTER_RSP);
out:
- LEAVE();
return error;
}
@@ -141,8 +135,6 @@ int as10x_cmd_start_streaming(struct as10x_bus_adapter_t *adap)
int error;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -172,7 +164,6 @@ int as10x_cmd_start_streaming(struct as10x_bus_adapter_t *adap)
error = as10x_rsp_parse(prsp, CONTROL_PROC_START_STREAMING_RSP);
out:
- LEAVE();
return error;
}
@@ -187,8 +178,6 @@ int as10x_cmd_stop_streaming(struct as10x_bus_adapter_t *adap)
int8_t error;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -218,6 +207,5 @@ int as10x_cmd_stop_streaming(struct as10x_bus_adapter_t *adap)
error = as10x_rsp_parse(prsp, CONTROL_PROC_STOP_STREAMING_RSP);
out:
- LEAVE();
return error;
}
diff --git a/drivers/staging/media/bcm2048/Kconfig b/drivers/staging/media/bcm2048/Kconfig
new file mode 100644
index 000000000000..a9fc6e186494
--- /dev/null
+++ b/drivers/staging/media/bcm2048/Kconfig
@@ -0,0 +1,13 @@
+#
+# Multimedia Video device configuration
+#
+
+config I2C_BCM2048
+ tristate "Broadcom BCM2048 FM Radio Receiver support"
+ depends on I2C && VIDEO_V4L2 && RADIO_ADAPTERS
+ ---help---
+	  Say Y here if you want support for the BCM2048 FM Radio Receiver.
+	  This device driver supports only the I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-bcm2048.
diff --git a/drivers/staging/media/bcm2048/Makefile b/drivers/staging/media/bcm2048/Makefile
new file mode 100644
index 000000000000..b4f5663d1408
--- /dev/null
+++ b/drivers/staging/media/bcm2048/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_I2C_BCM2048) += radio-bcm2048.o
diff --git a/drivers/staging/media/bcm2048/TODO b/drivers/staging/media/bcm2048/TODO
new file mode 100644
index 000000000000..051f85dbe89e
--- /dev/null
+++ b/drivers/staging/media/bcm2048/TODO
@@ -0,0 +1,24 @@
+TODO:
+
+From the initial code review:
+
+The main thing you need to do is to implement all the controls using the
+control framework (see Documentation/video4linux/v4l2-controls.txt).
+Most drivers are by now converted to the control framework, so you will
+find many examples of how to do this in drivers/media/radio.
+
+The sysfs stuff should be replaced by controls as well. A lot of the RDS
+support is now available as controls (although there may well be some
+missing features, those are easy enough to add). Since the RDS data is
+actually read() from the device, I am not sure whether the RDS
+properties/controls should be there at all.
+
+Correct the coding style, as this driver also violates several style
+rules and does evil tricks, like returning from a function inside a
+macro.
+
+Finally, this driver should probably be split up into two parts: one
+v4l2_subdev-based core driver and one platform driver. See e.g.
+radio-si4713/si4713-i2c.c as a good example. But I would wait on that
+until the rest of the driver is cleaned up; then I will have a better
+idea of whether this is necessary or not.
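
[Editorial note, not part of the patch] The control-framework conversion asked for above mostly amounts to embedding a v4l2_ctrl_handler in the driver state and routing the .s_ctrl callback to the existing register helpers. Below is a minimal sketch for the mute control only; it assumes a ctrl_handler member is added to struct bcm2048_device, that <media/v4l2-ctrls.h> is included, and the helper name bcm2048_register_controls is hypothetical.

#include <media/v4l2-ctrls.h>

/* assumption: struct v4l2_ctrl_handler ctrl_handler; added to bcm2048_device */

static int bcm2048_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct bcm2048_device *bdev = container_of(ctrl->handler,
					struct bcm2048_device, ctrl_handler);

	switch (ctrl->id) {
	case V4L2_CID_AUDIO_MUTE:
		/* reuse the existing register helper from this patch */
		return bcm2048_set_mute(bdev, ctrl->val);
	}

	return -EINVAL;
}

static const struct v4l2_ctrl_ops bcm2048_ctrl_ops = {
	.s_ctrl = bcm2048_s_ctrl,
};

/* hypothetical helper, called from probe before registering the videodev */
static int bcm2048_register_controls(struct bcm2048_device *bdev)
{
	v4l2_ctrl_handler_init(&bdev->ctrl_handler, 1);
	v4l2_ctrl_new_std(&bdev->ctrl_handler, &bcm2048_ctrl_ops,
			  V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
	return bdev->ctrl_handler.error;
}

The handler would then be freed with v4l2_ctrl_handler_free() on disconnect; the sysfs attributes mentioned above could migrate to additional controls in the same handler.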
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
new file mode 100644
index 000000000000..b2cd3a85166d
--- /dev/null
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -0,0 +1,2744 @@
+/*
+ * drivers/staging/media/radio-bcm2048.c
+ *
+ * Driver for I2C Broadcom BCM2048 FM Radio Receiver:
+ *
+ * Copyright (C) Nokia Corporation
+ * Contact: Eero Nurkkala <ext-eero.nurkkala@nokia.com>
+ *
+ * Copyright (C) Nils Faerber <nils.faerber@kernelconcepts.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+/*
+ * History:
+ * Eero Nurkkala <ext-eero.nurkkala@nokia.com>
+ * Version 0.0.1
+ * - Initial implementation
+ * 2010-02-21 Nils Faerber <nils.faerber@kernelconcepts.de>
+ * Version 0.0.2
+ * - Add support for interrupt driven rds data reading
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/version.h>
+#include <linux/interrupt.h>
+#include <linux/sysfs.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/videodev2.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include "radio-bcm2048.h"
+
+/* driver definitions */
+#define BCM2048_DRIVER_AUTHOR "Eero Nurkkala <ext-eero.nurkkala@nokia.com>"
+#define BCM2048_DRIVER_NAME BCM2048_NAME
+#define BCM2048_DRIVER_VERSION KERNEL_VERSION(0, 0, 1)
+#define BCM2048_DRIVER_CARD "Broadcom bcm2048 FM Radio Receiver"
+#define BCM2048_DRIVER_DESC "I2C driver for BCM2048 FM Radio Receiver"
+
+/* I2C Control Registers */
+#define BCM2048_I2C_FM_RDS_SYSTEM 0x00
+#define BCM2048_I2C_FM_CTRL 0x01
+#define BCM2048_I2C_RDS_CTRL0 0x02
+#define BCM2048_I2C_RDS_CTRL1 0x03
+#define BCM2048_I2C_FM_AUDIO_PAUSE 0x04
+#define BCM2048_I2C_FM_AUDIO_CTRL0 0x05
+#define BCM2048_I2C_FM_AUDIO_CTRL1 0x06
+#define BCM2048_I2C_FM_SEARCH_CTRL0 0x07
+#define BCM2048_I2C_FM_SEARCH_CTRL1 0x08
+#define BCM2048_I2C_FM_SEARCH_TUNE_MODE 0x09
+#define BCM2048_I2C_FM_FREQ0 0x0a
+#define BCM2048_I2C_FM_FREQ1 0x0b
+#define BCM2048_I2C_FM_AF_FREQ0 0x0c
+#define BCM2048_I2C_FM_AF_FREQ1 0x0d
+#define BCM2048_I2C_FM_CARRIER 0x0e
+#define BCM2048_I2C_FM_RSSI 0x0f
+#define BCM2048_I2C_FM_RDS_MASK0 0x10
+#define BCM2048_I2C_FM_RDS_MASK1 0x11
+#define BCM2048_I2C_FM_RDS_FLAG0 0x12
+#define BCM2048_I2C_FM_RDS_FLAG1 0x13
+#define BCM2048_I2C_RDS_WLINE 0x14
+#define BCM2048_I2C_RDS_BLKB_MATCH0 0x16
+#define BCM2048_I2C_RDS_BLKB_MATCH1 0x17
+#define BCM2048_I2C_RDS_BLKB_MASK0 0x18
+#define BCM2048_I2C_RDS_BLKB_MASK1 0x19
+#define BCM2048_I2C_RDS_PI_MATCH0 0x1a
+#define BCM2048_I2C_RDS_PI_MATCH1 0x1b
+#define BCM2048_I2C_RDS_PI_MASK0 0x1c
+#define BCM2048_I2C_RDS_PI_MASK1 0x1d
+#define BCM2048_I2C_SPARE1 0x20
+#define BCM2048_I2C_SPARE2 0x21
+#define BCM2048_I2C_FM_RDS_REV 0x28
+#define BCM2048_I2C_SLAVE_CONFIGURATION 0x29
+#define BCM2048_I2C_RDS_DATA 0x80
+#define BCM2048_I2C_FM_BEST_TUNE_MODE 0x90
+
+/* BCM2048_I2C_FM_RDS_SYSTEM */
+#define BCM2048_FM_ON 0x01
+#define BCM2048_RDS_ON 0x02
+
+/* BCM2048_I2C_FM_CTRL */
+#define BCM2048_BAND_SELECT 0x01
+#define BCM2048_STEREO_MONO_AUTO_SELECT 0x02
+#define BCM2048_STEREO_MONO_MANUAL_SELECT 0x04
+#define BCM2048_STEREO_MONO_BLEND_SWITCH 0x08
+#define BCM2048_HI_LO_INJECTION 0x10
+
+/* BCM2048_I2C_RDS_CTRL0 */
+#define BCM2048_RBDS_RDS_SELECT 0x01
+#define BCM2048_FLUSH_FIFO 0x02
+
+/* BCM2048_I2C_FM_AUDIO_PAUSE */
+#define BCM2048_AUDIO_PAUSE_RSSI_TRESH 0x0f
+#define BCM2048_AUDIO_PAUSE_DURATION 0xf0
+
+/* BCM2048_I2C_FM_AUDIO_CTRL0 */
+#define BCM2048_RF_MUTE 0x01
+#define BCM2048_MANUAL_MUTE 0x02
+#define BCM2048_DAC_OUTPUT_LEFT 0x04
+#define BCM2048_DAC_OUTPUT_RIGHT 0x08
+#define BCM2048_AUDIO_ROUTE_DAC 0x10
+#define BCM2048_AUDIO_ROUTE_I2S 0x20
+#define BCM2048_DE_EMPHASIS_SELECT 0x40
+#define BCM2048_AUDIO_BANDWIDTH_SELECT 0x80
+
+/* BCM2048_I2C_FM_SEARCH_CTRL0 */
+#define BCM2048_SEARCH_RSSI_THRESHOLD 0x7f
+#define BCM2048_SEARCH_DIRECTION 0x80
+
+/* BCM2048_I2C_FM_SEARCH_TUNE_MODE */
+#define BCM2048_FM_AUTO_SEARCH 0x03
+
+/* BCM2048_I2C_FM_RSSI */
+#define BCM2048_RSSI_VALUE 0xff
+
+/* BCM2048_I2C_FM_RDS_MASK0 */
+/* BCM2048_I2C_FM_RDS_MASK1 */
+#define BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED 0x01
+#define BCM2048_FM_FLAG_SEARCH_TUNE_FAIL 0x02
+#define BCM2048_FM_FLAG_RSSI_LOW 0x04
+#define BCM2048_FM_FLAG_CARRIER_ERROR_HIGH 0x08
+#define BCM2048_FM_FLAG_AUDIO_PAUSE_INDICATION 0x10
+#define BCM2048_FLAG_STEREO_DETECTED 0x20
+#define BCM2048_FLAG_STEREO_ACTIVE 0x40
+
+/* BCM2048_I2C_RDS_DATA */
+#define BCM2048_SLAVE_ADDRESS 0x3f
+#define BCM2048_SLAVE_ENABLE 0x80
+
+/* BCM2048_I2C_FM_BEST_TUNE_MODE */
+#define BCM2048_BEST_TUNE_MODE 0x80
+
+#define BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED 0x01
+#define BCM2048_FM_FLAG_SEARCH_TUNE_FAIL 0x02
+#define BCM2048_FM_FLAG_RSSI_LOW 0x04
+#define BCM2048_FM_FLAG_CARRIER_ERROR_HIGH 0x08
+#define BCM2048_FM_FLAG_AUDIO_PAUSE_INDICATION 0x10
+#define BCM2048_FLAG_STEREO_DETECTED 0x20
+#define BCM2048_FLAG_STEREO_ACTIVE 0x40
+
+#define BCM2048_RDS_FLAG_FIFO_WLINE 0x02
+#define BCM2048_RDS_FLAG_B_BLOCK_MATCH 0x08
+#define BCM2048_RDS_FLAG_SYNC_LOST 0x10
+#define BCM2048_RDS_FLAG_PI_MATCH 0x20
+
+#define BCM2048_RDS_MARK_END_BYTE0 0x7C
+#define BCM2048_RDS_MARK_END_BYTEN 0xFF
+
+#define BCM2048_FM_FLAGS_ALL (FM_FLAG_SEARCH_TUNE_FINISHED | \
+ FM_FLAG_SEARCH_TUNE_FAIL | \
+ FM_FLAG_RSSI_LOW | \
+ FM_FLAG_CARRIER_ERROR_HIGH | \
+ FM_FLAG_AUDIO_PAUSE_INDICATION | \
+ FLAG_STEREO_DETECTED | FLAG_STEREO_ACTIVE)
+
+#define BCM2048_RDS_FLAGS_ALL (RDS_FLAG_FIFO_WLINE | \
+ RDS_FLAG_B_BLOCK_MATCH | \
+ RDS_FLAG_SYNC_LOST | RDS_FLAG_PI_MATCH)
+
+#define BCM2048_DEFAULT_TIMEOUT 1500
+#define BCM2048_AUTO_SEARCH_TIMEOUT 3000
+
+
+#define BCM2048_FREQDEV_UNIT 10000
+#define BCM2048_FREQV4L2_MULTI 625
+#define dev_to_v4l2(f) ((f * BCM2048_FREQDEV_UNIT) / BCM2048_FREQV4L2_MULTI)
+#define v4l2_to_dev(f) ((f * BCM2048_FREQV4L2_MULTI) / BCM2048_FREQDEV_UNIT)
+
+#define msb(x) ((u8)((u16) x >> 8))
+#define lsb(x) ((u8)((u16) x & 0x00FF))
+#define compose_u16(msb, lsb) (((u16)msb << 8) | lsb)
+
+#define BCM2048_DEFAULT_POWERING_DELAY 20
+#define BCM2048_DEFAULT_REGION 0x02
+#define BCM2048_DEFAULT_MUTE 0x01
+#define BCM2048_DEFAULT_RSSI_THRESHOLD 0x64
+#define BCM2048_DEFAULT_RDS_WLINE 0x7E
+
+#define BCM2048_FM_SEARCH_INACTIVE 0x00
+#define BCM2048_FM_PRE_SET_MODE 0x01
+#define BCM2048_FM_AUTO_SEARCH_MODE 0x02
+#define BCM2048_FM_AF_JUMP_MODE 0x03
+
+#define BCM2048_FREQUENCY_BASE 64000
+
+#define BCM2048_POWER_ON 0x01
+#define BCM2048_POWER_OFF 0x00
+
+#define BCM2048_ITEM_ENABLED 0x01
+#define BCM2048_SEARCH_DIRECTION_UP 0x01
+
+#define BCM2048_DE_EMPHASIS_75us 75
+#define BCM2048_DE_EMPHASIS_50us 50
+
+#define BCM2048_SCAN_FAIL 0x00
+#define BCM2048_SCAN_OK 0x01
+
+#define BCM2048_FREQ_ERROR_FLOOR -20
+#define BCM2048_FREQ_ERROR_ROOF 20
+
+/* -60 dB is reported as full signal strength */
+#define BCM2048_RSSI_LEVEL_BASE -60
+#define BCM2048_RSSI_LEVEL_ROOF -100
+#define BCM2048_RSSI_LEVEL_ROOF_NEG 100
+#define BCM2048_SIGNAL_MULTIPLIER (0xFFFF / \
+ (BCM2048_RSSI_LEVEL_ROOF_NEG + \
+ BCM2048_RSSI_LEVEL_BASE))
+
+#define BCM2048_RDS_FIFO_DUPLE_SIZE 0x03
+#define BCM2048_RDS_CRC_MASK 0x0F
+#define BCM2048_RDS_CRC_NONE 0x00
+#define BCM2048_RDS_CRC_MAX_2BITS 0x04
+#define BCM2048_RDS_CRC_LEAST_2BITS 0x08
+#define BCM2048_RDS_CRC_UNRECOVARABLE 0x0C
+
+#define BCM2048_RDS_BLOCK_MASK 0xF0
+#define BCM2048_RDS_BLOCK_A 0x00
+#define BCM2048_RDS_BLOCK_B 0x10
+#define BCM2048_RDS_BLOCK_C 0x20
+#define BCM2048_RDS_BLOCK_D 0x30
+#define BCM2048_RDS_BLOCK_C_SCORED 0x40
+#define BCM2048_RDS_BLOCK_E 0x60
+
+#define BCM2048_RDS_RT 0x20
+#define BCM2048_RDS_PS 0x00
+
+#define BCM2048_RDS_GROUP_AB_MASK 0x08
+#define BCM2048_RDS_GROUP_A 0x00
+#define BCM2048_RDS_GROUP_B 0x08
+
+#define BCM2048_RDS_RT_AB_MASK 0x10
+#define BCM2048_RDS_RT_A 0x00
+#define BCM2048_RDS_RT_B 0x10
+#define BCM2048_RDS_RT_INDEX 0x0F
+
+#define BCM2048_RDS_PS_INDEX 0x03
+
+struct rds_info {
+ u16 rds_pi;
+#define BCM2048_MAX_RDS_RT (64 + 1)
+ u8 rds_rt[BCM2048_MAX_RDS_RT];
+ u8 rds_rt_group_b;
+ u8 rds_rt_ab;
+#define BCM2048_MAX_RDS_PS (8 + 1)
+ u8 rds_ps[BCM2048_MAX_RDS_PS];
+ u8 rds_ps_group;
+ u8 rds_ps_group_cnt;
+#define BCM2048_MAX_RDS_RADIO_TEXT 255
+ u8 radio_text[BCM2048_MAX_RDS_RADIO_TEXT + 3];
+ u8 text_len;
+};
+
+struct region_info {
+ u32 bottom_frequency;
+ u32 top_frequency;
+ u8 deemphasis;
+ u8 channel_spacing;
+ u8 region;
+};
+
+struct bcm2048_device {
+ struct i2c_client *client;
+ struct video_device *videodev;
+ struct work_struct work;
+ struct completion compl;
+ struct mutex mutex;
+ struct bcm2048_platform_data *platform_data;
+ struct rds_info rds_info;
+ struct region_info region_info;
+ u16 frequency;
+ u8 cache_fm_rds_system;
+ u8 cache_fm_ctrl;
+ u8 cache_fm_audio_ctrl0;
+ u8 cache_fm_search_ctrl0;
+ u8 power_state;
+ u8 rds_state;
+ u8 fifo_size;
+ u8 scan_state;
+ u8 mute_state;
+
+ /* for rds data device read */
+ wait_queue_head_t read_queue;
+ unsigned int users;
+ unsigned char rds_data_available;
+ unsigned int rd_index;
+};
+
+static int radio_nr = -1; /* radio device minor (-1 ==> auto assign) */
+module_param(radio_nr, int, 0);
+MODULE_PARM_DESC(radio_nr,
+ "Minor number for radio device (-1 ==> auto assign)");
+
+static struct region_info region_configs[] = {
+ /* USA */
+ {
+ .channel_spacing = 20,
+ .bottom_frequency = 87500,
+ .top_frequency = 108000,
+ .deemphasis = 75,
+ .region = 0,
+ },
+ /* Australia */
+ {
+ .channel_spacing = 20,
+ .bottom_frequency = 87500,
+ .top_frequency = 108000,
+ .deemphasis = 50,
+ .region = 1,
+ },
+ /* Europe */
+ {
+ .channel_spacing = 10,
+ .bottom_frequency = 87500,
+ .top_frequency = 108000,
+ .deemphasis = 50,
+ .region = 2,
+ },
+ /* Japan */
+ {
+ .channel_spacing = 10,
+ .bottom_frequency = 76000,
+ .top_frequency = 90000,
+ .deemphasis = 50,
+ .region = 3,
+ },
+ /* Japan wide band */
+ {
+ .channel_spacing = 10,
+ .bottom_frequency = 76000,
+ .top_frequency = 108000,
+ .deemphasis = 50,
+ .region = 4,
+ },
+};
+
+/*
+ * I2C Interface read / write
+ */
+static int bcm2048_send_command(struct bcm2048_device *bdev, unsigned int reg,
+ unsigned int value)
+{
+ struct i2c_client *client = bdev->client;
+ u8 data[2];
+
+ if (!bdev->power_state) {
+ dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n");
+ return -EIO;
+ }
+
+ data[0] = reg & 0xff;
+ data[1] = value & 0xff;
+
+ if (i2c_master_send(client, data, 2) == 2) {
+ return 0;
+ } else {
+ dev_err(&bdev->client->dev, "BCM I2C error!\n");
+ dev_err(&bdev->client->dev, "Is Bluetooth up and running?\n");
+ return -EIO;
+ }
+}
+
+static int bcm2048_recv_command(struct bcm2048_device *bdev, unsigned int reg,
+ u8 *value)
+{
+ struct i2c_client *client = bdev->client;
+
+ if (!bdev->power_state) {
+ dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n");
+ return -EIO;
+ }
+
+ value[0] = i2c_smbus_read_byte_data(client, reg & 0xff);
+
+ return 0;
+}
+
+static int bcm2048_recv_duples(struct bcm2048_device *bdev, unsigned int reg,
+ u8 *value, u8 duples)
+{
+ struct i2c_client *client = bdev->client;
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg[2];
+ u8 buf;
+
+ if (!bdev->power_state) {
+ dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n");
+ return -EIO;
+ }
+
+ buf = reg & 0xff;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = client->flags & I2C_M_TEN;
+ msg[0].len = 1;
+ msg[0].buf = &buf;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = client->flags & I2C_M_TEN;
+ msg[1].flags |= I2C_M_RD;
+ msg[1].len = duples;
+ msg[1].buf = value;
+
+ return i2c_transfer(adap, msg, 2);
+}
+
+/*
+ * BCM2048 - I2C register programming helpers
+ */
+static int bcm2048_set_power_state(struct bcm2048_device *bdev, u8 power)
+{
+ int err = 0;
+
+ mutex_lock(&bdev->mutex);
+
+ if (power) {
+ bdev->power_state = BCM2048_POWER_ON;
+ bdev->cache_fm_rds_system |= BCM2048_FM_ON;
+ } else {
+ bdev->cache_fm_rds_system &= ~BCM2048_FM_ON;
+ }
+
+ /*
+ * Warning! FM cannot be turned off because then
+ * the I2C communications get ruined!
+ * Comment out the "if (power)" check when the chip works!
+ */
+ if (power)
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
+ bdev->cache_fm_rds_system);
+ msleep(BCM2048_DEFAULT_POWERING_DELAY);
+
+ if (!power)
+ bdev->power_state = BCM2048_POWER_OFF;
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_power_state(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err && (value & BCM2048_FM_ON))
+ return BCM2048_POWER_ON;
+
+ return err;
+}
+
+static int bcm2048_set_rds_no_lock(struct bcm2048_device *bdev, u8 rds_on)
+{
+ int err;
+ u8 flags;
+
+ bdev->cache_fm_rds_system &= ~BCM2048_RDS_ON;
+
+ if (rds_on) {
+ bdev->cache_fm_rds_system |= BCM2048_RDS_ON;
+ bdev->rds_state = BCM2048_RDS_ON;
+ flags = BCM2048_RDS_FLAG_FIFO_WLINE;
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1,
+ flags);
+ } else {
+ flags = 0;
+ bdev->rds_state = 0;
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1,
+ flags);
+ memset(&bdev->rds_info, 0, sizeof(bdev->rds_info));
+ }
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
+ bdev->cache_fm_rds_system);
+
+ return err;
+}
+
+static int bcm2048_get_rds_no_lock(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, &value);
+
+ if (!err && (value & BCM2048_RDS_ON))
+ return BCM2048_ITEM_ENABLED;
+
+ return err;
+}
+
+static int bcm2048_set_rds(struct bcm2048_device *bdev, u8 rds_on)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_set_rds_no_lock(bdev, rds_on);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_rds(struct bcm2048_device *bdev)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_get_rds_no_lock(bdev);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_rds_pi(struct bcm2048_device *bdev)
+{
+ return bdev->rds_info.rds_pi;
+}
+
+static int bcm2048_set_fm_automatic_stereo_mono(struct bcm2048_device *bdev,
+ u8 enabled)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ bdev->cache_fm_ctrl &= ~BCM2048_STEREO_MONO_AUTO_SELECT;
+
+ if (enabled)
+ bdev->cache_fm_ctrl |= BCM2048_STEREO_MONO_AUTO_SELECT;
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL,
+ bdev->cache_fm_ctrl);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_set_fm_hi_lo_injection(struct bcm2048_device *bdev,
+ u8 hi_lo)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ bdev->cache_fm_ctrl &= ~BCM2048_HI_LO_INJECTION;
+
+ if (hi_lo)
+ bdev->cache_fm_ctrl |= BCM2048_HI_LO_INJECTION;
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL,
+ bdev->cache_fm_ctrl);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_fm_hi_lo_injection(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_CTRL, &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err && (value & BCM2048_HI_LO_INJECTION))
+ return BCM2048_ITEM_ENABLED;
+
+ return err;
+}
+
+static int bcm2048_set_fm_frequency(struct bcm2048_device *bdev, u32 frequency)
+{
+ int err;
+
+ if (frequency < bdev->region_info.bottom_frequency ||
+ frequency > bdev->region_info.top_frequency)
+ return -EDOM;
+
+ frequency -= BCM2048_FREQUENCY_BASE;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_FREQ0, lsb(frequency));
+ err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_FREQ1,
+ msb(frequency));
+
+ if (!err)
+ bdev->frequency = frequency;
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_fm_frequency(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 lsb, msb;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_FREQ0, &lsb);
+ err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_FREQ1, &msb);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (err)
+ return err;
+
+ err = compose_u16(msb, lsb);
+ err += BCM2048_FREQUENCY_BASE;
+
+ return err;
+}
+
+static int bcm2048_set_fm_af_frequency(struct bcm2048_device *bdev,
+ u32 frequency)
+{
+ int err;
+
+ if (frequency < bdev->region_info.bottom_frequency ||
+ frequency > bdev->region_info.top_frequency)
+ return -EDOM;
+
+ frequency -= BCM2048_FREQUENCY_BASE;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AF_FREQ0,
+ lsb(frequency));
+ err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_AF_FREQ1,
+ msb(frequency));
+ if (!err)
+ bdev->frequency = frequency;
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_fm_af_frequency(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 lsb, msb;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AF_FREQ0, &lsb);
+ err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_AF_FREQ1, &msb);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (err)
+ return err;
+
+ err = compose_u16(msb, lsb);
+ err += BCM2048_FREQUENCY_BASE;
+
+ return err;
+}
+
+static int bcm2048_set_fm_deemphasis(struct bcm2048_device *bdev, int d)
+{
+ int err;
+ u8 deemphasis;
+
+ if (d == BCM2048_DE_EMPHASIS_75us)
+ deemphasis = BCM2048_DE_EMPHASIS_SELECT;
+ else
+ deemphasis = 0;
+
+ mutex_lock(&bdev->mutex);
+
+ bdev->cache_fm_audio_ctrl0 &= ~BCM2048_DE_EMPHASIS_SELECT;
+ bdev->cache_fm_audio_ctrl0 |= deemphasis;
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
+ bdev->cache_fm_audio_ctrl0);
+
+ if (!err)
+ bdev->region_info.deemphasis = d;
+
+ mutex_unlock(&bdev->mutex);
+
+ return err;
+}
+
+static int bcm2048_get_fm_deemphasis(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err) {
+ if (value & BCM2048_DE_EMPHASIS_SELECT)
+ return BCM2048_DE_EMPHASIS_75us;
+ else
+ return BCM2048_DE_EMPHASIS_50us;
+ }
+
+ return err;
+}
+
+static int bcm2048_set_region(struct bcm2048_device *bdev, u8 region)
+{
+ int err;
+ u32 new_frequency = 0;
+
+ if (region > ARRAY_SIZE(region_configs))
+ return -EINVAL;
+
+ mutex_lock(&bdev->mutex);
+ bdev->region_info = region_configs[region];
+ mutex_unlock(&bdev->mutex);
+
+ if (bdev->frequency < region_configs[region].bottom_frequency ||
+ bdev->frequency > region_configs[region].top_frequency)
+ new_frequency = region_configs[region].bottom_frequency;
+
+ if (new_frequency > 0) {
+ err = bcm2048_set_fm_frequency(bdev, new_frequency);
+
+ if (err)
+ goto done;
+ }
+
+ err = bcm2048_set_fm_deemphasis(bdev,
+ region_configs[region].deemphasis);
+
+done:
+ return err;
+}
+
+static int bcm2048_get_region(struct bcm2048_device *bdev)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+ err = bdev->region_info.region;
+ mutex_unlock(&bdev->mutex);
+
+ return err;
+}
+
+static int bcm2048_set_mute(struct bcm2048_device *bdev, u16 mute)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_RF_MUTE | BCM2048_MANUAL_MUTE);
+
+ if (mute)
+ bdev->cache_fm_audio_ctrl0 |= (BCM2048_RF_MUTE |
+ BCM2048_MANUAL_MUTE);
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
+ bdev->cache_fm_audio_ctrl0);
+
+ if (!err)
+ bdev->mute_state = mute;
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_mute(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ if (bdev->power_state) {
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
+ &value);
+ if (!err)
+ err = value & (BCM2048_RF_MUTE | BCM2048_MANUAL_MUTE);
+ } else {
+ err = bdev->mute_state;
+ }
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_set_audio_route(struct bcm2048_device *bdev, u8 route)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ route &= (BCM2048_AUDIO_ROUTE_DAC | BCM2048_AUDIO_ROUTE_I2S);
+ bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_AUDIO_ROUTE_DAC |
+ BCM2048_AUDIO_ROUTE_I2S);
+ bdev->cache_fm_audio_ctrl0 |= route;
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
+ bdev->cache_fm_audio_ctrl0);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_audio_route(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return value & (BCM2048_AUDIO_ROUTE_DAC |
+ BCM2048_AUDIO_ROUTE_I2S);
+
+ return err;
+}
+
+static int bcm2048_set_dac_output(struct bcm2048_device *bdev, u8 channels)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_DAC_OUTPUT_LEFT |
+ BCM2048_DAC_OUTPUT_RIGHT);
+ bdev->cache_fm_audio_ctrl0 |= channels;
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
+ bdev->cache_fm_audio_ctrl0);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_dac_output(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return value & (BCM2048_DAC_OUTPUT_LEFT |
+ BCM2048_DAC_OUTPUT_RIGHT);
+
+ return err;
+}
+
+static int bcm2048_set_fm_search_rssi_threshold(struct bcm2048_device *bdev,
+ u8 threshold)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ threshold &= BCM2048_SEARCH_RSSI_THRESHOLD;
+ bdev->cache_fm_search_ctrl0 &= ~BCM2048_SEARCH_RSSI_THRESHOLD;
+ bdev->cache_fm_search_ctrl0 |= threshold;
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0,
+ bdev->cache_fm_search_ctrl0);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_fm_search_rssi_threshold(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return value & BCM2048_SEARCH_RSSI_THRESHOLD;
+
+ return err;
+}
+
+static int bcm2048_set_fm_search_mode_direction(struct bcm2048_device *bdev,
+ u8 direction)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ bdev->cache_fm_search_ctrl0 &= ~BCM2048_SEARCH_DIRECTION;
+
+ if (direction)
+ bdev->cache_fm_search_ctrl0 |= BCM2048_SEARCH_DIRECTION;
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0,
+ bdev->cache_fm_search_ctrl0);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_fm_search_mode_direction(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err && (value & BCM2048_SEARCH_DIRECTION))
+ return BCM2048_SEARCH_DIRECTION_UP;
+
+ return err;
+}
+
+static int bcm2048_set_fm_search_tune_mode(struct bcm2048_device *bdev,
+ u8 mode)
+{
+ int err, timeout, restart_rds = 0;
+ u8 value, flags;
+
+ value = mode & BCM2048_FM_AUTO_SEARCH;
+
+ flags = BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED |
+ BCM2048_FM_FLAG_SEARCH_TUNE_FAIL;
+
+ mutex_lock(&bdev->mutex);
+
+ /*
+ * If RDS is enabled, and frequency is changed, RDS quits working.
+ * Thus, always restart RDS if it's enabled. Moreover, RDS must
+ * not be enabled while changing the frequency because it can
+ * race for the mutex with the workqueue handler if an RDS
+ * IRQ occurs while waiting for the frequency-changed IRQ.
+ */
+ if (bcm2048_get_rds_no_lock(bdev)) {
+ err = bcm2048_set_rds_no_lock(bdev, 0);
+ if (err)
+ goto unlock;
+ restart_rds = 1;
+ }
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK0, flags);
+
+ if (err)
+ goto unlock;
+
+ bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_TUNE_MODE, value);
+
+ if (mode != BCM2048_FM_AUTO_SEARCH_MODE)
+ timeout = BCM2048_DEFAULT_TIMEOUT;
+ else
+ timeout = BCM2048_AUTO_SEARCH_TIMEOUT;
+
+ if (!wait_for_completion_timeout(&bdev->compl,
+ msecs_to_jiffies(timeout)))
+ dev_err(&bdev->client->dev, "IRQ timeout.\n");
+
+ if (value)
+ if (!bdev->scan_state)
+ err = -EIO;
+
+unlock:
+ if (restart_rds)
+ err |= bcm2048_set_rds_no_lock(bdev, 1);
+
+ mutex_unlock(&bdev->mutex);
+
+ return err;
+}
+
+static int bcm2048_get_fm_search_tune_mode(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_TUNE_MODE,
+ &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return value & BCM2048_FM_AUTO_SEARCH;
+
+ return err;
+}
+
+static int bcm2048_set_rds_b_block_mask(struct bcm2048_device *bdev, u16 mask)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_send_command(bdev,
+ BCM2048_I2C_RDS_BLKB_MASK0, lsb(mask));
+ err |= bcm2048_send_command(bdev,
+ BCM2048_I2C_RDS_BLKB_MASK1, msb(mask));
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_rds_b_block_mask(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 lsb, msb;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev,
+ BCM2048_I2C_RDS_BLKB_MASK0, &lsb);
+ err |= bcm2048_recv_command(bdev,
+ BCM2048_I2C_RDS_BLKB_MASK1, &msb);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return compose_u16(msb, lsb);
+
+ return err;
+}
+
+static int bcm2048_set_rds_b_block_match(struct bcm2048_device *bdev,
+ u16 match)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_send_command(bdev,
+ BCM2048_I2C_RDS_BLKB_MATCH0, lsb(match));
+ err |= bcm2048_send_command(bdev,
+ BCM2048_I2C_RDS_BLKB_MATCH1, msb(match));
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_rds_b_block_match(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 lsb, msb;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev,
+ BCM2048_I2C_RDS_BLKB_MATCH0, &lsb);
+ err |= bcm2048_recv_command(bdev,
+ BCM2048_I2C_RDS_BLKB_MATCH1, &msb);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return compose_u16(msb, lsb);
+
+ return err;
+}
+
+static int bcm2048_set_rds_pi_mask(struct bcm2048_device *bdev, u16 mask)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_send_command(bdev,
+ BCM2048_I2C_RDS_PI_MASK0, lsb(mask));
+ err |= bcm2048_send_command(bdev,
+ BCM2048_I2C_RDS_PI_MASK1, msb(mask));
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_rds_pi_mask(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 lsb, msb;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev,
+ BCM2048_I2C_RDS_PI_MASK0, &lsb);
+ err |= bcm2048_recv_command(bdev,
+ BCM2048_I2C_RDS_PI_MASK1, &msb);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return compose_u16(msb, lsb);
+
+ return err;
+}
+
+static int bcm2048_set_rds_pi_match(struct bcm2048_device *bdev, u16 match)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_send_command(bdev,
+ BCM2048_I2C_RDS_PI_MATCH0, lsb(match));
+ err |= bcm2048_send_command(bdev,
+ BCM2048_I2C_RDS_PI_MATCH1, msb(match));
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_rds_pi_match(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 lsb, msb;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev,
+ BCM2048_I2C_RDS_PI_MATCH0, &lsb);
+ err |= bcm2048_recv_command(bdev,
+ BCM2048_I2C_RDS_PI_MATCH1, &msb);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return compose_u16(msb, lsb);
+
+ return err;
+}
+
+static int bcm2048_set_fm_rds_mask(struct bcm2048_device *bdev, u16 mask)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_send_command(bdev,
+ BCM2048_I2C_FM_RDS_MASK0, lsb(mask));
+ err |= bcm2048_send_command(bdev,
+ BCM2048_I2C_FM_RDS_MASK1, msb(mask));
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_fm_rds_mask(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value0, value1;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_MASK0, &value0);
+ err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_MASK1, &value1);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return compose_u16(value1, value0);
+
+ return err;
+}
+
+static int bcm2048_get_fm_rds_flags(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value0, value1;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG0, &value0);
+ err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG1, &value1);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return compose_u16(value1, value0);
+
+ return err;
+}
+
+static int bcm2048_get_region_bottom_frequency(struct bcm2048_device *bdev)
+{
+ return bdev->region_info.bottom_frequency;
+}
+
+static int bcm2048_get_region_top_frequency(struct bcm2048_device *bdev)
+{
+ return bdev->region_info.top_frequency;
+}
+
+static int bcm2048_set_fm_best_tune_mode(struct bcm2048_device *bdev, u8 mode)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ /* Perform read as the manual indicates */
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE,
+ &value);
+ value &= ~BCM2048_BEST_TUNE_MODE;
+
+ if (mode)
+ value |= BCM2048_BEST_TUNE_MODE;
+ err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE,
+ value);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_fm_best_tune_mode(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE,
+ &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err && (value & BCM2048_BEST_TUNE_MODE))
+ return BCM2048_ITEM_ENABLED;
+
+ return err;
+}
+
+static int bcm2048_get_fm_carrier_error(struct bcm2048_device *bdev)
+{
+ int err = 0;
+ s8 value;
+
+ mutex_lock(&bdev->mutex);
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_CARRIER, &value);
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return value;
+
+ return err;
+}
+
+static int bcm2048_get_fm_rssi(struct bcm2048_device *bdev)
+{
+ int err;
+ s8 value;
+
+ mutex_lock(&bdev->mutex);
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RSSI, &value);
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return value;
+
+ return err;
+}
+
+static int bcm2048_set_rds_wline(struct bcm2048_device *bdev, u8 wline)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_RDS_WLINE, wline);
+
+ if (!err)
+ bdev->fifo_size = wline;
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_rds_wline(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_RDS_WLINE, &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err) {
+ bdev->fifo_size = value;
+ return value;
+ }
+
+ return err;
+}
+
+static int bcm2048_checkrev(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 version;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_REV, &version);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err) {
+ dev_info(&bdev->client->dev, "BCM2048 Version 0x%x\n",
+ version);
+ return version;
+ }
+
+ return err;
+}
+
+static int bcm2048_get_rds_rt(struct bcm2048_device *bdev, char *data)
+{
+ int err = 0, i, j = 0, ce = 0, cr = 0;
+ char data_buffer[BCM2048_MAX_RDS_RT+1];
+
+ mutex_lock(&bdev->mutex);
+
+ if (!bdev->rds_info.text_len) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ for (i = 0; i < BCM2048_MAX_RDS_RT; i++) {
+ if (bdev->rds_info.rds_rt[i]) {
+ ce = i;
+ /* Skip the carriage return */
+ if (bdev->rds_info.rds_rt[i] != 0x0d) {
+ data_buffer[j++] = bdev->rds_info.rds_rt[i];
+ } else {
+ cr = i;
+ break;
+ }
+ }
+ }
+
+ if (j <= BCM2048_MAX_RDS_RT)
+ data_buffer[j] = 0;
+
+ for (i = 0; i < BCM2048_MAX_RDS_RT; i++) {
+ if (!bdev->rds_info.rds_rt[i]) {
+ if (cr && (i < cr)) {
+ err = -EBUSY;
+ goto unlock;
+ }
+ if (i < ce) {
+ if (cr && (i >= cr))
+ break;
+ err = -EBUSY;
+ goto unlock;
+ }
+ }
+ }
+
+ memcpy(data, data_buffer, sizeof(data_buffer));
+
+unlock:
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_rds_ps(struct bcm2048_device *bdev, char *data)
+{
+ int err = 0, i, j = 0;
+ char data_buffer[BCM2048_MAX_RDS_PS+1];
+
+ mutex_lock(&bdev->mutex);
+
+ if (!bdev->rds_info.text_len) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ for (i = 0; i < BCM2048_MAX_RDS_PS; i++) {
+ if (bdev->rds_info.rds_ps[i]) {
+ data_buffer[j++] = bdev->rds_info.rds_ps[i];
+ } else {
+ if (i < (BCM2048_MAX_RDS_PS - 1)) {
+ err = -EBUSY;
+ goto unlock;
+ }
+ }
+ }
+
+ if (j <= BCM2048_MAX_RDS_PS)
+ data_buffer[j] = 0;
+
+ memcpy(data, data_buffer, sizeof(data_buffer));
+
+unlock:
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static void bcm2048_parse_rds_pi(struct bcm2048_device *bdev)
+{
+ int i, cnt = 0;
+ u16 pi;
+
+ for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) {
+
+ /* Block A match, only data without crc errors taken */
+ if (bdev->rds_info.radio_text[i] == BCM2048_RDS_BLOCK_A) {
+
+ pi = ((bdev->rds_info.radio_text[i+1] << 8) +
+ bdev->rds_info.radio_text[i+2]);
+
+ if (!bdev->rds_info.rds_pi) {
+ bdev->rds_info.rds_pi = pi;
+ return;
+ }
+ if (pi != bdev->rds_info.rds_pi) {
+ cnt++;
+ if (cnt > 3) {
+ bdev->rds_info.rds_pi = pi;
+ cnt = 0;
+ }
+ } else {
+ cnt = 0;
+ }
+ }
+ }
+}
+
+static int bcm2048_rds_block_crc(struct bcm2048_device *bdev, int i)
+{
+ return bdev->rds_info.radio_text[i] & BCM2048_RDS_CRC_MASK;
+}
+
+static void bcm2048_parse_rds_rt_block(struct bcm2048_device *bdev, int i,
+ int index, int crc)
+{
+ /* Good data will overwrite poor data */
+ if (crc) {
+ if (!bdev->rds_info.rds_rt[index])
+ bdev->rds_info.rds_rt[index] =
+ bdev->rds_info.radio_text[i+1];
+ if (!bdev->rds_info.rds_rt[index+1])
+ bdev->rds_info.rds_rt[index+1] =
+ bdev->rds_info.radio_text[i+2];
+ } else {
+ bdev->rds_info.rds_rt[index] = bdev->rds_info.radio_text[i+1];
+ bdev->rds_info.rds_rt[index+1] =
+ bdev->rds_info.radio_text[i+2];
+ }
+}
+
+static int bcm2048_parse_rt_match_b(struct bcm2048_device *bdev, int i)
+{
+ int crc, rt_id, rt_group_b, rt_ab, index = 0;
+
+ crc = bcm2048_rds_block_crc(bdev, i);
+
+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+ return -EIO;
+
+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+ BCM2048_RDS_BLOCK_B) {
+
+ rt_id = (bdev->rds_info.radio_text[i+1] &
+ BCM2048_RDS_BLOCK_MASK);
+ rt_group_b = bdev->rds_info.radio_text[i+1] &
+ BCM2048_RDS_GROUP_AB_MASK;
+ rt_ab = bdev->rds_info.radio_text[i+2] &
+ BCM2048_RDS_RT_AB_MASK;
+
+ if (rt_group_b != bdev->rds_info.rds_rt_group_b) {
+ memset(bdev->rds_info.rds_rt, 0,
+ sizeof(bdev->rds_info.rds_rt));
+ bdev->rds_info.rds_rt_group_b = rt_group_b;
+ }
+
+ if (rt_id == BCM2048_RDS_RT) {
+ /* A to B (or vice versa) means: clear screen */
+ if (rt_ab != bdev->rds_info.rds_rt_ab) {
+ memset(bdev->rds_info.rds_rt, 0,
+ sizeof(bdev->rds_info.rds_rt));
+ bdev->rds_info.rds_rt_ab = rt_ab;
+ }
+
+ index = bdev->rds_info.radio_text[i+2] &
+ BCM2048_RDS_RT_INDEX;
+
+ if (bdev->rds_info.rds_rt_group_b)
+ index <<= 1;
+ else
+ index <<= 2;
+
+ return index;
+ }
+ }
+
+ return -EIO;
+}
+
+static int bcm2048_parse_rt_match_c(struct bcm2048_device *bdev, int i,
+ int index)
+{
+ int crc;
+
+ crc = bcm2048_rds_block_crc(bdev, i);
+
+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+ return 0;
+
+ BUG_ON((index+2) >= BCM2048_MAX_RDS_RT);
+
+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+ BCM2048_RDS_BLOCK_C) {
+ if (bdev->rds_info.rds_rt_group_b)
+ return 1;
+ bcm2048_parse_rds_rt_block(bdev, i, index, crc);
+ return 1;
+ }
+
+ return 0;
+}
+
+static void bcm2048_parse_rt_match_d(struct bcm2048_device *bdev, int i,
+ int index)
+{
+ int crc;
+
+ crc = bcm2048_rds_block_crc(bdev, i);
+
+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+ return;
+
+ BUG_ON((index+4) >= BCM2048_MAX_RDS_RT);
+
+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+ BCM2048_RDS_BLOCK_D)
+ bcm2048_parse_rds_rt_block(bdev, i, index+2, crc);
+}
+
+static int bcm2048_parse_rds_rt(struct bcm2048_device *bdev)
+{
+ int i, index = 0, crc, match_b = 0, match_c = 0, match_d = 0;
+
+ for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) {
+
+ if (match_b) {
+ match_b = 0;
+ index = bcm2048_parse_rt_match_b(bdev, i);
+ if (index >= 0 && index <= (BCM2048_MAX_RDS_RT - 5))
+ match_c = 1;
+ continue;
+ } else if (match_c) {
+ match_c = 0;
+ if (bcm2048_parse_rt_match_c(bdev, i, index))
+ match_d = 1;
+ continue;
+ } else if (match_d) {
+ match_d = 0;
+ bcm2048_parse_rt_match_d(bdev, i, index);
+ continue;
+ }
+
+ /* Skip erroneous blocks due to messed up A block altogether */
+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK)
+ == BCM2048_RDS_BLOCK_A) {
+ crc = bcm2048_rds_block_crc(bdev, i);
+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+ continue;
+ /* Synchronize to a good RDS PI */
+ if (((bdev->rds_info.radio_text[i+1] << 8) +
+ bdev->rds_info.radio_text[i+2]) ==
+ bdev->rds_info.rds_pi)
+ match_b = 1;
+ }
+ }
+
+ return 0;
+}
+
+static void bcm2048_parse_rds_ps_block(struct bcm2048_device *bdev, int i,
+ int index, int crc)
+{
+ /* Good data will overwrite poor data */
+ if (crc) {
+ if (!bdev->rds_info.rds_ps[index])
+ bdev->rds_info.rds_ps[index] =
+ bdev->rds_info.radio_text[i+1];
+ if (!bdev->rds_info.rds_ps[index+1])
+ bdev->rds_info.rds_ps[index+1] =
+ bdev->rds_info.radio_text[i+2];
+ } else {
+ bdev->rds_info.rds_ps[index] = bdev->rds_info.radio_text[i+1];
+ bdev->rds_info.rds_ps[index+1] =
+ bdev->rds_info.radio_text[i+2];
+ }
+}
+
+static int bcm2048_parse_ps_match_c(struct bcm2048_device *bdev, int i,
+ int index)
+{
+ int crc;
+
+ crc = bcm2048_rds_block_crc(bdev, i);
+
+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+ return 0;
+
+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+ BCM2048_RDS_BLOCK_C)
+ return 1;
+
+ return 0;
+}
+
+static void bcm2048_parse_ps_match_d(struct bcm2048_device *bdev, int i,
+ int index)
+{
+ int crc;
+
+ crc = bcm2048_rds_block_crc(bdev, i);
+
+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+ return;
+
+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+ BCM2048_RDS_BLOCK_D)
+ bcm2048_parse_rds_ps_block(bdev, i, index, crc);
+}
+
+static int bcm2048_parse_ps_match_b(struct bcm2048_device *bdev, int i)
+{
+ int crc, index, ps_id, ps_group;
+
+ crc = bcm2048_rds_block_crc(bdev, i);
+
+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+ return -EIO;
+
+ /* Block B Radio PS match */
+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+ BCM2048_RDS_BLOCK_B) {
+ ps_id = bdev->rds_info.radio_text[i+1] &
+ BCM2048_RDS_BLOCK_MASK;
+ ps_group = bdev->rds_info.radio_text[i+1] &
+ BCM2048_RDS_GROUP_AB_MASK;
+
+ /*
+ * Poor RSSI will lead to RDS data corruption
+ * So using 3 (same) sequential values to justify major changes
+ */
+ if (ps_group != bdev->rds_info.rds_ps_group) {
+ if (crc == BCM2048_RDS_CRC_NONE) {
+ bdev->rds_info.rds_ps_group_cnt++;
+ if (bdev->rds_info.rds_ps_group_cnt > 2) {
+ bdev->rds_info.rds_ps_group = ps_group;
+ bdev->rds_info.rds_ps_group_cnt = 0;
+ dev_err(&bdev->client->dev,
+ "RDS PS Group change!\n");
+ } else {
+ return -EIO;
+ }
+ } else {
+ bdev->rds_info.rds_ps_group_cnt = 0;
+ }
+ }
+
+ if (ps_id == BCM2048_RDS_PS) {
+ index = bdev->rds_info.radio_text[i+2] &
+ BCM2048_RDS_PS_INDEX;
+ index <<= 1;
+ return index;
+ }
+ }
+
+ return -EIO;
+}
+
+static void bcm2048_parse_rds_ps(struct bcm2048_device *bdev)
+{
+ int i, index = 0, crc, match_b = 0, match_c = 0, match_d = 0;
+
+ for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) {
+
+ if (match_b) {
+ match_b = 0;
+ index = bcm2048_parse_ps_match_b(bdev, i);
+ if (index >= 0 && index < (BCM2048_MAX_RDS_PS - 1))
+ match_c = 1;
+ continue;
+ } else if (match_c) {
+ match_c = 0;
+ if (bcm2048_parse_ps_match_c(bdev, i, index))
+ match_d = 1;
+ continue;
+ } else if (match_d) {
+ match_d = 0;
+ bcm2048_parse_ps_match_d(bdev, i, index);
+ continue;
+ }
+
+		/* Skip the whole group when its A block is corrupted */
+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK)
+ == BCM2048_RDS_BLOCK_A) {
+ crc = bcm2048_rds_block_crc(bdev, i);
+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+ continue;
+			/* Synchronize to a good RDS PI */
+ if (((bdev->rds_info.radio_text[i+1] << 8) +
+ bdev->rds_info.radio_text[i+2]) ==
+ bdev->rds_info.rds_pi)
+ match_b = 1;
+ }
+ }
+}
+
+static void bcm2048_rds_fifo_receive(struct bcm2048_device *bdev)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_duples(bdev, BCM2048_I2C_RDS_DATA,
+ bdev->rds_info.radio_text, bdev->fifo_size);
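+	/*
+	 * The FIFO read is expected to complete as two i2c messages, hence
+	 * the check against 2; any other value means the transfer failed.
+	 */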
+ if (err != 2) {
+ dev_err(&bdev->client->dev, "RDS Read problem\n");
+ mutex_unlock(&bdev->mutex);
+ return;
+ }
+
+ bdev->rds_info.text_len = bdev->fifo_size;
+
+ bcm2048_parse_rds_pi(bdev);
+ bcm2048_parse_rds_rt(bdev);
+ bcm2048_parse_rds_ps(bdev);
+
+ mutex_unlock(&bdev->mutex);
+
+ wake_up_interruptible(&bdev->read_queue);
+}
+
+static int bcm2048_get_rds_data(struct bcm2048_device *bdev, char *data)
+{
+ int err = 0, i, p = 0;
+ char *data_buffer;
+
+ mutex_lock(&bdev->mutex);
+
+ if (!bdev->rds_info.text_len) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ data_buffer = kzalloc(BCM2048_MAX_RDS_RADIO_TEXT*5, GFP_KERNEL);
+ if (!data_buffer) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
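+	/*
+	 * Emit each RDS byte as ASCII hex followed by a space (at most three
+	 * characters per byte), well within the 5x buffer allocated above.
+	 */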
+ for (i = 0; i < bdev->rds_info.text_len; i++) {
+ p += sprintf(data_buffer+p, "%x ",
+ bdev->rds_info.radio_text[i]);
+ }
+
+ memcpy(data, data_buffer, p);
+ kfree(data_buffer);
+
+unlock:
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+/*
+ * BCM2048 default initialization sequence
+ */
+static int bcm2048_init(struct bcm2048_device *bdev)
+{
+ int err;
+
+ err = bcm2048_set_power_state(bdev, BCM2048_POWER_ON);
+ if (err < 0)
+ goto exit;
+
+ err = bcm2048_set_audio_route(bdev, BCM2048_AUDIO_ROUTE_DAC);
+ if (err < 0)
+ goto exit;
+
+ err = bcm2048_set_dac_output(bdev, BCM2048_DAC_OUTPUT_LEFT |
+ BCM2048_DAC_OUTPUT_RIGHT);
+
+exit:
+ return err;
+}
+
+/*
+ * BCM2048 default deinitialization sequence
+ */
+static int bcm2048_deinit(struct bcm2048_device *bdev)
+{
+ int err;
+
+ err = bcm2048_set_audio_route(bdev, 0);
+ if (err < 0)
+ goto exit;
+
+ err = bcm2048_set_dac_output(bdev, 0);
+ if (err < 0)
+ goto exit;
+
+ err = bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
+ if (err < 0)
+ goto exit;
+
+exit:
+ return err;
+}
+
+/*
+ * BCM2048 probe sequence
+ */
+static int bcm2048_probe(struct bcm2048_device *bdev)
+{
+ int err;
+
+ err = bcm2048_set_power_state(bdev, BCM2048_POWER_ON);
+ if (err < 0)
+ goto unlock;
+
+ err = bcm2048_checkrev(bdev);
+ if (err < 0)
+ goto unlock;
+
+ err = bcm2048_set_mute(bdev, BCM2048_DEFAULT_MUTE);
+ if (err < 0)
+ goto unlock;
+
+ err = bcm2048_set_region(bdev, BCM2048_DEFAULT_REGION);
+ if (err < 0)
+ goto unlock;
+
+ err = bcm2048_set_fm_search_rssi_threshold(bdev,
+ BCM2048_DEFAULT_RSSI_THRESHOLD);
+ if (err < 0)
+ goto unlock;
+
+ err = bcm2048_set_fm_automatic_stereo_mono(bdev, BCM2048_ITEM_ENABLED);
+ if (err < 0)
+ goto unlock;
+
+ err = bcm2048_get_rds_wline(bdev);
+ if (err < BCM2048_DEFAULT_RDS_WLINE)
+ err = bcm2048_set_rds_wline(bdev, BCM2048_DEFAULT_RDS_WLINE);
+ if (err < 0)
+ goto unlock;
+
+ err = bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
+
+ init_waitqueue_head(&bdev->read_queue);
+ bdev->rds_data_available = 0;
+ bdev->rd_index = 0;
+ bdev->users = 0;
+
+unlock:
+ return err;
+}
+
+/*
+ * BCM2048 workqueue handler
+ */
+static void bcm2048_work(struct work_struct *work)
+{
+ struct bcm2048_device *bdev;
+ u8 flag_lsb, flag_msb, flags;
+
+ bdev = container_of(work, struct bcm2048_device, work);
+ bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG0, &flag_lsb);
+ bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG1, &flag_msb);
+
+ if (flag_lsb & (BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED |
+ BCM2048_FM_FLAG_SEARCH_TUNE_FAIL)) {
+
+ if (flag_lsb & BCM2048_FM_FLAG_SEARCH_TUNE_FAIL)
+ bdev->scan_state = BCM2048_SCAN_FAIL;
+ else
+ bdev->scan_state = BCM2048_SCAN_OK;
+
+ complete(&bdev->compl);
+ }
+
+ if (flag_msb & BCM2048_RDS_FLAG_FIFO_WLINE) {
+ bcm2048_rds_fifo_receive(bdev);
+ if (bdev->rds_state) {
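+			/*
+			 * RDS is still enabled: write the water-line flag
+			 * back to the interrupt mask register so the next
+			 * FIFO burst is signalled again.
+			 */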
+ flags = BCM2048_RDS_FLAG_FIFO_WLINE;
+ bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1,
+ flags);
+ }
+ bdev->rds_data_available = 1;
+ bdev->rd_index = 0; /* new data, new start */
+ }
+}
+
+/*
+ * BCM2048 interrupt handler
+ */
+static irqreturn_t bcm2048_handler(int irq, void *dev)
+{
+ struct bcm2048_device *bdev = dev;
+
+ dev_dbg(&bdev->client->dev, "IRQ called, queuing work\n");
+ if (bdev->power_state)
+ schedule_work(&bdev->work);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * BCM2048 sysfs interface definitions
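+ *
+ * Each property_write()/property_read() expansion generates a sysfs
+ * store/show handler that forwards to the matching bcm2048_set_<prop>()
+ * or bcm2048_get_<prop>() accessor, with an optional range check on
+ * written values.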
+ */
+#define property_write(prop, type, mask, check) \
+static ssize_t bcm2048_##prop##_write(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+{ \
+ struct bcm2048_device *bdev = dev_get_drvdata(dev); \
+ type value; \
+ int err; \
+ \
+ if (!bdev) \
+ return -ENODEV; \
+ \
+ sscanf(buf, mask, &value); \
+ \
+ if (check) \
+ return -EDOM; \
+ \
+ err = bcm2048_set_##prop(bdev, value); \
+ \
+ return err < 0 ? err : count; \
+}
+
+#define property_read(prop, size, mask) \
+static ssize_t bcm2048_##prop##_read(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct bcm2048_device *bdev = dev_get_drvdata(dev); \
+ int value; \
+ \
+ if (!bdev) \
+ return -ENODEV; \
+ \
+ value = bcm2048_get_##prop(bdev); \
+ \
+ if (value >= 0) \
+ value = sprintf(buf, mask "\n", value); \
+ \
+ return value; \
+}
+
+#define property_signed_read(prop, size, mask) \
+static ssize_t bcm2048_##prop##_read(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct bcm2048_device *bdev = dev_get_drvdata(dev); \
+ size value; \
+ \
+ if (!bdev) \
+ return -ENODEV; \
+ \
+ value = bcm2048_get_##prop(bdev); \
+ \
+ value = sprintf(buf, mask "\n", value); \
+ \
+ return value; \
+}
+
+#define DEFINE_SYSFS_PROPERTY(prop, signal, size, mask, check) \
+property_write(prop, signal size, mask, check) \
+property_read(prop, size, mask)
+
+#define property_str_read(prop, size) \
+static ssize_t bcm2048_##prop##_read(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct bcm2048_device *bdev = dev_get_drvdata(dev); \
+ int count; \
+ u8 *out; \
+ \
+ if (!bdev) \
+ return -ENODEV; \
+ \
+ out = kzalloc(size + 1, GFP_KERNEL); \
+ if (!out) \
+ return -ENOMEM; \
+ \
+ bcm2048_get_##prop(bdev, out); \
+ count = sprintf(buf, "%s\n", out); \
+ \
+ kfree(out); \
+ \
+ return count; \
+}
+
+DEFINE_SYSFS_PROPERTY(power_state, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(mute, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(audio_route, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(dac_output, unsigned, int, "%u", 0)
+
+DEFINE_SYSFS_PROPERTY(fm_hi_lo_injection, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_frequency, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_af_frequency, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_deemphasis, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_rds_mask, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_best_tune_mode, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_search_rssi_threshold, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_search_mode_direction, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_search_tune_mode, unsigned, int, "%u", value > 3)
+
+DEFINE_SYSFS_PROPERTY(rds, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_b_block_mask, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_b_block_match, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_pi_mask, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_pi_match, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_wline, unsigned, int, "%u", 0)
+property_read(rds_pi, unsigned int, "%x")
+property_str_read(rds_rt, (BCM2048_MAX_RDS_RT + 1))
+property_str_read(rds_ps, (BCM2048_MAX_RDS_PS + 1))
+
+property_read(fm_rds_flags, unsigned int, "%u")
+property_str_read(rds_data, BCM2048_MAX_RDS_RADIO_TEXT*5)
+
+property_read(region_bottom_frequency, unsigned int, "%u")
+property_read(region_top_frequency, unsigned int, "%u")
+property_signed_read(fm_carrier_error, int, "%d")
+property_signed_read(fm_rssi, int, "%d")
+DEFINE_SYSFS_PROPERTY(region, unsigned, int, "%u", 0)
+
+static struct device_attribute attrs[] = {
+ __ATTR(power_state, S_IRUGO | S_IWUSR, bcm2048_power_state_read,
+ bcm2048_power_state_write),
+ __ATTR(mute, S_IRUGO | S_IWUSR, bcm2048_mute_read,
+ bcm2048_mute_write),
+ __ATTR(audio_route, S_IRUGO | S_IWUSR, bcm2048_audio_route_read,
+ bcm2048_audio_route_write),
+ __ATTR(dac_output, S_IRUGO | S_IWUSR, bcm2048_dac_output_read,
+ bcm2048_dac_output_write),
+ __ATTR(fm_hi_lo_injection, S_IRUGO | S_IWUSR,
+ bcm2048_fm_hi_lo_injection_read,
+ bcm2048_fm_hi_lo_injection_write),
+ __ATTR(fm_frequency, S_IRUGO | S_IWUSR, bcm2048_fm_frequency_read,
+ bcm2048_fm_frequency_write),
+ __ATTR(fm_af_frequency, S_IRUGO | S_IWUSR,
+ bcm2048_fm_af_frequency_read,
+ bcm2048_fm_af_frequency_write),
+ __ATTR(fm_deemphasis, S_IRUGO | S_IWUSR, bcm2048_fm_deemphasis_read,
+ bcm2048_fm_deemphasis_write),
+ __ATTR(fm_rds_mask, S_IRUGO | S_IWUSR, bcm2048_fm_rds_mask_read,
+ bcm2048_fm_rds_mask_write),
+ __ATTR(fm_best_tune_mode, S_IRUGO | S_IWUSR,
+ bcm2048_fm_best_tune_mode_read,
+ bcm2048_fm_best_tune_mode_write),
+ __ATTR(fm_search_rssi_threshold, S_IRUGO | S_IWUSR,
+ bcm2048_fm_search_rssi_threshold_read,
+ bcm2048_fm_search_rssi_threshold_write),
+ __ATTR(fm_search_mode_direction, S_IRUGO | S_IWUSR,
+ bcm2048_fm_search_mode_direction_read,
+ bcm2048_fm_search_mode_direction_write),
+ __ATTR(fm_search_tune_mode, S_IRUGO | S_IWUSR,
+ bcm2048_fm_search_tune_mode_read,
+ bcm2048_fm_search_tune_mode_write),
+ __ATTR(rds, S_IRUGO | S_IWUSR, bcm2048_rds_read,
+ bcm2048_rds_write),
+ __ATTR(rds_b_block_mask, S_IRUGO | S_IWUSR,
+ bcm2048_rds_b_block_mask_read,
+ bcm2048_rds_b_block_mask_write),
+ __ATTR(rds_b_block_match, S_IRUGO | S_IWUSR,
+ bcm2048_rds_b_block_match_read,
+ bcm2048_rds_b_block_match_write),
+ __ATTR(rds_pi_mask, S_IRUGO | S_IWUSR, bcm2048_rds_pi_mask_read,
+ bcm2048_rds_pi_mask_write),
+ __ATTR(rds_pi_match, S_IRUGO | S_IWUSR, bcm2048_rds_pi_match_read,
+ bcm2048_rds_pi_match_write),
+ __ATTR(rds_wline, S_IRUGO | S_IWUSR, bcm2048_rds_wline_read,
+ bcm2048_rds_wline_write),
+ __ATTR(rds_pi, S_IRUGO, bcm2048_rds_pi_read, NULL),
+ __ATTR(rds_rt, S_IRUGO, bcm2048_rds_rt_read, NULL),
+ __ATTR(rds_ps, S_IRUGO, bcm2048_rds_ps_read, NULL),
+ __ATTR(fm_rds_flags, S_IRUGO, bcm2048_fm_rds_flags_read, NULL),
+ __ATTR(region_bottom_frequency, S_IRUGO,
+ bcm2048_region_bottom_frequency_read, NULL),
+ __ATTR(region_top_frequency, S_IRUGO,
+ bcm2048_region_top_frequency_read, NULL),
+ __ATTR(fm_carrier_error, S_IRUGO,
+ bcm2048_fm_carrier_error_read, NULL),
+ __ATTR(fm_rssi, S_IRUGO,
+ bcm2048_fm_rssi_read, NULL),
+ __ATTR(region, S_IRUGO | S_IWUSR, bcm2048_region_read,
+ bcm2048_region_write),
+ __ATTR(rds_data, S_IRUGO, bcm2048_rds_data_read, NULL),
+};
+
+static int bcm2048_sysfs_unregister_properties(struct bcm2048_device *bdev,
+ int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ device_remove_file(&bdev->client->dev, &attrs[i]);
+
+ return 0;
+}
+
+static int bcm2048_sysfs_register_properties(struct bcm2048_device *bdev)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(attrs); i++) {
+ if (device_create_file(&bdev->client->dev, &attrs[i]) != 0) {
+ dev_err(&bdev->client->dev,
+ "could not register sysfs entry\n");
+ err = -EBUSY;
+ bcm2048_sysfs_unregister_properties(bdev, i);
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int bcm2048_fops_open(struct file *file)
+{
+ struct bcm2048_device *bdev = video_drvdata(file);
+
+ bdev->users++;
+ bdev->rd_index = 0;
+ bdev->rds_data_available = 0;
+
+ return 0;
+}
+
+static int bcm2048_fops_release(struct file *file)
+{
+ struct bcm2048_device *bdev = video_drvdata(file);
+
+ bdev->users--;
+
+ return 0;
+}
+
+static unsigned int bcm2048_fops_poll(struct file *file,
+ struct poll_table_struct *pts)
+{
+ struct bcm2048_device *bdev = video_drvdata(file);
+ int retval = 0;
+
+ poll_wait(file, &bdev->read_queue, pts);
+
+ if (bdev->rds_data_available)
+ retval = POLLIN | POLLRDNORM;
+
+ return retval;
+}
+
+static ssize_t bcm2048_fops_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bcm2048_device *bdev = video_drvdata(file);
+ int i;
+ int retval = 0;
+
+ /* we return at least 3 bytes, one block */
+ count = (count / 3) * 3; /* only multiples of 3 */
+ if (count < 3)
+ return -ENOBUFS;
+
+ while (!bdev->rds_data_available) {
+ if (file->f_flags & O_NONBLOCK) {
+ retval = -EWOULDBLOCK;
+ goto done;
+ }
+ /* interruptible_sleep_on(&bdev->read_queue); */
+ if (wait_event_interruptible(bdev->read_queue,
+ bdev->rds_data_available) < 0) {
+ retval = -EINTR;
+ goto done;
+ }
+ }
+
+ mutex_lock(&bdev->mutex);
+ /* copy data to userspace */
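+	/*
+	 * Each block is passed to userspace as three bytes: data LSB, data
+	 * MSB, then the block id in the low nibble with bit 7 set for an
+	 * uncorrectable block, mirroring the V4L2 RDS read convention.
+	 */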
+ i = bdev->fifo_size - bdev->rd_index;
+ if (count > i)
+ count = (i / 3) * 3;
+
+ i = 0;
+ while (i < count) {
+ unsigned char tmpbuf[3];
+		tmpbuf[0] = bdev->rds_info.radio_text[bdev->rd_index+i+2];
+		tmpbuf[1] = bdev->rds_info.radio_text[bdev->rd_index+i+1];
+		tmpbuf[2] = ((bdev->rds_info.radio_text[bdev->rd_index+i]
+				& 0xf0) >> 4);
+		if ((bdev->rds_info.radio_text[bdev->rd_index+i] &
+			BCM2048_RDS_CRC_MASK) == BCM2048_RDS_CRC_UNRECOVARABLE)
+			tmpbuf[2] |= 0x80;
+ if (copy_to_user(buf+i, tmpbuf, 3)) {
+ retval = -EFAULT;
+ break;
+ }
+ i += 3;
+ }
+
+ bdev->rd_index += i;
+ if (bdev->rd_index >= bdev->fifo_size)
+ bdev->rds_data_available = 0;
+
+ mutex_unlock(&bdev->mutex);
+ if (retval == 0)
+ retval = i;
+
+done:
+ return retval;
+}
+
+/*
+ * bcm2048_fops - file operations interface
+ */
+static const struct v4l2_file_operations bcm2048_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = video_ioctl2,
+ /* for RDS read support */
+ .open = bcm2048_fops_open,
+ .release = bcm2048_fops_release,
+ .read = bcm2048_fops_read,
+ .poll = bcm2048_fops_poll
+};
+
+/*
+ * Video4Linux Interface
+ */
+static struct v4l2_queryctrl bcm2048_v4l2_queryctrl[] = {
+ {
+ .id = V4L2_CID_AUDIO_VOLUME,
+ .flags = V4L2_CTRL_FLAG_DISABLED,
+ },
+ {
+ .id = V4L2_CID_AUDIO_BALANCE,
+ .flags = V4L2_CTRL_FLAG_DISABLED,
+ },
+ {
+ .id = V4L2_CID_AUDIO_BASS,
+ .flags = V4L2_CTRL_FLAG_DISABLED,
+ },
+ {
+ .id = V4L2_CID_AUDIO_TREBLE,
+ .flags = V4L2_CTRL_FLAG_DISABLED,
+ },
+ {
+ .id = V4L2_CID_AUDIO_MUTE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mute",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_AUDIO_LOUDNESS,
+ .flags = V4L2_CTRL_FLAG_DISABLED,
+ },
+};
+
+static int bcm2048_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *capability)
+{
+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+
+ strlcpy(capability->driver, BCM2048_DRIVER_NAME,
+ sizeof(capability->driver));
+ strlcpy(capability->card, BCM2048_DRIVER_CARD,
+ sizeof(capability->card));
+ snprintf(capability->bus_info, 32, "I2C: 0x%X", bdev->client->addr);
+ capability->version = BCM2048_DRIVER_VERSION;
+ capability->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
+ V4L2_CAP_HW_FREQ_SEEK;
+
+ return 0;
+}
+
+static int bcm2048_vidioc_g_input(struct file *filp, void *priv,
+ unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int bcm2048_vidioc_s_input(struct file *filp, void *priv,
+ unsigned int i)
+{
+ if (i)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int bcm2048_vidioc_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bcm2048_v4l2_queryctrl); i++) {
+ if (qc->id && qc->id == bcm2048_v4l2_queryctrl[i].id) {
+ *qc = bcm2048_v4l2_queryctrl[i];
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int bcm2048_vidioc_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+ int err = 0;
+
+ if (!bdev)
+ return -ENODEV;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ err = bcm2048_get_mute(bdev);
+ if (err >= 0)
+ ctrl->value = err;
+ break;
+ }
+
+ return err;
+}
+
+static int bcm2048_vidioc_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+ int err = 0;
+
+ if (!bdev)
+ return -ENODEV;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ if (ctrl->value) {
+ if (bdev->power_state) {
+ err = bcm2048_set_mute(bdev, ctrl->value);
+ err |= bcm2048_deinit(bdev);
+ }
+ } else {
+ if (!bdev->power_state) {
+ err = bcm2048_init(bdev);
+ err |= bcm2048_set_mute(bdev, ctrl->value);
+ }
+ }
+ break;
+ }
+
+ return err;
+}
+
+static int bcm2048_vidioc_g_audio(struct file *file, void *priv,
+ struct v4l2_audio *audio)
+{
+ if (audio->index > 1)
+ return -EINVAL;
+
+ strncpy(audio->name, "Radio", 32);
+ audio->capability = V4L2_AUDCAP_STEREO;
+
+ return 0;
+}
+
+static int bcm2048_vidioc_s_audio(struct file *file, void *priv,
+ const struct v4l2_audio *audio)
+{
+ if (audio->index != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int bcm2048_vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *tuner)
+{
+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+ s8 f_error;
+ s8 rssi;
+
+ if (!bdev)
+ return -ENODEV;
+
+ if (tuner->index > 0)
+ return -EINVAL;
+
+ strncpy(tuner->name, "FM Receiver", 32);
+ tuner->type = V4L2_TUNER_RADIO;
+ tuner->rangelow =
+ dev_to_v4l2(bcm2048_get_region_bottom_frequency(bdev));
+ tuner->rangehigh =
+ dev_to_v4l2(bcm2048_get_region_top_frequency(bdev));
+ tuner->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ tuner->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW;
+ tuner->audmode = V4L2_TUNER_MODE_STEREO;
+ tuner->afc = 0;
+ if (bdev->power_state) {
+		/*
+		 * Report a zero signal level for frequencies with a high
+		 * carrier error.
+		 */
+ f_error = bcm2048_get_fm_carrier_error(bdev);
+ if (f_error < BCM2048_FREQ_ERROR_FLOOR ||
+ f_error > BCM2048_FREQ_ERROR_ROOF) {
+ tuner->signal = 0;
+ } else {
+			/*
+			 * An RSSI level of -60 dB is defined to report full
+			 * signal strength
+			 */
+ rssi = bcm2048_get_fm_rssi(bdev);
+ if (rssi >= BCM2048_RSSI_LEVEL_BASE) {
+ tuner->signal = 0xFFFF;
+ } else if (rssi > BCM2048_RSSI_LEVEL_ROOF) {
+ tuner->signal = (rssi +
+ BCM2048_RSSI_LEVEL_ROOF_NEG)
+ * BCM2048_SIGNAL_MULTIPLIER;
+ } else {
+ tuner->signal = 0;
+ }
+ }
+ } else {
+ tuner->signal = 0;
+ }
+
+ return 0;
+}
+
+static int bcm2048_vidioc_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *tuner)
+{
+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+
+ if (!bdev)
+ return -ENODEV;
+
+ if (tuner->index > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int bcm2048_vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *freq)
+{
+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+ int err = 0;
+ int f;
+
+ if (!bdev->power_state)
+ return -ENODEV;
+
+ freq->type = V4L2_TUNER_RADIO;
+ f = bcm2048_get_fm_frequency(bdev);
+
+ if (f < 0)
+ err = f;
+ else
+ freq->frequency = dev_to_v4l2(f);
+
+ return err;
+}
+
+static int bcm2048_vidioc_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *freq)
+{
+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+ int err;
+
+ if (freq->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
+
+ if (!bdev->power_state)
+ return -ENODEV;
+
+ err = bcm2048_set_fm_frequency(bdev, v4l2_to_dev(freq->frequency));
+ err |= bcm2048_set_fm_search_tune_mode(bdev, BCM2048_FM_PRE_SET_MODE);
+
+ return err;
+}
+
+static int bcm2048_vidioc_s_hw_freq_seek(struct file *file, void *priv,
+ const struct v4l2_hw_freq_seek *seek)
+{
+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+ int err;
+
+ if (!bdev->power_state)
+ return -ENODEV;
+
+ if ((seek->tuner != 0) || (seek->type != V4L2_TUNER_RADIO))
+ return -EINVAL;
+
+ err = bcm2048_set_fm_search_mode_direction(bdev, seek->seek_upward);
+ err |= bcm2048_set_fm_search_tune_mode(bdev,
+ BCM2048_FM_AUTO_SEARCH_MODE);
+
+ return err;
+}
+
+static struct v4l2_ioctl_ops bcm2048_ioctl_ops = {
+ .vidioc_querycap = bcm2048_vidioc_querycap,
+ .vidioc_g_input = bcm2048_vidioc_g_input,
+ .vidioc_s_input = bcm2048_vidioc_s_input,
+ .vidioc_queryctrl = bcm2048_vidioc_queryctrl,
+ .vidioc_g_ctrl = bcm2048_vidioc_g_ctrl,
+ .vidioc_s_ctrl = bcm2048_vidioc_s_ctrl,
+ .vidioc_g_audio = bcm2048_vidioc_g_audio,
+ .vidioc_s_audio = bcm2048_vidioc_s_audio,
+ .vidioc_g_tuner = bcm2048_vidioc_g_tuner,
+ .vidioc_s_tuner = bcm2048_vidioc_s_tuner,
+ .vidioc_g_frequency = bcm2048_vidioc_g_frequency,
+ .vidioc_s_frequency = bcm2048_vidioc_s_frequency,
+ .vidioc_s_hw_freq_seek = bcm2048_vidioc_s_hw_freq_seek,
+};
+
+/*
+ * bcm2048_viddev_template - video device interface
+ */
+static struct video_device bcm2048_viddev_template = {
+ .fops = &bcm2048_fops,
+ .name = BCM2048_DRIVER_NAME,
+ .release = video_device_release,
+ .ioctl_ops = &bcm2048_ioctl_ops,
+};
+
+/*
+ * I2C driver interface
+ */
+static int bcm2048_i2c_driver_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct bcm2048_device *bdev;
+ int err, skip_release = 0;
+
+ bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
+ if (!bdev) {
+		dev_dbg(&client->dev, "Failed to alloc driver data.\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ bdev->videodev = video_device_alloc();
+ if (!bdev->videodev) {
+ dev_dbg(&client->dev, "Failed to alloc video device.\n");
+ err = -ENOMEM;
+ goto free_bdev;
+ }
+
+ bdev->client = client;
+ i2c_set_clientdata(client, bdev);
+ mutex_init(&bdev->mutex);
+ init_completion(&bdev->compl);
+ INIT_WORK(&bdev->work, bcm2048_work);
+
+ if (client->irq) {
+ err = request_irq(client->irq,
+ bcm2048_handler, IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+ client->name, bdev);
+ if (err < 0) {
+ dev_err(&client->dev, "Could not request IRQ\n");
+ goto free_vdev;
+ }
+ dev_dbg(&client->dev, "IRQ requested.\n");
+ } else {
+ dev_dbg(&client->dev, "IRQ not configured. Using timeouts.\n");
+ }
+
+ *bdev->videodev = bcm2048_viddev_template;
+ video_set_drvdata(bdev->videodev, bdev);
+ if (video_register_device(bdev->videodev, VFL_TYPE_RADIO, radio_nr)) {
+ dev_dbg(&client->dev, "Could not register video device.\n");
+ err = -EIO;
+ goto free_irq;
+ }
+
+ err = bcm2048_sysfs_register_properties(bdev);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Could not register sysfs interface.\n");
+ goto free_registration;
+ }
+
+ err = bcm2048_probe(bdev);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Failed to probe device information.\n");
+ goto free_sysfs;
+ }
+
+ return 0;
+
+free_sysfs:
+ bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs));
+free_registration:
+ video_unregister_device(bdev->videodev);
+ /* video_unregister_device frees bdev->videodev */
+ bdev->videodev = NULL;
+ skip_release = 1;
+free_irq:
+ if (client->irq)
+ free_irq(client->irq, bdev);
+free_vdev:
+ if (!skip_release)
+ video_device_release(bdev->videodev);
+ i2c_set_clientdata(client, NULL);
+free_bdev:
+ kfree(bdev);
+exit:
+ return err;
+}
+
+static int __exit bcm2048_i2c_driver_remove(struct i2c_client *client)
+{
+ struct bcm2048_device *bdev = i2c_get_clientdata(client);
+ struct video_device *vd;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ if (bdev) {
+ vd = bdev->videodev;
+
+ bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs));
+
+ if (vd)
+ video_unregister_device(vd);
+
+ if (bdev->power_state)
+ bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
+
+ if (client->irq > 0)
+ free_irq(client->irq, bdev);
+
+ cancel_work_sync(&bdev->work);
+
+ kfree(bdev);
+ }
+
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+/*
+ * bcm2048_i2c_driver - i2c driver interface
+ */
+static const struct i2c_device_id bcm2048_id[] = {
+	{ "bcm2048", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, bcm2048_id);
+
+static struct i2c_driver bcm2048_i2c_driver = {
+ .driver = {
+ .name = BCM2048_DRIVER_NAME,
+ },
+ .probe = bcm2048_i2c_driver_probe,
+ .remove = __exit_p(bcm2048_i2c_driver_remove),
+ .id_table = bcm2048_id,
+};
+
+/*
+ * Module Interface
+ */
+static int __init bcm2048_module_init(void)
+{
+ pr_info(BCM2048_DRIVER_DESC "\n");
+
+ return i2c_add_driver(&bcm2048_i2c_driver);
+}
+module_init(bcm2048_module_init);
+
+static void __exit bcm2048_module_exit(void)
+{
+ i2c_del_driver(&bcm2048_i2c_driver);
+}
+module_exit(bcm2048_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR(BCM2048_DRIVER_AUTHOR);
+MODULE_DESCRIPTION(BCM2048_DRIVER_DESC);
+MODULE_VERSION("0.0.2");
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.h b/drivers/staging/media/bcm2048/radio-bcm2048.h
new file mode 100644
index 000000000000..4c90a32db795
--- /dev/null
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.h
@@ -0,0 +1,30 @@
+/*
+ * drivers/staging/media/radio-bcm2048.h
+ *
+ * Property and command definitions for bcm2048 radio receiver chip.
+ *
+ * Copyright (C) Nokia Corporation
+ * Contact: Eero Nurkkala <ext-eero.nurkkala@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef BCM2048_H
+#define BCM2048_H
+
+#define BCM2048_NAME "bcm2048"
+#define BCM2048_I2C_ADDR 0x22
+
+#endif /* ifndef BCM2048_H */
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index 822c487592a4..6cb74dacc69d 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/wait.h>
#include <linux/delay.h>
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
index 766a071b0a22..b7044a380fe3 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
@@ -1009,7 +1009,7 @@ static int ipipe_validate_yee_params(struct vpfe_ipipe_yee *yee)
yee->es_ofst_grad > YEE_THR_MASK)
return -EINVAL;
- for (i = 0; i < VPFE_IPIPE_MAX_SIZE_YEE_LUT ; i++)
+ for (i = 0; i < VPFE_IPIPE_MAX_SIZE_YEE_LUT; i++)
if (yee->table[i] > YEE_ENTRY_MASK)
return -EINVAL;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
index e027b92b54ef..2d36b60bdbf1 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
@@ -791,7 +791,7 @@ ipipe_set_3d_lut_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
/* valied table */
tbl = lut_3d->table;
- for (i = 0 ; i < VPFE_IPIPE_MAX_SIZE_3D_LUT; i++) {
+ for (i = 0; i < VPFE_IPIPE_MAX_SIZE_3D_LUT; i++) {
/* Each entry has 0-9 (B), 10-19 (G) and
20-29 R values */
val = tbl[i].b & D3_LUT_ENTRY_MASK;
@@ -899,7 +899,7 @@ ipipe_set_gbce_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
if (!gbce->table)
return;
- for (count = 0; count < VPFE_IPIPE_MAX_SIZE_GBCE_LUT ; count += 2)
+ for (count = 0; count < VPFE_IPIPE_MAX_SIZE_GBCE_LUT; count += 2)
w_ip_table(isp5_base_addr, ((gbce->table[count + 1] & mask) <<
GBCE_ENTRY_SHIFT) | (gbce->table[count] & mask),
((count/2) << 2) + GBCE_TB_START_ADDR);
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
index ff48fce94fcb..b942bf73c43f 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -19,6 +19,7 @@
* Prabhakar Lad <prabhakar.lad@ti.com>
*/
+#include <linux/delay.h>
#include "dm365_isif.h"
#include "vpfe_mc_capture.h"
@@ -918,7 +919,7 @@ isif_config_dfc(struct vpfe_isif_device *isif, struct vpfe_isif_dfc *vdfc)
(0 << ISIF_VDFC_EN_SHIFT), DFCCTL);
isif_write(isif->isif_cfg.base_addr, 0x6, DFCMEMCTL);
- for (i = 0 ; i < vdfc->num_vdefects; i++) {
+ for (i = 0; i < vdfc->num_vdefects; i++) {
count = DFC_WRITE_WAIT_COUNT;
while (count &&
(isif_read(isif->isif_cfg.base_addr, DFCMEMCTL) & 0x2))
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 24d98a6866bb..1f3b0f9a8d10 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -346,7 +346,7 @@ static int vpfe_pipeline_disable(struct vpfe_pipeline *pipe)
}
mutex_unlock(&mdev->graph_mutex);
- return (ret == 0) ? ret : -ETIMEDOUT ;
+ return ret ? -ETIMEDOUT : 0;
}
/*
@@ -1201,6 +1201,8 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
unsigned long addr;
int ret;
+ if (count == 0)
+ return -ENOBUFS;
ret = mutex_lock_interruptible(&video->lock);
if (ret)
goto streamoff;
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 081407be33ab..e729e52639c5 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -974,7 +974,7 @@ dt3155_remove(struct pci_dev *pdev)
kfree(pd);
}
-static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+static const struct pci_device_id pci_ids[] = {
{ PCI_DEVICE(DT3155_VENDOR_ID, DT3155_DEVICE_ID) },
{ 0, /* zero marks the end */ },
};
diff --git a/drivers/staging/media/go7007/go7007-driver.c b/drivers/staging/media/go7007/go7007-driver.c
index 3640df0aa0c1..6f1beca86b2b 100644
--- a/drivers/staging/media/go7007/go7007-driver.c
+++ b/drivers/staging/media/go7007/go7007-driver.c
@@ -16,7 +16,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
diff --git a/drivers/staging/media/go7007/go7007-fw.c b/drivers/staging/media/go7007/go7007-fw.c
index c2d0e58afc34..814ce08bc44d 100644
--- a/drivers/staging/media/go7007/go7007-fw.c
+++ b/drivers/staging/media/go7007/go7007-fw.c
@@ -25,7 +25,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/device.h>
@@ -722,7 +721,8 @@ static int vti_bitlen(struct go7007 *go)
{
unsigned int i, max_time_incr = go->sensor_framerate / go->fps_scale;
- for (i = 31; (max_time_incr & ((1 << i) - 1)) == max_time_incr; --i);
+ for (i = 31; (max_time_incr & ((1 << i) - 1)) == max_time_incr; --i)
+ ;
return i + 1;
}
diff --git a/drivers/staging/media/go7007/go7007-i2c.c b/drivers/staging/media/go7007/go7007-i2c.c
index 74f25e03c326..4cf4c0d65085 100644
--- a/drivers/staging/media/go7007/go7007-i2c.c
+++ b/drivers/staging/media/go7007/go7007-i2c.c
@@ -16,7 +16,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/list.h>
diff --git a/drivers/staging/media/go7007/go7007-loader.c b/drivers/staging/media/go7007/go7007-loader.c
index f846ad5819dc..eecb1f2a5574 100644
--- a/drivers/staging/media/go7007/go7007-loader.c
+++ b/drivers/staging/media/go7007/go7007-loader.c
@@ -16,7 +16,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/firmware.h>
@@ -60,7 +59,7 @@ static int go7007_loader_probe(struct usb_interface *interface,
if (usbdev->descriptor.bNumConfigurations != 1) {
dev_err(&interface->dev, "can't handle multiple config\n");
- return -ENODEV;
+ goto failed2;
}
vendor = le16_to_cpu(usbdev->descriptor.idVendor);
@@ -109,6 +108,7 @@ static int go7007_loader_probe(struct usb_interface *interface,
return 0;
failed2:
+ usb_put_dev(usbdev);
dev_err(&interface->dev, "probe failed\n");
return -ENODEV;
}
@@ -116,6 +116,7 @@ failed2:
static void go7007_loader_disconnect(struct usb_interface *interface)
{
dev_info(&interface->dev, "disconnect\n");
+ usb_put_dev(interface_to_usbdev(interface));
usb_set_intfdata(interface, NULL);
}
diff --git a/drivers/staging/media/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c
index b658c2316df3..2f62be905cd1 100644
--- a/drivers/staging/media/go7007/go7007-usb.c
+++ b/drivers/staging/media/go7007/go7007-usb.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/slab.h>
diff --git a/drivers/staging/media/go7007/go7007-v4l2.c b/drivers/staging/media/go7007/go7007-v4l2.c
index 50eb69a8ef07..edc52e2630a9 100644
--- a/drivers/staging/media/go7007/go7007-v4l2.c
+++ b/drivers/staging/media/go7007/go7007-v4l2.c
@@ -16,7 +16,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
diff --git a/drivers/staging/media/go7007/s2250-board.c b/drivers/staging/media/go7007/s2250-board.c
index beaa98b9c85a..696a80756691 100644
--- a/drivers/staging/media/go7007/s2250-board.c
+++ b/drivers/staging/media/go7007/s2250-board.c
@@ -16,7 +16,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
diff --git a/drivers/staging/media/go7007/saa7134-go7007.c b/drivers/staging/media/go7007/saa7134-go7007.c
index d80b235d72ee..6e2ca338cdd9 100644
--- a/drivers/staging/media/go7007/saa7134-go7007.c
+++ b/drivers/staging/media/go7007/saa7134-go7007.c
@@ -86,7 +86,7 @@ static const struct go7007_board_info board_voyager = {
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.num_inputs = 1,
- .inputs = {
+ .inputs = {
{
.name = "SAA7134",
},
diff --git a/drivers/staging/media/go7007/snd-go7007.c b/drivers/staging/media/go7007/snd-go7007.c
index 4be0fa40a39a..16dd64920767 100644
--- a/drivers/staging/media/go7007/snd-go7007.c
+++ b/drivers/staging/media/go7007/snd-go7007.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/sched.h>
diff --git a/drivers/staging/media/lirc/lirc_igorplugusb.c b/drivers/staging/media/lirc/lirc_igorplugusb.c
index 28c8b0bcf5b2..f2dcc4a292da 100644
--- a/drivers/staging/media/lirc/lirc_igorplugusb.c
+++ b/drivers/staging/media/lirc/lirc_igorplugusb.c
@@ -363,8 +363,8 @@ static int igorplugusb_remote_poll(void *data, struct lirc_buffer *buf)
/*dummy*/ir->buf_in, /*dummy*/ir->len_in,
/*timeout*/HZ * USB_CTRL_GET_TIMEOUT);
if (ret < 0)
- printk(DRIVER_NAME "[%d]: SET_INFRABUFFER_EMPTY: "
- "error %d\n", ir->devnum, ret);
+ printk(DRIVER_NAME "[%d]: SET_INFRABUFFER_EMPTY: error %d\n",
+ ir->devnum, ret);
return 0;
} else if (ret < 0)
printk(DRIVER_NAME "[%d]: GET_INFRACODE: error %d\n",
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index ab2ae115b524..f2d396cc4a4c 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -23,7 +23,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -808,7 +807,8 @@ static int imon_probe(struct usb_interface *interface,
/* Input endpoint is mandatory */
if (!ir_ep_found) {
- dev_err(dev, "%s: no valid input (IR) endpoint found.\n", __func__);
+ dev_err(dev, "%s: no valid input (IR) endpoint found.\n",
+ __func__);
retval = -ENODEV;
alloc_status = 2;
goto alloc_status_switch;
@@ -878,8 +878,8 @@ static int imon_probe(struct usb_interface *interface,
alloc_status = 7;
goto unlock;
} else
- dev_info(dev, "Registered iMON driver "
- "(lirc minor: %d)\n", lirc_minor);
+ dev_info(dev, "Registered iMON driver (lirc minor: %d)\n",
+ lirc_minor);
/* Needed while unregistering! */
driver->minor = lirc_minor;
@@ -923,8 +923,8 @@ static int imon_probe(struct usb_interface *interface,
if (usb_register_dev(interface, &imon_class)) {
/* Not a fatal error, so ignore */
- dev_info(dev, "%s: could not get a minor number for "
- "display\n", __func__);
+ dev_info(dev, "%s: could not get a minor number for display\n",
+ __func__);
}
}
diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c
index 41d110f8bc02..0b589892351a 100644
--- a/drivers/staging/media/lirc/lirc_parallel.c
+++ b/drivers/staging/media/lirc/lirc_parallel.c
@@ -220,7 +220,7 @@ static void rbuf_write(int signal)
wptr = nwptr;
}
-static void irq_handler(void *blah)
+static void lirc_lirc_irq_handler(void *blah)
{
struct timeval tv;
static struct timeval lasttv;
@@ -659,7 +659,7 @@ static int __init lirc_parallel_init(void)
goto exit_device_put;
}
ppdevice = parport_register_device(pport, LIRC_DRIVER_NAME,
- pf, kf, irq_handler, 0, NULL);
+ pf, kf, lirc_lirc_irq_handler, 0, NULL);
parport_put_port(pport);
if (ppdevice == NULL) {
pr_notice("parport_register_device() failed\n");
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 68acca74ddb1..d2445fdd9015 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -37,7 +37,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index 2e3a98575d47..10c685d5de7c 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -428,8 +428,8 @@ static int init_timing_params(unsigned int new_duty_cycle,
period = 256 * 1000000L / freq;
pulse_width = period * duty_cycle / 100;
space_width = period - pulse_width;
- dprintk("in init_timing_params, freq=%d pulse=%ld, "
- "space=%ld\n", freq, pulse_width, space_width);
+ dprintk("in init_timing_params, freq=%d pulse=%ld, space=%ld\n",
+ freq, pulse_width, space_width);
return 0;
}
#endif /* USE_RDTSC */
@@ -650,7 +650,7 @@ static void frbwrite(int l)
rbwrite(l);
}
-static irqreturn_t irq_handler(int i, void *blah)
+static irqreturn_t lirc_irq_handler(int i, void *blah)
{
struct timeval tv;
int counter, dcd;
@@ -852,7 +852,7 @@ static int lirc_serial_probe(struct platform_device *dev)
return result;
#endif
- result = request_irq(irq, irq_handler,
+ result = request_irq(irq, lirc_irq_handler,
(share_irq ? IRQF_SHARED : 0),
LIRC_DRIVER_NAME, (void *)&hardware);
if (result < 0) {
@@ -974,7 +974,7 @@ static void set_use_dec(void *data)
spin_unlock_irqrestore(&hardware[type].lock, flags);
}
-static ssize_t lirc_write(struct file *file, const char *buf,
+static ssize_t lirc_write(struct file *file, const char __user *buf,
size_t n, loff_t *ppos)
{
int i, count;
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 0feeaadf29dc..e1feb6164593 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -767,8 +767,8 @@ static int fw_load(struct IR_tx *tx)
/* Request codeset data file */
ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", tx->ir->l.dev);
if (ret != 0) {
- zilog_error("firmware haup-ir-blaster.bin not available "
- "(%d)\n", ret);
+ zilog_error("firmware haup-ir-blaster.bin not available (%d)\n",
+ ret);
ret = ret < 0 ? ret : -EFAULT;
goto out;
}
diff --git a/drivers/staging/media/omap24xx/Kconfig b/drivers/staging/media/omap24xx/Kconfig
new file mode 100644
index 000000000000..82e569a21c46
--- /dev/null
+++ b/drivers/staging/media/omap24xx/Kconfig
@@ -0,0 +1,35 @@
+config VIDEO_V4L2_INT_DEVICE
+ tristate
+
+config VIDEO_OMAP2
+ tristate "OMAP2 Camera Capture Interface driver (DEPRECATED)"
+ depends on VIDEO_DEV && ARCH_OMAP2
+ select VIDEOBUF_DMA_SG
+ select VIDEO_V4L2_INT_DEVICE
+ ---help---
+	  This is a v4l2 driver for the TI OMAP2 camera capture interface.
+
+ It uses the deprecated int-device API. Since this driver is no
+ longer actively maintained and nobody is interested in converting
+ it to the subdev API, this driver will be removed soon.
+
+ If you do want to keep this driver in the kernel, and are willing
+ to convert it to the subdev API, then please contact the linux-media
+	  mailing list.
+
+config VIDEO_TCM825X
+ tristate "TCM825x camera sensor support (DEPRECATED)"
+ depends on I2C && VIDEO_V4L2
+ depends on MEDIA_CAMERA_SUPPORT
+ select VIDEO_V4L2_INT_DEVICE
+ ---help---
+ This is a driver for the Toshiba TCM825x VGA camera sensor.
+	  It is used, for example, in the Nokia N800.
+
+ It uses the deprecated int-device API. Since this driver is no
+ longer actively maintained and nobody is interested in converting
+ it to the subdev API, this driver will be removed soon.
+
+ If you do want to keep this driver in the kernel, and are willing
+ to convert it to the subdev API, then please contact the linux-media
+	  mailing list.
diff --git a/drivers/staging/media/omap24xx/Makefile b/drivers/staging/media/omap24xx/Makefile
new file mode 100644
index 000000000000..c2e7175599c2
--- /dev/null
+++ b/drivers/staging/media/omap24xx/Makefile
@@ -0,0 +1,5 @@
+omap2cam-objs := omap24xxcam.o omap24xxcam-dma.o
+
+obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
+obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
+obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
diff --git a/drivers/media/platform/omap24xxcam-dma.c b/drivers/staging/media/omap24xx/omap24xxcam-dma.c
index 9c00776d6583..9c00776d6583 100644
--- a/drivers/media/platform/omap24xxcam-dma.c
+++ b/drivers/staging/media/omap24xx/omap24xxcam-dma.c
diff --git a/drivers/media/platform/omap24xxcam.c b/drivers/staging/media/omap24xx/omap24xxcam.c
index d2b440c842b3..d2b440c842b3 100644
--- a/drivers/media/platform/omap24xxcam.c
+++ b/drivers/staging/media/omap24xx/omap24xxcam.c
diff --git a/drivers/media/platform/omap24xxcam.h b/drivers/staging/media/omap24xx/omap24xxcam.h
index 7f6f79155537..233bb40cfec3 100644
--- a/drivers/media/platform/omap24xxcam.h
+++ b/drivers/staging/media/omap24xx/omap24xxcam.h
@@ -28,8 +28,8 @@
#define OMAP24XXCAM_H
#include <media/videobuf-dma-sg.h>
-#include <media/v4l2-int-device.h>
#include <media/v4l2-device.h>
+#include "v4l2-int-device.h"
/*
*
diff --git a/drivers/media/i2c/tcm825x.c b/drivers/staging/media/omap24xx/tcm825x.c
index 9252529fc5dd..b1ae8e9c7e14 100644
--- a/drivers/media/i2c/tcm825x.c
+++ b/drivers/staging/media/omap24xx/tcm825x.c
@@ -28,7 +28,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
-#include <media/v4l2-int-device.h>
+#include "v4l2-int-device.h"
#include "tcm825x.h"
diff --git a/drivers/media/i2c/tcm825x.h b/drivers/staging/media/omap24xx/tcm825x.h
index 8ebab953963f..e2d1bcd0bcbe 100644
--- a/drivers/media/i2c/tcm825x.h
+++ b/drivers/staging/media/omap24xx/tcm825x.h
@@ -17,7 +17,7 @@
#include <linux/videodev2.h>
-#include <media/v4l2-int-device.h>
+#include "v4l2-int-device.h"
#define TCM825X_NAME "tcm825x"
diff --git a/drivers/media/v4l2-core/v4l2-int-device.c b/drivers/staging/media/omap24xx/v4l2-int-device.c
index f4473494af7a..427a89033a1d 100644
--- a/drivers/media/v4l2-core/v4l2-int-device.c
+++ b/drivers/staging/media/omap24xx/v4l2-int-device.c
@@ -28,7 +28,7 @@
#include <linux/string.h>
#include <linux/module.h>
-#include <media/v4l2-int-device.h>
+#include "v4l2-int-device.h"
static DEFINE_MUTEX(mutex);
static LIST_HEAD(int_list);
diff --git a/drivers/staging/media/omap24xx/v4l2-int-device.h b/drivers/staging/media/omap24xx/v4l2-int-device.h
new file mode 100644
index 000000000000..0286c95814ff
--- /dev/null
+++ b/drivers/staging/media/omap24xx/v4l2-int-device.h
@@ -0,0 +1,305 @@
+/*
+ * include/media/v4l2-int-device.h
+ *
+ * V4L2 internal ioctl interface.
+ *
+ * Copyright (C) 2007 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef V4L2_INT_DEVICE_H
+#define V4L2_INT_DEVICE_H
+
+#include <media/v4l2-common.h>
+
+#define V4L2NAMESIZE 32
+
+/*
+ *
+ * The internal V4L2 device interface core.
+ *
+ */
+
+enum v4l2_int_type {
+ v4l2_int_type_master = 1,
+ v4l2_int_type_slave
+};
+
+struct module;
+
+struct v4l2_int_device;
+
+struct v4l2_int_master {
+ int (*attach)(struct v4l2_int_device *slave);
+ void (*detach)(struct v4l2_int_device *slave);
+};
+
+typedef int (v4l2_int_ioctl_func)(struct v4l2_int_device *);
+typedef int (v4l2_int_ioctl_func_0)(struct v4l2_int_device *);
+typedef int (v4l2_int_ioctl_func_1)(struct v4l2_int_device *, void *);
+
+struct v4l2_int_ioctl_desc {
+ int num;
+ v4l2_int_ioctl_func *func;
+};
+
+struct v4l2_int_slave {
+ /* Don't touch master. */
+ struct v4l2_int_device *master;
+
+ char attach_to[V4L2NAMESIZE];
+
+ int num_ioctls;
+ struct v4l2_int_ioctl_desc *ioctls;
+};
+
+struct v4l2_int_device {
+ /* Don't touch head. */
+ struct list_head head;
+
+ struct module *module;
+
+ char name[V4L2NAMESIZE];
+
+ enum v4l2_int_type type;
+ union {
+ struct v4l2_int_master *master;
+ struct v4l2_int_slave *slave;
+ } u;
+
+ void *priv;
+};
+
+void v4l2_int_device_try_attach_all(void);
+
+int v4l2_int_device_register(struct v4l2_int_device *d);
+void v4l2_int_device_unregister(struct v4l2_int_device *d);
+
+int v4l2_int_ioctl_0(struct v4l2_int_device *d, int cmd);
+int v4l2_int_ioctl_1(struct v4l2_int_device *d, int cmd, void *arg);
+
+/*
+ *
+ * Types and definitions for IOCTL commands.
+ *
+ */
+
+enum v4l2_power {
+ V4L2_POWER_OFF = 0,
+ V4L2_POWER_ON,
+ V4L2_POWER_STANDBY,
+};
+
+/* Slave interface type. */
+enum v4l2_if_type {
+ /*
+	 * Parallel 8-, 10- or 12-bit interface, used for example by
+	 * certain image sensors.
+ */
+ V4L2_IF_TYPE_BT656,
+};
+
+enum v4l2_if_type_bt656_mode {
+ /*
+ * Modes without Bt synchronisation codes. Separate
+ * synchronisation signal lines are used.
+ */
+ V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT,
+ V4L2_IF_TYPE_BT656_MODE_NOBT_10BIT,
+ V4L2_IF_TYPE_BT656_MODE_NOBT_12BIT,
+ /*
+ * Use Bt synchronisation codes. The vertical and horizontal
+ * synchronisation is done based on synchronisation codes.
+ */
+ V4L2_IF_TYPE_BT656_MODE_BT_8BIT,
+ V4L2_IF_TYPE_BT656_MODE_BT_10BIT,
+};
+
+struct v4l2_if_type_bt656 {
+ /*
+ * 0: Frame begins when vsync is high.
+ * 1: Frame begins when vsync changes from low to high.
+ */
+ unsigned frame_start_on_rising_vs:1;
+ /* Use Bt synchronisation codes for sync correction. */
+ unsigned bt_sync_correct:1;
+ /* Swap every two adjacent image data elements. */
+ unsigned swap:1;
+ /* Inverted latch clock polarity from slave. */
+ unsigned latch_clk_inv:1;
+ /* Hs polarity. 0 is active high, 1 active low. */
+ unsigned nobt_hs_inv:1;
+ /* Vs polarity. 0 is active high, 1 active low. */
+ unsigned nobt_vs_inv:1;
+ enum v4l2_if_type_bt656_mode mode;
+ /* Minimum accepted bus clock for slave (in Hz). */
+ u32 clock_min;
+ /* Maximum accepted bus clock for slave. */
+ u32 clock_max;
+ /*
+ * Current wish of the slave. May only change in response to
+ * ioctls that affect image capture.
+ */
+ u32 clock_curr;
+};
+
+struct v4l2_ifparm {
+ enum v4l2_if_type if_type;
+ union {
+ struct v4l2_if_type_bt656 bt656;
+ } u;
+};
+
+/* IOCTL command numbers. */
+enum v4l2_int_ioctl_num {
+ /*
+ *
+ * "Proper" V4L ioctls, as in struct video_device.
+ *
+ */
+ vidioc_int_enum_fmt_cap_num = 1,
+ vidioc_int_g_fmt_cap_num,
+ vidioc_int_s_fmt_cap_num,
+ vidioc_int_try_fmt_cap_num,
+ vidioc_int_queryctrl_num,
+ vidioc_int_g_ctrl_num,
+ vidioc_int_s_ctrl_num,
+ vidioc_int_cropcap_num,
+ vidioc_int_g_crop_num,
+ vidioc_int_s_crop_num,
+ vidioc_int_g_parm_num,
+ vidioc_int_s_parm_num,
+ vidioc_int_querystd_num,
+ vidioc_int_s_std_num,
+ vidioc_int_s_video_routing_num,
+
+ /*
+ *
+ * Strictly internal ioctls.
+ *
+ */
+ /* Initialise the device when slave attaches to the master. */
+ vidioc_int_dev_init_num = 1000,
+	/* Deinitialise the device at slave detach. */
+ vidioc_int_dev_exit_num,
+ /* Set device power state. */
+ vidioc_int_s_power_num,
+ /*
+ * Get slave private data, e.g. platform-specific slave
+ * configuration used by the master.
+ */
+ vidioc_int_g_priv_num,
+ /* Get slave interface parameters. */
+ vidioc_int_g_ifparm_num,
+ /* Does the slave need to be reset after VIDIOC_DQBUF? */
+ vidioc_int_g_needs_reset_num,
+ vidioc_int_enum_framesizes_num,
+ vidioc_int_enum_frameintervals_num,
+
+ /*
+ *
+ * VIDIOC_INT_* ioctls.
+ *
+ */
+ /* VIDIOC_INT_RESET */
+ vidioc_int_reset_num,
+ /* VIDIOC_INT_INIT */
+ vidioc_int_init_num,
+
+ /*
+ *
+ * Start of private ioctls.
+ *
+ */
+ vidioc_int_priv_start_num = 2000,
+};
+
+/*
+ *
+ * IOCTL wrapper functions for better type checking.
+ *
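+ * Each V4L2_INT_WRAPPER_n(name, ...) expands to an inline
+ * vidioc_int_<name>() caller and a vidioc_int_<name>_cb() helper that
+ * wraps a callback into a struct v4l2_int_ioctl_desc.
+ *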
+ */
+
+#define V4L2_INT_WRAPPER_0(name) \
+ static inline int vidioc_int_##name(struct v4l2_int_device *d) \
+ { \
+ return v4l2_int_ioctl_0(d, vidioc_int_##name##_num); \
+ } \
+ \
+ static inline struct v4l2_int_ioctl_desc \
+ vidioc_int_##name##_cb(int (*func) \
+ (struct v4l2_int_device *)) \
+ { \
+ struct v4l2_int_ioctl_desc desc; \
+ \
+ desc.num = vidioc_int_##name##_num; \
+ desc.func = (v4l2_int_ioctl_func *)func; \
+ \
+ return desc; \
+ }
+
+#define V4L2_INT_WRAPPER_1(name, arg_type, asterisk) \
+ static inline int vidioc_int_##name(struct v4l2_int_device *d, \
+ arg_type asterisk arg) \
+ { \
+ return v4l2_int_ioctl_1(d, vidioc_int_##name##_num, \
+ (void *)(unsigned long)arg); \
+ } \
+ \
+ static inline struct v4l2_int_ioctl_desc \
+ vidioc_int_##name##_cb(int (*func) \
+ (struct v4l2_int_device *, \
+ arg_type asterisk)) \
+ { \
+ struct v4l2_int_ioctl_desc desc; \
+ \
+ desc.num = vidioc_int_##name##_num; \
+ desc.func = (v4l2_int_ioctl_func *)func; \
+ \
+ return desc; \
+ }
+
+V4L2_INT_WRAPPER_1(enum_fmt_cap, struct v4l2_fmtdesc, *);
+V4L2_INT_WRAPPER_1(g_fmt_cap, struct v4l2_format, *);
+V4L2_INT_WRAPPER_1(s_fmt_cap, struct v4l2_format, *);
+V4L2_INT_WRAPPER_1(try_fmt_cap, struct v4l2_format, *);
+V4L2_INT_WRAPPER_1(queryctrl, struct v4l2_queryctrl, *);
+V4L2_INT_WRAPPER_1(g_ctrl, struct v4l2_control, *);
+V4L2_INT_WRAPPER_1(s_ctrl, struct v4l2_control, *);
+V4L2_INT_WRAPPER_1(cropcap, struct v4l2_cropcap, *);
+V4L2_INT_WRAPPER_1(g_crop, struct v4l2_crop, *);
+V4L2_INT_WRAPPER_1(s_crop, struct v4l2_crop, *);
+V4L2_INT_WRAPPER_1(g_parm, struct v4l2_streamparm, *);
+V4L2_INT_WRAPPER_1(s_parm, struct v4l2_streamparm, *);
+V4L2_INT_WRAPPER_1(querystd, v4l2_std_id, *);
+V4L2_INT_WRAPPER_1(s_std, v4l2_std_id, *);
+V4L2_INT_WRAPPER_1(s_video_routing, struct v4l2_routing, *);
+
+V4L2_INT_WRAPPER_0(dev_init);
+V4L2_INT_WRAPPER_0(dev_exit);
+V4L2_INT_WRAPPER_1(s_power, enum v4l2_power, );
+V4L2_INT_WRAPPER_1(g_priv, void, *);
+V4L2_INT_WRAPPER_1(g_ifparm, struct v4l2_ifparm, *);
+V4L2_INT_WRAPPER_1(g_needs_reset, void, *);
+V4L2_INT_WRAPPER_1(enum_framesizes, struct v4l2_frmsizeenum, *);
+V4L2_INT_WRAPPER_1(enum_frameintervals, struct v4l2_frmivalenum, *);
+
+V4L2_INT_WRAPPER_0(reset);
+V4L2_INT_WRAPPER_0(init);
+
+#endif
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
new file mode 100644
index 000000000000..b9fe753969bd
--- /dev/null
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -0,0 +1,12 @@
+config VIDEO_OMAP4
+ bool "OMAP 4 Camera support"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ Driver for an OMAP 4 ISS controller.
+
+config VIDEO_OMAP4_DEBUG
+ bool "OMAP 4 Camera debug messages"
+ depends on VIDEO_OMAP4
+ ---help---
+ Enable debug messages on OMAP 4 ISS controller driver.
diff --git a/drivers/staging/media/omap4iss/Makefile b/drivers/staging/media/omap4iss/Makefile
new file mode 100644
index 000000000000..a716ce936cf6
--- /dev/null
+++ b/drivers/staging/media/omap4iss/Makefile
@@ -0,0 +1,6 @@
+# Makefile for OMAP4 ISS driver
+
+omap4-iss-objs += \
+ iss.o iss_csi2.o iss_csiphy.o iss_ipipeif.o iss_ipipe.o iss_resizer.o iss_video.o
+
+obj-$(CONFIG_VIDEO_OMAP4) += omap4-iss.o
diff --git a/drivers/staging/media/omap4iss/TODO b/drivers/staging/media/omap4iss/TODO
new file mode 100644
index 000000000000..fcde88860a2c
--- /dev/null
+++ b/drivers/staging/media/omap4iss/TODO
@@ -0,0 +1,4 @@
+* Make the driver compile as a module
+* Fix FIFO/buffer overflows and underflows
+* Replace dummy resizer code with a real implementation
+* Fix checkpatch errors and warnings
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
new file mode 100644
index 000000000000..61fbfcd13582
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -0,0 +1,1563 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver
+ *
+ * Copyright (C) 2012, Texas Instruments
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#include "iss.h"
+#include "iss_regs.h"
+
+#define ISS_PRINT_REGISTER(iss, name)\
+ dev_dbg(iss->dev, "###ISS " #name "=0x%08x\n", \
+ iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_##name))
+
+static void iss_print_status(struct iss_device *iss)
+{
+ dev_dbg(iss->dev, "-------------ISS HL Register dump-------------\n");
+
+ ISS_PRINT_REGISTER(iss, HL_REVISION);
+ ISS_PRINT_REGISTER(iss, HL_SYSCONFIG);
+ ISS_PRINT_REGISTER(iss, HL_IRQSTATUS(5));
+ ISS_PRINT_REGISTER(iss, HL_IRQENABLE_SET(5));
+ ISS_PRINT_REGISTER(iss, HL_IRQENABLE_CLR(5));
+ ISS_PRINT_REGISTER(iss, CTRL);
+ ISS_PRINT_REGISTER(iss, CLKCTRL);
+ ISS_PRINT_REGISTER(iss, CLKSTAT);
+
+ dev_dbg(iss->dev, "-----------------------------------------------\n");
+}
+
+/*
+ * omap4iss_flush - Post pending L3 bus writes by doing a register readback
+ * @iss: OMAP4 ISS device
+ *
+ * In order to force posting of pending writes, we need to write and
+ * readback the same register, in this case the revision register.
+ *
+ * See this link for reference:
+ * http://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
+ */
+void omap4iss_flush(struct iss_device *iss)
+{
+ iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION, 0);
+ iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION);
+}
+
+/*
+ * iss_isp_enable_interrupts - Enable ISS ISP interrupts.
+ * @iss: OMAP4 ISS device
+ */
+static void omap4iss_isp_enable_interrupts(struct iss_device *iss)
+{
+ static const u32 isp_irq = ISP5_IRQ_OCP_ERR |
+ ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR |
+ ISP5_IRQ_RSZ_FIFO_OVF |
+ ISP5_IRQ_RSZ_INT_DMA |
+ ISP5_IRQ_ISIF_INT(0);
+
+ /* Enable ISP interrupts */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQSTATUS(0), isp_irq);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQENABLE_SET(0),
+ isp_irq);
+}
+
+/*
+ * iss_isp_disable_interrupts - Disable ISS interrupts.
+ * @iss: OMAP4 ISS device
+ */
+static void omap4iss_isp_disable_interrupts(struct iss_device *iss)
+{
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQENABLE_CLR(0), ~0);
+}
+
+/*
+ * iss_enable_interrupts - Enable ISS interrupts.
+ * @iss: OMAP4 ISS device
+ */
+static void iss_enable_interrupts(struct iss_device *iss)
+{
+ static const u32 hl_irq = ISS_HL_IRQ_CSIA | ISS_HL_IRQ_CSIB
+ | ISS_HL_IRQ_ISP(0);
+
+ /* Enable HL interrupts */
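+	/*
+	 * The status write is intended to clear any events already pending
+	 * before the corresponding sources are unmasked below.
+	 */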
+ iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQSTATUS(5), hl_irq);
+ iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQENABLE_SET(5), hl_irq);
+
+ if (iss->regs[OMAP4_ISS_MEM_ISP_SYS1])
+ omap4iss_isp_enable_interrupts(iss);
+}
+
+/*
+ * iss_disable_interrupts - Disable ISS interrupts.
+ * @iss: OMAP4 ISS device
+ */
+static void iss_disable_interrupts(struct iss_device *iss)
+{
+ if (iss->regs[OMAP4_ISS_MEM_ISP_SYS1])
+ omap4iss_isp_disable_interrupts(iss);
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQENABLE_CLR(5), ~0);
+}
+
+int omap4iss_get_external_info(struct iss_pipeline *pipe,
+ struct media_link *link)
+{
+ struct iss_device *iss =
+ container_of(pipe, struct iss_video, pipe)->iss;
+ struct v4l2_subdev_format fmt;
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ if (!pipe->external)
+ return 0;
+
+ if (pipe->external_rate)
+ return 0;
+
+ memset(&fmt, 0, sizeof(fmt));
+
+ fmt.pad = link->source->index;
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(link->sink->entity),
+ pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return -EPIPE;
+
+ pipe->external_bpp = omap4iss_video_format_info(fmt.format.code)->bpp;
+
+ ctrl = v4l2_ctrl_find(pipe->external->ctrl_handler,
+ V4L2_CID_PIXEL_RATE);
+ if (ctrl == NULL) {
+ dev_warn(iss->dev, "no pixel rate control in subdev %s\n",
+ pipe->external->name);
+ return -EPIPE;
+ }
+
+ pipe->external_rate = v4l2_ctrl_g_ctrl_int64(ctrl);
+
+ return 0;
+}
+
+/*
+ * Configure the bridge. Valid inputs are
+ *
+ * IPIPEIF_INPUT_CSI2A: CSI2a receiver
+ * IPIPEIF_INPUT_CSI2B: CSI2b receiver
+ *
+ * The bridge and lane shifter are configured according to the selected input
+ * and the ISP platform data.
+ */
+void omap4iss_configure_bridge(struct iss_device *iss,
+ enum ipipeif_input_entity input)
+{
+ u32 issctrl_val;
+ u32 isp5ctrl_val;
+
+ issctrl_val = iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_CTRL);
+ issctrl_val &= ~ISS_CTRL_INPUT_SEL_MASK;
+ issctrl_val &= ~ISS_CTRL_CLK_DIV_MASK;
+
+ isp5ctrl_val = iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL);
+
+ switch (input) {
+ case IPIPEIF_INPUT_CSI2A:
+ issctrl_val |= ISS_CTRL_INPUT_SEL_CSI2A;
+ break;
+
+ case IPIPEIF_INPUT_CSI2B:
+ issctrl_val |= ISS_CTRL_INPUT_SEL_CSI2B;
+ break;
+
+ default:
+ return;
+ }
+
+ issctrl_val |= ISS_CTRL_SYNC_DETECT_VS_RAISING;
+
+ isp5ctrl_val |= ISP5_CTRL_VD_PULSE_EXT | ISP5_CTRL_PSYNC_CLK_SEL |
+ ISP5_CTRL_SYNC_ENABLE;
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_CTRL, issctrl_val);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL, isp5ctrl_val);
+}
+
+#if defined(DEBUG) && defined(ISS_ISR_DEBUG)
+static void iss_isr_dbg(struct iss_device *iss, u32 irqstatus)
+{
+ static const char * const name[] = {
+ "ISP_0",
+ "ISP_1",
+ "ISP_2",
+ "ISP_3",
+ "CSIA",
+ "CSIB",
+ "CCP2_0",
+ "CCP2_1",
+ "CCP2_2",
+ "CCP2_3",
+ "CBUFF",
+ "BTE",
+ "SIMCOP_0",
+ "SIMCOP_1",
+ "SIMCOP_2",
+ "SIMCOP_3",
+ "CCP2_8",
+ "HS_VS",
+ "18",
+ "19",
+ "20",
+ "21",
+ "22",
+ "23",
+ "24",
+ "25",
+ "26",
+ "27",
+ "28",
+ "29",
+ "30",
+ "31",
+ };
+ unsigned int i;
+
+ dev_dbg(iss->dev, "ISS IRQ: ");
+
+ for (i = 0; i < ARRAY_SIZE(name); i++) {
+ if ((1 << i) & irqstatus)
+ pr_cont("%s ", name[i]);
+ }
+ pr_cont("\n");
+}
+
+static void iss_isp_isr_dbg(struct iss_device *iss, u32 irqstatus)
+{
+ static const char * const name[] = {
+ "ISIF_0",
+ "ISIF_1",
+ "ISIF_2",
+ "ISIF_3",
+ "IPIPEREQ",
+ "IPIPELAST_PIX",
+ "IPIPEDMA",
+ "IPIPEBSC",
+ "IPIPEHST",
+ "IPIPEIF",
+ "AEW",
+ "AF",
+ "H3A",
+ "RSZ_REG",
+ "RSZ_LAST_PIX",
+ "RSZ_DMA",
+ "RSZ_CYC_RZA",
+ "RSZ_CYC_RZB",
+ "RSZ_FIFO_OVF",
+ "RSZ_FIFO_IN_BLK_ERR",
+ "20",
+ "21",
+ "RSZ_EOF0",
+ "RSZ_EOF1",
+ "H3A_EOF",
+ "IPIPE_EOF",
+ "26",
+ "IPIPE_DPC_INI",
+ "IPIPE_DPC_RNEW0",
+ "IPIPE_DPC_RNEW1",
+ "30",
+ "OCP_ERR",
+ };
+ unsigned int i;
+
+ dev_dbg(iss->dev, "ISP IRQ: ");
+
+ for (i = 0; i < ARRAY_SIZE(name); i++) {
+ if ((1 << i) & irqstatus)
+ pr_cont("%s ", name[i]);
+ }
+ pr_cont("\n");
+}
+#endif
+
+/*
+ * iss_isr - Interrupt Service Routine for ISS module.
+ * @irq: Not used currently.
+ * @_iss: Pointer to the OMAP4 ISS device
+ *
+ * Handles the corresponding callback if plugged in.
+ *
+ * Returns IRQ_HANDLED when IRQ was correctly handled, or IRQ_NONE when the
+ * IRQ wasn't handled.
+ */
+static irqreturn_t iss_isr(int irq, void *_iss)
+{
+ static const u32 ipipeif_events = ISP5_IRQ_IPIPEIF_IRQ |
+ ISP5_IRQ_ISIF_INT(0);
+ static const u32 resizer_events = ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR |
+ ISP5_IRQ_RSZ_FIFO_OVF |
+ ISP5_IRQ_RSZ_INT_DMA;
+ struct iss_device *iss = _iss;
+ u32 irqstatus;
+
+ irqstatus = iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQSTATUS(5));
+ iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQSTATUS(5), irqstatus);
+
+ if (irqstatus & ISS_HL_IRQ_CSIA)
+ omap4iss_csi2_isr(&iss->csi2a);
+
+ if (irqstatus & ISS_HL_IRQ_CSIB)
+ omap4iss_csi2_isr(&iss->csi2b);
+
+ if (irqstatus & ISS_HL_IRQ_ISP(0)) {
+ u32 isp_irqstatus = iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1,
+ ISP5_IRQSTATUS(0));
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQSTATUS(0),
+ isp_irqstatus);
+
+ if (isp_irqstatus & ISP5_IRQ_OCP_ERR)
+ dev_dbg(iss->dev, "ISP5 OCP Error!\n");
+
+ if (isp_irqstatus & ipipeif_events) {
+ omap4iss_ipipeif_isr(&iss->ipipeif,
+ isp_irqstatus & ipipeif_events);
+ }
+
+ if (isp_irqstatus & resizer_events)
+ omap4iss_resizer_isr(&iss->resizer,
+ isp_irqstatus & resizer_events);
+
+#if defined(DEBUG) && defined(ISS_ISR_DEBUG)
+ iss_isp_isr_dbg(iss, isp_irqstatus);
+#endif
+ }
+
+ omap4iss_flush(iss);
+
+#if defined(DEBUG) && defined(ISS_ISR_DEBUG)
+ iss_isr_dbg(iss, irqstatus);
+#endif
+
+ return IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline power management
+ *
+ * Entities must be powered up when part of a pipeline that contains at least
+ * one open video device node.
+ *
+ * To achieve this, the entity use_count field is used to track the number of
+ * users. For entities corresponding to video device nodes, the use_count field
+ * stores the number of users of that node. For entities corresponding to
+ * subdevs, the use_count field stores the total number of users of all video
+ * device nodes in the pipeline.
+ *
+ * The omap4iss_pipeline_pm_use() function must be called in the open() and
+ * close() handlers of video device nodes. It increments or decrements the use
+ * count of all subdev entities in the pipeline.
+ *
+ * To react to link management on powered pipelines, the link setup notification
+ * callback updates the use count of all entities in the source and sink sides
+ * of the link.
+ */
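+
+/*
+ * Illustrative usage sketch (not part of the driver): a video device node is
+ * expected to bracket its open() and release() handlers with
+ * omap4iss_pipeline_pm_use(), for instance
+ *
+ *	ret = omap4iss_pipeline_pm_use(&video->video.entity, 1);   (in open)
+ *	...
+ *	omap4iss_pipeline_pm_use(&video->video.entity, 0);         (in release)
+ *
+ * where 'video' stands for the video node structure; the actual callers live
+ * in the video node code.
+ */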
+
+/*
+ * iss_pipeline_pm_use_count - Count the number of users of a pipeline
+ * @entity: The entity
+ *
+ * Return the total number of users of all video device nodes in the pipeline.
+ */
+static int iss_pipeline_pm_use_count(struct media_entity *entity)
+{
+ struct media_entity_graph graph;
+ int use = 0;
+
+ media_entity_graph_walk_start(&graph, entity);
+
+ while ((entity = media_entity_graph_walk_next(&graph))) {
+ if (media_entity_type(entity) == MEDIA_ENT_T_DEVNODE)
+ use += entity->use_count;
+ }
+
+ return use;
+}
+
+/*
+ * iss_pipeline_pm_power_one - Apply power change to an entity
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Change the entity use count by @change. If the entity is a subdev, update
+ * its power state by calling the core::s_power operation when the use count
+ * goes from 0 to non-zero or from non-zero to 0.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int iss_pipeline_pm_power_one(struct media_entity *entity, int change)
+{
+ struct v4l2_subdev *subdev;
+
+ subdev = media_entity_type(entity) == MEDIA_ENT_T_V4L2_SUBDEV
+ ? media_entity_to_v4l2_subdev(entity) : NULL;
+
+ if (entity->use_count == 0 && change > 0 && subdev != NULL) {
+ int ret;
+
+ ret = v4l2_subdev_call(subdev, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+ }
+
+ entity->use_count += change;
+ WARN_ON(entity->use_count < 0);
+
+ if (entity->use_count == 0 && change < 0 && subdev != NULL)
+ v4l2_subdev_call(subdev, core, s_power, 0);
+
+ return 0;
+}
+
+/*
+ * iss_pipeline_pm_power - Apply power change to all entities in a pipeline
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Walk the pipeline to update the use count and the power state of all non-node
+ * entities.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int iss_pipeline_pm_power(struct media_entity *entity, int change)
+{
+ struct media_entity_graph graph;
+ struct media_entity *first = entity;
+ int ret = 0;
+
+ if (!change)
+ return 0;
+
+ media_entity_graph_walk_start(&graph, entity);
+
+ while (!ret && (entity = media_entity_graph_walk_next(&graph)))
+ if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
+ ret = iss_pipeline_pm_power_one(entity, change);
+
+ if (!ret)
+ return 0;
+
+ media_entity_graph_walk_start(&graph, first);
+
+ while ((first = media_entity_graph_walk_next(&graph))
+ && first != entity)
+ if (media_entity_type(first) != MEDIA_ENT_T_DEVNODE)
+ iss_pipeline_pm_power_one(first, -change);
+
+ return ret;
+}
+
+/*
+ * omap4iss_pipeline_pm_use - Update the use count of an entity
+ * @entity: The entity
+ * @use: Use (1) or stop using (0) the entity
+ *
+ * Update the use count of all entities in the pipeline and power entities on or
+ * off accordingly.
+ *
+ * Return 0 on success or a negative error code on failure. Powering entities
+ * off is assumed to never fail. No failure can occur when the use parameter is
+ * set to 0.
+ */
+int omap4iss_pipeline_pm_use(struct media_entity *entity, int use)
+{
+ int change = use ? 1 : -1;
+ int ret;
+
+ mutex_lock(&entity->parent->graph_mutex);
+
+ /* Apply use count to node. */
+ entity->use_count += change;
+ WARN_ON(entity->use_count < 0);
+
+ /* Apply power change to connected non-nodes. */
+ ret = iss_pipeline_pm_power(entity, change);
+ if (ret < 0)
+ entity->use_count -= change;
+
+ mutex_unlock(&entity->parent->graph_mutex);
+
+ return ret;
+}
+
+/*
+ * iss_pipeline_link_notify - Link management notification callback
+ * @link: The link
+ * @flags: New link flags that will be applied
+ *
+ * React to link management on powered pipelines by updating the use count of
+ * all entities in the source and sink sides of the link. Entities are powered
+ * on or off accordingly.
+ *
+ * Return 0 on success or a negative error code on failure. Powering entities
+ * off is assumed to never fail. This function will not fail for disconnection
+ * events.
+ */
+static int iss_pipeline_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct media_entity *source = link->source->entity;
+ struct media_entity *sink = link->sink->entity;
+ int source_use = iss_pipeline_pm_use_count(source);
+ int sink_use = iss_pipeline_pm_use_count(sink);
+ int ret;
+
+ if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ !(link->flags & MEDIA_LNK_FL_ENABLED)) {
+ /* Powering off entities is assumed to never fail. */
+ iss_pipeline_pm_power(source, -sink_use);
+ iss_pipeline_pm_power(sink, -source_use);
+ return 0;
+ }
+
+ if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ (flags & MEDIA_LNK_FL_ENABLED)) {
+ ret = iss_pipeline_pm_power(source, sink_use);
+ if (ret < 0)
+ return ret;
+
+ ret = iss_pipeline_pm_power(sink, source_use);
+ if (ret < 0)
+ iss_pipeline_pm_power(source, -sink_use);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline stream management
+ */
+
+/*
+ * iss_pipeline_enable - Enable streaming on a pipeline
+ * @pipe: ISS pipeline
+ * @mode: Stream mode (single shot or continuous)
+ *
+ * Walk the entities chain starting at the pipeline output video node and start
+ * all modules in the chain in the given mode.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise.
+ */
+static int iss_pipeline_enable(struct iss_pipeline *pipe,
+ enum iss_pipeline_stream_state mode)
+{
+ struct iss_device *iss = pipe->output->iss;
+ struct media_entity *entity;
+ struct media_pad *pad;
+ struct v4l2_subdev *subdev;
+ unsigned long flags;
+ int ret;
+
+ /* If one of the entities in the pipeline has crashed, it will not work
+ * properly. Refuse to start streaming in that case. This check must be
+ * performed before the loop below to avoid starting entities if the
+ * pipeline won't start anyway (those entities would then likely fail to
+ * stop, making the problem worse).
+ */
+ if (pipe->entities & iss->crashed)
+ return -EIO;
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state &= ~(ISS_PIPELINE_IDLE_INPUT | ISS_PIPELINE_IDLE_OUTPUT);
+ spin_unlock_irqrestore(&pipe->lock, flags);
+
+ pipe->do_propagation = false;
+
+ entity = &pipe->output->video.entity;
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_entity_remote_pad(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, mode);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+ }
+ iss_print_status(pipe->output->iss);
+ return 0;
+}
+
+/*
+ * iss_pipeline_disable - Disable streaming on a pipeline
+ * @pipe: ISS pipeline
+ *
+ * Walk the entities chain starting at the pipeline output video node and stop
+ * all modules in the chain. Wait synchronously for the modules to be stopped if
+ * necessary.
+ */
+static int iss_pipeline_disable(struct iss_pipeline *pipe)
+{
+ struct iss_device *iss = pipe->output->iss;
+ struct media_entity *entity;
+ struct media_pad *pad;
+ struct v4l2_subdev *subdev;
+ int failure = 0;
+ int ret;
+
+ entity = &pipe->output->video.entity;
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_entity_remote_pad(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, 0);
+ if (ret < 0) {
+ dev_dbg(iss->dev, "%s: module stop timeout.\n",
+ subdev->name);
+ /* If the entity failed to stop, assume it has
+ * crashed. Mark it as such; the ISS will be reset when
+ * applications release it.
+ */
+ iss->crashed |= 1U << subdev->entity.id;
+ failure = -ETIMEDOUT;
+ }
+ }
+
+ return failure;
+}
+
+/*
+ * omap4iss_pipeline_set_stream - Enable/disable streaming on a pipeline
+ * @pipe: ISS pipeline
+ * @state: Stream state (stopped, single shot or continuous)
+ *
+ * Set the pipeline to the given stream state. Pipelines can be started in
+ * single-shot or continuous mode.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise. The pipeline state is not updated when the operation
+ * fails, except when stopping the pipeline.
+ */
+int omap4iss_pipeline_set_stream(struct iss_pipeline *pipe,
+ enum iss_pipeline_stream_state state)
+{
+ int ret;
+
+ if (state == ISS_PIPELINE_STREAM_STOPPED)
+ ret = iss_pipeline_disable(pipe);
+ else
+ ret = iss_pipeline_enable(pipe, state);
+
+ if (ret == 0 || state == ISS_PIPELINE_STREAM_STOPPED)
+ pipe->stream_state = state;
+
+ return ret;
+}
+
+/*
+ * omap4iss_pipeline_cancel_stream - Cancel stream on a pipeline
+ * @pipe: ISS pipeline
+ *
+ * Cancelling a stream marks all buffers on all video nodes in the pipeline as
+ * erroneous and makes sure no new buffer can be queued. This function is called
+ * when a fatal error that prevents any further operation on the pipeline
+ * occurs.
+ */
+void omap4iss_pipeline_cancel_stream(struct iss_pipeline *pipe)
+{
+ if (pipe->input)
+ omap4iss_video_cancel_stream(pipe->input);
+ if (pipe->output)
+ omap4iss_video_cancel_stream(pipe->output);
+}
+
+/*
+ * iss_pipeline_is_last - Verify if entity has an enabled link to the output
+ * video node
+ * @me: ISS module's media entity
+ *
+ * Returns 1 if the entity has an enabled link to the output video node or 0
+ * otherwise. This holds only as long as the pipeline has no more than one
+ * output node.
+ */
+static int iss_pipeline_is_last(struct media_entity *me)
+{
+ struct iss_pipeline *pipe;
+ struct media_pad *pad;
+
+ if (!me->pipe)
+ return 0;
+ pipe = to_iss_pipeline(me);
+ if (pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+ pad = media_entity_remote_pad(&pipe->output->pad);
+ return pad->entity == me;
+}
+
+static int iss_reset(struct iss_device *iss)
+{
+ unsigned long timeout = 0;
+
+ iss_reg_set(iss, OMAP4_ISS_MEM_TOP, ISS_HL_SYSCONFIG,
+ ISS_HL_SYSCONFIG_SOFTRESET);
+
+ while (iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_SYSCONFIG) &
+ ISS_HL_SYSCONFIG_SOFTRESET) {
+ if (timeout++ > 100) {
+ dev_alert(iss->dev, "cannot reset ISS\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(10, 10);
+ }
+
+ iss->crashed = 0;
+ return 0;
+}
+
+static int iss_isp_reset(struct iss_device *iss)
+{
+ unsigned long timeout = 0;
+
+ /* First, ensure that the ISP is IDLE (no transactions happening) */
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_SYSCONFIG,
+ ISP5_SYSCONFIG_STANDBYMODE_MASK,
+ ISP5_SYSCONFIG_STANDBYMODE_SMART);
+
+ iss_reg_set(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL, ISP5_CTRL_MSTANDBY);
+
+ for (;;) {
+ if (iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL) &
+ ISP5_CTRL_MSTANDBY_WAIT)
+ break;
+ if (timeout++ > 1000) {
+ dev_alert(iss->dev, "cannot set ISP5 to standby\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(1000, 1500);
+ }
+
+ /* Now finally, do the reset */
+ iss_reg_set(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_SYSCONFIG,
+ ISP5_SYSCONFIG_SOFTRESET);
+
+ timeout = 0;
+ while (iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_SYSCONFIG) &
+ ISP5_SYSCONFIG_SOFTRESET) {
+ if (timeout++ > 1000) {
+ dev_alert(iss->dev, "cannot reset ISP5\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(1000, 1500);
+ }
+
+ return 0;
+}
+
+/*
+ * omap4iss_module_sync_idle - Helper to sync a module with its idle state
+ * @me: ISS submodule's media entity
+ * @wait: ISS submodule's wait queue for streamoff/interrupt synchronization
+ * @stopping: flag which tells module wants to stop
+ *
+ * This function checks whether the ISS submodule needs to wait for the next
+ * interrupt. If so, it makes the caller sleep while waiting for that event.
+ */
+int omap4iss_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
+ atomic_t *stopping)
+{
+ struct iss_pipeline *pipe = to_iss_pipeline(me);
+ struct iss_video *video = pipe->output;
+ unsigned long flags;
+
+ if (pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED ||
+ (pipe->stream_state == ISS_PIPELINE_STREAM_SINGLESHOT &&
+ !iss_pipeline_ready(pipe)))
+ return 0;
+
+ /*
+ * atomic_set() doesn't imply a memory barrier on ARM in SMP configurations,
+ * so issue an explicit smp_wmb() here to avoid race conditions.
+ */
+ atomic_set(stopping, 1);
+ smp_wmb();
+
+ /*
+ * If the module is the last one in the pipeline, it writes to memory. In
+ * that case, check whether the module is already paused due to a DMA queue
+ * underrun, or whether it has to wait for the next interrupt before it can
+ * become idle.
+ * If it isn't the last one, the function won't sleep, but *stopping is
+ * still set so that the next submodule's interrupt handler knows that this
+ * module wants to become idle.
+ */
+ if (!iss_pipeline_is_last(me))
+ return 0;
+
+ spin_lock_irqsave(&video->qlock, flags);
+ if (video->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
+ spin_unlock_irqrestore(&video->qlock, flags);
+ atomic_set(stopping, 0);
+ smp_wmb();
+ return 0;
+ }
+ spin_unlock_irqrestore(&video->qlock, flags);
+ if (!wait_event_timeout(*wait, !atomic_read(stopping),
+ msecs_to_jiffies(1000))) {
+ atomic_set(stopping, 0);
+ smp_wmb();
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * omap4iss_module_sync_is_stopping - Helper to verify if the module was stopping
+ * @wait: ISS submodule's wait queue for streamoff/interrupt synchronization
+ * @stopping: flag which tells module wants to stop
+ *
+ * This function checks whether the ISS submodule was stopping. If so, it
+ * notifies the caller by setting *stopping to 0 and waking up the wait queue.
+ * Returns 1 if it was stopping, or 0 otherwise.
+ */
+int omap4iss_module_sync_is_stopping(wait_queue_head_t *wait,
+ atomic_t *stopping)
+{
+ if (atomic_cmpxchg(stopping, 1, 0)) {
+ wake_up(wait);
+ return 1;
+ }
+
+ return 0;
+}
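+
+/*
+ * A minimal sketch of how the two helpers above pair up ('module' is a
+ * placeholder for a submodule structure, not a driver symbol): the streamoff
+ * path waits for the module to become idle,
+ *
+ *	omap4iss_module_sync_idle(&module->subdev.entity, &module->wait,
+ *				  &module->stopping);
+ *
+ * while the module's interrupt handler reports completion and wakes up the
+ * waiter,
+ *
+ *	if (omap4iss_module_sync_is_stopping(&module->wait, &module->stopping))
+ *		return;
+ */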
+
+/* --------------------------------------------------------------------------
+ * Clock management
+ */
+
+#define ISS_CLKCTRL_MASK (ISS_CLKCTRL_CSI2_A |\
+ ISS_CLKCTRL_CSI2_B |\
+ ISS_CLKCTRL_ISP)
+
+static int __iss_subclk_update(struct iss_device *iss)
+{
+ u32 clk = 0;
+ int ret = 0, timeout = 1000;
+
+ if (iss->subclk_resources & OMAP4_ISS_SUBCLK_CSI2_A)
+ clk |= ISS_CLKCTRL_CSI2_A;
+
+ if (iss->subclk_resources & OMAP4_ISS_SUBCLK_CSI2_B)
+ clk |= ISS_CLKCTRL_CSI2_B;
+
+ if (iss->subclk_resources & OMAP4_ISS_SUBCLK_ISP)
+ clk |= ISS_CLKCTRL_ISP;
+
+ iss_reg_update(iss, OMAP4_ISS_MEM_TOP, ISS_CLKCTRL,
+ ISS_CLKCTRL_MASK, clk);
+
+ /* Wait for HW assertion */
+ while (--timeout > 0) {
+ udelay(1);
+ if ((iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_CLKSTAT) &
+ ISS_CLKCTRL_MASK) == clk)
+ break;
+ }
+
+ if (!timeout)
+ ret = -EBUSY;
+
+ return ret;
+}
+
+int omap4iss_subclk_enable(struct iss_device *iss,
+ enum iss_subclk_resource res)
+{
+ iss->subclk_resources |= res;
+
+ return __iss_subclk_update(iss);
+}
+
+int omap4iss_subclk_disable(struct iss_device *iss,
+ enum iss_subclk_resource res)
+{
+ iss->subclk_resources &= ~res;
+
+ return __iss_subclk_update(iss);
+}
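+
+/*
+ * Usage sketch (illustrative only): a submodule enables its interface clock
+ * before programming it and releases it when done, e.g. for the CSI2-A
+ * receiver
+ *
+ *	omap4iss_subclk_enable(iss, OMAP4_ISS_SUBCLK_CSI2_A);
+ *	...
+ *	omap4iss_subclk_disable(iss, OMAP4_ISS_SUBCLK_CSI2_A);
+ */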
+
+#define ISS_ISP5_CLKCTRL_MASK (ISP5_CTRL_BL_CLK_ENABLE |\
+ ISP5_CTRL_ISIF_CLK_ENABLE |\
+ ISP5_CTRL_H3A_CLK_ENABLE |\
+ ISP5_CTRL_RSZ_CLK_ENABLE |\
+ ISP5_CTRL_IPIPE_CLK_ENABLE |\
+ ISP5_CTRL_IPIPEIF_CLK_ENABLE)
+
+static void __iss_isp_subclk_update(struct iss_device *iss)
+{
+ u32 clk = 0;
+
+ if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_ISIF)
+ clk |= ISP5_CTRL_ISIF_CLK_ENABLE;
+
+ if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_H3A)
+ clk |= ISP5_CTRL_H3A_CLK_ENABLE;
+
+ if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_RSZ)
+ clk |= ISP5_CTRL_RSZ_CLK_ENABLE;
+
+ if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_IPIPE)
+ clk |= ISP5_CTRL_IPIPE_CLK_ENABLE;
+
+ if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_IPIPEIF)
+ clk |= ISP5_CTRL_IPIPEIF_CLK_ENABLE;
+
+ if (clk)
+ clk |= ISP5_CTRL_BL_CLK_ENABLE;
+
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL,
+ ISS_ISP5_CLKCTRL_MASK, clk);
+}
+
+void omap4iss_isp_subclk_enable(struct iss_device *iss,
+ enum iss_isp_subclk_resource res)
+{
+ iss->isp_subclk_resources |= res;
+
+ __iss_isp_subclk_update(iss);
+}
+
+void omap4iss_isp_subclk_disable(struct iss_device *iss,
+ enum iss_isp_subclk_resource res)
+{
+ iss->isp_subclk_resources &= ~res;
+
+ __iss_isp_subclk_update(iss);
+}
+
+/*
+ * iss_enable_clocks - Enable ISS clocks
+ * @iss: OMAP4 ISS device
+ *
+ * Return 0 if successful, or the clk_enable() return value if any of them fails.
+ */
+static int iss_enable_clocks(struct iss_device *iss)
+{
+ int ret;
+
+ ret = clk_enable(iss->iss_fck);
+ if (ret) {
+ dev_err(iss->dev, "clk_enable iss_fck failed\n");
+ return ret;
+ }
+
+ ret = clk_enable(iss->iss_ctrlclk);
+ if (ret) {
+ dev_err(iss->dev, "clk_enable iss_ctrlclk failed\n");
+ clk_disable(iss->iss_fck);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * iss_disable_clocks - Disable ISS clocks
+ * @iss: OMAP4 ISS device
+ */
+static void iss_disable_clocks(struct iss_device *iss)
+{
+ clk_disable(iss->iss_ctrlclk);
+ clk_disable(iss->iss_fck);
+}
+
+static void iss_put_clocks(struct iss_device *iss)
+{
+ if (iss->iss_fck) {
+ clk_put(iss->iss_fck);
+ iss->iss_fck = NULL;
+ }
+
+ if (iss->iss_ctrlclk) {
+ clk_put(iss->iss_ctrlclk);
+ iss->iss_ctrlclk = NULL;
+ }
+}
+
+static int iss_get_clocks(struct iss_device *iss)
+{
+ iss->iss_fck = clk_get(iss->dev, "iss_fck");
+ if (IS_ERR(iss->iss_fck)) {
+ int ret = PTR_ERR(iss->iss_fck);
+
+ dev_err(iss->dev, "Unable to get iss_fck clock info\n");
+ iss->iss_fck = NULL;
+ iss_put_clocks(iss);
+ return ret;
+ }
+
+ iss->iss_ctrlclk = clk_get(iss->dev, "iss_ctrlclk");
+ if (IS_ERR(iss->iss_ctrlclk)) {
+ int ret = PTR_ERR(iss->iss_ctrlclk);
+
+ dev_err(iss->dev, "Unable to get iss_ctrlclk clock info\n");
+ iss->iss_ctrlclk = NULL;
+ iss_put_clocks(iss);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * omap4iss_get - Acquire the ISS resource.
+ *
+ * Initializes the clocks for the first acquire.
+ *
+ * Increment the reference count on the ISS. If the first reference is taken,
+ * enable clocks and power-up all submodules.
+ *
+ * Return a pointer to the ISS device structure, or NULL if an error occurred.
+ */
+struct iss_device *omap4iss_get(struct iss_device *iss)
+{
+ struct iss_device *__iss = iss;
+
+ if (iss == NULL)
+ return NULL;
+
+ mutex_lock(&iss->iss_mutex);
+ if (iss->ref_count > 0)
+ goto out;
+
+ if (iss_enable_clocks(iss) < 0) {
+ __iss = NULL;
+ goto out;
+ }
+
+ iss_enable_interrupts(iss);
+
+out:
+ if (__iss != NULL)
+ iss->ref_count++;
+ mutex_unlock(&iss->iss_mutex);
+
+ return __iss;
+}
+
+/*
+ * omap4iss_put - Release the ISS
+ *
+ * Decrement the reference count on the ISS. If the last reference is released,
+ * power-down all submodules, disable clocks and free temporary buffers.
+ */
+void omap4iss_put(struct iss_device *iss)
+{
+ if (iss == NULL)
+ return;
+
+ mutex_lock(&iss->iss_mutex);
+ BUG_ON(iss->ref_count == 0);
+ if (--iss->ref_count == 0) {
+ iss_disable_interrupts(iss);
+ /* Reset the ISS if an entity has failed to stop. This is the
+ * only way to recover from such conditions, although it would
+ * be worth investigating whether resetting only the ISP could
+ * fix the problem in some cases.
+ */
+ if (iss->crashed)
+ iss_reset(iss);
+ iss_disable_clocks(iss);
+ }
+ mutex_unlock(&iss->iss_mutex);
+}
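+
+/*
+ * Illustrative pairing of omap4iss_get()/omap4iss_put() (not driver code):
+ * any path that needs the ISS powered up brackets its work with the two
+ * calls, for example
+ *
+ *	struct iss_device *iss = omap4iss_get(video->iss);
+ *
+ *	if (!iss)
+ *		return -EBUSY;
+ *	...
+ *	omap4iss_put(iss);
+ *
+ * where 'video' and the -EBUSY error code are placeholders.
+ */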
+
+static int iss_map_mem_resource(struct platform_device *pdev,
+ struct iss_device *iss,
+ enum iss_mem_resources res)
+{
+ struct resource *mem;
+
+ /* request the mem region for the camera registers */
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, res);
+ if (!mem) {
+ dev_err(iss->dev, "no mem resource?\n");
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
+ dev_err(iss->dev,
+ "cannot reserve camera register I/O region\n");
+ return -ENODEV;
+ }
+ iss->res[res] = mem;
+
+ /* map the region */
+ iss->regs[res] = ioremap_nocache(mem->start, resource_size(mem));
+ if (!iss->regs[res]) {
+ dev_err(iss->dev, "cannot map camera register I/O region\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void iss_unregister_entities(struct iss_device *iss)
+{
+ omap4iss_resizer_unregister_entities(&iss->resizer);
+ omap4iss_ipipe_unregister_entities(&iss->ipipe);
+ omap4iss_ipipeif_unregister_entities(&iss->ipipeif);
+ omap4iss_csi2_unregister_entities(&iss->csi2a);
+ omap4iss_csi2_unregister_entities(&iss->csi2b);
+
+ v4l2_device_unregister(&iss->v4l2_dev);
+ media_device_unregister(&iss->media_dev);
+}
+
+/*
+ * iss_register_subdev_group - Register a group of subdevices
+ * @iss: OMAP4 ISS device
+ * @board_info: I2C subdevs board information array
+ *
+ * Register all I2C subdevices in the board_info array. The array must be
+ * terminated by a NULL entry, and the first entry must be the sensor.
+ *
+ * Return a pointer to the sensor media entity if it has been successfully
+ * registered, or NULL otherwise.
+ */
+static struct v4l2_subdev *
+iss_register_subdev_group(struct iss_device *iss,
+ struct iss_subdev_i2c_board_info *board_info)
+{
+ struct v4l2_subdev *sensor = NULL;
+ unsigned int first;
+
+ if (board_info->board_info == NULL)
+ return NULL;
+
+ for (first = 1; board_info->board_info; ++board_info, first = 0) {
+ struct v4l2_subdev *subdev;
+ struct i2c_adapter *adapter;
+
+ adapter = i2c_get_adapter(board_info->i2c_adapter_id);
+ if (adapter == NULL) {
+ dev_err(iss->dev,
+ "%s: Unable to get I2C adapter %d for device %s\n",
+ __func__, board_info->i2c_adapter_id,
+ board_info->board_info->type);
+ continue;
+ }
+
+ subdev = v4l2_i2c_new_subdev_board(&iss->v4l2_dev, adapter,
+ board_info->board_info, NULL);
+ if (subdev == NULL) {
+ dev_err(iss->dev, "%s: Unable to register subdev %s\n",
+ __func__, board_info->board_info->type);
+ continue;
+ }
+
+ if (first)
+ sensor = subdev;
+ }
+
+ return sensor;
+}
+
+static int iss_register_entities(struct iss_device *iss)
+{
+ struct iss_platform_data *pdata = iss->pdata;
+ struct iss_v4l2_subdevs_group *subdevs;
+ int ret;
+
+ iss->media_dev.dev = iss->dev;
+ strlcpy(iss->media_dev.model, "TI OMAP4 ISS",
+ sizeof(iss->media_dev.model));
+ iss->media_dev.hw_revision = iss->revision;
+ iss->media_dev.link_notify = iss_pipeline_link_notify;
+ ret = media_device_register(&iss->media_dev);
+ if (ret < 0) {
+ dev_err(iss->dev, "%s: Media device registration failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ iss->v4l2_dev.mdev = &iss->media_dev;
+ ret = v4l2_device_register(iss->dev, &iss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(iss->dev, "%s: V4L2 device registration failed (%d)\n",
+ __func__, ret);
+ goto done;
+ }
+
+ /* Register internal entities */
+ ret = omap4iss_csi2_register_entities(&iss->csi2a, &iss->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap4iss_csi2_register_entities(&iss->csi2b, &iss->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap4iss_ipipeif_register_entities(&iss->ipipeif, &iss->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap4iss_ipipe_register_entities(&iss->ipipe, &iss->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap4iss_resizer_register_entities(&iss->resizer, &iss->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ /* Register external entities */
+ for (subdevs = pdata->subdevs; subdevs && subdevs->subdevs; ++subdevs) {
+ struct v4l2_subdev *sensor;
+ struct media_entity *input;
+ unsigned int flags;
+ unsigned int pad;
+
+ sensor = iss_register_subdev_group(iss, subdevs->subdevs);
+ if (sensor == NULL)
+ continue;
+
+ sensor->host_priv = subdevs;
+
+ /* Connect the sensor to the correct interface module.
+ * CSI2a receiver through CSIPHY1, or
+ * CSI2b receiver through CSIPHY2
+ */
+ switch (subdevs->interface) {
+ case ISS_INTERFACE_CSI2A_PHY1:
+ input = &iss->csi2a.subdev.entity;
+ pad = CSI2_PAD_SINK;
+ flags = MEDIA_LNK_FL_IMMUTABLE
+ | MEDIA_LNK_FL_ENABLED;
+ break;
+
+ case ISS_INTERFACE_CSI2B_PHY2:
+ input = &iss->csi2b.subdev.entity;
+ pad = CSI2_PAD_SINK;
+ flags = MEDIA_LNK_FL_IMMUTABLE
+ | MEDIA_LNK_FL_ENABLED;
+ break;
+
+ default:
+ dev_err(iss->dev, "%s: invalid interface type %u\n",
+ __func__, subdevs->interface);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = media_entity_create_link(&sensor->entity, 0, input, pad,
+ flags);
+ if (ret < 0)
+ goto done;
+ }
+
+ ret = v4l2_device_register_subdev_nodes(&iss->v4l2_dev);
+
+done:
+ if (ret < 0)
+ iss_unregister_entities(iss);
+
+ return ret;
+}
+
+static void iss_cleanup_modules(struct iss_device *iss)
+{
+ omap4iss_csi2_cleanup(iss);
+ omap4iss_ipipeif_cleanup(iss);
+ omap4iss_ipipe_cleanup(iss);
+ omap4iss_resizer_cleanup(iss);
+}
+
+static int iss_initialize_modules(struct iss_device *iss)
+{
+ int ret;
+
+ ret = omap4iss_csiphy_init(iss);
+ if (ret < 0) {
+ dev_err(iss->dev, "CSI PHY initialization failed\n");
+ goto error_csiphy;
+ }
+
+ ret = omap4iss_csi2_init(iss);
+ if (ret < 0) {
+ dev_err(iss->dev, "CSI2 initialization failed\n");
+ goto error_csi2;
+ }
+
+ ret = omap4iss_ipipeif_init(iss);
+ if (ret < 0) {
+ dev_err(iss->dev, "ISP IPIPEIF initialization failed\n");
+ goto error_ipipeif;
+ }
+
+ ret = omap4iss_ipipe_init(iss);
+ if (ret < 0) {
+ dev_err(iss->dev, "ISP IPIPE initialization failed\n");
+ goto error_ipipe;
+ }
+
+ ret = omap4iss_resizer_init(iss);
+ if (ret < 0) {
+ dev_err(iss->dev, "ISP RESIZER initialization failed\n");
+ goto error_resizer;
+ }
+
+ /* Connect the submodules. */
+ ret = media_entity_create_link(
+ &iss->csi2a.subdev.entity, CSI2_PAD_SOURCE,
+ &iss->ipipeif.subdev.entity, IPIPEIF_PAD_SINK, 0);
+ if (ret < 0)
+ goto error_link;
+
+ ret = media_entity_create_link(
+ &iss->csi2b.subdev.entity, CSI2_PAD_SOURCE,
+ &iss->ipipeif.subdev.entity, IPIPEIF_PAD_SINK, 0);
+ if (ret < 0)
+ goto error_link;
+
+ ret = media_entity_create_link(
+ &iss->ipipeif.subdev.entity, IPIPEIF_PAD_SOURCE_VP,
+ &iss->resizer.subdev.entity, RESIZER_PAD_SINK, 0);
+ if (ret < 0)
+ goto error_link;
+
+ ret = media_entity_create_link(
+ &iss->ipipeif.subdev.entity, IPIPEIF_PAD_SOURCE_VP,
+ &iss->ipipe.subdev.entity, IPIPE_PAD_SINK, 0);
+ if (ret < 0)
+ goto error_link;
+
+ ret = media_entity_create_link(
+ &iss->ipipe.subdev.entity, IPIPE_PAD_SOURCE_VP,
+ &iss->resizer.subdev.entity, RESIZER_PAD_SINK, 0);
+ if (ret < 0)
+ goto error_link;
+
+ return 0;
+
+error_link:
+ omap4iss_resizer_cleanup(iss);
+error_resizer:
+ omap4iss_ipipe_cleanup(iss);
+error_ipipe:
+ omap4iss_ipipeif_cleanup(iss);
+error_ipipeif:
+ omap4iss_csi2_cleanup(iss);
+error_csi2:
+error_csiphy:
+ return ret;
+}
+
+static int iss_probe(struct platform_device *pdev)
+{
+ struct iss_platform_data *pdata = pdev->dev.platform_data;
+ struct iss_device *iss;
+ unsigned int i;
+ int ret;
+
+ if (pdata == NULL)
+ return -EINVAL;
+
+ iss = kzalloc(sizeof(*iss), GFP_KERNEL);
+ if (!iss) {
+ dev_err(&pdev->dev, "Could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&iss->iss_mutex);
+
+ iss->dev = &pdev->dev;
+ iss->pdata = pdata;
+
+ iss->raw_dmamask = DMA_BIT_MASK(32);
+ iss->dev->dma_mask = &iss->raw_dmamask;
+ iss->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+ platform_set_drvdata(pdev, iss);
+
+ /* Clocks */
+ ret = iss_map_mem_resource(pdev, iss, OMAP4_ISS_MEM_TOP);
+ if (ret < 0)
+ goto error;
+
+ ret = iss_get_clocks(iss);
+ if (ret < 0)
+ goto error;
+
+ if (omap4iss_get(iss) == NULL) {
+ ret = -EBUSY;
+ goto error;
+ }
+
+ ret = iss_reset(iss);
+ if (ret < 0)
+ goto error_iss;
+
+ iss->revision = iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION);
+ dev_info(iss->dev, "Revision %08x found\n", iss->revision);
+
+ for (i = 1; i < OMAP4_ISS_MEM_LAST; i++) {
+ ret = iss_map_mem_resource(pdev, iss, i);
+ if (ret)
+ goto error_iss;
+ }
+
+ /* Configure BTE BW_LIMITER field to max recommended value (1 GB) */
+ iss_reg_update(iss, OMAP4_ISS_MEM_BTE, BTE_CTRL,
+ BTE_CTRL_BW_LIMITER_MASK,
+ 18 << BTE_CTRL_BW_LIMITER_SHIFT);
+
+ /* Perform ISP reset */
+ ret = omap4iss_subclk_enable(iss, OMAP4_ISS_SUBCLK_ISP);
+ if (ret < 0)
+ goto error_iss;
+
+ ret = iss_isp_reset(iss);
+ if (ret < 0)
+ goto error_iss;
+
+ dev_info(iss->dev, "ISP Revision %08x found\n",
+ iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_REVISION));
+
+ /* Interrupt */
+ iss->irq_num = platform_get_irq(pdev, 0);
+ if (iss->irq_num <= 0) {
+ dev_err(iss->dev, "No IRQ resource\n");
+ ret = -ENODEV;
+ goto error_iss;
+ }
+
+ if (request_irq(iss->irq_num, iss_isr, IRQF_SHARED, "OMAP4 ISS", iss)) {
+ dev_err(iss->dev, "Unable to request IRQ\n");
+ ret = -EINVAL;
+ goto error_iss;
+ }
+
+ /* Entities */
+ ret = iss_initialize_modules(iss);
+ if (ret < 0)
+ goto error_irq;
+
+ ret = iss_register_entities(iss);
+ if (ret < 0)
+ goto error_modules;
+
+ omap4iss_put(iss);
+
+ return 0;
+
+error_modules:
+ iss_cleanup_modules(iss);
+error_irq:
+ free_irq(iss->irq_num, iss);
+error_iss:
+ omap4iss_put(iss);
+error:
+ iss_put_clocks(iss);
+
+ for (i = 0; i < OMAP4_ISS_MEM_LAST; i++) {
+ if (iss->regs[i]) {
+ iounmap(iss->regs[i]);
+ iss->regs[i] = NULL;
+ }
+
+ if (iss->res[i]) {
+ release_mem_region(iss->res[i]->start,
+ resource_size(iss->res[i]));
+ iss->res[i] = NULL;
+ }
+ }
+ platform_set_drvdata(pdev, NULL);
+
+ mutex_destroy(&iss->iss_mutex);
+ kfree(iss);
+
+ return ret;
+}
+
+static int iss_remove(struct platform_device *pdev)
+{
+ struct iss_device *iss = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ iss_unregister_entities(iss);
+ iss_cleanup_modules(iss);
+
+ free_irq(iss->irq_num, iss);
+ iss_put_clocks(iss);
+
+ for (i = 0; i < OMAP4_ISS_MEM_LAST; i++) {
+ if (iss->regs[i]) {
+ iounmap(iss->regs[i]);
+ iss->regs[i] = NULL;
+ }
+
+ if (iss->res[i]) {
+ release_mem_region(iss->res[i]->start,
+ resource_size(iss->res[i]));
+ iss->res[i] = NULL;
+ }
+ }
+
+ kfree(iss);
+
+ return 0;
+}
+
+static struct platform_device_id omap4iss_id_table[] = {
+ { "omap4iss", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, omap4iss_id_table);
+
+static struct platform_driver iss_driver = {
+ .probe = iss_probe,
+ .remove = iss_remove,
+ .id_table = omap4iss_id_table,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "omap4iss",
+ },
+};
+
+module_platform_driver(iss_driver);
+
+MODULE_DESCRIPTION("TI OMAP4 ISS driver");
+MODULE_AUTHOR("Sergio Aguirre <sergio.a.aguirre@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ISS_VIDEO_DRIVER_VERSION);
diff --git a/drivers/staging/media/omap4iss/iss.h b/drivers/staging/media/omap4iss/iss.h
new file mode 100644
index 000000000000..346db9233171
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss.h
@@ -0,0 +1,236 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver
+ *
+ * Copyright (C) 2012 Texas Instruments.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _OMAP4_ISS_H_
+#define _OMAP4_ISS_H_
+
+#include <media/v4l2-device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+
+#include <media/omap4iss.h>
+
+#include "iss_regs.h"
+#include "iss_csiphy.h"
+#include "iss_csi2.h"
+#include "iss_ipipeif.h"
+#include "iss_ipipe.h"
+#include "iss_resizer.h"
+
+#define to_iss_device(ptr_module) \
+ container_of(ptr_module, struct iss_device, ptr_module)
+#define to_device(ptr_module) \
+ (to_iss_device(ptr_module)->dev)
+
+enum iss_mem_resources {
+ OMAP4_ISS_MEM_TOP,
+ OMAP4_ISS_MEM_CSI2_A_REGS1,
+ OMAP4_ISS_MEM_CAMERARX_CORE1,
+ OMAP4_ISS_MEM_CSI2_B_REGS1,
+ OMAP4_ISS_MEM_CAMERARX_CORE2,
+ OMAP4_ISS_MEM_BTE,
+ OMAP4_ISS_MEM_ISP_SYS1,
+ OMAP4_ISS_MEM_ISP_RESIZER,
+ OMAP4_ISS_MEM_ISP_IPIPE,
+ OMAP4_ISS_MEM_ISP_ISIF,
+ OMAP4_ISS_MEM_ISP_IPIPEIF,
+ OMAP4_ISS_MEM_LAST,
+};
+
+enum iss_subclk_resource {
+ OMAP4_ISS_SUBCLK_SIMCOP = (1 << 0),
+ OMAP4_ISS_SUBCLK_ISP = (1 << 1),
+ OMAP4_ISS_SUBCLK_CSI2_A = (1 << 2),
+ OMAP4_ISS_SUBCLK_CSI2_B = (1 << 3),
+ OMAP4_ISS_SUBCLK_CCP2 = (1 << 4),
+};
+
+enum iss_isp_subclk_resource {
+ OMAP4_ISS_ISP_SUBCLK_BL = (1 << 0),
+ OMAP4_ISS_ISP_SUBCLK_ISIF = (1 << 1),
+ OMAP4_ISS_ISP_SUBCLK_H3A = (1 << 2),
+ OMAP4_ISS_ISP_SUBCLK_RSZ = (1 << 3),
+ OMAP4_ISS_ISP_SUBCLK_IPIPE = (1 << 4),
+ OMAP4_ISS_ISP_SUBCLK_IPIPEIF = (1 << 5),
+};
+
+/*
+ * struct iss_reg - Structure for ISS register values.
+ * @mmio_range: Memory resource in which the register is located.
+ * @reg: 32-bit Register address.
+ * @val: 32-bit Register value.
+ */
+struct iss_reg {
+ enum iss_mem_resources mmio_range;
+ u32 reg;
+ u32 val;
+};
+
+/*
+ * struct iss_device - ISS device structure.
+ * @crashed: Bitmask of crashed entities (indexed by entity ID)
+ */
+struct iss_device {
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct device *dev;
+ u32 revision;
+
+ /* platform HW resources */
+ struct iss_platform_data *pdata;
+ unsigned int irq_num;
+
+ struct resource *res[OMAP4_ISS_MEM_LAST];
+ void __iomem *regs[OMAP4_ISS_MEM_LAST];
+
+ u64 raw_dmamask;
+
+ struct mutex iss_mutex; /* For handling ref_count field */
+ u32 crashed;
+ int has_context;
+ int ref_count;
+
+ struct clk *iss_fck;
+ struct clk *iss_ctrlclk;
+
+ /* ISS modules */
+ struct iss_csi2_device csi2a;
+ struct iss_csi2_device csi2b;
+ struct iss_csiphy csiphy1;
+ struct iss_csiphy csiphy2;
+ struct iss_ipipeif_device ipipeif;
+ struct iss_ipipe_device ipipe;
+ struct iss_resizer_device resizer;
+
+ unsigned int subclk_resources;
+ unsigned int isp_subclk_resources;
+};
+
+#define v4l2_dev_to_iss_device(dev) \
+ container_of(dev, struct iss_device, v4l2_dev)
+
+int omap4iss_get_external_info(struct iss_pipeline *pipe,
+ struct media_link *link);
+
+int omap4iss_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
+ atomic_t *stopping);
+
+int omap4iss_module_sync_is_stopping(wait_queue_head_t *wait,
+ atomic_t *stopping);
+
+int omap4iss_pipeline_set_stream(struct iss_pipeline *pipe,
+ enum iss_pipeline_stream_state state);
+void omap4iss_pipeline_cancel_stream(struct iss_pipeline *pipe);
+
+void omap4iss_configure_bridge(struct iss_device *iss,
+ enum ipipeif_input_entity input);
+
+struct iss_device *omap4iss_get(struct iss_device *iss);
+void omap4iss_put(struct iss_device *iss);
+int omap4iss_subclk_enable(struct iss_device *iss,
+ enum iss_subclk_resource res);
+int omap4iss_subclk_disable(struct iss_device *iss,
+ enum iss_subclk_resource res);
+void omap4iss_isp_subclk_enable(struct iss_device *iss,
+ enum iss_isp_subclk_resource res);
+void omap4iss_isp_subclk_disable(struct iss_device *iss,
+ enum iss_isp_subclk_resource res);
+
+int omap4iss_pipeline_pm_use(struct media_entity *entity, int use);
+
+int omap4iss_register_entities(struct platform_device *pdev,
+ struct v4l2_device *v4l2_dev);
+void omap4iss_unregister_entities(struct platform_device *pdev);
+
+/*
+ * iss_reg_read - Read the value of an OMAP4 ISS register
+ * @iss: the ISS device
+ * @res: memory resource in which the register is located
+ * @offset: register offset in the memory resource
+ *
+ * Return the register value.
+ */
+static inline
+u32 iss_reg_read(struct iss_device *iss, enum iss_mem_resources res,
+ u32 offset)
+{
+ return readl(iss->regs[res] + offset);
+}
+
+/*
+ * iss_reg_write - Write a value to an OMAP4 ISS register
+ * @iss: the ISS device
+ * @res: memory resource in which the register is located
+ * @offset: register offset in the memory resource
+ * @value: value to be written
+ */
+static inline
+void iss_reg_write(struct iss_device *iss, enum iss_mem_resources res,
+ u32 offset, u32 value)
+{
+ writel(value, iss->regs[res] + offset);
+}
+
+/*
+ * iss_reg_clr - Clear bits in an OMAP4 ISS register
+ * @iss: the ISS device
+ * @res: memory resource in which the register is located
+ * @offset: register offset in the memory resource
+ * @clr: bit mask to be cleared
+ */
+static inline
+void iss_reg_clr(struct iss_device *iss, enum iss_mem_resources res,
+ u32 offset, u32 clr)
+{
+ u32 v = iss_reg_read(iss, res, offset);
+
+ iss_reg_write(iss, res, offset, v & ~clr);
+}
+
+/*
+ * iss_reg_set - Set bits in an OMAP4 ISS register
+ * @iss: the ISS device
+ * @res: memory resource in which the register is located
+ * @offset: register offset in the memory resource
+ * @set: bit mask to be set
+ */
+static inline
+void iss_reg_set(struct iss_device *iss, enum iss_mem_resources res,
+ u32 offset, u32 set)
+{
+ u32 v = iss_reg_read(iss, res, offset);
+
+ iss_reg_write(iss, res, offset, v | set);
+}
+
+/*
+ * iss_reg_update - Clear and set bits in an OMAP4 ISS register
+ * @iss: the ISS device
+ * @res: memory resource in which the register is located
+ * @offset: register offset in the memory resource
+ * @clr: bit mask to be cleared
+ * @set: bit mask to be set
+ *
+ * Clear the clr mask first and then set the set mask.
+ */
+static inline
+void iss_reg_update(struct iss_device *iss, enum iss_mem_resources res,
+ u32 offset, u32 clr, u32 set)
+{
+ u32 v = iss_reg_read(iss, res, offset);
+
+ iss_reg_write(iss, res, offset, (v & ~clr) | set);
+}
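+
+/*
+ * Example (illustrative, mirroring the usage in iss.c): iss_reg_update()
+ * performs a read-modify-write, clearing the clr mask and then setting the
+ * set mask, so
+ *
+ *	iss_reg_update(iss, OMAP4_ISS_MEM_TOP, ISS_CLKCTRL,
+ *		       ISS_CLKCTRL_MASK, clk);
+ *
+ * is equivalent to writing (reg & ~ISS_CLKCTRL_MASK) | clk back to ISS_CLKCTRL.
+ */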
+
+#endif /* _OMAP4_ISS_H_ */
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
new file mode 100644
index 000000000000..61fc350eb251
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -0,0 +1,1343 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - CSI2 module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <media/v4l2-common.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/mm.h>
+
+#include "iss.h"
+#include "iss_regs.h"
+#include "iss_csi2.h"
+
+/*
+ * csi2_if_enable - Enable CSI2 Receiver interface.
+ * @enable: enable flag
+ *
+ */
+static void csi2_if_enable(struct iss_csi2_device *csi2, u8 enable)
+{
+ struct iss_csi2_ctrl_cfg *currctrl = &csi2->ctrl;
+
+ iss_reg_update(csi2->iss, csi2->regs1, CSI2_CTRL, CSI2_CTRL_IF_EN,
+ enable ? CSI2_CTRL_IF_EN : 0);
+
+ currctrl->if_enable = enable;
+}
+
+/*
+ * csi2_recv_config - CSI2 receiver module configuration.
+ * @currctrl: iss_csi2_ctrl_cfg structure
+ *
+ */
+static void csi2_recv_config(struct iss_csi2_device *csi2,
+ struct iss_csi2_ctrl_cfg *currctrl)
+{
+ u32 reg = 0;
+
+ if (currctrl->frame_mode)
+ reg |= CSI2_CTRL_FRAME;
+ else
+ reg &= ~CSI2_CTRL_FRAME;
+
+ if (currctrl->vp_clk_enable)
+ reg |= CSI2_CTRL_VP_CLK_EN;
+ else
+ reg &= ~CSI2_CTRL_VP_CLK_EN;
+
+ if (currctrl->vp_only_enable)
+ reg |= CSI2_CTRL_VP_ONLY_EN;
+ else
+ reg &= ~CSI2_CTRL_VP_ONLY_EN;
+
+ reg &= ~CSI2_CTRL_VP_OUT_CTRL_MASK;
+ reg |= currctrl->vp_out_ctrl << CSI2_CTRL_VP_OUT_CTRL_SHIFT;
+
+ if (currctrl->ecc_enable)
+ reg |= CSI2_CTRL_ECC_EN;
+ else
+ reg &= ~CSI2_CTRL_ECC_EN;
+
+ /*
+ * Set MFlag assertion boundaries to:
+ * Low: 4/8 of FIFO size
+ * High: 6/8 of FIFO size
+ */
+ reg &= ~(CSI2_CTRL_MFLAG_LEVH_MASK | CSI2_CTRL_MFLAG_LEVL_MASK);
+ reg |= (2 << CSI2_CTRL_MFLAG_LEVH_SHIFT) |
+ (4 << CSI2_CTRL_MFLAG_LEVL_SHIFT);
+
+ /* Generation of 16x64-bit bursts (Recommended) */
+ reg |= CSI2_CTRL_BURST_SIZE_EXPAND;
+
+ /* Do Non-Posted writes (Recommended) */
+ reg |= CSI2_CTRL_NON_POSTED_WRITE;
+
+ /*
+ * Enforce Little endian for all formats, including:
+ * YUV4:2:2 8-bit and YUV4:2:0 Legacy
+ */
+ reg |= CSI2_CTRL_ENDIANNESS;
+
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTRL, reg);
+}
+
+static const unsigned int csi2_input_fmts[] = {
+ V4L2_MBUS_FMT_SGRBG10_1X10,
+ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SRGGB10_1X10,
+ V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SBGGR10_1X10,
+ V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SGBRG10_1X10,
+ V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_MBUS_FMT_SGBRG8_1X8,
+ V4L2_MBUS_FMT_SGRBG8_1X8,
+ V4L2_MBUS_FMT_SRGGB8_1X8,
+ V4L2_MBUS_FMT_UYVY8_1X16,
+ V4L2_MBUS_FMT_YUYV8_1X16,
+};
+
+/* Setting the format on the CSI2 requires a mapping function that takes
+ * the following inputs:
+ * - 3 different formats (at this time)
+ * - 2 destinations (mem, vp+mem) (vp only handled separately)
+ * - 2 decompression options (on, off)
+ * Output should be CSI2 frame format code
+ * Array indices as follows: [format][dest][decompr]
+ * Not all combinations are valid. 0 means invalid.
+ */
+static const u16 __csi2_fmt_map[][2][2] = {
+ /* RAW10 formats */
+ {
+ /* Output to memory */
+ {
+ /* No DPCM decompression */
+ CSI2_PIX_FMT_RAW10_EXP16,
+ /* DPCM decompression */
+ 0,
+ },
+ /* Output to both */
+ {
+ /* No DPCM decompression */
+ CSI2_PIX_FMT_RAW10_EXP16_VP,
+ /* DPCM decompression */
+ 0,
+ },
+ },
+ /* RAW10 DPCM8 formats */
+ {
+ /* Output to memory */
+ {
+ /* No DPCM decompression */
+ CSI2_USERDEF_8BIT_DATA1,
+ /* DPCM decompression */
+ CSI2_USERDEF_8BIT_DATA1_DPCM10,
+ },
+ /* Output to both */
+ {
+ /* No DPCM decompression */
+ CSI2_PIX_FMT_RAW8_VP,
+ /* DPCM decompression */
+ CSI2_USERDEF_8BIT_DATA1_DPCM10_VP,
+ },
+ },
+ /* RAW8 formats */
+ {
+ /* Output to memory */
+ {
+ /* No DPCM decompression */
+ CSI2_PIX_FMT_RAW8,
+ /* DPCM decompression */
+ 0,
+ },
+ /* Output to both */
+ {
+ /* No DPCM decompression */
+ CSI2_PIX_FMT_RAW8_VP,
+ /* DPCM decompression */
+ 0,
+ },
+ },
+ /* YUV422 formats */
+ {
+ /* Output to memory */
+ {
+ /* No DPCM decompression */
+ CSI2_PIX_FMT_YUV422_8BIT,
+ /* DPCM decompression */
+ 0,
+ },
+ /* Output to both */
+ {
+ /* No DPCM decompression */
+ CSI2_PIX_FMT_YUV422_8BIT_VP16,
+ /* DPCM decompression */
+ 0,
+ },
+ },
+};
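+
+/*
+ * Example lookup in the table above: a RAW10 sink format routed to both the
+ * video port and memory, without DPCM decompression, maps as
+ *
+ *	__csi2_fmt_map[0][1][0] == CSI2_PIX_FMT_RAW10_EXP16_VP
+ *
+ * i.e. [format = RAW10][dest = vp+mem][decompr = off].
+ */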
+
+/*
+ * csi2_ctx_map_format - Map CSI2 sink media bus format to CSI2 format ID
+ * @csi2: ISS CSI2 device
+ *
+ * Returns CSI2 physical format id
+ */
+static u16 csi2_ctx_map_format(struct iss_csi2_device *csi2)
+{
+ const struct v4l2_mbus_framefmt *fmt = &csi2->formats[CSI2_PAD_SINK];
+ int fmtidx, destidx;
+
+ switch (fmt->code) {
+ case V4L2_MBUS_FMT_SGRBG10_1X10:
+ case V4L2_MBUS_FMT_SRGGB10_1X10:
+ case V4L2_MBUS_FMT_SBGGR10_1X10:
+ case V4L2_MBUS_FMT_SGBRG10_1X10:
+ fmtidx = 0;
+ break;
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8:
+ fmtidx = 1;
+ break;
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
+ case V4L2_MBUS_FMT_SGBRG8_1X8:
+ case V4L2_MBUS_FMT_SGRBG8_1X8:
+ case V4L2_MBUS_FMT_SRGGB8_1X8:
+ fmtidx = 2;
+ break;
+ case V4L2_MBUS_FMT_UYVY8_1X16:
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ fmtidx = 3;
+ break;
+ default:
+ WARN(1, "CSI2: pixel format %08x unsupported!\n",
+ fmt->code);
+ return 0;
+ }
+
+ if (!(csi2->output & CSI2_OUTPUT_IPIPEIF) &&
+ !(csi2->output & CSI2_OUTPUT_MEMORY)) {
+ /* Neither output enabled is a valid combination */
+ return CSI2_PIX_FMT_OTHERS;
+ }
+
+ /* If we need to skip frames at the beginning of the stream disable the
+ * video port to avoid sending the skipped frames to the IPIPEIF.
+ */
+ destidx = csi2->frame_skip ? 0 : !!(csi2->output & CSI2_OUTPUT_IPIPEIF);
+
+ return __csi2_fmt_map[fmtidx][destidx][csi2->dpcm_decompress];
+}
+
+/*
+ * csi2_set_outaddr - Set memory address to save output image
+ * @csi2: Pointer to ISS CSI2 device.
+ * @addr: 32-bit memory address aligned on 32 byte boundary.
+ *
+ * Sets the memory address where the output will be saved.
+ *
+ * The address must be aligned on a 32 byte boundary.
+ */
+static void csi2_set_outaddr(struct iss_csi2_device *csi2, u32 addr)
+{
+ struct iss_csi2_ctx_cfg *ctx = &csi2->contexts[0];
+
+ ctx->ping_addr = addr;
+ ctx->pong_addr = addr;
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PING_ADDR(ctx->ctxnum),
+ ctx->ping_addr);
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PONG_ADDR(ctx->ctxnum),
+ ctx->pong_addr);
+}
+
+/*
+ * is_usr_def_mapping - Checks whether USER_DEF_MAPPING should
+ * be enabled by CSI2.
+ * @format_id: mapped format id
+ *
+ */
+static inline int is_usr_def_mapping(u32 format_id)
+{
+ return (format_id & 0xf0) == 0x40 ? 1 : 0;
+}
+
+/*
+ * csi2_ctx_enable - Enable specified CSI2 context
+ * @ctxnum: Context number, valid between 0 and 7 values.
+ * @enable: enable
+ *
+ */
+static void csi2_ctx_enable(struct iss_csi2_device *csi2, u8 ctxnum, u8 enable)
+{
+ struct iss_csi2_ctx_cfg *ctx = &csi2->contexts[ctxnum];
+ u32 reg;
+
+ reg = iss_reg_read(csi2->iss, csi2->regs1, CSI2_CTX_CTRL1(ctxnum));
+
+ if (enable) {
+ unsigned int skip = 0;
+
+ if (csi2->frame_skip)
+ skip = csi2->frame_skip;
+ else if (csi2->output & CSI2_OUTPUT_MEMORY)
+ skip = 1;
+
+ reg &= ~CSI2_CTX_CTRL1_COUNT_MASK;
+ reg |= CSI2_CTX_CTRL1_COUNT_UNLOCK
+ | (skip << CSI2_CTX_CTRL1_COUNT_SHIFT)
+ | CSI2_CTX_CTRL1_CTX_EN;
+ } else {
+ reg &= ~CSI2_CTX_CTRL1_CTX_EN;
+ }
+
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL1(ctxnum), reg);
+ ctx->enabled = enable;
+}
+
+/*
+ * csi2_ctx_config - CSI2 context configuration.
+ * @ctx: context configuration
+ *
+ */
+static void csi2_ctx_config(struct iss_csi2_device *csi2,
+ struct iss_csi2_ctx_cfg *ctx)
+{
+ u32 reg = 0;
+
+ /* Set up CSI2_CTx_CTRL1 */
+ if (ctx->eof_enabled)
+ reg |= CSI2_CTX_CTRL1_EOF_EN;
+
+ if (ctx->eol_enabled)
+ reg |= CSI2_CTX_CTRL1_EOL_EN;
+
+ if (ctx->checksum_enabled)
+ reg |= CSI2_CTX_CTRL1_CS_EN;
+
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL1(ctx->ctxnum), reg);
+
+ /* Set up CSI2_CTx_CTRL2 */
+ reg = ctx->virtual_id << CSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT;
+ reg |= ctx->format_id << CSI2_CTX_CTRL2_FORMAT_SHIFT;
+
+ if (ctx->dpcm_decompress && ctx->dpcm_predictor)
+ reg |= CSI2_CTX_CTRL2_DPCM_PRED;
+
+ if (is_usr_def_mapping(ctx->format_id))
+ reg |= 2 << CSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT;
+
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL2(ctx->ctxnum), reg);
+
+ /* Set up CSI2_CTx_CTRL3 */
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL3(ctx->ctxnum),
+ ctx->alpha << CSI2_CTX_CTRL3_ALPHA_SHIFT);
+
+ /* Set up CSI2_CTx_DAT_OFST */
+ iss_reg_update(csi2->iss, csi2->regs1, CSI2_CTX_DAT_OFST(ctx->ctxnum),
+ CSI2_CTX_DAT_OFST_MASK, ctx->data_offset);
+
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PING_ADDR(ctx->ctxnum),
+ ctx->ping_addr);
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PONG_ADDR(ctx->ctxnum),
+ ctx->pong_addr);
+}
+
+/*
+ * csi2_timing_config - CSI2 timing configuration.
+ * @timing: csi2_timing_cfg structure
+ */
+static void csi2_timing_config(struct iss_csi2_device *csi2,
+ struct iss_csi2_timing_cfg *timing)
+{
+ u32 reg;
+
+ reg = iss_reg_read(csi2->iss, csi2->regs1, CSI2_TIMING);
+
+ if (timing->force_rx_mode)
+ reg |= CSI2_TIMING_FORCE_RX_MODE_IO1;
+ else
+ reg &= ~CSI2_TIMING_FORCE_RX_MODE_IO1;
+
+ if (timing->stop_state_16x)
+ reg |= CSI2_TIMING_STOP_STATE_X16_IO1;
+ else
+ reg &= ~CSI2_TIMING_STOP_STATE_X16_IO1;
+
+ if (timing->stop_state_4x)
+ reg |= CSI2_TIMING_STOP_STATE_X4_IO1;
+ else
+ reg &= ~CSI2_TIMING_STOP_STATE_X4_IO1;
+
+ reg &= ~CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK;
+ reg |= timing->stop_state_counter <<
+ CSI2_TIMING_STOP_STATE_COUNTER_IO1_SHIFT;
+
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_TIMING, reg);
+}
+
+/*
+ * csi2_irq_ctx_set - Enables CSI2 Context IRQs.
+ * @enable: Enable/disable CSI2 Context interrupts
+ */
+static void csi2_irq_ctx_set(struct iss_csi2_device *csi2, int enable)
+{
+ u32 reg = CSI2_CTX_IRQ_FE;
+ int i;
+
+ if (csi2->use_fs_irq)
+ reg |= CSI2_CTX_IRQ_FS;
+
+ for (i = 0; i < 8; i++) {
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(i),
+ reg);
+ if (enable)
+ iss_reg_set(csi2->iss, csi2->regs1,
+ CSI2_CTX_IRQENABLE(i), reg);
+ else
+ iss_reg_clr(csi2->iss, csi2->regs1,
+ CSI2_CTX_IRQENABLE(i), reg);
+ }
+}
+
+/*
+ * csi2_irq_complexio1_set - Enables CSI2 ComplexIO IRQs.
+ * @enable: Enable/disable CSI2 ComplexIO #1 interrupts
+ */
+static void csi2_irq_complexio1_set(struct iss_csi2_device *csi2, int enable)
+{
+ u32 reg;
+ reg = CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT |
+ CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER |
+ CSI2_COMPLEXIO_IRQ_STATEULPM5 |
+ CSI2_COMPLEXIO_IRQ_ERRCONTROL5 |
+ CSI2_COMPLEXIO_IRQ_ERRESC5 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTHS5 |
+ CSI2_COMPLEXIO_IRQ_STATEULPM4 |
+ CSI2_COMPLEXIO_IRQ_ERRCONTROL4 |
+ CSI2_COMPLEXIO_IRQ_ERRESC4 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTHS4 |
+ CSI2_COMPLEXIO_IRQ_STATEULPM3 |
+ CSI2_COMPLEXIO_IRQ_ERRCONTROL3 |
+ CSI2_COMPLEXIO_IRQ_ERRESC3 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTHS3 |
+ CSI2_COMPLEXIO_IRQ_STATEULPM2 |
+ CSI2_COMPLEXIO_IRQ_ERRCONTROL2 |
+ CSI2_COMPLEXIO_IRQ_ERRESC2 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTHS2 |
+ CSI2_COMPLEXIO_IRQ_STATEULPM1 |
+ CSI2_COMPLEXIO_IRQ_ERRCONTROL1 |
+ CSI2_COMPLEXIO_IRQ_ERRESC1 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTHS1;
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQSTATUS, reg);
+ if (enable)
+ iss_reg_set(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQENABLE,
+ reg);
+ else
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQENABLE,
+ 0);
+}
+
+/*
+ * csi2_irq_status_set - Enables CSI2 Status IRQs.
+ * @enable: Enable/disable CSI2 Status interrupts
+ */
+static void csi2_irq_status_set(struct iss_csi2_device *csi2, int enable)
+{
+ u32 reg;
+ reg = CSI2_IRQ_OCP_ERR |
+ CSI2_IRQ_SHORT_PACKET |
+ CSI2_IRQ_ECC_CORRECTION |
+ CSI2_IRQ_ECC_NO_CORRECTION |
+ CSI2_IRQ_COMPLEXIO_ERR |
+ CSI2_IRQ_FIFO_OVF |
+ CSI2_IRQ_CONTEXT0;
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_IRQSTATUS, reg);
+ if (enable)
+ iss_reg_set(csi2->iss, csi2->regs1, CSI2_IRQENABLE, reg);
+ else
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_IRQENABLE, 0);
+}
+
+/*
+ * omap4iss_csi2_reset - Resets the CSI2 module.
+ *
+ * Must be called with the phy lock held.
+ *
+ * Returns 0 if successful, -ENODEV if the module isn't available, or -EBUSY
+ * if the PHY is in use or the reset doesn't complete in time.
+ */
+int omap4iss_csi2_reset(struct iss_csi2_device *csi2)
+{
+ u8 soft_reset_retries = 0;
+ u32 reg;
+ int i;
+
+ if (!csi2->available)
+ return -ENODEV;
+
+ if (csi2->phy->phy_in_use)
+ return -EBUSY;
+
+ iss_reg_set(csi2->iss, csi2->regs1, CSI2_SYSCONFIG,
+ CSI2_SYSCONFIG_SOFT_RESET);
+
+ do {
+ reg = iss_reg_read(csi2->iss, csi2->regs1, CSI2_SYSSTATUS)
+ & CSI2_SYSSTATUS_RESET_DONE;
+ if (reg == CSI2_SYSSTATUS_RESET_DONE)
+ break;
+ soft_reset_retries++;
+ if (soft_reset_retries < 5)
+ usleep_range(100, 100);
+ } while (soft_reset_retries < 5);
+
+ if (soft_reset_retries == 5) {
+ dev_err(csi2->iss->dev,
+ "CSI2: Soft reset try count exceeded!\n");
+ return -EBUSY;
+ }
+
+ iss_reg_set(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_CFG,
+ CSI2_COMPLEXIO_CFG_RESET_CTRL);
+
+ i = 100;
+ do {
+ reg = iss_reg_read(csi2->iss, csi2->phy->phy_regs, REGISTER1)
+ & REGISTER1_RESET_DONE_CTRLCLK;
+ if (reg == REGISTER1_RESET_DONE_CTRLCLK)
+ break;
+ usleep_range(100, 100);
+ } while (--i > 0);
+
+ if (i == 0) {
+ dev_err(csi2->iss->dev,
+ "CSI2: Reset for CSI2_96M_FCLK domain Failed!\n");
+ return -EBUSY;
+ }
+
+ iss_reg_update(csi2->iss, csi2->regs1, CSI2_SYSCONFIG,
+ CSI2_SYSCONFIG_MSTANDBY_MODE_MASK |
+ CSI2_SYSCONFIG_AUTO_IDLE,
+ CSI2_SYSCONFIG_MSTANDBY_MODE_NO);
+
+ return 0;
+}
+
+static int csi2_configure(struct iss_csi2_device *csi2)
+{
+ const struct iss_v4l2_subdevs_group *pdata;
+ struct iss_csi2_timing_cfg *timing = &csi2->timing[0];
+ struct v4l2_subdev *sensor;
+ struct media_pad *pad;
+
+ /*
+ * CSI2 fields that can be updated while the context or the interface
+ * is enabled are not currently updated dynamically, so refuse to
+ * reconfigure if either has already been enabled.
+ */
+ if (csi2->contexts[0].enabled || csi2->ctrl.if_enable)
+ return -EBUSY;
+
+ pad = media_entity_remote_pad(&csi2->pads[CSI2_PAD_SINK]);
+ sensor = media_entity_to_v4l2_subdev(pad->entity);
+ pdata = sensor->host_priv;
+
+ csi2->frame_skip = 0;
+ v4l2_subdev_call(sensor, sensor, g_skip_frames, &csi2->frame_skip);
+
+ csi2->ctrl.vp_out_ctrl = pdata->bus.csi2.vpclk_div;
+ csi2->ctrl.frame_mode = ISS_CSI2_FRAME_IMMEDIATE;
+ csi2->ctrl.ecc_enable = pdata->bus.csi2.crc;
+
+ timing->force_rx_mode = 1;
+ timing->stop_state_16x = 1;
+ timing->stop_state_4x = 1;
+ timing->stop_state_counter = 0x1ff;
+
+ /*
+ * The CSI2 receiver can't do any format conversion except DPCM
+ * decompression. Since set_format propagates the sink format to the
+ * source pad, the two pads can only differ when DPCM decompression is
+ * requested, so enable it in that case.
+ */
+ if (csi2->formats[CSI2_PAD_SINK].code !=
+ csi2->formats[CSI2_PAD_SOURCE].code)
+ csi2->dpcm_decompress = true;
+ else
+ csi2->dpcm_decompress = false;
+
+ csi2->contexts[0].format_id = csi2_ctx_map_format(csi2);
+
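+ /*
+ * When the video buffer lines carry end-of-line padding, program the
+ * full bytes-per-line value as the context data offset so each new
+ * line starts at the next stride boundary; without padding the offset
+ * is left at 0.
+ */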
+ if (csi2->video_out.bpl_padding == 0)
+ csi2->contexts[0].data_offset = 0;
+ else
+ csi2->contexts[0].data_offset = csi2->video_out.bpl_value;
+
+ /*
+ * Enable end of frame and end of line signals generation for
+ * context 0. These signals are generated from CSI2 receiver to
+ * qualify the last pixel of a frame and the last pixel of a line.
+ * Without enabling the signals CSI2 receiver writes data to memory
+ * beyond buffer size and/or data line offset is not handled correctly.
+ */
+ csi2->contexts[0].eof_enabled = 1;
+ csi2->contexts[0].eol_enabled = 1;
+
+ csi2_irq_complexio1_set(csi2, 1);
+ csi2_irq_ctx_set(csi2, 1);
+ csi2_irq_status_set(csi2, 1);
+
+ /* Set configuration (timings, format and links) */
+ csi2_timing_config(csi2, timing);
+ csi2_recv_config(csi2, &csi2->ctrl);
+ csi2_ctx_config(csi2, &csi2->contexts[0]);
+
+ return 0;
+}
+
+/*
+ * csi2_print_status - Prints CSI2 debug information.
+ */
+#define CSI2_PRINT_REGISTER(iss, regs, name)\
+ dev_dbg(iss->dev, "###CSI2 " #name "=0x%08x\n", \
+ iss_reg_read(iss, regs, CSI2_##name))
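+
+/*
+ * For example, CSI2_PRINT_REGISTER(iss, csi2->regs1, SYSCONFIG) reads the
+ * CSI2_SYSCONFIG register (via token pasting) and prints
+ * "###CSI2 SYSCONFIG=0x<value>" through dev_dbg().
+ */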
+
+static void csi2_print_status(struct iss_csi2_device *csi2)
+{
+ struct iss_device *iss = csi2->iss;
+
+ if (!csi2->available)
+ return;
+
+ dev_dbg(iss->dev, "-------------CSI2 Register dump-------------\n");
+
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, SYSCONFIG);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, SYSSTATUS);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, IRQENABLE);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, IRQSTATUS);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTRL);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, DBG_H);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, COMPLEXIO_CFG);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, COMPLEXIO_IRQSTATUS);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, SHORT_PACKET);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, COMPLEXIO_IRQENABLE);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, DBG_P);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, TIMING);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_CTRL1(0));
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_CTRL2(0));
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_DAT_OFST(0));
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_PING_ADDR(0));
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_PONG_ADDR(0));
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_IRQENABLE(0));
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_IRQSTATUS(0));
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_CTRL3(0));
+
+ dev_dbg(iss->dev, "--------------------------------------------\n");
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+/*
+ * csi2_isr_buffer - Handle buffer completion at end-of-frame
+ * when writing to memory.
+ */
+static void csi2_isr_buffer(struct iss_csi2_device *csi2)
+{
+ struct iss_buffer *buffer;
+
+ csi2_ctx_enable(csi2, 0, 0);
+
+ buffer = omap4iss_video_buffer_next(&csi2->video_out);
+
+ /*
+ * Let video queue operation restart engine if there is an underrun
+ * condition.
+ */
+ if (buffer == NULL)
+ return;
+
+ csi2_set_outaddr(csi2, buffer->iss_addr);
+ csi2_ctx_enable(csi2, 0, 1);
+}
+
+static void csi2_isr_ctx(struct iss_csi2_device *csi2,
+ struct iss_csi2_ctx_cfg *ctx)
+{
+ unsigned int n = ctx->ctxnum;
+ u32 status;
+
+ status = iss_reg_read(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(n));
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(n), status);
+
+ /* Propagate frame number */
+ if (status & CSI2_CTX_IRQ_FS) {
+ struct iss_pipeline *pipe =
+ to_iss_pipeline(&csi2->subdev.entity);
+ if (pipe->do_propagation)
+ atomic_inc(&pipe->frame_number);
+ }
+
+ if (!(status & CSI2_CTX_IRQ_FE))
+ return;
+
+ /*
+ * Skip interrupts until the frame skip count is reached. The CSI2 is
+ * disabled automatically once the count programmed in the
+ * CSI2_CTx_CTRL1::COUNT field expires, so re-enable it here.
+ *
+ * Relying on the FRAME_NUMBER interrupt instead would have been nicer,
+ * but that interrupt is only generated when the CSI2 writes to memory:
+ * the CSI2_CTx_CTRL1::COUNT field is decremented correctly and reaches
+ * 0 when data is only forwarded to the video port, yet no interrupt
+ * arrives. Possibly a CSI2 hardware bug.
+ */
+ if (csi2->frame_skip) {
+ csi2->frame_skip--;
+ if (csi2->frame_skip == 0) {
+ ctx->format_id = csi2_ctx_map_format(csi2);
+ csi2_ctx_config(csi2, ctx);
+ csi2_ctx_enable(csi2, n, 1);
+ }
+ return;
+ }
+
+ if (csi2->output & CSI2_OUTPUT_MEMORY)
+ csi2_isr_buffer(csi2);
+}
+
+/*
+ * omap4iss_csi2_isr - CSI2 interrupt handling.
+ */
+void omap4iss_csi2_isr(struct iss_csi2_device *csi2)
+{
+ struct iss_pipeline *pipe = to_iss_pipeline(&csi2->subdev.entity);
+ u32 csi2_irqstatus, cpxio1_irqstatus;
+ struct iss_device *iss = csi2->iss;
+
+ if (!csi2->available)
+ return;
+
+ csi2_irqstatus = iss_reg_read(csi2->iss, csi2->regs1, CSI2_IRQSTATUS);
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_IRQSTATUS, csi2_irqstatus);
+
+ /* Failure Cases */
+ if (csi2_irqstatus & CSI2_IRQ_COMPLEXIO_ERR) {
+ cpxio1_irqstatus = iss_reg_read(csi2->iss, csi2->regs1,
+ CSI2_COMPLEXIO_IRQSTATUS);
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQSTATUS,
+ cpxio1_irqstatus);
+ dev_dbg(iss->dev, "CSI2: ComplexIO Error IRQ %x\n",
+ cpxio1_irqstatus);
+ pipe->error = true;
+ }
+
+ if (csi2_irqstatus & (CSI2_IRQ_OCP_ERR |
+ CSI2_IRQ_SHORT_PACKET |
+ CSI2_IRQ_ECC_NO_CORRECTION |
+ CSI2_IRQ_COMPLEXIO_ERR |
+ CSI2_IRQ_FIFO_OVF)) {
+ dev_dbg(iss->dev,
+ "CSI2 Err: OCP:%d SHORT:%d ECC:%d CPXIO:%d OVF:%d\n",
+ csi2_irqstatus & CSI2_IRQ_OCP_ERR ? 1 : 0,
+ csi2_irqstatus & CSI2_IRQ_SHORT_PACKET ? 1 : 0,
+ csi2_irqstatus & CSI2_IRQ_ECC_NO_CORRECTION ? 1 : 0,
+ csi2_irqstatus & CSI2_IRQ_COMPLEXIO_ERR ? 1 : 0,
+ csi2_irqstatus & CSI2_IRQ_FIFO_OVF ? 1 : 0);
+ pipe->error = true;
+ }
+
+ if (omap4iss_module_sync_is_stopping(&csi2->wait, &csi2->stopping))
+ return;
+
+ /* Successful cases */
+ if (csi2_irqstatus & CSI2_IRQ_CONTEXT0)
+ csi2_isr_ctx(csi2, &csi2->contexts[0]);
+
+ if (csi2_irqstatus & CSI2_IRQ_ECC_CORRECTION)
+ dev_dbg(iss->dev, "CSI2: ECC correction done\n");
+}
+
+/* -----------------------------------------------------------------------------
+ * ISS video operations
+ */
+
+/*
+ * csi2_queue - Queues the first buffer when using memory output
+ * @video: The video node
+ * @buffer: buffer to queue
+ */
+static int csi2_queue(struct iss_video *video, struct iss_buffer *buffer)
+{
+ struct iss_csi2_device *csi2 = container_of(video,
+ struct iss_csi2_device, video_out);
+
+ csi2_set_outaddr(csi2, buffer->iss_addr);
+
+ /*
+ * If streaming was enabled before a buffer was queued, or an underrun
+ * happened in the ISR, the hardware was not enabled and the
+ * ISS_VIDEO_DMAQUEUE_UNDERRUN DMA queue flag is still set.
+ * Enable the hardware now.
+ */
+ if (csi2->video_out.dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
+ /* Enable / disable context 0 and IRQs */
+ csi2_if_enable(csi2, 1);
+ csi2_ctx_enable(csi2, 0, 1);
+ iss_video_dmaqueue_flags_clr(&csi2->video_out);
+ }
+
+ return 0;
+}
+
+static const struct iss_video_operations csi2_issvideo_ops = {
+ .queue = csi2_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__csi2_get_format(struct iss_csi2_device *csi2, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+ else
+ return &csi2->formats[pad];
+}
+
+static void
+csi2_try_format(struct iss_csi2_device *csi2, struct v4l2_subdev_fh *fh,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ enum v4l2_mbus_pixelcode pixelcode;
+ struct v4l2_mbus_framefmt *format;
+ const struct iss_format_info *info;
+ unsigned int i;
+
+ switch (pad) {
+ case CSI2_PAD_SINK:
+ for (i = 0; i < ARRAY_SIZE(csi2_input_fmts); i++) {
+ if (fmt->code == csi2_input_fmts[i])
+ break;
+ }
+
+ /* If not found, use SGRBG10 as default */
+ if (i >= ARRAY_SIZE(csi2_input_fmts))
+ fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+ /* Clamp the width and height to the valid range (1-8191). */
+ fmt->width = clamp_t(u32, fmt->width, 1, 8191);
+ fmt->height = clamp_t(u32, fmt->height, 1, 8191);
+ break;
+
+ case CSI2_PAD_SOURCE:
+ /* The source format is the same as the sink format, except for
+ * DPCM decompression.
+ */
+ pixelcode = fmt->code;
+ format = __csi2_get_format(csi2, fh, CSI2_PAD_SINK, which);
+ memcpy(fmt, format, sizeof(*fmt));
+
+ /*
+ * Only allow DPCM decompression, and check that the
+ * pattern is preserved.
+ */
+ info = omap4iss_video_format_info(fmt->code);
+ if (info->uncompressed == pixelcode)
+ fmt->code = pixelcode;
+ break;
+ }
+
+ /* RGB, non-interlaced */
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * csi2_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+ const struct iss_format_info *info;
+
+ if (code->pad == CSI2_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(csi2_input_fmts))
+ return -EINVAL;
+
+ code->code = csi2_input_fmts[code->index];
+ } else {
+ format = __csi2_get_format(csi2, fh, CSI2_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_TRY);
+ switch (code->index) {
+ case 0:
+ /* Passthrough sink pad code */
+ code->code = format->code;
+ break;
+ case 1:
+ /* Uncompressed code */
+ info = omap4iss_video_format_info(format->code);
+ if (info->uncompressed == format->code)
+ return -EINVAL;
+
+ code->code = info->uncompressed;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int csi2_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ csi2_try_format(csi2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
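+ /* -1 wraps to the largest u32 value, so csi2_try_format() clamps the
+ * size down to the maximum the receiver supports, giving the upper
+ * bound of the frame size range.
+ */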
+ csi2_try_format(csi2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * csi2_get_format - Handle get format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __csi2_get_format(csi2, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * csi2_set_format - Handle set format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __csi2_get_format(csi2, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ csi2_try_format(csi2, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == CSI2_PAD_SINK) {
+ format = __csi2_get_format(csi2, fh, CSI2_PAD_SOURCE,
+ fmt->which);
+ *format = fmt->format;
+ csi2_try_format(csi2, fh, CSI2_PAD_SOURCE, format, fmt->which);
+ }
+
+ return 0;
+}
+
+static int csi2_link_validate(struct v4l2_subdev *sd, struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct iss_pipeline *pipe = to_iss_pipeline(&csi2->subdev.entity);
+ int rval;
+
+ pipe->external = media_entity_to_v4l2_subdev(link->source->entity);
+ rval = omap4iss_get_external_info(pipe, link);
+ if (rval < 0)
+ return rval;
+
+ return v4l2_subdev_link_validate_default(sd, link, source_fmt,
+ sink_fmt);
+}
+
+/*
+ * csi2_init_formats - Initialize formats on all pads
+ * @sd: ISS CSI2 V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = CSI2_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format.format.width = 4096;
+ format.format.height = 4096;
+ csi2_set_format(sd, fh, &format);
+
+ return 0;
+}
+
+/*
+ * csi2_set_stream - Enable/Disable streaming on the CSI2 module
+ * @sd: ISS CSI2 V4L2 subdevice
+ * @enable: ISS pipeline stream state
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct iss_device *iss = csi2->iss;
+ struct iss_pipeline *pipe = to_iss_pipeline(&csi2->subdev.entity);
+ struct iss_video *video_out = &csi2->video_out;
+ int ret = 0;
+
+ if (csi2->state == ISS_PIPELINE_STREAM_STOPPED) {
+ if (enable == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+
+ omap4iss_subclk_enable(iss, csi2->subclk);
+ }
+
+ switch (enable) {
+ case ISS_PIPELINE_STREAM_CONTINUOUS: {
+ ret = omap4iss_csiphy_config(iss, sd);
+ if (ret < 0)
+ return ret;
+
+ if (omap4iss_csiphy_acquire(csi2->phy) < 0)
+ return -ENODEV;
+ csi2->use_fs_irq = pipe->do_propagation;
+ csi2_configure(csi2);
+ csi2_print_status(csi2);
+
+ /*
+ * When outputting to memory with no buffer available, let the
+ * buffer queue handler start the hardware. A DMA queue flag
+ * ISS_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
+ * a buffer available.
+ */
+ if (csi2->output & CSI2_OUTPUT_MEMORY &&
+ !(video_out->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_QUEUED))
+ break;
+ /* Enable context 0 and IRQs */
+ atomic_set(&csi2->stopping, 0);
+ csi2_ctx_enable(csi2, 0, 1);
+ csi2_if_enable(csi2, 1);
+ iss_video_dmaqueue_flags_clr(video_out);
+ break;
+ }
+ case ISS_PIPELINE_STREAM_STOPPED:
+ if (csi2->state == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+ if (omap4iss_module_sync_idle(&sd->entity, &csi2->wait,
+ &csi2->stopping))
+ ret = -ETIMEDOUT;
+ csi2_ctx_enable(csi2, 0, 0);
+ csi2_if_enable(csi2, 0);
+ csi2_irq_ctx_set(csi2, 0);
+ omap4iss_csiphy_release(csi2->phy);
+ omap4iss_subclk_disable(iss, csi2->subclk);
+ iss_video_dmaqueue_flags_clr(video_out);
+ break;
+ }
+
+ csi2->state = enable;
+ return ret;
+}
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops csi2_video_ops = {
+ .s_stream = csi2_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
+ .enum_mbus_code = csi2_enum_mbus_code,
+ .enum_frame_size = csi2_enum_frame_size,
+ .get_fmt = csi2_get_format,
+ .set_fmt = csi2_set_format,
+ .link_validate = csi2_link_validate,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops csi2_ops = {
+ .video = &csi2_video_ops,
+ .pad = &csi2_pad_ops,
+};
+
+/* subdev internal operations */
+static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
+ .open = csi2_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * csi2_link_setup - Setup CSI2 connections.
+ * @entity : Pointer to media entity structure
+ * @local : Pointer to local pad array
+ * @remote : Pointer to remote pad array
+ * @flags : Link flags
+ * return -EINVAL or zero on success
+ */
+static int csi2_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct iss_csi2_ctrl_cfg *ctrl = &csi2->ctrl;
+
+ /*
+ * The ISS core doesn't support pipelines with multiple video outputs.
+ * Revisit this when such support is implemented; return -EBUSY for now.
+ */
+
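+ /*
+ * Pad indexes and media entity type flags occupy disjoint bits, so
+ * OR-ing them lets a single switch dispatch on both the local pad
+ * and the type of the remote entity.
+ */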
+ switch (local->index | media_entity_type(remote->entity)) {
+ case CSI2_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi2->output & ~CSI2_OUTPUT_MEMORY)
+ return -EBUSY;
+ csi2->output |= CSI2_OUTPUT_MEMORY;
+ } else {
+ csi2->output &= ~CSI2_OUTPUT_MEMORY;
+ }
+ break;
+
+ case CSI2_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi2->output & ~CSI2_OUTPUT_IPIPEIF)
+ return -EBUSY;
+ csi2->output |= CSI2_OUTPUT_IPIPEIF;
+ } else {
+ csi2->output &= ~CSI2_OUTPUT_IPIPEIF;
+ }
+ break;
+
+ default:
+ /* Link from camera to CSI2 is fixed... */
+ return -EINVAL;
+ }
+
+ ctrl->vp_only_enable = !(csi2->output & CSI2_OUTPUT_MEMORY);
+ ctrl->vp_clk_enable = !!(csi2->output & CSI2_OUTPUT_IPIPEIF);
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations csi2_media_ops = {
+ .link_setup = csi2_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+void omap4iss_csi2_unregister_entities(struct iss_csi2_device *csi2)
+{
+ v4l2_device_unregister_subdev(&csi2->subdev);
+ omap4iss_video_unregister(&csi2->video_out);
+}
+
+int omap4iss_csi2_register_entities(struct iss_csi2_device *csi2,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video nodes. */
+ ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap4iss_video_register(&csi2->video_out, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap4iss_csi2_unregister_entities(csi2);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISS CSI2 initialisation and cleanup
+ */
+
+/*
+ * csi2_init_entities - Initialize subdev and media entity.
+ * @csi2: Pointer to csi2 structure.
+ * return -ENOMEM or zero on success
+ */
+static int csi2_init_entities(struct iss_csi2_device *csi2, const char *subname)
+{
+ struct v4l2_subdev *sd = &csi2->subdev;
+ struct media_pad *pads = csi2->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+ char name[V4L2_SUBDEV_NAME_SIZE];
+
+ v4l2_subdev_init(sd, &csi2_ops);
+ sd->internal_ops = &csi2_internal_ops;
+ sprintf(name, "CSI2%s", subname);
+ snprintf(sd->name, sizeof(sd->name), "OMAP4 ISS %s", name);
+
+ sd->grp_id = 1 << 16; /* group ID for iss subdevs */
+ v4l2_set_subdevdata(sd, csi2);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+
+ me->ops = &csi2_media_ops;
+ ret = media_entity_init(me, CSI2_PADS_NUM, pads, 0);
+ if (ret < 0)
+ return ret;
+
+ csi2_init_formats(sd, NULL);
+
+ /* Video device node */
+ csi2->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ csi2->video_out.ops = &csi2_issvideo_ops;
+ csi2->video_out.bpl_alignment = 32;
+ csi2->video_out.bpl_zero_padding = 1;
+ csi2->video_out.bpl_max = 0x1ffe0;
+ csi2->video_out.iss = csi2->iss;
+ csi2->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+
+ ret = omap4iss_video_init(&csi2->video_out, name);
+ if (ret < 0)
+ goto error_video;
+
+ /* Connect the CSI2 subdev to the video node. */
+ ret = media_entity_create_link(&csi2->subdev.entity, CSI2_PAD_SOURCE,
+ &csi2->video_out.video.entity, 0, 0);
+ if (ret < 0)
+ goto error_link;
+
+ return 0;
+
+error_link:
+ omap4iss_video_cleanup(&csi2->video_out);
+error_video:
+ media_entity_cleanup(&csi2->subdev.entity);
+ return ret;
+}
+
+/*
+ * omap4iss_csi2_init - Routine for module driver init
+ */
+int omap4iss_csi2_init(struct iss_device *iss)
+{
+ struct iss_csi2_device *csi2a = &iss->csi2a;
+ struct iss_csi2_device *csi2b = &iss->csi2b;
+ int ret;
+
+ csi2a->iss = iss;
+ csi2a->available = 1;
+ csi2a->regs1 = OMAP4_ISS_MEM_CSI2_A_REGS1;
+ csi2a->phy = &iss->csiphy1;
+ csi2a->subclk = OMAP4_ISS_SUBCLK_CSI2_A;
+ csi2a->state = ISS_PIPELINE_STREAM_STOPPED;
+ init_waitqueue_head(&csi2a->wait);
+
+ ret = csi2_init_entities(csi2a, "a");
+ if (ret < 0)
+ return ret;
+
+ csi2b->iss = iss;
+ csi2b->available = 1;
+ csi2b->regs1 = OMAP4_ISS_MEM_CSI2_B_REGS1;
+ csi2b->phy = &iss->csiphy2;
+ csi2b->subclk = OMAP4_ISS_SUBCLK_CSI2_B;
+ csi2b->state = ISS_PIPELINE_STREAM_STOPPED;
+ init_waitqueue_head(&csi2b->wait);
+
+ ret = csi2_init_entities(csi2b, "b");
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * omap4iss_csi2_cleanup - Routine for module driver cleanup
+ */
+void omap4iss_csi2_cleanup(struct iss_device *iss)
+{
+ struct iss_csi2_device *csi2a = &iss->csi2a;
+ struct iss_csi2_device *csi2b = &iss->csi2b;
+
+ omap4iss_video_cleanup(&csi2a->video_out);
+ media_entity_cleanup(&csi2a->subdev.entity);
+
+ omap4iss_video_cleanup(&csi2b->video_out);
+ media_entity_cleanup(&csi2b->subdev.entity);
+}
diff --git a/drivers/staging/media/omap4iss/iss_csi2.h b/drivers/staging/media/omap4iss/iss_csi2.h
new file mode 100644
index 000000000000..971aa7b08013
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_csi2.h
@@ -0,0 +1,158 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - CSI2 module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_CSI2_H
+#define OMAP4_ISS_CSI2_H
+
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include "iss_video.h"
+
+struct iss_csiphy;
+
+/* This is not an exhaustive list */
+enum iss_csi2_pix_formats {
+ CSI2_PIX_FMT_OTHERS = 0,
+ CSI2_PIX_FMT_YUV422_8BIT = 0x1e,
+ CSI2_PIX_FMT_YUV422_8BIT_VP = 0x9e,
+ CSI2_PIX_FMT_YUV422_8BIT_VP16 = 0xde,
+ CSI2_PIX_FMT_RAW10_EXP16 = 0xab,
+ CSI2_PIX_FMT_RAW10_EXP16_VP = 0x12f,
+ CSI2_PIX_FMT_RAW8 = 0x2a,
+ CSI2_PIX_FMT_RAW8_DPCM10_EXP16 = 0x2aa,
+ CSI2_PIX_FMT_RAW8_DPCM10_VP = 0x32a,
+ CSI2_PIX_FMT_RAW8_VP = 0x12a,
+ CSI2_USERDEF_8BIT_DATA1_DPCM10_VP = 0x340,
+ CSI2_USERDEF_8BIT_DATA1_DPCM10 = 0x2c0,
+ CSI2_USERDEF_8BIT_DATA1 = 0x40,
+};
+
+enum iss_csi2_irqevents {
+ OCP_ERR_IRQ = 0x4000,
+ SHORT_PACKET_IRQ = 0x2000,
+ ECC_CORRECTION_IRQ = 0x1000,
+ ECC_NO_CORRECTION_IRQ = 0x800,
+ COMPLEXIO2_ERR_IRQ = 0x400,
+ COMPLEXIO1_ERR_IRQ = 0x200,
+ FIFO_OVF_IRQ = 0x100,
+ CONTEXT7 = 0x80,
+ CONTEXT6 = 0x40,
+ CONTEXT5 = 0x20,
+ CONTEXT4 = 0x10,
+ CONTEXT3 = 0x8,
+ CONTEXT2 = 0x4,
+ CONTEXT1 = 0x2,
+ CONTEXT0 = 0x1,
+};
+
+enum iss_csi2_ctx_irqevents {
+ CTX_ECC_CORRECTION = 0x100,
+ CTX_LINE_NUMBER = 0x80,
+ CTX_FRAME_NUMBER = 0x40,
+ CTX_CS = 0x20,
+ CTX_LE = 0x8,
+ CTX_LS = 0x4,
+ CTX_FE = 0x2,
+ CTX_FS = 0x1,
+};
+
+enum iss_csi2_frame_mode {
+ ISS_CSI2_FRAME_IMMEDIATE,
+ ISS_CSI2_FRAME_AFTERFEC,
+};
+
+#define ISS_CSI2_MAX_CTX_NUM 7
+
+struct iss_csi2_ctx_cfg {
+ u8 ctxnum; /* context number 0 - 7 */
+ u8 dpcm_decompress;
+
+ /* Fields in CSI2_CTx_CTRL2 - locked by CSI2_CTx_CTRL1.CTX_EN */
+ u8 virtual_id;
+ u16 format_id; /* as in CSI2_CTx_CTRL2[9:0] */
+ u8 dpcm_predictor; /* 1: simple, 0: advanced */
+
+ /* Fields in CSI2_CTx_CTRL1/3 - Shadowed */
+ u16 alpha;
+ u16 data_offset;
+ u32 ping_addr;
+ u32 pong_addr;
+ u8 eof_enabled;
+ u8 eol_enabled;
+ u8 checksum_enabled;
+ u8 enabled;
+};
+
+struct iss_csi2_timing_cfg {
+ u8 ionum; /* IO1 or IO2 as in CSI2_TIMING */
+ unsigned force_rx_mode:1;
+ unsigned stop_state_16x:1;
+ unsigned stop_state_4x:1;
+ u16 stop_state_counter;
+};
+
+struct iss_csi2_ctrl_cfg {
+ bool vp_clk_enable;
+ bool vp_only_enable;
+ u8 vp_out_ctrl;
+ enum iss_csi2_frame_mode frame_mode;
+ bool ecc_enable;
+ bool if_enable;
+};
+
+#define CSI2_PAD_SINK 0
+#define CSI2_PAD_SOURCE 1
+#define CSI2_PADS_NUM 2
+
+#define CSI2_OUTPUT_IPIPEIF (1 << 0)
+#define CSI2_OUTPUT_MEMORY (1 << 1)
+
+struct iss_csi2_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[CSI2_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[CSI2_PADS_NUM];
+
+ struct iss_video video_out;
+ struct iss_device *iss;
+
+ u8 available; /* Is the IP present on the silicon? */
+
+ /* memory resources, as defined in enum iss_mem_resources */
+ unsigned int regs1;
+ unsigned int regs2;
+ /* ISP subclock, as defined in enum iss_isp_subclk_resource */
+ unsigned int subclk;
+
+ u32 output; /* output to IPIPEIF, memory or both? */
+ bool dpcm_decompress;
+ unsigned int frame_skip;
+ bool use_fs_irq;
+
+ struct iss_csiphy *phy;
+ struct iss_csi2_ctx_cfg contexts[ISS_CSI2_MAX_CTX_NUM + 1];
+ struct iss_csi2_timing_cfg timing[2];
+ struct iss_csi2_ctrl_cfg ctrl;
+ enum iss_pipeline_stream_state state;
+ wait_queue_head_t wait;
+ atomic_t stopping;
+};
+
+void omap4iss_csi2_isr(struct iss_csi2_device *csi2);
+int omap4iss_csi2_reset(struct iss_csi2_device *csi2);
+int omap4iss_csi2_init(struct iss_device *iss);
+void omap4iss_csi2_cleanup(struct iss_device *iss);
+void omap4iss_csi2_unregister_entities(struct iss_csi2_device *csi2);
+int omap4iss_csi2_register_entities(struct iss_csi2_device *csi2,
+ struct v4l2_device *vdev);
+#endif /* OMAP4_ISS_CSI2_H */
diff --git a/drivers/staging/media/omap4iss/iss_csiphy.c b/drivers/staging/media/omap4iss/iss_csiphy.c
new file mode 100644
index 000000000000..7c3d55d811ef
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_csiphy.c
@@ -0,0 +1,279 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - CSI PHY module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+
+#include "../../../../arch/arm/mach-omap2/control.h"
+
+#include "iss.h"
+#include "iss_regs.h"
+#include "iss_csiphy.h"
+
+/*
+ * csiphy_lanes_config - Configuration of CSIPHY lanes.
+ *
+ * Updates HW configuration.
+ * Called with phy->mutex taken.
+ */
+static void csiphy_lanes_config(struct iss_csiphy *phy)
+{
+ unsigned int i;
+ u32 reg;
+
+ reg = iss_reg_read(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG);
+
+ for (i = 0; i < phy->max_data_lanes; i++) {
+ reg &= ~(CSI2_COMPLEXIO_CFG_DATA_POL(i + 1) |
+ CSI2_COMPLEXIO_CFG_DATA_POSITION_MASK(i + 1));
+ reg |= (phy->lanes.data[i].pol ?
+ CSI2_COMPLEXIO_CFG_DATA_POL(i + 1) : 0);
+ reg |= (phy->lanes.data[i].pos <<
+ CSI2_COMPLEXIO_CFG_DATA_POSITION_SHIFT(i + 1));
+ }
+
+ reg &= ~(CSI2_COMPLEXIO_CFG_CLOCK_POL |
+ CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK);
+ reg |= phy->lanes.clk.pol ? CSI2_COMPLEXIO_CFG_CLOCK_POL : 0;
+ reg |= phy->lanes.clk.pos << CSI2_COMPLEXIO_CFG_CLOCK_POSITION_SHIFT;
+
+ iss_reg_write(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG, reg);
+}
+
+/*
+ * csiphy_set_power
+ * @power: Power state to be set.
+ *
+ * Returns 0 if successful, or -EBUSY if the retry count is exceeded.
+ */
+static int csiphy_set_power(struct iss_csiphy *phy, u32 power)
+{
+ u32 reg;
+ u8 retry_count;
+
+ iss_reg_update(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG,
+ CSI2_COMPLEXIO_CFG_PWD_CMD_MASK,
+ power | CSI2_COMPLEXIO_CFG_PWR_AUTO);
+
+ retry_count = 0;
+ do {
+ udelay(1);
+ reg = iss_reg_read(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG)
+ & CSI2_COMPLEXIO_CFG_PWD_STATUS_MASK;
+
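+ /*
+ * The PWD_STATUS field sits two bits below the PWD_CMD field in
+ * COMPLEXIO_CFG, so shifting the requested command right by two
+ * aligns it with the reported status for the comparison below.
+ */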
+ if (reg != power >> 2)
+ retry_count++;
+
+ } while ((reg != power >> 2) && (retry_count < 250));
+
+ if (retry_count == 250) {
+ dev_err(phy->iss->dev, "CSI2 CIO set power failed!\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/*
+ * csiphy_dphy_config - Configure CSI2 D-PHY parameters.
+ *
+ * Called with phy->mutex taken.
+ */
+static void csiphy_dphy_config(struct iss_csiphy *phy)
+{
+ u32 reg;
+
+ /* Set up REGISTER0 */
+ reg = phy->dphy.ths_term << REGISTER0_THS_TERM_SHIFT;
+ reg |= phy->dphy.ths_settle << REGISTER0_THS_SETTLE_SHIFT;
+
+ iss_reg_write(phy->iss, phy->phy_regs, REGISTER0, reg);
+
+ /* Set up REGISTER1 */
+ reg = phy->dphy.tclk_term << REGISTER1_TCLK_TERM_SHIFT;
+ reg |= phy->dphy.tclk_miss << REGISTER1_CTRLCLK_DIV_FACTOR_SHIFT;
+ reg |= phy->dphy.tclk_settle << REGISTER1_TCLK_SETTLE_SHIFT;
+ reg |= 0xb8 << REGISTER1_DPHY_HS_SYNC_PATTERN_SHIFT;
+
+ iss_reg_write(phy->iss, phy->phy_regs, REGISTER1, reg);
+}
+
+/*
+ * TCLK values are OK at their reset values
+ */
+#define TCLK_TERM 0
+#define TCLK_MISS 1
+#define TCLK_SETTLE 14
+
+int omap4iss_csiphy_config(struct iss_device *iss,
+ struct v4l2_subdev *csi2_subdev)
+{
+ struct iss_csi2_device *csi2 = v4l2_get_subdevdata(csi2_subdev);
+ struct iss_pipeline *pipe = to_iss_pipeline(&csi2_subdev->entity);
+ struct iss_v4l2_subdevs_group *subdevs = pipe->external->host_priv;
+ struct iss_csiphy_dphy_cfg csi2phy;
+ int csi2_ddrclk_khz;
+ struct iss_csiphy_lanes_cfg *lanes;
+ unsigned int used_lanes = 0;
+ u32 cam_rx_ctrl;
+ unsigned int i;
+
+ lanes = &subdevs->bus.csi2.lanecfg;
+
+ /*
+ * SCM.CONTROL_CAMERA_RX
+ * - bit [31] : CSIPHY2 lane 2 enable (4460+ only)
+ * - bit [30:29] : CSIPHY2 per-lane enable (1 to 0)
+ * - bit [28:24] : CSIPHY1 per-lane enable (4 to 0)
+ * - bit [21] : CSIPHY2 CTRLCLK enable
+ * - bit [20:19] : CSIPHY2 config: 00 d-phy, 01/10 ccp2
+ * - bit [18] : CSIPHY1 CTRLCLK enable
+ * - bit [17:16] : CSIPHY1 config: 00 d-phy, 01/10 ccp2
+ */
+ cam_rx_ctrl = omap4_ctrl_pad_readl(
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX);
+
+ if (subdevs->interface == ISS_INTERFACE_CSI2A_PHY1) {
+ cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI21_LANEENABLE_MASK |
+ OMAP4_CAMERARX_CSI21_CAMMODE_MASK);
+ /* NOTE: Leave CSIPHY1 config to 0x0: D-PHY mode */
+ /* Enable all lanes for now */
+ cam_rx_ctrl |=
+ 0x1f << OMAP4_CAMERARX_CSI21_LANEENABLE_SHIFT;
+ /* Enable CTRLCLK */
+ cam_rx_ctrl |= OMAP4_CAMERARX_CSI21_CTRLCLKEN_MASK;
+ }
+
+ if (subdevs->interface == ISS_INTERFACE_CSI2B_PHY2) {
+ cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI22_LANEENABLE_MASK |
+ OMAP4_CAMERARX_CSI22_CAMMODE_MASK);
+ /* NOTE: Leave CSIPHY2 config to 0x0: D-PHY mode */
+ /* Enable all lanes for now */
+ cam_rx_ctrl |=
+ 0x3 << OMAP4_CAMERARX_CSI22_LANEENABLE_SHIFT;
+ /* Enable CTRLCLK */
+ cam_rx_ctrl |= OMAP4_CAMERARX_CSI22_CTRLCLKEN_MASK;
+ }
+
+ omap4_ctrl_pad_writel(cam_rx_ctrl,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX);
+
+ /* Reset used lane count */
+ csi2->phy->used_data_lanes = 0;
+
+ /* Clock and data lanes verification */
+ for (i = 0; i < csi2->phy->max_data_lanes; i++) {
+ if (lanes->data[i].pos == 0)
+ continue;
+
+ if (lanes->data[i].pol > 1 ||
+ lanes->data[i].pos > (csi2->phy->max_data_lanes + 1))
+ return -EINVAL;
+
+ if (used_lanes & (1 << lanes->data[i].pos))
+ return -EINVAL;
+
+ used_lanes |= 1 << lanes->data[i].pos;
+ csi2->phy->used_data_lanes++;
+ }
+
+ if (lanes->clk.pol > 1 ||
+ lanes->clk.pos > (csi2->phy->max_data_lanes + 1))
+ return -EINVAL;
+
+ if (lanes->clk.pos == 0 || used_lanes & (1 << lanes->clk.pos))
+ return -EINVAL;
+
+ csi2_ddrclk_khz = pipe->external_rate / 1000
+ / (2 * csi2->phy->used_data_lanes)
+ * pipe->external_bpp;
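+
+ /*
+ * Illustrative example (hypothetical sensor numbers): a 96 MHz pixel
+ * rate at 10 bits per pixel over 2 data lanes gives
+ * 96000 / (2 * 2) * 10 = 240000 kHz, i.e. a 240 MHz DDR clock, since
+ * the total bit rate is split across the lanes and clocked on both
+ * edges.
+ */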
+
+ /*
+ * THS_TERM: Programmed value = ceil(12.5 ns/DDRClk period) - 1.
+ * THS_SETTLE: Programmed value = ceil(90 ns/DDRClk period) + 3.
+ */
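+ /*
+ * Worked example with the hypothetical 240 MHz DDR clock above
+ * (period ~4.17 ns): ths_term = ceil(12.5 / 4.17) - 1 = 2 and
+ * ths_settle = ceil(90 / 4.17) + 3 = 25, matching
+ * DIV_ROUND_UP(25 * 240000, 2000000) - 1 and
+ * DIV_ROUND_UP(90 * 240000, 1000000) + 3.
+ */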
+ csi2phy.ths_term = DIV_ROUND_UP(25 * csi2_ddrclk_khz, 2000000) - 1;
+ csi2phy.ths_settle = DIV_ROUND_UP(90 * csi2_ddrclk_khz, 1000000) + 3;
+ csi2phy.tclk_term = TCLK_TERM;
+ csi2phy.tclk_miss = TCLK_MISS;
+ csi2phy.tclk_settle = TCLK_SETTLE;
+
+ mutex_lock(&csi2->phy->mutex);
+ csi2->phy->dphy = csi2phy;
+ csi2->phy->lanes = *lanes;
+ mutex_unlock(&csi2->phy->mutex);
+
+ return 0;
+}
+
+int omap4iss_csiphy_acquire(struct iss_csiphy *phy)
+{
+ int rval;
+
+ mutex_lock(&phy->mutex);
+
+ rval = omap4iss_csi2_reset(phy->csi2);
+ if (rval)
+ goto done;
+
+ csiphy_dphy_config(phy);
+ csiphy_lanes_config(phy);
+
+ rval = csiphy_set_power(phy, CSI2_COMPLEXIO_CFG_PWD_CMD_ON);
+ if (rval)
+ goto done;
+
+ phy->phy_in_use = 1;
+
+done:
+ mutex_unlock(&phy->mutex);
+ return rval;
+}
+
+void omap4iss_csiphy_release(struct iss_csiphy *phy)
+{
+ mutex_lock(&phy->mutex);
+ if (phy->phy_in_use) {
+ csiphy_set_power(phy, CSI2_COMPLEXIO_CFG_PWD_CMD_OFF);
+ phy->phy_in_use = 0;
+ }
+ mutex_unlock(&phy->mutex);
+}
+
+/*
+ * omap4iss_csiphy_init - Initialize the CSI PHY frontends
+ */
+int omap4iss_csiphy_init(struct iss_device *iss)
+{
+ struct iss_csiphy *phy1 = &iss->csiphy1;
+ struct iss_csiphy *phy2 = &iss->csiphy2;
+
+ phy1->iss = iss;
+ phy1->csi2 = &iss->csi2a;
+ phy1->max_data_lanes = ISS_CSIPHY1_NUM_DATA_LANES;
+ phy1->used_data_lanes = 0;
+ phy1->cfg_regs = OMAP4_ISS_MEM_CSI2_A_REGS1;
+ phy1->phy_regs = OMAP4_ISS_MEM_CAMERARX_CORE1;
+ mutex_init(&phy1->mutex);
+
+ phy2->iss = iss;
+ phy2->csi2 = &iss->csi2b;
+ phy2->max_data_lanes = ISS_CSIPHY2_NUM_DATA_LANES;
+ phy2->used_data_lanes = 0;
+ phy2->cfg_regs = OMAP4_ISS_MEM_CSI2_B_REGS1;
+ phy2->phy_regs = OMAP4_ISS_MEM_CAMERARX_CORE2;
+ mutex_init(&phy2->mutex);
+
+ return 0;
+}
diff --git a/drivers/staging/media/omap4iss/iss_csiphy.h b/drivers/staging/media/omap4iss/iss_csiphy.h
new file mode 100644
index 000000000000..e9ca43955654
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_csiphy.h
@@ -0,0 +1,51 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - CSI PHY module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_CSI_PHY_H
+#define OMAP4_ISS_CSI_PHY_H
+
+#include <media/omap4iss.h>
+
+struct iss_csi2_device;
+
+struct iss_csiphy_dphy_cfg {
+ u8 ths_term;
+ u8 ths_settle;
+ u8 tclk_term;
+ unsigned tclk_miss:1;
+ u8 tclk_settle;
+};
+
+struct iss_csiphy {
+ struct iss_device *iss;
+ struct mutex mutex; /* serialize csiphy configuration */
+ u8 phy_in_use;
+ struct iss_csi2_device *csi2;
+
+ /* memory resources, as defined in enum iss_mem_resources */
+ unsigned int cfg_regs;
+ unsigned int phy_regs;
+
+ u8 max_data_lanes; /* number of CSI2 Data Lanes supported */
+ u8 used_data_lanes; /* number of CSI2 Data Lanes used */
+ struct iss_csiphy_lanes_cfg lanes;
+ struct iss_csiphy_dphy_cfg dphy;
+};
+
+int omap4iss_csiphy_config(struct iss_device *iss,
+ struct v4l2_subdev *csi2_subdev);
+int omap4iss_csiphy_acquire(struct iss_csiphy *phy);
+void omap4iss_csiphy_release(struct iss_csiphy *phy);
+int omap4iss_csiphy_init(struct iss_device *iss);
+
+#endif /* OMAP4_ISS_CSI_PHY_H */
diff --git a/drivers/staging/media/omap4iss/iss_ipipe.c b/drivers/staging/media/omap4iss/iss_ipipe.c
new file mode 100644
index 000000000000..6eaafc5e2eea
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_ipipe.c
@@ -0,0 +1,570 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP IPIPE module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#include "iss.h"
+#include "iss_regs.h"
+#include "iss_ipipe.h"
+
+static struct v4l2_mbus_framefmt *
+__ipipe_get_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which);
+
+static const unsigned int ipipe_fmts[] = {
+ V4L2_MBUS_FMT_SGRBG10_1X10,
+ V4L2_MBUS_FMT_SRGGB10_1X10,
+ V4L2_MBUS_FMT_SBGGR10_1X10,
+ V4L2_MBUS_FMT_SGBRG10_1X10,
+};
+
+/*
+ * ipipe_print_status - Print current IPIPE Module register values.
+ * @ipipe: Pointer to ISS ISP IPIPE device.
+ *
+ * Also prints other debug information stored in the IPIPE module.
+ */
+#define IPIPE_PRINT_REGISTER(iss, name)\
+ dev_dbg(iss->dev, "###IPIPE " #name "=0x%08x\n", \
+ iss_reg_read(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_##name))
+
+static void ipipe_print_status(struct iss_ipipe_device *ipipe)
+{
+ struct iss_device *iss = to_iss_device(ipipe);
+
+ dev_dbg(iss->dev, "-------------IPIPE Register dump-------------\n");
+
+ IPIPE_PRINT_REGISTER(iss, SRC_EN);
+ IPIPE_PRINT_REGISTER(iss, SRC_MODE);
+ IPIPE_PRINT_REGISTER(iss, SRC_FMT);
+ IPIPE_PRINT_REGISTER(iss, SRC_COL);
+ IPIPE_PRINT_REGISTER(iss, SRC_VPS);
+ IPIPE_PRINT_REGISTER(iss, SRC_VSZ);
+ IPIPE_PRINT_REGISTER(iss, SRC_HPS);
+ IPIPE_PRINT_REGISTER(iss, SRC_HSZ);
+ IPIPE_PRINT_REGISTER(iss, GCK_MMR);
+ IPIPE_PRINT_REGISTER(iss, YUV_PHS);
+
+ dev_dbg(iss->dev, "-----------------------------------------------\n");
+}
+
+/*
+ * ipipe_enable - Enable/Disable IPIPE.
+ * @enable: enable flag
+ *
+ */
+static void ipipe_enable(struct iss_ipipe_device *ipipe, u8 enable)
+{
+ struct iss_device *iss = to_iss_device(ipipe);
+
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_EN,
+ IPIPE_SRC_EN_EN, enable ? IPIPE_SRC_EN_EN : 0);
+}
+
+/* -----------------------------------------------------------------------------
+ * Format- and pipeline-related configuration helpers
+ */
+
+static void ipipe_configure(struct iss_ipipe_device *ipipe)
+{
+ struct iss_device *iss = to_iss_device(ipipe);
+ struct v4l2_mbus_framefmt *format;
+
+ /* IPIPE_PAD_SINK */
+ format = &ipipe->formats[IPIPE_PAD_SINK];
+
+ /* NOTE: Currently just supporting pipeline IN: RAW Bayer, OUT: YUV422 */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_FMT,
+ IPIPE_SRC_FMT_RAW2YUV);
+
+ /* Enable YUV444 -> YUV422 conversion */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_YUV_PHS,
+ IPIPE_YUV_PHS_LPF);
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_VPS, 0);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_HPS, 0);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_VSZ,
+ (format->height - 2) & IPIPE_SRC_VSZ_MASK);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_HSZ,
+ (format->width - 1) & IPIPE_SRC_HSZ_MASK);
+
+ /* Ignore ipipeif_wrt signal, and operate on-the-fly. */
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_MODE,
+ IPIPE_SRC_MODE_WRT | IPIPE_SRC_MODE_OST);
+
+ /* HACK: Values tuned for Ducati SW (OV) */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_COL,
+ IPIPE_SRC_COL_EE_B | IPIPE_SRC_COL_EO_GB |
+ IPIPE_SRC_COL_OE_GR | IPIPE_SRC_COL_OO_R);
+
+ /* IPIPE_PAD_SOURCE_VP */
+ format = &ipipe->formats[IPIPE_PAD_SOURCE_VP];
+ /* Do nothing? */
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+/*
+ * ipipe_set_stream - Enable/Disable streaming on the IPIPE module
+ * @sd: ISP IPIPE V4L2 subdevice
+ * @enable: Enable/disable stream
+ */
+static int ipipe_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct iss_device *iss = to_iss_device(ipipe);
+ int ret = 0;
+
+ if (ipipe->state == ISS_PIPELINE_STREAM_STOPPED) {
+ if (enable == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+
+ omap4iss_isp_subclk_enable(iss, OMAP4_ISS_ISP_SUBCLK_IPIPE);
+
+ /* Enable clk_arm_g0 */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_GCK_MMR,
+ IPIPE_GCK_MMR_REG);
+
+ /* Enable clk_pix_g[3:0] */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_GCK_PIX,
+ IPIPE_GCK_PIX_G3 | IPIPE_GCK_PIX_G2 |
+ IPIPE_GCK_PIX_G1 | IPIPE_GCK_PIX_G0);
+ }
+
+ switch (enable) {
+ case ISS_PIPELINE_STREAM_CONTINUOUS:
+
+ ipipe_configure(ipipe);
+ ipipe_print_status(ipipe);
+
+ atomic_set(&ipipe->stopping, 0);
+ ipipe_enable(ipipe, 1);
+ break;
+
+ case ISS_PIPELINE_STREAM_STOPPED:
+ if (ipipe->state == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+ if (omap4iss_module_sync_idle(&sd->entity, &ipipe->wait,
+ &ipipe->stopping))
+ ret = -ETIMEDOUT;
+
+ ipipe_enable(ipipe, 0);
+ omap4iss_isp_subclk_disable(iss, OMAP4_ISS_ISP_SUBCLK_IPIPE);
+ break;
+ }
+
+ ipipe->state = enable;
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__ipipe_get_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+ else
+ return &ipipe->formats[pad];
+}
+
+/*
+ * ipipe_try_format - Try video format on a pad
+ * @ipipe: ISS IPIPE device
+ * @fh : V4L2 subdev file handle
+ * @pad: Pad number
+ * @fmt: Format
+ */
+static void
+ipipe_try_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ struct v4l2_mbus_framefmt *format;
+ unsigned int width = fmt->width;
+ unsigned int height = fmt->height;
+ unsigned int i;
+
+ switch (pad) {
+ case IPIPE_PAD_SINK:
+ for (i = 0; i < ARRAY_SIZE(ipipe_fmts); i++) {
+ if (fmt->code == ipipe_fmts[i])
+ break;
+ }
+
+ /* If not found, use SGRBG10 as default */
+ if (i >= ARRAY_SIZE(ipipe_fmts))
+ fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+ /* Clamp the input size. */
+ fmt->width = clamp_t(u32, width, 1, 8192);
+ fmt->height = clamp_t(u32, height, 1, 8192);
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ break;
+
+ case IPIPE_PAD_SOURCE_VP:
+ format = __ipipe_get_format(ipipe, fh, IPIPE_PAD_SINK, which);
+ memcpy(fmt, format, sizeof(*fmt));
+
+ fmt->code = V4L2_MBUS_FMT_UYVY8_1X16;
+ fmt->width = clamp_t(u32, width, 32, fmt->width);
+ fmt->height = clamp_t(u32, height, 32, fmt->height);
+ fmt->colorspace = V4L2_COLORSPACE_JPEG;
+ break;
+ }
+
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * ipipe_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int ipipe_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ switch (code->pad) {
+ case IPIPE_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(ipipe_fmts))
+ return -EINVAL;
+
+ code->code = ipipe_fmts[code->index];
+ break;
+
+ case IPIPE_PAD_SOURCE_VP:
+ /* FIXME: Forced format conversion inside IPIPE ? */
+ if (code->index != 0)
+ return -EINVAL;
+
+ code->code = V4L2_MBUS_FMT_UYVY8_1X16;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ ipipe_try_format(ipipe, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ ipipe_try_format(ipipe, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * ipipe_get_format - Retrieve the video format on a pad
+ * @sd : ISP IPIPE V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ipipe_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ipipe_get_format(ipipe, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * ipipe_set_format - Set the video format on a pad
+ * @sd : ISP IPIPE V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ipipe_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ipipe_get_format(ipipe, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ ipipe_try_format(ipipe, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == IPIPE_PAD_SINK) {
+ format = __ipipe_get_format(ipipe, fh, IPIPE_PAD_SOURCE_VP,
+ fmt->which);
+ *format = fmt->format;
+ ipipe_try_format(ipipe, fh, IPIPE_PAD_SOURCE_VP, format,
+ fmt->which);
+ }
+
+ return 0;
+}
+
+static int ipipe_link_validate(struct v4l2_subdev *sd, struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ /* Check if the two ends match */
+ if (source_fmt->format.width != sink_fmt->format.width ||
+ source_fmt->format.height != sink_fmt->format.height)
+ return -EPIPE;
+
+ if (source_fmt->format.code != sink_fmt->format.code)
+ return -EPIPE;
+
+ return 0;
+}
+
+/*
+ * ipipe_init_formats - Initialize formats on all pads
+ * @sd: ISP IPIPE V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = IPIPE_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format.format.width = 4096;
+ format.format.height = 4096;
+ ipipe_set_format(sd, fh, &format);
+
+ return 0;
+}
+
+/* V4L2 subdev video operations */
+static const struct v4l2_subdev_video_ops ipipe_v4l2_video_ops = {
+ .s_stream = ipipe_set_stream,
+};
+
+/* V4L2 subdev pad operations */
+static const struct v4l2_subdev_pad_ops ipipe_v4l2_pad_ops = {
+ .enum_mbus_code = ipipe_enum_mbus_code,
+ .enum_frame_size = ipipe_enum_frame_size,
+ .get_fmt = ipipe_get_format,
+ .set_fmt = ipipe_set_format,
+ .link_validate = ipipe_link_validate,
+};
+
+/* V4L2 subdev operations */
+static const struct v4l2_subdev_ops ipipe_v4l2_ops = {
+ .video = &ipipe_v4l2_video_ops,
+ .pad = &ipipe_v4l2_pad_ops,
+};
+
+/* V4L2 subdev internal operations */
+static const struct v4l2_subdev_internal_ops ipipe_v4l2_internal_ops = {
+ .open = ipipe_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * ipipe_link_setup - Setup IPIPE connections
+ * @entity: IPIPE media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * return -EINVAL or zero on success
+ */
+static int ipipe_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct iss_device *iss = to_iss_device(ipipe);
+
+ switch (local->index | media_entity_type(remote->entity)) {
+ case IPIPE_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* Read from IPIPEIF. */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ipipe->input = IPIPE_INPUT_NONE;
+ break;
+ }
+
+ if (ipipe->input != IPIPE_INPUT_NONE)
+ return -EBUSY;
+
+ if (remote->entity == &iss->ipipeif.subdev.entity)
+ ipipe->input = IPIPE_INPUT_IPIPEIF;
+
+ break;
+
+ case IPIPE_PAD_SOURCE_VP | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* Send to RESIZER */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (ipipe->output & ~IPIPE_OUTPUT_VP)
+ return -EBUSY;
+ ipipe->output |= IPIPE_OUTPUT_VP;
+ } else {
+ ipipe->output &= ~IPIPE_OUTPUT_VP;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations ipipe_media_ops = {
+ .link_setup = ipipe_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * ipipe_init_entities - Initialize V4L2 subdev and media entity
+ * @ipipe: ISS ISP IPIPE module
+ *
+ * Return 0 on success and a negative error code on failure.
+ */
+static int ipipe_init_entities(struct iss_ipipe_device *ipipe)
+{
+ struct v4l2_subdev *sd = &ipipe->subdev;
+ struct media_pad *pads = ipipe->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ ipipe->input = IPIPE_INPUT_NONE;
+
+ v4l2_subdev_init(sd, &ipipe_v4l2_ops);
+ sd->internal_ops = &ipipe_v4l2_internal_ops;
+ strlcpy(sd->name, "OMAP4 ISS ISP IPIPE", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for iss subdevs */
+ v4l2_set_subdevdata(sd, ipipe);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[IPIPE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[IPIPE_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
+
+ me->ops = &ipipe_media_ops;
+ ret = media_entity_init(me, IPIPE_PADS_NUM, pads, 0);
+ if (ret < 0)
+ return ret;
+
+ ipipe_init_formats(sd, NULL);
+
+ return 0;
+}
+
+void omap4iss_ipipe_unregister_entities(struct iss_ipipe_device *ipipe)
+{
+ media_entity_cleanup(&ipipe->subdev.entity);
+
+ v4l2_device_unregister_subdev(&ipipe->subdev);
+}
+
+int omap4iss_ipipe_register_entities(struct iss_ipipe_device *ipipe,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video node. */
+ ret = v4l2_device_register_subdev(vdev, &ipipe->subdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap4iss_ipipe_unregister_entities(ipipe);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP IPIPE initialisation and cleanup
+ */
+
+/*
+ * omap4iss_ipipe_init - IPIPE module initialization.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ *
+ * TODO: Get the initialisation values from platform data.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int omap4iss_ipipe_init(struct iss_device *iss)
+{
+ struct iss_ipipe_device *ipipe = &iss->ipipe;
+
+ ipipe->state = ISS_PIPELINE_STREAM_STOPPED;
+ init_waitqueue_head(&ipipe->wait);
+
+ return ipipe_init_entities(ipipe);
+}
+
+/*
+ * omap4iss_ipipe_cleanup - IPIPE module cleanup.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ */
+void omap4iss_ipipe_cleanup(struct iss_device *iss)
+{
+ /* FIXME: are you sure there's nothing to do? */
+}
diff --git a/drivers/staging/media/omap4iss/iss_ipipe.h b/drivers/staging/media/omap4iss/iss_ipipe.h
new file mode 100644
index 000000000000..c22d9041f2a5
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_ipipe.h
@@ -0,0 +1,67 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP IPIPE module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_IPIPE_H
+#define OMAP4_ISS_IPIPE_H
+
+#include "iss_video.h"
+
+enum ipipe_input_entity {
+ IPIPE_INPUT_NONE,
+ IPIPE_INPUT_IPIPEIF,
+};
+
+#define IPIPE_OUTPUT_VP (1 << 0)
+
+/* Sink and source IPIPE pads */
+#define IPIPE_PAD_SINK 0
+#define IPIPE_PAD_SOURCE_VP 1
+#define IPIPE_PADS_NUM 2
+
+/*
+ * struct iss_ipipe_device - Structure for the IPIPE module to store its own
+ * information
+ * @subdev: V4L2 subdevice
+ * @pads: Sink and source media entity pads
+ * @formats: Active video formats
+ * @input: Active input
+ * @output: Active outputs
+ * @error: A hardware error occurred during capture
+ * @state: Streaming state
+ * @wait: Wait queue used to stop the module
+ * @stopping: Stopping state
+ */
+struct iss_ipipe_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[IPIPE_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[IPIPE_PADS_NUM];
+
+ enum ipipe_input_entity input;
+ unsigned int output;
+ unsigned int error;
+
+ enum iss_pipeline_stream_state state;
+ wait_queue_head_t wait;
+ atomic_t stopping;
+};
+
+struct iss_device;
+
+int omap4iss_ipipe_register_entities(struct iss_ipipe_device *ipipe,
+ struct v4l2_device *vdev);
+void omap4iss_ipipe_unregister_entities(struct iss_ipipe_device *ipipe);
+
+int omap4iss_ipipe_init(struct iss_device *iss);
+void omap4iss_ipipe_cleanup(struct iss_device *iss);
+
+#endif /* OMAP4_ISS_IPIPE_H */
diff --git a/drivers/staging/media/omap4iss/iss_ipipeif.c b/drivers/staging/media/omap4iss/iss_ipipeif.c
new file mode 100644
index 000000000000..7bc145762499
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_ipipeif.c
@@ -0,0 +1,849 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP IPIPEIF module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#include "iss.h"
+#include "iss_regs.h"
+#include "iss_ipipeif.h"
+
+static const unsigned int ipipeif_fmts[] = {
+ V4L2_MBUS_FMT_SGRBG10_1X10,
+ V4L2_MBUS_FMT_SRGGB10_1X10,
+ V4L2_MBUS_FMT_SBGGR10_1X10,
+ V4L2_MBUS_FMT_SGBRG10_1X10,
+ V4L2_MBUS_FMT_UYVY8_1X16,
+ V4L2_MBUS_FMT_YUYV8_1X16,
+};
+
+/*
+ * ipipeif_print_status - Print current IPIPEIF Module register values.
+ * @ipipeif: Pointer to ISS ISP IPIPEIF device.
+ *
+ * Also prints other debug information stored in the IPIPEIF module.
+ */
+#define IPIPEIF_PRINT_REGISTER(iss, name)\
+ dev_dbg(iss->dev, "###IPIPEIF " #name "=0x%08x\n", \
+ iss_reg_read(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_##name))
+
+#define ISIF_PRINT_REGISTER(iss, name)\
+ dev_dbg(iss->dev, "###ISIF " #name "=0x%08x\n", \
+ iss_reg_read(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_##name))
+
+#define ISP5_PRINT_REGISTER(iss, name)\
+ dev_dbg(iss->dev, "###ISP5 " #name "=0x%08x\n", \
+ iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_##name))
+
+static void ipipeif_print_status(struct iss_ipipeif_device *ipipeif)
+{
+ struct iss_device *iss = to_iss_device(ipipeif);
+
+ dev_dbg(iss->dev, "-------------IPIPEIF Register dump-------------\n");
+
+ IPIPEIF_PRINT_REGISTER(iss, CFG1);
+ IPIPEIF_PRINT_REGISTER(iss, CFG2);
+
+ ISIF_PRINT_REGISTER(iss, SYNCEN);
+ ISIF_PRINT_REGISTER(iss, CADU);
+ ISIF_PRINT_REGISTER(iss, CADL);
+ ISIF_PRINT_REGISTER(iss, MODESET);
+ ISIF_PRINT_REGISTER(iss, CCOLP);
+ ISIF_PRINT_REGISTER(iss, SPH);
+ ISIF_PRINT_REGISTER(iss, LNH);
+ ISIF_PRINT_REGISTER(iss, LNV);
+ ISIF_PRINT_REGISTER(iss, VDINT(0));
+ ISIF_PRINT_REGISTER(iss, HSIZE);
+
+ ISP5_PRINT_REGISTER(iss, SYSCONFIG);
+ ISP5_PRINT_REGISTER(iss, CTRL);
+ ISP5_PRINT_REGISTER(iss, IRQSTATUS(0));
+ ISP5_PRINT_REGISTER(iss, IRQENABLE_SET(0));
+ ISP5_PRINT_REGISTER(iss, IRQENABLE_CLR(0));
+
+ dev_dbg(iss->dev, "-----------------------------------------------\n");
+}
+
+static void ipipeif_write_enable(struct iss_ipipeif_device *ipipeif, u8 enable)
+{
+ struct iss_device *iss = to_iss_device(ipipeif);
+
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_SYNCEN,
+ ISIF_SYNCEN_DWEN, enable ? ISIF_SYNCEN_DWEN : 0);
+}
+
+/*
+ * ipipeif_enable - Enable/Disable IPIPEIF.
+ * @ipipeif: Pointer to ISS ISP IPIPEIF device.
+ * @enable: Enable flag
+ */
+static void ipipeif_enable(struct iss_ipipeif_device *ipipeif, u8 enable)
+{
+ struct iss_device *iss = to_iss_device(ipipeif);
+
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_SYNCEN,
+ ISIF_SYNCEN_SYEN, enable ? ISIF_SYNCEN_SYEN : 0);
+}
+
+/* -----------------------------------------------------------------------------
+ * Format- and pipeline-related configuration helpers
+ */
+
+/*
+ * ipipeif_set_outaddr - Set memory address to save output image
+ * @ipipeif: Pointer to ISP IPIPEIF device.
+ * @addr: 32-bit memory address aligned to a 32-byte boundary.
+ *
+ * Sets the memory address where the output will be saved.
+ */
+static void ipipeif_set_outaddr(struct iss_ipipeif_device *ipipeif, u32 addr)
+{
+ struct iss_device *iss = to_iss_device(ipipeif);
+
+ /* Save the address split across the Base Address H & L registers */
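+ /* Illustrative example (values assumed, not part of the original patch):
+ * a 32-byte aligned address such as 0x82000040 is programmed as
+ * CADU = 0x410 and CADL = 0x0002.
+ */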
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CADU,
+ (addr >> (16 + 5)) & ISIF_CADU_MASK);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CADL,
+ (addr >> 5) & ISIF_CADL_MASK);
+}
+
+static void ipipeif_configure(struct iss_ipipeif_device *ipipeif)
+{
+ struct iss_device *iss = to_iss_device(ipipeif);
+ const struct iss_format_info *info;
+ struct v4l2_mbus_framefmt *format;
+ u32 isif_ccolp = 0;
+
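+ /* Route the active input to the ISP. The bridge configuration is expected
+ * to select between the CSI2a and CSI2b receivers (see the
+ * ISS_CTRL_INPUT_SEL_* definitions in iss_regs.h).
+ */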
+ omap4iss_configure_bridge(iss, ipipeif->input);
+
+ /* IPIPEIF_PAD_SINK */
+ format = &ipipeif->formats[IPIPEIF_PAD_SINK];
+
+ /* IPIPEIF with YUV422 input from ISIF */
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_CFG1,
+ IPIPEIF_CFG1_INPSRC1_MASK | IPIPEIF_CFG1_INPSRC2_MASK);
+
+ /* Select ISIF/IPIPEIF input format */
+ switch (format->code) {
+ case V4L2_MBUS_FMT_UYVY8_1X16:
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_MODESET,
+ ISIF_MODESET_CCDMD | ISIF_MODESET_INPMOD_MASK |
+ ISIF_MODESET_CCDW_MASK,
+ ISIF_MODESET_INPMOD_YCBCR16);
+
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_CFG2,
+ IPIPEIF_CFG2_YUV8, IPIPEIF_CFG2_YUV16);
+
+ break;
+ case V4L2_MBUS_FMT_SGRBG10_1X10:
+ isif_ccolp = ISIF_CCOLP_CP0_F0_GR |
+ ISIF_CCOLP_CP1_F0_R |
+ ISIF_CCOLP_CP2_F0_B |
+ ISIF_CCOLP_CP3_F0_GB;
+ goto cont_raw;
+ case V4L2_MBUS_FMT_SRGGB10_1X10:
+ isif_ccolp = ISIF_CCOLP_CP0_F0_R |
+ ISIF_CCOLP_CP1_F0_GR |
+ ISIF_CCOLP_CP2_F0_GB |
+ ISIF_CCOLP_CP3_F0_B;
+ goto cont_raw;
+ case V4L2_MBUS_FMT_SBGGR10_1X10:
+ isif_ccolp = ISIF_CCOLP_CP0_F0_B |
+ ISIF_CCOLP_CP1_F0_GB |
+ ISIF_CCOLP_CP2_F0_GR |
+ ISIF_CCOLP_CP3_F0_R;
+ goto cont_raw;
+ case V4L2_MBUS_FMT_SGBRG10_1X10:
+ isif_ccolp = ISIF_CCOLP_CP0_F0_GB |
+ ISIF_CCOLP_CP1_F0_B |
+ ISIF_CCOLP_CP2_F0_R |
+ ISIF_CCOLP_CP3_F0_GR;
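+ /* All raw Bayer inputs share the configuration below; only the colour
+ * pattern programmed into ISIF_CCOLP differs between the cases above.
+ */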
+cont_raw:
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_CFG2,
+ IPIPEIF_CFG2_YUV16);
+
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_MODESET,
+ ISIF_MODESET_CCDMD | ISIF_MODESET_INPMOD_MASK |
+ ISIF_MODESET_CCDW_MASK, ISIF_MODESET_INPMOD_RAW |
+ ISIF_MODESET_CCDW_2BIT);
+
+ info = omap4iss_video_format_info(format->code);
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CGAMMAWD,
+ ISIF_CGAMMAWD_GWDI_MASK,
+ ISIF_CGAMMAWD_GWDI(info->bpp));
+
+ /* Set RAW Bayer pattern */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CCOLP,
+ isif_ccolp);
+ break;
+ }
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_SPH, 0 & ISIF_SPH_MASK);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_LNH,
+ (format->width - 1) & ISIF_LNH_MASK);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_LNV,
+ (format->height - 1) & ISIF_LNV_MASK);
+
+ /* Generate the ISIF0 interrupt on the last line of the image */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_VDINT(0),
+ format->height - 1);
+
+ /* IPIPEIF_PAD_SOURCE_ISIF_SF */
+ format = &ipipeif->formats[IPIPEIF_PAD_SOURCE_ISIF_SF];
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_HSIZE,
+ (ipipeif->video_out.bpl_value >> 5) &
+ ISIF_HSIZE_HSIZE_MASK);
+
+ /* IPIPEIF_PAD_SOURCE_VP */
+ /* Do nothing? */
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+static void ipipeif_isr_buffer(struct iss_ipipeif_device *ipipeif)
+{
+ struct iss_buffer *buffer;
+
+ /* The ISIF generates VD0 interrupts even when writes are disabled, so we
+ * have to deal with them anyway. Disabling the ISIF when no buffer is
+ * available is thus not enough; we need to handle the situation explicitly.
+ */
+ if (list_empty(&ipipeif->video_out.dmaqueue))
+ return;
+
+ ipipeif_write_enable(ipipeif, 0);
+
+ buffer = omap4iss_video_buffer_next(&ipipeif->video_out);
+ if (buffer == NULL)
+ return;
+
+ ipipeif_set_outaddr(ipipeif, buffer->iss_addr);
+
+ ipipeif_write_enable(ipipeif, 1);
+}
+
+/*
+ * ipipeif_isif0_isr - Handle ISIF0 event
+ * @ipipeif: Pointer to ISP IPIPEIF device.
+ *
+ * Updates the pipeline frame number and completes the current buffer when
+ * the module is writing to memory.
+ */
+static void ipipeif_isif0_isr(struct iss_ipipeif_device *ipipeif)
+{
+ struct iss_pipeline *pipe =
+ to_iss_pipeline(&ipipeif->subdev.entity);
+ if (pipe->do_propagation)
+ atomic_inc(&pipe->frame_number);
+
+ if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
+ ipipeif_isr_buffer(ipipeif);
+}
+
+/*
+ * omap4iss_ipipeif_isr - Configure ipipeif during interframe time.
+ * @ipipeif: Pointer to ISP IPIPEIF device.
+ * @events: IPIPEIF events
+ */
+void omap4iss_ipipeif_isr(struct iss_ipipeif_device *ipipeif, u32 events)
+{
+ if (omap4iss_module_sync_is_stopping(&ipipeif->wait,
+ &ipipeif->stopping))
+ return;
+
+ if (events & ISP5_IRQ_ISIF_INT(0))
+ ipipeif_isif0_isr(ipipeif);
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP video operations
+ */
+
+static int ipipeif_video_queue(struct iss_video *video,
+ struct iss_buffer *buffer)
+{
+ struct iss_ipipeif_device *ipipeif = container_of(video,
+ struct iss_ipipeif_device, video_out);
+
+ if (!(ipipeif->output & IPIPEIF_OUTPUT_MEMORY))
+ return -ENODEV;
+
+ ipipeif_set_outaddr(ipipeif, buffer->iss_addr);
+
+ /*
+ * If streaming was enabled before a buffer was queued, or if an underrun
+ * happened in the ISR, the hardware was not enabled and the DMA queue flag
+ * ISS_VIDEO_DMAQUEUE_UNDERRUN is still set. Enable the hardware now.
+ */
+ if (video->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
+ if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
+ ipipeif_write_enable(ipipeif, 1);
+ ipipeif_enable(ipipeif, 1);
+ iss_video_dmaqueue_flags_clr(video);
+ }
+
+ return 0;
+}
+
+static const struct iss_video_operations ipipeif_video_ops = {
+ .queue = ipipeif_video_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+#define IPIPEIF_DRV_SUBCLK_MASK (OMAP4_ISS_ISP_SUBCLK_IPIPEIF |\
+ OMAP4_ISS_ISP_SUBCLK_ISIF)
+/*
+ * ipipeif_set_stream - Enable/Disable streaming on the IPIPEIF module
+ * @sd: ISP IPIPEIF V4L2 subdevice
+ * @enable: Enable/disable stream
+ */
+static int ipipeif_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct iss_device *iss = to_iss_device(ipipeif);
+ struct iss_video *video_out = &ipipeif->video_out;
+ int ret = 0;
+
+ if (ipipeif->state == ISS_PIPELINE_STREAM_STOPPED) {
+ if (enable == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+
+ omap4iss_isp_subclk_enable(iss, IPIPEIF_DRV_SUBCLK_MASK);
+ }
+
+ switch (enable) {
+ case ISS_PIPELINE_STREAM_CONTINUOUS:
+
+ ipipeif_configure(ipipeif);
+ ipipeif_print_status(ipipeif);
+
+ /*
+ * When outputting to memory with no buffer available, let the
+ * buffer queue handler start the hardware. A DMA queue flag
+ * ISS_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
+ * a buffer available.
+ */
+ if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY &&
+ !(video_out->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_QUEUED))
+ break;
+
+ atomic_set(&ipipeif->stopping, 0);
+ if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
+ ipipeif_write_enable(ipipeif, 1);
+ ipipeif_enable(ipipeif, 1);
+ iss_video_dmaqueue_flags_clr(video_out);
+ break;
+
+ case ISS_PIPELINE_STREAM_STOPPED:
+ if (ipipeif->state == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+ if (omap4iss_module_sync_idle(&sd->entity, &ipipeif->wait,
+ &ipipeif->stopping))
+ ret = -ETIMEDOUT;
+
+ if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
+ ipipeif_write_enable(ipipeif, 0);
+ ipipeif_enable(ipipeif, 0);
+ omap4iss_isp_subclk_disable(iss, IPIPEIF_DRV_SUBCLK_MASK);
+ iss_video_dmaqueue_flags_clr(video_out);
+ break;
+ }
+
+ ipipeif->state = enable;
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__ipipeif_get_format(struct iss_ipipeif_device *ipipeif,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+ else
+ return &ipipeif->formats[pad];
+}
+
+/*
+ * ipipeif_try_format - Try video format on a pad
+ * @ipipeif: ISS IPIPEIF device
+ * @fh : V4L2 subdev file handle
+ * @pad: Pad number
+ * @fmt: Format
+ */
+static void
+ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ struct v4l2_mbus_framefmt *format;
+ unsigned int width = fmt->width;
+ unsigned int height = fmt->height;
+ unsigned int i;
+
+ switch (pad) {
+ case IPIPEIF_PAD_SINK:
+ /* TODO: If the IPIPEIF output formatter pad is connected
+ * directly to the resizer, only YUV formats can be used.
+ */
+ for (i = 0; i < ARRAY_SIZE(ipipeif_fmts); i++) {
+ if (fmt->code == ipipeif_fmts[i])
+ break;
+ }
+
+ /* If not found, use SGRBG10 as default */
+ if (i >= ARRAY_SIZE(ipipeif_fmts))
+ fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+ /* Clamp the input size. */
+ fmt->width = clamp_t(u32, width, 1, 8192);
+ fmt->height = clamp_t(u32, height, 1, 8192);
+ break;
+
+ case IPIPEIF_PAD_SOURCE_ISIF_SF:
+ format = __ipipeif_get_format(ipipeif, fh, IPIPEIF_PAD_SINK,
+ which);
+ memcpy(fmt, format, sizeof(*fmt));
+
+ /* The data formatter truncates the number of horizontal output
+ * pixels to a multiple of 16. To avoid clipping data, allow
+ * callers to request an output size bigger than the input size
+ * up to the nearest multiple of 16.
+ */
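+ /* Illustrative example (assumed values): with a 1030 pixel wide sink
+ * format the upper bound is 1040, so a requested width of 1050 is
+ * clamped to 1040, while a request of 1036 is kept and then truncated
+ * to 1024.
+ */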
+ fmt->width = clamp_t(u32, width, 32, (fmt->width + 15) & ~15);
+ fmt->width &= ~15;
+ fmt->height = clamp_t(u32, height, 32, fmt->height);
+ break;
+
+ case IPIPEIF_PAD_SOURCE_VP:
+ format = __ipipeif_get_format(ipipeif, fh, IPIPEIF_PAD_SINK,
+ which);
+ memcpy(fmt, format, sizeof(*fmt));
+
+ fmt->width = clamp_t(u32, width, 32, fmt->width);
+ fmt->height = clamp_t(u32, height, 32, fmt->height);
+ break;
+ }
+
+ /* Data is written to memory unpacked; each 10-bit or 12-bit pixel is
+ * stored in 2 bytes.
+ */
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * ipipeif_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ switch (code->pad) {
+ case IPIPEIF_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(ipipeif_fmts))
+ return -EINVAL;
+
+ code->code = ipipeif_fmts[code->index];
+ break;
+
+ case IPIPEIF_PAD_SOURCE_ISIF_SF:
+ case IPIPEIF_PAD_SOURCE_VP:
+ /* No format conversion inside IPIPEIF */
+ if (code->index != 0)
+ return -EINVAL;
+
+ format = __ipipeif_get_format(ipipeif, fh, IPIPEIF_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_TRY);
+
+ code->code = format->code;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
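+ /* Probe the minimum and maximum supported sizes by letting the try
+ * format handler clamp a 1x1 request and a request of (unsigned)-1
+ * respectively.
+ */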
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ ipipeif_try_format(ipipeif, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ ipipeif_try_format(ipipeif, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * ipipeif_get_format - Retrieve the video format on a pad
+ * @sd : ISP IPIPEIF V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ipipeif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ipipeif_get_format(ipipeif, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * ipipeif_set_format - Set the video format on a pad
+ * @sd : ISP IPIPEIF V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ipipeif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ipipeif_get_format(ipipeif, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ ipipeif_try_format(ipipeif, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == IPIPEIF_PAD_SINK) {
+ format = __ipipeif_get_format(ipipeif, fh,
+ IPIPEIF_PAD_SOURCE_ISIF_SF,
+ fmt->which);
+ *format = fmt->format;
+ ipipeif_try_format(ipipeif, fh, IPIPEIF_PAD_SOURCE_ISIF_SF,
+ format, fmt->which);
+
+ format = __ipipeif_get_format(ipipeif, fh,
+ IPIPEIF_PAD_SOURCE_VP,
+ fmt->which);
+ *format = fmt->format;
+ ipipeif_try_format(ipipeif, fh, IPIPEIF_PAD_SOURCE_VP, format,
+ fmt->which);
+ }
+
+ return 0;
+}
+
+static int ipipeif_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ /* Check if the two ends match */
+ if (source_fmt->format.width != sink_fmt->format.width ||
+ source_fmt->format.height != sink_fmt->format.height)
+ return -EPIPE;
+
+ if (source_fmt->format.code != sink_fmt->format.code)
+ return -EPIPE;
+
+ return 0;
+}
+
+/*
+ * ipipeif_init_formats - Initialize formats on all pads
+ * @sd: ISP IPIPEIF V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int ipipeif_init_formats(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = IPIPEIF_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format.format.width = 4096;
+ format.format.height = 4096;
+ ipipeif_set_format(sd, fh, &format);
+
+ return 0;
+}
+
+/* V4L2 subdev video operations */
+static const struct v4l2_subdev_video_ops ipipeif_v4l2_video_ops = {
+ .s_stream = ipipeif_set_stream,
+};
+
+/* V4L2 subdev pad operations */
+static const struct v4l2_subdev_pad_ops ipipeif_v4l2_pad_ops = {
+ .enum_mbus_code = ipipeif_enum_mbus_code,
+ .enum_frame_size = ipipeif_enum_frame_size,
+ .get_fmt = ipipeif_get_format,
+ .set_fmt = ipipeif_set_format,
+ .link_validate = ipipeif_link_validate,
+};
+
+/* V4L2 subdev operations */
+static const struct v4l2_subdev_ops ipipeif_v4l2_ops = {
+ .video = &ipipeif_v4l2_video_ops,
+ .pad = &ipipeif_v4l2_pad_ops,
+};
+
+/* V4L2 subdev internal operations */
+static const struct v4l2_subdev_internal_ops ipipeif_v4l2_internal_ops = {
+ .open = ipipeif_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * ipipeif_link_setup - Setup IPIPEIF connections
+ * @entity: IPIPEIF media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * return -EINVAL or zero on success
+ */
+static int ipipeif_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct iss_device *iss = to_iss_device(ipipeif);
+
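+ /* Dispatch on the local pad index combined with the type of the remote
+ * entity; the MEDIA_ENT_T_* values occupy bits 16 and above, so OR-ing
+ * them with the (small) pad index yields a unique case label.
+ */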
+ switch (local->index | media_entity_type(remote->entity)) {
+ case IPIPEIF_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* Read from the sensor CSI2a or CSI2b. */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ipipeif->input = IPIPEIF_INPUT_NONE;
+ break;
+ }
+
+ if (ipipeif->input != IPIPEIF_INPUT_NONE)
+ return -EBUSY;
+
+ if (remote->entity == &iss->csi2a.subdev.entity)
+ ipipeif->input = IPIPEIF_INPUT_CSI2A;
+ else if (remote->entity == &iss->csi2b.subdev.entity)
+ ipipeif->input = IPIPEIF_INPUT_CSI2B;
+
+ break;
+
+ case IPIPEIF_PAD_SOURCE_ISIF_SF | MEDIA_ENT_T_DEVNODE:
+ /* Write to memory */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (ipipeif->output & ~IPIPEIF_OUTPUT_MEMORY)
+ return -EBUSY;
+ ipipeif->output |= IPIPEIF_OUTPUT_MEMORY;
+ } else {
+ ipipeif->output &= ~IPIPEIF_OUTPUT_MEMORY;
+ }
+ break;
+
+ case IPIPEIF_PAD_SOURCE_VP | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* Send to IPIPE/RESIZER */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (ipipeif->output & ~IPIPEIF_OUTPUT_VP)
+ return -EBUSY;
+ ipipeif->output |= IPIPEIF_OUTPUT_VP;
+ } else {
+ ipipeif->output &= ~IPIPEIF_OUTPUT_VP;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations ipipeif_media_ops = {
+ .link_setup = ipipeif_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * ipipeif_init_entities - Initialize V4L2 subdev and media entity
+ * @ipipeif: ISS ISP IPIPEIF module
+ *
+ * Return 0 on success and a negative error code on failure.
+ */
+static int ipipeif_init_entities(struct iss_ipipeif_device *ipipeif)
+{
+ struct v4l2_subdev *sd = &ipipeif->subdev;
+ struct media_pad *pads = ipipeif->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ ipipeif->input = IPIPEIF_INPUT_NONE;
+
+ v4l2_subdev_init(sd, &ipipeif_v4l2_ops);
+ sd->internal_ops = &ipipeif_v4l2_internal_ops;
+ strlcpy(sd->name, "OMAP4 ISS ISP IPIPEIF", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for iss subdevs */
+ v4l2_set_subdevdata(sd, ipipeif);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[IPIPEIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[IPIPEIF_PAD_SOURCE_ISIF_SF].flags = MEDIA_PAD_FL_SOURCE;
+ pads[IPIPEIF_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
+
+ me->ops = &ipipeif_media_ops;
+ ret = media_entity_init(me, IPIPEIF_PADS_NUM, pads, 0);
+ if (ret < 0)
+ return ret;
+
+ ipipeif_init_formats(sd, NULL);
+
+ ipipeif->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ipipeif->video_out.ops = &ipipeif_video_ops;
+ ipipeif->video_out.iss = to_iss_device(ipipeif);
+ ipipeif->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+ ipipeif->video_out.bpl_alignment = 32;
+ ipipeif->video_out.bpl_zero_padding = 1;
+ ipipeif->video_out.bpl_max = 0x1ffe0;
+
+ ret = omap4iss_video_init(&ipipeif->video_out, "ISP IPIPEIF");
+ if (ret < 0)
+ return ret;
+
+ /* Connect the IPIPEIF subdev to the video node. */
+ ret = media_entity_create_link(&ipipeif->subdev.entity,
+ IPIPEIF_PAD_SOURCE_ISIF_SF,
+ &ipipeif->video_out.video.entity, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+void omap4iss_ipipeif_unregister_entities(struct iss_ipipeif_device *ipipeif)
+{
+ media_entity_cleanup(&ipipeif->subdev.entity);
+
+ v4l2_device_unregister_subdev(&ipipeif->subdev);
+ omap4iss_video_unregister(&ipipeif->video_out);
+}
+
+int omap4iss_ipipeif_register_entities(struct iss_ipipeif_device *ipipeif,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video node. */
+ ret = v4l2_device_register_subdev(vdev, &ipipeif->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap4iss_video_register(&ipipeif->video_out, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap4iss_ipipeif_unregister_entities(ipipeif);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP IPIPEIF initialisation and cleanup
+ */
+
+/*
+ * omap4iss_ipipeif_init - IPIPEIF module initialization.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ *
+ * TODO: Get the initialisation values from platform data.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int omap4iss_ipipeif_init(struct iss_device *iss)
+{
+ struct iss_ipipeif_device *ipipeif = &iss->ipipeif;
+
+ ipipeif->state = ISS_PIPELINE_STREAM_STOPPED;
+ init_waitqueue_head(&ipipeif->wait);
+
+ return ipipeif_init_entities(ipipeif);
+}
+
+/*
+ * omap4iss_ipipeif_cleanup - IPIPEIF module cleanup.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ */
+void omap4iss_ipipeif_cleanup(struct iss_device *iss)
+{
+ /* FIXME: are you sure there's nothing to do? */
+}
diff --git a/drivers/staging/media/omap4iss/iss_ipipeif.h b/drivers/staging/media/omap4iss/iss_ipipeif.h
new file mode 100644
index 000000000000..cbdccb982eee
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_ipipeif.h
@@ -0,0 +1,92 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP IPIPEIF module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_IPIPEIF_H
+#define OMAP4_ISS_IPIPEIF_H
+
+#include "iss_video.h"
+
+enum ipipeif_input_entity {
+ IPIPEIF_INPUT_NONE,
+ IPIPEIF_INPUT_CSI2A,
+ IPIPEIF_INPUT_CSI2B
+};
+
+#define IPIPEIF_OUTPUT_MEMORY (1 << 0)
+#define IPIPEIF_OUTPUT_VP (1 << 1)
+
+/* Sink and source IPIPEIF pads */
+#define IPIPEIF_PAD_SINK 0
+#define IPIPEIF_PAD_SOURCE_ISIF_SF 1
+#define IPIPEIF_PAD_SOURCE_VP 2
+#define IPIPEIF_PADS_NUM 3
+
+/*
+ * struct iss_ipipeif_device - Structure for the IPIPEIF module to store its own
+ * information
+ * @subdev: V4L2 subdevice
+ * @pads: Sink and source media entity pads
+ * @formats: Active video formats
+ * @input: Active input
+ * @output: Active outputs
+ * @video_out: Output video node
+ * @error: A hardware error occurred during capture
+ * @state: Streaming state
+ * @wait: Wait queue used to stop the module
+ * @stopping: Stopping state
+ */
+struct iss_ipipeif_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[IPIPEIF_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[IPIPEIF_PADS_NUM];
+
+ enum ipipeif_input_entity input;
+ unsigned int output;
+ struct iss_video video_out;
+ unsigned int error;
+
+ enum iss_pipeline_stream_state state;
+ wait_queue_head_t wait;
+ atomic_t stopping;
+};
+
+struct iss_device;
+
+int omap4iss_ipipeif_init(struct iss_device *iss);
+void omap4iss_ipipeif_cleanup(struct iss_device *iss);
+int omap4iss_ipipeif_register_entities(struct iss_ipipeif_device *ipipeif,
+ struct v4l2_device *vdev);
+void omap4iss_ipipeif_unregister_entities(struct iss_ipipeif_device *ipipeif);
+
+int omap4iss_ipipeif_busy(struct iss_ipipeif_device *ipipeif);
+void omap4iss_ipipeif_isr(struct iss_ipipeif_device *ipipeif, u32 events);
+void omap4iss_ipipeif_restore_context(struct iss_device *iss);
+void omap4iss_ipipeif_max_rate(struct iss_ipipeif_device *ipipeif,
+ unsigned int *max_rate);
+
+#endif /* OMAP4_ISS_IPIPEIF_H */
diff --git a/drivers/staging/media/omap4iss/iss_regs.h b/drivers/staging/media/omap4iss/iss_regs.h
new file mode 100644
index 000000000000..efd0291a86f7
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_regs.h
@@ -0,0 +1,901 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - Register defines
+ *
+ * Copyright (C) 2012 Texas Instruments.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _OMAP4_ISS_REGS_H_
+#define _OMAP4_ISS_REGS_H_
+
+/* ISS */
+#define ISS_HL_REVISION 0x0
+
+#define ISS_HL_SYSCONFIG 0x10
+#define ISS_HL_SYSCONFIG_IDLEMODE_SHIFT 2
+#define ISS_HL_SYSCONFIG_IDLEMODE_FORCEIDLE 0x0
+#define ISS_HL_SYSCONFIG_IDLEMODE_NOIDLE 0x1
+#define ISS_HL_SYSCONFIG_IDLEMODE_SMARTIDLE 0x2
+#define ISS_HL_SYSCONFIG_SOFTRESET (1 << 0)
+
+#define ISS_HL_IRQSTATUS_RAW(i) (0x20 + (0x10 * (i)))
+#define ISS_HL_IRQSTATUS(i) (0x24 + (0x10 * (i)))
+#define ISS_HL_IRQENABLE_SET(i) (0x28 + (0x10 * (i)))
+#define ISS_HL_IRQENABLE_CLR(i) (0x2c + (0x10 * (i)))
+
+#define ISS_HL_IRQ_HS_VS (1 << 17)
+#define ISS_HL_IRQ_SIMCOP(i) (1 << (12 + (i)))
+#define ISS_HL_IRQ_BTE (1 << 11)
+#define ISS_HL_IRQ_CBUFF (1 << 10)
+#define ISS_HL_IRQ_CCP2(i) (1 << ((i) > 3 ? 16 : 14 + (i)))
+#define ISS_HL_IRQ_CSIB (1 << 5)
+#define ISS_HL_IRQ_CSIA (1 << 4)
+#define ISS_HL_IRQ_ISP(i) (1 << (i))
+
+#define ISS_CTRL 0x80
+#define ISS_CTRL_CLK_DIV_MASK (3 << 4)
+#define ISS_CTRL_INPUT_SEL_MASK (3 << 2)
+#define ISS_CTRL_INPUT_SEL_CSI2A (0 << 2)
+#define ISS_CTRL_INPUT_SEL_CSI2B (1 << 2)
+#define ISS_CTRL_SYNC_DETECT_VS_RAISING (3 << 0)
+
+#define ISS_CLKCTRL 0x84
+#define ISS_CLKCTRL_VPORT2_CLK (1 << 30)
+#define ISS_CLKCTRL_VPORT1_CLK (1 << 29)
+#define ISS_CLKCTRL_VPORT0_CLK (1 << 28)
+#define ISS_CLKCTRL_CCP2 (1 << 4)
+#define ISS_CLKCTRL_CSI2_B (1 << 3)
+#define ISS_CLKCTRL_CSI2_A (1 << 2)
+#define ISS_CLKCTRL_ISP (1 << 1)
+#define ISS_CLKCTRL_SIMCOP (1 << 0)
+
+#define ISS_CLKSTAT 0x88
+#define ISS_CLKSTAT_VPORT2_CLK (1 << 30)
+#define ISS_CLKSTAT_VPORT1_CLK (1 << 29)
+#define ISS_CLKSTAT_VPORT0_CLK (1 << 28)
+#define ISS_CLKSTAT_CCP2 (1 << 4)
+#define ISS_CLKSTAT_CSI2_B (1 << 3)
+#define ISS_CLKSTAT_CSI2_A (1 << 2)
+#define ISS_CLKSTAT_ISP (1 << 1)
+#define ISS_CLKSTAT_SIMCOP (1 << 0)
+
+#define ISS_PM_STATUS 0x8c
+#define ISS_PM_STATUS_CBUFF_PM_MASK (3 << 12)
+#define ISS_PM_STATUS_BTE_PM_MASK (3 << 10)
+#define ISS_PM_STATUS_SIMCOP_PM_MASK (3 << 8)
+#define ISS_PM_STATUS_ISP_PM_MASK (3 << 6)
+#define ISS_PM_STATUS_CCP2_PM_MASK (3 << 4)
+#define ISS_PM_STATUS_CSI2_B_PM_MASK (3 << 2)
+#define ISS_PM_STATUS_CSI2_A_PM_MASK (3 << 0)
+
+#define REGISTER0 0x0
+#define REGISTER0_HSCLOCKCONFIG (1 << 24)
+#define REGISTER0_THS_TERM_MASK (0xff << 8)
+#define REGISTER0_THS_TERM_SHIFT 8
+#define REGISTER0_THS_SETTLE_MASK (0xff << 0)
+#define REGISTER0_THS_SETTLE_SHIFT 0
+
+#define REGISTER1 0x4
+#define REGISTER1_RESET_DONE_CTRLCLK (1 << 29)
+#define REGISTER1_CLOCK_MISS_DETECTOR_STATUS (1 << 25)
+#define REGISTER1_TCLK_TERM_MASK (0x3f << 18)
+#define REGISTER1_TCLK_TERM_SHIFT 18
+#define REGISTER1_DPHY_HS_SYNC_PATTERN_SHIFT 10
+#define REGISTER1_CTRLCLK_DIV_FACTOR_MASK (0x3 << 8)
+#define REGISTER1_CTRLCLK_DIV_FACTOR_SHIFT 8
+#define REGISTER1_TCLK_SETTLE_MASK (0xff << 0)
+#define REGISTER1_TCLK_SETTLE_SHIFT 0
+
+#define REGISTER2 0x8
+
+#define CSI2_SYSCONFIG 0x10
+#define CSI2_SYSCONFIG_MSTANDBY_MODE_MASK (3 << 12)
+#define CSI2_SYSCONFIG_MSTANDBY_MODE_FORCE (0 << 12)
+#define CSI2_SYSCONFIG_MSTANDBY_MODE_NO (1 << 12)
+#define CSI2_SYSCONFIG_MSTANDBY_MODE_SMART (2 << 12)
+#define CSI2_SYSCONFIG_SOFT_RESET (1 << 1)
+#define CSI2_SYSCONFIG_AUTO_IDLE (1 << 0)
+
+#define CSI2_SYSSTATUS 0x14
+#define CSI2_SYSSTATUS_RESET_DONE (1 << 0)
+
+#define CSI2_IRQSTATUS 0x18
+#define CSI2_IRQENABLE 0x1c
+
+/* Shared bits across CSI2_IRQENABLE and IRQSTATUS */
+
+#define CSI2_IRQ_OCP_ERR (1 << 14)
+#define CSI2_IRQ_SHORT_PACKET (1 << 13)
+#define CSI2_IRQ_ECC_CORRECTION (1 << 12)
+#define CSI2_IRQ_ECC_NO_CORRECTION (1 << 11)
+#define CSI2_IRQ_COMPLEXIO_ERR (1 << 9)
+#define CSI2_IRQ_FIFO_OVF (1 << 8)
+#define CSI2_IRQ_CONTEXT0 (1 << 0)
+
+#define CSI2_CTRL 0x40
+#define CSI2_CTRL_MFLAG_LEVH_MASK (7 << 20)
+#define CSI2_CTRL_MFLAG_LEVH_SHIFT 20
+#define CSI2_CTRL_MFLAG_LEVL_MASK (7 << 17)
+#define CSI2_CTRL_MFLAG_LEVL_SHIFT 17
+#define CSI2_CTRL_BURST_SIZE_EXPAND (1 << 16)
+#define CSI2_CTRL_VP_CLK_EN (1 << 15)
+#define CSI2_CTRL_NON_POSTED_WRITE (1 << 13)
+#define CSI2_CTRL_VP_ONLY_EN (1 << 11)
+#define CSI2_CTRL_VP_OUT_CTRL_MASK (3 << 8)
+#define CSI2_CTRL_VP_OUT_CTRL_SHIFT 8
+#define CSI2_CTRL_DBG_EN (1 << 7)
+#define CSI2_CTRL_BURST_SIZE_MASK (3 << 5)
+#define CSI2_CTRL_ENDIANNESS (1 << 4)
+#define CSI2_CTRL_FRAME (1 << 3)
+#define CSI2_CTRL_ECC_EN (1 << 2)
+#define CSI2_CTRL_IF_EN (1 << 0)
+
+#define CSI2_DBG_H 0x44
+
+#define CSI2_COMPLEXIO_CFG 0x50
+#define CSI2_COMPLEXIO_CFG_RESET_CTRL (1 << 30)
+#define CSI2_COMPLEXIO_CFG_RESET_DONE (1 << 29)
+#define CSI2_COMPLEXIO_CFG_PWD_CMD_MASK (3 << 27)
+#define CSI2_COMPLEXIO_CFG_PWD_CMD_OFF (0 << 27)
+#define CSI2_COMPLEXIO_CFG_PWD_CMD_ON (1 << 27)
+#define CSI2_COMPLEXIO_CFG_PWD_CMD_ULP (2 << 27)
+#define CSI2_COMPLEXIO_CFG_PWD_STATUS_MASK (3 << 25)
+#define CSI2_COMPLEXIO_CFG_PWD_STATUS_OFF (0 << 25)
+#define CSI2_COMPLEXIO_CFG_PWD_STATUS_ON (1 << 25)
+#define CSI2_COMPLEXIO_CFG_PWD_STATUS_ULP (2 << 25)
+#define CSI2_COMPLEXIO_CFG_PWR_AUTO (1 << 24)
+#define CSI2_COMPLEXIO_CFG_DATA_POL(i) (1 << (((i) * 4) + 3))
+#define CSI2_COMPLEXIO_CFG_DATA_POSITION_MASK(i) (7 << ((i) * 4))
+#define CSI2_COMPLEXIO_CFG_DATA_POSITION_SHIFT(i) ((i) * 4)
+#define CSI2_COMPLEXIO_CFG_CLOCK_POL (1 << 3)
+#define CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK (7 << 0)
+#define CSI2_COMPLEXIO_CFG_CLOCK_POSITION_SHIFT 0
+
+#define CSI2_COMPLEXIO_IRQSTATUS 0x54
+
+#define CSI2_SHORT_PACKET 0x5c
+
+#define CSI2_COMPLEXIO_IRQENABLE 0x60
+
+/* Shared bits across CSI2_COMPLEXIO_IRQENABLE and IRQSTATUS */
+#define CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT (1 << 26)
+#define CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER (1 << 25)
+#define CSI2_COMPLEXIO_IRQ_STATEULPM5 (1 << 24)
+#define CSI2_COMPLEXIO_IRQ_STATEULPM4 (1 << 23)
+#define CSI2_COMPLEXIO_IRQ_STATEULPM3 (1 << 22)
+#define CSI2_COMPLEXIO_IRQ_STATEULPM2 (1 << 21)
+#define CSI2_COMPLEXIO_IRQ_STATEULPM1 (1 << 20)
+#define CSI2_COMPLEXIO_IRQ_ERRCONTROL5 (1 << 19)
+#define CSI2_COMPLEXIO_IRQ_ERRCONTROL4 (1 << 18)
+#define CSI2_COMPLEXIO_IRQ_ERRCONTROL3 (1 << 17)
+#define CSI2_COMPLEXIO_IRQ_ERRCONTROL2 (1 << 16)
+#define CSI2_COMPLEXIO_IRQ_ERRCONTROL1 (1 << 15)
+#define CSI2_COMPLEXIO_IRQ_ERRESC5 (1 << 14)
+#define CSI2_COMPLEXIO_IRQ_ERRESC4 (1 << 13)
+#define CSI2_COMPLEXIO_IRQ_ERRESC3 (1 << 12)
+#define CSI2_COMPLEXIO_IRQ_ERRESC2 (1 << 11)
+#define CSI2_COMPLEXIO_IRQ_ERRESC1 (1 << 10)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5 (1 << 9)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4 (1 << 8)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3 (1 << 7)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2 (1 << 6)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1 (1 << 5)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTHS5 (1 << 4)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTHS4 (1 << 3)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTHS3 (1 << 2)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTHS2 (1 << 1)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTHS1 (1 << 0)
+
+#define CSI2_DBG_P 0x68
+
+#define CSI2_TIMING 0x6c
+#define CSI2_TIMING_FORCE_RX_MODE_IO1 (1 << 15)
+#define CSI2_TIMING_STOP_STATE_X16_IO1 (1 << 14)
+#define CSI2_TIMING_STOP_STATE_X4_IO1 (1 << 13)
+#define CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK (0x1fff << 0)
+#define CSI2_TIMING_STOP_STATE_COUNTER_IO1_SHIFT 0
+
+#define CSI2_CTX_CTRL1(i) (0x70 + (0x20 * i))
+#define CSI2_CTX_CTRL1_GENERIC (1 << 30)
+#define CSI2_CTX_CTRL1_TRANSCODE (0xf << 24)
+#define CSI2_CTX_CTRL1_FEC_NUMBER_MASK (0xff << 16)
+#define CSI2_CTX_CTRL1_COUNT_MASK (0xff << 8)
+#define CSI2_CTX_CTRL1_COUNT_SHIFT 8
+#define CSI2_CTX_CTRL1_EOF_EN (1 << 7)
+#define CSI2_CTX_CTRL1_EOL_EN (1 << 6)
+#define CSI2_CTX_CTRL1_CS_EN (1 << 5)
+#define CSI2_CTX_CTRL1_COUNT_UNLOCK (1 << 4)
+#define CSI2_CTX_CTRL1_PING_PONG (1 << 3)
+#define CSI2_CTX_CTRL1_CTX_EN (1 << 0)
+
+#define CSI2_CTX_CTRL2(i) (0x74 + (0x20 * i))
+#define CSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT 13
+#define CSI2_CTX_CTRL2_USER_DEF_MAP_MASK \
+ (0x3 << CSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT)
+#define CSI2_CTX_CTRL2_VIRTUAL_ID_MASK (3 << 11)
+#define CSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT 11
+#define CSI2_CTX_CTRL2_DPCM_PRED (1 << 10)
+#define CSI2_CTX_CTRL2_FORMAT_MASK (0x3ff << 0)
+#define CSI2_CTX_CTRL2_FORMAT_SHIFT 0
+
+#define CSI2_CTX_DAT_OFST(i) (0x78 + (0x20 * i))
+#define CSI2_CTX_DAT_OFST_MASK (0xfff << 5)
+
+#define CSI2_CTX_PING_ADDR(i) (0x7c + (0x20 * i))
+#define CSI2_CTX_PING_ADDR_MASK 0xffffffe0
+
+#define CSI2_CTX_PONG_ADDR(i) (0x80 + (0x20 * i))
+#define CSI2_CTX_PONG_ADDR_MASK CSI2_CTX_PING_ADDR_MASK
+
+#define CSI2_CTX_IRQENABLE(i) (0x84 + (0x20 * i))
+#define CSI2_CTX_IRQSTATUS(i) (0x88 + (0x20 * i))
+
+#define CSI2_CTX_CTRL3(i) (0x8c + (0x20 * i))
+#define CSI2_CTX_CTRL3_ALPHA_SHIFT 5
+#define CSI2_CTX_CTRL3_ALPHA_MASK \
+ (0x3fff << CSI2_CTX_CTRL3_ALPHA_SHIFT)
+
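+/* Each CSI2 context uses a 0x20 byte register stride. Illustrative example
+ * (not part of the original defines): CSI2_CTX_CTRL1(0) is at offset 0x70
+ * and CSI2_CTX_CTRL1(1) at offset 0x90.
+ */
+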
+/* Shared bits across CSI2_CTX_IRQENABLE and IRQSTATUS */
+#define CSI2_CTX_IRQ_ECC_CORRECTION (1 << 8)
+#define CSI2_CTX_IRQ_LINE_NUMBER (1 << 7)
+#define CSI2_CTX_IRQ_FRAME_NUMBER (1 << 6)
+#define CSI2_CTX_IRQ_CS (1 << 5)
+#define CSI2_CTX_IRQ_LE (1 << 3)
+#define CSI2_CTX_IRQ_LS (1 << 2)
+#define CSI2_CTX_IRQ_FE (1 << 1)
+#define CSI2_CTX_IRQ_FS (1 << 0)
+
+/* ISS BTE */
+#define BTE_CTRL (0x0030)
+#define BTE_CTRL_BW_LIMITER_MASK (0x3ff << 22)
+#define BTE_CTRL_BW_LIMITER_SHIFT 22
+
+/* ISS ISP_SYS1 */
+#define ISP5_REVISION (0x0000)
+#define ISP5_SYSCONFIG (0x0010)
+#define ISP5_SYSCONFIG_STANDBYMODE_MASK (3 << 4)
+#define ISP5_SYSCONFIG_STANDBYMODE_FORCE (0 << 4)
+#define ISP5_SYSCONFIG_STANDBYMODE_NO (1 << 4)
+#define ISP5_SYSCONFIG_STANDBYMODE_SMART (2 << 4)
+#define ISP5_SYSCONFIG_SOFTRESET (1 << 1)
+
+#define ISP5_IRQSTATUS(i) (0x0028 + (0x10 * (i)))
+#define ISP5_IRQENABLE_SET(i) (0x002c + (0x10 * (i)))
+#define ISP5_IRQENABLE_CLR(i) (0x0030 + (0x10 * (i)))
+
+/* Bits shared for ISP5_IRQ* registers */
+#define ISP5_IRQ_OCP_ERR (1 << 31)
+#define ISP5_IRQ_IPIPE_INT_DPC_RNEW1 (1 << 29)
+#define ISP5_IRQ_IPIPE_INT_DPC_RNEW0 (1 << 28)
+#define ISP5_IRQ_IPIPE_INT_DPC_INIT (1 << 27)
+#define ISP5_IRQ_IPIPE_INT_EOF (1 << 25)
+#define ISP5_IRQ_H3A_INT_EOF (1 << 24)
+#define ISP5_IRQ_RSZ_INT_EOF1 (1 << 23)
+#define ISP5_IRQ_RSZ_INT_EOF0 (1 << 22)
+#define ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR (1 << 19)
+#define ISP5_IRQ_RSZ_FIFO_OVF (1 << 18)
+#define ISP5_IRQ_RSZ_INT_CYC_RSZB (1 << 17)
+#define ISP5_IRQ_RSZ_INT_CYC_RSZA (1 << 16)
+#define ISP5_IRQ_RSZ_INT_DMA (1 << 15)
+#define ISP5_IRQ_RSZ_INT_LAST_PIX (1 << 14)
+#define ISP5_IRQ_RSZ_INT_REG (1 << 13)
+#define ISP5_IRQ_H3A_INT (1 << 12)
+#define ISP5_IRQ_AF_INT (1 << 11)
+#define ISP5_IRQ_AEW_INT (1 << 10)
+#define ISP5_IRQ_IPIPEIF_IRQ (1 << 9)
+#define ISP5_IRQ_IPIPE_INT_HST (1 << 8)
+#define ISP5_IRQ_IPIPE_INT_BSC (1 << 7)
+#define ISP5_IRQ_IPIPE_INT_DMA (1 << 6)
+#define ISP5_IRQ_IPIPE_INT_LAST_PIX (1 << 5)
+#define ISP5_IRQ_IPIPE_INT_REG (1 << 4)
+#define ISP5_IRQ_ISIF_INT(i) (1 << (i))
+
+#define ISP5_CTRL (0x006c)
+#define ISP5_CTRL_MSTANDBY (1 << 24)
+#define ISP5_CTRL_VD_PULSE_EXT (1 << 23)
+#define ISP5_CTRL_MSTANDBY_WAIT (1 << 20)
+#define ISP5_CTRL_BL_CLK_ENABLE (1 << 15)
+#define ISP5_CTRL_ISIF_CLK_ENABLE (1 << 14)
+#define ISP5_CTRL_H3A_CLK_ENABLE (1 << 13)
+#define ISP5_CTRL_RSZ_CLK_ENABLE (1 << 12)
+#define ISP5_CTRL_IPIPE_CLK_ENABLE (1 << 11)
+#define ISP5_CTRL_IPIPEIF_CLK_ENABLE (1 << 10)
+#define ISP5_CTRL_SYNC_ENABLE (1 << 9)
+#define ISP5_CTRL_PSYNC_CLK_SEL (1 << 8)
+
+/* ISS ISP ISIF register offsets */
+#define ISIF_SYNCEN (0x0000)
+#define ISIF_SYNCEN_DWEN (1 << 1)
+#define ISIF_SYNCEN_SYEN (1 << 0)
+
+#define ISIF_MODESET (0x0004)
+#define ISIF_MODESET_INPMOD_MASK (3 << 12)
+#define ISIF_MODESET_INPMOD_RAW (0 << 12)
+#define ISIF_MODESET_INPMOD_YCBCR16 (1 << 12)
+#define ISIF_MODESET_INPMOD_YCBCR8 (2 << 12)
+#define ISIF_MODESET_CCDW_MASK (7 << 8)
+#define ISIF_MODESET_CCDW_2BIT (2 << 8)
+#define ISIF_MODESET_CCDMD (1 << 7)
+#define ISIF_MODESET_SWEN (1 << 5)
+#define ISIF_MODESET_HDPOL (1 << 3)
+#define ISIF_MODESET_VDPOL (1 << 2)
+
+#define ISIF_SPH (0x0018)
+#define ISIF_SPH_MASK (0x7fff)
+
+#define ISIF_LNH (0x001c)
+#define ISIF_LNH_MASK (0x7fff)
+
+#define ISIF_LNV (0x0028)
+#define ISIF_LNV_MASK (0x7fff)
+
+#define ISIF_HSIZE (0x0034)
+#define ISIF_HSIZE_ADCR (1 << 12)
+#define ISIF_HSIZE_HSIZE_MASK (0xfff)
+
+#define ISIF_CADU (0x003c)
+#define ISIF_CADU_MASK (0x7ff)
+
+#define ISIF_CADL (0x0040)
+#define ISIF_CADL_MASK (0xffff)
+
+#define ISIF_CCOLP (0x004c)
+#define ISIF_CCOLP_CP0_F0_R (0 << 6)
+#define ISIF_CCOLP_CP0_F0_GR (1 << 6)
+#define ISIF_CCOLP_CP0_F0_B (3 << 6)
+#define ISIF_CCOLP_CP0_F0_GB (2 << 6)
+#define ISIF_CCOLP_CP1_F0_R (0 << 4)
+#define ISIF_CCOLP_CP1_F0_GR (1 << 4)
+#define ISIF_CCOLP_CP1_F0_B (3 << 4)
+#define ISIF_CCOLP_CP1_F0_GB (2 << 4)
+#define ISIF_CCOLP_CP2_F0_R (0 << 2)
+#define ISIF_CCOLP_CP2_F0_GR (1 << 2)
+#define ISIF_CCOLP_CP2_F0_B (3 << 2)
+#define ISIF_CCOLP_CP2_F0_GB (2 << 2)
+#define ISIF_CCOLP_CP3_F0_R (0 << 0)
+#define ISIF_CCOLP_CP3_F0_GR (1 << 0)
+#define ISIF_CCOLP_CP3_F0_B (3 << 0)
+#define ISIF_CCOLP_CP3_F0_GB (2 << 0)
+
+#define ISIF_VDINT(i) (0x0070 + (i) * 4)
+#define ISIF_VDINT_MASK (0x7fff)
+
+#define ISIF_CGAMMAWD (0x0080)
+#define ISIF_CGAMMAWD_GWDI_MASK (0xf << 1)
+#define ISIF_CGAMMAWD_GWDI(bpp) ((16 - (bpp)) << 1)
+
+#define ISIF_CCDCFG (0x0088)
+#define ISIF_CCDCFG_Y8POS (1 << 11)
+
+/* ISS ISP IPIPEIF register offsets */
+#define IPIPEIF_ENABLE (0x0000)
+
+#define IPIPEIF_CFG1 (0x0004)
+#define IPIPEIF_CFG1_INPSRC1_MASK (3 << 14)
+#define IPIPEIF_CFG1_INPSRC1_VPORT_RAW (0 << 14)
+#define IPIPEIF_CFG1_INPSRC1_SDRAM_RAW (1 << 14)
+#define IPIPEIF_CFG1_INPSRC1_ISIF_DARKFM (2 << 14)
+#define IPIPEIF_CFG1_INPSRC1_SDRAM_YUV (3 << 14)
+#define IPIPEIF_CFG1_INPSRC2_MASK (3 << 2)
+#define IPIPEIF_CFG1_INPSRC2_ISIF (0 << 2)
+#define IPIPEIF_CFG1_INPSRC2_SDRAM_RAW (1 << 2)
+#define IPIPEIF_CFG1_INPSRC2_ISIF_DARKFM (2 << 2)
+#define IPIPEIF_CFG1_INPSRC2_SDRAM_YUV (3 << 2)
+
+#define IPIPEIF_CFG2 (0x0030)
+#define IPIPEIF_CFG2_YUV8P (1 << 7)
+#define IPIPEIF_CFG2_YUV8 (1 << 6)
+#define IPIPEIF_CFG2_YUV16 (1 << 3)
+#define IPIPEIF_CFG2_VDPOL (1 << 2)
+#define IPIPEIF_CFG2_HDPOL (1 << 1)
+#define IPIPEIF_CFG2_INTSW (1 << 0)
+
+#define IPIPEIF_CLKDIV (0x0040)
+
+/* ISS ISP IPIPE register offsets */
+#define IPIPE_SRC_EN (0x0000)
+#define IPIPE_SRC_EN_EN (1 << 0)
+
+#define IPIPE_SRC_MODE (0x0004)
+#define IPIPE_SRC_MODE_WRT (1 << 1)
+#define IPIPE_SRC_MODE_OST (1 << 0)
+
+#define IPIPE_SRC_FMT (0x0008)
+#define IPIPE_SRC_FMT_RAW2YUV (0 << 0)
+#define IPIPE_SRC_FMT_RAW2RAW (1 << 0)
+#define IPIPE_SRC_FMT_RAW2STATS (2 << 0)
+#define IPIPE_SRC_FMT_YUV2YUV (3 << 0)
+
+#define IPIPE_SRC_COL (0x000c)
+#define IPIPE_SRC_COL_OO_R (0 << 6)
+#define IPIPE_SRC_COL_OO_GR (1 << 6)
+#define IPIPE_SRC_COL_OO_B (3 << 6)
+#define IPIPE_SRC_COL_OO_GB (2 << 6)
+#define IPIPE_SRC_COL_OE_R (0 << 4)
+#define IPIPE_SRC_COL_OE_GR (1 << 4)
+#define IPIPE_SRC_COL_OE_B (3 << 4)
+#define IPIPE_SRC_COL_OE_GB (2 << 4)
+#define IPIPE_SRC_COL_EO_R (0 << 2)
+#define IPIPE_SRC_COL_EO_GR (1 << 2)
+#define IPIPE_SRC_COL_EO_B (3 << 2)
+#define IPIPE_SRC_COL_EO_GB (2 << 2)
+#define IPIPE_SRC_COL_EE_R (0 << 0)
+#define IPIPE_SRC_COL_EE_GR (1 << 0)
+#define IPIPE_SRC_COL_EE_B (3 << 0)
+#define IPIPE_SRC_COL_EE_GB (2 << 0)
+
+#define IPIPE_SRC_VPS (0x0010)
+#define IPIPE_SRC_VPS_MASK (0xffff)
+
+#define IPIPE_SRC_VSZ (0x0014)
+#define IPIPE_SRC_VSZ_MASK (0x1fff)
+
+#define IPIPE_SRC_HPS (0x0018)
+#define IPIPE_SRC_HPS_MASK (0xffff)
+
+#define IPIPE_SRC_HSZ (0x001c)
+#define IPIPE_SRC_HSZ_MASK (0x1ffe)
+
+#define IPIPE_SEL_SBU (0x0020)
+
+#define IPIPE_SRC_STA (0x0024)
+
+#define IPIPE_GCK_MMR (0x0028)
+#define IPIPE_GCK_MMR_REG (1 << 0)
+
+#define IPIPE_GCK_PIX (0x002c)
+#define IPIPE_GCK_PIX_G3 (1 << 3)
+#define IPIPE_GCK_PIX_G2 (1 << 2)
+#define IPIPE_GCK_PIX_G1 (1 << 1)
+#define IPIPE_GCK_PIX_G0 (1 << 0)
+
+#define IPIPE_DPC_LUT_EN (0x0034)
+#define IPIPE_DPC_LUT_SEL (0x0038)
+#define IPIPE_DPC_LUT_ADR (0x003c)
+#define IPIPE_DPC_LUT_SIZ (0x0040)
+
+#define IPIPE_DPC_OTF_EN (0x0044)
+#define IPIPE_DPC_OTF_TYP (0x0048)
+#define IPIPE_DPC_OTF_2_D_THR_R (0x004c)
+#define IPIPE_DPC_OTF_2_D_THR_GR (0x0050)
+#define IPIPE_DPC_OTF_2_D_THR_GB (0x0054)
+#define IPIPE_DPC_OTF_2_D_THR_B (0x0058)
+#define IPIPE_DPC_OTF_2_C_THR_R (0x005c)
+#define IPIPE_DPC_OTF_2_C_THR_GR (0x0060)
+#define IPIPE_DPC_OTF_2_C_THR_GB (0x0064)
+#define IPIPE_DPC_OTF_2_C_THR_B (0x0068)
+#define IPIPE_DPC_OTF_3_SHF (0x006c)
+#define IPIPE_DPC_OTF_3_D_THR (0x0070)
+#define IPIPE_DPC_OTF_3_D_SPL (0x0074)
+#define IPIPE_DPC_OTF_3_D_MIN (0x0078)
+#define IPIPE_DPC_OTF_3_D_MAX (0x007c)
+#define IPIPE_DPC_OTF_3_C_THR (0x0080)
+#define IPIPE_DPC_OTF_3_C_SLP (0x0084)
+#define IPIPE_DPC_OTF_3_C_MIN (0x0088)
+#define IPIPE_DPC_OTF_3_C_MAX (0x008c)
+
+#define IPIPE_LSC_VOFT (0x0090)
+#define IPIPE_LSC_VA2 (0x0094)
+#define IPIPE_LSC_VA1 (0x0098)
+#define IPIPE_LSC_VS (0x009c)
+#define IPIPE_LSC_HOFT (0x00a0)
+#define IPIPE_LSC_HA2 (0x00a4)
+#define IPIPE_LSC_HA1 (0x00a8)
+#define IPIPE_LSC_HS (0x00ac)
+#define IPIPE_LSC_GAN_R (0x00b0)
+#define IPIPE_LSC_GAN_GR (0x00b4)
+#define IPIPE_LSC_GAN_GB (0x00b8)
+#define IPIPE_LSC_GAN_B (0x00bc)
+#define IPIPE_LSC_OFT_R (0x00c0)
+#define IPIPE_LSC_OFT_GR (0x00c4)
+#define IPIPE_LSC_OFT_GB (0x00c8)
+#define IPIPE_LSC_OFT_B (0x00cc)
+#define IPIPE_LSC_SHF (0x00d0)
+#define IPIPE_LSC_MAX (0x00d4)
+
+#define IPIPE_D2F_1ST_EN (0x00d8)
+#define IPIPE_D2F_1ST_TYP (0x00dc)
+#define IPIPE_D2F_1ST_THR_00 (0x00e0)
+#define IPIPE_D2F_1ST_THR_01 (0x00e4)
+#define IPIPE_D2F_1ST_THR_02 (0x00e8)
+#define IPIPE_D2F_1ST_THR_03 (0x00ec)
+#define IPIPE_D2F_1ST_THR_04 (0x00f0)
+#define IPIPE_D2F_1ST_THR_05 (0x00f4)
+#define IPIPE_D2F_1ST_THR_06 (0x00f8)
+#define IPIPE_D2F_1ST_THR_07 (0x00fc)
+#define IPIPE_D2F_1ST_STR_00 (0x0100)
+#define IPIPE_D2F_1ST_STR_01 (0x0104)
+#define IPIPE_D2F_1ST_STR_02 (0x0108)
+#define IPIPE_D2F_1ST_STR_03 (0x010c)
+#define IPIPE_D2F_1ST_STR_04 (0x0110)
+#define IPIPE_D2F_1ST_STR_05 (0x0114)
+#define IPIPE_D2F_1ST_STR_06 (0x0118)
+#define IPIPE_D2F_1ST_STR_07 (0x011c)
+#define IPIPE_D2F_1ST_SPR_00 (0x0120)
+#define IPIPE_D2F_1ST_SPR_01 (0x0124)
+#define IPIPE_D2F_1ST_SPR_02 (0x0128)
+#define IPIPE_D2F_1ST_SPR_03 (0x012c)
+#define IPIPE_D2F_1ST_SPR_04 (0x0130)
+#define IPIPE_D2F_1ST_SPR_05 (0x0134)
+#define IPIPE_D2F_1ST_SPR_06 (0x0138)
+#define IPIPE_D2F_1ST_SPR_07 (0x013c)
+#define IPIPE_D2F_1ST_EDG_MIN (0x0140)
+#define IPIPE_D2F_1ST_EDG_MAX (0x0144)
+#define IPIPE_D2F_2ND_EN (0x0148)
+#define IPIPE_D2F_2ND_TYP (0x014c)
+#define IPIPE_D2F_2ND_THR00 (0x0150)
+#define IPIPE_D2F_2ND_THR01 (0x0154)
+#define IPIPE_D2F_2ND_THR02 (0x0158)
+#define IPIPE_D2F_2ND_THR03 (0x015c)
+#define IPIPE_D2F_2ND_THR04 (0x0160)
+#define IPIPE_D2F_2ND_THR05 (0x0164)
+#define IPIPE_D2F_2ND_THR06 (0x0168)
+#define IPIPE_D2F_2ND_THR07 (0x016c)
+#define IPIPE_D2F_2ND_STR_00 (0x0170)
+#define IPIPE_D2F_2ND_STR_01 (0x0174)
+#define IPIPE_D2F_2ND_STR_02 (0x0178)
+#define IPIPE_D2F_2ND_STR_03 (0x017c)
+#define IPIPE_D2F_2ND_STR_04 (0x0180)
+#define IPIPE_D2F_2ND_STR_05 (0x0184)
+#define IPIPE_D2F_2ND_STR_06 (0x0188)
+#define IPIPE_D2F_2ND_STR_07 (0x018c)
+#define IPIPE_D2F_2ND_SPR_00 (0x0190)
+#define IPIPE_D2F_2ND_SPR_01 (0x0194)
+#define IPIPE_D2F_2ND_SPR_02 (0x0198)
+#define IPIPE_D2F_2ND_SPR_03 (0x019c)
+#define IPIPE_D2F_2ND_SPR_04 (0x01a0)
+#define IPIPE_D2F_2ND_SPR_05 (0x01a4)
+#define IPIPE_D2F_2ND_SPR_06 (0x01a8)
+#define IPIPE_D2F_2ND_SPR_07 (0x01ac)
+#define IPIPE_D2F_2ND_EDG_MIN (0x01b0)
+#define IPIPE_D2F_2ND_EDG_MAX (0x01b4)
+
+#define IPIPE_GIC_EN (0x01b8)
+#define IPIPE_GIC_TYP (0x01bc)
+#define IPIPE_GIC_GAN (0x01c0)
+#define IPIPE_GIC_NFGAIN (0x01c4)
+#define IPIPE_GIC_THR (0x01c8)
+#define IPIPE_GIC_SLP (0x01cc)
+
+#define IPIPE_WB2_OFT_R (0x01d0)
+#define IPIPE_WB2_OFT_GR (0x01d4)
+#define IPIPE_WB2_OFT_GB (0x01d8)
+#define IPIPE_WB2_OFT_B (0x01dc)
+
+#define IPIPE_WB2_WGN_R (0x01e0)
+#define IPIPE_WB2_WGN_GR (0x01e4)
+#define IPIPE_WB2_WGN_GB (0x01e8)
+#define IPIPE_WB2_WGN_B (0x01ec)
+
+#define IPIPE_CFA_MODE (0x01f0)
+#define IPIPE_CFA_2DIR_HPF_THR (0x01f4)
+#define IPIPE_CFA_2DIR_HPF_SLP (0x01f8)
+#define IPIPE_CFA_2DIR_MIX_THR (0x01fc)
+#define IPIPE_CFA_2DIR_MIX_SLP (0x0200)
+#define IPIPE_CFA_2DIR_DIR_TRH (0x0204)
+#define IPIPE_CFA_2DIR_DIR_SLP (0x0208)
+#define IPIPE_CFA_2DIR_NDWT (0x020c)
+#define IPIPE_CFA_MONO_HUE_FRA (0x0210)
+#define IPIPE_CFA_MONO_EDG_THR (0x0214)
+#define IPIPE_CFA_MONO_THR_MIN (0x0218)
+
+#define IPIPE_CFA_MONO_THR_SLP (0x021c)
+#define IPIPE_CFA_MONO_SLP_MIN (0x0220)
+#define IPIPE_CFA_MONO_SLP_SLP (0x0224)
+#define IPIPE_CFA_MONO_LPWT (0x0228)
+
+#define IPIPE_RGB1_MUL_RR (0x022c)
+#define IPIPE_RGB1_MUL_GR (0x0230)
+#define IPIPE_RGB1_MUL_BR (0x0234)
+#define IPIPE_RGB1_MUL_RG (0x0238)
+#define IPIPE_RGB1_MUL_GG (0x023c)
+#define IPIPE_RGB1_MUL_BG (0x0240)
+#define IPIPE_RGB1_MUL_RB (0x0244)
+#define IPIPE_RGB1_MUL_GB (0x0248)
+#define IPIPE_RGB1_MUL_BB (0x024c)
+#define IPIPE_RGB1_OFT_OR (0x0250)
+#define IPIPE_RGB1_OFT_OG (0x0254)
+#define IPIPE_RGB1_OFT_OB (0x0258)
+#define IPIPE_GMM_CFG (0x025c)
+#define IPIPE_RGB2_MUL_RR (0x0260)
+#define IPIPE_RGB2_MUL_GR (0x0264)
+#define IPIPE_RGB2_MUL_BR (0x0268)
+#define IPIPE_RGB2_MUL_RG (0x026c)
+#define IPIPE_RGB2_MUL_GG (0x0270)
+#define IPIPE_RGB2_MUL_BG (0x0274)
+#define IPIPE_RGB2_MUL_RB (0x0278)
+#define IPIPE_RGB2_MUL_GB (0x027c)
+#define IPIPE_RGB2_MUL_BB (0x0280)
+#define IPIPE_RGB2_OFT_OR (0x0284)
+#define IPIPE_RGB2_OFT_OG (0x0288)
+#define IPIPE_RGB2_OFT_OB (0x028c)
+
+#define IPIPE_YUV_ADJ (0x0294)
+#define IPIPE_YUV_MUL_RY (0x0298)
+#define IPIPE_YUV_MUL_GY (0x029c)
+#define IPIPE_YUV_MUL_BY (0x02a0)
+#define IPIPE_YUV_MUL_RCB (0x02a4)
+#define IPIPE_YUV_MUL_GCB (0x02a8)
+#define IPIPE_YUV_MUL_BCB (0x02ac)
+#define IPIPE_YUV_MUL_RCR (0x02b0)
+#define IPIPE_YUV_MUL_GCR (0x02b4)
+#define IPIPE_YUV_MUL_BCR (0x02b8)
+#define IPIPE_YUV_OFT_Y (0x02bc)
+#define IPIPE_YUV_OFT_CB (0x02c0)
+#define IPIPE_YUV_OFT_CR (0x02c4)
+
+#define IPIPE_YUV_PHS (0x02c8)
+#define IPIPE_YUV_PHS_LPF (1 << 1)
+#define IPIPE_YUV_PHS_POS (1 << 0)
+
+#define IPIPE_YEE_EN (0x02d4)
+#define IPIPE_YEE_TYP (0x02d8)
+#define IPIPE_YEE_SHF (0x02dc)
+#define IPIPE_YEE_MUL_00 (0x02e0)
+#define IPIPE_YEE_MUL_01 (0x02e4)
+#define IPIPE_YEE_MUL_02 (0x02e8)
+#define IPIPE_YEE_MUL_10 (0x02ec)
+#define IPIPE_YEE_MUL_11 (0x02f0)
+#define IPIPE_YEE_MUL_12 (0x02f4)
+#define IPIPE_YEE_MUL_20 (0x02f8)
+#define IPIPE_YEE_MUL_21 (0x02fc)
+#define IPIPE_YEE_MUL_22 (0x0300)
+#define IPIPE_YEE_THR (0x0304)
+#define IPIPE_YEE_E_GAN (0x0308)
+#define IPIPE_YEE_E_THR_1 (0x030c)
+#define IPIPE_YEE_E_THR_2 (0x0310)
+#define IPIPE_YEE_G_GAN (0x0314)
+#define IPIPE_YEE_G_OFT (0x0318)
+
+#define IPIPE_CAR_EN (0x031c)
+#define IPIPE_CAR_TYP (0x0320)
+#define IPIPE_CAR_SW (0x0324)
+#define IPIPE_CAR_HPF_TYP (0x0328)
+#define IPIPE_CAR_HPF_SHF (0x032c)
+#define IPIPE_CAR_HPF_THR (0x0330)
+#define IPIPE_CAR_GN1_GAN (0x0334)
+#define IPIPE_CAR_GN1_SHF (0x0338)
+#define IPIPE_CAR_GN1_MIN (0x033c)
+#define IPIPE_CAR_GN2_GAN (0x0340)
+#define IPIPE_CAR_GN2_SHF (0x0344)
+#define IPIPE_CAR_GN2_MIN (0x0348)
+#define IPIPE_CGS_EN (0x034c)
+#define IPIPE_CGS_GN1_L_THR (0x0350)
+#define IPIPE_CGS_GN1_L_GAIN (0x0354)
+#define IPIPE_CGS_GN1_L_SHF (0x0358)
+#define IPIPE_CGS_GN1_L_MIN (0x035c)
+#define IPIPE_CGS_GN1_H_THR (0x0360)
+#define IPIPE_CGS_GN1_H_GAIN (0x0364)
+#define IPIPE_CGS_GN1_H_SHF (0x0368)
+#define IPIPE_CGS_GN1_H_MIN (0x036c)
+#define IPIPE_CGS_GN2_L_THR (0x0370)
+#define IPIPE_CGS_GN2_L_GAIN (0x0374)
+#define IPIPE_CGS_GN2_L_SHF (0x0378)
+#define IPIPE_CGS_GN2_L_MIN (0x037c)
+
+#define IPIPE_BOX_EN (0x0380)
+#define IPIPE_BOX_MODE (0x0384)
+#define IPIPE_BOX_TYP (0x0388)
+#define IPIPE_BOX_SHF (0x038c)
+#define IPIPE_BOX_SDR_SAD_H (0x0390)
+#define IPIPE_BOX_SDR_SAD_L (0x0394)
+
+#define IPIPE_HST_EN (0x039c)
+#define IPIPE_HST_MODE (0x03a0)
+#define IPIPE_HST_SEL (0x03a4)
+#define IPIPE_HST_PARA (0x03a8)
+#define IPIPE_HST_0_VPS (0x03ac)
+#define IPIPE_HST_0_VSZ (0x03b0)
+#define IPIPE_HST_0_HPS (0x03b4)
+#define IPIPE_HST_0_HSZ (0x03b8)
+#define IPIPE_HST_1_VPS (0x03bc)
+#define IPIPE_HST_1_VSZ (0x03c0)
+#define IPIPE_HST_1_HPS (0x03c4)
+#define IPIPE_HST_1_HSZ (0x03c8)
+#define IPIPE_HST_2_VPS (0x03cc)
+#define IPIPE_HST_2_VSZ (0x03d0)
+#define IPIPE_HST_2_HPS (0x03d4)
+#define IPIPE_HST_2_HSZ (0x03d8)
+#define IPIPE_HST_3_VPS (0x03dc)
+#define IPIPE_HST_3_VSZ (0x03e0)
+#define IPIPE_HST_3_HPS (0x03e4)
+#define IPIPE_HST_3_HSZ (0x03e8)
+#define IPIPE_HST_TBL (0x03ec)
+#define IPIPE_HST_MUL_R (0x03f0)
+#define IPIPE_HST_MUL_GR (0x03f4)
+#define IPIPE_HST_MUL_GB (0x03f8)
+#define IPIPE_HST_MUL_B (0x03fc)
+
+#define IPIPE_BSC_EN (0x0400)
+#define IPIPE_BSC_MODE (0x0404)
+#define IPIPE_BSC_TYP (0x0408)
+#define IPIPE_BSC_ROW_VCT (0x040c)
+#define IPIPE_BSC_ROW_SHF (0x0410)
+#define IPIPE_BSC_ROW_VPO (0x0414)
+#define IPIPE_BSC_ROW_VNU (0x0418)
+#define IPIPE_BSC_ROW_VSKIP (0x041c)
+#define IPIPE_BSC_ROW_HPO (0x0420)
+#define IPIPE_BSC_ROW_HNU (0x0424)
+#define IPIPE_BSC_ROW_HSKIP (0x0428)
+#define IPIPE_BSC_COL_VCT (0x042c)
+#define IPIPE_BSC_COL_SHF (0x0430)
+#define IPIPE_BSC_COL_VPO (0x0434)
+#define IPIPE_BSC_COL_VNU (0x0438)
+#define IPIPE_BSC_COL_VSKIP (0x043c)
+#define IPIPE_BSC_COL_HPO (0x0440)
+#define IPIPE_BSC_COL_HNU (0x0444)
+#define IPIPE_BSC_COL_HSKIP (0x0448)
+
+/* ISS ISP Resizer register offsets */
+#define RSZ_REVISION (0x0000)
+#define RSZ_SYSCONFIG (0x0004)
+#define RSZ_SYSCONFIG_RSZB_CLK_EN (1 << 9)
+#define RSZ_SYSCONFIG_RSZA_CLK_EN (1 << 8)
+
+#define RSZ_IN_FIFO_CTRL (0x000c)
+#define RSZ_IN_FIFO_CTRL_THRLD_LOW_MASK (0x1ff << 16)
+#define RSZ_IN_FIFO_CTRL_THRLD_LOW_SHIFT 16
+#define RSZ_IN_FIFO_CTRL_THRLD_HIGH_MASK (0x1ff << 0)
+#define RSZ_IN_FIFO_CTRL_THRLD_HIGH_SHIFT 0
+
+#define RSZ_FRACDIV (0x0008)
+#define RSZ_FRACDIV_MASK (0xffff)
+
+#define RSZ_SRC_EN (0x0020)
+#define RSZ_SRC_EN_SRC_EN (1 << 0)
+
+#define RSZ_SRC_MODE (0x0024)
+#define RSZ_SRC_MODE_OST (1 << 0)
+#define RSZ_SRC_MODE_WRT (1 << 1)
+
+#define RSZ_SRC_FMT0 (0x0028)
+#define RSZ_SRC_FMT0_BYPASS (1 << 1)
+#define RSZ_SRC_FMT0_SEL (1 << 0)
+
+#define RSZ_SRC_FMT1 (0x002c)
+#define RSZ_SRC_FMT1_IN420 (1 << 1)
+
+#define RSZ_SRC_VPS (0x0030)
+#define RSZ_SRC_VSZ (0x0034)
+#define RSZ_SRC_HPS (0x0038)
+#define RSZ_SRC_HSZ (0x003c)
+#define RSZ_DMA_RZA (0x0040)
+#define RSZ_DMA_RZB (0x0044)
+#define RSZ_DMA_STA (0x0048)
+#define RSZ_GCK_MMR (0x004c)
+#define RSZ_GCK_MMR_MMR (1 << 0)
+
+#define RSZ_GCK_SDR (0x0054)
+#define RSZ_GCK_SDR_CORE (1 << 0)
+
+#define RSZ_IRQ_RZA (0x0058)
+#define RSZ_IRQ_RZA_MASK (0x1fff)
+
+#define RSZ_IRQ_RZB (0x005c)
+#define RSZ_IRQ_RZB_MASK (0x1fff)
+
+#define RSZ_YUV_Y_MIN (0x0060)
+#define RSZ_YUV_Y_MAX (0x0064)
+#define RSZ_YUV_C_MIN (0x0068)
+#define RSZ_YUV_C_MAX (0x006c)
+
+#define RSZ_SEQ (0x0074)
+#define RSZ_SEQ_HRVB (1 << 2)
+#define RSZ_SEQ_HRVA (1 << 0)
+
+#define RZA_EN (0x0078)
+#define RZA_MODE (0x007c)
+#define RZA_MODE_ONE_SHOT (1 << 0)
+
+#define RZA_420 (0x0080)
+#define RZA_I_VPS (0x0084)
+#define RZA_I_HPS (0x0088)
+#define RZA_O_VSZ (0x008c)
+#define RZA_O_HSZ (0x0090)
+#define RZA_V_PHS_Y (0x0094)
+#define RZA_V_PHS_C (0x0098)
+#define RZA_V_DIF (0x009c)
+#define RZA_V_TYP (0x00a0)
+#define RZA_V_LPF (0x00a4)
+#define RZA_H_PHS (0x00a8)
+#define RZA_H_DIF (0x00b0)
+#define RZA_H_TYP (0x00b4)
+#define RZA_H_LPF (0x00b8)
+#define RZA_DWN_EN (0x00bc)
+#define RZA_SDR_Y_BAD_H (0x00d0)
+#define RZA_SDR_Y_BAD_L (0x00d4)
+#define RZA_SDR_Y_SAD_H (0x00d8)
+#define RZA_SDR_Y_SAD_L (0x00dc)
+#define RZA_SDR_Y_OFT (0x00e0)
+#define RZA_SDR_Y_PTR_S (0x00e4)
+#define RZA_SDR_Y_PTR_E (0x00e8)
+#define RZA_SDR_C_BAD_H (0x00ec)
+#define RZA_SDR_C_BAD_L (0x00f0)
+#define RZA_SDR_C_SAD_H (0x00f4)
+#define RZA_SDR_C_SAD_L (0x00f8)
+#define RZA_SDR_C_OFT (0x00fc)
+#define RZA_SDR_C_PTR_S (0x0100)
+#define RZA_SDR_C_PTR_E (0x0104)
+
+#define RZB_EN (0x0108)
+#define RZB_MODE (0x010c)
+#define RZB_420 (0x0110)
+#define RZB_I_VPS (0x0114)
+#define RZB_I_HPS (0x0118)
+#define RZB_O_VSZ (0x011c)
+#define RZB_O_HSZ (0x0120)
+
+#define RZB_V_DIF (0x012c)
+#define RZB_V_TYP (0x0130)
+#define RZB_V_LPF (0x0134)
+
+#define RZB_H_DIF (0x0140)
+#define RZB_H_TYP (0x0144)
+#define RZB_H_LPF (0x0148)
+
+#define RZB_SDR_Y_BAD_H (0x0160)
+#define RZB_SDR_Y_BAD_L (0x0164)
+#define RZB_SDR_Y_SAD_H (0x0168)
+#define RZB_SDR_Y_SAD_L (0x016c)
+#define RZB_SDR_Y_OFT (0x0170)
+#define RZB_SDR_Y_PTR_S (0x0174)
+#define RZB_SDR_Y_PTR_E (0x0178)
+#define RZB_SDR_C_BAD_H (0x017c)
+#define RZB_SDR_C_BAD_L (0x0180)
+#define RZB_SDR_C_SAD_H (0x0184)
+#define RZB_SDR_C_SAD_L (0x0188)
+
+#define RZB_SDR_C_PTR_S (0x0190)
+#define RZB_SDR_C_PTR_E (0x0194)
+
+/* Shared Bitmasks between RZA & RZB */
+#define RSZ_EN_EN (1 << 0)
+
+#define RSZ_420_CEN (1 << 1)
+#define RSZ_420_YEN (1 << 0)
+
+#define RSZ_I_VPS_MASK (0x1fff)
+
+#define RSZ_I_HPS_MASK (0x1fff)
+
+#define RSZ_O_VSZ_MASK (0x1fff)
+
+#define RSZ_O_HSZ_MASK (0x1ffe)
+
+#define RSZ_V_PHS_Y_MASK (0x3fff)
+
+#define RSZ_V_PHS_C_MASK (0x3fff)
+
+#define RSZ_V_DIF_MASK (0x3fff)
+
+#define RSZ_V_TYP_C (1 << 1)
+#define RSZ_V_TYP_Y (1 << 0)
+
+#define RSZ_V_LPF_C_MASK (0x3f << 6)
+#define RSZ_V_LPF_C_SHIFT 6
+#define RSZ_V_LPF_Y_MASK (0x3f << 0)
+#define RSZ_V_LPF_Y_SHIFT 0
+
+#define RSZ_H_PHS_MASK (0x3fff)
+
+#define RSZ_H_DIF_MASK (0x3fff)
+
+#define RSZ_H_TYP_C (1 << 1)
+#define RSZ_H_TYP_Y (1 << 0)
+
+#define RSZ_H_LPF_C_MASK (0x3f << 6)
+#define RSZ_H_LPF_C_SHIFT 6
+#define RSZ_H_LPF_Y_MASK (0x3f << 0)
+#define RSZ_H_LPF_Y_SHIFT 0
+
+#define RSZ_DWN_EN_DWN_EN (1 << 0)
+
+#endif /* _OMAP4_ISS_REGS_H_ */
diff --git a/drivers/staging/media/omap4iss/iss_resizer.c b/drivers/staging/media/omap4iss/iss_resizer.c
new file mode 100644
index 000000000000..ae831b8985c9
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_resizer.c
@@ -0,0 +1,893 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP RESIZER module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#include "iss.h"
+#include "iss_regs.h"
+#include "iss_resizer.h"
+
+static const unsigned int resizer_fmts[] = {
+ V4L2_MBUS_FMT_UYVY8_1X16,
+ V4L2_MBUS_FMT_YUYV8_1X16,
+};
+
+/*
+ * resizer_print_status - Print current RESIZER Module register values.
+ * @resizer: Pointer to ISS ISP RESIZER device.
+ *
+ * Also prints other debug information stored in the RESIZER module.
+ */
+#define RSZ_PRINT_REGISTER(iss, name)\
+ dev_dbg(iss->dev, "###RSZ " #name "=0x%08x\n", \
+ iss_reg_read(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_##name))
+
+#define RZA_PRINT_REGISTER(iss, name)\
+ dev_dbg(iss->dev, "###RZA " #name "=0x%08x\n", \
+ iss_reg_read(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_##name))
+
+static void resizer_print_status(struct iss_resizer_device *resizer)
+{
+ struct iss_device *iss = to_iss_device(resizer);
+
+ dev_dbg(iss->dev, "-------------RESIZER Register dump-------------\n");
+
+ RSZ_PRINT_REGISTER(iss, SYSCONFIG);
+ RSZ_PRINT_REGISTER(iss, IN_FIFO_CTRL);
+ RSZ_PRINT_REGISTER(iss, FRACDIV);
+ RSZ_PRINT_REGISTER(iss, SRC_EN);
+ RSZ_PRINT_REGISTER(iss, SRC_MODE);
+ RSZ_PRINT_REGISTER(iss, SRC_FMT0);
+ RSZ_PRINT_REGISTER(iss, SRC_FMT1);
+ RSZ_PRINT_REGISTER(iss, SRC_VPS);
+ RSZ_PRINT_REGISTER(iss, SRC_VSZ);
+ RSZ_PRINT_REGISTER(iss, SRC_HPS);
+ RSZ_PRINT_REGISTER(iss, SRC_HSZ);
+ RSZ_PRINT_REGISTER(iss, DMA_RZA);
+ RSZ_PRINT_REGISTER(iss, DMA_RZB);
+ RSZ_PRINT_REGISTER(iss, DMA_STA);
+ RSZ_PRINT_REGISTER(iss, GCK_MMR);
+ RSZ_PRINT_REGISTER(iss, GCK_SDR);
+ RSZ_PRINT_REGISTER(iss, IRQ_RZA);
+ RSZ_PRINT_REGISTER(iss, IRQ_RZB);
+ RSZ_PRINT_REGISTER(iss, YUV_Y_MIN);
+ RSZ_PRINT_REGISTER(iss, YUV_Y_MAX);
+ RSZ_PRINT_REGISTER(iss, YUV_C_MIN);
+ RSZ_PRINT_REGISTER(iss, YUV_C_MAX);
+ RSZ_PRINT_REGISTER(iss, SEQ);
+
+ RZA_PRINT_REGISTER(iss, EN);
+ RZA_PRINT_REGISTER(iss, MODE);
+ RZA_PRINT_REGISTER(iss, 420);
+ RZA_PRINT_REGISTER(iss, I_VPS);
+ RZA_PRINT_REGISTER(iss, I_HPS);
+ RZA_PRINT_REGISTER(iss, O_VSZ);
+ RZA_PRINT_REGISTER(iss, O_HSZ);
+ RZA_PRINT_REGISTER(iss, V_PHS_Y);
+ RZA_PRINT_REGISTER(iss, V_PHS_C);
+ RZA_PRINT_REGISTER(iss, V_DIF);
+ RZA_PRINT_REGISTER(iss, V_TYP);
+ RZA_PRINT_REGISTER(iss, V_LPF);
+ RZA_PRINT_REGISTER(iss, H_PHS);
+ RZA_PRINT_REGISTER(iss, H_DIF);
+ RZA_PRINT_REGISTER(iss, H_TYP);
+ RZA_PRINT_REGISTER(iss, H_LPF);
+ RZA_PRINT_REGISTER(iss, DWN_EN);
+ RZA_PRINT_REGISTER(iss, SDR_Y_BAD_H);
+ RZA_PRINT_REGISTER(iss, SDR_Y_BAD_L);
+ RZA_PRINT_REGISTER(iss, SDR_Y_SAD_H);
+ RZA_PRINT_REGISTER(iss, SDR_Y_SAD_L);
+ RZA_PRINT_REGISTER(iss, SDR_Y_OFT);
+ RZA_PRINT_REGISTER(iss, SDR_Y_PTR_S);
+ RZA_PRINT_REGISTER(iss, SDR_Y_PTR_E);
+ RZA_PRINT_REGISTER(iss, SDR_C_BAD_H);
+ RZA_PRINT_REGISTER(iss, SDR_C_BAD_L);
+ RZA_PRINT_REGISTER(iss, SDR_C_SAD_H);
+ RZA_PRINT_REGISTER(iss, SDR_C_SAD_L);
+ RZA_PRINT_REGISTER(iss, SDR_C_OFT);
+ RZA_PRINT_REGISTER(iss, SDR_C_PTR_S);
+ RZA_PRINT_REGISTER(iss, SDR_C_PTR_E);
+
+ dev_dbg(iss->dev, "-----------------------------------------------\n");
+}
+
+/*
+ * resizer_enable - Enable/Disable RESIZER.
+ * @resizer: Pointer to ISS ISP RESIZER device.
+ * @enable: enable flag
+ */
+static void resizer_enable(struct iss_resizer_device *resizer, u8 enable)
+{
+ struct iss_device *iss = to_iss_device(resizer);
+
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_EN,
+ RSZ_SRC_EN_SRC_EN, enable ? RSZ_SRC_EN_SRC_EN : 0);
+
+ /* TODO: Enable RSZB */
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_EN, RSZ_EN_EN,
+ enable ? RSZ_EN_EN : 0);
+}
+
+/* -----------------------------------------------------------------------------
+ * Format- and pipeline-related configuration helpers
+ */
+
+/*
+ * resizer_set_outaddr - Set memory address to save output image
+ * @resizer: Pointer to ISP RESIZER device.
+ * @addr: 32-bit memory address aligned on a 32-byte boundary.
+ *
+ * Sets the memory address where the output will be saved.
+ */
+static void resizer_set_outaddr(struct iss_resizer_device *resizer, u32 addr)
+{
+ struct iss_device *iss = to_iss_device(resizer);
+ struct v4l2_mbus_framefmt *informat, *outformat;
+
+ informat = &resizer->formats[RESIZER_PAD_SINK];
+ outformat = &resizer->formats[RESIZER_PAD_SOURCE_MEM];
+
+ /* Save the address, split into Base Address H & L */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_BAD_H,
+ (addr >> 16) & 0xffff);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_BAD_L,
+ addr & 0xffff);
+
+ /* SAD = BAD */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_SAD_H,
+ (addr >> 16) & 0xffff);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_SAD_L,
+ addr & 0xffff);
+
+ /* Program UV buffer address... Hardcoded to be contiguous! */
+ if ((informat->code == V4L2_MBUS_FMT_UYVY8_1X16) &&
+ (outformat->code == V4L2_MBUS_FMT_YUYV8_1_5X8)) {
+ u32 c_addr = addr + (resizer->video_out.bpl_value *
+ (outformat->height - 1));
+
+ /* Ensure Y_BAD_L[6:0] = C_BAD_L[6:0] */
+ if ((c_addr ^ addr) & 0x7f) {
+ c_addr &= ~0x7f;
+ c_addr += 0x80;
+ c_addr |= addr & 0x7f;
+ }
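+ /* For example (illustrative values): with addr = 0x80000020 and an
+ * initial c_addr = 0x80095fa8, the low 7 bits differ, so c_addr is
+ * rounded up to the next 128-byte boundary (0x80096000) and its low
+ * bits replaced with those of addr, giving 0x80096020.
+ */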
+
+ /* Save the address, split into Base Address H & L */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_BAD_H,
+ (c_addr >> 16) & 0xffff);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_BAD_L,
+ c_addr & 0xffff);
+
+ /* SAD = BAD */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_SAD_H,
+ (c_addr >> 16) & 0xffff);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_SAD_L,
+ c_addr & 0xffff);
+ }
+}
+
+static void resizer_configure(struct iss_resizer_device *resizer)
+{
+ struct iss_device *iss = to_iss_device(resizer);
+ struct v4l2_mbus_framefmt *informat, *outformat;
+
+ informat = &resizer->formats[RESIZER_PAD_SINK];
+ outformat = &resizer->formats[RESIZER_PAD_SOURCE_MEM];
+
+ /* Disable pass-through mode. Despite its name, the BYPASS bit controls
+ * pass-through mode, not bypass mode.
+ */
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_FMT0,
+ RSZ_SRC_FMT0_BYPASS);
+
+ /* Select RSZ input */
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_FMT0,
+ RSZ_SRC_FMT0_SEL,
+ resizer->input == RESIZER_INPUT_IPIPEIF ?
+ RSZ_SRC_FMT0_SEL : 0);
+
+ /* RSZ ignores WEN signal from IPIPE/IPIPEIF */
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_MODE,
+ RSZ_SRC_MODE_WRT);
+
+ /* Set Resizer in free-running mode */
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_MODE,
+ RSZ_SRC_MODE_OST);
+
+ /* Init Resizer A */
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_MODE,
+ RZA_MODE_ONE_SHOT);
+
+ /* Set size related things now */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_VPS, 0);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_HPS, 0);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_VSZ,
+ informat->height - 2);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_HSZ,
+ informat->width - 1);
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_I_VPS, 0);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_I_HPS, 0);
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_O_VSZ,
+ outformat->height - 2);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_O_HSZ,
+ outformat->width - 1);
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_V_DIF, 0x100);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_H_DIF, 0x100);
+
+ /* Buffer output settings */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_PTR_S, 0);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_PTR_E,
+ outformat->height - 1);
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_OFT,
+ resizer->video_out.bpl_value);
+
+ /* UYVY -> NV12 conversion */
+ if ((informat->code == V4L2_MBUS_FMT_UYVY8_1X16) &&
+ (outformat->code == V4L2_MBUS_FMT_YUYV8_1_5X8)) {
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_420,
+ RSZ_420_CEN | RSZ_420_YEN);
+
+ /* UV Buffer output settings */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_PTR_S,
+ 0);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_PTR_E,
+ outformat->height - 1);
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_OFT,
+ resizer->video_out.bpl_value);
+ } else {
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_420, 0);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+static void resizer_isr_buffer(struct iss_resizer_device *resizer)
+{
+ struct iss_buffer *buffer;
+
+ /* The whole resizer needs to be stopped. Disabling RZA only produces
+ * input FIFO overflows, most probably when the next frame is received.
+ */
+ resizer_enable(resizer, 0);
+
+ buffer = omap4iss_video_buffer_next(&resizer->video_out);
+ if (buffer == NULL)
+ return;
+
+ resizer_set_outaddr(resizer, buffer->iss_addr);
+
+ resizer_enable(resizer, 1);
+}
+
+/*
+ * resizer_int_dma_isr - Handle the resizer DMA completion (INT_DMA) event
+ * @resizer: Pointer to ISP RESIZER device.
+ *
+ * Updates the pipeline frame number when propagation is enabled and completes
+ * the current buffer before setting up the next one.
+ */
+static void resizer_int_dma_isr(struct iss_resizer_device *resizer)
+{
+ struct iss_pipeline *pipe =
+ to_iss_pipeline(&resizer->subdev.entity);
+ if (pipe->do_propagation)
+ atomic_inc(&pipe->frame_number);
+
+ resizer_isr_buffer(resizer);
+}
+
+/*
+ * omap4iss_resizer_isr - Configure resizer during interframe time.
+ * @resizer: Pointer to ISP RESIZER device.
+ * @events: RESIZER events
+ */
+void omap4iss_resizer_isr(struct iss_resizer_device *resizer, u32 events)
+{
+ struct iss_device *iss = to_iss_device(resizer);
+ struct iss_pipeline *pipe =
+ to_iss_pipeline(&resizer->subdev.entity);
+
+ if (events & (ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR |
+ ISP5_IRQ_RSZ_FIFO_OVF)) {
+ dev_dbg(iss->dev, "RSZ Err: FIFO_IN_BLK:%d, FIFO_OVF:%d\n",
+ events & ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR ? 1 : 0,
+ events & ISP5_IRQ_RSZ_FIFO_OVF ? 1 : 0);
+ omap4iss_pipeline_cancel_stream(pipe);
+ }
+
+ if (omap4iss_module_sync_is_stopping(&resizer->wait,
+ &resizer->stopping))
+ return;
+
+ if (events & ISP5_IRQ_RSZ_INT_DMA)
+ resizer_int_dma_isr(resizer);
+}
+
+/* -----------------------------------------------------------------------------
+ * ISS video operations
+ */
+
+static int resizer_video_queue(struct iss_video *video,
+ struct iss_buffer *buffer)
+{
+ struct iss_resizer_device *resizer = container_of(video,
+ struct iss_resizer_device, video_out);
+
+ if (!(resizer->output & RESIZER_OUTPUT_MEMORY))
+ return -ENODEV;
+
+ resizer_set_outaddr(resizer, buffer->iss_addr);
+
+ /*
+ * If streaming was enabled before any buffer was queued, or if an
+ * underrun happened in the ISR, the hardware was not enabled and
+ * the ISS_VIDEO_DMAQUEUE_UNDERRUN DMA queue flag is still set.
+ * Enable the hardware now.
+ */
+ if (video->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
+ resizer_enable(resizer, 1);
+ iss_video_dmaqueue_flags_clr(video);
+ }
+
+ return 0;
+}
+
+static const struct iss_video_operations resizer_video_ops = {
+ .queue = resizer_video_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+/*
+ * resizer_set_stream - Enable/Disable streaming on the RESIZER module
+ * @sd: ISP RESIZER V4L2 subdevice
+ * @enable: Enable/disable stream
+ */
+static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct iss_device *iss = to_iss_device(resizer);
+ struct iss_video *video_out = &resizer->video_out;
+ int ret = 0;
+
+ if (resizer->state == ISS_PIPELINE_STREAM_STOPPED) {
+ if (enable == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+
+ omap4iss_isp_subclk_enable(iss, OMAP4_ISS_ISP_SUBCLK_RSZ);
+
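+ /* Ungate the resizer MMR (register) and SDR gated clocks before
+ * configuring and enabling the resizer core.
+ */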
+ iss_reg_set(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_MMR,
+ RSZ_GCK_MMR_MMR);
+ iss_reg_set(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_SDR,
+ RSZ_GCK_SDR_CORE);
+
+ /* FIXME: Enable RSZB also */
+ iss_reg_set(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SYSCONFIG,
+ RSZ_SYSCONFIG_RSZA_CLK_EN);
+ }
+
+ switch (enable) {
+ case ISS_PIPELINE_STREAM_CONTINUOUS:
+
+ resizer_configure(resizer);
+ resizer_print_status(resizer);
+
+ /*
+ * When outputting to memory with no buffer available, let the
+ * buffer queue handler start the hardware. A DMA queue flag
+ * ISS_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
+ * a buffer available.
+ */
+ if (resizer->output & RESIZER_OUTPUT_MEMORY &&
+ !(video_out->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_QUEUED))
+ break;
+
+ atomic_set(&resizer->stopping, 0);
+ resizer_enable(resizer, 1);
+ iss_video_dmaqueue_flags_clr(video_out);
+ break;
+
+ case ISS_PIPELINE_STREAM_STOPPED:
+ if (resizer->state == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+ if (omap4iss_module_sync_idle(&sd->entity, &resizer->wait,
+ &resizer->stopping))
+ ret = -ETIMEDOUT;
+
+ resizer_enable(resizer, 0);
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SYSCONFIG,
+ RSZ_SYSCONFIG_RSZA_CLK_EN);
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_SDR,
+ RSZ_GCK_SDR_CORE);
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_MMR,
+ RSZ_GCK_MMR_MMR);
+ omap4iss_isp_subclk_disable(iss, OMAP4_ISS_ISP_SUBCLK_RSZ);
+ iss_video_dmaqueue_flags_clr(video_out);
+ break;
+ }
+
+ resizer->state = enable;
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__resizer_get_format(struct iss_resizer_device *resizer,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+ else
+ return &resizer->formats[pad];
+}
+
+/*
+ * resizer_try_format - Try video format on a pad
+ * @resizer: ISS RESIZER device
+ * @fh : V4L2 subdev file handle
+ * @pad: Pad number
+ * @fmt: Format
+ */
+static void
+resizer_try_format(struct iss_resizer_device *resizer,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ enum v4l2_mbus_pixelcode pixelcode;
+ struct v4l2_mbus_framefmt *format;
+ unsigned int width = fmt->width;
+ unsigned int height = fmt->height;
+ unsigned int i;
+
+ switch (pad) {
+ case RESIZER_PAD_SINK:
+ for (i = 0; i < ARRAY_SIZE(resizer_fmts); i++) {
+ if (fmt->code == resizer_fmts[i])
+ break;
+ }
+
+ /* If not found, use UYVY as default */
+ if (i >= ARRAY_SIZE(resizer_fmts))
+ fmt->code = V4L2_MBUS_FMT_UYVY8_1X16;
+
+ /* Clamp the input size. */
+ fmt->width = clamp_t(u32, width, 1, 8192);
+ fmt->height = clamp_t(u32, height, 1, 8192);
+ break;
+
+ case RESIZER_PAD_SOURCE_MEM:
+ pixelcode = fmt->code;
+ format = __resizer_get_format(resizer, fh, RESIZER_PAD_SINK,
+ which);
+ memcpy(fmt, format, sizeof(*fmt));
+
+ if ((pixelcode == V4L2_MBUS_FMT_YUYV8_1_5X8) &&
+ (fmt->code == V4L2_MBUS_FMT_UYVY8_1X16))
+ fmt->code = pixelcode;
+
+ /* The data formatter truncates the number of horizontal output
+ * pixels to a multiple of 16. To avoid clipping data, allow
+ * callers to request an output size bigger than the input size
+ * up to the nearest multiple of 16.
+ */
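+ /* Illustrative example (hypothetical sizes): with a 1021 pixel wide
+ * sink format, a requested width of 1036 is clamped to 1024
+ * ((1021 + 15) & ~15), while a request of 1000 is truncated to 992.
+ */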
+ fmt->width = clamp_t(u32, width, 32, (fmt->width + 15) & ~15);
+ fmt->width &= ~15;
+ fmt->height = clamp_t(u32, height, 32, fmt->height);
+ break;
+
+ }
+
+ fmt->colorspace = V4L2_COLORSPACE_JPEG;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * resizer_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ switch (code->pad) {
+ case RESIZER_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(resizer_fmts))
+ return -EINVAL;
+
+ code->code = resizer_fmts[code->index];
+ break;
+
+ case RESIZER_PAD_SOURCE_MEM:
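+ /* The source pad can always output the sink pad format. UYVY input
+ * can additionally be down-converted to YUYV8_1_5X8 (NV12 in memory).
+ */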
+ format = __resizer_get_format(resizer, fh, RESIZER_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_TRY);
+
+ if (code->index == 0) {
+ code->code = format->code;
+ break;
+ }
+
+ switch (format->code) {
+ case V4L2_MBUS_FMT_UYVY8_1X16:
+ if (code->index == 1)
+ code->code = V4L2_MBUS_FMT_YUYV8_1_5X8;
+ else
+ return -EINVAL;
+ break;
+ default:
+ if (code->index != 0)
+ return -EINVAL;
+ }
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int resizer_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
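+ /* Derive the minimum and maximum sizes by running the smallest (1x1)
+ * and largest (-1x-1, i.e. UINT_MAX) possible sizes through the try
+ * format handler.
+ */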
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ resizer_try_format(resizer, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ resizer_try_format(resizer, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * resizer_get_format - Retrieve the video format on a pad
+ * @sd : ISP RESIZER V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __resizer_get_format(resizer, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * resizer_set_format - Set the video format on a pad
+ * @sd : ISP RESIZER V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __resizer_get_format(resizer, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ resizer_try_format(resizer, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == RESIZER_PAD_SINK) {
+ format = __resizer_get_format(resizer, fh,
+ RESIZER_PAD_SOURCE_MEM,
+ fmt->which);
+ *format = fmt->format;
+ resizer_try_format(resizer, fh, RESIZER_PAD_SOURCE_MEM, format,
+ fmt->which);
+ }
+
+ return 0;
+}
+
+static int resizer_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ /* Check if the two ends match */
+ if (source_fmt->format.width != sink_fmt->format.width ||
+ source_fmt->format.height != sink_fmt->format.height)
+ return -EPIPE;
+
+ if (source_fmt->format.code != sink_fmt->format.code)
+ return -EPIPE;
+
+ return 0;
+}
+
+/*
+ * resizer_init_formats - Initialize formats on all pads
+ * @sd: ISP RESIZER V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int resizer_init_formats(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = RESIZER_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_UYVY8_1X16;
+ format.format.width = 4096;
+ format.format.height = 4096;
+ resizer_set_format(sd, fh, &format);
+
+ return 0;
+}
+
+/* V4L2 subdev video operations */
+static const struct v4l2_subdev_video_ops resizer_v4l2_video_ops = {
+ .s_stream = resizer_set_stream,
+};
+
+/* V4L2 subdev pad operations */
+static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = {
+ .enum_mbus_code = resizer_enum_mbus_code,
+ .enum_frame_size = resizer_enum_frame_size,
+ .get_fmt = resizer_get_format,
+ .set_fmt = resizer_set_format,
+ .link_validate = resizer_link_validate,
+};
+
+/* V4L2 subdev operations */
+static const struct v4l2_subdev_ops resizer_v4l2_ops = {
+ .video = &resizer_v4l2_video_ops,
+ .pad = &resizer_v4l2_pad_ops,
+};
+
+/* V4L2 subdev internal operations */
+static const struct v4l2_subdev_internal_ops resizer_v4l2_internal_ops = {
+ .open = resizer_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * resizer_link_setup - Setup RESIZER connections
+ * @entity: RESIZER media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * return -EINVAL or zero on success
+ */
+static int resizer_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct iss_device *iss = to_iss_device(resizer);
+
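+ /* The local pad index is OR'ed with the remote entity type so that a
+ * single switch statement can match both ends of the link.
+ */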
+ switch (local->index | media_entity_type(remote->entity)) {
+ case RESIZER_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* Read from IPIPE or IPIPEIF. */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ resizer->input = RESIZER_INPUT_NONE;
+ break;
+ }
+
+ if (resizer->input != RESIZER_INPUT_NONE)
+ return -EBUSY;
+
+ if (remote->entity == &iss->ipipeif.subdev.entity)
+ resizer->input = RESIZER_INPUT_IPIPEIF;
+ else if (remote->entity == &iss->ipipe.subdev.entity)
+ resizer->input = RESIZER_INPUT_IPIPE;
+
+ break;
+
+ case RESIZER_PAD_SOURCE_MEM | MEDIA_ENT_T_DEVNODE:
+ /* Write to memory */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (resizer->output & ~RESIZER_OUTPUT_MEMORY)
+ return -EBUSY;
+ resizer->output |= RESIZER_OUTPUT_MEMORY;
+ } else {
+ resizer->output &= ~RESIZER_OUTPUT_MEMORY;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations resizer_media_ops = {
+ .link_setup = resizer_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * resizer_init_entities - Initialize V4L2 subdev and media entity
+ * @resizer: ISS ISP RESIZER module
+ *
+ * Return 0 on success and a negative error code on failure.
+ */
+static int resizer_init_entities(struct iss_resizer_device *resizer)
+{
+ struct v4l2_subdev *sd = &resizer->subdev;
+ struct media_pad *pads = resizer->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ resizer->input = RESIZER_INPUT_NONE;
+
+ v4l2_subdev_init(sd, &resizer_v4l2_ops);
+ sd->internal_ops = &resizer_v4l2_internal_ops;
+ strlcpy(sd->name, "OMAP4 ISS ISP resizer", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for iss subdevs */
+ v4l2_set_subdevdata(sd, resizer);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[RESIZER_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[RESIZER_PAD_SOURCE_MEM].flags = MEDIA_PAD_FL_SOURCE;
+
+ me->ops = &resizer_media_ops;
+ ret = media_entity_init(me, RESIZER_PADS_NUM, pads, 0);
+ if (ret < 0)
+ return ret;
+
+ resizer_init_formats(sd, NULL);
+
+ resizer->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ resizer->video_out.ops = &resizer_video_ops;
+ resizer->video_out.iss = to_iss_device(resizer);
+ resizer->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+ resizer->video_out.bpl_alignment = 32;
+ resizer->video_out.bpl_zero_padding = 1;
+ resizer->video_out.bpl_max = 0x1ffe0;
+
+ ret = omap4iss_video_init(&resizer->video_out, "ISP resizer a");
+ if (ret < 0)
+ return ret;
+
+ /* Connect the RESIZER subdev to the video node. */
+ ret = media_entity_create_link(&resizer->subdev.entity,
+ RESIZER_PAD_SOURCE_MEM,
+ &resizer->video_out.video.entity, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+void omap4iss_resizer_unregister_entities(struct iss_resizer_device *resizer)
+{
+ media_entity_cleanup(&resizer->subdev.entity);
+
+ v4l2_device_unregister_subdev(&resizer->subdev);
+ omap4iss_video_unregister(&resizer->video_out);
+}
+
+int omap4iss_resizer_register_entities(struct iss_resizer_device *resizer,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video node. */
+ ret = v4l2_device_register_subdev(vdev, &resizer->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap4iss_video_register(&resizer->video_out, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap4iss_resizer_unregister_entities(resizer);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP RESIZER initialisation and cleanup
+ */
+
+/*
+ * omap4iss_resizer_init - RESIZER module initialization.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ *
+ * TODO: Get the initialisation values from platform data.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int omap4iss_resizer_init(struct iss_device *iss)
+{
+ struct iss_resizer_device *resizer = &iss->resizer;
+
+ resizer->state = ISS_PIPELINE_STREAM_STOPPED;
+ init_waitqueue_head(&resizer->wait);
+
+ return resizer_init_entities(resizer);
+}
+
+/*
+ * omap4iss_resizer_cleanup - RESIZER module cleanup.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ */
+void omap4iss_resizer_cleanup(struct iss_device *iss)
+{
+ /* FIXME: are you sure there's nothing to do? */
+}
diff --git a/drivers/staging/media/omap4iss/iss_resizer.h b/drivers/staging/media/omap4iss/iss_resizer.h
new file mode 100644
index 000000000000..3727498b06a3
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_resizer.h
@@ -0,0 +1,75 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP RESIZER module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_RESIZER_H
+#define OMAP4_ISS_RESIZER_H
+
+#include "iss_video.h"
+
+enum resizer_input_entity {
+ RESIZER_INPUT_NONE,
+ RESIZER_INPUT_IPIPE,
+ RESIZER_INPUT_IPIPEIF
+};
+
+#define RESIZER_OUTPUT_MEMORY (1 << 0)
+
+/* Sink and source RESIZER pads */
+#define RESIZER_PAD_SINK 0
+#define RESIZER_PAD_SOURCE_MEM 1
+#define RESIZER_PADS_NUM 2
+
+/*
+ * struct iss_resizer_device - Structure for the RESIZER module to store its own
+ * information
+ * @subdev: V4L2 subdevice
+ * @pads: Sink and source media entity pads
+ * @formats: Active video formats
+ * @input: Active input
+ * @output: Active outputs
+ * @video_out: Output video node
+ * @error: A hardware error occurred during capture
+ * @state: Streaming state
+ * @wait: Wait queue used to stop the module
+ * @stopping: Stopping state
+ */
+struct iss_resizer_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[RESIZER_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[RESIZER_PADS_NUM];
+
+ enum resizer_input_entity input;
+ unsigned int output;
+ struct iss_video video_out;
+ unsigned int error;
+
+ enum iss_pipeline_stream_state state;
+ wait_queue_head_t wait;
+ atomic_t stopping;
+};
+
+struct iss_device;
+
+int omap4iss_resizer_init(struct iss_device *iss);
+void omap4iss_resizer_cleanup(struct iss_device *iss);
+int omap4iss_resizer_register_entities(struct iss_resizer_device *resizer,
+ struct v4l2_device *vdev);
+void omap4iss_resizer_unregister_entities(struct iss_resizer_device *resizer);
+
+int omap4iss_resizer_busy(struct iss_resizer_device *resizer);
+void omap4iss_resizer_isr(struct iss_resizer_device *resizer, u32 events);
+void omap4iss_resizer_restore_context(struct iss_device *iss);
+void omap4iss_resizer_max_rate(struct iss_resizer_device *resizer,
+ unsigned int *max_rate);
+
+#endif /* OMAP4_ISS_RESIZER_H */
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
new file mode 100644
index 000000000000..8c7f35029cd5
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -0,0 +1,1226 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - Generic video node
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/clk.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+
+#include "iss_video.h"
+#include "iss.h"
+
+
+/* -----------------------------------------------------------------------------
+ * Helper functions
+ */
+
+static struct iss_format_info formats[] = {
+ { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_PIX_FMT_GREY, 8, "Greyscale 8 bpp", },
+ { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
+ V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_PIX_FMT_Y10, 10, "Greyscale 10 bpp", },
+ { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
+ V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_PIX_FMT_Y12, 12, "Greyscale 12 bpp", },
+ { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_PIX_FMT_SBGGR8, 8, "BGGR Bayer 8 bpp", },
+ { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
+ V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
+ V4L2_PIX_FMT_SGBRG8, 8, "GBRG Bayer 8 bpp", },
+ { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
+ V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
+ V4L2_PIX_FMT_SGRBG8, 8, "GRBG Bayer 8 bpp", },
+ { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
+ V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
+ V4L2_PIX_FMT_SRGGB8, 8, "RGGB Bayer 8 bpp", },
+ { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SGRBG10_1X10, 0,
+ V4L2_PIX_FMT_SGRBG10DPCM8, 8, "GRBG Bayer 10 bpp DPCM8", },
+ { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
+ V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_PIX_FMT_SBGGR10, 10, "BGGR Bayer 10 bpp", },
+ { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
+ V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
+ V4L2_PIX_FMT_SGBRG10, 10, "GBRG Bayer 10 bpp", },
+ { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
+ V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
+ V4L2_PIX_FMT_SGRBG10, 10, "GRBG Bayer 10 bpp", },
+ { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
+ V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
+ V4L2_PIX_FMT_SRGGB10, 10, "RGGB Bayer 10 bpp", },
+ { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
+ V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_PIX_FMT_SBGGR12, 12, "BGGR Bayer 12 bpp", },
+ { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
+ V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
+ V4L2_PIX_FMT_SGBRG12, 12, "GBRG Bayer 12 bpp", },
+ { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
+ V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
+ V4L2_PIX_FMT_SGRBG12, 12, "GRBG Bayer 12 bpp", },
+ { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
+ V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
+ V4L2_PIX_FMT_SRGGB12, 12, "RGGB Bayer 12 bpp", },
+ { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
+ V4L2_MBUS_FMT_UYVY8_1X16, 0,
+ V4L2_PIX_FMT_UYVY, 16, "YUV 4:2:2 (UYVY)", },
+ { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
+ V4L2_MBUS_FMT_YUYV8_1X16, 0,
+ V4L2_PIX_FMT_YUYV, 16, "YUV 4:2:2 (YUYV)", },
+ { V4L2_MBUS_FMT_YUYV8_1_5X8, V4L2_MBUS_FMT_YUYV8_1_5X8,
+ V4L2_MBUS_FMT_YUYV8_1_5X8, 0,
+ V4L2_PIX_FMT_NV12, 8, "YUV 4:2:0 (NV12)", },
+};
+
+const struct iss_format_info *
+omap4iss_video_format_info(enum v4l2_mbus_pixelcode code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (formats[i].code == code)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * iss_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
+ * @video: ISS video instance
+ * @mbus: v4l2_mbus_framefmt format (input)
+ * @pix: v4l2_pix_format format (output)
+ *
+ * Fill the output pix structure with information from the input mbus format.
+ * The bytesperline and sizeimage fields are computed from the requested bytes
+ * per line value in the pix format and information from the video instance.
+ *
+ * Return the number of padding bytes at end of line.
+ */
+static unsigned int iss_video_mbus_to_pix(const struct iss_video *video,
+ const struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_pix_format *pix)
+{
+ unsigned int bpl = pix->bytesperline;
+ unsigned int min_bpl;
+ unsigned int i;
+
+ memset(pix, 0, sizeof(*pix));
+ pix->width = mbus->width;
+ pix->height = mbus->height;
+
+ /* Skip the last format in the loop so that it will be selected if no
+ * match is found.
+ */
+ for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
+ if (formats[i].code == mbus->code)
+ break;
+ }
+
+ min_bpl = pix->width * ALIGN(formats[i].bpp, 8) / 8;
+
+ /* Clamp the requested bytes per line value. If the maximum bytes per
+ * line value is zero, the module doesn't support user-configurable line
+ * sizes. Override the requested value with the minimum in that case.
+ */
+ if (video->bpl_max)
+ bpl = clamp(bpl, min_bpl, video->bpl_max);
+ else
+ bpl = min_bpl;
+
+ if (!video->bpl_zero_padding || bpl != min_bpl)
+ bpl = ALIGN(bpl, video->bpl_alignment);
+
+ pix->pixelformat = formats[i].pixelformat;
+ pix->bytesperline = bpl;
+ pix->sizeimage = pix->bytesperline * pix->height;
+ pix->colorspace = mbus->colorspace;
+ pix->field = mbus->field;
+
+ /* FIXME: Special case for NV12! We should make this nicer... */
+ if (pix->pixelformat == V4L2_PIX_FMT_NV12)
+ pix->sizeimage += (pix->bytesperline * pix->height) / 2;
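+ /* With the extra chroma plane, e.g. a 1280x720 NV12 capture with
+ * bytesperline == 1280 gets sizeimage = 1280 * 720 * 3 / 2 = 1382400
+ * bytes (illustrative figures).
+ */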
+
+ return bpl - min_bpl;
+}
+
+static void iss_video_pix_to_mbus(const struct v4l2_pix_format *pix,
+ struct v4l2_mbus_framefmt *mbus)
+{
+ unsigned int i;
+
+ memset(mbus, 0, sizeof(*mbus));
+ mbus->width = pix->width;
+ mbus->height = pix->height;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (formats[i].pixelformat == pix->pixelformat)
+ break;
+ }
+
+ if (WARN_ON(i == ARRAY_SIZE(formats)))
+ return;
+
+ mbus->code = formats[i].code;
+ mbus->colorspace = pix->colorspace;
+ mbus->field = pix->field;
+}
+
+static struct v4l2_subdev *
+iss_video_remote_subdev(struct iss_video *video, u32 *pad)
+{
+ struct media_pad *remote;
+
+ remote = media_entity_remote_pad(&video->pad);
+
+ if (remote == NULL ||
+ media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ return NULL;
+
+ if (pad)
+ *pad = remote->index;
+
+ return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+/* Return a pointer to the ISS video instance at the far end of the pipeline. */
+static struct iss_video *
+iss_video_far_end(struct iss_video *video)
+{
+ struct media_entity_graph graph;
+ struct media_entity *entity = &video->video.entity;
+ struct media_device *mdev = entity->parent;
+ struct iss_video *far_end = NULL;
+
+ mutex_lock(&mdev->graph_mutex);
+ media_entity_graph_walk_start(&graph, entity);
+
+ while ((entity = media_entity_graph_walk_next(&graph))) {
+ if (entity == &video->video.entity)
+ continue;
+
+ if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
+ continue;
+
+ far_end = to_iss_video(media_entity_to_video_device(entity));
+ if (far_end->type != video->type)
+ break;
+
+ far_end = NULL;
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+ return far_end;
+}
+
+static int
+__iss_video_get_format(struct iss_video *video,
+ struct v4l2_mbus_framefmt *format)
+{
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ u32 pad;
+ int ret;
+
+ subdev = iss_video_remote_subdev(video, &pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ memset(&fmt, 0, sizeof(fmt));
+ fmt.pad = pad;
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+
+ mutex_lock(&video->mutex);
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ mutex_unlock(&video->mutex);
+
+ if (ret)
+ return ret;
+
+ *format = fmt.format;
+ return 0;
+}
+
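+/*
+ * iss_video_check_format - Verify the file handle format against the pipeline
+ * @video: ISS video instance
+ * @vfh: ISS video file handle
+ *
+ * Compare the format requested on the file handle with the active format on
+ * the connected subdev pad. Return the number of padding bytes at the end of
+ * line on success, or -EINVAL if the formats don't match.
+ */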
+static int
+iss_video_check_format(struct iss_video *video, struct iss_video_fh *vfh)
+{
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_pix_format pixfmt;
+ int ret;
+
+ ret = __iss_video_get_format(video, &format);
+ if (ret < 0)
+ return ret;
+
+ pixfmt.bytesperline = 0;
+ ret = iss_video_mbus_to_pix(video, &format, &pixfmt);
+
+ if (vfh->format.fmt.pix.pixelformat != pixfmt.pixelformat ||
+ vfh->format.fmt.pix.height != pixfmt.height ||
+ vfh->format.fmt.pix.width != pixfmt.width ||
+ vfh->format.fmt.pix.bytesperline != pixfmt.bytesperline ||
+ vfh->format.fmt.pix.sizeimage != pixfmt.sizeimage)
+ return -EINVAL;
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Video queue operations
+ */
+
+static int iss_video_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *count, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct iss_video_fh *vfh = vb2_get_drv_priv(vq);
+ struct iss_video *video = vfh->video;
+
+ /* Revisit multi-planar support for NV12 */
+ *num_planes = 1;
+
+ sizes[0] = vfh->format.fmt.pix.sizeimage;
+ if (sizes[0] == 0)
+ return -EINVAL;
+
+ alloc_ctxs[0] = video->alloc_ctx;
+
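+ /* Limit the number of buffers so the total amount of allocated capture
+ * memory doesn't exceed video->capture_mem.
+ */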
+ *count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));
+
+ return 0;
+}
+
+static void iss_video_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
+
+ if (buffer->iss_addr)
+ buffer->iss_addr = 0;
+}
+
+static int iss_video_buf_prepare(struct vb2_buffer *vb)
+{
+ struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
+ struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
+ struct iss_video *video = vfh->video;
+ unsigned long size = vfh->format.fmt.pix.sizeimage;
+ dma_addr_t addr;
+
+ if (vb2_plane_size(vb, 0) < size)
+ return -ENOBUFS;
+
+ /* Refuse to prepare the buffer if the video node has registered an
+ * error. We don't need to take any lock here as the operation is
+ * inherently racy. The authoritative check will be performed in the
+ * queue handler, which can't return an error; this check is just a
+ * best-effort attempt to notify userspace as early as possible.
+ */
+ if (unlikely(video->error))
+ return -EIO;
+
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ if (!IS_ALIGNED(addr, 32)) {
+ dev_dbg(video->iss->dev,
+ "Buffer address must be aligned to 32 bytes boundary.\n");
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+ buffer->iss_addr = addr;
+ return 0;
+}
+
+static void iss_video_buf_queue(struct vb2_buffer *vb)
+{
+ struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
+ struct iss_video *video = vfh->video;
+ struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
+ struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
+ unsigned long flags;
+ bool empty;
+
+ spin_lock_irqsave(&video->qlock, flags);
+
+ if (unlikely(video->error)) {
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&video->qlock, flags);
+ return;
+ }
+
+ empty = list_empty(&video->dmaqueue);
+ list_add_tail(&buffer->list, &video->dmaqueue);
+
+ spin_unlock_irqrestore(&video->qlock, flags);
+
+ if (empty) {
+ enum iss_pipeline_state state;
+ unsigned int start;
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ state = ISS_PIPELINE_QUEUE_OUTPUT;
+ else
+ state = ISS_PIPELINE_QUEUE_INPUT;
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state |= state;
+ video->ops->queue(video, buffer);
+ video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_QUEUED;
+
+ start = iss_pipeline_ready(pipe);
+ if (start)
+ pipe->state |= ISS_PIPELINE_STREAM;
+ spin_unlock_irqrestore(&pipe->lock, flags);
+
+ if (start)
+ omap4iss_pipeline_set_stream(pipe,
+ ISS_PIPELINE_STREAM_SINGLESHOT);
+ }
+}
+
+static struct vb2_ops iss_video_vb2ops = {
+ .queue_setup = iss_video_queue_setup,
+ .buf_prepare = iss_video_buf_prepare,
+ .buf_queue = iss_video_buf_queue,
+ .buf_cleanup = iss_video_buf_cleanup,
+};
+
+/*
+ * omap4iss_video_buffer_next - Complete the current buffer and return the next
+ * @video: ISS video object
+ *
+ * Remove the current video buffer from the DMA queue and fill its timestamp,
+ * field count and state fields before waking up its completion handler.
+ *
+ * For capture video nodes, the buffer state is set to VB2_BUF_STATE_DONE if no
+ * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
+ *
+ * The DMA queue is expected to contain at least one buffer.
+ *
+ * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
+ * empty.
+ */
+struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video)
+{
+ struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
+ enum iss_pipeline_state state;
+ struct iss_buffer *buf;
+ unsigned long flags;
+ struct timespec ts;
+
+ spin_lock_irqsave(&video->qlock, flags);
+ if (WARN_ON(list_empty(&video->dmaqueue))) {
+ spin_unlock_irqrestore(&video->qlock, flags);
+ return NULL;
+ }
+
+ buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
+ list);
+ list_del(&buf->list);
+ spin_unlock_irqrestore(&video->qlock, flags);
+
+ ktime_get_ts(&ts);
+ buf->vb.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
+ buf->vb.v4l2_buf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+
+ /* Do frame number propagation only if this is the output video node.
+ * Frame number either comes from the CSI receivers or it gets
+ * incremented here if H3A is not active.
+ * Note: There is no guarantee that the output buffer will finish
+ * first, so the input number might lag behind by 1 in some cases.
+ */
+ if (video == pipe->output && !pipe->do_propagation)
+ buf->vb.v4l2_buf.sequence =
+ atomic_inc_return(&pipe->frame_number);
+ else
+ buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number);
+
+ vb2_buffer_done(&buf->vb, pipe->error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ pipe->error = false;
+
+ spin_lock_irqsave(&video->qlock, flags);
+ if (list_empty(&video->dmaqueue)) {
+ spin_unlock_irqrestore(&video->qlock, flags);
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ state = ISS_PIPELINE_QUEUE_OUTPUT
+ | ISS_PIPELINE_STREAM;
+ else
+ state = ISS_PIPELINE_QUEUE_INPUT
+ | ISS_PIPELINE_STREAM;
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state &= ~state;
+ if (video->pipe.stream_state == ISS_PIPELINE_STREAM_CONTINUOUS)
+ video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_UNDERRUN;
+ spin_unlock_irqrestore(&pipe->lock, flags);
+ return NULL;
+ }
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
+ spin_lock(&pipe->lock);
+ pipe->state &= ~ISS_PIPELINE_STREAM;
+ spin_unlock(&pipe->lock);
+ }
+
+ buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
+ list);
+ spin_unlock_irqrestore(&video->qlock, flags);
+ buf->vb.state = VB2_BUF_STATE_ACTIVE;
+ return buf;
+}
+
+/*
+ * omap4iss_video_cancel_stream - Cancel stream on a video node
+ * @video: ISS video object
+ *
+ * Cancelling a stream marks all buffers on the video node as erroneous and
+ * makes sure no new buffer can be queued.
+ */
+void omap4iss_video_cancel_stream(struct iss_video *video)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&video->qlock, flags);
+
+ while (!list_empty(&video->dmaqueue)) {
+ struct iss_buffer *buf;
+
+ buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
+ list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ video->error = true;
+
+ spin_unlock_irqrestore(&video->qlock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int
+iss_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ struct iss_video *video = video_drvdata(file);
+
+ strlcpy(cap->driver, ISS_VIDEO_DRIVER_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, video->video.name, sizeof(cap->card));
+ strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ else
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
+ | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT;
+
+ return 0;
+}
+
+static int
+iss_video_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct iss_video *video = video_drvdata(file);
+ struct v4l2_mbus_framefmt format;
+ unsigned int index = f->index;
+ unsigned int i;
+ int ret;
+
+ if (f->type != video->type)
+ return -EINVAL;
+
+ ret = __iss_video_get_format(video, &format);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ const struct iss_format_info *info = &formats[i];
+
+ if (format.code != info->code)
+ continue;
+
+ if (index == 0) {
+ f->pixelformat = info->pixelformat;
+ strlcpy(f->description, info->description,
+ sizeof(f->description));
+ return 0;
+ }
+
+ index--;
+ }
+
+ return -EINVAL;
+}
+
+static int
+iss_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+ struct iss_video *video = video_drvdata(file);
+
+ if (format->type != video->type)
+ return -EINVAL;
+
+ mutex_lock(&video->mutex);
+ *format = vfh->format;
+ mutex_unlock(&video->mutex);
+
+ return 0;
+}
+
+static int
+iss_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+ struct iss_video *video = video_drvdata(file);
+ struct v4l2_mbus_framefmt fmt;
+
+ if (format->type != video->type)
+ return -EINVAL;
+
+ mutex_lock(&video->mutex);
+
+ /* Fill the bytesperline and sizeimage fields by converting to media bus
+ * format and back to pixel format.
+ */
+ iss_video_pix_to_mbus(&format->fmt.pix, &fmt);
+ iss_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
+
+ vfh->format = *format;
+
+ mutex_unlock(&video->mutex);
+ return 0;
+}
+
+static int
+iss_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct iss_video *video = video_drvdata(file);
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ u32 pad;
+ int ret;
+
+ if (format->type != video->type)
+ return -EINVAL;
+
+ subdev = iss_video_remote_subdev(video, &pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ iss_video_pix_to_mbus(&format->fmt.pix, &fmt.format);
+
+ fmt.pad = pad;
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret)
+ return ret;
+
+ iss_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
+ return 0;
+}
+
+static int
+iss_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
+{
+ struct iss_video *video = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = iss_video_remote_subdev(video, NULL);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ mutex_lock(&video->mutex);
+ ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
+ mutex_unlock(&video->mutex);
+
+ return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
+}
+
+static int
+iss_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+ struct iss_video *video = video_drvdata(file);
+ struct v4l2_subdev_format format;
+ struct v4l2_subdev *subdev;
+ u32 pad;
+ int ret;
+
+ subdev = iss_video_remote_subdev(video, &pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ /* Try the get crop operation first and fall back to get format if it is
+ * not implemented.
+ */
+ ret = v4l2_subdev_call(subdev, video, g_crop, crop);
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ format.pad = pad;
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
+
+ crop->c.left = 0;
+ crop->c.top = 0;
+ crop->c.width = format.format.width;
+ crop->c.height = format.format.height;
+
+ return 0;
+}
+
+static int
+iss_video_set_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
+{
+ struct iss_video *video = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = iss_video_remote_subdev(video, NULL);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ mutex_lock(&video->mutex);
+ ret = v4l2_subdev_call(subdev, video, s_crop, crop);
+ mutex_unlock(&video->mutex);
+
+ return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
+}
+
+static int
+iss_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+ struct iss_video *video = video_drvdata(file);
+
+ if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ video->type != a->type)
+ return -EINVAL;
+
+ memset(a, 0, sizeof(*a));
+ a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ a->parm.output.timeperframe = vfh->timeperframe;
+
+ return 0;
+}
+
+static int
+iss_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+ struct iss_video *video = video_drvdata(file);
+
+ if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ video->type != a->type)
+ return -EINVAL;
+
+ if (a->parm.output.timeperframe.denominator == 0)
+ a->parm.output.timeperframe.denominator = 1;
+
+ vfh->timeperframe = a->parm.output.timeperframe;
+
+ return 0;
+}
+
+static int
+iss_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+
+ return vb2_reqbufs(&vfh->queue, rb);
+}
+
+static int
+iss_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+
+ return vb2_querybuf(&vfh->queue, b);
+}
+
+static int
+iss_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+
+ return vb2_qbuf(&vfh->queue, b);
+}
+
+static int
+iss_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+
+ return vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
+}
+
+/*
+ * Stream management
+ *
+ * Every ISS pipeline has a single input and a single output. The input can be
+ * either a sensor or a video node. The output is always a video node.
+ *
+ * As every pipeline has an output video node, the ISS video objects at the
+ * pipeline output stores the pipeline state. It tracks the streaming state of
+ * both the input and output, as well as the availability of buffers.
+ *
+ * In sensor-to-memory mode, frames are always available at the pipeline input.
+ * Starting the sensor usually requires I2C transfers and must be done in
+ * interruptible context. The pipeline is started and stopped synchronously
+ * to the stream on/off commands. All modules in the pipeline will get their
+ * subdev set stream handler called. The module at the end of the pipeline must
+ * delay starting the hardware until buffers are available at its output.
+ *
+ * In memory-to-memory mode, starting/stopping the stream requires
+ * synchronization between the input and output. ISS modules can't be stopped
+ * in the middle of a frame, and at least some of the modules seem to become
+ * busy as soon as they're started, even if they don't receive a frame start
+ * event. For that reason frames need to be processed in single-shot mode. The
+ * driver needs to wait until a frame is completely processed and written to
+ * memory before restarting the pipeline for the next frame. Pipelined
+ * processing might be possible but requires more testing.
+ *
+ * Stream start must be delayed until buffers are available at both the input
+ * and output. The pipeline must be started in the videobuf queue callback with
+ * the buffer queue spinlock held. The modules' subdev set stream operation must
+ * not sleep.
+ */
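+/*
+ * A typical userspace capture sequence driving this path (illustrative, using
+ * the standard V4L2 ioctls wired up in iss_video_ioctl_ops below) is:
+ *
+ *	VIDIOC_S_FMT      -> iss_video_set_format()
+ *	VIDIOC_REQBUFS    -> iss_video_reqbufs()
+ *	VIDIOC_QBUF       -> iss_video_qbuf()
+ *	VIDIOC_STREAMON   -> iss_video_streamon()
+ *	VIDIOC_DQBUF/QBUF -> iss_video_dqbuf()/iss_video_qbuf() in a loop
+ *	VIDIOC_STREAMOFF  -> iss_video_streamoff()
+ */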
+static int
+iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+ struct iss_video *video = video_drvdata(file);
+ struct media_entity_graph graph;
+ struct media_entity *entity;
+ enum iss_pipeline_state state;
+ struct iss_pipeline *pipe;
+ struct iss_video *far_end;
+ unsigned long flags;
+ int ret;
+
+ if (type != video->type)
+ return -EINVAL;
+
+ mutex_lock(&video->stream_lock);
+
+ /* Start streaming on the pipeline. No link touching an entity in the
+ * pipeline can be activated or deactivated once streaming is started.
+ */
+ pipe = video->video.entity.pipe
+ ? to_iss_pipeline(&video->video.entity) : &video->pipe;
+ pipe->external = NULL;
+ pipe->external_rate = 0;
+ pipe->external_bpp = 0;
+ pipe->entities = 0;
+
+ if (video->iss->pdata->set_constraints)
+ video->iss->pdata->set_constraints(video->iss, true);
+
+ ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
+ if (ret < 0)
+ goto err_media_entity_pipeline_start;
+
+ entity = &video->video.entity;
+ media_entity_graph_walk_start(&graph, entity);
+ while ((entity = media_entity_graph_walk_next(&graph)))
+ pipe->entities |= 1 << entity->id;
+
+ /* Verify that the currently configured format matches the output of
+ * the connected subdev.
+ */
+ ret = iss_video_check_format(video, vfh);
+ if (ret < 0)
+ goto err_iss_video_check_format;
+
+ video->bpl_padding = ret;
+ video->bpl_value = vfh->format.fmt.pix.bytesperline;
+
+ /* Find the ISS video node connected at the far end of the pipeline and
+ * update the pipeline.
+ */
+ far_end = iss_video_far_end(video);
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ state = ISS_PIPELINE_STREAM_OUTPUT | ISS_PIPELINE_IDLE_OUTPUT;
+ pipe->input = far_end;
+ pipe->output = video;
+ } else {
+ if (far_end == NULL) {
+ ret = -EPIPE;
+ goto err_iss_video_check_format;
+ }
+
+ state = ISS_PIPELINE_STREAM_INPUT | ISS_PIPELINE_IDLE_INPUT;
+ pipe->input = video;
+ pipe->output = far_end;
+ }
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state &= ~ISS_PIPELINE_STREAM;
+ pipe->state |= state;
+ spin_unlock_irqrestore(&pipe->lock, flags);
+
+ /* Set the maximum time per frame as the value requested by userspace.
+ * This is a soft limit that can be overridden if the hardware doesn't
+ * support the requested limit.
+ */
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ pipe->max_timeperframe = vfh->timeperframe;
+
+ video->queue = &vfh->queue;
+ INIT_LIST_HEAD(&video->dmaqueue);
+ spin_lock_init(&video->qlock);
+ video->error = false;
+ atomic_set(&pipe->frame_number, -1);
+
+ ret = vb2_streamon(&vfh->queue, type);
+ if (ret < 0)
+ goto err_iss_video_check_format;
+
+ /* In sensor-to-memory mode, the stream can be started synchronously
+ * to the stream on command. In memory-to-memory mode, it will be
+ * started when buffers are queued on both the input and output.
+ */
+ if (pipe->input == NULL) {
+ unsigned long flags;
+ ret = omap4iss_pipeline_set_stream(pipe,
+ ISS_PIPELINE_STREAM_CONTINUOUS);
+ if (ret < 0)
+ goto err_omap4iss_set_stream;
+ spin_lock_irqsave(&video->qlock, flags);
+ if (list_empty(&video->dmaqueue))
+ video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_UNDERRUN;
+ spin_unlock_irqrestore(&video->qlock, flags);
+ }
+
+ mutex_unlock(&video->stream_lock);
+ return 0;
+
+err_omap4iss_set_stream:
+ vb2_streamoff(&vfh->queue, type);
+err_iss_video_check_format:
+ media_entity_pipeline_stop(&video->video.entity);
+err_media_entity_pipeline_start:
+ if (video->iss->pdata->set_constraints)
+ video->iss->pdata->set_constraints(video->iss, false);
+ video->queue = NULL;
+
+ mutex_unlock(&video->stream_lock);
+ return ret;
+}
+
+static int
+iss_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+ struct iss_video *video = video_drvdata(file);
+ struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
+ enum iss_pipeline_state state;
+ unsigned long flags;
+
+ if (type != video->type)
+ return -EINVAL;
+
+ mutex_lock(&video->stream_lock);
+
+ if (!vb2_is_streaming(&vfh->queue))
+ goto done;
+
+ /* Update the pipeline state. */
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ state = ISS_PIPELINE_STREAM_OUTPUT
+ | ISS_PIPELINE_QUEUE_OUTPUT;
+ else
+ state = ISS_PIPELINE_STREAM_INPUT
+ | ISS_PIPELINE_QUEUE_INPUT;
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state &= ~state;
+ spin_unlock_irqrestore(&pipe->lock, flags);
+
+ /* Stop the stream. */
+ omap4iss_pipeline_set_stream(pipe, ISS_PIPELINE_STREAM_STOPPED);
+ vb2_streamoff(&vfh->queue, type);
+ video->queue = NULL;
+
+ if (video->iss->pdata->set_constraints)
+ video->iss->pdata->set_constraints(video->iss, false);
+ media_entity_pipeline_stop(&video->video.entity);
+
+done:
+ mutex_unlock(&video->stream_lock);
+ return 0;
+}
+
+static int
+iss_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
+{
+ if (input->index > 0)
+ return -EINVAL;
+
+ strlcpy(input->name, "camera", sizeof(input->name));
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+
+ return 0;
+}
+
+static int
+iss_video_g_input(struct file *file, void *fh, unsigned int *input)
+{
+ *input = 0;
+
+ return 0;
+}
+
+static int
+iss_video_s_input(struct file *file, void *fh, unsigned int input)
+{
+ return input == 0 ? 0 : -EINVAL;
+}
+
+static const struct v4l2_ioctl_ops iss_video_ioctl_ops = {
+ .vidioc_querycap = iss_video_querycap,
+ .vidioc_enum_fmt_vid_cap = iss_video_enum_format,
+ .vidioc_g_fmt_vid_cap = iss_video_get_format,
+ .vidioc_s_fmt_vid_cap = iss_video_set_format,
+ .vidioc_try_fmt_vid_cap = iss_video_try_format,
+ .vidioc_g_fmt_vid_out = iss_video_get_format,
+ .vidioc_s_fmt_vid_out = iss_video_set_format,
+ .vidioc_try_fmt_vid_out = iss_video_try_format,
+ .vidioc_cropcap = iss_video_cropcap,
+ .vidioc_g_crop = iss_video_get_crop,
+ .vidioc_s_crop = iss_video_set_crop,
+ .vidioc_g_parm = iss_video_get_param,
+ .vidioc_s_parm = iss_video_set_param,
+ .vidioc_reqbufs = iss_video_reqbufs,
+ .vidioc_querybuf = iss_video_querybuf,
+ .vidioc_qbuf = iss_video_qbuf,
+ .vidioc_dqbuf = iss_video_dqbuf,
+ .vidioc_streamon = iss_video_streamon,
+ .vidioc_streamoff = iss_video_streamoff,
+ .vidioc_enum_input = iss_video_enum_input,
+ .vidioc_g_input = iss_video_g_input,
+ .vidioc_s_input = iss_video_s_input,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 file operations
+ */
+
+static int iss_video_open(struct file *file)
+{
+ struct iss_video *video = video_drvdata(file);
+ struct iss_video_fh *handle;
+ struct vb2_queue *q;
+ int ret = 0;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (handle == NULL)
+ return -ENOMEM;
+
+ v4l2_fh_init(&handle->vfh, &video->video);
+ v4l2_fh_add(&handle->vfh);
+
+ /* If this is the first user, initialise the pipeline. */
+ if (omap4iss_get(video->iss) == NULL) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ ret = omap4iss_pipeline_pm_use(&video->video.entity, 1);
+ if (ret < 0) {
+ omap4iss_put(video->iss);
+ goto done;
+ }
+
+ video->alloc_ctx = vb2_dma_contig_init_ctx(video->iss->dev);
+ if (IS_ERR(video->alloc_ctx)) {
+ ret = PTR_ERR(video->alloc_ctx);
+ omap4iss_put(video->iss);
+ goto done;
+ }
+
+ q = &handle->queue;
+
+ q->type = video->type;
+ q->io_modes = VB2_MMAP;
+ q->drv_priv = handle;
+ q->ops = &iss_video_vb2ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct iss_buffer);
+ q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
+ ret = vb2_queue_init(q);
+ if (ret) {
+ omap4iss_put(video->iss);
+ goto done;
+ }
+
+ memset(&handle->format, 0, sizeof(handle->format));
+ handle->format.type = video->type;
+ handle->timeperframe.denominator = 1;
+
+ handle->video = video;
+ file->private_data = &handle->vfh;
+
+done:
+ if (ret < 0) {
+ v4l2_fh_del(&handle->vfh);
+ kfree(handle);
+ }
+
+ return ret;
+}
+
+static int iss_video_release(struct file *file)
+{
+ struct iss_video *video = video_drvdata(file);
+ struct v4l2_fh *vfh = file->private_data;
+ struct iss_video_fh *handle = to_iss_video_fh(vfh);
+
+ /* Disable streaming and free the buffers queue resources. */
+ iss_video_streamoff(file, vfh, video->type);
+
+ omap4iss_pipeline_pm_use(&video->video.entity, 0);
+
+ /* Release the videobuf2 queue */
+ vb2_queue_release(&handle->queue);
+
+ /* Release the file handle. */
+ v4l2_fh_del(vfh);
+ kfree(handle);
+ file->private_data = NULL;
+
+ omap4iss_put(video->iss);
+
+ return 0;
+}
+
+static unsigned int iss_video_poll(struct file *file, poll_table *wait)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(file->private_data);
+
+ return vb2_poll(&vfh->queue, file, wait);
+}
+
+static int iss_video_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(file->private_data);
+
+ return vb2_mmap(&vfh->queue, vma);
+}
+
+static struct v4l2_file_operations iss_video_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = iss_video_open,
+ .release = iss_video_release,
+ .poll = iss_video_poll,
+ .mmap = iss_video_mmap,
+};
+
+/* -----------------------------------------------------------------------------
+ * ISS video core
+ */
+
+static const struct iss_video_operations iss_video_dummy_ops = {
+};
+
+int omap4iss_video_init(struct iss_video *video, const char *name)
+{
+ const char *direction;
+ int ret;
+
+ switch (video->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ direction = "output";
+ video->pad.flags = MEDIA_PAD_FL_SINK;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ direction = "input";
+ video->pad.flags = MEDIA_PAD_FL_SOURCE;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
+ if (ret < 0)
+ return ret;
+
+ mutex_init(&video->mutex);
+ atomic_set(&video->active, 0);
+
+ spin_lock_init(&video->pipe.lock);
+ mutex_init(&video->stream_lock);
+
+ /* Initialize the video device. */
+ if (video->ops == NULL)
+ video->ops = &iss_video_dummy_ops;
+
+ video->video.fops = &iss_video_fops;
+ snprintf(video->video.name, sizeof(video->video.name),
+ "OMAP4 ISS %s %s", name, direction);
+ video->video.vfl_type = VFL_TYPE_GRABBER;
+ video->video.release = video_device_release_empty;
+ video->video.ioctl_ops = &iss_video_ioctl_ops;
+ video->pipe.stream_state = ISS_PIPELINE_STREAM_STOPPED;
+
+ video_set_drvdata(&video->video, video);
+
+ return 0;
+}
+
+void omap4iss_video_cleanup(struct iss_video *video)
+{
+ media_entity_cleanup(&video->video.entity);
+ mutex_destroy(&video->stream_lock);
+ mutex_destroy(&video->mutex);
+}
+
+int omap4iss_video_register(struct iss_video *video, struct v4l2_device *vdev)
+{
+ int ret;
+
+ video->video.v4l2_dev = vdev;
+
+ ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
+ if (ret < 0)
+ dev_err(video->iss->dev,
+ "%s: could not register video device (%d)\n",
+ __func__, ret);
+
+ return ret;
+}
+
+void omap4iss_video_unregister(struct iss_video *video)
+{
+ video_unregister_device(&video->video);
+}
diff --git a/drivers/staging/media/omap4iss/iss_video.h b/drivers/staging/media/omap4iss/iss_video.h
new file mode 100644
index 000000000000..878e4a3082e7
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_video.h
@@ -0,0 +1,204 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - Generic video node
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_VIDEO_H
+#define OMAP4_ISS_VIDEO_H
+
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define ISS_VIDEO_DRIVER_NAME "issvideo"
+#define ISS_VIDEO_DRIVER_VERSION "0.0.2"
+
+struct iss_device;
+struct iss_video;
+struct v4l2_mbus_framefmt;
+struct v4l2_pix_format;
+
+/*
+ * struct iss_format_info - ISS media bus format information
+ * @code: V4L2 media bus format code
+ * @truncated: V4L2 media bus format code for the same format truncated to 10
+ * bits. Identical to @code if the format is 10 bits wide or less.
+ * @uncompressed: V4L2 media bus format code for the corresponding uncompressed
+ * format. Identical to @code if the format is not DPCM compressed.
+ * @flavor: V4L2 media bus format code for the same pixel layout but
+ * shifted to be 8 bits per pixel. =0 if format is not shiftable.
+ * @pixelformat: V4L2 pixel format FCC identifier
+ * @bpp: Bits per pixel
+ * @description: Human-readable format description
+ */
+struct iss_format_info {
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_mbus_pixelcode truncated;
+ enum v4l2_mbus_pixelcode uncompressed;
+ enum v4l2_mbus_pixelcode flavor;
+ u32 pixelformat;
+ unsigned int bpp;
+ const char *description;
+};
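For illustration, a possible table entry following the field semantics documented
above; the entry is hypothetical and not part of this patch, only the standard
V4L2 media-bus and pixel-format identifiers are assumed:

    static const struct iss_format_info example_format = {
            .code         = V4L2_MBUS_FMT_SGRBG10_1X10,
            .truncated    = V4L2_MBUS_FMT_SGRBG10_1X10, /* already 10 bits wide */
            .uncompressed = V4L2_MBUS_FMT_SGRBG10_1X10, /* not DPCM compressed */
            .flavor       = V4L2_MBUS_FMT_SGRBG8_1X8,   /* 8-bit shifted layout */
            .pixelformat  = V4L2_PIX_FMT_SGRBG10,
            .bpp          = 10,
            .description  = "Bayer 10-bit GRBG",
    };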
+
+enum iss_pipeline_stream_state {
+ ISS_PIPELINE_STREAM_STOPPED = 0,
+ ISS_PIPELINE_STREAM_CONTINUOUS = 1,
+ ISS_PIPELINE_STREAM_SINGLESHOT = 2,
+};
+
+enum iss_pipeline_state {
+ /* The stream has been started on the input video node. */
+ ISS_PIPELINE_STREAM_INPUT = 1,
+ /* The stream has been started on the output video node. */
+ ISS_PIPELINE_STREAM_OUTPUT = (1 << 1),
+ /* At least one buffer is queued on the input video node. */
+ ISS_PIPELINE_QUEUE_INPUT = (1 << 2),
+ /* At least one buffer is queued on the output video node. */
+ ISS_PIPELINE_QUEUE_OUTPUT = (1 << 3),
+ /* The input entity is idle, ready to be started. */
+ ISS_PIPELINE_IDLE_INPUT = (1 << 4),
+ /* The output entity is idle, ready to be started. */
+ ISS_PIPELINE_IDLE_OUTPUT = (1 << 5),
+ /* The pipeline is currently streaming. */
+ ISS_PIPELINE_STREAM = (1 << 6),
+};
+
+/*
+ * struct iss_pipeline - An OMAP4 ISS hardware pipeline
+ * @entities: Bitmask of entities in the pipeline (indexed by entity ID)
+ * @error: A hardware error occurred during capture
+ */
+struct iss_pipeline {
+ struct media_pipeline pipe;
+ spinlock_t lock; /* Pipeline state and queue flags */
+ unsigned int state;
+ enum iss_pipeline_stream_state stream_state;
+ struct iss_video *input;
+ struct iss_video *output;
+ unsigned int entities;
+ atomic_t frame_number;
+ bool do_propagation; /* of frame number */
+ bool error;
+ struct v4l2_fract max_timeperframe;
+ struct v4l2_subdev *external;
+ unsigned int external_rate;
+ int external_bpp;
+};
+
+#define to_iss_pipeline(__e) \
+ container_of((__e)->pipe, struct iss_pipeline, pipe)
+
+static inline int iss_pipeline_ready(struct iss_pipeline *pipe)
+{
+ return pipe->state == (ISS_PIPELINE_STREAM_INPUT |
+ ISS_PIPELINE_STREAM_OUTPUT |
+ ISS_PIPELINE_QUEUE_INPUT |
+ ISS_PIPELINE_QUEUE_OUTPUT |
+ ISS_PIPELINE_IDLE_INPUT |
+ ISS_PIPELINE_IDLE_OUTPUT);
+}
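A rough illustration of how these bits accumulate during stream setup
(hypothetical helper; locking through pipe->lock is omitted for brevity):

    static void example_pipeline_state_walkthrough(struct iss_pipeline *pipe)
    {
            /* streamon on the capture video node */
            pipe->state |= ISS_PIPELINE_STREAM_OUTPUT | ISS_PIPELINE_IDLE_OUTPUT;
            /* streamon on the output (memory input) video node */
            pipe->state |= ISS_PIPELINE_STREAM_INPUT | ISS_PIPELINE_IDLE_INPUT;
            /* first buffer queued on each video node */
            pipe->state |= ISS_PIPELINE_QUEUE_OUTPUT | ISS_PIPELINE_QUEUE_INPUT;
            /*
             * All six bits are now set and ISS_PIPELINE_STREAM is still clear,
             * so iss_pipeline_ready() returns non-zero.
             */
    }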
+
+/*
+ * struct iss_buffer - ISS buffer
+ * @vb: videobuf2 buffer
+ * @iss_addr: Physical address of the buffer.
+ */
+struct iss_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_buffer vb;
+ struct list_head list;
+ dma_addr_t iss_addr;
+};
+
+#define to_iss_buffer(buf)	container_of(buf, struct iss_buffer, vb)
+
+enum iss_video_dmaqueue_flags {
+ /* Set if DMA queue becomes empty when ISS_PIPELINE_STREAM_CONTINUOUS */
+ ISS_VIDEO_DMAQUEUE_UNDERRUN = (1 << 0),
+ /* Set when queuing buffer to an empty DMA queue */
+ ISS_VIDEO_DMAQUEUE_QUEUED = (1 << 1),
+};
+
+#define iss_video_dmaqueue_flags_clr(video) \
+ ({ (video)->dmaqueue_flags = 0; })
+
+/*
+ * struct iss_video_operations - ISS video operations
+ * @queue: Resume streaming when a buffer is queued. Called on VIDIOC_QBUF
+ * if there was no buffer previously queued.
+ */
+struct iss_video_operations {
+	int (*queue)(struct iss_video *video, struct iss_buffer *buffer);
+};
+
+struct iss_video {
+ struct video_device video;
+ enum v4l2_buf_type type;
+ struct media_pad pad;
+
+ struct mutex mutex; /* format and crop settings */
+ atomic_t active;
+
+ struct iss_device *iss;
+
+ unsigned int capture_mem;
+ unsigned int bpl_alignment; /* alignment value */
+ unsigned int bpl_zero_padding; /* whether the alignment is optional */
+ unsigned int bpl_max; /* maximum bytes per line value */
+ unsigned int bpl_value; /* bytes per line value */
+ unsigned int bpl_padding; /* padding at end of line */
+
+ /* Pipeline state */
+ struct iss_pipeline pipe;
+ struct mutex stream_lock; /* pipeline and stream states */
+ bool error;
+
+ /* Video buffers queue */
+ struct vb2_queue *queue;
+ spinlock_t qlock; /* protects dmaqueue and error */
+ struct list_head dmaqueue;
+ enum iss_video_dmaqueue_flags dmaqueue_flags;
+ struct vb2_alloc_ctx *alloc_ctx;
+
+ const struct iss_video_operations *ops;
+};
+
+#define to_iss_video(vdev) container_of(vdev, struct iss_video, video)
+
+struct iss_video_fh {
+ struct v4l2_fh vfh;
+ struct iss_video *video;
+ struct vb2_queue queue;
+ struct v4l2_format format;
+ struct v4l2_fract timeperframe;
+};
+
+#define to_iss_video_fh(fh) container_of(fh, struct iss_video_fh, vfh)
+#define iss_video_queue_to_iss_video_fh(q) \
+ container_of(q, struct iss_video_fh, queue)
+
+int omap4iss_video_init(struct iss_video *video, const char *name);
+void omap4iss_video_cleanup(struct iss_video *video);
+int omap4iss_video_register(struct iss_video *video,
+ struct v4l2_device *vdev);
+void omap4iss_video_unregister(struct iss_video *video);
+struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video);
+void omap4iss_video_cancel_stream(struct iss_video *video);
+struct media_pad *omap4iss_video_remote_pad(struct iss_video *video);
+
+const struct iss_format_info *
+omap4iss_video_format_info(enum v4l2_mbus_pixelcode code);
+
+#endif /* OMAP4_ISS_VIDEO_H */
diff --git a/drivers/media/usb/sn9c102/Kconfig b/drivers/staging/media/sn9c102/Kconfig
index 6ebaf2940d06..c9aba59258d9 100644
--- a/drivers/media/usb/sn9c102/Kconfig
+++ b/drivers/staging/media/sn9c102/Kconfig
@@ -1,14 +1,17 @@
config USB_SN9C102
tristate "USB SN9C1xx PC Camera Controller support (DEPRECATED)"
- depends on VIDEO_V4L2
+ depends on VIDEO_V4L2 && MEDIA_USB_SUPPORT
---help---
- This driver is DEPRECATED please use the gspca sonixb and
+ This driver is DEPRECATED, please use the gspca sonixb and
sonixj modules instead.
Say Y here if you want support for cameras based on SONiX SN9C101,
SN9C102, SN9C103, SN9C105 and SN9C120 PC Camera Controllers.
- See <file:Documentation/video4linux/sn9c102.txt> for more info.
+ See <file:drivers/staging/media/sn9c102/sn9c102.txt> for more info.
+
+ If you have webcams that are only supported by this driver and not by
+	  the gspca driver, then contact the linux-media mailing list.
To compile this driver as a module, choose M here: the
module will be called sn9c102.
diff --git a/drivers/media/usb/sn9c102/Makefile b/drivers/staging/media/sn9c102/Makefile
index 7ecd5a90c7c9..7ecd5a90c7c9 100644
--- a/drivers/media/usb/sn9c102/Makefile
+++ b/drivers/staging/media/sn9c102/Makefile
diff --git a/drivers/media/usb/sn9c102/sn9c102.h b/drivers/staging/media/sn9c102/sn9c102.h
index 8a917f060503..8a917f060503 100644
--- a/drivers/media/usb/sn9c102/sn9c102.h
+++ b/drivers/staging/media/sn9c102/sn9c102.h
diff --git a/drivers/staging/media/sn9c102/sn9c102.txt b/drivers/staging/media/sn9c102/sn9c102.txt
new file mode 100644
index 000000000000..b4f67040403a
--- /dev/null
+++ b/drivers/staging/media/sn9c102/sn9c102.txt
@@ -0,0 +1,592 @@
+
+ SN9C1xx PC Camera Controllers
+ Driver for Linux
+ =============================
+
+ - Documentation -
+
+
+Index
+=====
+1. Copyright
+2. Disclaimer
+3. License
+4. Overview and features
+5. Module dependencies
+6. Module loading
+7. Module parameters
+8. Optional device control through "sysfs"
+9. Supported devices
+10. Notes for V4L2 application developers
+11. Video frame formats
+12. Contact information
+13. Credits
+
+
+1. Copyright
+============
+Copyright (C) 2004-2007 by Luca Risolia <luca.risolia@studio.unibo.it>
+
+
+2. Disclaimer
+=============
+SONiX is a trademark of SONiX Technology Company Limited, inc.
+This software is not sponsored or developed by SONiX.
+
+
+3. License
+==========
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+4. Overview and features
+========================
+This driver attempts to support the video interface of the devices built around
+the SONiX SN9C101, SN9C102, SN9C103, SN9C105 and SN9C120 PC Camera Controllers
+("SN9C1xx" from now on).
+
+The driver relies on the Video4Linux2 and USB core modules. It has been
+designed to run properly on SMP systems as well.
+
+The latest version of the SN9C1xx driver can be found at the following URL:
+http://www.linux-projects.org/
+
+Some of the features of the driver are:
+
+- full compliance with the Video4Linux2 API (see also "Notes for V4L2
+ application developers" paragraph);
+- available mmap or read/poll methods for video streaming through isochronous
+ data transfers;
+- automatic detection of image sensor;
+- support for built-in microphone interface;
+- support for any window resolutions and optional panning within the maximum
+ pixel area of image sensor;
+- image downscaling with scaling factors of 1, 2 and 4 in both
+ directions (see "Notes for V4L2 application developers" paragraph);
+- two different video formats for uncompressed or compressed data in low or
+ high compression quality (see also "Notes for V4L2 application developers"
+ and "Video frame formats" paragraphs);
+- full support for the capabilities of many of the possible image sensors that
+ can be connected to the SN9C1xx bridges, including, for instance, red, green,
+ blue and global gain adjustments and exposure (see "Supported devices"
+ paragraph for details);
+- use of default color settings for sunlight conditions;
+- dynamic I/O interface for both SN9C1xx and image sensor control and
+ monitoring (see "Optional device control through 'sysfs'" paragraph);
+- dynamic driver control thanks to various module parameters (see "Module
+ parameters" paragraph);
+- up to 64 cameras can be handled at the same time; they can be connected and
+ disconnected from the host many times without turning off the computer, if
+ the system supports hotplugging;
+- no known bugs.
+
+
+5. Module dependencies
+======================
+For it to work properly, the driver needs kernel support for Video4Linux and
+USB.
+
+The following options of the kernel configuration file must be enabled and
+corresponding modules must be compiled:
+
+ # Multimedia devices
+ #
+ CONFIG_VIDEO_DEV=m
+
+To enable advanced debugging functionality on the device through /sysfs:
+
+ # Multimedia devices
+ #
+ CONFIG_VIDEO_ADV_DEBUG=y
+
+ # USB support
+ #
+ CONFIG_USB=m
+
+In addition, depending on the hardware being used, the modules below are
+necessary:
+
+ # USB Host Controller Drivers
+ #
+ CONFIG_USB_EHCI_HCD=m
+ CONFIG_USB_UHCI_HCD=m
+ CONFIG_USB_OHCI_HCD=m
+
+The SN9C103, SN9C105 and SN9C120 controllers also provide a built-in microphone
+interface. It is supported by the USB Audio driver thanks to the ALSA API:
+
+ # Sound
+ #
+ CONFIG_SOUND=y
+
+ # Advanced Linux Sound Architecture
+ #
+ CONFIG_SND=m
+
+ # USB devices
+ #
+ CONFIG_SND_USB_AUDIO=m
+
+And finally:
+
+ # USB Multimedia devices
+ #
+ CONFIG_USB_SN9C102=m
+
+
+6. Module loading
+=================
+To use the driver, it is necessary to load the "sn9c102" module into memory
+after the other required modules: "videodev", "v4l2_common", "compat_ioctl32",
+"usbcore" and, depending on the USB host controller you have, "ehci-hcd",
+"uhci-hcd" or "ohci-hcd".
+
+Loading can be done as shown below:
+
+ [root@localhost home]# modprobe sn9c102
+
+Note that the module is called "sn9c102" for historic reasons, although it
+does not just support the SN9C102.
+
+At this point all the devices supported by the driver and connected to the USB
+ports should be recognized. You can invoke "dmesg" to analyze kernel messages
+and verify that the loading process has gone well:
+
+ [user@localhost home]$ dmesg
+
+or, to isolate all the kernel messages generated by the driver:
+
+ [user@localhost home]$ dmesg | grep sn9c102
+
+
+7. Module parameters
+====================
+Module parameters are listed below:
+-------------------------------------------------------------------------------
+Name: video_nr
+Type: short array (min = 0, max = 64)
+Syntax: <-1|n[,...]>
+Description: Specify the V4L2 minor number:
+ -1 = use next available
+ n = use minor number n
+ You can specify up to 64 cameras this way.
+ For example:
+ video_nr=-1,2,-1 would assign minor number 2 to the second
+ recognized camera and use auto for the first one and for every
+ other camera.
+Default: -1
+-------------------------------------------------------------------------------
+Name: force_munmap
+Type: bool array (min = 0, max = 64)
+Syntax: <0|1[,...]>
+Description: Force the application to unmap previously mapped buffer memory
+ before calling any VIDIOC_S_CROP or VIDIOC_S_FMT ioctl's. Not
+ all the applications support this feature. This parameter is
+ specific for each detected camera.
+ 0 = do not force memory unmapping
+ 1 = force memory unmapping (save memory)
+Default: 0
+-------------------------------------------------------------------------------
+Name: frame_timeout
+Type: uint array (min = 0, max = 64)
+Syntax: <0|n[,...]>
+Description: Timeout for a video frame in seconds before returning an I/O
+ error; 0 for infinity. This parameter is specific for each
+ detected camera and can be changed at runtime thanks to the
+ /sys filesystem interface.
+Default: 2
+-------------------------------------------------------------------------------
+Name: debug
+Type: ushort
+Syntax: <n>
+Description: Debugging information level, from 0 to 3:
+ 0 = none (use carefully)
+ 1 = critical errors
+ 2 = significant information
+ 3 = more verbose messages
+ Level 3 is useful for testing only. It also shows some more
+ information about the hardware being detected.
+ This parameter can be changed at runtime thanks to the /sys
+ filesystem interface.
+Default: 2
+-------------------------------------------------------------------------------
+
+
+8. Optional device control through "sysfs" [1]
+==========================================
+If the kernel has been compiled with the CONFIG_VIDEO_ADV_DEBUG option enabled,
+it is possible to read and write both the SN9C1xx and the image sensor
+registers by using the "sysfs" filesystem interface.
+
+Every time a supported device is recognized, a write-only file named "green" is
+created in the /sys/class/video4linux/videoX directory. You can set the green
+channel's gain by writing the desired value to it. The value may range from 0
+to 15 for the SN9C101 or SN9C102 bridges, and from 0 to 127 for the SN9C103,
+SN9C105 and SN9C120 bridges.
+Similarly, only for the SN9C103, SN9C105 and SN9C120 controllers, blue and red
+gain control files are available in the same directory, for which accepted
+values may range from 0 to 127.
+
+There are four other entries in the directory above for each registered camera:
+"reg", "val", "i2c_reg" and "i2c_val". The first two files control the
+SN9C1xx bridge, while the other two control the sensor chip. "reg" and
+"i2c_reg" hold the current register index to which the subsequent read/write
+operations performed through "val" and "i2c_val" are addressed. Their
+use is not intended for end-users. Note that "i2c_reg" and "i2c_val" will not
+be created if the sensor does not actually support the standard I2C protocol or
+its registers are not 8 bits wide. Also, remember that you must be logged in as
+root before writing to them.
+
+As an example, suppose we want to read the value contained in register number 1
+of the sensor register table - which is usually the product
+identifier - of the camera registered as "/dev/video0":
+
+ [root@localhost #] cd /sys/class/video4linux/video0
+ [root@localhost #] echo 1 > i2c_reg
+ [root@localhost #] cat i2c_val
+
+Note that "cat" will fail if sensor registers cannot be read.
+
+Now let's set the green gain's register of the SN9C101 or SN9C102 chips to 2:
+
+ [root@localhost #] echo 0x11 > reg
+ [root@localhost #] echo 2 > val
+
+Note that the SN9C1xx always returns 0 when some of its registers are read.
+To avoid race conditions, all the I/O accesses to the above files are
+serialized.
+The sysfs interface also provides the "frame_header" entry, which exports the
+frame header of the most recently requested and captured video frame. The header
+is always 18 bytes long and is appended to every video frame by the SN9C1xx
+controllers. As an example, this additional information can be used by the user
+application for implementing auto-exposure features via software.
+
+The following table describes the frame header exported by the SN9C101 and
+SN9C102:
+
+Byte # Value or bits Description
+------ ------------- -----------
+0x00 0xFF Frame synchronisation pattern
+0x01 0xFF Frame synchronisation pattern
+0x02 0x00 Frame synchronisation pattern
+0x03 0xC4 Frame synchronisation pattern
+0x04 0xC4 Frame synchronisation pattern
+0x05 0x96 Frame synchronisation pattern
+0x06 [3:0] Red channel gain control = (1+R_GAIN/8)
+ [7:4] Blue channel gain control = (1+B_GAIN/8)
+0x07 [ 0 ] Compression mode. 0=No compression, 1=Compression enabled
+ [2:1] Maximum scale factor for compression
+ [ 3 ] 1 = USB fifo(2K bytes) is full
+ [ 4 ] 1 = Digital gain is finish
+ [ 5 ] 1 = Exposure is finish
+ [7:6] Frame index
+0x08 [7:0] Y sum inside Auto-Exposure area (low-byte)
+0x09 [7:0] Y sum inside Auto-Exposure area (high-byte)
+ where Y sum = (R/4 + 5G/16 + B/8) / 32
+0x0A [7:0] Y sum outside Auto-Exposure area (low-byte)
+0x0B [7:0] Y sum outside Auto-Exposure area (high-byte)
+ where Y sum = (R/4 + 5G/16 + B/8) / 128
+0x0C 0xXX Not used
+0x0D 0xXX Not used
+0x0E 0xXX Not used
+0x0F 0xXX Not used
+0x10 0xXX Not used
+0x11 0xXX Not used
+
+The following table describes the frame header exported by the SN9C103:
+
+Byte # Value or bits Description
+------ ------------- -----------
+0x00 0xFF Frame synchronisation pattern
+0x01 0xFF Frame synchronisation pattern
+0x02 0x00 Frame synchronisation pattern
+0x03 0xC4 Frame synchronisation pattern
+0x04 0xC4 Frame synchronisation pattern
+0x05 0x96 Frame synchronisation pattern
+0x06 [6:0] Red channel gain control = (1/2+R_GAIN/64)
+0x07 [6:0] Blue channel gain control = (1/2+B_GAIN/64)
+ [7:4]
+0x08 [ 0 ] Compression mode. 0=No compression, 1=Compression enabled
+ [2:1] Maximum scale factor for compression
+ [ 3 ] 1 = USB fifo(2K bytes) is full
+ [ 4 ] 1 = Digital gain is finish
+ [ 5 ] 1 = Exposure is finish
+ [7:6] Frame index
+0x09 [7:0] Y sum inside Auto-Exposure area (low-byte)
+0x0A [7:0] Y sum inside Auto-Exposure area (high-byte)
+ where Y sum = (R/4 + 5G/16 + B/8) / 32
+0x0B [7:0] Y sum outside Auto-Exposure area (low-byte)
+0x0C [7:0] Y sum outside Auto-Exposure area (high-byte)
+ where Y sum = (R/4 + 5G/16 + B/8) / 128
+0x0D [1:0] Audio frame number
+ [ 2 ] 1 = Audio is recording
+0x0E [7:0] Audio summation (low-byte)
+0x0F [7:0] Audio summation (high-byte)
+0x10 [7:0] Audio sample count
+0x11 [7:0] Audio peak data in audio frame
+
+The AE area (sx, sy, ex, ey) in the active window can be set by programming the
+registers 0x1c, 0x1d, 0x1e and 0x1f of the SN9C1xx controllers, where one unit
+corresponds to 32 pixels.
+
+[1] The frame headers exported by the SN9C105 and SN9C120 are not described.
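As an illustration only (not part of the driver), a minimal C sketch extracting a
few of the SN9C101/102 fields from the 18-byte header laid out in the first table
above:

    #include <stdint.h>

    struct sn9c102_hdr_fields {
            unsigned int red_gain;     /* byte 0x06, bits 3:0 */
            unsigned int blue_gain;    /* byte 0x06, bits 7:4 */
            unsigned int compressed;   /* byte 0x07, bit 0 */
            unsigned int frame_index;  /* byte 0x07, bits 7:6 */
            unsigned int y_sum_inside; /* bytes 0x08 (low) and 0x09 (high) */
    };

    static void parse_sn9c102_header(const uint8_t hdr[18],
                                     struct sn9c102_hdr_fields *f)
    {
            f->red_gain     = hdr[0x06] & 0x0f;
            f->blue_gain    = hdr[0x06] >> 4;
            f->compressed   = hdr[0x07] & 0x01;
            f->frame_index  = hdr[0x07] >> 6;
            f->y_sum_inside = hdr[0x08] | ((unsigned int)hdr[0x09] << 8);
    }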
+
+
+9. Supported devices
+====================
+Neither the names of the companies nor those of their products will be mentioned
+here. They have never collaborated with the author, so there is no advertising.
+
+From the point of view of a driver, what unambiguously identifies a device is
+the pair of its USB vendor and product identifiers. Below is a list of known
+identifiers of devices built around the SN9C1xx PC camera controllers:
+
+Vendor ID Product ID
+--------- ----------
+0x0458 0x7025
+0x045e 0x00f5
+0x045e 0x00f7
+0x0471 0x0327
+0x0471 0x0328
+0x0c45 0x6001
+0x0c45 0x6005
+0x0c45 0x6007
+0x0c45 0x6009
+0x0c45 0x600d
+0x0c45 0x6011
+0x0c45 0x6019
+0x0c45 0x6024
+0x0c45 0x6025
+0x0c45 0x6028
+0x0c45 0x6029
+0x0c45 0x602a
+0x0c45 0x602b
+0x0c45 0x602c
+0x0c45 0x602d
+0x0c45 0x602e
+0x0c45 0x6030
+0x0c45 0x603f
+0x0c45 0x6080
+0x0c45 0x6082
+0x0c45 0x6083
+0x0c45 0x6088
+0x0c45 0x608a
+0x0c45 0x608b
+0x0c45 0x608c
+0x0c45 0x608e
+0x0c45 0x608f
+0x0c45 0x60a0
+0x0c45 0x60a2
+0x0c45 0x60a3
+0x0c45 0x60a8
+0x0c45 0x60aa
+0x0c45 0x60ab
+0x0c45 0x60ac
+0x0c45 0x60ae
+0x0c45 0x60af
+0x0c45 0x60b0
+0x0c45 0x60b2
+0x0c45 0x60b3
+0x0c45 0x60b8
+0x0c45 0x60ba
+0x0c45 0x60bb
+0x0c45 0x60bc
+0x0c45 0x60be
+0x0c45 0x60c0
+0x0c45 0x60c2
+0x0c45 0x60c8
+0x0c45 0x60cc
+0x0c45 0x60ea
+0x0c45 0x60ec
+0x0c45 0x60ef
+0x0c45 0x60fa
+0x0c45 0x60fb
+0x0c45 0x60fc
+0x0c45 0x60fe
+0x0c45 0x6102
+0x0c45 0x6108
+0x0c45 0x610f
+0x0c45 0x6130
+0x0c45 0x6138
+0x0c45 0x613a
+0x0c45 0x613b
+0x0c45 0x613c
+0x0c45 0x613e
+
+The list above does not imply that all those devices work with this driver: up
+until now only the ones built around the following pairs of SN9C1xx bridges
+and image sensors are supported; kernel messages will always tell you whether
+this is the case (see "Module loading" paragraph):
+
+Image sensor / SN9C1xx bridge | SN9C10[12] SN9C103 SN9C105 SN9C120
+-------------------------------------------------------------------------------
+HV7131D Hynix Semiconductor | Yes No No No
+HV7131R Hynix Semiconductor | No Yes Yes Yes
+MI-0343 Micron Technology | Yes No No No
+MI-0360 Micron Technology | No Yes Yes Yes
+OV7630 OmniVision Technologies | Yes Yes Yes Yes
+OV7660 OmniVision Technologies | No No Yes Yes
+PAS106B PixArt Imaging | Yes No No No
+PAS202B PixArt Imaging | Yes Yes No No
+TAS5110C1B Taiwan Advanced Sensor | Yes No No No
+TAS5110D Taiwan Advanced Sensor | Yes No No No
+TAS5130D1B Taiwan Advanced Sensor | Yes No No No
+
+"Yes" means that the pair is supported by the driver, while "No" means that the
+pair does not exist or is not supported by the driver.
+
+Only some of the available control settings of each image sensor are supported
+through the V4L2 interface.
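A user-space application can discover which controls a given camera actually
exposes with the standard V4L2 control enumeration; a minimal sketch, assuming an
already opened file descriptor for the video node:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static void list_controls(int fd)
    {
            struct v4l2_queryctrl qc;
            __u32 id;

            for (id = V4L2_CID_BASE; id < V4L2_CID_LASTP1; id++) {
                    memset(&qc, 0, sizeof(qc));
                    qc.id = id;
                    if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0 &&
                        !(qc.flags & V4L2_CTRL_FLAG_DISABLED))
                            printf("%s: %d..%d (default %d)\n", qc.name,
                                   qc.minimum, qc.maximum, qc.default_value);
            }
    }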
+
+Donations of new models for further testing and support would be much
+appreciated. Hardware that is not available to the author of this driver will
+not be supported.
+
+
+10. Notes for V4L2 application developers
+=========================================
+This driver follows the V4L2 API specifications. In particular, it enforces two
+rules:
+
+- exactly one I/O method, either "mmap" or "read", is associated with each
+file descriptor. Once it is selected, the application must close and reopen the
+device to switch to the other I/O method;
+
+- although it is not mandatory, previously mapped buffer memory should always
+be unmapped before calling any "VIDIOC_S_CROP" or "VIDIOC_S_FMT" ioctl's.
+The same number of buffers as before will be allocated again to match the size
+of the new video frames, so you have to map the buffers again before any I/O
+attempts on them.
+
+Consistently with the hardware limits, this driver also supports image
+downscaling with scaling factors of 1, 2 and 4 in both directions.
+However, the V4L2 API specifications don't correctly define how the scaling
+factor can be chosen arbitrarily by the "negotiation" of the "source" and
+"target" rectangles. To work around this flaw, we have added the convention
+that, during the negotiation, whenever the "VIDIOC_S_CROP" ioctl is issued, the
+scaling factor is restored to 1.
+
+This driver supports two different video formats: the first one is the "8-bit
+Sequential Bayer" format and can be used to obtain uncompressed video data
+from the device through the current I/O method, while the second one provides
+either "raw" compressed video data (without frame headers not related to the
+compressed data) or standard JPEG (with frame headers). The compression quality
+may vary from 0 to 1 and can be selected or queried thanks to the
+VIDIOC_S_JPEGCOMP and VIDIOC_G_JPEGCOMP V4L2 ioctl's. For maximum flexibility,
+both the default active video format and the default compression quality
+depend on how the image sensor being used is initialized.
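The negotiation described above can be exercised from user space roughly as
follows; this is a sketch only, assuming the compressed format is exposed as
V4L2_PIX_FMT_SN9C10X and the file descriptor is already open:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int select_compressed_format(int fd)
    {
            struct v4l2_format fmt;
            struct v4l2_jpegcompression comp;

            memset(&fmt, 0, sizeof(fmt));
            fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            fmt.fmt.pix.width = 352;
            fmt.fmt.pix.height = 288;
            fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SN9C10X;
            if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
                    return -1;

            /* the quality range is 0..1 for this driver, as noted above */
            memset(&comp, 0, sizeof(comp));
            if (ioctl(fd, VIDIOC_G_JPEGCOMP, &comp) < 0)
                    return -1;
            comp.quality = 1;
            return ioctl(fd, VIDIOC_S_JPEGCOMP, &comp);
    }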
+
+
+11. Video frame formats [1]
+=======================
+The SN9C1xx PC Camera Controllers can send images in two possible video
+formats over the USB: either native "Sequential RGB Bayer" or compressed.
+The compression is used to achieve high frame rates. With regard to the
+SN9C101, SN9C102 and SN9C103, the compression is based on the Huffman encoding
+algorithm described below, while with regard to the SN9C105 and SN9C120 the
+compression is based on the JPEG standard.
+The current video format may be selected or queried from the user application
+by calling the VIDIOC_S_FMT or VIDIOC_G_FMT ioctl's, as described in the V4L2
+API specifications.
+
+The name "Sequential Bayer" indicates the organization of the red, green and
+blue pixels in one video frame. Each pixel is associated with an 8-bit value
+and is laid out in memory according to the pattern shown below:
+
+B[0] G[1] B[2] G[3] ... B[m-2] G[m-1]
+G[m] R[m+1] G[m+2] R[m+3] ... G[2m-2] R[2m-1]
+...
+... B[(n-1)(m-2)] G[(n-1)(m-1)]
+... G[n(m-2)] R[n(m-1)]
+
+The above matrix also represents the sequential or progressive read-out mode of
+the (n, m) Bayer color filter array used in many CCD or CMOS image sensors.
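In other words, the colour component found at a given (row, column) position of
the sequential read-out can be computed as shown in this small illustration:

    /* Which Bayer component sits at (row, col) in the pattern shown above. */
    static char bayer_component(unsigned int row, unsigned int col)
    {
            if (row % 2 == 0)                  /* B G B G ... rows */
                    return (col % 2 == 0) ? 'B' : 'G';
            return (col % 2 == 0) ? 'G' : 'R'; /* G R G R ... rows */
    }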
+
+The Huffman compressed video frame consists of a bitstream that encodes for
+every R, G, or B pixel the difference between the value of the pixel itself and
+some reference pixel value. Pixels are organised in the Bayer pattern and the
+Bayer sub-pixels are tracked individually and alternatingly. For example, in
+the first line values for the B and G1 pixels are alternatingly encoded, while
+in the second line values for the G2 and R pixels are alternatingly encoded.
+
+The pixel reference value is calculated as follows:
+- the 4 top left pixels are encoded in raw uncompressed 8-bit format;
+- the value in the top two rows is the value of the pixel left of the current
+ pixel;
+- the value in the left column is the value of the pixel above the current
+ pixel;
+- for all other pixels, the reference value is the average of the value of the
+ pixel on the left and the value of the pixel above the current pixel;
+- there is one code in the bitstream that specifies the value of a pixel
+ directly (in 4-bit resolution);
+- pixel values need to be clamped inside the range [0..255] for proper
+ decoding.
+
+The algorithm purely describes the conversion from compressed Bayer code used
+in the SN9C101, SN9C102 and SN9C103 chips to uncompressed Bayer. Additional
+steps are required to convert this to a color image (i.e. a color interpolation
+algorithm).
+
+The following Huffman codes have been found:
+0: +0 (relative to reference pixel value)
+100: +4
+101: -4?
+1110xxxx: set absolute value to xxxx.0000
+1101: +11
+1111: -11
+11001: +20
+110000: -20
+110001: ??? - these codes are apparently not used
+
+[1] The Huffman compression algorithm has been reverse-engineered and
+ documented by Bertrik Sikken.
+
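A minimal sketch of the prediction and clamping rules listed above (illustration
only: 'left' and 'above' stand for the previously decoded neighbours of the same
Bayer colour, 'delta' is the difference decoded from the Huffman codes, and the
"4 top left pixels" are interpreted as x < 2, y < 2):

    static int sn9c10x_predict(int x, int y, int left, int above, int delta)
    {
            int val;

            if (y < 2 && x < 2)
                    val = delta;            /* raw 8-bit value, no prediction */
            else if (y < 2)
                    val = left + delta;     /* top two rows: predict from the left */
            else if (x < 2)
                    val = above + delta;    /* left column(s): predict from above */
            else
                    val = (left + above) / 2 + delta;

            /* clamp into [0..255] for proper decoding */
            if (val < 0)
                    val = 0;
            else if (val > 255)
                    val = 255;
            return val;
    }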
+
+12. Contact information
+=======================
+The author may be contacted by e-mail at <luca.risolia@studio.unibo.it>.
+
+GPG/PGP encrypted e-mail's are accepted. The GPG key ID of the author is
+'FCE635A4'; the public 1024-bit key should be available at any keyserver;
+the fingerprint is: '88E8 F32F 7244 68BA 3958 5D40 99DA 5D2A FCE6 35A4'.
+
+
+13. Credits
+===========
+Many thanks to the following persons for their contributions (listed in alphabetical
+order):
+
+- David Anderson for the donation of a webcam;
+- Luca Capello for the donation of a webcam;
+- Philippe Coval for having helped testing the PAS202BCA image sensor;
+- Joao Rodrigo Fuzaro, Joao Limirio, Claudio Filho and Caio Begotti for the
+ donation of a webcam;
+- Dennis Heitmann for the donation of a webcam;
+- Jon Hollstrom for the donation of a webcam;
+- Nick McGill for the donation of a webcam;
+- Carlos Eduardo Medaglia Dyonisio, who added the support for the PAS202BCB
+ image sensor;
+- Stefano Mozzi, who donated 45 EU;
+- Andrew Pearce for the donation of a webcam;
+- John Pullan for the donation of a webcam;
+- Bertrik Sikken, who reverse-engineered and documented the Huffman compression
+ algorithm used in the SN9C101, SN9C102 and SN9C103 controllers and
+ implemented the first decoder;
+- Ronny Standke for the donation of a webcam;
+- Mizuno Takafumi for the donation of a webcam;
+- an "anonymous" donator (who didn't want his name to be revealed) for the
+ donation of a webcam.
+- an anonymous donator for the donation of four webcams and two boards with ten
+ image sensors.
diff --git a/drivers/media/usb/sn9c102/sn9c102_config.h b/drivers/staging/media/sn9c102/sn9c102_config.h
index 0f4e0378b071..0f4e0378b071 100644
--- a/drivers/media/usb/sn9c102/sn9c102_config.h
+++ b/drivers/staging/media/sn9c102/sn9c102_config.h
diff --git a/drivers/media/usb/sn9c102/sn9c102_core.c b/drivers/staging/media/sn9c102/sn9c102_core.c
index 2cb44de2b92c..2cb44de2b92c 100644
--- a/drivers/media/usb/sn9c102/sn9c102_core.c
+++ b/drivers/staging/media/sn9c102/sn9c102_core.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_devtable.h b/drivers/staging/media/sn9c102/sn9c102_devtable.h
index b3d2cc729657..b3d2cc729657 100644
--- a/drivers/media/usb/sn9c102/sn9c102_devtable.h
+++ b/drivers/staging/media/sn9c102/sn9c102_devtable.h
diff --git a/drivers/media/usb/sn9c102/sn9c102_hv7131d.c b/drivers/staging/media/sn9c102/sn9c102_hv7131d.c
index 2dce5c908c8e..2dce5c908c8e 100644
--- a/drivers/media/usb/sn9c102/sn9c102_hv7131d.c
+++ b/drivers/staging/media/sn9c102/sn9c102_hv7131d.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_hv7131r.c b/drivers/staging/media/sn9c102/sn9c102_hv7131r.c
index 4295887ff609..4295887ff609 100644
--- a/drivers/media/usb/sn9c102/sn9c102_hv7131r.c
+++ b/drivers/staging/media/sn9c102/sn9c102_hv7131r.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_mi0343.c b/drivers/staging/media/sn9c102/sn9c102_mi0343.c
index 1f5b09bec89c..1f5b09bec89c 100644
--- a/drivers/media/usb/sn9c102/sn9c102_mi0343.c
+++ b/drivers/staging/media/sn9c102/sn9c102_mi0343.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_mi0360.c b/drivers/staging/media/sn9c102/sn9c102_mi0360.c
index d973fc1973d9..d973fc1973d9 100644
--- a/drivers/media/usb/sn9c102/sn9c102_mi0360.c
+++ b/drivers/staging/media/sn9c102/sn9c102_mi0360.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_mt9v111.c b/drivers/staging/media/sn9c102/sn9c102_mt9v111.c
index 95986eb492e4..95986eb492e4 100644
--- a/drivers/media/usb/sn9c102/sn9c102_mt9v111.c
+++ b/drivers/staging/media/sn9c102/sn9c102_mt9v111.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_ov7630.c b/drivers/staging/media/sn9c102/sn9c102_ov7630.c
index 803712c29f02..803712c29f02 100644
--- a/drivers/media/usb/sn9c102/sn9c102_ov7630.c
+++ b/drivers/staging/media/sn9c102/sn9c102_ov7630.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_ov7660.c b/drivers/staging/media/sn9c102/sn9c102_ov7660.c
index 7977795d342b..7977795d342b 100644
--- a/drivers/media/usb/sn9c102/sn9c102_ov7660.c
+++ b/drivers/staging/media/sn9c102/sn9c102_ov7660.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_pas106b.c b/drivers/staging/media/sn9c102/sn9c102_pas106b.c
index 81cd969c1b7b..81cd969c1b7b 100644
--- a/drivers/media/usb/sn9c102/sn9c102_pas106b.c
+++ b/drivers/staging/media/sn9c102/sn9c102_pas106b.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_pas202bcb.c b/drivers/staging/media/sn9c102/sn9c102_pas202bcb.c
index 2e86fdc86989..2e86fdc86989 100644
--- a/drivers/media/usb/sn9c102/sn9c102_pas202bcb.c
+++ b/drivers/staging/media/sn9c102/sn9c102_pas202bcb.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_sensor.h b/drivers/staging/media/sn9c102/sn9c102_sensor.h
index 3679970dba2c..3679970dba2c 100644
--- a/drivers/media/usb/sn9c102/sn9c102_sensor.h
+++ b/drivers/staging/media/sn9c102/sn9c102_sensor.h
diff --git a/drivers/media/usb/sn9c102/sn9c102_tas5110c1b.c b/drivers/staging/media/sn9c102/sn9c102_tas5110c1b.c
index 04cdfdde8564..04cdfdde8564 100644
--- a/drivers/media/usb/sn9c102/sn9c102_tas5110c1b.c
+++ b/drivers/staging/media/sn9c102/sn9c102_tas5110c1b.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_tas5110d.c b/drivers/staging/media/sn9c102/sn9c102_tas5110d.c
index 9372e6f9fcff..9372e6f9fcff 100644
--- a/drivers/media/usb/sn9c102/sn9c102_tas5110d.c
+++ b/drivers/staging/media/sn9c102/sn9c102_tas5110d.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_tas5130d1b.c b/drivers/staging/media/sn9c102/sn9c102_tas5130d1b.c
index a30bbc4389f5..a30bbc4389f5 100644
--- a/drivers/media/usb/sn9c102/sn9c102_tas5130d1b.c
+++ b/drivers/staging/media/sn9c102/sn9c102_tas5130d1b.c
diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
index 36750205d23f..480b7c4064cc 100644
--- a/drivers/staging/media/solo6x10/solo6x10-core.c
+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
@@ -669,7 +669,7 @@ static void solo_pci_remove(struct pci_dev *pdev)
free_solo_dev(solo_dev);
}
-static DEFINE_PCI_DEVICE_TABLE(solo_id_table) = {
+static const struct pci_device_id solo_id_table[] = {
/* 6010 based cards */
{ PCI_DEVICE(PCI_VENDOR_ID_SOFTLOGIC, PCI_DEVICE_ID_SOLO6010),
.driver_data = SOLO_DEV_6010 },
diff --git a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
index d582c5b84c14..ce9e5aaf7fd4 100644
--- a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
@@ -964,7 +964,7 @@ static int solo_enc_s_std(struct file *file, void *priv, v4l2_std_id std)
{
struct solo_enc_dev *solo_enc = video_drvdata(file);
- return solo_set_video_type(solo_enc->solo_dev, std & V4L2_STD_PAL);
+ return solo_set_video_type(solo_enc->solo_dev, std & V4L2_STD_625_50);
}
static int solo_enum_framesizes(struct file *file, void *priv,
diff --git a/drivers/staging/media/solo6x10/solo6x10-v4l2.c b/drivers/staging/media/solo6x10/solo6x10-v4l2.c
index 7b26de3488da..47e72dac9b13 100644
--- a/drivers/staging/media/solo6x10/solo6x10-v4l2.c
+++ b/drivers/staging/media/solo6x10/solo6x10-v4l2.c
@@ -527,7 +527,7 @@ static int solo_g_std(struct file *file, void *priv, v4l2_std_id *i)
return 0;
}
-int solo_set_video_type(struct solo_dev *solo_dev, bool type)
+int solo_set_video_type(struct solo_dev *solo_dev, bool is_50hz)
{
int i;
@@ -537,7 +537,8 @@ int solo_set_video_type(struct solo_dev *solo_dev, bool type)
for (i = 0; i < solo_dev->nr_chans; i++)
if (vb2_is_busy(&solo_dev->v4l2_enc[i]->vidq))
return -EBUSY;
- solo_dev->video_type = type;
+ solo_dev->video_type = is_50hz ? SOLO_VO_FMT_TYPE_PAL :
+ SOLO_VO_FMT_TYPE_NTSC;
/* Reconfigure for the new standard */
solo_disp_init(solo_dev);
solo_enc_init(solo_dev);
@@ -551,7 +552,7 @@ static int solo_s_std(struct file *file, void *priv, v4l2_std_id std)
{
struct solo_dev *solo_dev = video_drvdata(file);
- return solo_set_video_type(solo_dev, std & V4L2_STD_PAL);
+ return solo_set_video_type(solo_dev, std & V4L2_STD_625_50);
}
static int solo_s_ctrl(struct v4l2_ctrl *ctrl)
diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
index f1bbb8cb74e6..8964f8be158e 100644
--- a/drivers/staging/media/solo6x10/solo6x10.h
+++ b/drivers/staging/media/solo6x10/solo6x10.h
@@ -398,7 +398,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
int desc_cnt);
/* Global s_std ioctl */
-int solo_set_video_type(struct solo_dev *solo_dev, bool type);
+int solo_set_video_type(struct solo_dev *solo_dev, bool is_50hz);
void solo_update_mode(struct solo_enc_dev *solo_enc);
/* Set the threshold for motion detection */
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 235d2b1ec593..d8ea25486a33 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -306,7 +306,8 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb)
+static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ void *accel_priv)
{
return (u16)smp_processor_id();
}
@@ -891,6 +892,11 @@ static int xlr_setup_mdio(struct xlr_net_priv *priv,
priv->mii_bus->write = xlr_mii_write;
priv->mii_bus->parent = &pdev->dev;
priv->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (priv->mii_bus->irq == NULL) {
+ pr_err("irq alloc failed\n");
+ mdiobus_free(priv->mii_bus);
+ return -ENOMEM;
+ }
priv->mii_bus->irq[priv->phy_addr] = priv->ndev->irq;
/* Scan only the enabled address */
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 49ea76b3435d..3ee0b1887a54 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -36,7 +36,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
-#include <linux/clk/tegra.h>
#include "nvec.h"
@@ -83,7 +82,7 @@ enum nvec_sleep_subcmds {
static struct nvec_chip *nvec_power_handle;
-static struct mfd_cell nvec_devices[] = {
+static const struct mfd_cell nvec_devices[] = {
{
.name = "nvec-kbd",
.id = 1,
@@ -734,9 +733,9 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
clk_prepare_enable(nvec->i2c_clk);
- tegra_periph_reset_assert(nvec->i2c_clk);
+ reset_control_assert(nvec->rst);
udelay(2);
- tegra_periph_reset_deassert(nvec->i2c_clk);
+ reset_control_deassert(nvec->rst);
val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
(0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
@@ -837,6 +836,12 @@ static int tegra_nvec_probe(struct platform_device *pdev)
return -ENODEV;
}
+ nvec->rst = devm_reset_control_get(&pdev->dev, "i2c");
+ if (IS_ERR(nvec->rst)) {
+ dev_err(nvec->dev, "failed to get controller reset\n");
+ return PTR_ERR(nvec->rst);
+ }
+
nvec->base = base;
nvec->irq = res->start;
nvec->i2c_clk = i2c_clk;
diff --git a/drivers/staging/nvec/nvec.h b/drivers/staging/nvec/nvec.h
index e880518935fb..e271375053fa 100644
--- a/drivers/staging/nvec/nvec.h
+++ b/drivers/staging/nvec/nvec.h
@@ -23,6 +23,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
+#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -109,7 +110,8 @@ struct nvec_msg {
* @irq: The IRQ of the I2C device
* @i2c_addr: The address of the I2C slave
* @base: The base of the memory mapped region of the I2C device
- * @clk: The clock of the I2C device
+ * @i2c_clk: The clock of the I2C device
+ * @rst: The reset of the I2C device
* @notifier_list: Notifiers to be called on received messages, see
* nvec_register_notifier()
* @rx_data: Received messages that have to be processed
@@ -139,6 +141,7 @@ struct nvec_chip {
int i2c_addr;
void __iomem *base;
struct clk *i2c_clk;
+ struct reset_control *rst;
struct atomic_notifier_head notifier_list;
struct list_head rx_data, tx_data;
struct notifier_block nvec_status_notifier;
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index d118952c0a74..5a001d9b4252 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -275,13 +275,6 @@ enum cvmx_usb_pipe_flags {
*/
#define MAX_TRANSFER_PACKETS ((1<<10)-1)
-enum {
- USB_CLOCK_TYPE_REF_12,
- USB_CLOCK_TYPE_REF_24,
- USB_CLOCK_TYPE_REF_48,
- USB_CLOCK_TYPE_CRYSTAL_12,
-};
-
/**
* Logical transactions may take numerous low level
* transactions, especially when splits are concerned. This
@@ -471,19 +464,6 @@ struct octeon_hcd {
/* Returns the IO address to push/pop stuff data from the FIFOs */
#define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
-static int octeon_usb_get_clock_type(void)
-{
- switch (cvmx_sysinfo_get()->board_type) {
- case CVMX_BOARD_TYPE_BBGW_REF:
- case CVMX_BOARD_TYPE_LANAI2_A:
- case CVMX_BOARD_TYPE_LANAI2_U:
- case CVMX_BOARD_TYPE_LANAI2_G:
- case CVMX_BOARD_TYPE_UBNT_E100:
- return USB_CLOCK_TYPE_CRYSTAL_12;
- }
- return USB_CLOCK_TYPE_REF_48;
-}
-
/**
* Read a USB 32bit CSR. It performs the necessary address swizzle
* for 32bit CSRs and logs the value in a readable format if
@@ -582,37 +562,6 @@ static inline int __cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
return 0; /* Data0 */
}
-
-/**
- * Return the number of USB ports supported by this Octeon
- * chip. If the chip doesn't support USB, or is not supported
- * by this API, a zero will be returned. Most Octeon chips
- * support one usb port, but some support two ports.
- * cvmx_usb_initialize() must be called on independent
- * struct cvmx_usb_state.
- *
- * Returns: Number of port, zero if usb isn't supported
- */
-static int cvmx_usb_get_num_ports(void)
-{
- int arch_ports = 0;
-
- if (OCTEON_IS_MODEL(OCTEON_CN56XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
- arch_ports = 2;
- else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN31XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
- arch_ports = 1;
- else
- arch_ports = 0;
-
- return arch_ports;
-}
-
/**
* Initialize a USB port for use. This must be called before any
* other access to the Octeon USB port is made. The port starts
@@ -628,41 +577,16 @@ static int cvmx_usb_get_num_ports(void)
* Returns: 0 or a negative error code.
*/
static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
- int usb_port_number)
+ int usb_port_number,
+ enum cvmx_usb_initialize_flags flags)
{
union cvmx_usbnx_clk_ctl usbn_clk_ctl;
union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
- enum cvmx_usb_initialize_flags flags = 0;
int i;
/* At first allow 0-1 for the usb port number */
if ((usb_port_number < 0) || (usb_port_number > 1))
return -EINVAL;
- /* For all chips except 52XX there is only one port */
- if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0))
- return -EINVAL;
- /* Try to determine clock type automatically */
- if (octeon_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12) {
- /* Only 12 MHZ crystals are supported */
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
- } else {
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
-
- switch (octeon_usb_get_clock_type()) {
- case USB_CLOCK_TYPE_REF_12:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
- break;
- case USB_CLOCK_TYPE_REF_24:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
- break;
- case USB_CLOCK_TYPE_REF_48:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
- break;
- default:
- return -EINVAL;
- break;
- }
- }
memset(usb, 0, sizeof(*usb));
usb->init_flags = flags;
@@ -3431,7 +3355,6 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
return 0;
}
-
static const struct hc_driver octeon_hc_driver = {
.description = "Octeon USB",
.product_desc = "Octeon Host Controller",
@@ -3448,15 +3371,74 @@ static const struct hc_driver octeon_hc_driver = {
.hub_control = octeon_usb_hub_control,
};
-
-static int octeon_usb_driver_probe(struct device *dev)
+static int octeon_usb_probe(struct platform_device *pdev)
{
int status;
- int usb_num = to_platform_device(dev)->id;
- int irq = platform_get_irq(to_platform_device(dev), 0);
+ int initialize_flags;
+ int usb_num;
+ struct resource *res_mem;
+ struct device_node *usbn_node;
+ int irq = platform_get_irq(pdev, 0);
+ struct device *dev = &pdev->dev;
struct octeon_hcd *priv;
struct usb_hcd *hcd;
unsigned long flags;
+ u32 clock_rate = 48000000;
+ bool is_crystal_clock = false;
+ const char *clock_type;
+ int i;
+
+ if (dev->of_node == NULL) {
+ dev_err(dev, "Error: empty of_node\n");
+ return -ENXIO;
+ }
+ usbn_node = dev->of_node->parent;
+
+ i = of_property_read_u32(usbn_node,
+ "refclk-frequency", &clock_rate);
+ if (i) {
+ dev_err(dev, "No USBN \"refclk-frequency\"\n");
+ return -ENXIO;
+ }
+ switch (clock_rate) {
+ case 12000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
+ break;
+ case 24000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
+ break;
+ case 48000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
+ break;
+ default:
+		dev_err(dev, "Illegal USBN \"refclk-frequency\" %u\n", clock_rate);
+ return -ENXIO;
+
+ }
+
+ i = of_property_read_string(usbn_node,
+ "refclk-type", &clock_type);
+
+ if (!i && strcmp("crystal", clock_type) == 0)
+ is_crystal_clock = true;
+
+ if (is_crystal_clock)
+ initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
+ else
+ initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mem == NULL) {
+ dev_err(dev, "found no memory resource\n");
+ return -ENXIO;
+ }
+ usb_num = (res_mem->start >> 44) & 1;
+
+ if (irq < 0) {
+ /* Defective device tree, but we know how to fix it. */
+ irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56;
+ irq = irq_create_mapping(NULL, hwirq);
+ }
/*
* Set the DMA mask to 64bits so we get buffers already translated for
@@ -3465,6 +3447,26 @@ static int octeon_usb_driver_probe(struct device *dev)
dev->coherent_dma_mask = ~0;
dev->dma_mask = &dev->coherent_dma_mask;
+ /*
+ * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
+ * IOB priority registers. Under heavy network load USB
+ * hardware can be starved by the IOB causing a crash. Give
+ * it a priority boost if it has been waiting more than 400
+ * cycles to avoid this situation.
+ *
+ * Testing indicates that a cnt_val of 8192 is not sufficient,
+ * but no failures are seen with 4096. We choose a value of
+ * 400 to give a safety factor of 10.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
+
+ pri_cnt.u64 = 0;
+ pri_cnt.s.cnt_enb = 1;
+ pri_cnt.s.cnt_val = 400;
+ cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
+ }
+
hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
if (!hcd) {
dev_dbg(dev, "Failed to allocate memory for HCD\n");
@@ -3478,7 +3480,7 @@ static int octeon_usb_driver_probe(struct device *dev)
tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv);
INIT_LIST_HEAD(&priv->dequeue_list);
- status = cvmx_usb_initialize(&priv->usb, usb_num);
+ status = cvmx_usb_initialize(&priv->usb, usb_num, initialize_flags);
if (status) {
dev_dbg(dev, "USB initialization failed with %d\n", status);
kfree(hcd);
@@ -3492,21 +3494,23 @@ static int octeon_usb_driver_probe(struct device *dev)
cvmx_usb_poll(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
- status = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ status = usb_add_hcd(hcd, irq, 0);
if (status) {
dev_dbg(dev, "USB add HCD failed with %d\n", status);
kfree(hcd);
return -1;
}
+ device_wakeup_enable(hcd->self.controller);
- dev_dbg(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
+ dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
return 0;
}
-static int octeon_usb_driver_remove(struct device *dev)
+static int octeon_usb_remove(struct platform_device *pdev)
{
int status;
+ struct device *dev = &pdev->dev;
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct octeon_hcd *priv = hcd_to_octeon(hcd);
unsigned long flags;
@@ -3524,85 +3528,41 @@ static int octeon_usb_driver_remove(struct device *dev)
return 0;
}
-static struct device_driver octeon_usb_driver = {
- .name = "OcteonUSB",
- .bus = &platform_bus_type,
- .probe = octeon_usb_driver_probe,
- .remove = octeon_usb_driver_remove,
+static struct of_device_id octeon_usb_match[] = {
+ {
+ .compatible = "cavium,octeon-5750-usbc",
+ },
+ {},
};
+static struct platform_driver octeon_usb_driver = {
+ .driver = {
+ .name = "OcteonUSB",
+ .owner = THIS_MODULE,
+ .of_match_table = octeon_usb_match,
+ },
+ .probe = octeon_usb_probe,
+ .remove = octeon_usb_remove,
+};
-#define MAX_USB_PORTS 10
-static struct platform_device *pdev_glob[MAX_USB_PORTS];
-static int octeon_usb_registered;
-static int __init octeon_usb_module_init(void)
+static int __init octeon_usb_driver_init(void)
{
- int num_devices = cvmx_usb_get_num_ports();
- int device;
-
- if (usb_disabled() || num_devices == 0)
- return -ENODEV;
-
- if (driver_register(&octeon_usb_driver))
- return -ENOMEM;
-
- octeon_usb_registered = 1;
-
- /*
- * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
- * IOB priority registers. Under heavy network load USB
- * hardware can be starved by the IOB causing a crash. Give
- * it a priority boost if it has been waiting more than 400
- * cycles to avoid this situation.
- *
- * Testing indicates that a cnt_val of 8192 is not sufficient,
- * but no failures are seen with 4096. We choose a value of
- * 400 to give a safety factor of 10.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
-
- pri_cnt.u64 = 0;
- pri_cnt.s.cnt_enb = 1;
- pri_cnt.s.cnt_val = 400;
- cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
- }
-
- for (device = 0; device < num_devices; device++) {
- struct resource irq_resource;
- struct platform_device *pdev;
- memset(&irq_resource, 0, sizeof(irq_resource));
- irq_resource.start = (device == 0) ? OCTEON_IRQ_USB0 : OCTEON_IRQ_USB1;
- irq_resource.end = irq_resource.start;
- irq_resource.flags = IORESOURCE_IRQ;
- pdev = platform_device_register_simple((char *)octeon_usb_driver. name, device, &irq_resource, 1);
- if (IS_ERR(pdev)) {
- driver_unregister(&octeon_usb_driver);
- octeon_usb_registered = 0;
- return PTR_ERR(pdev);
- }
- if (device < MAX_USB_PORTS)
- pdev_glob[device] = pdev;
+ if (usb_disabled())
+ return 0;
- }
- return 0;
+ return platform_driver_register(&octeon_usb_driver);
}
+module_init(octeon_usb_driver_init);
-static void __exit octeon_usb_module_cleanup(void)
+static void __exit octeon_usb_driver_exit(void)
{
- int i;
+ if (usb_disabled())
+ return;
- for (i = 0; i < MAX_USB_PORTS; i++)
- if (pdev_glob[i]) {
- platform_device_unregister(pdev_glob[i]);
- pdev_glob[i] = NULL;
- }
- if (octeon_usb_registered)
- driver_unregister(&octeon_usb_driver);
+ platform_driver_unregister(&octeon_usb_driver);
}
+module_exit(octeon_usb_driver_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
-MODULE_DESCRIPTION("Cavium Networks Octeon USB Host driver.");
-module_init(octeon_usb_module_init);
-module_exit(octeon_usb_module_cleanup);
+MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium Inc. OCTEON USB Host driver.");
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
index a417d4fce12c..eccfcc54cea8 100644
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -27,7 +27,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 0315f60497b7..a0f4868cfa13 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -29,7 +29,6 @@
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
-#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 9b4d0b546b89..47541e1608f3 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -27,7 +27,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/ratelimit.h>
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index bd6ca7164049..089dc4b9efd4 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -26,7 +26,6 @@
**********************************************************************/
#include <linux/platform_device.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 92b02891704d..26b4ec56fd30 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -255,17 +255,19 @@ static bool dcon_blank_fb(struct dcon_priv *dcon, bool blank)
{
int err;
+ console_lock();
if (!lock_fb_info(dcon->fbinfo)) {
+ console_unlock();
dev_err(&dcon->client->dev, "unable to lock framebuffer\n");
return false;
}
- console_lock();
+
dcon->ignore_fb_events = true;
err = fb_blank(dcon->fbinfo,
blank ? FB_BLANK_POWERDOWN : FB_BLANK_UNBLANK);
dcon->ignore_fb_events = false;
- console_unlock();
unlock_fb_info(dcon->fbinfo);
+ console_unlock();
if (err) {
dev_err(&dcon->client->dev, "couldn't %sblank framebuffer\n",
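A short sketch of the lock nesting the hunk above establishes: console_lock() is now taken before lock_fb_info() and released after unlock_fb_info(), so the two locks always nest the same way as elsewhere in the fb core. The example_blank() wrapper and its error code are illustrative only.

#include <linux/console.h>
#include <linux/fb.h>

static int example_blank(struct fb_info *info, int blank)
{
	int err;

	console_lock();
	if (!lock_fb_info(info)) {		/* fb is being torn down */
		console_unlock();
		return -ENODEV;
	}
	err = fb_blank(info, blank);		/* e.g. FB_BLANK_UNBLANK */
	unlock_fb_info(info);
	console_unlock();
	return err;
}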
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
index 6ce0af9977d8..5de5981b3bba 100644
--- a/drivers/staging/ozwpan/ozcdev.c
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -448,7 +448,7 @@ int oz_cdev_start(struct oz_pd *pd, int resume)
}
spin_lock(&g_cdev.lock);
if ((g_cdev.active_pd == NULL) &&
- (memcmp(pd->mac_addr, g_cdev.active_addr, ETH_ALEN) == 0)) {
+ ether_addr_equal(pd->mac_addr, g_cdev.active_addr)) {
oz_pd_get(pd);
g_cdev.active_pd = pd;
oz_dbg(ON, "Active PD arrived\n");
diff --git a/drivers/staging/ozwpan/ozeltbuf.c b/drivers/staging/ozwpan/ozeltbuf.c
index 9b86486c6b11..bd560c67fc8c 100644
--- a/drivers/staging/ozwpan/ozeltbuf.c
+++ b/drivers/staging/ozwpan/ozeltbuf.c
@@ -3,7 +3,6 @@
* Released under the GNU General Public License Version 2 (GPLv2).
* -----------------------------------------------------------------------------
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include "ozdbg.h"
@@ -138,7 +137,7 @@ int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count)
oz_dbg(ON, "%s: (0x%x)\n", __func__, id);
- st = kzalloc(sizeof(struct oz_elt_stream), GFP_ATOMIC | __GFP_ZERO);
+ st = kzalloc(sizeof(struct oz_elt_stream), GFP_ATOMIC);
if (st == NULL)
return -ENOMEM;
atomic_set(&st->ref_count, 1);
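The flag change above is a small cleanup rather than a behavioural fix: kzalloc() is kmalloc() with __GFP_ZERO already applied, so passing __GFP_ZERO explicitly was redundant. An illustrative equivalent (example_stream is a placeholder type):

#include <linux/slab.h>

struct example_stream {
	int id;
	char name[32];
};

/* Both spellings return zeroed memory; the first is preferred. */
static struct example_stream *example_alloc(void)
{
	struct example_stream *st = kzalloc(sizeof(*st), GFP_ATOMIC);
	/* equivalent to: kmalloc(sizeof(*st), GFP_ATOMIC | __GFP_ZERO) */
	return st;
}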
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
index d9c43c3282e7..efaf26f734c3 100644
--- a/drivers/staging/ozwpan/ozhcd.c
+++ b/drivers/staging/ozwpan/ozhcd.c
@@ -2270,6 +2270,8 @@ static int oz_plat_probe(struct platform_device *dev)
usb_put_hcd(hcd);
return -1;
}
+ device_wakeup_enable(hcd->self.controller);
+
spin_lock_bh(&g_hcdlock);
g_ozhcd = ozhcd;
spin_unlock_bh(&g_hcdlock);
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
index ab85a724a0e2..743695077346 100644
--- a/drivers/staging/ozwpan/ozpd.c
+++ b/drivers/staging/ozwpan/ozpd.c
@@ -4,7 +4,6 @@
* -----------------------------------------------------------------------------
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
index 88714ec85705..5d965cf06d59 100644
--- a/drivers/staging/ozwpan/ozproto.c
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -4,11 +4,11 @@
* -----------------------------------------------------------------------------
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/errno.h>
#include <linux/ieee80211.h>
#include "ozdbg.h"
@@ -180,7 +180,7 @@ static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
spin_lock_bh(&g_polling_lock);
list_for_each(e, &g_pd_list) {
pd2 = container_of(e, struct oz_pd, link);
- if (memcmp(pd2->mac_addr, pd_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(pd2->mac_addr, pd_addr)) {
free_pd = pd;
pd = pd2;
break;
@@ -337,7 +337,7 @@ static void oz_rx_frame(struct sk_buff *skb)
oz_dbg(RX_FRAMES, "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
mac_hdr = skb_mac_header(skb);
- src_addr = &mac_hdr[ETH_ALEN] ;
+ src_addr = &mac_hdr[ETH_ALEN];
length = skb->len;
/* Check the version field */
@@ -597,7 +597,7 @@ struct oz_pd *oz_pd_find(const u8 *mac_addr)
spin_lock_bh(&g_polling_lock);
list_for_each(e, &g_pd_list) {
pd = container_of(e, struct oz_pd, link);
- if (memcmp(pd->mac_addr, mac_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(pd->mac_addr, mac_addr)) {
atomic_inc(&pd->ref_count);
spin_unlock_bh(&g_polling_lock);
return pd;
@@ -668,8 +668,8 @@ void oz_binding_add(const char *net_dev)
if (binding) {
binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
binding->ptype.func = oz_pkt_recv;
- memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
if (net_dev && *net_dev) {
+ memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
oz_dbg(ON, "Adding binding: %s\n", net_dev);
binding->ptype.dev =
dev_get_by_name(&init_net, net_dev);
@@ -680,6 +680,7 @@ void oz_binding_add(const char *net_dev)
}
} else {
oz_dbg(ON, "Binding to all netcards\n");
+ memset(binding->name, 0, OZ_MAX_BINDING_LEN);
binding->ptype.dev = NULL;
}
if (binding) {
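The memcmp()-to-ether_addr_equal() conversions above use the helper from <linux/etherdevice.h>, which compares two 6-byte Ethernet addresses (its kerneldoc asks for u16-aligned buffers). A minimal illustration with placeholder names:

#include <linux/etherdevice.h>

static bool example_addr_match(const u8 *a, const u8 *b)
{
	/* Reads as a predicate; equivalent to memcmp(a, b, ETH_ALEN) == 0. */
	return ether_addr_equal(a, b);
}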
diff --git a/drivers/staging/ozwpan/ozusbsvc.c b/drivers/staging/ozwpan/ozusbsvc.c
index cf263791cb30..edd44c457a4b 100644
--- a/drivers/staging/ozwpan/ozusbsvc.c
+++ b/drivers/staging/ozwpan/ozusbsvc.c
@@ -11,7 +11,6 @@
* -----------------------------------------------------------------------------
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
index 228bffaa69c9..617f51cdaea7 100644
--- a/drivers/staging/ozwpan/ozusbsvc1.c
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
@@ -5,7 +5,6 @@
* This file implements the protocol specific parts of the USB service for a PD.
* -----------------------------------------------------------------------------
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index cbc15c120981..ec4b1fd14021 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -1590,8 +1590,8 @@ static ssize_t keypad_read(struct file *file,
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
- interruptible_sleep_on(&keypad_read_wait);
- if (signal_pending(current))
+ if (wait_event_interruptible(keypad_read_wait,
+ keypad_buflen != 0))
return -EINTR;
}
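The keypad_read() change above replaces the racy interruptible_sleep_on() with wait_event_interruptible(), which rechecks its condition under the wait-queue machinery and so cannot miss a wakeup that lands just before the sleep. A generic sketch with placeholder names:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wait);
static int example_buflen;

/* Consumer: sleep until data arrives or a signal is delivered. */
static int example_read_blocking(void)
{
	if (wait_event_interruptible(example_wait, example_buflen != 0))
		return -EINTR;
	return example_buflen;
}

/* Producer: update the condition first, then wake the sleepers. */
static void example_push(int len)
{
	example_buflen = len;
	wake_up_interruptible(&example_wait);
}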
diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
index 919cb95236fc..3826561e7742 100644
--- a/drivers/staging/phison/phison.c
+++ b/drivers/staging/phison/phison.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -69,7 +68,7 @@ static int phison_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
-static DEFINE_PCI_DEVICE_TABLE(phison_pci_tbl) = {
+static const struct pci_device_id phison_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PHISON, PCI_DEVICE_ID_PS5000),
PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
{ 0, },
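The phison change above drops the deprecated DEFINE_PCI_DEVICE_TABLE() wrapper, which is just a thin wrapper around a const struct pci_device_id array; spelling the array out is the preferred form. A sketch with placeholder IDs:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* placeholder vendor/device IDs */
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);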
diff --git a/drivers/staging/quickstart/quickstart.c b/drivers/staging/quickstart/quickstart.c
index 9f6ebdb23740..a85c3d68c462 100644
--- a/drivers/staging/quickstart/quickstart.c
+++ b/drivers/staging/quickstart/quickstart.c
@@ -31,7 +31,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/input.h>
diff --git a/drivers/staging/rtl8187se/ieee80211/dot11d.c b/drivers/staging/rtl8187se/ieee80211/dot11d.c
index 9d2d5c58add2..4483c2c0307c 100644
--- a/drivers/staging/rtl8187se/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8187se/ieee80211/dot11d.c
@@ -1,16 +1,6 @@
-//-----------------------------------------------------------------------------
-// File:
-// Dot11d.c
-//
-// Description:
-// Implement 802.11d.
-//
-//-----------------------------------------------------------------------------
-
#include "dot11d.h"
-void
-Dot11d_Init(struct ieee80211_device *ieee)
+void Dot11d_Init(struct ieee80211_device *ieee)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);
@@ -22,23 +12,19 @@ Dot11d_Init(struct ieee80211_device *ieee)
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
RESET_CIE_WATCHDOG(ieee);
- printk("Dot11d_Init()\n");
+ netdev_info(ieee->dev, "Dot11d_Init()\n");
}
-//
-// Description:
-// Reset to the state as we are just entering a regulatory domain.
-//
-void
-Dot11d_Reset(struct ieee80211_device *ieee)
+/* Reset to the state as we are just entering a regulatory domain. */
+void Dot11d_Reset(struct ieee80211_device *ieee)
{
u32 i;
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);
- // Clear old channel map
+ /* Clear old channel map */
memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
- // Set new channel map
+ /* Set new channel map */
for (i = 1; i <= 11; i++)
(pDot11dInfo->channel_map)[i] = 1;
@@ -48,36 +34,30 @@ Dot11d_Reset(struct ieee80211_device *ieee)
pDot11dInfo->State = DOT11D_STATE_NONE;
pDot11dInfo->CountryIeLen = 0;
RESET_CIE_WATCHDOG(ieee);
-
- //printk("Dot11d_Reset()\n");
}
-//
-// Description:
-// Update country IE from Beacon or Probe Response
-// and configure PHY for operation in the regulatory domain.
-//
-// TODO:
-// Configure Tx power.
-//
-// Assumption:
-// 1. IS_DOT11D_ENABLE() is TRUE.
-// 2. Input IE is an valid one.
-//
-void
-Dot11d_UpdateCountryIe(
- struct ieee80211_device *dev,
- u8 *pTaddr,
- u16 CoutryIeLen,
- u8 *pCoutryIe
- )
+/*
+ * Description:
+ * Update country IE from Beacon or Probe Response and configure PHY for
+ * operation in the regulatory domain.
+ *
+ * TODO:
+ * Configure Tx power.
+ *
+ * Assumption:
+ * 1. IS_DOT11D_ENABLE() is TRUE.
+ * 2. Input IE is an valid one.
+ */
+void Dot11d_UpdateCountryIe(struct ieee80211_device *dev, u8 *pTaddr,
+ u16 CoutryIeLen, u8 *pCoutryIe)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 i, j, NumTriples, MaxChnlNum;
+ u8 index, MaxTxPowerInDbm;
PCHNL_TXPOWER_TRIPLE pTriple;
if ((CoutryIeLen - 3)%3 != 0) {
- printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
+ netdev_info(dev->dev, "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
Dot11d_Reset(dev);
return;
}
@@ -85,37 +65,47 @@ Dot11d_UpdateCountryIe(
memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
MaxChnlNum = 0;
- NumTriples = (CoutryIeLen - 3) / 3; // skip 3-byte country string.
+ NumTriples = (CoutryIeLen - 3) / 3; /* skip 3-byte country string. */
pTriple = (PCHNL_TXPOWER_TRIPLE)(pCoutryIe + 3);
for (i = 0; i < NumTriples; i++) {
if (MaxChnlNum >= pTriple->FirstChnl) {
- // It is not in a monotonically increasing order, so stop processing.
- printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
+ /*
+ * It is not in a monotonically increasing order,
+ * so stop processing.
+ */
+ netdev_info(dev->dev,
+ "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
Dot11d_Reset(dev);
return;
}
- if (MAX_CHANNEL_NUMBER < (pTriple->FirstChnl + pTriple->NumChnls)) {
- // It is not a valid set of channel id, so stop processing.
- printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
+ if (MAX_CHANNEL_NUMBER <
+ (pTriple->FirstChnl + pTriple->NumChnls)) {
+ /*
+ * It is not a valid set of channel id,
+ * so stop processing
+ */
+ netdev_info(dev->dev,
+ "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
Dot11d_Reset(dev);
return;
}
- for (j = 0 ; j < pTriple->NumChnls; j++) {
- pDot11dInfo->channel_map[pTriple->FirstChnl + j] = 1;
- pDot11dInfo->MaxTxPwrDbmList[pTriple->FirstChnl + j] = pTriple->MaxTxPowerInDbm;
+ for (j = 0; j < pTriple->NumChnls; j++) {
+ index = pTriple->FirstChnl + j;
+ pDot11dInfo->channel_map[index] = 1;
+ MaxTxPowerInDbm = pTriple->MaxTxPowerInDbm;
+ pDot11dInfo->MaxTxPwrDbmList[index] = MaxTxPowerInDbm;
MaxChnlNum = pTriple->FirstChnl + j;
}
pTriple = (PCHNL_TXPOWER_TRIPLE)((u8 *)pTriple + 3);
}
#if 1
- //printk("Dot11d_UpdateCountryIe(): Channel List:\n");
- printk("Channel List:");
+ netdev_info(dev->dev, "Channel List:");
for (i = 1; i <= MAX_CHANNEL_NUMBER; i++)
if (pDot11dInfo->channel_map[i] > 0)
- printk(" %d", i);
- printk("\n");
+ netdev_info(dev->dev, " %d", i);
+ netdev_info(dev->dev, "\n");
#endif
UPDATE_CIE_SRC(dev, pTaddr);
@@ -125,31 +115,23 @@ Dot11d_UpdateCountryIe(
pDot11dInfo->State = DOT11D_STATE_LEARNED;
}
-u8
-DOT11D_GetMaxTxPwrInDbm(
- struct ieee80211_device *dev,
- u8 Channel
- )
+u8 DOT11D_GetMaxTxPwrInDbm(struct ieee80211_device *dev, u8 Channel)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 MaxTxPwrInDbm = 255;
if (MAX_CHANNEL_NUMBER < Channel) {
- printk("DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
+ netdev_info(dev->dev, "DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
return MaxTxPwrInDbm;
}
- if (pDot11dInfo->channel_map[Channel]) {
+ if (pDot11dInfo->channel_map[Channel])
MaxTxPwrInDbm = pDot11dInfo->MaxTxPwrDbmList[Channel];
- }
return MaxTxPwrInDbm;
}
-void
-DOT11D_ScanComplete(
- struct ieee80211_device *dev
- )
+void DOT11D_ScanComplete(struct ieee80211_device *dev)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
@@ -160,7 +142,7 @@ DOT11D_ScanComplete(
case DOT11D_STATE_DONE:
if (GET_CIE_WATCHDOG(dev) == 0) {
- // Reset country IE if previous one is gone.
+ /* Reset country IE if previous one is gone. */
Dot11d_Reset(dev);
}
break;
@@ -169,15 +151,12 @@ DOT11D_ScanComplete(
}
}
-int IsLegalChannel(
- struct ieee80211_device *dev,
- u8 channel
-)
+int IsLegalChannel(struct ieee80211_device *dev, u8 channel)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
if (MAX_CHANNEL_NUMBER < channel) {
- printk("IsLegalChannel(): Invalid Channel\n");
+ netdev_info(dev->dev, "IsLegalChannel(): Invalid Channel\n");
return 0;
}
if (pDot11dInfo->channel_map[channel] > 0)
@@ -185,10 +164,7 @@ int IsLegalChannel(
return 0;
}
-int ToLegalChannel(
- struct ieee80211_device *dev,
- u8 channel
-)
+int ToLegalChannel(struct ieee80211_device *dev, u8 channel)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 default_chn = 0;
@@ -202,7 +178,7 @@ int ToLegalChannel(
}
if (MAX_CHANNEL_NUMBER < channel) {
- printk("IsLegalChannel(): Invalid Channel\n");
+ netdev_info(dev->dev, "IsLegalChannel(): Invalid Channel\n");
return default_chn;
}
diff --git a/drivers/staging/rtl8187se/ieee80211/dot11d.h b/drivers/staging/rtl8187se/ieee80211/dot11d.h
index 029c2cab1e00..63f4f3c72f10 100644
--- a/drivers/staging/rtl8187se/ieee80211/dot11d.h
+++ b/drivers/staging/rtl8187se/ieee80211/dot11d.h
@@ -3,9 +3,9 @@
#include "ieee80211.h"
-//#define ENABLE_DOT11D
+/* #define ENABLE_DOT11D */
-//#define DOT11D_MAX_CHNL_NUM 83
+/* #define DOT11D_MAX_CHNL_NUM 83 */
typedef struct _CHNL_TXPOWER_TRIPLE {
u8 FirstChnl;
@@ -20,18 +20,18 @@ typedef enum _DOT11D_STATE {
}DOT11D_STATE;
typedef struct _RT_DOT11D_INFO {
- //DECLARE_RT_OBJECT(RT_DOT11D_INFO);
+ /* DECLARE_RT_OBJECT(RT_DOT11D_INFO); */
- bool bEnabled; // dot11MultiDomainCapabilityEnabled
+ bool bEnabled; /* dot11MultiDomainCapabilityEnabled */
- u16 CountryIeLen; // > 0 if CountryIeBuf[] contains valid country information element.
+ u16 CountryIeLen; /* > 0 if CountryIeBuf[] contains valid country information element. */
u8 CountryIeBuf[MAX_IE_LEN];
- u8 CountryIeSrcAddr[6]; // Source AP of the country IE.
+ u8 CountryIeSrcAddr[6]; /* Source AP of the country IE. */
u8 CountryIeWatchdog;
- u8 channel_map[MAX_CHANNEL_NUMBER+1]; //!!!Value 0: Invalid, 1: Valid (active scan), 2: Valid (passive scan)
- //u8 ChnlListLen; // #Bytes valid in ChnlList[].
- //u8 ChnlList[DOT11D_MAX_CHNL_NUM];
+ u8 channel_map[MAX_CHANNEL_NUMBER+1]; /* !!!Value 0: Invalid, 1: Valid (active scan), 2: Valid (passive scan) */
+ /* u8 ChnlListLen; // #Bytes valid in ChnlList[]. */
+ /* u8 ChnlList[DOT11D_MAX_CHNL_NUM]; */
u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER+1];
DOT11D_STATE State;
@@ -58,43 +58,13 @@ typedef struct _RT_DOT11D_INFO {
#define IS_DOT11D_STATE_DONE(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->State == DOT11D_STATE_DONE)
+void Dot11d_Init(struct ieee80211_device *dev);
+void Dot11d_Reset(struct ieee80211_device *dev);
+void Dot11d_UpdateCountryIe(struct ieee80211_device *dev, u8 *pTaddr,
+ u16 CoutryIeLen, u8 *pCoutryIe);
+u8 DOT11D_GetMaxTxPwrInDbm(struct ieee80211_device *dev, u8 Channel);
+void DOT11D_ScanComplete(struct ieee80211_device *dev);
+int IsLegalChannel(struct ieee80211_device *dev, u8 channel);
+int ToLegalChannel(struct ieee80211_device *dev, u8 channel);
-void
-Dot11d_Init(
- struct ieee80211_device *dev
- );
-
-void
-Dot11d_Reset(
- struct ieee80211_device *dev
- );
-
-void
-Dot11d_UpdateCountryIe(
- struct ieee80211_device *dev,
- u8 * pTaddr,
- u16 CoutryIeLen,
- u8 * pCoutryIe
- );
-
-u8
-DOT11D_GetMaxTxPwrInDbm(
- struct ieee80211_device *dev,
- u8 Channel
- );
-
-void
-DOT11D_ScanComplete(
- struct ieee80211_device * dev
- );
-
-int IsLegalChannel(
- struct ieee80211_device * dev,
- u8 channel
-);
-
-int ToLegalChannel(
- struct ieee80211_device * dev,
- u8 channel
-);
-#endif // #ifndef __INC_DOT11D_H
+#endif /* #ifndef __INC_DOT11D_H */
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
index 7f015499cfae..09ffd9bc8991 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
@@ -90,6 +90,9 @@
#define IEEE_CRYPT_ALG_NAME_LEN 16
+extern int ieee80211_crypto_tkip_init(void);
+extern void ieee80211_crypto_tkip_exit(void);
+
//by amy for ps
typedef struct ieee_param {
u32 cmd;
@@ -1237,7 +1240,8 @@ static inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
return 1;
}
-static inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mode)
+static inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee,
+ int mode)
{
/*
* It is possible for both access points and our device to support
@@ -1300,19 +1304,16 @@ extern int ieee80211_set_encryption(struct ieee80211_device *ieee);
/* ieee80211_tx.c */
-extern int ieee80211_encrypt_fragment(
- struct ieee80211_device *ieee,
- struct sk_buff *frag,
- int hdr_len);
+extern int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
+ struct sk_buff *frag, int hdr_len);
-extern int ieee80211_rtl_xmit(struct sk_buff *skb,
- struct net_device *dev);
+extern int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev);
extern void ieee80211_txb_free(struct ieee80211_txb *);
/* ieee80211_rx.c */
extern int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats);
+ struct ieee80211_rx_stats *rx_stats);
extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
struct ieee80211_hdr_4addr *header,
struct ieee80211_rx_stats *stats);
@@ -1328,25 +1329,28 @@ extern int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *key);
extern int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data* wrqu, char *extra);
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- struct iw_param *data, char *extra);
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra);
int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len);
/* ieee80211_softmac.c */
extern short ieee80211_is_54g(const struct ieee80211_network *net);
extern short ieee80211_is_shortslot(const struct ieee80211_network *net);
-extern int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats, u16 type,
- u16 stype);
-extern void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net);
-
-extern void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *ieee);
+extern int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
+ struct sk_buff *skb,
+ struct ieee80211_rx_stats *rx_stats,
+ u16 type, u16 stype);
+extern void ieee80211_softmac_new_net(struct ieee80211_device *ieee,
+ struct ieee80211_network *net);
+
+extern void ieee80211_softmac_xmit(struct ieee80211_txb *txb,
+ struct ieee80211_device *ieee);
extern void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee);
extern void ieee80211_start_bss(struct ieee80211_device *ieee);
extern void ieee80211_start_master_bss(struct ieee80211_device *ieee);
@@ -1368,16 +1372,17 @@ extern void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee);
extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
-extern int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_point *p);
+extern int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee,
+ struct iw_point *p);
extern void notify_wx_assoc_event(struct ieee80211_device *ieee);
extern void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success);
-extern void SendDisassociation(struct ieee80211_device *ieee,u8* asSta,u8 asRsn);
+extern void SendDisassociation(struct ieee80211_device *ieee, u8 *asSta,
+ u8 asRsn);
extern void ieee80211_rtl_start_scan(struct ieee80211_device *ieee);
//Add for RF power on power off by lizhaoming 080512
-extern void SendDisassociation(struct ieee80211_device *ieee,
- u8* asSta,
- u8 asRsn);
+extern void SendDisassociation(struct ieee80211_device *ieee, u8 *asSta,
+ u8 asRsn);
/* ieee80211_crypt_ccmp&tkip&wep.c */
extern void ieee80211_tkip_null(void);
@@ -1386,64 +1391,72 @@ extern void ieee80211_ccmp_null(void);
/* ieee80211_softmac_wx.c */
extern int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *ext);
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *ext);
extern int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra);
+ struct iw_request_info *info,
+ union iwreq_data *awrq,
+ char *extra);
-extern int ieee80211_wx_get_essid(struct ieee80211_device *ieee, struct iw_request_info *a,union iwreq_data *wrqu,char *b);
+extern int ieee80211_wx_get_essid(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
extern int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
extern int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+extern int ieee80211_wx_set_mode(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
-extern int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+extern int ieee80211_wx_set_scan(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
extern int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra);
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_get_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+extern int ieee80211_wx_get_mode(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
-extern int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+extern int ieee80211_wx_set_freq(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
-extern int ieee80211_wx_get_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+extern int ieee80211_wx_get_freq(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
extern void ieee80211_wx_sync_scan_wq(struct work_struct *work);
extern int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
extern int ieee80211_wx_get_name(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-extern int ieee80211_wx_set_power(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra);
+extern int ieee80211_wx_set_power(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+
extern int ieee80211_wx_get_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
extern void ieee80211_softmac_ips_scan_syncro(struct ieee80211_device *ieee);
-extern void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee, short pwr);
+extern void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee,
+ short pwr);
extern const long ieee80211_wlan_frequencies[];
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
index 694eae3d4fda..101f0c0cdb0a 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
@@ -15,7 +15,6 @@
//#include <linux/config.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -39,8 +38,7 @@ struct ieee80211_crypto {
static struct ieee80211_crypto *hcrypt;
-void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee,
- int force)
+void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force)
{
struct list_head *ptr, *n;
struct ieee80211_crypt_data *entry;
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
index f5949e89e5c2..c8013d373a77 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
@@ -11,9 +11,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-//#include <linux/config.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/skbuff.h>
@@ -61,7 +59,7 @@ struct ieee80211_ccmp_data {
};
void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
- const u8 pt[16], u8 ct[16])
+ const u8 pt[16], u8 ct[16])
{
crypto_cipher_encrypt_one((void *)tfm, ct, pt);
}
@@ -130,7 +128,6 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
(WLAN_FC_GET_STYPE(fc) & 0x08));
*/
- // fixed by David :2006.9.6
qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
(WLAN_FC_GET_STYPE(fc) & 0x80));
aad_len = 22;
@@ -212,7 +209,6 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos = skb_push(skb, CCMP_HDR_LEN);
memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
pos += hdr_len;
-// mic = skb_put(skb, CCMP_MIC_LEN);
i = CCMP_PN_LEN - 1;
while (i >= 0) {
@@ -232,7 +228,6 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = key->tx_pn[0];
hdr = (struct ieee80211_hdr_4addr *)skb->data;
- //mic is moved to here by john
mic = skb_put(skb, CCMP_MIC_LEN);
ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
@@ -416,9 +411,8 @@ static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
static char *ieee80211_ccmp_print_stats(char *p, void *priv)
{
struct ieee80211_ccmp_data *ccmp = priv;
- p += sprintf(p, "key[%d] alg=CCMP key_set=%d "
- "tx_pn=%pm rx_pn=%pm "
- "format_errors=%d replays=%d decrypt_errors=%d\n",
+ p += sprintf(p,
+ "key[%d] alg=CCMP key_set=%d tx_pn=%pm rx_pn=%pm format_errors=%d replays=%d decrypt_errors=%d\n",
ccmp->key_idx, ccmp->key_set,
ccmp->tx_pn, ccmp->rx_pn,
ccmp->dot11RSNAStatsCCMPFormatErrors,
@@ -430,7 +424,6 @@ static char *ieee80211_ccmp_print_stats(char *p, void *priv)
void ieee80211_ccmp_null(void)
{
-// printk("============>%s()\n", __func__);
return;
}
static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = {
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
index da24e430ca13..c5907968e1a7 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
@@ -9,9 +9,7 @@
* more details.
*/
-//#include <linux/config.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/skbuff.h>
@@ -65,7 +63,7 @@ struct ieee80211_tkip_data {
u8 rx_hdr[16], tx_hdr[16];
};
-static void * ieee80211_tkip_init(int key_idx)
+static void *ieee80211_tkip_init(int key_idx)
{
struct ieee80211_tkip_data *priv;
@@ -304,8 +302,8 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
- struct ieee80211_tkip_data *tkey = priv;
- struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
+ struct ieee80211_tkip_data *tkey = priv;
+ struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
int len;
u8 *pos;
struct ieee80211_hdr_4addr *hdr;
@@ -467,27 +465,27 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return keyidx;
}
-static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr,
- u8 * data, size_t data_len, u8 * mic)
+static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
+ u8 *data, size_t data_len, u8 *mic)
{
- struct hash_desc desc;
- struct scatterlist sg[2];
+ struct hash_desc desc;
+ struct scatterlist sg[2];
- if (tfm_michael == NULL) {
- printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
- return -1;
- }
+ if (tfm_michael == NULL) {
+ printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
+ return -1;
+ }
sg_init_table(sg, 2);
sg_set_buf(&sg[0], hdr, 16);
sg_set_buf(&sg[1], data, data_len);
- if (crypto_hash_setkey(tfm_michael, key, 8))
- return -1;
+ if (crypto_hash_setkey(tfm_michael, key, 8))
+ return -1;
- desc.tfm = tfm_michael;
- desc.flags = 0;
- return crypto_hash_digest(&desc, sg, data_len + 16, mic);
+ desc.tfm = tfm_michael;
+ desc.flags = 0;
+ return crypto_hash_digest(&desc, sg, data_len + 16, mic);
}
static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
@@ -521,7 +519,8 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
}
-static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
+static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len,
+ void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
u8 *pos;
@@ -538,12 +537,9 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *pri
michael_mic_hdr(skb, tkey->tx_hdr);
- // { david, 2006.9.1
- // fix the wpa process with wmm enabled.
if(IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) {
tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
}
- // }
pos = skb_put(skb, 8);
if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
@@ -554,8 +550,8 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *pri
}
static void ieee80211_michael_mic_failure(struct net_device *dev,
- struct ieee80211_hdr_4addr *hdr,
- int keyidx)
+ struct ieee80211_hdr_4addr *hdr,
+ int keyidx)
{
union iwreq_data wrqu;
struct iw_michaelmicfailure ev;
@@ -575,7 +571,7 @@ static void ieee80211_michael_mic_failure(struct net_device *dev,
}
static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
- int hdr_len, void *priv)
+ int hdr_len, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
u8 mic[8];
@@ -587,12 +583,9 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
return -1;
michael_mic_hdr(skb, tkey->rx_hdr);
- // { david, 2006.9.1
- // fix the wpa process with wmm enabled.
if(IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) {
tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
}
- // }
if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
@@ -688,7 +681,7 @@ static int ieee80211_tkip_get_key(void *key, int len, u8 *seq, void *priv)
}
-static char * ieee80211_tkip_print_stats(char *p, void *priv)
+static char *ieee80211_tkip_print_stats(char *p, void *priv)
{
struct ieee80211_tkip_data *tkip = priv;
p += sprintf(p, "key[%d] alg=TKIP key_set=%d "
@@ -746,6 +739,4 @@ void ieee80211_crypto_tkip_exit(void)
void ieee80211_tkip_null(void)
{
-// printk("============>%s()\n", __func__);
- return;
}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
index bba77141d9a3..f114f9a33e17 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
@@ -13,7 +13,6 @@
//#include <linux/config.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/skbuff.h>
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
index 304579096562..b522b57a2691 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
@@ -337,8 +337,9 @@ ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb,
/* Called only as a tasklet (software IRQ), by ieee80211_rx */
static inline int
-ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *skb,
- int keyidx, struct ieee80211_crypt_data *crypt)
+ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee,
+ struct sk_buff *skb, int keyidx,
+ struct ieee80211_crypt_data *crypt)
{
struct ieee80211_hdr_4addr *hdr;
int res, hdrlen;
@@ -366,7 +367,7 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *s
/* this function is stolen from ipw2200 driver*/
#define IEEE_PACKET_RETRY_TIME (5*HZ)
static int is_duplicate_packet(struct ieee80211_device *ieee,
- struct ieee80211_hdr_4addr *header)
+ struct ieee80211_hdr_4addr *header)
{
u16 fc = le16_to_cpu(header->frame_ctl);
u16 sc = le16_to_cpu(header->seq_ctl);
@@ -467,7 +468,7 @@ drop:
* IEEE 802.11 format, i.e., in the format it was sent over air.
* This function is called only as a tasklet (software IRQ). */
int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats)
+ struct ieee80211_rx_stats *rx_stats)
{
struct net_device *dev = ieee->dev;
//struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
@@ -794,9 +795,7 @@ static inline int ieee80211_is_ofdm_rate(u8 rate)
return 0;
}
-static inline int ieee80211_SignalStrengthTranslate(
- int CurrSS
- )
+static inline int ieee80211_SignalStrengthTranslate(int CurrSS)
{
int RetSS;
@@ -831,12 +830,10 @@ static inline int ieee80211_SignalStrengthTranslate(
return RetSS;
}
-static inline void ieee80211_extract_country_ie(
- struct ieee80211_device *ieee,
- struct ieee80211_info_element *info_element,
- struct ieee80211_network *network,
- u8 *addr2
-)
+static inline void
+ieee80211_extract_country_ie(struct ieee80211_device *ieee,
+ struct ieee80211_info_element *info_element,
+ struct ieee80211_network *network, u8 *addr2)
{
if (IS_DOT11D_ENABLE(ieee)) {
if (info_element->len != 0) {
@@ -858,10 +855,8 @@ static inline void ieee80211_extract_country_ie(
}
-static int
-ieee80211_TranslateToDbm(
- unsigned char SignalStrengthIndex // 0-100 index.
- )
+/* SignalStrengthIndex is 0-100 */
+static int ieee80211_TranslateToDbm(unsigned char SignalStrengthIndex)
{
unsigned char SignalPower; // in dBm.
@@ -1197,7 +1192,7 @@ static inline int is_same_network(struct ieee80211_network *src,
}
inline void update_network(struct ieee80211_network *dst,
- struct ieee80211_network *src)
+ struct ieee80211_network *src)
{
unsigned char quality = src->stats.signalstrength;
unsigned char signal = 0;
@@ -1281,10 +1276,10 @@ inline void update_network(struct ieee80211_network *dst,
}
-inline void ieee80211_process_probe_response(
- struct ieee80211_device *ieee,
- struct ieee80211_probe_response *beacon,
- struct ieee80211_rx_stats *stats)
+inline void
+ieee80211_process_probe_response(struct ieee80211_device *ieee,
+ struct ieee80211_probe_response *beacon,
+ struct ieee80211_rx_stats *stats)
{
struct ieee80211_network network;
struct ieee80211_network *target;
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index 029070603f66..c27392d8b640 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -20,17 +20,17 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/etherdevice.h>
#include "dot11d.h"
u8 rsn_authen_cipher_suite[16][4] = {
- {0x00,0x0F,0xAC,0x00}, //Use group key, //Reserved
- {0x00,0x0F,0xAC,0x01}, //WEP-40 //RSNA default
- {0x00,0x0F,0xAC,0x02}, //TKIP //NONE //{used just as default}
- {0x00,0x0F,0xAC,0x03}, //WRAP-historical
- {0x00,0x0F,0xAC,0x04}, //CCMP
- {0x00,0x0F,0xAC,0x05}, //WEP-104
+ {0x00, 0x0F, 0xAC, 0x00}, //Use group key, //Reserved
+ {0x00, 0x0F, 0xAC, 0x01}, //WEP-40 //RSNA default
+ {0x00, 0x0F, 0xAC, 0x02}, //TKIP //NONE //{used just as default}
+ {0x00, 0x0F, 0xAC, 0x03}, //WRAP-historical
+ {0x00, 0x0F, 0xAC, 0x04}, //CCMP
+ {0x00, 0x0F, 0xAC, 0x05}, //WEP-104
};
short ieee80211_is_54g(const struct ieee80211_network *net)
@@ -47,7 +47,7 @@ short ieee80211_is_shortslot(const struct ieee80211_network *net)
* tag and the EXTENDED RATE MFIE tag if needed.
* It encludes two bytes per tag for the tag itself and its len
*/
-unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
+static unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
{
unsigned int rate_len = 0;
@@ -65,7 +65,7 @@ unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
* Then it updates the pointer so that
* it points after the new MFIE tag added.
*/
-void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
+static void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
@@ -82,7 +82,7 @@ void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
*tag_p = tag;
}
-void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
+static void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
@@ -106,7 +106,8 @@ void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
}
-void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p) {
+static void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p)
+{
u8 *tag = *tag_p;
*tag++ = MFIE_TYPE_GENERIC; //0
@@ -118,35 +119,33 @@ void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p) {
*tag++ = 0x00;
*tag++ = 0x01;
#ifdef SUPPORT_USPD
- if(ieee->current_network.wmm_info & 0x80) {
+ if (ieee->current_network.wmm_info & 0x80)
*tag++ = 0x0f|MAX_SP_Len;
- } else {
+ else
*tag++ = MAX_SP_Len;
- }
#else
*tag++ = MAX_SP_Len;
#endif
*tag_p = tag;
}
-void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p) {
+static void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p)
+{
u8 *tag = *tag_p;
-
- *tag++ = MFIE_TYPE_GENERIC; //0
- *tag++ = 7;
- *tag++ = 0x00;
- *tag++ = 0xe0;
- *tag++ = 0x4c;
- *tag++ = 0x01;//5
- *tag++ = 0x02;
- *tag++ = 0x11;
+ *tag++ = MFIE_TYPE_GENERIC; /* 0 */
+ *tag++ = 7;
+ *tag++ = 0x00;
+ *tag++ = 0xe0;
+ *tag++ = 0x4c;
+ *tag++ = 0x01; /* 5 */
+ *tag++ = 0x02;
+ *tag++ = 0x11;
*tag++ = 0x00;
-
*tag_p = tag;
printk(KERN_ALERT "This is enable turbo mode IE process\n");
}
-void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
+static void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
{
int nh;
nh = (ieee->mgmt_queue_head +1) % MGMT_QUEUE_NUM;
@@ -164,7 +163,7 @@ void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
//return 0;
}
-struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
+static struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
{
struct sk_buff *ret;
@@ -179,7 +178,7 @@ struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
return ret;
}
-void init_mgmt_queue(struct ieee80211_device *ieee)
+static void init_mgmt_queue(struct ieee80211_device *ieee)
{
ieee->mgmt_queue_tail = ieee->mgmt_queue_head = 0;
}
@@ -187,7 +186,8 @@ void init_mgmt_queue(struct ieee80211_device *ieee)
void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl);
-inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee)
+inline void softmac_mgmt_xmit(struct sk_buff *skb,
+ struct ieee80211_device *ieee)
{
unsigned long flags;
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
@@ -238,7 +238,8 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
}
-inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee)
+inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
+ struct ieee80211_device *ieee)
{
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
@@ -276,10 +277,9 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
// dev_kfree_skb_any(skb);//edit by thomas
}
//by amy for power save
-inline struct sk_buff *ieee80211_disassociate_skb(
- struct ieee80211_network *beacon,
- struct ieee80211_device *ieee,
- u8 asRsn)
+inline struct sk_buff *
+ieee80211_disassociate_skb(struct ieee80211_network *beacon,
+ struct ieee80211_device *ieee, u8 asRsn)
{
struct sk_buff *skb;
struct ieee80211_disassoc_frame *disass;
@@ -299,12 +299,7 @@ inline struct sk_buff *ieee80211_disassociate_skb(
disass->reasoncode = asRsn;
return skb;
}
-void
-SendDisassociation(
- struct ieee80211_device *ieee,
- u8* asSta,
- u8 asRsn
-)
+void SendDisassociation(struct ieee80211_device *ieee, u8 *asSta, u8 asRsn)
{
struct ieee80211_network *beacon = &ieee->current_network;
struct sk_buff *skb;
@@ -379,7 +374,7 @@ void ext_ieee80211_send_beacon_wq(struct ieee80211_device *ieee)
//spin_unlock_irqrestore(&ieee->beacon_lock,flags);
}
-void ieee80211_send_beacon(struct ieee80211_device *ieee)
+static void ieee80211_send_beacon(struct ieee80211_device *ieee)
{
struct sk_buff *skb;
@@ -404,7 +399,7 @@ void ieee80211_send_beacon(struct ieee80211_device *ieee)
}
-void ieee80211_send_beacon_cb(unsigned long _ieee)
+static void ieee80211_send_beacon_cb(unsigned long _ieee)
{
struct ieee80211_device *ieee =
(struct ieee80211_device *) _ieee;
@@ -415,7 +410,7 @@ void ieee80211_send_beacon_cb(unsigned long _ieee)
spin_unlock_irqrestore(&ieee->beacon_lock, flags);
}
-void ieee80211_send_probe(struct ieee80211_device *ieee)
+static void ieee80211_send_probe(struct ieee80211_device *ieee)
{
struct sk_buff *skb;
@@ -427,7 +422,7 @@ void ieee80211_send_probe(struct ieee80211_device *ieee)
}
}
-void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
+static void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
{
if (ieee->active_scan && (ieee->softmac_features & IEEE_SOFTMAC_PROBERQ)){
ieee80211_send_probe(ieee);
@@ -438,7 +433,7 @@ void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
/* this performs syncro scan blocking the caller until all channels
* in the allowed channel map has been checked.
*/
-void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
+static void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
{
short ch = 0;
u8 channel_map[MAX_CHANNEL_NUMBER+1];
@@ -576,7 +571,7 @@ out:
DOT11D_ScanComplete(ieee);
}
-void ieee80211_softmac_scan_wq(struct work_struct *work)
+static void ieee80211_softmac_scan_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
@@ -619,7 +614,7 @@ out:
return;
}
-void ieee80211_beacons_start(struct ieee80211_device *ieee)
+static void ieee80211_beacons_start(struct ieee80211_device *ieee)
{
unsigned long flags;
@@ -631,7 +626,7 @@ void ieee80211_beacons_start(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->beacon_lock,flags);
}
-void ieee80211_beacons_stop(struct ieee80211_device *ieee)
+static void ieee80211_beacons_stop(struct ieee80211_device *ieee)
{
unsigned long flags;
@@ -663,7 +658,7 @@ void ieee80211_start_send_beacons(struct ieee80211_device *ieee)
}
-void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
+static void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
{
// unsigned long flags;
@@ -735,8 +730,9 @@ void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
}
-inline struct sk_buff *ieee80211_authentication_req(struct ieee80211_network *beacon,
- struct ieee80211_device *ieee, int challengelen)
+inline struct sk_buff *
+ieee80211_authentication_req(struct ieee80211_network *beacon,
+ struct ieee80211_device *ieee, int challengelen)
{
struct sk_buff *skb;
struct ieee80211_authentication *auth;
@@ -768,7 +764,8 @@ inline struct sk_buff *ieee80211_authentication_req(struct ieee80211_network *be
}
-static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *dest)
+static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee,
+ u8 *dest)
{
u8 *tag;
int beacon_size;
@@ -969,7 +966,7 @@ static struct sk_buff *ieee80211_auth_resp(struct ieee80211_device *ieee,
}
-struct sk_buff* ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
+static struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee, short pwr)
{
struct sk_buff *skb;
struct ieee80211_hdr_3addr* hdr;
@@ -995,7 +992,7 @@ struct sk_buff* ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
}
-void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8* dest)
+static void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8 *dest)
{
struct sk_buff *buf = ieee80211_assoc_resp(ieee, dest);
@@ -1006,7 +1003,7 @@ void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8* dest)
}
-void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8* dest)
+static void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8 *dest)
{
struct sk_buff *buf = ieee80211_auth_resp(ieee, s, dest);
@@ -1017,7 +1014,7 @@ void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8* dest)
}
-void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
+static void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
{
struct sk_buff *buf = ieee80211_probe_resp(ieee, dest);
@@ -1029,7 +1026,9 @@ void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
}
-inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beacon,struct ieee80211_device *ieee)
+inline struct sk_buff *
+ieee80211_association_req(struct ieee80211_network *beacon,
+ struct ieee80211_device *ieee)
{
struct sk_buff *skb;
//unsigned long flags;
@@ -1164,13 +1163,13 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-void ieee80211_associate_abort_cb(unsigned long dev)
+static void ieee80211_associate_abort_cb(unsigned long dev)
{
ieee80211_associate_abort((struct ieee80211_device *) dev);
}
-void ieee80211_associate_step1(struct ieee80211_device *ieee)
+static void ieee80211_associate_step1(struct ieee80211_device *ieee)
{
struct ieee80211_network *beacon = &ieee->current_network;
struct sk_buff *skb;
@@ -1200,7 +1199,8 @@ void ieee80211_associate_step1(struct ieee80211_device *ieee)
}
}
-void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
+static void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge,
+ int chlen)
{
u8 *c;
struct sk_buff *skb;
@@ -1234,7 +1234,7 @@ void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge,
kfree(challenge);
}
-void ieee80211_associate_step2(struct ieee80211_device *ieee)
+static void ieee80211_associate_step2(struct ieee80211_device *ieee)
{
struct sk_buff* skb;
struct ieee80211_network *beacon = &ieee->current_network;
@@ -1256,7 +1256,7 @@ void ieee80211_associate_step2(struct ieee80211_device *ieee)
}
}
-void ieee80211_associate_complete_wq(struct work_struct *work)
+static void ieee80211_associate_complete_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq);
@@ -1277,7 +1277,7 @@ void ieee80211_associate_complete_wq(struct work_struct *work)
netif_carrier_on(ieee->dev);
}
-void ieee80211_associate_complete(struct ieee80211_device *ieee)
+static void ieee80211_associate_complete(struct ieee80211_device *ieee)
{
int i;
del_timer_sync(&ieee->associate_timer);
@@ -1291,7 +1291,7 @@ void ieee80211_associate_complete(struct ieee80211_device *ieee)
queue_work(ieee->wq, &ieee->associate_complete_wq);
}
-void ieee80211_associate_procedure_wq(struct work_struct *work)
+static void ieee80211_associate_procedure_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_procedure_wq);
@@ -1310,7 +1310,8 @@ void ieee80211_associate_procedure_wq(struct work_struct *work)
up(&ieee->wx_sem);
}
-inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net)
+inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee,
+ struct ieee80211_network *net)
{
u8 tmp_ssid[IW_ESSID_MAX_SIZE+1];
int tmp_ssid_len = 0;
@@ -1423,7 +1424,7 @@ void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
}
-static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen)
+static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
{
struct ieee80211_authentication *a;
u8 *t;
@@ -1449,7 +1450,7 @@ static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen)
}
-int auth_rq_parse(struct sk_buff *skb,u8* dest)
+static int auth_rq_parse(struct sk_buff *skb, u8 *dest)
{
struct ieee80211_authentication *a;
@@ -1467,7 +1468,8 @@ int auth_rq_parse(struct sk_buff *skb,u8* dest)
return WLAN_STATUS_SUCCESS;
}
-static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb, u8 *src)
+static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
+ u8 *src)
{
u8 *tag;
u8 *skbend;
@@ -1505,7 +1507,7 @@ static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
}
-int assoc_rq_parse(struct sk_buff *skb,u8* dest)
+static int assoc_rq_parse(struct sk_buff *skb, u8 *dest)
{
struct ieee80211_assoc_request_frame *a;
@@ -1536,8 +1538,8 @@ static inline u16 assoc_parse(struct sk_buff *skb, int *aid)
return le16_to_cpu(a->status);
}
-static inline void
-ieee80211_rx_probe_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
+static inline void ieee80211_rx_probe_rq(struct ieee80211_device *ieee,
+ struct sk_buff *skb)
{
u8 dest[ETH_ALEN];
@@ -1551,8 +1553,8 @@ ieee80211_rx_probe_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
}
}
-inline void
-ieee80211_rx_auth_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
+inline void ieee80211_rx_auth_rq(struct ieee80211_device *ieee,
+ struct sk_buff *skb)
{
u8 dest[ETH_ALEN];
int status;
@@ -1595,7 +1597,8 @@ void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee, short pwr)
}
-short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h, u32 *time_l)
+static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
+ u32 *time_l)
{
int timeout = 0;
@@ -1648,7 +1651,7 @@ short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h, u32 *ti
}
-inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
+static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
{
u32 th,tl;
@@ -1770,10 +1773,10 @@ void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-inline int
-ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats, u16 type,
- u16 stype)
+inline int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
+ struct sk_buff *skb,
+ struct ieee80211_rx_stats *rx_stats,
+ u16 type, u16 stype)
{
struct ieee80211_hdr_3addr *header = (struct ieee80211_hdr_3addr *) skb->data;
u16 errcode;
@@ -1976,7 +1979,8 @@ associate_complete:
* to the driver later, when it wakes the queue.
*/
-void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *ieee)
+void ieee80211_softmac_xmit(struct ieee80211_txb *txb,
+ struct ieee80211_device *ieee)
{
@@ -2013,7 +2017,7 @@ void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *
}
/* called with ieee->lock acquired */
-void ieee80211_resume_tx(struct ieee80211_device *ieee)
+static void ieee80211_resume_tx(struct ieee80211_device *ieee)
{
int i;
for(i = ieee->tx_pending.frag; i < ieee->tx_pending.txb->nr_frags; i++) {
@@ -2143,7 +2147,7 @@ void ieee80211_start_master_bss(struct ieee80211_device *ieee)
netif_carrier_on(ieee->dev);
}
-void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
+static void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
{
if(ieee->raw_tx){
@@ -2154,7 +2158,7 @@ void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
}
}
-void ieee80211_start_ibss_wq(struct work_struct *work)
+static void ieee80211_start_ibss_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq);
@@ -2327,7 +2331,7 @@ void ieee80211_disassociate(struct ieee80211_device *ieee)
ieee->state = IEEE80211_NOLINK;
}
-void ieee80211_associate_retry_wq(struct work_struct *work)
+static void ieee80211_associate_retry_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
@@ -2619,7 +2623,8 @@ static int ieee80211_wpa_enable(struct ieee80211_device *ieee, int value)
}
-void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee, char *wpa_ie, int wpa_ie_len)
+static void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee, char *wpa_ie,
+ int wpa_ie_len)
{
/* make sure WPA is enabled */
ieee80211_wpa_enable(ieee, 1);
@@ -2628,7 +2633,8 @@ void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee, char *wpa_ie, int
}
-static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command, int reason)
+static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command,
+ int reason)
{
int ret = 0;
@@ -2652,7 +2658,7 @@ static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command, int re
static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee,
- struct ieee_param *param, int plen)
+ struct ieee_param *param, int plen)
{
u8 *buf;
@@ -2706,7 +2712,8 @@ static int ieee80211_wpa_set_auth_algs(struct ieee80211_device *ieee, int value)
return ret;
}
-static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name, u32 value)
+static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name,
+ u32 value)
{
int ret=0;
unsigned long flags;
@@ -2784,7 +2791,7 @@ static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name, u32 v
/* implementation borrowed from hostap driver */
static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
- struct ieee_param *param, int param_len)
+ struct ieee_param *param, int param_len)
{
int ret = 0;
@@ -2931,7 +2938,8 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
return ret;
}
-int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_point *p)
+int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee,
+ struct iw_point *p)
{
struct ieee_param *param;
int ret=0;
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
index e5282068e3de..46f35644126c 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
@@ -28,8 +28,9 @@ const long ieee80211_wlan_frequencies[] = {
};
-int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
+int ieee80211_wx_set_freq(struct ieee80211_device *ieee,
+ struct iw_request_info *a, union iwreq_data *wrqu,
+ char *b)
{
int ret;
struct iw_freq *fwrq = &wrqu->freq;
@@ -82,8 +83,8 @@ out:
int ieee80211_wx_get_freq(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
+ struct iw_request_info *a, union iwreq_data *wrqu,
+ char *b)
{
struct iw_freq *fwrq = &wrqu->freq;
@@ -97,8 +98,8 @@ int ieee80211_wx_get_freq(struct ieee80211_device *ieee,
}
int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *extra)
{
unsigned long flags;
@@ -126,8 +127,7 @@ int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *awrq,
+ struct iw_request_info *info, union iwreq_data *awrq,
char *extra)
{
@@ -174,8 +174,9 @@ out:
return ret;
}
-int ieee80211_wx_get_essid(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
+int ieee80211_wx_get_essid(struct ieee80211_device *ieee,
+ struct iw_request_info *a, union iwreq_data *wrqu,
+ char *b)
{
int len, ret = 0;
unsigned long flags;
@@ -211,8 +212,8 @@ out:
}
int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *extra)
{
u32 target_rate = wrqu->bitrate.value;
@@ -230,8 +231,8 @@ int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *extra)
{
wrqu->bitrate.value = ieee->rate * 100000;
@@ -239,8 +240,9 @@ int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
return 0;
}
-int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
+int ieee80211_wx_set_mode(struct ieee80211_device *ieee,
+ struct iw_request_info *a, union iwreq_data *wrqu,
+ char *b)
{
ieee->sync_scan_hurryup = 1;
@@ -305,8 +307,9 @@ void ieee80211_wx_sync_scan_wq(struct work_struct *work)
}
-int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
+int ieee80211_wx_set_scan(struct ieee80211_device *ieee,
+ struct iw_request_info *a, union iwreq_data *wrqu,
+ char *b)
{
int ret = 0;
@@ -333,8 +336,8 @@ out:
}
int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *a, union iwreq_data *wrqu,
+ char *extra)
{
int ret = 0, len;
@@ -395,8 +398,9 @@ out:
return ret;
}
-int ieee80211_wx_get_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
+int ieee80211_wx_get_mode(struct ieee80211_device *ieee,
+ struct iw_request_info *a, union iwreq_data *wrqu,
+ char *b)
{
wrqu->mode = ieee->iw_mode;
@@ -404,8 +408,8 @@ int ieee80211_wx_get_mode(struct ieee80211_device *ieee, struct iw_request_info
}
int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *extra)
{
int *parms = (int *)extra;
@@ -440,8 +444,8 @@ int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
}
int ieee80211_wx_get_name(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *extra)
{
strlcpy(wrqu->name, "802.11", IFNAMSIZ);
if (ieee->modulation & IEEE80211_CCK_MODULATION) {
@@ -464,8 +468,8 @@ int ieee80211_wx_get_name(struct ieee80211_device *ieee,
/* this is mostly stolen from hostap */
int ieee80211_wx_set_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *extra)
{
int ret = 0;
@@ -525,8 +529,8 @@ exit:
/* this is stolen from hostap */
int ieee80211_wx_get_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *extra)
{
int ret = 0;
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
index f5a5219fe14d..0dc5ae414270 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
@@ -177,10 +177,8 @@ static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
return SNAP_SIZE + sizeof(u16);
}
-int ieee80211_encrypt_fragment(
- struct ieee80211_device *ieee,
- struct sk_buff *frag,
- int hdr_len)
+int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
+ struct sk_buff *frag, int hdr_len)
{
struct ieee80211_crypt_data* crypt = ieee->crypt[ieee->tx_keyidx];
int res;
@@ -279,8 +277,8 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
* Classify the to-be send data packet
* Need to acquire the sent queue index.
*/
-static int
-ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
+static int ieee80211_classify(struct sk_buff *skb,
+ struct ieee80211_network *network)
{
struct ether_header *eh = (struct ether_header *)skb->data;
unsigned int wme_UP = 0;
@@ -310,8 +308,7 @@ ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
}
/* SKBs are added to the ieee->tx_queue. */
-int ieee80211_rtl_xmit(struct sk_buff *skb,
- struct net_device *dev)
+int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ieee80211_device *ieee = netdev_priv(dev);
struct ieee80211_txb *txb = NULL;
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
index 24d39ccc1337..3b7955f0ff98 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
@@ -633,8 +633,8 @@ done:
return ret;
}
int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct iw_mlme *mlme = (struct iw_mlme *) extra;
// printk("\ndkgadfslkdjgalskdf===============>%s(), cmd:%x\n", __func__, mlme->cmd);
@@ -653,8 +653,8 @@ int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
}
int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- struct iw_param *data, char *extra)
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra)
{
/*
struct ieee80211_security sec = {
diff --git a/drivers/staging/rtl8187se/r8180.h b/drivers/staging/rtl8187se/r8180.h
index d052f4a9a839..8999ec62450d 100644
--- a/drivers/staging/rtl8187se/r8180.h
+++ b/drivers/staging/rtl8187se/r8180.h
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
//#include <linux/config.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/types.h>
@@ -639,20 +638,20 @@ typedef struct r8180_priv
((_ac) == WME_AC_BK) ? BK_PRIORITY : \
BE_PRIORITY)
-short rtl8180_tx(struct net_device *dev,u8* skbuf, int len,int priority,
- short morefrag,short fragdesc,int rate);
+short rtl8180_tx(struct net_device *dev, u8 *skbuf, int len, int priority,
+ short morefrag, short fragdesc, int rate);
u8 read_nic_byte(struct net_device *dev, int x);
u32 read_nic_dword(struct net_device *dev, int x);
-u16 read_nic_word(struct net_device *dev, int x) ;
-void write_nic_byte(struct net_device *dev, int x,u8 y);
-void write_nic_word(struct net_device *dev, int x,u16 y);
-void write_nic_dword(struct net_device *dev, int x,u32 y);
+u16 read_nic_word(struct net_device *dev, int x);
+void write_nic_byte(struct net_device *dev, int x, u8 y);
+void write_nic_word(struct net_device *dev, int x, u16 y);
+void write_nic_dword(struct net_device *dev, int x, u32 y);
void force_pci_posting(struct net_device *dev);
void rtl8180_rtx_disable(struct net_device *);
-void rtl8180_set_anaparam(struct net_device *dev,u32 a);
-void rtl8185_set_anaparam2(struct net_device *dev,u32 a);
+void rtl8180_set_anaparam(struct net_device *dev, u32 a);
+void rtl8185_set_anaparam2(struct net_device *dev, u32 a);
void rtl8180_set_hw_wep(struct net_device *dev);
void rtl8180_no_hw_wep(struct net_device *dev);
void rtl8180_update_msr(struct net_device *dev);
@@ -661,7 +660,7 @@ void rtl8180_beacon_rx_disable(struct net_device *dev);
int rtl8180_down(struct net_device *dev);
int rtl8180_up(struct net_device *dev);
void rtl8180_commit(struct net_device *dev);
-void rtl8180_set_chan(struct net_device *dev,short ch);
+void rtl8180_set_chan(struct net_device *dev, short ch);
void write_phy(struct net_device *dev, u8 adr, u8 data);
void write_phy_cck(struct net_device *dev, u8 adr, u32 data);
void write_phy_ofdm(struct net_device *dev, u8 adr, u32 data);
@@ -671,7 +670,8 @@ void IPSEnter(struct net_device *dev);
void IPSLeave(struct net_device *dev);
int get_curr_tx_free_desc(struct net_device *dev, int priority);
void UpdateInitialGain(struct net_device *dev);
-bool SetAntennaConfig87SE(struct net_device *dev, u8 DefaultAnt, bool bAntDiversity);
+bool SetAntennaConfig87SE(struct net_device *dev, u8 DefaultAnt,
+ bool bAntDiversity);
//#ifdef CONFIG_RTL8185B
void rtl8185b_adapter_start(struct net_device *dev);
@@ -684,6 +684,17 @@ void fix_tx_fifo(struct net_device *dev);
void rtl8225z2_SetTXPowerLevel(struct net_device *dev, short ch);
void rtl8180_rate_adapter(struct work_struct * work);
//#endif
-bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet, u32 ChangeSource);
+bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet,
+ u32 ChangeSource);
#endif
+
+/* fun with the built-in ieee80211 stack... */
+extern int ieee80211_crypto_init(void);
+extern void ieee80211_crypto_deinit(void);
+extern int ieee80211_crypto_tkip_init(void);
+extern void ieee80211_crypto_tkip_exit(void);
+extern int ieee80211_crypto_ccmp_init(void);
+extern void ieee80211_crypto_ccmp_exit(void);
+extern int ieee80211_crypto_wep_init(void);
+extern void ieee80211_crypto_wep_exit(void);
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 76a67386b927..6cafee22bec4 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -79,7 +79,7 @@ module_param(hwwep, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hwwep, " Try to use hardware WEP support. Still broken and not available on all cards");
static int rtl8180_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id);
+ const struct pci_device_id *id);
static void rtl8180_pci_remove(struct pci_dev *pdev);
@@ -387,7 +387,8 @@ static short buffer_add(struct buffer **buffer, u32 *buf, dma_addr_t dma,
return 0;
}
-void buffer_free(struct net_device *dev, struct buffer **buffer, int len, short consistent)
+static void buffer_free(struct net_device *dev, struct buffer **buffer, int len,
+ short consistent)
{
struct buffer *tmp, *next;
@@ -1027,7 +1028,7 @@ inline u8 rtl8180_IsWirelessBMode(u16 rate)
u16 N_DBPSOfRate(u16 DataRate);
-u16 ComputeTxTime(u16 FrameLength, u16 DataRate, u8 bManagementFrame,
+static u16 ComputeTxTime(u16 FrameLength, u16 DataRate, u8 bManagementFrame,
u8 bShortPreamble)
{
u16 FrameTime;
@@ -1855,7 +1856,7 @@ short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
if (remain == len && !descfrag) {
ownbit_flag = false;
- *tail = *tail | (1<<29) ; /* fist segment of the packet */
+ *tail = *tail | (1<<29); /* fist segment of the packet */
*tail = *tail | (len);
} else {
ownbit_flag = true;
@@ -2238,7 +2239,8 @@ static CHANNEL_LIST ChannelPlan[] = {
{{1,2,3,4,5,6,7,8,9,10,11,12,13},13} /* world wide 13: ch1~ch11 active scan, ch12~13 passive //lzm add 080826 */
};
-static void rtl8180_set_channel_map(u8 channel_plan, struct ieee80211_device *ieee)
+static void rtl8180_set_channel_map(u8 channel_plan,
+ struct ieee80211_device *ieee)
{
int i;
@@ -2340,7 +2342,7 @@ static void rtl8187se_eeprom_register_write(struct eeprom_93cx6 *eeprom)
udelay(10);
}
-short rtl8180_init(struct net_device *dev)
+static short rtl8180_init(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u16 word;
@@ -2830,11 +2832,8 @@ static struct net_device_stats *rtl8180_stats(struct net_device *dev)
/*
* Change current and default preamble mode.
*/
-bool
-MgntActSet_802_11_PowerSaveMode(
- struct r8180_priv *priv,
- RT_PS_MODE rtPsMode
-)
+static bool MgntActSet_802_11_PowerSaveMode(struct r8180_priv *priv,
+ RT_PS_MODE rtPsMode)
{
/* Currently, we do not change power save mode on IBSS mode. */
if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
@@ -2845,7 +2844,7 @@ MgntActSet_802_11_PowerSaveMode(
return true;
}
-void LeisurePSEnter(struct r8180_priv *priv)
+static void LeisurePSEnter(struct r8180_priv *priv)
{
if (priv->bLeisurePs) {
if (priv->ieee80211->ps == IEEE80211_PS_DISABLED)
@@ -2854,7 +2853,7 @@ void LeisurePSEnter(struct r8180_priv *priv)
}
}
-void LeisurePSLeave(struct r8180_priv *priv)
+static void LeisurePSLeave(struct r8180_priv *priv)
{
if (priv->bLeisurePs) {
if (priv->ieee80211->ps != IEEE80211_PS_DISABLED)
@@ -3078,7 +3077,7 @@ void rtl8180_commit(struct net_device *dev)
struct r8180_priv *priv = ieee80211_priv(dev);
if (priv->up == 0)
- return ;
+ return;
del_timer_sync(&priv->watch_dog_timer);
del_timer_sync(&priv->rateadapter_timer);
@@ -3161,7 +3160,7 @@ static const struct net_device_ops rtl8180_netdev_ops = {
};
static int rtl8180_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+ const struct pci_device_id *id)
{
unsigned long ioaddr = 0;
struct net_device *dev = NULL;
@@ -3310,16 +3309,6 @@ static void rtl8180_pci_remove(struct pci_dev *pdev)
DMESG("wlan driver removed\n");
}
-/* fun with the built-in ieee80211 stack... */
-extern int ieee80211_crypto_init(void);
-extern void ieee80211_crypto_deinit(void);
-extern int ieee80211_crypto_tkip_init(void);
-extern void ieee80211_crypto_tkip_exit(void);
-extern int ieee80211_crypto_ccmp_init(void);
-extern void ieee80211_crypto_ccmp_exit(void);
-extern int ieee80211_crypto_wep_init(void);
-extern void ieee80211_crypto_wep_exit(void);
-
static int __init rtl8180_pci_module_init(void)
{
int ret;
@@ -3446,7 +3435,7 @@ static void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
default:
spin_unlock_irqrestore(&priv->tx_lock, flag);
- return ;
+ return;
}
nicv = (u32 *)((nic - nicbegin) + (u8 *)begin);
@@ -3537,7 +3526,7 @@ static void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
spin_unlock_irqrestore(&priv->tx_lock, flag);
}
-irqreturn_t rtl8180_interrupt(int irq, void *netdev)
+static irqreturn_t rtl8180_interrupt(int irq, void *netdev)
{
struct net_device *dev = (struct net_device *) netdev;
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
diff --git a/drivers/staging/rtl8187se/r8180_dm.h b/drivers/staging/rtl8187se/r8180_dm.h
index 732c06ac1026..cb4046f346ef 100644
--- a/drivers/staging/rtl8187se/r8180_dm.h
+++ b/drivers/staging/rtl8187se/r8180_dm.h
@@ -5,7 +5,7 @@
/* #include "r8180_hw.h" */
/* #include "r8180_93cx6.h" */
void SwAntennaDiversityRxOk8185(struct net_device *dev, u8 SignalStrength);
-bool SetAntenna8185(struct net_device *dev, u8 u1bAntennaIndex);
+bool SetAntenna8185(struct net_device *dev, u8 u1bAntennaIndex);
bool SwitchAntenna(struct net_device *dev);
void SwAntennaDiversity(struct net_device *dev);
void SwAntennaDiversityTimerCallback(struct net_device *dev);
diff --git a/drivers/staging/rtl8187se/r8180_hw.h b/drivers/staging/rtl8187se/r8180_hw.h
index 92c05af557cf..e59d74f8ecfc 100644
--- a/drivers/staging/rtl8187se/r8180_hw.h
+++ b/drivers/staging/rtl8187se/r8180_hw.h
@@ -555,14 +555,14 @@
/* by amy for antenna */
#define EEPROM_SW_REVD_OFFSET 0x3f
-/* BIT[8-9] is for SW Antenna Diversity.
+/* BIT[8-9] is for SW Antenna Diversity.
* Only the value EEPROM_SW_AD_ENABLE means enable, other values are disable.
*/
#define EEPROM_SW_AD_MASK 0x0300
#define EEPROM_SW_AD_ENABLE 0x0100
/* BIT[10-11] determine if Antenna 1 is the Default Antenna.
- * Only the value EEPROM_DEF_ANT_1 means TRUE, other values are FALSE.
+ * Only the value EEPROM_DEF_ANT_1 means TRUE, other values are FALSE.
*/
#define EEPROM_DEF_ANT_MASK 0x0C00
#define EEPROM_DEF_ANT_1 0x0400
diff --git a/drivers/staging/rtl8187se/r8180_rtl8225.h b/drivers/staging/rtl8187se/r8180_rtl8225.h
index c94ca0794a5d..de084f07a071 100644
--- a/drivers/staging/rtl8187se/r8180_rtl8225.h
+++ b/drivers/staging/rtl8187se/r8180_rtl8225.h
@@ -28,7 +28,8 @@ u16 RF_ReadReg(struct net_device *dev, u8 offset);
void rtl8180_set_mode(struct net_device *dev, int mode);
void rtl8180_set_mode(struct net_device *dev, int mode);
-bool SetZebraRFPowerState8185(struct net_device *dev, RT_RF_POWER_STATE eRFPowerState);
+bool SetZebraRFPowerState8185(struct net_device *dev,
+ RT_RF_POWER_STATE eRFPowerState);
void rtl8225z4_rf_sleep(struct net_device *dev);
void rtl8225z4_rf_wakeup(struct net_device *dev);
diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c
index 4e01653e098a..9b676e027cad 100644
--- a/drivers/staging/rtl8187se/r8180_wx.c
+++ b/drivers/staging/rtl8187se/r8180_wx.c
@@ -21,9 +21,10 @@
#include "r8180.h"
#include "r8180_hw.h"
+#include <net/iw_handler.h>
#include "ieee80211/dot11d.h"
-u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
+static u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000};
#define RATE_COUNT ARRAY_SIZE(rtl8180_rates)
@@ -61,7 +62,7 @@ static int r8180_wx_set_key(struct net_device *dev,
return 0;
if (erq->length > 0) {
- u32* tkey = (u32*) key;
+ u32 *tkey = (u32 *) key;
priv->key0[0] = tkey[0];
priv->key0[1] = tkey[1];
priv->key0[2] = tkey[2];
@@ -74,8 +75,9 @@ static int r8180_wx_set_key(struct net_device *dev,
}
-static int r8180_wx_set_beaconinterval(struct net_device *dev, struct iw_request_info *aa,
- union iwreq_data *wrqu, char *b)
+static int r8180_wx_set_beaconinterval(struct net_device *dev,
+ struct iw_request_info *aa,
+ union iwreq_data *wrqu, char *b)
{
int *parms = (int *)b;
int bi = parms[0];
@@ -295,7 +297,7 @@ static int rtl8180_wx_get_range(struct net_device *dev,
}
if (val == IW_MAX_FREQUENCIES)
- break;
+ break;
}
range->num_frequency = val;
@@ -311,14 +313,14 @@ static int r8180_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
{
struct r8180_priv *priv = ieee80211_priv(dev);
int ret;
- struct ieee80211_device* ieee = priv->ieee80211;
+ struct ieee80211_device *ieee = priv->ieee80211;
if (priv->ieee80211->bHwRadioOff)
return 0;
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
- struct iw_scan_req* req = (struct iw_scan_req*)b;
+ struct iw_scan_req *req = (struct iw_scan_req *)b;
if (req->essid_len) {
ieee->current_network.ssid_len = req->essid_len;
memcpy(ieee->current_network.ssid, req->essid, req->essid_len);
@@ -473,9 +475,8 @@ static int r8180_wx_get_frag(struct net_device *dev,
static int r8180_wx_set_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *awrq, char *extra)
{
int ret;
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -516,7 +517,8 @@ static int r8180_wx_set_enc(struct net_device *dev,
down(&priv->wx_sem);
- if (priv->hw_wep) ret = r8180_wx_set_key(dev, info, wrqu, key);
+ if (priv->hw_wep)
+ ret = r8180_wx_set_key(dev, info, wrqu, key);
else {
DMESG("Setting SW wep key");
ret = ieee80211_wx_set_encode(priv->ieee80211, info, wrqu, key);
@@ -537,11 +539,13 @@ static int r8180_wx_get_enc(struct net_device *dev,
}
-static int r8180_wx_set_scan_type(struct net_device *dev, struct iw_request_info *aa, union
- iwreq_data *wrqu, char *p) {
+static int r8180_wx_set_scan_type(struct net_device *dev,
+ struct iw_request_info *aa,
+ union iwreq_data *wrqu, char *p)
+{
struct r8180_priv *priv = ieee80211_priv(dev);
- int *parms = (int*)p;
+ int *parms = (int *)p;
int mode = parms[0];
if (priv->ieee80211->bHwRadioOff)
@@ -553,8 +557,8 @@ static int r8180_wx_set_scan_type(struct net_device *dev, struct iw_request_info
}
static int r8180_wx_set_retry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
int err = 0;
@@ -601,8 +605,8 @@ exit:
}
static int r8180_wx_get_retry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -625,8 +629,8 @@ static int r8180_wx_get_retry(struct net_device *dev,
}
static int r8180_wx_get_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
if (priv->rf_set_sens == NULL)
@@ -637,8 +641,8 @@ static int r8180_wx_get_sens(struct net_device *dev,
static int r8180_wx_set_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -666,8 +670,8 @@ exit:
static int r8180_wx_set_rawtx(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
int ret;
@@ -686,8 +690,8 @@ static int r8180_wx_set_rawtx(struct net_device *dev,
}
static int r8180_wx_get_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
int ret;
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -702,8 +706,8 @@ static int r8180_wx_get_power(struct net_device *dev,
}
static int r8180_wx_set_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
int ret;
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -728,8 +732,8 @@ static int r8180_wx_set_power(struct net_device *dev,
}
static int r8180_wx_set_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -750,8 +754,8 @@ static int r8180_wx_set_rts(struct net_device *dev,
return 0;
}
static int r8180_wx_get_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -841,8 +845,8 @@ static int r8180_wx_set_iwmode(struct net_device *dev,
return ret;
}
static int r8180_wx_get_preamble(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -858,8 +862,8 @@ static int r8180_wx_get_preamble(struct net_device *dev,
return 0;
}
static int r8180_wx_set_preamble(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
int ret = 0;
@@ -872,7 +876,7 @@ static int r8180_wx_set_preamble(struct net_device *dev,
if (*extra < 0 || *extra > 2)
ret = -1;
else
- priv->plcp_preamble_mode = *((short *)extra) ;
+ priv->plcp_preamble_mode = *((short *)extra);
@@ -881,8 +885,8 @@ static int r8180_wx_set_preamble(struct net_device *dev,
return ret;
}
static int r8180_wx_get_siglevel(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
int ret = 0;
@@ -900,8 +904,8 @@ static int r8180_wx_get_siglevel(struct net_device *dev,
return ret;
}
static int r8180_wx_get_sigqual(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
int ret = 0;
@@ -959,8 +963,8 @@ static int r8180_wx_reset_stats(struct net_device *dev,
}
static int r8180_wx_radio_on(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -978,8 +982,8 @@ static int r8180_wx_radio_on(struct net_device *dev,
}
static int r8180_wx_radio_off(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -996,8 +1000,8 @@ static int r8180_wx_radio_off(struct net_device *dev,
}
static int r8180_wx_get_channelplan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -1013,8 +1017,8 @@ static int r8180_wx_get_channelplan(struct net_device *dev,
return 0;
}
static int r8180_wx_set_channelplan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
int *val = (int *)extra;
@@ -1035,7 +1039,7 @@ static int r8180_wx_set_channelplan(struct net_device *dev,
/* Set new channel map */
for (i = 1; i <= DefaultChannelPlan[*val].Len; i++)
GET_DOT11D_INFO(priv->ieee80211)->channel_map[DefaultChannelPlan[*val].Channel[i-1]] = 1;
-
+
}
up(&priv->wx_sem);
@@ -1043,8 +1047,8 @@ static int r8180_wx_set_channelplan(struct net_device *dev,
}
static int r8180_wx_get_version(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
/* struct ieee80211_device *ieee; */
@@ -1059,8 +1063,8 @@ static int r8180_wx_get_version(struct net_device *dev,
/* added by amy 080818 */
/*receive datarate from user typing valid rate is from 2 to 108 (1 - 54M), if input 0, return to normal rate adaptive. */
static int r8180_wx_set_forcerate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u8 forcerate = *extra;
@@ -1070,8 +1074,7 @@ static int r8180_wx_set_forcerate(struct net_device *dev,
printk("==============>%s(): forcerate is %d\n", __func__, forcerate);
if ((forcerate == 2) || (forcerate == 4) || (forcerate == 11) || (forcerate == 22) || (forcerate == 12) ||
(forcerate == 18) || (forcerate == 24) || (forcerate == 36) || (forcerate == 48) || (forcerate == 72) ||
- (forcerate == 96) || (forcerate == 108))
- {
+ (forcerate == 96) || (forcerate == 108)) {
priv->ForcedDataRate = 1;
priv->ieee80211->rate = forcerate * 5;
} else if (forcerate == 0) {
@@ -1084,8 +1087,8 @@ static int r8180_wx_set_forcerate(struct net_device *dev,
}
static int r8180_wx_set_enc_ext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -1118,8 +1121,8 @@ static int r8180_wx_set_auth(struct net_device *dev,
}
static int r8180_wx_set_mlme(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
int ret = 0;
struct r8180_priv *priv = ieee80211_priv(dev);
@@ -1156,65 +1159,48 @@ static int r8180_wx_set_gen_ie(struct net_device *dev,
}
-static iw_handler r8180_wx_handlers[] = {
- NULL, /* SIOCSIWCOMMIT */
- r8180_wx_get_name, /* SIOCGIWNAME */
- dummy, /* SIOCSIWNWID */
- dummy, /* SIOCGIWNWID */
- r8180_wx_set_freq, /* SIOCSIWFREQ */
- r8180_wx_get_freq, /* SIOCGIWFREQ */
- r8180_wx_set_mode, /* SIOCSIWMODE */
- r8180_wx_get_mode, /* SIOCGIWMODE */
- r8180_wx_set_sens, /* SIOCSIWSENS */
- r8180_wx_get_sens, /* SIOCGIWSENS */
- NULL, /* SIOCSIWRANGE */
- rtl8180_wx_get_range, /* SIOCGIWRANGE */
- NULL, /* SIOCSIWPRIV */
- NULL, /* SIOCGIWPRIV */
- NULL, /* SIOCSIWSTATS */
- NULL, /* SIOCGIWSTATS */
- dummy, /* SIOCSIWSPY */
- dummy, /* SIOCGIWSPY */
- NULL, /* SIOCGIWTHRSPY */
- NULL, /* SIOCWIWTHRSPY */
- r8180_wx_set_wap, /* SIOCSIWAP */
- r8180_wx_get_wap, /* SIOCGIWAP */
- r8180_wx_set_mlme, /* SIOCSIWMLME*/
- dummy, /* SIOCGIWAPLIST -- deprecated */
- r8180_wx_set_scan, /* SIOCSIWSCAN */
- r8180_wx_get_scan, /* SIOCGIWSCAN */
- r8180_wx_set_essid, /* SIOCSIWESSID */
- r8180_wx_get_essid, /* SIOCGIWESSID */
- dummy, /* SIOCSIWNICKN */
- dummy, /* SIOCGIWNICKN */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
- r8180_wx_set_rate, /* SIOCSIWRATE */
- r8180_wx_get_rate, /* SIOCGIWRATE */
- r8180_wx_set_rts, /* SIOCSIWRTS */
- r8180_wx_get_rts, /* SIOCGIWRTS */
- r8180_wx_set_frag, /* SIOCSIWFRAG */
- r8180_wx_get_frag, /* SIOCGIWFRAG */
- dummy, /* SIOCSIWTXPOW */
- dummy, /* SIOCGIWTXPOW */
- r8180_wx_set_retry, /* SIOCSIWRETRY */
- r8180_wx_get_retry, /* SIOCGIWRETRY */
- r8180_wx_set_enc, /* SIOCSIWENCODE */
- r8180_wx_get_enc, /* SIOCGIWENCODE */
- r8180_wx_set_power, /* SIOCSIWPOWER */
- r8180_wx_get_power, /* SIOCGIWPOWER */
- NULL, /*---hole---*/
- NULL, /*---hole---*/
- r8180_wx_set_gen_ie, /* SIOCSIWGENIE */
- NULL, /* SIOCSIWGENIE */
- r8180_wx_set_auth, /* SIOCSIWAUTH */
- NULL, /* SIOCSIWAUTH */
- r8180_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
- NULL, /* SIOCSIWENCODEEXT */
- NULL, /* SIOCSIWPMKSA */
- NULL, /*---hole---*/
-};
+static const iw_handler r8180_wx_handlers[] = {
+ IW_HANDLER(SIOCGIWNAME, r8180_wx_get_name),
+ IW_HANDLER(SIOCSIWNWID, dummy),
+ IW_HANDLER(SIOCGIWNWID, dummy),
+ IW_HANDLER(SIOCSIWFREQ, r8180_wx_set_freq),
+ IW_HANDLER(SIOCGIWFREQ, r8180_wx_get_freq),
+ IW_HANDLER(SIOCSIWMODE, r8180_wx_set_mode),
+ IW_HANDLER(SIOCGIWMODE, r8180_wx_get_mode),
+ IW_HANDLER(SIOCSIWSENS, r8180_wx_set_sens),
+ IW_HANDLER(SIOCGIWSENS, r8180_wx_get_sens),
+ IW_HANDLER(SIOCGIWRANGE, rtl8180_wx_get_range),
+ IW_HANDLER(SIOCSIWSPY, dummy),
+ IW_HANDLER(SIOCGIWSPY, dummy),
+ IW_HANDLER(SIOCSIWAP, r8180_wx_set_wap),
+ IW_HANDLER(SIOCGIWAP, r8180_wx_get_wap),
+ IW_HANDLER(SIOCSIWMLME, r8180_wx_set_mlme),
+ IW_HANDLER(SIOCGIWAPLIST, dummy), /* deprecated */
+ IW_HANDLER(SIOCSIWSCAN, r8180_wx_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, r8180_wx_get_scan),
+ IW_HANDLER(SIOCSIWESSID, r8180_wx_set_essid),
+ IW_HANDLER(SIOCGIWESSID, r8180_wx_get_essid),
+ IW_HANDLER(SIOCSIWNICKN, dummy),
+ IW_HANDLER(SIOCGIWNICKN, dummy),
+ IW_HANDLER(SIOCSIWRATE, r8180_wx_set_rate),
+ IW_HANDLER(SIOCGIWRATE, r8180_wx_get_rate),
+ IW_HANDLER(SIOCSIWRTS, r8180_wx_set_rts),
+ IW_HANDLER(SIOCGIWRTS, r8180_wx_get_rts),
+ IW_HANDLER(SIOCSIWFRAG, r8180_wx_set_frag),
+ IW_HANDLER(SIOCGIWFRAG, r8180_wx_get_frag),
+ IW_HANDLER(SIOCSIWTXPOW, dummy),
+ IW_HANDLER(SIOCGIWTXPOW, dummy),
+ IW_HANDLER(SIOCSIWRETRY, r8180_wx_set_retry),
+ IW_HANDLER(SIOCGIWRETRY, r8180_wx_get_retry),
+ IW_HANDLER(SIOCSIWENCODE, r8180_wx_set_enc),
+ IW_HANDLER(SIOCGIWENCODE, r8180_wx_get_enc),
+ IW_HANDLER(SIOCSIWPOWER, r8180_wx_set_power),
+ IW_HANDLER(SIOCGIWPOWER, r8180_wx_get_power),
+ IW_HANDLER(SIOCSIWGENIE, r8180_wx_set_gen_ie),
+ IW_HANDLER(SIOCSIWAUTH, r8180_wx_set_auth),
+ IW_HANDLER(SIOCSIWENCODEEXT, r8180_wx_set_enc_ext),
+};
static const struct iw_priv_args r8180_private_args[] = {
{
@@ -1350,7 +1336,7 @@ static iw_handler r8180_private_handler[] = {
};
static inline int is_same_network(struct ieee80211_network *src,
- struct ieee80211_network *dst,
+ struct ieee80211_network *dst,
struct ieee80211_device *ieee)
{
/* A network is only a duplicate if the channel, BSSID, ESSID
@@ -1358,22 +1344,35 @@ static inline int is_same_network(struct ieee80211_network *src,
* We treat all <hidden> with the same BSSID and channel
* as one network
*/
- return (((src->ssid_len == dst->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && /* YJ,mod, 080819,for hidden ap */
- (src->channel == dst->channel) &&
- !memcmp(src->bssid, dst->bssid, ETH_ALEN) &&
- (!memcmp(src->ssid, dst->ssid, src->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && /* YJ,mod, 080819,for hidden ap */
- ((src->capability & WLAN_CAPABILITY_IBSS) ==
- (dst->capability & WLAN_CAPABILITY_IBSS)) &&
- ((src->capability & WLAN_CAPABILITY_BSS) ==
- (dst->capability & WLAN_CAPABILITY_BSS)));
+ if (src->channel != dst->channel)
+ return 0;
+
+ if (memcmp(src->bssid, dst->bssid, ETH_ALEN) != 0)
+ return 0;
+
+ if (ieee->iw_mode != IW_MODE_INFRA) {
+ if (src->ssid_len != dst->ssid_len)
+ return 0;
+ if (memcmp(src->ssid, dst->ssid, src->ssid_len) != 0)
+ return 0;
+ }
+
+ if ((src->capability & WLAN_CAPABILITY_IBSS) !=
+ (dst->capability & WLAN_CAPABILITY_IBSS))
+ return 0;
+ if ((src->capability & WLAN_CAPABILITY_BSS) !=
+ (dst->capability & WLAN_CAPABILITY_BSS))
+ return 0;
+
+ return 1;
}
/* WB modified to show signal to GUI on 18-01-2008 */
static struct iw_statistics *r8180_get_wireless_stats(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device* ieee = priv->ieee80211;
- struct iw_statistics* wstats = &priv->wstats;
+ struct ieee80211_device *ieee = priv->ieee80211;
+ struct iw_statistics *wstats = &priv->wstats;
int tmp_level = 0;
int tmp_qual = 0;
int tmp_noise = 0;
diff --git a/drivers/staging/rtl8187se/r8185b_init.c b/drivers/staging/rtl8187se/r8185b_init.c
index dc52a3e584d8..c8b9baff1dbc 100644
--- a/drivers/staging/rtl8187se/r8185b_init.c
+++ b/drivers/staging/rtl8187se/r8185b_init.c
@@ -497,7 +497,7 @@ static void ZEBRA_Config_85BASIC_HardCode(struct net_device *dev)
*/
RF_WriteReg(dev, 0x0f, (priv->XtalCal_Xin<<5) |
(priv->XtalCal_Xout<<1) | BIT11 | BIT9); mdelay(1);
- printk("ZEBRA_Config_85BASIC_HardCode(): (%02x)\n",
+ netdev_info(dev, "ZEBRA_Config_85BASIC_HardCode(): (%02x)\n",
(priv->XtalCal_Xin<<5) | (priv->XtalCal_Xout<<1) |
BIT11 | BIT9);
} else {
@@ -870,9 +870,10 @@ static u8 GetSupportedWirelessMode8185(struct net_device *dev)
return WIRELESS_MODE_B | WIRELESS_MODE_G;
}
-static void ActUpdateChannelAccessSetting(struct net_device *dev,
- WIRELESS_MODE WirelessMode,
- PCHANNEL_ACCESS_SETTING ChnlAccessSetting)
+static void
+ActUpdateChannelAccessSetting(struct net_device *dev,
+ WIRELESS_MODE WirelessMode,
+ PCHANNEL_ACCESS_SETTING ChnlAccessSetting)
{
AC_CODING eACI;
@@ -1084,7 +1085,7 @@ static bool MgntDisconnect(struct net_device *dev, u8 asRsn)
* PASSIVE LEVEL.
*/
static bool SetRFPowerState(struct net_device *dev,
- RT_RF_POWER_STATE eRFPowerState)
+ RT_RF_POWER_STATE eRFPowerState)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bResult = false;
@@ -1097,8 +1098,8 @@ static bool SetRFPowerState(struct net_device *dev,
return bResult;
}
-bool MgntActSet_RF_State(struct net_device *dev,
- RT_RF_POWER_STATE StateToSet, u32 ChangeSource)
+bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet,
+ u32 ChangeSource)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bActionAllowed = false;
@@ -1125,7 +1126,7 @@ bool MgntActSet_RF_State(struct net_device *dev,
* to be stuck here.
*/
if (RFWaitCounter > 1000) { /* 1sec */
- printk("MgntActSet_RF_State(): Wait too long to set RF\n");
+ netdev_info(dev, "MgntActSet_RF_State(): Wait too long to set RF\n");
/* TODO: Reset RF state? */
return false;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 2f548ebada59..8ebe6bc40022 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -33,7 +33,7 @@ void init_mlme_ap_info(struct adapter *padapter)
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
- _rtw_spinlock_init(&pmlmepriv->bcn_update_lock);
+ spin_lock_init(&pmlmepriv->bcn_update_lock);
/* for ACL */
_rtw_init_queue(&pacl_list->acl_node_q);
@@ -43,7 +43,6 @@ void init_mlme_ap_info(struct adapter *padapter)
void free_mlme_ap_info(struct adapter *padapter)
{
- unsigned long irqL;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
@@ -62,11 +61,9 @@ void free_mlme_ap_info(struct adapter *padapter)
/* free bc/mc sta_info */
psta = rtw_get_bcmc_stainfo(padapter);
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(padapter, psta);
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
-
- _rtw_spinlock_free(&pmlmepriv->bcn_update_lock);
+ spin_unlock_bh(&(pstapriv->sta_hash_lock));
}
static void update_BCNTIM(struct adapter *padapter)
@@ -277,7 +274,6 @@ static u8 chk_sta_is_alive(struct sta_info *psta)
void expire_timeout_chk(struct adapter *padapter)
{
- unsigned long irqL;
struct list_head *phead, *plist;
u8 updated = 0;
struct sta_info *psta = NULL;
@@ -286,7 +282,7 @@ void expire_timeout_chk(struct adapter *padapter)
char chk_alive_list[NUM_STA];
int i;
- _enter_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->auth_list_lock);
phead = &pstapriv->auth_list;
plist = get_next(phead);
@@ -305,22 +301,22 @@ void expire_timeout_chk(struct adapter *padapter)
DBG_88E("auth expire %6ph\n",
psta->hwaddr);
- _exit_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->auth_list_lock);
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(padapter, psta);
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ spin_unlock_bh(&(pstapriv->sta_hash_lock));
- _enter_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->auth_list_lock);
}
}
}
- _exit_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->auth_list_lock);
psta = NULL;
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
plist = get_next(phead);
@@ -387,7 +383,7 @@ void expire_timeout_chk(struct adapter *padapter)
}
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
if (chk_alive_num) {
u8 backup_oper_channel = 0;
@@ -424,11 +420,11 @@ void expire_timeout_chk(struct adapter *padapter)
psta->keep_alive_trycnt = 0;
DBG_88E("asoc expire %pM, state = 0x%x\n", (psta->hwaddr), psta->state);
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
rtw_list_delete(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
}
if (backup_oper_channel > 0) /* back to the original operation channel */
@@ -535,7 +531,6 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
static void update_bmc_sta(struct adapter *padapter)
{
- unsigned long irqL;
u32 init_rate = 0;
unsigned char network_type, raid;
int i, supportRateNum = 0;
@@ -604,9 +599,9 @@ static void update_bmc_sta(struct adapter *padapter)
rtw_stassoc_hw_rpt(padapter, psta);
- _enter_critical_bh(&psta->lock, &irqL);
+ spin_lock_bh(&psta->lock);
psta->state = _FW_LINKED;
- _exit_critical_bh(&psta->lock, &irqL);
+ spin_unlock_bh(&psta->lock);
} else {
DBG_88E("add_RATid_bmc_sta error!\n");
@@ -622,7 +617,6 @@ static void update_bmc_sta(struct adapter *padapter)
void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
{
- unsigned long irqL;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
@@ -679,9 +673,9 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
_rtw_memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
- _enter_critical_bh(&psta->lock, &irqL);
+ spin_lock_bh(&psta->lock);
psta->state |= _FW_LINKED;
- _exit_critical_bh(&psta->lock, &irqL);
+ spin_unlock_bh(&psta->lock);
}
static void update_hw_ht_param(struct adapter *padapter)
@@ -1134,7 +1128,6 @@ void rtw_set_macaddr_acl(struct adapter *padapter, int mode)
int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
{
- unsigned long irqL;
struct list_head *plist, *phead;
u8 added = false;
int i, ret = 0;
@@ -1148,7 +1141,7 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
if ((NUM_ACL-1) < pacl_list->num)
return -1;
- _enter_critical_bh(&(pacl_node_q->lock), &irqL);
+ spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
plist = get_next(phead);
@@ -1166,12 +1159,12 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
}
}
- _exit_critical_bh(&(pacl_node_q->lock), &irqL);
+ spin_unlock_bh(&(pacl_node_q->lock));
if (added)
return ret;
- _enter_critical_bh(&(pacl_node_q->lock), &irqL);
+ spin_lock_bh(&(pacl_node_q->lock));
for (i = 0; i < NUM_ACL; i++) {
paclnode = &pacl_list->aclnode[i];
@@ -1193,14 +1186,13 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
DBG_88E("%s, acl_num =%d\n", __func__, pacl_list->num);
- _exit_critical_bh(&(pacl_node_q->lock), &irqL);
+ spin_unlock_bh(&(pacl_node_q->lock));
return ret;
}
int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
{
- unsigned long irqL;
struct list_head *plist, *phead;
int ret = 0;
struct rtw_wlan_acl_node *paclnode;
@@ -1210,7 +1202,7 @@ int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
DBG_88E("%s(acl_num =%d) =%pM\n", __func__, pacl_list->num, (addr));
- _enter_critical_bh(&(pacl_node_q->lock), &irqL);
+ spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
plist = get_next(phead);
@@ -1230,7 +1222,7 @@ int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
}
}
- _exit_critical_bh(&(pacl_node_q->lock), &irqL);
+ spin_unlock_bh(&(pacl_node_q->lock));
DBG_88E("%s, acl_num =%d\n", __func__, pacl_list->num);
return ret;
@@ -1373,7 +1365,6 @@ static void update_bcn_vendor_spec_ie(struct adapter *padapter, u8 *oui)
void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
{
- unsigned long irqL;
struct mlme_priv *pmlmepriv;
struct mlme_ext_priv *pmlmeext;
@@ -1386,7 +1377,7 @@ void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
if (!pmlmeext->bstart_bss)
return;
- _enter_critical_bh(&pmlmepriv->bcn_update_lock, &irqL);
+ spin_lock_bh(&pmlmepriv->bcn_update_lock);
switch (ie_id) {
case 0xFF:
@@ -1416,7 +1407,7 @@ void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
pmlmepriv->update_bcn = true;
- _exit_critical_bh(&pmlmepriv->bcn_update_lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->bcn_update_lock);
if (tx)
set_tx_beacon_cmd(padapter);
@@ -1505,12 +1496,11 @@ void associated_clients_update(struct adapter *padapter, u8 updated)
{
/* update associcated stations cap. */
if (updated) {
- unsigned long irqL;
struct list_head *phead, *plist;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
plist = get_next(phead);
@@ -1523,7 +1513,7 @@ void associated_clients_update(struct adapter *padapter, u8 updated)
VCS_update(padapter, psta);
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
}
}
@@ -1731,7 +1721,6 @@ u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta)
u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
bool active, u16 reason)
{
- unsigned long irqL;
u8 beacon_updated = false;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -1753,9 +1742,9 @@ u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
rtw_clearstakey_cmd(padapter, (u8 *)psta, (u8)(psta->mac_id + 3), true);
- _enter_critical_bh(&psta->lock, &irqL);
+ spin_lock_bh(&psta->lock);
psta->state &= ~_FW_LINKED;
- _exit_critical_bh(&psta->lock, &irqL);
+ spin_unlock_bh(&psta->lock);
rtw_indicate_sta_disassoc_event(padapter, psta);
@@ -1763,16 +1752,15 @@ u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
beacon_updated = bss_cap_update_on_sta_leave(padapter, psta);
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(padapter, psta);
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ spin_unlock_bh(&(pstapriv->sta_hash_lock));
return beacon_updated;
}
int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
{
- unsigned long irqL;
struct list_head *phead, *plist;
int ret = 0;
struct sta_info *psta = NULL;
@@ -1787,7 +1775,7 @@ int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
DBG_88E(FUNC_NDEV_FMT" with ch:%u, offset:%u\n",
FUNC_NDEV_ARG(padapter->pnetdev), new_ch, ch_offset);
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
plist = get_next(phead);
@@ -1799,7 +1787,7 @@ int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
issue_action_spct_ch_switch(padapter, psta->hwaddr, new_ch, ch_offset);
psta->expire_to = ((pstapriv->expire_to * 2) > 5) ? 5 : (pstapriv->expire_to * 2);
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
issue_action_spct_ch_switch(padapter, bc_addr, new_ch, ch_offset);
@@ -1808,7 +1796,6 @@ int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
int rtw_sta_flush(struct adapter *padapter)
{
- unsigned long irqL;
struct list_head *phead, *plist;
int ret = 0;
struct sta_info *psta = NULL;
@@ -1822,7 +1809,7 @@ int rtw_sta_flush(struct adapter *padapter)
if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
return ret;
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
plist = get_next(phead);
@@ -1837,7 +1824,7 @@ int rtw_sta_flush(struct adapter *padapter)
ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
issue_deauth(padapter, bc_addr, WLAN_REASON_DEAUTH_LEAVING);
@@ -1935,7 +1922,6 @@ void start_ap_mode(struct adapter *padapter)
void stop_ap_mode(struct adapter *padapter)
{
- unsigned long irqL;
struct list_head *phead, *plist;
struct rtw_wlan_acl_node *paclnode;
struct sta_info *psta = NULL;
@@ -1954,7 +1940,7 @@ void stop_ap_mode(struct adapter *padapter)
padapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
/* for ACL */
- _enter_critical_bh(&(pacl_node_q->lock), &irqL);
+ spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
plist = get_next(phead);
while ((rtw_end_of_queue_search(phead, plist)) == false) {
@@ -1969,7 +1955,7 @@ void stop_ap_mode(struct adapter *padapter)
pacl_list->num--;
}
}
- _exit_critical_bh(&(pacl_node_q->lock), &irqL);
+ spin_unlock_bh(&(pacl_node_q->lock));
DBG_88E("%s, free acl_node_queue, num =%d\n", __func__, pacl_list->num);
@@ -1979,9 +1965,9 @@ void stop_ap_mode(struct adapter *padapter)
rtw_free_all_stainfo(padapter);
psta = rtw_get_bcmc_stainfo(padapter);
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(padapter, psta);
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ spin_unlock_bh(&(pstapriv->sta_hash_lock));
rtw_init_bcmc_stainfo(padapter);
diff --git a/drivers/staging/rtl8188eu/core/rtw_br_ext.c b/drivers/staging/rtl8188eu/core/rtw_br_ext.c
index 9f40742ee5cf..75e38d4ff4c3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_br_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_br_ext.c
@@ -89,7 +89,7 @@ static inline int __nat25_add_pppoe_tag(struct sk_buff *skb, struct pppoe_tag *t
struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
int data_len;
- data_len = tag->tag_len + TAG_HDR_LEN;
+ data_len = be16_to_cpu(tag->tag_len) + TAG_HDR_LEN;
if (skb_tailroom(skb) < data_len) {
_DEBUG_ERR("skb_tailroom() failed in add SID tag!\n");
return -1;
@@ -155,7 +155,7 @@ static inline void __nat25_generate_ipv4_network_addr(unsigned char *networkAddr
static inline void __nat25_generate_ipx_network_addr_with_node(unsigned char *networkAddr,
- unsigned int *ipxNetAddr, unsigned char *ipxNodeAddr)
+ __be32 *ipxNetAddr, unsigned char *ipxNodeAddr)
{
memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
@@ -166,7 +166,7 @@ static inline void __nat25_generate_ipx_network_addr_with_node(unsigned char *ne
static inline void __nat25_generate_ipx_network_addr_with_socket(unsigned char *networkAddr,
- unsigned int *ipxNetAddr, unsigned short *ipxSocketAddr)
+ __be32 *ipxNetAddr, __be16 *ipxSocketAddr)
{
memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
@@ -177,7 +177,7 @@ static inline void __nat25_generate_ipx_network_addr_with_socket(unsigned char *
static inline void __nat25_generate_apple_network_addr(unsigned char *networkAddr,
- unsigned short *network, unsigned char *node)
+ __be16 *network, unsigned char *node)
{
memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
@@ -187,7 +187,7 @@ static inline void __nat25_generate_apple_network_addr(unsigned char *networkAdd
}
static inline void __nat25_generate_pppoe_network_addr(unsigned char *networkAddr,
- unsigned char *ac_mac, unsigned short *sid)
+ unsigned char *ac_mac, __be16 *sid)
{
memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
@@ -197,7 +197,7 @@ static inline void __nat25_generate_pppoe_network_addr(unsigned char *networkAdd
}
static void __nat25_generate_ipv6_network_addr(unsigned char *networkAddr,
- unsigned int *ipAddr)
+ __be32 *ipAddr)
{
memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
@@ -331,7 +331,7 @@ static inline int __nat25_network_hash(unsigned char *networkAddr)
static inline void __network_hash_link(struct adapter *priv,
struct nat25_network_db_entry *ent, int hash)
{
- /* Caller must _enter_critical_bh already! */
+ /* Caller must spin_lock_bh already! */
ent->next_hash = priv->nethash[hash];
if (ent->next_hash != NULL)
ent->next_hash->pprev_hash = &ent->next_hash;
@@ -341,7 +341,7 @@ static inline void __network_hash_link(struct adapter *priv,
static inline void __network_hash_unlink(struct nat25_network_db_entry *ent)
{
- /* Caller must _enter_critical_bh already! */
+ /* Caller must spin_lock_bh already! */
*(ent->pprev_hash) = ent->next_hash;
if (ent->next_hash != NULL)
ent->next_hash->pprev_hash = ent->pprev_hash;
@@ -353,8 +353,7 @@ static int __nat25_db_network_lookup_and_replace(struct adapter *priv,
struct sk_buff *skb, unsigned char *networkAddr)
{
struct nat25_network_db_entry *db;
- unsigned long irqL;
- _enter_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_lock_bh(&priv->br_ext_lock);
db = priv->nethash[__nat25_network_hash(networkAddr)];
while (db != NULL) {
@@ -390,12 +389,12 @@ static int __nat25_db_network_lookup_and_replace(struct adapter *priv,
db->networkAddr[15],
db->networkAddr[16]);
}
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
return 1;
}
db = db->next_hash;
}
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
return 0;
}
@@ -404,23 +403,22 @@ static void __nat25_db_network_insert(struct adapter *priv,
{
struct nat25_network_db_entry *db;
int hash;
- unsigned long irqL;
- _enter_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_lock_bh(&priv->br_ext_lock);
hash = __nat25_network_hash(networkAddr);
db = priv->nethash[hash];
while (db != NULL) {
if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) {
memcpy(db->macAddr, macAddr, ETH_ALEN);
db->ageing_timer = jiffies;
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
return;
}
db = db->next_hash;
}
db = (struct nat25_network_db_entry *) rtw_malloc(sizeof(*db));
if (db == NULL) {
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
return;
}
memcpy(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN);
@@ -430,7 +428,7 @@ static void __nat25_db_network_insert(struct adapter *priv,
__network_hash_link(priv, db, hash);
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
}
static void __nat25_db_print(struct adapter *priv)
@@ -444,8 +442,7 @@ static void __nat25_db_print(struct adapter *priv)
void nat25_db_cleanup(struct adapter *priv)
{
int i;
- unsigned long irqL;
- _enter_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_lock_bh(&priv->br_ext_lock);
for (i = 0; i < NAT25_HASH_SIZE; i++) {
struct nat25_network_db_entry *f;
@@ -464,14 +461,13 @@ void nat25_db_cleanup(struct adapter *priv)
f = g;
}
}
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
}
void nat25_db_expire(struct adapter *priv)
{
int i;
- unsigned long irqL;
- _enter_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_lock_bh(&priv->br_ext_lock);
for (i = 0; i < NAT25_HASH_SIZE; i++) {
struct nat25_network_db_entry *f;
@@ -495,7 +491,7 @@ void nat25_db_expire(struct adapter *priv)
f = g;
}
}
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
}
int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
@@ -811,7 +807,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
/* Handle PPPoE frame */
/*---------------------------------------------------*/
struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
- unsigned short *pMagic;
+ __be16 *pMagic;
switch (method) {
case NAT25_CHECK:
@@ -849,7 +845,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
tag->tag_len = htons(MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN+old_tag_len);
/* insert the magic_code+client mac in relay tag */
- pMagic = (unsigned short *)tag->tag_data;
+ pMagic = (__be16 *)tag->tag_data;
*pMagic = htons(MAGIC_CODE);
memcpy(tag->tag_data+MAGIC_CODE_LEN, skb->data+ETH_ALEN, ETH_ALEN);
@@ -912,7 +908,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
return -1;
}
- pMagic = (unsigned short *)tag->tag_data;
+ pMagic = (__be16 *)tag->tag_data;
if (ntohs(*pMagic) != MAGIC_CODE) {
DEBUG_ERR("Can't find MAGIC_CODE in %s packet!\n",
(ph->code == PADO_CODE ? "PADO" : "PADS"));
@@ -1009,7 +1005,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
iph->daddr.s6_addr16[4], iph->daddr.s6_addr16[5], iph->daddr.s6_addr16[6], iph->daddr.s6_addr16[7]);
if (memcmp(&iph->saddr, "\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0", 16)) {
- __nat25_generate_ipv6_network_addr(networkAddr, (unsigned int *)&iph->saddr);
+ __nat25_generate_ipv6_network_addr(networkAddr, (__be32 *)&iph->saddr);
__nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
__nat25_db_print(priv);
@@ -1020,9 +1016,10 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
struct icmp6hdr *hdr = (struct icmp6hdr *)(skb->data + ETH_HLEN + sizeof(*iph));
hdr->icmp6_cksum = 0;
hdr->icmp6_cksum = csum_ipv6_magic(&iph->saddr, &iph->daddr,
- iph->payload_len,
+ be16_to_cpu(iph->payload_len),
IPPROTO_ICMPV6,
- csum_partial((__u8 *)hdr, iph->payload_len, 0));
+ csum_partial((__u8 *)hdr,
+ be16_to_cpu(iph->payload_len), 0));
}
}
}
@@ -1033,7 +1030,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
iph->saddr.s6_addr16[4], iph->saddr.s6_addr16[5], iph->saddr.s6_addr16[6], iph->saddr.s6_addr16[7],
iph->daddr.s6_addr16[0], iph->daddr.s6_addr16[1], iph->daddr.s6_addr16[2], iph->daddr.s6_addr16[3],
iph->daddr.s6_addr16[4], iph->daddr.s6_addr16[5], iph->daddr.s6_addr16[6], iph->daddr.s6_addr16[7]);
- __nat25_generate_ipv6_network_addr(networkAddr, (unsigned int *)&iph->daddr);
+ __nat25_generate_ipv6_network_addr(networkAddr, (__be32 *)&iph->daddr);
__nat25_db_network_lookup_and_replace(priv, skb, networkAddr);
return 0;
default:
@@ -1060,8 +1057,7 @@ int nat25_handle_frame(struct adapter *priv, struct sk_buff *skb)
}
if (!priv->ethBrExtInfo.nat25_disable) {
- unsigned long irqL;
- _enter_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_lock_bh(&priv->br_ext_lock);
/*
* This function look up the destination network address from
* the NAT2.5 database. Return value = -1 means that the
@@ -1072,9 +1068,9 @@ int nat25_handle_frame(struct adapter *priv, struct sk_buff *skb)
!memcmp(priv->scdb_ip, skb->data+ETH_HLEN+16, 4)) {
memcpy(skb->data, priv->scdb_mac, ETH_ALEN);
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
} else {
- _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ spin_unlock_bh(&priv->br_ext_lock);
retval = nat25_db_handle(priv, skb, NAT25_LOOKUP);
}
@@ -1115,17 +1111,17 @@ struct dhcpMessage {
u_int8_t htype;
u_int8_t hlen;
u_int8_t hops;
- u_int32_t xid;
- u_int16_t secs;
- u_int16_t flags;
- u_int32_t ciaddr;
- u_int32_t yiaddr;
- u_int32_t siaddr;
- u_int32_t giaddr;
+ __be32 xid;
+ __be16 secs;
+ __be16 flags;
+ __be32 ciaddr;
+ __be32 yiaddr;
+ __be32 siaddr;
+ __be32 giaddr;
u_int8_t chaddr[16];
u_int8_t sname[64];
u_int8_t file[128];
- u_int32_t cookie;
+ __be32 cookie;
u_int8_t options[308]; /* 312 - cookie */
};
@@ -1178,21 +1174,16 @@ void *scdb_findEntry(struct adapter *priv, unsigned char *macAddr,
unsigned char networkAddr[MAX_NETWORK_ADDR_LEN];
struct nat25_network_db_entry *db;
int hash;
- /* unsigned long irqL; */
- /* _enter_critical_bh(&priv->br_ext_lock, &irqL); */
__nat25_generate_ipv4_network_addr(networkAddr, (unsigned int *)ipAddr);
hash = __nat25_network_hash(networkAddr);
db = priv->nethash[hash];
while (db != NULL) {
- if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) {
- /* _exit_critical_bh(&priv->br_ext_lock, &irqL); */
+ if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN))
return (void *)db;
- }
db = db->next_hash;
}
- /* _exit_critical_bh(&priv->br_ext_lock, &irqL); */
return NULL;
}
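The rtw_br_ext.c hunks above apply two mechanical conversions: the driver's _enter_critical_bh()/_exit_critical_bh() wrappers become the kernel's spin_lock_bh()/spin_unlock_bh(), which take no saved-flags argument, so the unsigned long irqL locals can be dropped; and on-wire fields (IPX, AppleTalk, PPPoE and IPv6 addresses, the PPPoE session id and magic) are retyped as __be16/__be32 so sparse can check byte-order handling. A minimal sketch of both patterns, using hypothetical names (example_lock, example_read_sid) rather than code from the patch:

#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static DEFINE_SPINLOCK(example_lock);

/* BH-disabling lock: no flags word is saved, unlike the old wrapper
 * that mimicked spin_lock_irqsave() and forced an irqL local. */
static void example_hash_update(void)
{
	spin_lock_bh(&example_lock);
	/* modify data shared with softirq (bottom-half) context here */
	spin_unlock_bh(&example_lock);
}

/* __be16/__be32 mark network-byte-order fields; sparse warns when a
 * conversion such as ntohs() or be16_to_cpu() is missing. */
static u16 example_read_sid(const __be16 *sid)
{
	return ntohs(*sid);
}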
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index f45f4eddb741..82fe8c47a1de 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -38,9 +38,9 @@ int _rtw_init_cmd_priv (struct cmd_priv *pcmdpriv)
_func_enter_;
- _rtw_init_sema(&(pcmdpriv->cmd_queue_sema), 0);
- /* _rtw_init_sema(&(pcmdpriv->cmd_done_sema), 0); */
- _rtw_init_sema(&(pcmdpriv->terminate_cmdthread_sema), 0);
+ sema_init(&(pcmdpriv->cmd_queue_sema), 0);
+ /* sema_init(&(pcmdpriv->cmd_done_sema), 0); */
+ sema_init(&(pcmdpriv->terminate_cmdthread_sema), 0);
_rtw_init_queue(&(pcmdpriv->cmd_queue));
@@ -84,7 +84,7 @@ int _rtw_init_evt_priv(struct evt_priv *pevtpriv)
_func_enter_;
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
- ATOMIC_SET(&pevtpriv->event_seq, 0);
+ atomic_set(&pevtpriv->event_seq, 0);
pevtpriv->evt_done_cnt = 0;
_init_workitem(&pevtpriv->c2h_wk, c2h_wk_callback, NULL);
@@ -104,7 +104,7 @@ _func_enter_;
_cancel_workitem_sync(&pevtpriv->c2h_wk);
while (pevtpriv->c2h_wk_alive)
- rtw_msleep_os(10);
+ msleep(10);
while (!rtw_cbuf_empty(pevtpriv->c2h_queue)) {
void *c2h = rtw_cbuf_pop(pevtpriv->c2h_queue);
@@ -121,10 +121,6 @@ void _rtw_free_cmd_priv (struct cmd_priv *pcmdpriv)
_func_enter_;
if (pcmdpriv) {
- _rtw_spinlock_free(&(pcmdpriv->cmd_queue.lock));
- _rtw_free_sema(&(pcmdpriv->cmd_queue_sema));
- _rtw_free_sema(&(pcmdpriv->terminate_cmdthread_sema));
-
if (pcmdpriv->cmd_allocated_buf)
kfree(pcmdpriv->cmd_allocated_buf);
@@ -153,13 +149,11 @@ _func_enter_;
if (obj == NULL)
goto exit;
- /* _enter_critical_bh(&queue->lock, &irqL); */
- _enter_critical(&queue->lock, &irqL);
+ spin_lock_irqsave(&queue->lock, irqL);
rtw_list_insert_tail(&obj->list, &queue->queue);
- /* _exit_critical_bh(&queue->lock, &irqL); */
- _exit_critical(&queue->lock, &irqL);
+ spin_unlock_irqrestore(&queue->lock, irqL);
exit:
@@ -175,8 +169,7 @@ struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue)
_func_enter_;
- /* _enter_critical_bh(&(queue->lock), &irqL); */
- _enter_critical(&queue->lock, &irqL);
+ spin_lock_irqsave(&queue->lock, irqL);
if (rtw_is_list_empty(&(queue->queue))) {
obj = NULL;
} else {
@@ -184,8 +177,7 @@ _func_enter_;
rtw_list_delete(&obj->list);
}
- /* _exit_critical_bh(&(queue->lock), &irqL); */
- _exit_critical(&queue->lock, &irqL);
+ spin_unlock_irqrestore(&queue->lock, irqL);
_func_exit_;
@@ -262,7 +254,7 @@ _func_enter_;
res = _rtw_enqueue_cmd(&pcmdpriv->cmd_queue, cmd_obj);
if (res == _SUCCESS)
- _rtw_up_sema(&pcmdpriv->cmd_queue_sema);
+ up(&pcmdpriv->cmd_queue_sema);
exit:
@@ -287,7 +279,7 @@ void rtw_cmd_clr_isr(struct cmd_priv *pcmdpriv)
{
_func_enter_;
pcmdpriv->cmd_done_cnt++;
- /* _rtw_up_sema(&(pcmdpriv->cmd_done_sema)); */
+ /* up(&(pcmdpriv->cmd_done_sema)); */
_func_exit_;
}
@@ -330,7 +322,7 @@ _func_enter_;
pcmdbuf = pcmdpriv->cmd_buf;
pcmdpriv->cmdthd_running = true;
- _rtw_up_sema(&pcmdpriv->terminate_cmdthread_sema);
+ up(&pcmdpriv->terminate_cmdthread_sema);
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("start r871x rtw_cmd_thread !!!!\n"));
@@ -416,11 +408,11 @@ post_process:
rtw_free_cmd_obj(pcmd);
} while (1);
- _rtw_up_sema(&pcmdpriv->terminate_cmdthread_sema);
+ up(&pcmdpriv->terminate_cmdthread_sema);
_func_exit_;
- thread_exit();
+ complete_and_exit(NULL, 0);
}
u8 rtw_setstandby_cmd(struct adapter *padapter, uint action)
@@ -534,7 +526,7 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
if (res == _SUCCESS) {
- pmlmepriv->scan_start_time = rtw_get_current_time();
+ pmlmepriv->scan_start_time = jiffies;
_set_timer(&pmlmepriv->scan_to_timer, SCANNING_TIMEOUT);
@@ -1722,7 +1714,7 @@ _func_enter_;
break;
case LPS_CTRL_SPECIAL_PACKET:
/* DBG_88E("LPS_CTRL_SPECIAL_PACKET\n"); */
- pwrpriv->DelayLPSLastTimeStamp = rtw_get_current_time();
+ pwrpriv->DelayLPSLastTimeStamp = jiffies;
LPS_Leave(padapter);
break;
case LPS_CTRL_LEAVE:
@@ -1971,7 +1963,7 @@ static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
while (!val) {
- rtw_msleep_os(100);
+ msleep(100);
cnt++;
@@ -2200,15 +2192,14 @@ _func_exit_;
}
void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
- unsigned long irqL;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
_func_enter_;
if (pcmd->res != H2C_SUCCESS) {
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
set_fwstate(pmlmepriv, _FW_LINKED);
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ***Error: disconnect_cmd_callback Fail ***\n."));
@@ -2246,7 +2237,6 @@ _func_exit_;
void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
- unsigned long irqL;
u8 timer_cancelled;
struct sta_info *psta = NULL;
struct wlan_network *pwlan = NULL;
@@ -2263,7 +2253,7 @@ _func_enter_;
_cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
psta = rtw_get_stainfo(&padapter->stapriv, pnetwork->MacAddress);
@@ -2277,18 +2267,16 @@ _func_enter_;
rtw_indicate_connect(padapter);
} else {
- unsigned long irqL;
-
pwlan = _rtw_alloc_network(pmlmepriv);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
if (pwlan == NULL) {
pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
if (pwlan == NULL) {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n Error: can't get pwlan in rtw_joinbss_event_callback\n"));
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto createbss_cmd_fail;
}
- pwlan->last_scanned = rtw_get_current_time();
+ pwlan->last_scanned = jiffies;
} else {
rtw_list_insert_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue);
}
@@ -2300,13 +2288,13 @@ _func_enter_;
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
/* we will set _FW_LINKED when there is one more sat to join us (rtw_stassoc_event_callback) */
}
createbss_cmd_fail:
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
rtw_free_cmd_obj(pcmd);
@@ -2332,7 +2320,6 @@ _func_exit_;
void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
- unsigned long irqL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct set_assocsta_parm *passocsta_parm = (struct set_assocsta_parm *)(pcmd->parmbuf);
@@ -2349,13 +2336,13 @@ _func_enter_;
psta->aid = passocsta_rsp->cam_id;
psta->mac_id = passocsta_rsp->cam_id;
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) && (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true))
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
set_fwstate(pmlmepriv, _FW_LINKED);
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
exit:
rtw_free_cmd_obj(pcmd);
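In rtw_cmd.c the private semaphore wrappers give way to the plain kernel API: _rtw_init_sema()/_rtw_up_sema() become sema_init()/up(), the _rtw_spinlock_free()/_rtw_free_sema() teardown calls disappear because spinlocks and semaphores need no destruction, and the command thread exits through complete_and_exit(NULL, 0) instead of the driver's thread_exit() macro. A minimal sketch of the resulting shape, with hypothetical names (example_cmd_priv, example_cmd_thread) and not taken from the patch:

#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/completion.h>

struct example_cmd_priv {
	struct semaphore cmd_queue_sema;	/* signalled once per queued command */
};

static void example_cmd_init(struct example_cmd_priv *p)
{
	sema_init(&p->cmd_queue_sema, 0);	/* was _rtw_init_sema(..., 0) */
}

static void example_cmd_enqueue(struct example_cmd_priv *p)
{
	/* ... put the command on the queue ... */
	up(&p->cmd_queue_sema);			/* was _rtw_up_sema() */
}

static int example_cmd_thread(void *data)
{
	struct example_cmd_priv *p = data;

	while (down_interruptible(&p->cmd_queue_sema) == 0) {
		/* dequeue and execute one command */
	}
	complete_and_exit(NULL, 0);		/* was thread_exit() */
}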
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index 0fe5f5de54a9..af32041a1e97 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -835,7 +835,6 @@ int proc_get_all_sta_info(char *page, char **start,
off_t offset, int count,
int *eof, void *data)
{
- unsigned long irqL;
struct sta_info *psta;
struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
@@ -848,7 +847,7 @@ int proc_get_all_sta_info(char *page, char **start,
len += snprintf(page + len, count - len, "sta_dz_bitmap=0x%x, tim_bitmap=0x%x\n", pstapriv->sta_dz_bitmap, pstapriv->tim_bitmap);
- _enter_critical_bh(&pstapriv->sta_hash_lock, &irqL);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
for (i = 0; i < NUM_STA; i++) {
phead = &(pstapriv->sta_hash[i]);
@@ -882,7 +881,7 @@ int proc_get_all_sta_info(char *page, char **start,
}
}
}
- _exit_critical_bh(&pstapriv->sta_hash_lock, &irqL);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
*eof = 1;
return len;
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 806f56f1c437..6149e3aaa011 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -204,7 +204,7 @@ ReadEFuseByte(
/* This fix the problem that Efuse read error in high temperature condition. */
/* Designer says that there shall be some delay after ready bit is set, or the */
/* result will always stay on last data we read. */
- rtw_udelay_os(50);
+ udelay(50);
value32 = rtw_read32(Adapter, EFUSE_CTRL);
*pbuf = (u8)(value32 & 0xff);
diff --git a/drivers/staging/rtl8188eu/core/rtw_io.c b/drivers/staging/rtl8188eu/core/rtw_io.c
index 10c9c6560b20..ff0398fca52b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_io.c
+++ b/drivers/staging/rtl8188eu/core/rtw_io.c
@@ -205,9 +205,9 @@ void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
_func_enter_;
if (adapter->bDriverStopped || adapter->bSurpriseRemoved) {
- RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
- ("rtw_read_mem:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
- adapter->bDriverStopped, adapter->bSurpriseRemoved));
+ RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
+ ("rtw_read_mem:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
+ adapter->bDriverStopped, adapter->bSurpriseRemoved));
return;
}
_read_mem = pintfhdl->io_ops._read_mem;
@@ -239,9 +239,9 @@ void _rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
_func_enter_;
if (adapter->bDriverStopped || adapter->bSurpriseRemoved) {
- RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
- ("rtw_read_port:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
- adapter->bDriverStopped, adapter->bSurpriseRemoved));
+ RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
+ ("rtw_read_port:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
+ adapter->bDriverStopped, adapter->bSurpriseRemoved));
return;
}
@@ -296,7 +296,7 @@ u32 _rtw_write_port_and_wait(struct adapter *adapter, u32 addr, u32 cnt, u8 *pme
if (ret == _SUCCESS)
ret = rtw_sctx_wait(&sctx);
- return ret;
+ return ret;
}
void _rtw_write_port_cancel(struct adapter *adapter)
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index 193f641bd0de..e25b39b97d9e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -68,7 +68,6 @@ _func_exit_;
u8 rtw_do_join(struct adapter *padapter)
{
- unsigned long irqL;
struct list_head *plist, *phead;
u8 *pibss = NULL;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
@@ -77,7 +76,7 @@ u8 rtw_do_join(struct adapter *padapter)
_func_enter_;
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -92,7 +91,7 @@ _func_enter_;
pmlmepriv->to_join = true;
if (_rtw_queue_empty(queue)) {
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
/* when set_ssid/set_bssid for rtw_do_join(), but scanning queue is empty */
@@ -116,7 +115,7 @@ _func_enter_;
} else {
int select_ret;
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
select_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
if (select_ret == _SUCCESS) {
pmlmepriv->to_join = false;
@@ -178,7 +177,6 @@ _func_exit_;
u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid)
{
- unsigned long irqL;
u8 status = _SUCCESS;
u32 cur_time = 0;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -195,7 +193,7 @@ _func_enter_;
goto exit;
}
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
DBG_88E("Set BSSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
@@ -233,7 +231,7 @@ handle_tkip_countermeasure:
/* should we add something here...? */
if (padapter->securitypriv.btkip_countermeasure) {
- cur_time = rtw_get_current_time();
+ cur_time = jiffies;
if ((cur_time - padapter->securitypriv.btkip_countermeasure_time) > 60 * HZ) {
padapter->securitypriv.btkip_countermeasure = false;
@@ -253,7 +251,7 @@ handle_tkip_countermeasure:
status = rtw_do_join(padapter);
release_mlme_lock:
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
exit:
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
@@ -266,7 +264,6 @@ _func_exit_;
u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
{
- unsigned long irqL;
u8 status = _SUCCESS;
u32 cur_time = 0;
@@ -285,7 +282,7 @@ _func_enter_;
goto exit;
}
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
DBG_88E("Set SSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
@@ -346,7 +343,7 @@ _func_enter_;
handle_tkip_countermeasure:
if (padapter->securitypriv.btkip_countermeasure) {
- cur_time = rtw_get_current_time();
+ cur_time = jiffies;
if ((cur_time - padapter->securitypriv.btkip_countermeasure_time) > 60 * HZ) {
padapter->securitypriv.btkip_countermeasure = false;
@@ -367,7 +364,7 @@ handle_tkip_countermeasure:
}
release_mlme_lock:
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
exit:
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
@@ -379,7 +376,6 @@ _func_exit_;
u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter,
enum ndis_802_11_network_infra networktype)
{
- unsigned long irqL;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *cur_network = &pmlmepriv->cur_network;
enum ndis_802_11_network_infra *pold_state = &(cur_network->network.InfrastructureMode);
@@ -391,7 +387,7 @@ _func_enter_;
*pold_state, networktype, get_fwstate(pmlmepriv)));
if (*pold_state != networktype) {
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, (" change mode!"));
/* DBG_88E("change mode, old_mode =%d, new_mode =%d, fw_state = 0x%x\n", *pold_state, networktype, get_fwstate(pmlmepriv)); */
@@ -439,7 +435,7 @@ _func_enter_;
case Ndis802_11InfrastructureMax:
break;
}
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
}
_func_exit_;
@@ -450,12 +446,11 @@ _func_exit_;
u8 rtw_set_802_11_disassociate(struct adapter *padapter)
{
- unsigned long irqL;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
_func_enter_;
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
@@ -467,7 +462,7 @@ _func_enter_;
rtw_pwr_wakeup(padapter);
}
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
_func_exit_;
@@ -476,7 +471,6 @@ _func_exit_;
u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_ssid *pssid, int ssid_max_num)
{
- unsigned long irqL;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
u8 res = true;
@@ -512,11 +506,11 @@ _func_enter_;
return _SUCCESS;
}
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
res = rtw_sitesurvey_cmd(padapter, pssid, ssid_max_num, NULL, 0);
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
}
exit:
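The timestamp handling in rtw_ioctl_set.c (and in rtw_mlme.c below) drops the rtw_get_current_time() and rtw_msleep_os() wrappers in favour of jiffies and msleep() directly; the TKIP countermeasure check then becomes an ordinary jiffies comparison against 60 * HZ. A minimal sketch of that pattern, with a hypothetical example_start variable and using time_after() for a wrap-safe comparison, not code from the patch:

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/types.h>

static unsigned long example_start;		/* jiffies at the triggering event */

static void example_arm(void)
{
	example_start = jiffies;		/* was rtw_get_current_time() */
}

static bool example_window_elapsed(void)
{
	/* the driver open-codes "now - start > 60 * HZ"; time_after()
	 * expresses the same test and also copes with jiffies wraparound */
	return time_after(jiffies, example_start + 60 * HZ);
}

static void example_poll_delay(void)
{
	msleep(20);				/* was rtw_msleep_os(20) */
}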
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index ac3535d33a45..c7382303088f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -54,7 +54,7 @@ _func_enter_;
pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown;
pmlmepriv->scan_mode = SCAN_ACTIVE;/* 1: active, 0: pasive. Maybe someday we should rename this varable to "active_mode" (Jeff) */
- _rtw_spinlock_init(&(pmlmepriv->lock));
+ spin_lock_init(&(pmlmepriv->lock));
_rtw_init_queue(&(pmlmepriv->free_bss_pool));
_rtw_init_queue(&(pmlmepriv->scanned_queue));
@@ -93,13 +93,6 @@ _func_exit_;
return res;
}
-static void rtw_mfree_mlme_priv_lock (struct mlme_priv *pmlmepriv)
-{
- _rtw_spinlock_free(&pmlmepriv->lock);
- _rtw_spinlock_free(&(pmlmepriv->free_bss_pool.lock));
- _rtw_spinlock_free(&(pmlmepriv->scanned_queue.lock));
-}
-
#if defined (CONFIG_88EU_AP_MODE)
static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
{
@@ -136,8 +129,6 @@ _func_enter_;
rtw_free_mlme_priv_ie_data(pmlmepriv);
if (pmlmepriv) {
- rtw_mfree_mlme_priv_lock (pmlmepriv);
-
if (pmlmepriv->free_bss_buf) {
rtw_vmfree(pmlmepriv->free_bss_buf, MAX_BSS_CNT * sizeof(struct wlan_network));
}
@@ -147,18 +138,16 @@ _func_exit_;
int _rtw_enqueue_network(struct __queue *queue, struct wlan_network *pnetwork)
{
- unsigned long irql;
-
_func_enter_;
if (pnetwork == NULL)
goto exit;
- _enter_critical_bh(&queue->lock, &irql);
+ spin_lock_bh(&queue->lock);
rtw_list_insert_tail(&pnetwork->list, &queue->queue);
- _exit_critical_bh(&queue->lock, &irql);
+ spin_unlock_bh(&queue->lock);
exit:
@@ -169,13 +158,11 @@ _func_exit_;
struct wlan_network *_rtw_dequeue_network(struct __queue *queue)
{
- unsigned long irql;
-
struct wlan_network *pnetwork;
_func_enter_;
- _enter_critical_bh(&queue->lock, &irql);
+ spin_lock_bh(&queue->lock);
if (_rtw_queue_empty(queue)) {
pnetwork = NULL;
@@ -185,7 +172,7 @@ _func_enter_;
rtw_list_delete(&(pnetwork->list));
}
- _exit_critical_bh(&queue->lock, &irql);
+ spin_unlock_bh(&queue->lock);
_func_exit_;
@@ -194,14 +181,13 @@ _func_exit_;
struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *free_queue) */
{
- unsigned long irql;
struct wlan_network *pnetwork;
struct __queue *free_queue = &pmlmepriv->free_bss_pool;
struct list_head *plist = NULL;
_func_enter_;
- _enter_critical_bh(&free_queue->lock, &irql);
+ spin_lock_bh(&free_queue->lock);
if (_rtw_queue_empty(free_queue) == true) {
pnetwork = NULL;
@@ -216,14 +202,14 @@ _func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("_rtw_alloc_network: ptr=%p\n", plist));
pnetwork->network_type = 0;
pnetwork->fixed = false;
- pnetwork->last_scanned = rtw_get_current_time();
+ pnetwork->last_scanned = jiffies;
pnetwork->aid = 0;
pnetwork->join_res = 0;
pmlmepriv->num_of_scanned++;
exit:
- _exit_critical_bh(&free_queue->lock, &irql);
+ spin_unlock_bh(&free_queue->lock);
_func_exit_;
@@ -234,7 +220,6 @@ void _rtw_free_network(struct mlme_priv *pmlmepriv , struct wlan_network *pnetwo
{
u32 curr_time, delta_time;
u32 lifetime = SCANQUEUE_LIFETIME;
- unsigned long irql;
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
_func_enter_;
@@ -244,7 +229,7 @@ _func_enter_;
if (pnetwork->fixed)
goto exit;
- curr_time = rtw_get_current_time();
+ curr_time = jiffies;
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)))
lifetime = 1;
@@ -253,11 +238,11 @@ _func_enter_;
if (delta_time < lifetime)/* unit:sec */
goto exit;
}
- _enter_critical_bh(&free_queue->lock, &irql);
+ spin_lock_bh(&free_queue->lock);
rtw_list_delete(&(pnetwork->list));
rtw_list_insert_tail(&(pnetwork->list), &(free_queue->queue));
pmlmepriv->num_of_scanned--;
- _exit_critical_bh(&free_queue->lock, &irql);
+ spin_unlock_bh(&free_queue->lock);
exit:
_func_exit_;
@@ -315,7 +300,6 @@ _func_exit_;
void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
{
- unsigned long irql;
struct list_head *phead, *plist;
struct wlan_network *pnetwork;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -324,7 +308,7 @@ void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
_func_enter_;
- _enter_critical_bh(&scanned_queue->lock, &irql);
+ spin_lock_bh(&scanned_queue->lock);
phead = get_list_head(scanned_queue);
plist = get_next(phead);
@@ -336,7 +320,7 @@ _func_enter_;
_rtw_free_network(pmlmepriv, pnetwork, isfreeall);
}
- _exit_critical_bh(&scanned_queue->lock, &irql);
+ spin_unlock_bh(&scanned_queue->lock);
_func_exit_;
}
@@ -361,7 +345,7 @@ _func_exit_;
void rtw_generate_random_ibss(u8 *pibss)
{
- u32 curtime = rtw_get_current_time();
+ u32 curtime = jiffies;
_func_enter_;
pibss[0] = 0x02; /* in ad-hoc mode bit1 must set to 1 */
@@ -592,7 +576,6 @@ Caller must hold pmlmepriv->lock first.
*/
void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *target)
{
- unsigned long irql;
struct list_head *plist, *phead;
u32 bssid_ex_sz;
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
@@ -602,7 +585,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
_func_enter_;
- _enter_critical_bh(&queue->lock, &irql);
+ spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
plist = get_next(phead);
@@ -630,7 +613,7 @@ _func_enter_;
memcpy(&(pnetwork->network), target, get_wlan_bssid_ex_sz(target));
/* variable initialize */
pnetwork->fixed = false;
- pnetwork->last_scanned = rtw_get_current_time();
+ pnetwork->last_scanned = jiffies;
pnetwork->network_type = 0;
pnetwork->aid = 0;
@@ -654,7 +637,7 @@ _func_enter_;
rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &(target->PhyInfo.Optimum_antenna));
memcpy(&(pnetwork->network), target, bssid_ex_sz);
- pnetwork->last_scanned = rtw_get_current_time();
+ pnetwork->last_scanned = jiffies;
/* bss info not receiving from the right channel */
if (pnetwork->network.PhyInfo.SignalQuality == 101)
@@ -668,7 +651,7 @@ _func_enter_;
*/
bool update_ie = true;
- pnetwork->last_scanned = rtw_get_current_time();
+ pnetwork->last_scanned = jiffies;
/* target.Reserved[0]== 1, means that scanned network is a bcn frame. */
if ((pnetwork->network.IELength > target->IELength) && (target->Reserved[0] == 1))
@@ -678,7 +661,7 @@ _func_enter_;
}
exit:
- _exit_critical_bh(&queue->lock, &irql);
+ spin_unlock_bh(&queue->lock);
_func_exit_;
}
@@ -754,7 +737,6 @@ _func_exit_;
void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
{
- unsigned long irql;
u32 len;
struct wlan_bssid_ex *pnetwork;
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
@@ -770,23 +752,22 @@ _func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n****rtw_survey_event_callback: return a wrong bss ***\n"));
return;
}
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
/* update IBSS_network 's timestamp */
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) == true) {
if (_rtw_memcmp(&(pmlmepriv->cur_network.network.MacAddress), pnetwork->MacAddress, ETH_ALEN)) {
struct wlan_network *ibss_wlan = NULL;
- unsigned long irql;
memcpy(pmlmepriv->cur_network.network.IEs, pnetwork->IEs, 8);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
ibss_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->MacAddress);
if (ibss_wlan) {
memcpy(ibss_wlan->network.IEs , pnetwork->IEs, 8);
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto exit;
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
}
}
@@ -799,7 +780,7 @@ _func_enter_;
exit:
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
_func_exit_;
@@ -810,12 +791,11 @@ _func_exit_;
void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
{
- unsigned long irql;
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
struct mlme_ext_priv *pmlmeext;
_func_enter_;
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
if (pmlmepriv->wps_probe_req_ie) {
pmlmepriv->wps_probe_req_ie_len = 0;
@@ -894,7 +874,7 @@ _func_enter_;
indicate_wx_scan_complete_event(adapter);
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
p2p_ps_wk_cmd(adapter, P2P_PS_SCAN_DONE, 0);
@@ -917,7 +897,6 @@ void rtw_fwdbg_event_callback(struct adapter *adapter , u8 *pbuf)
static void free_scanqueue(struct mlme_priv *pmlmepriv)
{
- unsigned long irql, irql0;
struct __queue *free_queue = &pmlmepriv->free_bss_pool;
struct __queue *scan_queue = &pmlmepriv->scanned_queue;
struct list_head *plist, *phead, *ptemp;
@@ -925,8 +904,8 @@ static void free_scanqueue(struct mlme_priv *pmlmepriv)
_func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+free_scanqueue\n"));
- _enter_critical_bh(&scan_queue->lock, &irql0);
- _enter_critical_bh(&free_queue->lock, &irql);
+ spin_lock_bh(&scan_queue->lock);
+ spin_lock_bh(&free_queue->lock);
phead = get_list_head(scan_queue);
plist = get_next(phead);
@@ -939,8 +918,8 @@ _func_enter_;
pmlmepriv->num_of_scanned--;
}
- _exit_critical_bh(&free_queue->lock, &irql);
- _exit_critical_bh(&scan_queue->lock, &irql0);
+ spin_unlock_bh(&free_queue->lock);
+ spin_unlock_bh(&scan_queue->lock);
_func_exit_;
}
@@ -950,7 +929,6 @@ _func_exit_;
*/
void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue)
{
- unsigned long irql;
struct wlan_network *pwlan = NULL;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
struct sta_priv *pstapriv = &adapter->stapriv;
@@ -968,9 +946,9 @@ _func_enter_;
psta = rtw_get_stainfo(&adapter->stapriv, tgt_network->network.MacAddress);
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(adapter, psta);
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
}
if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE | WIFI_AP_STATE)) {
@@ -979,15 +957,15 @@ _func_enter_;
rtw_free_all_stainfo(adapter);
psta = rtw_get_bcmc_stainfo(adapter);
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(adapter, psta);
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
rtw_init_bcmc_stainfo(adapter);
}
if (lock_scanned_queue)
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
if (pwlan)
@@ -999,7 +977,7 @@ _func_enter_;
rtw_free_network_nolock(pmlmepriv, pwlan);
if (lock_scanned_queue)
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
pmlmepriv->key_mask = 0;
_func_exit_;
}
@@ -1075,14 +1053,14 @@ void rtw_scan_abort(struct adapter *adapter)
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
struct mlme_ext_priv *pmlmeext = &(adapter->mlmeextpriv);
- start = rtw_get_current_time();
+ start = jiffies;
pmlmeext->scan_abort = true;
while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) &&
rtw_get_passing_time_ms(start) <= 200) {
if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
break;
DBG_88E(FUNC_NDEV_FMT"fw_state=_FW_UNDER_SURVEY!\n", FUNC_NDEV_ARG(adapter->pnetdev));
- rtw_msleep_os(20);
+ msleep(20);
}
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
if (!adapter->bDriverStopped && !adapter->bSurpriseRemoved)
@@ -1219,7 +1197,6 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
{
- unsigned long irql, irql2;
u8 timer_cancelled;
struct sta_info *ptarget_sta = NULL, *pcur_sta = NULL;
struct sta_priv *pstapriv = &adapter->stapriv;
@@ -1249,12 +1226,12 @@ _func_enter_;
goto ignore_nolock;
}
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\nrtw_joinbss_event_callback!! _enter_critical\n"));
if (pnetwork->join_res > 0) {
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
/* s1. find ptarget_wlan */
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
@@ -1267,9 +1244,9 @@ _func_enter_;
pcur_sta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress);
if (pcur_sta) {
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(adapter, pcur_sta);
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
}
ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->network.MacAddress);
@@ -1291,7 +1268,7 @@ _func_enter_;
rtw_joinbss_update_network(adapter, ptarget_wlan, pnetwork);
} else {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't find ptarget_wlan when joinbss_event callback\n"));
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto ignore_joinbss_callback;
}
@@ -1301,7 +1278,7 @@ _func_enter_;
ptarget_sta = rtw_joinbss_update_stainfo(adapter, pnetwork);
if (ptarget_sta == NULL) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't update stainfo when joinbss_event callback\n"));
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto ignore_joinbss_callback;
}
}
@@ -1321,11 +1298,11 @@ _func_enter_;
} else {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_joinbss_event_callback err: fw_state:%x", get_fwstate(pmlmepriv)));
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto ignore_joinbss_callback;
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
} else if (pnetwork->join_res == -4) {
rtw_reset_securitypriv(adapter);
@@ -1341,7 +1318,7 @@ _func_enter_;
}
ignore_joinbss_callback:
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
ignore_nolock:
_func_exit_;
}
@@ -1405,7 +1382,6 @@ void rtw_stassoc_hw_rpt(struct adapter *adapter, struct sta_info *psta)
void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
{
- unsigned long irql;
struct sta_info *psta;
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
struct stassoc_event *pstassoc = (struct stassoc_event *)pbuf;
@@ -1449,20 +1425,20 @@ _func_enter_;
if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
psta->dot118021XPrivacy = adapter->securitypriv.dot11PrivacyAlgrthm;
psta->ieee8021x_blocked = false;
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))) {
if (adapter->stapriv.asoc_sta_count == 2) {
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
if (ptarget_wlan)
ptarget_wlan->fixed = true;
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
/* a sta + bc/mc_stainfo (not Ibss_stainfo) */
rtw_indicate_connect(adapter);
}
}
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
mlmeext_sta_add_event_callback(adapter, psta);
exit:
_func_exit_;
@@ -1470,7 +1446,6 @@ _func_exit_;
void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
{
- unsigned long irql, irql2;
int mac_id = -1;
struct sta_info *psta;
struct wlan_network *pwlan = NULL;
@@ -1503,7 +1478,7 @@ _func_enter_;
mlmeext_sta_del_event_callback(adapter);
- _enter_critical_bh(&pmlmepriv->lock, &irql2);
+ spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
if (pmlmepriv->to_roaming > 0)
@@ -1518,31 +1493,31 @@ _func_enter_;
rtw_free_assoc_resources(adapter, 1);
rtw_indicate_disconnect(adapter);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
/* remove the network entry in scanned_queue */
pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
if (pwlan) {
pwlan->fixed = false;
rtw_free_network_nolock(pmlmepriv, pwlan);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
_rtw_roaming(adapter, tgt_network);
}
if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(adapter, psta);
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
if (adapter->stapriv.asoc_sta_count == 1) { /* a sta + bc/mc_stainfo (not Ibss_stainfo) */
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
/* free old ibss network */
pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
if (pwlan) {
pwlan->fixed = false;
rtw_free_network_nolock(pmlmepriv, pwlan);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
/* re-create ibss */
pdev_network = &(adapter->registrypriv.dev_network);
pibss = adapter->registrypriv.dev_network.MacAddress;
@@ -1565,7 +1540,7 @@ _func_enter_;
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error=>stadel_event_callback: rtw_createbss_cmd status FAIL***\n "));
}
}
- _exit_critical_bh(&pmlmepriv->lock, &irql2);
+ spin_unlock_bh(&pmlmepriv->lock);
_func_exit_;
}
@@ -1582,7 +1557,6 @@ _func_exit_;
*/
void _rtw_join_timeout_handler (struct adapter *adapter)
{
- unsigned long irql;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
int do_join_r;
@@ -1594,7 +1568,7 @@ _func_enter_;
return;
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
if (pmlmepriv->to_roaming > 0) { /* join timeout caused by roaming */
while (1) {
@@ -1617,7 +1591,7 @@ _func_enter_;
rtw_indicate_disconnect(adapter);
free_scanqueue(pmlmepriv);/* */
}
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
_func_exit_;
}
@@ -1627,13 +1601,12 @@ _func_exit_;
*/
void rtw_scan_timeout_handler (struct adapter *adapter)
{
- unsigned long irql;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
DBG_88E(FUNC_ADPT_FMT" fw_state=%x\n", FUNC_ADPT_ARG(adapter), get_fwstate(pmlmepriv));
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
rtw_indicate_scan_done(adapter, true);
}
@@ -1761,7 +1734,6 @@ pmlmepriv->lock
int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
{
- unsigned long irql;
int ret;
struct list_head *phead;
struct adapter *adapter;
@@ -1772,7 +1744,7 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
_func_enter_;
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
adapter = (struct adapter *)pmlmepriv->nic_hdl;
pmlmepriv->pscanned = get_next(phead);
@@ -1819,7 +1791,7 @@ _func_enter_;
ret = rtw_joinbss_cmd(adapter, candidate);
exit:
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
_func_exit_;
@@ -2394,12 +2366,11 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
void rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
{
- unsigned long irql;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
_rtw_roaming(padapter, tgt_network);
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
}
void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
{
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 7ab5ff039c88..6f7e415ecb6c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -221,7 +221,7 @@ static void init_mlme_ext_priv_value(struct adapter *padapter)
_12M_RATE_, _24M_RATE_, 0xff,
};
- ATOMIC_SET(&pmlmeext->event_seq, 0);
+ atomic_set(&pmlmeext->event_seq, 0);
pmlmeext->mgnt_seq = 0;/* reset to zero when disconnect at client mode */
pmlmeext->cur_channel = padapter->registrypriv.channel;
@@ -756,7 +756,6 @@ _END_ONBEACON_:
unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame)
{
#ifdef CONFIG_88EU_AP_MODE
- unsigned long irqL;
unsigned int auth_mode, ie_len;
u16 seq;
unsigned char *sa, *p;
@@ -817,24 +816,24 @@ unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame)
pstat->state = WIFI_FW_AUTH_NULL;
pstat->auth_seq = 0;
} else {
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
if (!rtw_is_list_empty(&pstat->asoc_list)) {
rtw_list_delete(&pstat->asoc_list);
pstapriv->asoc_list_cnt--;
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
if (seq == 1) {
/* TODO: STA re_auth and auth timeout */
}
}
- _enter_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->auth_list_lock);
if (rtw_is_list_empty(&pstat->auth_list)) {
rtw_list_insert_tail(&pstat->auth_list, &pstapriv->auth_list);
pstapriv->auth_list_cnt++;
}
- _exit_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->auth_list_lock);
if (pstat->auth_seq == 0)
pstat->expire_to = pstapriv->auth_to;
@@ -1005,7 +1004,6 @@ authclnt_fail:
unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
{
#ifdef CONFIG_88EU_AP_MODE
- unsigned long irqL;
u16 capab_info;
struct rtw_ieee802_11_elems elems;
struct sta_info *pstat;
@@ -1408,20 +1406,20 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
pstat->state &= (~WIFI_FW_ASSOC_STATE);
pstat->state |= WIFI_FW_ASSOC_SUCCESS;
- _enter_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->auth_list_lock);
if (!rtw_is_list_empty(&pstat->auth_list)) {
rtw_list_delete(&pstat->auth_list);
pstapriv->auth_list_cnt--;
}
- _exit_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->auth_list_lock);
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
if (rtw_is_list_empty(&pstat->asoc_list)) {
pstat->expire_to = pstapriv->expire_to;
rtw_list_insert_tail(&pstat->asoc_list, &pstapriv->asoc_list);
pstapriv->asoc_list_cnt++;
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
/* now the station is qualified to join our BSS... */
if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (_STATS_SUCCESSFUL_ == status)) {
@@ -1590,7 +1588,6 @@ unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
#ifdef CONFIG_88EU_AP_MODE
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- unsigned long irqL;
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -1601,13 +1598,13 @@ unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
if (psta) {
u8 updated = 0;
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
if (!rtw_is_list_empty(&psta->asoc_list)) {
rtw_list_delete(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
updated = ap_free_sta(padapter, psta, false, reason);
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
associated_clients_update(padapter, updated);
}
@@ -1654,14 +1651,9 @@ unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
#ifdef CONFIG_88EU_AP_MODE
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- unsigned long irqL;
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
- /* _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL); */
- /* rtw_free_stainfo(padapter, psta); */
- /* _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL); */
-
DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
reason, GetAddr2Ptr(pframe));
@@ -1669,13 +1661,13 @@ unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
if (psta) {
u8 updated = 0;
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
if (!rtw_is_list_empty(&psta->asoc_list)) {
rtw_list_delete(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
updated = ap_free_sta(padapter, psta, false, reason);
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
associated_clients_update(padapter, updated);
}
@@ -3826,7 +3818,7 @@ int issue_probereq_p2p_ex(struct adapter *adapter, u8 *da, int try_cnt, int wait
{
int ret;
int i = 0;
- u32 start = rtw_get_current_time();
+ u32 start = jiffies;
do {
ret = _issue_probereq_p2p(adapter, da, wait_ms > 0 ? true : false);
@@ -3837,7 +3829,7 @@ int issue_probereq_p2p_ex(struct adapter *adapter, u8 *da, int try_cnt, int wait
break;
if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- rtw_msleep_os(wait_ms);
+ msleep(wait_ms);
} while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
if (ret != _FAIL) {
@@ -4419,7 +4411,7 @@ s32 dump_mgntframe_and_wait(struct adapter *padapter, struct xmit_frame *pmgntfr
if (ret == _SUCCESS)
ret = rtw_sctx_wait(&sctx);
- return ret;
+ return ret;
}
s32 dump_mgntframe_and_wait_ack(struct adapter *padapter, struct xmit_frame *pmgntframe)
@@ -4487,9 +4479,6 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
__le16 *fctrl;
unsigned int rate_len;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
-#if defined(CONFIG_88EU_AP_MODE)
- unsigned long irqL;
-#endif /* if defined (CONFIG_88EU_AP_MODE) */
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
@@ -4505,7 +4494,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
return;
}
#if defined (CONFIG_88EU_AP_MODE)
- _enter_critical_bh(&pmlmepriv->bcn_update_lock, &irqL);
+ spin_lock_bh(&pmlmepriv->bcn_update_lock);
#endif /* if defined (CONFIG_88EU_AP_MODE) */
/* update attribute */
@@ -4690,7 +4679,7 @@ _issue_bcn:
#if defined (CONFIG_88EU_AP_MODE)
pmlmepriv->update_bcn = false;
- _exit_critical_bh(&pmlmepriv->bcn_update_lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->bcn_update_lock);
#endif /* if defined (CONFIG_88EU_AP_MODE) */
if ((pattrib->pktlen + TXDESC_SIZE) > 512) {
@@ -4972,7 +4961,7 @@ int issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid,
{
int ret;
int i = 0;
- u32 start = rtw_get_current_time();
+ u32 start = jiffies;
do {
ret = _issue_probereq(padapter, pssid, da, wait_ms > 0 ? true : false);
@@ -4983,7 +4972,7 @@ int issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid,
break;
if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- rtw_msleep_os(wait_ms);
+ msleep(wait_ms);
} while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
@@ -5693,7 +5682,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow
{
int ret;
int i = 0;
- u32 start = rtw_get_current_time();
+ u32 start = jiffies;
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
@@ -5710,7 +5699,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow
break;
if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- rtw_msleep_os(wait_ms);
+ msleep(wait_ms);
} while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
if (ret != _FAIL) {
@@ -5816,7 +5805,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int
{
int ret;
int i = 0;
- u32 start = rtw_get_current_time();
+ u32 start = jiffies;
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
@@ -5833,7 +5822,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int
break;
if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- rtw_msleep_os(wait_ms);
+ msleep(wait_ms);
} while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
if (ret != _FAIL) {
@@ -5934,7 +5923,7 @@ int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason, int
{
int ret;
int i = 0;
- u32 start = rtw_get_current_time();
+ u32 start = jiffies;
do {
ret = _issue_deauth(padapter, da, reason, wait_ms > 0 ? true : false);
@@ -5945,7 +5934,7 @@ int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason, int
break;
if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- rtw_msleep_os(wait_ms);
+ msleep(wait_ms);
} while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
if (ret != _FAIL) {
@@ -6156,7 +6145,6 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
static void issue_action_BSSCoexistPacket(struct adapter *padapter)
{
- unsigned long irqL;
struct list_head *plist, *phead;
unsigned char category, action;
struct xmit_frame *pmgntframe;
@@ -6231,7 +6219,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
if (pmlmepriv->num_sta_no_ht > 0) {
int i;
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -6261,7 +6249,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
ICS[0][0] = 1;
}
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
for (i = 0; i < 8; i++) {
if (ICS[i][0] == 1) {
@@ -6338,14 +6326,14 @@ unsigned int send_beacon(struct adapter *padapter)
int issue = 0;
int poll = 0;
- u32 start = rtw_get_current_time();
+ u32 start = jiffies;
rtw_hal_set_hwreg(padapter, HW_VAR_BCN_VALID, NULL);
do {
issue_beacon(padapter, 100);
issue++;
do {
- rtw_yield_os();
+ yield();
rtw_hal_get_hwreg(padapter, HW_VAR_BCN_VALID, (u8 *)(&bxmitok));
poll++;
} while ((poll%10) != 0 && !bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
@@ -6435,7 +6423,7 @@ void site_survey(struct adapter *padapter)
if (pmlmeext->sitesurvey_res.ssid[i].SsidLength) {
/* todo: to issue two probe req??? */
issue_probereq(padapter, &(pmlmeext->sitesurvey_res.ssid[i]), NULL);
- /* rtw_msleep_os(SURVEY_TO>>1); */
+ /* msleep(SURVEY_TO>>1); */
issue_probereq(padapter, &(pmlmeext->sitesurvey_res.ssid[i]), NULL);
}
}
@@ -6443,7 +6431,7 @@ void site_survey(struct adapter *padapter)
if (pmlmeext->sitesurvey_res.scan_mode == SCAN_ACTIVE) {
/* todo: to issue two probe req??? */
issue_probereq(padapter, NULL, NULL);
- /* rtw_msleep_os(SURVEY_TO>>1); */
+ /* msleep(SURVEY_TO>>1); */
issue_probereq(padapter, NULL, NULL);
}
}
@@ -7082,7 +7070,7 @@ void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame
pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
pc2h_evt_hdr->len = sizeof(struct survey_event);
pc2h_evt_hdr->ID = GEN_EVT_CODE(_Survey);
- pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
psurvey_evt = (struct survey_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
@@ -7134,7 +7122,7 @@ void report_surveydone_event(struct adapter *padapter)
pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
pc2h_evt_hdr->len = sizeof(struct surveydone_event);
pc2h_evt_hdr->ID = GEN_EVT_CODE(_SurveyDone);
- pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
psurveydone_evt = (struct surveydone_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
psurveydone_evt->bss_cnt = pmlmeext->sitesurvey_res.bss_cnt;
@@ -7180,7 +7168,7 @@ void report_join_res(struct adapter *padapter, int res)
pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
pc2h_evt_hdr->len = sizeof(struct joinbss_event);
pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss);
- pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
pjoinbss_evt = (struct joinbss_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
memcpy((unsigned char *)(&(pjoinbss_evt->network.network)), &(pmlmeinfo->network), sizeof(struct wlan_bssid_ex));
@@ -7233,7 +7221,7 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
pc2h_evt_hdr->len = sizeof(struct stadel_event);
pc2h_evt_hdr->ID = GEN_EVT_CODE(_DelSTA);
- pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
pdel_sta_evt = (struct stadel_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
memcpy((unsigned char *)(&(pdel_sta_evt->macaddr)), MacAddr, ETH_ALEN);
@@ -7288,7 +7276,7 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
pc2h_evt_hdr->len = sizeof(struct stassoc_event);
pc2h_evt_hdr->ID = GEN_EVT_CODE(_AddSTA);
- pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
padd_sta_evt = (struct stassoc_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
memcpy((unsigned char *)(&(padd_sta_evt->macaddr)), MacAddr, ETH_ALEN);
@@ -8334,7 +8322,7 @@ u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf)
goto _abort_event_;
}
- ATOMIC_INC(&pevt_priv->event_seq);
+ atomic_inc(&pevt_priv->event_seq);
peventbuf += 2;
@@ -8365,7 +8353,6 @@ u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
}
#ifdef CONFIG_88EU_AP_MODE
else { /* tx bc/mc frames after update TIM */
- unsigned long irqL;
struct sta_info *psta_bmc;
struct list_head *xmitframe_plist, *xmitframe_phead;
struct xmit_frame *pxmitframe = NULL;
@@ -8377,8 +8364,8 @@ u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
return H2C_SUCCESS;
if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) {
- rtw_msleep_os(10);/* 10ms, ATIM(HIQ) Windows */
- _enter_critical_bh(&psta_bmc->sleep_q.lock, &irqL);
+ msleep(10);/* 10ms, ATIM(HIQ) Windows */
+ spin_lock_bh(&psta_bmc->sleep_q.lock);
xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
xmitframe_plist = get_next(xmitframe_phead);
@@ -8400,12 +8387,12 @@ u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
pxmitframe->attrib.qsel = 0x11;/* HIQ */
- _exit_critical_bh(&psta_bmc->sleep_q.lock, &irqL);
+ spin_unlock_bh(&psta_bmc->sleep_q.lock);
if (rtw_hal_xmit(padapter, pxmitframe))
rtw_os_xmit_complete(padapter, pxmitframe);
- _enter_critical_bh(&psta_bmc->sleep_q.lock, &irqL);
+ spin_lock_bh(&psta_bmc->sleep_q.lock);
}
- _exit_critical_bh(&psta_bmc->sleep_q.lock, &irqL);
+ spin_unlock_bh(&psta_bmc->sleep_q.lock);
}
}
#endif
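
The rtw_mlmeext.c hunks above swap the driver's private ATOMIC_INC_RETURN()/ATOMIC_INC() wrappers for the kernel's atomic_inc_return()/atomic_inc(), and its _enter_critical_bh()/_exit_critical_bh() pair for spin_lock_bh()/spin_unlock_bh(), which also lets every now-unused irqL local go away. A minimal, self-contained sketch of that pattern follows; the event_seq counter, sleep_q list and demo_* names are illustrative only, not the driver's real structures.

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* Illustrative state, standing in for pmlmeext->event_seq and a sleep queue. */
static atomic_t event_seq = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(sleep_q_lock);
static LIST_HEAD(sleep_q);

struct demo_frame {
	struct list_head list;
};

static void demo_emit_event_and_flush(void)
{
	struct demo_frame *f, *tmp;
	int seq;

	/* atomic_inc_return() replaces the old ATOMIC_INC_RETURN() wrapper. */
	seq = atomic_inc_return(&event_seq);

	/*
	 * spin_lock_bh() disables bottom halves on the local CPU and needs no
	 * saved irqflags, which is why the irqL locals disappear in the hunks.
	 */
	spin_lock_bh(&sleep_q_lock);
	list_for_each_entry_safe(f, tmp, &sleep_q, list)
		list_del(&f->list);
	spin_unlock_bh(&sleep_q_lock);
	(void)seq;
}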
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp.c b/drivers/staging/rtl8188eu/core/rtw_mp.c
index 9832dcbbd07f..6451efdfb132 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mp.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mp.c
@@ -323,10 +323,7 @@ s32 mp_start_test(struct adapter *padapter)
struct sta_info *psta;
u32 length;
u8 val8;
-
- unsigned long irqL;
s32 res = _SUCCESS;
-
struct mp_priv *pmppriv = &padapter->mppriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *tgt_network = &pmlmepriv->cur_network;
@@ -379,7 +376,7 @@ s32 mp_start_test(struct adapter *padapter)
else
bssid.Length = length;
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true)
goto end_of_mp_start_test;
@@ -420,7 +417,7 @@ s32 mp_start_test(struct adapter *padapter)
end_of_mp_start_test:
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
if (res == _SUCCESS) {
/* set MSR to WIFI_FW_ADHOC_STATE */
@@ -439,11 +436,9 @@ void mp_stop_test(struct adapter *padapter)
struct wlan_network *tgt_network = &pmlmepriv->cur_network;
struct sta_info *psta;
- unsigned long irqL;
-
if (pmppriv->mode == MP_ON) {
pmppriv->bSetTxPower = 0;
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == false)
goto end_of_mp_stop_test;
@@ -465,7 +460,7 @@ void mp_stop_test(struct adapter *padapter)
end_of_mp_stop_test:
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
}
}
@@ -614,7 +609,7 @@ static int mp_xmit_packet_thread(void *context)
padapter->bDriverStopped) {
goto exit;
} else {
- rtw_msleep_os(1);
+ msleep(1);
continue;
}
}
@@ -643,7 +638,7 @@ exit:
pmptx->pallocated_buf = NULL;
pmptx->stop = 1;
- thread_exit();
+ complete_and_exit(NULL, 0);
}
void fill_txdesc_for_mp(struct adapter *padapter, struct tx_desc *ptxdesc)
@@ -863,11 +858,11 @@ static u32 rtw_GetPSDData(struct adapter *pAdapter, u32 point)
psd_val |= point;
rtw_write32(pAdapter, 0x808, psd_val);
- rtw_mdelay_os(1);
+ mdelay(1);
psd_val |= 0x00400000;
rtw_write32(pAdapter, 0x808, psd_val);
- rtw_mdelay_os(1);
+ mdelay(1);
psd_val = rtw_read32(pAdapter, 0x8B4);
psd_val &= 0x0000FFFF;
@@ -920,7 +915,7 @@ u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
i++;
}
- rtw_msleep_os(100);
+ msleep(100);
return strlen(data)+1;
}
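
The rtw_mp.c hunks replace the rtw_msleep_os()/rtw_mdelay_os() wrappers with the kernel's msleep() and mdelay(). As a rough rule, msleep() sleeps and is only valid in process context, while mdelay() busy-waits and is used where sleeping is not allowed or, as in rtw_GetPSDData(), for very short register settle times; for sleeps in the 1 to 20 ms range the kernel's timer documentation generally steers drivers toward usleep_range(), but msleep() is the straight drop-in for the removed wrapper. A sketch of the distinction, not the driver's code:

#include <linux/delay.h>

static void demo_delays(void)
{
	msleep(10);	/* may reschedule; fine in a command/ioctl handler */
	mdelay(1);	/* ~1 ms busy-wait; no scheduling, usable in atomic context */
}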
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c b/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c
index f06312c41581..edcd8a5042be 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c
@@ -690,7 +690,7 @@ _func_enter_;
if (pmp_priv->tx.stop == 0) {
pmp_priv->tx.stop = 1;
DBG_88E("%s: pkt tx is running...\n", __func__);
- rtw_msleep_os(5);
+ msleep(5);
}
pmp_priv->tx.stop = 0;
pmp_priv->tx.count = 1;
@@ -725,7 +725,7 @@ _func_enter_;
if (pmp_priv->tx.stop == 0) {
pmp_priv->tx.stop = 1;
DBG_88E("%s: pkt tx is running...\n", __func__);
- rtw_msleep_os(5);
+ msleep(5);
}
pmp_priv->tx.stop = 0;
pmp_priv->tx.count = 1;
@@ -760,7 +760,7 @@ _func_enter_;
if (pmp_priv->tx.stop == 0) {
pmp_priv->tx.stop = 1;
DBG_88E("%s: pkt tx is running...\n", __func__);
- rtw_msleep_os(5);
+ msleep(5);
}
pmp_priv->tx.stop = 0;
pmp_priv->tx.count = 1;
diff --git a/drivers/staging/rtl8188eu/core/rtw_p2p.c b/drivers/staging/rtl8188eu/core/rtw_p2p.c
index f46cab14a54d..6e8c06e840b3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_p2p.c
+++ b/drivers/staging/rtl8188eu/core/rtw_p2p.c
@@ -40,7 +40,6 @@ static int rtw_p2p_is_channel_list_ok(u8 desired_ch, u8 *ch_list, u8 ch_cnt)
static u32 go_add_group_info_attr(struct wifidirect_info *pwdinfo, u8 *pbuf)
{
- unsigned long irqL;
struct list_head *phead, *plist;
u32 len = 0;
u16 attr_len = 0;
@@ -56,7 +55,7 @@ static u32 go_add_group_info_attr(struct wifidirect_info *pwdinfo, u8 *pbuf)
pstart = pdata_attr;
pcur = pdata_attr;
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
plist = get_next(phead);
@@ -120,7 +119,7 @@ static u32 go_add_group_info_attr(struct wifidirect_info *pwdinfo, u8 *pbuf)
pstart = pcur;
}
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
if (attr_len > 0)
len = rtw_set_p2p_attr_content(pbuf, P2P_ATTR_GROUP_INFO, attr_len, pdata_attr);
@@ -977,10 +976,9 @@ u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
_rtw_memcmp(pwdinfo->p2p_group_ssid, groupid+ETH_ALEN, pwdinfo->p2p_group_ssid_len)) {
attr_contentlen = 0;
if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_DEVICE_ID, dev_addr, &attr_contentlen)) {
- unsigned long irqL;
struct list_head *phead, *plist;
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
plist = get_next(phead);
@@ -1000,7 +998,7 @@ u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
status = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
}
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
} else {
status = P2P_STATUS_FAIL_INVALID_PARAM;
}
@@ -1497,9 +1495,8 @@ u8 process_p2p_presence_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
static void find_phase_handler(struct adapter *padapter)
{
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ndis_802_11_ssid ssid;
- unsigned long irqL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
_func_enter_;
@@ -1509,10 +1506,9 @@ _func_enter_;
rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH);
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
-
-
+ spin_lock_bh(&pmlmepriv->lock);
+ rtw_sitesurvey_cmd(padapter, &ssid, 1, NULL, 0);
+ spin_unlock_bh(&pmlmepriv->lock);
_func_exit_;
}
@@ -1833,13 +1829,12 @@ static void pre_tx_scan_timer_process(void *FunctionContext)
{
struct adapter *adapter = (struct adapter *)FunctionContext;
struct wifidirect_info *pwdinfo = &adapter->wdinfo;
- unsigned long irqL;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
return;
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ)) {
if (pwdinfo->tx_prov_disc_info.benable) { /* the provision discovery request frame is trigger to send or not */
@@ -1857,7 +1852,7 @@ static void pre_tx_scan_timer_process(void *FunctionContext)
DBG_88E("[%s] p2p_state is %d, ignore!!\n", __func__, rtw_p2p_state(pwdinfo));
}
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
}
static void find_phase_timer_process(void *FunctionContext)
@@ -1967,7 +1962,7 @@ void init_wifidirect_info(struct adapter *padapter, enum P2P_ROLE role)
rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_NONE);
- pwdinfo->listen_dwell = (u8) ((rtw_get_current_time() % 3) + 1);
+ pwdinfo->listen_dwell = (u8) ((jiffies % 3) + 1);
_rtw_memset(&pwdinfo->tx_prov_disc_info, 0x00, sizeof(struct tx_provdisc_req_info));
pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_NONE;
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index b45461fe20fe..b5db22cc81ed 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -122,7 +122,7 @@ static bool rtw_pwr_unassociated_idle(struct adapter *adapter)
bool ret = false;
- if (adapter->pwrctrlpriv.ips_deny_time >= rtw_get_current_time())
+ if (adapter->pwrctrlpriv.ips_deny_time >= jiffies)
goto exit;
if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE|WIFI_SITE_MONITOR) ||
@@ -285,7 +285,7 @@ static u8 PS_RDY_CHECK(struct adapter *padapter)
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- curr_time = rtw_get_current_time();
+ curr_time = jiffies;
delta_time = curr_time - pwrpriv->DelayLPSLastTimeStamp;
if (delta_time < LPS_DELAY_TIME)
@@ -379,7 +379,7 @@ s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
s32 err = 0;
- start_time = rtw_get_current_time();
+ start_time = jiffies;
while (1) {
rtw_hal_get_hwreg(padapter, HW_VAR_FWLPS_RF_ON, &bAwake);
if (bAwake)
@@ -396,7 +396,7 @@ s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
DBG_88E("%s: Wait for FW LPS leave more than %u ms!!!\n", __func__, delay_ms);
break;
}
- rtw_usleep_os(100);
+ msleep(1);
}
return err;
@@ -522,17 +522,6 @@ _func_enter_;
_func_exit_;
}
-void rtw_free_pwrctrl_priv(struct adapter *adapter)
-{
- struct pwrctrl_priv *pwrctrlpriv = &adapter->pwrctrlpriv;
-
-_func_enter_;
-
- _free_pwrlock(&pwrctrlpriv->lock);
-
-_func_exit_;
-}
-
u8 rtw_interface_ps_func(struct adapter *padapter, enum hal_intf_ps_func efunc_id, u8 *val)
{
u8 bResult = true;
@@ -545,7 +534,7 @@ u8 rtw_interface_ps_func(struct adapter *padapter, enum hal_intf_ps_func efunc_i
inline void rtw_set_ips_deny(struct adapter *padapter, u32 ms)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- pwrpriv->ips_deny_time = rtw_get_current_time() + rtw_ms_to_systime(ms);
+ pwrpriv->ips_deny_time = jiffies + rtw_ms_to_systime(ms);
}
/*
@@ -561,15 +550,15 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int ret = _SUCCESS;
- if (pwrpriv->ips_deny_time < rtw_get_current_time() + rtw_ms_to_systime(ips_deffer_ms))
- pwrpriv->ips_deny_time = rtw_get_current_time() + rtw_ms_to_systime(ips_deffer_ms);
+ if (pwrpriv->ips_deny_time < jiffies + rtw_ms_to_systime(ips_deffer_ms))
+ pwrpriv->ips_deny_time = jiffies + rtw_ms_to_systime(ips_deffer_ms);
{
- u32 start = rtw_get_current_time();
+ u32 start = jiffies;
if (pwrpriv->ps_processing) {
DBG_88E("%s wait ps_processing...\n", __func__);
while (pwrpriv->ps_processing && rtw_get_passing_time_ms(start) <= 3000)
- rtw_msleep_os(10);
+ msleep(10);
if (pwrpriv->ps_processing)
DBG_88E("%s wait ps_processing timeout\n", __func__);
else
@@ -616,8 +605,8 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
}
exit:
- if (pwrpriv->ips_deny_time < rtw_get_current_time() + rtw_ms_to_systime(ips_deffer_ms))
- pwrpriv->ips_deny_time = rtw_get_current_time() + rtw_ms_to_systime(ips_deffer_ms);
+ if (pwrpriv->ips_deny_time < jiffies + rtw_ms_to_systime(ips_deffer_ms))
+ pwrpriv->ips_deny_time = jiffies + rtw_ms_to_systime(ips_deffer_ms);
return ret;
}
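
The rtw_pwrctrl.c hunks substitute jiffies for rtw_get_current_time() while keeping the original comparison style (ips_deny_time >= jiffies). The wrap-safe idiom the kernel otherwise favours pairs time_after()/time_before() with msecs_to_jiffies(); a sketch of that form, with demo_* names standing in for the pwrctrl fields:

#include <linux/jiffies.h>

/* True once the deny window has elapsed, safe across jiffies wraparound. */
static bool demo_deny_expired(unsigned long deny_until)
{
	return time_after(jiffies, deny_until);
}

/* Compute a deadline "ms" milliseconds from now. */
static unsigned long demo_deny_for_ms(unsigned int ms)
{
	return jiffies + msecs_to_jiffies(ms);
}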
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 9f0f30f7069a..c9c180649c12 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -49,7 +49,7 @@ _func_enter_;
_rtw_memset((u8 *)psta_recvpriv, 0, sizeof (struct sta_recv_priv));
- _rtw_spinlock_init(&psta_recvpriv->lock);
+ spin_lock_init(&psta_recvpriv->lock);
_rtw_init_queue(&psta_recvpriv->defrag_q);
@@ -65,7 +65,7 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
int res = _SUCCESS;
_func_enter_;
- _rtw_spinlock_init(&precvpriv->lock);
+ spin_lock_init(&precvpriv->lock);
_rtw_init_queue(&precvpriv->free_recv_queue);
_rtw_init_queue(&precvpriv->recv_pending_queue);
@@ -102,7 +102,7 @@ _func_enter_;
}
precvpriv->rx_pending_cnt = 1;
- _rtw_init_sema(&precvpriv->allrxreturnevt, 0);
+ sema_init(&precvpriv->allrxreturnevt, 0);
res = rtw_hal_init_recv_priv(padapter);
@@ -118,15 +118,6 @@ _func_exit_;
return res;
}
-static void rtw_mfree_recv_priv_lock(struct recv_priv *precvpriv)
-{
- _rtw_spinlock_free(&precvpriv->lock);
- _rtw_spinlock_free(&precvpriv->free_recv_queue.lock);
- _rtw_spinlock_free(&precvpriv->recv_pending_queue.lock);
-
- _rtw_spinlock_free(&precvpriv->free_recv_buf_queue.lock);
-}
-
void _rtw_free_recv_priv (struct recv_priv *precvpriv)
{
struct adapter *padapter = precvpriv->adapter;
@@ -135,8 +126,6 @@ _func_enter_;
rtw_free_uc_swdec_pending_queue(padapter);
- rtw_mfree_recv_priv_lock(precvpriv);
-
rtw_os_recv_resource_free(precvpriv);
if (precvpriv->pallocated_frame_buf) {
@@ -181,14 +170,13 @@ _func_exit_;
union recv_frame *rtw_alloc_recvframe (struct __queue *pfree_recv_queue)
{
- unsigned long irqL;
union recv_frame *precvframe;
- _enter_critical_bh(&pfree_recv_queue->lock, &irqL);
+ spin_lock_bh(&pfree_recv_queue->lock);
precvframe = _rtw_alloc_recvframe(pfree_recv_queue);
- _exit_critical_bh(&pfree_recv_queue->lock, &irqL);
+ spin_unlock_bh(&pfree_recv_queue->lock);
return precvframe;
}
@@ -203,7 +191,6 @@ void rtw_init_recvframe(union recv_frame *precvframe, struct recv_priv *precvpri
int rtw_free_recvframe(union recv_frame *precvframe, struct __queue *pfree_recv_queue)
{
- unsigned long irqL;
struct adapter *padapter;
struct recv_priv *precvpriv;
@@ -217,7 +204,7 @@ _func_enter_;
precvframe->u.hdr.pkt = NULL;
}
- _enter_critical_bh(&pfree_recv_queue->lock, &irqL);
+ spin_lock_bh(&pfree_recv_queue->lock);
rtw_list_delete(&(precvframe->u.hdr.list));
@@ -230,7 +217,7 @@ _func_enter_;
precvpriv->free_recvframe_cnt++;
}
- _exit_critical_bh(&pfree_recv_queue->lock, &irqL);
+ spin_unlock_bh(&pfree_recv_queue->lock);
_func_exit_;
@@ -260,11 +247,10 @@ _func_exit_;
int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
{
int ret;
- unsigned long irqL;
- _enter_critical_bh(&queue->lock, &irqL);
+ spin_lock_bh(&queue->lock);
ret = _rtw_enqueue_recvframe(precvframe, queue);
- _exit_critical_bh(&queue->lock, &irqL);
+ spin_unlock_bh(&queue->lock);
return ret;
}
@@ -316,14 +302,12 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
int rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, struct __queue *queue)
{
- unsigned long irqL;
-
- _enter_critical_bh(&queue->lock, &irqL);
+ spin_lock_bh(&queue->lock);
rtw_list_delete(&precvbuf->list);
rtw_list_insert_head(&precvbuf->list, get_list_head(queue));
- _exit_critical_bh(&queue->lock, &irqL);
+ spin_unlock_bh(&queue->lock);
return _SUCCESS;
}
@@ -331,12 +315,12 @@ int rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, struct __queue *queue
int rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue)
{
unsigned long irqL;
- _enter_critical_ex(&queue->lock, &irqL);
+ spin_lock_irqsave(&queue->lock, irqL);
rtw_list_delete(&precvbuf->list);
rtw_list_insert_tail(&precvbuf->list, get_list_head(queue));
- _exit_critical_ex(&queue->lock, &irqL);
+ spin_unlock_irqrestore(&queue->lock, irqL);
return _SUCCESS;
}
@@ -346,7 +330,7 @@ struct recv_buf *rtw_dequeue_recvbuf (struct __queue *queue)
struct recv_buf *precvbuf;
struct list_head *plist, *phead;
- _enter_critical_ex(&queue->lock, &irqL);
+ spin_lock_irqsave(&queue->lock, irqL);
if (_rtw_queue_empty(queue)) {
precvbuf = NULL;
@@ -360,7 +344,7 @@ struct recv_buf *rtw_dequeue_recvbuf (struct __queue *queue)
rtw_list_delete(&precvbuf->list);
}
- _exit_critical_ex(&queue->lock, &irqL);
+ spin_unlock_irqrestore(&queue->lock, irqL);
return precvbuf;
}
@@ -1108,11 +1092,10 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
}
if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) {
- unsigned long irqL;
struct list_head *xmitframe_plist, *xmitframe_phead;
struct xmit_frame *pxmitframe = NULL;
- _enter_critical_bh(&psta->sleep_q.lock, &irqL);
+ spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
xmitframe_plist = get_next(xmitframe_phead);
@@ -1133,10 +1116,10 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
pxmitframe->attrib.triggered = 1;
- _exit_critical_bh(&psta->sleep_q.lock, &irqL);
+ spin_unlock_bh(&psta->sleep_q.lock);
if (rtw_hal_xmit(padapter, pxmitframe) == true)
rtw_os_xmit_complete(padapter, pxmitframe);
- _enter_critical_bh(&psta->sleep_q.lock, &irqL);
+ spin_lock_bh(&psta->sleep_q.lock);
if (psta->sleepq_len == 0) {
pstapriv->tim_bitmap &= ~BIT(psta->aid);
@@ -1165,7 +1148,7 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
}
}
- _exit_critical_bh(&psta->sleep_q.lock, &irqL);
+ spin_unlock_bh(&psta->sleep_q.lock);
}
}
@@ -1943,7 +1926,6 @@ static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reor
static int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *prframe)
{
- unsigned long irql;
int retval = _SUCCESS;
struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
struct recv_reorder_ctrl *preorder_ctrl = prframe->u.hdr.preorder_ctrl;
@@ -1984,7 +1966,7 @@ static int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *
}
}
- _enter_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_lock_bh(&ppending_recvframe_queue->lock);
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
("recv_indicatepkt_reorder: indicate=%d seq=%d\n",
@@ -1994,7 +1976,7 @@ static int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *
if (!check_indicate_seq(preorder_ctrl, pattrib->seq_num)) {
rtw_recv_indicatepkt(padapter, prframe);
- _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
goto _success_exit;
}
@@ -2016,9 +1998,9 @@ static int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *
/* recv_indicatepkts_in_order(padapter, preorder_ctrl, true); */
if (recv_indicatepkts_in_order(padapter, preorder_ctrl, false)) {
_set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
- _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
} else {
- _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
_cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
}
@@ -2028,14 +2010,13 @@ _success_exit:
_err_exit:
- _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
return _FAIL;
}
void rtw_reordering_ctrl_timeout_handler(void *pcontext)
{
- unsigned long irql;
struct recv_reorder_ctrl *preorder_ctrl = (struct recv_reorder_ctrl *)pcontext;
struct adapter *padapter = preorder_ctrl->padapter;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
@@ -2043,12 +2024,12 @@ void rtw_reordering_ctrl_timeout_handler(void *pcontext)
if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
return;
- _enter_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_lock_bh(&ppending_recvframe_queue->lock);
if (recv_indicatepkts_in_order(padapter, preorder_ctrl, true) == true)
_set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
- _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
}
static int process_recv_indicatepkts(struct adapter *padapter, union recv_frame *prframe)
diff --git a/drivers/staging/rtl8188eu/core/rtw_sreset.c b/drivers/staging/rtl8188eu/core/rtw_sreset.c
index 298f75400c8f..ee20d4ad004f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sreset.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sreset.c
@@ -25,7 +25,7 @@ void sreset_init_value(struct adapter *padapter)
struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
struct sreset_priv *psrtpriv = &pHalData->srestpriv;
- _rtw_mutex_init(&psrtpriv->silentreset_mutex);
+ mutex_init(&psrtpriv->silentreset_mutex);
psrtpriv->silent_reset_inprogress = false;
psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;
psrtpriv->last_tx_time = 0;
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index cd3c9a7c3044..02e1e1f8b3ea 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -31,7 +31,7 @@ static void _rtw_init_stainfo(struct sta_info *psta)
_func_enter_;
_rtw_memset((u8 *)psta, 0, sizeof (struct sta_info));
- _rtw_spinlock_init(&psta->lock);
+ spin_lock_init(&psta->lock);
_rtw_init_listhead(&psta->list);
_rtw_init_listhead(&psta->hash_list);
_rtw_init_queue(&psta->sleep_q);
@@ -89,7 +89,7 @@ _func_enter_;
_rtw_init_queue(&pstapriv->free_sta_queue);
- _rtw_spinlock_init(&pstapriv->sta_hash_lock);
+ spin_lock_init(&pstapriv->sta_hash_lock);
pstapriv->asoc_sta_count = 0;
_rtw_init_queue(&pstapriv->sleep_q);
@@ -114,8 +114,8 @@ _func_enter_;
_rtw_init_listhead(&pstapriv->asoc_list);
_rtw_init_listhead(&pstapriv->auth_list);
- _rtw_spinlock_init(&pstapriv->asoc_list_lock);
- _rtw_spinlock_init(&pstapriv->auth_list_lock);
+ spin_lock_init(&pstapriv->asoc_list_lock);
+ spin_lock_init(&pstapriv->auth_list_lock);
pstapriv->asoc_list_cnt = 0;
pstapriv->auth_list_cnt = 0;
@@ -148,56 +148,15 @@ inline struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int
return (struct sta_info *)(stapriv->pstainfo_buf + offset * sizeof(struct sta_info));
}
-void _rtw_free_sta_xmit_priv_lock(struct sta_xmit_priv *psta_xmitpriv);
-void _rtw_free_sta_xmit_priv_lock(struct sta_xmit_priv *psta_xmitpriv)
-{
-_func_enter_;
-
- _rtw_spinlock_free(&psta_xmitpriv->lock);
-
- _rtw_spinlock_free(&(psta_xmitpriv->be_q.sta_pending.lock));
- _rtw_spinlock_free(&(psta_xmitpriv->bk_q.sta_pending.lock));
- _rtw_spinlock_free(&(psta_xmitpriv->vi_q.sta_pending.lock));
- _rtw_spinlock_free(&(psta_xmitpriv->vo_q.sta_pending.lock));
-_func_exit_;
-}
-
-static void _rtw_free_sta_recv_priv_lock(struct sta_recv_priv *psta_recvpriv)
-{
-_func_enter_;
-
- _rtw_spinlock_free(&psta_recvpriv->lock);
-
- _rtw_spinlock_free(&(psta_recvpriv->defrag_q.lock));
-
-_func_exit_;
-}
-
-void rtw_mfree_stainfo(struct sta_info *psta);
-void rtw_mfree_stainfo(struct sta_info *psta)
-{
-_func_enter_;
-
- if (&psta->lock != NULL)
- _rtw_spinlock_free(&psta->lock);
-
- _rtw_free_sta_xmit_priv_lock(&psta->sta_xmitpriv);
- _rtw_free_sta_recv_priv_lock(&psta->sta_recvpriv);
-
-_func_exit_;
-}
-
/* this function is used to free the memory of lock || sema for all stainfos */
-void rtw_mfree_all_stainfo(struct sta_priv *pstapriv);
-void rtw_mfree_all_stainfo(struct sta_priv *pstapriv)
+static void rtw_mfree_all_stainfo(struct sta_priv *pstapriv)
{
- unsigned long irql;
struct list_head *plist, *phead;
struct sta_info *psta = NULL;
_func_enter_;
- _enter_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
phead = get_list_head(&pstapriv->free_sta_queue);
plist = get_next(phead);
@@ -205,39 +164,20 @@ _func_enter_;
while ((rtw_end_of_queue_search(phead, plist)) == false) {
psta = LIST_CONTAINOR(plist, struct sta_info , list);
plist = get_next(plist);
-
- rtw_mfree_stainfo(psta);
}
- _exit_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
_func_exit_;
}
static void rtw_mfree_sta_priv_lock(struct sta_priv *pstapriv)
{
-#ifdef CONFIG_88EU_AP_MODE
- struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
-#endif
-
rtw_mfree_all_stainfo(pstapriv); /* be done before free sta_hash_lock */
-
- _rtw_spinlock_free(&pstapriv->free_sta_queue.lock);
-
- _rtw_spinlock_free(&pstapriv->sta_hash_lock);
- _rtw_spinlock_free(&pstapriv->wakeup_q.lock);
- _rtw_spinlock_free(&pstapriv->sleep_q.lock);
-
-#ifdef CONFIG_88EU_AP_MODE
- _rtw_spinlock_free(&pstapriv->asoc_list_lock);
- _rtw_spinlock_free(&pstapriv->auth_list_lock);
- _rtw_spinlock_free(&pacl_list->acl_node_q.lock);
-#endif
}
u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
{
- unsigned long irql;
struct list_head *phead, *plist;
struct sta_info *psta = NULL;
struct recv_reorder_ctrl *preorder_ctrl;
@@ -246,7 +186,7 @@ u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
_func_enter_;
if (pstapriv) {
/* delete all reordering_ctrl_timer */
- _enter_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
for (index = 0; index < NUM_STA; index++) {
phead = &(pstapriv->sta_hash[index]);
plist = get_next(phead);
@@ -262,7 +202,7 @@ _func_enter_;
}
}
}
- _exit_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
/*===============================*/
rtw_mfree_sta_priv_lock(pstapriv);
@@ -277,7 +217,6 @@ _func_exit_;
struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
- unsigned long irql, irql2;
s32 index;
struct list_head *phash_list;
struct sta_info *psta;
@@ -290,15 +229,15 @@ _func_enter_;
pfree_sta_queue = &pstapriv->free_sta_queue;
- _enter_critical_bh(&(pfree_sta_queue->lock), &irql);
+ spin_lock_bh(&(pfree_sta_queue->lock));
if (_rtw_queue_empty(pfree_sta_queue) == true) {
- _exit_critical_bh(&(pfree_sta_queue->lock), &irql);
+ spin_unlock_bh(&pfree_sta_queue->lock);
psta = NULL;
} else {
psta = LIST_CONTAINOR(get_next(&pfree_sta_queue->queue), struct sta_info, list);
rtw_list_delete(&(psta->list));
- _exit_critical_bh(&(pfree_sta_queue->lock), &irql);
+ spin_unlock_bh(&pfree_sta_queue->lock);
_rtw_init_stainfo(psta);
memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
index = wifi_mac_hash(hwaddr);
@@ -310,13 +249,13 @@ _func_enter_;
}
phash_list = &(pstapriv->sta_hash[index]);
- _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_list_insert_tail(&psta->hash_list, phash_list);
pstapriv->asoc_sta_count++;
- _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
/* Commented by Albert 2009/08/13 */
/* For the SMC router, the sequence number of first packet of WPS handshake will be 0. */
@@ -368,7 +307,6 @@ _func_exit_;
u32 rtw_free_stainfo(struct adapter *padapter , struct sta_info *psta)
{
int i;
- unsigned long irql0;
struct __queue *pfree_sta_queue;
struct recv_reorder_ctrl *preorder_ctrl;
struct sta_xmit_priv *pstaxmitpriv;
@@ -384,7 +322,7 @@ _func_enter_;
pstaxmitpriv = &psta->sta_xmitpriv;
- _enter_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_lock_bh(&pxmitpriv->lock);
rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q);
psta->sleepq_len = 0;
@@ -405,7 +343,7 @@ _func_enter_;
rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending));
- _exit_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_unlock_bh(&pxmitpriv->lock);
rtw_list_delete(&psta->hash_list);
RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("\n free number_%d stainfo with hwaddr=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", pstapriv->asoc_sta_count , psta->hwaddr[0], psta->hwaddr[1], psta->hwaddr[2], psta->hwaddr[3], psta->hwaddr[4], psta->hwaddr[5]));
@@ -419,7 +357,6 @@ _func_enter_;
/* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
for (i = 0; i < 16; i++) {
- unsigned long irql;
struct list_head *phead, *plist;
union recv_frame *prframe;
struct __queue *ppending_recvframe_queue;
@@ -431,7 +368,7 @@ _func_enter_;
ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
- _enter_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_lock_bh(&ppending_recvframe_queue->lock);
phead = get_list_head(ppending_recvframe_queue);
plist = get_next(phead);
@@ -446,7 +383,7 @@ _func_enter_;
rtw_free_recvframe(prframe, pfree_recv_queue);
}
- _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
}
if (!(psta->state & WIFI_AP_STATE))
@@ -454,12 +391,12 @@ _func_enter_;
#ifdef CONFIG_88EU_AP_MODE
- _enter_critical_bh(&pstapriv->auth_list_lock, &irql0);
+ spin_lock_bh(&pstapriv->auth_list_lock);
if (!rtw_is_list_empty(&psta->auth_list)) {
rtw_list_delete(&psta->auth_list);
pstapriv->auth_list_cnt--;
}
- _exit_critical_bh(&pstapriv->auth_list_lock, &irql0);
+ spin_unlock_bh(&pstapriv->auth_list_lock);
psta->expire_to = 0;
@@ -485,9 +422,9 @@ _func_enter_;
#endif /* CONFIG_88EU_AP_MODE */
- _enter_critical_bh(&(pfree_sta_queue->lock), &irql0);
+ spin_lock_bh(&(pfree_sta_queue->lock));
rtw_list_insert_tail(&psta->list, get_list_head(pfree_sta_queue));
- _exit_critical_bh(&(pfree_sta_queue->lock), &irql0);
+ spin_unlock_bh(&pfree_sta_queue->lock);
exit:
@@ -499,7 +436,6 @@ _func_exit_;
/* free all stainfo which in sta_hash[all] */
void rtw_free_all_stainfo(struct adapter *padapter)
{
- unsigned long irql;
struct list_head *plist, *phead;
s32 index;
struct sta_info *psta = NULL;
@@ -511,7 +447,7 @@ _func_enter_;
if (pstapriv->asoc_sta_count == 1)
goto exit;
- _enter_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
for (index = 0; index < NUM_STA; index++) {
phead = &(pstapriv->sta_hash[index]);
@@ -527,7 +463,7 @@ _func_enter_;
}
}
- _exit_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
exit:
@@ -537,7 +473,6 @@ _func_exit_;
/* any station allocated can be searched by hash list */
struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
- unsigned long irql;
struct list_head *plist, *phead;
struct sta_info *psta = NULL;
u32 index;
@@ -556,7 +491,7 @@ _func_enter_;
index = wifi_mac_hash(addr);
- _enter_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
phead = &(pstapriv->sta_hash[index]);
plist = get_next(phead);
@@ -572,7 +507,7 @@ _func_enter_;
plist = get_next(plist);
}
- _exit_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
_func_exit_;
return psta;
}
@@ -617,7 +552,6 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
{
u8 res = true;
#ifdef CONFIG_88EU_AP_MODE
- unsigned long irql;
struct list_head *plist, *phead;
struct rtw_wlan_acl_node *paclnode;
u8 match = false;
@@ -625,7 +559,7 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct __queue *pacl_node_q = &pacl_list->acl_node_q;
- _enter_critical_bh(&(pacl_node_q->lock), &irql);
+ spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
plist = get_next(phead);
while ((!rtw_end_of_queue_search(phead, plist))) {
@@ -639,7 +573,7 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
}
}
}
- _exit_critical_bh(&(pacl_node_q->lock), &irql);
+ spin_unlock_bh(&pacl_node_q->lock);
if (pacl_list->mode == 1)/* accept unless in deny list */
res = (match) ? false : true;
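
Besides the lock conversion, the rtw_sta_mgt.c hunks delete the *_free_lock helpers outright: a spinlock_t initialised with spin_lock_init() has no teardown counterpart, so the per-station "free lock" passes become dead code. A sketch of the resulting lifecycle, using a hypothetical demo_sta rather than the driver's struct sta_info:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_sta {
	spinlock_t lock;
	struct list_head hash_list;
};

static struct demo_sta *demo_sta_alloc(gfp_t gfp)
{
	struct demo_sta *sta = kzalloc(sizeof(*sta), gfp);

	if (sta) {
		spin_lock_init(&sta->lock);	/* replaces _rtw_spinlock_init() */
		INIT_LIST_HEAD(&sta->hash_list);
	}
	return sta;
}

static void demo_sta_free(struct demo_sta *sta)
{
	/* No spinlock "free" step exists or is needed before releasing memory. */
	kfree(sta);
}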
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 153ec61493ab..96df62f95b6b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -912,12 +912,12 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
unsigned char *pbuf;
u32 wpa_ielen = 0;
u8 *pbssid = GetAddr3Ptr(pframe);
- u32 hidden_ssid = 0;
struct HT_info_element *pht_info = NULL;
struct rtw_ieee80211_ht_cap *pht_cap = NULL;
u32 bcn_channel;
unsigned short ht_cap_info;
unsigned char ht_info_infos_0;
+ int ssid_len;
if (is_client_associated_to_ap(Adapter) == false)
return true;
@@ -999,21 +999,15 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
/* checking SSID */
+ ssid_len = 0;
p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
- if (p == NULL) {
- DBG_88E("%s marc: cannot find SSID for survey event\n", __func__);
- hidden_ssid = true;
- } else {
- hidden_ssid = false;
- }
-
- if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) {
- memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
- bssid->Ssid.SsidLength = *(p + 1);
- } else {
- bssid->Ssid.SsidLength = 0;
- bssid->Ssid.Ssid[0] = '\0';
+ if (p) {
+ ssid_len = *(p + 1);
+ if (ssid_len > NDIS_802_11_LENGTH_SSID)
+ ssid_len = 0;
}
+ memcpy(bssid->Ssid.Ssid, (p + 2), ssid_len);
+ bssid->Ssid.SsidLength = ssid_len;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d "
"cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid,
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index a594e51d2e1c..24182fbc6a71 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -43,7 +43,7 @@ void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
{
_func_enter_;
_rtw_memset((unsigned char *)psta_xmitpriv, 0, sizeof (struct sta_xmit_priv));
- _rtw_spinlock_init(&psta_xmitpriv->lock);
+ spin_lock_init(&psta_xmitpriv->lock);
_init_txservq(&psta_xmitpriv->be_q);
_init_txservq(&psta_xmitpriv->bk_q);
_init_txservq(&psta_xmitpriv->vi_q);
@@ -67,9 +67,9 @@ _func_enter_;
/* We don't need to memset padapter->XXX to zero, because adapter is allocated by rtw_zvmalloc(). */
- _rtw_spinlock_init(&pxmitpriv->lock);
- _rtw_init_sema(&pxmitpriv->xmit_sema, 0);
- _rtw_init_sema(&pxmitpriv->terminate_xmitthread_sema, 0);
+ spin_lock_init(&pxmitpriv->lock);
+ sema_init(&pxmitpriv->xmit_sema, 0);
+ sema_init(&pxmitpriv->terminate_xmitthread_sema, 0);
/*
Please insert all the queue initializaiton using _rtw_init_queue below
@@ -153,7 +153,7 @@ _func_enter_;
/* Tx buf allocation may fail sometimes, so sleep and retry. */
res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
if (res == _FAIL) {
- rtw_msleep_os(10);
+ msleep(10);
res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
if (res == _FAIL) {
goto exit;
@@ -210,7 +210,7 @@ _func_enter_;
pxmitpriv->txirp_cnt = 1;
- _rtw_init_sema(&(pxmitpriv->tx_retevt), 0);
+ sema_init(&(pxmitpriv->tx_retevt), 0);
/* per AC pending irp */
pxmitpriv->beq_cnt = 0;
@@ -219,7 +219,7 @@ _func_enter_;
pxmitpriv->voq_cnt = 0;
pxmitpriv->ack_tx = false;
- _rtw_mutex_init(&pxmitpriv->ack_tx_mutex);
+ mutex_init(&pxmitpriv->ack_tx_mutex);
rtw_sctx_init(&pxmitpriv->ack_tx_ops, 0);
rtw_hal_init_xmit_priv(padapter);
@@ -231,23 +231,6 @@ _func_exit_;
return res;
}
-static void rtw_mfree_xmit_priv_lock (struct xmit_priv *pxmitpriv)
-{
- _rtw_spinlock_free(&pxmitpriv->lock);
- _rtw_free_sema(&pxmitpriv->xmit_sema);
- _rtw_free_sema(&pxmitpriv->terminate_xmitthread_sema);
-
- _rtw_spinlock_free(&pxmitpriv->be_pending.lock);
- _rtw_spinlock_free(&pxmitpriv->bk_pending.lock);
- _rtw_spinlock_free(&pxmitpriv->vi_pending.lock);
- _rtw_spinlock_free(&pxmitpriv->vo_pending.lock);
- _rtw_spinlock_free(&pxmitpriv->bm_pending.lock);
-
- _rtw_spinlock_free(&pxmitpriv->free_xmit_queue.lock);
- _rtw_spinlock_free(&pxmitpriv->free_xmitbuf_queue.lock);
- _rtw_spinlock_free(&pxmitpriv->pending_xmitbuf_queue.lock);
-}
-
void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
{
int i;
@@ -261,8 +244,6 @@ void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
rtw_hal_free_xmit_priv(padapter);
- rtw_mfree_xmit_priv_lock(pxmitpriv);
-
if (pxmitpriv->pxmit_frame_buf == NULL)
goto out;
@@ -284,8 +265,6 @@ void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
rtw_vmfree(pxmitpriv->pallocated_xmitbuf, NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
/* free xmit extension buff */
- _rtw_spinlock_free(&pxmitpriv->free_xmit_extbuf_queue.lock);
-
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
for (i = 0; i < num_xmit_extbuf; i++) {
rtw_os_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
@@ -298,7 +277,7 @@ void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
rtw_free_hwxmits(padapter);
- _rtw_mutex_free(&pxmitpriv->ack_tx_mutex);
+ mutex_destroy(&pxmitpriv->ack_tx_mutex);
out:
@@ -685,7 +664,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
_func_enter_;
- hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);;
+ hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
if (pattrib->encrypt == _TKIP_) {/* if (psecuritypriv->dot11PrivacyAlgrthm == _TKIP_PRIVACY_) */
/* encode mic code */
@@ -704,7 +683,7 @@ _func_enter_;
} else {
if (_rtw_memcmp(&stainfo->dot11tkiptxmickey.skey[0], null_key, 16) == true) {
/* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey == 0\n"); */
- /* rtw_msleep_os(10); */
+ /* msleep(10); */
return _FAIL;
}
/* start to calculate the mic code */
@@ -827,7 +806,7 @@ s32 rtw_make_wlanhdr (struct adapter *padapter , u8 *hdr, struct pkt_attrib *pat
u8 qos_option = false;
int res = _SUCCESS;
- u16 *fctrl = &pwlanhdr->frame_ctl;
+ __le16 *fctrl = &pwlanhdr->frame_ctl;
struct sta_info *psta;
@@ -1273,7 +1252,7 @@ struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
_func_enter_;
- _enter_critical(&pfree_queue->lock, &irql);
+ spin_lock_irqsave(&pfree_queue->lock, irql);
if (_rtw_queue_empty(pfree_queue) == true) {
pxmitbuf = NULL;
@@ -1299,7 +1278,7 @@ _func_enter_;
}
}
- _exit_critical(&pfree_queue->lock, &irql);
+ spin_unlock_irqrestore(&pfree_queue->lock, irql);
_func_exit_;
@@ -1316,14 +1295,14 @@ _func_enter_;
if (pxmitbuf == NULL)
return _FAIL;
- _enter_critical(&pfree_queue->lock, &irql);
+ spin_lock_irqsave(&pfree_queue->lock, irql);
rtw_list_delete(&pxmitbuf->list);
rtw_list_insert_tail(&(pxmitbuf->list), get_list_head(pfree_queue));
pxmitpriv->free_xmit_extbuf_cnt++;
- _exit_critical(&pfree_queue->lock, &irql);
+ spin_unlock_irqrestore(&pfree_queue->lock, irql);
_func_exit_;
@@ -1341,7 +1320,7 @@ _func_enter_;
/* DBG_88E("+rtw_alloc_xmitbuf\n"); */
- _enter_critical(&pfree_xmitbuf_queue->lock, &irql);
+ spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irql);
if (_rtw_queue_empty(pfree_xmitbuf_queue) == true) {
pxmitbuf = NULL;
@@ -1363,7 +1342,7 @@ _func_enter_;
rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
}
}
- _exit_critical(&pfree_xmitbuf_queue->lock, &irql);
+ spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irql);
_func_exit_;
@@ -1387,14 +1366,14 @@ _func_enter_;
if (pxmitbuf->ext_tag) {
rtw_free_xmitbuf_ext(pxmitpriv, pxmitbuf);
} else {
- _enter_critical(&pfree_xmitbuf_queue->lock, &irql);
+ spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irql);
rtw_list_delete(&pxmitbuf->list);
rtw_list_insert_tail(&(pxmitbuf->list), get_list_head(pfree_xmitbuf_queue));
pxmitpriv->free_xmitbuf_cnt++;
- _exit_critical(&pfree_xmitbuf_queue->lock, &irql);
+ spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irql);
}
_func_exit_;
@@ -1422,14 +1401,13 @@ struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pf
pfree_xmit_queue
*/
- unsigned long irql;
struct xmit_frame *pxframe = NULL;
struct list_head *plist, *phead;
struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
_func_enter_;
- _enter_critical_bh(&pfree_xmit_queue->lock, &irql);
+ spin_lock_bh(&pfree_xmit_queue->lock);
if (_rtw_queue_empty(pfree_xmit_queue) == true) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe:%d\n", pxmitpriv->free_xmitframe_cnt));
@@ -1464,7 +1442,7 @@ _func_enter_;
pxframe->ack_report = 0;
}
- _exit_critical_bh(&pfree_xmit_queue->lock, &irql);
+ spin_unlock_bh(&pfree_xmit_queue->lock);
_func_exit_;
@@ -1473,7 +1451,6 @@ _func_exit_;
s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe)
{
- unsigned long irql;
struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
struct adapter *padapter = pxmitpriv->adapter;
struct sk_buff *pndis_pkt = NULL;
@@ -1485,7 +1462,7 @@ _func_enter_;
goto exit;
}
- _enter_critical_bh(&pfree_xmit_queue->lock, &irql);
+ spin_lock_bh(&pfree_xmit_queue->lock);
rtw_list_delete(&pxmitframe->list);
@@ -1499,7 +1476,7 @@ _func_enter_;
pxmitpriv->free_xmitframe_cnt++;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_debug_, ("rtw_free_xmitframe():free_xmitframe_cnt=%d\n", pxmitpriv->free_xmitframe_cnt));
- _exit_critical_bh(&pfree_xmit_queue->lock, &irql);
+ spin_unlock_bh(&pfree_xmit_queue->lock);
if (pndis_pkt)
rtw_os_pkt_complete(padapter, pndis_pkt);
@@ -1513,13 +1490,12 @@ _func_exit_;
void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pframequeue)
{
- unsigned long irql;
struct list_head *plist, *phead;
struct xmit_frame *pxmitframe;
_func_enter_;
- _enter_critical_bh(&(pframequeue->lock), &irql);
+ spin_lock_bh(&(pframequeue->lock));
phead = get_list_head(pframequeue);
plist = get_next(phead);
@@ -1531,7 +1507,7 @@ _func_enter_;
rtw_free_xmitframe(pxmitpriv, pxmitframe);
}
- _exit_critical_bh(&(pframequeue->lock), &irql);
+ spin_unlock_bh(&(pframequeue->lock));
_func_exit_;
}
@@ -1570,7 +1546,6 @@ static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv, str
struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i, int entry)
{
- unsigned long irql0;
struct list_head *sta_plist, *sta_phead;
struct hw_xmit *phwxmit;
struct tx_servq *ptxservq = NULL;
@@ -1591,7 +1566,7 @@ _func_enter_;
inx[j] = pxmitpriv->wmm_para_seq[j];
}
- _enter_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_lock_bh(&pxmitpriv->lock);
for (i = 0; i < entry; i++) {
phwxmit = phwxmit_i + inx[i];
@@ -1619,7 +1594,7 @@ _func_enter_;
}
}
exit:
- _exit_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_unlock_bh(&pxmitpriv->lock);
_func_exit_;
return pxmitframe;
}
@@ -1668,7 +1643,6 @@ _func_exit_;
*/
s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
{
- /* unsigned long irql0; */
u8 ac_index;
struct sta_info *psta;
struct tx_servq *ptxservq;
@@ -1754,7 +1728,6 @@ _func_exit_;
static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
- unsigned long irql;
int res, is_vlan_tag = 0, i, do_nat25 = 1;
unsigned short vlan_hdr = 0;
void *br_port = NULL;
@@ -1762,7 +1735,7 @@ static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
rcu_read_lock();
br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
rcu_read_unlock();
- _enter_critical_bh(&padapter->br_ext_lock, &irql);
+ spin_lock_bh(&padapter->br_ext_lock);
if (!(skb->data[0] & 1) && br_port &&
memcmp(skb->data+MACADDRLEN, padapter->br_mac, MACADDRLEN) &&
*((__be16 *)(skb->data+MACADDRLEN*2)) != __constant_htons(ETH_P_8021Q) &&
@@ -1770,7 +1743,7 @@ static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
!memcmp(padapter->scdb_mac, skb->data+MACADDRLEN, MACADDRLEN) && padapter->scdb_entry) {
memcpy(skb->data+MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
padapter->scdb_entry->ageing_timer = jiffies;
- _exit_critical_bh(&padapter->br_ext_lock, &irql);
+ spin_unlock_bh(&padapter->br_ext_lock);
} else {
if (*((__be16 *)(skb->data+MACADDRLEN*2)) == __constant_htons(ETH_P_8021Q)) {
is_vlan_tag = 1;
@@ -1803,7 +1776,7 @@ static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
}
}
}
- _exit_critical_bh(&padapter->br_ext_lock, &irql);
+ spin_unlock_bh(&padapter->br_ext_lock);
if (do_nat25) {
if (nat25_db_handle(padapter, skb, NAT25_CHECK) == 0) {
struct sk_buff *newskb;
@@ -1930,9 +1903,6 @@ static void do_queue_select(struct adapter *padapter, struct pkt_attrib *pattrib
*/
s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
{
-#ifdef CONFIG_88EU_AP_MODE
- unsigned long irql0;
-#endif
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct xmit_frame *pxmitframe = NULL;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1972,12 +1942,12 @@ s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
do_queue_select(padapter, &pxmitframe->attrib);
#ifdef CONFIG_88EU_AP_MODE
- _enter_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_lock_bh(&pxmitpriv->lock);
if (xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe)) {
- _exit_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_unlock_bh(&pxmitpriv->lock);
return 1;
}
- _exit_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_unlock_bh(&pxmitpriv->lock);
#endif
if (rtw_hal_xmit(padapter, pxmitframe) == false)
@@ -1990,7 +1960,6 @@ s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe)
{
- unsigned long irql;
int ret = false;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -2016,7 +1985,7 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
}
if (bmcst) {
- _enter_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_lock_bh(&psta->sleep_q.lock);
if (pstapriv->sta_dz_bitmap) {/* if any one sta is in ps mode */
rtw_list_delete(&pxmitframe->list);
@@ -2033,12 +2002,12 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
ret = true;
}
- _exit_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_unlock_bh(&psta->sleep_q.lock);
return ret;
}
- _enter_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_lock_bh(&psta->sleep_q.lock);
if (psta->state&WIFI_SLEEP_STATE) {
u8 wmmps_ac = 0;
@@ -2086,7 +2055,7 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
}
}
- _exit_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_unlock_bh(&psta->sleep_q.lock);
return ret;
}
@@ -2121,7 +2090,6 @@ static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struc
void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
{
- unsigned long irql0;
struct sta_info *psta_bmc;
struct sta_xmit_priv *pstaxmitpriv;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -2132,7 +2100,7 @@ void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
/* for BC/MC Frames */
psta_bmc = rtw_get_bcmc_stainfo(padapter);
- _enter_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_lock_bh(&pxmitpriv->lock);
psta->state |= WIFI_SLEEP_STATE;
@@ -2155,19 +2123,18 @@ void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->be_q.sta_pending);
rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending));
- _exit_critical_bh(&pxmitpriv->lock, &irql0);
+ spin_unlock_bh(&pxmitpriv->lock);
}
void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
{
- unsigned long irql;
u8 update_mask = 0, wmmps_ac = 0;
struct sta_info *psta_bmc;
struct list_head *xmitframe_plist, *xmitframe_phead;
struct xmit_frame *pxmitframe = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
- _enter_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
xmitframe_plist = get_next(xmitframe_phead);
@@ -2218,10 +2185,10 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
pxmitframe->attrib.triggered = 1;
- _exit_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_unlock_bh(&psta->sleep_q.lock);
if (rtw_hal_xmit(padapter, pxmitframe))
rtw_os_xmit_complete(padapter, pxmitframe);
- _enter_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_lock_bh(&psta->sleep_q.lock);
}
if (psta->sleepq_len == 0) {
@@ -2240,7 +2207,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
pstapriv->sta_dz_bitmap &= ~BIT(psta->aid);
}
- _exit_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_unlock_bh(&psta->sleep_q.lock);
/* for BC/MC Frames */
psta_bmc = rtw_get_bcmc_stainfo(padapter);
@@ -2248,7 +2215,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
return;
if ((pstapriv->sta_dz_bitmap&0xfffe) == 0x0) { /* no any sta in ps mode */
- _enter_critical_bh(&psta_bmc->sleep_q.lock, &irql);
+ spin_lock_bh(&psta_bmc->sleep_q.lock);
xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
xmitframe_plist = get_next(xmitframe_phead);
@@ -2268,10 +2235,10 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
pxmitframe->attrib.triggered = 1;
- _exit_critical_bh(&psta_bmc->sleep_q.lock, &irql);
+ spin_unlock_bh(&psta_bmc->sleep_q.lock);
if (rtw_hal_xmit(padapter, pxmitframe))
rtw_os_xmit_complete(padapter, pxmitframe);
- _enter_critical_bh(&psta_bmc->sleep_q.lock, &irql);
+ spin_lock_bh(&psta_bmc->sleep_q.lock);
}
if (psta_bmc->sleepq_len == 0) {
@@ -2281,7 +2248,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
update_mask |= BIT(1);
}
- _exit_critical_bh(&psta_bmc->sleep_q.lock, &irql);
+ spin_unlock_bh(&psta_bmc->sleep_q.lock);
}
if (update_mask)
@@ -2290,13 +2257,12 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta)
{
- unsigned long irql;
u8 wmmps_ac = 0;
struct list_head *xmitframe_plist, *xmitframe_phead;
struct xmit_frame *pxmitframe = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
- _enter_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
xmitframe_plist = get_next(xmitframe_phead);
@@ -2355,7 +2321,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
}
}
- _exit_critical_bh(&psta->sleep_q.lock, &irql);
+ spin_unlock_bh(&psta->sleep_q.lock);
}
#endif
@@ -2363,7 +2329,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms)
{
sctx->timeout_ms = timeout_ms;
- sctx->submit_time = rtw_get_current_time();
+ sctx->submit_time = jiffies;
init_completion(&sctx->done);
sctx->status = RTW_SCTX_SUBMITTED;
}
@@ -2424,7 +2390,7 @@ int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms)
{
struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
- pack_tx_ops->submit_time = rtw_get_current_time();
+ pack_tx_ops->submit_time = jiffies;
pack_tx_ops->timeout_ms = timeout_ms;
pack_tx_ops->status = RTW_SCTX_SUBMITTED;
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
index aaa261771ab9..3df33bc7197a 100644
--- a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
@@ -529,9 +529,7 @@ ODM_RASupport_Init(
{
ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("=====>ODM_RASupport_Init()\n"));
- /* 2012/02/14 MH Be noticed, the init must be after IC type is recognized!!!!! */
- if (dm_odm->SupportICType == ODM_RTL8188E)
- dm_odm->RaSupport88E = true;
+ dm_odm->RaSupport88E = true;
}
int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 macid)
diff --git a/drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c b/drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c
index 480c810c4468..17c6411ce8ac 100644
--- a/drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c
+++ b/drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c
@@ -211,7 +211,7 @@ enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
else if (v1 == 0xf9)
rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 1);
else
- rtw_IOL_append_WRF_cmd(pxmit_frame, ODM_RF_PATH_A, (u16)v1, v2, bRFRegOffsetMask);
+ rtw_IOL_append_WRF_cmd(pxmit_frame, RF_PATH_A, (u16)v1, v2, bRFRegOffsetMask);
} else {
odm_ConfigRF_RadioA_8188E(pDM_Odm, v1, v2);
}
@@ -247,7 +247,7 @@ enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
else if (v1 == 0xf9)
rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 1);
else
- rtw_IOL_append_WRF_cmd(pxmit_frame, ODM_RF_PATH_A, (u16)v1, v2, bRFRegOffsetMask);
+ rtw_IOL_append_WRF_cmd(pxmit_frame, RF_PATH_A, (u16)v1, v2, bRFRegOffsetMask);
} else {
odm_ConfigRF_RadioA_8188E(pDM_Odm, v1, v2);
}
diff --git a/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c b/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
index 8a7947d8de7f..15e8e3f62198 100644
--- a/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
+++ b/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
@@ -173,7 +173,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
("===>dm_TXPowerTrackingCallback_ThermalMeter_8188E txpowercontrol %d\n",
dm_odm->RFCalibrateInfo.TxPowerTrackControl));
- ThermalValue = (u8)ODM_GetRFReg(dm_odm, RF_PATH_A, RF_T_METER_88E, 0xfc00); /* 0x42: RF Reg[15:10] 88E */
+ ThermalValue = (u8)PHY_QueryRFReg(Adapter, RF_PATH_A, RF_T_METER_88E, 0xfc00); /* 0x42: RF Reg[15:10] 88E */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("Readback Thermal Meter = 0x%x pre thermal meter 0x%x EEPROMthermalmeter 0x%x\n",
@@ -186,7 +186,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
if (ThermalValue) {
/* Query OFDM path A default setting */
- ele_D = ODM_GetBBReg(dm_odm, rOFDM0_XATxIQImbalance, bMaskDWord)&bMaskOFDM_D;
+ ele_D = PHY_QueryBBReg(Adapter, rOFDM0_XATxIQImbalance, bMaskDWord)&bMaskOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { /* find the index */
if (ele_D == (OFDMSwingTable[i]&bMaskOFDM_D)) {
OFDM_index_old[0] = (u8)i;
@@ -200,7 +200,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
/* Query OFDM path B default setting */
if (is2t) {
- ele_D = ODM_GetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord)&bMaskOFDM_D;
+ ele_D = PHY_QueryBBReg(Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord)&bMaskOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { /* find the index */
if (ele_D == (OFDMSwingTable[i]&bMaskOFDM_D)) {
OFDM_index_old[1] = (u8)i;
@@ -428,17 +428,17 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
/* wtite new elements A, C, D to regC88 and regC9C, element B is always 0 */
value32 = (ele_D<<22) | ((ele_C&0x3F)<<16) | ele_A;
- ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord, value32);
+ PHY_SetBBReg(Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord, value32);
value32 = (ele_C&0x000003C0)>>6;
- ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, bMaskH4Bits, value32);
+ PHY_SetBBReg(Adapter, rOFDM0_XDTxAFE, bMaskH4Bits, value32);
value32 = ((X * ele_D)>>7)&0x01;
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT28, value32);
+ PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold, BIT28, value32);
} else {
- ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord, OFDMSwingTable[(u8)OFDM_index[1]]);
- ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, bMaskH4Bits, 0x00);
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT28, 0x00);
+ PHY_SetBBReg(Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord, OFDMSwingTable[(u8)OFDM_index[1]]);
+ PHY_SetBBReg(Adapter, rOFDM0_XDTxAFE, bMaskH4Bits, 0x00);
+ PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold, BIT28, 0x00);
}
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
@@ -449,8 +449,8 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
- ODM_GetBBReg(dm_odm, 0xc80, bMaskDWord), ODM_GetBBReg(dm_odm,
- 0xc94, bMaskDWord), ODM_GetRFReg(dm_odm, RF_PATH_A, 0x24, bRFRegOffsetMask)));
+ PHY_QueryBBReg(Adapter, 0xc80, bMaskDWord), PHY_QueryBBReg(Adapter,
+ 0xc94, bMaskDWord), PHY_QueryRFReg(Adapter, RF_PATH_A, 0x24, bRFRegOffsetMask)));
}
}
@@ -485,33 +485,33 @@ phy_PathA_IQK_8188E(struct adapter *adapt, bool configPathB)
/* 1 Tx IQK */
/* path-A IQK setting */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A IQK setting!\n"));
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
- ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x8214032a);
- ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
+ PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
+ PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
+ PHY_SetBBReg(adapt, rTx_IQK_PI_A, bMaskDWord, 0x8214032a);
+ PHY_SetBBReg(adapt, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
/* LO calibration setting */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n"));
- ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x00462911);
+ PHY_SetBBReg(adapt, rIQK_AGC_Rsp, bMaskDWord, 0x00462911);
/* One shot, path A LOK & IQK */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n"));
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+ PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E));
/* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
- ODM_delay_ms(IQK_DELAY_TIME_88E);
+ mdelay(IQK_DELAY_TIME_88E);
/* Check failed */
- regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regeac = PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regeac));
- regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE94 = PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x\n", regE94));
- regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
+ regE9C = PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe9c = 0x%x\n", regE9C));
- regEA4 = ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_A_2, bMaskDWord);
+ regEA4 = PHY_QueryBBReg(adapt, rRx_Power_Before_IQK_A_2, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea4 = 0x%x\n", regEA4));
if (!(regeac & BIT28) &&
@@ -533,51 +533,51 @@ phy_PathA_RxIQK(struct adapter *adapt, bool configPathB)
/* 1 Get TXIMR setting */
/* modify RXIQK mode table */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A Rx IQK modify RXIQK mode table!\n"));
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf117B);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf117B);
/* PA,PAD off */
- ODM_SetRFReg(dm_odm, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x980);
- ODM_SetRFReg(dm_odm, RF_PATH_A, 0x56, bRFRegOffsetMask, 0x51000);
+ PHY_SetRFReg(adapt, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x980);
+ PHY_SetRFReg(adapt, RF_PATH_A, 0x56, bRFRegOffsetMask, 0x51000);
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
/* IQK setting */
- ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, 0x01007c00);
- ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x81004800);
+ PHY_SetBBReg(adapt, rTx_IQK, bMaskDWord, 0x01007c00);
+ PHY_SetBBReg(adapt, rRx_IQK, bMaskDWord, 0x81004800);
/* path-A IQK setting */
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
- ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x82160c1f);
- ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
+ PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
+ PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
+ PHY_SetBBReg(adapt, rTx_IQK_PI_A, bMaskDWord, 0x82160c1f);
+ PHY_SetBBReg(adapt, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
/* LO calibration setting */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n"));
- ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
+ PHY_SetBBReg(adapt, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
/* One shot, path A LOK & IQK */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n"));
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+ PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("Delay %d ms for One shot, path A LOK & IQK.\n",
IQK_DELAY_TIME_88E));
- ODM_delay_ms(IQK_DELAY_TIME_88E);
+ mdelay(IQK_DELAY_TIME_88E);
/* Check failed */
- regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regeac = PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("0xeac = 0x%x\n", regeac));
- regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE94 = PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("0xe94 = 0x%x\n", regE94));
- regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
+ regE9C = PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("0xe9c = 0x%x\n", regE9C));
@@ -589,55 +589,55 @@ phy_PathA_RxIQK(struct adapter *adapt, bool configPathB)
return result;
u4tmp = 0x80007C00 | (regE94&0x3FF0000) | ((regE9C&0x3FF0000) >> 16);
- ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, u4tmp);
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe40 = 0x%x u4tmp = 0x%x\n", ODM_GetBBReg(dm_odm, rTx_IQK, bMaskDWord), u4tmp));
+ PHY_SetBBReg(adapt, rTx_IQK, bMaskDWord, u4tmp);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe40 = 0x%x u4tmp = 0x%x\n", PHY_QueryBBReg(adapt, rTx_IQK, bMaskDWord), u4tmp));
/* 1 RX IQK */
/* modify RXIQK mode table */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A Rx IQK modify RXIQK mode table 2!\n"));
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf7ffa);
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf7ffa);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
/* IQK setting */
- ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x01004800);
+ PHY_SetBBReg(adapt, rRx_IQK, bMaskDWord, 0x01004800);
/* path-A IQK setting */
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
- ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x82160c05);
- ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160c1f);
+ PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
+ PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
+ PHY_SetBBReg(adapt, rTx_IQK_PI_A, bMaskDWord, 0x82160c05);
+ PHY_SetBBReg(adapt, rRx_IQK_PI_A, bMaskDWord, 0x28160c1f);
/* LO calibration setting */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n"));
- ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
+ PHY_SetBBReg(adapt, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
/* One shot, path A LOK & IQK */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n"));
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+ PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E));
/* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
- ODM_delay_ms(IQK_DELAY_TIME_88E);
+ mdelay(IQK_DELAY_TIME_88E);
/* Check failed */
- regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regeac = PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regeac));
- regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE94 = PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x\n", regE94));
- regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
+ regE9C = PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe9c = 0x%x\n", regE9C));
- regEA4 = ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_A_2, bMaskDWord);
+ regEA4 = PHY_QueryBBReg(adapt, rRx_Power_Before_IQK_A_2, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea4 = 0x%x\n", regEA4));
/* reload RF 0xdf */
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x180);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ PHY_SetRFReg(adapt, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x180);
if (!(regeac & BIT27) && /* if Tx is OK, check whether Rx is OK */
(((regEA4 & 0x03FF0000)>>16) != 0x132) &&
@@ -660,29 +660,29 @@ phy_PathB_IQK_8188E(struct adapter *adapt)
/* One shot, path B LOK & IQK */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n"));
- ODM_SetBBReg(dm_odm, rIQK_AGC_Cont, bMaskDWord, 0x00000002);
- ODM_SetBBReg(dm_odm, rIQK_AGC_Cont, bMaskDWord, 0x00000000);
+ PHY_SetBBReg(adapt, rIQK_AGC_Cont, bMaskDWord, 0x00000002);
+ PHY_SetBBReg(adapt, rIQK_AGC_Cont, bMaskDWord, 0x00000000);
/* delay x ms */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("Delay %d ms for One shot, path B LOK & IQK.\n",
IQK_DELAY_TIME_88E));
- ODM_delay_ms(IQK_DELAY_TIME_88E);
+ mdelay(IQK_DELAY_TIME_88E);
/* Check failed */
- regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regeac = PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("0xeac = 0x%x\n", regeac));
- regeb4 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord);
+ regeb4 = PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_B, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("0xeb4 = 0x%x\n", regeb4));
- regebc = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord);
+ regebc = PHY_QueryBBReg(adapt, rTx_Power_After_IQK_B, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("0xebc = 0x%x\n", regebc));
- regec4 = ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_B_2, bMaskDWord);
+ regec4 = PHY_QueryBBReg(adapt, rRx_Power_Before_IQK_B_2, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("0xec4 = 0x%x\n", regec4));
- regecc = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_B_2, bMaskDWord);
+ regecc = PHY_QueryBBReg(adapt, rRx_Power_After_IQK_B_2, bMaskDWord);
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("0xecc = 0x%x\n", regecc));
@@ -715,7 +715,7 @@ static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u
if (final_candidate == 0xFF) {
return;
} else if (iqkok) {
- Oldval_0 = (ODM_GetBBReg(dm_odm, rOFDM0_XATxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
+ Oldval_0 = (PHY_QueryBBReg(adapt, rOFDM0_XATxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
X = result[final_candidate][0];
if ((X & 0x00000200) != 0)
@@ -724,9 +724,9 @@ static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("X = 0x%x, TX0_A = 0x%x, Oldval_0 0x%x\n",
X, TX0_A, Oldval_0));
- ODM_SetBBReg(dm_odm, rOFDM0_XATxIQImbalance, 0x3FF, TX0_A);
+ PHY_SetBBReg(adapt, rOFDM0_XATxIQImbalance, 0x3FF, TX0_A);
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(31), ((X * Oldval_0>>7) & 0x1));
+ PHY_SetBBReg(adapt, rOFDM0_ECCAThreshold, BIT(31), ((X * Oldval_0>>7) & 0x1));
Y = result[final_candidate][1];
if ((Y & 0x00000200) != 0)
@@ -734,10 +734,10 @@ static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u
TX0_C = (Y * Oldval_0) >> 8;
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Y = 0x%x, TX = 0x%x\n", Y, TX0_C));
- ODM_SetBBReg(dm_odm, rOFDM0_XCTxAFE, 0xF0000000, ((TX0_C&0x3C0)>>6));
- ODM_SetBBReg(dm_odm, rOFDM0_XATxIQImbalance, 0x003F0000, (TX0_C&0x3F));
+ PHY_SetBBReg(adapt, rOFDM0_XCTxAFE, 0xF0000000, ((TX0_C&0x3C0)>>6));
+ PHY_SetBBReg(adapt, rOFDM0_XATxIQImbalance, 0x003F0000, (TX0_C&0x3F));
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(29), ((Y * Oldval_0>>7) & 0x1));
+ PHY_SetBBReg(adapt, rOFDM0_ECCAThreshold, BIT(29), ((Y * Oldval_0>>7) & 0x1));
if (txonly) {
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("patha_fill_iqk only Tx OK\n"));
@@ -745,13 +745,13 @@ static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u
}
reg = result[final_candidate][2];
- ODM_SetBBReg(dm_odm, rOFDM0_XARxIQImbalance, 0x3FF, reg);
+ PHY_SetBBReg(adapt, rOFDM0_XARxIQImbalance, 0x3FF, reg);
reg = result[final_candidate][3] & 0x3F;
- ODM_SetBBReg(dm_odm, rOFDM0_XARxIQImbalance, 0xFC00, reg);
+ PHY_SetBBReg(adapt, rOFDM0_XARxIQImbalance, 0xFC00, reg);
reg = (result[final_candidate][3] >> 6) & 0xF;
- ODM_SetBBReg(dm_odm, rOFDM0_RxIQExtAnta, 0xF0000000, reg);
+ PHY_SetBBReg(adapt, rOFDM0_RxIQExtAnta, 0xF0000000, reg);
}
}
@@ -768,16 +768,16 @@ static void pathb_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u
if (final_candidate == 0xFF) {
return;
} else if (iqkok) {
- Oldval_1 = (ODM_GetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
+ Oldval_1 = (PHY_QueryBBReg(adapt, rOFDM0_XBTxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
X = result[final_candidate][4];
if ((X & 0x00000200) != 0)
X = X | 0xFFFFFC00;
TX1_A = (X * Oldval_1) >> 8;
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("X = 0x%x, TX1_A = 0x%x\n", X, TX1_A));
- ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, 0x3FF, TX1_A);
+ PHY_SetBBReg(adapt, rOFDM0_XBTxIQImbalance, 0x3FF, TX1_A);
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(27), ((X * Oldval_1>>7) & 0x1));
+ PHY_SetBBReg(adapt, rOFDM0_ECCAThreshold, BIT(27), ((X * Oldval_1>>7) & 0x1));
Y = result[final_candidate][5];
if ((Y & 0x00000200) != 0)
@@ -785,22 +785,22 @@ static void pathb_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u
TX1_C = (Y * Oldval_1) >> 8;
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Y = 0x%x, TX1_C = 0x%x\n", Y, TX1_C));
- ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, 0xF0000000, ((TX1_C&0x3C0)>>6));
- ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, 0x003F0000, (TX1_C&0x3F));
+ PHY_SetBBReg(adapt, rOFDM0_XDTxAFE, 0xF0000000, ((TX1_C&0x3C0)>>6));
+ PHY_SetBBReg(adapt, rOFDM0_XBTxIQImbalance, 0x003F0000, (TX1_C&0x3F));
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(25), ((Y * Oldval_1>>7) & 0x1));
+ PHY_SetBBReg(adapt, rOFDM0_ECCAThreshold, BIT(25), ((Y * Oldval_1>>7) & 0x1));
if (txonly)
return;
reg = result[final_candidate][6];
- ODM_SetBBReg(dm_odm, rOFDM0_XBRxIQImbalance, 0x3FF, reg);
+ PHY_SetBBReg(adapt, rOFDM0_XBRxIQImbalance, 0x3FF, reg);
reg = result[final_candidate][7] & 0x3F;
- ODM_SetBBReg(dm_odm, rOFDM0_XBRxIQImbalance, 0xFC00, reg);
+ PHY_SetBBReg(adapt, rOFDM0_XBRxIQImbalance, 0xFC00, reg);
reg = (result[final_candidate][7] >> 6) & 0xF;
- ODM_SetBBReg(dm_odm, rOFDM0_AGCRSSITable, 0x0000F000, reg);
+ PHY_SetBBReg(adapt, rOFDM0_AGCRSSITable, 0x0000F000, reg);
}
}
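Aside: patha_fill_iqk() and pathb_fill_iqk() above treat the IQK result words as 10-bit signed fields and scale the previously programmed gain in Q8 fixed point (the "X = X | 0xFFFFFC00" sign extension followed by "(X * Oldval) >> 8"). A minimal standalone sketch of that pattern follows; the helper names are illustrative only and do not exist in the driver.

#include <linux/types.h>

/* Illustrative helpers; these names are not part of the driver. */
static inline s32 iqk_sign_extend_10bit(u32 raw)
{
	s32 val = raw & 0x3FF;			/* keep the 10-bit field */

	if (val & 0x200)			/* bit 9 is the sign bit */
		val |= 0xFFFFFC00;		/* extend the sign into bits 31..10 */
	return val;
}

static inline u32 iqk_scale_q8(u32 raw, u32 oldval)
{
	/* multiply by the previous 10-bit gain, then drop the Q8 fraction */
	return (u32)((iqk_sign_extend_10bit(raw) * (s32)oldval) >> 8);
}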
@@ -824,7 +824,7 @@ void _PHY_SaveADDARegisters(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Save ADDA parameters.\n"));
for (i = 0; i < RegisterNum; i++) {
- ADDABackup[i] = ODM_GetBBReg(dm_odm, ADDAReg[i], bMaskDWord);
+ ADDABackup[i] = PHY_QueryBBReg(adapt, ADDAReg[i], bMaskDWord);
}
}
@@ -852,7 +852,7 @@ static void reload_adda_reg(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Reload ADDA power saving parameters !\n"));
for (i = 0; i < RegiesterNum; i++)
- ODM_SetBBReg(dm_odm, ADDAReg[i], bMaskDWord, ADDABackup[i]);
+ PHY_SetBBReg(adapt, ADDAReg[i], bMaskDWord, ADDABackup[i]);
}
static void
@@ -890,13 +890,13 @@ _PHY_PathADDAOn(
pathOn = isPathAOn ? 0x04db25a4 : 0x0b1b25a4;
if (!is2t) {
pathOn = 0x0bdb25a0;
- ODM_SetBBReg(dm_odm, ADDAReg[0], bMaskDWord, 0x0b1b25a0);
+ PHY_SetBBReg(adapt, ADDAReg[0], bMaskDWord, 0x0b1b25a0);
} else {
- ODM_SetBBReg(dm_odm, ADDAReg[0], bMaskDWord, pathOn);
+ PHY_SetBBReg(adapt, ADDAReg[0], bMaskDWord, pathOn);
}
for (i = 1; i < IQK_ADDA_REG_NUM; i++)
- ODM_SetBBReg(dm_odm, ADDAReg[i], bMaskDWord, pathOn);
+ PHY_SetBBReg(adapt, ADDAReg[i], bMaskDWord, pathOn);
}
void
@@ -930,9 +930,9 @@ _PHY_PathAStandBy(
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A standby mode!\n"));
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x0);
- ODM_SetBBReg(dm_odm, 0x840, bMaskDWord, 0x00010000);
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x0);
+ PHY_SetBBReg(adapt, 0x840, bMaskDWord, 0x00010000);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
}
static void _PHY_PIModeSwitch(
@@ -947,8 +947,8 @@ static void _PHY_PIModeSwitch(
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("BB Switch to %s mode!\n", (PIMode ? "PI" : "SI")));
mode = PIMode ? 0x01000100 : 0x01000000;
- ODM_SetBBReg(dm_odm, rFPGA0_XA_HSSIParameter1, bMaskDWord, mode);
- ODM_SetBBReg(dm_odm, rFPGA0_XB_HSSIParameter1, bMaskDWord, mode);
+ PHY_SetBBReg(adapt, rFPGA0_XA_HSSIParameter1, bMaskDWord, mode);
+ PHY_SetBBReg(adapt, rFPGA0_XB_HSSIParameter1, bMaskDWord, mode);
}
static bool phy_SimularityCompare_8188E(
@@ -1097,7 +1097,7 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
_PHY_PathADDAOn(adapt, ADDA_REG, true, is2t);
if (t == 0)
- dm_odm->RFCalibrateInfo.bRfPiEnable = (u8)ODM_GetBBReg(dm_odm, rFPGA0_XA_HSSIParameter1, BIT(8));
+ dm_odm->RFCalibrateInfo.bRfPiEnable = (u8)PHY_QueryBBReg(adapt, rFPGA0_XA_HSSIParameter1, BIT(8));
if (!dm_odm->RFCalibrateInfo.bRfPiEnable) {
/* Switch BB to PI mode to do IQ Calibration. */
@@ -1105,19 +1105,19 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
}
/* BB setting */
- ODM_SetBBReg(dm_odm, rFPGA0_RFMOD, BIT24, 0x00);
- ODM_SetBBReg(dm_odm, rOFDM0_TRxPathEnable, bMaskDWord, 0x03a05600);
- ODM_SetBBReg(dm_odm, rOFDM0_TRMuxPar, bMaskDWord, 0x000800e4);
- ODM_SetBBReg(dm_odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22204000);
+ PHY_SetBBReg(adapt, rFPGA0_RFMOD, BIT24, 0x00);
+ PHY_SetBBReg(adapt, rOFDM0_TRxPathEnable, bMaskDWord, 0x03a05600);
+ PHY_SetBBReg(adapt, rOFDM0_TRMuxPar, bMaskDWord, 0x000800e4);
+ PHY_SetBBReg(adapt, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22204000);
- ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFInterfaceSW, BIT10, 0x01);
- ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFInterfaceSW, BIT26, 0x01);
- ODM_SetBBReg(dm_odm, rFPGA0_XA_RFInterfaceOE, BIT10, 0x00);
- ODM_SetBBReg(dm_odm, rFPGA0_XB_RFInterfaceOE, BIT10, 0x00);
+ PHY_SetBBReg(adapt, rFPGA0_XAB_RFInterfaceSW, BIT10, 0x01);
+ PHY_SetBBReg(adapt, rFPGA0_XAB_RFInterfaceSW, BIT26, 0x01);
+ PHY_SetBBReg(adapt, rFPGA0_XA_RFInterfaceOE, BIT10, 0x00);
+ PHY_SetBBReg(adapt, rFPGA0_XB_RFInterfaceOE, BIT10, 0x00);
if (is2t) {
- ODM_SetBBReg(dm_odm, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00010000);
- ODM_SetBBReg(dm_odm, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00010000);
+ PHY_SetBBReg(adapt, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00010000);
+ PHY_SetBBReg(adapt, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00010000);
}
/* MAC settings */
@@ -1125,23 +1125,23 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
/* Page B init */
/* AP or IQK */
- ODM_SetBBReg(dm_odm, rConfig_AntA, bMaskDWord, 0x0f600000);
+ PHY_SetBBReg(adapt, rConfig_AntA, bMaskDWord, 0x0f600000);
if (is2t)
- ODM_SetBBReg(dm_odm, rConfig_AntB, bMaskDWord, 0x0f600000);
+ PHY_SetBBReg(adapt, rConfig_AntB, bMaskDWord, 0x0f600000);
/* IQ calibration setting */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK setting!\n"));
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
- ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, 0x01007c00);
- ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x81004800);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ PHY_SetBBReg(adapt, rTx_IQK, bMaskDWord, 0x01007c00);
+ PHY_SetBBReg(adapt, rRx_IQK, bMaskDWord, 0x81004800);
for (i = 0; i < retryCount; i++) {
PathAOK = phy_PathA_IQK_8188E(adapt, is2t);
if (PathAOK == 0x01) {
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Tx IQK Success!!\n"));
- result[t][0] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16;
- result[t][1] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16;
+ result[t][0] = (PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16;
+ result[t][1] = (PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16;
break;
}
}
@@ -1150,8 +1150,8 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
PathAOK = phy_PathA_RxIQK(adapt, is2t);
if (PathAOK == 0x03) {
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK Success!!\n"));
- result[t][2] = (ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
- result[t][3] = (ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
+ result[t][2] = (PHY_QueryBBReg(adapt, rRx_Power_Before_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
+ result[t][3] = (PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
break;
} else {
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK Fail!!\n"));
@@ -1172,15 +1172,15 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
PathBOK = phy_PathB_IQK_8188E(adapt);
if (PathBOK == 0x03) {
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQK Success!!\n"));
- result[t][4] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord)&0x3FF0000)>>16;
- result[t][5] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord)&0x3FF0000)>>16;
- result[t][6] = (ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_B_2, bMaskDWord)&0x3FF0000)>>16;
- result[t][7] = (ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_B_2, bMaskDWord)&0x3FF0000)>>16;
+ result[t][4] = (PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][5] = (PHY_QueryBBReg(adapt, rTx_Power_After_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][6] = (PHY_QueryBBReg(adapt, rRx_Power_Before_IQK_B_2, bMaskDWord)&0x3FF0000)>>16;
+ result[t][7] = (PHY_QueryBBReg(adapt, rRx_Power_After_IQK_B_2, bMaskDWord)&0x3FF0000)>>16;
break;
} else if (i == (retryCount - 1) && PathBOK == 0x01) { /* Tx IQK OK */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Only Tx IQK Success!!\n"));
- result[t][4] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord)&0x3FF0000)>>16;
- result[t][5] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][4] = (PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][5] = (PHY_QueryBBReg(adapt, rTx_Power_After_IQK_B, bMaskDWord)&0x3FF0000)>>16;
}
}
@@ -1191,7 +1191,7 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
/* Back to BB mode, load original value */
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK:Back to BB mode, load original value!\n"));
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0);
+ PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0);
if (t != 0) {
if (!dm_odm->RFCalibrateInfo.bRfPiEnable) {
@@ -1208,13 +1208,13 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
reload_adda_reg(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM);
/* Restore RX initial gain */
- ODM_SetBBReg(dm_odm, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00032ed3);
+ PHY_SetBBReg(adapt, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00032ed3);
if (is2t)
- ODM_SetBBReg(dm_odm, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00032ed3);
+ PHY_SetBBReg(adapt, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00032ed3);
/* load 0xe30 IQC default value */
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
}
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_IQCalibrate_8188E() <==\n"));
}
@@ -1245,31 +1245,31 @@ static void phy_LCCalibrate_8188E(struct adapter *adapt, bool is2t)
/* 2. Set RF mode = standby mode */
/* Path-A */
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_AC, bMask12Bits, (RF_Amode&0x8FFFF)|0x10000);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_AC, bMask12Bits, (RF_Amode&0x8FFFF)|0x10000);
/* Path-B */
if (is2t)
- ODM_SetRFReg(dm_odm, RF_PATH_B, RF_AC, bMask12Bits, (RF_Bmode&0x8FFFF)|0x10000);
+ PHY_SetRFReg(adapt, RF_PATH_B, RF_AC, bMask12Bits, (RF_Bmode&0x8FFFF)|0x10000);
}
/* 3. Read RF reg18 */
LC_Cal = PHY_QueryRFReg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits);
/* 4. Set LC calibration begin bit15 */
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_CHNLBW, bMask12Bits, LC_Cal|0x08000);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits, LC_Cal|0x08000);
- ODM_sleep_ms(100);
+ msleep(100);
/* Restore original situation */
if ((tmpreg&0x70) != 0) {
/* Deal with continuous TX case */
/* Path-A */
ODM_Write1Byte(dm_odm, 0xd03, tmpreg);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_AC, bMask12Bits, RF_Amode);
+ PHY_SetRFReg(adapt, RF_PATH_A, RF_AC, bMask12Bits, RF_Amode);
/* Path-B */
if (is2t)
- ODM_SetRFReg(dm_odm, RF_PATH_B, RF_AC, bMask12Bits, RF_Bmode);
+ PHY_SetRFReg(adapt, RF_PATH_B, RF_AC, bMask12Bits, RF_Bmode);
} else {
/* Deal with Packet TX case */
ODM_Write1Byte(dm_odm, REG_TXPAUSE, 0x00);
@@ -1447,7 +1447,7 @@ void PHY_LCCalibrate_8188E(struct adapter *adapt)
return;
while (*(dm_odm->pbScanInProcess) && timecount < timeout) {
- ODM_delay_ms(50);
+ mdelay(50);
timecount += 50;
}
@@ -1475,19 +1475,19 @@ static void phy_setrfpathswitch_8188e(struct adapter *adapt, bool main, bool is2
u8 u1btmp;
u1btmp = ODM_Read1Byte(dm_odm, REG_LEDCFG2) | BIT7;
ODM_Write1Byte(dm_odm, REG_LEDCFG2, u1btmp);
- ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFParameter, BIT13, 0x01);
+ PHY_SetBBReg(adapt, rFPGA0_XAB_RFParameter, BIT13, 0x01);
}
if (is2t) { /* 92C */
if (main)
- ODM_SetBBReg(dm_odm, rFPGA0_XB_RFInterfaceOE, BIT5|BIT6, 0x1); /* 92C_Path_A */
+ PHY_SetBBReg(adapt, rFPGA0_XB_RFInterfaceOE, BIT5|BIT6, 0x1); /* 92C_Path_A */
else
- ODM_SetBBReg(dm_odm, rFPGA0_XB_RFInterfaceOE, BIT5|BIT6, 0x2); /* BT */
+ PHY_SetBBReg(adapt, rFPGA0_XB_RFInterfaceOE, BIT5|BIT6, 0x2); /* BT */
} else { /* 88C */
if (main)
- ODM_SetBBReg(dm_odm, rFPGA0_XA_RFInterfaceOE, BIT8|BIT9, 0x2); /* Main */
+ PHY_SetBBReg(adapt, rFPGA0_XA_RFInterfaceOE, BIT8|BIT9, 0x2); /* Main */
else
- ODM_SetBBReg(dm_odm, rFPGA0_XA_RFInterfaceOE, BIT8|BIT9, 0x1); /* Aux */
+ PHY_SetBBReg(adapt, rFPGA0_XA_RFInterfaceOE, BIT8|BIT9, 0x1); /* Aux */
}
}
diff --git a/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c b/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c
index 5700dbce5b8c..50f951390695 100644
--- a/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c
+++ b/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c
@@ -100,7 +100,7 @@ u8 HalPwrSeqCmdParsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers,
if (value == (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd)))
poll_bit = true;
else
- rtw_udelay_os(10);
+ udelay(10);
if (poll_count++ > max_poll_count) {
DBG_88E("Fail to polling Offset[%#x]\n", offset);
@@ -111,9 +111,9 @@ u8 HalPwrSeqCmdParsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers,
case PWR_CMD_DELAY:
RT_TRACE(_module_hal_init_c_ , _drv_info_, ("HalPwrSeqCmdParsing: PWR_CMD_DELAY\n"));
if (GET_PWR_CFG_VALUE(pwrcfgcmd) == PWRSEQ_DELAY_US)
- rtw_udelay_os(GET_PWR_CFG_OFFSET(pwrcfgcmd));
+ udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd));
else
- rtw_udelay_os(GET_PWR_CFG_OFFSET(pwrcfgcmd)*1000);
+ udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd)*1000);
break;
case PWR_CMD_END:
/* When this command is parsed, end the process */
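Aside: the PWR_CMD_POLLING branch of HalPwrSeqCmdParsing() above busy-waits in 10 us steps until the masked register value matches, and gives up after max_poll_count tries. A self-contained sketch of that polling pattern follows; the register accessor is passed as a callback purely to keep the example standalone, whereas the driver reads the offset through its own I/O wrappers.

#include <linux/delay.h>
#include <linux/types.h>

/* Sketch of the polling loop; accessor is a callback to stay self-contained. */
static bool poll_masked_reg(u8 (*read8)(void *ctx, u16 offset), void *ctx,
			    u16 offset, u8 mask, u8 expected,
			    u32 max_poll_count)
{
	u32 poll_count = 0;

	do {
		if ((read8(ctx, offset) & mask) == expected)
			return true;
		udelay(10);			/* same 10 us step as above */
	} while (++poll_count <= max_poll_count);

	return false;				/* caller logs the polling failure */
}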
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 285475f9613c..3555ffaa4e06 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -182,22 +182,16 @@ void ODM_DMInit(struct odm_dm_struct *pDM_Odm)
odm_DIGInit(pDM_Odm);
odm_RateAdaptiveMaskInit(pDM_Odm);
- if (pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) {
- ;
- } else if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
- odm_PrimaryCCA_Init(pDM_Odm); /* Gary */
- odm_DynamicBBPowerSavingInit(pDM_Odm);
- odm_DynamicTxPowerInit(pDM_Odm);
- odm_TXPowerTrackingInit(pDM_Odm);
- ODM_EdcaTurboInit(pDM_Odm);
- ODM_RAInfo_Init_all(pDM_Odm);
- if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
- odm_InitHybridAntDiv(pDM_Odm);
- else if (pDM_Odm->AntDivType == CGCS_RX_SW_ANTDIV)
- odm_SwAntDivInit(pDM_Odm);
- }
+ odm_PrimaryCCA_Init(pDM_Odm); /* Gary */
+ odm_DynamicBBPowerSavingInit(pDM_Odm);
+ odm_DynamicTxPowerInit(pDM_Odm);
+ odm_TXPowerTrackingInit(pDM_Odm);
+ ODM_EdcaTurboInit(pDM_Odm);
+ ODM_RAInfo_Init_all(pDM_Odm);
+ if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
+ odm_InitHybridAntDiv(pDM_Odm);
}
/* 2011/09/20 MH This is the entry pointer for all team to execute HW out source DM. */
@@ -206,27 +200,14 @@ void ODM_DMInit(struct odm_dm_struct *pDM_Odm)
void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
{
/* 2012.05.03 Luke: For all IC series */
- odm_GlobalAdapterCheck();
odm_CmnInfoHook_Debug(pDM_Odm);
odm_CmnInfoUpdate_Debug(pDM_Odm);
odm_CommonInfoSelfUpdate(pDM_Odm);
odm_FalseAlarmCounterStatistics(pDM_Odm);
odm_RSSIMonitorCheck(pDM_Odm);
- /* For CE Platform(SPRD or Tablet) */
- /* 8723A or 8189ES platform */
- /* NeilChen--2012--08--24-- */
/* Fix Leave LPS issue */
- if ((pDM_Odm->Adapter->pwrctrlpriv.pwr_mode != PS_MODE_ACTIVE) &&/* in LPS mode */
- ((pDM_Odm->SupportICType & (ODM_RTL8723A)) ||
- (pDM_Odm->SupportICType & (ODM_RTL8188E) &&
- ((pDM_Odm->SupportInterface == ODM_ITRF_SDIO))))) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("----Step1: odm_DIG is in LPS mode\n"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("---Step2: 8723AS is in LPS mode\n"));
- odm_DIGbyRSSI_LPS(pDM_Odm);
- } else {
- odm_DIG(pDM_Odm);
- }
+ odm_DIG(pDM_Odm);
odm_CCKPacketDetectionThresh(pDM_Odm);
if (*(pDM_Odm->pbPowerSaving))
@@ -240,17 +221,10 @@ void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
(pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
(pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
odm_HwAntDiv(pDM_Odm);
- else if (pDM_Odm->AntDivType == CGCS_RX_SW_ANTDIV)
- odm_SwAntDivChkAntSwitch(pDM_Odm, SWAW_STEP_PEAK);
-
- if (pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) {
- ;
- } else if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
- ODM_TXPowerTrackingCheck(pDM_Odm);
- odm_EdcaTurboCheck(pDM_Odm);
- odm_DynamicTxPower(pDM_Odm);
- }
- odm_dtc(pDM_Odm);
+
+ ODM_TXPowerTrackingCheck(pDM_Odm);
+ odm_EdcaTurboCheck(pDM_Odm);
+ odm_DynamicTxPower(pDM_Odm);
}
/* Init /.. Fixed HW value. Only init time. */
@@ -457,12 +431,10 @@ void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value)
void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm)
{
- pDM_Odm->bCckHighPower = (bool) ODM_GetBBReg(pDM_Odm, 0x824, BIT9);
- pDM_Odm->RFPathRxEnable = (u8) ODM_GetBBReg(pDM_Odm, 0xc04, 0x0F);
- if (pDM_Odm->SupportICType & (ODM_RTL8192C|ODM_RTL8192D))
- pDM_Odm->AntDivType = CG_TRX_HW_ANTDIV;
- if (pDM_Odm->SupportICType & (ODM_RTL8723A))
- pDM_Odm->AntDivType = CGCS_RX_SW_ANTDIV;
+ struct adapter *adapter = pDM_Odm->Adapter;
+
+ pDM_Odm->bCckHighPower = (bool) PHY_QueryBBReg(adapter, 0x824, BIT9);
+ pDM_Odm->RFPathRxEnable = (u8) PHY_QueryBBReg(adapter, 0xc04, 0x0F);
ODM_InitDebugSetting(pDM_Odm);
}
@@ -526,9 +498,6 @@ void odm_CmnInfoHook_Debug(struct odm_dm_struct *pDM_Odm)
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbScanInProcess=%d\n", *(pDM_Odm->pbScanInProcess)));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbPowerSaving=%d\n", *(pDM_Odm->pbPowerSaving)));
-
- if (pDM_Odm->SupportPlatform & (ODM_AP|ODM_ADSL))
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pOnePathCCA=%d\n", *(pDM_Odm->pOnePathCCA)));
}
void odm_CmnInfoUpdate_Debug(struct odm_dm_struct *pDM_Odm)
@@ -540,53 +509,17 @@ void odm_CmnInfoUpdate_Debug(struct odm_dm_struct *pDM_Odm)
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RSSI_Min=%d\n", pDM_Odm->RSSI_Min));
}
-static int getIGIForDiff(int value_IGI)
-{
- #define ONERCCA_LOW_TH 0x30
- #define ONERCCA_LOW_DIFF 8
-
- if (value_IGI < ONERCCA_LOW_TH) {
- if ((ONERCCA_LOW_TH - value_IGI) < ONERCCA_LOW_DIFF)
- return ONERCCA_LOW_TH;
- else
- return value_IGI + ONERCCA_LOW_DIFF;
- } else {
- return value_IGI;
- }
-}
-
void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI)
{
struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+ struct adapter *adapter = pDM_Odm->Adapter;
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
("ODM_REG(IGI_A,pDM_Odm)=0x%x, ODM_BIT(IGI,pDM_Odm)=0x%x\n",
ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm)));
if (pDM_DigTable->CurIGValue != CurrentIGI) {
- if (pDM_Odm->SupportPlatform & (ODM_CE|ODM_MP)) {
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- if (pDM_Odm->SupportICType != ODM_RTL8188E)
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- } else if (pDM_Odm->SupportPlatform & (ODM_AP|ODM_ADSL)) {
- switch (*(pDM_Odm->pOnePathCCA)) {
- case ODM_CCA_2R:
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- if (pDM_Odm->SupportICType != ODM_RTL8188E)
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- break;
- case ODM_CCA_1R_A:
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- if (pDM_Odm->SupportICType != ODM_RTL8188E)
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), getIGIForDiff(CurrentIGI));
- break;
- case ODM_CCA_1R_B:
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), getIGIForDiff(CurrentIGI));
- if (pDM_Odm->SupportICType != ODM_RTL8188E)
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- break;
- }
- }
+ PHY_SetBBReg(adapter, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("CurrentIGI(0x%02x).\n", CurrentIGI));
/* pDM_DigTable->PreIGValue = pDM_DigTable->CurIGValue; */
pDM_DigTable->CurIGValue = CurrentIGI;
@@ -607,9 +540,6 @@ void odm_DIGbyRSSI_LPS(struct odm_dm_struct *pDM_Odm)
u8 bFwCurrentInPSMode = false;
u8 CurrentIGI = pDM_Odm->RSSI_Min;
- if (!(pDM_Odm->SupportICType & (ODM_RTL8723A | ODM_RTL8188E)))
- return;
-
CurrentIGI = CurrentIGI + RSSI_OFFSET_DIG;
bFwCurrentInPSMode = pAdapter->pwrctrlpriv.bFwCurrentInPSMode;
@@ -646,9 +576,10 @@ void odm_DIGbyRSSI_LPS(struct odm_dm_struct *pDM_Odm)
void odm_DIGInit(struct odm_dm_struct *pDM_Odm)
{
+ struct adapter *adapter = pDM_Odm->Adapter;
struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
- pDM_DigTable->CurIGValue = (u8) ODM_GetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm));
+ pDM_DigTable->CurIGValue = (u8) PHY_QueryBBReg(adapter, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm));
pDM_DigTable->RssiLowThresh = DM_DIG_THRESH_LOW;
pDM_DigTable->RssiHighThresh = DM_DIG_THRESH_HIGH;
pDM_DigTable->FALowThresh = DM_false_ALARM_THRESH_LOW;
@@ -705,102 +636,47 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
return;
}
- if (pDM_Odm->SupportICType == ODM_RTL8192D) {
- if (*(pDM_Odm->pMacPhyMode) == ODM_DMSP) {
- if (*(pDM_Odm->pbMasterOfDMSP)) {
- DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
- FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
- FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
- } else {
- DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_1;
- FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_1);
- FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_1);
- }
- } else {
- if (*(pDM_Odm->pBandType) == ODM_BAND_5G) {
- DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
- FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
- FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
- } else {
- DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_1;
- FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_1);
- FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_1);
- }
- }
- } else {
- DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
- FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
- FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
- }
+ DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
+ FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
+ FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
/* 1 Boundary Decision */
- if ((pDM_Odm->SupportICType & (ODM_RTL8192C|ODM_RTL8723A)) &&
- ((pDM_Odm->BoardType == ODM_BOARD_HIGHPWR) || pDM_Odm->ExtLNA)) {
- if (pDM_Odm->SupportPlatform & (ODM_AP|ODM_ADSL)) {
- dm_dig_max = DM_DIG_MAX_AP_HP;
- dm_dig_min = DM_DIG_MIN_AP_HP;
- } else {
- dm_dig_max = DM_DIG_MAX_NIC_HP;
- dm_dig_min = DM_DIG_MIN_NIC_HP;
- }
- DIG_MaxOfMin = DM_DIG_MAX_AP_HP;
- } else {
- if (pDM_Odm->SupportPlatform & (ODM_AP|ODM_ADSL)) {
- dm_dig_max = DM_DIG_MAX_AP;
- dm_dig_min = DM_DIG_MIN_AP;
- DIG_MaxOfMin = dm_dig_max;
- } else {
- dm_dig_max = DM_DIG_MAX_NIC;
- dm_dig_min = DM_DIG_MIN_NIC;
- DIG_MaxOfMin = DM_DIG_MAX_AP;
- }
- }
+ dm_dig_max = DM_DIG_MAX_NIC;
+ dm_dig_min = DM_DIG_MIN_NIC;
+ DIG_MaxOfMin = DM_DIG_MAX_AP;
+
if (pDM_Odm->bLinked) {
- /* 2 8723A Series, offset need to be 10 */
- if (pDM_Odm->SupportICType == (ODM_RTL8723A)) {
- /* 2 Upper Bound */
- if ((pDM_Odm->RSSI_Min + 10) > DM_DIG_MAX_NIC)
- pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
- else if ((pDM_Odm->RSSI_Min + 10) < DM_DIG_MIN_NIC)
- pDM_DigTable->rx_gain_range_max = DM_DIG_MIN_NIC;
- else
- pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 10;
- /* 2 If BT is Concurrent, need to set Lower Bound */
- DIG_Dynamic_MIN = DM_DIG_MIN_NIC;
- } else {
- /* 2 Modify DIG upper bound */
- if ((pDM_Odm->RSSI_Min + 20) > dm_dig_max)
- pDM_DigTable->rx_gain_range_max = dm_dig_max;
- else if ((pDM_Odm->RSSI_Min + 20) < dm_dig_min)
- pDM_DigTable->rx_gain_range_max = dm_dig_min;
- else
- pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 20;
- /* 2 Modify DIG lower bound */
- if (pDM_Odm->bOneEntryOnly) {
- if (pDM_Odm->RSSI_Min < dm_dig_min)
- DIG_Dynamic_MIN = dm_dig_min;
- else if (pDM_Odm->RSSI_Min > DIG_MaxOfMin)
- DIG_Dynamic_MIN = DIG_MaxOfMin;
- else
- DIG_Dynamic_MIN = pDM_Odm->RSSI_Min;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("odm_DIG() : bOneEntryOnly=true, DIG_Dynamic_MIN=0x%x\n",
- DIG_Dynamic_MIN));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("odm_DIG() : pDM_Odm->RSSI_Min=%d\n",
- pDM_Odm->RSSI_Min));
- } else if ((pDM_Odm->SupportICType == ODM_RTL8188E) &&
- (pDM_Odm->SupportAbility & ODM_BB_ANT_DIV)) {
- /* 1 Lower Bound for 88E AntDiv */
- if (pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) {
- DIG_Dynamic_MIN = (u8) pDM_DigTable->AntDiv_RSSI_max;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
- ("odm_DIG(): pDM_DigTable->AntDiv_RSSI_max=%d\n",
- pDM_DigTable->AntDiv_RSSI_max));
- }
- } else {
+ /* 2 Modify DIG upper bound */
+ if ((pDM_Odm->RSSI_Min + 20) > dm_dig_max)
+ pDM_DigTable->rx_gain_range_max = dm_dig_max;
+ else if ((pDM_Odm->RSSI_Min + 20) < dm_dig_min)
+ pDM_DigTable->rx_gain_range_max = dm_dig_min;
+ else
+ pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 20;
+ /* 2 Modify DIG lower bound */
+ if (pDM_Odm->bOneEntryOnly) {
+ if (pDM_Odm->RSSI_Min < dm_dig_min)
DIG_Dynamic_MIN = dm_dig_min;
+ else if (pDM_Odm->RSSI_Min > DIG_MaxOfMin)
+ DIG_Dynamic_MIN = DIG_MaxOfMin;
+ else
+ DIG_Dynamic_MIN = pDM_Odm->RSSI_Min;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG() : bOneEntryOnly=true, DIG_Dynamic_MIN=0x%x\n",
+ DIG_Dynamic_MIN));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG() : pDM_Odm->RSSI_Min=%d\n",
+ pDM_Odm->RSSI_Min));
+ } else if (pDM_Odm->SupportAbility & ODM_BB_ANT_DIV) {
+ /* 1 Lower Bound for 88E AntDiv */
+ if (pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) {
+ DIG_Dynamic_MIN = (u8) pDM_DigTable->AntDiv_RSSI_max;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("odm_DIG(): pDM_DigTable->AntDiv_RSSI_max=%d\n",
+ pDM_DigTable->AntDiv_RSSI_max));
}
+ } else {
+ DIG_Dynamic_MIN = dm_dig_min;
}
} else {
pDM_DigTable->rx_gain_range_max = dm_dig_max;
@@ -858,21 +734,12 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
CurrentIGI = pDM_Odm->RSSI_Min;
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("DIG: First Connect\n"));
} else {
- if (pDM_Odm->SupportICType == ODM_RTL8192D) {
- if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2_92D)
- CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
- else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1_92D)
- CurrentIGI = CurrentIGI + 1; /* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
- else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0_92D)
- CurrentIGI = CurrentIGI - 1;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
- } else {
- if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2)
- CurrentIGI = CurrentIGI + 4;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
- else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1)
- CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
- else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0)
- CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
- }
+ if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2)
+ CurrentIGI = CurrentIGI + 4;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
+ else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1)
+ CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
+ else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0)
+ CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
}
} else {
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): DIG BeforeLink\n"));
@@ -916,102 +783,69 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
{
+ struct adapter *adapter = pDM_Odm->Adapter;
u32 ret_value;
struct false_alarm_stats *FalseAlmCnt = &(pDM_Odm->FalseAlmCnt);
if (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
return;
- if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
- /* hold ofdm counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_HOLDC_11N, BIT31, 1); /* hold page C counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT31, 1); /* hold page D counter */
-
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord);
- FalseAlmCnt->Cnt_Fast_Fsync = (ret_value&0xffff);
- FalseAlmCnt->Cnt_SB_Search_fail = ((ret_value&0xffff0000)>>16);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord);
- FalseAlmCnt->Cnt_OFDM_CCA = (ret_value&0xffff);
- FalseAlmCnt->Cnt_Parity_Fail = ((ret_value&0xffff0000)>>16);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord);
- FalseAlmCnt->Cnt_Rate_Illegal = (ret_value&0xffff);
- FalseAlmCnt->Cnt_Crc8_fail = ((ret_value&0xffff0000)>>16);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord);
- FalseAlmCnt->Cnt_Mcs_fail = (ret_value&0xffff);
-
- FalseAlmCnt->Cnt_Ofdm_fail = FalseAlmCnt->Cnt_Parity_Fail + FalseAlmCnt->Cnt_Rate_Illegal +
- FalseAlmCnt->Cnt_Crc8_fail + FalseAlmCnt->Cnt_Mcs_fail +
- FalseAlmCnt->Cnt_Fast_Fsync + FalseAlmCnt->Cnt_SB_Search_fail;
-
- if (pDM_Odm->SupportICType == ODM_RTL8188E) {
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_SC_CNT_11N, bMaskDWord);
- FalseAlmCnt->Cnt_BW_LSC = (ret_value&0xffff);
- FalseAlmCnt->Cnt_BW_USC = ((ret_value&0xffff0000)>>16);
- }
-
- /* hold cck counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT12, 1);
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT14, 1);
-
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_LSB_11N, bMaskByte0);
- FalseAlmCnt->Cnt_Cck_fail = ret_value;
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_MSB_11N, bMaskByte3);
- FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff)<<8;
-
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord);
- FalseAlmCnt->Cnt_CCK_CCA = ((ret_value&0xFF)<<8) | ((ret_value&0xFF00)>>8);
-
- FalseAlmCnt->Cnt_all = (FalseAlmCnt->Cnt_Fast_Fsync +
- FalseAlmCnt->Cnt_SB_Search_fail +
- FalseAlmCnt->Cnt_Parity_Fail +
- FalseAlmCnt->Cnt_Rate_Illegal +
- FalseAlmCnt->Cnt_Crc8_fail +
- FalseAlmCnt->Cnt_Mcs_fail +
- FalseAlmCnt->Cnt_Cck_fail);
-
- FalseAlmCnt->Cnt_CCA_all = FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
-
- if (pDM_Odm->SupportICType >= ODM_RTL8723A) {
- /* reset false alarm counter registers */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTC_11N, BIT31, 1);
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTC_11N, BIT31, 0);
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT27, 1);
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT27, 0);
- /* update ofdm counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_HOLDC_11N, BIT31, 0); /* update page C counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT31, 0); /* update page D counter */
-
- /* reset CCK CCA counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT13|BIT12, 0);
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT13|BIT12, 2);
- /* reset CCK FA counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT15|BIT14, 0);
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT15|BIT14, 2);
- }
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Enter odm_FalseAlarmCounterStatistics\n"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
- ("Cnt_Fast_Fsync=%d, Cnt_SB_Search_fail=%d\n",
- FalseAlmCnt->Cnt_Fast_Fsync, FalseAlmCnt->Cnt_SB_Search_fail));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
- ("Cnt_Parity_Fail=%d, Cnt_Rate_Illegal=%d\n",
- FalseAlmCnt->Cnt_Parity_Fail, FalseAlmCnt->Cnt_Rate_Illegal));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
- ("Cnt_Crc8_fail=%d, Cnt_Mcs_fail=%d\n",
- FalseAlmCnt->Cnt_Crc8_fail, FalseAlmCnt->Cnt_Mcs_fail));
- } else { /* FOR ODM_IC_11AC_SERIES */
- /* read OFDM FA counter */
- FalseAlmCnt->Cnt_Ofdm_fail = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_11AC, bMaskLWord);
- FalseAlmCnt->Cnt_Cck_fail = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_11AC, bMaskLWord);
- FalseAlmCnt->Cnt_all = FalseAlmCnt->Cnt_Ofdm_fail + FalseAlmCnt->Cnt_Cck_fail;
-
- /* reset OFDM FA coutner */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RST_11AC, BIT17, 1);
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RST_11AC, BIT17, 0);
- /* reset CCK FA counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11AC, BIT15, 0);
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11AC, BIT15, 1);
- }
+ /* hold ofdm counter */
+ PHY_SetBBReg(adapter, ODM_REG_OFDM_FA_HOLDC_11N, BIT31, 1); /* hold page C counter */
+ PHY_SetBBReg(adapter, ODM_REG_OFDM_FA_RSTD_11N, BIT31, 1); /* hold page D counter */
+
+ ret_value = PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Fast_Fsync = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_SB_Search_fail = ((ret_value&0xffff0000)>>16);
+ ret_value = PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_OFDM_CCA = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_Parity_Fail = ((ret_value&0xffff0000)>>16);
+ ret_value = PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Rate_Illegal = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_Crc8_fail = ((ret_value&0xffff0000)>>16);
+ ret_value = PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Mcs_fail = (ret_value&0xffff);
+
+ FalseAlmCnt->Cnt_Ofdm_fail = FalseAlmCnt->Cnt_Parity_Fail + FalseAlmCnt->Cnt_Rate_Illegal +
+ FalseAlmCnt->Cnt_Crc8_fail + FalseAlmCnt->Cnt_Mcs_fail +
+ FalseAlmCnt->Cnt_Fast_Fsync + FalseAlmCnt->Cnt_SB_Search_fail;
+
+ ret_value = PHY_QueryBBReg(adapter, ODM_REG_SC_CNT_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_BW_LSC = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_BW_USC = ((ret_value&0xffff0000)>>16);
+
+ /* hold cck counter */
+ PHY_SetBBReg(adapter, ODM_REG_CCK_FA_RST_11N, BIT12, 1);
+ PHY_SetBBReg(adapter, ODM_REG_CCK_FA_RST_11N, BIT14, 1);
+
+ ret_value = PHY_QueryBBReg(adapter, ODM_REG_CCK_FA_LSB_11N, bMaskByte0);
+ FalseAlmCnt->Cnt_Cck_fail = ret_value;
+ ret_value = PHY_QueryBBReg(adapter, ODM_REG_CCK_FA_MSB_11N, bMaskByte3);
+ FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff)<<8;
+
+ ret_value = PHY_QueryBBReg(adapter, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_CCK_CCA = ((ret_value&0xFF)<<8) | ((ret_value&0xFF00)>>8);
+
+ FalseAlmCnt->Cnt_all = (FalseAlmCnt->Cnt_Fast_Fsync +
+ FalseAlmCnt->Cnt_SB_Search_fail +
+ FalseAlmCnt->Cnt_Parity_Fail +
+ FalseAlmCnt->Cnt_Rate_Illegal +
+ FalseAlmCnt->Cnt_Crc8_fail +
+ FalseAlmCnt->Cnt_Mcs_fail +
+ FalseAlmCnt->Cnt_Cck_fail);
+
+ FalseAlmCnt->Cnt_CCA_all = FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Enter odm_FalseAlarmCounterStatistics\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
+ ("Cnt_Fast_Fsync=%d, Cnt_SB_Search_fail=%d\n",
+ FalseAlmCnt->Cnt_Fast_Fsync, FalseAlmCnt->Cnt_SB_Search_fail));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
+ ("Cnt_Parity_Fail=%d, Cnt_Rate_Illegal=%d\n",
+ FalseAlmCnt->Cnt_Parity_Fail, FalseAlmCnt->Cnt_Rate_Illegal));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
+ ("Cnt_Crc8_fail=%d, Cnt_Mcs_fail=%d\n",
+ FalseAlmCnt->Cnt_Crc8_fail, FalseAlmCnt->Cnt_Mcs_fail));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Cnt_Cck_fail=%d\n", FalseAlmCnt->Cnt_Cck_fail));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Cnt_Ofdm_fail=%d\n", FalseAlmCnt->Cnt_Ofdm_fail));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Total False Alarm=%d\n", FalseAlmCnt->Cnt_all));
@@ -1077,26 +911,11 @@ void odm_DynamicBBPowerSavingInit(struct odm_dm_struct *pDM_Odm)
void odm_DynamicBBPowerSaving(struct odm_dm_struct *pDM_Odm)
{
- if ((pDM_Odm->SupportICType != ODM_RTL8192C) && (pDM_Odm->SupportICType != ODM_RTL8723A))
- return;
- if (!(pDM_Odm->SupportAbility & ODM_BB_PWR_SAVE))
- return;
- if (!(pDM_Odm->SupportPlatform & (ODM_MP|ODM_CE)))
- return;
-
- /* 1 2.Power Saving for 92C */
- if ((pDM_Odm->SupportICType == ODM_RTL8192C) && (pDM_Odm->RFType == ODM_2T2R)) {
- odm_1R_CCA(pDM_Odm);
- } else {
- /* 20100628 Joseph: Turn off BB power save for 88CE because it makesthroughput unstable. */
- /* 20100831 Joseph: Turn ON BB power save again after modifying AGC delay from 900ns ot 600ns. */
- /* 1 3.Power Saving for 88C */
- ODM_RF_Saving(pDM_Odm, false);
- }
}
void odm_1R_CCA(struct odm_dm_struct *pDM_Odm)
{
+ struct adapter *adapter = pDM_Odm->Adapter;
struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
if (pDM_Odm->RSSI_Min != 0xFF) {
@@ -1118,11 +937,11 @@ void odm_1R_CCA(struct odm_dm_struct *pDM_Odm)
if (pDM_PSTable->PreCCAState != pDM_PSTable->CurCCAState) {
if (pDM_PSTable->CurCCAState == CCA_1R) {
if (pDM_Odm->RFType == ODM_2T2R)
- ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x13);
+ PHY_SetBBReg(adapter, 0xc04, bMaskByte0, 0x13);
else
- ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x23);
+ PHY_SetBBReg(adapter, 0xc04, bMaskByte0, 0x23);
} else {
- ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x33);
+ PHY_SetBBReg(adapter, 0xc04, bMaskByte0, 0x33);
}
pDM_PSTable->PreCCAState = pDM_PSTable->CurCCAState;
}
@@ -1130,6 +949,7 @@ void odm_1R_CCA(struct odm_dm_struct *pDM_Odm)
void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
{
+ struct adapter *adapter = pDM_Odm->Adapter;
struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
u8 Rssi_Up_bound = 30;
u8 Rssi_Low_bound = 25;
@@ -1139,10 +959,10 @@ void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
Rssi_Low_bound = 45;
}
if (pDM_PSTable->initialize == 0) {
- pDM_PSTable->Reg874 = (ODM_GetBBReg(pDM_Odm, 0x874, bMaskDWord)&0x1CC000)>>14;
- pDM_PSTable->RegC70 = (ODM_GetBBReg(pDM_Odm, 0xc70, bMaskDWord)&BIT3)>>3;
- pDM_PSTable->Reg85C = (ODM_GetBBReg(pDM_Odm, 0x85c, bMaskDWord)&0xFF000000)>>24;
- pDM_PSTable->RegA74 = (ODM_GetBBReg(pDM_Odm, 0xa74, bMaskDWord)&0xF000)>>12;
+ pDM_PSTable->Reg874 = (PHY_QueryBBReg(adapter, 0x874, bMaskDWord)&0x1CC000)>>14;
+ pDM_PSTable->RegC70 = (PHY_QueryBBReg(adapter, 0xc70, bMaskDWord)&BIT3)>>3;
+ pDM_PSTable->Reg85C = (PHY_QueryBBReg(adapter, 0x85c, bMaskDWord)&0xFF000000)>>24;
+ pDM_PSTable->RegA74 = (PHY_QueryBBReg(adapter, 0xa74, bMaskDWord)&0xF000)>>12;
pDM_PSTable->initialize = 1;
}
@@ -1168,26 +988,19 @@ void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
if (pDM_PSTable->PreRFState != pDM_PSTable->CurRFState) {
if (pDM_PSTable->CurRFState == RF_Save) {
- /* <tynli_note> 8723 RSSI report will be wrong. Set 0x874[5]=1 when enter BB power saving mode. */
- /* Suggested by SD3 Yu-Nan. 2011.01.20. */
- if (pDM_Odm->SupportICType == ODM_RTL8723A)
- ODM_SetBBReg(pDM_Odm, 0x874 , BIT5, 0x1); /* Reg874[5]=1b'1 */
- ODM_SetBBReg(pDM_Odm, 0x874 , 0x1C0000, 0x2); /* Reg874[20:18]=3'b010 */
- ODM_SetBBReg(pDM_Odm, 0xc70, BIT3, 0); /* RegC70[3]=1'b0 */
- ODM_SetBBReg(pDM_Odm, 0x85c, 0xFF000000, 0x63); /* Reg85C[31:24]=0x63 */
- ODM_SetBBReg(pDM_Odm, 0x874, 0xC000, 0x2); /* Reg874[15:14]=2'b10 */
- ODM_SetBBReg(pDM_Odm, 0xa74, 0xF000, 0x3); /* RegA75[7:4]=0x3 */
- ODM_SetBBReg(pDM_Odm, 0x818, BIT28, 0x0); /* Reg818[28]=1'b0 */
- ODM_SetBBReg(pDM_Odm, 0x818, BIT28, 0x1); /* Reg818[28]=1'b1 */
+ PHY_SetBBReg(adapter, 0x874 , 0x1C0000, 0x2); /* Reg874[20:18]=3'b010 */
+ PHY_SetBBReg(adapter, 0xc70, BIT3, 0); /* RegC70[3]=1'b0 */
+ PHY_SetBBReg(adapter, 0x85c, 0xFF000000, 0x63); /* Reg85C[31:24]=0x63 */
+ PHY_SetBBReg(adapter, 0x874, 0xC000, 0x2); /* Reg874[15:14]=2'b10 */
+ PHY_SetBBReg(adapter, 0xa74, 0xF000, 0x3); /* RegA75[7:4]=0x3 */
+ PHY_SetBBReg(adapter, 0x818, BIT28, 0x0); /* Reg818[28]=1'b0 */
+ PHY_SetBBReg(adapter, 0x818, BIT28, 0x1); /* Reg818[28]=1'b1 */
} else {
- ODM_SetBBReg(pDM_Odm, 0x874 , 0x1CC000, pDM_PSTable->Reg874);
- ODM_SetBBReg(pDM_Odm, 0xc70, BIT3, pDM_PSTable->RegC70);
- ODM_SetBBReg(pDM_Odm, 0x85c, 0xFF000000, pDM_PSTable->Reg85C);
- ODM_SetBBReg(pDM_Odm, 0xa74, 0xF000, pDM_PSTable->RegA74);
- ODM_SetBBReg(pDM_Odm, 0x818, BIT28, 0x0);
-
- if (pDM_Odm->SupportICType == ODM_RTL8723A)
- ODM_SetBBReg(pDM_Odm, 0x874, BIT5, 0x0); /* Reg874[5]=1b'0 */
+ PHY_SetBBReg(adapter, 0x874 , 0x1CC000, pDM_PSTable->Reg874);
+ PHY_SetBBReg(adapter, 0xc70, BIT3, pDM_PSTable->RegC70);
+ PHY_SetBBReg(adapter, 0x85c, 0xFF000000, pDM_PSTable->Reg85C);
+ PHY_SetBBReg(adapter, 0xa74, 0xF000, pDM_PSTable->RegA74);
+ PHY_SetBBReg(adapter, 0x818, BIT28, 0x0);
}
pDM_PSTable->PreRFState = pDM_PSTable->CurRFState;
}
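Aside: ODM_RF_Saving() above captures each power-save field once as (reg & mask) >> shift and later hands the saved value back through the same mask. A small sketch of that masked-field round trip, using the Reg874 field as the example; the macro and function names are illustrative, and this is not the PHY_SetBBReg() implementation.

#include <linux/types.h>

/* Illustrative names; the driver hard-codes these masks and shifts inline. */
#define PS_REG874_MASK	0x1CC000
#define PS_REG874_SHIFT	14

static inline u32 ps_reg874_field_get(u32 reg874)
{
	return (reg874 & PS_REG874_MASK) >> PS_REG874_SHIFT;
}

static inline u32 ps_reg874_field_merge(u32 reg874, u32 saved)
{
	return (reg874 & ~PS_REG874_MASK) |
	       ((saved << PS_REG874_SHIFT) & PS_REG874_MASK);
}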
@@ -1316,18 +1129,7 @@ void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm)
/* at the same time. In the stage2/3, we need to provide universal interface and merge all */
/* HW dynamic mechanism. */
/* */
- switch (pDM_Odm->SupportPlatform) {
- case ODM_MP:
- odm_RefreshRateAdaptiveMaskMP(pDM_Odm);
- break;
- case ODM_CE:
- odm_RefreshRateAdaptiveMaskCE(pDM_Odm);
- break;
- case ODM_AP:
- case ODM_ADSL:
- odm_RefreshRateAdaptiveMaskAPADSL(pDM_Odm);
- break;
- }
+ odm_RefreshRateAdaptiveMaskCE(pDM_Odm);
}
void odm_RefreshRateAdaptiveMaskMP(struct odm_dm_struct *pDM_Odm)
@@ -1440,32 +1242,11 @@ void odm_DynamicTxPower(struct odm_dm_struct *pDM_Odm)
/* 2011/09/29 MH In HW integration first stage, we provide 4 different handle to operate */
/* at the same time. In the stage2/3, we need to provide universal interface and merge all */
/* HW dynamic mechanism. */
- switch (pDM_Odm->SupportPlatform) {
- case ODM_MP:
- case ODM_CE:
- odm_DynamicTxPowerNIC(pDM_Odm);
- break;
- case ODM_AP:
- odm_DynamicTxPowerAP(pDM_Odm);
- break;
- case ODM_ADSL:
- break;
- }
+ odm_DynamicTxPowerNIC(pDM_Odm);
}
void odm_DynamicTxPowerNIC(struct odm_dm_struct *pDM_Odm)
{
- if (!(pDM_Odm->SupportAbility & ODM_BB_DYNAMIC_TXPWR))
- return;
-
- if (pDM_Odm->SupportICType == ODM_RTL8188E) {
- /* ??? */
- /* This part need to be redefined. */
- }
-}
-
-void odm_DynamicTxPowerAP(struct odm_dm_struct *pDM_Odm)
-{
}
/* 3============================================================ */
@@ -1482,27 +1263,9 @@ void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm)
/* at the same time. In the stage2/3, we need to provide universal interface and merge all */
/* HW dynamic mechanism. */
/* */
- switch (pDM_Odm->SupportPlatform) {
- case ODM_MP:
- odm_RSSIMonitorCheckMP(pDM_Odm);
- break;
- case ODM_CE:
- odm_RSSIMonitorCheckCE(pDM_Odm);
- break;
- case ODM_AP:
- odm_RSSIMonitorCheckAP(pDM_Odm);
- break;
- case ODM_ADSL:
- /* odm_DIGAP(pDM_Odm); */
- break;
- }
-
+ odm_RSSIMonitorCheckCE(pDM_Odm);
} /* odm_RSSIMonitorCheck */
-void odm_RSSIMonitorCheckMP(struct odm_dm_struct *pDM_Odm)
-{
-}
-
static void FindMinimumRSSI(struct adapter *pAdapter)
{
struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
@@ -1575,28 +1338,6 @@ void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
ODM_CmnInfoUpdate(&pHalData->odmpriv , ODM_CMNINFO_RSSI_MIN, pdmpriv->MinUndecoratedPWDBForDM);
}
-void odm_RSSIMonitorCheckAP(struct odm_dm_struct *pDM_Odm)
-{
-}
-
-void ODM_InitAllTimers(struct odm_dm_struct *pDM_Odm)
-{
- ODM_InitializeTimer(pDM_Odm, &pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer,
- (void *)odm_SwAntDivChkAntSwitchCallback, NULL, "SwAntennaSwitchTimer");
-}
-
-void ODM_CancelAllTimers(struct odm_dm_struct *pDM_Odm)
-{
- ODM_CancelTimer(pDM_Odm, &pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer);
-}
-
-void ODM_ReleaseAllTimers(struct odm_dm_struct *pDM_Odm)
-{
- ODM_ReleaseTimer(pDM_Odm, &pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer);
-
- ODM_ReleaseTimer(pDM_Odm, &pDM_Odm->FastAntTrainingTimer);
-}
-
/* 3============================================================ */
/* 3 Tx Power Tracking */
/* 3============================================================ */
@@ -1623,19 +1364,7 @@ void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm)
/* 2011/09/29 MH In HW integration first stage, we provide 4 different handle to operate */
/* at the same time. In the stage2/3, we need to prive universal interface and merge all */
/* HW dynamic mechanism. */
- switch (pDM_Odm->SupportPlatform) {
- case ODM_MP:
- odm_TXPowerTrackingCheckMP(pDM_Odm);
- break;
- case ODM_CE:
- odm_TXPowerTrackingCheckCE(pDM_Odm);
- break;
- case ODM_AP:
- odm_TXPowerTrackingCheckAP(pDM_Odm);
- break;
- case ODM_ADSL:
- break;
- }
+ odm_TXPowerTrackingCheckCE(pDM_Odm);
}
void odm_TXPowerTrackingCheckCE(struct odm_dm_struct *pDM_Odm)
@@ -1656,14 +1385,6 @@ void odm_TXPowerTrackingCheckCE(struct odm_dm_struct *pDM_Odm)
}
}
-void odm_TXPowerTrackingCheckMP(struct odm_dm_struct *pDM_Odm)
-{
-}
-
-void odm_TXPowerTrackingCheckAP(struct odm_dm_struct *pDM_Odm)
-{
-}
-
/* antenna mapping info */
/* 1: right-side antenna */
/* 2/0: left-side antenna */
@@ -1675,21 +1396,6 @@ void odm_TXPowerTrackingCheckAP(struct odm_dm_struct *pDM_Odm)
/* 3============================================================ */
/* 3 SW Antenna Diversity */
/* 3============================================================ */
-void odm_SwAntDivInit(struct odm_dm_struct *pDM_Odm)
-{
-}
-
-void ODM_SwAntDivChkPerPktRssi(struct odm_dm_struct *pDM_Odm, u8 StationID, struct odm_phy_status_info *pPhyInfo)
-{
-}
-
-void odm_SwAntDivChkAntSwitch(struct odm_dm_struct *pDM_Odm, u8 Step)
-{
-}
-
-void ODM_SwAntDivRestAfterLink(struct odm_dm_struct *pDM_Odm)
-{
-}
void odm_SwAntDivChkAntSwitchCallback(void *FunctionContext)
{
@@ -1706,31 +1412,7 @@ void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm)
return;
}
- if (pDM_Odm->SupportICType & (ODM_RTL8192C | ODM_RTL8192D))
- ;
- else if (pDM_Odm->SupportICType == ODM_RTL8188E)
- ODM_AntennaDiversityInit_88E(pDM_Odm);
-}
-
-void ODM_AntselStatistics_88C(struct odm_dm_struct *pDM_Odm, u8 MacId, u32 PWDBAll, bool isCCKrate)
-{
- struct sw_ant_switch *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
-
- if (pDM_SWAT_Table->antsel == 1) {
- if (isCCKrate) {
- pDM_SWAT_Table->CCK_Ant1_Cnt[MacId]++;
- } else {
- pDM_SWAT_Table->OFDM_Ant1_Cnt[MacId]++;
- pDM_SWAT_Table->RSSI_Ant1_Sum[MacId] += PWDBAll;
- }
- } else {
- if (isCCKrate) {
- pDM_SWAT_Table->CCK_Ant2_Cnt[MacId]++;
- } else {
- pDM_SWAT_Table->OFDM_Ant2_Cnt[MacId]++;
- pDM_SWAT_Table->RSSI_Ant2_Sum[MacId] += PWDBAll;
- }
- }
+ ODM_AntennaDiversityInit_88E(pDM_Odm);
}
void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
@@ -1740,8 +1422,7 @@ void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
return;
}
- if (pDM_Odm->SupportICType == ODM_RTL8188E)
- ODM_AntennaDiversity_88E(pDM_Odm);
+ ODM_AntennaDiversity_88E(pDM_Odm);
}
/* EDCA Turbo */
@@ -1768,16 +1449,7 @@ void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
if (!(pDM_Odm->SupportAbility & ODM_MAC_EDCA_TURBO))
return;
- switch (pDM_Odm->SupportPlatform) {
- case ODM_MP:
- break;
- case ODM_CE:
- odm_EdcaTurboCheckCE(pDM_Odm);
- break;
- case ODM_AP:
- case ODM_ADSL:
- break;
- }
+ odm_EdcaTurboCheckCE(pDM_Odm);
ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("<========================odm_EdcaTurboCheck\n"));
} /* odm_CheckEdcaTurbo */
@@ -1855,29 +1527,6 @@ dm_CheckEdcaTurbo_EXIT:
precvpriv->last_rx_bytes = precvpriv->rx_bytes;
}
-/* need to ODM CE Platform */
-/* move to here for ANT detection mechanism using */
-
-u32 GetPSDData(struct odm_dm_struct *pDM_Odm, unsigned int point, u8 initial_gain_psd)
-{
- u32 psd_report;
-
- /* Set DCO frequency index, offset=(40MHz/SamplePts)*point */
- ODM_SetBBReg(pDM_Odm, 0x808, 0x3FF, point);
-
- /* Start PSD calculation, Reg808[22]=0->1 */
- ODM_SetBBReg(pDM_Odm, 0x808, BIT22, 1);
- /* Need to wait for HW PSD report */
- ODM_StallExecution(30);
- ODM_SetBBReg(pDM_Odm, 0x808, BIT22, 0);
- /* Read PSD report, Reg8B4[15:0] */
- psd_report = ODM_GetBBReg(pDM_Odm, 0x8B4, bMaskDWord) & 0x0000FFFF;
-
- psd_report = (u32) (ConvertTo_dB(psd_report))+(u32)(initial_gain_psd-0x1c);
-
- return psd_report;
-}
-
u32 ConvertTo_dB(u32 Value)
{
u8 i;
@@ -1902,270 +1551,3 @@ u32 ConvertTo_dB(u32 Value)
return dB;
}
-
-/* 2011/09/22 MH Add for 92D global spin lock utilization. */
-void odm_GlobalAdapterCheck(void)
-{
-} /* odm_GlobalAdapterCheck */
-
-/* Description: */
-/* Set Single/Dual Antenna default setting for products that do not do detection in advance. */
-/* Added by Joseph, 2012.03.22 */
-void ODM_SingleDualAntennaDefaultSetting(struct odm_dm_struct *pDM_Odm)
-{
- struct sw_ant_switch *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
-
- pDM_SWAT_Table->ANTA_ON = true;
- pDM_SWAT_Table->ANTB_ON = true;
-}
-
-
-/* 2 8723A ANT DETECT */
-
-static void odm_PHY_SaveAFERegisters(struct odm_dm_struct *pDM_Odm, u32 *AFEReg, u32 *AFEBackup, u32 RegisterNum)
-{
- u32 i;
-
- /* RTPRINT(FINIT, INIT_IQK, ("Save ADDA parameters.\n")); */
- for (i = 0; i < RegisterNum; i++)
- AFEBackup[i] = ODM_GetBBReg(pDM_Odm, AFEReg[i], bMaskDWord);
-}
-
-static void odm_PHY_ReloadAFERegisters(struct odm_dm_struct *pDM_Odm, u32 *AFEReg, u32 *AFEBackup, u32 RegiesterNum)
-{
- u32 i;
-
- for (i = 0; i < RegiesterNum; i++)
- ODM_SetBBReg(pDM_Odm, AFEReg[i], bMaskDWord, AFEBackup[i]);
-}
-
-/* 2 8723A ANT DETECT */
-/* Description: */
-/* Implement IQK single tone for RF DPK loopback and BB PSD scanning. */
-/* This function is cooperated with BB team Neil. */
-bool ODM_SingleDualAntennaDetection(struct odm_dm_struct *pDM_Odm, u8 mode)
-{
- struct sw_ant_switch *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
- u32 CurrentChannel, RfLoopReg;
- u8 n;
- u32 Reg88c, Regc08, Reg874, Regc50;
- u8 initial_gain = 0x5a;
- u32 PSD_report_tmp;
- u32 AntA_report = 0x0, AntB_report = 0x0, AntO_report = 0x0;
- bool bResult = true;
- u32 AFE_Backup[16];
- u32 AFE_REG_8723A[16] = {
- rRx_Wait_CCA, rTx_CCK_RFON,
- rTx_CCK_BBON, rTx_OFDM_RFON,
- rTx_OFDM_BBON, rTx_To_Rx,
- rTx_To_Tx, rRx_CCK,
- rRx_OFDM, rRx_Wait_RIFS,
- rRx_TO_Rx, rStandby,
- rSleep, rPMPD_ANAEN,
- rFPGA0_XCD_SwitchControl, rBlue_Tooth};
-
- if (!(pDM_Odm->SupportICType & (ODM_RTL8723A|ODM_RTL8192C)))
- return bResult;
-
- if (!(pDM_Odm->SupportAbility&ODM_BB_ANT_DIV))
- return bResult;
-
- if (pDM_Odm->SupportICType == ODM_RTL8192C) {
- /* Which path in ADC/DAC is turnned on for PSD: both I/Q */
- ODM_SetBBReg(pDM_Odm, 0x808, BIT10|BIT11, 0x3);
- /* Ageraged number: 8 */
- ODM_SetBBReg(pDM_Odm, 0x808, BIT12|BIT13, 0x1);
- /* pts = 128; */
- ODM_SetBBReg(pDM_Odm, 0x808, BIT14|BIT15, 0x0);
- }
-
- /* 1 Backup Current RF/BB Settings */
-
- CurrentChannel = ODM_GetRFReg(pDM_Odm, RF_PATH_A, ODM_CHANNEL, bRFRegOffsetMask);
- RfLoopReg = ODM_GetRFReg(pDM_Odm, RF_PATH_A, 0x00, bRFRegOffsetMask);
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, ODM_DPDT, Antenna_A); /* change to Antenna A */
- /* Step 1: USE IQK to transmitter single tone */
-
- ODM_StallExecution(10);
-
- /* Store A Path Register 88c, c08, 874, c50 */
- Reg88c = ODM_GetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord);
- Regc08 = ODM_GetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord);
- Reg874 = ODM_GetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord);
- Regc50 = ODM_GetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, bMaskDWord);
-
- /* Store AFE Registers */
- odm_PHY_SaveAFERegisters(pDM_Odm, AFE_REG_8723A, AFE_Backup, 16);
-
- /* Set PSD 128 pts */
- ODM_SetBBReg(pDM_Odm, rFPGA0_PSDFunction, BIT14|BIT15, 0x0); /* 128 pts */
-
- /* To SET CH1 to do */
- ODM_SetRFReg(pDM_Odm, RF_PATH_A, ODM_CHANNEL, bRFRegOffsetMask, 0x01); /* Channel 1 */
-
- /* AFE all on step */
- ODM_SetBBReg(pDM_Odm, rRx_Wait_CCA, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_CCK_RFON, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_CCK_BBON, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_OFDM_RFON, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_OFDM_BBON, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_To_Rx, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_To_Tx, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rRx_CCK, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rRx_OFDM, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rRx_Wait_RIFS, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rRx_TO_Rx, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rStandby, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rSleep, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rPMPD_ANAEN, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_SwitchControl, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rBlue_Tooth, bMaskDWord, 0x6FDB25A4);
-
- /* 3 wire Disable */
- ODM_SetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord, 0xCCF000C0);
-
- /* BB IQK Setting */
- ODM_SetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord, 0x000800E4);
- ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22208000);
-
- /* IQK setting tone@ 4.34Mhz */
- ODM_SetBBReg(pDM_Odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008C1C);
- ODM_SetBBReg(pDM_Odm, rTx_IQK, bMaskDWord, 0x01007c00);
-
-
- /* Page B init */
- ODM_SetBBReg(pDM_Odm, rConfig_AntA, bMaskDWord, 0x00080000);
- ODM_SetBBReg(pDM_Odm, rConfig_AntA, bMaskDWord, 0x0f600000);
- ODM_SetBBReg(pDM_Odm, rRx_IQK, bMaskDWord, 0x01004800);
- ODM_SetBBReg(pDM_Odm, rRx_IQK_Tone_A, bMaskDWord, 0x10008c1f);
- ODM_SetBBReg(pDM_Odm, rTx_IQK_PI_A, bMaskDWord, 0x82150008);
- ODM_SetBBReg(pDM_Odm, rRx_IQK_PI_A, bMaskDWord, 0x28150008);
- ODM_SetBBReg(pDM_Odm, rIQK_AGC_Rsp, bMaskDWord, 0x001028d0);
-
- /* RF loop Setting */
- ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x0, 0xFFFFF, 0x50008);
-
- /* IQK Single tone start */
- ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
- ODM_SetBBReg(pDM_Odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
- ODM_StallExecution(1000);
- PSD_report_tmp = 0x0;
-
- for (n = 0; n < 2; n++) {
- PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
- if (PSD_report_tmp > AntA_report)
- AntA_report = PSD_report_tmp;
- }
-
- PSD_report_tmp = 0x0;
-
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_B); /* change to Antenna B */
- ODM_StallExecution(10);
-
-
- for (n = 0; n < 2; n++) {
- PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
- if (PSD_report_tmp > AntB_report)
- AntB_report = PSD_report_tmp;
- }
-
- /* change to open case */
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, 0); /* change to Ant A and B all open case */
- ODM_StallExecution(10);
-
- for (n = 0; n < 2; n++) {
- PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
- if (PSD_report_tmp > AntO_report)
- AntO_report = PSD_report_tmp;
- }
-
- /* Close IQK Single Tone function */
- ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
- PSD_report_tmp = 0x0;
-
- /* 1 Return to antanna A */
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_A);
- ODM_SetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord, Reg88c);
- ODM_SetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord, Regc08);
- ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, Reg874);
- ODM_SetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, 0x7F, 0x40);
- ODM_SetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, bMaskDWord, Regc50);
- ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, CurrentChannel);
- ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x00, bRFRegOffsetMask, RfLoopReg);
-
- /* Reload AFE Registers */
- odm_PHY_ReloadAFERegisters(pDM_Odm, AFE_REG_8723A, AFE_Backup, 16);
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("psd_report_A[%d]= %d\n", 2416, AntA_report));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("psd_report_B[%d]= %d\n", 2416, AntB_report));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("psd_report_O[%d]= %d\n", 2416, AntO_report));
-
-
- if (pDM_Odm->SupportICType == ODM_RTL8723A) {
- /* 2 Test Ant B based on Ant A is ON */
- if (mode == ANTTESTB) {
- if (AntA_report >= 100) {
- if (AntB_report > (AntA_report+1)) {
- pDM_SWAT_Table->ANTB_ON = false;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Single Antenna A\n"));
- } else {
- pDM_SWAT_Table->ANTB_ON = true;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Dual Antenna is A and B\n"));
- }
- } else {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Need to check again\n"));
- pDM_SWAT_Table->ANTB_ON = false; /* Set Antenna B off as default */
- bResult = false;
- }
- } else if (mode == ANTTESTALL) {
- /* 2 Test Ant A and B based on DPDT Open */
- if ((AntO_report >= 100)&(AntO_report < 118)) {
- if (AntA_report > (AntO_report+1)) {
- pDM_SWAT_Table->ANTA_ON = false;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant A is OFF"));
- } else {
- pDM_SWAT_Table->ANTA_ON = true;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant A is ON"));
- }
-
- if (AntB_report > (AntO_report+2)) {
- pDM_SWAT_Table->ANTB_ON = false;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant B is OFF"));
- } else {
- pDM_SWAT_Table->ANTB_ON = true;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant B is ON"));
- }
- }
- }
- } else if (pDM_Odm->SupportICType == ODM_RTL8192C) {
- if (AntA_report >= 100) {
- if (AntB_report > (AntA_report+2)) {
- pDM_SWAT_Table->ANTA_ON = false;
- pDM_SWAT_Table->ANTB_ON = true;
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_B);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Single Antenna B\n"));
- } else if (AntA_report > (AntB_report+2)) {
- pDM_SWAT_Table->ANTA_ON = true;
- pDM_SWAT_Table->ANTB_ON = false;
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_A);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Single Antenna A\n"));
- } else {
- pDM_SWAT_Table->ANTA_ON = true;
- pDM_SWAT_Table->ANTB_ON = true;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
- ("ODM_SingleDualAntennaDetection(): Dual Antenna\n"));
- }
- } else {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Need to check again\n"));
- pDM_SWAT_Table->ANTA_ON = true; /* Set Antenna A on as default */
- pDM_SWAT_Table->ANTB_ON = false; /* Set Antenna B off as default */
- bResult = false;
- }
- }
- return bResult;
-}
-
-/* Justin: According to the current RRSI to adjust Response Frame TX power, 2012/11/05 */
-void odm_dtc(struct odm_dm_struct *pDM_Odm)
-{
-}
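For reference while reviewing the removal above, the decision rule that the deleted ODM_SingleDualAntennaDetection() applied in ANTTESTB mode reduces to the comparison below. Thresholds are copied from the deleted code; the standalone helper is a sketch for illustration only, not part of the driver:

/* Sketch of the ANTTESTB decision removed above.  Antenna B is treated as
 * absent when its PSD report exceeds antenna A's by more than 1 (report
 * units as read back from GetPSDData()).  Returns true when the
 * measurement is usable, mirroring bResult in the deleted function. */
static bool ant_test_b_sketch(u32 ant_a_report, u32 ant_b_report, bool *antb_on)
{
	if (ant_a_report < 100) {	/* signal too weak: needs a retest */
		*antb_on = false;
		return false;
	}
	*antb_on = !(ant_b_report > ant_a_report + 1);
	return true;
}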
diff --git a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
index 19c509a2bebf..a755df35ec7d 100644
--- a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
@@ -39,61 +39,32 @@ static u8 odm_QueryRxPwrPercentage(s8 AntPower)
/* 2012/01/12 MH MOve some signal strength smooth method to MP HAL layer. */
/* IF other SW team do not support the feature, remove this section.?? */
-static s32 odm_sig_patch_lenove(struct odm_dm_struct *dm_odm, s32 CurrSig)
-{
- return 0;
-}
-
-static s32 odm_sig_patch_netcore(struct odm_dm_struct *dm_odm, s32 CurrSig)
-{
- return 0;
-}
-
static s32 odm_SignalScaleMapping_92CSeries(struct odm_dm_struct *dm_odm, s32 CurrSig)
{
s32 RetSig = 0;
- if ((dm_odm->SupportInterface == ODM_ITRF_USB) ||
- (dm_odm->SupportInterface == ODM_ITRF_SDIO)) {
- if (CurrSig >= 51 && CurrSig <= 100)
- RetSig = 100;
- else if (CurrSig >= 41 && CurrSig <= 50)
- RetSig = 80 + ((CurrSig - 40)*2);
- else if (CurrSig >= 31 && CurrSig <= 40)
- RetSig = 66 + (CurrSig - 30);
- else if (CurrSig >= 21 && CurrSig <= 30)
- RetSig = 54 + (CurrSig - 20);
- else if (CurrSig >= 10 && CurrSig <= 20)
- RetSig = 42 + (((CurrSig - 10) * 2) / 3);
- else if (CurrSig >= 5 && CurrSig <= 9)
- RetSig = 22 + (((CurrSig - 5) * 3) / 2);
- else if (CurrSig >= 1 && CurrSig <= 4)
- RetSig = 6 + (((CurrSig - 1) * 3) / 2);
- else
- RetSig = CurrSig;
- }
+ if (CurrSig >= 51 && CurrSig <= 100)
+ RetSig = 100;
+ else if (CurrSig >= 41 && CurrSig <= 50)
+ RetSig = 80 + ((CurrSig - 40)*2);
+ else if (CurrSig >= 31 && CurrSig <= 40)
+ RetSig = 66 + (CurrSig - 30);
+ else if (CurrSig >= 21 && CurrSig <= 30)
+ RetSig = 54 + (CurrSig - 20);
+ else if (CurrSig >= 10 && CurrSig <= 20)
+ RetSig = 42 + (((CurrSig - 10) * 2) / 3);
+ else if (CurrSig >= 5 && CurrSig <= 9)
+ RetSig = 22 + (((CurrSig - 5) * 3) / 2);
+ else if (CurrSig >= 1 && CurrSig <= 4)
+ RetSig = 6 + (((CurrSig - 1) * 3) / 2);
+ else
+ RetSig = CurrSig;
return RetSig;
}
static s32 odm_SignalScaleMapping(struct odm_dm_struct *dm_odm, s32 CurrSig)
{
- if ((dm_odm->SupportPlatform == ODM_MP) &&
- (dm_odm->SupportInterface != ODM_ITRF_PCIE) && /* USB & SDIO */
- (dm_odm->PatchID == 10))
- return odm_sig_patch_netcore(dm_odm, CurrSig);
- else if ((dm_odm->SupportPlatform == ODM_MP) &&
- (dm_odm->SupportInterface == ODM_ITRF_PCIE) &&
- (dm_odm->PatchID == 19))
- return odm_sig_patch_lenove(dm_odm, CurrSig);
- else
- return odm_SignalScaleMapping_92CSeries(dm_odm, CurrSig);
-}
-
-/* pMgntInfo->CustomerID == RT_CID_819x_Lenovo */
-static u8 odm_SQ_process_patch_RT_CID_819x_Lenovo(struct odm_dm_struct *dm_odm,
- u8 isCCKrate, u8 PWDB_ALL, u8 path, u8 RSSI)
-{
- return 0;
+ return odm_SignalScaleMapping_92CSeries(dm_odm, CurrSig);
}
static u8 odm_EVMdbToPercentage(s8 Value)
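The scaling retained in odm_SignalScaleMapping_92CSeries() above is a fixed piecewise map from the raw 0-100 signal figure to the reported percentage, now applied for every interface type. A pure-function sketch with the same arithmetic (illustrative name only) is:

/* Sketch: same piecewise mapping as odm_SignalScaleMapping_92CSeries(). */
static s32 scale_signal_sketch(s32 sig)
{
	if (sig >= 51 && sig <= 100)
		return 100;
	if (sig >= 41)
		return 80 + (sig - 40) * 2;
	if (sig >= 31)
		return 66 + (sig - 30);
	if (sig >= 21)
		return 54 + (sig - 20);
	if (sig >= 10)
		return 42 + ((sig - 10) * 2) / 3;
	if (sig >= 5)
		return 22 + ((sig - 5) * 3) / 2;
	if (sig >= 1)
		return 6 + ((sig - 1) * 3) / 2;
	return sig;
}

For example, a raw figure of 15 maps to 42 + (5 * 2) / 3 = 45.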
@@ -135,11 +106,10 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
- pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_A] = -1;
- pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_B] = -1;
+ pPhyInfo->RxMIMOSignalQuality[RF_PATH_A] = -1;
+ pPhyInfo->RxMIMOSignalQuality[RF_PATH_B] = -1;
if (isCCKrate) {
- u8 report;
u8 cck_agc_rpt;
dm_odm->PhyDbgInfo.NumQryPhyStatusCCK++;
@@ -153,113 +123,51 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
/* 2011.11.28 LukeLee: 88E use different LNA & VGA gain table */
/* The RSSI formula should be modified according to the gain table */
/* In 88E, cck_highpwr is always set to 1 */
- if (dm_odm->SupportICType & (ODM_RTL8188E|ODM_RTL8812)) {
- LNA_idx = ((cck_agc_rpt & 0xE0) >> 5);
- VGA_idx = (cck_agc_rpt & 0x1F);
- switch (LNA_idx) {
- case 7:
- if (VGA_idx <= 27)
- rx_pwr_all = -100 + 2*(27-VGA_idx); /* VGA_idx = 27~2 */
- else
- rx_pwr_all = -100;
- break;
- case 6:
- rx_pwr_all = -48 + 2*(2-VGA_idx); /* VGA_idx = 2~0 */
- break;
- case 5:
- rx_pwr_all = -42 + 2*(7-VGA_idx); /* VGA_idx = 7~5 */
- break;
- case 4:
- rx_pwr_all = -36 + 2*(7-VGA_idx); /* VGA_idx = 7~4 */
- break;
- case 3:
- rx_pwr_all = -24 + 2*(7-VGA_idx); /* VGA_idx = 7~0 */
- break;
- case 2:
- if (cck_highpwr)
- rx_pwr_all = -12 + 2*(5-VGA_idx); /* VGA_idx = 5~0 */
- else
- rx_pwr_all = -6 + 2*(5-VGA_idx);
- break;
- case 1:
- rx_pwr_all = 8-2*VGA_idx;
- break;
- case 0:
- rx_pwr_all = 14-2*VGA_idx;
- break;
- default:
- break;
- }
- rx_pwr_all += 6;
- PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
- if (!cck_highpwr) {
- if (PWDB_ALL >= 80)
- PWDB_ALL = ((PWDB_ALL-80)<<1)+((PWDB_ALL-80)>>1)+80;
- else if ((PWDB_ALL <= 78) && (PWDB_ALL >= 20))
- PWDB_ALL += 3;
- if (PWDB_ALL > 100)
- PWDB_ALL = 100;
- }
- } else {
- if (!cck_highpwr) {
- report = (cck_agc_rpt & 0xc0)>>6;
- switch (report) {
- /* 03312009 modified by cosa */
- /* Modify the RF RNA gain value to -40, -20, -2, 14 by Jenyu's suggestion */
- /* Note: different RF with the different RNA gain. */
- case 0x3:
- rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
- break;
- case 0x2:
- rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
- break;
- case 0x1:
- rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
- break;
- case 0x0:
- rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
- break;
- }
- } else {
- report = (cck_agc_rpt & 0x60)>>5;
- switch (report) {
- case 0x3:
- rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f)<<1) ;
- break;
- case 0x2:
- rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f)<<1);
- break;
- case 0x1:
- rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f)<<1);
- break;
- case 0x0:
- rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f)<<1);
- break;
- }
- }
-
- PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
-
- /* Modification for ext-LNA board */
- if (dm_odm->BoardType == ODM_BOARD_HIGHPWR) {
- if ((cck_agc_rpt>>7) == 0) {
- PWDB_ALL = (PWDB_ALL > 94) ? 100 : (PWDB_ALL+6);
- } else {
- if (PWDB_ALL > 38)
- PWDB_ALL -= 16;
- else
- PWDB_ALL = (PWDB_ALL <= 16) ? (PWDB_ALL>>2) : (PWDB_ALL-12);
- }
-
- /* CCK modification */
- if (PWDB_ALL > 25 && PWDB_ALL <= 60)
- PWDB_ALL += 6;
- } else {/* Modification for int-LNA board */
- if (PWDB_ALL > 99)
- PWDB_ALL -= 8;
- else if (PWDB_ALL > 50 && PWDB_ALL <= 68)
- PWDB_ALL += 4;
- }
+ LNA_idx = ((cck_agc_rpt & 0xE0) >> 5);
+ VGA_idx = (cck_agc_rpt & 0x1F);
+ switch (LNA_idx) {
+ case 7:
+ if (VGA_idx <= 27)
+ rx_pwr_all = -100 + 2*(27-VGA_idx); /* VGA_idx = 27~2 */
+ else
+ rx_pwr_all = -100;
+ break;
+ case 6:
+ rx_pwr_all = -48 + 2*(2-VGA_idx); /* VGA_idx = 2~0 */
+ break;
+ case 5:
+ rx_pwr_all = -42 + 2*(7-VGA_idx); /* VGA_idx = 7~5 */
+ break;
+ case 4:
+ rx_pwr_all = -36 + 2*(7-VGA_idx); /* VGA_idx = 7~4 */
+ break;
+ case 3:
+ rx_pwr_all = -24 + 2*(7-VGA_idx); /* VGA_idx = 7~0 */
+ break;
+ case 2:
+ if (cck_highpwr)
+ rx_pwr_all = -12 + 2*(5-VGA_idx); /* VGA_idx = 5~0 */
+ else
+ rx_pwr_all = -6 + 2*(5-VGA_idx);
+ break;
+ case 1:
+ rx_pwr_all = 8-2*VGA_idx;
+ break;
+ case 0:
+ rx_pwr_all = 14-2*VGA_idx;
+ break;
+ default:
+ break;
+ }
+ rx_pwr_all += 6;
+ PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+ if (!cck_highpwr) {
+ if (PWDB_ALL >= 80)
+ PWDB_ALL = ((PWDB_ALL-80)<<1)+((PWDB_ALL-80)>>1)+80;
+ else if ((PWDB_ALL <= 78) && (PWDB_ALL >= 20))
+ PWDB_ALL += 3;
+ if (PWDB_ALL > 100)
+ PWDB_ALL = 100;
}
pPhyInfo->RxPWDBAll = PWDB_ALL;
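The 88E CCK RSSI path kept above derives a power estimate from the LNA and VGA indices packed into the AGC report byte. A small worked example of that arithmetic, with an input value chosen purely for illustration, is:

/* Worked example of the retained mapping (LNA_idx = 7 branch):
 *   cck_agc_rpt = 0xEA -> LNA_idx = (0xEA & 0xE0) >> 5 = 7
 *                         VGA_idx =  0xEA & 0x1F       = 10
 *   rx_pwr_all  = -100 + 2 * (27 - 10) = -66, then += 6  -> -60
 *   PWDB_ALL    = odm_QueryRxPwrPercentage(-60)   (percentage used by DM)
 */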
@@ -269,9 +177,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
if (pPktinfo->bPacketMatchBSSID) {
u8 SQ, SQ_rpt;
- if ((dm_odm->SupportPlatform == ODM_MP) && (dm_odm->PatchID == 19)) {
- SQ = odm_SQ_process_patch_RT_CID_819x_Lenovo(dm_odm, isCCKrate, PWDB_ALL, 0, 0);
- } else if (pPhyInfo->RxPWDBAll > 40 && !dm_odm->bInHctTest) {
+ if (pPhyInfo->RxPWDBAll > 40 && !dm_odm->bInHctTest) {
SQ = 100;
} else {
SQ_rpt = pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all;
@@ -284,15 +190,15 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
SQ = ((64-SQ_rpt) * 100) / 44;
}
pPhyInfo->SignalQuality = SQ;
- pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_A] = SQ;
- pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_B] = -1;
+ pPhyInfo->RxMIMOSignalQuality[RF_PATH_A] = SQ;
+ pPhyInfo->RxMIMOSignalQuality[RF_PATH_B] = -1;
}
} else { /* is OFDM rate */
dm_odm->PhyDbgInfo.NumQryPhyStatusOFDM++;
/* (1)Get RSSI for HT rate */
- for (i = ODM_RF_PATH_A; i < ODM_RF_PATH_MAX; i++) {
+ for (i = RF_PATH_A; i < RF_PATH_MAX; i++) {
/* 2008/01/30 MH we will judge RF RX path now. */
if (dm_odm->RFPathRxEnable & BIT(i))
rf_rx_num++;
@@ -321,14 +227,6 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
/* Get Rx snr value in DB */
pPhyInfo->RxSNR[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
dm_odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
-
- /* Record Signal Strength for next packet */
- if (pPktinfo->bPacketMatchBSSID) {
- if ((dm_odm->SupportPlatform == ODM_MP) && (dm_odm->PatchID == 19)) {
- if (i == ODM_RF_PATH_A)
- pPhyInfo->SignalQuality = odm_SQ_process_patch_RT_CID_819x_Lenovo(dm_odm, isCCKrate, PWDB_ALL, i, RSSI);
- }
- }
}
/* (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive) */
rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f) - 110;
@@ -341,26 +239,22 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
pPhyInfo->RxPower = rx_pwr_all;
pPhyInfo->RecvSignalPower = rx_pwr_all;
- if ((dm_odm->SupportPlatform == ODM_MP) && (dm_odm->PatchID == 19)) {
- /* do nothing */
- } else {
- /* (3)EVM of HT rate */
- if (pPktinfo->Rate >= DESC92C_RATEMCS8 && pPktinfo->Rate <= DESC92C_RATEMCS15)
- Max_spatial_stream = 2; /* both spatial stream make sense */
- else
- Max_spatial_stream = 1; /* only spatial stream 1 makes sense */
-
- for (i = 0; i < Max_spatial_stream; i++) {
- /* Do not use shift operation like "rx_evmX >>= 1" because the compilor of free build environment */
- /* fill most significant bit to "zero" when doing shifting operation which may change a negative */
- /* value to positive one, then the dbm value (which is supposed to be negative) is not correct anymore. */
- EVM = odm_EVMdbToPercentage((pPhyStaRpt->stream_rxevm[i])); /* dbm */
-
- if (pPktinfo->bPacketMatchBSSID) {
- if (i == ODM_RF_PATH_A) /* Fill value in RFD, Get the first spatial stream only */
- pPhyInfo->SignalQuality = (u8)(EVM & 0xff);
- pPhyInfo->RxMIMOSignalQuality[i] = (u8)(EVM & 0xff);
- }
+ /* (3)EVM of HT rate */
+ if (pPktinfo->Rate >= DESC92C_RATEMCS8 && pPktinfo->Rate <= DESC92C_RATEMCS15)
+ Max_spatial_stream = 2; /* both spatial stream make sense */
+ else
+ Max_spatial_stream = 1; /* only spatial stream 1 makes sense */
+
+ for (i = 0; i < Max_spatial_stream; i++) {
+ /* Do not use shift operation like "rx_evmX >>= 1" because the compiler of free build environment */
+ /* fill most significant bit to "zero" when doing shifting operation which may change a negative */
+ /* value to positive one, then the dbm value (which is supposed to be negative) is not correct anymore. */
+ EVM = odm_EVMdbToPercentage((pPhyStaRpt->stream_rxevm[i])); /* dbm */
+
+ if (pPktinfo->bPacketMatchBSSID) {
+ if (i == RF_PATH_A) /* Fill value in RFD, Get the first spatial stream only */
+ pPhyInfo->SignalQuality = (u8)(EVM & 0xff);
+ pPhyInfo->RxMIMOSignalQuality[i] = (u8)(EVM & 0xff);
}
}
}
@@ -396,6 +290,8 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
u32 OFDM_pkt = 0;
u32 Weighting = 0;
struct sta_info *pEntry;
+ u8 antsel_tr_mux;
+ struct fast_ant_train *pDM_FatTable = &dm_odm->DM_FatTable;
if (pPktinfo->StationID == 0xFF)
return;
@@ -408,27 +304,23 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
/* Smart Antenna Debug Message------------------ */
- if (dm_odm->SupportICType == ODM_RTL8188E) {
- u8 antsel_tr_mux;
- struct fast_ant_train *pDM_FatTable = &dm_odm->DM_FatTable;
-
- if (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV) {
- if (pDM_FatTable->FAT_State == FAT_TRAINING_STATE) {
- if (pPktinfo->bPacketToSelf) {
- antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2<<2) |
- (pDM_FatTable->antsel_rx_keep_1<<1) |
- pDM_FatTable->antsel_rx_keep_0;
- pDM_FatTable->antSumRSSI[antsel_tr_mux] += pPhyInfo->RxPWDBAll;
- pDM_FatTable->antRSSIcnt[antsel_tr_mux]++;
- }
- }
- } else if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
- if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
+
+ if (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV) {
+ if (pDM_FatTable->FAT_State == FAT_TRAINING_STATE) {
+ if (pPktinfo->bPacketToSelf) {
antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2<<2) |
- (pDM_FatTable->antsel_rx_keep_1<<1) | pDM_FatTable->antsel_rx_keep_0;
- ODM_AntselStatistics_88E(dm_odm, antsel_tr_mux, pPktinfo->StationID, pPhyInfo->RxPWDBAll);
+ (pDM_FatTable->antsel_rx_keep_1<<1) |
+ pDM_FatTable->antsel_rx_keep_0;
+ pDM_FatTable->antSumRSSI[antsel_tr_mux] += pPhyInfo->RxPWDBAll;
+ pDM_FatTable->antRSSIcnt[antsel_tr_mux]++;
}
}
+ } else if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
+ if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
+ antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2<<2) |
+ (pDM_FatTable->antsel_rx_keep_1<<1) | pDM_FatTable->antsel_rx_keep_0;
+ ODM_AntselStatistics_88E(dm_odm, antsel_tr_mux, pPktinfo->StationID, pPhyInfo->RxPWDBAll);
+ }
}
/* Smart Antenna Debug Message------------------ */
@@ -438,15 +330,15 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
if (!isCCKrate) { /* ofdm rate */
- if (pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B] == 0) {
- RSSI_Ave = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
+ if (pPhyInfo->RxMIMOSignalStrength[RF_PATH_B] == 0) {
+ RSSI_Ave = pPhyInfo->RxMIMOSignalStrength[RF_PATH_A];
} else {
- if (pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A] > pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B]) {
- RSSI_max = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
- RSSI_min = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B];
+ if (pPhyInfo->RxMIMOSignalStrength[RF_PATH_A] > pPhyInfo->RxMIMOSignalStrength[RF_PATH_B]) {
+ RSSI_max = pPhyInfo->RxMIMOSignalStrength[RF_PATH_A];
+ RSSI_min = pPhyInfo->RxMIMOSignalStrength[RF_PATH_B];
} else {
- RSSI_max = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B];
- RSSI_min = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
+ RSSI_max = pPhyInfo->RxMIMOSignalStrength[RF_PATH_B];
+ RSSI_min = pPhyInfo->RxMIMOSignalStrength[RF_PATH_A];
}
if ((RSSI_max - RSSI_min) < 3)
RSSI_Ave = RSSI_max;
@@ -531,9 +423,7 @@ static void ODM_PhyStatusQuery_92CSeries(struct odm_dm_struct *dm_odm,
odm_RxPhyStatus92CSeries_Parsing(dm_odm, pPhyInfo, pPhyStatus,
pPktinfo);
if (dm_odm->RSSI_test) {
- /* Select the packets to do RSSI checking for antenna switching. */
- if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon)
- ODM_SwAntDivChkPerPktRssi(dm_odm, pPktinfo->StationID, pPhyInfo);
+ ;/* Select the packets to do RSSI checking for antenna switching. */
} else {
odm_Process_RSSIForDM(dm_odm, pPhyInfo, pPktinfo);
}
@@ -555,16 +445,14 @@ void ODM_MacStatusQuery(struct odm_dm_struct *dm_odm, u8 *mac_stat,
}
enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *dm_odm,
- enum ODM_RF_RADIO_PATH content,
- enum ODM_RF_RADIO_PATH rfpath)
+ enum rf_radio_path content,
+ enum rf_radio_path rfpath)
{
ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, ("===>ODM_ConfigRFWithHeaderFile\n"));
- if (dm_odm->SupportICType == ODM_RTL8188E) {
- if (rfpath == ODM_RF_PATH_A)
- READ_AND_CONFIG(8188E, _RadioA_1T_);
- ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, (" ===> ODM_ConfigRFWithHeaderFile() Radio_A:Rtl8188ERadioA_1TArray\n"));
- ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, (" ===> ODM_ConfigRFWithHeaderFile() Radio_B:Rtl8188ERadioB_1TArray\n"));
- }
+ if (rfpath == RF_PATH_A)
+ READ_AND_CONFIG(8188E, _RadioA_1T_);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, (" ===> ODM_ConfigRFWithHeaderFile() Radio_A:Rtl8188ERadioA_1TArray\n"));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, (" ===> ODM_ConfigRFWithHeaderFile() Radio_B:Rtl8188ERadioB_1TArray\n"));
ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("ODM_ConfigRFWithHeaderFile: Radio No %x\n", rfpath));
return HAL_STATUS_SUCCESS;
@@ -573,16 +461,14 @@ enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *dm_odm,
enum HAL_STATUS ODM_ConfigBBWithHeaderFile(struct odm_dm_struct *dm_odm,
enum odm_bb_config_type config_tp)
{
- if (dm_odm->SupportICType == ODM_RTL8188E) {
- if (config_tp == CONFIG_BB_PHY_REG) {
- READ_AND_CONFIG(8188E, _PHY_REG_1T_);
- } else if (config_tp == CONFIG_BB_AGC_TAB) {
- READ_AND_CONFIG(8188E, _AGC_TAB_1T_);
- } else if (config_tp == CONFIG_BB_PHY_REG_PG) {
- READ_AND_CONFIG(8188E, _PHY_REG_PG_);
- ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD,
- (" ===> phy_ConfigBBWithHeaderFile() agc:Rtl8188EPHY_REG_PGArray\n"));
- }
+ if (config_tp == CONFIG_BB_PHY_REG) {
+ READ_AND_CONFIG(8188E, _PHY_REG_1T_);
+ } else if (config_tp == CONFIG_BB_AGC_TAB) {
+ READ_AND_CONFIG(8188E, _AGC_TAB_1T_);
+ } else if (config_tp == CONFIG_BB_PHY_REG_PG) {
+ READ_AND_CONFIG(8188E, _PHY_REG_PG_);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ (" ===> phy_ConfigBBWithHeaderFile() agc:Rtl8188EPHY_REG_PGArray\n"));
}
return HAL_STATUS_SUCCESS;
}
@@ -590,7 +476,6 @@ enum HAL_STATUS ODM_ConfigBBWithHeaderFile(struct odm_dm_struct *dm_odm,
enum HAL_STATUS ODM_ConfigMACWithHeaderFile(struct odm_dm_struct *dm_odm)
{
u8 result = HAL_STATUS_SUCCESS;
- if (dm_odm->SupportICType == ODM_RTL8188E)
- result = READ_AND_CONFIG(8188E, _MAC_REG_);
+ result = READ_AND_CONFIG(8188E, _MAC_REG_);
return result;
}
diff --git a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
index 58410f3e5316..323eb93be41e 100644
--- a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
+++ b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
@@ -34,73 +34,76 @@ void ODM_DIG_LowerBound_88E(struct odm_dm_struct *dm_odm)
static void odm_RX_HWAntDivInit(struct odm_dm_struct *dm_odm)
{
+ struct adapter *adapter = dm_odm->Adapter;
u32 value32;
if (*(dm_odm->mp_mode) == 1) {
dm_odm->AntDivType = CGCS_RX_SW_ANTDIV;
- ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT7, 0); /* disable HW AntDiv */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* 1:CG, 0:CS */
+ PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, BIT7, 0); /* disable HW AntDiv */
+ PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* 1:CG, 0:CS */
return;
}
ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("odm_RX_HWAntDivInit()\n"));
/* MAC Setting */
- value32 = ODM_GetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
- ODM_SetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ value32 = PHY_QueryBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
+ PHY_SetBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
/* Pin Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_PIN_CTRL_11N, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT22, 1); /* Regb2c[22]=1'b0 disable CS/CG switch */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
+ PHY_SetBBReg(adapter, ODM_REG_PIN_CTRL_11N, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT22, 1); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
/* OFDM Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
+ PHY_SetBBReg(adapter, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
/* CCK Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_BB_PWR_SAV4_11N, BIT7, 1); /* Fix CCK PHY status report issue */
- ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT4, 1); /* CCK complete HW AntDiv within 64 samples */
+ PHY_SetBBReg(adapter, ODM_REG_BB_PWR_SAV4_11N, BIT7, 1); /* Fix CCK PHY status report issue */
+ PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT4, 1); /* CCK complete HW AntDiv within 64 samples */
ODM_UpdateRxIdleAnt_88E(dm_odm, MAIN_ANT);
- ODM_SetBBReg(dm_odm, ODM_REG_ANT_MAPPING1_11N, 0xFFFF, 0x0201); /* antenna mapping table */
+ PHY_SetBBReg(adapter, ODM_REG_ANT_MAPPING1_11N, 0xFFFF, 0x0201); /* antenna mapping table */
}
static void odm_TRX_HWAntDivInit(struct odm_dm_struct *dm_odm)
{
+ struct adapter *adapter = dm_odm->Adapter;
u32 value32;
if (*(dm_odm->mp_mode) == 1) {
dm_odm->AntDivType = CGCS_RX_SW_ANTDIV;
- ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT7, 0); /* disable HW AntDiv */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, 0); /* Default RX (0/1) */
+ PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, BIT7, 0); /* disable HW AntDiv */
+ PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, 0); /* Default RX (0/1) */
return;
}
ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("odm_TRX_HWAntDivInit()\n"));
/* MAC Setting */
- value32 = ODM_GetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
- ODM_SetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ value32 = PHY_QueryBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
+ PHY_SetBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
/* Pin Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_PIN_CTRL_11N, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT22, 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
+ PHY_SetBBReg(adapter, ODM_REG_PIN_CTRL_11N, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT22, 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
/* OFDM Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
+ PHY_SetBBReg(adapter, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
/* CCK Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_BB_PWR_SAV4_11N, BIT7, 1); /* Fix CCK PHY status report issue */
- ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT4, 1); /* CCK complete HW AntDiv within 64 samples */
+ PHY_SetBBReg(adapter, ODM_REG_BB_PWR_SAV4_11N, BIT7, 1); /* Fix CCK PHY status report issue */
+ PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT4, 1); /* CCK complete HW AntDiv within 64 samples */
/* Tx Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT21, 0); /* Reg80c[21]=1'b0 from TX Reg */
+ PHY_SetBBReg(adapter, ODM_REG_TX_ANT_CTRL_11N, BIT21, 0); /* Reg80c[21]=1'b0 from TX Reg */
ODM_UpdateRxIdleAnt_88E(dm_odm, MAIN_ANT);
/* antenna mapping table */
if (!dm_odm->bIsMPChip) { /* testchip */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_DEFUALT_A_11N, BIT10|BIT9|BIT8, 1); /* Reg858[10:8]=3'b001 */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_DEFUALT_A_11N, BIT13|BIT12|BIT11, 2); /* Reg858[13:11]=3'b010 */
+ PHY_SetBBReg(adapter, ODM_REG_RX_DEFUALT_A_11N, BIT10|BIT9|BIT8, 1); /* Reg858[10:8]=3'b001 */
+ PHY_SetBBReg(adapter, ODM_REG_RX_DEFUALT_A_11N, BIT13|BIT12|BIT11, 2); /* Reg858[13:11]=3'b010 */
} else { /* MPchip */
- ODM_SetBBReg(dm_odm, ODM_REG_ANT_MAPPING1_11N, bMaskDWord, 0x0201); /* Reg914=3'b010, Reg915=3'b001 */
+ PHY_SetBBReg(adapter, ODM_REG_ANT_MAPPING1_11N, bMaskDWord, 0x0201); /* Reg914=3'b010, Reg915=3'b001 */
}
}
static void odm_FastAntTrainingInit(struct odm_dm_struct *dm_odm)
{
+ struct adapter *adapter = dm_odm->Adapter;
u32 value32, i;
struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
u32 AntCombination = 2;
@@ -122,68 +125,65 @@ static void odm_FastAntTrainingInit(struct odm_dm_struct *dm_odm)
dm_fat_tbl->FAT_State = FAT_NORMAL_STATE;
/* MAC Setting */
- value32 = ODM_GetMACReg(dm_odm, 0x4c, bMaskDWord);
- ODM_SetMACReg(dm_odm, 0x4c, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
- value32 = ODM_GetMACReg(dm_odm, 0x7B4, bMaskDWord);
- ODM_SetMACReg(dm_odm, 0x7b4, bMaskDWord, value32|(BIT16|BIT17)); /* Reg7B4[16]=1 enable antenna training, Reg7B4[17]=1 enable A2 match */
+ value32 = PHY_QueryBBReg(adapter, 0x4c, bMaskDWord);
+ PHY_SetBBReg(adapter, 0x4c, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ value32 = PHY_QueryBBReg(adapter, 0x7B4, bMaskDWord);
+ PHY_SetBBReg(adapter, 0x7b4, bMaskDWord, value32|(BIT16|BIT17)); /* Reg7B4[16]=1 enable antenna training, Reg7B4[17]=1 enable A2 match */
/* Match MAC ADDR */
- ODM_SetMACReg(dm_odm, 0x7b4, 0xFFFF, 0);
- ODM_SetMACReg(dm_odm, 0x7b0, bMaskDWord, 0);
+ PHY_SetBBReg(adapter, 0x7b4, 0xFFFF, 0);
+ PHY_SetBBReg(adapter, 0x7b0, bMaskDWord, 0);
- ODM_SetBBReg(dm_odm, 0x870, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
- ODM_SetBBReg(dm_odm, 0x864, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
- ODM_SetBBReg(dm_odm, 0xb2c, BIT22, 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
- ODM_SetBBReg(dm_odm, 0xb2c, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
- ODM_SetBBReg(dm_odm, 0xca4, bMaskDWord, 0x000000a0);
+ PHY_SetBBReg(adapter, 0x870, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ PHY_SetBBReg(adapter, 0x864, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ PHY_SetBBReg(adapter, 0xb2c, BIT22, 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ PHY_SetBBReg(adapter, 0xb2c, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
+ PHY_SetBBReg(adapter, 0xca4, bMaskDWord, 0x000000a0);
/* antenna mapping table */
if (AntCombination == 2) {
if (!dm_odm->bIsMPChip) { /* testchip */
- ODM_SetBBReg(dm_odm, 0x858, BIT10|BIT9|BIT8, 1); /* Reg858[10:8]=3'b001 */
- ODM_SetBBReg(dm_odm, 0x858, BIT13|BIT12|BIT11, 2); /* Reg858[13:11]=3'b010 */
+ PHY_SetBBReg(adapter, 0x858, BIT10|BIT9|BIT8, 1); /* Reg858[10:8]=3'b001 */
+ PHY_SetBBReg(adapter, 0x858, BIT13|BIT12|BIT11, 2); /* Reg858[13:11]=3'b010 */
} else { /* MPchip */
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte0, 1);
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte1, 2);
+ PHY_SetBBReg(adapter, 0x914, bMaskByte0, 1);
+ PHY_SetBBReg(adapter, 0x914, bMaskByte1, 2);
}
} else if (AntCombination == 7) {
if (!dm_odm->bIsMPChip) { /* testchip */
- ODM_SetBBReg(dm_odm, 0x858, BIT10|BIT9|BIT8, 0); /* Reg858[10:8]=3'b000 */
- ODM_SetBBReg(dm_odm, 0x858, BIT13|BIT12|BIT11, 1); /* Reg858[13:11]=3'b001 */
- ODM_SetBBReg(dm_odm, 0x878, BIT16, 0);
- ODM_SetBBReg(dm_odm, 0x858, BIT15|BIT14, 2); /* Reg878[0],Reg858[14:15])=3'b010 */
- ODM_SetBBReg(dm_odm, 0x878, BIT19|BIT18|BIT17, 3);/* Reg878[3:1]=3b'011 */
- ODM_SetBBReg(dm_odm, 0x878, BIT22|BIT21|BIT20, 4);/* Reg878[6:4]=3b'100 */
- ODM_SetBBReg(dm_odm, 0x878, BIT25|BIT24|BIT23, 5);/* Reg878[9:7]=3b'101 */
- ODM_SetBBReg(dm_odm, 0x878, BIT28|BIT27|BIT26, 6);/* Reg878[12:10]=3b'110 */
- ODM_SetBBReg(dm_odm, 0x878, BIT31|BIT30|BIT29, 7);/* Reg878[15:13]=3b'111 */
+ PHY_SetBBReg(adapter, 0x858, BIT10|BIT9|BIT8, 0); /* Reg858[10:8]=3'b000 */
+ PHY_SetBBReg(adapter, 0x858, BIT13|BIT12|BIT11, 1); /* Reg858[13:11]=3'b001 */
+ PHY_SetBBReg(adapter, 0x878, BIT16, 0);
+ PHY_SetBBReg(adapter, 0x858, BIT15|BIT14, 2); /* Reg878[0],Reg858[14:15])=3'b010 */
+ PHY_SetBBReg(adapter, 0x878, BIT19|BIT18|BIT17, 3);/* Reg878[3:1]=3b'011 */
+ PHY_SetBBReg(adapter, 0x878, BIT22|BIT21|BIT20, 4);/* Reg878[6:4]=3b'100 */
+ PHY_SetBBReg(adapter, 0x878, BIT25|BIT24|BIT23, 5);/* Reg878[9:7]=3b'101 */
+ PHY_SetBBReg(adapter, 0x878, BIT28|BIT27|BIT26, 6);/* Reg878[12:10]=3b'110 */
+ PHY_SetBBReg(adapter, 0x878, BIT31|BIT30|BIT29, 7);/* Reg878[15:13]=3b'111 */
} else { /* MPchip */
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte0, 0);
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte1, 1);
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte2, 2);
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte3, 3);
- ODM_SetBBReg(dm_odm, 0x918, bMaskByte0, 4);
- ODM_SetBBReg(dm_odm, 0x918, bMaskByte1, 5);
- ODM_SetBBReg(dm_odm, 0x918, bMaskByte2, 6);
- ODM_SetBBReg(dm_odm, 0x918, bMaskByte3, 7);
+ PHY_SetBBReg(adapter, 0x914, bMaskByte0, 0);
+ PHY_SetBBReg(adapter, 0x914, bMaskByte1, 1);
+ PHY_SetBBReg(adapter, 0x914, bMaskByte2, 2);
+ PHY_SetBBReg(adapter, 0x914, bMaskByte3, 3);
+ PHY_SetBBReg(adapter, 0x918, bMaskByte0, 4);
+ PHY_SetBBReg(adapter, 0x918, bMaskByte1, 5);
+ PHY_SetBBReg(adapter, 0x918, bMaskByte2, 6);
+ PHY_SetBBReg(adapter, 0x918, bMaskByte3, 7);
}
}
/* Default Ant Setting when no fast training */
- ODM_SetBBReg(dm_odm, 0x80c, BIT21, 1); /* Reg80c[21]=1'b1 from TX Info */
- ODM_SetBBReg(dm_odm, 0x864, BIT5|BIT4|BIT3, 0); /* Default RX */
- ODM_SetBBReg(dm_odm, 0x864, BIT8|BIT7|BIT6, 1); /* Optional RX */
+ PHY_SetBBReg(adapter, 0x80c, BIT21, 1); /* Reg80c[21]=1'b1 from TX Info */
+ PHY_SetBBReg(adapter, 0x864, BIT5|BIT4|BIT3, 0); /* Default RX */
+ PHY_SetBBReg(adapter, 0x864, BIT8|BIT7|BIT6, 1); /* Optional RX */
/* Enter Traing state */
- ODM_SetBBReg(dm_odm, 0x864, BIT2|BIT1|BIT0, (AntCombination-1)); /* Reg864[2:0]=3'd6 ant combination=reg864[2:0]+1 */
- ODM_SetBBReg(dm_odm, 0xc50, BIT7, 1); /* RegC50[7]=1'b1 enable HW AntDiv */
+ PHY_SetBBReg(adapter, 0x864, BIT2|BIT1|BIT0, (AntCombination-1)); /* Reg864[2:0]=3'd6 ant combination=reg864[2:0]+1 */
+ PHY_SetBBReg(adapter, 0xc50, BIT7, 1); /* RegC50[7]=1'b1 enable HW AntDiv */
}
void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *dm_odm)
{
- if (dm_odm->SupportICType != ODM_RTL8188E)
- return;
-
ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("dm_odm->AntDivType=%d\n", dm_odm->AntDivType));
ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("dm_odm->bIsMPChip=%s\n", (dm_odm->bIsMPChip ? "true" : "false")));
@@ -198,6 +198,7 @@ void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *dm_odm)
void ODM_UpdateRxIdleAnt_88E(struct odm_dm_struct *dm_odm, u8 Ant)
{
struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ struct adapter *adapter = dm_odm->Adapter;
u32 DefaultAnt, OptionalAnt;
if (dm_fat_tbl->RxIdleAnt != Ant) {
@@ -211,13 +212,13 @@ void ODM_UpdateRxIdleAnt_88E(struct odm_dm_struct *dm_odm, u8 Ant)
}
if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, DefaultAnt); /* Default RX */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT8|BIT7|BIT6, OptionalAnt); /* Optional RX */
- ODM_SetBBReg(dm_odm, ODM_REG_ANTSEL_CTRL_11N, BIT14|BIT13|BIT12, DefaultAnt); /* Default TX */
- ODM_SetMACReg(dm_odm, ODM_REG_RESP_TX_11N, BIT6|BIT7, DefaultAnt); /* Resp Tx */
+ PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, DefaultAnt); /* Default RX */
+ PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT8|BIT7|BIT6, OptionalAnt); /* Optional RX */
+ PHY_SetBBReg(adapter, ODM_REG_ANTSEL_CTRL_11N, BIT14|BIT13|BIT12, DefaultAnt); /* Default TX */
+ PHY_SetBBReg(adapter, ODM_REG_RESP_TX_11N, BIT6|BIT7, DefaultAnt); /* Resp Tx */
} else if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV) {
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, DefaultAnt); /* Default RX */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT8|BIT7|BIT6, OptionalAnt); /* Optional RX */
+ PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, DefaultAnt); /* Default RX */
+ PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT8|BIT7|BIT6, OptionalAnt); /* Optional RX */
}
}
dm_fat_tbl->RxIdleAnt = Ant;
@@ -343,16 +344,18 @@ static void odm_HWAntDiv(struct odm_dm_struct *dm_odm)
void ODM_AntennaDiversity_88E(struct odm_dm_struct *dm_odm)
{
struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
- if ((dm_odm->SupportICType != ODM_RTL8188E) || (!(dm_odm->SupportAbility & ODM_BB_ANT_DIV)))
+ struct adapter *adapter = dm_odm->Adapter;
+
+ if (!(dm_odm->SupportAbility & ODM_BB_ANT_DIV))
return;
if (!dm_odm->bLinked) {
ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_AntennaDiversity_88E(): No Link.\n"));
if (dm_fat_tbl->bBecomeLinked) {
ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Need to Turn off HW AntDiv\n"));
- ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT7, 0); /* RegC50[7]=1'b1 enable HW AntDiv */
- ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT15, 0); /* Enable CCK AntDiv */
+ PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, BIT7, 0); /* RegC50[7]=1'b1 enable HW AntDiv */
+ PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT15, 0); /* Enable CCK AntDiv */
if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
- ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT21, 0); /* Reg80c[21]=1'b0 from TX Reg */
+ PHY_SetBBReg(adapter, ODM_REG_TX_ANT_CTRL_11N, BIT21, 0); /* Reg80c[21]=1'b0 from TX Reg */
dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
}
return;
@@ -360,10 +363,10 @@ void ODM_AntennaDiversity_88E(struct odm_dm_struct *dm_odm)
if (!dm_fat_tbl->bBecomeLinked) {
ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Need to Turn on HW AntDiv\n"));
/* Because HW AntDiv is disabled before Link, we enable HW AntDiv after link */
- ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT7, 1); /* RegC50[7]=1'b1 enable HW AntDiv */
- ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT15, 1); /* Enable CCK AntDiv */
+ PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, BIT7, 1); /* RegC50[7]=1'b1 enable HW AntDiv */
+ PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT15, 1); /* Enable CCK AntDiv */
if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
- ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT21, 1); /* Reg80c[21]=1'b1 from TX Info */
+ PHY_SetBBReg(adapter, ODM_REG_TX_ANT_CTRL_11N, BIT21, 1); /* Reg80c[21]=1'b1 from TX Info */
dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
}
}
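The hunks above keep the existing enable/disable-on-link-transition behaviour: HW antenna diversity is switched off once when the link drops and back on once when it comes up, with bBecomeLinked acting as the edge detector. A condensed sketch of that control flow, with the register sequences replaced by stand-in helpers, is:

/* Stand-ins for the PHY_SetBBReg sequences shown in the diff above. */
static void enable_hw_antdiv(struct odm_dm_struct *dm_odm);
static void disable_hw_antdiv(struct odm_dm_struct *dm_odm);

/* Sketch of the link-edge gating used by ODM_AntennaDiversity_88E(). */
static void antdiv_watch_link_sketch(struct odm_dm_struct *dm_odm)
{
	struct fast_ant_train *fat = &dm_odm->DM_FatTable;

	if (!dm_odm->bLinked) {
		if (fat->bBecomeLinked) {	/* falling edge: link just lost */
			disable_hw_antdiv(dm_odm);
			fat->bBecomeLinked = dm_odm->bLinked;
		}
		return;
	}
	if (!fat->bBecomeLinked) {		/* rising edge: link just up */
		enable_hw_antdiv(dm_odm);
		fat->bBecomeLinked = dm_odm->bLinked;
	}
}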
diff --git a/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c b/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c
index 18c0533fbd01..6193d9fafb98 100644
--- a/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c
+++ b/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c
@@ -21,25 +21,27 @@
#include "odm_precomp.h"
void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
- u32 Data, enum ODM_RF_RADIO_PATH RF_PATH,
+ u32 Data, enum rf_radio_path RF_PATH,
u32 RegAddr)
{
- if (Addr == 0xffe) {
- ODM_sleep_ms(50);
+ struct adapter *adapter = pDM_Odm->Adapter;
+
+ if (Addr == 0xffe) {
+ msleep(50);
} else if (Addr == 0xfd) {
- ODM_delay_ms(5);
+ mdelay(5);
} else if (Addr == 0xfc) {
- ODM_delay_ms(1);
+ mdelay(1);
} else if (Addr == 0xfb) {
- ODM_delay_us(50);
+ udelay(50);
} else if (Addr == 0xfa) {
- ODM_delay_us(5);
+ udelay(5);
} else if (Addr == 0xf9) {
- ODM_delay_us(1);
+ udelay(1);
} else {
- ODM_SetRFReg(pDM_Odm, RF_PATH, RegAddr, bRFRegOffsetMask, Data);
+ PHY_SetRFReg(adapter, RF_PATH, RegAddr, bRFRegOffsetMask, Data);
/* Add 1us delay between BB/RF register setting. */
- ODM_delay_us(1);
+ udelay(1);
}
}
@@ -48,7 +50,7 @@ void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data
u32 content = 0x1000; /* RF_Content: radioa_txt */
u32 maskforPhySet = (u32)(content&0xE000);
- odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, ODM_RF_PATH_A, Addr|maskforPhySet);
+ odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, RF_PATH_A, Addr|maskforPhySet);
ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("===> ODM_ConfigRFWithHeaderFile: [RadioA] %08X %08X\n", Addr, Data));
}
@@ -57,7 +59,7 @@ void odm_ConfigRF_RadioB_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data
u32 content = 0x1001; /* RF_Content: radiob_txt */
u32 maskforPhySet = (u32)(content&0xE000);
- odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, ODM_RF_PATH_B, Addr|maskforPhySet);
+ odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, RF_PATH_B, Addr|maskforPhySet);
ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("===> ODM_ConfigRFWithHeaderFile: [RadioB] %08X %08X\n", Addr, Data));
}
@@ -70,9 +72,11 @@ void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data)
void odm_ConfigBB_AGC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
{
- ODM_SetBBReg(pDM_Odm, Addr, Bitmask, Data);
+ struct adapter *adapter = pDM_Odm->Adapter;
+
+ PHY_SetBBReg(adapter, Addr, Bitmask, Data);
/* Add 1us delay between BB/RF register setting. */
- ODM_delay_us(1);
+ udelay(1);
ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE,
("===> ODM_ConfigBBWithHeaderFile: [AGC_TAB] %08X %08X\n",
@@ -83,17 +87,17 @@ void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
u32 Bitmask, u32 Data)
{
if (Addr == 0xfe) {
- ODM_sleep_ms(50);
+ msleep(50);
} else if (Addr == 0xfd) {
- ODM_delay_ms(5);
+ mdelay(5);
} else if (Addr == 0xfc) {
- ODM_delay_ms(1);
+ mdelay(1);
} else if (Addr == 0xfb) {
- ODM_delay_us(50);
+ udelay(50);
} else if (Addr == 0xfa) {
- ODM_delay_us(5);
+ udelay(5);
} else if (Addr == 0xf9) {
- ODM_delay_us(1);
+ udelay(1);
} else{
ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
("===> @@@@@@@ ODM_ConfigBBWithHeaderFile: [PHY_REG] %08X %08X %08X\n",
@@ -104,25 +108,27 @@ void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
void odm_ConfigBB_PHY_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
{
+ struct adapter *adapter = pDM_Odm->Adapter;
+
if (Addr == 0xfe) {
- ODM_sleep_ms(50);
+ msleep(50);
} else if (Addr == 0xfd) {
- ODM_delay_ms(5);
+ mdelay(5);
} else if (Addr == 0xfc) {
- ODM_delay_ms(1);
+ mdelay(1);
} else if (Addr == 0xfb) {
- ODM_delay_us(50);
+ udelay(50);
} else if (Addr == 0xfa) {
- ODM_delay_us(5);
+ udelay(5);
} else if (Addr == 0xf9) {
- ODM_delay_us(1);
+ udelay(1);
} else {
if (Addr == 0xa24)
pDM_Odm->RFCalibrateInfo.RegA24 = Data;
- ODM_SetBBReg(pDM_Odm, Addr, Bitmask, Data);
+ PHY_SetBBReg(adapter, Addr, Bitmask, Data);
/* Add 1us delay between BB/RF register setting. */
- ODM_delay_us(1);
+ udelay(1);
ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE,
("===> ODM_ConfigBBWithHeaderFile: [PHY_REG] %08X %08X\n",
Addr, Data));
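The config-table parsers above treat a few pseudo register addresses as delay requests rather than writes; the change only swaps the ODM delay wrappers for the standard kernel helpers. A minimal sketch of that convention, mirroring the branches in odm_ConfigBB_PHY_8188E() (helper name illustrative), is:

#include <linux/delay.h>

/* Sketch: pseudo-addresses in the BB/RF config tables encode delays.
 * Any other address is a real register write, handled by the callers. */
static bool config_entry_is_delay_sketch(u32 addr)
{
	switch (addr) {
	case 0xfe: msleep(50); return true;
	case 0xfd: mdelay(5);  return true;
	case 0xfc: mdelay(1);  return true;
	case 0xfb: udelay(50); return true;
	case 0xfa: udelay(5);  return true;
	case 0xf9: udelay(1);  return true;
	default:   return false;
	}
}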
diff --git a/drivers/staging/rtl8188eu/hal/odm_interface.c b/drivers/staging/rtl8188eu/hal/odm_interface.c
index 59ad5bf4d941..3cd68212afd1 100644
--- a/drivers/staging/rtl8188eu/hal/odm_interface.c
+++ b/drivers/staging/rtl8188eu/hal/odm_interface.c
@@ -57,42 +57,6 @@ void ODM_Write4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 Data)
rtw_write32(Adapter, RegAddr, Data);
}
-void ODM_SetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask, u32 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- PHY_SetBBReg(Adapter, RegAddr, BitMask, Data);
-}
-
-u32 ODM_GetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return PHY_QueryBBReg(Adapter, RegAddr, BitMask);
-}
-
-void ODM_SetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask, u32 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- PHY_SetBBReg(Adapter, RegAddr, BitMask, Data);
-}
-
-u32 ODM_GetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return PHY_QueryBBReg(Adapter, RegAddr, BitMask);
-}
-
-void ODM_SetRFReg(struct odm_dm_struct *pDM_Odm, enum ODM_RF_RADIO_PATH eRFPath, u32 RegAddr, u32 BitMask, u32 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- PHY_SetRFReg(Adapter, (enum rf_radio_path)eRFPath, RegAddr, BitMask, Data);
-}
-
-u32 ODM_GetRFReg(struct odm_dm_struct *pDM_Odm, enum ODM_RF_RADIO_PATH eRFPath, u32 RegAddr, u32 BitMask)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return PHY_QueryRFReg(Adapter, (enum rf_radio_path)eRFPath, RegAddr, BitMask);
-}
-
/* ODM Memory relative API. */
void ODM_AllocateMemory(struct odm_dm_struct *pDM_Odm, void **pPtr, u32 length)
{
@@ -110,68 +74,6 @@ s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2, u
return _rtw_memcmp(pBuf1, pBuf2, length);
}
-/* ODM MISC relative API. */
-void ODM_AcquireSpinLock(struct odm_dm_struct *pDM_Odm, enum RT_SPINLOCK_TYPE type)
-{
-}
-
-void ODM_ReleaseSpinLock(struct odm_dm_struct *pDM_Odm, enum RT_SPINLOCK_TYPE type)
-{
-}
-
-/* Work item relative API. FOr MP driver only~! */
-void ODM_InitializeWorkItem(struct odm_dm_struct *pDM_Odm, void *pRtWorkItem,
- RT_WORKITEM_CALL_BACK RtWorkItemCallback,
- void *pContext, const char *szID)
-{
-}
-
-void ODM_StartWorkItem(void *pRtWorkItem)
-{
-}
-
-void ODM_StopWorkItem(void *pRtWorkItem)
-{
-}
-
-void ODM_FreeWorkItem(void *pRtWorkItem)
-{
-}
-
-void ODM_ScheduleWorkItem(void *pRtWorkItem)
-{
-}
-
-void ODM_IsWorkItemScheduled(void *pRtWorkItem)
-{
-}
-
-/* ODM Timer relative API. */
-void ODM_StallExecution(u32 usDelay)
-{
- rtw_udelay_os(usDelay);
-}
-
-void ODM_delay_ms(u32 ms)
-{
- rtw_mdelay_os(ms);
-}
-
-void ODM_delay_us(u32 us)
-{
- rtw_udelay_os(us);
-}
-
-void ODM_sleep_ms(u32 ms)
-{
- rtw_msleep_os(ms);
-}
-
-void ODM_sleep_us(u32 us)
-{
- rtw_usleep_os(us);
-}
-
void ODM_SetTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer, u32 msDelay)
{
_set_timer(pTimer, msDelay); /* ms */
@@ -190,10 +92,6 @@ void ODM_CancelTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer)
_cancel_timer_ex(pTimer);
}
-void ODM_ReleaseTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer)
-{
-}
-
/* ODM FW relative API. */
u32 ODM_FillH2CCmd(u8 *pH2CBuffer, u32 H2CBufferLen, u32 CmdNum,
u32 *pElementID, u32 *pCmdLen,
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 8be2ad7217d4..ca0a7085445f 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -655,8 +655,8 @@ _func_enter_;
SetFwRsvdPagePkt(adapt, false);
DLBcnCount++;
do {
- rtw_yield_os();
- /* rtw_mdelay_os(10); */
+ yield();
+ /* mdelay(10); */
/* check rsvd page download OK. */
rtw_hal_get_hwreg(adapt, HW_VAR_BCN_VALID, (u8 *)(&bcn_valid));
poll++;
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index ec0028d4e61a..4c934e2e0911 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -63,11 +63,6 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_PLATFORM, ODM_CE);
- if (Adapter->interface_type == RTW_GSPI)
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_INTERFACE, ODM_ITRF_SDIO);
- else
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_INTERFACE, Adapter->interface_type);/* RTL871X_HCI_TYPE */
-
ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_IC_TYPE, ODM_RTL8188E);
fab_ver = ODM_TSMC;
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 52b3fba0fae1..5921db86547f 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -60,7 +60,7 @@ static s32 iol_execute(struct adapter *padapter, u8 control)
reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0);
rtw_write8(padapter, REG_HMEBOX_E0, reg_0x88|control);
- start = rtw_get_current_time();
+ start = jiffies;
while ((reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0)) & control &&
(passing_time = rtw_get_passing_time_ms(start)) < 1000) {
;
@@ -238,11 +238,11 @@ static void efuse_read_phymap_from_txpktbuf(
rtw_write16(adapter, REG_PKTBUF_DBG_ADDR, dbg_addr+i);
rtw_write8(adapter, REG_TXPKTBUF_DBG, 0);
- start = rtw_get_current_time();
+ start = jiffies;
while (!(reg_0x143 = rtw_read8(adapter, REG_TXPKTBUF_DBG)) &&
(passing_time = rtw_get_passing_time_ms(start)) < 1000) {
DBG_88E("%s polling reg_0x143:0x%02x, reg_0x106:0x%02x\n", __func__, reg_0x143, rtw_read8(adapter, 0x106));
- rtw_usleep_os(100);
+ msleep(1);
}
lo32 = rtw_read32(adapter, REG_PKTBUF_DBG_DATA_L);
@@ -372,7 +372,7 @@ void rtw_IOL_cmd_tx_pkt_buf_dump(struct adapter *Adapter, int data_len)
if (pbuf) {
for (addr = 0; addr < data_cnts; addr++) {
rtw_write32(Adapter, 0x140, addr);
- rtw_usleep_os(2);
+ msleep(1);
loop = 0;
do {
rstatus = (reg_140 = rtw_read32(Adapter, REG_PKTBUF_DBG_CTRL)&BIT24);
@@ -383,7 +383,7 @@ void rtw_IOL_cmd_tx_pkt_buf_dump(struct adapter *Adapter, int data_len)
fifo_data = rtw_read32(Adapter, REG_PKTBUF_DBG_DATA_H);
memcpy(pbuf+(addr*8+4), &fifo_data, 4);
}
- rtw_usleep_os(2);
+ msleep(1);
} while (!rstatus && (loop++ < 10));
}
rtw_IOL_cmd_buf_dump(Adapter, data_len, pbuf);
@@ -574,7 +574,7 @@ static s32 _FWFreeToGo(struct adapter *padapter)
DBG_88E("%s: Polling FW ready success!! REG_MCUFWDL:0x%08x\n", __func__, value32);
return _SUCCESS;
}
- rtw_udelay_os(5);
+ udelay(5);
} while (counter++ < POLLING_READY_TIMEOUT_COUNT);
DBG_88E("%s: Polling FW ready fail!! REG_MCUFWDL:0x%08x\n", __func__, value32);
@@ -660,7 +660,7 @@ s32 rtl8188e_FirmwareDownload(struct adapter *padapter)
}
_FWDownloadEnable(padapter, true);
- fwdl_start_time = rtw_get_current_time();
+ fwdl_start_time = jiffies;
while (1) {
/* reset the FWDL chksum */
rtw_write8(padapter, REG_MCUFWDL, rtw_read8(padapter, REG_MCUFWDL) | FWDL_ChkSum_rpt);
@@ -2304,7 +2304,7 @@ void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool
if (registry_par->antdiv_cfg == 2) { /* 2:By EFUSE */
pHalData->AntDivCfg = (PROMContent[EEPROM_RF_BOARD_OPTION_88E]&0x18)>>3;
if (PROMContent[EEPROM_RF_BOARD_OPTION_88E] == 0xFF)
- pHalData->AntDivCfg = (EEPROM_DEFAULT_BOARD_OPTION&0x18)>>3;;
+ pHalData->AntDivCfg = (EEPROM_DEFAULT_BOARD_OPTION&0x18)>>3;
} else {
pHalData->AntDivCfg = registry_par->antdiv_cfg; /* 0:OFF , 1:ON, 2:By EFUSE */
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c b/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c
index e97ba02fa045..3d0e6c9e0310 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c
@@ -581,7 +581,7 @@ u8 Hal_ReadRFThermalMeter(struct adapter *pAdapter)
void Hal_GetThermalMeter(struct adapter *pAdapter, u8 *value)
{
Hal_TriggerRFThermalMeter(pAdapter);
- rtw_msleep_os(1000);
+ msleep(1000);
*value = Hal_ReadRFThermalMeter(pAdapter);
}
@@ -614,7 +614,7 @@ void Hal_SetSingleCarrierTx(struct adapter *pAdapter, u8 bStart)
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
- rtw_msleep_os(10);
+ msleep(10);
/* BB Reset */
write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
@@ -652,29 +652,27 @@ void Hal_SetSingleToneTx(struct adapter *pAdapter, u8 bStart)
/* Start Single Tone. */
RT_TRACE(_module_mp_, _drv_alert_, ("SetSingleToneTx: test start\n"));
/* <20120326, Kordan> To amplify the power of tone for Xtal calibration. (asked by Edlu) */
- if (IS_HARDWARE_TYPE_8188E(pAdapter)) {
- reg58 = PHY_QueryRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask);
- reg58 &= 0xFFFFFFF0;
- reg58 += 2;
- PHY_SetRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask, reg58);
- }
+ reg58 = PHY_QueryRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask);
+ reg58 &= 0xFFFFFFF0;
+ reg58 += 2;
+ PHY_SetRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask, reg58);
PHY_SetBBReg(pAdapter, rFPGA0_RFMOD, bCCKEn, 0x0);
PHY_SetBBReg(pAdapter, rFPGA0_RFMOD, bOFDMEn, 0x0);
if (is92C) {
_write_rfreg(pAdapter, RF_PATH_A, 0x21, BIT19, 0x01);
- rtw_usleep_os(100);
+ msleep(1);
if (rfPath == RF_PATH_A)
write_rfreg(pAdapter, RF_PATH_B, 0x00, 0x10000); /* PAD all on. */
else if (rfPath == RF_PATH_B)
write_rfreg(pAdapter, RF_PATH_A, 0x00, 0x10000); /* PAD all on. */
write_rfreg(pAdapter, rfPath, 0x00, 0x2001f); /* PAD all on. */
- rtw_usleep_os(100);
+ msleep(1);
} else {
write_rfreg(pAdapter, rfPath, 0x21, 0xd4000);
- rtw_usleep_os(100);
+ msleep(1);
write_rfreg(pAdapter, rfPath, 0x00, 0x2001f); /* PAD all on. */
- rtw_usleep_os(100);
+ msleep(1);
}
/* for dynamic set Power index. */
@@ -687,24 +685,22 @@ void Hal_SetSingleToneTx(struct adapter *pAdapter, u8 bStart)
/* <20120326, Kordan> To amplify the power of tone for Xtal calibration. (asked by Edlu) */
/* <20120326, Kordan> Only in single tone mode. (asked by Edlu) */
- if (IS_HARDWARE_TYPE_8188E(pAdapter)) {
- reg58 = PHY_QueryRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask);
- reg58 &= 0xFFFFFFF0;
- PHY_SetRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask, reg58);
- }
+ reg58 = PHY_QueryRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask);
+ reg58 &= 0xFFFFFFF0;
+ PHY_SetRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask, reg58);
write_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn, 0x1);
write_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
if (is92C) {
_write_rfreg(pAdapter, RF_PATH_A, 0x21, BIT19, 0x00);
- rtw_usleep_os(100);
+ msleep(1);
write_rfreg(pAdapter, RF_PATH_A, 0x00, 0x32d75); /* PAD all on. */
write_rfreg(pAdapter, RF_PATH_B, 0x00, 0x32d75); /* PAD all on. */
- rtw_usleep_os(100);
+ msleep(1);
} else {
write_rfreg(pAdapter, rfPath, 0x21, 0x54000);
- rtw_usleep_os(100);
+ msleep(1);
write_rfreg(pAdapter, rfPath, 0x00, 0x30000); /* PAD all on. */
- rtw_usleep_os(100);
+ msleep(1);
}
/* Stop for dynamic set Power index. */
@@ -832,7 +828,7 @@ void Hal_SetOFDMContinuousTx(struct adapter *pAdapter, u8 bStart)
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
/* Delay 10 ms */
- rtw_msleep_os(10);
+ msleep(10);
/* BB Reset */
write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c
index 68bb96d83c81..8079fc678615 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c
@@ -190,12 +190,12 @@ phy_RFSerialRead(
tmplong2 = (tmplong2 & (~bLSSIReadAddress)) | (NewOffset<<23) | bLSSIReadEdge; /* T65 RF */
PHY_SetBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord, tmplong&(~bLSSIReadEdge));
- rtw_udelay_os(10);/* PlatformStallExecution(10); */
+ udelay(10);/* PlatformStallExecution(10); */
PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, bMaskDWord, tmplong2);
- rtw_udelay_os(100);/* PlatformStallExecution(100); */
+ udelay(100);/* PlatformStallExecution(100); */
- rtw_udelay_os(10);/* PlatformStallExecution(10); */
+ udelay(10);/* PlatformStallExecution(10); */
if (eRFPath == RF_PATH_A)
RfPiEnable = (u8)PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter1, BIT8);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c
index 299e03e3daf6..b1cb5c4a6fd6 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c
@@ -502,27 +502,27 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
}
/*----Set RF_ENV enable----*/
PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
- rtw_udelay_os(1);/* PlatformStallExecution(1); */
+ udelay(1);/* PlatformStallExecution(1); */
/*----Set RF_ENV output high----*/
PHY_SetBBReg(Adapter, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
- rtw_udelay_os(1);/* PlatformStallExecution(1); */
+ udelay(1);/* PlatformStallExecution(1); */
/* Set bit number of Address and Data for RF register */
PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */
- rtw_udelay_os(1);/* PlatformStallExecution(1); */
+ udelay(1);/* PlatformStallExecution(1); */
PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */
- rtw_udelay_os(1);/* PlatformStallExecution(1); */
+ udelay(1);/* PlatformStallExecution(1); */
/*----Initialize RF fom connfiguration file----*/
switch (eRFPath) {
case RF_PATH_A:
- if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, (enum ODM_RF_RADIO_PATH)eRFPath, (enum ODM_RF_RADIO_PATH)eRFPath))
+ if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, (enum rf_radio_path)eRFPath, (enum rf_radio_path)eRFPath))
rtStatus = _FAIL;
break;
case RF_PATH_B:
- if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, (enum ODM_RF_RADIO_PATH)eRFPath, (enum ODM_RF_RADIO_PATH)eRFPath))
+ if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, (enum rf_radio_path)eRFPath, (enum rf_radio_path)eRFPath))
rtStatus = _FAIL;
break;
case RF_PATH_C:
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
index 05e2475cfd61..511f61cbb9e0 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
@@ -86,7 +86,7 @@ void update_recvframe_attrib_88e(union recv_frame *precvframe, struct recv_stat
pattrib = &precvframe->u.hdr.attrib;
_rtw_memset(pattrib, 0, sizeof(struct rx_pkt_attrib));
- pattrib->crc_err = (u8)((le32_to_cpu(report.rxdw0) >> 14) & 0x1);;/* u8)prxreport->crc32; */
+ pattrib->crc_err = (u8)((le32_to_cpu(report.rxdw0) >> 14) & 0x1);/* u8)prxreport->crc32; */
/* update rx report to recv_frame attribute */
pattrib->pkt_rpt_type = (u8)((le32_to_cpu(report.rxdw3) >> 14) & 0x3);/* prxreport->rpt_sel; */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c b/drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c
index 96d698e1f33e..047b53482e67 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c
@@ -43,7 +43,7 @@ void rtl8188e_sreset_xmit_status_check(struct adapter *padapter)
rtl8188e_silentreset_for_specific_platform(padapter);
}
/* total xmit irp = 4 */
- current_time = rtw_get_current_time();
+ current_time = jiffies;
if (0 == pxmitpriv->free_xmitbuf_cnt) {
diff_time = jiffies_to_msecs(current_time - psrtpriv->last_tx_time);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index 0f47b8918593..17c94f4cc477 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -75,7 +75,7 @@ int rtl8188eu_init_recv_priv(struct adapter *padapter)
for (i = 0; i < NR_RECVBUFF; i++) {
_rtw_init_listhead(&precvbuf->list);
- _rtw_spinlock_init(&precvbuf->recvbuf_lock);
+ spin_lock_init(&precvbuf->recvbuf_lock);
precvbuf->alloc_sz = MAX_RECVBUF_SZ;
res = rtw_os_recvbuf_resource_alloc(padapter, precvbuf);
if (res == _FAIL)
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index 8f43f4966f22..6fb6a46f04fe 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -445,7 +445,6 @@ s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitp
struct sta_info *psta = NULL;
struct tx_servq *ptxservq = NULL;
- unsigned long irql;
struct list_head *xmitframe_plist = NULL, *xmitframe_phead = NULL;
u32 pbuf; /* next pkt address */
@@ -535,7 +534,7 @@ s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitp
phwxmit = pxmitpriv->hwxmits + 2;
break;
}
- _enter_critical_bh(&pxmitpriv->lock, &irql);
+ spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&ptxservq->sta_pending);
xmitframe_plist = get_next(xmitframe_phead);
@@ -591,7 +590,7 @@ s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitp
if (_rtw_queue_empty(&ptxservq->sta_pending) == true)
rtw_list_delete(&ptxservq->tx_pending);
- _exit_critical_bh(&pxmitpriv->lock, &irql);
+ spin_unlock_bh(&pxmitpriv->lock);
if ((pfirstframe->attrib.ether_type != 0x0806) &&
(pfirstframe->attrib.ether_type != 0x888e) &&
(pfirstframe->attrib.ether_type != 0x88b4) &&
@@ -641,14 +640,13 @@ static s32 xmitframe_direct(struct adapter *adapt, struct xmit_frame *pxmitframe
*/
static s32 pre_xmitframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
- unsigned long irql;
s32 res;
struct xmit_buf *pxmitbuf = NULL;
struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
- _enter_critical_bh(&pxmitpriv->lock, &irql);
+ spin_lock_bh(&pxmitpriv->lock);
if (rtw_txframes_sta_ac_pending(adapt, pattrib) > 0)
goto enqueue;
@@ -660,7 +658,7 @@ static s32 pre_xmitframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
if (pxmitbuf == NULL)
goto enqueue;
- _exit_critical_bh(&pxmitpriv->lock, &irql);
+ spin_unlock_bh(&pxmitpriv->lock);
pxmitframe->pxmitbuf = pxmitbuf;
pxmitframe->buf_addr = pxmitbuf->pbuf;
@@ -675,7 +673,7 @@ static s32 pre_xmitframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
enqueue:
res = rtw_xmitframe_enqueue(adapt, pxmitframe);
- _exit_critical_bh(&pxmitpriv->lock, &irql);
+ spin_unlock_bh(&pxmitpriv->lock);
if (res != _SUCCESS) {
RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("pre_xmitframe: enqueue xmitframe fail\n"));
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index cca973211b2f..b24ad495062c 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -705,7 +705,7 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
- u32 init_start_time = rtw_get_current_time();
+ u32 init_start_time = jiffies;
#define HAL_INIT_PROFILE_TAG(stage) do {} while (0)
@@ -1251,7 +1251,7 @@ static void _ReadRFType(struct adapter *Adapter)
static int _ReadAdapterInfo8188EU(struct adapter *Adapter)
{
- u32 start = rtw_get_current_time();
+ u32 start = jiffies;
MSG_88E("====> %s\n", __func__);
@@ -1894,7 +1894,7 @@ _func_enter_;
/* RQPN Load 0 */
rtw_write16(Adapter, REG_RQPN_NPQ, 0x0);
rtw_write32(Adapter, REG_RQPN, 0x80000000);
- rtw_mdelay_os(10);
+ mdelay(10);
}
}
break;
diff --git a/drivers/staging/rtl8188eu/hal/usb_ops_linux.c b/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
index 787763ef74c6..31ae21a54c92 100644
--- a/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
@@ -723,6 +723,5 @@ void rtl8188eu_set_intf_ops(struct _io_ops *pops)
void rtl8188eu_set_hw_type(struct adapter *adapt)
{
adapt->chip_type = RTL8188E;
- adapt->HardwareType = HARDWARE_TYPE_RTL8188EU;
DBG_88E("CHIP TYPE: RTL8188E\n");
}
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h b/drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h
index 20d0b3e3ad71..aebf1d3aac3b 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h
@@ -160,7 +160,7 @@
#define RTL8188E_TRANS_END \
/* format */ \
/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,0, PWR_CMD_END, 0, 0}, /* */
+ {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, PWR_CMD_END, 0, 0}, /* */
extern struct wl_pwr_cfg rtl8188E_power_on_flow[RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS+RTL8188E_TRANS_END_STEPS];
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index ad073c8af275..a492a1c547ae 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -191,7 +191,7 @@ struct dvobj_priv {
struct usb_interface *pusbintf;
struct usb_device *pusbdev;
- ATOMIC_T continual_urb_error;
+ atomic_t continual_urb_error;
};
static inline struct device *dvobj_to_dev(struct dvobj_priv *dvobj)
@@ -225,8 +225,6 @@ struct adapter {
int pid[3];/* process id from UI, 0:wps, 1:hostapd, 2:dhcpcd */
int bDongle;/* build-in module or external dongle */
u16 chip_type;
- u16 HardwareType;
- u16 interface_type;/* USB,SDIO,SPI,PCI */
struct dvobj_priv *dvobj;
struct mlme_priv mlmepriv;
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index 439c3c941ba1..c274b349f61e 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -286,39 +286,10 @@ enum rt_eeprom_type {
#define RF_CHANGE_BY_SW BIT31
enum hardware_type {
- HARDWARE_TYPE_RTL8180,
- HARDWARE_TYPE_RTL8185,
- HARDWARE_TYPE_RTL8187,
- HARDWARE_TYPE_RTL8188,
- HARDWARE_TYPE_RTL8190P,
- HARDWARE_TYPE_RTL8192E,
- HARDWARE_TYPE_RTL819xU,
- HARDWARE_TYPE_RTL8192SE,
- HARDWARE_TYPE_RTL8192SU,
- HARDWARE_TYPE_RTL8192CE,
- HARDWARE_TYPE_RTL8192CU,
- HARDWARE_TYPE_RTL8192DE,
- HARDWARE_TYPE_RTL8192DU,
- HARDWARE_TYPE_RTL8723AE,
- HARDWARE_TYPE_RTL8723AU,
- HARDWARE_TYPE_RTL8723AS,
- HARDWARE_TYPE_RTL8188EE,
HARDWARE_TYPE_RTL8188EU,
- HARDWARE_TYPE_RTL8188ES,
HARDWARE_TYPE_MAX,
};
-/* RTL8188E Series */
-#define IS_HARDWARE_TYPE_8188EE(_Adapter) \
-(((struct adapter *)_Adapter)->HardwareType == HARDWARE_TYPE_RTL8188EE)
-#define IS_HARDWARE_TYPE_8188EU(_Adapter) \
-(((struct adapter *)_Adapter)->HardwareType == HARDWARE_TYPE_RTL8188EU)
-#define IS_HARDWARE_TYPE_8188ES(_Adapter) \
-(((struct adapter *)_Adapter)->HardwareType == HARDWARE_TYPE_RTL8188ES)
-#define IS_HARDWARE_TYPE_8188E(_Adapter) \
-(IS_HARDWARE_TYPE_8188EE(_Adapter) || IS_HARDWARE_TYPE_8188EU(_Adapter) || \
- IS_HARDWARE_TYPE_8188ES(_Adapter))
-
#define GET_EEPROM_EFUSE_PRIV(adapter) (&adapter->eeprompriv)
#define is_boot_from_eeprom(adapter) (adapter->eeprompriv.EepromOrEfuse)
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index eaa4bc1b2255..9d1a79c21a2e 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -151,7 +151,7 @@ struct rtl_ps {
int Rssi_val_min;
u8 initialize;
- u32 Reg874,RegC70,Reg85C,RegA74;
+ u32 Reg874, RegC70, Reg85C, RegA74;
};
@@ -454,29 +454,7 @@ enum odm_ability_def {
ODM_RF_CALIBRATION = BIT26,
};
-/* ODM_CMNINFO_INTERFACE */
-enum odm_interface_def {
- ODM_ITRF_PCIE = 0x1,
- ODM_ITRF_USB = 0x2,
- ODM_ITRF_SDIO = 0x4,
- ODM_ITRF_ALL = 0x7,
-};
-
-/* ODM_CMNINFO_IC_TYPE */
-enum odm_ic_type {
- ODM_RTL8192S = BIT0,
- ODM_RTL8192C = BIT1,
- ODM_RTL8192D = BIT2,
- ODM_RTL8723A = BIT3,
- ODM_RTL8188E = BIT4,
- ODM_RTL8812 = BIT5,
- ODM_RTL8821 = BIT6,
-};
-
-#define ODM_IC_11N_SERIES \
- (ODM_RTL8192S | ODM_RTL8192C | ODM_RTL8192D | \
- ODM_RTL8723A | ODM_RTL8188E)
-#define ODM_IC_11AC_SERIES (ODM_RTL8812)
+#define ODM_RTL8188E BIT4
/* ODM_CMNINFO_CUT_VER */
enum odm_cut_version {
@@ -950,13 +928,6 @@ struct odm_dm_struct {
#define ODM_RF_PATH_MAX 3
-enum ODM_RF_RADIO_PATH {
- ODM_RF_PATH_A = 0, /* Radio Path A */
- ODM_RF_PATH_B = 1, /* Radio Path B */
- ODM_RF_PATH_C = 2, /* Radio Path C */
- ODM_RF_PATH_D = 3, /* Radio Path D */
-};
-
enum ODM_RF_CONTENT {
odm_radioa_txt = 0x1000,
odm_radiob_txt = 0x1001,
@@ -1128,69 +1099,28 @@ extern u8 CCKSwingTable_Ch14 [CCK_TABLE_SIZE][8];
#define SWAW_STEP_PEAK 0
#define SWAW_STEP_DETERMINE 1
-void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI);
-void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres);
-
-void ODM_SetAntenna(struct odm_dm_struct *pDM_Odm, u8 Antenna);
-
-
+#define dm_CheckTXPowerTracking ODM_TXPowerTrackingCheck
#define dm_RF_Saving ODM_RF_Saving
-void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal);
-
-#define SwAntDivRestAfterLink ODM_SwAntDivRestAfterLink
-void ODM_SwAntDivRestAfterLink(struct odm_dm_struct *pDM_Odm);
-#define dm_CheckTXPowerTracking ODM_TXPowerTrackingCheck
+void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal);
void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm);
-
+void odm_DIGbyRSSI_LPS(struct odm_dm_struct *pDM_Odm);
+void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres);
bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI,
bool bForceUpdate, u8 *pRATRState);
-
-#define dm_SWAW_RSSI_Check ODM_SwAntDivChkPerPktRssi
-void ODM_SwAntDivChkPerPktRssi(struct odm_dm_struct *pDM_Odm, u8 StationID,
- struct odm_phy_status_info *pPhyInfo);
-
u32 ConvertTo_dB(u32 Value);
-
-u32 GetPSDData(struct odm_dm_struct *pDM_Odm, unsigned int point,
- u8 initial_gain_psd);
-
-void odm_DIGbyRSSI_LPS(struct odm_dm_struct *pDM_Odm);
-
u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid,
u32 ra_mask, u8 rssi_level);
-
-void ODM_DMInit(struct odm_dm_struct *pDM_Odm);
-
-void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm);
-
void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm,
enum odm_common_info_def CmnInfo, u32 Value);
-
+void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value);
void ODM_CmnInfoHook(struct odm_dm_struct *pDM_Odm,
enum odm_common_info_def CmnInfo, void *pValue);
-
void ODM_CmnInfoPtrArrayHook(struct odm_dm_struct *pDM_Odm,
enum odm_common_info_def CmnInfo,
u16 Index, void *pValue);
-
-void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value);
-
-void ODM_InitAllTimers(struct odm_dm_struct *pDM_Odm);
-
-void ODM_CancelAllTimers(struct odm_dm_struct *pDM_Odm);
-
-void ODM_ReleaseAllTimers(struct odm_dm_struct *pDM_Odm);
-
-void ODM_ResetIQKResult(struct odm_dm_struct *pDM_Odm);
-
-void ODM_AntselStatistics_88C(struct odm_dm_struct *pDM_Odm, u8 MacId,
- u32 PWDBAll, bool isCCKrate);
-
-void ODM_SingleDualAntennaDefaultSetting(struct odm_dm_struct *pDM_Odm);
-
-bool ODM_SingleDualAntennaDetection(struct odm_dm_struct *pDM_Odm, u8 mode);
-
-void odm_dtc(struct odm_dm_struct *pDM_Odm);
+void ODM_DMInit(struct odm_dm_struct *pDM_Odm);
+void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm);
+void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI);
#endif
diff --git a/drivers/staging/rtl8188eu/include/odm_HWConfig.h b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
index df5272221bad..49e7e163ba70 100644
--- a/drivers/staging/rtl8188eu/include/odm_HWConfig.h
+++ b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
@@ -121,8 +121,8 @@ void ODM_MacStatusQuery(struct odm_dm_struct *pDM_Odm,
bool bPacketBeacon);
enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *pDM_Odm,
- enum ODM_RF_RADIO_PATH Content,
- enum ODM_RF_RADIO_PATH eRFPath);
+ enum rf_radio_path Content,
+ enum rf_radio_path eRFPath);
enum HAL_STATUS ODM_ConfigBBWithHeaderFile(struct odm_dm_struct *pDM_Odm,
enum odm_bb_config_type ConfigType);
diff --git a/drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h b/drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h
index 727e6b26fb08..f2bf7a0d9867 100644
--- a/drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h
+++ b/drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h
@@ -21,7 +21,7 @@
#define __INC_ODM_REGCONFIG_H_8188E
void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data,
- enum ODM_RF_RADIO_PATH RF_PATH, u32 RegAddr);
+ enum rf_radio_path RF_PATH, u32 RegAddr);
void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm,
u32 Addr, u32 Data);
diff --git a/drivers/staging/rtl8188eu/include/odm_debug.h b/drivers/staging/rtl8188eu/include/odm_debug.h
index 622f4c1418b4..e8c4cab2c354 100644
--- a/drivers/staging/rtl8188eu/include/odm_debug.h
+++ b/drivers/staging/rtl8188eu/include/odm_debug.h
@@ -94,18 +94,7 @@
#define ODM_RT_TRACE(pDM_Odm, comp, level, fmt) \
if (((comp) & pDM_Odm->DebugComponents) && \
(level <= pDM_Odm->DebugLevel)) { \
- if (pDM_Odm->SupportICType == ODM_RTL8192C) \
- DbgPrint("[ODM-92C] "); \
- else if (pDM_Odm->SupportICType == ODM_RTL8192D) \
- DbgPrint("[ODM-92D] "); \
- else if (pDM_Odm->SupportICType == ODM_RTL8723A) \
- DbgPrint("[ODM-8723A] "); \
- else if (pDM_Odm->SupportICType == ODM_RTL8188E) \
- DbgPrint("[ODM-8188E] "); \
- else if (pDM_Odm->SupportICType == ODM_RTL8812) \
- DbgPrint("[ODM-8812] "); \
- else if (pDM_Odm->SupportICType == ODM_RTL8821) \
- DbgPrint("[ODM-8821] "); \
+ DbgPrint("[ODM-8188E] "); \
RT_PRINTK fmt; \
}
@@ -136,7 +125,7 @@
DbgPrint(title_str); \
DbgPrint(" "); \
for (__i = 0; __i < 6; __i++) \
- DbgPrint("%02X%s", __ptr[__i], (__i==5)?"":"-");\
+ DbgPrint("%02X%s", __ptr[__i], (__i == 5)?"":"-");\
DbgPrint("\n"); \
}
diff --git a/drivers/staging/rtl8188eu/include/odm_interface.h b/drivers/staging/rtl8188eu/include/odm_interface.h
index e5c8704ac010..a50eae3ec68e 100644
--- a/drivers/staging/rtl8188eu/include/odm_interface.h
+++ b/drivers/staging/rtl8188eu/include/odm_interface.h
@@ -51,7 +51,7 @@ ODM_REG(DIG,_pDM_Odm)
#define _cat(_name, _ic_type, _func) \
( \
- ((_ic_type) & ODM_IC_11N_SERIES) ? _func##_11N(_name) : \
+ (_ic_type) ? _func##_11N(_name) : \
_func##_11AC(_name) \
)
@@ -64,7 +64,7 @@ ODM_REG(DIG,_pDM_Odm)
enum odm_h2c_cmd {
ODM_H2C_RSSI_REPORT = 0,
- ODM_H2C_PSD_RESULT= 1,
+ ODM_H2C_PSD_RESULT = 1,
ODM_H2C_PathDiv = 2,
ODM_MAX_H2CCMD
};
@@ -89,22 +89,6 @@ void ODM_Write2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u16 Data);
void ODM_Write4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 Data);
-void ODM_SetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr,
- u32 BitMask, u32 Data);
-
-u32 ODM_GetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask);
-
-void ODM_SetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr,
- u32 BitMask, u32 Data);
-
-u32 ODM_GetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask);
-
-void ODM_SetRFReg(struct odm_dm_struct *pDM_Odm, enum ODM_RF_RADIO_PATH eRFPath,
- u32 RegAddr, u32 BitMask, u32 Data);
-
-u32 ODM_GetRFReg(struct odm_dm_struct *pDM_Odm, enum ODM_RF_RADIO_PATH eRFPath,
- u32 RegAddr, u32 BitMask);
-
/* Memory Relative Function. */
void ODM_AllocateMemory(struct odm_dm_struct *pDM_Odm, void **pPtr, u32 length);
void ODM_FreeMemory(struct odm_dm_struct *pDM_Odm, void *pPtr, u32 length);
@@ -112,39 +96,7 @@ void ODM_FreeMemory(struct odm_dm_struct *pDM_Odm, void *pPtr, u32 length);
s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2,
u32 length);
-/* ODM MISC-spin lock relative API. */
-void ODM_AcquireSpinLock(struct odm_dm_struct *pDM_Odm,
- enum RT_SPINLOCK_TYPE type);
-
-void ODM_ReleaseSpinLock(struct odm_dm_struct *pDM_Odm,
- enum RT_SPINLOCK_TYPE type);
-
-/* ODM MISC-workitem relative API. */
-void ODM_InitializeWorkItem(struct odm_dm_struct *pDM_Odm, void *pRtWorkItem,
- RT_WORKITEM_CALL_BACK RtWorkItemCallback,
- void *pContext, const char *szID);
-
-void ODM_StartWorkItem(void *pRtWorkItem);
-
-void ODM_StopWorkItem(void *pRtWorkItem);
-
-void ODM_FreeWorkItem(void *pRtWorkItem);
-
-void ODM_ScheduleWorkItem(void *pRtWorkItem);
-
-void ODM_IsWorkItemScheduled(void *pRtWorkItem);
-
/* ODM Timer relative API. */
-void ODM_StallExecution(u32 usDelay);
-
-void ODM_delay_ms(u32 ms);
-
-void ODM_delay_us(u32 us);
-
-void ODM_sleep_ms(u32 ms);
-
-void ODM_sleep_us(u32 us);
-
void ODM_SetTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer,
u32 msDelay);
@@ -154,8 +106,6 @@ void ODM_InitializeTimer(struct odm_dm_struct *pDM_Odm,
void ODM_CancelTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer);
-void ODM_ReleaseTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer);
-
/* ODM FW relative API. */
u32 ODM_FillH2CCmd(u8 *pH2CBuffer, u32 H2CBufferLen, u32 CmdNum,
u32 *pElementID, u32 *pCmdLen, u8 **pCmbBuffer,
diff --git a/drivers/staging/rtl8188eu/include/odm_precomp.h b/drivers/staging/rtl8188eu/include/odm_precomp.h
index d1d95f4b87a8..6e6a656b4154 100644
--- a/drivers/staging/rtl8188eu/include/odm_precomp.h
+++ b/drivers/staging/rtl8188eu/include/odm_precomp.h
@@ -64,7 +64,6 @@ void odm_DynamicTxPowerInit(struct odm_dm_struct *pDM_Odm);
void odm_TXPowerTrackingInit(struct odm_dm_struct *pDM_Odm);
void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm);
void odm_SwAntDivInit_NIC(struct odm_dm_struct *pDM_Odm);
-void odm_GlobalAdapterCheck(void);
void odm_CmnInfoUpdate_Debug(struct odm_dm_struct *pDM_Odm);
void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm);
void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm);
@@ -76,22 +75,16 @@ void odm_SwAntDivChkAntSwitch(struct odm_dm_struct *pDM_Odm, u8 Step);
void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm);
void odm_DynamicTxPower(struct odm_dm_struct *pDM_Odm);
void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm);
-void odm_SwAntDivInit(struct odm_dm_struct *pDM_Odm);
void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm);
void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm);
void odm_1R_CCA(struct odm_dm_struct *pDM_Odm);
void odm_RefreshRateAdaptiveMaskCE(struct odm_dm_struct *pDM_Odm);
void odm_RefreshRateAdaptiveMaskAPADSL(struct odm_dm_struct *pDM_Odm);
void odm_DynamicTxPowerNIC(struct odm_dm_struct *pDM_Odm);
-void odm_DynamicTxPowerAP(struct odm_dm_struct *pDM_Odm);
-void odm_RSSIMonitorCheckMP(struct odm_dm_struct *pDM_Odm);
void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm);
-void odm_RSSIMonitorCheckAP(struct odm_dm_struct *pDM_Odm);
void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm);
void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm);
void odm_TXPowerTrackingCheckCE(struct odm_dm_struct *pDM_Odm);
-void odm_TXPowerTrackingCheckMP(struct odm_dm_struct *pDM_Odm);
-void odm_TXPowerTrackingCheckAP(struct odm_dm_struct *pDM_Odm);
void odm_SwAntDivChkAntSwitchCallback(void *FunctionContext);
void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm);
void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm);
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index 36523edf6a71..7956f0cdb96b 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -24,13 +24,12 @@
#define _FAIL 0
#define _SUCCESS 1
-#define RTW_RX_HANDLED 2
+#define RTW_RX_HANDLED 2
#include <linux/spinlock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
@@ -63,8 +62,6 @@ struct __queue {
spinlock_t lock;
};
-#define thread_exit() complete_and_exit(NULL, 0)
-
static inline struct list_head *get_next(struct list_head *list)
{
return list->next;
@@ -72,45 +69,15 @@ static inline struct list_head *get_next(struct list_head *list)
static inline struct list_head *get_list_head(struct __queue *queue)
{
- return (&(queue->queue));
+ return &(queue->queue);
}
#define LIST_CONTAINOR(ptr, type, member) \
- ((type *)((char *)(ptr)-(size_t)(&((type *)0)->member)))
-
-
-static inline void _enter_critical(spinlock_t *plock, unsigned long *pirqL)
-{
- spin_lock_irqsave(plock, *pirqL);
-}
-
-static inline void _exit_critical(spinlock_t *plock, unsigned long *pirqL)
-{
- spin_unlock_irqrestore(plock, *pirqL);
-}
-
-static inline void _enter_critical_ex(spinlock_t *plock, unsigned long *pirqL)
-{
- spin_lock_irqsave(plock, *pirqL);
-}
-
-static inline void _exit_critical_ex(spinlock_t *plock, unsigned long *pirqL)
-{
- spin_unlock_irqrestore(plock, *pirqL);
-}
+ ((type *)((char *)(ptr)-(size_t)(&((type *)0)->member)))
-static inline void _enter_critical_bh(spinlock_t *plock, unsigned long *pirqL)
-{
- spin_lock_bh(plock);
-}
-
-static inline void _exit_critical_bh(spinlock_t *plock, unsigned long *pirqL)
-{
- spin_unlock_bh(plock);
-}
-
-static inline int _enter_critical_mutex(struct mutex *pmutex, unsigned long *pirqL)
+static inline int _enter_critical_mutex(struct mutex *pmutex,
+ unsigned long *pirqL)
{
int ret;
@@ -119,7 +86,8 @@ static inline int _enter_critical_mutex(struct mutex *pmutex, unsigned long *pir
}
-static inline void _exit_critical_mutex(struct mutex *pmutex, unsigned long *pirqL)
+static inline void _exit_critical_mutex(struct mutex *pmutex,
+ unsigned long *pirqL)
{
mutex_unlock(pmutex);
}
@@ -129,29 +97,33 @@ static inline void rtw_list_delete(struct list_head *plist)
list_del_init(plist);
}
-static inline void _init_timer(struct timer_list *ptimer,struct net_device *nic_hdl,void *pfunc,void* cntx)
+static inline void _init_timer(struct timer_list *ptimer,
+ struct net_device *nic_hdl,
+ void *pfunc, void *cntx)
{
ptimer->function = pfunc;
ptimer->data = (unsigned long)cntx;
init_timer(ptimer);
}
-static inline void _set_timer(struct timer_list *ptimer,u32 delay_time)
+static inline void _set_timer(struct timer_list *ptimer, u32 delay_time)
{
mod_timer(ptimer , (jiffies+(delay_time*HZ/1000)));
}
-static inline void _cancel_timer(struct timer_list *ptimer,u8 *bcancelled)
+static inline void _cancel_timer(struct timer_list *ptimer, u8 *bcancelled)
{
del_timer_sync(ptimer);
- *bcancelled= true;/* true ==1; false==0 */
+ *bcancelled = true;/* true ==1; false==0 */
}
#define RTW_TIMER_HDL_ARGS void *FunctionContext
#define RTW_TIMER_HDL_NAME(name) rtw_##name##_timer_hdl
-#define RTW_DECLARE_TIMER_HDL(name) void RTW_TIMER_HDL_NAME(name)(RTW_TIMER_HDL_ARGS)
+#define RTW_DECLARE_TIMER_HDL(name) \
+ void RTW_TIMER_HDL_NAME(name)(RTW_TIMER_HDL_ARGS)
-static inline void _init_workitem(struct work_struct *pwork, void *pfunc, void * cntx)
+static inline void _init_workitem(struct work_struct *pwork, void *pfunc,
+ void *cntx)
{
INIT_WORK(pwork, pfunc);
}
@@ -165,23 +137,6 @@ static inline void _cancel_workitem_sync(struct work_struct *pwork)
{
cancel_work_sync(pwork);
}
-/* */
-/* Global Mutex: can only be used at PASSIVE level. */
-/* */
-
-#define ACQUIRE_GLOBAL_MUTEX(_MutexCounter) \
-{ \
- while (atomic_inc_return((atomic_t *)&(_MutexCounter)) != 1)\
- { \
- atomic_dec((atomic_t *)&(_MutexCounter)); \
- msleep(10); \
- } \
-}
-
-#define RELEASE_GLOBAL_MUTEX(_MutexCounter) \
-{ \
- atomic_dec((atomic_t *)&(_MutexCounter)); \
-}
static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
{
@@ -207,7 +162,7 @@ static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
}
#ifndef BIT
- #define BIT(x) ( 1 << (x))
+ #define BIT(x) (1 << (x))
#endif
#define BIT0 0x00000001
@@ -301,20 +256,13 @@ void rtw_list_insert_head(struct list_head *plist, struct list_head *phead);
void rtw_list_insert_tail(struct list_head *plist, struct list_head *phead);
void rtw_list_delete(struct list_head *plist);
-void _rtw_init_sema(struct semaphore *sema, int init_val);
-void _rtw_free_sema(struct semaphore *sema);
-void _rtw_up_sema(struct semaphore *sema);
u32 _rtw_down_sema(struct semaphore *sema);
-void _rtw_mutex_init(struct mutex *pmutex);
-void _rtw_mutex_free(struct mutex *pmutex);
-void _rtw_spinlock_init(spinlock_t *plock);
-void _rtw_spinlock_free(spinlock_t *plock);
void _rtw_init_queue(struct __queue *pqueue);
u32 _rtw_queue_empty(struct __queue *pqueue);
-u32 rtw_end_of_queue_search(struct list_head *queue, struct list_head *pelement);
+u32 rtw_end_of_queue_search(struct list_head *queue,
+ struct list_head *pelement);
-u32 rtw_get_current_time(void);
u32 rtw_systime_to_ms(u32 systime);
u32 rtw_ms_to_systime(u32 ms);
s32 rtw_get_passing_time_ms(u32 start);
@@ -322,32 +270,21 @@ s32 rtw_get_time_interval_ms(u32 start, u32 end);
void rtw_sleep_schedulable(int ms);
-void rtw_msleep_os(int ms);
-void rtw_usleep_os(int us);
-
u32 rtw_atoi(u8 *s);
-void rtw_mdelay_os(int ms);
-void rtw_udelay_os(int us);
-
-void rtw_yield_os(void);
-
static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer)
{
return del_timer_sync(ptimer);
}
-static __inline void thread_enter(char *name)
+static inline void thread_enter(char *name)
{
-#ifdef daemonize
- daemonize("%s", name);
-#endif
allow_signal(SIGTERM);
}
static inline void flush_signals_thread(void)
{
- if (signal_pending (current))
+ if (signal_pending(current))
flush_signals(current);
}
@@ -357,13 +294,13 @@ static inline int res_to_status(int res)
}
#define _RND(sz, r) ((((sz)+((r)-1))/(r))*(r))
-#define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0: 1)) << 2)
+#define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0 : 1)) << 2)
static inline u32 _RND4(u32 sz)
{
u32 val;
- val = ((sz >> 2) + ((sz & 3) ? 1: 0)) << 2;
+ val = ((sz >> 2) + ((sz & 3) ? 1 : 0)) << 2;
return val;
}
@@ -371,7 +308,7 @@ static inline u32 _RND8(u32 sz)
{
u32 val;
- val = ((sz >> 3) + ((sz & 7) ? 1: 0)) << 3;
+ val = ((sz >> 3) + ((sz & 7) ? 1 : 0)) << 3;
return val;
}
@@ -379,7 +316,7 @@ static inline u32 _RND128(u32 sz)
{
u32 val;
- val = ((sz >> 7) + ((sz & 127) ? 1: 0)) << 7;
+ val = ((sz >> 7) + ((sz & 127) ? 1 : 0)) << 7;
return val;
}
@@ -387,7 +324,7 @@ static inline u32 _RND256(u32 sz)
{
u32 val;
- val = ((sz >> 8) + ((sz & 255) ? 1: 0)) << 8;
+ val = ((sz >> 8) + ((sz & 255) ? 1 : 0)) << 8;
return val;
}
@@ -395,7 +332,7 @@ static inline u32 _RND512(u32 sz)
{
u32 val;
- val = ((sz >> 9) + ((sz & 511) ? 1: 0)) << 9;
+ val = ((sz >> 9) + ((sz & 511) ? 1 : 0)) << 9;
return val;
}
@@ -404,32 +341,14 @@ static inline u32 bitshift(u32 bitmask)
u32 i;
for (i = 0; i <= 31; i++)
- if (((bitmask>>i) & 0x1) == 1) break;
+ if (((bitmask>>i) & 0x1) == 1)
+ break;
return i;
}
/* limitation of path length */
#define PATH_LENGTH_MAX PATH_MAX
-void rtw_suspend_lock_init(void);
-void rtw_suspend_lock_uninit(void);
-void rtw_lock_suspend(void);
-void rtw_unlock_suspend(void);
-
-/* Atomic integer operations */
-#define ATOMIC_T atomic_t
-
-void ATOMIC_SET(ATOMIC_T *v, int i);
-int ATOMIC_READ(ATOMIC_T *v);
-void ATOMIC_ADD(ATOMIC_T *v, int i);
-void ATOMIC_SUB(ATOMIC_T *v, int i);
-void ATOMIC_INC(ATOMIC_T *v);
-void ATOMIC_DEC(ATOMIC_T *v);
-int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i);
-int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i);
-int ATOMIC_INC_RETURN(ATOMIC_T *v);
-int ATOMIC_DEC_RETURN(ATOMIC_T *v);
-
struct rtw_netdev_priv_indicator {
void *priv;
u32 sizeof_priv;
@@ -451,7 +370,7 @@ void rtw_free_netdev(struct net_device *netdev);
#define FUNC_ADPT_FMT "%s(%s)"
#define FUNC_ADPT_ARG(adapter) __func__, adapter->pnetdev->name
-#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)),(sig), 1)
+#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)), (sig), 1)
u64 rtw_modular64(u64 x, u64 y);
u64 rtw_division64(u64 x, u64 y);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 555c801d2ded..161f1e5af9e6 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -456,9 +456,9 @@ void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo,
bool AutoLoadFail);
void Hal_EfuseParseCustomerID88E(struct adapter *padapter, u8 *hwinfo,
bool AutoLoadFail);
-void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter,u8 *PROMContent,
+void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent,
bool AutoLoadFail);
-void Hal_ReadThermalMeter_88E(struct adapter * dapter, u8 *PROMContent,
+void Hal_ReadThermalMeter_88E(struct adapter *dapter, u8 *PROMContent,
bool AutoloadFail);
void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo,
bool AutoLoadFail);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index 02ccb404f53d..a8facf00eac0 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -58,11 +58,11 @@ enum rx_packet_type {
#define INTERRUPT_MSG_FORMAT_LEN 60
void rtl8188eu_init_recvbuf(struct adapter *padapter, struct recv_buf *buf);
s32 rtl8188eu_init_recv_priv(struct adapter *padapter);
-void rtl8188eu_free_recv_priv(struct adapter * padapter);
-void rtl8188eu_recv_hdl(struct adapter * padapter, struct recv_buf *precvbuf);
+void rtl8188eu_free_recv_priv(struct adapter *padapter);
+void rtl8188eu_recv_hdl(struct adapter *padapter, struct recv_buf *precvbuf);
void rtl8188eu_recv_tasklet(void *priv);
void rtl8188e_query_rx_phy_status(union recv_frame *fr, struct phy_stat *phy);
-void rtl8188e_process_phy_info(struct adapter * padapter, void *prframe);
+void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe);
void update_recvframe_phyinfo_88e(union recv_frame *fra, struct phy_stat *phy);
void update_recvframe_attrib_88e(union recv_frame *fra, struct recv_stat *stat);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
index c12c56b97343..2c33eb30d31b 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
@@ -68,7 +68,7 @@
#define DISABLE_TRXPKT_BUF_ACCESS 0x0
-/* 0x0000h ~ 0x00FFh System Configuration */
+/* 0x0000h ~ 0x00FFh System Configuration */
#define REG_SYS_ISO_CTRL 0x0000
#define REG_SYS_FUNC_EN 0x0002
#define REG_APS_FSMCO 0x0004
@@ -142,7 +142,7 @@
#define REG_MAC_PHY_CTRL_NORMAL 0x00f8
-/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
#define REG_CR 0x0100
#define REG_PBP 0x0104
#define REG_PKT_BUFF_ACCESS_CTRL 0x0106
@@ -188,7 +188,7 @@
#define REG_LLT_INIT 0x01E0
-/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
#define REG_RQPN 0x0200
#define REG_FIFOPAGE 0x0204
#define REG_TDECTRL 0x0208
@@ -196,12 +196,12 @@
#define REG_TXDMA_STATUS 0x0210
#define REG_RQPN_NPQ 0x0214
-/* 0x0280h ~ 0x02FFh RXDMA Configuration */
+/* 0x0280h ~ 0x02FFh RXDMA Configuration */
#define REG_RXDMA_AGG_PG_TH 0x0280
#define REG_RXPKT_NUM 0x0284
#define REG_RXDMA_STATUS 0x0288
-/* 0x0300h ~ 0x03FFh PCIe */
+/* 0x0300h ~ 0x03FFh PCIe */
#define REG_PCIE_CTRL_REG 0x0300
#define REG_INT_MIG 0x0304 /* Interrupt Migration */
#define REG_BCNQ_DESA 0x0308 /* TX Beacon Descr Address */
@@ -222,7 +222,7 @@
#define REG_PCIE_HISR 0x03A0
/* spec version 11 */
-/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
#define REG_VOQ_INFORMATION 0x0400
#define REG_VIQ_INFORMATION 0x0404
#define REG_BEQ_INFORMATION 0x0408
@@ -276,7 +276,7 @@
#define REG_TX_RPT_TIME 0x04F0 /* 2 byte */
#define REG_DUMMY 0x04FC
-/* 0x0500h ~ 0x05FFh EDCA Configuration */
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
#define REG_EDCA_VO_PARAM 0x0500
#define REG_EDCA_VI_PARAM 0x0504
#define REG_EDCA_BE_PARAM 0x0508
@@ -294,16 +294,16 @@
#define REG_DIS_TXREQ_CLR 0x0523
#define REG_RD_CTRL 0x0524
/* Format for offset 540h-542h: */
-/* [3:0]: TBTT prohibit setup in unit of 32us. The time for HW getting
+/* [3:0]: TBTT prohibit setup in unit of 32us. The time for HW getting
* beacon content before TBTT. */
-/* [7:4]: Reserved. */
-/* [19:8]: TBTT prohibit hold in unit of 32us. The time for HW holding
+/* [7:4]: Reserved. */
+/* [19:8]: TBTT prohibit hold in unit of 32us. The time for HW holding
* to send the beacon packet. */
-/* [23:20]: Reserved */
+/* [23:20]: Reserved */
/* Description: */
-/* | */
+/* | */
/* |<--Setup--|--Hold------------>| */
-/* --------------|---------------------- */
+/* --------------|---------------------- */
/* | */
/* TBTT */
/* Note: We cannot update beacon content to HW or send any AC packets during
@@ -335,7 +335,7 @@
#define REG_FW_RESET_TSF_CNT_0 0x05FD
#define REG_FW_BCN_DIS_CNT 0x05FE
-/* 0x0600h ~ 0x07FFh WMAC Configuration */
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
#define REG_APSD_CTRL 0x0600
#define REG_BWOPMODE 0x0603
#define REG_TCR 0x0604
@@ -382,7 +382,7 @@
#define _RXERR_RPT_SEL(type) ((type) << 28)
/* Note: */
-/* The NAV upper value is very important to WiFi 11n 5.2.3 NAV test.
+/* The NAV upper value is very important to WiFi 11n 5.2.3 NAV test.
* The default value is always too small, but the WiFi TestPlan test
* by 25,000 microseconds of NAV through sending CTS in the air.
* We must update this value greater than 25,000 microseconds to pass
@@ -422,7 +422,7 @@
#define REG_MACID1 0x0700
#define REG_BSSID1 0x0708
-/* 0xFE00h ~ 0xFE55h USB Configuration */
+/* 0xFE00h ~ 0xFE55h USB Configuration */
#define REG_USB_INFO 0xFE17
#define REG_USB_SPECIAL_OPTION 0xFE55
#define REG_USB_DMA_AGG_TO 0xFE5B
@@ -689,13 +689,13 @@ Current IOREG MAP
0x0600h ~ 0x07FFh WMAC Configuration (512 Bytes)
0x2000h ~ 0x3FFFh 8051 FW Download Region (8196 Bytes)
*/
-/* 8192C (TXPAUSE) transmission pause (Offset 0x522, 8 bits) */
+/* 8192C (TXPAUSE) transmission pause (Offset 0x522, 8 bits) */
/* Note: */
-/* The bits of stopping AC(VO/VI/BE/BK) queue in datasheet
+/* The bits of stopping AC(VO/VI/BE/BK) queue in datasheet
* RTL8192S/RTL8192C are wrong, */
-/* the correct arragement is VO - Bit0, VI - Bit1, BE - Bit2,
+/* the correct arragement is VO - Bit0, VI - Bit1, BE - Bit2,
* and BK - Bit3. */
-/* 8723 and 88E may be not correct either in the earlier version. */
+/* 8723 and 88E may be not correct either in the earlier version. */
#define StopBecon BIT6
#define StopHigh BIT5
#define StopMgt BIT4
@@ -733,7 +733,7 @@ Current IOREG MAP
#define RCR_MXDMA_OFFSET 8
#define RCR_FIFO_OFFSET 13
-/* 0xFE00h ~ 0xFE55h USB Configuration */
+/* 0xFE00h ~ 0xFE55h USB Configuration */
#define REG_USB_INFO 0xFE17
#define REG_USB_SPECIAL_OPTION 0xFE55
#define REG_USB_DMA_AGG_TO 0xFE5B
@@ -743,7 +743,7 @@ Current IOREG MAP
#define REG_USB_HRPWM 0xFE58
#define REG_USB_HCPWM 0xFE57
/* 8192C Regsiter Bit and Content definition */
-/* 0x0000h ~ 0x00FFh System Configuration */
+/* 0x0000h ~ 0x00FFh System Configuration */
/* 2 SYS_ISO_CTRL */
#define ISO_MD2PP BIT(0)
@@ -914,7 +914,7 @@ Current IOREG MAP
/* 2SYS_CFG */
#define RTL_ID BIT(23) /* TestChip ID, 1:Test(RLE); 0:MP(RL) */
-/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
/* 2 Function Enable Registers */
/* 2 CR */
@@ -975,9 +975,9 @@ Current IOREG MAP
#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
-#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8 )
-#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6 )
-#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4 )
+#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8)
+#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6)
+#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4)
#define QUEUE_LOW 1
#define QUEUE_NORMAL 2
@@ -995,7 +995,7 @@ Current IOREG MAP
#define _LLT_OP(x) (((x) & 0x3) << 30)
#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
-/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
/* 2RQPN */
#define _HPQ(x) ((x) & 0xFF)
#define _LPQ(x) (((x) & 0xFF) << 8)
@@ -1019,7 +1019,7 @@ Current IOREG MAP
/* 2 TXDMA_OFFSET_CHK */
#define DROP_DATA_EN BIT(9)
-/* 0x0280h ~ 0x028Bh RX DMA Configuration */
+/* 0x0280h ~ 0x028Bh RX DMA Configuration */
/* REG_RXDMA_CONTROL, 0x0286h */
@@ -1028,7 +1028,7 @@ Current IOREG MAP
#define RXDMA_IDLE BIT(17)
#define RW_RELEASE_EN BIT(18)
-/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
/* 2 FWHW_TXQ_CTRL */
#define EN_AMPDU_RTY_NEW BIT(7)
@@ -1040,7 +1040,7 @@ Current IOREG MAP
#define RETRY_LIMIT_SHORT_SHIFT 8
#define RETRY_LIMIT_LONG_SHIFT 0
-/* 0x0500h ~ 0x05FFh EDCA Configuration */
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
/* 2 EDCA setting */
#define AC_PARAM_TXOP_LIMIT_OFFSET 16
@@ -1071,7 +1071,7 @@ Current IOREG MAP
#define AcmHw_ViqStatus BIT(5)
#define AcmHw_VoqStatus BIT(6)
-/* 0x0600h ~ 0x07FFh WMAC Configuration */
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
/* 2APSD_CTRL */
#define APSDOFF BIT(6)
#define APSDOFF_STATUS BIT(7)
@@ -1128,7 +1128,7 @@ Current IOREG MAP
#define SCR_TXBCUSEDK BIT(6) /* Force Tx Bcast pkt Use Default Key */
#define SCR_RXBCUSEDK BIT(7) /* Force Rx Bcast pkt Use Default Key */
-/* RTL8188E SDIO Configuration */
+/* RTL8188E SDIO Configuration */
/* I/O bus domain address mapping */
#define SDIO_LOCAL_BASE 0x10250000
@@ -1264,7 +1264,7 @@ Current IOREG MAP
#define SDIO_TX_FREE_PG_QUEUE 4
#define SDIO_TX_FIFO_PAGE_SZ 128
-/* 0xFE00h ~ 0xFE55h USB Configuration */
+/* 0xFE00h ~ 0xFE55h USB Configuration */
/* 2 USB Information (0xFE17) */
#define USB_IS_HIGH_SPEED 0
@@ -1331,7 +1331,7 @@ Current IOREG MAP
/* 8192C EEPROM/EFUSE share register definition. */
-/* EEPROM/Efuse PG Offset for 88EE/88EU/88ES */
+/* EEPROM/Efuse PG Offset for 88EE/88EU/88ES */
#define EEPROM_TX_PWR_INX_88E 0x10
#define EEPROM_ChannelPlan_88E 0xB8
@@ -1362,7 +1362,7 @@ Current IOREG MAP
/* RTL88ES */
#define EEPROM_MAC_ADDR_88ES 0x11A
-/* EEPROM/Efuse Value Type */
+/* EEPROM/Efuse Value Type */
#define EETYPE_TX_PWR 0x0
/* Default Value for EEPROM or EFUSE!!! */
diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h
index 8cafd7adfdcd..3d347029ff7a 100644
--- a/drivers/staging/rtl8188eu/include/rtw_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h
@@ -69,7 +69,7 @@ struct evt_priv {
bool c2h_wk_alive;
struct rtw_cbuf *c2h_queue;
#define C2H_QUEUE_MAX_LEN 10
- ATOMIC_T event_seq;
+ atomic_t event_seq;
u8 *evt_buf; /* shall be non-paged, and 4 bytes aligned */
u8 *evt_allocated_buf;
u32 evt_done_cnt;
@@ -478,8 +478,7 @@ struct getrfintfs_parm {
u8 rfintfs;
};
-struct Tx_Beacon_param
-{
+struct Tx_Beacon_param {
struct wlan_bssid_ex network;
};
@@ -625,14 +624,14 @@ struct setratable_parm {
};
struct getratable_parm {
- uint rsvd;
+ uint rsvd;
};
struct getratable_rsp {
- u8 ss_ForceUp[NumRates];
- u8 ss_ULevel[NumRates];
- u8 ss_DLevel[NumRates];
- u8 count_judge[NumRates];
+ u8 ss_ForceUp[NumRates];
+ u8 ss_ULevel[NumRates];
+ u8 ss_DLevel[NumRates];
+ u8 count_judge[NumRates];
};
/* to get TX,RX retry count */
@@ -715,26 +714,22 @@ struct set_ch_parm {
};
/*H2C Handler index: 59 */
-struct SetChannelPlan_param
-{
+struct SetChannelPlan_param {
u8 channel_plan;
};
/*H2C Handler index: 60 */
-struct LedBlink_param
-{
+struct LedBlink_param {
struct LED_871x *pLed;
};
/*H2C Handler index: 61 */
-struct SetChannelSwitch_param
-{
+struct SetChannelSwitch_param {
u8 new_ch_no;
};
/*H2C Handler index: 62 */
-struct TDLSoption_param
-{
+struct TDLSoption_param {
u8 addr[ETH_ALEN];
u8 option;
};
@@ -763,52 +758,57 @@ Result:
#define H2C_CMD_OVERFLOW 0x06
#define H2C_RESERVED 0x07
-u8 rtw_setassocsta_cmd(struct adapter *padapter, u8 *mac_addr);
+u8 rtw_setassocsta_cmd(struct adapter *padapter, u8 *mac_addr);
u8 rtw_setstandby_cmd(struct adapter *padapter, uint action);
-u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
+u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
int ssid_num, struct rtw_ieee80211_channel *ch,
int ch_num);
-u8 rtw_createbss_cmd(struct adapter *padapter);
-u8 rtw_createbss_cmd_ex(struct adapter *padapter, unsigned char *pbss,
- unsigned int sz);
-u8 rtw_setphy_cmd(struct adapter *padapter, u8 modem, u8 ch);
+u8 rtw_createbss_cmd(struct adapter *padapter);
+u8 rtw_createbss_cmd_ex(struct adapter *padapter, unsigned char *pbss,
+ unsigned int sz);
+u8 rtw_setphy_cmd(struct adapter *padapter, u8 modem, u8 ch);
u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key);
-u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue);
-u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network* pnetwork);
-u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueue);
-u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infra networktype);
-u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset);
-u8 rtw_setbasicrate_cmd(struct adapter *padapter, u8 *rateset);
-u8 rtw_setbbreg_cmd(struct adapter * padapter, u8 offset, u8 val);
-u8 rtw_setrfreg_cmd(struct adapter * padapter, u8 offset, u32 val);
-u8 rtw_getbbreg_cmd(struct adapter * padapter, u8 offset, u8 * pval);
-u8 rtw_getrfreg_cmd(struct adapter * padapter, u8 offset, u8 * pval);
-u8 rtw_setrfintfs_cmd(struct adapter *padapter, u8 mode);
-u8 rtw_setrttbl_cmd(struct adapter *padapter, struct setratable_parm *prate_table);
-u8 rtw_getrttbl_cmd(struct adapter *padapter, struct getratable_rsp *pval);
-
-u8 rtw_gettssi_cmd(struct adapter *padapter, u8 offset,u8 *pval);
-u8 rtw_setfwdig_cmd(struct adapter*padapter, u8 type);
-u8 rtw_setfwra_cmd(struct adapter*padapter, u8 type);
-
-u8 rtw_addbareq_cmd(struct adapter*padapter, u8 tid, u8 *addr);
+u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry,
+ u8 enqueue);
+u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork);
+u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms,
+ bool enqueue);
+u8 rtw_setopmode_cmd(struct adapter *padapter,
+ enum ndis_802_11_network_infra networktype);
+u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset);
+u8 rtw_setbasicrate_cmd(struct adapter *padapter, u8 *rateset);
+u8 rtw_setbbreg_cmd(struct adapter *padapter, u8 offset, u8 val);
+u8 rtw_setrfreg_cmd(struct adapter *padapter, u8 offset, u32 val);
+u8 rtw_getbbreg_cmd(struct adapter *padapter, u8 offset, u8 *pval);
+u8 rtw_getrfreg_cmd(struct adapter *padapter, u8 offset, u8 *pval);
+u8 rtw_setrfintfs_cmd(struct adapter *padapter, u8 mode);
+u8 rtw_setrttbl_cmd(struct adapter *padapter,
+ struct setratable_parm *prate_table);
+u8 rtw_getrttbl_cmd(struct adapter *padapter, struct getratable_rsp *pval);
+
+u8 rtw_gettssi_cmd(struct adapter *padapter, u8 offset, u8 *pval);
+u8 rtw_setfwdig_cmd(struct adapter *padapter, u8 type);
+u8 rtw_setfwra_cmd(struct adapter *padapter, u8 type);
+
+u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr);
u8 rtw_dynamic_chk_wk_cmd(struct adapter *adapter);
-u8 rtw_lps_ctrl_wk_cmd(struct adapter*padapter, u8 lps_ctrl_type, u8 enqueue);
-u8 rtw_rpt_timer_cfg_cmd(struct adapter*padapter, u16 minRptTime);
+u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue);
+u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 minRptTime);
- u8 rtw_antenna_select_cmd(struct adapter*padapter, u8 antenna,u8 enqueue);
-u8 rtw_ps_cmd(struct adapter*padapter);
+u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue);
+u8 rtw_ps_cmd(struct adapter *padapter);
#ifdef CONFIG_88EU_AP_MODE
-u8 rtw_chk_hi_queue_cmd(struct adapter*padapter);
+u8 rtw_chk_hi_queue_cmd(struct adapter *padapter);
#endif
-u8 rtw_set_ch_cmd(struct adapter*padapter, u8 ch, u8 bw, u8 ch_offset, u8 enqueue);
-u8 rtw_set_chplan_cmd(struct adapter*padapter, u8 chplan, u8 enqueue);
-u8 rtw_led_blink_cmd(struct adapter*padapter, struct LED_871x * pLed);
-u8 rtw_set_csa_cmd(struct adapter*padapter, u8 new_ch_no);
+u8 rtw_set_ch_cmd(struct adapter *padapter, u8 ch, u8 bw, u8 ch_offset,
+ u8 enqueue);
+u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue);
+u8 rtw_led_blink_cmd(struct adapter *padapter, struct LED_871x *pLed);
+u8 rtw_set_csa_cmd(struct adapter *padapter, u8 new_ch_no);
u8 rtw_tdls_cmd(struct adapter *padapter, u8 *addr, u8 option);
u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt);
@@ -820,7 +820,7 @@ void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
void rtw_createbss_cmd_callback(struct adapter *adapt, struct cmd_obj *pcmd);
void rtw_getbbrfreg_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
-void rtw_readtssi_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
+void rtw_readtssi_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
void rtw_setstaKey_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
void rtw_setassocsta_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cm);
@@ -913,8 +913,7 @@ enum rtw_h2c_cmd {
#define _SetRFReg_CMD_ _Write_RFREG_CMD_
#ifdef _RTW_CMD_C_
-static struct _cmd_callback rtw_cmd_callback[] =
-{
+static struct _cmd_callback rtw_cmd_callback[] = {
{GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
{GEN_CMD_CODE(_Write_MACREG), NULL},
{GEN_CMD_CODE(_Read_BBREG), &rtw_getbbrfreg_cmdrsp_callback},
diff --git a/drivers/staging/rtl8188eu/include/rtw_eeprom.h b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
index b2672c3febd1..904fea1fad6c 100644
--- a/drivers/staging/rtl8188eu/include/rtw_eeprom.h
+++ b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
@@ -108,7 +108,7 @@ enum RT_CUSTOMER_ID {
RT_CID_CC_C = 38,
RT_CID_819x_Xavi = 39,
RT_CID_819x_FUNAI_TV = 40,
- RT_CID_819x_ALPHA_WD=41,
+ RT_CID_819x_ALPHA_WD = 41,
};
struct eeprom_priv {
diff --git a/drivers/staging/rtl8188eu/include/rtw_efuse.h b/drivers/staging/rtl8188eu/include/rtw_efuse.h
index cee6b5e8b070..df51355e0f32 100644
--- a/drivers/staging/rtl8188eu/include/rtw_efuse.h
+++ b/drivers/staging/rtl8188eu/include/rtw_efuse.h
@@ -135,7 +135,7 @@ void EFUSE_GetEfuseDefinition(struct adapter *adapt, u8 type, u8 type1,
u8 efuse_OneByteRead(struct adapter *adapter, u16 addr, u8 *data, bool test);
u8 efuse_OneByteWrite(struct adapter *adapter, u16 addr, u8 data, bool test);
-void Efuse_PowerSwitch(struct adapter *adapt,u8 bWrite,u8 PwrState);
+void Efuse_PowerSwitch(struct adapter *adapt, u8 bWrite, u8 PwrState);
int Efuse_PgPacketRead(struct adapter *adapt, u8 offset, u8 *data, bool test);
int Efuse_PgPacketWrite(struct adapter *adapter, u8 offset, u8 word, u8 *data,
bool test);
diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
index eb6f0e550acf..3d1dfcc1b603 100644
--- a/drivers/staging/rtl8188eu/include/rtw_io.h
+++ b/drivers/staging/rtl8188eu/include/rtw_io.h
@@ -123,7 +123,7 @@ struct _io_ops {
u8 *pmem);
u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
u8 *pmem);
- u32 (*_write_scsi)(struct intf_hdl *pintfhdl,u32 cnt, u8 *pmem);
+ u32 (*_write_scsi)(struct intf_hdl *pintfhdl, u32 cnt, u8 *pmem);
void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
};
@@ -213,7 +213,7 @@ struct reg_protocol_wt {
u32 Value;
#else
/* DW1 */
- u32 Reserved1 :4;
+ u32 Reserved1:4;
u32 NumOfTrans:4;
u32 Reserved2:24;
/* DW2 */
@@ -254,7 +254,7 @@ struct io_priv {
};
uint ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
-void sync_ioreq_enqueue(struct io_req *preq,struct io_queue *ioqueue);
+void sync_ioreq_enqueue(struct io_req *preq, struct io_queue *ioqueue);
uint sync_ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
uint free_ioreq(struct io_req *preq, struct io_queue *pio_queue);
struct io_req *alloc_ioreq(struct io_queue *pio_q);
@@ -368,20 +368,20 @@ void free_io_queue(struct adapter *adapter);
void async_bus_io(struct io_queue *pio_q);
void bus_sync_io(struct io_queue *pio_q);
u32 _ioreq2rwmem(struct io_queue *pio_q);
-void dev_power_down(struct adapter * Adapter, u8 bpwrup);
-
-#define PlatformEFIOWrite1Byte(_a,_b,_c) \
- rtw_write8(_a,_b,_c)
-#define PlatformEFIOWrite2Byte(_a,_b,_c) \
- rtw_write16(_a,_b,_c)
-#define PlatformEFIOWrite4Byte(_a,_b,_c) \
- rtw_write32(_a,_b,_c)
-
-#define PlatformEFIORead1Byte(_a,_b) \
- rtw_read8(_a,_b)
-#define PlatformEFIORead2Byte(_a,_b) \
- rtw_read16(_a,_b)
-#define PlatformEFIORead4Byte(_a,_b) \
- rtw_read32(_a,_b)
+void dev_power_down(struct adapter *Adapter, u8 bpwrup);
+
+#define PlatformEFIOWrite1Byte(_a, _b, _c) \
+ rtw_write8(_a, _b, _c)
+#define PlatformEFIOWrite2Byte(_a, _b, _c) \
+ rtw_write16(_a, _b, _c)
+#define PlatformEFIOWrite4Byte(_a, _b, _c) \
+ rtw_write32(_a, _b, _c)
+
+#define PlatformEFIORead1Byte(_a, _b) \
+ rtw_read8(_a, _b)
+#define PlatformEFIORead2Byte(_a, _b) \
+ rtw_read16(_a, _b)
+#define PlatformEFIORead4Byte(_a, _b) \
+ rtw_read32(_a, _b)
#endif /* _RTL8711_IO_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
index 49efb23747de..187fe1f32478 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
@@ -28,10 +28,10 @@ typedef u8 NDIS_802_11_PMKID_VALUE[16];
u8 rtw_set_802_11_add_key(struct adapter *adapt, struct ndis_802_11_key *key);
u8 rtw_set_802_11_authentication_mode(struct adapter *adapt,
enum ndis_802_11_auth_mode authmode);
-u8 rtw_set_802_11_bssid(struct adapter*adapter, u8 *bssid);
+u8 rtw_set_802_11_bssid(struct adapter *adapter, u8 *bssid);
u8 rtw_set_802_11_add_wep(struct adapter *adapter, struct ndis_802_11_wep *wep);
u8 rtw_set_802_11_disassociate(struct adapter *adapter);
-u8 rtw_set_802_11_bssid_list_scan(struct adapter*adapter,
+u8 rtw_set_802_11_bssid_list_scan(struct adapter *adapter,
struct ndis_802_11_ssid *pssid,
int ssid_max_num);
u8 rtw_set_802_11_infrastructure_mode(struct adapter *adapter,
diff --git a/drivers/staging/rtl8188eu/include/rtw_iol.h b/drivers/staging/rtl8188eu/include/rtw_iol.h
index 6949922baa65..ec0c6cb12057 100644
--- a/drivers/staging/rtl8188eu/include/rtw_iol.h
+++ b/drivers/staging/rtl8188eu/include/rtw_iol.h
@@ -70,15 +70,15 @@ int _rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr,
int _rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path,
u16 addr, u32 value, u32 mask);
#define rtw_IOL_append_WB_cmd(xmit_frame, addr, value, mask) \
- _rtw_IOL_append_WB_cmd((xmit_frame), (addr), (value) ,(mask))
+ _rtw_IOL_append_WB_cmd((xmit_frame), (addr), (value) , (mask))
#define rtw_IOL_append_WW_cmd(xmit_frame, addr, value, mask) \
- _rtw_IOL_append_WW_cmd((xmit_frame), (addr), (value),(mask))
+ _rtw_IOL_append_WW_cmd((xmit_frame), (addr), (value), (mask))
#define rtw_IOL_append_WD_cmd(xmit_frame, addr, value, mask) \
_rtw_IOL_append_WD_cmd((xmit_frame), (addr), (value), (mask))
#define rtw_IOL_append_WRF_cmd(xmit_frame, rf_path, addr, value, mask) \
- _rtw_IOL_append_WRF_cmd((xmit_frame),(rf_path), (addr), (value), (mask))
+ _rtw_IOL_append_WRF_cmd((xmit_frame), (rf_path), (addr), (value), (mask))
u8 rtw_IOL_cmd_boundary_handle(struct xmit_frame *pxmit_frame);
-void rtw_IOL_cmd_buf_dump(struct adapter *Adapter,int buf_len,u8 *pbuf);
+void rtw_IOL_cmd_buf_dump(struct adapter *Adapter, int buf_len, u8 *pbuf);
#endif /* __RTW_IOL_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_led.h b/drivers/staging/rtl8188eu/include/rtw_led.h
index d0da4fd40d18..0da4e27a70fa 100644
--- a/drivers/staging/rtl8188eu/include/rtw_led.h
+++ b/drivers/staging/rtl8188eu/include/rtw_led.h
@@ -147,7 +147,7 @@ struct LED_871x {
enum LED_STRATEGY_871x {
SW_LED_MODE0 = 0, /* SW control 1 LED via GPIO0. It is default option.*/
- SW_LED_MODE1= 1, /* 2 LEDs, through LED0 and LED1. For ALPHA. */
+ SW_LED_MODE1 = 1, /* 2 LEDs, through LED0 and LED1. For ALPHA. */
SW_LED_MODE2 = 2, /* SW control 1 LED via GPIO0, customized for AzWave
* 8187 minicard. */
SW_LED_MODE3 = 3, /* SW control 1 LED via GPIO0, customized for Sercomm
@@ -182,7 +182,7 @@ struct led_priv{
void BlinkTimerCallback(void *data);
void BlinkWorkItemCallback(struct work_struct *work);
-void ResetLedStatus(struct LED_871x * pLed);
+void ResetLedStatus(struct LED_871x *pLed);
void InitLed871x(struct adapter *padapter, struct LED_871x *pLed,
enum LED_PIN_871x LedPin);
@@ -190,7 +190,7 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed,
void DeInitLed871x(struct LED_871x *pLed);
/* hal... */
-void BlinkHandler(struct LED_871x * pLed);
+void BlinkHandler(struct LED_871x *pLed);
void SwLedOn(struct adapter *padapter, struct LED_871x *pLed);
void SwLedOff(struct adapter *padapter, struct LED_871x *pLed);
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 4a7143e0eed0..6cd988f867da 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -129,17 +129,17 @@ struct rt_link_detect {
struct profile_info {
u8 ssidlen;
- u8 ssid[ WLAN_SSID_MAXLEN ];
- u8 peermac[ ETH_ALEN ];
+ u8 ssid[WLAN_SSID_MAXLEN];
+ u8 peermac[ETH_ALEN];
};
struct tx_invite_req_info {
u8 token;
u8 benable;
- u8 go_ssid[ WLAN_SSID_MAXLEN ];
+ u8 go_ssid[WLAN_SSID_MAXLEN];
u8 ssidlen;
- u8 go_bssid[ ETH_ALEN ];
- u8 peer_macaddr[ ETH_ALEN ];
+ u8 go_bssid[ETH_ALEN];
+ u8 peer_macaddr[ETH_ALEN];
u8 operating_ch; /* This information will be set by using the
* p2p_set op_ch=x */
u8 peer_ch; /* The listen channel for peer P2P device */
@@ -182,9 +182,9 @@ struct tx_nego_req_info {
};
struct group_id_info {
- u8 go_device_addr[ ETH_ALEN ]; /* The GO's device address of
+ u8 go_device_addr[ETH_ALEN]; /* The GO's device address of
* this P2P group */
- u8 ssid[ WLAN_SSID_MAXLEN ]; /* The SSID of this P2P group */
+ u8 ssid[WLAN_SSID_MAXLEN]; /* The SSID of this P2P group */
};
struct scan_limit_info {
@@ -388,7 +388,7 @@ struct mlme_priv {
u8 *assoc_rsp;
u32 assoc_rsp_len;
-#if defined (CONFIG_88EU_AP_MODE)
+#if defined(CONFIG_88EU_AP_MODE)
/* Number of associated Non-ERP stations (i.e., stations using 802.11b
* in 802.11g BSS) */
int num_sta_non_erp;
@@ -472,7 +472,7 @@ void rtw_join_timeout_handler(void *FunctionContext);
void _rtw_scan_timeout_handler(void *FunctionContext);
void rtw_free_network_queue(struct adapter *adapter, u8 isfreeall);
int rtw_init_mlme_priv(struct adapter *adapter);
-void rtw_free_mlme_priv (struct mlme_priv *pmlmepriv);
+void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv);
int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv);
int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv,
int keyid, u8 set_tx);
@@ -508,7 +508,7 @@ static inline void set_fwstate(struct mlme_priv *pmlmepriv, int state)
{
pmlmepriv->fw_state |= state;
/* FOR HW integration */
- if (_FW_UNDER_SURVEY==state)
+ if (_FW_UNDER_SURVEY == state)
pmlmepriv->bScanInProcess = true;
}
@@ -516,7 +516,7 @@ static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, int state)
{
pmlmepriv->fw_state &= ~state;
/* FOR HW integration */
- if (_FW_UNDER_SURVEY==state)
+ if (_FW_UNDER_SURVEY == state)
pmlmepriv->bScanInProcess = false;
}
@@ -526,48 +526,38 @@ static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, int state)
*/
static inline void clr_fwstate(struct mlme_priv *pmlmepriv, int state)
{
- unsigned long irql;
-
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, state) == true)
pmlmepriv->fw_state ^= state;
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
}
static inline void clr_fwstate_ex(struct mlme_priv *pmlmepriv, int state)
{
- unsigned long irql;
-
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
_clr_fwstate_(pmlmepriv, state);
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
}
static inline void up_scanned_network(struct mlme_priv *pmlmepriv)
{
- unsigned long irql;
-
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
pmlmepriv->num_of_scanned++;
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
}
static inline void down_scanned_network(struct mlme_priv *pmlmepriv)
{
- unsigned long irql;
-
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
pmlmepriv->num_of_scanned--;
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
}
static inline void set_scanned_network_val(struct mlme_priv *pmlmepriv, int val)
{
- unsigned long irql;
-
- _enter_critical_bh(&pmlmepriv->lock, &irql);
+ spin_lock_bh(&pmlmepriv->lock);
pmlmepriv->num_of_scanned = val;
- _exit_critical_bh(&pmlmepriv->lock, &irql);
+ spin_unlock_bh(&pmlmepriv->lock);
}
u16 rtw_get_capability(struct wlan_bssid_ex *bss);
@@ -582,7 +572,7 @@ struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue);
void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue);
void rtw_indicate_disconnect(struct adapter *adapter);
void rtw_indicate_connect(struct adapter *adapter);
-void rtw_indicate_scan_done( struct adapter *padapter, bool aborted);
+void rtw_indicate_scan_done(struct adapter *padapter, bool aborted);
void rtw_scan_abort(struct adapter *adapter);
int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
@@ -598,7 +588,7 @@ void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter);
void _rtw_join_timeout_handler(struct adapter *adapter);
void rtw_scan_timeout_handler(struct adapter *adapter);
- void rtw_dynamic_check_timer_handlder(struct adapter *adapter);
+void rtw_dynamic_check_timer_handlder(struct adapter *adapter);
#define rtw_is_scan_deny(adapter) false
#define rtw_clear_scan_deny(adapter) do {} while (0)
#define rtw_set_scan_deny_timer_hdl(adapter) do {} while (0)
@@ -615,7 +605,7 @@ int _rtw_enqueue_network(struct __queue *queue, struct wlan_network *pnetwork);
struct wlan_network *_rtw_dequeue_network(struct __queue *queue);
- struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv);
+struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv);
void _rtw_free_network(struct mlme_priv *pmlmepriv,
@@ -624,7 +614,7 @@ void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv,
struct wlan_network *pnetwork);
-struct wlan_network* _rtw_find_network(struct __queue *scanned_queue, u8 *addr);
+struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr);
void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall);
@@ -650,6 +640,6 @@ int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst);
void rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network);
void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network);
-void rtw_stassoc_hw_rpt(struct adapter *adapter,struct sta_info *psta);
+void rtw_stassoc_hw_rpt(struct adapter *adapter, struct sta_info *psta);
#endif /* __RTL871X_MLME_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index b1bfa2e30fdb..f0c982d6d5f2 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -241,7 +241,7 @@ struct mlme_handler {
struct action_handler {
unsigned int num;
- char* str;
+ char *str;
unsigned int (*func)(struct adapter *adapt, union recv_frame *frame);
};
@@ -401,7 +401,7 @@ struct p2p_oper_class_map {
struct mlme_ext_priv {
struct adapter *padapter;
u8 mlmeext_init;
- ATOMIC_T event_seq;
+ atomic_t event_seq;
u16 mgnt_seq;
unsigned char cur_channel;
@@ -484,7 +484,7 @@ void write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key);
void clear_cam_entry(struct adapter *padapter, u8 entry);
void invalidate_cam_all(struct adapter *padapter);
-void CAM_empty_entry(struct adapter * Adapter, u8 ucIndex);
+void CAM_empty_entry(struct adapter *Adapter, u8 ucIndex);
int allocate_fw_sta_entry(struct adapter *padapter);
void flush_all_cam_entry(struct adapter *padapter);
@@ -548,11 +548,11 @@ void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame
void report_surveydone_event(struct adapter *padapter);
void report_del_sta_event(struct adapter *padapter,
unsigned char *addr, unsigned short reason);
-void report_add_sta_event(struct adapter *padapter, unsigned char* addr,
+void report_add_sta_event(struct adapter *padapter, unsigned char *addr,
int cam_idx);
void beacon_timing_control(struct adapter *padapter);
-extern u8 set_tx_beacon_cmd(struct adapter*padapter);
+extern u8 set_tx_beacon_cmd(struct adapter *padapter);
unsigned int setup_beacon_frame(struct adapter *padapter,
unsigned char *beacon_frame);
void update_mgnt_tx_rate(struct adapter *padapter, u8 rate);
@@ -574,7 +574,7 @@ int issue_probereq_p2p_ex(struct adapter *adapter, u8 *da, int try_cnt,
int wait_ms);
void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr,
u8 dialogToken, u8 success);
-void issue_p2p_invitation_request(struct adapter *padapter, u8* raddr);
+void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr);
#endif /* CONFIG_88EU_P2P */
void issue_beacon(struct adapter *padapter, int timeout_ms);
void issue_probersp(struct adapter *padapter, unsigned char *da,
@@ -587,7 +587,7 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta,
void issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid,
u8 *da);
s32 issue_probereq_ex(struct adapter *adapter, struct ndis_802_11_ssid *pssid,
- u8* da, int try_cnt, int wait_ms);
+ u8 *da, int try_cnt, int wait_ms);
int issue_nulldata(struct adapter *padapter, unsigned char *da,
unsigned int power_mode, int try_cnt, int wait_ms);
int issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp.h b/drivers/staging/rtl8188eu/include/rtw_mp.h
index 59bdbb5f396b..ffa299b8a6cb 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mp.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mp.h
@@ -477,19 +477,19 @@ void Hal_SetChannel(struct adapter *pAdapter);
void Hal_SetAntennaPathPower(struct adapter *pAdapter);
s32 Hal_SetThermalMeter(struct adapter *pAdapter, u8 target_ther);
s32 Hal_SetPowerTracking(struct adapter *padapter, u8 enable);
-void Hal_GetPowerTracking(struct adapter *padapter, u8 * enable);
+void Hal_GetPowerTracking(struct adapter *padapter, u8 *enable);
void Hal_GetThermalMeter(struct adapter *pAdapter, u8 *value);
void Hal_mpt_SwitchRfSetting(struct adapter *pAdapter);
-void Hal_MPT_CCKTxPowerAdjust(struct adapter * Adapter, bool bInCH14);
+void Hal_MPT_CCKTxPowerAdjust(struct adapter *Adapter, bool bInCH14);
void Hal_MPT_CCKTxPowerAdjustbyIndex(struct adapter *pAdapter, bool beven);
-void Hal_SetCCKTxPower(struct adapter *pAdapter, u8 * TxPower);
-void Hal_SetOFDMTxPower(struct adapter *pAdapter, u8 * TxPower);
+void Hal_SetCCKTxPower(struct adapter *pAdapter, u8 *TxPower);
+void Hal_SetOFDMTxPower(struct adapter *pAdapter, u8 *TxPower);
void Hal_TriggerRFThermalMeter(struct adapter *pAdapter);
u8 Hal_ReadRFThermalMeter(struct adapter *pAdapter);
void Hal_SetCCKContinuousTx(struct adapter *pAdapter, u8 bStart);
void Hal_SetOFDMContinuousTx(struct adapter *pAdapter, u8 bStart);
void Hal_ProSetCrystalCap (struct adapter *pAdapter , u32 CrystalCapVal);
void _rtw_mp_xmit_priv(struct xmit_priv *pxmitpriv);
-void MP_PHY_SetRFPathSwitch(struct adapter *pAdapter ,bool bMain);
+void MP_PHY_SetRFPathSwitch(struct adapter *pAdapter , bool bMain);
#endif /* _RTW_MP_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h
index 494e90e5a756..9388368a6b19 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h
@@ -278,7 +278,7 @@ struct eeprom_rw_param {
struct mp_ioctl_handler {
u32 paramsize;
- s32 (*handler)(struct oid_par_priv* poid_par_priv);
+ s32 (*handler)(struct oid_par_priv *poid_par_priv);
u32 oid;
};
diff --git a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
index d4b8acb8025b..4a0e9ff3d479 100644
--- a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
@@ -99,12 +99,7 @@ struct reportpwrstate_parm {
static inline void _init_pwrlock(struct semaphore *plock)
{
- _rtw_init_sema(plock, 1);
-}
-
-static inline void _free_pwrlock(struct semaphore *plock)
-{
- _rtw_free_sema(plock);
+ sema_init(plock, 1);
}
static inline void _enter_pwrlock(struct semaphore *plock)
@@ -114,7 +109,7 @@ static inline void _enter_pwrlock(struct semaphore *plock)
static inline void _exit_pwrlock(struct semaphore *plock)
{
- _rtw_up_sema(plock);
+ up(plock);
}
#define LPS_DELAY_TIME 1*HZ /* 1 sec */
@@ -251,7 +246,6 @@ struct pwrctrl_priv {
(pwrctrl)->pwr_state_check_interval)
void rtw_init_pwrctrl_priv(struct adapter *adapter);
-void rtw_free_pwrctrl_priv(struct adapter *adapter);
void rtw_set_ps_mode(struct adapter *adapter, u8 ps_mode, u8 smart_ps,
u8 bcn_ant_mode);
diff --git a/drivers/staging/rtl8188eu/include/rtw_security.h b/drivers/staging/rtl8188eu/include/rtw_security.h
index 23c7814a50ed..937cad803d19 100644
--- a/drivers/staging/rtl8188eu/include/rtw_security.h
+++ b/drivers/staging/rtl8188eu/include/rtw_security.h
@@ -354,7 +354,7 @@ static const unsigned long K[64] = {
#define RORc(x, y) \
(((((unsigned long)(x) & 0xFFFFFFFFUL) >> (unsigned long)((y)&31)) | \
((unsigned long)(x) << (unsigned long)(32-((y)&31)))) & 0xFFFFFFFFUL)
-#define Ch(x, y ,z) (z ^ (x & (y ^ z)))
+#define Ch(x, y , z) (z ^ (x & (y ^ z)))
#define Maj(x, y, z) (((x | y) & z) | (x & y))
#define S(x, n) RORc((x), (n))
#define R(x, n) (((x)&0xFFFFFFFFUL)>>(n))
diff --git a/drivers/staging/rtl8188eu/include/usb_ops.h b/drivers/staging/rtl8188eu/include/usb_ops.h
index df3423765537..7d33477d551d 100644
--- a/drivers/staging/rtl8188eu/include/usb_ops.h
+++ b/drivers/staging/rtl8188eu/include/usb_ops.h
@@ -77,7 +77,7 @@ static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
{
int ret = false;
int value;
- value = ATOMIC_INC_RETURN(&dvobj->continual_urb_error);
+ value = atomic_inc_return(&dvobj->continual_urb_error);
if (value > MAX_CONTINUAL_URB_ERR) {
DBG_88E("[dvobj:%p][ERROR] continual_urb_error:%d > %d\n",
dvobj, value, MAX_CONTINUAL_URB_ERR);
@@ -91,7 +91,7 @@ static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
*/
static inline void rtw_reset_continual_urb_error(struct dvobj_priv *dvobj)
{
- ATOMIC_SET(&dvobj->continual_urb_error, 0);
+ atomic_set(&dvobj->continual_urb_error, 0);
}
#define USB_HIGH_SPEED_BULK_SIZE 512
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index 84e519974199..2e7307f259b6 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -1080,7 +1080,7 @@ enum P2P_PROTO_WK_ID {
P2P_PRE_TX_PROVDISC_PROCESS_WK = 2,
P2P_PRE_TX_NEGOREQ_PROCESS_WK = 3,
P2P_PRE_TX_INVITEREQ_PROCESS_WK = 4,
- P2P_AP_P2P_CH_SWITCH_PROCESS_WK =5,
+ P2P_AP_P2P_CH_SWITCH_PROCESS_WK = 5,
P2P_RO_CH_WK = 6,
};
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index ae5458770234..4ad80ae1067f 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -1108,7 +1108,6 @@ static int rtw_wx_set_wap(struct net_device *dev,
union iwreq_data *awrq,
char *extra)
{
- unsigned long irqL;
uint ret = 0;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct sockaddr *temp = (struct sockaddr *)awrq;
@@ -1137,7 +1136,7 @@ static int rtw_wx_set_wap(struct net_device *dev,
}
authmode = padapter->securitypriv.ndisauthtype;
- _enter_critical_bh(&queue->lock, &irqL);
+ spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
pmlmepriv->pscanned = get_next(phead);
@@ -1156,14 +1155,14 @@ static int rtw_wx_set_wap(struct net_device *dev,
if ((!memcmp(dst_bssid, src_bssid, ETH_ALEN))) {
if (!rtw_set_802_11_infrastructure_mode(padapter, pnetwork->network.InfrastructureMode)) {
ret = -1;
- _exit_critical_bh(&queue->lock, &irqL);
+ spin_unlock_bh(&queue->lock);
goto exit;
}
break;
}
}
- _exit_critical_bh(&queue->lock, &irqL);
+ spin_unlock_bh(&queue->lock);
rtw_set_802_11_authentication_mode(padapter, authmode);
/* set_802_11_encryption_mode(padapter, padapter->securitypriv.ndisencryptstatus); */
@@ -1248,7 +1247,6 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
- unsigned long irqL;
#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
#endif /* CONFIG_88EU_P2P */
@@ -1321,11 +1319,11 @@ _func_enter_;
DBG_88E("IW_SCAN_THIS_ESSID, ssid =%s, len =%d\n", req->essid, req->essid_len);
- _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_lock_bh(&pmlmepriv->lock);
_status = rtw_sitesurvey_cmd(padapter, ssid, 1, NULL, 0);
- _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ spin_unlock_bh(&pmlmepriv->lock);
} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
DBG_88E("rtw_wx_set_scan, req->scan_type == IW_SCAN_TYPE_PASSIVE\n");
}
@@ -1392,7 +1390,6 @@ _func_exit_;
static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
- unsigned long irqL;
struct list_head *plist, *phead;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
@@ -1434,13 +1431,13 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
wait_status = _FW_UNDER_SURVEY | _FW_UNDER_LINKING;
while (check_fwstate(pmlmepriv, wait_status)) {
- rtw_msleep_os(30);
+ msleep(30);
cnt++;
if (cnt > wait_for_surveydone)
break;
}
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -1463,7 +1460,7 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
wrqu->data.length = ev-extra;
wrqu->data.flags = 0;
@@ -1482,7 +1479,6 @@ static int rtw_wx_set_essid(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
- unsigned long irqL;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct __queue *queue = &pmlmepriv->scanned_queue;
@@ -1532,7 +1528,7 @@ static int rtw_wx_set_essid(struct net_device *dev,
src_ssid = ndis_ssid.Ssid;
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("rtw_wx_set_essid: ssid =[%s]\n", src_ssid));
- _enter_critical_bh(&queue->lock, &irqL);
+ spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
pmlmepriv->pscanned = get_next(phead);
@@ -1566,14 +1562,14 @@ static int rtw_wx_set_essid(struct net_device *dev,
if (!rtw_set_802_11_infrastructure_mode(padapter, pnetwork->network.InfrastructureMode)) {
ret = -1;
- _exit_critical_bh(&queue->lock, &irqL);
+ spin_unlock_bh(&queue->lock);
goto exit;
}
break;
}
}
- _exit_critical_bh(&queue->lock, &irqL);
+ spin_unlock_bh(&queue->lock);
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
("set ssid: set_802_11_auth. mode =%d\n", authmode));
rtw_set_802_11_authentication_mode(padapter, authmode);
@@ -2504,7 +2500,7 @@ static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info
("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n",
poidparam->subcode, poidparam->len, len));
- if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) {
+ if (poidparam->subcode >= ARRAY_SIZE(mp_ioctl_hdl)) {
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n"));
ret = -EINVAL;
goto _rtw_mp_ioctl_hdl_exit;
@@ -2574,7 +2570,6 @@ static int rtw_get_ap_info(struct net_device *dev,
{
int ret = 0;
u32 cnt = 0, wpa_ielen;
- unsigned long irqL;
struct list_head *plist, *phead;
unsigned char *pbuf;
u8 bssid[ETH_ALEN];
@@ -2593,7 +2588,7 @@ static int rtw_get_ap_info(struct net_device *dev,
}
while ((check_fwstate(pmlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)))) {
- rtw_msleep_os(30);
+ msleep(30);
cnt++;
if (cnt > 100)
break;
@@ -2609,7 +2604,7 @@ static int rtw_get_ap_info(struct net_device *dev,
goto exit;
}
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -2622,7 +2617,7 @@ static int rtw_get_ap_info(struct net_device *dev,
if (hwaddr_aton_i(data, bssid)) {
DBG_88E("Invalid BSSID '%s'.\n", (u8 *)data);
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
return -EINVAL;
}
@@ -2646,7 +2641,7 @@ static int rtw_get_ap_info(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (pdata->length >= 34) {
if (copy_to_user(pdata->pointer+32, (u8 *)&pdata->flags, 1)) {
@@ -3091,7 +3086,6 @@ static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
int jj, kk;
u8 peerMACStr[17] = {0x00};
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- unsigned long irqL;
struct list_head *plist, *phead;
struct __queue *queue = &(pmlmepriv->scanned_queue);
struct wlan_network *pnetwork = NULL;
@@ -3113,7 +3107,7 @@ static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -3143,7 +3137,7 @@ static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (!blnMatch)
sprintf(attr_content_str, "\n\nM = 0000");
@@ -3163,7 +3157,6 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
int jj, kk;
u8 peerMACStr[17] = {0x00};
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- unsigned long irqL;
struct list_head *plist, *phead;
struct __queue *queue = &(pmlmepriv->scanned_queue);
struct wlan_network *pnetwork = NULL;
@@ -3171,9 +3164,7 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
u8 *p2pie;
uint p2pielen = 0, attr_contentlen = 0;
u8 attr_content[100] = {0x00};
-
- u8 go_devadd_str[17 + 10] = {0x00};
- /* +10 is for the str "go_devadd =", we have to clear it at wrqu->data.pointer */
+ u8 go_devadd_str[17 + 12] = {};
/* Commented by Albert 20121209 */
/* The input data is the GO's interface address which the application wants to know its device address. */
@@ -3186,7 +3177,7 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -3227,15 +3218,15 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (!blnMatch)
- sprintf(go_devadd_str, "\n\ndev_add = NULL");
+ snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add = NULL");
else
- sprintf(go_devadd_str, "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
+ snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]);
- if (copy_to_user(wrqu->data.pointer, go_devadd_str, 10 + 17))
+ if (copy_to_user(wrqu->data.pointer, go_devadd_str, sizeof(go_devadd_str)))
return -EFAULT;
return ret;
}
@@ -3250,7 +3241,6 @@ static int rtw_p2p_get_device_type(struct net_device *dev,
int jj, kk;
u8 peerMACStr[17] = {0x00};
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- unsigned long irqL;
struct list_head *plist, *phead;
struct __queue *queue = &(pmlmepriv->scanned_queue);
struct wlan_network *pnetwork = NULL;
@@ -3271,7 +3261,7 @@ static int rtw_p2p_get_device_type(struct net_device *dev,
for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -3308,7 +3298,7 @@ static int rtw_p2p_get_device_type(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (!blnMatch)
sprintf(dev_type_str, "\n\nN = 00");
@@ -3330,7 +3320,6 @@ static int rtw_p2p_get_device_name(struct net_device *dev,
int jj, kk;
u8 peerMACStr[17] = {0x00};
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- unsigned long irqL;
struct list_head *plist, *phead;
struct __queue *queue = &(pmlmepriv->scanned_queue);
struct wlan_network *pnetwork = NULL;
@@ -3351,7 +3340,7 @@ static int rtw_p2p_get_device_name(struct net_device *dev,
for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -3380,7 +3369,7 @@ static int rtw_p2p_get_device_name(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (!blnMatch)
sprintf(dev_name_str, "\n\nN = 0000");
@@ -3400,7 +3389,6 @@ static int rtw_p2p_get_invitation_procedure(struct net_device *dev,
int jj, kk;
u8 peerMACStr[17] = {0x00};
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- unsigned long irqL;
struct list_head *plist, *phead;
struct __queue *queue = &(pmlmepriv->scanned_queue);
struct wlan_network *pnetwork = NULL;
@@ -3423,7 +3411,7 @@ static int rtw_p2p_get_invitation_procedure(struct net_device *dev,
for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -3455,7 +3443,7 @@ static int rtw_p2p_get_invitation_procedure(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (!blnMatch) {
sprintf(inv_proc_str, "\nIP =-1");
@@ -3480,7 +3468,6 @@ static int rtw_p2p_connect(struct net_device *dev,
u8 peerMAC[ETH_ALEN] = {0x00};
int jj, kk;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- unsigned long irqL;
struct list_head *plist, *phead;
struct __queue *queue = &(pmlmepriv->scanned_queue);
struct wlan_network *pnetwork = NULL;
@@ -3506,7 +3493,7 @@ static int rtw_p2p_connect(struct net_device *dev,
for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
peerMAC[jj] = key_2char2num(extra[kk], extra[kk + 1]);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -3524,7 +3511,7 @@ static int rtw_p2p_connect(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (uintPeerChannel) {
_rtw_memset(&pwdinfo->nego_req_info, 0x00, sizeof(struct tx_nego_req_info));
@@ -3569,7 +3556,6 @@ static int rtw_p2p_invite_req(struct net_device *dev,
u8 attr_content[50] = {0x00};
u8 *p2pie;
uint p2pielen = 0, attr_contentlen = 0;
- unsigned long irqL;
struct tx_invite_req_info *pinvite_req_info = &pwdinfo->invitereq_info;
/* The input data contains two informations. */
@@ -3602,7 +3588,7 @@ static int rtw_p2p_invite_req(struct net_device *dev,
for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
pinvite_req_info->peer_macaddr[jj] = key_2char2num(extra[kk], extra[kk + 1]);
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -3639,7 +3625,7 @@ static int rtw_p2p_invite_req(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (uintPeerChannel) {
/* Store the GO's bssid */
@@ -3712,7 +3698,6 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
u8 attr_content[100] = {0x00};
u8 *p2pie;
uint p2pielen = 0, attr_contentlen = 0;
- unsigned long irqL;
/* The input data contains two informations. */
/* 1. First information is the MAC address which wants to issue the provisioning discovery request frame. */
@@ -3753,7 +3738,7 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
return ret;
}
- _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = get_next(phead);
@@ -3799,7 +3784,7 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
plist = get_next(plist);
}
- _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (uintPeerChannel) {
DBG_88E("[%s] peer channel: %d!\n", __func__, uintPeerChannel);
@@ -4132,7 +4117,6 @@ static int rtw_dbg_port(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- unsigned long irqL;
int ret = 0;
u8 major_cmd, minor_cmd;
u16 arg;
@@ -4448,7 +4432,7 @@ static int rtw_dbg_port(struct net_device *dev,
#ifdef CONFIG_88EU_AP_MODE
DBG_88E("sta_dz_bitmap = 0x%x, tim_bitmap = 0x%x\n", pstapriv->sta_dz_bitmap, pstapriv->tim_bitmap);
#endif
- _enter_critical_bh(&pstapriv->sta_hash_lock, &irqL);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
for (i = 0; i < NUM_STA; i++) {
phead = &(pstapriv->sta_hash[i]);
@@ -4486,7 +4470,7 @@ static int rtw_dbg_port(struct net_device *dev,
}
}
}
- _exit_critical_bh(&pstapriv->sta_hash_lock, &irqL);
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
}
break;
case 0x0c:/* dump rx/tx packet */
@@ -5251,7 +5235,6 @@ static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
{
- unsigned long irqL;
int ret = 0;
struct sta_info *psta = NULL;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
@@ -5271,13 +5254,13 @@ static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
psta = rtw_get_stainfo(pstapriv, param->sta_addr);
if (psta) {
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
if (!rtw_is_list_empty(&psta->asoc_list)) {
rtw_list_delete(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
associated_clients_update(padapter, updated);
psta = NULL;
} else {
@@ -7053,7 +7036,7 @@ static int rtw_mp_ctx(struct net_device *dev,
struct mp_priv *pmp_priv = &padapter->mppriv;
if (pmp_priv->tx.stop == 0) {
pmp_priv->tx.stop = 1;
- rtw_msleep_os(5);
+ msleep(5);
}
pmp_priv->tx.stop = 0;
pmp_priv->tx.count = 1;
@@ -7228,25 +7211,25 @@ static int rtw_mp_thermal(struct net_device *dev,
if (copy_from_user(extra, wrqu->pointer, wrqu->length))
return -EFAULT;
- bwrite = strncmp(extra, "write", 6); /* strncmp true is 0 */
+ bwrite = strncmp(extra, "write", 6); /* strncmp true is 0 */
- Hal_GetThermalMeter(padapter, &val);
+ Hal_GetThermalMeter(padapter, &val);
- if (bwrite == 0) {
- EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
- if (2 > max_available_size) {
- DBG_88E("no available efuse!\n");
- return -EFAULT;
- }
- if (rtw_efuse_map_write(padapter, addr, cnt, &val) == _FAIL) {
- DBG_88E("rtw_efuse_map_write error\n");
- return -EFAULT;
- } else {
- sprintf(extra, " efuse write ok :%d", val);
- }
- } else {
- sprintf(extra, "%d", val);
- }
+ if (bwrite == 0) {
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if (2 > max_available_size) {
+ DBG_88E("no available efuse!\n");
+ return -EFAULT;
+ }
+ if (rtw_efuse_map_write(padapter, addr, cnt, &val) == _FAIL) {
+ DBG_88E("rtw_efuse_map_write error\n");
+ return -EFAULT;
+ } else {
+ sprintf(extra, " efuse write ok :%d", val);
+ }
+ } else {
+ sprintf(extra, "%d", val);
+ }
wrqu->length = strlen(extra);
return 0;
@@ -7268,7 +7251,7 @@ static int rtw_mp_reset_stats(struct net_device *dev,
/* reset phy counter */
write_bbreg(padapter, 0xf14, BIT16, 0x1);
- rtw_msleep_os(10);
+ msleep(10);
write_bbreg(padapter, 0xf14, BIT16, 0x0);
return 0;
@@ -7545,7 +7528,7 @@ static int rtw_mp_get(struct net_device *dev,
break;
}
- rtw_msleep_os(10); /* delay 5ms for sending pkt before exit adb shell operation */
+ msleep(10); /* delay 5ms for sending pkt before exit adb shell operation */
return 0;
}
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 17659bb04bef..68f98fa114d2 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -652,7 +652,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
return dscp >> 5;
}
-static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -761,7 +762,7 @@ void rtw_stop_drv_threads(struct adapter *padapter)
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_stop_drv_threads\n"));
/* Below is to termindate rtw_cmd_thread & event_thread... */
- _rtw_up_sema(&padapter->cmdpriv.cmd_queue_sema);
+ up(&padapter->cmdpriv.cmd_queue_sema);
if (padapter->cmdThread)
_rtw_down_sema(&padapter->cmdpriv.terminate_cmdthread_sema);
@@ -924,7 +925,7 @@ _func_enter_;
rtw_hal_sreset_init(padapter);
- _rtw_spinlock_init(&padapter->br_ext_lock);
+ spin_lock_init(&padapter->br_ext_lock);
exit:
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_init_drv_sw\n"));
@@ -977,9 +978,6 @@ u8 rtw_free_drv_sw(struct adapter *padapter)
}
#endif
-
- _rtw_spinlock_free(&padapter->br_ext_lock);
-
free_mlme_ext_priv(&padapter->mlmeextpriv);
rtw_free_cmd_priv(&padapter->cmdpriv);
@@ -993,8 +991,6 @@ u8 rtw_free_drv_sw(struct adapter *padapter)
_rtw_free_recv_priv(&padapter->recvpriv);
- rtw_free_pwrctrl_priv(padapter);
-
rtw_hal_free_data(padapter);
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("<== rtw_free_drv_sw\n"));
@@ -1157,7 +1153,7 @@ netdev_open_error:
int rtw_ips_pwr_up(struct adapter *padapter)
{
int result;
- u32 start_time = rtw_get_current_time();
+ u32 start_time = jiffies;
DBG_88E("===> rtw_ips_pwr_up..............\n");
rtw_reset_drv_sw(padapter);
@@ -1171,7 +1167,7 @@ int rtw_ips_pwr_up(struct adapter *padapter)
void rtw_ips_pwr_down(struct adapter *padapter)
{
- u32 start_time = rtw_get_current_time();
+ u32 start_time = jiffies;
DBG_88E("===> rtw_ips_pwr_down...................\n");
padapter->bCardDisableWOHSM = true;
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index a1ae72772c5f..8c3b077448af 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -52,7 +52,7 @@ u32 rtw_atoi(u8 *s)
}
if (flag == 1)
num = num * -1;
- return num;
+ return num;
}
inline u8 *_rtw_vmalloc(u32 sz)
@@ -161,20 +161,6 @@ void rtw_list_insert_tail(struct list_head *plist, struct list_head *phead)
Caller must check if the list is empty before calling rtw_list_delete
*/
-void _rtw_init_sema(struct semaphore *sema, int init_val)
-{
- sema_init(sema, init_val);
-}
-
-void _rtw_free_sema(struct semaphore *sema)
-{
-}
-
-void _rtw_up_sema(struct semaphore *sema)
-{
- up(sema);
-}
-
u32 _rtw_down_sema(struct semaphore *sema)
{
if (down_interruptible(sema))
@@ -183,29 +169,10 @@ u32 _rtw_down_sema(struct semaphore *sema)
return _SUCCESS;
}
-void _rtw_mutex_init(struct mutex *pmutex)
-{
- mutex_init(pmutex);
-}
-
-void _rtw_mutex_free(struct mutex *pmutex)
-{
- mutex_destroy(pmutex);
-}
-
-void _rtw_spinlock_init(spinlock_t *plock)
-{
- spin_lock_init(plock);
-}
-
-void _rtw_spinlock_free(spinlock_t *plock)
-{
-}
-
void _rtw_init_queue(struct __queue *pqueue)
{
_rtw_init_listhead(&(pqueue->queue));
- _rtw_spinlock_init(&(pqueue->lock));
+ spin_lock_init(&(pqueue->lock));
}
u32 _rtw_queue_empty(struct __queue *pqueue)
@@ -221,11 +188,6 @@ u32 rtw_end_of_queue_search(struct list_head *head, struct list_head *plist)
return false;
}
-u32 rtw_get_current_time(void)
-{
- return jiffies;
-}
-
inline u32 rtw_systime_to_ms(u32 systime)
{
return systime * 1000 / HZ;
@@ -236,8 +198,7 @@ inline u32 rtw_ms_to_systime(u32 ms)
return ms * HZ / 1000;
}
-/* the input parameter start use the same unit as returned by
- * rtw_get_current_time */
+/* the input parameter start must be in jiffies */
inline s32 rtw_get_passing_time_ms(u32 start)
{
return rtw_systime_to_ms(jiffies-start);
@@ -260,102 +221,8 @@ void rtw_sleep_schedulable(int ms)
return;
}
-void rtw_msleep_os(int ms)
-{
- msleep((unsigned int)ms);
-}
-
-void rtw_usleep_os(int us)
-{
- if (1 < (us/1000))
- msleep(1);
- else
- msleep((us/1000) + 1);
-}
-
-void rtw_mdelay_os(int ms)
-{
- mdelay((unsigned long)ms);
-}
-
-void rtw_udelay_os(int us)
-{
- udelay((unsigned long)us);
-}
-
-void rtw_yield_os(void)
-{
- yield();
-}
-
#define RTW_SUSPEND_LOCK_NAME "rtw_wifi"
-inline void rtw_suspend_lock_init(void)
-{
-}
-
-inline void rtw_suspend_lock_uninit(void)
-{
-}
-
-inline void rtw_lock_suspend(void)
-{
-}
-
-inline void rtw_unlock_suspend(void)
-{
-}
-
-inline void ATOMIC_SET(ATOMIC_T *v, int i)
-{
- atomic_set(v, i);
-}
-
-inline int ATOMIC_READ(ATOMIC_T *v)
-{
- return atomic_read(v);
-}
-
-inline void ATOMIC_ADD(ATOMIC_T *v, int i)
-{
- atomic_add(i, v);
-}
-
-inline void ATOMIC_SUB(ATOMIC_T *v, int i)
-{
- atomic_sub(i, v);
-}
-
-inline void ATOMIC_INC(ATOMIC_T *v)
-{
- atomic_inc(v);
-}
-
-inline void ATOMIC_DEC(ATOMIC_T *v)
-{
- atomic_dec(v);
-}
-
-inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i)
-{
- return atomic_add_return(i, v);
-}
-
-inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
-{
- return atomic_sub_return(i, v);
-}
-
-inline int ATOMIC_INC_RETURN(ATOMIC_T *v)
-{
- return atomic_inc_return(v);
-}
-
-inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
-{
- return atomic_dec_return(v);
-}
-
struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv,
void *old_priv)
{
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index 3852ff43810d..2a18b3208a00 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -90,16 +90,16 @@ void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
u32 cur_time = 0;
if (psecuritypriv->last_mic_err_time == 0) {
- psecuritypriv->last_mic_err_time = rtw_get_current_time();
+ psecuritypriv->last_mic_err_time = jiffies;
} else {
- cur_time = rtw_get_current_time();
+ cur_time = jiffies;
if (cur_time - psecuritypriv->last_mic_err_time < 60*HZ) {
psecuritypriv->btkip_countermeasure = true;
psecuritypriv->last_mic_err_time = 0;
psecuritypriv->btkip_countermeasure_time = cur_time;
} else {
- psecuritypriv->last_mic_err_time = rtw_get_current_time();
+ psecuritypriv->last_mic_err_time = jiffies;
}
}
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index 6cf71cc2ca2e..a3c2bc5922e4 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -159,7 +159,6 @@ int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
int bytes_written = 0;
struct android_wifi_priv_cmd priv_cmd;
- rtw_lock_suspend();
if (!ifr->ifr_data) {
ret = -EINVAL;
goto exit;
@@ -287,7 +286,6 @@ response:
ret = bytes_written;
}
exit:
- rtw_unlock_suspend();
kfree(command);
return ret;
}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 7d14779310d3..a70dcef1419e 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -53,7 +53,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
/*=== Customer ID ===*/
/****** 8188EUS ********/
- {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
+ {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
{} /* Terminating entry */
};
@@ -71,7 +71,7 @@ struct rtw_usb_drv {
};
static struct rtw_usb_drv rtl8188e_usb_drv = {
- .usbdrv.name = (char *)"r8188eu",
+ .usbdrv.name = "r8188eu",
.usbdrv.probe = rtw_drv_init,
.usbdrv.disconnect = rtw_dev_remove,
.usbdrv.id_table = rtw_usb_id_tbl,
@@ -126,7 +126,7 @@ static u8 rtw_init_intf_priv(struct dvobj_priv *dvobj)
{
u8 rst = _SUCCESS;
- _rtw_mutex_init(&dvobj->usb_vendor_req_mutex);
+ mutex_init(&dvobj->usb_vendor_req_mutex);
dvobj->usb_alloc_vendor_req_buf = rtw_zmalloc(MAX_USB_IO_CTL_SIZE);
if (dvobj->usb_alloc_vendor_req_buf == NULL) {
@@ -144,7 +144,7 @@ static u8 rtw_deinit_intf_priv(struct dvobj_priv *dvobj)
u8 rst = _SUCCESS;
kfree(dvobj->usb_alloc_vendor_req_buf);
- _rtw_mutex_free(&dvobj->usb_vendor_req_mutex);
+ mutex_destroy(&dvobj->usb_vendor_req_mutex);
return rst;
}
@@ -240,7 +240,7 @@ _func_enter_;
}
/* 3 misc */
- _rtw_init_sema(&(pdvobjpriv->usb_suspend_sema), 0);
+ sema_init(&(pdvobjpriv->usb_suspend_sema), 0);
rtw_reset_continual_urb_error(pdvobjpriv);
usb_get_dev(pusbd);
@@ -504,7 +504,7 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
int ret = 0;
- u32 start_time = rtw_get_current_time();
+ u32 start_time = jiffies;
_func_enter_;
@@ -586,7 +586,7 @@ int rtw_resume_process(struct adapter *padapter)
struct net_device *pnetdev;
struct pwrctrl_priv *pwrpriv = NULL;
int ret = -1;
- u32 start_time = rtw_get_current_time();
+ u32 start_time = jiffies;
_func_enter_;
DBG_88E("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
@@ -657,7 +657,6 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
padapter->hw_init_mutex = &usb_drv->hw_init_mutex;
/* step 1-1., decide the chip_type via vid/pid */
- padapter->interface_type = RTW_USB;
chip_by_usb_id(padapter, pdid);
if (rtw_handle_dualmac(padapter, 1) != _SUCCESS)
@@ -865,11 +864,8 @@ static int __init rtw_drv_entry(void)
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_entry\n"));
DBG_88E(DRV_NAME " driver version=%s\n", DRIVERVERSION);
- DBG_88E("build time: %s %s\n", __DATE__, __TIME__);
- rtw_suspend_lock_init();
-
- _rtw_mutex_init(&usb_drv->hw_init_mutex);
+ mutex_init(&usb_drv->hw_init_mutex);
usb_drv->drv_registered = true;
return usb_register(&usb_drv->usbdrv);
@@ -880,12 +876,10 @@ static void __exit rtw_drv_halt(void)
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_halt\n"));
DBG_88E("+rtw_drv_halt\n");
- rtw_suspend_lock_uninit();
-
usb_drv->drv_registered = false;
usb_deregister(&usb_drv->usbdrv);
- _rtw_mutex_free(&usb_drv->hw_init_mutex);
+ mutex_destroy(&usb_drv->hw_init_mutex);
DBG_88E("-rtw_drv_halt\n");
}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index 4c71e3b93b58..7e3f2fadd5bf 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -146,7 +146,7 @@ _func_enter_;
}
haldata = GET_HAL_DATA(padapter);
- haldata->srestpriv.last_tx_complete_time = rtw_get_current_time();
+ haldata->srestpriv.last_tx_complete_time = jiffies;
check_completion:
rtw_sctx_done_err(&pxmitbuf->sctx,
@@ -186,7 +186,7 @@ _func_enter_;
goto exit;
}
- _enter_critical(&pxmitpriv->lock, &irqL);
+ spin_lock_irqsave(&pxmitpriv->lock, irqL);
switch (addr) {
case VO_QUEUE_INX:
@@ -213,7 +213,7 @@ _func_enter_;
break;
}
- _exit_critical(&pxmitpriv->lock, &irqL);
+ spin_unlock_irqrestore(&pxmitpriv->lock, irqL);
purb = pxmitbuf->pxmit_urb[0];
@@ -230,7 +230,7 @@ _func_enter_;
if (!status) {
struct hal_data_8188e *haldata = GET_HAL_DATA(padapter);
- haldata->srestpriv.last_tx_time = rtw_get_current_time();
+ haldata->srestpriv.last_tx_time = jiffies;
} else {
rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR);
DBG_88E("usb_write_port, status =%d\n", status);
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index 2e586c063ab8..9005971084b7 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -156,7 +156,6 @@ void rtw_os_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe)
void rtw_os_xmit_schedule(struct adapter *padapter)
{
- unsigned long irql;
struct xmit_priv *pxmitpriv;
if (!padapter)
@@ -164,12 +163,12 @@ void rtw_os_xmit_schedule(struct adapter *padapter)
pxmitpriv = &padapter->xmitpriv;
- _enter_critical_bh(&pxmitpriv->lock, &irql);
+ spin_lock_bh(&pxmitpriv->lock);
if (rtw_txframes_pending(padapter))
tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
- _exit_critical_bh(&pxmitpriv->lock, &irql);
+ spin_unlock_bh(&pxmitpriv->lock);
}
static void rtw_check_xmit_resource(struct adapter *padapter, struct sk_buff *pkt)
@@ -194,13 +193,12 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
{
struct sta_priv *pstapriv = &padapter->stapriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- unsigned long irql;
struct list_head *phead, *plist;
struct sk_buff *newskb;
struct sta_info *psta = NULL;
s32 res;
- _enter_critical_bh(&pstapriv->asoc_list_lock, &irql);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
plist = get_next(phead);
@@ -230,12 +228,12 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
DBG_88E("%s-%d: skb_copy() failed!\n", __func__, __LINE__);
pxmitpriv->tx_drop++;
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irql);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
return false; /* Caller shall tx this multicast frame via normal way. */
}
}
- _exit_critical_bh(&pstapriv->asoc_list_lock, &irql);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
dev_kfree_skb_any(skb);
return true;
}
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index 1260f10944ef..eb33c517fcc8 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -144,7 +144,7 @@ void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
return;
}
- for (j = 0 ; j < pTriple->NumChnls; j++) {
+ for (j = 0; j < pTriple->NumChnls; j++) {
pDot11dInfo->channel_map[pTriple->FirstChnl + j] = 1;
pDot11dInfo->MaxTxPwrDbmList[pTriple->FirstChnl + j] =
pTriple->MaxTxPowerInDbm;
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
index fb7683fa5ffd..eeea50260f1d 100644
--- a/drivers/staging/rtl8192e/dot11d.h
+++ b/drivers/staging/rtl8192e/dot11d.h
@@ -87,7 +87,10 @@ static inline void cpMacAddr(unsigned char *des, unsigned char *src)
#define CIE_WATCHDOG_TH 1
#define GET_CIE_WATCHDOG(__pIeeeDev) \
(GET_DOT11D_INFO(__pIeeeDev)->CountryIeWatchdog)
-#define RESET_CIE_WATCHDOG(__pIeeeDev) GET_CIE_WATCHDOG(__pIeeeDev) = 0
+static inline void RESET_CIE_WATCHDOG(struct rtllib_device *__pIeeeDev)
+{
+ GET_CIE_WATCHDOG(__pIeeeDev) = 0;
+}
#define UPDATE_CIE_WATCHDOG(__pIeeeDev) (++GET_CIE_WATCHDOG(__pIeeeDev))
#define IS_DOT11D_STATE_DONE(__pIeeeDev) \
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 2cace9a4525a..4a35f9b5602d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -30,7 +30,7 @@
#include "rtl_dm.h"
#include "rtl_wx.h"
-extern int WDCAPARA_ADD[];
+static int WDCAPARA_ADD[] = {EDCAPARA_BE, EDCAPARA_BK, EDCAPARA_VI, EDCAPARA_VO};
void rtl8192e_start_beacon(struct net_device *dev)
{
@@ -193,11 +193,12 @@ void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val)
dm_init_edca_turbo(dev);
- u4bAcParam = ((((u32)(qos_parameters->tx_op_limit[pAcParam])) <<
+ u4bAcParam = (((le16_to_cpu(
+ qos_parameters->tx_op_limit[pAcParam])) <<
AC_PARAM_TXOP_LIMIT_OFFSET) |
- (((u32)(qos_parameters->cw_max[pAcParam])) <<
+ ((le16_to_cpu(qos_parameters->cw_max[pAcParam])) <<
AC_PARAM_ECW_MAX_OFFSET) |
- (((u32)(qos_parameters->cw_min[pAcParam])) <<
+ ((le16_to_cpu(qos_parameters->cw_min[pAcParam])) <<
AC_PARAM_ECW_MIN_OFFSET) |
(((u32)u1bAIFS) << AC_PARAM_AIFS_OFFSET));
@@ -1271,7 +1272,7 @@ void rtl8192_tx_fill_desc(struct net_device *dev, struct tx_desc *pdesc,
pdesc->LastSeg = 1;
pdesc->TxBufferSize = skb->len;
- pdesc->TxBuffAddr = cpu_to_le32(mapping);
+ pdesc->TxBuffAddr = mapping;
}
void rtl8192_tx_fill_cmd_desc(struct net_device *dev,
@@ -1301,7 +1302,7 @@ void rtl8192_tx_fill_cmd_desc(struct net_device *dev,
entry_tmp->RATid = (u8)DESC_PACKET_TYPE_INIT;
}
entry->TxBufferSize = skb->len;
- entry->TxBuffAddr = cpu_to_le32(mapping);
+ entry->TxBuffAddr = mapping;
entry->OWN = 1;
}
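The r8192E_dev.c hunks above read the little-endian QoS parameters through le16_to_cpu() instead of plain u32 casts, and stop wrapping the DMA address in cpu_to_le32() where the descriptor field is treated as a CPU-order integer. A minimal sketch of the read side, with an illustrative struct and hard-coded bit offsets standing in for the driver's AC_PARAM_*_OFFSET constants:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct demo_qos_params {
        __le16 cw_min;
        __le16 cw_max;
        __le16 tx_op_limit;
    };

    static u32 demo_pack_ac_param(const struct demo_qos_params *q, u8 aifs)
    {
        /* convert each little-endian field to host order before shifting */
        return ((u32)le16_to_cpu(q->tx_op_limit) << 16) |
               ((u32)le16_to_cpu(q->cw_max) << 12) |
               ((u32)le16_to_cpu(q->cw_min) << 8) |
               aifs;
    }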
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index 21e6ddde68a2..5d6d304c9c01 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -1179,7 +1179,7 @@ void rtl8192_SetBWModeWorkItem(struct net_device *dev)
RT_TRACE(COMP_SWBW, "==>rtl8192_SetBWModeWorkItem() Switch to %s "
"bandwidth\n", priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz")
+ "20MHz" : "40MHz");
if (priv->rf_chip == RF_PSEUDO_11N) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index fa5603a562c3..c46c65c5542f 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -28,7 +28,6 @@
#include "r8190P_rtl8256.h" /* RTL8225 Radio frontend */
#include "r8192E_cmdpkt.h"
-extern int hwwep;
void CamResetAllEntry(struct net_device *dev)
{
u32 ulcommand = 0;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index d93caca9657d..c01abc23213e 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -562,8 +562,8 @@ void rtl8192_update_cap(struct net_device *dev, u16 cap)
}
static struct rtllib_qos_parameters def_qos_parameters = {
- {3, 3, 3, 3},
- {7, 7, 7, 7},
+ {cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3)},
+ {cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7)},
{2, 2, 2, 2},
{0, 0, 0, 0},
{0, 0, 0, 0}
@@ -585,8 +585,6 @@ static void rtl8192_update_beacon(void *data)
rtl8192_update_cap(dev, net->capability);
}
-int WDCAPARA_ADD[] = {EDCAPARA_BE, EDCAPARA_BK, EDCAPARA_VI, EDCAPARA_VO};
-
static void rtl8192_qos_activate(void *data)
{
struct r8192_priv *priv = container_of_work_rsl(data, struct r8192_priv,
@@ -1845,7 +1843,7 @@ static void rtl8192_free_tx_ring(struct net_device *dev, unsigned int prio)
struct tx_desc *entry = &ring->desc[ring->idx];
struct sk_buff *skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(priv->pdev, le32_to_cpu(entry->TxBuffAddr),
+ pci_unmap_single(priv->pdev, entry->TxBuffAddr,
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
@@ -1950,7 +1948,7 @@ static void rtl8192_tx_isr(struct net_device *dev, int prio)
}
skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(priv->pdev, le32_to_cpu(entry->TxBuffAddr),
+ pci_unmap_single(priv->pdev, entry->TxBuffAddr,
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
@@ -2011,7 +2009,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
fwinfo_size = sizeof(struct tx_fwinfo_8190pci);
header = (struct rtllib_hdr_1addr *)(((u8 *)skb->data) + fwinfo_size);
- fc = header->frame_ctl;
+ fc = le16_to_cpu(header->frame_ctl);
type = WLAN_FC_GET_TYPE(fc);
stype = WLAN_FC_GET_STYPE(fc);
pda_addr = header->addr1;
@@ -2101,7 +2099,7 @@ static short rtl8192_alloc_rx_desc_ring(struct net_device *dev)
dev_kfree_skb_any(skb);
return -1;
}
- entry->BufferAddress = cpu_to_le32(*mapping);
+ entry->BufferAddress = *mapping;
entry->Length = priv->rxbuffersize;
entry->OWN = 1;
@@ -2137,8 +2135,8 @@ static int rtl8192_alloc_tx_desc_ring(struct net_device *dev,
for (i = 0; i < entries; i++)
ring[i].NextDescAddress =
- cpu_to_le32((u32)dma + ((i + 1) % entries) *
- sizeof(*ring));
+ (u32)dma + ((i + 1) % entries) *
+ sizeof(*ring);
return 0;
}
@@ -2198,7 +2196,7 @@ void rtl8192_pci_resetdescring(struct net_device *dev)
__skb_dequeue(&ring->queue);
pci_unmap_single(priv->pdev,
- le32_to_cpu(entry->TxBuffAddr),
+ entry->TxBuffAddr,
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
@@ -2400,7 +2398,7 @@ static void rtl8192_rx_normal(struct net_device *dev)
}
}
done:
- pdesc->BufferAddress = cpu_to_le32(*((dma_addr_t *)skb->cb));
+ pdesc->BufferAddress = *((dma_addr_t *)skb->cb);
pdesc->OWN = 1;
pdesc->Length = priv->rxbuffersize;
if (priv->rx_idx[rx_queue_idx] == priv->rxringcount-1)
@@ -2692,7 +2690,7 @@ out:
}
-irqreturn_t rtl8192_interrupt(int irq, void *netdev)
+static irqreturn_t rtl8192_interrupt(int irq, void *netdev)
{
struct net_device *dev = (struct net_device *) netdev;
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index b015bf61cf05..35fc1164effd 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/types.h>
@@ -188,6 +187,8 @@
#define MAX_RX_COUNT 64
#define MAX_TX_QUEUE_COUNT 9
+extern int hwwep;
+
enum RTL819x_PHY_PARAM {
RTL819X_PHY_MACPHY_REG = 0,
RTL819X_PHY_MACPHY_REG_PG = 1,
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 32fbbc9d0d92..adc6cc7ca3d6 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -115,14 +115,14 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
if (ACT_ADDBARSP == type) {
RT_TRACE(COMP_DBG, "====>to send ADDBARSP\n");
- tmp = cpu_to_le16(StatusCode);
+ tmp = StatusCode;
memcpy(tag, (u8 *)&tmp, 2);
tag += 2;
}
- tmp = cpu_to_le16(pBA->BaParamSet.shortData);
+ tmp = pBA->BaParamSet.shortData;
memcpy(tag, (u8 *)&tmp, 2);
tag += 2;
- tmp = cpu_to_le16(pBA->BaTimeoutValue);
+ tmp = pBA->BaTimeoutValue;
memcpy(tag, (u8 *)&tmp, 2);
tag += 2;
@@ -178,10 +178,10 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
*tag ++= ACT_CAT_BA;
*tag ++= ACT_DELBA;
- tmp = cpu_to_le16(DelbaParamSet.shortData);
+ tmp = DelbaParamSet.shortData;
memcpy(tag, (u8 *)&tmp, 2);
tag += 2;
- tmp = cpu_to_le16(ReasonCode);
+ tmp = ReasonCode;
memcpy(tag, (u8 *)&tmp, 2);
tag += 2;
diff --git a/drivers/staging/rtl8192e/rtl819x_Qos.h b/drivers/staging/rtl8192e/rtl819x_Qos.h
index 5ecd556f0797..973342b8a06d 100644
--- a/drivers/staging/rtl8192e/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192e/rtl819x_Qos.h
@@ -201,43 +201,6 @@ enum qos_ie_source {
#define AC_PARAM_SIZE 4
-#define GET_WMM_AC_PARAM_AIFSN(_pStart) \
- ((u8)LE_BITS_TO_4BYTE(_pStart, 0, 4))
-#define SET_WMM_AC_PARAM_AIFSN(_pStart, _val) \
- SET_BITS_TO_LE_4BYTE(_pStart, 0, 4, _val)
-
-#define GET_WMM_AC_PARAM_ACM(_pStart) \
- ((u8)LE_BITS_TO_4BYTE(_pStart, 4, 1))
-#define SET_WMM_AC_PARAM_ACM(_pStart, _val) \
- SET_BITS_TO_LE_4BYTE(_pStart, 4, 1, _val)
-
-#define GET_WMM_AC_PARAM_ACI(_pStart) \
- ((u8)LE_BITS_TO_4BYTE(_pStart, 5, 2))
-#define SET_WMM_AC_PARAM_ACI(_pStart, _val) \
- SET_BITS_TO_LE_4BYTE(_pStart, 5, 2, _val)
-
-#define GET_WMM_AC_PARAM_ACI_AIFSN(_pStart) \
- ((u8)LE_BITS_TO_4BYTE(_pStart, 0, 8))
-#define SET_WMM_AC_PARAM_ACI_AIFSN(_pStart, _val) \
- SET_BITS_TO_LE_4BYTE(_pStart, 0, 8, _val)
-
-#define GET_WMM_AC_PARAM_ECWMIN(_pStart) \
- ((u8)LE_BITS_TO_4BYTE(_pStart, 8, 4))
-#define SET_WMM_AC_PARAM_ECWMIN(_pStart, _val) \
- SET_BITS_TO_LE_4BYTE(_pStart, 8, 4, _val)
-
-#define GET_WMM_AC_PARAM_ECWMAX(_pStart) \
- ((u8)LE_BITS_TO_4BYTE(_pStart, 12, 4))
-#define SET_WMM_AC_PARAM_ECWMAX(_pStart, _val) \
- SET_BITS_TO_LE_4BYTE(_pStart, 12, 4, _val)
-
-#define GET_WMM_AC_PARAM_TXOP_LIMIT(_pStart) \
- ((u8)LE_BITS_TO_4BYTE(_pStart, 16, 16))
-#define SET_WMM_AC_PARAM_TXOP_LIMIT(_pStart, _val) \
- SET_BITS_TO_LE_4BYTE(_pStart, 16, 16, _val)
-
-
-
#define WMM_PARAM_ELEMENT_SIZE (8+(4*AC_PARAM_SIZE))
enum qos_ele_subtype {
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 05ef49f24cd9..83f5f57373a6 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -2896,7 +2896,7 @@ extern void HTConstructCapabilityElement(struct rtllib_device *ieee,
extern void HTConstructInfoElement(struct rtllib_device *ieee,
u8 *posHTInfo, u8 *len, u8 isEncrypt);
extern void HTConstructRT2RTAggElement(struct rtllib_device *ieee,
- u8 *posRT2RTAgg, u8* len);
+ u8 *posRT2RTAgg, u8 *len);
extern void HTOnAssocRsp(struct rtllib_device *ieee);
extern void HTInitializeHTInfo(struct rtllib_device *ieee);
extern void HTInitializeBssDesc(struct bss_ht *pBssHT);
diff --git a/drivers/staging/rtl8192e/rtllib_crypt.c b/drivers/staging/rtl8192e/rtllib_crypt.c
index 86152d0e6b5d..60c0cedefb86 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt.c
@@ -183,7 +183,7 @@ struct lib80211_crypto_ops *rtllib_get_crypto_ops(const char *name)
EXPORT_SYMBOL(rtllib_get_crypto_ops);
-static void * rtllib_crypt_null_init(int keyidx) { return (void *) 1; }
+static void *rtllib_crypt_null_init(int keyidx) { return (void *) 1; }
static void rtllib_crypt_null_deinit(void *priv) {}
static struct lib80211_crypto_ops rtllib_crypt_null = {
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
index e51cb49ce10e..5e5c76bcdbd0 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
@@ -443,13 +443,13 @@ static struct lib80211_crypto_ops rtllib_crypt_ccmp = {
};
-int __init rtllib_crypto_ccmp_init(void)
+static int __init rtllib_crypto_ccmp_init(void)
{
return lib80211_register_crypto_ops(&rtllib_crypt_ccmp);
}
-void __exit rtllib_crypto_ccmp_exit(void)
+static void __exit rtllib_crypto_ccmp_exit(void)
{
lib80211_unregister_crypto_ops(&rtllib_crypt_ccmp);
}
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 5cfd73baf1cc..7b5366bba353 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -173,7 +173,7 @@ static inline u16 Mk16(u8 hi, u8 lo)
static inline u16 Mk16_le(u16 *v)
{
- return le16_to_cpu(*v);
+ return *v;
}
@@ -427,7 +427,7 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
if (net_ratelimit()) {
printk(KERN_DEBUG "TKIP: replay detected: STA="
" %pM previous TSC %08x%04x received "
- "TSC %08x%04x\n",hdr->addr2,
+ "TSC %08x%04x\n", hdr->addr2,
tkey->rx_iv32, tkey->rx_iv16, iv32, iv16);
}
tkey->dot11RSNAStatsTKIPReplays++;
@@ -752,13 +752,13 @@ static struct lib80211_crypto_ops rtllib_crypt_tkip = {
};
-int __init rtllib_crypto_tkip_init(void)
+static int __init rtllib_crypto_tkip_init(void)
{
return lib80211_register_crypto_ops(&rtllib_crypt_tkip);
}
-void __exit rtllib_crypto_tkip_exit(void)
+static void __exit rtllib_crypto_tkip_exit(void)
{
lib80211_unregister_crypto_ops(&rtllib_crypt_tkip);
}
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index c4df6e01ef74..b0e5f1ff07ee 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -270,13 +270,13 @@ static struct lib80211_crypto_ops rtllib_crypt_wep = {
};
-int __init rtllib_crypto_wep_init(void)
+static int __init rtllib_crypto_wep_init(void)
{
return lib80211_register_crypto_ops(&rtllib_crypt_wep);
}
-void __exit rtllib_crypto_wep_exit(void)
+static void __exit rtllib_crypto_wep_exit(void)
{
lib80211_unregister_crypto_ops(&rtllib_crypt_wep);
}
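The crypto init/exit hunks above (and the matching ccmp/tkip ones) drop global linkage from functions that are only referenced inside their own file. A minimal sketch of the usual shape, assuming the functions are wired up with module_init()/module_exit() in the same translation unit; the names here are generic, not this driver's:

    #include <linux/module.h>

    static int __init demo_init(void)
    {
        return 0;   /* registration would happen here */
    }

    static void __exit demo_exit(void)
    {
        /* matching unregistration */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");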
diff --git a/drivers/staging/rtl8192e/rtllib_debug.h b/drivers/staging/rtl8192e/rtllib_debug.h
index c59f67b26363..7537dae89a75 100644
--- a/drivers/staging/rtl8192e/rtllib_debug.h
+++ b/drivers/staging/rtl8192e/rtllib_debug.h
@@ -75,12 +75,14 @@ do { \
if (rt_global_debug_component & component) \
printk(KERN_DEBUG DRV_NAME ":" x "\n" , \
##args);\
-} while (0);
+} while (0)
#define assert(expr) \
+do { \
if (!(expr)) { \
printk(KERN_INFO "Assertion failed! %s,%s,%s,line=%d\n", \
#expr, __FILE__, __func__, __LINE__); \
- }
+ } \
+} while (0);
#endif
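The rtllib_debug.h hunk above rewraps the multi-statement assert() body in the standard do { ... } while (0) idiom, which makes a macro expand to a single statement so constructs such as "if (x) assert(y); else ..." still parse as intended. A minimal illustrative version, not the driver's macro:

    #include <linux/kernel.h>

    #define DEMO_ASSERT(expr)                                           \
    do {                                                                \
        if (!(expr))                                                    \
            printk(KERN_INFO "Assertion failed: %s\n", #expr);          \
    } while (0)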
diff --git a/drivers/staging/rtl8192e/rtllib_endianfree.h b/drivers/staging/rtl8192e/rtllib_endianfree.h
index b268605a52aa..b189fa5a45e4 100644
--- a/drivers/staging/rtl8192e/rtllib_endianfree.h
+++ b/drivers/staging/rtl8192e/rtllib_endianfree.h
@@ -33,9 +33,9 @@
#define ReadEF2Byte(_ptr) EF2Byte(*((u16 *)(_ptr)))
#define ReadEF4Byte(_ptr) EF4Byte(*((u32 *)(_ptr)))
-#define WriteEF1Byte(_ptr, _val) (*((u8 *)(_ptr))) = EF1Byte(_val)
-#define WriteEF2Byte(_ptr, _val) (*((u16 *)(_ptr))) = EF2Byte(_val)
-#define WriteEF4Byte(_ptr, _val) (*((u32 *)(_ptr))) = EF4Byte(_val)
+#define WriteEF1Byte(_ptr, _val) ((*((u8 *)(_ptr))) = EF1Byte(_val))
+#define WriteEF2Byte(_ptr, _val) ((*((u16 *)(_ptr))) = EF2Byte(_val))
+#define WriteEF4Byte(_ptr, _val) ((*((u32 *)(_ptr))) = EF4Byte(_val))
#if BYTE_ORDER == __MACHINE_LITTLE_ENDIAN
#define H2N1BYTE(_val) ((u8)(_val))
#define H2N2BYTE(_val) (((((u16)(_val))&0x00ff)<<8)|\
@@ -84,15 +84,6 @@
(~BIT_OFFSET_LEN_MASK_32(__BitOffset, __BitLen)) \
)
-#define SET_BITS_TO_LE_4BYTE(__pStart, __BitOffset, __BitLen, __Value) \
- *((u32 *)(__pStart)) = \
- EF4Byte( \
- LE_BITS_CLEARED_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
- | \
- ((((u32)__Value) & BIT_LEN_MASK_32(__BitLen)) << (__BitOffset)) \
- );
-
-
#define BIT_LEN_MASK_16(__BitLen) \
(0xFFFF >> (16 - (__BitLen)))
@@ -109,21 +100,6 @@
BIT_LEN_MASK_16(__BitLen) \
)
-#define LE_BITS_CLEARED_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
- ( \
- LE_P2BYTE_TO_HOST_2BYTE(__pStart) \
- & \
- (~BIT_OFFSET_LEN_MASK_16(__BitOffset, __BitLen)) \
- )
-
-#define SET_BITS_TO_LE_2BYTE(__pStart, __BitOffset, __BitLen, __Value) \
- *((u16 *)(__pStart)) = \
- EF2Byte( \
- LE_BITS_CLEARED_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
- | ((((u16)__Value) & BIT_LEN_MASK_16(__BitLen)) << \
- (__BitOffset)) \
- );
-
#define BIT_LEN_MASK_8(__BitLen) \
(0xFF >> (8 - (__BitLen)))
@@ -140,20 +116,6 @@
BIT_LEN_MASK_8(__BitLen) \
)
-#define LE_BITS_CLEARED_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
- ( \
- LE_P1BYTE_TO_HOST_1BYTE(__pStart) \
- & \
- (~BIT_OFFSET_LEN_MASK_8(__BitOffset, __BitLen)) \
- )
-
-#define SET_BITS_TO_LE_1BYTE(__pStart, __BitOffset, __BitLen, __Value) \
- *((u8 *)(__pStart)) = EF1Byte( \
- LE_BITS_CLEARED_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
- | ((((u8)__Value) & BIT_LEN_MASK_8(__BitLen)) << \
- (__BitOffset)) \
- );
-
#define N_BYTE_ALIGMENT(__Value, __Aligment) \
((__Aligment == 1) ? (__Value) : (((__Value + __Aligment - 1) / \
__Aligment) * __Aligment))
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index 51d46e04d3f5..136909eff6d5 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -237,7 +237,7 @@ static const struct file_operations fops = {
.release = single_release,
};
-int __init rtllib_init(void)
+static int __init rtllib_init(void)
{
struct proc_dir_entry *e;
@@ -257,7 +257,7 @@ int __init rtllib_init(void)
return 0;
}
-void __exit rtllib_exit(void)
+static void __exit rtllib_exit(void)
{
if (rtllib_proc) {
remove_proc_entry("debug_level", rtllib_proc);
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 1a011b9b9da6..6c8a8e12b2b5 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -211,7 +211,7 @@ rtllib_rx_frame_mgmt(struct rtllib_device *ieee, struct sk_buff *skb,
* this is not mandatory.... but seems that the probe
* response parser uses it
*/
- struct rtllib_hdr_3addr * hdr = (struct rtllib_hdr_3addr *)skb->data;
+ struct rtllib_hdr_3addr *hdr = (struct rtllib_hdr_3addr *)skb->data;
rx_stats->len = skb->len;
rtllib_rx_mgt(ieee, skb, rx_stats);
@@ -490,7 +490,7 @@ void rtllib_indicate_packets(struct rtllib_device *ieee, struct rtllib_rxb **prx
} else {
u16 len;
/* Leave Ethernet header part of hdr and full payload */
- len = htons(sub_skb->len);
+ len = sub_skb->len;
memcpy(skb_push(sub_skb, 2), &len, 2);
memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
@@ -1224,7 +1224,7 @@ static void rtllib_rx_indicate_pkt_legacy(struct rtllib_device *ieee,
} else {
u16 len;
/* Leave Ethernet header part of hdr and full payload */
- len = htons(sub_skb->len);
+ len = sub_skb->len;
memcpy(skb_push(sub_skb, 2), &len, 2);
memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN);
@@ -1632,13 +1632,13 @@ static int rtllib_qos_convert_ac_to_parameters(struct rtllib_qos_parameter_info
/* WMM spec P.11: The minimum value for AIFSN shall be 2 */
qos_param->aifs[aci] = (qos_param->aifs[aci] < 2) ? 2 : qos_param->aifs[aci];
- qos_param->cw_min[aci] = ac_params->ecw_min_max & 0x0F;
+ qos_param->cw_min[aci] = cpu_to_le16(ac_params->ecw_min_max & 0x0F);
- qos_param->cw_max[aci] = (ac_params->ecw_min_max & 0xF0) >> 4;
+ qos_param->cw_max[aci] = cpu_to_le16((ac_params->ecw_min_max & 0xF0) >> 4);
qos_param->flag[aci] =
(ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00;
- qos_param->tx_op_limit[aci] = le16_to_cpu(ac_params->tx_op_limit);
+ qos_param->tx_op_limit[aci] = ac_params->tx_op_limit;
}
return rc;
}
@@ -2260,9 +2260,9 @@ static inline int rtllib_network_init(
memcpy(network->bssid, beacon->header.addr3, ETH_ALEN);
network->capability = le16_to_cpu(beacon->capability);
network->last_scanned = jiffies;
- network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]);
- network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]);
- network->beacon_interval = le32_to_cpu(beacon->beacon_interval);
+ network->time_stamp[0] = beacon->time_stamp[0];
+ network->time_stamp[1] = beacon->time_stamp[1];
+ network->beacon_interval = le16_to_cpu(beacon->beacon_interval);
/* Where to pull this? beacon->listen_interval;*/
network->listen_interval = 0x0A;
network->rates_len = network->rates_ex_len = 0;
@@ -2528,29 +2528,30 @@ static inline void rtllib_process_probe_response(
"'%s' ( %pM ): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
escape_essid(info_element->data, info_element->len),
beacon->header.addr3,
- (beacon->capability & (1<<0xf)) ? '1' : '0',
- (beacon->capability & (1<<0xe)) ? '1' : '0',
- (beacon->capability & (1<<0xd)) ? '1' : '0',
- (beacon->capability & (1<<0xc)) ? '1' : '0',
- (beacon->capability & (1<<0xb)) ? '1' : '0',
- (beacon->capability & (1<<0xa)) ? '1' : '0',
- (beacon->capability & (1<<0x9)) ? '1' : '0',
- (beacon->capability & (1<<0x8)) ? '1' : '0',
- (beacon->capability & (1<<0x7)) ? '1' : '0',
- (beacon->capability & (1<<0x6)) ? '1' : '0',
- (beacon->capability & (1<<0x5)) ? '1' : '0',
- (beacon->capability & (1<<0x4)) ? '1' : '0',
- (beacon->capability & (1<<0x3)) ? '1' : '0',
- (beacon->capability & (1<<0x2)) ? '1' : '0',
- (beacon->capability & (1<<0x1)) ? '1' : '0',
- (beacon->capability & (1<<0x0)) ? '1' : '0');
+ (le16_to_cpu(beacon->capability) & (1<<0xf)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0xe)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0xd)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0xc)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0xb)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0xa)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x9)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x8)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x7)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x6)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x5)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x4)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x3)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x2)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x1)) ? '1' : '0',
+ (le16_to_cpu(beacon->capability) & (1<<0x0)) ? '1' : '0');
if (rtllib_network_init(ieee, beacon, network, stats)) {
RTLLIB_DEBUG_SCAN("Dropped '%s' ( %pM) via %s.\n",
escape_essid(info_element->data,
info_element->len),
beacon->header.addr3,
- WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+ WLAN_FC_GET_STYPE(
+ le16_to_cpu(beacon->header.frame_ctl)) ==
RTLLIB_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
goto free_network;
@@ -2560,7 +2561,7 @@ static inline void rtllib_process_probe_response(
if (!rtllib_legal_channel(ieee, network->channel))
goto free_network;
- if (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+ if (WLAN_FC_GET_STYPE(le16_to_cpu(beacon->header.frame_ctl)) ==
RTLLIB_STYPE_PROBE_RESP) {
if (IsPassiveChannel(ieee, network->channel)) {
printk(KERN_INFO "GetScanInfo(): For Global Domain, "
@@ -2629,7 +2630,8 @@ static inline void rtllib_process_probe_response(
RTLLIB_DEBUG_SCAN("Adding '%s' ( %pM) via %s.\n",
escape_essid(network->ssid,
network->ssid_len), network->bssid,
- WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+ WLAN_FC_GET_STYPE(
+ le16_to_cpu(beacon->header.frame_ctl)) ==
RTLLIB_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
memcpy(target, network, sizeof(*target));
@@ -2640,7 +2642,8 @@ static inline void rtllib_process_probe_response(
RTLLIB_DEBUG_SCAN("Updating '%s' ( %pM) via %s.\n",
escape_essid(target->ssid,
target->ssid_len), target->bssid,
- WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+ WLAN_FC_GET_STYPE(
+ le16_to_cpu(beacon->header.frame_ctl)) ==
RTLLIB_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
@@ -2682,15 +2685,17 @@ void rtllib_rx_mgt(struct rtllib_device *ieee,
{
struct rtllib_hdr_4addr *header = (struct rtllib_hdr_4addr *)skb->data ;
- if (WLAN_FC_GET_STYPE(header->frame_ctl) != RTLLIB_STYPE_PROBE_RESP &&
- WLAN_FC_GET_STYPE(header->frame_ctl) != RTLLIB_STYPE_BEACON)
+ if ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) !=
+ RTLLIB_STYPE_PROBE_RESP) &&
+ (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) !=
+ RTLLIB_STYPE_BEACON))
ieee->last_rx_ps_time = jiffies;
- switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
+ switch (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl))) {
case RTLLIB_STYPE_BEACON:
RTLLIB_DEBUG_MGMT("received BEACON (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
+ WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)));
RTLLIB_DEBUG_SCAN("Beacon\n");
rtllib_process_probe_response(
ieee, (struct rtllib_probe_response *)header,
@@ -2705,14 +2710,15 @@ void rtllib_rx_mgt(struct rtllib_device *ieee,
case RTLLIB_STYPE_PROBE_RESP:
RTLLIB_DEBUG_MGMT("received PROBE RESPONSE (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
+ WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)));
RTLLIB_DEBUG_SCAN("Probe response\n");
rtllib_process_probe_response(ieee,
(struct rtllib_probe_response *)header, stats);
break;
case RTLLIB_STYPE_PROBE_REQ:
RTLLIB_DEBUG_MGMT("received PROBE RESQUEST (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
+ WLAN_FC_GET_STYPE(
+ le16_to_cpu(header->frame_ctl)));
RTLLIB_DEBUG_SCAN("Probe request\n");
if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) &&
((ieee->iw_mode == IW_MODE_ADHOC ||
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 933bd6deaca1..4bf72bc1ba7b 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -227,7 +227,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
/* called with 2nd param 0, no mgmt lock required */
rtllib_sta_wakeup(ieee, 0);
- if (header->frame_ctl == RTLLIB_STYPE_BEACON)
+ if (le16_to_cpu(header->frame_ctl) == RTLLIB_STYPE_BEACON)
tcb_desc->queue_index = BEACON_QUEUE;
else
tcb_desc->queue_index = MGNT_QUEUE;
@@ -295,7 +295,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
u16 fc, type, stype;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);
- fc = header->frame_ctl;
+ fc = le16_to_cpu(header->frame_ctl);
type = WLAN_FC_GET_TYPE(fc);
stype = WLAN_FC_GET_STYPE(fc);
@@ -807,18 +807,18 @@ inline struct sk_buff *rtllib_authentication_req(struct rtllib_network *beacon,
auth = (struct rtllib_authentication *)
skb_put(skb, sizeof(struct rtllib_authentication));
- auth->header.frame_ctl = RTLLIB_STYPE_AUTH;
+ auth->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_AUTH);
if (challengelen)
- auth->header.frame_ctl |= RTLLIB_FCTL_WEP;
+ auth->header.frame_ctl |= cpu_to_le16(RTLLIB_FCTL_WEP);
- auth->header.duration_id = 0x013a;
+ auth->header.duration_id = cpu_to_le16(0x013a);
memcpy(auth->header.addr1, beacon->bssid, ETH_ALEN);
memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
memcpy(auth->header.addr3, beacon->bssid, ETH_ALEN);
if (ieee->auth_mode == 0)
auth->algorithm = WLAN_AUTH_OPEN;
else if (ieee->auth_mode == 1)
- auth->algorithm = WLAN_AUTH_SHARED_KEY;
+ auth->algorithm = cpu_to_le16(WLAN_AUTH_SHARED_KEY);
else if (ieee->auth_mode == 2)
auth->algorithm = WLAN_AUTH_OPEN;
auth->transaction = cpu_to_le16(ieee->associate_seq);
@@ -921,8 +921,8 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, u8 *dest)
if (ieee->short_slot && (ieee->current_network.capability &
WLAN_CAPABILITY_SHORT_SLOT_TIME))
- cpu_to_le16((beacon_buf->capability |=
- WLAN_CAPABILITY_SHORT_SLOT_TIME));
+ beacon_buf->capability |=
+ cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
if (encrypt)
@@ -952,7 +952,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, u8 *dest)
u16 val16;
*(tag++) = MFIE_TYPE_IBSS_SET;
*(tag++) = 2;
- val16 = cpu_to_le16(ieee->current_network.atim_window);
+ val16 = ieee->current_network.atim_window;
memcpy((u8 *)tag, (u8 *)&val16, 2);
tag += 2;
}
@@ -1260,7 +1260,7 @@ inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
hdr->header.frame_ctl = RTLLIB_STYPE_ASSOC_REQ;
- hdr->header.duration_id = 37;
+ hdr->header.duration_id = cpu_to_le16(37);
memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN);
memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
memcpy(hdr->header.addr3, beacon->bssid, ETH_ALEN);
@@ -1279,7 +1279,7 @@ inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
- hdr->listen_interval = beacon->listen_interval;
+ hdr->listen_interval = cpu_to_le16(beacon->listen_interval);
hdr->info_element[0].id = MFIE_TYPE_SSID;
@@ -1451,7 +1451,7 @@ static void rtllib_associate_abort_cb(unsigned long dev)
rtllib_associate_abort((struct rtllib_device *) dev);
}
-static void rtllib_associate_step1(struct rtllib_device *ieee, u8 * daddr)
+static void rtllib_associate_step1(struct rtllib_device *ieee, u8 *daddr)
{
struct rtllib_network *beacon = &ieee->current_network;
struct sk_buff *skb;
@@ -1785,7 +1785,7 @@ void rtllib_softmac_check_all_nets(struct rtllib_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen)
+static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
{
struct rtllib_authentication *a;
u8 *t;
@@ -3633,7 +3633,7 @@ out:
}
EXPORT_SYMBOL(rtllib_wpa_supplicant_ioctl);
-void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib)
+static void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib)
{
u8 OpMode;
u8 i;
@@ -3658,7 +3658,7 @@ void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib)
}
-void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib, u8 *asSta,
+static void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib, u8 *asSta,
u8 asRsn)
{
u8 i;
@@ -3684,7 +3684,7 @@ void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib, u8 *asSta,
}
-void
+static void
rtllib_MgntDisconnectAP(
struct rtllib_device *rtllib,
u8 asRsn
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 3183627823fb..77964885b3f2 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -171,7 +171,7 @@ inline int rtllib_put_snap(u8 *data, u16 h_proto)
snap->oui[1] = oui[1];
snap->oui[2] = oui[2];
- *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
+ *(u16 *)(data + SNAP_SIZE) = h_proto;
return SNAP_SIZE + sizeof(u16);
}
@@ -231,7 +231,7 @@ static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
memset(txb, 0, sizeof(struct rtllib_txb));
txb->nr_frags = nr_frags;
- txb->frag_size = txb_size;
+ txb->frag_size = cpu_to_le16(txb_size);
for (i = 0; i < nr_frags; i++) {
txb->fragments[i] = dev_alloc_skb(txb_size);
@@ -610,7 +610,7 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
}
txb->encrypted = 0;
- txb->payload_size = skb->len;
+ txb->payload_size = cpu_to_le16(skb->len);
memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
skb->len);
@@ -764,7 +764,7 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
goto failed;
}
txb->encrypted = encrypt;
- txb->payload_size = bytes;
+ txb->payload_size = cpu_to_le16(bytes);
if (qos_actived)
txb->queue_index = UP2AC(skb->priority);
@@ -812,10 +812,10 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
}
if ((qos_actived) && (!bIsMulticast)) {
frag_hdr->seq_ctl =
- rtllib_query_seqnum(ieee, skb_frag,
- header.addr1);
+ cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
+ header.addr1));
frag_hdr->seq_ctl =
- cpu_to_le16(frag_hdr->seq_ctl<<4 | i);
+ cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl)<<4 | i);
} else {
frag_hdr->seq_ctl =
cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
@@ -870,7 +870,7 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
}
txb->encrypted = 0;
- txb->payload_size = skb->len;
+ txb->payload_size = cpu_to_le16(skb->len);
memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
skb->len);
}
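The rtllib hunks above make every store into an on-wire 16-bit header or txb field go through cpu_to_le16(), mirroring the le16_to_cpu() now used on the read side, so the fields keep their little-endian layout on big-endian hosts as well. A minimal sketch with a made-up header struct and illustrative values:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct demo_hdr {
        __le16 frame_ctl;
        __le16 duration_id;
        __le16 seq_ctl;
    };

    static void demo_fill_hdr(struct demo_hdr *hdr, u16 stype, u16 seq, u8 frag)
    {
        hdr->frame_ctl = cpu_to_le16(stype);            /* host-order value, converted on store */
        hdr->duration_id = cpu_to_le16(0x013a);         /* constant as in the auth hunk above */
        hdr->seq_ctl = cpu_to_le16((seq << 4) | frag);  /* combine in host order first */
    }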
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index ad3bc567d35a..c9d8c102cca3 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/types.h>
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 82a77b45fb50..37fe33005c02 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -29,7 +29,6 @@
#define _OS_INTFS_C_
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/firmware.h>
#include "osdep_service.h"
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 5b6a96e3bd7b..1a4b7a651f92 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -31,7 +31,6 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 7e324315e6ad..c71c7e55bb36 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -31,7 +31,6 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 9fec6eda8731..23d539dba7ef 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -42,7 +42,6 @@
#include <linux/wireless.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <net/iw_handler.h>
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 8fa0f9d49a8a..3ea99aea4385 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -1043,9 +1043,6 @@ void r8712_got_addbareq_event_callback(struct _adapter *adapter, u8 *pbuf)
struct sta_priv *pstapriv = &adapter->stapriv;
struct recv_reorder_ctrl *precvreorder_ctrl = NULL;
- netdev_info(adapter->pnetdev, "%s: mac = %pM, seq = %d, tid = %d\n",
- __func__, pAddbareq_pram->MacAddress,
- pAddbareq_pram->StartSeqNum, pAddbareq_pram->tid);
psta = r8712_get_stainfo(pstapriv, pAddbareq_pram->MacAddress);
if (psta) {
precvreorder_ctrl =
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index 5349669707c0..aae5125a2e7e 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -31,7 +31,6 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index dbefa43e4c2c..bbd5888e316b 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -353,6 +353,10 @@ static void disable_ht_for_spec_devid(const struct usb_device_id *pdid,
}
}
+static const struct device_type wlan_type = {
+ .name = "wlan",
+};
+
/*
* drv_init() - a device potentially for us
*
@@ -388,6 +392,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
padapter->pusb_intf = pusb_intf;
usb_set_intfdata(pusb_intf, pnetdev);
SET_NETDEV_DEV(pnetdev, &pusb_intf->dev);
+ pnetdev->dev.type = &wlan_type;
/* step 2. */
padapter->dvobj_init = &r8712_usb_dvobj_init;
padapter->dvobj_deinit = &r8712_usb_dvobj_deinit;
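The usb_intf.c hunk above attaches a struct device_type named "wlan" to the new net_device before registration, so the device reports DEVTYPE=wlan in its uevent and udev rules can match on it. A minimal sketch of the same pattern with generic names:

    #include <linux/netdevice.h>

    static const struct device_type demo_wlan_type = {
        .name = "wlan",
    };

    static void demo_attach_netdev(struct net_device *ndev, struct device *parent)
    {
        SET_NETDEV_DEV(ndev, parent);
        ndev->dev.type = &demo_wlan_type;   /* must be set before register_netdev() */
    }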
diff --git a/drivers/staging/rtl8821ae/Kconfig b/drivers/staging/rtl8821ae/Kconfig
new file mode 100644
index 000000000000..abccc9dabd65
--- /dev/null
+++ b/drivers/staging/rtl8821ae/Kconfig
@@ -0,0 +1,11 @@
+config R8821AE
+ tristate "RealTek RTL8821AE Wireless LAN NIC driver"
+ depends on PCI && WLAN && MAC80211
+ depends on m
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ select EEPROM_93CX6
+ select CRYPTO
+ default N
+ ---help---
+ If built as a module, it will be called r8821ae.ko.
diff --git a/drivers/staging/rtl8821ae/Makefile b/drivers/staging/rtl8821ae/Makefile
new file mode 100644
index 000000000000..8a23bd7e8842
--- /dev/null
+++ b/drivers/staging/rtl8821ae/Makefile
@@ -0,0 +1,35 @@
+PCI_MAIN_OBJS := base.o \
+ rc.o \
+ debug.o \
+ regd.o \
+ efuse.o \
+ cam.o \
+ ps.o \
+ core.o \
+ stats.o \
+ pci.o \
+
+BT_COEXIST_OBJS:= btcoexist/halbtc8192e2ant.o\
+ btcoexist/halbtc8723b1ant.o\
+ btcoexist/halbtc8723b2ant.o\
+ btcoexist/halbtcoutsrc.o\
+ btcoexist/rtl_btc.o \
+
+PCI_8821AE_HAL_OBJS:= \
+ rtl8821ae/hw.o \
+ rtl8821ae/table.o \
+ rtl8821ae/sw.o \
+ rtl8821ae/trx.o \
+ rtl8821ae/led.o \
+ rtl8821ae/fw.o \
+ rtl8821ae/phy.o \
+ rtl8821ae/rf.o \
+ rtl8821ae/dm.o \
+ rtl8821ae/pwrseq.o \
+ rtl8821ae/pwrseqcmd.o \
+ rtl8821ae/hal_btc.o \
+ rtl8821ae/hal_bt_coexist.o \
+
+rtl8821ae-objs += $(BT_COEXIST_OBJS) $(PCI_MAIN_OBJS) $(PCI_8821AE_HAL_OBJS)
+
+obj-$(CONFIG_R8821AE) += rtl8821ae.o
diff --git a/drivers/staging/rtl8821ae/TODO b/drivers/staging/rtl8821ae/TODO
new file mode 100644
index 000000000000..3ee7529d4ed5
--- /dev/null
+++ b/drivers/staging/rtl8821ae/TODO
@@ -0,0 +1,10 @@
+Realtek 8821AE PCI wifi driver TODO:
+ - remove built-in btcoexist module when the "real" one gets upstream
+ - remove built-in rtlwifi code by porting driver to use the "real" one
+ in the drivers/net/ directory.
+ - fix up coding style issues
+
+Please send any patches for this driver to:
+ Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+and the <devel@driverdev.osuosl.org> mailing list.
+
diff --git a/drivers/staging/rtl8821ae/base.c b/drivers/staging/rtl8821ae/base.c
new file mode 100644
index 000000000000..18c936fbdf1e
--- /dev/null
+++ b/drivers/staging/rtl8821ae/base.c
@@ -0,0 +1,1873 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include <linux/ip.h>
+#include <linux/module.h>
+#include "wifi.h"
+#include "rc.h"
+#include "base.h"
+#include "efuse.h"
+#include "cam.h"
+#include "ps.h"
+#include "regd.h"
+#include "pci.h"
+
+/*
+ *NOTICE!!!: This file will be very big, we should
+ *keep it clear under the following rules:
+ *
+ *This file includes the following parts, so if you add new
+ *functions into this file, please check which part they
+ *should go in, or check if you should add a new part
+ *for this file:
+ *
+ *1) mac80211 init functions
+ *2) tx information functions
+ *3) functions called by core.c
+ *4) wq & timer callback functions
+ *5) frame process functions
+ *6) IOT functions
+ *7) sysfs functions
+ *8) vif functions
+ *9) ...
+ */
+
+/*********************************************************
+ *
+ * mac80211 init functions
+ *
+ *********************************************************/
+static struct ieee80211_channel rtl_channeltable_2g[] = {
+ {.center_freq = 2412,.hw_value = 1,},
+ {.center_freq = 2417,.hw_value = 2,},
+ {.center_freq = 2422,.hw_value = 3,},
+ {.center_freq = 2427,.hw_value = 4,},
+ {.center_freq = 2432,.hw_value = 5,},
+ {.center_freq = 2437,.hw_value = 6,},
+ {.center_freq = 2442,.hw_value = 7,},
+ {.center_freq = 2447,.hw_value = 8,},
+ {.center_freq = 2452,.hw_value = 9,},
+ {.center_freq = 2457,.hw_value = 10,},
+ {.center_freq = 2462,.hw_value = 11,},
+ {.center_freq = 2467,.hw_value = 12,},
+ {.center_freq = 2472,.hw_value = 13,},
+ {.center_freq = 2484,.hw_value = 14,},
+};
+
+static struct ieee80211_channel rtl_channeltable_5g[] = {
+ {.center_freq = 5180,.hw_value = 36,},
+ {.center_freq = 5200,.hw_value = 40,},
+ {.center_freq = 5220,.hw_value = 44,},
+ {.center_freq = 5240,.hw_value = 48,},
+ {.center_freq = 5260,.hw_value = 52,},
+ {.center_freq = 5280,.hw_value = 56,},
+ {.center_freq = 5300,.hw_value = 60,},
+ {.center_freq = 5320,.hw_value = 64,},
+ {.center_freq = 5500,.hw_value = 100,},
+ {.center_freq = 5520,.hw_value = 104,},
+ {.center_freq = 5540,.hw_value = 108,},
+ {.center_freq = 5560,.hw_value = 112,},
+ {.center_freq = 5580,.hw_value = 116,},
+ {.center_freq = 5600,.hw_value = 120,},
+ {.center_freq = 5620,.hw_value = 124,},
+ {.center_freq = 5640,.hw_value = 128,},
+ {.center_freq = 5660,.hw_value = 132,},
+ {.center_freq = 5680,.hw_value = 136,},
+ {.center_freq = 5700,.hw_value = 140,},
+ {.center_freq = 5745,.hw_value = 149,},
+ {.center_freq = 5765,.hw_value = 153,},
+ {.center_freq = 5785,.hw_value = 157,},
+ {.center_freq = 5805,.hw_value = 161,},
+ {.center_freq = 5825,.hw_value = 165,},
+};
+
+static struct ieee80211_rate rtl_ratetable_2g[] = {
+ {.bitrate = 10,.hw_value = 0x00,},
+ {.bitrate = 20,.hw_value = 0x01,},
+ {.bitrate = 55,.hw_value = 0x02,},
+ {.bitrate = 110,.hw_value = 0x03,},
+ {.bitrate = 60,.hw_value = 0x04,},
+ {.bitrate = 90,.hw_value = 0x05,},
+ {.bitrate = 120,.hw_value = 0x06,},
+ {.bitrate = 180,.hw_value = 0x07,},
+ {.bitrate = 240,.hw_value = 0x08,},
+ {.bitrate = 360,.hw_value = 0x09,},
+ {.bitrate = 480,.hw_value = 0x0a,},
+ {.bitrate = 540,.hw_value = 0x0b,},
+};
+
+static struct ieee80211_rate rtl_ratetable_5g[] = {
+ {.bitrate = 60,.hw_value = 0x04,},
+ {.bitrate = 90,.hw_value = 0x05,},
+ {.bitrate = 120,.hw_value = 0x06,},
+ {.bitrate = 180,.hw_value = 0x07,},
+ {.bitrate = 240,.hw_value = 0x08,},
+ {.bitrate = 360,.hw_value = 0x09,},
+ {.bitrate = 480,.hw_value = 0x0a,},
+ {.bitrate = 540,.hw_value = 0x0b,},
+};
+
+static const struct ieee80211_supported_band rtl_band_2ghz = {
+ .band = IEEE80211_BAND_2GHZ,
+
+ .channels = rtl_channeltable_2g,
+ .n_channels = ARRAY_SIZE(rtl_channeltable_2g),
+
+ .bitrates = rtl_ratetable_2g,
+ .n_bitrates = ARRAY_SIZE(rtl_ratetable_2g),
+
+ .ht_cap = {0},
+};
+
+static struct ieee80211_supported_band rtl_band_5ghz = {
+ .band = IEEE80211_BAND_5GHZ,
+
+ .channels = rtl_channeltable_5g,
+ .n_channels = ARRAY_SIZE(rtl_channeltable_5g),
+
+ .bitrates = rtl_ratetable_5g,
+ .n_bitrates = ARRAY_SIZE(rtl_ratetable_5g),
+
+ .ht_cap = {0},
+};
+
+static const u8 tid_to_ac[] = {
+ 2, /* IEEE80211_AC_BE */
+ 3, /* IEEE80211_AC_BK */
+ 3, /* IEEE80211_AC_BK */
+ 2, /* IEEE80211_AC_BE */
+ 1, /* IEEE80211_AC_VI */
+ 1, /* IEEE80211_AC_VI */
+ 0, /* IEEE80211_AC_VO */
+ 0, /* IEEE80211_AC_VO */
+};
+
+u8 rtl_tid_to_ac(struct ieee80211_hw *hw, u8 tid)
+{
+ return tid_to_ac[tid];
+}
+
+static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ ht_cap->ht_supported = true;
+ ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_MAX_AMSDU;
+
+ if (rtlpriv->rtlhal.disable_amsdu_8k)
+ ht_cap->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU;
+
+ /*
+ *Maximum length of AMPDU that the STA can receive.
+ *Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
+ */
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+
+ /*Minimum MPDU start spacing , */
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+
+ /*
+ *hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ *base on ant_num
+ *rx_mask: RX mask
+ *if rx_ant =1 rx_mask[0]=0xff;==>MCS0-MCS7
+ *if rx_ant =2 rx_mask[1]=0xff;==>MCS8-MCS15
+ *if rx_ant >=3 rx_mask[2]=0xff;
+ *if BW_40 rx_mask[4]=0x01;
+ *highest supported RX rate
+ */
+ if (rtlpriv->dm.supp_phymode_switch) {
+ RT_TRACE(COMP_INIT, DBG_EMERG, ("Support phy mode switch\n"));
+
+ ht_cap->mcs.rx_mask[0] = 0xFF;
+ ht_cap->mcs.rx_mask[1] = 0xFF;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+
+ ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS15;
+ } else {
+ if (get_rf_type(rtlphy) == RF_1T2R ||
+ get_rf_type(rtlphy) == RF_2T2R) {
+
+ RT_TRACE(COMP_INIT, DBG_DMESG, ("1T2R or 2T2R\n"));
+
+ ht_cap->mcs.rx_mask[0] = 0xFF;
+ ht_cap->mcs.rx_mask[1] = 0xFF;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+
+ ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS15;
+ } else if (get_rf_type(rtlphy) == RF_1T1R) {
+
+ RT_TRACE(COMP_INIT, DBG_DMESG, ("1T1R\n"));
+
+ ht_cap->mcs.rx_mask[0] = 0xFF;
+ ht_cap->mcs.rx_mask[1] = 0x00;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+
+ ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS7;
+ }
+ }
+}
+
+static void _rtl_init_mac80211(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct ieee80211_supported_band *sband;
+
+
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY &&
+ rtlhal->bandset == BAND_ON_BOTH) {
+ /* 1: 2.4 G bands */
+ /* <1> use mac->bands as mem for hw->wiphy->bands */
+ sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
+
+ /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ * to default value(1T1R) */
+ memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), &rtl_band_2ghz,
+ sizeof(struct ieee80211_supported_band));
+
+ /* <3> init ht cap base on ant_num */
+ _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
+
+ /* <4> set mac->sband to wiphy->sband */
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+
+ /* 2: 5 G bands */
+ /* <1> use mac->bands as mem for hw->wiphy->bands */
+ sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);
+
+ /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
+ * to default value(1T1R) */
+ memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]), &rtl_band_5ghz,
+ sizeof(struct ieee80211_supported_band));
+
+ /* <3> init ht cap base on ant_num */
+ _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
+
+ /* <4> set mac->sband to wiphy->sband */
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+ } else {
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* <1> use mac->bands as mem for hw->wiphy->bands */
+ sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
+
+ /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ * to default value(1T1R) */
+ memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]),
+ &rtl_band_2ghz,
+ sizeof(struct ieee80211_supported_band));
+
+ /* <3> init ht cap base on ant_num */
+ _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
+
+ /* <4> set mac->sband to wiphy->sband */
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+ } else if (rtlhal->current_bandtype == BAND_ON_5G) {
+ /* <1> use mac->bands as mem for hw->wiphy->bands */
+ sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);
+
+ /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
+ * to default value(1T1R) */
+ memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]),
+ &rtl_band_5ghz,
+ sizeof(struct ieee80211_supported_band));
+
+ /* <3> init ht cap base on ant_num */
+ _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
+
+ /* <4> set mac->sband to wiphy->sband */
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+ } else {
+ RT_TRACE(COMP_INIT, DBG_EMERG, ("Err BAND %d\n",
+ rtlhal->current_bandtype));
+ }
+ }
+ /* <5> set hw caps */
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_RX_INCLUDES_FCS |
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0))
+ IEEE80211_HW_BEACON_FILTER |
+#endif
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_CONNECTION_MONITOR |
+ /* IEEE80211_HW_SUPPORTS_CQM_RSSI | */
+ IEEE80211_HW_MFP_CAPABLE | 0;
+
+ /* swlps or hwlps has been set in diff chip in init_sw_vars */
+ if (rtlpriv->psc.b_swctrl_lps)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ /* IEEE80211_HW_SUPPORTS_DYNAMIC_PS | */
+ 0;
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+#else
+/*<delete in kernel end>*/
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT) ;
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,39))
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+ hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+#endif
+
+ hw->wiphy->rts_threshold = 2347;
+
+ hw->queues = AC_MAX;
+ hw->extra_tx_headroom = RTL_TX_HEADER_SIZE;
+
+ /* TODO: Correct this value for our hw */
+ /* TODO: define these hard code value */
+ hw->max_listen_interval = 10;
+ hw->max_rate_tries = 4;
+ /* hw->max_rates = 1; */
+ hw->sta_data_size = sizeof(struct rtl_sta_info);
+#ifdef VIF_TODO
+ hw->vif_data_size = sizeof(struct rtl_vif_info);
+#endif
+
+ /* <6> mac address */
+ if (is_valid_ether_addr(rtlefuse->dev_addr)) {
+ SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
+ } else {
+ u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
+ get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1);
+ SET_IEEE80211_PERM_ADDR(hw, rtlmac);
+ }
+
+}
+
+static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /* <1> timer */
+ init_timer(&rtlpriv->works.watchdog_timer);
+ setup_timer(&rtlpriv->works.watchdog_timer,
+ rtl_watch_dog_timer_callback, (unsigned long)hw);
+ init_timer(&rtlpriv->works.dualmac_easyconcurrent_retrytimer);
+ setup_timer(&rtlpriv->works.dualmac_easyconcurrent_retrytimer,
+ rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw);
+ /* <2> work queue */
+ rtlpriv->works.hw = hw;
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+/*<delete in kernel end>*/
+ rtlpriv->works.rtl_wq = alloc_workqueue(rtlpriv->cfg->name, 0, 0);
+/*<delete in kernel start>*/
+#else
+ rtlpriv->works.rtl_wq = create_workqueue(rtlpriv->cfg->name);
+#endif
+/*<delete in kernel end>*/
+ INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
+ (void *)rtl_watchdog_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
+ (void *)rtl_ips_nic_off_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.ps_work,
+ (void *)rtl_swlps_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.ps_rfon_wq,
+ (void *)rtl_swlps_rfon_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq,
+ (void *)rtl_fwevt_wq_callback);
+
+}
+
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ del_timer_sync(&rtlpriv->works.watchdog_timer);
+
+ cancel_delayed_work(&rtlpriv->works.watchdog_wq);
+ cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+ cancel_delayed_work(&rtlpriv->works.ps_work);
+ cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
+ cancel_delayed_work(&rtlpriv->works.fwevt_wq);
+}
+
+void rtl_init_rfkill(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ bool radio_state;
+ bool blocked;
+ u8 valid = 0;
+
+ /*set init state to on */
+ rtlpriv->rfkill.rfkill_state = 1;
+ wiphy_rfkill_set_hw_state(hw->wiphy, 0);
+
+ radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
+
+ if (valid) {
+ printk(KERN_INFO "rtlwifi: wireless switch is %s\n",
+ rtlpriv->rfkill.rfkill_state ? "on" : "off");
+
+ rtlpriv->rfkill.rfkill_state = radio_state;
+
+ blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+ }
+
+ wiphy_rfkill_start_polling(hw->wiphy);
+}
+
+void rtl_deinit_rfkill(struct ieee80211_hw *hw)
+{
+ wiphy_rfkill_stop_polling(hw->wiphy);
+}
+
+#ifdef VIF_TODO
+static void rtl_init_vif(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ INIT_LIST_HEAD(&rtlpriv->vif_priv.vif_list);
+
+ rtlpriv->vif_priv.vifs = 0;
+}
+#endif
+
+int rtl_init_core(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+
+ /* <1> init mac80211 */
+ _rtl_init_mac80211(hw);
+ rtlmac->hw = hw;
+ rtlmac->link_state = MAC80211_NOLINK;
+
+ /* <2> rate control register */
+ hw->rate_control_algorithm = "rtl_rc";
+
+ /*
+ * <3> init CRDA must come after init
+ * mac80211 hw in _rtl_init_mac80211.
+ */
+ if (rtl_regd_init(hw, rtl_reg_notifier)) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("REGD init failed\n"));
+ return 1;
+ }
+
+ /* <4> locks */
+ mutex_init(&rtlpriv->locks.conf_mutex);
+ spin_lock_init(&rtlpriv->locks.ips_lock);
+ spin_lock_init(&rtlpriv->locks.irq_th_lock);
+ spin_lock_init(&rtlpriv->locks.h2c_lock);
+ spin_lock_init(&rtlpriv->locks.rf_ps_lock);
+ spin_lock_init(&rtlpriv->locks.rf_lock);
+ spin_lock_init(&rtlpriv->locks.lps_lock);
+ spin_lock_init(&rtlpriv->locks.waitq_lock);
+ spin_lock_init(&rtlpriv->locks.entry_list_lock);
+ spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
+ spin_lock_init(&rtlpriv->locks.check_sendpkt_lock);
+ spin_lock_init(&rtlpriv->locks.fw_ps_lock);
+ spin_lock_init(&rtlpriv->locks.iqk_lock);
+ /* <5> init list */
+ INIT_LIST_HEAD(&rtlpriv->entry_list);
+
+ /* <6> init deferred work */
+ _rtl_init_deferred_work(hw);
+
+ /* <7> */
+#ifdef VIF_TODO
+ rtl_init_vif(hw);
+#endif
+
+ return 0;
+}
+
+void rtl_deinit_core(struct ieee80211_hw *hw)
+{
+}
+
+void rtl_init_rx_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf));
+}
+
+/*********************************************************
+ *
+ * tx information functions
+ *
+ *********************************************************/
+static void _rtl_qurey_shortpreamble_mode(struct ieee80211_hw *hw,
+ struct rtl_tcb_desc *tcb_desc,
+ struct ieee80211_tx_info *info)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 rate_flag = info->control.rates[0].flags;
+
+ tcb_desc->use_shortpreamble = false;
+
+ /* 1M can only use Long Preamble. 11B spec */
+ if (tcb_desc->hw_rate == rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M])
+ return;
+ else if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ tcb_desc->use_shortpreamble = true;
+
+ return;
+}
+
+static void _rtl_query_shortgi(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct rtl_tcb_desc *tcb_desc,
+ struct ieee80211_tx_info *info)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u8 rate_flag = info->control.rates[0].flags;
+ u8 sgi_40 = 0, sgi_20 = 0, bw_40 = 0;
+ tcb_desc->use_shortgi = false;
+
+ if (sta == NULL)
+ return;
+
+ sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
+ sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
+
+ if (!(sta->ht_cap.ht_supported))
+ return;
+
+ if (!sgi_40 && !sgi_20)
+ return;
+
+ if (mac->opmode == NL80211_IFTYPE_STATION)
+ bw_40 = mac->bw_40;
+ else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT)
+ bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ if ((bw_40 == true) && sgi_40)
+ tcb_desc->use_shortgi = true;
+ else if ((bw_40 == false) && sgi_20)
+ tcb_desc->use_shortgi = true;
+
+ if (!(rate_flag & IEEE80211_TX_RC_SHORT_GI))
+ tcb_desc->use_shortgi = false;
+}
+
+static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
+ struct rtl_tcb_desc *tcb_desc,
+ struct ieee80211_tx_info *info)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 rate_flag = info->control.rates[0].flags;
+
+ /* Common Settings */
+ tcb_desc->b_rts_stbc = false;
+ tcb_desc->b_cts_enable = false;
+ tcb_desc->rts_sc = 0;
+ tcb_desc->b_rts_bw = false;
+ tcb_desc->b_rts_use_shortpreamble = false;
+ tcb_desc->b_rts_use_shortgi = false;
+
+ if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ /* Use CTS-to-SELF in protection mode. */
+ tcb_desc->b_rts_enable = true;
+ tcb_desc->b_cts_enable = true;
+ tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
+ } else if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
+ /* Use RTS-CTS in protection mode. */
+ tcb_desc->b_rts_enable = true;
+ tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
+ }
+}
+
+static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct rtl_tcb_desc *tcb_desc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_sta_info *sta_entry = NULL;
+ u8 ratr_index = 7;
+
+ if (sta) {
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ ratr_index = sta_entry->ratr_index;
+ }
+ if (!tcb_desc->disable_ratefallback || !tcb_desc->use_driver_rate) {
+ if (mac->opmode == NL80211_IFTYPE_STATION) {
+ tcb_desc->ratr_index = 0;
+ } else if (mac->opmode == NL80211_IFTYPE_ADHOC ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT) {
+ if (tcb_desc->b_multicast || tcb_desc->b_broadcast) {
+ tcb_desc->hw_rate =
+ rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M];
+ tcb_desc->use_driver_rate = 1;
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+ } else {
+ tcb_desc->ratr_index = ratr_index;
+ }
+ } else if (mac->opmode == NL80211_IFTYPE_AP) {
+ tcb_desc->ratr_index = ratr_index;
+ }
+ }
+
+ if (rtlpriv->dm.b_useramask) {
+ tcb_desc->ratr_index = ratr_index;
+ /* TODO we will differentiate adhoc and station in the future */
+ if (mac->opmode == NL80211_IFTYPE_STATION ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT) {
+ tcb_desc->mac_id = 0;
+
+ if (mac->mode == WIRELESS_MODE_N_24G) {
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_NGB;
+ } else if (mac->mode == WIRELESS_MODE_N_5G) {
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_NG;
+ } else if (mac->mode & WIRELESS_MODE_G) {
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_GB;
+ } else if (mac->mode & WIRELESS_MODE_B) {
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_B;
+ } else if (mac->mode & WIRELESS_MODE_A) {
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_G;
+ }
+ } else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ if (NULL != sta) {
+ if (sta->aid > 0) {
+ tcb_desc->mac_id = sta->aid + 1;
+ } else {
+ tcb_desc->mac_id = 1;
+ }
+ } else {
+ tcb_desc->mac_id = 0;
+ }
+ }
+ }
+}
+
+static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct rtl_tcb_desc *tcb_desc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ tcb_desc->b_packet_bw = false;
+ if (!sta)
+ return;
+ if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT) {
+ if (!(sta->ht_cap.ht_supported) ||
+ !(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ return;
+ } else if (mac->opmode == NL80211_IFTYPE_STATION) {
+ if (!mac->bw_40 || !(sta->ht_cap.ht_supported))
+ return;
+ }
+ if (tcb_desc->b_multicast || tcb_desc->b_broadcast)
+ return;
+
+ /* legacy rates shall use 20 MHz bandwidth */
+ if (tcb_desc->hw_rate <= rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M])
+ return;
+
+ tcb_desc->b_packet_bw = true;
+}
+
+static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 hw_rate;
+
+ if ((get_rf_type(rtlphy) == RF_2T2R) && (sta->ht_cap.mcs.rx_mask[1]!=0))
+ hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15];
+ else
+ hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7];
+
+ return hw_rate;
+}
+
+void rtl_get_tcb_desc(struct ieee80211_hw *hw,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
+ struct ieee80211_rate *txrate;
+ u16 fc = rtl_get_fc(skb);
+
+ txrate = ieee80211_get_tx_rate(hw, info);
+ if (txrate != NULL)
+ tcb_desc->hw_rate = txrate->hw_value;
+
+ if (ieee80211_is_data(fc)) {
+ /*
+ * In rtl_rc.c we set rate index 0 when the skb carries special
+ * data or a management frame that needs a low data rate, so
+ * tcb_desc->hw_rate is only used for special data and
+ * management frames.
+ */
+ if (info->control.rates[0].idx == 0 ||
+ ieee80211_is_nullfunc(fc)) {
+ tcb_desc->use_driver_rate = true;
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+
+ tcb_desc->disable_ratefallback = 1;
+ } else {
+ /*
+ * The hardware never uses hw_rate when
+ * tcb_desc->use_driver_rate is false, so we do not set the
+ * highest N rate here; in that case all N rates are
+ * controlled by the firmware.
+ */
+ if (sta && (sta->ht_cap.ht_supported)) {
+ tcb_desc->hw_rate = _rtl_get_highest_n_rate(hw, sta);
+ } else {
+ if(rtlmac->mode == WIRELESS_MODE_B) {
+ tcb_desc->hw_rate =
+ rtlpriv->cfg->maps[RTL_RC_CCK_RATE11M];
+ } else {
+ tcb_desc->hw_rate =
+ rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M];
+ }
+ }
+ }
+
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
+ tcb_desc->b_multicast = 1;
+ else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+ tcb_desc->b_broadcast = 1;
+
+ _rtl_txrate_selectmode(hw, sta, tcb_desc);
+ _rtl_query_bandwidth_mode(hw, sta, tcb_desc);
+ _rtl_qurey_shortpreamble_mode(hw, tcb_desc, info);
+ _rtl_query_shortgi(hw, sta, tcb_desc, info);
+ _rtl_query_protection_mode(hw, tcb_desc, info);
+ } else {
+ tcb_desc->use_driver_rate = true;
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+ tcb_desc->disable_ratefallback = 1;
+ tcb_desc->mac_id = 0;
+ tcb_desc->b_packet_bw = false;
+ }
+}
+//EXPORT_SYMBOL(rtl_get_tcb_desc);
+
+bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 fc = rtl_get_fc(skb);
+
+ if (rtlpriv->dm.supp_phymode_switch &&
+ mac->link_state < MAC80211_LINKED &&
+ (ieee80211_is_auth(fc) || ieee80211_is_probe_req(fc))) {
+ if (rtlpriv->cfg->ops->check_switch_to_dmdp)
+ rtlpriv->cfg->ops->check_switch_to_dmdp(hw);
+ }
+ if (ieee80211_is_auth(fc)) {
+ RT_TRACE(COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
+ rtl_ips_nic_on(hw);
+
+ mac->link_state = MAC80211_LINKING;
+ /* Dual mac */
+ rtlpriv->phy.b_need_iqk = true;
+
+ }
+
+ return true;
+}
+
+struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw, u8 *sa,
+ u8 *bssid, u16 tid);
+bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 fc = rtl_get_fc(skb);
+ u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN));
+ u8 category;
+
+ if (!ieee80211_is_action(fc))
+ return true;
+
+ category = *act;
+ act++;
+ switch (category) {
+ case ACT_CAT_BA:
+ switch (*act) {
+ case ACT_ADDBAREQ:
+ if (mac->act_scanning)
+ return false;
+
+ RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG,
+ ("%s ACT_ADDBAREQ From :%pM\n",
+ is_tx ? "Tx" : "Rx", hdr->addr2));
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("req \n"),
+ skb->data, skb->len);
+ if (!is_tx) {
+ struct ieee80211_sta *sta = NULL;
+ struct rtl_sta_info *sta_entry = NULL;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ u16 capab = 0, tid = 0;
+ struct rtl_tid_data *tid_data;
+ struct sk_buff *skb_delba = NULL;
+ struct ieee80211_rx_status rx_status = { 0 };
+
+ rcu_read_lock();
+ sta = rtl_find_sta(hw, hdr->addr3);
+ if (sta == NULL) {
+ RT_TRACE((COMP_SEND | COMP_RECV),
+ DBG_EMERG, ("sta is NULL\n"));
+ rcu_read_unlock();
+ return true;
+ }
+
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ if (!sta_entry) {
+ rcu_read_unlock();
+ return true;
+ }
+ capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
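+				/* the TID occupies bits 2..5 of the ADDBA parameter set field */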
+ tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+ tid_data = &sta_entry->tids[tid];
+ if (tid_data->agg.rx_agg_state ==
+ RTL_RX_AGG_START) {
+ skb_delba = rtl_make_del_ba(hw,
+ hdr->addr2,
+ hdr->addr3,
+ tid);
+ if (skb_delba) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ rx_status.freq = hw->conf.chandef.chan->center_freq;
+ rx_status.band = hw->conf.chandef.chan->band;
+#else
+ rx_status.freq = hw->conf.channel->center_freq;
+ rx_status.band = hw->conf.channel->band;
+#endif
+ rx_status.flag |= RX_FLAG_DECRYPTED;
+ rx_status.flag |= RX_FLAG_MACTIME_MPDU;
+ rx_status.rate_idx = 0;
+ rx_status.signal = 50 + 10;
+ memcpy(IEEE80211_SKB_RXCB(skb_delba), &rx_status,
+ sizeof(rx_status));
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("fake del\n"), skb_delba->data,
+ skb_delba->len);
+ ieee80211_rx_irqsafe(hw, skb_delba);
+ }
+ }
+ rcu_read_unlock();
+ }
+ break;
+ case ACT_ADDBARSP:
+ RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG,
+ ("%s ACT_ADDBARSP From :%pM\n",
+ is_tx ? "Tx" : "Rx", hdr->addr2));
+ break;
+ case ACT_DELBA:
+ RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG,
+ ("ACT_ADDBADEL From :%pM\n", hdr->addr2));
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+/*should call before software enc*/
+u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u16 fc = rtl_get_fc(skb);
+ u16 ether_type;
+ u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ const struct iphdr *ip;
+
+ if (!ieee80211_is_data(fc))
+ goto end;
+
+
+ ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len +
+ SNAP_SIZE + PROTOC_TYPE_SIZE);
+ ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE);
+ ether_type = ntohs(ether_type);
+
+ if (ETH_P_IP == ether_type) {
+ if (IPPROTO_UDP == ip->protocol) {
+ struct udphdr *udp = (struct udphdr *)((u8 *) ip +
+ (ip->ihl << 2));
+ if (((((u8 *) udp)[1] == 68) &&
+ (((u8 *) udp)[3] == 67)) ||
+ ((((u8 *) udp)[1] == 67) &&
+ (((u8 *) udp)[3] == 68))) {
+ /*
+ * 68 : UDP BOOTP client
+ * 67 : UDP BOOTP server
+ */
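+				/* (udp[1] and udp[3] are the low bytes of the
+				 * big-endian source and destination ports;
+				 * 67 and 68 both fit in one byte, so this
+				 * matches DHCP in either direction) */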
+ RT_TRACE((COMP_SEND | COMP_RECV),
+ DBG_DMESG, ("dhcp %s !!\n",
+ (is_tx) ? "Tx" : "Rx"));
+
+ if (is_tx) {
+ rtlpriv->ra.is_special_data = true;
+ rtl_lps_leave(hw);
+ ppsc->last_delaylps_stamp_jiffies =
+ jiffies;
+ }
+
+ return true;
+ }
+ }
+ } else if (ETH_P_ARP == ether_type) {
+ if (is_tx) {
+ rtlpriv->ra.is_special_data = true;
+ rtl_lps_leave(hw);
+ ppsc->last_delaylps_stamp_jiffies = jiffies;
+ }
+
+ return true;
+ } else if (ETH_P_PAE == ether_type) {
+ RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG,
+ ("802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx"));
+
+ if (is_tx) {
+ rtlpriv->ra.is_special_data = true;
+ rtl_lps_leave(hw);
+ ppsc->last_delaylps_stamp_jiffies = jiffies;
+ }
+
+ return true;
+ } else if (0x86DD == ether_type) {
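+		/* 0x86DD is the IPv6 EtherType */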
+ return true;
+ }
+
+end:
+ rtlpriv->ra.is_special_data = false;
+ return false;
+}
+
+/*********************************************************
+ *
+ * functions called by core.c
+ *
+ *********************************************************/
+int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tid_data *tid_data;
+ struct rtl_sta_info *sta_entry = NULL;
+
+ if (sta == NULL)
+ return -EINVAL;
+
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ if (!sta_entry)
+ return -ENXIO;
+ tid_data = &sta_entry->tids[tid];
+
+ RT_TRACE(COMP_SEND, DBG_DMESG,
+ ("on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
+ tid_data->seq_number));
+
+ *ssn = tid_data->seq_number;
+ tid_data->agg.agg_state = RTL_AGG_START;
+
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ return 0;
+}
+
+int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tid_data *tid_data;
+ struct rtl_sta_info *sta_entry = NULL;
+
+ if (sta == NULL)
+ return -EINVAL;
+
+ if (!sta->addr) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("ra = NULL\n"));
+ return -EINVAL;
+ }
+
+ RT_TRACE(COMP_SEND, DBG_DMESG,
+ ("on ra = %pM tid = %d\n", sta->addr, tid));
+
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ tid_data = &sta_entry->tids[tid];
+ sta_entry->tids[tid].agg.agg_state = RTL_AGG_STOP;
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ return 0;
+}
+
+int rtl_rx_agg_start(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tid_data *tid_data;
+ struct rtl_sta_info *sta_entry = NULL;
+
+ if (sta == NULL)
+ return -EINVAL;
+
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ if (!sta_entry)
+ return -ENXIO;
+ tid_data = &sta_entry->tids[tid];
+
+ RT_TRACE(COMP_RECV, DBG_DMESG,
+ ("on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
+ tid_data->seq_number));
+
+ tid_data->agg.rx_agg_state = RTL_RX_AGG_START;
+ return 0;
+}
+
+int rtl_rx_agg_stop(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tid_data *tid_data;
+ struct rtl_sta_info *sta_entry = NULL;
+
+ if (sta == NULL)
+ return -EINVAL;
+
+ if (!sta->addr) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("ra = NULL\n"));
+ return -EINVAL;
+ }
+
+ RT_TRACE(COMP_SEND, DBG_DMESG,
+ ("on ra = %pM tid = %d\n", sta->addr, tid));
+
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ tid_data = &sta_entry->tids[tid];
+ sta_entry->tids[tid].agg.rx_agg_state = RTL_RX_AGG_STOP;
+
+ return 0;
+}
+int rtl_tx_agg_oper(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tid_data *tid_data;
+ struct rtl_sta_info *sta_entry = NULL;
+
+ if (sta == NULL)
+ return -EINVAL;
+
+ if (!sta->addr) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("ra = NULL\n"));
+ return -EINVAL;
+ }
+
+ RT_TRACE(COMP_SEND, DBG_DMESG,
+ ("on ra = %pM tid = %d\n", sta->addr, tid));
+
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ tid_data = &sta_entry->tids[tid];
+ sta_entry->tids[tid].agg.agg_state = RTL_AGG_OPERATIONAL;
+
+ return 0;
+}
+
+/*********************************************************
+ *
+ * wq & timer callback functions
+ *
+ *********************************************************/
+/* this function is used for roaming */
+void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
+ return;
+
+ if (rtlpriv->mac80211.link_state < MAC80211_LINKED)
+ return;
+
+ /* check if this really is a beacon */
+ if (!ieee80211_is_beacon(hdr->frame_control) &&
+ !ieee80211_is_probe_resp(hdr->frame_control))
+ return;
+
+ /* min. beacon length + FCS_LEN */
+ if (skb->len <= 40 + FCS_LEN)
+ return;
+
+ /* and only beacons from the associated BSSID, please */
+ if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+ return;
+
+ rtlpriv->link_info.bcn_rx_inperiod ++;
+}
+
+void rtl_watchdog_wq_callback(void *data)
+{
+ struct rtl_works *rtlworks = container_of_dwork_rtl(data,
+ struct rtl_works,
+ watchdog_wq);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ bool b_busytraffic = false;
+ bool b_tx_busy_traffic = false;
+ bool b_rx_busy_traffic = false;
+ bool b_higher_busytraffic = false;
+ bool b_higher_busyrxtraffic = false;
+ u8 idx, tid;
+ u32 rx_cnt_inp4eriod = 0;
+ u32 tx_cnt_inp4eriod = 0;
+ u32 aver_rx_cnt_inperiod = 0;
+ u32 aver_tx_cnt_inperiod = 0;
+ u32 aver_tidtx_inperiod[MAX_TID_COUNT] = {0};
+ u32 tidtx_inp4eriod[MAX_TID_COUNT] = {0};
+ bool benter_ps = false;
+
+ if (is_hal_stop(rtlhal))
+ return;
+
+ /* <1> Determine if action frame is allowed */
+ if (mac->link_state > MAC80211_NOLINK) {
+ if (mac->cnt_after_linked < 20)
+ mac->cnt_after_linked++;
+ } else {
+ mac->cnt_after_linked = 0;
+ }
+
+ /* <2> check whether traffic is busy; if it is,
+ * we don't change channel */
+ if (mac->link_state >= MAC80211_LINKED) {
+
+ /* (1) get aver_rx_cnt_inperiod & aver_tx_cnt_inperiod */
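+		/* slide a four-sample window: drop the oldest period,
+		 * append the newest, then average the four entries */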
+ for (idx = 0; idx <= 2; idx++) {
+ rtlpriv->link_info.num_rx_in4period[idx] =
+ rtlpriv->link_info.num_rx_in4period[idx + 1];
+ rtlpriv->link_info.num_tx_in4period[idx] =
+ rtlpriv->link_info.num_tx_in4period[idx + 1];
+ }
+ rtlpriv->link_info.num_rx_in4period[3] =
+ rtlpriv->link_info.num_rx_inperiod;
+ rtlpriv->link_info.num_tx_in4period[3] =
+ rtlpriv->link_info.num_tx_inperiod;
+ for (idx = 0; idx <= 3; idx++) {
+ rx_cnt_inp4eriod +=
+ rtlpriv->link_info.num_rx_in4period[idx];
+ tx_cnt_inp4eriod +=
+ rtlpriv->link_info.num_tx_in4period[idx];
+ }
+ aver_rx_cnt_inperiod = rx_cnt_inp4eriod / 4;
+ aver_tx_cnt_inperiod = tx_cnt_inp4eriod / 4;
+
+ /* (2) check traffic busy */
+ if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100) {
+ b_busytraffic = true;
+ if (aver_rx_cnt_inperiod > aver_tx_cnt_inperiod)
+ b_rx_busy_traffic = true;
+ else
+ b_tx_busy_traffic = true;
+ }
+
+ /* Higher Tx/Rx data. */
+ if (aver_rx_cnt_inperiod > 4000 ||
+ aver_tx_cnt_inperiod > 4000) {
+ b_higher_busytraffic = true;
+
+ /* Extremely high Rx data. */
+ if (aver_rx_cnt_inperiod > 5000)
+ b_higher_busyrxtraffic = true;
+ }
+
+ /* check every tid's tx traffic */
+ for (tid = 0; tid <= 7; tid++) {
+ for (idx = 0; idx <= 2; idx++)
+ rtlpriv->link_info.tidtx_in4period[tid][idx] =
+ rtlpriv->link_info.tidtx_in4period[tid]
+ [idx + 1];
+ rtlpriv->link_info.tidtx_in4period[tid][3] =
+ rtlpriv->link_info.tidtx_inperiod[tid];
+
+ for (idx = 0; idx <= 3; idx++)
+ tidtx_inp4eriod[tid] +=
+ rtlpriv->link_info.tidtx_in4period[tid][idx];
+ aver_tidtx_inperiod[tid] = tidtx_inp4eriod[tid] / 4;
+ if (aver_tidtx_inperiod[tid] > 5000)
+ rtlpriv->link_info.higher_busytxtraffic[tid] =
+ true;
+ else
+ rtlpriv->link_info.higher_busytxtraffic[tid] =
+ false;
+ }
+
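+		/* enter LeisurePS only when the link is nearly idle:
+		 * more than eight frames in total, or more than two Rx
+		 * frames, in this period keeps power save off */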
+ if (((rtlpriv->link_info.num_rx_inperiod +
+ rtlpriv->link_info.num_tx_inperiod) > 8) ||
+ (rtlpriv->link_info.num_rx_inperiod > 2))
+ benter_ps = false;
+ else
+ benter_ps = true;
+
+ /* LeisurePS only works in infra mode. */
+ if (benter_ps)
+ rtl_lps_enter(hw);
+ else
+ rtl_lps_leave(hw);
+ }
+
+ rtlpriv->link_info.num_rx_inperiod = 0;
+ rtlpriv->link_info.num_tx_inperiod = 0;
+ for (tid = 0; tid <= 7; tid++)
+ rtlpriv->link_info.tidtx_inperiod[tid] = 0;
+
+ rtlpriv->link_info.b_busytraffic = b_busytraffic;
+ rtlpriv->link_info.b_rx_busy_traffic = b_rx_busy_traffic;
+ rtlpriv->link_info.b_tx_busy_traffic = b_tx_busy_traffic;
+ rtlpriv->link_info.b_higher_busytraffic = b_higher_busytraffic;
+ rtlpriv->link_info.b_higher_busyrxtraffic = b_higher_busyrxtraffic;
+
+ /* <3> DM */
+ rtlpriv->cfg->ops->dm_watchdog(hw);
+
+ /* <4> roaming */
+ if (mac->link_state == MAC80211_LINKED &&
+ mac->opmode == NL80211_IFTYPE_STATION) {
+ if ((rtlpriv->link_info.bcn_rx_inperiod +
+ rtlpriv->link_info.num_rx_inperiod) == 0) {
+ rtlpriv->link_info.roam_times++;
+ RT_TRACE(COMP_ERR, DBG_DMESG, ("AP off for %d s\n",
+ (rtlpriv->link_info.roam_times * 2)));
+
+ /* if we haven't received a beacon for 10 s,
+ * we should reconnect to this AP */
+ if (rtlpriv->link_info.roam_times >= 5) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("AP off, try to reconnect now\n"));
+ rtlpriv->link_info.roam_times = 0;
+ ieee80211_connection_loss(rtlpriv->mac80211.vif);
+ }
+ } else {
+ rtlpriv->link_info.roam_times = 0;
+ }
+ }
+ rtlpriv->link_info.bcn_rx_inperiod = 0;
+}
+
+void rtl_watch_dog_timer_callback(unsigned long data)
+{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ queue_delayed_work(rtlpriv->works.rtl_wq,
+ &rtlpriv->works.watchdog_wq, 0);
+
+ mod_timer(&rtlpriv->works.watchdog_timer,
+ jiffies + MSECS(RTL_WATCH_DOG_TIME));
+}
+void rtl_fwevt_wq_callback(void *data)
+{
+ struct rtl_works *rtlworks =
+ container_of_dwork_rtl(data, struct rtl_works, fwevt_wq);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->cfg->ops->c2h_command_handle(hw);
+}
+void rtl_easy_concurrent_retrytimer_callback(unsigned long data)
+{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_priv *buddy_priv = rtlpriv->buddy_priv;
+
+ if(buddy_priv == NULL)
+ return;
+
+ rtlpriv->cfg->ops->dualmac_easy_concurrent(hw);
+}
+/*********************************************************
+ *
+ * frame process functions
+ *
+ *********************************************************/
+u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie)
+{
+ struct ieee80211_mgmt *mgmt = (void *)data;
+ u8 *pos, *end;
+
+ pos = (u8 *)mgmt->u.beacon.variable;
+ end = data + len;
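+	/* walk the information elements as ID/length/value triplets:
+	 * pos[0] is the element ID, pos[1] the payload length */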
+ while (pos < end) {
+ if (pos + 2 + pos[1] > end)
+ return NULL;
+
+ if (pos[0] == ie)
+ return pos;
+
+ pos += 2 + pos[1];
+ }
+ return NULL;
+}
+
+/* when we use 2 rx ants we send IEEE80211_SMPS_OFF */
+/* when we use 1 rx ant we send IEEE80211_SMPS_STATIC */
+struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw,
+ enum ieee80211_smps_mode smps,
+ u8 *da, u8 *bssid)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct sk_buff *skb;
+ struct ieee80211_mgmt_compat *action_frame;
+
+ /* 27 = header + category + action + smps mode */
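+	/* (24-byte management header + 1 category + 1 action code + 1 SMPS control) */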
+ skb = dev_alloc_skb(27 + hw->extra_tx_headroom);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, hw->extra_tx_headroom);
+ action_frame = (void *)skb_put(skb, 27);
+ memset(action_frame, 0, 27);
+ memcpy(action_frame->da, da, ETH_ALEN);
+ memcpy(action_frame->sa, rtlefuse->dev_addr, ETH_ALEN);
+ memcpy(action_frame->bssid, bssid, ETH_ALEN);
+ action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ action_frame->u.action.category = WLAN_CATEGORY_HT;
+ action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS;
+ switch (smps) {
+ case IEEE80211_SMPS_AUTOMATIC:/* 0 */
+ case IEEE80211_SMPS_NUM_MODES:/* 4 */
+ WARN_ON(1);
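+		/* fall through */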
+ case IEEE80211_SMPS_OFF:/* 1 */ /*MIMO_PS_NOLIMIT*/
+ action_frame->u.action.u.ht_smps.smps_control =
+ WLAN_HT_SMPS_CONTROL_DISABLED;/* 0 */
+ break;
+ case IEEE80211_SMPS_STATIC:/* 2 */ /*MIMO_PS_STATIC*/
+ action_frame->u.action.u.ht_smps.smps_control =
+ WLAN_HT_SMPS_CONTROL_STATIC;/* 1 */
+ break;
+ case IEEE80211_SMPS_DYNAMIC:/* 3 */ /*MIMO_PS_DYNAMIC*/
+ action_frame->u.action.u.ht_smps.smps_control =
+ WLAN_HT_SMPS_CONTROL_DYNAMIC;/* 3 */
+ break;
+ }
+
+ return skb;
+}
+
+int rtl_send_smps_action(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ enum ieee80211_smps_mode smps)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct sk_buff *skb = NULL;
+ struct rtl_tcb_desc tcb_desc;
+ u8 bssid[ETH_ALEN] = {0};
+
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+
+ if (rtlpriv->mac80211.act_scanning)
+ goto err_free;
+
+ if (!sta)
+ goto err_free;
+
+ if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
+ goto err_free;
+
+ if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+ goto err_free;
+
+ if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP)
+ memcpy(bssid, rtlpriv->efuse.dev_addr, ETH_ALEN);
+ else
+ memcpy(bssid, rtlpriv->mac80211.bssid, ETH_ALEN);
+
+ skb = rtl_make_smps_action(hw, smps, sta->addr, bssid);
+ /* this is a type = mgmt, subtype = action frame */
+ if (skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct rtl_sta_info *sta_entry =
+ (struct rtl_sta_info *) sta->drv_priv;
+ sta_entry->mimo_ps = smps;
+ /* rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); */
+
+ info->control.rates[0].idx = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ info->band = hw->conf.chandef.chan->band;
+#else
+ info->band = hw->conf.channel->band;
+#endif
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ info->control.sta = sta;
+ rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+#else
+/*<delete in kernel end>*/
+ rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+ }
+ return 1;
+
+err_free:
+ return 0;
+}
+//EXPORT_SYMBOL(rtl_send_smps_action);
+
+/* mac80211 has issues around receiving del_ba frames, so if we
+ * receive an ADDBA request while rx aggregation is already open we
+ * build a fake del_ba ourselves and feed it back to mac80211 (see
+ * rtl_action_proc()) so that it releases its BA-related resources. */
+struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw,
+ u8 *sa, u8 *bssid, u16 tid)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct sk_buff *skb;
+ struct ieee80211_mgmt *action_frame;
+ u16 params;
+
+ /* room for the fake del_ba action frame */
+ skb = dev_alloc_skb(34 + hw->extra_tx_headroom);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, hw->extra_tx_headroom);
+ action_frame = (void *)skb_put(skb, 34);
+ memset(action_frame, 0, 34);
+ memcpy(action_frame->sa, sa, ETH_ALEN);
+ memcpy(action_frame->da, rtlefuse->dev_addr, ETH_ALEN);
+ memcpy(action_frame->bssid, bssid, ETH_ALEN);
+ action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ action_frame->u.action.category = WLAN_CATEGORY_BACK;
+ action_frame->u.action.u.delba.action_code = WLAN_ACTION_DELBA;
+ params = (u16)(1 << 11); /* bit 11 initiator */
+ params |= (u16)(tid << 12); /* bit 15:12 TID number */
+
+ action_frame->u.action.u.delba.params = cpu_to_le16(params);
+ action_frame->u.action.u.delba.reason_code =
+ cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
+
+ return skb;
+}
+
+/*********************************************************
+ *
+ * IOT functions
+ *
+ *********************************************************/
+static bool rtl_chk_vendor_ouisub(struct ieee80211_hw *hw,
+ struct octet_string vendor_ie)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool matched = false;
+ static u8 athcap_1[] = { 0x00, 0x03, 0x7F };
+ static u8 athcap_2[] = { 0x00, 0x13, 0x74 };
+ static u8 broadcap_1[] = { 0x00, 0x10, 0x18 };
+ static u8 broadcap_2[] = { 0x00, 0x0a, 0xf7 };
+ static u8 broadcap_3[] = { 0x00, 0x05, 0xb5 };
+ static u8 racap[] = { 0x00, 0x0c, 0x43 };
+ static u8 ciscocap[] = { 0x00, 0x40, 0x96 };
+ static u8 marvcap[] = { 0x00, 0x50, 0x43 };
+
+ if (memcmp(vendor_ie.octet, athcap_1, 3) == 0 ||
+ memcmp(vendor_ie.octet, athcap_2, 3) == 0) {
+ rtlpriv->mac80211.vendor = PEER_ATH;
+ matched = true;
+ } else if (memcmp(vendor_ie.octet, broadcap_1, 3) == 0 ||
+ memcmp(vendor_ie.octet, broadcap_2, 3) == 0 ||
+ memcmp(vendor_ie.octet, broadcap_3, 3) == 0) {
+ rtlpriv->mac80211.vendor = PEER_BROAD;
+ matched = true;
+ } else if (memcmp(vendor_ie.octet, racap, 3) == 0) {
+ rtlpriv->mac80211.vendor = PEER_RAL;
+ matched = true;
+ } else if (memcmp(vendor_ie.octet, ciscocap, 3) == 0) {
+ rtlpriv->mac80211.vendor = PEER_CISCO;
+ matched = true;
+ } else if (memcmp(vendor_ie.octet, marvcap, 3) == 0) {
+ rtlpriv->mac80211.vendor = PEER_MARV;
+ matched = true;
+ }
+
+ return matched;
+}
+
+bool rtl_find_221_ie(struct ieee80211_hw *hw, u8 *data,
+ unsigned int len)
+{
+ struct ieee80211_mgmt *mgmt = (void *)data;
+ struct octet_string vendor_ie;
+ u8 *pos, *end;
+
+ pos = (u8 *)mgmt->u.beacon.variable;
+ end = data + len;
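+	/* element ID 221 is the vendor-specific IE; its first three
+	 * octets are the vendor OUI matched in rtl_chk_vendor_ouisub() */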
+ while (pos < end) {
+ if (pos[0] == 221) {
+ vendor_ie.length = pos[1];
+ vendor_ie.octet = &pos[2];
+ if (rtl_chk_vendor_ouisub(hw, vendor_ie))
+ return true;
+ }
+
+ if (pos + 2 + pos[1] > end)
+ return false;
+
+ pos += 2 + pos[1];
+ }
+ return false;
+}
+
+void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_hdr *hdr = (void *)data;
+ u32 vendor = PEER_UNKNOWN;
+
+ static u8 ap3_1[3] = { 0x00, 0x14, 0xbf };
+ static u8 ap3_2[3] = { 0x00, 0x1a, 0x70 };
+ static u8 ap3_3[3] = { 0x00, 0x1d, 0x7e };
+ static u8 ap4_1[3] = { 0x00, 0x90, 0xcc };
+ static u8 ap4_2[3] = { 0x00, 0x0e, 0x2e };
+ static u8 ap4_3[3] = { 0x00, 0x18, 0x02 };
+ static u8 ap4_4[3] = { 0x00, 0x17, 0x3f };
+ static u8 ap4_5[3] = { 0x00, 0x1c, 0xdf };
+ static u8 ap5_1[3] = { 0x00, 0x1c, 0xf0 };
+ static u8 ap5_2[3] = { 0x00, 0x21, 0x91 };
+ static u8 ap5_3[3] = { 0x00, 0x24, 0x01 };
+ static u8 ap5_4[3] = { 0x00, 0x15, 0xe9 };
+ static u8 ap5_5[3] = { 0x00, 0x17, 0x9A };
+ static u8 ap5_6[3] = { 0x00, 0x18, 0xE7 };
+ static u8 ap6_1[3] = { 0x00, 0x17, 0x94 };
+ static u8 ap7_1[3] = { 0x00, 0x14, 0xa4 };
+
+ if (mac->opmode != NL80211_IFTYPE_STATION)
+ return;
+
+ if (mac->link_state == MAC80211_NOLINK) {
+ mac->vendor = PEER_UNKNOWN;
+ return;
+ }
+
+ if (mac->cnt_after_linked > 2)
+ return;
+
+ /* check if this really is a beacon */
+ if (!ieee80211_is_beacon(hdr->frame_control))
+ return;
+
+ /* min. beacon length + FCS_LEN */
+ if (len <= 40 + FCS_LEN)
+ return;
+
+ /* and only beacons from the associated BSSID, please */
+ if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+ return;
+
+ if (rtl_find_221_ie(hw, data, len)) {
+ vendor = mac->vendor;
+ }
+
+ if ((memcmp(mac->bssid, ap5_1, 3) == 0) ||
+ (memcmp(mac->bssid, ap5_2, 3) == 0) ||
+ (memcmp(mac->bssid, ap5_3, 3) == 0) ||
+ (memcmp(mac->bssid, ap5_4, 3) == 0) ||
+ (memcmp(mac->bssid, ap5_5, 3) == 0) ||
+ (memcmp(mac->bssid, ap5_6, 3) == 0) ||
+ vendor == PEER_ATH) {
+ vendor = PEER_ATH;
+ RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>ath find\n"));
+ } else if ((memcmp(mac->bssid, ap4_4, 3) == 0) ||
+ (memcmp(mac->bssid, ap4_5, 3) == 0) ||
+ (memcmp(mac->bssid, ap4_1, 3) == 0) ||
+ (memcmp(mac->bssid, ap4_2, 3) == 0) ||
+ (memcmp(mac->bssid, ap4_3, 3) == 0) ||
+ vendor == PEER_RAL) {
+ RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>ral find\n"));
+ vendor = PEER_RAL;
+ } else if (memcmp(mac->bssid, ap6_1, 3) == 0 ||
+ vendor == PEER_CISCO) {
+ vendor = PEER_CISCO;
+ RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>cisco find\n"));
+ } else if ((memcmp(mac->bssid, ap3_1, 3) == 0) ||
+ (memcmp(mac->bssid, ap3_2, 3) == 0) ||
+ (memcmp(mac->bssid, ap3_3, 3) == 0) ||
+ vendor == PEER_BROAD) {
+ RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>broad find\n"));
+ vendor = PEER_BROAD;
+ } else if (memcmp(mac->bssid, ap7_1, 3) == 0 ||
+ vendor == PEER_MARV) {
+ vendor = PEER_MARV;
+ RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>marv find\n"));
+ }
+
+ mac->vendor = vendor;
+}
+
+/*********************************************************
+ *
+ * sysfs functions
+ *
+ *********************************************************/
+static ssize_t rtl_show_debug_level(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(d);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ return sprintf(buf, "0x%08X\n", rtlpriv->dbg.global_debuglevel);
+}
+
+static ssize_t rtl_store_debug_level(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(d);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret) {
+ printk(KERN_DEBUG "%s is not in hex or decimal form.\n", buf);
+ } else {
+ rtlpriv->dbg.global_debuglevel = val;
+ printk(KERN_DEBUG "debuglevel:%x\n",
+ rtlpriv->dbg.global_debuglevel);
+ }
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
+ rtl_show_debug_level, rtl_store_debug_level);
+
+static struct attribute *rtl_sysfs_entries[] = {
+
+ &dev_attr_debug_level.attr,
+
+ NULL
+};
+
+/*
+ * "name" is the folder name which will be
+ * put in the device directory, e.g.:
+ * sys/devices/pci0000:00/0000:00:1c.4/
+ * 0000:06:00.0/rtlsysfs
+ */
+struct attribute_group rtl_attribute_group = {
+ .name = "rtlsysfs",
+ .attrs = rtl_sysfs_entries,
+};
+
+#ifdef VIF_TODO
+/*********************************************************
+ *
+ * vif functions
+ *
+ *********************************************************/
+static inline struct ieee80211_vif *
+rtl_get_vif(struct rtl_vif_info *vif_priv)
+{
+ return container_of((void *)vif_priv, struct ieee80211_vif, drv_priv);
+}
+
+/* Protected by ar->mutex or RCU */
+struct ieee80211_vif *rtl_get_main_vif(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_vif_info *cvif;
+
+ list_for_each_entry_rcu(cvif, &rtlpriv->vif_priv.vif_list, list) {
+ if (cvif->active)
+ return rtl_get_vif(cvif);
+ }
+
+ return NULL;
+}
+
+static inline bool is_main_vif(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ bool ret;
+
+ rcu_read_lock();
+ ret = (rtl_get_main_vif(hw) == vif);
+ rcu_read_unlock();
+ return ret;
+}
+
+bool rtl_set_vif_info(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct rtl_vif_info *vif_info = (void *) vif->drv_priv;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int vif_id = -1;
+
+ if (rtlpriv->vif_priv.vifs >= MAX_VIRTUAL_MAC) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("vif number can not bigger than %d, now vifs is:%d\n",
+ MAX_VIRTUAL_MAC, rtlpriv->vif_priv.vifs));
+ return false;
+ }
+
+ rcu_read_lock();
+ vif_id = bitmap_find_free_region(&rtlpriv->vif_priv.vif_bitmap,
+ MAX_VIRTUAL_MAC, 0);
+ RT_TRACE(COMP_MAC80211, DBG_DMESG,
+ ("%s vid_id:%d\n", __func__, vif_id));
+
+ if (vif_id < 0) {
+ rcu_read_unlock();
+ return false;
+ }
+
+ BUG_ON(rtlpriv->vif_priv.vif[vif_id].id != vif_id);
+ vif_info->active = true;
+ vif_info->id = vif_id;
+ vif_info->enable_beacon = false;
+ rtlpriv->vif_priv.vifs++;
+ if (rtlpriv->vif_priv.vifs > 1) {
+ rtlpriv->psc.b_inactiveps = false;
+ rtlpriv->psc.b_swctrl_lps = false;
+ rtlpriv->psc.b_fwctrl_lps = false;
+ }
+
+ list_add_tail_rcu(&vif_info->list, &rtlpriv->vif_priv.vif_list);
+ rcu_assign_pointer(rtlpriv->vif_priv.vif[vif_id].vif, vif);
+
+ RT_TRACE(COMP_MAC80211, DBG_DMESG, ("vifaddress:%p %p %p\n",
+ rtlpriv->vif_priv.vif[vif_id].vif, vif, rtl_get_main_vif(hw)));
+
+ rcu_read_unlock();
+
+ return true;
+}
+#endif
+
+
+#if 0
+MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
+#endif
+struct rtl_global_var global_var = {};
+
+int rtl_core_module_init(void)
+{
+ if (rtl_rate_control_register())
+ printk(KERN_DEBUG "rtl: Unable to register rtl_rc, "
+ "using default RC\n");
+
+ /* add proc for debug */
+ rtl_proc_add_topdir();
+
+ /* init some global vars */
+ INIT_LIST_HEAD(&global_var.glb_priv_list);
+ spin_lock_init(&global_var.glb_list_lock);
+
+ return 0;
+}
+
+void rtl_core_module_exit(void)
+{
+ /*RC*/
+ rtl_rate_control_unregister();
+
+ /* add proc for debug */
+ rtl_proc_remove_topdir();
+}
+
+#if 0
+module_init(rtl_core_module_init);
+module_exit(rtl_core_module_exit);
+#endif
diff --git a/drivers/staging/rtl8821ae/base.h b/drivers/staging/rtl8821ae/base.h
new file mode 100644
index 000000000000..629d14f42f0b
--- /dev/null
+++ b/drivers/staging/rtl8821ae/base.h
@@ -0,0 +1,159 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_BASE_H__
+#define __RTL_BASE_H__
+
+#include "compat.h"
+
+enum ap_peer {
+ PEER_UNKNOWN = 0,
+ PEER_RTL = 1,
+ PEER_RTL_92SE = 2,
+ PEER_BROAD = 3,
+ PEER_RAL = 4,
+ PEER_ATH = 5,
+ PEER_CISCO = 6,
+ PEER_MARV = 7,
+ PEER_AIRGO = 9,
+ PEER_MAX = 10,
+} ;
+
+#define RTL_DUMMY_OFFSET 0
+#define RTL_DUMMY_UNIT 8
+#define RTL_TX_DUMMY_SIZE (RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT)
+#define RTL_TX_DESC_SIZE 32
+#define RTL_TX_HEADER_SIZE (RTL_TX_DESC_SIZE + RTL_TX_DUMMY_SIZE)
+
+#define HT_AMSDU_SIZE_4K 3839
+#define HT_AMSDU_SIZE_8K 7935
+
+#define MAX_BIT_RATE_40MHZ_MCS15 300 /* Mbps */
+#define MAX_BIT_RATE_40MHZ_MCS7 150 /* Mbps */
+
+#define RTL_RATE_COUNT_LEGACY 12
+#define RTL_CHANNEL_COUNT 14
+
+#define FRAME_OFFSET_FRAME_CONTROL 0
+#define FRAME_OFFSET_DURATION 2
+#define FRAME_OFFSET_ADDRESS1 4
+#define FRAME_OFFSET_ADDRESS2 10
+#define FRAME_OFFSET_ADDRESS3 16
+#define FRAME_OFFSET_SEQUENCE 22
+#define FRAME_OFFSET_ADDRESS4 24
+
+#define SET_80211_HDR_FRAME_CONTROL(_hdr, _val) \
+ WRITEEF2BYTE(_hdr, _val)
+#define SET_80211_HDR_TYPE_AND_SUBTYPE(_hdr, _val) \
+ WRITEEF1BYTE(_hdr, _val)
+#define SET_80211_HDR_PWR_MGNT(_hdr, _val) \
+ SET_BITS_TO_LE_2BYTE(_hdr, 12, 1, _val)
+#define SET_80211_HDR_TO_DS(_hdr, _val) \
+ SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
+
+#define SET_80211_PS_POLL_AID(_hdr, _val) \
+ WRITEEF2BYTE(((u8*)(_hdr))+2, _val)
+#define SET_80211_PS_POLL_BSSID(_hdr, _val) \
+ CP_MACADDR(((u8*)(_hdr))+4, (u8*)(_val))
+#define SET_80211_PS_POLL_TA(_hdr, _val) \
+ CP_MACADDR(((u8*)(_hdr))+10, (u8*)(_val))
+
+#define SET_80211_HDR_DURATION(_hdr, _val) \
+ WRITEEF2BYTE((u8*)(_hdr)+FRAME_OFFSET_DURATION, _val)
+#define SET_80211_HDR_ADDRESS1(_hdr, _val) \
+ CP_MACADDR((u8*)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val))
+#define SET_80211_HDR_ADDRESS2(_hdr, _val) \
+ CP_MACADDR((u8*)(_hdr)+FRAME_OFFSET_ADDRESS2, (u8*)(_val))
+#define SET_80211_HDR_ADDRESS3(_hdr, _val) \
+ CP_MACADDR((u8*)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8*)(_val))
+#define SET_80211_HDR_FRAGMENT_SEQUENCE(_hdr, _val) \
+ WRITEEF2BYTE((u8*)(_hdr)+FRAME_OFFSET_SEQUENCE, _val)
+
+#define SET_BEACON_PROBE_RSP_TIME_STAMP_LOW(__phdr, __val) \
+ WRITEEF4BYTE(((u8*)(__phdr)) + 24, __val)
+#define SET_BEACON_PROBE_RSP_TIME_STAMP_HIGH(__phdr, __val) \
+ WRITEEF4BYTE(((u8*)(__phdr)) + 28, __val)
+#define SET_BEACON_PROBE_RSP_BEACON_INTERVAL(__phdr, __val) \
+ WRITEEF2BYTE(((u8*)(__phdr)) + 32, __val)
+#define GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) \
+ READEF2BYTE(((u8*)(__phdr)) + 34)
+#define SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
+ WRITEEF2BYTE(((u8*)(__phdr)) + 34, __val)
+#define MASK_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
+ SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, \
+ (GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) & (~(__val))))
+
+int rtl_init_core(struct ieee80211_hw *hw);
+void rtl_deinit_core(struct ieee80211_hw *hw);
+void rtl_init_rx_config(struct ieee80211_hw *hw);
+void rtl_init_rfkill(struct ieee80211_hw *hw);
+void rtl_deinit_rfkill(struct ieee80211_hw *hw);
+
+void rtl_watch_dog_timer_callback(unsigned long data);
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
+
+bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
+bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
+u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
+
+void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rtl_watch_dog_timer_callback(unsigned long data);
+int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn);
+int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+int rtl_tx_agg_oper(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tid);
+int rtl_rx_agg_start(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tid);
+int rtl_rx_agg_stop(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tid);
+void rtl_watchdog_wq_callback(void *data);
+void rtl_fwevt_wq_callback(void *data);
+
+void rtl_get_tcb_desc(struct ieee80211_hw *hw,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc);
+
+int rtl_send_smps_action(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ enum ieee80211_smps_mode smps);
+u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
+void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
+u8 rtl_tid_to_ac(struct ieee80211_hw *hw, u8 tid);
+extern struct attribute_group rtl_attribute_group;
+void rtl_easy_concurrent_retrytimer_callback(unsigned long data);
+extern struct rtl_global_var global_var;
+
+#ifdef VIF_TODO
+struct ieee80211_vif *rtl_get_main_vif(struct ieee80211_hw *hw);
+bool rtl_set_vif_info(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+#endif
+#endif
diff --git a/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c b/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c
new file mode 100644
index 000000000000..b30f17ae0215
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c
@@ -0,0 +1,3976 @@
+//============================================================
+// Description:
+//
+// This file implements the 8812a 1ant BT co-existence mechanism
+//
+// History
+// 2012/11/15 Cosa first check in.
+//
+//============================================================
+
+//============================================================
+// include files
+//============================================================
+#include "halbt_precomp.h"
+#if 1
+//============================================================
+// Global variables, these are static variables
+//============================================================
+static COEX_DM_8812A_1ANT GLCoexDm8812a1Ant;
+static PCOEX_DM_8812A_1ANT coex_dm=&GLCoexDm8812a1Ant;
+static COEX_STA_8812A_1ANT GLCoexSta8812a1Ant;
+static PCOEX_STA_8812A_1ANT coex_sta=&GLCoexSta8812a1Ant;
+
+const char *const GLBtInfoSrc8812a1Ant[]={
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+//============================================================
+// local function proto type if needed
+//============================================================
+//============================================================
+// local function start with halbtc8812a1ant_
+//============================================================
+#if 0
+void
+halbtc8812a1ant_Reg0x550Bit3(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN bSet
+ )
+{
+ u1Byte u1tmp=0;
+
+ u1tmp = btcoexist->btc_read_1byte(btcoexist, 0x550);
+ if(bSet)
+ {
+ u1tmp |= BIT3;
+ }
+ else
+ {
+ u1tmp &= ~BIT3;
+ }
+ btcoexist->btc_write_1byte(btcoexist, 0x550, u1tmp);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], set 0x550[3]=%d\n", (bSet? 1:0)));
+}
+#endif
+u1Byte
+halbtc8812a1ant_BtRssiState(
+ u1Byte level_num,
+ u1Byte rssi_thresh,
+ u1Byte rssi_thresh1
+ )
+{
+ s4Byte bt_rssi=0;
+ u1Byte bt_rssi_state;
+
+ bt_rssi = coex_sta->bt_rssi;
+
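+	// Hysteresis: stepping up a level requires rssi >= thresh +
+	// BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT, while stepping down only
+	// requires rssi < thresh, so the state does not toggle rapidly
+	// around a threshold.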
+ if(level_num == 2)
+ {
+ if( (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(bt_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+ {
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+ }
+ else
+ {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+ }
+ }
+ else
+ {
+ if(bt_rssi < rssi_thresh)
+ {
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+ }
+ else
+ {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+ }
+ }
+ }
+ else if(level_num == 3)
+ {
+ if(rssi_thresh > rssi_thresh1)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi thresh error!!\n"));
+ return coex_sta->pre_bt_rssi_state;
+ }
+
+ if( (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(bt_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+ {
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+ }
+ else
+ {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+ }
+ }
+ else if( (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM))
+ {
+ if(bt_rssi >= (rssi_thresh1+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+ {
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+ }
+ else if(bt_rssi < rssi_thresh)
+ {
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+ }
+ else
+ {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Medium\n"));
+ }
+ }
+ else
+ {
+ if(bt_rssi < rssi_thresh1)
+ {
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+ }
+ else
+ {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+ }
+ }
+ }
+
+ coex_sta->pre_bt_rssi_state = bt_rssi_state;
+
+ return bt_rssi_state;
+}
+
+u1Byte
+halbtc8812a1ant_WifiRssiState(
+ PBTC_COEXIST btcoexist,
+ u1Byte index,
+ u1Byte level_num,
+ u1Byte rssi_thresh,
+ u1Byte rssi_thresh1
+ )
+{
+ s4Byte wifi_rssi=0;
+ u1Byte wifi_rssi_state;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+
+ if(level_num == 2)
+ {
+ if( (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(wifi_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+ }
+ else
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+ }
+ }
+ else
+ {
+ if(wifi_rssi < rssi_thresh)
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+ }
+ else
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+ }
+ }
+ }
+ else if(level_num == 3)
+ {
+ if(rssi_thresh > rssi_thresh1)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI thresh error!!\n"));
+ return coex_sta->pre_wifi_rssi_state[index];
+ }
+
+ if( (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(wifi_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+ }
+ else
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+ }
+ }
+ else if( (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_MEDIUM))
+ {
+ if(wifi_rssi >= (rssi_thresh1+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+ }
+ else if(wifi_rssi < rssi_thresh)
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+ }
+ else
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Medium\n"));
+ }
+ }
+ else
+ {
+ if(wifi_rssi < rssi_thresh1)
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+ }
+ else
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+ }
+ }
+ }
+
+ coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
+
+ return wifi_rssi_state;
+}
+
+void
+halbtc8812a1ant_MonitorBtEnableDisable(
+ PBTC_COEXIST btcoexist
+ )
+{
+ static BOOLEAN pre_bt_disabled=false;
+ static u4Byte bt_disable_cnt=0;
+ BOOLEAN bt_active=true, bt_disable_by68=false, bt_disabled=false;
+ u4Byte u4_tmp=0;
+
+ // This function checks whether BT is disabled
+
+ if( coex_sta->high_priority_tx == 0 &&
+ coex_sta->high_priority_rx == 0 &&
+ coex_sta->low_priority_tx == 0 &&
+ coex_sta->low_priority_rx == 0)
+ {
+ bt_active = false;
+ }
+ if( coex_sta->high_priority_tx == 0xffff &&
+ coex_sta->high_priority_rx == 0xffff &&
+ coex_sta->low_priority_tx == 0xffff &&
+ coex_sta->low_priority_rx == 0xffff)
+ {
+ bt_active = false;
+ }
+ if(bt_active)
+ {
+ bt_disable_cnt = 0;
+ bt_disabled = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is enabled !!\n"));
+ }
+ else
+ {
+ bt_disable_cnt++;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], bt all counters=0, %d times!!\n",
+ bt_disable_cnt));
+ if(bt_disable_cnt >= 2 ||bt_disable_by68)
+ {
+ bt_disabled = true;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is disabled !!\n"));
+ }
+ }
+ if(pre_bt_disabled != bt_disabled)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled":"enabled"),
+ (bt_disabled ? "disabled":"enabled")));
+ pre_bt_disabled = bt_disabled;
+ if(!bt_disabled)
+ {
+ }
+ else
+ {
+ }
+ }
+}
+
+void
+halbtc8812a1ant_MonitorBtCtr(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u4Byte reg_hp_tx_rx, reg_lp_tx_rx, u4_tmp;
+ u4Byte reg_hp_tx=0, reg_hp_rx=0, reg_lp_tx=0, reg_lp_rx=0;
+ u1Byte u1_tmp;
+
+ reg_hp_tx_rx = 0x770;
+ reg_lp_tx_rx = 0x774;
+
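+	// each counter register packs Tx in its low 16 bits and Rx in
+	// its high 16 bits; split them with the word masks below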
+ u4_tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_tx_rx);
+ reg_hp_tx = u4_tmp & bMaskLWord;
+ reg_hp_rx = (u4_tmp & bMaskHWord)>>16;
+
+ u4_tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_tx_rx);
+ reg_lp_tx = u4_tmp & bMaskLWord;
+ reg_lp_rx = (u4_tmp & bMaskHWord)>>16;
+
+ coex_sta->high_priority_tx = reg_hp_tx;
+ coex_sta->high_priority_rx = reg_hp_rx;
+ coex_sta->low_priority_tx = reg_lp_tx;
+ coex_sta->low_priority_rx = reg_lp_rx;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], High Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
+ reg_hp_tx_rx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], Low Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
+ reg_lp_tx_rx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx));
+
+ // reset counter
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+}
+
+void
+halbtc8812a1ant_QueryBtInfo(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte dataLen=3;
+ u1Byte buf[5] = {0};
+ static u4Byte btInfoCnt=0;
+
+ if(!btInfoCnt ||
+ (coex_sta->bt_info_c2h_cnt[BT_INFO_SRC_8812A_1ANT_BT_RSP]-btInfoCnt)>2)
+ {
+ buf[0] = dataLen;
+ buf[1] = 0x1; // polling enable, 1=enable, 0=disable
+ buf[2] = 0x2; // polling time in seconds
+ buf[3] = 0x1; // auto report enable, 1=enable, 0=disable
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_INFO, (PVOID)&buf[0]);
+ }
+ btInfoCnt = coex_sta->bt_info_c2h_cnt[BT_INFO_SRC_8812A_1ANT_BT_RSP];
+}
+u1Byte
+halbtc8812a1ant_ActionAlgorithm(
+ PBTC_COEXIST btcoexist
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+ BOOLEAN bt_hs_on=false;
+ u1Byte algorithm=BT_8812A_1ANT_COEX_ALGO_UNDEFINED;
+ u1Byte num_of_diff_profile=0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ if(!stack_info->bt_link_exist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], No profile exists!!!\n"));
+ return algorithm;
+ }
+
+ if(stack_info->sco_exist)
+ num_of_diff_profile++;
+ if(stack_info->hid_exist)
+ num_of_diff_profile++;
+ if(stack_info->pan_exist)
+ num_of_diff_profile++;
+ if(stack_info->a2dp_exist)
+ num_of_diff_profile++;
+
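+	// num_of_diff_profile counts how many distinct BT profiles
+	// (SCO/HID/PAN/A2DP) are active; the mix picks the coex algorithm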
+ if(num_of_diff_profile == 1)
+ {
+ if(stack_info->sco_exist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO only\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ if(stack_info->hid_exist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID only\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_HID;
+ }
+ else if(stack_info->a2dp_exist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP only\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_A2DP;
+ }
+ else if(stack_info->pan_exist)
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(HS) only\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_PANHS;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(EDR) only\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR;
+ }
+ }
+ }
+ }
+ else if(num_of_diff_profile == 2)
+ {
+ if(stack_info->sco_exist)
+ {
+ if(stack_info->hid_exist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_HID;
+ }
+ else if(stack_info->a2dp_exist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP ==> SCO\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_SCO;
+ }
+ else if(stack_info->pan_exist)
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(HS)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(EDR)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ else
+ {
+ if( stack_info->hid_exist &&
+ stack_info->a2dp_exist )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP;
+ }
+ else if( stack_info->hid_exist &&
+ stack_info->pan_exist )
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ else if( stack_info->pan_exist &&
+ stack_info->a2dp_exist )
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_A2DP_PANHS;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ }
+ }
+ }
+ else if(num_of_diff_profile == 3)
+ {
+ if(stack_info->sco_exist)
+ {
+ if( stack_info->hid_exist &&
+ stack_info->a2dp_exist )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP ==> HID\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_HID;
+ }
+ else if( stack_info->hid_exist &&
+ stack_info->pan_exist )
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(HS)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(EDR)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ else if( stack_info->pan_exist &&
+ stack_info->a2dp_exist )
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(HS)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ else
+ {
+ if( stack_info->hid_exist &&
+ stack_info->pan_exist &&
+ stack_info->a2dp_exist )
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ }
+ }
+ }
+ else if(num_of_diff_profile >= 3)
+ {
+ if(stack_info->sco_exist)
+ {
+ if( stack_info->hid_exist &&
+ stack_info->pan_exist &&
+ stack_info->a2dp_exist )
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"));
+
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ }
+
+ return algorithm;
+}
+
+BOOLEAN
+halbtc8812a1ant_NeedToDecBtPwr(
+ PBTC_COEXIST btcoexist
+ )
+{
+ BOOLEAN ret=false;
+ BOOLEAN bt_hs_on=false, wifi_connected=false;
+ s4Byte bt_hs_rssi=0;
+
+ if(!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on))
+ return false;
+ if(!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected))
+ return false;
+ if(!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
+ return false;
+
+ if(wifi_connected)
+ {
+ if(bt_hs_on)
+ {
+ if(bt_hs_rssi > 37)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], Need to decrease bt power for HS mode!!\n"));
+ ret = true;
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], Need to decrease bt power for Wifi is connected!!\n"));
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+void
+halbtc8812a1ant_SetFwDacSwingLevel(
+ PBTC_COEXIST btcoexist,
+ u1Byte dac_swing_lvl
+ )
+{
+ u1Byte h2c_parameter[1] ={0};
+
+ // There are several types of DAC swing:
+ // 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
+ h2c_parameter[0] = dac_swing_lvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]));
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
+}
+
+void
+halbtc8812a1ant_SetFwDecBtPwr(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN dec_bt_pwr
+ )
+{
+ u1Byte dataLen=3;
+ u1Byte buf[5] = {0};
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], decrease Bt Power : %s\n",
+ (dec_bt_pwr? "Yes!!":"No!!")));
+
+ buf[0] = dataLen;
+ buf[1] = 0x3; // OP_Code
+ buf[2] = 0x1; // OP_Code_Length
+ if(dec_bt_pwr)
+ buf[3] = 0x1; // OP_Code_Content
+ else
+ buf[3] = 0x0;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]);
+}
+
+void
+halbtc8812a1ant_DecBtPwr(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN dec_bt_pwr
+ )
+{
+ return;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s Dec BT power = %s\n",
+ (force_exec? "force to":""), ((dec_bt_pwr)? "ON":"OFF")));
+ coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_dec_bt_pwr=%d, cur_dec_bt_pwr=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr));
+
+ if(coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
+ return;
+ }
+ halbtc8812a1ant_SetFwDecBtPwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+
+ coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+}
+
+void
+halbtc8812a1ant_SetFwBtLnaConstrain(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN bt_lna_cons_on
+ )
+{
+ u1Byte dataLen=3;
+ u1Byte buf[5] = {0};
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set BT LNA Constrain: %s\n",
+ (bt_lna_cons_on? "ON!!":"OFF!!")));
+
+ buf[0] = dataLen;
+ buf[1] = 0x2; // OP_Code
+ buf[2] = 0x1; // OP_Code_Length
+ if(bt_lna_cons_on)
+ buf[3] = 0x1; // OP_Code_Content
+ else
+ buf[3] = 0x0;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]);
+}
+
+void
+halbtc8812a1ant_SetBtLnaConstrain(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN bt_lna_cons_on
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT Constrain = %s\n",
+ (force_exec? "force":""), ((bt_lna_cons_on)? "ON":"OFF")));
+ coex_dm->bCurBtLnaConstrain = bt_lna_cons_on;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtLnaConstrain=%d, bCurBtLnaConstrain=%d\n",
+ coex_dm->bPreBtLnaConstrain, coex_dm->bCurBtLnaConstrain));
+
+ if(coex_dm->bPreBtLnaConstrain == coex_dm->bCurBtLnaConstrain)
+ return;
+ }
+ halbtc8812a1ant_SetFwBtLnaConstrain(btcoexist, coex_dm->bCurBtLnaConstrain);
+
+ coex_dm->bPreBtLnaConstrain = coex_dm->bCurBtLnaConstrain;
+}
+
+void
+halbtc8812a1ant_SetFwBtPsdMode(
+ PBTC_COEXIST btcoexist,
+ u1Byte bt_psd_mode
+ )
+{
+ u1Byte dataLen=3;
+ u1Byte buf[5] = {0};
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set BT PSD mode=0x%x\n",
+ bt_psd_mode));
+
+ buf[0] = dataLen;
+ buf[1] = 0x4; // OP_Code
+ buf[2] = 0x1; // OP_Code_Length
+ buf[3] = bt_psd_mode; // OP_Code_Content
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]);
+}
+
+
+void
+halbtc8812a1ant_SetBtPsdMode(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ u1Byte bt_psd_mode
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT PSD mode = 0x%x\n",
+ (force_exec? "force":""), bt_psd_mode));
+ coex_dm->bCurBtPsdMode = bt_psd_mode;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtPsdMode=0x%x, bCurBtPsdMode=0x%x\n",
+ coex_dm->bPreBtPsdMode, coex_dm->bCurBtPsdMode));
+
+ if(coex_dm->bPreBtPsdMode == coex_dm->bCurBtPsdMode)
+ return;
+ }
+ halbtc8812a1ant_SetFwBtPsdMode(btcoexist, coex_dm->bCurBtPsdMode);
+
+ coex_dm->bPreBtPsdMode = coex_dm->bCurBtPsdMode;
+}
+
+
+void
+halbtc8812a1ant_SetBtAutoReport(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN enable_auto_report
+ )
+{
+#if 0
+ u1Byte h2c_parameter[1] ={0};
+
+ h2c_parameter[0] = 0;
+
+ if(enable_auto_report)
+ {
+ h2c_parameter[0] |= BIT0;
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n",
+ (enable_auto_report? "Enabled!!":"Disabled!!"), h2c_parameter[0]));
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
+#else
+
+#endif
+}
+
+void
+halbtc8812a1ant_BtAutoReport(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN enable_auto_report
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT Auto report = %s\n",
+ (force_exec? "force to":""), ((enable_auto_report)? "Enabled":"Disabled")));
+ coex_dm->cur_bt_auto_report = enable_auto_report;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_bt_auto_report=%d, cur_bt_auto_report=%d\n",
+ coex_dm->pre_bt_auto_report, coex_dm->cur_bt_auto_report));
+
+ if(coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
+ return;
+ }
+ halbtc8812a1ant_SetBtAutoReport(btcoexist, coex_dm->cur_bt_auto_report);
+
+ coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
+}
+
+void
+halbtc8812a1ant_FwDacSwingLvl(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ u1Byte fw_dac_swing_lvl
+ )
+{
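+ // NOTE: the unconditional return below makes the rest of this function unreachable,
+ // so the FW DAC swing level is never actually updated from this wrapper.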
+ return;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec? "force to":""), fw_dac_swing_lvl));
+ coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_fw_dac_swing_lvl=%d, cur_fw_dac_swing_lvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl, coex_dm->cur_fw_dac_swing_lvl));
+
+ if(coex_dm->pre_fw_dac_swing_lvl == coex_dm->cur_fw_dac_swing_lvl)
+ return;
+ }
+
+ halbtc8812a1ant_SetFwDacSwingLevel(btcoexist, coex_dm->cur_fw_dac_swing_lvl);
+
+ coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
+}
+
+void
+halbtc8812a1ant_SetSwRfRxLpfCorner(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN rx_rf_shrink_on
+ )
+{
+ if(rx_rf_shrink_on)
+ {
+ //Shrink RF Rx LPF corner
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Shrink RF Rx LPF corner!!\n"));
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, 0xf0ff7);
+ }
+ else
+ {
+ //Resume RF Rx LPF corner
+ // After initialization, we can use coex_dm->bt_rf0x1e_backup
+ if(btcoexist->bInitilized)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n"));
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, coex_dm->bt_rf0x1e_backup);
+ }
+ }
+}
+
+void
+halbtc8812a1ant_RfShrink(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN rx_rf_shrink_on
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (force_exec? "force to":""), ((rx_rf_shrink_on)? "ON":"OFF")));
+ coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_rf_rx_lpf_shrink=%d, cur_rf_rx_lpf_shrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink, coex_dm->cur_rf_rx_lpf_shrink));
+
+ if(coex_dm->pre_rf_rx_lpf_shrink == coex_dm->cur_rf_rx_lpf_shrink)
+ return;
+ }
+ halbtc8812a1ant_SetSwRfRxLpfCorner(btcoexist, coex_dm->cur_rf_rx_lpf_shrink);
+
+ coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
+}
+
+void
+halbtc8812a1ant_SetSwPenaltyTxRateAdaptive(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN low_penalty_ra
+ )
+{
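+ // Read-modify-write of register 0x4fd: BIT0 is always set; per the trace messages below,
+ // clearing BIT2 selects the low-penalty TX rate-adaptive setting and setting it restores normal behavior.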
+ u1Byte u1_tmp;
+
+ u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x4fd);
+ u1_tmp |= BIT0;
+ if(low_penalty_ra)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set low penalty!!\n"));
+ u1_tmp &= ~BIT2;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set normal!!\n"));
+ u1_tmp |= BIT2;
+ }
+
+ btcoexist->btc_write_1byte(btcoexist, 0x4fd, u1_tmp);
+}
+
+void
+halbtc8812a1ant_LowPenaltyRa(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN low_penalty_ra
+ )
+{
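+ // NOTE: the unconditional return below leaves the rest of this function unreachable;
+ // the low-penalty RA setting is therefore never applied from this wrapper.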
+ return;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (force_exec? "force to":""), ((low_penalty_ra)? "ON":"OFF")));
+ coex_dm->cur_low_penalty_ra = low_penalty_ra;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_low_penalty_ra=%d, cur_low_penalty_ra=%d\n",
+ coex_dm->pre_low_penalty_ra, coex_dm->cur_low_penalty_ra));
+
+ if(coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
+ return;
+ }
+ halbtc8812a1ant_SetSwPenaltyTxRateAdaptive(btcoexist, coex_dm->cur_low_penalty_ra);
+
+ coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
+}
+
+void
+halbtc8812a1ant_SetDacSwingReg(
+ PBTC_COEXIST btcoexist,
+ u4Byte level
+ )
+{
+ u1Byte val=(u1Byte)level;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Write SwDacSwing = 0x%x\n", level));
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val);
+}
+
+void
+halbtc8812a1ant_SetSwFullTimeDacSwing(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN sw_dac_swing_on,
+ u4Byte sw_dac_swing_lvl
+ )
+{
+ if(sw_dac_swing_on)
+ {
+ halbtc8812a1ant_SetDacSwingReg(btcoexist, sw_dac_swing_lvl);
+ }
+ else
+ {
+ halbtc8812a1ant_SetDacSwingReg(btcoexist, 0x18);
+ }
+}
+
+
+void
+halbtc8812a1ant_DacSwing(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN dac_swing_on,
+ u4Byte dac_swing_lvl
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+ (force_exec? "force to":""), ((dac_swing_on)? "ON":"OFF"), dac_swing_lvl));
+ coex_dm->cur_dac_swing_on = dac_swing_on;
+ coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_dac_swing_on=%d, pre_dac_swing_lvl=0x%x, cur_dac_swing_on=%d, cur_dac_swing_lvl=0x%x\n",
+ coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl));
+
+ if( (coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
+ (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl) )
+ return;
+ }
+ delay_ms(30);
+ halbtc8812a1ant_SetSwFullTimeDacSwing(btcoexist, dac_swing_on, dac_swing_lvl);
+
+ coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
+ coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
+}
+
+void
+halbtc8812a1ant_SetAdcBackOff(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN adc_back_off
+ )
+{
+ if(adc_back_off)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level On!\n"));
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3);
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level Off!\n"));
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1);
+ }
+}
+
+void
+halbtc8812a1ant_AdcBackOff(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN adc_back_off
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn AdcBackOff = %s\n",
+ (force_exec? "force to":""), ((adc_back_off)? "ON":"OFF")));
+ coex_dm->cur_adc_back_off = adc_back_off;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_adc_back_off=%d, cur_adc_back_off=%d\n",
+ coex_dm->pre_adc_back_off, coex_dm->cur_adc_back_off));
+
+ if(coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
+ return;
+ }
+ halbtc8812a1ant_SetAdcBackOff(btcoexist, coex_dm->cur_adc_back_off);
+
+ coex_dm->pre_adc_back_off = coex_dm->cur_adc_back_off;
+}
+
+void
+halbtc8812a1ant_SetAgcTable(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN agc_table_en
+ )
+{
+ u1Byte rssi_adjust_val=0;
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
+ if(agc_table_en)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table On!\n"));
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x3fa58);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x37a58);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x2fa58);
+ rssi_adjust_val = 8;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table Off!\n"));
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x39258);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x31258);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x29258);
+ }
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
+
+ // set rssi_adjust_val for wifi module.
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, &rssi_adjust_val);
+}
+
+
+void
+halbtc8812a1ant_AgcTable(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN agc_table_en
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s %s Agc Table\n",
+ (force_exec? "force to":""), ((agc_table_en)? "Enable":"Disable")));
+ coex_dm->cur_agc_table_en = agc_table_en;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_agc_table_en=%d, cur_agc_table_en=%d\n",
+ coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en));
+
+ if(coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
+ return;
+ }
+ halbtc8812a1ant_SetAgcTable(btcoexist, agc_table_en);
+
+ coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
+}
+
+void
+halbtc8812a1ant_SetCoexTable(
+ PBTC_COEXIST btcoexist,
+ u4Byte val0x6c0,
+ u4Byte val0x6c4,
+ u4Byte val0x6c8,
+ u1Byte val0x6cc
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0));
+ btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4));
+ btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8));
+ btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc));
+ btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
+}
+
+void
+halbtc8812a1ant_CoexTable(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ u4Byte val0x6c0,
+ u4Byte val0x6c4,
+ u4Byte val0x6c8,
+ u1Byte val0x6cc
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ (force_exec? "force to":""), val0x6c0, val0x6c4, val0x6c8, val0x6cc));
+ coex_dm->cur_val0x6c0 = val0x6c0;
+ coex_dm->cur_val0x6c4 = val0x6c4;
+ coex_dm->cur_val0x6c8 = val0x6c8;
+ coex_dm->cur_val0x6cc = val0x6cc;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_val0x6c0=0x%x, pre_val0x6c4=0x%x, pre_val0x6c8=0x%x, pre_val0x6cc=0x%x !!\n",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4, coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], cur_val0x6c0=0x%x, cur_val0x6c4=0x%x, cur_val0x6c8=0x%x, cur_val0x6cc=0x%x !!\n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4, coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc));
+
+ if( (coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
+ (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
+ (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
+ (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc) )
+ return;
+ }
+ halbtc8812a1ant_SetCoexTable(btcoexist, val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+
+ coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
+ coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
+ coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
+ coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
+}
+
+void
+halbtc8812a1ant_SetFwIgnoreWlanAct(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN enable
+ )
+{
+ u1Byte dataLen=3;
+ u1Byte buf[5] = {0};
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], %s BT Ignore Wlan_Act\n",
+ (enable? "Enable":"Disable")));
+
+ buf[0] = dataLen;
+ buf[1] = 0x1; // OP_Code
+ buf[2] = 0x1; // OP_Code_Length
+ if(enable)
+ buf[3] = 0x1; // OP_Code_Content
+ else
+ buf[3] = 0x0;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]);
+}
+
+void
+halbtc8812a1ant_IgnoreWlanAct(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN enable
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec? "force to":""), (enable? "ON":"OFF")));
+ coex_dm->cur_ignore_wlan_act = enable;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+ coex_dm->pre_ignore_wlan_act, coex_dm->cur_ignore_wlan_act));
+
+ if(coex_dm->pre_ignore_wlan_act == coex_dm->cur_ignore_wlan_act)
+ return;
+ }
+ halbtc8812a1ant_SetFwIgnoreWlanAct(btcoexist, enable);
+
+ coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
+}
+
+void
+halbtc8812a1ant_SetFwPstdma(
+ PBTC_COEXIST btcoexist,
+ u1Byte byte1,
+ u1Byte byte2,
+ u1Byte byte3,
+ u1Byte byte4,
+ u1Byte byte5
+ )
+{
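+ // Copy the five PS-TDMA parameter bytes into the H2C buffer and cache them in
+ // coex_dm->ps_tdma_para for later reference, then send them as FW H2C command 0x60.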
+ u1Byte h2c_parameter[5] ={0};
+
+ h2c_parameter[0] = byte1;
+ h2c_parameter[1] = byte2;
+ h2c_parameter[2] = byte3;
+ h2c_parameter[3] = byte4;
+ h2c_parameter[4] = byte5;
+
+ coex_dm->ps_tdma_para[0] = byte1;
+ coex_dm->ps_tdma_para[1] = byte2;
+ coex_dm->ps_tdma_para[2] = byte3;
+ coex_dm->ps_tdma_para[3] = byte4;
+ coex_dm->ps_tdma_para[4] = byte5;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1]<<24|h2c_parameter[2]<<16|h2c_parameter[3]<<8|h2c_parameter[4]));
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
+}
+
+void
+halbtc8812a1ant_SetLpsRpwm(
+ PBTC_COEXIST btcoexist,
+ u1Byte lps_val,
+ u1Byte rpwm_val
+ )
+{
+ u1Byte lps=lps_val;
+ u1Byte rpwm=rpwm_val;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_1ANT_LPS, &lps);
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_1ANT_RPWM, &rpwm);
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT, NULL);
+}
+
+void
+halbtc8812a1ant_LpsRpwm(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ u1Byte lps_val,
+ u1Byte rpwm_val
+ )
+{
+ BOOLEAN bForceExecPwrCmd=false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set lps/rpwm=0x%x/0x%x \n",
+ (force_exec? "force to":""), lps_val, rpwm_val));
+ coex_dm->cur_lps = lps_val;
+ coex_dm->cur_rpwm = rpwm_val;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_lps/cur_lps=0x%x/0x%x, pre_rpwm/cur_rpwm=0x%x/0x%x!!\n",
+ coex_dm->pre_lps, coex_dm->cur_lps, coex_dm->pre_rpwm, coex_dm->cur_rpwm));
+
+ if( (coex_dm->pre_lps == coex_dm->cur_lps) &&
+ (coex_dm->pre_rpwm == coex_dm->cur_rpwm) )
+ {
+ return;
+ }
+ }
+ halbtc8812a1ant_SetLpsRpwm(btcoexist, lps_val, rpwm_val);
+
+ coex_dm->pre_lps = coex_dm->cur_lps;
+ coex_dm->pre_rpwm = coex_dm->cur_rpwm;
+}
+
+void
+halbtc8812a1ant_SwMechanism1(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN shrink_rx_lpf,
+ BOOLEAN low_penalty_ra,
+ BOOLEAN limited_dig,
+ BOOLEAN bt_lna_constrain
+ )
+{
+ //halbtc8812a1ant_RfShrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
+ //halbtc8812a1ant_LowPenaltyRa(btcoexist, NORMAL_EXEC, low_penalty_ra);
+
+ //no limited DIG
+ //halbtc8812a1ant_SetBtLnaConstrain(btcoexist, NORMAL_EXEC, bt_lna_constrain);
+}
+
+void
+halbtc8812a1ant_SwMechanism2(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN agc_table_shift,
+ BOOLEAN adc_back_off,
+ BOOLEAN sw_dac_swing,
+ u4Byte dac_swing_lvl
+ )
+{
+ //halbtc8812a1ant_AgcTable(btcoexist, NORMAL_EXEC, agc_table_shift);
+ //halbtc8812a1ant_AdcBackOff(btcoexist, NORMAL_EXEC, adc_back_off);
+ //halbtc8812a1ant_DacSwing(btcoexist, NORMAL_EXEC, sw_dac_swing, dac_swing_lvl);
+}
+
+void
+halbtc8812a1ant_PsTdma(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN turn_on,
+ u1Byte type
+ )
+{
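+ // 'type' selects one of the predefined PS-TDMA parameter sets below; some entries
+ // also pick an RSSI adjustment value that is pushed to the driver afterwards.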
+ BOOLEAN bTurnOnByCnt=false;
+ u1Byte psTdmaTypeByCnt=0, rssi_adjust_val=0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec? "force to":""), (turn_on? "ON":"OFF"), type));
+ coex_dm->cur_ps_tdma_on = turn_on;
+ coex_dm->cur_ps_tdma = type;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma));
+
+ if( (coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
+ (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma) )
+ return;
+ }
+ if(turn_on)
+ {
+ switch(type)
+ {
+ default:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x58);
+ break;
+ case 1:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x48);
+ rssi_adjust_val = 11;
+ break;
+ case 2:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x12, 0x12, 0x0, 0x48);
+ rssi_adjust_val = 14;
+ break;
+ case 3:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x25, 0x3, 0x10, 0x40);
+ break;
+ case 4:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x15, 0x3, 0x14, 0x0);
+ rssi_adjust_val = 17;
+ break;
+ case 5:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x61, 0x15, 0x3, 0x31, 0x0);
+ break;
+ case 6:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0xa, 0x3, 0x0, 0x0);
+ break;
+ case 7:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0xc, 0x5, 0x0, 0x0);
+ break;
+ case 8:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x25, 0x3, 0x10, 0x0);
+ break;
+ case 9:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0xa, 0xa, 0x0, 0x48);
+ rssi_adjust_val = 18;
+ break;
+ case 10:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0xa, 0xa, 0x0, 0x40);
+ break;
+ case 11:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x5, 0x5, 0x0, 0x48);
+ rssi_adjust_val = 20;
+ break;
+ case 12:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xeb, 0xa, 0x3, 0x31, 0x18);
+ break;
+
+ case 15:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0xa, 0x3, 0x8, 0x0);
+ break;
+ case 16:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x15, 0x3, 0x10, 0x0);
+ rssi_adjust_val = 18;
+ break;
+
+ case 18:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x25, 0x3, 0x10, 0x0);
+ rssi_adjust_val = 14;
+ break;
+
+ case 20:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0x25, 0x25, 0x0, 0x0);
+ break;
+ case 21:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x20, 0x3, 0x10, 0x40);
+ break;
+ case 22:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0x8, 0x8, 0x0, 0x40);
+ break;
+ case 23:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0x25, 0x3, 0x31, 0x18);
+ rssi_adjust_val = 22;
+ break;
+ case 24:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0x15, 0x3, 0x31, 0x18);
+ rssi_adjust_val = 22;
+ break;
+ case 25:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0xa, 0x3, 0x31, 0x18);
+ rssi_adjust_val = 22;
+ break;
+ case 26:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0xa, 0x3, 0x31, 0x18);
+ rssi_adjust_val = 22;
+ break;
+ case 27:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0x25, 0x3, 0x31, 0x98);
+ rssi_adjust_val = 22;
+ break;
+ case 28:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x69, 0x25, 0x3, 0x31, 0x0);
+ break;
+ case 29:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xab, 0x1a, 0x1a, 0x1, 0x8);
+ break;
+ case 30:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x15, 0x3, 0x14, 0x0);
+ break;
+ case 31:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x1a, 0x1a, 0, 0x58);
+ break;
+ case 32:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xab, 0xa, 0x3, 0x31, 0x88);
+ break;
+ case 33:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xa3, 0x25, 0x3, 0x30, 0x88);
+ break;
+ case 34:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x8);
+ break;
+ case 35:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, 0x1a, 0x0, 0x8);
+ break;
+ case 36:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x12, 0x3, 0x14, 0x58);
+ break;
+ }
+ }
+ else
+ {
+ // disable PS tdma
+ switch(type)
+ {
+ case 8:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x8, 0x0, 0x0, 0x0, 0x0);
+ btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x4);
+ break;
+ case 0:
+ default:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+ delay_ms(5);
+ btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20);
+ break;
+ case 9:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+ btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x4);
+ break;
+ case 10:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0, 0x8, 0x0);
+ delay_ms(5);
+ btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20);
+ break;
+ }
+ }
+ rssi_adjust_val = 0;
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, &rssi_adjust_val);
+
+ // update pre state
+ coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
+ coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
+}
+
+void
+halbtc8812a1ant_CoexAllOff(
+ PBTC_COEXIST btcoexist
+ )
+{
+ // fw all off
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ // sw all off
+ halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+
+ // hw all off
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+}
+
+void
+halbtc8812a1ant_WifiParaAdjust(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN enable
+ )
+{
+ if(enable)
+ {
+ halbtc8812a1ant_LowPenaltyRa(btcoexist, NORMAL_EXEC, true);
+ }
+ else
+ {
+ halbtc8812a1ant_LowPenaltyRa(btcoexist, NORMAL_EXEC, false);
+ }
+}
+
+BOOLEAN
+halbtc8812a1ant_IsCommonAction(
+ PBTC_COEXIST btcoexist
+ )
+{
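+ // Handle the simple WiFi/BT idle combinations directly and return true; return
+ // false when a profile-specific action routine has to take over.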
+ BOOLEAN common=false, wifi_connected=false, wifi_busy=false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+ //halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+
+ if(!wifi_connected &&
+ BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"));
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+ common = true;
+ }
+ else if(wifi_connected &&
+ (BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT non connected-idle!!\n"));
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+
+ halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+ common = true;
+ }
+ else if(!wifi_connected &&
+ (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"));
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+ common = true;
+ }
+ else if(wifi_connected &&
+ (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT connected-idle!!\n"));
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8812a1ant_SwMechanism1(btcoexist,true,true,true,true);
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+ common = true;
+ }
+ else if(!wifi_connected &&
+ (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE != coex_dm->bt_status) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT Busy!!\n"));
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+ common = true;
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism1(btcoexist,true,true,true,true);
+
+ common = false;
+ }
+
+ return common;
+}
+
+
+void
+halbtc8812a1ant_TdmaDurationAdjustForAcl(
+ PBTC_COEXIST btcoexist
+ )
+{
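+ // up/dn count consecutive 2-second windows with zero / non-zero BT retries. When
+ // 'up' reaches n the WiFi slot is widened (result = 1); two low-retry windows, or a
+ // single window with more than 3 retries, narrows it (result = -1). 'm' grows n to
+ // throttle re-checks and avoid bouncing between adjacent PS-TDMA types.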
+ static s4Byte up,dn,m,n,wait_count;
+ s4Byte result; //0: no change, +1: increase WiFi duration, -1: decrease WiFi duration
+ u1Byte retry_count=0, bt_info_ext;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], halbtc8812a1ant_TdmaDurationAdjustForAcl()\n"));
+ if(coex_dm->reset_tdma_adjust)
+ {
+ coex_dm->reset_tdma_adjust = false;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], first run TdmaDurationAdjust()!!\n"));
+
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ //============
+ up = 0;
+ dn = 0;
+ m = 1;
+ n= 3;
+ result = 0;
+ wait_count = 0;
+ }
+ else
+ {
+ // acquire the BT TRx retry count from BT_Info byte2
+ retry_count = coex_sta->bt_retry_cnt;
+ bt_info_ext = coex_sta->bt_info_ext;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retry_count = %d\n", retry_count));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
+ up, dn, m, n, wait_count));
+ result = 0;
+ wait_count++;
+
+ if(retry_count == 0) // no retry in the last 2-second duration
+ {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+ if(up >= n) // if the retry count has been 0 for n consecutive 2-second periods, widen the WiFi duration
+ {
+ wait_count = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Increase wifi duration!!\n"));
+ }
+ }
+ else if (retry_count <= 3) // at most 3 retries in the last 2-second duration
+ {
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+ if (dn == 2) // if the retry count has been < 3 for 2 consecutive 2-second periods, narrow the WiFi duration
+ {
+ if (wait_count <= 2)
+ m++; // avoid bouncing back and forth between two levels
+ else
+ m = 1;
+
+ if ( m >= 20) // cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
+ }
+ }
+ else // retry count > 3; a single 2-second period with more than 3 retries narrows the WiFi duration
+ {
+ if (wait_count == 1)
+ m++; // avoid bouncing back and forth between two levels
+ else
+ m = 1;
+
+ if ( m >= 20) // cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n"));
+ }
+
+ if(result == -1)
+ {
+ if( (BT_INFO_8812A_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
+ ((coex_dm->cur_ps_tdma == 1) ||(coex_dm->cur_ps_tdma == 2)) )
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ }
+ else if(coex_dm->cur_ps_tdma == 1)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ }
+ else if(coex_dm->cur_ps_tdma == 2)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ }
+ else if(coex_dm->cur_ps_tdma == 9)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ }
+ }
+ else if(result == 1)
+ {
+ if( (BT_INFO_8812A_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
+ ((coex_dm->cur_ps_tdma == 1) ||(coex_dm->cur_ps_tdma == 2)) )
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ }
+ else if(coex_dm->cur_ps_tdma == 11)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ }
+ else if(coex_dm->cur_ps_tdma == 9)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ }
+ else if(coex_dm->cur_ps_tdma == 2)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 1);
+ coex_dm->ps_tdma_du_adj_type = 1;
+ }
+ }
+
+ if( coex_dm->cur_ps_tdma != 1 &&
+ coex_dm->cur_ps_tdma != 2 &&
+ coex_dm->cur_ps_tdma != 9 &&
+ coex_dm->cur_ps_tdma != 11 )
+ {
+ // recover to previous adjust type
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, coex_dm->ps_tdma_du_adj_type);
+ }
+ }
+}
+
+u1Byte
+halbtc8812a1ant_PsTdmaTypeByWifiRssi(
+ s4Byte wifi_rssi,
+ s4Byte pre_wifi_rssi,
+ u1Byte wifi_rssi_thresh
+ )
+{
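+ // 5 dB hysteresis around wifi_rssi_thresh: while RSSI is rising, type 26 is only
+ // chosen once RSSI exceeds thresh+5; while falling, it is kept until RSSI drops below thresh.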
+ u1Byte ps_tdma_type=0;
+
+ if(wifi_rssi > pre_wifi_rssi)
+ {
+ if(wifi_rssi > (wifi_rssi_thresh+5))
+ {
+ ps_tdma_type = 26;
+ }
+ else
+ {
+ ps_tdma_type = 25;
+ }
+ }
+ else
+ {
+ if(wifi_rssi > wifi_rssi_thresh)
+ {
+ ps_tdma_type = 26;
+ }
+ else
+ {
+ ps_tdma_type = 25;
+ }
+ }
+
+ return ps_tdma_type;
+}
+
+void
+halbtc8812a1ant_PsTdmaCheckForPowerSaveState(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN new_ps_state
+ )
+{
+ u1Byte lps_mode=0x0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode);
+
+ if(lps_mode) // already under LPS state
+ {
+ if(new_ps_state)
+ {
+ // keep state under LPS, do nothing.
+ }
+ else
+ {
+ // will leave LPS state, turn off psTdma first
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 0);
+ }
+ }
+ else // NO PS state
+ {
+ if(new_ps_state)
+ {
+ // will enter LPS state, turn off psTdma first
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 0);
+ }
+ else
+ {
+ // keep state under NO PS state, do nothing.
+ }
+ }
+}
+
+// SCO only or SCO+PAN(HS)
+void
+halbtc8812a1ant_ActionSco(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state;
+ u4Byte wifi_bw;
+
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 4);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+
+void
+halbtc8812a1ant_ActionHid(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state;
+ u4Byte wifi_bw;
+
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,false,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+//A2DP only / PAN(EDR) only/ A2DP+PAN(HS)
+void
+halbtc8812a1ant_ActionA2dp(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state;
+ u4Byte wifi_bw;
+
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+void
+halbtc8812a1ant_ActionA2dpPanHs(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u4Byte wifi_bw;
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+void
+halbtc8812a1ant_ActionPanEdr(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state;
+ u4Byte wifi_bw;
+
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+
+//PAN(HS) only
+void
+halbtc8812a1ant_ActionPanHs(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state;
+ u4Byte wifi_bw;
+
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // fw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ }
+ else
+ {
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+ }
+
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // fw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ }
+ else
+ {
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+ }
+
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+//PAN(EDR)+A2DP
+void
+halbtc8812a1ant_ActionPanEdrA2dp(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u4Byte wifi_bw;
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+void
+halbtc8812a1ant_ActionPanEdrHid(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state;
+ u4Byte wifi_bw;
+
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+// HID+A2DP+PAN(EDR)
+void
+halbtc8812a1ant_ActionHidA2dpPanEdr(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u4Byte wifi_bw;
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+void
+halbtc8812a1ant_ActionHidA2dp(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u4Byte wifi_bw;
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+void
+halbtc8812a1ant_ActionHs(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN hs_connecting
+ )
+{
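+ // While the HS link is being set up, force coex table 0xaaaaaaaa; once connected,
+ // choose the table based on the combined high/low-priority packet counters (<= 1200).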
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action for HS, hs_connecting=%d!!!\n", hs_connecting));
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+
+ if(hs_connecting)
+ {
+ halbtc8812a1ant_CoexTable(btcoexist, FORCE_EXEC, 0xaaaaaaaa, 0xaaaaaaaa, 0xffff, 0x3);
+ }
+ else
+ {
+ if((coex_sta->high_priority_tx+coex_sta->high_priority_rx+
+ coex_sta->low_priority_tx+coex_sta->low_priority_rx)<=1200)
+ halbtc8812a1ant_CoexTable(btcoexist, FORCE_EXEC, 0xaaaaaaaa, 0xaaaaaaaa, 0xffff, 0x3);
+ else
+ halbtc8812a1ant_CoexTable(btcoexist, FORCE_EXEC, 0xffffffff, 0xffffffff, 0xffff, 0x3);
+ }
+}
+
+
+void
+halbtc8812a1ant_ActionWifiNotConnected(
+ PBTC_COEXIST btcoexist
+ )
+{
+ BOOLEAN hs_connecting=false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting);
+
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+ if(hs_connecting)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HS is connecting!!!\n"));
+ halbtc8812a1ant_ActionHs(btcoexist, hs_connecting);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+}
+
+void
+halbtc8812a1ant_ActionWifiNotConnectedAssoAuthScan(
+ PBTC_COEXIST btcoexist
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+ BOOLEAN hs_connecting=false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting);
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+ if(hs_connecting)
+ {
+ halbtc8812a1ant_ActionHs(btcoexist, hs_connecting);
+ }
+ else if(btcoexist->bt_info.bt_disabled)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else if( (BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) )
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 28);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status)
+ {
+ if(stack_info->hid_exist)
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 35);
+ else
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 29);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3);
+ }
+ else
+ {
+ //error condition, should not reach here, record error number for debugging.
+ coex_dm->error_condition = 1;
+ }
+}
+
+void
+halbtc8812a1ant_ActionWifiConnectedScan(
+ PBTC_COEXIST btcoexist
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ActionConnectedScan()===>\n"));
+
+ if(btcoexist->bt_info.bt_disabled)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+ // power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+
+ // psTdma
+ if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ActionConnectedScan(), bt is under inquiry/page scan\n"));
+ if(stack_info->sco_exist)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 32);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ }
+ else if( (BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) )
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 5);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status)
+ {
+ if(stack_info->hid_exist)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 34);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 4);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ }
+ else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 33);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else
+ {
+ //error condition, should not reach here
+ coex_dm->error_condition = 2;
+ }
+ }
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ActionConnectedScan()<===\n"));
+}
+
+void
+halbtc8812a1ant_ActionWifiConnectedSpecialPacket(
+ PBTC_COEXIST btcoexist
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+ if(btcoexist->bt_info.bt_disabled)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else
+ {
+ if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status)
+ {
+ if(stack_info->sco_exist)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 32);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ }
+ else if( (BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) )
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 28);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status)
+ {
+ if(stack_info->hid_exist)
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 35);
+ else
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 29);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3);
+ }
+ else
+ {
+ //error condition, should not reach here
+ coex_dm->error_condition = 3;
+ }
+ }
+}
+
+void
+halbtc8812a1ant_ActionWifiConnected(
+ PBTC_COEXIST btcoexist
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+ BOOLEAN wifi_connected=false, wifi_busy=false, bt_hs_on=false;
+ BOOLEAN scan=false, link=false, roam=false;
+ BOOLEAN hs_connecting=false, under4way=false;
+ u4Byte wifi_bw;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect()===>\n"));
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+ if(!wifi_connected)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi not connected<===\n"));
+ return;
+ }
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under4way);
+ if(under4way)
+ {
+ halbtc8812a1ant_ActionWifiConnectedSpecialPacket(btcoexist);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"));
+ return;
+ }
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+ if(scan || link || roam)
+ {
+ halbtc8812a1ant_ActionWifiConnectedScan(btcoexist);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"));
+ return;
+ }
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ if(!wifi_busy)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi associated-idle!!!\n"));
+ if(btcoexist->bt_info.bt_disabled)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else
+ {
+ if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], bt is under inquiry/page scan!!!\n"));
+ if(stack_info->sco_exist)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 32);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+ // power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x26, 0x0);
+ // power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x26, 0x0);
+ // power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 0);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status)
+ {
+ if(stack_info->hid_exist && stack_info->numOfLink==1)
+ {
+ // hid only
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ // power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5fff5fff, 0x5fff5fff, 0xffff, 0x3);
+ coex_dm->reset_tdma_adjust = true;
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+ // power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+
+ if(stack_info->hid_exist)
+ {
+ if(stack_info->a2dp_exist)
+ {
+ // hid+a2dp
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else if(stack_info->pan_exist)
+ {
+ if(bt_hs_on)
+ {
+ // hid+hs
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ }
+ else
+ {
+ // hid+pan
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ }
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else
+ {
+ coex_dm->error_condition = 4;
+ }
+ coex_dm->reset_tdma_adjust = true;
+ }
+ else if(stack_info->a2dp_exist)
+ {
+ if(stack_info->pan_exist)
+ {
+ if(bt_hs_on)
+ {
+ // a2dp+hs
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ }
+ else
+ {
+ // a2dp+pan
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 36);
+ }
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ coex_dm->reset_tdma_adjust = true;
+ }
+ else
+ {
+ // a2dp only
+ halbtc8812a1ant_TdmaDurationAdjustForAcl(btcoexist);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ }
+ else if(stack_info->pan_exist)
+ {
+ // pan only
+ if(bt_hs_on)
+ {
+ coex_dm->error_condition = 5;
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ coex_dm->reset_tdma_adjust = true;
+ }
+ else
+ {
+ // temp state, do nothing!!!
+ //DbgPrint("error 6, coex_dm->bt_status=%d\n", coex_dm->bt_status);
+ //DbgPrint("error 6, stack_info->numOfLink=%d, stack_info->hid_exist=%d, stack_info->a2dp_exist=%d, stack_info->pan_exist=%d, stack_info->sco_exist=%d\n",
+ //stack_info->numOfLink, stack_info->hid_exist, stack_info->a2dp_exist, stack_info->pan_exist, stack_info->sco_exist);
+ //coex_dm->error_condition = 6;
+ }
+ }
+ }
+ else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+				// power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3);
+ }
+ else
+ {
+ //error condition, should not reach here
+ coex_dm->error_condition = 7;
+ }
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi busy!!!\n"));
+ if(btcoexist->bt_info.bt_disabled)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HS is under progress!!!\n"));
+ //DbgPrint("coex_dm->bt_status = 0x%x\n", coex_dm->bt_status);
+ halbtc8812a1ant_ActionHs(btcoexist, hs_connecting);
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status)
+ {
+ if(stack_info->sco_exist)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 32);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+					// power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+				// power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 5);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5a5a5a5a, 0x5a5a5a5a, 0xffff, 0x3);
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+				// power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HS is under progress!!!\n"));
+ halbtc8812a1ant_ActionHs(btcoexist, hs_connecting);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 5);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5a5a5a5a, 0x5a5a5a5a, 0xffff, 0x3);
+ }
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status)
+ {
+ if(stack_info->hid_exist && stack_info->numOfLink==1)
+ {
+ // hid only
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+					// power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5fff5fff, 0x5fff5fff, 0xffff, 0x3);
+ coex_dm->reset_tdma_adjust = true;
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+					// power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+
+ if(stack_info->hid_exist)
+ {
+ if(stack_info->a2dp_exist)
+ {
+ // hid+a2dp
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else if(stack_info->pan_exist)
+ {
+ if(bt_hs_on)
+ {
+ // hid+hs
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ }
+ else
+ {
+ // hid+pan
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ }
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else
+ {
+ coex_dm->error_condition = 8;
+ }
+ coex_dm->reset_tdma_adjust = true;
+ }
+ else if(stack_info->a2dp_exist)
+ {
+ if(stack_info->pan_exist)
+ {
+ if(bt_hs_on)
+ {
+ // a2dp+hs
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ }
+ else
+ {
+ // a2dp+pan
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 36);
+ }
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ coex_dm->reset_tdma_adjust = true;
+ }
+ else
+ {
+ // a2dp only
+ halbtc8812a1ant_TdmaDurationAdjustForAcl(btcoexist);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ }
+ else if(stack_info->pan_exist)
+ {
+ // pan only
+ if(bt_hs_on)
+ {
+ coex_dm->error_condition = 9;
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ coex_dm->reset_tdma_adjust = true;
+ }
+ else
+ {
+ //DbgPrint("error 10, stack_info->numOfLink=%d, stack_info->hid_exist=%d, stack_info->a2dp_exist=%d, stack_info->pan_exist=%d, stack_info->sco_exist=%d\n",
+ //stack_info->numOfLink, stack_info->hid_exist, stack_info->a2dp_exist, stack_info->pan_exist, stack_info->sco_exist);
+ coex_dm->error_condition = 10;
+ }
+ }
+ }
+ else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+				// power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3);
+ }
+ else
+ {
+ //DbgPrint("error 11, coex_dm->bt_status=%d\n", coex_dm->bt_status);
+ //DbgPrint("error 11, stack_info->numOfLink=%d, stack_info->hid_exist=%d, stack_info->a2dp_exist=%d, stack_info->pan_exist=%d, stack_info->sco_exist=%d\n",
+ //stack_info->numOfLink, stack_info->hid_exist, stack_info->a2dp_exist, stack_info->pan_exist, stack_info->sco_exist);
+ //error condition, should not reach here
+ coex_dm->error_condition = 11;
+ }
+ }
+ }
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect()<===\n"));
+}
+
+void
+halbtc8812a1ant_RunSwCoexistMechanism(
+ PBTC_COEXIST btcoexist
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+ BOOLEAN wifi_under5g=false, wifi_busy=false, wifi_connected=false;
+ u1Byte bt_info_original=0, bt_retry_cnt=0;
+ u1Byte algorithm=0;
+
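+	// NOTE: the unconditional return below disables the SW coexistence
+	// algorithm dispatch that follows; the code is kept but never reached.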
+ return;
+ if(stack_info->bProfileNotified)
+ {
+ algorithm = halbtc8812a1ant_ActionAlgorithm(btcoexist);
+ coex_dm->cur_algorithm = algorithm;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Algorithm = %d \n", coex_dm->cur_algorithm));
+
+ if(halbtc8812a1ant_IsCommonAction(btcoexist))
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action common.\n"));
+ }
+ else
+ {
+ switch(coex_dm->cur_algorithm)
+ {
+ case BT_8812A_1ANT_COEX_ALGO_SCO:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = SCO.\n"));
+ halbtc8812a1ant_ActionSco(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID.\n"));
+ halbtc8812a1ant_ActionHid(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP.\n"));
+ halbtc8812a1ant_ActionA2dp(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_A2DP_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP+PAN(HS).\n"));
+ halbtc8812a1ant_ActionA2dpPanHs(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR).\n"));
+ halbtc8812a1ant_ActionPanEdr(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HS mode.\n"));
+ halbtc8812a1ant_ActionPanHs(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_PANEDR_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN+A2DP.\n"));
+ halbtc8812a1ant_ActionPanEdrA2dp(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_PANEDR_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR)+HID.\n"));
+ halbtc8812a1ant_ActionPanEdrHid(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP+PAN.\n"));
+ halbtc8812a1ant_ActionHidA2dpPanEdr(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_HID_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP.\n"));
+ halbtc8812a1ant_ActionHidA2dp(btcoexist);
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = coexist All Off!!\n"));
+ halbtc8812a1ant_CoexAllOff(btcoexist);
+ break;
+ }
+ coex_dm->pre_algorithm = coex_dm->cur_algorithm;
+ }
+ }
+}
+
+void
+halbtc8812a1ant_RunCoexistMechanism(
+ PBTC_COEXIST btcoexist
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+ BOOLEAN wifi_under5g=false, wifi_busy=false, wifi_connected=false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism()===>\n"));
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under5g);
+
+ if(wifi_under5g)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for 5G <===\n"));
+ return;
+ }
+
+ if(btcoexist->manual_control)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"));
+ return;
+ }
+
+ if(btcoexist->stop_coex_dm)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"));
+ return;
+ }
+
+ halbtc8812a1ant_RunSwCoexistMechanism(btcoexist);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+ if(btcoexist->bt_info.bt_disabled)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], bt is disabled!!!\n"));
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ if(wifi_busy)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+			// power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ }
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else if(coex_sta->under_ips)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is under IPS !!!\n"));
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 0);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ halbtc8812a1ant_WifiParaAdjust(btcoexist, false);
+ }
+ else if(!wifi_connected)
+ {
+ BOOLEAN scan=false, link=false, roam=false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is non connected-idle !!!\n"));
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+ if(scan || link || roam)
+ halbtc8812a1ant_ActionWifiNotConnectedAssoAuthScan(btcoexist);
+ else
+ halbtc8812a1ant_ActionWifiNotConnected(btcoexist);
+ }
+ else // wifi LPS/Busy
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is NOT under IPS!!!\n"));
+ halbtc8812a1ant_WifiParaAdjust(btcoexist, true);
+ halbtc8812a1ant_ActionWifiConnected(btcoexist);
+ }
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism()<===\n"));
+}
+
+void
+halbtc8812a1ant_InitCoexDm(
+ PBTC_COEXIST btcoexist
+ )
+{
+ BOOLEAN wifi_connected=false;
+ // force to reset coex mechanism
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+ if(!wifi_connected) // non-connected scan
+ {
+ halbtc8812a1ant_ActionWifiNotConnected(btcoexist);
+ }
+ else // wifi is connected
+ {
+ halbtc8812a1ant_ActionWifiConnected(btcoexist);
+ }
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, FORCE_EXEC, 6);
+ halbtc8812a1ant_DecBtPwr(btcoexist, FORCE_EXEC, false);
+
+ // sw all off
+ halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+ halbtc8812a1ant_CoexTable(btcoexist, FORCE_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+}
+
+//============================================================
+// work around function start with wa_halbtc8812a1ant_
+//============================================================
+//============================================================
+// extern function start with EXhalbtc8812a1ant_
+//============================================================
+void
+EXhalbtc8812a1ant_InitHwConfig(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u4Byte u4_tmp=0;
+ u2Byte u2Tmp=0;
+ u1Byte u1_tmp=0;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 1Ant Init HW Config!!\n"));
+
+ // backup rf 0x1e value
+ coex_dm->bt_rf0x1e_backup =
+ btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff);
+
+ //ant sw control to BT
+ btcoexist->btc_write_4byte(btcoexist, 0x900, 0x00000400);
+ btcoexist->btc_write_1byte(btcoexist, 0x76d, 0x1);
+ btcoexist->btc_write_1byte(btcoexist, 0xcb3, 0x77);
+ btcoexist->btc_write_1byte(btcoexist, 0xcb7, 0x40);
+
+ // 0x790[5:0]=0x5
+ u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
+ u1_tmp &= 0xc0;
+ u1_tmp |= 0x5;
+ btcoexist->btc_write_1byte(btcoexist, 0x790, u1_tmp);
+
+ // PTA parameter
+ btcoexist->btc_write_1byte(btcoexist, 0x6cc, 0x0);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c8, 0xffff);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c4, 0x55555555);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c0, 0x55555555);
+
+ // coex parameters
+ btcoexist->btc_write_1byte(btcoexist, 0x778, 0x1);
+
+ // enable counter statistics
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
+
+ // enable PTA
+ btcoexist->btc_write_1byte(btcoexist, 0x40, 0x20);
+
+ // bt clock related
+ u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x4);
+ u1_tmp |= BIT7;
+ btcoexist->btc_write_1byte(btcoexist, 0x4, u1_tmp);
+
+ // bt clock related
+ u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x7);
+ u1_tmp |= BIT1;
+ btcoexist->btc_write_1byte(btcoexist, 0x7, u1_tmp);
+}
+
+void
+EXhalbtc8812a1ant_InitCoexDm(
+ PBTC_COEXIST btcoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n"));
+
+ btcoexist->stop_coex_dm = false;
+
+ halbtc8812a1ant_InitCoexDm(btcoexist);
+}
+
+void
+EXhalbtc8812a1ant_DisplayCoexInfo(
+ PBTC_COEXIST btcoexist
+ )
+{
+ PBTC_BOARD_INFO board_info=&btcoexist->boardInfo;
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+ pu1Byte cli_buf=btcoexist->cli_buf;
+ u1Byte u1_tmp[4], i, bt_info_ext, psTdmaCase=0;
+ u4Byte u4_tmp[4];
+ BOOLEAN roam=false, scan=false, link=false, wifi_under5g=false;
+ BOOLEAN bt_hs_on=false, wifi_busy=false;
+ s4Byte wifi_rssi=0, bt_hs_rssi=0;
+ u4Byte wifi_bw, wifiTrafficDir;
+ u1Byte wifiDot11Chnl, wifiHsChnl;
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cli_buf);
+
+ if(btcoexist->manual_control)
+ {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ============[Under Manual Control]============");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ==========================================");
+ CL_PRINTF(cli_buf);
+ }
+ if(btcoexist->stop_coex_dm)
+ {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ============[Coex is STOPPED]============");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ==========================================");
+ CL_PRINTF(cli_buf);
+ }
+
+ if(!board_info->bBtExist)
+ {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+ CL_PRINTF(cli_buf);
+ return;
+ }
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \
+ board_info->pgAntNum, board_info->btdmAntNum);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
+ ((stack_info->bProfileNotified)? "Yes":"No"), stack_info->hciVersion);
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_FW_VER);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \
+ wifiDot11Chnl, wifiHsChnl, bt_hs_on);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \
+ coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1],
+ coex_dm->wifi_chnl_info[2]);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \
+ wifi_rssi, bt_hs_rssi);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan", \
+ link, roam, scan);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under5g);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", "Wifi status", \
+ (wifi_under5g? "5G":"2.4G"),
+ ((BTC_WIFI_BW_LEGACY==wifi_bw)? "Legacy": (((BTC_WIFI_BW_HT40==wifi_bw)? "HT40":"HT20"))),
+ ((!wifi_busy)? "idle": ((BTC_WIFI_TRAFFIC_TX==wifiTrafficDir)? "uplink":"downlink")));
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \
+ ((coex_sta->c2h_bt_inquiry_page)?("inquiry/page scan"):((BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)? "non-connected idle":
+ ( (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)? "connected-idle":"busy"))),
+ coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
+ CL_PRINTF(cli_buf);
+
+ if(stack_info->bProfileNotified)
+ {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \
+ stack_info->sco_exist, stack_info->hid_exist, stack_info->pan_exist, stack_info->a2dp_exist);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+ }
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Info A2DP rate", \
+ (bt_info_ext&BIT0)? "Basic rate":"EDR rate");
+ CL_PRINTF(cli_buf);
+
+ for(i=0; i<BT_INFO_SRC_8812A_1ANT_MAX; i++)
+ {
+ if(coex_sta->bt_info_c2h_cnt[i])
+ {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8812a1Ant[i], \
+ coex_sta->bt_info_c2h[i][0], coex_sta->bt_info_c2h[i][1],
+ coex_sta->bt_info_c2h[i][2], coex_sta->bt_info_c2h[i][3],
+ coex_sta->bt_info_c2h[i][4], coex_sta->bt_info_c2h[i][5],
+ coex_sta->bt_info_c2h[i][6], coex_sta->bt_info_c2h_cnt[i]);
+ CL_PRINTF(cli_buf);
+ }
+ }
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s, (0x%x/0x%x)", "PS state, IPS/LPS, (lps/rpwm)", \
+ ((coex_sta->under_ips? "IPS ON":"IPS OFF")),
+ ((coex_sta->under_lps? "LPS ON":"LPS OFF")),
+ btcoexist->bt_info.lps1Ant,
+ btcoexist->bt_info.rpwm1Ant);
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+ if(!btcoexist->manual_control)
+ {
+ // Sw mechanism
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw mechanism]============");
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d/ %d ", "SM1[ShRf/ LpRA/ LimDig/ btLna]", \
+ coex_dm->cur_rf_rx_lpf_shrink, coex_dm->cur_low_penalty_ra, coex_dm->limited_dig, coex_dm->bCurBtLnaConstrain);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ", "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", \
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+ CL_PRINTF(cli_buf);
+
+ // Fw mechanism
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============");
+ CL_PRINTF(cli_buf);
+
+ psTdmaCase = coex_dm->cur_ps_tdma;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", "PS TDMA", \
+ coex_dm->ps_tdma_para[0], coex_dm->ps_tdma_para[1],
+ coex_dm->ps_tdma_para[2], coex_dm->ps_tdma_para[3],
+ coex_dm->ps_tdma_para[4], psTdmaCase);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Latest error condition(should be 0)", \
+ coex_dm->error_condition);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct", \
+ coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
+ CL_PRINTF(cli_buf);
+ }
+
+ // Hw setting
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============");
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \
+ coex_dm->bt_rf0x1e_backup);
+ CL_PRINTF(cli_buf);
+
+ u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x778", \
+ u1_tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c);
+ u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x92c/ 0x930", \
+ (u1_tmp[0]), u4_tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
+ u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x40/ 0x4f", \
+ u1_tmp[0], u1_tmp[1]);
+ CL_PRINTF(cli_buf);
+
+ u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
+ u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \
+ u4_tmp[0], u1_tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \
+ u4_tmp[0]);
+ CL_PRINTF(cli_buf);
+
+#if 0
+ u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48);
+ u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xf4c);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0xf48/ 0xf4c (FA cnt)", \
+ u4_tmp[0], u4_tmp[1]);
+ CL_PRINTF(cli_buf);
+#endif
+
+ u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
+ u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
+ u4_tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
+ u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \
+ u4_tmp[0], u4_tmp[1], u4_tmp[2], u1_tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x770(hp rx[31:16]/tx[15:0])", \
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x774(lp rx[31:16]/tx[15:0])", \
+ coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+void
+EXhalbtc8812a1ant_IpsNotify(
+ PBTC_COEXIST btcoexist,
+ u1Byte type
+ )
+{
+ u4Byte u4_tmp=0;
+
+ if(btcoexist->manual_control || btcoexist->stop_coex_dm)
+ return;
+
+ if(BTC_IPS_ENTER == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n"));
+ coex_sta->under_ips = true;
+
+ // 0x4c[23]=1
+ u4_tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u4_tmp |= BIT23;
+ btcoexist->btc_write_4byte(btcoexist, 0x4c, u4_tmp);
+
+ halbtc8812a1ant_CoexAllOff(btcoexist);
+ }
+ else if(BTC_IPS_LEAVE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n"));
+ coex_sta->under_ips = false;
+ //halbtc8812a1ant_InitCoexDm(btcoexist);
+ }
+}
+
+void
+EXhalbtc8812a1ant_LpsNotify(
+ PBTC_COEXIST btcoexist,
+ u1Byte type
+ )
+{
+ if(btcoexist->manual_control || btcoexist->stop_coex_dm)
+ return;
+
+ if(BTC_LPS_ENABLE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n"));
+ coex_sta->under_lps = true;
+ }
+	else if(BTC_LPS_DISABLE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n"));
+ coex_sta->under_lps = false;
+ }
+}
+
+void
+EXhalbtc8812a1ant_ScanNotify(
+ PBTC_COEXIST btcoexist,
+ u1Byte type
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+ BOOLEAN wifi_connected=false;
+
+ if(btcoexist->manual_control ||btcoexist->stop_coex_dm)
+ return;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+ if(BTC_SCAN_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n"));
+ if(!wifi_connected) // non-connected scan
+ {
+ //set 0x550[3]=1 before PsTdma
+ //halbtc8812a1ant_Reg0x550Bit3(btcoexist, true);
+ halbtc8812a1ant_ActionWifiNotConnectedAssoAuthScan(btcoexist);
+ }
+ else // wifi is connected
+ {
+ halbtc8812a1ant_ActionWifiConnectedScan(btcoexist);
+ }
+ }
+ else if(BTC_SCAN_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n"));
+ if(!wifi_connected) // non-connected scan
+ {
+ //halbtc8812a1ant_Reg0x550Bit3(btcoexist, false);
+ halbtc8812a1ant_ActionWifiNotConnected(btcoexist);
+ }
+ else
+ {
+ halbtc8812a1ant_ActionWifiConnected(btcoexist);
+ }
+ }
+}
+
+void
+EXhalbtc8812a1ant_ConnectNotify(
+ PBTC_COEXIST btcoexist,
+ u1Byte type
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+ BOOLEAN wifi_connected=false;
+
+ if(btcoexist->manual_control ||btcoexist->stop_coex_dm)
+ return;
+
+ if(BTC_ASSOCIATE_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n"));
+ if(btcoexist->bt_info.bt_disabled)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ }
+ else
+ {
+ halbtc8812a1ant_ActionWifiNotConnectedAssoAuthScan(btcoexist);
+ }
+ }
+ else if(BTC_ASSOCIATE_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n"));
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+ if(!wifi_connected) // non-connected scan
+ {
+ //halbtc8812a1ant_Reg0x550Bit3(btcoexist, false);
+ halbtc8812a1ant_ActionWifiNotConnected(btcoexist);
+ }
+ else
+ {
+ halbtc8812a1ant_ActionWifiConnected(btcoexist);
+ }
+ }
+}
+
+void
+EXhalbtc8812a1ant_MediaStatusNotify(
+ PBTC_COEXIST btcoexist,
+ u1Byte type
+ )
+{
+ u1Byte dataLen=5;
+ u1Byte buf[6] = {0};
+ u1Byte h2c_parameter[3] ={0};
+ BOOLEAN wifi_under5g=false;
+ u4Byte wifi_bw;
+ u1Byte wifi_central_chnl;
+
+ if(btcoexist->manual_control ||btcoexist->stop_coex_dm)
+ return;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under5g);
+
+	// only for 2.4G do we need to inform bt of the chnl mask
+ if(!wifi_under5g)
+ {
+ if(BTC_MEDIA_CONNECT == type)
+ {
+ h2c_parameter[0] = 0x1;
+ }
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifi_central_chnl);
+ h2c_parameter[1] = wifi_central_chnl;
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ h2c_parameter[2] = 0x30;
+ else
+ h2c_parameter[2] = 0x20;
+ }
+
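+	// record the channel info last passed to BT; it is also reported by EXhalbtc8812a1ant_DisplayCoexInfo().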
+ coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
+ coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
+ coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
+
+ buf[0] = dataLen;
+ buf[1] = 0x5; // OP_Code
+ buf[2] = 0x3; // OP_Code_Length
+ buf[3] = h2c_parameter[0]; // OP_Code_Content
+ buf[4] = h2c_parameter[1];
+ buf[5] = h2c_parameter[2];
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]);
+}
+
+void
+EXhalbtc8812a1ant_SpecialPacketNotify(
+ PBTC_COEXIST btcoexist,
+ u1Byte type
+ )
+{
+ BOOLEAN bSecurityLink=false;
+
+ if(btcoexist->manual_control ||btcoexist->stop_coex_dm)
+ return;
+
+ //if(type == BTC_PACKET_DHCP)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], special Packet(%d) notify\n", type));
+ if(btcoexist->bt_info.bt_disabled)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ }
+ else
+ {
+ halbtc8812a1ant_ActionWifiConnectedSpecialPacket(btcoexist);
+ }
+ }
+}
+
+void
+EXhalbtc8812a1ant_BtInfoNotify(
+ PBTC_COEXIST btcoexist,
+ pu1Byte tmp_buf,
+ u1Byte length
+ )
+{
+ u1Byte bt_info=0;
+ u1Byte i, rsp_source=0;
+ static u4Byte set_bt_lna_cnt=0, set_bt_psd_mode=0;
+ BOOLEAN bt_busy=false, limited_dig=false;
+ BOOLEAN wifi_connected=false;
+ BOOLEAN bRejApAggPkt=false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify()===>\n"));
+
+
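+	// the low nibble of the first C2H byte identifies the BT info response source.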
+ rsp_source = tmp_buf[0]&0xf;
+ if(rsp_source >= BT_INFO_SRC_8812A_1ANT_MAX)
+ rsp_source = BT_INFO_SRC_8812A_1ANT_WIFI_FW;
+ coex_sta->bt_info_c2h_cnt[rsp_source]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length=%d, hex data=[", rsp_source, length));
+ for(i=0; i<length; i++)
+ {
+ coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
+ if(i == 1)
+ bt_info = tmp_buf[i];
+ if(i == length-1)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%2x]\n", tmp_buf[i]));
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%2x, ", tmp_buf[i]));
+ }
+ }
+
+ if(btcoexist->manual_control)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n"));
+ return;
+ }
+ if(btcoexist->stop_coex_dm)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Coex STOPPED!!<===\n"));
+ return;
+ }
+
+ if(BT_INFO_SRC_8812A_1ANT_WIFI_FW != rsp_source)
+ {
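+		// byte 2 of the C2H payload is the BT retry count, byte 3 is converted
+		// to an RSSI value, and byte 4 is the extended info field.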
+ coex_sta->bt_retry_cnt =
+ coex_sta->bt_info_c2h[rsp_source][2];
+
+ coex_sta->bt_rssi =
+ coex_sta->bt_info_c2h[rsp_source][3]*2+10;
+
+ coex_sta->bt_info_ext =
+ coex_sta->bt_info_c2h[rsp_source][4];
+
+ // Here we need to resend some wifi info to BT
+		// because bt is reset and has lost the info.
+ if( (coex_sta->bt_info_ext & BIT1) )
+ {
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+ if(wifi_connected)
+ {
+ EXhalbtc8812a1ant_MediaStatusNotify(btcoexist, BTC_MEDIA_CONNECT);
+ }
+ else
+ {
+ EXhalbtc8812a1ant_MediaStatusNotify(btcoexist, BTC_MEDIA_DISCONNECT);
+ }
+
+ set_bt_psd_mode = 0;
+ }
+
+		// test-chip bt patch doesn't support this, temporarily removed.
+		// need to add back for mp-chip. 12/20/2012
+#if 0
+ if(set_bt_psd_mode <= 3)
+ {
+ halbtc8812a1ant_SetBtPsdMode(btcoexist, FORCE_EXEC, 0xd);
+ set_bt_psd_mode++;
+ }
+
+ if(coex_dm->bCurBtLnaConstrain)
+ {
+ if( (coex_sta->bt_info_ext & BIT2) )
+ {
+ }
+ else
+ {
+ if(set_bt_lna_cnt <= 3)
+ {
+ halbtc8812a1ant_SetBtLnaConstrain(btcoexist, FORCE_EXEC, true);
+ set_bt_lna_cnt++;
+ }
+ }
+ }
+ else
+ {
+ set_bt_lna_cnt = 0;
+ }
+#endif
+		// test-chip bt patch only responds with the status for BT_RSP,
+		// so temporarily we consider the following only under BT_RSP
+ if(BT_INFO_SRC_8812A_1ANT_BT_RSP == rsp_source)
+ {
+ if( (coex_sta->bt_info_ext & BIT3) )
+ {
+				#if 0// temp disable because bt patch reports the wrong value.
+ halbtc8812a1ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, false);
+ #endif
+ }
+ else
+ {
+				// BT already does NOT ignore Wlan activity, do nothing here.
+ }
+
+ if( (coex_sta->bt_info_ext & BIT4) )
+ {
+ // BT auto report already enabled, do nothing
+ }
+ else
+ {
+ halbtc8812a1ant_BtAutoReport(btcoexist, FORCE_EXEC, true);
+ }
+ }
+ }
+
+ // check BIT2 first ==> check if bt is under inquiry or page scan
+ if(bt_info & BT_INFO_8812A_1ANT_B_INQ_PAGE)
+ {
+ coex_sta->c2h_bt_inquiry_page = true;
+ coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_INQ_PAGE;
+ }
+ else
+ {
+ coex_sta->c2h_bt_inquiry_page = false;
+ if(!(bt_info&BT_INFO_8812A_1ANT_B_CONNECTION))
+ {
+ coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt non-connected idle!!!\n"));
+ }
+		else if(bt_info == BT_INFO_8812A_1ANT_B_CONNECTION)	// connection exists but not busy
+ {
+ coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt connected-idle!!!\n"));
+ }
+ else if((bt_info&BT_INFO_8812A_1ANT_B_SCO_ESCO) ||
+ (bt_info&BT_INFO_8812A_1ANT_B_SCO_BUSY))
+ {
+ coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_SCO_BUSY;
+ bRejApAggPkt = true;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt sco busy!!!\n"));
+ }
+ else if(bt_info&BT_INFO_8812A_1ANT_B_ACL_BUSY)
+ {
+ if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
+ coex_dm->reset_tdma_adjust = true;
+ coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_ACL_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt acl busy!!!\n"));
+ }
+#if 0
+ else if(bt_info&BT_INFO_8812A_1ANT_B_SCO_ESCO)
+ {
+ coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt acl/sco busy!!!\n"));
+ }
+#endif
+ else
+ {
+ //DbgPrint("error, undefined bt_info=0x%x\n", bt_info);
+ coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_MAX;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt non-defined state!!!\n"));
+ }
+
+ // send delete BA to disable aggregation
+ //btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, &bRejApAggPkt);
+ }
+
+ if( (BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+ {
+ bt_busy = true;
+ }
+ else
+ {
+ bt_busy = false;
+ }
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
+ if(bt_busy)
+ {
+ limited_dig = true;
+ }
+ else
+ {
+ limited_dig = false;
+ }
+ coex_dm->limited_dig = limited_dig;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+
+ halbtc8812a1ant_RunCoexistMechanism(btcoexist);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify()<===\n"));
+}
+
+void
+EXhalbtc8812a1ant_StackOperationNotify(
+ PBTC_COEXIST btcoexist,
+ u1Byte type
+ )
+{
+ if(BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair start notify\n"));
+ }
+ else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair finish notify\n"));
+ }
+}
+
+void
+EXhalbtc8812a1ant_HaltNotify(
+ PBTC_COEXIST btcoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Halt notify\n"));
+
+ halbtc8812a1ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
+ halbtc8812a1ant_PsTdma(btcoexist, FORCE_EXEC, false, 0);
+ btcoexist->btc_write_1byte(btcoexist, 0x4f, 0xf);
+ halbtc8812a1ant_WifiParaAdjust(btcoexist, false);
+}
+
+void
+EXhalbtc8812a1ant_PnpNotify(
+ PBTC_COEXIST btcoexist,
+ u1Byte pnpState
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify\n"));
+
+ if(BTC_WIFI_PNP_SLEEP == pnpState)
+ {
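+		// entering PNP sleep: stop the coex state machine, tell BT to ignore
+		// WLAN activity, leave LPS and turn PS TDMA off.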
+ btcoexist->stop_coex_dm = true;
+ halbtc8812a1ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ }
+ else if(BTC_WIFI_PNP_WAKE_UP == pnpState)
+ {
+
+ }
+}
+
+void
+EXhalbtc8812a1ant_Periodical(
+ PBTC_COEXIST btcoexist
+ )
+{
+ BOOLEAN wifi_under5g=false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Periodical()===>\n"));
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], 1Ant Periodical!!\n"));
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under5g);
+
+ if(wifi_under5g)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Periodical(), return for 5G<===\n"));
+ halbtc8812a1ant_CoexAllOff(btcoexist);
+ return;
+ }
+
+ halbtc8812a1ant_QueryBtInfo(btcoexist);
+ halbtc8812a1ant_MonitorBtCtr(btcoexist);
+ halbtc8812a1ant_MonitorBtEnableDisable(btcoexist);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Periodical()<===\n"));
+}
+
+void
+EXhalbtc8812a1ant_DbgControl(
+ PBTC_COEXIST btcoexist,
+ u1Byte opCode,
+ u1Byte opLen,
+ pu1Byte pData
+ )
+{
+ switch(opCode)
+ {
+ case BTC_DBG_SET_COEX_NORMAL:
+ btcoexist->manual_control = false;
+ halbtc8812a1ant_InitCoexDm(btcoexist);
+ break;
+ case BTC_DBG_SET_COEX_WIFI_ONLY:
+ btcoexist->manual_control = true;
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ break;
+ case BTC_DBG_SET_COEX_BT_ONLY:
+ // todo
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.h b/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.h
new file mode 100644
index 000000000000..37bdab5ae9f1
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.h
@@ -0,0 +1,205 @@
+//===========================================
+// The following is for 8812A_1ANT BT Co-exist definition
+//===========================================
+#define BT_INFO_8812A_1ANT_B_FTP BIT7
+#define BT_INFO_8812A_1ANT_B_A2DP BIT6
+#define BT_INFO_8812A_1ANT_B_HID BIT5
+#define BT_INFO_8812A_1ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8812A_1ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8812A_1ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8812A_1ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8812A_1ANT_B_CONNECTION BIT0
+
+#define BT_INFO_8812A_1ANT_A2DP_BASIC_RATE(_BT_INFO_EXT_) \
+ (((_BT_INFO_EXT_&BIT0))? true:false)
+
+#define BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT 2
+
+#define	IN
+#define OUT
+
+typedef enum _BT_INFO_SRC_8812A_1ANT{
+ BT_INFO_SRC_8812A_1ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8812A_1ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8812A_1ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8812A_1ANT_MAX
+}BT_INFO_SRC_8812A_1ANT,*PBT_INFO_SRC_8812A_1ANT;
+
+typedef enum _BT_8812A_1ANT_BT_STATUS{
+ BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8812A_1ANT_BT_STATUS_INQ_PAGE = 0x2,
+ BT_8812A_1ANT_BT_STATUS_ACL_BUSY = 0x3,
+ BT_8812A_1ANT_BT_STATUS_SCO_BUSY = 0x4,
+ BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8812A_1ANT_BT_STATUS_MAX
+}BT_8812A_1ANT_BT_STATUS,*PBT_8812A_1ANT_BT_STATUS;
+
+typedef enum _BT_8812A_1ANT_COEX_ALGO{
+ BT_8812A_1ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8812A_1ANT_COEX_ALGO_SCO = 0x1,
+ BT_8812A_1ANT_COEX_ALGO_HID = 0x2,
+ BT_8812A_1ANT_COEX_ALGO_A2DP = 0x3,
+ BT_8812A_1ANT_COEX_ALGO_A2DP_PANHS = 0x4,
+ BT_8812A_1ANT_COEX_ALGO_PANEDR = 0x5,
+ BT_8812A_1ANT_COEX_ALGO_PANHS = 0x6,
+ BT_8812A_1ANT_COEX_ALGO_PANEDR_A2DP = 0x7,
+ BT_8812A_1ANT_COEX_ALGO_PANEDR_HID = 0x8,
+ BT_8812A_1ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9,
+ BT_8812A_1ANT_COEX_ALGO_HID_A2DP = 0xa,
+ BT_8812A_1ANT_COEX_ALGO_MAX = 0xb,
+}BT_8812A_1ANT_COEX_ALGO,*PBT_8812A_1ANT_COEX_ALGO;
+
+typedef struct _COEX_DM_8812A_1ANT{
+ // fw mechanism
+ bool pre_dec_bt_pwr;
+ bool cur_dec_bt_pwr;
+ bool bPreBtLnaConstrain;
+ bool bCurBtLnaConstrain;
+ u8 bPreBtPsdMode;
+ u8 bCurBtPsdMode;
+ u8 pre_fw_dac_swing_lvl;
+ u8 cur_fw_dac_swing_lvl;
+ bool cur_ignore_wlan_act;
+ bool pre_ignore_wlan_act;
+ u8 pre_ps_tdma;
+ u8 cur_ps_tdma;
+ u8 ps_tdma_para[5];
+ u8 ps_tdma_du_adj_type;
+ bool reset_tdma_adjust;
+ bool pre_ps_tdma_on;
+ bool cur_ps_tdma_on;
+ bool pre_bt_auto_report;
+ bool cur_bt_auto_report;
+ u8 pre_lps;
+ u8 cur_lps;
+ u8 pre_rpwm;
+ u8 cur_rpwm;
+
+ // sw mechanism
+ bool pre_rf_rx_lpf_shrink;
+ bool cur_rf_rx_lpf_shrink;
+ u32 bt_rf0x1e_backup;
+ bool pre_low_penalty_ra;
+ bool cur_low_penalty_ra;
+ bool pre_dac_swing_on;
+ u32 pre_dac_swing_lvl;
+ bool cur_dac_swing_on;
+ u32 cur_dac_swing_lvl;
+ bool pre_adc_back_off;
+ bool cur_adc_back_off;
+ bool pre_agc_table_en;
+ bool cur_agc_table_en;
+ u32 pre_val0x6c0;
+ u32 cur_val0x6c0;
+ u32 pre_val0x6c4;
+ u32 cur_val0x6c4;
+ u32 pre_val0x6c8;
+ u32 cur_val0x6c8;
+ u8 pre_val0x6cc;
+ u8 cur_val0x6cc;
+ bool limited_dig;
+
+ // algorithm related
+ u8 pre_algorithm;
+ u8 cur_algorithm;
+ u8 bt_status;
+ u8 wifi_chnl_info[3];
+
+ u8 error_condition;
+} COEX_DM_8812A_1ANT, *PCOEX_DM_8812A_1ANT;
+
+typedef struct _COEX_STA_8812A_1ANT{
+ bool under_lps;
+ bool under_ips;
+ u32 high_priority_tx;
+ u32 high_priority_rx;
+ u32 low_priority_tx;
+ u32 low_priority_rx;
+ u8 bt_rssi;
+ u8 pre_bt_rssi_state;
+ u8 pre_wifi_rssi_state[4];
+ bool c2h_bt_info_req_sent;
+ u8 bt_info_c2h[BT_INFO_SRC_8812A_1ANT_MAX][10];
+ u32 bt_info_c2h_cnt[BT_INFO_SRC_8812A_1ANT_MAX];
+ bool c2h_bt_inquiry_page;
+ u8 bt_retry_cnt;
+ u8 bt_info_ext;
+}COEX_STA_8812A_1ANT, *PCOEX_STA_8812A_1ANT;
+
+//===========================================
+// The following are the interfaces which notify the coex module.
+//===========================================
+void
+EXhalbtc8812a1ant_InitHwConfig(
+ PBTC_COEXIST btcoexist
+ );
+void
+EXhalbtc8812a1ant_InitCoexDm(
+ PBTC_COEXIST btcoexist
+ );
+void
+EXhalbtc8812a1ant_IpsNotify(
+ PBTC_COEXIST btcoexist,
+ u8 type
+ );
+void
+EXhalbtc8812a1ant_LpsNotify(
+ PBTC_COEXIST btcoexist,
+ u8 type
+ );
+void
+EXhalbtc8812a1ant_ScanNotify(
+ PBTC_COEXIST btcoexist,
+ u8 type
+ );
+void
+EXhalbtc8812a1ant_ConnectNotify(
+ PBTC_COEXIST btcoexist,
+ u8 type
+ );
+void
+EXhalbtc8812a1ant_MediaStatusNotify(
+ PBTC_COEXIST btcoexist,
+ u8 type
+ );
+void
+EXhalbtc8812a1ant_SpecialPacketNotify(
+ PBTC_COEXIST btcoexist,
+ u8 type
+ );
+void
+EXhalbtc8812a1ant_BtInfoNotify(
+ PBTC_COEXIST btcoexist,
+ u8 *tmp_buf,
+ u8 length
+ );
+void
+EXhalbtc8812a1ant_StackOperationNotify(
+ PBTC_COEXIST btcoexist,
+ u8 type
+ );
+void
+EXhalbtc8812a1ant_HaltNotify(
+ PBTC_COEXIST btcoexist
+ );
+void
+EXhalbtc8812a1ant_PnpNotify(
+ PBTC_COEXIST btcoexist,
+ u8 pnpState
+ );
+void
+EXhalbtc8812a1ant_Periodical(
+ PBTC_COEXIST btcoexist
+ );
+void
+EXhalbtc8812a1ant_DisplayCoexInfo(
+ PBTC_COEXIST btcoexist
+ );
+void
+EXhalbtc8812a1ant_DbgControl(
+ PBTC_COEXIST btcoexist,
+ u8 opCode,
+ u8 opLen,
+ u8 *pData
+ );
diff --git a/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c b/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c
new file mode 100644
index 000000000000..e619923ef0ab
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c
@@ -0,0 +1,1614 @@
+//============================================================
+// Description:
+//
+// This file is for RTL8723A Co-exist mechanism
+//
+// History
+// 2012/08/22 Cosa first check in.
+// 2012/11/14 Cosa Revise for 8723A 1Ant out sourcing.
+//
+//============================================================
+
+//============================================================
+// include files
+//============================================================
+#include "Mp_Precomp.h"
+#if(BT_30_SUPPORT == 1)
+//============================================================
+// Global variables, these are static variables
+//============================================================
+static COEX_DM_8723A_1ANT GLCoexDm8723a1Ant;
+static PCOEX_DM_8723A_1ANT pCoexDm=&GLCoexDm8723a1Ant;
+static COEX_STA_8723A_1ANT GLCoexSta8723a1Ant;
+static PCOEX_STA_8723A_1ANT pCoexSta=&GLCoexSta8723a1Ant;
+
+const char *const GLBtInfoSrc8723a1Ant[]={
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+//============================================================
+// local function proto type if needed
+//============================================================
+//============================================================
+// local function start with halbtc8723a1ant_
+//============================================================
+VOID
+halbtc8723a1ant_Reg0x550Bit3(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bSet
+ )
+{
+ u1Byte u1tmp=0;
+
+ u1tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x550);
+ if(bSet)
+ {
+ u1tmp |= BIT3;
+ }
+ else
+ {
+ u1tmp &= ~BIT3;
+ }
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x550, u1tmp);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], set 0x550[3]=%d\n", (bSet? 1:0)));
+}
+
+VOID
+halbtc8723a1ant_NotifyFwScan(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte scanType
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ if(BTC_SCAN_START == scanType)
+ H2C_Parameter[0] = 0x1;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Notify FW for wifi scan, write 0x3b=0x%x\n",
+ H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x3b, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8723a1ant_QueryBtInfo(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ pCoexSta->bC2hBtInfoReqSent = true;
+
+ H2C_Parameter[0] |= BIT0; // trigger
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Query Bt Info, FW write 0x38=0x%x\n",
+ H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x38, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8723a1ant_SetSwRfRxLpfCorner(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bRxRfShrinkOn
+ )
+{
+ if(bRxRfShrinkOn)
+ {
+ //Shrink RF Rx LPF corner
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Shrink RF Rx LPF corner!!\n"));
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, 0xf0ff7);
+ }
+ else
+ {
+ //Resume RF Rx LPF corner
+		// After initialization, we can use pCoexDm->btRf0x1eBackup
+ if(pBtCoexist->initilized)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n"));
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, pCoexDm->btRf0x1eBackup);
+ }
+ }
+}
+
+VOID
+halbtc8723a1ant_RfShrink(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bRxRfShrinkOn
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (bForceExec? "force to":""), ((bRxRfShrinkOn)? "ON":"OFF")));
+ pCoexDm->bCurRfRxLpfShrink = bRxRfShrinkOn;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
+ pCoexDm->bPreRfRxLpfShrink, pCoexDm->bCurRfRxLpfShrink));
+
+ if(pCoexDm->bPreRfRxLpfShrink == pCoexDm->bCurRfRxLpfShrink)
+ return;
+ }
+ halbtc8723a1ant_SetSwRfRxLpfCorner(pBtCoexist, pCoexDm->bCurRfRxLpfShrink);
+
+ pCoexDm->bPreRfRxLpfShrink = pCoexDm->bCurRfRxLpfShrink;
+}
+
+VOID
+halbtc8723a1ant_SetSwPenaltyTxRateAdaptive(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bLowPenaltyRa
+ )
+{
+ u1Byte tmpU1;
+
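+	// 0x4fd[0] is always set; bit 2 is cleared for the low-penalty setting
+	// and set for the normal rate-adaptive setting.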
+ tmpU1 = pBtCoexist->btc_read_1byte(pBtCoexist, 0x4fd);
+ tmpU1 |= BIT0;
+ if(bLowPenaltyRa)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set low penalty!!\n"));
+ tmpU1 &= ~BIT2;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set normal!!\n"));
+ tmpU1 |= BIT2;
+ }
+
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x4fd, tmpU1);
+}
+
+VOID
+halbtc8723a1ant_LowPenaltyRa(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bLowPenaltyRa
+ )
+{
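+	// NOTE: the unconditional return below disables the low-penalty RA
+	// mechanism; the remainder of the function is kept but never reached.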
+ return;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (bForceExec? "force to":""), ((bLowPenaltyRa)? "ON":"OFF")));
+ pCoexDm->bCurLowPenaltyRa = bLowPenaltyRa;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+ pCoexDm->bPreLowPenaltyRa, pCoexDm->bCurLowPenaltyRa));
+
+ if(pCoexDm->bPreLowPenaltyRa == pCoexDm->bCurLowPenaltyRa)
+ return;
+ }
+ halbtc8723a1ant_SetSwPenaltyTxRateAdaptive(pBtCoexist, pCoexDm->bCurLowPenaltyRa);
+
+ pCoexDm->bPreLowPenaltyRa = pCoexDm->bCurLowPenaltyRa;
+}
+
+VOID
+halbtc8723a1ant_SetCoexTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u4Byte val0x6c0,
+ IN u4Byte val0x6c8,
+ IN u1Byte val0x6cc
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc));
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x6cc, val0x6cc);
+}
+
+VOID
+halbtc8723a1ant_CoexTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u4Byte val0x6c0,
+ IN u4Byte val0x6c8,
+ IN u1Byte val0x6cc
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ (bForceExec? "force to":""), val0x6c0, val0x6c8, val0x6cc));
+ pCoexDm->curVal0x6c0 = val0x6c0;
+ pCoexDm->curVal0x6c8 = val0x6c8;
+ pCoexDm->curVal0x6cc = val0x6cc;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], preVal0x6c0=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+ pCoexDm->preVal0x6c0, pCoexDm->preVal0x6c8, pCoexDm->preVal0x6cc));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], curVal0x6c0=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+ pCoexDm->curVal0x6c0, pCoexDm->curVal0x6c8, pCoexDm->curVal0x6cc));
+
+ if( (pCoexDm->preVal0x6c0 == pCoexDm->curVal0x6c0) &&
+ (pCoexDm->preVal0x6c8 == pCoexDm->curVal0x6c8) &&
+ (pCoexDm->preVal0x6cc == pCoexDm->curVal0x6cc) )
+ return;
+ }
+ halbtc8723a1ant_SetCoexTable(pBtCoexist, val0x6c0, val0x6c8, val0x6cc);
+
+ pCoexDm->preVal0x6c0 = pCoexDm->curVal0x6c0;
+ pCoexDm->preVal0x6c8 = pCoexDm->curVal0x6c8;
+ pCoexDm->preVal0x6cc = pCoexDm->curVal0x6cc;
+}
+
+VOID
+halbtc8723a1ant_SetFwIgnoreWlanAct(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bEnable
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ if(bEnable)
+ {
+ H2C_Parameter[0] |= BIT0; // function enable
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x25=0x%x\n",
+ H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x25, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8723a1ant_IgnoreWlanAct(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bEnable
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn Ignore WlanAct %s\n",
+ (bForceExec? "force to":""), (bEnable? "ON":"OFF")));
+ pCoexDm->bCurIgnoreWlanAct = bEnable;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ pCoexDm->bPreIgnoreWlanAct, pCoexDm->bCurIgnoreWlanAct));
+
+ if(pCoexDm->bPreIgnoreWlanAct == pCoexDm->bCurIgnoreWlanAct)
+ return;
+ }
+ halbtc8723a1ant_SetFwIgnoreWlanAct(pBtCoexist, bEnable);
+
+ pCoexDm->bPreIgnoreWlanAct = pCoexDm->bCurIgnoreWlanAct;
+}
+
+VOID
+halbtc8723a1ant_SetFwPstdma(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type,
+ IN u1Byte byte1,
+ IN u1Byte byte2,
+ IN u1Byte byte3,
+ IN u1Byte byte4,
+ IN u1Byte byte5
+ )
+{
+ u1Byte H2C_Parameter[5] ={0};
+ u1Byte realByte1=byte1, realByte5=byte5;
+ BOOLEAN bApEnable=FALSE;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, &bApEnable);
+
+ // byte1[1:0] != 0 means enable pstdma
+ // for 2Ant bt coexist, if byte1 != 0 means enable pstdma
+ if(byte1)
+ {
+ if(bApEnable)
+ {
+ if(type != 5 && type != 12)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], FW for 1Ant AP mode\n"));
+ realByte1 &= ~BIT4;
+ realByte1 |= BIT5;
+
+ realByte5 |= BIT5;
+ realByte5 &= ~BIT6;
+ }
+ }
+ }
+ H2C_Parameter[0] = realByte1;
+ H2C_Parameter[1] = byte2;
+ H2C_Parameter[2] = byte3;
+ H2C_Parameter[3] = byte4;
+ H2C_Parameter[4] = realByte5;
+
+ pCoexDm->psTdmaPara[0] = realByte1;
+ pCoexDm->psTdmaPara[1] = byte2;
+ pCoexDm->psTdmaPara[2] = byte3;
+ pCoexDm->psTdmaPara[3] = byte4;
+ pCoexDm->psTdmaPara[4] = realByte5;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x3a(5bytes)=0x%x%08x\n",
+ H2C_Parameter[0],
+ H2C_Parameter[1]<<24|H2C_Parameter[2]<<16|H2C_Parameter[3]<<8|H2C_Parameter[4]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x3a, 5, H2C_Parameter);
+}
+
+VOID
+halbtc8723a1ant_PsTdma(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bTurnOn,
+ IN u1Byte type
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (bForceExec? "force to":""), (bTurnOn? "ON":"OFF"), type));
+ pCoexDm->bCurPsTdmaOn = bTurnOn;
+ pCoexDm->curPsTdma = type;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ pCoexDm->bPrePsTdmaOn, pCoexDm->bCurPsTdmaOn));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ pCoexDm->prePsTdma, pCoexDm->curPsTdma));
+
+ if( (pCoexDm->bPrePsTdmaOn == pCoexDm->bCurPsTdmaOn) &&
+ (pCoexDm->prePsTdma == pCoexDm->curPsTdma) )
+ return;
+ }
+ if(pCoexDm->bCurPsTdmaOn)
+ {
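+		// each case below programs a different 5-byte PS TDMA parameter set
+		// via H2C command 0x3a.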
+ switch(pCoexDm->curPsTdma)
+ {
+ case 1:
+ default:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x1a, 0x1a, 0x0, 0x40);
+ break;
+ case 2:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x12, 0x12, 0x0, 0x40);
+ break;
+ case 3:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x3f, 0x3, 0x10, 0x40);
+ break;
+ case 4:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x15, 0x3, 0x10, 0x0);
+ break;
+ case 5:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0xa9, 0x15, 0x3, 0x35, 0xc0);
+ break;
+
+ case 8:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x25, 0x3, 0x10, 0x0);
+ break;
+ case 9:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0xa, 0xa, 0x0, 0x40);
+ break;
+ case 10:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0xa, 0xa, 0x0, 0x40);
+ break;
+ case 11:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x5, 0x5, 0x0, 0x40);
+ break;
+ case 12:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0xa9, 0xa, 0x3, 0x15, 0xc0);
+ break;
+
+ case 18:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x25, 0x3, 0x10, 0x0);
+ break;
+
+ case 20:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x2a, 0x2a, 0x0, 0x0);
+ break;
+ case 21:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x20, 0x3, 0x10, 0x40);
+ break;
+ case 22:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x1a, 0x1a, 0x2, 0x40);
+ break;
+ case 23:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x12, 0x12, 0x2, 0x40);
+ break;
+ case 24:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0xa, 0xa, 0x2, 0x40);
+ break;
+ case 25:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x5, 0x5, 0x2, 0x40);
+ break;
+ case 26:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x25, 0x3, 0x10, 0x0);
+ break;
+ case 27:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x5, 0x5, 0x2, 0x40);
+ break;
+ case 28:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x3, 0x2f, 0x2f, 0x0, 0x0);
+ break;
+
+ }
+ }
+ else
+ {
+ // disable PS tdma
+ switch(pCoexDm->curPsTdma)
+ {
+ case 8:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x8, 0x0, 0x0, 0x0, 0x0);
+ break;
+ case 0:
+ default:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x0, 0x0, 0x0, 0x0, 0x0);
+ pBtCoexist->btc_write_2byte(pBtCoexist, 0x860, 0x210);
+ break;
+ case 9:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x0, 0x0, 0x0, 0x0, 0x0);
+ pBtCoexist->btc_write_2byte(pBtCoexist, 0x860, 0x110);
+ break;
+
+ }
+ }
+
+ // update pre state
+ pCoexDm->bPrePsTdmaOn = pCoexDm->bCurPsTdmaOn;
+ pCoexDm->prePsTdma = pCoexDm->curPsTdma;
+}
+
+
+VOID
+halbtc8723a1ant_CoexAllOff(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ // fw all off
+ halbtc8723a1ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ // sw all off
+ halbtc8723a1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a1ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+
+ // hw all off
+ halbtc8723a1ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+}
+
+VOID
+halbtc8723a1ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ // force to reset coex mechanism
+ halbtc8723a1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE);
+}
+
+VOID
+halbtc8723a1ant_BtEnableAction(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ halbtc8723a1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE);
+}
+
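+// Sample the BT high/low priority Tx/Rx packet counters from 0x770 and
+// 0x774 (low word = Tx, high word = Rx), cache them for the BT
+// enable/disable detection and the coex-info display, then write
+// 0x76e = 0xc to reset the counters for the next poll.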
+VOID
+halbtc8723a1ant_MonitorBtCtr(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u4Byte regHPTxRx, regLPTxRx, u4Tmp;
+ u4Byte regHPTx=0, regHPRx=0, regLPTx=0, regLPRx=0;
+ u1Byte u1Tmp;
+
+ regHPTxRx = 0x770;
+ regLPTxRx = 0x774;
+
+ u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regHPTxRx);
+ regHPTx = u4Tmp & MASKLWORD;
+ regHPRx = (u4Tmp & MASKHWORD)>>16;
+
+ u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regLPTxRx);
+ regLPTx = u4Tmp & MASKLWORD;
+ regLPRx = (u4Tmp & MASKHWORD)>>16;
+
+ pCoexSta->highPriorityTx = regHPTx;
+ pCoexSta->highPriorityRx = regHPRx;
+ pCoexSta->lowPriorityTx = regLPTx;
+ pCoexSta->lowPriorityRx = regLPRx;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], High Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ regHPTxRx, regHPTx, regHPTx, regHPRx, regHPRx));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], Low Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ regLPTxRx, regLPTx, regLPTx, regLPRx, regLPRx));
+
+ // reset counter
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0xc);
+}
+
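+// BT is judged disabled when all four priority counters read either
+// all zero or all 0xffff for two consecutive polls. The result is
+// reported via BTC_SET_BL_BT_DISABLE; on a disabled<->enabled
+// transition, re-enabling BT forces IgnoreWlanAct off again, while
+// disabling it requests normal LPS.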
+VOID
+halbtc8723a1ant_MonitorBtEnableDisable(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ static BOOLEAN bPreBtDisabled=FALSE;
+ static u4Byte btDisableCnt=0;
+ BOOLEAN bBtActive=true, bBtDisabled=FALSE;
+
+	// This function checks whether BT is disabled
+
+ if( pCoexSta->highPriorityTx == 0 &&
+ pCoexSta->highPriorityRx == 0 &&
+ pCoexSta->lowPriorityTx == 0 &&
+ pCoexSta->lowPriorityRx == 0)
+ {
+ bBtActive = FALSE;
+ }
+ if( pCoexSta->highPriorityTx == 0xffff &&
+ pCoexSta->highPriorityRx == 0xffff &&
+ pCoexSta->lowPriorityTx == 0xffff &&
+ pCoexSta->lowPriorityRx == 0xffff)
+ {
+ bBtActive = FALSE;
+ }
+ if(bBtActive)
+ {
+ btDisableCnt = 0;
+ bBtDisabled = FALSE;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is enabled !!\n"));
+ }
+ else
+ {
+ btDisableCnt++;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], bt all counters=0, %d times!!\n",
+ btDisableCnt));
+ if(btDisableCnt >= 2)
+ {
+ bBtDisabled = true;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is disabled !!\n"));
+ }
+ }
+ if(bPreBtDisabled != bBtDisabled)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is from %s to %s!!\n",
+ (bPreBtDisabled ? "disabled":"enabled"),
+ (bBtDisabled ? "disabled":"enabled")));
+ bPreBtDisabled = bBtDisabled;
+ if(!bBtDisabled)
+ {
+ halbtc8723a1ant_BtEnableAction(pBtCoexist);
+ }
+ else
+ {
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+ }
+ }
+}
+
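+// Adaptive PS-TDMA duration tuning, driven by the BT retry count that
+// arrives with each 2-second BT-info poll. "up"/"dn" are hysteresis
+// counters: n clean polls in a row widen the WiFi slot (result = +1),
+// retries shrink it (result = -1). "m" stretches the observation
+// window (n = 3*m, with m capped at 20) so the algorithm does not
+// oscillate between two settings. The result then steps curPsTdma
+// through a fixed ladder chosen by bandwidth (20/40 MHz) and whether
+// BT is ACL-only busy; a final check restores the recorded type if a
+// scan/link/roam temporarily overrode it.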
+VOID
+halbtc8723a1ant_TdmaDurationAdjust(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ static s4Byte up,dn,m,n,WaitCount;
+ s4Byte result; //0: no change, +1: increase WiFi duration, -1: decrease WiFi duration
+ u1Byte retryCount=0;
+ u1Byte btState;
+ BOOLEAN bScan=FALSE, bLink=FALSE, bRoam=FALSE;
+ u4Byte wifiBw;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ btState = pCoexDm->btStatus;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], TdmaDurationAdjust()\n"));
+ if(pCoexDm->psTdmaGlobalCnt != pCoexDm->psTdmaMonitorCnt)
+ {
+ pCoexDm->psTdmaMonitorCnt = 0;
+ pCoexDm->psTdmaGlobalCnt = 0;
+ }
+ if(pCoexDm->psTdmaMonitorCnt == 0)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], first run BT A2DP + WiFi busy state!!\n"));
+ if(btState == BT_STATE_8723A_1ANT_ACL_ONLY_BUSY)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ pCoexDm->psTdmaDuAdjType = 1;
+ }
+ else
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 22);
+ pCoexDm->psTdmaDuAdjType = 22;
+ }
+ //============
+ up = 0;
+ dn = 0;
+ m = 1;
+ n= 3;
+ result = 0;
+ WaitCount = 0;
+ }
+ else
+ {
+		// acquire the BT TRx retry count from BT_Info byte2
+ retryCount = pCoexSta->btRetryCnt;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], retryCount = %d\n", retryCount));
+ result = 0;
+ WaitCount++;
+
+ if(retryCount == 0) // no retry in the last 2-second duration
+ {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+			if(up >= n)	// if retry count is 0 for n consecutive 2-second periods, widen the WiFi duration
+ {
+ WaitCount = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Increase wifi duration!!\n"));
+ }
+ }
+ else if (retryCount <= 3) // <=3 retry in the last 2-second duration
+ {
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+			if (dn == 2)	// if retry count < 3 for 2 consecutive 2-second periods, narrow the WiFi duration
+ {
+ if (WaitCount <= 2)
+					m++; // avoid bouncing back and forth between two levels
+ else
+ m = 1;
+
+				if ( m >= 20) // cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
+ }
+ }
+		else	// if retry count > 3 even once, narrow the WiFi duration
+ {
+ if (WaitCount == 1)
+				m++; // avoid bouncing back and forth between two levels
+ else
+ m = 1;
+
+			if ( m >= 20) // cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n"));
+ }
+
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT TxRx counter H+L <= 1200\n"));
+ if(btState != BT_STATE_8723A_1ANT_ACL_ONLY_BUSY)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], NOT ACL only busy!\n"));
+ if(BTC_WIFI_BW_HT40 != wifiBw)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], 20MHz\n"));
+ if(result == -1)
+ {
+ if(pCoexDm->curPsTdma == 22)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 23);
+ pCoexDm->psTdmaDuAdjType = 23;
+ }
+ else if(pCoexDm->curPsTdma == 23)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24);
+ pCoexDm->psTdmaDuAdjType = 24;
+ }
+ else if(pCoexDm->curPsTdma == 24)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 25);
+ pCoexDm->psTdmaDuAdjType = 25;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 25)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24);
+ pCoexDm->psTdmaDuAdjType = 24;
+ }
+ else if(pCoexDm->curPsTdma == 24)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 23);
+ pCoexDm->psTdmaDuAdjType = 23;
+ }
+ else if(pCoexDm->curPsTdma == 23)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 22);
+ pCoexDm->psTdmaDuAdjType = 22;
+ }
+ }
+					// error handling: if not in one of the expected states,
+					// set psTdma again.
+ if( (pCoexDm->psTdmaDuAdjType != 22) &&
+ (pCoexDm->psTdmaDuAdjType != 23) &&
+ (pCoexDm->psTdmaDuAdjType != 24) &&
+ (pCoexDm->psTdmaDuAdjType != 25) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], duration case out of handle!!\n"));
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 23);
+ pCoexDm->psTdmaDuAdjType = 23;
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], 40MHz\n"));
+ if(result == -1)
+ {
+ if(pCoexDm->curPsTdma == 23)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24);
+ pCoexDm->psTdmaDuAdjType = 24;
+ }
+ else if(pCoexDm->curPsTdma == 24)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 25);
+ pCoexDm->psTdmaDuAdjType = 25;
+ }
+ else if(pCoexDm->curPsTdma == 25)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 27);
+ pCoexDm->psTdmaDuAdjType = 27;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 27)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 25);
+ pCoexDm->psTdmaDuAdjType = 25;
+ }
+ else if(pCoexDm->curPsTdma == 25)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24);
+ pCoexDm->psTdmaDuAdjType = 24;
+ }
+ else if(pCoexDm->curPsTdma == 24)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 23);
+ pCoexDm->psTdmaDuAdjType = 23;
+ }
+ }
+					// error handling: if not in one of the expected states,
+					// set psTdma again.
+ if( (pCoexDm->psTdmaDuAdjType != 23) &&
+ (pCoexDm->psTdmaDuAdjType != 24) &&
+ (pCoexDm->psTdmaDuAdjType != 25) &&
+ (pCoexDm->psTdmaDuAdjType != 27) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], duration case out of handle!!\n"));
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24);
+ pCoexDm->psTdmaDuAdjType = 24;
+ }
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ACL only busy\n"));
+ if (result == -1)
+ {
+ if(pCoexDm->curPsTdma == 1)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ pCoexDm->psTdmaDuAdjType = 1;
+ }
+ }
+
+				// error handling: if not in one of the expected states,
+				// set psTdma again.
+ if( (pCoexDm->psTdmaDuAdjType != 1) &&
+ (pCoexDm->psTdmaDuAdjType != 2) &&
+ (pCoexDm->psTdmaDuAdjType != 9) &&
+ (pCoexDm->psTdmaDuAdjType != 11) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], duration case out of handle!!\n"));
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ }
+ }
+ }
+
+	// if the current PsTdma does not match the recorded one (e.g. during scan, DHCP, ...),
+	// then we have to adjust it back to the previously recorded one.
+ if(pCoexDm->curPsTdma != pCoexDm->psTdmaDuAdjType)
+ {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PsTdma type mismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
+ pCoexDm->curPsTdma, pCoexDm->psTdmaDuAdjType));
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+
+ if( !bScan && !bLink && !bRoam)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, pCoexDm->psTdmaDuAdjType);
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"));
+ }
+ }
+ pCoexDm->psTdmaMonitorCnt++;
+}
+
+
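+// Coex policy while WiFi is associated: if WiFi is idle and BT is not
+// connected (or connected-idle), PS-TDMA is switched off and 0x880 is
+// set to 0xc0; otherwise 0x880 is set to 0x60 for SCO/HID-busy states
+// (0xc0 otherwise) and a PS-TDMA type is chosen per BT state, with the
+// SCO/HID-busy and ACL-with-A2DP cases handed to TdmaDurationAdjust().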
+VOID
+halbtc8723a1ant_CoexForWifiConnect(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bWifiConnected=FALSE, bWifiBusy=FALSE;
+ u1Byte btState, btInfoOriginal=0;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+
+ btState = pCoexDm->btStatus;
+ btInfoOriginal = pCoexSta->btInfoC2h[BT_INFO_SRC_8723A_1ANT_BT_RSP][0];
+
+ if(bWifiConnected)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi connected!!\n"));
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+
+ if( !bWifiBusy &&
+ ((BT_STATE_8723A_1ANT_NO_CONNECTION == btState) ||
+ (BT_STATE_8723A_1ANT_CONNECT_IDLE == btState)) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], [Wifi is idle] or [Bt is non connected idle or Bt is connected idle]!!\n"));
+
+ if(BT_STATE_8723A_1ANT_NO_CONNECTION == btState)
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+ else if(BT_STATE_8723A_1ANT_CONNECT_IDLE == btState)
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, 0xc0);
+ }
+ else
+ {
+ if( (BT_STATE_8723A_1ANT_SCO_ONLY_BUSY == btState) ||
+ (BT_STATE_8723A_1ANT_ACL_SCO_BUSY == btState) ||
+ (BT_STATE_8723A_1ANT_HID_BUSY == btState) ||
+ (BT_STATE_8723A_1ANT_HID_SCO_BUSY == btState) )
+ {
+ pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, 0x60);
+ }
+ else
+ {
+ pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, 0xc0);
+ }
+ switch(btState)
+ {
+ case BT_STATE_8723A_1ANT_NO_CONNECTION:
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ break;
+ case BT_STATE_8723A_1ANT_CONNECT_IDLE:
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ break;
+ case BT_STATE_8723A_1ANT_INQ_OR_PAG:
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ break;
+ case BT_STATE_8723A_1ANT_SCO_ONLY_BUSY:
+ case BT_STATE_8723A_1ANT_ACL_SCO_BUSY:
+ case BT_STATE_8723A_1ANT_HID_BUSY:
+ case BT_STATE_8723A_1ANT_HID_SCO_BUSY:
+ halbtc8723a1ant_TdmaDurationAdjust(pBtCoexist);
+ break;
+ case BT_STATE_8723A_1ANT_ACL_ONLY_BUSY:
+ if (btInfoOriginal&BT_INFO_8723A_1ANT_B_A2DP)
+ {
+ halbtc8723a1ant_TdmaDurationAdjust(pBtCoexist);
+ }
+ else if(btInfoOriginal&BT_INFO_8723A_1ANT_B_FTP)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ }
+ else if( (btInfoOriginal&BT_INFO_8723A_1ANT_B_A2DP) &&
+ (btInfoOriginal&BT_INFO_8723A_1ANT_B_FTP) )
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ }
+ else
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ }
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], error!!!, undefined case in halbtc8723a1ant_CoexForWifiConnect()!!\n"));
+ break;
+ }
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is disconnected!!\n"));
+ }
+
+ pCoexDm->psTdmaGlobalCnt++;
+}
+
+//============================================================
+// workaround functions start with wa_halbtc8723a1ant_
+//============================================================
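+// C2H hang workaround: if the BT-info C2H count has not advanced for
+// two consecutive polls and register 0x1af reads non-zero, clear 0x1af
+// and count the event in c2hHangDetectCnt.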
+VOID
+wa_halbtc8723a1ant_MonitorC2h(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte tmp1b=0x0;
+ u4Byte curC2hTotalCnt=0x0;
+ static u4Byte preC2hTotalCnt=0x0, sameCntPollingTime=0x0;
+
+ curC2hTotalCnt+=pCoexSta->btInfoC2hCnt[BT_INFO_SRC_8723A_1ANT_BT_RSP];
+
+ if(curC2hTotalCnt == preC2hTotalCnt)
+ {
+ sameCntPollingTime++;
+ }
+ else
+ {
+ preC2hTotalCnt = curC2hTotalCnt;
+ sameCntPollingTime = 0;
+ }
+
+ if(sameCntPollingTime >= 2)
+ {
+ tmp1b = pBtCoexist->btc_read_1byte(pBtCoexist, 0x1af);
+ if(tmp1b != 0x0)
+ {
+ pCoexSta->c2hHangDetectCnt++;
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x1af, 0x0);
+ }
+ }
+}
+
+//============================================================
+// extern functions start with EXhalbtc8723a1ant_
+//============================================================
+VOID
+EXhalbtc8723a1ant_InitHwConfig(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 1Ant Init HW Config!!\n"));
+
+ // backup rf 0x1e value
+ pCoexDm->btRf0x1eBackup =
+ pBtCoexist->btc_get_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff);
+
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x40, 0x20);
+
+ // enable counter statistics
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0x4);
+
+ // coex table
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x6cc, 0x0); // 1-Ant coex
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c8, 0xffff); // wifi break table
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c4, 0x55555555); //coex table
+
+ // antenna switch control parameter
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x858, 0xaaaaaaaa);
+
+ pBtCoexist->btc_write_2byte(pBtCoexist, 0x860, 0x210); //set antenna at wifi side if ANTSW is software control
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x870, 0x300); //SPDT(connected with TRSW) control by hardware PTA
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x874, 0x22804000); //ANTSW keep by GNT_BT
+
+ // coexistence parameters
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x778, 0x1); // enable RTK mode PTA
+}
+
+VOID
+EXhalbtc8723a1ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n"));
+
+ halbtc8723a1ant_InitCoexDm(pBtCoexist);
+}
+
+VOID
+EXhalbtc8723a1ant_DisplayCoexInfo(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ struct btc_board_info * pBoardInfo=&pBtCoexist->board_info;
+ PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info;
+ pu1Byte cliBuf=pBtCoexist->cli_buf;
+ u1Byte u1Tmp[4], i, btInfoExt, psTdmaCase=0;
+ u4Byte u4Tmp[4];
+ BOOLEAN bRoam=FALSE, bScan=FALSE, bLink=FALSE, bWifiUnder5G=FALSE;
+ BOOLEAN bBtHsOn=FALSE, bWifiBusy=FALSE;
+ s4Byte wifiRssi=0, btHsRssi=0;
+ u4Byte wifiBw, wifiTrafficDir;
+ u1Byte wifiDot11Chnl, wifiHsChnl;
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cliBuf);
+
+ if(!pBoardInfo->bt_exist)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+ CL_PRINTF(cliBuf);
+ return;
+ }
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \
+ pBoardInfo->pg_ant_num, pBoardInfo->btdm_ant_num);
+ CL_PRINTF(cliBuf);
+
+ if(pBtCoexist->manual_control)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "[Action Manual control]!!");
+ CL_PRINTF(cliBuf);
+ }
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
+ ((pStackInfo->bProfileNotified)? "Yes":"No"), pStackInfo->hciVersion);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \
+ wifiDot11Chnl, wifiHsChnl, bBtHsOn);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \
+ pCoexDm->wifiChnlInfo[0], pCoexDm->wifiChnlInfo[1],
+ pCoexDm->wifiChnlInfo[2]);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \
+ wifiRssi, btHsRssi);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", "Wifi bLink/ bRoam/ bScan", \
+ bLink, bRoam, bScan);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_UNDER_5G, &bWifiUnder5G);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", "Wifi status", \
+ (bWifiUnder5G? "5G":"2.4G"),
+ ((BTC_WIFI_BW_LEGACY==wifiBw)? "Legacy": (((BTC_WIFI_BW_HT40==wifiBw)? "HT40":"HT20"))),
+ ((!bWifiBusy)? "idle": ((BTC_WIFI_TRAFFIC_TX==wifiTrafficDir)? "uplink":"downlink")));
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \
+ ((pCoexSta->bC2hBtInquiryPage)?("inquiry/page scan"):((BT_8723A_1ANT_BT_STATUS_IDLE == pCoexDm->btStatus)? "idle":( (BT_8723A_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)? "connected-idle":"busy"))),
+ pCoexSta->btRssi, pCoexSta->btRetryCnt);
+ CL_PRINTF(cliBuf);
+
+ if(pStackInfo->bProfileNotified)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \
+ pStackInfo->bScoExist, pStackInfo->bHidExist, pStackInfo->bPanExist, pStackInfo->bA2dpExist);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_BT_LINK_INFO);
+ }
+
+ btInfoExt = pCoexSta->btInfoExt;
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Info A2DP rate", \
+ (btInfoExt&BIT0)? "Basic rate":"EDR rate");
+ CL_PRINTF(cliBuf);
+
+ for(i=0; i<BT_INFO_SRC_8723A_1ANT_MAX; i++)
+ {
+ if(pCoexSta->btInfoC2hCnt[i])
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8723a1Ant[i], \
+ pCoexSta->btInfoC2h[i][0], pCoexSta->btInfoC2h[i][1],
+ pCoexSta->btInfoC2h[i][2], pCoexSta->btInfoC2h[i][3],
+ pCoexSta->btInfoC2h[i][4], pCoexSta->btInfoC2h[i][5],
+ pCoexSta->btInfoC2h[i][6], pCoexSta->btInfoC2hCnt[i]);
+ CL_PRINTF(cliBuf);
+ }
+ }
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d", "write 0x1af=0x0 num", \
+ pCoexSta->c2hHangDetectCnt);
+ CL_PRINTF(cliBuf);
+
+ // Sw mechanism
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw mechanism]============");
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d", "SM1[ShRf/ LpRA/ LimDig]", \
+ pCoexDm->bCurRfRxLpfShrink, pCoexDm->bCurLowPenaltyRa, pCoexDm->limited_dig);
+ CL_PRINTF(cliBuf);
+
+ // Fw mechanism
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============");
+ CL_PRINTF(cliBuf);
+
+ if(!pBtCoexist->manual_control)
+ {
+ psTdmaCase = pCoexDm->curPsTdma;
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", "PS TDMA", \
+ pCoexDm->psTdmaPara[0], pCoexDm->psTdmaPara[1],
+ pCoexDm->psTdmaPara[2], pCoexDm->psTdmaPara[3],
+ pCoexDm->psTdmaPara[4], psTdmaCase);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "IgnWlanAct", \
+ pCoexDm->bCurIgnoreWlanAct);
+ CL_PRINTF(cliBuf);
+ }
+
+ // Hw setting
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============");
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \
+ pCoexDm->btRf0x1eBackup);
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x778);
+ u1Tmp[1] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x783);
+ u1Tmp[2] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x796);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x778/ 0x783/ 0x796", \
+ u1Tmp[0], u1Tmp[1], u1Tmp[2]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x880);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x880", \
+ u4Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x40);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x40", \
+ u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x550);
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x522);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \
+ u4Tmp[0], u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x484);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x484(rate adaptive)", \
+ u4Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xc50);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \
+ u4Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda0);
+ u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda4);
+ u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda8);
+ u4Tmp[3] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xdac);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0xda0/0xda4/0xda8/0xdac(FA cnt)", \
+ u4Tmp[0], u4Tmp[1], u4Tmp[2], u4Tmp[3]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c0);
+ u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c4);
+ u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c8);
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x6cc);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \
+ u4Tmp[0], u4Tmp[1], u4Tmp[2], u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x770 (hp rx[31:16]/tx[15:0])", \
+ pCoexSta->highPriorityRx, pCoexSta->highPriorityTx);
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x774(lp rx[31:16]/tx[15:0])", \
+ pCoexSta->lowPriorityRx, pCoexSta->lowPriorityTx);
+ CL_PRINTF(cliBuf);
+
+	// check whether the Tx mgnt queue hangs: 0x41b should be 0xf, e.g. 0xd ==> hang
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x41b);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x41b (mgntQ hang chk == 0xf)", \
+ u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+VOID
+EXhalbtc8723a1ant_IpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_IPS_ENTER == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n"));
+ halbtc8723a1ant_CoexAllOff(pBtCoexist);
+ }
+ else if(BTC_IPS_LEAVE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n"));
+ //halbtc8723a1ant_InitCoexDm(pBtCoexist);
+ }
+}
+
+VOID
+EXhalbtc8723a1ant_LpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_LPS_ENABLE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n"));
+ }
+ else if(BTC_LPS_DISABLE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n"));
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ }
+}
+
+VOID
+EXhalbtc8723a1ant_ScanNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ BOOLEAN bWifiConnected=FALSE;
+
+ halbtc8723a1ant_NotifyFwScan(pBtCoexist, type);
+
+ if(pBtCoexist->btInfo.bBtDisabled)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+ }
+ else
+ {
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ if(BTC_SCAN_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n"));
+ if(!bWifiConnected) // non-connected scan
+ {
+ //set 0x550[3]=1 before PsTdma
+ halbtc8723a1ant_Reg0x550Bit3(pBtCoexist, true);
+ }
+
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ }
+ else if(BTC_SCAN_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n"));
+ if(!bWifiConnected) // non-connected scan
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ }
+ else
+ {
+ halbtc8723a1ant_CoexForWifiConnect(pBtCoexist);
+ }
+ }
+ }
+}
+
+VOID
+EXhalbtc8723a1ant_ConnectNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ BOOLEAN bWifiConnected=FALSE;
+
+ if(pBtCoexist->btInfo.bBtDisabled)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+ }
+ else
+ {
+ if(BTC_ASSOCIATE_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n"));
+ //set 0x550[3]=1 before PsTdma
+ halbtc8723a1ant_Reg0x550Bit3(pBtCoexist, true);
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8); // extend wifi slot
+ }
+ else if(BTC_ASSOCIATE_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n"));
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+			if(!bWifiConnected)	// still not connected
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ }
+ else
+ {
+ halbtc8723a1ant_CoexForWifiConnect(pBtCoexist);
+ }
+ }
+ }
+}
+
+VOID
+EXhalbtc8723a1ant_MediaStatusNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_MEDIA_CONNECT == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA connect notify\n"));
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA disconnect notify\n"));
+ }
+}
+
+VOID
+EXhalbtc8723a1ant_SpecialPacketNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(type == BTC_PACKET_DHCP)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], DHCP Packet notify\n"));
+ if(pBtCoexist->btInfo.bBtDisabled)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+ }
+ else
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 18);
+ }
+ }
+}
+
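+// Decode the BT-info C2H from the BT side: byte0 carries the status
+// bits (BIT0 = connection, BIT2 = inquiry/page scan), byte1 the retry
+// count, byte2 the raw RSSI (converted as *2 + 10) and byte3 the
+// extended info. With BIT2 masked out, byte0 values 0x1/0x9/0x13/0x1b/
+// 0x29/0x3b select the connect-idle, ACL, SCO, ACL+SCO, HID and HID+SCO
+// states; any connected state forces LPS, and the busy states also mark
+// BT traffic busy / limited DIG.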
+VOID
+EXhalbtc8723a1ant_BtInfoNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN pu1Byte tmpBuf,
+ IN u1Byte length
+ )
+{
+ u1Byte btInfo=0;
+ u1Byte i, rspSource=0;
+ BOOLEAN bBtHsOn=FALSE, bBtBusy=FALSE, bForceLps=FALSE;
+
+ pCoexSta->bC2hBtInfoReqSent = FALSE;
+
+ rspSource = BT_INFO_SRC_8723A_1ANT_BT_RSP;
+ pCoexSta->btInfoC2hCnt[rspSource]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length=%d, hex data=[", rspSource, length));
+ for(i=0; i<length; i++)
+ {
+ pCoexSta->btInfoC2h[rspSource][i] = tmpBuf[i];
+ if(i == 0)
+ btInfo = tmpBuf[i];
+ if(i == length-1)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x]\n", tmpBuf[i]));
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x, ", tmpBuf[i]));
+ }
+ }
+
+ if(BT_INFO_SRC_8723A_1ANT_WIFI_FW != rspSource)
+ {
+ pCoexSta->btRetryCnt =
+ pCoexSta->btInfoC2h[rspSource][1];
+
+ pCoexSta->btRssi =
+ pCoexSta->btInfoC2h[rspSource][2]*2+10;
+
+ pCoexSta->btInfoExt =
+ pCoexSta->btInfoC2h[rspSource][3];
+ }
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ // check BIT2 first ==> check if bt is under inquiry or page scan
+ if(btInfo & BT_INFO_8723A_1ANT_B_INQ_PAGE)
+ {
+ pCoexSta->bC2hBtInquiryPage = true;
+ }
+ else
+ {
+ pCoexSta->bC2hBtInquiryPage = FALSE;
+ }
+ btInfo &= ~BIT2;
+ if(!(btInfo & BIT0))
+ {
+ pCoexDm->btStatus = BT_STATE_8723A_1ANT_NO_CONNECTION;
+ bForceLps = FALSE;
+ }
+ else
+ {
+ bForceLps = true;
+ if(btInfo == 0x1)
+ {
+ pCoexDm->btStatus = BT_STATE_8723A_1ANT_CONNECT_IDLE;
+ }
+ else if(btInfo == 0x9)
+ {
+ pCoexDm->btStatus = BT_STATE_8723A_1ANT_ACL_ONLY_BUSY;
+ bBtBusy = true;
+ }
+ else if(btInfo == 0x13)
+ {
+ pCoexDm->btStatus = BT_STATE_8723A_1ANT_SCO_ONLY_BUSY;
+ bBtBusy = true;
+ }
+ else if(btInfo == 0x1b)
+ {
+ pCoexDm->btStatus = BT_STATE_8723A_1ANT_ACL_SCO_BUSY;
+ bBtBusy = true;
+ }
+ else if(btInfo == 0x29)
+ {
+ pCoexDm->btStatus = BT_STATE_8723A_1ANT_HID_BUSY;
+ bBtBusy = true;
+ }
+ else if(btInfo == 0x3b)
+ {
+ pCoexDm->btStatus = BT_STATE_8723A_1ANT_HID_SCO_BUSY;
+ bBtBusy = true;
+ }
+ }
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bBtBusy);
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_LIMITED_DIG, &bBtBusy);
+ if(bForceLps)
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ else
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+
+ if( (BT_STATE_8723A_1ANT_NO_CONNECTION == pCoexDm->btStatus) ||
+ (BT_STATE_8723A_1ANT_CONNECT_IDLE == pCoexDm->btStatus) )
+ {
+ if(pCoexSta->bC2hBtInquiryPage)
+ pCoexDm->btStatus = BT_STATE_8723A_1ANT_INQ_OR_PAG;
+ }
+}
+
+VOID
+EXhalbtc8723a1ant_StackOperationNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair start notify\n"));
+ }
+ else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair finish notify\n"));
+ }
+}
+
+VOID
+EXhalbtc8723a1ant_HaltNotify(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ halbtc8723a1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 0);
+
+ halbtc8723a1ant_LowPenaltyRa(pBtCoexist, FORCE_EXEC, FALSE);
+ halbtc8723a1ant_RfShrink(pBtCoexist, FORCE_EXEC, FALSE);
+
+ halbtc8723a1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
+ EXhalbtc8723a1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT);
+}
+
+VOID
+EXhalbtc8723a1ant_Periodical(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bScan=FALSE, bLink=FALSE, bRoam=FALSE, bWifiConnected=FALSE;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], 1Ant Periodical!!\n"));
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+
+	// workaround for c2h hang
+ wa_halbtc8723a1ant_MonitorC2h(pBtCoexist);
+
+ halbtc8723a1ant_QueryBtInfo(pBtCoexist);
+ halbtc8723a1ant_MonitorBtCtr(pBtCoexist);
+ halbtc8723a1ant_MonitorBtEnableDisable(pBtCoexist);
+
+
+ if(bScan)
+ return;
+ if(bLink)
+ return;
+
+ if(bWifiConnected)
+ {
+ if(pBtCoexist->btInfo.bBtDisabled)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+
+ halbtc8723a1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a1ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+ }
+ else
+ {
+ halbtc8723a1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a1ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a1ant_CoexForWifiConnect(pBtCoexist);
+ }
+ }
+ else
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ halbtc8723a1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a1ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+ }
+}
+
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.h b/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.h
new file mode 100644
index 000000000000..60992f59eb37
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.h
@@ -0,0 +1,176 @@
+//===========================================
+// The following is for 8723A 1Ant BT Co-exist definition
+//===========================================
+#define BT_INFO_8723A_1ANT_B_FTP BIT7
+#define BT_INFO_8723A_1ANT_B_A2DP BIT6
+#define BT_INFO_8723A_1ANT_B_HID BIT5
+#define BT_INFO_8723A_1ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8723A_1ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8723A_1ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8723A_1ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8723A_1ANT_B_CONNECTION BIT0
+
+typedef enum _BT_STATE_8723A_1ANT{
+ BT_STATE_8723A_1ANT_DISABLED = 0,
+ BT_STATE_8723A_1ANT_NO_CONNECTION = 1,
+ BT_STATE_8723A_1ANT_CONNECT_IDLE = 2,
+ BT_STATE_8723A_1ANT_INQ_OR_PAG = 3,
+ BT_STATE_8723A_1ANT_ACL_ONLY_BUSY = 4,
+ BT_STATE_8723A_1ANT_SCO_ONLY_BUSY = 5,
+ BT_STATE_8723A_1ANT_ACL_SCO_BUSY = 6,
+ BT_STATE_8723A_1ANT_HID_BUSY = 7,
+ BT_STATE_8723A_1ANT_HID_SCO_BUSY = 8,
+ BT_STATE_8723A_1ANT_MAX
+}BT_STATE_8723A_1ANT, *PBT_STATE_8723A_1ANT;
+
+#define BTC_RSSI_COEX_THRESH_TOL_8723A_1ANT 2
+
+typedef enum _BT_INFO_SRC_8723A_1ANT{
+ BT_INFO_SRC_8723A_1ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8723A_1ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8723A_1ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8723A_1ANT_MAX
+}BT_INFO_SRC_8723A_1ANT,*PBT_INFO_SRC_8723A_1ANT;
+
+typedef enum _BT_8723A_1ANT_BT_STATUS{
+ BT_8723A_1ANT_BT_STATUS_IDLE = 0x0,
+ BT_8723A_1ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8723A_1ANT_BT_STATUS_NON_IDLE = 0x2,
+ BT_8723A_1ANT_BT_STATUS_MAX
+}BT_8723A_1ANT_BT_STATUS,*PBT_8723A_1ANT_BT_STATUS;
+
+typedef enum _BT_8723A_1ANT_COEX_ALGO{
+ BT_8723A_1ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8723A_1ANT_COEX_ALGO_SCO = 0x1,
+ BT_8723A_1ANT_COEX_ALGO_HID = 0x2,
+ BT_8723A_1ANT_COEX_ALGO_A2DP = 0x3,
+ BT_8723A_1ANT_COEX_ALGO_PANEDR = 0x4,
+ BT_8723A_1ANT_COEX_ALGO_PANHS = 0x5,
+ BT_8723A_1ANT_COEX_ALGO_PANEDR_A2DP = 0x6,
+ BT_8723A_1ANT_COEX_ALGO_PANEDR_HID = 0x7,
+ BT_8723A_1ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x8,
+ BT_8723A_1ANT_COEX_ALGO_HID_A2DP = 0x9,
+ BT_8723A_1ANT_COEX_ALGO_MAX
+}BT_8723A_1ANT_COEX_ALGO,*PBT_8723A_1ANT_COEX_ALGO;
+
+typedef struct _COEX_DM_8723A_1ANT{
+ // fw mechanism
+ BOOLEAN bCurIgnoreWlanAct;
+ BOOLEAN bPreIgnoreWlanAct;
+ u1Byte prePsTdma;
+ u1Byte curPsTdma;
+ u1Byte psTdmaPara[5];
+ u1Byte psTdmaDuAdjType;
+ u4Byte psTdmaMonitorCnt;
+ u4Byte psTdmaGlobalCnt;
+ BOOLEAN bResetTdmaAdjust;
+ BOOLEAN bPrePsTdmaOn;
+ BOOLEAN bCurPsTdmaOn;
+
+ // sw mechanism
+ BOOLEAN bPreRfRxLpfShrink;
+ BOOLEAN bCurRfRxLpfShrink;
+ u4Byte btRf0x1eBackup;
+ BOOLEAN bPreLowPenaltyRa;
+ BOOLEAN bCurLowPenaltyRa;
+ u4Byte preVal0x6c0;
+ u4Byte curVal0x6c0;
+ u4Byte preVal0x6c8;
+ u4Byte curVal0x6c8;
+ u1Byte preVal0x6cc;
+ u1Byte curVal0x6cc;
+ BOOLEAN limited_dig;
+
+ // algorithm related
+ u1Byte preAlgorithm;
+ u1Byte curAlgorithm;
+ u1Byte btStatus;
+ u1Byte wifiChnlInfo[3];
+} COEX_DM_8723A_1ANT, *PCOEX_DM_8723A_1ANT;
+
+typedef struct _COEX_STA_8723A_1ANT{
+ u4Byte highPriorityTx;
+ u4Byte highPriorityRx;
+ u4Byte lowPriorityTx;
+ u4Byte lowPriorityRx;
+ u1Byte btRssi;
+ u1Byte preBtRssiState;
+ u1Byte preBtRssiState1;
+ u1Byte preWifiRssiState[4];
+ BOOLEAN bC2hBtInfoReqSent;
+ u1Byte btInfoC2h[BT_INFO_SRC_8723A_1ANT_MAX][10];
+ u4Byte btInfoC2hCnt[BT_INFO_SRC_8723A_1ANT_MAX];
+ BOOLEAN bC2hBtInquiryPage;
+ u1Byte btRetryCnt;
+ u1Byte btInfoExt;
+ //BOOLEAN bHoldForStackOperation;
+ //u1Byte bHoldPeriodCnt;
+	// this is for the c2h hang workaround
+ u4Byte c2hHangDetectCnt;
+}COEX_STA_8723A_1ANT, *PCOEX_STA_8723A_1ANT;
+
+//===========================================
+// The following is interface which will notify coex module.
+//===========================================
+VOID
+EXhalbtc8723a1ant_InitHwConfig(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8723a1ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8723a1ant_IpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a1ant_LpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a1ant_ScanNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a1ant_ConnectNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a1ant_MediaStatusNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a1ant_SpecialPacketNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a1ant_BtInfoNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN pu1Byte tmpBuf,
+ IN u1Byte length
+ );
+VOID
+EXhalbtc8723a1ant_StackOperationNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a1ant_HaltNotify(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8723a1ant_Periodical(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8723a1ant_DisplayCoexInfo(
+ IN PBTC_COEXIST pBtCoexist
+ );
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbt_precomp.h b/drivers/staging/rtl8821ae/btcoexist/halbt_precomp.h
new file mode 100644
index 000000000000..d538ba3d0a2e
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbt_precomp.h
@@ -0,0 +1,99 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __HALBT_PRECOMP_H__
+#define __HALBT_PRECOMP_H__
+/*************************************************************
+ * include files
+ *************************************************************/
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../regd.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../pci.h"
+#include "../rtl8821ae/reg.h"
+#include "../rtl8821ae/def.h"
+#include "../rtl8821ae/phy.h"
+#include "../rtl8821ae/dm.h"
+#include "../rtl8821ae/fw.h"
+#include "../rtl8821ae/led.h"
+#include "../rtl8821ae/hw.h"
+#include "../rtl8821ae/pwrseqcmd.h"
+#include "../rtl8821ae/pwrseq.h"
+
+#include "halbtcoutsrc.h"
+
+
+#include "halbtc8192e2ant.h"
+#include "halbtc8723b1ant.h"
+#include "halbtc8723b2ant.h"
+
+
+
+#define GetDefaultAdapter(padapter) padapter
+
+
+#define BIT0 0x00000001
+#define BIT1 0x00000002
+#define BIT2 0x00000004
+#define BIT3 0x00000008
+#define BIT4 0x00000010
+#define BIT5 0x00000020
+#define BIT6 0x00000040
+#define BIT7 0x00000080
+#define BIT8 0x00000100
+#define BIT9 0x00000200
+#define BIT10 0x00000400
+#define BIT11 0x00000800
+#define BIT12 0x00001000
+#define BIT13 0x00002000
+#define BIT14 0x00004000
+#define BIT15 0x00008000
+#define BIT16 0x00010000
+#define BIT17 0x00020000
+#define BIT18 0x00040000
+#define BIT19 0x00080000
+#define BIT20 0x00100000
+#define BIT21 0x00200000
+#define BIT22 0x00400000
+#define BIT23 0x00800000
+#define BIT24 0x01000000
+#define BIT25 0x02000000
+#define BIT26 0x04000000
+#define BIT27 0x08000000
+#define BIT28 0x10000000
+#define BIT29 0x20000000
+#define BIT30 0x40000000
+#define BIT31 0x80000000
+
+#define MASKBYTE0 0xff
+#define MASKBYTE1 0xff00
+#define MASKBYTE2 0xff0000
+#define MASKBYTE3 0xff000000
+#define MASKHWORD 0xffff0000
+#define MASKLWORD 0x0000ffff
+#define MASKDWORD 0xffffffff
+#define MASK12BITS 0xfff
+#define MASKH4BITS 0xf0000000
+#define MASKOFDM_D 0xffc00000
+#define MASKCCK 0x3f3f3f3f
+
+#endif /* __HALBT_PRECOMP_H__ */
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c
new file mode 100644
index 000000000000..973d0ea82cb8
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c
@@ -0,0 +1,3891 @@
+//============================================================
+// Description:
+//
+// This file is for 8192e1ant Co-exist mechanism
+//
+// History
+// 2012/11/15 Cosa first check in.
+//
+//============================================================
+
+//============================================================
+// include files
+//============================================================
+#include "Mp_Precomp.h"
+#if(BT_30_SUPPORT == 1)
+//============================================================
+// Global variables, these are static variables
+//============================================================
+static COEX_DM_8192E_1ANT GLCoexDm8192e1Ant;
+static PCOEX_DM_8192E_1ANT pCoexDm=&GLCoexDm8192e1Ant;
+static COEX_STA_8192E_1ANT GLCoexSta8192e1Ant;
+static PCOEX_STA_8192E_1ANT pCoexSta=&GLCoexSta8192e1Ant;
+
+const char *const GLBtInfoSrc8192e1Ant[]={
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+u4Byte GLCoexVerDate8192e1Ant=20130729;
+u4Byte GLCoexVer8192e1Ant=0x10;
+
+//============================================================
+// local function prototypes if needed
+//============================================================
+//============================================================
+// local functions start with halbtc8192e1ant_
+//============================================================
+u1Byte
+halbtc8192e1ant_BtRssiState(
+ u1Byte levelNum,
+ u1Byte rssiThresh,
+ u1Byte rssiThresh1
+ )
+{
+ s4Byte btRssi=0;
+ u1Byte btRssiState=pCoexSta->preBtRssiState;
+
+ btRssi = pCoexSta->btRssi;
+
+ if(levelNum == 2)
+ {
+ if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+ {
+ btRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+ }
+ }
+ else
+ {
+ if(btRssi < rssiThresh)
+ {
+ btRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+ }
+ }
+ }
+ else if(levelNum == 3)
+ {
+ if(rssiThresh > rssiThresh1)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi thresh error!!\n"));
+ return pCoexSta->preBtRssiState;
+ }
+
+ if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+ {
+ btRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+ }
+ }
+ else if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_MEDIUM) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_MEDIUM))
+ {
+ if(btRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+ {
+ btRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+ }
+ else if(btRssi < rssiThresh)
+ {
+ btRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Medium\n"));
+ }
+ }
+ else
+ {
+ if(btRssi < rssiThresh1)
+ {
+ btRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+ }
+ }
+ }
+
+ pCoexSta->preBtRssiState = btRssiState;
+
+ return btRssiState;
+}
+
+u1Byte
+halbtc8192e1ant_WifiRssiState(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte index,
+ IN u1Byte levelNum,
+ IN u1Byte rssiThresh,
+ IN u1Byte rssiThresh1
+ )
+{
+ s4Byte wifiRssi=0;
+ u1Byte wifiRssiState=pCoexSta->preWifiRssiState[index];
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+
+ if(levelNum == 2)
+ {
+ if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+ {
+ wifiRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+ }
+ }
+ else
+ {
+ if(wifiRssi < rssiThresh)
+ {
+ wifiRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+ }
+ }
+ }
+ else if(levelNum == 3)
+ {
+ if(rssiThresh > rssiThresh1)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI thresh error!!\n"));
+ return pCoexSta->preWifiRssiState[index];
+ }
+
+ if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+ {
+ wifiRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+ }
+ }
+ else if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_MEDIUM) ||
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_MEDIUM))
+ {
+ if(wifiRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+ {
+ wifiRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+ }
+ else if(wifiRssi < rssiThresh)
+ {
+ wifiRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Medium\n"));
+ }
+ }
+ else
+ {
+ if(wifiRssi < rssiThresh1)
+ {
+ wifiRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+ }
+ }
+ }
+
+ pCoexSta->preWifiRssiState[index] = wifiRssiState;
+
+ return wifiRssiState;
+}
+
+VOID
+halbtc8192e1ant_Updatera_mask(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u1Byte type,
+ IN u4Byte rateMask
+ )
+{
+ if(BTC_RATE_DISABLE == type)
+ {
+ pCoexDm->curra_mask |= rateMask; // disable rate
+ }
+ else if(BTC_RATE_ENABLE == type)
+ {
+ pCoexDm->curra_mask &= ~rateMask; // enable rate
+ }
+
+ if( bForceExec || (pCoexDm->prera_mask != pCoexDm->curra_mask))
+ {
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_UPDATE_ra_mask, &pCoexDm->curra_mask);
+ }
+ pCoexDm->prera_mask = pCoexDm->curra_mask;
+}
+
+VOID
+halbtc8192e1ant_MonitorBtCtr(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u4Byte regHPTxRx, regLPTxRx, u4Tmp;
+ u4Byte regHPTx=0, regHPRx=0, regLPTx=0, regLPRx=0;
+ u1Byte u1Tmp;
+
+ regHPTxRx = 0x770;
+ regLPTxRx = 0x774;
+
+ u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regHPTxRx);
+ regHPTx = u4Tmp & MASKLWORD;
+ regHPRx = (u4Tmp & MASKHWORD)>>16;
+
+ u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regLPTxRx);
+ regLPTx = u4Tmp & MASKLWORD;
+ regLPRx = (u4Tmp & MASKHWORD)>>16;
+
+ pCoexSta->highPriorityTx = regHPTx;
+ pCoexSta->highPriorityRx = regHPRx;
+ pCoexSta->lowPriorityTx = regLPTx;
+ pCoexSta->lowPriorityRx = regLPRx;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], High Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ regHPTxRx, regHPTx, regHPTx, regHPRx, regHPRx));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], Low Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ regLPTxRx, regLPTx, regLPTx, regLPRx, regLPRx));
+
+ // reset counter
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0xc);
+}
+
+VOID
+halbtc8192e1ant_QueryBtInfo(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ pCoexSta->bC2hBtInfoReqSent = true;
+
+ H2C_Parameter[0] |= BIT0; // trigger
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Query Bt Info, FW write 0x61=0x%x\n",
+ H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x61, 1, H2C_Parameter);
+}
+
+BOOLEAN
+halbtc8192e1ant_IsWifiStatusChanged(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ static BOOLEAN bPreWifiBusy=FALSE, bPreUnder4way=FALSE, bPreBtHsOn=FALSE;
+ BOOLEAN bWifiBusy=FALSE, bUnder4way=FALSE, bBtHsOn=FALSE;
+ BOOLEAN bWifiConnected=FALSE;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &bUnder4way);
+
+ if(bWifiConnected)
+ {
+ if(bWifiBusy != bPreWifiBusy)
+ {
+ bPreWifiBusy = bWifiBusy;
+ return true;
+ }
+ if(bUnder4way != bPreUnder4way)
+ {
+ bPreUnder4way = bUnder4way;
+ return true;
+ }
+ if(bBtHsOn != bPreBtHsOn)
+ {
+ bPreBtHsOn = bBtHsOn;
+ return true;
+ }
+ }
+
+ return FALSE;
+}
+
+VOID
+halbtc8192e1ant_UpdateBtLinkInfo(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+
+ pBtLinkInfo->bBtLinkExist = pCoexSta->bBtLinkExist;
+ pBtLinkInfo->bScoExist = pCoexSta->bScoExist;
+ pBtLinkInfo->bA2dpExist = pCoexSta->bA2dpExist;
+ pBtLinkInfo->bPanExist = pCoexSta->bPanExist;
+ pBtLinkInfo->bHidExist = pCoexSta->bHidExist;
+
+ // check if Sco only
+ if( pBtLinkInfo->bScoExist &&
+ !pBtLinkInfo->bA2dpExist &&
+ !pBtLinkInfo->bPanExist &&
+ !pBtLinkInfo->bHidExist )
+ pBtLinkInfo->bScoOnly = true;
+ else
+ pBtLinkInfo->bScoOnly = FALSE;
+
+ // check if A2dp only
+ if( !pBtLinkInfo->bScoExist &&
+ pBtLinkInfo->bA2dpExist &&
+ !pBtLinkInfo->bPanExist &&
+ !pBtLinkInfo->bHidExist )
+ pBtLinkInfo->bA2dpOnly = true;
+ else
+ pBtLinkInfo->bA2dpOnly = FALSE;
+
+ // check if Pan only
+ if( !pBtLinkInfo->bScoExist &&
+ !pBtLinkInfo->bA2dpExist &&
+ pBtLinkInfo->bPanExist &&
+ !pBtLinkInfo->bHidExist )
+ pBtLinkInfo->bPanOnly = true;
+ else
+ pBtLinkInfo->bPanOnly = FALSE;
+
+ // check if Hid only
+ if( !pBtLinkInfo->bScoExist &&
+ !pBtLinkInfo->bA2dpExist &&
+ !pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bHidExist )
+ pBtLinkInfo->bHidOnly = true;
+ else
+ pBtLinkInfo->bHidOnly = FALSE;
+}
+
+u1Byte
+halbtc8192e1ant_ActionAlgorithm(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+ BOOLEAN bBtHsOn=FALSE;
+ u1Byte algorithm=BT_8192E_1ANT_COEX_ALGO_UNDEFINED;
+ u1Byte numOfDiffProfile=0;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+
+ if(!pBtLinkInfo->bBtLinkExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], No BT link exists!!!\n"));
+ return algorithm;
+ }
+
+ if(pBtLinkInfo->bScoExist)
+ numOfDiffProfile++;
+ if(pBtLinkInfo->bHidExist)
+ numOfDiffProfile++;
+ if(pBtLinkInfo->bPanExist)
+ numOfDiffProfile++;
+ if(pBtLinkInfo->bA2dpExist)
+ numOfDiffProfile++;
+
+ if(numOfDiffProfile == 1)
+ {
+ if(pBtLinkInfo->bScoExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO only\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ if(pBtLinkInfo->bHidExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID only\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_HID;
+ }
+ else if(pBtLinkInfo->bA2dpExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP only\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_A2DP;
+ }
+ else if(pBtLinkInfo->bPanExist)
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(HS) only\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_PANHS;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(EDR) only\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR;
+ }
+ }
+ }
+ }
+ else if(numOfDiffProfile == 2)
+ {
+ if(pBtLinkInfo->bScoExist)
+ {
+ if(pBtLinkInfo->bHidExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_HID;
+ }
+ else if(pBtLinkInfo->bA2dpExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP ==> SCO\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_SCO;
+ }
+ else if(pBtLinkInfo->bPanExist)
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(HS)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(EDR)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ else
+ {
+ if( pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bA2dpExist )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP;
+ }
+ else if( pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bPanExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ else if( pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bA2dpExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_A2DP_PANHS;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ }
+ }
+ }
+ else if(numOfDiffProfile == 3)
+ {
+ if(pBtLinkInfo->bScoExist)
+ {
+ if( pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bA2dpExist )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP ==> HID\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_HID;
+ }
+ else if( pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bPanExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(HS)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(EDR)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ else if( pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bA2dpExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(HS)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ else
+ {
+ if( pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bA2dpExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ }
+ }
+ }
+ else if(numOfDiffProfile >= 3)
+ {
+ if(pBtLinkInfo->bScoExist)
+ {
+ if( pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bA2dpExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"));
+
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ }
+
+ return algorithm;
+}
+
+VOID
+halbtc8192e1ant_SetFwDacSwingLevel(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte dacSwingLvl
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ // There are several types of dacswing
+ // 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
+ H2C_Parameter[0] = dacSwingLvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Set Dac Swing Level=0x%x\n", dacSwingLvl));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x64=0x%x\n", H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x64, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8192e1ant_SetFwDecBtPwr(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte decBtPwrLvl
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ H2C_Parameter[0] = decBtPwrLvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], decrease Bt Power level = %d, FW write 0x62=0x%x\n",
+ decBtPwrLvl, H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x62, 1, H2C_Parameter);
+}
+
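+// Like the other NORMAL_EXEC wrappers in this file, this only forwards the
+// request to the SetFw...() helper when bForceExec is set or the cached
+// current value differs from the previous one, so unchanged settings are not
+// re-sent to the firmware.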
+VOID
+halbtc8192e1ant_DecBtPwr(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u1Byte decBtPwrLvl
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s Dec BT power level = %d\n",
+ (bForceExec? "force to":""), decBtPwrLvl));
+ pCoexDm->curBtDecPwrLvl = decBtPwrLvl;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], BtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
+ pCoexDm->preBtDecPwrLvl, pCoexDm->curBtDecPwrLvl));
+
+ if(pCoexDm->preBtDecPwrLvl == pCoexDm->curBtDecPwrLvl)
+ return;
+ }
+ halbtc8192e1ant_SetFwDecBtPwr(pBtCoexist, pCoexDm->curBtDecPwrLvl);
+
+ pCoexDm->preBtDecPwrLvl = pCoexDm->curBtDecPwrLvl;
+}
+
+VOID
+halbtc8192e1ant_SetFwBtLnaConstrain(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bBtLnaConsOn
+ )
+{
+ u1Byte H2C_Parameter[2] ={0};
+
+ H2C_Parameter[0] = 0x3; // opCode, 0x3=BT_SET_LNA_CONSTRAIN
+
+ if(bBtLnaConsOn)
+ {
+ H2C_Parameter[1] |= BIT0;
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set BT LNA Constrain: %s, FW write 0x69=0x%x\n",
+ (bBtLnaConsOn? "ON!!":"OFF!!"),
+ H2C_Parameter[0]<<8|H2C_Parameter[1]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x69, 2, H2C_Parameter);
+}
+
+VOID
+halbtc8192e1ant_SetBtLnaConstrain(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bBtLnaConsOn
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT Constrain = %s\n",
+ (bForceExec? "force":""), ((bBtLnaConsOn)? "ON":"OFF")));
+ pCoexDm->bCurBtLnaConstrain = bBtLnaConsOn;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtLnaConstrain=%d, bCurBtLnaConstrain=%d\n",
+ pCoexDm->bPreBtLnaConstrain, pCoexDm->bCurBtLnaConstrain));
+
+ if(pCoexDm->bPreBtLnaConstrain == pCoexDm->bCurBtLnaConstrain)
+ return;
+ }
+ halbtc8192e1ant_SetFwBtLnaConstrain(pBtCoexist, pCoexDm->bCurBtLnaConstrain);
+
+ pCoexDm->bPreBtLnaConstrain = pCoexDm->bCurBtLnaConstrain;
+}
+
+VOID
+halbtc8192e1ant_SetFwBtPsdMode(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte btPsdMode
+ )
+{
+ u1Byte H2C_Parameter[2] ={0};
+
+ H2C_Parameter[0] = 0x2; // opCode, 0x2=BT_SET_PSD_MODE
+
+ H2C_Parameter[1] = btPsdMode;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set BT PSD mode=0x%x, FW write 0x69=0x%x\n",
+ H2C_Parameter[1],
+ H2C_Parameter[0]<<8|H2C_Parameter[1]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x69, 2, H2C_Parameter);
+}
+
+
+VOID
+halbtc8192e1ant_SetBtPsdMode(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u1Byte btPsdMode
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT PSD mode = 0x%x\n",
+ (bForceExec? "force":""), btPsdMode));
+ pCoexDm->bCurBtPsdMode = btPsdMode;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtPsdMode=0x%x, bCurBtPsdMode=0x%x\n",
+ pCoexDm->bPreBtPsdMode, pCoexDm->bCurBtPsdMode));
+
+ if(pCoexDm->bPreBtPsdMode == pCoexDm->bCurBtPsdMode)
+ return;
+ }
+ halbtc8192e1ant_SetFwBtPsdMode(pBtCoexist, pCoexDm->bCurBtPsdMode);
+
+ pCoexDm->bPreBtPsdMode = pCoexDm->bCurBtPsdMode;
+}
+
+
+VOID
+halbtc8192e1ant_SetBtAutoReport(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bEnableAutoReport
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ H2C_Parameter[0] = 0;
+
+ if(bEnableAutoReport)
+ {
+ H2C_Parameter[0] |= BIT0;
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n",
+ (bEnableAutoReport? "Enabled!!":"Disabled!!"), H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x68, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8192e1ant_BtAutoReport(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bEnableAutoReport
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT Auto report = %s\n",
+ (bForceExec? "force to":""), ((bEnableAutoReport)? "Enabled":"Disabled")));
+ pCoexDm->bCurBtAutoReport = bEnableAutoReport;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
+ pCoexDm->bPreBtAutoReport, pCoexDm->bCurBtAutoReport));
+
+ if(pCoexDm->bPreBtAutoReport == pCoexDm->bCurBtAutoReport)
+ return;
+ }
+ halbtc8192e1ant_SetBtAutoReport(pBtCoexist, pCoexDm->bCurBtAutoReport);
+
+ pCoexDm->bPreBtAutoReport = pCoexDm->bCurBtAutoReport;
+}
+
+VOID
+halbtc8192e1ant_FwDacSwingLvl(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u1Byte fwDacSwingLvl
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set FW Dac Swing level = %d\n",
+ (bForceExec? "force to":""), fwDacSwingLvl));
+ pCoexDm->curFwDacSwingLvl = fwDacSwingLvl;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+ pCoexDm->preFwDacSwingLvl, pCoexDm->curFwDacSwingLvl));
+
+ if(pCoexDm->preFwDacSwingLvl == pCoexDm->curFwDacSwingLvl)
+ return;
+ }
+
+ halbtc8192e1ant_SetFwDacSwingLevel(pBtCoexist, pCoexDm->curFwDacSwingLvl);
+
+ pCoexDm->preFwDacSwingLvl = pCoexDm->curFwDacSwingLvl;
+}
+
+VOID
+halbtc8192e1ant_SetSwRfRxLpfCorner(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bRxRfShrinkOn
+ )
+{
+ if(bRxRfShrinkOn)
+ {
+ //Shrink RF Rx LPF corner
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Shrink RF Rx LPF corner!!\n"));
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, 0xf0ff7);
+ }
+ else
+ {
+ //Resume RF Rx LPF corner
+ // After initialization, we can use pCoexDm->btRf0x1eBackup
+ if(pBtCoexist->initilized)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n"));
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, pCoexDm->btRf0x1eBackup);
+ }
+ }
+}
+
+VOID
+halbtc8192e1ant_RfShrink(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bRxRfShrinkOn
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (bForceExec? "force to":""), ((bRxRfShrinkOn)? "ON":"OFF")));
+ pCoexDm->bCurRfRxLpfShrink = bRxRfShrinkOn;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
+ pCoexDm->bPreRfRxLpfShrink, pCoexDm->bCurRfRxLpfShrink));
+
+ if(pCoexDm->bPreRfRxLpfShrink == pCoexDm->bCurRfRxLpfShrink)
+ return;
+ }
+ halbtc8192e1ant_SetSwRfRxLpfCorner(pBtCoexist, pCoexDm->bCurRfRxLpfShrink);
+
+ pCoexDm->bPreRfRxLpfShrink = pCoexDm->bCurRfRxLpfShrink;
+}
+
+VOID
+halbtc8192e1ant_SetSwPenaltyTxRateAdaptive(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bLowPenaltyRa
+ )
+{
+ u1Byte tmpU1;
+
+ tmpU1 = pBtCoexist->btc_read_1byte(pBtCoexist, 0x4fd);
+ tmpU1 |= BIT0;
+ if(bLowPenaltyRa)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set low penalty!!\n"));
+ tmpU1 &= ~BIT2;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set normal!!\n"));
+ tmpU1 |= BIT2;
+ }
+
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x4fd, tmpU1);
+}
+
+VOID
+halbtc8192e1ant_LowPenaltyRa(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bLowPenaltyRa
+ )
+{
+ return; // this routine is currently disabled; the code below is never reached
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (bForceExec? "force to":""), ((bLowPenaltyRa)? "ON":"OFF")));
+ pCoexDm->bCurLowPenaltyRa = bLowPenaltyRa;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+ pCoexDm->bPreLowPenaltyRa, pCoexDm->bCurLowPenaltyRa));
+
+ if(pCoexDm->bPreLowPenaltyRa == pCoexDm->bCurLowPenaltyRa)
+ return;
+ }
+ halbtc8192e1ant_SetSwPenaltyTxRateAdaptive(pBtCoexist, pCoexDm->bCurLowPenaltyRa);
+
+ pCoexDm->bPreLowPenaltyRa = pCoexDm->bCurLowPenaltyRa;
+}
+
+VOID
+halbtc8192e1ant_SetDacSwingReg(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u4Byte level
+ )
+{
+ u1Byte val=(u1Byte)level;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Write SwDacSwing = 0x%x\n", level));
+ pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0x883, 0x3e, val);
+}
+
+VOID
+halbtc8192e1ant_SetSwFullTimeDacSwing(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bSwDacSwingOn,
+ IN u4Byte swDacSwingLvl
+ )
+{
+ if(bSwDacSwingOn)
+ {
+ halbtc8192e1ant_SetDacSwingReg(pBtCoexist, swDacSwingLvl);
+ }
+ else
+ {
+ halbtc8192e1ant_SetDacSwingReg(pBtCoexist, 0x18);
+ }
+}
+
+
+VOID
+halbtc8192e1ant_DacSwing(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bDacSwingOn,
+ IN u4Byte dacSwingLvl
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn DacSwing=%s, dacSwingLvl=0x%x\n",
+ (bForceExec? "force to":""), ((bDacSwingOn)? "ON":"OFF"), dacSwingLvl));
+ pCoexDm->bCurDacSwingOn = bDacSwingOn;
+ pCoexDm->curDacSwingLvl = dacSwingLvl;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ pCoexDm->bPreDacSwingOn, pCoexDm->preDacSwingLvl,
+ pCoexDm->bCurDacSwingOn, pCoexDm->curDacSwingLvl));
+
+ if( (pCoexDm->bPreDacSwingOn == pCoexDm->bCurDacSwingOn) &&
+ (pCoexDm->preDacSwingLvl == pCoexDm->curDacSwingLvl) )
+ return;
+ }
+ mdelay(30);
+ halbtc8192e1ant_SetSwFullTimeDacSwing(pBtCoexist, bDacSwingOn, dacSwingLvl);
+
+ pCoexDm->bPreDacSwingOn = pCoexDm->bCurDacSwingOn;
+ pCoexDm->preDacSwingLvl = pCoexDm->curDacSwingLvl;
+}
+
+VOID
+halbtc8192e1ant_SetAdcBackOff(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bAdcBackOff
+ )
+{
+ if(bAdcBackOff)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level On!\n"));
+ pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0x8db, 0x60, 0x3);
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level Off!\n"));
+ pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0x8db, 0x60, 0x1);
+ }
+}
+
+VOID
+halbtc8192e1ant_AdcBackOff(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bAdcBackOff
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn AdcBackOff = %s\n",
+ (bForceExec? "force to":""), ((bAdcBackOff)? "ON":"OFF")));
+ pCoexDm->bCurAdcBackOff = bAdcBackOff;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n",
+ pCoexDm->bPreAdcBackOff, pCoexDm->bCurAdcBackOff));
+
+ if(pCoexDm->bPreAdcBackOff == pCoexDm->bCurAdcBackOff)
+ return;
+ }
+ halbtc8192e1ant_SetAdcBackOff(pBtCoexist, pCoexDm->bCurAdcBackOff);
+
+ pCoexDm->bPreAdcBackOff = pCoexDm->bCurAdcBackOff;
+}
+
+VOID
+halbtc8192e1ant_SetAgcTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bAgcTableEn
+ )
+{
+ u1Byte rssiAdjustVal=0;
+
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
+ if(bAgcTableEn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table On!\n"));
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x3fa58);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x37a58);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x2fa58);
+ rssiAdjustVal = 8;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table Off!\n"));
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x39258);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x31258);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x29258);
+ }
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
+
+ // set rssiAdjustVal for wifi module.
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, &rssiAdjustVal);
+}
+
+
+VOID
+halbtc8192e1ant_AgcTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bAgcTableEn
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s %s Agc Table\n",
+ (bForceExec? "force to":""), ((bAgcTableEn)? "Enable":"Disable")));
+ pCoexDm->bCurAgcTableEn = bAgcTableEn;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ pCoexDm->bPreAgcTableEn, pCoexDm->bCurAgcTableEn));
+
+ if(pCoexDm->bPreAgcTableEn == pCoexDm->bCurAgcTableEn)
+ return;
+ }
+ halbtc8192e1ant_SetAgcTable(pBtCoexist, bAgcTableEn);
+
+ pCoexDm->bPreAgcTableEn = pCoexDm->bCurAgcTableEn;
+}
+
+VOID
+halbtc8192e1ant_SetCoexTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u4Byte val0x6c0,
+ IN u4Byte val0x6c4,
+ IN u4Byte val0x6c8,
+ IN u1Byte val0x6cc
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c4, val0x6c4);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc));
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x6cc, val0x6cc);
+}
+
+VOID
+halbtc8192e1ant_CoexTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u4Byte val0x6c0,
+ IN u4Byte val0x6c4,
+ IN u4Byte val0x6c8,
+ IN u1Byte val0x6cc
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ (bForceExec? "force to":""), val0x6c0, val0x6c4, val0x6c8, val0x6cc));
+ pCoexDm->curVal0x6c0 = val0x6c0;
+ pCoexDm->curVal0x6c4 = val0x6c4;
+ pCoexDm->curVal0x6c8 = val0x6c8;
+ pCoexDm->curVal0x6cc = val0x6cc;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+ pCoexDm->preVal0x6c0, pCoexDm->preVal0x6c4, pCoexDm->preVal0x6c8, pCoexDm->preVal0x6cc));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+ pCoexDm->curVal0x6c0, pCoexDm->curVal0x6c4, pCoexDm->curVal0x6c8, pCoexDm->curVal0x6cc));
+
+ if( (pCoexDm->preVal0x6c0 == pCoexDm->curVal0x6c0) &&
+ (pCoexDm->preVal0x6c4 == pCoexDm->curVal0x6c4) &&
+ (pCoexDm->preVal0x6c8 == pCoexDm->curVal0x6c8) &&
+ (pCoexDm->preVal0x6cc == pCoexDm->curVal0x6cc) )
+ return;
+ }
+ halbtc8192e1ant_SetCoexTable(pBtCoexist, val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+
+ pCoexDm->preVal0x6c0 = pCoexDm->curVal0x6c0;
+ pCoexDm->preVal0x6c4 = pCoexDm->curVal0x6c4;
+ pCoexDm->preVal0x6c8 = pCoexDm->curVal0x6c8;
+ pCoexDm->preVal0x6cc = pCoexDm->curVal0x6cc;
+}
+
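+// Select one of the predefined coex table register sets (0x6c0/0x6c4/0x6c8/0x6cc)
+// by table type index.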
+VOID
+halbtc8192e1ant_CoexTableWithType(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u1Byte type
+ )
+{
+ switch(type)
+ {
+ case 0:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x55555555, 0x55555555, 0xffffff, 0x3);
+ break;
+ case 1:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x55555555, 0x5a5a5a5a, 0xffffff, 0x3);
+ break;
+ case 2:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x5a5a5a5a, 0x5a5a5a5a, 0xffffff, 0x3);
+ break;
+ case 3:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0xaaaaaaaa, 0xaaaaaaaa, 0xffffff, 0x3);
+ break;
+ case 4:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0xffffffff, 0xffffffff, 0xffffff, 0x3);
+ break;
+ case 5:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x5fff5fff, 0x5fff5fff, 0xffffff, 0x3);
+ break;
+ case 6:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x55ff55ff, 0x5a5a5a5a, 0xffffff, 0x3);
+ break;
+ case 7:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0xddffddff, 0xddffddff, 0xffffff, 0x3);
+ break;
+ case 8:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x55ff55ff, 0x5afa5afa, 0xffffff, 0x3);
+ break;
+ case 9:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x5f5f5f5f, 0x5f5f5f5f, 0xffffff, 0x3);
+ break;
+ default:
+ break;
+ }
+}
+
+VOID
+halbtc8192e1ant_SetFwIgnoreWlanAct(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bEnable
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ if(bEnable)
+ {
+ H2C_Parameter[0] |= BIT0; // function enable
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
+ H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x63, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8192e1ant_IgnoreWlanAct(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bEnable
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn Ignore WlanAct %s\n",
+ (bForceExec? "force to":""), (bEnable? "ON":"OFF")));
+ pCoexDm->bCurIgnoreWlanAct = bEnable;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ pCoexDm->bPreIgnoreWlanAct, pCoexDm->bCurIgnoreWlanAct));
+
+ if(pCoexDm->bPreIgnoreWlanAct == pCoexDm->bCurIgnoreWlanAct)
+ return;
+ }
+ halbtc8192e1ant_SetFwIgnoreWlanAct(pBtCoexist, bEnable);
+
+ pCoexDm->bPreIgnoreWlanAct = pCoexDm->bCurIgnoreWlanAct;
+}
+
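+// Send the 5-byte PS-TDMA parameter block to the firmware via H2C command 0x60
+// and keep a copy in pCoexDm->psTdmaPara[].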
+VOID
+halbtc8192e1ant_SetFwPstdma(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte byte1,
+ IN u1Byte byte2,
+ IN u1Byte byte3,
+ IN u1Byte byte4,
+ IN u1Byte byte5
+ )
+{
+ u1Byte H2C_Parameter[5] ={0};
+
+ H2C_Parameter[0] = byte1;
+ H2C_Parameter[1] = byte2;
+ H2C_Parameter[2] = byte3;
+ H2C_Parameter[3] = byte4;
+ H2C_Parameter[4] = byte5;
+
+ pCoexDm->psTdmaPara[0] = byte1;
+ pCoexDm->psTdmaPara[1] = byte2;
+ pCoexDm->psTdmaPara[2] = byte3;
+ pCoexDm->psTdmaPara[3] = byte4;
+ pCoexDm->psTdmaPara[4] = byte5;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+ H2C_Parameter[0],
+ H2C_Parameter[1]<<24|H2C_Parameter[2]<<16|H2C_Parameter[3]<<8|H2C_Parameter[4]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x60, 5, H2C_Parameter);
+}
+
+VOID
+halbtc8192e1ant_SetLpsRpwm(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte lpsVal,
+ IN u1Byte rpwmVal
+ )
+{
+ u1Byte lps=lpsVal;
+ u1Byte rpwm=rpwmVal;
+
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_1ANT_LPS, &lps);
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_1ANT_RPWM, &rpwm);
+}
+
+VOID
+halbtc8192e1ant_LpsRpwm(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u1Byte lpsVal,
+ IN u1Byte rpwmVal
+ )
+{
+ BOOLEAN bForceExecPwrCmd=FALSE;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set lps/rpwm=0x%x/0x%x \n",
+ (bForceExec? "force to":""), lpsVal, rpwmVal));
+ pCoexDm->curLps = lpsVal;
+ pCoexDm->curRpwm = rpwmVal;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], preLps/curLps=0x%x/0x%x, preRpwm/curRpwm=0x%x/0x%x!!\n",
+ pCoexDm->preLps, pCoexDm->curLps, pCoexDm->preRpwm, pCoexDm->curRpwm));
+
+ if( (pCoexDm->preLps == pCoexDm->curLps) &&
+ (pCoexDm->preRpwm == pCoexDm->curRpwm) )
+ {
+ return;
+ }
+ }
+ halbtc8192e1ant_SetLpsRpwm(pBtCoexist, lpsVal, rpwmVal);
+
+ pCoexDm->preLps = pCoexDm->curLps;
+ pCoexDm->preRpwm = pCoexDm->curRpwm;
+}
+
+VOID
+halbtc8192e1ant_SwMechanism1(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bShrinkRxLPF,
+ IN BOOLEAN bLowPenaltyRA,
+ IN BOOLEAN limited_dig,
+ IN BOOLEAN bBTLNAConstrain
+ )
+{
+ //halbtc8192e1ant_RfShrink(pBtCoexist, NORMAL_EXEC, bShrinkRxLPF);
+ //halbtc8192e1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, bLowPenaltyRA);
+
+ //no limited DIG
+ //halbtc8192e1ant_SetBtLnaConstrain(pBtCoexist, NORMAL_EXEC, bBTLNAConstrain);
+}
+
+VOID
+halbtc8192e1ant_SwMechanism2(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bAGCTableShift,
+ IN BOOLEAN bADCBackOff,
+ IN BOOLEAN bSWDACSwing,
+ IN u4Byte dacSwingLvl
+ )
+{
+ //halbtc8192e1ant_AgcTable(pBtCoexist, NORMAL_EXEC, bAGCTableShift);
+ //halbtc8192e1ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, bADCBackOff);
+ //halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, bSWDACSwing, dacSwingLvl);
+}
+
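+// Turn PS-TDMA on with one of the predefined firmware parameter sets, or turn
+// it off and steer the antenna via register 0x92c. Note that the rssiAdjustVal
+// computed in several of the "on" cases is overwritten with 0 before it is
+// handed to btc_set() at the end of this function.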
+VOID
+halbtc8192e1ant_PsTdma(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bTurnOn,
+ IN u1Byte type
+ )
+{
+ BOOLEAN bTurnOnByCnt=FALSE;
+ u1Byte psTdmaTypeByCnt=0, rssiAdjustVal=0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (bForceExec? "force to":""), (bTurnOn? "ON":"OFF"), type));
+ pCoexDm->bCurPsTdmaOn = bTurnOn;
+ pCoexDm->curPsTdma = type;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ pCoexDm->bPrePsTdmaOn, pCoexDm->bCurPsTdmaOn));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ pCoexDm->prePsTdma, pCoexDm->curPsTdma));
+
+ if( (pCoexDm->bPrePsTdmaOn == pCoexDm->bCurPsTdmaOn) &&
+ (pCoexDm->prePsTdma == pCoexDm->curPsTdma) )
+ return;
+ }
+ if(bTurnOn)
+ {
+ switch(type)
+ {
+ default:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x2c, 0x03, 0x10, 0x50);
+ break;
+ case 1:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x2c, 0x03, 0x10, 0x50);
+ rssiAdjustVal = 11;
+ break;
+ case 2:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x25, 0x03, 0x10, 0x50);
+ rssiAdjustVal = 14;
+ break;
+ case 3:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x25, 0x3, 0x10, 0x40);
+ break;
+ case 4:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x15, 0x3, 0x14, 0x0);
+ rssiAdjustVal = 17;
+ break;
+ case 5:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x61, 0x15, 0x3, 0x31, 0x0);
+ break;
+ case 6:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0xa, 0x3, 0x0, 0x0);
+ break;
+ case 7:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0xc, 0x5, 0x0, 0x0);
+ break;
+ case 8:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x25, 0x3, 0x10, 0x0);
+ break;
+ case 9:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x1e, 0x03, 0x10, 0x50);
+ rssiAdjustVal = 18;
+ break;
+ case 10:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0xa, 0xa, 0x0, 0x40);
+ break;
+ case 11:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x12, 0x03, 0x10, 0x50);
+ rssiAdjustVal = 20;
+ break;
+ case 12:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xeb, 0xa, 0x3, 0x31, 0x18);
+ break;
+
+ case 15:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0xa, 0x3, 0x8, 0x0);
+ break;
+ case 16:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x15, 0x3, 0x10, 0x0);
+ rssiAdjustVal = 18;
+ break;
+
+ case 18:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x25, 0x3, 0x10, 0x0);
+ rssiAdjustVal = 14;
+ break;
+
+ case 20:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0x25, 0x25, 0x0, 0x0);
+ break;
+ case 21:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x20, 0x3, 0x10, 0x40);
+ break;
+ case 22:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0x8, 0x8, 0x0, 0x40);
+ break;
+ case 23:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x3, 0x31, 0x18);
+ rssiAdjustVal = 22;
+ break;
+ case 24:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x15, 0x3, 0x31, 0x18);
+ rssiAdjustVal = 22;
+ break;
+ case 25:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0x3, 0x31, 0x18);
+ rssiAdjustVal = 22;
+ break;
+ case 26:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0x3, 0x31, 0x18);
+ rssiAdjustVal = 22;
+ break;
+ case 27:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x3, 0x31, 0x98);
+ rssiAdjustVal = 22;
+ break;
+ case 28:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x69, 0x25, 0x3, 0x31, 0x0);
+ break;
+ case 29:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xab, 0x1a, 0x1a, 0x1, 0x10);
+ break;
+ case 30:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x15, 0x3, 0x14, 0x0);
+ break;
+ case 31:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xd3, 0x1a, 0x1a, 0, 0x58);
+ break;
+ case 32:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xab, 0xa, 0x3, 0x31, 0x90);
+ break;
+ case 33:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xa3, 0x25, 0x3, 0x30, 0x90);
+ break;
+ case 34:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x10);
+ break;
+ case 35:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0x0, 0x10);
+ break;
+ case 36:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xd3, 0x12, 0x3, 0x14, 0x50);
+ break;
+ case 37:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x25, 0x3, 0x10, 0x50);
+ break;
+ case 38:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0xe1, 0x90);
+ break;
+ }
+ }
+ else
+ {
+ // disable PS tdma
+ switch(type)
+ {
+ case 8: //0x778 = 1, ant2PTA
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x8, 0x0, 0x0, 0x0, 0x0);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x4);
+ break;
+ case 0: //0x778 = 1, ant2BT
+ default:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+ mdelay(5);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x20);
+ break;
+ case 9: //0x778 = 1, ant2WIFI
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x4);
+ break;
+ case 10: //0x778 = 3, ant2BT
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x8, 0x0);
+ mdelay(5);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x20);
+ break;
+ }
+ }
+ rssiAdjustVal =0;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, &rssiAdjustVal);
+
+ // update pre state
+ pCoexDm->bPrePsTdmaOn = pCoexDm->bCurPsTdmaOn;
+ pCoexDm->prePsTdma = pCoexDm->curPsTdma;
+}
+
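+// Switch between 1SS and 2SS reception: update the RA mask, reprogram the
+// OFDM/CCK RX paths and send the matching MIMO power-save setting
+// (static for 1SS, dynamic for 2SS).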
+VOID
+halbtc8192e1ant_SetSwitchSsType(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte ssType
+ )
+{
+ u1Byte mimoPs=BTC_MIMO_PS_DYNAMIC;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], REAL set SS Type = %d\n", ssType));
+
+ if(ssType == 1)
+ {
+ halbtc8192e1ant_Updatera_mask(pBtCoexist, FORCE_EXEC, BTC_RATE_DISABLE, 0xfff00000); // disable 2ss
+ halbtc8192e1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 0);
+ // switch ofdm path
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0xc04, 0x11);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0xd04, 0x1);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x90c, 0x81111111);
+ // switch cck path
+ pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0xe77, 0x4, 0x1);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0xa07, 0x81);
+ mimoPs=BTC_MIMO_PS_STATIC;
+ }
+ else if(ssType == 2)
+ {
+ halbtc8192e1ant_Updatera_mask(pBtCoexist, FORCE_EXEC, BTC_RATE_ENABLE, 0xfff00000); // enable 2ss
+ halbtc8192e1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 8);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0xc04, 0x33);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0xd04, 0x3);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x90c, 0x81121313);
+ pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0xe77, 0x4, 0x0);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0xa07, 0x41);
+ mimoPs=BTC_MIMO_PS_DYNAMIC;
+ }
+
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_SEND_MIMO_PS, &mimoPs); // set rx 1ss or 2ss
+}
+
+VOID
+halbtc8192e1ant_SwitchSsType(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u1Byte newSsType
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], %s Switch SS Type = %d\n",
+ (bForceExec? "force to":""), newSsType));
+ pCoexDm->curSsType = newSsType;
+
+ if(!bForceExec)
+ {
+ if(pCoexDm->preSsType == pCoexDm->curSsType)
+ return;
+ }
+ halbtc8192e1ant_SetSwitchSsType(pBtCoexist, pCoexDm->curSsType);
+
+ pCoexDm->preSsType = pCoexDm->curSsType;
+}
+
+VOID
+halbtc8192e1ant_CoexAllOff(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ // sw all off
+ halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+
+ // hw all off
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+}
+
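+// Handle the "common" cases where WiFi and/or BT are idle by applying the
+// corresponding software mechanism settings; returns true when one of these
+// idle combinations matched, false otherwise.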
+BOOLEAN
+halbtc8192e1ant_IsCommonAction(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bCommon=FALSE, bWifiConnected=FALSE, bWifiBusy=FALSE;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+
+ if(!bWifiConnected &&
+ BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"));
+ halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+ bCommon = true;
+ }
+ else if(bWifiConnected &&
+ (BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT non connected-idle!!\n"));
+ halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+ bCommon = true;
+ }
+ else if(!bWifiConnected &&
+ (BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"));
+ halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+ bCommon = true;
+ }
+ else if(bWifiConnected &&
+ (BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT connected-idle!!\n"));
+ halbtc8192e1ant_SwMechanism1(pBtCoexist,true,true,true,true);
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+ bCommon = true;
+ }
+ else if(!bWifiConnected &&
+ (BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE != pCoexDm->btStatus) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT Busy!!\n"));
+ halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+ bCommon = true;
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism1(pBtCoexist,true,true,true,true);
+
+ bCommon = FALSE;
+ }
+
+ return bCommon;
+}
+
+
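+// Adaptive TDMA tuning for BT ACL traffic: each call looks at the BT retry
+// count for the last 2-second window and, through the static up/dn/m/n/WaitCount
+// counters, widens the WiFi duration after n consecutive clean windows or
+// narrows it when retries appear, stepping between TDMA types 1/2/9/11.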
+VOID
+halbtc8192e1ant_TdmaDurationAdjustForAcl(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte wifiStatus
+ )
+{
+ static s4Byte up,dn,m,n,WaitCount;
+ s4Byte result; //0: no change, +1: increase WiFi duration, -1: decrease WiFi duration
+ u1Byte retryCount=0, btInfoExt;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], TdmaDurationAdjustForAcl()\n"));
+
+ if( (BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifiStatus) ||
+ (BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifiStatus) ||
+ (BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifiStatus) )
+ {
+ if( pCoexDm->curPsTdma != 1 &&
+ pCoexDm->curPsTdma != 2 &&
+ pCoexDm->curPsTdma != 3 &&
+ pCoexDm->curPsTdma != 9 )
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+
+ up = 0;
+ dn = 0;
+ m = 1;
+ n= 3;
+ result = 0;
+ WaitCount = 0;
+ }
+ return;
+ }
+
+ if(!pCoexDm->bAutoTdmaAdjust)
+ {
+ pCoexDm->bAutoTdmaAdjust = true;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], first run TdmaDurationAdjust()!!\n"));
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ //============
+ up = 0;
+ dn = 0;
+ m = 1;
+ n= 3;
+ result = 0;
+ WaitCount = 0;
+ }
+ else
+ {
+ // acquire the BT TRx retry count from BT_Info byte2
+ retryCount = pCoexSta->btRetryCnt;
+ btInfoExt = pCoexSta->btInfoExt;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], up=%d, dn=%d, m=%d, n=%d, WaitCount=%d\n",
+ up, dn, m, n, WaitCount));
+ result = 0;
+ WaitCount++;
+
+ if(retryCount == 0) // no retry in the last 2-second duration
+ {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+ if(up >= n) // if retry count is 0 for n consecutive 2-second intervals, widen the WiFi duration
+ {
+ WaitCount = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Increase wifi duration!!\n"));
+ }
+ }
+ else if (retryCount <= 3) // <=3 retry in the last 2-second duration
+ {
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+ if (dn == 2) // if retry count < 3 for 2 consecutive 2-second intervals, narrow the WiFi duration
+ {
+ if (WaitCount <= 2)
+ m++; // avoid bouncing back and forth between two levels
+ else
+ m = 1;
+
+ if ( m >= 20) // m max value is 20, i.e. recheck at most every 120 seconds whether to adjust the WiFi duration.
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
+ }
+ }
+ else // retry count > 3: a single interval with retry count > 3 narrows the WiFi duration
+ {
+ if (WaitCount == 1)
+ m++; // avoid bouncing back and forth between two levels
+ else
+ m = 1;
+
+ if ( m >= 20) // m max value is 20, i.e. recheck at most every 120 seconds whether to adjust the WiFi duration.
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n"));
+ }
+
+ if(result == -1)
+ {
+ if( (BT_INFO_8192E_1ANT_A2DP_BASIC_RATE(btInfoExt)) &&
+ ((pCoexDm->curPsTdma == 1) ||(pCoexDm->curPsTdma == 2)) )
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ else if(pCoexDm->curPsTdma == 1)
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ }
+ else if(result == 1)
+ {
+ if( (BT_INFO_8192E_1ANT_A2DP_BASIC_RATE(btInfoExt)) &&
+ ((pCoexDm->curPsTdma == 1) ||(pCoexDm->curPsTdma == 2)) )
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ pCoexDm->psTdmaDuAdjType = 1;
+ }
+ }
+
+ if( pCoexDm->curPsTdma != 1 &&
+ pCoexDm->curPsTdma != 2 &&
+ pCoexDm->curPsTdma != 9 &&
+ pCoexDm->curPsTdma != 11 )
+ {
+ // recover to previous adjust type
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, pCoexDm->psTdmaDuAdjType);
+ }
+ }
+}
+
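+// Pick PS-TDMA type 26 when the WiFi RSSI is above the threshold (plus a margin
+// of 5 while the RSSI is rising), otherwise type 25.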
+u1Byte
+halbtc8192e1ant_PsTdmaTypeByWifiRssi(
+ IN s4Byte wifiRssi,
+ IN s4Byte preWifiRssi,
+ IN u1Byte wifiRssiThresh
+ )
+{
+ u1Byte psTdmaType=0;
+
+ if(wifiRssi > preWifiRssi)
+ {
+ if(wifiRssi > (wifiRssiThresh+5))
+ {
+ psTdmaType = 26;
+ }
+ else
+ {
+ psTdmaType = 25;
+ }
+ }
+ else
+ {
+ if(wifiRssi > wifiRssiThresh)
+ {
+ psTdmaType = 26;
+ }
+ else
+ {
+ psTdmaType = 25;
+ }
+ }
+
+ return psTdmaType;
+}
+
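+// PS-TDMA must be stopped around LPS transitions: turn it off before entering
+// and before leaving LPS, and leave it alone when the power state does not change.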
+VOID
+halbtc8192e1ant_PsTdmaCheckForPowerSaveState(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bNewPsState
+ )
+{
+ u1Byte lpsMode=0x0;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_LPS_MODE, &lpsMode);
+
+ if(lpsMode) // already under LPS state
+ {
+ if(bNewPsState)
+ {
+ // keep state under LPS, do nothing.
+ }
+ else
+ {
+ // will leave LPS state, turn off psTdma first
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ }
+ }
+ else // NO PS state
+ {
+ if(bNewPsState)
+ {
+ // will enter LPS state, turn off psTdma first
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ }
+ else
+ {
+ // keep state under NO PS state, do nothing.
+ }
+ }
+}
+
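+// Enter or leave LPS on behalf of the coex algorithm: BTC_PS_WIFI_NATIVE restores
+// the normal 32k low-power behaviour, BTC_PS_LPS_ON programs lps/rpwm and forces
+// LPS entry with 32k low power disabled, BTC_PS_LPS_OFF leaves LPS.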
+VOID
+halbtc8192e1ant_PowerSaveState(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte psType,
+ IN u1Byte lpsVal,
+ IN u1Byte rpwmVal
+ )
+{
+ BOOLEAN bLowPwrDisable=FALSE;
+
+ switch(psType)
+ {
+ case BTC_PS_WIFI_NATIVE:
+ // recover to original 32k low power setting
+ bLowPwrDisable = FALSE;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+ break;
+ case BTC_PS_LPS_ON:
+ halbtc8192e1ant_PsTdmaCheckForPowerSaveState(pBtCoexist, true);
+ halbtc8192e1ant_LpsRpwm(pBtCoexist, NORMAL_EXEC, lpsVal, rpwmVal);
+ // when coex forces LPS entry, do not enter 32k low power.
+ bLowPwrDisable = true;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+ // power save must be executed before psTdma.
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ break;
+ case BTC_PS_LPS_OFF:
+ halbtc8192e1ant_PsTdmaCheckForPowerSaveState(pBtCoexist, FALSE);
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ break;
+ default:
+ break;
+ }
+}
+
+
+VOID
+halbtc8192e1ant_ActionWifiOnly(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+}
+
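+// Infer whether BT has been switched off from the high/low priority TX/RX
+// counters: all-zero (or all-0xffff) counters for two consecutive polls mark
+// BT as disabled and drop the coex settings back to WiFi-only.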
+VOID
+halbtc8192e1ant_MonitorBtEnableDisable(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ static BOOLEAN bPreBtDisabled=FALSE;
+ static u4Byte btDisableCnt=0;
+ BOOLEAN bBtActive=true, bBtDisabled=FALSE;
+
+ // This function checks whether BT is disabled
+
+ if( pCoexSta->highPriorityTx == 0 &&
+ pCoexSta->highPriorityRx == 0 &&
+ pCoexSta->lowPriorityTx == 0 &&
+ pCoexSta->lowPriorityRx == 0)
+ {
+ bBtActive = FALSE;
+ }
+ if( pCoexSta->highPriorityTx == 0xffff &&
+ pCoexSta->highPriorityRx == 0xffff &&
+ pCoexSta->lowPriorityTx == 0xffff &&
+ pCoexSta->lowPriorityRx == 0xffff)
+ {
+ bBtActive = FALSE;
+ }
+ if(bBtActive)
+ {
+ btDisableCnt = 0;
+ bBtDisabled = FALSE;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is enabled !!\n"));
+ }
+ else
+ {
+ btDisableCnt++;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], bt all counters=0, %d times!!\n",
+ btDisableCnt));
+ if(btDisableCnt >= 2)
+ {
+ bBtDisabled = true;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is disabled !!\n"));
+ halbtc8192e1ant_ActionWifiOnly(pBtCoexist);
+ }
+ }
+ if(bPreBtDisabled != bBtDisabled)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is from %s to %s!!\n",
+ (bPreBtDisabled ? "disabled":"enabled"),
+ (bBtDisabled ? "disabled":"enabled")));
+ bPreBtDisabled = bBtDisabled;
+ if(!bBtDisabled)
+ {
+ }
+ else
+ {
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+ }
+ }
+}
+
+//=============================================
+//
+// Software Coex Mechanism start
+//
+//=============================================
+
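+// The per-profile Action*() handlers below all follow the same pattern: read
+// the WiFi bandwidth and RSSI state, then choose the software mechanism
+// (AGC table / ADC back-off / DAC swing) settings accordingly.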
+// SCO only or SCO+PAN(HS)
+VOID
+halbtc8192e1ant_ActionSco(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState;
+ u4Byte wifiBw;
+
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+
+VOID
+halbtc8192e1ant_ActionHid(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState;
+ u4Byte wifiBw;
+
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,FALSE,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+//A2DP only / PAN(EDR) only/ A2DP+PAN(HS)
+VOID
+halbtc8192e1ant_ActionA2dp(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState;
+ u4Byte wifiBw;
+
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+VOID
+halbtc8192e1ant_ActionA2dpPanHs(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+VOID
+halbtc8192e1ant_ActionPanEdr(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState;
+ u4Byte wifiBw;
+
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+
+//PAN(HS) only
+VOID
+halbtc8192e1ant_ActionPanHs(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState;
+ u4Byte wifiBw;
+
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+//PAN(EDR)+A2DP
+VOID
+halbtc8192e1ant_ActionPanEdrA2dp(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+VOID
+halbtc8192e1ant_ActionPanEdrHid(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState;
+ u4Byte wifiBw;
+
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+// HID+A2DP+PAN(EDR)
+VOID
+halbtc8192e1ant_ActionHidA2dpPanEdr(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+VOID
+halbtc8192e1ant_ActionHidA2dp(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+//=============================================
+//
+// Non-Software Coex Mechanism start
+//
+//=============================================
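+// BT is in inquiry/page scan: keep the original DAC swing setting and pick a
+// power-save / TDMA / coex-table combination based on whether WiFi is connected
+// and which BT profile is busy; nothing is done while BT HS is operating.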
+VOID
+halbtc8192e1ant_ActionBtInquiry(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+ BOOLEAN bWifiConnected=FALSE, bBtHsOn=FALSE;
+
+ // Note:
+ // Do not do DacSwing here, use original setting.
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ if(bBtHsOn)
+ return;
+
+ if(!bWifiConnected)
+ {
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ }
+ else if( (pBtLinkInfo->bScoExist) ||
+ (pBtLinkInfo->bHidOnly) )
+ {
+ // SCO/HID-only busy
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 32);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 1);
+ }
+ else
+ {
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x0);
+
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 30);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ }
+}
+
+VOID
+halbtc8192e1ant_ActionBtScoHidOnlyBusy(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte wifiStatus
+ )
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+ u1Byte btRssiState=BTC_RSSI_STATE_HIGH;
+
+ if(BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifiStatus)
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ }
+ else
+ {
+ if(pBtLinkInfo->bHidOnly)
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+ else
+ {
+ // dec bt power for diff level
+ btRssiState = halbtc8192e1ant_BtRssiState(3, 34, 42);
+ if( (btRssiState == BTC_RSSI_STATE_LOW) ||
+ (btRssiState == BTC_RSSI_STATE_STAY_LOW) )
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ }
+ else if( (btRssiState == BTC_RSSI_STATE_MEDIUM) ||
+ (btRssiState == BTC_RSSI_STATE_STAY_MEDIUM) )
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+ }
+ else if( (btRssiState == BTC_RSSI_STATE_HIGH) ||
+ (btRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 6);
+ }
+
+ // sw dacSwing
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, true, 0xc);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
+ }
+ }
+}
+
+VOID
+halbtc8192e1ant_ActionHs(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action for HS!!!\n"));
+
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ if(BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus)
+ {
+ // error, should not be here
+ pCoexDm->errorCondition = 1;
+ }
+ else if(BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, true, 6);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 10);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ }
+ else if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus &&
+ !pBtCoexist->bt_link_info.bHidOnly)
+ {
+ if(pCoexDm->curSsType == 1)
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, true, 6);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 10);
+ //halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 38);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ }
+ }
+ else
+ {
+ halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_BUSY);
+ }
+}
+
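+// BT ACL busy while WiFi is connected: HID-only links fall back to the SCO/HID
+// handler, A2DP (alone or with HID) gets the adaptive TDMA duration adjustment,
+// PAN links get fixed TDMA type 3, and anything else gets type 9 or 11 depending
+// on the WiFi status.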
+VOID
+halbtc8192e1ant_ActionWifiConnectedBtAclBusy(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte wifiStatus
+ )
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+
+ if(pBtLinkInfo->bHidOnly)
+ {
+ halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist, wifiStatus);
+ pCoexDm->bAutoTdmaAdjust = FALSE;
+ return;
+ }
+
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ if( (pBtLinkInfo->bA2dpOnly) ||
+ (pBtLinkInfo->bHidExist&&pBtLinkInfo->bA2dpExist) )
+ {
+ halbtc8192e1ant_TdmaDurationAdjustForAcl(pBtCoexist, wifiStatus);
+ }
+ else if( (pBtLinkInfo->bPanOnly) ||
+ (pBtLinkInfo->bHidExist&&pBtLinkInfo->bPanExist) )
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->bAutoTdmaAdjust = FALSE;
+ }
+ else
+ {
+ if( (BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifiStatus) ||
+ (BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifiStatus) ||
+ (BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifiStatus) )
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ else
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->bAutoTdmaAdjust = FALSE;
+ }
+
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 1);
+}
+
+
+VOID
+halbtc8192e1ant_ActionWifiNotConnected(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ // power save state
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+}
+
+VOID
+halbtc8192e1ant_ActionWifiNotConnectedAssoAuthScan(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+}
+
+VOID
+halbtc8192e1ant_ActionWifiConnectedScan(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ // power save state
+ if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus && !pBtCoexist->bt_link_info.bHidOnly)
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x0);
+ else
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus)
+ {
+ halbtc8192e1ant_ActionWifiConnectedBtAclBusy(pBtCoexist,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN);
+ }
+ else if( (BT_8192E_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+ (BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus) )
+ {
+ halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN);
+ }
+ else
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+}
+
+
+VOID
+halbtc8192e1ant_ActionWifiConnectedSpecialPacket(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ // power save state
+ if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus && !pBtCoexist->bt_link_info.bHidOnly)
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x0);
+ else
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus)
+ {
+ halbtc8192e1ant_ActionWifiConnectedBtAclBusy(pBtCoexist,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT);
+ }
+ else
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+}
+
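+// Top-level coex action for the wifi-connected case: the 4-way handshake
+// and scan/link/roam states are handled by their dedicated actions first,
+// then LPS or native power save is selected and a per-BT-status action is
+// run depending on whether wifi is busy.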
+VOID
+halbtc8192e1ant_ActionWifiConnected(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bWifiConnected=FALSE, bWifiBusy=FALSE;
+ BOOLEAN bScan=FALSE, bLink=FALSE, bRoam=FALSE;
+ BOOLEAN bUnder4way=FALSE;
+ u4Byte wifiBw;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect()===>\n"));
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ if(!bWifiConnected)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi not connected<===\n"));
+ return;
+ }
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &bUnder4way);
+ if(bUnder4way)
+ {
+ halbtc8192e1ant_ActionWifiConnectedSpecialPacket(pBtCoexist);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"));
+ return;
+ }
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+ if(bScan || bLink || bRoam)
+ {
+ halbtc8192e1ant_ActionWifiConnectedScan(pBtCoexist);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"));
+ return;
+ }
+
+ // power save state
+ if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus && !pBtCoexist->bt_link_info.bHidOnly)
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x0);
+ else
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+ if(!bWifiBusy)
+ {
+ if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus)
+ {
+ halbtc8192e1ant_ActionWifiConnectedBtAclBusy(pBtCoexist,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_IDLE);
+ }
+ else if( (BT_8192E_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+ (BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus) )
+ {
+ halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_IDLE);
+ }
+ else
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+ }
+ else
+ {
+ if(BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus)
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+ else if(BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+ else if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus)
+ {
+ halbtc8192e1ant_ActionWifiConnectedBtAclBusy(pBtCoexist,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_BUSY);
+ }
+ else if( (BT_8192E_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+ (BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus) )
+ {
+ halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_BUSY);
+ }
+ else
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+ }
+}
+
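+// Software coex dispatcher keyed on the detected profile algorithm. Note
+// that the unconditional return at the top currently disables everything
+// below it.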
+VOID
+halbtc8192e1ant_RunSwCoexistMechanism(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bWifiUnder5G=FALSE, bWifiBusy=FALSE, bWifiConnected=FALSE;
+ u1Byte btInfoOriginal=0, btRetryCnt=0;
+ u1Byte algorithm=0;
+
+ return;
+
+ algorithm = halbtc8192e1ant_ActionAlgorithm(pBtCoexist);
+ pCoexDm->curAlgorithm = algorithm;
+
+ if(halbtc8192e1ant_IsCommonAction(pBtCoexist))
+ {
+ }
+ else
+ {
+ switch(pCoexDm->curAlgorithm)
+ {
+ case BT_8192E_1ANT_COEX_ALGO_SCO:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = SCO.\n"));
+ halbtc8192e1ant_ActionSco(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID.\n"));
+ halbtc8192e1ant_ActionHid(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP.\n"));
+ halbtc8192e1ant_ActionA2dp(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_A2DP_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP+PAN(HS).\n"));
+ halbtc8192e1ant_ActionA2dpPanHs(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR).\n"));
+ halbtc8192e1ant_ActionPanEdr(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HS mode.\n"));
+ halbtc8192e1ant_ActionPanHs(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_PANEDR_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN+A2DP.\n"));
+ halbtc8192e1ant_ActionPanEdrA2dp(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_PANEDR_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR)+HID.\n"));
+ halbtc8192e1ant_ActionPanEdrHid(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP+PAN.\n"));
+ halbtc8192e1ant_ActionHidA2dpPanEdr(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_HID_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP.\n"));
+ halbtc8192e1ant_ActionHidA2dp(pBtCoexist);
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = coexist All Off!!\n"));
+ halbtc8192e1ant_CoexAllOff(pBtCoexist);
+ break;
+ }
+ pCoexDm->preAlgorithm = pCoexDm->curAlgorithm;
+ }
+}
+
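+// Main coex state machine, called from the notify handlers. It bails out
+// under manual control, stopped coex DM or IPS, handles BT inquiry/page and
+// HS operation first, selects 1SS/2SS, and finally branches on whether
+// wifi is connected.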
+VOID
+halbtc8192e1ant_RunCoexistMechanism(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+ BOOLEAN bWifiConnected=FALSE, bBtHsOn=FALSE;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism()===>\n"));
+
+ if(pBtCoexist->manual_control)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"));
+ return;
+ }
+
+ if(pBtCoexist->bStopCoexDm)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"));
+ return;
+ }
+
+ if(pCoexSta->bUnderIps)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is under IPS !!!\n"));
+ return;
+ }
+
+ halbtc8192e1ant_RunSwCoexistMechanism(pBtCoexist);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ if(pCoexSta->bC2hBtInquiryPage)
+ {
+ halbtc8192e1ant_ActionBtInquiry(pBtCoexist);
+ return;
+ }
+
+ // 1ss or 2ss
+ if(pBtLinkInfo->bScoExist)
+ {
+ halbtc8192e1ant_SwitchSsType(pBtCoexist, NORMAL_EXEC, 1);
+ }
+ else if(bBtHsOn)
+ {
+ if(pBtLinkInfo->bHidOnly)
+ halbtc8192e1ant_SwitchSsType(pBtCoexist, NORMAL_EXEC, 2);
+ else
+ halbtc8192e1ant_SwitchSsType(pBtCoexist, NORMAL_EXEC, 1);
+ }
+ else
+ halbtc8192e1ant_SwitchSsType(pBtCoexist, NORMAL_EXEC, 2);
+
+ if(bBtHsOn)
+ {
+ halbtc8192e1ant_ActionHs(pBtCoexist);
+ return;
+ }
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ if(!bWifiConnected)
+ {
+ BOOLEAN bScan=FALSE, bLink=FALSE, bRoam=FALSE;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is non connected-idle !!!\n"));
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+
+ if(bScan || bLink || bRoam)
+ halbtc8192e1ant_ActionWifiNotConnectedAssoAuthScan(pBtCoexist);
+ else
+ halbtc8192e1ant_ActionWifiNotConnected(pBtCoexist);
+ }
+ else
+ {
+ halbtc8192e1ant_ActionWifiConnected(pBtCoexist);
+ }
+}
+
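+// Reset the coex mechanism to a known default state: force the FW DAC swing
+// level, no BT power decrease, all software mechanisms off, 2SS, PS-TDMA
+// off (case 8) and coex table type 0.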
+VOID
+halbtc8192e1ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ // force to reset coex mechanism
+ halbtc8192e1ant_FwDacSwingLvl(pBtCoexist, FORCE_EXEC, 6);
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, FORCE_EXEC, 0);
+
+ // sw all off
+ halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+ halbtc8192e1ant_SwitchSsType(pBtCoexist, FORCE_EXEC, 2);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, FORCE_EXEC, 0);
+}
+
+//============================================================
+// workaround functions start with wa_halbtc8192e1ant_
+//============================================================
+//============================================================
+// extern functions start with EXhalbtc8192e1ant_
+//============================================================
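+// One-time HW setup for 1-antenna coexistence: back up RF 0x1e, hand antenna
+// switch control to BT, program the coex table, and enable PTA, the BT
+// mailbox interface, counter statistics and the BT clock while wifi is
+// disabled or suspended.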
+VOID
+EXhalbtc8192e1ant_InitHwConfig(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u4Byte u4Tmp=0;
+ u16 u2Tmp=0;
+ u1Byte u1Tmp=0;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 1Ant Init HW Config!!\n"));
+
+ // backup rf 0x1e value
+ pCoexDm->btRf0x1eBackup =
+ pBtCoexist->btc_get_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff);
+
+ // antenna sw ctrl to bt
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x4f, 0x6);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x944, 0x24);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x930, 0x700700);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x20);
+ if(pBtCoexist->chipInterface == BTC_INTF_USB)
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x64, 0x30430004);
+ else
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x64, 0x30030004);
+
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, FORCE_EXEC, 0);
+
+ // antenna switch control parameter
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x858, 0x55555555);
+
+ // coex parameters
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x778, 0x1);
+ // 0x790[5:0]=0x5
+ u1Tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x790);
+ u1Tmp &= 0xc0;
+ u1Tmp |= 0x5;
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x790, u1Tmp);
+
+ // enable counter statistics
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0x4);
+
+ // enable PTA
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x40, 0x20);
+ // enable mailbox interface
+ u2Tmp = pBtCoexist->btc_read_2byte(pBtCoexist, 0x40);
+ u2Tmp |= BIT9;
+ pBtCoexist->btc_write_2byte(pBtCoexist, 0x40, u2Tmp);
+
+ // enable PTA I2C mailbox
+ u1Tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x101);
+ u1Tmp |= BIT4;
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x101, u1Tmp);
+
+ // enable bt clock when wifi is disabled.
+ u1Tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x93);
+ u1Tmp |= BIT0;
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x93, u1Tmp);
+ // enable bt clock when suspended.
+ u1Tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x7);
+ u1Tmp |= BIT0;
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x7, u1Tmp);
+}
+
+VOID
+EXhalbtc8192e1ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n"));
+
+ pBtCoexist->bStopCoexDm = FALSE;
+
+ halbtc8192e1ant_InitCoexDm(pBtCoexist);
+}
+
+VOID
+EXhalbtc8192e1ant_DisplayCoexInfo(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ struct btc_board_info * pBoardInfo=&pBtCoexist->board_info;
+ PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info;
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+ pu1Byte cliBuf=pBtCoexist->cli_buf;
+ u1Byte u1Tmp[4], i, btInfoExt, psTdmaCase=0;
+ u4Byte u4Tmp[4];
+ BOOLEAN bRoam=FALSE, bScan=FALSE, bLink=FALSE, bWifiUnder5G=FALSE;
+ BOOLEAN bBtHsOn=FALSE, bWifiBusy=FALSE;
+ s4Byte wifiRssi=0, btHsRssi=0;
+ u4Byte wifiBw, wifiTrafficDir;
+ u1Byte wifiDot11Chnl, wifiHsChnl;
+ u4Byte fwVer=0, btPatchVer=0;
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cliBuf);
+
+ if(pBtCoexist->manual_control)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[Under Manual Control]============");
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ==========================================");
+ CL_PRINTF(cliBuf);
+ }
+ if(pBtCoexist->bStopCoexDm)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[Coex is STOPPED]============");
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ==========================================");
+ CL_PRINTF(cliBuf);
+ }
+
+ if(!pBoardInfo->bt_exist)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+ CL_PRINTF(cliBuf);
+ return;
+ }
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \
+ pBoardInfo->pg_ant_num, pBoardInfo->btdm_ant_num);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
+ ((pStackInfo->bProfileNotified)? "Yes":"No"), pStackInfo->hciVersion);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_BT_PATCH_VER, &btPatchVer);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)", "CoexVer/ FwVer/ PatchVer", \
+ GLCoexVerDate8192e1Ant, GLCoexVer8192e1Ant, fwVer, btPatchVer, btPatchVer);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \
+ wifiDot11Chnl, wifiHsChnl, bBtHsOn);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \
+ pCoexDm->wifiChnlInfo[0], pCoexDm->wifiChnlInfo[1],
+ pCoexDm->wifiChnlInfo[2]);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \
+ wifiRssi, btHsRssi);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", "Wifi bLink/ bRoam/ bScan", \
+ bLink, bRoam, bScan);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_UNDER_5G, &bWifiUnder5G);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", "Wifi status", \
+ (bWifiUnder5G? "5G":"2.4G"),
+ ((BTC_WIFI_BW_LEGACY==wifiBw)? "Legacy": (((BTC_WIFI_BW_HT40==wifiBw)? "HT40":"HT20"))),
+ ((!bWifiBusy)? "idle": ((BTC_WIFI_TRAFFIC_TX==wifiTrafficDir)? "uplink":"downlink")));
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \
+ ((pBtCoexist->btInfo.bBtDisabled)? ("disabled"): ((pCoexSta->bC2hBtInquiryPage)?("inquiry/page scan"):((BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus)? "non-connected idle":
+ ( (BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)? "connected-idle":"busy")))),
+ pCoexSta->btRssi, pCoexSta->btRetryCnt);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \
+ pBtLinkInfo->bScoExist, pBtLinkInfo->bHidExist, pBtLinkInfo->bPanExist, pBtLinkInfo->bA2dpExist);
+ CL_PRINTF(cliBuf);
+ pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_BT_LINK_INFO);
+
+ btInfoExt = pCoexSta->btInfoExt;
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Info A2DP rate", \
+ (btInfoExt&BIT0)? "Basic rate":"EDR rate");
+ CL_PRINTF(cliBuf);
+
+ for(i=0; i<BT_INFO_SRC_8192E_1ANT_MAX; i++)
+ {
+ if(pCoexSta->btInfoC2hCnt[i])
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8192e1Ant[i], \
+ pCoexSta->btInfoC2h[i][0], pCoexSta->btInfoC2h[i][1],
+ pCoexSta->btInfoC2h[i][2], pCoexSta->btInfoC2h[i][3],
+ pCoexSta->btInfoC2h[i][4], pCoexSta->btInfoC2h[i][5],
+ pCoexSta->btInfoC2h[i][6], pCoexSta->btInfoC2hCnt[i]);
+ CL_PRINTF(cliBuf);
+ }
+ }
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s, (0x%x/0x%x)", "PS state, IPS/LPS, (lps/rpwm)", \
+ ((pCoexSta->bUnderIps? "IPS ON":"IPS OFF")),
+ ((pCoexSta->bUnderLps? "LPS ON":"LPS OFF")),
+ pBtCoexist->btInfo.lps1Ant,
+ pBtCoexist->btInfo.rpwm_1ant);
+ CL_PRINTF(cliBuf);
+ pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "SS Type", \
+ pCoexDm->curSsType);
+ CL_PRINTF(cliBuf);
+
+ if(!pBtCoexist->manual_control)
+ {
+ // Sw mechanism
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw mechanism]============");
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d/ %d ", "SM1[ShRf/ LpRA/ LimDig/ btLna]", \
+ pCoexDm->bCurRfRxLpfShrink, pCoexDm->bCurLowPenaltyRa, pCoexDm->limited_dig, pCoexDm->bCurBtLnaConstrain);
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ", "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", \
+ pCoexDm->bCurAgcTableEn, pCoexDm->bCurAdcBackOff, pCoexDm->bCurDacSwingOn, pCoexDm->curDacSwingLvl);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/ %s/ %d ", "DelBA/ BtCtrlAgg/ AggSize", \
+ (pBtCoexist->btInfo.reject_agg_pkt? "Yes":"No"), (pBtCoexist->btInfo.b_bt_ctrl_agg_buf_size? "Yes":"No"),
+ pBtCoexist->btInfo.aggBufSize);
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Rate Mask", \
+ pBtCoexist->btInfo.ra_mask);
+ CL_PRINTF(cliBuf);
+
+ // Fw mechanism
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============");
+ CL_PRINTF(cliBuf);
+
+ psTdmaCase = pCoexDm->curPsTdma;
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", "PS TDMA", \
+ pCoexDm->psTdmaPara[0], pCoexDm->psTdmaPara[1],
+ pCoexDm->psTdmaPara[2], pCoexDm->psTdmaPara[3],
+ pCoexDm->psTdmaPara[4], psTdmaCase, pCoexDm->bAutoTdmaAdjust);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Latest error condition(should be 0)", \
+ pCoexDm->errorCondition);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "DecBtPwrLvl/ IgnWlanAct", \
+ pCoexDm->curBtDecPwrLvl, pCoexDm->bCurIgnoreWlanAct);
+ CL_PRINTF(cliBuf);
+ }
+
+ // Hw setting
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============");
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \
+ pCoexDm->btRf0x1eBackup);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xc04);
+ u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xd04);
+ u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x90c);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0xc04/ 0xd04/ 0x90c", \
+ u4Tmp[0], u4Tmp[1], u4Tmp[2]);
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x778);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x778", \
+ u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x92c);
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x930);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x92c/ 0x930", \
+ (u1Tmp[0]), u4Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x40);
+ u1Tmp[1] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x4f);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x40/ 0x4f", \
+ u1Tmp[0], u1Tmp[1]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x550);
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x522);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \
+ u4Tmp[0], u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xc50);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \
+ u4Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c0);
+ u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c4);
+ u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c8);
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x6cc);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \
+ u4Tmp[0], u4Tmp[1], u4Tmp[2], u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x770(hp rx[31:16]/tx[15:0])", \
+ pCoexSta->highPriorityRx, pCoexSta->highPriorityTx);
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x774(lp rx[31:16]/tx[15:0])", \
+ pCoexSta->lowPriorityRx, pCoexSta->lowPriorityTx);
+ CL_PRINTF(cliBuf);
+#if(BT_AUTO_REPORT_ONLY_8192E_1ANT == 1)
+ halbtc8192e1ant_MonitorBtCtr(pBtCoexist);
+#endif
+
+ pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+VOID
+EXhalbtc8192e1ant_IpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ u4Byte u4Tmp=0;
+
+ if(pBtCoexist->manual_control || pBtCoexist->bStopCoexDm)
+ return;
+
+ if(BTC_IPS_ENTER == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n"));
+ pCoexSta->bUnderIps = true;
+ halbtc8192e1ant_CoexAllOff(pBtCoexist);
+ }
+ else if(BTC_IPS_LEAVE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n"));
+ pCoexSta->bUnderIps = FALSE;
+ }
+}
+
+VOID
+EXhalbtc8192e1ant_LpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(pBtCoexist->manual_control || pBtCoexist->bStopCoexDm)
+ return;
+
+ if(BTC_LPS_ENABLE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n"));
+ pCoexSta->bUnderLps = true;
+ }
+ else if(BTC_LPS_DISABLE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n"));
+ pCoexSta->bUnderLps = FALSE;
+ }
+}
+
+VOID
+EXhalbtc8192e1ant_ScanNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ BOOLEAN bWifiConnected=FALSE, bBtHsOn=FALSE;
+
+ if(pBtCoexist->manual_control ||
+ pBtCoexist->bStopCoexDm ||
+ pBtCoexist->btInfo.bBtDisabled )
+ return;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ if(pCoexSta->bC2hBtInquiryPage)
+ {
+ halbtc8192e1ant_ActionBtInquiry(pBtCoexist);
+ return;
+ }
+ else if(bBtHsOn)
+ {
+ halbtc8192e1ant_ActionHs(pBtCoexist);
+ return;
+ }
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ if(BTC_SCAN_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n"));
+ if(!bWifiConnected) // non-connected scan
+ {
+ halbtc8192e1ant_ActionWifiNotConnectedAssoAuthScan(pBtCoexist);
+ }
+ else // wifi is connected
+ {
+ halbtc8192e1ant_ActionWifiConnectedScan(pBtCoexist);
+ }
+ }
+ else if(BTC_SCAN_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n"));
+ if(!bWifiConnected) // non-connected scan
+ {
+ halbtc8192e1ant_ActionWifiNotConnected(pBtCoexist);
+ }
+ else
+ {
+ halbtc8192e1ant_ActionWifiConnected(pBtCoexist);
+ }
+ }
+}
+
+VOID
+EXhalbtc8192e1ant_ConnectNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ BOOLEAN bWifiConnected=FALSE, bBtHsOn=FALSE;
+
+ if(pBtCoexist->manual_control ||
+ pBtCoexist->bStopCoexDm ||
+ pBtCoexist->btInfo.bBtDisabled )
+ return;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ if(pCoexSta->bC2hBtInquiryPage)
+ {
+ halbtc8192e1ant_ActionBtInquiry(pBtCoexist);
+ return;
+ }
+ else if(bBtHsOn)
+ {
+ halbtc8192e1ant_ActionHs(pBtCoexist);
+ return;
+ }
+
+ if(BTC_ASSOCIATE_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n"));
+ halbtc8192e1ant_ActionWifiNotConnectedAssoAuthScan(pBtCoexist);
+ }
+ else if(BTC_ASSOCIATE_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n"));
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ if(!bWifiConnected) // wifi still not connected
+ {
+ halbtc8192e1ant_ActionWifiNotConnected(pBtCoexist);
+ }
+ else
+ {
+ halbtc8192e1ant_ActionWifiConnected(pBtCoexist);
+ }
+ }
+}
+
+VOID
+EXhalbtc8192e1ant_MediaStatusNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ u1Byte H2C_Parameter[3] ={0};
+ u4Byte wifiBw;
+ u1Byte wifiCentralChnl;
+
+ if(pBtCoexist->manual_control ||
+ pBtCoexist->bStopCoexDm ||
+ pBtCoexist->btInfo.bBtDisabled )
+ return;
+
+ if(BTC_MEDIA_CONNECT == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA connect notify\n"));
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA disconnect notify\n"));
+ }
+
+ // only for 2.4G do we need to inform BT of the channel mask
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifiCentralChnl);
+ if( (BTC_MEDIA_CONNECT == type) &&
+ (wifiCentralChnl <= 14) )
+ {
+ H2C_Parameter[0] = 0x1;
+ H2C_Parameter[1] = wifiCentralChnl;
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ H2C_Parameter[2] = 0x30;
+ else
+ H2C_Parameter[2] = 0x20;
+ }
+
+ pCoexDm->wifiChnlInfo[0] = H2C_Parameter[0];
+ pCoexDm->wifiChnlInfo[1] = H2C_Parameter[1];
+ pCoexDm->wifiChnlInfo[2] = H2C_Parameter[2];
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x66=0x%x\n",
+ H2C_Parameter[0]<<16|H2C_Parameter[1]<<8|H2C_Parameter[2]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x66, 3, H2C_Parameter);
+}
+
+VOID
+EXhalbtc8192e1ant_SpecialPacketNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ BOOLEAN bBtHsOn=FALSE;
+
+ if(pBtCoexist->manual_control ||
+ pBtCoexist->bStopCoexDm ||
+ pBtCoexist->btInfo.bBtDisabled )
+ return;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ if(pCoexSta->bC2hBtInquiryPage)
+ {
+ halbtc8192e1ant_ActionBtInquiry(pBtCoexist);
+ return;
+ }
+ else if(bBtHsOn)
+ {
+ halbtc8192e1ant_ActionHs(pBtCoexist);
+ return;
+ }
+
+ if( BTC_PACKET_DHCP == type ||
+ BTC_PACKET_EAPOL == type )
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], special Packet(%d) notify\n", type));
+ halbtc8192e1ant_ActionWifiConnectedSpecialPacket(pBtCoexist);
+ }
+}
+
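+// C2H BT-info handler: parses the BT firmware report (retry count, RSSI,
+// extension bits, profile bits), refreshes the link/status bookkeeping in
+// pCoexSta/pCoexDm, updates the RA mask and aggregation limits, and then
+// re-runs the coex mechanism.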
+VOID
+EXhalbtc8192e1ant_BtInfoNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN pu1Byte tmpBuf,
+ IN u1Byte length
+ )
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+ u1Byte btInfo=0;
+ u1Byte i, rspSource=0;
+ static u4Byte setBtPsdMode=0;
+ BOOLEAN bBtBusy=FALSE, limited_dig=FALSE;
+ BOOLEAN bWifiConnected=FALSE;
+ BOOLEAN b_bt_ctrl_agg_buf_size=FALSE;
+
+ pCoexSta->bC2hBtInfoReqSent = FALSE;
+
+ rspSource = tmpBuf[0]&0xf;
+ if(rspSource >= BT_INFO_SRC_8192E_1ANT_MAX)
+ rspSource = BT_INFO_SRC_8192E_1ANT_WIFI_FW;
+ pCoexSta->btInfoC2hCnt[rspSource]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length=%d, hex data=[", rspSource, length));
+ for(i=0; i<length; i++)
+ {
+ pCoexSta->btInfoC2h[rspSource][i] = tmpBuf[i];
+ if(i == 1)
+ btInfo = tmpBuf[i];
+ if(i == length-1)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x]\n", tmpBuf[i]));
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x, ", tmpBuf[i]));
+ }
+ }
+
+ if(pBtCoexist->btInfo.bBtDisabled)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for BT is disabled <===\n"));
+ return;
+ }
+
+ if(pBtCoexist->manual_control)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n"));
+ return;
+ }
+ if(pBtCoexist->bStopCoexDm)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Coex STOPPED!!<===\n"));
+ return;
+ }
+
+ if(BT_INFO_SRC_8192E_1ANT_WIFI_FW != rspSource)
+ {
+ pCoexSta->btRetryCnt = // [3:0]
+ pCoexSta->btInfoC2h[rspSource][2]&0xf;
+
+ pCoexSta->btRssi =
+ pCoexSta->btInfoC2h[rspSource][3]*2+10;
+
+ pCoexSta->btInfoExt =
+ pCoexSta->btInfoC2h[rspSource][4];
+
+ // Here we need to resend some wifi info to BT
+ // because BT has been reset and lost the info.
+ if( (pCoexSta->btInfoExt & BIT1) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"));
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ if(bWifiConnected)
+ {
+ EXhalbtc8192e1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_CONNECT);
+ }
+ else
+ {
+ EXhalbtc8192e1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT);
+ }
+
+ setBtPsdMode = 0;
+ }
+
+ // the test-chip BT patch only reports the status for BT_RSP,
+ // so for now we only handle the following under BT_RSP
+ if(BT_INFO_SRC_8192E_1ANT_BT_RSP == rspSource)
+ {
+ if( (pCoexSta->btInfoExt & BIT3) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"));
+ halbtc8192e1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE);
+ }
+ else
+ {
+ // BT already does NOT ignore Wlan active, do nothing here.
+ }
+#if(BT_AUTO_REPORT_ONLY_8192E_1ANT == 0)
+ if( (pCoexSta->btInfoExt & BIT4) )
+ {
+ // BT auto report already enabled, do nothing
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n"));
+ halbtc8192e1ant_BtAutoReport(pBtCoexist, FORCE_EXEC, true);
+ }
+#endif
+ }
+ }
+
+ // check BIT2 first ==> check if bt is under inquiry or page scan
+ if(btInfo & BT_INFO_8192E_1ANT_B_INQ_PAGE)
+ pCoexSta->bC2hBtInquiryPage = true;
+ else
+ pCoexSta->bC2hBtInquiryPage = FALSE;
+
+ // set link exist status
+ if(!(btInfo&BT_INFO_8192E_1ANT_B_CONNECTION))
+ {
+ pCoexSta->bBtLinkExist = FALSE;
+ pCoexSta->bPanExist = FALSE;
+ pCoexSta->bA2dpExist = FALSE;
+ pCoexSta->bHidExist = FALSE;
+ pCoexSta->bScoExist = FALSE;
+ }
+ else // connection exists
+ {
+ pCoexSta->bBtLinkExist = true;
+ if(btInfo & BT_INFO_8192E_1ANT_B_FTP)
+ pCoexSta->bPanExist = true;
+ else
+ pCoexSta->bPanExist = FALSE;
+ if(btInfo & BT_INFO_8192E_1ANT_B_A2DP)
+ pCoexSta->bA2dpExist = true;
+ else
+ pCoexSta->bA2dpExist = FALSE;
+ if(btInfo & BT_INFO_8192E_1ANT_B_HID)
+ pCoexSta->bHidExist = true;
+ else
+ pCoexSta->bHidExist = FALSE;
+ if(btInfo & BT_INFO_8192E_1ANT_B_SCO_ESCO)
+ pCoexSta->bScoExist = true;
+ else
+ pCoexSta->bScoExist = FALSE;
+ }
+
+ halbtc8192e1ant_UpdateBtLinkInfo(pBtCoexist);
+
+ if(!(btInfo&BT_INFO_8192E_1ANT_B_CONNECTION))
+ {
+ pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt non-connected idle!!!\n"));
+ }
+ else if(btInfo == BT_INFO_8192E_1ANT_B_CONNECTION) // connection exists but not busy
+ {
+ pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt connected-idle!!!\n"));
+ }
+ else if((btInfo&BT_INFO_8192E_1ANT_B_SCO_ESCO) ||
+ (btInfo&BT_INFO_8192E_1ANT_B_SCO_BUSY))
+ {
+ pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_SCO_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt sco busy!!!\n"));
+ }
+ else if( (btInfo&BT_INFO_8192E_1ANT_B_ACL_BUSY) ||
+ (btInfo&BT_INFO_8192E_1ANT_B_A2DP) ||
+ (btInfo&BT_INFO_8192E_1ANT_B_FTP) )
+ {
+ if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY != pCoexDm->btStatus)
+ pCoexDm->bAutoTdmaAdjust = FALSE;
+ pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_ACL_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt acl busy!!!\n"));
+ }
+ else
+ {
+ pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_MAX;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt non-defined state!!!\n"));
+ }
+
+ // ra mask check
+ if(pBtLinkInfo->bScoExist || pBtLinkInfo->bHidExist)
+ {
+ halbtc8192e1ant_Updatera_mask(pBtCoexist, NORMAL_EXEC, BTC_RATE_DISABLE, 0x00000003); // disable tx cck 1M/2M
+ }
+ else
+ {
+ halbtc8192e1ant_Updatera_mask(pBtCoexist, NORMAL_EXEC, BTC_RATE_ENABLE, 0x00000003); // enable tx cck 1M/2M
+ }
+
+ if( (BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) ||
+ (BT_8192E_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+ (BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus) )
+ {
+ bBtBusy = true;
+ limited_dig = true;
+ if(pBtLinkInfo->bHidExist)
+ b_bt_ctrl_agg_buf_size = true;
+ }
+ else
+ {
+ bBtBusy = FALSE;
+ limited_dig = FALSE;
+ }
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bBtBusy);
+
+ //============================================
+ // Aggregation related setting
+ //============================================
+ // if sco, reject AddBA
+ //pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, &bRejApAggPkt);
+
+ // decide whether BT controls the aggregation buf size
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE, &b_bt_ctrl_agg_buf_size);
+ // actually update the aggregation setting
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
+ //============================================
+
+ pCoexDm->limited_dig = limited_dig;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+
+ halbtc8192e1ant_RunCoexistMechanism(pBtCoexist);
+}
+
+VOID
+EXhalbtc8192e1ant_StackOperationNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair start notify\n"));
+ }
+ else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair finish notify\n"));
+ }
+}
+
+VOID
+EXhalbtc8192e1ant_HaltNotify(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Halt notify\n"));
+
+ pBtCoexist->bStopCoexDm = true;
+ halbtc8192e1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
+
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 0);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x4f, 0xf);
+
+ EXhalbtc8192e1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT);
+}
+
+VOID
+EXhalbtc8192e1ant_PnpNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte pnpState
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify\n"));
+
+ if(BTC_WIFI_PNP_SLEEP == pnpState)
+ {
+ pBtCoexist->bStopCoexDm = true;
+ halbtc8192e1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+ }
+ else if(BTC_WIFI_PNP_WAKE_UP == pnpState)
+ {
+
+ }
+}
+
+VOID
+EXhalbtc8192e1ant_Periodical(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ static u1Byte disVerInfoCnt=0;
+ u4Byte fwVer=0, btPatchVer=0;
+ struct btc_board_info * pBoardInfo=&pBtCoexist->board_info;
+ PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ==========================Periodical===========================\n"));
+
+ if(disVerInfoCnt <= 5)
+ {
+ disVerInfoCnt += 1;
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ****************************************************************\n"));
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", \
+ pBoardInfo->pg_ant_num, pBoardInfo->btdm_ant_num, pBoardInfo->btdm_ant_pos));
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], BT stack/ hci ext ver = %s / %d\n", \
+ ((pStackInfo->bProfileNotified)? "Yes":"No"), pStackInfo->hciVersion));
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_BT_PATCH_VER, &btPatchVer);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", \
+ GLCoexVerDate8192e1Ant, GLCoexVer8192e1Ant, fwVer, btPatchVer, btPatchVer));
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ****************************************************************\n"));
+ }
+#if(BT_AUTO_REPORT_ONLY_8192E_1ANT == 0)
+ halbtc8192e1ant_QueryBtInfo(pBtCoexist);
+ halbtc8192e1ant_MonitorBtCtr(pBtCoexist);
+ halbtc8192e1ant_MonitorBtEnableDisable(pBtCoexist);
+#else
+ if( halbtc8192e1ant_IsWifiStatusChanged(pBtCoexist) ||
+ pCoexDm->bAutoTdmaAdjust)
+ {
+ halbtc8192e1ant_RunCoexistMechanism(pBtCoexist);
+ }
+#endif
+}
+
+VOID
+EXhalbtc8192e1ant_DbgControl(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte opCode,
+ IN u1Byte opLen,
+ IN pu1Byte pData
+ )
+{
+ switch(opCode)
+ {
+ case BTC_DBG_SET_COEX_NORMAL:
+ pBtCoexist->manual_control = FALSE;
+ halbtc8192e1ant_InitCoexDm(pBtCoexist);
+ break;
+ case BTC_DBG_SET_COEX_WIFI_ONLY:
+ pBtCoexist->manual_control = true;
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+ break;
+ case BTC_DBG_SET_COEX_BT_ONLY:
+ // todo
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.h b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.h
new file mode 100644
index 000000000000..a759b758faef
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.h
@@ -0,0 +1,226 @@
+//===========================================
+// The following are the 8192E_1ANT BT co-exist definitions
+//===========================================
+#define BT_AUTO_REPORT_ONLY_8192E_1ANT 0
+
+#define BT_INFO_8192E_1ANT_B_FTP BIT7
+#define BT_INFO_8192E_1ANT_B_A2DP BIT6
+#define BT_INFO_8192E_1ANT_B_HID BIT5
+#define BT_INFO_8192E_1ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8192E_1ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8192E_1ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8192E_1ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8192E_1ANT_B_CONNECTION BIT0
+
+#define BT_INFO_8192E_1ANT_A2DP_BASIC_RATE(_BT_INFO_EXT_) \
+ (((_BT_INFO_EXT_&BIT0))? true:FALSE)
+
+#define BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT 2
+
+typedef enum _BT_INFO_SRC_8192E_1ANT{
+ BT_INFO_SRC_8192E_1ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8192E_1ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8192E_1ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8192E_1ANT_MAX
+}BT_INFO_SRC_8192E_1ANT,*PBT_INFO_SRC_8192E_1ANT;
+
+typedef enum _BT_8192E_1ANT_BT_STATUS{
+ BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8192E_1ANT_BT_STATUS_INQ_PAGE = 0x2,
+ BT_8192E_1ANT_BT_STATUS_ACL_BUSY = 0x3,
+ BT_8192E_1ANT_BT_STATUS_SCO_BUSY = 0x4,
+ BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8192E_1ANT_BT_STATUS_MAX
+}BT_8192E_1ANT_BT_STATUS,*PBT_8192E_1ANT_BT_STATUS;
+
+typedef enum _BT_8192E_1ANT_WIFI_STATUS{
+ BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN = 0x1,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN = 0x2,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT = 0x3,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_IDLE = 0x4,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_BUSY = 0x5,
+ BT_8192E_1ANT_WIFI_STATUS_MAX
+}BT_8192E_1ANT_WIFI_STATUS,*PBT_8192E_1ANT_WIFI_STATUS;
+
+typedef enum _BT_8192E_1ANT_COEX_ALGO{
+ BT_8192E_1ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8192E_1ANT_COEX_ALGO_SCO = 0x1,
+ BT_8192E_1ANT_COEX_ALGO_HID = 0x2,
+ BT_8192E_1ANT_COEX_ALGO_A2DP = 0x3,
+ BT_8192E_1ANT_COEX_ALGO_A2DP_PANHS = 0x4,
+ BT_8192E_1ANT_COEX_ALGO_PANEDR = 0x5,
+ BT_8192E_1ANT_COEX_ALGO_PANHS = 0x6,
+ BT_8192E_1ANT_COEX_ALGO_PANEDR_A2DP = 0x7,
+ BT_8192E_1ANT_COEX_ALGO_PANEDR_HID = 0x8,
+ BT_8192E_1ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9,
+ BT_8192E_1ANT_COEX_ALGO_HID_A2DP = 0xa,
+ BT_8192E_1ANT_COEX_ALGO_MAX = 0xb,
+}BT_8192E_1ANT_COEX_ALGO,*PBT_8192E_1ANT_COEX_ALGO;
+
+typedef struct _COEX_DM_8192E_1ANT{
+ // fw mechanism
+ u1Byte preBtDecPwrLvl;
+ u1Byte curBtDecPwrLvl;
+ BOOLEAN bPreBtLnaConstrain;
+ BOOLEAN bCurBtLnaConstrain;
+ u1Byte bPreBtPsdMode;
+ u1Byte bCurBtPsdMode;
+ u1Byte preFwDacSwingLvl;
+ u1Byte curFwDacSwingLvl;
+ BOOLEAN bCurIgnoreWlanAct;
+ BOOLEAN bPreIgnoreWlanAct;
+ u1Byte prePsTdma;
+ u1Byte curPsTdma;
+ u1Byte psTdmaPara[5];
+ u1Byte psTdmaDuAdjType;
+ BOOLEAN bAutoTdmaAdjust;
+ BOOLEAN bPrePsTdmaOn;
+ BOOLEAN bCurPsTdmaOn;
+ BOOLEAN bPreBtAutoReport;
+ BOOLEAN bCurBtAutoReport;
+ u1Byte preLps;
+ u1Byte curLps;
+ u1Byte preRpwm;
+ u1Byte curRpwm;
+
+ // sw mechanism
+ BOOLEAN bPreRfRxLpfShrink;
+ BOOLEAN bCurRfRxLpfShrink;
+ u4Byte btRf0x1eBackup;
+ BOOLEAN bPreLowPenaltyRa;
+ BOOLEAN bCurLowPenaltyRa;
+ BOOLEAN bPreDacSwingOn;
+ u4Byte preDacSwingLvl;
+ BOOLEAN bCurDacSwingOn;
+ u4Byte curDacSwingLvl;
+ BOOLEAN bPreAdcBackOff;
+ BOOLEAN bCurAdcBackOff;
+ BOOLEAN bPreAgcTableEn;
+ BOOLEAN bCurAgcTableEn;
+ u4Byte preVal0x6c0;
+ u4Byte curVal0x6c0;
+ u4Byte preVal0x6c4;
+ u4Byte curVal0x6c4;
+ u4Byte preVal0x6c8;
+ u4Byte curVal0x6c8;
+ u1Byte preVal0x6cc;
+ u1Byte curVal0x6cc;
+ BOOLEAN limited_dig;
+
+ // algorithm related
+ u1Byte preAlgorithm;
+ u1Byte curAlgorithm;
+ u1Byte btStatus;
+ u1Byte wifiChnlInfo[3];
+
+ u1Byte preSsType;
+ u1Byte curSsType;
+
+ u4Byte prera_mask;
+ u4Byte curra_mask;
+
+ u1Byte errorCondition;
+} COEX_DM_8192E_1ANT, *PCOEX_DM_8192E_1ANT;
+
+typedef struct _COEX_STA_8192E_1ANT{
+ BOOLEAN bBtLinkExist;
+ BOOLEAN bScoExist;
+ BOOLEAN bA2dpExist;
+ BOOLEAN bHidExist;
+ BOOLEAN bPanExist;
+
+ BOOLEAN bUnderLps;
+ BOOLEAN bUnderIps;
+ u4Byte highPriorityTx;
+ u4Byte highPriorityRx;
+ u4Byte lowPriorityTx;
+ u4Byte lowPriorityRx;
+ u1Byte btRssi;
+ u1Byte preBtRssiState;
+ u1Byte preWifiRssiState[4];
+ BOOLEAN bC2hBtInfoReqSent;
+ u1Byte btInfoC2h[BT_INFO_SRC_8192E_1ANT_MAX][10];
+ u4Byte btInfoC2hCnt[BT_INFO_SRC_8192E_1ANT_MAX];
+ BOOLEAN bC2hBtInquiryPage;
+ u1Byte btRetryCnt;
+ u1Byte btInfoExt;
+}COEX_STA_8192E_1ANT, *PCOEX_STA_8192E_1ANT;
+
+//===========================================
+// The following are the interfaces which notify the coex module.
+//===========================================
+VOID
+EXhalbtc8192e1ant_InitHwConfig(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8192e1ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8192e1ant_IpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8192e1ant_LpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8192e1ant_ScanNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8192e1ant_ConnectNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8192e1ant_MediaStatusNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8192e1ant_SpecialPacketNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8192e1ant_BtInfoNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN pu1Byte tmpBuf,
+ IN u1Byte length
+ );
+VOID
+EXhalbtc8192e1ant_StackOperationNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8192e1ant_HaltNotify(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8192e1ant_PnpNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte pnpState
+ );
+VOID
+EXhalbtc8192e1ant_Periodical(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8192e1ant_DisplayCoexInfo(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8192e1ant_DbgControl(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte opCode,
+ IN u1Byte opLen,
+ IN pu1Byte pData
+ );
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c
new file mode 100644
index 000000000000..44ec78562e2d
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c
@@ -0,0 +1,4242 @@
+/**************************************************************
+ * Description:
+ *
+ * This file is for RTL8192E Co-exist mechanism
+ *
+ * History
+ * 2012/11/15 Cosa first check in.
+ *
+ **************************************************************/
+
+/**************************************************************
+ * include files
+ **************************************************************/
+#include "halbt_precomp.h"
+#if 1
+/**************************************************************
+ * Global variables, these are static variables
+ **************************************************************/
+static struct coex_dm_8192e_2ant glcoex_dm_8192e_2ant;
+static struct coex_dm_8192e_2ant *coex_dm = &glcoex_dm_8192e_2ant;
+static struct coex_sta_8192e_2ant glcoex_sta_8192e_2ant;
+static struct coex_sta_8192e_2ant *coex_sta = &glcoex_sta_8192e_2ant;
+
+const char *const GLBtInfoSrc8192e2Ant[]={
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+u32 glcoex_ver_date_8192e_2ant = 20130902;
+u32 glcoex_ver_8192e_2ant = 0x34;
+
+/**************************************************************
+ * local function prototypes if needed
+ **************************************************************/
+/**************************************************************
+ * local functions start with halbtc8192e2ant_
+ **************************************************************/
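+/* Map the raw BT RSSI onto a LOW/MEDIUM/HIGH state with hysteresis:
+ * level_num selects a 2-level or 3-level mapping, and
+ * BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT is added to the threshold before the
+ * state may move up, so the state does not flap around the threshold. */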
+u8 halbtc8192e2ant_btrssi_state(u8 level_num, u8 rssi_thresh, u8 rssi_thresh1)
+{
+ int btrssi=0;
+ u8 btrssi_state = coex_sta->pre_bt_rssi_state;
+
+ btrssi = coex_sta->bt_rssi;
+
+ if (level_num == 2) {
+ if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi pre state=LOW\n");
+ if (btrssi >= (rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ btrssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to High\n");
+ } else {
+ btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at Low\n");
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi pre state=HIGH\n");
+ if (btrssi < rssi_thresh) {
+ btrssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to Low\n");
+ } else {
+ btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at High\n");
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi thresh error!!\n");
+ return coex_sta->pre_bt_rssi_state;
+ }
+
+ if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi pre state=LOW\n");
+ if(btrssi >= (rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ btrssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to Medium\n");
+ } else {
+ btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at Low\n");
+ }
+ } else if ((coex_sta->pre_bt_rssi_state ==
+ BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_bt_rssi_state ==
+ BTC_RSSI_STATE_STAY_MEDIUM)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi pre state=MEDIUM\n");
+ if (btrssi >= (rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ btrssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to High\n");
+ } else if (btrssi < rssi_thresh) {
+ btrssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to Low\n");
+ } else {
+ btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at Medium\n");
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi pre state=HIGH\n");
+ if (btrssi < rssi_thresh1) {
+ btrssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to Medium\n");
+ } else {
+ btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at High\n");
+ }
+ }
+ }
+
+ coex_sta->pre_bt_rssi_state = btrssi_state;
+
+ return btrssi_state;
+}
+
+u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist * btcoexist, u8 index,
+ u8 level_num, u8 rssi_thresh, u8 rssi_thresh1)
+{
+ int wifirssi = 0;
+ u8 wifirssi_state = coex_sta->pre_wifi_rssi_state[index];
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi);
+
+ if (level_num == 2) {
+ if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_LOW)) {
+ if (wifirssi >= (rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ wifirssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to High\n");
+ } else {
+ wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at Low\n");
+ }
+ } else {
+ if (wifirssi < rssi_thresh) {
+ wifirssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to Low\n");
+ } else {
+ wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at High\n");
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI thresh error!!\n");
+ return coex_sta->pre_wifi_rssi_state[index];
+ }
+
+ if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_LOW)) {
+ if (wifirssi >= (rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ wifirssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to Medium\n");
+ } else {
+ wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at Low\n");
+ }
+ } else if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_MEDIUM)) {
+ if (wifirssi >= (rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ wifirssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to High\n");
+ } else if (wifirssi < rssi_thresh) {
+ wifirssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to Low\n");
+ } else {
+ wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at Medium\n");
+ }
+ } else {
+ if (wifirssi < rssi_thresh1) {
+ wifirssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to Medium\n");
+ } else {
+ wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at High\n");
+ }
+ }
+ }
+
+ coex_sta->pre_wifi_rssi_state[index] = wifirssi_state;
+
+ return wifirssi_state;
+}
+
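+/* Detect whether BT has been disabled by watching the PTA high/low priority
+ * counters: all-zero (or all-0xffff) counters for two consecutive polls are
+ * treated as "BT disabled" and reported through BTC_SET_BL_BT_DISABLE. */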
+void halbtc8192e2ant_monitor_bt_enable_disable(struct btc_coexist *btcoexist)
+{
+ static bool pre_bt_disabled = false;
+ static u32 bt_disable_cnt = 0;
+ bool bt_active = true, bt_disabled = false;
+
+ /* This function checks whether BT is disabled */
+
+ if (coex_sta->high_priority_tx == 0 &&
+ coex_sta->high_priority_rx == 0 &&
+ coex_sta->low_priority_tx == 0 &&
+ coex_sta->low_priority_rx == 0)
+ bt_active = false;
+
+ if (coex_sta->high_priority_tx == 0xffff &&
+ coex_sta->high_priority_rx == 0xffff &&
+ coex_sta->low_priority_tx == 0xffff &&
+ coex_sta->low_priority_rx == 0xffff)
+ bt_active = false;
+
+ if (bt_active) {
+ bt_disable_cnt = 0;
+ bt_disabled = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+ &bt_disabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is enabled !!\n");
+ } else {
+ bt_disable_cnt++;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], bt all counters=0, %d times!!\n",
+ bt_disable_cnt);
+ if (bt_disable_cnt >= 2) {
+ bt_disabled = true;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+ &bt_disabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is disabled !!\n");
+ }
+ }
+ if (pre_bt_disabled != bt_disabled) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled":"enabled"),
+ (bt_disabled ? "disabled":"enabled"));
+ pre_bt_disabled = bt_disabled;
+ }
+}
+
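+/* Translate an RA-mask type and the current spatial-stream setting into the
+ * rate bits that should be disabled (e.g. CCK 1M/2M, the low OFDM/MCS
+ * rates, and the 2SS MCS rates when only 1SS is allowed). */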
+u32 halbtc8192e2ant_decidera_mask(struct btc_coexist *btcoexist,
+ u8 sstype, u32 ra_masktype)
+{
+ u32 disra_mask = 0x0;
+
+ switch (ra_masktype) {
+ case 0: /* normal mode */
+ if (sstype == 2)
+ disra_mask = 0x0; /* enable 2ss */
+ else
+ disra_mask = 0xfff00000;/* disable 2ss */
+ break;
+ case 1: /* disable cck 1/2 */
+ if(sstype == 2)
+ disra_mask = 0x00000003;/* enable 2ss */
+ else
+ disra_mask = 0xfff00003;/* disable 2ss */
+ break;
+ case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4 */
+ if(sstype == 2)
+ disra_mask = 0x0001f1f7;/* enable 2ss */
+ else
+ disra_mask = 0xfff1f1f7;/* disable 2ss */
+ break;
+ default:
+ break;
+ }
+
+ return disra_mask;
+}
+
+void halbtc8192e2ant_Updatera_mask(struct btc_coexist *btcoexist,
+ bool force_exec, u32 dis_ratemask)
+{
+ coex_dm->curra_mask = dis_ratemask;
+
+ if (force_exec || (coex_dm->prera_mask != coex_dm->curra_mask))
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask,
+ &coex_dm->curra_mask);
+ coex_dm->prera_mask = coex_dm->curra_mask;
+}
+
+void halbtc8192e2ant_autorate_fallback_retry(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ bool wifi_under_bmode = false;
+
+ coex_dm->cur_arfrtype = type;
+
+ if (force_exec || (coex_dm->pre_arfrtype != coex_dm->cur_arfrtype)) {
+ switch (coex_dm->cur_arfrtype) {
+ case 0: /* normal mode */
+ btcoexist->btc_write_4byte(btcoexist, 0x430,
+ coex_dm->backup_arfr_cnt1);
+ btcoexist->btc_write_4byte(btcoexist, 0x434,
+ coex_dm->backup_arfr_cnt2);
+ break;
+ case 1:
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_BL_WIFI_UNDER_B_MODE,
+ &wifi_under_bmode);
+ if (wifi_under_bmode) {
+ btcoexist->btc_write_4byte(btcoexist, 0x430,
+ 0x0);
+ btcoexist->btc_write_4byte(btcoexist, 0x434,
+ 0x01010101);
+ } else {
+ btcoexist->btc_write_4byte(btcoexist, 0x430,
+ 0x0);
+ btcoexist->btc_write_4byte(btcoexist, 0x434,
+ 0x04030201);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ coex_dm->pre_arfrtype = coex_dm->cur_arfrtype;
+}
+
+void halbtc8192e2ant_retrylimit(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ coex_dm->cur_retrylimit_type = type;
+
+ if (force_exec || (coex_dm->pre_retrylimit_type !=
+ coex_dm->cur_retrylimit_type)) {
+ switch (coex_dm->cur_retrylimit_type) {
+ case 0: /* normal mode */
+ btcoexist->btc_write_2byte(btcoexist, 0x42a,
+ coex_dm->backup_retrylimit);
+ break;
+ case 1: /* retry limit=8 */
+ btcoexist->btc_write_2byte(btcoexist, 0x42a,
+ 0x0808);
+ break;
+ default:
+ break;
+ }
+ }
+
+ coex_dm->pre_retrylimit_type = coex_dm->cur_retrylimit_type;
+}
+
+void halbtc8192e2ant_ampdu_maxtime(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ coex_dm->cur_ampdutime_type = type;
+
+ if (force_exec || (coex_dm->pre_ampdutime_type !=
+ coex_dm->cur_ampdutime_type)) {
+ switch (coex_dm->cur_ampdutime_type) {
+ case 0: /* normal mode */
+ btcoexist->btc_write_1byte(btcoexist, 0x456,
+ coex_dm->backup_ampdu_maxtime);
+ break;
+ case 1: /* AMPDU timw = 0x38 * 32us */
+ btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38);
+ break;
+ default:
+ break;
+ }
+ }
+
+ coex_dm->pre_ampdutime_type = coex_dm->cur_ampdutime_type;
+}
+
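+/*
+ * Apply all Tx-side restrictions in one call: the RA mask, the auto rate
+ * fallback retry table, the retry limit and the max AMPDU time, each
+ * selected by its own type parameter.
+ */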
+void halbtc8192e2ant_limited_tx(struct btc_coexist *btcoexist,
+ bool force_exec, u8 ra_masktype, u8 arfr_type,
+ u8 retrylimit_type, u8 ampdutime_type)
+{
+ u32 disra_mask = 0x0;
+
+ coex_dm->curra_masktype = ra_masktype;
+ disra_mask = halbtc8192e2ant_decidera_mask(btcoexist,
+ coex_dm->cur_sstype,
+ ra_masktype);
+ halbtc8192e2ant_Updatera_mask(btcoexist, force_exec, disra_mask);
+
+ halbtc8192e2ant_autorate_fallback_retry(btcoexist, force_exec,
+ arfr_type);
+ halbtc8192e2ant_retrylimit(btcoexist, force_exec, retrylimit_type);
+ halbtc8192e2ant_ampdu_maxtime(btcoexist, force_exec, ampdutime_type);
+}
+
+void halbtc8192e2ant_limited_rx(struct btc_coexist *btcoexist,
+ bool force_exec, bool rej_ap_agg_pkt,
+ bool b_bt_ctrl_agg_buf_size,
+ u8 agg_buf_size)
+{
+ bool reject_rx_agg = rej_ap_agg_pkt;
+ bool bt_ctrl_rx_agg_size = b_bt_ctrl_agg_buf_size;
+ u8 rx_agg_size = agg_buf_size;
+
+ /*********************************************
+ * Rx Aggregation related setting
+ *********************************************/
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+ &reject_rx_agg);
+ /* decide BT control aggregation buf size or not */
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE,
+ &bt_ctrl_rx_agg_size);
+	/* aggregation buf size, only works
+	 * when BT controls the Rx aggregation size. */
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rx_agg_size);
+ /* real update aggregation setting */
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
+}
+
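+/*
+ * Sample the BT high/low priority Tx/Rx counters from registers 0x770 and
+ * 0x774, cache them in coex_sta, then reset the counters via 0x76e.
+ */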
+void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+{
+ u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
+ u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
+
+ reg_hp_txrx = 0x770;
+ reg_lp_txrx = 0x774;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
+ reg_hp_tx = u32tmp & MASKLWORD;
+ reg_hp_rx = (u32tmp & MASKHWORD)>>16;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
+ reg_lp_tx = u32tmp & MASKLWORD;
+ reg_lp_rx = (u32tmp & MASKHWORD)>>16;
+
+ coex_sta->high_priority_tx = reg_hp_tx;
+ coex_sta->high_priority_rx = reg_hp_rx;
+ coex_sta->low_priority_tx = reg_lp_tx;
+ coex_sta->low_priority_rx = reg_lp_rx;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex] High Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex] Low Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+
+ /* reset counter */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+}
+
+void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist)
+{
+	u8 h2c_parameter[1] = {0};
+
+ coex_sta->c2h_bt_info_req_sent = true;
+
+ h2c_parameter[0] |= BIT0; /* trigger */
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Query Bt Info, FW write 0x61=0x%x\n",
+ h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
+}
+
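+/*
+ * Return true when, while WiFi is connected, the busy state, the 4-way
+ * handshake state or the BT HS operation state differs from the last poll.
+ */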
+bool halbtc8192e2ant_iswifi_status_changed(struct btc_coexist *btcoexist)
+{
+	static bool pre_wifi_busy;
+	static bool pre_under_4way, pre_bt_hson;
+ bool wifi_busy = false, under_4way = false, bt_hson = false;
+ bool wifi_connected = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ &under_4way);
+
+ if (wifi_connected) {
+ if (wifi_busy != pre_wifi_busy) {
+ pre_wifi_busy = wifi_busy;
+ return true;
+ }
+ if (under_4way != pre_under_4way) {
+ pre_under_4way = under_4way;
+ return true;
+ }
+ if (bt_hson != pre_bt_hson) {
+ pre_bt_hson = bt_hson;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool bt_hson = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+
+ bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
+ bt_link_info->sco_exist = coex_sta->sco_exist;
+ bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
+ bt_link_info->pan_exist = coex_sta->pan_exist;
+ bt_link_info->hid_exist = coex_sta->hid_exist;
+
+ /* work around for HS mode. */
+ if (bt_hson) {
+ bt_link_info->pan_exist = true;
+ bt_link_info->bt_link_exist = true;
+ }
+
+ /* check if Sco only */
+ if (bt_link_info->sco_exist &&
+ !bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist &&
+ !bt_link_info->hid_exist)
+ bt_link_info->sco_only = true;
+ else
+ bt_link_info->sco_only = false;
+
+ /* check if A2dp only */
+ if (!bt_link_info->sco_exist &&
+ bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist &&
+ !bt_link_info->hid_exist)
+ bt_link_info->a2dp_only = true;
+ else
+ bt_link_info->a2dp_only = false;
+
+ /* check if Pan only */
+ if (!bt_link_info->sco_exist &&
+ !bt_link_info->a2dp_exist &&
+ bt_link_info->pan_exist &&
+ !bt_link_info->hid_exist)
+ bt_link_info->pan_only = true;
+ else
+ bt_link_info->pan_only = false;
+
+ /* check if Hid only */
+ if (!bt_link_info->sco_exist &&
+ !bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist &&
+ bt_link_info->hid_exist)
+ bt_link_info->hid_only = true;
+ else
+ bt_link_info->hid_only = false;
+}
+
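+/*
+ * Select the coexistence algorithm from the combination of currently
+ * active BT profiles (SCO/HID/A2DP/PAN) and whether PAN runs over HS.
+ */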
+u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+	bool bt_hson = false;
+ u8 algorithm = BT_8192E_2ANT_COEX_ALGO_UNDEFINED;
+ u8 numOfDiffProfile = 0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+
+ if (!bt_link_info->bt_link_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "No BT link exists!!!\n");
+ return algorithm;
+ }
+
+ if (bt_link_info->sco_exist)
+ numOfDiffProfile++;
+ if (bt_link_info->hid_exist)
+ numOfDiffProfile++;
+ if (bt_link_info->pan_exist)
+ numOfDiffProfile++;
+ if (bt_link_info->a2dp_exist)
+ numOfDiffProfile++;
+
+ if (numOfDiffProfile == 1) {
+ if (bt_link_info->sco_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO only\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
+ } else {
+ if (bt_link_info->hid_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "HID only\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
+ } else if (bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "A2DP only\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP;
+ } else if (bt_link_info->pan_exist) {
+ if (bt_hson) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "PAN(HS) only\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "PAN(EDR) only\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_PANEDR;
+ }
+ }
+ }
+ } else if (numOfDiffProfile == 2) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + HID\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
+ } else if (bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + A2DP ==> SCO\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (bt_link_info->pan_exist) {
+ if (bt_hson) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + PAN(HS)\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + PAN(EDR)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
+ }
+ }
+ } else {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
+ if (stack_info->num_of_hid >= 2) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "HID*2 + A2DP\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "HID + A2DP\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
+ }
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
+ if (bt_hson) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "HID + PAN(HS)\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "HID + PAN(EDR)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hson) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "A2DP + PAN(HS)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "A2DP + PAN(EDR)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ }
+ }
+ } else if (numOfDiffProfile == 3) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + HID + A2DP ==> HID\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
+ if (bt_hson) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + HID + PAN(HS)\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + HID + PAN(EDR)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
+ }
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hson) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + A2DP + PAN(HS)\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + A2DP + PAN(EDR)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hson) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "HID + A2DP + PAN(HS)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "HID + A2DP + PAN(EDR)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ }
+ }
+ } else if (numOfDiffProfile >= 3) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hson) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "ErrorSCO+HID+A2DP+PAN(HS)\n");
+
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO+HID+A2DP+PAN(EDR)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ }
+
+ return algorithm;
+}
+
+void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
+ u8 dac_swinglvl)
+{
+	u8 h2c_parameter[1] = {0};
+
+	/* There are several types of dacswing:
+	 * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */
+ h2c_parameter[0] = dac_swinglvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swinglvl);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
+}
+
+void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
+ u8 dec_btpwr_lvl)
+{
+	u8 h2c_parameter[1] = {0};
+
+ h2c_parameter[0] = dec_btpwr_lvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex] decrease Bt Power level = %d, FW write 0x62=0x%x\n",
+ dec_btpwr_lvl, h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
+}
+
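+/*
+ * Cache the requested BT power decrease level and push it to firmware
+ * via H2C 0x62.
+ */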
+void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist,
+ bool force_exec, u8 dec_btpwr_lvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s Dec BT power level = %d\n",
+		  (force_exec ? "force to" : ""), dec_btpwr_lvl);
+ coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+ }
+ halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+
+ coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+}
+
+void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist,
+ bool enable_autoreport)
+{
+	u8 h2c_parameter[1] = {0};
+
+ h2c_parameter[0] = 0;
+
+ if (enable_autoreport)
+ h2c_parameter[0] |= BIT0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n",
+		  (enable_autoreport ? "Enabled!!" : "Disabled!!"),
+ h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
+}
+
+void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable_autoreport)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s BT Auto report = %s\n",
+		  (force_exec ? "force to" : ""),
+		  (enable_autoreport ? "Enabled" : "Disabled"));
+ coex_dm->cur_bt_auto_report = enable_autoreport;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
+ coex_dm->pre_bt_auto_report,
+ coex_dm->cur_bt_auto_report);
+
+ if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
+ return;
+ }
+ halbtc8192e2ant_set_bt_autoreport(btcoexist,
+ coex_dm->cur_bt_auto_report);
+
+ coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
+}
+
+void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist,
+ bool force_exec, u8 fw_dac_swinglvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+		  (force_exec ? "force to" : ""), fw_dac_swinglvl);
+ coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
+
+ if (coex_dm->pre_fw_dac_swing_lvl ==
+ coex_dm->cur_fw_dac_swing_lvl)
+ return;
+ }
+
+ halbtc8192e2ant_setfw_dac_swinglevel(btcoexist,
+ coex_dm->cur_fw_dac_swing_lvl);
+
+ coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
+}
+
+void halbtc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
+ bool rx_rf_shrink_on)
+{
+ if (rx_rf_shrink_on) {
+ /* Shrink RF Rx LPF corner */
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+ 0xfffff, 0xffffc);
+ } else {
+		/* Resume RF Rx LPF corner
+		 * After initialization, we can use coex_dm->bt_rf0x1e_backup */
+ if (btcoexist->initilized) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+ 0xfffff,
+ coex_dm->bt_rf0x1e_backup);
+ }
+ }
+}
+
+void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
+ bool force_exec, bool rx_rf_shrink_on)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+		  (force_exec ? "force to" : ""), (rx_rf_shrink_on ? "ON" : "OFF"));
+ coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
+
+ if (coex_dm->pre_rf_rx_lpf_shrink ==
+ coex_dm->cur_rf_rx_lpf_shrink)
+ return;
+ }
+ halbtc8192e2ant_set_sw_rf_rx_lpf_corner(btcoexist,
+ coex_dm->cur_rf_rx_lpf_shrink);
+
+ coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
+}
+
+void halbtc8192e2ant_set_sw_penalty_tx_rateadaptive(
+ struct btc_coexist *btcoexist,
+ bool low_penalty_ra)
+{
+	u8 h2c_parameter[6] = {0};
+
+ h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty */
+
+ if (low_penalty_ra) {
+ h2c_parameter[1] |= BIT0;
+ /* normal rate except MCS7/6/5, OFDM54/48/36 */
+ h2c_parameter[2] = 0x00;
+ h2c_parameter[3] = 0xf7; /* MCS7 or OFDM54 */
+ h2c_parameter[4] = 0xf8; /* MCS6 or OFDM48 */
+ h2c_parameter[5] = 0xf9; /* MCS5 or OFDM36 */
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+		  (low_penalty_ra ? "ON!!" : "OFF!!"));
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
+}
+
+void halbtc8192e2ant_low_penalty_ra(struct btc_coexist *btcoexist,
+ bool force_exec, bool low_penalty_ra)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+		  (force_exec ? "force to" : ""), (low_penalty_ra ? "ON" : "OFF"));
+ coex_dm->cur_low_penalty_ra = low_penalty_ra;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex] bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+ coex_dm->pre_low_penalty_ra,
+ coex_dm->cur_low_penalty_ra);
+
+ if (coex_dm->pre_low_penalty_ra ==
+ coex_dm->cur_low_penalty_ra)
+ return;
+ }
+ halbtc8192e2ant_set_sw_penalty_tx_rateadaptive(btcoexist,
+ coex_dm->cur_low_penalty_ra);
+
+ coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
+}
+
+void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist,
+ u32 level)
+{
+ u8 val = (u8)level;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
+}
+
+void halbtc8192e2ant_setsw_fulltime_dacswing(struct btc_coexist *btcoexist,
+ bool sw_dac_swingon,
+ u32 sw_dac_swinglvl)
+{
+ if (sw_dac_swingon)
+ halbtc8192e2ant_set_dac_swingreg(btcoexist, sw_dac_swinglvl);
+ else
+ halbtc8192e2ant_set_dac_swingreg(btcoexist, 0x18);
+}
+
+void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist,
+ bool force_exec, bool dac_swingon,
+ u32 dac_swinglvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn DacSwing=%s, dac_swinglvl=0x%x\n",
+		  (force_exec ? "force to" : ""),
+		  (dac_swingon ? "ON" : "OFF"), dac_swinglvl);
+ coex_dm->cur_dac_swing_on = dac_swingon;
+ coex_dm->cur_dac_swing_lvl = dac_swinglvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, ",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
+
+ if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
+ (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
+ return;
+ }
+ mdelay(30);
+ halbtc8192e2ant_setsw_fulltime_dacswing(btcoexist, dac_swingon,
+ dac_swinglvl);
+
+ coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
+ coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
+}
+
+void halbtc8192e2ant_set_adc_backoff(struct btc_coexist *btcoexist,
+ bool adc_backoff)
+{
+	if (adc_backoff) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], BB BackOff Level On!\n");
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc05, 0x30, 0x3);
+	} else {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], BB BackOff Level Off!\n");
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc05, 0x30, 0x1);
+	}
+}
+
+void halbtc8192e2ant_adc_backoff(struct btc_coexist *btcoexist,
+ bool force_exec, bool adc_backoff)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn AdcBackOff = %s\n",
+		  (force_exec ? "force to" : ""), (adc_backoff ? "ON" : "OFF"));
+ coex_dm->cur_adc_back_off = adc_backoff;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n",
+ coex_dm->pre_adc_back_off, coex_dm->cur_adc_back_off);
+
+ if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
+ return;
+ }
+ halbtc8192e2ant_set_adc_backoff(btcoexist, coex_dm->cur_adc_back_off);
+
+ coex_dm->pre_adc_back_off = coex_dm->cur_adc_back_off;
+}
+
+void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
+ bool agc_table_en)
+{
+
+ /* BB AGC Gain Table */
+ if (agc_table_en) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table On!\n");
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x071D0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table Off!\n");
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001);
+ }
+}
+
+void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist,
+ bool force_exec, bool agc_table_en)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s %s Agc Table\n",
+		  (force_exec ? "force to" : ""),
+		  (agc_table_en ? "Enable" : "Disable"));
+ coex_dm->cur_agc_table_en = agc_table_en;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+
+ if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
+ return;
+ }
+ halbtc8192e2ant_set_agc_table(btcoexist, agc_table_en);
+
+ coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
+}
+
+void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
+ u32 val0x6c0, u32 val0x6c4,
+ u32 val0x6c8, u8 val0x6cc)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+ btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
+}
+
+void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist, bool force_exec,
+ u32 val0x6c0, u32 val0x6c4,
+ u32 val0x6c8, u8 val0x6cc)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0=0x%x, ",
+		  (force_exec ? "force to" : ""), val0x6c0);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ val0x6c4, val0x6c8, val0x6cc);
+ coex_dm->cur_val0x6c0 = val0x6c0;
+ coex_dm->cur_val0x6c4 = val0x6c4;
+ coex_dm->cur_val0x6c8 = val0x6c8;
+ coex_dm->cur_val0x6cc = val0x6cc;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, ",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+ coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, \n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+ coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+
+ if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
+ (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
+ (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
+ (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
+ return;
+ }
+ halbtc8192e2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
+ val0x6c8, val0x6cc);
+
+ coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
+ coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
+ coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
+ coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
+}
+
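+/*
+ * Convenience wrapper that programs one of the predefined coex table
+ * presets (register sets 0x6c0/0x6c4/0x6c8/0x6cc) selected by type.
+ */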
+void halbtc8192e2ant_coex_table_with_type(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ switch (type) {
+ case 0:
+ halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5a5a5a5a, 0xffffff, 0x3);
+ break;
+ case 1:
+ halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+ 0x5a5a5a5a, 0xffffff, 0x3);
+ break;
+ case 2:
+ halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5ffb5ffb, 0xffffff, 0x3);
+ break;
+ case 3:
+ halbtc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff,
+ 0x5fdb5fdb, 0xffffff, 0x3);
+ break;
+ case 4:
+ halbtc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff,
+ 0x5ffb5ffb, 0xffffff, 0x3);
+ break;
+ default:
+ break;
+ }
+}
+
+void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
+ bool enable)
+{
+	u8 h2c_parameter[1] = {0};
+
+ if (enable)
+ h2c_parameter[0] |= BIT0; /* function enable */
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
+ h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
+}
+
+void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+		  (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ coex_dm->cur_ignore_wlan_act = enable;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreIgnoreWlanAct = %d ",
+ coex_dm->pre_ignore_wlan_act);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->cur_ignore_wlan_act);
+
+ if (coex_dm->pre_ignore_wlan_act ==
+ coex_dm->cur_ignore_wlan_act)
+ return;
+ }
+ halbtc8192e2ant_set_fw_ignore_wlanact(btcoexist, enable);
+
+ coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
+}
+
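+/*
+ * Write the five PS-TDMA parameter bytes to firmware (H2C 0x60) and keep
+ * a copy in coex_dm->ps_tdma_para for later reference.
+ */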
+void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1,
+ u8 byte2, u8 byte3, u8 byte4, u8 byte5)
+{
+	u8 h2c_parameter[5] = {0};
+
+ h2c_parameter[0] = byte1;
+ h2c_parameter[1] = byte2;
+ h2c_parameter[2] = byte3;
+ h2c_parameter[3] = byte4;
+ h2c_parameter[4] = byte5;
+
+ coex_dm->ps_tdma_para[0] = byte1;
+ coex_dm->ps_tdma_para[1] = byte2;
+ coex_dm->ps_tdma_para[2] = byte3;
+ coex_dm->ps_tdma_para[3] = byte4;
+ coex_dm->ps_tdma_para[4] = byte5;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 | h2c_parameter[4]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
+}
+
+void halbtc8192e2ant_sw_mechanism1(struct btc_coexist *btcoexist,
+ bool shrink_rx_lpf, bool low_penalty_ra,
+ bool limited_dig, bool btlan_constrain)
+{
+ halbtc8192e2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
+}
+
+void halbtc8192e2ant_sw_mechanism2(struct btc_coexist *btcoexist,
+ bool agc_table_shift, bool adc_backoff,
+ bool sw_dac_swing, u32 dac_swinglvl)
+{
+ halbtc8192e2ant_AgcTable(btcoexist, NORMAL_EXEC, agc_table_shift);
+ halbtc8192e2ant_DacSwing(btcoexist, NORMAL_EXEC, sw_dac_swing,
+ dac_swinglvl);
+}
+
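+/*
+ * Turn PS-TDMA on or off using one of the predefined parameter sets
+ * (types 1-21 and 71 when enabled, types 0/1 when disabled); the firmware
+ * write is skipped when neither the on/off state nor the type changed and
+ * force_exec is not set.
+ */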
+void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
+ bool force_exec, bool turn_on, u8 type)
+{
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+		  (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"), type);
+ coex_dm->cur_ps_tdma_on = turn_on;
+ coex_dm->cur_ps_tdma = type;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+
+ if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
+ (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
+ return;
+ }
+ if (turn_on) {
+ switch (type) {
+ case 1:
+ default:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x90);
+ break;
+ case 2:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0xe1, 0x90);
+ break;
+ case 3:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0xf1, 0x90);
+ break;
+ case 4:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x10,
+ 0x3, 0xf1, 0x90);
+ break;
+ case 5:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0x60, 0x90);
+ break;
+ case 6:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0x60, 0x90);
+ break;
+ case 7:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0x70, 0x90);
+ break;
+ case 8:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xa3, 0x10,
+ 0x3, 0x70, 0x90);
+ break;
+ case 9:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x10);
+ break;
+ case 10:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0xe1, 0x10);
+ break;
+ case 11:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0xf1, 0x10);
+ break;
+ case 12:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x10,
+ 0x3, 0xf1, 0x10);
+ break;
+ case 13:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe0, 0x10);
+ break;
+ case 14:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0xe0, 0x10);
+ break;
+ case 15:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0xf0, 0x10);
+ break;
+ case 16:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+ 0x3, 0xf0, 0x10);
+ break;
+ case 17:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0x61, 0x20,
+ 0x03, 0x10, 0x10);
+ break;
+ case 18:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x5,
+ 0x5, 0xe1, 0x90);
+ break;
+		case 1: /* AMPDU time = 0x38 * 32us */
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x25,
+ 0x25, 0xe1, 0x90);
+ break;
+ case 20:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x25,
+ 0x25, 0x60, 0x90);
+ break;
+ case 21:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x15,
+ 0x03, 0x70, 0x90);
+ break;
+ case 71:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x90);
+ break;
+ }
+ } else {
+ /* disable PS tdma */
+ switch (type) {
+ default:
+ case 0:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0x8, 0x0, 0x0,
+ 0x0, 0x0);
+ btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x4);
+ break;
+ case 1:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x8, 0x0);
+ mdelay(5);
+ btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20);
+ break;
+ }
+ }
+
+ /* update pre state */
+ coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
+ coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
+}
+
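+/*
+ * Switch the WiFi side between 1SS and 2SS operation: update the RA mask,
+ * the OFDM/CCK RX paths and the MIMO power-save mode accordingly.
+ */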
+void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist, u8 sstype)
+{
+ u8 mimops = BTC_MIMO_PS_DYNAMIC;
+ u32 disra_mask = 0x0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], REAL set SS Type = %d\n", sstype);
+
+ disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype,
+ coex_dm->curra_masktype);
+ halbtc8192e2ant_Updatera_mask(btcoexist, FORCE_EXEC, disra_mask);
+
+ if (sstype == 1) {
+ halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+ /* switch ofdm path */
+ btcoexist->btc_write_1byte(btcoexist, 0xc04, 0x11);
+ btcoexist->btc_write_1byte(btcoexist, 0xd04, 0x1);
+ btcoexist->btc_write_4byte(btcoexist, 0x90c, 0x81111111);
+ /* switch cck patch */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xe77, 0x4, 0x1);
+ btcoexist->btc_write_1byte(btcoexist, 0xa07, 0x81);
+		mimops = BTC_MIMO_PS_STATIC;
+ } else if (sstype == 2) {
+ halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
+ btcoexist->btc_write_1byte(btcoexist, 0xc04, 0x33);
+ btcoexist->btc_write_1byte(btcoexist, 0xd04, 0x3);
+ btcoexist->btc_write_4byte(btcoexist, 0x90c, 0x81121313);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xe77, 0x4, 0x0);
+ btcoexist->btc_write_1byte(btcoexist, 0xa07, 0x41);
+		mimops = BTC_MIMO_PS_DYNAMIC;
+ }
+ /* set rx 1ss or 2ss */
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_SEND_MIMO_PS, &mimops);
+}
+
+void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist,
+ bool force_exec, u8 new_sstype)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], %s Switch SS Type = %d\n",
+		  (force_exec ? "force to" : ""), new_sstype);
+ coex_dm->cur_sstype = new_sstype;
+
+ if (!force_exec) {
+ if (coex_dm->pre_sstype == coex_dm->cur_sstype)
+ return;
+ }
+ halbtc8192e2ant_set_switch_sstype(btcoexist, coex_dm->cur_sstype);
+
+ coex_dm->pre_sstype = coex_dm->cur_sstype;
+}
+
+void halbtc8192e2ant_coex_alloff(struct btc_coexist *btcoexist)
+{
+ /* fw all off */
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+ /* sw all off */
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+ /* hw all off */
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+}
+
+void halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ /* force to reset coex mechanism */
+
+ halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, FORCE_EXEC, 6);
+ halbtc8192e2ant_dec_btpwr(btcoexist, FORCE_EXEC, 0);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+ halbtc8192e2ant_switch_sstype(btcoexist, FORCE_EXEC, 2);
+
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+}
+
+void halbtc8192e2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+{
+ bool low_pwr_disable = true;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+}
+
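+/*
+ * Handle the common cases (WiFi not connected, or BT idle) and return
+ * true when a setting has been applied; return false when both WiFi and
+ * BT are busy.
+ */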
+bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool common = false, wifi_connected = false, wifi_busy = false;
+ bool bt_hson = false, low_pwr_disable = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+ if (bt_link_info->sco_exist || bt_link_info->hid_exist)
+ halbtc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 0, 0, 0);
+ else
+ halbtc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+
+ if (!wifi_connected) {
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi non-connected idle!!\n");
+
+ if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+ coex_dm->bt_status) ||
+ (BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status)) {
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC,
+ 2);
+ halbtc8192e2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+ 0);
+ } else {
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC,
+ 1);
+ halbtc8192e2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 0);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+ 1);
+ }
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, false,
+ false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, false,
+ 0x18);
+
+ common = true;
+ } else {
+ if (BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+ coex_dm->bt_status) {
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Wifi connected + BT non connected-idle!!\n");
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC,
+ 2);
+ halbtc8192e2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+ 0);
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC,
+ 6);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ common = true;
+ } else if (BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status) {
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ if (bt_hson)
+ return false;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Wifi connected + BT connected-idle!!\n");
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC,
+ 2);
+ halbtc8192e2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+ 0);
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC,
+ 6);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ common = true;
+ } else {
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ if (wifi_busy) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Wifi Connected-Busy + BT Busy!!\n");
+ common = false;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Wifi Connected-Idle + BT Busy!!\n");
+
+ halbtc8192e2ant_switch_sstype(btcoexist,
+ NORMAL_EXEC, 1);
+ halbtc8192e2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC,
+ 2);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 21);
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist,
+ NORMAL_EXEC, 6);
+ halbtc8192e2ant_dec_btpwr(btcoexist,
+ NORMAL_EXEC, 0);
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false,
+ false, false,
+ false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false,
+ false, false,
+ 0x18);
+ common = true;
+ }
+ }
+ }
+ return common;
+}
+
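+/*
+ * Adaptive PS-TDMA tuning: based on the BT retry count reported in
+ * BT_Info byte2, step the WiFi duration up (no retries for n polls) or
+ * down (retries seen), using the up/dn/m/n/wait_cnt state kept across
+ * calls.
+ */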
+void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
+ bool sco_hid, bool tx_pause,
+ u8 max_interval)
+{
+ static int up, dn, m, n, wait_cnt;
+ /* 0: no change, +1: increase WiFi duration,
+ * -1: decrease WiFi duration */
+ int result;
+ u8 retry_cnt = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjust()\n");
+
+ if (!coex_dm->auto_tdma_adjust) {
+ coex_dm->auto_tdma_adjust = true;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ if (sco_hid) {
+ if (tx_pause) {
+ if (max_interval == 1) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 13);
+ coex_dm->ps_tdma_du_adj_type = 13;
+ } else if (max_interval == 2) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (max_interval == 3) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ }
+ } else {
+ if (max_interval == 1) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (max_interval == 2) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (max_interval == 3) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ }
+ }
+ } else {
+ if (tx_pause) {
+ if (max_interval == 1) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
+ } else if (max_interval == 2) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (max_interval == 3) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ }
+ } else {
+ if (max_interval == 1) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type = 1;
+ } else if (max_interval == 2) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (max_interval == 3) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ }
+ }
+ }
+
+ up = 0;
+ dn = 0;
+ m = 1;
+		n = 3;
+ result = 0;
+ wait_cnt = 0;
+ } else {
+		/* acquire the BT TRx retry count from BT_Info byte2 */
+ retry_cnt = coex_sta->bt_retry_cnt;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], retry_cnt = %d\n", retry_cnt);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
+ up, dn, m, n, wait_cnt);
+ result = 0;
+ wait_cnt++;
+ /* no retry in the last 2-second duration */
+ if (retry_cnt == 0) {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+ if (up >= n) {
+ wait_cnt = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex]Increase wifi duration!!\n");
+ }
+ } else if (retry_cnt <= 3) {
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+ if (dn == 2) {
+ if (wait_cnt <= 2)
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_cnt = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "Reduce wifi duration for retry<3\n");
+ }
+ } else {
+ if (wait_cnt == 1)
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+ m = 20;
+
+			n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_cnt = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "Decrease wifi duration for retryCounter>3!!\n");
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], max Interval = %d\n", max_interval);
+ if (max_interval == 1) {
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+
+ if (coex_dm->cur_ps_tdma == 71) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+ if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 13);
+ coex_dm->ps_tdma_du_adj_type = 13;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type =
+ 5;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 13);
+ coex_dm->ps_tdma_du_adj_type =
+ 13;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 71);
+ coex_dm->ps_tdma_du_adj_type = 71;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+ if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 71) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type =
+ 1;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type =
+ 1;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 71);
+ coex_dm->ps_tdma_du_adj_type =
+ 71;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type =
+ 9;
+ }
+ }
+ }
+ } else if (max_interval == 2) {
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+ if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+ if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+					} else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+					} else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+					} else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ }
+ }
+ }
+ } else if (max_interval == 3) {
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+ if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+ if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ }
+ }
+ }
+ }
+ }
+
+ /* if the current PsTdma does not match the recorded one
+ * (e.g. during scan or DHCP), adjust it back to the
+ * previously recorded one. */
+ if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
+ bool scan = false, link = false, roam = false;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], PsTdma type mismatch!!!, ");
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "curPsTdma=%d, recordPsTdma=%d\n",
+ coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+ if (!scan && !link && !roam)
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true,
+ coex_dm->ps_tdma_du_adj_type);
+ else
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+ }
+}
+
+/* SCO only or SCO+PAN(HS) */
+void halbtc8192e2ant_action_sco(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ }
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x6);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x6);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x6);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x6);
+ }
+ }
+}
+
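+/* SCO+PAN(EDR) */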
+void halbtc8192e2ant_action_sco_pan(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ }
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x6);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x6);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x6);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x6);
+ }
+ }
+}
+
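+/* HID only */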
+void halbtc8192e2ant_action_hid(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
+void halbtc8192e2ant_action_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+ bool long_dist = false;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW ||
+ btrssi_state == BTC_RSSI_STATE_STAY_LOW) &&
+ (wifirssi_state == BTC_RSSI_STATE_LOW ||
+ wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
+ long_dist = true;
+ }
+ if (long_dist) {
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, true,
+ 0x4);
+ } else {
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false,
+ 0x8);
+ }
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (long_dist)
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ else
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+
+ if (long_dist) {
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 17);
+ coex_dm->auto_tdma_adjust = false;
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ } else {
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
+ true, 1);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
+ false, 1);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
+ false, 1);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ }
+ }
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
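+/* A2DP+PAN(HS) */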
+void halbtc8192e2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 2);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, false,
+ 2);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, false,
+ 2);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ }
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x6);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ true, 0x6);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x6);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ true, 0x6);
+ }
+ }
+}
+
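+/* PAN(EDR) only */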
+void halbtc8192e2ant_action_pan_edr(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
+ }
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* PAN(HS) only */
+void halbtc8192e2ant_action_pan_hs(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ }
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* PAN(EDR)+A2DP */
+void halbtc8192e2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 3);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, false,
+ 3);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, false,
+ 3);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
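+/* PAN(EDR)+HID */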
+void halbtc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* HID+A2DP+PAN(EDR) */
+void halbtc8192e2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 3);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
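+/* HID+A2DP */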
+void halbtc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
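+/* Select and run the 2-Ant coexist action that matches the currently
+ * detected BT profile algorithm. */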
+void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+{
+ u8 algorithm = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism()===>\n");
+
+ if (btcoexist->manual_control) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], return for Manual CTRL <===\n");
+ return;
+ }
+
+ if (coex_sta->under_ips) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], wifi is under IPS !!!\n");
+ return;
+ }
+
+ algorithm = halbtc8192e2ant_action_algorithm(btcoexist);
+ if (coex_sta->c2h_bt_inquiry_page &&
+ (BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
+ halbtc8192e2ant_action_bt_inquiry(btcoexist);
+ return;
+ }
+
+ coex_dm->cur_algorithm = algorithm;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+
+ if (halbtc8192e2ant_is_common_action(btcoexist)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant common.\n");
+ coex_dm->auto_tdma_adjust = false;
+ } else {
+ if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
+ coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
+ coex_dm->auto_tdma_adjust = false;
+ }
+ switch (coex_dm->cur_algorithm) {
+ case BT_8192E_2ANT_COEX_ALGO_SCO:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = SCO.\n");
+ halbtc8192e2ant_action_sco(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_SCO_PAN:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = SCO+PAN(EDR).\n");
+ halbtc8192e2ant_action_sco_pan(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = HID.\n");
+ halbtc8192e2ant_action_hid(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = A2DP.\n");
+ halbtc8192e2ant_action_a2dp(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = A2DP+PAN(HS).\n");
+ halbtc8192e2ant_action_a2dp_pan_hs(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = PAN(EDR).\n");
+ halbtc8192e2ant_action_pan_edr(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = HS mode.\n");
+ halbtc8192e2ant_action_pan_hs(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = PAN+A2DP.\n");
+ halbtc8192e2ant_action_pan_edr_a2dp(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = PAN(EDR)+HID.\n");
+ halbtc8192e2ant_action_pan_edr_hid(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = HID+A2DP+PAN.\n");
+ halbtc8192e2ant_action_hid_a2dp_pan_edr(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_HID_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = HID+A2DP.\n");
+ halbtc8192e2ant_action_hid_a2dp(btcoexist);
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = unknown!!\n");
+ /* halbtc8192e2ant_coex_alloff(btcoexist); */
+ break;
+ }
+ coex_dm->pre_algorithm = coex_dm->cur_algorithm;
+ }
+}
+
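+/* 2-Ant HW init: optionally back up coex-related registers, then set up
+ * antenna switching, the coex table, PTA and BT clock control. */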
+void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist, bool backup)
+{
+ u16 u16tmp = 0;
+ u8 u8tmp = 0;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], 2Ant Init HW Config!!\n");
+
+ if (backup) {
+ /* backup rf 0x1e value */
+ coex_dm->bt_rf0x1e_backup =
+ btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A,
+ 0x1e, 0xfffff);
+
+ coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist,
+ 0x430);
+ coex_dm->backup_arfr_cnt2 = btcoexist->btc_read_4byte(btcoexist,
+ 0x434);
+ coex_dm->backup_retrylimit = btcoexist->btc_read_2byte(
+ btcoexist,
+ 0x42a);
+ coex_dm->backup_ampdu_maxtime = btcoexist->btc_read_1byte(
+ btcoexist,
+ 0x456);
+ }
+
+ /* antenna sw ctrl to bt */
+ btcoexist->btc_write_1byte(btcoexist, 0x4f, 0x6);
+ btcoexist->btc_write_1byte(btcoexist, 0x944, 0x24);
+ btcoexist->btc_write_4byte(btcoexist, 0x930, 0x700700);
+ btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20);
+ if (btcoexist->chip_interface == BTC_INTF_USB)
+ btcoexist->btc_write_4byte(btcoexist, 0x64, 0x30430004);
+ else
+ btcoexist->btc_write_4byte(btcoexist, 0x64, 0x30030004);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+
+ /* antenna switch control parameter */
+ btcoexist->btc_write_4byte(btcoexist, 0x858, 0x55555555);
+
+ /* coex parameters */
+ btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
+ /* 0x790[5:0]=0x5 */
+ u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
+ u8tmp &= 0xc0;
+ u8tmp |= 0x5;
+ btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
+
+ /* enable counter statistics */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
+
+ /* enable PTA */
+ btcoexist->btc_write_1byte(btcoexist, 0x40, 0x20);
+ /* enable mailbox interface */
+ u16tmp = btcoexist->btc_read_2byte(btcoexist, 0x40);
+ u16tmp |= BIT9;
+ btcoexist->btc_write_2byte(btcoexist, 0x40, u16tmp);
+
+ /* enable PTA I2C mailbox */
+ u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x101);
+ u8tmp |= BIT4;
+ btcoexist->btc_write_1byte(btcoexist, 0x101, u8tmp);
+
+ /* enable bt clock when wifi is disabled. */
+ u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x93);
+ u8tmp |= BIT0;
+ btcoexist->btc_write_1byte(btcoexist, 0x93, u8tmp);
+ /* enable bt clock when suspend. */
+ u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x7);
+ u8tmp |= BIT0;
+ btcoexist->btc_write_1byte(btcoexist, 0x7, u8tmp);
+}
+
+/*************************************************************
+ * workaround functions start with wa_halbtc8192e2ant_
+ *************************************************************/
+
+/************************************************************
+ * extern functions start with ex_halbtc8192e2ant_
+ ************************************************************/
+
+void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist)
+{
+ halbtc8192e2ant_init_hwconfig(btcoexist, true);
+}
+
+void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
+ halbtc8192e2ant_init_coex_dm(btcoexist);
+}
+
+void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+ u8 *cli_buf = btcoexist->cli_buf;
+ u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
+ u16 u16tmp[4];
+ u32 u32tmp[4];
+ bool roam = false, scan = false, link = false, wifi_under_5g = false;
+ bool bt_hson = false, wifi_busy = false;
+ int wifirssi = 0, bt_hs_rssi = 0;
+ u32 wifi_bw, wifi_traffic_dir;
+ u8 wifi_dot11_chnl, wifi_hs_chnl;
+ u32 fw_ver = 0, bt_patch_ver = 0;
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cli_buf);
+
+ if (btcoexist->manual_control) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ===========[Under Manual Control]===========");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ==========================================");
+ CL_PRINTF(cli_buf);
+ }
+
+ if (!board_info->bt_exist) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT does not exist !!!");
+ CL_PRINTF(cli_buf);
+ return;
+ }
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
+ board_info->pg_ant_num, board_info->btdm_ant_num);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d",
+ "BT stack/ hci ext ver",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer",
+ glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
+ &wifi_dot11_chnl);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsMode(HsChnl)",
+ wifi_dot11_chnl, bt_hson, wifi_hs_chnl);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ",
+ "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0],
+ coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "Wifi rssi/ HS rssi", wifirssi, bt_hs_rssi);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+ "Wifi link/ roam/ scan", link, roam, scan);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+ &wifi_traffic_dir);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ",
+ "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+ ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
+ (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
+ ((!wifi_busy) ? "idle" :
+ ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
+ "uplink" : "downlink")));
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ",
+ "BT [status/ rssi/ retryCnt]",
+ ((btcoexist->bt_info.bt_disabled) ? ("disabled") :
+ ((coex_sta->c2h_bt_inquiry_page) ?
+ ("inquiry/page scan") :
+ ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+ coex_dm->bt_status) ? "non-connected idle" :
+ ((BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status) ? "connected-idle" : "busy")))),
+ coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP", stack_info->sco_exist,
+ stack_info->hid_exist, stack_info->pan_exist,
+ stack_info->a2dp_exist);
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s",
+ "BT Info A2DP rate",
+ (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
+ CL_PRINTF(cli_buf);
+
+ for (i = 0; i < BT_INFO_SRC_8192E_2ANT_MAX; i++) {
+ if (coex_sta->bt_info_c2h_cnt[i]) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x %02x ",
+ GLBtInfoSrc8192e2Ant[i],
+ coex_sta->bt_info_c2h[i][0],
+ coex_sta->bt_info_c2h[i][1],
+ coex_sta->bt_info_c2h[i][2],
+ coex_sta->bt_info_c2h[i][3]);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "%02x %02x %02x(%d)",
+ coex_sta->bt_info_c2h[i][4],
+ coex_sta->bt_info_c2h[i][5],
+ coex_sta->bt_info_c2h[i][6],
+ coex_sta->bt_info_c2h_cnt[i]);
+ CL_PRINTF(cli_buf);
+ }
+ }
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s",
+ "PS state, IPS/LPS",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "SS Type",
+ coex_dm->cur_sstype);
+ CL_PRINTF(cli_buf);
+
+ /* Sw mechanism */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Sw mechanism]============");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+ "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+ coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Rate Mask",
+ btcoexist->bt_info.ra_mask);
+ CL_PRINTF(cli_buf);
+
+ /* Fw mechanism */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Fw mechanism]============");
+ CL_PRINTF(cli_buf);
+
+ ps_tdma_case = coex_dm->cur_ps_tdma;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)",
+ "PS TDMA", coex_dm->ps_tdma_para[0],
+ coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2],
+ coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4],
+ ps_tdma_case, coex_dm->auto_tdma_adjust);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+ "DecBtPwr/ IgnWlanAct",
+ coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
+ CL_PRINTF(cli_buf);
+
+ /* Hw setting */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Hw setting]============");
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x",
+ "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+ coex_dm->backup_arfr_cnt2, coex_dm->backup_retrylimit,
+ coex_dm->backup_ampdu_maxtime);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
+ u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "0x430/0x434/0x42a/0x456",
+ u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc04);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xd04);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x90c);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]);
+ CL_PRINTF(cli_buf);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x778",
+ u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c);
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0x40/ 0x4f", u8tmp[0], u8tmp[1]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)",
+ u32tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+ u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "0x770(hp rx[31:16]/tx[15:0])",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "0x774(lp rx[31:16]/tx[15:0])",
+ coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+ CL_PRINTF(cli_buf);
+#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 1)
+ halbtc8192e2ant_monitor_bt_ctr(btcoexist);
+#endif
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_IPS_ENTER == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
+ coex_sta->under_ips = true;
+ halbtc8192e2ant_coex_alloff(btcoexist);
+ } else if (BTC_IPS_LEAVE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
+ coex_sta->under_ips = false;
+ }
+}
+
+void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_LPS_ENABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
+ coex_sta->under_lps = true;
+ } else if (BTC_LPS_DISABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
+ coex_sta->under_lps = false;
+ }
+}
+
+void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_SCAN_START == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
+ else if (BTC_SCAN_FINISH == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
+}
+
+void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_ASSOCIATE_START == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
+ else if (BTC_ASSOCIATE_FINISH == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
+}
+
+void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ u8 h2c_parameter[3] = {0};
+ u32 wifi_bw;
+ u8 wifi_center_chnl;
+
+ if (btcoexist->manual_control ||
+ btcoexist->stop_coex_dm ||
+ btcoexist->bt_info.bt_disabled)
+ return;
+
+ if (BTC_MEDIA_CONNECT == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
+
+ /* only 2.4G we need to inform bt the chnl mask */
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
+ &wifi_center_chnl);
+ if ((BTC_MEDIA_CONNECT == type) &&
+ (wifi_center_chnl <= 14)) {
+ h2c_parameter[0] = 0x1;
+ h2c_parameter[1] = wifi_center_chnl;
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw)
+ h2c_parameter[2] = 0x30;
+ else
+ h2c_parameter[2] = 0x20;
+ }
+
+ coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
+ coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
+ coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66=0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
+}
+
+void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ if (type == BTC_PACKET_DHCP)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], DHCP Packet notify\n");
+}
+
+void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmp_buf, u8 length)
+{
+ u8 bt_info = 0;
+ u8 i, rspSource = 0;
+ bool bt_busy = false, limited_dig = false;
+ bool wifi_connected = false;
+
+ coex_sta->c2h_bt_info_req_sent = false;
+
+ rspSource = tmp_buf[0] & 0xf;
+ if (rspSource >= BT_INFO_SRC_8192E_2ANT_MAX)
+ rspSource = BT_INFO_SRC_8192E_2ANT_WIFI_FW;
+ coex_sta->bt_info_c2h_cnt[rspSource]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length=%d, hex data=[",
+ rspSource, length);
+ for (i = 0; i < length; i++) {
+ coex_sta->bt_info_c2h[rspSource][i] = tmp_buf[i];
+ if (i == 1)
+ bt_info = tmp_buf[i];
+ if (i == length-1)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "0x%02x]\n", tmp_buf[i]);
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "0x%02x, ", tmp_buf[i]);
+ }
+
+ if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rspSource) {
+ coex_sta->bt_retry_cnt = /* [3:0] */
+ coex_sta->bt_info_c2h[rspSource][2] & 0xf;
+
+ coex_sta->bt_rssi =
+ coex_sta->bt_info_c2h[rspSource][3] * 2 + 10;
+
+ coex_sta->bt_info_ext =
+ coex_sta->bt_info_c2h[rspSource][4];
+
+ /* Here we need to resend some wifi info to BT
+ * because BT was reset and lost the info. */
+ if ((coex_sta->bt_info_ext & BIT1)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "bit1, send wifi BW&Chnl to BT!!\n");
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ if (wifi_connected)
+ ex_halbtc8192e2ant_media_status_notify(
+ btcoexist,
+ BTC_MEDIA_CONNECT);
+ else
+ ex_halbtc8192e2ant_media_status_notify(
+ btcoexist,
+ BTC_MEDIA_DISCONNECT);
+ }
+
+ if ((coex_sta->bt_info_ext & BIT3)) {
+ if (!btcoexist->manual_control &&
+ !btcoexist->stop_coex_dm) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "bit3, BT NOT ignore Wlan active!\n");
+ halbtc8192e2ant_IgnoreWlanAct(btcoexist,
+ FORCE_EXEC,
+ false);
+ }
+ } else {
+ /* BT already NOT ignore Wlan active,
+ * do nothing here. */
+ }
+
+#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
+ if ((coex_sta->bt_info_ext & BIT4)) {
+ /* BT auto report already enabled, do nothing */
+ } else {
+ halbtc8192e2ant_bt_autoreport(btcoexist, FORCE_EXEC,
+ true);
+ }
+#endif
+ }
+
+ /* check BIT2 first ==> check if bt is under inquiry or page scan */
+ if (bt_info & BT_INFO_8192E_2ANT_B_INQ_PAGE)
+ coex_sta->c2h_bt_inquiry_page = true;
+ else
+ coex_sta->c2h_bt_inquiry_page = false;
+
+ /* set link exist status */
+ if (!(bt_info & BT_INFO_8192E_2ANT_B_CONNECTION)) {
+ coex_sta->bt_link_exist = false;
+ coex_sta->pan_exist = false;
+ coex_sta->a2dp_exist = false;
+ coex_sta->hid_exist = false;
+ coex_sta->sco_exist = false;
+ } else { /* connection exists */
+ coex_sta->bt_link_exist = true;
+ if (bt_info & BT_INFO_8192E_2ANT_B_FTP)
+ coex_sta->pan_exist = true;
+ else
+ coex_sta->pan_exist = false;
+ if (bt_info & BT_INFO_8192E_2ANT_B_A2DP)
+ coex_sta->a2dp_exist = true;
+ else
+ coex_sta->a2dp_exist = false;
+ if (bt_info & BT_INFO_8192E_2ANT_B_HID)
+ coex_sta->hid_exist = true;
+ else
+ coex_sta->hid_exist = false;
+ if (bt_info & BT_INFO_8192E_2ANT_B_SCO_ESCO)
+ coex_sta->sco_exist = true;
+ else
+ coex_sta->sco_exist = false;
+ }
+
+ halbtc8192e2ant_update_btlink_info(btcoexist);
+
+ if (!(bt_info & BT_INFO_8192E_2ANT_B_CONNECTION)) {
+ coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Non-Connected idle!!!\n");
+ } else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) {
+ coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
+ } else if ((bt_info & BT_INFO_8192E_2ANT_B_SCO_ESCO) ||
+ (bt_info & BT_INFO_8192E_2ANT_B_SCO_BUSY)) {
+ coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
+ } else if (bt_info & BT_INFO_8192E_2ANT_B_ACL_BUSY) {
+ coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
+ } else {
+ coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
+ }
+
+ if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+ (BT_8192E_2ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8192E_2ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) {
+ bt_busy = true;
+ limited_dig = true;
+ } else {
+ bt_busy = false;
+ limited_dig = false;
+ }
+
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
+ coex_dm->limited_dig = limited_dig;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+
+ halbtc8192e2ant_run_coexist_mechanism(btcoexist);
+}
+
+void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ if (BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex] StackOP Inquiry/page/pair start notify\n");
+ else if (BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex] StackOP Inquiry/page/pair finish notify\n");
+}
+
+void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+
+ halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
+ ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+}
+
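+/* Called periodically: dump version info for the first few runs, then either
+ * query BT info directly or re-run the coexist mechanism on status change. */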
+void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist)
+{
+ static u8 dis_ver_info_cnt = 0;
+ u32 fw_ver = 0, bt_patch_ver = 0;
+	struct btc_board_info *board_info = &btcoexist->board_info;
+	struct btc_stack_info *stack_info = &btcoexist->stack_info;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "=======================Periodical=======================\n");
+ if (dis_ver_info_cnt <= 5) {
+ dis_ver_info_cnt += 1;
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "************************************************\n");
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num, board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "BT stack/ hci ext ver = %s / %d\n",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
+ &bt_patch_ver);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "************************************************\n");
+ }
+
+#if(BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
+ halbtc8192e2ant_querybt_info(btcoexist);
+ halbtc8192e2ant_monitor_bt_ctr(btcoexist);
+ halbtc8192e2ant_monitor_bt_enable_disable(btcoexist);
+#else
+ if (halbtc8192e2ant_iswifi_status_changed(btcoexist) ||
+ coex_dm->auto_tdma_adjust)
+ halbtc8192e2ant_run_coexist_mechanism(btcoexist);
+#endif
+}
+
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.h b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.h
new file mode 100644
index 000000000000..6d109edb8950
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.h
@@ -0,0 +1,162 @@
+/*****************************************************************
+ * The following is for 8192E 2Ant BT Co-exist definition
+ *****************************************************************/
+#define BT_AUTO_REPORT_ONLY_8192E_2ANT 0
+
+#define BT_INFO_8192E_2ANT_B_FTP BIT7
+#define BT_INFO_8192E_2ANT_B_A2DP BIT6
+#define BT_INFO_8192E_2ANT_B_HID BIT5
+#define BT_INFO_8192E_2ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8192E_2ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8192E_2ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8192E_2ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8192E_2ANT_B_CONNECTION BIT0
+
+#define BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT 2
+
+enum bt_info_src_8192e_2ant {
+ BT_INFO_SRC_8192E_2ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8192E_2ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8192E_2ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8192E_2ANT_MAX
+};
+
+enum bt_8192e_2ant_bt_status {
+ BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8192E_2ANT_BT_STATUS_INQ_PAGE = 0x2,
+ BT_8192E_2ANT_BT_STATUS_ACL_BUSY = 0x3,
+ BT_8192E_2ANT_BT_STATUS_SCO_BUSY = 0x4,
+ BT_8192E_2ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8192E_2ANT_BT_STATUS_MAX
+};
+
+enum bt_8192e_2ant_coex_algo {
+ BT_8192E_2ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8192E_2ANT_COEX_ALGO_SCO = 0x1,
+ BT_8192E_2ANT_COEX_ALGO_SCO_PAN = 0x2,
+ BT_8192E_2ANT_COEX_ALGO_HID = 0x3,
+ BT_8192E_2ANT_COEX_ALGO_A2DP = 0x4,
+ BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS = 0x5,
+ BT_8192E_2ANT_COEX_ALGO_PANEDR = 0x6,
+ BT_8192E_2ANT_COEX_ALGO_PANHS = 0x7,
+ BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP = 0x8,
+ BT_8192E_2ANT_COEX_ALGO_PANEDR_HID = 0x9,
+ BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0xa,
+ BT_8192E_2ANT_COEX_ALGO_HID_A2DP = 0xb,
+ BT_8192E_2ANT_COEX_ALGO_MAX = 0xc
+};
+
+struct coex_dm_8192e_2ant {
+ /* fw mechanism */
+ u8 pre_dec_bt_pwr;
+ u8 cur_dec_bt_pwr;
+ u8 pre_fw_dac_swing_lvl;
+ u8 cur_fw_dac_swing_lvl;
+ bool cur_ignore_wlan_act;
+ bool pre_ignore_wlan_act;
+ u8 pre_ps_tdma;
+ u8 cur_ps_tdma;
+ u8 ps_tdma_para[5];
+ u8 ps_tdma_du_adj_type;
+ bool reset_tdma_adjust;
+ bool auto_tdma_adjust;
+ bool pre_ps_tdma_on;
+ bool cur_ps_tdma_on;
+ bool pre_bt_auto_report;
+ bool cur_bt_auto_report;
+
+ /* sw mechanism */
+ bool pre_rf_rx_lpf_shrink;
+ bool cur_rf_rx_lpf_shrink;
+ u32 bt_rf0x1e_backup;
+ bool pre_low_penalty_ra;
+ bool cur_low_penalty_ra;
+ bool pre_dac_swing_on;
+ u32 pre_dac_swing_lvl;
+ bool cur_dac_swing_on;
+ u32 cur_dac_swing_lvl;
+ bool pre_adc_back_off;
+ bool cur_adc_back_off;
+ bool pre_agc_table_en;
+ bool cur_agc_table_en;
+ u32 pre_val0x6c0;
+ u32 cur_val0x6c0;
+ u32 pre_val0x6c4;
+ u32 cur_val0x6c4;
+ u32 pre_val0x6c8;
+ u32 cur_val0x6c8;
+ u8 pre_val0x6cc;
+ u8 cur_val0x6cc;
+ bool limited_dig;
+
+ u32 backup_arfr_cnt1; /* Auto Rate Fallback Retry cnt */
+ u32 backup_arfr_cnt2; /* Auto Rate Fallback Retry cnt */
+ u16 backup_retrylimit;
+ u8 backup_ampdu_maxtime;
+
+ /* algorithm related */
+ u8 pre_algorithm;
+ u8 cur_algorithm;
+ u8 bt_status;
+ u8 wifi_chnl_info[3];
+
+ u8 pre_sstype;
+ u8 cur_sstype;
+
+ u32 prera_mask;
+ u32 curra_mask;
+ u8 curra_masktype;
+ u8 pre_arfrtype;
+ u8 cur_arfrtype;
+ u8 pre_retrylimit_type;
+ u8 cur_retrylimit_type;
+ u8 pre_ampdutime_type;
+ u8 cur_ampdutime_type;
+};
+
+struct coex_sta_8192e_2ant {
+ bool bt_link_exist;
+ bool sco_exist;
+ bool a2dp_exist;
+ bool hid_exist;
+ bool pan_exist;
+
+ bool under_lps;
+ bool under_ips;
+ u32 high_priority_tx;
+ u32 high_priority_rx;
+ u32 low_priority_tx;
+ u32 low_priority_rx;
+ u8 bt_rssi;
+ u8 pre_bt_rssi_state;
+ u8 pre_wifi_rssi_state[4];
+ bool c2h_bt_info_req_sent;
+ u8 bt_info_c2h[BT_INFO_SRC_8192E_2ANT_MAX][10];
+ u32 bt_info_c2h_cnt[BT_INFO_SRC_8192E_2ANT_MAX];
+ bool c2h_bt_inquiry_page;
+ u8 bt_retry_cnt;
+ u8 bt_info_ext;
+};
+
+/****************************************************************
+ * The following is the interface which will notify the coex module.
+ ****************************************************************/
+void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist);
+void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist);
+void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
+		u8 *tmpBuf, u8 length);
+void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist);
+void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist);
+void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist);
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c
new file mode 100644
index 000000000000..180d6f12e7b5
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c
@@ -0,0 +1,3780 @@
+//============================================================
+// Description:
+//
+// This file is for RTL8723A Co-exist mechanism
+//
+// History
+// 2012/08/22 Cosa first check in.
+// 2012/11/14 Cosa Revise for 8723A 2Ant out sourcing.
+//
+//============================================================
+
+//============================================================
+// include files
+//============================================================
+#include "Mp_Precomp.h"
+#if(BT_30_SUPPORT == 1)
+//============================================================
+// Global variables, these are static variables
+//============================================================
+static COEX_DM_8723A_2ANT GLCoexDm8723a2Ant;
+static PCOEX_DM_8723A_2ANT pCoexDm=&GLCoexDm8723a2Ant;
+static COEX_STA_8723A_2ANT GLCoexSta8723a2Ant;
+static PCOEX_STA_8723A_2ANT pCoexSta=&GLCoexSta8723a2Ant;
+
+const char *const GLBtInfoSrc8723a2Ant[]={
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+//============================================================
+// local function proto type if needed
+//============================================================
+//============================================================
+// local function start with halbtc8723a2ant_
+//============================================================
+BOOLEAN
+halbtc8723a2ant_IsWifiIdle(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bWifiConnected=FALSE, bScan=FALSE, bLink=FALSE, bRoam=FALSE;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+
+ if(bWifiConnected)
+ return FALSE;
+ if(bScan)
+ return FALSE;
+ if(bLink)
+ return FALSE;
+ if(bRoam)
+ return FALSE;
+
+ return true;
+}
+
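+//============================================================
+// Note on the RSSI state helpers below (added summary, not part
+// of the original vendor comments): both the BT and wifi RSSI
+// helpers keep the previous state in pCoexSta and only move up a
+// level (LOW -> HIGH, or LOW -> MEDIUM -> HIGH for levelNum 3)
+// once the RSSI exceeds the threshold plus
+// BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT, which provides hysteresis
+// so the state does not flap when the RSSI sits near a threshold.
+//============================================================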
+u1Byte
+halbtc8723a2ant_BtRssiState(
+ u1Byte levelNum,
+ u1Byte rssiThresh,
+ u1Byte rssiThresh1
+ )
+{
+ s4Byte btRssi=0;
+ u1Byte btRssiState=pCoexSta->preBtRssiState;
+
+ btRssi = pCoexSta->btRssi;
+
+ if(levelNum == 2)
+ {
+ if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+ {
+ btRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+ }
+ }
+ else
+ {
+ if(btRssi < rssiThresh)
+ {
+ btRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+ }
+ }
+ }
+ else if(levelNum == 3)
+ {
+ if(rssiThresh > rssiThresh1)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi thresh error!!\n"));
+ return pCoexSta->preBtRssiState;
+ }
+
+ if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+ {
+ btRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+ }
+ }
+ else if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_MEDIUM) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_MEDIUM))
+ {
+ if(btRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+ {
+ btRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+ }
+ else if(btRssi < rssiThresh)
+ {
+ btRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Medium\n"));
+ }
+ }
+ else
+ {
+ if(btRssi < rssiThresh1)
+ {
+ btRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+ }
+ }
+ }
+
+ pCoexSta->preBtRssiState = btRssiState;
+
+ return btRssiState;
+}
+
+u1Byte
+halbtc8723a2ant_WifiRssiState(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte index,
+ IN u1Byte levelNum,
+ IN u1Byte rssiThresh,
+ IN u1Byte rssiThresh1
+ )
+{
+ s4Byte wifiRssi=0;
+ u1Byte wifiRssiState=pCoexSta->preWifiRssiState[index];
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+
+ if(levelNum == 2)
+ {
+ if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+ {
+ wifiRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+ }
+ }
+ else
+ {
+ if(wifiRssi < rssiThresh)
+ {
+ wifiRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+ }
+ }
+ }
+ else if(levelNum == 3)
+ {
+ if(rssiThresh > rssiThresh1)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI thresh error!!\n"));
+ return pCoexSta->preWifiRssiState[index];
+ }
+
+ if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+ {
+ wifiRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+ }
+ }
+ else if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_MEDIUM) ||
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_MEDIUM))
+ {
+ if(wifiRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+ {
+ wifiRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+ }
+ else if(wifiRssi < rssiThresh)
+ {
+ wifiRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Medium\n"));
+ }
+ }
+ else
+ {
+ if(wifiRssi < rssiThresh1)
+ {
+ wifiRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+ }
+ }
+ }
+
+ pCoexSta->preWifiRssiState[index] = wifiRssiState;
+
+ return wifiRssiState;
+}
+
+VOID
+halbtc8723a2ant_IndicateWifiChnlBwInfo(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ u1Byte H2C_Parameter[3] ={0};
+ u4Byte wifiBw;
+ u1Byte wifiCentralChnl;
+
+	// only for 2.4G do we need to inform BT of the channel mask
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifiCentralChnl);
+ if( (BTC_MEDIA_CONNECT == type) &&
+ (wifiCentralChnl <= 14) )
+ {
+ H2C_Parameter[0] = 0x1;
+ H2C_Parameter[1] = wifiCentralChnl;
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ H2C_Parameter[2] = 0x30;
+ else
+ H2C_Parameter[2] = 0x20;
+ }
+
+ pCoexDm->wifiChnlInfo[0] = H2C_Parameter[0];
+ pCoexDm->wifiChnlInfo[1] = H2C_Parameter[1];
+ pCoexDm->wifiChnlInfo[2] = H2C_Parameter[2];
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x19=0x%x\n",
+ H2C_Parameter[0]<<16|H2C_Parameter[1]<<8|H2C_Parameter[2]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x19, 3, H2C_Parameter);
+}
+
+VOID
+halbtc8723a2ant_QueryBtInfo(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ pCoexSta->bC2hBtInfoReqSent = true;
+
+ H2C_Parameter[0] |= BIT0; // trigger
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Query Bt Info, FW write 0x38=0x%x\n",
+ H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x38, 1, H2C_Parameter);
+}
+u1Byte
+halbtc8723a2ant_ActionAlgorithm(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info;
+ BOOLEAN bBtHsOn=FALSE, bBtBusy=FALSE, limited_dig=FALSE;
+ u1Byte algorithm=BT_8723A_2ANT_COEX_ALGO_UNDEFINED;
+ u1Byte numOfDiffProfile=0;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+
+ //======================
+ // here we get BT status first
+ //======================
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_IDLE;
+
+ if((pStackInfo->bScoExist) ||(bBtHsOn) ||(pStackInfo->bHidExist))
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO or HID or HS exists, set BT non-idle !!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_NON_IDLE;
+ }
+ else
+ {
+ // A2dp profile
+ if( (pBtCoexist->stack_info.numOfLink == 1) &&
+ (pStackInfo->bA2dpExist) )
+ {
+ if( (pCoexSta->lowPriorityTx+ pCoexSta->lowPriorityRx) < 100)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP, low priority tx+rx < 100, set BT connected-idle!!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP, low priority tx+rx >= 100, set BT non-idle!!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_NON_IDLE;
+ }
+ }
+ // Pan profile
+ if( (pBtCoexist->stack_info.numOfLink == 1) &&
+ (pStackInfo->bPanExist) )
+ {
+ if((pCoexSta->lowPriorityTx+ pCoexSta->lowPriorityRx) < 600)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN, low priority tx+rx < 600, set BT connected-idle!!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE;
+ }
+ else
+ {
+ if(pCoexSta->lowPriorityTx)
+ {
+ if((pCoexSta->lowPriorityRx /pCoexSta->lowPriorityTx)>9 )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN, low priority rx/tx > 9, set BT connected-idle!!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE;
+ }
+ }
+ }
+ if(BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE != pCoexDm->btStatus)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN, set BT non-idle!!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_NON_IDLE;
+ }
+ }
+ // Pan+A2dp profile
+ if( (pBtCoexist->stack_info.numOfLink == 2) &&
+ (pStackInfo->bA2dpExist) &&
+ (pStackInfo->bPanExist) )
+ {
+ if((pCoexSta->lowPriorityTx+ pCoexSta->lowPriorityRx) < 600)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN+A2DP, low priority tx+rx < 600, set BT connected-idle!!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE;
+ }
+ else
+ {
+ if(pCoexSta->lowPriorityTx)
+ {
+ if((pCoexSta->lowPriorityRx /pCoexSta->lowPriorityTx)>9 )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN+A2DP, low priority rx/tx > 9, set BT connected-idle!!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE;
+ }
+ }
+ }
+ if(BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE != pCoexDm->btStatus)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN+A2DP, set BT non-idle!!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_NON_IDLE;
+ }
+ }
+ }
+ if(BT_8723A_2ANT_BT_STATUS_IDLE != pCoexDm->btStatus)
+ {
+ bBtBusy = true;
+ limited_dig = true;
+ }
+ else
+ {
+ bBtBusy = FALSE;
+ limited_dig = FALSE;
+ }
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bBtBusy);
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+ //======================
+
+ if(!pStackInfo->bBtLinkExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], No profile exists!!!\n"));
+ return algorithm;
+ }
+
+ if(pStackInfo->bScoExist)
+ numOfDiffProfile++;
+ if(pStackInfo->bHidExist)
+ numOfDiffProfile++;
+ if(pStackInfo->bPanExist)
+ numOfDiffProfile++;
+ if(pStackInfo->bA2dpExist)
+ numOfDiffProfile++;
+
+ if(numOfDiffProfile == 1)
+ {
+ if(pStackInfo->bScoExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO only\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ if(pStackInfo->bHidExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID only\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_HID;
+ }
+ else if(pStackInfo->bA2dpExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP only\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_A2DP;
+ }
+ else if(pStackInfo->bPanExist)
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(HS) only\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_PANHS;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(EDR) only\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR;
+ }
+ }
+ }
+ }
+ else if(numOfDiffProfile == 2)
+ {
+ if(pStackInfo->bScoExist)
+ {
+ if(pStackInfo->bHidExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_HID;
+ }
+ else if(pStackInfo->bA2dpExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP ==> SCO\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_SCO;
+ }
+ else if(pStackInfo->bPanExist)
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(HS)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(EDR)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ else
+ {
+ if( pStackInfo->bHidExist &&
+ pStackInfo->bA2dpExist )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP;
+ }
+ else if( pStackInfo->bHidExist &&
+ pStackInfo->bPanExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ else if( pStackInfo->bPanExist &&
+ pStackInfo->bA2dpExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ }
+ }
+ }
+ else if(numOfDiffProfile == 3)
+ {
+ if(pStackInfo->bScoExist)
+ {
+ if( pStackInfo->bHidExist &&
+ pStackInfo->bA2dpExist )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP ==> HID\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_HID;
+ }
+ else if( pStackInfo->bHidExist &&
+ pStackInfo->bPanExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(HS)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(EDR)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ else if( pStackInfo->bPanExist &&
+ pStackInfo->bA2dpExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(HS)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ else
+ {
+ if( pStackInfo->bHidExist &&
+ pStackInfo->bPanExist &&
+ pStackInfo->bA2dpExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ }
+ }
+ }
+ else if(numOfDiffProfile >= 3)
+ {
+ if(pStackInfo->bScoExist)
+ {
+ if( pStackInfo->bHidExist &&
+ pStackInfo->bPanExist &&
+ pStackInfo->bA2dpExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"));
+
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ }
+
+ return algorithm;
+}
+
+BOOLEAN
+halbtc8723a2ant_NeedToDecBtPwr(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bRet=FALSE;
+ BOOLEAN bBtHsOn=FALSE, bWifiConnected=FALSE;
+ s4Byte btHsRssi=0;
+
+ if(!pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn))
+ return FALSE;
+ if(!pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected))
+ return FALSE;
+ if(!pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi))
+ return FALSE;
+
+ if(bWifiConnected)
+ {
+ if(bBtHsOn)
+ {
+ if(btHsRssi > 37)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], Need to decrease bt power for HS mode!!\n"));
+ bRet = true;
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], Need to decrease bt power for Wifi is connected!!\n"));
+ bRet = true;
+ }
+ }
+
+ return bRet;
+}
+
+VOID
+halbtc8723a2ant_SetFwDacSwingLevel(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte dacSwingLvl
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+	// There are several types of DAC swing
+ // 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
+ H2C_Parameter[0] = dacSwingLvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Set Dac Swing Level=0x%x\n", dacSwingLvl));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x29=0x%x\n", H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x29, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8723a2ant_SetFwDecBtPwr(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bDecBtPwr
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ H2C_Parameter[0] = 0;
+
+ if(bDecBtPwr)
+ {
+ H2C_Parameter[0] |= BIT1;
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], decrease Bt Power : %s, FW write 0x21=0x%x\n",
+ (bDecBtPwr? "Yes!!":"No!!"), H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x21, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8723a2ant_DecBtPwr(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bDecBtPwr
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s Dec BT power = %s\n",
+ (bForceExec? "force to":""), ((bDecBtPwr)? "ON":"OFF")));
+ pCoexDm->bCurDecBtPwr = bDecBtPwr;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+ pCoexDm->bPreDecBtPwr, pCoexDm->bCurDecBtPwr));
+
+ if(pCoexDm->bPreDecBtPwr == pCoexDm->bCurDecBtPwr)
+ return;
+ }
+ halbtc8723a2ant_SetFwDecBtPwr(pBtCoexist, pCoexDm->bCurDecBtPwr);
+
+ pCoexDm->bPreDecBtPwr = pCoexDm->bCurDecBtPwr;
+}
+
+VOID
+halbtc8723a2ant_FwDacSwingLvl(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u1Byte fwDacSwingLvl
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set FW Dac Swing level = %d\n",
+ (bForceExec? "force to":""), fwDacSwingLvl));
+ pCoexDm->curFwDacSwingLvl = fwDacSwingLvl;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+ pCoexDm->preFwDacSwingLvl, pCoexDm->curFwDacSwingLvl));
+
+ if(pCoexDm->preFwDacSwingLvl == pCoexDm->curFwDacSwingLvl)
+ return;
+ }
+
+ halbtc8723a2ant_SetFwDacSwingLevel(pBtCoexist, pCoexDm->curFwDacSwingLvl);
+
+ pCoexDm->preFwDacSwingLvl = pCoexDm->curFwDacSwingLvl;
+}
+
+VOID
+halbtc8723a2ant_SetSwRfRxLpfCorner(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bRxRfShrinkOn
+ )
+{
+ if(bRxRfShrinkOn)
+ {
+ //Shrink RF Rx LPF corner
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Shrink RF Rx LPF corner!!\n"));
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, 0xf0ff7);
+ }
+ else
+ {
+ //Resume RF Rx LPF corner
+		// After initialization, we can use pCoexDm->btRf0x1eBackup
+ if(pBtCoexist->initilized)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n"));
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, pCoexDm->btRf0x1eBackup);
+ }
+ }
+}
+
+VOID
+halbtc8723a2ant_RfShrink(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bRxRfShrinkOn
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (bForceExec? "force to":""), ((bRxRfShrinkOn)? "ON":"OFF")));
+ pCoexDm->bCurRfRxLpfShrink = bRxRfShrinkOn;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
+ pCoexDm->bPreRfRxLpfShrink, pCoexDm->bCurRfRxLpfShrink));
+
+ if(pCoexDm->bPreRfRxLpfShrink == pCoexDm->bCurRfRxLpfShrink)
+ return;
+ }
+ halbtc8723a2ant_SetSwRfRxLpfCorner(pBtCoexist, pCoexDm->bCurRfRxLpfShrink);
+
+ pCoexDm->bPreRfRxLpfShrink = pCoexDm->bCurRfRxLpfShrink;
+}
+
+VOID
+halbtc8723a2ant_SetSwPenaltyTxRateAdaptive(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bLowPenaltyRa
+ )
+{
+ u1Byte tmpU1;
+
+ tmpU1 = pBtCoexist->btc_read_1byte(pBtCoexist, 0x4fd);
+ tmpU1 |= BIT0;
+ if(bLowPenaltyRa)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set low penalty!!\n"));
+ tmpU1 &= ~BIT2;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set normal!!\n"));
+ tmpU1 |= BIT2;
+ }
+
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x4fd, tmpU1);
+}
+
+VOID
+halbtc8723a2ant_LowPenaltyRa(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bLowPenaltyRa
+ )
+{
+	return;	// NOTE: unconditional return; the low penalty RA code below is currently disabled
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (bForceExec? "force to":""), ((bLowPenaltyRa)? "ON":"OFF")));
+ pCoexDm->bCurLowPenaltyRa = bLowPenaltyRa;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+ pCoexDm->bPreLowPenaltyRa, pCoexDm->bCurLowPenaltyRa));
+
+ if(pCoexDm->bPreLowPenaltyRa == pCoexDm->bCurLowPenaltyRa)
+ return;
+ }
+ halbtc8723a2ant_SetSwPenaltyTxRateAdaptive(pBtCoexist, pCoexDm->bCurLowPenaltyRa);
+
+ pCoexDm->bPreLowPenaltyRa = pCoexDm->bCurLowPenaltyRa;
+}
+
+VOID
+halbtc8723a2ant_SetSwFullTimeDacSwing(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bSwDacSwingOn,
+ IN u4Byte swDacSwingLvl
+ )
+{
+ if(bSwDacSwingOn)
+ {
+ pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, swDacSwingLvl);
+ }
+ else
+ {
+ pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, 0xc0);
+ }
+}
+
+
+VOID
+halbtc8723a2ant_DacSwing(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bDacSwingOn,
+ IN u4Byte dacSwingLvl
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn DacSwing=%s, dacSwingLvl=0x%x\n",
+ (bForceExec? "force to":""), ((bDacSwingOn)? "ON":"OFF"), dacSwingLvl));
+ pCoexDm->bCurDacSwingOn = bDacSwingOn;
+ pCoexDm->curDacSwingLvl = dacSwingLvl;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ pCoexDm->bPreDacSwingOn, pCoexDm->preDacSwingLvl,
+ pCoexDm->bCurDacSwingOn, pCoexDm->curDacSwingLvl));
+
+ if( (pCoexDm->bPreDacSwingOn == pCoexDm->bCurDacSwingOn) &&
+ (pCoexDm->preDacSwingLvl == pCoexDm->curDacSwingLvl) )
+ return;
+ }
+ mdelay(30);
+ halbtc8723a2ant_SetSwFullTimeDacSwing(pBtCoexist, bDacSwingOn, dacSwingLvl);
+
+ pCoexDm->bPreDacSwingOn = pCoexDm->bCurDacSwingOn;
+ pCoexDm->preDacSwingLvl = pCoexDm->curDacSwingLvl;
+}
+
+VOID
+halbtc8723a2ant_SetAdcBackOff(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bAdcBackOff
+ )
+{
+ if(bAdcBackOff)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level On!\n"));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc04,0x3a07611);
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level Off!\n"));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc04,0x3a05611);
+ }
+}
+
+VOID
+halbtc8723a2ant_AdcBackOff(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bAdcBackOff
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn AdcBackOff = %s\n",
+ (bForceExec? "force to":""), ((bAdcBackOff)? "ON":"OFF")));
+ pCoexDm->bCurAdcBackOff = bAdcBackOff;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n",
+ pCoexDm->bPreAdcBackOff, pCoexDm->bCurAdcBackOff));
+
+ if(pCoexDm->bPreAdcBackOff == pCoexDm->bCurAdcBackOff)
+ return;
+ }
+ halbtc8723a2ant_SetAdcBackOff(pBtCoexist, pCoexDm->bCurAdcBackOff);
+
+ pCoexDm->bPreAdcBackOff = pCoexDm->bCurAdcBackOff;
+}
+
+VOID
+halbtc8723a2ant_SetAgcTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bAgcTableEn
+ )
+{
+ u1Byte rssiAdjustVal=0;
+
+ if(bAgcTableEn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table On!\n"));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4e1c0001);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4d1d0001);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4c1e0001);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4b1f0001);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4a200001);
+
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0xdc000);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x90000);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x51000);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x12000);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1a, 0xfffff, 0x00355);
+
+ rssiAdjustVal = 6;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table Off!\n"));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x641c0001);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x631d0001);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x621e0001);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x611f0001);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x60200001);
+
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x32000);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x71000);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0xb0000);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0xfc000);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1a, 0xfffff, 0x30355);
+ }
+
+ // set rssiAdjustVal for wifi module.
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, &rssiAdjustVal);
+}
+
+
+VOID
+halbtc8723a2ant_AgcTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bAgcTableEn
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s %s Agc Table\n",
+ (bForceExec? "force to":""), ((bAgcTableEn)? "Enable":"Disable")));
+ pCoexDm->bCurAgcTableEn = bAgcTableEn;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ pCoexDm->bPreAgcTableEn, pCoexDm->bCurAgcTableEn));
+
+ if(pCoexDm->bPreAgcTableEn == pCoexDm->bCurAgcTableEn)
+ return;
+ }
+ halbtc8723a2ant_SetAgcTable(pBtCoexist, bAgcTableEn);
+
+ pCoexDm->bPreAgcTableEn = pCoexDm->bCurAgcTableEn;
+}
+
+VOID
+halbtc8723a2ant_SetCoexTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u4Byte val0x6c0,
+ IN u4Byte val0x6c8,
+ IN u1Byte val0x6cc
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc));
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x6cc, val0x6cc);
+}
+
+VOID
+halbtc8723a2ant_CoexTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u4Byte val0x6c0,
+ IN u4Byte val0x6c8,
+ IN u1Byte val0x6cc
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ (bForceExec? "force to":""), val0x6c0, val0x6c8, val0x6cc));
+ pCoexDm->curVal0x6c0 = val0x6c0;
+ pCoexDm->curVal0x6c8 = val0x6c8;
+ pCoexDm->curVal0x6cc = val0x6cc;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], preVal0x6c0=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+ pCoexDm->preVal0x6c0, pCoexDm->preVal0x6c8, pCoexDm->preVal0x6cc));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], curVal0x6c0=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+ pCoexDm->curVal0x6c0, pCoexDm->curVal0x6c8, pCoexDm->curVal0x6cc));
+
+ if( (pCoexDm->preVal0x6c0 == pCoexDm->curVal0x6c0) &&
+ (pCoexDm->preVal0x6c8 == pCoexDm->curVal0x6c8) &&
+ (pCoexDm->preVal0x6cc == pCoexDm->curVal0x6cc) )
+ return;
+ }
+ halbtc8723a2ant_SetCoexTable(pBtCoexist, val0x6c0, val0x6c8, val0x6cc);
+
+ pCoexDm->preVal0x6c0 = pCoexDm->curVal0x6c0;
+ pCoexDm->preVal0x6c8 = pCoexDm->curVal0x6c8;
+ pCoexDm->preVal0x6cc = pCoexDm->curVal0x6cc;
+}
+
+VOID
+halbtc8723a2ant_SetFwIgnoreWlanAct(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bEnable
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ if(bEnable)
+ {
+ H2C_Parameter[0] |= BIT0; // function enable
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x25=0x%x\n",
+ H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x25, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8723a2ant_IgnoreWlanAct(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bEnable
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn Ignore WlanAct %s\n",
+ (bForceExec? "force to":""), (bEnable? "ON":"OFF")));
+ pCoexDm->bCurIgnoreWlanAct = bEnable;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ pCoexDm->bPreIgnoreWlanAct, pCoexDm->bCurIgnoreWlanAct));
+
+ if(pCoexDm->bPreIgnoreWlanAct == pCoexDm->bCurIgnoreWlanAct)
+ return;
+ }
+ halbtc8723a2ant_SetFwIgnoreWlanAct(pBtCoexist, bEnable);
+
+ pCoexDm->bPreIgnoreWlanAct = pCoexDm->bCurIgnoreWlanAct;
+}
+
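+//============================================================
+// Note on the PS TDMA control below (added summary): the TDMA
+// setting is sent to firmware as a 5-byte H2C command with id
+// 0x3a; the raw bytes are cached in pCoexDm->psTdmaPara[].
+// halbtc8723a2ant_PsTdma() picks one of the predefined parameter
+// sets by "type" (forcing type 8 when the BT tx+rx counters
+// exceed 3000) and skips the H2C write when neither the on/off
+// state nor the type has changed since the last call.
+//============================================================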
+VOID
+halbtc8723a2ant_SetFwPstdma(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte byte1,
+ IN u1Byte byte2,
+ IN u1Byte byte3,
+ IN u1Byte byte4,
+ IN u1Byte byte5
+ )
+{
+ u1Byte H2C_Parameter[5] ={0};
+
+ H2C_Parameter[0] = byte1;
+ H2C_Parameter[1] = byte2;
+ H2C_Parameter[2] = byte3;
+ H2C_Parameter[3] = byte4;
+ H2C_Parameter[4] = byte5;
+
+ pCoexDm->psTdmaPara[0] = byte1;
+ pCoexDm->psTdmaPara[1] = byte2;
+ pCoexDm->psTdmaPara[2] = byte3;
+ pCoexDm->psTdmaPara[3] = byte4;
+ pCoexDm->psTdmaPara[4] = byte5;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x3a(5bytes)=0x%x%08x\n",
+ H2C_Parameter[0],
+ H2C_Parameter[1]<<24|H2C_Parameter[2]<<16|H2C_Parameter[3]<<8|H2C_Parameter[4]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x3a, 5, H2C_Parameter);
+}
+
+VOID
+halbtc8723a2ant_PsTdma(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bTurnOn,
+ IN u1Byte type
+ )
+{
+ u4Byte btTxRxCnt=0;
+
+ btTxRxCnt = pCoexSta->highPriorityTx+pCoexSta->highPriorityRx+
+ pCoexSta->lowPriorityTx+pCoexSta->lowPriorityRx;
+
+ if(btTxRxCnt > 3000)
+ {
+ pCoexDm->bCurPsTdmaOn = true;
+ pCoexDm->curPsTdma = 8;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], turn ON PS TDMA, type=%d for BT tx/rx counters=%d(>3000)\n",
+ pCoexDm->curPsTdma, btTxRxCnt));
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (bForceExec? "force to":""), (bTurnOn? "ON":"OFF"), type));
+ pCoexDm->bCurPsTdmaOn = bTurnOn;
+ pCoexDm->curPsTdma = type;
+ }
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ pCoexDm->bPrePsTdmaOn, pCoexDm->bCurPsTdmaOn));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ pCoexDm->prePsTdma, pCoexDm->curPsTdma));
+
+ if( (pCoexDm->bPrePsTdmaOn == pCoexDm->bCurPsTdmaOn) &&
+ (pCoexDm->prePsTdma == pCoexDm->curPsTdma) )
+ return;
+ }
+ if(pCoexDm->bCurPsTdmaOn)
+ {
+ switch(pCoexDm->curPsTdma)
+ {
+ case 1:
+ default:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0xe1, 0x98);
+ break;
+ case 2:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0xe1, 0x98);
+ break;
+ case 3:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0xe1, 0x98);
+ break;
+ case 4:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xa3, 0x5, 0x5, 0xe1, 0x80);
+ break;
+ case 5:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0x60, 0x98);
+ break;
+ case 6:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0x60, 0x98);
+ break;
+ case 7:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0x60, 0x98);
+ break;
+ case 8:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xa3, 0x5, 0x5, 0x60, 0x80);
+ break;
+ case 9:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0xe1, 0x98);
+ break;
+ case 10:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0xe1, 0x98);
+ break;
+ case 11:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0xe1, 0x98);
+ break;
+ case 12:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x5, 0x5, 0xe1, 0x98);
+ break;
+ case 13:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0x60, 0x98);
+ break;
+ case 14:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0x60, 0x98);
+ break;
+ case 15:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0x60, 0x98);
+ break;
+ case 16:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x5, 0x5, 0x60, 0x98);
+ break;
+ case 17:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xa3, 0x2f, 0x2f, 0x60, 0x80);
+ break;
+ case 18:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x5, 0x5, 0xe1, 0x98);
+ break;
+ case 19:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x25, 0xe1, 0x98);
+ break;
+ case 20:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x25, 0x60, 0x98);
+ break;
+ }
+ }
+ else
+ {
+ // disable PS tdma
+ switch(pCoexDm->curPsTdma)
+ {
+ case 0:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x8, 0x0);
+ break;
+ case 1:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+ break;
+ default:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x8, 0x0);
+ break;
+ }
+ }
+
+ // update pre state
+ pCoexDm->bPrePsTdmaOn = pCoexDm->bCurPsTdmaOn;
+ pCoexDm->prePsTdma = pCoexDm->curPsTdma;
+}
+
+
+VOID
+halbtc8723a2ant_CoexAllOff(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ // fw all off
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+
+ // sw all off
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+ // hw all off
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+}
+
+VOID
+halbtc8723a2ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ // force to reset coex mechanism
+ halbtc8723a2ant_CoexTable(pBtCoexist, FORCE_EXEC, 0x55555555, 0xffff, 0x3);
+ halbtc8723a2ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 0);
+ halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, FORCE_EXEC, 0x20);
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, FORCE_EXEC, FALSE);
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE);
+
+ halbtc8723a2ant_AgcTable(pBtCoexist, FORCE_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, FORCE_EXEC, FALSE);
+ halbtc8723a2ant_LowPenaltyRa(pBtCoexist, FORCE_EXEC, FALSE);
+ halbtc8723a2ant_RfShrink(pBtCoexist, FORCE_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, FORCE_EXEC, FALSE, 0xc0);
+}
+
+VOID
+halbtc8723a2ant_BtInquiryPage(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bLowPwrDisable=true;
+
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+}
+
+VOID
+halbtc8723a2ant_BtEnableAction(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bWifiConnected=FALSE;
+
+	// Here we need to resend some wifi info to BT
+	// because BT has been reset and lost the info.
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ if(bWifiConnected)
+ {
+ halbtc8723a2ant_IndicateWifiChnlBwInfo(pBtCoexist, BTC_MEDIA_CONNECT);
+ }
+ else
+ {
+ halbtc8723a2ant_IndicateWifiChnlBwInfo(pBtCoexist, BTC_MEDIA_DISCONNECT);
+ }
+
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE);
+}
+
+VOID
+halbtc8723a2ant_MonitorBtCtr(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u4Byte regHPTxRx, regLPTxRx, u4Tmp;
+ u4Byte regHPTx=0, regHPRx=0, regLPTx=0, regLPRx=0;
+ u1Byte u1Tmp;
+
+ regHPTxRx = 0x770;
+ regLPTxRx = 0x774;
+
+ u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regHPTxRx);
+ regHPTx = u4Tmp & MASKLWORD;
+ regHPRx = (u4Tmp & MASKHWORD)>>16;
+
+ u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regLPTxRx);
+ regLPTx = u4Tmp & MASKLWORD;
+ regLPRx = (u4Tmp & MASKHWORD)>>16;
+
+ pCoexSta->highPriorityTx = regHPTx;
+ pCoexSta->highPriorityRx = regHPRx;
+ pCoexSta->lowPriorityTx = regLPTx;
+ pCoexSta->lowPriorityRx = regLPRx;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], High Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ regHPTxRx, regHPTx, regHPTx, regHPRx, regHPRx));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], Low Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ regLPTxRx, regLPTx, regLPTx, regLPRx, regLPRx));
+
+ // reset counter
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0xc);
+}
+
+VOID
+halbtc8723a2ant_MonitorBtEnableDisable(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ static BOOLEAN bPreBtDisabled=FALSE;
+ static u4Byte btDisableCnt=0;
+ BOOLEAN bBtActive=true, bBtDisabled=FALSE;
+
+	// This function checks if BT is disabled
+
+ if( pCoexSta->highPriorityTx == 0 &&
+ pCoexSta->highPriorityRx == 0 &&
+ pCoexSta->lowPriorityTx == 0 &&
+ pCoexSta->lowPriorityRx == 0)
+ {
+ bBtActive = FALSE;
+ }
+ if( pCoexSta->highPriorityTx == 0xffff &&
+ pCoexSta->highPriorityRx == 0xffff &&
+ pCoexSta->lowPriorityTx == 0xffff &&
+ pCoexSta->lowPriorityRx == 0xffff)
+ {
+ bBtActive = FALSE;
+ }
+ if(bBtActive)
+ {
+ btDisableCnt = 0;
+ bBtDisabled = FALSE;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is enabled !!\n"));
+ }
+ else
+ {
+ btDisableCnt++;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], bt all counters=0, %d times!!\n",
+ btDisableCnt));
+ if(btDisableCnt >= 2)
+ {
+ bBtDisabled = true;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is disabled !!\n"));
+ }
+ }
+ if(bPreBtDisabled != bBtDisabled)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is from %s to %s!!\n",
+ (bPreBtDisabled ? "disabled":"enabled"),
+ (bBtDisabled ? "disabled":"enabled")));
+ bPreBtDisabled = bBtDisabled;
+ if(!bBtDisabled)
+ {
+ halbtc8723a2ant_BtEnableAction(pBtCoexist);
+ }
+ }
+}
+
+BOOLEAN
+halbtc8723a2ant_IsCommonAction(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info;
+ BOOLEAN bCommon=FALSE, bWifiConnected=FALSE;
+ BOOLEAN bLowPwrDisable=FALSE;
+
+ if(!pStackInfo->bBtLinkExist)
+ {
+ bLowPwrDisable = FALSE;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+ }
+ else
+ {
+ bLowPwrDisable = true;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+ }
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+
+ if(halbtc8723a2ant_IsWifiIdle(pBtCoexist) &&
+ BT_8723A_2ANT_BT_STATUS_IDLE == pCoexDm->btStatus)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi idle + Bt idle!!\n"));
+
+ halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+ bCommon = true;
+ }
+ else if(!halbtc8723a2ant_IsWifiIdle(pBtCoexist) &&
+ (BT_8723A_2ANT_BT_STATUS_IDLE == pCoexDm->btStatus) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non-idle + BT idle!!\n"));
+
+ halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+ bCommon = true;
+ }
+ else if(halbtc8723a2ant_IsWifiIdle(pBtCoexist) &&
+ (BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi idle + Bt connected idle!!\n"));
+
+ halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+ bCommon = true;
+ }
+ else if(!halbtc8723a2ant_IsWifiIdle(pBtCoexist) &&
+ (BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non-idle + Bt connected idle!!\n"));
+
+ halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+ bCommon = true;
+ }
+ else if(halbtc8723a2ant_IsWifiIdle(pBtCoexist) &&
+ (BT_8723A_2ANT_BT_STATUS_NON_IDLE == pCoexDm->btStatus) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi idle + BT non-idle!!\n"));
+
+ halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+ bCommon = true;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non-idle + BT non-idle!!\n"));
+ halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+
+ bCommon = FALSE;
+ }
+
+ return bCommon;
+}
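+
+//============================================================
+// Note on the duration adjustment below (added summary): the
+// algorithm tunes the PS TDMA wifi duration from the BT retry
+// counter reported in BT_Info.  "up" counts consecutive 2-second
+// windows with zero retries and widens the wifi slot after n of
+// them; "dn", together with m/n/WaitCount, narrows the wifi slot
+// when retries appear and backs off the re-check interval (m is
+// capped at 20, i.e. roughly 120 seconds) so the tuning does not
+// oscillate between two neighbouring settings.
+//============================================================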
+VOID
+halbtc8723a2ant_TdmaDurationAdjust(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bScoHid,
+ IN BOOLEAN bTxPause,
+ IN u1Byte maxInterval
+ )
+{
+ static s4Byte up,dn,m,n,WaitCount;
+ s4Byte result; //0: no change, +1: increase WiFi duration, -1: decrease WiFi duration
+ u1Byte retryCount=0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], TdmaDurationAdjust()\n"));
+
+ if(pCoexDm->bResetTdmaAdjust)
+ {
+ pCoexDm->bResetTdmaAdjust = FALSE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], first run TdmaDurationAdjust()!!\n"));
+ {
+ if(bScoHid)
+ {
+ if(bTxPause)
+ {
+ if(maxInterval == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+ pCoexDm->psTdmaDuAdjType = 13;
+ }
+ else if(maxInterval == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ else if(maxInterval == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ }
+ else
+ {
+ if(maxInterval == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ else if(maxInterval == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ else if(maxInterval == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ }
+ }
+ else
+ {
+ if(bTxPause)
+ {
+ if(maxInterval == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ pCoexDm->psTdmaDuAdjType = 5;
+ }
+ else if(maxInterval == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(maxInterval == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ }
+ else
+ {
+ if(maxInterval == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ pCoexDm->psTdmaDuAdjType = 1;
+ }
+ else if(maxInterval == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(maxInterval == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ }
+ }
+ }
+ //============
+ up = 0;
+ dn = 0;
+ m = 1;
+		n = 3;
+ result = 0;
+ WaitCount = 0;
+ }
+ else
+ {
+		// acquire the BT TRx retry count from BT_Info byte2
+ retryCount = pCoexSta->btRetryCnt;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], up=%d, dn=%d, m=%d, n=%d, WaitCount=%d\n",
+ up, dn, m, n, WaitCount));
+ result = 0;
+ WaitCount++;
+
+ if(retryCount == 0) // no retry in the last 2-second duration
+ {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+			if(up >= n) // if retry count is 0 for n consecutive 2-second periods, widen the WiFi duration
+ {
+ WaitCount = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Increase wifi duration!!\n"));
+ }
+ }
+ else if (retryCount <= 3) // <=3 retry in the last 2-second duration
+ {
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+			if (dn == 2) // if retry count < 3 for 2 consecutive 2-second periods, narrow the WiFi duration
+ {
+ if (WaitCount <= 2)
+					m++; // avoid bouncing back and forth between two levels
+ else
+ m = 1;
+
+				if ( m >= 20) // m caps at 20, i.e. recheck whether to adjust the WiFi duration after at most 120 seconds
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
+ }
+ }
+		else	// retry count > 3: narrow the WiFi duration as soon as a single period exceeds 3 retries
+ {
+ if (WaitCount == 1)
+				m++;	// avoid bouncing back and forth between two levels
+ else
+ m = 1;
+
+			if (m >= 20)	// cap m at 20: recheck whether to adjust the WiFi duration at most every 120 seconds
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n"));
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], max Interval = %d\n", maxInterval));
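+		// The tables below first remap the current PS-TDMA case into the family
+		// matching the new TxPause setting for this maxInterval, then step the
+		// case one entry toward a shorter WiFi slot (result == -1) or a longer
+		// one (result == 1) within that family.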
+ if(maxInterval == 1)
+ {
+ if(bTxPause)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n"));
+
+ if(pCoexDm->curPsTdma == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ pCoexDm->psTdmaDuAdjType = 5;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 4)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ }
+				else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+ pCoexDm->psTdmaDuAdjType = 13;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 12)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+
+ if(result == -1)
+ {
+ if(pCoexDm->curPsTdma == 5)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ }
+ else if(pCoexDm->curPsTdma == 13)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 8)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ pCoexDm->psTdmaDuAdjType = 5;
+ }
+ else if(pCoexDm->curPsTdma == 16)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+ pCoexDm->psTdmaDuAdjType = 13;
+ }
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n"));
+ if(pCoexDm->curPsTdma == 5)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ pCoexDm->psTdmaDuAdjType = 1;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 8)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ }
+				else if(pCoexDm->curPsTdma == 13)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 16)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+
+ if(result == -1)
+ {
+ if(pCoexDm->curPsTdma == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ }
+ else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 4)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ pCoexDm->psTdmaDuAdjType = 1;
+ }
+ else if(pCoexDm->curPsTdma == 12)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ }
+ }
+ }
+ else if(maxInterval == 2)
+ {
+ if(bTxPause)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n"));
+ if(pCoexDm->curPsTdma == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 4)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ }
+				else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 12)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+ if(result == -1)
+ {
+ if(pCoexDm->curPsTdma == 5)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ }
+ else if(pCoexDm->curPsTdma == 13)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 8)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(pCoexDm->curPsTdma == 16)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n"));
+ if(pCoexDm->curPsTdma == 5)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 8)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ }
+				else if(pCoexDm->curPsTdma == 13)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 16)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+ if(result == -1)
+ {
+ if(pCoexDm->curPsTdma == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ }
+ else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 4)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 12)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ }
+ }
+ }
+ else if(maxInterval == 3)
+ {
+ if(bTxPause)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n"));
+ if(pCoexDm->curPsTdma == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 4)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ }
+				else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 12)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+ if(result == -1)
+ {
+ if(pCoexDm->curPsTdma == 5)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ }
+ else if(pCoexDm->curPsTdma == 13)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 8)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 16)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n"));
+ if(pCoexDm->curPsTdma == 5)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 8)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ }
+				else if(pCoexDm->curPsTdma == 13)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 16)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+ if(result == -1)
+ {
+ if(pCoexDm->curPsTdma == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ }
+ else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 4)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 12)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ }
+ }
+ }
+ }
+
+	// if the current PsTdma does not match the recorded one (e.g. after scan, dhcp...),
+	// adjust it back to the previously recorded setting.
+ if(pCoexDm->curPsTdma != pCoexDm->psTdmaDuAdjType)
+ {
+ BOOLEAN bScan=FALSE, bLink=FALSE, bRoam=FALSE;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], PsTdma type mismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
+ pCoexDm->curPsTdma, pCoexDm->psTdmaDuAdjType));
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+
+ if( !bScan && !bLink && !bRoam)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, pCoexDm->psTdmaDuAdjType);
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"));
+ }
+ }
+}
+
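+// Each Action* handler below follows the same pattern: decide whether BT Tx
+// power should be reduced, program the coexistence table, then apply a
+// firmware mechanism (a fixed PS-TDMA case or TdmaDurationAdjust) and a
+// software mechanism (AGC table / ADC back-off / DAC swing) chosen from the
+// current bandwidth and WiFi RSSI state.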
+// SCO only or SCO+PAN(HS)
+VOID
+halbtc8723a2ant_ActionSco(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, wifiRssiState1;
+ u4Byte wifiBw;
+
+ if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ }
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+ wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ }
+
+ // sw mechanism
+ if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+
+VOID
+halbtc8723a2ant_ActionHid(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, wifiRssiState1;
+ u4Byte wifiBw;
+
+ if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+ }
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+ wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+ }
+
+ // sw mechanism
+ if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+//A2DP only / PAN(EDR) only/ A2DP+PAN(HS)
+VOID
+halbtc8723a2ant_ActionA2dp(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, wifiRssiState1, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+
+ if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ if(btInfoExt&BIT0) //a2dp rate, 1:basic /0:edr
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, FALSE, 3);
+ }
+ else
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, FALSE, 1);
+ }
+ }
+ else
+ {
+ if(btInfoExt&BIT0) //a2dp rate, 1:basic /0:edr
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, true, 3);
+ }
+ else
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, true, 1);
+ }
+ }
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+ wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ if(btInfoExt&BIT0) //a2dp rate, 1:basic /0:edr
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, FALSE, 3);
+ }
+ else
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, FALSE, 1);
+ }
+ }
+ else
+ {
+ if(btInfoExt&BIT0) //a2dp rate, 1:basic /0:edr
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, true, 3);
+ }
+ else
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, true, 1);
+ }
+ }
+
+ // sw mechanism
+ if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+VOID
+halbtc8723a2ant_ActionPanEdr(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, wifiRssiState1, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+
+ if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ }
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+ wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ }
+
+ // sw mechanism
+ if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+
+//PAN(HS) only
+VOID
+halbtc8723a2ant_ActionPanHs(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState;
+ u4Byte wifiBw;
+
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ }
+ else
+ {
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ }
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ }
+ else
+ {
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ }
+
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+//PAN(EDR)+A2DP
+VOID
+halbtc8723a2ant_ActionPanEdrA2dp(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, wifiRssiState1, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+
+ if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ }
+ }
+ else
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ }
+ }
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+ wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 47, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ }
+ }
+ else
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ }
+ }
+
+ // sw mechanism
+ if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+VOID
+halbtc8723a2ant_ActionPanEdrHid(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, wifiRssiState1;
+ u4Byte wifiBw;
+
+ if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ }
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+ wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ }
+
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+// HID+A2DP+PAN(EDR)
+VOID
+halbtc8723a2ant_ActionHidA2dpPanEdr(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, wifiRssiState1, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+
+ if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ }
+ }
+ else
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ }
+ }
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+ wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ }
+ }
+ else
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ }
+ }
+
+ // sw mechanism
+ if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+VOID
+halbtc8723a2ant_ActionHidA2dp(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, wifiRssiState1, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+
+ if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, FALSE, 3);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, FALSE, 1);
+ }
+ }
+ else
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, true, 3);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, true, 1);
+ }
+ }
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+ wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, FALSE, 3);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, FALSE, 1);
+ }
+ }
+ else
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, true, 3);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, true, 1);
+ }
+ }
+
+ // sw mechanism
+ if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+VOID
+halbtc8723a2ant_RunCoexistMechanism(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info;
+ u1Byte btInfoOriginal=0, btRetryCnt=0;
+ u1Byte algorithm=0;
+
+ if(pBtCoexist->manual_control)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Manual control!!!\n"));
+ return;
+ }
+
+ if(pStackInfo->bProfileNotified)
+ {
+ if(pCoexSta->bHoldForStackOperation)
+ {
+ // if bt inquiry/page/pair, do not execute.
+ return;
+ }
+
+ algorithm = halbtc8723a2ant_ActionAlgorithm(pBtCoexist);
+ if(pCoexSta->bHoldPeriodCnt && (BT_8723A_2ANT_COEX_ALGO_PANHS!=algorithm))
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex],Hold BT inquiry/page scan setting (cnt = %d)!!\n",
+ pCoexSta->bHoldPeriodCnt));
+ if(pCoexSta->bHoldPeriodCnt >= 6)
+ {
+ pCoexSta->bHoldPeriodCnt = 0;
+ // next time the coexist parameters should be reset again.
+ }
+ else
+ pCoexSta->bHoldPeriodCnt++;
+ return;
+ }
+
+ pCoexDm->curAlgorithm = algorithm;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Algorithm = %d \n", pCoexDm->curAlgorithm));
+ if(halbtc8723a2ant_IsCommonAction(pBtCoexist))
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant common.\n"));
+ pCoexDm->bResetTdmaAdjust = true;
+ }
+ else
+ {
+ if(pCoexDm->curAlgorithm != pCoexDm->preAlgorithm)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n",
+ pCoexDm->preAlgorithm, pCoexDm->curAlgorithm));
+ pCoexDm->bResetTdmaAdjust = true;
+ }
+ switch(pCoexDm->curAlgorithm)
+ {
+ case BT_8723A_2ANT_COEX_ALGO_SCO:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = SCO.\n"));
+ halbtc8723a2ant_ActionSco(pBtCoexist);
+ break;
+ case BT_8723A_2ANT_COEX_ALGO_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID.\n"));
+ halbtc8723a2ant_ActionHid(pBtCoexist);
+ break;
+ case BT_8723A_2ANT_COEX_ALGO_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = A2DP.\n"));
+ halbtc8723a2ant_ActionA2dp(pBtCoexist);
+ break;
+ case BT_8723A_2ANT_COEX_ALGO_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN(EDR).\n"));
+ halbtc8723a2ant_ActionPanEdr(pBtCoexist);
+ break;
+ case BT_8723A_2ANT_COEX_ALGO_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HS mode.\n"));
+ halbtc8723a2ant_ActionPanHs(pBtCoexist);
+ break;
+ case BT_8723A_2ANT_COEX_ALGO_PANEDR_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN+A2DP.\n"));
+ halbtc8723a2ant_ActionPanEdrA2dp(pBtCoexist);
+ break;
+ case BT_8723A_2ANT_COEX_ALGO_PANEDR_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID.\n"));
+ halbtc8723a2ant_ActionPanEdrHid(pBtCoexist);
+ break;
+ case BT_8723A_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN.\n"));
+ halbtc8723a2ant_ActionHidA2dpPanEdr(pBtCoexist);
+ break;
+ case BT_8723A_2ANT_COEX_ALGO_HID_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID+A2DP.\n"));
+ halbtc8723a2ant_ActionHidA2dp(pBtCoexist);
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n"));
+ halbtc8723a2ant_CoexAllOff(pBtCoexist);
+ break;
+ }
+ pCoexDm->preAlgorithm = pCoexDm->curAlgorithm;
+ }
+ }
+}
+
+//============================================================
+// work around function start with wa_halbtc8723a2ant_
+//============================================================
+VOID
+wa_halbtc8723a2ant_MonitorC2h(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte tmp1b=0x0;
+ u4Byte curC2hTotalCnt=0x0;
+ static u4Byte preC2hTotalCnt=0x0, sameCntPollingTime=0x0;
+
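+	// Workaround for a stuck BT C2H path: if the BT_RSP info counter stops
+	// advancing across two consecutive polls, clear register 0x1af when it is
+	// non-zero and count the event so it shows up in the coex info dump.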
+ curC2hTotalCnt+=pCoexSta->btInfoC2hCnt[BT_INFO_SRC_8723A_2ANT_BT_RSP];
+
+ if(curC2hTotalCnt == preC2hTotalCnt)
+ {
+ sameCntPollingTime++;
+ }
+ else
+ {
+ preC2hTotalCnt = curC2hTotalCnt;
+ sameCntPollingTime = 0;
+ }
+
+ if(sameCntPollingTime >= 2)
+ {
+ tmp1b = pBtCoexist->btc_read_1byte(pBtCoexist, 0x1af);
+ if(tmp1b != 0x0)
+ {
+ pCoexSta->c2hHangDetectCnt++;
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x1af, 0x0);
+ }
+ }
+}
+
+//============================================================
+// extern function start with EXhalbtc8723a2ant_
+//============================================================
+VOID
+EXhalbtc8723a2ant_InitHwConfig(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u4Byte u4Tmp=0;
+ u1Byte u1Tmp=0;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 2Ant Init HW Config!!\n"));
+
+ // backup rf 0x1e value
+ pCoexDm->btRf0x1eBackup =
+ pBtCoexist->btc_get_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff);
+
+ // Enable counter statistics
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0x4);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x778, 0x3);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x40, 0x20);
+}
+
+VOID
+EXhalbtc8723a2ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n"));
+
+ halbtc8723a2ant_InitCoexDm(pBtCoexist);
+}
+
+VOID
+EXhalbtc8723a2ant_DisplayCoexInfo(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ struct btc_board_info * pBoardInfo=&pBtCoexist->board_info;
+ PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info;
+ pu1Byte cliBuf=pBtCoexist->cli_buf;
+ u1Byte u1Tmp[4], i, btInfoExt, psTdmaCase=0;
+ u4Byte u4Tmp[4];
+ BOOLEAN bRoam=FALSE, bScan=FALSE, bLink=FALSE, bWifiUnder5G=FALSE;
+ BOOLEAN bBtHsOn=FALSE, bWifiBusy=FALSE;
+ s4Byte wifiRssi=0, btHsRssi=0;
+ u4Byte wifiBw, wifiTrafficDir;
+ u1Byte wifiDot11Chnl, wifiHsChnl;
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cliBuf);
+
+ if(!pBoardInfo->bt_exist)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+ CL_PRINTF(cliBuf);
+ return;
+ }
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \
+ pBoardInfo->pg_ant_num, pBoardInfo->btdm_ant_num);
+ CL_PRINTF(cliBuf);
+
+ if(pBtCoexist->manual_control)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "[Action Manual control]!!");
+ CL_PRINTF(cliBuf);
+ }
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
+ ((pStackInfo->bProfileNotified)? "Yes":"No"), pStackInfo->hciVersion);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \
+ wifiDot11Chnl, wifiHsChnl, bBtHsOn);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \
+ pCoexDm->wifiChnlInfo[0], pCoexDm->wifiChnlInfo[1],
+ pCoexDm->wifiChnlInfo[2]);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \
+ wifiRssi, btHsRssi);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", "Wifi bLink/ bRoam/ bScan", \
+ bLink, bRoam, bScan);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_UNDER_5G, &bWifiUnder5G);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", "Wifi status", \
+ (bWifiUnder5G? "5G":"2.4G"),
+ ((BTC_WIFI_BW_LEGACY==wifiBw)? "Legacy": (((BTC_WIFI_BW_HT40==wifiBw)? "HT40":"HT20"))),
+ ((!bWifiBusy)? "idle": ((BTC_WIFI_TRAFFIC_TX==wifiTrafficDir)? "uplink":"downlink")));
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \
+ ((pCoexSta->bC2hBtInquiryPage)?("inquiry/page scan"):((BT_8723A_2ANT_BT_STATUS_IDLE == pCoexDm->btStatus)? "idle":( (BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)? "connected-idle":"busy"))),
+ pCoexSta->btRssi, pCoexSta->btRetryCnt);
+ CL_PRINTF(cliBuf);
+
+ if(pStackInfo->bProfileNotified)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \
+ pStackInfo->bScoExist, pStackInfo->bHidExist, pStackInfo->bPanExist, pStackInfo->bA2dpExist);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_BT_LINK_INFO);
+ }
+
+ btInfoExt = pCoexSta->btInfoExt;
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Info A2DP rate", \
+ (btInfoExt&BIT0)? "Basic rate":"EDR rate");
+ CL_PRINTF(cliBuf);
+
+ for(i=0; i<BT_INFO_SRC_8723A_2ANT_MAX; i++)
+ {
+ if(pCoexSta->btInfoC2hCnt[i])
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8723a2Ant[i], \
+ pCoexSta->btInfoC2h[i][0], pCoexSta->btInfoC2h[i][1],
+ pCoexSta->btInfoC2h[i][2], pCoexSta->btInfoC2h[i][3],
+ pCoexSta->btInfoC2h[i][4], pCoexSta->btInfoC2h[i][5],
+ pCoexSta->btInfoC2h[i][6], pCoexSta->btInfoC2hCnt[i]);
+ CL_PRINTF(cliBuf);
+ }
+ }
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d", "write 0x1af=0x0 num", \
+ pCoexSta->c2hHangDetectCnt);
+ CL_PRINTF(cliBuf);
+
+ // Sw mechanism
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw mechanism]============");
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d", "SM1[ShRf/ LpRA/ LimDig]", \
+ pCoexDm->bCurRfRxLpfShrink, pCoexDm->bCurLowPenaltyRa, pCoexDm->limited_dig);
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ", "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", \
+ pCoexDm->bCurAgcTableEn, pCoexDm->bCurAdcBackOff, pCoexDm->bCurDacSwingOn, pCoexDm->curDacSwingLvl);
+ CL_PRINTF(cliBuf);
+
+ // Fw mechanism
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============");
+ CL_PRINTF(cliBuf);
+
+ if(!pBtCoexist->manual_control)
+ {
+ psTdmaCase = pCoexDm->curPsTdma;
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", "PS TDMA", \
+ pCoexDm->psTdmaPara[0], pCoexDm->psTdmaPara[1],
+ pCoexDm->psTdmaPara[2], pCoexDm->psTdmaPara[3],
+ pCoexDm->psTdmaPara[4], psTdmaCase);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct", \
+ pCoexDm->bCurDecBtPwr, pCoexDm->bCurIgnoreWlanAct);
+ CL_PRINTF(cliBuf);
+ }
+
+ // Hw setting
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============");
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \
+ pCoexDm->btRf0x1eBackup);
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x778);
+ u1Tmp[1] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x783);
+ u1Tmp[2] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x796);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x778/ 0x783/ 0x796", \
+ u1Tmp[0], u1Tmp[1], u1Tmp[2]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x880);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x880", \
+ u4Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x40);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x40", \
+ u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x550);
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x522);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \
+ u4Tmp[0], u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x484);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x484(rate adaptive)", \
+ u4Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xc50);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \
+ u4Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda0);
+ u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda4);
+ u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda8);
+ u4Tmp[3] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xdac);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0xda0/0xda4/0xda8/0xdac(FA cnt)", \
+ u4Tmp[0], u4Tmp[1], u4Tmp[2], u4Tmp[3]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c0);
+ u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c4);
+ u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c8);
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x6cc);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \
+ u4Tmp[0], u4Tmp[1], u4Tmp[2], u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x770 (hp rx[31:16]/tx[15:0])", \
+ pCoexSta->highPriorityRx, pCoexSta->highPriorityTx);
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x774(lp rx[31:16]/tx[15:0])", \
+ pCoexSta->lowPriorityRx, pCoexSta->lowPriorityTx);
+ CL_PRINTF(cliBuf);
+
+	// Tx mgnt queue hang check: 0x41b should read 0xf; e.g. 0xd indicates a hang
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x41b);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x41b (mgntQ hang chk == 0xf)", \
+ u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+VOID
+EXhalbtc8723a2ant_IpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_IPS_ENTER == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n"));
+ halbtc8723a2ant_CoexAllOff(pBtCoexist);
+ }
+ else if(BTC_IPS_LEAVE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n"));
+ //halbtc8723a2ant_InitCoexDm(pBtCoexist);
+ }
+}
+
+VOID
+EXhalbtc8723a2ant_LpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_LPS_ENABLE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n"));
+ }
+ else if(BTC_LPS_DISABLE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n"));
+ }
+}
+
+VOID
+EXhalbtc8723a2ant_ScanNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_SCAN_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n"));
+ }
+ else if(BTC_SCAN_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n"));
+ }
+}
+
+VOID
+EXhalbtc8723a2ant_ConnectNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_ASSOCIATE_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n"));
+ }
+ else if(BTC_ASSOCIATE_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n"));
+ }
+}
+
+VOID
+EXhalbtc8723a2ant_MediaStatusNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_MEDIA_CONNECT == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA connect notify\n"));
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA disconnect notify\n"));
+ }
+
+ halbtc8723a2ant_IndicateWifiChnlBwInfo(pBtCoexist, type);
+}
+
+VOID
+EXhalbtc8723a2ant_SpecialPacketNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(type == BTC_PACKET_DHCP)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], DHCP Packet notify\n"));
+ }
+}
+
+VOID
+EXhalbtc8723a2ant_BtInfoNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN pu1Byte tmpBuf,
+ IN u1Byte length
+ )
+{
+ u1Byte btInfo=0;
+ u1Byte i, rspSource=0;
+ BOOLEAN bBtBusy=FALSE, limited_dig=FALSE;
+ BOOLEAN bWifiConnected=FALSE, bBtHsOn=FALSE;
+
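+	// Layout of the BT-info C2H as consumed below: byte 0 carries the
+	// BT_INFO_8723A_2ANT_B_* status bits, byte 1 the BT retry count,
+	// byte 2 the raw BT RSSI (converted as value*2+10), and byte 3 the
+	// extended info (A2DP rate bit, etc.).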
+ pCoexSta->bC2hBtInfoReqSent = FALSE;
+
+ rspSource = BT_INFO_SRC_8723A_2ANT_BT_RSP;
+ pCoexSta->btInfoC2hCnt[rspSource]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length=%d, hex data=[", rspSource, length));
+ for(i=0; i<length; i++)
+ {
+ pCoexSta->btInfoC2h[rspSource][i] = tmpBuf[i];
+ if(i == 0)
+ btInfo = tmpBuf[i];
+ if(i == length-1)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x]\n", tmpBuf[i]));
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x, ", tmpBuf[i]));
+ }
+ }
+
+ if(BT_INFO_SRC_8723A_2ANT_WIFI_FW != rspSource)
+ {
+ pCoexSta->btRetryCnt =
+ pCoexSta->btInfoC2h[rspSource][1];
+
+ pCoexSta->btRssi =
+ pCoexSta->btInfoC2h[rspSource][2]*2+10;
+
+ pCoexSta->btInfoExt =
+ pCoexSta->btInfoC2h[rspSource][3];
+ }
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ // check BIT2 first ==> check if bt is under inquiry or page scan
+ if(btInfo & BT_INFO_8723A_2ANT_B_INQ_PAGE)
+ {
+ pCoexSta->bC2hBtInquiryPage = true;
+ }
+ else
+ {
+ pCoexSta->bC2hBtInquiryPage = FALSE;
+ }
+}
+
+VOID
+EXhalbtc8723a2ant_StackOperationNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair start notify\n"));
+ pCoexSta->bHoldForStackOperation = true;
+ pCoexSta->bHoldPeriodCnt = 1;
+ halbtc8723a2ant_BtInquiryPage(pBtCoexist);
+ }
+ else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair finish notify\n"));
+ pCoexSta->bHoldForStackOperation = FALSE;
+ }
+}
+
+VOID
+EXhalbtc8723a2ant_HaltNotify(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Halt notify\n"));
+
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
+ EXhalbtc8723a2ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT);
+}
+
+VOID
+EXhalbtc8723a2ant_Periodical(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], 2Ant Periodical!!\n"));
+
+	// workaround for c2h hang
+ wa_halbtc8723a2ant_MonitorC2h(pBtCoexist);
+
+ halbtc8723a2ant_QueryBtInfo(pBtCoexist);
+ halbtc8723a2ant_MonitorBtCtr(pBtCoexist);
+ halbtc8723a2ant_MonitorBtEnableDisable(pBtCoexist);
+
+ halbtc8723a2ant_RunCoexistMechanism(pBtCoexist);
+}
+
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.h b/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.h
new file mode 100644
index 000000000000..c07d3738aadc
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.h
@@ -0,0 +1,179 @@
+//===========================================
+// The following is for 8723A 2Ant BT Co-exist definition
+//===========================================
+#define BT_INFO_8723A_2ANT_B_FTP BIT7
+#define BT_INFO_8723A_2ANT_B_A2DP BIT6
+#define BT_INFO_8723A_2ANT_B_HID BIT5
+#define BT_INFO_8723A_2ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8723A_2ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8723A_2ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8723A_2ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8723A_2ANT_B_CONNECTION BIT0
+
+#define BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT 2
+
+typedef enum _BT_INFO_SRC_8723A_2ANT{
+ BT_INFO_SRC_8723A_2ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8723A_2ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8723A_2ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8723A_2ANT_MAX
+}BT_INFO_SRC_8723A_2ANT,*PBT_INFO_SRC_8723A_2ANT;
+
+typedef enum _BT_8723A_2ANT_BT_STATUS{
+ BT_8723A_2ANT_BT_STATUS_IDLE = 0x0,
+ BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8723A_2ANT_BT_STATUS_NON_IDLE = 0x2,
+ BT_8723A_2ANT_BT_STATUS_MAX
+}BT_8723A_2ANT_BT_STATUS,*PBT_8723A_2ANT_BT_STATUS;
+
+typedef enum _BT_8723A_2ANT_COEX_ALGO{
+ BT_8723A_2ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8723A_2ANT_COEX_ALGO_SCO = 0x1,
+ BT_8723A_2ANT_COEX_ALGO_HID = 0x2,
+ BT_8723A_2ANT_COEX_ALGO_A2DP = 0x3,
+ BT_8723A_2ANT_COEX_ALGO_PANEDR = 0x4,
+ BT_8723A_2ANT_COEX_ALGO_PANHS = 0x5,
+ BT_8723A_2ANT_COEX_ALGO_PANEDR_A2DP = 0x6,
+ BT_8723A_2ANT_COEX_ALGO_PANEDR_HID = 0x7,
+ BT_8723A_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x8,
+ BT_8723A_2ANT_COEX_ALGO_HID_A2DP = 0x9,
+ BT_8723A_2ANT_COEX_ALGO_MAX
+}BT_8723A_2ANT_COEX_ALGO,*PBT_8723A_2ANT_COEX_ALGO;
+
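+// Most mechanism settings below are kept as pre/cur pairs so that a setting
+// is only re-programmed when its value actually changes (unless FORCE_EXEC
+// is requested by the caller).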
+typedef struct _COEX_DM_8723A_2ANT{
+ // fw mechanism
+ BOOLEAN bPreDecBtPwr;
+ BOOLEAN bCurDecBtPwr;
+ //BOOLEAN bPreBtLnaConstrain;
+ //BOOLEAN bCurBtLnaConstrain;
+ //u1Byte bPreBtPsdMode;
+ //u1Byte bCurBtPsdMode;
+ u1Byte preFwDacSwingLvl;
+ u1Byte curFwDacSwingLvl;
+ BOOLEAN bCurIgnoreWlanAct;
+ BOOLEAN bPreIgnoreWlanAct;
+ u1Byte prePsTdma;
+ u1Byte curPsTdma;
+ u1Byte psTdmaPara[5];
+ u1Byte psTdmaDuAdjType;
+ BOOLEAN bResetTdmaAdjust;
+ BOOLEAN bPrePsTdmaOn;
+ BOOLEAN bCurPsTdmaOn;
+ //BOOLEAN bPreBtAutoReport;
+ //BOOLEAN bCurBtAutoReport;
+
+ // sw mechanism
+ BOOLEAN bPreRfRxLpfShrink;
+ BOOLEAN bCurRfRxLpfShrink;
+ u4Byte btRf0x1eBackup;
+ BOOLEAN bPreLowPenaltyRa;
+ BOOLEAN bCurLowPenaltyRa;
+ BOOLEAN bPreDacSwingOn;
+ u4Byte preDacSwingLvl;
+ BOOLEAN bCurDacSwingOn;
+ u4Byte curDacSwingLvl;
+ BOOLEAN bPreAdcBackOff;
+ BOOLEAN bCurAdcBackOff;
+ BOOLEAN bPreAgcTableEn;
+ BOOLEAN bCurAgcTableEn;
+ u4Byte preVal0x6c0;
+ u4Byte curVal0x6c0;
+ u4Byte preVal0x6c8;
+ u4Byte curVal0x6c8;
+ u1Byte preVal0x6cc;
+ u1Byte curVal0x6cc;
+ BOOLEAN limited_dig;
+
+ // algorithm related
+ u1Byte preAlgorithm;
+ u1Byte curAlgorithm;
+ u1Byte btStatus;
+ u1Byte wifiChnlInfo[3];
+} COEX_DM_8723A_2ANT, *PCOEX_DM_8723A_2ANT;
+
+typedef struct _COEX_STA_8723A_2ANT{
+ u4Byte highPriorityTx;
+ u4Byte highPriorityRx;
+ u4Byte lowPriorityTx;
+ u4Byte lowPriorityRx;
+ u1Byte btRssi;
+ u1Byte preBtRssiState;
+ u1Byte preBtRssiState1;
+ u1Byte preWifiRssiState[4];
+ BOOLEAN bC2hBtInfoReqSent;
+ u1Byte btInfoC2h[BT_INFO_SRC_8723A_2ANT_MAX][10];
+ u4Byte btInfoC2hCnt[BT_INFO_SRC_8723A_2ANT_MAX];
+ BOOLEAN bC2hBtInquiryPage;
+ u1Byte btRetryCnt;
+ u1Byte btInfoExt;
+ BOOLEAN bHoldForStackOperation;
+ u1Byte bHoldPeriodCnt;
+ // this is for c2h hang work-around
+ u4Byte c2hHangDetectCnt;
+}COEX_STA_8723A_2ANT, *PCOEX_STA_8723A_2ANT;
+
+//===========================================
+// The following is interface which will notify coex module.
+//===========================================
+VOID
+EXhalbtc8723a2ant_InitHwConfig(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8723a2ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8723a2ant_IpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a2ant_LpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a2ant_ScanNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a2ant_ConnectNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a2ant_MediaStatusNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a2ant_SpecialPacketNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a2ant_HaltNotify(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8723a2ant_Periodical(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8723a2ant_BtInfoNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN pu1Byte tmpBuf,
+ IN u1Byte length
+ );
+VOID
+EXhalbtc8723a2ant_StackOperationNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a2ant_DisplayCoexInfo(
+ IN PBTC_COEXIST pBtCoexist
+ );
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c
new file mode 100644
index 000000000000..3414ba78cc43
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c
@@ -0,0 +1,4104 @@
+/***************************************************************
+ * Description:
+ *
+ * This file is for RTL8723B Co-exist mechanism
+ *
+ * History
+ * 2012/11/15 Cosa first check in.
+ *
+ ***************************************************************/
+
+
+/***************************************************************
+ * include files
+ ***************************************************************/
+#include "halbt_precomp.h"
+#if 1
+/***************************************************************
+ * Global variables, these are static variables
+ ***************************************************************/
+static struct coex_dm_8723b_1ant glcoex_dm_8723b_1ant;
+static struct coex_dm_8723b_1ant *coex_dm = &glcoex_dm_8723b_1ant;
+static struct coex_sta_8723b_1ant glcoex_sta_8723b_1ant;
+static struct coex_sta_8723b_1ant *coex_sta = &glcoex_sta_8723b_1ant;
+
+const char *const GLBtInfoSrc8723b1Ant[]={
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+u32 glcoex_ver_date_8723b_1ant = 20130906;
+u32 glcoex_ver_8723b_1ant = 0x45;
+
+/***************************************************************
+ * local function proto type if needed
+ ***************************************************************/
+/***************************************************************
+ * local function start with halbtc8723b1ant_
+ ***************************************************************/
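+/* Map the current BT RSSI onto a 2- or 3-level state machine.  A tolerance
+ * of BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT is added on the way up so the state
+ * does not ping-pong around a threshold; the previous state is kept in
+ * coex_sta->pre_bt_rssi_state.
+ */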
+u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, u8 rssi_thresh1)
+{
+ s32 bt_rssi=0;
+ u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
+
+ bt_rssi = coex_sta->bt_rssi;
+
+	if (level_num == 2) {
+ if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ if (bt_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to High\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Low\n");
+ }
+ } else {
+ if (bt_rssi < rssi_thresh) {
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Low\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at High\n");
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi thresh error!!\n");
+ return coex_sta->pre_bt_rssi_state;
+ }
+
+ if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ if (bt_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Medium\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Low\n");
+ }
+ } else if ((coex_sta->pre_bt_rssi_state ==
+ BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_bt_rssi_state ==
+ BTC_RSSI_STATE_STAY_MEDIUM)) {
+ if (bt_rssi >= rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to High\n");
+ } else if (bt_rssi < rssi_thresh) {
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Low\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Medium\n");
+ }
+ } else {
+ if (bt_rssi < rssi_thresh1) {
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Medium\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at High\n");
+ }
+ }
+ }
+
+ coex_sta->pre_bt_rssi_state = bt_rssi_state;
+
+ return bt_rssi_state;
+}
+
+u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
+ u8 index, u8 level_num,
+ u8 rssi_thresh, u8 rssi_thresh1)
+{
+ s32 wifi_rssi=0;
+ u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
+
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+
+ if (level_num == 2) {
+ if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_LOW)) {
+ if (wifi_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to High\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Low\n");
+ }
+ } else {
+ if (wifi_rssi < rssi_thresh) {
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Low\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at High\n");
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI thresh error!!\n");
+ return coex_sta->pre_wifi_rssi_state[index];
+ }
+
+ if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_LOW)) {
+ if (wifi_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Medium\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Low\n");
+ }
+ } else if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_MEDIUM)) {
+ if (wifi_rssi >= rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to High\n");
+ } else if (wifi_rssi < rssi_thresh) {
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Low\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Medium\n");
+ }
+ } else {
+ if (wifi_rssi < rssi_thresh1) {
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Medium\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at High\n");
+ }
+ }
+ }
+
+ coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
+
+ return wifi_rssi_state;
+}
+
+void halbtc8723b1ant_updatera_mask(struct btc_coexist *btcoexist,
+ bool force_exec, u32 dis_rate_mask)
+{
+ coex_dm->curra_mask = dis_rate_mask;
+
+ if (force_exec || (coex_dm->prera_mask != coex_dm->curra_mask))
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask,
+ &coex_dm->curra_mask);
+
+ coex_dm->prera_mask = coex_dm->curra_mask;
+}
+
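+/* Switch the auto-rate-fallback-retry registers (0x430/0x434) between the
+ * backed-up normal-mode values (type 0) and a fixed pattern (type 1), where
+ * the type-1 pattern differs when WiFi is in 11b-only mode.
+ */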
+void halbtc8723b1ant_auto_rate_fallback_retry(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ bool wifi_under_bmode = false;
+
+ coex_dm->cur_arfr_type = type;
+
+ if (force_exec || (coex_dm->pre_arfr_type != coex_dm->cur_arfr_type)) {
+ switch (coex_dm->cur_arfr_type) {
+ case 0: /* normal mode */
+ btcoexist->btc_write_4byte(btcoexist, 0x430,
+ coex_dm->backup_arfr_cnt1);
+ btcoexist->btc_write_4byte(btcoexist, 0x434,
+ coex_dm->backup_arfr_cnt2);
+ break;
+ case 1:
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_BL_WIFI_UNDER_B_MODE,
+ &wifi_under_bmode);
+ if (wifi_under_bmode) {
+ btcoexist->btc_write_4byte(btcoexist,
+ 0x430, 0x0);
+ btcoexist->btc_write_4byte(btcoexist,
+ 0x434, 0x01010101);
+ } else {
+ btcoexist->btc_write_4byte(btcoexist,
+ 0x430, 0x0);
+ btcoexist->btc_write_4byte(btcoexist,
+ 0x434, 0x04030201);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ coex_dm->pre_arfr_type = coex_dm->cur_arfr_type;
+}
+
+void halbtc8723b1ant_retry_limit(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ coex_dm->cur_retry_limit_type = type;
+
+ if (force_exec || (coex_dm->pre_retry_limit_type !=
+ coex_dm->cur_retry_limit_type)) {
+
+ switch (coex_dm->cur_retry_limit_type) {
+ case 0: /* normal mode */
+ btcoexist->btc_write_2byte(btcoexist, 0x42a,
+ coex_dm->backup_retry_limit);
+ break;
+ case 1: /* retry limit=8 */
+ btcoexist->btc_write_2byte(btcoexist, 0x42a, 0x0808);
+ break;
+ default:
+ break;
+ }
+ }
+
+ coex_dm->pre_retry_limit_type = coex_dm->cur_retry_limit_type;
+}
+
+void halbtc8723b1ant_ampdu_maxtime(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ coex_dm->cur_ampdu_time_type = type;
+
+ if (force_exec || (coex_dm->pre_ampdu_time_type !=
+ coex_dm->cur_ampdu_time_type)) {
+ switch (coex_dm->cur_ampdu_time_type) {
+ case 0: /* normal mode */
+ btcoexist->btc_write_1byte(btcoexist, 0x456,
+ coex_dm->backup_ampdu_max_time);
+ break;
+		case 1: /* AMPDU time = 0x38 * 32us */
+ btcoexist->btc_write_1byte(btcoexist,
+ 0x456, 0x38);
+ break;
+ default:
+ break;
+ }
+ }
+
+ coex_dm->pre_ampdu_time_type = coex_dm->cur_ampdu_time_type;
+}
+
+void halbtc8723b1ant_limited_tx(struct btc_coexist *btcoexist,
+ bool force_exec, u8 ra_maskType, u8 arfr_type,
+ u8 retry_limit_type, u8 ampdu_time_type)
+{
+ switch (ra_maskType) {
+ case 0: /* normal mode */
+ halbtc8723b1ant_updatera_mask(btcoexist, force_exec, 0x0);
+ break;
+ case 1: /* disable cck 1/2 */
+ halbtc8723b1ant_updatera_mask(btcoexist, force_exec,
+ 0x00000003);
+ break;
+ /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4*/
+ case 2:
+ halbtc8723b1ant_updatera_mask(btcoexist, force_exec,
+ 0x0001f1f7);
+ break;
+ default:
+ break;
+ }
+
+ halbtc8723b1ant_auto_rate_fallback_retry(btcoexist, force_exec,
+ arfr_type);
+ halbtc8723b1ant_retry_limit(btcoexist, force_exec, retry_limit_type);
+ halbtc8723b1ant_ampdu_maxtime(btcoexist, force_exec, ampdu_time_type);
+}
+
+void halbtc8723b1ant_limited_rx(struct btc_coexist *btcoexist,
+ bool force_exec, bool rej_ap_agg_pkt,
+ bool b_bt_ctrl_agg_buf_size, u8 agg_buf_size)
+{
+ bool reject_rx_agg = rej_ap_agg_pkt;
+ bool bt_ctrl_rx_agg_size = b_bt_ctrl_agg_buf_size;
+ u8 rxAggSize = agg_buf_size;
+
+ /**********************************************
+ * Rx Aggregation related setting
+ **********************************************/
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+ &reject_rx_agg);
+ /* decide BT control aggregation buf size or not */
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE,
+ &bt_ctrl_rx_agg_size);
+	/* aggregation buf size; only takes effect
+	 * when BT controls the Rx aggregation size. */
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rxAggSize);
+ /* real update aggregation setting */
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
+}
+
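+/* Read the BT high/low priority TX/RX packet counters from registers
+ * 0x770/0x774 into coex_sta, then reset the hardware counters via 0x76e.
+ */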
+void halbtc8723b1ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+{
+ u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
+ u32 reg_hp_tx = 0, reg_hp_rx = 0;
+ u32 reg_lp_tx = 0, reg_lp_rx = 0;
+
+ reg_hp_txrx = 0x770;
+ reg_lp_txrx = 0x774;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
+ reg_hp_tx = u32tmp & MASKLWORD;
+ reg_hp_rx = (u32tmp & MASKHWORD) >> 16;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
+ reg_lp_tx = u32tmp & MASKLWORD;
+ reg_lp_rx = (u32tmp & MASKHWORD) >> 16;
+
+ coex_sta->high_priority_tx = reg_hp_tx;
+ coex_sta->high_priority_rx = reg_hp_rx;
+ coex_sta->low_priority_tx = reg_lp_tx;
+ coex_sta->low_priority_rx = reg_lp_rx;
+
+ /* reset counter */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+}
+
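+/* Request a BT status report from firmware: H2C 0x61 with BIT0 set triggers
+ * a C2H BT-info response; c2h_bt_info_req_sent marks the pending request.
+ */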
+void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist)
+{
+ u8 h2c_parameter[1] = {0};
+
+ coex_sta->c2h_bt_info_req_sent = true;
+
+ h2c_parameter[0] |= BIT0; /* trigger*/
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Query Bt Info, FW write 0x61=0x%x\n",
+ h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
+}
+
+bool halbtc8723b1ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
+{
+ static bool pre_wifi_busy = false;
+ static bool pre_under_4way = false, pre_bt_hs_on = false;
+ bool wifi_busy = false, under_4way = false, bt_hs_on = false;
+ bool wifi_connected = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ &under_4way);
+
+ if (wifi_connected) {
+ if (wifi_busy != pre_wifi_busy) {
+ pre_wifi_busy = wifi_busy;
+ return true;
+ }
+ if (under_4way != pre_under_4way) {
+ pre_under_4way = under_4way;
+ return true;
+ }
+ if (bt_hs_on != pre_bt_hs_on) {
+ pre_bt_hs_on = bt_hs_on;
+ return true;
+ }
+ }
+
+ return false;
+}
+
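+/* Refresh bt_link_info from the latest BT status.  When BT HS operation is
+ * active, the PAN/link-exist bits are forced on as a workaround, and the
+ * sco/a2dp/pan/hid_only flags are derived for the single-profile cases.
+ */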
+void halbtc8723b1ant_update_bt_link_info(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool bt_hs_on = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
+ bt_link_info->sco_exist = coex_sta->sco_exist;
+ bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
+ bt_link_info->pan_exist = coex_sta->pan_exist;
+ bt_link_info->hid_exist = coex_sta->hid_exist;
+
+	/* workaround for HS mode. */
+ if (bt_hs_on) {
+ bt_link_info->pan_exist = true;
+ bt_link_info->bt_link_exist = true;
+ }
+
+ /* check if Sco only */
+ if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->sco_only = true;
+ else
+ bt_link_info->sco_only = false;
+
+ /* check if A2dp only */
+ if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->a2dp_only = true;
+ else
+ bt_link_info->a2dp_only = false;
+
+ /* check if Pan only */
+ if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->pan_only = true;
+ else
+ bt_link_info->pan_only = false;
+
+ /* check if Hid only */
+ if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+	    !bt_link_info->pan_exist && bt_link_info->hid_exist)
+ bt_link_info->hid_only = true;
+ else
+ bt_link_info->hid_only = false;
+}
+
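+/* Select a coex algorithm from the combination of active BT profiles
+ * (SCO/HID/A2DP/PAN).  The number of concurrent profiles together with the
+ * bt_hs_on flag picks one of the BT_8723B_1ANT_COEX_ALGO_xxx cases.
+ */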
+u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool bt_hs_on = false;
+ u8 algorithm = BT_8723B_1ANT_COEX_ALGO_UNDEFINED;
+ u8 numOfDiffProfile = 0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ if (!bt_link_info->bt_link_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], No BT link exists!!!\n");
+ return algorithm;
+ }
+
+ if (bt_link_info->sco_exist)
+ numOfDiffProfile++;
+ if (bt_link_info->hid_exist)
+ numOfDiffProfile++;
+ if (bt_link_info->pan_exist)
+ numOfDiffProfile++;
+ if (bt_link_info->a2dp_exist)
+ numOfDiffProfile++;
+
+ if (numOfDiffProfile == 1) {
+ if (bt_link_info->sco_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO only\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
+ } else {
+ if (bt_link_info->hid_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = HID only\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
+ } else if (bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = A2DP only\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP;
+ } else if (bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "PAN(HS) only\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "PAN(EDR) only\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_PANEDR;
+ }
+ }
+ }
+ } else if (numOfDiffProfile == 2) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
+ } else if (bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "SCO + A2DP ==> SCO\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
+ } else if (bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile "
+ "= SCO + PAN(HS)\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile "
+ "= SCO + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "HID + A2DP\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "HID + PAN(HS)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "HID + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "A2DP + PAN(HS)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "A2DP + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ }
+ }
+ } else if (numOfDiffProfile == 3) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "SCO + HID + A2DP ==> HID\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "SCO + HID + PAN(HS)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "SCO + HID + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "SCO + A2DP + PAN(HS)\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + "
+ "A2DP + PAN(EDR) ==> HID\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "HID + A2DP + PAN(HS)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "HID + A2DP + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ }
+ }
+ } else if (numOfDiffProfile >= 3) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Error!!! "
+ "BT Profile = SCO + "
+ "HID + A2DP + PAN(HS)\n");
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "SCO + HID + A2DP + PAN(EDR)"
+ "==>PAN(EDR)+HID\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ }
+
+ return algorithm;
+}
+
+bool halbtc8723b1ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
+{
+ bool ret = false;
+ bool bt_hs_on = false, wifi_connected = false;
+ s32 bt_hs_rssi = 0;
+ u8 bt_rssi_state;
+
+ if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on))
+ return false;
+ if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected))
+ return false;
+ if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
+ return false;
+
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 35, 0);
+
+ if (wifi_connected) {
+ if (bt_hs_on) {
+ if (bt_hs_rssi > 37)
+ ret = true;
+ } else {
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+void halbtc8723b1ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
+ u8 dac_swing_lvl)
+{
+ u8 h2c_parameter[1] = {0};
+
+	/* There are several types of dac swing level:
+	 * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */
+ h2c_parameter[0] = dac_swing_lvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
+}
+
+void halbtc8723b1ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
+ bool dec_bt_pwr)
+{
+ u8 h2c_parameter[1] = {0};
+
+ h2c_parameter[0] = 0;
+
+ if (dec_bt_pwr)
+ h2c_parameter[0] |= BIT1;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
+		  (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
+}
+
+void halbtc8723b1ant_dec_bt_pwr(struct btc_coexist *btcoexist,
+ bool force_exec, bool dec_bt_pwr)
+{
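+	/* Note: the early return below makes this function a no-op in this
+	 * version; the code after it is kept but never reached. */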
+ return;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s Dec BT power = %s\n",
+ (force_exec ? "force to" : ""), (dec_bt_pwr ? "ON" : "OFF"));
+ coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+
+ if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
+ return;
+ }
+ halbtc8723b1ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+
+ coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+}
+
+void halbtc8723b1ant_set_bt_auto_report(struct btc_coexist *btcoexist,
+ bool enable_auto_report)
+{
+ u8 h2c_parameter[1] = {0};
+
+ h2c_parameter[0] = 0;
+
+ if (enable_auto_report)
+ h2c_parameter[0] |= BIT0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n",
+		  (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+ h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
+}
+
+void halbtc8723b1ant_bt_auto_report(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable_auto_report)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s BT Auto report = %s\n",
+		  (force_exec ? "force to" : ""),
+		  (enable_auto_report ? "Enabled" : "Disabled"));
+ coex_dm->cur_bt_auto_report = enable_auto_report;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreBtAutoReport=%d, "
+ "bCurBtAutoReport=%d\n",
+ coex_dm->pre_bt_auto_report,
+ coex_dm->cur_bt_auto_report);
+
+ if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
+ return;
+ }
+ halbtc8723b1ant_set_bt_auto_report(btcoexist,
+ coex_dm->cur_bt_auto_report);
+
+ coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
+}
+
+void halbtc8723b1ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
+ bool force_exec, u8 fw_dac_swing_lvl)
+{
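+	/* Note: the early return below makes this function a no-op in this
+	 * version; the FW DAC swing level is not programmed here. */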
+ return;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+		  (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+ coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], preFwDacSwingLvl=%d, "
+ "curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
+
+ if (coex_dm->pre_fw_dac_swing_lvl ==
+ coex_dm->cur_fw_dac_swing_lvl)
+ return;
+ }
+
+ halbtc8723b1ant_set_fw_dac_swing_level(btcoexist,
+ coex_dm->cur_fw_dac_swing_lvl);
+
+ coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
+}
+
+void halbtc8723b1ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
+ bool rx_rf_shrink_on)
+{
+ if (rx_rf_shrink_on) {
+ /*Shrink RF Rx LPF corner */
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+ 0xfffff, 0xffff7);
+ } else {
+		/* Resume RF Rx LPF corner.
+		 * After initialization we can restore coex_dm->bt_rf0x1e_backup. */
+ if (btcoexist->initilized) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
+ 0x1e, 0xfffff,
+ coex_dm->bt_rf0x1e_backup);
+ }
+ }
+}
+
+void halbtc8723b1ant_rf_shrink(struct btc_coexist *btcoexist,
+ bool force_exec, bool rx_rf_shrink_on)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+		  (force_exec ? "force to" : ""),
+		  (rx_rf_shrink_on ? "ON" : "OFF"));
+ coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreRfRxLpfShrink=%d, "
+ "bCurRfRxLpfShrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
+
+ if (coex_dm->pre_rf_rx_lpf_shrink ==
+ coex_dm->cur_rf_rx_lpf_shrink)
+ return;
+ }
+ halbtc8723b1ant_set_sw_rf_rx_lpf_corner(btcoexist,
+ coex_dm->cur_rf_rx_lpf_shrink);
+
+ coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
+}
+
+void halbtc8723b1ant_set_sw_penalty_tx_rate_adaptive(
+ struct btc_coexist *btcoexist,
+ bool low_penalty_ra)
+{
+ u8 h2c_parameter[6] = {0};
+
+ h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty */
+
+ if (low_penalty_ra) {
+ h2c_parameter[1] |= BIT0;
+ /*normal rate except MCS7/6/5, OFDM54/48/36 */
+ h2c_parameter[2] = 0x00;
+ h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54 */
+ h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48 */
+ h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36 */
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
+}
+
+void halbtc8723b1ant_low_penalty_ra(struct btc_coexist *btcoexist,
+ bool force_exec, bool low_penalty_ra)
+{
+ coex_dm->cur_low_penalty_ra = low_penalty_ra;
+
+ if (!force_exec) {
+ if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
+ return;
+ }
+ halbtc8723b1ant_set_sw_penalty_tx_rate_adaptive(btcoexist,
+ coex_dm->cur_low_penalty_ra);
+
+ coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
+}
+
+void halbtc8723b1ant_set_dac_swing_reg(struct btc_coexist *btcoexist, u32 level)
+{
+ u8 val = (u8) level;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
+}
+
+void halbtc8723b1ant_set_sw_full_time_dac_swing(struct btc_coexist *btcoexist,
+ bool sw_dac_swing_on,
+ u32 sw_dac_swing_lvl)
+{
+ if (sw_dac_swing_on)
+ halbtc8723b1ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl);
+ else
+ halbtc8723b1ant_set_dac_swing_reg(btcoexist, 0x18);
+}
+
+
+void halbtc8723b1ant_dac_swing(struct btc_coexist *btcoexist, bool force_exec,
+ bool dac_swing_on, u32 dac_swing_lvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+ (force_exec ? "force to" : ""), (dac_swing_on ? "ON" : "OFF"),
+ dac_swing_lvl);
+
+ coex_dm->cur_dac_swing_on = dac_swing_on;
+ coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, "
+ "bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
+
+ if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
+ (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
+ return;
+ }
+ mdelay(30);
+ halbtc8723b1ant_set_sw_full_time_dac_swing(btcoexist, dac_swing_on,
+ dac_swing_lvl);
+
+ coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
+ coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
+}
+
+void halbtc8723b1ant_set_adc_backoff(struct btc_coexist *btcoexist,
+ bool adc_backoff)
+{
+ if (adc_backoff) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB BackOff Level On!\n");
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB BackOff Level Off!\n");
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1);
+ }
+}
+
+void halbtc8723b1ant_adc_backoff(struct btc_coexist *btcoexist,
+ bool force_exec, bool adc_backoff)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn AdcBackOff = %s\n",
+ (force_exec ? "force to" : ""), (adc_backoff ? "ON" : "OFF"));
+ coex_dm->cur_adc_backoff = adc_backoff;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n",
+ coex_dm->pre_adc_backoff, coex_dm->cur_adc_backoff);
+
+		if (coex_dm->pre_adc_backoff == coex_dm->cur_adc_backoff)
+ return;
+ }
+ halbtc8723b1ant_set_adc_backoff(btcoexist, coex_dm->cur_adc_backoff);
+
+ coex_dm->pre_adc_backoff =
+ coex_dm->cur_adc_backoff;
+}
+
+void halbtc8723b1ant_set_agc_table(struct btc_coexist *btcoexist,
+ bool adc_table_en)
+{
+ u8 rssi_adjust_val = 0;
+
+ btcoexist->btc_set_rf_reg(btcoexist,
+ BTC_RF_A, 0xef, 0xfffff, 0x02000);
+ if (adc_table_en) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table On!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x3fa58);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x37a58);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x2fa58);
+ rssi_adjust_val = 8;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table Off!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x39258);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x31258);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x29258);
+ }
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
+
+ /* set rssi_adjust_val for wifi module. */
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
+ &rssi_adjust_val);
+}
+
+
+void halbtc8723b1ant_agc_table(struct btc_coexist *btcoexist,
+ bool force_exec, bool adc_table_en)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s %s Agc Table\n",
+ (force_exec ? "force to" : ""),
+ (adc_table_en ? "Enable" : "Disable"));
+ coex_dm->cur_agc_table_en = adc_table_en;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ coex_dm->pre_agc_table_en,
+ coex_dm->cur_agc_table_en);
+
+		if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
+ return;
+ }
+ halbtc8723b1ant_set_agc_table(btcoexist, adc_table_en);
+
+ coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
+}
+
+void halbtc8723b1ant_set_coex_table(struct btc_coexist *btcoexist,
+ u32 val0x6c0, u32 val0x6c4,
+ u32 val0x6c8, u8 val0x6cc)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+ btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
+}
+
+void halbtc8723b1ant_coex_table(struct btc_coexist *btcoexist,
+ bool force_exec, u32 val0x6c0,
+ u32 val0x6c4, u32 val0x6c8,
+ u8 val0x6cc)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0=0x%x,"
+ " 0x6c4=0x%x, 0x6cc=0x%x\n", (force_exec ? "force to" : ""),
+ val0x6c0, val0x6c4, val0x6cc);
+ coex_dm->cur_val0x6c0 = val0x6c0;
+ coex_dm->cur_val0x6c4 = val0x6c4;
+ coex_dm->cur_val0x6c8 = val0x6c8;
+ coex_dm->cur_val0x6cc = val0x6cc;
+
+ if (!force_exec) {
+ if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
+ (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
+ (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
+ (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
+ return;
+ }
+ halbtc8723b1ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
+ val0x6c8, val0x6cc);
+
+ coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
+ coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
+ coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
+ coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
+}
+
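+/* Preset coex table profiles: each type programs a fixed combination of
+ * 0x6c0/0x6c4/0x6c8/0x6cc values through halbtc8723b1ant_coex_table().
+ */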
+void halbtc8723b1ant_coex_table_with_type(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ switch (type) {
+ case 0:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x55555555, 0xffffff, 0x3);
+ break;
+ case 1:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5a5a5a5a, 0xffffff, 0x3);
+ break;
+ case 2:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+ 0x5a5a5a5a, 0xffffff, 0x3);
+ break;
+ case 3:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0xaaaaaaaa, 0xffffff, 0x3);
+ break;
+ case 4:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5aaa5aaa, 0xffffff, 0x3);
+ break;
+ case 5:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+ 0xaaaa5a5a, 0xffffff, 0x3);
+ break;
+ case 6:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0xaaaa5a5a, 0xffffff, 0x3);
+ break;
+ case 7:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5afa5afa,
+ 0x5afa5afa, 0xffffff, 0x3);
+ break;
+ default:
+ break;
+ }
+}
+
+void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
+ bool enable)
+{
+ u8 h2c_parameter[1] = {0};
+
+ if (enable)
+ h2c_parameter[0] |= BIT0; /* function enable */
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set FW for BT Ignore Wlan_Act,"
+ " FW write 0x63=0x%x\n", h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
+}
+
+void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ coex_dm->cur_ignore_wlan_act = enable;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreIgnoreWlanAct = %d, "
+ "bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
+
+ if (coex_dm->pre_ignore_wlan_act ==
+ coex_dm->cur_ignore_wlan_act)
+ return;
+ }
+ halbtc8723b1ant_SetFwIgnoreWlanAct(btcoexist, enable);
+
+ coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
+}
+
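+/* Send the 5-byte PS-TDMA parameter set to firmware via H2C 0x60.  In AP
+ * mode, a byte1 with BIT4 set (and BIT5 clear) is rewritten to use BIT5, and
+ * byte5 gets BIT5 set/BIT6 cleared; the values actually sent are cached in
+ * coex_dm->ps_tdma_para[].
+ */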
+void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
+ u8 byte1, u8 byte2, u8 byte3,
+ u8 byte4, u8 byte5)
+{
+ u8 h2c_parameter[5] = {0};
+ u8 real_byte1 = byte1, real_byte5 = byte5;
+ bool ap_enable = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+
+ if (ap_enable) {
+ if ((byte1 & BIT4) && !(byte1 & BIT5)) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], FW for 1Ant AP mode\n");
+ real_byte1 &= ~BIT4;
+ real_byte1 |= BIT5;
+
+ real_byte5 |= BIT5;
+ real_byte5 &= ~BIT6;
+ }
+ }
+
+ h2c_parameter[0] = real_byte1;
+ h2c_parameter[1] = byte2;
+ h2c_parameter[2] = byte3;
+ h2c_parameter[3] = byte4;
+ h2c_parameter[4] = real_byte5;
+
+ coex_dm->ps_tdma_para[0] = real_byte1;
+ coex_dm->ps_tdma_para[1] = byte2;
+ coex_dm->ps_tdma_para[2] = byte3;
+ coex_dm->ps_tdma_para[3] = byte4;
+ coex_dm->ps_tdma_para[4] = real_byte5;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 |
+ h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 |
+ h2c_parameter[4]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
+}
+
+void halbtc8723b1ant_SetLpsRpwm(struct btc_coexist *btcoexist,
+ u8 lps_val, u8 rpwm_val)
+{
+ u8 lps = lps_val;
+ u8 rpwm = rpwm_val;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_1ANT_LPS, &lps);
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_1ANT_RPWM, &rpwm);
+}
+
+void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist, bool force_exec,
+ u8 lps_val, u8 rpwm_val)
+{
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], %s set lps/rpwm=0x%x/0x%x\n",
+ (force_exec ? "force to" : ""), lps_val, rpwm_val);
+ coex_dm->cur_lps = lps_val;
+ coex_dm->cur_rpwm = rpwm_val;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], LPS-RxBeaconMode=0x%x , LPS-RPWM=0x%x!!\n",
+ coex_dm->cur_lps, coex_dm->cur_rpwm);
+
+ if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
+ (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], LPS-RPWM_Last=0x%x"
+ " , LPS-RPWM_Now=0x%x!!\n",
+ coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+
+ return;
+ }
+ }
+ halbtc8723b1ant_SetLpsRpwm(btcoexist, lps_val, rpwm_val);
+
+ coex_dm->pre_lps = coex_dm->cur_lps;
+ coex_dm->pre_rpwm = coex_dm->cur_rpwm;
+}
+
+void halbtc8723b1ant_sw_mechanism1(struct btc_coexist *btcoexist,
+ bool shrink_rx_lpf, bool low_penalty_ra,
+ bool limited_dig, bool bt_lna_constrain)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], SM1[ShRf/ LpRA/ LimDig/ btLna] = %d %d %d %d\n",
+ shrink_rx_lpf, low_penalty_ra, limited_dig, bt_lna_constrain);
+
+ halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
+}
+
+void halbtc8723b1ant_sw_mechanism2(struct btc_coexist *btcoexist,
+ bool agc_table_shift, bool adc_backoff,
+ bool sw_dac_swing, u32 dac_swing_lvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], SM2[AgcT/ AdcB/ SwDacSwing(lvl)] = %d %d %d\n",
+ agc_table_shift, adc_backoff, sw_dac_swing);
+}
+
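+/* Select the antenna path.  An external antenna switch is used when the PG
+ * ext-switch bit is set or the WiFi FW version is below 0xc0000, otherwise
+ * the internal switch is used.  init_hw_cfg/wifi_off additionally decide who
+ * controls GNT_BT (0x765) and the s0/s1 selection (0x67), and the antenna
+ * inverse setting is reported to firmware via H2C 0x65.
+ */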
+void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist,
+ u8 ant_pos_type, bool init_hw_cfg,
+ bool wifi_off)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ u32 fw_ver = 0, u32tmp = 0;
+ bool pg_ext_switch = false;
+ bool use_ext_switch = false;
+ u8 h2c_parameter[2] = {0};
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_EXT_SWITCH, &pg_ext_switch);
+ /* [31:16]=fw ver, [15:0]=fw sub ver */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+
+
+ if ((fw_ver < 0xc0000) || pg_ext_switch)
+ use_ext_switch = true;
+
+	if (init_hw_cfg) {
+ /*BT select s0/s1 is controlled by WiFi */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1);
+
+ /*Force GNT_BT to Normal */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
+ } else if (wifi_off) {
+ /*Force GNT_BT to High */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3);
+ /*BT select s0/s1 is controlled by BT */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0);
+
+ /* 0x4c[24:23]=00, Set Antenna control by BT_RFE_CTRL
+ * BT Vendor 0xac=0xf002 */
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u32tmp &= ~BIT23;
+ u32tmp &= ~BIT24;
+ btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+ }
+
+ if (use_ext_switch) {
+ if (init_hw_cfg) {
+ /* 0x4c[23]=0, 0x4c[24]=1 Antenna control by WL/BT */
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u32tmp &= ~BIT23;
+ u32tmp |= BIT24;
+ btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+
+ if (board_info->btdm_ant_pos ==
+ BTC_ANTENNA_AT_MAIN_PORT) {
+ /* Main Ant to BT for IPS case 0x4c[23]=1 */
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x64, 0x1,
+ 0x1);
+
+ /*tell firmware "no antenna inverse"*/
+ h2c_parameter[0] = 0;
+ h2c_parameter[1] = 1; /*ext switch type*/
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ } else {
+ /*Aux Ant to BT for IPS case 0x4c[23]=1 */
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x64, 0x1,
+ 0x0);
+
+ /*tell firmware "antenna inverse"*/
+ h2c_parameter[0] = 1;
+ h2c_parameter[1] = 1; /*ext switch type*/
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ }
+ }
+
+ /* fixed internal switch first*/
+ /* fixed internal switch S1->WiFi, S0->BT*/
+ if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+ else/* fixed internal switch S0->WiFi, S1->BT*/
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+
+ /* ext switch setting */
+ switch (ant_pos_type) {
+ case BTC_ANT_PATH_WIFI:
+ if (board_info->btdm_ant_pos ==
+ BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x92c, 0x3,
+ 0x1);
+ else
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x92c, 0x3,
+ 0x2);
+ break;
+ case BTC_ANT_PATH_BT:
+ if (board_info->btdm_ant_pos ==
+ BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x92c, 0x3,
+ 0x2);
+ else
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x92c, 0x3,
+ 0x1);
+ break;
+ default:
+ case BTC_ANT_PATH_PTA:
+ if (board_info->btdm_ant_pos ==
+ BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x92c, 0x3,
+ 0x1);
+ else
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x92c, 0x3,
+ 0x2);
+ break;
+ }
+
+ } else {
+ if (init_hw_cfg) {
+ /* 0x4c[23]=1, 0x4c[24]=0 Antenna control by 0x64*/
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u32tmp |= BIT23;
+ u32tmp &= ~BIT24;
+ btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+
+ if (board_info->btdm_ant_pos ==
+ BTC_ANTENNA_AT_MAIN_PORT) {
+ /*Main Ant to WiFi for IPS case 0x4c[23]=1*/
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x64, 0x1,
+ 0x0);
+
+ /*tell firmware "no antenna inverse"*/
+ h2c_parameter[0] = 0;
+ h2c_parameter[1] = 0; /*internal switch type*/
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ } else {
+ /*Aux Ant to BT for IPS case 0x4c[23]=1*/
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x64, 0x1,
+ 0x1);
+
+ /*tell firmware "antenna inverse"*/
+ h2c_parameter[0] = 1;
+ h2c_parameter[1] = 0; /*internal switch type*/
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ }
+ }
+
+ /* fixed external switch first*/
+ /*Main->WiFi, Aux->BT*/
+		if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
+ 0x3, 0x1);
+ else/*Main->BT, Aux->WiFi */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
+ 0x3, 0x2);
+
+ /* internal switch setting*/
+ switch (ant_pos_type) {
+ case BTC_ANT_PATH_WIFI:
+			if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_2byte(btcoexist, 0x948,
+ 0x0);
+ else
+ btcoexist->btc_write_2byte(btcoexist, 0x948,
+ 0x280);
+ break;
+ case BTC_ANT_PATH_BT:
+			if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_2byte(btcoexist, 0x948,
+ 0x280);
+ else
+ btcoexist->btc_write_2byte(btcoexist, 0x948,
+ 0x0);
+ break;
+ default:
+ case BTC_ANT_PATH_PTA:
+			if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_2byte(btcoexist, 0x948,
+ 0x200);
+ else
+ btcoexist->btc_write_2byte(btcoexist, 0x948,
+ 0x80);
+ break;
+ }
+ }
+}
+
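+/* Turn PS-TDMA on with one of the predefined firmware patterns (type), or
+ * turn it off and route the antenna to PTA/BT/WiFi software control.  The
+ * previous on/off state and type are cached so unchanged requests are
+ * skipped unless force_exec is set.
+ */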
+void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
+ bool turn_on, u8 type)
+{
+ bool wifi_busy = false;
+ u8 rssi_adjust_val = 0;
+
+ coex_dm->cur_ps_tdma_on = turn_on;
+ coex_dm->cur_ps_tdma = type;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+ if (!force_exec) {
+ if (coex_dm->cur_ps_tdma_on)
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], ******** TDMA(on, %d) *********\n",
+ coex_dm->cur_ps_tdma);
+ else
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], ******** TDMA(off, %d) ********\n",
+ coex_dm->cur_ps_tdma);
+
+
+ if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
+ (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
+ return;
+ }
+ if (turn_on) {
+ switch (type) {
+ default:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1a,
+ 0x1a, 0x0, 0x50);
+ break;
+ case 1:
+ if (wifi_busy)
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+ 0x3a, 0x03,
+ 0x10, 0x50);
+ else
+				halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+ 0x3a, 0x03,
+ 0x10, 0x51);
+
+ rssi_adjust_val = 11;
+ break;
+ case 2:
+ if (wifi_busy)
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+ 0x2b, 0x03,
+ 0x10, 0x50);
+ else
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+ 0x2b, 0x03,
+ 0x10, 0x51);
+ rssi_adjust_val = 14;
+ break;
+ case 3:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1d,
+ 0x1d, 0x0, 0x52);
+ break;
+ case 4:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15,
+ 0x3, 0x14, 0x0);
+ rssi_adjust_val = 17;
+ break;
+ case 5:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15,
+ 0x3, 0x11, 0x10);
+ break;
+ case 6:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x20,
+ 0x3, 0x11, 0x13);
+ break;
+ case 7:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xc,
+ 0x5, 0x0, 0x0);
+ break;
+ case 8:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25,
+ 0x3, 0x10, 0x0);
+ break;
+ case 9:
+			if (wifi_busy)
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+ 0x21, 0x3,
+ 0x10, 0x50);
+ else
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+ 0x21, 0x3,
+ 0x10, 0x50);
+ rssi_adjust_val = 18;
+ break;
+ case 10:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa,
+ 0xa, 0x0, 0x40);
+ break;
+ case 11:
+ if (wifi_busy)
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+ 0x15, 0x03,
+ 0x10, 0x50);
+ else
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+ 0x15, 0x03,
+ 0x10, 0x50);
+ rssi_adjust_val = 20;
+ break;
+ case 12:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x0a,
+ 0x0a, 0x0, 0x50);
+ break;
+ case 13:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x15,
+ 0x15, 0x0, 0x50);
+ break;
+ case 14:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x21,
+ 0x3, 0x10, 0x52);
+ break;
+ case 15:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa,
+ 0x3, 0x8, 0x0);
+ break;
+ case 16:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15,
+ 0x3, 0x10, 0x0);
+ rssi_adjust_val = 18;
+ break;
+ case 18:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25,
+ 0x3, 0x10, 0x0);
+ rssi_adjust_val = 14;
+ break;
+ case 20:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x35,
+ 0x03, 0x11, 0x10);
+ break;
+ case 21:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15,
+ 0x03, 0x11, 0x10);
+ break;
+ case 22:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x25,
+ 0x03, 0x11, 0x10);
+ break;
+ case 23:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+ 0x3, 0x31, 0x18);
+ rssi_adjust_val = 22;
+ break;
+ case 24:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+ 0x3, 0x31, 0x18);
+ rssi_adjust_val = 22;
+ break;
+ case 25:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+ 0x3, 0x31, 0x18);
+ rssi_adjust_val = 22;
+ break;
+ case 26:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+ 0x3, 0x31, 0x18);
+ rssi_adjust_val = 22;
+ break;
+ case 27:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+ 0x3, 0x31, 0x98);
+ rssi_adjust_val = 22;
+ break;
+ case 28:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x69, 0x25,
+ 0x3, 0x31, 0x0);
+ break;
+ case 29:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xab, 0x1a,
+ 0x1a, 0x1, 0x10);
+ break;
+ case 30:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x14,
+ 0x3, 0x10, 0x50);
+ break;
+ case 31:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1a,
+ 0x1a, 0, 0x58);
+ break;
+ case 32:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0xa,
+ 0x3, 0x10, 0x0);
+ break;
+ case 33:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x25,
+ 0x3, 0x30, 0x90);
+ break;
+ case 34:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x53, 0x1a,
+ 0x1a, 0x0, 0x10);
+ break;
+ case 35:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x63, 0x1a,
+ 0x1a, 0x0, 0x10);
+ break;
+ case 36:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x12,
+ 0x3, 0x14, 0x50);
+ break;
+		/* SoftAP only with no STA associated, BT disabled;
+		 * TDMA mode for power saving.
+		 * Here SoftAP mode with screen off costs 70-80 mA on a phone. */
+ case 40:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x23, 0x18,
+ 0x00, 0x10, 0x24);
+ break;
+ }
+ } else {
+ switch (type) {
+ case 8: /*PTA Control */
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x8, 0x0,
+ 0x0, 0x0, 0x0);
+ halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_PTA,
+ false, false);
+ break;
+ case 0:
+ default: /*Software control, Antenna at BT side */
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0,
+ 0x0, 0x0, 0x0);
+ halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
+ false, false);
+ break;
+ case 9: /*Software control, Antenna at WiFi side */
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0,
+ 0x0, 0x0, 0x0);
+ halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_WIFI,
+ false, false);
+ break;
+ }
+ }
+ rssi_adjust_val = 0;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE,
+ &rssi_adjust_val);
+
+ /* update pre state */
+ coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
+ coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
+}
+
+void halbtc8723b1ant_coex_alloff(struct btc_coexist *btcoexist)
+{
+ /* fw all off */
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ /* sw all off */
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+ /* hw all off */
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+}
+
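+/* Common-action handling for the idle combinations: whenever WiFi is
+ * not connected or BT is in one of its idle states, apply the default
+ * all-off settings (DAC swing level 6, no BT power decrease, both
+ * software mechanisms off) and report the case as handled.  Only the
+ * "WiFi connected + BT busy" case returns false, so the caller runs a
+ * profile-specific algorithm instead.
+ */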
+bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist)
+{
+ bool common = false, wifi_connected = false;
+ bool wifi_busy = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+ if (!wifi_connected &&
+ BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + "
+ "BT non connected-idle!!\n");
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ common = true;
+ } else if (wifi_connected &&
+ (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+ coex_dm->bt_status)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi connected + "
+ "BT non connected-idle!!\n");
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ common = true;
+ } else if (!wifi_connected &&
+ (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + "
+ "BT connected-idle!!\n");
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ common = true;
+ } else if (wifi_connected &&
+ (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ common = true;
+ } else if (!wifi_connected &&
+ (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE !=
+ coex_dm->bt_status)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ ("[BTCoex], Wifi non connected-idle + BT Busy!!\n"));
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ common = true;
+ } else {
+ if (wifi_busy)
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Busy"
+ " + BT Busy!!\n");
+ else
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Idle"
+ " + BT Busy!!\n");
+
+ common = false;
+ }
+
+ return common;
+}
+
+
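+/* Adaptive PS-TDMA duration tuning for BT ACL traffic.  The static
+ * up/dn/m/n/wait_count counters form a simple hysteresis driven by the
+ * BT retry counter (BT_Info byte2): no retries eventually lengthen the
+ * WiFi slot (result = +1), a few retries shorten it (result = -1), and
+ * repeated shortening widens the wait window (n = 3 * m) before the
+ * next increase.  The result steps between PS-TDMA cases 1/2/9/11;
+ * during association/scan/special-packet phases case 9 is forced and
+ * the counters are reset.
+ */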
+void halbtc8723b1ant_tdma_duration_adjust_for_acl(struct btc_coexist *btcoexist,
+ u8 wifi_status)
+{
+ static s32 up, dn, m, n, wait_count;
+ /* 0: no change, +1: increase WiFi duration,
+ * -1: decrease WiFi duration */
+ s32 result;
+ u8 retry_count = 0, bt_info_ext;
+ static bool pre_wifi_busy;
+ bool wifi_busy = false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjustForAcl()\n");
+
+ if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status)
+ wifi_busy = true;
+ else
+ wifi_busy = false;
+
+ if ((BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
+ wifi_status) ||
+ (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifi_status) ||
+ (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifi_status)) {
+ if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
+ coex_dm->cur_ps_tdma != 3 && coex_dm->cur_ps_tdma != 9) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ result = 0;
+ wait_count = 0;
+ }
+ return;
+ }
+
+ if (!coex_dm->auto_tdma_adjust) {
+ coex_dm->auto_tdma_adjust = true;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
+
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ result = 0;
+ wait_count = 0;
+ } else {
+ /* acquire the BT TRx retry count from BT_Info byte2 */
+ retry_count = coex_sta->bt_retry_cnt;
+ bt_info_ext = coex_sta->bt_info_ext;
+ result = 0;
+ wait_count++;
+ /* no retry in the last 2-second duration */
+ if (retry_count == 0) {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+ if (up >= n) {
+ wait_count = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Increase wifi "
+ "duration!!\n");
+ }
+ } else if (retry_count <= 3) {
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+ if (dn == 2) {
+ if (wait_count <= 2)
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration"
+ " for retryCounter<3!!\n");
+ }
+ } else {
+ if (wait_count == 1)
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration"
+ " for retryCounter>3!!\n");
+ }
+
+ if (result == -1) {
+ if ((BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
+ ((coex_dm->cur_ps_tdma == 1) ||
+ (coex_dm->cur_ps_tdma == 2))) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ }
+ } else if (result == 1) {
+ if ((BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
+ ((coex_dm->cur_ps_tdma == 1) ||
+ (coex_dm->cur_ps_tdma == 2))) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type = 1;
+ }
+ } else { /*no change */
+ /*if busy / idle change */
+ if (wifi_busy != pre_wifi_busy) {
+ pre_wifi_busy = wifi_busy;
+ halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC,
+ true,
+ coex_dm->cur_ps_tdma);
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex],********* TDMA(on, %d) ********\n",
+ coex_dm->cur_ps_tdma);
+ }
+
+ if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
+ coex_dm->cur_ps_tdma != 9 && coex_dm->cur_ps_tdma != 11) {
+ /* recover to previous adjust type */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
+ coex_dm->ps_tdma_du_adj_type);
+ }
+ }
+}
+
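+/* Pick PS-TDMA case 25 or 26 from the WiFi RSSI trend: while the RSSI
+ * is rising a 5 dB margin above wifi_rssi_thresh is required for case
+ * 26, which gives a small hysteresis around the threshold.
+ */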
+u8 halbtc8723b1ant_ps_tdma_type_by_wifi_rssi(s32 wifi_rssi, s32 pre_wifi_rssi,
+ u8 wifi_rssi_thresh)
+{
+ u8 ps_tdma_type = 0;
+
+ if (wifi_rssi > pre_wifi_rssi) {
+ if (wifi_rssi > (wifi_rssi_thresh + 5))
+ ps_tdma_type = 26;
+ else
+ ps_tdma_type = 25;
+ } else {
+ if (wifi_rssi > wifi_rssi_thresh)
+ ps_tdma_type = 26;
+ else
+ ps_tdma_type = 25;
+ }
+
+ return ps_tdma_type;
+}
+
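+/* Before any LPS transition, switch PS-TDMA off first so the firmware
+ * TDMA is not left running across the power-save state change; when
+ * the LPS state does not actually change, nothing is touched.
+ */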
+void halbtc8723b1ant_PsTdmaCheckForPowerSaveState(struct btc_coexist *btcoexist,
+ bool new_ps_state)
+{
+ u8 lps_mode = 0x0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode);
+
+ if (lps_mode) { /* already under LPS state */
+ if (new_ps_state) {
+ /* keep state under LPS, do nothing. */
+ } else {
+ /* will leave LPS state, turn off psTdma first */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ false, 0);
+ }
+ } else { /* NO PS state */
+ if (new_ps_state) {
+ /* will enter LPS state, turn off psTdma first */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ false, 0);
+ } else {
+ /* keep state under NO PS state, do nothing. */
+ }
+ }
+}
+
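+/* Central power-save control: BTC_PS_WIFI_NATIVE restores the normal
+ * 32k low-power behaviour, BTC_PS_LPS_ON programs lps_val/rpwm_val and
+ * forces LPS entry with 32k low power disabled, and BTC_PS_LPS_OFF
+ * leaves LPS.  PS-TDMA is stopped before either LPS transition.
+ */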
+void halbtc8723b1ant_power_save_state(struct btc_coexist *btcoexist,
+ u8 ps_type, u8 lps_val,
+ u8 rpwm_val)
+{
+ bool low_pwr_disable = false;
+
+ switch (ps_type) {
+ case BTC_PS_WIFI_NATIVE:
+ /* recover to original 32k low power setting */
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+ break;
+ case BTC_PS_LPS_ON:
+ halbtc8723b1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8723b1ant_LpsRpwm(btcoexist, NORMAL_EXEC, lps_val,
+ rpwm_val);
+ /* when coex force to enter LPS, do not enter 32k low power. */
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+ /* power save must executed before psTdma. */
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ break;
+ case BTC_PS_LPS_OFF:
+ halbtc8723b1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ break;
+ default:
+ break;
+ }
+}
+
+void halbtc8723b1ant_action_wifi_only(struct btc_coexist *btcoexist)
+{
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
+}
+
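+/* Detect whether BT has been disabled by watching the PTA traffic
+ * counters: all-zero (or all-0xffff) high/low priority counters on two
+ * consecutive checks are treated as "BT disabled", which switches to
+ * the WiFi-only settings; when the state flips to disabled, WiFi is
+ * also forced out of LPS.
+ */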
+void halbtc8723b1ant_monitor_bt_enable_disable(struct btc_coexist *btcoexist)
+{
+ static bool pre_bt_disabled;
+ static u32 bt_disable_cnt;
+ bool bt_active = true, bt_disabled = false;
+
+ /* This function checks whether BT is disabled */
+
+ if (coex_sta->high_priority_tx == 0 &&
+ coex_sta->high_priority_rx == 0 &&
+ coex_sta->low_priority_tx == 0 &&
+ coex_sta->low_priority_rx == 0)
+ bt_active = false;
+
+ if (coex_sta->high_priority_tx == 0xffff &&
+ coex_sta->high_priority_rx == 0xffff &&
+ coex_sta->low_priority_tx == 0xffff &&
+ coex_sta->low_priority_rx == 0xffff)
+ bt_active = false;
+
+ if (bt_active) {
+ bt_disable_cnt = 0;
+ bt_disabled = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+ &bt_disabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is enabled !!\n");
+ } else {
+ bt_disable_cnt++;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], bt all counters=0, %d times!!\n",
+ bt_disable_cnt);
+ if (bt_disable_cnt >= 2) {
+ bt_disabled = true;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+ &bt_disabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is disabled !!\n");
+ halbtc8723b1ant_action_wifi_only(btcoexist);
+ }
+ }
+ if (pre_bt_disabled != bt_disabled) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled" : "enabled"),
+ (bt_disabled ? "disabled" : "enabled"));
+ pre_bt_disabled = bt_disabled;
+ if (bt_disabled) {
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS,
+ NULL);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS,
+ NULL);
+ }
+ }
+}
+
+/***************************************************
+ *
+ * Software Coex Mechanism start
+ *
+ ***************************************************/
+/* SCO only or SCO+PAN(HS) */
+void halbtc8723b1ant_action_sco(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state =
+ halbtc8723b1ant_wifi_rssi_state(btcoexist, 0, 2, 25, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+
+void halbtc8723b1ant_action_hid(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
+void halbtc8723b1ant_action_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+void halbtc8723b1ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u32 wifi_bw;
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+void halbtc8723b1ant_action_pan_edr(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+
+/* PAN(HS) only */
+void halbtc8723b1ant_action_pan_hs(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* fw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+ false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+ false);
+
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* fw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+ false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+ false);
+
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* PAN(EDR)+A2DP */
+void halbtc8723b1ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u32 wifi_bw;
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+void halbtc8723b1ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* HID+A2DP+PAN(EDR) */
+void halbtc8723b1ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u32 wifi_bw;
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+void halbtc8723b1ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u32 wifi_bw;
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/*****************************************************
+ *
+ * Non-Software Coex Mechanism start
+ *
+ *****************************************************/
+void halbtc8723b1ant_action_hs(struct btc_coexist *btcoexist)
+{
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 2);
+}
+
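+/* BT inquiry/page scan in progress: stay in native power save when
+ * WiFi is not connected or a SCO/HID link is busy, otherwise allow LPS
+ * (unless AP mode is enabled), and select PS-TDMA case 5, 32 or 30
+ * together with coexistence table 1.
+ */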
+void halbtc8723b1ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool wifi_connected = false, ap_enable = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+
+ if (!wifi_connected) {
+ halbtc8723b1ant_power_save_state(btcoexist,
+ BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ } else if (bt_link_info->sco_exist || bt_link_info->hid_only) {
+ /* SCO/HID-only busy */
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ } else {
+ if (ap_enable)
+ halbtc8723b1ant_power_save_state(btcoexist,
+ BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ else
+ halbtc8723b1ant_power_save_state(btcoexist,
+ BTC_PS_LPS_ON,
+ 0x50, 0x4);
+
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 30);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ }
+}
+
+void halbtc8723b1ant_action_bt_sco_hid_only_busy(struct btc_coexist *btcoexist,
+ u8 wifi_status)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool wifi_connected = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+
+ /* tdma and coex table */
+
+ if (bt_link_info->sco_exist) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+ } else { /* HID */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
+ }
+}
+
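+/* WiFi connected while BT ACL is busy: pick the TDMA case from the BT
+ * profile mix.  HID-only defers to the SCO/HID-busy handler; A2DP-only
+ * uses the adaptive duration tuning at high BT RSSI and case 11
+ * otherwise; HID+A2DP uses case 14; PAN-only and HID+PAN use case 3;
+ * A2DP+PAN combinations use case 13; anything else falls back to
+ * case 11.
+ */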
+void halbtc8723b1ant_action_wifi_connected_bt_acl_busy(
+ struct btc_coexist *btcoexist,
+ u8 wifi_status)
+{
+ u8 bt_rssi_state;
+
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 28, 0);
+
+ if (bt_link_info->hid_only) { /* HID */
+ halbtc8723b1ant_action_bt_sco_hid_only_busy(btcoexist,
+ wifi_status);
+ coex_dm->auto_tdma_adjust = false;
+ return;
+ } else if (bt_link_info->a2dp_only) { /* A2DP */
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_tdma_duration_adjust_for_acl(btcoexist,
+ wifi_status);
+ } else { /* for low BT RSSI */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->auto_tdma_adjust = false;
+ }
+
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) { /*HID+A2DP */
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->auto_tdma_adjust = false;
+ } else { /* for low BT RSSI */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->auto_tdma_adjust = false;
+ }
+
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
+ /* PAN(OPP, FTP), HID+PAN(OPP, FTP) */
+ } else if (bt_link_info->pan_only ||
+ (bt_link_info->hid_exist && bt_link_info->pan_exist)) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
+ coex_dm->auto_tdma_adjust = false;
+ /* A2DP+PAN(OPP, FTP), HID+A2DP+PAN(OPP, FTP) */
+ } else if ((bt_link_info->a2dp_exist && bt_link_info->pan_exist) ||
+ (bt_link_info->hid_exist && bt_link_info->a2dp_exist &&
+ bt_link_info->pan_exist)) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ coex_dm->auto_tdma_adjust = false;
+ } else {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ coex_dm->auto_tdma_adjust = false;
+ }
+}
+
+void halbtc8723b1ant_action_wifi_not_connected(struct btc_coexist *btcoexist)
+{
+ /* power save state */
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+
+ /* tdma and coex table */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+}
+
+void halbtc8723b1ant_action_wifi_not_connected_asso_auth_scan(
+ struct btc_coexist *btcoexist)
+{
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+}
+
+void halbtc8723b1ant_ActionWifiConnectedScan(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+
+ /* tdma and coex table */
+ if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
+ if (bt_link_info->a2dp_exist &&
+ bt_link_info->pan_exist) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 22);
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ } else {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ }
+ } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
+ coex_dm->bt_status)) {
+ halbtc8723b1ant_action_bt_sco_hid_only_busy(btcoexist,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN);
+ } else {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ }
+}
+
+void halbtc8723b1ant_action_wifi_connected_special_packet(
+ struct btc_coexist *btcoexist)
+{
+ bool hs_connecting = false;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting);
+
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+
+ /* tdma and coex table */
+ if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
+ if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 22);
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ } else {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 20);
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ }
+ } else {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ }
+}
+
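+/* Main handler for the WiFi-connected case: the 4-way handshake and
+ * scan/link/roam states are dealt with first, LPS is then allowed only
+ * when BT is ACL-busy without a HID-only link and AP mode is off, and
+ * finally the TDMA case and coexistence table are chosen from the WiFi
+ * busy state and the current BT status.
+ */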
+void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
+{
+ bool wifi_busy = false;
+ bool scan = false, link = false, roam = false;
+ bool under_4way = false, ap_enable = false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect()===>\n");
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ &under_4way);
+ if (under_4way) {
+ halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect(), "
+ "return for wifi is under 4way<===\n");
+ return;
+ }
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+ if (scan || link || roam) {
+ halbtc8723b1ant_ActionWifiConnectedScan(btcoexist);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect(), "
+ "return for wifi is under scan<===\n");
+ return;
+ }
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+ /* power save state */
+ if (!ap_enable &&
+ BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status &&
+ !btcoexist->bt_link_info.hid_only)
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_LPS_ON,
+ 0x50, 0x4);
+ else
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+
+ /* tdma and coex table */
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ if (!wifi_busy) {
+ if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
+ halbtc8723b1ant_action_wifi_connected_bt_acl_busy(btcoexist,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE);
+ } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY ==
+ coex_dm->bt_status) ||
+ (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
+ coex_dm->bt_status)) {
+ halbtc8723b1ant_action_bt_sco_hid_only_busy(btcoexist,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE);
+ } else {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ false, 8);
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 2);
+ }
+ } else {
+ if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
+ halbtc8723b1ant_action_wifi_connected_bt_acl_busy(btcoexist,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY);
+ } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY ==
+ coex_dm->bt_status) ||
+ (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
+ coex_dm->bt_status)) {
+ halbtc8723b1ant_action_bt_sco_hid_only_busy(btcoexist,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY);
+ } else {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 2);
+ }
+ }
+}
+
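+/* Software coexistence dispatcher: when the common (idle) action has
+ * already been applied nothing more is done, otherwise the per-profile
+ * action routine selected by the current algorithm is run.
+ */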
+void halbtc8723b1ant_run_sw_coexist_mechanism(struct btc_coexist *btcoexist)
+{
+ u8 algorithm = 0;
+
+ algorithm = halbtc8723b1ant_action_algorithm(btcoexist);
+ coex_dm->cur_algorithm = algorithm;
+
+ if (!halbtc8723b1ant_is_common_action(btcoexist)) {
+ switch (coex_dm->cur_algorithm) {
+ case BT_8723B_1ANT_COEX_ALGO_SCO:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = SCO.\n");
+ halbtc8723b1ant_action_sco(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = HID.\n");
+ halbtc8723b1ant_action_hid(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = A2DP.\n");
+ halbtc8723b1ant_action_a2dp(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = "
+ "A2DP+PAN(HS).\n");
+ halbtc8723b1ant_action_a2dp_pan_hs(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = PAN(EDR).\n");
+ halbtc8723b1ant_action_pan_edr(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = HS mode.\n");
+ halbtc8723b1ant_action_pan_hs(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = PAN+A2DP.\n");
+ halbtc8723b1ant_action_pan_edr_a2dp(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = "
+ "PAN(EDR)+HID.\n");
+ halbtc8723b1ant_action_pan_edr_hid(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = "
+ "HID+A2DP+PAN.\n");
+ halbtc8723b1ant_action_hid_a2dp_pan_edr(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_HID_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = HID+A2DP.\n");
+ halbtc8723b1ant_action_hid_a2dp(btcoexist);
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = "
+ "coexist All Off!!\n");
+ break;
+ }
+ coex_dm->pre_algorithm = coex_dm->cur_algorithm;
+ }
+}
+
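+/* Top-level coexistence entry point.  It bails out under manual
+ * control, stopped coex DM or IPS, flags limited DIG and an increased
+ * scan device number while BT is busy, applies the limited TX/RX
+ * (aggregation size) settings for the SCO/HID/A2DP/PAN link mix, runs
+ * the software mechanism, and finally selects the inquiry, HS,
+ * not-connected or connected action.
+ */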
+void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool wifi_connected = false, bt_hs_on = false;
+ bool limited_dig = false, increase_scan_dev_num = false;
+ bool b_bt_ctrl_agg_buf_size = false;
+ u8 agg_buf_size = 5;
+ u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism()===>\n");
+
+ if (btcoexist->manual_control) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), "
+ "return for Manual CTRL <===\n");
+ return;
+ }
+
+ if (btcoexist->stop_coex_dm) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), "
+ "return for Stop Coex DM <===\n");
+ return;
+ }
+
+ if (coex_sta->under_ips) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], wifi is under IPS !!!\n");
+ return;
+ }
+
+ if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) {
+ limited_dig = true;
+ increase_scan_dev_num = true;
+ }
+
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_INC_SCAN_DEV_NUM,
+ &increase_scan_dev_num);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+
+ if (!bt_link_info->sco_exist && !bt_link_info->hid_exist) {
+ halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+ } else {
+ if (wifi_connected) {
+ wifi_rssi_state =
+ halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 1, 2, 30, 0);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_limited_tx(btcoexist,
+ NORMAL_EXEC,
+ 1, 1, 1, 1);
+ } else {
+ halbtc8723b1ant_limited_tx(btcoexist,
+ NORMAL_EXEC,
+ 1, 1, 1, 1);
+ }
+ } else {
+ halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC,
+ 0, 0, 0, 0);
+ }
+ }
+
+ if (bt_link_info->sco_exist) {
+ b_bt_ctrl_agg_buf_size = true;
+ agg_buf_size = 0x3;
+ } else if (bt_link_info->hid_exist) {
+ b_bt_ctrl_agg_buf_size = true;
+ agg_buf_size = 0x5;
+ } else if (bt_link_info->a2dp_exist || bt_link_info->pan_exist) {
+ b_bt_ctrl_agg_buf_size = true;
+ agg_buf_size = 0x8;
+ }
+ halbtc8723b1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
+ b_bt_ctrl_agg_buf_size, agg_buf_size);
+
+ halbtc8723b1ant_run_sw_coexist_mechanism(btcoexist);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ if (coex_sta->c2h_bt_inquiry_page) {
+ halbtc8723b1ant_action_bt_inquiry(btcoexist);
+ return;
+ } else if (bt_hs_on) {
+ halbtc8723b1ant_action_hs(btcoexist);
+ return;
+ }
+
+ if (!wifi_connected) {
+ bool scan = false, link = false, roam = false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], wifi is non connected-idle !!!\n");
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+ if (scan || link || roam)
+ halbtc8723b1ant_action_wifi_not_connected_asso_auth_scan(btcoexist);
+ else
+ halbtc8723b1ant_action_wifi_not_connected(btcoexist);
+ } else { /* wifi LPS/Busy */
+ halbtc8723b1ant_action_wifi_connected(btcoexist);
+ }
+}
+
+void halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ /* force to reset coex mechanism */
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false);
+
+ /* sw all off */
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+ halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+}
+
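+/* One-time hardware setup: optionally back up RF 0x1e and the
+ * 0x430/0x434/0x42a/0x456 MAC registers, program the GNT_BT standby
+ * behaviour, poll 0x49d[0] up to 20 times (50 ms apart) for BT
+ * calibration to finish, then enable the counter statistics, the PTA
+ * antenna path and the default coexistence table.
+ */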
+void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist, bool backup)
+{
+ u32 u32tmp = 0;
+ u8 u8tmp = 0;
+ u32 cnt_bt_cal_chk = 0;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], 1Ant Init HW Config!!\n");
+
+ if (backup) {/* backup rf 0x1e value */
+ coex_dm->bt_rf0x1e_backup =
+ btcoexist->btc_get_rf_reg(btcoexist,
+ BTC_RF_A, 0x1e, 0xfffff);
+
+ coex_dm->backup_arfr_cnt1 =
+ btcoexist->btc_read_4byte(btcoexist, 0x430);
+ coex_dm->backup_arfr_cnt2 =
+ btcoexist->btc_read_4byte(btcoexist, 0x434);
+ coex_dm->backup_retry_limit =
+ btcoexist->btc_read_2byte(btcoexist, 0x42a);
+ coex_dm->backup_ampdu_max_time =
+ btcoexist->btc_read_1byte(btcoexist, 0x456);
+ }
+
+ /* WiFi goto standby while GNT_BT 0-->1 */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x780);
+ /* BT goto standby while GNT_BT 1-->0 */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x2, 0xfffff, 0x500);
+
+ btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3);
+ btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77);
+
+ /* BT calibration check */
+ while (cnt_bt_cal_chk <= 20) {
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x49d);
+ cnt_bt_cal_chk++;
+ if (u32tmp & BIT0) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], ########### BT "
+ "calibration(cnt=%d) ###########\n",
+ cnt_bt_cal_chk);
+ mdelay(50);
+ } else {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], ********** BT NOT "
+ "calibration (cnt=%d)**********\n",
+ cnt_bt_cal_chk);
+ break;
+ }
+ }
+
+ /* 0x790[5:0]=0x5 */
+ u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
+ u8tmp &= 0xc0;
+ u8tmp |= 0x5;
+ btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
+
+ /* Enable counter statistics */
+ /*0x76e[3] =1, WLAN_Act control by PTA */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+ btcoexist->btc_write_1byte(btcoexist, 0x778, 0x1);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
+
+ /*Antenna config */
+ halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_PTA, true, false);
+ /* PTA parameter */
+ halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+}
+
+void halbtc8723b1ant_wifi_off_hw_cfg(struct btc_coexist *btcoexist)
+{
+ /* set wlan_act to low */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0);
+}
+
+/**************************************************************
+ * work around function start with wa_halbtc8723b1ant_
+ **************************************************************/
+/**************************************************************
+ * extern function start with EXhalbtc8723b1ant_
+ **************************************************************/
+
+void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist)
+{
+ halbtc8723b1ant_init_hw_config(btcoexist, true);
+}
+
+void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
+
+ btcoexist->stop_coex_dm = false;
+
+ halbtc8723b1ant_init_coex_dm(btcoexist);
+
+ halbtc8723b1ant_query_bt_info(btcoexist);
+}
+
+void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ u8 *cli_buf = btcoexist->cli_buf;
+ u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
+ u16 u16tmp[4];
+ u32 u32tmp[4];
+ bool roam = false, scan = false;
+ bool link = false, wifi_under_5g = false;
+ bool bt_hs_on = false, wifi_busy = false;
+ s32 wifi_rssi = 0, bt_hs_rssi = 0;
+ u32 wifi_bw, wifi_traffic_dir, fa_ofdm, fa_cck;
+ u8 wifi_dot11_chnl, wifi_hs_chnl;
+ u32 fw_ver = 0, bt_patch_ver = 0;
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cli_buf);
+
+ if (btcoexist->manual_control) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ============[Under Manual Control]==========");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ==========================================");
+ CL_PRINTF(cli_buf);
+ }
+ if (btcoexist->stop_coex_dm) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ============[Coex is STOPPED]============");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ==========================================");
+ CL_PRINTF(cli_buf);
+ }
+
+ if (!board_info->bt_exist) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+ CL_PRINTF(cli_buf);
+ return;
+ }
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d",
+ "Ant PG Num/ Ant Mech/ Ant Pos:", \
+ board_info->pg_ant_num, board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d",
+ "BT stack/ hci ext ver", \
+ ((stack_info->profile_notified)? "Yes":"No"),
+ stack_info->hci_version);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer", \
+ glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
+ &wifi_dot11_chnl);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsChnl(HsMode)", \
+ wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ",
+ "H2C Wifi inform bt chnl Info", \
+ coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1],
+ coex_dm->wifi_chnl_info[2]);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+ "Wifi link/ roam/ scan", link, roam, scan);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G,
+ &wifi_under_5g);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+ &wifi_traffic_dir);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ",
+ "Wifi status", (wifi_under_5g? "5G":"2.4G"),
+ ((BTC_WIFI_BW_LEGACY==wifi_bw)? "Legacy":
+ (((BTC_WIFI_BW_HT40==wifi_bw)? "HT40":"HT20"))),
+ ((!wifi_busy)? "idle":
+ ((BTC_WIFI_TRAFFIC_TX==wifi_traffic_dir)?
+ "uplink":"downlink")));
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ",
+ "BT [status/ rssi/ retryCnt]",
+ (btcoexist->bt_info.bt_disabled ? "disabled" :
+ (coex_sta->c2h_bt_inquiry_page ? "inquiry/page scan" :
+ ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+ coex_dm->bt_status) ? "non-connected idle" :
+ ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status) ? "connected-idle" : "busy")))),
+ coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP", bt_link_info->sco_exist,
+ bt_link_info->hid_exist, bt_link_info->pan_exist,
+ bt_link_info->a2dp_exist);
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s",
+ "BT Info A2DP rate",
+ (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
+ CL_PRINTF(cli_buf);
+
+ for (i = 0; i < BT_INFO_SRC_8723B_1ANT_MAX; i++) {
+ if (coex_sta->bt_info_c2h_cnt[i]) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x "
+ "%02x %02x %02x %02x(%d)",
+ GLBtInfoSrc8723b1Ant[i],
+ coex_sta->bt_info_c2h[i][0],
+ coex_sta->bt_info_c2h[i][1],
+ coex_sta->bt_info_c2h[i][2],
+ coex_sta->bt_info_c2h[i][3],
+ coex_sta->bt_info_c2h[i][4],
+ coex_sta->bt_info_c2h[i][5],
+ coex_sta->bt_info_c2h[i][6],
+ coex_sta->bt_info_c2h_cnt[i]);
+ CL_PRINTF(cli_buf);
+ }
+ }
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %s/%s, (0x%x/0x%x)",
+ "PS state, IPS/LPS, (lps/rpwm)", \
+ ((coex_sta->under_ips? "IPS ON":"IPS OFF")),
+ ((coex_sta->under_lps? "LPS ON":"LPS OFF")),
+ btcoexist->bt_info.lps_1ant,
+ btcoexist->bt_info.rpwm_1ant);
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+ if (!btcoexist->manual_control) {
+ /* Sw mechanism */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Sw mechanism]============");
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+ "SM1[ShRf/ LpRA/ LimDig]", \
+ coex_dm->cur_rf_rx_lpf_shrink,
+ coex_dm->cur_low_penalty_ra,
+ btcoexist->bt_info.limited_dig);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d/ %d(0x%x) ",
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", \
+ coex_dm->cur_agc_table_en,
+ coex_dm->cur_adc_backoff,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ",
+ "Rate Mask", btcoexist->bt_info.ra_mask);
+ CL_PRINTF(cli_buf);
+
+ /* Fw mechanism */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Fw mechanism]============");
+ CL_PRINTF(cli_buf);
+
+ ps_tdma_case = coex_dm->cur_ps_tdma;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x %02x %02x "
+ "case-%d (auto:%d)",
+ "PS TDMA", coex_dm->ps_tdma_para[0],
+ coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2],
+ coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4],
+ ps_tdma_case, coex_dm->auto_tdma_adjust);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ",
+ "Latest error condition(should be 0)", \
+ coex_dm->error_condition);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+ "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr,
+ coex_dm->cur_ignore_wlan_act);
+ CL_PRINTF(cli_buf);
+ }
+
+ /* Hw setting */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Hw setting]============");
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x",
+ "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+ coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
+ coex_dm->backup_ampdu_max_time);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
+ u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "0x430/0x434/0x42a/0x456",
+ u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6cc);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x880);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0],
+ (u32tmp[1] & 0x3e000000) >> 25);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x948/ 0x67[5] / 0x765",
+ u32tmp[0], ((u8tmp[0] & 0x20)>> 5), u8tmp[1]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+ u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
+ CL_PRINTF(cli_buf);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+ ((u8tmp[0] & 0x8)>>3), u8tmp[1],
+ ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0xda8);
+ u32tmp[3] = btcoexist->btc_read_4byte(btcoexist, 0xcf0);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
+
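+	/* Sum the OFDM false-alarm counter fields read from 0xda0/0xda4/
+	 * 0xda8/0xcf0 and combine 0xa5b/0xa5c into the CCK false-alarm
+	 * count for the OFDM-CCA/OFDM-FA/CCK-FA debug line below.
+	 */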
+	fa_ofdm = ((u32tmp[0] & 0xffff0000) >> 16) +
+		  ((u32tmp[1] & 0xffff0000) >> 16) +
+		  (u32tmp[1] & 0xffff) +
+		  (u32tmp[2] & 0xffff) +
+		  ((u32tmp[3] & 0xffff0000) >> 16) +
+		  (u32tmp[3] & 0xffff);
+ fa_cck = (u8tmp[0] << 8) + u8tmp[1];
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "OFDM-CCA/OFDM-FA/CCK-FA",
+ u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8(coexTable)",
+ u32tmp[0], u32tmp[1], u32tmp[2]);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "0x770(high-pri rx/tx)", coex_sta->high_priority_rx,
+ coex_sta->high_priority_tx);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+ coex_sta->low_priority_tx);
+ CL_PRINTF(cli_buf);
+#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 1)
+ halbtc8723b1ant_monitor_bt_ctr(btcoexist);
+#endif
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm)
+ return;
+
+ if (BTC_IPS_ENTER == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
+ coex_sta->under_ips = true;
+
+ halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
+ false, true);
+ /* set PTA control */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 0);
+ } else if (BTC_IPS_LEAVE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
+ coex_sta->under_ips = false;
+
+ halbtc8723b1ant_run_coexist_mechanism(btcoexist);
+ }
+}
+
+void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm)
+ return;
+
+ if (BTC_LPS_ENABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
+ coex_sta->under_lps = true;
+ } else if (BTC_LPS_DISABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
+ coex_sta->under_lps = false;
+ }
+}
+
+void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ bool wifi_connected = false, bt_hs_on = false;
+
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+ btcoexist->bt_info.bt_disabled)
+ return;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+
+ halbtc8723b1ant_query_bt_info(btcoexist);
+
+ if (coex_sta->c2h_bt_inquiry_page) {
+ halbtc8723b1ant_action_bt_inquiry(btcoexist);
+ return;
+ } else if (bt_hs_on) {
+ halbtc8723b1ant_action_hs(btcoexist);
+ return;
+ }
+
+ if (BTC_SCAN_START == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
+ if (!wifi_connected) /* non-connected scan */
+ halbtc8723b1ant_action_wifi_not_connected_asso_auth_scan(btcoexist);
+ else /* wifi is connected */
+ halbtc8723b1ant_ActionWifiConnectedScan(btcoexist);
+ } else if (BTC_SCAN_FINISH == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
+ if (!wifi_connected) /* non-connected scan */
+ halbtc8723b1ant_action_wifi_not_connected(btcoexist);
+ else
+ halbtc8723b1ant_action_wifi_connected(btcoexist);
+ }
+}
+
+void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ bool wifi_connected = false, bt_hs_on = false;
+
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+ btcoexist->bt_info.bt_disabled)
+ return;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ if (coex_sta->c2h_bt_inquiry_page) {
+ halbtc8723b1ant_action_bt_inquiry(btcoexist);
+ return;
+ } else if (bt_hs_on) {
+ halbtc8723b1ant_action_hs(btcoexist);
+ return;
+ }
+
+ if (BTC_ASSOCIATE_START == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
+ halbtc8723b1ant_action_wifi_not_connected_asso_auth_scan(btcoexist);
+ } else if (BTC_ASSOCIATE_FINISH == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ if (!wifi_connected) /* non-connected scan */
+ halbtc8723b1ant_action_wifi_not_connected(btcoexist);
+ else
+ halbtc8723b1ant_action_wifi_connected(btcoexist);
+ }
+}
+
+void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+	u8 h2c_parameter[3] = {0};
+ u32 wifi_bw;
+ u8 wifiCentralChnl;
+
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+	    btcoexist->bt_info.bt_disabled)
+ return;
+
+ if (BTC_MEDIA_CONNECT == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
+
+	/* only for 2.4G do we need to inform BT of the channel mask */
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
+ &wifiCentralChnl);
+
+ if ((BTC_MEDIA_CONNECT == type) &&
+ (wifiCentralChnl <= 14)) {
+ h2c_parameter[0] = 0x0;
+ h2c_parameter[1] = wifiCentralChnl;
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw)
+ h2c_parameter[2] = 0x30;
+ else
+ h2c_parameter[2] = 0x20;
+ }
+
+ coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
+ coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
+ coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66=0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
+}
+
+void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ bool bt_hs_on = false;
+
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+ btcoexist->bt_info.bt_disabled)
+ return;
+
+ coex_sta->special_pkt_period_cnt = 0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ if (coex_sta->c2h_bt_inquiry_page) {
+ halbtc8723b1ant_action_bt_inquiry(btcoexist);
+ return;
+ } else if (bt_hs_on) {
+ halbtc8723b1ant_action_hs(btcoexist);
+ return;
+ }
+
+ if (BTC_PACKET_DHCP == type ||
+ BTC_PACKET_EAPOL == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], special Packet(%d) notify\n", type);
+ halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
+ }
+}
+
+void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmp_buf, u8 length)
+{
+ u8 bt_info = 0;
+ u8 i, rsp_source = 0;
+ bool wifi_connected = false;
+ bool bt_busy = false;
+
+ coex_sta->c2h_bt_info_req_sent = false;
+
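+	/* tmp_buf[0] low nibble identifies the source of this C2H BT info;
+	 * byte 1 holds the BT status flags, byte 2 [3:0] the retry count,
+	 * byte 3 the reported RSSI and byte 4 the extended info parsed below.
+	 */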
+ rsp_source = tmp_buf[0] & 0xf;
+ if (rsp_source >= BT_INFO_SRC_8723B_1ANT_MAX)
+ rsp_source = BT_INFO_SRC_8723B_1ANT_WIFI_FW;
+ coex_sta->bt_info_c2h_cnt[rsp_source]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length=%d, hex data=[",
+ rsp_source, length);
+	for (i = 0; i < length; i++) {
+ coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
+ if (i == 1)
+ bt_info = tmp_buf[i];
+ if (i == length - 1)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "0x%02x]\n", tmp_buf[i]);
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "0x%02x, ", tmp_buf[i]);
+ }
+
+ if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rsp_source) {
+ coex_sta->bt_retry_cnt = /* [3:0] */
+ coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
+
+ coex_sta->bt_rssi =
+ coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
+
+ coex_sta->bt_info_ext =
+ coex_sta->bt_info_c2h[rsp_source][4];
+
+		/* Here we need to resend some wifi info to BT
+		 * because BT was reset and lost the info. */
+		if (coex_sta->bt_info_ext & BIT1) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], BT ext info bit1 check, "
+				  "send wifi BW&Chnl to BT!!\n");
+			btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+					   &wifi_connected);
+			if (wifi_connected)
+				ex_halbtc8723b1ant_media_status_notify(btcoexist,
+							BTC_MEDIA_CONNECT);
+			else
+				ex_halbtc8723b1ant_media_status_notify(btcoexist,
+							BTC_MEDIA_DISCONNECT);
+		}
+
+ if (coex_sta->bt_info_ext & BIT3) {
+ if (!btcoexist->manual_control &&
+ !btcoexist->stop_coex_dm) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT ext info bit3 check, "
+ "set BT NOT ignore Wlan active!!\n");
+ halbtc8723b1ant_ignore_wlan_act(btcoexist,
+ FORCE_EXEC,
+ false);
+ }
+ } else {
+		/* BT already does NOT ignore WLAN active; do nothing here. */
+ }
+#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
+ if (coex_sta->bt_info_ext & BIT4) {
+ /* BT auto report already enabled, do nothing */
+ } else {
+ halbtc8723b1ant_bt_auto_report(btcoexist, FORCE_EXEC,
+ true);
+ }
+#endif
+ }
+
+ /* check BIT2 first ==> check if bt is under inquiry or page scan */
+ if (bt_info & BT_INFO_8723B_1ANT_B_INQ_PAGE)
+ coex_sta->c2h_bt_inquiry_page = true;
+ else
+ coex_sta->c2h_bt_inquiry_page = false;
+
+ /* set link exist status */
+ if (!(bt_info & BT_INFO_8723B_1ANT_B_CONNECTION)) {
+ coex_sta->bt_link_exist = false;
+ coex_sta->pan_exist = false;
+ coex_sta->a2dp_exist = false;
+ coex_sta->hid_exist = false;
+ coex_sta->sco_exist = false;
+ } else { /* connection exists */
+ coex_sta->bt_link_exist = true;
+ if (bt_info & BT_INFO_8723B_1ANT_B_FTP)
+ coex_sta->pan_exist = true;
+ else
+ coex_sta->pan_exist = false;
+ if (bt_info & BT_INFO_8723B_1ANT_B_A2DP)
+ coex_sta->a2dp_exist = true;
+ else
+ coex_sta->a2dp_exist = false;
+ if (bt_info & BT_INFO_8723B_1ANT_B_HID)
+ coex_sta->hid_exist = true;
+ else
+ coex_sta->hid_exist = false;
+ if (bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO)
+ coex_sta->sco_exist = true;
+ else
+ coex_sta->sco_exist = false;
+ }
+
+ halbtc8723b1ant_update_bt_link_info(btcoexist);
+
+	if (!(bt_info & BT_INFO_8723B_1ANT_B_CONNECTION)) {
+ coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), "
+ "BT Non-Connected idle!!!\n");
+ /* connection exists but no busy */
+ } else if (bt_info == BT_INFO_8723B_1ANT_B_CONNECTION) {
+ coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ } else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
+ (bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY)) {
+ coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_SCO_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), "
+ "BT SCO busy!!!\n");
+ } else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) {
+ if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
+ coex_dm->auto_tdma_adjust = false;
+
+ coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_ACL_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ } else {
+ coex_dm->bt_status =
+ BT_8723B_1ANT_BT_STATUS_MAX;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
+ }
+
+ if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status))
+ bt_busy = true;
+ else
+ bt_busy = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
+ halbtc8723b1ant_run_coexist_mechanism(btcoexist);
+}
+
+void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+
+ btcoexist->stop_coex_dm = true;
+
+ halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false, true);
+
+ halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
+ halbtc8723b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
+
+ ex_halbtc8723b1ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+}
+
+void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Pnp notify\n");
+
+ if (BTC_WIFI_PNP_SLEEP == pnp_state) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], Pnp notify to SLEEP\n");
+ btcoexist->stop_coex_dm = true;
+ halbtc8723b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
+ } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], Pnp notify to WAKE UP\n");
+ btcoexist->stop_coex_dm = false;
+ halbtc8723b1ant_init_hw_config(btcoexist, false);
+ halbtc8723b1ant_init_coex_dm(btcoexist);
+ halbtc8723b1ant_query_bt_info(btcoexist);
+ }
+}
+
+void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+	static u8 dis_ver_info_cnt;
+ u32 fw_ver = 0, bt_patch_ver = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], =========================="
+ "Periodical===========================\n");
+
+ if (dis_ver_info_cnt <= 5) {
+ dis_ver_info_cnt += 1;
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], *************************"
+ "***************************************\n");
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "[BTCoex], Ant PG Num/ Ant Mech/ "
+			  "Ant Pos = %d/ %d/ %d\n",
+			  board_info->pg_ant_num, board_info->btdm_ant_num,
+			  board_info->btdm_ant_pos);
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+			  (stack_info->profile_notified ? "Yes" : "No"),
+			  stack_info->hci_version);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
+ &bt_patch_ver);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], CoexVer/ FwVer/ PatchVer "
+			  "= %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8723b_1ant,
+ glcoex_ver_8723b_1ant, fw_ver,
+ bt_patch_ver, bt_patch_ver);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], *****************************"
+ "***********************************\n");
+ }
+
+#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
+ halbtc8723b1ant_query_bt_info(btcoexist);
+ halbtc8723b1ant_monitor_bt_ctr(btcoexist);
+ halbtc8723b1ant_monitor_bt_enable_disable(btcoexist);
+#else
+ if (halbtc8723b1ant_is_wifi_status_changed(btcoexist) ||
+ coex_dm->auto_tdma_adjust) {
+ if (coex_sta->special_pkt_period_cnt > 2)
+ halbtc8723b1ant_run_coexist_mechanism(btcoexist);
+ }
+
+ coex_sta->special_pkt_period_cnt++;
+#endif
+}
+
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.h b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.h
new file mode 100644
index 000000000000..5ce292f2e7c6
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.h
@@ -0,0 +1,175 @@
+/**********************************************************************
+ * The following is for 8723B 1ANT BT Co-exist definition
+ **********************************************************************/
+#define BT_AUTO_REPORT_ONLY_8723B_1ANT 1
+
+#define BT_INFO_8723B_1ANT_B_FTP BIT7
+#define BT_INFO_8723B_1ANT_B_A2DP BIT6
+#define BT_INFO_8723B_1ANT_B_HID BIT5
+#define BT_INFO_8723B_1ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8723B_1ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8723B_1ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8723B_1ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8723B_1ANT_B_CONNECTION BIT0
+
+#define BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(_BT_INFO_EXT_) \
+	(((_BT_INFO_EXT_) & BIT0) ? true : false)
+
+#define BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT 2
+
+typedef enum _BT_INFO_SRC_8723B_1ANT {
+ BT_INFO_SRC_8723B_1ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8723B_1ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8723B_1ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8723B_1ANT_MAX
+} BT_INFO_SRC_8723B_1ANT, *PBT_INFO_SRC_8723B_1ANT;
+
+typedef enum _BT_8723B_1ANT_BT_STATUS {
+ BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8723B_1ANT_BT_STATUS_INQ_PAGE = 0x2,
+ BT_8723B_1ANT_BT_STATUS_ACL_BUSY = 0x3,
+ BT_8723B_1ANT_BT_STATUS_SCO_BUSY = 0x4,
+ BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8723B_1ANT_BT_STATUS_MAX
+} BT_8723B_1ANT_BT_STATUS, *PBT_8723B_1ANT_BT_STATUS;
+
+typedef enum _BT_8723B_1ANT_WIFI_STATUS {
+ BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN = 0x1,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN = 0x2,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT = 0x3,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE = 0x4,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY = 0x5,
+ BT_8723B_1ANT_WIFI_STATUS_MAX
+} BT_8723B_1ANT_WIFI_STATUS, *PBT_8723B_1ANT_WIFI_STATUS;
+
+typedef enum _BT_8723B_1ANT_COEX_ALGO {
+ BT_8723B_1ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8723B_1ANT_COEX_ALGO_SCO = 0x1,
+ BT_8723B_1ANT_COEX_ALGO_HID = 0x2,
+ BT_8723B_1ANT_COEX_ALGO_A2DP = 0x3,
+ BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS = 0x4,
+ BT_8723B_1ANT_COEX_ALGO_PANEDR = 0x5,
+ BT_8723B_1ANT_COEX_ALGO_PANHS = 0x6,
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP = 0x7,
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_HID = 0x8,
+ BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9,
+ BT_8723B_1ANT_COEX_ALGO_HID_A2DP = 0xa,
+ BT_8723B_1ANT_COEX_ALGO_MAX = 0xb,
+} BT_8723B_1ANT_COEX_ALGO, *PBT_8723B_1ANT_COEX_ALGO;
+
+struct coex_dm_8723b_1ant {
+ /* fw mechanism */
+ bool pre_dec_bt_pwr;
+ bool cur_dec_bt_pwr;
+ u8 pre_fw_dac_swing_lvl;
+ u8 cur_fw_dac_swing_lvl;
+ bool cur_ignore_wlan_act;
+ bool pre_ignore_wlan_act;
+ u8 pre_ps_tdma;
+ u8 cur_ps_tdma;
+ u8 ps_tdma_para[5];
+ u8 ps_tdma_du_adj_type;
+ bool auto_tdma_adjust;
+ bool pre_ps_tdma_on;
+ bool cur_ps_tdma_on;
+ bool pre_bt_auto_report;
+ bool cur_bt_auto_report;
+ u8 pre_lps;
+ u8 cur_lps;
+ u8 pre_rpwm;
+ u8 cur_rpwm;
+
+ /* sw mechanism */
+ bool pre_rf_rx_lpf_shrink;
+ bool cur_rf_rx_lpf_shrink;
+ u32 bt_rf0x1e_backup;
+ bool pre_low_penalty_ra;
+ bool cur_low_penalty_ra;
+ bool pre_dac_swing_on;
+ u32 pre_dac_swing_lvl;
+ bool cur_dac_swing_on;
+ u32 cur_dac_swing_lvl;
+ bool pre_adc_backoff;
+ bool cur_adc_backoff;
+ bool pre_agc_table_en;
+ bool cur_agc_table_en;
+ u32 pre_val0x6c0;
+ u32 cur_val0x6c0;
+ u32 pre_val0x6c4;
+ u32 cur_val0x6c4;
+ u32 pre_val0x6c8;
+ u32 cur_val0x6c8;
+ u8 pre_val0x6cc;
+ u8 cur_val0x6cc;
+ bool limited_dig;
+
+ u32 backup_arfr_cnt1; /* Auto Rate Fallback Retry cnt */
+ u32 backup_arfr_cnt2; /* Auto Rate Fallback Retry cnt */
+ u16 backup_retry_limit;
+ u8 backup_ampdu_max_time;
+
+ /* algorithm related */
+ u8 pre_algorithm;
+ u8 cur_algorithm;
+ u8 bt_status;
+ u8 wifi_chnl_info[3];
+
+ u32 prera_mask;
+ u32 curra_mask;
+ u8 pre_arfr_type;
+ u8 cur_arfr_type;
+ u8 pre_retry_limit_type;
+ u8 cur_retry_limit_type;
+ u8 pre_ampdu_time_type;
+ u8 cur_ampdu_time_type;
+
+ u8 error_condition;
+};
+
+struct coex_sta_8723b_1ant {
+ bool bt_link_exist;
+ bool sco_exist;
+ bool a2dp_exist;
+ bool hid_exist;
+ bool pan_exist;
+
+ bool under_lps;
+ bool under_ips;
+ u32 special_pkt_period_cnt;
+ u32 high_priority_tx;
+ u32 high_priority_rx;
+ u32 low_priority_tx;
+ u32 low_priority_rx;
+ u8 bt_rssi;
+ u8 pre_bt_rssi_state;
+ u8 pre_wifi_rssi_state[4];
+ bool c2h_bt_info_req_sent;
+ u8 bt_info_c2h[BT_INFO_SRC_8723B_1ANT_MAX][10];
+ u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_1ANT_MAX];
+ bool c2h_bt_inquiry_page;
+ u8 bt_retry_cnt;
+ u8 bt_info_ext;
+};
+
+/*************************************************************************
+ * The following is the interface that notifies the coex module.
+ *************************************************************************/
+void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist);
+void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist);
+void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmpbuf, u8 length);
+void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist);
+void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpState);
+void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist);
+void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist);
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c
new file mode 100644
index 000000000000..83b1b4218333
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c
@@ -0,0 +1,4185 @@
+/***************************************************************
+ * Description:
+ *
+ * This file is for RTL8723B Co-exist mechanism
+ *
+ * History
+ * 2012/11/15 Cosa first check in.
+ *
+ **************************************************************/
+/**************************************************************
+ * include files
+ **************************************************************/
+#include "halbt_precomp.h"
+#if 1
+/**************************************************************
+ * Global variables, these are static variables
+ **************************************************************/
+static struct coex_dm_8723b_2ant glcoex_dm_8723b_2ant;
+static struct coex_dm_8723b_2ant *coex_dm = &glcoex_dm_8723b_2ant;
+static struct coex_sta_8723b_2ant glcoex_sta_8723b_2ant;
+static struct coex_sta_8723b_2ant *coex_sta = &glcoex_sta_8723b_2ant;
+
+const char *const glbt_info_src_8723b_2ant[] = {
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+u32 glcoex_ver_date_8723b_2ant = 20131113;
+u32 glcoex_ver_8723b_2ant = 0x3f;
+
+/**************************************************************
+ * local function proto type if needed
+ **************************************************************/
+/**************************************************************
+ * local function start with halbtc8723b2ant_
+ **************************************************************/
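+/* Map coex_sta->bt_rssi onto a low/medium/high state with hysteresis:
+ * stepping up requires the threshold plus
+ * BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT, stepping down only requires
+ * falling below the bare threshold.
+ */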
+u8 halbtc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, u8 rssi_thresh1)
+{
+ s32 bt_rssi = 0;
+ u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
+
+ bt_rssi = coex_sta->bt_rssi;
+
+ if (level_num == 2) {
+ if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ if (bt_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to High\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Low\n");
+ }
+ } else {
+ if (bt_rssi < rssi_thresh) {
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Low\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at High\n");
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi thresh error!!\n");
+ return coex_sta->pre_bt_rssi_state;
+ }
+
+ if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ if (bt_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Medium\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Low\n");
+ }
+ } else if ((coex_sta->pre_bt_rssi_state ==
+ BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_bt_rssi_state ==
+ BTC_RSSI_STATE_STAY_MEDIUM)) {
+ if (bt_rssi >= rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to High\n");
+ } else if (bt_rssi < rssi_thresh) {
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Low\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Medium\n");
+ }
+ } else {
+ if (bt_rssi < rssi_thresh1) {
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Medium\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at High\n");
+ }
+ }
+ }
+
+ coex_sta->pre_bt_rssi_state = bt_rssi_state;
+
+ return bt_rssi_state;
+}
+
+u8 halbtc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
+ u8 index, u8 level_num,
+ u8 rssi_thresh, u8 rssi_thresh1)
+{
+	s32 wifi_rssi = 0;
+ u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+
+ if (level_num == 2) {
+ if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_LOW)) {
+ if (wifi_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to High\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Low\n");
+ }
+ } else {
+ if (wifi_rssi < rssi_thresh) {
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Low\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at High\n");
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI thresh error!!\n");
+ return coex_sta->pre_wifi_rssi_state[index];
+ }
+
+ if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_LOW)) {
+			if (wifi_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Medium\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Low\n");
+ }
+ } else if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_MEDIUM)) {
+ if (wifi_rssi >= rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to High\n");
+ } else if (wifi_rssi < rssi_thresh) {
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Low\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Medium\n");
+ }
+ } else {
+ if (wifi_rssi < rssi_thresh1) {
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Medium\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at High\n");
+ }
+ }
+ }
+
+ coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
+
+ return wifi_rssi_state;
+}
+
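+/* Declare BT disabled when all four priority tx/rx counters stay at zero
+ * for two consecutive polls; all-0xffff readings are still treated as
+ * BT active.
+ */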
+void halbtc8723b2ant_monitor_bt_enable_disable(struct btc_coexist *btcoexist)
+{
+	static bool pre_bt_disabled;
+	static u32 bt_disable_cnt;
+	bool bt_active = true, bt_disabled = false;
+
+	/* This function checks if BT is disabled */
+ if (coex_sta->high_priority_tx == 0 &&
+ coex_sta->high_priority_rx == 0 &&
+ coex_sta->low_priority_tx == 0 &&
+ coex_sta->low_priority_rx == 0)
+ bt_active = false;
+
+ if (coex_sta->high_priority_tx == 0xffff &&
+ coex_sta->high_priority_rx == 0xffff &&
+ coex_sta->low_priority_tx == 0xffff &&
+ coex_sta->low_priority_rx == 0xffff)
+ bt_active = true;
+
+ if (bt_active) {
+ bt_disable_cnt = 0;
+ bt_disabled = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+ &bt_disabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is enabled !!\n");
+ } else {
+ bt_disable_cnt++;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], bt all counters=0, %d times!!\n",
+ bt_disable_cnt);
+ if (bt_disable_cnt >= 2) {
+ bt_disabled = true;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+ &bt_disabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is disabled !!\n");
+ }
+ }
+
+ if (pre_bt_disabled != bt_disabled) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is from %s to %s!!\n",
+			  (pre_bt_disabled ? "disabled" : "enabled"),
+			  (bt_disabled ? "disabled" : "enabled"));
+
+ pre_bt_disabled = bt_disabled;
+ if (!bt_disabled) {
+ } else {
+ }
+ }
+}
+
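+/* Registers 0x770/0x774 hold the BT high/low priority packet counters
+ * (low word = tx, high word = rx); cache them in coex_sta, then write
+ * 0x76e to reset the hardware counters.
+ */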
+void halbtc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+{
+ u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
+ u32 reg_hp_tx = 0, reg_hp_rx = 0;
+ u32 reg_lp_tx = 0, reg_lp_rx = 0;
+
+ reg_hp_txrx = 0x770;
+ reg_lp_txrx = 0x774;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
+ reg_hp_tx = u32tmp & MASKLWORD;
+ reg_hp_rx = (u32tmp & MASKHWORD) >> 16;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
+ reg_lp_tx = u32tmp & MASKLWORD;
+ reg_lp_rx = (u32tmp & MASKHWORD) >> 16;
+
+ coex_sta->high_priority_tx = reg_hp_tx;
+ coex_sta->high_priority_rx = reg_hp_rx;
+ coex_sta->low_priority_tx = reg_lp_tx;
+ coex_sta->low_priority_rx = reg_lp_rx;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+
+ /* reset counter */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+}
+
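+/* Ask the BT firmware for its status: H2C 0x61 with bit0 set triggers
+ * a C2H BT info response.
+ */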
+void halbtc8723b2ant_query_bt_info(struct btc_coexist *btcoexist)
+{
+	u8 h2c_parameter[1] = {0};
+
+ coex_sta->c2h_bt_info_req_sent = true;
+
+ h2c_parameter[0] |= BIT0; /* trigger */
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Query Bt Info, FW write 0x61=0x%x\n",
+ h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
+}
+
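+/* Return true when the wifi busy, 4-way handshake or BT HS state differs
+ * from the previous poll while wifi is connected.
+ */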
+bool halbtc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
+{
+ static bool pre_wifi_busy = false;
+ static bool pre_under_4way = false;
+ static bool pre_bt_hs_on = false;
+ bool wifi_busy = false, under_4way = false, bt_hs_on = false;
+ bool wifi_connected = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ &under_4way);
+
+ if (wifi_connected) {
+ if (wifi_busy != pre_wifi_busy) {
+ pre_wifi_busy = wifi_busy;
+ return true;
+ }
+
+ if (under_4way != pre_under_4way) {
+ pre_under_4way = under_4way;
+ return true;
+ }
+
+ if (bt_hs_on != pre_bt_hs_on) {
+ pre_bt_hs_on = bt_hs_on;
+ return true;
+ }
+ }
+
+ return false;
+}
+
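+/* Refresh bt_link_info either from the BT firmware report (auto-report
+ * builds) or from the BT stack, then derive the single-profile flags
+ * (sco_only/a2dp_only/pan_only/hid_only).
+ */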
+void halbtc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist)
+{
+ /*struct btc_stack_info *stack_info = &btcoexist->stack_info;*/
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool bt_hs_on = false;
+
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1) /* profile from bt patch */
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
+ bt_link_info->sco_exist = coex_sta->sco_exist;
+ bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
+ bt_link_info->pan_exist = coex_sta->pan_exist;
+ bt_link_info->hid_exist = coex_sta->hid_exist;
+
+ /* work around for HS mode. */
+ if (bt_hs_on) {
+ bt_link_info->pan_exist = true;
+ bt_link_info->bt_link_exist = true;
+ }
+#else /* profile from bt stack */
+ bt_link_info->bt_link_exist = stack_info->bt_link_exist;
+ bt_link_info->sco_exist = stack_info->sco_exist;
+ bt_link_info->a2dp_exist = stack_info->a2dp_exist;
+ bt_link_info->pan_exist = stack_info->pan_exist;
+ bt_link_info->hid_exist = stack_info->hid_exist;
+
+ /*for win-8 stack HID report error*/
+ if (!stack_info->hid_exist)
+ stack_info->hid_exist = coex_sta->hid_exist;
+ /*sync BTInfo with BT firmware and stack*/
+ /* when stack HID report error, here we use the info from bt fw.*/
+ if (!stack_info->bt_link_exist)
+ stack_info->bt_link_exist = coex_sta->bt_link_exist;
+#endif
+ /* check if Sco only */
+ if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->sco_only = true;
+ else
+ bt_link_info->sco_only = false;
+
+ /* check if A2dp only */
+ if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->a2dp_only = true;
+ else
+ bt_link_info->a2dp_only = false;
+
+ /* check if Pan only */
+ if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->pan_only = true;
+ else
+ bt_link_info->pan_only = false;
+
+ /* check if Hid only */
+ if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && bt_link_info->hid_exist)
+ bt_link_info->hid_only = true;
+ else
+ bt_link_info->hid_only = false;
+}
+
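+/* Select the coexistence algorithm from the combination of active BT
+ * profiles (SCO/HID/A2DP/PAN) and whether the PAN link is an HS link.
+ */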
+u8 halbtc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
+{
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool bt_hs_on = false;
+ u8 algorithm = BT_8723B_2ANT_COEX_ALGO_UNDEFINED;
+ u8 num_of_diff_profile = 0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ if (!bt_link_info->bt_link_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], No BT link exists!!!\n");
+ return algorithm;
+ }
+
+ if (bt_link_info->sco_exist)
+ num_of_diff_profile++;
+ if (bt_link_info->hid_exist)
+ num_of_diff_profile++;
+ if (bt_link_info->pan_exist)
+ num_of_diff_profile++;
+ if (bt_link_info->a2dp_exist)
+ num_of_diff_profile++;
+
+ if (num_of_diff_profile == 1) {
+ if (bt_link_info->sco_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO only\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
+ } else {
+ if (bt_link_info->hid_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID only\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
+ } else if (bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], A2DP only\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
+ } else if (bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], PAN(HS) only\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], PAN(EDR) only\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR;
+ }
+ }
+ }
+ } else if (num_of_diff_profile == 2) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + A2DP ==> SCO\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + PAN(HS)\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + A2DP\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + PAN(HS)\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], A2DP + PAN(HS)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex],A2DP + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ }
+ }
+ } else if (num_of_diff_profile == 3) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID + A2DP"
+ " ==> HID\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID + "
+ "PAN(HS)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID + "
+ "PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + A2DP + "
+ "PAN(HS)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + A2DP + "
+ "PAN(EDR) ==> HID\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + A2DP + "
+ "PAN(HS)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + A2DP + "
+ "PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ }
+ }
+ } else if (num_of_diff_profile >= 3) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Error!!! SCO + HID"
+ " + A2DP + PAN(HS)\n");
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID + A2DP +"
+ " PAN(EDR)==>PAN(EDR)+HID\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ }
+
+ return algorithm;
+}
+
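+/* Decide whether BT should lower its TX power: only when wifi is
+ * connected, and either the HS link RSSI is above 37 or the BT RSSI
+ * state is high.
+ */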
+bool halbtc8723b2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
+{
+ bool bRet = false;
+ bool bt_hs_on = false, wifi_connected = false;
+	s32 bt_hs_rssi = 0;
+ u8 bt_rssi_state;
+
+ if (!btcoexist->btc_get(btcoexist,
+ BTC_GET_BL_HS_OPERATION, &bt_hs_on))
+ return false;
+ if (!btcoexist->btc_get(btcoexist,
+ BTC_GET_BL_WIFI_CONNECTED, &wifi_connected))
+ return false;
+ if (!btcoexist->btc_get(btcoexist,
+ BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
+ return false;
+
+ bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+ if (wifi_connected) {
+ if (bt_hs_on) {
+ if (bt_hs_rssi > 37) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], Need to decrease bt "
+ "power for HS mode!!\n");
+ bRet = true;
+ }
+ } else {
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], Need to decrease bt "
+ "power for Wifi is connected!!\n");
+ bRet = true;
+ }
+ }
+ }
+
+ return bRet;
+}
+
+void halbtc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
+ u8 dac_swing_lvl)
+{
+	u8 h2c_parameter[1] = {0};
+
+	/* There are several types of dacswing
+ * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */
+ h2c_parameter[0] = dac_swing_lvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
+}
+
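+/* H2C 0x62: bit1 asks the BT firmware to decrease BT TX power. */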
+void halbtc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
+ bool dec_bt_pwr)
+{
+ u8 h2c_parameter[1] = {0};
+
+ h2c_parameter[0] = 0;
+
+ if (dec_bt_pwr)
+ h2c_parameter[0] |= BIT1;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
+		  (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
+}
+
+void halbtc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
+ bool force_exec, bool dec_bt_pwr)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s Dec BT power = %s\n",
+		  (force_exec ? "force to" : ""), (dec_bt_pwr ? "ON" : "OFF"));
+ coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+
+ if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
+ return;
+ }
+ halbtc8723b2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+
+ coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+}
+
+void halbtc8723b2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
+ bool enable_auto_report)
+{
+	u8 h2c_parameter[1] = {0};
+
+	h2c_parameter[0] = 0;
+
+ if (enable_auto_report)
+ h2c_parameter[0] |= BIT0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n",
+		  (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+ h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
+}
+
+void halbtc8723b2ant_bt_auto_report(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable_auto_report)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s BT Auto report = %s\n",
+		  (force_exec ? "force to" : ""),
+		  (enable_auto_report ? "Enabled" : "Disabled"));
+ coex_dm->cur_bt_auto_report = enable_auto_report;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreBtAutoReport=%d, "
+ "bCurBtAutoReport=%d\n",
+ coex_dm->pre_bt_auto_report,
+ coex_dm->cur_bt_auto_report);
+
+ if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
+ return;
+ }
+ halbtc8723b2ant_set_bt_auto_report(btcoexist,
+ coex_dm->cur_bt_auto_report);
+
+ coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
+}
+
+void halbtc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
+ bool force_exec, u8 fw_dac_swing_lvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+		  (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+ coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], preFwDacSwingLvl=%d, "
+ "curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
+
+		if (coex_dm->pre_fw_dac_swing_lvl ==
+ coex_dm->cur_fw_dac_swing_lvl)
+ return;
+ }
+
+ halbtc8723b2ant_set_fw_dac_swing_level(btcoexist,
+ coex_dm->cur_fw_dac_swing_lvl);
+ coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
+}
+
+void halbtc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
+ bool rx_rf_shrink_on)
+{
+ if (rx_rf_shrink_on) {
+ /* Shrink RF Rx LPF corner */
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+ 0xfffff, 0xffffc);
+ } else {
+ /* Resume RF Rx LPF corner */
+ /* After initialized, we can use coex_dm->btRf0x1eBackup */
+ if (btcoexist->initilized) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+ 0xfffff,
+ coex_dm->bt_rf0x1e_backup);
+ }
+ }
+}
+
+void halbtc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
+ bool force_exec, bool rx_rf_shrink_on)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+		  (force_exec ? "force to" : ""), (rx_rf_shrink_on ? "ON" : "OFF"));
+ coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreRfRxLpfShrink=%d, "
+ "bCurRfRxLpfShrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
+
+ if (coex_dm->pre_rf_rx_lpf_shrink ==
+ coex_dm->cur_rf_rx_lpf_shrink)
+ return;
+ }
+ halbtc8723b2ant_set_sw_rf_rx_lpf_corner(btcoexist,
+ coex_dm->cur_rf_rx_lpf_shrink);
+
+ coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
+}
+
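+/* H2C 0x69, opcode 0x6 (Retry_Penalty): when low_penalty_ra is set,
+ * program per-rate retry penalties for MCS7/6/5 (OFDM 54/48/36) using
+ * the byte values below.
+ */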
+void halbtc8723b2ant_set_sw_penalty_txrate_adaptive(
+ struct btc_coexist *btcoexist,
+ bool low_penalty_ra)
+{
+	u8 h2c_parameter[6] = {0};
+
+ h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/
+
+ if (low_penalty_ra) {
+ h2c_parameter[1] |= BIT0;
+ /*normal rate except MCS7/6/5, OFDM54/48/36*/
+ h2c_parameter[2] = 0x00;
+ h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54*/
+ h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48*/
+ h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+		  (low_penalty_ra ? "ON!!" : "OFF!!"));
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
+}
+
+void halbtc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
+ bool force_exec, bool low_penalty_ra)
+{
+ /*return; */
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+		  (force_exec ? "force to" : ""), (low_penalty_ra ? "ON" : "OFF"));
+ coex_dm->cur_low_penalty_ra = low_penalty_ra;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreLowPenaltyRa=%d, "
+ "bCurLowPenaltyRa=%d\n",
+ coex_dm->pre_low_penalty_ra,
+ coex_dm->cur_low_penalty_ra);
+
+ if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
+ return;
+ }
+ halbtc8723b2ant_set_sw_penalty_txrate_adaptive(btcoexist,
+ coex_dm->cur_low_penalty_ra);
+
+ coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
+}
+
+void halbtc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
+ u32 level)
+{
+	u8 val = (u8)level;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
+}
+
+void halbtc8723b2ant_set_sw_fulltime_dac_swing(struct btc_coexist *btcoexist,
+ bool sw_dac_swing_on,
+ u32 sw_dac_swing_lvl)
+{
+	if (sw_dac_swing_on)
+ halbtc8723b2ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl);
+ else
+ halbtc8723b2ant_set_dac_swing_reg(btcoexist, 0x18);
+}
+
+
+void halbtc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
+ bool force_exec, bool dac_swing_on,
+ u32 dac_swing_lvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+		  (force_exec ? "force to" : ""),
+		  (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
+ coex_dm->cur_dac_swing_on = dac_swing_on;
+ coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x,"
+ " bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
+
+ if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
+ (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
+ return;
+ }
+ mdelay(30);
+ halbtc8723b2ant_set_sw_fulltime_dac_swing(btcoexist, dac_swing_on,
+ dac_swing_lvl);
+
+ coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
+ coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
+}
+
+void halbtc8723b2ant_set_adc_backoff(struct btc_coexist *btcoexist,
+ bool adc_backoff)
+{
+ if (adc_backoff) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB BackOff Level On!\n");
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc05, 0x30, 0x3);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB BackOff Level Off!\n");
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc05, 0x30, 0x1);
+ }
+}
+
+void halbtc8723b2ant_adc_backoff(struct btc_coexist *btcoexist,
+ bool force_exec, bool adc_backoff)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn AdcBackOff = %s\n",
+		  (force_exec ? "force to" : ""), (adc_backoff ? "ON" : "OFF"));
+ coex_dm->cur_adc_back_off = adc_backoff;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n",
+ coex_dm->pre_adc_back_off,
+ coex_dm->cur_adc_back_off);
+
+ if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
+ return;
+ }
+ halbtc8723b2ant_set_adc_backoff(btcoexist, coex_dm->cur_adc_back_off);
+
+ coex_dm->pre_adc_back_off = coex_dm->cur_adc_back_off;
+}
+
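+/* Switch the BB AGC gain table and the RF gain entries between the
+ * on/off settings, and pass rssi_adjust_val = 8 to the wifi side when
+ * the AGC table is enabled.
+ */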
+void halbtc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
+ bool agc_table_en)
+{
+ u8 rssi_adjust_val = 0;
+
+ /* BB AGC Gain Table */
+ if (agc_table_en) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table On!\n");
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6b1D0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6a1E0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table Off!\n");
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa4200001);
+ }
+
+
+ /* RF Gain */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
+ if (agc_table_en) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table On!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x38fff);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x38ffe);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table Off!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x380c3);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x28ce6);
+ }
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
+
+ if (agc_table_en) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table On!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+ 0xfffff, 0x38fff);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+ 0xfffff, 0x38ffe);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table Off!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+ 0xfffff, 0x380c3);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+ 0xfffff, 0x28ce6);
+ }
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x0);
+
+ /* set rssiAdjustVal for wifi module. */
+ if (agc_table_en)
+ rssi_adjust_val = 8;
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
+ &rssi_adjust_val);
+}
+
+void halbtc8723b2ant_agc_table(struct btc_coexist *btcoexist,
+ bool force_exec, bool agc_table_en)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s %s Agc Table\n",
+		  (force_exec ? "force to" : ""),
+		  (agc_table_en ? "Enable" : "Disable"));
+ coex_dm->cur_agc_table_en = agc_table_en;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+
+ if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
+ return;
+ }
+ halbtc8723b2ant_set_agc_table(btcoexist, agc_table_en);
+
+ coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
+}
+
+void halbtc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
+ u32 val0x6c0, u32 val0x6c4,
+ u32 val0x6c8, u8 val0x6cc)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+ btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
+}
+
+void halbtc8723b2ant_coex_table(struct btc_coexist *btcoexist,
+ bool force_exec, u32 val0x6c0,
+ u32 val0x6c4, u32 val0x6c8,
+ u8 val0x6cc)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0=0x%x,"
+ " 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+		  (force_exec ? "force to" : ""), val0x6c0,
+ val0x6c4, val0x6c8, val0x6cc);
+ coex_dm->cur_val0x6c0 = val0x6c0;
+ coex_dm->cur_val0x6c4 = val0x6c4;
+ coex_dm->cur_val0x6c8 = val0x6c8;
+ coex_dm->cur_val0x6cc = val0x6cc;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], preVal0x6c0=0x%x, "
+ "preVal0x6c4=0x%x, preVal0x6c8=0x%x, "
+ "preVal0x6cc=0x%x !!\n",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
+ coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], curVal0x6c0=0x%x, "
+ "curVal0x6c4=0x%x, curVal0x6c8=0x%x, "
+ "curVal0x6cc=0x%x !!\n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
+ coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+
+ if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
+ (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
+ (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
+ (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
+ return;
+ }
+ halbtc8723b2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
+ val0x6c8, val0x6cc);
+
+ coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
+ coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
+ coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
+ coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
+}
+
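+/* Translate a coex table type index into one of the preset
+ * 0x6c0/0x6c4/0x6c8/0x6cc register value sets.
+ */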
+void halbtc8723b2ant_coex_table_with_type(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ switch (type) {
+ case 0:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x55555555, 0xffff, 0x3);
+ break;
+ case 1:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5afa5afa, 0xffff, 0x3);
+ break;
+ case 2:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+ 0x5a5a5a5a, 0xffff, 0x3);
+ break;
+ case 3:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa,
+ 0xaaaaaaaa, 0xffff, 0x3);
+ break;
+ case 4:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0xffffffff,
+ 0xffffffff, 0xffff, 0x3);
+ break;
+ case 5:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+ 0x5fff5fff, 0xffff, 0x3);
+ break;
+ case 6:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5a5a5a5a, 0xffff, 0x3);
+ break;
+ case 7:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5afa5afa, 0xffff, 0x3);
+ break;
+ case 8:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x5aea5aea,
+ 0x5aea5aea, 0xffff, 0x3);
+ break;
+ case 9:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5aea5aea, 0xffff, 0x3);
+ break;
+ case 10:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5aff5aff, 0xffff, 0x3);
+ break;
+ case 11:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5a5f5a5f, 0xffff, 0x3);
+ break;
+ case 12:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5f5f5f5f, 0xffff, 0x3);
+ break;
+ default:
+ break;
+ }
+}
+
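+/* H2C command 0x63: bit0 of the single parameter byte asks BT firmware to
+ * ignore WLAN activity.
+ */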
+void halbtc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
+ bool enable)
+{
+ u8 h2c_parameter[1] = {0};
+
+ if (enable)
+ h2c_parameter[0] |= BIT0; /* function enable */
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, "
+ "FW write 0x63=0x%x\n", h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
+}
+
+void halbtc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ coex_dm->cur_ignore_wlan_act = enable;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreIgnoreWlanAct = %d, "
+ "bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
+
+ if (coex_dm->pre_ignore_wlan_act ==
+ coex_dm->cur_ignore_wlan_act)
+ return;
+ }
+ halbtc8723b2ant_set_fw_ignore_wlan_act(btcoexist, enable);
+
+ coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
+}
+
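+/* Push the five PS-TDMA parameter bytes to BT firmware through H2C command
+ * 0x60 and mirror them in coex_dm->ps_tdma_para.
+ */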
+void halbtc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
+ u8 byte2, u8 byte3, u8 byte4, u8 byte5)
+{
+ u8 h2c_parameter[5] = {0};
+
+ h2c_parameter[0] = byte1;
+ h2c_parameter[1] = byte2;
+ h2c_parameter[2] = byte3;
+ h2c_parameter[3] = byte4;
+ h2c_parameter[4] = byte5;
+
+ coex_dm->ps_tdma_para[0] = byte1;
+ coex_dm->ps_tdma_para[1] = byte2;
+ coex_dm->ps_tdma_para[2] = byte3;
+ coex_dm->ps_tdma_para[3] = byte4;
+ coex_dm->ps_tdma_para[4] = byte5;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 | h2c_parameter[4]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
+}
+
+void halbtc8723b2ant_sw_mechanism1(struct btc_coexist *btcoexist,
+ bool shrink_rx_lpf, bool low_penalty_ra,
+ bool limited_dig, bool bt_lna_constrain)
+{
+ /*
+ u32 wifi_bw;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 != wifi_bw) //only shrink RF Rx LPF for HT40
+ {
+ if (shrink_rx_lpf)
+ shrink_rx_lpf = false;
+ }
+ */
+
+ halbtc8723b2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
+ halbtc8723b2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
+}
+
+void halbtc8723b2ant_sw_mechanism2(struct btc_coexist *btcoexist,
+ bool agc_table_shift, bool adc_backoff,
+ bool sw_dac_swing, u32 dac_swing_lvl)
+{
+ halbtc8723b2ant_agc_table(btcoexist, NORMAL_EXEC, agc_table_shift);
+ /*halbtc8723b2ant_adc_backoff(btcoexist, NORMAL_EXEC, adc_backoff);*/
+ halbtc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
+ dac_swing_lvl);
+}
+
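+/* Configure the antenna path. The antenna is routed through either the
+ * external switch (0x92c) or the internal switch (0x948), chosen from the PG
+ * ext-switch flag and the firmware version; with init_hwcfg the shared
+ * antenna-control registers are also programmed and the antenna-inverse
+ * setting is reported to BT firmware via H2C 0x65.
+ */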
+void halbtc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
+ u8 antpos_type, bool init_hwcfg,
+ bool wifi_off)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ u32 fw_ver = 0, u32tmp = 0;
+ bool pg_ext_switch = false;
+ bool use_ext_switch = false;
+ u8 h2c_parameter[2] = {0};
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_EXT_SWITCH, &pg_ext_switch);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+
+ if ((fw_ver < 0xc0000) || pg_ext_switch)
+ use_ext_switch = true;
+
+ if (init_hwcfg) {
+ /* 0x4c[23]=0, 0x4c[24]=1 Antenna control by WL/BT */
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u32tmp &= ~BIT23;
+ u32tmp |= BIT24;
+ btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+
+ btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3);
+ btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1);
+
+ /* Force GNT_BT to low */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+
+ if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
+ /* tell firmware "no antenna inverse" */
+ h2c_parameter[0] = 0;
+ h2c_parameter[1] = 1; /* ext switch type */
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ } else {
+ /* tell firmware "antenna inverse" */
+ h2c_parameter[0] = 1;
+ h2c_parameter[1] = 1; /* ext switch type */
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ }
+ }
+
+ /* ext switch setting */
+ if (use_ext_switch) {
+ /* fixed internal switch S1->WiFi, S0->BT */
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+ switch (antpos_type) {
+ case BTC_ANT_WIFI_AT_MAIN:
+ /* ext switch main at wifi */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
+ 0x3, 0x1);
+ break;
+ case BTC_ANT_WIFI_AT_AUX:
+ /* ext switch aux at wifi */
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x92c, 0x3, 0x2);
+ break;
+ }
+ } else { /* internal switch */
+ /* fixed ext switch */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, 0x3, 0x1);
+ switch (antpos_type) {
+ case BTC_ANT_WIFI_AT_MAIN:
+ /* fixed internal switch S1->WiFi, S0->BT */
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+ break;
+ case BTC_ANT_WIFI_AT_AUX:
+ /* fixed internal switch S0->WiFi, S1->BT */
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+ break;
+ }
+ }
+}
+
+
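+/* Turn PS TDMA on with one of the predefined firmware parameter sets, or
+ * turn it off. Pre/cur state is cached so a non-forced call with unchanged
+ * settings is a no-op.
+ */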
+void halbtc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
+ bool turn_on, u8 type)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"), type);
+ coex_dm->cur_ps_tdma_on = turn_on;
+ coex_dm->cur_ps_tdma = type;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+
+ if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
+ (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
+ return;
+ }
+ if (turn_on) {
+ switch (type) {
+ case 1:
+ default:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x90);
+ break;
+ case 2:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0xe1, 0x90);
+ break;
+ case 3:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0xf1, 0x90);
+ break;
+ case 4:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
+ 0x03, 0xf1, 0x90);
+ break;
+ case 5:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0x60, 0x90);
+ break;
+ case 6:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0x60, 0x90);
+ break;
+ case 7:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0x70, 0x90);
+ break;
+ case 8:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10,
+ 0x3, 0x70, 0x90);
+ break;
+ case 9:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x90);
+ break;
+ case 10:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0xe1, 0x90);
+ break;
+ case 11:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+ 0xa, 0xe1, 0x90);
+ break;
+ case 12:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+ 0x5, 0xe1, 0x90);
+ break;
+ case 13:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0x60, 0x90);
+ break;
+ case 14:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0x60, 0x90);
+ break;
+ case 15:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+ 0xa, 0x60, 0x90);
+ break;
+ case 16:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+ 0x5, 0x60, 0x90);
+ break;
+ case 17:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x2f,
+ 0x2f, 0x60, 0x90);
+ break;
+ case 18:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+ 0x5, 0xe1, 0x90);
+ break;
+ case 19:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+ 0x25, 0xe1, 0x90);
+ break;
+ case 20:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+ 0x25, 0x60, 0x90);
+ break;
+ case 21:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+ 0x03, 0x70, 0x90);
+ break;
+ case 71:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x90);
+ break;
+ }
+ } else {
+ /* disable PS tdma */
+ switch (type) {
+ case 0:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x40, 0x0);
+ break;
+ case 1:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x48, 0x0);
+ break;
+ default:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x40, 0x0);
+ break;
+ }
+ }
+
+ /* update pre state */
+ coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
+ coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
+}
+
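+/* Turn firmware, software and hardware coexistence mechanisms all off. */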
+void halbtc8723b2ant_coex_alloff(struct btc_coexist *btcoexist)
+{
+ /* fw all off */
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ /* sw all off */
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+ /* hw all off */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+}
+
+void halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ /* force to reset coex mechanism */
+
+ halbtc8723b2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false);
+
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+}
+
+void halbtc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+{
+ bool wifi_connected = false;
+ bool low_pwr_disable = true;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+
+ if (wifi_connected) {
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ } else {
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ }
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+ coex_dm->need_recover_0x948 = true;
+ coex_dm->backup_0x948 = btcoexist->btc_read_2byte(btcoexist, 0x948);
+
+ halbtc8723b2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_AUX,
+ false, false);
+}
+
+bool halbtc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
+{
+ bool bCommon = false, wifi_connected = false;
+ bool wifi_busy = false;
+ bool bt_hs_on = false, low_pwr_disable = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+ if (!wifi_connected) {
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi non-connected idle!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+ 0x0);
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, false,
+ false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, false,
+ 0x18);
+
+ bCommon = true;
+ } else {
+ if (BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+ coex_dm->bt_status) {
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi connected + "
+ "BT non connected-idle!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x0);
+ halbtc8723b2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 0);
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+ 1);
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 0xb);
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+ false);
+
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ bCommon = true;
+ } else if (BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status) {
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ if (bt_hs_on)
+ return false;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi connected + "
+ "BT connected-idle!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x0);
+ halbtc8723b2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 0);
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+ 1);
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 0xb);
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+ false);
+
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ bCommon = true;
+ } else {
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ if (wifi_busy) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Busy + "
+ "BT Busy!!\n");
+ bCommon = false;
+ } else {
+ if (bt_hs_on)
+ return false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Idle + "
+ "BT Busy!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
+ 0x1, 0xfffff, 0x0);
+ halbtc8723b2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC,
+ 7);
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 21);
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist,
+ NORMAL_EXEC,
+ 0xb);
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist,
+ NORMAL_EXEC,
+ true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist,
+ NORMAL_EXEC,
+ false);
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false,
+ false, false,
+ false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false,
+ false, false,
+ 0x18);
+ bCommon = true;
+ }
+ }
+ }
+
+ return bCommon;
+}
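+
+/* Adaptive PS-TDMA tuning: the first call picks a starting TDMA type from
+ * sco_hid/tx_pause/max_interval; later calls watch the BT retry counter and
+ * step the WiFi duration up (no retries) or down (retries seen) by moving
+ * between neighbouring TDMA types.
+ */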
+void halbtc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
+ bool sco_hid, bool tx_pause,
+ u8 max_interval)
+{
+ static s32 up, dn, m, n, wait_count;
+ /* 0: no change, +1: increase WiFi duration, -1: decrease WiFi duration */
+ s32 result;
+ u8 retryCount = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjust()\n");
+
+ if (!coex_dm->auto_tdma_adjust) {
+ coex_dm->auto_tdma_adjust = true;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ if (sco_hid) {
+ if (tx_pause) {
+ if (max_interval == 1) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 13);
+ coex_dm->ps_tdma_du_adj_type = 13;
+ } else if (max_interval == 2) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (max_interval == 3) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ }
+ } else {
+ if (max_interval == 1) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (max_interval == 2) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (max_interval == 3) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ }
+ }
+ } else {
+ if (tx_pause) {
+ if (max_interval == 1) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
+ } else if (max_interval == 2) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (max_interval == 3) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ }
+ } else {
+ if (max_interval == 1) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type = 1;
+ } else if (max_interval == 2) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (max_interval == 3) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ }
+ }
+ }
+
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ result = 0;
+ wait_count = 0;
+ } else {
+ /* acquire the BT TRx retry count from BT_Info byte2 */
+ retryCount = coex_sta->bt_retry_cnt;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], retryCount = %d\n", retryCount);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
+ up, dn, m, n, wait_count);
+ result = 0;
+ wait_count++;
+ /* no retry in the last 2-second duration */
+ if (retryCount == 0) {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+ if (up >= n) {
+ wait_count = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Increase wifi "
+ "duration!!\n");
+ } /* <= 3 retries in the last 2-second duration */
+ } else if (retryCount <= 3) {
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+ if (dn == 2) {
+ if (wait_count <= 2)
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration "
+ "for retryCounter<3!!\n");
+ }
+ } else {
+ if (wait_count == 1)
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration "
+ "for retryCounter>3!!\n");
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], max Interval = %d\n", max_interval);
+ if (max_interval == 1) {
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+
+ if (coex_dm->cur_ps_tdma == 71) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+
+ if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 13);
+ coex_dm->ps_tdma_du_adj_type = 13;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type =
+ 5;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 13);
+ coex_dm->ps_tdma_du_adj_type =
+ 13;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 71);
+ coex_dm->ps_tdma_du_adj_type = 71;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+
+ if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 71) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type =
+ 1;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type =
+ 1;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 71);
+ coex_dm->ps_tdma_du_adj_type =
+ 71;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type =
+ 9;
+ }
+ }
+ }
+ } else if (max_interval == 2) {
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+ if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+ if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ }
+ }
+ }
+ } else if (max_interval == 3) {
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+ if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+ if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ }
+ }
+ }
+ }
+ }
+
+ /* If the current PsTdma does not match the recorded one (e.g. during
+ * scan or DHCP), adjust it back to the previously recorded type.
+ */
+ if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
+ bool scan = false, link = false, roam = false;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], PsTdma type dismatch!!!, "
+ "curPsTdma=%d, recordPsTdma=%d\n",
+ coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+ if (!scan && !link && !roam)
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
+ coex_dm->ps_tdma_du_adj_type);
+ else
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], roaming/link/scan is under"
+ " progress, will adjust next time!!!\n");
+ }
+}
+
+/* SCO only or SCO+PAN(HS) */
+void halbtc8723b2ant_action_sco(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ /* for SCO quality at 11b/g mode */
+ if (BTC_WIFI_BW_LEGACY == wifi_bw)
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+ else /* for SCO quality & wifi performance balance at 11n mode */
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
+
+ /* for voice quality */
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x4);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ true, 0x4);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x4);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ true, 0x4);
+ }
+ }
+}
+
+
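+/* HID: coex table chosen by WiFi bandwidth, PS-TDMA type by BT RSSI state. */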
+void halbtc8723b2ant_action_hid(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_LEGACY == wifi_bw) /* for HID at 11b/g mode */
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ else /* for HID quality & wifi performance balance at 11n mode */
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 9);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ else
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
+void halbtc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+ u32 wifi_bw;
+ u8 ap_num = 0;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ wifi_rssi_state1 = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 1, 2, 40, 0);
+ bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
+
+ /* define the office environment */
+ /* the driver does not know the AP count on Linux, so this branch is never entered */
+ if (ap_num >= 10 && BTC_RSSI_HIGH(wifi_rssi_state1)) {
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+ 0x0);
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x18);
+ }
+ return;
+ }
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, false, false, 1);
+ else
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 1);
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+void halbtc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 2);
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+void halbtc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 10);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
+ else
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+
+/* PAN(HS) only */
+void halbtc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* PAN(EDR)+A2DP */
+void halbtc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 12);
+ if (BTC_WIFI_BW_HT40 == wifi_bw)
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, false,
+ true, 3);
+ else
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, false,
+ false, 3);
+ } else {
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 3);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+void halbtc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 3);
+ halbtc8723b2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 11);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x780);
+ } else {
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 6);
+ halbtc8723b2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 7);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x0);
+ }
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+ } else {
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 11);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+ 0x0);
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* HID+A2DP+PAN(EDR) */
+void halbtc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ if (BTC_WIFI_BW_HT40 == wifi_bw)
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, true,
+ true, 2);
+ else
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, true,
+ false, 3);
+ } else {
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 3);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+void halbtc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+ else
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
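+/*
+ * Top-level dispatcher: bail out under manual control or IPS, handle BT
+ * inquiry/page specially, then run the per-profile action routine that
+ * matches the algorithm computed by halbtc8723b2ant_action_algorithm().
+ */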
+void halbtc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+{
+ u8 algorithm = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism()===>\n");
+
+ if (btcoexist->manual_control) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), "
+ "return for Manual CTRL <===\n");
+ return;
+ }
+
+ if (coex_sta->under_ips) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], wifi is under IPS !!!\n");
+ return;
+ }
+
+ algorithm = halbtc8723b2ant_action_algorithm(btcoexist);
+ if (coex_sta->c2h_bt_inquiry_page &&
+ (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
+ halbtc8723b2ant_action_bt_inquiry(btcoexist);
+ return;
+ } else {
+ if (coex_dm->need_recover_0x948) {
+ coex_dm->need_recover_0x948 = false;
+ btcoexist->btc_write_2byte(btcoexist, 0x948,
+ coex_dm->backup_0x948);
+ }
+ }
+
+ coex_dm->cur_algorithm = algorithm;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Algorithm = %d \n",
+ coex_dm->cur_algorithm);
+
+ if (halbtc8723b2ant_is_common_action(btcoexist)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant common.\n");
+ coex_dm->auto_tdma_adjust = false;
+ } else {
+ if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], preAlgorithm=%d, "
+ "curAlgorithm=%d\n", coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
+ coex_dm->auto_tdma_adjust = false;
+ }
+ switch (coex_dm->cur_algorithm) {
+ case BT_8723B_2ANT_COEX_ALGO_SCO:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = SCO.\n");
+ halbtc8723b2ant_action_sco(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HID.\n");
+ halbtc8723b2ant_action_hid(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = A2DP.\n");
+ halbtc8723b2ant_action_a2dp(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = A2DP+PAN(HS).\n");
+ halbtc8723b2ant_action_a2dp_pan_hs(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = PAN(EDR).\n");
+ halbtc8723b2ant_action_pan_edr(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = HS mode.\n");
+ halbtc8723b2ant_action_pan_hs(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = PAN+A2DP.\n");
+ halbtc8723b2ant_action_pan_edr_a2dp(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = PAN(EDR)+HID.\n");
+ halbtc8723b2ant_action_pan_edr_hid(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = HID+A2DP+PAN.\n");
+ halbtc8723b2ant_action_hid_a2dp_pan_edr(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = HID+A2DP.\n");
+ halbtc8723b2ant_action_hid_a2dp(btcoexist);
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = coexist All Off!!\n");
+ halbtc8723b2ant_coex_alloff(btcoexist);
+ break;
+ }
+ coex_dm->pre_algorithm = coex_dm->cur_algorithm;
+ }
+}
+
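+/*
+ * Park the coex-related registers for wifi-off: wlan_act low, GNT_BT forced
+ * high and s0/s1 selection left to BT (see the inline comments below).
+ */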
+void halbtc8723b2ant_wifioff_hwcfg(struct btc_coexist *btcoexist)
+{
+ /* set wlan_act to low */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
+ /* Force GNT_BT to High */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3);
+ /* BT select s0/s1 is controlled by BT */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0);
+}
+
+/*********************************************************************
+ * work around function start with wa_halbtc8723b2ant_
+ *********************************************************************/
+/*********************************************************************
+ * extern function start with EXhalbtc8723b2ant_
+ *********************************************************************/
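+/*
+ * One-time HW setup for the 2-antenna case: back up RF A register 0x1e,
+ * program 0x790[5:0], select the main antenna path, load coex table type 0
+ * and enable the counter statistics registers.
+ */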
+void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
+{
+ u8 u8tmp = 0;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], 2Ant Init HW Config!!\n");
+ coex_dm->bt_rf0x1e_backup =
+ btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff);
+
+ /* 0x790[5:0]=0x5 */
+ u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
+ u8tmp &= 0xc0;
+ u8tmp |= 0x5;
+ btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
+
+ /* Antenna config */
+ halbtc8723b2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_MAIN,
+ true, false);
+
+ /* PTA parameter */
+ halbtc8723b2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+
+ /* Enable counter statistics */
+ /* 0x76e[3] = 1, WLAN_Act controlled by PTA */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+ btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
+}
+
+void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
+ halbtc8723b2ant_init_coex_dm(btcoexist);
+}
+
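+/*
+ * Dump the current coexist state (versions, wifi/BT status, sw/fw mechanism
+ * settings and raw HW registers) line by line into the cli buffer.
+ */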
+void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ u8 *cli_buf = btcoexist->cli_buf;
+ u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
+ u32 u32tmp[4];
+ bool roam = false, scan = false;
+ bool link = false, wifi_under_5g = false;
+ bool bt_hs_on = false, wifi_busy = false;
+ s32 wifi_rssi = 0, bt_hs_rssi = 0;
+ u32 wifi_bw, wifi_traffic_dir, fa_ofdm, fa_cck;
+ u8 wifi_dot11_chnl, wifi_hs_chnl;
+ u32 fw_ver = 0, bt_patch_ver = 0;
+ u8 ap_num = 0;
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cli_buf);
+
+ if (btcoexist->manual_control) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ==========[Under Manual Control]============");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ==========================================");
+ CL_PRINTF(cli_buf);
+ }
+
+ if (!board_info->bt_exist) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+ CL_PRINTF(cli_buf);
+ return;
+ }
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+ "Ant PG number/ Ant mechanism:",
+ board_info->pg_ant_num, board_info->btdm_ant_num);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d",
+ "BT stack/ hci ext ver",
+ ((stack_info->profile_notified)? "Yes":"No"),
+ stack_info->hci_version);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ fw_ver/ PatchVer",
+ glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
+ &wifi_dot11_chnl);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsChnl(HsMode)",
+ wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ",
+ "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0],
+ coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d",
+ "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+ "Wifi link/ roam/ scan", link, roam, scan);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+ &wifi_traffic_dir);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ",
+ "Wifi status", (wifi_under_5g? "5G":"2.4G"),
+ ((BTC_WIFI_BW_LEGACY == wifi_bw)? "Legacy":
+ (((BTC_WIFI_BW_HT40 == wifi_bw)? "HT40":"HT20"))),
+ ((!wifi_busy)? "idle":
+ ((BTC_WIFI_TRAFFIC_TX ==wifi_traffic_dir)?\
+ "uplink":"downlink")));
+ CL_PRINTF(cli_buf);
+
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP",
+ bt_link_info->sco_exist, bt_link_info->hid_exist,
+ bt_link_info->pan_exist, bt_link_info->a2dp_exist);
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s",
+ "BT Info A2DP rate",
+ (bt_info_ext&BIT0)? "Basic rate":"EDR rate");
+ CL_PRINTF(cli_buf);
+
+ for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) {
+ if (coex_sta->bt_info_c2h_cnt[i]) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x "
+ "%02x %02x %02x %02x(%d)",
+ glbt_info_src_8723b_2ant[i],
+ coex_sta->bt_info_c2h[i][0],
+ coex_sta->bt_info_c2h[i][1],
+ coex_sta->bt_info_c2h[i][2],
+ coex_sta->bt_info_c2h[i][3],
+ coex_sta->bt_info_c2h[i][4],
+ coex_sta->bt_info_c2h[i][5],
+ coex_sta->bt_info_c2h[i][6],
+ coex_sta->bt_info_c2h_cnt[i]);
+ CL_PRINTF(cli_buf);
+ }
+ }
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s",
+ "PS state, IPS/LPS",
+ ((coex_sta->under_ips? "IPS ON":"IPS OFF")),
+ ((coex_sta->under_lps? "LPS ON":"LPS OFF")));
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+ /* Sw mechanism */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s", "============[Sw mechanism]============");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+ "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+ coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+ CL_PRINTF(cli_buf);
+
+ /* Fw mechanism */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Fw mechanism]============");
+ CL_PRINTF(cli_buf);
+
+ ps_tdma_case = coex_dm->cur_ps_tdma;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)",
+ "PS TDMA", coex_dm->ps_tdma_para[0],
+ coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2],
+ coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4],
+ ps_tdma_case, coex_dm->auto_tdma_adjust);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+ "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr,
+ coex_dm->cur_ignore_wlan_act);
+ CL_PRINTF(cli_buf);
+
+ /* Hw setting */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Hw setting]============");
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x",
+ "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+ CL_PRINTF(cli_buf);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0x778/0x880[29:25]", u8tmp[0],
+ (u32tmp[0]&0x3e000000) >> 25);
+ CL_PRINTF(cli_buf);
+
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x948/ 0x67[5] / 0x765",
+ u32tmp[0], ((u8tmp[0]&0x20)>> 5), u8tmp[1]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+ u32tmp[0]&0x3, u32tmp[1]&0xff, u32tmp[2]&0x3);
+ CL_PRINTF(cli_buf);
+
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+ ((u8tmp[0] & 0x8)>>3), u8tmp[1],
+ ((u32tmp[0]&0x01800000)>>23), u8tmp[2]&0x1);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0xc50(dig)/0x49c(null-drop)", u32tmp[0]&0xff, u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0xda8);
+ u32tmp[3] = btcoexist->btc_read_4byte(btcoexist, 0xcf0);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
+
+ fa_ofdm = ((u32tmp[0]&0xffff0000) >> 16) +
+ ((u32tmp[1]&0xffff0000) >> 16) +
+ (u32tmp[1] & 0xffff) +
+ (u32tmp[2] & 0xffff) +
+ ((u32tmp[3]&0xffff0000) >> 16) +
+ (u32tmp[3] & 0xffff);
+ fa_cck = (u8tmp[0] << 8) + u8tmp[1];
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "OFDM-CCA/OFDM-FA/CCK-FA", \
+ u32tmp[0]&0xffff, fa_ofdm, fa_cck);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \
+ u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "0x770(high-pri rx/tx)",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+ coex_sta->low_priority_tx);
+ CL_PRINTF(cli_buf);
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1)
+ halbtc8723b2ant_monitor_bt_ctr(btcoexist);
+#endif
+ btcoexist->btc_disp_dbg_msg(btcoexist,
+ BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
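+/*
+ * IPS enter: park the HW for wifi-off and disable coexistence; IPS leave:
+ * redo the HW config, reset the coex DM state and query BT info again.
+ */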
+void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_IPS_ENTER == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
+ coex_sta->under_ips = true;
+ halbtc8723b2ant_wifioff_hwcfg(btcoexist);
+ halbtc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+ halbtc8723b2ant_coex_alloff(btcoexist);
+ } else if (BTC_IPS_LEAVE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
+ coex_sta->under_ips = false;
+ ex_halbtc8723b2ant_init_hwconfig(btcoexist);
+ halbtc8723b2ant_init_coex_dm(btcoexist);
+ halbtc8723b2ant_query_bt_info(btcoexist);
+ }
+}
+
+void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_LPS_ENABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
+ coex_sta->under_lps = true;
+ } else if (BTC_LPS_DISABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
+ coex_sta->under_lps = false;
+ }
+}
+
+void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_SCAN_START == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
+ else if (BTC_SCAN_FINISH == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
+}
+
+void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_ASSOCIATE_START == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
+ else if (BTC_ASSOCIATE_FINISH == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
+}
+
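+/*
+ * On media connect/disconnect, forward the wifi channel and bandwidth to the
+ * BT firmware through H2C command 0x66; the parameters stay zero when
+ * disconnected or when wifi is not on a 2.4G channel (chnl > 14).
+ */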
+void ex_halbtc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ u8 h2c_parameter[3] = {0};
+ u32 wifi_bw;
+ u8 wifi_central_chnl;
+
+ if (BTC_MEDIA_CONNECT == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
+
+ /* only for 2.4G do we need to inform BT of the channel mask */
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifi_central_chnl);
+ if ((BTC_MEDIA_CONNECT == type) &&
+ (wifi_central_chnl <= 14)) {
+ h2c_parameter[0] = 0x1;
+ h2c_parameter[1] = wifi_central_chnl;
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw)
+ h2c_parameter[2] = 0x30;
+ else
+ h2c_parameter[2] = 0x20;
+ }
+
+ coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
+ coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
+ coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66=0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
+}
+
+void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ if (type == BTC_PACKET_DHCP)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], DHCP Packet notify\n");
+}
+
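+/*
+ * C2H BT-info handler: record the raw bytes per source, refresh retry count,
+ * RSSI and profile (PAN/A2DP/HID/SCO) bits, derive coex_dm->bt_status and
+ * finally re-run the coexist mechanism with the new state.
+ */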
+void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmpbuf, u8 length)
+{
+ u8 btInfo = 0;
+ u8 i, rsp_source = 0;
+ bool bt_busy = false, limited_dig = false;
+ bool wifi_connected = false;
+
+ coex_sta->c2h_bt_info_req_sent = false;
+
+ rsp_source = tmpbuf[0] & 0xf;
+ if (rsp_source >= BT_INFO_SRC_8723B_2ANT_MAX)
+ rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
+ coex_sta->bt_info_c2h_cnt[rsp_source]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length=%d, hex data=[",
+ rsp_source, length);
+ for (i = 0; i < length; i++) {
+ coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
+ if (i == 1)
+ btInfo = tmpbuf[i];
+ if (i == length-1)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "0x%02x]\n", tmpbuf[i]);
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "0x%02x, ", tmpbuf[i]);
+ }
+
+ if (btcoexist->manual_control) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), "
+ "return for Manual CTRL<===\n");
+ return;
+ }
+
+ if (BT_INFO_SRC_8723B_2ANT_WIFI_FW != rsp_source) {
+ coex_sta->bt_retry_cnt = /* [3:0]*/
+ coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
+
+ coex_sta->bt_rssi =
+ coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
+
+ coex_sta->bt_info_ext =
+ coex_sta->bt_info_c2h[rsp_source][4];
+
+ /* Here we need to resend some wifi info to BT
+ because BT has been reset and lost the info. */
+ if ((coex_sta->bt_info_ext & BIT1)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT ext info bit1 check,"
+ " send wifi BW&Chnl to BT!!\n");
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ if (wifi_connected)
+ ex_halbtc8723b2ant_media_status_notify(
+ btcoexist,
+ BTC_MEDIA_CONNECT);
+ else
+ ex_halbtc8723b2ant_media_status_notify(
+ btcoexist,
+ BTC_MEDIA_DISCONNECT);
+ }
+
+ if ((coex_sta->bt_info_ext & BIT3)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT ext info bit3 check, "
+ "set BT NOT to ignore Wlan active!!\n");
+ halbtc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC,
+ false);
+ } else {
+ /* BT already does NOT ignore WLAN activity; nothing to do here. */
+ }
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
+ if ((coex_sta->bt_info_ext & BIT4)) {
+ /* BT auto report already enabled, do nothing */
+ } else {
+ halbtc8723b2ant_bt_auto_report(btcoexist, FORCE_EXEC,
+ true);
+ }
+#endif
+ }
+
+ /* check BIT2 first ==> check if BT is under inquiry or page scan */
+ if (btInfo & BT_INFO_8723B_2ANT_B_INQ_PAGE)
+ coex_sta->c2h_bt_inquiry_page = true;
+ else
+ coex_sta->c2h_bt_inquiry_page = false;
+
+ /* set link exist status*/
+ if (!(btInfo & BT_INFO_8723B_2ANT_B_CONNECTION)) {
+ coex_sta->bt_link_exist = false;
+ coex_sta->pan_exist = false;
+ coex_sta->a2dp_exist = false;
+ coex_sta->hid_exist = false;
+ coex_sta->sco_exist = false;
+ } else { /* connection exists */
+ coex_sta->bt_link_exist = true;
+ if (btInfo & BT_INFO_8723B_2ANT_B_FTP)
+ coex_sta->pan_exist = true;
+ else
+ coex_sta->pan_exist = false;
+ if (btInfo & BT_INFO_8723B_2ANT_B_A2DP)
+ coex_sta->a2dp_exist = true;
+ else
+ coex_sta->a2dp_exist = false;
+ if (btInfo & BT_INFO_8723B_2ANT_B_HID)
+ coex_sta->hid_exist = true;
+ else
+ coex_sta->hid_exist = false;
+ if (btInfo & BT_INFO_8723B_2ANT_B_SCO_ESCO)
+ coex_sta->sco_exist = true;
+ else
+ coex_sta->sco_exist = false;
+ }
+
+ halbtc8723b2ant_update_bt_link_info(btcoexist);
+
+ if (!(btInfo & BT_INFO_8723B_2ANT_B_CONNECTION)) {
+ coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), "
+ "BT Non-Connected idle!!!\n");
+ /* connection exists but not busy */
+ } else if (btInfo == BT_INFO_8723B_2ANT_B_CONNECTION) {
+ coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ } else if ((btInfo & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
+ (btInfo & BT_INFO_8723B_2ANT_B_SCO_BUSY)) {
+ coex_dm->bt_status =
+ BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ } else if (btInfo & BT_INFO_8723B_2ANT_B_ACL_BUSY) {
+ coex_dm->bt_status =
+ BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ } else {
+ coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), "
+ "BT Non-Defined state!!!\n");
+ }
+
+ if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_2ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) {
+ bt_busy = true;
+ limited_dig = true;
+ } else {
+ bt_busy = false;
+ limited_dig = false;
+ }
+
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
+ coex_dm->limited_dig = limited_dig;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+
+ halbtc8723b2ant_run_coexist_mechanism(btcoexist);
+}
+
+void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+
+ halbtc8723b2ant_wifioff_hwcfg(btcoexist);
+ halbtc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+ ex_halbtc8723b2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+}
+
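+/*
+ * Periodic callback: print version info a few times after init, then either
+ * poll BT (non auto-report builds) or re-run the coexist mechanism when the
+ * wifi status changed or TDMA auto adjustment is active.
+ */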
+void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+ static u8 dis_ver_info_cnt;
+ u32 fw_ver = 0, bt_patch_ver = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], =========================="
+ "Periodical===========================\n");
+
+ if (dis_ver_info_cnt <= 5) {
+ dis_ver_info_cnt += 1;
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], ****************************"
+ "************************************\n");
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], Ant PG Num/ Ant Mech/ "
+ "Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num,
+ board_info->btdm_ant_num, board_info->btdm_ant_pos);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ ((stack_info->profile_notified)? "Yes":"No"),
+ stack_info->hci_version);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
+ &bt_patch_ver);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], CoexVer/ fw_ver/ PatchVer = "
+ "%d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], *****************************"
+ "***********************************\n");
+ }
+
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
+ halbtc8723b2ant_query_bt_info(btcoexist);
+ halbtc8723b2ant_monitor_bt_ctr(btcoexist);
+ halbtc8723b2ant_monitor_bt_enable_disable(btcoexist);
+#else
+ if (halbtc8723b2ant_is_wifi_status_changed(btcoexist) ||
+ coex_dm->auto_tdma_adjust)
+ halbtc8723b2ant_run_coexist_mechanism(btcoexist);
+#endif
+}
+
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.h b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.h
new file mode 100644
index 000000000000..fa3784aa70cd
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.h
@@ -0,0 +1,145 @@
+/************************************************************************
+ * The following is for 8723B 2Ant BT Co-exist definition
+ ************************************************************************/
+#define BT_AUTO_REPORT_ONLY_8723B_2ANT 1
+
+
+#define BT_INFO_8723B_2ANT_B_FTP BIT7
+#define BT_INFO_8723B_2ANT_B_A2DP BIT6
+#define BT_INFO_8723B_2ANT_B_HID BIT5
+#define BT_INFO_8723B_2ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8723B_2ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8723B_2ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8723B_2ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8723B_2ANT_B_CONNECTION BIT0
+
+#define BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT 2
+
+typedef enum _BT_INFO_SRC_8723B_2ANT{
+ BT_INFO_SRC_8723B_2ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8723B_2ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8723B_2ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8723B_2ANT_MAX
+} BT_INFO_SRC_8723B_2ANT, *PBT_INFO_SRC_8723B_2ANT;
+
+typedef enum _BT_8723B_2ANT_BT_STATUS{
+ BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8723B_2ANT_BT_STATUS_INQ_PAGE = 0x2,
+ BT_8723B_2ANT_BT_STATUS_ACL_BUSY = 0x3,
+ BT_8723B_2ANT_BT_STATUS_SCO_BUSY = 0x4,
+ BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8723B_2ANT_BT_STATUS_MAX
+} BT_8723B_2ANT_BT_STATUS, *PBT_8723B_2ANT_BT_STATUS;
+
+typedef enum _BT_8723B_2ANT_COEX_ALGO{
+ BT_8723B_2ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8723B_2ANT_COEX_ALGO_SCO = 0x1,
+ BT_8723B_2ANT_COEX_ALGO_HID = 0x2,
+ BT_8723B_2ANT_COEX_ALGO_A2DP = 0x3,
+ BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS = 0x4,
+ BT_8723B_2ANT_COEX_ALGO_PANEDR = 0x5,
+ BT_8723B_2ANT_COEX_ALGO_PANHS = 0x6,
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP = 0x7,
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID = 0x8,
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9,
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP = 0xa,
+ BT_8723B_2ANT_COEX_ALGO_MAX = 0xb,
+} BT_8723B_2ANT_COEX_ALGO, *PBT_8723B_2ANT_COEX_ALGO;
+
+struct coex_dm_8723b_2ant{
+ /* fw mechanism */
+ bool pre_dec_bt_pwr;
+ bool cur_dec_bt_pwr;
+ u8 pre_fw_dac_swing_lvl;
+ u8 cur_fw_dac_swing_lvl;
+ bool cur_ignore_wlan_act;
+ bool pre_ignore_wlan_act;
+ u8 pre_ps_tdma;
+ u8 cur_ps_tdma;
+ u8 ps_tdma_para[5];
+ u8 ps_tdma_du_adj_type;
+ bool reset_tdma_adjust;
+ bool auto_tdma_adjust;
+ bool pre_ps_tdma_on;
+ bool cur_ps_tdma_on;
+ bool pre_bt_auto_report;
+ bool cur_bt_auto_report;
+
+ /* sw mechanism */
+ bool pre_rf_rx_lpf_shrink;
+ bool cur_rf_rx_lpf_shrink;
+ u32 bt_rf0x1e_backup;
+ bool pre_low_penalty_ra;
+ bool cur_low_penalty_ra;
+ bool pre_dac_swing_on;
+ u32 pre_dac_swing_lvl;
+ bool cur_dac_swing_on;
+ u32 cur_dac_swing_lvl;
+ bool pre_adc_back_off;
+ bool cur_adc_back_off;
+ bool pre_agc_table_en;
+ bool cur_agc_table_en;
+ u32 pre_val0x6c0;
+ u32 cur_val0x6c0;
+ u32 pre_val0x6c4;
+ u32 cur_val0x6c4;
+ u32 pre_val0x6c8;
+ u32 cur_val0x6c8;
+ u8 pre_val0x6cc;
+ u8 cur_val0x6cc;
+ bool limited_dig;
+
+ /* algorithm related */
+ u8 pre_algorithm;
+ u8 cur_algorithm;
+ u8 bt_status;
+ u8 wifi_chnl_info[3];
+
+ bool need_recover_0x948;
+ u16 backup_0x948;
+};
+
+struct coex_sta_8723b_2ant{
+ bool bt_link_exist;
+ bool sco_exist;
+ bool a2dp_exist;
+ bool hid_exist;
+ bool pan_exist;
+
+ bool under_lps;
+ bool under_ips;
+ u32 high_priority_tx;
+ u32 high_priority_rx;
+ u32 low_priority_tx;
+ u32 low_priority_rx;
+ u8 bt_rssi;
+ u8 pre_bt_rssi_state;
+ u8 pre_wifi_rssi_state[4];
+ bool c2h_bt_info_req_sent;
+ u8 bt_info_c2h[BT_INFO_SRC_8723B_2ANT_MAX][10];
+ u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_2ANT_MAX];
+ bool c2h_bt_inquiry_page;
+ u8 bt_retry_cnt;
+ u8 bt_info_ext;
+};
+
+/*********************************************************************
+ * The following is interface which will notify coex module.
+ *********************************************************************/
+void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmpbuf, u8 length);
+void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist);
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c b/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c
new file mode 100644
index 000000000000..9d9fa4d7575d
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c
@@ -0,0 +1,1181 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include "halbt_precomp.h"
+
+/*#if(BT_30_SUPPORT == 1)*/
+#if 1
+/***********************************************
+ * Global variables
+ ***********************************************/
+const char *const bt_profile_string[] = {
+ "NONE",
+ "A2DP",
+ "PAN",
+ "HID",
+ "SCO",
+};
+
+const char *const bt_spec_string[] = {
+ "1.0b",
+ "1.1",
+ "1.2",
+ "2.0+EDR",
+ "2.1+EDR",
+ "3.0+HS",
+ "4.0",
+};
+
+const char *const bt_link_role_string[] = {
+ "Master",
+ "Slave",
+};
+
+const char *const h2c_state_string[] = {
+ "successful",
+ "h2c busy",
+ "rf off",
+ "fw not read",
+};
+
+const char *const io_state_string[] = {
+ "IO_STATUS_SUCCESS",
+ "IO_STATUS_FAIL_CANNOT_IO",
+ "IO_STATUS_FAIL_RF_OFF",
+ "IO_STATUS_FAIL_FW_READ_CLEAR_TIMEOUT",
+ "IO_STATUS_FAIL_WAIT_IO_EVENT_TIMEOUT",
+ "IO_STATUS_INVALID_LEN",
+ "IO_STATUS_IO_IDLE_QUEUE_EMPTY",
+ "IO_STATUS_IO_INSERT_WAIT_QUEUE_FAIL",
+ "IO_STATUS_UNKNOWN_FAIL",
+ "IO_STATUS_WRONG_LEVEL",
+ "IO_STATUS_H2C_STOPPED",
+};
+
+struct btc_coexist gl_bt_coexist;
+
+u32 btc_dbg_type[BTC_MSG_MAX];
+u8 btc_dbg_buf[100];
+
+/***************************************************
+ * Debug related function
+ ***************************************************/
+bool halbtc_is_bt_coexist_available(struct btc_coexist *btcoexist)
+{
+ if (!btcoexist->binded || NULL == btcoexist->adapter)
+ return false;
+
+ return true;
+}
+
+bool halbtc_is_wifi_busy(struct rtl_priv *rtlpriv)
+{
+ return rtlpriv->link_info.b_busytraffic;
+}
+
+
+void halbtc_dbg_init(void)
+{
+ u8 i;
+
+ for (i = 0; i < BTC_MSG_MAX; i++)
+ btc_dbg_type[i] = 0;
+
+ btc_dbg_type[BTC_MSG_INTERFACE] =
+ /* INTF_INIT | */
+ /* INTF_NOTIFY | */
+ 0;
+
+ btc_dbg_type[BTC_MSG_ALGORITHM] =
+ /* ALGO_BT_RSSI_STATE | */
+ /* ALGO_WIFI_RSSI_STATE | */
+ /* ALGO_BT_MONITOR | */
+ /* ALGO_TRACE | */
+ /* ALGO_TRACE_FW | */
+ /* ALGO_TRACE_FW_DETAIL | */
+ /* ALGO_TRACE_FW_EXEC | */
+ /* ALGO_TRACE_SW | */
+ /* ALGO_TRACE_SW_DETAIL | */
+ /* ALGO_TRACE_SW_EXEC | */
+ 0;
+}
+
+bool halbtc_is_hw_mailbox_exist(struct btc_coexist *btcoexist)
+{
+ return true;
+}
+
+bool halbtc_is_bt40(struct rtl_priv *adapter)
+{
+ struct rtl_priv *rtlpriv = adapter;
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ bool is_ht40 = true;
+ enum ht_channel_width bw = rtlphy->current_chan_bw;
+
+
+ if (bw == HT_CHANNEL_WIDTH_20)
+ is_ht40 = false;
+ else if (bw == HT_CHANNEL_WIDTH_20_40)
+ is_ht40 = true;
+
+ return is_ht40;
+}
+
+bool halbtc_legacy(struct rtl_priv *adapter)
+{
+ struct rtl_priv *rtlpriv = adapter;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ bool is_legacy = false;
+
+ if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G))
+ is_legacy = true;
+
+ return is_legacy;
+}
+
+bool halbtc_is_wifi_uplink(struct rtl_priv *adapter)
+{
+ struct rtl_priv *rtlpriv = adapter;
+
+ return rtlpriv->link_info.b_tx_busy_traffic;
+}
+
+u32 halbtc_get_wifi_bw(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv =
+ (struct rtl_priv *)btcoexist->adapter;
+ u32 wifi_bw = BTC_WIFI_BW_HT20;
+
+ if (halbtc_is_bt40(rtlpriv)) {
+ wifi_bw = BTC_WIFI_BW_HT40;
+ } else {
+ if (halbtc_legacy(rtlpriv))
+ wifi_bw = BTC_WIFI_BW_LEGACY;
+ else
+ wifi_bw = BTC_WIFI_BW_HT20;
+ }
+ return wifi_bw;
+}
+
+u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 chnl = 1;
+
+
+ if (rtlphy->current_channel != 0)
+ chnl = rtlphy->current_channel;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "halbtc_get_wifi_central_chnl:%d\n",chnl);
+ return chnl;
+}
+
+void halbtc_leave_lps(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv;
+ struct rtl_ps_ctl *ppsc;
+ bool ap_enable = false;
+
+ rtlpriv = btcoexist->adapter;
+ ppsc = rtl_psc(rtlpriv);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+
+ if (ap_enable) {
+ printk("halbtc_leave_lps()<--dont leave lps under AP mode\n");
+ return;
+ }
+
+ btcoexist->bt_info.bt_ctrl_lps = true;
+ btcoexist->bt_info.bt_lps_on = false;
+}
+
+void halbtc_enter_lps(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv;
+ struct rtl_ps_ctl *ppsc;
+ bool ap_enable = false;
+
+ rtlpriv = btcoexist->adapter;
+ ppsc = rtl_psc(rtlpriv);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+
+ if (ap_enable) {
+ printk("halbtc_enter_lps()<--dont enter lps under AP mode\n");
+ return;
+ }
+
+ btcoexist->bt_info.bt_ctrl_lps = true;
+ btcoexist->bt_info.bt_lps_on = false;
+}
+
+void halbtc_normal_lps(struct btc_coexist *btcoexist)
+{
+ if (btcoexist->bt_info.bt_ctrl_lps) {
+ btcoexist->bt_info.bt_lps_on = false;
+ btcoexist->bt_info.bt_ctrl_lps = false;
+ }
+
+}
+
+void halbtc_leave_low_power(void)
+{
+}
+
+void halbtc_nomal_low_power(void)
+{
+}
+
+void halbtc_disable_low_power(void)
+{
+}
+
+void halbtc_aggregation_check(void)
+{
+}
+
+
+u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist)
+{
+ return 0;
+}
+
+s32 halbtc_get_wifi_rssi(struct rtl_priv *adapter)
+{
+ struct rtl_priv *rtlpriv = adapter;
+ s32 undecorated_smoothed_pwdb = 0;
+
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ else /* associated entry pwdb */
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ return undecorated_smoothed_pwdb;
+}
+
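+/*
+ * Generic getter used by the algorithm files through btcoexist->btc_get();
+ * out_buf is interpreted according to get_type (bool/s32/u32/u8).  Typical
+ * call (sketch, mirroring the users above):
+ *
+ *	bool wifi_busy = false;
+ *
+ *	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ */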
+bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ bool *bool_tmp = (bool *)out_buf;
+ int *s32_tmp = (int *)out_buf;
+ u32 *u32_tmp = (u32 *)out_buf;
+ u8 *u8_tmp = (u8 *)out_buf;
+ bool tmp = false;
+
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return false;
+
+
+ switch (get_type) {
+ case BTC_GET_BL_HS_OPERATION:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_HS_CONNECTING:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_CONNECTED:
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+ tmp = true;
+
+ *bool_tmp = tmp;
+ break;
+ case BTC_GET_BL_WIFI_BUSY:
+ if (halbtc_is_wifi_busy(rtlpriv))
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_SCAN:
+ if (mac->act_scanning)
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_LINK:
+ if (mac->link_state == MAC80211_LINKING)
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_ROAM: /*TODO*/
+ if (mac->link_state == MAC80211_LINKING)
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_4_WAY_PROGRESS: /*TODO*/
+ *bool_tmp = false;
+
+ break;
+ case BTC_GET_BL_WIFI_UNDER_5G:
+ *bool_tmp = false; /*TODO*/
+ break;
+ case BTC_GET_BL_WIFI_DHCP: /*TODO*/
+ break;
+ case BTC_GET_BL_WIFI_SOFTAP_IDLE:
+ *bool_tmp = true;
+ break;
+ case BTC_GET_BL_WIFI_SOFTAP_LINKING:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_IN_EARLY_SUSPEND:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_AP_MODE_ENABLE:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_ENABLE_ENCRYPTION:
+ if (NO_ENCRYPTION == rtlpriv->sec.pairwise_enc_algorithm)
+ *bool_tmp = false;
+ else
+ *bool_tmp = true;
+ break;
+ case BTC_GET_BL_WIFI_UNDER_B_MODE:
+ *bool_tmp = false; /*TODO*/
+ break;
+ case BTC_GET_BL_EXT_SWITCH:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_S4_WIFI_RSSI:
+ *s32_tmp = halbtc_get_wifi_rssi(rtlpriv);
+ break;
+ case BTC_GET_S4_HS_RSSI: /*TODO*/
+ *s32_tmp = halbtc_get_wifi_rssi(rtlpriv);
+ break;
+ case BTC_GET_U4_WIFI_BW:
+ *u32_tmp = halbtc_get_wifi_bw(btcoexist);
+ break;
+ case BTC_GET_U4_WIFI_TRAFFIC_DIRECTION:
+ if (halbtc_is_wifi_uplink(rtlpriv))
+ *u32_tmp = BTC_WIFI_TRAFFIC_TX;
+ else
+ *u32_tmp = BTC_WIFI_TRAFFIC_RX;
+ break;
+ case BTC_GET_U4_WIFI_FW_VER:
+ *u32_tmp = rtlhal->fw_version;
+ break;
+ case BTC_GET_U4_BT_PATCH_VER:
+ *u32_tmp = halbtc_get_bt_patch_version(btcoexist);
+ break;
+ case BTC_GET_U1_WIFI_DOT11_CHNL:
+ *u8_tmp = rtlphy->current_channel;
+ break;
+ case BTC_GET_U1_WIFI_CENTRAL_CHNL:
+ *u8_tmp = halbtc_get_wifi_central_chnl(btcoexist);
+ break;
+ case BTC_GET_U1_WIFI_HS_CHNL:
+ *u8_tmp = 1;/* BT_OperateChnl(rtlpriv); */
+ break;
+ case BTC_GET_U1_MAC_PHY_MODE:
+ *u8_tmp = BTC_MP_UNKNOWN;
+ break;
+ case BTC_GET_U1_AP_NUM:
+ /* the driver doesn't know the AP count on Linux,
+ * so the value returned here is not accurate */
+ *u8_tmp = 1;/* pDefMgntInfo->NumBssDesc4Query; */
+ break;
+
+ /************* 1Ant **************/
+ case BTC_GET_U1_LPS_MODE:
+ *u8_tmp = btcoexist->pwr_mode_val[0];
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
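+/*
+ * Generic setter counterpart of halbtc_get(): in_buf carries a bool/u8/u32
+ * depending on set_type, and some types simply trigger an action such as
+ * entering or leaving LPS.
+ */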
+bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist;
+ bool *bool_tmp = (bool *)in_buf;
+ u8 *u8_tmp = (u8 *)in_buf;
+ u32 *u32_tmp = (u32 *)in_buf;
+
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return false;
+
+ switch (set_type) {
+ /* set some bool type variables. */
+ case BTC_SET_BL_BT_DISABLE:
+ btcoexist->bt_info.bt_disabled = *bool_tmp;
+ break;
+ case BTC_SET_BL_BT_TRAFFIC_BUSY:
+ btcoexist->bt_info.bt_busy = *bool_tmp;
+ break;
+ case BTC_SET_BL_BT_LIMITED_DIG:
+ btcoexist->bt_info.limited_dig = *bool_tmp;
+ break;
+ case BTC_SET_BL_FORCE_TO_ROAM:
+ btcoexist->bt_info.force_to_roam = *bool_tmp;
+ break;
+ case BTC_SET_BL_TO_REJ_AP_AGG_PKT:
+ btcoexist->bt_info.reject_agg_pkt = *bool_tmp;
+ break;
+ case BTC_SET_BL_BT_CTRL_AGG_SIZE:
+ btcoexist->bt_info.b_bt_ctrl_buf_size = *bool_tmp;
+ break;
+ case BTC_SET_BL_INC_SCAN_DEV_NUM:
+ btcoexist->bt_info.increase_scan_dev_num = *bool_tmp;
+ break;
+ /* set some u1Byte type variables. */
+ case BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON:
+ btcoexist->bt_info.rssi_adjust_for_agc_table_on = *u8_tmp;
+ break;
+ case BTC_SET_U1_AGG_BUF_SIZE:
+ btcoexist->bt_info.agg_buf_size = *u8_tmp;
+ break;
+ /* the following are some action which will be triggered */
+ case BTC_SET_ACT_GET_BT_RSSI:
+ /*BTHCI_SendGetBtRssiEvent(rtlpriv);*/
+ break;
+ case BTC_SET_ACT_AGGREGATE_CTRL:
+ halbtc_aggregation_check();
+ break;
+
+ /* 1Ant */
+ case BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE:
+ btcoexist->bt_info.rssi_adjust_for_1ant_coex_type = *u8_tmp;
+ break;
+ case BTC_SET_UI_SCAN_SIG_COMPENSATION:
+ /* rtlpriv->mlmepriv.scan_compensation = *u8_tmp; */
+ break;
+ case BTC_SET_U1_1ANT_LPS:
+ btcoexist->bt_info.lps_1ant = *u8_tmp;
+ break;
+ case BTC_SET_U1_1ANT_RPWM:
+ btcoexist->bt_info.rpwm_1ant = *u8_tmp;
+ break;
+ /* the following are some action which will be triggered */
+ case BTC_SET_ACT_LEAVE_LPS:
+ halbtc_leave_lps(btcoexist);
+ break;
+ case BTC_SET_ACT_ENTER_LPS:
+ halbtc_enter_lps(btcoexist);
+ break;
+ case BTC_SET_ACT_NORMAL_LPS:
+ halbtc_normal_lps(btcoexist);
+ break;
+ case BTC_SET_ACT_DISABLE_LOW_POWER:
+ halbtc_disable_low_power();
+ break;
+ case BTC_SET_ACT_UPDATE_ra_mask:
+ btcoexist->bt_info.ra_mask = *u32_tmp;
+ break;
+ case BTC_SET_ACT_SEND_MIMO_PS:
+ break;
+ case BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT:
+ btcoexist->bt_info.force_exec_pwr_cmd_cnt++;
+ break;
+ case BTC_SET_ACT_CTRL_BT_INFO: /*wait for 8812/8821*/
+ break;
+ case BTC_SET_ACT_CTRL_BT_COEX:
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+void halbtc_display_coex_statistics(struct btc_coexist *btcoexist)
+{
+}
+
+void halbtc_display_bt_link_info(struct btc_coexist *btcoexist)
+{
+}
+
+void halbtc_display_bt_fw_info(struct btc_coexist *btcoexist)
+{
+}
+
+void halbtc_display_fw_pwr_mode_cmd(struct btc_coexist *btcoexist)
+{
+}
+
+/************************************************************
+ * IO related function
+ ************************************************************/
+u8 halbtc_read_1byte(void *bt_context, u32 reg_addr)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_read_byte(rtlpriv, reg_addr);
+}
+
+
+u16 halbtc_read_2byte(void *bt_context, u32 reg_addr)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_read_word(rtlpriv, reg_addr);
+}
+
+
+u32 halbtc_read_4byte(void *bt_context, u32 reg_addr)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_read_dword(rtlpriv, reg_addr);
+}
+
+
+void halbtc_write_1byte(void *bt_context, u32 reg_addr, u8 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_write_byte(rtlpriv, reg_addr, data);
+}
+
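+/*
+ * Read-modify-write of a byte register: the shift is taken from the lowest
+ * set bit of bit_mask.  For example (values as used elsewhere in this patch,
+ * not new behaviour), writing mask 0x18 with data 0x3 updates bits [4:3]
+ * and leaves the other bits of the register untouched.
+ */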
+void halbtc_bitmask_write_1byte(void *bt_context, u32 reg_addr,
+ u8 bit_mask, u8 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u8 original_value, bit_shift = 0;
+ u8 i;
+
+ if (bit_mask != MASKDWORD) { /* if not "double word" write */
+ original_value = rtl_read_byte(rtlpriv, reg_addr);
+ for (i = 0; i <= 7; i++) {
+ if ((bit_mask >> i) & 0x1)
+ break;
+ }
+ bit_shift = i;
+ data = (original_value & (~bit_mask)) |
+ ((data << bit_shift) & bit_mask);
+ }
+ rtl_write_byte(rtlpriv, reg_addr, data);
+}
+
+
+void halbtc_write_2byte(void *bt_context, u32 reg_addr, u16 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_write_word(rtlpriv, reg_addr, data);
+}
+
+
+void halbtc_write_4byte(void *bt_context, u32 reg_addr, u32 data)
+{
+ struct btc_coexist *btcoexist =
+ (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_write_dword(rtlpriv, reg_addr, data);
+}
+
+
+void halbtc_set_macreg(void *bt_context, u32 reg_addr, u32 bit_mask, u32 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data);
+}
+
+
+u32 halbtc_get_macreg(void *bt_context, u32 reg_addr, u32 bit_mask)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask);
+}
+
+
+void halbtc_set_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask, u32 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data);
+}
+
+
+u32 halbtc_get_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask);
+}
+
+
+void halbtc_set_rfreg(void *bt_context, u8 rf_path, u32 reg_addr,
+ u32 bit_mask, u32 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_set_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask, data);
+}
+
+
+u32 halbtc_get_rfreg(void *bt_context, u8 rf_path, u32 reg_addr, u32 bit_mask)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_get_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask);
+}
+
+
+void halbtc_fill_h2c_cmd(void *bt_context, u8 element_id,
+ u32 cmd_len, u8 *cmd_buf)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, element_id,
+ cmd_len, cmd_buf);
+}
+
+void halbtc_display_dbg_msg(void *bt_context, u8 disp_type)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ switch (disp_type) {
+ case BTC_DBG_DISP_COEX_STATISTICS:
+ halbtc_display_coex_statistics(btcoexist);
+ break;
+ case BTC_DBG_DISP_BT_LINK_INFO:
+ halbtc_display_bt_link_info(btcoexist);
+ break;
+ case BTC_DBG_DISP_BT_FW_VER:
+ halbtc_display_bt_fw_info(btcoexist);
+ break;
+ case BTC_DBG_DISP_FW_PWR_MODE_CMD:
+ halbtc_display_fw_pwr_mode_cmd(btcoexist);
+ break;
+ default:
+ break;
+ }
+}
+
+bool halbtc_under_ips(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ enum rf_pwrstate rtstate;
+
+ if (ppsc->b_inactiveps) {
+ rtstate = ppsc->rfpwr_state;
+
+ if (rtstate != ERFON &&
+ ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/*****************************************************************
+ * Extern functions called by other module
+ *****************************************************************/
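+/*
+ * Bind the global gl_bt_coexist instance to the driver: detect the bus type
+ * at compile time, remember the adapter pointer and fill in the register
+ * access / get / set callbacks used by the algorithm files.
+ */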
+bool exhalbtc_initlize_variables(struct rtl_priv *adapter)
+{
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ btcoexist->statistics.cnt_bind++;
+
+ halbtc_dbg_init();
+
+ if (btcoexist->binded)
+ return false;
+ else
+ btcoexist->binded = true;
+
+#if defined(CONFIG_PCI_HCI)
+ btcoexist->chip_interface = BTC_INTF_PCI;
+#elif defined(CONFIG_USB_HCI)
+ btcoexist->chip_interface = BTC_INTF_USB;
+#elif defined(CONFIG_SDIO_HCI)
+ btcoexist->chip_interface = BTC_INTF_SDIO;
+#elif defined(CONFIG_GSPI_HCI)
+ btcoexist->chip_interface = BTC_INTF_GSPI;
+#else
+ btcoexist->chip_interface = BTC_INTF_UNKNOWN;
+#endif
+
+ if (NULL == btcoexist->adapter)
+ btcoexist->adapter = adapter;
+
+ btcoexist->stack_info.profile_notified = false;
+
+ btcoexist->btc_read_1byte = halbtc_read_1byte;
+ btcoexist->btc_write_1byte = halbtc_write_1byte;
+ btcoexist->btc_write_1byte_bitmask = halbtc_bitmask_write_1byte;
+ btcoexist->btc_read_2byte = halbtc_read_2byte;
+ btcoexist->btc_write_2byte = halbtc_write_2byte;
+ btcoexist->btc_read_4byte = halbtc_read_4byte;
+ btcoexist->btc_write_4byte = halbtc_write_4byte;
+
+ btcoexist->btc_set_bb_reg = halbtc_set_bbreg;
+ btcoexist->btc_get_bb_reg = halbtc_get_bbreg;
+
+ btcoexist->btc_set_rf_reg = halbtc_set_rfreg;
+ btcoexist->btc_get_rf_reg = halbtc_get_rfreg;
+
+ btcoexist->btc_fill_h2c = halbtc_fill_h2c_cmd;
+ btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg;
+
+ btcoexist->btc_get = halbtc_get;
+ btcoexist->btc_set = halbtc_set;
+
+ btcoexist->cli_buf = &btc_dbg_buf[0];
+
+ btcoexist->bt_info.b_bt_ctrl_buf_size = false;
+ btcoexist->bt_info.agg_buf_size = 5;
+
+ btcoexist->bt_info.increase_scan_dev_num = false;
+ return true;
+}
+
+void exhalbtc_init_hw_config(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->statistics.cnt_init_hw_config++;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_init_hwconfig(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_init_hwconfig(btcoexist);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_init_hwconfig(btcoexist);
+ }
+
+}
+
+void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->statistics.cnt_init_coex_dm++;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_init_coex_dm(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_init_coex_dm(btcoexist);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_init_coex_dm(btcoexist);
+ }
+
+ btcoexist->initilized = true;
+}
+
+void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 ips_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_ips_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (ERFOFF == type)
+ ips_type = BTC_IPS_ENTER;
+ else
+ ips_type = BTC_IPS_LEAVE;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_ips_notify(btcoexist, ips_type);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_ips_notify(btcoexist, ips_type);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_ips_notify(btcoexist, ips_type);
+ }
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 lps_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_lps_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (EACTIVE == type)
+ lps_type = BTC_LPS_DISABLE;
+ else
+ lps_type = BTC_LPS_ENABLE;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_lps_notify(btcoexist, lps_type);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_lps_notify(btcoexist, lps_type);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_lps_notify(btcoexist, lps_type);
+ }
+}
+
+void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 scan_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_scan_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (type)
+ scan_type = BTC_SCAN_START;
+ else
+ scan_type = BTC_SCAN_FINISH;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_scan_notify(btcoexist, scan_type);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_scan_notify(btcoexist, scan_type);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_scan_notify(btcoexist, scan_type);
+ }
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 asso_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_connect_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (action)
+ asso_type = BTC_ASSOCIATE_START;
+ else
+ asso_type = BTC_ASSOCIATE_FINISH;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_connect_notify(btcoexist, asso_type);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_connect_notify(btcoexist, asso_type);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_connect_notify(btcoexist, asso_type);
+ }
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
+ enum rt_media_status media_status)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 status;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_media_status_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (RT_MEDIA_CONNECT == media_status)
+ status = BTC_MEDIA_CONNECT;
+ else
+ status = BTC_MEDIA_DISCONNECT;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_media_status_notify(btcoexist, status);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_media_status_notify(btcoexist, status);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_media_status_notify(btcoexist, status);
+ }
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type)
+{
+ u8 packet_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_special_packet_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ /*if(PACKET_DHCP == pkt_type)*/
+ packet_type = BTC_PACKET_DHCP;
+ /*else if(PACKET_EAPOL == pkt_type)
+ packet_type = BTC_PACKET_EAPOL;
+ else
+ packet_type = BTC_PACKET_UNKNOWN;*/
+
+ halbtc_leave_low_power();
+
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_special_packet_notify(btcoexist,
+ packet_type);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_special_packet_notify(btcoexist,
+ packet_type);
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmp_buf, u8 length)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_bt_info_notify++;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_bt_info_notify(btcoexist, tmp_buf, length);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_bt_info_notify(btcoexist, tmp_buf, length);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ /* ex_halbtc8192e2ant_bt_info_notify(btcoexist, tmp_buf, length); */
+ }
+}
+
+void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ u8 stack_op_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_stack_operation_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ stack_op_type = BTC_STACK_OP_NONE;
+}
+
+void exhalbtc_halt_notify(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_halt_notify(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_halt_notify(btcoexist);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_halt_notify(btcoexist);
+ }
+}
+
+void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+}
+
+void exhalbtc_periodical(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_periodical++;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_periodical(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_periodical(btcoexist);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_periodical(btcoexist);
+ }
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_dbg_control(struct btc_coexist *btcoexist,
+ u8 code, u8 len, u8 *data)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_dbg_ctrl++;
+}
+
+void exhalbtc_stack_update_profile_info(void)
+{
+}
+
+void exhalbtc_update_min_bt_rssi(char bt_rssi)
+{
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->stack_info.min_bt_rssi = bt_rssi;
+}
+
+void exhalbtc_set_hci_version(u16 hci_version)
+{
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->stack_info.hci_version = hci_version;
+}
+
+void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version)
+{
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->bt_info.bt_real_fw_ver = bt_patch_version;
+ btcoexist->bt_info.bt_hci_ver = bt_hci_version;
+}
+
+void exhalbtc_set_bt_exist(bool bt_exist)
+{
+ gl_bt_coexist.board_info.bt_exist = bt_exist;
+}
+
+void exhalbtc_set_chip_type(u8 chip_type)
+{
+ switch (chip_type) {
+ default:
+ case BT_2WIRE:
+ case BT_ISSC_3WIRE:
+ case BT_ACCEL:
+ case BT_RTL8756:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_UNDEF;
+ break;
+ case BT_CSR_BC4:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC4;
+ break;
+ case BT_CSR_BC8:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC8;
+ break;
+ case BT_RTL8723A:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723A;
+ break;
+ case BT_RTL8821A:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8821;
+ break;
+ case BT_RTL8723B:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723B;
+ break;
+ }
+}
+
+void exhalbtc_set_ant_num(u8 type, u8 ant_num)
+{
+ if (BT_COEX_ANT_TYPE_PG == type) {
+ gl_bt_coexist.board_info.pg_ant_num = ant_num;
+ gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+ } else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
+ gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+ }
+}
+
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_display_coex_info(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_display_coex_info(btcoexist);
+}
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.h b/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.h
new file mode 100644
index 000000000000..787798e76217
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.h
@@ -0,0 +1,549 @@
+#ifndef __HALBTC_OUT_SRC_H__
+#define __HALBTC_OUT_SRC_H__
+
+#include "../wifi.h"
+
+#define NORMAL_EXEC false
+#define FORCE_EXEC true
+
+#define BTC_RF_A RF90_PATH_A
+#define BTC_RF_B RF90_PATH_B
+#define BTC_RF_C RF90_PATH_C
+#define BTC_RF_D RF90_PATH_D
+
+#define BTC_SMSP SINGLEMAC_SINGLEPHY
+#define BTC_DMDP DUALMAC_DUALPHY
+#define BTC_DMSP DUALMAC_SINGLEPHY
+#define BTC_MP_UNKNOWN 0xff
+
+#define IN
+#define OUT
+
+#define BT_TMP_BUF_SIZE 100
+
+#define BT_COEX_ANT_TYPE_PG 0
+#define BT_COEX_ANT_TYPE_ANTDIV 1
+#define BT_COEX_ANT_TYPE_DETECTED 2
+
+#define BTC_MIMO_PS_STATIC 0
+#define BTC_MIMO_PS_DYNAMIC 1
+
+#define BTC_RATE_DISABLE 0
+#define BTC_RATE_ENABLE 1
+
+/* single Antenna definition */
+#define BTC_ANT_PATH_WIFI 0
+#define BTC_ANT_PATH_BT 1
+#define BTC_ANT_PATH_PTA 2
+/* dual Antenna definition */
+#define BTC_ANT_WIFI_AT_MAIN 0
+#define BTC_ANT_WIFI_AT_AUX 1
+/* coupler Antenna definition */
+#define BTC_ANT_WIFI_AT_CPL_MAIN 0
+#define BTC_ANT_WIFI_AT_CPL_AUX 1
+
+enum btc_chip_interface {
+ BTC_INTF_UNKNOWN = 0,
+ BTC_INTF_PCI = 1,
+ BTC_INTF_USB = 2,
+ BTC_INTF_SDIO = 3,
+ BTC_INTF_GSPI = 4,
+ BTC_INTF_MAX
+};
+
+enum btc_chip_type {
+ BTC_CHIP_UNDEF = 0,
+ BTC_CHIP_CSR_BC4 = 1,
+ BTC_CHIP_CSR_BC8 = 2,
+ BTC_CHIP_RTL8723A = 3,
+ BTC_CHIP_RTL8821 = 4,
+ BTC_CHIP_RTL8723B = 5,
+ BTC_CHIP_MAX
+};
+
+enum btc_msg_type {
+ BTC_MSG_INTERFACE = 0x0,
+ BTC_MSG_ALGORITHM = 0x1,
+ BTC_MSG_MAX
+};
+
+extern u32 btc_dbg_type[];
+
+/* following is for BTC_MSG_INTERFACE */
+#define INTF_INIT BIT0
+#define INTF_NOTIFY BIT2
+
+/* following is for BTC_MSG_ALGORITHM */
+#define ALGO_BT_RSSI_STATE BIT0
+#define ALGO_WIFI_RSSI_STATE BIT1
+#define ALGO_BT_MONITOR BIT2
+#define ALGO_TRACE BIT3
+#define ALGO_TRACE_FW BIT4
+#define ALGO_TRACE_FW_DETAIL BIT5
+#define ALGO_TRACE_FW_EXEC BIT6
+#define ALGO_TRACE_SW BIT7
+#define ALGO_TRACE_SW_DETAIL BIT8
+#define ALGO_TRACE_SW_EXEC BIT9
+
+
+
+#define CL_SPRINTF snprintf
+#define CL_PRINTF printk
+
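+/* Debug print helpers: output is emitted only when the dbgflag bit is
+ * set in btc_dbg_type[dbgtype] for the given message class. */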
+#define BTC_PRINT(dbgtype, dbgflag, printstr, ...) \
+ do { \
+ if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
+ printk(printstr, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+#define BTC_PRINT_F(dbgtype, dbgflag, printstr, ...) \
+ do { \
+ if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
+ printk(KERN_DEBUG "%s: ", __func__); \
+ printk(printstr, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+#define BTC_PRINT_ADDR(dbgtype, dbgflag, printstr, _ptr) \
+ do { \
+ if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) { \
+ int __i; \
+ u8 *__ptr = (u8 *)_ptr; \
+ printk printstr; \
+ for (__i = 0; __i < 6; __i++) \
+ printk("%02X%s", __ptr[__i], (__i == 5) ? "" : "-"); \
+ printk(KERN_DEBUG "\n"); \
+ } \
+ } while (0)
+
+#define BTC_PRINT_DATA(dbgtype, dbgflag, _titlestring, _hexdata, _hexdatalen) \
+ do { \
+ if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) { \
+ int __i; \
+ u8 *__ptr = (u8 *)_hexdata; \
+ printk(_titlestring); \
+ for (__i = 0; __i < (int)_hexdatalen; __i++) { \
+ printk("%02X%s", __ptr[__i], \
+ (((__i + 1) % 4) == 0) ? " " : " "); \
+ if (((__i + 1) % 16) == 0) \
+ printk("\n"); \
+ } \
+ printk(KERN_DEBUG "\n"); \
+ } \
+ } while (0)
+
+
+#define BTC_RSSI_HIGH(_rssi_) \
+ ((_rssi_ == BTC_RSSI_STATE_HIGH || \
+ _rssi_ == BTC_RSSI_STATE_STAY_HIGH) ? true : false)
+
+#define BTC_RSSI_MEDIUM(_rssi_) \
+ ((_rssi_ == BTC_RSSI_STATE_MEDIUM || \
+ _rssi_ == BTC_RSSI_STATE_STAY_MEDIUM) ? true : false)
+
+#define BTC_RSSI_LOW(_rssi_) \
+ ((_rssi_ == BTC_RSSI_STATE_LOW || \
+ _rssi_ == BTC_RSSI_STATE_STAY_LOW) ? true : false)
+
+
+enum btc_power_save_type {
+ BTC_PS_WIFI_NATIVE = 0,
+ BTC_PS_LPS_ON = 1,
+ BTC_PS_LPS_OFF = 2,
+ BTC_PS_LPS_MAX
+};
+
+struct btc_board_info {
+ /* The following is some board information */
+ u8 bt_chip_type;
+ u8 pg_ant_num; /* pg ant number */
+ u8 btdm_ant_num; /* ant number for btdm */
+ u8 btdm_ant_pos;
+ bool bt_exist;
+};
+
+enum btc_dbg_opcode {
+ BTC_DBG_SET_COEX_NORMAL = 0x0,
+ BTC_DBG_SET_COEX_WIFI_ONLY = 0x1,
+ BTC_DBG_SET_COEX_BT_ONLY = 0x2,
+ BTC_DBG_MAX
+};
+
+enum btc_rssi_state {
+ BTC_RSSI_STATE_HIGH = 0x0,
+ BTC_RSSI_STATE_MEDIUM = 0x1,
+ BTC_RSSI_STATE_LOW = 0x2,
+ BTC_RSSI_STATE_STAY_HIGH = 0x3,
+ BTC_RSSI_STATE_STAY_MEDIUM = 0x4,
+ BTC_RSSI_STATE_STAY_LOW = 0x5,
+ BTC_RSSI_MAX
+};
+
+enum btc_wifi_role {
+ BTC_ROLE_STATION = 0x0,
+ BTC_ROLE_AP = 0x1,
+ BTC_ROLE_IBSS = 0x2,
+ BTC_ROLE_HS_MODE = 0x3,
+ BTC_ROLE_MAX
+};
+
+enum btc_wifi_bw_mode {
+ BTC_WIFI_BW_LEGACY = 0x0,
+ BTC_WIFI_BW_HT20 = 0x1,
+ BTC_WIFI_BW_HT40 = 0x2,
+ BTC_WIFI_BW_MAX
+};
+
+enum btc_wifi_traffic_dir {
+ BTC_WIFI_TRAFFIC_TX = 0x0,
+ BTC_WIFI_TRAFFIC_RX = 0x1,
+ BTC_WIFI_TRAFFIC_MAX
+};
+
+enum btc_wifi_pnp {
+ BTC_WIFI_PNP_WAKE_UP = 0x0,
+ BTC_WIFI_PNP_SLEEP = 0x1,
+ BTC_WIFI_PNP_MAX
+};
+
+
+enum btc_get_type {
+ /* type bool */
+ BTC_GET_BL_HS_OPERATION,
+ BTC_GET_BL_HS_CONNECTING,
+ BTC_GET_BL_WIFI_CONNECTED,
+ BTC_GET_BL_WIFI_BUSY,
+ BTC_GET_BL_WIFI_SCAN,
+ BTC_GET_BL_WIFI_LINK,
+ BTC_GET_BL_WIFI_DHCP,
+ BTC_GET_BL_WIFI_SOFTAP_IDLE,
+ BTC_GET_BL_WIFI_SOFTAP_LINKING,
+ BTC_GET_BL_WIFI_IN_EARLY_SUSPEND,
+ BTC_GET_BL_WIFI_ROAM,
+ BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ BTC_GET_BL_WIFI_UNDER_5G,
+ BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ BTC_GET_BL_WIFI_ENABLE_ENCRYPTION,
+ BTC_GET_BL_WIFI_UNDER_B_MODE,
+ BTC_GET_BL_EXT_SWITCH,
+
+ /* type s4Byte */
+ BTC_GET_S4_WIFI_RSSI,
+ BTC_GET_S4_HS_RSSI,
+
+ /* type u32 */
+ BTC_GET_U4_WIFI_BW,
+ BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+ BTC_GET_U4_WIFI_FW_VER,
+ BTC_GET_U4_BT_PATCH_VER,
+
+ /* type u1Byte */
+ BTC_GET_U1_WIFI_DOT11_CHNL,
+ BTC_GET_U1_WIFI_CENTRAL_CHNL,
+ BTC_GET_U1_WIFI_HS_CHNL,
+ BTC_GET_U1_MAC_PHY_MODE,
+ BTC_GET_U1_AP_NUM,
+
+ /* for 1Ant */
+ BTC_GET_U1_LPS_MODE,
+ BTC_GET_BL_BT_SCO_BUSY,
+
+ /* for test mode */
+ BTC_GET_DRIVER_TEST_CFG,
+#if 0
+ BTC_GET_U1_LPS,
+ BTC_GET_U1_RPWM,
+#endif
+ BTC_GET_MAX
+};
+
+
+enum btc_set_type {
+ /* type bool */
+ BTC_SET_BL_BT_DISABLE,
+ BTC_SET_BL_BT_TRAFFIC_BUSY,
+ BTC_SET_BL_BT_LIMITED_DIG,
+ BTC_SET_BL_FORCE_TO_ROAM,
+ BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+ BTC_SET_BL_BT_CTRL_AGG_SIZE,
+ BTC_SET_BL_INC_SCAN_DEV_NUM,
+
+ /* type u1Byte */
+ BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
+ BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE,
+ BTC_SET_UI_SCAN_SIG_COMPENSATION,
+ BTC_SET_U1_AGG_BUF_SIZE,
+
+ /* type trigger some action */
+ BTC_SET_ACT_GET_BT_RSSI,
+ BTC_SET_ACT_AGGREGATE_CTRL,
+
+ /********* for 1Ant **********/
+ /* type bool */
+ BTC_SET_BL_BT_SCO_BUSY,
+ /* type u1Byte */
+ BTC_SET_U1_1ANT_LPS,
+ BTC_SET_U1_1ANT_RPWM,
+ /* type trigger some action */
+ BTC_SET_ACT_LEAVE_LPS,
+ BTC_SET_ACT_ENTER_LPS,
+ BTC_SET_ACT_NORMAL_LPS,
+ BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ BTC_SET_ACT_UPDATE_ra_mask,
+ BTC_SET_ACT_SEND_MIMO_PS,
+ /* BT Coex related */
+ BTC_SET_ACT_CTRL_BT_INFO,
+ BTC_SET_ACT_CTRL_BT_COEX,
+ /***************************/
+ BTC_SET_MAX
+};
+
+enum btc_dbg_disp_type {
+ BTC_DBG_DISP_COEX_STATISTICS = 0x0,
+ BTC_DBG_DISP_BT_LINK_INFO = 0x1,
+ BTC_DBG_DISP_BT_FW_VER = 0x2,
+ BTC_DBG_DISP_FW_PWR_MODE_CMD = 0x3,
+ BTC_DBG_DISP_MAX
+};
+
+enum btc_notify_type_ips {
+ BTC_IPS_LEAVE = 0x0,
+ BTC_IPS_ENTER = 0x1,
+ BTC_IPS_MAX
+};
+
+enum btc_notify_type_lps {
+ BTC_LPS_DISABLE = 0x0,
+ BTC_LPS_ENABLE = 0x1,
+ BTC_LPS_MAX
+};
+
+enum btc_notify_type_scan {
+ BTC_SCAN_FINISH = 0x0,
+ BTC_SCAN_START = 0x1,
+ BTC_SCAN_MAX
+};
+
+enum btc_notify_type_associate {
+ BTC_ASSOCIATE_FINISH = 0x0,
+ BTC_ASSOCIATE_START = 0x1,
+ BTC_ASSOCIATE_MAX
+};
+
+enum btc_notify_type_media_status {
+ BTC_MEDIA_DISCONNECT = 0x0,
+ BTC_MEDIA_CONNECT = 0x1,
+ BTC_MEDIA_MAX
+};
+
+enum btc_notify_type_special_packet {
+ BTC_PACKET_UNKNOWN = 0x0,
+ BTC_PACKET_DHCP = 0x1,
+ BTC_PACKET_ARP = 0x2,
+ BTC_PACKET_EAPOL = 0x3,
+ BTC_PACKET_MAX
+};
+
+enum btc_notify_type_stack_operation {
+ BTC_STACK_OP_NONE = 0x0,
+ BTC_STACK_OP_INQ_PAGE_PAIR_START = 0x1,
+ BTC_STACK_OP_INQ_PAGE_PAIR_FINISH = 0x2,
+ BTC_STACK_OP_MAX
+};
+
+
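+/* I/O callback types; the coexist context binds these to the driver's
+ * halbtc_* wrappers (see the btc_* assignments in halbtcoutsrc.c), so
+ * the coexistence algorithms never touch registers directly. */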
+typedef u8 (*bfp_btc_r1)(void *btc_context, u32 reg_addr);
+
+typedef u16 (*bfp_btc_r2)(void *btc_context, u32 reg_addr);
+
+typedef u32 (*bfp_btc_r4)(void *btc_context, u32 reg_addr);
+
+typedef void (*bfp_btc_w1)(void *btc_context, u32 reg_addr, u8 data);
+
+typedef void (*bfp_btc_w1_bit_mak)(void *btc_context, u32 reg_addr,
+ u8 bit_mask, u8 data1b);
+
+typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data);
+
+typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data);
+
+typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr,
+ u8 bit_mask, u8 data);
+
+typedef void (*bfp_btc_set_bb_reg)(void *btc_context, u32 reg_addr,
+ u32 bit_mask, u32 data);
+
+typedef u32 (*bfp_btc_get_bb_reg)(void *btc_context, u32 reg_addr,
+ u32 bit_mask);
+
+typedef void (*bfp_btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr,
+ u32 bit_mask, u32 data);
+
+typedef u32 (*bfp_btc_get_rf_reg)(void *btc_context, u8 rf_path,
+ u32 reg_addr, u32 bit_mask);
+
+typedef void (*bfp_btc_fill_h2c)(void *btc_context, u8 element_id,
+ u32 cmd_len, u8 *cmd_buffer);
+
+typedef bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf);
+
+typedef bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf);
+
+typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type);
+
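+/* BT-side state shared with the coexistence algorithms: firmware and
+ * HCI versions, aggregation control, and the 1-ant LPS/RPWM fields. */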
+struct btc_bt_info {
+ bool bt_disabled;
+ u8 rssi_adjust_for_agc_table_on;
+ u8 rssi_adjust_for_1ant_coex_type;
+ bool bt_busy;
+ u8 agg_buf_size;
+ bool limited_dig;
+ bool reject_agg_pkt;
+ bool b_bt_ctrl_buf_size;
+ bool increase_scan_dev_num;
+ u16 bt_hci_ver;
+ u16 bt_real_fw_ver;
+ u8 bt_fw_ver;
+
+ /* the following is for 1Ant solution */
+ bool bt_ctrl_lps;
+ bool bt_pwr_save_mode;
+ bool bt_lps_on;
+ bool force_to_roam;
+ u8 force_exec_pwr_cmd_cnt;
+ u8 lps_1ant;
+ u8 rpwm_1ant;
+ u32 ra_mask;
+};
+
+struct btc_stack_info {
+ bool profile_notified;
+ u16 hci_version; /* stack hci version */
+ u8 num_of_link;
+ bool bt_link_exist;
+ bool sco_exist;
+ bool acl_exist;
+ bool a2dp_exist;
+ bool hid_exist;
+ u8 num_of_hid;
+ bool pan_exist;
+ bool unknown_acl_exist;
+ char min_bt_rssi;
+};
+
+struct btc_statistics {
+ u32 cnt_bind;
+ u32 cnt_init_hw_config;
+ u32 cnt_init_coex_dm;
+ u32 cnt_ips_notify;
+ u32 cnt_lps_notify;
+ u32 cnt_scan_notify;
+ u32 cnt_connect_notify;
+ u32 cnt_media_status_notify;
+ u32 cnt_special_packet_notify;
+ u32 cnt_bt_info_notify;
+ u32 cnt_periodical;
+ u32 cnt_stack_operation_notify;
+ u32 cnt_dbg_ctrl;
+};
+
+struct btc_bt_link_info {
+ bool bt_link_exist;
+ bool sco_exist;
+ bool sco_only;
+ bool a2dp_exist;
+ bool a2dp_only;
+ bool hid_exist;
+ bool hid_only;
+ bool pan_exist;
+ bool pan_only;
+};
+
+enum btc_antenna_pos {
+ BTC_ANTENNA_AT_MAIN_PORT = 0x1,
+ BTC_ANTENNA_AT_AUX_PORT = 0x2,
+};
+
+struct btc_coexist {
+ /* make sure only one adapter can bind the data context */
+ bool binded;
+ /* default adapter */
+ void *adapter;
+ struct btc_board_info board_info;
+ /* some bt info referenced by non-bt module */
+ struct btc_bt_info bt_info;
+ struct btc_stack_info stack_info;
+ enum btc_chip_interface chip_interface;
+ struct btc_bt_link_info bt_link_info;
+
+ bool initilized;
+ bool stop_coex_dm;
+ bool manual_control;
+ u8 *cli_buf;
+ struct btc_statistics statistics;
+ u8 pwr_mode_val[10];
+
+ /* function pointers - io related */
+ bfp_btc_r1 btc_read_1byte;
+ bfp_btc_w1 btc_write_1byte;
+ bfp_btc_w1_bit_mak btc_write_1byte_bitmask;
+ bfp_btc_r2 btc_read_2byte;
+ bfp_btc_w2 btc_write_2byte;
+ bfp_btc_r4 btc_read_4byte;
+ bfp_btc_w4 btc_write_4byte;
+
+ bfp_btc_set_bb_reg btc_set_bb_reg;
+ bfp_btc_get_bb_reg btc_get_bb_reg;
+
+
+ bfp_btc_set_rf_reg btc_set_rf_reg;
+ bfp_btc_get_rf_reg btc_get_rf_reg;
+
+
+ bfp_btc_fill_h2c btc_fill_h2c;
+
+ bfp_btc_disp_dbg_msg btc_disp_dbg_msg;
+
+ bfp_btc_get btc_get;
+ bfp_btc_set btc_set;
+};
+
+bool halbtc_is_wifi_uplink(struct rtl_priv *adapter);
+
+
+extern struct btc_coexist gl_bt_coexist;
+
+bool exhalbtc_initlize_variables(struct rtl_priv *adapter);
+void exhalbtc_init_hw_config(struct btc_coexist *btcoexist);
+void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist);
+void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action);
+void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
+ enum rt_media_status media_status);
+void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type);
+void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf,
+ u8 length);
+void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_halt_notify(struct btc_coexist *btcoexist);
+void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
+void exhalbtc_periodical(struct btc_coexist *btcoexist);
+void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len,
+ u8 *data);
+void exhalbtc_stack_update_profile_info(void);
+void exhalbtc_set_hci_version(u16 hci_version);
+void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version);
+void exhalbtc_update_min_bt_rssi(char bt_rssi);
+void exhalbtc_set_bt_exist(bool bt_exist);
+void exhalbtc_set_chip_type(u8 chip_type);
+void exhalbtc_set_ant_num(u8 type, u8 ant_num);
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist);
+void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
+ u8 *rssi_wifi, u8 *rssi_bt);
+void exhalbtc_lps_leave(struct btc_coexist *btcoexist);
+void exhalbtc_low_wifi_traffic_notify(struct btc_coexist *btcoexist);
+#endif
diff --git a/drivers/staging/rtl8821ae/btcoexist/rtl_btc.c b/drivers/staging/rtl8821ae/btcoexist/rtl_btc.c
new file mode 100644
index 000000000000..6653f147757c
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/rtl_btc.c
@@ -0,0 +1,236 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
+#include "rtl_btc.h"
+#include "halbt_precomp.h"
+
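+/* Callback table returned to the core driver by rtl_btc_get_ops_pointer();
+ * each rtl_btc_* wrapper forwards to the exhalbtc_* layer through the
+ * global gl_bt_coexist context. */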
+struct rtl_btc_ops rtl_btc_operation = {
+ .btc_init_variables = rtl_btc_init_variables,
+ .btc_init_hal_vars = rtl_btc_init_hal_vars,
+ .btc_init_hw_config = rtl_btc_init_hw_config,
+ .btc_ips_notify = rtl_btc_ips_notify,
+ .btc_scan_notify = rtl_btc_scan_notify,
+ .btc_connect_notify = rtl_btc_connect_notify,
+ .btc_mediastatus_notify = rtl_btc_mediastatus_notify,
+ .btc_periodical = rtl_btc_periodical,
+ .btc_halt_notify = rtl_btc_halt_notify,
+ .btc_btinfo_notify = rtl_btc_btinfo_notify,
+ .btc_is_limited_dig = rtl_btc_is_limited_dig,
+ .btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo,
+ .btc_is_bt_disabled = rtl_btc_is_bt_disabled,
+};
+
+void rtl_btc_init_variables(struct rtl_priv *rtlpriv)
+{
+ exhalbtc_initlize_variables(rtlpriv);
+}
+
+void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
+{
+ u8 ant_num;
+ u8 bt_exist;
+ u8 bt_type;
+
+ ant_num = rtl_get_hwpg_ant_num(rtlpriv);
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("%s, antNum is %d\n", __func__, ant_num));
+
+ bt_exist = rtl_get_hwpg_bt_exist(rtlpriv);
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("%s, bt_exist is %d\n", __func__, bt_exist));
+ exhalbtc_set_bt_exist(bt_exist);
+
+ bt_type = rtl_get_hwpg_bt_type(rtlpriv);
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("%s, bt_type is %d\n", __func__, bt_type));
+ exhalbtc_set_chip_type(bt_type);
+
+ exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num);
+}
+
+
+void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
+{
+ exhalbtc_init_hw_config(&gl_bt_coexist);
+ exhalbtc_init_coex_dm(&gl_bt_coexist);
+}
+
+
+void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type)
+{
+ exhalbtc_ips_notify(&gl_bt_coexist, type);
+}
+
+
+void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype)
+{
+ exhalbtc_scan_notify(&gl_bt_coexist, scantype);
+}
+
+
+void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action)
+{
+ exhalbtc_connect_notify(&gl_bt_coexist, action);
+}
+
+
+void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, enum rt_media_status mstatus)
+{
+ exhalbtc_mediastatus_notify(&gl_bt_coexist, mstatus);
+}
+
+void rtl_btc_periodical(struct rtl_priv *rtlpriv)
+{
+ /* rtl_bt_dm_monitor(); */
+ exhalbtc_periodical(&gl_bt_coexist);
+}
+
+void rtl_btc_halt_notify(void)
+{
+ exhalbtc_halt_notify(&gl_bt_coexist);
+}
+
+void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
+{
+ exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length);
+}
+
+bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv)
+{
+ return gl_bt_coexist.bt_info.limited_dig;
+}
+
+bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv)
+{
+ bool bt_change_edca = false;
+ u32 cur_edca_val;
+ u32 edca_bt_hs_uplink = 0x5ea42b, edca_bt_hs_downlink = 0x5ea42b;
+ u32 edca_hs;
+ u32 edca_addr = 0x504;
+
+ cur_edca_val = rtl_read_dword(rtlpriv, edca_addr);
+ if (halbtc_is_wifi_uplink(rtlpriv)) {
+ if (cur_edca_val != edca_bt_hs_uplink) {
+ edca_hs = edca_bt_hs_uplink;
+ bt_change_edca = true;
+ }
+ } else {
+ if (cur_edca_val != edca_bt_hs_downlink) {
+ edca_hs = edca_bt_hs_downlink;
+ bt_change_edca = true;
+ }
+ }
+
+ if (bt_change_edca)
+ rtl_write_dword(rtlpriv, edca_addr, edca_hs);
+
+ return true;
+}
+
+bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv)
+{
+ return gl_bt_coexist.bt_info.bt_disabled;
+}
+
+struct rtl_btc_ops *rtl_btc_get_ops_pointer(void)
+{
+ return &rtl_btc_operation;
+}
+//EXPORT_SYMBOL(rtl_btc_get_ops_pointer);
+
+u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
+{
+ u8 num;
+
+ if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
+ num = 2;
+ else
+ num = 1;
+
+ return num;
+}
+
+#if 0
+enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ enum rt_media_status m_status = RT_MEDIA_DISCONNECT;
+
+ u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
+
+ if(bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+ m_status = RT_MEDIA_CONNECT;
+ }
+
+ return m_status;
+}
+#endif
+
+u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv)
+{
+ return rtlpriv->btcoexist.btc_info.btcoexist;
+}
+
+u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
+{
+ return rtlpriv->btcoexist.btc_info.bt_type;
+}
+
+
+#if 0
+
+MODULE_AUTHOR("Page He <page_he@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
+
+static int __init rtl_btcoexist_module_init(void)
+{
+
+ //printk("%s, rtlpriv->btc_ops.btc_init_variables addr is %p\n", __func__, rtlpriv->btc_ops.btc_init_variables);
+
+ return 0;
+}
+
+static void __exit rtl_btcoexist_module_exit(void)
+{
+ return;
+}
+
+module_init(rtl_btcoexist_module_init);
+module_exit(rtl_btcoexist_module_exit);
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/rtl_btc.h b/drivers/staging/rtl8821ae/btcoexist/rtl_btc.h
new file mode 100644
index 000000000000..452fbf1e6d1e
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/rtl_btc.h
@@ -0,0 +1,66 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_BTC_H__
+#define __RTL_BTC_H__
+
+#include "halbt_precomp.h"
+
+
+
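+/* Wrappers exported to the rtl core through struct rtl_btc_ops. */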
+void rtl_btc_init_variables(struct rtl_priv *rtlpriv);
+void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv);
+void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv);
+void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type);
+void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype);
+void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action);
+void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, enum rt_media_status mstatus);
+void rtl_btc_periodical(struct rtl_priv *rtlpriv);
+void rtl_btc_halt_notify(void);
+void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length);
+bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv);
+bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv);
+bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv);
+
+
+//extern struct rtl_btc_ops rtl_btc_operation;
+extern struct rtl_btc_ops *rtl_btc_get_ops_pointer(void);
+
+u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv);
+//enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/staging/rtl8821ae/cam.c b/drivers/staging/rtl8821ae/cam.c
new file mode 100644
index 000000000000..72743e78954b
--- /dev/null
+++ b/drivers/staging/rtl8821ae/cam.c
@@ -0,0 +1,354 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include "wifi.h"
+#include "cam.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#include <linux/export.h>
+#endif
+
+void rtl_cam_reset_sec_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->sec.use_defaultkey = false;
+ rtlpriv->sec.pairwise_enc_algorithm = NO_ENCRYPTION;
+ rtlpriv->sec.group_enc_algorithm = NO_ENCRYPTION;
+ memset(rtlpriv->sec.key_buf, 0, KEY_BUF_SIZE * MAX_KEY_LEN);
+ memset(rtlpriv->sec.key_len, 0, KEY_BUF_SIZE);
+ rtlpriv->sec.pairwise_key = NULL;
+}
+
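+/* Program one hardware CAM entry as CAM_CONTENT_COUNT dwords: dword 0
+ * carries MAC bytes 0-1 plus the config word, dword 1 the remaining
+ * MAC bytes, and the rest the 128-bit key material. */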
+static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
+ u8 *mac_addr, u8 *key_cont_128, u16 us_config)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ u32 target_command;
+ u32 target_content = 0;
+ u8 entry_i;
+
+ RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_DMESG, "Key content :",
+ key_cont_128, 16);
+
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ target_command = entry_i + CAM_CONTENT_COUNT * entry_no;
+ target_command = target_command | BIT(31) | BIT(16);
+
+ if (entry_i == 0) {
+ target_content = (u32) (*(mac_addr + 0)) << 16 |
+ (u32) (*(mac_addr + 1)) << 24 | (u32) us_config;
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
+ target_content);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_command);
+
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("WRITE %x: %x \n",
+ rtlpriv->cfg->maps[WCAMI], target_content));
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("The Key ID is %d\n", entry_no));
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("WRITE %x: %x \n",
+ rtlpriv->cfg->maps[RWCAM], target_command));
+
+ } else if (entry_i == 1) {
+
+ target_content = (u32) (*(mac_addr + 5)) << 24 |
+ (u32) (*(mac_addr + 4)) << 16 |
+ (u32) (*(mac_addr + 3)) << 8 |
+ (u32) (*(mac_addr + 2));
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
+ target_content);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_command);
+
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("WRITE A4: %x \n", target_content));
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("WRITE A0: %x \n", target_command));
+
+ } else {
+
+ target_content =
+ (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 3)) <<
+ 24 | (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 2))
+ << 16 |
+ (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 1)) << 8
+ | (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 0));
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
+ target_content);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_command);
+ udelay(100);
+
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("WRITE A4: %x \n", target_content));
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("WRITE A0: %x \n", target_command));
+ }
+ }
+
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("after set key, usconfig:%x\n", us_config));
+}
+
+u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+ u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+ u32 ul_default_key, u8 *key_content)
+{
+ u32 us_config;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, "
+ "ulUseDK=%x MacAddr %pM\n",
+ ul_entry_idx, ul_key_id, ul_enc_alg,
+ ul_default_key, mac_addr));
+
+ if (ul_key_id == TOTAL_CAM_ENTRY) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("ulKeyId exceed!\n"));
+ return 0;
+ }
+
+ if (ul_default_key == 1) {
+ us_config = CFG_VALID | ((u16) (ul_enc_alg) << 2);
+ } else {
+ us_config = CFG_VALID | ((ul_enc_alg) << 2) | ul_key_id;
+ }
+
+ rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
+ (u8 *) key_content, us_config);
+
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("end \n"));
+
+ return 1;
+
+}
+//EXPORT_SYMBOL(rtl_cam_add_one_entry);
+
+int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
+ u8 *mac_addr, u32 ul_key_id)
+{
+ u32 ul_command;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("key_idx:%d\n", ul_key_id));
+
+ ul_command = ul_key_id * CAM_CONTENT_COUNT;
+ ul_command = ul_command | BIT(31) | BIT(16);
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], 0);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
+
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("rtl_cam_delete_one_entry(): WRITE A4: %x \n", 0));
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("rtl_cam_delete_one_entry(): WRITE A0: %x \n", ul_command));
+
+ return 0;
+
+}
+//EXPORT_SYMBOL(rtl_cam_delete_one_entry);
+
+void rtl_cam_reset_all_entry(struct ieee80211_hw *hw)
+{
+ u32 ul_command;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ ul_command = BIT(31) | BIT(30);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
+}
+//EXPORT_SYMBOL(rtl_cam_reset_all_entry);
+
+void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ u32 ul_command;
+ u32 ul_content;
+ u32 ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
+
+ switch (rtlpriv->sec.pairwise_enc_algorithm) {
+ case WEP40_ENCRYPTION:
+ ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_WEP40];
+ break;
+ case WEP104_ENCRYPTION:
+ ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_WEP104];
+ break;
+ case TKIP_ENCRYPTION:
+ ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_TKIP];
+ break;
+ case AESCCMP_ENCRYPTION:
+ ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ break;
+ default:
+ ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ }
+
+ ul_content = (uc_index & 3) | ((u16) (ul_enc_algo) << 2);
+
+ ul_content |= BIT(15);
+ ul_command = CAM_CONTENT_COUNT * uc_index;
+ ul_command = ul_command | BIT(31) | BIT(16);
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
+
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("rtl_cam_mark_invalid(): WRITE A4: %x \n", ul_content));
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("rtl_cam_mark_invalid(): WRITE A0: %x \n", ul_command));
+}
+//EXPORT_SYMBOL(rtl_cam_mark_invalid);
+
+void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ u32 ul_command;
+ u32 ul_content;
+ u32 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ u8 entry_i;
+
+ switch (rtlpriv->sec.pairwise_enc_algorithm) {
+ case WEP40_ENCRYPTION:
+ ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_WEP40];
+ break;
+ case WEP104_ENCRYPTION:
+ ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_WEP104];
+ break;
+ case TKIP_ENCRYPTION:
+ ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_TKIP];
+ break;
+ case AESCCMP_ENCRYPTION:
+ ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ break;
+ default:
+ ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ }
+
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+
+ if (entry_i == 0) {
+ ul_content =
+ (uc_index & 0x03) | ((u16) (ul_encalgo) << 2);
+ ul_content |= BIT(15);
+
+ } else {
+ ul_content = 0;
+ }
+
+ ul_command = CAM_CONTENT_COUNT * uc_index + entry_i;
+ ul_command = ul_command | BIT(31) | BIT(16);
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
+
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("rtl_cam_empty_entry(): WRITE A4: %x \n",
+ ul_content));
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("rtl_cam_empty_entry(): WRITE A0: %x \n",
+ ul_command));
+ }
+
+}
+//EXPORT_SYMBOL(rtl_cam_empty_entry);
+
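+/* CAM entries 0-3 are reserved; the free-entry search and the
+ * hwsec_cam_bitmap/hwsec_cam_sta_addr bookkeeping for stations
+ * start at index 4. */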
+u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> 4;
+ u8 entry_idx = 0;
+ u8 i, *addr;
+
+ if (NULL == sta_addr) {
+ RT_TRACE(COMP_SEC, DBG_EMERG,
+ ("sta_addr is NULL.\n"));
+ return TOTAL_CAM_ENTRY;
+ }
+ /* Does STA already exist? */
+ for (i = 4; i < TOTAL_CAM_ENTRY; i++) {
+ addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
+ if (memcmp(addr, sta_addr, ETH_ALEN) == 0)
+ return i;
+ }
+ /* Get a free CAM entry. */
+ for (entry_idx = 4; entry_idx < TOTAL_CAM_ENTRY; entry_idx++) {
+ if ((bitmap & BIT(0)) == 0) {
+ RT_TRACE(COMP_SEC, DBG_EMERG,
+ ("-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n",
+ rtlpriv->sec.hwsec_cam_bitmap, entry_idx));
+ rtlpriv->sec.hwsec_cam_bitmap |= BIT(0) << entry_idx;
+ memcpy(rtlpriv->sec.hwsec_cam_sta_addr[entry_idx],
+ sta_addr, ETH_ALEN);
+ return entry_idx;
+ }
+ bitmap = bitmap >> 1;
+ }
+ return TOTAL_CAM_ENTRY;
+}
+//EXPORT_SYMBOL(rtl_cam_get_free_entry);
+
+void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 bitmap;
+ u8 i, *addr;
+
+ if (NULL == sta_addr) {
+ RT_TRACE(COMP_SEC, DBG_EMERG,
+ ("sta_addr is NULL.\n"));
+ return;
+ }
+
+ if ((sta_addr[0] | sta_addr[1] | sta_addr[2] | sta_addr[3] |
+ sta_addr[4] | sta_addr[5]) == 0) {
+ RT_TRACE(COMP_SEC, DBG_EMERG,
+ ("sta_addr is 00:00:00:00:00:00.\n"));
+ return;
+ }
+ /* Does STA already exist? */
+ for (i = 4; i < TOTAL_CAM_ENTRY; i++) {
+ addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
+ bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> i;
+ if (((bitmap & BIT(0)) == BIT(0)) &&
+ (memcmp(addr, sta_addr, ETH_ALEN) == 0)) {
+ /* Remove from HW Security CAM */
+ memset(rtlpriv->sec.hwsec_cam_sta_addr[i], 0, ETH_ALEN);
+ rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
+ printk("&&&&&&&&&del entry %d\n",i);
+ }
+ }
+}
+//EXPORT_SYMBOL(rtl_cam_del_entry);
diff --git a/drivers/staging/rtl8821ae/cam.h b/drivers/staging/rtl8821ae/cam.h
new file mode 100644
index 000000000000..326fa6784ae5
--- /dev/null
+++ b/drivers/staging/rtl8821ae/cam.h
@@ -0,0 +1,56 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_CAM_H_
+#define __RTL_CAM_H_
+
+#define CAM_CONTENT_COUNT 8
+
+#define CFG_DEFAULT_KEY BIT(5)
+#define CFG_VALID BIT(15)
+
+#define PAIRWISE_KEYIDX 0
+#define CAM_PAIRWISE_KEY_POSITION 4
+
+#define CAM_CONFIG_USEDK 1
+#define CAM_CONFIG_NO_USEDK 0
+
+extern void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
+extern u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+ u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+ u32 ul_default_key, u8 *key_content);
+int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+ u32 ul_key_id);
+void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
+void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index);
+void rtl_cam_reset_sec_info(struct ieee80211_hw *hw);
+u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr);
+void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr);
+
+#endif
diff --git a/drivers/staging/rtl8821ae/compat.h b/drivers/staging/rtl8821ae/compat.h
new file mode 100644
index 000000000000..68269cc2d477
--- /dev/null
+++ b/drivers/staging/rtl8821ae/compat.h
@@ -0,0 +1,125 @@
+#ifndef __RTL_COMPAT_H__
+#define __RTL_COMPAT_H__
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+/*
+ * Use this if you want to use the same suspend and resume callbacks for suspend
+ * to RAM and hibernation.
+ */
+#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+struct dev_pm_ops name = { \
+ .suspend = suspend_fn, \
+ .resume = resume_fn, \
+ .freeze = suspend_fn, \
+ .thaw = resume_fn, \
+ .poweroff = suspend_fn, \
+ .restore = resume_fn, \
+}
+
+#define compat_pci_suspend(fn) \
+ int fn##_compat(struct pci_dev *pdev, pm_message_t state) \
+ { \
+ int r; \
+ \
+ r = fn(&pdev->dev); \
+ if (r) \
+ return r; \
+ \
+ pci_save_state(pdev); \
+ pci_disable_device(pdev); \
+ pci_set_power_state(pdev, PCI_D3hot); \
+ \
+ return 0; \
+ }
+
+#define compat_pci_resume(fn) \
+ int fn##_compat(struct pci_dev *pdev) \
+ { \
+ int r; \
+ \
+ pci_set_power_state(pdev, PCI_D0); \
+ r = pci_enable_device(pdev); \
+ if (r) \
+ return r; \
+ pci_restore_state(pdev); \
+ \
+ return fn(&pdev->dev); \
+ }
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
+#define RX_FLAG_MACTIME_MPDU RX_FLAG_TSFT
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+#define RX_FLAG_MACTIME_MPDU RX_FLAG_MACTIME_START
+#else
+#endif
+//#define NETDEV_TX_OK
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+#define IEEE80211_KEY_FLAG_SW_MGMT IEEE80211_KEY_FLAG_SW_MGMT_TX
+#endif
+
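+/* Local copy of the mac80211 management-frame action layout, kept here
+ * so the driver builds the same way across the kernel versions handled
+ * by this compat header. */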
+struct ieee80211_mgmt_compat {
+ __le16 frame_control;
+ __le16 duration;
+ u8 da[6];
+ u8 sa[6];
+ u8 bssid[6];
+ __le16 seq_ctrl;
+ union {
+ struct {
+ u8 category;
+ union {
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 status_code;
+ u8 variable[0];
+ } __attribute__ ((packed)) wme_action;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ __le16 capab;
+ __le16 timeout;
+ __le16 start_seq_num;
+ } __attribute__((packed)) addba_req;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ __le16 status;
+ __le16 capab;
+ __le16 timeout;
+ } __attribute__((packed)) addba_resp;
+ struct {
+ u8 action_code;
+ __le16 params;
+ __le16 reason_code;
+ } __attribute__((packed)) delba;
+ struct {
+ u8 action_code;
+ /* capab_info for open and confirm,
+ * reason for close
+ */
+ __le16 aux;
+ /* Followed in plink_confirm by status
+ * code, AID and supported rates,
+ * and directly by supported rates in
+ * plink_open and plink_close
+ */
+ u8 variable[0];
+ } __attribute__((packed)) plink_action;
+ struct {
+ u8 action_code;
+ u8 variable[0];
+ } __attribute__((packed)) mesh_action;
+ struct {
+ u8 action;
+ u8 smps_control;
+ } __attribute__ ((packed)) ht_smps;
+ } u;
+ } __attribute__ ((packed)) action;
+ } u;
+} __attribute__ ((packed));
+#endif
diff --git a/drivers/staging/rtl8821ae/core.c b/drivers/staging/rtl8821ae/core.c
new file mode 100644
index 000000000000..40de6089039e
--- /dev/null
+++ b/drivers/staging/rtl8821ae/core.c
@@ -0,0 +1,1464 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "core.h"
+#include "cam.h"
+#include "base.h"
+#include "ps.h"
+
+#include "btcoexist/rtl_btc.h"
+
+/*mutex for start & stop is must here. */
+static int rtl_op_start(struct ieee80211_hw *hw)
+{
+ int err = 0;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (!is_hal_stop(rtlhal))
+ return 0;
+ if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+ return 0;
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+ err = rtlpriv->intf_ops->adapter_start(hw);
+ if (err)
+ goto out;
+ rtl_watch_dog_timer_callback((unsigned long)hw);
+
+out:
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+ return err;
+}
+
+static void rtl_op_stop(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ if (is_hal_stop(rtlhal))
+ return;
+
+ /* This is required because adhoc does stop and start,
+ * but stopping with the RF off can cause problems,
+ * e.g. poor adhoc throughput. */
+ if (unlikely(ppsc->rfpwr_state == ERFOFF))
+ rtl_ips_nic_on(hw);
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+
+ mac->link_state = MAC80211_NOLINK;
+ memset(mac->bssid, 0, 6);
+ mac->vendor = PEER_UNKNOWN;
+
+ /*reset sec info */
+ rtl_cam_reset_sec_info(hw);
+
+ rtl_deinit_deferred_work(hw);
+ rtlpriv->intf_ops->adapter_stop(hw);
+
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
+static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_tcb_desc tcb_desc;
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+
+ if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
+ goto err_free;
+
+ if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+ goto err_free;
+
+ if (!rtlpriv->intf_ops->waitq_insert(hw, skb))
+ rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+
+ return NETDEV_TX_OK;
+
+err_free:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+#else
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+#else
+/*<delete in kernel end>*/
+static void rtl_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_tcb_desc tcb_desc;
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+
+ if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
+ goto err_free;
+
+ if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+ goto err_free;
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ if (!rtlpriv->intf_ops->waitq_insert(hw, skb))
+ rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+#else
+/*<delete in kernel end>*/
+ if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb))
+ rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+ return;
+
+err_free:
+ dev_kfree_skb_any(skb);
+ return;
+}
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+
+static int rtl_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ int err = 0;
+
+ if (mac->vif) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("vif has been set!! mac->vif = 0x%p\n", mac->vif));
+ return -EOPNOTSUPP;
+ }
+
+/*This flag is not defined before kernel 3.4*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+#endif
+
+ rtl_ips_nic_on(hw);
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_P2P_CLIENT:
+ mac->p2p = P2P_ROLE_CLIENT;
+ /*fall through*/
+#else
+/*<delete in kernel end>*/
+ switch (vif->type) {
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+ case NL80211_IFTYPE_STATION:
+ if (mac->beacon_enabled == 1) {
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("NL80211_IFTYPE_STATION \n"));
+ mac->beacon_enabled = 0;
+ rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
+ rtlpriv->cfg->maps[RTL_IBSS_INT_MASKS]);
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("NL80211_IFTYPE_ADHOC \n"));
+
+ mac->link_state = MAC80211_LINKED;
+ rtlpriv->cfg->ops->set_bcn_reg(hw);
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
+ mac->basic_rates = 0xfff;
+ else
+ mac->basic_rates = 0xff0;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+ (u8 *) (&mac->basic_rates));
+
+ break;
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+ case NL80211_IFTYPE_P2P_GO:
+ mac->p2p = P2P_ROLE_GO;
+ /*fall through*/
+#endif
+/*<delete in kernel end>*/
+ case NL80211_IFTYPE_AP:
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("NL80211_IFTYPE_AP \n"));
+
+ mac->link_state = MAC80211_LINKED;
+ rtlpriv->cfg->ops->set_bcn_reg(hw);
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
+ mac->basic_rates = 0xfff;
+ else
+ mac->basic_rates = 0xff0;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+ (u8 *) (&mac->basic_rates));
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("NL80211_IFTYPE_MESH_POINT \n"));
+
+ mac->link_state = MAC80211_LINKED;
+ rtlpriv->cfg->ops->set_bcn_reg(hw);
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
+ mac->basic_rates = 0xfff;
+ else
+ mac->basic_rates = 0xff0;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+ (u8 *) (&mac->basic_rates));
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("operation mode %d is not support!\n", vif->type));
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+#ifdef VIF_TODO
+ if (!rtl_set_vif_info(hw, vif))
+ goto out;
+#endif
+
+ if (mac->p2p) {
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("p2p role %x \n",vif->type));
+ mac->basic_rates = 0xff0;/*disable cck rate for p2p*/
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+ (u8 *) (&mac->basic_rates));
+ }
+ mac->vif = vif;
+ mac->opmode = vif->type;
+ rtlpriv->cfg->ops->set_network_type(hw, vif->type);
+ memcpy(mac->mac_addr, vif->addr, ETH_ALEN);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+
+out:
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+ return err;
+}
+
+static void rtl_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+
+ /* Free beacon resources */
+ if ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_ADHOC) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+ if (mac->beacon_enabled == 1) {
+ mac->beacon_enabled = 0;
+ rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
+ rtlpriv->cfg->maps[RTL_IBSS_INT_MASKS]);
+ }
+ }
+
+ /*
+ *Note: We assume NL80211_IFTYPE_UNSPECIFIED as
+ *NO LINK for our hardware.
+ */
+ mac->p2p = 0;
+ mac->vif = NULL;
+ mac->link_state = MAC80211_NOLINK;
+ memset(mac->bssid, 0, 6);
+ mac->vendor = PEER_UNKNOWN;
+ mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
+ rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
+
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+/*<delete in kernel end>*/
+static int rtl_op_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype new_type, bool p2p)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int ret;
+ rtl_op_remove_interface(hw, vif);
+
+ vif->type = new_type;
+ vif->p2p = p2p;
+ ret = rtl_op_add_interface(hw, vif);
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ (" p2p %x\n",p2p));
+ return ret;
+}
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (mac->skip_scan)
+ return 1;
+
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+ if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { /* BIT(2) */
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n"));
+ }
+
+ /*For IPS */
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ if (hw->conf.flags & IEEE80211_CONF_IDLE)
+ rtl_ips_nic_off(hw);
+ else
+ rtl_ips_nic_on(hw);
+ } else {
+ /*
+ * Although the RF-off state may not have been caused by IPS,
+ * the reason is checked in the set_rf_power_state function.
+ */
+ if (unlikely(ppsc->rfpwr_state == ERFOFF))
+ rtl_ips_nic_on(hw);
+ }
+
+ /*For LPS */
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ cancel_delayed_work(&rtlpriv->works.ps_work);
+ cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
+ if (conf->flags & IEEE80211_CONF_PS) {
+ rtlpriv->psc.sw_ps_enabled = true;
+ /* Sleeping here is required, or we may receive a beacon and
+ * put mac80211 into the wrong PS state; that makes the
+ * power-save nullfunc transmission fail and causes packet
+ * loss. The sleep must happen quickly but not immediately,
+ * because an immediate sleep also makes the nullfunc sent by
+ * mac80211 fail and loses packets. Testing shows a 5 ms
+ * delay works well. */
+ if (!rtlpriv->psc.multi_buffered)
+ queue_delayed_work(rtlpriv->works.rtl_wq,
+ &rtlpriv->works.ps_work,
+ MSECS(5));
+ } else {
+ rtl_swlps_rf_awake(hw);
+ rtlpriv->psc.sw_ps_enabled = false;
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
+ hw->conf.long_frame_max_tx_count));
+ mac->retry_long = hw->conf.long_frame_max_tx_count;
+ mac->retry_short = hw->conf.long_frame_max_tx_count;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
+ (u8 *) (&hw->conf.long_frame_max_tx_count));
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ struct ieee80211_channel *channel = hw->conf.chandef.chan;
+ enum nl80211_channel_type channel_type =
+ cfg80211_get_chandef_type(&(hw->conf.chandef));
+#else
+ struct ieee80211_channel *channel = hw->conf.channel;
+ enum nl80211_channel_type channel_type = hw->conf.channel_type;
+#endif
+ u8 wide_chan = (u8) channel->hw_value;
+
+ if (mac->act_scanning)
+ mac->n_channels++;
+
+ if (rtlpriv->dm.supp_phymode_switch &&
+ mac->link_state < MAC80211_LINKED &&
+ !mac->act_scanning) {
+ if (rtlpriv->cfg->ops->check_switch_to_dmdp)
+ rtlpriv->cfg->ops->check_switch_to_dmdp(hw);
+ }
+
+ /*
+ * We must switch back to current_network.chan while
+ * scanning, so even if set_chan == current_network.chan
+ * we still set it. mac80211 also reports wrong bw40
+ * info for the Cisco 1253 in bw20 mode, so we correct
+ * it here based on UPPER & LOWER.
+ */
+ switch (channel_type) {
+ case NL80211_CHAN_HT20:
+ case NL80211_CHAN_NO_HT:
+ /* SC */
+ mac->cur_40_prime_sc =
+ PRIME_CHNL_OFFSET_DONT_CARE;
+ rtlphy->current_chan_bw = HT_CHANNEL_WIDTH_20;
+ mac->bw_40 = false;
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ /* SC */
+ mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_UPPER;
+ rtlphy->current_chan_bw =
+ HT_CHANNEL_WIDTH_20_40;
+ mac->bw_40 = true;
+
+ /*wide channel */
+ wide_chan -= 2;
+
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ /* SC */
+ mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_LOWER;
+ rtlphy->current_chan_bw =
+ HT_CHANNEL_WIDTH_20_40;
+ mac->bw_40 = true;
+
+ /*wide channel */
+ wide_chan += 2;
+
+ break;
+ default:
+ mac->bw_40 = false;
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not processed \n"));
+ break;
+ }
+
+ if (wide_chan <= 0)
+ wide_chan = 1;
+
+ /* While scanning, just before going off-channel we may send a
+ * PS=1 null frame to the AP and then quickly send a PS=0 null
+ * frame. The first null frame makes the AP queue many packets
+ * in its hw TX buffer; those packets must be transmitted before
+ * we go off-channel, so we delay a little longer to let the AP
+ * flush them. Otherwise the AP may disassociate us or tear down
+ * the BA session.
+ */
+ if (rtlpriv->mac80211.offchan_deley) {
+ rtlpriv->mac80211.offchan_deley = false;
+ mdelay(50);
+ }
+
+ rtlphy->current_channel = wide_chan;
+
+ rtlpriv->cfg->ops->switch_channel(hw);
+ rtlpriv->cfg->ops->set_channel_access(hw);
+ rtlpriv->cfg->ops->set_bw_mode(hw,
+ channel_type);
+ }
+
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+
+ return 0;
+}
+
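+/* Translate the mac80211 FIF_* RX filter flags into the corresponding
+ * RCR bits (rx_conf) and the BSSID-check behavior. */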
+static void rtl_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *new_flags, u64 multicast)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ *new_flags &= RTL_SUPPORTED_FILTERS;
+ if (0 == changed_flags)
+ return;
+
+ /* TODO: broadcast is disabled now, so enable it here */
+ if (changed_flags & FIF_ALLMULTI) {
+ if (*new_flags & FIF_ALLMULTI) {
+ mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
+ rtlpriv->cfg->maps[MAC_RCR_AB];
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("Enable receive multicast frame.\n"));
+ } else {
+ mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
+ rtlpriv->cfg->maps[MAC_RCR_AB]);
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("Disable receive multicast frame.\n"));
+ }
+ }
+
+ if (changed_flags & FIF_FCSFAIL) {
+ if (*new_flags & FIF_FCSFAIL) {
+ mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("Enable receive FCS error frame.\n"));
+ } else {
+ mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("Disable receive FCS error frame.\n"));
+ }
+ }
+
+ /* If the SSID is not set to hw, do not check the BSSID. This
+ * path is only used for scanning while linked; for the linked
+ * and no-link cases, check_bssid is set in set_network_type. */
+ if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
+ (mac->link_state >= MAC80211_LINKED)) {
+ if (mac->opmode != NL80211_IFTYPE_AP &&
+ mac->opmode != NL80211_IFTYPE_MESH_POINT) {
+ if (*new_flags & FIF_BCN_PRBRESP_PROMISC) {
+ rtlpriv->cfg->ops->set_chk_bssid(hw, false);
+ } else {
+ rtlpriv->cfg->ops->set_chk_bssid(hw, true);
+ }
+ }
+ }
+
+ if (changed_flags & FIF_CONTROL) {
+ if (*new_flags & FIF_CONTROL) {
+ mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
+
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("Enable receive control frame.\n"));
+ } else {
+ mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("Disable receive control frame.\n"));
+ }
+ }
+
+ if (changed_flags & FIF_OTHER_BSS) {
+ if (*new_flags & FIF_OTHER_BSS) {
+ mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("Enable receive other BSS's frame.\n"));
+ } else {
+ mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("Disable receive other BSS's frame.\n"));
+ }
+ }
+}
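+
+/* Register a new station: add it to the driver's entry list, choose the
+ * wireless mode from the current band and HT capability, and update the
+ * rate table. */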
+static int rtl_op_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_sta_info *sta_entry;
+
+ if (sta) {
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+ list_add_tail(&sta_entry->list, &rtlpriv->entry_list);
+ spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ sta_entry->wireless_mode = WIRELESS_MODE_G;
+ if (sta->supp_rates[0] <= 0xf)
+ sta_entry->wireless_mode = WIRELESS_MODE_B;
+ if (sta->ht_cap.ht_supported)
+ sta_entry->wireless_mode = WIRELESS_MODE_N_24G;
+
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ sta_entry->wireless_mode = WIRELESS_MODE_G;
+ } else if (rtlhal->current_bandtype == BAND_ON_5G) {
+ sta_entry->wireless_mode = WIRELESS_MODE_A;
+ if (sta->ht_cap.ht_supported)
+ sta_entry->wireless_mode = WIRELESS_MODE_N_5G;
+
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ sta_entry->wireless_mode = WIRELESS_MODE_A;
+ }
+ /*disable cck rate for p2p*/
+ if (mac->p2p)
+ sta->supp_rates[0] &= 0xfffffff0;
+
+ memcpy(sta_entry->mac_addr, sta->addr, ETH_ALEN);
+ RT_TRACE(COMP_MAC80211, DBG_DMESG,
+ ("Add sta addr is %pM\n",sta->addr));
+ rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
+ }
+
+ return 0;
+}
+
+static int rtl_op_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_sta_info *sta_entry;
+
+ if (sta) {
+ RT_TRACE(COMP_MAC80211, DBG_DMESG,
+ ("Remove sta addr is %pM\n", sta->addr));
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ sta_entry->wireless_mode = 0;
+ sta_entry->ratr_index = 0;
+ spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+ list_del(&sta_entry->list);
+ spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+ }
+ return 0;
+}
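+
+/* Map a mac80211 queue number (VO=0, VI=1, BE=2, BK=3) to the driver's
+ * AC index. */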
+static int _rtl_get_hal_qnum(u16 queue)
+{
+ int qnum;
+
+ switch (queue) {
+ case 0:
+ qnum = AC3_VO;
+ break;
+ case 1:
+ qnum = AC2_VI;
+ break;
+ case 2:
+ qnum = AC0_BE;
+ break;
+ case 3:
+ qnum = AC1_BK;
+ break;
+ default:
+ qnum = AC0_BE;
+ break;
+ }
+ return qnum;
+}
+
+/*
+ *for mac80211 VO=0, VI=1, BE=2, BK=3
+ *for rtl819x BE=0, BK=1, VI=2, VO=3
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+static int rtl_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *param)
+#else
+static int rtl_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *param)
+#endif
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ int aci;
+
+ if (queue >= AC_MAX) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("queue number %d is incorrect!\n", queue));
+ return -EINVAL;
+ }
+
+ aci = _rtl_get_hal_qnum(queue);
+ mac->ac[aci].aifs = param->aifs;
+ mac->ac[aci].cw_min = param->cw_min;
+ mac->ac[aci].cw_max = param->cw_max;
+ mac->ac[aci].tx_op = param->txop;
+ memcpy(&mac->edca_param[aci], param, sizeof(*param));
+ rtlpriv->cfg->ops->set_qos(hw, aci);
+ return 0;
+}
+
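+/* React to BSS changes reported by mac80211: beacon enable/disable,
+ * association state, ERP parameters, HT parameters, BSSID and basic
+ * rates, plus firmware keep-alive / join-BSS reporting. */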
+static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+ if ((vif->type == NL80211_IFTYPE_ADHOC) ||
+ (vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+ if ((changed & BSS_CHANGED_BEACON) ||
+ (changed & BSS_CHANGED_BEACON_ENABLED &&
+ bss_conf->enable_beacon)) {
+ if (mac->beacon_enabled == 0) {
+ RT_TRACE(COMP_MAC80211, DBG_DMESG,
+ ("BSS_CHANGED_BEACON_ENABLED \n"));
+
+ /*start hw beacon interrupt. */
+ /*rtlpriv->cfg->ops->set_bcn_reg(hw); */
+ mac->beacon_enabled = 1;
+ rtlpriv->cfg->ops->update_interrupt_mask(hw,
+ rtlpriv->cfg->maps
+ [RTL_IBSS_INT_MASKS], 0);
+
+ if (rtlpriv->cfg->ops->linked_set_reg)
+ rtlpriv->cfg->ops->linked_set_reg(hw);
+ }
+ }
+ if ((changed & BSS_CHANGED_BEACON_ENABLED &&
+ !bss_conf->enable_beacon)){
+ if (mac->beacon_enabled == 1) {
+ RT_TRACE(COMP_MAC80211, DBG_DMESG,
+ ("ADHOC DISABLE BEACON\n"));
+
+ mac->beacon_enabled = 0;
+ rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
+ rtlpriv->cfg->maps
+ [RTL_IBSS_INT_MASKS]);
+ }
+ }
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ RT_TRACE(COMP_BEACON, DBG_TRACE,
+ ("BSS_CHANGED_BEACON_INT\n"));
+ mac->beacon_interval = bss_conf->beacon_int;
+ rtlpriv->cfg->ops->set_bcn_intv(hw);
+ }
+ }
+
+ /*TODO: reference to enum ieee80211_bss_change */
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (bss_conf->assoc) {
+ struct ieee80211_sta *sta = NULL;
+ /* We should reset all security info and the CAM before
+ * setting the CAM after linking; we must not reset it on
+ * disassociation, or switching from TKIP to WEP fails
+ * because some flags end up wrong. */
+ /* reset sec info */
+ rtl_cam_reset_sec_info(hw);
+ /* reset cam to fix wep fail issue
+ * when change from wpa to wep */
+ rtl_cam_reset_all_entry(hw);
+
+ mac->link_state = MAC80211_LINKED;
+ mac->cnt_after_linked = 0;
+ mac->assoc_id = bss_conf->aid;
+ memcpy(mac->bssid, bss_conf->bssid, 6);
+
+ if (rtlpriv->cfg->ops->linked_set_reg)
+ rtlpriv->cfg->ops->linked_set_reg(hw);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, (u8*)bss_conf->bssid);
+
+ if (vif->type == NL80211_IFTYPE_STATION && sta)
+ rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
+ RT_TRACE(COMP_EASY_CONCURRENT, DBG_LOUD,
+ ("send PS STATIC frame \n"));
+ if (rtlpriv->dm.supp_phymode_switch) {
+ if (sta && sta->ht_cap.ht_supported)
+ rtl_send_smps_action(hw, sta,
+ IEEE80211_SMPS_STATIC);
+ }
+ rcu_read_unlock();
+
+ RT_TRACE(COMP_MAC80211, DBG_DMESG,
+ ("BSS_CHANGED_ASSOC\n"));
+ } else {
+ if (mac->link_state == MAC80211_LINKED)
+ rtl_lps_leave(hw);
+ if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
+ rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
+ mac->link_state = MAC80211_NOLINK;
+ memset(mac->bssid, 0, 6);
+ mac->vendor = PEER_UNKNOWN;
+
+ if (rtlpriv->dm.supp_phymode_switch) {
+ if (rtlpriv->cfg->ops->check_switch_to_dmdp)
+ rtlpriv->cfg->ops->check_switch_to_dmdp(hw);
+ }
+ RT_TRACE(COMP_MAC80211, DBG_DMESG,
+ ("BSS_CHANGED_UN_ASSOC\n"));
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ RT_TRACE(COMP_MAC80211, DBG_TRACE,
+ ("BSS_CHANGED_ERP_CTS_PROT\n"));
+ mac->use_cts_protect = bss_conf->use_cts_prot;
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("BSS_CHANGED_ERP_PREAMBLE use short preamble:%x \n",
+ bss_conf->use_short_preamble));
+
+ mac->short_preamble = bss_conf->use_short_preamble;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE,
+ (u8 *) (&mac->short_preamble));
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ RT_TRACE(COMP_MAC80211, DBG_TRACE,
+ ("BSS_CHANGED_ERP_SLOT\n"));
+
+ if (bss_conf->use_short_slot)
+ mac->slot_time = RTL_SLOT_TIME_9;
+ else
+ mac->slot_time = RTL_SLOT_TIME_20;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+ (u8 *) (&mac->slot_time));
+ }
+
+ if (changed & BSS_CHANGED_HT) {
+ struct ieee80211_sta *sta = NULL;
+
+ RT_TRACE(COMP_MAC80211, DBG_TRACE,
+ ("BSS_CHANGED_HT\n"));
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, (u8*)bss_conf->bssid);
+ if (sta) {
+ if (sta->ht_cap.ampdu_density >
+ mac->current_ampdu_density)
+ mac->current_ampdu_density =
+ sta->ht_cap.ampdu_density;
+ if (sta->ht_cap.ampdu_factor <
+ mac->current_ampdu_factor)
+ mac->current_ampdu_factor =
+ sta->ht_cap.ampdu_factor;
+ }
+ rcu_read_unlock();
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY,
+ (u8 *) (&mac->max_mss_density));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_FACTOR,
+ &mac->current_ampdu_factor);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_MIN_SPACE,
+ &mac->current_ampdu_density);
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ u32 basic_rates;
+ struct ieee80211_sta *sta = NULL;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BSSID,
+ (u8 *) bss_conf->bssid);
+
+ RT_TRACE(COMP_MAC80211, DBG_DMESG,
+ ("bssid: %pM\n", bss_conf->bssid));
+
+ mac->vendor = PEER_UNKNOWN;
+ memcpy(mac->bssid, bss_conf->bssid, 6);
+ rtlpriv->cfg->ops->set_network_type(hw, vif->type);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, (u8*)bss_conf->bssid);
+ if (!sta) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ if (rtlhal->current_bandtype == BAND_ON_5G) {
+ mac->mode = WIRELESS_MODE_A;
+ } else {
+ if (sta->supp_rates[0] <= 0xf)
+ mac->mode = WIRELESS_MODE_B;
+ else
+ mac->mode = WIRELESS_MODE_G;
+ }
+
+ if (sta->ht_cap.ht_supported) {
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ mac->mode = WIRELESS_MODE_N_24G;
+ else
+ mac->mode = WIRELESS_MODE_N_5G;
+ }
+
+ /* Only station mode needs this; IBSS and AP modes set the
+ * wireless mode in sta_add instead. */
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ struct rtl_sta_info *sta_entry;
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ sta_entry->wireless_mode = mac->mode;
+ }
+
+ if (sta->ht_cap.ht_supported) {
+ mac->ht_enable = true;
+
+ /*
+ * For the Cisco 1252 in bw20 mode the following would be
+ * wrong, so it stays disabled:
+ * if (ht_cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ * mac->bw_40 = true;
+ */
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ /* For 5G we must shift left by RATE_6M_INDEX (4)
+ * because 5G has no CCK rates. */
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ basic_rates = sta->supp_rates[1] << 4;
+ else
+ basic_rates = sta->supp_rates[0];
+
+ mac->basic_rates = basic_rates;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+ (u8 *) (&basic_rates));
+ }
+ rcu_read_unlock();
+ }
+
+ /*
+ * For FW LPS and Keep Alive:
+ * To tell firmware we have connected
+ * to an AP. For 92SE/CE power save v2.
+ */
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (bss_conf->assoc) {
+ u8 keep_alive = 10;
+ u8 mstatus = RT_MEDIA_CONNECT;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_KEEP_ALIVE,
+ (u8 *) (&keep_alive));
+
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_JOINBSSRPT,
+ (u8 *) (&mstatus));
+ ppsc->report_linked = true;
+
+ } else {
+
+ u8 mstatus = RT_MEDIA_DISCONNECT;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_JOINBSSRPT,
+ (u8 *) (&mstatus));
+ ppsc->report_linked = false;
+
+ }
+
+ if (rtlpriv->cfg->ops->get_btc_status()) {
+ rtlpriv->btcoexist.btc_ops->btc_mediastatus_notify(
+ rtlpriv, ppsc->report_linked);
+ }
+ }
+
+out:
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+static u64 rtl_op_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+#else
+static u64 rtl_op_get_tsf(struct ieee80211_hw *hw)
+#endif
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u64 tsf;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&tsf));
+ return tsf;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+static void rtl_op_set_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u64 tsf)
+#else
+static void rtl_op_set_tsf(struct ieee80211_hw *hw, u64 tsf)
+#endif
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
+
+ mac->tsf = tsf;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&bibss));
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+static void rtl_op_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+#else
+static void rtl_op_reset_tsf(struct ieee80211_hw *hw)
+#endif
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp = 0;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, (u8 *) (&tmp));
+}
+
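+/* Station sleep/awake notifications: currently nothing to do here. */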
+static void rtl_op_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ switch (cmd) {
+ case STA_NOTIFY_SLEEP:
+ break;
+ case STA_NOTIFY_AWAKE:
+ break;
+ default:
+ break;
+ }
+}
+
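+/* Dispatch A-MPDU (BA session) start/stop/operational events from
+ * mac80211 to the TX/RX aggregation helpers. */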
+static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+/*<delete in kernel end>*/
+ , u8 buf_size
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (action) {
+ case IEEE80211_AMPDU_TX_START:
+ RT_TRACE(COMP_MAC80211, DBG_TRACE,
+ ("IEEE80211_AMPDU_TX_START: TID:%d\n", tid));
+ return rtl_tx_agg_start(hw, vif, sta, tid, ssn);
+ break;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+#else
+ case IEEE80211_AMPDU_TX_STOP:
+#endif
+ RT_TRACE(COMP_MAC80211, DBG_TRACE,
+ ("IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid));
+ return rtl_tx_agg_stop(hw, vif, sta, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ RT_TRACE(COMP_MAC80211, DBG_TRACE,
+ ("IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid));
+ rtl_tx_agg_oper(hw, sta, tid);
+ break;
+ case IEEE80211_AMPDU_RX_START:
+ RT_TRACE(COMP_MAC80211, DBG_TRACE,
+ ("IEEE80211_AMPDU_RX_START:TID:%d\n", tid));
+ return rtl_rx_agg_start(hw, sta, tid);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ RT_TRACE(COMP_MAC80211, DBG_TRACE,
+ ("IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid));
+ return rtl_rx_agg_stop(hw, sta, tid);
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("IEEE80211_AMPDU_ERR!!!!:\n"));
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static void rtl_op_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ RT_TRACE(COMP_MAC80211, DBG_LOUD, ("\n"));
+ mac->act_scanning = true;
+ /*rtlpriv->btcops->btc_scan_notify(rtlpriv, 0); */
+ if (rtlpriv->link_info.b_higher_busytraffic) {
+ mac->skip_scan = true;
+ return;
+ }
+
+ if (rtlpriv->dm.supp_phymode_switch) {
+ if (rtlpriv->cfg->ops->check_switch_to_dmdp)
+ rtlpriv->cfg->ops->check_switch_to_dmdp(hw);
+ }
+
+ if (mac->link_state == MAC80211_LINKED) {
+ rtl_lps_leave(hw);
+ mac->link_state = MAC80211_LINKED_SCANNING;
+ } else {
+ rtl_ips_nic_on(hw);
+ }
+
+ /* Dual MAC */
+ rtlpriv->rtlhal.b_load_imrandiqk_setting_for2g = false;
+
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_SITE_SURVEY);
+
+ rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_BACKUP_BAND0);
+}
+
+static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ RT_TRACE(COMP_MAC80211, DBG_LOUD, ("\n"));
+ mac->act_scanning = false;
+ mac->skip_scan = false;
+ if (rtlpriv->link_info.b_higher_busytraffic) {
+ return;
+ }
+
+ /* P2P uses only channels 1/6/11 to scan */
+ if (mac->n_channels == 3)
+ mac->p2p_in_use = true;
+ else
+ mac->p2p_in_use = false;
+ mac->n_channels = 0;
+ /* Dual MAC */
+ rtlpriv->rtlhal.b_load_imrandiqk_setting_for2g = false;
+
+ if (mac->link_state == MAC80211_LINKED_SCANNING) {
+ mac->link_state = MAC80211_LINKED;
+ if (mac->opmode == NL80211_IFTYPE_STATION) {
+ /* fix fwlps issue */
+ rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
+ }
+ }
+
+ rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_RESTORE);
+ /* rtlpriv->btcops->btc_scan_notify(rtlpriv, 1); */
+}
+
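+/* Install or remove a hardware key: map the cipher to the driver's key
+ * type, decide between WEP/group/pairwise handling, and program the CAM. */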
+static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 key_type = NO_ENCRYPTION;
+ u8 key_idx;
+ bool group_key = false;
+ bool wep_only = false;
+ int err = 0;
+ u8 mac_addr[ETH_ALEN];
+ u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ u8 zero_addr[ETH_ALEN] = { 0 };
+
+ if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("not open hw encryption\n"));
+ return -ENOSPC; /*User disabled HW-crypto */
+ }
+ /* To support IBSS, use sw-crypto for GTK */
+ if (((vif->type == NL80211_IFTYPE_ADHOC) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT)) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -ENOSPC;
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("%s hardware based encryption for keyidx: %d, mac: %pM\n",
+ cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
+ sta ? sta->addr : bcast_addr));
+ rtlpriv->sec.being_setkey = true;
+ rtl_ips_nic_on(hw);
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+ /* <1> get encryption alg */
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+/*<delete in kernel end>*/
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key_type = WEP40_ENCRYPTION;
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:WEP40\n"));
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:WEP104\n"));
+ key_type = WEP104_ENCRYPTION;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ key_type = TKIP_ENCRYPTION;
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:TKIP\n"));
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_type = AESCCMP_ENCRYPTION;
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CCMP\n"));
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ /* HW doesn't support CMAC encryption,
+ * use software CMAC encryption */
+ key_type = AESCMAC_ENCRYPTION;
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CMAC\n"));
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("HW doesn't support CMAC encryption, "
+ "use software CMAC encryption\n"));
+ err = -EOPNOTSUPP;
+ goto out_unlock;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("alg_err:%x!!!!:\n", key->cipher));
+ goto out_unlock;
+ }
+/*<delete in kernel start>*/
+#else
+ switch (key->alg) {
+ case ALG_WEP:
+ if (key->keylen == WLAN_KEY_LEN_WEP40) {
+ key_type = WEP40_ENCRYPTION;
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:WEP40\n"));
+ } else {
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("alg:WEP104\n"));
+ key_type = WEP104_ENCRYPTION;
+ }
+ break;
+ case ALG_TKIP:
+ key_type = TKIP_ENCRYPTION;
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:TKIP\n"));
+ break;
+ case ALG_CCMP:
+ key_type = AESCCMP_ENCRYPTION;
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CCMP\n"));
+ break;
+ case ALG_AES_CMAC:
+ /* HW doesn't support CMAC encryption; use software CMAC encryption */
+ key_type = AESCMAC_ENCRYPTION;
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CMAC\n"));
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("HW doesn't support CMAC encryption, "
+ "use software CMAC encryption\n"));
+ err = -EOPNOTSUPP;
+ goto out_unlock;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("alg_err:%x!!!!:\n", key->alg));
+ goto out_unlock;
+ }
+#endif
+/*<delete in kernel end>*/
+ if (key_type == WEP40_ENCRYPTION ||
+ key_type == WEP104_ENCRYPTION ||
+ vif->type == NL80211_IFTYPE_ADHOC)
+ rtlpriv->sec.use_defaultkey = true;
+
+ /* <2> get key_idx */
+ key_idx = (u8) (key->keyidx);
+ if (key_idx > 3)
+ goto out_unlock;
+ /* <3> if pairwise key enable_hw_sec */
+ group_key = !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+
+ /* WEP keys are always group keys, but there are two cases:
+ * 1) WEP only: used purely for WEP encryption; in this case
+ * rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION
+ * is true and enable_hw_sec is called when the WEP key
+ * is set.
+ * 2) WEP (group) + AES (pairwise): some APs, e.g. Cisco,
+ * may use this; in this case enable_hw_sec is not called
+ * when the WEP key is set. */
+ /* We must reset sec_info after linking and before setting the
+ * key, or some flags will be wrong. */
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ if (!group_key || key_type == WEP40_ENCRYPTION ||
+ key_type == WEP104_ENCRYPTION) {
+ if (group_key) {
+ wep_only = true;
+ }
+ rtlpriv->cfg->ops->enable_hw_sec(hw);
+ }
+ } else {
+ if ((!group_key) || (vif->type == NL80211_IFTYPE_ADHOC) ||
+ rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) {
+ if (rtlpriv->sec.pairwise_enc_algorithm ==
+ NO_ENCRYPTION &&
+ (key_type == WEP40_ENCRYPTION ||
+ key_type == WEP104_ENCRYPTION))
+ wep_only = true;
+ rtlpriv->sec.pairwise_enc_algorithm = key_type;
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("set enable_hw_sec, key_type:%x(OPEN:0 WEP40:"
+ "1 TKIP:2 AES:4 WEP104:5)\n", key_type));
+ rtlpriv->cfg->ops->enable_hw_sec(hw);
+ }
+ }
+ /* <4> set key based on cmd */
+ switch (cmd) {
+ case SET_KEY:
+ if (wep_only) {
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("set WEP(group/pairwise) key\n"));
+ /* Pairwise key with an assigned MAC address. */
+ rtlpriv->sec.pairwise_enc_algorithm = key_type;
+ rtlpriv->sec.group_enc_algorithm = key_type;
+ /*set local buf about wep key. */
+ memcpy(rtlpriv->sec.key_buf[key_idx],
+ key->key, key->keylen);
+ rtlpriv->sec.key_len[key_idx] = key->keylen;
+ memcpy(mac_addr, zero_addr, ETH_ALEN);
+ } else if (group_key) { /* group key */
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("set group key\n"));
+ /* group key */
+ rtlpriv->sec.group_enc_algorithm = key_type;
+ /*set local buf about group key. */
+ memcpy(rtlpriv->sec.key_buf[key_idx],
+ key->key, key->keylen);
+ rtlpriv->sec.key_len[key_idx] = key->keylen;
+ memcpy(mac_addr, bcast_addr, ETH_ALEN);
+ } else { /* pairwise key */
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("set pairwise key\n"));
+ if (!sta) {
+ RT_ASSERT(false, ("pairwise key withnot"
+ "mac_addr\n"));
+
+ err = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+ /* Pairwise key with an assigned MAC address. */
+ rtlpriv->sec.pairwise_enc_algorithm = key_type;
+ /*set local buf about pairwise key. */
+ memcpy(rtlpriv->sec.key_buf[PAIRWISE_KEYIDX],
+ key->key, key->keylen);
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX] = key->keylen;
+ rtlpriv->sec.pairwise_key =
+ rtlpriv->sec.key_buf[PAIRWISE_KEYIDX];
+ memcpy(mac_addr, sta->addr, ETH_ALEN);
+ }
+ rtlpriv->cfg->ops->set_key(hw, key_idx, mac_addr,
+ group_key, key_type, wep_only,
+ false);
+ /* <5> tell mac80211 do something: */
+ /* Must use a SW-generated IV, or it will not work. */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ key->hw_key_idx = key_idx;
+ if (key_type == TKIP_ENCRYPTION)
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ /*use software CCMP encryption for management frames (MFP) */
+ if (key_type == AESCCMP_ENCRYPTION)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+ break;
+ case DISABLE_KEY:
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("disable key delete one entry\n"));
+ /*set local buf about wep key. */
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ if (sta)
+ rtl_cam_del_entry(hw, sta->addr);
+ }
+ memset(rtlpriv->sec.key_buf[key_idx], 0, key->keylen);
+ rtlpriv->sec.key_len[key_idx] = 0;
+ memcpy(mac_addr, zero_addr, ETH_ALEN);
+ /*
+ * mac80211 deletes entries one by one, so do not use
+ * rtl_cam_reset_all_entry or clear all entries here.
+ */
+ rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("cmd_err:%x!!!!:\n", cmd));
+ }
+out_unlock:
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+ rtlpriv->sec.being_setkey = false;
+ return err;
+}
+
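+/* Poll the hardware RF switch and report state changes to the rfkill
+ * subsystem. */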
+static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ bool radio_state;
+ bool blocked;
+ u8 valid = 0;
+
+ if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+ return;
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+
+ /* radio_onoff_checking returns true if the radio is on */
+ radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
+
+ if (valid) {
+ if (unlikely(radio_state != rtlpriv->rfkill.rfkill_state)) {
+ rtlpriv->rfkill.rfkill_state = radio_state;
+
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ (KERN_INFO "wireless radio switch turned %s\n",
+ radio_state ? "on" : "off"));
+
+ blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+ }
+ }
+
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+
+/* This function is called by mac80211 to flush the TX buffer before a
+ * channel switch or power save. Otherwise buffered packets may be sent
+ * after we go off-channel or the RF sleeps, which can make the AP
+ * disassociate us. */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->intf_ops->flush)
+ rtlpriv->intf_ops->flush(hw, queues, drop);
+}
+#else
+static void rtl_op_flush(struct ieee80211_hw *hw, bool drop)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->intf_ops->flush)
+ rtlpriv->intf_ops->flush(hw, drop);
+}
+#endif
+
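+/* The mac80211 callback table registered for this driver. */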
+const struct ieee80211_ops rtl_ops = {
+ .start = rtl_op_start,
+ .stop = rtl_op_stop,
+ .tx = rtl_op_tx,
+ .add_interface = rtl_op_add_interface,
+ .remove_interface = rtl_op_remove_interface,
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+/*<delete in kernel end>*/
+ .change_interface = rtl_op_change_interface,
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+ .config = rtl_op_config,
+ .configure_filter = rtl_op_configure_filter,
+ .set_key = rtl_op_set_key,
+ .conf_tx = rtl_op_conf_tx,
+ .bss_info_changed = rtl_op_bss_info_changed,
+ .get_tsf = rtl_op_get_tsf,
+ .set_tsf = rtl_op_set_tsf,
+ .reset_tsf = rtl_op_reset_tsf,
+ .sta_notify = rtl_op_sta_notify,
+ .ampdu_action = rtl_op_ampdu_action,
+ .sw_scan_start = rtl_op_sw_scan_start,
+ .sw_scan_complete = rtl_op_sw_scan_complete,
+ .rfkill_poll = rtl_op_rfkill_poll,
+ .sta_add = rtl_op_sta_add,
+ .sta_remove = rtl_op_sta_remove,
+ .flush = rtl_op_flush,
+};
diff --git a/drivers/staging/rtl8821ae/core.h b/drivers/staging/rtl8821ae/core.h
new file mode 100644
index 000000000000..4b247db2861d
--- /dev/null
+++ b/drivers/staging/rtl8821ae/core.h
@@ -0,0 +1,43 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_CORE_H__
+#define __RTL_CORE_H__
+
+#define RTL_SUPPORTED_FILTERS \
+ (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | FIF_CONTROL | \
+ FIF_OTHER_BSS | \
+ FIF_FCSFAIL | \
+ FIF_BCN_PRBRESP_PROMISC)
+
+#define RTL_SUPPORTED_CTRL_FILTER 0xFF
+
+extern const struct ieee80211_ops rtl_ops;
+#endif
diff --git a/drivers/staging/rtl8821ae/debug.c b/drivers/staging/rtl8821ae/debug.c
new file mode 100644
index 000000000000..cb051223c684
--- /dev/null
+++ b/drivers/staging/rtl8821ae/debug.c
@@ -0,0 +1,988 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "cam.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+#define GET_INODE_DATA(__node) PDE_DATA(__node)
+#else
+#define GET_INODE_DATA(__node) PDE(__node)->data
+#endif
+
+void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 i;
+
+ rtlpriv->dbg.global_debuglevel = DBG_DMESG;
+
+ rtlpriv->dbg.global_debugcomponents =
+ COMP_ERR |
+ COMP_FW |
+ COMP_INIT |
+ COMP_RECV |
+ COMP_SEND |
+ COMP_MLME |
+ COMP_SCAN |
+ COMP_INTR |
+ COMP_LED |
+ COMP_SEC |
+ COMP_BEACON |
+ COMP_RATE |
+ COMP_RXDESC |
+ COMP_DIG |
+ COMP_TXAGC |
+ COMP_POWER |
+ COMP_POWER_TRACKING |
+ COMP_BB_POWERSAVING |
+ COMP_SWAS |
+ COMP_RF |
+ COMP_TURBO |
+ COMP_RATR |
+ COMP_CMD |
+ COMP_EASY_CONCURRENT |
+ COMP_EFUSE |
+ COMP_QOS | COMP_MAC80211 | COMP_REGD |
+ COMP_CHAN |
+ COMP_BT_COEXIST |
+ COMP_IQK |
+ 0;
+
+ for (i = 0; i < DBGP_TYPE_MAX; i++)
+ rtlpriv->dbg.dbgp_type[i] = 0;
+
+ /*Init Debug flag enable condition */
+}
+
+struct proc_dir_entry *proc_topdir;
+static int rtl_proc_get_mac_0(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, n, page;
+ int max = 0xff;
+ page = 0x000;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_mac_0(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_mac_0, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_0 = {
+ .open = dl_proc_open_mac_0,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_mac_1(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, n, page;
+ int max = 0xff;
+ page = 0x100;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_mac_1(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_mac_1, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_1 = {
+ .open = dl_proc_open_mac_1,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_mac_2(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, n, page;
+ int max = 0xff;
+ page = 0x200;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_mac_2(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_mac_2, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_2 = {
+ .open = dl_proc_open_mac_2,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_mac_3(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, n, page;
+ int max = 0xff;
+ page = 0x300;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_mac_3(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_mac_3, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_3 = {
+ .open = dl_proc_open_mac_3,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_mac_4(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, n, page;
+ int max = 0xff;
+ page = 0x400;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_mac_4(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_mac_4, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_4 = {
+ .open = dl_proc_open_mac_4,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_mac_5(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, n, page;
+ int max = 0xff;
+ page = 0x500;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_mac_5(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_mac_5, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_5 = {
+ .open = dl_proc_open_mac_5,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_mac_6(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, n, page;
+ int max = 0xff;
+ page = 0x600;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_mac_6(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_mac_6, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_6 = {
+ .open = dl_proc_open_mac_6,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_mac_7(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, n, page;
+ int max = 0xff;
+ page = 0x700;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_mac_7(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_mac_7, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_7 = {
+ .open = dl_proc_open_mac_7,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_bb_8(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n, page;
+ int max = 0xff;
+ page = 0x800;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_bb_8(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_bb_8, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_8 = {
+ .open = dl_proc_open_bb_8,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_bb_9(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n, page;
+ int max = 0xff;
+ page = 0x900;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_bb_9(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_bb_9, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_9 = {
+ .open = dl_proc_open_bb_9,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_bb_a(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n, page;
+ int max = 0xff;
+ page = 0xa00;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_bb_a(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_bb_a, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_a = {
+ .open = dl_proc_open_bb_a,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_bb_b(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n, page;
+ int max = 0xff;
+ page = 0xb00;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_bb_b(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_bb_b, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_b = {
+ .open = dl_proc_open_bb_b,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_bb_c(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n, page;
+ int max = 0xff;
+ page = 0xc00;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_bb_c(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_bb_c, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_c = {
+ .open = dl_proc_open_bb_c,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_bb_d(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n, page;
+ int max = 0xff;
+ page = 0xd00;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_bb_d(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_bb_d, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_d = {
+ .open = dl_proc_open_bb_d,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_bb_e(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n, page;
+ int max = 0xff;
+ page = 0xe00;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_bb_e(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_bb_e, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_e = {
+ .open = dl_proc_open_bb_e,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_bb_f(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n, page;
+ int max = 0xff;
+ page = 0xf00;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_bb_f(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_bb_f, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_f = {
+ .open = dl_proc_open_bb_f,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_reg_rf_a(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n;
+ int max = 0x40;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n);
+ for (i = 0; i < 4 && n <= max; n += 1, i++)
+ seq_printf(m, "%8.8x ",
+ rtl_get_rfreg(hw, RF90_PATH_A, n, 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_rf_a(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_reg_rf_a, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_rf_a = {
+ .open = dl_proc_open_rf_a,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_reg_rf_b(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n;
+ int max = 0x40;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n);
+ for (i = 0; i < 4 && n <= max; n += 1, i++)
+ seq_printf(m, "%8.8x ",
+ rtl_get_rfreg(hw, RF90_PATH_B, n,
+ 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_rf_b(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_reg_rf_b, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_rf_b = {
+ .open = dl_proc_open_rf_b,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
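+/* Dump security CAM entries: for each entry, wait for the polling bit
+ * (BIT 31) to clear, write the read command (entry address with BIT 31
+ * set) to RWCAM and print the value read back from RCAMO. */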
+static int rtl_proc_get_cam_register_1(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 target_cmd = 0;
+ u32 target_val = 0;
+ u8 entry_i = 0;
+ u32 ulstatus;
+ int i = 100, j = 0;
+
+ /* This dumps the current register page */
+ seq_puts(m,
+ "\n#################### SECURITY CAM (0-10) ##################\n ");
+
+ for (j = 0; j < 11; j++) {
+ seq_printf(m, "\nD: %2x > ", j);
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ /* polling bit, and No Write enable, and address */
+ target_cmd = entry_i + CAM_CONTENT_COUNT * j;
+ target_cmd = target_cmd | BIT(31);
+
+ /* Check polling bit is clear */
+ while ((i--) >= 0) {
+ ulstatus = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[RWCAM]);
+ if (ulstatus & BIT(31)) {
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_cmd);
+ target_val = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[RCAMO]);
+ seq_printf(m, "%8.8x ", target_val);
+ }
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_cam_1(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_cam_register_1,
+ GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_cam_1 = {
+ .open = dl_proc_open_cam_1,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_cam_register_2(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 target_cmd = 0;
+ u32 target_val = 0;
+ u8 entry_i = 0;
+ u32 ulstatus;
+ int i = 100, j = 0;
+
+ /* This dumps the current register page */
+ seq_puts(m,
+ "\n################### SECURITY CAM (11-21) ##################\n ");
+
+ for (j = 11; j < 22; j++) {
+ seq_printf(m, "\nD: %2x > ", j);
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ target_cmd = entry_i + CAM_CONTENT_COUNT * j;
+ target_cmd = target_cmd | BIT(31);
+
+ while ((i--) >= 0) {
+ ulstatus = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[RWCAM]);
+ if (ulstatus & BIT(31)) {
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_cmd);
+ target_val = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[RCAMO]);
+ seq_printf(m, "%8.8x ", target_val);
+ }
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_cam_2(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_cam_register_2,
+ GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_cam_2 = {
+ .open = dl_proc_open_cam_2,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_cam_register_3(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 target_cmd = 0;
+ u32 target_val = 0;
+ u8 entry_i = 0;
+ u32 ulstatus;
+ int i = 100, j = 0;
+
+ /* This dumps the current register page */
+ seq_puts(m,
+ "\n################### SECURITY CAM (22-31) ##################\n ");
+
+ for (j = 22; j < TOTAL_CAM_ENTRY; j++) {
+ seq_printf(m, "\nD: %2x > ", j);
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ target_cmd = entry_i+CAM_CONTENT_COUNT*j;
+ target_cmd = target_cmd | BIT(31);
+
+ while ((i--) >= 0) {
+ ulstatus = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[RWCAM]);
+ if (ulstatus & BIT(31)) {
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_cmd);
+ target_val = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[RCAMO]);
+ seq_printf(m, "%8.8x ", target_val);
+ }
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_cam_3(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_cam_register_3,
+ GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_cam_3 = {
+ .open = dl_proc_open_cam_3,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
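+/* Create the per-device proc directory (named from the MAC address)
+ * under proc_topdir and populate it with the MAC/BB/RF/CAM register
+ * dump entries defined above. */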
+void rtl_proc_add_one(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct proc_dir_entry *entry;
+
+ snprintf(rtlpriv->dbg.proc_name, 18, "%x-%x-%x-%x-%x-%x",
+ rtlefuse->dev_addr[0], rtlefuse->dev_addr[1],
+ rtlefuse->dev_addr[2], rtlefuse->dev_addr[3],
+ rtlefuse->dev_addr[4], rtlefuse->dev_addr[5]);
+
+ rtlpriv->dbg.proc_dir = proc_mkdir(rtlpriv->dbg.proc_name, proc_topdir);
+ if (!rtlpriv->dbg.proc_dir) {
+ RT_TRACE(COMP_INIT, DBG_EMERG, ("Unable to init "
+ "/proc/net/%s/%s\n", rtlpriv->cfg->name,
+ rtlpriv->dbg.proc_name));
+ return;
+ }
+
+ entry = proc_create_data("mac-0", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_mac_0, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/mac-0\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("mac-1", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_mac_1, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/mac-1\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("mac-2", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_mac_2, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/mac-2\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("mac-3", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_mac_3, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/mac-3\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("mac-4", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_mac_4, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/mac-4\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("mac-5", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_mac_5, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/mac-5\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("mac-6", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_mac_6, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/mac-6\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("mac-7", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_mac_7, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/mac-7\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("bb-8", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_bb_8, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/bb-8\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("bb-9", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_bb_9, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/bb-9\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("bb-a", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_bb_a, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/bb-a\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("bb-b", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_bb_b, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/bb-b\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("bb-c", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_bb_c, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/bb-c\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("bb-d", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_bb_d, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/bb-d\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("bb-e", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_bb_e, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/bb-e\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("bb-f", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_bb_f, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/bb-f\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("rf-a", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_rf_a, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/rf-a\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("rf-b", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_rf_b, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/rf-b\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("cam-1", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_cam_1, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/cam-1\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("cam-2", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_cam_2, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/cam-2\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("cam-3", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_cam_3, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, COMP_ERR,
+ ("Unable to initialize /proc/net/%s/%s/cam-3\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+}
+
+void rtl_proc_remove_one(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->dbg.proc_dir) {
+ remove_proc_entry("mac-0", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("mac-1", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("mac-2", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("mac-3", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("mac-4", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("mac-5", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("mac-6", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("mac-7", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("bb-8", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("bb-9", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("bb-a", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("bb-b", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("bb-c", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("bb-d", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("bb-e", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("bb-f", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("rf-a", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("rf-b", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("cam-1", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("cam-2", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("cam-3", rtlpriv->dbg.proc_dir);
+
+ remove_proc_entry(rtlpriv->dbg.proc_name, proc_topdir);
+
+ rtlpriv->dbg.proc_dir = NULL;
+ }
+}
+
+void rtl_proc_add_topdir(void)
+{
+ proc_topdir = proc_mkdir("rtlwifi", init_net.proc_net);
+}
+
+void rtl_proc_remove_topdir(void)
+{
+ if (proc_topdir)
+ remove_proc_entry("rtlwifi", init_net.proc_net);
+} \ No newline at end of file
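Editorial sketch (not part of this commit): rtl_proc_add_one() above repeats the same proc_create_data() pattern for every register window; assuming the existing file_ops_* structures, the calls could plausibly be expressed table-driven, for example:

static const struct rtl_proc_file {
	const char *name;
	const struct file_operations *fops;
} rtl_proc_files[] = {
	{ "mac-0", &file_ops_mac_0 },
	{ "mac-1", &file_ops_mac_1 },
	/* ... remaining mac-*, bb-*, rf-* and cam-* windows ... */
	{ "cam-3", &file_ops_cam_3 },
};

static void rtl_proc_create_all(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int i;

	for (i = 0; i < ARRAY_SIZE(rtl_proc_files); i++) {
		if (!proc_create_data(rtl_proc_files[i].name,
				      S_IFREG | S_IRUGO,
				      rtlpriv->dbg.proc_dir,
				      rtl_proc_files[i].fops, hw))
			RT_TRACE(COMP_INIT, COMP_ERR,
				 ("Unable to initialize /proc/net/%s/%s/%s\n",
				  rtlpriv->cfg->name, rtlpriv->dbg.proc_name,
				  rtl_proc_files[i].name));
	}
}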
diff --git a/drivers/staging/rtl8821ae/debug.h b/drivers/staging/rtl8821ae/debug.h
new file mode 100644
index 000000000000..5eb6251b89da
--- /dev/null
+++ b/drivers/staging/rtl8821ae/debug.h
@@ -0,0 +1,227 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_DEBUG_H__
+#define __RTL_DEBUG_H__
+
+/*--------------------------------------------------------------
+ Debug level
+--------------------------------------------------------------*/
+/*
+ *Fatal bug.
+ *For example, Tx/Rx/IO locked up,
+ *memory access violation,
+ *resource allocation failed,
+ *unexpected HW behavior, HW BUG
+ *and so on.
+ */
+#define DBG_EMERG 0
+
+/*
+ *Abnormal, rare, or unexpected cases.
+ *For example, Packet/IO Ctl canceled,
+ *device surprisingly removed and so on.
+ */
+#define DBG_WARNING 2
+
+/*
+ *Normal case that a driver developer
+ *should enable; it shows link status
+ *such as assoc/AddBA/DHCP/adapter start
+ *and other basic, useful information.
+ */
+#define DBG_DMESG 3
+
+/*
+ *Normal case with useful information
+ *about current SW or HW state.
+ *For example, Tx/Rx descriptor to fill,
+ *Tx/Rx descriptor completed status,
+ *SW protocol state change, dynamic
+ *mechanism state change and so on.
+ */
+#define DBG_LOUD 4
+
+/*
+ *Normal case with detailed execution
+ *flow or information.
+ */
+#define DBG_TRACE 5
+
+/*--------------------------------------------------------------
+ Define the rt_trace components
+--------------------------------------------------------------*/
+#define COMP_ERR BIT(0)
+#define COMP_FW BIT(1)
+#define COMP_INIT BIT(2) /*For init/deinit */
+#define COMP_RECV BIT(3) /*For Rx. */
+#define COMP_SEND BIT(4) /*For Tx. */
+#define COMP_MLME BIT(5) /*For MLME. */
+#define COMP_SCAN BIT(6) /*For Scan. */
+#define COMP_INTR BIT(7) /*For interrupt Related. */
+#define COMP_LED BIT(8) /*For LED. */
+#define COMP_SEC BIT(9) /*For sec. */
+#define COMP_BEACON BIT(10) /*For beacon. */
+#define COMP_RATE BIT(11) /*For rate. */
+#define COMP_RXDESC BIT(12) /*For rx desc. */
+#define COMP_DIG BIT(13) /*For DIG */
+#define COMP_TXAGC BIT(14) /*For Tx power */
+#define COMP_HIPWR BIT(15) /*For High Power Mechanism */
+#define COMP_POWER BIT(16) /*For lps/ips/aspm. */
+#define COMP_POWER_TRACKING BIT(17) /*For TX POWER TRACKING */
+#define COMP_BB_POWERSAVING BIT(18)
+#define COMP_SWAS BIT(19) /*For SW Antenna Switch */
+#define COMP_RF BIT(20) /*For RF. */
+#define COMP_TURBO BIT(21) /*For EDCA TURBO. */
+#define COMP_RATR BIT(22)
+#define COMP_CMD BIT(23)
+#define COMP_EFUSE BIT(24)
+#define COMP_QOS BIT(25)
+#define COMP_MAC80211 BIT(26)
+#define COMP_REGD BIT(27)
+#define COMP_CHAN BIT(28)
+#define COMP_EASY_CONCURRENT BIT(29)
+#define COMP_BT_COEXIST BIT(30)
+#define COMP_IQK BIT(31)
+
+/*--------------------------------------------------------------
+ Define the rt_print components
+--------------------------------------------------------------*/
+/* Define EEPROM and EFUSE check module bit*/
+#define EEPROM_W BIT(0)
+#define EFUSE_PG BIT(1)
+#define EFUSE_READ_ALL BIT(2)
+
+/* Define init check for module bit*/
+#define INIT_EEPROM BIT(0)
+#define INIT_TxPower BIT(1)
+#define INIT_IQK BIT(2)
+#define INIT_RF BIT(3)
+
+/* Define PHY-BB/RF/MAC check module bit */
+#define PHY_BBR BIT(0)
+#define PHY_BBW BIT(1)
+#define PHY_RFR BIT(2)
+#define PHY_RFW BIT(3)
+#define PHY_MACR BIT(4)
+#define PHY_MACW BIT(5)
+#define PHY_ALLR BIT(6)
+#define PHY_ALLW BIT(7)
+#define PHY_TXPWR BIT(8)
+#define PHY_PWRDIFF BIT(9)
+
+/* Define Dynamic Mechanism check module bit --> FDM */
+#define WA_IOT BIT(0)
+#define DM_PWDB BIT(1)
+#define DM_MONITOR BIT(2)
+#define DM_DIG BIT(3)
+#define DM_EDCA_TURBO BIT(4)
+
+enum dbgp_flag_e {
+ FQOS = 0,
+ FTX = 1,
+ FRX = 2,
+ FSEC = 3,
+ FMGNT = 4,
+ FMLME = 5,
+ FRESOURCE = 6,
+ FBEACON = 7,
+ FISR = 8,
+ FPHY = 9,
+ FMP = 10,
+ FEEPROM = 11,
+ FPWR = 12,
+ FDM = 13,
+ FDBGCtrl = 14,
+ FC2H = 15,
+ FBT = 16,
+ FINIT = 17,
+ FIOCTL = 18,
+ DBGP_TYPE_MAX
+};
+
+#define RT_ASSERT(_exp,fmt) \
+ do { \
+ if(!(_exp)) { \
+ printk(KERN_DEBUG "%s:%s(): ", KBUILD_MODNAME, \
+ __func__); \
+ printk fmt; \
+ } \
+ } while(0);
+
+#define RT_DISP(dbgtype, dbgflag, printstr)
+
+#define RT_TRACE(comp, level, fmt)\
+ do { \
+ if(unlikely(((comp) & rtlpriv->dbg.global_debugcomponents) && \
+ ((level) <= rtlpriv->dbg.global_debuglevel))) {\
+ printk(KERN_DEBUG "%s-%d:%s():<%lx-%x> ", \
+ KBUILD_MODNAME, \
+ rtlpriv->rtlhal.interfaceindex, __func__, \
+ in_interrupt(), in_atomic()); \
+ printk fmt; \
+ }\
+ } while(0);
+
+#define RTPRINT(rtlpriv, dbgtype, dbgflag, printstr) \
+ do { \
+ if (unlikely(rtlpriv->dbg.dbgp_type[dbgtype] & dbgflag)) { \
+ printk(KERN_DEBUG "%s: ", KBUILD_MODNAME); \
+ printk printstr; \
+ } \
+ } while(0);
+
+#define RT_PRINT_DATA(rtlpriv, _comp, _level, _titlestring, _hexdata, \
+ _hexdatalen) \
+ do {\
+ if(unlikely(((_comp) & rtlpriv->dbg.global_debugcomponents ) &&\
+ (_level <= rtlpriv->dbg.global_debuglevel ))) { \
+ int __i; \
+ u8* ptr = (u8*)_hexdata; \
+ printk(KERN_DEBUG "%s: ", KBUILD_MODNAME); \
+ printk(KERN_DEBUG "In process \"%s\" (pid %i):", \
+ current->comm, \
+ current->pid); \
+ printk(_titlestring); \
+ for( __i=0; __i<(int)_hexdatalen; __i++ ) { \
+ printk("%02X%s", ptr[__i], (((__i + 1) % 4) \
+ == 0)?" ":" ");\
+ if (((__i + 1) % 16) == 0) \
+ printk("\n"); \
+ } \
+ printk(KERN_DEBUG "\n"); \
+ } \
+ } while(0);
+
+void rtl_dbgp_flag_init(struct ieee80211_hw *hw);
+void rtl_proc_add_one(struct ieee80211_hw *hw);
+void rtl_proc_remove_one(struct ieee80211_hw *hw);
+void rtl_proc_add_topdir(void);
+void rtl_proc_remove_topdir(void);
+#endif
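Usage sketch (illustrative only, not part of this header): RT_TRACE() and RTPRINT() expand their last argument directly into a printk statement, so callers wrap the whole format-and-argument list in an extra set of parentheses, and a local rtlpriv must be in scope. Assuming a driver function where hw is available and channel is a placeholder variable:

	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 channel = 1;		/* placeholder value for the example */

	RT_TRACE(COMP_INIT, DBG_DMESG,
		 ("adapter start on channel %d\n", channel));
	RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
		("EEPROM map loaded\n"));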
diff --git a/drivers/staging/rtl8821ae/efuse.c b/drivers/staging/rtl8821ae/efuse.c
new file mode 100644
index 000000000000..74c19ecc95a9
--- /dev/null
+++ b/drivers/staging/rtl8821ae/efuse.c
@@ -0,0 +1,1285 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include "wifi.h"
+#include "efuse.h"
+#include "btcoexist/halbt_precomp.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#include <linux/export.h>
+#endif
+
+static const u8 MAX_PGPKT_SIZE = 9;
+static const u8 PGPKT_DATA_SIZE = 8;
+static const int EFUSE_MAX_SIZE = 512;
+
+static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = {
+ {0, 0, 0, 2},
+ {0, 1, 0, 2},
+ {0, 2, 0, 2},
+ {1, 0, 0, 1},
+ {1, 0, 1, 1},
+ {1, 1, 0, 1},
+ {1, 1, 1, 3},
+ {1, 3, 0, 17},
+ {3, 3, 1, 48},
+ {10, 0, 0, 6},
+ {10, 3, 0, 1},
+ {10, 3, 1, 1},
+ {11, 0, 0, 28}
+};
+
+static void efuse_shadow_read_1byte(struct ieee80211_hw *hw, u16 offset,
+ u8 * value);
+static void efuse_shadow_read_2byte(struct ieee80211_hw *hw, u16 offset,
+ u16 * value);
+static void efuse_shadow_read_4byte(struct ieee80211_hw *hw, u16 offset,
+ u32 * value);
+static void efuse_shadow_write_1byte(struct ieee80211_hw *hw, u16 offset,
+ u8 value);
+static void efuse_shadow_write_2byte(struct ieee80211_hw *hw, u16 offset,
+ u16 value);
+static void efuse_shadow_write_4byte(struct ieee80211_hw *hw, u16 offset,
+ u32 value);
+static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr,
+ u8 data);
+static void efuse_read_all_map(struct ieee80211_hw *hw, u8 * efuse);
+static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset,
+ u8 *data);
+static int efuse_pg_packet_write(struct ieee80211_hw *hw, u8 offset,
+ u8 word_en, u8 * data);
+static void efuse_word_enable_data_read(u8 word_en, u8 * sourdata,
+ u8 * targetdata);
+static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
+ u16 efuse_addr, u8 word_en, u8 * data);
+static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite,
+ u8 pwrstate);
+static u16 efuse_get_current_size(struct ieee80211_hw *hw);
+static u8 efuse_calculate_word_cnts(u8 word_en);
+
+void efuse_initialize(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 bytetemp;
+ u8 temp;
+
+ bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN] + 1);
+ temp = bytetemp | 0x20;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN] + 1, temp);
+
+ bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL] + 1);
+ temp = bytetemp & 0xFE;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL] + 1, temp);
+
+ bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3);
+ temp = bytetemp | 0x80;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, temp);
+
+ rtl_write_byte(rtlpriv, 0x2F8, 0x3);
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0x72);
+
+}
+
+u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 data;
+ u8 bytetemp;
+ u8 temp;
+ u32 k = 0;
+ const u32 efuse_real_content_len =
+ rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
+
+ if (address < efuse_real_content_len) {
+ temp = address & 0xFF;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+ temp);
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
+ temp = ((address >> 8) & 0x03) | (bytetemp & 0xFC);
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
+ temp);
+
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+ temp = bytetemp & 0x7F;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3,
+ temp);
+
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+ while (!(bytetemp & 0x80)) {
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->
+ maps[EFUSE_CTRL] + 3);
+ k++;
+ if (k == 1000) {
+ k = 0;
+ break;
+ }
+ }
+ data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+ return data;
+ } else
+ return 0xFF;
+
+}
+//EXPORT_SYMBOL(efuse_read_1byte);
+
+void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 bytetemp;
+ u8 temp;
+ u32 k = 0;
+ const u32 efuse_real_content_len =
+ rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
+
+ RT_TRACE(COMP_EFUSE, DBG_LOUD,
+ ("Addr=%x Data =%x\n", address, value));
+
+ if (address < efuse_real_content_len) {
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], value);
+
+ temp = address & 0xFF;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+ temp);
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
+
+ temp = ((address >> 8) & 0x03) | (bytetemp & 0xFC);
+ rtl_write_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 2, temp);
+
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+ temp = bytetemp | 0x80;
+ rtl_write_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3, temp);
+
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+
+ while (bytetemp & 0x80) {
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->
+ maps[EFUSE_CTRL] + 3);
+ k++;
+ if (k == 100) {
+ k = 0;
+ break;
+ }
+ }
+ }
+
+}
+
+void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 value32;
+ u8 readbyte;
+ u16 retry;
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+ (_offset & 0xff));
+ readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
+ ((_offset >> 8) & 0x03) | (readbyte & 0xfc));
+
+ readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3,
+ (readbyte & 0x7f));
+
+ retry = 0;
+ value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+ while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < 10000)) {
+ value32 = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL]);
+ retry++;
+ }
+
+ udelay(50);
+ value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+
+ *pbuf = (u8) (value32 & 0xff);
+}
+
+void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 efuse_tbl[rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]];
+ u8 rtemp8[1];
+ u16 efuse_addr = 0;
+ u8 offset, wren;
+ u8 u1temp = 0;
+ u16 i;
+ u16 j;
+ const u16 efuse_max_section =
+ rtlpriv->cfg->maps[EFUSE_MAX_SECTION_MAP];
+ const u32 efuse_real_content_len =
+ rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
+ u16 efuse_word[efuse_max_section][EFUSE_MAX_WORD_UNIT];
+ u16 efuse_utilized = 0;
+ u8 efuse_usage;
+
+ if ((_offset + _size_byte) > rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]) {
+ RT_TRACE(COMP_EFUSE, DBG_LOUD,
+ ("read_efuse(): Invalid offset(%#x) with read "
+ "bytes(%#x)!!\n", _offset, _size_byte));
+ return;
+ }
+
+ for (i = 0; i < efuse_max_section; i++)
+ for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++)
+ efuse_word[i][j] = 0xFFFF;
+
+ read_efuse_byte(hw, efuse_addr, rtemp8);
+ if (*rtemp8 != 0xFF) {
+ efuse_utilized++;
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
+ ("Addr=%d\n", efuse_addr));
+ efuse_addr++;
+ }
+
+ while ((*rtemp8 != 0xFF) && (efuse_addr < efuse_real_content_len)) {
+ /* Check PG header for section num. */
+ if((*rtemp8 & 0x1F ) == 0x0F) {/* extended header */
+ u1temp =( (*rtemp8 & 0xE0) >> 5);
+ read_efuse_byte(hw, efuse_addr, rtemp8);
+
+ if((*rtemp8 & 0x0F) == 0x0F) {
+ efuse_addr++;
+ read_efuse_byte(hw, efuse_addr, rtemp8);
+
+ if (*rtemp8 != 0xFF &&
+ (efuse_addr < efuse_real_content_len)) {
+ efuse_addr++;
+ }
+ continue;
+ } else {
+ offset = ((*rtemp8 & 0xF0) >> 1) | u1temp;
+ wren = (*rtemp8 & 0x0F);
+ efuse_addr++;
+ }
+ } else {
+ offset = ((*rtemp8 >> 4) & 0x0f);
+ wren = (*rtemp8 & 0x0f);
+ }
+
+ if (offset < efuse_max_section) {
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
+ ("offset-%d Worden=%x\n", offset, wren));
+
+ for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+ if (!(wren & 0x01)) {
+ RTPRINT(rtlpriv, FEEPROM,
+ EFUSE_READ_ALL, ("Addr=%d\n",
+ efuse_addr));
+
+ read_efuse_byte(hw, efuse_addr, rtemp8);
+ efuse_addr++;
+ efuse_utilized++;
+ efuse_word[offset][i] = (*rtemp8 &
+ 0xff);
+
+ if (efuse_addr >=
+ efuse_real_content_len)
+ break;
+
+ RTPRINT(rtlpriv, FEEPROM,
+ EFUSE_READ_ALL, ("Addr=%d\n",
+ efuse_addr));
+
+ read_efuse_byte(hw, efuse_addr, rtemp8);
+ efuse_addr++;
+ efuse_utilized++;
+ efuse_word[offset][i] |=
+ (((u16) * rtemp8 << 8) & 0xff00);
+
+ if (efuse_addr >= efuse_real_content_len)
+ break;
+ }
+
+ wren >>= 1;
+ }
+ }
+
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
+ ("Addr=%d\n", efuse_addr));
+ read_efuse_byte(hw, efuse_addr, rtemp8);
+ if (*rtemp8 != 0xFF && (efuse_addr < efuse_real_content_len)) {
+ efuse_utilized++;
+ efuse_addr++;
+ }
+ }
+
+ for (i = 0; i < efuse_max_section; i++) {
+ for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) {
+ efuse_tbl[(i * 8) + (j * 2)] =
+ (efuse_word[i][j] & 0xff);
+ efuse_tbl[(i * 8) + ((j * 2) + 1)] =
+ ((efuse_word[i][j] >> 8) & 0xff);
+ }
+ }
+
+ for (i = 0; i < _size_byte; i++)
+ pbuf[i] = efuse_tbl[_offset + i];
+
+ rtlefuse->efuse_usedbytes = efuse_utilized;
+ efuse_usage = (u8) ((efuse_utilized * 100) / efuse_real_content_len);
+ rtlefuse->efuse_usedpercentage = efuse_usage;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES,
+ (u8 *) & efuse_utilized);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE,
+ (u8 *) & efuse_usage);
+}
+
+bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 section_idx, i, Base;
+ u16 words_need = 0, hdr_num = 0, totalbytes, efuse_used;
+ bool bwordchanged, bresult = true;
+
+ for (section_idx = 0; section_idx < 16; section_idx++) {
+ Base = section_idx * 8;
+ bwordchanged = false;
+
+ for (i = 0; i < 8; i = i + 2) {
+ if ((rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i] !=
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i]) ||
+ (rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i + 1] !=
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i +
+ 1])) {
+ words_need++;
+ bwordchanged = true;
+ }
+ }
+
+ if (bwordchanged == true)
+ hdr_num++;
+ }
+
+ totalbytes = hdr_num + words_need * 2;
+ efuse_used = rtlefuse->efuse_usedbytes;
+
+ if ((totalbytes + efuse_used) >= (EFUSE_MAX_SIZE -
+ rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))
+ bresult = false;
+
+ RT_TRACE(COMP_EFUSE, DBG_LOUD,
+ ("efuse_shadow_update_chk(): totalbytes(%#x), "
+ "hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
+ totalbytes, hdr_num, words_need, efuse_used));
+
+ return bresult;
+}
+
+void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
+ u16 offset, u32 *value)
+{
+ if (type == 1)
+ efuse_shadow_read_1byte(hw, offset, (u8 *) value);
+ else if (type == 2)
+ efuse_shadow_read_2byte(hw, offset, (u16 *) value);
+ else if (type == 4)
+ efuse_shadow_read_4byte(hw, offset, (u32 *) value);
+
+}
+
+void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset,
+ u32 value)
+{
+ if (type == 1)
+ efuse_shadow_write_1byte(hw, offset, (u8) value);
+ else if (type == 2)
+ efuse_shadow_write_2byte(hw, offset, (u16) value);
+ else if (type == 4)
+ efuse_shadow_write_4byte(hw, offset, (u32) value);
+
+}
+
+bool efuse_shadow_update(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u16 i, offset, base;
+ u8 word_en = 0x0F;
+ u8 first_pg = false;
+
+ RT_TRACE(COMP_EFUSE, DBG_LOUD, ("\n"));
+
+ if (!efuse_shadow_update_chk(hw)) {
+ efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
+ memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+ &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
+
+ RT_TRACE(COMP_EFUSE, DBG_LOUD,
+ ("efuse out of capacity!!\n"));
+ return false;
+ }
+ efuse_power_switch(hw, true, true);
+
+ for (offset = 0; offset < 16; offset++) {
+
+ word_en = 0x0F;
+ base = offset * 8;
+
+ for (i = 0; i < 8; i++) {
+ if (first_pg == true) {
+
+ word_en &= ~(BIT(i / 2));
+
+ rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] =
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i];
+ } else {
+
+ if (rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] !=
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i]) {
+ word_en &= ~(BIT(i / 2));
+
+ rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] =
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i];
+ }
+ }
+ }
+
+ if (word_en != 0x0F) {
+ u8 tmpdata[8];
+ memcpy(tmpdata, (&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base]), 8);
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("U-efuse\n"), tmpdata, 8);
+
+ if (!efuse_pg_packet_write(hw, (u8) offset, word_en,
+ tmpdata)) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("PG section(%#x) fail!!\n", offset));
+ break;
+ }
+ }
+
+ }
+
+ efuse_power_switch(hw, true, false);
+ efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
+
+ memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+ &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
+
+ RT_TRACE(COMP_EFUSE, DBG_LOUD, ("\n"));
+ return true;
+}
+
+void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ if (rtlefuse->autoload_failflag == true) {
+ memset((&rtlefuse->efuse_map[EFUSE_INIT_MAP][0]),
+ 0xFF, rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
+ } else {
+ efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
+ }
+
+ memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+ &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
+
+}
+//EXPORT_SYMBOL(rtl_efuse_shadow_map_update);
+
+void efuse_force_write_vendor_Id(struct ieee80211_hw *hw)
+{
+ u8 tmpdata[8] = { 0xFF, 0xFF, 0xEC, 0x10, 0xFF, 0xFF, 0xFF, 0xFF };
+
+ efuse_power_switch(hw, true, true);
+
+ efuse_pg_packet_write(hw, 1, 0xD, tmpdata);
+
+ efuse_power_switch(hw, true, false);
+
+}
+
+void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx)
+{
+}
+
+static void efuse_shadow_read_1byte(struct ieee80211_hw *hw,
+ u16 offset, u8 *value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
+}
+
+static void efuse_shadow_read_2byte(struct ieee80211_hw *hw,
+ u16 offset, u16 *value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
+ *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] << 8;
+
+}
+
+static void efuse_shadow_read_4byte(struct ieee80211_hw *hw,
+ u16 offset, u32 *value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
+ *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] << 8;
+ *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 2] << 16;
+ *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 3] << 24;
+}
+
+static void efuse_shadow_write_1byte(struct ieee80211_hw *hw,
+ u16 offset, u8 value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] = value;
+}
+
+static void efuse_shadow_write_2byte(struct ieee80211_hw *hw,
+ u16 offset, u16 value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] = value & 0x00FF;
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] = value >> 8;
+
+}
+
+static void efuse_shadow_write_4byte(struct ieee80211_hw *hw,
+ u16 offset, u32 value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] =
+ (u8) (value & 0x000000FF);
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] =
+ (u8) ((value >> 8) & 0x0000FF);
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 2] =
+ (u8) ((value >> 16) & 0x00FF);
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 3] =
+ (u8) ((value >> 24) & 0xFF);
+
+}
+
+int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmpidx = 0;
+ int bresult;
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+ (u8) (addr & 0xff));
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
+ ((u8) ((addr >> 8) & 0x03)) |
+ (rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 2) &
+ 0xFC));
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0x72);
+
+ while (!(0x80 & rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3))
+ && (tmpidx < 100)) {
+ tmpidx++;
+ }
+
+ if (tmpidx < 100) {
+ *data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+ bresult = true;
+ } else {
+ *data = 0xff;
+ bresult = false;
+ }
+ return bresult;
+}
+//EXPORT_SYMBOL(efuse_one_byte_read);
+
+static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmpidx = 0;
+ bool bresult;
+
+ RT_TRACE(COMP_EFUSE, DBG_LOUD,
+ ("Addr = %x Data=%x\n", addr, data));
+
+ rtl_write_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 1, (u8) (addr & 0xff));
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
+ (rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] +
+ 2) & 0xFC) | (u8) ((addr >> 8) & 0x03));
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], data);
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0xF2);
+
+ while ((0x80 & rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3))
+ && (tmpidx < 100)) {
+ tmpidx++;
+ }
+
+ if (tmpidx < 100)
+ bresult = true;
+ else
+ bresult = false;
+
+ return bresult;
+}
+
+static void efuse_read_all_map(struct ieee80211_hw *hw, u8 * efuse)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ efuse_power_switch(hw, false, true);
+ read_efuse(hw, 0, rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE], efuse);
+ efuse_power_switch(hw, false, false);
+}
+
+static void efuse_read_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
+ u8 efuse_data, u8 offset, u8 *tmpdata,
+ u8 *readstate)
+{
+ bool bdataempty = true;
+ u8 hoffset;
+ u8 tmpidx;
+ u8 hworden;
+ u8 word_cnts;
+
+ hoffset = (efuse_data >> 4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ word_cnts = efuse_calculate_word_cnts(hworden);
+
+ if (hoffset == offset) {
+ for (tmpidx = 0; tmpidx < word_cnts * 2; tmpidx++) {
+ if (efuse_one_byte_read(hw, *efuse_addr + 1 + tmpidx,
+ &efuse_data)) {
+ tmpdata[tmpidx] = efuse_data;
+ if (efuse_data != 0xff)
+ bdataempty = true;
+ }
+ }
+
+ if (bdataempty == true) {
+ *readstate = PG_STATE_DATA;
+ } else {
+ *efuse_addr = *efuse_addr + (word_cnts * 2) + 1;
+ *readstate = PG_STATE_HEADER;
+ }
+
+ } else {
+ *efuse_addr = *efuse_addr + (word_cnts * 2) + 1;
+ *readstate = PG_STATE_HEADER;
+ }
+}
+
+static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
+{
+ u8 readstate = PG_STATE_HEADER;
+
+ bool bcontinual = true;
+
+ u8 efuse_data, word_cnts = 0;
+ u16 efuse_addr = 0;
+ u8 hworden = 0;
+ u8 tmpdata[8];
+
+ if (data == NULL)
+ return false;
+ if (offset > 15)
+ return false;
+
+ memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
+ memset(tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
+
+ while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) {
+ if (readstate & PG_STATE_HEADER) {
+ if (efuse_one_byte_read(hw, efuse_addr, &efuse_data)
+ && (efuse_data != 0xFF))
+ efuse_read_data_case1(hw, &efuse_addr, efuse_data, offset,
+ tmpdata, &readstate);
+ else
+ bcontinual = false;
+ } else if (readstate & PG_STATE_DATA) {
+ efuse_word_enable_data_read(hworden, tmpdata, data);
+ efuse_addr = efuse_addr + (word_cnts * 2) + 1;
+ readstate = PG_STATE_HEADER;
+ }
+
+ }
+
+ if ((data[0] == 0xff) && (data[1] == 0xff) &&
+ (data[2] == 0xff) && (data[3] == 0xff) &&
+ (data[4] == 0xff) && (data[5] == 0xff) &&
+ (data[6] == 0xff) && (data[7] == 0xff))
+ return false;
+ else
+ return true;
+
+}
+
+static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
+ u8 efuse_data, u8 offset, int *bcontinual,
+ u8 *write_state, struct pgpkt_struct *target_pkt,
+ int *repeat_times, int *bresult, u8 word_en)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct pgpkt_struct tmp_pkt;
+ int bdataempty = true;
+ u8 originaldata[8 * sizeof(u8)];
+ u8 badworden = 0x0F;
+ u8 match_word_en, tmp_word_en;
+ u8 tmpindex;
+ u8 tmp_header = efuse_data;
+ u8 tmp_word_cnts;
+
+ tmp_pkt.offset = (tmp_header >> 4) & 0x0F;
+ tmp_pkt.word_en = tmp_header & 0x0F;
+ tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
+
+ if (tmp_pkt.offset != target_pkt->offset) {
+ *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
+ *write_state = PG_STATE_HEADER;
+ } else {
+ for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) {
+ if (efuse_one_byte_read(hw,
+ (*efuse_addr + 1 + tmpindex),
+ &efuse_data) && (efuse_data != 0xFF))
+ bdataempty = false;
+ }
+
+ if (bdataempty == false) {
+ *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
+ *write_state = PG_STATE_HEADER;
+ } else {
+ match_word_en = 0x0F;
+ if (!((target_pkt->word_en & BIT(0)) |
+ (tmp_pkt.word_en & BIT(0))))
+ match_word_en &= (~BIT(0));
+
+ if (!((target_pkt->word_en & BIT(1)) |
+ (tmp_pkt.word_en & BIT(1))))
+ match_word_en &= (~BIT(1));
+
+ if (!((target_pkt->word_en & BIT(2)) |
+ (tmp_pkt.word_en & BIT(2))))
+ match_word_en &= (~BIT(2));
+
+ if (!((target_pkt->word_en & BIT(3)) |
+ (tmp_pkt.word_en & BIT(3))))
+ match_word_en &= (~BIT(3));
+
+ if ((match_word_en & 0x0F) != 0x0F) {
+ badworden = efuse_word_enable_data_write(hw,
+ *efuse_addr + 1,
+ tmp_pkt.word_en,
+ target_pkt->data);
+
+ if (0x0F != (badworden & 0x0F)) {
+ u8 reorg_offset = offset;
+ u8 reorg_worden = badworden;
+ efuse_pg_packet_write(hw, reorg_offset,
+ reorg_worden,
+ originaldata);
+ }
+
+ tmp_word_en = 0x0F;
+ if ((target_pkt->word_en & BIT(0)) ^
+ (match_word_en & BIT(0)))
+ tmp_word_en &= (~BIT(0));
+
+ if ((target_pkt->word_en & BIT(1)) ^
+ (match_word_en & BIT(1)))
+ tmp_word_en &= (~BIT(1));
+
+ if ((target_pkt->word_en & BIT(2)) ^
+ (match_word_en & BIT(2)))
+ tmp_word_en &= (~BIT(2));
+
+ if ((target_pkt->word_en & BIT(3)) ^
+ (match_word_en & BIT(3)))
+ tmp_word_en &= (~BIT(3));
+
+ if ((tmp_word_en & 0x0F) != 0x0F) {
+ *efuse_addr = efuse_get_current_size(hw);
+ target_pkt->offset = offset;
+ target_pkt->word_en = tmp_word_en;
+ } else {
+ *bcontinual = false;
+ }
+ *write_state = PG_STATE_HEADER;
+ *repeat_times += 1;
+ if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
+ *bcontinual = false;
+ *bresult = false;
+ }
+ } else {
+ *efuse_addr += (2 * tmp_word_cnts) + 1;
+ target_pkt->offset = offset;
+ target_pkt->word_en = word_en;
+ *write_state = PG_STATE_HEADER;
+ }
+ }
+ }
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, ("efuse PG_STATE_HEADER-1\n"));
+}
+
+static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
+ int *bcontinual, u8 *write_state,
+ struct pgpkt_struct target_pkt,
+ int *repeat_times, int *bresult)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct pgpkt_struct tmp_pkt;
+ u8 pg_header;
+ u8 tmp_header;
+ u8 originaldata[8 * sizeof(u8)];
+ u8 tmp_word_cnts;
+ u8 badworden = 0x0F;
+
+ pg_header = ((target_pkt.offset << 4) & 0xf0) | target_pkt.word_en;
+ efuse_one_byte_write(hw, *efuse_addr, pg_header);
+ efuse_one_byte_read(hw, *efuse_addr, &tmp_header);
+
+ if (tmp_header == pg_header) {
+ *write_state = PG_STATE_DATA;
+ } else if (tmp_header == 0xFF) {
+ *write_state = PG_STATE_HEADER;
+ *repeat_times += 1;
+ if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
+ *bcontinual = false;
+ *bresult = false;
+ }
+ } else {
+ tmp_pkt.offset = (tmp_header >> 4) & 0x0F;
+ tmp_pkt.word_en = tmp_header & 0x0F;
+
+ tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
+
+ memset(originaldata, 0xff, 8 * sizeof(u8));
+
+ if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) {
+ badworden = efuse_word_enable_data_write(hw,
+ *efuse_addr + 1,
+ tmp_pkt.word_en,
+ originaldata);
+
+ if (0x0F != (badworden & 0x0F)) {
+ u8 reorg_offset = tmp_pkt.offset;
+ u8 reorg_worden = badworden;
+ efuse_pg_packet_write(hw, reorg_offset,
+ reorg_worden,
+ originaldata);
+ *efuse_addr = efuse_get_current_size(hw);
+ } else {
+ *efuse_addr = *efuse_addr +
+ (tmp_word_cnts * 2) + 1;
+ }
+ } else {
+ *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
+ }
+
+ *write_state = PG_STATE_HEADER;
+ *repeat_times += 1;
+ if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
+ *bcontinual = false;
+ *bresult = false;
+ }
+
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+ ("efuse PG_STATE_HEADER-2\n"));
+ }
+}
+
+static int efuse_pg_packet_write(struct ieee80211_hw *hw,
+ u8 offset, u8 word_en, u8 *data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct pgpkt_struct target_pkt;
+ u8 write_state = PG_STATE_HEADER;
+ int bcontinual = true, bdataempty = true, bresult = true;
+ u16 efuse_addr = 0;
+ u8 efuse_data;
+ u8 target_word_cnts = 0;
+ u8 badworden = 0x0F;
+ static int repeat_times = 0;
+
+ if (efuse_get_current_size(hw) >= (EFUSE_MAX_SIZE -
+ rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) {
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+ ("efuse_pg_packet_write error \n"));
+ return false;
+ }
+
+ target_pkt.offset = offset;
+ target_pkt.word_en = word_en;
+
+ memset(target_pkt.data, 0xFF, 8 * sizeof(u8));
+
+ efuse_word_enable_data_read(word_en, data, target_pkt.data);
+ target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en);
+
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, ("efuse Power ON\n"));
+
+ while (bcontinual && (efuse_addr < (EFUSE_MAX_SIZE -
+ rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))) {
+
+ if (write_state == PG_STATE_HEADER) {
+ bdataempty = true;
+ badworden = 0x0F;
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+ ("efuse PG_STATE_HEADER\n"));
+
+ if (efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
+ (efuse_data != 0xFF))
+ efuse_write_data_case1(hw, &efuse_addr,
+ efuse_data, offset,
+ &bcontinual,
+ &write_state,
+ &target_pkt,
+ &repeat_times, &bresult,
+ word_en);
+ else
+ efuse_write_data_case2(hw, &efuse_addr,
+ &bcontinual,
+ &write_state,
+ target_pkt,
+ &repeat_times,
+ &bresult);
+
+ } else if (write_state == PG_STATE_DATA) {
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+ ("efuse PG_STATE_DATA\n"));
+ badworden = 0x0f;
+ badworden =
+ efuse_word_enable_data_write(hw, efuse_addr + 1,
+ target_pkt.word_en,
+ target_pkt.data);
+
+ if ((badworden & 0x0F) == 0x0F) {
+ bcontinual = false;
+ } else {
+ efuse_addr =
+ efuse_addr + (2 * target_word_cnts) + 1;
+
+ target_pkt.offset = offset;
+ target_pkt.word_en = badworden;
+ target_word_cnts =
+ efuse_calculate_word_cnts(target_pkt.
+ word_en);
+ write_state = PG_STATE_HEADER;
+ repeat_times++;
+ if (repeat_times > EFUSE_REPEAT_THRESHOLD_) {
+ bcontinual = false;
+ bresult = false;
+ }
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+ ("efuse PG_STATE_HEADER-3\n"));
+ }
+ }
+ }
+
+ if (efuse_addr >= (EFUSE_MAX_SIZE -
+ rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) {
+ RT_TRACE(COMP_EFUSE, DBG_LOUD,
+ ("efuse_addr(%#x) Out of size!!\n", efuse_addr));
+ }
+
+ return true;
+}
+
+static void efuse_word_enable_data_read(u8 word_en, u8 * sourdata,
+ u8 *targetdata)
+{
+ if (!(word_en & BIT(0))) {
+ targetdata[0] = sourdata[0];
+ targetdata[1] = sourdata[1];
+ }
+
+ if (!(word_en & BIT(1))) {
+ targetdata[2] = sourdata[2];
+ targetdata[3] = sourdata[3];
+ }
+
+ if (!(word_en & BIT(2))) {
+ targetdata[4] = sourdata[4];
+ targetdata[5] = sourdata[5];
+ }
+
+ if (!(word_en & BIT(3))) {
+ targetdata[6] = sourdata[6];
+ targetdata[7] = sourdata[7];
+ }
+}
+
+static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
+ u16 efuse_addr, u8 word_en, u8 *data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 tmpaddr;
+ u16 start_addr = efuse_addr;
+ u8 badworden = 0x0F;
+ u8 tmpdata[8];
+
+ memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
+ RT_TRACE(COMP_EFUSE, DBG_LOUD,
+ ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr));
+
+ if (!(word_en & BIT(0))) {
+ tmpaddr = start_addr;
+ efuse_one_byte_write(hw, start_addr++, data[0]);
+ efuse_one_byte_write(hw, start_addr++, data[1]);
+
+ efuse_one_byte_read(hw, tmpaddr, &tmpdata[0]);
+ efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[1]);
+ if ((data[0] != tmpdata[0]) || (data[1] != tmpdata[1]))
+ badworden &= (~BIT(0));
+ }
+
+ if (!(word_en & BIT(1))) {
+ tmpaddr = start_addr;
+ efuse_one_byte_write(hw, start_addr++, data[2]);
+ efuse_one_byte_write(hw, start_addr++, data[3]);
+
+ efuse_one_byte_read(hw, tmpaddr, &tmpdata[2]);
+ efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[3]);
+ if ((data[2] != tmpdata[2]) || (data[3] != tmpdata[3]))
+ badworden &= (~BIT(1));
+ }
+
+ if (!(word_en & BIT(2))) {
+ tmpaddr = start_addr;
+ efuse_one_byte_write(hw, start_addr++, data[4]);
+ efuse_one_byte_write(hw, start_addr++, data[5]);
+
+ efuse_one_byte_read(hw, tmpaddr, &tmpdata[4]);
+ efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[5]);
+ if ((data[4] != tmpdata[4]) || (data[5] != tmpdata[5]))
+ badworden &= (~BIT(2));
+ }
+
+ if (!(word_en & BIT(3))) {
+ tmpaddr = start_addr;
+ efuse_one_byte_write(hw, start_addr++, data[6]);
+ efuse_one_byte_write(hw, start_addr++, data[7]);
+
+ efuse_one_byte_read(hw, tmpaddr, &tmpdata[6]);
+ efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[7]);
+ if ((data[6] != tmpdata[6]) || (data[7] != tmpdata[7]))
+ badworden &= (~BIT(3));
+ }
+
+ return badworden;
+}
+
+static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite, u8 pwrstate)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tempval;
+ u16 tmpV16;
+
+ if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ {
+ if (pwrstate == true)
+ {
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], 0x69);
+
+ // 1.2V Power: From VDDON with Power Cut(0x0000h[15]), default valid
+ tmpV16 = rtl_read_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_ISO_CTRL]);
+
+ printk("SYS_ISO_CTRL=%04x.\n",tmpV16);
+ if( ! (tmpV16 & PWC_EV12V ) ){
+ tmpV16 |= PWC_EV12V ;
+ //PlatformEFIOWrite2Byte(pAdapter,REG_SYS_ISO_CTRL,tmpV16);
+ }
+ // Reset: 0x0000h[28], default valid
+ tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN]);
+ printk("SYS_FUNC_EN=%04x.\n",tmpV16);
+ if( !(tmpV16 & FEN_ELDR) ){
+ tmpV16 |= FEN_ELDR ;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN], tmpV16);
+ }
+
+ // Clock: Gated(0x0008h[5]) 8M(0x0008h[1]) clock from ANA, default valid
+ tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK] );
+ printk("SYS_CLK=%04x.\n",tmpV16);
+ if( (!(tmpV16 & LOADER_CLK_EN) ) ||(!(tmpV16 & ANA8M) ) )
+ {
+ tmpV16 |= (LOADER_CLK_EN |ANA8M ) ;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_CLK], tmpV16);
+ }
+
+ if(bwrite == true)
+ {
+ // Enable LDO 2.5V before read/write action
+ tempval = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3);
+ printk("EFUSE_TEST=%04x.\n",tmpV16);
+ tempval &= ~(BIT(3) | BIT(4) |BIT(5) | BIT(6));
+ tempval |= (VOLTAGE_V25 << 3);
+ tempval |= BIT(7);
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, tempval);
+ }
+ }
+ else
+ {
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], 0x00);
+ if(bwrite == true){
+ // Disable LDO 2.5V after read/write action
+ tempval = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3);
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, (tempval & 0x7F));
+ }
+ }
+ }
+ else
+ {
+ if (pwrstate == true && (rtlhal->hw_type !=
+ HARDWARE_TYPE_RTL8192SE)) {
+
+ if(rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE)
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS],
+ 0x69);
+
+ tmpV16 = rtl_read_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_ISO_CTRL]);
+ if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) {
+ tmpV16 |= rtlpriv->cfg->maps[EFUSE_PWC_EV12V];
+ rtl_write_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_ISO_CTRL],
+ tmpV16);
+ }
+
+ tmpV16 = rtl_read_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_FUNC_EN]);
+ if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_FEN_ELDR])) {
+ tmpV16 |= rtlpriv->cfg->maps[EFUSE_FEN_ELDR];
+ rtl_write_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_FUNC_EN], tmpV16);
+ }
+
+ tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK]);
+ if ((!(tmpV16 & rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN])) ||
+ (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_ANA8M]))) {
+ tmpV16 |= (rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN] |
+ rtlpriv->cfg->maps[EFUSE_ANA8M]);
+ rtl_write_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_CLK], tmpV16);
+ }
+ }
+
+ if (pwrstate == true) {
+ if (bwrite == true) {
+ tempval = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_TEST] +
+ 3);
+
+ if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE) {
+ tempval &= 0x0F;
+ tempval |= (VOLTAGE_V25 << 4);
+ }
+
+ rtl_write_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_TEST] + 3,
+ (tempval | 0x80));
+ }
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CLK],
+ 0x03);
+ }
+
+ } else {
+ if(rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE)
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], 0);
+
+ if (bwrite == true) {
+ tempval = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_TEST] +
+ 3);
+ rtl_write_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_TEST] + 3,
+ (tempval & 0x7F));
+ }
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CLK],
+ 0x02);
+ }
+
+ }
+ }
+
+}
+
+static u16 efuse_get_current_size(struct ieee80211_hw *hw)
+{
+ int bcontinual = true;
+ u16 efuse_addr = 0;
+ u8 hoffset, hworden;
+ u8 efuse_data, word_cnts;
+
+ while (bcontinual && efuse_one_byte_read(hw, efuse_addr, &efuse_data)
+ && (efuse_addr < EFUSE_MAX_SIZE)) {
+ if (efuse_data != 0xFF) {
+ hoffset = (efuse_data >> 4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ word_cnts = efuse_calculate_word_cnts(hworden);
+ efuse_addr = efuse_addr + (word_cnts * 2) + 1;
+ } else {
+ bcontinual = false;
+ }
+ }
+
+ return efuse_addr;
+}
+
+static u8 efuse_calculate_word_cnts(u8 word_en)
+{
+ u8 word_cnts = 0;
+ if (!(word_en & BIT(0)))
+ word_cnts++;
+ if (!(word_en & BIT(1)))
+ word_cnts++;
+ if (!(word_en & BIT(2)))
+ word_cnts++;
+ if (!(word_en & BIT(3)))
+ word_cnts++;
+ return word_cnts;
+}
+
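Worked example (editorial, not part of the commit): word_en is an active-low nibble with one bit per 16-bit word of an 8-byte efuse section, which is why efuse_force_write_vendor_Id() above passes 0xD to program only bytes 2 and 3 of section 1:

	u8 word_en = 0x0D;	/* binary 1101: only BIT(1) is cleared */
	u8 words;

	words = efuse_calculate_word_cnts(word_en);	/* returns 1 */
	/* The PG packet is therefore 1 header byte plus words * 2 = 2 data
	 * bytes, and only data[2]/data[3] of the 8-byte section are written. */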
diff --git a/drivers/staging/rtl8821ae/efuse.h b/drivers/staging/rtl8821ae/efuse.h
new file mode 100644
index 000000000000..a9fcbe05cf9a
--- /dev/null
+++ b/drivers/staging/rtl8821ae/efuse.h
@@ -0,0 +1,130 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_EFUSE_H_
+#define __RTL_EFUSE_H_
+
+#define EFUSE_IC_ID_OFFSET 506
+
+/*
+#define EFUSE_REAL_CONTENT_LEN 512
+#define EFUSE_MAP_LEN 128
+#define EFUSE_MAX_SECTION 16
+#define EFUSE_MAX_WORD_UNIT 4
+#define EFUSE_IC_ID_OFFSET 506
+*/
+
+#define EFUSE_MAX_WORD_UNIT 4
+
+#define EFUSE_INIT_MAP 0
+#define EFUSE_MODIFY_MAP 1
+
+#define PG_STATE_HEADER 0x01
+#define PG_STATE_WORD_0 0x02
+#define PG_STATE_WORD_1 0x04
+#define PG_STATE_WORD_2 0x08
+#define PG_STATE_WORD_3 0x10
+#define PG_STATE_DATA 0x20
+
+#define PG_SWBYTE_H 0x01
+#define PG_SWBYTE_L 0x02
+
+#define _POWERON_DELAY_
+#define _PRE_EXECUTE_READ_CMD_
+
+#define EFUSE_REPEAT_THRESHOLD_ 3
+#define EFUSE_ERROE_HANDLE 1
+
+struct efuse_map {
+ u8 offset;
+ u8 word_start;
+ u8 byte_start;
+ u8 byte_cnts;
+};
+
+struct pgpkt_struct {
+ u8 offset;
+ u8 word_en;
+ u8 data[8];
+};
+
+enum efuse_data_item {
+ EFUSE_CHIP_ID = 0,
+ EFUSE_LDO_SETTING,
+ EFUSE_CLK_SETTING,
+ EFUSE_SDIO_SETTING,
+ EFUSE_CCCR,
+ EFUSE_SDIO_MODE,
+ EFUSE_OCR,
+ EFUSE_F0CIS,
+ EFUSE_F1CIS,
+ EFUSE_MAC_ADDR,
+ EFUSE_EEPROM_VER,
+ EFUSE_CHAN_PLAN,
+ EFUSE_TXPW_TAB
+};
+
+enum {
+ VOLTAGE_V25 = 0x03,
+ LDOE25_SHIFT = 28,
+};
+
+struct efuse_priv {
+ u8 id[2];
+ u8 ldo_setting[2];
+ u8 clk_setting[2];
+ u8 cccr;
+ u8 sdio_mode;
+ u8 ocr[3];
+ u8 cis0[17];
+ u8 cis1[48];
+ u8 mac_addr[6];
+ u8 eeprom_verno;
+ u8 channel_plan;
+ u8 tx_power_b[14];
+ u8 tx_power_g[14];
+};
+
+extern void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
+extern void efuse_initialize(struct ieee80211_hw *hw);
+extern u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
+extern int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data);
+extern void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
+extern void read_efuse(struct ieee80211_hw *hw, u16 _offset,
+ u16 _size_byte, u8 * pbuf);
+extern void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
+ u16 offset, u32 * value);
+extern void efuse_shadow_write(struct ieee80211_hw *hw, u8 type,
+ u16 offset, u32 value);
+extern bool efuse_shadow_update(struct ieee80211_hw *hw);
+extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
+extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
+extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
+extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
+#endif
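Illustrative call sequence (not part of the commit; the 0x0A offset is inferred from efuse_force_write_vendor_Id(), which places the vendor ID in word 1 of section 1, i.e. shadow-map bytes 10-11, and may not apply to every chip). Assuming a driver function where hw is in scope:

	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 vid = 0;

	rtl_efuse_shadow_map_update(hw);		/* re-read efuse into the shadow map */
	efuse_shadow_read(hw, 2, 0x0A, &vid);		/* type 2 = 16-bit read */
	efuse_shadow_write(hw, 2, 0x0A, 0x10EC);	/* stage a new value */
	if (!efuse_shadow_update(hw))			/* program only the changed words */
		RT_TRACE(COMP_EFUSE, DBG_WARNING,
			 ("efuse shadow update failed\n"));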
diff --git a/drivers/staging/rtl8821ae/pci.c b/drivers/staging/rtl8821ae/pci.c
new file mode 100644
index 000000000000..cfa651edd238
--- /dev/null
+++ b/drivers/staging/rtl8821ae/pci.c
@@ -0,0 +1,2549 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "core.h"
+#include "wifi.h"
+#include "pci.h"
+#include "base.h"
+#include "ps.h"
+#include "efuse.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#include <linux/export.h>
+#endif
+
+static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
+ INTEL_VENDOR_ID,
+ ATI_VENDOR_ID,
+ AMD_VENDOR_ID,
+ SIS_VENDOR_ID
+};
+
+static const u8 ac_to_hwq[] = {
+ VO_QUEUE,
+ VI_QUEUE,
+ BE_QUEUE,
+ BK_QUEUE
+};
+
+u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u16 fc = rtl_get_fc(skb);
+ u8 queue_index = skb_get_queue_mapping(skb);
+
+ if (unlikely(ieee80211_is_beacon(fc)))
+ return BEACON_QUEUE;
+ if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
+ return MGNT_QUEUE;
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
+ if (ieee80211_is_nullfunc(fc))
+ return HIGH_QUEUE;
+
+ return ac_to_hwq[queue_index];
+}
+
+/* Update PCI dependent default settings*/
+static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+ u8 init_aspm;
+
+ ppsc->reg_rfps_level = 0;
+ ppsc->b_support_aspm = 0;
+
+ /*Update PCI ASPM setting */
+ ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
+ switch (rtlpci->const_pci_aspm) {
+ case 0:
+ /*No ASPM */
+ break;
+
+ case 1:
+ /*ASPM dynamically enabled/disable. */
+ ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM;
+ break;
+
+ case 2:
+ /*ASPM with Clock Req dynamically enabled/disable. */
+ ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM |
+ RT_RF_OFF_LEVL_CLK_REQ);
+ break;
+
+ case 3:
+ /*
+ * Always enable ASPM and Clock Req
+ * from initialization to halt.
+ * */
+ ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
+ ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
+ RT_RF_OFF_LEVL_CLK_REQ);
+ break;
+
+ case 4:
+ /*
+ * Always enable ASPM without Clock Req
+ * from initialization to halt.
+ * */
+ ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
+ RT_RF_OFF_LEVL_CLK_REQ);
+ ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;
+ break;
+ }
+
+ ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
+
+ /*Update Radio OFF setting */
+ switch (rtlpci->const_hwsw_rfoff_d3) {
+ case 1:
+ if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
+ ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
+ break;
+
+ case 2:
+ if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
+ ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
+ ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
+ break;
+
+ case 3:
+ ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3;
+ break;
+ }
+
+ /*Set HW definition to determine if it supports ASPM. */
+ switch (rtlpci->const_support_pciaspm) {
+ case 0:{
+ /*Not support ASPM. */
+ bool b_support_aspm = false;
+ ppsc->b_support_aspm = b_support_aspm;
+ break;
+ }
+ case 1:{
+ /*Support ASPM. */
+ bool b_support_aspm = true;
+ bool b_support_backdoor = true;
+ ppsc->b_support_aspm = b_support_aspm;
+
+ /*if(priv->oem_id == RT_CID_TOSHIBA &&
+ !priv->ndis_adapter.amd_l1_patch)
+ b_support_backdoor = false; */
+
+ ppsc->b_support_backdoor = b_support_backdoor;
+
+ break;
+ }
+ case 2:
+ /*ASPM value set by chipset. */
+ if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
+ bool b_support_aspm = true;
+ ppsc->b_support_aspm = b_support_aspm;
+ }
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+
+ /* toshiba aspm issue: toshiba sets aspm by itself,
+ * so we should not set aspm in the driver */
+ pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
+ if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
+ init_aspm == 0x43)
+ ppsc->b_support_aspm = false;
+}
+
+static bool _rtl_pci_platform_switch_device_pci_aspm(struct ieee80211_hw *hw,
+ u8 value)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool bresult = false;
+
+ if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
+ value |= 0x40;
+
+ pci_write_config_byte(rtlpci->pdev, 0x80, value);
+
+ return bresult;
+}
+
+/*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/
+static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool bresult = false;
+
+ pci_write_config_byte(rtlpci->pdev, 0x81, value);
+ bresult = true;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
+ udelay(100);
+
+ return bresult;
+}
+
+/*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/
+static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+ u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+ u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
+ /*Retrieve original configuration settings. */
+ u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
+ u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
+ pcibridge_linkctrlreg;
+ u16 aspmlevel = 0;
+
+ if (!ppsc->b_support_aspm)
+ return;
+
+ if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
+ RT_TRACE(COMP_POWER, DBG_TRACE,
+ ("PCI(Bridge) UNKNOWN.\n"));
+
+ return;
+ }
+
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
+ _rtl_pci_switch_clk_req(hw, 0x0);
+ }
+
+ if (1) {
+ /*for promising device will in L0 state after an I/O. */
+ u8 tmp_u1b;
+ pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);
+ }
+
+ /*Set corresponding value. */
+ aspmlevel |= BIT(0) | BIT(1);
+ linkctrl_reg &= ~aspmlevel;
+ pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));
+
+ _rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
+ udelay(50);
+
+ /*4 Disable Pci Bridge ASPM */
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + (num4bytes << 2));
+ rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, pcibridge_linkctrlreg);
+
+ udelay(50);
+
+}
+
+/*
+ *Enable RTL8192SE ASPM & Enable Pci Bridge ASPM for
+ *power saving We should follow the sequence to enable
+ *RTL8192SE first then enable Pci Bridge ASPM
+ *or the system will show bluescreen.
+ */
+static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+ u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+ u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
+ u16 aspmlevel;
+ u8 u_pcibridge_aspmsetting;
+ u8 u_device_aspmsetting;
+
+ if (!ppsc->b_support_aspm)
+ return;
+
+ if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
+ RT_TRACE(COMP_POWER, DBG_TRACE,
+ ("PCI(Bridge) UNKNOWN.\n"));
+ return;
+ }
+
+ /*4 Enable Pci Bridge ASPM */
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + (num4bytes << 2));
+
+ u_pcibridge_aspmsetting =
+ pcipriv->ndis_adapter.pcibridge_linkctrlreg |
+ rtlpci->const_hostpci_aspm_setting;
+
+ if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
+ u_pcibridge_aspmsetting &= ~BIT(0);
+
+ rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, u_pcibridge_aspmsetting);
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("PlatformEnableASPM(): Write reg[%x] = %x\n",
+ (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
+ u_pcibridge_aspmsetting));
+
+ udelay(50);
+
+ /*Get ASPM level (with/without Clock Req) */
+ aspmlevel = rtlpci->const_devicepci_aspm_setting;
+ u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;
+
+ /*_rtl_pci_platform_switch_device_pci_aspm(dev,*/
+ /*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */
+
+ u_device_aspmsetting |= aspmlevel;
+
+ _rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);
+
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
+ _rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
+ RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
+ }
+ udelay(100);
+}
+
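+/*
+ * Probe whether the AMD bridge "L1 patch" is active: write 0xA0 to bridge
+ * config offset 0xE0 via raw port I/O and, if it reads back, report bit 23
+ * of the dword at offset 0xE4.
+ */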
+static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+
+ bool status = false;
+ u8 offset_e0;
+ unsigned offset_e4;
+
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + 0xE0);
+ rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, 0xA0);
+
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + 0xE0);
+ rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &offset_e0);
+
+ if (offset_e0 == 0xA0) {
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + 0xE4);
+ rtl_pci_raw_read_port_ulong(PCI_CONF_DATA, &offset_e4);
+ if (offset_e4 & BIT(23))
+ status = true;
+ }
+
+ return status;
+}
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
+static u8 _rtl_pci_get_pciehdr_offset(struct ieee80211_hw *hw)
+{
+ u8 capability_offset;
+ u8 num4bytes = 0x34/4;
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u32 pcicfg_addr_port = (pcipriv->ndis_adapter.pcibridge_busnum << 16)|
+ (pcipriv->ndis_adapter.pcibridge_devnum << 11)|
+ (pcipriv->ndis_adapter.pcibridge_funcnum << 8)|
+ (1 << 31);
+
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS , pcicfg_addr_port
+ + (num4bytes << 2));
+ rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &capability_offset);
+ while (capability_offset != 0) {
+ struct rtl_pci_capabilities_header capability_hdr;
+
+ num4bytes = capability_offset / 4;
+ /* Read the header of the capability at this offset.
+ * If the retrieved capability is not the power management
+ * capability that we are looking for, follow the link to
+ * the next capability and continue looping.
+ */
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS ,
+ pcicfg_addr_port +
+ (num4bytes << 2));
+ rtl_pci_raw_read_port_ushort(PCI_CONF_DATA,
+ (u16*)&capability_hdr);
+ /* Found the PCI express capability. */
+ if (capability_hdr.capability_id ==
+ PCI_CAPABILITY_ID_PCI_EXPRESS)
+ break;
+ else
+ capability_offset = capability_hdr.next;
+ }
+ return capability_offset;
+}
+#endif
+/*<delete in kernel end>*/
+
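+/*
+ * Look through the global priv list for another rtlwifi device sitting on
+ * the same PCI bus/device but a different function; if found, return it as
+ * the "buddy" priv (used by dual-MAC/dual-function cards).
+ */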
+bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
+ struct rtl_priv **buddy_priv)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ bool b_find_buddy_priv = false;
+ struct rtl_priv *temp_priv = NULL;
+ struct rtl_pci_priv *temp_pcipriv = NULL;
+
+ if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
+ list_for_each_entry(temp_priv, &rtlpriv->glb_var->glb_priv_list,
+ list) {
+ if (temp_priv) {
+ temp_pcipriv =
+ (struct rtl_pci_priv *)temp_priv->priv;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ (("pcipriv->ndis_adapter.funcnumber %x \n"),
+ pcipriv->ndis_adapter.funcnumber));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ (("temp_pcipriv->ndis_adapter.funcnumber %x \n"),
+ temp_pcipriv->ndis_adapter.funcnumber));
+
+ if ((pcipriv->ndis_adapter.busnumber ==
+ temp_pcipriv->ndis_adapter.busnumber) &&
+ (pcipriv->ndis_adapter.devnumber ==
+ temp_pcipriv->ndis_adapter.devnumber) &&
+ (pcipriv->ndis_adapter.funcnumber !=
+ temp_pcipriv->ndis_adapter.funcnumber)) {
+ b_find_buddy_priv = true;
+ break;
+ }
+ }
+ }
+ }
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ (("b_find_buddy_priv %d \n"), b_find_buddy_priv));
+
+ if (b_find_buddy_priv)
+ *buddy_priv = temp_priv;
+
+ return b_find_buddy_priv;
+}
+
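+/*
+ * Read the PCI bridge's PCIe Link Control register (capability offset + 0x10)
+ * through raw config-port I/O and cache it in the adapter info.
+ */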
+void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
+ u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+ u8 linkctrl_reg;
+ u8 num4bbytes;
+
+ num4bbytes = (capabilityoffset + 0x10) / 4;
+
+ /*Read Link Control Register */
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + (num4bbytes << 2));
+ rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &linkctrl_reg);
+
+ pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
+}
+
+static void rtl_pci_parse_configuration(struct pci_dev *pdev,
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+
+ u8 tmp;
+ int pos;
+ u8 linkctrl_reg;
+
+ /*Link Control Register */
+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg);
+ pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg;
+
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("Link Control Register =%x\n",
+ pcipriv->ndis_adapter.linkctrl_reg));
+
+ pci_read_config_byte(pdev, 0x98, &tmp);
+ tmp |= BIT(4);
+ pci_write_config_byte(pdev, 0x98, tmp);
+
+ tmp = 0x17;
+ pci_write_config_byte(pdev, 0x70f, tmp);
+}
+
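+/*
+ * Work out the default ASPM policy and, when the policy asks for
+ * "always ASPM", enable ASPM (and clock request) right away.
+ */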
+static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
+{
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ _rtl_pci_update_default_setting(hw);
+
+ if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) {
+ /*Always enable ASPM & Clock Req. */
+ rtl_pci_enable_aspm(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
+ }
+
+}
+
+static void _rtl_pci_io_handler_init(struct device *dev,
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->io.dev = dev;
+
+ rtlpriv->io.write8_async = pci_write8_async;
+ rtlpriv->io.write16_async = pci_write16_async;
+ rtlpriv->io.write32_async = pci_write32_async;
+
+ rtlpriv->io.read8_sync = pci_read8_sync;
+ rtlpriv->io.read16_sync = pci_read16_sync;
+ rtlpriv->io.read32_sync = pci_read32_sync;
+
+}
+
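+/*
+ * Walk the per-TID wait queue and record the length of each queued
+ * A-MPDU-capable frame (plus FCS/ICV overhead) in the tcb_desc, so the
+ * early-mode header of the current frame can describe the packets that
+ * follow it.
+ */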
+static bool _rtl_pci_update_earlymode_info(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct rtl_tcb_desc *tcb_desc,
+ u8 tid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 additionlen = FCS_LEN;
+ struct sk_buff *next_skb;
+
+ /* extra per-packet length: open = 4 (FCS only), WEP/TKIP = 8, AES = 12 */
+ if (info->control.hw_key)
+ additionlen += info->control.hw_key->icv_len;
+
+ /* The maximum number of skbs is 6 */
+ tcb_desc->empkt_num = 0;
+ spin_lock_bh(&rtlpriv->locks.waitq_lock);
+ skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
+ struct ieee80211_tx_info *next_info =
+ IEEE80211_SKB_CB(next_skb);
+ if (next_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ tcb_desc->empkt_len[tcb_desc->empkt_num] =
+ next_skb->len + additionlen;
+ tcb_desc->empkt_num++;
+ } else {
+ break;
+ }
+
+ if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
+ next_skb))
+ break;
+
+ if (tcb_desc->empkt_num >= rtlhal->max_earlymode_num)
+ break;
+ }
+ spin_unlock_bh(&rtlpriv->locks.waitq_lock);
+ return true;
+}
+
+/* just for early mode now */
+static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct sk_buff *skb = NULL;
+ struct ieee80211_tx_info *info = NULL;
+ int tid; /* should be int */
+
+ if (!rtlpriv->rtlhal.b_earlymode_enable)
+ return;
+ if (rtlpriv->dm.supp_phymode_switch &&
+ (rtlpriv->easy_concurrent_ctl.bswitch_in_process ||
+ (rtlpriv->buddy_priv &&
+ rtlpriv->buddy_priv->easy_concurrent_ctl.bswitch_in_process)))
+ return;
+ /* we just use early mode for BE/BK/VI/VO */
+ for (tid = 7; tid >= 0; tid--) {
+ u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(hw, tid)];
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+ while (!mac->act_scanning &&
+ rtlpriv->psc.rfpwr_state == ERFON) {
+ struct rtl_tcb_desc tcb_desc;
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+
+ spin_lock_bh(&rtlpriv->locks.waitq_lock);
+ if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
+ (ring->entries - skb_queue_len(&ring->queue) >
+ rtlhal->max_earlymode_num)) {
+ skb = skb_dequeue(&mac->skb_waitq[tid]);
+ } else {
+ spin_unlock_bh(&rtlpriv->locks.waitq_lock);
+ break;
+ }
+ spin_unlock_bh(&rtlpriv->locks.waitq_lock);
+
+ /* Some frames cannot use early mode, e.g.
+ * multicast/broadcast/non-QoS data */
+ info = IEEE80211_SKB_CB(skb);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ _rtl_pci_update_earlymode_info(hw, skb,
+ &tcb_desc, tid);
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+#else
+/*<delete in kernel end>*/
+ rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
+#endif
+/*<delete in kernel end>*/
+ }
+ }
+}
+
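+/*
+ * TX completion handler: reclaim finished descriptors, unmap the DMA buffer,
+ * report TX status to mac80211 and wake the corresponding queue once
+ * descriptors become available again.
+ */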
+static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
+
+ while (skb_queue_len(&ring->queue)) {
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
+ u16 fc;
+ u8 tid;
+ u8 *entry;
+
+
+ if (rtlpriv->use_new_trx_flow)
+ entry = (u8 *)(&ring->buffer_desc[ring->idx]);
+ else
+ entry = (u8 *)(&ring->desc[ring->idx]);
+
+ if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
+ return;
+
+ ring->idx = (ring->idx + 1) % ring->entries;
+
+ skb = __skb_dequeue(&ring->queue);
+
+ pci_unmap_single(rtlpci->pdev,
+ le32_to_cpu(rtlpriv->cfg->ops->
+ get_desc((u8 *) entry, true,
+ HW_DESC_TXBUFF_ADDR)),
+ skb->len, PCI_DMA_TODEVICE);
+
+ /* remove early mode header */
+ if(rtlpriv->rtlhal.b_earlymode_enable)
+ skb_pull(skb, EM_HDR_LEN);
+
+ RT_TRACE((COMP_INTR | COMP_SEND), DBG_TRACE,
+ ("new ring->idx:%d, "
+ "free: skb_queue_len:%d, free: seq:%d\n",
+ ring->idx,
+ skb_queue_len(&ring->queue),
+ *(u16 *) (skb->data + 22)));
+
+ if(prio == TXCMD_QUEUE) {
+ dev_kfree_skb(skb);
+ goto tx_status_ok;
+
+ }
+
+ /* For SW LPS: only after the NULL frame has been sent out can we
+ * be sure the AP knows we are asleep; before that we should not
+ * let the RF go to sleep. */
+ fc = rtl_get_fc(skb);
+ if (ieee80211_is_nullfunc(fc)) {
+ if(ieee80211_has_pm(fc)) {
+ rtlpriv->mac80211.offchan_deley = true;
+ rtlpriv->psc.state_inap = 1;
+ } else {
+ rtlpriv->psc.state_inap = 0;
+ }
+ }
+ if (ieee80211_is_action(fc)) {
+ struct ieee80211_mgmt_compat *action_frame =
+ (struct ieee80211_mgmt_compat *)skb->data;
+ if (action_frame->u.action.u.ht_smps.action ==
+ WLAN_HT_ACTION_SMPS) {
+ dev_kfree_skb(skb);
+ goto tx_status_ok;
+ }
+ }
+
+ /* update tid tx pkt num */
+ tid = rtl_get_tid(skb);
+ if (tid <= 7)
+ rtlpriv->link_info.tidtx_inperiod[tid]++;
+
+ info = IEEE80211_SKB_CB(skb);
+ ieee80211_tx_info_clear_status(info);
+
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ /*info->status.rates[0].count = 1; */
+
+ ieee80211_tx_status_irqsafe(hw, skb);
+
+ if ((ring->entries - skb_queue_len(&ring->queue))
+ == 2) {
+
+ RT_TRACE(COMP_ERR, DBG_LOUD,
+ ("more desc left, wake"
+ "skb_queue@%d,ring->idx = %d,"
+ "skb_queue_len = 0x%d\n",
+ prio, ring->idx,
+ skb_queue_len(&ring->queue)));
+
+ ieee80211_wake_queue(hw,
+ skb_get_queue_mapping
+ (skb));
+ }
+tx_status_ok:
+ skb = NULL;
+ }
+
+ if (((rtlpriv->link_info.num_rx_inperiod +
+ rtlpriv->link_info.num_tx_inperiod) > 8) ||
+ (rtlpriv->link_info.num_rx_inperiod > 2)) {
+ rtl_lps_leave(hw);
+ }
+}
+
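+/*
+ * Allocate and DMA-map one RX skb and program its address (and, for the old
+ * TRX flow, the buffer length and OWN bit) into the given RX descriptor.
+ * Returns 1 on success, 0 on failure.
+ */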
+static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
+ u8 *entry, int rxring_idx, int desc_idx)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u32 bufferaddress;
+ u8 tmp_one = 1;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(rtlpci->rxbuffersize);
+ if (!skb)
+ return 0;
+ rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
+
+ /* store the DMA mapping address in skb->cb so that
+ * pci_unmap_single() can use it later */
+ *((dma_addr_t *) skb->cb) = pci_map_single(rtlpci->pdev,
+ skb_tail_pointer(skb), rtlpci->rxbuffersize,
+ PCI_DMA_FROMDEVICE);
+ bufferaddress = cpu_to_le32(*((dma_addr_t *) skb->cb));
+ if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
+ return 0;
+ if (rtlpriv->use_new_trx_flow) {
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
+ HW_DESC_RX_PREPARE,
+ (u8 *) & bufferaddress);
+ } else {
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
+ HW_DESC_RXBUFF_ADDR,
+ (u8 *) & bufferaddress);
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
+ HW_DESC_RXPKT_LEN,
+ (u8 *) & rtlpci->rxbuffersize);
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
+ HW_DESC_RXOWN,
+ (u8 *) & tmp_one);
+ }
+
+ return 1;
+}
+
+/* In order to receive 8K A-MSDUs we allocate 9100-byte skbs when the
+ * RX ring is initialized. If a packet is not an A-MSDU, passing such a
+ * large skb straight to TCP/IP makes big-packet pings (e.g.
+ * "ping -s 65507") fail, so here we reallocate the skb based on the
+ * true packet size. Ideally mac80211 would do this, but it does not yet. */
+
+/* On some platforms the skb allocation can occasionally fail. In that
+ * case we hand the old (oversized) skb to mac80211 directly; this causes
+ * no other problems, the packet is merely dropped by TCP/IP. */
+static void _rtl_pci_rx_to_mac80211(struct ieee80211_hw *hw,
+ struct sk_buff *skb, struct ieee80211_rx_status rx_status)
+{
+ if (unlikely(!rtl_action_proc(hw, skb, false))) {
+ dev_kfree_skb_any(skb);
+ } else {
+ struct sk_buff *uskb = NULL;
+ u8 *pdata;
+
+ uskb = dev_alloc_skb(skb->len + 128);
+ if (likely(uskb)) {
+ memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
+ sizeof(rx_status));
+ pdata = (u8 *)skb_put(uskb, skb->len);
+ memcpy(pdata, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+
+ ieee80211_rx_irqsafe(hw, uskb);
+ } else {
+ ieee80211_rx_irqsafe(hw, skb);
+ }
+ }
+}
+
+/*hsisr interrupt handler*/
+static void _rtl_pci_hs_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR],
+ rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR]) |
+ rtlpci->sys_irq_mask);
+
+
+}
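+
+/*
+ * RX handler: walk the RX ring, unmap each filled buffer, query the RX
+ * status, hand good frames to mac80211 and refill the descriptor before
+ * advancing the ring index.
+ */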
+static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ int rxring_idx = RTL_PCI_RX_MPDU_QUEUE;
+
+ struct ieee80211_rx_status rx_status = { 0 };
+ unsigned int count = rtlpci->rxringcount;
+ bool unicast = false;
+ u8 hw_queue = 0;
+ unsigned int rx_remained_cnt;
+ u8 own;
+ u8 tmp_one;
+
+ struct rtl_stats status = {
+ .signal = 0,
+ .noise = -98,
+ .rate = 0,
+ };
+
+ /*RX NORMAL PKT */
+ while (count--) {
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+ u16 len;
+ /*rx buffer descriptor */
+ struct rtl_rx_buffer_desc *buffer_desc = NULL;
+ /* with the new trx flow this points at the wifi info */
+ struct rtl_rx_desc *pdesc = NULL;
+ /*rx pkt */
+ struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
+ rtlpci->rx_ring[rxring_idx].idx];
+
+ if (rtlpriv->use_new_trx_flow) {
+ rx_remained_cnt =
+ rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
+ hw_queue);
+ if (rx_remained_cnt < 1)
+ return;
+
+ } else { /* rx descriptor */
+ pdesc = &rtlpci->rx_ring[rxring_idx].desc[
+ rtlpci->rx_ring[rxring_idx].idx];
+
+ own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
+ false,
+ HW_DESC_OWN);
+ if (own) /* wait data to be filled by hardware */
+ return;
+ }
+
+ /* Reaching here means the data has already been filled in.
+ * Attention: we must NOT access the skb data before
+ * pci_unmap_single() has been called. */
+ pci_unmap_single(rtlpci->pdev, *((dma_addr_t *) skb->cb),
+ rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
+
+ if (rtlpriv->use_new_trx_flow) {
+ buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
+ rtlpci->rx_ring[rxring_idx].idx];
+ /*means rx wifi info*/
+ pdesc = (struct rtl_rx_desc *)skb->data;
+ }
+
+ rtlpriv->cfg->ops->query_rx_desc(hw, &status,
+ &rx_status, (u8 *) pdesc, skb);
+
+ if (rtlpriv->use_new_trx_flow)
+ rtlpriv->cfg->ops->rx_check_dma_ok(hw,
+ (u8 *)buffer_desc,
+ hw_queue);
+
+
+ len = rtlpriv->cfg->ops->get_desc((u8 *)pdesc, false,
+ HW_DESC_RXPKT_LEN);
+
+ if (skb->end - skb->tail > len) {
+ skb_put(skb, len);
+ if (rtlpriv->use_new_trx_flow)
+ skb_reserve(skb, status.rx_drvinfo_size +
+ status.rx_bufshift + 24);
+ else
+ skb_reserve(skb, status.rx_drvinfo_size +
+ status.rx_bufshift);
+
+ } else {
+ printk("skb->end - skb->tail = %d, len is %d\n",
+ skb->end - skb->tail, len);
+ break;
+ }
+
+ rtlpriv->cfg->ops->rx_command_packet_handler(hw, status, skb);
+
+ /*
+ * NOTICE: do not trim the FCS here with
+ * skb_trim(skb, skb->len - 4); mac80211 already does this,
+ * and doing it here makes secured DHCP fail.
+ */
+
+ hdr = rtl_get_hdr(skb);
+ fc = rtl_get_fc(skb);
+
+ if (!status.b_crc && !status.b_hwerror) {
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
+ sizeof(rx_status));
+
+ if (is_broadcast_ether_addr(hdr->addr1)) {
+ ;/*TODO*/
+ } else if (is_multicast_ether_addr(hdr->addr1)) {
+ ;/*TODO*/
+ } else {
+ unicast = true;
+ rtlpriv->stats.rxbytesunicast += skb->len;
+ }
+
+ rtl_is_special_data(hw, skb, false);
+
+ if (ieee80211_is_data(fc)) {
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
+
+ if (unicast)
+ rtlpriv->link_info.num_rx_inperiod++;
+ }
+
+ /* beacon statistics for roaming */
+ rtl_beacon_statistic(hw, skb);
+ rtl_p2p_info(hw, (void*)skb->data, skb->len);
+ /* for sw lps */
+ rtl_swlps_beacon(hw, (void*)skb->data, skb->len);
+ rtl_recognize_peer(hw, (void*)skb->data, skb->len);
+ if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
+ (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)&&
+ (ieee80211_is_beacon(fc) ||
+ ieee80211_is_probe_resp(fc))) {
+ dev_kfree_skb_any(skb);
+ } else {
+ _rtl_pci_rx_to_mac80211(hw, skb, rx_status);
+ }
+ } else {
+ dev_kfree_skb_any(skb);
+ }
+ if (rtlpriv->use_new_trx_flow) {
+ rtlpci->rx_ring[hw_queue].next_rx_rp += 1;
+ rtlpci->rx_ring[hw_queue].next_rx_rp %=
+ RTL_PCI_MAX_RX_COUNT;
+
+
+ rx_remained_cnt--;
+ if (1/*rx_remained_cnt == 0*/) {
+ rtl_write_word(rtlpriv, 0x3B4,
+ rtlpci->rx_ring[hw_queue].next_rx_rp);
+ }
+ }
+ if (((rtlpriv->link_info.num_rx_inperiod +
+ rtlpriv->link_info.num_tx_inperiod) > 8) ||
+ (rtlpriv->link_info.num_rx_inperiod > 2)) {
+ rtl_lps_leave(hw);
+ }
+
+ if (rtlpriv->use_new_trx_flow) {
+ _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
+ rxring_idx,
+ rtlpci->rx_ring[rxring_idx].idx);
+ } else {
+ _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
+ rtlpci->rx_ring[rxring_idx].idx);
+
+ if (rtlpci->rx_ring[rxring_idx].idx ==
+ rtlpci->rxringcount - 1)
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc,
+ false,
+ HW_DESC_RXERO,
+ (u8 *) & tmp_one);
+ }
+ rtlpci->rx_ring[rxring_idx].idx =
+ (rtlpci->rx_ring[rxring_idx].idx + 1) %
+ rtlpci->rxringcount;
+ }
+}
+
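+/*
+ * Top-half interrupt handler: mask HIMR/HIMRE, read the ISR bits, dispatch
+ * the beacon/TX/RX/firmware/HSISR events, then restore the interrupt masks.
+ */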
+static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
+{
+ struct ieee80211_hw *hw = dev_id;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ unsigned long flags;
+ u32 inta = 0;
+ u32 intb = 0;
+
+
+
+ if (rtlpci->irq_enabled == 0)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMR], 0x0);
+
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMRE], 0x0);
+
+
+ /*read ISR: 4/8bytes */
+ rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
+
+
+ /* Shared IRQ or HW disappeared */
+ if (!inta || inta == 0xffff)
+ goto done;
+ /*<1> beacon related */
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon ok interrupt!\n"));
+ }
+
+ if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon err interrupt!\n"));
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon interrupt!\n"));
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) {
+ RT_TRACE(COMP_INTR, DBG_TRACE,
+ ("prepare beacon for interrupt!\n"));
+ tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
+ }
+
+
+ /*<2> tx related */
+ if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
+ RT_TRACE(COMP_ERR, DBG_TRACE, ("IMR_TXFOVW!\n"));
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("Manage ok interrupt!\n"));
+ _rtl_pci_tx_isr(hw, MGNT_QUEUE);
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("HIGH_QUEUE ok interrupt!\n"));
+ _rtl_pci_tx_isr(hw, HIGH_QUEUE);
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
+ rtlpriv->link_info.num_tx_inperiod++;
+
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("BK Tx OK interrupt!\n"));
+ _rtl_pci_tx_isr(hw, BK_QUEUE);
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
+ rtlpriv->link_info.num_tx_inperiod++;
+
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("BE TX OK interrupt!\n"));
+ _rtl_pci_tx_isr(hw, BE_QUEUE);
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
+ rtlpriv->link_info.num_tx_inperiod++;
+
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("VI TX OK interrupt!\n"));
+ _rtl_pci_tx_isr(hw, VI_QUEUE);
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
+ rtlpriv->link_info.num_tx_inperiod++;
+
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("Vo TX OK interrupt!\n"));
+ _rtl_pci_tx_isr(hw, VO_QUEUE);
+ }
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
+ rtlpriv->link_info.num_tx_inperiod++;
+
+ RT_TRACE(COMP_INTR, DBG_TRACE,
+ ("CMD TX OK interrupt!\n"));
+ _rtl_pci_tx_isr(hw, TXCMD_QUEUE);
+ }
+ }
+
+ /*<3> rx related */
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("Rx ok interrupt!\n"));
+
+ _rtl_pci_rx_interrupt(hw);
+
+ }
+
+ if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("rx descriptor unavailable!\n"));
+ rtl_write_byte(rtlpriv, 0xb4, BIT(1) );
+ _rtl_pci_rx_interrupt(hw);
+ }
+
+ if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
+ RT_TRACE(COMP_ERR, DBG_WARNING, ("rx overflow !\n"));
+ _rtl_pci_rx_interrupt(hw);
+ }
+
+ /*<4> fw related*/
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
+ RT_TRACE(COMP_INTR, DBG_TRACE,
+ ("firmware interrupt!\n"));
+ queue_delayed_work(rtlpriv->works.rtl_wq,
+ &rtlpriv->works.fwevt_wq, 0);
+ }
+ }
+
+ /*<5> hsisr related*/
+ /* Only the 8188EE & 8723BE are supported here.
+ * If other ICs reached this code the system would be corrupted,
+ * because maps[RTL_IMR_HSISR_IND] & maps[MAC_HSISR]
+ * are not initialized for them. */
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE ||
+ rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
+ RT_TRACE(COMP_INTR, DBG_TRACE,
+ ("hsisr interrupt!\n"));
+ _rtl_pci_hs_interrupt(hw);
+ }
+ }
+
+
+ if(rtlpriv->rtlhal.b_earlymode_enable)
+ tasklet_schedule(&rtlpriv->works.irq_tasklet);
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMR],
+ rtlpci->irq_mask[0]);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMRE],
+ rtlpci->irq_mask[1]);
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ return IRQ_HANDLED;
+
+done:
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
+{
+ _rtl_pci_tx_chk_waitq(hw);
+}
+
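+/*
+ * Beacon tasklet: drop the previously queued beacon, fetch a fresh one from
+ * mac80211, fill its TX descriptor and hand ownership to the hardware.
+ */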
+static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl8192_tx_ring *ring = NULL;
+ struct ieee80211_hdr *hdr = NULL;
+ struct ieee80211_tx_info *info = NULL;
+ struct sk_buff *pskb = NULL;
+ struct rtl_tx_desc *pdesc = NULL;
+ struct rtl_tcb_desc tcb_desc;
+ /*This is for new trx flow*/
+ struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
+ u8 temp_one = 1;
+
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+ ring = &rtlpci->tx_ring[BEACON_QUEUE];
+ pskb = __skb_dequeue(&ring->queue);
+ if (pskb)
+ kfree_skb(pskb);
+
+ /*NB: the beacon data buffer must be 32-bit aligned. */
+ pskb = ieee80211_beacon_get(hw, mac->vif);
+ if (pskb == NULL)
+ return;
+ hdr = rtl_get_hdr(pskb);
+ info = IEEE80211_SKB_CB(pskb);
+ pdesc = &ring->desc[0];
+ if (rtlpriv->use_new_trx_flow)
+ pbuffer_desc = &ring->buffer_desc[0];
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
+ (u8 *)pbuffer_desc, info, pskb,
+ BEACON_QUEUE, &tcb_desc);
+#else
+/*<delete in kernel end>*/
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
+ (u8 *)pbuffer_desc, info, NULL, pskb,
+ BEACON_QUEUE, &tcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+
+ __skb_queue_tail(&ring->queue, pskb);
+
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, true, HW_DESC_OWN,
+ (u8 *) & temp_one);
+
+ return;
+}
+
+static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 i;
+ u16 desc_num;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE)
+ desc_num = TX_DESC_NUM_92E;
+ else
+ desc_num = RT_TXDESC_NUM;
+
+ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
+ rtlpci->txringcount[i] = desc_num;
+ }
+ /*
+ * We only allocate 2 descriptors for the beacon queue,
+ * because the hardware only needs the first descriptor for beacons.
+ */
+ rtlpci->txringcount[BEACON_QUEUE] = 2;
+
+ /*
+ * The BE queue needs more descriptors for performance;
+ * otherwise "no more tx desc" can happen and may cause
+ * mac80211 memory leakage.
+ */
+ if (rtl_priv(hw)->use_new_trx_flow == false)
+ rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;
+
+ rtlpci->rxbuffersize = 9100; /*2048/1024; */
+ rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT; /*64; */
+}
+
+static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
+ struct pci_dev *pdev)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ rtlpriv->rtlhal.up_first_time = true;
+ rtlpriv->rtlhal.being_init_adapter = false;
+
+ rtlhal->hw = hw;
+ rtlpci->pdev = pdev;
+
+ /*Tx/Rx related var */
+ _rtl_pci_init_trx_var(hw);
+
+ /*IBSS*/ mac->beacon_interval = 100;
+
+ /*AMPDU*/
+ mac->min_space_cfg = 0;
+ mac->max_mss_density = 0;
+ /*set sane AMPDU defaults */
+ mac->current_ampdu_density = 7;
+ mac->current_ampdu_factor = 3;
+
+ /*QOS*/
+ rtlpci->acm_method = eAcmWay2_SW;
+
+ /*task */
+ tasklet_init(&rtlpriv->works.irq_tasklet,
+ (void (*)(unsigned long))_rtl_pci_irq_tasklet,
+ (unsigned long)hw);
+ tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
+ (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
+ (unsigned long)hw);
+}
+
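+/*
+ * Allocate coherent DMA memory for one TX ring (plus buffer descriptors when
+ * the new TRX flow is used) and, for the old flow, chain every descriptor to
+ * the next one. The ring must be 256-byte aligned.
+ */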
+static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
+ unsigned int prio, unsigned int entries)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tx_buffer_desc *buffer_desc;
+ struct rtl_tx_desc *desc;
+ dma_addr_t buffer_desc_dma, desc_dma;
+ u32 nextdescaddress;
+ int i;
+
+ /* alloc tx buffer desc for new trx flow*/
+ if (rtlpriv->use_new_trx_flow) {
+ buffer_desc = pci_alloc_consistent(rtlpci->pdev,
+ sizeof(*buffer_desc) * entries,
+ &buffer_desc_dma);
+
+ if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Cannot allocate TX ring (prio = %d)\n",
+ prio));
+ return -ENOMEM;
+ }
+
+ memset(buffer_desc, 0, sizeof(*buffer_desc) * entries);
+ rtlpci->tx_ring[prio].buffer_desc = buffer_desc;
+ rtlpci->tx_ring[prio].buffer_desc_dma = buffer_desc_dma;
+
+ rtlpci->tx_ring[prio].cur_tx_rp = 0;
+ rtlpci->tx_ring[prio].cur_tx_wp = 0;
+ rtlpci->tx_ring[prio].avl_desc = entries;
+
+ }
+
+ /* alloc dma for this ring */
+ desc = pci_alloc_consistent(rtlpci->pdev,
+ sizeof(*desc) * entries, &desc_dma);
+
+ if (!desc || (unsigned long)desc & 0xFF) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Cannot allocate TX ring (prio = %d)\n", prio));
+ return -ENOMEM;
+ }
+
+ memset(desc, 0, sizeof(*desc) * entries);
+ rtlpci->tx_ring[prio].desc = desc;
+ rtlpci->tx_ring[prio].dma = desc_dma;
+
+ rtlpci->tx_ring[prio].idx = 0;
+ rtlpci->tx_ring[prio].entries = entries;
+ skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("queue:%d, ring_addr:%p\n", prio, desc));
+
+ /* init every desc in this ring */
+ if (rtlpriv->use_new_trx_flow == false) {
+ for (i = 0; i < entries; i++) {
+ nextdescaddress = cpu_to_le32((u32) desc_dma +
+ ((i + 1) % entries) *
+ sizeof(*desc));
+
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) & (desc[i]),
+ true,
+ HW_DESC_TX_NEXTDESC_ADDR,
+ (u8 *) & nextdescaddress);
+ }
+ }
+ return 0;
+}
+
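+/*
+ * Allocate coherent DMA memory for one RX ring and populate every descriptor
+ * with a freshly mapped skb; for the old TRX flow the last descriptor is
+ * also marked as end-of-ring (RXERO).
+ */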
+static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ int i;
+
+ if (rtlpriv->use_new_trx_flow) {
+ struct rtl_rx_buffer_desc *entry = NULL;
+ /* alloc dma for this ring */
+ rtlpci->rx_ring[rxring_idx].buffer_desc =
+ pci_alloc_consistent(rtlpci->pdev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].
+ buffer_desc) *
+ rtlpci->rxringcount,
+ &rtlpci->rx_ring[rxring_idx].dma);
+ if (!rtlpci->rx_ring[rxring_idx].buffer_desc ||
+ (unsigned long)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("Cannot allocate RX ring\n"));
+ return -ENOMEM;
+ }
+
+ memset(rtlpci->rx_ring[rxring_idx].buffer_desc, 0,
+ sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) *
+ rtlpci->rxringcount);
+
+ /* init every desc in this ring */
+ rtlpci->rx_ring[rxring_idx].idx = 0;
+ for (i = 0; i < rtlpci->rxringcount; i++) {
+ entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
+ if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+ rxring_idx, i))
+ return -ENOMEM;
+ }
+ } else {
+ struct rtl_rx_desc *entry = NULL;
+ u8 tmp_one = 1;
+ /* alloc dma for this ring */
+ rtlpci->rx_ring[rxring_idx].desc =
+ pci_alloc_consistent(rtlpci->pdev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].
+ desc) * rtlpci->rxringcount,
+ &rtlpci->rx_ring[rxring_idx].dma);
+ if (!rtlpci->rx_ring[rxring_idx].desc ||
+ (unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Cannot allocate RX ring\n"));
+ return -ENOMEM;
+ }
+
+ memset(rtlpci->rx_ring[rxring_idx].desc, 0,
+ sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
+ rtlpci->rxringcount);
+
+ /* init every desc in this ring */
+ rtlpci->rx_ring[rxring_idx].idx = 0;
+ for (i = 0; i < rtlpci->rxringcount; i++) {
+ entry = &rtlpci->rx_ring[rxring_idx].desc[i];
+ if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+ rxring_idx, i))
+ return -ENOMEM;
+ }
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
+ HW_DESC_RXERO, (u8 *) & tmp_one);
+ }
+ return 0;
+}
+
+static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
+ unsigned int prio)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
+
+ /* free every desc in this ring */
+ while (skb_queue_len(&ring->queue)) {
+ u8 *entry;
+ struct sk_buff *skb = __skb_dequeue(&ring->queue);
+ if (rtlpriv->use_new_trx_flow)
+ entry = (u8 *)(&ring->buffer_desc[ring->idx]);
+ else
+ entry = (u8 *)(&ring->desc[ring->idx]);
+
+ pci_unmap_single(rtlpci->pdev,
+ le32_to_cpu(rtlpriv->cfg->ops->get_desc(
+ (u8 *) entry, true, HW_DESC_TXBUFF_ADDR)),
+ skb->len, PCI_DMA_TODEVICE);
+ kfree_skb(skb);
+ ring->idx = (ring->idx + 1) % ring->entries;
+ }
+
+ /* free dma of this ring */
+ pci_free_consistent(rtlpci->pdev,
+ sizeof(*ring->desc) * ring->entries,
+ ring->desc, ring->dma);
+ ring->desc = NULL;
+ if (rtlpriv->use_new_trx_flow) {
+ pci_free_consistent(rtlpci->pdev,
+ sizeof(*ring->buffer_desc) * ring->entries,
+ ring->buffer_desc, ring->buffer_desc_dma);
+ ring->buffer_desc = NULL;
+ }
+}
+
+static void _rtl_pci_free_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ int i;
+
+ /* free every desc in this ring */
+ for (i = 0; i < rtlpci->rxringcount; i++) {
+ struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[i];
+ if (!skb)
+ continue;
+
+ pci_unmap_single(rtlpci->pdev, *((dma_addr_t *) skb->cb),
+ rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
+ kfree_skb(skb);
+ }
+
+ /* free dma of this ring */
+ if (rtlpriv->use_new_trx_flow) {
+ pci_free_consistent(rtlpci->pdev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].
+ buffer_desc) * rtlpci->rxringcount,
+ rtlpci->rx_ring[rxring_idx].buffer_desc,
+ rtlpci->rx_ring[rxring_idx].dma);
+ rtlpci->rx_ring[rxring_idx].buffer_desc = NULL;
+ } else {
+ pci_free_consistent(rtlpci->pdev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
+ rtlpci->rxringcount,
+ rtlpci->rx_ring[rxring_idx].desc,
+ rtlpci->rx_ring[rxring_idx].dma);
+ rtlpci->rx_ring[rxring_idx].desc = NULL;
+ }
+}
+
+static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ int ret;
+ int i, rxring_idx;
+
+ /* rxring_idx 0:RX_MPDU_QUEUE
+ * rxring_idx 1:RX_CMD_QUEUE */
+ for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) {
+ ret = _rtl_pci_init_rx_ring(hw, rxring_idx);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
+ ret = _rtl_pci_init_tx_ring(hw, i, rtlpci->txringcount[i]);
+ if (ret)
+ goto err_free_rings;
+ }
+
+ return 0;
+
+err_free_rings:
+ for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++)
+ _rtl_pci_free_rx_ring(hw, rxring_idx);
+
+ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
+ if (rtlpci->tx_ring[i].desc ||
+ rtlpci->tx_ring[i].buffer_desc)
+ _rtl_pci_free_tx_ring(hw, i);
+
+ return 1;
+}
+
+static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
+{
+ u32 i, rxring_idx;
+
+ /*free rx rings */
+ for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++)
+ _rtl_pci_free_rx_ring(hw, rxring_idx);
+
+ /*free tx rings */
+ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
+ _rtl_pci_free_tx_ring(hw, i);
+
+ return 0;
+}
+
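+/*
+ * Reset both RX and TX rings: give the RX descriptors back to the hardware
+ * (old TRX flow), free any pending TX skbs and rewind all ring indices to 0.
+ */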
+int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ int i, rxring_idx;
+ unsigned long flags;
+ u8 tmp_one = 1;
+ /* rxring_idx 0:RX_MPDU_QUEUE */
+ /* rxring_idx 1:RX_CMD_QUEUE */
+ for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) {
+ /* force the rx_ring[RX_MPDU_QUEUE/
+ * RX_CMD_QUEUE].idx to the first one */
+ /*new trx flow, do nothing*/
+ if ((rtlpriv->use_new_trx_flow == false) &&
+ rtlpci->rx_ring[rxring_idx].desc) {
+ struct rtl_rx_desc *entry = NULL;
+
+ for (i = 0; i < rtlpci->rxringcount; i++) {
+ entry = &rtlpci->rx_ring[rxring_idx].desc[i];
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry,
+ false,
+ HW_DESC_RXOWN,
+ (u8 *) & tmp_one);
+ }
+ }
+ rtlpci->rx_ring[rxring_idx].idx = 0;
+ }
+
+ /* After the reset, release the previously pending packets
+ * and force the tx idx back to the first one */
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
+ if (rtlpci->tx_ring[i].desc ||
+ rtlpci->tx_ring[i].buffer_desc) {
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
+
+ while (skb_queue_len(&ring->queue)) {
+ u8 *entry;
+ struct sk_buff *skb =
+ __skb_dequeue(&ring->queue);
+ if (rtlpriv->use_new_trx_flow)
+ entry = (u8 *)(&ring->buffer_desc
+ [ring->idx]);
+ else
+ entry = (u8 *)(&ring->desc[ring->idx]);
+
+ pci_unmap_single(rtlpci->pdev,
+ le32_to_cpu(rtlpriv->cfg->ops->get_desc(
+ (u8 *)entry, true,
+ HW_DESC_TXBUFF_ADDR)),
+ skb->len, PCI_DMA_TODEVICE);
+ kfree_skb(skb);
+ ring->idx = (ring->idx + 1) % ring->entries;
+ }
+ ring->idx = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ return 0;
+}
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+#else
+/*<delete in kernel end>*/
+static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb)
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = info->control.sta;
+#endif
+/*<delete in kernel end>*/
+ struct rtl_sta_info *sta_entry = NULL;
+ u8 tid = rtl_get_tid(skb);
+ u16 fc = rtl_get_fc(skb);
+
+ if(!sta)
+ return false;
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+
+ if (!rtlpriv->rtlhal.b_earlymode_enable)
+ return false;
+ if (ieee80211_is_nullfunc(fc))
+ return false;
+ if (ieee80211_is_qos_nullfunc(fc))
+ return false;
+ if (ieee80211_is_pspoll(fc)) {
+ return false;
+ }
+
+ if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL)
+ return false;
+ if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
+ return false;
+ if (tid > 7)
+ return false;
+ /* maybe every tid should be checked */
+ if (!rtlpriv->link_info.higher_busytxtraffic[tid])
+ return false;
+
+ spin_lock_bh(&rtlpriv->locks.waitq_lock);
+ skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb);
+ spin_unlock_bh(&rtlpriv->locks.waitq_lock);
+
+ return true;
+}
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct rtl_tcb_desc *ptcb_desc)
+#else
+/*<delete in kernel end>*/
+static int rtl_pci_tx(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct rtl_tcb_desc *ptcb_desc)
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_sta_info *sta_entry = NULL;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ struct ieee80211_sta *sta = info->control.sta;
+#endif
+/*<delete in kernel end>*/
+ struct rtl8192_tx_ring *ring;
+ struct rtl_tx_desc *pdesc;
+ struct rtl_tx_buffer_desc *ptx_bd_desc = NULL;
+ u16 idx;
+ u8 own;
+ u8 temp_one = 1;
+ u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
+ unsigned long flags;
+ struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
+ u16 fc = rtl_get_fc(skb);
+ u8 *pda_addr = hdr->addr1;
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ /*ssn */
+ u8 tid = 0;
+ u16 seq_number = 0;
+
+
+ if (ieee80211_is_mgmt(fc))
+ rtl_tx_mgmt_proc(hw, skb);
+
+ if (rtlpriv->psc.sw_ps_enabled) {
+ if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
+ !ieee80211_has_pm(fc))
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
+ }
+
+ rtl_action_proc(hw, skb, true);
+
+ if (is_multicast_ether_addr(pda_addr))
+ rtlpriv->stats.txbytesmulticast += skb->len;
+ else if (is_broadcast_ether_addr(pda_addr))
+ rtlpriv->stats.txbytesbroadcast += skb->len;
+ else
+ rtlpriv->stats.txbytesunicast += skb->len;
+
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+ ring = &rtlpci->tx_ring[hw_queue];
+ if (hw_queue != BEACON_QUEUE) {
+ if (rtlpriv->use_new_trx_flow)
+ idx = ring->cur_tx_wp;
+ else
+ idx = (ring->idx + skb_queue_len(&ring->queue)) %
+ ring->entries;
+ } else {
+ idx = 0;
+ }
+
+ pdesc = &ring->desc[idx];
+
+ if (rtlpriv->use_new_trx_flow) {
+ ptx_bd_desc = &ring->buffer_desc[idx];
+ } else {
+ own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
+ true, HW_DESC_OWN);
+
+ if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("No more TX desc@%d, ring->idx = %d,"
+ "idx = %d, skb_queue_len = 0x%d\n",
+ hw_queue, ring->idx, idx,
+ skb_queue_len(&ring->queue)));
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
+ flags);
+ return skb->len;
+ }
+ }
+
+ if (ieee80211_is_data_qos(fc)) {
+ tid = rtl_get_tid(skb);
+ if (sta) {
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ seq_number = (le16_to_cpu(hdr->seq_ctrl) &
+ IEEE80211_SCTL_SEQ) >> 4;
+ seq_number += 1;
+
+ if (!ieee80211_has_morefrags(hdr->frame_control))
+ sta_entry->tids[tid].seq_number = seq_number;
+ }
+ }
+
+ if (ieee80211_is_data(fc))
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
+ (u8 *)ptx_bd_desc, info, skb,
+ hw_queue, ptcb_desc);
+#else
+/*<delete in kernel end>*/
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
+ (u8 *)ptx_bd_desc, info, sta, skb,
+ hw_queue, ptcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+
+ __skb_queue_tail(&ring->queue, skb);
+ if (rtlpriv->use_new_trx_flow) {
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, true,
+ HW_DESC_OWN, (u8 *) & hw_queue);
+ } else {
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, true,
+ HW_DESC_OWN, (u8 *) & temp_one);
+ }
+
+ if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
+ hw_queue != BEACON_QUEUE) {
+
+ RT_TRACE(COMP_ERR, DBG_LOUD,
+ ("less desc left, stop skb_queue@%d, "
+ "ring->idx = %d,"
+ "idx = %d, skb_queue_len = 0x%d\n",
+ hw_queue, ring->idx, idx,
+ skb_queue_len(&ring->queue)));
+
+ ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
+ }
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ rtlpriv->cfg->ops->tx_polling(hw, hw_queue);
+
+ return 0;
+}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+static void rtl_pci_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+#else
+static void rtl_pci_flush(struct ieee80211_hw *hw, bool drop)
+#endif
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u16 i = 0;
+ int queue_id;
+ struct rtl8192_tx_ring *ring;
+
+ if (mac->skip_scan)
+ return;
+
+ for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
+ u32 queue_len;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ if (((queues >> queue_id) & 0x1) == 0) {
+ queue_id--;
+ continue;
+ }
+#endif
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ queue_len = skb_queue_len(&ring->queue);
+ if (queue_len == 0 || queue_id == BEACON_QUEUE ||
+ queue_id == TXCMD_QUEUE) {
+ queue_id--;
+ continue;
+ } else {
+ msleep(5);
+ i++;
+ }
+
+ /* we just wait 1s for all queues */
+ if (rtlpriv->psc.rfpwr_state == ERFOFF ||
+ is_hal_stop(rtlhal) || i >= 200)
+ return;
+ }
+}
+
+void rtl_pci_deinit(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ _rtl_pci_deinit_trx_ring(hw);
+
+ synchronize_irq(rtlpci->pdev->irq);
+ tasklet_kill(&rtlpriv->works.irq_tasklet);
+
+ flush_workqueue(rtlpriv->works.rtl_wq);
+ destroy_workqueue(rtlpriv->works.rtl_wq);
+
+}
+
+int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int err;
+
+ _rtl_pci_init_struct(hw, pdev);
+
+ err = _rtl_pci_init_trx_ring(hw);
+ if (err) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("tx ring initialization failed"));
+ return err;
+ }
+
+ return 1;
+}
+
+int rtl_pci_start(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ int err = 0;
+ RT_TRACE(COMP_INIT, DBG_DMESG, (" rtl_pci_start \n"));
+ rtl_pci_reset_trx_ring(hw);
+
+ rtlpriv->rtlhal.driver_is_goingto_unload = false;
+ err = rtlpriv->cfg->ops->hw_init(hw);
+ if (err) {
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("Failed to config hardware err %x!\n",err));
+ return err;
+ }
+
+ rtlpriv->cfg->ops->enable_interrupt(hw);
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("enable_interrupt OK\n"));
+
+ rtl_init_rx_config(hw);
+
+ /* should be done after adapter start and interrupt enable */
+ set_hal_start(rtlhal);
+
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+
+ rtlpriv->rtlhal.up_first_time = false;
+
+ RT_TRACE(COMP_INIT, DBG_DMESG, ("rtl_pci_start OK\n"));
+ return 0;
+}
+
+void rtl_pci_stop(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 RFInProgressTimeOut = 0;
+
+ /*
+ * Should be done before disabling the interrupt & adapter,
+ * and takes effect immediately.
+ */
+ set_hal_stop(rtlhal);
+
+ rtlpriv->cfg->ops->disable_interrupt(hw);
+
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ while (ppsc->rfchange_inprogress) {
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ if (RFInProgressTimeOut > 100) {
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ break;
+ }
+ mdelay(1);
+ RFInProgressTimeOut++;
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ }
+ ppsc->rfchange_inprogress = true;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+
+ rtlpriv->rtlhal.driver_is_goingto_unload = true;
+ rtlpriv->cfg->ops->hw_disable(hw);
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+
+ rtl_pci_enable_aspm(hw);
+}
+
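+/*
+ * Identify the chip from the PCI device/revision IDs, record the bus/bridge
+ * topology, read the bridge's PCIe capability offset and Link Control
+ * register, and add this priv to the global list. Returns false when an
+ * unsupported 8192E is detected.
+ */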
+static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct pci_dev *bridge_pdev = pdev->bus->self;
+ u16 venderid;
+ u16 deviceid;
+ u8 revisionid;
+ u16 irqline;
+ u8 tmp;
+
+ venderid = pdev->vendor;
+ deviceid = pdev->device;
+ pci_read_config_byte(pdev, 0x8, &revisionid);
+ pci_read_config_word(pdev, 0x3C, &irqline);
+
+ if (deviceid == RTL_PCI_8192_DID ||
+ deviceid == RTL_PCI_0044_DID ||
+ deviceid == RTL_PCI_0047_DID ||
+ deviceid == RTL_PCI_8192SE_DID ||
+ deviceid == RTL_PCI_8174_DID ||
+ deviceid == RTL_PCI_8173_DID ||
+ deviceid == RTL_PCI_8172_DID ||
+ deviceid == RTL_PCI_8171_DID) {
+ switch (revisionid) {
+ case RTL_PCI_REVISION_ID_8192PCIE:
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("8192E is found but not supported now-"
+ "vid/did=%x/%x\n", venderid, deviceid));
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
+ return false;
+ break;
+ case RTL_PCI_REVISION_ID_8192SE:
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("8192SE is found - "
+ "vid/did=%x/%x\n", venderid, deviceid));
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("Err: Unknown device - "
+ "vid/did=%x/%x\n", venderid, deviceid));
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
+ break;
+
+ }
+ } else if (deviceid == RTL_PCI_8723AE_DID) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("8723AE PCI-E is found - "
+ "vid/did=%x/%x\n", venderid, deviceid));
+ } else if (deviceid == RTL_PCI_8192CET_DID ||
+ deviceid == RTL_PCI_8192CE_DID ||
+ deviceid == RTL_PCI_8191CE_DID ||
+ deviceid == RTL_PCI_8188CE_DID) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("8192C PCI-E is found - "
+ "vid/did=%x/%x\n", venderid, deviceid));
+ } else if (deviceid == RTL_PCI_8192DE_DID ||
+ deviceid == RTL_PCI_8192DE_DID2) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("8192D PCI-E is found - "
+ "vid/did=%x/%x\n", venderid, deviceid));
+ } else if (deviceid == RTL_PCI_8188EE_DID) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Find adapter, Hardware type is 8188EE\n"));
+ } else if (deviceid == RTL_PCI_8723BE_DID) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8723BE;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Find adapter, Hardware type is 8723BE\n"));
+ } else if (deviceid == RTL_PCI_8192EE_DID) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192EE;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Find adapter, Hardware type is 8192EE\n"));
+ } else if (deviceid == RTL_PCI_8821AE_DID) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8821AE;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Find adapter, Hardware type is 8821AE\n"));
+ } else if (deviceid == RTL_PCI_8812AE_DID) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8812AE;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Find adapter, Hardware type is 8812AE\n"));
+ } else {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("Err: Unknown device -"
+ " vid/did=%x/%x\n", venderid, deviceid));
+
+ rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
+ }
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
+ if (revisionid == 0 || revisionid == 1) {
+ if (revisionid == 0) {
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Find 92DE MAC0.\n"));
+ rtlhal->interfaceindex = 0;
+ } else if (revisionid == 1) {
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Find 92DE MAC1.\n"));
+ rtlhal->interfaceindex = 1;
+ }
+ } else {
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("Unknown device - "
+ "VendorID/DeviceID=%x/%x, Revision=%x\n",
+ venderid, deviceid, revisionid));
+ rtlhal->interfaceindex = 0;
+ }
+ }
+
+ /* 92ee use new trx flow */
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE)
+ rtlpriv->use_new_trx_flow = true;
+ else
+ rtlpriv->use_new_trx_flow = false;
+
+ /*find bus info */
+ pcipriv->ndis_adapter.busnumber = pdev->bus->number;
+ pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
+ pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
+
+ /*find bridge info */
+ pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
+ /* Some ARM platforms have no bridge_pdev and would crash here,
+ * so check whether bridge_pdev is NULL first */
+ if (bridge_pdev) {
+ pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
+ for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
+ if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
+ pcipriv->ndis_adapter.pcibridge_vendor = tmp;
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("Pci Bridge Vendor is found index: %d\n",
+ tmp));
+ break;
+ }
+ }
+ }
+
+ if (pcipriv->ndis_adapter.pcibridge_vendor !=
+ PCI_BRIDGE_VENDOR_UNKNOWN) {
+ pcipriv->ndis_adapter.pcibridge_busnum =
+ bridge_pdev->bus->number;
+ pcipriv->ndis_adapter.pcibridge_devnum =
+ PCI_SLOT(bridge_pdev->devfn);
+ pcipriv->ndis_adapter.pcibridge_funcnum =
+ PCI_FUNC(bridge_pdev->devfn);
+ pcipriv->ndis_adapter.pcicfg_addrport =
+ (pcipriv->ndis_adapter.pcibridge_busnum << 16) |
+ (pcipriv->ndis_adapter.pcibridge_devnum << 11) |
+ (pcipriv->ndis_adapter.pcibridge_funcnum << 8) | (1 << 31);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+/*<delete in kernel end>*/
+ pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
+ pci_pcie_cap(bridge_pdev);
+/*<delete in kernel start>*/
+#else
+ pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
+ _rtl_pci_get_pciehdr_offset(hw);
+#endif
+/*<delete in kernel end>*/
+ pcipriv->ndis_adapter.num4bytes =
+ (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
+
+ rtl_pci_get_linkcontrol_field(hw);
+
+ if (pcipriv->ndis_adapter.pcibridge_vendor ==
+ PCI_BRIDGE_VENDOR_AMD) {
+ pcipriv->ndis_adapter.amd_l1_patch =
+ rtl_pci_get_amd_l1_patch(hw);
+ }
+ }
+
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("pcidev busnumber:devnumber:funcnumber:"
+ "vendor:link_ctl %d:%d:%d:%x:%x\n",
+ pcipriv->ndis_adapter.busnumber,
+ pcipriv->ndis_adapter.devnumber,
+ pcipriv->ndis_adapter.funcnumber,
+ pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg));
+
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("pci_bridge busnumber:devnumber:funcnumber:vendor:"
+ "pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
+ pcipriv->ndis_adapter.pcibridge_busnum,
+ pcipriv->ndis_adapter.pcibridge_devnum,
+ pcipriv->ndis_adapter.pcibridge_funcnum,
+ pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
+ pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
+ pcipriv->ndis_adapter.pcibridge_linkctrlreg,
+ pcipriv->ndis_adapter.amd_l1_patch));
+
+ rtl_pci_parse_configuration(pdev, hw);
+ list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
+ return true;
+}
+
+static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+ int ret;
+ ret = pci_enable_msi(rtlpci->pdev);
+ if (ret < 0)
+ return ret;
+
+ ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, hw);
+ if (ret < 0) {
+ pci_disable_msi(rtlpci->pdev);
+ return ret;
+ }
+
+ rtlpci->using_msi = true;
+
+ RT_TRACE(COMP_INIT|COMP_INTR, DBG_DMESG, ("MSI Interrupt Mode!\n"));
+ return 0;
+}
+
+static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+ int ret;
+
+ ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, hw);
+ if (ret < 0) {
+ return ret;
+ }
+
+ rtlpci->using_msi = false;
+ RT_TRACE(COMP_INIT|COMP_INTR, DBG_DMESG,
+ ("Pin-based Interrupt Mode!\n"));
+ return 0;
+}
+
+static int rtl_pci_intr_mode_decide(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+ int ret;
+ if (rtlpci->msi_support == true) {
+ ret = rtl_pci_intr_mode_msi(hw);
+ if (ret < 0)
+ ret = rtl_pci_intr_mode_legacy(hw);
+ } else {
+ ret = rtl_pci_intr_mode_legacy(hw);
+ }
+ return ret;
+}
+
+/* This is used by other modules to get the hw pointer
+ * via rtl_pci_get_hw_pointer(). */
+struct ieee80211_hw *hw_export = NULL;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+int rtl_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+
+#else
+int __devinit rtl_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+#endif
+{
+ struct ieee80211_hw *hw = NULL;
+
+ struct rtl_priv *rtlpriv = NULL;
+ struct rtl_pci_priv *pcipriv = NULL;
+ struct rtl_pci *rtlpci;
+ unsigned long pmem_start, pmem_len, pmem_flags;
+ int err;
+
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ RT_ASSERT(false,
+ ("%s : Cannot enable new PCI device\n",
+ pci_name(pdev)));
+ return err;
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ RT_ASSERT(false, ("Unable to obtain 32bit DMA "
+ "for consistent allocations\n"));
+ pci_disable_device(pdev);
+ return -ENOMEM;
+ }
+ }
+
+ pci_set_master(pdev);
+
+ hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
+ sizeof(struct rtl_priv), &rtl_ops);
+ if (!hw) {
+ RT_ASSERT(false,
+ ("%s : ieee80211 alloc failed\n", pci_name(pdev)));
+ err = -ENOMEM;
+ goto fail1;
+ }
+ hw_export = hw;
+
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+ pci_set_drvdata(pdev, hw);
+
+ rtlpriv = hw->priv;
+ pcipriv = (void *)rtlpriv->priv;
+ pcipriv->dev.pdev = pdev;
+
+ /* init cfg & intf_ops */
+ rtlpriv->rtlhal.interface = INTF_PCI;
+ rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
+ rtlpriv->intf_ops = &rtl_pci_ops;
+ rtlpriv->glb_var = &global_var;
+
+ /*
+ * Init the dbgp flags before all other functions, because we will
+ * use them in other functions (RT_TRACE/RT_PRINT/RTL_PRINT_DATA);
+ * these macros cannot be used before this point.
+ */
+ rtl_dbgp_flag_init(hw);
+
+ /* MEM map */
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ RT_ASSERT(false, ("Can't obtain PCI resources\n"));
+ return err;
+ }
+
+ pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
+ pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id);
+ pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id);
+
+ /*shared mem start */
+ rtlpriv->io.pci_mem_start =
+ (unsigned long)pci_iomap(pdev,
+ rtlpriv->cfg->bar_id, pmem_len);
+ if (rtlpriv->io.pci_mem_start == 0) {
+ RT_ASSERT(false, ("Can't map PCI mem\n"));
+ goto fail2;
+ }
+
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("mem mapped space: start: 0x%08lx len:%08lx "
+ "flags:%08lx, after map:0x%08lx\n",
+ pmem_start, pmem_len, pmem_flags,
+ rtlpriv->io.pci_mem_start));
+
+ /* Disable Clk Request */
+ pci_write_config_byte(pdev, 0x81, 0);
+ /* leave D3 mode */
+ pci_write_config_byte(pdev, 0x44, 0);
+ pci_write_config_byte(pdev, 0x04, 0x06);
+ pci_write_config_byte(pdev, 0x04, 0x07);
+
+ /* find adapter; returns false if the chip is not supported */
+ if(!_rtl_pci_find_adapter(pdev, hw))
+ goto fail3;
+
+ /* Init IO handler */
+ _rtl_pci_io_handler_init(&pdev->dev, hw);
+
+ /*like read eeprom and so on */
+ rtlpriv->cfg->ops->read_eeprom_info(hw);
+
+ if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("Can't init_sw_vars.\n"));
+ goto fail3;
+ }
+
+ rtlpriv->cfg->ops->init_sw_leds(hw);
+
+ /*aspm */
+ rtl_pci_init_aspm(hw);
+
+ /* Init mac80211 sw */
+ err = rtl_init_core(hw);
+ if (err) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Can't allocate sw for mac80211.\n"));
+ goto fail3;
+ }
+
+ /* Init PCI sw */
+ err = !rtl_pci_init(hw, pdev);
+ if (err) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("Failed to init PCI.\n"));
+ goto fail3;
+ }
+
+ err = ieee80211_register_hw(hw);
+ if (err) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Can't register mac80211 hw.\n"));
+ goto fail3;
+ } else {
+ rtlpriv->mac80211.mac80211_registered = 1;
+ }
+ /* the wiphy must have been registered with
+ * cfg80211 prior to regulatory_hint */
+ if (regulatory_hint(hw->wiphy, rtlpriv->regd.alpha2)) {
+ RT_TRACE(COMP_ERR, DBG_WARNING, ("regulatory_hint fail\n"));
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
+ if (err) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("failed to create sysfs device attributes\n"));
+ goto fail3;
+ }
+ /* add for proc */
+ rtl_proc_add_one(hw);
+
+ /*init rfkill */
+ rtl_init_rfkill(hw);
+
+ rtlpci = rtl_pcidev(pcipriv);
+
+ err = rtl_pci_intr_mode_decide(hw);
+ if (err) {
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("%s: failed to register IRQ handler\n",
+ wiphy_name(hw->wiphy)));
+ goto fail3;
+ } else {
+ rtlpci->irq_alloc = 1;
+ }
+
+ set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+ return 0;
+
+fail3:
+ pci_set_drvdata(pdev, NULL);
+ rtl_deinit_core(hw);
+
+ if (rtlpriv->io.pci_mem_start != 0)
+ pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start);
+
+ ieee80211_free_hw(hw);
+
+fail2:
+ pci_release_regions(pdev);
+
+fail1:
+
+ pci_disable_device(pdev);
+
+ return -ENODEV;
+
+}
+//EXPORT_SYMBOL(rtl_pci_probe);
+
+struct ieee80211_hw *rtl_pci_get_hw_pointer(void)
+{
+ return hw_export;
+}
+//EXPORT_SYMBOL(rtl_pci_get_hw_pointer);
+
+void rtl_pci_disconnect(struct pci_dev *pdev)
+{
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+ struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
+
+ clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+
+ sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);
+
+ /* add for proc */
+ rtl_proc_remove_one(hw);
+
+
+ /*ieee80211_unregister_hw will call ops_stop */
+ if (rtlmac->mac80211_registered == 1) {
+ ieee80211_unregister_hw(hw);
+ rtlmac->mac80211_registered = 0;
+ } else {
+ rtl_deinit_deferred_work(hw);
+ rtlpriv->intf_ops->adapter_stop(hw);
+ }
+
+ /*deinit rfkill */
+ rtl_deinit_rfkill(hw);
+
+ rtl_pci_deinit(hw);
+ rtl_deinit_core(hw);
+ rtlpriv->cfg->ops->deinit_sw_vars(hw);
+
+ if (rtlpci->irq_alloc) {
+ synchronize_irq(rtlpci->pdev->irq);
+ free_irq(rtlpci->pdev->irq, hw);
+ rtlpci->irq_alloc = 0;
+ }
+
+ if (rtlpci->using_msi)
+ pci_disable_msi(rtlpci->pdev);
+
+ list_del(&rtlpriv->list);
+ if (rtlpriv->io.pci_mem_start != 0) {
+ pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start);
+ pci_release_regions(pdev);
+ }
+
+ pci_disable_device(pdev);
+
+ rtl_pci_disable_aspm(hw);
+
+ pci_set_drvdata(pdev, NULL);
+
+ ieee80211_free_hw(hw);
+}
+//EXPORT_SYMBOL(rtl_pci_disconnect);
+
+/***************************************
+kernel pci power state define:
+PCI_D0 ((pci_power_t __force) 0)
+PCI_D1 ((pci_power_t __force) 1)
+PCI_D2 ((pci_power_t __force) 2)
+PCI_D3hot ((pci_power_t __force) 3)
+PCI_D3cold ((pci_power_t __force) 4)
+PCI_UNKNOWN ((pci_power_t __force) 5)
+
+This function is called when the system
+goes into suspend state. mac80211 will
+call rtl_mac_stop() from its own
+suspend path first, so there is
+no need to call hw_disable here.
+****************************************/
+int rtl_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->cfg->ops->hw_suspend(hw);
+ rtl_deinit_rfkill(hw);
+
+ return 0;
+}
+//EXPORT_SYMBOL(rtl_pci_suspend);
+
+int rtl_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->cfg->ops->hw_resume(hw);
+ rtl_init_rfkill(hw);
+
+ return 0;
+}
+//EXPORT_SYMBOL(rtl_pci_resume);
+
+struct rtl_intf_ops rtl_pci_ops = {
+ .read_efuse_byte = read_efuse_byte,
+ .adapter_start = rtl_pci_start,
+ .adapter_stop = rtl_pci_stop,
+ .check_buddy_priv = rtl_pci_check_buddy_priv,
+ .adapter_tx = rtl_pci_tx,
+ .flush = rtl_pci_flush,
+ .reset_trx_ring = rtl_pci_reset_trx_ring,
+ .waitq_insert = rtl_pci_tx_chk_waitq_insert,
+
+ .disable_aspm = rtl_pci_disable_aspm,
+ .enable_aspm = rtl_pci_enable_aspm,
+};
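The probe/disconnect/suspend/resume entry points above are not registered with the PCI core in this file; that is left to the chip-specific front end. A minimal sketch, not part of this patch, of how they would typically be wired up (the rtl8821ae_pci_ids table name and the driver name are assumptions):

/* Sketch only: how a chip-specific driver might register these hooks. */
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);

static struct pci_driver rtl8821ae_driver = {
	.name = KBUILD_MODNAME,
	.id_table = rtl8821ae_pci_ids,	/* hypothetical pci_device_id table */
	.probe = rtl_pci_probe,
	.remove = rtl_pci_disconnect,
	.driver = {
		.pm = &rtlwifi_pm_ops,
	},
};

module_pci_driver(rtl8821ae_driver);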
diff --git a/drivers/staging/rtl8821ae/pci.h b/drivers/staging/rtl8821ae/pci.h
new file mode 100644
index 000000000000..9f206550a657
--- /dev/null
+++ b/drivers/staging/rtl8821ae/pci.h
@@ -0,0 +1,353 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_PCI_H__
+#define __RTL_PCI_H__
+
+#include <linux/pci.h>
+/*
+ * 0: MPDU packet queue,
+ * 1: Rx command queue
+ */
+#define RTL_PCI_RX_MPDU_QUEUE 0
+#define RTL_PCI_RX_CMD_QUEUE 1
+#define RTL_PCI_MAX_RX_QUEUE 2
+
+#define RTL_PCI_MAX_RX_COUNT 512 /* was 64 */
+#define RTL_PCI_MAX_TX_QUEUE_COUNT 9
+
+#define RT_TXDESC_NUM 128
+#define TX_DESC_NUM_92E 512
+#define RT_TXDESC_NUM_BE_QUEUE 256
+
+#define BK_QUEUE 0
+#define BE_QUEUE 1
+#define VI_QUEUE 2
+#define VO_QUEUE 3
+#define BEACON_QUEUE 4
+#define TXCMD_QUEUE 5
+#define MGNT_QUEUE 6
+#define HIGH_QUEUE 7
+#define HCCA_QUEUE 8
+
+#define RTL_PCI_DEVICE(vend, dev, cfg) \
+ .vendor = (vend), \
+ .device = (dev), \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID,\
+ .driver_data = (kernel_ulong_t)&(cfg)
+
+#define INTEL_VENDOR_ID 0x8086
+#define SIS_VENDOR_ID 0x1039
+#define ATI_VENDOR_ID 0x1002
+#define ATI_DEVICE_ID 0x7914
+#define AMD_VENDOR_ID 0x1022
+
+#define PCI_MAX_BRIDGE_NUMBER 255
+#define PCI_MAX_DEVICES 32
+#define PCI_MAX_FUNCTION 8
+
+#define PCI_CONF_ADDRESS 0x0CF8 /*PCI Configuration Space Address */
+#define PCI_CONF_DATA 0x0CFC /*PCI Configuration Space Data */
+
+#define PCI_CLASS_BRIDGE_DEV 0x06
+#define PCI_SUBCLASS_BR_PCI_TO_PCI 0x04
+#define PCI_CAPABILITY_ID_PCI_EXPRESS 0x10
+#define PCI_CAP_ID_EXP 0x10
+
+#define U1DONTCARE 0xFF
+#define U2DONTCARE 0xFFFF
+#define U4DONTCARE 0xFFFFFFFF
+
+#define RTL_PCI_8192_DID 0x8192 /*8192 PCI-E */
+#define RTL_PCI_8192SE_DID 0x8192 /*8192 SE */
+#define RTL_PCI_8174_DID 0x8174 /*8192 SE */
+#define RTL_PCI_8173_DID 0x8173 /*8191 SE Crab */
+#define RTL_PCI_8172_DID 0x8172 /*8191 SE RE */
+#define RTL_PCI_8171_DID 0x8171 /*8191 SE Unicron */
+#define RTL_PCI_0045_DID 0x0045 /*8190 PCI for Ceraga */
+#define RTL_PCI_0046_DID 0x0046 /*8190 Cardbus for Ceraga */
+#define RTL_PCI_0044_DID 0x0044 /*8192e PCIE for Ceraga */
+#define RTL_PCI_0047_DID 0x0047 /*8192e Express Card for Ceraga */
+#define RTL_PCI_700F_DID 0x700F
+#define RTL_PCI_701F_DID 0x701F
+#define RTL_PCI_DLINK_DID 0x3304
+#define RTL_PCI_8723AE_DID 0x8723 /*8723e */
+#define RTL_PCI_8192CET_DID 0x8191 /*8192ce */
+#define RTL_PCI_8192CE_DID 0x8178 /*8192ce */
+#define RTL_PCI_8191CE_DID 0x8177 /*8192ce */
+#define RTL_PCI_8188CE_DID 0x8176 /*8192ce */
+#define RTL_PCI_8192CU_DID 0x8191 /*8192ce */
+#define RTL_PCI_8192DE_DID 0x8193 /*8192de */
+#define RTL_PCI_8192DE_DID2 0x002B /*92DE*/
+#define RTL_PCI_8188EE_DID 0x8179 /*8188ee*/
+#define RTL_PCI_8723BE_DID 0xB723 /*8723be*/
+#define RTL_PCI_8192EE_DID 0x818B /*8192ee*/
+#define RTL_PCI_8821AE_DID 0x8821 /*8821ae*/
+#define RTL_PCI_8812AE_DID 0x8812 /*8812ae*/
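RTL_PCI_DEVICE() above expands into the vendor/device fields of a pci_device_id entry plus a driver_data pointer to the chip's rtl_hal_cfg. A hedged example of an ID table built from it; the rtl8821ae_hal_cfg symbol is an assumption for illustration and is not defined in this patch:

/* Illustrative ID table; rtl8821ae_hal_cfg is assumed, not defined here. */
static const struct pci_device_id rtl8821ae_pci_ids[] = {
	{ RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK,
			 RTL_PCI_8821AE_DID, rtl8821ae_hal_cfg) },
	{},
};
MODULE_DEVICE_TABLE(pci, rtl8821ae_pci_ids);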
+
+/*8192 support 16 pages of IO registers*/
+#define RTL_MEM_MAPPED_IO_RANGE_8190PCI 0x1000
+#define RTL_MEM_MAPPED_IO_RANGE_8192PCIE 0x4000
+#define RTL_MEM_MAPPED_IO_RANGE_8192SE 0x4000
+#define RTL_MEM_MAPPED_IO_RANGE_8192CE 0x4000
+#define RTL_MEM_MAPPED_IO_RANGE_8192DE 0x4000
+
+#define RTL_PCI_REVISION_ID_8190PCI 0x00
+#define RTL_PCI_REVISION_ID_8192PCIE 0x01
+#define RTL_PCI_REVISION_ID_8192SE 0x10
+#define RTL_PCI_REVISION_ID_8192CE 0x1
+#define RTL_PCI_REVISION_ID_8192DE 0x0
+
+#define PCI_VENDOR_ID_REALTEK 0x10ec
+
+#define RTL_DEFAULT_HARDWARE_TYPE HARDWARE_TYPE_RTL8192CE
+
+enum pci_bridge_vendor {
+ PCI_BRIDGE_VENDOR_INTEL = 0x0, /*0b'0000,0001 */
+ PCI_BRIDGE_VENDOR_ATI, /*0b'0000,0010*/
+ PCI_BRIDGE_VENDOR_AMD, /*0b'0000,0100*/
+ PCI_BRIDGE_VENDOR_SIS, /*0b'0000,1000*/
+ PCI_BRIDGE_VENDOR_UNKNOWN, /*0b'0100,0000*/
+ PCI_BRIDGE_VENDOR_MAX,
+};
+
+struct rtl_pci_capabilities_header {
+ u8 capability_id;
+ u8 next;
+};
+
+/* In the new TRX flow, the buffer descriptor is a new concept,
+ * but TX wifi info == TX descriptor in the old flow and
+ * RX wifi info == RX descriptor in the old flow */
+struct rtl_tx_buffer_desc {
+#if (RTL8192EE_SEG_NUM == 2)
+ u32 dword[2*(DMA_IS_64BIT + 1)*8]; /* seg = 8 */
+#elif (RTL8192EE_SEG_NUM == 1)
+ u32 dword[2*(DMA_IS_64BIT + 1)*4]; /* seg = 4 */
+#elif (RTL8192EE_SEG_NUM == 0)
+ u32 dword[2*(DMA_IS_64BIT + 1)*2]; /* seg = 2 */
+#endif
+} __packed;
+
+struct rtl_tx_desc {/*old: tx desc*//*new: tx wifi info*/
+ u32 dword[16];
+} __packed;
+
+struct rtl_rx_buffer_desc { /*rx buffer desc*/
+ u32 dword[2];
+} __packed;
+
+struct rtl_rx_desc { /*old: rx desc*//*new: rx wifi info*/
+ u32 dword[8];
+} __packed;
+
+struct rtl_tx_cmd_desc {
+ u32 dword[16];
+} __packed;
+
+struct rtl8192_tx_ring {
+ struct rtl_tx_desc *desc; /*tx desc / tx wifi info*/
+ dma_addr_t dma; /*tx desc dma memory / tx wifi info dma memory*/
+ unsigned int idx;
+ unsigned int entries;
+ struct sk_buff_head queue;
+ /*add for new trx flow*/
+ struct rtl_tx_buffer_desc *buffer_desc; /*tx buffer descriptor*/
+ dma_addr_t buffer_desc_dma; /*tx buffer desc dma memory*/
+ u16 avl_desc; /* available_desc_to_write */
+ u16 cur_tx_wp; /* current_tx_write_point */
+ u16 cur_tx_rp; /* current_tx_read_point */
+};
+
+struct rtl8192_rx_ring {
+ struct rtl_rx_desc *desc;/*for old trx flow, not used in new trx*/
+ /*dma matches either 'desc' or 'buffer_desc'*/
+ dma_addr_t dma;
+ unsigned int idx;
+ struct sk_buff *rx_buf[RTL_PCI_MAX_RX_COUNT];
+ /*add for new trx flow*/
+ struct rtl_rx_buffer_desc *buffer_desc; /*rx buffer descriptor*/
+ u16 next_rx_rp; /* next_rx_read_point */
+};
+
+struct rtl_pci {
+ struct pci_dev *pdev;
+ bool irq_enabled;
+
+ /*Tx */
+ struct rtl8192_tx_ring tx_ring[RTL_PCI_MAX_TX_QUEUE_COUNT];
+ int txringcount[RTL_PCI_MAX_TX_QUEUE_COUNT];
+ u32 transmit_config;
+
+ /*Rx */
+ struct rtl8192_rx_ring rx_ring[RTL_PCI_MAX_RX_QUEUE];
+ int rxringcount;
+ u16 rxbuffersize;
+ u32 receive_config;
+
+ /*irq */
+ u8 irq_alloc;
+ u32 irq_mask[2];
+ u32 sys_irq_mask;
+
+ /*Bcn control register setting */
+ u32 reg_bcn_ctrl_val;
+
+ /*ASPM*/
+ u8 const_pci_aspm;
+ u8 const_amdpci_aspm;
+ u8 const_hwsw_rfoff_d3;
+ u8 const_support_pciaspm;
+ /*pci-e bridge */
+ u8 const_hostpci_aspm_setting;
+ /*pci-e device */
+ u8 const_devicepci_aspm_setting;
+ /*If it supports ASPM, Offset[560h] = 0x40,
+ otherwise Offset[560h] = 0x00. */
+ bool b_support_aspm;
+ bool b_support_backdoor;
+
+ /*QOS & EDCA */
+ enum acm_method acm_method;
+
+ u16 shortretry_limit;
+ u16 longretry_limit;
+
+ /* MSI support */
+ bool msi_support;
+ bool using_msi;
+};
+
+struct mp_adapter {
+ u8 linkctrl_reg;
+
+ u8 busnumber;
+ u8 devnumber;
+ u8 funcnumber;
+
+ u8 pcibridge_busnum;
+ u8 pcibridge_devnum;
+ u8 pcibridge_funcnum;
+
+ u8 pcibridge_vendor;
+ u16 pcibridge_vendorid;
+ u16 pcibridge_deviceid;
+
+ u32 pcicfg_addrport;
+ u8 num4bytes;
+
+ u8 pcibridge_pciehdr_offset;
+ u8 pcibridge_linkctrlreg;
+
+ bool amd_l1_patch;
+};
+
+struct rtl_pci_priv {
+ struct rtl_pci dev;
+ struct mp_adapter ndis_adapter;
+ struct rtl_led_ctl ledctl;
+ struct bt_coexist_info btcoexist;
+};
+
+#define rtl_pcipriv(hw) (((struct rtl_pci_priv *)(rtl_priv(hw))->priv))
+#define rtl_pcidev(pcipriv) (&((pcipriv)->dev))
+
+int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw);
+
+extern struct rtl_intf_ops rtl_pci_ops;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+int rtl_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id);
+#else
+int __devinit rtl_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id);
+#endif
+void rtl_pci_disconnect(struct pci_dev *pdev);
+int rtl_pci_suspend(struct device *dev);
+int rtl_pci_resume(struct device *dev);
+
+static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return 0xff & readb((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline u16 pci_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return readw((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline u32 pci_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return readl((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline void pci_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
+{
+ writeb(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline void pci_write16_async(struct rtl_priv *rtlpriv,
+ u32 addr, u16 val)
+{
+ writew(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline void pci_write32_async(struct rtl_priv *rtlpriv,
+ u32 addr, u32 val)
+{
+ writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline void rtl_pci_raw_write_port_ulong(u32 port, u32 val)
+{
+ outl(val, port);
+}
+
+static inline void rtl_pci_raw_write_port_uchar(u32 port, u8 val)
+{
+ outb(val, port);
+}
+
+static inline void rtl_pci_raw_read_port_uchar(u32 port, u8 *pval)
+{
+ *pval = inb(port);
+}
+
+static inline void rtl_pci_raw_read_port_ushort(u32 port, u16 *pval)
+{
+ *pval = inw(port);
+}
+
+static inline void rtl_pci_raw_read_port_ulong(u32 port, u32 *pval)
+{
+ *pval = inl(port);
+}
+
+#endif
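The inline accessors above are thin wrappers around readb/readw/readl and writeb/writew/writel against the BAR mapped at rtlpriv->io.pci_mem_start. A minimal usage sketch; the 0x02 offset and the bit meanings are purely illustrative, not a real register map:

/* Sketch: read-modify-write of a MAC register through the helpers above. */
static inline void example_set_func_bits(struct rtl_priv *rtlpriv)
{
	u16 val = pci_read16_sync(rtlpriv, 0x02);	/* illustrative offset */

	pci_write16_async(rtlpriv, 0x02, val | BIT(0) | BIT(1));
}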
diff --git a/drivers/staging/rtl8821ae/ps.c b/drivers/staging/rtl8821ae/ps.c
new file mode 100644
index 000000000000..f12ffa83c58d
--- /dev/null
+++ b/drivers/staging/rtl8821ae/ps.c
@@ -0,0 +1,1025 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "base.h"
+#include "ps.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#include <linux/export.h>
+#endif
+#include "btcoexist/rtl_btc.h"
+
+bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool init_status = true;
+
+ /*<1> reset trx ring */
+ if (rtlhal->interface == INTF_PCI)
+ rtlpriv->intf_ops->reset_trx_ring(hw);
+
+ if (is_hal_stop(rtlhal))
+ RT_TRACE(COMP_ERR, DBG_WARNING, ("Driver is already down!\n"));
+
+ /*<2> Enable Adapter */
+ rtlpriv->cfg->ops->hw_init(hw);
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ /*init_status = false; */
+
+ /*<3> Enable Interrupt */
+ rtlpriv->cfg->ops->enable_interrupt(hw);
+
+ /*<enable timer> */
+ rtl_watch_dog_timer_callback((unsigned long)hw);
+
+ return init_status;
+}
+//EXPORT_SYMBOL(rtl_ps_enable_nic);
+
+bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
+{
+ bool status = true;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /*<1> Stop all timer */
+ rtl_deinit_deferred_work(hw);
+
+ /*<2> Disable Interrupt */
+ rtlpriv->cfg->ops->disable_interrupt(hw);
+
+ /*<3> Disable Adapter */
+ rtlpriv->cfg->ops->hw_disable(hw);
+
+ return status;
+}
+//EXPORT_SYMBOL(rtl_ps_disable_nic);
+
+bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate state_toset,
+ u32 changesource, bool protect_or_not)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ enum rf_pwrstate rtstate;
+ bool b_actionallowed = false;
+ u16 rfwait_cnt = 0;
+
+ /*protect_or_not = true; */
+
+ if (protect_or_not)
+ goto no_protect;
+
+ /*
+ *Only one thread can change
+ *the RF state at one time, and others
+ *should wait to be executed.
+ */
+ while (true) {
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ if (ppsc->rfchange_inprogress) {
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("RF Change in progress!"
+ "Wait to set..state_toset(%d).\n",
+ state_toset));
+
+ /* Set RF after the previous action is done. */
+ while (ppsc->rfchange_inprogress) {
+ rfwait_cnt++;
+ mdelay(1);
+ /*
+ *Wait too long, return false to avoid
+ *to be stuck here.
+ */
+ if (rfwait_cnt > 100)
+ return false;
+ }
+ } else {
+ ppsc->rfchange_inprogress = true;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ break;
+ }
+ }
+
+no_protect:
+ rtstate = ppsc->rfpwr_state;
+
+ switch (state_toset) {
+ case ERFON:
+ ppsc->rfoff_reason &= (~changesource);
+
+ if ((changesource == RF_CHANGE_BY_HW) &&
+ (ppsc->b_hwradiooff == true)) {
+ ppsc->b_hwradiooff = false;
+ }
+
+ if (!ppsc->rfoff_reason) {
+ ppsc->rfoff_reason = 0;
+ b_actionallowed = true;
+ }
+
+ break;
+
+ case ERFOFF:
+
+ if ((changesource == RF_CHANGE_BY_HW) &&
+ (ppsc->b_hwradiooff == false)) {
+ ppsc->b_hwradiooff = true;
+ }
+
+ ppsc->rfoff_reason |= changesource;
+ b_actionallowed = true;
+ break;
+
+ case ERFSLEEP:
+ ppsc->rfoff_reason |= changesource;
+ b_actionallowed = true;
+ break;
+
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("switch case not process \n"));
+ break;
+ }
+
+ if (b_actionallowed)
+ rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset);
+
+ if (!protect_or_not) {
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ }
+
+ return b_actionallowed;
+}
+//EXPORT_SYMBOL(rtl_ps_set_rf_state);
+
+static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ ppsc->b_swrf_processing = true;
+
+ if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) {
+ if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
+ RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) &&
+ rtlhal->interface == INTF_PCI) {
+ rtlpriv->intf_ops->disable_aspm(hw);
+ RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
+ }
+ }
+
+ if (rtlpriv->cfg->ops->get_btc_status()){
+ rtlpriv->btcoexist.btc_ops->btc_ips_notify(rtlpriv,
+ ppsc->inactive_pwrstate);
+ }
+ rtl_ps_set_rf_state(hw, ppsc->inactive_pwrstate,
+ RF_CHANGE_BY_IPS, false);
+
+ if (ppsc->inactive_pwrstate == ERFOFF &&
+ rtlhal->interface == INTF_PCI) {
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
+ !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
+ rtlpriv->intf_ops->enable_aspm(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
+ }
+ }
+
+ ppsc->b_swrf_processing = false;
+}
+
+void rtl_ips_nic_off_wq_callback(void *data)
+{
+ struct rtl_works *rtlworks =
+ container_of_dwork_rtl(data, struct rtl_works, ips_nic_off_wq);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ enum rf_pwrstate rtstate;
+
+ if (mac->opmode != NL80211_IFTYPE_STATION) {
+ RT_TRACE(COMP_ERR, DBG_WARNING, ("not station return\n"));
+ return;
+ }
+
+ if (mac->p2p_in_use)
+ return;
+
+ if (mac->link_state > MAC80211_NOLINK)
+ return;
+
+ if (is_hal_stop(rtlhal))
+ return;
+
+ if (rtlpriv->sec.being_setkey)
+ return;
+
+ if (rtlpriv->cfg->ops->bt_turn_off_bt_coexist_before_enter_lps)
+ rtlpriv->cfg->ops->bt_turn_off_bt_coexist_before_enter_lps(hw);
+
+ if (ppsc->b_inactiveps) {
+ rtstate = ppsc->rfpwr_state;
+
+ /*
+ *Do not enter IPS in the following conditions:
+ *(1) RF is already OFF or Sleep
+ *(2) b_swrf_processing (indicates the IPS is still in progress)
+ *(3) Connected (only disconnected can trigger IPS)
+ *(4) IBSS (sends Beacon)
+ *(5) AP mode (sends Beacon)
+ *(6) monitor mode (receives packets)
+ */
+
+ if (rtstate == ERFON &&
+ !ppsc->b_swrf_processing &&
+ (mac->link_state == MAC80211_NOLINK) &&
+ !mac->act_scanning) {
+ RT_TRACE(COMP_RF, DBG_LOUD,
+ ("IPSEnter(): Turn off RF.\n"));
+
+ ppsc->inactive_pwrstate = ERFOFF;
+ ppsc->b_in_powersavemode = true;
+
+ /*rtl_pci_reset_trx_ring(hw); */
+ _rtl_ps_inactive_ps(hw);
+ }
+ }
+}
+
+void rtl_ips_nic_off(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /*
+ *when associating with an AP, mac80211 asks us to
+ *disable the nic quickly after the scan and before
+ *linking; doing so immediately makes the link fail,
+ *so we delay 100ms here
+ */
+ queue_delayed_work(rtlpriv->works.rtl_wq,
+ &rtlpriv->works.ips_nic_off_wq, MSECS(100));
+}
+
+/* NOTICE: every opmode should call nic_on; disabling the nic without
+ * nic_on may cause problems, e.g. poor adhoc throughput */
+void rtl_ips_nic_on(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ enum rf_pwrstate rtstate;
+
+ cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+
+ spin_lock(&rtlpriv->locks.ips_lock);
+ if (ppsc->b_inactiveps) {
+ rtstate = ppsc->rfpwr_state;
+
+ if (rtstate != ERFON &&
+ !ppsc->b_swrf_processing &&
+ ppsc->rfoff_reason <= RF_CHANGE_BY_IPS) {
+
+ ppsc->inactive_pwrstate = ERFON;
+ ppsc->b_in_powersavemode = false;
+ _rtl_ps_inactive_ps(hw);
+ }
+ }
+ spin_unlock(&rtlpriv->locks.ips_lock);
+}
+
+/*for FW LPS*/
+
+/*
+ *Determine if we can set the Fw into PS mode
+ *in the current conditions. Return true if it
+ *can enter PS mode.
+ */
+static bool rtl_get_fwlps_doze(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u32 ps_timediff;
+
+ ps_timediff = jiffies_to_msecs(jiffies -
+ ppsc->last_delaylps_stamp_jiffies);
+
+ if (ps_timediff < 2000) {
+ RT_TRACE(COMP_POWER, DBG_LOUD,
+ ("Delay enter Fw LPS for DHCP, ARP,"
+ " or EAPOL exchanging state.\n"));
+ return false;
+ }
+
+ if (mac->link_state != MAC80211_LINKED)
+ return false;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ return false;
+
+ return true;
+}
+
+/* Change the current power save mode. */
+void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool enter_fwlps;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ return;
+
+ if (mac->link_state != MAC80211_LINKED)
+ return;
+
+ if (ppsc->dot11_psmode == rt_psmode)
+ return;
+
+ /* Update power save mode configured. */
+ ppsc->dot11_psmode = rt_psmode;
+
+ /*
+ *<FW control LPS>
+ *1. Enter PS mode
+ * Set RPWM to Fw to turn RF off and send H2C fw_pwrmode
+ * cmd to set Fw into PS mode.
+ *2. Leave PS mode
+ * Send H2C fw_pwrmode cmd to Fw to set Fw into Active
+ * mode and set RPWM to turn RF on.
+ */
+
+ if ((ppsc->b_fwctrl_lps) && ppsc->report_linked) {
+ if (ppsc->dot11_psmode == EACTIVE) {
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ ("FW LPS leave ps_mode:%x\n",
+ FW_PS_ACTIVE_MODE));
+ enter_fwlps = false;
+ ppsc->pwr_mode = FW_PS_ACTIVE_MODE;
+ ppsc->smart_ps = 0;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_LPS_ACTION,
+ (u8 *)(&enter_fwlps));
+ if (ppsc->p2p_ps_info.opp_ps)
+ rtl_p2p_ps_cmd(hw,P2P_PS_ENABLE);
+
+ } else {
+ if (rtl_get_fwlps_doze(hw)) {
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ ("FW LPS enter ps_mode:%x\n",
+ ppsc->fwctrl_psmode));
+ enter_fwlps = true;
+ ppsc->pwr_mode = ppsc->fwctrl_psmode;
+ ppsc->smart_ps = 2;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_FW_LPS_ACTION,
+ (u8 *)(&enter_fwlps));
+
+ } else {
+ /* Reset the power save related parameters. */
+ ppsc->dot11_psmode = EACTIVE;
+ }
+ }
+ }
+}
+
+/*Enter the leisure power save mode.*/
+void rtl_lps_enter(struct ieee80211_hw *hw)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned long flag;
+
+ if (!ppsc->b_fwctrl_lps)
+ return;
+
+ if (rtlpriv->sec.being_setkey)
+ return;
+
+ if (rtlpriv->link_info.b_busytraffic)
+ return;
+
+ /*sleep only a while after linking, to give DHCP and the 4-way handshake enough time to finish */
+ if (mac->cnt_after_linked < 5)
+ return;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ return;
+
+ if (mac->link_state != MAC80211_LINKED)
+ return;
+
+ spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+
+ /* Idle for a while if we connect to AP a while ago. */
+ if (mac->cnt_after_linked >= 2) {
+ if (ppsc->dot11_psmode == EACTIVE) {
+ RT_TRACE(COMP_POWER, DBG_LOUD,
+ ("Enter 802.11 power save mode...\n"));
+
+ rtl_lps_set_psmode(hw, EAUTOPS);
+ }
+ }
+
+ spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+}
+
+/*Leave the leisure power save mode.*/
+void rtl_lps_leave(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ unsigned long flag;
+
+ spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+
+ if (ppsc->b_fwctrl_lps) {
+ if (ppsc->dot11_psmode != EACTIVE) {
+
+ /*FIX ME */
+ rtlpriv->cfg->ops->enable_interrupt(hw);
+
+ if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM &&
+ RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) &&
+ rtlhal->interface == INTF_PCI) {
+ rtlpriv->intf_ops->disable_aspm(hw);
+ RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
+ }
+
+ RT_TRACE(COMP_POWER, DBG_LOUD,
+ ("Busy Traffic,Leave 802.11 power save..\n"));
+
+ rtl_lps_set_psmode(hw, EACTIVE);
+ }
+ }
+ spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+}
+
+/* For sw LPS*/
+void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_hdr *hdr = (void *) data;
+ struct ieee80211_tim_ie *tim_ie;
+ u8 *tim;
+ u8 tim_len;
+ bool u_buffed;
+ bool m_buffed;
+
+ if (mac->opmode != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!rtlpriv->psc.b_swctrl_lps)
+ return;
+
+ if (rtlpriv->mac80211.link_state != MAC80211_LINKED)
+ return;
+
+ if (!rtlpriv->psc.sw_ps_enabled)
+ return;
+
+ if (rtlpriv->psc.b_fwctrl_lps)
+ return;
+
+ if (likely(!(hw->conf.flags & IEEE80211_CONF_PS)))
+ return;
+
+ /* check if this really is a beacon */
+ if (!ieee80211_is_beacon(hdr->frame_control))
+ return;
+
+ /* min. beacon length + FCS_LEN */
+ if (len <= 40 + FCS_LEN)
+ return;
+
+ /* and only beacons from the associated BSSID, please */
+ if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+ return;
+
+ rtlpriv->psc.last_beacon = jiffies;
+
+ tim = rtl_find_ie(data, len - FCS_LEN, WLAN_EID_TIM);
+ if (!tim)
+ return;
+
+ if (tim[1] < sizeof(*tim_ie))
+ return;
+
+ tim_len = tim[1];
+ tim_ie = (struct ieee80211_tim_ie *) &tim[2];
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+/*<delete in kernel end>*/
+ if (!WARN_ON_ONCE(!hw->conf.ps_dtim_period))
+ rtlpriv->psc.dtim_counter = tim_ie->dtim_count;
+/*<delete in kernel start>*/
+#else
+ if (!WARN_ON_ONCE(!mac->vif->bss_conf.dtim_period))
+ rtlpriv->psc.dtim_counter = tim_ie->dtim_count;
+#endif
+/*<delete in kernel end>*/
+
+ /* Check whenever the PHY can be turned off again. */
+
+ /* 1. What about buffered unicast traffic for our AID? */
+ u_buffed = ieee80211_check_tim(tim_ie, tim_len,
+ rtlpriv->mac80211.assoc_id);
+
+ /* 2. Maybe the AP wants to send multicast/broadcast data? */
+ m_buffed = tim_ie->bitmap_ctrl & 0x01;
+ rtlpriv->psc.multi_buffered = m_buffed;
+
+ /* unicast will be processed by mac80211 through
+ * clearing IEEE80211_CONF_PS, so we just check
+ * multicast frames here */
+ if (!m_buffed) {
+ /* back to low-power land; the delay prevents
+ * the null power save frame tx from failing */
+ queue_delayed_work(rtlpriv->works.rtl_wq,
+ &rtlpriv->works.ps_work, MSECS(5));
+ } else {
+ RT_TRACE(COMP_POWER, DBG_DMESG,
+ ("u_bufferd: %x, m_buffered: %x\n",
+ u_buffed, m_buffed));
+ }
+}
+
+void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ unsigned long flag;
+
+ if (!rtlpriv->psc.b_swctrl_lps)
+ return;
+ if (mac->link_state != MAC80211_LINKED)
+ return;
+
+ if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM &&
+ RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
+ rtlpriv->intf_ops->disable_aspm(hw);
+ RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
+ }
+
+ spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+ rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS, false);
+ spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+}
+
+void rtl_swlps_rfon_wq_callback(void *data)
+{
+ struct rtl_works *rtlworks =
+ container_of_dwork_rtl(data, struct rtl_works, ps_rfon_wq);
+ struct ieee80211_hw *hw = rtlworks->hw;
+
+ rtl_swlps_rf_awake(hw);
+}
+
+void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ unsigned long flag;
+ u8 sleep_intv;
+
+ if (!rtlpriv->psc.sw_ps_enabled)
+ return;
+
+ if ((rtlpriv->sec.being_setkey) ||
+ (mac->opmode == NL80211_IFTYPE_ADHOC))
+ return;
+
+ /*sleep only a while after linking, to give DHCP and the 4-way handshake enough time to finish */
+ if ((mac->link_state != MAC80211_LINKED) || (mac->cnt_after_linked < 5))
+ return;
+
+ if (rtlpriv->link_info.b_busytraffic)
+ return;
+
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ if (rtlpriv->psc.rfchange_inprogress) {
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ return;
+ }
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+
+ spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+ rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS,false);
+ spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
+ !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
+ rtlpriv->intf_ops->enable_aspm(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
+ }
+
+ /* power save algorithm: when this beacon is a DTIM,
+ * set the sleep time to dtim_period * n;
+ * when this beacon is not a DTIM, set the sleep
+ * time to sleep_intv = rtlpriv->psc.dtim_counter or
+ * MAX_SW_LPS_SLEEP_INTV (default 5) */
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+/*<delete in kernel end>*/
+ if (rtlpriv->psc.dtim_counter == 0) {
+ if (hw->conf.ps_dtim_period == 1)
+ sleep_intv = hw->conf.ps_dtim_period * 2;
+ else
+ sleep_intv = hw->conf.ps_dtim_period;
+ } else {
+ sleep_intv = rtlpriv->psc.dtim_counter;
+ }
+/*<delete in kernel start>*/
+#else
+ if (rtlpriv->psc.dtim_counter == 0) {
+ if (mac->vif->bss_conf.dtim_period == 1)
+ sleep_intv = mac->vif->bss_conf.dtim_period * 2;
+ else
+ sleep_intv = mac->vif->bss_conf.dtim_period;
+ } else {
+ sleep_intv = rtlpriv->psc.dtim_counter;
+ }
+#endif
+/*<delete in kernel end>*/
+
+ if (sleep_intv > MAX_SW_LPS_SLEEP_INTV)
+ sleep_intv = MAX_SW_LPS_SLEEP_INTV;
+
+ /* this print should always show dtim_counter = 0 and
+ * sleep = dtim_period; that means we should
+ * wake up before every dtim */
+ RT_TRACE(COMP_POWER, DBG_DMESG,
+ ("dtim_counter:%x will sleep :%d beacon_intv\n",
+ rtlpriv->psc.dtim_counter, sleep_intv));
+
+ /* we tested that 40ms is enough for the sw and hw switch delay */
+ queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.ps_rfon_wq,
+ MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40));
+}
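A worked example of the delay computed above; the 100 TU beacon interval (treated as roughly 100 ms by the MSECS() conversion here) is an assumption for illustration only:

/* Worked example (assuming beacon_int = 100 TU ~ 100 ms):
 *   DTIM beacon, ps_dtim_period == 1  -> sleep_intv = 2
 *       delay = MSECS(2 * 100 - 40) = 160 ms
 *   non-DTIM beacon, dtim_counter == 3 -> sleep_intv = 3
 *       delay = MSECS(3 * 100 - 40) = 260 ms
 */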
+
+
+void rtl_swlps_wq_callback(void *data)
+{
+ struct rtl_works *rtlworks =
+ container_of_dwork_rtl(data, struct rtl_works, ps_work);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool ps = false;
+
+ ps = (hw->conf.flags & IEEE80211_CONF_PS);
+
+ /* we can sleep after ps null send ok */
+ if (rtlpriv->psc.state_inap) {
+ rtl_swlps_rf_sleep(hw);
+
+ if (rtlpriv->psc.state && !ps) {
+ rtlpriv->psc.sleep_ms =
+ jiffies_to_msecs(jiffies -
+ rtlpriv->psc.last_action);
+ }
+
+ if (ps)
+ rtlpriv->psc.last_slept = jiffies;
+
+ rtlpriv->psc.last_action = jiffies;
+ rtlpriv->psc.state = ps;
+ }
+}
+
+
+void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, unsigned int len)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ieee80211_mgmt *mgmt = (void *)data;
+ struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
+ u8 *pos, *end, *ie;
+ u16 noa_len;
+ static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09};
+ u8 noa_num, index,i, noa_index = 0;
+ bool find_p2p_ie = false , find_p2p_ps_ie = false;
+ pos = (u8 *)mgmt->u.beacon.variable;
+ end = data + len;
+ ie = NULL;
+
+ while (pos + 1 < end) {
+
+ if (pos + 2 + pos[1] > end)
+ return;
+
+ if (pos[0] == 221 && pos[1] > 4) {
+ if (memcmp(&pos[2], p2p_oui_ie_type, 4) == 0) {
+ ie = pos + 2+4;
+ break;
+ }
+ }
+ pos += 2 + pos[1];
+ }
+
+ if (ie == NULL)
+ return;
+ find_p2p_ie = true;
+ /*to find noa ie*/
+ while (ie + 1 < end) {
+ noa_len = READEF2BYTE(&ie[1]);
+ if (ie + 3 + ie[1] > end)
+ return;
+
+ if (ie[0] == 12) {
+ find_p2p_ps_ie = true;
+ if ( (noa_len - 2) % 13 != 0){
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("P2P notice of absence: "
+ "invalid length.%d\n",noa_len));
+ return;
+ } else {
+ noa_num = (noa_len - 2) / 13;
+ }
+ noa_index = ie[3];
+ if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode == P2P_PS_NONE
+ || noa_index != p2pinfo->noa_index) {
+ RT_TRACE(COMP_FW, DBG_LOUD,
+ ("update NOA ie.\n"));
+ p2pinfo->noa_index = noa_index;
+ p2pinfo->opp_ps= (ie[4] >> 7);
+ p2pinfo->ctwindow = ie[4] & 0x7F;
+ p2pinfo->noa_num = noa_num;
+ index = 5;
+ for (i = 0; i< noa_num; i++){
+ p2pinfo->noa_count_type[i] =
+ READEF1BYTE(ie+index);
+ index += 1;
+ p2pinfo->noa_duration[i] =
+ READEF4BYTE(ie+index);
+ index += 4;
+ p2pinfo->noa_interval[i] =
+ READEF4BYTE(ie+index);
+ index += 4;
+ p2pinfo->noa_start_time[i] =
+ READEF4BYTE(ie+index);
+ index += 4;
+ }
+
+ if (p2pinfo->opp_ps == 1) {
+ p2pinfo->p2p_ps_mode = P2P_PS_CTWINDOW;
+ /* Driver should wait for LPS
+ * to enter CTWindow */
+ if (rtlpriv->psc.b_fw_current_inpsmode){
+ rtl_p2p_ps_cmd(hw,
+ P2P_PS_ENABLE);
+ }
+ } else if (p2pinfo->noa_num > 0) {
+ p2pinfo->p2p_ps_mode = P2P_PS_NOA;
+ rtl_p2p_ps_cmd(hw, P2P_PS_ENABLE);
+ } else if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
+ rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
+ }
+ }
+
+ break;
+ }
+ ie += 3 + noa_len;
+ }
+
+ if (find_p2p_ie) {
+ if ((p2pinfo->p2p_ps_mode > P2P_PS_NONE) &&
+ !find_p2p_ps_ie)
+ rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
+ }
+}
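The parsing loop above walks 13-byte NoA descriptors (1-byte count/type, then 4-byte duration, interval and start time), which is why the attribute length is validated against (noa_len - 2) % 13. A sketch of the layout those index increments correspond to; the struct is illustrative only, the driver reads the fields byte-wise as shown above:

/* Illustrative layout of one P2P NoA descriptor as parsed above
 * (fields are little-endian and unaligned on the air). */
struct p2p_noa_desc_example {
	u8 count_type;		/* ie[index]           */
	__le32 duration;	/* ie[index + 1 .. 4]  */
	__le32 interval;	/* ie[index + 5 .. 8]  */
	__le32 start_time;	/* ie[index + 9 .. 12] */
} __packed;			/* 13 bytes per descriptor */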
+
+void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data, unsigned int len)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ieee80211_mgmt *mgmt = (void *)data;
+ struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
+ bool find_p2p_ie = false , find_p2p_ps_ie = false;
+ u8 noa_num, index,i, noa_index = 0;
+ u8 *pos, *end, *ie;
+ u16 noa_len;
+ static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09};
+
+ pos = (u8 *) &mgmt->u.action.category;
+ end = data + len;
+ ie = NULL;
+
+ if (pos[0] == 0x7f ) {
+ if (memcmp(&pos[1], p2p_oui_ie_type, 4) == 0) {
+ ie = pos + 3+4;
+ }
+ }
+
+ if (ie == NULL)
+ return;
+ find_p2p_ie = true;
+
+ RT_TRACE(COMP_FW, DBG_LOUD, ("action frame find P2P IE.\n"));
+ /*to find noa ie*/
+ while (ie + 1 < end) {
+ noa_len = READEF2BYTE(&ie[1]);
+ if (ie + 3 + ie[1] > end)
+ return;
+
+ if (ie[0] == 12) {
+ RT_TRACE(COMP_FW, DBG_LOUD, ("find NOA IE.\n"));
+ RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD, ("noa ie "),
+ ie, noa_len);
+ find_p2p_ps_ie = true;
+ if ( (noa_len - 2) % 13 != 0){
+ RT_TRACE(COMP_FW, DBG_LOUD,
+ ("P2P notice of absence: "
+ "invalid length.%d\n",noa_len));
+ return;
+ } else {
+ noa_num = (noa_len - 2) / 13;
+ }
+ noa_index = ie[3];
+ if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode == P2P_PS_NONE
+ || noa_index != p2pinfo->noa_index) {
+ p2pinfo->noa_index = noa_index;
+ p2pinfo->opp_ps= (ie[4] >> 7);
+ p2pinfo->ctwindow = ie[4] & 0x7F;
+ p2pinfo->noa_num = noa_num;
+ index = 5;
+ for (i = 0; i< noa_num; i++){
+ p2pinfo->noa_count_type[i] =
+ READEF1BYTE(ie+index);
+ index += 1;
+ p2pinfo->noa_duration[i] =
+ READEF4BYTE(ie+index);
+ index += 4;
+ p2pinfo->noa_interval[i] =
+ READEF4BYTE(ie+index);
+ index += 4;
+ p2pinfo->noa_start_time[i] =
+ READEF4BYTE(ie+index);
+ index += 4;
+ }
+
+ if (p2pinfo->opp_ps == 1) {
+ p2pinfo->p2p_ps_mode = P2P_PS_CTWINDOW;
+ /* Driver should wait for LPS
+ * to enter CTWindow */
+ if (rtlpriv->psc.b_fw_current_inpsmode){
+ rtl_p2p_ps_cmd(hw,
+ P2P_PS_ENABLE);
+ }
+ } else if (p2pinfo->noa_num > 0) {
+ p2pinfo->p2p_ps_mode = P2P_PS_NOA;
+ rtl_p2p_ps_cmd(hw, P2P_PS_ENABLE);
+ } else if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
+ rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
+ }
+ }
+
+ break;
+ }
+ ie += 3 + noa_len;
+ }
+
+
+}
+
+
+void rtl_p2p_ps_cmd(struct ieee80211_hw *hw,u8 p2p_ps_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
+ struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
+
+ RT_TRACE(COMP_FW, DBG_LOUD, (" p2p state %x\n",p2p_ps_state));
+ switch (p2p_ps_state) {
+ case P2P_PS_DISABLE:
+ p2pinfo->p2p_ps_state = p2p_ps_state;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+ (u8 *)(&p2p_ps_state));
+
+ p2pinfo->noa_index = 0;
+ p2pinfo->ctwindow = 0;
+ p2pinfo->opp_ps = 0;
+ p2pinfo->noa_num = 0;
+ p2pinfo->p2p_ps_mode = P2P_PS_NONE;
+ if (rtlps->b_fw_current_inpsmode) {
+ if (rtlps->smart_ps == 0) {
+ rtlps->smart_ps = 2;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_PWRMODE,
+ (u8 *)(&rtlps->pwr_mode));
+ }
+
+ }
+ break;
+ case P2P_PS_ENABLE:
+ if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
+ p2pinfo->p2p_ps_state = p2p_ps_state;
+
+ if (p2pinfo->ctwindow > 0) {
+ if (rtlps->smart_ps != 0){
+ rtlps->smart_ps = 0;
+ rtlpriv->cfg->ops->set_hw_reg(
+ hw, HW_VAR_H2C_FW_PWRMODE,
+ (u8 *)(&rtlps->pwr_mode));
+ }
+ }
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+ (u8 *)(&p2p_ps_state));
+
+ }
+ break;
+ case P2P_PS_SCAN:
+ case P2P_PS_SCAN_DONE:
+ case P2P_PS_ALLSTASLEEP:
+ if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
+ p2pinfo->p2p_ps_state = p2p_ps_state;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+ (u8 *)(&p2p_ps_state));
+ }
+ break;
+ default:
+ break;
+
+ }
+ RT_TRACE(COMP_FW, DBG_LOUD, (" ctwindow %x oppps %x \n",
+ p2pinfo->ctwindow,p2pinfo->opp_ps));
+ RT_TRACE(COMP_FW, DBG_LOUD, ("count %x duration %x index %x interval %x"
+ " start time %x noa num %x\n",
+ p2pinfo->noa_count_type[0],
+ p2pinfo->noa_duration[0],
+ p2pinfo->noa_index,
+ p2pinfo->noa_interval[0],
+ p2pinfo->noa_start_time[0],
+ p2pinfo->noa_num));
+ RT_TRACE(COMP_FW, DBG_LOUD, ("end\n"));
+}
+
+void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_hdr *hdr = (void *) data;
+
+ if (!mac->p2p)
+ return;
+ if (mac->link_state != MAC80211_LINKED)
+ return;
+ /* min. beacon length + FCS_LEN */
+ if (len <= 40 + FCS_LEN)
+ return;
+
+ /* and only beacons from the associated BSSID, please */
+ if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+ return;
+
+ /* check if this really is a beacon */
+ if (!(ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_action(hdr->frame_control)))
+ return;
+
+ if (ieee80211_is_action(hdr->frame_control)) {
+ rtl_p2p_action_ie(hw,data,len - FCS_LEN);
+ } else {
+ rtl_p2p_noa_ie(hw,data,len - FCS_LEN);
+ }
+
+}
diff --git a/drivers/staging/rtl8821ae/ps.h b/drivers/staging/rtl8821ae/ps.h
new file mode 100644
index 000000000000..374ed77c4126
--- /dev/null
+++ b/drivers/staging/rtl8821ae/ps.h
@@ -0,0 +1,55 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __REALTEK_RTL_PCI_PS_H__
+#define __REALTEK_RTL_PCI_PS_H__
+
+#define MAX_SW_LPS_SLEEP_INTV 5
+
+bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate state_toset, u32 changesource,
+ bool protect_or_not);
+bool rtl_ps_enable_nic(struct ieee80211_hw *hw);
+bool rtl_ps_disable_nic(struct ieee80211_hw *hw);
+void rtl_ips_nic_off(struct ieee80211_hw *hw);
+void rtl_ips_nic_on(struct ieee80211_hw *hw);
+void rtl_ips_nic_off_wq_callback(void *data);
+void rtl_lps_enter(struct ieee80211_hw *hw);
+void rtl_lps_leave(struct ieee80211_hw *hw);
+
+void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode);
+
+void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len);
+void rtl_swlps_wq_callback(void *data);
+void rtl_swlps_rfon_wq_callback(void *data);
+void rtl_swlps_rf_awake(struct ieee80211_hw *hw);
+void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
+void rtl_p2p_ps_cmd(struct ieee80211_hw *hw,u8 p2p_ps_state);
+void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len);
+#endif
diff --git a/drivers/staging/rtl8821ae/rc.c b/drivers/staging/rtl8821ae/rc.c
new file mode 100644
index 000000000000..d387f13ea7dc
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rc.c
@@ -0,0 +1,309 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "base.h"
+#include "rc.h"
+
+/*
+ *Find the highest rate index we can use.
+ *If the skb carries special data like DHCP/EAPOL, we set
+ *it to the lowest rate CCK_1M; otherwise we set the rate to
+ *the highest rate for the wireless mode in use, which is
+ *what iwconfig shows as the Tx rate.
+ */
+static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb, bool not_data)
+{
+ struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_sta_info *sta_entry = NULL;
+ u8 wireless_mode = 0;
+
+ /*
+ *this rate is not the true transmit rate; the firmware
+ *controls the rate entirely. It is only used for:
+ *1. showing the rate in iwconfig in B/G mode
+ *2. rtl_get_tcb_desc: when the rate is 1M we use
+ * the user rate instead of the FW rate.
+ */
+ if (rtlmac->opmode == NL80211_IFTYPE_AP ||
+ rtlmac->opmode == NL80211_IFTYPE_ADHOC ||
+ rtlmac->opmode == NL80211_IFTYPE_MESH_POINT) {
+ if (sta) {
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ wireless_mode = sta_entry->wireless_mode;
+ } else {
+ return 0;
+ }
+ } else {
+ wireless_mode = rtlmac->mode;
+ }
+
+ if (rtl_is_special_data(rtlpriv->mac80211.hw, skb, true) || not_data) {
+ return 0;
+ } else {
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ if (wireless_mode == WIRELESS_MODE_B) {
+ return B_MODE_MAX_RIX;
+ } else if (wireless_mode == WIRELESS_MODE_G) {
+ return G_MODE_MAX_RIX;
+ } else {
+ if (get_rf_type(rtlphy) != RF_2T2R)
+ return N_MODE_MCS7_RIX;
+ else
+ return N_MODE_MCS15_RIX;
+ }
+ } else {
+ if (wireless_mode == WIRELESS_MODE_A) {
+ return A_MODE_MAX_RIX;
+ } else {
+ if (get_rf_type(rtlphy) != RF_2T2R)
+ return N_MODE_MCS7_RIX;
+ else
+ return N_MODE_MCS15_RIX;
+ }
+ }
+ }
+}
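For reference, the maxima returned above line up with the usual mac80211 rate table layout for this driver family; the mapping below is an informational note under that assumption, not new driver code:

/* Informational mapping of the indices returned above (assuming the
 * conventional Realtek band setup registered with mac80211):
 *   B_MODE_MAX_RIX   (3)  -> CCK 11M
 *   G_MODE_MAX_RIX   (11) -> OFDM 54M
 *   A_MODE_MAX_RIX   (7)  -> OFDM 54M (no CCK entries on 5 GHz)
 *   N_MODE_MCS7_RIX  (7)  -> MCS7  (highest single-stream MCS)
 *   N_MODE_MCS15_RIX (15) -> MCS15 (highest two-stream MCS)
 */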
+
+static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
+ struct ieee80211_sta *sta,
+ struct ieee80211_tx_rate *rate,
+ struct ieee80211_tx_rate_control *txrc,
+ u8 tries, char rix, int rtsctsenable,
+ bool not_data)
+{
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ u8 sgi_20 = 0, sgi_40 = 0;
+
+ if (sta) {
+ sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
+ sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
+ }
+ rate->count = tries;
+ rate->idx = rix >= 0x00 ? rix : 0x00;
+
+ if (!not_data) {
+ if (txrc->short_preamble)
+ rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
+ if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ if (sta && (sta->ht_cap.cap &
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ } else {
+ if (mac->bw_40)
+ rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ }
+ if (sgi_20 || sgi_40)
+ rate->flags |= IEEE80211_TX_RC_SHORT_GI;
+ if (sta && sta->ht_cap.ht_supported)
+ rate->flags |= IEEE80211_TX_RC_MCS;
+ }
+}
+
+static void rtl_get_rate(void *ppriv, struct ieee80211_sta *sta,
+ void *priv_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+ struct rtl_priv *rtlpriv = ppriv;
+ struct sk_buff *skb = txrc->skb;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rates = tx_info->control.rates;
+ __le16 fc = rtl_get_fc(skb);
+ u8 try_per_rate, i, rix;
+ bool not_data = !ieee80211_is_data(fc);
+
+ if (rate_control_send_low(sta, priv_sta, txrc))
+ return;
+
+ rix = _rtl_rc_get_highest_rix(rtlpriv, sta, skb, not_data);
+ try_per_rate = 1;
+ _rtl_rc_rate_set_series(rtlpriv, sta, &rates[0], txrc,
+ try_per_rate, rix, 1, not_data);
+
+ if (!not_data) {
+ for (i = 1; i < 4; i++)
+ _rtl_rc_rate_set_series(rtlpriv, sta, &rates[i],
+ txrc, i, (rix - i), 1,
+ not_data);
+ }
+}
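A worked example of the series filled in above: with a returned rix of 11 and a data frame, the four ieee80211_tx_rate entries end up as sketched below; the retry counts come from try_per_rate and the loop index.

/* Example outcome of rtl_get_rate() for rix = 11 on a data frame:
 *   rates[0]: idx = 11, count = 1
 *   rates[1]: idx = 10, count = 1
 *   rates[2]: idx =  9, count = 2
 *   rates[3]: idx =  8, count = 3
 * (flags such as SHORT_GI/40MHZ are added by _rtl_rc_rate_set_series) */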
+
+static bool _rtl_tx_aggr_check(struct rtl_priv *rtlpriv,
+ struct rtl_sta_info *sta_entry, u16 tid)
+{
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ if (mac->act_scanning)
+ return false;
+
+ if (mac->opmode == NL80211_IFTYPE_STATION &&
+ mac->cnt_after_linked < 3)
+ return false;
+
+ if (sta_entry->tids[tid].agg.agg_state == RTL_AGG_STOP)
+ return true;
+
+ return false;
+}
+
+/*mac80211 Rate Control callbacks*/
+static void rtl_tx_status(void *ppriv,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = ppriv;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
+ __le16 fc = rtl_get_fc(skb);
+ struct rtl_sta_info *sta_entry;
+
+ if (!priv_sta || !ieee80211_is_data(fc))
+ return;
+
+ if (rtl_is_special_data(mac->hw, skb, true))
+ return;
+
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+ is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+ return;
+
+ if (sta) {
+ /* Check if aggregation has to be enabled for this tid */
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ if (sta->ht_cap.ht_supported &&
+ !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
+ if (ieee80211_is_data_qos(fc)) {
+ u8 tid = rtl_get_tid(skb);
+ if (_rtl_tx_aggr_check(rtlpriv, sta_entry,
+ tid)) {
+ sta_entry->tids[tid].agg.agg_state =
+ RTL_AGG_PROGRESS;
+ /*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
+ /*<delete in kernel end>*/
+ ieee80211_start_tx_ba_session(sta, tid,
+ 5000);
+ /*<delete in kernel start>*/
+#else
+ ieee80211_start_tx_ba_session(sta, tid);
+#endif
+ /*<delete in kernel end>*/
+ }
+ }
+ }
+ }
+}
+
+static void rtl_rate_init(void *ppriv,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *priv_sta)
+{
+}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
+static void rtl_rate_update(void *ppriv,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ u32 changed,
+ enum nl80211_channel_type oper_chan_type)
+{
+}
+#else
+static void rtl_rate_update(void *ppriv,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *priv_sta,
+ u32 changed)
+{
+}
+#endif
+static void *rtl_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ return rtlpriv;
+}
+
+static void rtl_rate_free(void *rtlpriv)
+{
+ return;
+}
+
+static void *rtl_rate_alloc_sta(void *ppriv,
+ struct ieee80211_sta *sta, gfp_t gfp)
+{
+ struct rtl_priv *rtlpriv = ppriv;
+ struct rtl_rate_priv *rate_priv;
+
+ rate_priv = kzalloc(sizeof(struct rtl_rate_priv), gfp);
+ if (!rate_priv) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Unable to allocate private rc structure\n"));
+ return NULL;
+ }
+
+ rtlpriv->rate_priv = rate_priv;
+
+ return rate_priv;
+}
+
+static void rtl_rate_free_sta(void *rtlpriv,
+ struct ieee80211_sta *sta, void *priv_sta)
+{
+ struct rtl_rate_priv *rate_priv = priv_sta;
+ kfree(rate_priv);
+}
+
+static struct rate_control_ops rtl_rate_ops = {
+ .module = NULL,
+ .name = "rtl_rc",
+ .alloc = rtl_rate_alloc,
+ .free = rtl_rate_free,
+ .alloc_sta = rtl_rate_alloc_sta,
+ .free_sta = rtl_rate_free_sta,
+ .rate_init = rtl_rate_init,
+ .rate_update = rtl_rate_update,
+ .tx_status = rtl_tx_status,
+ .get_rate = rtl_get_rate,
+};
+
+int rtl_rate_control_register(void)
+{
+ return ieee80211_rate_control_register(&rtl_rate_ops);
+}
+
+void rtl_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&rtl_rate_ops);
+}
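rtl_rate_control_register()/rtl_rate_control_unregister() are meant to be called once from the core module's load/unload path. A minimal sketch under that assumption; the rtl_core_module_init/exit names are illustrative, not part of this patch:

/* Sketch: registering the rc algorithm with mac80211 at module load. */
static int __init rtl_core_module_init(void)
{
	if (rtl_rate_control_register())
		pr_err("rtl_rc: unable to register rate control\n");
	return 0;
}

static void __exit rtl_core_module_exit(void)
{
	rtl_rate_control_unregister();
}

module_init(rtl_core_module_init);
module_exit(rtl_core_module_exit);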
diff --git a/drivers/staging/rtl8821ae/rc.h b/drivers/staging/rtl8821ae/rc.h
new file mode 100644
index 000000000000..4afa2c20adcf
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rc.h
@@ -0,0 +1,47 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_RC_H__
+#define __RTL_RC_H__
+
+#define B_MODE_MAX_RIX 3
+#define G_MODE_MAX_RIX 11
+#define A_MODE_MAX_RIX 7
+
+/* in mac80211 mcs0-mcs15 is idx0-idx15*/
+#define N_MODE_MCS7_RIX 7
+#define N_MODE_MCS15_RIX 15
+
+struct rtl_rate_priv {
+ u8 ht_cap;
+};
+
+int rtl_rate_control_register(void);
+void rtl_rate_control_unregister(void);
+#endif
diff --git a/drivers/staging/rtl8821ae/regd.c b/drivers/staging/rtl8821ae/regd.c
new file mode 100644
index 000000000000..d89f15cb8089
--- /dev/null
+++ b/drivers/staging/rtl8821ae/regd.c
@@ -0,0 +1,503 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "regd.h"
+
+static struct country_code_to_enum_rd allCountries[] = {
+ {COUNTRY_CODE_FCC, "US"},
+ {COUNTRY_CODE_IC, "US"},
+ {COUNTRY_CODE_ETSI, "EC"},
+ {COUNTRY_CODE_SPAIN, "EC"},
+ {COUNTRY_CODE_FRANCE, "EC"},
+ {COUNTRY_CODE_MKK, "JP"},
+ {COUNTRY_CODE_MKK1, "JP"},
+ {COUNTRY_CODE_ISRAEL, "EC"},
+ {COUNTRY_CODE_TELEC, "JP"},
+ {COUNTRY_CODE_MIC, "JP"},
+ {COUNTRY_CODE_GLOBAL_DOMAIN, "JP"},
+ {COUNTRY_CODE_WORLD_WIDE_13, "EC"},
+ {COUNTRY_CODE_TELEC_NETGEAR, "EC"},
+};
+
+/*
+ *Only these channels allow active
+ *scan on all world regulatory domains
+ */
+#define RTL819x_2GHZ_CH01_11 \
+ REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
+
+/*
+ *We enable active scan on these on a
+ *case-by-case basis by regulatory domain
+ */
+#define RTL819x_2GHZ_CH12_13 \
+ REG_RULE(2467-10, 2472+10, 40, 0, 20,\
+ NL80211_RRF_PASSIVE_SCAN)
+
+#define RTL819x_2GHZ_CH14 \
+ REG_RULE(2484-10, 2484+10, 40, 0, 20, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_NO_OFDM)
+
+/* 5G chan 36 - chan 64*/
+#define RTL819x_5GHZ_5150_5350 \
+ REG_RULE(5150-10, 5350+10, 40, 0, 30, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_NO_IBSS)
+
+/* 5G chan 100 - chan 165*/
+#define RTL819x_5GHZ_5470_5850 \
+ REG_RULE(5470-10, 5850+10, 40, 0, 30, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_NO_IBSS)
+
+/* 5G chan 149 - chan 165*/
+#define RTL819x_5GHZ_5725_5850 \
+ REG_RULE(5725-10, 5850+10, 40, 0, 30, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_NO_IBSS)
+
+#define RTL819x_5GHZ_ALL \
+ RTL819x_5GHZ_5150_5350, RTL819x_5GHZ_5470_5850
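+
+/*
+ * REG_RULE(start, end, bw, gain, eirp, flags) takes the frequency range in
+ * MHz, the maximum bandwidth in MHz, the maximum antenna gain in dBi and
+ * the maximum EIRP in dBm; the -10/+10 MHz above extends each range from
+ * channel center frequencies to the edges of a 20 MHz channel.
+ */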
+
+static const struct ieee80211_regdomain rtl_regdom_11 = {
+ .n_reg_rules = 1,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_12_13 = {
+ .n_reg_rules = 2,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ RTL819x_2GHZ_CH12_13,
+ }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_no_midband = {
+ .n_reg_rules = 3,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ RTL819x_5GHZ_5150_5350,
+ RTL819x_5GHZ_5725_5850,
+ }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_60_64 = {
+ .n_reg_rules = 3,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ RTL819x_2GHZ_CH12_13,
+ RTL819x_5GHZ_5725_5850,
+ }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_14_60_64 = {
+ .n_reg_rules = 4,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ RTL819x_2GHZ_CH12_13,
+ RTL819x_2GHZ_CH14,
+ RTL819x_5GHZ_5725_5850,
+ }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_14 = {
+ .n_reg_rules = 3,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ RTL819x_2GHZ_CH12_13,
+ RTL819x_2GHZ_CH14,
+ }
+};
+
+static bool _rtl_is_radar_freq(u16 center_freq)
+{
+ return (center_freq >= 5260 && center_freq <= 5700);
+}
+
+static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator)
+{
+ enum ieee80211_band band;
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_reg_rule *reg_rule;
+ struct ieee80211_channel *ch;
+ unsigned int i;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
+ u32 bandwidth = 0;
+ int r;
+#endif
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+
+ if (!wiphy->bands[band])
+ continue;
+
+ sband = wiphy->bands[band];
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ if (_rtl_is_radar_freq(ch->center_freq) ||
+ (ch->flags & IEEE80211_CHAN_RADAR))
+ continue;
+ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+ reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ if (IS_ERR(reg_rule))
+ continue;
+#else
+ r = freq_reg_info(wiphy, ch->center_freq,
+ bandwidth, &reg_rule);
+ if (r)
+ continue;
+#endif
+
+ /*
+			 *If 11d had a rule for this channel, ensure
+ *we enable adhoc/beaconing if it allows us to
+ *use it. Note that we would have disabled it
+ *by applying our static world regdomain by
+ *default during init, prior to calling our
+ *regulatory_hint().
+ */
+
+ if (!(reg_rule->flags & NL80211_RRF_NO_IBSS))
+ ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
+ if (!(reg_rule->flags &
+ NL80211_RRF_PASSIVE_SCAN))
+ ch->flags &=
+ ~IEEE80211_CHAN_PASSIVE_SCAN;
+ } else {
+ if (ch->beacon_found)
+ ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN);
+ }
+ }
+ }
+}
+
+/* Allow active scan on channels 12 and 13 */
+static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator
+ initiator)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ const struct ieee80211_reg_rule *reg_rule;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
+ u32 bandwidth = 0;
+ int r;
+#endif
+
+ if (!wiphy->bands[IEEE80211_BAND_2GHZ])
+ return;
+ sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+
+ /*
+	 *If no country IE has been received, always enable active scan
+	 *on these channels. This is only done for specific regulatory SKUs.
+ */
+ if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
+ ch = &sband->channels[11]; /* CH 12 */
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ ch = &sband->channels[12]; /* CH 13 */
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ return;
+ }
+
+ /*
+	 *If a country IE has been received, check its rule for this
+ *channel first before enabling active scan. The passive scan
+ *would have been enforced by the initial processing of our
+ *custom regulatory domain.
+ */
+
+ ch = &sband->channels[11]; /* CH 12 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+ reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ if (!IS_ERR(reg_rule)) {
+#else
+ r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
+ if (!r) {
+#endif
+ if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ }
+
+ ch = &sband->channels[12]; /* CH 13 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+ reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ if (!IS_ERR(reg_rule)) {
+#else
+ r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
+ if (!r) {
+#endif
+ if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ }
+}
+
+/*
+ *Always apply Radar/DFS rules on
+ *freq range 5260 MHz - 5700 MHz
+ */
+static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ unsigned int i;
+
+ if (!wiphy->bands[IEEE80211_BAND_5GHZ])
+ return;
+
+ sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ if (!_rtl_is_radar_freq(ch->center_freq))
+ continue;
+
+ /*
+ *We always enable radar detection/DFS on this
+		 *frequency range. Additionally, we also apply the
+		 *following on this frequency range:
+		 *- If STA mode does not yet support DFS, disable
+		 * active scanning.
+		 *- If adhoc mode does not yet support DFS, disable
+		 * adhoc on this frequency.
+		 *- If AP mode does not yet support radar detection/DFS,
+		 * do not allow AP mode.
+ */
+ if (!(ch->flags & IEEE80211_CHAN_DISABLED))
+ ch->flags |= IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN;
+ }
+}
+
+static void _rtl_reg_apply_world_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator,
+ struct rtl_regulatory *reg)
+{
+ _rtl_reg_apply_beaconing_flags(wiphy, initiator);
+ _rtl_reg_apply_active_scan_flags(wiphy, initiator);
+ return;
+}
+
+static void _rtl_dump_channel_map(struct wiphy *wiphy)
+{
+ enum ieee80211_band band;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ unsigned int i;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!wiphy->bands[band])
+ continue;
+ sband = wiphy->bands[band];
+ for (i = 0; i < sband->n_channels; i++)
+ ch = &sband->channels[i];
+ }
+}
+
+static int _rtl_reg_notifier_apply(struct wiphy *wiphy,
+ struct regulatory_request *request,
+ struct rtl_regulatory *reg)
+{
+ /* We always apply this */
+ _rtl_reg_apply_radar_flags(wiphy);
+
+ switch (request->initiator) {
+ case NL80211_REGDOM_SET_BY_DRIVER:
+ case NL80211_REGDOM_SET_BY_CORE:
+ case NL80211_REGDOM_SET_BY_USER:
+ break;
+ case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ _rtl_reg_apply_world_flags(wiphy, request->initiator, reg);
+ break;
+ }
+
+ _rtl_dump_channel_map(wiphy);
+
+ return 0;
+}
+
+static const struct ieee80211_regdomain *_rtl_regdomain_select(
+ struct rtl_regulatory *reg)
+{
+ switch (reg->country_code) {
+ case COUNTRY_CODE_FCC:
+ return &rtl_regdom_no_midband;
+ case COUNTRY_CODE_IC:
+ return &rtl_regdom_11;
+ case COUNTRY_CODE_ETSI:
+ case COUNTRY_CODE_TELEC_NETGEAR:
+ return &rtl_regdom_60_64;
+ case COUNTRY_CODE_SPAIN:
+ case COUNTRY_CODE_FRANCE:
+ case COUNTRY_CODE_ISRAEL:
+ case COUNTRY_CODE_WORLD_WIDE_13:
+ return &rtl_regdom_12_13;
+ case COUNTRY_CODE_MKK:
+ case COUNTRY_CODE_MKK1:
+ case COUNTRY_CODE_TELEC:
+ case COUNTRY_CODE_MIC:
+ return &rtl_regdom_14_60_64;
+ case COUNTRY_CODE_GLOBAL_DOMAIN:
+ return &rtl_regdom_14;
+ default:
+ return &rtl_regdom_no_midband;
+ }
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg,
+ struct wiphy *wiphy,
+ void (*reg_notifier) (struct wiphy * wiphy,
+ struct regulatory_request *
+ request))
+#else
+static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg,
+ struct wiphy *wiphy,
+ int (*reg_notifier) (struct wiphy * wiphy,
+ struct regulatory_request *
+ request))
+#endif
+{
+ const struct ieee80211_regdomain *regd;
+
+ wiphy->reg_notifier = reg_notifier;
+
+ wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+ wiphy->flags &= ~WIPHY_FLAG_STRICT_REGULATORY;
+ wiphy->flags &= ~WIPHY_FLAG_DISABLE_BEACON_HINTS;
+
+ regd = _rtl_regdomain_select(reg);
+ wiphy_apply_custom_regulatory(wiphy, regd);
+ _rtl_reg_apply_radar_flags(wiphy);
+ _rtl_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
+ return 0;
+}
+
+static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (allCountries[i].countrycode == countrycode)
+ return &allCountries[i];
+ }
+ return NULL;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+int rtl_regd_init(struct ieee80211_hw *hw,
+ void (*reg_notifier) (struct wiphy *wiphy,
+ struct regulatory_request *request))
+#else
+int rtl_regd_init(struct ieee80211_hw *hw,
+ int (*reg_notifier) (struct wiphy *wiphy,
+ struct regulatory_request *request))
+#endif
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct wiphy *wiphy = hw->wiphy;
+ struct country_code_to_enum_rd *country = NULL;
+
+	if (wiphy == NULL)
+ return -EINVAL;
+
+ /* init country_code from efuse channel plan */
+ rtlpriv->regd.country_code = rtlpriv->efuse.channel_plan;
+
+ RT_TRACE(COMP_REGD, DBG_TRACE,
+ (KERN_DEBUG "rtl: EEPROM regdomain: 0x%0x\n",
+ rtlpriv->regd.country_code));
+
+ if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
+ RT_TRACE(COMP_REGD, DBG_DMESG,
+			 (KERN_DEBUG "rtl: EEPROM indicates invalid country code, "
+			  "world wide 13 should be used\n"));
+
+ rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
+ }
+
+ country = _rtl_regd_find_country(rtlpriv->regd.country_code);
+
+ if (country) {
+ rtlpriv->regd.alpha2[0] = country->iso_name[0];
+ rtlpriv->regd.alpha2[1] = country->iso_name[1];
+ } else {
+ rtlpriv->regd.alpha2[0] = '0';
+ rtlpriv->regd.alpha2[1] = '0';
+ }
+
+ RT_TRACE(COMP_REGD, DBG_TRACE,
+ (KERN_DEBUG "rtl: Country alpha2 being used: %c%c\n",
+ rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]));
+
+ _rtl_regd_init_wiphy(&rtlpriv->regd, wiphy, reg_notifier);
+
+ return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_REGD, DBG_LOUD, ("\n"));
+
+ _rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd);
+}
+#else
+int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_REGD, DBG_LOUD, ("\n"));
+
+ return _rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd);
+}
+#endif
diff --git a/drivers/staging/rtl8821ae/regd.h b/drivers/staging/rtl8821ae/regd.h
new file mode 100644
index 000000000000..abc60ab8165c
--- /dev/null
+++ b/drivers/staging/rtl8821ae/regd.h
@@ -0,0 +1,75 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_REGD_H__
+#define __RTL_REGD_H__
+
+#define IEEE80211_CHAN_NO_IBSS 1<<2
+#define IEEE80211_CHAN_PASSIVE_SCAN 1<<1
+#define WIPHY_FLAG_CUSTOM_REGULATORY BIT(0)
+#define WIPHY_FLAG_STRICT_REGULATORY BIT(1)
+#define WIPHY_FLAG_DISABLE_BEACON_HINTS BIT(2)
+
+struct country_code_to_enum_rd {
+ u16 countrycode;
+ const char *iso_name;
+};
+
+enum country_code_type_t {
+ COUNTRY_CODE_FCC = 0,
+ COUNTRY_CODE_IC = 1,
+ COUNTRY_CODE_ETSI = 2,
+ COUNTRY_CODE_SPAIN = 3,
+ COUNTRY_CODE_FRANCE = 4,
+ COUNTRY_CODE_MKK = 5,
+ COUNTRY_CODE_MKK1 = 6,
+ COUNTRY_CODE_ISRAEL = 7,
+ COUNTRY_CODE_TELEC = 8,
+ COUNTRY_CODE_MIC = 9,
+ COUNTRY_CODE_GLOBAL_DOMAIN = 10,
+ COUNTRY_CODE_WORLD_WIDE_13 = 11,
+ COUNTRY_CODE_TELEC_NETGEAR = 12,
+
+ /*add new channel plan above this line */
+ COUNTRY_CODE_MAX
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+int rtl_regd_init(struct ieee80211_hw *hw,
+ void (*reg_notifier) (struct wiphy *wiphy,
+ struct regulatory_request *request));
+void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+#else
+int rtl_regd_init(struct ieee80211_hw *hw,
+ int (*reg_notifier) (struct wiphy *wiphy,
+ struct regulatory_request *request));
+int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+#endif
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/btc.h b/drivers/staging/rtl8821ae/rtl8821ae/btc.h
new file mode 100644
index 000000000000..74ac189e3a88
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/btc.h
@@ -0,0 +1,87 @@
+
+/******************************************************************************
+ **
+ ** Copyright(c) 2009-2010 Realtek Corporation.
+ **
+ ** This program is free software; you can redistribute it and/or modify it
+ ** under the terms of version 2 of the GNU General Public License as
+ ** published by the Free Software Foundation.
+ **
+ ** This program is distributed in the hope that it will be useful, but WITHOUT
+ ** ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ ** FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ ** more details.
+ **
+ ** You should have received a copy of the GNU General Public License along with
+ ** this program; if not, write to the Free Software Foundation, Inc.,
+ ** 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ **
+ ** The full GNU General Public License is included in this distribution in the
+ ** file called LICENSE.
+ **
+ ** Contact Information:
+ ** wlanfae <wlanfae@realtek.com>
+ ** Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ ** Hsinchu 300, Taiwan.
+ ** Larry Finger <Larry.Finger@lwfinger.net>
+ **
+ ******************************************************************************/
+
+#ifndef __RTL8821AE_BTC_H__
+#define __RTL8821AE_BTC_H__
+
+#include "../wifi.h"
+#include "hal_bt_coexist.h"
+
+struct bt_coexist_c2h_info {
+ u8 no_parse_c2h;
+ u8 has_c2h;
+};
+
+struct btdm_8821ae {
+ bool b_all_off;
+ bool b_agc_table_en;
+ bool b_adc_back_off_on;
+ bool b2_ant_hid_en;
+ bool b_low_penalty_rate_adaptive;
+ bool b_rf_rx_lpf_shrink;
+ bool b_reject_aggre_pkt;
+ bool b_tra_tdma_on;
+ u8 tra_tdma_nav;
+ u8 tra_tdma_ant;
+ bool b_tdma_on;
+ u8 tdma_ant;
+ u8 tdma_nav;
+ u8 tdma_dac_swing;
+ u8 fw_dac_swing_lvl;
+ bool b_ps_tdma_on;
+ u8 ps_tdma_byte[5];
+ bool b_pta_on;
+ u32 val_0x6c0;
+ u32 val_0x6c8;
+ u32 val_0x6cc;
+ bool b_sw_dac_swing_on;
+ u32 sw_dac_swing_lvl;
+ u32 wlan_act_hi;
+ u32 wlan_act_lo;
+ u32 bt_retry_index;
+ bool b_dec_bt_pwr;
+ bool b_ignore_wlan_act;
+};
+
+struct bt_coexist_8821ae {
+ u32 high_priority_tx;
+ u32 high_priority_rx;
+ u32 low_priority_tx;
+ u32 low_priority_rx;
+ u8 c2h_bt_info;
+ bool b_c2h_bt_info_req_sent;
+ bool b_c2h_bt_inquiry_page;
+ u32 bt_inq_page_start_time;
+ u8 bt_retry_cnt;
+ u8 c2h_bt_info_original;
+ u8 bt_inquiry_page_cnt;
+ struct btdm_8821ae btdm;
+};
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/def.h b/drivers/staging/rtl8821ae/rtl8821ae/def.h
new file mode 100644
index 000000000000..72ebdeaa6e7d
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/def.h
@@ -0,0 +1,442 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_DEF_H__
+#define __RTL8821AE_DEF_H__
+
+/*--------------------------Define -------------------------------------------*/
+/* BIT 7 HT Rate*/
+/*TxHT = 0*/
+#define MGN_1M 0x02
+#define MGN_2M 0x04
+#define MGN_5_5M 0x0b
+#define MGN_11M 0x16
+
+#define MGN_6M 0x0c
+#define MGN_9M 0x12
+#define MGN_12M 0x18
+#define MGN_18M 0x24
+#define MGN_24M 0x30
+#define MGN_36M 0x48
+#define MGN_48M 0x60
+#define MGN_54M 0x6c
+
+// TxHT = 1
+#define MGN_MCS0 0x80
+#define MGN_MCS1 0x81
+#define MGN_MCS2 0x82
+#define MGN_MCS3 0x83
+#define MGN_MCS4 0x84
+#define MGN_MCS5 0x85
+#define MGN_MCS6 0x86
+#define MGN_MCS7 0x87
+#define MGN_MCS8 0x88
+#define MGN_MCS9 0x89
+#define MGN_MCS10 0x8a
+#define MGN_MCS11 0x8b
+#define MGN_MCS12 0x8c
+#define MGN_MCS13 0x8d
+#define MGN_MCS14 0x8e
+#define MGN_MCS15 0x8f
+//VHT rate
+#define MGN_VHT1SS_MCS0 0x90
+#define MGN_VHT1SS_MCS1 0x91
+#define MGN_VHT1SS_MCS2 0x92
+#define MGN_VHT1SS_MCS3 0x93
+#define MGN_VHT1SS_MCS4 0x94
+#define MGN_VHT1SS_MCS5 0x95
+#define MGN_VHT1SS_MCS6 0x96
+#define MGN_VHT1SS_MCS7 0x97
+#define MGN_VHT1SS_MCS8 0x98
+#define MGN_VHT1SS_MCS9 0x99
+#define MGN_VHT2SS_MCS0 0x9a
+#define MGN_VHT2SS_MCS1 0x9b
+#define MGN_VHT2SS_MCS2 0x9c
+#define MGN_VHT2SS_MCS3 0x9d
+#define MGN_VHT2SS_MCS4 0x9e
+#define MGN_VHT2SS_MCS5 0x9f
+#define MGN_VHT2SS_MCS6 0xa0
+#define MGN_VHT2SS_MCS7 0xa1
+#define MGN_VHT2SS_MCS8 0xa2
+#define MGN_VHT2SS_MCS9 0xa3
+
+#define MGN_VHT3SS_MCS0 0xa4
+#define MGN_VHT3SS_MCS1 0xa5
+#define MGN_VHT3SS_MCS2 0xa6
+#define MGN_VHT3SS_MCS3 0xa7
+#define MGN_VHT3SS_MCS4 0xa8
+#define MGN_VHT3SS_MCS5 0xa9
+#define MGN_VHT3SS_MCS6 0xaa
+#define MGN_VHT3SS_MCS7 0xab
+#define MGN_VHT3SS_MCS8 0xac
+#define MGN_VHT3SS_MCS9 0xad
+
+#define MGN_MCS0_SG 0xc0
+#define MGN_MCS1_SG 0xc1
+#define MGN_MCS2_SG 0xc2
+#define MGN_MCS3_SG 0xc3
+#define MGN_MCS4_SG 0xc4
+#define MGN_MCS5_SG 0xc5
+#define MGN_MCS6_SG 0xc6
+#define MGN_MCS7_SG 0xc7
+#define MGN_MCS8_SG 0xc8
+#define MGN_MCS9_SG 0xc9
+#define MGN_MCS10_SG 0xca
+#define MGN_MCS11_SG 0xcb
+#define MGN_MCS12_SG 0xcc
+#define MGN_MCS13_SG 0xcd
+#define MGN_MCS14_SG 0xce
+#define MGN_MCS15_SG 0xcf
+
+#define MGN_UNKNOWN 0xff
+
+
+/* 30 ms */
+#define WIFI_NAV_UPPER_US 30000
+#define HAL_92C_NAV_UPPER_UNIT 128
+
+#define HAL_RETRY_LIMIT_INFRA 48
+#define HAL_RETRY_LIMIT_AP_ADHOC 7
+
+#define RESET_DELAY_8185 20
+
+#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
+#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
+
+#define NUM_OF_FIRMWARE_QUEUE 10
+#define NUM_OF_PAGES_IN_FW 0x100
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0
+#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0
+#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02
+#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02
+#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1
+
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00
+
+#define MAX_RX_DMA_BUFFER_SIZE 0x3E80
+
+
+#define MAX_LINES_HWCONFIG_TXT 1000
+#define MAX_BYTES_LINE_HWCONFIG_TXT 256
+
+#define SW_THREE_WIRE 0
+#define HW_THREE_WIRE 2
+
+#define BT_DEMO_BOARD 0
+#define BT_QA_BOARD 1
+#define BT_FPGA 2
+
+#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
+#define HAL_PRIME_CHNL_OFFSET_LOWER 1
+#define HAL_PRIME_CHNL_OFFSET_UPPER 2
+
+#define MAX_H2C_QUEUE_NUM 10
+
+#define RX_MPDU_QUEUE 0
+#define RX_CMD_QUEUE 1
+#define RX_MAX_QUEUE 2
+#define AC2QUEUEID(_AC) (_AC)
+
+#define C2H_RX_CMD_HDR_LEN 8
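+/*
+ * C2H RX command header layout (little endian), as extracted by the
+ * accessors below: bits 0-15 hold the command length, bits 16-23 the
+ * element ID, bits 24-30 the command sequence and bit 31 a "continue"
+ * flag; the command payload starts C2H_RX_CMD_HDR_LEN bytes in.
+ */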
+#define GET_C2H_CMD_CMD_LEN(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 0, 16)
+#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 16, 8)
+#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 24, 7)
+#define GET_C2H_CMD_CONTINUE(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 31, 1)
+#define GET_C2H_CMD_CONTENT(__prxhdr) \
+ ((u8*)(__prxhdr) + C2H_RX_CMD_HDR_LEN)
+
+#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8)
+#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8)
+#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16)
+#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5)
+#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1)
+#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5)
+#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1)
+#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4)
+#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
+
+#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3)
+
+#define CHIP_8812 BIT(2)
+#define CHIP_8821 (BIT(0)|BIT(2))
+
+#define CHIP_8821A (BIT(0)|BIT(2))
+#define NORMAL_CHIP BIT(3)
+#define RF_TYPE_1T1R (~(BIT(4)|BIT(5)|BIT(6)))
+#define RF_TYPE_1T2R BIT(4)
+#define RF_TYPE_2T2R BIT(5)
+#define CHIP_VENDOR_UMC BIT(7)
+#define B_CUT_VERSION BIT(12)
+#define C_CUT_VERSION BIT(13)
+#define D_CUT_VERSION ((BIT(12)|BIT(13)))
+#define E_CUT_VERSION BIT(14)
+#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28))
+
+
+
+enum version_8821ae {
+ VERSION_TEST_CHIP_1T1R_8812 = 0x0004,
+ VERSION_TEST_CHIP_2T2R_8812 = 0x0024,
+ VERSION_NORMAL_TSMC_CHIP_1T1R_8812 = 0x100c,
+ VERSION_NORMAL_TSMC_CHIP_2T2R_8812 = 0x102c,
+ VERSION_NORMAL_TSMC_CHIP_1T1R_8812_C_CUT = 0x200c,
+ VERSION_NORMAL_TSMC_CHIP_2T2R_8812_C_CUT = 0x202c,
+ VERSION_TEST_CHIP_8821 = 0x0005,
+ VERSION_NORMAL_TSMC_CHIP_8821 = 0x000d,
+ VERSION_NORMAL_TSMC_CHIP_8821_B_CUT = 0x100d,
+ VERSION_UNKNOWN = 0xFF,
+};
+
+enum vht_data_sc{
+ VHT_DATA_SC_DONOT_CARE = 0,
+ VHT_DATA_SC_20_UPPER_OF_80MHZ = 1,
+ VHT_DATA_SC_20_LOWER_OF_80MHZ = 2,
+ VHT_DATA_SC_20_UPPERST_OF_80MHZ = 3,
+ VHT_DATA_SC_20_LOWEST_OF_80MHZ = 4,
+ VHT_DATA_SC_20_RECV1 = 5,
+ VHT_DATA_SC_20_RECV2 = 6,
+ VHT_DATA_SC_20_RECV3 = 7,
+ VHT_DATA_SC_20_RECV4 = 8,
+ VHT_DATA_SC_40_UPPER_OF_80MHZ = 9,
+ VHT_DATA_SC_40_LOWER_OF_80MHZ = 10,
+};
+
+
+/* MASK */
+#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2))
+#define CHIP_TYPE_MASK BIT(3)
+#define RF_TYPE_MASK (BIT(4)|BIT(5)|BIT(6))
+#define MANUFACTUER_MASK BIT(7)
+#define ROM_VERSION_MASK (BIT(11)|BIT(10)|BIT(9)|BIT(8))
+#define CUT_VERSION_MASK (BIT(15)|BIT(14)|BIT(13)|BIT(12))
+
+/* Get element */
+#define GET_CVID_IC_TYPE(version) ((version) & IC_TYPE_MASK)
+#define GET_CVID_CHIP_TYPE(version) ((version) & CHIP_TYPE_MASK)
+#define GET_CVID_RF_TYPE(version) ((version) & RF_TYPE_MASK)
+#define GET_CVID_MANUFACTUER(version) ((version) & MANUFACTUER_MASK)
+#define GET_CVID_ROM_VERSION(version) ((version) & ROM_VERSION_MASK)
+#define GET_CVID_CUT_VERSION(version) ((version) & CUT_VERSION_MASK)
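+
+/*
+ * Example: VERSION_NORMAL_TSMC_CHIP_8821 (0x000d) decodes with the masks
+ * above to CHIP_8821 (BIT(0)|BIT(2)), NORMAL_CHIP (BIT(3)), no RF type
+ * bits set (1T1R) and a cut version field of 0.
+ */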
+
+#define IS_1T1R(version) ((GET_CVID_RF_TYPE(version))? false : true)
+#define IS_1T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R)\
+ ? true : false)
+#define IS_2T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R)\
+ ? true : false)
+
+#define IS_8812_SERIES(version) ((GET_CVID_IC_TYPE(version) == CHIP_8812)? \
+ true : false)
+#define IS_8821_SERIES(version) ((GET_CVID_IC_TYPE(version) == CHIP_8821)? \
+ true : false)
+
+#define IS_VENDOR_8812A_TEST_CHIP(version) ((IS_8812_SERIES(version)) ? \
+ ((IS_NORMAL_CHIP(version)) ? \
+ false : true) : false)
+#define IS_VENDOR_8812A_MP_CHIP(version) ((IS_8812_SERIES(version)) ? \
+ ((IS_NORMAL_CHIP(version)) ? \
+ true : false) : false)
+#define IS_VENDOR_8812A_C_CUT(version) ((IS_8812_SERIES(version)) ? \
+ ((GET_CVID_CUT_VERSION(version) == C_CUT_VERSION) ? \
+ true : false) : false)
+
+#define IS_VENDOR_8821A_TEST_CHIP(version) ((IS_8821_SERIES(version)) ? \
+ ((IS_NORMAL_CHIP(version)) ? \
+ false : true) : false)
+#define IS_VENDOR_8821A_MP_CHIP(version) ((IS_8821_SERIES(version)) ? \
+ ((IS_NORMAL_CHIP(version)) ? \
+ true : false) : false)
+#define IS_VENDOR_8821A_B_CUT(version) ((IS_8821_SERIES(version)) ? \
+ ((GET_CVID_CUT_VERSION(version) == B_CUT_VERSION) ? \
+ true : false) : false)
+
+
+enum rf_optype {
+ RF_OP_BY_SW_3WIRE = 0,
+ RF_OP_BY_FW,
+ RF_OP_MAX
+};
+
+enum rf_power_state {
+ RF_ON,
+ RF_OFF,
+ RF_SLEEP,
+ RF_SHUT_DOWN,
+};
+
+enum power_save_mode {
+ POWER_SAVE_MODE_ACTIVE,
+ POWER_SAVE_MODE_SAVE,
+};
+
+enum power_polocy_config {
+ POWERCFG_MAX_POWER_SAVINGS,
+ POWERCFG_GLOBAL_POWER_SAVINGS,
+ POWERCFG_LOCAL_POWER_SAVINGS,
+ POWERCFG_LENOVO,
+};
+
+enum interface_select_pci {
+ INTF_SEL1_MINICARD = 0,
+ INTF_SEL0_PCIE = 1,
+ INTF_SEL2_RSV = 2,
+ INTF_SEL3_RSV = 3,
+};
+
+enum hal_fw_c2h_cmd_id {
+ HAL_FW_C2H_CMD_Read_MACREG = 0,
+ HAL_FW_C2H_CMD_Read_BBREG = 1,
+ HAL_FW_C2H_CMD_Read_RFREG = 2,
+ HAL_FW_C2H_CMD_Read_EEPROM = 3,
+ HAL_FW_C2H_CMD_Read_EFUSE = 4,
+ HAL_FW_C2H_CMD_Read_CAM = 5,
+ HAL_FW_C2H_CMD_Get_BasicRate = 6,
+ HAL_FW_C2H_CMD_Get_DataRate = 7,
+ HAL_FW_C2H_CMD_Survey = 8,
+ HAL_FW_C2H_CMD_SurveyDone = 9,
+ HAL_FW_C2H_CMD_JoinBss = 10,
+ HAL_FW_C2H_CMD_AddSTA = 11,
+ HAL_FW_C2H_CMD_DelSTA = 12,
+ HAL_FW_C2H_CMD_AtimDone = 13,
+ HAL_FW_C2H_CMD_TX_Report = 14,
+ HAL_FW_C2H_CMD_CCX_Report = 15,
+ HAL_FW_C2H_CMD_DTM_Report = 16,
+ HAL_FW_C2H_CMD_TX_Rate_Statistics = 17,
+ HAL_FW_C2H_CMD_C2HLBK = 18,
+ HAL_FW_C2H_CMD_C2HDBG = 19,
+ HAL_FW_C2H_CMD_C2HFEEDBACK = 20,
+ HAL_FW_C2H_CMD_MAX
+};
+
+enum rtl_desc_qsel {
+ QSLT_BK = 0x2,
+ QSLT_BE = 0x0,
+ QSLT_VI = 0x5,
+ QSLT_VO = 0x7,
+ QSLT_BEACON = 0x10,
+ QSLT_HIGH = 0x11,
+ QSLT_MGNT = 0x12,
+ QSLT_CMD = 0x13,
+};
+
+enum rtl_desc8821ae_rate {
+ DESC_RATE1M = 0x00,
+ DESC_RATE2M = 0x01,
+ DESC_RATE5_5M = 0x02,
+ DESC_RATE11M = 0x03,
+
+ DESC_RATE6M = 0x04,
+ DESC_RATE9M = 0x05,
+ DESC_RATE12M = 0x06,
+ DESC_RATE18M = 0x07,
+ DESC_RATE24M = 0x08,
+ DESC_RATE36M = 0x09,
+ DESC_RATE48M = 0x0a,
+ DESC_RATE54M = 0x0b,
+
+ DESC_RATEMCS0 = 0x0c,
+ DESC_RATEMCS1 = 0x0d,
+ DESC_RATEMCS2 = 0x0e,
+ DESC_RATEMCS3 = 0x0f,
+ DESC_RATEMCS4 = 0x10,
+ DESC_RATEMCS5 = 0x11,
+ DESC_RATEMCS6 = 0x12,
+ DESC_RATEMCS7 = 0x13,
+ DESC_RATEMCS8 = 0x14,
+ DESC_RATEMCS9 = 0x15,
+ DESC_RATEMCS10 = 0x16,
+ DESC_RATEMCS11 = 0x17,
+ DESC_RATEMCS12 = 0x18,
+ DESC_RATEMCS13 = 0x19,
+ DESC_RATEMCS14 = 0x1a,
+ DESC_RATEMCS15 = 0x1b,
+ DESC_RATEVHT1SS_MCS0 = 0x1c,
+ DESC_RATEVHT1SS_MCS1 = 0x1d,
+ DESC_RATEVHT1SS_MCS2 = 0x1e,
+ DESC_RATEVHT1SS_MCS3 = 0x1f,
+ DESC_RATEVHT1SS_MCS4 = 0x20,
+ DESC_RATEVHT1SS_MCS5 = 0x21,
+ DESC_RATEVHT1SS_MCS6 = 0x22,
+ DESC_RATEVHT1SS_MCS7 = 0x23,
+ DESC_RATEVHT1SS_MCS8 = 0x24,
+ DESC_RATEVHT1SS_MCS9 = 0x25,
+ DESC_RATEVHT2SS_MCS0 = 0x26,
+ DESC_RATEVHT2SS_MCS1 = 0x27,
+ DESC_RATEVHT2SS_MCS2 = 0x28,
+ DESC_RATEVHT2SS_MCS3 = 0x29,
+ DESC_RATEVHT2SS_MCS4 = 0x2a,
+ DESC_RATEVHT2SS_MCS5 = 0x2b,
+ DESC_RATEVHT2SS_MCS6 = 0x2c,
+ DESC_RATEVHT2SS_MCS7 = 0x2d,
+ DESC_RATEVHT2SS_MCS8 = 0x2e,
+ DESC_RATEVHT2SS_MCS9 = 0x2f,
+};
+
+enum rx_packet_type{
+ NORMAL_RX,
+ TX_REPORT1,
+ TX_REPORT2,
+ HIS_REPORT,
+ C2H_PACKET,
+};
+
+struct phy_sts_cck_8821ae_t {
+ u8 adc_pwdb_X[4];
+ u8 sq_rpt;
+ u8 cck_agc_rpt;
+};
+
+struct h2c_cmd_8821ae {
+ u8 element_id;
+ u32 cmd_len;
+ u8 *p_cmdbuffer;
+};
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/dm.c b/drivers/staging/rtl8821ae/rtl8821ae/dm.c
new file mode 100644
index 000000000000..8634206b8929
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/dm.c
@@ -0,0 +1,3045 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "fw.h"
+#include "trx.h"
+#include "../btcoexist/rtl_btc.h"
+
+struct dig_t dm_digtable;
+static struct ps_t dm_pstable;
+
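+/*
+ * BB swing (TX scaling) tables: entries run from -12 dB to +6 dB in 0.5 dB
+ * steps, each value approximating 512 * 10^(dB/20), with 0x200 representing
+ * 0 dB.
+ */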
+static const u32 rtl8812ae_txscaling_table[TXSCALE_TABLE_SIZE] =
+{
+ 0x081, // 0, -12.0dB
+ 0x088, // 1, -11.5dB
+ 0x090, // 2, -11.0dB
+ 0x099, // 3, -10.5dB
+ 0x0A2, // 4, -10.0dB
+ 0x0AC, // 5, -9.5dB
+ 0x0B6, // 6, -9.0dB
+ 0x0C0, // 7, -8.5dB
+ 0x0CC, // 8, -8.0dB
+ 0x0D8, // 9, -7.5dB
+ 0x0E5, // 10, -7.0dB
+ 0x0F2, // 11, -6.5dB
+ 0x101, // 12, -6.0dB
+ 0x110, // 13, -5.5dB
+ 0x120, // 14, -5.0dB
+ 0x131, // 15, -4.5dB
+ 0x143, // 16, -4.0dB
+ 0x156, // 17, -3.5dB
+ 0x16A, // 18, -3.0dB
+ 0x180, // 19, -2.5dB
+ 0x197, // 20, -2.0dB
+ 0x1AF, // 21, -1.5dB
+ 0x1C8, // 22, -1.0dB
+ 0x1E3, // 23, -0.5dB
+ 0x200, // 24, +0 dB
+ 0x21E, // 25, +0.5dB
+ 0x23E, // 26, +1.0dB
+ 0x261, // 27, +1.5dB
+ 0x285, // 28, +2.0dB
+ 0x2AB, // 29, +2.5dB
+ 0x2D3, // 30, +3.0dB
+ 0x2FE, // 31, +3.5dB
+ 0x32B, // 32, +4.0dB
+ 0x35C, // 33, +4.5dB
+ 0x38E, // 34, +5.0dB
+ 0x3C4, // 35, +5.5dB
+ 0x3FE // 36, +6.0dB
+};
+
+static const u32 rtl8821ae_txscaling_table[TXSCALE_TABLE_SIZE] = {
+ 0x081, // 0, -12.0dB
+ 0x088, // 1, -11.5dB
+ 0x090, // 2, -11.0dB
+ 0x099, // 3, -10.5dB
+ 0x0A2, // 4, -10.0dB
+ 0x0AC, // 5, -9.5dB
+ 0x0B6, // 6, -9.0dB
+ 0x0C0, // 7, -8.5dB
+ 0x0CC, // 8, -8.0dB
+ 0x0D8, // 9, -7.5dB
+ 0x0E5, // 10, -7.0dB
+ 0x0F2, // 11, -6.5dB
+ 0x101, // 12, -6.0dB
+ 0x110, // 13, -5.5dB
+ 0x120, // 14, -5.0dB
+ 0x131, // 15, -4.5dB
+ 0x143, // 16, -4.0dB
+ 0x156, // 17, -3.5dB
+ 0x16A, // 18, -3.0dB
+ 0x180, // 19, -2.5dB
+ 0x197, // 20, -2.0dB
+ 0x1AF, // 21, -1.5dB
+ 0x1C8, // 22, -1.0dB
+ 0x1E3, // 23, -0.5dB
+ 0x200, // 24, +0 dB
+ 0x21E, // 25, +0.5dB
+ 0x23E, // 26, +1.0dB
+ 0x261, // 27, +1.5dB
+ 0x285, // 28, +2.0dB
+ 0x2AB, // 29, +2.5dB
+ 0x2D3, // 30, +3.0dB
+ 0x2FE, // 31, +3.5dB
+ 0x32B, // 32, +4.0dB
+ 0x35C, // 33, +4.5dB
+ 0x38E, // 34, +5.0dB
+ 0x3C4, // 35, +5.5dB
+ 0x3FE // 36, +6.0dB
+};
+
+static const u32 ofdmswing_table[] = {
+ 0x0b40002d, // 0, -15.0dB
+ 0x0c000030, // 1, -14.5dB
+ 0x0cc00033, // 2, -14.0dB
+ 0x0d800036, // 3, -13.5dB
+ 0x0e400039, // 4, -13.0dB
+ 0x0f00003c, // 5, -12.5dB
+ 0x10000040, // 6, -12.0dB
+ 0x11000044, // 7, -11.5dB
+ 0x12000048, // 8, -11.0dB
+ 0x1300004c, // 9, -10.5dB
+ 0x14400051, // 10, -10.0dB
+ 0x15800056, // 11, -9.5dB
+ 0x16c0005b, // 12, -9.0dB
+ 0x18000060, // 13, -8.5dB
+ 0x19800066, // 14, -8.0dB
+ 0x1b00006c, // 15, -7.5dB
+ 0x1c800072, // 16, -7.0dB
+ 0x1e400079, // 17, -6.5dB
+ 0x20000080, // 18, -6.0dB
+ 0x22000088, // 19, -5.5dB
+ 0x24000090, // 20, -5.0dB
+ 0x26000098, // 21, -4.5dB
+ 0x288000a2, // 22, -4.0dB
+ 0x2ac000ab, // 23, -3.5dB
+ 0x2d4000b5, // 24, -3.0dB
+ 0x300000c0, // 25, -2.5dB
+ 0x32c000cb, // 26, -2.0dB
+ 0x35c000d7, // 27, -1.5dB
+ 0x390000e4, // 28, -1.0dB
+ 0x3c8000f2, // 29, -0.5dB
+ 0x40000100, // 30, +0dB
+ 0x43c0010f, // 31, +0.5dB
+ 0x47c0011f, // 32, +1.0dB
+ 0x4c000130, // 33, +1.5dB
+ 0x50800142, // 34, +2.0dB
+ 0x55400155, // 35, +2.5dB
+ 0x5a400169, // 36, +3.0dB
+ 0x5fc0017f, // 37, +3.5dB
+ 0x65400195, // 38, +4.0dB
+ 0x6b8001ae, // 39, +4.5dB
+ 0x71c001c7, // 40, +5.0dB
+ 0x788001e2, // 41, +5.5dB
+ 0x7f8001fe // 42, +6.0dB
+};
+
+static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
+ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}, // 0, -16.0dB
+ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, // 1, -15.5dB
+ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, // 2, -15.0dB
+ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, // 3, -14.5dB
+ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, // 4, -14.0dB
+ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, // 5, -13.5dB
+ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, // 6, -13.0dB
+ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, // 7, -12.5dB
+ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, // 8, -12.0dB
+ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, // 9, -11.5dB
+ {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, // 10, -11.0dB
+ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, // 11, -10.5dB
+ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, // 12, -10.0dB
+ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, // 13, -9.5dB
+ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, // 14, -9.0dB
+ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, // 15, -8.5dB
+ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, // 16, -8.0dB
+ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, // 17, -7.5dB
+ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, // 18, -7.0dB
+ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, // 19, -6.5dB
+ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, // 20, -6.0dB
+ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, // 21, -5.5dB
+ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, // 22, -5.0dB
+ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, // 23, -4.5dB
+ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, // 24, -4.0dB
+ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, // 25, -3.5dB
+ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, // 26, -3.0dB
+ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, // 27, -2.5dB
+ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, // 28, -2.0dB
+ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, // 29, -1.5dB
+ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, // 30, -1.0dB
+ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, // 31, -0.5dB
+ {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04} // 32, +0dB
+};
+
+static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8]= {
+ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}, // 0, -16.0dB
+ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, // 1, -15.5dB
+ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, // 2, -15.0dB
+ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, // 3, -14.5dB
+ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, // 4, -14.0dB
+ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, // 5, -13.5dB
+ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, // 6, -13.0dB
+ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, // 7, -12.5dB
+ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, // 8, -12.0dB
+ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, // 9, -11.5dB
+ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, // 10, -11.0dB
+ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, // 11, -10.5dB
+ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, // 12, -10.0dB
+ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, // 13, -9.5dB
+ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, // 14, -9.0dB
+ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, // 15, -8.5dB
+ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, // 16, -8.0dB
+ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, // 17, -7.5dB
+ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, // 18, -7.0dB
+ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, // 19, -6.5dB
+ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, // 20, -6.0dB
+ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, // 21, -5.5dB
+ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, // 22, -5.0dB
+ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, // 23, -4.5dB
+ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, // 24, -4.0dB
+ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, // 25, -3.5dB
+ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, // 26, -3.0dB
+ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, // 27, -2.5dB
+ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, // 28, -2.0dB
+ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, // 29, -1.5dB
+ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, // 30, -1.0dB
+ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, // 31, -0.5dB
+ {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00} // 32, +0dB
+};
+
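+/*
+ * EDCA turbo parameter candidates, indexed by detected peer vendor
+ * (PEER_MAX entries): the dl table is intended for mostly-downlink
+ * traffic and the ul table for mostly-uplink traffic, the raw value
+ * being written to the BE queue EDCA parameter register.
+ */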
+static const u32 edca_setting_dl[PEER_MAX] = {
+ 0xa44f, /* 0 UNKNOWN */
+ 0x5ea44f, /* 1 REALTEK_90 */
+ 0x5e4322, /* 2 REALTEK_92SE */
+ 0x5ea42b, /* 3 BROAD */
+ 0xa44f, /* 4 RAL */
+ 0xa630, /* 5 ATH */
+ 0x5ea630, /* 6 CISCO */
+ 0x5ea42b, /* 7 MARVELL */
+};
+
+static const u32 edca_setting_ul[PEER_MAX] = {
+ 0x5e4322, /* 0 UNKNOWN */
+ 0xa44f, /* 1 REALTEK_90 */
+ 0x5ea44f, /* 2 REALTEK_92SE */
+ 0x5ea32b, /* 3 BROAD */
+ 0x5ea422, /* 4 RAL */
+ 0x5ea322, /* 5 ATH */
+ 0x3ea430, /* 6 CISCO */
+ 0x5ea44f, /* 7 MARV */
+};
+
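+/*
+ * TX power tracking delta swing tables: indexed by the difference between
+ * the current thermal meter reading and the EEPROM calibration value, the
+ * _p tables covering increases and the _n tables decreases, split per
+ * band (2.4G/5G), RF path (a/b) and CCK vs. OFDM.
+ */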
+static u8 rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack[] =
+ {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9};
+static u8 rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack[] =
+ {0, 0, 0, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11};
+
+
+u8 rtl8812ae_delta_swing_table_idx_24gb_n_txpwrtrack[] =
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11};
+u8 rtl8812ae_delta_swing_table_idx_24gb_p_txpwrtrack[] =
+ {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9};
+u8 rtl8812ae_delta_swing_table_idx_24ga_n_txpwrtrack[] =
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11};
+u8 rtl8812ae_delta_swing_table_idx_24ga_p_txpwrtrack[] =
+ {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9};
+u8 rtl8812ae_delta_swing_table_idx_24gcckb_n_txpwrtrack[] =
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11};
+u8 rtl8812ae_delta_swing_table_idx_24gcckb_p_txpwrtrack[] =
+ {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9};
+u8 rtl8812ae_delta_swing_table_idx_24gccka_n_txpwrtrack[] =
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11};
+u8 rtl8812ae_delta_swing_table_idx_24gccka_p_txpwrtrack[] =
+ {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9};
+
+u8 rtl8812ae_delta_swing_table_idx_5gb_n_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13},
+ {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 13},
+ {0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 9, 10, 11, 12, 12, 13, 14, 14, 14, 15, 16, 17, 17, 17, 18, 18, 18},
+};
+u8 rtl8812ae_delta_swing_table_idx_5gb_p_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11},
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11},
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11},
+};
+u8 rtl8812ae_delta_swing_table_idx_5ga_n_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 13, 13},
+ {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12, 12, 12, 12, 13, 13},
+ {0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 14, 15, 15, 15, 16, 16, 16, 17, 17, 18, 18},
+};
+u8 rtl8812ae_delta_swing_table_idx_5ga_p_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11},
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11},
+ {0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11},
+};
+
+u8 rtl8821ae_delta_swing_table_idx_24gb_n_txpwrtrack[] =
+ {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10};
+u8 rtl8821ae_delta_swing_table_idx_24gb_p_txpwrtrack[] =
+ {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12};
+u8 rtl8821ae_delta_swing_table_idx_24ga_n_txpwrtrack[] =
+ {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10};
+u8 rtl8821ae_delta_swing_table_idx_24ga_p_txpwrtrack[] =
+ {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12};
+u8 rtl8821ae_delta_swing_table_idx_24gcckb_n_txpwrtrack[] =
+ {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10};
+u8 rtl8821ae_delta_swing_table_idx_24gcckb_p_txpwrtrack[] =
+ {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12};
+u8 rtl8821ae_delta_swing_table_idx_24gccka_n_txpwrtrack[] =
+ {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10};
+u8 rtl8821ae_delta_swing_table_idx_24gccka_p_txpwrtrack[] =
+ {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12};
+
+u8 rtl8821ae_delta_swing_table_idx_5gb_n_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+};
+
+u8 rtl8821ae_delta_swing_table_idx_5gb_p_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+};
+
+u8 rtl8821ae_delta_swing_table_idx_5ga_n_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+};
+
+u8 rtl8821ae_delta_swing_table_idx_5ga_p_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+};
+
+void rtl8812ae_dm_read_and_config_txpower_track(
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		 ("===> rtl8812ae_dm_read_and_config_txpower_track\n"));
+
+
+ memcpy(rtldm->delta_swing_table_idx_24ga_p,
+ rtl8812ae_delta_swing_table_idx_24ga_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24ga_n,
+ rtl8812ae_delta_swing_table_idx_24ga_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gb_p,
+ rtl8812ae_delta_swing_table_idx_24gb_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gb_n,
+ rtl8812ae_delta_swing_table_idx_24gb_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+
+ memcpy(rtldm->delta_swing_table_idx_24gccka_p,
+ rtl8812ae_delta_swing_table_idx_24gccka_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gccka_n,
+ rtl8812ae_delta_swing_table_idx_24gccka_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gcckb_p,
+ rtl8812ae_delta_swing_table_idx_24gcckb_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gcckb_n,
+ rtl8812ae_delta_swing_table_idx_24gcckb_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+
+ memcpy(rtldm->delta_swing_table_idx_5ga_p,
+ rtl8812ae_delta_swing_table_idx_5ga_p_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+ memcpy(rtldm->delta_swing_table_idx_5ga_n,
+ rtl8812ae_delta_swing_table_idx_5ga_n_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+ memcpy(rtldm->delta_swing_table_idx_5gb_p,
+ rtl8812ae_delta_swing_table_idx_5gb_p_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+ memcpy(rtldm->delta_swing_table_idx_5gb_n,
+ rtl8812ae_delta_swing_table_idx_5gb_n_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+}
+
+void rtl8821ae_dm_read_and_config_txpower_track(
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("===> rtl8821ae_dm_read_and_config_txpower_track\n"));
+
+
+ memcpy(rtldm->delta_swing_table_idx_24ga_p,
+ rtl8821ae_delta_swing_table_idx_24ga_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24ga_n,
+ rtl8821ae_delta_swing_table_idx_24ga_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gb_p,
+ rtl8821ae_delta_swing_table_idx_24gb_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gb_n,
+ rtl8821ae_delta_swing_table_idx_24gb_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+
+ memcpy(rtldm->delta_swing_table_idx_24gccka_p,
+ rtl8821ae_delta_swing_table_idx_24gccka_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gccka_n,
+ rtl8821ae_delta_swing_table_idx_24gccka_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gcckb_p,
+ rtl8821ae_delta_swing_table_idx_24gcckb_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gcckb_n,
+ rtl8821ae_delta_swing_table_idx_24gcckb_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+
+ memcpy(rtldm->delta_swing_table_idx_5ga_p,
+ rtl8821ae_delta_swing_table_idx_5ga_p_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+ memcpy(rtldm->delta_swing_table_idx_5ga_n,
+ rtl8821ae_delta_swing_table_idx_5ga_n_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+ memcpy(rtldm->delta_swing_table_idx_5gb_p,
+ rtl8821ae_delta_swing_table_idx_5gb_p_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+ memcpy(rtldm->delta_swing_table_idx_5gb_n,
+ rtl8821ae_delta_swing_table_idx_5gb_n_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+}
+
+
+
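+/*
+ * Map a thermal delta to an index into thermal_threshold[_direction][]:
+ * advance until the delta falls below a threshold, step back one slot,
+ * and clamp to the last entry when the delta exceeds every threshold.
+ */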
+#define CALCULATE_SWINGTALBE_OFFSET(_offset, _direction, _size, _deltaThermal) \
+	do { \
+		for (_offset = 0; _offset < _size; _offset++) { \
+			if (_deltaThermal < \
+			    thermal_threshold[_direction][_offset]) { \
+				if (_offset != 0) \
+					_offset--; \
+				break; \
+			} \
+		} \
+		if (_offset >= _size) \
+			_offset = _size - 1; \
+	} while (0)
+
+
+void rtl8821ae_dm_txpower_track_adjust(struct ieee80211_hw *hw,
+ u8 type,u8 *pdirection,
+ u32 *poutwrite_val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ u8 pwr_val = 0;
+
+ if (type == 0){
+ if (rtlpriv->dm.bb_swing_idx_ofdm[RF90_PATH_A] <=
+ rtlpriv->dm.bb_swing_idx_ofdm_base[RF90_PATH_A]) {
+ *pdirection = 1;
+ pwr_val = rtldm->bb_swing_idx_ofdm_base[RF90_PATH_A] - rtldm->bb_swing_idx_ofdm[RF90_PATH_A];
+ } else {
+ *pdirection = 2;
+ pwr_val = rtldm->bb_swing_idx_ofdm[RF90_PATH_A] - rtldm->bb_swing_idx_ofdm_base[RF90_PATH_A];
+ }
+ } else if (type ==1) {
+ if (rtldm->bb_swing_idx_cck <= rtldm->bb_swing_idx_cck_base) {
+ *pdirection = 1;
+ pwr_val = rtldm->bb_swing_idx_cck_base - rtldm->bb_swing_idx_cck;
+ } else {
+ *pdirection = 2;
+ pwr_val = rtldm->bb_swing_idx_cck - rtldm->bb_swing_idx_cck_base;
+ }
+ }
+
+ if (pwr_val >= TXPWRTRACK_MAX_IDX && (*pdirection == 1))
+ pwr_val = TXPWRTRACK_MAX_IDX;
+
+ *poutwrite_val = pwr_val |(pwr_val << 8)|(pwr_val << 16) | (pwr_val << 24);
+}
+
+void rtl8821ae_dm_clear_txpower_tracking_state(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtlpriv);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+ u8 p = 0;
+ rtldm->bb_swing_idx_cck_base = rtldm->default_cck_index;
+ rtldm->bb_swing_idx_cck = rtldm->default_cck_index;
+ rtldm->cck_index = 0;
+
+ for (p = RF90_PATH_A; p < MAX_RF_PATH; ++p) {
+ rtldm->bb_swing_idx_ofdm_base[p] = rtldm->default_ofdm_index;
+ rtldm->bb_swing_idx_ofdm[p] = rtldm->default_ofdm_index;
+ rtldm->ofdm_index[p] = rtldm->default_ofdm_index;
+
+ rtldm->power_index_offset[p] = 0;
+ rtldm->delta_power_index[p] = 0;
+ rtldm->delta_power_index_last[p] = 0;
+
+ rtldm->aboslute_ofdm_swing_idx[p] = 0; /*Initial Mix mode power tracking*/
+ rtldm->remnant_ofdm_swing_idx[p] = 0;
+ }
+
+ rtldm->modify_txagc_flag_path_a = false; /*Initial at Modify Tx Scaling Mode*/
+ rtldm->modify_txagc_flag_path_b = false; /*Initial at Modify Tx Scaling Mode*/
+ rtldm->remnant_cck_idx = 0;
+ rtldm->thermalvalue = rtlefuse->eeprom_thermalmeter;
+ rtldm->thermalvalue_iqk = rtlefuse->eeprom_thermalmeter;
+ rtldm->thermalvalue_lck = rtlefuse->eeprom_thermalmeter;
+}
+
+u8 rtl8821ae_dm_get_swing_index(struct ieee80211_hw *hw)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 i = 0;
+ u32 bb_swing;
+
+ bb_swing =rtl8821ae_phy_query_bb_reg(hw, rtlhal->current_bandtype, RF90_PATH_A);
+
+ for (i = 0; i < TXSCALE_TABLE_SIZE; ++i)
+ if ( bb_swing == rtl8821ae_txscaling_table[i])
+ break;
+
+ return i;
+}
+
+void rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtlpriv);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 default_swing_index = 0;
+ u8 p = 0;
+
+ rtlpriv->dm.txpower_track_control = true;
+ rtldm->thermalvalue = rtlefuse->eeprom_thermalmeter;
+ rtldm->thermalvalue_iqk = rtlefuse->eeprom_thermalmeter;
+ rtldm->thermalvalue_lck = rtlefuse->eeprom_thermalmeter;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtl8812ae_dm_read_and_config_txpower_track(hw);
+ else
+ rtl8821ae_dm_read_and_config_txpower_track(hw);
+
+ default_swing_index = rtl8821ae_dm_get_swing_index(hw);
+
+ rtldm->default_ofdm_index = (default_swing_index == TXSCALE_TABLE_SIZE) ? 24 : default_swing_index;
+ rtldm->default_cck_index = 24;
+
+ rtldm->bb_swing_idx_cck_base = rtldm->default_cck_index;
+ rtldm->cck_index = rtldm->default_cck_index;
+
+ for (p = RF90_PATH_A; p < MAX_RF_PATH; ++p)
+ {
+ rtldm->bb_swing_idx_ofdm_base[p] = rtldm->default_ofdm_index;
+ rtldm->ofdm_index[p] = rtldm->default_ofdm_index;
+ rtldm->delta_power_index[p] = 0;
+ rtldm->power_index_offset[p] = 0;
+ rtldm->delta_power_index_last[p] = 0;
+ }
+}
+
+static void rtl8821ae_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
+{
+ dm_pstable.pre_ccastate = CCA_MAX;
+ dm_pstable.cur_ccasate = CCA_MAX;
+ dm_pstable.pre_rfstate = RF_MAX;
+ dm_pstable.cur_rfstate = RF_MAX;
+ dm_pstable.rssi_val_min = 0;
+ dm_pstable.initialize = 0;
+}
+
+
+static void rtl8821ae_dm_diginit(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ //dm_digtable.dig_enable_flag = true;
+ dm_digtable.cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f);
+ /*dm_digtable.pre_igvalue = 0;
+ dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
+ dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
+ dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;*/
+ dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
+ dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
+ dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
+ dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
+ dm_digtable.rx_gain_range_max = DM_DIG_MAX;
+ dm_digtable.rx_gain_range_min = DM_DIG_MIN;
+ dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
+ dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
+ dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
+ dm_digtable.pre_cck_cca_thres = 0xff;
+ dm_digtable.cur_cck_cca_thres = 0x83;
+ dm_digtable.forbidden_igi = DM_DIG_MIN;
+ dm_digtable.large_fa_hit = 0;
+ dm_digtable.recover_cnt = 0;
+ dm_digtable.dig_dynamic_min_0 = DM_DIG_MIN;
+ dm_digtable.dig_dynamic_min_1 = DM_DIG_MIN;
+ dm_digtable.b_media_connect_0 = false;
+ dm_digtable.b_media_connect_1 = false;
+ rtlpriv->dm.b_dm_initialgain_enable = true;
+ dm_digtable.bt30_cur_igi = 0x32;
+}
+
+static void rtl8821ae_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.bdynamic_txpower_enable = false;
+
+ rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+}
+
+
+void rtl8821ae_dm_init_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ rtlpriv->dm.bcurrent_turbo_edca = false;
+ rtlpriv->dm.bis_any_nonbepkts = false;
+ rtlpriv->dm.bis_cur_rdlstate = false;
+}
+
+
+void rtl8821ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rate_adaptive *p_ra = &(rtlpriv->ra);
+
+ p_ra->ratr_state = DM_RATR_STA_INIT;
+ p_ra->pre_ratr_state = DM_RATR_STA_INIT;
+
+ rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+ if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
+ rtlpriv->dm.b_useramask = true;
+ else
+ rtlpriv->dm.b_useramask = false;
+
+ p_ra->high_rssi_thresh_for_ra = 50;
+ p_ra->low_rssi_thresh_for_ra = 20;
+}
+
+
+static void rtl8821ae_dm_init_txpower_tracking(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.btxpower_tracking = true;
+ rtlpriv->dm.btxpower_trackinginit = false;
+ rtlpriv->dm.txpowercount = 0;
+ rtlpriv->dm.txpower_track_control = true;
+ rtlpriv->dm.thermalvalue = 0;
+
+ rtlpriv->dm.ofdm_index[0] = 30;
+ rtlpriv->dm.cck_index = 20;
+
+ rtlpriv->dm.bb_swing_idx_cck_base = rtlpriv->dm.cck_index;
+
+
+ rtlpriv->dm.bb_swing_idx_ofdm[RF90_PATH_A] = rtlpriv->dm.ofdm_index[0];
+ rtlpriv->dm.bb_swing_idx_ofdm[RF90_PATH_B] = rtlpriv->dm.ofdm_index[0];
+ rtlpriv->dm.delta_power_index[0] = 0;
+ rtlpriv->dm.delta_power_index_last[0] = 0;
+ rtlpriv->dm.power_index_offset[0] = 0;
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ (" rtlpriv->dm.btxpower_tracking = %d\n",
+ rtlpriv->dm.btxpower_tracking));
+}
+
+
+void rtl8821ae_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.crystal_cap = rtlpriv->efuse.crystalcap;
+
+ rtlpriv->dm.atc_status = rtl_get_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11));
+ rtlpriv->dm.cfo_threshold = CFO_THRESHOLD_XTAL;
+}
+
+
+void rtl8821ae_dm_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = false;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+
+ rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+ rtl8821ae_dm_diginit(hw);
+ rtl8821ae_dm_init_rate_adaptive_mask(hw);
+ rtl8812ae_dm_path_diversity_init(hw);
+ rtl8821ae_dm_init_edca_turbo(hw);
+ rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(hw);
+	rtl8821ae_dm_init_dynamic_bb_powersaving(hw);
+	rtl8821ae_dm_init_dynamic_txpower(hw);
+	rtl8821ae_dm_init_txpower_tracking(hw);
+ rtl8821ae_dm_init_dynamic_atc_switch(hw);
+}
+
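+/* Pick the minimum undecorated PWDB used by the power-saving and DIG
+ * logic: the per-entry minimum in AP/IBSS mode, the default-port PWDB
+ * when acting as a station, or 0 when nothing is associated yet.
+ */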
+void rtl8821ae_dm_find_minimum_rssi(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dig *rtl_dm_dig = &(rtlpriv->dm.dm_digtable);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ /* Determine the minimum RSSI */
+ if ((mac->link_state < MAC80211_LINKED) &&
+ (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+ rtl_dm_dig->min_undecorated_pwdb_for_dm = 0;
+ RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD,
+ ("Not connected to any \n"));
+ }
+ if (mac->link_state >= MAC80211_LINKED) {
+ if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ rtl_dm_dig->min_undecorated_pwdb_for_dm =
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD,
+ ("AP Client PWDB = 0x%lx \n",
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb));
+ } else {
+ rtl_dm_dig->min_undecorated_pwdb_for_dm =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD,
+ ("STA Default Port PWDB = 0x%x \n",
+ rtl_dm_dig->min_undecorated_pwdb_for_dm));
+ }
+ } else {
+ rtl_dm_dig->min_undecorated_pwdb_for_dm =
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD,
+ ("AP Ext Port or disconnet PWDB = 0x%x \n",
+ rtl_dm_dig->min_undecorated_pwdb_for_dm));
+ }
+ RT_TRACE(COMP_DIG, DBG_LOUD, ("MinUndecoratedPWDBForDM =%d\n",
+ rtl_dm_dig->min_undecorated_pwdb_for_dm));
+}
+
+#if 0
+void rtl8812ae_dm_rssi_dump_to_register(
+ struct ieee80211_hw *hw
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, RA_RSSI_DUMP, Adapter->RxStats.RxRSSIPercentage[0]);
+ rtl_write_byte(rtlpriv, RB_RSSI_DUMP, Adapter->RxStats.RxRSSIPercentage[1]);
+
+ /* Rx EVM*/
+ rtl_write_byte(rtlpriv, RS1_RX_EVM_DUMP, Adapter->RxStats.RxEVMdbm[0]);
+ rtl_write_byte(rtlpriv, RS2_RX_EVM_DUMP, Adapter->RxStats.RxEVMdbm[1]);
+
+ /*Rx SNR*/
+ rtl_write_byte(rtlpriv, RA_RX_SNR_DUMP, (u1Byte)(Adapter->RxStats.RxSNRdB[0]));
+ rtl_write_byte(rtlpriv, RB_RX_SNR_DUMP, (u1Byte)(Adapter->RxStats.RxSNRdB[1]));
+
+ /*Rx Cfo_Short*/
+ rtl_write_word(rtlpriv, RA_CFO_SHORT_DUMP, Adapter->RxStats.RxCfoShort[0]);
+ rtl_write_word(rtlpriv, RB_CFO_SHORT_DUMP, Adapter->RxStats.RxCfoShort[1]);
+
+ /*Rx Cfo_Tail*/
+ rtl_write_word(rtlpriv, RA_CFO_LONG_DUMP, Adapter->RxStats.RxCfoTail[0]);
+ rtl_write_word(rtlpriv, RB_CFO_LONG_DUMP, Adapter->RxStats.RxCfoTail[1]);
+
+}
+#endif
+
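+/* Walk the station entry list to refresh the min/max smoothed PWDB,
+ * report the RX signal strength to the firmware (via H2C when the RA
+ * mask is used, otherwise through register 0x4fe) and update the DIG
+ * minimum-RSSI input.
+ */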
+static void rtl8821ae_dm_check_rssi_monitor(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_sta_info *drv_priv;
+ u8 h2c_parameter[3] = { 0 };
+ long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff;
+
+
+ /* AP & ADHOC & MESH */
+ spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+ list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
+ if(drv_priv->rssi_stat.undecorated_smoothed_pwdb < tmp_entry_min_pwdb)
+ tmp_entry_min_pwdb = drv_priv->rssi_stat.undecorated_smoothed_pwdb;
+ if(drv_priv->rssi_stat.undecorated_smoothed_pwdb > tmp_entry_max_pwdb)
+ tmp_entry_max_pwdb = drv_priv->rssi_stat.undecorated_smoothed_pwdb;
+
+ /*h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
+ h2c_parameter[1] = 0x20;
+ h2c_parameter[0] = drv_priv->rssi_stat;
+ rtl8821ae_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);*/
+ }
+ spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+
+ /* If associated entry is found */
+ if (tmp_entry_max_pwdb != 0) {
+ rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = tmp_entry_max_pwdb;
+ RTPRINT(rtlpriv, FDM, DM_PWDB, ("EntryMaxPWDB = 0x%lx(%ld)\n",
+ tmp_entry_max_pwdb, tmp_entry_max_pwdb));
+ } else {
+ rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
+ }
+ /* If associated entry is found */
+ if (tmp_entry_min_pwdb != 0xff) {
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = tmp_entry_min_pwdb;
+ RTPRINT(rtlpriv, FDM, DM_PWDB, ("EntryMinPWDB = 0x%lx(%ld)\n",
+ tmp_entry_min_pwdb, tmp_entry_min_pwdb));
+ } else {
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
+ }
+ /* Indicate Rx signal strength to FW. */
+ if (rtlpriv->dm.b_useramask) {
+ h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
+ h2c_parameter[1] = 0x20;
+ h2c_parameter[0] = 0;
+ rtl8821ae_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
+ } else {
+ rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undecorated_smoothed_pwdb);
+ }
+ rtl8821ae_dm_find_minimum_rssi(hw);
+ dm_digtable.rssi_val_min = rtlpriv->dm.dm_digtable.min_undecorated_pwdb_for_dm;
+}
+
+void rtl8821ae_dm_write_cck_cca_thres(struct ieee80211_hw *hw, u8 current_cca)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (dm_digtable.cur_cck_cca_thres != current_cca)
+ rtl_write_byte(rtlpriv, DM_REG_CCK_CCA_11AC, current_cca);
+
+ dm_digtable.pre_cck_cca_thres = dm_digtable.cur_cck_cca_thres;
+ dm_digtable.cur_cck_cca_thres = current_cca;
+}
+
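+/* Program a new initial gain index into both RF paths unless DIG has
+ * been stopped or the value is unchanged.
+ */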
+void rtl8821ae_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ if(dm_digtable.stop_dig)
+ return;
+
+ if (dm_digtable.cur_igvalue != current_igi){
+ rtl_set_bbreg(hw, DM_REG_IGI_A_11AC, DM_BIT_IGI_11AC, current_igi);
+ if (rtlpriv->phy.rf_type != RF_1T1R)
+ rtl_set_bbreg(hw, DM_REG_IGI_B_11AC, DM_BIT_IGI_11AC, current_igi);
+ }
+	/*dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;*/
+ dm_digtable.cur_igvalue = current_igi;
+}
+
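+/* Core DIG algorithm: derive the allowed IGI window from the minimum
+ * RSSI and the false-alarm statistics, handle the large-false-alarm
+ * recovery counter, then step the initial gain towards the new target
+ * and remember the connect state for the next round.
+ */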
+static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 dig_dynamic_min;
+ u8 dig_max_of_min;
+ bool first_connect, first_disconnect;
+ u8 dm_dig_max, dm_dig_min, offset;
+	u8 current_igi = dm_digtable.cur_igvalue;
+
+
+ RT_TRACE(COMP_DIG, DBG_LOUD,("rtl8821ae_dm_dig()==>\n"));
+
+
+ if (mac->act_scanning == true) {
+ RT_TRACE(COMP_DIG, DBG_LOUD,("rtl8821ae_dm_dig() Return: In Scan Progress \n"));
+ return;
+ }
+
+ /*add by Neil Chen to avoid PSD is processing*/
+ dig_dynamic_min = dm_digtable.dig_dynamic_min_0;
+ first_connect = (mac->link_state >= MAC80211_LINKED) &&
+ (dm_digtable.b_media_connect_0 == false);
+ first_disconnect = (mac->link_state < MAC80211_LINKED) &&
+ (dm_digtable.b_media_connect_0 == true);
+
+ /*1 Boundary Decision*/
+
+
+ dm_dig_max = 0x5A;
+
+ if (rtlhal->hw_type != HARDWARE_TYPE_RTL8821AE)
+ dm_dig_min = DM_DIG_MIN;
+ else
+ dm_dig_min = 0x1C;
+
+ dig_max_of_min = DM_DIG_MAX_AP;
+
+ if (mac->link_state >= MAC80211_LINKED) {
+ if (rtlhal->hw_type != HARDWARE_TYPE_RTL8821AE)
+ offset = 20;
+ else
+ offset = 10;
+
+ if ((dm_digtable.rssi_val_min + offset) > dm_dig_max)
+ dm_digtable.rx_gain_range_max = dm_dig_max;
+ else if ((dm_digtable.rssi_val_min + offset) < dm_dig_min)
+ dm_digtable.rx_gain_range_max = dm_dig_min;
+ else
+ dm_digtable.rx_gain_range_max = dm_digtable.rssi_val_min + offset;
+
+ if(rtlpriv->dm.b_one_entry_only){
+ offset = 0;
+
+ if (dm_digtable.rssi_val_min - offset < dm_dig_min)
+ dig_dynamic_min = dm_dig_min;
+ else if (dm_digtable.rssi_val_min - offset > dig_max_of_min)
+ dig_dynamic_min = dig_max_of_min;
+ else
+ dig_dynamic_min = dm_digtable.rssi_val_min - offset;
+
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig() : bOneEntryOnly=TRUE, dig_dynamic_min=0x%x\n",
+ dig_dynamic_min));
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig() : dm_digtable.rssi_val_min=%d",dm_digtable.
+ rssi_val_min));
+ } else {
+ dig_dynamic_min = dm_dig_min;
+ }
+ } else {
+ dm_digtable.rx_gain_range_max = dm_dig_max;
+ dig_dynamic_min = dm_dig_min;
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig() : No Link\n"));
+ }
+
+ if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): Abnornally false alarm case. \n"));
+
+ if (dm_digtable.large_fa_hit != 3)
+ dm_digtable.large_fa_hit++;
+ if (dm_digtable.forbidden_igi < current_igi) {
+ dm_digtable.forbidden_igi = current_igi;
+ dm_digtable.large_fa_hit = 1;
+ }
+
+ if (dm_digtable.large_fa_hit >= 3) {
+ if((dm_digtable.forbidden_igi + 1) > dm_digtable.rx_gain_range_max)
+ dm_digtable.rx_gain_range_min = dm_digtable.rx_gain_range_max;
+ else
+ dm_digtable.rx_gain_range_min = (dm_digtable.forbidden_igi + 1);
+ dm_digtable.recover_cnt = 3600;
+ }
+
+ } else {
+ /*Recovery mechanism for IGI lower bound*/
+ if (dm_digtable.recover_cnt != 0)
+ dm_digtable.recover_cnt --;
+ else {
+ if (dm_digtable.large_fa_hit < 3) {
+ if ((dm_digtable.forbidden_igi -1) < dig_dynamic_min) {
+ dm_digtable.forbidden_igi = dig_dynamic_min;
+ dm_digtable.rx_gain_range_min = dig_dynamic_min;
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): Normal Case: At Lower Bound\n"));
+ } else {
+ dm_digtable.forbidden_igi --;
+ dm_digtable.rx_gain_range_min = (dm_digtable.forbidden_igi + 1);
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): Normal Case: Approach Lower Bound\n"));
+ }
+ } else {
+ dm_digtable.large_fa_hit = 0;
+ }
+ }
+ }
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): pDM_DigTable->LargeFAHit=%d\n",
+ dm_digtable.large_fa_hit));
+
+ if (rtlpriv->dm.dbginfo.num_qry_beacon_pkt < 10)
+ dm_digtable.rx_gain_range_min = dm_dig_min;
+
+ if (dm_digtable.rx_gain_range_min > dm_digtable.rx_gain_range_max)
+ dm_digtable.rx_gain_range_min = dm_digtable.rx_gain_range_max;
+
+ /*Adjust initial gain by false alarm*/
+ if (mac->link_state >= MAC80211_LINKED) {
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): DIG AfterLink\n"));
+ if (first_connect) {
+ if (dm_digtable.rssi_val_min <= dig_max_of_min)
+ current_igi = dm_digtable.rssi_val_min;
+ else
+ current_igi = dig_max_of_min;
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig: First Connect\n"));
+ } else {
+ if(rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH2)
+ current_igi = current_igi + 4;
+ else if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH1)
+ current_igi = current_igi + 2;
+ else if(rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
+ current_igi = current_igi - 2;
+
+ if((rtlpriv->dm.dbginfo.num_qry_beacon_pkt < 10)
+ &&(rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)) {
+ current_igi = dm_digtable.rx_gain_range_min;
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): Beacon is less than 10 and FA is less than 768, IGI GOES TO 0x1E!!!!!!!!!!!!\n"));
+ }
+ }
+ } else{
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): DIG BeforeLink\n"));
+ if (first_disconnect){
+ current_igi = dm_digtable.rx_gain_range_min;
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): First DisConnect \n"));
+ } else {
+ /*2012.03.30 LukeLee: enable DIG before link but with very high thresholds*/
+ if (rtlpriv->falsealm_cnt.cnt_all > 2000)
+ current_igi = current_igi + 4;
+ else if (rtlpriv->falsealm_cnt.cnt_all > 600)
+ current_igi = current_igi + 2;
+ else if(rtlpriv->falsealm_cnt.cnt_all < 300)
+ current_igi = current_igi - 2;
+ if (current_igi >= 0x3e)
+ current_igi = 0x3e;
+ RT_TRACE(COMP_DIG, DBG_LOUD,("rtl8821ae_dm_dig(): England DIG \n"));
+ }
+ }
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): DIG End Adjust IGI\n"));
+ /* Check initial gain by upper/lower bound*/
+
+ if (current_igi > dm_digtable.rx_gain_range_max)
+ current_igi = dm_digtable.rx_gain_range_max;
+ if (current_igi < dm_digtable.rx_gain_range_min)
+ current_igi = dm_digtable.rx_gain_range_min;
+
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): rx_gain_range_max=0x%x, rx_gain_range_min=0x%x\n",
+ dm_digtable.rx_gain_range_max, dm_digtable.rx_gain_range_min));
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): TotalFA=%d\n", rtlpriv->falsealm_cnt.cnt_all));
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): CurIGValue=0x%x\n", current_igi));
+
+ rtl8821ae_dm_write_dig(hw, current_igi);
+	dm_digtable.b_media_connect_0 = ((mac->link_state >= MAC80211_LINKED) ? true : false);
+ dm_digtable.dig_dynamic_min_0 = dig_dynamic_min;
+}
+
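+/* Update b_one_entry_only: true for a linked station interface, or for
+ * AP/IBSS/mesh interfaces that currently have exactly one peer entry.
+ */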
+static void rtl8821ae_dm_common_info_self_update(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 cnt = 0;
+ struct rtl_sta_info *drv_priv;
+
+ rtlpriv->dm.b_one_entry_only = false;
+
+ if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_STATION &&
+ rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+ rtlpriv->dm.b_one_entry_only = true;
+ return;
+ }
+
+ if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP ||
+ rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC ||
+ rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) {
+ spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+ list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
+ cnt ++;
+ }
+ spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+
+ if (cnt == 1)
+ rtlpriv->dm.b_one_entry_only = true;
+ }
+}
+
+
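+/* Read the OFDM and CCK false-alarm counters from the baseband, sum
+ * them (CCK only counts when the CCK RX path is enabled) and pulse the
+ * reset bits so the next round starts from zero.
+ */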
+static void rtl8821ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
+	u32 cck_enable = 0;
+
+ /*read OFDM FA counter*/
+ falsealm_cnt->cnt_ofdm_fail = rtl_get_bbreg(hw, ODM_REG_OFDM_FA_11AC, BMASKLWORD);
+ falsealm_cnt->cnt_cck_fail = rtl_get_bbreg(hw, ODM_REG_CCK_FA_11AC, BMASKLWORD);
+
+ cck_enable = rtl_get_bbreg(hw, ODM_REG_BB_RX_PATH_11AC, BIT(28));
+ if (cck_enable) /*if(pDM_Odm->pBandType == ODM_BAND_2_4G)*/
+ falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail + falsealm_cnt->cnt_cck_fail;
+ else
+ falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail;
+
+	/*reset OFDM FA counter*/
+ rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 1);
+ rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 0);
+ /* reset CCK FA counter*/
+ rtl_set_bbreg(hw, ODM_REG_CCK_FA_RST_11AC, BIT(15), 0);
+ rtl_set_bbreg(hw, ODM_REG_CCK_FA_RST_11AC, BIT(15), 1);
+
+ RT_TRACE(COMP_DIG, DBG_LOUD, ("Cnt_Cck_fail=%d\n",
+ falsealm_cnt->cnt_cck_fail));
+ RT_TRACE(COMP_DIG, DBG_LOUD, ("cnt_ofdm_fail=%d\n",
+ falsealm_cnt->cnt_ofdm_fail));
+ RT_TRACE(COMP_DIG, DBG_LOUD, ("Total False Alarm=%d\n",
+ falsealm_cnt->cnt_all));
+}
+
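+/* Every other call: first trigger the RF thermal meter, then on the
+ * next invocation run the tx power tracking callback that consumes the
+ * measured value.
+ */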
+void rtl8812ae_dm_check_txpower_tracking_thermalmeter(
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ static u8 tm_trigger = 0;
+
+ if (!rtlpriv->dm.btxpower_tracking)
+ return;
+
+ if (!tm_trigger) {
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER_88E, BIT(17)|BIT(16), 0x03);
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Trigger 8812 Thermal Meter!!\n"));
+ tm_trigger = 1;
+ return;
+ } else {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Schedule TxPowerTracking direct call!!\n"));
+ rtl8812ae_dm_txpower_tracking_callback_thermalmeter(hw);
+ tm_trigger = 0;
+ }
+}
+
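+/* Run IQ calibration once, on the second call after the link is
+ * established; the counter resets whenever the link drops.
+ */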
+static void rtl8821ae_dm_iq_calibrate(struct ieee80211_hw *hw)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (mac->link_state >= MAC80211_LINKED) {
+ /*if ((*rtldm->p_channel != rtldm->pre_channel )
+ && (!mac->act_scanning)) {
+ rtldm->pre_channel = *rtldm->p_channel;
+ rtldm->linked_interval = 0;
+ }*/
+
+ if(rtldm->linked_interval < 3)
+ rtldm->linked_interval ++;
+
+ if(rtldm->linked_interval == 2)
+ {
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtl8812ae_phy_iq_calibrate(hw, false);
+ else
+ rtl8821ae_phy_iq_calibrate(hw, false);
+ }
+ } else {
+ rtldm->linked_interval = 0;
+ }
+}
+
+
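+/* Select the per-path delta swing tables (temperature up/down) that
+ * match the current channel group and, on 2.4 GHz, whether the last TX
+ * rate was CCK; out-of-range channels fall back to the 8818E tables.
+ */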
+void rtl8812ae_get_delta_swing_table(
+ struct ieee80211_hw *hw,
+ u8 **temperature_up_a,
+ u8 **temperature_down_a,
+ u8 **temperature_up_b,
+ u8 **temperature_down_b
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ u8 channel = rtlphy->current_channel;
+ u8 rate = rtldm->tx_rate;
+
+
+ if ( 1 <= channel && channel <= 14) {
+ if (RX_HAL_IS_CCK_RATE(rate)) {
+ *temperature_up_a = rtldm->delta_swing_table_idx_24gccka_p;
+ *temperature_down_a = rtldm->delta_swing_table_idx_24gccka_n;
+ *temperature_up_b = rtldm->delta_swing_table_idx_24gcckb_p;
+ *temperature_down_b = rtldm->delta_swing_table_idx_24gcckb_n;
+ } else {
+ *temperature_up_a = rtldm->delta_swing_table_idx_24ga_p;
+ *temperature_down_a = rtldm->delta_swing_table_idx_24ga_n;
+ *temperature_up_b = rtldm->delta_swing_table_idx_24gb_p;
+ *temperature_down_b = rtldm->delta_swing_table_idx_24gb_n;
+ }
+ } else if ( 36 <= channel && channel <= 64) {
+ *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[0];
+ *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[0];
+ *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[0];
+ *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[0];
+ } else if ( 100 <= channel && channel <= 140) {
+ *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[1];
+ *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[1];
+ *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[1];
+ *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[1];
+ } else if ( 149 <= channel && channel <= 173) {
+ *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[2];
+ *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[2];
+ *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[2];
+ *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[2];
+ } else {
+ *temperature_up_a = (u8*)rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack;
+ *temperature_down_a =(u8*)rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack;
+ *temperature_up_b = (u8*)rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack;
+ *temperature_down_b = (u8*)rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack;
+ }
+
+ return;
+}
+
+void rtl8812ae_phy_lccalibrate(
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("===> rtl8812ae_phy_lccalibrate\n"));
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("<=== rtl8812ae_phy_lccalibrate\n"));
+
+}
+
+void rtl8812ae_dm_update_init_rate(
+ struct ieee80211_hw *hw,
+ u8 rate
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 p = 0;
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Get C2H Command! Rate=0x%x\n", rate));
+
+ rtldm->tx_rate = rate;
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+		rtl8821ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, RF90_PATH_A, 0);
+	} else {
+		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+			rtl8812ae_dm_txpwr_track_set_pwr(hw, BBSWING, p, 0);
+	}
+
+}
+
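+/* Map a hardware TX descriptor rate code to the corresponding MGN_*
+ * rate used when computing the BB swing power tracking limit.
+ */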
+u8 rtl8812ae_hw_rate_to_mrate(
+ struct ieee80211_hw *hw,
+ u8 rate
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 ret_rate = MGN_1M;
+
+
+ switch(rate)
+ {
+ case DESC_RATE1M: ret_rate = MGN_1M; break;
+ case DESC_RATE2M: ret_rate = MGN_2M; break;
+ case DESC_RATE5_5M: ret_rate = MGN_5_5M; break;
+ case DESC_RATE11M: ret_rate = MGN_11M; break;
+ case DESC_RATE6M: ret_rate = MGN_6M; break;
+ case DESC_RATE9M: ret_rate = MGN_9M; break;
+ case DESC_RATE12M: ret_rate = MGN_12M; break;
+ case DESC_RATE18M: ret_rate = MGN_18M; break;
+ case DESC_RATE24M: ret_rate = MGN_24M; break;
+ case DESC_RATE36M: ret_rate = MGN_36M; break;
+ case DESC_RATE48M: ret_rate = MGN_48M; break;
+ case DESC_RATE54M: ret_rate = MGN_54M; break;
+ case DESC_RATEMCS0: ret_rate = MGN_MCS0; break;
+ case DESC_RATEMCS1: ret_rate = MGN_MCS1; break;
+ case DESC_RATEMCS2: ret_rate = MGN_MCS2; break;
+ case DESC_RATEMCS3: ret_rate = MGN_MCS3; break;
+ case DESC_RATEMCS4: ret_rate = MGN_MCS4; break;
+ case DESC_RATEMCS5: ret_rate = MGN_MCS5; break;
+ case DESC_RATEMCS6: ret_rate = MGN_MCS6; break;
+ case DESC_RATEMCS7: ret_rate = MGN_MCS7; break;
+ case DESC_RATEMCS8: ret_rate = MGN_MCS8; break;
+ case DESC_RATEMCS9: ret_rate = MGN_MCS9; break;
+ case DESC_RATEMCS10: ret_rate = MGN_MCS10; break;
+ case DESC_RATEMCS11: ret_rate = MGN_MCS11; break;
+ case DESC_RATEMCS12: ret_rate = MGN_MCS12; break;
+ case DESC_RATEMCS13: ret_rate = MGN_MCS13; break;
+ case DESC_RATEMCS14: ret_rate = MGN_MCS14; break;
+ case DESC_RATEMCS15: ret_rate = MGN_MCS15; break;
+ case DESC_RATEVHT1SS_MCS0: ret_rate = MGN_VHT1SS_MCS0; break;
+ case DESC_RATEVHT1SS_MCS1: ret_rate = MGN_VHT1SS_MCS1; break;
+ case DESC_RATEVHT1SS_MCS2: ret_rate = MGN_VHT1SS_MCS2; break;
+ case DESC_RATEVHT1SS_MCS3: ret_rate = MGN_VHT1SS_MCS3; break;
+ case DESC_RATEVHT1SS_MCS4: ret_rate = MGN_VHT1SS_MCS4; break;
+ case DESC_RATEVHT1SS_MCS5: ret_rate = MGN_VHT1SS_MCS5; break;
+ case DESC_RATEVHT1SS_MCS6: ret_rate = MGN_VHT1SS_MCS6; break;
+ case DESC_RATEVHT1SS_MCS7: ret_rate = MGN_VHT1SS_MCS7; break;
+ case DESC_RATEVHT1SS_MCS8: ret_rate = MGN_VHT1SS_MCS8; break;
+ case DESC_RATEVHT1SS_MCS9: ret_rate = MGN_VHT1SS_MCS9; break;
+ case DESC_RATEVHT2SS_MCS0: ret_rate = MGN_VHT2SS_MCS0; break;
+ case DESC_RATEVHT2SS_MCS1: ret_rate = MGN_VHT2SS_MCS1; break;
+ case DESC_RATEVHT2SS_MCS2: ret_rate = MGN_VHT2SS_MCS2; break;
+ case DESC_RATEVHT2SS_MCS3: ret_rate = MGN_VHT2SS_MCS3; break;
+ case DESC_RATEVHT2SS_MCS4: ret_rate = MGN_VHT2SS_MCS4; break;
+ case DESC_RATEVHT2SS_MCS5: ret_rate = MGN_VHT2SS_MCS5; break;
+ case DESC_RATEVHT2SS_MCS6: ret_rate = MGN_VHT2SS_MCS6; break;
+ case DESC_RATEVHT2SS_MCS7: ret_rate = MGN_VHT2SS_MCS7; break;
+ case DESC_RATEVHT2SS_MCS8: ret_rate = MGN_VHT2SS_MCS8; break;
+ case DESC_RATEVHT2SS_MCS9: ret_rate = MGN_VHT2SS_MCS9; break;
+
+ default:
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("HwRateToMRate8812(): Non supported Rate [%x]!!!\n",rate ));
+ break;
+ }
+ return ret_rate;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function:	rtl8812ae_dm_txpwr_track_set_pwr()
+ *
+ * Overview:	Adjust the tx power of the current channel according to the
+ *		requested method (BBSWING or MIX_MODE). OFDM & CCK are handled differently.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 04/23/2012 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
+ enum pwr_track_control_method method, u8 rf_path, u8 channel_mapped_index)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 final_bb_swing_idx[2];
+ u8 pwr_tracking_limit = 26; /*+1.0dB*/
+ u8 tx_rate = 0xFF;
+ s8 final_ofdm_swing_index = 0;
+
+ if(rtldm->tx_rate != 0xFF)
+ tx_rate = rtl8812ae_hw_rate_to_mrate(hw, rtldm->tx_rate);
+
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("===>rtl8812ae_dm_txpwr_track_set_pwr\n"));
+
+ if(tx_rate != 0xFF) { /*20130429 Mimic Modify High Rate BBSwing Limit.*/
+ /*CCK*/
+ if((tx_rate >= MGN_1M) && (tx_rate <= MGN_11M))
+ pwr_tracking_limit = 32; /*+4dB*/
+ /*OFDM*/
+ else if((tx_rate >= MGN_6M) && (tx_rate <= MGN_48M))
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if(tx_rate == MGN_54M)
+ pwr_tracking_limit = 28; /*+2dB*/
+ /*HT*/
+ else if((tx_rate >= MGN_MCS0) && (tx_rate <= MGN_MCS2)) /*QPSK/BPSK*/
+ pwr_tracking_limit = 34; /*+5dB*/
+ else if((tx_rate >= MGN_MCS3) && (tx_rate <= MGN_MCS4)) /*16QAM*/
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if((tx_rate >= MGN_MCS5) && (tx_rate <= MGN_MCS7)) /*64QAM*/
+ pwr_tracking_limit = 28; /*+2dB*/
+
+ else if((tx_rate >= MGN_MCS8) && (tx_rate <= MGN_MCS10)) /*QPSK/BPSK*/
+ pwr_tracking_limit = 34; /*+5dB*/
+ else if((tx_rate >= MGN_MCS11) && (tx_rate <= MGN_MCS12)) /*16QAM*/
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if((tx_rate >= MGN_MCS13) && (tx_rate <= MGN_MCS15)) /*64QAM*/
+ pwr_tracking_limit = 28; /*+2dB*/
+
+ /*2 VHT*/
+ else if((tx_rate >= MGN_VHT1SS_MCS0) && (tx_rate <= MGN_VHT1SS_MCS2)) /*QPSK/BPSK*/
+ pwr_tracking_limit = 34; /*+5dB*/
+ else if((tx_rate >= MGN_VHT1SS_MCS3) && (tx_rate <= MGN_VHT1SS_MCS4)) /*16QAM*/
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if((tx_rate >= MGN_VHT1SS_MCS5)&&(tx_rate <= MGN_VHT1SS_MCS6)) /*64QAM*/
+ pwr_tracking_limit = 28; /*+2dB*/
+ else if(tx_rate == MGN_VHT1SS_MCS7) /*64QAM*/
+ pwr_tracking_limit = 26; /*+1dB*/
+ else if(tx_rate == MGN_VHT1SS_MCS8) /*256QAM*/
+ pwr_tracking_limit = 24; /*+0dB*/
+ else if(tx_rate == MGN_VHT1SS_MCS9) /*256QAM*/
+ pwr_tracking_limit = 22; /*-1dB*/
+
+ else if((tx_rate >= MGN_VHT2SS_MCS0)&&(tx_rate <= MGN_VHT2SS_MCS2)) /*QPSK/BPSK*/
+ pwr_tracking_limit = 34; /*+5dB*/
+ else if((tx_rate >= MGN_VHT2SS_MCS3)&&(tx_rate <= MGN_VHT2SS_MCS4)) /*16QAM*/
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if((tx_rate >= MGN_VHT2SS_MCS5)&&(tx_rate <= MGN_VHT2SS_MCS6)) /*64QAM*/
+ pwr_tracking_limit = 28; /*+2dB*/
+ else if(tx_rate == MGN_VHT2SS_MCS7) /*64QAM*/
+ pwr_tracking_limit = 26; /*+1dB*/
+ else if(tx_rate == MGN_VHT2SS_MCS8) /*256QAM*/
+ pwr_tracking_limit = 24; /*+0dB*/
+ else if(tx_rate == MGN_VHT2SS_MCS9) /*256QAM*/
+ pwr_tracking_limit = 22; /*-1dB*/
+ else
+ pwr_tracking_limit = 24;
+ }
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("TxRate=0x%x, PwrTrackingLimit=%d\n", tx_rate, pwr_tracking_limit));
+
+
+ if (method == BBSWING) {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("===>rtl8812ae_dm_txpwr_track_set_pwr\n"));
+
+ if (rf_path == RF90_PATH_A) {
+ final_bb_swing_idx[RF90_PATH_A] =
+ (rtldm->ofdm_index[RF90_PATH_A] > pwr_tracking_limit) ?
+ pwr_tracking_limit : rtldm->ofdm_index[RF90_PATH_A];
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d, \
+ pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n",
+ rtldm->ofdm_index[RF90_PATH_A], final_bb_swing_idx[RF90_PATH_A]));
+
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_bb_swing_idx[RF90_PATH_A]]);
+ } else {
+ final_bb_swing_idx[RF90_PATH_B] =
+ rtldm->ofdm_index[RF90_PATH_B] > pwr_tracking_limit ? \
+ pwr_tracking_limit : rtldm->ofdm_index[RF90_PATH_B];
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_B]=%d, \
+ pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_B]=%d\n",
+ rtldm->ofdm_index[RF90_PATH_B], final_bb_swing_idx[RF90_PATH_B]));
+
+ rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_bb_swing_idx[RF90_PATH_B]]);
+ }
+ } else if (method == MIX_MODE) {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("pDM_Odm->DefaultOfdmIndex=%d, \
+ pDM_Odm->Aboslute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n",
+ rtldm->default_ofdm_index, rtldm->aboslute_ofdm_swing_idx[rf_path],
+ rf_path ));
+
+
+ final_ofdm_swing_index = rtldm->default_ofdm_index + rtldm->aboslute_ofdm_swing_idx[rf_path];
+
+ if (rf_path == RF90_PATH_A) {
+			if (final_ofdm_swing_index > pwr_tracking_limit) { /*BBSwing higher than limit*/
+
+ rtldm->remnant_cck_idx = final_ofdm_swing_index - pwr_tracking_limit;
+ /* CCK Follow the same compensate value as Path A*/
+ rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index - pwr_tracking_limit;
+
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[pwr_tracking_limit]);
+
+ rtldm->modify_txagc_flag_path_a = true;
+
+ /*Set TxAGC Page C{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_A Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d \n",
+ pwr_tracking_limit, rtldm->remnant_ofdm_swing_idx[rf_path]));
+ } else if (final_ofdm_swing_index < 0) {
+ rtldm->remnant_cck_idx = final_ofdm_swing_index;
+ /* CCK Follow the same compensate value as Path A*/
+ rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index;
+
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[0]);
+
+ rtldm->modify_txagc_flag_path_a = true;
+
+ /*Set TxAGC Page C{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d \n",
+ rtldm->remnant_ofdm_swing_idx[rf_path]));
+ } else {
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_ofdm_swing_index]);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_A Compensate with BBSwing , Final_OFDM_Swing_Index = %d \n",
+ final_ofdm_swing_index));
+
+ if(rtldm->modify_txagc_flag_path_a) { /*If TxAGC has changed, reset TxAGC again*/
+ rtldm->remnant_cck_idx = 0;
+ rtldm->remnant_ofdm_swing_idx[rf_path] = 0;
+
+ /*Set TxAGC Page C{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+ rtldm->modify_txagc_flag_path_a = false;
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_A pDM_Odm->Modify_TxAGC_Flag = FALSE \n"));
+ }
+ }
+ }
+
+ if (rf_path == RF90_PATH_B) {
+			if (final_ofdm_swing_index > pwr_tracking_limit) { /*BBSwing higher than limit*/
+ rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index - pwr_tracking_limit;
+
+ rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[pwr_tracking_limit]);
+
+ rtldm->modify_txagc_flag_path_b = true;
+
+ /*Set TxAGC Page E{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_B);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_B Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d \n",
+ pwr_tracking_limit, rtldm->remnant_ofdm_swing_idx[rf_path]));
+ } else if (final_ofdm_swing_index < 0) {
+ rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index;
+
+ rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[0]);
+
+ rtldm->modify_txagc_flag_path_b = true;
+
+ /*Set TxAGC Page E{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_B);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_B Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d \n",
+ rtldm->remnant_ofdm_swing_idx[rf_path] ));
+ } else {
+ rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_ofdm_swing_index]);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_B Compensate with BBSwing , Final_OFDM_Swing_Index = %d \n",
+ final_ofdm_swing_index));
+
+ if(rtldm->modify_txagc_flag_path_b) { /*If TxAGC has changed, reset TxAGC again*/
+ rtldm->remnant_ofdm_swing_idx[rf_path] = 0;
+
+ /*Set TxAGC Page E{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_B);
+
+ rtldm->modify_txagc_flag_path_b = false;
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_B pDM_Odm->Modify_TxAGC_Flag = FALSE \n"));
+ }
+ }
+ }
+
+ } else {
+ return;
+ }
+}
+
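+/* Thermal-meter based TX power tracking for the 8812AE: average the RF
+ * thermal readings, compare against the eFuse calibration value, move
+ * the OFDM/CCK swing indexes accordingly, reprogram both paths in
+ * MIX_MODE, and trigger LCK/IQK when the temperature delta crosses the
+ * threshold.
+ */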
+void rtl8812ae_dm_txpower_tracking_callback_thermalmeter
+ (struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ u8 thermal_value = 0, delta, delta_lck, delta_iqk, p = 0, i = 0;
+ u8 thermal_value_avg_count = 0;
+ u32 thermal_value_avg = 0;
+
+ u8 ofdm_min_index = 6; /*OFDM BB Swing should be less than +3.0dB, which is required by Arthur*/
+ u8 index_for_channel = 0; /* GetRightChnlPlaceforIQK(pHalData->CurrentChannel)*/
+
+ /* 1. The following TWO tables decide the final index of OFDM/CCK swing table.*/
+ u8 *delta_swing_table_idx_tup_a;
+ u8 *delta_swing_table_idx_tdown_a;
+ u8 *delta_swing_table_idx_tup_b;
+ u8 *delta_swing_table_idx_tdown_b;
+
+	/*2. Initialization (7 steps in total)*/
+ rtl8812ae_get_delta_swing_table(hw, (u8**)&delta_swing_table_idx_tup_a,
+ (u8**)&delta_swing_table_idx_tdown_a,
+ (u8**)&delta_swing_table_idx_tup_b,
+ (u8**)&delta_swing_table_idx_tdown_b);
+
+ rtldm->btxpower_trackinginit = true;
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("===>rtl8812ae_dm_txpower_tracking_callback_thermalmeter, \
+ \n pDM_Odm->BbSwingIdxCckBase: %d, pDM_Odm->BbSwingIdxOfdmBase[A]:\
+ %d, pDM_Odm->DefaultOfdmIndex: %d\n",
+ rtldm->bb_swing_idx_cck_base,
+ rtldm->bb_swing_idx_ofdm_base[RF90_PATH_A],
+ rtldm->default_ofdm_index));
+
+ thermal_value = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER_8812A, 0xfc00); /*0x42: RF Reg[15:10] 88E*/
+ if( ! rtldm->txpower_track_control || rtlefuse->eeprom_thermalmeter == 0 ||
+ rtlefuse->eeprom_thermalmeter == 0xFF)
+ return;
+
+
+ /* 3. Initialize ThermalValues of RFCalibrateInfo*/
+
+ if(rtlhal->reloadtxpowerindex)
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("reload ofdm index for band switch\n"));
+ }
+
+ /*4. Calculate average thermal meter*/
+ rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermal_value;
+ rtldm->thermalvalue_avg_index++;
+ if(rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_8812A)
+ /*Average times = c.AverageThermalNum*/
+ rtldm->thermalvalue_avg_index = 0;
+
+ for(i = 0; i < AVG_THERMAL_NUM_8812A; i++)
+ {
+ if(rtldm->thermalvalue_avg[i])
+ {
+ thermal_value_avg += rtldm->thermalvalue_avg[i];
+ thermal_value_avg_count++;
+ }
+ }
+
+ if(thermal_value_avg_count) /*Calculate Average ThermalValue after average enough times*/
+ {
+ thermal_value = (u8)(thermal_value_avg / thermal_value_avg_count);
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter));
+ }
+
+ /*5. Calculate delta, delta_LCK, delta_IQK.*/
+ /*"delta" here is used to determine whether thermal value changes or not.*/
+ delta = (thermal_value > rtldm->thermalvalue) ? \
+ (thermal_value - rtldm->thermalvalue): \
+ (rtldm->thermalvalue - thermal_value);
+ delta_lck = (thermal_value > rtldm->thermalvalue_lck) ? \
+ (thermal_value - rtldm->thermalvalue_lck) : \
+ (rtldm->thermalvalue_lck - thermal_value);
+ delta_iqk = (thermal_value > rtldm->thermalvalue_iqk) ? \
+ (thermal_value - rtldm->thermalvalue_iqk) : \
+ (rtldm->thermalvalue_iqk - thermal_value);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
+ delta, delta_lck, delta_iqk));
+
+ /* 6. If necessary, do LCK. */
+
+ if (delta_lck >= IQK_THRESHOLD) /*Delta temperature is equal to or larger than 20 centigrade.*/
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("delta_LCK(%d) >= Threshold_IQK(%d)\n",
+ delta_lck, IQK_THRESHOLD));
+ rtldm->thermalvalue_lck = thermal_value;
+ rtl8812ae_phy_lccalibrate(hw);
+ }
+
+ /*7. If necessary, move the index of swing table to adjust Tx power.*/
+
+ if (delta > 0 && rtldm->txpower_track_control)
+ {
+ /*"delta" here is used to record the absolute value of differrence.*/
+ delta = thermal_value > rtlefuse->eeprom_thermalmeter ? \
+ (thermal_value - rtlefuse->eeprom_thermalmeter) : \
+ (rtlefuse->eeprom_thermalmeter - thermal_value);
+
+ if (delta >= TXPWR_TRACK_TABLE_SIZE)
+ delta = TXPWR_TRACK_TABLE_SIZE - 1;
+
+ /*7.1 The Final Power Index = BaseIndex + PowerIndexOffset*/
+
+ if(thermal_value > rtlefuse->eeprom_thermalmeter) {
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("delta_swing_table_idx_tup_a[%d] = %d\n",
+ delta, delta_swing_table_idx_tup_a[delta]));
+ rtldm->delta_power_index_last[RF90_PATH_A] = rtldm->delta_power_index[RF90_PATH_A];
+ rtldm->delta_power_index[RF90_PATH_A] = delta_swing_table_idx_tup_a[delta];
+
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A] = delta_swing_table_idx_tup_a[delta];
+ /*Record delta swing for mix mode power tracking*/
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A]));
+
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("delta_swing_table_idx_tup_b[%d] = %d\n",
+ delta, delta_swing_table_idx_tup_b[delta]));
+ rtldm->delta_power_index_last[RF90_PATH_B] = rtldm->delta_power_index[RF90_PATH_B];
+ rtldm->delta_power_index[RF90_PATH_B] = delta_swing_table_idx_tup_b[delta];
+
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_B] = delta_swing_table_idx_tup_b[delta];
+ /*Record delta swing for mix mode power tracking*/
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_B]));
+
+ } else {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("delta_swing_table_idx_tdown_a[%d] = %d\n",
+ delta, delta_swing_table_idx_tdown_a[delta]));
+
+ rtldm->delta_power_index_last[RF90_PATH_A] = rtldm->delta_power_index[RF90_PATH_A];
+ rtldm->delta_power_index[RF90_PATH_A] = -1 * delta_swing_table_idx_tdown_a[delta];
+
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A] = -1 * delta_swing_table_idx_tdown_a[delta];
+ /* Record delta swing for mix mode power tracking*/
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A]));
+
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("deltaSwingTableIdx_TDOWN_B[%d] = %d\n",
+ delta, delta_swing_table_idx_tdown_b[delta]));
+
+ rtldm->delta_power_index_last[RF90_PATH_B] = rtldm->delta_power_index[RF90_PATH_B];
+ rtldm->delta_power_index[RF90_PATH_B] = -1 * delta_swing_table_idx_tdown_b[delta];
+
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_B] = -1 * delta_swing_table_idx_tdown_b[delta];
+ /*Record delta swing for mix mode power tracking*/
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_B]));
+ }
+
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("\n\n================================ [Path-%c] \
+ Calculating PowerIndexOffset ================================\n",
+ (p == RF90_PATH_A ? 'A' : 'B')));
+
+ if (rtldm->delta_power_index[p] == rtldm->delta_power_index_last[p])
+ /*If Thermal value changes but lookup table value still the same*/
+ rtldm->power_index_offset[p] = 0;
+ else
+ rtldm->power_index_offset[p] =
+ rtldm->delta_power_index[p] - rtldm->delta_power_index_last[p];
+ /*Power Index Diff between 2 times Power Tracking*/
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("[Path-%c] PowerIndexOffset(%d) = DeltaPowerIndex(%d) - DeltaPowerIndexLast(%d)\n",
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->power_index_offset[p],
+ rtldm->delta_power_index[p] ,
+ rtldm->delta_power_index_last[p]));
+
+ rtldm->ofdm_index[p] =
+ rtldm->bb_swing_idx_ofdm_base[p] + rtldm->power_index_offset[p];
+ rtldm->cck_index =
+ rtldm->bb_swing_idx_cck_base + rtldm->power_index_offset[p];
+
+ rtldm->bb_swing_idx_cck = rtldm->cck_index;
+ rtldm->bb_swing_idx_ofdm[p] = rtldm->ofdm_index[p];
+
+ /*************Print BB Swing Base and Index Offset*************/
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n",
+ rtldm->bb_swing_idx_cck,
+ rtldm->bb_swing_idx_cck_base,
+ rtldm->power_index_offset[p]));
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n",
+ rtldm->bb_swing_idx_ofdm[p],
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->bb_swing_idx_ofdm_base[p],
+ rtldm->power_index_offset[p]));
+
+ /*7.1 Handle boundary conditions of index.*/
+
+
+ if(rtldm->ofdm_index[p] > TXSCALE_TABLE_SIZE -1)
+ {
+ rtldm->ofdm_index[p] = TXSCALE_TABLE_SIZE -1;
+ }
+ else if (rtldm->ofdm_index[p] < ofdm_min_index)
+ {
+ rtldm->ofdm_index[p] = ofdm_min_index;
+ }
+ }
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("\n\n======================================================\
+ ==================================================\n"));
+ if(rtldm->cck_index > TXSCALE_TABLE_SIZE -1)
+ rtldm->cck_index = TXSCALE_TABLE_SIZE -1;
+ else if (rtldm->cck_index < 0)
+ rtldm->cck_index = 0;
+ } else {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("The thermal meter is unchanged or TxPowerTracking OFF(%d): \
+ ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n",
+ rtldm->txpower_track_control,
+ thermal_value,
+ rtldm->thermalvalue));
+
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+ rtldm->power_index_offset[p] = 0;
+ }
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("TxPowerTracking: [CCK] Swing Current Index: %d, Swing Base Index: %d\n",
+ rtldm->cck_index, rtldm->bb_swing_idx_cck_base)); /*Print Swing base & current*/
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("TxPowerTracking: [OFDM] Swing Current Index: %d, Swing Base Index[%c]: %d\n",
+ rtldm->ofdm_index[p],
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->bb_swing_idx_ofdm_base[p]));
+ }
+
+ if ((rtldm->power_index_offset[RF90_PATH_A] != 0 ||
+ rtldm->power_index_offset[RF90_PATH_B] != 0 ) &&
+ rtldm->txpower_track_control)
+ {
+ /*7.2 Configure the Swing Table to adjust Tx Power.*/
+ /*Always TRUE after Tx Power is adjusted by power tracking.*/
+ /*
+ 2012/04/23 MH According to Luke's suggestion, we can not write BB digital
+ to increase TX power. Otherwise, EVM will be bad.
+
+ 2012/04/25 MH Add for tx power tracking to set tx power in tx agc for 88E.
+ */
+ if (thermal_value > rtldm->thermalvalue)
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature Increasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_A],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue));
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature Increasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_B],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue));
+
+ } else if (thermal_value < rtldm->thermalvalue) { /*Low temperature*/
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_A],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue));
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature Decreasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_B],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue));
+ }
+
+ if (thermal_value > rtlefuse->eeprom_thermalmeter) {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature(%d) higher than PG value(%d)\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter));
+
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("**********Enter POWER Tracking MIX_MODE**********\n"));
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+ rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, 0);
+
+ } else {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature(%d) lower than PG value(%d)\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter));
+
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("**********Enter POWER Tracking MIX_MODE**********\n"));
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+ rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, index_for_channel);
+
+ }
+
+ rtldm->bb_swing_idx_cck_base = rtldm->bb_swing_idx_cck; /*Record last time Power Tracking result as base.*/
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+ rtldm->bb_swing_idx_ofdm_base[p] = rtldm->bb_swing_idx_ofdm[p];
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+ rtldm->thermalvalue, thermal_value));
+
+ rtldm->thermalvalue = thermal_value; /*Record last Power Tracking Thermal Value*/
+
+ }
+ /*Delta temperature is equal to or larger than 20 centigrade (When threshold is 8).*/
+	if (delta_iqk >= IQK_THRESHOLD) {
+
+ if ( !rtlphy->b_iqk_in_progress) {
+
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = true;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+
+ rtl8812ae_do_iqk(hw, delta_iqk, thermal_value, 8);
+
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = false;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+ }
+ }
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("<===rtl8812ae_dm_txpower_tracking_callback_thermalmeter\n"));
+}
+
+
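+/* 8821AE variant of the delta swing table selection; see
+ * rtl8812ae_get_delta_swing_table() above.
+ */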
+void rtl8821ae_get_delta_swing_table(
+ struct ieee80211_hw *hw,
+ u8 **temperature_up_a,
+ u8 **temperature_down_a,
+ u8 **temperature_up_b,
+ u8 **temperature_down_b
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ u8 channel = rtlphy->current_channel;
+ u8 rate = rtldm->tx_rate;
+
+
+ if ( 1 <= channel && channel <= 14) {
+ if (RX_HAL_IS_CCK_RATE(rate)) {
+ *temperature_up_a = rtldm->delta_swing_table_idx_24gccka_p;
+ *temperature_down_a = rtldm->delta_swing_table_idx_24gccka_n;
+ *temperature_up_b = rtldm->delta_swing_table_idx_24gcckb_p;
+ *temperature_down_b = rtldm->delta_swing_table_idx_24gcckb_n;
+ } else {
+ *temperature_up_a = rtldm->delta_swing_table_idx_24ga_p;
+ *temperature_down_a = rtldm->delta_swing_table_idx_24ga_n;
+ *temperature_up_b = rtldm->delta_swing_table_idx_24gb_p;
+ *temperature_down_b = rtldm->delta_swing_table_idx_24gb_n;
+ }
+ } else if ( 36 <= channel && channel <= 64) {
+ *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[0];
+ *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[0];
+ *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[0];
+ *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[0];
+ } else if ( 100 <= channel && channel <= 140) {
+ *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[1];
+ *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[1];
+ *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[1];
+ *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[1];
+ } else if ( 149 <= channel && channel <= 173) {
+ *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[2];
+ *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[2];
+ *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[2];
+ *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[2];
+ } else {
+ *temperature_up_a = (u8*)rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack;
+ *temperature_down_a =(u8*)rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack;
+ *temperature_up_b = (u8*)rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack;
+ *temperature_down_b = (u8*)rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack;
+ }
+
+ return;
+}
+
+void rtl8821ae_phy_lccalibrate(
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("===> rtl8812ae_phy_lccalibrate\n"));
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("<=== rtl8812ae_phy_lccalibrate\n"));
+
+}
+
+/*-----------------------------------------------------------------------------
+ * Function:	rtl8821ae_dm_txpwr_track_set_pwr()
+ *
+ * Overview:	Adjust the tx power of the current channel according to the
+ *		requested method (BBSWING or MIX_MODE). OFDM & CCK are handled differently.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 04/23/2012 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
+ enum pwr_track_control_method method, u8 rf_path, u8 channel_mapped_index)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 final_bb_swing_idx[1];
+ u8 pwr_tracking_limit = 26; /*+1.0dB*/
+ u8 tx_rate = 0xFF;
+ s8 final_ofdm_swing_index = 0;
+
+ if(rtldm->tx_rate != 0xFF)
+ tx_rate = rtl8812ae_hw_rate_to_mrate(hw, rtldm->tx_rate);
+
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("===>rtl8812ae_dm_txpwr_track_set_pwr\n"));
+
+ if(tx_rate != 0xFF) { /*20130429 Mimic Modify High Rate BBSwing Limit.*/
+ /*CCK*/
+ if((tx_rate >= MGN_1M) && (tx_rate <= MGN_11M))
+ pwr_tracking_limit = 32; /*+4dB*/
+ /*OFDM*/
+ else if((tx_rate >= MGN_6M) && (tx_rate <= MGN_48M))
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if(tx_rate == MGN_54M)
+ pwr_tracking_limit = 28; /*+2dB*/
+ /*HT*/
+ else if((tx_rate >= MGN_MCS0) && (tx_rate <= MGN_MCS2)) /*QPSK/BPSK*/
+ pwr_tracking_limit = 34; /*+5dB*/
+ else if((tx_rate >= MGN_MCS3) && (tx_rate <= MGN_MCS4)) /*16QAM*/
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if((tx_rate >= MGN_MCS5) && (tx_rate <= MGN_MCS7)) /*64QAM*/
+ pwr_tracking_limit = 28; /*+2dB*/
+#if 0
+ else if((tx_rate >= MGN_MCS8) && (tx_rate <= MGN_MCS10)) /*QPSK/BPSK*/
+ pwr_tracking_limit = 34; /*+5dB*/
+ else if((tx_rate >= MGN_MCS11) && (tx_rate <= MGN_MCS12)) /*16QAM*/
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if((tx_rate >= MGN_MCS13) && (tx_rate <= MGN_MCS15)) /*64QAM*/
+ pwr_tracking_limit = 28; /*+2dB*/
+#endif
+ /*2 VHT*/
+ else if((tx_rate >= MGN_VHT1SS_MCS0) && (tx_rate <= MGN_VHT1SS_MCS2)) /*QPSK/BPSK*/
+ pwr_tracking_limit = 34; /*+5dB*/
+ else if((tx_rate >= MGN_VHT1SS_MCS3) && (tx_rate <= MGN_VHT1SS_MCS4)) /*16QAM*/
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if((tx_rate >= MGN_VHT1SS_MCS5)&&(tx_rate <= MGN_VHT1SS_MCS6)) /*64QAM*/
+ pwr_tracking_limit = 28; /*+2dB*/
+ else if(tx_rate == MGN_VHT1SS_MCS7) /*64QAM*/
+ pwr_tracking_limit = 26; /*+1dB*/
+ else if(tx_rate == MGN_VHT1SS_MCS8) /*256QAM*/
+ pwr_tracking_limit = 24; /*+0dB*/
+ else if(tx_rate == MGN_VHT1SS_MCS9) /*256QAM*/
+ pwr_tracking_limit = 22; /*-1dB*/
+ else
+ pwr_tracking_limit = 24;
+ }
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("TxRate=0x%x, PwrTrackingLimit=%d\n", tx_rate, pwr_tracking_limit));
+
+
+ if (method == BBSWING) {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("===>rtl8812ae_dm_txpwr_track_set_pwr\n"));
+
+ if (rf_path == RF90_PATH_A) {
+ final_bb_swing_idx[RF90_PATH_A] =
+ (rtldm->ofdm_index[RF90_PATH_A] > pwr_tracking_limit) ?
+ pwr_tracking_limit : rtldm->ofdm_index[RF90_PATH_A];
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d, \
+ pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n",
+ rtldm->ofdm_index[RF90_PATH_A], final_bb_swing_idx[RF90_PATH_A]));
+
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_bb_swing_idx[RF90_PATH_A]]);
+ }
+ } else if (method == MIX_MODE) {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("pDM_Odm->DefaultOfdmIndex=%d, \
+ pDM_Odm->Aboslute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n",
+ rtldm->default_ofdm_index, rtldm->aboslute_ofdm_swing_idx[rf_path],
+ rf_path ));
+
+
+ final_ofdm_swing_index = rtldm->default_ofdm_index + rtldm->aboslute_ofdm_swing_idx[rf_path];
+
+ if (rf_path == RF90_PATH_A) {
+			if (final_ofdm_swing_index > pwr_tracking_limit) { /*BBSwing higher than limit*/
+
+ rtldm->remnant_cck_idx = final_ofdm_swing_index - pwr_tracking_limit;
+ /* CCK Follow the same compensate value as Path A*/
+ rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index - pwr_tracking_limit;
+
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[pwr_tracking_limit]);
+
+ rtldm->modify_txagc_flag_path_a = true;
+
+ /*Set TxAGC Page C{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_A Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d \n",
+ pwr_tracking_limit, rtldm->remnant_ofdm_swing_idx[rf_path]));
+ } else if (final_ofdm_swing_index < 0) {
+ rtldm->remnant_cck_idx = final_ofdm_swing_index;
+ /* CCK Follow the same compensate value as Path A*/
+ rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index;
+
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[0]);
+
+ rtldm->modify_txagc_flag_path_a = true;
+
+ /*Set TxAGC Page C{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d \n",
+ rtldm->remnant_ofdm_swing_idx[rf_path]));
+ } else {
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_ofdm_swing_index]);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_A Compensate with BBSwing , Final_OFDM_Swing_Index = %d \n",
+ final_ofdm_swing_index));
+
+ if(rtldm->modify_txagc_flag_path_a) { /*If TxAGC has changed, reset TxAGC again*/
+ rtldm->remnant_cck_idx = 0;
+ rtldm->remnant_ofdm_swing_idx[rf_path] = 0;
+
+ /*Set TxAGC Page C{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+ rtldm->modify_txagc_flag_path_a = false;
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_A pDM_Odm->Modify_TxAGC_Flag = FALSE \n"));
+ }
+ }
+ }
+
+ } else {
+ return;
+ }
+}
+
+
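+/* Thermal-meter based TX power tracking for the 8821AE: single path A
+ * variant of the 8812AE callback above.
+ */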
+void rtl8821ae_dm_txpower_tracking_callback_thermalmeter
+ (struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ u8 thermal_value = 0, delta, delta_lck, delta_iqk, p = 0, i = 0;
+ u8 thermal_value_avg_count = 0;
+ u32 thermal_value_avg = 0;
+
+ u8 ofdm_min_index = 6; /*OFDM BB Swing should be less than +3.0dB, which is required by Arthur*/
+ u8 index_for_channel = 0; /* GetRightChnlPlaceforIQK(pHalData->CurrentChannel)*/
+
+ /* 1. The following TWO tables decide the final index of OFDM/CCK swing table.*/
+ u8 *delta_swing_table_idx_tup_a;
+ u8 *delta_swing_table_idx_tdown_a;
+ u8 *delta_swing_table_idx_tup_b;
+ u8 *delta_swing_table_idx_tdown_b;
+
+	/*2. Initialization (7 steps in total)*/
+ rtl8821ae_get_delta_swing_table(hw, (u8**)&delta_swing_table_idx_tup_a,
+ (u8**)&delta_swing_table_idx_tdown_a,
+ (u8**)&delta_swing_table_idx_tup_b,
+ (u8**)&delta_swing_table_idx_tdown_b);
+
+ rtldm->btxpower_trackinginit = true;
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("===>rtl8812ae_dm_txpower_tracking_callback_thermalmeter, \
+ \n pDM_Odm->BbSwingIdxCckBase: %d, pDM_Odm->BbSwingIdxOfdmBase[A]:\
+ %d, pDM_Odm->DefaultOfdmIndex: %d\n",
+ rtldm->bb_swing_idx_cck_base,
+ rtldm->bb_swing_idx_ofdm_base[RF90_PATH_A],
+ rtldm->default_ofdm_index));
+
+ thermal_value = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER_8812A, 0xfc00); /*0x42: RF Reg[15:10] 88E*/
+ if( ! rtldm->txpower_track_control || rtlefuse->eeprom_thermalmeter == 0 ||
+ rtlefuse->eeprom_thermalmeter == 0xFF)
+ return;
+
+
+ /* 3. Initialize ThermalValues of RFCalibrateInfo*/
+
+ if(rtlhal->reloadtxpowerindex)
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("reload ofdm index for band switch\n"));
+ }
+
+ /*4. Calculate average thermal meter*/
+ rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermal_value;
+ rtldm->thermalvalue_avg_index++;
+ if(rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_8812A)
+ /*Average times = c.AverageThermalNum*/
+ rtldm->thermalvalue_avg_index = 0;
+
+ for(i = 0; i < AVG_THERMAL_NUM_8812A; i++)
+ {
+ if(rtldm->thermalvalue_avg[i])
+ {
+ thermal_value_avg += rtldm->thermalvalue_avg[i];
+ thermal_value_avg_count++;
+ }
+ }
+
+ if(thermal_value_avg_count) /*Calculate Average ThermalValue after average enough times*/
+ {
+ thermal_value = (u8)(thermal_value_avg / thermal_value_avg_count);
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter));
+ }
+
+ /*5. Calculate delta, delta_LCK, delta_IQK.*/
+ /*"delta" here is used to determine whether thermal value changes or not.*/
+ delta = (thermal_value > rtldm->thermalvalue) ? \
+ (thermal_value - rtldm->thermalvalue): \
+ (rtldm->thermalvalue - thermal_value);
+ delta_lck = (thermal_value > rtldm->thermalvalue_lck) ? \
+ (thermal_value - rtldm->thermalvalue_lck) : \
+ (rtldm->thermalvalue_lck - thermal_value);
+ delta_iqk = (thermal_value > rtldm->thermalvalue_iqk) ? \
+ (thermal_value - rtldm->thermalvalue_iqk) : \
+ (rtldm->thermalvalue_iqk - thermal_value);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
+ delta, delta_lck, delta_iqk));
+
+ /* 6. If necessary, do LCK. */
+
+ if (delta_lck >= IQK_THRESHOLD) /*Delta temperature is equal to or larger than 20 centigrade.*/
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("delta_LCK(%d) >= Threshold_IQK(%d)\n",
+ delta_lck, IQK_THRESHOLD));
+ rtldm->thermalvalue_lck = thermal_value;
+ rtl8821ae_phy_lccalibrate(hw);
+ }
+
+ /*7. If necessary, move the index of swing table to adjust Tx power.*/
+
+ if (delta > 0 && rtldm->txpower_track_control)
+ {
+		/*"delta" here is used to record the absolute value of the difference.*/
+ delta = thermal_value > rtlefuse->eeprom_thermalmeter ? \
+ (thermal_value - rtlefuse->eeprom_thermalmeter) : \
+ (rtlefuse->eeprom_thermalmeter - thermal_value);
+
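+		/*
+		 * "delta" now indexes the delta swing tables, so clamp it to the
+		 * table size; a larger thermal difference just reuses the last entry.
+		 */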
+ if (delta >= TXSCALE_TABLE_SIZE)
+ delta = TXSCALE_TABLE_SIZE - 1;
+
+ /*7.1 The Final Power Index = BaseIndex + PowerIndexOffset*/
+
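+		/*
+		 * Sign convention: a positive swing index means the die is hotter
+		 * than the eFuse calibration point (use the "up" table), a negative
+		 * one means it is cooler (use the "down" table).
+		 */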
+ if(thermal_value > rtlefuse->eeprom_thermalmeter) {
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("delta_swing_table_idx_tup_a[%d] = %d\n",
+ delta, delta_swing_table_idx_tup_a[delta]));
+ rtldm->delta_power_index_last[RF90_PATH_A] = rtldm->delta_power_index[RF90_PATH_A];
+ rtldm->delta_power_index[RF90_PATH_A] = delta_swing_table_idx_tup_a[delta];
+
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A] = delta_swing_table_idx_tup_a[delta];
+ /*Record delta swing for mix mode power tracking*/
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A]));
+
+ } else {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("delta_swing_table_idx_tdown_a[%d] = %d\n",
+ delta, delta_swing_table_idx_tdown_a[delta]));
+
+ rtldm->delta_power_index_last[RF90_PATH_A] = rtldm->delta_power_index[RF90_PATH_A];
+ rtldm->delta_power_index[RF90_PATH_A] = -1 * delta_swing_table_idx_tdown_a[delta];
+
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A] = -1 * delta_swing_table_idx_tdown_a[delta];
+ /* Record delta swing for mix mode power tracking*/
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A]));
+ }
+
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("\n\n================================ [Path-%c] \
+ Calculating PowerIndexOffset ================================\n",
+ (p == RF90_PATH_A ? 'A' : 'B')));
+
+ if (rtldm->delta_power_index[p] == rtldm->delta_power_index_last[p])
+ /*If Thermal value changes but lookup table value still the same*/
+ rtldm->power_index_offset[p] = 0;
+ else
+ rtldm->power_index_offset[p] =
+ rtldm->delta_power_index[p] - rtldm->delta_power_index_last[p];
+ /*Power Index Diff between 2 times Power Tracking*/
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("[Path-%c] PowerIndexOffset(%d) = DeltaPowerIndex(%d) - DeltaPowerIndexLast(%d)\n",
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->power_index_offset[p],
+ rtldm->delta_power_index[p] ,
+ rtldm->delta_power_index_last[p]));
+
+ rtldm->ofdm_index[p] =
+ rtldm->bb_swing_idx_ofdm_base[p] + rtldm->power_index_offset[p];
+ rtldm->cck_index =
+ rtldm->bb_swing_idx_cck_base + rtldm->power_index_offset[p];
+
+ rtldm->bb_swing_idx_cck = rtldm->cck_index;
+ rtldm->bb_swing_idx_ofdm[p] = rtldm->ofdm_index[p];
+
+ /*************Print BB Swing Base and Index Offset*************/
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n",
+ rtldm->bb_swing_idx_cck,
+ rtldm->bb_swing_idx_cck_base,
+ rtldm->power_index_offset[p]));
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n",
+ rtldm->bb_swing_idx_ofdm[p],
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->bb_swing_idx_ofdm_base[p],
+ rtldm->power_index_offset[p]));
+
+ /*7.1 Handle boundary conditions of index.*/
+
+
+ if(rtldm->ofdm_index[p] > TXSCALE_TABLE_SIZE -1)
+ {
+ rtldm->ofdm_index[p] = TXSCALE_TABLE_SIZE -1;
+ }
+ else if (rtldm->ofdm_index[p] < ofdm_min_index)
+ {
+ rtldm->ofdm_index[p] = ofdm_min_index;
+ }
+ }
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("\n\n======================================================\
+ ==================================================\n"));
+ if(rtldm->cck_index > TXSCALE_TABLE_SIZE -1)
+ rtldm->cck_index = TXSCALE_TABLE_SIZE -1;
+ else if (rtldm->cck_index < 0)
+ rtldm->cck_index = 0;
+ } else {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("The thermal meter is unchanged or TxPowerTracking OFF(%d): \
+ ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n",
+ rtldm->txpower_track_control,
+ thermal_value,
+ rtldm->thermalvalue));
+
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+ rtldm->power_index_offset[p] = 0;
+ }
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("TxPowerTracking: [CCK] Swing Current Index: %d, Swing Base Index: %d\n",
+ rtldm->cck_index, rtldm->bb_swing_idx_cck_base)); /*Print Swing base & current*/
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("TxPowerTracking: [OFDM] Swing Current Index: %d, Swing Base Index[%c]: %d\n",
+ rtldm->ofdm_index[p],
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->bb_swing_idx_ofdm_base[p]));
+ }
+
+ if ((rtldm->power_index_offset[RF90_PATH_A] != 0 ||
+ rtldm->power_index_offset[RF90_PATH_B] != 0 ) &&
+ rtldm->txpower_track_control)
+ {
+ /*7.2 Configure the Swing Table to adjust Tx Power.*/
+ /*Always TRUE after Tx Power is adjusted by power tracking.*/
+ /*
+ 2012/04/23 MH According to Luke's suggestion, we can not write BB digital
+ to increase TX power. Otherwise, EVM will be bad.
+
+ 2012/04/25 MH Add for tx power tracking to set tx power in tx agc for 88E.
+ */
+ if (thermal_value > rtldm->thermalvalue)
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature Increasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_A],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue));
+ } else if (thermal_value < rtldm->thermalvalue) { /*Low temperature*/
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_A],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue));
+ }
+
+ if (thermal_value > rtlefuse->eeprom_thermalmeter) {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature(%d) higher than PG value(%d)\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter));
+
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("**********Enter POWER Tracking MIX_MODE**********\n"));
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+ rtl8821ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, index_for_channel);
+
+ } else {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature(%d) lower than PG value(%d)\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter));
+
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("**********Enter POWER Tracking MIX_MODE**********\n"));
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+ rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, index_for_channel);
+
+ }
+
+ rtldm->bb_swing_idx_cck_base = rtldm->bb_swing_idx_cck; /*Record last time Power Tracking result as base.*/
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+ rtldm->bb_swing_idx_ofdm_base[p] = rtldm->bb_swing_idx_ofdm[p];
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+ rtldm->thermalvalue, thermal_value));
+
+ rtldm->thermalvalue = thermal_value; /*Record last Power Tracking Thermal Value*/
+
+ }
+ /*Delta temperature is equal to or larger than 20 centigrade (When threshold is 8).*/
+	if (delta_iqk >= IQK_THRESHOLD) {
+		if (!rtlphy->b_iqk_in_progress) {
+
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = true;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+
+ rtl8821ae_do_iqk(hw, delta_iqk, thermal_value, 8);
+
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = false;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+ }
+ }
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("<===rtl8812ae_dm_txpower_tracking_callback_thermalmeter\n"));
+}
+
+
+void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ static u8 tm_trigger = 0;
+
+ //if (!rtlpriv->dm.btxpower_tracking)
+ // return;
+
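+	/*
+	 * Two-pass scheme: one watchdog pass only arms the RF thermal meter,
+	 * the following pass reads it back and runs the tracking callback.
+	 */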
+ if (!tm_trigger) {
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER_88E, BIT(17)|BIT(16),
+ 0x03);
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Trigger 8821ae Thermal Meter!!\n"));
+ tm_trigger = 1;
+ return;
+ } else {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Schedule TxPowerTracking !!\n"));
+
+ rtl8821ae_dm_txpower_tracking_callback_thermalmeter(hw);
+ tm_trigger = 0;
+ }
+}
+
+
+void rtl8821ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rate_adaptive *p_ra = &(rtlpriv->ra);
+ u32 low_rssithresh_for_ra = p_ra->low2high_rssi_thresh_for_ra;
+ u32 high_rssithresh_for_ra = p_ra->high_rssi_thresh_for_ra;
+ u8 go_up_gap = 5;
+ struct ieee80211_sta *sta = NULL;
+
+ if (is_hal_stop(rtlhal)) {
+ RT_TRACE(COMP_RATE, DBG_LOUD,
+ ("driver is going to unload\n"));
+ return;
+ }
+
+ if (!rtlpriv->dm.b_useramask) {
+ RT_TRACE(COMP_RATE, DBG_LOUD,
+ ("driver does not control rate adaptive mask\n"));
+ return;
+ }
+
+ if (mac->link_state == MAC80211_LINKED &&
+ mac->opmode == NL80211_IFTYPE_STATION) {
+
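+		/*
+		 * Add hysteresis: when we are currently in a lower state, raise the
+		 * up-going RSSI thresholds by go_up_gap so the level is not promoted
+		 * on a marginal improvement and then demoted right back.
+		 */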
+ switch (p_ra->pre_ratr_state) {
+ case DM_RATR_STA_MIDDLE:
+ high_rssithresh_for_ra += go_up_gap;
+ break;
+ case DM_RATR_STA_LOW:
+ high_rssithresh_for_ra += go_up_gap;
+ low_rssithresh_for_ra += go_up_gap;
+ break;
+ default:
+ break;
+ }
+
+ if (rtlpriv->dm.undecorated_smoothed_pwdb >
+ (long)high_rssithresh_for_ra)
+ p_ra->ratr_state = DM_RATR_STA_HIGH;
+ else if (rtlpriv->dm.undecorated_smoothed_pwdb >
+ (long)low_rssithresh_for_ra)
+ p_ra->ratr_state = DM_RATR_STA_MIDDLE;
+ else
+ p_ra->ratr_state = DM_RATR_STA_LOW;
+
+ if (p_ra->pre_ratr_state != p_ra->ratr_state ) {
+ RT_TRACE(COMP_RATE, DBG_LOUD,
+ ("RSSI = %ld\n",
+ rtlpriv->dm.undecorated_smoothed_pwdb));
+ RT_TRACE(COMP_RATE, DBG_LOUD,
+ ("RSSI_LEVEL = %d\n", p_ra->ratr_state));
+ RT_TRACE(COMP_RATE, DBG_LOUD,
+ ("PreState = %d, CurState = %d\n",
+ p_ra->pre_ratr_state, p_ra->ratr_state));
+
+ rcu_read_lock();
+ sta = rtl_find_sta(hw, mac->bssid);
+ if (sta)
+ rtlpriv->cfg->ops->update_rate_tbl(hw, sta, p_ra->ratr_state);
+ rcu_read_unlock();
+
+ p_ra->pre_ratr_state = p_ra->ratr_state;
+ }
+ }
+}
+
+bool rtl8821ae_dm_is_edca_turbo_disable(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->btcoexist.btc_ops->btc_is_disable_edca_turbo(rtlpriv))
+ return true;
+ if (rtlpriv->mac80211.mode == WIRELESS_MODE_B)
+ return true;
+
+ return false;
+}
+
+void rtl8821ae_dm_edca_choose_traffic_idx(
+ struct ieee80211_hw *hw, u64 cur_tx_bytes, u64 cur_rx_bytes, bool b_bias_on_rx,
+ bool *pb_is_cur_rdl_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
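+	/*
+	 * *pb_is_cur_rdl_state ends up true when the link is treated as
+	 * download-dominant; one direction must carry more than 4x the bytes
+	 * of the other before the bias flips.
+	 */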
+ if(b_bias_on_rx)
+ {
+ if (cur_tx_bytes > (cur_rx_bytes*4)) {
+ *pb_is_cur_rdl_state = false;
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("Uplink Traffic\n "));
+ } else {
+ *pb_is_cur_rdl_state = true;
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("Balance Traffic\n"));
+ }
+ } else {
+ if (cur_rx_bytes > (cur_tx_bytes*4)) {
+ *pb_is_cur_rdl_state = true;
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("Downlink Traffic\n"));
+ } else {
+ *pb_is_cur_rdl_state = false;
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("Balance Traffic\n"));
+ }
+ }
+}
+
+static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+
+ /*Keep past Tx/Rx packet count for RT-to-RT EDCA turbo.*/
+ unsigned long cur_tx_ok_cnt = 0;
+ unsigned long cur_rx_ok_cnt = 0;
+ u32 edca_be_ul = 0x5ea42b;
+ u32 edca_be_dl = 0x5ea42b;
+ u32 edca_be = 0x5ea42b;
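+	/*
+	 * 0x5ea42b is the default value written to the BE EDCA register while
+	 * turbo is active; the Cisco 2.4 GHz 11n case below overrides it from
+	 * the edca_setting_ul/dl[] tables.
+	 */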
+ u8 iot_peer = 0;
+ bool *pb_is_cur_rdl_state = NULL;
+ bool b_last_is_cur_rdl_state = false;
+ bool b_bias_on_rx = false;
+ bool b_edca_turbo_on = false;
+
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("rtl8821ae_dm_check_edca_turbo=====>"));
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+		("Original BE PARAM: 0x%x\n",
+ rtl_read_dword(rtlpriv, DM_REG_EDCA_BE_11N)));
+
+	/*===============================
+	list parameters for different platforms
+	===============================*/
+ b_last_is_cur_rdl_state = rtlpriv->dm.bis_cur_rdlstate;
+ pb_is_cur_rdl_state = &( rtlpriv->dm.bis_cur_rdlstate);
+
+ cur_tx_ok_cnt = rtlpriv->stats.txbytesunicast - rtldm->last_tx_ok_cnt;
+ cur_rx_ok_cnt = rtlpriv->stats.rxbytesunicast - rtldm->last_rx_ok_cnt;
+
+ rtldm->last_tx_ok_cnt = rtlpriv->stats.txbytesunicast;
+ rtldm->last_rx_ok_cnt = rtlpriv->stats.rxbytesunicast;
+
+ iot_peer = rtlpriv->mac80211.vendor;
+ b_bias_on_rx = (iot_peer == PEER_RAL || iot_peer == PEER_ATH) ?
+ true : false;
+ b_edca_turbo_on = ((!rtlpriv->dm.bis_any_nonbepkts) &&
+ (!rtlpriv->dm.b_disable_framebursting)) ?
+ true : false;
+
+ /*if (rtl8821ae_dm_is_edca_turbo_disable(hw))
+ goto dm_CheckEdcaTurbo_EXIT;*/
+
+ if ((iot_peer == PEER_CISCO) && (mac->mode == WIRELESS_MODE_N_24G))
+ {
+ edca_be_dl = edca_setting_dl[iot_peer];
+ edca_be_ul = edca_setting_ul[iot_peer];
+ }
+
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("bIsAnyNonBEPkts : 0x%x bDisableFrameBursting : 0x%x \n",
+ rtlpriv->dm.bis_any_nonbepkts, rtlpriv->dm.b_disable_framebursting));
+
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("bEdcaTurboOn : 0x%x bBiasOnRx : 0x%x\n",
+ b_edca_turbo_on, b_bias_on_rx));
+
+ if (b_edca_turbo_on) {
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("curTxOkCnt : 0x%lx \n",cur_tx_ok_cnt));
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("curRxOkCnt : 0x%lx \n",cur_rx_ok_cnt));
+ if(b_bias_on_rx)
+ rtl8821ae_dm_edca_choose_traffic_idx(hw, cur_tx_ok_cnt,
+ cur_rx_ok_cnt, true, pb_is_cur_rdl_state);
+ else
+ rtl8821ae_dm_edca_choose_traffic_idx(hw, cur_tx_ok_cnt,
+ cur_rx_ok_cnt, false, pb_is_cur_rdl_state);
+
+		edca_be = (*pb_is_cur_rdl_state) ? edca_be_dl : edca_be_ul;
+
+ rtl_write_dword(rtlpriv, DM_REG_EDCA_BE_11N, edca_be);
+
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("EDCA Turbo on: EDCA_BE:0x%x\n", edca_be));
+
+ rtlpriv->dm.bcurrent_turbo_edca = true;
+
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("EDCA_BE_DL : 0x%x EDCA_BE_UL : 0x%x EDCA_BE : 0x%x \n",
+ edca_be_dl, edca_be_ul, edca_be));
+ } else {
+ if (rtlpriv->dm.bcurrent_turbo_edca) {
+ u8 tmp = AC0_BE;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ (u8 *) (&tmp));
+ }
+ rtlpriv->dm.bcurrent_turbo_edca = false;
+ }
+
+/* dm_CheckEdcaTurbo_EXIT: */
+ rtlpriv->dm.bis_any_nonbepkts = false;
+ rtldm->last_tx_ok_cnt = rtlpriv->stats.txbytesunicast;
+ rtldm->last_rx_ok_cnt = rtlpriv->stats.rxbytesunicast;
+}
+
+static void rtl8821ae_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 cur_cck_cca_thresh;
+
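+	/*
+	 * Pick the CCK CCA/packet-detection threshold: it is raised when the
+	 * RSSI is strong or CCK false alarms exceed 1000, and left at 0x40
+	 * otherwise.
+	 */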
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+ /*dm_digtable.rssi_val_min = rtl8821ae_dm_initial_gain_min_pwdb(hw);*/
+ if (dm_digtable.rssi_val_min > 25)
+ cur_cck_cca_thresh = 0xcd;
+ else if ((dm_digtable.rssi_val_min <= 25) && (dm_digtable.rssi_val_min > 10))
+ cur_cck_cca_thresh = 0x83;
+ else {
+ if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
+ cur_cck_cca_thresh = 0x83;
+ else
+ cur_cck_cca_thresh = 0x40;
+ }
+
+ } else {
+ if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
+ cur_cck_cca_thresh = 0x83;
+ else
+ cur_cck_cca_thresh = 0x40;
+ }
+
+ if (dm_digtable.cur_cck_cca_thres != cur_cck_cca_thresh) {
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, cur_cck_cca_thresh);
+ }
+
+ dm_digtable.pre_cck_cca_thres = dm_digtable.cur_cck_cca_thres;
+ dm_digtable.cur_cck_cca_thres = cur_cck_cca_thresh;
+ RT_TRACE(COMP_DIG, DBG_TRACE,
+ ("CCK cca thresh hold =%x\n", dm_digtable.cur_cck_cca_thres));
+
+}
+
+void rtl8821ae_dm_dynamic_edcca(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool b_fw_current_in_ps_mode = false;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw,HW_VAR_FW_PSMODE_STATUS, \
+ (u8*)(&b_fw_current_in_ps_mode));
+ if (b_fw_current_in_ps_mode)
+ return;
+}
+
+void rtl8812ae_dm_update_txpath(struct ieee80211_hw *hw, u8 path)
+{
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtldm->resp_tx_path != path) {
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("Need to Update Tx Path\n"));
+ if (path == RF90_PATH_A) {
+ /*Tx by Reg*/
+ rtl_set_bbreg(hw, 0x80c, 0xFFF0, 0x111);
+ /*Resp Tx by Txinfo*/
+ rtl_set_bbreg(hw, 0x6d8, BIT(7) | BIT(6), 1);
+ } else {
+ /*Tx by Reg*/
+ rtl_set_bbreg(hw, 0x80c, 0xFFF0, 0x222);
+ /*Resp Tx by Txinfo*/
+ rtl_set_bbreg(hw, 0x6d8, BIT(7) |BIT(6), 2);
+ }
+ }
+ rtldm->resp_tx_path = path;
+	RT_TRACE(COMP_DIG, DBG_LOUD,
+		("Path=%s\n", (path == RF90_PATH_A) ?
+		"RF90_PATH_A" : "RF90_PATH_B"));
+}
+
+void rtl8812ae_dm_path_diversity_init(struct ieee80211_hw *hw)
+{
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+
+ //rtl_set_bbreg(hw, 0x80c , BIT(29), 1); /*Tx path from Reg*/
+ rtl_set_bbreg(hw, 0x80c , 0xFFF0, 0x111); /*Tx by Reg*/
+ rtl_set_bbreg(hw, 0x6d8 , BIT(7) | BIT(6), 1); /*Resp Tx by Txinfo*/
+ rtl8812ae_dm_update_txpath(hw, RF90_PATH_A);
+
+ rtldm->path_sel = 1; /* TxInfo default at path-A*/
+}
+
+void rtl812ae_dm_set_txpath_by_txinfo(struct ieee80211_hw *hw,
+ u8 *pdesc)
+{
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+
+ SET_TX_DESC_TX_ANT(pdesc, rtldm->path_sel);
+}
+
+void rtl8812ae_dm_path_statistics(struct ieee80211_hw *hw,
+ u32 rssi_a, u32 rssi_b)
+{
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+
+ rtldm->patha_sum += rssi_a;
+ rtldm->patha_cnt ++;
+
+ rtldm->pathb_sum += rssi_b;
+ rtldm->pathb_cnt ++;
+}
+
+void rtl8812ae_dm_path_diversity(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u32 rssi_avg_a = 0;
+ u32 rssi_avg_b = 0;
+ u32 local_min_rssi = 0;
+ u32 min_rssi = 0xFF;
+ u8 tx_resp_path=0, target_path;
+ struct ieee80211_sta *sta = NULL;
+
+ sta = rtl_find_sta(hw, mac->bssid);
+ if (sta) {
+		/*Calculate RSSI per path*/
+ rssi_avg_a = (rtldm->patha_cnt != 0) ? \
+ (rtldm->patha_sum / rtldm->patha_cnt) : 0;
+ rssi_avg_b = (rtldm->pathb_cnt != 0) ? \
+ (rtldm->pathb_sum / rtldm->pathb_cnt) : 0;
+
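+		/*
+		 * Keep the current response path when both averages are equal;
+		 * otherwise respond on the path with the higher average RSSI.
+		 */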
+ target_path = (rssi_avg_a == rssi_avg_b) ? rtldm->resp_tx_path : \
+ ((rssi_avg_a>=rssi_avg_b) ? RF90_PATH_A : RF90_PATH_B);
+
+ RT_TRACE(COMP_DIG, DBG_TRACE, \
+ ("assoc_id=%d, PathA_Sum=%d, PathA_Cnt=%d\n", \
+ mac->assoc_id, rtldm->patha_sum, rtldm->patha_cnt));
+ RT_TRACE(COMP_DIG, DBG_TRACE, \
+ ("assoc_id=%d, PathB_Sum=%d, PathB_Cnt=%d\n", \
+ mac->assoc_id, rtldm->pathb_sum, rtldm->pathb_cnt));
+ RT_TRACE(COMP_DIG, DBG_TRACE, \
+ ("assoc_id=%d, RssiAvgA= %d, RssiAvgB= %d\n", \
+ mac->assoc_id, rssi_avg_a, rssi_avg_b));
+
+ /*Select Resp Tx Path*/
+ local_min_rssi = (rssi_avg_a > rssi_avg_b) ? rssi_avg_b : rssi_avg_a;
+ if(local_min_rssi < min_rssi)
+ {
+ min_rssi = local_min_rssi;
+ tx_resp_path = target_path;
+ }
+
+ /*Select Tx DESC*/
+ if(target_path == RF90_PATH_A)
+ rtldm->path_sel = 1;
+ else
+ rtldm->path_sel = 2;
+
+ RT_TRACE(COMP_DIG, DBG_TRACE, \
+ ("Tx from TxInfo, TargetPath=%s\n", \
+ (target_path==RF90_PATH_A) ? \
+ "ODM_RF_PATH_A":"ODM_RF_PATH_B"));
+ RT_TRACE(COMP_DIG, DBG_TRACE, \
+ ("pDM_PathDiv->PathSel= %d\n", \
+ rtldm->path_sel));
+ }
+ rtldm->patha_cnt = 0;
+ rtldm->patha_sum = 0;
+ rtldm->pathb_cnt = 0;
+ rtldm->pathb_sum = 0;
+}
+
+void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ u8 crystal_cap;
+ u32 packet_count;
+ int cfo_khz_a,cfo_khz_b,cfo_ave = 0, adjust_xtal = 0;
+ int cfo_ave_diff;
+
+ if (rtlpriv->mac80211.link_state < MAC80211_LINKED){
+ /*1.Enable ATC*/
+ if (rtldm->atc_status == ATC_STATUS_OFF)
+ {
+ rtl_set_bbreg(hw, RFC_AREA, BIT(14), ATC_STATUS_ON);
+ rtldm->atc_status = ATC_STATUS_ON;
+ }
+
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): No link!!\n"));
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): atc_status = %d\n", \
+ rtldm->atc_status));
+
+ if (rtldm->crystal_cap != rtlpriv->efuse.crystalcap)
+ {
+ rtldm->crystal_cap = rtlpriv->efuse.crystalcap;
+			crystal_cap = rtldm->crystal_cap & 0x3f;
+ rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, \
+ 0x7ff80000, (crystal_cap | (crystal_cap << 6)));
+ }
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): crystal_cap = 0x%x\n", \
+ rtldm->crystal_cap));
+ }else{
+ /*1. Calculate CFO for path-A & path-B*/
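+		/*
+		 * cfo_tail[] holds the raw CFO report from the PHY; the 3125/1280
+		 * factor (about 2.44 per unit) converts it to kHz.
+		 */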
+ cfo_khz_a = (int)(rtldm->cfo_tail[0] * 3125) / 1280;
+ cfo_khz_b = (int)(rtldm->cfo_tail[1] * 3125) / 1280;
+ packet_count = rtldm->packet_count;
+
+ /*2.No new packet*/
+ if (packet_count == rtldm->packet_count_pre) {
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): packet counter doesn't change\n"));
+ return;
+ }
+
+ rtldm->packet_count_pre = packet_count;
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): packet counter = %d\n", \
+ rtldm->packet_count));
+
+ /*3.Average CFO*/
+ if (rtlpriv->phy.rf_type == RF_1T1R)
+ cfo_ave = cfo_khz_a;
+ else
+ cfo_ave = (cfo_khz_a + cfo_khz_b) >> 1;
+
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch():"
+ "cfo_khz_a = %dkHz, cfo_khz_b = %dkHz, cfo_ave = %dkHz\n",
+ cfo_khz_a, cfo_khz_b, cfo_ave));
+
+ /*4.Avoid abnormal large CFO*/
+ cfo_ave_diff = (rtldm->cfo_ave_pre >= cfo_ave)?
+ (rtldm->cfo_ave_pre - cfo_ave):
+ (cfo_ave - rtldm->cfo_ave_pre);
+
+ if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0){
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): first large CFO hit\n"));
+ rtldm->large_cfo_hit = 1;
+ return;
+ }
+ else
+ rtldm->large_cfo_hit = 0;
+
+ rtldm->cfo_ave_pre = cfo_ave;
+
+ /*CFO tracking by adjusting Xtal cap.*/
+
+ /*1.Dynamic Xtal threshold*/
+ if (cfo_ave >= -rtldm->cfo_threshold &&
+ cfo_ave <= rtldm->cfo_threshold &&
+ rtldm->is_freeze == 0){
+ if (rtldm->cfo_threshold == CFO_THRESHOLD_XTAL){
+ rtldm->cfo_threshold = CFO_THRESHOLD_XTAL + 10;
+ rtldm->is_freeze = 1;
+ }
+ else
+ rtldm->cfo_threshold = CFO_THRESHOLD_XTAL;
+ }
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): Dynamic threshold = %d\n", \
+ rtldm->cfo_threshold));
+
+ /* 2.Calculate Xtal offset*/
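+		/*
+		 * Step the crystal cap by a quarter of the CFO excess beyond the
+		 * threshold, biased by one step so any threshold crossing moves
+		 * the cap at least once.
+		 */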
+ if (cfo_ave > rtldm->cfo_threshold && rtldm->crystal_cap < 0x3f)
+ adjust_xtal = ((cfo_ave - CFO_THRESHOLD_XTAL) >> 2) + 1;
+		else if ((cfo_ave < -rtldm->cfo_threshold) && rtldm->crystal_cap > 0)
+ adjust_xtal = ((cfo_ave + CFO_THRESHOLD_XTAL) >> 2) - 1;
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): "
+ "Crystal cap = 0x%x, Crystal cap offset = %d\n",
+ rtldm->crystal_cap, adjust_xtal));
+
+		/*3.Adjust Crystal Cap.*/
+ if (adjust_xtal != 0){
+ rtldm->is_freeze = 0;
+ rtldm->crystal_cap += adjust_xtal;
+
+ if (rtldm->crystal_cap > 0x3f)
+ rtldm->crystal_cap = 0x3f;
+ else if (rtldm->crystal_cap < 0)
+ rtldm->crystal_cap = 0;
+
+			crystal_cap = rtldm->crystal_cap & 0x3f;
+ rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, \
+ 0x7ff80000, (crystal_cap | (crystal_cap << 6)));
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): New crystal cap = 0x%x \n", \
+ rtldm->crystal_cap));
+ }
+ }
+
+}
+
+void rtl8821ae_dm_watchdog(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool b_fw_current_inpsmode = false;
+ bool b_fw_ps_awake = true;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *) (&b_fw_current_inpsmode));
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
+ (u8 *) (&b_fw_ps_awake));
+
+ if(ppsc->p2p_ps_info.p2p_ps_mode)
+ b_fw_ps_awake = false;
+
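+	/*
+	 * Run the dynamic mechanisms only while RF is on, the firmware is awake
+	 * (not in LPS or P2P power save) and no RF power state change is in
+	 * progress.
+	 */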
+ if((ppsc->rfpwr_state == ERFON) &&
+ ((!b_fw_current_inpsmode) && b_fw_ps_awake) &&
+ (!ppsc->rfchange_inprogress)) {
+ rtl8821ae_dm_common_info_self_update(hw);
+ rtl8821ae_dm_false_alarm_counter_statistics(hw);
+ rtl8821ae_dm_check_rssi_monitor(hw);
+ rtl8821ae_dm_dig(hw);
+ rtl8821ae_dm_dynamic_edcca(hw);
+ rtl8821ae_dm_cck_packet_detection_thresh(hw);
+ rtl8821ae_dm_refresh_rate_adaptive_mask(hw);
+ rtl8821ae_dm_check_edca_turbo(hw);
+ rtl8821ae_dm_dynamic_atc_switch(hw);
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtl8812ae_dm_check_txpower_tracking_thermalmeter(hw);
+ else
+ rtl8821ae_dm_check_txpower_tracking_thermalmeter(hw);
+ rtl8821ae_dm_iq_calibrate(hw);
+ if (rtlpriv->cfg->ops->get_btc_status()){
+ rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv);
+ }
+ }
+
+ rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0;
+}
+
+void rtl8821ae_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw,
+ u8 *pdesc, u32 mac_id)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct fast_ant_trainning *pfat_table= &(rtldm->fat_table);
+
+ if (rtlhal->hw_type != HARDWARE_TYPE_RTL8812AE)
+ return;
+
+	if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)
+		SET_TX_DESC_TX_ANT(pdesc, pfat_table->antsel_a[mac_id]);
+}
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/dm.h b/drivers/staging/rtl8821ae/rtl8821ae/dm.h
new file mode 100644
index 000000000000..ebbff9b6cacf
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/dm.h
@@ -0,0 +1,426 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_DM_H__
+#define __RTL8821AE_DM_H__
+
+#define MAIN_ANT 0
+#define AUX_ANT 1
+#define MAIN_ANT_CG_TRX 1
+#define AUX_ANT_CG_TRX 0
+#define MAIN_ANT_CGCS_RX 0
+#define AUX_ANT_CGCS_RX 1
+
+#define TXSCALE_TABLE_SIZE 37
+
+/*RF REG LIST*/
+#define DM_REG_RF_MODE_11N 0x00
+#define DM_REG_RF_0B_11N 0x0B
+#define DM_REG_CHNBW_11N 0x18
+#define DM_REG_T_METER_11N 0x24
+#define DM_REG_RF_25_11N 0x25
+#define DM_REG_RF_26_11N 0x26
+#define DM_REG_RF_27_11N 0x27
+#define DM_REG_RF_2B_11N 0x2B
+#define DM_REG_RF_2C_11N 0x2C
+#define DM_REG_RXRF_A3_11N 0x3C
+#define DM_REG_T_METER_92D_11N 0x42
+#define DM_REG_T_METER_88E_11N 0x42
+
+
+
+/*BB REG LIST*/
+/*PAGE 8 */
+#define DM_REG_BB_CTRL_11N 0x800
+#define DM_REG_RF_PIN_11N 0x804
+#define DM_REG_PSD_CTRL_11N 0x808
+#define DM_REG_TX_ANT_CTRL_11N 0x80C
+#define DM_REG_BB_PWR_SAV5_11N 0x818
+#define DM_REG_CCK_RPT_FORMAT_11N 0x824
+#define DM_REG_RX_DEFUALT_A_11N 0x858
+#define DM_REG_RX_DEFUALT_B_11N 0x85A
+#define DM_REG_BB_PWR_SAV3_11N 0x85C
+#define DM_REG_ANTSEL_CTRL_11N 0x860
+#define DM_REG_RX_ANT_CTRL_11N 0x864
+#define DM_REG_PIN_CTRL_11N 0x870
+#define DM_REG_BB_PWR_SAV1_11N 0x874
+#define DM_REG_ANTSEL_PATH_11N 0x878
+#define DM_REG_BB_3WIRE_11N 0x88C
+#define DM_REG_SC_CNT_11N 0x8C4
+#define DM_REG_PSD_DATA_11N 0x8B4
+/*PAGE 9*/
+#define DM_REG_ANT_MAPPING1_11N 0x914
+#define DM_REG_ANT_MAPPING2_11N 0x918
+/*PAGE A*/
+#define DM_REG_CCK_ANTDIV_PARA1_11N 0xA00
+#define DM_REG_CCK_CCA_11N 0xA0A
+#define DM_REG_CCK_CCA_11AC 0xA0A
+#define DM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
+#define DM_REG_CCK_ANTDIV_PARA3_11N 0xA10
+#define DM_REG_CCK_ANTDIV_PARA4_11N 0xA14
+#define DM_REG_CCK_FILTER_PARA1_11N 0xA22
+#define DM_REG_CCK_FILTER_PARA2_11N 0xA23
+#define DM_REG_CCK_FILTER_PARA3_11N 0xA24
+#define DM_REG_CCK_FILTER_PARA4_11N 0xA25
+#define DM_REG_CCK_FILTER_PARA5_11N 0xA26
+#define DM_REG_CCK_FILTER_PARA6_11N 0xA27
+#define DM_REG_CCK_FILTER_PARA7_11N 0xA28
+#define DM_REG_CCK_FILTER_PARA8_11N 0xA29
+#define DM_REG_CCK_FA_RST_11N 0xA2C
+#define DM_REG_CCK_FA_MSB_11N 0xA58
+#define DM_REG_CCK_FA_LSB_11N 0xA5C
+#define DM_REG_CCK_CCA_CNT_11N 0xA60
+#define DM_REG_BB_PWR_SAV4_11N 0xA74
+/*PAGE B */
+#define DM_REG_LNA_SWITCH_11N 0xB2C
+#define DM_REG_PATH_SWITCH_11N 0xB30
+#define DM_REG_RSSI_CTRL_11N 0xB38
+#define DM_REG_CONFIG_ANTA_11N 0xB68
+#define DM_REG_RSSI_BT_11N 0xB9C
+/*PAGE C */
+#define DM_REG_OFDM_FA_HOLDC_11N 0xC00
+#define DM_REG_RX_PATH_11N 0xC04
+#define DM_REG_TRMUX_11N 0xC08
+#define DM_REG_OFDM_FA_RSTC_11N 0xC0C
+#define DM_REG_RXIQI_MATRIX_11N 0xC14
+#define DM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C
+#define DM_REG_IGI_A_11N 0xC50
+#define DM_REG_IGI_A_11AC 0xC50
+#define DM_REG_ANTDIV_PARA2_11N 0xC54
+#define DM_REG_IGI_B_11N 0xC58
+#define DM_REG_IGI_B_11AC 0xE50
+#define DM_REG_ANTDIV_PARA3_11N 0xC5C
+#define DM_REG_BB_PWR_SAV2_11N 0xC70
+#define DM_REG_RX_OFF_11N 0xC7C
+#define DM_REG_TXIQK_MATRIXA_11N 0xC80
+#define DM_REG_TXIQK_MATRIXB_11N 0xC88
+#define DM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94
+#define DM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C
+#define DM_REG_RXIQK_MATRIX_LSB_11N 0xCA0
+#define DM_REG_ANTDIV_PARA1_11N 0xCA4
+#define DM_REG_OFDM_FA_TYPE1_11N 0xCF0
+/*PAGE D */
+#define DM_REG_OFDM_FA_RSTD_11N 0xD00
+#define DM_REG_OFDM_FA_TYPE2_11N 0xDA0
+#define DM_REG_OFDM_FA_TYPE3_11N 0xDA4
+#define DM_REG_OFDM_FA_TYPE4_11N 0xDA8
+/*PAGE E */
+#define DM_REG_TXAGC_A_6_18_11N 0xE00
+#define DM_REG_TXAGC_A_24_54_11N 0xE04
+#define DM_REG_TXAGC_A_1_MCS32_11N 0xE08
+#define DM_REG_TXAGC_A_MCS0_3_11N 0xE10
+#define DM_REG_TXAGC_A_MCS4_7_11N 0xE14
+#define DM_REG_TXAGC_A_MCS8_11_11N 0xE18
+#define DM_REG_TXAGC_A_MCS12_15_11N 0xE1C
+#define DM_REG_FPGA0_IQK_11N 0xE28
+#define DM_REG_TXIQK_TONE_A_11N 0xE30
+#define DM_REG_RXIQK_TONE_A_11N 0xE34
+#define DM_REG_TXIQK_PI_A_11N 0xE38
+#define DM_REG_RXIQK_PI_A_11N 0xE3C
+#define DM_REG_TXIQK_11N 0xE40
+#define DM_REG_RXIQK_11N 0xE44
+#define DM_REG_IQK_AGC_PTS_11N 0xE48
+#define DM_REG_IQK_AGC_RSP_11N 0xE4C
+#define DM_REG_BLUETOOTH_11N 0xE6C
+#define DM_REG_RX_WAIT_CCA_11N 0xE70
+#define DM_REG_TX_CCK_RFON_11N 0xE74
+#define DM_REG_TX_CCK_BBON_11N 0xE78
+#define DM_REG_OFDM_RFON_11N 0xE7C
+#define DM_REG_OFDM_BBON_11N 0xE80
+#define DM_REG_TX2RX_11N 0xE84
+#define DM_REG_TX2TX_11N 0xE88
+#define DM_REG_RX_CCK_11N 0xE8C
+#define DM_REG_RX_OFDM_11N 0xED0
+#define DM_REG_RX_WAIT_RIFS_11N 0xED4
+#define DM_REG_RX2RX_11N 0xED8
+#define DM_REG_STANDBY_11N 0xEDC
+#define DM_REG_SLEEP_11N 0xEE0
+#define DM_REG_PMPD_ANAEN_11N 0xEEC
+
+
+/*MAC REG LIST*/
+#define DM_REG_BB_RST_11N 0x02
+#define DM_REG_ANTSEL_PIN_11N 0x4C
+#define DM_REG_EARLY_MODE_11N 0x4D0
+#define DM_REG_RSSI_MONITOR_11N 0x4FE
+#define DM_REG_EDCA_VO_11N 0x500
+#define DM_REG_EDCA_VI_11N 0x504
+#define DM_REG_EDCA_BE_11N 0x508
+#define DM_REG_EDCA_BK_11N 0x50C
+#define DM_REG_TXPAUSE_11N 0x522
+#define DM_REG_RESP_TX_11N 0x6D8
+#define DM_REG_ANT_TRAIN_PARA1_11N 0x7b0
+#define DM_REG_ANT_TRAIN_PARA2_11N 0x7b4
+
+
+/*DIG Related*/
+#define DM_BIT_IGI_11N 0x0000007F
+#define DM_BIT_IGI_11AC 0xFFFFFFFF
+
+
+
+#define HAL_DM_DIG_DISABLE BIT(0)
+#define HAL_DM_HIPWR_DISABLE BIT(1)
+
+#define OFDM_TABLE_LENGTH 43
+#define CCK_TABLE_LENGTH 33
+
+#define OFDM_TABLE_SIZE 37
+#define CCK_TABLE_SIZE 33
+
+#define BW_AUTO_SWITCH_HIGH_LOW 25
+#define BW_AUTO_SWITCH_LOW_HIGH 30
+
+#define DM_DIG_THRESH_HIGH 40
+#define DM_DIG_THRESH_LOW 35
+
+#define DM_FALSEALARM_THRESH_LOW 400
+#define DM_FALSEALARM_THRESH_HIGH 1000
+
+#define DM_DIG_MAX 0x3e
+#define DM_DIG_MIN 0x1e
+
+#define DM_DIG_MAX_AP 0x32
+#define DM_DIG_MIN_AP 0x20
+
+#define DM_DIG_FA_UPPER 0x3e
+#define DM_DIG_FA_LOWER 0x1e
+#define DM_DIG_FA_TH0 0x200
+#define DM_DIG_FA_TH1 0x300
+#define DM_DIG_FA_TH2 0x400
+
+#define DM_DIG_BACKOFF_MAX 12
+#define DM_DIG_BACKOFF_MIN -4
+#define DM_DIG_BACKOFF_DEFAULT 10
+
+#define RXPATHSELECTION_SS_TH_lOW 30
+#define RXPATHSELECTION_DIFF_TH 18
+
+#define DM_RATR_STA_INIT 0
+#define DM_RATR_STA_HIGH 1
+#define DM_RATR_STA_MIDDLE 2
+#define DM_RATR_STA_LOW 3
+
+#define CTS2SELF_THVAL 30
+#define REGC38_TH 20
+
+#define WAIOTTHVal 25
+
+#define TXHIGHPWRLEVEL_NORMAL 0
+#define TXHIGHPWRLEVEL_LEVEL1 1
+#define TXHIGHPWRLEVEL_LEVEL2 2
+#define TXHIGHPWRLEVEL_BT1 3
+#define TXHIGHPWRLEVEL_BT2 4
+
+#define DM_TYPE_BYFW 0
+#define DM_TYPE_BYDRIVER 1
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+#define TXPWRTRACK_MAX_IDX 6
+
+/* Dynamic ATC switch */
+#define ATC_STATUS_OFF			0x0			/* disable */
+#define ATC_STATUS_ON			0x1			/* enable */
+#define CFO_THRESHOLD_XTAL 10 /* kHz */
+#define CFO_THRESHOLD_ATC 80 /* kHz */
+
+#define AVG_THERMAL_NUM_8812A 4
+#define TXPWR_TRACK_TABLE_SIZE 30
+#define MAX_PATH_NUM_8812A 2
+#define MAX_PATH_NUM_8821A 1
+
+
+struct ps_t {
+ u8 pre_ccastate;
+ u8 cur_ccasate;
+ u8 pre_rfstate;
+ u8 cur_rfstate;
+ u8 initialize;
+ long rssi_val_min;
+
+};
+
+struct dig_t {
+ u8 dig_enable_flag;
+ u8 dig_ext_port_stage;
+ u32 rssi_lowthresh;
+ u32 rssi_highthresh;
+
+ u32 fa_lowthresh;
+ u32 fa_highthresh;
+
+ u8 cursta_connectctate;
+ u8 presta_connectstate;
+ u8 curmultista_connectstate;
+
+ u8 pre_igvalue;
+ u8 cur_igvalue;
+ u8 bt30_cur_igi;
+ u8 backup_igvalue;
+ u8 stop_dig;
+
+ char backoff_val;
+ char backoff_val_range_max;
+ char backoff_val_range_min;
+ u8 rx_gain_range_max;
+ u8 rx_gain_range_min;
+ u8 rssi_val_min;
+
+ u8 pre_cck_cca_thres;
+ u8 cur_cck_cca_thres;
+ u8 pre_cck_pd_state;
+ u8 cur_cck_pd_state;
+
+ u8 large_fa_hit;
+ u8 forbidden_igi;
+ u32 recover_cnt;
+
+ u8 dig_dynamic_min_0;
+ u8 dig_dynamic_min_1;
+ bool b_media_connect_0;
+ bool b_media_connect_1;
+
+ u32 antdiv_rssi_max;
+ u32 rssi_max;
+};
+
+
+enum FAT_STATE {
+ FAT_NORMAL_STATE = 0,
+ FAT_TRAINING_STATE = 1,
+};
+
+enum tag_dynamic_init_gain_operation_type_definition {
+ DIG_TYPE_THRESH_HIGH = 0,
+ DIG_TYPE_THRESH_LOW = 1,
+ DIG_TYPE_BACKOFF = 2,
+ DIG_TYPE_RX_GAIN_MIN = 3,
+ DIG_TYPE_RX_GAIN_MAX = 4,
+ DIG_TYPE_ENABLE = 5,
+ DIG_TYPE_DISABLE = 6,
+ DIG_OP_TYPE_MAX
+};
+
+enum tag_cck_packet_detection_threshold_type_definition {
+ CCK_PD_STAGE_LowRssi = 0,
+ CCK_PD_STAGE_HighRssi = 1,
+ CCK_FA_STAGE_Low = 2,
+ CCK_FA_STAGE_High = 3,
+ CCK_PD_STAGE_MAX = 4,
+};
+
+enum dm_1r_cca_e {
+ CCA_1R = 0,
+ CCA_2R = 1,
+ CCA_MAX = 2,
+};
+
+enum dm_rf_e {
+ RF_SAVE = 0,
+ RF_NORMAL = 1,
+ RF_MAX = 2,
+};
+
+enum dm_sw_ant_switch_e {
+ ANS_ANTENNA_B = 1,
+ ANS_ANTENNA_A = 2,
+ ANS_ANTENNA_MAX = 3,
+};
+
+enum dm_dig_ext_port_alg_e {
+ DIG_EXT_PORT_STAGE_0 = 0,
+ DIG_EXT_PORT_STAGE_1 = 1,
+ DIG_EXT_PORT_STAGE_2 = 2,
+ DIG_EXT_PORT_STAGE_3 = 3,
+ DIG_EXT_PORT_STAGE_MAX = 4,
+};
+
+enum dm_dig_connect_e {
+ DIG_STA_DISCONNECT = 0,
+ DIG_STA_CONNECT = 1,
+ DIG_STA_BEFORE_CONNECT = 2,
+ DIG_MULTISTA_DISCONNECT = 3,
+ DIG_MULTISTA_CONNECT = 4,
+ DIG_CONNECT_MAX
+};
+
+enum pwr_track_control_method {
+ BBSWING,
+ TXAGC,
+ MIX_MODE
+};
+
+#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1)
+#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1)
+#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1)
+#define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1)
+#define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1)
+#define GET_UNDECORATED_AVERAGE_RSSI(_priv)	\
+	((((struct rtl_priv *)(_priv))->mac80211.opmode == NL80211_IFTYPE_ADHOC) ? \
+	 (((struct rtl_priv *)(_priv))->dm.entry_min_undecoratedsmoothed_pwdb) : \
+	 (((struct rtl_priv *)(_priv))->dm.undecorated_smoothed_pwdb))
+
+extern struct dig_t dm_digtable;
+void rtl8821ae_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw,
+ u8 *pdesc, u32 mac_id);
+void rtl8821ae_dm_ant_sel_statistics(struct ieee80211_hw *hw,
+ u8 antsel_tr_mux, u32 mac_id,
+ u32 rx_pwdb_all);
+void rtl8821ae_dm_fast_antenna_trainning_callback(unsigned long data);
+void rtl8821ae_dm_init(struct ieee80211_hw *hw);
+void rtl8821ae_dm_watchdog(struct ieee80211_hw *hw);
+void rtl8821ae_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi);
+void rtl8821ae_dm_init_edca_turbo(struct ieee80211_hw *hw);
+void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw);
+void rtl8821ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
+void rtl8821ae_dm_txpower_track_adjust(struct ieee80211_hw *hw,
+ u8 type,u8 *pdirection,
+ u32 *poutwrite_val);
+void rtl8821ae_dm_clear_txpower_tracking_state(struct ieee80211_hw *hw);
+void rtl8821ae_dm_write_cck_cca_thres(struct ieee80211_hw *hw, u8 current_cca);
+void rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(struct ieee80211_hw *hw);
+void rtl8812ae_dm_path_diversity(struct ieee80211_hw *hw);
+void rtl8812ae_dm_path_diversity_init(struct ieee80211_hw *hw);
+void rtl8812ae_dm_path_statistics(struct ieee80211_hw *hw,
+ u32 rssi_a, u32 rssi_b);
+void rtl812ae_dm_set_txpath_by_txinfo(struct ieee80211_hw *hw,
+ u8 *pdesc);
+void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
+ enum pwr_track_control_method method,
+ u8 rf_path,
+ u8 channel_mapped_index);
+void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
+ enum pwr_track_control_method method, u8 rf_path, u8 channel_mapped_index);
+
+void rtl8812ae_dm_update_init_rate(struct ieee80211_hw *hw, u8 rate);
+u8 rtl8812ae_hw_rate_to_mrate(struct ieee80211_hw *hw, u8 rate);
+void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw);
+void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw);
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/fw.c b/drivers/staging/rtl8821ae/rtl8821ae/fw.c
new file mode 100644
index 000000000000..4083cab926a3
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/fw.c
@@ -0,0 +1,1349 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "fw.h"
+#include "dm.h"
+
+static void _rtl8821ae_enable_fw_download(struct ieee80211_hw *hw, bool enable)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp;
+
+ if (enable) {
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x05);
+
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
+
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ //printk("0x80=%02x.\n",tmp);
+ } else {
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ //printk("0x80=%02x.\n",tmp);
+ }
+
+}
+
+static void _rtl8821ae_fw_block_write(struct ieee80211_hw *hw,
+ const u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 blockSize = sizeof(u32);
+ u8 *bufferPtr = (u8 *) buffer;
+ u32 *pu4BytePtr = (u32 *) buffer;
+ u32 i, offset, blockCount, remainSize;
+
+ blockCount = size / blockSize;
+ remainSize = size % blockSize;
+
+ for (i = 0; i < blockCount; i++) {
+ offset = i * blockSize;
+ rtl_write_dword(rtlpriv, (FW_8821AE_START_ADDRESS + offset),
+ *(pu4BytePtr + i));
+ }
+
+ if (remainSize) {
+ offset = blockCount * blockSize;
+ bufferPtr += offset;
+ for (i = 0; i < remainSize; i++) {
+ rtl_write_byte(rtlpriv, (FW_8821AE_START_ADDRESS +
+ offset + i), *(bufferPtr + i));
+ }
+ }
+}
+
+static void _rtl8821ae_fw_page_write(struct ieee80211_hw *hw,
+ u32 page, const u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value8;
+ u8 u8page = (u8) (page & 0x07);
+
+ value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
+
+ rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
+ _rtl8821ae_fw_block_write(hw, buffer, size);
+}
+
+static void _rtl8821ae_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
+{
+ u32 fwlen = *pfwlen;
+ u8 remain = (u8) (fwlen % 4);
+
+ remain = (remain == 0) ? 0 : (4 - remain);
+
+ while (remain > 0) {
+ pfwbuf[fwlen] = 0;
+ fwlen++;
+ remain--;
+ }
+
+ *pfwlen = fwlen;
+}
+
+static void _rtl8821ae_write_fw(struct ieee80211_hw *hw,
+ enum version_8821ae version,
+ u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 *bufferPtr = (u8 *) buffer;
+ u32 pageNums, remainSize;
+ u32 page, offset;
+
+ RT_TRACE(COMP_FW, DBG_LOUD, ("FW size is %d bytes,\n", size));
+
+ _rtl8821ae_fill_dummy(bufferPtr, &size);
+
+ pageNums = size / FW_8821AE_PAGE_SIZE;
+ remainSize = size % FW_8821AE_PAGE_SIZE;
+
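+	/*
+	 * The image is pushed in FW_8821AE_PAGE_SIZE chunks; the MCUFWDL
+	 * page-select field is only 3 bits wide (page & 0x07), so more than
+	 * 8 pages cannot be addressed and the check below only warns.
+	 */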
+ if (pageNums > 8) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+			("Page numbers should not be greater than 8\n"));
+ }
+
+ for (page = 0; page < pageNums; page++) {
+ offset = page * FW_8821AE_PAGE_SIZE;
+ _rtl8821ae_fw_page_write(hw, page, (bufferPtr + offset),
+ FW_8821AE_PAGE_SIZE);
+ }
+
+ if (remainSize) {
+ offset = pageNums * FW_8821AE_PAGE_SIZE;
+ page = pageNums;
+ _rtl8821ae_fw_page_write(hw, page, (bufferPtr + offset),
+ remainSize);
+ }
+
+}
+
+static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int err = -EIO;
+ u32 counter = 0;
+ u32 value32;
+
+ do {
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ } while ((counter++ < FW_8821AE_POLLING_TIMEOUT_COUNT) &&
+ (!(value32 & FWDL_CHKSUM_RPT)));
+
+ if (counter >= FW_8821AE_POLLING_TIMEOUT_COUNT) {
+ RT_TRACE(COMP_ERR, DBG_LOUD,
+			("Checksum report fail! REG_MCUFWDL:0x%08x.\n",
+ value32));
+ goto exit;
+ }
+
+ RT_TRACE(COMP_FW, DBG_EMERG,
+		("Checksum report OK! REG_MCUFWDL:0x%08x.\n", value32));
+
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ value32 |= MCUFWDL_RDY;
+ value32 &= ~WINTINI_RDY;
+ rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
+
+ rtl8821ae_firmware_selfreset(hw);
+
+ counter = 0;
+ do {
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ if (value32 & WINTINI_RDY) {
+ RT_TRACE(COMP_FW, DBG_LOUD,
+ ("Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
+ value32));
+ err = 0;
+ goto exit;
+ }
+
+ udelay(FW_8821AE_POLLING_DELAY);
+
+ } while (counter++ < FW_8821AE_POLLING_TIMEOUT_COUNT);
+
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32));
+
+exit:
+ return err;
+}
+
+int rtl8821ae_download_fw(struct ieee80211_hw *hw,
+ bool buse_wake_on_wlan_fw
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl8821a_firmware_header *pfwheader;
+ u8 *pfwdata;
+ u32 fwsize;
+ int err;
+ enum version_8821ae version = rtlhal->version;
+
+ if(!rtlhal->pfirmware)
+ return 1;
+
+ pfwheader = (struct rtl8821a_firmware_header *)rtlhal->pfirmware;
+ pfwdata = (u8 *) rtlhal->pfirmware;
+ fwsize = rtlhal->fwsize;
+ RT_TRACE(COMP_FW, DBG_DMESG,
+ ("normal Firmware SIZE %d \n",fwsize));
+
+ if (IS_FW_HEADER_EXIST_8812(pfwheader) || IS_FW_HEADER_EXIST_8821(pfwheader)) {
+ RT_TRACE(COMP_FW, DBG_DMESG,
+ ("Firmware Version(%d), Signature(%#x),Size(%d)\n",
+ pfwheader->version, pfwheader->signature,
+ (int)sizeof(struct rtl8821a_firmware_header)));
+
+ pfwdata = pfwdata + sizeof(struct rtl8821a_firmware_header);
+ fwsize = fwsize - sizeof(struct rtl8821a_firmware_header);
+ }
+
+ if(rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)){
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+ rtl8821ae_firmware_selfreset(hw);
+ }
+ _rtl8821ae_enable_fw_download(hw, true);
+ _rtl8821ae_write_fw(hw, version, pfwdata, fwsize);
+ _rtl8821ae_enable_fw_download(hw, false);
+
+ err = _rtl8821ae_fw_free_to_go(hw);
+ if (err) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Firmware is not ready to run!\n"));
+ } else {
+ RT_TRACE(COMP_FW, DBG_LOUD,
+ ("Firmware is ready to run!\n"));
+ }
+
+ return 0;
+}
+
+static bool _rtl8821ae_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 val_hmetfr;
+ bool result = false;
+
+ val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
+ if (((val_hmetfr >> boxnum) & BIT(0)) == 0)
+ result = true;
+ return result;
+}
+
+static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
+ u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 boxnum =0;
+ u16 box_reg = 0, box_extreg = 0;
+ u8 u1b_tmp = 0;
+ bool isfw_read = false;
+ u8 buf_index = 0;
+ bool bwrite_sucess = false;
+ u8 wait_h2c_limmit = 100;
+ /*u8 wait_writeh2c_limmit = 100;*/
+ u8 boxcontent[4], boxextcontent[4];
+ u32 h2c_waitcounter = 0;
+ unsigned long flag =0;
+ u8 idx =0;
+
+ RT_TRACE(COMP_CMD, DBG_LOUD, ("come in\n"));
+
+ while (true) {
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+ if (rtlhal->b_h2c_setinprogress) {
+ RT_TRACE(COMP_CMD, DBG_LOUD,
+ ("H2C set in progress! Wait to set.."
+ "element_id(%d).\n", element_id));
+
+ while (rtlhal->b_h2c_setinprogress) {
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
+ flag);
+ h2c_waitcounter++;
+ RT_TRACE(COMP_CMD, DBG_LOUD,
+ ("Wait 100 us (%d times)...\n",
+ h2c_waitcounter));
+ udelay(100);
+
+ if (h2c_waitcounter > 1000)
+ return;
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
+ flag);
+ }
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ } else {
+ rtlhal->b_h2c_setinprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ break;
+ }
+ }
+
+ while (!bwrite_sucess) {
+ /*cosa remove this because never reach this.*/
+#if 0
+ wait_writeh2c_limmit--;
+ if (wait_writeh2c_limmit == 0) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Write H2C fail because no trigger "
+ "for FW INT!\n"));
+ break;
+ }
+#endif
+
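+		/*
+		 * H2C commands rotate through four host-to-MCU mailboxes
+		 * (HMEBOX_0..3); last_hmeboxnum remembers which one the next
+		 * command should use.
+		 */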
+ boxnum = rtlhal->last_hmeboxnum;
+ switch (boxnum) {
+ case 0:
+ box_reg = REG_HMEBOX_0;
+ box_extreg = REG_HMEBOX_EXT_0;
+ break;
+ case 1:
+ box_reg = REG_HMEBOX_1;
+ box_extreg = REG_HMEBOX_EXT_1;
+ break;
+ case 2:
+ box_reg = REG_HMEBOX_2;
+ box_extreg = REG_HMEBOX_EXT_2;
+ break;
+ case 3:
+ box_reg = REG_HMEBOX_3;
+ box_extreg = REG_HMEBOX_EXT_3;
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+
+ isfw_read = false;
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_CR);
+
+ if (u1b_tmp != 0xEA)
+ isfw_read = true;
+ else {
+ if( rtl_read_byte(rtlpriv, REG_TXDMA_STATUS) == 0xEA ||
+ rtl_read_byte(rtlpriv, REG_TXPKT_EMPTY) == 0xEA)
+ rtl_write_byte(rtlpriv, REG_SYS_CFG1 + 3, 0xFF);
+ }
+
+ if (isfw_read == true) {
+ wait_h2c_limmit = 100;
+ isfw_read = _rtl8821ae_check_fw_read_last_h2c(hw, boxnum);
+ while (!isfw_read) {
+ /*wait until Fw read*/
+ wait_h2c_limmit--;
+ if (wait_h2c_limmit == 0) {
+ RT_TRACE(COMP_CMD, DBG_LOUD,
+						("Waiting too long for FW to read and "
+						"clear HMEBox(%d)!\n", boxnum));
+ break;
+ }
+
+ udelay(10);
+
+ isfw_read = _rtl8821ae_check_fw_read_last_h2c(hw, boxnum);
+ u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
+ RT_TRACE(COMP_CMD, DBG_LOUD,
+					("Waiting for FW to read and clear HMEBox(%d)!!! "
+ "0x130 = %2x\n", boxnum, u1b_tmp));
+ }
+ }
+
+ if (!isfw_read) {
+ RT_TRACE(COMP_CMD, DBG_LOUD,
+				("Write H2C register BOX[%d] fail! "
+				"FW did not read it.\n", boxnum));
+ break;
+ }
+
+ memset(boxcontent, 0, sizeof(boxcontent));
+ memset(boxextcontent, 0, sizeof(boxextcontent));
+ boxcontent[0] = element_id;
+ RT_TRACE(COMP_CMD, DBG_LOUD,
+ ("Write element_id box_reg(%4x) = %2x \n",
+ box_reg, element_id));
+
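+		/*
+		 * The element id travels in byte 0 of the main mailbox. Commands
+		 * longer than 3 bytes spill their tail into the extension mailbox,
+		 * which is filled before the main box is written.
+		 */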
+ switch (cmd_len) {
+ case 1:
+ case 2:
+ case 3:
+ /*boxcontent[0] &= ~(BIT(7));*/
+ memcpy((u8 *) (boxcontent) + 1,
+ p_cmdbuffer + buf_index, cmd_len);
+
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ }
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ /*boxcontent[0] |= (BIT(7));*/
+ memcpy((u8 *) (boxextcontent),
+ p_cmdbuffer + buf_index+3, cmd_len-3);
+ memcpy((u8 *) (boxcontent) + 1,
+ p_cmdbuffer + buf_index, 3);
+
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_extreg + idx,
+ boxextcontent[idx]);
+ }
+
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ }
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+
+ bwrite_sucess = true;
+
+ rtlhal->last_hmeboxnum = boxnum + 1;
+ if (rtlhal->last_hmeboxnum == 4)
+ rtlhal->last_hmeboxnum = 0;
+
+ RT_TRACE(COMP_CMD, DBG_LOUD,
+ ("pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum));
+ }
+
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+ rtlhal->b_h2c_setinprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+
+ RT_TRACE(COMP_CMD, DBG_LOUD, ("go out\n"));
+}
+
+void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw,
+ u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 tmp_cmdbuf[2];
+
+ if (rtlhal->bfw_ready == false) {
+		RT_ASSERT(false, ("Dropping H2C cmd because FW "
+				  "download failed!\n"));
+ return;
+ }
+
+ memset(tmp_cmdbuf, 0, 8);
+ memcpy(tmp_cmdbuf, p_cmdbuffer, cmd_len);
+ _rtl8821ae_fill_h2c_command(hw, element_id, cmd_len, (u8 *) & tmp_cmdbuf);
+
+ return;
+}
+
+void rtl8821ae_firmware_selfreset(struct ieee80211_hw *hw)
+{
+ u8 u1b_tmp;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ {
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(3))));
+ }else {
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(0))));
+ }
+
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp & (~BIT(2))));
+ udelay(50);
+
+ if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ {
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp | BIT(3)));
+ }else {
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp | BIT(0)));
+ }
+
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp | BIT(2)));
+
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("rtl8821ae_firmware_selfreset(): 8051 reset success.\n"));
+
+}
+
+void rtl8821ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 u1_h2c_set_pwrmode[H2C_8821AE_PWEMODE_LENGTH] = { 0 };
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u8 rlbm,power_state = 0;
+ RT_TRACE(COMP_POWER, DBG_LOUD, ("FW LPS mode = %d\n", mode));
+
+ SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
+ rlbm = 0;/*YJ,temp,120316. FW now not support RLBM=2.*/
+ SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm);
+ SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, (rtlpriv->mac80211.p2p) ? ppsc->smart_ps : 1);
+ SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode, ppsc->reg_max_lps_awakeintvl);
+ SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0);
+ if(mode == FW_PS_ACTIVE_MODE)
+ {
+ power_state |= FW_PWR_STATE_ACTIVE;
+ }
+ else
+ {
+ power_state |= FW_PWR_STATE_RF_OFF;
+ }
+ SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state);
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode \n",
+ u1_h2c_set_pwrmode, H2C_8821AE_PWEMODE_LENGTH);
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_SETPWRMODE, H2C_8821AE_PWEMODE_LENGTH, u1_h2c_set_pwrmode);
+
+}
+
+void rtl8821ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
+{
+ u8 u1_joinbssrpt_parm[1] = { 0 };
+
+ SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
+
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_JOINBSSRPT, 1, u1_joinbssrpt_parm);
+}
+
+void rtl8821ae_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw, u8 ap_offload_enable)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u8 u1_apoffload_parm[H2C_8821AE_AP_OFFLOAD_LENGTH] = { 0 };
+
+ SET_H2CCMD_AP_OFFLOAD_ON(u1_apoffload_parm, ap_offload_enable);
+ SET_H2CCMD_AP_OFFLOAD_HIDDEN(u1_apoffload_parm, mac->bhiddenssid);
+ SET_H2CCMD_AP_OFFLOAD_DENYANY(u1_apoffload_parm, 0);
+
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_AP_OFFLOAD, H2C_8821AE_AP_OFFLOAD_LENGTH, u1_apoffload_parm);
+
+}
+
+static bool _rtl8821ae_cmd_send_packet(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring;
+ struct rtl_tx_desc *pdesc;
+ u8 own;
+ unsigned long flags;
+ struct sk_buff *pskb = NULL;
+
+ ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+ pskb = __skb_dequeue(&ring->queue);
+ if (pskb)
+ kfree_skb(pskb);
+
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+ pdesc = &ring->desc[0];
+ own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
+
+ rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
+
+ __skb_queue_tail(&ring->queue, skb);
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
+
+ return true;
+}
+
+#define BEACON_PG 0 /* ->1 */
+#define PSPOLL_PG 2
+#define NULL_PG 3
+#define PROBERSP_PG 4 /* ->5 */
+
+#define BEACON_PG_8812 0
+#define PSPOLL_PG_8812 1
+#define NULL_PG_8812 2
+#define PROBERSP_PG_8812 3
+
+#define BEACON_PG_8821 0
+#define PSPOLL_PG_8821 1
+#define NULL_PG_8821 2
+#define PROBERSP_PG_8821 3
+
+#define TOTAL_RESERVED_PKT_LEN_8812 2048
+#define TOTAL_RESERVED_PKT_LEN_8821 1024
+
+
+static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = {
+ /* page 0 */
+ 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0xe0, 0x4c, 0x02, 0xe2, 0x64,
+ 0x40, 0x16, 0x9f, 0x23, 0xd4, 0x46, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x20, 0x04, 0x00, 0x06, 0x64, 0x6c,
+ 0x69, 0x6e, 0x6b, 0x31, 0x01, 0x08, 0x82, 0x84,
+ 0x8b, 0x96, 0x0c, 0x18, 0x30, 0x48, 0x03, 0x01,
+ 0x0b, 0x06, 0x02, 0x00, 0x00, 0x2a, 0x01, 0x8b,
+ 0x32, 0x04, 0x12, 0x24, 0x60, 0x6c, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x28, 0x8c, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* page 1 */
+ 0xa4, 0x10, 0x01, 0xc0, 0x40, 0x16, 0x9f, 0x23,
+ 0xd4, 0x46, 0x00, 0xe0, 0x4c, 0x02, 0xe2, 0x64,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x28, 0x8c, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* page 2 */
+ 0x48, 0x01, 0x00, 0x00, 0x40, 0x16, 0x9f, 0x23,
+ 0xd4, 0x46, 0x00, 0xe0, 0x4c, 0x02, 0xe2, 0x64,
+ 0x40, 0x16, 0x9f, 0x23, 0xd4, 0x46, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1a, 0x00, 0x28, 0x8c, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* page 3 */
+ 0xc8, 0x01, 0x00, 0x00, 0x40, 0x16, 0x9f, 0x23,
+ 0xd4, 0x46, 0x00, 0xe0, 0x4c, 0x02, 0xe2, 0x64,
+ 0x40, 0x16, 0x9f, 0x23, 0xd4, 0x46, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+
+static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = {
+ 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x02, 0x53, 0xE5,
+ 0xE0, 0x46, 0x9A, 0x57, 0x71, 0x30, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x30, 0x04, 0x00, 0x0C, 0x4E, 0x45,
+ 0x54, 0x47, 0x45, 0x41, 0x52, 0x5F, 0x31, 0x35,
+ 0x30, 0x4E, 0x01, 0x08, 0x82, 0x84, 0x8B, 0x96,
+ 0x0C, 0x12, 0x18, 0x24, 0x03, 0x01, 0x03, 0x06,
+ 0x02, 0x00, 0x00, 0x2A, 0x01, 0x8A, 0x32, 0x04,
+ 0x30, 0x48, 0x60, 0x6C, 0xDD, 0x18, 0x00, 0x50,
+ 0xF2, 0x01, 0x01, 0x00, 0x00, 0x50, 0xF2, 0x02,
+ 0x01, 0x00, 0x00, 0x50, 0xF2, 0x02, 0x01, 0x00,
+ 0x00, 0x50, 0xF2, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xA4, 0x10, 0x02, 0xC0, 0xE0, 0x46, 0x9A, 0x57,
+ 0x71, 0x30, 0x00, 0xE0, 0x4C, 0x02, 0x53, 0xE5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x48, 0x01, 0x00, 0x00, 0xE0, 0x46, 0x9A, 0x57,
+ 0x71, 0x30, 0x00, 0xE0, 0x4C, 0x02, 0x53, 0xE5,
+ 0xE0, 0x46, 0x9A, 0x57, 0x71, 0x30, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xC8, 0x01, 0x00, 0x00, 0xE0, 0x46, 0x9A, 0x57,
+ 0x71, 0x30, 0x00, 0xE0, 0x4C, 0x02, 0x53, 0xE5,
+ 0xE0, 0x46, 0x9A, 0x57, 0x71, 0x30, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
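+/*
+ * Patch the address/BSSID fields of the 8812 reserved page templates for the
+ * current association, download the whole buffer through the beacon queue,
+ * then tell the firmware where each template lives via H2C_8821AE_RSVDPAGE.
+ */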
+void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct sk_buff *skb = NULL;
+
+ u32 totalpacketlen;
+ bool rtstatus;
+ u8 u1RsvdPageLoc[5] = { 0 };
+ bool b_dlok = false;
+
+ u8 *beacon;
+ u8 *p_pspoll;
+ u8 *nullfunc;
+ u8 *p_probersp;
+ /*---------------------------------------------------------
+ (1) beacon
+ ---------------------------------------------------------*/
+ beacon = &reserved_page_packet_8812[BEACON_PG_8812 * 512];
+ SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
+
+ /*-------------------------------------------------------
+ (2) ps-poll
+ --------------------------------------------------------*/
+ p_pspoll = &reserved_page_packet_8812[PSPOLL_PG_8812 * 512];
+ SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
+ SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
+ SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
+
+ SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG_8812);
+
+ /*--------------------------------------------------------
+ (3) null data
+ ---------------------------------------------------------*/
+ nullfunc = &reserved_page_packet_8812[NULL_PG_8812 * 512];
+ SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
+ SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG_8812);
+
+ /*---------------------------------------------------------
+ (4) probe response
+ ----------------------------------------------------------*/
+ p_probersp = &reserved_page_packet_8812[PROBERSP_PG_8812 * 512];
+ SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
+ SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG_8812);
+
+ totalpacketlen = TOTAL_RESERVED_PKT_LEN_8812;
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+ "rtl8812ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL \n",
+ &reserved_page_packet_8812[0], totalpacketlen);
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "rtl8812ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL \n",
+ u1RsvdPageLoc, 3);
+
+
+ skb = dev_alloc_skb(totalpacketlen);
+ memcpy((u8 *) skb_put(skb, totalpacketlen),
+ &reserved_page_packet_8812, totalpacketlen);
+
+ rtstatus = _rtl8821ae_cmd_send_packet(hw, skb);
+
+ if (rtstatus)
+ b_dlok = true;
+
+ if (b_dlok) {
+ RT_TRACE(COMP_POWER, DBG_LOUD,
+ ("Set RSVD page location to Fw.\n"));
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "H2C_RSVDPAGE:\n",
+ u1RsvdPageLoc, 3);
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RSVDPAGE,
+ sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
+ } else
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("Set RSVD page location to Fw FAIL!!!!!!.\n"));
+}
+
+void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct sk_buff *skb = NULL;
+
+ u32 totalpacketlen;
+ bool rtstatus;
+ u8 u1RsvdPageLoc[5] = { 0 };
+ bool b_dlok = false;
+
+ u8 *beacon;
+ u8 *p_pspoll;
+ u8 *nullfunc;
+ u8 *p_probersp;
+ /*---------------------------------------------------------
+ (1) beacon
+ ---------------------------------------------------------*/
+ beacon = &reserved_page_packet_8821[BEACON_PG_8821 * 256];
+ SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
+
+ /*-------------------------------------------------------
+ (2) ps-poll
+ --------------------------------------------------------*/
+ p_pspoll = &reserved_page_packet_8821[PSPOLL_PG_8821 * 256];
+ SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
+ SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
+ SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
+
+ SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG_8821);
+
+ /*--------------------------------------------------------
+ (3) null data
+ ---------------------------------------------------------*/
+ nullfunc = &reserved_page_packet_8821[NULL_PG_8821 * 256];
+ SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
+ SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG_8821);
+
+ /*---------------------------------------------------------
+ (4) probe response
+ ----------------------------------------------------------*/
+ p_probersp = &reserved_page_packet_8821[PROBERSP_PG_8821 * 256];
+ SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
+ SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG_8821);
+
+ totalpacketlen = TOTAL_RESERVED_PKT_LEN_8821;
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+ "rtl8821ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL \n",
+ &reserved_page_packet_8821[0], totalpacketlen);
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "rtl8821ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL \n",
+ u1RsvdPageLoc, 3);
+
+
+ skb = dev_alloc_skb(totalpacketlen);
+ memcpy((u8 *) skb_put(skb, totalpacketlen),
+ &reserved_page_packet_8821, totalpacketlen);
+
+ rtstatus = _rtl8821ae_cmd_send_packet(hw, skb);
+
+ if (rtstatus)
+ b_dlok = true;
+
+ if (b_dlok) {
+ RT_TRACE(COMP_POWER, DBG_LOUD,
+ ("Set RSVD page location to Fw.\n"));
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "H2C_RSVDPAGE:\n",
+ u1RsvdPageLoc, 3);
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RSVDPAGE,
+ sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
+ } else
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("Set RSVD page location to Fw FAIL!!!!!!.\n"));
+}
+
+/* Should check whether the FW supports P2P or not. */
+void rtl8821ae_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw, u8 ctwindow)
+{
+ u8 u1_ctwindow_period[1] = { ctwindow };
+
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_P2P_PS_CTW_CMD, 1, u1_ctwindow_period);
+
+}
+
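+/*
+ * Program the P2P power-save offload: set the CTWindow, write up to two NoA
+ * descriptors through registers 0x5cf and 0x5E0-0x5EC, and hand the resulting
+ * p2p_ps_offload structure to the firmware with H2C_8821AE_P2P_PS_OFFLOAD.
+ */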
+void rtl8821ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_p2p_ps_info *p2pinfo = &(rtlps->p2p_ps_info);
+ struct p2p_ps_offload_t *p2p_ps_offload = &rtlhal->p2p_ps_offload;
+ u8 i;
+ u16 ctwindow;
+ u32 start_time, tsf_low;
+
+ switch(p2p_ps_state)
+ {
+ case P2P_PS_DISABLE:
+ RT_TRACE(COMP_FW, DBG_LOUD,("P2P_PS_DISABLE \n"));
+ memset(p2p_ps_offload, 0, 1);
+ break;
+ case P2P_PS_ENABLE:
+ RT_TRACE(COMP_FW, DBG_LOUD,("P2P_PS_ENABLE \n"));
+ /* update CTWindow value. */
+ if( p2pinfo->ctwindow > 0 )
+ {
+ p2p_ps_offload->CTWindow_En = 1;
+ ctwindow = p2pinfo->ctwindow;
+ rtl8821ae_set_p2p_ctw_period_cmd(hw, ctwindow);
+ }
+
+ /* hw only support 2 set of NoA */
+ for( i=0 ; i<p2pinfo->noa_num ; i++)
+ {
+ /* To control the register setting for which NOA*/
+ rtl_write_byte(rtlpriv, 0x5cf, (i << 4));
+ if(i == 0)
+ p2p_ps_offload->NoA0_En = 1;
+ else
+ p2p_ps_offload->NoA1_En = 1;
+
+ /* config P2P NoA Descriptor Register */
+ rtl_write_dword(rtlpriv, 0x5E0, p2pinfo->noa_duration[i]);
+ rtl_write_dword(rtlpriv, 0x5E4, p2pinfo->noa_interval[i]);
+
+ /*Get Current TSF value */
+ tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+
+ start_time = p2pinfo->noa_start_time[i];
+ if(p2pinfo->noa_count_type[i] != 1)
+ {
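+ /* Advance start_time by whole NoA intervals until it is at
+  * least 50 TU (50 * 1024 us) ahead of the current TSF,
+  * decrementing noa_count_type for each skipped interval
+  * unless it holds the value 255. */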
+ while( start_time <= (tsf_low+(50*1024) ) ) {
+ start_time += p2pinfo->noa_interval[i];
+ if(p2pinfo->noa_count_type[i] != 255)
+ p2pinfo->noa_count_type[i]--;
+ }
+ }
+ rtl_write_dword(rtlpriv, 0x5E8, start_time);
+ rtl_write_dword(rtlpriv, 0x5EC, p2pinfo->noa_count_type[i] );
+
+ }
+
+ if( (p2pinfo->opp_ps == 1) || (p2pinfo->noa_num > 0) )
+ {
+ /* rst p2p circuit */
+ rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4));
+
+ p2p_ps_offload->Offload_En = 1;
+
+ if(P2P_ROLE_GO == rtlpriv->mac80211.p2p)
+ {
+ p2p_ps_offload->role= 1;
+ p2p_ps_offload->AllStaSleep = 0;
+ }
+ else
+ {
+ p2p_ps_offload->role= 0;
+ }
+
+ p2p_ps_offload->discovery = 0;
+ }
+ break;
+ case P2P_PS_SCAN:
+ RT_TRACE(COMP_FW, DBG_LOUD,("P2P_PS_SCAN \n"));
+ p2p_ps_offload->discovery = 1;
+ break;
+ case P2P_PS_SCAN_DONE:
+ RT_TRACE(COMP_FW, DBG_LOUD,("P2P_PS_SCAN_DONE \n"));
+ p2p_ps_offload->discovery = 0;
+ p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
+ break;
+ default:
+ break;
+ }
+
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload);
+
+}
+
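+/*
+ * RA report C2H handler: the rate index is carried in the low 6 bits of
+ * byte 0; it is converted with rtl8812ae_hw_rate_to_mrate() to update the
+ * current RA rate and then fed to rtl8812ae_dm_update_init_rate().
+ */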
+void rtl8812ae_c2h_ra_report_handler(
+ struct ieee80211_hw *hw,
+ u8 *cmd_buf,
+ u8 cmd_len
+)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 rate = cmd_buf[0] & 0x3F;
+
+ rtlhal->current_ra_rate = rtl8812ae_hw_rate_to_mrate(hw, rate);
+
+ rtl8812ae_dm_update_init_rate(hw, rate);
+}
+
+
+void _rtl8812ae_c2h_content_parsing(
+ struct ieee80211_hw *hw,
+ u8 c2h_cmd_id,
+ u8 c2h_cmd_len,
+ u8 *tmp_buf
+)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (c2h_cmd_id) {
+ case C2H_8812_DBG:
+ RT_TRACE(COMP_FW, DBG_LOUD,("[C2H], C2H_8812_DBG!!\n"));
+ break;
+
+ case C2H_8812_RA_RPT:
+ rtl8812ae_c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len);
+ break;
+
+ default:
+ break;
+ }
+
+}
+
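+/*
+ * C2H (firmware-to-driver) packet layout handled here: byte 0 is the command
+ * id, byte 1 a sequence number, and the remaining length - 2 bytes are the
+ * payload passed on to _rtl8812ae_c2h_content_parsing().
+ */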
+void rtl8812ae_c2h_packet_handler(
+ struct ieee80211_hw *hw,
+ u8 *buffer,
+ u8 length
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 c2h_cmd_id = 0, c2h_cmd_seq = 0, c2h_cmd_len = 0;
+ u8 *tmp_buf = NULL;
+
+ c2h_cmd_id = buffer[0];
+ c2h_cmd_seq = buffer[1];
+ c2h_cmd_len = length - 2;
+ tmp_buf = buffer + 2;
+
+ RT_TRACE(COMP_FW, DBG_LOUD,
+ ("[C2H packet], c2hCmdId=0x%x, c2hCmdSeq=0x%x, c2hCmdLen=%d\n",
+ c2h_cmd_id, c2h_cmd_seq, c2h_cmd_len));
+
+ RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD,
+ "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len);
+ _rtl8812ae_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf);
+}
+
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/fw.h b/drivers/staging/rtl8821ae/rtl8821ae/fw.h
new file mode 100644
index 000000000000..30eec880026c
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/fw.h
@@ -0,0 +1,321 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE__FW__H__
+#define __RTL8821AE__FW__H__
+
+#define FW_8821AE_SIZE 0x8000
+#define FW_8821AE_START_ADDRESS 0x1000
+#define FW_8821AE_END_ADDRESS 0x5FFF
+#define FW_8821AE_PAGE_SIZE 4096
+#define FW_8821AE_POLLING_DELAY 5
+#define FW_8821AE_POLLING_TIMEOUT_COUNT 6000
+
+#define IS_FW_HEADER_EXIST_8812(_pfwhdr) \
+ ((_pfwhdr->signature&0xFFF0) == 0x9500 )
+
+#define IS_FW_HEADER_EXIST_8821(_pfwhdr) \
+ ((_pfwhdr->signature&0xFFF0) == 0x2100 )
+
+#define USE_OLD_WOWLAN_DEBUG_FW 0
+
+#define H2C_8821AE_RSVDPAGE_LOC_LEN 5
+#define H2C_8821AE_PWEMODE_LENGTH 5
+#define H2C_8821AE_JOINBSSRPT_LENGTH 1
+#define H2C_8821AE_AP_OFFLOAD_LENGTH 3
+#define H2C_8821AE_WOWLAN_LENGTH 3
+#define H2C_8821AE_KEEP_ALIVE_CTRL_LENGTH 3
+#if(USE_OLD_WOWLAN_DEBUG_FW == 0)
+#define H2C_8821AE_REMOTE_WAKE_CTRL_LEN 1
+#else
+#define H2C_8821AE_REMOTE_WAKE_CTRL_LEN 3
+#endif
+#define H2C_8821AE_AOAC_GLOBAL_INFO_LEN 2
+#define H2C_8821AE_AOAC_RSVDPAGE_LOC_LEN 7
+
+
+/* Fw PS state for RPWM.
+*BIT[2:0] = HW state
+*BIT[3] = Protocol PS state, 1: register active state , 0: register sleep state
+*BIT[4] = sub-state
+*/
+#define FW_PS_GO_ON BIT(0)
+#define FW_PS_TX_NULL BIT(1)
+#define FW_PS_RF_ON BIT(2)
+#define FW_PS_REGISTER_ACTIVE BIT(3)
+
+#define FW_PS_DPS BIT(0)
+#define FW_PS_LCLK (FW_PS_DPS)
+#define FW_PS_RF_OFF BIT(1)
+#define FW_PS_ALL_ON BIT(2)
+#define FW_PS_ST_ACTIVE BIT(3)
+#define FW_PS_ISR_ENABLE BIT(4)
+#define FW_PS_IMR_ENABLE BIT(5)
+
+
+#define FW_PS_ACK BIT(6)
+#define FW_PS_TOGGLE BIT(7)
+
+ /* 8821AE RPWM value*/
+ /* BIT[0] = 1: 32k, 0: 40M*/
+#define FW_PS_CLOCK_OFF BIT(0) /* 32k*/
+#define FW_PS_CLOCK_ON 0 /*40M*/
+
+#define FW_PS_STATE_MASK (0x0F)
+#define FW_PS_STATE_HW_MASK (0x07)
+#define FW_PS_STATE_INT_MASK (0x3F) /*ISR_ENABLE, IMR_ENABLE, and PS mode should be inherited.*/
+
+#define FW_PS_STATE(x) (FW_PS_STATE_MASK & (x))
+#define FW_PS_STATE_HW(x) (FW_PS_STATE_HW_MASK & (x))
+#define FW_PS_STATE_INT(x) (FW_PS_STATE_INT_MASK & (x))
+#define FW_PS_ISR_VAL(x) ((x) & 0x70)
+#define FW_PS_IMR_MASK(x) ((x) & 0xDF)
+#define FW_PS_KEEP_IMR(x) ((x) & 0x20)
+
+
+#define FW_PS_STATE_S0 (FW_PS_DPS)
+#define FW_PS_STATE_S1 (FW_PS_LCLK)
+#define FW_PS_STATE_S2 (FW_PS_RF_OFF)
+#define FW_PS_STATE_S3 (FW_PS_ALL_ON)
+#define FW_PS_STATE_S4 ((FW_PS_ST_ACTIVE) | (FW_PS_ALL_ON))
+
+#define FW_PS_STATE_ALL_ON_8821AE (FW_PS_CLOCK_ON) /* ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))*/
+#define FW_PS_STATE_RF_ON_8821AE (FW_PS_CLOCK_ON) /* (FW_PS_RF_ON)*/
+#define FW_PS_STATE_RF_OFF_8821AE (FW_PS_CLOCK_ON) /* 0x0*/
+#define FW_PS_STATE_RF_OFF_LOW_PWR_8821AE (FW_PS_CLOCK_OFF) /* (FW_PS_STATE_RF_OFF)*/
+
+#define FW_PS_STATE_ALL_ON_92C (FW_PS_STATE_S4)
+#define FW_PS_STATE_RF_ON_92C (FW_PS_STATE_S3)
+#define FW_PS_STATE_RF_OFF_92C (FW_PS_STATE_S2)
+#define FW_PS_STATE_RF_OFF_LOW_PWR_92C (FW_PS_STATE_S1)
+
+
+/* For 8821AE H2C PwrMode Cmd ID 5.*/
+#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
+#define FW_PWR_STATE_RF_OFF 0
+
+#define FW_PS_IS_ACK(x) ((x) & FW_PS_ACK )
+#define FW_PS_IS_CLK_ON(x) ((x) & (FW_PS_RF_OFF |FW_PS_ALL_ON ))
+#define FW_PS_IS_RF_ON(x) ((x) & (FW_PS_ALL_ON))
+#define FW_PS_IS_ACTIVE(x) ((x) & (FW_PS_ST_ACTIVE))
+#define FW_PS_IS_CPWM_INT(x) ((x) & 0x40)
+
+#define FW_CLR_PS_STATE(x) ((x) = ((x) & (0xF0)))
+
+#define IS_IN_LOW_POWER_STATE_8821AE(FwPSState) \
+ (FW_PS_STATE(FwPSState) == FW_PS_CLOCK_OFF)
+
+#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
+#define FW_PWR_STATE_RF_OFF 0
+
+struct rtl8821a_firmware_header {
+ u16 signature;
+ u8 category;
+ u8 function;
+ u16 version;
+ u8 subversion;
+ u8 rsvd1;
+ u8 month;
+ u8 date;
+ u8 hour;
+ u8 minute;
+ u16 ramcodeSize;
+ u16 rsvd2;
+ u32 svnindex;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+};
+
+enum rtl8812_c2h_evt {
+ C2H_8812_DBG = 0,
+ C2H_8812_LB = 1,
+ C2H_8812_TXBF = 2,
+ C2H_8812_TX_REPORT = 3,
+ C2H_8812_BT_INFO = 9,
+ C2H_8812_BT_MP = 11,
+ C2H_8812_RA_RPT = 12,
+
+ C2H_8812_FW_SWCHNL = 0x10,
+ C2H_8812_IQK_FINISH = 0x11,
+ MAX_8812_C2HEVENT
+};
+
+enum rtl8821a_h2c_cmd {
+ H2C_8821AE_RSVDPAGE = 0,
+ H2C_8821AE_JOINBSSRPT = 1,
+ H2C_8821AE_SCAN = 2,
+ H2C_8821AE_KEEP_ALIVE_CTRL = 3,
+ H2C_8821AE_DISCONNECT_DECISION = 4,
+#if(USE_OLD_WOWLAN_DEBUG_FW == 1)
+ H2C_8821AE_WO_WLAN = 5,
+#endif
+ H2C_8821AE_INIT_OFFLOAD = 6,
+#if(USE_OLD_WOWLAN_DEBUG_FW == 1)
+ H2C_8821AE_REMOTE_WAKE_CTRL = 7,
+#endif
+ H2C_8821AE_AP_OFFLOAD = 8,
+ H2C_8821AE_BCN_RSVDPAGE = 9,
+ H2C_8821AE_PROBERSP_RSVDPAGE = 10,
+
+ H2C_8821AE_SETPWRMODE = 0x20,
+ H2C_8821AE_PS_TUNING_PARA = 0x21,
+ H2C_8821AE_PS_TUNING_PARA2 = 0x22,
+ H2C_8821AE_PS_LPS_PARA = 0x23,
+ H2C_8821AE_P2P_PS_OFFLOAD = 0x24,
+
+#if(USE_OLD_WOWLAN_DEBUG_FW == 0)
+ H2C_8821AE_WO_WLAN = 0x80,
+ H2C_8821AE_REMOTE_WAKE_CTRL = 0x81,
+ H2C_8821AE_AOAC_GLOBAL_INFO = 0x82,
+ H2C_8821AE_AOAC_RSVDPAGE = 0x83,
+#endif
+ H2C_RSSI_REPORT = 0x42,
+ H2C_8821AE_RA_MASK = 0x40,
+ H2C_8821AE_SELECTIVE_SUSPEND_ROF_CMD,
+ H2C_8821AE_P2P_PS_MODE,
+ H2C_8821AE_PSD_RESULT,
+ /*Not defined CTW CMD for P2P yet*/
+ H2C_8821AE_P2P_PS_CTW_CMD,
+ MAX_8821AE_H2CCMD
+};
+
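+/* pagenum_128(): number of 128-byte pages needed to hold _len bytes,
+ * i.e. _len / 128 rounded up. */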
+#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1:0))
+
+#define SET_8821AE_H2CCMD_WOWLAN_FUNC_ENABLE(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 2, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 3, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_ALL_PKT_DROP(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 4, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_GPIO_ACTIVE(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 5, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_REKEY_WAKE_UP(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 6, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 7, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_GPIONUM(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_GPIO_DURATION(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value)
+
+
+#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_RLBM(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 4, __Value)
+#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 4, 4, __Value)
+#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value)
+#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+3, 0, 8, __Value)
+#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+4, 0, 8, __Value)
+#define GET_8821AE_H2CCMD_PWRMODE_PARM_MODE(__pH2CCmd) \
+ LE_BITS_TO_1BYTE(__pH2CCmd, 0, 8)
+
+#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+
+/* AP_OFFLOAD */
+#define SET_H2CCMD_AP_OFFLOAD_ON(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 8, __Value)
+#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value)
+#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+3, 0, 8, __Value)
+
+/* Keep Alive Control*/
+#define SET_8821AE_H2CCMD_KEEP_ALIVE_ENABLE(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#define SET_8821AE_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_8821AE_H2CCMD_KEEP_ALIVE_PERIOD(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+
+/*REMOTE_WAKE_CTRL */
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_EN(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#if(USE_OLD_WOWLAN_DEBUG_FW == 0)
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 2, 1, __Value)
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 3, 1, __Value)
+#else
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_PAIRWISE_ENC_ALG(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_GROUP_ENC_ALG(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value)
+#endif
+
+/* GTK_OFFLOAD */
+#define SET_8821AE_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+
+/* AOAC_RSVDPAGE_LOC */
+#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd), 0, 8, __Value)
+#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+3, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+4, 0, 8, __Value)
+
+int rtl8821ae_download_fw(struct ieee80211_hw *hw,
+ bool buse_wake_on_wlan_fw);
+void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer);
+void rtl8821ae_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl8821ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl8821ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+void rtl8821ae_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw, u8 ap_offload_enable);
+void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
+void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
+void rtl8821ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
+void rtl8812ae_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 length);
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.c b/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.c
new file mode 100644
index 000000000000..8bee772d766f
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.c
@@ -0,0 +1,519 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "hal_bt_coexist.h"
+#include "../pci.h"
+#include "dm.h"
+#include "fw.h"
+#include "phy.h"
+#include "reg.h"
+#include "hal_btc.h"
+
+static bool bt_operation_on = false;
+
+void rtl8821ae_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw, bool b_reject)
+{
+#if 0
+ struct rtl_priv rtlpriv = rtl_priv(hw);
+ PRX_TS_RECORD pRxTs = NULL;
+
+ if(b_reject){
+ // Do not allow receiving A-MPDU aggregation.
+ if (rtlpriv->mac80211.vendor == PEER_CISCO) {
+ if (pHTInfo->bAcceptAddbaReq) {
+ RTPRINT(FBT, BT_TRACE, ("BT_Disallow AMPDU \n"));
+ pHTInfo->bAcceptAddbaReq = FALSE;
+ if(GetTs(Adapter, (PTS_COMMON_INFO*)(&pRxTs), pMgntInfo->Bssid, 0, RX_DIR, FALSE))
+ TsInitDelBA(Adapter, (PTS_COMMON_INFO)pRxTs, RX_DIR);
+ }
+ } else {
+ if (!pHTInfo->bAcceptAddbaReq) {
+ RTPRINT(FBT, BT_TRACE, ("BT_Allow AMPDU BT Idle\n"));
+ pHTInfo->bAcceptAddbaReq = TRUE;
+ }
+ }
+ } else {
+ if(rtlpriv->mac80211.vendor == PEER_CISCO) {
+ if (!pHTInfo->bAcceptAddbaReq) {
+ RTPRINT(FBT, BT_TRACE, ("BT_Allow AMPDU \n"));
+ pHTInfo->bAcceptAddbaReq = TRUE;
+ }
+ }
+ }
+#endif
+}
+
+void _rtl8821ae_dm_bt_check_wifi_state(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (rtlpriv->link_info.b_busytraffic) {
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_IDLE;
+
+ if (rtlpriv->link_info.b_tx_busy_traffic) {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_UPLINK;
+ } else {
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_UPLINK;
+ }
+
+ if (rtlpriv->link_info.b_rx_busy_traffic) {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_DOWNLINK;
+ } else {
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_DOWNLINK;
+ }
+ } else {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_IDLE;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_UPLINK;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_DOWNLINK;
+ }
+
+ if (rtlpriv->mac80211.mode == WIRELESS_MODE_G
+ || rtlpriv->mac80211.mode == WIRELESS_MODE_B) {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_LEGACY;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_HT20;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_HT40;
+ } else {
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_LEGACY;
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_HT40;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_HT20;
+ } else {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_HT20;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_HT40;
+ }
+ }
+
+ if (bt_operation_on) {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BT30;
+ } else {
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_BT30;
+ }
+}
+
+
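+/*
+ * Classify the current undecorated smoothed RSSI into low/medium/high
+ * coexistence states.  level_num selects a 2-level or 3-level scheme and
+ * BT_FW_COEX_THRESH_TOL provides hysteresis: a state is only raised once the
+ * RSSI exceeds the threshold plus the tolerance, and only lowered once it
+ * drops below the bare threshold.
+ */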
+u8 rtl8821ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
+ u8 level_num, u8 rssi_thresh, u8 rssi_thresh1)
+
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ long undecoratedsmoothed_pwdb = 0;
+ u8 bt_rssi_state = 0;
+
+ undecoratedsmoothed_pwdb = rtl8821ae_dm_bt_get_rx_ss(hw);
+
+ if(level_num == 2) {
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+
+ if( (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_LOW) ||
+ (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_LOW)) {
+ if(undecoratedsmoothed_pwdb >= (rssi_thresh + BT_FW_COEX_THRESH_TOL)) {
+ bt_rssi_state = BT_RSSI_STATE_HIGH;
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to High\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at Low\n"));
+ }
+ } else {
+ if(undecoratedsmoothed_pwdb < rssi_thresh) {
+ bt_rssi_state = BT_RSSI_STATE_LOW;
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to Low\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at High\n"));
+ }
+ }
+ } else if(level_num == 3) {
+ if(rssi_thresh > rssi_thresh1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 thresh error!!\n"));
+ return rtlpcipriv->btcoexist.bt_pre_rssi_state;
+ }
+
+ if( (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_LOW) ||
+ (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_LOW)) {
+ if(undecoratedsmoothed_pwdb >= (rssi_thresh+BT_FW_COEX_THRESH_TOL)) {
+ bt_rssi_state = BT_RSSI_STATE_MEDIUM;
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to Medium\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at Low\n"));
+ }
+ } else if( (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_MEDIUM) ||
+ (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_MEDIUM)) {
+ if(undecoratedsmoothed_pwdb >= (rssi_thresh1 + BT_FW_COEX_THRESH_TOL)) {
+ bt_rssi_state = BT_RSSI_STATE_HIGH;
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to High\n"));
+ } else if(undecoratedsmoothed_pwdb < rssi_thresh) {
+ bt_rssi_state = BT_RSSI_STATE_LOW;
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to Low\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at Medium\n"));
+ }
+ } else {
+ if(undecoratedsmoothed_pwdb < rssi_thresh1) {
+ bt_rssi_state = BT_RSSI_STATE_MEDIUM;
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,("[DM][BT], RSSI_1 state switch to Medium\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at High\n"));
+ }
+ }
+ }
+
+ rtlpcipriv->btcoexist.bt_pre_rssi_state1 = bt_rssi_state;
+
+ return bt_rssi_state;
+}
+
+u8 rtl8821ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
+ u8 level_num, u8 rssi_thresh, u8 rssi_thresh1)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ long undecoratedsmoothed_pwdb = 0;
+ u8 bt_rssi_state = 0;
+
+ undecoratedsmoothed_pwdb = rtl8821ae_dm_bt_get_rx_ss(hw);
+
+ if (level_num == 2) {
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+
+ if ((rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_LOW) ||
+ (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_LOW)){
+ if (undecoratedsmoothed_pwdb
+ >= (rssi_thresh + BT_FW_COEX_THRESH_TOL)) {
+ bt_rssi_state = BT_RSSI_STATE_HIGH;
+ rtlpcipriv->btcoexist.current_state
+ |= BT_COEX_STATE_WIFI_RSSI_HIGH;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state switch to High\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state stay at Low\n"));
+ }
+ } else {
+ if (undecoratedsmoothed_pwdb < rssi_thresh) {
+ bt_rssi_state = BT_RSSI_STATE_LOW;
+ rtlpcipriv->btcoexist.current_state
+ |= BT_COEX_STATE_WIFI_RSSI_LOW;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state switch to Low\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state stay at High\n"));
+ }
+ }
+ }
+ else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI thresh error!!\n"));
+ return rtlpcipriv->btcoexist.bt_pre_rssi_state;
+ }
+ if ((rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_LOW) ||
+ (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_LOW)) {
+ if(undecoratedsmoothed_pwdb
+ >= (rssi_thresh + BT_FW_COEX_THRESH_TOL)) {
+ bt_rssi_state = BT_RSSI_STATE_MEDIUM;
+ rtlpcipriv->btcoexist.current_state
+ |= BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state switch to Medium\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state stay at Low\n"));
+ }
+ } else if ((rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_MEDIUM) ||
+ (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_MEDIUM)) {
+ if (undecoratedsmoothed_pwdb
+ >= (rssi_thresh1 + BT_FW_COEX_THRESH_TOL)) {
+ bt_rssi_state = BT_RSSI_STATE_HIGH;
+ rtlpcipriv->btcoexist.current_state
+ |= BT_COEX_STATE_WIFI_RSSI_HIGH;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state switch to High\n"));
+ } else if(undecoratedsmoothed_pwdb < rssi_thresh)
+ {
+ bt_rssi_state = BT_RSSI_STATE_LOW;
+ rtlpcipriv->btcoexist.current_state
+ |= BT_COEX_STATE_WIFI_RSSI_LOW;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state switch to Low\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state stay at Medium\n"));
+ }
+ } else {
+ if(undecoratedsmoothed_pwdb < rssi_thresh1) {
+ bt_rssi_state = BT_RSSI_STATE_MEDIUM;
+ rtlpcipriv->btcoexist.current_state
+ |= BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state switch to Medium\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state stay at High\n"));
+ }
+ }
+ }
+
+ rtlpcipriv->btcoexist.bt_pre_rssi_state = bt_rssi_state;
+ return bt_rssi_state;
+}
+long rtl8821ae_dm_bt_get_rx_ss(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ long undecoratedsmoothed_pwdb = 0;
+
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+ undecoratedsmoothed_pwdb = GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
+ } else {
+ undecoratedsmoothed_pwdb
+ = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ }
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("rtl8821ae_dm_bt_get_rx_ss() = %ld\n", undecoratedsmoothed_pwdb));
+
+ return undecoratedsmoothed_pwdb;
+}
+
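+/*
+ * Enable or disable the firmware BT/WLAN balance mechanism via H2C command
+ * 0xc; ms0 and ms1 are the two time slices (in ms) handed to the firmware
+ * when balancing is turned on.
+ */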
+void rtl8821ae_dm_bt_balance(struct ieee80211_hw *hw,
+ bool b_balance_on, u8 ms0, u8 ms1)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter[3] ={0};
+
+ if (b_balance_on) {
+ h2c_parameter[2] = 1;
+ h2c_parameter[1] = ms1;
+ h2c_parameter[0] = ms0;
+ rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+ } else {
+ h2c_parameter[2] = 0;
+ h2c_parameter[1] = 0;
+ h2c_parameter[0] = 0;
+ }
+ rtlpcipriv->btcoexist.b_balance_on = b_balance_on;
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], Balance=[%s:%dms:%dms], write 0xc=0x%x\n",
+ b_balance_on?"ON":"OFF", ms0, ms1,
+ h2c_parameter[0]<<16 | h2c_parameter[1]<<8 | h2c_parameter[2]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0xc, 3, h2c_parameter);
+}
+
+
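+/*
+ * Switch the baseband AGC table (register 0xc78) and the RF_RX_AGC_HP /
+ * RF_RX_G1 RF settings for BT coexistence: BT_AGCTABLE_ON applies the
+ * coexistence values, BT_AGCTABLE_OFF restores the normal ones.
+ */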
+void rtl8821ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+ if (type == BT_AGCTABLE_OFF) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BT]AGCTable Off!\n"));
+ rtl_write_dword(rtlpriv, 0xc78,0x641c0001);
+ rtl_write_dword(rtlpriv, 0xc78,0x631d0001);
+ rtl_write_dword(rtlpriv, 0xc78,0x621e0001);
+ rtl_write_dword(rtlpriv, 0xc78,0x611f0001);
+ rtl_write_dword(rtlpriv, 0xc78,0x60200001);
+
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_AGC_HP, 0xfffff, 0x32000);
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_AGC_HP, 0xfffff, 0x71000);
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_AGC_HP, 0xfffff, 0xb0000);
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_AGC_HP, 0xfffff, 0xfc000);
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_G1, 0xfffff, 0x30355);
+ } else if (type == BT_AGCTABLE_ON) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BT]AGCTable On!\n"));
+ rtl_write_dword(rtlpriv, 0xc78,0x4e1c0001);
+ rtl_write_dword(rtlpriv, 0xc78,0x4d1d0001);
+ rtl_write_dword(rtlpriv, 0xc78,0x4c1e0001);
+ rtl_write_dword(rtlpriv, 0xc78,0x4b1f0001);
+ rtl_write_dword(rtlpriv, 0xc78,0x4a200001);
+
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_AGC_HP, 0xfffff, 0xdc000);
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_AGC_HP, 0xfffff, 0x90000);
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_AGC_HP, 0xfffff, 0x51000);
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_AGC_HP, 0xfffff, 0x12000);
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_G1, 0xfffff, 0x00355);
+
+ rtlpcipriv->btcoexist.b_sw_coexist_all_off = false;
+ }
+}
+
+void rtl8821ae_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+ if (type == BT_BB_BACKOFF_OFF) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BT]BBBackOffLevel Off!\n"));
+ rtl_write_dword(rtlpriv, 0xc04,0x3a05611);
+ } else if (type == BT_BB_BACKOFF_ON) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BT]BBBackOffLevel On!\n"));
+ rtl_write_dword(rtlpriv, 0xc04,0x3a07611);
+ rtlpcipriv->btcoexist.b_sw_coexist_all_off = false;
+ }
+}
+
+void rtl8821ae_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("rtl8821ae_dm_bt_fw_coex_all_off()\n"));
+
+ if(rtlpcipriv->btcoexist.b_fw_coexist_all_off)
+ return;
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("rtl8821ae_dm_bt_fw_coex_all_off(), real Do\n"));
+ rtl8821ae_dm_bt_fw_coex_all_off_8723a(hw);
+ rtlpcipriv->btcoexist.b_fw_coexist_all_off = true;
+}
+
+void rtl8821ae_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("rtl8821ae_dm_bt_sw_coex_all_off()\n"));
+
+ if(rtlpcipriv->btcoexist.b_sw_coexist_all_off)
+ return;
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("rtl8821ae_dm_bt_sw_coex_all_off(), real Do\n"));
+ rtl8821ae_dm_bt_sw_coex_all_off_8723a(hw);
+ rtlpcipriv->btcoexist.b_sw_coexist_all_off = true;
+}
+
+void rtl8821ae_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("rtl8821ae_dm_bt_hw_coex_all_off()\n"));
+
+ if(rtlpcipriv->btcoexist.b_hw_coexist_all_off)
+ return;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("rtl8821ae_dm_bt_hw_coex_all_off(), real Do\n"));
+
+ rtl8821ae_dm_bt_hw_coex_all_off_8723a(hw);
+
+ rtlpcipriv->btcoexist.b_hw_coexist_all_off = true;
+}
+
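+/* Turn the firmware, software and hardware coexistence mechanisms off in one call. */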
+void rtl8821ae_btdm_coex_all_off(struct ieee80211_hw *hw)
+{
+ rtl8821ae_dm_bt_fw_coex_all_off(hw);
+ rtl8821ae_dm_bt_sw_coex_all_off(hw);
+ rtl8821ae_dm_bt_hw_coex_all_off(hw);
+}
+
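+/* Return true when either coexistence state word differs from the previously recorded one. */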
+bool rtl8821ae_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+ if((rtlpcipriv->btcoexist.previous_state
+ == rtlpcipriv->btcoexist.current_state)
+ && (rtlpcipriv->btcoexist.previous_state_h
+ == rtlpcipriv->btcoexist.current_state_h))
+ return false;
+ else
+ return true;
+}
+
+bool rtl8821ae_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->link_info.b_tx_busy_traffic)
+ return true;
+ else
+ return false;
+}
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h b/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h
new file mode 100644
index 000000000000..799cc6f95cc1
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h
@@ -0,0 +1,169 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_HAL_BT_COEXIST_H__
+#define __RTL8821AE_HAL_BT_COEXIST_H__
+
+#include "../wifi.h"
+
+/* The reg define is for 8723 */
+#define REG_HIGH_PRIORITY_TXRX 0x770
+#define REG_LOW_PRIORITY_TXRX 0x774
+
+#define BT_FW_COEX_THRESH_TOL 6
+#define BT_FW_COEX_THRESH_20 20
+#define BT_FW_COEX_THRESH_23 23
+#define BT_FW_COEX_THRESH_25 25
+#define BT_FW_COEX_THRESH_30 30
+#define BT_FW_COEX_THRESH_35 35
+#define BT_FW_COEX_THRESH_40 40
+#define BT_FW_COEX_THRESH_45 45
+#define BT_FW_COEX_THRESH_47 47
+#define BT_FW_COEX_THRESH_50 50
+#define BT_FW_COEX_THRESH_55 55
+
+#define BT_COEX_STATE_BT30 BIT(0)
+#define BT_COEX_STATE_WIFI_HT20 BIT(1)
+#define BT_COEX_STATE_WIFI_HT40 BIT(2)
+#define BT_COEX_STATE_WIFI_LEGACY BIT(3)
+
+#define BT_COEX_STATE_WIFI_RSSI_LOW BIT(4)
+#define BT_COEX_STATE_WIFI_RSSI_MEDIUM BIT(5)
+#define BT_COEX_STATE_WIFI_RSSI_HIGH BIT(6)
+#define BT_COEX_STATE_DEC_BT_POWER BIT(7)
+
+#define BT_COEX_STATE_WIFI_IDLE BIT(8)
+#define BT_COEX_STATE_WIFI_UPLINK BIT(9)
+#define BT_COEX_STATE_WIFI_DOWNLINK BIT(10)
+
+#define BT_COEX_STATE_BT_INQ_PAGE BIT(11)
+#define BT_COEX_STATE_BT_IDLE BIT(12)
+#define BT_COEX_STATE_BT_UPLINK BIT(13)
+#define BT_COEX_STATE_BT_DOWNLINK BIT(14)
+
+#define BT_COEX_STATE_HOLD_FOR_BT_OPERATION BIT(15)
+#define BT_COEX_STATE_BT_RSSI_LOW BIT(19)
+
+#define BT_COEX_STATE_PROFILE_HID BIT(20)
+#define BT_COEX_STATE_PROFILE_A2DP BIT(21)
+#define BT_COEX_STATE_PROFILE_PAN BIT(22)
+#define BT_COEX_STATE_PROFILE_SCO BIT(23)
+
+#define BT_COEX_STATE_WIFI_RSSI_1_LOW BIT(24)
+#define BT_COEX_STATE_WIFI_RSSI_1_MEDIUM BIT(25)
+#define BT_COEX_STATE_WIFI_RSSI_1_HIGH BIT(26)
+
+#define BT_COEX_STATE_BTINFO_COMMON BIT(30)
+#define BT_COEX_STATE_BTINFO_B_HID_SCOESCO BIT(31)
+#define BT_COEX_STATE_BTINFO_B_FTP_A2DP BIT(29)
+
+#define BT_COEX_STATE_BT_CNT_LEVEL_0 BIT(0)
+#define BT_COEX_STATE_BT_CNT_LEVEL_1 BIT(1)
+#define BT_COEX_STATE_BT_CNT_LEVEL_2 BIT(2)
+#define BT_COEX_STATE_BT_CNT_LEVEL_3 BIT(3)
+
+#define BT_RSSI_STATE_HIGH 0
+#define BT_RSSI_STATE_MEDIUM 1
+#define BT_RSSI_STATE_LOW 2
+#define BT_RSSI_STATE_STAY_HIGH 3
+#define BT_RSSI_STATE_STAY_MEDIUM 4
+#define BT_RSSI_STATE_STAY_LOW 5
+
+#define BT_AGCTABLE_OFF 0
+#define BT_AGCTABLE_ON 1
+#define BT_BB_BACKOFF_OFF 0
+#define BT_BB_BACKOFF_ON 1
+#define BT_FW_NAV_OFF 0
+#define BT_FW_NAV_ON 1
+
+#define BT_COEX_MECH_NONE 0
+#define BT_COEX_MECH_SCO 1
+#define BT_COEX_MECH_HID 2
+#define BT_COEX_MECH_A2DP 3
+#define BT_COEX_MECH_PAN 4
+#define BT_COEX_MECH_HID_A2DP 5
+#define BT_COEX_MECH_HID_PAN 6
+#define BT_COEX_MECH_PAN_A2DP 7
+#define BT_COEX_MECH_HID_SCO_ESCO 8
+#define BT_COEX_MECH_FTP_A2DP 9
+#define BT_COEX_MECH_COMMON 10
+#define BT_COEX_MECH_MAX 11
+
+#define BT_DBG_PROFILE_NONE 0
+#define BT_DBG_PROFILE_SCO 1
+#define BT_DBG_PROFILE_HID 2
+#define BT_DBG_PROFILE_A2DP 3
+#define BT_DBG_PROFILE_PAN 4
+#define BT_DBG_PROFILE_HID_A2DP 5
+#define BT_DBG_PROFILE_HID_PAN 6
+#define BT_DBG_PROFILE_PAN_A2DP 7
+#define BT_DBG_PROFILE_MAX 9
+
+#define BTINFO_B_FTP BIT(7)
+#define BTINFO_B_A2DP BIT(6)
+#define BTINFO_B_HID BIT(5)
+#define BTINFO_B_SCO_BUSY BIT(4)
+#define BTINFO_B_ACL_BUSY BIT(3)
+#define BTINFO_B_INQ_PAGE BIT(2)
+#define BTINFO_B_SCO_ESCO BIT(1)
+#define BTINFO_B_CONNECTION BIT(0)
+
+
+void rtl8821ae_btdm_coex_all_off(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw);
+
+void rtl8821ae_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw);
+long rtl8821ae_dm_bt_get_rx_ss(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_balance(struct ieee80211_hw *hw,
+ bool b_balance_on, u8 ms0, u8 ms1);
+void rtl8821ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type);
+void rtl8821ae_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type);
+u8 rtl8821ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
+ u8 level_num, u8 rssi_thresh, u8 rssi_thresh1);
+u8 rtl8821ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
+ u8 level_num, u8 rssi_thresh, u8 rssi_thresh1);
+void _rtl8821ae_dm_bt_check_wifi_state(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw,
+ bool b_reject);
+
+#if 0
+VOID
+BTDM_PWDBMonitor(
+ PADAPTER Adapter
+ );
+
+BOOLEAN
+BTDM_DIGByBTRSSI(
+ PADAPTER Adapter
+ );
+#endif
+bool rtl8821ae_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw);
+bool rtl8821ae_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw);
+#endif
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c b/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c
new file mode 100644
index 000000000000..79386ee142f9
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c
@@ -0,0 +1,2069 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include "hal_btc.h"
+#include "../pci.h"
+#include "phy.h"
+#include "fw.h"
+#include "reg.h"
+#include "def.h"
+#include "../btcoexist/rtl_btc.h"
+
+static struct bt_coexist_8821ae hal_coex_8821ae;
+
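+/*
+ * If inactive power save is enabled, clear the recorded coexistence state and
+ * force all coexistence mechanisms off before entering it.
+ */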
+void rtl8821ae_dm_bt_turn_off_bt_coexist_before_enter_lps(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ if(!rtlpcipriv->btcoexist.bt_coexistence)
+ return;
+
+ if(ppsc->b_inactiveps) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,("[BT][DM], Before enter IPS, turn off all Coexist DM\n"));
+ rtlpcipriv->btcoexist.current_state = 0;
+ rtlpcipriv->btcoexist.previous_state = 0;
+ rtlpcipriv->btcoexist.current_state_h = 0;
+ rtlpcipriv->btcoexist.previous_state_h = 0;
+ rtl8821ae_btdm_coex_all_off(hw);
+ }
+}
+
+
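+/*
+ * Report RT_MEDIA_CONNECT when the interface runs in IBSS mode or mac80211
+ * reports a linked state, RT_MEDIA_DISCONNECT otherwise.
+ */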
+enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ enum rt_media_status m_status = RT_MEDIA_DISCONNECT;
+
+ u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
+
+ if(bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+ m_status = RT_MEDIA_CONNECT;
+ }
+
+ return m_status;
+}
+
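+/*
+ * Tell the BT firmware about the WiFi association state, the current channel
+ * and the channel bandwidth via H2C command 0x19.
+ */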
+void rtl_8821ae_bt_wifi_media_status_notify(struct ieee80211_hw *hw, bool mstatus)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 h2c_parameter[3] ={0};
+ u8 chnl;
+
+ if(!rtlpcipriv->btcoexist.bt_coexistence)
+ return;
+
+ if(RT_MEDIA_CONNECT == mstatus)
+ h2c_parameter[0] = 0x1; // 0: disconnected, 1:connected
+ else
+ h2c_parameter[0] = 0x0;
+
+ if(mgnt_link_status_query(hw)) {
+ chnl = rtlphy->current_channel;
+ h2c_parameter[1] = chnl;
+ }
+
+ if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40){
+ h2c_parameter[2] = 0x30;
+ } else {
+ h2c_parameter[2] = 0x20;
+ }
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,("[BTCoex], FW write 0x19=0x%x\n",
+ h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x19, 3, h2c_parameter);
+
+}
+
+
+bool rtl8821ae_dm_bt_is_wifi_busy(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ if(rtlpriv->link_info.b_busytraffic ||
+ rtlpriv->link_info.b_rx_busy_traffic ||
+ rtlpriv->link_info.b_tx_busy_traffic)
+ return true;
+ else
+ return false;
+}
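+
+/* Send the five PS-TDMA parameter bytes to the firmware via H2C command 0x3a. */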
+void rtl8821ae_dm_bt_set_fw_3a(struct ieee80211_hw *hw,
+ u8 byte1, u8 byte2, u8 byte3, u8 byte4, u8 byte5)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter[5] ={0};
+ h2c_parameter[0] = byte1;
+ h2c_parameter[1] = byte2;
+ h2c_parameter[2] = byte3;
+ h2c_parameter[3] = byte4;
+ h2c_parameter[4] = byte5;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], FW write 0x3a(4bytes)=0x%x%8x\n",
+ h2c_parameter[0], h2c_parameter[1]<<24 | h2c_parameter[2]<<16 | h2c_parameter[3]<<8 | h2c_parameter[4]));
+ rtl8821ae_fill_h2c_cmd(hw, 0x3a, 5, h2c_parameter);
+}
+
+bool rtl8821ae_dm_bt_need_to_dec_bt_pwr(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Need to decrease bt power\n"));
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_DEC_BT_POWER;
+ return true;
+ }
+
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_DEC_BT_POWER;
+ return false;
+}
+
+
+bool rtl8821ae_dm_bt_is_same_coexist_state(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+ if ((rtlpcipriv->btcoexist.previous_state
+ == rtlpcipriv->btcoexist.current_state)
+ &&(rtlpcipriv->btcoexist.previous_state_h
+ == rtlpcipriv->btcoexist.current_state_h)) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[DM][BT], Coexist state do not chang!!\n"));
+ return true;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[DM][BT], Coexist state changed!!\n"));
+ return false;
+ }
+}
+
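+/* Program the coexistence table registers 0x6c0, 0x6c8 and 0x6cc. */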
+void rtl8821ae_dm_bt_set_coex_table(struct ieee80211_hw *hw,
+ u32 val_0x6c0, u32 val_0x6c8, u32 val_0x6cc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("set coex table, set 0x6c0=0x%x\n", val_0x6c0));
+ rtl_write_dword(rtlpriv, 0x6c0, val_0x6c0);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("set coex table, set 0x6c8=0x%x\n", val_0x6c8));
+ rtl_write_dword(rtlpriv, 0x6c8, val_0x6c8);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("set coex table, set 0x6cc=0x%x\n", val_0x6cc));
+ rtl_write_byte(rtlpriv, 0x6cc, val_0x6cc);
+}
+
+void rtl8821ae_dm_bt_set_hw_pta_mode(struct ieee80211_hw *hw, bool b_mode)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (BT_PTA_MODE_ON == b_mode) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("PTA mode on, "));
+ /* Enable GPIO 0/1/2/3/8 pins for bt */
+ rtl_write_byte(rtlpriv, 0x40, 0x20);
+ rtlpcipriv->btcoexist.b_hw_coexist_all_off = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("PTA mode off\n"));
+ rtl_write_byte(rtlpriv, 0x40, 0x0);
+ }
+}
+
+void rtl8821ae_dm_bt_set_sw_rf_rx_lpf_corner(struct ieee80211_hw *hw, u8 type)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (BT_RF_RX_LPF_CORNER_SHRINK == type) {
+ /* Shrink RF Rx LPF corner, 0x1e[7:4]=1111 ==> [11:4] by Jenyu */
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Shrink RF Rx LPF corner!!\n"));
+ /* PHY_SetRFReg(Adapter, (RF_RADIO_PATH_E)PathA, 0x1e, 0xf0, 0xf); */
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff, 0xf0ff7);
+ rtlpcipriv->btcoexist.b_sw_coexist_all_off = false;
+ } else if(BT_RF_RX_LPF_CORNER_RESUME == type) {
+ /*Resume RF Rx LPF corner*/
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Resume RF Rx LPF corner!!\n"));
+ /* PHY_SetRFReg(Adapter, (RF_RADIO_PATH_E)PathA, 0x1e, 0xf0,
+ * pHalData->btcoexist.BtRfRegOrigin1E); */
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff,
+ rtlpcipriv->btcoexist.bt_rfreg_origin_1e);
+ }
+}
+
+void rtl8821ae_dm_bt_set_sw_penalty_tx_rate_adaptive(struct ieee80211_hw *hw,
+ u8 ra_type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ u8 tmp_u1;
+
+ tmp_u1 = rtl_read_byte(rtlpriv, 0x4fd);
+ tmp_u1 |= BIT(0);
+ if (BT_TX_RATE_ADAPTIVE_LOW_PENALTY == ra_type) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Tx rate adaptive, set low penalty!!\n"));
+ tmp_u1 &= ~BIT(2);
+ rtlpcipriv->btcoexist.b_sw_coexist_all_off = false;
+ } else if(BT_TX_RATE_ADAPTIVE_NORMAL == ra_type) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Tx rate adaptive, set normal!!\n"));
+ tmp_u1 |= BIT(2);
+ }
+
+ rtl_write_byte(rtlpriv, 0x4fd, tmp_u1);
+}
+
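+/*
+ * Reset a btdm_8821ae descriptor to its defaults: all dynamic coexistence
+ * mechanisms off, PTA on.
+ */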
+void rtl8821ae_dm_bt_btdm_structure_reload(struct ieee80211_hw *hw,
+ struct btdm_8821ae *p_btdm)
+{
+ p_btdm->b_all_off = false;
+ p_btdm->b_agc_table_en = false;
+ p_btdm->b_adc_back_off_on = false;
+ p_btdm->b2_ant_hid_en = false;
+ p_btdm->b_low_penalty_rate_adaptive = false;
+ p_btdm->b_rf_rx_lpf_shrink = false;
+ p_btdm->b_reject_aggre_pkt= false;
+
+ p_btdm->b_tdma_on = false;
+ p_btdm->tdma_ant = TDMA_2ANT;
+ p_btdm->tdma_nav = TDMA_NAV_OFF;
+ p_btdm->tdma_dac_swing = TDMA_DAC_SWING_OFF;
+ p_btdm->fw_dac_swing_lvl = 0x20;
+
+ p_btdm->b_tra_tdma_on = false;
+ p_btdm->tra_tdma_ant = TDMA_2ANT;
+ p_btdm->tra_tdma_nav = TDMA_NAV_OFF;
+ p_btdm->b_ignore_wlan_act = false;
+
+ p_btdm->b_ps_tdma_on = false;
+ p_btdm->ps_tdma_byte[0] = 0x0;
+ p_btdm->ps_tdma_byte[1] = 0x0;
+ p_btdm->ps_tdma_byte[2] = 0x0;
+ p_btdm->ps_tdma_byte[3] = 0x8;
+ p_btdm->ps_tdma_byte[4] = 0x0;
+
+ p_btdm->b_pta_on = true;
+ p_btdm->val_0x6c0 = 0x5a5aaaaa;
+ p_btdm->val_0x6c8 = 0xcc;
+ p_btdm->val_0x6cc = 0x3;
+
+ p_btdm->b_sw_dac_swing_on = false;
+ p_btdm->sw_dac_swing_lvl = 0xc0;
+ p_btdm->wlan_act_hi = 0x20;
+ p_btdm->wlan_act_lo = 0x10;
+ p_btdm->bt_retry_index = 2;
+
+ p_btdm->b_dec_bt_pwr = false;
+}
+
+void rtl8821ae_dm_bt_btdm_structure_reload_all_off(struct ieee80211_hw *hw,
+ struct btdm_8821ae *p_btdm)
+{
+ rtl8821ae_dm_bt_btdm_structure_reload(hw, p_btdm);
+ p_btdm->b_all_off = true;
+ p_btdm->b_pta_on = false;
+ p_btdm->wlan_act_hi = 0x10;
+}
+
+bool rtl8821ae_dm_bt_is_2_ant_common_action(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct btdm_8821ae btdm8821ae;
+ bool b_common = false;
+
+ rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+
+ if(!rtl8821ae_dm_bt_is_wifi_busy(hw)
+ && !rtlpcipriv->btcoexist.b_bt_busy) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("Wifi idle + Bt idle, bt coex mechanism always off!!\n"));
+ rtl8821ae_dm_bt_btdm_structure_reload_all_off(hw, &btdm8821ae);
+ b_common = true;
+ } else if (rtl8821ae_dm_bt_is_wifi_busy(hw)
+ && !rtlpcipriv->btcoexist.b_bt_busy) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("Wifi non-idle + Bt disabled/idle!!\n"));
+ btdm8821ae.b_low_penalty_rate_adaptive = true;
+ btdm8821ae.b_rf_rx_lpf_shrink = false;
+ btdm8821ae.b_reject_aggre_pkt = false;
+
+ /* sw mechanism */
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+
+ btdm8821ae.b_pta_on = true;
+ btdm8821ae.val_0x6c0 = 0x5a5aaaaa;
+ btdm8821ae.val_0x6c8 = 0xcccc;
+ btdm8821ae.val_0x6cc = 0x3;
+
+ btdm8821ae.b_tdma_on = false;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+ btdm8821ae.b2_ant_hid_en = false;
+
+ b_common = true;
+ }else if (rtlpcipriv->btcoexist.b_bt_busy) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("Bt non-idle!\n"));
+ if(mgnt_link_status_query(hw) == RT_MEDIA_CONNECT){
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi connection exist\n"))
+ b_common = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("No Wifi connection!\n"));
+ btdm8821ae.b_rf_rx_lpf_shrink = true;
+ btdm8821ae.b_low_penalty_rate_adaptive = false;
+ btdm8821ae.b_reject_aggre_pkt = false;
+
+ /* sw mechanism */
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+
+ btdm8821ae.b_pta_on = true;
+ btdm8821ae.val_0x6c0 = 0x55555555;
+ btdm8821ae.val_0x6c8 = 0x0000ffff;
+ btdm8821ae.val_0x6cc = 0x3;
+
+ btdm8821ae.b_tdma_on = false;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+ btdm8821ae.b2_ant_hid_en = false;
+
+ b_common = true;
+ }
+ }
+
+ if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+ btdm8821ae.b_dec_bt_pwr = true;
+ }
+
+ if(b_common)
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BTINFO_COMMON;
+
+ if (b_common && rtl8821ae_dm_bt_is_coexist_state_changed(hw))
+ rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+
+ return b_common;
+}
+
+void rtl8821ae_dm_bt_set_sw_full_time_dac_swing(
+ struct ieee80211_hw * hw, bool b_sw_dac_swing_on, u32 sw_dac_swing_lvl)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (b_sw_dac_swing_on) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl));
+ rtl8821ae_phy_set_bb_reg(hw, 0x880, 0xff000000, sw_dac_swing_lvl);
+ rtlpcipriv->btcoexist.b_sw_coexist_all_off = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], SwDacSwing Off!\n"));
+ rtl8821ae_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0);
+ }
+}
+
+void rtl8821ae_dm_bt_set_fw_dec_bt_pwr(
+ struct ieee80211_hw *hw, bool b_dec_bt_pwr)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter[1] ={0};
+
+ h2c_parameter[0] = 0;
+
+ if (b_dec_bt_pwr) {
+ h2c_parameter[0] |= BIT(1);
+ rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+ }
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], decrease Bt Power : %s, write 0x21=0x%x\n",
+ (b_dec_bt_pwr? "Yes!!":"No!!"), h2c_parameter[0]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x21, 1, h2c_parameter);
+}
+
+
+void rtl8821ae_dm_bt_set_fw_2_ant_hid(struct ieee80211_hw *hw,
+ bool b_enable, bool b_dac_swing_on)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter[1] ={0};
+
+ if (b_enable) {
+ h2c_parameter[0] |= BIT(0);
+ rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+ }
+ if (b_dac_swing_on) {
+ h2c_parameter[0] |= BIT(1); /* Dac Swing default enable */
+ }
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], turn 2-Ant+HID mode %s, DACSwing:%s, write 0x15=0x%x\n",
+ (b_enable ? "ON!!":"OFF!!"), (b_dac_swing_on ? "ON":"OFF"),
+ h2c_parameter[0]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x15, 1, h2c_parameter);
+}
+
+void rtl8821ae_dm_bt_set_fw_tdma_ctrl(struct ieee80211_hw *hw,
+ bool b_enable, u8 ant_num, u8 nav_en, u8 dac_swing_en)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ u8 h2c_parameter[1] ={0};
+ u8 h2c_parameter1[1] = {0};
+
+ h2c_parameter[0] = 0;
+ h2c_parameter1[0] = 0;
+
+ if(b_enable) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], set BT PTA update manager to trigger update!!\n"));
+ h2c_parameter1[0] |= BIT(0);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], turn TDMA mode ON!!\n"));
+ h2c_parameter[0] |= BIT(0); /* function enable */
+ if (TDMA_1ANT == ant_num) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TDMA_1ANT\n"));
+ h2c_parameter[0] |= BIT(1);
+ } else if(TDMA_2ANT == ant_num) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TDMA_2ANT\n"));
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], Unknown Ant\n"));
+ }
+
+ if (TDMA_NAV_OFF == nav_en) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TDMA_NAV_OFF\n"));
+ } else if (TDMA_NAV_ON == nav_en) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TDMA_NAV_ON\n"));
+ h2c_parameter[0] |= BIT(2);
+ }
+
+ if (TDMA_DAC_SWING_OFF == dac_swing_en) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], TDMA_DAC_SWING_OFF\n"));
+ } else if(TDMA_DAC_SWING_ON == dac_swing_en) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], TDMA_DAC_SWING_ON\n"));
+ h2c_parameter[0] |= BIT(4);
+ }
+ rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], set BT PTA update manager to no update!!\n"));
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], turn TDMA mode OFF!!\n"));
+ }
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], FW2AntTDMA, write 0x26=0x%x\n", h2c_parameter1[0]));
+ rtl8821ae_fill_h2c_cmd(hw, 0x26, 1, h2c_parameter1);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], FW2AntTDMA, write 0x14=0x%x\n", h2c_parameter[0]));
+ rtl8821ae_fill_h2c_cmd(hw, 0x14, 1, h2c_parameter);
+
+ if (!b_enable) {
+ /* delay_ms(2);
+ * PlatformEFIOWrite1Byte(Adapter, 0x778, 0x1); */
+ }
+}
+
+
+void rtl8821ae_dm_bt_set_fw_ignore_wlan_act( struct ieee80211_hw *hw, bool b_enable)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ u8 h2c_parameter[1] ={0};
+
+ if (b_enable) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], BT Ignore Wlan_Act !!\n"));
+ h2c_parameter[0] |= BIT(0); // function enable
+ rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], BT don't ignore Wlan_Act !!\n"));
+ }
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], set FW for BT Ignore Wlan_Act, write 0x25=0x%x\n",
+ h2c_parameter[0]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x25, 1, h2c_parameter);
+}
+
+
+void rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(struct ieee80211_hw *hw,
+ bool b_enable, u8 ant_num, u8 nav_en
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ //struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ u8 h2c_parameter[2] ={0};
+
+
+ if (b_enable) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], turn TTDMA mode ON!!\n"));
+ h2c_parameter[0] |= BIT(0); // function enable
+ if (TDMA_1ANT == ant_num) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TTDMA_1ANT\n"));
+ h2c_parameter[0] |= BIT(1);
+ } else if (TDMA_2ANT == ant_num) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TTDMA_2ANT\n"));
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], Unknown Ant\n"));
+ }
+
+ if (TDMA_NAV_OFF == nav_en) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TTDMA_NAV_OFF\n"));
+ } else if (TDMA_NAV_ON == nav_en) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TTDMA_NAV_ON\n"));
+ h2c_parameter[1] |= BIT(0);
+ }
+
+ rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], turn TTDMA mode OFF!!\n"));
+ }
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], FW Traditional TDMA, write 0x33=0x%x\n",
+ h2c_parameter[0] << 8| h2c_parameter[1]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x33, 2, h2c_parameter);
+}
+
+
+void rtl8821ae_dm_bt_set_fw_dac_swing_level(struct ieee80211_hw *hw,
+ u8 dac_swing_lvl)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter[1] ={0};
+ h2c_parameter[0] = dac_swing_lvl;
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl));
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], write 0x29=0x%x\n", h2c_parameter[0]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x29, 1, h2c_parameter);
+}
+
+void rtl8821ae_dm_bt_set_fw_bt_hid_info(struct ieee80211_hw *hw, bool b_enable)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter[1] ={0};
+ h2c_parameter[0] = 0;
+
+ if(b_enable){
+ h2c_parameter[0] |= BIT(0);
+ rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+ }
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], Set BT HID information=0x%x\n", b_enable));
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], write 0x24=0x%x\n", h2c_parameter[0]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x24, 1, h2c_parameter);
+}
+
+void rtl8821ae_dm_bt_set_fw_bt_retry_index(struct ieee80211_hw *hw,
+ u8 retry_index)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter[1] ={0};
+ h2c_parameter[0] = retry_index;
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], Set BT Retry Index=%d\n", retry_index));
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], write 0x23=0x%x\n", h2c_parameter[0]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x23, 1, h2c_parameter);
+}
+
+void rtl8821ae_dm_bt_set_fw_wlan_act(struct ieee80211_hw *hw,
+ u8 wlan_act_hi, u8 wlan_act_lo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter_hi[1] ={0};
+ u8 h2c_parameter_lo[1] ={0};
+ h2c_parameter_hi[0] = wlan_act_hi;
+ h2c_parameter_lo[0] = wlan_act_lo;
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], Set WLAN_ACT Hi:Lo=0x%x/0x%x\n", wlan_act_hi, wlan_act_lo));
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], write 0x22=0x%x\n", h2c_parameter_hi[0]));
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], write 0x11=0x%x\n", h2c_parameter_lo[0]));
+
+ /* WLAN_ACT = High duration, unit:ms */
+ rtl8821ae_fill_h2c_cmd(hw, 0x22, 1, h2c_parameter_hi);
+ /* WLAN_ACT = Low duration, unit:3*625us */
+ rtl8821ae_fill_h2c_cmd(hw, 0x11, 1, h2c_parameter_lo);
+}
+
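+/*
+ * Apply a new btdm_8821ae coexistence configuration: compare it against the
+ * cached setting, log any differences, then program the sw/fw/hw mechanisms
+ * in the required order (turn mechanisms off before turning others on).
+ */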
+void rtl8821ae_dm_bt_set_bt_dm(struct ieee80211_hw *hw, struct btdm_8821ae *p_btdm)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct btdm_8821ae *p_btdm_8821ae = &hal_coex_8821ae.btdm;
+ u8 i;
+
+ bool b_fw_current_inpsmode = false;
+ bool b_fw_ps_awake = true;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *) (&b_fw_current_inpsmode));
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
+ (u8 *) (&b_fw_ps_awake));
+
+ // Check whether the new setting differs from the cached one;
+ // if they are identical, don't apply the setting again.
+ if (memcmp(p_btdm_8821ae, p_btdm, sizeof(struct btdm_8821ae)) == 0) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], the same coexist setting, return!!\n"));
+ return;
+ } else { //save the new coexist setting
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], UPDATE TO NEW COEX SETTING!!\n"));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new bAllOff=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_all_off, p_btdm->b_all_off));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new b_agc_table_en=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_agc_table_en, p_btdm->b_agc_table_en));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new b_adc_back_off_on=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_adc_back_off_on, p_btdm->b_adc_back_off_on));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new b2_ant_hid_en=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b2_ant_hid_en, p_btdm->b2_ant_hid_en));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new bLowPenaltyRateAdaptive=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_low_penalty_rate_adaptive,
+ p_btdm->b_low_penalty_rate_adaptive));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new bRfRxLpfShrink=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_rf_rx_lpf_shrink, p_btdm->b_rf_rx_lpf_shrink));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new bRejectAggrePkt=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_reject_aggre_pkt, p_btdm->b_reject_aggre_pkt));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new b_tdma_on=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_tdma_on, p_btdm->b_tdma_on));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new tdmaAnt=0x%x/ 0x%x \n",
+ p_btdm_8821ae->tdma_ant, p_btdm->tdma_ant));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new tdmaNav=0x%x/ 0x%x \n",
+ p_btdm_8821ae->tdma_nav, p_btdm->tdma_nav));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new tdma_dac_swing=0x%x/ 0x%x \n",
+ p_btdm_8821ae->tdma_dac_swing, p_btdm->tdma_dac_swing));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new fw_dac_swing_lvl=0x%x/ 0x%x \n",
+ p_btdm_8821ae->fw_dac_swing_lvl, p_btdm->fw_dac_swing_lvl));
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new bTraTdmaOn=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_tra_tdma_on, p_btdm->b_tra_tdma_on));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new traTdmaAnt=0x%x/ 0x%x \n",
+ p_btdm_8821ae->tra_tdma_ant, p_btdm->tra_tdma_ant));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new traTdmaNav=0x%x/ 0x%x \n",
+ p_btdm_8821ae->tra_tdma_nav, p_btdm->tra_tdma_nav));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new bPsTdmaOn=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_ps_tdma_on, p_btdm->b_ps_tdma_on));
+ for(i=0; i<5; i++)
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new psTdmaByte[i]=0x%x/ 0x%x \n",
+ p_btdm_8821ae->ps_tdma_byte[i], p_btdm->ps_tdma_byte[i]));
+ }
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new bIgnoreWlanAct=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_ignore_wlan_act, p_btdm->b_ignore_wlan_act));
+
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new bPtaOn=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_pta_on, p_btdm->b_pta_on));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new val_0x6c0=0x%x/ 0x%x \n",
+ p_btdm_8821ae->val_0x6c0, p_btdm->val_0x6c0));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new val_0x6c8=0x%x/ 0x%x \n",
+ p_btdm_8821ae->val_0x6c8, p_btdm->val_0x6c8));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new val_0x6cc=0x%x/ 0x%x \n",
+ p_btdm_8821ae->val_0x6cc, p_btdm->val_0x6cc));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new b_sw_dac_swing_on=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_sw_dac_swing_on, p_btdm->b_sw_dac_swing_on));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new sw_dac_swing_lvl=0x%x/ 0x%x \n",
+ p_btdm_8821ae->sw_dac_swing_lvl, p_btdm->sw_dac_swing_lvl));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new wlanActHi=0x%x/ 0x%x \n",
+ p_btdm_8821ae->wlan_act_hi, p_btdm->wlan_act_hi));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new wlanActLo=0x%x/ 0x%x \n",
+ p_btdm_8821ae->wlan_act_lo, p_btdm->wlan_act_lo));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new btRetryIndex=0x%x/ 0x%x \n",
+ p_btdm_8821ae->bt_retry_index, p_btdm->bt_retry_index));
+
+ memcpy(p_btdm_8821ae, p_btdm, sizeof(struct btdm_8821ae));
+ }
+ /*
+ * Here we only consider the case when a BT operation
+ * (inquiry/paging/pairing) is in progress;
+ * in that case we only need to turn off TDMA.
+ */
+
+ if (rtlpcipriv->btcoexist.b_hold_for_bt_operation) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], set to ignore wlanAct for BT OP!!\n"));
+ rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, true);
+ return;
+ }
+
+ if (p_btdm->b_all_off) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], disable all coexist mechanism !!\n"));
+ rtl8821ae_btdm_coex_all_off(hw);
+ return;
+ }
+
+ rtl8821ae_dm_bt_reject_ap_aggregated_packet(hw, p_btdm->b_reject_aggre_pkt);
+
+ if(p_btdm->b_low_penalty_rate_adaptive)
+ rtl8821ae_dm_bt_set_sw_penalty_tx_rate_adaptive(hw,
+ BT_TX_RATE_ADAPTIVE_LOW_PENALTY);
+ else
+ rtl8821ae_dm_bt_set_sw_penalty_tx_rate_adaptive(hw,
+ BT_TX_RATE_ADAPTIVE_NORMAL);
+
+ if(p_btdm->b_rf_rx_lpf_shrink)
+ rtl8821ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, BT_RF_RX_LPF_CORNER_SHRINK);
+ else
+ rtl8821ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, BT_RF_RX_LPF_CORNER_RESUME);
+
+ if(p_btdm->b_agc_table_en)
+ rtl8821ae_dm_bt_agc_table(hw, BT_AGCTABLE_ON);
+ else
+ rtl8821ae_dm_bt_agc_table(hw, BT_AGCTABLE_OFF);
+
+ if(p_btdm->b_adc_back_off_on)
+ rtl8821ae_dm_bt_bb_back_off_level(hw, BT_BB_BACKOFF_ON);
+ else
+ rtl8821ae_dm_bt_bb_back_off_level(hw, BT_BB_BACKOFF_OFF);
+
+ rtl8821ae_dm_bt_set_fw_bt_retry_index(hw, p_btdm->bt_retry_index);
+
+ rtl8821ae_dm_bt_set_fw_dac_swing_level(hw, p_btdm->fw_dac_swing_lvl);
+ rtl8821ae_dm_bt_set_fw_wlan_act(hw, p_btdm->wlan_act_hi, p_btdm->wlan_act_lo);
+
+ rtl8821ae_dm_bt_set_coex_table(hw, p_btdm->val_0x6c0,
+ p_btdm->val_0x6c8, p_btdm->val_0x6cc);
+ rtl8821ae_dm_bt_set_hw_pta_mode(hw, p_btdm->b_pta_on);
+
+ /*
+ * Note: There is a constraint between TDMA and 2AntHID:
+ * only one of 2AntHid and TDMA can be turned on at a time.
+ * We should first turn off the mechanisms that need to be off,
+ * and only then turn on the mechanisms that need to be on.
+ */
+#if 1
+ if(p_btdm->b2_ant_hid_en) {
+ // turn off tdma
+ rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on,
+ p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+ rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, p_btdm->tdma_ant,
+ p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+
+ // turn off Pstdma
+ rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, p_btdm->b_ignore_wlan_act);
+ rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); // Antenna control by PTA, 0x870 = 0x300.
+
+ // turn on 2AntHid
+ rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, true);
+ rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, true, true);
+ } else if(p_btdm->b_tdma_on) {
+ // turn off 2AntHid
+ rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+ rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+
+ // turn off pstdma
+ rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, p_btdm->b_ignore_wlan_act);
+ rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); // Antenna control by PTA, 0x870 = 0x300.
+
+ // turn on tdma
+ rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+ rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, true, p_btdm->tdma_ant, p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+ } else if(p_btdm->b_ps_tdma_on) {
+ // turn off 2AntHid
+ rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+ rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+
+ // turn off tdma
+ rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+ rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, p_btdm->tdma_ant, p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+
+ // turn on pstdma
+ rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, p_btdm->b_ignore_wlan_act);
+ rtl8821ae_dm_bt_set_fw_3a(hw,
+ p_btdm->ps_tdma_byte[0],
+ p_btdm->ps_tdma_byte[1],
+ p_btdm->ps_tdma_byte[2],
+ p_btdm->ps_tdma_byte[3],
+ p_btdm->ps_tdma_byte[4]);
+ } else {
+ // turn off 2AntHid
+ rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+ rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+
+ // turn off tdma
+ rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+ rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, p_btdm->tdma_ant, p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+
+ // turn off pstdma
+ rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, p_btdm->b_ignore_wlan_act);
+ rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); // Antenna control by PTA, 0x870 = 0x300.
+ }
+#else
+ if (p_btdm->b_tdma_on) {
+ if(p_btdm->b_ps_tdma_on) {
+ } else {
+ rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);
+ }
+ /* Turn off 2AntHID first then turn tdma ON */
+ rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+ rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+ rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+ rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, true,
+ p_btdm->tdma_ant, p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+ } else {
+ /* Turn off tdma first then turn 2AntHID ON if need */
+ rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+ rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, p_btdm->tdma_ant,
+ p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+ if (p_btdm->b2_ant_hid_en) {
+ rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, true);
+ rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, true, true);
+ } else {
+ rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+ rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+ }
+ if(p_btdm->b_ps_tdma_on) {
+ rtl8821ae_dm_bt_set_fw_3a(hw, p_btdm->ps_tdma_byte[0], p_btdm->ps_tdma_byte[1],
+ p_btdm->ps_tdma_byte[2], p_btdm->ps_tdma_byte[3], p_btdm->ps_tdma_byte[4]);
+ } else {
+ rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);
+ }
+ }
+#endif
+
+ /*
+ * Note:
+ * We should add a delay to make sure the sw DAC swing is set successfully,
+ * because rtl8821ae_dm_bt_set_fw_2_ant_hid() and rtl8821ae_dm_bt_set_fw_tdma_ctrl()
+ * will overwrite register 0x880.
+ */
+ mdelay(30);
+ rtl8821ae_dm_bt_set_sw_full_time_dac_swing(hw,
+ p_btdm->b_sw_dac_swing_on, p_btdm->sw_dac_swing_lvl);
+ rtl8821ae_dm_bt_set_fw_dec_bt_pwr(hw, p_btdm->b_dec_bt_pwr);
+}
+
+void rtl8821ae_dm_bt_bt_state_update_2_ant_hid(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], HID busy!!\n"));
+ rtlpcipriv->btcoexist.b_bt_busy = true;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_BT_IDLE;
+}
+
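+/*
+ * Mark BT as busy for PAN traffic unless the low-priority tx/rx counters are
+ * imbalanced by more than a factor of ten (treated as idle). Note that the
+ * ratio test divides by the smaller counter, so it relies on that counter
+ * being non-zero.
+ */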
+void rtl8821ae_dm_bt_bt_state_update_2_ant_pan(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ bool b_idle = false;
+
+ if (hal_coex_8821ae.low_priority_tx >=
+ hal_coex_8821ae.low_priority_rx) {
+ if((hal_coex_8821ae.low_priority_tx/
+ hal_coex_8821ae.low_priority_rx) > 10) {
+ b_idle = true;
+ }
+ } else {
+ if((hal_coex_8821ae.low_priority_rx/
+ hal_coex_8821ae.low_priority_tx) > 10) {
+ b_idle = true;
+ }
+ }
+
+ if(!b_idle) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], PAN busy!!\n"));
+ rtlpcipriv->btcoexist.b_bt_busy = true;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_BT_IDLE;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], PAN idle!!\n"));
+ }
+}
+
+void rtl8821ae_dm_bt_2_ant_sco_action(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct btdm_8821ae btdm8821ae;
+ u8 bt_rssi_state;
+
+ rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+ btdm8821ae.b_rf_rx_lpf_shrink = true;
+ btdm8821ae.b_low_penalty_rate_adaptive = true;
+ btdm8821ae.b_reject_aggre_pkt = false;
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT40\n"));
+ /* coex table */
+ btdm8821ae.val_0x6c0 = 0x5a5aaaaa;
+ btdm8821ae.val_0x6c8 = 0xcc;
+ btdm8821ae.val_0x6cc = 0x3;
+ /* sw mechanism */
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ /* fw mechanism */
+ btdm8821ae.b_tdma_on = false;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT20 or Legacy\n"));
+ bt_rssi_state
+ = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, BT_FW_COEX_THRESH_47, 0);
+
+ /* coex table */
+ btdm8821ae.val_0x6c0 = 0x5a5aaaaa;
+ btdm8821ae.val_0x6c8 = 0xcc;
+ btdm8821ae.val_0x6cc = 0x3;
+ /* sw mechanism */
+ if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) {
+ btdm8821ae.b_agc_table_en = true;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ } else {
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ }
+ /* fw mechanism */
+ btdm8821ae.b_tdma_on = false;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+ }
+
+ if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+ btdm8821ae.b_dec_bt_pwr = true;
+ }
+
+ if(rtl8821ae_dm_bt_is_coexist_state_changed(hw))
+ rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+}
+
+void rtl8821ae_dm_bt_2_ant_hid_action(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct btdm_8821ae btdm8821ae;
+ u8 bt_rssi_state;
+
+ rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+
+ btdm8821ae.b_rf_rx_lpf_shrink = true;
+ btdm8821ae.b_low_penalty_rate_adaptive = true;
+ btdm8821ae.b_reject_aggre_pkt = false;
+
+ // coex table
+ btdm8821ae.val_0x6c0 = 0x55555555;
+ btdm8821ae.val_0x6c8 = 0xffff;
+ btdm8821ae.val_0x6cc = 0x3;
+ btdm8821ae.b_ignore_wlan_act = true;
+
+ if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT40\n"));
+ // sw mechanism
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+
+ // fw mechanism
+ btdm8821ae.b_ps_tdma_on = true;
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xf;
+ btdm8821ae.ps_tdma_byte[2] = 0xf;
+ btdm8821ae.ps_tdma_byte[3] = 0x0;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+
+ btdm8821ae.b_tra_tdma_on = false;
+ btdm8821ae.b_tdma_on = false;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+ btdm8821ae.b2_ant_hid_en = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT20 or Legacy\n"));
+ bt_rssi_state =
+ rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, 47, 0);
+
+ if( (bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi high \n"));
+ // sw mechanism
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = true;
+ btdm8821ae.sw_dac_swing_lvl = 0x20;
+
+ // fw mechanism
+ btdm8821ae.b_ps_tdma_on = false;
+ btdm8821ae.b_tdma_on = false;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+ btdm8821ae.b2_ant_hid_en = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi low \n"));
+ // sw mechanism
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+
+ // fw mechanism
+ btdm8821ae.b_ps_tdma_on = false;
+ btdm8821ae.b_tdma_on = false;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+ btdm8821ae.b2_ant_hid_en = true;
+ btdm8821ae.fw_dac_swing_lvl = 0x20;
+ }
+ }
+
+ if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+ btdm8821ae.b_dec_bt_pwr = true;
+ }
+
+ if (rtl8821ae_dm_bt_is_coexist_state_changed(hw)) {
+ rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+ }
+}
+
+
+void rtl8821ae_dm_bt_2_ant_2_dp_action_no_profile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct btdm_8821ae btdm8821ae;
+ u8 bt_rssi_state;
+
+ rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+
+ btdm8821ae.b_rf_rx_lpf_shrink = true;
+ btdm8821ae.b_low_penalty_rate_adaptive = true;
+ btdm8821ae.b_reject_aggre_pkt = false;
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("HT40\n"));
+ if (rtl8821ae_dm_bt_is_wifi_up_link(hw)) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi Uplink\n"));
+ /* coex table */
+ btdm8821ae.val_0x6c0 = 0x5a5a5a5a;
+ btdm8821ae.val_0x6c8 = 0xcccc;
+ btdm8821ae.val_0x6cc = 0x3;
+ // sw mechanism
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ // fw mechanism
+ btdm8821ae.b_tra_tdma_on = true;
+ btdm8821ae.b_tdma_on = true;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_ON;
+ btdm8821ae.b2_ant_hid_en = false;
+ //btSpec = BTHCI_GetBTCoreSpecByProf(Adapter, BT_PROFILE_A2DP);
+ //if(btSpec >= BT_SPEC_2_1_EDR)
+ {
+ btdm8821ae.wlan_act_hi = 0x10;
+ btdm8821ae.wlan_act_lo = 0x10;
+ }
+ //else
+ //{
+ //btdm8821ae.wlanActHi = 0x20;
+ //btdm8821ae.wlanActLo = 0x20;
+ //}
+ btdm8821ae.bt_retry_index = 2;
+ btdm8821ae.fw_dac_swing_lvl = 0x18;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi Downlink\n"));
+ // coex table
+ btdm8821ae.val_0x6c0 = 0x5a5a5a5a;
+ btdm8821ae.val_0x6c8 = 0xcc;
+ btdm8821ae.val_0x6cc = 0x3;
+ // sw mechanism
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ // fw mechanism
+ btdm8821ae.b_tra_tdma_on = true;
+ btdm8821ae.b_tdma_on = true;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_ON;
+ btdm8821ae.b2_ant_hid_en = false;
+ //btSpec = BTHCI_GetBTCoreSpecByProf(Adapter, BT_PROFILE_A2DP);
+ //if(btSpec >= BT_SPEC_2_1_EDR)
+ {
+ btdm8821ae.wlan_act_hi = 0x10;
+ btdm8821ae.wlan_act_lo = 0x10;
+ }
+ //else
+ //{
+ // btdm8821ae.wlanActHi = 0x20;
+ // btdm8821ae.wlanActLo = 0x20;
+ //}
+ btdm8821ae.bt_retry_index = 2;
+ btdm8821ae.fw_dac_swing_lvl = 0x40;
+ }
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("HT20 or Legacy\n"));
+ bt_rssi_state = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, BT_FW_COEX_THRESH_47, 0);
+
+ if(rtl8821ae_dm_bt_is_wifi_up_link(hw))
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi Uplink\n"));
+ // coex table
+ btdm8821ae.val_0x6c0 = 0x5a5a5a5a;
+ btdm8821ae.val_0x6c8 = 0xcccc;
+ btdm8821ae.val_0x6cc = 0x3;
+ // sw mechanism
+ if( (bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) )
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi rssi high \n"));
+ btdm8821ae.b_agc_table_en = true;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi rssi low \n"));
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ }
+ // fw mechanism
+ btdm8821ae.b_tra_tdma_on = true;
+ btdm8821ae.b_tdma_on = true;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_ON;
+ btdm8821ae.b2_ant_hid_en = false;
+ //btSpec = BTHCI_GetBTCoreSpecByProf(Adapter, BT_PROFILE_A2DP);
+ //if(btSpec >= BT_SPEC_2_1_EDR)
+ {
+ btdm8821ae.wlan_act_hi = 0x10;
+ btdm8821ae.wlan_act_lo = 0x10;
+ }
+ //else
+ //{
+ //btdm8821ae.wlanActHi = 0x20;
+ //btdm8821ae.wlanActLo = 0x20;
+ //}
+ btdm8821ae.bt_retry_index = 2;
+ btdm8821ae.fw_dac_swing_lvl = 0x18;
+ }
+ else
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi Downlink\n"));
+ // coex table
+ btdm8821ae.val_0x6c0 = 0x5a5a5a5a;
+ btdm8821ae.val_0x6c8 = 0xcc;
+ btdm8821ae.val_0x6cc = 0x3;
+ // sw mechanism
+ if( (bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) )
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi rssi high \n"));
+ btdm8821ae.b_agc_table_en = true;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ }
+ else
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi rssi low \n"));
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ }
+ // fw mechanism
+ btdm8821ae.b_tra_tdma_on = true;
+ btdm8821ae.b_tdma_on = true;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_ON;
+ btdm8821ae.b2_ant_hid_en = false;
+ //btSpec = BTHCI_GetBTCoreSpecByProf(Adapter, BT_PROFILE_A2DP);
+ //if(btSpec >= BT_SPEC_2_1_EDR)
+ {
+ btdm8821ae.wlan_act_hi = 0x10;
+ btdm8821ae.wlan_act_lo = 0x10;
+ }
+ //else
+ //{
+ //btdm8821ae.wlanActHi = 0x20;
+ //btdm8821ae.wlanActLo = 0x20;
+ //}
+ btdm8821ae.bt_retry_index = 2;
+ btdm8821ae.fw_dac_swing_lvl = 0x40;
+ }
+ }
+
+ if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+ btdm8821ae.b_dec_bt_pwr = true;
+ }
+
+ if (rtl8821ae_dm_bt_is_coexist_state_changed(hw)) {
+ rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+ }
+}
+
+
+//============================================================
+// extern functions start with BTDM_
+//============================================================
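+/* Return the sum of the high-priority BT tx and rx counters. */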
+u32 rtl8821ae_dm_bt_tx_rx_couter_h(struct ieee80211_hw *hw)
+{
+ u32 counters=0;
+
+ counters = hal_coex_8821ae.high_priority_tx + hal_coex_8821ae.high_priority_rx ;
+ return counters;
+}
+
+u32 rtl8821ae_dm_bt_tx_rx_couter_l(struct ieee80211_hw *hw)
+{
+ u32 counters=0;
+
+ counters = hal_coex_8821ae.low_priority_tx + hal_coex_8821ae.low_priority_rx ;
+ return counters;
+}
+
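+/*
+ * Map the combined high- and low-priority BT tx/rx counter total onto one of
+ * four activity levels and record the level in current_state_h.
+ */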
+u8 rtl8821ae_dm_bt_bt_tx_rx_counter_level(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ u32 bt_tx_rx_cnt = 0;
+ u8 bt_tx_rx_cnt_lvl = 0;
+
+ bt_tx_rx_cnt = rtl8821ae_dm_bt_tx_rx_couter_h(hw)
+ + rtl8821ae_dm_bt_tx_rx_couter_l(hw);
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt));
+
+ rtlpcipriv->btcoexist.current_state_h &=
+ ~(BT_COEX_STATE_BT_CNT_LEVEL_0 | BT_COEX_STATE_BT_CNT_LEVEL_1 |
+ BT_COEX_STATE_BT_CNT_LEVEL_2);
+
+ if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_3) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT TxRx Counters at level 3\n"));
+ bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_3;
+ rtlpcipriv->btcoexist.current_state_h |= BT_COEX_STATE_BT_CNT_LEVEL_3;
+ } else if(bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_2) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT TxRx Counters at level 2\n"));
+ bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_2;
+ rtlpcipriv->btcoexist.current_state_h |= BT_COEX_STATE_BT_CNT_LEVEL_2;
+ } else if(bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT TxRx Counters at level 1\n"));
+ bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_1;
+ rtlpcipriv->btcoexist.current_state_h |= BT_COEX_STATE_BT_CNT_LEVEL_1;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT TxRx Counters at level 0\n"));
+ bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_0;
+ rtlpcipriv->btcoexist.current_state_h |= BT_COEX_STATE_BT_CNT_LEVEL_0;
+ }
+ return bt_tx_rx_cnt_lvl;
+}
+
+
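+/*
+ * Two-antenna coexistence policy for HID/SCO/eSCO profiles: pick the coex
+ * table, sw mechanisms and PS-TDMA bytes based on channel bandwidth, RSSI
+ * state and the BT tx/rx counter level.
+ */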
+void rtl8821ae_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct btdm_8821ae btdm8821ae;
+
+ u8 bt_rssi_state, bt_rssi_state1;
+ u8 bt_tx_rx_cnt_lvl = 0;
+
+ rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+
+
+ btdm8821ae.b_rf_rx_lpf_shrink = true;
+ btdm8821ae.b_low_penalty_rate_adaptive = true;
+ btdm8821ae.b_reject_aggre_pkt = false;
+
+ bt_tx_rx_cnt_lvl = rtl8821ae_dm_bt_bt_tx_rx_counter_level(hw);
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl));
+
+ if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT40\n"));
+ // coex table
+ btdm8821ae.val_0x6c0 = 0x55555555;
+ btdm8821ae.val_0x6c8 = 0xffff;
+ btdm8821ae.val_0x6cc = 0x3;
+
+ // sw mechanism
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+
+ // fw mechanism
+ btdm8821ae.b_ps_tdma_on = true;
+ if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x2;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xa;
+ btdm8821ae.ps_tdma_byte[2] = 0xa;
+ btdm8821ae.ps_tdma_byte[3] = 0x2;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xf;
+ btdm8821ae.ps_tdma_byte[2] = 0xf;
+ btdm8821ae.ps_tdma_byte[3] = 0x2;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT20 or Legacy\n"));
+ bt_rssi_state = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, 47, 0);
+ bt_rssi_state1 = rtl8821ae_dm_bt_check_coex_rssi_state1(hw, 2, 27, 0);
+
+ // coex table
+ btdm8821ae.val_0x6c0 = 0x55555555;
+ btdm8821ae.val_0x6c8 = 0xffff;
+ btdm8821ae.val_0x6cc = 0x3;
+
+ // sw mechanism
+ if( (bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi high \n"));
+ btdm8821ae.b_agc_table_en = true;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi low \n"));
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ }
+
+ // fw mechanism
+ btdm8821ae.b_ps_tdma_on = true;
+ if( (bt_rssi_state1 == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH) ) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,("Wifi rssi-1 high \n"));
+ // We only need to do this when the RSSI is high;
+ // when the RSSI is low, the value will be modified by the fw.
+ rtl_write_byte(rtlpriv, 0x883, 0x40);
+ if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x83;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xa;
+ btdm8821ae.ps_tdma_byte[2] = 0xa;
+ btdm8821ae.ps_tdma_byte[3] = 0x83;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xf;
+ btdm8821ae.ps_tdma_byte[2] = 0xf;
+ btdm8821ae.ps_tdma_byte[3] = 0x83;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi-1 low \n"));
+ if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2)
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x2;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xa;
+ btdm8821ae.ps_tdma_byte[2] = 0xa;
+ btdm8821ae.ps_tdma_byte[3] = 0x2;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xf;
+ btdm8821ae.ps_tdma_byte[2] = 0xf;
+ btdm8821ae.ps_tdma_byte[3] = 0x2;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+ }
+ }
+
+ if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+ btdm8821ae.b_dec_bt_pwr = true;
+ }
+
+ // Always ignore WlanAct if bHid|bSCOBusy|bSCOeSCO
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
+ hal_coex_8821ae.bt_inq_page_start_time, bt_tx_rx_cnt_lvl));
+ if( (hal_coex_8821ae.bt_inq_page_start_time) ||
+ (BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl) ) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], Set BT inquiry / page scan 0x3a setting\n"));
+ btdm8821ae.b_ps_tdma_on = true;
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x2;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+
+ if(rtl8821ae_dm_bt_is_coexist_state_changed(hw)) {
+ rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+ }
+}
+
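+/*
+ * 2-antenna coexistence action for BT FTP/A2DP traffic. Like the
+ * HID/SCO/eSCO handler above, it selects a coex table, software mechanism
+ * and firmware PS-TDMA pattern from the channel bandwidth, the wifi RSSI
+ * state and the BT TxRx counter level, then pushes the result through
+ * rtl8821ae_dm_bt_set_bt_dm() when the coexist state has changed.
+ */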
+void rtl8821ae_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct btdm_8821ae btdm8821ae;
+
+ u8 bt_rssi_state, bt_rssi_state1;
+ u32 bt_tx_rx_cnt_lvl = 0;
+
+ rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+
+ btdm8821ae.b_rf_rx_lpf_shrink = true;
+ btdm8821ae.b_low_penalty_rate_adaptive = true;
+ btdm8821ae.b_reject_aggre_pkt = false;
+
+ bt_tx_rx_cnt_lvl = rtl8821ae_dm_bt_bt_tx_rx_counter_level(hw);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl));
+
+ if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT40\n"));
+ bt_rssi_state = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, 37, 0);
+
+ // coex table
+ btdm8821ae.val_0x6c0 = 0x55555555;
+ btdm8821ae.val_0x6c8 = 0xffff;
+ btdm8821ae.val_0x6cc = 0x3;
+
+ // sw mechanism
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+
+ // fw mechanism
+ btdm8821ae.b_ps_tdma_on = true;
+ if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi high \n"));
+ if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x81;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xa;
+ btdm8821ae.ps_tdma_byte[2] = 0xa;
+ btdm8821ae.ps_tdma_byte[3] = 0x81;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xf;
+ btdm8821ae.ps_tdma_byte[2] = 0xf;
+ btdm8821ae.ps_tdma_byte[3] = 0x81;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi low \n"));
+ if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x0;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xa;
+ btdm8821ae.ps_tdma_byte[2] = 0xa;
+ btdm8821ae.ps_tdma_byte[3] = 0x0;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xf;
+ btdm8821ae.ps_tdma_byte[2] = 0xf;
+ btdm8821ae.ps_tdma_byte[3] = 0x0;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+ }
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT20 or Legacy\n"));
+ bt_rssi_state = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, 47, 0);
+ bt_rssi_state1 = rtl8821ae_dm_bt_check_coex_rssi_state1(hw, 2, 27, 0);
+
+ // coex table
+ btdm8821ae.val_0x6c0 = 0x55555555;
+ btdm8821ae.val_0x6c8 = 0xffff;
+ btdm8821ae.val_0x6cc = 0x3;
+
+ // sw mechanism
+ if( (bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi high \n"));
+ btdm8821ae.b_agc_table_en = true;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi low \n"));
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ }
+
+ // fw mechanism
+ btdm8821ae.b_ps_tdma_on = true;
+ if( (bt_rssi_state1 == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH) ) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi-1 high \n"));
+ // only when rssi is high do we need to do this;
+ // when rssi is low, the value will be modified by fw
+ rtl_write_byte(rtlpriv, 0x883, 0x40);
+ if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x81;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xa;
+ btdm8821ae.ps_tdma_byte[2] = 0xa;
+ btdm8821ae.ps_tdma_byte[3] = 0x81;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xf;
+ btdm8821ae.ps_tdma_byte[2] = 0xf;
+ btdm8821ae.ps_tdma_byte[3] = 0x81;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi-1 low \n"));
+ if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x0;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xa;
+ btdm8821ae.ps_tdma_byte[2] = 0xa;
+ btdm8821ae.ps_tdma_byte[3] = 0x0;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xf;
+ btdm8821ae.ps_tdma_byte[2] = 0xf;
+ btdm8821ae.ps_tdma_byte[3] = 0x0;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+ }
+ }
+
+ if(rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+ btdm8821ae.b_dec_bt_pwr = true;
+ }
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
+ hal_coex_8821ae.bt_inq_page_start_time, bt_tx_rx_cnt_lvl));
+
+ if( (hal_coex_8821ae.bt_inq_page_start_time) ||
+ (BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl) )
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], Set BT inquiry / page scan 0x3a setting\n"));
+ btdm8821ae.b_ps_tdma_on = true;
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x83;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+
+ if(rtl8821ae_dm_bt_is_coexist_state_changed(hw)){
+ rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+ }
+}
+
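+/*
+ * Track BT inquiry/page activity reported via C2H. When inquiry/page is
+ * seen, record the start time (jiffies) and set BT_COEX_STATE_BT_INQ_PAGE;
+ * the flag and the timestamp are cleared again once 10 seconds have
+ * elapsed since the recorded start.
+ */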
+void rtl8821ae_dm_bt_inq_page_monitor(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 cur_time;
+ cur_time = jiffies;
+ if (hal_coex_8821ae.b_c2h_bt_inquiry_page) {
+ //pHalData->btcoexist.halCoex8821ae.btInquiryPageCnt++;
+ // bt inquiry or page is started.
+ if(hal_coex_8821ae.bt_inq_page_start_time == 0){
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BT_INQ_PAGE;
+ hal_coex_8821ae.bt_inq_page_start_time = cur_time;
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT Inquiry/page is started at time : 0x%x \n",
+ hal_coex_8821ae.bt_inq_page_start_time));
+ }
+ }
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT Inquiry/page started time : 0x%x, cur_time : 0x%x \n",
+ hal_coex_8821ae.bt_inq_page_start_time, cur_time));
+
+ if (hal_coex_8821ae.bt_inq_page_start_time) {
+ if ((((long)cur_time - (long)hal_coex_8821ae.bt_inq_page_start_time) / HZ) >= 10) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT Inquiry/page >= 10sec!!!"));
+ hal_coex_8821ae.bt_inq_page_start_time = 0;
+ rtlpcipriv->btcoexist.current_state &=~ BT_COEX_STATE_BT_INQ_PAGE;
+ }
+ }
+
+#if 0
+ if (hal_coex_8821ae.b_c2h_bt_inquiry_page) {
+ hal_coex_8821ae.b_c2h_bt_inquiry_page++;
+ // bt inquiry or page is started.
+ } if(hal_coex_8821ae.b_c2h_bt_inquiry_page) {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BT_INQ_PAGE;
+ if(hal_coex_8821ae.bt_inquiry_page_cnt >= 4)
+ hal_coex_8821ae.bt_inquiry_page_cnt = 0;
+ hal_coex_8821ae.bt_inquiry_page_cnt++;
+ } else {
+ rtlpcipriv->btcoexist.current_state &=~ BT_COEX_STATE_BT_INQ_PAGE;
+ }
+#endif
+}
+
+void rtl8821ae_dm_bt_reset_action_profile_state(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+ rtlpcipriv->btcoexist.current_state &= ~\
+ (BT_COEX_STATE_PROFILE_HID | BT_COEX_STATE_PROFILE_A2DP|
+ BT_COEX_STATE_PROFILE_PAN | BT_COEX_STATE_PROFILE_SCO);
+
+ rtlpcipriv->btcoexist.current_state &= ~\
+ (BT_COEX_STATE_BTINFO_COMMON | BT_COEX_STATE_BTINFO_B_HID_SCOESCO|
+ BT_COEX_STATE_BTINFO_B_FTP_A2DP);
+}
+
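+/*
+ * Top-level 2-antenna coexistence dispatcher. It checks the wifi state,
+ * monitors BT inquiry/page activity and then, based on the profile bits in
+ * the last C2H BT info byte, runs either the common action, the
+ * HID/SCO/eSCO action or the FTP/A2DP action. Undefined profile
+ * combinations fall back to the HID/SCO/eSCO handling.
+ */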
+void _rtl8821ae_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ u8 bt_retry_cnt;
+ u8 bt_info_original;
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex] Get bt info by fw!!\n"));
+
+ _rtl8821ae_dm_bt_check_wifi_state(hw);
+
+ if (hal_coex_8821ae.b_c2h_bt_info_req_sent) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex] c2h for bt_info not rcvd yet!!\n"));
+ }
+
+ bt_retry_cnt = hal_coex_8821ae.bt_retry_cnt;
+ bt_info_original = hal_coex_8821ae.c2h_bt_info_original;
+
+ // when bt is in inquiry or page scan, we have to set h2c 0x25 to
+ // ignore wlanact for a continuous 4x2 secs
+ rtl8821ae_dm_bt_inq_page_monitor(hw);
+ rtl8821ae_dm_bt_reset_action_profile_state(hw);
+
+ if(rtl8821ae_dm_bt_is_2_ant_common_action(hw)) {
+ rtlpcipriv->btcoexist.bt_profile_case = BT_COEX_MECH_COMMON;
+ rtlpcipriv->btcoexist.bt_profile_action= BT_COEX_MECH_COMMON;
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Action 2-Ant common.\n"));
+ } else {
+ if( (bt_info_original & BTINFO_B_HID) ||
+ (bt_info_original & BTINFO_B_SCO_BUSY) ||
+ (bt_info_original & BTINFO_B_SCO_ESCO) ) {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BTINFO_B_HID_SCOESCO;
+ rtlpcipriv->btcoexist.bt_profile_case = BT_COEX_MECH_HID_SCO_ESCO;
+ rtlpcipriv->btcoexist.bt_profile_action = BT_COEX_MECH_HID_SCO_ESCO;
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BTInfo: bHid|bSCOBusy|bSCOeSCO\n"));
+ rtl8821ae_dm_bt_2_ant_hid_sco_esco(hw);
+ } else if( (bt_info_original & BTINFO_B_FTP) ||
+ (bt_info_original & BTINFO_B_A2DP) ) {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BTINFO_B_FTP_A2DP;
+ rtlpcipriv->btcoexist.bt_profile_case = BT_COEX_MECH_FTP_A2DP;
+ rtlpcipriv->btcoexist.bt_profile_action = BT_COEX_MECH_FTP_A2DP;
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("BTInfo: bFTP|bA2DP\n"));
+ rtl8821ae_dm_bt_2_ant_ftp_a2dp(hw);
+ } else {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BTINFO_B_HID_SCOESCO;
+ rtlpcipriv->btcoexist.bt_profile_case = BT_COEX_MECH_NONE;
+ rtlpcipriv->btcoexist.bt_profile_action= BT_COEX_MECH_NONE;
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BTInfo: undefined case!!!!\n"));
+ rtl8821ae_dm_bt_2_ant_hid_sco_esco(hw);
+ }
+ }
+}
+
+void _rtl8821ae_dm_bt_coexist_1_ant(struct ieee80211_hw *hw)
+{
+ return;
+}
+
+void rtl8821ae_dm_bt_hw_coex_all_off_8723a(struct ieee80211_hw *hw)
+{
+ rtl8821ae_dm_bt_set_coex_table(hw, 0x5a5aaaaa, 0xcc, 0x3);
+ rtl8821ae_dm_bt_set_hw_pta_mode(hw, true);
+}
+
+void rtl8821ae_dm_bt_fw_coex_all_off_8723a(struct ieee80211_hw *hw)
+{
+ rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, false);
+ rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);
+ rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+ rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, false, TDMA_2ANT, TDMA_NAV_OFF);
+ rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, TDMA_2ANT,
+ TDMA_NAV_OFF, TDMA_DAC_SWING_OFF);
+ rtl8821ae_dm_bt_set_fw_dac_swing_level(hw, 0);
+ rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+ rtl8821ae_dm_bt_set_fw_bt_retry_index(hw, 2);
+ rtl8821ae_dm_bt_set_fw_wlan_act(hw, 0x10, 0x10);
+ rtl8821ae_dm_bt_set_fw_dec_bt_pwr(hw, false);
+}
+
+void rtl8821ae_dm_bt_sw_coex_all_off_8723a(struct ieee80211_hw *hw)
+{
+ rtl8821ae_dm_bt_agc_table(hw, BT_AGCTABLE_OFF);
+ rtl8821ae_dm_bt_bb_back_off_level(hw, BT_BB_BACKOFF_OFF);
+ rtl8821ae_dm_bt_reject_ap_aggregated_packet(hw, false);
+
+ rtl8821ae_dm_bt_set_sw_penalty_tx_rate_adaptive(hw,
+ BT_TX_RATE_ADAPTIVE_NORMAL);
+ rtl8821ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, BT_RF_RX_LPF_CORNER_RESUME);
+ rtl8821ae_dm_bt_set_sw_full_time_dac_swing(hw, false, 0xc0);
+}
+
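+/*
+ * Request fresh BT information from the firmware: H2C command 0x38 with
+ * bit 0 of the single parameter byte set. The reply arrives asynchronously
+ * as a C2H BT info event.
+ */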
+void rtl8821ae_dm_bt_query_bt_information(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter[1] = {0};
+
+ hal_coex_8821ae.b_c2h_bt_info_req_sent = true;
+
+ h2c_parameter[0] |= BIT(0);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("Query Bt information, write 0x38=0x%x\n", h2c_parameter[0]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x38, 1, h2c_parameter);
+}
+
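+/*
+ * Sample the BT high/low priority TX/RX counters from
+ * REG_HIGH_PRIORITY_TXRX and REG_LOW_PRIORITY_TXRX. When the LPS counter
+ * is greater than one the raw values are reduced modulo that counter, and
+ * the results are cached in hal_coex_8821ae for the enable/disable check.
+ */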
+void rtl8821ae_dm_bt_bt_hw_counters_monitor(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ u32 reg_hp_tx_rx, reg_lp_tx_rx, u32_tmp;
+ u32 reg_hp_tx=0, reg_hp_rx=0, reg_lp_tx=0, reg_lp_rx=0;
+
+ reg_hp_tx_rx = REG_HIGH_PRIORITY_TXRX;
+ reg_lp_tx_rx = REG_LOW_PRIORITY_TXRX;
+
+ u32_tmp = rtl_read_dword(rtlpriv, reg_hp_tx_rx);
+ reg_hp_tx = u32_tmp & MASKLWORD;
+ reg_hp_rx = (u32_tmp & MASKHWORD)>>16;
+
+ u32_tmp = rtl_read_dword(rtlpriv, reg_lp_tx_rx);
+ reg_lp_tx = u32_tmp & MASKLWORD;
+ reg_lp_rx = (u32_tmp & MASKHWORD)>>16;
+
+ if(rtlpcipriv->btcoexist.lps_counter > 1) {
+ reg_hp_tx %= rtlpcipriv->btcoexist.lps_counter;
+ reg_hp_rx %= rtlpcipriv->btcoexist.lps_counter;
+ reg_lp_tx %= rtlpcipriv->btcoexist.lps_counter;
+ reg_lp_rx %= rtlpcipriv->btcoexist.lps_counter;
+ }
+
+ hal_coex_8821ae.high_priority_tx = reg_hp_tx;
+ hal_coex_8821ae.high_priority_rx = reg_hp_rx;
+ hal_coex_8821ae.low_priority_tx = reg_lp_tx;
+ hal_coex_8821ae.low_priority_rx = reg_lp_rx;
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("High Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
+ reg_hp_tx_rx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("Low Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
+ reg_lp_tx_rx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx));
+ rtlpcipriv->btcoexist.lps_counter = 0;
+ //rtl_write_byte(rtlpriv, 0x76e, 0xc);
+}
+
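+/*
+ * Heuristic BT presence check: if all four cached priority counters read
+ * as 0, 0xeaea or 0xffff, BT is treated as not alive. Two such polls in a
+ * row mark BT disabled; any other reading marks it enabled again, and a
+ * transition between the two states is logged.
+ */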
+void rtl8821ae_dm_bt_bt_enable_disable_check(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ bool bt_alive = true;
+
+ if (hal_coex_8821ae.high_priority_tx == 0 &&
+ hal_coex_8821ae.high_priority_rx == 0 &&
+ hal_coex_8821ae.low_priority_tx == 0 &&
+ hal_coex_8821ae.low_priority_rx == 0) {
+ bt_alive = false;
+ }
+ if (hal_coex_8821ae.high_priority_tx == 0xeaea &&
+ hal_coex_8821ae.high_priority_rx == 0xeaea &&
+ hal_coex_8821ae.low_priority_tx == 0xeaea &&
+ hal_coex_8821ae.low_priority_rx == 0xeaea) {
+ bt_alive = false;
+ }
+ if (hal_coex_8821ae.high_priority_tx == 0xffff &&
+ hal_coex_8821ae.high_priority_rx == 0xffff &&
+ hal_coex_8821ae.low_priority_tx == 0xffff &&
+ hal_coex_8821ae.low_priority_rx == 0xffff) {
+ bt_alive = false;
+ }
+ if (bt_alive) {
+ rtlpcipriv->btcoexist.bt_active_zero_cnt = 0;
+ rtlpcipriv->btcoexist.b_cur_bt_disabled = false;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("8821AE BT is enabled !!\n"));
+ } else {
+ rtlpcipriv->btcoexist.bt_active_zero_cnt++;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("8821AE bt all counters=0, %d times!!\n",
+ rtlpcipriv->btcoexist.bt_active_zero_cnt));
+ if (rtlpcipriv->btcoexist.bt_active_zero_cnt >= 2) {
+ rtlpcipriv->btcoexist.b_cur_bt_disabled = true;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("8821AE BT is disabled !!\n"));
+ }
+ }
+ if (rtlpcipriv->btcoexist.b_pre_bt_disabled !=
+ rtlpcipriv->btcoexist.b_cur_bt_disabled) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("8821AE BT is from %s to %s!!\n",
+ (rtlpcipriv->btcoexist.b_pre_bt_disabled ? "disabled":"enabled"),
+ (rtlpcipriv->btcoexist.b_cur_bt_disabled ? "disabled":"enabled")));
+ rtlpcipriv->btcoexist.b_pre_bt_disabled
+ = rtlpcipriv->btcoexist.b_cur_bt_disabled;
+ }
+}
+
+
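+/*
+ * Periodic BT coexistence entry point: query BT info, refresh the hardware
+ * counters, re-evaluate whether BT is alive, run the 1- or 2-antenna
+ * mechanism and record any change of the coexist state bitmap.
+ */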
+void rtl8821ae_dm_bt_coexist(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+ rtl8821ae_dm_bt_query_bt_information(hw);
+ rtl8821ae_dm_bt_bt_hw_counters_monitor(hw);
+ rtl8821ae_dm_bt_bt_enable_disable_check(hw);
+
+ if (rtlpcipriv->btcoexist.bt_ant_num == ANT_X2) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], 2 Ant mechanism\n"));
+ _rtl8821ae_dm_bt_coexist_2_ant(hw);
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], 1 Ant mechanism\n"));
+ _rtl8821ae_dm_bt_coexist_1_ant(hw);
+ }
+
+ if (!rtl8821ae_dm_bt_is_same_coexist_state(hw)) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], Coexist State[bitMap] change from 0x%x%8x to 0x%x%8x\n",
+ rtlpcipriv->btcoexist.previous_state_h,
+ rtlpcipriv->btcoexist.previous_state,
+ rtlpcipriv->btcoexist.current_state_h,
+ rtlpcipriv->btcoexist.current_state));
+ rtlpcipriv->btcoexist.previous_state
+ = rtlpcipriv->btcoexist.current_state;
+ rtlpcipriv->btcoexist.previous_state_h
+ = rtlpcipriv->btcoexist.current_state_h;
+ }
+}
+
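+/*
+ * Decode a raw C2H BT info buffer: byte 0 carries the profile/connection
+ * bits, byte 1 the BT retry count. Bit 2 of the info byte flags
+ * inquiry/page activity and the connection bit toggles the BT busy/idle
+ * coexist state.
+ */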
+void rtl8821ae_dm_bt_parse_bt_info(struct ieee80211_hw *hw, u8 * tmp_buf, u8 len)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ u8 bt_info;
+ u8 i;
+
+ hal_coex_8821ae.b_c2h_bt_info_req_sent = false;
+ hal_coex_8821ae.bt_retry_cnt = 0;
+ for (i = 0; i < len; i++) {
+ if (i == 0) {
+ hal_coex_8821ae.c2h_bt_info_original = tmp_buf[i];
+ } else if (i == 1) {
+ hal_coex_8821ae.bt_retry_cnt = tmp_buf[i];
+ }
+ if(i == len-1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("0x%2x]", tmp_buf[i]));
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("0x%2x, ", tmp_buf[i]));
+ }
+ }
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("BT info bt_info (Data)= 0x%x\n",hal_coex_8821ae.c2h_bt_info_original));
+ bt_info = hal_coex_8821ae.c2h_bt_info_original;
+
+ if(bt_info & BIT(2)){
+ hal_coex_8821ae.b_c2h_bt_inquiry_page = true;
+ } else {
+ hal_coex_8821ae.b_c2h_bt_inquiry_page = false;
+ }
+
+ if (bt_info & BTINFO_B_CONNECTION) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTC2H], BTInfo: bConnect=true\n"));
+ rtlpcipriv->btcoexist.b_bt_busy = true;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_BT_IDLE;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTC2H], BTInfo: bConnect=false\n"));
+ rtlpcipriv->btcoexist.b_bt_busy = false;
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BT_IDLE;
+ }
+}
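+
+/*
+ * Poll the C2H mailbox at REG_C2HEVT_MSG_NORMAL: the first byte packs the
+ * command id (low nibble) and payload length (high nibble), the second
+ * byte is the sequence number and the payload starts at offset 2. Only
+ * BT_INFO events are forwarded to the btcoexist notifier; the handshake
+ * byte at 0x1AF is set back to C2H_EVT_HOST_CLOSE when done.
+ */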
+void rtl_8821ae_c2h_command_handle(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct c2h_evt_hdr c2h_event;
+ u8 * ptmp_buf = NULL;
+ u8 index = 0;
+ u8 u1b_tmp = 0;
+ memset(&c2h_event, 0, sizeof(c2h_event));
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL);
+ RT_TRACE(COMP_FW, DBG_DMESG,
+ ("&&&&&&: REG_C2HEVT_MSG_NORMAL is 0x%x\n", u1b_tmp));
+ c2h_event.cmd_id = u1b_tmp & 0xF;
+ c2h_event.cmd_len = (u1b_tmp & 0xF0) >> 4;
+ c2h_event.cmd_seq = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL + 1);
+ RT_TRACE(COMP_FW, DBG_DMESG, ("cmd_id: %d, cmd_len: %d, cmd_seq: %d\n",
+ c2h_event.cmd_id , c2h_event.cmd_len, c2h_event.cmd_seq));
+ u1b_tmp = rtl_read_byte(rtlpriv, 0x01AF);
+ if (u1b_tmp == C2H_EVT_HOST_CLOSE) {
+ return;
+ } else if (u1b_tmp != C2H_EVT_FW_CLOSE) {
+ rtl_write_byte(rtlpriv, 0x1AF, 0x00);
+ return;
+ }
+ ptmp_buf = kmalloc(c2h_event.cmd_len, GFP_KERNEL);
+ if (ptmp_buf == NULL) {
+ RT_TRACE(COMP_FW, DBG_TRACE, ("malloc cmd buf failed\n"));
+ return;
+ }
+
+ /* Read the content */
+ for (index = 0; index < c2h_event.cmd_len; index ++) {
+ ptmp_buf[index] = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL + 2+ index);
+ }
+
+ switch(c2h_event.cmd_id) {
+ case C2H_BT_RSSI:
+ break;
+
+ case C2H_BT_OP_MODE:
+ break;
+
+ case BT_INFO:
+ RT_TRACE(COMP_FW, DBG_TRACE,
+ ("BT info Byte[0] (ID) is 0x%x\n", c2h_event.cmd_id));
+ RT_TRACE(COMP_FW, DBG_TRACE,
+ ("BT info Byte[1] (Seq) is 0x%x\n", c2h_event.cmd_seq));
+ RT_TRACE(COMP_FW, DBG_TRACE,
+ ("BT info Byte[2] (Data)= 0x%x\n", ptmp_buf[0]));
+
+ if (rtlpriv->cfg->ops->get_btc_status()){
+ rtlpriv->btcoexist.btc_ops->btc_btinfo_notify(rtlpriv, ptmp_buf, c2h_event.cmd_len);
+ }
+ break;
+ default:
+ break;
+ }
+
+ kfree(ptmp_buf);
+
+ rtl_write_byte(rtlpriv, 0x01AF, C2H_EVT_HOST_CLOSE);
+}
+
+
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.h b/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.h
new file mode 100644
index 000000000000..a94474faca49
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.h
@@ -0,0 +1,160 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_HAL_BTC_H__
+#define __RTL8821AE_HAL_BTC_H__
+
+#include "../wifi.h"
+#include "btc.h"
+#include "hal_bt_coexist.h"
+
+#define BT_TXRX_CNT_THRES_1 1200
+#define BT_TXRX_CNT_THRES_2 1400
+#define BT_TXRX_CNT_THRES_3 3000
+#define BT_TXRX_CNT_LEVEL_0 0 // < 1200
+#define BT_TXRX_CNT_LEVEL_1 1 // >= 1200 && < 1400
+#define BT_TXRX_CNT_LEVEL_2 2 // >= 1400
+#define BT_TXRX_CNT_LEVEL_3 3
+
+
+
+#define BT_COEX_DISABLE 0
+#define BT_Q_PKT_OFF 0
+#define BT_Q_PKT_ON 1
+
+#define BT_TX_PWR_OFF 0
+#define BT_TX_PWR_ON 1
+
+/* TDMA mode definition */
+#define TDMA_2ANT 0
+#define TDMA_1ANT 1
+#define TDMA_NAV_OFF 0
+#define TDMA_NAV_ON 1
+#define TDMA_DAC_SWING_OFF 0
+#define TDMA_DAC_SWING_ON 1
+
+/* PTA mode related definition */
+#define BT_PTA_MODE_OFF 0
+#define BT_PTA_MODE_ON 1
+
+/* Penalty Tx Rate Adaptive */
+#define BT_TX_RATE_ADAPTIVE_NORMAL 0
+#define BT_TX_RATE_ADAPTIVE_LOW_PENALTY 1
+
+/* RF Corner */
+#define BT_RF_RX_LPF_CORNER_RESUME 0
+#define BT_RF_RX_LPF_CORNER_SHRINK 1
+
+#define C2H_EVT_HOST_CLOSE 0x00
+#define C2H_EVT_FW_CLOSE 0xFF
+
+enum bt_traffic_mode {
+ BT_MOTOR_EXT_BE = 0x00,
+ BT_MOTOR_EXT_GUL = 0x01,
+ BT_MOTOR_EXT_GUB = 0x02,
+ BT_MOTOR_EXT_GULB = 0x03
+};
+
+enum bt_traffic_mode_profile {
+ BT_PROFILE_NONE,
+ BT_PROFILE_A2DP,
+ BT_PROFILE_PAN,
+ BT_PROFILE_HID,
+ BT_PROFILE_SCO
+};
+
+enum hci_ext_bt_operation {
+ HCI_BT_OP_NONE = 0x0,
+ HCI_BT_OP_INQUIRE_START = 0x1,
+ HCI_BT_OP_INQUIRE_FINISH = 0x2,
+ HCI_BT_OP_PAGING_START = 0x3,
+ HCI_BT_OP_PAGING_SUCCESS = 0x4,
+ HCI_BT_OP_PAGING_UNSUCCESS = 0x5,
+ HCI_BT_OP_PAIRING_START = 0x6,
+ HCI_BT_OP_PAIRING_FINISH = 0x7,
+ HCI_BT_OP_BT_DEV_ENABLE = 0x8,
+ HCI_BT_OP_BT_DEV_DISABLE = 0x9,
+ HCI_BT_OP_MAX,
+};
+
+enum bt_spec {
+ BT_SPEC_1_0_b = 0x00,
+ BT_SPEC_1_1 = 0x01,
+ BT_SPEC_1_2 = 0x02,
+ BT_SPEC_2_0_EDR = 0x03,
+ BT_SPEC_2_1_EDR = 0x04,
+ BT_SPEC_3_0_HS = 0x05,
+ BT_SPEC_4_0 = 0x06
+};
+
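+/*
+ * C2H event header as read from REG_C2HEVT_MSG_NORMAL: byte 0 packs
+ * cmd_id (bits 3:0) and cmd_len (bits 7:4), byte 1 is the sequence
+ * number, and the payload follows from byte 2.
+ */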
+struct c2h_evt_hdr {
+ u8 cmd_id;
+ u8 cmd_len;
+ u8 cmd_seq;
+};
+
+enum bt_state{
+ BT_INFO_STATE_DISABLED = 0,
+ BT_INFO_STATE_NO_CONNECTION = 1,
+ BT_INFO_STATE_CONNECT_IDLE = 2,
+ BT_INFO_STATE_INQ_OR_PAG = 3,
+ BT_INFO_STATE_ACL_ONLY_BUSY = 4,
+ BT_INFO_STATE_SCO_ONLY_BUSY = 5,
+ BT_INFO_STATE_ACL_SCO_BUSY = 6,
+ BT_INFO_STATE_HID_BUSY = 7,
+ BT_INFO_STATE_HID_SCO_BUSY = 8,
+ BT_INFO_STATE_MAX = 7
+};
+
+enum rtl8723be_c2h_evt {
+ C2H_DBG = 0,
+ C2H_TSF = 1,
+ C2H_AP_RPT_RSP = 2,
+ C2H_CCX_TX_RPT = 3, // The FW notifies the host with the tx report of the specific packet.
+ C2H_BT_RSSI = 4,
+ C2H_BT_OP_MODE = 5,
+ C2H_HW_INFO_EXCH = 10,
+ C2H_C2H_H2C_TEST = 11,
+ BT_INFO = 9,
+ MAX_C2HEVENT
+};
+
+
+
+void rtl8821ae_dm_bt_fw_coex_all_off_8723a(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_sw_coex_all_off_8723a(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_hw_coex_all_off_8723a(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_coexist(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_set_bt_dm(struct ieee80211_hw *hw, struct btdm_8821ae *p_btdm);
+void rtl_8821ae_c2h_command_handle(struct ieee80211_hw * hw);
+void rtl_8821ae_bt_wifi_media_status_notify(struct ieee80211_hw * hw, bool mstatus);
+void rtl8821ae_dm_bt_turn_off_bt_coexist_before_enter_lps(struct ieee80211_hw *hw);
+
+
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hw.c b/drivers/staging/rtl8821ae/rtl8821ae/hw.c
new file mode 100644
index 000000000000..5ed7a114c56b
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hw.c
@@ -0,0 +1,3346 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../regd.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "fw.h"
+#include "led.h"
+#include "hw.h"
+#include "pwrseqcmd.h"
+#include "pwrseq.h"
+#include "btc.h"
+#include "../btcoexist/rtl_btc.h"
+
+#define LLT_CONFIG 5
+
+static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+ while (skb_queue_len(&ring->queue)) {
+ struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+ struct sk_buff *skb = __skb_dequeue(&ring->queue);
+
+ pci_unmap_single(rtlpci->pdev,
+ le32_to_cpu(rtlpriv->cfg->ops->get_desc(
+ (u8 *) entry, true, HW_DESC_TXBUFF_ADDR)),
+ skb->len, PCI_DMA_TODEVICE);
+ kfree_skb(skb);
+ ring->idx = (ring->idx + 1) % ring->entries;
+ }
+
+}
+
+static void _rtl8821ae_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+ u8 set_bits, u8 clear_bits)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpci->reg_bcn_ctrl_val |= set_bits;
+ rtlpci->reg_bcn_ctrl_val &= ~clear_bits;
+
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
+}
+
+void _rtl8821ae_stop_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp1byte;
+
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte &= ~(BIT(0));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+void _rtl8821ae_resume_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp1byte;
+
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte |= BIT(0);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+static void _rtl8821ae_enable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(1));
+}
+
+static void _rtl8821ae_disable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(1), 0);
+}
+
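+/*
+ * Bring the firmware-controlled clock back on when leaving the low power
+ * state: wait for any clock change already in progress, send the RPWM
+ * value and poll REG_HISR for the CPWM acknowledge before marking the
+ * power state as RF on. Optionally re-arms the clock-off timer.
+ */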
+static void _rtl8821ae_set_fw_clock_on(struct ieee80211_hw *hw,
+ u8 rpwm_val, bool b_need_turn_off_clk)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool b_support_remote_wake_up;
+ u32 count = 0,isr_regaddr,content;
+ bool b_schedule_timer = b_need_turn_off_clk;
+ rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN,
+ (u8 *) (&b_support_remote_wake_up));
+
+ if (!rtlhal->bfw_ready)
+ return;
+ if (!rtlpriv->psc.b_fw_current_inpsmode)
+ return;
+
+ while (1) {
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ if (rtlhal->bfw_clk_change_in_progress) {
+ while (rtlhal->bfw_clk_change_in_progress) {
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ count++;
+ udelay(100);
+ if (count > 1000)
+ return;
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ }
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ } else {
+ rtlhal->bfw_clk_change_in_progress = true;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ break;
+ }
+ }
+
+ if (IS_IN_LOW_POWER_STATE_8821AE(rtlhal->fw_ps_state)) {
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM,
+ (u8 *) (&rpwm_val));
+ if (FW_PS_IS_ACK(rpwm_val)) {
+ isr_regaddr = REG_HISR;
+ content = rtl_read_dword(rtlpriv, isr_regaddr);
+ while (!(content & IMR_CPWM) && (count < 500)) {
+ udelay(50);
+ count++;
+ content = rtl_read_dword(rtlpriv, isr_regaddr);
+ }
+
+ if (content & IMR_CPWM) {
+ rtl_write_word(rtlpriv,isr_regaddr, 0x0100);
+ rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_8821AE;
+ RT_TRACE(COMP_POWER, DBG_LOUD, ("Receive CPWM INT!!! Set pHalData->FwPSState = %X\n", rtlhal->fw_ps_state));
+ }
+ }
+
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ rtlhal->bfw_clk_change_in_progress = false;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ if (b_schedule_timer) {
+ mod_timer(&rtlpriv->works.fw_clockoff_timer,
+ jiffies + MSECS(10));
+ }
+
+ } else {
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ rtlhal->bfw_clk_change_in_progress = false;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ }
+
+
+}
+
+static void _rtl8821ae_set_fw_clock_off(struct ieee80211_hw *hw,
+ u8 rpwm_val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring;
+ enum rf_pwrstate rtstate;
+ bool b_schedule_timer = false;
+ u8 queue;
+
+ if (!rtlhal->bfw_ready)
+ return;
+ if (!rtlpriv->psc.b_fw_current_inpsmode)
+ return;
+ if (!rtlhal->ballow_sw_to_change_hwclc)
+ return;
+ rtlpriv->cfg->ops->get_hw_reg(hw,HW_VAR_RF_STATE,(u8 *)(&rtstate));
+ if (rtstate == ERFOFF ||rtlpriv->psc.inactive_pwrstate ==ERFOFF)
+ return;
+
+ for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) {
+ ring = &rtlpci->tx_ring[queue];
+ if (skb_queue_len(&ring->queue)) {
+ b_schedule_timer = true;
+ break;
+ }
+ }
+
+ if (b_schedule_timer) {
+ mod_timer(&rtlpriv->works.fw_clockoff_timer,
+ jiffies + MSECS(10));
+ return;
+ }
+
+ if (FW_PS_STATE(rtlhal->fw_ps_state) != FW_PS_STATE_RF_OFF_LOW_PWR_8821AE) {
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ if (!rtlhal->bfw_clk_change_in_progress) {
+ rtlhal->bfw_clk_change_in_progress = true;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val);
+ rtl_write_word(rtlpriv, REG_HISR, 0x0100);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
+ (u8 *) (&rpwm_val));
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ rtlhal->bfw_clk_change_in_progress = false;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ } else {
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ mod_timer(&rtlpriv->works.fw_clockoff_timer,
+ jiffies + MSECS(10));
+ }
+ }
+
+}
+
+static void _rtl8821ae_set_fw_ps_rf_on(struct ieee80211_hw *hw)
+{
+ u8 rpwm_val = 0;
+ rpwm_val |= (FW_PS_STATE_RF_OFF_8821AE | FW_PS_ACK);
+ _rtl8821ae_set_fw_clock_on(hw, rpwm_val, true);
+}
+
+static void _rtl8821ae_fwlps_leave(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool b_fw_current_inps = false;
+ u8 rpwm_val = 0,fw_pwrmode = FW_PS_ACTIVE_MODE;
+
+ if (ppsc->b_low_power_enable){
+ rpwm_val = (FW_PS_STATE_ALL_ON_8821AE|FW_PS_ACK);/* RF on */
+ _rtl8821ae_set_fw_clock_on(hw, rpwm_val, false);
+ rtlhal->ballow_sw_to_change_hwclc = false;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+ (u8 *) (&fw_pwrmode));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *) (&b_fw_current_inps));
+ } else {
+ rpwm_val = FW_PS_STATE_ALL_ON_8821AE; /* RF on */
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
+ (u8 *) (&rpwm_val));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+ (u8 *) (&fw_pwrmode));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *) (&b_fw_current_inps));
+ }
+
+}
+
+static void _rtl8821ae_fwlps_enter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool b_fw_current_inps = true;
+ u8 rpwm_val;
+
+ if (ppsc->b_low_power_enable){
+ rpwm_val = FW_PS_STATE_RF_OFF_LOW_PWR_8821AE; /* RF off */
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_FW_PSMODE_STATUS,
+ (u8 *) (&b_fw_current_inps));
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_PWRMODE,
+ (u8 *) (&ppsc->fwctrl_psmode));
+ rtlhal->ballow_sw_to_change_hwclc = true;
+ _rtl8821ae_set_fw_clock_off(hw, rpwm_val);
+
+
+ } else {
+ rpwm_val = FW_PS_STATE_RF_OFF_8821AE; /* RF off */
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_FW_PSMODE_STATUS,
+ (u8 *) (&b_fw_current_inps));
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_PWRMODE,
+ (u8 *) (&ppsc->fwctrl_psmode));
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_SET_RPWM,
+ (u8 *) (&rpwm_val));
+ }
+
+}
+
+void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ switch (variable) {
+ case HW_VAR_ETHER_ADDR:
+ *((u32 *)(val)) = rtl_read_dword(rtlpriv, REG_MACID);
+ *((u16 *)(val+4)) = rtl_read_word(rtlpriv, REG_MACID + 4);
+ break;
+ case HW_VAR_BSSID:
+ *((u32 *)(val)) = rtl_read_dword(rtlpriv, REG_BSSID);
+ *((u16 *)(val+4)) = rtl_read_word(rtlpriv, REG_BSSID+4);
+ break;
+ case HW_VAR_MEDIA_STATUS:
+ val[0] = rtl_read_byte(rtlpriv, REG_CR+2) & 0x3;
+ break;
+ case HW_VAR_SLOT_TIME:
+ *((u8 *)(val)) = mac->slot_time;
+ break;
+ case HW_VAR_BEACON_INTERVAL:
+ *((u16 *)(val)) = rtl_read_word(rtlpriv, REG_BCN_INTERVAL);
+ break;
+ case HW_VAR_ATIM_WINDOW:
+ *((u16 *)(val)) = rtl_read_word(rtlpriv, REG_ATIMWND);
+ break;
+ case HW_VAR_RCR:
+ *((u32 *) (val)) = rtlpci->receive_config;
+ break;
+ case HW_VAR_RF_STATE:
+ *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
+ break;
+ case HW_VAR_FWLPS_RF_ON:{
+ enum rf_pwrstate rfState;
+ u32 val_rcr;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw,
+ HW_VAR_RF_STATE,
+ (u8 *) (&rfState));
+ if (rfState == ERFOFF) {
+ *((bool *) (val)) = true;
+ } else {
+ val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ val_rcr &= 0x00070000;
+ if (val_rcr)
+ *((bool *) (val)) = false;
+ else
+ *((bool *) (val)) = true;
+ }
+ break;
+ }
+ case HW_VAR_FW_PSMODE_STATUS:
+ *((bool *) (val)) = ppsc->b_fw_current_inpsmode;
+ break;
+ case HW_VAR_CORRECT_TSF:{
+ u64 tsf;
+ u32 *ptsf_low = (u32 *) & tsf;
+ u32 *ptsf_high = ((u32 *) & tsf) + 1;
+
+ *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
+ *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+
+ *((u64 *) (val)) = tsf;
+
+ break;
+ }
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process %x\n",variable));
+ break;
+ }
+}
+
+
+void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 idx;
+
+ switch (variable) {
+ case HW_VAR_ETHER_ADDR:{
+ for (idx = 0; idx < ETH_ALEN; idx++) {
+ rtl_write_byte(rtlpriv, (REG_MACID + idx),
+ val[idx]);
+ }
+ break;
+ }
+ case HW_VAR_BASIC_RATE:{
+ u16 b_rate_cfg = ((u16 *) val)[0];
+ u8 rate_index = 0;
+ b_rate_cfg = b_rate_cfg & 0x15f;
+ b_rate_cfg |= 0x01;
+ rtl_write_byte(rtlpriv, REG_RRSR, b_rate_cfg & 0xff);
+ rtl_write_byte(rtlpriv, REG_RRSR + 1,
+ (b_rate_cfg >> 8) & 0xff);
+ while (b_rate_cfg > 0x1) {
+ b_rate_cfg = (b_rate_cfg >> 1);
+ rate_index++;
+ }
+ rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
+ rate_index);
+ break;
+ }
+ case HW_VAR_BSSID:{
+ for (idx = 0; idx < ETH_ALEN; idx++) {
+ rtl_write_byte(rtlpriv, (REG_BSSID + idx),
+ val[idx]);
+ }
+ break;
+ }
+ case HW_VAR_SIFS:{
+ rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
+
+ rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
+
+ if (!mac->ht_enable)
+ rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+ 0x0e0e);
+ else
+ rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+ *((u16 *) val));
+ break;
+ }
+ case HW_VAR_SLOT_TIME:{
+ u8 e_aci;
+
+ RT_TRACE(COMP_MLME, DBG_LOUD,
+ ("HW_VAR_SLOT_TIME %x\n", val[0]));
+
+ rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
+
+ for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AC_PARAM,
+ (u8 *) (&e_aci));
+ }
+ break;
+ }
+ case HW_VAR_ACK_PREAMBLE:{
+ u8 reg_tmp;
+ u8 short_preamble = (bool) (*(u8 *) val);
+ reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL+2);
+ if (short_preamble){
+ reg_tmp |= BIT(1);
+ rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp);
+ } else {
+ reg_tmp &= (~BIT(1));
+ rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp);
+ }
+ break;
+ }
+ case HW_VAR_WPA_CONFIG:
+ rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ break;
+ case HW_VAR_AMPDU_MIN_SPACE:{
+ u8 min_spacing_to_set;
+ u8 sec_min_space;
+
+ min_spacing_to_set = *((u8 *) val);
+ if (min_spacing_to_set <= 7) {
+ sec_min_space = 0;
+
+ if (min_spacing_to_set < sec_min_space)
+ min_spacing_to_set = sec_min_space;
+
+ mac->min_space_cfg = ((mac->min_space_cfg &
+ 0xf8) |
+ min_spacing_to_set);
+
+ *val = min_spacing_to_set;
+
+ RT_TRACE(COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg));
+
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ }
+ break;
+ }
+ case HW_VAR_SHORTGI_DENSITY:{
+ u8 density_to_set;
+
+ density_to_set = *((u8 *) val);
+ mac->min_space_cfg |= (density_to_set << 3);
+
+ RT_TRACE(COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg));
+
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+
+ break;
+ }
+ case HW_VAR_AMPDU_FACTOR:{
+ u32 ampdu_len = (*((u8 *)val));
+ if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ if(ampdu_len < VHT_AGG_SIZE_128K)
+ ampdu_len = (0x2000 << (*((u8 *)val))) -1;
+ else
+ ampdu_len = 0x1ffff;
+ } else if(rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ if(ampdu_len < HT_AGG_SIZE_64K)
+ ampdu_len = (0x2000 << (*((u8 *)val))) -1;
+ else
+ ampdu_len = 0xffff;
+ }
+ ampdu_len |= BIT(31);
+
+ rtl_write_dword(rtlpriv,
+ REG_AMPDU_MAX_LENGTH_8812, ampdu_len);
+ break;
+ }
+ case HW_VAR_AC_PARAM:{
+ u8 e_aci = *((u8 *) val);
+ rtl8821ae_dm_init_edca_turbo(hw);
+
+ if (rtlpci->acm_method != eAcmWay2_SW)
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_ACM_CTRL,
+ (u8 *) (&e_aci));
+ break;
+ }
+ case HW_VAR_ACM_CTRL:{
+ u8 e_aci = *((u8 *) val);
+ union aci_aifsn *p_aci_aifsn =
+ (union aci_aifsn *)(&(mac->ac[0].aifs));
+ u8 acm = p_aci_aifsn->f.acm;
+ u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
+
+ acm_ctrl =
+ acm_ctrl | ((rtlpci->acm_method == 2) ? 0x0 : 0x1);
+
+ if (acm) {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl |= AcmHw_BeqEn;
+ break;
+ case AC2_VI:
+ acm_ctrl |= AcmHw_ViqEn;
+ break;
+ case AC3_VO:
+ acm_ctrl |= AcmHw_VoqEn;
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("HW_VAR_ACM_CTRL acm set "
+ "failed: eACI is %d\n", acm));
+ break;
+ }
+ } else {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl &= (~AcmHw_BeqEn);
+ break;
+ case AC2_VI:
+ acm_ctrl &= (~AcmHw_ViqEn);
+ break;
+ case AC3_VO:
+ acm_ctrl &= (~AcmHw_VoqEn);
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+ }
+
+ RT_TRACE(COMP_QOS, DBG_TRACE,
+ ("SetHwReg8190pci(): [HW_VAR_ACM_CTRL] "
+ "Write 0x%X\n", acm_ctrl));
+ rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
+ break;
+ }
+ case HW_VAR_RCR:{
+ rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
+ rtlpci->receive_config = ((u32 *) (val))[0];
+ break;
+ }
+ case HW_VAR_RETRY_LIMIT:{
+ u8 retry_limit = ((u8 *) (val))[0];
+
+ rtl_write_word(rtlpriv, REG_RL,
+ retry_limit << RETRY_LIMIT_SHORT_SHIFT |
+ retry_limit << RETRY_LIMIT_LONG_SHIFT);
+ break;
+ }
+ case HW_VAR_DUAL_TSF_RST:
+ rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+ break;
+ case HW_VAR_EFUSE_BYTES:
+ rtlefuse->efuse_usedbytes = *((u16 *) val);
+ break;
+ case HW_VAR_EFUSE_USAGE:
+ rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ break;
+ case HW_VAR_IO_CMD:
+ rtl8821ae_phy_set_io_cmd(hw, (*(enum io_type *)val));
+ break;
+ case HW_VAR_SET_RPWM:{
+ u8 rpwm_val;
+
+ rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM);
+ udelay(1);
+
+ if (rpwm_val & BIT(7)) {
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
+ (*(u8 *) val));
+ } else {
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
+ ((*(u8 *) val) | BIT(7)));
+ }
+
+ break;
+ }
+ case HW_VAR_H2C_FW_PWRMODE:{
+ rtl8821ae_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+ break;
+ }
+ case HW_VAR_FW_PSMODE_STATUS:
+ ppsc->b_fw_current_inpsmode = *((bool *) val);
+ break;
+
+ case HW_VAR_RESUME_CLK_ON:
+ _rtl8821ae_set_fw_ps_rf_on(hw);
+ break;
+
+ case HW_VAR_FW_LPS_ACTION:{
+ bool b_enter_fwlps = *((bool *) val);
+
+ if (b_enter_fwlps)
+ _rtl8821ae_fwlps_enter(hw);
+ else
+ _rtl8821ae_fwlps_leave(hw);
+
+ break;
+ }
+
+ case HW_VAR_H2C_FW_JOINBSSRPT:{
+ u8 mstatus = (*(u8 *) val);
+ u8 tmp_regcr, tmp_reg422,bcnvalid_reg;
+ u8 count = 0, dlbcn_count = 0;
+ bool b_recover = false;
+
+ if (mstatus == RT_MEDIA_CONNECT) {
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID,
+ NULL);
+
+ tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1);
+ rtl_write_byte(rtlpriv, REG_CR + 1,
+ (tmp_regcr | BIT(0)));
+
+ _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(3));
+ _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(4), 0);
+
+ tmp_reg422 =
+ rtl_read_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422 & (~BIT(6)));
+ if (tmp_reg422 & BIT(6))
+ b_recover = true;
+
+ do {
+ bcnvalid_reg = rtl_read_byte(rtlpriv, REG_TDECTRL+2);
+ rtl_write_byte(rtlpriv, REG_TDECTRL+2,(bcnvalid_reg | BIT(0)));
+ _rtl8821ae_return_beacon_queue_skb(hw);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtl8812ae_set_fw_rsvdpagepkt(hw, 0);
+ else
+ rtl8821ae_set_fw_rsvdpagepkt(hw, 0);
+ bcnvalid_reg = rtl_read_byte(rtlpriv, REG_TDECTRL+2);
+ count = 0;
+ while (!(bcnvalid_reg & BIT(0)) && count <20){
+ count++;
+ udelay(10);
+ bcnvalid_reg = rtl_read_byte(rtlpriv, REG_TDECTRL+2);
+ }
+ dlbcn_count++;
+ } while (!(bcnvalid_reg & BIT(0)) && dlbcn_count <5);
+
+ if (bcnvalid_reg & BIT(0))
+ rtl_write_byte(rtlpriv, REG_TDECTRL+2, BIT(0));
+
+ _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(3), 0);
+ _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(4));
+
+ if (b_recover) {
+ rtl_write_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422);
+ }
+
+ rtl_write_byte(rtlpriv, REG_CR + 1,
+ (tmp_regcr & ~(BIT(0))));
+ }
+ rtl8821ae_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+
+ break;
+ }
+ case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:{
+ rtl8821ae_set_p2p_ps_offload_cmd(hw, (*(u8 *) val));
+ break;
+ }
+
+ case HW_VAR_AID:{
+ u16 u2btmp;
+ u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
+ u2btmp &= 0xC000;
+ rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp |
+ mac->assoc_id));
+
+ break;
+ }
+ case HW_VAR_CORRECT_TSF:{
+ u8 btype_ibss = ((u8 *) (val))[0];
+
+ if (btype_ibss == true)
+ _rtl8821ae_stop_tx_beacon(hw);
+
+ _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(3));
+
+ rtl_write_dword(rtlpriv, REG_TSFTR,
+ (u32) (mac->tsf & 0xffffffff));
+ rtl_write_dword(rtlpriv, REG_TSFTR + 4,
+ (u32) ((mac->tsf >> 32) & 0xffffffff));
+
+ _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(3), 0);
+
+ if (btype_ibss == true)
+ _rtl8821ae_resume_tx_beacon(hw);
+
+ break;
+
+ }
+ case HW_VAR_NAV_UPPER: {
+ u32 us_nav_upper = ((u32)*val);
+
+ if(us_nav_upper > HAL_92C_NAV_UPPER_UNIT * 0xFF)
+ {
+ RT_TRACE(COMP_INIT , DBG_WARNING,
+ ("The setting value (0x%08X us) of NAV_UPPER"
+ " is larger than (%d * 0xFF)!!!\n",
+ us_nav_upper, HAL_92C_NAV_UPPER_UNIT));
+ break;
+ }
+ rtl_write_byte(rtlpriv, REG_NAV_UPPER,
+ ((u8)((us_nav_upper + HAL_92C_NAV_UPPER_UNIT - 1) / HAL_92C_NAV_UPPER_UNIT)));
+ break;
+ }
+ case HW_VAR_KEEP_ALIVE: {
+ u8 array[2];
+ array[0] = 0xff;
+ array[1] = *((u8 *)val);
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_KEEP_ALIVE_CTRL, 2, array);
+ break;
+ }
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("switch case "
+ "not process %x\n",variable));
+ break;
+ }
+}
+
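+/*
+ * Write one entry of the LLT (link list table): compose the
+ * address/data/operation word, kick REG_LLT_INIT and poll until the
+ * operation field returns to the inactive state or the polling threshold
+ * is exceeded.
+ */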
+static bool _rtl8821ae_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool status = true;
+ long count = 0;
+ u32 value = _LLT_INIT_ADDR(address) |
+ _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
+
+ rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
+
+ do {
+ value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
+ if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
+ break;
+
+ if (count > POLLING_LLT_THRESHOLD) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Failed to polling write LLT done at "
+ "address %d!\n", address));
+ status = false;
+ break;
+ }
+ } while (++count);
+
+ return status;
+}
+
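+/*
+ * Initialize the TX packet buffer link list: pages below the boundary
+ * (0xF8) are chained one after another and the last of them is terminated
+ * with 0xFF; the remaining pages up to 255 form a ring that loops back to
+ * the boundary page.
+ */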
+static bool _rtl8821ae_llt_table_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned short i;
+ u8 txpktbuf_bndy;
+ u8 maxPage;
+ bool status;
+
+ maxPage = 255;
+ txpktbuf_bndy = 0xF8;
+
+
+ rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy);
+ rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, MAX_RX_DMA_BUFFER_SIZE - 1);
+
+ rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy);
+
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+
+ rtl_write_byte(rtlpriv, REG_PBP, 0x31);
+ rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4);
+
+ for (i = 0; i < (txpktbuf_bndy - 1); i++) {
+ status = _rtl8821ae_llt_write(hw, i, i + 1);
+ if (true != status)
+ return status;
+ }
+
+ status = _rtl8821ae_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
+ if (true != status)
+ return status;
+
+ for (i = txpktbuf_bndy; i < maxPage; i++) {
+ status = _rtl8821ae_llt_write(hw, i, (i + 1));
+ if (true != status)
+ return status;
+ }
+
+ status = _rtl8821ae_llt_write(hw, maxPage, txpktbuf_bndy);
+ if (true != status)
+ return status;
+
+ rtl_write_dword(rtlpriv, REG_RQPN, 0x80e70808);
+ rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x00);
+
+ return true;
+}
+
+static void _rtl8821ae_gen_refresh_led_state(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (rtlpriv->rtlhal.up_first_time)
+ return;
+
+ if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtl8812ae_sw_led_on(hw, pLed0);
+ else
+ rtl8821ae_sw_led_on(hw, pLed0);
+ else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtl8812ae_sw_led_on(hw, pLed0);
+ else
+ rtl8821ae_sw_led_on(hw, pLed0);
+ else
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtl8812ae_sw_led_off(hw, pLed0);
+ else
+ rtl8821ae_sw_led_off(hw, pLed0);
+}
+
+static bool _rtl8821ae_init_mac(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ u8 bytetmp = 0;
+ u16 wordtmp = 0;
+ bool b_mac_func_enable = rtlhal->b_mac_func_enable;
+
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
+
+ /*Auto Power Down to CHIP-off State*/
+ bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) & (~BIT(7));
+ rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ /* HW Power on sequence*/
+ if(!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK, RTL8812_NIC_ENABLE_FLOW)) {
+ RT_TRACE(COMP_INIT,DBG_LOUD,("init 8812 MAC Fail as power on failure\n"));
+ return false;
+ }
+ } else {
+ /* HW Power on sequence */
+ if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_A_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK, RTL8821A_NIC_ENABLE_FLOW)){
+ RT_TRACE(COMP_INIT,DBG_LOUD,("init 8821 MAC Fail as power on failure\n"));
+ return false;
+ }
+ }
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO) | BIT(4);
+ rtl_write_byte(rtlpriv, REG_APS_FSMCO, bytetmp);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_CR);
+ bytetmp = 0xff;
+ rtl_write_byte(rtlpriv, REG_CR, bytetmp);
+ mdelay(2);
+
+ bytetmp |= 0x7f;
+ rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, bytetmp);
+ mdelay(2);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CFG + 3);
+ if (bytetmp & BIT(0)) {
+ bytetmp = rtl_read_byte(rtlpriv, 0x7c);
+ bytetmp |= BIT(6);
+ rtl_write_byte(rtlpriv, 0x7c, bytetmp);
+ }
+ }
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_GPIO_MUXCFG + 1);
+ bytetmp &= ~BIT(4);
+ rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG + 1, bytetmp);
+
+ rtl_write_word(rtlpriv, REG_CR, 0x2ff);
+
+ if (!b_mac_func_enable) {
+ if (!_rtl8821ae_llt_table_init(hw))
+ return false;
+ }
+
+ rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
+ rtl_write_dword(rtlpriv, REG_HISRE, 0xffffffff);
+
+ /* Enable FW Beamformer Interrupt */
+ bytetmp = rtl_read_byte(rtlpriv, REG_FWIMR + 3);
+ rtl_write_byte(rtlpriv, REG_FWIMR + 3, bytetmp | BIT(6));
+
+ wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL);
+ wordtmp &= 0xf;
+ wordtmp |= 0xF5B1;
+ rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp);
+
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F);
+ rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+ rtl_write_word(rtlpriv, REG_RXFLTMAP2, 0xFFFF);
+ /*low address*/
+ rtl_write_dword(rtlpriv, REG_BCNQ_DESA,
+ rtlpci->tx_ring[BEACON_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_MGQ_DESA,
+ rtlpci->tx_ring[MGNT_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_VOQ_DESA,
+ rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_VIQ_DESA,
+ rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_BEQ_DESA,
+ rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_BKQ_DESA,
+ rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_HQ_DESA,
+ rtlpci->tx_ring[HIGH_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_RX_DESA,
+ rtlpci->rx_ring[RX_MPDU_QUEUE].dma & DMA_BIT_MASK(32));
+
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, 0x77);
+
+ rtl_write_dword(rtlpriv, REG_INT_MIG, 0);
+
+ rtl_write_byte(rtlpriv, REG_SECONDARY_CCA_CTRL, 0x3);
+ _rtl8821ae_gen_refresh_led_state(hw);
+
+ return true;
+}
+
+static void _rtl8821ae_hw_configure(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u32 reg_rrsr;
+
+ reg_rrsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+
+ rtl_write_dword(rtlpriv, REG_RRSR, reg_rrsr);
+ /* ARFB table 9 for 11ac 5G 2SS */
+ rtl_write_dword(rtlpriv, REG_ARFR0 + 4, 0xfffff000);
+ /* ARFB table 10 for 11ac 5G 1SS */
+ rtl_write_dword(rtlpriv, REG_ARFR1 + 4, 0x003ff000);
+ /* ARFB table 11 for 11ac 2.4G 1SS */
+ rtl_write_dword(rtlpriv, REG_ARFR2, 0x00000015);
+ rtl_write_dword(rtlpriv, REG_ARFR2 + 4, 0x003ff000);
+ /* ARFB table 12 for 11ac 2.4G 1SS */
+ rtl_write_dword(rtlpriv, REG_ARFR3, 0x00000015);
+ rtl_write_dword(rtlpriv, REG_ARFR3 + 4, 0xffcff000);
+ /* 0x420[7] = 0, enable retrying an AMPDU in a new AMPDU, not as a single MPDU. */
+ rtl_write_word(rtlpriv, REG_FWHW_TXQ_CTRL, 0x1F00);
+ rtl_write_byte(rtlpriv, REG_AMPDU_MAX_TIME, 0x70);
+
+ /*Set retry limit*/
+ rtl_write_word(rtlpriv, REG_RL, 0x0707);
+
+
+ /* Set Data / Response auto rate fallback retry count */
+ rtl_write_dword(rtlpriv, REG_DARFRC, 0x01000000);
+ rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x07060504);
+ rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000);
+ rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504);
+
+ rtlpci->reg_bcn_ctrl_val = 0x1d;
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, rtlpci->reg_bcn_ctrl_val);
+
+ /* TBTT prohibit hold time. Suggested by designer TimChen. */
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1,0xff); // 8 ms
+
+ /* AGGR_BK_TIME Reg51A 0x16 */
+ rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040);
+
+ /*For Rx TP. Suggested by SD1 Richard. Added by tynli. 2010.04.12.*/
+ rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03086666);
+
+ rtl_write_byte(rtlpriv, REG_HT_SINGLE_AMPDU, 0x80);
+ rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x20);
+ rtl_write_word(rtlpriv, REG_MAX_AGGR_NUM, 0x1F1F);
+}
+
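+/*
+ * Indirect MDIO access to the PCIe PHY: the address plus a read (BIT(6))
+ * or write (BIT(5)) strobe goes into REG_MDIO_CTL, and the strobe bit is
+ * polled (up to 20 * 10us) until the transaction completes.
+ */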
+static u16 _rtl8821ae_mdio_read(struct rtl_priv *rtlpriv, u8 addr)
+{
+ u16 ret = 0;
+ u8 tmp = 0, count = 0;
+
+ rtl_write_byte(rtlpriv, REG_MDIO_CTL, addr | BIT(6));
+ tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(6) ;
+ count = 0;
+ while (tmp && count < 20) {
+ udelay(10);
+ tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(6);
+ count++;
+ }
+ if (0 == tmp)
+ ret = rtl_read_word(rtlpriv, REG_MDIO_RDATA);
+
+ return ret;
+}
+
+void _rtl8821ae_mdio_write(struct rtl_priv *rtlpriv, u8 addr, u16 data)
+{
+ u8 tmp = 0, count = 0;
+
+ rtl_write_word(rtlpriv, REG_MDIO_WDATA, data);
+ rtl_write_byte(rtlpriv, REG_MDIO_CTL, addr | BIT(5));
+ tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(5) ;
+ count = 0;
+ while (tmp && count < 20) {
+ udelay(10);
+ tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(5);
+ count++;
+ }
+}
+
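+/*
+ * DBI accessors: the dword-aligned address goes to REG_DBI_ADDR,
+ * REG_DBI_FLAG starts the transaction, and the byte of interest is read
+ * from REG_DBI_RDATA or written through REG_DBI_WDATA at offset addr % 4
+ * (the write path also sets a byte-enable bit in the address word).
+ */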
+static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
+{
+ u16 read_addr = addr & 0xfffc;
+ u8 tmp = 0, count = 0, ret = 0;
+
+ rtl_write_word(rtlpriv, REG_DBI_ADDR, read_addr);
+ rtl_write_byte(rtlpriv, REG_DBI_FLAG, 0x2);
+ tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG);
+ count = 0;
+ while (tmp && count < 20) {
+ udelay(10);
+ tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG);
+ count++;
+ }
+ if (0 == tmp) {
+ read_addr = REG_DBI_RDATA + addr % 4;
+ ret = rtl_read_word(rtlpriv, read_addr);
+ }
+ return ret;
+}
+
+void _rtl8821ae_dbi_write(struct rtl_priv *rtlpriv, u16 addr, u8 data)
+{
+ u8 tmp = 0, count = 0;
+ u16 write_addr, remainder = addr % 4;
+
+ write_addr = REG_DBI_WDATA + remainder;
+ rtl_write_byte(rtlpriv, write_addr, data);
+
+ write_addr = (addr & 0xfffc) | (BIT(0) << (remainder + 12));
+ rtl_write_word(rtlpriv, REG_DBI_ADDR, write_addr);
+
+ rtl_write_byte(rtlpriv, REG_DBI_FLAG, 0x1);
+
+ tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG);
+ count = 0;
+ while (tmp && count < 20) {
+ udelay(10);
+ tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG);
+ count++;
+ }
+
+}
+
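+/*
+ * ASPM backdoor setup: on 8821AE, rewrite MDIO registers 0x04 and 0x0b if
+ * they do not hold the expected values, then set bits in DBI registers
+ * 0x70f and 0x719 (and additionally 0x718 on 8812AE).
+ */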
+static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ if (_rtl8821ae_mdio_read(rtlpriv, 0x04) != 0x8544)
+ _rtl8821ae_mdio_write(rtlpriv, 0x04, 0x8544);
+
+ if (_rtl8821ae_mdio_read(rtlpriv, 0x0b) != 0x0070)
+ _rtl8821ae_mdio_write(rtlpriv, 0x0b, 0x0070);
+ }
+
+ tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f);
+ _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7));
+
+ tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719);
+ _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4));
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ tmp = _rtl8821ae_dbi_read(rtlpriv, 0x718);
+ _rtl8821ae_dbi_write(rtlpriv, 0x718, tmp | BIT(4));
+ }
+}
+
+void rtl8821ae_enable_hw_security_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 sec_reg_value;
+ u8 tmp;
+
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm));
+
+ if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("not open hw encryption\n"));
+ return;
+ }
+
+ sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;
+
+ if (rtlpriv->sec.use_defaultkey) {
+ sec_reg_value |= SCR_TxUseDK;
+ sec_reg_value |= SCR_RxUseDK;
+ }
+
+ sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
+
+ tmp = rtl_read_byte(rtlpriv, REG_CR + 1);
+ rtl_write_byte(rtlpriv, REG_CR + 1, tmp | BIT(1));
+
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("The SECR-value %x \n", sec_reg_value));
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+
+}
+
+#if 0
+bool _rtl8821ae_check_pcie_dma_hang(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp;
+ tmp = rtl_read_byte(rtlpriv, REG_DBI_CTRL+3);
+ if (!(tmp&BIT(2))) {
+ rtl_write_byte(rtlpriv, REG_DBI_CTRL+3, tmp|BIT(2));
+ mdelay(100);
+ }
+
+ tmp = rtl_read_byte(rtlpriv, REG_DBI_CTRL+3);
+ if (tmp&BIT(0) || tmp&BIT(1)) {
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("rtl8821ae_check_pcie_dma_hang(): TRUE! Reset PCIE DMA!\n"));
+ return true;
+ } else {
+ return false;
+ }
+}
+
+void _rtl8821ae_reset_pcie_interface_dma(struct ieee80211_hw *hw,
+ bool mac_power_on, bool watch_dog)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp;
+ bool release_mac_rx_pause;
+ u8 backup_pcie_dma_pause;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("_rtl8821ae_reset_pcie_interface_dma()\n"));
+
+ tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL);
+ tmp &= ~BIT(1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, tmp);
+ tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2);
+ tmp |= BIT2;
+ rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2, tmp);
+
+ tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
+ if (tmp & BIT(2)) {
+ release_mac_rx_pause = false;
+ } else {
+ rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, tmp | BIT(2));
+ release_mac_rx_pause = true;
+ }
+ backup_pcie_dma_pause = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG+1);
+ if (backup_pcie_dma_pause != 0xFF)
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+1, 0xFF);
+
+ if (mac_power_on)
+ rtl_write_byte(rtlpriv, REG_CR, 0);
+
+ tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
+ tmp &= ~BIT(0);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, tmp);
+
+ tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
+ tmp |= ~BIT(0);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, tmp);
+
+ if (mac_power_on)
+ rtl_write_byte(rtlpriv, REG_CR, 0xFF);
+
+ tmp = rtl_read_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL+2);
+ tmp |= BIT1;
+ rtl_write_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL+2, tmp);
+
+ if (watch_dog) {
+ u32 rqpn = 0;
+ u32 rqpn_npq = 0;
+ u8 tx_page_boundary = _RQPN_Init_8812E(Adapter, &rqpn_npq, &rqpn);
+
+ if(LLT_table_init_8812(Adapter, TX_PAGE_BOUNDARY, RQPN, RQPN_NPQ) == RT_STATUS_FAILURE)
+ return false;
+
+ PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK);
+ PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK);
+
+ // <1> Reset Tx descriptor
+ Adapter->HalFunc.ResetTxDescHandler(Adapter,Adapter->NumTxDesc);
+
+ // <2> Reset Rx descriptor
+ Adapter->HalFunc.ResetRxDescHandler(Adapter,Adapter->NumRxDesc);
+
+ // <3> Reset RFDs
+ FreeRFDs( Adapter, TRUE);
+
+ // <4> Reset TCBs
+ FreeTCBs( Adapter, TRUE);
+
+ // We should set all Rx desc own bit to 1 to prevent from RDU after enable Rx DMA. 2013.02.18, by tynli.
+ PrepareAllRxDescBuffer(Adapter);
+
+ PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK);
+ PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK);
+
+ //
+ // Initialize TRx DMA address.
+ //
+ // Because set 0x100 to 0x0 will cause the Rx descriptor address 0x340 be cleared to zero on 88EE,
+ // we should re-initialize Rx desc. address before enable DMA. 2012.11.07. by tynli.
+ InitTRxDescHwAddress8812AE(Adapter);
+ }
+
+ // While the MAC is powered on, BB and RF may also be in the ON state; if we released
+ // TRx DMA here, packets could start being transmitted/received, so release Tx/Rx DMA later.
+ if(!bInMACPowerOn || bInWatchDog)
+ {
+ // 8. release TRX DMA
+ //write 0x284 bit[18] = 1'b0
+ //write 0x301 = 0x00
+ if(bReleaseMACRxPause)
+ {
+ u1Tmp = PlatformEFIORead1Byte(Adapter, REG_RXDMA_CONTROL);
+ PlatformEFIOWrite1Byte(Adapter, REG_RXDMA_CONTROL, (u1Tmp&~BIT2));
+ }
+ PlatformEFIOWrite1Byte(Adapter, REG_PCIE_CTRL_REG+1, BackUpPcieDMAPause);
+ }
+
+ if(IS_HARDWARE_TYPE_8821E(Adapter))
+ {
+ //9. lock system register
+ // write 0xCC bit[2] = 1'b0
+ u1Tmp = PlatformEFIORead1Byte(Adapter, REG_PMC_DBG_CTRL2_8723B);
+ u1Tmp &= ~(BIT2);
+ PlatformEFIOWrite1Byte(Adapter, REG_PMC_DBG_CTRL2_8723B, u1Tmp);
+ }
+
+ return RT_STATUS_SUCCESS;
+}
+#endif
+
+ /* Static MacID mapping (used in MacIdDoStaticMapping) ---------- */
+#define MAC_ID_STATIC_FOR_DEFAULT_PORT 0
+#define MAC_ID_STATIC_FOR_BROADCAST_MULTICAST 1
+#define MAC_ID_STATIC_FOR_BT_CLIENT_START 2
+#define MAC_ID_STATIC_FOR_BT_CLIENT_END 3
+// -----------------------------------------------------------
+
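+ /* Report the statically mapped MAC IDs (from broadcast/multicast up to the
+ * last reserved BT client) to the firmware as RT_MEDIA_CONNECT via the H2C
+ * media status report. */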
+void rtl8821ae_macid_initialize_mediastatus(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 media_rpt[4] = {RT_MEDIA_CONNECT, 1, \
+ MAC_ID_STATIC_FOR_BROADCAST_MULTICAST, \
+ MAC_ID_STATIC_FOR_BT_CLIENT_END};
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, \
+ HW_VAR_H2C_FW_MEDIASTATUSRPT, media_rpt);
+
+ RT_TRACE(COMP_INIT,DBG_LOUD, \
+ ("Initialize MacId media status: from %d to %d\n", \
+ MAC_ID_STATIC_FOR_BROADCAST_MULTICAST, \
+ MAC_ID_STATIC_FOR_BT_CLIENT_END));
+}
+
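+ /* Main hardware bring-up: power the MAC on, download the firmware, configure
+ * MAC/BB/RF, switch to the 2.4G band, enable HW security and the ASPM
+ * back-door, release the Rx/Tx PCIe DMA and finally start the dynamic
+ * mechanisms and MAC ID media status. */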
+int rtl8821ae_hw_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool rtstatus = true;
+ int err;
+ u8 tmp_u1b;
+ u32 nav_upper = WIFI_NAV_UPPER_US;
+
+ rtlpriv->rtlhal.being_init_adapter = true;
+ rtlpriv->intf_ops->disable_aspm(hw);
+
+ /*YP wowlan not considered*/
+
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_CR);
+ if (tmp_u1b != 0 && tmp_u1b != 0xEA) {
+ rtlhal->b_mac_func_enable = true;
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("MAC has already been powered on.\n"));
+ } else {
+ rtlhal->b_mac_func_enable = false;
+ rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_8821AE;
+ }
+
+/* if (_rtl8821ae_check_pcie_dma_hang(hw)) {
+ _rtl8821ae_reset_pcie_interface_dma(hw,rtlhal->b_mac_func_enable,false);
+ rtlhal->b_mac_func_enable = false;
+ } */
+
+ rtstatus = _rtl8821ae_init_mac(hw);
+ if (rtstatus != true) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("Init MAC failed\n"));
+ err = 1;
+ return err;
+ }
+
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CFG);
+ tmp_u1b &= 0x7F;
+ rtl_write_byte(rtlpriv, REG_SYS_CFG, tmp_u1b);
+
+ err = rtl8821ae_download_fw(hw, false);
+ if (err) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("Failed to download FW. Init HW "
+ "without FW now..\n"));
+ err = 1;
+ rtlhal->bfw_ready = false;
+ return err;
+ } else {
+ rtlhal->bfw_ready = true;
+ }
+ rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_8821AE;
+ rtlhal->bfw_clk_change_in_progress = false;
+ rtlhal->ballow_sw_to_change_hwclc = false;
+ rtlhal->last_hmeboxnum = 0;
+
+ /*SIC_Init(Adapter);
+ if(pHalData->AMPDUBurstMode)
+ PlatformEFIOWrite1Byte(Adapter,REG_AMPDU_BURST_MODE_8812, 0x7F);*/
+
+ rtl8821ae_phy_mac_config(hw);
+ /* The previous function modifies RCR, so the rcr variable should be updated
+ * here, otherwise receive_config is wrong and throughput becomes unstable:
+ * RCR_ACRC32 makes the RX throughput unstable and RCR_APP_ICV makes mac80211
+ * disassociate from Cisco 1252 APs.
+ rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR);
+ rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
+ rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);*/
+ rtl8821ae_phy_bb_config(hw);
+
+ rtl8821ae_phy_rf_config(hw);
+
+ _rtl8821ae_hw_configure(hw);
+
+ rtl8821ae_phy_switch_wirelessband(hw, BAND_ON_2_4G);
+
+ /*set wireless mode*/
+
+ rtlhal->b_mac_func_enable = true;
+
+ rtl_cam_reset_all_entry(hw);
+
+ rtl8821ae_enable_hw_security_config(hw);
+
+ ppsc->rfpwr_state = ERFON;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+ _rtl8821ae_enable_aspm_back_door(hw);
+ rtlpriv->intf_ops->enable_aspm(hw);
+
+ //rtl8821ae_bt_hw_init(hw);
+ rtlpriv->rtlhal.being_init_adapter = false;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_NAV_UPPER, (u8 *)&nav_upper);
+
+ //rtl8821ae_dm_check_txpower_tracking(hw);
+ //rtl8821ae_phy_lc_calibrate(hw);
+
+ /* Release Rx DMA*/
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
+ if (tmp_u1b & BIT(2)) {
+ /* Release Rx DMA if needed*/
+ tmp_u1b &= ~BIT(2);
+ rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, tmp_u1b);
+ }
+
+ /* Release Tx/Rx PCIE DMA */
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0);
+
+ rtl8821ae_dm_init(hw);
+ rtl8821ae_macid_initialize_mediastatus(hw);
+
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("rtl8821ae_hw_init() <====\n"));
+ return err;
+}
+
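+ /* Derive the chip version (test vs. normal chip, cut and RF type) from
+ * REG_SYS_CFG1 and, on 8821AE, remember whether the HW radio-off (HWROF) pin
+ * is enabled. */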
+static enum version_8821ae _rtl8821ae_read_chip_version(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ enum version_8821ae version = VERSION_UNKNOWN;
+ u32 value32;
+
+ value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG1);
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("ReadChipVersion8812A 0xF0 = 0x%x \n", value32));
+
+ if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtlphy->rf_type = RF_2T2R;
+ else if(rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+ rtlphy->rf_type = RF_1T1R;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("RF_Type is %x!!\n", rtlphy->rf_type));
+
+
+ if (value32 & TRP_VAUX_EN) {
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ if (rtlphy->rf_type == RF_2T2R)
+ version = VERSION_TEST_CHIP_2T2R_8812;
+ else
+ version = VERSION_TEST_CHIP_1T1R_8812;
+ } else {
+ version = VERSION_TEST_CHIP_8821;
+ }
+ } else {
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ u32 rtl_id = ((value32 & CHIP_VER_RTL_MASK) >> 12) + 1;
+
+ if (rtlphy->rf_type == RF_2T2R)
+ version = (enum version_8821ae)(CHIP_8812 | NORMAL_CHIP | RF_TYPE_2T2R);
+ else
+ version = (enum version_8821ae)(CHIP_8812 | NORMAL_CHIP);
+
+ version = (enum version_8821ae)(version | (rtl_id << 12));
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ u32 rtl_id = value32 & CHIP_VER_RTL_MASK;
+
+ version = (enum version_8821ae)(CHIP_8821 | NORMAL_CHIP | rtl_id);
+ }
+ }
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+ "RF_2T2R" : "RF_1T1R"));
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ /* WL_HWROF_EN */
+ value32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL);
+ rtlphy->hw_rof_enable = ((value32 & WL_HWROF_EN) ? 1 : 0);
+ }
+
+ switch (version) {
+ case VERSION_TEST_CHIP_1T1R_8812:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_TEST_CHIP_1T1R_8812.\n"));
+ break;
+ case VERSION_TEST_CHIP_2T2R_8812:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_TEST_CHIP_2T2R_8812.\n"));
+ break;
+ case VERSION_NORMAL_TSMC_CHIP_1T1R_8812:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_1T1R_8812.\n"));
+ break;
+ case VERSION_NORMAL_TSMC_CHIP_2T2R_8812:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812.\n"));
+ break;
+ case VERSION_NORMAL_TSMC_CHIP_1T1R_8812_C_CUT:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_1T1R_8812 C CUT.\n"));
+ break;
+ case VERSION_NORMAL_TSMC_CHIP_2T2R_8812_C_CUT:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812 C CUT.\n"));
+ break;
+ case VERSION_TEST_CHIP_8821:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_TEST_CHIP_8821.\n"));
+ break;
+ case VERSION_NORMAL_TSMC_CHIP_8821:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 A CUT.\n"));
+ break;
+ case VERSION_NORMAL_TSMC_CHIP_8821_B_CUT:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 B CUT.\n"));
+ break;
+ default:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: Unknow (0x%X).\n", version));
+ break;
+ }
+
+ return version;
+}
+
+static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
+ enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
+ bt_msr &= 0xfc;
+
+ rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0);
+ RT_TRACE(COMP_BEACON, DBG_LOUD,
+ ("clear 0x550 when set HW_VAR_MEDIA_STATUS\n"));
+
+ if (type == NL80211_IFTYPE_UNSPECIFIED ||
+ type == NL80211_IFTYPE_STATION) {
+ _rtl8821ae_stop_tx_beacon(hw);
+ _rtl8821ae_enable_bcn_sub_func(hw);
+ } else if (type == NL80211_IFTYPE_ADHOC ||
+ type == NL80211_IFTYPE_AP) {
+ _rtl8821ae_resume_tx_beacon(hw);
+ _rtl8821ae_disable_bcn_sub_func(hw);
+ } else {
+ RT_TRACE(COMP_ERR, DBG_WARNING,("Set HW_VAR_MEDIA_STATUS: "
+ "No such media status(%x).\n", type));
+ }
+
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ bt_msr |= MSR_NOLINK;
+ ledaction = LED_CTL_LINK;
+ RT_TRACE(COMP_INIT, DBG_TRACE, ("Set Network type to NO LINK!\n"));
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ bt_msr |= MSR_ADHOC;
+ RT_TRACE(COMP_INIT, DBG_TRACE, ("Set Network type to Ad Hoc!\n"));
+ break;
+ case NL80211_IFTYPE_STATION:
+ bt_msr |= MSR_INFRA;
+ ledaction = LED_CTL_LINK;
+ RT_TRACE(COMP_INIT, DBG_TRACE, ("Set Network type to STA!\n"));
+ break;
+ case NL80211_IFTYPE_AP:
+ bt_msr |= MSR_AP;
+ RT_TRACE(COMP_INIT, DBG_TRACE, ("Set Network type to AP!\n"));
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("Network type %d not supported!\n", type));
+ return 1;
+ }
+
+ rtl_write_byte(rtlpriv, (MSR), bt_msr);
+ rtlpriv->cfg->ops->led_control(hw, ledaction);
+ if ((bt_msr & 0xfc) == MSR_AP)
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+ else
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+
+ return 0;
+}
+
+void rtl8821ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u32 reg_rcr = rtlpci->receive_config;
+
+ if (rtlpriv->psc.rfpwr_state != ERFON)
+ return;
+
+ if (check_bssid) {
+ reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+ (u8 *) (&reg_rcr));
+ _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(4));
+ } else {
+ reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+ _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(4), 0);
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_RCR, (u8 *) (&reg_rcr));
+ }
+}
+
+int rtl8821ae_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("rtl8821ae_set_network_type!\n"));
+
+ if (_rtl8821ae_set_media_status(hw, type))
+ return -EOPNOTSUPP;
+
+ if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
+ if (type != NL80211_IFTYPE_AP)
+ rtl8821ae_set_check_bssid(hw, true);
+ } else {
+ rtl8821ae_set_check_bssid(hw, false);
+ }
+
+ return 0;
+}
+
+/* don't set REG_EDCA_BE_PARAM here because mac80211 will send pkt when scan */
+void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ rtl8821ae_dm_init_edca_turbo(hw);
+ switch (aci) {
+ case AC1_BK:
+ rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
+ break;
+ case AC0_BE:
+ /* rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param); */
+ break;
+ case AC2_VI:
+ rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
+ break;
+ case AC3_VO:
+ rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
+ break;
+ default:
+ RT_ASSERT(false, ("invalid aci: %d !\n", aci));
+ break;
+ }
+}
+
+void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
+ rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+ rtlpci->irq_enabled = true;
+ /* Some C2H commands (e.g. C2H, CPWM) may have been sent before the system
+ * interrupt is enabled, so we need to clear all C2H events that the FW has
+ * notified; otherwise the FW won't schedule any commands any more.
+ */
+ //rtl_write_byte(rtlpriv, REG_C2HEVT_CLEAR, 0);
+ /*enable system interrupt*/
+ rtl_write_dword(rtlpriv, REG_HSIMR, rtlpci->sys_irq_mask & 0xFFFFFFFF);
+}
+
+void rtl8821ae_disable_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ rtl_write_dword(rtlpriv, REG_HIMR, IMR_DISABLED);
+ rtl_write_dword(rtlpriv, REG_HIMRE, IMR_DISABLED);
+ rtlpci->irq_enabled = false;
+ synchronize_irq(rtlpci->pdev->irq);
+}
+
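+ /* Card power-off sequence: run the LPS enter flow on 8821AE, reset the MCU
+ * if firmware was loaded, run the NIC disable power sequence for the
+ * respective chip and lock the ISO/CLK/power control register again. */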
+static void _rtl8821ae_poweroff_adapter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 u1b_tmp;
+
+ rtlhal->b_mac_func_enable = false;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ /* Combo (PCIe + USB) Card and PCIe-MF Card */
+ /* 1. Run LPS WL RFOFF flow */
+ //RT_TRACE(COMP_INIT, DBG_LOUD, ("=====>CardDisableRTL8812E,RTL8821A_NIC_LPS_ENTER_FLOW\n"));
+ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK, RTL8821A_NIC_LPS_ENTER_FLOW);
+ }
+ /* 2. 0x1F[7:0] = 0 */
+ /* turn off RF */
+ //rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
+ if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) &&
+ rtlhal->bfw_ready ) {
+ rtl8821ae_firmware_selfreset(hw);
+ }
+
+ /* Reset MCU. Suggested by Filen. */
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp & (~BIT(2))));
+
+ /* g. MCUFWDL 0x80[1:0]=0 */
+ /* reset MCU ready status */
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ /* HW card disable configuration. */
+ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK, RTL8821A_NIC_DISABLE_FLOW);
+ } else {
+ /* HW card disable configuration. */
+ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK, RTL8812_NIC_DISABLE_FLOW);
+ }
+
+ /* Reset MCU IO Wrapper */
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0))));
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, u1b_tmp | BIT(0));
+
+ /* 7. RSV_CTRL 0x1C[7:0] = 0x0E */
+ /* lock ISO/CLK/Power control register */
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e);
+}
+
+void rtl8821ae_card_disable(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ enum nl80211_iftype opmode;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("rtl8821ae_card_disable.\n"));
+
+ mac->link_state = MAC80211_NOLINK;
+ opmode = NL80211_IFTYPE_UNSPECIFIED;
+ _rtl8821ae_set_media_status(hw, opmode);
+ if (rtlpriv->rtlhal.driver_is_goingto_unload ||
+ ppsc->rfoff_reason > RF_CHANGE_BY_PS)
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ _rtl8821ae_poweroff_adapter(hw);
+
+ /* after power off we should do iqk again */
+ rtlpriv->phy.iqk_initialized = false;
+}
+
+void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
+ u32 *p_inta, u32 *p_intb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, *p_inta);
+
+ *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+ rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+}
+
+
+void rtl8821ae_set_beacon_related_registers(struct ieee80211_hw *hw)
+{
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u16 bcn_interval, atim_window;
+
+ bcn_interval = mac->beacon_interval;
+ atim_window = 2; /*FIX MERGE */
+ rtl8821ae_disable_interrupt(hw);
+ rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18);
+ rtl_write_byte(rtlpriv, 0x606, 0x30);
+ rtlpci->reg_bcn_ctrl_val |= BIT(3);
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
+ rtl8821ae_enable_interrupt(hw);
+}
+
+void rtl8821ae_set_beacon_interval(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 bcn_interval = mac->beacon_interval;
+
+ RT_TRACE(COMP_BEACON, DBG_DMESG,
+ ("beacon_interval:%d\n", bcn_interval));
+ rtl8821ae_disable_interrupt(hw);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ rtl8821ae_enable_interrupt(hw);
+}
+
+void rtl8821ae_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ RT_TRACE(COMP_INTR, DBG_LOUD,
+ ("add_msr:%x, rm_msr:%x\n", add_msr, rm_msr));
+
+ if (add_msr)
+ rtlpci->irq_mask[0] |= add_msr;
+ if (rm_msr)
+ rtlpci->irq_mask[0] &= (~rm_msr);
+ rtl8821ae_disable_interrupt(hw);
+ rtl8821ae_enable_interrupt(hw);
+}
+
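+ /* Map a channel number to the group index used by the efuse TX power tables:
+ * five groups cover the 2.4G channels 1-14 and fourteen groups cover the 5G
+ * channels. */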
+static u8 _rtl8821ae_get_chnl_group(u8 chnl)
+{
+ u8 group = 0;
+
+ if (chnl <= 14) {
+ if (1 <= chnl && chnl <= 2 )
+ group = 0;
+ else if (3 <= chnl && chnl <= 5 )
+ group = 1;
+ else if (6 <= chnl && chnl <= 8 )
+ group = 2;
+ else if (9 <= chnl && chnl <= 11)
+ group = 3;
+ else /*if (12 <= chnl && chnl <= 14)*/
+ group = 4;
+ } else {
+ if (36 <= chnl && chnl <= 42)
+ group = 0;
+ else if (44 <= chnl && chnl <= 48)
+ group = 1;
+ else if (50 <= chnl && chnl <= 58)
+ group = 2;
+ else if (60 <= chnl && chnl <= 64)
+ group = 3;
+ else if (100 <= chnl && chnl <= 106)
+ group = 4;
+ else if (108 <= chnl && chnl <= 114)
+ group = 5;
+ else if (116 <= chnl && chnl <= 122)
+ group = 6;
+ else if (124 <= chnl && chnl <= 130)
+ group = 7;
+ else if (132 <= chnl && chnl <= 138)
+ group = 8;
+ else if (140 <= chnl && chnl <= 144)
+ group = 9;
+ else if (149 <= chnl && chnl <= 155)
+ group = 10;
+ else if (157 <= chnl && chnl <= 161)
+ group = 11;
+ else if (165 <= chnl && chnl <= 171)
+ group = 12;
+ else if (173 <= chnl && chnl <= 177)
+ group = 13;
+ else
+ /*RT_TRACE(COMP_EFUSE,DBG_LOUD,
+ ("5G, Channel %d in Group not found \n",chnl));*/
+ RT_ASSERT(!COMP_EFUSE,
+ ("5G, Channel %d in Group not found \n",chnl));
+ }
+ return group;
+}
+
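+ /* Parse the TX power section of the efuse map into the 2.4G/5G power info
+ * structures. If the autoload failed (or the section is blank) hard-coded
+ * defaults are used instead; 0xFE marks an unused diff entry and the 4-bit
+ * diff fields are sign-extended to 8 bits by OR-ing in 0xF0. */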
+static void _rtl8821ae_read_power_value_fromprom(struct ieee80211_hw *hw,
+ struct txpower_info_2g *pwrinfo24g,
+ struct txpower_info_5g *pwrinfo5g,
+ bool autoload_fail,
+ u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 rfPath, eeAddr = EEPROM_TX_PWR_INX, group, TxCount = 0;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("hal_ReadPowerValueFromPROM8821ae(): PROMContent[0x%x]=0x%x\n", (eeAddr+1), hwinfo[eeAddr+1]));
+ if (0xFF == hwinfo[eeAddr+1]) /*YJ,add,120316*/
+ autoload_fail = true;
+
+ if (autoload_fail) {
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("auto load fail : Use Default value!\n"));
+ for (rfPath = 0 ; rfPath < MAX_RF_PATH ; rfPath++) {
+ /*2.4G default value*/
+ for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) {
+ pwrinfo24g->index_cck_base[rfPath][group] = 0x2D;
+ pwrinfo24g->index_bw40_base[rfPath][group] = 0x2D;
+ }
+ for (TxCount = 0;TxCount < MAX_TX_COUNT;TxCount++) {
+ if (TxCount == 0) {
+ pwrinfo24g->bw20_diff[rfPath][0] = 0x02;
+ pwrinfo24g->ofdm_diff[rfPath][0] = 0x04;
+ } else {
+ pwrinfo24g->bw20_diff[rfPath][TxCount] = 0xFE;
+ pwrinfo24g->bw40_diff[rfPath][TxCount] = 0xFE;
+ pwrinfo24g->cck_diff[rfPath][TxCount] = 0xFE;
+ pwrinfo24g->ofdm_diff[rfPath][TxCount] = 0xFE;
+ }
+ }
+ /*5G default value*/
+ for (group = 0 ; group < MAX_CHNL_GROUP_5G; group++)
+ pwrinfo5g->index_bw40_base[rfPath][group] = 0x2A;
+
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ if (TxCount == 0) {
+ pwrinfo5g->ofdm_diff[rfPath][0] = 0x04;
+ pwrinfo5g->bw20_diff[rfPath][0] = 0x00;
+ pwrinfo5g->bw80_diff[rfPath][0] = 0xFE;
+ pwrinfo5g->bw160_diff[rfPath][0] = 0xFE;
+ } else {
+ pwrinfo5g->ofdm_diff[rfPath][TxCount] = 0xFE;
+ pwrinfo5g->bw20_diff[rfPath][TxCount] = 0xFE;
+ pwrinfo5g->bw40_diff[rfPath][TxCount] = 0xFE;
+ pwrinfo5g->bw80_diff[rfPath][TxCount] = 0xFE;
+ pwrinfo5g->bw160_diff[rfPath][TxCount] = 0xFE;
+ }
+ }
+ }
+ return;
+ }
+
+ rtl_priv(hw)->efuse.b_txpwr_fromeprom = true;
+
+ for (rfPath = 0 ; rfPath < MAX_RF_PATH ; rfPath++) {
+ /* 2.4G base TX power index */
+ for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) {
+ pwrinfo24g->index_cck_base[rfPath][group] = hwinfo[eeAddr++];
+ if (pwrinfo24g->index_cck_base[rfPath][group] == 0xFF)
+ pwrinfo24g->index_cck_base[rfPath][group] = 0x2D;
+ }
+ for (group = 0 ; group < MAX_CHNL_GROUP_24G - 1; group++) {
+ pwrinfo24g->index_bw40_base[rfPath][group] = hwinfo[eeAddr++];
+ if (pwrinfo24g->index_bw40_base[rfPath][group] == 0xFF)
+ pwrinfo24g->index_bw40_base[rfPath][group] = 0x2D;
+ }
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount ++) {
+ if (TxCount == 0) {
+ pwrinfo24g->bw40_diff[rfPath][TxCount] = 0;
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo24g->bw20_diff[rfPath][TxCount] = 0x02;
+ } else {
+ pwrinfo24g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4;
+ if (pwrinfo24g->bw20_diff[rfPath][TxCount] & BIT(3)) /* 4-bit sign number to 8-bit sign number */
+ pwrinfo24g->bw20_diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo24g->ofdm_diff[rfPath][TxCount] = 0x04;
+ } else {
+ pwrinfo24g->ofdm_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f);
+ if (pwrinfo24g->ofdm_diff[rfPath][TxCount] & BIT(3)) /* 4-bit sign number to 8-bit sign number */
+ pwrinfo24g->ofdm_diff[rfPath][TxCount] |= 0xF0;
+ }
+ pwrinfo24g->cck_diff[rfPath][TxCount] = 0;
+ eeAddr++;
+ } else {
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo24g->bw40_diff[rfPath][TxCount] = 0xFE;
+ } else {
+ pwrinfo24g->bw40_diff[rfPath][TxCount] = (hwinfo[eeAddr]&0xf0) >> 4;
+ if (pwrinfo24g->bw40_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo24g->bw40_diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo24g->bw20_diff[rfPath][TxCount] = 0xFE;
+ } else {
+ pwrinfo24g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f);
+ if (pwrinfo24g->bw20_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo24g->bw20_diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ eeAddr++;
+
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo24g->ofdm_diff[rfPath][TxCount] = 0xFE;
+ } else {
+ pwrinfo24g->ofdm_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4;
+ if(pwrinfo24g->ofdm_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo24g->ofdm_diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo24g->cck_diff[rfPath][TxCount] = 0xFE;
+ } else {
+ pwrinfo24g->cck_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f);
+ if(pwrinfo24g->cck_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo24g->cck_diff[rfPath][TxCount] |= 0xF0;
+ }
+ eeAddr++;
+ }
+ }
+
+ /* 5G base TX power index */
+ for (group = 0 ; group < MAX_CHNL_GROUP_5G; group ++) {
+ pwrinfo5g->index_bw40_base[rfPath][group] = hwinfo[eeAddr++];
+ if (pwrinfo5g->index_bw40_base[rfPath][group] == 0xFF)
+ pwrinfo5g->index_bw40_base[rfPath][group] = 0xFE;
+ }
+
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ if (TxCount == 0) {
+ pwrinfo5g->bw40_diff[rfPath][TxCount] = 0;
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo5g->bw20_diff[rfPath][TxCount] = 0x0;
+ } else {
+ pwrinfo5g->bw20_diff[rfPath][0] = (hwinfo[eeAddr] & 0xf0) >> 4;
+ if(pwrinfo5g->bw20_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo5g->bw20_diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo5g->ofdm_diff[rfPath][TxCount] = 0x4;
+ } else {
+ pwrinfo5g->ofdm_diff[rfPath][0] = (hwinfo[eeAddr] & 0x0f);
+ if(pwrinfo5g->ofdm_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo5g->ofdm_diff[rfPath][TxCount] |= 0xF0;
+ }
+ eeAddr++;
+ } else {
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo5g->bw40_diff[rfPath][TxCount] = 0xFE;
+ } else {
+ pwrinfo5g->bw40_diff[rfPath][TxCount]= (hwinfo[eeAddr] & 0xf0) >> 4;
+ if(pwrinfo5g->bw40_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo5g->bw40_diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo5g->bw20_diff[rfPath][TxCount] = 0xFE;
+ } else {
+ pwrinfo5g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f);
+ if(pwrinfo5g->bw20_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo5g->bw20_diff[rfPath][TxCount] |= 0xF0;
+ }
+ eeAddr++;
+ }
+ }
+
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo5g->ofdm_diff[rfPath][1] = 0xFE;
+ pwrinfo5g->ofdm_diff[rfPath][2] = 0xFE;
+ } else {
+ pwrinfo5g->ofdm_diff[rfPath][1] = (hwinfo[eeAddr] & 0xf0) >> 4;
+ pwrinfo5g->ofdm_diff[rfPath][2] = (hwinfo[eeAddr] & 0x0f);
+ }
+ eeAddr++;
+ if (hwinfo[eeAddr] == 0xFF)
+ pwrinfo5g->ofdm_diff[rfPath][3] = 0xFE;
+ else
+ pwrinfo5g->ofdm_diff[rfPath][3] = (hwinfo[eeAddr] & 0x0f);
+
+ eeAddr++;
+
+ for (TxCount = 1; TxCount < MAX_TX_COUNT; TxCount++) {
+ if (pwrinfo5g->ofdm_diff[rfPath][TxCount] == 0xFF)
+ pwrinfo5g->ofdm_diff[rfPath][TxCount] = 0xFE;
+ else if(pwrinfo5g->ofdm_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo5g->ofdm_diff[rfPath][TxCount] |= 0xF0;
+ }
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo5g->bw80_diff[rfPath][TxCount] = 0xFE;
+ } else {
+ pwrinfo5g->bw80_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4;
+ if (pwrinfo5g->bw80_diff[rfPath][TxCount] & BIT(3)) /* 4-bit sign number to 8-bit sign number */
+ pwrinfo5g->bw80_diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo5g->bw160_diff[rfPath][TxCount] = 0xFE;
+ } else {
+ pwrinfo5g->bw160_diff[rfPath][TxCount]= (hwinfo[eeAddr] & 0x0f);
+ if (pwrinfo5g->bw160_diff[rfPath][TxCount] & BIT(3)) /* 4-bit sign number to 8-bit sign number */
+ pwrinfo5g->bw160_diff[rfPath][TxCount] |= 0xF0;
+ }
+ eeAddr++;
+ }
+ }
+}
+
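+ /* 8812AE variant: copy the parsed PROM power values into the rtl_efuse
+ * tables (per-channel CCK/HT40 bases for 2.4G, BW40 bases for 5G, BW80 bases
+ * as the average of two adjacent BW40 bases, plus the per-rate diffs and the
+ * regulatory setting). An almost identical 8821AE variant follows. */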
+static void _rtl8812ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail,
+ u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct txpower_info_2g pwrinfo24g;
+ struct txpower_info_5g pwrinfo5g;
+ u8 channel5g[CHANNEL_MAX_NUMBER_5G] =
+ {36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,100,102,104,106,108,110,112,
+ 114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,149,151,
+ 153,155,157,159,161,163,165,167,168,169,171,173,175,177};
+ u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171};
+ u8 rf_path, index;
+ u8 i;
+
+ _rtl8821ae_read_power_value_fromprom(hw, &pwrinfo24g, &pwrinfo5g, autoload_fail, hwinfo);
+
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ for (i = 0; i < CHANNEL_MAX_NUMBER_2G; i++) {
+ index = _rtl8821ae_get_chnl_group(i + 1);
+
+ if (i == CHANNEL_MAX_NUMBER_2G - 1) {
+ rtlefuse->txpwrlevel_cck[rf_path][i] =
+ pwrinfo24g.index_cck_base[rf_path][5];
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
+ pwrinfo24g.index_bw40_base[rf_path][index];
+ } else {
+ rtlefuse->txpwrlevel_cck[rf_path][i] =
+ pwrinfo24g.index_cck_base[rf_path][index];
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
+ pwrinfo24g.index_bw40_base[rf_path][index];
+ }
+ }
+
+ for (i = 0; i < CHANNEL_MAX_NUMBER_5G; i++) {
+ index = _rtl8821ae_get_chnl_group(channel5g[i]);
+ rtlefuse->txpwr_5g_bw40base[rf_path][i] = pwrinfo5g.index_bw40_base[rf_path][index];
+ }
+ for (i = 0; i < CHANNEL_MAX_NUMBER_5G_80M; i++) {
+ u8 upper, lower;
+ index = _rtl8821ae_get_chnl_group(channel5g_80m[i]);
+ upper = pwrinfo5g.index_bw40_base[rf_path][index];
+ lower = pwrinfo5g.index_bw40_base[rf_path][index + 1];
+
+ rtlefuse->txpwr_5g_bw80base[rf_path][i] = (upper + lower) / 2;
+ }
+ for (i = 0; i < MAX_TX_COUNT; i++) {
+ rtlefuse->txpwr_cckdiff[rf_path][i] = pwrinfo24g.cck_diff[rf_path][i];
+ rtlefuse->txpwr_legacyhtdiff[rf_path][i] = pwrinfo24g.ofdm_diff[rf_path][i];
+ rtlefuse->txpwr_ht20diff[rf_path][i] = pwrinfo24g.bw20_diff[rf_path][i];
+ rtlefuse->txpwr_ht40diff[rf_path][i] = pwrinfo24g.bw40_diff[rf_path][i];
+
+ rtlefuse->txpwr_5g_ofdmdiff[rf_path][i] = pwrinfo5g.ofdm_diff[rf_path][i];
+ rtlefuse->txpwr_5g_bw20diff[rf_path][i] = pwrinfo5g.bw20_diff[rf_path][i];
+ rtlefuse->txpwr_5g_bw40diff[rf_path][i] = pwrinfo5g.bw40_diff[rf_path][i];
+ rtlefuse->txpwr_5g_bw80diff[rf_path][i] = pwrinfo5g.bw80_diff[rf_path][i];
+ }
+ }
+
+ if (!autoload_fail){
+ rtlefuse->eeprom_regulatory =
+ hwinfo[EEPROM_RF_BOARD_OPTION] & 0x07;/*bit0~2*/
+ if (hwinfo[EEPROM_RF_BOARD_OPTION] == 0xFF)
+ rtlefuse->eeprom_regulatory = 0;
+ } else {
+ rtlefuse->eeprom_regulatory = 0;
+ }
+
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory ));
+}
+
+static void _rtl8821ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail,
+ u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct txpower_info_2g pwrinfo24g;
+ struct txpower_info_5g pwrinfo5g;
+ u8 channel5g[CHANNEL_MAX_NUMBER_5G] =
+ {36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,100,102,104,106,108,110,112,
+ 114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,149,151,
+ 153,155,157,159,161,163,165,167,168,169,171,173,175,177};
+ u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171};
+ u8 rf_path, index;
+ u8 i;
+
+ _rtl8821ae_read_power_value_fromprom(hw, &pwrinfo24g, &pwrinfo5g, autoload_fail, hwinfo);
+
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ for (i = 0; i < CHANNEL_MAX_NUMBER_2G; i++) {
+ index = _rtl8821ae_get_chnl_group(i + 1);
+
+ if (i == CHANNEL_MAX_NUMBER_2G - 1) {
+ rtlefuse->txpwrlevel_cck[rf_path][i] = pwrinfo24g.index_cck_base[rf_path][5];
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = pwrinfo24g.index_bw40_base[rf_path][index];
+ } else {
+ rtlefuse->txpwrlevel_cck[rf_path][i] = pwrinfo24g.index_cck_base[rf_path][index];
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = pwrinfo24g.index_bw40_base[rf_path][index];
+ }
+ }
+
+ for (i = 0; i < CHANNEL_MAX_NUMBER_5G; i++) {
+ index = _rtl8821ae_get_chnl_group(channel5g[i]);
+ rtlefuse->txpwr_5g_bw40base[rf_path][i] = pwrinfo5g.index_bw40_base[rf_path][index];
+ }
+ for (i = 0; i < CHANNEL_MAX_NUMBER_5G_80M; i++) {
+ u8 upper, lower;
+ index = _rtl8821ae_get_chnl_group(channel5g_80m[i]);
+ upper = pwrinfo5g.index_bw40_base[rf_path][index];
+ lower = pwrinfo5g.index_bw40_base[rf_path][index + 1];
+
+ rtlefuse->txpwr_5g_bw80base[rf_path][i] = (upper + lower) / 2;
+ }
+ for (i = 0; i < MAX_TX_COUNT; i++) {
+ rtlefuse->txpwr_cckdiff[rf_path][i] = pwrinfo24g.cck_diff[rf_path][i];
+ rtlefuse->txpwr_legacyhtdiff[rf_path][i] = pwrinfo24g.ofdm_diff[rf_path][i];
+ rtlefuse->txpwr_ht20diff[rf_path][i] = pwrinfo24g.bw20_diff[rf_path][i];
+ rtlefuse->txpwr_ht40diff[rf_path][i] = pwrinfo24g.bw40_diff[rf_path][i];
+
+ rtlefuse->txpwr_5g_ofdmdiff[rf_path][i] = pwrinfo5g.ofdm_diff[rf_path][i];
+ rtlefuse->txpwr_5g_bw20diff[rf_path][i] = pwrinfo5g.bw20_diff[rf_path][i];
+ rtlefuse->txpwr_5g_bw40diff[rf_path][i] = pwrinfo5g.bw40_diff[rf_path][i];
+ rtlefuse->txpwr_5g_bw80diff[rf_path][i] = pwrinfo5g.bw80_diff[rf_path][i];
+ }
+ }
+
+ if (!autoload_fail){
+ rtlefuse->eeprom_regulatory = hwinfo[EEPROM_RF_BOARD_OPTION] & 0x07;/*bit0~2*/
+ if (hwinfo[EEPROM_RF_BOARD_OPTION] == 0xFF)
+ rtlefuse->eeprom_regulatory = 0;
+ } else {
+ rtlefuse->eeprom_regulatory = 0;
+ }
+
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory ));
+}
+
+static void _rtl8812ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_test )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u16 i, usvalue;
+ u8 hwinfo[HWSET_MAX_SIZE];
+ u16 eeprom_id;
+
+ if (b_pseudo_test) {
+ /* need add */
+ }
+
+ if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ rtl_efuse_shadow_map_update(hw);
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ HWSET_MAX_SIZE);
+ } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("RTL819X Not boot from eeprom, check it !!"));
+ }
+
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP \n"),
+ hwinfo, HWSET_MAX_SIZE);
+
+ eeprom_id = *((u16 *) & hwinfo[0]);
+ if (eeprom_id != RTL_EEPROM_ID) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
+ rtlefuse->autoload_failflag = true;
+ } else {
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ rtlefuse->autoload_failflag = false;
+ }
+
+ if (rtlefuse->autoload_failflag == true) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("RTL8812AE autoload_failflag, check it !!"));
+ return;
+ }
+
+ rtlefuse->eeprom_version = *(u8 *) & hwinfo[EEPROM_VERSION];
+ if (rtlefuse->eeprom_version == 0xff)
+ rtlefuse->eeprom_version = 0;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM version: 0x%2x\n", rtlefuse->eeprom_version));
+
+ rtlefuse->eeprom_vid = *(u16 *) &hwinfo[EEPROM_VID];
+ rtlefuse->eeprom_did = *(u16 *) &hwinfo[EEPROM_DID];
+ rtlefuse->eeprom_svid = *(u16 *) &hwinfo[EEPROM_SVID];
+ rtlefuse->eeprom_smid = *(u16 *) &hwinfo[EEPROM_SMID];
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROMId = 0x%4x\n", eeprom_id));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid));
+
+ /*customer ID*/
+ rtlefuse->eeprom_oemid = *(u8 *) & hwinfo[EEPROM_CUSTOMER_ID];
+ if (rtlefuse->eeprom_oemid == 0xFF)
+ rtlefuse->eeprom_oemid = 0;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
+
+ for (i = 0; i < 6; i += 2) {
+ usvalue = *(u16 *) & hwinfo[EEPROM_MAC_ADDR + i];
+ *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
+ }
+
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("dev_addr: %pM\n", rtlefuse->dev_addr));
+
+ _rtl8812ae_read_txpower_info_from_hwpg(hw,
+ rtlefuse->autoload_failflag, hwinfo);
+
+ /*board type*/
+ rtlefuse->board_type = (((*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]) & 0xE0 ) >> 5);
+ if ((*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]) == 0xff )
+ rtlefuse->board_type = 0;
+ rtlhal->boad_type = rtlefuse->board_type;
+
+ rtl8812ae_read_bt_coexist_info_from_hwpg(hw,
+ rtlefuse->autoload_failflag, hwinfo);
+
+ rtlefuse->eeprom_channelplan = *(u8 *) & hwinfo[EEPROM_CHANNELPLAN];
+ if (rtlefuse->eeprom_channelplan == 0xff)
+ rtlefuse->eeprom_channelplan = 0x7F;
+
+ /* set channel plan to world wide 13 */
+ //rtlefuse->channel_plan = (u8) rtlefuse->eeprom_channelplan;
+
+ /*parse xtal*/
+ rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8821AE];
+ if ( rtlefuse->crystalcap == 0xFF )
+ rtlefuse->crystalcap = 0x20;
+
+ rtlefuse->eeprom_thermalmeter = *(u8 *) & hwinfo[EEPROM_THERMAL_METER];
+ if ((rtlefuse->eeprom_thermalmeter == 0xff) || rtlefuse->autoload_failflag) {
+ rtlefuse->b_apk_thermalmeterignore = true;
+ rtlefuse->eeprom_thermalmeter = 0xff;
+ }
+
+ rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
+
+ if (rtlefuse->autoload_failflag == false) {
+ rtlefuse->antenna_div_cfg = ((*(u8 *) &hwinfo[EEPROM_RF_BOARD_OPTION]) & 0x18) >> 3;
+ if (*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION] == 0xff)
+ rtlefuse->antenna_div_cfg = 0x00;
+ /*if (BT_1ant())
+ rtlefuse->antenna_div_cfg = 0;*/
+ rtlefuse->antenna_div_type = *(u8 *) &hwinfo[EEPROM_RF_ANTENNA_OPT_88E];
+ if (rtlefuse->antenna_div_type == 0xFF)
+ rtlefuse->antenna_div_type = FIXED_HW_ANTDIV;
+ } else {
+ rtlefuse->antenna_div_cfg = 0;
+ rtlefuse->antenna_div_type = 0;
+ }
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("SWAS: bHwAntDiv = %x, TRxAntDivType = %x\n",
+ rtlefuse->antenna_div_cfg, rtlefuse->antenna_div_type));
+
+ /*Hal_ReadPAType_8821A()*/
+ /*Hal_EfuseParseRateIndicationOption8821A()*/
+ /*Hal_ReadEfusePCIeCap8821AE()*/
+
+ pcipriv->ledctl.bled_opendrain = true;
+
+ if (rtlhal->oem_id == RT_CID_DEFAULT) {
+ switch (rtlefuse->eeprom_oemid) {
+ case RT_CID_DEFAULT:
+ break;
+ case EEPROM_CID_TOSHIBA:
+ rtlhal->oem_id = RT_CID_TOSHIBA;
+ break;
+ case EEPROM_CID_CCX:
+ rtlhal->oem_id = RT_CID_CCX;
+ break;
+ case EEPROM_CID_QMI:
+ rtlhal->oem_id = RT_CID_819x_QMI;
+ break;
+ case EEPROM_CID_WHQL:
+ break;
+ default:
+ break;
+
+ }
+ }
+}
+
+static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_test )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u16 i, usvalue;
+ u8 hwinfo[HWSET_MAX_SIZE];
+ u16 eeprom_id;
+
+ if (b_pseudo_test) {
+ /* need add */
+ }
+
+ if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ rtl_efuse_shadow_map_update(hw);
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ HWSET_MAX_SIZE);
+ } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("RTL819X Not boot from eeprom, check it !!"));
+ }
+
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP \n"),
+ hwinfo, HWSET_MAX_SIZE);
+
+ eeprom_id = *((u16 *) & hwinfo[0]);
+ if (eeprom_id != RTL_EEPROM_ID) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
+ rtlefuse->autoload_failflag = true;
+ } else {
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ rtlefuse->autoload_failflag = false;
+ }
+
+ if (rtlefuse->autoload_failflag == true) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("RTL8812AE autoload_failflag, check it !!"));
+ return;
+ }
+
+ rtlefuse->eeprom_version = *(u8 *) & hwinfo[EEPROM_VERSION];
+ if (rtlefuse->eeprom_version == 0xff)
+ rtlefuse->eeprom_version = 0;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM version: 0x%2x\n", rtlefuse->eeprom_version));
+
+ rtlefuse->eeprom_vid = *(u16 *) &hwinfo[EEPROM_VID];
+ rtlefuse->eeprom_did = *(u16 *) &hwinfo[EEPROM_DID];
+ rtlefuse->eeprom_svid = *(u16 *) &hwinfo[EEPROM_SVID];
+ rtlefuse->eeprom_smid = *(u16 *) &hwinfo[EEPROM_SMID];
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROMId = 0x%4x\n", eeprom_id));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid));
+
+ /*customer ID*/
+ rtlefuse->eeprom_oemid = *(u8 *) & hwinfo[EEPROM_CUSTOMER_ID];
+ if (rtlefuse->eeprom_oemid == 0xFF)
+ rtlefuse->eeprom_oemid = 0;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
+
+ for (i = 0; i < 6; i += 2) {
+ usvalue = *(u16 *) & hwinfo[EEPROM_MAC_ADDR + i];
+ *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
+ }
+
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("dev_addr: %pM\n", rtlefuse->dev_addr));
+
+ _rtl8821ae_read_txpower_info_from_hwpg(hw,
+ rtlefuse->autoload_failflag, hwinfo);
+
+ /*board type*/
+ rtlefuse->board_type = (((*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]) & 0xE0 ) >> 5);
+ if ((*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]) == 0xff )
+ rtlefuse->board_type = 0;
+ rtlhal->boad_type = rtlefuse->board_type;
+
+ rtl8821ae_read_bt_coexist_info_from_hwpg(hw,
+ rtlefuse->autoload_failflag, hwinfo);
+
+ rtlefuse->eeprom_channelplan = *(u8 *) & hwinfo[EEPROM_CHANNELPLAN];
+ if (rtlefuse->eeprom_channelplan == 0xff)
+ rtlefuse->eeprom_channelplan = 0x7F;
+
+ /* set channel plan to world wide 13 */
+ //rtlefuse->channel_plan = (u8) rtlefuse->eeprom_channelplan;
+
+ /*parse xtal*/
+ rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8821AE];
+ if ( rtlefuse->crystalcap == 0xFF )
+ rtlefuse->crystalcap = 0x20;
+
+ rtlefuse->eeprom_thermalmeter = *(u8 *) & hwinfo[EEPROM_THERMAL_METER];
+ if ((rtlefuse->eeprom_thermalmeter == 0xff) || rtlefuse->autoload_failflag) {
+ rtlefuse->b_apk_thermalmeterignore = true;
+ rtlefuse->eeprom_thermalmeter = 0x18;
+ }
+
+ rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
+
+ if (rtlefuse->autoload_failflag == false) {
+ rtlefuse->antenna_div_cfg = (*(u8 *) &hwinfo[EEPROM_RF_BOARD_OPTION] & BIT(3)) ? true : false;
+ /*if (BT_1ant())
+ rtlefuse->antenna_div_cfg = 0;*/
+
+ rtlefuse->antenna_div_type = CG_TRX_HW_ANTDIV;
+ } else {
+ rtlefuse->antenna_div_cfg = 0;
+ rtlefuse->antenna_div_type = 0;
+ }
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("SWAS: bHwAntDiv = %x, TRxAntDivType = %x\n",
+ rtlefuse->antenna_div_cfg, rtlefuse->antenna_div_type));
+
+ pcipriv->ledctl.bled_opendrain = true;
+
+ if (rtlhal->oem_id == RT_CID_DEFAULT) {
+ switch (rtlefuse->eeprom_oemid) {
+ case RT_CID_DEFAULT:
+ break;
+ case EEPROM_CID_TOSHIBA:
+ rtlhal->oem_id = RT_CID_TOSHIBA;
+ break;
+ case EEPROM_CID_CCX:
+ rtlhal->oem_id = RT_CID_CCX;
+ break;
+ case EEPROM_CID_QMI:
+ rtlhal->oem_id = RT_CID_819x_QMI;
+ break;
+ case EEPROM_CID_WHQL:
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+
+/*static void _rtl8821ae_hal_customized_behavior(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ pcipriv->ledctl.bled_opendrain = true;
+ switch (rtlhal->oem_id) {
+ case RT_CID_819x_HP:
+ pcipriv->ledctl.bled_opendrain = true;
+ break;
+ case RT_CID_819x_Lenovo:
+ case RT_CID_DEFAULT:
+ case RT_CID_TOSHIBA:
+ case RT_CID_CCX:
+ case RT_CID_819x_Acer:
+ case RT_CID_WHQL:
+ default:
+ break;
+ }
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("RT Customized ID: 0x%02X\n", rtlhal->oem_id));
+}*/
+
+void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp_u1b;
+
+ rtlhal->version = _rtl8821ae_read_chip_version(hw);
+
+ if (get_rf_type(rtlphy) == RF_1T1R)
+ rtlpriv->dm.brfpath_rxenable[0] = true;
+ else
+ rtlpriv->dm.brfpath_rxenable[0] =
+ rtlpriv->dm.brfpath_rxenable[1] = true;
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
+ rtlhal->version));
+
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
+ if (tmp_u1b & BIT(4)) {
+ RT_TRACE(COMP_INIT, DBG_DMESG, ("Boot from EEPROM\n"));
+ rtlefuse->epromtype = EEPROM_93C46;
+ } else {
+ RT_TRACE(COMP_INIT, DBG_DMESG, ("Boot from EFUSE\n"));
+ rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
+ }
+
+ if (tmp_u1b & BIT(5)) {
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ rtlefuse->autoload_failflag = false;
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ _rtl8812ae_read_adapter_info(hw, false);
+ else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+ _rtl8821ae_read_adapter_info(hw, false);
+ } else {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("Autoload ERR!!\n"));
+ }
+ /*hal_ReadRFType_8812A()*/
+ //_rtl8821ae_hal_customized_behavior(hw);
+}
+
+static void rtl8821ae_update_hal_rate_table(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 ratr_value;
+ u8 ratr_index = 0;
+ u8 b_nmode = mac->ht_enable;
+ u8 mimo_ps = IEEE80211_SMPS_OFF;
+ u16 shortgi_rate;
+ u32 tmp_ratr_value;
+ u8 b_curtxbw_40mhz = mac->bw_40;
+ u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ 1 : 0;
+ u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ 1 : 0;
+ enum wireless_mode wirelessmode = mac->mode;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ ratr_value = sta->supp_rates[1] << 4;
+ else
+ ratr_value = sta->supp_rates[0];
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ ratr_value = 0xfff;
+ ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+ sta->ht_cap.mcs.rx_mask[0] << 12);
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ if (ratr_value & 0x0000000c)
+ ratr_value &= 0x0000000d;
+ else
+ ratr_value &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_value &= 0x00000FF5;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ b_nmode = 1;
+ if (mimo_ps == IEEE80211_SMPS_STATIC) {
+ ratr_value &= 0x0007F005;
+ } else {
+ u32 ratr_mask;
+
+ if (get_rf_type(rtlphy) == RF_1T2R ||
+ get_rf_type(rtlphy) == RF_1T1R)
+ ratr_mask = 0x000ff005;
+ else
+ ratr_mask = 0x0f0ff005;
+
+ ratr_value &= ratr_mask;
+ }
+ break;
+ default:
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_value &= 0x000ff0ff;
+ else
+ ratr_value &= 0x0f0ff0ff;
+
+ break;
+ }
+
+ if ( (rtlpcipriv->btcoexist.bt_coexistence) &&
+ (rtlpcipriv->btcoexist.bt_coexist_type == BT_CSR_BC4) &&
+ (rtlpcipriv->btcoexist.bt_cur_state) &&
+ (rtlpcipriv->btcoexist.bt_ant_isolation) &&
+ ((rtlpcipriv->btcoexist.bt_service == BT_SCO)||
+ (rtlpcipriv->btcoexist.bt_service == BT_BUSY)) )
+ ratr_value &= 0x0fffcfc0;
+ else
+ ratr_value &= 0x0FFFFFFF;
+
+ if (b_nmode && ((b_curtxbw_40mhz &&
+ b_curshortgi_40mhz) || (!b_curtxbw_40mhz &&
+ b_curshortgi_20mhz))) {
+
+ ratr_value |= 0x10000000;
+ tmp_ratr_value = (ratr_value >> 12);
+
+ for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
+ if ((1 << shortgi_rate) & tmp_ratr_value)
+ break;
+ }
+
+ shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
+ (shortgi_rate << 4) | (shortgi_rate);
+ }
+
+ rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+
+ RT_TRACE(COMP_RATR, DBG_DMESG,
+ ("%x\n", rtl_read_dword(rtlpriv, REG_ARFR0)));
+}
+
+
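+ /* Map a rate-adaptive index (RATR_INX_*) to the ARFR table id carried in the
+ * firmware RA mask command; the id depends on the RF type (1T1R vs. 2T2R)
+ * and, for 2.4G 11ac, on the current channel bandwidth. */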
+static u8 _rtl8821ae_mrate_idx_to_arfr_id(
+ struct ieee80211_hw *hw, u8 rate_index,
+ enum wireless_mode wirelessmode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 ret = 0;
+ switch (rate_index) {
+ case RATR_INX_WIRELESS_NGB:
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = 1;
+ else
+ ret = 0;
+ break;
+ case RATR_INX_WIRELESS_N:
+ case RATR_INX_WIRELESS_NG:
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = 5;
+ else
+ ret = 4;
+ break;
+ case RATR_INX_WIRELESS_NB:
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = 3;
+ else
+ ret = 2;
+ break;
+ case RATR_INX_WIRELESS_GB:
+ ret = 6;
+ break;
+ case RATR_INX_WIRELESS_G:
+ ret = 7;
+ break;
+ case RATR_INX_WIRELESS_B:
+ ret = 8;
+ break;
+ case RATR_INX_WIRELESS_MC:
+ if ((wirelessmode == WIRELESS_MODE_B)
+ || (wirelessmode == WIRELESS_MODE_G)
+ || (wirelessmode == WIRELESS_MODE_N_24G)
+ || (wirelessmode == WIRELESS_MODE_AC_24G))
+ ret = 6;
+ else
+ ret = 7;
+ break; /* do not fall through into the AC_5N case */
+ case RATR_INX_WIRELESS_AC_5N:
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = 10;
+ else
+ ret = 9;
+ break;
+ case RATR_INX_WIRELESS_AC_24N:
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = 10;
+ else
+ ret = 9;
+ } else {
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = 11;
+ else
+ ret = 12;
+ }
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
+static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u8 rssi_level)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_sta_info * sta_entry = NULL;
+ u32 ratr_bitmap;
+ u8 ratr_index;
+ u8 b_curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ ? 1 : 0;
+ u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ 1 : 0;
+ u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ 1 : 0;
+ enum wireless_mode wirelessmode = 0;
+ bool b_shortgi = false;
+ u8 rate_mask[7];
+ u8 macid = 0;
+ u8 mimo_ps = IEEE80211_SMPS_OFF;
+
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ wirelessmode = sta_entry->wireless_mode;
+ if (mac->opmode == NL80211_IFTYPE_STATION ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT)
+ b_curtxbw_40mhz = mac->bw_40;
+ else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC)
+ macid = sta->aid + 1;
+
+ ratr_bitmap = sta->supp_rates[0];
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ ratr_bitmap = 0xfff;
+
+ ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+ sta->ht_cap.mcs.rx_mask[0] << 12);
+/*mac id owner*/
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ ratr_index = RATR_INX_WIRELESS_B;
+ if (ratr_bitmap & 0x0000000c)
+ ratr_bitmap &= 0x0000000d;
+ else
+ ratr_bitmap &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_index = RATR_INX_WIRELESS_GB;
+
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00000f00;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x00000ff0;
+ else
+ ratr_bitmap &= 0x00000ff5;
+ break;
+ case WIRELESS_MODE_A:
+ ratr_index = RATR_INX_WIRELESS_G;
+ ratr_bitmap &= 0x00000ff0;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ if (wirelessmode == WIRELESS_MODE_N_24G)
+ ratr_index = RATR_INX_WIRELESS_NGB;
+ else
+ ratr_index = RATR_INX_WIRELESS_NG;
+
+ if (mimo_ps == IEEE80211_SMPS_STATIC || mimo_ps == IEEE80211_SMPS_DYNAMIC) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00070000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0007f000;
+ else
+ ratr_bitmap &= 0x0007f005;
+ } else {
+ if ( rtlphy->rf_type == RF_1T1R) {
+ if (b_curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff005;
+ }
+ } else {
+ if (b_curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0fff0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0ffff000;
+ else
+ ratr_bitmap &= 0x0ffff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0fff0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0ffff000;
+ else
+ ratr_bitmap &= 0x0ffff005;
+ }
+ }
+ }
+ if ((b_curtxbw_40mhz && b_curshortgi_40mhz) ||
+ (!b_curtxbw_40mhz && b_curshortgi_20mhz)) {
+
+ if (macid == 0)
+ b_shortgi = true;
+ else if (macid == 1)
+ b_shortgi = false;
+ }
+ break;
+
+ case WIRELESS_MODE_AC_24G:
+ ratr_index = RATR_INX_WIRELESS_AC_24N;
+ if (rssi_level == 1)
+ ratr_bitmap &= 0xfc3f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0xfffff000;
+ else
+ ratr_bitmap &= 0xffffffff;
+ break;
+
+ case WIRELESS_MODE_AC_5G:
+ ratr_index = RATR_INX_WIRELESS_AC_5N;
+
+ if (rtlphy->rf_type == RF_1T1R) {
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ if (rssi_level == 1) /* added by Gary for ac-series */
+ ratr_bitmap &= 0x003f8000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x003ff000;
+ else
+ ratr_bitmap &= 0x003ff010;
+ } else {
+ ratr_bitmap &= 0x000ff010;
+ }
+ } else {
+ if (rssi_level == 1) /* added by Gary for ac-series */
+ ratr_bitmap &= 0xfe3f8000; /* VHT 2SS MCS3~9 */
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0xfffff000; /* VHT 2SS MCS0~9 */
+ else
+ ratr_bitmap &= 0xfffff010; /* All */
+ }
+ break;
+
+ default:
+ ratr_index = RATR_INX_WIRELESS_NGB;
+
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_bitmap &= 0x000ff0ff;
+ else
+ ratr_bitmap &= 0x0f0ff0ff;
+ break;
+
+ }
+
+ sta_entry->ratr_index = ratr_index;
+
+ RT_TRACE(COMP_RATR, DBG_DMESG,
+ ("ratr_bitmap :%x\n", ratr_bitmap));
+ *(u32 *) & rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) |
+ (ratr_index << 28));
+ rate_mask[0] = macid;
+ rate_mask[1] = _rtl8821ae_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode) | (b_shortgi ? 0x80 : 0x00);
+ rate_mask[2] = b_curtxbw_40mhz;
+ /* if (prox_priv->proxim_modeinfo->power_output > 0)
+ rate_mask[2] |= BIT(6); */
+
+ rate_mask[3] = (u8)(ratr_bitmap & 0x000000ff);
+ rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >>8);
+ rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16);
+ rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24);
+
+ RT_TRACE(COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
+ "ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1],
+ rate_mask[2], rate_mask[3],
+ rate_mask[4], rate_mask[5],
+ rate_mask[6]));
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RA_MASK, 7, rate_mask);
+ _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(3), 0);
+}
+
+void rtl8821ae_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u8 rssi_level)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ if (rtlpriv->dm.b_useramask)
+ rtl8821ae_update_hal_rate_mask(hw, sta, rssi_level);
+ else
+ /*RT_TRACE(COMP_RATR,DBG_LOUD,("rtl8821ae_update_hal_rate_tbl(): Error! 8821ae FW RA Only"));*/
+ rtl8821ae_update_hal_rate_table(hw, sta);
+}
+
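+/*
+ * Refresh slot time and SIFS: the shorter SIFS value (0x0a0a) is used for
+ * legacy (non-HT) operation, 0x0e0e once HT is enabled.
+ */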
+void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 sifs_timer;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+ (u8 *) & mac->slot_time);
+ if (!mac->ht_enable)
+ sifs_timer = 0x0a0a;
+ else
+ sifs_timer = 0x0e0e;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *) & sifs_timer);
+}
+
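+/*
+ * Poll the hardware RF-kill GPIO: read GPIO pin control bit 1, apply the
+ * configured polarity, and update ppsc->b_hwradiooff when the switch has
+ * actually changed state.  Returns true while the radio is allowed on.
+ */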
+bool rtl8821ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+ u8 u1tmp = 0;
+ bool b_actuallyset = false;
+
+ if (rtlpriv->rtlhal.being_init_adapter)
+ return false;
+
+ if (ppsc->b_swrf_processing)
+ return false;
+
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ if (ppsc->rfchange_inprogress) {
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ return false;
+ } else {
+ ppsc->rfchange_inprogress = true;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ }
+
+ cur_rfstate = ppsc->rfpwr_state;
+
+ rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2,
+ rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2) & ~(BIT(1)));
+
+ u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL_2);
+
+ if (rtlphy->polarity_ctl) {
+ e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFOFF : ERFON;
+ } else {
+ e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF;
+ }
+
+ if (ppsc->b_hwradiooff && (e_rfpowerstate_toset == ERFON)) {
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ ("GPIOChangeRF - HW Radio ON, RF ON\n"));
+
+ e_rfpowerstate_toset = ERFON;
+ ppsc->b_hwradiooff = false;
+ b_actuallyset = true;
+ } else if (!ppsc->b_hwradiooff &&
+ (e_rfpowerstate_toset == ERFOFF)) {
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ ("GPIOChangeRF - HW Radio OFF, RF OFF\n"));
+
+ e_rfpowerstate_toset = ERFOFF;
+ ppsc->b_hwradiooff = true;
+ b_actuallyset = true;
+ }
+
+ if (b_actuallyset) {
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ } else {
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ }
+
+ *valid = 1;
+ return !ppsc->b_hwradiooff;
+
+}
+
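+/*
+ * CAM key management: clear_all wipes the first five CAM entries and the
+ * cached key material; otherwise WEP/default keys use key_index directly
+ * as the CAM entry, group keys use the broadcast address, and pairwise
+ * keys take a free CAM slot in AP mode or CAM_PAIRWISE_KEY_POSITION
+ * elsewhere.  A zero-length key deletes the entry instead of adding it.
+ */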
+void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 *macaddr = p_macaddr;
+ u32 entry_id = 0;
+ bool is_pairwise = false;
+
+ static u8 cam_const_addr[4][6] = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+ };
+ static u8 cam_const_broad[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+
+ if (clear_all) {
+ u8 idx = 0;
+ u8 cam_offset = 0;
+ u8 clear_number = 5;
+
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("clear_all\n"));
+
+ for (idx = 0; idx < clear_number; idx++) {
+ rtl_cam_mark_invalid(hw, cam_offset + idx);
+ rtl_cam_empty_entry(hw, cam_offset + idx);
+
+ if (idx < 5) {
+ memset(rtlpriv->sec.key_buf[idx], 0,
+ MAX_KEY_LEN);
+ rtlpriv->sec.key_len[idx] = 0;
+ }
+ }
+
+ } else {
+ switch (enc_algo) {
+ case WEP40_ENCRYPTION:
+ enc_algo = CAM_WEP40;
+ break;
+ case WEP104_ENCRYPTION:
+ enc_algo = CAM_WEP104;
+ break;
+ case TKIP_ENCRYPTION:
+ enc_algo = CAM_TKIP;
+ break;
+ case AESCCMP_ENCRYPTION:
+ enc_algo = CAM_AES;
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("switch case "
+ "not process \n"));
+ enc_algo = CAM_TKIP;
+ break;
+ }
+
+ if (is_wepkey || rtlpriv->sec.use_defaultkey) {
+ macaddr = cam_const_addr[key_index];
+ entry_id = key_index;
+ } else {
+ if (is_group) {
+ macaddr = cam_const_broad;
+ entry_id = key_index;
+ } else {
+ if (mac->opmode == NL80211_IFTYPE_AP) {
+ entry_id = rtl_cam_get_free_entry(hw, p_macaddr);
+ if (entry_id >= TOTAL_CAM_ENTRY) {
+ RT_TRACE(COMP_SEC, DBG_EMERG,
+ ("Can not find free hw security cam entry\n"));
+ return;
+ }
+ } else {
+ entry_id = CAM_PAIRWISE_KEY_POSITION;
+ }
+
+ key_index = PAIRWISE_KEYIDX;
+ is_pairwise = true;
+ }
+ }
+
+ if (rtlpriv->sec.key_len[key_index] == 0) {
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("delete one entry, entry_id is %d\n",entry_id));
+ if (mac->opmode == NL80211_IFTYPE_AP)
+ rtl_cam_del_entry(hw, p_macaddr);
+ rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
+ } else {
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("add one entry\n"));
+ if (is_pairwise) {
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("set Pairwiase key\n"));
+
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf[key_index]);
+ } else {
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("set group key\n"));
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ rtl_cam_add_one_entry(hw,
+ rtlefuse->dev_addr,
+ PAIRWISE_KEYIDX,
+ CAM_PAIRWISE_KEY_POSITION,
+ enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf
+ [entry_id]);
+ }
+
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf[entry_id]);
+ }
+
+ }
+ }
+}
+
+
+void rtl8812ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ bool auto_load_fail, u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value;
+
+ if (!auto_load_fail) {
+ value = hwinfo[EEPROM_RF_BOARD_OPTION];
+ if (((value & 0xe0) >> 5) == 0x1)
+ rtlpriv->btcoexist.btc_info.btcoexist = 1;
+ else
+ rtlpriv->btcoexist.btc_info.btcoexist = 0;
+ rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8812A;
+
+ value = hwinfo[EEPROM_RF_BT_SETTING];
+ rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1);
+ } else {
+ rtlpriv->btcoexist.btc_info.btcoexist = 0;
+ rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8812A;
+ rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
+ }
+ /*move BT_InitHalVars() to init_sw_vars*/
+}
+
+void rtl8821ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ bool auto_load_fail, u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value;
+ u32 tmpu_32;
+
+ if (!auto_load_fail) {
+ tmpu_32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL);
+ if (tmpu_32 & BIT(18))
+ rtlpriv->btcoexist.btc_info.btcoexist = 1;
+ else
+ rtlpriv->btcoexist.btc_info.btcoexist = 0;
+ rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8821A;
+
+ value = hwinfo[EEPROM_RF_BT_SETTING];
+ rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1);
+ } else {
+ rtlpriv->btcoexist.btc_info.btcoexist = 0;
+ rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8821A;
+ rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
+ }
+ /*move BT_InitHalVars() to init_sw_vars*/
+}
+
+void rtl8821ae_bt_reg_init(struct ieee80211_hw* hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+ /* 0:Low, 1:High, 2:From Efuse. */
+ rtlpcipriv->btcoexist.b_reg_bt_iso = 2;
+ /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */
+ rtlpcipriv->btcoexist.b_reg_bt_sco = 3;
+ /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
+ rtlpcipriv->btcoexist.b_reg_bt_sco = 0;
+}
+
+
+void rtl8821ae_bt_hw_init(struct ieee80211_hw* hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->cfg->ops->get_btc_status()){
+ rtlpriv->btcoexist.btc_ops->btc_init_hw_config(rtlpriv);
+ }
+}
+
+void rtl8821ae_suspend(struct ieee80211_hw *hw)
+{
+}
+
+void rtl8821ae_resume(struct ieee80211_hw *hw)
+{
+}
+
+/* Turn on AAP (RCR:bit 0) for promiscuous mode. */
+void rtl8821ae_allow_all_destaddr(struct ieee80211_hw *hw,
+ bool allow_all_da, bool write_into_reg)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ if (allow_all_da) /* Set BIT0 */
+ rtlpci->receive_config |= RCR_AAP;
+ else /* Clear BIT0 */
+ rtlpci->receive_config &= ~RCR_AAP;
+
+ if (write_into_reg)
+ rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+
+ RT_TRACE(COMP_TURBO | COMP_INIT, DBG_LOUD,
+ ("receive_config=0x%08X, write_into_reg=%d\n",
+ rtlpci->receive_config, write_into_reg));
+}
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hw.h b/drivers/staging/rtl8821ae/rtl8821ae/hw.h
new file mode 100644
index 000000000000..4fb6bf0d1da2
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hw.h
@@ -0,0 +1,75 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_HW_H__
+#define __RTL8821AE_HW_H__
+
+void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw);
+
+void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
+ u32 *p_inta, u32 *p_intb);
+int rtl8821ae_hw_init(struct ieee80211_hw *hw);
+void rtl8821ae_card_disable(struct ieee80211_hw *hw);
+void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw);
+void rtl8821ae_disable_interrupt(struct ieee80211_hw *hw);
+int rtl8821ae_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
+void rtl8821ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
+void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci);
+void rtl8821ae_set_beacon_related_registers(struct ieee80211_hw *hw);
+void rtl8821ae_set_beacon_interval(struct ieee80211_hw *hw);
+void rtl8821ae_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr);
+void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl8821ae_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level);
+void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw);
+bool rtl8821ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
+void rtl8821ae_enable_hw_security_config(struct ieee80211_hw *hw);
+void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all);
+
+void rtl8821ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail,
+ u8* hwinfo);
+void rtl8812ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail,
+ u8* hwinfo);
+void rtl8821ae_bt_reg_init(struct ieee80211_hw* hw);
+void rtl8821ae_bt_hw_init(struct ieee80211_hw* hw);
+void rtl8821ae_suspend(struct ieee80211_hw *hw);
+void rtl8821ae_resume(struct ieee80211_hw *hw);
+void rtl8821ae_allow_all_destaddr(struct ieee80211_hw *hw,
+ bool allow_all_da,
+ bool write_into_reg);
+void _rtl8821ae_stop_tx_beacon(struct ieee80211_hw *hw);
+void _rtl8821ae_resume_tx_beacon(struct ieee80211_hw *hw);
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/led.c b/drivers/staging/rtl8821ae/rtl8821ae/led.c
new file mode 100644
index 000000000000..130a4f4b24a2
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/led.c
@@ -0,0 +1,239 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "reg.h"
+
+static void _rtl8821ae_init_led(struct ieee80211_hw *hw,
+ struct rtl_led *pled,
+ enum rtl_led_pin ledpin)
+{
+ pled->hw = hw;
+ pled->ledpin = ledpin;
+ pled->b_ledon = false;
+}
+
+void rtl8821ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ u8 ledcfg;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_LED, DBG_LOUD,
+ ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+
+ switch (pled->ledpin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+ ledcfg &= ~BIT(6);
+ rtl_write_byte(rtlpriv,
+ REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5));
+ break;
+ case LED_PIN_LED1:
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
+ rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10);
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not processed\n"));
+ break;
+ }
+ pled->b_ledon = true;
+}
+
+void rtl8812ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ u16 ledreg = REG_LEDCFG1;
+ u8 ledcfg = 0;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (pled->ledpin) {
+ case LED_PIN_LED0:
+ ledreg = REG_LEDCFG1;
+ break;
+
+ case LED_PIN_LED1:
+ ledreg = REG_LEDCFG2;
+ break;
+
+ case LED_PIN_GPIO0:
+ default:
+ break;
+ }
+
+ RT_TRACE(COMP_LED, DBG_LOUD, ("In SwLedOn, LedAddr:%X LEDPIN=%d \n", ledreg, pled->ledpin));
+
+ ledcfg = rtl_read_byte(rtlpriv, ledreg);
+ ledcfg |= BIT(5); /*Set 0x4c[21]*/
+ ledcfg &= ~(BIT(7) | BIT(6) | BIT(3) |BIT(2) | BIT(1) |BIT(0));
+ /*Clear 0x4c[23:22] and 0x4c[19:16]*/
+ rtl_write_byte(rtlpriv, ledreg, ledcfg); /*SW control led0 on.*/
+ pled->b_ledon = true;
+}
+
+void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u8 ledcfg;
+
+ RT_TRACE(COMP_LED, DBG_LOUD,
+ ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+
+ switch (pled->ledpin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ ledcfg &= 0xf0;
+ if (pcipriv->ledctl.bled_opendrain) {
+ ledcfg &= 0x90; /* Set to software control. */
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
+ ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
+ ledcfg &= 0xFE;
+ rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, ledcfg);
+ } else {
+ ledcfg &= ~BIT(6);
+ rtl_write_byte(rtlpriv, REG_LEDCFG2,
+ (ledcfg | BIT(3) | BIT(5)));
+ }
+ break;
+ case LED_PIN_LED1:
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
+ ledcfg &= 0x10; /* Set to software control. */
+ rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg|BIT(3));
+
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not processed\n"));
+ break;
+ }
+ pled->b_ledon = false;
+}
+
+void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ u16 ledreg = REG_LEDCFG1;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+
+ switch (pled->ledpin) {
+ case LED_PIN_LED0:
+ ledreg = REG_LEDCFG1;
+ break;
+
+ case LED_PIN_LED1:
+ ledreg = REG_LEDCFG2;
+ break;
+
+ case LED_PIN_GPIO0:
+ default:
+ break;
+ }
+
+ RT_TRACE(COMP_LED, DBG_LOUD, ("In SwLedOff, LedAddr:%X LEDPIN=%d\n", ledreg, pled->ledpin));
+
+ if (pcipriv->ledctl.bled_opendrain) {
+ /* Open-drain arrangement for controlling the LED. */
+ u8 ledcfg = rtl_read_byte(rtlpriv, ledreg);
+
+ ledcfg &= 0xd0; /* Set to software control. */
+ rtl_write_byte(rtlpriv, ledreg, (ledcfg | BIT(3)));
+
+ ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
+ ledcfg &= 0xFE; /* Set GPIO[8] to input mode. */
+ rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, ledcfg);
+ } else {
+ rtl_write_byte(rtlpriv, ledreg, 0x28);
+ }
+
+ pled->b_ledon = false;
+}
+
+void rtl8821ae_init_sw_leds(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ _rtl8821ae_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
+ _rtl8821ae_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
+}
+
+static void _rtl8821ae_sw_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ switch (ledaction) {
+ case LED_CTL_POWER_ON:
+ case LED_CTL_LINK:
+ case LED_CTL_NO_LINK:
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtl8812ae_sw_led_on(hw, pLed0);
+ else
+ rtl8821ae_sw_led_on(hw, pLed0);
+ break;
+ case LED_CTL_POWER_OFF:
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtl8812ae_sw_led_off(hw, pLed0);
+ else
+ rtl8821ae_sw_led_off(hw, pLed0);
+ break;
+ default:
+ break;
+ }
+}
+
+void rtl8821ae_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
+ (ledaction == LED_CTL_TX ||
+ ledaction == LED_CTL_RX ||
+ ledaction == LED_CTL_SITE_SURVEY ||
+ ledaction == LED_CTL_LINK ||
+ ledaction == LED_CTL_NO_LINK ||
+ ledaction == LED_CTL_START_TO_LINK ||
+ ledaction == LED_CTL_POWER_ON)) {
+ return;
+ }
+ RT_TRACE(COMP_LED, DBG_LOUD, ("ledaction %d, \n",
+ ledaction));
+ _rtl8821ae_sw_led_control(hw, ledaction);
+}
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/led.h b/drivers/staging/rtl8821ae/rtl8821ae/led.h
new file mode 100644
index 000000000000..44be401ba21f
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/led.h
@@ -0,0 +1,40 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_LED_H__
+#define __RTL8821AE_LED_H__
+
+void rtl8821ae_init_sw_leds(struct ieee80211_hw *hw);
+void rtl8821ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8812ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8821ae_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/phy.c b/drivers/staging/rtl8821ae/rtl8821ae/phy.c
new file mode 100644
index 000000000000..d02fca38a2b2
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/phy.c
@@ -0,0 +1,5525 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../ps.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "table.h"
+#include "trx.h"
+#include "../btcoexist/halbt_precomp.h"
+#include "hw.h"
+
+#define READ_NEXT_PAIR(array_table,v1, v2, i) do { i += 2; v1 = array_table[i]; v2 = array_table[i+1]; } while(0)
+
+static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset);
+static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset,
+ u32 data);
+static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask);
+static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw);
+static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+static bool _rtl8821ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype);
+static bool _rtl8812ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype);
+static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype);
+static bool _rtl8812ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype);
+static void _rtl8821ae_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
+
+static long _rtl8821ae_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ u8 txpwridx);
+static void rtl8821ae_phy_set_rf_on(struct ieee80211_hw *hw);
+static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw);
+
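+/*
+ * Spur workaround for the 8812A: adjust RRFMOD and the ADC buffer clock
+ * according to bandwidth and channel (notably channels 11, 13 and 14) to
+ * suppress the 2480 MHz spur.
+ */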
+void rtl8812ae_fixspur(
+ struct ieee80211_hw *hw,
+ enum ht_channel_width band_width,
+ u8 channel
+)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ /*C cut Item12 ADC FIFO CLOCK*/
+ if (IS_VENDOR_8812A_C_CUT(rtlhal->version)) {
+ if (band_width == HT_CHANNEL_WIDTH_20_40 && channel == 11)
+ rtl_set_bbreg(hw, RRFMOD, 0xC00, 0x3);
+ /* 0x8AC[11:10] = 2'b11 */
+ else
+ rtl_set_bbreg(hw, RRFMOD, 0xC00, 0x2);
+ /* 0x8AC[11:10] = 2'b10 */
+
+
+ /* <20120914, Kordan> A workaround to resolve the 2480 MHz spur
+ by setting the ADC clock to 160M. (Asked by Binson) */
+ if (band_width == HT_CHANNEL_WIDTH_20 &&
+ (channel == 13 || channel == 14)) {
+ rtl_set_bbreg(hw, RRFMOD, 0x300, 0x3);
+ /*0x8AC[9:8] = 2'b11*/
+ rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 1);
+ /* 0x8C4[30] = 1*/
+ } else if (band_width == HT_CHANNEL_WIDTH_20_40 &&
+ channel == 11) {
+ rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 1);
+ /*0x8C4[30] = 1*/
+ } else if (band_width != HT_CHANNEL_WIDTH_80) {
+ rtl_set_bbreg(hw, RRFMOD, 0x300, 0x2);
+ /*0x8AC[9:8] = 2'b10*/
+ rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 0);
+ /*0x8C4[30] = 0*/
+ }
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ /* <20120914, Kordan> A workaround to resolve the 2480 MHz spur
+ by setting the ADC clock to 160M. (Asked by Binson) */
+ if (band_width == HT_CHANNEL_WIDTH_20 &&
+ (channel == 13 || channel == 14))
+ rtl_set_bbreg(hw, RRFMOD, 0x300, 0x3);
+ /*0x8AC[9:8] = 11*/
+ else if (channel <= 14) /*2.4G only*/
+ rtl_set_bbreg(hw, RRFMOD, 0x300, 0x2);
+ /*0x8AC[9:8] = 10*/
+ }
+
+}
+
+u32 rtl8821ae_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 returnvalue, originalvalue, bitshift;
+
+ RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "bitmask(%#x)\n", regaddr,
+ bitmask));
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+
+ RT_TRACE(COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
+ "Addr[0x%x]=0x%x\n", bitmask,
+ regaddr, originalvalue));
+
+ return returnvalue;
+
+}
+
+void rtl8821ae_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 originalvalue, bitshift;
+
+ RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
+ " data(%#x)\n", regaddr, bitmask,
+ data));
+
+ if (bitmask != MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | ((data << bitshift) & bitmask));
+ }
+
+ rtl_write_dword(rtlpriv, regaddr, data);
+
+ RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
+ " data(%#x)\n", regaddr, bitmask,
+ data));
+
+}
+
+u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 original_value, readback_value, bitshift;
+ unsigned long flags;
+
+ RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask));
+
+ spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+
+
+ original_value = _rtl8821ae_phy_rf_serial_read(hw,rfpath, regaddr);
+ bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+
+ RT_TRACE(COMP_RF, DBG_TRACE,
+ ("regaddr(%#x), rfpath(%#x), "
+ "bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value));
+
+ return readback_value;
+}
+
+void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 original_value, bitshift;
+ unsigned long flags;
+
+ RT_TRACE(COMP_RF, DBG_TRACE,
+ ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath));
+
+ spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = _rtl8821ae_phy_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+ bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+ }
+
+ _rtl8821ae_phy_rf_serial_write(hw, rfpath, regaddr, data);
+
+
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+
+ RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath));
+
+}
+
+static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool b_is_pi_mode = false;
+ u32 retvalue = 0;
+
+ /* 2009/06/17 MH We cannot execute IO while in power save or other accident modes. */
+ if (RT_CANNOT_IO(hw)) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("return all one\n"));
+ return 0xFFFFFFFF;
+ }
+
+ /* <20120809, Kordan> CCA OFF(when entering), asked by James to avoid reading the wrong value.
+ <20120828, Kordan> Toggling CCA would affect RF 0x0, skip it!*/
+ if (offset != 0x0 &&
+ !((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+ || (IS_VENDOR_8812A_C_CUT(rtlhal->version))))
+ rtl_set_bbreg(hw, RCCAONSEC, 0x8, 1);
+
+ offset &= 0xff;
+
+ if (rfpath == RF90_PATH_A)
+ b_is_pi_mode = (bool) rtl_get_bbreg(hw, 0xC00, 0x4);
+ else if (rfpath == RF90_PATH_B)
+ b_is_pi_mode = (bool) rtl_get_bbreg(hw, 0xE00, 0x4);
+
+ rtl_set_bbreg(hw, RHSSIREAD_8821AE, 0xff, offset);
+
+ if ((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+ || (IS_VENDOR_8812A_C_CUT(rtlhal->version)))
+ udelay(20);
+
+ if (b_is_pi_mode) {
+ if (rfpath == RF90_PATH_A)
+ retvalue = rtl_get_bbreg(hw, RA_PIREAD_8821A, BLSSIREADBACKDATA);
+ else if (rfpath == RF90_PATH_B)
+ retvalue = rtl_get_bbreg(hw, RB_PIREAD_8821A, BLSSIREADBACKDATA);
+ } else {
+ if (rfpath == RF90_PATH_A)
+ retvalue = rtl_get_bbreg(hw, RA_SIREAD_8821A, BLSSIREADBACKDATA);
+ else if (rfpath == RF90_PATH_B)
+ retvalue = rtl_get_bbreg(hw, RB_SIREAD_8821A, BLSSIREADBACKDATA);
+ }
+
+ /*<20120809, Kordan> CCA ON(when exiting), asked by James to avoid reading the wrong value.
+ <20120828, Kordan> Toggling CCA would affect RF 0x0, skip it!*/
+ if (offset != 0x0 && !((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+ || (IS_VENDOR_8812A_C_CUT(rtlhal->version))))
+ rtl_set_bbreg(hw, RCCAONSEC, 0x8, 0);
+ return retvalue;
+}
+
+#if 0
+static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+ u32 newoffset;
+ u32 tmplong, tmplong2;
+ u8 rfpi_enable = 0;
+ u32 retvalue;
+
+ offset &= 0xff;
+ newoffset = offset;
+ if (RT_CANNOT_IO(hw)) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("return all one\n"));
+ return 0xFFFFFFFF;
+ }
+ tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
+ if (rfpath == RF90_PATH_A)
+ tmplong2 = tmplong;
+ else
+ tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
+ tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
+ (newoffset << 23) | BLSSIREADEDGE;
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+ tmplong & (~BLSSIREADEDGE));
+ mdelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
+ mdelay(1);
+ /*rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+ tmplong | BLSSIREADEDGE);*/
+ mdelay(1);
+ if (rfpath == RF90_PATH_A)
+ rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
+ BIT(8));
+ else if (rfpath == RF90_PATH_B)
+ rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
+ BIT(8));
+ if (rfpi_enable)
+ retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
+ BLSSIREADBACKDATA);
+ else
+ retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
+ BLSSIREADBACKDATA);
+ RT_TRACE(COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rflssi_readback,
+ retvalue));
+ return retvalue;
+}
+#endif
+
+static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset,
+ u32 data)
+{
+ u32 data_and_addr;
+ u32 newoffset;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+ if (RT_CANNOT_IO(hw)) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("stop\n"));
+ return;
+ }
+ offset &= 0xff;
+ newoffset = offset;
+ data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
+ rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
+ RT_TRACE(COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset,
+ data_and_addr));
+}
+
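+/*
+ * Return the position of the least significant set bit of the mask, e.g.
+ * 0x0000ff00 -> 8.  Used by the register query/set helpers above to align
+ * field values before they are masked into a register.
+ */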
+static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask)
+{
+ u32 i;
+
+ for (i = 0; i <= 31; i++) {
+ if (((bitmask >> i) & 0x1) == 1)
+ break;
+ }
+ return i;
+}
+
+bool rtl8821ae_phy_mac_config(struct ieee80211_hw *hw)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool rtstatus = 0;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtstatus = _rtl8812ae_phy_config_mac_with_headerfile(hw);
+ else
+ rtstatus = _rtl8821ae_phy_config_mac_with_headerfile(hw);
+
+ return rtstatus;
+}
+
+bool rtl8821ae_phy_bb_config(struct ieee80211_hw *hw)
+{
+ bool rtstatus = true;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 regval;
+ u8 crystal_cap;
+ //u32 tmp;
+
+ _rtl8821ae_phy_init_bb_rf_register_definition(hw);
+
+ regval = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN);
+ regval |= FEN_PCIEA;
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, regval);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN,
+ regval | FEN_BB_GLB_RSTN | FEN_BBRSTB);
+
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x7);/*RF_EN | RF_RSTB | RF_SDMRSTB*/
+ rtl_write_byte(rtlpriv, REG_OPT_CTRL + 2, 0x7);/*RF_EN | RF_RSTB | RF_SDMRSTB*/
+
+ rtstatus = _rtl8821ae_phy_bb8821a_config_parafile(hw);
+
+ crystal_cap = rtlefuse->crystalcap & 0x3F;
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0x7FF80000,
+ (crystal_cap | (crystal_cap << 6)));
+ else
+ rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000,
+ (crystal_cap | (crystal_cap << 6)));
+ rtlphy->reg_837 = rtl_read_byte(rtlpriv, 0x837);
+
+ return rtstatus;
+}
+
+bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw)
+{
+ return rtl8821ae_phy_rf6052_config(hw);
+}
+
+
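+/*
+ * Translate the TX BB swing setting into the 0xC1C/0xE1C scale value:
+ * 0x200 is 0 dB, 0x16A -3 dB, 0x101 -6 dB and 0x0B6 -9 dB.  When efuse
+ * autoload succeeded, the per-path swing is read back from efuse offsets
+ * 0xC6 (2.4 GHz) and 0xC7 (5 GHz); otherwise fixed defaults are used.
+ */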
+u32 phy_get_tx_bb_swing_8812A(
+ struct ieee80211_hw *hw,
+ u8 band,
+ u8 rf_path
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_dm *rtldm = rtl_dm(rtlpriv);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ char bb_swing_2g = (char) (-1 * 0xFF);
+ char bb_swing_5g = (char) (-1 * 0xFF);
+ u32 out = 0x200;
+ const char auto_temp = -1;
+
+ RT_TRACE(COMP_SCAN, DBG_LOUD,
+ ("===> PHY_GetTxBBSwing_8812A, bbSwing_2G: %d, bbSwing_5G: %d\n",
+ (int)bb_swing_2g, (int)bb_swing_5g));
+
+ if ( rtlefuse->autoload_failflag) {
+ if ( band == BAND_ON_2_4G ) {
+ rtldm->bb_swing_diff_2g = bb_swing_2g;
+ if (bb_swing_2g == 0) out = 0x200; // 0 dB
+ else if (bb_swing_2g == -3) out = 0x16A; // -3 dB
+ else if (bb_swing_2g == -6) out = 0x101; // -6 dB
+ else if (bb_swing_2g == -9) out = 0x0B6; // -9 dB
+ else {
+ rtldm->bb_swing_diff_2g = 0;
+ out = 0x200;
+ }
+
+ } else if ( band == BAND_ON_5G ) {
+ rtldm->bb_swing_diff_5g = bb_swing_5g;
+ if (bb_swing_5g == 0) out = 0x200; // 0 dB
+ else if (bb_swing_5g == -3) out = 0x16A; // -3 dB
+ else if (bb_swing_5g == -6) out = 0x101; // -6 dB
+ else if (bb_swing_5g == -9) out = 0x0B6; // -9 dB
+ else {
+ if ( rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ rtldm->bb_swing_diff_5g = -3;
+ out = 0x16A;
+ } else {
+ rtldm->bb_swing_diff_5g = 0;
+ out = 0x200;
+ }
+ }
+ } else {
+ rtldm->bb_swing_diff_2g = -3;
+ rtldm->bb_swing_diff_5g = -3;
+ out = 0x16A; // -3 dB
+ }
+ }
+ else
+ {
+ u32 swing = 0, swing_a = 0, swing_b = 0;
+
+ if (band == BAND_ON_2_4G)
+ {
+ if (0xFF == auto_temp)
+ {
+ efuse_shadow_read(hw, 1, 0xC6, (u32 *)&swing);
+ swing = (swing == 0xFF) ? 0x00 : swing;
+ }
+ else if (bb_swing_2g == 0) swing = 0x00; // 0 dB
+ else if (bb_swing_2g == -3) swing = 0x05; // -3 dB
+ else if (bb_swing_2g == -6) swing = 0x0A; // -6 dB
+ else if (bb_swing_2g == -9) swing = 0xFF; // -9 dB
+ else swing = 0x00;
+ }
+ else
+ {
+ if (0xFF == auto_temp)
+ {
+ efuse_shadow_read(hw, 1, 0xC7, (u32 *)&swing);
+ swing = (swing == 0xFF) ? 0x00 : swing;
+ }
+ else if (bb_swing_5g == 0) swing = 0x00; // 0 dB
+ else if (bb_swing_5g == -3) swing = 0x05; // -3 dB
+ else if (bb_swing_5g == -6) swing = 0x0A; // -6 dB
+ else if (bb_swing_5g == -9) swing = 0xFF; // -9 dB
+ else swing = 0x00;
+ }
+
+ swing_a = (swing & 0x3) >> 0; // 0xC6/C7[1:0]
+ swing_b = (swing & 0xC) >> 2; // 0xC6/C7[3:2]
+ RT_TRACE(COMP_SCAN, DBG_LOUD,
+ ("===> PHY_GetTxBBSwing_8812A, swingA: 0x%X, swingB: 0x%X\n",
+ swing_a, swing_b));
+
+ //3 Path-A
+ if (swing_a == 0x0) {
+ if (band == BAND_ON_2_4G)
+ rtldm->bb_swing_diff_2g = 0;
+ else
+ rtldm->bb_swing_diff_5g = 0;
+ out = 0x200; // 0 dB
+ } else if (swing_a == 0x1) {
+ if (band == BAND_ON_2_4G)
+ rtldm->bb_swing_diff_2g = -3;
+ else
+ rtldm->bb_swing_diff_5g = -3;
+ out = 0x16A; // -3 dB
+ } else if (swing_a == 0x2) {
+ if (band == BAND_ON_2_4G)
+ rtldm->bb_swing_diff_2g = -6;
+ else
+ rtldm->bb_swing_diff_5g = -6;
+ out = 0x101; // -6 dB
+ } else if (swing_a == 0x3) {
+ if (band == BAND_ON_2_4G)
+ rtldm->bb_swing_diff_2g = -9;
+ else
+ rtldm->bb_swing_diff_5g = -9;
+ out = 0x0B6; // -9 dB
+ }
+
+ //3 Path-B
+ if (swing_b == 0x0) {
+ if (band == BAND_ON_2_4G)
+ rtldm->bb_swing_diff_2g = 0;
+ else
+ rtldm->bb_swing_diff_5g = 0;
+ out = 0x200; // 0 dB
+ } else if (swing_b == 0x1) {
+ if (band == BAND_ON_2_4G)
+ rtldm->bb_swing_diff_2g = -3;
+ else
+ rtldm->bb_swing_diff_5g = -3;
+ out = 0x16A; // -3 dB
+ } else if (swing_b == 0x2) {
+ if (band == BAND_ON_2_4G)
+ rtldm->bb_swing_diff_2g = -6;
+ else
+ rtldm->bb_swing_diff_5g = -6;
+ out = 0x101; // -6 dB
+ } else if (swing_b == 0x3) {
+ if (band == BAND_ON_2_4G)
+ rtldm->bb_swing_diff_2g = -9;
+ else
+ rtldm->bb_swing_diff_5g = -9;
+ out = 0x0B6; // -9 dB
+ }
+ }
+
+ RT_TRACE(COMP_SCAN, DBG_LOUD,
+ ("<=== PHY_GetTxBBSwing_8812A, out = 0x%X\n", out));
+ return out;
+}
+void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_dm *rtldm = rtl_dm(rtlpriv);
+ u8 current_band = rtlhal->current_bandtype;
+ u32 txpath, rxpath;
+ //u8 i, value8;
+ char bb_diff_between_band;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("\n"));
+ txpath = rtl8821ae_phy_query_bb_reg(hw, RTXPATH, 0xf0);
+ rxpath = rtl8821ae_phy_query_bb_reg(hw, RCCK_RX, 0x0f000000);
+ rtlhal->current_bandtype = (enum band_type) band;
+ /* reconfig BB/RF according to wireless mode */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* BB & RF Config */
+ RT_TRACE(COMP_CMD, DBG_DMESG, ("2.4G\n"));
+ rtl_set_bbreg(hw, ROFDMCCKEN, BOFDMEN|BCCKEN, 0x03);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ /* 0xCB0[15:12] = 0x7 (LNA_On)*/
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF000, 0x7);
+ /* 0xCB0[7:4] = 0x7 (PAPE_A)*/
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF0, 0x7);
+ }
+
+ if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ rtl_set_bbreg(hw, 0x830, 0xE, 0x4); /*0x830[3:1] = 0x4*/
+ rtl_set_bbreg(hw, 0x834, 0x3, 0x1); /*0x834[1:0] = 0x1*/
+ }
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xF00, 0); // 0xC1C[11:8] = 0
+ else
+ rtl_set_bbreg(hw, 0x82c, 0x3, 0); // 0x82C[1:0] = 2b'00
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+ rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000);
+ rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000);
+ }
+
+ rtl_set_bbreg(hw, RTXPATH, 0xf0, txpath);
+ rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, rxpath);
+
+ rtl_write_byte(rtlpriv, REG_CCK_CHECK, 0x0);
+ } else {/* 5G band */
+ u16 count, reg_41a;
+ RT_TRACE(COMP_CMD, DBG_DMESG, ("5G\n"));
+
+ if(rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ /*0xCB0[15:12] = 0x5 (LNA_On)*/
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF000, 0x5);
+ /*0xCB0[7:4] = 0x4 (PAPE_A)*/
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF0, 0x4);
+ }
+ /*CCK_CHECK_en*/
+ rtl_write_byte(rtlpriv, REG_CCK_CHECK, 0x80);
+
+ count = 0;
+ reg_41a = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY);
+ RT_TRACE(COMP_SCAN, DBG_LOUD, ("Reg41A value %d", reg_41a));
+ reg_41a &= 0x30;
+ while ((reg_41a != 0x30) && (count < 50)) {
+ udelay(50);
+ RT_TRACE(COMP_SCAN, DBG_LOUD, ("Delay 50us \n"));
+
+ reg_41a = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY);
+ reg_41a &= 0x30;
+ count++;
+ RT_TRACE(COMP_SCAN, DBG_LOUD, ("Reg41A value %d", reg_41a));
+ }
+ if (count != 0)
+ RT_TRACE(COMP_MLME, DBG_LOUD,
+ ("PHY_SwitchWirelessBand8812(): Switch to 5G Band. "
+ "Count = %d reg41A=0x%x\n", count, reg_41a));
+
+ /* 2012/02/01, Sinda added a registry option to switch this workaround
+ (not long-run verified) for the scan issue. */
+ rtl_set_bbreg(hw, ROFDMCCKEN, BOFDMEN|BCCKEN, 0x03);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ rtl_set_bbreg(hw, 0x830, 0xE, 0x3); /*0x830[3:1] = 0x3*/
+ rtl_set_bbreg(hw, 0x834, 0x3, 0x2); /*0x834[1:0] = 0x2*/
+ }
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ /* AGC table select */
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xF00, 1); /* 0xC1C[11:8] = 1*/
+ } else
+ rtl_set_bbreg(hw, 0x82c, 0x3, 1); // 0x82C[1:0] = 2'b00
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
+ rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010);
+ rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010);
+ }
+
+ rtl_set_bbreg(hw, RTXPATH, 0xf0, txpath);
+ rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, rxpath);
+
+ RT_TRACE(COMP_SCAN, DBG_LOUD,
+ ("==>PHY_SwitchWirelessBand8812() BAND_ON_5G settings OFDM index 0x%x\n",
+ rtlpriv->dm.ofdm_index[RF90_PATH_A]));
+ }
+
+ if ((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) ||
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)) {
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000,
+ phy_get_tx_bb_swing_8812A(hw, band, RF90_PATH_A)); // 0xC1C[31:21]
+ rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000,
+ phy_get_tx_bb_swing_8812A(hw, band, RF90_PATH_B)); // 0xE1C[31:21]
+
+ /* <20121005, Kordan> When TxPowerTrack is ON, we should take care of the change of BB swing.
+ That is, reset all info to trigger Tx power tracking.*/
+ if (band != current_band) {
+ bb_diff_between_band = (rtldm->bb_swing_diff_2g - rtldm->bb_swing_diff_5g);
+ bb_diff_between_band = (band == BAND_ON_2_4G) ? bb_diff_between_band : (-1 * bb_diff_between_band);
+ rtldm->default_ofdm_index += bb_diff_between_band * 2;
+ }
+ rtl8821ae_dm_clear_txpower_tracking_state(hw);
+ }
+
+ RT_TRACE(COMP_SCAN, DBG_TRACE,
+ ("<==rtl8821ae_phy_switch_wirelessband():Switch Band OK.\n"));
+ return;
+}
+
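+/*
+ * Evaluate a parameter-array condition word: bits 7:0 encode the board
+ * type, bits 15:8 the interface and bits 23:16 the platform; the value
+ * 0xCDCDCDCD means the entry applies unconditionally.
+ */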
+static bool _rtl8821ae_check_condition(struct ieee80211_hw *hw,
+ const u32 Condition
+ )
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u32 _board = rtlefuse->board_type; /*need efuse define*/
+ u32 _interface = rtlhal->interface;
+ u32 _platform = 0x08;/*SupportPlatform */
+ u32 cond = Condition;
+
+ if (Condition == 0xCDCDCDCD)
+ return true;
+
+ cond = Condition & 0xFF;
+ if ((_board != cond) == 0 && cond != 0xFF)
+ return false;
+
+ cond = Condition & 0xFF00;
+ cond = cond >> 8;
+ if ((_interface & cond) == 0 && cond != 0x07)
+ return false;
+
+ cond = Condition & 0xFF0000;
+ cond = cond >> 16;
+ if ((_platform & cond) == 0 && cond != 0x0F)
+ return false;
+ return true;
+}
+
+static void _rtl8821ae_config_rf_reg(struct ieee80211_hw *hw,
+ u32 addr,
+ u32 data,
+ enum radio_path rfpath,
+ u32 regaddr
+ )
+{
+ if (addr == 0xfe || addr == 0xffe) {
+ mdelay(50);
+ } else {
+ rtl_set_rfreg(hw, rfpath, regaddr, RFREG_OFFSET_MASK, data);
+ udelay(1);
+ }
+}
+
+static void _rtl8821ae_config_rf_radio_a(struct ieee80211_hw *hw,
+ u32 addr, u32 data)
+{
+ u32 content = 0x1000; /*RF Content: radio_a_txt*/
+ u32 maskforphyset = (u32)(content & 0xE000);
+
+ _rtl8821ae_config_rf_reg(hw, addr, data, RF90_PATH_A, addr | maskforphyset);
+
+}
+
+static void _rtl8821ae_config_rf_radio_b(struct ieee80211_hw *hw,
+ u32 addr, u32 data)
+{
+ u32 content = 0x1001; /*RF Content: radio_b_txt*/
+ u32 maskforphyset = (u32)(content & 0xE000);
+
+ _rtl8821ae_config_rf_reg(hw, addr, data, RF90_PATH_B, addr | maskforphyset);
+
+}
+
+static void _rtl8812ae_config_bb_reg(struct ieee80211_hw *hw,
+ u32 addr, u32 data)
+{
+ if (addr == 0xfe)
+ mdelay(50);
+ else if (addr == 0xfd)
+ mdelay(5);
+ else if (addr == 0xfc)
+ mdelay(1);
+ else if (addr == 0xfb)
+ udelay(50);
+ else if (addr == 0xfa)
+ udelay(5);
+ else if (addr == 0xf9)
+ udelay(1);
+ else
+ rtl_set_bbreg(hw, addr, MASKDWORD, data);
+ udelay(1);
+}
+
+static void _rtl8821ae_config_bb_reg(struct ieee80211_hw *hw,
+ u32 addr, u32 data)
+{
+ if (addr == 0xfe)
+ mdelay(50);
+ else if (addr == 0xfd)
+ mdelay(5);
+ else if (addr == 0xfc)
+ mdelay(1);
+ else if (addr == 0xfb)
+ udelay(50);
+ else if (addr == 0xfa)
+ udelay(5);
+ else if (addr == 0xf9)
+ udelay(1);
+
+ rtl_set_bbreg(hw, addr, MASKDWORD, data);
+ udelay(1);
+}
+
+static void _rtl8821ae_phy_init_tx_power_by_rate(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ u8 band, rfpath, txnum, rate_section;
+
+ for ( band = BAND_ON_2_4G; band <= BAND_ON_5G; ++band )
+ for ( rfpath = 0; rfpath < TX_PWR_BY_RATE_NUM_RF; ++rfpath )
+ for ( txnum = 0; txnum < TX_PWR_BY_RATE_NUM_RF; ++txnum )
+ for ( rate_section = 0; rate_section < TX_PWR_BY_RATE_NUM_SECTION; ++rate_section )
+ rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = 0;
+}
+
+void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
+ u8 band, u8 path,
+ u8 rate_section,
+ u8 txnum, u8 value)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (path > RF90_PATH_D) {
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Invalid Rf Path %d in phy_SetTxPowerByRatBase()\n", path));
+ return;
+ }
+
+ if (band == BAND_ON_2_4G) {
+ switch (rate_section) {
+ case CCK:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][0] = value;
+ break;
+ case OFDM:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][1] = value;
+ break;
+ case HT_MCS0_MCS7:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][2] = value;
+ break;
+ case HT_MCS8_MCS15:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value;
+ break;
+ case VHT_1SSMCS0_1SSMCS9:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][4] = value;
+ break;
+ case VHT_2SSMCS0_2SSMCS9:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][5] = value;
+ break;
+ default:
+ RT_TRACE(COMP_INIT, DBG_LOUD, ( "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
+ rate_section, path, txnum ) );
+ break;
+ };
+ } else if (band == BAND_ON_5G) {
+ switch (rate_section) {
+ case OFDM:
+ rtlphy->txpwr_by_rate_base_5g[path][txnum][0] = value;
+ break;
+ case HT_MCS0_MCS7:
+ rtlphy->txpwr_by_rate_base_5g[path][txnum][1] = value;
+ break;
+ case HT_MCS8_MCS15:
+ rtlphy->txpwr_by_rate_base_5g[path][txnum][2] = value;
+ break;
+ case VHT_1SSMCS0_1SSMCS9:
+ rtlphy->txpwr_by_rate_base_5g[path][txnum][3] = value;
+ break;
+ case VHT_2SSMCS0_2SSMCS9:
+ rtlphy->txpwr_by_rate_base_5g[path][txnum][4] = value;
+ break;
+ default:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Invalid RateSection %d in Band 5G, Rf Path %d, "
+ "%dTx in PHY_SetTxPowerByRateBase()\n",
+ rate_section, path, txnum));
+ break;
+ };
+ } else {
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Invalid Band %d in PHY_SetTxPowerByRateBase()\n", band));
+ }
+
+}
+
+u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
+ u8 band, u8 path,
+ u8 txnum, u8 rate_section)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 value = 0;
+
+ if (path > RF90_PATH_D) {
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n", path));
+ return 0;
+ }
+
+ if (band == BAND_ON_2_4G) {
+ switch (rate_section) {
+ case CCK:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][0];
+ break;
+ case OFDM:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][1];
+ break;
+ case HT_MCS0_MCS7:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][2];
+ break;
+ case HT_MCS8_MCS15:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3];
+ break;
+ case VHT_1SSMCS0_1SSMCS9:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][4];
+ break;
+ case VHT_2SSMCS0_2SSMCS9:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][5];
+ break;
+ default:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Invalid RateSection %d in Band 2.4G, Rf Path %d,"
+ " %dTx in PHY_GetTxPowerByRateBase()\n",
+ rate_section, path, txnum));
+ break;
+ };
+ } else if (band == BAND_ON_5G) {
+ switch (rate_section) {
+ case OFDM:
+ value = rtlphy->txpwr_by_rate_base_5g[path][txnum][0];
+ break;
+ case HT_MCS0_MCS7:
+ value = rtlphy->txpwr_by_rate_base_5g[path][txnum][1];
+ break;
+ case HT_MCS8_MCS15:
+ value = rtlphy->txpwr_by_rate_base_5g[path][txnum][2];
+ break;
+ case VHT_1SSMCS0_1SSMCS9:
+ value = rtlphy->txpwr_by_rate_base_5g[path][txnum][3];
+ break;
+ case VHT_2SSMCS0_2SSMCS9:
+ value = rtlphy->txpwr_by_rate_base_5g[path][txnum][4];
+ break;
+ default:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Invalid RateSection %d in Band 5G, Rf Path %d,"
+ " %dTx in PHY_GetTxPowerByRateBase()\n",
+ rate_section, path, txnum));
+ break;
+ };
+ } else {
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Invalid Band %d in PHY_GetTxPowerByRateBase()\n", band));
+ }
+
+ return value;
+
+}
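+/*
+ * The per-rate TX power bytes read from efuse are BCD encoded;
+ * (raw >> 4) * 10 + (raw & 0xF) converts one byte to its decimal dBm
+ * value before it is stored as the base for each rate section.
+ */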
+void _rtl8821ae_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u16 rawValue = 0;
+ u8 base = 0, path = 0;
+
+ for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) {
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][0] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, CCK, RF_1TX, base);
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][2] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, OFDM, RF_1TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][4] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS0_MCS7, RF_1TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][6] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS8_MCS15, RF_2TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][8] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][11] >> 8) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][2] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, OFDM, RF_1TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][4] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, HT_MCS0_MCS7, RF_1TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][6] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, HT_MCS8_MCS15, RF_2TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][8] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][11] >> 8) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base );
+ }
+}
+
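+/*
+ * Each 32-bit word packs four BCD TX power values.  For the bytes between
+ * 'start' and 'end' the absolute value is rewritten as the (unsigned)
+ * difference from base_val; the remaining bytes are copied through
+ * unchanged.
+ */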
+void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start,
+ u8 end, u8 base_val)
+{
+ char i = 0;
+ u8 temp_value = 0;
+ u32 temp_data = 0;
+
+ for (i = 3; i >= 0; --i)
+ {
+ if (i >= start && i <= end) {
+ // Get the exact value
+ temp_value = (u8) (*data >> (i * 8)) & 0xF;
+ temp_value += ((u8) ((*data >> (i * 8 + 4)) & 0xF)) * 10;
+
+ // Change the value to a relative value
+ temp_value = (temp_value > base_val) ? temp_value - base_val : base_val - temp_value;
+ } else {
+ temp_value = (u8) (*data >> (i * 8)) & 0xFF;
+ }
+ temp_data <<= 8;
+ temp_data |= temp_value;
+ }
+ *data = temp_data;
+}
+
+void _rtl8821ae_phy_convert_txpower_dbm_to_relative_value(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 base = 0, rfPath = 0;
+
+ for (rfPath = RF90_PATH_A; rfPath <= RF90_PATH_B; ++rfPath) {
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, CCK);
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G CCK 1TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][0] ),
+ 0, 3, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, OFDM );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G OFDM 1TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][1] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][2] ),
+ 0, 3, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, HT_MCS0_MCS7 );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G HTMCS0-7 1TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][3] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][4] ),
+ 0, 3, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_2TX, HT_MCS8_MCS15 );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G HTMCS8-15 2TX: %d\n", base ) );
+
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][5] ),
+ 0, 3, base );
+
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][6] ),
+ 0, 3, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, VHT_1SSMCS0_1SSMCS9 );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G VHT1SSMCS0-9 1TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][7] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][8] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][9] ),
+ 0, 1, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_2TX, VHT_2SSMCS0_2SSMCS9 );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G VHT2SSMCS0-9 2TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][9] ),
+ 2, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][10] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][11] ),
+ 0, 3, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, OFDM );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G OFDM 1TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][1] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][2] ),
+ 0, 3, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, HT_MCS0_MCS7 );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G HTMCS0-7 1TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][3] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][4] ),
+ 0, 3, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_2TX, HT_MCS8_MCS15 );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G HTMCS8-15 2TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][5] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][6] ),
+ 0, 3, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, VHT_1SSMCS0_1SSMCS9 );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G VHT1SSMCS0-9 1TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][7] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][8] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][9] ),
+ 0, 1, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_2TX, VHT_2SSMCS0_2SSMCS9 );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G VHT2SSMCS0-9 2TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][9] ),
+ 2, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][10] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][11] ),
+ 0, 3, base );
+ }
+
+ RT_TRACE(COMP_POWER, DBG_TRACE,
+ ("<===_rtl8821ae_phy_convert_txpower_dbm_to_relative_value()\n"));
+
+}
+
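+/* Snapshot the per-section base powers, then convert the power-by-rate table
+ * into offsets relative to those bases.
+ */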
+void _rtl8821ae_phy_txpower_by_rate_configuration(struct ieee80211_hw *hw)
+{
+ _rtl8821ae_phy_store_txpower_by_rate_base(hw);
+ _rtl8821ae_phy_convert_txpower_dbm_to_relative_value(hw);
+}
+
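+/* Load the baseband configuration: the PHY_REG table, the per-rate power
+ * (PG) table when efuse autoload succeeded, and the AGC table, choosing the
+ * 8812AE or 8821AE header-file arrays according to hw_type.
+ */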
+static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool rtstatus;
+
+ /*TX POWER LIMIT
+ PHY_InitTxPowerLimit
+ PHY_ConfigRFWithCustomPowerLimitTableParaFile*/
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtstatus = _rtl8812ae_phy_config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ else{
+ rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ }
+ if (rtstatus != true) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
+ return false;
+ }
+ _rtl8821ae_phy_init_tx_power_by_rate(hw);
+ if (rtlefuse->autoload_failflag == false) {
+ //rtlphy->pwrgroup_cnt = 0;
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtstatus = _rtl8812ae_phy_config_bb_with_pgheaderfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ else{
+ rtstatus = _rtl8821ae_phy_config_bb_with_pgheaderfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ }
+ }
+ if (rtstatus != true) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
+ return false;
+ }
+
+ _rtl8821ae_phy_txpower_by_rate_configuration(hw);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtstatus = _rtl8812ae_phy_config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_AGC_TAB);
+ else
+ rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_AGC_TAB);
+
+ if (rtstatus != true) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
+ return false;
+ }
+ rtlphy->bcck_high_power = (bool) (rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER2,
+ 0x200));
+ return true;
+}
+
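+/* Write the MAC registers from the (address, value) pairs of the header-file
+ * array.  Values >= 0xCDCDCDCD are condition markers checked by
+ * _rtl8821ae_check_condition(); 0xDEAD/0xCDEF/0xCDCD terminate a conditional
+ * block.
+ */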
+static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i, v1, v2;
+ u32 arraylength;
+ u32 *ptrarray;
+
+ RT_TRACE(COMP_INIT, DBG_TRACE, ("Read rtl8812AE_MAC_REG_Array\n"));
+ arraylength = RTL8812AEMAC_1T_ARRAYLEN;
+ ptrarray = RTL8812AE_MAC_REG_ARRAY;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Img:RTL8812AE_MAC_REG_ARRAY LEN %d\n",arraylength));
+ for (i = 0; i < arraylength; i += 2) {
+ v1 = ptrarray[i];
+ v2 = (u8) ptrarray[i + 1];
+ if (v1<0xCDCDCDCD) {
+ rtl_write_byte(rtlpriv, v1, (u8) v2);
+ } else {
+ if (!_rtl8821ae_check_condition(hw,v1)) {
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylength -2)
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+
+ i -= 2; /* prevent from for-loop += 2*/
+ } else {/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylength -2) {
+ rtl_write_byte(rtlpriv,v1,v2);
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylength -2)
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+ }
+ }
+ }
+ return true;
+}
+
+static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i, v1, v2;
+ u32 arraylength;
+ u32 *ptrarray;
+
+ RT_TRACE(COMP_INIT, DBG_TRACE, ("Read rtl8821AE_MAC_REG_Array\n"));
+ arraylength = RTL8821AEMAC_1T_ARRAYLEN;
+ ptrarray = RTL8821AE_MAC_REG_ARRAY;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Img:RTL8821AE_MAC_REG_ARRAY LEN %d\n",arraylength));
+ for (i = 0; i < arraylength; i += 2) {
+ v1 = ptrarray[i];
+ v2 = (u8) ptrarray[i + 1];
+ if (v1<0xCDCDCDCD) {
+ rtl_write_byte(rtlpriv, v1, (u8) v2);
+ continue;
+ } else {
+ if (!_rtl8821ae_check_condition(hw,v1)) {
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylength -2)
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+
+ i -= 2; /* prevent from for-loop += 2*/
+ } else {/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylength -2) {
+ rtl_write_byte(rtlpriv,v1,v2);
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylength -2)
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+ }
+ }
+ }
+ return true;
+}
+
+static bool _rtl8812ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ int i;
+ u32 *array_table;
+ u16 arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 v1 = 0, v2 = 0;
+
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ arraylen = RTL8812AEPHY_REG_1TARRAYLEN;
+ array_table = RTL8812AE_PHY_REG_ARRAY;
+
+ for (i = 0; i < arraylen; i += 2) {
+ v1 = array_table[i];
+ v2 = array_table[i+1];
+ if (v1<0xCDCDCDCD) {
+ _rtl8812ae_config_bb_reg(hw, v1, v2);
+ continue;
+ } else {/*This line is the start line of branch.*/
+ if (!_rtl8821ae_check_condition(hw,v1)) {
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen -2)
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+
+ i -= 2; /* prevent from for-loop += 2*/
+ } else {/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen -2) {
+ _rtl8812ae_config_bb_reg(hw,v1,v2);
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylen -2)
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+ }
+ }
+ }
+ } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+ arraylen = RTL8812AEAGCTAB_1TARRAYLEN;
+ array_table = RTL8812AE_AGC_TAB_ARRAY;
+
+ for (i = 0; i < arraylen; i = i + 2) {
+ v1 = array_table[i];
+ v2 = array_table[i+1];
+ if (v1 < 0xCDCDCDCD) {
+ rtl_set_bbreg(hw, v1, MASKDWORD, v2);
+ udelay(1);
+ continue;
+ } else {/*This line is the start line of branch.*/
+ if (!_rtl8821ae_check_condition(hw,v1)) {
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen -2)
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+
+ i -= 2; /* prevent from for-loop += 2*/
+ }else{/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen -2)
+ {
+ rtl_set_bbreg(hw, v1, MASKDWORD, v2);
+ udelay(1);
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylen -2)
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+ }
+ }
+			RT_TRACE(COMP_INIT, DBG_TRACE,
+				 ("agctab_array_table[i] is %x, "
+				  "agctab_array_table[i+1] is %x\n",
+				  array_table[i],
+				  array_table[i + 1]));
+ }
+ }
+ return true;
+}
+
+static bool _rtl8821ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ int i;
+ u32 *array_table;
+ u16 arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 v1 = 0, v2 = 0;
+
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ arraylen = RTL8821AEPHY_REG_1TARRAYLEN;
+ array_table = RTL8821AE_PHY_REG_ARRAY;
+
+ for (i = 0; i < arraylen; i += 2) {
+ v1 = array_table[i];
+ v2 = array_table[i+1];
+ if (v1<0xCDCDCDCD) {
+ _rtl8821ae_config_bb_reg(hw, v1, v2);
+ continue;
+ } else {/*This line is the start line of branch.*/
+ if (!_rtl8821ae_check_condition(hw,v1)) {
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen -2)
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+
+ i -= 2; /* prevent from for-loop += 2*/
+ } else {/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen -2) {
+ _rtl8821ae_config_bb_reg(hw,v1,v2);
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylen -2)
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+ }
+ }
+ }
+ } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+ arraylen = RTL8821AEAGCTAB_1TARRAYLEN;
+ array_table = RTL8821AE_AGC_TAB_ARRAY;
+
+ for (i = 0; i < arraylen; i = i + 2) {
+ v1 = array_table[i];
+ v2 = array_table[i+1];
+ if (v1 < 0xCDCDCDCD) {
+ rtl_set_bbreg(hw, v1, MASKDWORD, v2);
+ udelay(1);
+ continue;
+ } else {/*This line is the start line of branch.*/
+ if (!_rtl8821ae_check_condition(hw,v1)) {
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen -2)
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+
+ i -= 2; /* prevent from for-loop += 2*/
+ }else{/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen -2)
+ {
+ rtl_set_bbreg(hw, v1, MASKDWORD, v2);
+ udelay(1);
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylen -2)
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+ }
+ }
+			RT_TRACE(COMP_INIT, DBG_TRACE,
+				 ("agctab_array_table[i] is %x, "
+				  "agctab_array_table[i+1] is %x\n",
+				  array_table[i],
+				  array_table[i + 1]));
+ }
+ }
+ return true;
+}
+
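+/* Map a TX AGC register address (0xC20-0xC4C for path A, 0xE20-0xE4C for
+ * path B) to the rate-section index used in tx_power_by_rate_offset[].
+ */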
+static u8 _rtl8821ae_get_rate_selection_index(u32 regaddr)
+{
+ u8 index = 0;
+
+ regaddr &= 0xFFF;
+ if (regaddr >= 0xC20 && regaddr <= 0xC4C)
+ index = (u8) ((regaddr - 0xC20) / 4);
+ else if (regaddr >= 0xE20 && regaddr <= 0xE4C)
+ index = (u8) ((regaddr - 0xE20) / 4);
+ else
+		RT_ASSERT(!COMP_INIT,
+			("Invalid RegAddr 0x%x in "
+			 "PHY_GetRateSectionIndexOfTxPowerByRate()\n", regaddr));
+
+ return index;
+}
+
+static void _rtl8821ae_store_tx_power_by_rate(struct ieee80211_hw *hw,
+ u32 band, u32 rfpath,
+ u32 txnum, u32 regaddr,
+ u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 rate_section = _rtl8821ae_get_rate_selection_index(regaddr);
+
+ if (band != BAND_ON_2_4G && band != BAND_ON_5G)
+ RT_TRACE(COMP_INIT, DBG_WARNING, ("Invalid Band %d\n", band));
+
+ if (rfpath > MAX_RF_PATH)
+ RT_TRACE(COMP_INIT, DBG_WARNING, ("Invalid RfPath %d\n", rfpath));
+
+ if (txnum > MAX_RF_PATH)
+ RT_TRACE(COMP_INIT, DBG_WARNING, ("Invalid TxNum %d\n", txnum ) );
+
+ rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = data;
+ RT_TRACE(COMP_INIT, DBG_WARNING,( "pHalData->TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][RateSection %d] = 0x%x\n",
+ band, rfpath, txnum, rate_section, rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section]));
+
+}
+
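+/* Parse the PG (power-by-rate) table as 6-tuples of
+ * (band, path, txnum, regaddr, bitmask, data) and hand each entry to
+ * _rtl8821ae_store_tx_power_by_rate(); a regaddr of 0xfe/0xffe is a delay
+ * marker rather than a real register.
+ */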
+static bool _rtl8812ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i;
+ u32 *phy_regarray_table_pg;
+ u16 phy_regarray_pg_len;
+ u32 v1, v2, v3, v4, v5, v6;
+
+ phy_regarray_pg_len = RTL8812AEPHY_REG_ARRAY_PGLEN;
+ phy_regarray_table_pg = RTL8812AE_PHY_REG_ARRAY_PG;
+
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ for (i = 0; i < phy_regarray_pg_len; i += 6) {
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ v4 = phy_regarray_table_pg[i+3];
+ v5 = phy_regarray_table_pg[i+4];
+ v6 = phy_regarray_table_pg[i+5];
+
+ if (v1<0xCDCDCDCD) {
+ if ( (v4 == 0xfe) || (v4 == 0xffe))
+ mdelay(50);
+ else
+ /*_rtl8821ae_store_pwrIndex_diffrate_offset*/
+ _rtl8821ae_store_tx_power_by_rate(hw, v1, v2, v3, v4, v5, v6);
+ continue;
+ } else {
+ if (!_rtl8821ae_check_condition(hw,v1)) { /*don't need the hw_body*/
+ i += 2; /* skip the pair of expression*/
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ while (v2 != 0xDEAD) {
+ i += 3;
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ }
+ }
+ }
+ }
+ } else {
+
+ RT_TRACE(COMP_SEND, DBG_TRACE,
+ ("configtype != BaseBand_Config_PHY_REG\n"));
+ }
+ return true;
+}
+
+static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i;
+ u32 *phy_regarray_table_pg;
+ u16 phy_regarray_pg_len;
+ u32 v1, v2, v3, v4, v5, v6;
+
+ phy_regarray_pg_len = RTL8821AEPHY_REG_ARRAY_PGLEN;
+ phy_regarray_table_pg = RTL8821AE_PHY_REG_ARRAY_PG;
+
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ for (i = 0; i < phy_regarray_pg_len; i += 6) {
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ v4 = phy_regarray_table_pg[i+3];
+ v5 = phy_regarray_table_pg[i+4];
+ v6 = phy_regarray_table_pg[i+5];
+
+ if (v1<0xCDCDCDCD) {
+ if (v4 == 0xfe)
+ mdelay(50);
+ else if (v4 == 0xfd)
+ mdelay(5);
+ else if (v4 == 0xfc)
+ mdelay(1);
+ else if (v4 == 0xfb)
+ udelay(50);
+ else if (v4 == 0xfa)
+ udelay(5);
+ else if (v4 == 0xf9)
+ udelay(1);
+
+ /*_rtl8821ae_store_pwrIndex_diffrate_offset*/
+ _rtl8821ae_store_tx_power_by_rate(hw, v1, v2, v3, v4, v5, v6);
+ continue;
+ } else {
+ if (!_rtl8821ae_check_condition(hw,v1)) { /*don't need the hw_body*/
+ i += 2; /* skip the pair of expression*/
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ while (v2 != 0xDEAD) {
+ i += 3;
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ }
+ }
+ }
+ }
+ } else {
+
+ RT_TRACE(COMP_SEND, DBG_TRACE,
+ ("configtype != BaseBand_Config_PHY_REG\n"));
+ }
+ return true;
+}
+
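+/* Program the 8812AE RF registers for radio path A/B from the header-file
+ * arrays, using the same 0xCDCDCDCD condition-marker scheme as the MAC and
+ * baseband tables.
+ */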
+bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw * hw,
+ enum radio_path rfpath)
+{
+#define READ_NEXT_RF_PAIR_8812(radioa_array_table, v1, v2, i)	\
+	do {							\
+		i += 2;						\
+		v1 = radioa_array_table[i];			\
+		v2 = radioa_array_table[i + 1];			\
+	} while (0)
+
+ int i;
+ bool rtstatus = true;
+ u32 *radioa_array_table_a, *radioa_array_table_b;
+ u16 radioa_arraylen_a, radioa_arraylen_b;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 v1 = 0, v2 = 0;
+
+ radioa_arraylen_a = RTL8812AE_RADIOA_1TARRAYLEN;
+ radioa_array_table_a= RTL8812AE_RADIOA_ARRAY;
+ radioa_arraylen_b= RTL8812AE_RADIOB_1TARRAYLEN;
+ radioa_array_table_b = RTL8812AE_RADIOB_ARRAY;
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		("Radio_A:RTL8812AE_RADIOA_ARRAY %d\n", radioa_arraylen_a));
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("Radio No %x\n", rfpath));
+ rtstatus = true;
+ switch (rfpath) {
+ case RF90_PATH_A:
+ for (i = 0; i < radioa_arraylen_a; i = i + 2) {
+ v1 = radioa_array_table_a[i];
+ v2 = radioa_array_table_a[i+1];
+ if (v1<0xcdcdcdcd) {
+ _rtl8821ae_config_rf_radio_a(hw,v1,v2);
+ continue;
+ }else{/*This line is the start line of branch.*/
+ if(!_rtl8821ae_check_condition(hw,v1)){
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < radioa_arraylen_a-2)
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i);
+
+ i -= 2; /* prevent from for-loop += 2*/
+ } else {/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < radioa_arraylen_a -2) {
+ _rtl8821ae_config_rf_radio_a(hw,v1,v2);
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < radioa_arraylen_a-2)
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i);
+ }
+ }
+ }
+ break;
+ case RF90_PATH_B:
+ for (i = 0; i < radioa_arraylen_b; i = i + 2) {
+ v1 = radioa_array_table_b[i];
+ v2 = radioa_array_table_b[i+1];
+ if (v1<0xcdcdcdcd) {
+ _rtl8821ae_config_rf_radio_b(hw,v1,v2);
+ continue;
+ }else{/*This line is the start line of branch.*/
+ if(!_rtl8821ae_check_condition(hw,v1)){
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < radioa_arraylen_b-2)
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i);
+
+ i -= 2; /* prevent from for-loop += 2*/
+ } else {/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < radioa_arraylen_b-2) {
+ _rtl8821ae_config_rf_radio_b(hw,v1,v2);
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < radioa_arraylen_b-2)
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i);
+ }
+ }
+ }
+ break;
+ case RF90_PATH_C:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ case RF90_PATH_D:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+ return true;
+}
+
+
+bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw * hw,
+ enum radio_path rfpath)
+{
+#define READ_NEXT_RF_PAIR(v1, v2, i)			\
+	do {						\
+		i += 2;					\
+		v1 = radioa_array_table[i];		\
+		v2 = radioa_array_table[i + 1];		\
+	} while (0)
+
+ int i;
+ bool rtstatus = true;
+ u32 *radioa_array_table;
+ u16 radioa_arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ //struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 v1 = 0, v2 = 0;
+
+ radioa_arraylen = RTL8821AE_RADIOA_1TARRAYLEN;
+ radioa_array_table = RTL8821AE_RADIOA_ARRAY;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Radio_A:RTL8821AE_RADIOA_ARRAY %d\n",radioa_arraylen));
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("Radio No %x\n", rfpath));
+ rtstatus = true;
+ switch (rfpath) {
+ case RF90_PATH_A:
+ for (i = 0; i < radioa_arraylen; i = i + 2) {
+ v1 = radioa_array_table[i];
+ v2 = radioa_array_table[i+1];
+ if (v1<0xcdcdcdcd) {
+ _rtl8821ae_config_rf_radio_a(hw,v1,v2);
+ }else{/*This line is the start line of branch.*/
+ if(!_rtl8821ae_check_condition(hw,v1)){
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < radioa_arraylen -2)
+ READ_NEXT_RF_PAIR(v1, v2, i);
+
+ i -= 2; /* prevent from for-loop += 2*/
+ } else {/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < radioa_arraylen -2) {
+ _rtl8821ae_config_rf_radio_a(hw,v1,v2);
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < radioa_arraylen -2)
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ }
+ }
+ }
+ break;
+
+ case RF90_PATH_B:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ case RF90_PATH_C:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ case RF90_PATH_D:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+ return true;
+}
+
+void rtl8821ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ rtlphy->default_initialgain[0] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[1] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[2] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[3] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
+
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("Default initial gain (c50=0x%x, "
+		"c58=0x%x, c60=0x%x, c68=0x%x)\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]));
+
+ rtlphy->framesync = (u8) rtl_get_bbreg(hw,
+ ROFDM0_RXDETECTOR3, MASKBYTE0);
+ rtlphy->framesync_c34 = rtl_get_bbreg(hw,
+ ROFDM0_RXDETECTOR2, MASKDWORD);
+
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("Default framesync (0x%x) = 0x%x \n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync));
+}
+
+static void _rtl8821ae_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset = RA_LSSIWRITE_8821A;
+ rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset = RB_LSSIWRITE_8821A;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RHSSIREAD_8821AE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RHSSIREAD_8821AE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback = RA_SIREAD_8821A;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback = RB_SIREAD_8821A;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi = RA_PIREAD_8821A;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi = RB_PIREAD_8821A;
+}
+
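+/* Report the current TX power in dBm: the largest of the CCK, legacy OFDM
+ * and HT 2.4G power indexes after conversion by
+ * _rtl8821ae_phy_txpwr_idx_to_dbm().
+ */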
+void rtl8821ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 txpwr_level;
+ long txpwr_dbm;
+
+ txpwr_level = rtlphy->cur_cck_txpwridx;
+ txpwr_dbm = _rtl8821ae_phy_txpwr_idx_to_dbm(hw,
+ WIRELESS_MODE_B, txpwr_level);
+ txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+ if (_rtl8821ae_phy_txpwr_idx_to_dbm(hw,
+ WIRELESS_MODE_G,
+ txpwr_level) > txpwr_dbm)
+ txpwr_dbm =
+ _rtl8821ae_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
+ txpwr_level);
+ txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+ if (_rtl8821ae_phy_txpwr_idx_to_dbm(hw,
+ WIRELESS_MODE_N_24G,
+ txpwr_level) > txpwr_dbm)
+ txpwr_dbm =
+ _rtl8821ae_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+ txpwr_level);
+ *powerlevel = txpwr_dbm;
+}
+
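+/* Return true for a 2.4G channel and false for 5G; *chnl_index is set to
+ * channel - 1 for 2.4G or to the channel's position in channel_5g[] for 5G.
+ */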
+static bool _rtl8821ae_phy_get_chnl_index(u8 channel, u8 *chnl_index)
+{
+ u8 channel_5g[CHANNEL_MAX_NUMBER_5G] =
+ {36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,100,102,104,106,108,110,112,
+ 114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,149,151,
+ 153,155,157,159,161,163,165,167,168,169,171,173,175,177};
+ u8 i = 0;
+ bool in_24g = true;
+
+ if (channel <= 14) {
+ in_24g = true;
+ *chnl_index = channel - 1;
+ } else {
+ in_24g = false;
+
+ for (i = 0; i < sizeof(channel_5g) / sizeof(u8); ++i) {
+ if (channel_5g[i] == channel) {
+ *chnl_index = i;
+ return in_24g;
+ }
+ }
+ }
+ return in_24g;
+}
+
+static char _rtl8821ae_phy_get_ratesection_intxpower_byrate(u8 path, u8 rate)
+{
+ char rate_section = 0;
+ switch (rate) {
+ case DESC_RATE1M:
+ case DESC_RATE2M:
+ case DESC_RATE5_5M:
+ case DESC_RATE11M:
+ rate_section = 0;
+ break;
+
+ case DESC_RATE6M:
+ case DESC_RATE9M:
+ case DESC_RATE12M:
+ case DESC_RATE18M:
+ rate_section = 1;
+ break;
+
+ case DESC_RATE24M:
+ case DESC_RATE36M:
+ case DESC_RATE48M:
+ case DESC_RATE54M:
+ rate_section = 2;
+ break;
+
+ case DESC_RATEMCS0:
+ case DESC_RATEMCS1:
+ case DESC_RATEMCS2:
+ case DESC_RATEMCS3:
+ rate_section = 3;
+ break;
+
+ case DESC_RATEMCS4:
+ case DESC_RATEMCS5:
+ case DESC_RATEMCS6:
+ case DESC_RATEMCS7:
+ rate_section = 4;
+ break;
+
+ case DESC_RATEMCS8:
+ case DESC_RATEMCS9:
+ case DESC_RATEMCS10:
+ case DESC_RATEMCS11:
+ rate_section = 5;
+ break;
+
+ case DESC_RATEMCS12:
+ case DESC_RATEMCS13:
+ case DESC_RATEMCS14:
+ case DESC_RATEMCS15:
+ rate_section = 6;
+ break;
+
+ case DESC_RATEVHT1SS_MCS0:
+ case DESC_RATEVHT1SS_MCS1:
+ case DESC_RATEVHT1SS_MCS2:
+ case DESC_RATEVHT1SS_MCS3:
+ rate_section = 7;
+ break;
+
+ case DESC_RATEVHT1SS_MCS4:
+ case DESC_RATEVHT1SS_MCS5:
+ case DESC_RATEVHT1SS_MCS6:
+ case DESC_RATEVHT1SS_MCS7:
+ rate_section = 8;
+ break;
+
+ case DESC_RATEVHT1SS_MCS8:
+ case DESC_RATEVHT1SS_MCS9:
+ case DESC_RATEVHT2SS_MCS0:
+ case DESC_RATEVHT2SS_MCS1:
+ rate_section = 9;
+ break;
+
+ case DESC_RATEVHT2SS_MCS2:
+ case DESC_RATEVHT2SS_MCS3:
+ case DESC_RATEVHT2SS_MCS4:
+ case DESC_RATEVHT2SS_MCS5:
+ rate_section = 10;
+ break;
+
+ case DESC_RATEVHT2SS_MCS6:
+ case DESC_RATEVHT2SS_MCS7:
+ case DESC_RATEVHT2SS_MCS8:
+ case DESC_RATEVHT2SS_MCS9:
+ rate_section = 11;
+ break;
+
+ default:
+ RT_ASSERT(true, ("Rate_Section is Illegal\n"));
+ break;
+ }
+
+ return rate_section;
+}
+
+static char _rtl8821ae_phy_get_txpower_by_rate(struct ieee80211_hw *hw,
+ u8 band, u8 path, u8 rate)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 shift = 0, rate_section, tx_num;
+ char tx_pwr_diff = 0;
+
+ rate_section = _rtl8821ae_phy_get_ratesection_intxpower_byrate(path, rate);
+ tx_num = RF_TX_NUM_NONIMPLEMENT;
+
+ if (tx_num == RF_TX_NUM_NONIMPLEMENT) {
+ if ((rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15 ) ||
+ (rate >= DESC_RATEVHT2SS_MCS2 && rate <= DESC_RATEVHT2SS_MCS9))
+ tx_num = RF_2TX;
+ else
+ tx_num = RF_1TX;
+ }
+
+ switch (rate) {
+ case DESC_RATE1M: shift = 0; break;
+ case DESC_RATE2M: shift = 8; break;
+ case DESC_RATE5_5M: shift = 16; break;
+ case DESC_RATE11M: shift = 24; break;
+
+ case DESC_RATE6M: shift = 0; break;
+ case DESC_RATE9M: shift = 8; break;
+ case DESC_RATE12M: shift = 16; break;
+ case DESC_RATE18M: shift = 24; break;
+
+ case DESC_RATE24M: shift = 0; break;
+ case DESC_RATE36M: shift = 8; break;
+ case DESC_RATE48M: shift = 16; break;
+ case DESC_RATE54M: shift = 24; break;
+
+ case DESC_RATEMCS0: shift = 0; break;
+ case DESC_RATEMCS1: shift = 8; break;
+ case DESC_RATEMCS2: shift = 16; break;
+ case DESC_RATEMCS3: shift = 24; break;
+
+ case DESC_RATEMCS4: shift = 0; break;
+ case DESC_RATEMCS5: shift = 8; break;
+ case DESC_RATEMCS6: shift = 16; break;
+ case DESC_RATEMCS7: shift = 24; break;
+
+ case DESC_RATEMCS8: shift = 0; break;
+ case DESC_RATEMCS9: shift = 8; break;
+ case DESC_RATEMCS10: shift = 16; break;
+ case DESC_RATEMCS11: shift = 24; break;
+
+ case DESC_RATEMCS12: shift = 0; break;
+ case DESC_RATEMCS13: shift = 8; break;
+ case DESC_RATEMCS14: shift = 16; break;
+ case DESC_RATEMCS15: shift = 24; break;
+
+ case DESC_RATEVHT1SS_MCS0: shift = 0; break;
+ case DESC_RATEVHT1SS_MCS1: shift = 8; break;
+ case DESC_RATEVHT1SS_MCS2: shift = 16; break;
+ case DESC_RATEVHT1SS_MCS3: shift = 24; break;
+
+ case DESC_RATEVHT1SS_MCS4: shift = 0; break;
+ case DESC_RATEVHT1SS_MCS5: shift = 8; break;
+ case DESC_RATEVHT1SS_MCS6: shift = 16; break;
+ case DESC_RATEVHT1SS_MCS7: shift = 24; break;
+
+ case DESC_RATEVHT1SS_MCS8: shift = 0; break;
+ case DESC_RATEVHT1SS_MCS9: shift = 8; break;
+ case DESC_RATEVHT2SS_MCS0: shift = 16; break;
+ case DESC_RATEVHT2SS_MCS1: shift = 24; break;
+
+ case DESC_RATEVHT2SS_MCS2: shift = 0; break;
+ case DESC_RATEVHT2SS_MCS3: shift = 8; break;
+ case DESC_RATEVHT2SS_MCS4: shift = 16; break;
+ case DESC_RATEVHT2SS_MCS5: shift = 24; break;
+
+ case DESC_RATEVHT2SS_MCS6: shift = 0; break;
+ case DESC_RATEVHT2SS_MCS7: shift = 8; break;
+ case DESC_RATEVHT2SS_MCS8: shift = 16; break;
+ case DESC_RATEVHT2SS_MCS9: shift = 24; break;
+
+ default:
+ RT_ASSERT(true, ("Rate_Section is Illegal\n"));
+ break;
+ }
+
+ tx_pwr_diff = (u8) (rtlphy->tx_power_by_rate_offset[band][path][tx_num][rate_section] >> shift) & 0xff;
+
+ return tx_pwr_diff;
+}
+
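+/* Build the final TX power index for one rate: efuse base power for the band
+ * and bandwidth, plus stream/bandwidth diffs, plus the per-rate offset
+ * (skipped when eeprom_regulatory == 2) and the dynamic power-tracking
+ * remnant, clamped to MAX_POWER_INDEX.
+ */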
+static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path,
+ u8 rate, u8 bandwidth, u8 channel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 index = (channel - 1);
+ u8 txpower = 0;
+ bool in_24g = false;
+ char powerdiff_byrate = 0;
+
+ if (((rtlhal->current_bandtype == BAND_ON_2_4G) && (channel > 14 || channel < 1)) ||
+ ((rtlhal->current_bandtype == BAND_ON_5G) && (channel <= 14))) {
+ index = 0;
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("Illegal channel!!\n"));
+ }
+
+ in_24g = _rtl8821ae_phy_get_chnl_index(channel, &index);
+ if (in_24g) {
+ if (RX_HAL_IS_CCK_RATE(rate))
+ txpower = rtlefuse->txpwrlevel_cck[path][index];
+ else if ( DESC_RATE6M <= rate )
+ txpower = rtlefuse->txpwrlevel_ht40_1s[path][index];
+ else
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("invalid rate\n"));
+
+ if (DESC_RATE6M <= rate && rate <= DESC_RATE54M && !RX_HAL_IS_CCK_RATE(rate))
+ txpower += rtlefuse->txpwr_legacyhtdiff[path][TX_1S];
+
+ if (bandwidth == HT_CHANNEL_WIDTH_20) {
+ if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_ht20diff[path][TX_1S];
+ if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_ht20diff[path][TX_2S];
+ } else if (bandwidth == HT_CHANNEL_WIDTH_20_40) {
+ if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_ht40diff[path][TX_1S];
+ if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_ht40diff[path][TX_2S];
+ } else if (bandwidth == HT_CHANNEL_WIDTH_80) {
+ if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_ht40diff[path][TX_1S];
+ if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_ht40diff[path][TX_2S];
+ }
+
+ } else {
+ if (DESC_RATE6M <= rate)
+ txpower = rtlefuse->txpwr_5g_bw40base[path][index];
+ else
+ RT_TRACE(COMP_POWER_TRACKING, DBG_WARNING,("INVALID Rate.\n"));
+
+ if (DESC_RATE6M <= rate && rate <= DESC_RATE54M && !RX_HAL_IS_CCK_RATE(rate))
+ txpower += rtlefuse->txpwr_5g_ofdmdiff[path][TX_1S];
+
+ if (bandwidth == HT_CHANNEL_WIDTH_20) {
+ if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_5g_bw20diff[path][TX_1S];
+ if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_5g_bw20diff[path][TX_2S];
+ } else if (bandwidth == HT_CHANNEL_WIDTH_20_40) {
+ if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_5g_bw40diff[path][TX_1S];
+ if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_5g_bw40diff[path][TX_2S];
+ } else if (bandwidth == HT_CHANNEL_WIDTH_80) {
+ u8 channel_5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171};
+ u8 i = 0;
+ for (i = 0; i < sizeof(channel_5g_80m) / sizeof(u8); ++i)
+ if (channel_5g_80m[i] == channel)
+ index = i;
+
+ if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower = rtlefuse->txpwr_5g_bw80base[path][index]
+ + rtlefuse->txpwr_5g_bw80diff[path][TX_1S];
+ if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower = rtlefuse->txpwr_5g_bw80base[path][index]
+ + rtlefuse->txpwr_5g_bw80diff[path][TX_1S]
+ + rtlefuse->txpwr_5g_bw80diff[path][TX_2S];
+ }
+ }
+ if (rtlefuse->eeprom_regulatory != 2)
+ powerdiff_byrate = _rtl8821ae_phy_get_txpower_by_rate(hw,
+ (u8)(!in_24g), path, rate);
+
+ if (rate == DESC_RATEVHT1SS_MCS8 || rate == DESC_RATEVHT1SS_MCS9 ||
+ rate == DESC_RATEVHT2SS_MCS8 || rate == DESC_RATEVHT2SS_MCS9)
+ txpower -= powerdiff_byrate;
+ else
+ txpower += powerdiff_byrate;
+
+ if (rate > DESC_RATE11M)
+ txpower += rtlpriv->dm.remnant_ofdm_swing_idx[path];
+ else
+ txpower += rtlpriv->dm.remnant_cck_idx;
+
+ if (txpower > MAX_POWER_INDEX)
+ txpower = MAX_POWER_INDEX;
+
+ return txpower;
+}
+
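+/* Write a single power index into the byte of the path A or path B TX AGC
+ * register that corresponds to the given rate.
+ */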
+static void _rtl8821ae_phy_set_txpower_index(struct ieee80211_hw *hw,
+ u8 power_index, u8 path, u8 rate)
+{
+ struct rtl_priv* rtlpriv = rtl_priv(hw);
+
+ if (path == RF90_PATH_A) {
+ switch (rate) {
+ case DESC_RATE1M:
+ rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKBYTE0, power_index);
+ break;
+ case DESC_RATE2M:
+ rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKBYTE1, power_index);
+ break;
+ case DESC_RATE5_5M:
+ rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKBYTE2, power_index);
+ break;
+ case DESC_RATE11M:
+ rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATE6M:
+ rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, MASKBYTE0, power_index);
+ break;
+ case DESC_RATE9M:
+ rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, MASKBYTE1, power_index);
+ break;
+ case DESC_RATE12M:
+ rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, MASKBYTE2, power_index);
+ break;
+ case DESC_RATE18M:
+ rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATE24M:
+ rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, MASKBYTE0, power_index);
+ break;
+ case DESC_RATE36M:
+ rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, MASKBYTE1, power_index);
+ break;
+ case DESC_RATE48M:
+ rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, MASKBYTE2, power_index);
+ break;
+ case DESC_RATE54M:
+ rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEMCS0:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEMCS1:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEMCS2:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEMCS3:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEMCS4:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEMCS5:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEMCS6:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEMCS7:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEMCS8:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEMCS9:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEMCS10:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEMCS11:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEMCS12:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEMCS13:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEMCS14:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEMCS15:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT1SS_MCS0:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS1:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS2:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS3:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT1SS_MCS4:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS5:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS6:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS7:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT1SS_MCS8:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS9:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS0:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS1:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT2SS_MCS2:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS3:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS4:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS5:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT2SS_MCS6:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS7:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS8:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS9:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, MASKBYTE3, power_index);
+ break;
+
+ default:
+ RT_TRACE(COMP_POWER, DBG_LOUD, ("Invalid Rate!!\n"));
+ break;
+ }
+ } else if (path == RF90_PATH_B) {
+ switch (rate) {
+ case DESC_RATE1M:
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKBYTE0, power_index);
+ break;
+ case DESC_RATE2M:
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKBYTE1, power_index);
+ break;
+ case DESC_RATE5_5M:
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKBYTE2, power_index);
+ break;
+ case DESC_RATE11M:
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATE6M:
+ rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, MASKBYTE0, power_index);
+ break;
+ case DESC_RATE9M:
+ rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, MASKBYTE1, power_index);
+ break;
+ case DESC_RATE12M:
+ rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, MASKBYTE2, power_index);
+ break;
+ case DESC_RATE18M:
+ rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATE24M:
+ rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, MASKBYTE0, power_index);
+ break;
+ case DESC_RATE36M:
+ rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, MASKBYTE1, power_index);
+ break;
+ case DESC_RATE48M:
+ rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, MASKBYTE2, power_index);
+ break;
+ case DESC_RATE54M:
+ rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEMCS0:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEMCS1:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEMCS2:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEMCS3:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEMCS4:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEMCS5:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEMCS6:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEMCS7:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEMCS8:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEMCS9:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEMCS10:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEMCS11:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEMCS12:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEMCS13:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEMCS14:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEMCS15:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT1SS_MCS0:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS1:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS2:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS3:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT1SS_MCS4:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS5:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS6:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS7:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT1SS_MCS8:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS9:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS0:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS1:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT2SS_MCS2:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS3:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS4:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS5:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT2SS_MCS6:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS7:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS8:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS9:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, MASKBYTE3, power_index);
+ break;
+
+ default:
+ RT_TRACE(COMP_POWER, DBG_LOUD, ("Invalid Rate!!\n"));
+ break;
+ }
+ } else {
+ RT_TRACE(COMP_POWER, DBG_LOUD, ("Invalid RFPath!!\n"));
+ }
+}
+
+void _rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw,
+ u8 *array, u8 path, u8 channel,
+ u8 size)
+{
+ struct rtl_phy *rtlphy = &(rtl_priv(hw)->phy);
+ u8 i;
+ u8 power_index;
+ for (i = 0; i < size; i ++) {
+ power_index = _rtl8821ae_get_txpower_index(hw, path, array[i],
+ rtlphy->current_chan_bw, channel);
+ _rtl8821ae_phy_set_txpower_index(hw, power_index, path, array[i]);
+ }
+}
+
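+/* Program the TX power-training register for one path: start from the MCS7
+ * power index and successively subtract 10, 8 and 6 (i.e. index-10, index-18,
+ * index-24), floor each value at 2, and pack the three results into the low
+ * three bytes of RA/RB_TXPWRTRAING.
+ */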
+static void _rtl8821ae_phy_txpower_training_by_path(struct ieee80211_hw *hw,
+ u8 bw, u8 channel, u8 path)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ u8 i;
+ u32 power_level, data, offset;
+
+ if(path >= rtlphy->num_total_rfpath)
+ return;
+
+ data = 0;
+ if (path == RF90_PATH_A) {
+ power_level =
+ _rtl8821ae_get_txpower_index(hw, RF90_PATH_A,
+ DESC_RATEMCS7, bw, channel);
+ offset = RA_TXPWRTRAING;
+	} else {
+		/* path B: use the path B index when programming RB_TXPWRTRAING */
+		power_level =
+			_rtl8821ae_get_txpower_index(hw, RF90_PATH_B,
+				DESC_RATEMCS7, bw, channel);
+		offset = RB_TXPWRTRAING;
+	}
+
+ for (i = 0; i < 3; i++) {
+ if (i == 0)
+ power_level = power_level - 10;
+ else if (i == 1)
+ power_level = power_level - 8;
+ else
+ power_level = power_level - 6;
+
+ data |= (((power_level > 2) ? (power_level) : 2) << (i * 8));
+ }
+ rtl_set_bbreg(hw, offset, 0xffffff, data);
+}
+
+void rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw, u8 channel, u8 path)
+{
+ //struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtl_priv(hw)->phy);
+ u8 cck_rates[] = {DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M};
+ u8 ofdm_rates[] = {DESC_RATE6M, DESC_RATE9M, DESC_RATE12M, DESC_RATE18M,
+ DESC_RATE24M, DESC_RATE36M, DESC_RATE48M, DESC_RATE54M};
+ u8 ht_rates_1t[] = {DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2, DESC_RATEMCS3,
+ DESC_RATEMCS4, DESC_RATEMCS5, DESC_RATEMCS6, DESC_RATEMCS7};
+ u8 ht_rates_2t[] = {DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10, DESC_RATEMCS11,
+ DESC_RATEMCS12, DESC_RATEMCS13, DESC_RATEMCS14, DESC_RATEMCS15};
+ u8 vht_rates_1t[] = {DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1, DESC_RATEVHT1SS_MCS2,
+ DESC_RATEVHT1SS_MCS3, DESC_RATEVHT1SS_MCS4,
+ DESC_RATEVHT1SS_MCS5, DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
+ DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9};
+ u8 vht_rates_2t[] = {DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1, DESC_RATEVHT2SS_MCS2,
+ DESC_RATEVHT2SS_MCS3, DESC_RATEVHT2SS_MCS4,
+ DESC_RATEVHT2SS_MCS5, DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
+ DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9};
+ //u8 i,size;
+ //u8 power_index;
+
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ _rtl8821ae_phy_set_txpower_level_by_path(hw,cck_rates,path,channel,
+ sizeof(cck_rates) / sizeof(u8));
+
+ _rtl8821ae_phy_set_txpower_level_by_path(hw,ofdm_rates,path,channel,
+ sizeof(ofdm_rates) / sizeof(u8));
+ _rtl8821ae_phy_set_txpower_level_by_path(hw,ht_rates_1t,path,channel,
+ sizeof(ht_rates_1t) / sizeof(u8));
+ _rtl8821ae_phy_set_txpower_level_by_path(hw,vht_rates_1t,path,channel,
+ sizeof(vht_rates_1t) / sizeof(u8));
+
+ if (rtlphy->num_total_rfpath >= 2) {
+ _rtl8821ae_phy_set_txpower_level_by_path(hw,ht_rates_2t,path,channel,
+ sizeof(ht_rates_2t) / sizeof(u8));
+ _rtl8821ae_phy_set_txpower_level_by_path(hw,vht_rates_2t,path,channel,
+ sizeof(vht_rates_2t) / sizeof(u8));
+ }
+
+ _rtl8821ae_phy_txpower_training_by_path(hw, rtlphy->current_chan_bw, channel, path);
+}
+/*just in case, write txpower in DW, to reduce time*/
+#if 0
+void _rtl8821ae_phy_get_txpower_index_by_rate_array(struct ieee80211_hw *hw, u8 channel,
+ u8 *rate, u8 path, u8 bw, u8 *power_index, u8 size)
+{
+ u8 i;
+ for (i = 0; i < size; i++)
+ power_index[i] = _rtl8821ae_get_txpower_index(hw, path, rate[i], bw, channel);
+}
+
+void rtl8821ae_phy_set_txpower_level_by_path2(struct ieee80211_hw *hw, u8 channel, u8 path)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtl_priv(hw)->phy);
+ u8 cck_rates[] = {DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M};
+ u8 ofdm_rates[] = {DESC_RATE6M, DESC_RATE9M, DESC_RATE12M, DESC_RATE18M,
+ DESC_RATE24M, DESC_RATE36M, DESC_RATE48M, DESC_RATE54M};
+ u8 ht_rates_1t[] = {DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2, DESC_RATEMCS3,
+ DESC_RATEMCS4, DESC_RATEMCS5, DESC_RATEMCS6, DESC_RATEMCS7};
+ u8 ht_rates_2t[] = {DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10, DESC_RATEMCS11,
+ DESC_RATEMCS12, DESC_RATEMCS13, DESC_RATEMCS14, DESC_RATEMCS15};
+ u8 vht_rates_1t[] = {DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1, DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3, DESC_RATEVHT1SS_MCS4,
+ DESC_RATEVHT1SS_MCS5, DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7, DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9};
+ u8 vht_rates_2t[] = {DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1, DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3, DESC_RATEVHT2SS_MCS4,
+ DESC_RATEVHT2SS_MCS5, DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7, DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9};
+ u8 i, j;
+ u8 pwridx[48] = {0};
+ u8 cs = sizeof(cck_rates) / sizeof(u8);
+ u8 os = sizeof(ofdm_rates) / sizeof(u8);
+ u8 h1s = sizeof(ht_rates_1t) / sizeof(u8);
+ u8 h2s = sizeof(ht_rates_2t) / sizeof(u8);
+ u8 v1s = sizeof(vht_rates_1t) / sizeof(u8);
+ u8 v2s = sizeof(vht_rates_2t) / sizeof(u8);
+
+ u8 len, start;
+ u32 reg_addr, power_index;
+ u8 bw = rtlphy->current_chan_bw;
+
+ _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+ ofdm_rates, path, bw, &pwridx[cs], os);
+
+ _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+ ht_rates_1t, path, bw, &pwridx[cs+os], h1s);
+
+ _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+ vht_rates_1t, path, bw, &pwridx[cs+os+h1s+h2s], v1s);
+
+
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+ cck_rates, path, bw, pwridx, cs);
+
+ start = 0;
+ } else {
+ start = cs;
+ }
+
+ reg_addr = (path == 0) ? RTXAGC_A_CCK11_CCK1 : RTXAGC_B_CCK11_CCK1;
+ reg_addr += start;
+
+ len = cs + os + h1s + h2s + v1s;
+ if (rtlphy->num_total_rfpath >= 2) {
+ _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+ ht_rates_2t, path, bw, &pwridx[cs+os+h1s], h2s);
+
+ _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+ vht_rates_2t, path, bw, &pwridx[cs+os+h1s+h2s+v1s], v2s);
+
+ len += v2s;
+ }
+ for (i = start; i < len; i += 4) {
+ power_index = 0;
+ for (j = 0; j < 4; j++)
+ power_index |= (pwridx[i+j] << (j*8));
+ rtl_set_bbreg(hw, reg_addr + i, MASKDWORD, power_index);
+ }
+
+ _rtl8821ae_phy_txpower_training_by_path(hw, rtlphy->current_chan_bw, channel, path);
+}
+#endif
+
+void rtl8821ae_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 path = 0;
+
+ for (path = RF90_PATH_A; path < rtlphy->num_total_rfpath; ++path )
+ rtl8821ae_phy_set_txpower_level_by_path(hw, channel, path);
+}
+
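+/* Convert a TX power index to dBm: index / 2 plus a mode-dependent offset
+ * (-7 for CCK, -8 otherwise).
+ */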
+static long _rtl8821ae_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ u8 txpwridx)
+{
+ long offset;
+ long pwrout_dbm;
+
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ offset = -7;
+ break;
+ case WIRELESS_MODE_G:
+ case WIRELESS_MODE_N_24G:
+ offset = -8;
+ break;
+ default:
+ offset = -8;
+ break;
+ }
+ pwrout_dbm = txpwridx / 2 + offset;
+ return pwrout_dbm;
+}
+
+void rtl8821ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ enum io_type iotype = IO_CMD_PAUSE_BAND0_DM_BY_SCAN;
+
+ if (!is_hal_stop(rtlhal)) {
+ switch (operation) {
+ case SCAN_OPT_BACKUP_BAND0:
+ iotype = IO_CMD_PAUSE_BAND0_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *) & iotype);
+
+ break;
+ case SCAN_OPT_BACKUP_BAND1:
+ iotype = IO_CMD_PAUSE_BAND1_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *) & iotype);
+
+ break;
+ case SCAN_OPT_RESTORE:
+ iotype = IO_CMD_RESUME_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *) & iotype);
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Unknown Scan Backup operation.\n"));
+ break;
+ }
+ }
+}
+
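+/* Update the bandwidth field of REG_TRXPTCL_CTL: clear both width bits for
+ * 20MHz, set BIT(7) for 40MHz, set BIT(8) for 80MHz.
+ */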
+static void _rtl8821ae_phy_set_reg_bw(struct rtl_priv * rtlpriv, u8 bw)
+{
+ u16 reg_rf_mode_bw, tmp = 0;
+ reg_rf_mode_bw = rtl_read_word(rtlpriv, REG_TRXPTCL_CTL);
+ switch (bw) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, reg_rf_mode_bw & 0xFE7F);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ tmp = reg_rf_mode_bw | BIT(7);
+ rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, tmp & 0xFEFF);
+ break;
+ case HT_CHANNEL_WIDTH_80:
+ tmp = reg_rf_mode_bw | BIT(8);
+ rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, tmp & 0xFF7F);
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_WARNING,("unknown Bandwidth: 0x%x\n",bw));
+ break;
+ }
+}
+
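+/* Encode the secondary channel layout for the current bandwidth: the 40MHz
+ * sub-channel setting is returned in the high nibble, the 20MHz setting in
+ * the low nibble.
+ */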
+static u8 _rtl8821ae_phy_get_secondary_chnl(struct rtl_priv * rtlpriv)
+{
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ u8 sc_set_40 = 0, sc_set_20 =0;
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
+ if(mac->cur_80_prime_sc == PRIME_CHNL_OFFSET_LOWER)
+ sc_set_40 = VHT_DATA_SC_40_LOWER_OF_80MHZ;
+ else if(mac->cur_80_prime_sc == PRIME_CHNL_OFFSET_UPPER)
+ sc_set_40 = VHT_DATA_SC_40_UPPER_OF_80MHZ;
+ else
+			RT_TRACE(COMP_ERR, DBG_EMERG,
+				("SCMapping: Not Correct Primary80MHz Setting\n"));
+
+ if((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) &&
+ (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER))
+ sc_set_20 = VHT_DATA_SC_20_LOWEST_OF_80MHZ;
+ else if((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER) &&
+ (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER))
+ sc_set_20 = VHT_DATA_SC_20_LOWER_OF_80MHZ;
+ else if((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) &&
+ (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER))
+ sc_set_20 = VHT_DATA_SC_20_UPPER_OF_80MHZ;
+ else if((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER) &&
+ (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER))
+ sc_set_20 = VHT_DATA_SC_20_UPPERST_OF_80MHZ;
+ else
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("SCMapping: Not Correct Primary40MHz Setting \n"));
+ } else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER)
+ sc_set_20 = VHT_DATA_SC_20_UPPER_OF_80MHZ;
+ else if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER)
+ sc_set_20 = VHT_DATA_SC_20_LOWER_OF_80MHZ;
+ else
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("SCMapping: Not Correct Primary40MHz Setting \n"));
+ }
+ return ((sc_set_40 << 4) | sc_set_20);
+}
+
+void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 sub_chnl = 0;
+ u8 l1pk_val = 0;
+
+	RT_TRACE(COMP_SCAN, DBG_TRACE,
+		("Switch to %s bandwidth\n",
+		 (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+		  "20MHz" :
+		  (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40 ?
+		   "40MHz" : "80MHz"))));
+
+	_rtl8821ae_phy_set_reg_bw(rtlpriv, rtlphy->current_chan_bw);
+ sub_chnl = _rtl8821ae_phy_get_secondary_chnl(rtlpriv);
+ rtl_write_byte(rtlpriv, 0x0483, sub_chnl);
+
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl_set_bbreg(hw, RRFMOD, 0x003003C3, 0x00300200);
+ rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 0);
+
+ if(rtlphy->rf_type == RF_2T2R)
+ rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, 7);
+ else
+ rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, 8);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtl_set_bbreg(hw, RRFMOD, 0x003003C3, 0x00300201);
+ rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 0);
+ rtl_set_bbreg(hw, RRFMOD, 0x3C, sub_chnl);
+ rtl_set_bbreg(hw, RCCAONSEC, 0xf0000000, sub_chnl);
+
+ if(rtlphy->reg_837 & BIT(2))
+ l1pk_val = 6;
+ else
+ {
+ if(rtlphy->rf_type == RF_2T2R)
+ l1pk_val = 7;
+ else
+ l1pk_val = 8;
+ }
+ rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, l1pk_val); // 0x848[25:22] = 0x6
+
+ if(sub_chnl == VHT_DATA_SC_20_UPPER_OF_80MHZ)
+ rtl_set_bbreg(hw, RCCK_SYSTEM, BCCK_SYSTEM, 1);
+ else
+ rtl_set_bbreg(hw, RCCK_SYSTEM, BCCK_SYSTEM, 0);
+ break;
+
+ case HT_CHANNEL_WIDTH_80:
+ rtl_set_bbreg(hw, RRFMOD, 0x003003C3, 0x00300202); // 0x8ac[21,20,9:6,1,0]=8'b11100010
+ rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 1); // 0x8c4[30] = 1
+ rtl_set_bbreg(hw, RRFMOD, 0x3C, sub_chnl);
+ rtl_set_bbreg(hw, RCCAONSEC, 0xf0000000, sub_chnl);
+
+ if(rtlphy->reg_837 & BIT(2))
+ l1pk_val = 5;
+ else
+ {
+ if(rtlphy->rf_type == RF_2T2R)
+ l1pk_val = 6;
+ else
+ l1pk_val = 7;
+ }
+ rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, l1pk_val);
+
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ break;
+ }
+
+ rtl8812ae_fixspur(hw, rtlphy->current_chan_bw, rtlphy->current_channel);
+
+ rtl8821ae_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
+ rtlphy->set_bwmode_inprogress = false;
+
+ RT_TRACE(COMP_SCAN, DBG_LOUD, (" \n"));
+}
+
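+/*
+ * Entry point for bandwidth changes. Re-entry is blocked via
+ * set_bwmode_inprogress; the hardware is only touched when the HAL is running
+ * and I/O is allowed, otherwise the previous bandwidth is restored and the
+ * request is dropped.
+ */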
+void rtl8821ae_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp_bw = rtlphy->current_chan_bw;
+
+ if (rtlphy->set_bwmode_inprogress)
+ return;
+ rtlphy->set_bwmode_inprogress = true;
+ if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
+ rtl8821ae_phy_set_bw_mode_callback(hw);
+ } else {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("FALSE driver sleep or unload\n"));
+ rtlphy->set_bwmode_inprogress = false;
+ rtlphy->current_chan_bw = tmp_bw;
+ }
+}
+
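+/*
+ * Program the channel registers for rtlphy->current_channel: RFC_AREA is set
+ * from the channel range, then each RF path gets the band/channel bits of
+ * RF_CHNLBW and, on the 8821AE in the 5 GHz band, an extra RF_APK value.
+ */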
+void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 channel = rtlphy->current_channel;
+ u8 path;
+ u32 data;
+
+ RT_TRACE(COMP_SCAN, DBG_TRACE,
+ ("switch to channel%d\n", rtlphy->current_channel));
+ if (is_hal_stop(rtlhal))
+ return;
+
+ if (36 <= channel && channel <= 48)
+ data = 0x494;
+ else if (50 <= channel && channel <= 64)
+ data = 0x453;
+ else if (100 <= channel && channel <= 116)
+ data = 0x452;
+ else if (118 <= channel)
+ data = 0x412;
+ else
+ data = 0x96a;
+ rtl_set_bbreg(hw, RFC_AREA, 0x1ffe0000, data);
+
+
+ for(path = RF90_PATH_A; path < rtlphy->num_total_rfpath; path++)
+ {
+ if (36 <= channel && channel <= 64)
+ data = 0x101;
+ else if (100 <= channel && channel <= 140)
+ data = 0x301;
+ else if (140 < channel)
+ data = 0x501;
+ else
+ data = 0x000;
+ rtl8821ae_phy_set_rf_reg(hw, path, RF_CHNLBW,
+ BIT(18)|BIT(17)|BIT(16)|BIT(9)|BIT(8), data);
+
+ rtl8821ae_phy_set_rf_reg(hw, path, RF_CHNLBW,
+ BMASKBYTE0, channel);
+
+ if (channel > 14) {
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ if (36 <= channel && channel <= 64)
+ data = 0x114E9;
+ else if (100 <= channel && channel <= 140)
+ data = 0x110E9;
+ else
+ data = 0x110E9;
+ rtl8821ae_phy_set_rf_reg(hw, path, RF_APK,
+ BRFREGOFFSETMASK, data);
+ }
+ }
+ }
+ RT_TRACE(COMP_SCAN, DBG_TRACE, ("\n"));
+}
+
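+/*
+ * Top-level channel switch: bail out if a channel or bandwidth change is
+ * already in flight, wait for any LC calibration to finish, switch between
+ * the 2.4 GHz and 5 GHz bands when needed, then program the channel and
+ * re-apply the TX power level for it.
+ */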
+u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 timeout = 1000, timecount = 0;
+ u8 channel = rtlphy->current_channel;
+
+ if (rtlphy->sw_chnl_inprogress)
+ return 0;
+ if (rtlphy->set_bwmode_inprogress)
+ return 0;
+
+ if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
+ RT_TRACE(COMP_CHAN, DBG_LOUD,
+ ("sw_chnl_inprogress false driver sleep or unload\n"));
+ return 0;
+ }
+ while (rtlphy->lck_inprogress && timecount < timeout) {
+ mdelay(50);
+ timecount += 50;
+ }
+
+ if (rtlphy->current_channel > 14 && rtlhal->current_bandtype != BAND_ON_5G)
+ rtl8821ae_phy_switch_wirelessband(hw, BAND_ON_5G);
+ else if (rtlphy->current_channel <= 14 && rtlhal->current_bandtype != BAND_ON_2_4G)
+ rtl8821ae_phy_switch_wirelessband(hw, BAND_ON_2_4G);
+
+ rtlphy->sw_chnl_inprogress = true;
+ if (channel == 0)
+ channel = 1;
+
+ RT_TRACE(COMP_SCAN, DBG_TRACE,
+ ("switch to channel%d, band type is %d\n", rtlphy->current_channel, rtlhal->current_bandtype));
+
+ rtl8821ae_phy_sw_chnl_callback(hw);
+
+ rtl8821ae_dm_clear_txpower_tracking_state(hw);
+ rtl8821ae_phy_set_txpower_level(hw, rtlphy->current_channel);
+
+ RT_TRACE(COMP_SCAN, DBG_TRACE, ("\n"));
+ rtlphy->sw_chnl_inprogress = false;
+ return 1;
+}
+
+#if 0
+static u8 _rtl8821ae_phy_path_b_iqk(struct ieee80211_hw *hw)
+{
+ u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ u8 result = 0x00;
+
+ rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
+ rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
+ mdelay(IQK_DELAY_TIME);
+ reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
+ reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
+ reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
+ reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
+ reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
+
+ if (!(reg_eac & BIT(31)) &&
+ (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
+ (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ else
+ return result;
+ if (!(reg_eac & BIT(30)) &&
+ (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
+ result |= 0x02;
+ return result;
+}
+
+static u8 _rtl8821ae_phy_path_a_rx_iqk(struct ieee80211_hw *hw, bool config_pathb)
+{
+ u32 reg_eac, reg_e94, reg_e9c, reg_ea4,u32temp;
+ u8 result = 0x00;
+
+ /*Get TXIMR Setting*/
+ /*Modify RX IQK mode table*/
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf117b);
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000);
+
+ /*IQK Setting*/
+ rtl_set_bbreg(hw, RTx_IQK, MASKDWORD, 0x01007c00);
+ rtl_set_bbreg(hw, RRx_IQK, MASKDWORD, 0x81004800);
+
+ /*path a IQK setting*/
+ rtl_set_bbreg(hw, RTx_IQK_Tone_A, MASKDWORD, 0x10008c1c);
+ rtl_set_bbreg(hw, RRx_IQK_Tone_A, MASKDWORD, 0x30008c1c);
+ rtl_set_bbreg(hw, RTx_IQK_PI_A, MASKDWORD, 0x82160804);
+ rtl_set_bbreg(hw, RRx_IQK_PI_A, MASKDWORD, 0x28160000);
+
+ /*LO calibration Setting*/
+ rtl_set_bbreg(hw, RIQK_AGC_Rsp, MASKDWORD, 0x0046a911);
+ /*one shot,path A LOK & iqk*/
+ rtl_set_bbreg(hw, RIQK_AGC_Pts, MASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, RIQK_AGC_Pts, MASKDWORD, 0xf8000000);
+
+ mdelay(IQK_DELAY_TIME);
+
+ reg_eac = rtl_get_bbreg(hw, RRx_Power_After_IQK_A_2, MASKDWORD);
+ reg_e94 = rtl_get_bbreg(hw, RTx_Power_Before_IQK_A, MASKDWORD);
+ reg_e9c = rtl_get_bbreg(hw, RTx_Power_After_IQK_A, MASKDWORD);
+
+
+ if (!(reg_eac & BIT(28)) &&
+ (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
+ (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ else
+ return result;
+
+ u32temp = 0x80007C00 | (reg_e94&0x3FF0000) | ((reg_e9c&0x3FF0000) >> 16);
+ rtl_set_bbreg(hw, RTx_IQK, MASKDWORD, u32temp);
+ /*RX IQK*/
+ /*Modify RX IQK mode table*/
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf7ffa);
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000);
+
+ /*IQK Setting*/
+ rtl_set_bbreg(hw, RRx_IQK, MASKDWORD, 0x01004800);
+
+ /*path a IQK setting*/
+ rtl_set_bbreg(hw, RTx_IQK_Tone_A, MASKDWORD, 0x30008c1c);
+ rtl_set_bbreg(hw, RRx_IQK_Tone_A, MASKDWORD, 0x10008c1c);
+ rtl_set_bbreg(hw, RTx_IQK_PI_A, MASKDWORD, 0x82160c05);
+ rtl_set_bbreg(hw, RRx_IQK_PI_A, MASKDWORD, 0x28160c05);
+
+ /*LO calibration Setting*/
+ rtl_set_bbreg(hw, RIQK_AGC_Rsp, MASKDWORD, 0x0046a911);
+ /*one shot,path A LOK & iqk*/
+ rtl_set_bbreg(hw, RIQK_AGC_Pts, MASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, RIQK_AGC_Pts, MASKDWORD, 0xf8000000);
+
+ mdelay(IQK_DELAY_TIME);
+
+ reg_eac = rtl_get_bbreg(hw, RRx_Power_After_IQK_A_2, MASKDWORD);
+ reg_e94 = rtl_get_bbreg(hw, RTx_Power_Before_IQK_A, MASKDWORD);
+ reg_e9c = rtl_get_bbreg(hw, RTx_Power_After_IQK_A, MASKDWORD);
+ reg_ea4 = rtl_get_bbreg(hw, RRx_Power_Before_IQK_A_2, MASKDWORD);
+
+ if (!(reg_eac & BIT(27)) &&
+ (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((reg_eac & 0x03FF0000) >> 16) != 0x36))
+ result |= 0x02;
+ return result;
+}
+#endif
+
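+/*
+ * Map a channel number to its slot in the IQK channel table: 5 GHz channels
+ * return their 1-based position counted from channel 36; 2.4 GHz and unknown
+ * channels fall back to 0.
+ */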
+u8 _rtl8812ae_get_right_chnl_place_for_iqk(u8 chnl)
+{
+ u8 channel_all[TARGET_CHNL_NUM_2G_5G_8812] =
+ {1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,38,40,42,\
+ 44,46,48,50,52,54,56,58,60,62,64,100,\
+ 102,104,106,108,110,112,114,116,118,\
+ 120,122,124,126,128,130,132,134,136,\
+ 138,140,149,151,153,155,157,159,161,\
+ 163,165};
+ u8 place = chnl;
+
+ if(chnl > 14)
+ {
+ for(place = 14; place<sizeof(channel_all); place++)
+ {
+ if(channel_all[place] == chnl)
+ {
+ return place-13;
+ }
+ }
+ }
+
+ return 0;
+}
+
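+/*
+ * Write the RX IQ-imbalance correction (X/Y halved) into 0xc10 (path A) or
+ * 0xe10 (path B). Results matching the known-bad 0x112/0x3ee pattern are
+ * replaced with the neutral 0x100/0 default.
+ */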
+void _rtl8812ae_iqk_rx_fill_iqc(
+ struct ieee80211_hw *hw,
+ enum radio_path path,
+ u32 rx_x,
+ u32 rx_y
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (path) {
+ case RF90_PATH_A:
+ {
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ if (rx_x >> 1 ==0x112 || rx_y >> 1 == 0x3ee){
+ rtl_set_bbreg(hw, 0xc10, 0x000003ff, 0x100);
+ rtl_set_bbreg(hw, 0xc10, 0x03ff0000, 0);
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX_X = %x;;RX_Y = %x ====>fill to IQC\n",
+ rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff));
+ }
+ else{
+ rtl_set_bbreg(hw, 0xc10, 0x000003ff, rx_x >> 1);
+ rtl_set_bbreg(hw, 0xc10, 0x03ff0000, rx_y >> 1);
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX_X = %x;;RX_Y = %x ====>fill to IQC\n",
+ rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff));
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("0xc10 = %x ====>fill to IQC\n",
+ rtl_read_dword(rtlpriv, 0xc10)));
+ }
+ }
+ break;
+ case RF90_PATH_B:
+ {
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ if (rx_x >> 1 ==0x112 || rx_y >> 1 == 0x3ee){
+ rtl_set_bbreg(hw, 0xe10, 0x000003ff, 0x100);
+ rtl_set_bbreg(hw, 0xe10, 0x03ff0000, 0);
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX_X = %x;;RX_Y = %x ====>fill to IQC\n",
+ rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff));
+ }
+ else{
+ rtl_set_bbreg(hw, 0xe10, 0x000003ff, rx_x >> 1);
+ rtl_set_bbreg(hw, 0xe10, 0x03ff0000, rx_y >> 1);
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX_X = %x;;RX_Y = %x====>fill to IQC\n ",
+ rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff));
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("0xe10 = %x====>fill to IQC\n",
+ rtl_read_dword(rtlpriv, 0xe10)));
+ }
+ }
+ break;
+ default:
+ break;
+ };
+}
+
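+/*
+ * Write the TX IQ-imbalance correction into the page-C1 registers
+ * 0xccc/0xcd4 (path A) or 0xecc/0xed4 (path B).
+ */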
+void _rtl8812ae_iqk_tx_fill_iqc(
+ struct ieee80211_hw *hw,
+ enum radio_path path,
+ u32 tx_x,
+ u32 tx_y
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (path) {
+ case RF90_PATH_A:
+ {
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/
+ rtl_write_dword(rtlpriv, 0xc90, 0x00000080);
+ rtl_write_dword(rtlpriv, 0xcc4, 0x20040000);
+ rtl_write_dword(rtlpriv, 0xcc8, 0x20000000);
+ rtl_set_bbreg(hw, 0xccc, 0x000007ff, tx_y);
+ rtl_set_bbreg(hw, 0xcd4, 0x000007ff, tx_x);
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TX_X = %x;;TX_Y = %x =====> fill to IQC\n",
+ tx_x & 0x000007ff, tx_y & 0x000007ff));
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("0xcd4 = %x;;0xccc = %x ====>fill to IQC\n",
+ rtl_get_bbreg(hw, 0xcd4, 0x000007ff),
+ rtl_get_bbreg(hw, 0xccc, 0x000007ff)));
+ }
+ break;
+ case RF90_PATH_B:
+ {
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/
+ rtl_write_dword(rtlpriv, 0xe90, 0x00000080);
+ rtl_write_dword(rtlpriv, 0xec4, 0x20040000);
+ rtl_write_dword(rtlpriv, 0xec8, 0x20000000);
+ rtl_set_bbreg(hw, 0xecc, 0x000007ff, tx_y);
+ rtl_set_bbreg(hw, 0xed4, 0x000007ff, tx_x);
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TX_X = %x;;TX_Y = %x =====> fill to IQC\n",
+ tx_x&0x000007ff, tx_y&0x000007ff));
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("0xed4 = %x;;0xecc = %x ====>fill to IQC\n",
+ rtl_get_bbreg(hw, 0xed4, 0x000007ff),
+ rtl_get_bbreg(hw, 0xecc, 0x000007ff)));
+ }
+ break;
+ default:
+ break;
+ };
+}
+
+void _rtl8812ae_iqk_backup_macbb(
+ struct ieee80211_hw *hw,
+ u32 *macbb_backup,
+ u32 *backup_macbb_reg,
+ u32 mac_bb_num
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*save MACBB default value*/
+ for (i = 0; i < mac_bb_num; i++) {
+ macbb_backup[i] =rtl_read_dword(rtlpriv,backup_macbb_reg[i]);
+ }
+
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupMacBB Success!!!!\n"));
+}
+
+void _rtl8812ae_iqk_backup_afe(
+ struct ieee80211_hw *hw,
+ u32 *afe_backup,
+ u32 *backup_afe_REG,
+ u32 afe_num
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*Save AFE Parameters */
+ for (i = 0; i < afe_num; i++){
+ afe_backup[i] = rtl_read_dword(rtlpriv, backup_afe_REG[i]);
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupAFE Success!!!!\n"));
+}
+
+void _rtl8812ae_iqk_backup_rf(
+ struct ieee80211_hw *hw,
+ u32 *rfa_backup,
+ u32 *rfb_backup,
+ u32 *backup_rf_reg,
+ u32 rf_num
+ )
+{
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*Save RF Parameters*/
+ for (i = 0; i < rf_num; i++){
+ rfa_backup[i] = rtl_get_rfreg(hw, RF90_PATH_A, backup_rf_reg[i], BMASKDWORD);
+ rfb_backup[i] = rtl_get_rfreg(hw, RF90_PATH_B, backup_rf_reg[i], BMASKDWORD);
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupRF Success!!!!\n"));
+}
+
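+/*
+ * Quiesce the MAC/BB before IQ calibration: write 0x3f to 0x522 (presumably
+ * the TX pause register), clear BIT(11)|BIT(3) of 0x550, and switch the RX
+ * antenna and CCA off, as noted in the inline comments.
+ */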
+void _rtl8812ae_iqk_configure_mac(
+ struct ieee80211_hw *hw
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ /* ========MAC register setting========*/
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ rtl_write_byte(rtlpriv, 0x522, 0x3f);
+ rtl_set_bbreg(hw, 0x550, BIT(11) | BIT(3), 0x0);
+ rtl_write_byte(rtlpriv, 0x808, 0x00); /*RX ante off*/
+ rtl_set_bbreg(hw, 0x838, 0xf, 0xc); /*CCA off*/
+}
+
+#define cal_num 10
+
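+/*
+ * Dual-path (A and B) TX and RX IQ calibration for the 8812AE. Each stage is
+ * retried up to 10 times; a result is accepted once two runs agree to within
+ * less than 4 LSBs and their average is kept. Final values are written via
+ * _rtl8812ae_iqk_tx_fill_iqc()/_rtl8812ae_iqk_rx_fill_iqc(), falling back to
+ * the neutral 0x200/0x0 setting when a path fails. The chnl_idx argument is
+ * currently unused.
+ */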
+void _rtl8812ae_iqk_tx(
+ struct ieee80211_hw *hw,
+ u8 chnl_idx
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ u8 delay_count;
+ u8 cal0_retry, cal1_retry;
+ u8 tx0_average = 0, tx1_average = 0, rx0_average = 0, rx1_average = 0;
+ int tx0_x = 0, tx0_y = 0, rx0_x = 0, rx0_y = 0;
+ int tx_x0[cal_num], tx_y0[cal_num], rx_x0[cal_num], rx_y0[cal_num];
+ int tx1_x = 0, tx1_y = 0, rx1_x = 0, rx1_y = 0;
+ int tx_x1[cal_num], tx_y1[cal_num], rx_x1[cal_num], rx_y1[cal_num];
+ bool tx0iqkok= false, rx0iqkok = false, tx0_fail = true, rx0_fail;
+ bool iqk0_ready = false, tx0_finish = false, rx0_finish = false;
+ bool tx1iqkok = false, rx1iqkok = false, tx1_fail = true, rx1_fail;
+ bool iqk1_ready = false, tx1_finish = false, rx1_finish = false, vdf_enable = false;
+ int i, tx_dt[3] = {0}, rx_dt[3] = {0}, ii, dx = 0, dy = 0;
+
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("BandWidth = %d.\n",
+ rtlphy->current_chan_bw));
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
+ vdf_enable = true;
+ /* Note: VDF is then unconditionally disabled for this IQK. */
+ vdf_enable = false;
+
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*========Path-A AFE all on========*/
+ /*Port 0 DAC/ADC on*/
+ rtl_write_dword(rtlpriv, 0xc60, 0x77777777);
+ rtl_write_dword(rtlpriv, 0xc64, 0x77777777);
+
+ /* Port 1 DAC/ADC off*/
+ rtl_write_dword(rtlpriv, 0xe60, 0x77777777);
+ rtl_write_dword(rtlpriv, 0xe64, 0x77777777);
+
+ rtl_write_dword(rtlpriv, 0xc68, 0x19791979);
+ rtl_write_dword(rtlpriv, 0xe68, 0x19791979);
+ rtl_set_bbreg(hw,0xc00, 0xf, 0x4);/*hardware 3-wire off*/
+ rtl_set_bbreg(hw,0xe00, 0xf, 0x4);/*hardware 3-wire off*/
+
+ /*DAC/ADC sampling rate (160 MHz)*/
+ rtl_set_bbreg(hw, 0xc5c, BIT(26) | BIT(25) | BIT(24), 0x7);
+ rtl_set_bbreg(hw, 0xe5c, BIT(26) | BIT(25) | BIT(24), 0x7);
+ rtl_set_bbreg(hw, 0x8c4, BIT(30), 0x1);
+
+ /*====== Path A TX IQK RF Setting ======*/
+ rtl_set_bbreg(hw,0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ rtl_set_rfreg(hw,RF90_PATH_A, 0xef, BRFREGOFFSETMASK, 0x80002);
+ rtl_set_rfreg(hw,RF90_PATH_A, 0x30, BRFREGOFFSETMASK, 0x20000);
+ rtl_set_rfreg(hw,RF90_PATH_A, 0x31, BRFREGOFFSETMASK, 0x3fffd);
+ rtl_set_rfreg(hw,RF90_PATH_A, 0x32, BRFREGOFFSETMASK, 0xfe83f);
+ rtl_set_rfreg(hw,RF90_PATH_A, 0x65, BRFREGOFFSETMASK, 0x931d5);
+ rtl_set_rfreg(hw,RF90_PATH_A, 0x8f, BRFREGOFFSETMASK, 0x8a001);
+ /*====== Path B TX IQK RF Setting ======*/
+ rtl_set_rfreg(hw,RF90_PATH_B, 0xef, BRFREGOFFSETMASK, 0x80002);
+ rtl_set_rfreg(hw,RF90_PATH_B, 0x30, BRFREGOFFSETMASK, 0x20000);
+ rtl_set_rfreg(hw,RF90_PATH_B, 0x31, BRFREGOFFSETMASK, 0x3fffd);
+ rtl_set_rfreg(hw,RF90_PATH_B, 0x32, BRFREGOFFSETMASK, 0xfe83f);
+ rtl_set_rfreg(hw,RF90_PATH_B, 0x65, BRFREGOFFSETMASK, 0x931d5);
+ rtl_set_rfreg(hw,RF90_PATH_B, 0x8f, BRFREGOFFSETMASK, 0x8a001);
+ rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+ rtl_write_dword(rtlpriv, 0xb00, 0x03000100);
+ rtl_set_bbreg(hw, 0xc94, BIT(0), 0x1);
+ rtl_set_bbreg(hw, 0xe94, BIT(0), 0x1);
+ rtl_write_dword(rtlpriv, 0x978, 0x29002000);/* TX (X,Y)*/
+ rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);/* RX (X,Y)*/
+ rtl_write_dword(rtlpriv, 0x984, 0x00462910);/*[0]:AGC_en, [15]:idac_K_Mask*/
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1*/
+
+ /*ExternalPA_5G == 0*/
+ rtl_write_dword(rtlpriv, 0xc88, 0x821403f1);
+ rtl_write_dword(rtlpriv, 0xe88, 0x821403f1);
+
+ if (rtlhal->current_bandtype){
+ rtl_write_dword(rtlpriv, 0xc8c, 0x68163e96);
+ rtl_write_dword(rtlpriv, 0xe8c, 0x68163e96);
+ }
+ else{
+ rtl_write_dword(rtlpriv, 0xc8c, 0x28163e96);
+ rtl_write_dword(rtlpriv, 0xe8c, 0x28163e96);
+ }
+
+ /* The VDF variant is left empty here; vdf_enable is forced false above. */
+ if (!vdf_enable){
+ rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);/*TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16*/
+ rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);/*RX_Tone_idx[9:0], RxK_Mask[29]*/
+ rtl_write_dword(rtlpriv, 0xce8, 0x00000000);
+ rtl_write_dword(rtlpriv, 0xe80, 0x18008c10);/*TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16*/
+ rtl_write_dword(rtlpriv, 0xe84, 0x38008c10);/*RX_Tone_idx[9:0], RxK_Mask[29]*/
+ rtl_write_dword(rtlpriv, 0xee8, 0x00000000);
+
+ cal0_retry = 0;
+ cal1_retry = 0;
+ while(1){
+ /*one shot*/
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20]: hand SI/PI control over to the iqk_dpk module */
+ rtl_write_dword(rtlpriv, 0xeb8, 0x00100000);/* eb8[20]: hand SI/PI control over to the iqk_dpk module */
+ rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+ rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+ mdelay(10); /* Delay 10 ms */
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+ rtl_write_dword(rtlpriv, 0xeb8, 0x00000000);
+ delay_count = 0;
+ while (1){
+ if (!tx0_finish)
+ iqk0_ready = (bool) rtl_get_bbreg(hw, 0xd00, BIT(10));
+ if (!tx1_finish)
+ iqk1_ready = (bool) rtl_get_bbreg(hw, 0xd40, BIT(10));
+ if ((iqk0_ready && iqk1_ready) || (delay_count>20))
+ break;
+ else{
+ mdelay(1);
+ delay_count++;
+ }
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("TX delay_count = %d\n", delay_count));
+ if (delay_count < 20){ // If 20ms No Result, then cal_retry++
+ /* ============TXIQK Check==============*/
+ tx0_fail = (bool) rtl_get_bbreg(hw, 0xd00, BIT(12));
+ tx1_fail = (bool) rtl_get_bbreg(hw, 0xd40, BIT(12));
+ if (!(tx0_fail || tx0_finish)){
+ rtl_write_dword(rtlpriv, 0xcb8, 0x02000000);
+ tx_x0[tx0_average] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000) << 21;
+ rtl_write_dword(rtlpriv, 0xcb8, 0x04000000);
+ tx_y0[tx0_average] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000) << 21;
+ tx0iqkok = true;
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TX_X0[%d] = %x ;; TX_Y0[%d] = %x\n",
+ tx0_average, (tx_x0[tx0_average]) >> 21 & 0x000007ff,
+ tx0_average, (tx_y0[tx0_average]) >> 21 & 0x000007ff));
+
+ tx0_average++;
+ }
+ else{
+ tx0iqkok = false;
+ cal0_retry++;
+ if (cal0_retry == 10)
+ break;
+ }
+ if (!(tx1_fail || tx1_finish)){
+ rtl_write_dword(rtlpriv, 0xeb8, 0x02000000);
+ tx_x1[tx1_average] = rtl_get_bbreg(hw, 0xd40, 0x07ff0000) << 21;
+ rtl_write_dword(rtlpriv, 0xeb8, 0x04000000);
+ tx_y1[tx1_average] = rtl_get_bbreg(hw, 0xd40, 0x07ff0000) << 21;
+ tx1iqkok= true;
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TX_X1[%d] = %x ;; TX_Y1[%d] = %x\n",
+ tx1_average, (tx_x1[tx1_average]) >> 21 & 0x000007ff,
+ tx1_average, (tx_y1[tx1_average]) >> 21 & 0x000007ff));
+
+ tx1_average++;
+ }
+ else{
+ tx1iqkok = false;
+ cal1_retry++;
+ if (cal1_retry == 10)
+ break;
+ }
+ }
+ else{
+ tx0iqkok = false;
+ tx1iqkok = false;
+ cal0_retry++;
+ cal1_retry++;
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("Delay 20ms TX IQK Not Ready!!!!!\n"));
+ if (cal0_retry == 10)
+ break;
+ }
+ if (tx0_average >= 2){
+ for (i = 0; i < tx0_average; i++){
+ for (ii = i+1; ii <tx0_average; ii++){
+ dx = (tx_x0[i] >> 21) - (tx_x0[ii] >> 21);
+ if (dx < 4 && dx > -4){
+ dy = (tx_y0[i]>>21) - (tx_y0[ii]>>21);
+ if (dy < 4 && dy > -4){
+ tx0_x = ((tx_x0[i] >> 21) + (tx_x0[ii] >> 21)) / 2;
+ tx0_y = ((tx_y0[i] >> 21) + (tx_y0[ii] >> 21)) / 2;
+ tx_x0[0] = tx_x0[i];
+ tx_y0[1] = tx_y0[ii];
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TX0_X = %x;;TX0_Y = %x\n",
+ tx0_x & 0x000007ff, tx0_y & 0x000007ff));
+ if ((rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
+ && vdf_enable) {
+ tx_dt[0] = (tx_dt[i] + tx_dt[ii]) / 2;
+ }
+ tx0_finish = true;
+ }
+ }
+ }
+ }
+ }
+ if (tx1_average >= 2){
+ for (i = 0; i < tx1_average; i++){
+ for (ii = i+1; ii < tx1_average; ii++){
+ dx = (tx_x1[i] >> 21) - (tx_x1[ii] >> 21);
+ if (dx < 4 && dx > -4){
+ dy = (tx_y1[i] >> 21) - (tx_y1[ii] >> 21);
+ if (dy < 4 && dy > -4){
+ tx1_x = ((tx_x1[i] >> 21) + (tx_x1[ii] >> 21)) / 2;
+ tx1_y = ((tx_y1[i] >> 21) + (tx_y1[ii] >> 21)) / 2;
+ tx_x1[0] = tx_x1[i];
+ tx_y1[1] = tx_y1[ii];
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TX1_X = %x;;TX1_Y = %x\n",
+ tx1_x & 0x000007ff, tx1_y & 0x000007ff));
+ if ((rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
+ && vdf_enable) {
+ tx_dt[0] = (tx_dt[i] + tx_dt[ii]) / 2;
+ }
+ tx1_finish = true;
+ }
+ }
+ }
+ }
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TX0_Average = %d, TX1_Average = %d\n",
+ tx0_average, tx1_average));
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TX0_finish = %d, TX1_finish = %d\n",
+ tx0_finish, tx1_finish));
+ if (tx0_finish && tx1_finish)
+ break;
+ if ((cal0_retry + tx0_average) >= 10
+ || (cal1_retry + tx1_average) >= 10 )
+ break;
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TXA_cal_retry = %d\n", cal0_retry));
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TXB_cal_retry = %d\n", cal1_retry));
+
+ }
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x58, 0x7fe00,
+ rtl_get_rfreg(hw, RF90_PATH_A, 0x8, 0xffc00)); /*Load LOK*/
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x58, 0x7fe00,
+ rtl_get_rfreg(hw, RF90_PATH_B, 0x8, 0xffc00)); /* Load LOK*/
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/
+
+
+ if (!vdf_enable){
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ if (tx0_finish) {
+ /*====== Path A RX IQK RF Setting======*/
+ rtl_set_rfreg(hw, RF90_PATH_A, 0xef, BRFREGOFFSETMASK, 0x80000);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x18, 0x00c00, 0x3); /* BW 20M*/
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x30, BRFREGOFFSETMASK, 0x30000);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x31, BRFREGOFFSETMASK, 0x3f7ff);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x32, BRFREGOFFSETMASK, 0xfe7bf);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x8f, BRFREGOFFSETMASK, 0x88001);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x65, BRFREGOFFSETMASK, 0x931d6);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0xef, BRFREGOFFSETMASK, 0x00000);
+ }
+ if (tx1_finish){
+ /*====== Path B RX IQK RF Setting======*/
+ rtl_set_rfreg(hw, RF90_PATH_B, 0xef, BRFREGOFFSETMASK, 0x80000);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x30, BRFREGOFFSETMASK, 0x30000);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x31, BRFREGOFFSETMASK, 0x3f7ff);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x32, BRFREGOFFSETMASK, 0xfe7bf);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x8f, BRFREGOFFSETMASK, 0x88001);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x65, BRFREGOFFSETMASK, 0x931d1);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0xef, BRFREGOFFSETMASK, 0x00000);
+ }
+ rtl_set_bbreg(hw, 0x978, BIT(31), 0x1);
+ rtl_set_bbreg(hw, 0x97c, BIT(31), 0x0);
+ rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+ rtl_write_dword(rtlpriv, 0x984, 0x0046a890);
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/
+ if (tx0_finish) {
+ rtl_write_dword(rtlpriv, 0xc80, 0x38008c10);/*TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16*/
+ rtl_write_dword(rtlpriv, 0xc84, 0x18008c10);/*RX_Tone_idx[9:0], RxK_Mask[29]*/
+ rtl_write_dword(rtlpriv, 0xc88, 0x02140119);
+ rtl_write_dword(rtlpriv, 0xc8c, 0x28160cc0);
+ }
+ if (tx1_finish){
+ rtl_write_dword(rtlpriv, 0xe80, 0x38008c10);/*TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16*/
+ rtl_write_dword(rtlpriv, 0xe84, 0x18008c10);/*RX_Tone_idx[9:0], RxK_Mask[29]*/
+ rtl_write_dword(rtlpriv, 0xe88, 0x02140119);
+ rtl_write_dword(rtlpriv, 0xe8c, 0x28160ca0);
+ }
+ cal0_retry = 0;
+ cal1_retry = 0;
+ while(1){
+ /* one shot*/
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ if (tx0_finish){
+ rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x0[rx0_average % 2]) >> 21 & 0x000007ff);
+ rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y0[rx0_average % 2]) >> 21 & 0x000007ff);
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1*/
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00300000);/* cb8[20]: hand SI/PI control over to the iqk_dpk module */
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);
+ mdelay(5); /* Delay 5 ms */
+ }
+ if (tx1_finish){
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x1[rx1_average % 2]) >> 21 & 0x000007ff);
+ rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y1[rx1_average % 2]) >> 21 & 0x000007ff);
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/
+ rtl_write_dword(rtlpriv, 0xeb8, 0x00300000);/* eb8[20]: hand SI/PI control over to the iqk_dpk module */
+ rtl_write_dword(rtlpriv, 0xeb8, 0x00100000);
+ }
+ mdelay(10); /*Delay 10ms*/
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+ rtl_write_dword(rtlpriv, 0xeb8, 0x00000000);
+ delay_count = 0;
+ while (1){
+ if (!rx0_finish && tx0_finish)
+ iqk0_ready = (bool) rtl_get_bbreg(hw, 0xd00, BIT(10));
+ if (!rx1_finish && tx1_finish)
+ iqk1_ready = (bool) rtl_get_bbreg(hw, 0xd40, BIT(10));
+ if ((iqk0_ready && iqk1_ready)||(delay_count>20))
+ break;
+ else{
+ mdelay(1);
+ delay_count++;
+ }
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX delay_count = %d\n", delay_count));
+ if (delay_count < 20){ // If 20ms No Result, then cal_retry++
+ // ============RXIQK Check==============
+ rx0_fail = (bool) rtl_get_bbreg(hw, 0xd00, BIT(11));
+ rx1_fail = (bool) rtl_get_bbreg(hw, 0xd40, BIT(11));
+ if (!(rx0_fail || rx0_finish) && tx0_finish){
+ rtl_write_dword(rtlpriv, 0xcb8, 0x06000000);
+ rx_x0[rx0_average] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000) << 21;
+ rtl_write_dword(rtlpriv, 0xcb8, 0x08000000);
+ rx_y0[rx0_average] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000) << 21;
+ rx0iqkok= true;
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX_X0[%d] = %x ;; RX_Y0[%d] = %x\n",
+ rx0_average, (rx_x0[rx0_average]) >> 21 & 0x000007ff,
+ rx0_average, (rx_y0[rx0_average]) >> 21 & 0x000007ff));
+
+ rx0_average++;
+ }
+ else{
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("1. RXA_cal_retry = %d\n", cal0_retry));
+ rx0iqkok = false;
+ cal0_retry++;
+ if (cal0_retry == 10)
+ break;
+ }
+ if (!(rx1_fail || rx1_finish) && tx1_finish){
+ rtl_write_dword(rtlpriv, 0xeb8, 0x06000000);
+ rx_x1[rx1_average] = rtl_get_bbreg(hw, 0xd40, 0x07ff0000) << 21;
+ rtl_write_dword(rtlpriv, 0xeb8, 0x08000000);
+ rx_y1[rx1_average] = rtl_get_bbreg(hw, 0xd40, 0x07ff0000) << 21;
+ rx1iqkok = true;
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX_X1[%d] = %x ;; RX_Y1[%d] = %x\n",
+ rx1_average, (rx_x1[rx1_average]) >> 21 & 0x000007ff,
+ rx1_average, (rx_y1[rx1_average]) >> 21 & 0x000007ff));
+
+ rx1_average++;
+ }
+ else{
+ rx1iqkok= false;
+ cal1_retry++;
+ if (cal1_retry == 10)
+ break;
+ }
+
+ }
+ else{
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("2. RXA_cal_retry = %d\n", cal0_retry));
+ rx0iqkok = false;
+ rx1iqkok = false;
+ cal0_retry++;
+ cal1_retry++;
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("Delay 20ms RX IQK Not Ready!!!!!\n"));
+ if (cal0_retry == 10)
+ break;
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("3. RXA_cal_retry = %d\n", cal0_retry));
+ if (rx0_average >= 2){
+ for (i = 0; i < rx0_average; i++){
+ for (ii = i+1; ii < rx0_average; ii++){
+ dx = (rx_x0[i] >> 21) - (rx_x0[ii] >> 21);
+ if (dx < 4 && dx > -4){
+ dy = (rx_y0[i] >> 21) - (rx_y0[ii] >> 21);
+ if (dy < 4 && dy > -4){
+ rx0_x = ((rx_x0[i]>>21) + (rx_x0[ii] >> 21)) / 2;
+ rx0_y = ((rx_y0[i]>>21) + (rx_y0[ii] >> 21)) / 2;
+ if ((rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
+ && vdf_enable) {
+ rx_dt[0] = (rx_dt[i] + rx_dt[ii]) / 2;
+ }
+ rx0_finish = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+ if (rx1_average >= 2){
+ for (i = 0; i < rx1_average; i++){
+ for (ii = i+1; ii < rx1_average; ii++){
+ dx = (rx_x1[i] >> 21) - (rx_x1[ii] >> 21);
+ if (dx < 4 && dx > -4){
+ dy = (rx_y1[i] >> 21) - (rx_y1[ii] >> 21);
+ if (dy < 4 && dy > -4){
+ rx1_x = ((rx_x1[i] >> 21) + (rx_x1[ii] >> 21)) / 2;
+ rx1_y = ((rx_y1[i] >> 21) + (rx_y1[ii] >> 21)) / 2;
+ if ((rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
+ && vdf_enable) {
+ rx_dt[0] = (rx_dt[i] + rx_dt[ii]) / 2;
+ }
+ rx1_finish = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX0_Average = %d, RX1_Average = %d\n",
+ rx0_average, rx1_average));
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX0_finish = %d, RX1_finish = %d\n",
+ rx0_finish, rx1_finish));
+ if ((rx0_finish|| !tx0_finish) && (rx1_finish || !tx1_finish) )
+ break;
+ if ((cal0_retry + rx0_average) >= 10
+ || (cal1_retry + rx1_average) >= 10
+ || rx0_average == 3
+ || rx1_average == 3)
+ break;
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RXA_cal_retry = %d\n", cal0_retry));
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RXB_cal_retry = %d\n", cal1_retry));
+ }
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+ switch (rtlphy->current_chan_bw)
+ {
+ case HT_CHANNEL_WIDTH_20_40:
+ {
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x18, 0x00c00, 0x1);
+ }
+ break;
+ case HT_CHANNEL_WIDTH_80:
+ {
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x18, 0x00c00, 0x0);
+ }
+ break;
+ default:
+ break;
+
+ }
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */
+ /*FillIQK Result*/
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("========Path_A =======\n"));
+
+ if (tx0_finish){
+ _rtl8812ae_iqk_tx_fill_iqc(hw, RF90_PATH_A, tx0_x, tx0_y);
+ }
+ else{
+ _rtl8812ae_iqk_tx_fill_iqc(hw, RF90_PATH_A, 0x200, 0x0);
+ }
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80
+ || vdf_enable){
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */
+ rtl_set_bbreg(hw, 0xce8, 0x3fff0000, tx_dt[0] & 0x00003fff);
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ }
+
+ if (rx0_finish == 1){
+ _rtl8812ae_iqk_rx_fill_iqc(hw, RF90_PATH_A, rx0_x, rx0_y);
+ }
+ else{
+ _rtl8812ae_iqk_rx_fill_iqc(hw, RF90_PATH_A, 0x200, 0x0);
+ }
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80
+ || vdf_enable){
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */
+ rtl_set_bbreg(hw, 0xce8, 0x00003fff, rx_dt[0] & 0x00003fff);
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+ }
+
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("========Path_B =======\n"));
+
+ if (tx1_finish){
+ _rtl8812ae_iqk_tx_fill_iqc(hw, RF90_PATH_B, tx1_x, tx1_y);
+ }
+ else{
+ _rtl8812ae_iqk_tx_fill_iqc(hw, RF90_PATH_B, 0x200, 0x0);
+ }
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80
+ || vdf_enable){
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */
+ rtl_set_bbreg(hw, 0xee8, 0x3fff0000, tx_dt[0] & 0x00003fff);
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+ }
+
+ if (rx1_finish == 1){
+ _rtl8812ae_iqk_rx_fill_iqc(hw, RF90_PATH_B, rx1_x, rx1_y);
+ }
+ else{
+ _rtl8812ae_iqk_rx_fill_iqc(hw, RF90_PATH_B, 0x200, 0x0);
+ }
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80
+ || vdf_enable){
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */
+ rtl_set_bbreg(hw, 0xee8, 0x00003fff, rx_dt[0] & 0x00003fff);
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+ }
+}
+
+void _rtl8812ae_iqk_restore_rf(
+ struct ieee80211_hw *hw,
+ enum radio_path path,
+ u32 *backup_rf_reg,
+ u32 *rf_backup,
+ u32 rf_reg_num
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ for (i = 0; i < rf_reg_num; i++)
+ rtl_set_rfreg(hw, path, backup_rf_reg[i], BRFREGOFFSETMASK, rf_backup[i]);
+
+ rtl_set_rfreg(hw, path, 0xef, BRFREGOFFSETMASK, 0x0);
+
+ switch(path){
+ case RF90_PATH_A:
+ {
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RestoreRF Path A Success!!!!\n"));
+ }
+ break;
+ case RF90_PATH_B:
+ {
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RestoreRF Path B Success!!!!\n"));
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void _rtl8812ae_iqk_restore_afe(
+ struct ieee80211_hw *hw,
+ u32 *afe_backup,
+ u32 *backup_afe_reg,
+ u32 afe_num
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*Reload AFE Parameters */
+ for (i = 0; i < afe_num; i++){
+ rtl_write_dword(rtlpriv, backup_afe_reg[i], afe_backup[i]);
+ }
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1*/
+ rtl_write_dword(rtlpriv, 0xc80, 0x0);
+ rtl_write_dword(rtlpriv, 0xc84, 0x0);
+ rtl_write_dword(rtlpriv, 0xc88, 0x0);
+ rtl_write_dword(rtlpriv, 0xc8c, 0x3c000000);
+ rtl_write_dword(rtlpriv, 0xc90, 0x00000080);
+ rtl_write_dword(rtlpriv, 0xc94, 0x00000000);
+ rtl_write_dword(rtlpriv, 0xcc4, 0x20040000);
+ rtl_write_dword(rtlpriv, 0xcc8, 0x20000000);
+ rtl_write_dword(rtlpriv, 0xcb8, 0x0);
+ rtl_write_dword(rtlpriv, 0xe80, 0x0);
+ rtl_write_dword(rtlpriv, 0xe84, 0x0);
+ rtl_write_dword(rtlpriv, 0xe88, 0x0);
+ rtl_write_dword(rtlpriv, 0xe8c, 0x3c000000);
+ rtl_write_dword(rtlpriv, 0xe90, 0x00000080);
+ rtl_write_dword(rtlpriv, 0xe94, 0x00000000);
+ rtl_write_dword(rtlpriv, 0xec4, 0x20040000);
+ rtl_write_dword(rtlpriv, 0xec8, 0x20000000);
+ rtl_write_dword(rtlpriv, 0xeb8, 0x0);
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RestoreAFE Success!!!!\n"));
+}
+
+void _rtl8812ae_iqk_restore_macbb(
+ struct ieee80211_hw *hw,
+ u32 *macbb_backup,
+ u32 *backup_macbb_reg,
+ u32 macbb_num
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+ /* Reload MacBB parameters */
+ for (i = 0; i < macbb_num; i++){
+ rtl_write_dword(rtlpriv, backup_macbb_reg[i], macbb_backup[i]);
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RestoreMacBB Success!!!!\n"));
+}
+
+#define MACBB_REG_NUM 10
+#define AFE_REG_NUM 14
+#define RF_REG_NUM 3
+
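+/*
+ * IQ-calibration entry point for the 8812AE: back up the MAC/BB, AFE and RF
+ * registers, quiet the MAC, run the TX/RX IQK, then restore everything that
+ * was saved.
+ */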
+static void _rtl8812ae_phy_iq_calibrate(
+ struct ieee80211_hw *hw,
+ u8 channel)
+{
+ u32 macbb_backup[MACBB_REG_NUM];
+ u32 afe_backup[AFE_REG_NUM];
+ u32 rfa_backup[RF_REG_NUM];
+ u32 rfb_backup[RF_REG_NUM];
+ u32 backup_macbb_reg[MACBB_REG_NUM] = {0xb00, 0x520, 0x550,
+ 0x808, 0x90c, 0xc00, 0xe00,
+ 0x8c4,0x838, 0x82c};
+ u32 backup_afe_reg[AFE_REG_NUM] = {0xc5c, 0xc60, 0xc64, 0xc68,
+ 0xcb8, 0xcb0, 0xcb4,0xe5c,
+ 0xe60, 0xe64, 0xe68, 0xeb8,
+ 0xeb0, 0xeb4};
+ u32 backup_rf_reg[RF_REG_NUM] = {0x65, 0x8f, 0x0};
+ u8 chnl_idx = _rtl8812ae_get_right_chnl_place_for_iqk(channel);
+
+ _rtl8812ae_iqk_backup_macbb(hw, macbb_backup, backup_macbb_reg, MACBB_REG_NUM);
+ _rtl8812ae_iqk_backup_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM);
+ _rtl8812ae_iqk_backup_rf(hw, rfa_backup, rfb_backup, backup_rf_reg, RF_REG_NUM);
+
+ _rtl8812ae_iqk_configure_mac(hw);
+ _rtl8812ae_iqk_tx(hw, chnl_idx);
+ _rtl8812ae_iqk_restore_rf(hw, RF90_PATH_A, backup_rf_reg, rfa_backup, RF_REG_NUM);
+ _rtl8812ae_iqk_restore_rf(hw, RF90_PATH_B, backup_rf_reg, rfb_backup, RF_REG_NUM);
+
+ _rtl8812ae_iqk_restore_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM);
+ _rtl8812ae_iqk_restore_macbb(hw, macbb_backup, backup_macbb_reg, MACBB_REG_NUM);
+}
+
+
+void _rtl8821ae_iqk_backup_macbb(
+ struct ieee80211_hw *hw,
+ u32 *macbb_backup,
+ u32 *backup_macbb_reg,
+ u32 mac_bb_num
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*save MACBB default value*/
+ for (i = 0; i < mac_bb_num; i++) {
+ macbb_backup[i] =rtl_read_dword(rtlpriv,backup_macbb_reg[i]);
+ }
+
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupMacBB Success!!!!\n"));
+}
+
+void _rtl8821ae_iqk_backup_afe(
+ struct ieee80211_hw *hw,
+ u32 *afe_backup,
+ u32 *backup_afe_REG,
+ u32 afe_num
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*Save AFE Parameters */
+ for (i = 0; i < afe_num; i++){
+ afe_backup[i] = rtl_read_dword(rtlpriv, backup_afe_REG[i]);
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupAFE Success!!!!\n"));
+}
+
+void _rtl8821ae_iqk_backup_rf(
+ struct ieee80211_hw *hw,
+ u32 *rfa_backup,
+ u32 *rfb_backup,
+ u32 *backup_rf_reg,
+ u32 rf_num
+ )
+{
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*Save RF Parameters*/
+ for (i = 0; i < rf_num; i++){
+ rfa_backup[i] = rtl_get_rfreg(hw, RF90_PATH_A, backup_rf_reg[i], BMASKDWORD);
+ rfb_backup[i] = rtl_get_rfreg(hw, RF90_PATH_B, backup_rf_reg[i], BMASKDWORD);
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupRF Success!!!!\n"));
+}
+
+void _rtl8821ae_iqk_configure_mac(
+ struct ieee80211_hw *hw
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ /* ========MAC register setting========*/
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ rtl_write_byte(rtlpriv, 0x522, 0x3f);
+ rtl_set_bbreg(hw, 0x550, BIT(11) | BIT(3), 0x0);
+ rtl_write_byte(rtlpriv, 0x808, 0x00); /*RX ante off*/
+ rtl_set_bbreg(hw, 0x838, 0xf, 0xc); /*CCA off*/
+}
+
+
+void _rtl8821ae_iqk_tx_fill_iqc(
+ struct ieee80211_hw *hw,
+ enum radio_path path,
+ u32 tx_x,
+ u32 tx_y
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ switch (path) {
+ case RF90_PATH_A:
+ {
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+ rtl_write_dword(rtlpriv, 0xc90, 0x00000080);
+ rtl_write_dword(rtlpriv, 0xcc4, 0x20040000);
+ rtl_write_dword(rtlpriv, 0xcc8, 0x20000000);
+ rtl_set_bbreg(hw, 0xccc, 0x000007ff, tx_y);
+ rtl_set_bbreg(hw, 0xcd4, 0x000007ff, tx_x);
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("TX_X = %x;;TX_Y = %x =====> fill to IQC\n", tx_x, tx_y));
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("0xcd4 = %x;;0xccc = %x ====>fill to IQC\n", rtl_get_bbreg(hw, 0xcd4, 0x000007ff), rtl_get_bbreg(hw, 0xccc, 0x000007ff)));
+ }
+ break;
+ default:
+ break;
+ };
+}
+
+
+void _rtl8821ae_iqk_rx_fill_iqc(
+ struct ieee80211_hw *hw,
+ enum radio_path path,
+ u32 rx_x,
+ u32 rx_y
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ switch (path) {
+ case RF90_PATH_A:
+ {
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ rtl_set_bbreg(hw, 0xc10, 0x000003ff, rx_x>>1);
+ rtl_set_bbreg(hw, 0xc10, 0x03ff0000, rx_y>>1);
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("rx_x = %x;;rx_y = %x ====>fill to IQC\n", rx_x>>1, rx_y>>1));
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("0xc10 = %x ====>fill to IQC\n", rtl_read_dword(rtlpriv, 0xc10)));
+ }
+ break;
+ default:
+ break;
+ };
+}
+
+
+
+#define cal_num 10
+
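+/*
+ * IQ-calibration worker for the 8821AE; only RF90_PATH_A is handled. Each
+ * round performs LOK, TX IQK, an RX-mode TXK step and the RX IQK; at 80 MHz
+ * an extra three-step VDF sequence is used. Up to cal_num rounds are run and
+ * two results agreeing to within less than 3 LSBs are averaged into the
+ * final correction.
+ */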
+void _rtl8821ae_iqk_tx(
+ struct ieee80211_hw *hw,
+ enum radio_path path
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ u32 tx_fail, rx_fail, delay_count, iqk_ready, cal_retry, cal = 0, temp_reg65;
+ int tx_x = 0, tx_y = 0, rx_x = 0, rx_y = 0, tx_average = 0, rx_average = 0;
+ int tx_x0[cal_num], tx_y0[cal_num], tx_x0_rxk[cal_num], tx_y0_rxk[cal_num], rx_x0[cal_num], rx_y0[cal_num];
+ bool tx0iqkok = false, rx0iqkok = false;
+ bool vdf_enable = false;
+ int i, k, vdf_y[3], vdf_x[3], tx_dt[3], rx_dt[3], ii, dx = 0, dy = 0, tx_finish = 0, rx_finish = 0;
+
+
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("BandWidth = %d.\n",
+ rtlphy->current_chan_bw));
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80){
+ vdf_enable = true;
+ }
+
+ while (cal < cal_num) {
+ switch (path) {
+ case RF90_PATH_A:
+ {
+ temp_reg65 = rtl_get_rfreg(hw, path, 0x65, 0xffffffff);
+ //Path-A LOK
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*========Path-A AFE all on========*/
+ /*Port 0 DAC/ADC on*/
+ rtl_write_dword(rtlpriv, 0xc60, 0x77777777);
+ rtl_write_dword(rtlpriv, 0xc64, 0x77777777);
+ rtl_write_dword(rtlpriv, 0xc68, 0x19791979);
+ rtl_write_dword(rtlpriv, 0xc6c, 0x19791979);
+ rtl_write_dword(rtlpriv, 0xc70, 0x19791979);
+ rtl_write_dword(rtlpriv, 0xc74, 0x19791979);
+ rtl_write_dword(rtlpriv, 0xc78, 0x19791979);
+ rtl_write_dword(rtlpriv, 0xc7c, 0x19791979);
+ rtl_write_dword(rtlpriv, 0xc80, 0x19791979);
+ rtl_write_dword(rtlpriv, 0xc84, 0x19791979);
+
+ rtl_set_bbreg(hw, 0xc00, 0xf, 0x4); /*hardware 3-wire off*/
+
+ // LOK Setting
+ //====== LOK ======
+ /*DAC/ADC sampling rate (160 MHz)*/
+ rtl_set_bbreg(hw, 0xc5c, BIT(26) | BIT(25) | BIT(24), 0x7);
+
+ // 2. LoK RF Setting (at BW = 20M)
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80002);
+ rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x3); // BW 20M
+ rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x20000);
+ rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0003f);
+ rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xf3fc3);
+ rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d5);
+ rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001);
+ rtl_set_bbreg(hw, 0xcb8, 0xf, 0xd);
+ rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+ rtl_write_dword(rtlpriv, 0xb00, 0x03000100);
+ rtl_set_bbreg(hw, 0xc94, BIT(0), 0x1);
+ rtl_write_dword(rtlpriv, 0x978, 0x29002000);// TX (X,Y)
+ rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);// RX (X,Y)
+ rtl_write_dword(rtlpriv, 0x984, 0x00462910);// [0]:AGC_en, [15]:idac_K_Mask
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+ rtl_write_dword(rtlpriv, 0xc88, 0x821403f4);
+
+ if (rtlhal->current_bandtype)
+ rtl_write_dword(rtlpriv, 0xc8c, 0x68163e96);
+ else
+ rtl_write_dword(rtlpriv, 0xc8c, 0x28163e96);
+
+ rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// cb8[20]: hand SI/PI control over to the iqk_dpk module
+ rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+ rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+ mdelay(10); //Delay 10ms
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ rtl_set_rfreg(hw, path, 0x58, 0x7fe00, rtl_get_rfreg(hw, path, 0x8, 0xffc00)); // Load LOK
+
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20_40:
+ rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x1);
+ break;
+ case HT_CHANNEL_WIDTH_80:
+ rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x0);
+ break;
+ default:
+ break;
+ }
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+
+ // 3. TX RF Setting
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000);
+ rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x20000);
+ rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0003f);
+ rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xf3fc3);
+ rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d5);
+ rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001);
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000);
+ //ODM_SetBBReg(pDM_Odm, 0xcb8, 0xf, 0xd);
+ rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+ rtl_write_dword(rtlpriv, 0xb00, 0x03000100);
+ rtl_set_bbreg(hw, 0xc94, BIT(0), 0x1);
+ rtl_write_dword(rtlpriv, 0x978, 0x29002000);// TX (X,Y)
+ rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);// RX (X,Y)
+ rtl_write_dword(rtlpriv, 0x984, 0x0046a910);// [0]:AGC_en, [15]:idac_K_Mask
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+ rtl_write_dword(rtlpriv, 0xc88, 0x821403f1);
+ if (rtlhal->current_bandtype)
+ rtl_write_dword(rtlpriv, 0xc8c, 0x40163e96);
+ else
+ rtl_write_dword(rtlpriv, 0xc8c, 0x00163e96);
+
+ if (vdf_enable == 1){
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("VDF_enable\n"));
+ for (k = 0;k <= 2; k++){
+ switch (k){
+ case 0:
+ {
+ rtl_write_dword(rtlpriv, 0xc80, 0x18008c38);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x38008c38);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0);
+ }
+ break;
+ case 1:
+ {
+ rtl_set_bbreg(hw, 0xc80, BIT(28), 0x0);
+ rtl_set_bbreg(hw, 0xc84, BIT(28), 0x0);
+ rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0);
+ }
+ break;
+ case 2:
+ {
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("vdf_y[1] = %x;;;vdf_y[0] = %x\n", vdf_y[1]>>21 & 0x00007ff, vdf_y[0]>>21 & 0x00007ff));
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("vdf_x[1] = %x;;;vdf_x[0] = %x\n", vdf_x[1]>>21 & 0x00007ff, vdf_x[0]>>21 & 0x00007ff));
+ tx_dt[cal] = (vdf_y[1]>>20)-(vdf_y[0]>>20);
+ tx_dt[cal] = ((16*tx_dt[cal])*10000/15708);
+ tx_dt[cal] = (tx_dt[cal] >> 1 )+(tx_dt[cal] & BIT(0));
+ rtl_write_dword(rtlpriv, 0xc80, 0x18008c20);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x38008c20);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_set_bbreg(hw, 0xce8, BIT(31), 0x1);
+ rtl_set_bbreg(hw, 0xce8, 0x3fff0000, tx_dt[cal] & 0x00003fff);
+ }
+ break;
+ default:
+ break;
+ }
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// cb8[20]: hand SI/PI control over to the iqk_dpk module
+ cal_retry = 0;
+ while(1){
+ // one shot
+ rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+ rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+ mdelay(10); //Delay 10ms
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+ delay_count = 0;
+ while (1){
+ iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+ if (iqk_ready || (delay_count > 20)){
+ break;
+ }
+ else{
+ mdelay(1);
+ delay_count++;
+ }
+ }
+
+ if (delay_count < 20){ // If 20ms No Result, then cal_retry++
+ // ============TXIQK Check==============
+ tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12));
+
+ if (!tx_fail){
+ rtl_write_dword(rtlpriv, 0xcb8, 0x02000000);
+ vdf_x[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ rtl_write_dword(rtlpriv, 0xcb8, 0x04000000);
+ vdf_y[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ tx0iqkok = true;
+ break;
+ }
+ else{
+ rtl_set_bbreg(hw, 0xccc, 0x000007ff, 0x0);
+ rtl_set_bbreg(hw, 0xcd4, 0x000007ff, 0x200);
+ tx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10) {
+ break;
+ }
+ }
+ }
+ else{
+ tx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10){
+ break;
+ }
+ }
+ }
+ }
+ if (k == 3){
+ tx_x0[cal] = vdf_x[k-1] ;
+ tx_y0[cal] = vdf_y[k-1];
+ }
+ }
+
+ else {
+ rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// cb8[20]: hand SI/PI control over to the iqk_dpk module
+ cal_retry = 0;
+ while(1){
+ // one shot
+ rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+ rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+ mdelay(10); //Delay 10ms
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+ delay_count = 0;
+ while (1){
+ iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+ if (iqk_ready || (delay_count > 20)) {
+ break;
+ }
+ else{
+ mdelay(1);
+ delay_count++;
+ }
+ }
+
+ if (delay_count < 20){ // If 20ms No Result, then cal_retry++
+ // ============TXIQK Check==============
+ tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12));
+
+ if (!tx_fail){
+ rtl_write_dword(rtlpriv, 0xcb8, 0x02000000);
+ tx_x0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ rtl_write_dword(rtlpriv, 0xcb8, 0x04000000);
+ tx_y0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ tx0iqkok = true;
+ break;
+ }
+ else{
+ rtl_set_bbreg(hw, 0xccc, 0x000007ff, 0x0);
+ rtl_set_bbreg(hw, 0xcd4, 0x000007ff, 0x200);
+ tx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10) {
+ break;
+ }
+ }
+ }
+ else{
+ tx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+ }
+ }
+ }
+
+
+ if (tx0iqkok == false)
+ break; // TXK fail, Don't do RXK
+
+ if (vdf_enable == 1){
+ rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0); // TX VDF Disable
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("RXVDF Start\n"));
+ for (k = 0;k <= 2; k++){
+ //====== RX mode TXK (RXK Step 1) ======
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ // 1. TX RF Setting
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000);
+ rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000);
+ rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x00029);
+ rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xd7ffb);
+ rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65);
+ rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001);
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000);
+
+ rtl_set_bbreg(hw, 0xcb8, 0xf, 0xd);
+ rtl_write_dword(rtlpriv, 0x978, 0x29002000);// TX (X,Y)
+ rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);// RX (X,Y)
+ rtl_write_dword(rtlpriv, 0x984, 0x0046a910);// [0]:AGC_en, [15]:idac_K_Mask
+ rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+ rtl_write_dword(rtlpriv, 0xb00, 0x03000100);
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+ switch (k){
+ case 0:
+ {
+ rtl_write_dword(rtlpriv, 0xc80, 0x18008c38);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x38008c38);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_set_bbreg(hw, 0xce8, BIT(30), 0x0);
+ }
+ break;
+ case 1:
+ {
+ rtl_write_dword(rtlpriv, 0xc80, 0x08008c38);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x28008c38);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_set_bbreg(hw, 0xce8, BIT(30), 0x0);
+ }
+ break;
+ case 2:
+ {
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("VDF_Y[1] = %x;;;VDF_Y[0] = %x\n", vdf_y[1]>>21 & 0x00007ff, vdf_y[0]>>21 & 0x00007ff));
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("VDF_X[1] = %x;;;VDF_X[0] = %x\n", vdf_x[1]>>21 & 0x00007ff, vdf_x[0]>>21 & 0x00007ff));
+ rx_dt[cal] = (vdf_y[1]>>20)-(vdf_y[0]>>20);
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("Rx_dt = %d\n", rx_dt[cal]));
+ rx_dt[cal] = ((16*rx_dt[cal])*10000/13823);
+ rx_dt[cal] = (rx_dt[cal] >> 1 )+(rx_dt[cal] & BIT(0));
+ rtl_write_dword(rtlpriv, 0xc80, 0x18008c20);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x38008c20);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_set_bbreg(hw, 0xce8, 0x00003fff, rx_dt[cal] & 0x00003fff);
+ }
+ break;
+ default:
+ break;
+ }
+ rtl_write_dword(rtlpriv, 0xc88, 0x821603e0);
+ rtl_write_dword(rtlpriv, 0xc8c, 0x68163e96);
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// cb8[20]: hand SI/PI control over to the iqk_dpk module
+ cal_retry = 0;
+ while(1){
+ // one shot
+ rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+ rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+ mdelay(10); //Delay 10ms
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+ delay_count = 0;
+ while (1){
+ iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+ if (iqk_ready || (delay_count > 20)){
+ break;
+ }
+ else{
+ mdelay(1);
+ delay_count++;
+ }
+ }
+
+ if (delay_count < 20){ // If 20ms No Result, then cal_retry++
+ // ============TXIQK Check==============
+ tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12));
+
+ if (!tx_fail){
+ rtl_write_dword(rtlpriv, 0xcb8, 0x02000000);
+ tx_x0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ rtl_write_dword(rtlpriv, 0xcb8, 0x04000000);
+ tx_y0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ tx0iqkok = true;
+ break;
+ }
+ else{
+ tx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+ }
+ }
+ else{
+ tx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+ }
+ }
+
+ if (tx0iqkok == false){ //If RX mode TXK fail, then take TXK Result
+ tx_x0_rxk[cal] = tx_x0[cal];
+ tx_y0_rxk[cal] = tx_y0[cal];
+ tx0iqkok = true;
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("RXK Step 1 fail\n"));
+ }
+
+
+ //====== RX IQK ======
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ // 1. RX RF Setting
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000);
+ rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000);
+ rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0002f);
+ rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xfffbb);
+ rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x88001);
+ rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d8);
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000);
+
+ rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x0_rxk[cal])>>21&0x000007ff);
+ rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y0_rxk[cal])>>21&0x000007ff);
+ rtl_set_bbreg(hw, 0x978, BIT(31), 0x1);
+ rtl_set_bbreg(hw, 0x97c, BIT(31), 0x0);
+ rtl_set_bbreg(hw, 0xcb8, 0xF, 0xe);
+ rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+ rtl_write_dword(rtlpriv, 0x984, 0x0046a911);
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+ rtl_set_bbreg(hw, 0xc80, BIT(29), 0x1);
+ rtl_set_bbreg(hw, 0xc84, BIT(29), 0x0);
+ rtl_write_dword(rtlpriv, 0xc88, 0x02140119);
+
+ rtl_write_dword(rtlpriv, 0xc8c, 0x28160d00); /* pDM_Odm->SupportInterface == 1 */
+
+ if (k==2){
+ rtl_set_bbreg(hw, 0xce8, BIT(30), 0x1); //RX VDF Enable
+ }
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// cb8[20]: hand SI/PI control over to the iqk_dpk module
+
+ cal_retry = 0;
+ while(1){
+ // one shot
+ rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+ rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+ mdelay(10); //Delay 10ms
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+ delay_count = 0;
+ while (1){
+ iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+ if (iqk_ready || (delay_count > 20)){
+ break;
+ }
+ else{
+ mdelay(1);
+ delay_count++;
+ }
+ }
+
+ if (delay_count < 20){ // If 20ms No Result, then cal_retry++
+ // ============RXIQK Check==============
+ rx_fail = rtl_get_bbreg(hw, 0xd00, BIT(11));
+ if (rx_fail == 0){
+ rtl_write_dword(rtlpriv, 0xcb8, 0x06000000);
+ vdf_x[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ rtl_write_dword(rtlpriv, 0xcb8, 0x08000000);
+ vdf_y[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ rx0iqkok = true;
+ break;
+ }
+ else{
+ rtl_set_bbreg(hw, 0xc10, 0x000003ff, 0x200>>1);
+ rtl_set_bbreg(hw, 0xc10, 0x03ff0000, 0x0>>1);
+ rx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+
+ }
+ }
+ else{
+ rx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+ }
+ }
+
+ }
+ if (k == 3){
+ rx_x0[cal] = vdf_x[k-1] ;
+ rx_y0[cal] = vdf_y[k-1];
+ }
+ rtl_set_bbreg(hw, 0xce8, BIT(31), 0x1); // TX VDF Enable
+ }
+
+ else{
+ //====== RX mode TXK (RXK Step 1) ======
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ // 1. TX RF Setting
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000);
+ rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000);
+ rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x00029);
+ rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xd7ffb);
+ rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65);
+ rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001);
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000);
+ rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+ rtl_write_dword(rtlpriv, 0xb00, 0x03000100);
+ rtl_write_dword(rtlpriv, 0x984, 0x0046a910);// [0]:AGC_en, [15]:idac_K_Mask
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+ rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_write_dword(rtlpriv, 0xc88, 0x821603e0);
+ //ODM_Write4Byte(pDM_Odm, 0xc8c, 0x68163e96);
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// cb8[20]: hand SI/PI control over to the iqk_dpk module
+ cal_retry = 0;
+ while(1){
+ // one shot
+ rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+ rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+ mdelay(10); //Delay 10ms
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+ delay_count = 0;
+ while (1){
+ iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+ if (iqk_ready || (delay_count > 20)){
+ break;
+ }
+ else{
+ mdelay(1);
+ delay_count++;
+ }
+ }
+
+ if (delay_count < 20){ // If 20ms No Result, then cal_retry++
+ // ============TXIQK Check==============
+ tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12));
+
+ if (!tx_fail){
+ rtl_write_dword(rtlpriv, 0xcb8, 0x02000000);
+ tx_x0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ rtl_write_dword(rtlpriv, 0xcb8, 0x04000000);
+ tx_y0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ tx0iqkok = true;
+ break;
+ }
+ else{
+ tx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+ }
+ }
+ else{
+ tx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+ }
+ }
+
+
+ if (tx0iqkok == false){ //If RX mode TXK fail, then take TXK Result
+ tx_x0_rxk[cal] = tx_x0[cal];
+ tx_y0_rxk[cal] = tx_y0[cal];
+ tx0iqkok = true;
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("1"));
+ }
+
+
+ //====== RX IQK ======
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ // 1. RX RF Setting
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000);
+ rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000);
+ rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0002f);
+ rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xfffbb);
+ rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x88001);
+ rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d8);
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000);
+
+ rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x0_rxk[cal])>>21&0x000007ff);
+ rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y0_rxk[cal])>>21&0x000007ff);
+ rtl_set_bbreg(hw, 0x978, BIT(31), 0x1);
+ rtl_set_bbreg(hw, 0x97c, BIT(31), 0x0);
+ //ODM_SetBBReg(pDM_Odm, 0xcb8, 0xF, 0xe);
+ rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+ rtl_write_dword(rtlpriv, 0x984, 0x0046a911);
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+ rtl_write_dword(rtlpriv, 0xc80, 0x38008c10);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x18008c10);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_write_dword(rtlpriv, 0xc88, 0x02140119);
+
+ rtl_write_dword(rtlpriv, 0xc8c, 0x28160d00); /*pDM_Odm->SupportInterface == 1*/
+
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// cb8[20] hand SI/PI control over to the iqk_dpk module
+
+ cal_retry = 0;
+ while(1){
+ // one shot
+ rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+ rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+ mdelay(10); //Delay 10ms
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+ delay_count = 0;
+ while (1){
+ iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+ if ((~iqk_ready)||(delay_count>20)){
+ break;
+ }
+ else{
+ mdelay(1);
+ delay_count++;
+ }
+ }
+
+ if (delay_count < 20){ // If 20ms No Result, then cal_retry++
+ // ============RXIQK Check==============
+ rx_fail = rtl_get_bbreg(hw, 0xd00, BIT(11));
+ if (rx_fail == 0){
+ /*
+ ODM_Write4Byte(pDM_Odm, 0xcb8, 0x05000000);
+ reg1 = ODM_GetBBReg(pDM_Odm, 0xd00, 0xffffffff);
+ ODM_Write4Byte(pDM_Odm, 0xcb8, 0x06000000);
+ reg2 = ODM_GetBBReg(pDM_Odm, 0xd00, 0x0000001f);
+ DbgPrint("reg1 = %d, reg2 = %d", reg1, reg2);
+ Image_Power = (reg2<<32)+reg1;
+ DbgPrint("Before PW = %d\n", Image_Power);
+ ODM_Write4Byte(pDM_Odm, 0xcb8, 0x07000000);
+ reg1 = ODM_GetBBReg(pDM_Odm, 0xd00, 0xffffffff);
+ ODM_Write4Byte(pDM_Odm, 0xcb8, 0x08000000);
+ reg2 = ODM_GetBBReg(pDM_Odm, 0xd00, 0x0000001f);
+ Image_Power = (reg2<<32)+reg1;
+ DbgPrint("After PW = %d\n", Image_Power);
+ */
+
+ rtl_write_dword(rtlpriv, 0xcb8, 0x06000000);
+ rx_x0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ rtl_write_dword(rtlpriv, 0xcb8, 0x08000000);
+ rx_y0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ rx0iqkok = true;
+ break;
+ }
+ else{
+ rtl_set_bbreg(hw, 0xc10, 0x000003ff, 0x200>>1);
+ rtl_set_bbreg(hw, 0xc10, 0x03ff0000, 0x0>>1);
+ rx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+
+ }
+ }
+ else{
+ rx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+ }
+ }
+ }
+
+ if (tx0iqkok)
+ tx_average++;
+ if (rx0iqkok)
+ rx_average++;
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65);
+ }
+ break;
+ default:
+ break;
+ }
+ cal++;
+ }
+
+ // FillIQK Result
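+ /*
+  * Pick the final coefficients from the stored attempts: the first pair of
+  * TX results whose I and Q values (after >>21) agree within 3 is averaged
+  * and written via _rtl8821ae_iqk_tx_fill_iqc(); RX uses a tolerance of 4
+  * and _rtl8821ae_iqk_rx_fill_iqc().  If no two attempts agree, the
+  * defaults (0x200, 0x0) are written instead.
+  */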
+ switch (path){
+ case RF90_PATH_A:
+ {
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("========Path_A =======\n"));
+ if (tx_average == 0)
+ break;
+
+ for (i = 0; i < tx_average; i++){
+ RT_TRACE(COMP_IQK, DBG_LOUD, (" TX_X0_RXK[%d] = %x ;; TX_Y0_RXK[%d] = %x\n", i, (tx_x0_rxk[i])>>21&0x000007ff, i, (tx_y0_rxk[i])>>21&0x000007ff));
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("TX_X0[%d] = %x ;; TX_Y0[%d] = %x\n", i, (tx_x0[i])>>21&0x000007ff, i, (tx_y0[i])>>21&0x000007ff));
+ }
+ for (i = 0; i < tx_average; i++){
+ for (ii = i+1; ii <tx_average; ii++){
+ dx = (tx_x0[i]>>21) - (tx_x0[ii]>>21);
+ if (dx < 3 && dx > -3){
+ dy = (tx_y0[i]>>21) - (tx_y0[ii]>>21);
+ if (dy < 3 && dy > -3){
+ tx_x = ((tx_x0[i]>>21) + (tx_x0[ii]>>21))/2;
+ tx_y = ((tx_y0[i]>>21) + (tx_y0[ii]>>21))/2;
+ tx_finish = 1;
+ break;
+ }
+ }
+ }
+ if (tx_finish == 1)
+ break;
+ }
+
+ if (tx_finish == 1){
+ _rtl8821ae_iqk_tx_fill_iqc(hw, path, tx_x, tx_y); // ?
+ }
+ else{
+ _rtl8821ae_iqk_tx_fill_iqc(hw, path, 0x200, 0x0);
+ }
+
+ if (rx_average == 0)
+ break;
+
+ for (i = 0; i < rx_average; i++){
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("RX_X0[%d] = %x ;; RX_Y0[%d] = %x\n", i, (rx_x0[i])>>21&0x000007ff, i, (rx_y0[i])>>21&0x000007ff));
+ }
+ for (i = 0; i < rx_average; i++){
+ for (ii = i+1; ii <rx_average; ii++){
+ dx = (rx_x0[i]>>21) - (rx_x0[ii]>>21);
+ if (dx < 4 && dx > -4){
+ dy = (rx_y0[i]>>21) - (rx_y0[ii]>>21);
+ if (dy < 4 && dy > -4){
+ rx_x = ((rx_x0[i]>>21) + (rx_x0[ii]>>21))/2;
+ rx_y = ((rx_y0[i]>>21) + (rx_y0[ii]>>21))/2;
+ rx_finish = 1;
+ break;
+ }
+ }
+ }
+ if (rx_finish == 1)
+ break;
+ }
+
+ if (rx_finish == 1){
+ _rtl8821ae_iqk_rx_fill_iqc(hw, path, rx_x, rx_y);
+ }
+ else{
+ _rtl8821ae_iqk_rx_fill_iqc(hw, path, 0x200, 0x0);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void _rtl8821ae_iqk_restore_rf(
+ struct ieee80211_hw *hw,
+ enum radio_path path,
+ u32* backup_rf_reg,
+ u32* rf_backup,
+ u32 rf_reg_num
+ )
+{
+ u32 i;
+ struct rtl_priv* rtlpriv = rtl_priv(hw);
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ for (i = 0; i < rf_reg_num; i++)
+ rtl_set_rfreg(hw, path, backup_rf_reg[i], RFREG_OFFSET_MASK, rf_backup[i]);
+
+ switch(path){
+ case RF90_PATH_A:
+ {
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("RestoreRF Path A Success!!!!\n"));
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void _rtl8821ae_iqk_restore_afe(
+ struct ieee80211_hw *hw,
+ u32* afe_backup,
+ u32* backup_afe_reg,
+ u32 afe_num
+ )
+{
+ u32 i;
+ struct rtl_priv* rtlpriv = rtl_priv(hw);
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ //Reload AFE Parameters
+ for (i = 0; i < afe_num; i++){
+ rtl_write_dword(rtlpriv, backup_afe_reg[i], afe_backup[i]);
+ }
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+ rtl_write_dword(rtlpriv, 0xc80, 0x0);
+ rtl_write_dword(rtlpriv, 0xc84, 0x0);
+ rtl_write_dword(rtlpriv, 0xc88, 0x0);
+ rtl_write_dword(rtlpriv, 0xc8c, 0x3c000000);
+ rtl_write_dword(rtlpriv, 0xc90, 0x00000080);
+ rtl_write_dword(rtlpriv, 0xc94, 0x00000000);
+ rtl_write_dword(rtlpriv, 0xcc4, 0x20040000);
+ rtl_write_dword(rtlpriv, 0xcc8, 0x20000000);
+ rtl_write_dword(rtlpriv, 0xcb8, 0x0);
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("RestoreAFE Success!!!!\n"));
+}
+
+void _rtl8821ae_iqk_restore_macbb(
+ struct ieee80211_hw *hw,
+ u32* macbb_backup,
+ u32* backup_macbb_reg,
+ u32 macbb_num
+ )
+{
+ u32 i;
+ struct rtl_priv* rtlpriv = rtl_priv(hw);
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ //Reload MacBB Parameters
+ for (i = 0; i < macbb_num; i++){
+ rtl_write_dword(rtlpriv, backup_macbb_reg[i], macbb_backup[i]);
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("RestoreMacBB Success!!!!\n"));
+}
+
+
+#undef MACBB_REG_NUM
+#undef AFE_REG_NUM
+#undef RF_REG_NUM
+
+#define MACBB_REG_NUM 11
+#define AFE_REG_NUM 12
+#define RF_REG_NUM 3
+
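+/*
+ * Top-level IQ calibration for the 8821AE: back up the MAC/BB, AFE and RF
+ * registers listed below, configure the MAC for calibration, run the path A
+ * TX/RX IQK, then restore the RF, AFE and MAC/BB state.
+ */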
+static void _rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw)
+{
+ u32 macbb_backup[MACBB_REG_NUM];
+ u32 afe_backup[AFE_REG_NUM];
+ u32 rfa_backup[RF_REG_NUM];
+ u32 rfb_backup[RF_REG_NUM];
+ u32 backup_macbb_reg[MACBB_REG_NUM] = {0xb00, 0x520, 0x550, 0x808, 0x90c, 0xc00, 0xc50,
+ 0xe00, 0xe50, 0x838, 0x82c};
+ u32 backup_afe_reg[AFE_REG_NUM] = {0xc5c, 0xc60, 0xc64, 0xc68, 0xc6c, 0xc70, 0xc74,
+ 0xc78, 0xc7c, 0xc80, 0xc84, 0xcb8};
+ u32 backup_rf_reg[RF_REG_NUM] = {0x65, 0x8f, 0x0};
+
+ _rtl8821ae_iqk_backup_macbb(hw, macbb_backup, backup_macbb_reg, MACBB_REG_NUM);
+ _rtl8821ae_iqk_backup_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM);
+ _rtl8821ae_iqk_backup_rf(hw, rfa_backup, rfb_backup, backup_rf_reg, RF_REG_NUM);
+
+ _rtl8821ae_iqk_configure_mac(hw);
+ _rtl8821ae_iqk_tx(hw, RF90_PATH_A);
+ _rtl8821ae_iqk_restore_rf(hw, RF90_PATH_A, backup_rf_reg, rfa_backup, RF_REG_NUM);
+
+ _rtl8821ae_iqk_restore_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM);
+ _rtl8821ae_iqk_restore_macbb(hw, macbb_backup, backup_macbb_reg, MACBB_REG_NUM);
+}
+
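+/*
+ * LC calibration: when bits [6:4] of 0xd03 are set they are cleared and RF
+ * register 0x00 of each active path is switched to (mode & 0x8FFFF) | 0x10000;
+ * otherwise all TX is paused through REG_TXPAUSE.  The calibration is then
+ * kicked off through RF registers 0xb0/0x18, allowed 100 ms to complete, and
+ * the original RF mode and TX state are restored.
+ */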
+static void _rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
+{
+ u8 tmpreg;
+ u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ tmpreg = rtl_read_byte(rtlpriv, 0xd03);
+
+ if ((tmpreg & 0x70) != 0)
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+ else
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+ if ((tmpreg & 0x70) != 0) {
+ rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
+
+ if (is2t)
+ rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
+ MASK12BITS);
+
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
+ (rf_a_mode & 0x8FFFF) | 0x10000);
+
+ if (is2t)
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+ (rf_b_mode & 0x8FFFF) | 0x10000);
+ }
+ lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);
+
+ rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdfbe0);
+ /* rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000); */
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, 0x8c0a);
+
+ mdelay(100);
+
+ rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdffe0);
+
+ if ((tmpreg & 0x70) != 0) {
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);
+
+ if (is2t)
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS, rf_b_mode);
+ } else {
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+ }
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("\n"));
+
+}
+
+static void _rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool main)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ //struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ //struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ RT_TRACE(COMP_INIT,DBG_LOUD,("\n"));
+
+ if (main)
+ rtl_set_bbreg(hw, RA_RFE_PINMUX + 4, BIT(29) | BIT(28), 0x1);
+ else
+ rtl_set_bbreg(hw, RA_RFE_PINMUX + 4, BIT(29) | BIT(28), 0x2);
+}
+
+#undef IQK_ADDA_REG_NUM
+#undef IQK_DELAY_TIME
+
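+/*
+ * The 8812AE and 8821AE calibration entry points below share the same
+ * re-entrancy guard: b_iqk_in_progress is set and cleared under iqk_lock, and
+ * a calibration request that arrives while one is already running is ignored.
+ */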
+void rtl8812ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (!rtlphy->b_iqk_in_progress)
+ {
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = true;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+
+ _rtl8812ae_phy_iq_calibrate(hw, rtlphy->current_channel);
+
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = false;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+ }
+}
+
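+/*
+ * Reset every IQK matrix entry to its default: 0x100 for the even-indexed
+ * coefficients, 0x0 for the odd-indexed ones, with b_iqk_done cleared.
+ */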
+void rtl8812ae_reset_iqk_result(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 i;
+
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("rtl8812ae_dm_reset_iqk_result:: settings regs %d default regs %d\n",
+ (int)(sizeof(rtlphy->iqk_matrix_regsetting) /
+ sizeof(struct iqk_matrix_regs)),
+ IQK_MATRIX_SETTINGS_NUM));
+
+ for(i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
+ {
+ rtlphy->iqk_matrix_regsetting[i].value[0][0] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][2] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][4] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][6] = 0x100;
+
+ rtlphy->iqk_matrix_regsetting[i].value[0][1] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][3] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][5] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][7] = 0x0;
+
+ rtlphy->iqk_matrix_regsetting[i].b_iqk_done = false;
+
+ }
+ }
+}
+
+void rtl8812ae_do_iqk(struct ieee80211_hw *hw,u8 delta_thermal_index,
+ u8 thermal_value, u8 threshold)
+{
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+
+ rtl8812ae_reset_iqk_result(hw);
+
+ rtldm->thermalvalue_iqk= thermal_value;
+ rtl8812ae_phy_iq_calibrate(hw, false);
+}
+
+void rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (!rtlphy->b_iqk_in_progress)
+ {
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = true;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+
+ _rtl8821ae_phy_iq_calibrate(hw);
+
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = false;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+ }
+}
+
+void rtl8821ae_reset_iqk_result(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 i;
+
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("rtl8821ae_reset_iqk_result:: settings regs %d default regs %d\n",
+ (int)(sizeof(rtlphy->iqk_matrix_regsetting) /
+ sizeof(struct iqk_matrix_regs)),
+ IQK_MATRIX_SETTINGS_NUM));
+
+ for(i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
+ {
+ rtlphy->iqk_matrix_regsetting[i].value[0][0] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][2] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][4] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][6] = 0x100;
+
+ rtlphy->iqk_matrix_regsetting[i].value[0][1] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][3] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][5] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][7] = 0x0;
+
+ rtlphy->iqk_matrix_regsetting[i].b_iqk_done = false;
+
+ }
+ }
+}
+
+void rtl8821ae_do_iqk(struct ieee80211_hw *hw,u8 delta_thermal_index,
+ u8 thermal_value, u8 threshold)
+{
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+
+ rtl8821ae_reset_iqk_result(hw);
+
+ rtldm->thermalvalue_iqk= thermal_value;
+ rtl8821ae_phy_iq_calibrate(hw, false);
+}
+
+void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ u32 timeout = 2000, timecount = 0;
+
+
+ while (rtlpriv->mac80211.act_scanning && timecount < timeout) {
+ udelay(50);
+ timecount += 50;
+ }
+
+ rtlphy->lck_inprogress = true;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("LCK:Start!!! currentband %x delay %d ms\n",
+ rtlhal->current_bandtype, timecount));
+
+ _rtl8821ae_phy_lc_calibrate(hw, false);
+
+ rtlphy->lck_inprogress = false;
+}
+
+void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (rtlphy->b_apk_done)
+ return;
+
+ return;
+}
+
+void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
+{
+ _rtl8821ae_phy_set_rfpath_switch(hw, bmain);
+}
+
+bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ bool b_postprocessing = false;
+
+ RT_TRACE(COMP_CMD, DBG_TRACE,
+ ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress));
+ do {
+ switch (iotype) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ RT_TRACE(COMP_CMD, DBG_TRACE,
+ ("[IO CMD] Resume DM after scan.\n"));
+ b_postprocessing = true;
+ break;
+ case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
+ case IO_CMD_PAUSE_BAND1_DM_BY_SCAN:
+ RT_TRACE(COMP_CMD, DBG_TRACE,
+ ("[IO CMD] Pause DM before scan.\n"));
+ b_postprocessing = true;
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+ } while (false);
+ if (b_postprocessing && !rtlphy->set_io_inprogress) {
+ rtlphy->set_io_inprogress = true;
+ rtlphy->current_io_type = iotype;
+ } else {
+ return false;
+ }
+ rtl8821ae_phy_set_io(hw);
+ RT_TRACE(COMP_CMD, DBG_TRACE, ("IO Type(%#x)\n", iotype));
+ return true;
+}
+
+static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ RT_TRACE(COMP_CMD, DBG_TRACE,
+ ("--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress));
+ switch (rtlphy->current_io_type) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ if (rtlpriv->mac80211.opmode== NL80211_IFTYPE_ADHOC)
+ _rtl8821ae_resume_tx_beacon(hw);
+ rtl8821ae_dm_write_dig(hw, rtlphy->initgain_backup.xaagccore1);
+ rtl8821ae_dm_write_cck_cca_thres(hw, rtlphy->initgain_backup.cca);
+ break;
+ case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
+ if (rtlpriv->mac80211.opmode== NL80211_IFTYPE_ADHOC)
+ _rtl8821ae_stop_tx_beacon(hw);
+ rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
+ rtl8821ae_dm_write_dig(hw, 0x17);
+ rtlphy->initgain_backup.cca = dm_digtable.cur_cck_cca_thres;
+ rtl8821ae_dm_write_cck_cca_thres(hw, 0x40);
+ break;
+ case IO_CMD_PAUSE_BAND1_DM_BY_SCAN:
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+ rtlphy->set_io_inprogress = false;
+ RT_TRACE(COMP_CMD, DBG_TRACE,
+ ("(%#x)\n", rtlphy->current_io_type));
+}
+
+static void rtl8821ae_phy_set_rf_on(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+}
+
+#if 0
+static void _rtl8821ae_phy_set_rf_sleep(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+ /*rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+ u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+ while (u4b_tmp != 0 && delay > 0) {
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+ u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+ delay--;
+ }
+ if (delay == 0) {
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+ RT_TRACE(COMP_POWER, DBG_TRACE,
+ ("Switch RF timeout !!!.\n"));
+ return;
+ }*/
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
+}
+#endif
+
+static bool _rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool bresult = true;
+ u8 i, queue_id;
+ struct rtl8192_tx_ring *ring = NULL;
+
+ switch (rfpwr_state) {
+ case ERFON:{
+ if ((ppsc->rfpwr_state == ERFOFF) &&
+ RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
+ bool rtstatus = false;
+ u32 InitializeCount = 0;
+ do {
+ InitializeCount++;
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ ("IPS Set eRf nic enable\n"));
+ rtstatus = rtl_ps_enable_nic(hw);
+ } while ((rtstatus != true)
+ && (InitializeCount < 10));
+ RT_CLEAR_PS_LEVEL(ppsc,
+ RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ ("Set ERFON slept:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->
+ last_sleep_jiffies)));
+ ppsc->last_awake_jiffies = jiffies;
+ rtl8821ae_phy_set_rf_on(hw);
+ }
+ if (mac->link_state == MAC80211_LINKED) {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_LINK);
+ } else {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_NO_LINK);
+ }
+ break;
+ }
+ case ERFOFF:{
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ if (skb_queue_len(&ring->queue) == 0) {
+ queue_id++;
+ continue;
+ } else {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("eRf Off/Sleep: %d times "
+ "TcbBusyQueue[%d] =%d before "
+ "doze!\n", (i + 1), queue_id,
+ skb_queue_len(&ring->queue)));
+
+ udelay(10);
+ i++;
+ }
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("\n ERFSLEEP: %d times "
+ "TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue)));
+ break;
+ }
+ }
+
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ ("IPS Set eRf nic disable\n"));
+ rtl_ps_disable_nic(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_NO_LINK);
+ } else {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_POWER_OFF);
+ }
+ }
+ break;
+ }
+ /*case ERFSLEEP:{
+ if (ppsc->rfpwr_state == ERFOFF)
+ break;
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ if (skb_queue_len(&ring->queue) == 0) {
+ queue_id++;
+ continue;
+ } else {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("eRf Off/Sleep: %d times "
+ "TcbBusyQueue[%d] =%d before "
+ "doze!\n", (i + 1), queue_id,
+ skb_queue_len(&ring->queue)));
+
+ udelay(10);
+ i++;
+ }
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("\n ERFSLEEP: %d times "
+ "TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue)));
+ break;
+ }
+ }
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ ("Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies)));
+ ppsc->last_sleep_jiffies = jiffies;
+ _rtl8821ae_phy_set_rf_sleep(hw);
+ break;
+ }*/
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ bresult = false;
+ break;
+ }
+ if (bresult)
+ ppsc->rfpwr_state = rfpwr_state;
+ return bresult;
+}
+
+bool rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ bool bresult = false;
+
+ if (rfpwr_state == ppsc->rfpwr_state)
+ return bresult;
+ bresult = _rtl8821ae_phy_set_rf_power_state(hw, rfpwr_state);
+ return bresult;
+}
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/phy.h b/drivers/staging/rtl8821ae/rtl8821ae/phy.h
new file mode 100644
index 000000000000..a932d8c9d45d
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/phy.h
@@ -0,0 +1,258 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_PHY_H__
+#define __RTL8821AE_PHY_H__
+
+/* It must always be set to 4, otherwise the efuse table read sequence will be wrong. */
+#define MAX_TX_COUNT 4
+#define TX_1S 0
+#define TX_2S 1
+#define TX_3S 2
+#define TX_4S 3
+
+#define MAX_POWER_INDEX 0x3F
+
+#define MAX_PRECMD_CNT 16
+#define MAX_RFDEPENDCMD_CNT 16
+#define MAX_POSTCMD_CNT 16
+
+#define MAX_DOZE_WAITING_TIMES_9x 64
+
+#define RT_CANNOT_IO(hw) false
+#define HIGHPOWER_RADIOA_ARRAYLEN 22
+
+#define IQK_ADDA_REG_NUM 16
+#define IQK_BB_REG_NUM 9
+#define MAX_TOLERANCE 5
+#define IQK_DELAY_TIME 10
+#define index_mapping_NUM 15
+
+#define APK_BB_REG_NUM 5
+#define APK_AFE_REG_NUM 16
+#define APK_CURVE_REG_NUM 4
+#define PATH_NUM 2
+
+#define LOOP_LIMIT 5
+#define MAX_STALL_TIME 50
+#define AntennaDiversityValue 0x80
+#define MAX_TXPWR_IDX_NMODE_92S 63
+#define Reset_Cnt_Limit 3
+
+#define IQK_ADDA_REG_NUM 16
+#define IQK_MAC_REG_NUM 4
+
+#define RF6052_MAX_PATH 2
+
+#define CT_OFFSET_MAC_ADDR 0X16
+
+#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A
+#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60
+#define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF 0x66
+#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
+#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C
+
+#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F
+#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72
+
+#define CT_OFFSET_CHANNEL_PLAH 0x75
+#define CT_OFFSET_THERMAL_METER 0x78
+#define CT_OFFSET_RF_OPTION 0x79
+#define CT_OFFSET_VERSION 0x7E
+#define CT_OFFSET_CUSTOMER_ID 0x7F
+
+#define RTL8821AE_MAX_PATH_NUM 2
+
+#define TARGET_CHNL_NUM_2G_5G_8812 59
+
+enum swchnlcmd_id {
+ CMDID_END,
+ CMDID_SET_TXPOWEROWER_LEVEL,
+ CMDID_BBREGWRITE10,
+ CMDID_WRITEPORT_ULONG,
+ CMDID_WRITEPORT_USHORT,
+ CMDID_WRITEPORT_UCHAR,
+ CMDID_RF_WRITEREG,
+};
+
+struct swchnlcmd {
+ enum swchnlcmd_id cmdid;
+ u32 para1;
+ u32 para2;
+ u32 msdelay;
+};
+
+enum hw90_block_e {
+ HW90_BLOCK_MAC = 0,
+ HW90_BLOCK_PHY0 = 1,
+ HW90_BLOCK_PHY1 = 2,
+ HW90_BLOCK_RF = 3,
+ HW90_BLOCK_MAXIMUM = 4,
+};
+
+enum baseband_config_type {
+ BASEBAND_CONFIG_PHY_REG = 0,
+ BASEBAND_CONFIG_AGC_TAB = 1,
+};
+
+enum ra_offset_area {
+ RA_OFFSET_LEGACY_OFDM1,
+ RA_OFFSET_LEGACY_OFDM2,
+ RA_OFFSET_HT_OFDM1,
+ RA_OFFSET_HT_OFDM2,
+ RA_OFFSET_HT_OFDM3,
+ RA_OFFSET_HT_OFDM4,
+ RA_OFFSET_HT_CCK,
+};
+
+enum antenna_path {
+ ANTENNA_NONE,
+ ANTENNA_D,
+ ANTENNA_C,
+ ANTENNA_CD,
+ ANTENNA_B,
+ ANTENNA_BD,
+ ANTENNA_BC,
+ ANTENNA_BCD,
+ ANTENNA_A,
+ ANTENNA_AD,
+ ANTENNA_AC,
+ ANTENNA_ACD,
+ ANTENNA_AB,
+ ANTENNA_ABD,
+ ANTENNA_ABC,
+ ANTENNA_ABCD
+};
+
+struct r_antenna_select_ofdm {
+ u32 r_tx_antenna:4;
+ u32 r_ant_l:4;
+ u32 r_ant_non_ht:4;
+ u32 r_ant_ht1:4;
+ u32 r_ant_ht2:4;
+ u32 r_ant_ht_s1:4;
+ u32 r_ant_non_ht_s1:4;
+ u32 ofdm_txsc:2;
+ u32 reserved:2;
+};
+
+struct r_antenna_select_cck {
+ u8 r_cckrx_enable_2:2;
+ u8 r_cckrx_enable:2;
+ u8 r_ccktx_enable:4;
+};
+
+
+struct efuse_contents {
+ u8 mac_addr[ETH_ALEN];
+ u8 cck_tx_power_idx[6];
+ u8 ht40_1s_tx_power_idx[6];
+ u8 ht40_2s_tx_power_idx_diff[3];
+ u8 ht20_tx_power_idx_diff[3];
+ u8 ofdm_tx_power_idx_diff[3];
+ u8 ht40_max_power_offset[3];
+ u8 ht20_max_power_offset[3];
+ u8 channel_plan;
+ u8 thermal_meter;
+ u8 rf_option[5];
+ u8 version;
+ u8 oem_id;
+ u8 regulatory;
+};
+
+struct tx_power_struct {
+ u8 cck[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht40_1s[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht40_2s[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht20_diff[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 legacy_ht_diff[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 legacy_ht_txpowerdiff;
+ u8 groupht20[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 groupht40[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 pwrgroup_cnt;
+ u32 mcs_original_offset[4][16];
+};
+enum _ANT_DIV_TYPE
+{
+ NO_ANTDIV = 0xFF,
+ CG_TRX_HW_ANTDIV = 0x01,
+ CGCS_RX_HW_ANTDIV = 0x02,
+ FIXED_HW_ANTDIV = 0x03,
+ CG_TRX_SMART_ANTDIV = 0x04,
+ CGCS_RX_SW_ANTDIV = 0x05,
+
+};
+
+extern u32 rtl8821ae_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+extern void rtl8821ae_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+extern u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask);
+extern void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data);
+extern bool rtl8821ae_phy_mac_config(struct ieee80211_hw *hw);
+extern bool rtl8821ae_phy_bb_config(struct ieee80211_hw *hw);
+extern bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw);
+extern void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band);
+extern void rtl8821ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+extern void rtl8821ae_phy_get_txpower_level(struct ieee80211_hw *hw,
+ long *powerlevel);
+extern void rtl8821ae_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+extern void rtl8821ae_phy_scan_operation_backup(struct ieee80211_hw *hw,
+ u8 operation);
+extern void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+extern void rtl8821ae_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+extern void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+extern u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw);
+extern void rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+extern void rtl8812ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
+bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
+extern bool rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
+u8 _rtl8812ae_get_right_chnl_place_for_iqk(u8 chnl);
+void rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw, u8 channel, u8 path);
+void rtl8812ae_do_iqk(struct ieee80211_hw *hw,u8 delta_thermal_index,
+ u8 thermal_value, u8 threshold);
+void rtl8821ae_do_iqk(struct ieee80211_hw *hw,u8 delta_thermal_index,
+ u8 thermal_value, u8 threshold);
+void rtl8821ae_reset_iqk_result(struct ieee80211_hw *hw);
+
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.c b/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.c
new file mode 100644
index 000000000000..a2e4a01b712b
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.c
@@ -0,0 +1,199 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "pwrseqcmd.h"
+#include "pwrseq.h"
+
+/*
+ drivers should parse below arrays and do the corresponding actions
+*/
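+/*
+ * Consumption sketch (the actual engine is expected to live in pwrseqcmd.c and
+ * its helper names may differ): walk a flow array entry by entry until the
+ * PWR_CMD_END terminator, skip entries whose cut/fab/interface masks do not
+ * match the running chip, and then
+ *  - PWR_CMD_WRITE:   read the register at 'offset', clear the bits in 'msk',
+ *                     OR in 'value' and write it back;
+ *  - PWR_CMD_POLLING: re-read 'offset' until the bits in 'msk' match 'value'
+ *                     or a timeout expires;
+ *  - PWR_CMD_DELAY:   wait for 'value' in us or ms (PWRSEQ_DELAY_US / PWRSEQ_DELAY_MS).
+ */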
+//3 Power on Array
+struct wlan_pwr_cfg rtl8812_power_on_flow[RTL8812_TRANS_CARDEMU_TO_ACT_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+ RTL8812_TRANS_CARDEMU_TO_ACT
+ RTL8812_TRANS_END
+};
+
+//3Radio off GPIO Array
+struct wlan_pwr_cfg rtl8812_radio_off_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+ RTL8812_TRANS_ACT_TO_CARDEMU
+ RTL8812_TRANS_END
+};
+
+//3Card Disable Array
+struct wlan_pwr_cfg rtl8812_card_disable_flow[ RTL8812_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8812_TRANS_CARDEMU_TO_PDN_STEPS
+ + RTL8812_TRANS_END_STEPS ] =
+{
+ RTL8812_TRANS_ACT_TO_CARDEMU
+ RTL8812_TRANS_CARDEMU_TO_CARDDIS
+ RTL8812_TRANS_END
+};
+
+//3 Card Enable Array
+struct wlan_pwr_cfg rtl8812_card_enable_flow[ RTL8812_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8812_TRANS_CARDEMU_TO_PDN_STEPS
+ + RTL8812_TRANS_END_STEPS ] =
+{
+ RTL8812_TRANS_CARDDIS_TO_CARDEMU
+ RTL8812_TRANS_CARDEMU_TO_ACT
+ RTL8812_TRANS_END
+};
+
+//3Suspend Array
+struct wlan_pwr_cfg rtl8812_suspend_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_SUS_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+ RTL8812_TRANS_ACT_TO_CARDEMU
+ RTL8812_TRANS_CARDEMU_TO_SUS
+ RTL8812_TRANS_END
+};
+
+//3 Resume Array
+struct wlan_pwr_cfg rtl8812_resume_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_SUS_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+ RTL8812_TRANS_SUS_TO_CARDEMU
+ RTL8812_TRANS_CARDEMU_TO_ACT
+ RTL8812_TRANS_END
+};
+
+
+
+//3HWPDN Array
+struct wlan_pwr_cfg rtl8812_hwpdn_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_PDN_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+ RTL8812_TRANS_ACT_TO_CARDEMU
+ RTL8812_TRANS_CARDEMU_TO_PDN
+ RTL8812_TRANS_END
+};
+
+//3 Enter LPS
+struct wlan_pwr_cfg rtl8812_enter_lps_flow[RTL8812_TRANS_ACT_TO_LPS_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+ //FW behavior
+ RTL8812_TRANS_ACT_TO_LPS
+ RTL8812_TRANS_END
+};
+
+//3 Leave LPS
+struct wlan_pwr_cfg rtl8812_leave_lps_flow[RTL8812_TRANS_LPS_TO_ACT_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+ //FW behavior
+ RTL8812_TRANS_LPS_TO_ACT
+ RTL8812_TRANS_END
+};
+
+
+/*
+ drivers should parse below arrays and do the corresponding actions
+*/
+/*3 Power on Array*/
+struct wlan_pwr_cfg rtl8821A_power_on_flow[RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ RTL8821A_TRANS_CARDEMU_TO_ACT
+ RTL8821A_TRANS_END
+};
+
+/*3Radio off GPIO Array */
+struct wlan_pwr_cfg rtl8821A_radio_off_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ RTL8821A_TRANS_ACT_TO_CARDEMU
+ RTL8821A_TRANS_END
+};
+
+/*3Card Disable Array*/
+struct wlan_pwr_cfg rtl8821A_card_disable_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ RTL8821A_TRANS_ACT_TO_CARDEMU
+ RTL8821A_TRANS_CARDEMU_TO_CARDDIS
+ RTL8821A_TRANS_END
+};
+
+/*3 Card Enable Array*/
+struct wlan_pwr_cfg rtl8821A_card_enable_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS /*RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS*/
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ RTL8821A_TRANS_CARDDIS_TO_CARDEMU
+ RTL8821A_TRANS_CARDEMU_TO_ACT
+ RTL8821A_TRANS_END
+};
+
+/*3Suspend Array*/
+struct wlan_pwr_cfg rtl8821A_suspend_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ RTL8821A_TRANS_ACT_TO_CARDEMU
+ RTL8821A_TRANS_CARDEMU_TO_SUS
+ RTL8821A_TRANS_END
+};
+
+/*3 Resume Array*/
+struct wlan_pwr_cfg rtl8821A_resume_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ RTL8821A_TRANS_SUS_TO_CARDEMU
+ RTL8821A_TRANS_CARDEMU_TO_ACT
+ RTL8821A_TRANS_END
+};
+
+/*3HWPDN Array*/
+struct wlan_pwr_cfg rtl8821A_hwpdn_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ RTL8821A_TRANS_ACT_TO_CARDEMU
+ RTL8821A_TRANS_CARDEMU_TO_PDN
+ RTL8821A_TRANS_END
+};
+
+/*3 Enter LPS */
+struct wlan_pwr_cfg rtl8821A_enter_lps_flow[RTL8821A_TRANS_ACT_TO_LPS_STEPS
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ /*FW behavior*/
+ RTL8821A_TRANS_ACT_TO_LPS
+ RTL8821A_TRANS_END
+};
+
+/*3 Leave LPS */
+struct wlan_pwr_cfg rtl8821A_leave_lps_flow[RTL8821A_TRANS_LPS_TO_ACT_STEPS
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ /*FW behavior*/
+ RTL8821A_TRANS_LPS_TO_ACT
+ RTL8821A_TRANS_END
+};
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h b/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h
new file mode 100644
index 000000000000..8b39c042fa93
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h
@@ -0,0 +1,413 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_PWRSEQ_H__
+#define __RTL8821AE_PWRSEQ_H__
+
+#include "pwrseqcmd.h"
+#include "../btcoexist/halbt_precomp.h"
+
+#define RTL8812_TRANS_CARDEMU_TO_ACT_STEPS 15
+#define RTL8812_TRANS_ACT_TO_CARDEMU_STEPS 15
+#define RTL8812_TRANS_CARDEMU_TO_SUS_STEPS 15
+#define RTL8812_TRANS_SUS_TO_CARDEMU_STEPS 15
+#define RTL8812_TRANS_CARDEMU_TO_PDN_STEPS 25
+#define RTL8812_TRANS_PDN_TO_CARDEMU_STEPS 15
+#define RTL8812_TRANS_ACT_TO_LPS_STEPS 15
+#define RTL8812_TRANS_LPS_TO_ACT_STEPS 15
+#define RTL8812_TRANS_END_STEPS 1
+
+
+#define RTL8812_TRANS_CARDEMU_TO_ACT \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, 0},/* disable SW LPS 0x04[10]=0*/ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT1, BIT1},/* wait till 0x04[17] = 1 power ready*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},/* disable HWPDN 0x04[15]=0*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, 0},/* disable WL suspend*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/* polling until return 0*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT0, 0},/**/
+
+#define RTL8812_TRANS_ACT_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0c00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x04}, /* 0xc00[7:0] = 4 turn off 3-wire */ \
+ {0x0e00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x04}, /* 0xe00[7:0] = 4 turn off 3-wire */ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0}, /* 0x2[0] = 0 RESET BB, CLOSE RF */ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0}, /* Whole BB is reset*/ \
+ /*{0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},//0x1F[7:0] = 0 turn off RF*/ \
+ /*{0x004E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},//0x4C[23] = 0x4E[7] = 0, switch DPDT_SEL_P output from register 0x65[2] */ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x2A}, /* 0x07[7:0] = 0x28 sps pwm mode 0x2a for BT coex*/ \
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x02, 0},/*0x8[1] = 0 ANA clk =500k */ \
+ /*{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0|BIT1, 0}, // 0x02[1:0] = 0 reset BB */ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1}, /*0x04[9] = 1 turn off MAC by HW state machine*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT1, 0}, /*wait till 0x04[9] = 0 polling until return 0 to disable*/
+
+#define RTL8812_TRANS_CARDEMU_TO_SUS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xF0, 0xcc},\
+ {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xF0, 0xEC},\
+ {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x07},/* gpio11 input mode, gpio10~8 output mode */ \
+ {0x0045, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio 0~7 output same value as input ?? */ \
+ {0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xff},/* gpio0~7 output mode */ \
+ {0x0047, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/* 0x47[7:0] = 00 gpio mode */ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/* suspend option all off */ \
+ {0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, BIT7},/*0x14[7] = 1 turn on ZCD */ \
+ {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, BIT0},/* 0x15[0] =1 turn on ZCD */ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, BIT4},/*0x23[4] = 1 hpon LDO sleep mode */ \
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x02, 0},/*0x8[1] = 0 ANA clk =500k */ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, BIT3}, /*0x04[11] = 2b'11 enable WL suspend for PCIe*/
+
+#define RTL8812_TRANS_SUS_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, 0}, /*0x04[11] = 2b'01enable WL suspend*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, 0},/*0x23[4] = 0 hpon LDO sleep mode leave */ \
+ {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/* 0x15[0] =0 turn off ZCD */ \
+ {0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, 0},/*0x14[7] = 0 turn off ZCD */ \
+ {0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio0~7 input mode */ \
+ {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio11 input mode, gpio10~8 input mode */
+
+#define RTL8812_TRANS_CARDEMU_TO_CARDDIS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ /**{0x0194, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0}, //0x194[0]=0 , disable 32K clock*/ \
+ /**{0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x94}, //0x93=0x94 , 90[30] =0 enable 500k ANA clock .switch clock from 12M to 500K , 90 [26] =0 disable EEprom loader clock*/ \
+ {0x0003, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, 0}, /*0x03[2] = 0, reset 8051*/ \
+ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x05}, /*0x80=05h if reload fw, fill the default value of host_CPU handshake field*/ \
+ {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xF0, 0xcc},\
+ {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xF0, 0xEC},\
+ {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x07},/* gpio11 input mode, gpio10~8 output mode */ \
+ {0x0045, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio 0~7 output same value as input ?? */ \
+ {0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xff},/* gpio0~7 output mode */ \
+ {0x0047, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/* 0x47[7:0] = 00 gpio mode */ \
+ {0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, BIT7},/*0x14[7] = 1 turn on ZCD */ \
+ {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, BIT0},/* 0x15[0] =1 turn on ZCD */ \
+ {0x0012, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/*0x12[0] = 0 force PFM mode */ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, BIT4},/*0x23[4] = 1 hpon LDO sleep mode */ \
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x02, 0},/*0x8[1] = 0 ANA clk =500k */ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07=0x20 , SOP option to disable BG/MB*/ \
+ {0x001f, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0}, /*0x01f[1]=0 , disable RFC_0 control REG_RF_CTRL_8812 */ \
+ {0x0076, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0}, /*0x076[1]=0 , disable RFC_1 control REG_OPT_CTRL_8812 +2 */ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, BIT3}, /*0x04[11] = 2b'01 enable WL suspend*/
+
+#define RTL8812_TRANS_CARDDIS_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0012, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/*0x12[0] = 1 force PWM mode */ \
+ {0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, 0},/*0x14[7] = 0 turn off ZCD */ \
+ {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/* 0x15[0] =0 turn off ZCD */ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, 0},/*0x23[4] = 0 hpon LDO leave sleep mode */ \
+ {0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio0~7 input mode */ \
+ {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio11 input mode, gpio10~8 input mode */ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, 0}, /*0x04[10] = 0, enable SW LPS PCIE only*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, 0}, /*0x04[11] = 2b'01enable WL suspend*/ \
+ {0x0003, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, BIT2}, /*0x03[2] = 1, enable 8051*/ \
+ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/*PCIe DMA start*/
+
+
+#define RTL8812_TRANS_CARDEMU_TO_PDN \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, BIT7},/* 0x04[15] = 1*/
+
+#define RTL8812_TRANS_PDN_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},/* 0x04[15] = 0*/
+
+#define RTL8812_TRANS_ACT_TO_LPS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF},/*PCIe DMA stop*/ \
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x7F},/*Tx Pause*/ \
+ {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x0c00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x04}, /* 0xc00[7:0] = 4 turn off 3-wire */ \
+ {0x0e00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x04}, /* 0xe00[7:0] = 4 turn off 3-wire */ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0},/*CCK and OFDM are disabled,and clock are gated,and RF closed*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0}, /* Whole BB is reset*/ \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x03},/*Reset MAC TRX*/ \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*check if removed later*/ \
+ {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT5, BIT5},/*Respond TxOK to scheduler*/
+
+
+#define RTL8812_TRANS_LPS_TO_ACT \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/ \
+ {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/ \
+ {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/ \
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*. 0x08[4] = 0 switch TSF to 40M*/ \
+ {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT7, 0}, /*Polling 0x109[7]=0 TSF in 40M*/ \
+ {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT6|BIT7, 0}, /*. 0x29[7:6] = 2b'00 enable BB clock*/ \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1}, /*. 0x101[1] = 1*/ \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF}, /*. 0x100[7:0] = 0xFF enable WMAC TRX*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1|BIT0, BIT1|BIT0}, /*. 0x02[1:0] = 2b'11 enable BB macro*/ \
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/
+
+#define RTL8812_TRANS_END \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,0,PWR_CMD_END, 0, 0}, //
+
+
+extern struct wlan_pwr_cfg rtl8812_power_on_flow[RTL8812_TRANS_CARDEMU_TO_ACT_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8812_radio_off_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8812_card_disable_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_PDN_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8812_card_enable_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_PDN_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8812_suspend_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_SUS_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8812_resume_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_SUS_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8812_hwpdn_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_PDN_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8812_enter_lps_flow[RTL8812_TRANS_ACT_TO_LPS_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8812_leave_lps_flow[RTL8812_TRANS_LPS_TO_ACT_STEPS+RTL8812_TRANS_END_STEPS];
+
+/*
+ Check document WM-20130516-JackieLau-RTL8821A_Power_Architecture-R10.vsd
+ There are 6 HW Power States:
+ 0: POFF--Power Off
+ 1: PDN--Power Down
+ 2: CARDEMU--Card Emulation
+ 3: ACT--Active Mode
+ 4: LPS--Low Power State
+ 5: SUS--Suspend
+
+ The transitions between the different states are defined below
+ TRANS_CARDEMU_TO_ACT
+ TRANS_ACT_TO_CARDEMU
+ TRANS_CARDEMU_TO_SUS
+ TRANS_SUS_TO_CARDEMU
+ TRANS_CARDEMU_TO_PDN
+ TRANS_ACT_TO_LPS
+ TRANS_LPS_TO_ACT
+
+ TRANS_END
+*/
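+/*
+ * The flow arrays in pwrseq.c are built by concatenating these transition
+ * macros, e.g. the power-on flow is CARDEMU_TO_ACT followed by END, and the
+ * card-enable flow is CARDDIS_TO_CARDEMU + CARDEMU_TO_ACT + END.
+ */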
+#define RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS 25
+#define RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS 15
+#define RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS 15
+#define RTL8821A_TRANS_SUS_TO_CARDEMU_STEPS 15
+#define RTL8821A_TRANS_CARDDIS_TO_CARDEMU_STEPS 15
+#define RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS 15
+#define RTL8821A_TRANS_PDN_TO_CARDEMU_STEPS 15
+#define RTL8821A_TRANS_ACT_TO_LPS_STEPS 15
+#define RTL8821A_TRANS_LPS_TO_ACT_STEPS 15
+#define RTL8821A_TRANS_END_STEPS 1
+
+
+#define RTL8821A_TRANS_CARDEMU_TO_ACT \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0}, /*0x20[0] = 1b'1 enable LDOA12 MACRO block for all interface*/ \
+ {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*0x67[0] = 0 to disable BT_GPS_SEL pins*/ \
+ {0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS},/*Delay 1ms*/ \
+ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT5, 0}, /*0x00[5] = 1b'0 release analog Ips to digital ,1:isolation*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, (BIT4|BIT3|BIT2), 0},/* disable SW LPS 0x04[10]=0 and WLSUS_EN 0x04[12:11]=0*/ \
+ {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0 , BIT0},/* Disable USB suspend */ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT1, BIT1},/* wait till 0x04[17] = 1 power ready*/ \
+ {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0 , 0},/* Enable USB suspend */ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/* release WLON reset 0x04[16]=1*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},/* disable HWPDN 0x04[15]=0*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, (BIT4|BIT3), 0},/* disable WL suspend*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/* polling until return 0*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT0, 0},/**/ \
+ {0x004F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/*0x4C[24] = 0x4F[0] = 1, switch DPDT_SEL_P output from WL BB */\
+ {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, (BIT5|BIT4), (BIT5|BIT4)},/*0x66[13] = 0x67[5] = 1, switch for PAPE_G/PAPE_A from WL BB ; 0x66[12] = 0x67[4] = 1, switch LNAON from WL BB */\
+ {0x0025, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT6, 0},/*anapar_mac<118> , 0x25[6]=0 by wlan single function*/\
+ {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1},/*Enable falling edge triggering interrupt*/\
+ {0x0063, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1},/*Enable GPIO9 interrupt mode*/\
+ {0x0062, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*Enable GPIO9 input mode*/\
+ {0x0058, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/*Enable HSISR GPIO[C:0] interrupt*/\
+ {0x005A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1},/*Enable HSISR GPIO9 interrupt*/\
+ {0x007A, PWR_CUT_TESTCHIP_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x3A},/*0x7A = 0x3A start BT*/\
+ {0x002E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF , 0x82 },/* 0x2C[23:12]=0x820 ; XTAL trim */ \
+ {0x0010, PWR_CUT_A_MSK , PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT6 , BIT6 },/* 0x10[6]=1 ; MP chips add a control gate for 0x2C: 0x10[6] must be set to 1 before WLAN is allowed to take control */ \
+
+
+#define RTL8821A_TRANS_ACT_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/*0x1F[7:0] = 0 turn off RF*/ \
+ {0x004F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0},/*0x4C[24] = 0x4F[0] = 0, switch DPDT_SEL_P output from register 0x65[2] */\
+ {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*Enable rising edge triggering interrupt*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1}, /*0x04[9] = 1 turn off MAC by HW state machine*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT1, 0}, /*wait till 0x04[9] = 0 polling until return 0 to disable*/ \
+ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT5, BIT5}, /*0x00[5] = 1b'1 analog Ips to digital ,1:isolation*/ \
+ {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0}, /*0x20[0] = 1b'0 disable LDOA12 MACRO block*/ \
+
+
+#define RTL8821A_TRANS_CARDEMU_TO_SUS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4|BIT3, (BIT4|BIT3)}, /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07[7:0] = 0x20 SDIO SOP option to disable BG/MB/ACK/SWR*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, BIT3|BIT4}, /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/
+
+#define RTL8821A_TRANS_SUS_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3 | BIT7, 0}, /*clear suspend enable and power down enable*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, BIT0, 0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_POLLING, BIT1, BIT1}, /*wait power state to suspend*/\
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*0x23[4] = 1b'0 12H LDO enter normal mode*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, 0}, /*0x04[12:11] = 2b'01 enable WL suspend*/
+
+#define RTL8821A_TRANS_CARDEMU_TO_CARDDIS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07=0x20 , SOP option to disable BG/MB*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, BIT2}, /*0x04[10] = 1, enable SW LPS*/ \
+ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 1}, /*0x48[16] = 1 to enable GPIO9 as EXT WAKEUP*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/
+
+#define RTL8821A_TRANS_CARDDIS_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3 | BIT7, 0}, /*clear suspend enable and power down enable*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, BIT0, 0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_POLLING, BIT1, BIT1}, /*wait power state to suspend*/\
+ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0}, /*0x48[16] = 0 to disable GPIO9 as EXT WAKEUP*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, 0}, /*0x04[12:11] = 2b'01 enable WL suspend*/\
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*0x23[4] = 1b'0 12H LDO enter normal mode*/ \
+ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/*PCIe DMA start*/
+
+
+#define RTL8821A_TRANS_CARDEMU_TO_PDN \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK|PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07[7:0] = 0x20 SOP option to disable BG/MB/ACK/SWR*/ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0},/* 0x04[16] = 0*/\
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, BIT7},/* 0x04[15] = 1*/
+
+#define RTL8821A_TRANS_PDN_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},/* 0x04[15] = 0*/
+
+#define RTL8821A_TRANS_ACT_TO_LPS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF},/*PCIe DMA stop*/ \
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF},/*Tx Pause*/ \
+ {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0},/*CCK and OFDM are disabled,and clock are gated*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*Whole BB is reset*/ \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x03},/*Reset MAC TRX*/ \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*check if removed later*/ \
+ {0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/*When the driver enters suspend/disable, enable LOP for BT*/ \
+ {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT5, BIT5},/*Respond TxOK to scheduler*/ \
+
+
+#define RTL8821A_TRANS_LPS_TO_ACT \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/\
+ {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/\
+ {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/\
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/\
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*. 0x08[4] = 0 switch TSF to 40M*/\
+ {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT7, 0}, /*Polling 0x109[7]=0 TSF in 40M*/\
+ {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT6|BIT7, 0}, /*. 0x29[7:6] = 2b'00 enable BB clock*/\
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1}, /*. 0x101[1] = 1*/\
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF}, /*. 0x100[7:0] = 0xFF enable WMAC TRX*/\
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1|BIT0, BIT1|BIT0}, /*. 0x02[1:0] = 2b'11 enable BB macro*/\
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/
+
+#define RTL8821A_TRANS_END \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,0,PWR_CMD_END, 0, 0}, //
+
+extern struct wlan_pwr_cfg rtl8821A_power_on_flow[RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS
+ + RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_radio_off_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_card_disable_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS
+ + RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_card_enable_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS/*RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS*/
+ + RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_suspend_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS
+ + RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_resume_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS
+ + RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_hwpdn_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS
+ + RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_enter_lps_flow[RTL8821A_TRANS_ACT_TO_LPS_STEPS
+ + RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_leave_lps_flow[RTL8821A_TRANS_LPS_TO_ACT_STEPS
+ + RTL8821A_TRANS_END_STEPS];
+
+/*RTL8812 Power Configuration CMDs for PCIe interface*/
+#define RTL8812_NIC_PWR_ON_FLOW rtl8812_power_on_flow
+#define RTL8812_NIC_RF_OFF_FLOW rtl8812_radio_off_flow
+#define RTL8812_NIC_DISABLE_FLOW rtl8812_card_disable_flow
+#define RTL8812_NIC_ENABLE_FLOW rtl8812_card_enable_flow
+#define RTL8812_NIC_SUSPEND_FLOW rtl8812_suspend_flow
+#define RTL8812_NIC_RESUME_FLOW rtl8812_resume_flow
+#define RTL8812_NIC_PDN_FLOW rtl8812_hwpdn_flow
+#define RTL8812_NIC_LPS_ENTER_FLOW rtl8812_enter_lps_flow
+#define RTL8812_NIC_LPS_LEAVE_FLOW rtl8812_leave_lps_flow
+
+/* RTL8821 Power Configuration CMDs for PCIe interface */
+#define RTL8821A_NIC_PWR_ON_FLOW rtl8821A_power_on_flow
+#define RTL8821A_NIC_RF_OFF_FLOW rtl8821A_radio_off_flow
+#define RTL8821A_NIC_DISABLE_FLOW rtl8821A_card_disable_flow
+#define RTL8821A_NIC_ENABLE_FLOW rtl8821A_card_enable_flow
+#define RTL8821A_NIC_SUSPEND_FLOW rtl8821A_suspend_flow
+#define RTL8821A_NIC_RESUME_FLOW rtl8821A_resume_flow
+#define RTL8821A_NIC_PDN_FLOW rtl8821A_hwpdn_flow
+#define RTL8821A_NIC_LPS_ENTER_FLOW rtl8821A_enter_lps_flow
+#define RTL8821A_NIC_LPS_LEAVE_FLOW rtl8821A_leave_lps_flow
+
+
+#endif
+
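Editorial note: each RTL8821A_TRANS_* macro above expands to a comma-terminated list of struct wlan_pwr_cfg initializers in the documented { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value } format, and the *_STEPS defines size the extern flow tables that consume them. A minimal sketch (not part of this patch) of how a matching pwrseq.c would be expected to stitch the macros into one of those tables:

struct wlan_pwr_cfg rtl8821A_power_on_flow[RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS
					   + RTL8821A_TRANS_END_STEPS] = {
	/* each macro row is one { offset, ..., value } entry */
	RTL8821A_TRANS_CARDEMU_TO_ACT
	/* terminating 0xFFFF / PWR_CMD_END sentinel */
	RTL8821A_TRANS_END
};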
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c b/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c
new file mode 100644
index 000000000000..710bc015251c
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c
@@ -0,0 +1,140 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "pwrseq.h"
+
+
+/*
+* Description:
+* This routine deals with parsing the Power Configuration CMDs
+* for the RTL8723/RTL8188E series ICs.
+* Assumption:
+* The command tables follow the specific format released by HW SD.
+*
+* 2011.07.07, added by Roger.
+*/
+bool rtl_hal_pwrseqcmdparsing (struct rtl_priv* rtlpriv, u8 cut_version,
+ u8 fab_version, u8 interface_type,
+ struct wlan_pwr_cfg pwrcfgcmd[])
+
+{
+ struct wlan_pwr_cfg pwr_cfg_cmd = {0};
+ bool polling_bit = false;
+ u32 ary_idx=0;
+ u8 value = 0;
+ u32 offset = 0;
+ u32 polling_count = 0;
+ u32 max_polling_cnt = 5000;
+
+ do {
+ pwr_cfg_cmd = pwrcfgcmd[ary_idx];
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), fab_msk(%#x),"
+ "interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
+ GET_PWR_CFG_OFFSET(pwr_cfg_cmd), GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd),
+ GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd), GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd),
+ GET_PWR_CFG_BASE(pwr_cfg_cmd), GET_PWR_CFG_CMD(pwr_cfg_cmd),
+ GET_PWR_CFG_MASK(pwr_cfg_cmd), GET_PWR_CFG_VALUE(pwr_cfg_cmd)));
+
+ if ((GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd)&fab_version) &&
+ (GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd)&cut_version) &&
+ (GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd)&interface_type)) {
+ switch (GET_PWR_CFG_CMD(pwr_cfg_cmd)) {
+ case PWR_CMD_READ:
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n"));
+ break;
+
+ case PWR_CMD_WRITE: {
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n"));
+ offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd);
+
+ /*Read the value from system register*/
+ value = rtl_read_byte(rtlpriv, offset);
+ value = value & (~(GET_PWR_CFG_MASK(pwr_cfg_cmd)));
+ value = value | (GET_PWR_CFG_VALUE(pwr_cfg_cmd)
+ & GET_PWR_CFG_MASK(pwr_cfg_cmd));
+
+ /*Write the value back to system register*/
+ rtl_write_byte(rtlpriv, offset, value);
+ }
+ break;
+
+ case PWR_CMD_POLLING:
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n"));
+ polling_bit = false;
+ offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd);
+
+ do {
+ value = rtl_read_byte(rtlpriv, offset);
+
+ value = value & GET_PWR_CFG_MASK(pwr_cfg_cmd);
+ if (value == (GET_PWR_CFG_VALUE(pwr_cfg_cmd)
+ & GET_PWR_CFG_MASK(pwr_cfg_cmd)))
+ polling_bit=true;
+ else
+ udelay(10);
+
+ if (polling_count++ > max_polling_cnt) {
+ return false;
+ }
+ } while (!polling_bit);
+
+ break;
+
+ case PWR_CMD_DELAY:
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n"));
+ if (GET_PWR_CFG_VALUE(pwr_cfg_cmd) == PWRSEQ_DELAY_US)
+ udelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd));
+ else
+ mdelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd));
+ break;
+
+ case PWR_CMD_END:
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n"));
+ return true;
+ break;
+
+ default:
+ RT_ASSERT(false,
+ ("rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n"));
+ break;
+ }
+
+ }
+
+ ary_idx++;
+ } while (1);
+
+ return true;
+}
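As a hedged illustration (not part of this patch), a caller in the HAL bring-up path would be expected to pass the detected cut/fab masks, the host interface mask, and one of the flow tables from pwrseq.h; the function name and error value below are hypothetical:

static int example_rtl8821ae_power_on(struct rtl_priv *rtlpriv)
{
	/* Walk the card-enable sequence for an A-cut, TSMC, PCIe part. */
	if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_A_MSK,
				      PWR_FAB_TSMC_MSK, PWR_INTF_PCI_MSK,
				      RTL8821A_NIC_ENABLE_FLOW))
		return -EBUSY;	/* a PWR_CMD_POLLING step timed out */

	return 0;
}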
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.h b/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.h
new file mode 100644
index 000000000000..571e7e50d5b5
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.h
@@ -0,0 +1,71 @@
+#ifndef __RTL8821AE_PWRSEQCMD_H__
+#define __RTL8821AE_PWRSEQCMD_H__
+
+#include "../wifi.h"
+/*---------------------------------------------*/
+/*The value of cmd: 4 bits */
+/*---------------------------------------------*/
+#define PWR_CMD_READ 0x00
+#define PWR_CMD_WRITE 0x01
+#define PWR_CMD_POLLING 0x02
+#define PWR_CMD_DELAY 0x03
+#define PWR_CMD_END 0x04
+
+/* define the base address of each block */
+#define PWR_BASEADDR_MAC 0x00
+#define PWR_BASEADDR_USB 0x01
+#define PWR_BASEADDR_PCIE 0x02
+#define PWR_BASEADDR_SDIO 0x03
+
+#define PWR_INTF_SDIO_MSK BIT(0)
+#define PWR_INTF_USB_MSK BIT(1)
+#define PWR_INTF_PCI_MSK BIT(2)
+#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+#define PWR_FAB_TSMC_MSK BIT(0)
+#define PWR_FAB_UMC_MSK BIT(1)
+#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+#define PWR_CUT_TESTCHIP_MSK BIT(0)
+#define PWR_CUT_A_MSK BIT(1)
+#define PWR_CUT_B_MSK BIT(2)
+#define PWR_CUT_C_MSK BIT(3)
+#define PWR_CUT_D_MSK BIT(4)
+#define PWR_CUT_E_MSK BIT(5)
+#define PWR_CUT_F_MSK BIT(6)
+#define PWR_CUT_G_MSK BIT(7)
+#define PWR_CUT_ALL_MSK 0xFF
+
+
+enum pwrseq_delay_unit {
+ PWRSEQ_DELAY_US,
+ PWRSEQ_DELAY_MS,
+};
+
+struct wlan_pwr_cfg {
+ u16 offset;
+ u8 cut_msk;
+ u8 fab_msk:4;
+ u8 interface_msk:4;
+ u8 base:4;
+ u8 cmd:4;
+ u8 msk;
+ u8 value;
+
+};
+
+#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset
+#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk
+#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk
+#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk
+#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base
+#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd
+#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk
+#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value
+
+bool rtl_hal_pwrseqcmdparsing(struct rtl_priv * rtlpriv, u8 cut_version,
+ u8 fab_version, u8 interface_type,
+ struct wlan_pwr_cfg pwrcfgcmd[]);
+
+#endif
+
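For illustration only (not part of this patch): one table row maps directly onto struct wlan_pwr_cfg, and for PWR_CMD_DELAY entries the parser reuses .offset as the delay amount and .value as the unit, so a 1 ms delay row would look like the sketch below.

static const struct wlan_pwr_cfg example_delay_entry = {
	.offset		= 1,			/* delay amount, not a register */
	.cut_msk	= PWR_CUT_ALL_MSK,
	.fab_msk	= PWR_FAB_ALL_MSK,
	.interface_msk	= PWR_INTF_ALL_MSK,
	.base		= PWR_BASEADDR_MAC,
	.cmd		= PWR_CMD_DELAY,
	.msk		= 0,
	.value		= PWRSEQ_DELAY_MS,	/* unit selector */
};
/* GET_PWR_CFG_CMD(example_delay_entry) == PWR_CMD_DELAY and
 * GET_PWR_CFG_OFFSET(example_delay_entry) == 1, matching the mdelay(1)
 * branch in rtl_hal_pwrseqcmdparsing(). */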
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/reg.h b/drivers/staging/rtl8821ae/rtl8821ae/reg.h
new file mode 100644
index 000000000000..09c5f00d2603
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/reg.h
@@ -0,0 +1,2427 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_REG_H__
+#define __RTL8821AE_REG_H__
+
+#define TXPKT_BUF_SELECT 0x69
+#define RXPKT_BUF_SELECT 0xA5
+#define DISABLE_TRXPKT_BUF_ACCESS 0x0
+
+#define REG_SYS_ISO_CTRL 0x0000
+#define REG_SYS_FUNC_EN 0x0002
+#define REG_APS_FSMCO 0x0004
+#define REG_SYS_CLKR 0x0008
+#define REG_9346CR 0x000A
+#define REG_EE_VPD 0x000C
+#define REG_AFE_MISC 0x0010
+#define REG_SPS0_CTRL 0x0011
+#define REG_SPS_OCP_CFG 0x0018
+#define REG_RSV_CTRL 0x001C
+#define REG_RF_CTRL 0x001F
+#define REG_LDOA15_CTRL 0x0020
+#define REG_LDOV12D_CTRL 0x0021
+#define REG_LDOHCI12_CTRL 0x0022
+#define REG_LPLDO_CTRL 0x0023
+#define REG_AFE_XTAL_CTRL 0x0024
+#define REG_AFE_LDO_CTRL 0x0027 /* 1.5v for 8188EE test chip, 1.4v for MP chip */
+#define REG_AFE_PLL_CTRL 0x0028
+#define REG_MAC_PHY_CTRL 0x002c
+#define REG_EFUSE_CTRL 0x0030
+#define REG_EFUSE_TEST 0x0034
+#define REG_PWR_DATA 0x0038
+#define REG_CAL_TIMER 0x003C
+#define REG_ACLK_MON 0x003E
+#define REG_GPIO_MUXCFG 0x0040
+#define REG_GPIO_IO_SEL 0x0042
+#define REG_MAC_PINMUX_CFG 0x0043
+#define REG_GPIO_PIN_CTRL 0x0044
+#define REG_GPIO_INTM 0x0048
+#define REG_LEDCFG0 0x004C
+#define REG_LEDCFG1 0x004D
+#define REG_LEDCFG2 0x004E
+#define REG_LEDCFG3 0x004F
+#define REG_FSIMR 0x0050
+#define REG_FSISR 0x0054
+#define REG_HSIMR 0x0058
+#define REG_HSISR 0x005c
+#define REG_GPIO_PIN_CTRL_2 0x0060
+#define REG_GPIO_IO_SEL_2 0x0062
+#define REG_MULTI_FUNC_CTRL 0x0068
+#define REG_GPIO_OUTPUT 0x006c
+#define REG_OPT_CTRL 0x0074
+#define REG_AFE_XTAL_CTRL_EXT 0x0078
+#define REG_XCK_OUT_CTRL 0x007c
+#define REG_MCUFWDL 0x0080
+#define REG_WOL_EVENT 0x0081
+#define REG_MCUTSTCFG 0x0084
+
+
+#define REG_HIMR 0x00B0
+#define REG_HISR 0x00B4
+#define REG_HIMRE 0x00B8
+#define REG_HISRE 0x00BC
+
+#define REG_PMC_DBG_CTRL2 0x00CC
+
+#define REG_EFUSE_ACCESS 0x00CF
+
+#define REG_BIST_SCAN 0x00D0
+#define REG_BIST_RPT 0x00D4
+#define REG_BIST_ROM_RPT 0x00D8
+#define REG_USB_SIE_INTF 0x00E0
+#define REG_PCIE_MIO_INTF 0x00E4
+#define REG_PCIE_MIO_INTD 0x00E8
+#define REG_HPON_FSM 0x00EC
+#define REG_SYS_CFG 0x00F0
+#define REG_GPIO_OUTSTS 0x00F4
+#define REG_SYS_CFG1 0x00FC
+#define REG_ROM_VERSION 0x00FD
+
+#define REG_CR 0x0100
+#define REG_PBP 0x0104
+#define REG_PKT_BUFF_ACCESS_CTRL 0x0106
+#define REG_TRXDMA_CTRL 0x010C
+#define REG_TRXFF_BNDY 0x0114
+#define REG_TRXFF_STATUS 0x0118
+#define REG_RXFF_PTR 0x011C
+
+#define REG_CPWM 0x012F
+#define REG_FWIMR 0x0130
+#define REG_FWISR 0x0134
+#define REG_PKTBUF_DBG_CTRL 0x0140
+#define REG_PKTBUF_DBG_DATA_L 0x0144
+#define REG_PKTBUF_DBG_DATA_H 0x0148
+#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL+2)
+
+#define REG_TC0_CTRL 0x0150
+#define REG_TC1_CTRL 0x0154
+#define REG_TC2_CTRL 0x0158
+#define REG_TC3_CTRL 0x015C
+#define REG_TC4_CTRL 0x0160
+#define REG_TCUNIT_BASE 0x0164
+#define REG_MBIST_START 0x0174
+#define REG_MBIST_DONE 0x0178
+#define REG_MBIST_FAIL 0x017C
+#define REG_32K_CTRL 0x0194
+#define REG_C2HEVT_MSG_NORMAL 0x01A0
+#define REG_C2HEVT_CLEAR 0x01AF
+#define REG_C2HEVT_MSG_TEST 0x01B8
+#define REG_MCUTST_1 0x01c0
+#define REG_FMETHR 0x01C8
+#define REG_HMETFR 0x01CC
+#define REG_HMEBOX_0 0x01D0
+#define REG_HMEBOX_1 0x01D4
+#define REG_HMEBOX_2 0x01D8
+#define REG_HMEBOX_3 0x01DC
+
+#define REG_LLT_INIT 0x01E0
+#define REG_BB_ACCEESS_CTRL 0x01E8
+#define REG_BB_ACCESS_DATA 0x01EC
+
+#define REG_HMEBOX_EXT_0 0x01F0
+#define REG_HMEBOX_EXT_1 0x01F4
+#define REG_HMEBOX_EXT_2 0x01F8
+#define REG_HMEBOX_EXT_3 0x01FC
+
+#define REG_RQPN 0x0200
+#define REG_FIFOPAGE 0x0204
+#define REG_TDECTRL 0x0208
+#define REG_TXDMA_OFFSET_CHK 0x020C
+#define REG_TXDMA_STATUS 0x0210
+#define REG_RQPN_NPQ 0x0214
+
+#define REG_RXDMA_AGG_PG_TH 0x0280
+#define REG_FW_UPD_RDPTR 0x0284 /* FW shall update this register before FW writes RXPKT_RELEASE_POLL to 1 */
+#define REG_RXDMA_CONTROL 0x0286 /* Control the RX DMA.*/
+#define REG_RXPKT_NUM 0x0287 /* The number of packets in RXPKTBUF. */
+
+#define REG_PCIE_CTRL_REG 0x0300
+#define REG_INT_MIG 0x0304
+#define REG_BCNQ_DESA 0x0308
+#define REG_HQ_DESA 0x0310
+#define REG_MGQ_DESA 0x0318
+#define REG_VOQ_DESA 0x0320
+#define REG_VIQ_DESA 0x0328
+#define REG_BEQ_DESA 0x0330
+#define REG_BKQ_DESA 0x0338
+#define REG_RX_DESA 0x0340
+
+#define REG_DBI_WDATA 0x0348
+#define REG_DBI_RDATA 0x034C
+#define REG_DBI_ADDR 0x0350
+#define REG_DBI_FLAG 0x0352
+#define REG_MDIO_WDATA 0x0354
+#define REG_MDIO_RDATA 0x0356
+#define REG_MDIO_CTL 0x0358
+#define REG_DBG_SEL 0x0360
+#define REG_PCIE_HRPWM 0x0361
+#define REG_PCIE_HCPWM 0x0363
+#define REG_UART_CTRL 0x0364
+#define REG_WATCH_DOG 0x0368
+#define REG_UART_TX_DESA 0x0370
+#define REG_UART_RX_DESA 0x0378
+
+
+#define REG_HDAQ_DESA_NODEF 0x0000
+#define REG_CMDQ_DESA_NODEF 0x0000
+
+#define REG_VOQ_INFORMATION 0x0400
+#define REG_VIQ_INFORMATION 0x0404
+#define REG_BEQ_INFORMATION 0x0408
+#define REG_BKQ_INFORMATION 0x040C
+#define REG_MGQ_INFORMATION 0x0410
+#define REG_HGQ_INFORMATION 0x0414
+#define REG_BCNQ_INFORMATION 0x0418
+#define REG_TXPKT_EMPTY 0x041A
+
+
+#define REG_CPU_MGQ_INFORMATION 0x041C
+#define REG_FWHW_TXQ_CTRL 0x0420
+#define REG_HWSEQ_CTRL 0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
+#define REG_TXPKTBUF_MGQ_BDNY 0x0425
+#define REG_MULTI_BCNQ_EN 0x0426
+#define REG_MULTI_BCNQ_OFFSET 0x0427
+#define REG_SPEC_SIFS 0x0428
+#define REG_RL 0x042A
+#define REG_DARFRC 0x0430
+#define REG_RARFRC 0x0438
+#define REG_RRSR 0x0440
+#define REG_ARFR0 0x0444
+#define REG_ARFR1 0x044C
+#define REG_CCK_CHECK 0x0454
+#define REG_AMPDU_MAX_TIME 0x0456
+#define REG_AGGLEN_LMT 0x0458
+#define REG_AMPDU_MIN_SPACE 0x045C
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D
+#define REG_FAST_EDCA_CTRL 0x0460
+#define REG_RD_RESP_PKT_TH 0x0463
+#define REG_INIRTS_RATE_SEL 0x0480
+#define REG_INIDATA_RATE_SEL 0x0484
+#define REG_ARFR2 0x048C
+#define REG_ARFR3 0x0494
+#define REG_POWER_STATUS 0x04A4
+#define REG_POWER_STAGE1 0x04B4
+#define REG_POWER_STAGE2 0x04B8
+#define REG_PKT_LIFE_TIME 0x04C0
+#define REG_STBC_SETTING 0x04C4
+#define REG_HT_SINGLE_AMPDU 0x04C7
+#define REG_PROT_MODE_CTRL 0x04C8
+#define REG_MAX_AGGR_NUM 0x04CA
+#define REG_BAR_MODE_CTRL 0x04CC
+#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
+#define REG_EARLY_MODE_CONTROL 0x04D0
+#define REG_NQOS_SEQ 0x04DC
+#define REG_QOS_SEQ 0x04DE
+#define REG_NEED_CPU_HANDLE 0x04E0
+#define REG_PKT_LOSE_RPT 0x04E1
+#define REG_PTCL_ERR_STATUS 0x04E2
+#define REG_TX_RPT_CTRL 0x04EC
+#define REG_TX_RPT_TIME 0x04F0
+#define REG_DUMMY 0x04FC
+
+#define REG_EDCA_VO_PARAM 0x0500
+#define REG_EDCA_VI_PARAM 0x0504
+#define REG_EDCA_BE_PARAM 0x0508
+#define REG_EDCA_BK_PARAM 0x050C
+#define REG_BCNTCFG 0x0510
+#define REG_PIFS 0x0512
+#define REG_RDG_PIFS 0x0513
+#define REG_SIFS_CTX 0x0514
+#define REG_SIFS_TRX 0x0516
+#define REG_AGGR_BREAK_TIME 0x051A
+#define REG_SLOT 0x051B
+#define REG_TX_PTCL_CTRL 0x0520
+#define REG_TXPAUSE 0x0522
+#define REG_DIS_TXREQ_CLR 0x0523
+#define REG_RD_CTRL 0x0524
+#define REG_TBTT_PROHIBIT 0x0540
+#define REG_RD_NAV_NXT 0x0544
+#define REG_NAV_PROT_LEN 0x0546
+#define REG_BCN_CTRL 0x0550
+#define REG_USTIME_TSF 0x0551
+#define REG_MBID_NUM 0x0552
+#define REG_DUAL_TSF_RST 0x0553
+#define REG_BCN_INTERVAL 0x0554
+#define REG_MBSSID_BCN_SPACE 0x0554
+#define REG_DRVERLYINT 0x0558
+#define REG_BCNDMATIM 0x0559
+#define REG_ATIMWND 0x055A
+#define REG_BCN_MAX_ERR 0x055D
+#define REG_RXTSF_OFFSET_CCK 0x055E
+#define REG_RXTSF_OFFSET_OFDM 0x055F
+#define REG_TSFTR 0x0560
+#define REG_INIT_TSFTR 0x0564
+#define REG_SECONDARY_CCA_CTRL 0x0577
+#define REG_PSTIMER 0x0580
+#define REG_TIMER0 0x0584
+#define REG_TIMER1 0x0588
+#define REG_ACMHWCTRL 0x05C0
+#define REG_ACMRSTCTRL 0x05C1
+#define REG_ACMAVG 0x05C2
+#define REG_VO_ADMTIME 0x05C4
+#define REG_VI_ADMTIME 0x05C6
+#define REG_BE_ADMTIME 0x05C8
+#define REG_EDCA_RANDOM_GEN 0x05CC
+#define REG_NOA_DESC_SEL 0x05CF
+#define REG_NOA_DESC_DURATION 0x05E0
+#define REG_NOA_DESC_INTERVAL 0x05E4
+#define REG_NOA_DESC_START 0x05E8
+#define REG_NOA_DESC_COUNT 0x05EC
+#define REG_SCH_TX_CMD 0x05F8
+
+#define REG_APSD_CTRL 0x0600
+#define REG_BWOPMODE 0x0603
+#define REG_TCR 0x0604
+#define REG_RCR 0x0608
+#define REG_RX_PKT_LIMIT 0x060C
+#define REG_RX_DLK_TIME 0x060D
+#define REG_RX_DRVINFO_SZ 0x060F
+
+#define REG_MACID 0x0610
+#define REG_BSSID 0x0618
+#define REG_MAR 0x0620
+#define REG_MBIDCAMCFG 0x0628
+
+#define REG_USTIME_EDCA 0x0638
+#define REG_MAC_SPEC_SIFS 0x063A
+#define REG_RESP_SIFS_CCK 0x063C
+#define REG_RESP_SIFS_OFDM 0x063E
+#define REG_ACKTO 0x0640
+#define REG_CTS2TO 0x0641
+#define REG_EIFS 0x0642
+
+#define REG_NAV_CTRL 0x0650
+#define REG_NAV_UPPER 0x0652
+#define REG_BACAMCMD 0x0654
+#define REG_BACAMCONTENT 0x0658
+#define REG_LBDLY 0x0660
+#define REG_FWDLY 0x0661
+#define REG_RXERR_RPT 0x0664
+#define REG_TRXPTCL_CTL 0x0668
+
+#define REG_CAMCMD 0x0670
+#define REG_CAMWRITE 0x0674
+#define REG_CAMREAD 0x0678
+#define REG_CAMDBG 0x067C
+#define REG_SECCFG 0x0680
+
+#define REG_WOW_CTRL 0x0690
+#define REG_PSSTATUS 0x0691
+#define REG_PS_RX_INFO 0x0692
+#define REG_UAPSD_TID 0x0693
+#define REG_LPNAV_CTRL 0x0694
+#define REG_WKFMCAM_NUM 0x0698
+#define REG_WKFMCAM_RWD 0x069C
+#define REG_RXFLTMAP0 0x06A0
+#define REG_RXFLTMAP1 0x06A2
+#define REG_RXFLTMAP2 0x06A4
+#define REG_BCN_PSR_RPT 0x06A8
+#define REG_CALB32K_CTRL 0x06AC
+#define REG_PKT_MON_CTRL 0x06B4
+#define REG_BT_COEX_TABLE 0x06C0
+#define REG_WMAC_RESP_TXINFO 0x06D8
+
+#define REG_USB_INFO 0xFE17
+#define REG_USB_SPECIAL_OPTION 0xFE55
+#define REG_USB_DMA_AGG_TO 0xFE5B
+#define REG_USB_AGG_TO 0xFE5C
+#define REG_USB_AGG_TH 0xFE5D
+
+#define REG_TEST_USB_TXQS 0xFE48
+#define REG_TEST_SIE_VID 0xFE60
+#define REG_TEST_SIE_PID 0xFE62
+#define REG_TEST_SIE_OPTIONAL 0xFE64
+#define REG_TEST_SIE_CHIRP_K 0xFE65
+#define REG_TEST_SIE_PHY 0xFE66
+#define REG_TEST_SIE_MAC_ADDR 0xFE70
+#define REG_TEST_SIE_STRING 0xFE80
+
+#define REG_NORMAL_SIE_VID 0xFE60
+#define REG_NORMAL_SIE_PID 0xFE62
+#define REG_NORMAL_SIE_OPTIONAL 0xFE64
+#define REG_NORMAL_SIE_EP 0xFE65
+#define REG_NORMAL_SIE_PHY 0xFE68
+#define REG_NORMAL_SIE_MAC_ADDR 0xFE70
+#define REG_NORMAL_SIE_STRING 0xFE80
+
+#define CR9346 REG_9346CR
+#define MSR (REG_CR + 2)
+#define ISR REG_HISR
+#define TSFR REG_TSFTR
+
+#define MACIDR0 REG_MACID
+#define MACIDR4 (REG_MACID + 4)
+
+#define PBP REG_PBP
+
+#define IDR0 MACIDR0
+#define IDR4 MACIDR4
+
+#define UNUSED_REGISTER 0x1BF
+#define DCAM UNUSED_REGISTER
+#define PSR UNUSED_REGISTER
+#define BBADDR UNUSED_REGISTER
+#define PHYDATAR UNUSED_REGISTER
+
+#define INVALID_BBRF_VALUE 0x12345678
+
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+#define CMDEEPROM_EN BIT(5)
+#define CMDEEPROM_SEL BIT(4)
+#define CMD9346CR_9356SEL BIT(4)
+#define AUTOLOAD_EEPROM (CMDEEPROM_EN|CMDEEPROM_SEL)
+#define AUTOLOAD_EFUSE CMDEEPROM_EN
+
+#define GPIOSEL_GPIO 0
+#define GPIOSEL_ENBT BIT(5)
+
+#define GPIO_IN REG_GPIO_PIN_CTRL
+#define GPIO_OUT (REG_GPIO_PIN_CTRL+1)
+#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2)
+#define GPIO_MOD (REG_GPIO_PIN_CTRL+3)
+
+/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */
+#define HSIMR_GPIO12_0_INT_EN BIT(0)
+#define HSIMR_SPS_OCP_INT_EN BIT(5)
+#define HSIMR_RON_INT_EN BIT(6)
+#define HSIMR_PDN_INT_EN BIT(7)
+#define HSIMR_GPIO9_INT_EN BIT(25)
+
+
+/*
+* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte)
+*/
+#define HSISR_GPIO12_0_INT BIT(0)
+#define HSISR_SPS_OCP_INT BIT(5)
+#define HSISR_RON_INT_EN BIT(6)
+#define HSISR_PDNINT BIT(7)
+#define HSISR_GPIO9_INT BIT(25)
+
+#define MSR_NOLINK 0x00
+#define MSR_ADHOC 0x01
+#define MSR_INFRA 0x02
+#define MSR_AP 0x03
+
+#define RRSR_RSC_OFFSET 21
+#define RRSR_SHORT_OFFSET 23
+#define RRSR_RSC_BW_40M 0x600000
+#define RRSR_RSC_UPSUBCHNL 0x400000
+#define RRSR_RSC_LOWSUBCHNL 0x200000
+#define RRSR_SHORT 0x800000
+#define RRSR_1M BIT(0)
+#define RRSR_2M BIT(1)
+#define RRSR_5_5M BIT(2)
+#define RRSR_11M BIT(3)
+#define RRSR_6M BIT(4)
+#define RRSR_9M BIT(5)
+#define RRSR_12M BIT(6)
+#define RRSR_18M BIT(7)
+#define RRSR_24M BIT(8)
+#define RRSR_36M BIT(9)
+#define RRSR_48M BIT(10)
+#define RRSR_54M BIT(11)
+#define RRSR_MCS0 BIT(12)
+#define RRSR_MCS1 BIT(13)
+#define RRSR_MCS2 BIT(14)
+#define RRSR_MCS3 BIT(15)
+#define RRSR_MCS4 BIT(16)
+#define RRSR_MCS5 BIT(17)
+#define RRSR_MCS6 BIT(18)
+#define RRSR_MCS7 BIT(19)
+#define BRSR_ACKSHORTPMB BIT(23)
+
+#define RATR_1M 0x00000001
+#define RATR_2M 0x00000002
+#define RATR_55M 0x00000004
+#define RATR_11M 0x00000008
+#define RATR_6M 0x00000010
+#define RATR_9M 0x00000020
+#define RATR_12M 0x00000040
+#define RATR_18M 0x00000080
+#define RATR_24M 0x00000100
+#define RATR_36M 0x00000200
+#define RATR_48M 0x00000400
+#define RATR_54M 0x00000800
+#define RATR_MCS0 0x00001000
+#define RATR_MCS1 0x00002000
+#define RATR_MCS2 0x00004000
+#define RATR_MCS3 0x00008000
+#define RATR_MCS4 0x00010000
+#define RATR_MCS5 0x00020000
+#define RATR_MCS6 0x00040000
+#define RATR_MCS7 0x00080000
+#define RATR_MCS8 0x00100000
+#define RATR_MCS9 0x00200000
+#define RATR_MCS10 0x00400000
+#define RATR_MCS11 0x00800000
+#define RATR_MCS12 0x01000000
+#define RATR_MCS13 0x02000000
+#define RATR_MCS14 0x04000000
+#define RATR_MCS15 0x08000000
+
+#define RATE_1M BIT(0)
+#define RATE_2M BIT(1)
+#define RATE_5_5M BIT(2)
+#define RATE_11M BIT(3)
+#define RATE_6M BIT(4)
+#define RATE_9M BIT(5)
+#define RATE_12M BIT(6)
+#define RATE_18M BIT(7)
+#define RATE_24M BIT(8)
+#define RATE_36M BIT(9)
+#define RATE_48M BIT(10)
+#define RATE_54M BIT(11)
+#define RATE_MCS0 BIT(12)
+#define RATE_MCS1 BIT(13)
+#define RATE_MCS2 BIT(14)
+#define RATE_MCS3 BIT(15)
+#define RATE_MCS4 BIT(16)
+#define RATE_MCS5 BIT(17)
+#define RATE_MCS6 BIT(18)
+#define RATE_MCS7 BIT(19)
+#define RATE_MCS8 BIT(20)
+#define RATE_MCS9 BIT(21)
+#define RATE_MCS10 BIT(22)
+#define RATE_MCS11 BIT(23)
+#define RATE_MCS12 BIT(24)
+#define RATE_MCS13 BIT(25)
+#define RATE_MCS14 BIT(26)
+#define RATE_MCS15 BIT(27)
+
+#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
+#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M |\
+ RATR_24M| RATR_36M | RATR_48M | RATR_54M)
+#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 |\
+ RATR_MCS3 | RATR_MCS4 | RATR_MCS5 |\
+ RATR_MCS6 | RATR_MCS7)
+#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 |\
+ RATR_MCS11| RATR_MCS12 | RATR_MCS13 |\
+ RATR_MCS14 | RATR_MCS15)
+
+#define BW_OPMODE_20MHZ BIT(2)
+#define BW_OPMODE_5G BIT(1)
+#define BW_OPMODE_11J BIT(0)
+
+#define CAM_VALID BIT(15)
+#define CAM_NOTVALID 0x0000
+#define CAM_USEDK BIT(5)
+
+#define CAM_NONE 0x0
+#define CAM_WEP40 0x01
+#define CAM_TKIP 0x02
+#define CAM_AES 0x04
+#define CAM_WEP104 0x05
+
+#define TOTAL_CAM_ENTRY 32
+#define HALF_CAM_ENTRY 16
+
+#define CAM_WRITE BIT(16)
+#define CAM_READ 0x00000000
+#define CAM_POLLINIG BIT(31)
+
+#define SCR_USEDK 0x01
+#define SCR_TXSEC_ENABLE 0x02
+#define SCR_RXSEC_ENABLE 0x04
+
+#define WOW_PMEN BIT(0)
+#define WOW_WOMEN BIT(1)
+#define WOW_MAGIC BIT(2)
+#define WOW_UWF BIT(3)
+
+/*********************************************
+* 8188 IMR/ISR bits
+**********************************************/
+#define IMR_DISABLED 0x0
+/* IMR DW0(0x0060-0063) Bit 0-31 */
+#define IMR_TXCCK BIT(30) /* TXRPT interrupt when CCX bit of the packet is set */
+#define IMR_PSTIMEOUT BIT(29) /* Power Save Time Out Interrupt */
+#define IMR_GTINT4 BIT(28) /* When GTIMER4 expires, this bit is set to 1 */
+#define IMR_GTINT3 BIT(27) /* When GTIMER3 expires, this bit is set to 1 */
+#define IMR_TBDER BIT(26) /* Transmit Beacon0 Error */
+#define IMR_TBDOK BIT(25) /* Transmit Beacon0 OK */
+#define IMR_TSF_BIT32_TOGGLE BIT(24) /* TSF Timer BIT32 toggle indication interrupt */
+#define IMR_BCNDMAINT0 BIT(20) /* Beacon DMA Interrupt 0 */
+#define IMR_BCNDOK0 BIT(16) /* Beacon Queue DMA OK0 */
+#define IMR_HSISR_IND_ON_INT BIT(15) /* HSISR Indicator (HSIMR & HSISR is true, this bit is set to 1) */
+#define IMR_BCNDMAINT_E BIT(14) /* Beacon DMA Interrupt Extension for Win7 */
+#define IMR_ATIMEND BIT(12) /* CTWindow End or ATIM Window End */
+#define IMR_HISR1_IND_INT BIT(11) /* HISR1 Indicator (HISR1 & HIMR1 is true, this bit is set to 1)*/
+#define IMR_C2HCMD BIT(10) /* CPU to Host Command INT Status, Write 1 clear */
+#define IMR_CPWM2 BIT(9) /* CPU power Mode exchange INT Status, Write 1 clear */
+#define IMR_CPWM BIT(8) /* CPU power Mode exchange INT Status, Write 1 clear */
+#define IMR_HIGHDOK BIT(7) /* High Queue DMA OK */
+#define IMR_MGNTDOK BIT(6) /* Management Queue DMA OK */
+#define IMR_BKDOK BIT(5) /* AC_BK DMA OK */
+#define IMR_BEDOK BIT(4) /* AC_BE DMA OK */
+#define IMR_VIDOK BIT(3) /* AC_VI DMA OK */
+#define IMR_VODOK BIT(2) /* AC_VO DMA OK */
+#define IMR_RDU BIT(1) /* Rx Descriptor Unavailable */
+#define IMR_ROK BIT(0) /* Receive DMA OK */
+
+/* IMR DW1(0x00B4-00B7) Bit 0-31 */
+#define IMR_BCNDMAINT7 BIT(27) /* Beacon DMA Interrupt 7 */
+#define IMR_BCNDMAINT6 BIT(26) /* Beacon DMA Interrupt 6 */
+#define IMR_BCNDMAINT5 BIT(25) /* Beacon DMA Interrupt 5 */
+#define IMR_BCNDMAINT4 BIT(24) /* Beacon DMA Interrupt 4 */
+#define IMR_BCNDMAINT3 BIT(23) /* Beacon DMA Interrupt 3 */
+#define IMR_BCNDMAINT2 BIT(22) /* Beacon DMA Interrupt 2 */
+#define IMR_BCNDMAINT1 BIT(21) /* Beacon DMA Interrupt 1 */
+#define IMR_BCNDOK7 BIT(20) /* Beacon Queue DMA OK Interrupt 7 */
+#define IMR_BCNDOK6 BIT(19) /* Beacon Queue DMA OK Interrupt 6 */
+#define IMR_BCNDOK5 BIT(18) /* Beacon Queue DMA OK Interrupt 5 */
+#define IMR_BCNDOK4 BIT(17) /* Beacon Queue DMA OK Interrupt 4 */
+#define IMR_BCNDOK3 BIT(16) /* Beacon Queue DMA OK Interrupt 3 */
+#define IMR_BCNDOK2 BIT(15) /* Beacon Queue DMA OK Interrupt 2 */
+#define IMR_BCNDOK1 BIT(14) /* Beacon Queue DMA OK Interrupt 1 */
+#define IMR_ATIMEND_E BIT(13) /* ATIM Window End Extension for Win7 */
+#define IMR_TXERR BIT(11) /* Tx Error Flag Interrupt Status, write 1 clear. */
+#define IMR_RXERR BIT(10) /* Rx Error Flag INT Status, Write 1 clear */
+#define IMR_TXFOVW BIT(9) /* Transmit FIFO Overflow */
+#define IMR_RXFOVW BIT(8) /* Receive FIFO Overflow */
+
+
+#define HWSET_MAX_SIZE 512
+#define EFUSE_MAX_SECTION 64
+#define EFUSE_REAL_CONTENT_LEN 256
+#define EFUSE_OOB_PROTECT_BYTES 18 /* PG data excludes the header, 7 dummy bytes from CP test and 1 reserved byte. */
+
+
+#define EEPROM_DEFAULT_TSSI 0x0
+#define EEPROM_DEFAULT_TXPOWERDIFF 0x0
+#define EEPROM_DEFAULT_CRYSTALCAP 0x5
+#define EEPROM_DEFAULT_BOARDTYPE 0x02
+#define EEPROM_DEFAULT_TXPOWER 0x1010
+#define EEPROM_DEFAULT_HT2T_TXPWR 0x10
+
+#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
+#define EEPROM_DEFAULT_THERMALMETER 0x18
+#define EEPROM_DEFAULT_ANTTXPOWERDIFF 0x0
+#define EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP 0x5
+#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22
+#define EEPROM_DEFAULT_HT40_2SDIFF 0x0
+#define EEPROM_DEFAULT_HT20_DIFF 2
+#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
+#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0
+#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0
+
+#define RF_OPTION1 0x79
+#define RF_OPTION2 0x7A
+#define RF_OPTION3 0x7B
+#define RF_OPTION4 0xC3
+
+#define EEPROM_DEFAULT_PID 0x1234
+#define EEPROM_DEFAULT_VID 0x5678
+#define EEPROM_DEFAULT_CUSTOMERID 0xAB
+#define EEPROM_DEFAULT_SUBCUSTOMERID 0xCD
+#define EEPROM_DEFAULT_VERSION 0
+
+#define EEPROM_CHANNEL_PLAN_FCC 0x0
+#define EEPROM_CHANNEL_PLAN_IC 0x1
+#define EEPROM_CHANNEL_PLAN_ETSI 0x2
+#define EEPROM_CHANNEL_PLAN_SPAIN 0x3
+#define EEPROM_CHANNEL_PLAN_FRANCE 0x4
+#define EEPROM_CHANNEL_PLAN_MKK 0x5
+#define EEPROM_CHANNEL_PLAN_MKK1 0x6
+#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7
+#define EEPROM_CHANNEL_PLAN_TELEC 0x8
+#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9
+#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
+#define EEPROM_CHANNEL_PLAN_NCC 0xB
+#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
+
+#define EEPROM_CID_DEFAULT 0x0
+#define EEPROM_CID_TOSHIBA 0x4
+#define EEPROM_CID_CCX 0x10
+#define EEPROM_CID_QMI 0x0D
+#define EEPROM_CID_WHQL 0xFE
+
+#define RTL_EEPROM_ID 0x8129
+
+#define EEPROM_HPON 0x02
+#define EEPROM_CLK 0x06
+#define EEPROM_TESTR 0x08
+
+
+#define EEPROM_TXPOWERCCK 0x10
+#define EEPROM_TXPOWERHT40_1S 0x16
+#define EEPROM_TXPOWERHT20DIFF 0x1B
+#define EEPROM_TXPOWER_OFDMDIFF 0x1B
+
+
+
+#define EEPROM_TX_PWR_INX 0x10
+
+#define EEPROM_CHANNELPLAN 0xB8
+#define EEPROM_XTAL_8821AE 0xB9
+#define EEPROM_THERMAL_METER 0xBA
+#define EEPROM_IQK_LCK_88E 0xBB
+
+#define EEPROM_RF_BOARD_OPTION 0xC1
+#define EEPROM_RF_FEATURE_OPTION_88E 0xC2
+#define EEPROM_RF_BT_SETTING 0xC3
+#define EEPROM_VERSION 0xC4
+#define EEPROM_CUSTOMER_ID 0xC5
+#define EEPROM_RF_ANTENNA_OPT_88E 0xC9
+
+#define EEPROM_MAC_ADDR 0xD0
+#define EEPROM_VID 0xD6
+#define EEPROM_DID 0xD8
+#define EEPROM_SVID 0xDA
+#define EEPROM_SMID 0xDC
+
+#define STOPBECON BIT(6)
+#define STOPHIGHT BIT(5)
+#define STOPMGT BIT(4)
+#define STOPVO BIT(3)
+#define STOPVI BIT(2)
+#define STOPBE BIT(1)
+#define STOPBK BIT(0)
+
+#define RCR_APPFCS BIT(31)
+#define RCR_APP_MIC BIT(30)
+#define RCR_APP_ICV BIT(29)
+#define RCR_APP_PHYST_RXFF BIT(28)
+#define RCR_APP_BA_SSN BIT(27)
+#define RCR_NONQOS_VHT BIT(26)
+#define RCR_ENMBID BIT(24)
+#define RCR_LSIGEN BIT(23)
+#define RCR_MFBEN BIT(22)
+#define RCR_HTC_LOC_CTRL BIT(14)
+#define RCR_AMF BIT(13)
+#define RCR_ACF BIT(12)
+#define RCR_ADF BIT(11)
+#define RCR_AICV BIT(9)
+#define RCR_ACRC32 BIT(8)
+#define RCR_CBSSID_BCN BIT(7)
+#define RCR_CBSSID_DATA BIT(6)
+#define RCR_CBSSID RCR_CBSSID_DATA
+#define RCR_APWRMGT BIT(5)
+#define RCR_ADD3 BIT(4)
+#define RCR_AB BIT(3)
+#define RCR_AM BIT(2)
+#define RCR_APM BIT(1)
+#define RCR_AAP BIT(0)
+#define RCR_MXDMA_OFFSET 8
+#define RCR_FIFO_OFFSET 13
+
+#define RSV_CTRL 0x001C
+#define RD_CTRL 0x0524
+
+#define REG_USB_INFO 0xFE17
+#define REG_USB_SPECIAL_OPTION 0xFE55
+#define REG_USB_DMA_AGG_TO 0xFE5B
+#define REG_USB_AGG_TO 0xFE5C
+#define REG_USB_AGG_TH 0xFE5D
+
+#define REG_USB_VID 0xFE60
+#define REG_USB_PID 0xFE62
+#define REG_USB_OPTIONAL 0xFE64
+#define REG_USB_CHIRP_K 0xFE65
+#define REG_USB_PHY 0xFE66
+#define REG_USB_MAC_ADDR 0xFE70
+#define REG_USB_HRPWM 0xFE58
+#define REG_USB_HCPWM 0xFE57
+
+#define SW18_FPWM BIT(3)
+
+#define ISO_MD2PP BIT(0)
+#define ISO_UA2USB BIT(1)
+#define ISO_UD2CORE BIT(2)
+#define ISO_PA2PCIE BIT(3)
+#define ISO_PD2CORE BIT(4)
+#define ISO_IP2MAC BIT(5)
+#define ISO_DIOP BIT(6)
+#define ISO_DIOE BIT(7)
+#define ISO_EB2CORE BIT(8)
+#define ISO_DIOR BIT(9)
+
+#define PWC_EV25V BIT(14)
+#define PWC_EV12V BIT(15)
+
+#define FEN_BBRSTB BIT(0)
+#define FEN_BB_GLB_RSTN BIT(1)
+#define FEN_USBA BIT(2)
+#define FEN_UPLL BIT(3)
+#define FEN_USBD BIT(4)
+#define FEN_DIO_PCIE BIT(5)
+#define FEN_PCIEA BIT(6)
+#define FEN_PPLL BIT(7)
+#define FEN_PCIED BIT(8)
+#define FEN_DIOE BIT(9)
+#define FEN_CPUEN BIT(10)
+#define FEN_DCORE BIT(11)
+#define FEN_ELDR BIT(12)
+#define FEN_DIO_RF BIT(13)
+#define FEN_HWPDN BIT(14)
+#define FEN_MREGEN BIT(15)
+
+#define PFM_LDALL BIT(0)
+#define PFM_ALDN BIT(1)
+#define PFM_LDKP BIT(2)
+#define PFM_WOWL BIT(3)
+#define EnPDN BIT(4)
+#define PDN_PL BIT(5)
+#define APFM_ONMAC BIT(8)
+#define APFM_OFF BIT(9)
+#define APFM_RSM BIT(10)
+#define AFSM_HSUS BIT(11)
+#define AFSM_PCIE BIT(12)
+#define APDM_MAC BIT(13)
+#define APDM_HOST BIT(14)
+#define APDM_HPDN BIT(15)
+#define RDY_MACON BIT(16)
+#define SUS_HOST BIT(17)
+#define ROP_ALD BIT(20)
+#define ROP_PWR BIT(21)
+#define ROP_SPS BIT(22)
+#define SOP_MRST BIT(25)
+#define SOP_FUSE BIT(26)
+#define SOP_ABG BIT(27)
+#define SOP_AMB BIT(28)
+#define SOP_RCK BIT(29)
+#define SOP_A8M BIT(30)
+#define XOP_BTCK BIT(31)
+
+#define ANAD16V_EN BIT(0)
+#define ANA8M BIT(1)
+#define MACSLP BIT(4)
+#define LOADER_CLK_EN BIT(5)
+#define _80M_SSC_DIS BIT(7)
+#define _80M_SSC_EN_HO BIT(8)
+#define PHY_SSC_RSTB BIT(9)
+#define SEC_CLK_EN BIT(10)
+#define MAC_CLK_EN BIT(11)
+#define SYS_CLK_EN BIT(12)
+#define RING_CLK_EN BIT(13)
+
+#define BOOT_FROM_EEPROM BIT(4)
+#define EEPROM_EN BIT(5)
+
+#define AFE_BGEN BIT(0)
+#define AFE_MBEN BIT(1)
+#define MAC_ID_EN BIT(7)
+
+#define WLOCK_ALL BIT(0)
+#define WLOCK_00 BIT(1)
+#define WLOCK_04 BIT(2)
+#define WLOCK_08 BIT(3)
+#define WLOCK_40 BIT(4)
+#define R_DIS_PRST_0 BIT(5)
+#define R_DIS_PRST_1 BIT(6)
+#define LOCK_ALL_EN BIT(7)
+
+#define RF_EN BIT(0)
+#define RF_RSTB BIT(1)
+#define RF_SDMRSTB BIT(2)
+
+#define LDA15_EN BIT(0)
+#define LDA15_STBY BIT(1)
+#define LDA15_OBUF BIT(2)
+#define LDA15_REG_VOS BIT(3)
+#define _LDA15_VOADJ(x) (((x) & 0x7) << 4)
+
+#define LDV12_EN BIT(0)
+#define LDV12_SDBY BIT(1)
+#define LPLDO_HSM BIT(2)
+#define LPLDO_LSM_DIS BIT(3)
+#define _LDV12_VADJ(x) (((x) & 0xF) << 4)
+
+#define XTAL_EN BIT(0)
+#define XTAL_BSEL BIT(1)
+#define _XTAL_BOSC(x) (((x) & 0x3) << 2)
+#define _XTAL_CADJ(x) (((x) & 0xF) << 4)
+#define XTAL_GATE_USB BIT(8)
+#define _XTAL_USB_DRV(x) (((x) & 0x3) << 9)
+#define XTAL_GATE_AFE BIT(11)
+#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12)
+#define XTAL_RF_GATE BIT(14)
+#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15)
+#define XTAL_GATE_DIG BIT(17)
+#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18)
+#define XTAL_BT_GATE BIT(20)
+#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21)
+#define _XTAL_GPIO(x) (((x) & 0x7) << 23)
+
+#define CKDLY_AFE BIT(26)
+#define CKDLY_USB BIT(27)
+#define CKDLY_DIG BIT(28)
+#define CKDLY_BT BIT(29)
+
+#define APLL_EN BIT(0)
+#define APLL_320_EN BIT(1)
+#define APLL_FREF_SEL BIT(2)
+#define APLL_EDGE_SEL BIT(3)
+#define APLL_WDOGB BIT(4)
+#define APLL_LPFEN BIT(5)
+
+#define APLL_REF_CLK_13MHZ 0x1
+#define APLL_REF_CLK_19_2MHZ 0x2
+#define APLL_REF_CLK_20MHZ 0x3
+#define APLL_REF_CLK_25MHZ 0x4
+#define APLL_REF_CLK_26MHZ 0x5
+#define APLL_REF_CLK_38_4MHZ 0x6
+#define APLL_REF_CLK_40MHZ 0x7
+
+#define APLL_320EN BIT(14)
+#define APLL_80EN BIT(15)
+#define APLL_1MEN BIT(24)
+
+#define ALD_EN BIT(18)
+#define EF_PD BIT(19)
+#define EF_FLAG BIT(31)
+
+#define EF_TRPT BIT(7)
+#define LDOE25_EN BIT(31)
+
+#define RSM_EN BIT(0)
+#define Timer_EN BIT(4)
+
+#define TRSW0EN BIT(2)
+#define TRSW1EN BIT(3)
+#define EROM_EN BIT(4)
+#define EnBT BIT(5)
+#define EnUart BIT(8)
+#define Uart_910 BIT(9)
+#define EnPMAC BIT(10)
+#define SIC_SWRST BIT(11)
+#define EnSIC BIT(12)
+#define SIC_23 BIT(13)
+#define EnHDP BIT(14)
+#define SIC_LBK BIT(15)
+
+#define LED0PL BIT(4)
+#define LED1PL BIT(12)
+#define LED0DIS BIT(7)
+
+#define MCUFWDL_EN BIT(0)
+#define MCUFWDL_RDY BIT(1)
+#define FWDL_CHKSUM_RPT BIT(2)
+#define MACINI_RDY BIT(3)
+#define BBINI_RDY BIT(4)
+#define RFINI_RDY BIT(5)
+#define WINTINI_RDY BIT(6)
+#define CPRST BIT(23)
+
+#define XCLK_VLD BIT(0)
+#define ACLK_VLD BIT(1)
+#define UCLK_VLD BIT(2)
+#define PCLK_VLD BIT(3)
+#define PCIRSTB BIT(4)
+#define V15_VLD BIT(5)
+#define TRP_B15V_EN BIT(7)
+#define SIC_IDLE BIT(8)
+#define BD_MAC2 BIT(9)
+#define BD_MAC1 BIT(10)
+#define IC_MACPHY_MODE BIT(11)
+#define VENDOR_ID BIT(19)
+#define PAD_HWPD_IDN BIT(22)
+#define TRP_VAUX_EN BIT(23)
+#define TRP_BT_EN BIT(24)
+#define BD_PKG_SEL BIT(25)
+#define BD_HCI_SEL BIT(26)
+#define TYPE_ID BIT(27)
+
+#define CHIP_VER_RTL_MASK 0xF000
+#define CHIP_VER_RTL_SHIFT 12
+
+#define REG_LBMODE (REG_CR + 3)
+
+#define HCI_TXDMA_EN BIT(0)
+#define HCI_RXDMA_EN BIT(1)
+#define TXDMA_EN BIT(2)
+#define RXDMA_EN BIT(3)
+#define PROTOCOL_EN BIT(4)
+#define SCHEDULE_EN BIT(5)
+#define MACTXEN BIT(6)
+#define MACRXEN BIT(7)
+#define ENSWBCN BIT(8)
+#define ENSEC BIT(9)
+
+#define _NETTYPE(x) (((x) & 0x3) << 16)
+#define MASK_NETTYPE 0x30000
+#define NT_NO_LINK 0x0
+#define NT_LINK_AD_HOC 0x1
+#define NT_LINK_AP 0x2
+#define NT_AS_AP 0x3
+
+#define _LBMODE(x) (((x) & 0xF) << 24)
+#define MASK_LBMODE 0xF000000
+#define LOOPBACK_NORMAL 0x0
+#define LOOPBACK_IMMEDIATELY 0xB
+#define LOOPBACK_MAC_DELAY 0x3
+#define LOOPBACK_PHY 0x1
+#define LOOPBACK_DMA 0x7
+
+#define GET_RX_PAGE_SIZE(value) ((value) & 0xF)
+#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4)
+#define _PSRX_MASK 0xF
+#define _PSTX_MASK 0xF0
+#define _PSRX(x) (x)
+#define _PSTX(x) ((x) << 4)
+
+#define PBP_64 0x0
+#define PBP_128 0x1
+#define PBP_256 0x2
+#define PBP_512 0x3
+#define PBP_1024 0x4
+
+#define RXDMA_ARBBW_EN BIT(0)
+#define RXSHFT_EN BIT(1)
+#define RXDMA_AGG_EN BIT(2)
+#define QS_VO_QUEUE BIT(8)
+#define QS_VI_QUEUE BIT(9)
+#define QS_BE_QUEUE BIT(10)
+#define QS_BK_QUEUE BIT(11)
+#define QS_MANAGER_QUEUE BIT(12)
+#define QS_HIGH_QUEUE BIT(13)
+
+#define HQSEL_VOQ BIT(0)
+#define HQSEL_VIQ BIT(1)
+#define HQSEL_BEQ BIT(2)
+#define HQSEL_BKQ BIT(3)
+#define HQSEL_MGTQ BIT(4)
+#define HQSEL_HIQ BIT(5)
+
+#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
+#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
+#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
+#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8 )
+#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6 )
+#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4 )
+
+#define QUEUE_LOW 1
+#define QUEUE_NORMAL 2
+#define QUEUE_HIGH 3
+
+#define _LLT_NO_ACTIVE 0x0
+#define _LLT_WRITE_ACCESS 0x1
+#define _LLT_READ_ACCESS 0x2
+
+#define _LLT_INIT_DATA(x) ((x) & 0xFF)
+#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
+#define _LLT_OP(x) (((x) & 0x3) << 30)
+#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
+
+#define BB_WRITE_READ_MASK (BIT(31) | BIT(30))
+#define BB_WRITE_EN BIT(30)
+#define BB_READ_EN BIT(31)
+
+#define _HPQ(x) ((x) & 0xFF)
+#define _LPQ(x) (((x) & 0xFF) << 8)
+#define _PUBQ(x) (((x) & 0xFF) << 16)
+#define _NPQ(x) ((x) & 0xFF)
+
+#define HPQ_PUBLIC_DIS BIT(24)
+#define LPQ_PUBLIC_DIS BIT(25)
+#define LD_RQPN BIT(31)
+
+#define BCN_VALID BIT(16)
+#define BCN_HEAD(x) (((x) & 0xFF) << 8)
+#define BCN_HEAD_MASK 0xFF00
+
+#define BLK_DESC_NUM_SHIFT 4
+#define BLK_DESC_NUM_MASK 0xF
+
+#define DROP_DATA_EN BIT(9)
+
+#define EN_AMPDU_RTY_NEW BIT(7)
+
+#define _INIRTSMCS_SEL(x) ((x) & 0x3F)
+
+#define _SPEC_SIFS_CCK(x) ((x) & 0xFF)
+#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8)
+
+#define RATE_REG_BITMAP_ALL 0xFFFFF
+
+#define _RRSC_BITMAP(x) ((x) & 0xFFFFF)
+
+#define _RRSR_RSC(x) (((x) & 0x3) << 21)
+#define RRSR_RSC_RESERVED 0x0
+#define RRSR_RSC_UPPER_SUBCHANNEL 0x1
+#define RRSR_RSC_LOWER_SUBCHANNEL 0x2
+#define RRSR_RSC_DUPLICATE_MODE 0x3
+
+#define USE_SHORT_G1 BIT(20)
+
+#define _AGGLMT_MCS0(x) ((x) & 0xF)
+#define _AGGLMT_MCS1(x) (((x) & 0xF) << 4)
+#define _AGGLMT_MCS2(x) (((x) & 0xF) << 8)
+#define _AGGLMT_MCS3(x) (((x) & 0xF) << 12)
+#define _AGGLMT_MCS4(x) (((x) & 0xF) << 16)
+#define _AGGLMT_MCS5(x) (((x) & 0xF) << 20)
+#define _AGGLMT_MCS6(x) (((x) & 0xF) << 24)
+#define _AGGLMT_MCS7(x) (((x) & 0xF) << 28)
+
+#define RETRY_LIMIT_SHORT_SHIFT 8
+#define RETRY_LIMIT_LONG_SHIFT 0
+
+#define _DARF_RC1(x) ((x) & 0x1F)
+#define _DARF_RC2(x) (((x) & 0x1F) << 8)
+#define _DARF_RC3(x) (((x) & 0x1F) << 16)
+#define _DARF_RC4(x) (((x) & 0x1F) << 24)
+#define _DARF_RC5(x) ((x) & 0x1F)
+#define _DARF_RC6(x) (((x) & 0x1F) << 8)
+#define _DARF_RC7(x) (((x) & 0x1F) << 16)
+#define _DARF_RC8(x) (((x) & 0x1F) << 24)
+
+#define _RARF_RC1(x) ((x) & 0x1F)
+#define _RARF_RC2(x) (((x) & 0x1F) << 8)
+#define _RARF_RC3(x) (((x) & 0x1F) << 16)
+#define _RARF_RC4(x) (((x) & 0x1F) << 24)
+#define _RARF_RC5(x) ((x) & 0x1F)
+#define _RARF_RC6(x) (((x) & 0x1F) << 8)
+#define _RARF_RC7(x) (((x) & 0x1F) << 16)
+#define _RARF_RC8(x) (((x) & 0x1F) << 24)
+
+#define AC_PARAM_TXOP_LIMIT_OFFSET 16
+#define AC_PARAM_ECW_MAX_OFFSET 12
+#define AC_PARAM_ECW_MIN_OFFSET 8
+#define AC_PARAM_AIFS_OFFSET 0
+
+#define _AIFS(x) (x)
+#define _ECW_MAX_MIN(x) ((x) << 8)
+#define _TXOP_LIMIT(x) ((x) << 16)
+
+#define _BCNIFS(x) ((x) & 0xFF)
+#define _BCNECW(x) ((((x) & 0xF))<< 8)
+
+#define _LRL(x) ((x) & 0x3F)
+#define _SRL(x) (((x) & 0x3F) << 8)
+
+#define _SIFS_CCK_CTX(x) ((x) & 0xFF)
+#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8);
+
+#define _SIFS_OFDM_CTX(x) ((x) & 0xFF)
+#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8);
+
+#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8)
+
+#define DIS_EDCA_CNT_DWN BIT(11)
+
+#define EN_MBSSID BIT(1)
+#define EN_TXBCN_RPT BIT(2)
+#define EN_BCN_FUNCTION BIT(3)
+
+#define TSFTR_RST BIT(0)
+#define TSFTR1_RST BIT(1)
+
+#define STOP_BCNQ BIT(6)
+
+#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
+#define DIS_TSF_UDT0_TEST_CHIP BIT(5)
+
+#define AcmHw_HwEn BIT(0)
+#define AcmHw_BeqEn BIT(1)
+#define AcmHw_ViqEn BIT(2)
+#define AcmHw_VoqEn BIT(3)
+#define AcmHw_BeqStatus BIT(4)
+#define AcmHw_ViqStatus BIT(5)
+#define AcmHw_VoqStatus BIT(6)
+
+#define APSDOFF BIT(6)
+#define APSDOFF_STATUS BIT(7)
+
+#define BW_20MHZ BIT(2)
+
+#define RATE_BITMAP_ALL 0xFFFFF
+
+#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1
+
+#define TSFRST BIT(0)
+#define DIS_GCLK BIT(1)
+#define PAD_SEL BIT(2)
+#define PWR_ST BIT(6)
+#define PWRBIT_OW_EN BIT(7)
+#define ACRC BIT(8)
+#define CFENDFORM BIT(9)
+#define ICV BIT(10)
+
+#define AAP BIT(0)
+#define APM BIT(1)
+#define AM BIT(2)
+#define AB BIT(3)
+#define ADD3 BIT(4)
+#define APWRMGT BIT(5)
+#define CBSSID BIT(6)
+#define CBSSID_DATA BIT(6)
+#define CBSSID_BCN BIT(7)
+#define ACRC32 BIT(8)
+#define AICV BIT(9)
+#define ADF BIT(11)
+#define ACF BIT(12)
+#define AMF BIT(13)
+#define HTC_LOC_CTRL BIT(14)
+#define UC_DATA_EN BIT(16)
+#define BM_DATA_EN BIT(17)
+#define MFBEN BIT(22)
+#define LSIGEN BIT(23)
+#define EnMBID BIT(24)
+#define APP_BASSN BIT(27)
+#define APP_PHYSTS BIT(28)
+#define APP_ICV BIT(29)
+#define APP_MIC BIT(30)
+#define APP_FCS BIT(31)
+
+#define _MIN_SPACE(x) ((x) & 0x7)
+#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3)
+
+#define RXERR_TYPE_OFDM_PPDU 0
+#define RXERR_TYPE_OFDM_FALSE_ALARM 1
+#define RXERR_TYPE_OFDM_MPDU_OK 2
+#define RXERR_TYPE_OFDM_MPDU_FAIL 3
+#define RXERR_TYPE_CCK_PPDU 4
+#define RXERR_TYPE_CCK_FALSE_ALARM 5
+#define RXERR_TYPE_CCK_MPDU_OK 6
+#define RXERR_TYPE_CCK_MPDU_FAIL 7
+#define RXERR_TYPE_HT_PPDU 8
+#define RXERR_TYPE_HT_FALSE_ALARM 9
+#define RXERR_TYPE_HT_MPDU_TOTAL 10
+#define RXERR_TYPE_HT_MPDU_OK 11
+#define RXERR_TYPE_HT_MPDU_FAIL 12
+#define RXERR_TYPE_RX_FULL_DROP 15
+
+#define RXERR_COUNTER_MASK 0xFFFFF
+#define RXERR_RPT_RST BIT(27)
+#define _RXERR_RPT_SEL(type) ((type) << 28)
+
+#define SCR_TxUseDK BIT(0)
+#define SCR_RxUseDK BIT(1)
+#define SCR_TxEncEnable BIT(2)
+#define SCR_RxDecEnable BIT(3)
+#define SCR_SKByA2 BIT(4)
+#define SCR_NoSKMC BIT(5)
+#define SCR_TXBCUSEDK BIT(6)
+#define SCR_RXBCUSEDK BIT(7)
+
+#define XCLK_VLD BIT(0)
+#define ACLK_VLD BIT(1)
+#define UCLK_VLD BIT(2)
+#define PCLK_VLD BIT(3)
+#define PCIRSTB BIT(4)
+#define V15_VLD BIT(5)
+#define TRP_B15V_EN BIT(7)
+#define SIC_IDLE BIT(8)
+#define BD_MAC2 BIT(9)
+#define BD_MAC1 BIT(10)
+#define IC_MACPHY_MODE BIT(11)
+#define BT_FUNC BIT(16)
+#define VENDOR_ID BIT(19)
+#define PAD_HWPD_IDN BIT(22)
+#define TRP_VAUX_EN BIT(23)
+#define TRP_BT_EN BIT(24)
+#define BD_PKG_SEL BIT(25)
+#define BD_HCI_SEL BIT(26)
+#define TYPE_ID BIT(27)
+
+#define USB_IS_HIGH_SPEED 0
+#define USB_IS_FULL_SPEED 1
+#define USB_SPEED_MASK BIT(5)
+
+#define USB_NORMAL_SIE_EP_MASK 0xF
+#define USB_NORMAL_SIE_EP_SHIFT 4
+
+#define USB_TEST_EP_MASK 0x30
+#define USB_TEST_EP_SHIFT 4
+
+#define USB_AGG_EN BIT(3)
+
+#define MAC_ADDR_LEN 6
+#define LAST_ENTRY_OF_TX_PKT_BUFFER 175/*255 88e*/
+
+#define POLLING_LLT_THRESHOLD 20
+#define POLLING_READY_TIMEOUT_COUNT 3000
+
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
+#define EPROM_CMD_CONFIG 0x3
+#define EPROM_CMD_LOAD 1
+
+#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE
+
+#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
+
+#define RA_LSSIWRITE_8821A 0xc90
+#define RB_LSSIWRITE_8821A 0xe90
+
+#define RA_PIREAD_8821A 0xd04
+#define RB_PIREAD_8821A 0xd44
+#define RA_SIREAD_8821A 0xd08
+#define RB_SIREAD_8821A 0xd48
+
+#define RPMAC_RESET 0x100
+#define RPMAC_TXSTART 0x104
+#define RPMAC_TXLEGACYSIG 0x108
+#define RPMAC_TXHTSIG1 0x10c
+#define RPMAC_TXHTSIG2 0x110
+#define RPMAC_PHYDEBUG 0x114
+#define RPMAC_TXPACKETNUM 0x118
+#define RPMAC_TXIDLE 0x11c
+#define RPMAC_TXMACHEADER0 0x120
+#define RPMAC_TXMACHEADER1 0x124
+#define RPMAC_TXMACHEADER2 0x128
+#define RPMAC_TXMACHEADER3 0x12c
+#define RPMAC_TXMACHEADER4 0x130
+#define RPMAC_TXMACHEADER5 0x134
+#define RPMAC_TXDADATYPE 0x138
+#define RPMAC_TXRANDOMSEED 0x13c
+#define RPMAC_CCKPLCPPREAMBLE 0x140
+#define RPMAC_CCKPLCPHEADER 0x144
+#define RPMAC_CCKCRC16 0x148
+#define RPMAC_OFDMRXCRC32OK 0x170
+#define RPMAC_OFDMRXCRC32Er 0x174
+#define RPMAC_OFDMRXPARITYER 0x178
+#define RPMAC_OFDMRXCRC8ER 0x17c
+#define RPMAC_CCKCRXRC16ER 0x180
+#define RPMAC_CCKCRXRC32ER 0x184
+#define RPMAC_CCKCRXRC32OK 0x188
+#define RPMAC_TXSTATUS 0x18c
+
+#define RFPGA0_RFMOD 0x800
+
+#define RFPGA0_TXINFO 0x804
+#define RFPGA0_PSDFUNCTION 0x808
+
+#define RFPGA0_TXGAINSTAGE 0x80c
+
+#define RFPGA0_RFTIMING1 0x810
+#define RFPGA0_RFTIMING2 0x814
+
+#define RFPGA0_XA_HSSIPARAMETER1 0x820
+#define RFPGA0_XA_HSSIPARAMETER2 0x824
+#define RFPGA0_XB_HSSIPARAMETER1 0x828
+#define RFPGA0_XB_HSSIPARAMETER2 0x82c
+#define RCCAONSEC 0x838
+
+#define RFPGA0_XA_LSSIPARAMETER 0x840
+#define RFPGA0_XB_LSSIPARAMETER 0x844
+#define RL1PEAKTH 0x848
+
+#define RFPGA0_RFWAKEUPPARAMETER 0x850
+#define RFPGA0_RFSLEEPUPPARAMETER 0x854
+
+#define RFPGA0_XAB_SWITCHCONTROL 0x858
+#define RFPGA0_XCD_SWITCHCONTROL 0x85c
+
+#define RFPGA0_XA_RFINTERFACEOE 0x860
+#define RFC_AREA 0x860
+#define RFPGA0_XB_RFINTERFACEOE 0x864
+
+#define RFPGA0_XAB_RFINTERFACESW 0x870
+#define RFPGA0_XCD_RFINTERFACESW 0x874
+
+#define rFPGA0_XAB_RFPARAMETER 0x878
+#define rFPGA0_XCD_RFPARAMETER 0x87c
+
+#define RFPGA0_ANALOGPARAMETER1 0x880
+#define RFPGA0_ANALOGPARAMETER2 0x884
+#define RFPGA0_ANALOGPARAMETER3 0x888
+#define RFPGA0_ANALOGPARAMETER4 0x88c
+
+#define RFPGA0_XA_LSSIREADBACK 0x8a0
+#define RFPGA0_XB_LSSIREADBACK 0x8a4
+#define RFPGA0_XC_LSSIREADBACK 0x8a8
+/* #define RFPGA0_XD_LSSIREADBACK 0x8ac */
+#define RRFMOD 0x8ac
+#define RHSSIREAD_8821AE 0x8b0
+
+#define RFPGA0_PSDREPORT 0x8b4
+#define TRANSCEIVEA_HSPI_READBACK 0x8b8
+#define TRANSCEIVEB_HSPI_READBACK 0x8bc
+/* #define REG_SC_CNT 0x8c4 */
+#define RADC_BUF_CLK 0x8c4
+#define RFPGA0_XAB_RFINTERFACERB 0x8e0
+#define RFPGA0_XCD_RFINTERFACERB 0x8e4
+
+#define RFPGA1_RFMOD 0x900
+
+#define RFPGA1_TXBLOCK 0x904
+#define RFPGA1_DEBUGSELECT 0x908
+#define RFPGA1_TXINFO 0x90c
+
+#define RCCK_SYSTEM 0xa00
+#define BCCK_SYSTEM 0x10
+
+
+#define RCCK0_AFESETTING 0xa04
+#define RCCK0_CCA 0xa08
+
+#define RCCK0_RXAGC1 0xa0c
+#define RCCK0_RXAGC2 0xa10
+
+#define RCCK0_RXHP 0xa14
+
+#define RCCK0_DSPPARAMETER1 0xa18
+#define RCCK0_DSPPARAMETER2 0xa1c
+
+#define RCCK0_TXFILTER1 0xa20
+#define RCCK0_TXFILTER2 0xa24
+#define RCCK0_DEBUGPORT 0xa28
+#define RCCK0_FALSEALARMREPORT 0xa2c
+#define RCCK0_TRSSIREPORT 0xa50
+#define RCCK0_RXREPORT 0xa54
+#define RCCK0_FACOUNTERLOWER 0xa5c
+#define RCCK0_FACOUNTERUPPER 0xa58
+#define RCCK0_CCA_CNT 0xa60
+
+
+/* PageB(0xB00) */
+#define rPdp_AntA 0xb00
+#define rPdp_AntA_4 0xb04
+#define rPdp_AntA_8 0xb08
+#define rPdp_AntA_C 0xb0c
+#define rPdp_AntA_10 0xb10
+#define rPdp_AntA_14 0xb14
+#define rPdp_AntA_18 0xb18
+#define rPdp_AntA_1C 0xb1c
+#define rPdp_AntA_20 0xb20
+#define rPdp_AntA_24 0xb24
+
+#define rConfig_Pmpd_AntA 0xb28
+#define rConfig_ram64x16 0xb2c
+
+#define rBndA 0xb30
+#define rHssiPar 0xb34
+
+#define rConfig_AntA 0xb68
+#define rConfig_AntB 0xb6c
+
+#define rPdp_AntB 0xb70
+#define rPdp_AntB_4 0xb74
+#define rPdp_AntB_8 0xb78
+#define rPdp_AntB_C 0xb7c
+#define rPdp_AntB_10 0xb80
+#define rPdp_AntB_14 0xb84
+#define rPdp_AntB_18 0xb88
+#define rPdp_AntB_1C 0xb8c
+#define rPdp_AntB_20 0xb90
+#define rPdp_AntB_24 0xb94
+
+#define rConfig_Pmpd_AntB 0xb98
+
+#define rBndB 0xba0
+
+#define rAPK 0xbd8
+#define rPm_Rx0_AntA 0xbdc
+#define rPm_Rx1_AntA 0xbe0
+#define rPm_Rx2_AntA 0xbe4
+#define rPm_Rx3_AntA 0xbe8
+#define rPm_Rx0_AntB 0xbec
+#define rPm_Rx1_AntB 0xbf0
+#define rPm_Rx2_AntB 0xbf4
+#define rPm_Rx3_AntB 0xbf8
+
+/*RSSI Dump*/
+#define RA_RSSI_DUMP 0xBF0
+#define RB_RSSI_DUMP 0xBF1
+#define RS1_RX_EVM_DUMP 0xBF4
+#define RS2_RX_EVM_DUMP 0xBF5
+#define RA_RX_SNR_DUMP 0xBF6
+#define RB_RX_SNR_DUMP 0xBF7
+#define RA_CFO_SHORT_DUMP 0xBF8
+#define RB_CFO_SHORT_DUMP 0xBFA
+#define RA_CFO_LONG_DUMP 0xBEC
+#define RB_CFO_LONG_DUMP 0xBEE
+
+/*Page C*/
+#define ROFDM0_LSTF 0xc00
+
+#define ROFDM0_TRXPATHENABLE 0xc04
+#define ROFDM0_TRMUXPAR 0xc08
+#define ROFDM0_TRSWISOLATION 0xc0c
+
+#define ROFDM0_XARXAFE 0xc10
+#define ROFDM0_XARXIQIMBALANCE 0xc14
+#define ROFDM0_XBRXAFE 0xc18
+#define ROFDM0_XBRXIQIMBALANCE 0xc1c
+#define ROFDM0_XCRXAFE 0xc20
+#define ROFDM0_XCRXIQIMBANLANCE 0xc24
+#define ROFDM0_XDRXAFE 0xc28
+#define ROFDM0_XDRXIQIMBALANCE 0xc2c
+
+#define ROFDM0_RXDETECTOR1 0xc30
+#define ROFDM0_RXDETECTOR2 0xc34
+#define ROFDM0_RXDETECTOR3 0xc38
+#define ROFDM0_RXDETECTOR4 0xc3c
+
+#define ROFDM0_RXDSP 0xc40
+#define ROFDM0_CFOANDDAGC 0xc44
+#define ROFDM0_CCADROPTHRESHOLD 0xc48
+#define ROFDM0_ECCATHRESHOLD 0xc4c
+
+#define ROFDM0_XAAGCCORE1 0xc50
+#define ROFDM0_XAAGCCORE2 0xc54
+#define ROFDM0_XBAGCCORE1 0xc58
+#define ROFDM0_XBAGCCORE2 0xc5c
+#define ROFDM0_XCAGCCORE1 0xc60
+#define ROFDM0_XCAGCCORE2 0xc64
+#define ROFDM0_XDAGCCORE1 0xc68
+#define ROFDM0_XDAGCCORE2 0xc6c
+
+#define ROFDM0_AGCPARAMETER1 0xc70
+#define ROFDM0_AGCPARAMETER2 0xc74
+#define ROFDM0_AGCRSSITABLE 0xc78
+#define ROFDM0_HTSTFAGC 0xc7c
+
+#define ROFDM0_XATXIQIMBALANCE 0xc80
+#define ROFDM0_XATXAFE 0xc84
+#define ROFDM0_XBTXIQIMBALANCE 0xc88
+#define ROFDM0_XBTXAFE 0xc8c
+#define ROFDM0_XCTXIQIMBALANCE 0xc90
+#define ROFDM0_XCTXAFE 0xc94
+#define ROFDM0_XDTXIQIMBALANCE 0xc98
+#define ROFDM0_XDTXAFE 0xc9c
+
+#define ROFDM0_RXIQEXTANTA 0xca0
+#define ROFDM0_TXCOEFF1 0xca4
+#define ROFDM0_TXCOEFF2 0xca8
+#define ROFDM0_TXCOEFF3 0xcac
+#define ROFDM0_TXCOEFF4 0xcb0
+#define ROFDM0_TXCOEFF5 0xcb4
+#define ROFDM0_TXCOEFF6 0xcb8
+
+/*Path_A RFE control */
+#define RA_RFE_CTRL_8812 0xcb8
+/*Path_B RFE control*/
+#define RB_RFE_CTRL_8812 0xeb8
+
+#define ROFDM0_RXHPPARAMETER 0xce0
+#define ROFDM0_TXPSEUDONOISEWGT 0xce4
+#define ROFDM0_FRAMESYNC 0xcf0
+#define ROFDM0_DFSREPORT 0xcf4
+
+
+#define ROFDM1_LSTF 0xd00
+#define ROFDM1_TRXPATHENABLE 0xd04
+
+#define ROFDM1_CF0 0xd08
+#define ROFDM1_CSI1 0xd10
+#define ROFDM1_SBD 0xd14
+#define ROFDM1_CSI2 0xd18
+#define ROFDM1_CFOTRACKING 0xd2c
+#define ROFDM1_TRXMESAURE1 0xd34
+#define ROFDM1_INTFDET 0xd3c
+#define ROFDM1_PSEUDONOISESTATEAB 0xd50
+#define ROFDM1_PSEUDONOISESTATECD 0xd54
+#define ROFDM1_RXPSEUDONOISEWGT 0xd58
+
+#define ROFDM_PHYCOUNTER1 0xda0
+#define ROFDM_PHYCOUNTER2 0xda4
+#define ROFDM_PHYCOUNTER3 0xda8
+
+#define ROFDM_SHORTCFOAB 0xdac
+#define ROFDM_SHORTCFOCD 0xdb0
+#define ROFDM_LONGCFOAB 0xdb4
+#define ROFDM_LONGCFOCD 0xdb8
+#define ROFDM_TAILCF0AB 0xdbc
+#define ROFDM_TAILCF0CD 0xdc0
+#define ROFDM_PWMEASURE1 0xdc4
+#define ROFDM_PWMEASURE2 0xdc8
+#define ROFDM_BWREPORT 0xdcc
+#define ROFDM_AGCREPORT 0xdd0
+#define ROFDM_RXSNR 0xdd4
+#define ROFDM_RXEVMCSI 0xdd8
+#define ROFDM_SIGREPORT 0xddc
+
+#define RTXAGC_A_CCK11_CCK1 0xc20
+#define RTXAGC_A_OFDM18_OFDM6 0xc24
+#define RTXAGC_A_OFDM54_OFDM24 0xc28
+#define RTXAGC_A_MCS03_MCS00 0xc2c
+#define RTXAGC_A_MCS07_MCS04 0xc30
+#define RTXAGC_A_MCS11_MCS08 0xc34
+#define RTXAGC_A_MCS15_MCS12 0xc38
+#define RTXAGC_A_NSS1INDEX3_NSS1INDEX0 0xc3c
+#define RTXAGC_A_NSS1INDEX7_NSS1INDEX4 0xc40
+#define RTXAGC_A_NSS2INDEX1_NSS1INDEX8 0xc44
+#define RTXAGC_A_NSS2INDEX5_NSS2INDEX2 0xc48
+#define RTXAGC_A_NSS2INDEX9_NSS2INDEX6 0xc4c
+#define RTXAGC_B_CCK11_CCK1 0xe20
+#define RTXAGC_B_OFDM18_OFDM6 0xe24
+#define RTXAGC_B_OFDM54_OFDM24 0xe28
+#define RTXAGC_B_MCS03_MCS00 0xe2c
+#define RTXAGC_B_MCS07_MCS04 0xe30
+#define RTXAGC_B_MCS11_MCS08 0xe34
+#define RTXAGC_B_MCS15_MCS12 0xe38
+#define RTXAGC_B_NSS1INDEX3_NSS1INDEX0 0xe3c
+#define RTXAGC_B_NSS1INDEX7_NSS1INDEX4 0xe40
+#define RTXAGC_B_NSS2INDEX1_NSS1INDEX8 0xe44
+#define RTXAGC_B_NSS2INDEX5_NSS2INDEX2 0xe48
+#define RTXAGC_B_NSS2INDEX9_NSS2INDEX6 0xe4c
+
+#define RA_TXPWRTRAING 0xc54
+#define RB_TXPWRTRAING 0xe54
+
+
+#define RFPGA0_IQK 0xe28
+#define RTx_IQK_Tone_A 0xe30
+#define RRx_IQK_Tone_A 0xe34
+#define RTx_IQK_PI_A 0xe38
+#define RRx_IQK_PI_A 0xe3c
+
+#define RTx_IQK 0xe40
+#define RRx_IQK 0xe44
+#define RIQK_AGC_Pts 0xe48
+#define RIQK_AGC_Rsp 0xe4c
+#define RTx_IQK_Tone_B 0xe50
+#define RRx_IQK_Tone_B 0xe54
+#define RTx_IQK_PI_B 0xe58
+#define RRx_IQK_PI_B 0xe5c
+#define RIQK_AGC_Cont 0xe60
+
+#define RBlue_Tooth 0xe6c
+#define RRx_Wait_CCA 0xe70
+#define RTx_CCK_RFON 0xe74
+#define RTx_CCK_BBON 0xe78
+#define RTx_OFDM_RFON 0xe7c
+#define RTx_OFDM_BBON 0xe80
+#define RTx_To_Rx 0xe84
+#define RTx_To_Tx 0xe88
+#define RRx_CCK 0xe8c
+
+#define RTx_Power_Before_IQK_A 0xe94
+#define RTx_Power_After_IQK_A 0xe9c
+
+#define RRx_Power_Before_IQK_A 0xea0
+#define RRx_Power_Before_IQK_A_2 0xea4
+#define RRx_Power_After_IQK_A 0xea8
+#define RRx_Power_After_IQK_A_2 0xeac
+
+#define RTx_Power_Before_IQK_B 0xeb4
+#define RTx_Power_After_IQK_B 0xebc
+
+#define RRx_Power_Before_IQK_B 0xec0
+#define RRx_Power_Before_IQK_B_2 0xec4
+#define RRx_Power_After_IQK_B 0xec8
+#define RRx_Power_After_IQK_B_2 0xecc
+
+#define RRx_OFDM 0xed0
+#define RRx_Wait_RIFS 0xed4
+#define RRx_TO_Rx 0xed8
+#define RStandby 0xedc
+#define RSleep 0xee0
+#define RPMPD_ANAEN 0xeec
+
+#define RZEBRA1_HSSIENABLE 0x0
+#define RZEBRA1_TRXENABLE1 0x1
+#define RZEBRA1_TRXENABLE2 0x2
+#define RZEBRA1_AGC 0x4
+#define RZEBRA1_CHARGEPUMP 0x5
+#define RZEBRA1_CHANNEL 0x7
+
+#define RZEBRA1_TXGAIN 0x8
+#define RZEBRA1_TXLPF 0x9
+#define RZEBRA1_RXLPF 0xb
+#define RZEBRA1_RXHPFCORNER 0xc
+
+#define RGLOBALCTRL 0
+#define RRTL8256_TXLPF 19
+#define RRTL8256_RXLPF 11
+#define RRTL8258_TXLPF 0x11
+#define RRTL8258_RXLPF 0x13
+#define RRTL8258_RSSILPF 0xa
+
+#define RF_AC 0x00
+
+#define RF_IQADJ_G1 0x01
+#define RF_IQADJ_G2 0x02
+#define RF_POW_TRSW 0x05
+
+#define RF_GAIN_RX 0x06
+#define RF_GAIN_TX 0x07
+
+#define RF_TXM_IDAC 0x08
+#define RF_BS_IQGEN 0x0F
+
+#define RF_MODE1 0x10
+#define RF_MODE2 0x11
+
+#define RF_RX_AGC_HP 0x12
+#define RF_TX_AGC 0x13
+#define RF_BIAS 0x14
+#define RF_IPA 0x15
+#define RF_POW_ABILITY 0x17
+#define RF_MODE_AG 0x18
+#define RRFCHANNEL 0x18
+#define RF_CHNLBW 0x18
+#define RF_TOP 0x19
+
+#define RF_RX_G1 0x1A
+#define RF_RX_G2 0x1B
+
+#define RF_RX_BB2 0x1C
+#define RF_RX_BB1 0x1D
+
+#define RF_RCK1 0x1E
+#define RF_RCK2 0x1F
+
+#define RF_TX_G1 0x20
+#define RF_TX_G2 0x21
+#define RF_TX_G3 0x22
+
+#define RF_TX_BB1 0x23
+#define RF_T_METER 0x24
+#define RF_T_METER_88E 0x42
+#define RF_T_METER_8812A 0x42
+
+#define RF_SYN_G1 0x25
+#define RF_SYN_G2 0x26
+#define RF_SYN_G3 0x27
+#define RF_SYN_G4 0x28
+#define RF_SYN_G5 0x29
+#define RF_SYN_G6 0x2A
+#define RF_SYN_G7 0x2B
+#define RF_SYN_G8 0x2C
+
+#define RF_RCK_OS 0x30
+#define RF_TXPA_G1 0x31
+#define RF_TXPA_G2 0x32
+#define RF_TXPA_G3 0x33
+
+#define RF_TX_BIAS_A 0x35
+#define RF_TX_BIAS_D 0x36
+#define RF_LOBF_9 0x38
+#define RF_RXRF_A3 0x3C
+#define RF_TRSW 0x3F
+
+#define RF_TXRF_A2 0x41
+#define RF_TXPA_G4 0x46
+#define RF_TXPA_A4 0x4B
+
+#define RF_APK 0x63
+
+#define RF_WE_LUT 0xEF
+
+#define BBBRESETB 0x100
+#define BGLOBALRESETB 0x200
+#define BOFDMTXSTART 0x4
+#define BCCKTXSTART 0x8
+#define BCRC32DEBUG 0x100
+#define BPMACLOOPBACK 0x10
+#define BTXLSIG 0xffffff
+#define BOFDMTXRATE 0xf
+#define BOFDMTXRESERVED 0x10
+#define BOFDMTXLENGTH 0x1ffe0
+#define BOFDMTXPARITY 0x20000
+#define BTXHTSIG1 0xffffff
+#define BTXHTMCSRATE 0x7f
+#define BTXHTBW 0x80
+#define BTXHTLENGTH 0xffff00
+#define BTXHTSIG2 0xffffff
+#define BTXHTSMOOTHING 0x1
+#define BTXHTSOUNDING 0x2
+#define BTXHTRESERVED 0x4
+#define BTXHTAGGREATION 0x8
+#define BTXHTSTBC 0x30
+#define BTXHTADVANCECODING 0x40
+#define BTXHTSHORTGI 0x80
+#define BTXHTNUMBERHT_LTF 0x300
+#define BTXHTCRC8 0x3fc00
+#define BCOUNTERRESET 0x10000
+#define BNUMOFOFDMTX 0xffff
+#define BNUMOFCCKTX 0xffff0000
+#define BTXIDLEINTERVAL 0xffff
+#define BOFDMSERVICE 0xffff0000
+#define BTXMACHEADER 0xffffffff
+#define BTXDATAINIT 0xff
+#define BTXHTMODE 0x100
+#define BTXDATATYPE 0x30000
+#define BTXRANDOMSEED 0xffffffff
+#define BCCKTXPREAMBLE 0x1
+#define BCCKTXSFD 0xffff0000
+#define BCCKTXSIG 0xff
+#define BCCKTXSERVICE 0xff00
+#define BCCKLENGTHEXT 0x8000
+#define BCCKTXLENGHT 0xffff0000
+#define BCCKTXCRC16 0xffff
+#define BCCKTXSTATUS 0x1
+#define BOFDMTXSTATUS 0x2
+#define IS_BB_REG_OFFSET_92S(_Offset) \
+ ((_Offset >= 0x800) && (_Offset <= 0xfff))
+
+#define BRFMOD 0x1
+#define BJAPANMODE 0x2
+#define BCCKTXSC 0x30
+/* Block & Path enable*/
+#define ROFDMCCKEN 0x808
+#define BCCKEN 0x10000000
+#define BOFDMEN 0x20000000
+#define RRXPATH 0x808 /* Rx antenna*/
+#define BRXPATH 0xff
+#define RTXPATH 0x80c /* Tx antenna*/
+#define BTXPATH 0x0fffffff
+#define RCCK_RX 0xa04 /* for cck rx path selection*/
+#define BCCK_RX 0x0c000000
+#define RVHTLEN_USE_LSIG 0x8c3 /* Use LSIG for VHT length*/
+
+
+#define BOFDMRXADCPHASE 0x10000
+#define BOFDMTXDACPHASE 0x40000
+#define BXATXAGC 0x3f
+
+#define BXBTXAGC 0xf00
+#define BXCTXAGC 0xf000
+#define BXDTXAGC 0xf0000
+
+#define BPASTART 0xf0000000
+#define BTRSTART 0x00f00000
+#define BRFSTART 0x0000f000
+#define BBBSTART 0x000000f0
+#define BBBCCKSTART 0x0000000f
+#define BPAEND 0xf
+#define BTREND 0x0f000000
+#define BRFEND 0x000f0000
+#define BCCAMASK 0x000000f0
+#define BR2RCCAMASK 0x00000f00
+#define BHSSI_R2TDELAY 0xf8000000
+#define BHSSI_T2RDELAY 0xf80000
+#define BCONTXHSSI 0x400
+#define BIGFROMCCK 0x200
+#define BAGCADDRESS 0x3f
+#define BRXHPTX 0x7000
+#define BRXHP2RX 0x38000
+#define BRXHPCCKINI 0xc0000
+#define BAGCTXCODE 0xc00000
+#define BAGCRXCODE 0x300000
+
+#define B3WIREDATALENGTH 0x800
+#define B3WIREADDREAALENGTH 0x400
+
+#define B3WIRERFPOWERDOWN 0x1
+#define B5GPAPEPOLARITY 0x40000000
+#define B2GPAPEPOLARITY 0x80000000
+#define BRFSW_TXDEFAULTANT 0x3
+#define BRFSW_TXOPTIONANT 0x30
+#define BRFSW_RXDEFAULTANT 0x300
+#define BRFSW_RXOPTIONANT 0x3000
+#define BRFSI_3WIREDATA 0x1
+#define BRFSI_3WIRECLOCK 0x2
+#define BRFSI_3WIRELOAD 0x4
+#define BRFSI_3WIRERW 0x8
+#define BRFSI_3WIRE 0xf
+
+#define BRFSI_RFENV 0x10
+
+#define BRFSI_TRSW 0x20
+#define BRFSI_TRSWB 0x40
+#define BRFSI_ANTSW 0x100
+#define BRFSI_ANTSWB 0x200
+#define BRFSI_PAPE 0x400
+#define BRFSI_PAPE5G 0x800
+#define BBANDSELECT 0x1
+#define BHTSIG2_GI 0x80
+#define BHTSIG2_SMOOTHING 0x01
+#define BHTSIG2_SOUNDING 0x02
+#define BHTSIG2_AGGREATON 0x08
+#define BHTSIG2_STBC 0x30
+#define BHTSIG2_ADVCODING 0x40
+#define BHTSIG2_NUMOFHTLTF 0x300
+#define BHTSIG2_CRC8 0x3fc
+#define BHTSIG1_MCS 0x7f
+#define BHTSIG1_BANDWIDTH 0x80
+#define BHTSIG1_HTLENGTH 0xffff
+#define BLSIG_RATE 0xf
+#define BLSIG_RESERVED 0x10
+#define BLSIG_LENGTH 0x1fffe
+#define BLSIG_PARITY 0x20
+#define BCCKRXPHASE 0x4
+
+#define BLSSIREADADDRESS 0x7f800000
+#define BLSSIREADEDGE 0x80000000
+
+#define BLSSIREADBACKDATA 0xfffff
+
+#define BLSSIREADOKFLAG 0x1000
+#define BCCKSAMPLERATE 0x8
+#define BREGULATOR0STANDBY 0x1
+#define BREGULATORPLLSTANDBY 0x2
+#define BREGULATOR1STANDBY 0x4
+#define BPLLPOWERUP 0x8
+#define BDPLLPOWERUP 0x10
+#define BDA10POWERUP 0x20
+#define BAD7POWERUP 0x200
+#define BDA6POWERUP 0x2000
+#define BXTALPOWERUP 0x4000
+#define B40MDCLKPOWERUP 0x8000
+#define BDA6DEBUGMODE 0x20000
+#define BDA6SWING 0x380000
+
+#define BADCLKPHASE 0x4000000
+#define B80MCLKDELAY 0x18000000
+#define BAFEWATCHDOGENABLE 0x20000000
+
+#define BXTALCAP01 0xc0000000
+#define BXTALCAP23 0x3
+#define BXTALCAP92X 0x0f000000
+#define BXTALCAP 0x0f000000
+
+#define BINTDIFCLKENABLE 0x400
+#define BEXTSIGCLKENABLE 0x800
+#define BBANDGAP_MBIAS_POWERUP 0x10000
+#define BAD11SH_GAIN 0xc0000
+#define BAD11NPUT_RANGE 0x700000
+#define BAD110P_CURRENT 0x3800000
+#define BLPATH_LOOPBACK 0x4000000
+#define BQPATH_LOOPBACK 0x8000000
+#define BAFE_LOOPBACK 0x10000000
+#define BDA10_SWING 0x7e0
+#define BDA10_REVERSE 0x800
+#define BDA_CLK_SOURCE 0x1000
+#define BDA7INPUT_RANGE 0x6000
+#define BDA7_GAIN 0x38000
+#define BDA7OUTPUT_CM_MODE 0x40000
+#define BDA7INPUT_CM_MODE 0x380000
+#define BDA7CURRENT 0xc00000
+#define BREGULATOR_ADJUST 0x7000000
+#define BAD11POWERUP_ATTX 0x1
+#define BDA10PS_ATTX 0x10
+#define BAD11POWERUP_ATRX 0x100
+#define BDA10PS_ATRX 0x1000
+#define BCCKRX_AGC_FORMAT 0x200
+#define BPSDFFT_SAMPLE_POINT 0xc000
+#define BPSD_AVERAGE_NUM 0x3000
+#define BIQPATH_CONTROL 0xc00
+#define BPSD_FREQ 0x3ff
+#define BPSD_ANTENNA_PATH 0x30
+#define BPSD_IQ_SWITCH 0x40
+#define BPSD_RX_TRIGGER 0x400000
+#define BPSD_TX_TRIGGER 0x80000000
+#define BPSD_SINE_TONE_SCALE 0x7f000000
+#define BPSD_REPORT 0xffff
+
+#define BOFDM_TXSC 0x30000000
+#define BCCK_TXON 0x1
+#define BOFDM_TXON 0x2
+#define BDEBUG_PAGE 0xfff
+#define BDEBUG_ITEM 0xff
+#define BANTL 0x10
+#define BANT_NONHT 0x100
+#define BANT_HT1 0x1000
+#define BANT_HT2 0x10000
+#define BANT_HT1S1 0x100000
+#define BANT_NONHTS1 0x1000000
+
+#define BCCK_BBMODE 0x3
+#define BCCK_TXPOWERSAVING 0x80
+#define BCCK_RXPOWERSAVING 0x40
+
+#define BCCK_SIDEBAND 0x10
+
+#define BCCK_SCRAMBLE 0x8
+#define BCCK_ANTDIVERSITY 0x8000
+#define BCCK_CARRIER_RECOVERY 0x4000
+#define BCCK_TXRATE 0x3000
+#define BCCK_DCCANCEL 0x0800
+#define BCCK_ISICANCEL 0x0400
+#define BCCK_MATCH_FILTER 0x0200
+#define BCCK_EQUALIZER 0x0100
+#define BCCK_PREAMBLE_DETECT 0x800000
+#define BCCK_FAST_FALSECCA 0x400000
+#define BCCK_CH_ESTSTART 0x300000
+#define BCCK_CCA_COUNT 0x080000
+#define BCCK_CS_LIM 0x070000
+#define BCCK_BIST_MODE 0x80000000
+#define BCCK_CCAMASK 0x40000000
+#define BCCK_TX_DAC_PHASE 0x4
+#define BCCK_RX_ADC_PHASE 0x20000000
+#define BCCKR_CP_MODE 0x0100
+#define BCCK_TXDC_OFFSET 0xf0
+#define BCCK_RXDC_OFFSET 0xf
+#define BCCK_CCA_MODE 0xc000
+#define BCCK_FALSECS_LIM 0x3f00
+#define BCCK_CS_RATIO 0xc00000
+#define BCCK_CORGBIT_SEL 0x300000
+#define BCCK_PD_LIM 0x0f0000
+#define BCCK_NEWCCA 0x80000000
+#define BCCK_RXHP_OF_IG 0x8000
+#define BCCK_RXIG 0x7f00
+#define BCCK_LNA_POLARITY 0x800000
+#define BCCK_RX1ST_BAIN 0x7f0000
+#define BCCK_RF_EXTEND 0x20000000
+#define BCCK_RXAGC_SATLEVEL 0x1f000000
+#define BCCK_RXAGC_SATCOUNT 0xe0
+#define bCCKRxRFSettle 0x1f
+#define BCCK_FIXED_RXAGC 0x8000
+#define BCCK_ANTENNA_POLARITY 0x2000
+#define BCCK_TXFILTER_TYPE 0x0c00
+#define BCCK_RXAGC_REPORTTYPE 0x0300
+#define BCCK_RXDAGC_EN 0x80000000
+#define BCCK_RXDAGC_PERIOD 0x20000000
+#define BCCK_RXDAGC_SATLEVEL 0x1f000000
+#define BCCK_TIMING_RECOVERY 0x800000
+#define BCCK_TXC0 0x3f0000
+#define BCCK_TXC1 0x3f000000
+#define BCCK_TXC2 0x3f
+#define BCCK_TXC3 0x3f00
+#define BCCK_TXC4 0x3f0000
+#define BCCK_TXC5 0x3f000000
+#define BCCK_TXC6 0x3f
+#define BCCK_TXC7 0x3f00
+#define BCCK_DEBUGPORT 0xff0000
+#define BCCK_DAC_DEBUG 0x0f000000
+#define BCCK_FALSEALARM_ENABLE 0x8000
+#define BCCK_FALSEALARM_READ 0x4000
+#define BCCK_TRSSI 0x7f
+#define BCCK_RXAGC_REPORT 0xfe
+#define BCCK_RXREPORT_ANTSEL 0x80000000
+#define BCCK_RXREPORT_MFOFF 0x40000000
+#define BCCK_RXREPORT_SQLOSS 0x20000000
+#define BCCK_RXREPORT_PKTLOSS 0x10000000
+#define BCCK_RXREPORT_LOCKEDBIT 0x08000000
+#define BCCK_RXREPORT_RATEERROR 0x04000000
+#define BCCK_RXREPORT_RXRATE 0x03000000
+#define BCCK_RXFA_COUNTER_LOWER 0xff
+#define BCCK_RXFA_COUNTER_UPPER 0xff000000
+#define BCCK_RXHPAGC_START 0xe000
+#define BCCK_RXHPAGC_FINAL 0x1c00
+#define BCCK_RXFALSEALARM_ENABLE 0x8000
+#define BCCK_FACOUNTER_FREEZE 0x4000
+#define BCCK_TXPATH_SEL 0x10000000
+#define BCCK_DEFAULT_RXPATH 0xc000000
+#define BCCK_OPTION_RXPATH 0x3000000
+
+#define BNUM_OFSTF 0x3
+#define BSHIFT_L 0xc0
+#define BGI_TH 0xc
+#define BRXPATH_A 0x1
+#define BRXPATH_B 0x2
+#define BRXPATH_C 0x4
+#define BRXPATH_D 0x8
+#define BTXPATH_A 0x1
+#define BTXPATH_B 0x2
+#define BTXPATH_C 0x4
+#define BTXPATH_D 0x8
+#define BTRSSI_FREQ 0x200
+#define BADC_BACKOFF 0x3000
+#define BDFIR_BACKOFF 0xc000
+#define BTRSSI_LATCH_PHASE 0x10000
+#define BRX_LDC_OFFSET 0xff
+#define BRX_QDC_OFFSET 0xff00
+#define BRX_DFIR_MODE 0x1800000
+#define BRX_DCNF_TYPE 0xe000000
+#define BRXIQIMB_A 0x3ff
+#define BRXIQIMB_B 0xfc00
+#define BRXIQIMB_C 0x3f0000
+#define BRXIQIMB_D 0xffc00000
+#define BDC_DC_NOTCH 0x60000
+#define BRXNB_NOTCH 0x1f000000
+#define BPD_TH 0xf
+#define BPD_TH_OPT2 0xc000
+#define BPWED_TH 0x700
+#define BIFMF_WIN_L 0x800
+#define BPD_OPTION 0x1000
+#define BMF_WIN_L 0xe000
+#define BBW_SEARCH_L 0x30000
+#define BWIN_ENH_L 0xc0000
+#define BBW_TH 0x700000
+#define BED_TH2 0x3800000
+#define BBW_OPTION 0x4000000
+#define BRADIO_TH 0x18000000
+#define BWINDOW_L 0xe0000000
+#define BSBD_OPTION 0x1
+#define BFRAME_TH 0x1c
+#define BFS_OPTION 0x60
+#define BDC_SLOPE_CHECK 0x80
+#define BFGUARD_COUNTER_DC_L 0xe00
+#define BFRAME_WEIGHT_SHORT 0x7000
+#define BSUB_TUNE 0xe00000
+#define BFRAME_DC_LENGTH 0xe000000
+#define BSBD_START_OFFSET 0x30000000
+#define BFRAME_TH_2 0x7
+#define BFRAME_GI2_TH 0x38
+#define BGI2_SYNC_EN 0x40
+#define BSARCH_SHORT_EARLY 0x300
+#define BSARCH_SHORT_LATE 0xc00
+#define BSARCH_GI2_LATE 0x70000
+#define BCFOANTSUM 0x1
+#define BCFOACC 0x2
+#define BCFOSTARTOFFSET 0xc
+#define BCFOLOOPBACK 0x70
+#define BCFOSUMWEIGHT 0x80
+#define BDAGCENABLE 0x10000
+#define BTXIQIMB_A 0x3ff
+#define BTXIQIMB_b 0xfc00
+#define BTXIQIMB_C 0x3f0000
+#define BTXIQIMB_D 0xffc00000
+#define BTXIDCOFFSET 0xff
+#define BTXIQDCOFFSET 0xff00
+#define BTXDFIRMODE 0x10000
+#define BTXPESUDO_NOISEON 0x4000000
+#define BTXPESUDO_NOISE_A 0xff
+#define BTXPESUDO_NOISE_B 0xff00
+#define BTXPESUDO_NOISE_C 0xff0000
+#define BTXPESUDO_NOISE_D 0xff000000
+#define BCCA_DROPOPTION 0x20000
+#define BCCA_DROPTHRES 0xfff00000
+#define BEDCCA_H 0xf
+#define BEDCCA_L 0xf0
+#define BLAMBDA_ED 0x300
+#define BRX_INITIALGAIN 0x7f
+#define BRX_ANTDIV_EN 0x80
+#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00
+#define BRX_HIGHPOWER_FLOW 0x8000
+#define BRX_AGC_FREEZE_THRES 0xc0000
+#define BRX_FREEZESTEP_AGC1 0x300000
+#define BRX_FREEZESTEP_AGC2 0xc00000
+#define BRX_FREEZESTEP_AGC3 0x3000000
+#define BRX_FREEZESTEP_AGC0 0xc000000
+#define BRXRSSI_CMP_EN 0x10000000
+#define BRXQUICK_AGCEN 0x20000000
+#define BRXAGC_FREEZE_THRES_MODE 0x40000000
+#define BRX_OVERFLOW_CHECKTYPE 0x80000000
+#define BRX_AGCSHIFT 0x7f
+#define BTRSW_TRI_ONLY 0x80
+#define BPOWER_THRES 0x300
+#define BRXAGC_EN 0x1
+#define BRXAGC_TOGETHER_EN 0x2
+#define BRXAGC_MIN 0x4
+#define BRXHP_INI 0x7
+#define BRXHP_TRLNA 0x70
+#define BRXHP_RSSI 0x700
+#define BRXHP_BBP1 0x7000
+#define BRXHP_BBP2 0x70000
+#define BRXHP_BBP3 0x700000
+#define BRSSI_H 0x7f0000
+#define BRSSI_GEN 0x7f000000
+#define BRXSETTLE_TRSW 0x7
+#define BRXSETTLE_LNA 0x38
+#define BRXSETTLE_RSSI 0x1c0
+#define BRXSETTLE_BBP 0xe00
+#define BRXSETTLE_RXHP 0x7000
+#define BRXSETTLE_ANTSW_RSSI 0x38000
+#define BRXSETTLE_ANTSW 0xc0000
+#define BRXPROCESS_TIME_DAGC 0x300000
+#define BRXSETTLE_HSSI 0x400000
+#define BRXPROCESS_TIME_BBPPW 0x800000
+#define BRXANTENNA_POWER_SHIFT 0x3000000
+#define BRSSI_TABLE_SELECT 0xc000000
+#define BRXHP_FINAL 0x7000000
+#define BRXHPSETTLE_BBP 0x7
+#define BRXHTSETTLE_HSSI 0x8
+#define BRXHTSETTLE_RXHP 0x70
+#define BRXHTSETTLE_BBPPW 0x80
+#define BRXHTSETTLE_IDLE 0x300
+#define BRXHTSETTLE_RESERVED 0x1c00
+#define BRXHT_RXHP_EN 0x8000
+#define BRXAGC_FREEZE_THRES 0x30000
+#define BRXAGC_TOGETHEREN 0x40000
+#define BRXHTAGC_MIN 0x80000
+#define BRXHTAGC_EN 0x100000
+#define BRXHTDAGC_EN 0x200000
+#define BRXHT_RXHP_BBP 0x1c00000
+#define BRXHT_RXHP_FINAL 0xe0000000
+#define BRXPW_RADIO_TH 0x3
+#define BRXPW_RADIO_EN 0x4
+#define BRXMF_HOLD 0x3800
+#define BRXPD_DELAY_TH1 0x38
+#define BRXPD_DELAY_TH2 0x1c0
+#define BRXPD_DC_COUNT_MAX 0x600
+#define BRXPD_DELAY_TH 0x8000
+#define BRXPROCESS_DELAY 0xf0000
+#define BRXSEARCHRANGE_GI2_EARLY 0x700000
+#define BRXFRAME_FUARD_COUNTER_L 0x3800000
+#define BRXSGI_GUARD_L 0xc000000
+#define BRXSGI_SEARCH_L 0x30000000
+#define BRXSGI_TH 0xc0000000
+#define BDFSCNT0 0xff
+#define BDFSCNT1 0xff00
+#define BDFSFLAG 0xf0000
+#define BMF_WEIGHT_SUM 0x300000
+#define BMINIDX_TH 0x7f000000
+#define BDAFORMAT 0x40000
+#define BTXCH_EMU_ENABLE 0x01000000
+#define BTRSW_ISOLATION_A 0x7f
+#define BTRSW_ISOLATION_B 0x7f00
+#define BTRSW_ISOLATION_C 0x7f0000
+#define BTRSW_ISOLATION_D 0x7f000000
+#define BEXT_LNA_GAIN 0x7c00
+
+#define BSTBC_EN 0x4
+#define BANTENNA_MAPPING 0x10
+#define BNSS 0x20
+#define BCFO_ANTSUM_ID 0x200
+#define BPHY_COUNTER_RESET 0x8000000
+#define BCFO_REPORT_GET 0x4000000
+#define BOFDM_CONTINUE_TX 0x10000000
+#define BOFDM_SINGLE_CARRIER 0x20000000
+#define BOFDM_SINGLE_TONE 0x40000000
+#define BHT_DETECT 0x100
+#define BCFOEN 0x10000
+#define BCFOVALUE 0xfff00000
+#define BSIGTONE_RE 0x3f
+#define BSIGTONE_IM 0x7f00
+#define BCOUNTER_CCA 0xffff
+#define BCOUNTER_PARITYFAIL 0xffff0000
+#define BCOUNTER_RATEILLEGAL 0xffff
+#define BCOUNTER_CRC8FAIL 0xffff0000
+#define BCOUNTER_MCSNOSUPPORT 0xffff
+#define BCOUNTER_FASTSYNC 0xffff
+#define BSHORTCFO 0xfff
+#define BSHORTCFOT_LENGTH 12
+#define BSHORTCFOF_LENGTH 11
+#define BLONGCFO 0x7ff
+#define BLONGCFOT_LENGTH 11
+#define BLONGCFOF_LENGTH 11
+#define BTAILCFO 0x1fff
+#define BTAILCFOT_LENGTH 13
+#define BTAILCFOF_LENGTH 12
+#define BNOISE_EN_PWDB 0xffff
+#define BCC_POWER_DB 0xffff0000
+#define BMOISE_PWDB 0xffff
+#define BPOWERMEAST_LENGTH 10
+#define BPOWERMEASF_LENGTH 3
+#define BRX_HT_BW 0x1
+#define BRXSC 0x6
+#define BRX_HT 0x8
+#define BNB_INTF_DET_ON 0x1
+#define BINTF_WIN_LEN_CFG 0x30
+#define BNB_INTF_TH_CFG 0x1c0
+#define BRFGAIN 0x3f
+#define BTABLESEL 0x40
+#define BTRSW 0x80
+#define BRXSNR_A 0xff
+#define BRXSNR_B 0xff00
+#define BRXSNR_C 0xff0000
+#define BRXSNR_D 0xff000000
+#define BSNR_EVMT_LENGTH 8
+#define BSNR_EVMF_LENGTH 1
+#define BCSI1ST 0xff
+#define BCSI2ND 0xff00
+#define BRXEVM1ST 0xff0000
+#define BRXEVM2ND 0xff000000
+#define BSIGEVM 0xff
+#define BPWDB 0xff00
+#define BSGIEN 0x10000
+
+#define BSFACTOR_QMA1 0xf
+#define BSFACTOR_QMA2 0xf0
+#define BSFACTOR_QMA3 0xf00
+#define BSFACTOR_QMA4 0xf000
+#define BSFACTOR_QMA5 0xf0000
+#define BSFACTOR_QMA6 0xf0000
+#define BSFACTOR_QMA7 0xf00000
+#define BSFACTOR_QMA8 0xf000000
+#define BSFACTOR_QMA9 0xf0000000
+#define BCSI_SCHEME 0x100000
+
+#define BNOISE_LVL_TOP_SET 0x3
+#define BCHSMOOTH 0x4
+#define BCHSMOOTH_CFG1 0x38
+#define BCHSMOOTH_CFG2 0x1c0
+#define BCHSMOOTH_CFG3 0xe00
+#define BCHSMOOTH_CFG4 0x7000
+#define BMRCMODE 0x800000
+#define BTHEVMCFG 0x7000000
+
+#define BLOOP_FIT_TYPE 0x1
+#define BUPD_CFO 0x40
+#define BUPD_CFO_OFFDATA 0x80
+#define BADV_UPD_CFO 0x100
+#define BADV_TIME_CTRL 0x800
+#define BUPD_CLKO 0x1000
+#define BFC 0x6000
+#define BTRACKING_MODE 0x8000
+#define BPHCMP_ENABLE 0x10000
+#define BUPD_CLKO_LTF 0x20000
+#define BCOM_CH_CFO 0x40000
+#define BCSI_ESTI_MODE 0x80000
+#define BADV_UPD_EQZ 0x100000
+#define BUCHCFG 0x7000000
+#define BUPDEQZ 0x8000000
+
+#define BRX_PESUDO_NOISE_ON 0x20000000
+#define BRX_PESUDO_NOISE_A 0xff
+#define BRX_PESUDO_NOISE_B 0xff00
+#define BRX_PESUDO_NOISE_C 0xff0000
+#define BRX_PESUDO_NOISE_D 0xff000000
+#define BRX_PESUDO_NOISESTATE_A 0xffff
+#define BRX_PESUDO_NOISESTATE_B 0xffff0000
+#define BRX_PESUDO_NOISESTATE_C 0xffff
+#define BRX_PESUDO_NOISESTATE_D 0xffff0000
+
+#define BZEBRA1_HSSIENABLE 0x8
+#define BZEBRA1_TRXCONTROL 0xc00
+#define BZEBRA1_TRXGAINSETTING 0x07f
+#define BZEBRA1_RXCOUNTER 0xc00
+#define BZEBRA1_TXCHANGEPUMP 0x38
+#define BZEBRA1_RXCHANGEPUMP 0x7
+#define BZEBRA1_CHANNEL_NUM 0xf80
+#define BZEBRA1_TXLPFBW 0x400
+#define BZEBRA1_RXLPFBW 0x600
+
+#define BRTL8256REG_MODE_CTRL1 0x100
+#define BRTL8256REG_MODE_CTRL0 0x40
+#define BRTL8256REG_TXLPFBW 0x18
+#define BRTL8256REG_RXLPFBW 0x600
+
+#define BRTL8258_TXLPFBW 0xc
+#define BRTL8258_RXLPFBW 0xc00
+#define BRTL8258_RSSILPFBW 0xc0
+
+#define BBYTE0 0x1
+#define BBYTE1 0x2
+#define BBYTE2 0x4
+#define BBYTE3 0x8
+#define BWORD0 0x3
+#define BWORD1 0xc
+#define BWORD 0xf
+
+#define MASKBYTE0 0xff
+#define MASKBYTE1 0xff00
+#define MASKBYTE2 0xff0000
+#define MASKBYTE3 0xff000000
+#define MASKHWORD 0xffff0000
+#define MASKLWORD 0x0000ffff
+#define MASKDWORD 0xffffffff
+#define MASK12BITS 0xfff
+#define MASKH4BITS 0xf0000000
+#define MASKOFDM_D 0xffc00000
+#define MASKCCK 0x3f3f3f3f
+
+#define MASK4BITS 0x0f
+#define MASK20BITS 0xfffff
+#define RFREG_OFFSET_MASK 0xfffff
+
+#define BENABLE 0x1
+#define BDISABLE 0x0
+
+#define LEFT_ANTENNA 0x0
+#define RIGHT_ANTENNA 0x1
+
+#define TCHECK_TXSTATUS 500
+#define TUPDATE_RXCOUNTER 100
+
+#define REG_UN_used_register 0x01bf
+
+/* WOL bit information */
+#define HAL92C_WOL_PTK_UPDATE_EVENT BIT(0)
+#define HAL92C_WOL_GTK_UPDATE_EVENT BIT(1)
+#define HAL92C_WOL_DISASSOC_EVENT BIT(2)
+#define HAL92C_WOL_DEAUTH_EVENT BIT(3)
+#define HAL92C_WOL_FW_DISCONNECT_EVENT BIT(4)
+
+#define WOL_REASON_PTK_UPDATE BIT(0)
+#define WOL_REASON_GTK_UPDATE BIT(1)
+#define WOL_REASON_DISASSOC BIT(2)
+#define WOL_REASON_DEAUTH BIT(3)
+#define WOL_REASON_FW_DISCONNECT BIT(4)
+
+#define RA_RFE_PINMUX 0xcb0 /* Path_A RFE control pinmux*/
+#define RB_RFE_PINMUX 0xeb0 /* Path_B RFE control pinmux*/
+
+#define RA_RFE_INV 0xcb4
+#define RB_RFE_INV 0xeb4
+
+/* RXIQC */
+#define RA_RXIQC_AB 0xc10 /*RxIQ imbalance matrix coeff. A & B*/
+#define RA_RXIQC_CD 0xc14 /*RxIQ imbalance matrix coeff. C & D*/
+#define RA_TXSCALE 0xc1c /* Path_A TX scaling factor*/
+#define RB_TXSCALE 0xe1c /* Path_B TX scaling factor*/
+#define RB_RXIQC_AB 0xe10 /*RxIQ imbalance matrix coeff. A & B*/
+#define RB_RXIQC_CD 0xe14 /*RxIQ imbalance matrix coeff. C & D*/
+#define RXIQC_AC 0x02ff /*bit mask for IQC matrix element A & C*/
+#define RXIQC_BD 0x02ff0000 /*bit mask for IQC matrix element B & D*/
+
+/* 2 EFUSE_TEST (For RTL8723 partially) */
+#define EFUSE_SEL(x) (((x) & 0x3) << 8)
+#define EFUSE_SEL_MASK 0x300
+#define EFUSE_WIFI_SEL_0 0x0
+
+/*REG_MULTI_FUNC_CTRL(For RTL8723 Only)*/
+#define WL_HWPDN_EN BIT(0) /* Enable GPIO[9] as WiFi HW PDn source*/
+#define WL_HWPDN_SL BIT(1) /* WiFi HW PDn polarity control*/
+#define WL_FUNC_EN BIT(2) /* WiFi function enable */
+#define WL_HWROF_EN BIT(3) /* Enable GPIO[9] as WiFi RF HW PDn source */
+#define BT_HWPDN_EN BIT(16) /* Enable GPIO[11] as BT HW PDn source */
+#define BT_HWPDN_SL BIT(17) /* BT HW PDn polarity control */
+#define BT_FUNC_EN BIT(18) /* BT function enable */
+#define BT_HWROF_EN BIT(19) /* Enable GPIO[11] as BT/GPS RF HW PDn source */
+#define GPS_HWPDN_EN BIT(20) /* Enable GPIO[10] as GPS HW PDn source */
+#define GPS_HWPDN_SL BIT(21) /* GPS HW PDn polarity control */
+#define GPS_FUNC_EN BIT(22) /* GPS function enable */
+
+
+#define BMASKBYTE0 0xff
+#define BMASKBYTE1 0xff00
+#define BMASKBYTE2 0xff0000
+#define BMASKBYTE3 0xff000000
+#define BMASKHWORD 0xffff0000
+#define BMASKLWORD 0x0000ffff
+#define BMASKDWORD 0xffffffff
+#define BMASK12BITS 0xfff
+#define BMASKH4BITS 0xf0000000
+#define BMASKOFDM_D 0xffc00000
+#define BMASKCCK 0x3f3f3f3f
+
+#define BRFREGOFFSETMASK 0xfffff
+
+#define ODM_REG_CCK_RPT_FORMAT_11AC 0x804
+#define ODM_REG_BB_RX_PATH_11AC 0x808
+/*PAGE 9*/
+#define ODM_REG_OFDM_FA_RST_11AC 0x9A4
+/*PAGE A*/
+#define ODM_REG_CCK_CCA_11AC 0xA0A
+#define ODM_REG_CCK_FA_RST_11AC 0xA2C
+#define ODM_REG_CCK_FA_11AC 0xA5C
+/*PAGE C*/
+#define ODM_REG_IGI_A_11AC 0xC50
+/*PAGE E*/
+#define ODM_REG_IGI_B_11AC 0xE50
+/*PAGE F*/
+#define ODM_REG_OFDM_FA_11AC 0xF48
+
+
+/* 2 MAC REG LIST */
+
+/* DIG related */
+#define ODM_BIT_IGI_11AC 0xFFFFFFFF
+#define ODM_BIT_CCK_RPT_FORMAT_11AC BIT16
+#define ODM_BIT_BB_RX_PATH_11AC 0xF
+
+typedef enum AGGRE_SIZE {
+ HT_AGG_SIZE_8K = 0,
+ HT_AGG_SIZE_16K = 1,
+ HT_AGG_SIZE_32K = 2,
+ HT_AGG_SIZE_64K = 3,
+ VHT_AGG_SIZE_128K = 4,
+ VHT_AGG_SIZE_256K = 5,
+ VHT_AGG_SIZE_512K = 6,
+ VHT_AGG_SIZE_1024K = 7,
+} AGGRE_SIZE_E, *PAGGRE_SIZE_E;
+
+#define REG_AMPDU_MAX_LENGTH_8812 0x0458
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/rf.c b/drivers/staging/rtl8821ae/rtl8821ae/rf.c
new file mode 100644
index 000000000000..87c1c9746c43
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/rf.c
@@ -0,0 +1,464 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+
+static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
+
+void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (bandwidth) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 3);
+ rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 3);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 1);
+ rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 1);
+ break;
+ case HT_CHANNEL_WIDTH_80:
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 0);
+ rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 0);
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("unknown bandwidth: %#X\n", bandwidth));
+ break;
+ }
+}
+
+void rtl8821ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u32 tx_agc[2] = {0, 0}, tmpval;
+ bool turbo_scanoff = false;
+ u8 idx1, idx2;
+ u8 *ptr;
+ u8 direction;
+ u32 pwrtrac_value;
+
+ if (rtlefuse->eeprom_regulatory != 0)
+ turbo_scanoff = true;
+
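+ /* Each tx_agc entry is a per-path CCK TX AGC word with the same power
+ * index replicated into all four byte lanes (CCK 1M-11M rates). */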
+ if (mac->act_scanning) {
+ tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+ tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+
+ if (turbo_scanoff) {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+ }
+ } else {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+
+ if (rtlefuse->eeprom_regulatory == 0) {
+ tmpval =
+ (rtlphy->mcs_txpwrlevel_origoffset[0][6]) +
+ (rtlphy->mcs_txpwrlevel_origoffset[0][7] <<
+ 8);
+ tx_agc[RF90_PATH_A] += tmpval;
+
+ tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) +
+ (rtlphy->mcs_txpwrlevel_origoffset[0][15] <<
+ 24);
+ tx_agc[RF90_PATH_B] += tmpval;
+ }
+ }
+
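+ /* Clamp each per-rate byte of the packed TX AGC word to the RF6052 maximum. */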
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ ptr = (u8 *) (&(tx_agc[idx1]));
+ for (idx2 = 0; idx2 < 4; idx2++) {
+ if (*ptr > RF6052_MAX_TX_PWR)
+ *ptr = RF6052_MAX_TX_PWR;
+ ptr++;
+ }
+ }
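+ /* Apply the power-tracking offset: direction 1 raises the index, direction 2 lowers it. */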
+ rtl8821ae_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value);
+ if (direction == 1) {
+ tx_agc[0] += pwrtrac_value;
+ tx_agc[1] += pwrtrac_value;
+ } else if (direction == 2) {
+ tx_agc[0] -= pwrtrac_value;
+ tx_agc[1] -= pwrtrac_value;
+ }
+ tmpval = tx_agc[RF90_PATH_A];
+ rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKDWORD, tmpval);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 1~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_A_CCK11_CCK1));
+
+ tmpval = tx_agc[RF90_PATH_B];
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKDWORD, tmpval);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK11_CCK1));
+}
+
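+/* Build per-path OFDM/MCS power bases: each path's byte-wide power level is
+ * replicated into all four byte lanes of the 32-bit TX AGC base value. */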
+static void rtl8821ae_phy_get_power_base(struct ieee80211_hw *hw,
+ u8 *ppowerlevel_ofdm, u8 *ppowerlevel_bw20, u8 *ppowerlevel_bw40, u8 channel,
+ u32 *ofdmbase, u32 *mcsbase)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 powerBase0, powerBase1;
+ u8 i, powerlevel[2];
+
+ for (i = 0; i < 2; i++) {
+ powerBase0 = ppowerlevel_ofdm[i];
+
+ powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
+ (powerBase0 << 8) | powerBase0;
+ *(ofdmbase + i) = powerBase0;
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ (" [OFDM power base index rf(%c) = 0x%x]\n",
+ ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)));
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20)
+ powerlevel[i] = ppowerlevel_bw20[i];
+ else
+ powerlevel[i] = ppowerlevel_bw40[i];
+ powerBase1 = powerlevel[i];
+ powerBase1 = (powerBase1 << 24) |
+ (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
+
+ *(mcsbase + i) = powerBase1;
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ (" [MCS power base index rf(%c) = 0x%x]\n",
+ ((i == 0) ? 'A' : 'B'), *(mcsbase + i)));
+ }
+}
+
+static void _rtl8821ae_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
+ u8 channel, u8 index,
+ u32 *powerBase0,
+ u32 *powerBase1,
+ u32 *p_outwriteval)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 i, chnlgroup = 0, pwr_diff_limit[4], pwr_diff = 0, customer_pwr_diff;
+ u32 writeVal, customer_limit, rf;
+
+ for (rf = 0; rf < 2; rf++) {
+ switch (rtlefuse->eeprom_regulatory) {
+ case 0:
+ chnlgroup = 0;
+
+ writeVal =
+ rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index +
+ (rf ? 8 : 0)]
+ + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("RTK better performance, "
+ "writeVal(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ case 1:
+ if (rtlphy->pwrgroup_cnt == 1)
+ chnlgroup = 0;
+ else {
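+ /* Map channel number to power group: 1-2, 3-5, 6-8, 9-11, 12-13, 14. */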
+ if (channel < 3)
+ chnlgroup = 0;
+ else if (channel < 6)
+ chnlgroup = 1;
+ else if (channel < 9)
+ chnlgroup = 2;
+ else if (channel < 12)
+ chnlgroup = 3;
+ else if (channel < 14)
+ chnlgroup = 4;
+ else if (channel == 14)
+ chnlgroup = 5;
+ }
+
+ writeVal =
+ rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+ [index + (rf ? 8 : 0)] + ((index < 2) ?
+ powerBase0[rf] :
+ powerBase1[rf]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Realtek regulatory, 20MHz, "
+ "writeVal(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+
+ break;
+ case 2:
+ writeVal =
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Better regulatory, "
+ "writeVal(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ case 3:
+ chnlgroup = 0;
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("customer's limit, 40MHz "
+ "rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'),
+ rtlefuse->pwrgroup_ht40[rf][channel -
+ 1]));
+ } else {
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("customer's limit, 20MHz "
+ "rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'),
+ rtlefuse->pwrgroup_ht20[rf][channel -
+ 1]));
+ }
+
+ if (index < 2)
+ pwr_diff = rtlefuse->txpwr_legacyhtdiff[rf][channel-1];
+ else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20)
+ pwr_diff = rtlefuse->txpwr_ht20diff[rf][channel-1];
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
+ customer_pwr_diff = rtlefuse->pwrgroup_ht40[rf][channel-1];
+ else
+ customer_pwr_diff = rtlefuse->pwrgroup_ht20[rf][channel-1];
+
+ if (pwr_diff > customer_pwr_diff)
+ pwr_diff = 0;
+ else
+ pwr_diff = customer_pwr_diff - pwr_diff;
+
+ for (i = 0; i < 4; i++) {
+ pwr_diff_limit[i] =
+ (u8) ((rtlphy->mcs_txpwrlevel_origoffset
+ [chnlgroup][index + (rf ? 8 : 0)] & (0x7f <<
+ (i * 8))) >> (i * 8));
+
+ if (pwr_diff_limit[i] > pwr_diff)
+ pwr_diff_limit[i] = pwr_diff;
+ }
+
+ customer_limit = (pwr_diff_limit[3] << 24) |
+ (pwr_diff_limit[2] << 16) |
+ (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Customer's limit rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), customer_limit));
+
+ writeVal = customer_limit +
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Customer, writeVal rf(%c)= 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ default:
+ chnlgroup = 0;
+ writeVal =
+ rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+ [index + (rf ? 8 : 0)]
+ + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("RTK better performance, writeVal "
+ "rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ }
+
+ if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
+ writeVal = writeVal - 0x06060606;
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_BT2)
+ writeVal = writeVal - 0x0c0c0c0c;
+ *(p_outwriteval + rf) = writeVal;
+ }
+}
+
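+/* Write one packed TX AGC word per path: four per-rate power indices, each
+ * clamped to RF6052_MAX_TX_PWR, to the path A/B register for this rate group. */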
+static void _rtl8821ae_write_ofdm_power_reg(struct ieee80211_hw *hw,
+ u8 index, u32 *pValue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 regoffset_a[6] = {
+ RTXAGC_A_OFDM18_OFDM6, RTXAGC_A_OFDM54_OFDM24,
+ RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
+ RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
+ };
+ u16 regoffset_b[6] = {
+ RTXAGC_B_OFDM18_OFDM6, RTXAGC_B_OFDM54_OFDM24,
+ RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
+ RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
+ };
+ u8 i, rf, pwr_val[4];
+ u32 writeVal;
+ u16 regoffset;
+
+ for (rf = 0; rf < 2; rf++) {
+ writeVal = pValue[rf];
+ for (i = 0; i < 4; i++) {
+ pwr_val[i] = (u8) ((writeVal & (0x7f <<
+ (i * 8))) >> (i * 8));
+
+ if (pwr_val[i] > RF6052_MAX_TX_PWR)
+ pwr_val[i] = RF6052_MAX_TX_PWR;
+ }
+ writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+ (pwr_val[1] << 8) | pwr_val[0];
+
+ if (rf == 0)
+ regoffset = regoffset_a[index];
+ else
+ regoffset = regoffset_b[index];
+ rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Set 0x%x = %08x\n", regoffset, writeVal));
+ }
+}
+
+void rtl8821ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel_ofdm, u8 *ppowerlevel_bw20, u8 *ppowerlevel_bw40, u8 channel)
+{
+ u32 writeVal[2], powerBase0[2], powerBase1[2];
+ u8 index;
+ u8 direction;
+ u32 pwrtrac_value;
+
+ rtl8821ae_phy_get_power_base(hw, ppowerlevel_ofdm, ppowerlevel_bw20, ppowerlevel_bw40,
+ channel, &powerBase0[0], &powerBase1[0]);
+
+ rtl8821ae_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value);
+
+ for (index = 0; index < 6; index++) {
+ _rtl8821ae_get_txpower_writeval_by_regulatory(hw,
+ channel, index,
+ &powerBase0[0],
+ &powerBase1[0],
+ &writeVal[0]);
+ if (direction == 1) {
+ writeVal[0] += pwrtrac_value;
+ writeVal[1] += pwrtrac_value;
+ } else if (direction == 2) {
+ writeVal[0] -= pwrtrac_value;
+ writeVal[1] -= pwrtrac_value;
+ }
+ _rtl8821ae_write_ofdm_power_reg(hw, index, &writeVal[0]);
+ }
+}
+
+bool rtl8821ae_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (rtlphy->rf_type == RF_1T1R)
+ rtlphy->num_total_rfpath = 1;
+ else
+ rtlphy->num_total_rfpath = 2;
+
+ return _rtl8821ae_phy_rf6052_config_parafile(hw);
+}
+
+static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 rfpath;
+ bool rtstatus = true;
+
+ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+ switch (rfpath) {
+ case RF90_PATH_A: {
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtstatus = rtl8812ae_phy_config_rf_with_headerfile(hw,
+ (enum radio_path)rfpath);
+ else
+ rtstatus = rtl8821ae_phy_config_rf_with_headerfile(hw,
+ (enum radio_path)rfpath);
+ break;
+ }
+ case RF90_PATH_B: {
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtstatus = rtl8812ae_phy_config_rf_with_headerfile(hw,
+ (enum radio_path)rfpath);
+ else
+ rtstatus = rtl8821ae_phy_config_rf_with_headerfile(hw,
+ (enum radio_path)rfpath);
+ break;
+ }
+ case RF90_PATH_C:
+ break;
+ case RF90_PATH_D:
+ break;
+ }
+
+ if (!rtstatus) {
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("Radio[%d] Fail!!", rfpath));
+ return false;
+ }
+
+ }
+
+ /*put arrays in dm.c*/
+ /*_rtl8821ae_config_rf_txpwr_track_headerfile(hw);*/
+ RT_TRACE(COMP_INIT, DBG_TRACE, ("\n"));
+ return rtstatus;
+}
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/rf.h b/drivers/staging/rtl8821ae/rtl8821ae/rf.h
new file mode 100644
index 000000000000..b665c0ff1b7d
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/rf.h
@@ -0,0 +1,46 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_RF_H__
+#define __RTL8821AE_RF_H__
+
+#define RF6052_MAX_TX_PWR 0x3F
+#define RF6052_MAX_REG 0x3F
+
+void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
+ u8 bandwidth);
+void rtl8821ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl8821ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel_ofdm,
+ u8 *ppowerlevel_bw20,
+ u8 *ppowerlevel_bw40,
+ u8 channel);
+bool rtl8821ae_phy_rf6052_config(struct ieee80211_hw *hw);
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/sw.c b/drivers/staging/rtl8821ae/rtl8821ae/sw.c
new file mode 100644
index 000000000000..85a3474fc099
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/sw.c
@@ -0,0 +1,499 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "hw.h"
+#include "sw.h"
+#include "trx.h"
+#include "led.h"
+#include "table.h"
+#include "hal_btc.h"
+#include "../btcoexist/rtl_btc.h"
+
+void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ /* Close ASPM for AMD by default */
+ rtlpci->const_amdpci_aspm = 0;
+
+ /*
+ * ASPM PS mode.
+ * 0 - Disable ASPM,
+ * 1 - Enable ASPM without Clock Req,
+ * 2 - Enable ASPM with Clock Req,
+ * 3 - Always Enable ASPM with Clock Req,
+ * 4 - Always Enable ASPM without Clock Req.
+ * set default to RTL8192CE:3 RTL8192E:2
+ */
+ rtlpci->const_pci_aspm = 3;
+
+ /*Setting for PCI-E device */
+ rtlpci->const_devicepci_aspm_setting = 0x03;
+
+ /*Setting for PCI-E bridge */
+ rtlpci->const_hostpci_aspm_setting = 0x02;
+
+ /*
+ * In Hw/Sw Radio Off situation.
+ * 0 - Default,
+ * 1 - From ASPM setting without low Mac Pwr,
+ * 2 - From ASPM setting with low Mac Pwr,
+ * 3 - Bus D3
+ * set default to RTL8192CE:0 RTL8192SE:2
+ */
+ rtlpci->const_hwsw_rfoff_d3 = 0;
+
+ /*
+ * This setting works for those device with
+ * backdoor ASPM setting such as EPHY setting.
+ * 0 - Not support ASPM,
+ * 1 - Support ASPM,
+ * 2 - According to chipset.
+ */
+ rtlpci->const_support_pciaspm = 1;
+}
+
+/*InitializeVariables8812E*/
+int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
+{
+ int err = 0;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ const struct firmware *firmware;
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ char *fw_name = NULL;
+
+ rtl8821ae_bt_reg_init(hw);
+ rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
+
+ rtlpriv->dm.b_dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_flag = 0;
+ rtlpriv->dm.b_disable_framebursting = 0;
+ rtlpriv->dm.thermalvalue = 0;
+ rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25);
+
+ mac->ht_enable = true;
+
+ rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
+ /*following 2 is for register 5G band, refer to _rtl_init_mac80211()*/
+ rtlpriv->rtlhal.bandset = BAND_ON_BOTH;
+ rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;
+
+ rtlpci->receive_config = (RCR_APPFCS |
+ RCR_APP_MIC |
+ RCR_APP_ICV |
+ RCR_APP_PHYST_RXFF |
+ RCR_NONQOS_VHT |
+ RCR_HTC_LOC_CTRL |
+ RCR_AMF |
+ RCR_ACF |
+ RCR_ADF | /*This bit controls the PS-Poll packet filter.*/
+ RCR_AICV |
+ RCR_ACRC32 |
+ RCR_AB |
+ RCR_AM |
+ RCR_APM |
+ 0);
+
+
+ rtlpci->irq_mask[0] =
+ (u32) (IMR_PSTIMEOUT |
+ IMR_GTINT3 |
+ /*IMR_TBDER |
+ IMR_TBDOK |
+ IMR_BCNDMAINT0 |*/
+ IMR_HSISR_IND_ON_INT |
+ IMR_C2HCMD |
+ IMR_HIGHDOK |
+ IMR_MGNTDOK |
+ IMR_BKDOK |
+ IMR_BEDOK |
+ IMR_VIDOK |
+ IMR_VODOK |
+ IMR_RDU |
+ IMR_ROK |
+ 0);
+
+ rtlpci->irq_mask[1] =
+ (u32)(IMR_RXFOVW |
+ IMR_TXFOVW |
+ 0);
+
+ /* for LPS & IPS */
+ rtlpriv->psc.b_inactiveps = rtlpriv->cfg->mod_params->b_inactiveps;
+ rtlpriv->psc.b_swctrl_lps = rtlpriv->cfg->mod_params->b_swctrl_lps;
+ rtlpriv->psc.b_fwctrl_lps = rtlpriv->cfg->mod_params->b_fwctrl_lps;
+ rtlpriv->psc.b_reg_fwctrl_lps = 3;
+ rtlpriv->psc.reg_max_lps_awakeintvl = 5;
+ /* For ASPM, it can be disabled by setting
+ * const_support_pciaspm = 0 */
+ rtl8821ae_init_aspm_vars(hw);
+
+ if (rtlpriv->psc.b_reg_fwctrl_lps == 1)
+ rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
+ else if (rtlpriv->psc.b_reg_fwctrl_lps == 2)
+ rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
+ else if (rtlpriv->psc.b_reg_fwctrl_lps == 3)
+ rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
+
+ /* for firmware buf */
+ rtlpriv->rtlhal.pfirmware = vmalloc(0x8000);
+ if (!rtlpriv->rtlhal.pfirmware) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Can't alloc buffer for fw.\n"));
+ return 1;
+ }
+
+ fw_name = "rtlwifi/rtl8821aefw.bin";
+ err = request_firmware(&firmware, fw_name, rtlpriv->io.dev);
+ if (err) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Failed to request firmware!\n"));
+ vfree(rtlpriv->rtlhal.pfirmware);
+ rtlpriv->rtlhal.pfirmware = NULL;
+ return 1;
+ }
+
+ if (firmware->size > 0x8000) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Firmware is too big!\n"));
+ vfree(rtlpriv->rtlhal.pfirmware);
+ rtlpriv->rtlhal.pfirmware = NULL;
+ release_firmware(firmware);
+ return 1;
+ }
+
+ memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
+ rtlpriv->rtlhal.fwsize = firmware->size;
+ release_firmware(firmware);
+
+ if (rtlpriv->cfg->ops->get_btc_status()) {
+ rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
+ rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
+ }
+
+ RT_TRACE(COMP_INIT, DBG_LOUD, (" FirmwareDownload OK\n"));
+ return err;
+}
+
+void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ //printk("=========>rtl8821ae_deinit_sw_vars().\n");
+ if (rtlpriv->cfg->ops->get_btc_status()){
+ //printk("=========>rtl8821ae_deinit_sw_vars().get_btc_status\n");
+ rtlpriv->btcoexist.btc_ops->btc_halt_notify();
+ }
+ if (rtlpriv->rtlhal.pfirmware) {
+ //printk("=========>rtl8821ae_deinit_sw_vars().rtlpriv->rtlhal.pfirmware\n");
+ vfree(rtlpriv->rtlhal.pfirmware);
+ rtlpriv->rtlhal.pfirmware = NULL;
+ }
+ //printk("<=========rtl8821ae_deinit_sw_vars().\n");
+}
+
+u32 rtl8812ae_rx_command_packet_handler(struct ieee80211_hw *hw,
+ struct rtl_stats status,
+ struct sk_buff *skb)
+{
+ u32 result = 0;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (status.packet_report_type) {
+ case NORMAL_RX:
+ result = 0;
+ break;
+ case C2H_PACKET:
+ rtl8812ae_c2h_packet_handler(hw, skb->data, (u8) skb->len);
+ result = 1;
+ RT_TRACE(COMP_RECV, DBG_LOUD,
+ ("===>rtl8812ae_rx_command_packet_handler(): skb->len=%d\n\n", skb->len));
+ break;
+ default:
+ RT_TRACE(COMP_RECV, DBG_LOUD,
+ ("===>rtl8812ae_rx_command_packet_handler(): unknown packet type!\n"));
+ break;
+ }
+
+ return result;
+}
+
+
+/* get bt coexist status */
+bool rtl8821ae_get_btc_status(void)
+{
+ return true;
+}
+
+struct rtl_hal_ops rtl8821ae_hal_ops = {
+ .init_sw_vars = rtl8821ae_init_sw_vars,
+ .deinit_sw_vars = rtl8821ae_deinit_sw_vars,
+ .read_eeprom_info = rtl8821ae_read_eeprom_info,
+ .interrupt_recognized = rtl8821ae_interrupt_recognized,
+ .hw_init = rtl8821ae_hw_init,
+ .hw_disable = rtl8821ae_card_disable,
+ .hw_suspend = rtl8821ae_suspend,
+ .hw_resume = rtl8821ae_resume,
+ .enable_interrupt = rtl8821ae_enable_interrupt,
+ .disable_interrupt = rtl8821ae_disable_interrupt,
+ .set_network_type = rtl8821ae_set_network_type,
+ .set_chk_bssid = rtl8821ae_set_check_bssid,
+ .set_qos = rtl8821ae_set_qos,
+ .set_bcn_reg = rtl8821ae_set_beacon_related_registers,
+ .set_bcn_intv = rtl8821ae_set_beacon_interval,
+ .update_interrupt_mask = rtl8821ae_update_interrupt_mask,
+ .get_hw_reg = rtl8821ae_get_hw_reg,
+ .set_hw_reg = rtl8821ae_set_hw_reg,
+ .update_rate_tbl = rtl8821ae_update_hal_rate_tbl,
+ .fill_tx_desc = rtl8821ae_tx_fill_desc,
+ .fill_tx_cmddesc = rtl8821ae_tx_fill_cmddesc,
+ .query_rx_desc = rtl8821ae_rx_query_desc,
+ .set_channel_access = rtl8821ae_update_channel_access_setting,
+ .radio_onoff_checking = rtl8821ae_gpio_radio_on_off_checking,
+ .set_bw_mode = rtl8821ae_phy_set_bw_mode,
+ .switch_channel = rtl8821ae_phy_sw_chnl,
+ .dm_watchdog = rtl8821ae_dm_watchdog,
+ .scan_operation_backup = rtl8821ae_phy_scan_operation_backup,
+ .set_rf_power_state = rtl8821ae_phy_set_rf_power_state,
+ .led_control = rtl8821ae_led_control,
+ .set_desc = rtl8821ae_set_desc,
+ .get_desc = rtl8821ae_get_desc,
+ .is_tx_desc_closed = rtl8821ae_is_tx_desc_closed,
+ .tx_polling = rtl8821ae_tx_polling,
+ .enable_hw_sec = rtl8821ae_enable_hw_security_config,
+ .set_key = rtl8821ae_set_key,
+ .init_sw_leds = rtl8821ae_init_sw_leds,
+ .allow_all_destaddr = rtl8821ae_allow_all_destaddr,
+ .get_bbreg = rtl8821ae_phy_query_bb_reg,
+ .set_bbreg = rtl8821ae_phy_set_bb_reg,
+ .get_rfreg = rtl8821ae_phy_query_rf_reg,
+ .set_rfreg = rtl8821ae_phy_set_rf_reg,
+ .c2h_command_handle = rtl_8821ae_c2h_command_handle,
+ .bt_wifi_media_status_notify = rtl_8821ae_bt_wifi_media_status_notify,
+ .bt_turn_off_bt_coexist_before_enter_lps = rtl8821ae_dm_bt_turn_off_bt_coexist_before_enter_lps,
+ .fill_h2c_cmd = rtl8821ae_fill_h2c_cmd,
+ .get_btc_status = rtl8821ae_get_btc_status,
+ .rx_command_packet_handler = rtl8812ae_rx_command_packet_handler,
+};
+
+struct rtl_mod_params rtl8821ae_mod_params = {
+ .sw_crypto = false,
+ .b_inactiveps = false,
+ .b_swctrl_lps = false,
+ .b_fwctrl_lps = false,
+};
+
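+/*
+ * Per-chip configuration passed to the common core.  The maps[] entries
+ * translate the generic indexes used by the shared code (EFUSE_*, SEC_CAM_*,
+ * RTL_IMR_*, RTL_RC_*) into 8812ae/8821ae register addresses and bit
+ * definitions.
+ */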
+struct rtl_hal_cfg rtl8821ae_hal_cfg = {
+ .bar_id = 2,
+ .write_readback = true,
+ .name = "rtl8821ae_pci",
+ .fw_name = "rtlwifi/rtl8821aefw.bin",
+ .ops = &rtl8821ae_hal_ops,
+ .mod_params = &rtl8821ae_mod_params,
+ .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+ .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
+ .maps[SYS_CLK] = REG_SYS_CLKR,
+ .maps[MAC_RCR_AM] = AM,
+ .maps[MAC_RCR_AB] = AB,
+ .maps[MAC_RCR_ACRC32] = ACRC32,
+ .maps[MAC_RCR_ACF] = ACF,
+ .maps[MAC_RCR_AAP] = AAP,
+ .maps[MAC_HIMR] = REG_HIMR,
+ .maps[MAC_HIMRE] = REG_HIMRE,
+
+ .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS,
+ .maps[EFUSE_TEST] = REG_EFUSE_TEST,
+ .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_CLK] = 0,
+ .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
+ .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
+ .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
+ .maps[EFUSE_ANA8M] = ANA8M,
+ .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
+ .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
+ .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
+ .maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES,
+
+ .maps[RWCAM] = REG_CAMCMD,
+ .maps[WCAMI] = REG_CAMWRITE,
+ .maps[RCAMO] = REG_CAMREAD,
+ .maps[CAMDBG] = REG_CAMDBG,
+ .maps[SECR] = REG_SECCFG,
+ .maps[SEC_CAM_NONE] = CAM_NONE,
+ .maps[SEC_CAM_WEP40] = CAM_WEP40,
+ .maps[SEC_CAM_TKIP] = CAM_TKIP,
+ .maps[SEC_CAM_AES] = CAM_AES,
+ .maps[SEC_CAM_WEP104] = CAM_WEP104,
+
+ .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
+ .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
+ .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
+ .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
+ .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
+ .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
+/* .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8, */ /* needs checking */
+ .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
+ .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
+ .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
+ .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
+ .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
+ .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
+ .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
+/* .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,*/
+/* .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,*/
+
+ .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
+ .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
+ .maps[RTL_IMR_BcnInt] = IMR_BCNDMAINT0,
+ .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
+ .maps[RTL_IMR_RDU] = IMR_RDU,
+ .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
+ .maps[RTL_IMR_BDOK] = IMR_BCNDOK0,
+ .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
+ .maps[RTL_IMR_TBDER] = IMR_TBDER,
+ .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
+ .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
+ .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
+ .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
+ .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
+ .maps[RTL_IMR_VODOK] = IMR_VODOK,
+ .maps[RTL_IMR_ROK] = IMR_ROK,
+ .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER),
+
+ .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+static struct pci_device_id rtl8821ae_pci_ids[] = {
+ {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8812, rtl8821ae_hal_cfg)},
+ {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8821, rtl8821ae_hal_cfg)},
+ {},
+};
+#else
+static struct pci_device_id rtl8821ae_pci_ids[] __devinitdata = {
+ {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8812, rtl8821ae_hal_cfg)},
+ {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8821, rtl8821ae_hal_cfg)},
+ {},
+};
+#endif
+
+MODULE_DEVICE_TABLE(pci, rtl8821ae_pci_ids);
+
+MODULE_AUTHOR("Ping Yan<ping_yan@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8821ae 802.11ac PCI wireless");
+MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin");
+
+module_param_named(swenc, rtl8821ae_mod_params.sw_crypto, bool, 0444);
+module_param_named(ips, rtl8821ae_mod_params.b_inactiveps, bool, 0444);
+module_param_named(swlps, rtl8821ae_mod_params.b_swctrl_lps, bool, 0444);
+module_param_named(fwlps, rtl8821ae_mod_params.b_fwctrl_lps, bool, 0444);
+MODULE_PARM_DESC(swenc, "Set to 1 to use software crypto (default 0, hardware crypto)\n");
+MODULE_PARM_DESC(ips, "Set to 1 to use inactive (no-link) power save (default 0)\n");
+MODULE_PARM_DESC(swlps, "Set to 1 to use SW-controlled link power save (default 0)\n");
+MODULE_PARM_DESC(fwlps, "Set to 1 to use FW-controlled link power save (default 0)\n");
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+static const SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+compat_pci_suspend(rtl_pci_suspend)
+compat_pci_resume(rtl_pci_resume)
+#endif
+
+static struct pci_driver rtl8821ae_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = rtl8821ae_pci_ids,
+ .probe = rtl_pci_probe,
+ .remove = rtl_pci_disconnect,
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+ .driver.pm = &rtlwifi_pm_ops,
+#elif defined(CONFIG_PM)
+ .suspend = rtl_pci_suspend_compat,
+ .resume = rtl_pci_resume_compat,
+#endif
+
+};
+
+extern int rtl_core_module_init(void);
+extern void rtl_core_module_exit(void);
+
+static int __init rtl8821ae_module_init(void)
+{
+ int ret;
+
+ ret = rtl_core_module_init();
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&rtl8821ae_driver);
+ if (ret)
+ RT_ASSERT(false, (": No device found\n"));
+
+ return ret;
+}
+
+static void __exit rtl8821ae_module_exit(void)
+{
+ pci_unregister_driver(&rtl8821ae_driver);
+ rtl_core_module_exit();
+}
+
+module_init(rtl8821ae_module_init);
+module_exit(rtl8821ae_module_exit);
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/sw.h b/drivers/staging/rtl8821ae/rtl8821ae/sw.h
new file mode 100644
index 000000000000..3d49b2f043da
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/sw.h
@@ -0,0 +1,39 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_SW_H__
+#define __RTL8821AE_SW_H__
+
+int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw);
+void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw);
+void rtl8821ae_init_var_map(struct ieee80211_hw *hw);
+bool rtl8821ae_get_btc_status(void);
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/table.c b/drivers/staging/rtl8821ae/rtl8821ae/table.c
new file mode 100644
index 000000000000..a6c4ca4fd9b2
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/table.c
@@ -0,0 +1,4002 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Created on 2010/ 5/18, 1:41
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "table.h"
+u32 RTL8812AE_PHY_REG_ARRAY[] = {
+ 0x800, 0x8020D010,
+ 0x804, 0x080112E0,
+ 0x808, 0x0E028233,
+ 0x80C, 0x12131113,
+ 0x810, 0x20101263,
+ 0x814, 0x020C3D10,
+ 0x818, 0x03A00385,
+ 0x820, 0x00000000,
+ 0x824, 0x00030FE0,
+ 0x828, 0x00000000,
+ 0x82C, 0x002083DD,
+ 0x830, 0x2AAA6C86,
+ 0x834, 0x0037A706,
+ 0x838, 0x06C89B44,
+ 0x83C, 0x0000095B,
+ 0x840, 0xC0000001,
+ 0x844, 0x40003CDE,
+ 0x848, 0x6210FF8B,
+ 0x84C, 0x6CFDFFB8,
+ 0x850, 0x28874706,
+ 0x854, 0x0001520C,
+ 0x858, 0x8060E000,
+ 0x85C, 0x74210168,
+ 0x860, 0x6929C321,
+ 0x864, 0x79727432,
+ 0x868, 0x8CA7A314,
+ 0x86C, 0x338C2878,
+ 0x870, 0x03333333,
+ 0x874, 0x31602C2E,
+ 0x878, 0x00003152,
+ 0x87C, 0x000FC000,
+ 0x8A0, 0x00000013,
+ 0x8A4, 0x7F7F7F7F,
+ 0x8A8, 0xA202033E,
+ 0x8AC, 0x0FF0FA0A,
+ 0x8B0, 0x00000600,
+ 0x8B4, 0x000FC080,
+ 0x8B8, 0x6C0057FF,
+ 0x8BC, 0x4CA520A3,
+ 0x8C0, 0x27F00020,
+ 0x8C4, 0x00000000,
+ 0x8C8, 0x00013169,
+ 0x8CC, 0x08248492,
+ 0x8D0, 0x0000B800,
+ 0x8DC, 0x00000000,
+ 0x8D4, 0x940008A0,
+ 0x8D8, 0x290B5612,
+ 0x8F8, 0x400002C0,
+ 0x8FC, 0x00000000,
+ 0xFF0F07D8, 0xABCD,
+ 0x900, 0x00000700,
+ 0xFF0F07D0, 0xCDEF,
+ 0x900, 0x00000700,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x900, 0x00000700,
+ 0xFF0F07D8, 0xDEAD,
+ 0x90C, 0x00000000,
+ 0x910, 0x0000FC00,
+ 0x914, 0x00000404,
+ 0x918, 0x1C1028C0,
+ 0x91C, 0x64B11A1C,
+ 0x920, 0xE0767233,
+ 0x924, 0x055AA500,
+ 0x928, 0x00000004,
+ 0x92C, 0xFFFE0000,
+ 0x930, 0xFFFFFFFE,
+ 0x934, 0x001FFFFF,
+ 0x960, 0x00000000,
+ 0x964, 0x00000000,
+ 0x968, 0x00000000,
+ 0x96C, 0x00000000,
+ 0x970, 0x801FFFFF,
+ 0x978, 0x00000000,
+ 0x97C, 0x00000000,
+ 0x980, 0x00000000,
+ 0x984, 0x00000000,
+ 0x988, 0x00000000,
+ 0x990, 0x27100000,
+ 0x994, 0xFFFF0100,
+ 0x998, 0xFFFFFF5C,
+ 0x99C, 0xFFFFFFFF,
+ 0x9A0, 0x000000FF,
+ 0x9A4, 0x00080080,
+ 0x9A8, 0x00000000,
+ 0x9AC, 0x00000000,
+ 0x9B0, 0x81081008,
+ 0x9B4, 0x00000000,
+ 0x9B8, 0x01081008,
+ 0x9BC, 0x01081008,
+ 0x9D0, 0x00000000,
+ 0x9D4, 0x00000000,
+ 0x9D8, 0x00000000,
+ 0x9DC, 0x00000000,
+ 0x9E4, 0x00000002,
+ 0x9E8, 0x000002D5,
+ 0xA00, 0x00D047C8,
+ 0xA04, 0x01FF000C,
+ 0xA08, 0x8C838300,
+ 0xA0C, 0x2E7F000F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x11144028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0x1A1B0000,
+ 0xA24, 0x090E1317,
+ 0xA28, 0x00000204,
+ 0xA2C, 0x00900000,
+ 0xA70, 0x101FFF00,
+ 0xA74, 0x00000008,
+ 0xA78, 0x00000900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x218075B2,
+ 0xA84, 0x001F8C80,
+ 0xB00, 0x03100000,
+ 0xB04, 0x0000B000,
+ 0xB08, 0xAE0201EB,
+ 0xB0C, 0x01003207,
+ 0xB10, 0x00009807,
+ 0xB14, 0x01000000,
+ 0xB18, 0x00000002,
+ 0xB1C, 0x00000002,
+ 0xB20, 0x0000001F,
+ 0xB24, 0x03020100,
+ 0xB28, 0x07060504,
+ 0xB2C, 0x0B0A0908,
+ 0xB30, 0x0F0E0D0C,
+ 0xB34, 0x13121110,
+ 0xB38, 0x17161514,
+ 0xB3C, 0x0000003A,
+ 0xB40, 0x00000000,
+ 0xB44, 0x00000000,
+ 0xB48, 0x13000032,
+ 0xB4C, 0x48080000,
+ 0xB50, 0x00000000,
+ 0xB54, 0x00000000,
+ 0xB58, 0x00000000,
+ 0xB5C, 0x00000000,
+ 0xC00, 0x00000007,
+ 0xC04, 0x00042020,
+ 0xC08, 0x80410231,
+ 0xC0C, 0x00000000,
+ 0xC10, 0x00000100,
+ 0xC14, 0x01000000,
+ 0xC1C, 0x40000003,
+ 0xC20, 0x12121212,
+ 0xC24, 0x12121212,
+ 0xC28, 0x12121212,
+ 0xC2C, 0x12121212,
+ 0xC30, 0x12121212,
+ 0xC34, 0x12121212,
+ 0xC38, 0x12121212,
+ 0xC3C, 0x12121212,
+ 0xC40, 0x12121212,
+ 0xC44, 0x12121212,
+ 0xC48, 0x12121212,
+ 0xC4C, 0x12121212,
+ 0xC50, 0x00000020,
+ 0xC54, 0x0008121C,
+ 0xC58, 0x30000C1C,
+ 0xC5C, 0x00000058,
+ 0xC60, 0x34344443,
+ 0xC64, 0x07003333,
+ 0xC68, 0x59791979,
+ 0xC6C, 0x59795979,
+ 0xC70, 0x19795979,
+ 0xC74, 0x19795979,
+ 0xC78, 0x19791979,
+ 0xC7C, 0x19791979,
+ 0xC80, 0x19791979,
+ 0xC84, 0x19791979,
+ 0xC94, 0x0100005C,
+ 0xC98, 0x00000000,
+ 0xC9C, 0x00000000,
+ 0xCA0, 0x00000029,
+ 0xCA4, 0x08040201,
+ 0xCA8, 0x80402010,
+ 0xFF0F0740, 0xABCD,
+ 0xCB0, 0x77547717,
+ 0xFF0F01C0, 0xCDEF,
+ 0xCB0, 0x77547717,
+ 0xFF0F02C0, 0xCDEF,
+ 0xCB0, 0x77547717,
+ 0xFF0F07D8, 0xCDEF,
+ 0xCB0, 0x54547710,
+ 0xFF0F07D0, 0xCDEF,
+ 0xCB0, 0x54547710,
+ 0xCDCDCDCD, 0xCDCD,
+ 0xCB0, 0x77547777,
+ 0xFF0F0740, 0xDEAD,
+ 0xCB4, 0x00000077,
+ 0xCB8, 0x00508242,
+ 0xE00, 0x00000007,
+ 0xE04, 0x00042020,
+ 0xE08, 0x80410231,
+ 0xE0C, 0x00000000,
+ 0xE10, 0x00000100,
+ 0xE14, 0x01000000,
+ 0xE1C, 0x40000003,
+ 0xE20, 0x12121212,
+ 0xE24, 0x12121212,
+ 0xE28, 0x12121212,
+ 0xE2C, 0x12121212,
+ 0xE30, 0x12121212,
+ 0xE34, 0x12121212,
+ 0xE38, 0x12121212,
+ 0xE3C, 0x12121212,
+ 0xE40, 0x12121212,
+ 0xE44, 0x12121212,
+ 0xE48, 0x12121212,
+ 0xE4C, 0x12121212,
+ 0xE50, 0x00000020,
+ 0xE54, 0x0008121C,
+ 0xE58, 0x30000C1C,
+ 0xE5C, 0x00000058,
+ 0xE60, 0x34344443,
+ 0xE64, 0x07003333,
+ 0xE68, 0x59791979,
+ 0xE6C, 0x59795979,
+ 0xE70, 0x19795979,
+ 0xE74, 0x19795979,
+ 0xE78, 0x19791979,
+ 0xE7C, 0x19791979,
+ 0xE80, 0x19791979,
+ 0xE84, 0x19791979,
+ 0xE94, 0x0100005C,
+ 0xE98, 0x00000000,
+ 0xE9C, 0x00000000,
+ 0xEA0, 0x00000029,
+ 0xEA4, 0x08040201,
+ 0xEA8, 0x80402010,
+ 0xFF0F0740, 0xABCD,
+ 0xEB0, 0x77547717,
+ 0xFF0F01C0, 0xCDEF,
+ 0xEB0, 0x77547717,
+ 0xFF0F02C0, 0xCDEF,
+ 0xEB0, 0x77547717,
+ 0xFF0F07D8, 0xCDEF,
+ 0xEB0, 0x54547710,
+ 0xFF0F07D0, 0xCDEF,
+ 0xEB0, 0x54547710,
+ 0xCDCDCDCD, 0xCDCD,
+ 0xEB0, 0x77547777,
+ 0xFF0F0740, 0xDEAD,
+ 0xEB4, 0x00000077,
+ 0xEB8, 0x00508242,
+};
+
+u32 RTL8821AE_PHY_REG_ARRAY[] = {
+ 0x800, 0x0020D090,
+ 0x804, 0x080112E0,
+ 0x808, 0x0E028211,
+ 0x80C, 0x92131111,
+ 0x810, 0x20101261,
+ 0x814, 0x020C3D10,
+ 0x818, 0x03A00385,
+ 0x820, 0x00000000,
+ 0x824, 0x00030FE0,
+ 0x828, 0x00000000,
+ 0x82C, 0x002081DD,
+ 0x830, 0x2AAA8E24,
+ 0x834, 0x0037A706,
+ 0x838, 0x06489B44,
+ 0x83C, 0x0000095B,
+ 0x840, 0xC0000001,
+ 0x844, 0x40003CDE,
+ 0x848, 0x62103F8B,
+ 0x84C, 0x6CFDFFB8,
+ 0x850, 0x28874706,
+ 0x854, 0x0001520C,
+ 0x858, 0x8060E000,
+ 0x85C, 0x74210168,
+ 0x860, 0x6929C321,
+ 0x864, 0x79727432,
+ 0x868, 0x8CA7A314,
+ 0x86C, 0x888C2878,
+ 0x870, 0x08888888,
+ 0x874, 0x31612C2E,
+ 0x878, 0x00000152,
+ 0x87C, 0x000FD000,
+ 0x8A0, 0x00000013,
+ 0x8A4, 0x7F7F7F7F,
+ 0x8A8, 0xA2000338,
+ 0x8AC, 0x0FF0FA0A,
+ 0x8B4, 0x000FC080,
+ 0x8B8, 0x6C10D7FF,
+ 0x8BC, 0x0CA52090,
+ 0x8C0, 0x1BF00020,
+ 0x8C4, 0x00000000,
+ 0x8C8, 0x00013169,
+ 0x8CC, 0x08248492,
+ 0x8D4, 0x940008A0,
+ 0x8D8, 0x290B5612,
+ 0x8F8, 0x400002C0,
+ 0x8FC, 0x00000000,
+ 0x900, 0x00000700,
+ 0x90C, 0x00000000,
+ 0x910, 0x0000FC00,
+ 0x914, 0x00000404,
+ 0x918, 0x1C1028C0,
+ 0x91C, 0x64B11A1C,
+ 0x920, 0xE0767233,
+ 0x924, 0x055AA500,
+ 0x928, 0x00000004,
+ 0x92C, 0xFFFE0000,
+ 0x930, 0xFFFFFFFE,
+ 0x934, 0x001FFFFF,
+ 0x960, 0x00000000,
+ 0x964, 0x00000000,
+ 0x968, 0x00000000,
+ 0x96C, 0x00000000,
+ 0x970, 0x801FFFFF,
+ 0x974, 0x000003FF,
+ 0x978, 0x00000000,
+ 0x97C, 0x00000000,
+ 0x980, 0x00000000,
+ 0x984, 0x00000000,
+ 0x988, 0x00000000,
+ 0x990, 0x27100000,
+ 0x994, 0xFFFF0100,
+ 0x998, 0xFFFFFF5C,
+ 0x99C, 0xFFFFFFFF,
+ 0x9A0, 0x000000FF,
+ 0x9A4, 0x00480080,
+ 0x9A8, 0x00000000,
+ 0x9AC, 0x00000000,
+ 0x9B0, 0x81081008,
+ 0x9B4, 0x01081008,
+ 0x9B8, 0x01081008,
+ 0x9BC, 0x01081008,
+ 0x9D0, 0x00000000,
+ 0x9D4, 0x00000000,
+ 0x9D8, 0x00000000,
+ 0x9DC, 0x00000000,
+ 0x9E0, 0x00005D00,
+ 0x9E4, 0x00000002,
+ 0x9E8, 0x00000001,
+ 0xA00, 0x00D047C8,
+ 0xA04, 0x01FF000C,
+ 0xA08, 0x8C8A8300,
+ 0xA0C, 0x2E68000F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x11144028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0x1A1B0000,
+ 0xA24, 0x090E1317,
+ 0xA28, 0x00000204,
+ 0xA2C, 0x00900000,
+ 0xA70, 0x101FFF00,
+ 0xA74, 0x00000008,
+ 0xA78, 0x00000900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x21805490,
+ 0xA84, 0x001F0000,
+ 0xB00, 0x03100040,
+ 0xB04, 0x0000B000,
+ 0xB08, 0xAE0201EB,
+ 0xB0C, 0x01003207,
+ 0xB10, 0x00009807,
+ 0xB14, 0x01000000,
+ 0xB18, 0x00000002,
+ 0xB1C, 0x00000002,
+ 0xB20, 0x0000001F,
+ 0xB24, 0x03020100,
+ 0xB28, 0x07060504,
+ 0xB2C, 0x0B0A0908,
+ 0xB30, 0x0F0E0D0C,
+ 0xB34, 0x13121110,
+ 0xB38, 0x17161514,
+ 0xB3C, 0x0000003A,
+ 0xB40, 0x00000000,
+ 0xB44, 0x00000000,
+ 0xB48, 0x13000032,
+ 0xB4C, 0x48080000,
+ 0xB50, 0x00000000,
+ 0xB54, 0x00000000,
+ 0xB58, 0x00000000,
+ 0xB5C, 0x00000000,
+ 0xC00, 0x00000007,
+ 0xC04, 0x00042020,
+ 0xC08, 0x80410231,
+ 0xC0C, 0x00000000,
+ 0xC10, 0x00000100,
+ 0xC14, 0x01000000,
+ 0xC1C, 0x40000003,
+ 0xC20, 0x2C2C2C2C,
+ 0xC24, 0x30303030,
+ 0xC28, 0x30303030,
+ 0xC2C, 0x2C2C2C2C,
+ 0xC30, 0x2C2C2C2C,
+ 0xC34, 0x2C2C2C2C,
+ 0xC38, 0x2C2C2C2C,
+ 0xC3C, 0x2A2A2A2A,
+ 0xC40, 0x2A2A2A2A,
+ 0xC44, 0x2A2A2A2A,
+ 0xC48, 0x2A2A2A2A,
+ 0xC4C, 0x2A2A2A2A,
+ 0xC50, 0x00000020,
+ 0xC54, 0x001C1208,
+ 0xC58, 0x30000C1C,
+ 0xC5C, 0x00000058,
+ 0xC60, 0x34344443,
+ 0xC64, 0x07003333,
+ 0xC68, 0x19791979,
+ 0xC6C, 0x19791979,
+ 0xC70, 0x19791979,
+ 0xC74, 0x19791979,
+ 0xC78, 0x19791979,
+ 0xC7C, 0x19791979,
+ 0xC80, 0x19791979,
+ 0xC84, 0x19791979,
+ 0xC94, 0x0100005C,
+ 0xC98, 0x00000000,
+ 0xC9C, 0x00000000,
+ 0xCA0, 0x00000029,
+ 0xCA4, 0x08040201,
+ 0xCA8, 0x80402010,
+ 0xCB0, 0x77775747,
+ 0xCB4, 0x10000077,
+ 0xCB8, 0x00508240,
+};
+
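+/*
+ * Power-by-rate ("PG") tables.  Each row appears to be
+ * { band, RF path, TX count, register offset, bitmask, packed per-rate power
+ * values }, consumed by the PHY code when storing TX power offsets.
+ */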
+u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
+ 0, 0, 0, 0x00000c20, 0xffffffff, 0x34363840,
+ 0, 0, 0, 0x00000c24, 0xffffffff, 0x42424444,
+ 0, 0, 0, 0x00000c28, 0xffffffff, 0x30323638,
+ 0, 0, 0, 0x00000c2c, 0xffffffff, 0x40424444,
+ 0, 0, 0, 0x00000c30, 0xffffffff, 0x28303236,
+ 0, 0, 1, 0x00000c34, 0xffffffff, 0x38404242,
+ 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283034,
+ 0, 0, 0, 0x00000c3c, 0xffffffff, 0x40424444,
+ 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303236,
+ 0, 0, 0, 0x00000c44, 0xffffffff, 0x42422426,
+ 0, 0, 1, 0x00000c48, 0xffffffff, 0x30343840,
+ 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
+ 0, 1, 0, 0x00000e20, 0xffffffff, 0x34363840,
+ 0, 1, 0, 0x00000e24, 0xffffffff, 0x42424444,
+ 0, 1, 0, 0x00000e28, 0xffffffff, 0x30323638,
+ 0, 1, 0, 0x00000e2c, 0xffffffff, 0x40424444,
+ 0, 1, 0, 0x00000e30, 0xffffffff, 0x28303236,
+ 0, 1, 1, 0x00000e34, 0xffffffff, 0x38404242,
+ 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283034,
+ 0, 1, 0, 0x00000e3c, 0xffffffff, 0x40424444,
+ 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303236,
+ 0, 1, 0, 0x00000e44, 0xffffffff, 0x42422426,
+ 0, 1, 1, 0x00000e48, 0xffffffff, 0x30343840,
+ 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628,
+ 1, 0, 0, 0x00000c24, 0xffffffff, 0x42424444,
+ 1, 0, 0, 0x00000c28, 0xffffffff, 0x30323640,
+ 1, 0, 0, 0x00000c2c, 0xffffffff, 0x40424444,
+ 1, 0, 0, 0x00000c30, 0xffffffff, 0x28303236,
+ 1, 0, 1, 0x00000c34, 0xffffffff, 0x38404242,
+ 1, 0, 1, 0x00000c38, 0xffffffff, 0x26283034,
+ 1, 0, 0, 0x00000c3c, 0xffffffff, 0x40424444,
+ 1, 0, 0, 0x00000c40, 0xffffffff, 0x28303236,
+ 1, 0, 0, 0x00000c44, 0xffffffff, 0x42422426,
+ 1, 0, 1, 0x00000c48, 0xffffffff, 0x30343840,
+ 1, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
+ 1, 1, 0, 0x00000e24, 0xffffffff, 0x42424444,
+ 1, 1, 0, 0x00000e28, 0xffffffff, 0x30323640,
+ 1, 1, 0, 0x00000e2c, 0xffffffff, 0x40424444,
+ 1, 1, 0, 0x00000e30, 0xffffffff, 0x28303236,
+ 1, 1, 1, 0x00000e34, 0xffffffff, 0x38404242,
+ 1, 1, 1, 0x00000e38, 0xffffffff, 0x26283034,
+ 1, 1, 0, 0x00000e3c, 0xffffffff, 0x40424444,
+ 1, 1, 0, 0x00000e40, 0xffffffff, 0x28303236,
+ 1, 1, 0, 0x00000e44, 0xffffffff, 0x42422426,
+ 1, 1, 1, 0x00000e48, 0xffffffff, 0x30343840,
+ 1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628
+};
+
+u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
+ 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
+ 0, 0, 0, 0x00000c24, 0xffffffff, 0x36363838,
+ 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234,
+ 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363838,
+ 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032,
+ 0, 0, 0, 0x00000c3c, 0xffffffff, 0x32343636,
+ 0, 0, 0, 0x00000c40, 0xffffffff, 0x24262830,
+ 0, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022,
+ 1, 0, 0, 0x00000c24, 0xffffffff, 0x34343636,
+ 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032,
+ 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343636,
+ 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830,
+ 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343636,
+ 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830,
+ 1, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022
+};
+
+/* This table appears to be unused:
+u8 *RTL8821AE_TXPWR_LMT_ARRAY[] = {
+ "FCC", "2.4G", "20M", "CCK", "1T", "01", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "01", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "01", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "02", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "02", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "02", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "03", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "03", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "03", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "04", "34",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "04", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "04", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "05", "34",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "05", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "05", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "06", "34",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "06", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "06", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "07", "34",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "07", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "07", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "08", "34",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "08", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "08", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "09", "34",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "09", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "09", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "10", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "10", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "10", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "11", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "11", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "11", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "12", "63",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "12", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "12", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "13", "63",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "13", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "13", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "14", "63",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "14", "63",
+ "MKK", "2.4G", "20M", "CCK", "1T", "14", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "01", "30",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "01", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "01", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "02", "30",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "02", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "02", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "03", "30",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "03", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "03", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "04", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "04", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "04", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "05", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "05", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "05", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "06", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "06", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "06", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "07", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "07", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "07", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "08", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "08", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "08", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "09", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "09", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "09", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "10", "30",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "10", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "10", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "11", "30",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "11", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "11", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "12", "63",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "12", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "12", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "13", "63",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "13", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "13", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "14", "63",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "14", "63",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "14", "63",
+ "FCC", "2.4G", "20M", "HT", "1T", "01", "26",
+ "ETSI", "2.4G", "20M", "HT", "1T", "01", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "01", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "02", "26",
+ "ETSI", "2.4G", "20M", "HT", "1T", "02", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "02", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "03", "26",
+ "ETSI", "2.4G", "20M", "HT", "1T", "03", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "03", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "04", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "04", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "04", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "05", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "05", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "05", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "06", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "06", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "06", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "07", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "07", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "07", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "08", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "08", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "08", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "09", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "09", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "09", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "10", "26",
+ "ETSI", "2.4G", "20M", "HT", "1T", "10", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "10", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "11", "26",
+ "ETSI", "2.4G", "20M", "HT", "1T", "11", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "11", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "12", "63",
+ "ETSI", "2.4G", "20M", "HT", "1T", "12", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "12", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "13", "63",
+ "ETSI", "2.4G", "20M", "HT", "1T", "13", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "13", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "14", "63",
+ "ETSI", "2.4G", "20M", "HT", "1T", "14", "63",
+ "MKK", "2.4G", "20M", "HT", "1T", "14", "63",
+ "FCC", "2.4G", "20M", "HT", "2T", "01", "30",
+ "ETSI", "2.4G", "20M", "HT", "2T", "01", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "01", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "02", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "02", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "02", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "03", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "03", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "03", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "04", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "04", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "04", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "05", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "05", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "05", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "06", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "06", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "06", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "07", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "07", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "07", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "08", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "08", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "08", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "09", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "09", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "09", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "10", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "10", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "10", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "11", "30",
+ "ETSI", "2.4G", "20M", "HT", "2T", "11", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "11", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "12", "63",
+ "ETSI", "2.4G", "20M", "HT", "2T", "12", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "12", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "13", "63",
+ "ETSI", "2.4G", "20M", "HT", "2T", "13", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "13", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "14", "63",
+ "ETSI", "2.4G", "20M", "HT", "2T", "14", "63",
+ "MKK", "2.4G", "20M", "HT", "2T", "14", "63",
+ "FCC", "2.4G", "40M", "HT", "1T", "01", "63",
+ "ETSI", "2.4G", "40M", "HT", "1T", "01", "63",
+ "MKK", "2.4G", "40M", "HT", "1T", "01", "63",
+ "FCC", "2.4G", "40M", "HT", "1T", "02", "63",
+ "ETSI", "2.4G", "40M", "HT", "1T", "02", "63",
+ "MKK", "2.4G", "40M", "HT", "1T", "02", "63",
+ "FCC", "2.4G", "40M", "HT", "1T", "03", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "03", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "03", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "04", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "04", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "04", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "05", "32",
+ "ETSI", "2.4G", "40M", "HT", "1T", "05", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "05", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "06", "32",
+ "ETSI", "2.4G", "40M", "HT", "1T", "06", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "06", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "07", "32",
+ "ETSI", "2.4G", "40M", "HT", "1T", "07", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "07", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "08", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "08", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "08", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "09", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "09", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "09", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "10", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "10", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "10", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "11", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "11", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "11", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "12", "63",
+ "ETSI", "2.4G", "40M", "HT", "1T", "12", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "12", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "13", "63",
+ "ETSI", "2.4G", "40M", "HT", "1T", "13", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "13", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "14", "63",
+ "ETSI", "2.4G", "40M", "HT", "1T", "14", "63",
+ "MKK", "2.4G", "40M", "HT", "1T", "14", "63",
+ "FCC", "2.4G", "40M", "HT", "2T", "01", "63",
+ "ETSI", "2.4G", "40M", "HT", "2T", "01", "63",
+ "MKK", "2.4G", "40M", "HT", "2T", "01", "63",
+ "FCC", "2.4G", "40M", "HT", "2T", "02", "63",
+ "ETSI", "2.4G", "40M", "HT", "2T", "02", "63",
+ "MKK", "2.4G", "40M", "HT", "2T", "02", "63",
+ "FCC", "2.4G", "40M", "HT", "2T", "03", "30",
+ "ETSI", "2.4G", "40M", "HT", "2T", "03", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "03", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "04", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "04", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "04", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "05", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "05", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "05", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "06", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "06", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "06", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "07", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "07", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "07", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "08", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "08", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "08", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "09", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "09", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "09", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "10", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "10", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "10", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "11", "30",
+ "ETSI", "2.4G", "40M", "HT", "2T", "11", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "11", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "12", "63",
+ "ETSI", "2.4G", "40M", "HT", "2T", "12", "32",
+ "MKK", "2.4G", "40M", "HT", "2T", "12", "32",
+ "FCC", "2.4G", "40M", "HT", "2T", "13", "63",
+ "ETSI", "2.4G", "40M", "HT", "2T", "13", "32",
+ "MKK", "2.4G", "40M", "HT", "2T", "13", "32",
+ "FCC", "2.4G", "40M", "HT", "2T", "14", "63",
+ "ETSI", "2.4G", "40M", "HT", "2T", "14", "63",
+ "MKK", "2.4G", "40M", "HT", "2T", "14", "63",
+ "FCC", "5G", "20M", "OFDM", "1T", "36", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "36", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "36", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "40", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "40", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "40", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "44", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "44", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "44", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "48", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "48", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "48", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "52", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "52", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "52", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "56", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "56", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "56", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "60", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "60", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "60", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "64", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "64", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "64", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "100", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "100", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "100", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "114", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "114", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "114", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "108", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "108", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "108", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "112", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "112", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "112", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "116", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "116", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "116", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "120", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "120", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "120", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "124", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "124", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "124", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "128", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "128", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "128", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "132", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "132", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "132", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "136", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "136", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "136", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "140", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "140", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "140", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "149", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "149", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "149", "63",
+ "FCC", "5G", "20M", "OFDM", "1T", "153", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "153", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "153", "63",
+ "FCC", "5G", "20M", "OFDM", "1T", "157", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "157", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "157", "63",
+ "FCC", "5G", "20M", "OFDM", "1T", "161", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "161", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "161", "63",
+ "FCC", "5G", "20M", "OFDM", "1T", "165", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "165", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "165", "63",
+ "FCC", "5G", "20M", "HT", "1T", "36", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "36", "30",
+ "MKK", "5G", "20M", "HT", "1T", "36", "30",
+ "FCC", "5G", "20M", "HT", "1T", "40", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "40", "30",
+ "MKK", "5G", "20M", "HT", "1T", "40", "30",
+ "FCC", "5G", "20M", "HT", "1T", "44", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "44", "30",
+ "MKK", "5G", "20M", "HT", "1T", "44", "30",
+ "FCC", "5G", "20M", "HT", "1T", "48", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "48", "30",
+ "MKK", "5G", "20M", "HT", "1T", "48", "30",
+ "FCC", "5G", "20M", "HT", "1T", "52", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "52", "30",
+ "MKK", "5G", "20M", "HT", "1T", "52", "30",
+ "FCC", "5G", "20M", "HT", "1T", "56", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "56", "30",
+ "MKK", "5G", "20M", "HT", "1T", "56", "30",
+ "FCC", "5G", "20M", "HT", "1T", "60", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "60", "30",
+ "MKK", "5G", "20M", "HT", "1T", "60", "30",
+ "FCC", "5G", "20M", "HT", "1T", "64", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "64", "30",
+ "MKK", "5G", "20M", "HT", "1T", "64", "30",
+ "FCC", "5G", "20M", "HT", "1T", "100", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "100", "30",
+ "MKK", "5G", "20M", "HT", "1T", "100", "30",
+ "FCC", "5G", "20M", "HT", "1T", "114", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "114", "30",
+ "MKK", "5G", "20M", "HT", "1T", "114", "30",
+ "FCC", "5G", "20M", "HT", "1T", "108", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "108", "30",
+ "MKK", "5G", "20M", "HT", "1T", "108", "30",
+ "FCC", "5G", "20M", "HT", "1T", "112", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "112", "30",
+ "MKK", "5G", "20M", "HT", "1T", "112", "30",
+ "FCC", "5G", "20M", "HT", "1T", "116", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "116", "30",
+ "MKK", "5G", "20M", "HT", "1T", "116", "30",
+ "FCC", "5G", "20M", "HT", "1T", "120", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "120", "30",
+ "MKK", "5G", "20M", "HT", "1T", "120", "30",
+ "FCC", "5G", "20M", "HT", "1T", "124", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "124", "30",
+ "MKK", "5G", "20M", "HT", "1T", "124", "30",
+ "FCC", "5G", "20M", "HT", "1T", "128", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "128", "30",
+ "MKK", "5G", "20M", "HT", "1T", "128", "30",
+ "FCC", "5G", "20M", "HT", "1T", "132", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "132", "30",
+ "MKK", "5G", "20M", "HT", "1T", "132", "30",
+ "FCC", "5G", "20M", "HT", "1T", "136", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "136", "30",
+ "MKK", "5G", "20M", "HT", "1T", "136", "30",
+ "FCC", "5G", "20M", "HT", "1T", "140", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "140", "30",
+ "MKK", "5G", "20M", "HT", "1T", "140", "30",
+ "FCC", "5G", "20M", "HT", "1T", "149", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "149", "30",
+ "MKK", "5G", "20M", "HT", "1T", "149", "63",
+ "FCC", "5G", "20M", "HT", "1T", "153", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "153", "30",
+ "MKK", "5G", "20M", "HT", "1T", "153", "63",
+ "FCC", "5G", "20M", "HT", "1T", "157", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "157", "30",
+ "MKK", "5G", "20M", "HT", "1T", "157", "63",
+ "FCC", "5G", "20M", "HT", "1T", "161", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "161", "30",
+ "MKK", "5G", "20M", "HT", "1T", "161", "63",
+ "FCC", "5G", "20M", "HT", "1T", "165", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "165", "30",
+ "MKK", "5G", "20M", "HT", "1T", "165", "63",
+ "FCC", "5G", "20M", "HT", "2T", "36", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "36", "30",
+ "MKK", "5G", "20M", "HT", "2T", "36", "30",
+ "FCC", "5G", "20M", "HT", "2T", "40", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "40", "30",
+ "MKK", "5G", "20M", "HT", "2T", "40", "30",
+ "FCC", "5G", "20M", "HT", "2T", "44", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "44", "30",
+ "MKK", "5G", "20M", "HT", "2T", "44", "30",
+ "FCC", "5G", "20M", "HT", "2T", "48", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "48", "30",
+ "MKK", "5G", "20M", "HT", "2T", "48", "30",
+ "FCC", "5G", "20M", "HT", "2T", "52", "34",
+ "ETSI", "5G", "20M", "HT", "2T", "52", "30",
+ "MKK", "5G", "20M", "HT", "2T", "52", "30",
+ "FCC", "5G", "20M", "HT", "2T", "56", "32",
+ "ETSI", "5G", "20M", "HT", "2T", "56", "30",
+ "MKK", "5G", "20M", "HT", "2T", "56", "30",
+ "FCC", "5G", "20M", "HT", "2T", "60", "30",
+ "ETSI", "5G", "20M", "HT", "2T", "60", "30",
+ "MKK", "5G", "20M", "HT", "2T", "60", "30",
+ "FCC", "5G", "20M", "HT", "2T", "64", "26",
+ "ETSI", "5G", "20M", "HT", "2T", "64", "30",
+ "MKK", "5G", "20M", "HT", "2T", "64", "30",
+ "FCC", "5G", "20M", "HT", "2T", "100", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "100", "30",
+ "MKK", "5G", "20M", "HT", "2T", "100", "30",
+ "FCC", "5G", "20M", "HT", "2T", "114", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "114", "30",
+ "MKK", "5G", "20M", "HT", "2T", "114", "30",
+ "FCC", "5G", "20M", "HT", "2T", "108", "30",
+ "ETSI", "5G", "20M", "HT", "2T", "108", "30",
+ "MKK", "5G", "20M", "HT", "2T", "108", "30",
+ "FCC", "5G", "20M", "HT", "2T", "112", "32",
+ "ETSI", "5G", "20M", "HT", "2T", "112", "30",
+ "MKK", "5G", "20M", "HT", "2T", "112", "30",
+ "FCC", "5G", "20M", "HT", "2T", "116", "32",
+ "ETSI", "5G", "20M", "HT", "2T", "116", "30",
+ "MKK", "5G", "20M", "HT", "2T", "116", "30",
+ "FCC", "5G", "20M", "HT", "2T", "120", "34",
+ "ETSI", "5G", "20M", "HT", "2T", "120", "30",
+ "MKK", "5G", "20M", "HT", "2T", "120", "30",
+ "FCC", "5G", "20M", "HT", "2T", "124", "32",
+ "ETSI", "5G", "20M", "HT", "2T", "124", "30",
+ "MKK", "5G", "20M", "HT", "2T", "124", "30",
+ "FCC", "5G", "20M", "HT", "2T", "128", "30",
+ "ETSI", "5G", "20M", "HT", "2T", "128", "30",
+ "MKK", "5G", "20M", "HT", "2T", "128", "30",
+ "FCC", "5G", "20M", "HT", "2T", "132", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "132", "30",
+ "MKK", "5G", "20M", "HT", "2T", "132", "30",
+ "FCC", "5G", "20M", "HT", "2T", "136", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "136", "30",
+ "MKK", "5G", "20M", "HT", "2T", "136", "30",
+ "FCC", "5G", "20M", "HT", "2T", "140", "26",
+ "ETSI", "5G", "20M", "HT", "2T", "140", "30",
+ "MKK", "5G", "20M", "HT", "2T", "140", "30",
+ "FCC", "5G", "20M", "HT", "2T", "149", "34",
+ "ETSI", "5G", "20M", "HT", "2T", "149", "30",
+ "MKK", "5G", "20M", "HT", "2T", "149", "63",
+ "FCC", "5G", "20M", "HT", "2T", "153", "34",
+ "ETSI", "5G", "20M", "HT", "2T", "153", "30",
+ "MKK", "5G", "20M", "HT", "2T", "153", "63",
+ "FCC", "5G", "20M", "HT", "2T", "157", "34",
+ "ETSI", "5G", "20M", "HT", "2T", "157", "30",
+ "MKK", "5G", "20M", "HT", "2T", "157", "63",
+ "FCC", "5G", "20M", "HT", "2T", "161", "34",
+ "ETSI", "5G", "20M", "HT", "2T", "161", "30",
+ "MKK", "5G", "20M", "HT", "2T", "161", "63",
+ "FCC", "5G", "20M", "HT", "2T", "165", "34",
+ "ETSI", "5G", "20M", "HT", "2T", "165", "30",
+ "MKK", "5G", "20M", "HT", "2T", "165", "63",
+ "FCC", "5G", "40M", "HT", "1T", "38", "26",
+ "ETSI", "5G", "40M", "HT", "1T", "38", "30",
+ "MKK", "5G", "40M", "HT", "1T", "38", "30",
+ "FCC", "5G", "40M", "HT", "1T", "46", "30",
+ "ETSI", "5G", "40M", "HT", "1T", "46", "30",
+ "MKK", "5G", "40M", "HT", "1T", "46", "30",
+ "FCC", "5G", "40M", "HT", "1T", "54", "30",
+ "ETSI", "5G", "40M", "HT", "1T", "54", "30",
+ "MKK", "5G", "40M", "HT", "1T", "54", "30",
+ "FCC", "5G", "40M", "HT", "1T", "62", "26",
+ "ETSI", "5G", "40M", "HT", "1T", "62", "30",
+ "MKK", "5G", "40M", "HT", "1T", "62", "30",
+ "FCC", "5G", "40M", "HT", "1T", "102", "24",
+ "ETSI", "5G", "40M", "HT", "1T", "102", "30",
+ "MKK", "5G", "40M", "HT", "1T", "102", "30",
+ "FCC", "5G", "40M", "HT", "1T", "110", "30",
+ "ETSI", "5G", "40M", "HT", "1T", "110", "30",
+ "MKK", "5G", "40M", "HT", "1T", "110", "30",
+ "FCC", "5G", "40M", "HT", "1T", "118", "30",
+ "ETSI", "5G", "40M", "HT", "1T", "118", "30",
+ "MKK", "5G", "40M", "HT", "1T", "118", "30",
+ "FCC", "5G", "40M", "HT", "1T", "126", "30",
+ "ETSI", "5G", "40M", "HT", "1T", "126", "30",
+ "MKK", "5G", "40M", "HT", "1T", "126", "30",
+ "FCC", "5G", "40M", "HT", "1T", "134", "30",
+ "ETSI", "5G", "40M", "HT", "1T", "134", "30",
+ "MKK", "5G", "40M", "HT", "1T", "134", "30",
+ "FCC", "5G", "40M", "HT", "1T", "151", "30",
+ "ETSI", "5G", "40M", "HT", "1T", "151", "30",
+ "MKK", "5G", "40M", "HT", "1T", "151", "63",
+ "FCC", "5G", "40M", "HT", "1T", "159", "30",
+ "ETSI", "5G", "40M", "HT", "1T", "159", "30",
+ "MKK", "5G", "40M", "HT", "1T", "159", "63",
+ "FCC", "5G", "40M", "HT", "2T", "38", "28",
+ "ETSI", "5G", "40M", "HT", "2T", "38", "30",
+ "MKK", "5G", "40M", "HT", "2T", "38", "30",
+ "FCC", "5G", "40M", "HT", "2T", "46", "28",
+ "ETSI", "5G", "40M", "HT", "2T", "46", "30",
+ "MKK", "5G", "40M", "HT", "2T", "46", "30",
+ "FCC", "5G", "40M", "HT", "2T", "54", "30",
+ "ETSI", "5G", "40M", "HT", "2T", "54", "30",
+ "MKK", "5G", "40M", "HT", "2T", "54", "30",
+ "FCC", "5G", "40M", "HT", "2T", "62", "30",
+ "ETSI", "5G", "40M", "HT", "2T", "62", "30",
+ "MKK", "5G", "40M", "HT", "2T", "62", "30",
+ "FCC", "5G", "40M", "HT", "2T", "102", "26",
+ "ETSI", "5G", "40M", "HT", "2T", "102", "30",
+ "MKK", "5G", "40M", "HT", "2T", "102", "30",
+ "FCC", "5G", "40M", "HT", "2T", "110", "30",
+ "ETSI", "5G", "40M", "HT", "2T", "110", "30",
+ "MKK", "5G", "40M", "HT", "2T", "110", "30",
+ "FCC", "5G", "40M", "HT", "2T", "118", "34",
+ "ETSI", "5G", "40M", "HT", "2T", "118", "30",
+ "MKK", "5G", "40M", "HT", "2T", "118", "30",
+ "FCC", "5G", "40M", "HT", "2T", "126", "32",
+ "ETSI", "5G", "40M", "HT", "2T", "126", "30",
+ "MKK", "5G", "40M", "HT", "2T", "126", "30",
+ "FCC", "5G", "40M", "HT", "2T", "134", "30",
+ "ETSI", "5G", "40M", "HT", "2T", "134", "30",
+ "MKK", "5G", "40M", "HT", "2T", "134", "30",
+ "FCC", "5G", "40M", "HT", "2T", "151", "34",
+ "ETSI", "5G", "40M", "HT", "2T", "151", "30",
+ "MKK", "5G", "40M", "HT", "2T", "151", "63",
+ "FCC", "5G", "40M", "HT", "2T", "159", "34",
+ "ETSI", "5G", "40M", "HT", "2T", "159", "30",
+ "MKK", "5G", "40M", "HT", "2T", "159", "63",
+ "FCC", "5G", "80M", "VHT", "1T", "42", "22",
+ "ETSI", "5G", "80M", "VHT", "1T", "42", "30",
+ "MKK", "5G", "80M", "VHT", "1T", "42", "30",
+ "FCC", "5G", "80M", "VHT", "1T", "58", "20",
+ "ETSI", "5G", "80M", "VHT", "1T", "58", "30",
+ "MKK", "5G", "80M", "VHT", "1T", "58", "30",
+ "FCC", "5G", "80M", "VHT", "1T", "106", "20",
+ "ETSI", "5G", "80M", "VHT", "1T", "106", "30",
+ "MKK", "5G", "80M", "VHT", "1T", "106", "30",
+ "FCC", "5G", "80M", "VHT", "1T", "122", "28",
+ "ETSI", "5G", "80M", "VHT", "1T", "122", "30",
+ "MKK", "5G", "80M", "VHT", "1T", "122", "30",
+ "FCC", "5G", "80M", "VHT", "1T", "155", "30",
+ "ETSI", "5G", "80M", "VHT", "1T", "155", "30",
+ "MKK", "5G", "80M", "VHT", "1T", "155", "63",
+ "FCC", "5G", "80M", "VHT", "2T", "42", "28",
+ "ETSI", "5G", "80M", "VHT", "2T", "42", "30",
+ "MKK", "5G", "80M", "VHT", "2T", "42", "30",
+ "FCC", "5G", "80M", "VHT", "2T", "58", "26",
+ "ETSI", "5G", "80M", "VHT", "2T", "58", "30",
+ "MKK", "5G", "80M", "VHT", "2T", "58", "30",
+ "FCC", "5G", "80M", "VHT", "2T", "106", "28",
+ "ETSI", "5G", "80M", "VHT", "2T", "106", "30",
+ "MKK", "5G", "80M", "VHT", "2T", "106", "30",
+ "FCC", "5G", "80M", "VHT", "2T", "122", "32",
+ "ETSI", "5G", "80M", "VHT", "2T", "122", "30",
+ "MKK", "5G", "80M", "VHT", "2T", "122", "30",
+ "FCC", "5G", "80M", "VHT", "2T", "155", "34",
+ "ETSI", "5G", "80M", "VHT", "2T", "155", "30",
+ "MKK", "5G", "80M", "VHT", "2T", "155", "63"
+};*/
+
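+/*
+ * RF initialization table for radio path A: { RF register, value } pairs,
+ * apparently using the same conditional markers as the BB/PHY tables above.
+ */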
+u32 RTL8812AE_RADIOA_ARRAY[] = {
+ 0x000, 0x00010000,
+ 0x018, 0x0001712A,
+ 0x056, 0x00051CF2,
+ 0x066, 0x00040000,
+ 0x01E, 0x00080000,
+ 0x089, 0x00000080,
+ 0xFF0F0740, 0xABCD,
+ 0x086, 0x00014B38,
+ 0xFF0F02C0, 0xCDEF,
+ 0x086, 0x00014B38,
+ 0xFF0F01C0, 0xCDEF,
+ 0x086, 0x00014B38,
+ 0xFF0F07D8, 0xCDEF,
+ 0x086, 0x00014B3A,
+ 0xFF0F07D0, 0xCDEF,
+ 0x086, 0x00014B3A,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x086, 0x00014B38,
+ 0xFF0F0740, 0xDEAD,
+ 0x0B1, 0x0001FC1A,
+ 0x0B3, 0x000F0810,
+ 0x0B4, 0x0001A78D,
+ 0x0BA, 0x00086180,
+ 0x018, 0x00000006,
+ 0x0EF, 0x00002000,
+ 0xFF0F07D8, 0xABCD,
+ 0x03B, 0x0003F218,
+ 0x03B, 0x00030A58,
+ 0x03B, 0x0002FA58,
+ 0x03B, 0x00022590,
+ 0x03B, 0x0001FA50,
+ 0x03B, 0x00010248,
+ 0x03B, 0x00008240,
+ 0xFF0F07D0, 0xCDEF,
+ 0x03B, 0x0003F218,
+ 0x03B, 0x00030A58,
+ 0x03B, 0x0002FA58,
+ 0x03B, 0x00022590,
+ 0x03B, 0x0001FA50,
+ 0x03B, 0x00010248,
+ 0x03B, 0x00008240,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x03B, 0x00038A58,
+ 0x03B, 0x00037A58,
+ 0x03B, 0x0002A590,
+ 0x03B, 0x00027A50,
+ 0x03B, 0x00018248,
+ 0x03B, 0x00010240,
+ 0x03B, 0x00008240,
+ 0xFF0F07D8, 0xDEAD,
+ 0x0EF, 0x00000100,
+ 0xFF0F07D8, 0xABCD,
+ 0x034, 0x0000A4EE,
+ 0x034, 0x00009076,
+ 0x034, 0x00008073,
+ 0x034, 0x00007070,
+ 0x034, 0x0000606D,
+ 0x034, 0x0000506A,
+ 0x034, 0x00004049,
+ 0x034, 0x00003046,
+ 0x034, 0x00002028,
+ 0x034, 0x00001025,
+ 0x034, 0x00000022,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0000ADF4,
+ 0x034, 0x00009DF1,
+ 0x034, 0x00008DEE,
+ 0x034, 0x00007DEB,
+ 0x034, 0x00006DE8,
+ 0x034, 0x00005CEC,
+ 0x034, 0x00004CE9,
+ 0x034, 0x000034EA,
+ 0x034, 0x000024E7,
+ 0x034, 0x0000146B,
+ 0x034, 0x0000006D,
+ 0xFF0F07D8, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x000020A2,
+ 0x0DF, 0x00000080,
+ 0x035, 0x00000192,
+ 0x035, 0x00008192,
+ 0x035, 0x00010192,
+ 0x036, 0x00000024,
+ 0x036, 0x00008024,
+ 0x036, 0x00010024,
+ 0x036, 0x00018024,
+ 0x0EF, 0x00000000,
+ 0x051, 0x00000C21,
+ 0x052, 0x000006D9,
+ 0x053, 0x000FC649,
+ 0x054, 0x0000017E,
+ 0x0EF, 0x00000002,
+ 0x008, 0x00008400,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00001000,
+ 0x03A, 0x00000080,
+ 0x03B, 0x0003A02C,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000400,
+ 0x03B, 0x0003202C,
+ 0x03C, 0x00010000,
+ 0x03A, 0x000000A0,
+ 0x03B, 0x0002B064,
+ 0x03C, 0x00004000,
+ 0x03A, 0x000000D8,
+ 0x03B, 0x00023070,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000468,
+ 0x03B, 0x0001B870,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000098,
+ 0x03B, 0x00012085,
+ 0x03C, 0x000E4000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x0000A080,
+ 0x03C, 0x000F0000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x00002080,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000080,
+ 0x03B, 0x0007A02C,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000400,
+ 0x03B, 0x0007202C,
+ 0x03C, 0x00010000,
+ 0x03A, 0x000000A0,
+ 0x03B, 0x0006B064,
+ 0x03C, 0x00004000,
+ 0x03A, 0x000000D8,
+ 0x03B, 0x00023070,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000468,
+ 0x03B, 0x0005B870,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000098,
+ 0x03B, 0x00052085,
+ 0x03C, 0x000E4000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x0004A080,
+ 0x03C, 0x000F0000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x00042080,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000080,
+ 0x03B, 0x000BA02C,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000400,
+ 0x03B, 0x000B202C,
+ 0x03C, 0x00010000,
+ 0x03A, 0x000000A0,
+ 0x03B, 0x000AB064,
+ 0x03C, 0x00004000,
+ 0x03A, 0x000000D8,
+ 0x03B, 0x000A3070,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000468,
+ 0x03B, 0x0009B870,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000098,
+ 0x03B, 0x00092085,
+ 0x03C, 0x000E4000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x0008A080,
+ 0x03C, 0x000F0000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x00082080,
+ 0x03C, 0x00010000,
+ 0x0EF, 0x00001100,
+ 0xFF0F0740, 0xABCD,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xFF0F01C0, 0xCDEF,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xFF0F07D8, 0xCDEF,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xFF0F07D0, 0xCDEF,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0004ADF5,
+ 0x034, 0x00049DF2,
+ 0x034, 0x00048DEF,
+ 0x034, 0x00047DEC,
+ 0x034, 0x00046DE9,
+ 0x034, 0x00045DC9,
+ 0x034, 0x00044CE8,
+ 0x034, 0x000438CA,
+ 0x034, 0x00042889,
+ 0x034, 0x0004184A,
+ 0x034, 0x0004044A,
+ 0xFF0F0740, 0xDEAD,
+ 0xFF0F0740, 0xABCD,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xFF0F01C0, 0xCDEF,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xFF0F07D8, 0xCDEF,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xFF0F07D0, 0xCDEF,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0002ADF5,
+ 0x034, 0x00029DF2,
+ 0x034, 0x00028DEF,
+ 0x034, 0x00027DEC,
+ 0x034, 0x00026DE9,
+ 0x034, 0x00025DC9,
+ 0x034, 0x00024CE8,
+ 0x034, 0x000238CA,
+ 0x034, 0x00022889,
+ 0x034, 0x0002184A,
+ 0x034, 0x0002044A,
+ 0xFF0F0740, 0xDEAD,
+ 0xFF0F0740, 0xABCD,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xFF0F01C0, 0xCDEF,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xFF0F07D8, 0xCDEF,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xFF0F07D0, 0xCDEF,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0000AFF7,
+ 0x034, 0x00009DF7,
+ 0x034, 0x00008DF4,
+ 0x034, 0x00007DF1,
+ 0x034, 0x00006DEE,
+ 0x034, 0x00005DCD,
+ 0x034, 0x00004CEB,
+ 0x034, 0x000038CC,
+ 0x034, 0x0000288B,
+ 0x034, 0x0000184C,
+ 0x034, 0x0000044C,
+ 0xFF0F0740, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0xFF0F0740, 0xABCD,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001D4,
+ 0x035, 0x000081D4,
+ 0x035, 0x000101D4,
+ 0x035, 0x000201B4,
+ 0x035, 0x000281B4,
+ 0x035, 0x000301B4,
+ 0x035, 0x000401B4,
+ 0x035, 0x000481B4,
+ 0x035, 0x000501B4,
+ 0xFF0F02C0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001D4,
+ 0x035, 0x000081D4,
+ 0x035, 0x000101D4,
+ 0x035, 0x000201B4,
+ 0x035, 0x000281B4,
+ 0x035, 0x000301B4,
+ 0x035, 0x000401B4,
+ 0x035, 0x000481B4,
+ 0x035, 0x000501B4,
+ 0xFF0F01C0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001D4,
+ 0x035, 0x000081D4,
+ 0x035, 0x000101D4,
+ 0x035, 0x000201B4,
+ 0x035, 0x000281B4,
+ 0x035, 0x000301B4,
+ 0x035, 0x000401B4,
+ 0x035, 0x000481B4,
+ 0x035, 0x000501B4,
+ 0xFF0F07D8, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001D4,
+ 0x035, 0x000081D4,
+ 0x035, 0x000101D4,
+ 0x035, 0x000201B4,
+ 0x035, 0x000281B4,
+ 0x035, 0x000301B4,
+ 0x035, 0x000401B4,
+ 0x035, 0x000481B4,
+ 0x035, 0x000501B4,
+ 0xFF0F07D0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001D4,
+ 0x035, 0x000081D4,
+ 0x035, 0x000101D4,
+ 0x035, 0x000201B4,
+ 0x035, 0x000281B4,
+ 0x035, 0x000301B4,
+ 0x035, 0x000401B4,
+ 0x035, 0x000481B4,
+ 0x035, 0x000501B4,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x00000188,
+ 0x035, 0x00008147,
+ 0x035, 0x00010147,
+ 0x035, 0x000201D7,
+ 0x035, 0x000281D7,
+ 0x035, 0x000301D7,
+ 0x035, 0x000401D8,
+ 0x035, 0x000481D8,
+ 0x035, 0x000501D8,
+ 0xFF0F0740, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0xFF0F0740, 0xABCD,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00004BFB,
+ 0x036, 0x0000CBFB,
+ 0x036, 0x00014BFB,
+ 0x036, 0x0001CBFB,
+ 0x036, 0x00024F4B,
+ 0x036, 0x0002CF4B,
+ 0x036, 0x00034F4B,
+ 0x036, 0x0003CF4B,
+ 0x036, 0x00044F4B,
+ 0x036, 0x0004CF4B,
+ 0x036, 0x00054F4B,
+ 0x036, 0x0005CF4B,
+ 0xFF0F02C0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00004BFB,
+ 0x036, 0x0000CBFB,
+ 0x036, 0x00014BFB,
+ 0x036, 0x0001CBFB,
+ 0x036, 0x00024F4B,
+ 0x036, 0x0002CF4B,
+ 0x036, 0x00034F4B,
+ 0x036, 0x0003CF4B,
+ 0x036, 0x00044F4B,
+ 0x036, 0x0004CF4B,
+ 0x036, 0x00054F4B,
+ 0x036, 0x0005CF4B,
+ 0xFF0F01C0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00004BFB,
+ 0x036, 0x0000CBFB,
+ 0x036, 0x00014BFB,
+ 0x036, 0x0001CBFB,
+ 0x036, 0x00024F4B,
+ 0x036, 0x0002CF4B,
+ 0x036, 0x00034F4B,
+ 0x036, 0x0003CF4B,
+ 0x036, 0x00044F4B,
+ 0x036, 0x0004CF4B,
+ 0x036, 0x00054F4B,
+ 0x036, 0x0005CF4B,
+ 0xFF0F07D8, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00004BFB,
+ 0x036, 0x0000CBFB,
+ 0x036, 0x00014BFB,
+ 0x036, 0x0001CBFB,
+ 0x036, 0x00024F4B,
+ 0x036, 0x0002CF4B,
+ 0x036, 0x00034F4B,
+ 0x036, 0x0003CF4B,
+ 0x036, 0x00044F4B,
+ 0x036, 0x0004CF4B,
+ 0x036, 0x00054F4B,
+ 0x036, 0x0005CF4B,
+ 0xFF0F07D0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00004BFB,
+ 0x036, 0x0000CBFB,
+ 0x036, 0x00014BFB,
+ 0x036, 0x0001CBFB,
+ 0x036, 0x00024F4B,
+ 0x036, 0x0002CF4B,
+ 0x036, 0x00034F4B,
+ 0x036, 0x0003CF4B,
+ 0x036, 0x00044F4B,
+ 0x036, 0x0004CF4B,
+ 0x036, 0x00054F4B,
+ 0x036, 0x0005CF4B,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00084EB4,
+ 0x036, 0x0008CC35,
+ 0x036, 0x00094C35,
+ 0x036, 0x0009CC35,
+ 0x036, 0x000A4935,
+ 0x036, 0x000ACC35,
+ 0x036, 0x000B4C35,
+ 0x036, 0x000BCC35,
+ 0x036, 0x000C4EB4,
+ 0x036, 0x000CCEB5,
+ 0x036, 0x000D4EB5,
+ 0x036, 0x000DCEB5,
+ 0xFF0F0740, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000008,
+ 0xFF0F0740, 0xABCD,
+ 0x03C, 0x000002CC,
+ 0x03C, 0x00000522,
+ 0x03C, 0x00000902,
+ 0xFF0F02C0, 0xCDEF,
+ 0x03C, 0x000002CC,
+ 0x03C, 0x00000522,
+ 0x03C, 0x00000902,
+ 0xFF0F01C0, 0xCDEF,
+ 0x03C, 0x000002CC,
+ 0x03C, 0x00000522,
+ 0x03C, 0x00000902,
+ 0xFF0F07D8, 0xCDEF,
+ 0x03C, 0x000002CC,
+ 0x03C, 0x00000522,
+ 0x03C, 0x00000902,
+ 0xFF0F07D0, 0xCDEF,
+ 0x03C, 0x000002CC,
+ 0x03C, 0x00000522,
+ 0x03C, 0x00000902,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x03C, 0x000002A8,
+ 0x03C, 0x000005A2,
+ 0x03C, 0x00000880,
+ 0xFF0F0740, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000002,
+ 0x0DF, 0x00000080,
+ 0x01F, 0x00040064,
+ 0xFF0F0740, 0xABCD,
+ 0x061, 0x000FDD43,
+ 0x062, 0x00038F4B,
+ 0x063, 0x00032117,
+ 0x064, 0x000194AC,
+ 0x065, 0x000931D1,
+ 0xFF0F02C0, 0xCDEF,
+ 0x061, 0x000FDD43,
+ 0x062, 0x00038F4B,
+ 0x063, 0x00032117,
+ 0x064, 0x000194AC,
+ 0x065, 0x000931D1,
+ 0xFF0F01C0, 0xCDEF,
+ 0x061, 0x000FDD43,
+ 0x062, 0x00038F4B,
+ 0x063, 0x00032117,
+ 0x064, 0x000194AC,
+ 0x065, 0x000931D1,
+ 0xFF0F07D8, 0xCDEF,
+ 0x061, 0x000FDD43,
+ 0x062, 0x00038F4B,
+ 0x063, 0x00032117,
+ 0x064, 0x000194AC,
+ 0x065, 0x000931D1,
+ 0xFF0F07D0, 0xCDEF,
+ 0x061, 0x000FDD43,
+ 0x062, 0x00038F4B,
+ 0x063, 0x00032117,
+ 0x064, 0x000194AC,
+ 0x065, 0x000931D1,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x061, 0x000E5D53,
+ 0x062, 0x00038FCD,
+ 0x063, 0x000314EB,
+ 0x064, 0x000196AC,
+ 0x065, 0x000911D7,
+ 0xFF0F0740, 0xDEAD,
+ 0x008, 0x00008400,
+ 0x01C, 0x000739D2,
+ 0x0B4, 0x0001E78D,
+ 0x018, 0x0001F12A,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0B4, 0x0001A78D,
+ 0x018, 0x0001712A,
+};
+
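+/*
+ * Entries below are (RF register offset, 32-bit data) pairs. The
+ * 0xFF0Fxxxx/0xABCD, 0xCDEF, 0xCDCDCDCD and 0xDEAD values are not
+ * register writes; they appear to mark the start, alternative branch,
+ * default branch and end of condition-dependent blocks that the PHY
+ * configuration loader selects between at run time.
+ */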
+u32 RTL8812AE_RADIOB_ARRAY[] = {
+ 0x056, 0x00051CF2,
+ 0x066, 0x00040000,
+ 0x089, 0x00000080,
+ 0xFF0F0740, 0xABCD,
+ 0x086, 0x00014B38,
+ 0xFF0F01C0, 0xCDEF,
+ 0x086, 0x00014B38,
+ 0xFF0F02C0, 0xCDEF,
+ 0x086, 0x00014B38,
+ 0xFF0F07D8, 0xCDEF,
+ 0x086, 0x00014B3A,
+ 0xFF0F07D0, 0xCDEF,
+ 0x086, 0x00014B3A,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x086, 0x00014B38,
+ 0xFF0F0740, 0xDEAD,
+ 0x018, 0x00000006,
+ 0x0EF, 0x00002000,
+ 0xFF0F07D8, 0xABCD,
+ 0x03B, 0x0003F218,
+ 0x03B, 0x00030A58,
+ 0x03B, 0x0002FA58,
+ 0x03B, 0x00022590,
+ 0x03B, 0x0001FA50,
+ 0x03B, 0x00010248,
+ 0x03B, 0x00008240,
+ 0xFF0F07D0, 0xCDEF,
+ 0x03B, 0x0003F218,
+ 0x03B, 0x00030A58,
+ 0x03B, 0x0002FA58,
+ 0x03B, 0x00022590,
+ 0x03B, 0x0001FA50,
+ 0x03B, 0x00010248,
+ 0x03B, 0x00008240,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x03B, 0x00038A58,
+ 0x03B, 0x00037A58,
+ 0x03B, 0x0002A590,
+ 0x03B, 0x00027A50,
+ 0x03B, 0x00018248,
+ 0x03B, 0x00010240,
+ 0x03B, 0x00008240,
+ 0xFF0F07D8, 0xDEAD,
+ 0x0EF, 0x00000100,
+ 0xFF0F07D8, 0xABCD,
+ 0x034, 0x0000A4EE,
+ 0x034, 0x00009076,
+ 0x034, 0x00008073,
+ 0x034, 0x00007070,
+ 0x034, 0x0000606D,
+ 0x034, 0x0000506A,
+ 0x034, 0x00004049,
+ 0x034, 0x00003046,
+ 0x034, 0x00002028,
+ 0x034, 0x00001025,
+ 0x034, 0x00000022,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0000ADF4,
+ 0x034, 0x00009DF1,
+ 0x034, 0x00008DEE,
+ 0x034, 0x00007DEB,
+ 0x034, 0x00006DE8,
+ 0x034, 0x00005CEC,
+ 0x034, 0x00004CE9,
+ 0x034, 0x000034EA,
+ 0x034, 0x000024E7,
+ 0x034, 0x0000146B,
+ 0x034, 0x0000006D,
+ 0xFF0F07D8, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x000020A2,
+ 0x0DF, 0x00000080,
+ 0x035, 0x00000192,
+ 0x035, 0x00008192,
+ 0x035, 0x00010192,
+ 0x036, 0x00000024,
+ 0x036, 0x00008024,
+ 0x036, 0x00010024,
+ 0x036, 0x00018024,
+ 0x0EF, 0x00000000,
+ 0x051, 0x00000C21,
+ 0x052, 0x000006D9,
+ 0x053, 0x000FC649,
+ 0x054, 0x0000017E,
+ 0x0EF, 0x00000002,
+ 0x008, 0x00008400,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00001000,
+ 0x03A, 0x00000080,
+ 0x03B, 0x0003A02C,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000400,
+ 0x03B, 0x0003202C,
+ 0x03C, 0x00010000,
+ 0x03A, 0x000000A0,
+ 0x03B, 0x0002B064,
+ 0x03C, 0x00004000,
+ 0x03A, 0x000000D8,
+ 0x03B, 0x00023070,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000468,
+ 0x03B, 0x0001B870,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000098,
+ 0x03B, 0x00012085,
+ 0x03C, 0x000E4000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x0000A080,
+ 0x03C, 0x000F0000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x00002080,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000080,
+ 0x03B, 0x0007A02C,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000400,
+ 0x03B, 0x0007202C,
+ 0x03C, 0x00010000,
+ 0x03A, 0x000000A0,
+ 0x03B, 0x0006B064,
+ 0x03C, 0x00004000,
+ 0x03A, 0x000000D8,
+ 0x03B, 0x00063070,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000468,
+ 0x03B, 0x0005B870,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000098,
+ 0x03B, 0x00052085,
+ 0x03C, 0x000E4000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x0004A080,
+ 0x03C, 0x000F0000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x00042080,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000080,
+ 0x03B, 0x000BA02C,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000400,
+ 0x03B, 0x000B202C,
+ 0x03C, 0x00010000,
+ 0x03A, 0x000000A0,
+ 0x03B, 0x000AB064,
+ 0x03C, 0x00004000,
+ 0x03A, 0x000000D8,
+ 0x03B, 0x000A3070,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000468,
+ 0x03B, 0x0009B870,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000098,
+ 0x03B, 0x00092085,
+ 0x03C, 0x000E4000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x0008A080,
+ 0x03C, 0x000F0000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x00082080,
+ 0x03C, 0x00010000,
+ 0x0EF, 0x00001100,
+ 0xFF0F0740, 0xABCD,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xFF0F01C0, 0xCDEF,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xFF0F07D8, 0xCDEF,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xFF0F07D0, 0xCDEF,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0004ADF5,
+ 0x034, 0x00049DF2,
+ 0x034, 0x00048DEF,
+ 0x034, 0x00047DEC,
+ 0x034, 0x00046DE9,
+ 0x034, 0x00045DC9,
+ 0x034, 0x00044CE8,
+ 0x034, 0x000438CA,
+ 0x034, 0x00042889,
+ 0x034, 0x0004184A,
+ 0x034, 0x0004044A,
+ 0xFF0F0740, 0xDEAD,
+ 0xFF0F0740, 0xABCD,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xFF0F01C0, 0xCDEF,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xFF0F07D8, 0xCDEF,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xFF0F07D0, 0xCDEF,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0002ADF5,
+ 0x034, 0x00029DF2,
+ 0x034, 0x00028DEF,
+ 0x034, 0x00027DEC,
+ 0x034, 0x00026DE9,
+ 0x034, 0x00025DC9,
+ 0x034, 0x00024CE8,
+ 0x034, 0x000238CA,
+ 0x034, 0x00022889,
+ 0x034, 0x0002184A,
+ 0x034, 0x0002044A,
+ 0xFF0F0740, 0xDEAD,
+ 0xFF0F0740, 0xABCD,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xFF0F01C0, 0xCDEF,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xFF0F07D8, 0xCDEF,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xFF0F07D0, 0xCDEF,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0000AFF7,
+ 0x034, 0x00009DF7,
+ 0x034, 0x00008DF4,
+ 0x034, 0x00007DF1,
+ 0x034, 0x00006DEE,
+ 0x034, 0x00005DCD,
+ 0x034, 0x00004CEB,
+ 0x034, 0x000038CC,
+ 0x034, 0x0000288B,
+ 0x034, 0x0000184C,
+ 0x034, 0x0000044C,
+ 0xFF0F0740, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0xFF0F0740, 0xABCD,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001C5,
+ 0x035, 0x000081C5,
+ 0x035, 0x000101C5,
+ 0x035, 0x00020174,
+ 0x035, 0x00028174,
+ 0x035, 0x00030174,
+ 0x035, 0x00040185,
+ 0x035, 0x00048185,
+ 0x035, 0x00050185,
+ 0x0EF, 0x00000000,
+ 0xFF0F01C0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001C5,
+ 0x035, 0x000081C5,
+ 0x035, 0x000101C5,
+ 0x035, 0x00020174,
+ 0x035, 0x00028174,
+ 0x035, 0x00030174,
+ 0x035, 0x00040185,
+ 0x035, 0x00048185,
+ 0x035, 0x00050185,
+ 0x0EF, 0x00000000,
+ 0xFF0F02C0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001C5,
+ 0x035, 0x000081C5,
+ 0x035, 0x000101C5,
+ 0x035, 0x00020174,
+ 0x035, 0x00028174,
+ 0x035, 0x00030174,
+ 0x035, 0x00040185,
+ 0x035, 0x00048185,
+ 0x035, 0x00050185,
+ 0x0EF, 0x00000000,
+ 0xFF0F07D8, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001C5,
+ 0x035, 0x000081C5,
+ 0x035, 0x000101C5,
+ 0x035, 0x00020174,
+ 0x035, 0x00028174,
+ 0x035, 0x00030174,
+ 0x035, 0x00040185,
+ 0x035, 0x00048185,
+ 0x035, 0x00050185,
+ 0x0EF, 0x00000000,
+ 0xFF0F07D0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001C5,
+ 0x035, 0x000081C5,
+ 0x035, 0x000101C5,
+ 0x035, 0x00020174,
+ 0x035, 0x00028174,
+ 0x035, 0x00030174,
+ 0x035, 0x00040185,
+ 0x035, 0x00048185,
+ 0x035, 0x00050185,
+ 0x0EF, 0x00000000,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x00000186,
+ 0x035, 0x00008186,
+ 0x035, 0x00010185,
+ 0x035, 0x000201D5,
+ 0x035, 0x000281D5,
+ 0x035, 0x000301D5,
+ 0x035, 0x000401D5,
+ 0x035, 0x000481D5,
+ 0x035, 0x000501D5,
+ 0x0EF, 0x00000000,
+ 0xFF0F0740, 0xDEAD,
+ 0xFF0F0740, 0xABCD,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00005B8B,
+ 0x036, 0x0000DB8B,
+ 0x036, 0x00015B8B,
+ 0x036, 0x0001DB8B,
+ 0x036, 0x000262DB,
+ 0x036, 0x0002E2DB,
+ 0x036, 0x000362DB,
+ 0x036, 0x0003E2DB,
+ 0x036, 0x0004553B,
+ 0x036, 0x0004D53B,
+ 0x036, 0x0005553B,
+ 0x036, 0x0005D53B,
+ 0xFF0F01C0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00005B8B,
+ 0x036, 0x0000DB8B,
+ 0x036, 0x00015B8B,
+ 0x036, 0x0001DB8B,
+ 0x036, 0x000262DB,
+ 0x036, 0x0002E2DB,
+ 0x036, 0x000362DB,
+ 0x036, 0x0003E2DB,
+ 0x036, 0x0004553B,
+ 0x036, 0x0004D53B,
+ 0x036, 0x0005553B,
+ 0x036, 0x0005D53B,
+ 0xFF0F02C0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00005B8B,
+ 0x036, 0x0000DB8B,
+ 0x036, 0x00015B8B,
+ 0x036, 0x0001DB8B,
+ 0x036, 0x000262DB,
+ 0x036, 0x0002E2DB,
+ 0x036, 0x000362DB,
+ 0x036, 0x0003E2DB,
+ 0x036, 0x0004553B,
+ 0x036, 0x0004D53B,
+ 0x036, 0x0005553B,
+ 0x036, 0x0005D53B,
+ 0xFF0F07D8, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00005B8B,
+ 0x036, 0x0000DB8B,
+ 0x036, 0x00015B8B,
+ 0x036, 0x0001DB8B,
+ 0x036, 0x000262DB,
+ 0x036, 0x0002E2DB,
+ 0x036, 0x000362DB,
+ 0x036, 0x0003E2DB,
+ 0x036, 0x0004553B,
+ 0x036, 0x0004D53B,
+ 0x036, 0x0005553B,
+ 0x036, 0x0005D53B,
+ 0xFF0F07D0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00005B8B,
+ 0x036, 0x0000DB8B,
+ 0x036, 0x00015B8B,
+ 0x036, 0x0001DB8B,
+ 0x036, 0x000262DB,
+ 0x036, 0x0002E2DB,
+ 0x036, 0x000362DB,
+ 0x036, 0x0003E2DB,
+ 0x036, 0x0004553B,
+ 0x036, 0x0004D53B,
+ 0x036, 0x0005553B,
+ 0x036, 0x0005D53B,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00084EB4,
+ 0x036, 0x0008C9B4,
+ 0x036, 0x000949B4,
+ 0x036, 0x0009C9B4,
+ 0x036, 0x000A4935,
+ 0x036, 0x000AC935,
+ 0x036, 0x000B4935,
+ 0x036, 0x000BC935,
+ 0x036, 0x000C4EB4,
+ 0x036, 0x000CCEB4,
+ 0x036, 0x000D4EB4,
+ 0x036, 0x000DCEB4,
+ 0xFF0F0740, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000008,
+ 0xFF0F0740, 0xABCD,
+ 0x03C, 0x000002DC,
+ 0x03C, 0x00000524,
+ 0x03C, 0x00000902,
+ 0xFF0F01C0, 0xCDEF,
+ 0x03C, 0x000002DC,
+ 0x03C, 0x00000524,
+ 0x03C, 0x00000902,
+ 0xFF0F02C0, 0xCDEF,
+ 0x03C, 0x000002DC,
+ 0x03C, 0x00000524,
+ 0x03C, 0x00000902,
+ 0xFF0F07D8, 0xCDEF,
+ 0x03C, 0x000002DC,
+ 0x03C, 0x00000524,
+ 0x03C, 0x00000902,
+ 0xFF0F07D0, 0xCDEF,
+ 0x03C, 0x000002DC,
+ 0x03C, 0x00000524,
+ 0x03C, 0x00000902,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x03C, 0x000002AA,
+ 0x03C, 0x000005A2,
+ 0x03C, 0x00000880,
+ 0xFF0F0740, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000002,
+ 0x0DF, 0x00000080,
+ 0xFF0F0740, 0xABCD,
+ 0x061, 0x000EAC43,
+ 0x062, 0x00038F47,
+ 0x063, 0x00031157,
+ 0x064, 0x0001C4AC,
+ 0x065, 0x000931D1,
+ 0xFF0F01C0, 0xCDEF,
+ 0x061, 0x000EAC43,
+ 0x062, 0x00038F47,
+ 0x063, 0x00031157,
+ 0x064, 0x0001C4AC,
+ 0x065, 0x000931D1,
+ 0xFF0F02C0, 0xCDEF,
+ 0x061, 0x000EAC43,
+ 0x062, 0x00038F47,
+ 0x063, 0x00031157,
+ 0x064, 0x0001C4AC,
+ 0x065, 0x000931D1,
+ 0xFF0F07D8, 0xCDEF,
+ 0x061, 0x000EAC43,
+ 0x062, 0x00038F47,
+ 0x063, 0x00031157,
+ 0x064, 0x0001C4AC,
+ 0x065, 0x000931D1,
+ 0xFF0F07D0, 0xCDEF,
+ 0x061, 0x000EAC43,
+ 0x062, 0x00038F47,
+ 0x063, 0x00031157,
+ 0x064, 0x0001C4AC,
+ 0x065, 0x000931D1,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x061, 0x000E5D53,
+ 0x062, 0x00038FCD,
+ 0x063, 0x000314EB,
+ 0x064, 0x000196AC,
+ 0x065, 0x000931D7,
+ 0xFF0F0740, 0xDEAD,
+ 0x008, 0x00008400,
+};
+
+u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 0x018, 0x0001712A,
+ 0x056, 0x00051CF2,
+ 0x066, 0x00040000,
+ 0x000, 0x00010000,
+ 0x01E, 0x00080000,
+ 0x082, 0x00000830,
+ 0x083, 0x00021800,
+ 0x084, 0x00028000,
+ 0x085, 0x00048000,
+ 0x086, 0x00094838,
+ 0x087, 0x00044980,
+ 0x088, 0x00048000,
+ 0x089, 0x0000D480,
+ 0x08A, 0x00042240,
+ 0x08B, 0x000F0380,
+ 0x08C, 0x00090000,
+ 0x08D, 0x00022852,
+ 0x08E, 0x00065540,
+ 0x08F, 0x00088001,
+ 0x0EF, 0x00020000,
+ 0x03E, 0x00000380,
+ 0x03F, 0x00090018,
+ 0x03E, 0x00020380,
+ 0x03F, 0x000A0018,
+ 0x03E, 0x00040308,
+ 0x03F, 0x000A0018,
+ 0x03E, 0x00060018,
+ 0x03F, 0x000A0018,
+ 0x0EF, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x089, 0x00000080,
+ 0x08B, 0x00080180,
+ 0x0EF, 0x00001000,
+ 0x03A, 0x00000244,
+ 0x03B, 0x00038027,
+ 0x03C, 0x00082000,
+ 0x03A, 0x00000244,
+ 0x03B, 0x00030113,
+ 0x03C, 0x00082000,
+ 0x03A, 0x0000014C,
+ 0x03B, 0x00028027,
+ 0x03C, 0x00082000,
+ 0x03A, 0x000000CC,
+ 0x03B, 0x00027027,
+ 0x03C, 0x00042000,
+ 0x03A, 0x0000014C,
+ 0x03B, 0x0001F913,
+ 0x03C, 0x00042000,
+ 0x03A, 0x0000010C,
+ 0x03B, 0x00017F10,
+ 0x03C, 0x00012000,
+ 0x03A, 0x000000D0,
+ 0x03B, 0x00008027,
+ 0x03C, 0x000CA000,
+ 0x03A, 0x00000244,
+ 0x03B, 0x00078027,
+ 0x03C, 0x00082000,
+ 0x03A, 0x00000244,
+ 0x03B, 0x00070113,
+ 0x03C, 0x00082000,
+ 0x03A, 0x0000014C,
+ 0x03B, 0x00068027,
+ 0x03C, 0x00082000,
+ 0x03A, 0x000000CC,
+ 0x03B, 0x00067027,
+ 0x03C, 0x00042000,
+ 0x03A, 0x0000014C,
+ 0x03B, 0x0005F913,
+ 0x03C, 0x00042000,
+ 0x03A, 0x0000010C,
+ 0x03B, 0x00057F10,
+ 0x03C, 0x00012000,
+ 0x03A, 0x000000D0,
+ 0x03B, 0x00048027,
+ 0x03C, 0x000CA000,
+ 0x03A, 0x00000244,
+ 0x03B, 0x000B8027,
+ 0x03C, 0x00082000,
+ 0x03A, 0x00000244,
+ 0x03B, 0x000B0113,
+ 0x03C, 0x00082000,
+ 0x03A, 0x0000014C,
+ 0x03B, 0x000A8027,
+ 0x03C, 0x00082000,
+ 0x03A, 0x000000CC,
+ 0x03B, 0x000A7027,
+ 0x03C, 0x00042000,
+ 0x03A, 0x0000014C,
+ 0x03B, 0x0009F913,
+ 0x03C, 0x00042000,
+ 0x03A, 0x0000010C,
+ 0x03B, 0x00097F10,
+ 0x03C, 0x00012000,
+ 0x03A, 0x000000D0,
+ 0x03B, 0x00088027,
+ 0x03C, 0x000CA000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00001100,
+ 0xFF0F0104, 0xABCD,
+ 0x034, 0x0004ADF3,
+ 0x034, 0x00049DF0,
+ 0xFF0F0204, 0xCDEF,
+ 0x034, 0x0004ADF3,
+ 0x034, 0x00049DF0,
+ 0xFF0F0404, 0xCDEF,
+ 0x034, 0x0004ADF3,
+ 0x034, 0x00049DF0,
+ 0xFF0F0200, 0xCDEF,
+ 0x034, 0x0004ADF5,
+ 0x034, 0x00049DF2,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0004A0F3,
+ 0x034, 0x000490B1,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0004ADF7,
+ 0x034, 0x00049DF3,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F0104, 0xABCD,
+ 0x034, 0x00048DED,
+ 0x034, 0x00047DEA,
+ 0x034, 0x00046DE7,
+ 0x034, 0x00045CE9,
+ 0x034, 0x00044CE6,
+ 0x034, 0x000438C6,
+ 0x034, 0x00042886,
+ 0x034, 0x00041486,
+ 0x034, 0x00040447,
+ 0xFF0F0204, 0xCDEF,
+ 0x034, 0x00048DED,
+ 0x034, 0x00047DEA,
+ 0x034, 0x00046DE7,
+ 0x034, 0x00045CE9,
+ 0x034, 0x00044CE6,
+ 0x034, 0x000438C6,
+ 0x034, 0x00042886,
+ 0x034, 0x00041486,
+ 0x034, 0x00040447,
+ 0xFF0F0404, 0xCDEF,
+ 0x034, 0x00048DED,
+ 0x034, 0x00047DEA,
+ 0x034, 0x00046DE7,
+ 0x034, 0x00045CE9,
+ 0x034, 0x00044CE6,
+ 0x034, 0x000438C6,
+ 0x034, 0x00042886,
+ 0x034, 0x00041486,
+ 0x034, 0x00040447,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x000480AE,
+ 0x034, 0x000470AB,
+ 0x034, 0x0004608B,
+ 0x034, 0x00045069,
+ 0x034, 0x00044048,
+ 0x034, 0x00043045,
+ 0x034, 0x00042026,
+ 0x034, 0x00041023,
+ 0x034, 0x00040002,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x00048DEF,
+ 0x034, 0x00047DEC,
+ 0x034, 0x00046DE9,
+ 0x034, 0x00045CCB,
+ 0x034, 0x0004488D,
+ 0x034, 0x0004348D,
+ 0x034, 0x0004248A,
+ 0x034, 0x0004108D,
+ 0x034, 0x0004008A,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F0200, 0xABCD,
+ 0x034, 0x0002ADF4,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0002A0F3,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0002ADF7,
+ 0xFF0F0200, 0xDEAD,
+ 0xFF0F0104, 0xABCD,
+ 0x034, 0x00029DF4,
+ 0xFF0F0204, 0xCDEF,
+ 0x034, 0x00029DF4,
+ 0xFF0F0404, 0xCDEF,
+ 0x034, 0x00029DF4,
+ 0xFF0F0200, 0xCDEF,
+ 0x034, 0x00029DF1,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x000290F0,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x00029DF2,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F0104, 0xABCD,
+ 0x034, 0x00028DF1,
+ 0x034, 0x00027DEE,
+ 0x034, 0x00026DEB,
+ 0x034, 0x00025CEC,
+ 0x034, 0x00024CE9,
+ 0x034, 0x000238CA,
+ 0x034, 0x00022889,
+ 0x034, 0x00021489,
+ 0x034, 0x0002044A,
+ 0xFF0F0204, 0xCDEF,
+ 0x034, 0x00028DF1,
+ 0x034, 0x00027DEE,
+ 0x034, 0x00026DEB,
+ 0x034, 0x00025CEC,
+ 0x034, 0x00024CE9,
+ 0x034, 0x000238CA,
+ 0x034, 0x00022889,
+ 0x034, 0x00021489,
+ 0x034, 0x0002044A,
+ 0xFF0F0404, 0xCDEF,
+ 0x034, 0x00028DF1,
+ 0x034, 0x00027DEE,
+ 0x034, 0x00026DEB,
+ 0x034, 0x00025CEC,
+ 0x034, 0x00024CE9,
+ 0x034, 0x000238CA,
+ 0x034, 0x00022889,
+ 0x034, 0x00021489,
+ 0x034, 0x0002044A,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x000280AF,
+ 0x034, 0x000270AC,
+ 0x034, 0x0002608B,
+ 0x034, 0x00025069,
+ 0x034, 0x00024048,
+ 0x034, 0x00023045,
+ 0x034, 0x00022026,
+ 0x034, 0x00021023,
+ 0x034, 0x00020002,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x00028DEE,
+ 0x034, 0x00027DEB,
+ 0x034, 0x00026CCD,
+ 0x034, 0x00025CCA,
+ 0x034, 0x0002488C,
+ 0x034, 0x0002384C,
+ 0x034, 0x00022849,
+ 0x034, 0x00021449,
+ 0x034, 0x0002004D,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F02C0, 0xABCD,
+ 0x034, 0x0000A0D7,
+ 0x034, 0x000090D3,
+ 0x034, 0x000080B1,
+ 0x034, 0x000070AE,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0000ADF7,
+ 0x034, 0x00009DF4,
+ 0x034, 0x00008DF1,
+ 0x034, 0x00007DEE,
+ 0xFF0F02C0, 0xDEAD,
+ 0xFF0F0104, 0xABCD,
+ 0x034, 0x00006DEB,
+ 0x034, 0x00005CEC,
+ 0x034, 0x00004CE9,
+ 0x034, 0x000038CA,
+ 0x034, 0x00002889,
+ 0x034, 0x00001489,
+ 0x034, 0x0000044A,
+ 0xFF0F0204, 0xCDEF,
+ 0x034, 0x00006DEB,
+ 0x034, 0x00005CEC,
+ 0x034, 0x00004CE9,
+ 0x034, 0x000038CA,
+ 0x034, 0x00002889,
+ 0x034, 0x00001489,
+ 0x034, 0x0000044A,
+ 0xFF0F0404, 0xCDEF,
+ 0x034, 0x00006DEB,
+ 0x034, 0x00005CEC,
+ 0x034, 0x00004CE9,
+ 0x034, 0x000038CA,
+ 0x034, 0x00002889,
+ 0x034, 0x00001489,
+ 0x034, 0x0000044A,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0000608D,
+ 0x034, 0x0000506B,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x00002044,
+ 0x034, 0x00001025,
+ 0x034, 0x00000004,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x00006DCD,
+ 0x034, 0x00005CCD,
+ 0x034, 0x00004CCA,
+ 0x034, 0x0000388C,
+ 0x034, 0x00002888,
+ 0x034, 0x00001488,
+ 0x034, 0x00000486,
+ 0xFF0F0104, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0xFF0F0104, 0xABCD,
+ 0x035, 0x00000187,
+ 0x035, 0x00008187,
+ 0x035, 0x00010187,
+ 0x035, 0x00020188,
+ 0x035, 0x00028188,
+ 0x035, 0x00030188,
+ 0x035, 0x00040188,
+ 0x035, 0x00048188,
+ 0x035, 0x00050188,
+ 0xFF0F0204, 0xCDEF,
+ 0x035, 0x00000187,
+ 0x035, 0x00008187,
+ 0x035, 0x00010187,
+ 0x035, 0x00020188,
+ 0x035, 0x00028188,
+ 0x035, 0x00030188,
+ 0x035, 0x00040188,
+ 0x035, 0x00048188,
+ 0x035, 0x00050188,
+ 0xFF0F0404, 0xCDEF,
+ 0x035, 0x00000187,
+ 0x035, 0x00008187,
+ 0x035, 0x00010187,
+ 0x035, 0x00020188,
+ 0x035, 0x00028188,
+ 0x035, 0x00030188,
+ 0x035, 0x00040188,
+ 0x035, 0x00048188,
+ 0x035, 0x00050188,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x035, 0x00000145,
+ 0x035, 0x00008145,
+ 0x035, 0x00010145,
+ 0x035, 0x00020196,
+ 0x035, 0x00028196,
+ 0x035, 0x00030196,
+ 0x035, 0x000401C7,
+ 0x035, 0x000481C7,
+ 0x035, 0x000501C7,
+ 0xFF0F0104, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0xFF0F0104, 0xABCD,
+ 0x036, 0x00085733,
+ 0x036, 0x0008D733,
+ 0x036, 0x00095733,
+ 0x036, 0x0009D733,
+ 0x036, 0x000A64B4,
+ 0x036, 0x000AE4B4,
+ 0x036, 0x000B64B4,
+ 0x036, 0x000BE4B4,
+ 0x036, 0x000C64B4,
+ 0x036, 0x000CE4B4,
+ 0x036, 0x000D64B4,
+ 0x036, 0x000DE4B4,
+ 0xFF0F0204, 0xCDEF,
+ 0x036, 0x00085733,
+ 0x036, 0x0008D733,
+ 0x036, 0x00095733,
+ 0x036, 0x0009D733,
+ 0x036, 0x000A64B4,
+ 0x036, 0x000AE4B4,
+ 0x036, 0x000B64B4,
+ 0x036, 0x000BE4B4,
+ 0x036, 0x000C64B4,
+ 0x036, 0x000CE4B4,
+ 0x036, 0x000D64B4,
+ 0x036, 0x000DE4B4,
+ 0xFF0F0404, 0xCDEF,
+ 0x036, 0x00085733,
+ 0x036, 0x0008D733,
+ 0x036, 0x00095733,
+ 0x036, 0x0009D733,
+ 0x036, 0x000A64B4,
+ 0x036, 0x000AE4B4,
+ 0x036, 0x000B64B4,
+ 0x036, 0x000BE4B4,
+ 0x036, 0x000C64B4,
+ 0x036, 0x000CE4B4,
+ 0x036, 0x000D64B4,
+ 0x036, 0x000DE4B4,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x036, 0x000056B3,
+ 0x036, 0x0000D6B3,
+ 0x036, 0x000156B3,
+ 0x036, 0x0001D6B3,
+ 0x036, 0x00026634,
+ 0x036, 0x0002E634,
+ 0x036, 0x00036634,
+ 0x036, 0x0003E634,
+ 0x036, 0x000467B4,
+ 0x036, 0x0004E7B4,
+ 0x036, 0x000567B4,
+ 0x036, 0x0005E7B4,
+ 0xFF0F0104, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000008,
+ 0xFF0F0104, 0xABCD,
+ 0x03C, 0x000001C8,
+ 0x03C, 0x00000492,
+ 0xFF0F0204, 0xCDEF,
+ 0x03C, 0x000001C8,
+ 0x03C, 0x00000492,
+ 0xFF0F0404, 0xCDEF,
+ 0x03C, 0x000001C8,
+ 0x03C, 0x00000492,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x03C, 0x0000022A,
+ 0x03C, 0x00000594,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F0104, 0xABCD,
+ 0x03C, 0x00000800,
+ 0xFF0F0204, 0xCDEF,
+ 0x03C, 0x00000800,
+ 0xFF0F0404, 0xCDEF,
+ 0x03C, 0x00000800,
+ 0xFF0F02C0, 0xCDEF,
+ 0x03C, 0x00000820,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x03C, 0x00000900,
+ 0xFF0F0104, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000002,
+ 0xFF0F0104, 0xABCD,
+ 0x008, 0x0004E400,
+ 0xFF0F0204, 0xCDEF,
+ 0x008, 0x0004E400,
+ 0xFF0F0404, 0xCDEF,
+ 0x008, 0x0004E400,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x008, 0x00002000,
+ 0xFF0F0104, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x0DF, 0x000000C0,
+ 0x01F, 0x00040064,
+ 0xFF0F0104, 0xABCD,
+ 0x058, 0x000A7284,
+ 0x059, 0x000600EC,
+ 0xFF0F0204, 0xCDEF,
+ 0x058, 0x000A7284,
+ 0x059, 0x000600EC,
+ 0xFF0F0404, 0xCDEF,
+ 0x058, 0x000A7284,
+ 0x059, 0x000600EC,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x058, 0x00081184,
+ 0x059, 0x0006016C,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F0104, 0xABCD,
+ 0x061, 0x000E8D73,
+ 0x062, 0x00093FC5,
+ 0xFF0F0204, 0xCDEF,
+ 0x061, 0x000E8D73,
+ 0x062, 0x00093FC5,
+ 0xFF0F0404, 0xCDEF,
+ 0x061, 0x000E8D73,
+ 0x062, 0x00093FC5,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x061, 0x000EAD53,
+ 0x062, 0x00093BC4,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F0104, 0xABCD,
+ 0x063, 0x000110E9,
+ 0xFF0F0204, 0xCDEF,
+ 0x063, 0x000110E9,
+ 0xFF0F0404, 0xCDEF,
+ 0x063, 0x000110E9,
+ 0xFF0F0200, 0xCDEF,
+ 0x063, 0x000710E9,
+ 0xFF0F02C0, 0xCDEF,
+ 0x063, 0x000110E9,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x063, 0x000714E9,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F0104, 0xABCD,
+ 0x064, 0x0001C27C,
+ 0xFF0F0204, 0xCDEF,
+ 0x064, 0x0001C27C,
+ 0xFF0F0404, 0xCDEF,
+ 0x064, 0x0001C27C,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x064, 0x0001C67C,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F0200, 0xABCD,
+ 0x065, 0x00093016,
+ 0xFF0F02C0, 0xCDEF,
+ 0x065, 0x00093015,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x065, 0x00091016,
+ 0xFF0F0200, 0xDEAD,
+ 0x018, 0x00000006,
+ 0x0EF, 0x00002000,
+ 0x03B, 0x0003824B,
+ 0x03B, 0x0003024B,
+ 0x03B, 0x0002844B,
+ 0x03B, 0x00020F4B,
+ 0x03B, 0x00018F4B,
+ 0x03B, 0x000104B2,
+ 0x03B, 0x00008049,
+ 0x03B, 0x00000148,
+ 0x03B, 0x0007824B,
+ 0x03B, 0x0007024B,
+ 0x03B, 0x0006824B,
+ 0x03B, 0x00060F4B,
+ 0x03B, 0x00058F4B,
+ 0x03B, 0x000504B2,
+ 0x03B, 0x00048049,
+ 0x03B, 0x00040148,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x034, 0x0000ADF3,
+ 0x034, 0x00009DEF,
+ 0x034, 0x00008DEC,
+ 0x034, 0x00007DE9,
+ 0x034, 0x00006CED,
+ 0x034, 0x00005CE9,
+ 0x034, 0x000044E9,
+ 0x034, 0x000034E6,
+ 0x034, 0x0000246A,
+ 0x034, 0x00001467,
+ 0x034, 0x00000068,
+ 0x0EF, 0x00000000,
+ 0x0ED, 0x00000010,
+ 0x044, 0x0000ADF2,
+ 0x044, 0x00009DEF,
+ 0x044, 0x00008DEC,
+ 0x044, 0x00007DE9,
+ 0x044, 0x00006CEC,
+ 0x044, 0x00005CE9,
+ 0x044, 0x000044EC,
+ 0x044, 0x000034E9,
+ 0x044, 0x0000246C,
+ 0x044, 0x00001469,
+ 0x044, 0x0000006C,
+ 0x0ED, 0x00000000,
+ 0x0ED, 0x00000001,
+ 0x040, 0x00038DA7,
+ 0x040, 0x000300C2,
+ 0x040, 0x000288E2,
+ 0x040, 0x000200B8,
+ 0x040, 0x000188A5,
+ 0x040, 0x00010FBC,
+ 0x040, 0x00008F71,
+ 0x040, 0x00000240,
+ 0x0ED, 0x00000000,
+ 0x0EF, 0x000020A2,
+ 0x0DF, 0x00000080,
+ 0x035, 0x00000120,
+ 0x035, 0x00008120,
+ 0x035, 0x00010120,
+ 0x036, 0x00000085,
+ 0x036, 0x00008085,
+ 0x036, 0x00010085,
+ 0x036, 0x00018085,
+ 0x0EF, 0x00000000,
+ 0x051, 0x00000C31,
+ 0x052, 0x00000622,
+ 0x053, 0x000FC70B,
+ 0x054, 0x0000017E,
+ 0x056, 0x00051DF3,
+ 0x051, 0x00000C01,
+ 0x052, 0x000006D6,
+ 0x053, 0x000FC649,
+ 0x070, 0x00049661,
+ 0x071, 0x0007843E,
+ 0x072, 0x00000382,
+ 0x074, 0x00051400,
+ 0x035, 0x00000160,
+ 0x035, 0x00008160,
+ 0x035, 0x00010160,
+ 0x036, 0x00000124,
+ 0x036, 0x00008124,
+ 0x036, 0x00010124,
+ 0x036, 0x00018124,
+ 0x0ED, 0x0000000C,
+ 0x045, 0x00000140,
+ 0x045, 0x00008140,
+ 0x045, 0x00010140,
+ 0x046, 0x00000124,
+ 0x046, 0x00008124,
+ 0x046, 0x00010124,
+ 0x046, 0x00018124,
+ 0x0DF, 0x00000088,
+ 0x0B3, 0x000F0E18,
+ 0x0B4, 0x0001214C,
+ 0x0B7, 0x0003000C,
+ 0x01C, 0x000539D2,
+ 0x018, 0x0001F12A,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x018, 0x0001712A,
+};
+
+u32 RTL8812AE_MAC_REG_ARRAY[] = {
+ 0x010, 0x0000000C,
+ 0xFF0F0180, 0xABCD,
+ 0x025, 0x0000000F,
+ 0xFF0F01C0, 0xCDEF,
+ 0x025, 0x0000000F,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x025, 0x0000006F,
+ 0xFF0F0180, 0xDEAD,
+ 0x072, 0x00000000,
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000007,
+ 0x437, 0x00000008,
+ 0x43C, 0x00000004,
+ 0x43D, 0x00000005,
+ 0x43E, 0x00000007,
+ 0x43F, 0x00000008,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000010,
+ 0x445, 0x00000000,
+ 0x446, 0x00000000,
+ 0x447, 0x00000000,
+ 0x448, 0x00000000,
+ 0x449, 0x000000F0,
+ 0x44A, 0x0000000F,
+ 0x44B, 0x0000003E,
+ 0x44C, 0x00000010,
+ 0x44D, 0x00000000,
+ 0x44E, 0x00000000,
+ 0x44F, 0x00000000,
+ 0x450, 0x00000000,
+ 0x451, 0x000000F0,
+ 0x452, 0x0000000F,
+ 0x453, 0x00000000,
+ 0x45B, 0x00000080,
+ 0x460, 0x00000066,
+ 0x461, 0x00000066,
+ 0x4C8, 0x000000FF,
+ 0x4C9, 0x00000008,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x525, 0x0000004F,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55C, 0x00000050,
+ 0x55D, 0x000000FF,
+ 0x604, 0x00000001,
+ 0x605, 0x00000030,
+ 0x607, 0x00000003,
+ 0x608, 0x0000000E,
+ 0x609, 0x0000002A,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x638, 0x00000050,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000E,
+ 0x63F, 0x0000000E,
+ 0x640, 0x00000080,
+ 0x642, 0x00000040,
+ 0x643, 0x00000000,
+ 0x652, 0x000000C8,
+ 0x66E, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70A, 0x00000065,
+ 0x70B, 0x00000087,
+ 0x718, 0x00000040,
+};
+
+u32 RTL8821AE_MAC_REG_ARRAY[] = {
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000007,
+ 0x437, 0x00000008,
+ 0x43C, 0x00000004,
+ 0x43D, 0x00000005,
+ 0x43E, 0x00000007,
+ 0x43F, 0x00000008,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000010,
+ 0x445, 0x00000000,
+ 0x446, 0x00000000,
+ 0x447, 0x00000000,
+ 0x448, 0x00000000,
+ 0x449, 0x000000F0,
+ 0x44A, 0x0000000F,
+ 0x44B, 0x0000003E,
+ 0x44C, 0x00000010,
+ 0x44D, 0x00000000,
+ 0x44E, 0x00000000,
+ 0x44F, 0x00000000,
+ 0x450, 0x00000000,
+ 0x451, 0x000000F0,
+ 0x452, 0x0000000F,
+ 0x453, 0x00000000,
+ 0x456, 0x0000005E,
+ 0x460, 0x00000066,
+ 0x461, 0x00000066,
+ 0x4C8, 0x0000003F,
+ 0x4C9, 0x000000FF,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x525, 0x0000004F,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55C, 0x00000050,
+ 0x55D, 0x000000FF,
+ 0x605, 0x00000030,
+ 0x607, 0x00000007,
+ 0x608, 0x0000000E,
+ 0x609, 0x0000002A,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x638, 0x00000050,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000E,
+ 0x63F, 0x0000000E,
+ 0x640, 0x00000040,
+ 0x642, 0x00000040,
+ 0x643, 0x00000000,
+ 0x652, 0x000000C8,
+ 0x66E, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70A, 0x00000065,
+ 0x70B, 0x00000087,
+ 0x718, 0x00000040,
+};
+
+u32 RTL8812AE_AGC_TAB_ARRAY[] = {
+ 0xFF0F07D8, 0xABCD,
+ 0x81C, 0xFC000001,
+ 0x81C, 0xFB020001,
+ 0x81C, 0xFA040001,
+ 0x81C, 0xF9060001,
+ 0x81C, 0xF8080001,
+ 0x81C, 0xF70A0001,
+ 0x81C, 0xF60C0001,
+ 0x81C, 0xF50E0001,
+ 0x81C, 0xF4100001,
+ 0x81C, 0xF3120001,
+ 0x81C, 0xF2140001,
+ 0x81C, 0xF1160001,
+ 0x81C, 0xF0180001,
+ 0x81C, 0xEF1A0001,
+ 0x81C, 0xEE1C0001,
+ 0x81C, 0xED1E0001,
+ 0x81C, 0xEC200001,
+ 0x81C, 0xEB220001,
+ 0x81C, 0xEA240001,
+ 0x81C, 0xCD260001,
+ 0x81C, 0xCC280001,
+ 0x81C, 0xCB2A0001,
+ 0x81C, 0xCA2C0001,
+ 0x81C, 0xC92E0001,
+ 0x81C, 0xC8300001,
+ 0x81C, 0xA6320001,
+ 0x81C, 0xA5340001,
+ 0x81C, 0xA4360001,
+ 0x81C, 0xA3380001,
+ 0x81C, 0xA23A0001,
+ 0x81C, 0x883C0001,
+ 0x81C, 0x873E0001,
+ 0x81C, 0x86400001,
+ 0x81C, 0x85420001,
+ 0x81C, 0x84440001,
+ 0x81C, 0x83460001,
+ 0x81C, 0x82480001,
+ 0x81C, 0x814A0001,
+ 0x81C, 0x484C0001,
+ 0x81C, 0x474E0001,
+ 0x81C, 0x46500001,
+ 0x81C, 0x45520001,
+ 0x81C, 0x44540001,
+ 0x81C, 0x43560001,
+ 0x81C, 0x42580001,
+ 0x81C, 0x415A0001,
+ 0x81C, 0x255C0001,
+ 0x81C, 0x245E0001,
+ 0x81C, 0x23600001,
+ 0x81C, 0x22620001,
+ 0x81C, 0x21640001,
+ 0x81C, 0x21660001,
+ 0x81C, 0x21680001,
+ 0x81C, 0x216A0001,
+ 0x81C, 0x216C0001,
+ 0x81C, 0x216E0001,
+ 0x81C, 0x21700001,
+ 0x81C, 0x21720001,
+ 0x81C, 0x21740001,
+ 0x81C, 0x21760001,
+ 0x81C, 0x21780001,
+ 0x81C, 0x217A0001,
+ 0x81C, 0x217C0001,
+ 0x81C, 0x217E0001,
+ 0xFF0F07D0, 0xCDEF,
+ 0x81C, 0xF9000001,
+ 0x81C, 0xF8020001,
+ 0x81C, 0xF7040001,
+ 0x81C, 0xF6060001,
+ 0x81C, 0xF5080001,
+ 0x81C, 0xF40A0001,
+ 0x81C, 0xF30C0001,
+ 0x81C, 0xF20E0001,
+ 0x81C, 0xF1100001,
+ 0x81C, 0xF0120001,
+ 0x81C, 0xEF140001,
+ 0x81C, 0xEE160001,
+ 0x81C, 0xED180001,
+ 0x81C, 0xEC1A0001,
+ 0x81C, 0xEB1C0001,
+ 0x81C, 0xEA1E0001,
+ 0x81C, 0xCD200001,
+ 0x81C, 0xCC220001,
+ 0x81C, 0xCB240001,
+ 0x81C, 0xCA260001,
+ 0x81C, 0xC9280001,
+ 0x81C, 0xC82A0001,
+ 0x81C, 0xC72C0001,
+ 0x81C, 0xC62E0001,
+ 0x81C, 0xA5300001,
+ 0x81C, 0xA4320001,
+ 0x81C, 0xA3340001,
+ 0x81C, 0xA2360001,
+ 0x81C, 0x88380001,
+ 0x81C, 0x873A0001,
+ 0x81C, 0x863C0001,
+ 0x81C, 0x853E0001,
+ 0x81C, 0x84400001,
+ 0x81C, 0x83420001,
+ 0x81C, 0x82440001,
+ 0x81C, 0x81460001,
+ 0x81C, 0x48480001,
+ 0x81C, 0x474A0001,
+ 0x81C, 0x464C0001,
+ 0x81C, 0x454E0001,
+ 0x81C, 0x44500001,
+ 0x81C, 0x43520001,
+ 0x81C, 0x42540001,
+ 0x81C, 0x41560001,
+ 0x81C, 0x25580001,
+ 0x81C, 0x245A0001,
+ 0x81C, 0x235C0001,
+ 0x81C, 0x225E0001,
+ 0x81C, 0x21600001,
+ 0x81C, 0x21620001,
+ 0x81C, 0x21640001,
+ 0x81C, 0x21660001,
+ 0x81C, 0x21680001,
+ 0x81C, 0x216A0001,
+ 0x81C, 0x236C0001,
+ 0x81C, 0x226E0001,
+ 0x81C, 0x21700001,
+ 0x81C, 0x21720001,
+ 0x81C, 0x21740001,
+ 0x81C, 0x21760001,
+ 0x81C, 0x21780001,
+ 0x81C, 0x217A0001,
+ 0x81C, 0x217C0001,
+ 0x81C, 0x217E0001,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x81C, 0xFF000001,
+ 0x81C, 0xFF020001,
+ 0x81C, 0xFF040001,
+ 0x81C, 0xFF060001,
+ 0x81C, 0xFF080001,
+ 0x81C, 0xFE0A0001,
+ 0x81C, 0xFD0C0001,
+ 0x81C, 0xFC0E0001,
+ 0x81C, 0xFB100001,
+ 0x81C, 0xFA120001,
+ 0x81C, 0xF9140001,
+ 0x81C, 0xF8160001,
+ 0x81C, 0xF7180001,
+ 0x81C, 0xF61A0001,
+ 0x81C, 0xF51C0001,
+ 0x81C, 0xF41E0001,
+ 0x81C, 0xF3200001,
+ 0x81C, 0xF2220001,
+ 0x81C, 0xF1240001,
+ 0x81C, 0xF0260001,
+ 0x81C, 0xEF280001,
+ 0x81C, 0xEE2A0001,
+ 0x81C, 0xED2C0001,
+ 0x81C, 0xEC2E0001,
+ 0x81C, 0xEB300001,
+ 0x81C, 0xEA320001,
+ 0x81C, 0xE9340001,
+ 0x81C, 0xE8360001,
+ 0x81C, 0xE7380001,
+ 0x81C, 0xE63A0001,
+ 0x81C, 0xE53C0001,
+ 0x81C, 0xC73E0001,
+ 0x81C, 0xC6400001,
+ 0x81C, 0xC5420001,
+ 0x81C, 0xC4440001,
+ 0x81C, 0xC3460001,
+ 0x81C, 0xC2480001,
+ 0x81C, 0xC14A0001,
+ 0x81C, 0xA74C0001,
+ 0x81C, 0xA64E0001,
+ 0x81C, 0xA5500001,
+ 0x81C, 0xA4520001,
+ 0x81C, 0xA3540001,
+ 0x81C, 0xA2560001,
+ 0x81C, 0xA1580001,
+ 0x81C, 0x675A0001,
+ 0x81C, 0x665C0001,
+ 0x81C, 0x655E0001,
+ 0x81C, 0x64600001,
+ 0x81C, 0x63620001,
+ 0x81C, 0x48640001,
+ 0x81C, 0x47660001,
+ 0x81C, 0x46680001,
+ 0x81C, 0x456A0001,
+ 0x81C, 0x446C0001,
+ 0x81C, 0x436E0001,
+ 0x81C, 0x42700001,
+ 0x81C, 0x41720001,
+ 0x81C, 0x41740001,
+ 0x81C, 0x41760001,
+ 0x81C, 0x41780001,
+ 0x81C, 0x417A0001,
+ 0x81C, 0x417C0001,
+ 0x81C, 0x417E0001,
+ 0xFF0F07D8, 0xDEAD,
+ 0xFF0F0180, 0xABCD,
+ 0x81C, 0xFC800001,
+ 0x81C, 0xFB820001,
+ 0x81C, 0xFA840001,
+ 0x81C, 0xF9860001,
+ 0x81C, 0xF8880001,
+ 0x81C, 0xF78A0001,
+ 0x81C, 0xF68C0001,
+ 0x81C, 0xF58E0001,
+ 0x81C, 0xF4900001,
+ 0x81C, 0xF3920001,
+ 0x81C, 0xF2940001,
+ 0x81C, 0xF1960001,
+ 0x81C, 0xF0980001,
+ 0x81C, 0xEF9A0001,
+ 0x81C, 0xEE9C0001,
+ 0x81C, 0xED9E0001,
+ 0x81C, 0xECA00001,
+ 0x81C, 0xEBA20001,
+ 0x81C, 0xEAA40001,
+ 0x81C, 0xE9A60001,
+ 0x81C, 0xE8A80001,
+ 0x81C, 0xE7AA0001,
+ 0x81C, 0xE6AC0001,
+ 0x81C, 0xE5AE0001,
+ 0x81C, 0xE4B00001,
+ 0x81C, 0xE3B20001,
+ 0x81C, 0xA8B40001,
+ 0x81C, 0xA7B60001,
+ 0x81C, 0xA6B80001,
+ 0x81C, 0xA5BA0001,
+ 0x81C, 0xA4BC0001,
+ 0x81C, 0xA3BE0001,
+ 0x81C, 0xA2C00001,
+ 0x81C, 0xA1C20001,
+ 0x81C, 0x68C40001,
+ 0x81C, 0x67C60001,
+ 0x81C, 0x66C80001,
+ 0x81C, 0x65CA0001,
+ 0x81C, 0x64CC0001,
+ 0x81C, 0x47CE0001,
+ 0x81C, 0x46D00001,
+ 0x81C, 0x45D20001,
+ 0x81C, 0x44D40001,
+ 0x81C, 0x43D60001,
+ 0x81C, 0x42D80001,
+ 0x81C, 0x08DA0001,
+ 0x81C, 0x07DC0001,
+ 0x81C, 0x06DE0001,
+ 0x81C, 0x05E00001,
+ 0x81C, 0x04E20001,
+ 0x81C, 0x03E40001,
+ 0x81C, 0x02E60001,
+ 0x81C, 0x01E80001,
+ 0x81C, 0x01EA0001,
+ 0x81C, 0x01EC0001,
+ 0x81C, 0x01EE0001,
+ 0x81C, 0x01F00001,
+ 0x81C, 0x01F20001,
+ 0x81C, 0x01F40001,
+ 0x81C, 0x01F60001,
+ 0x81C, 0x01F80001,
+ 0x81C, 0x01FA0001,
+ 0x81C, 0x01FC0001,
+ 0x81C, 0x01FE0001,
+ 0xFF0F0280, 0xCDEF,
+ 0x81C, 0xFC800001,
+ 0x81C, 0xFB820001,
+ 0x81C, 0xFA840001,
+ 0x81C, 0xF9860001,
+ 0x81C, 0xF8880001,
+ 0x81C, 0xF78A0001,
+ 0x81C, 0xF68C0001,
+ 0x81C, 0xF58E0001,
+ 0x81C, 0xF4900001,
+ 0x81C, 0xF3920001,
+ 0x81C, 0xF2940001,
+ 0x81C, 0xF1960001,
+ 0x81C, 0xF0980001,
+ 0x81C, 0xEF9A0001,
+ 0x81C, 0xEE9C0001,
+ 0x81C, 0xED9E0001,
+ 0x81C, 0xECA00001,
+ 0x81C, 0xEBA20001,
+ 0x81C, 0xEAA40001,
+ 0x81C, 0xE9A60001,
+ 0x81C, 0xE8A80001,
+ 0x81C, 0xE7AA0001,
+ 0x81C, 0xE6AC0001,
+ 0x81C, 0xE5AE0001,
+ 0x81C, 0xE4B00001,
+ 0x81C, 0xE3B20001,
+ 0x81C, 0xA8B40001,
+ 0x81C, 0xA7B60001,
+ 0x81C, 0xA6B80001,
+ 0x81C, 0xA5BA0001,
+ 0x81C, 0xA4BC0001,
+ 0x81C, 0xA3BE0001,
+ 0x81C, 0xA2C00001,
+ 0x81C, 0xA1C20001,
+ 0x81C, 0x68C40001,
+ 0x81C, 0x67C60001,
+ 0x81C, 0x66C80001,
+ 0x81C, 0x65CA0001,
+ 0x81C, 0x64CC0001,
+ 0x81C, 0x47CE0001,
+ 0x81C, 0x46D00001,
+ 0x81C, 0x45D20001,
+ 0x81C, 0x44D40001,
+ 0x81C, 0x43D60001,
+ 0x81C, 0x42D80001,
+ 0x81C, 0x08DA0001,
+ 0x81C, 0x07DC0001,
+ 0x81C, 0x06DE0001,
+ 0x81C, 0x05E00001,
+ 0x81C, 0x04E20001,
+ 0x81C, 0x03E40001,
+ 0x81C, 0x02E60001,
+ 0x81C, 0x01E80001,
+ 0x81C, 0x01EA0001,
+ 0x81C, 0x01EC0001,
+ 0x81C, 0x01EE0001,
+ 0x81C, 0x01F00001,
+ 0x81C, 0x01F20001,
+ 0x81C, 0x01F40001,
+ 0x81C, 0x01F60001,
+ 0x81C, 0x01F80001,
+ 0x81C, 0x01FA0001,
+ 0x81C, 0x01FC0001,
+ 0x81C, 0x01FE0001,
+ 0xFF0F01C0, 0xCDEF,
+ 0x81C, 0xFC800001,
+ 0x81C, 0xFB820001,
+ 0x81C, 0xFA840001,
+ 0x81C, 0xF9860001,
+ 0x81C, 0xF8880001,
+ 0x81C, 0xF78A0001,
+ 0x81C, 0xF68C0001,
+ 0x81C, 0xF58E0001,
+ 0x81C, 0xF4900001,
+ 0x81C, 0xF3920001,
+ 0x81C, 0xF2940001,
+ 0x81C, 0xF1960001,
+ 0x81C, 0xF0980001,
+ 0x81C, 0xEF9A0001,
+ 0x81C, 0xEE9C0001,
+ 0x81C, 0xED9E0001,
+ 0x81C, 0xECA00001,
+ 0x81C, 0xEBA20001,
+ 0x81C, 0xEAA40001,
+ 0x81C, 0xE9A60001,
+ 0x81C, 0xE8A80001,
+ 0x81C, 0xE7AA0001,
+ 0x81C, 0xE6AC0001,
+ 0x81C, 0xE5AE0001,
+ 0x81C, 0xE4B00001,
+ 0x81C, 0xE3B20001,
+ 0x81C, 0xA8B40001,
+ 0x81C, 0xA7B60001,
+ 0x81C, 0xA6B80001,
+ 0x81C, 0xA5BA0001,
+ 0x81C, 0xA4BC0001,
+ 0x81C, 0xA3BE0001,
+ 0x81C, 0xA2C00001,
+ 0x81C, 0xA1C20001,
+ 0x81C, 0x68C40001,
+ 0x81C, 0x67C60001,
+ 0x81C, 0x66C80001,
+ 0x81C, 0x65CA0001,
+ 0x81C, 0x64CC0001,
+ 0x81C, 0x47CE0001,
+ 0x81C, 0x46D00001,
+ 0x81C, 0x45D20001,
+ 0x81C, 0x44D40001,
+ 0x81C, 0x43D60001,
+ 0x81C, 0x42D80001,
+ 0x81C, 0x08DA0001,
+ 0x81C, 0x07DC0001,
+ 0x81C, 0x06DE0001,
+ 0x81C, 0x05E00001,
+ 0x81C, 0x04E20001,
+ 0x81C, 0x03E40001,
+ 0x81C, 0x02E60001,
+ 0x81C, 0x01E80001,
+ 0x81C, 0x01EA0001,
+ 0x81C, 0x01EC0001,
+ 0x81C, 0x01EE0001,
+ 0x81C, 0x01F00001,
+ 0x81C, 0x01F20001,
+ 0x81C, 0x01F40001,
+ 0x81C, 0x01F60001,
+ 0x81C, 0x01F80001,
+ 0x81C, 0x01FA0001,
+ 0x81C, 0x01FC0001,
+ 0x81C, 0x01FE0001,
+ 0xFF0F02C0, 0xCDEF,
+ 0x81C, 0xFC800001,
+ 0x81C, 0xFB820001,
+ 0x81C, 0xFA840001,
+ 0x81C, 0xF9860001,
+ 0x81C, 0xF8880001,
+ 0x81C, 0xF78A0001,
+ 0x81C, 0xF68C0001,
+ 0x81C, 0xF58E0001,
+ 0x81C, 0xF4900001,
+ 0x81C, 0xF3920001,
+ 0x81C, 0xF2940001,
+ 0x81C, 0xF1960001,
+ 0x81C, 0xF0980001,
+ 0x81C, 0xEF9A0001,
+ 0x81C, 0xEE9C0001,
+ 0x81C, 0xED9E0001,
+ 0x81C, 0xECA00001,
+ 0x81C, 0xEBA20001,
+ 0x81C, 0xEAA40001,
+ 0x81C, 0xE9A60001,
+ 0x81C, 0xE8A80001,
+ 0x81C, 0xE7AA0001,
+ 0x81C, 0xE6AC0001,
+ 0x81C, 0xE5AE0001,
+ 0x81C, 0xE4B00001,
+ 0x81C, 0xE3B20001,
+ 0x81C, 0xA8B40001,
+ 0x81C, 0xA7B60001,
+ 0x81C, 0xA6B80001,
+ 0x81C, 0xA5BA0001,
+ 0x81C, 0xA4BC0001,
+ 0x81C, 0xA3BE0001,
+ 0x81C, 0xA2C00001,
+ 0x81C, 0xA1C20001,
+ 0x81C, 0x68C40001,
+ 0x81C, 0x67C60001,
+ 0x81C, 0x66C80001,
+ 0x81C, 0x65CA0001,
+ 0x81C, 0x64CC0001,
+ 0x81C, 0x47CE0001,
+ 0x81C, 0x46D00001,
+ 0x81C, 0x45D20001,
+ 0x81C, 0x44D40001,
+ 0x81C, 0x43D60001,
+ 0x81C, 0x42D80001,
+ 0x81C, 0x08DA0001,
+ 0x81C, 0x07DC0001,
+ 0x81C, 0x06DE0001,
+ 0x81C, 0x05E00001,
+ 0x81C, 0x04E20001,
+ 0x81C, 0x03E40001,
+ 0x81C, 0x02E60001,
+ 0x81C, 0x01E80001,
+ 0x81C, 0x01EA0001,
+ 0x81C, 0x01EC0001,
+ 0x81C, 0x01EE0001,
+ 0x81C, 0x01F00001,
+ 0x81C, 0x01F20001,
+ 0x81C, 0x01F40001,
+ 0x81C, 0x01F60001,
+ 0x81C, 0x01F80001,
+ 0x81C, 0x01FA0001,
+ 0x81C, 0x01FC0001,
+ 0x81C, 0x01FE0001,
+ 0xFF0F07D8, 0xCDEF,
+ 0x81C, 0xFC800001,
+ 0x81C, 0xFB820001,
+ 0x81C, 0xFA840001,
+ 0x81C, 0xF9860001,
+ 0x81C, 0xF8880001,
+ 0x81C, 0xF78A0001,
+ 0x81C, 0xF68C0001,
+ 0x81C, 0xF58E0001,
+ 0x81C, 0xF4900001,
+ 0x81C, 0xF3920001,
+ 0x81C, 0xF2940001,
+ 0x81C, 0xF1960001,
+ 0x81C, 0xF0980001,
+ 0x81C, 0xEF9A0001,
+ 0x81C, 0xEE9C0001,
+ 0x81C, 0xED9E0001,
+ 0x81C, 0xECA00001,
+ 0x81C, 0xEBA20001,
+ 0x81C, 0xEAA40001,
+ 0x81C, 0xE9A60001,
+ 0x81C, 0xE8A80001,
+ 0x81C, 0xE7AA0001,
+ 0x81C, 0xE6AC0001,
+ 0x81C, 0xE5AE0001,
+ 0x81C, 0xE4B00001,
+ 0x81C, 0xE3B20001,
+ 0x81C, 0xA8B40001,
+ 0x81C, 0xA7B60001,
+ 0x81C, 0xA6B80001,
+ 0x81C, 0xA5BA0001,
+ 0x81C, 0xA4BC0001,
+ 0x81C, 0xA3BE0001,
+ 0x81C, 0xA2C00001,
+ 0x81C, 0xA1C20001,
+ 0x81C, 0x68C40001,
+ 0x81C, 0x67C60001,
+ 0x81C, 0x66C80001,
+ 0x81C, 0x65CA0001,
+ 0x81C, 0x64CC0001,
+ 0x81C, 0x47CE0001,
+ 0x81C, 0x46D00001,
+ 0x81C, 0x45D20001,
+ 0x81C, 0x44D40001,
+ 0x81C, 0x43D60001,
+ 0x81C, 0x42D80001,
+ 0x81C, 0x08DA0001,
+ 0x81C, 0x07DC0001,
+ 0x81C, 0x06DE0001,
+ 0x81C, 0x05E00001,
+ 0x81C, 0x04E20001,
+ 0x81C, 0x03E40001,
+ 0x81C, 0x02E60001,
+ 0x81C, 0x01E80001,
+ 0x81C, 0x01EA0001,
+ 0x81C, 0x01EC0001,
+ 0x81C, 0x01EE0001,
+ 0x81C, 0x01F00001,
+ 0x81C, 0x01F20001,
+ 0x81C, 0x01F40001,
+ 0x81C, 0x01F60001,
+ 0x81C, 0x01F80001,
+ 0x81C, 0x01FA0001,
+ 0x81C, 0x01FC0001,
+ 0x81C, 0x01FE0001,
+ 0xFF0F07D0, 0xCDEF,
+ 0x81C, 0xFC800001,
+ 0x81C, 0xFB820001,
+ 0x81C, 0xFA840001,
+ 0x81C, 0xF9860001,
+ 0x81C, 0xF8880001,
+ 0x81C, 0xF78A0001,
+ 0x81C, 0xF68C0001,
+ 0x81C, 0xF58E0001,
+ 0x81C, 0xF4900001,
+ 0x81C, 0xF3920001,
+ 0x81C, 0xF2940001,
+ 0x81C, 0xF1960001,
+ 0x81C, 0xF0980001,
+ 0x81C, 0xEF9A0001,
+ 0x81C, 0xEE9C0001,
+ 0x81C, 0xED9E0001,
+ 0x81C, 0xECA00001,
+ 0x81C, 0xEBA20001,
+ 0x81C, 0xEAA40001,
+ 0x81C, 0xE9A60001,
+ 0x81C, 0xE8A80001,
+ 0x81C, 0xE7AA0001,
+ 0x81C, 0xE6AC0001,
+ 0x81C, 0xE5AE0001,
+ 0x81C, 0xE4B00001,
+ 0x81C, 0xE3B20001,
+ 0x81C, 0xA8B40001,
+ 0x81C, 0xA7B60001,
+ 0x81C, 0xA6B80001,
+ 0x81C, 0xA5BA0001,
+ 0x81C, 0xA4BC0001,
+ 0x81C, 0xA3BE0001,
+ 0x81C, 0xA2C00001,
+ 0x81C, 0xA1C20001,
+ 0x81C, 0x68C40001,
+ 0x81C, 0x67C60001,
+ 0x81C, 0x66C80001,
+ 0x81C, 0x65CA0001,
+ 0x81C, 0x64CC0001,
+ 0x81C, 0x47CE0001,
+ 0x81C, 0x46D00001,
+ 0x81C, 0x45D20001,
+ 0x81C, 0x44D40001,
+ 0x81C, 0x43D60001,
+ 0x81C, 0x42D80001,
+ 0x81C, 0x08DA0001,
+ 0x81C, 0x07DC0001,
+ 0x81C, 0x06DE0001,
+ 0x81C, 0x05E00001,
+ 0x81C, 0x04E20001,
+ 0x81C, 0x03E40001,
+ 0x81C, 0x02E60001,
+ 0x81C, 0x01E80001,
+ 0x81C, 0x01EA0001,
+ 0x81C, 0x01EC0001,
+ 0x81C, 0x01EE0001,
+ 0x81C, 0x01F00001,
+ 0x81C, 0x01F20001,
+ 0x81C, 0x01F40001,
+ 0x81C, 0x01F60001,
+ 0x81C, 0x01F80001,
+ 0x81C, 0x01FA0001,
+ 0x81C, 0x01FC0001,
+ 0x81C, 0x01FE0001,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x81C, 0xFF800001,
+ 0x81C, 0xFF820001,
+ 0x81C, 0xFF840001,
+ 0x81C, 0xFE860001,
+ 0x81C, 0xFD880001,
+ 0x81C, 0xFC8A0001,
+ 0x81C, 0xFB8C0001,
+ 0x81C, 0xFA8E0001,
+ 0x81C, 0xF9900001,
+ 0x81C, 0xF8920001,
+ 0x81C, 0xF7940001,
+ 0x81C, 0xF6960001,
+ 0x81C, 0xF5980001,
+ 0x81C, 0xF49A0001,
+ 0x81C, 0xF39C0001,
+ 0x81C, 0xF29E0001,
+ 0x81C, 0xF1A00001,
+ 0x81C, 0xF0A20001,
+ 0x81C, 0xEFA40001,
+ 0x81C, 0xEEA60001,
+ 0x81C, 0xEDA80001,
+ 0x81C, 0xECAA0001,
+ 0x81C, 0xEBAC0001,
+ 0x81C, 0xEAAE0001,
+ 0x81C, 0xE9B00001,
+ 0x81C, 0xE8B20001,
+ 0x81C, 0xE7B40001,
+ 0x81C, 0xE6B60001,
+ 0x81C, 0xE5B80001,
+ 0x81C, 0xE4BA0001,
+ 0x81C, 0xE3BC0001,
+ 0x81C, 0xA8BE0001,
+ 0x81C, 0xA7C00001,
+ 0x81C, 0xA6C20001,
+ 0x81C, 0xA5C40001,
+ 0x81C, 0xA4C60001,
+ 0x81C, 0xA3C80001,
+ 0x81C, 0xA2CA0001,
+ 0x81C, 0xA1CC0001,
+ 0x81C, 0x68CE0001,
+ 0x81C, 0x67D00001,
+ 0x81C, 0x66D20001,
+ 0x81C, 0x65D40001,
+ 0x81C, 0x64D60001,
+ 0x81C, 0x47D80001,
+ 0x81C, 0x46DA0001,
+ 0x81C, 0x45DC0001,
+ 0x81C, 0x44DE0001,
+ 0x81C, 0x43E00001,
+ 0x81C, 0x42E20001,
+ 0x81C, 0x08E40001,
+ 0x81C, 0x07E60001,
+ 0x81C, 0x06E80001,
+ 0x81C, 0x05EA0001,
+ 0x81C, 0x04EC0001,
+ 0x81C, 0x03EE0001,
+ 0x81C, 0x02F00001,
+ 0x81C, 0x01F20001,
+ 0x81C, 0x01F40001,
+ 0x81C, 0x01F60001,
+ 0x81C, 0x01F80001,
+ 0x81C, 0x01FA0001,
+ 0x81C, 0x01FC0001,
+ 0x81C, 0x01FE0001,
+ 0xFF0F0180, 0xDEAD,
+ 0xC50, 0x00000022,
+ 0xC50, 0x00000020,
+ 0xE50, 0x00000022,
+ 0xE50, 0x00000020,
+};
+
+u32 RTL8821AE_AGC_TAB_ARRAY[] = {
+ 0x81C, 0xBF000001,
+ 0x81C, 0xBF020001,
+ 0x81C, 0xBF040001,
+ 0x81C, 0xBF060001,
+ 0x81C, 0xBE080001,
+ 0x81C, 0xBD0A0001,
+ 0x81C, 0xBC0C0001,
+ 0x81C, 0xBA0E0001,
+ 0x81C, 0xB9100001,
+ 0x81C, 0xB8120001,
+ 0x81C, 0xB7140001,
+ 0x81C, 0xB6160001,
+ 0x81C, 0xB5180001,
+ 0x81C, 0xB41A0001,
+ 0x81C, 0xB31C0001,
+ 0x81C, 0xB21E0001,
+ 0x81C, 0xB1200001,
+ 0x81C, 0xB0220001,
+ 0x81C, 0xAF240001,
+ 0x81C, 0xAE260001,
+ 0x81C, 0xAD280001,
+ 0x81C, 0xAC2A0001,
+ 0x81C, 0xAB2C0001,
+ 0x81C, 0xAA2E0001,
+ 0x81C, 0xA9300001,
+ 0x81C, 0xA8320001,
+ 0x81C, 0xA7340001,
+ 0x81C, 0xA6360001,
+ 0x81C, 0xA5380001,
+ 0x81C, 0xA43A0001,
+ 0x81C, 0xA33C0001,
+ 0x81C, 0x673E0001,
+ 0x81C, 0x66400001,
+ 0x81C, 0x65420001,
+ 0x81C, 0x64440001,
+ 0x81C, 0x63460001,
+ 0x81C, 0x62480001,
+ 0x81C, 0x614A0001,
+ 0x81C, 0x474C0001,
+ 0x81C, 0x464E0001,
+ 0x81C, 0x45500001,
+ 0x81C, 0x44520001,
+ 0x81C, 0x43540001,
+ 0x81C, 0x42560001,
+ 0x81C, 0x41580001,
+ 0x81C, 0x285A0001,
+ 0x81C, 0x275C0001,
+ 0x81C, 0x265E0001,
+ 0x81C, 0x25600001,
+ 0x81C, 0x24620001,
+ 0x81C, 0x0A640001,
+ 0x81C, 0x09660001,
+ 0x81C, 0x08680001,
+ 0x81C, 0x076A0001,
+ 0x81C, 0x066C0001,
+ 0x81C, 0x056E0001,
+ 0x81C, 0x04700001,
+ 0x81C, 0x03720001,
+ 0x81C, 0x02740001,
+ 0x81C, 0x01760001,
+ 0x81C, 0x01780001,
+ 0x81C, 0x017A0001,
+ 0x81C, 0x017C0001,
+ 0x81C, 0x017E0001,
+ 0xFF0F02C0, 0xABCD,
+ 0x81C, 0xFB000101,
+ 0x81C, 0xFA020101,
+ 0x81C, 0xF9040101,
+ 0x81C, 0xF8060101,
+ 0x81C, 0xF7080101,
+ 0x81C, 0xF60A0101,
+ 0x81C, 0xF50C0101,
+ 0x81C, 0xF40E0101,
+ 0x81C, 0xF3100101,
+ 0x81C, 0xF2120101,
+ 0x81C, 0xF1140101,
+ 0x81C, 0xF0160101,
+ 0x81C, 0xEF180101,
+ 0x81C, 0xEE1A0101,
+ 0x81C, 0xED1C0101,
+ 0x81C, 0xEC1E0101,
+ 0x81C, 0xEB200101,
+ 0x81C, 0xEA220101,
+ 0x81C, 0xE9240101,
+ 0x81C, 0xE8260101,
+ 0x81C, 0xE7280101,
+ 0x81C, 0xE62A0101,
+ 0x81C, 0xE52C0101,
+ 0x81C, 0xE42E0101,
+ 0x81C, 0xE3300101,
+ 0x81C, 0xA5320101,
+ 0x81C, 0xA4340101,
+ 0x81C, 0xA3360101,
+ 0x81C, 0x87380101,
+ 0x81C, 0x863A0101,
+ 0x81C, 0x853C0101,
+ 0x81C, 0x843E0101,
+ 0x81C, 0x69400101,
+ 0x81C, 0x68420101,
+ 0x81C, 0x67440101,
+ 0x81C, 0x66460101,
+ 0x81C, 0x49480101,
+ 0x81C, 0x484A0101,
+ 0x81C, 0x474C0101,
+ 0x81C, 0x2A4E0101,
+ 0x81C, 0x29500101,
+ 0x81C, 0x28520101,
+ 0x81C, 0x27540101,
+ 0x81C, 0x26560101,
+ 0x81C, 0x25580101,
+ 0x81C, 0x245A0101,
+ 0x81C, 0x235C0101,
+ 0x81C, 0x055E0101,
+ 0x81C, 0x04600101,
+ 0x81C, 0x03620101,
+ 0x81C, 0x02640101,
+ 0x81C, 0x01660101,
+ 0x81C, 0x01680101,
+ 0x81C, 0x016A0101,
+ 0x81C, 0x016C0101,
+ 0x81C, 0x016E0101,
+ 0x81C, 0x01700101,
+ 0x81C, 0x01720101,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x81C, 0xFF000101,
+ 0x81C, 0xFF020101,
+ 0x81C, 0xFE040101,
+ 0x81C, 0xFD060101,
+ 0x81C, 0xFC080101,
+ 0x81C, 0xFD0A0101,
+ 0x81C, 0xFC0C0101,
+ 0x81C, 0xFB0E0101,
+ 0x81C, 0xFA100101,
+ 0x81C, 0xF9120101,
+ 0x81C, 0xF8140101,
+ 0x81C, 0xF7160101,
+ 0x81C, 0xF6180101,
+ 0x81C, 0xF51A0101,
+ 0x81C, 0xF41C0101,
+ 0x81C, 0xF31E0101,
+ 0x81C, 0xF2200101,
+ 0x81C, 0xF1220101,
+ 0x81C, 0xF0240101,
+ 0x81C, 0xEF260101,
+ 0x81C, 0xEE280101,
+ 0x81C, 0xED2A0101,
+ 0x81C, 0xEC2C0101,
+ 0x81C, 0xEB2E0101,
+ 0x81C, 0xEA300101,
+ 0x81C, 0xE9320101,
+ 0x81C, 0xE8340101,
+ 0x81C, 0xE7360101,
+ 0x81C, 0xE6380101,
+ 0x81C, 0xE53A0101,
+ 0x81C, 0xE43C0101,
+ 0x81C, 0xE33E0101,
+ 0x81C, 0xA5400101,
+ 0x81C, 0xA4420101,
+ 0x81C, 0xA3440101,
+ 0x81C, 0x87460101,
+ 0x81C, 0x86480101,
+ 0x81C, 0x854A0101,
+ 0x81C, 0x844C0101,
+ 0x81C, 0x694E0101,
+ 0x81C, 0x68500101,
+ 0x81C, 0x67520101,
+ 0x81C, 0x66540101,
+ 0x81C, 0x49560101,
+ 0x81C, 0x48580101,
+ 0x81C, 0x475A0101,
+ 0x81C, 0x2A5C0101,
+ 0x81C, 0x295E0101,
+ 0x81C, 0x28600101,
+ 0x81C, 0x27620101,
+ 0x81C, 0x26640101,
+ 0x81C, 0x25660101,
+ 0x81C, 0x24680101,
+ 0x81C, 0x236A0101,
+ 0x81C, 0x056C0101,
+ 0x81C, 0x046E0101,
+ 0x81C, 0x03700101,
+ 0x81C, 0x02720101,
+ 0xFF0F02C0, 0xDEAD,
+ 0x81C, 0x01740101,
+ 0x81C, 0x01760101,
+ 0x81C, 0x01780101,
+ 0x81C, 0x017A0101,
+ 0x81C, 0x017C0101,
+ 0x81C, 0x017E0101,
+ 0xC50, 0x00000022,
+ 0xC50, 0x00000020,
+};
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/table.h b/drivers/staging/rtl8821ae/rtl8821ae/table.h
new file mode 100644
index 000000000000..b9d7b266a33a
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/table.h
@@ -0,0 +1,62 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Created on 2010/ 5/18, 1:41
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_TABLE__H_
+#define __RTL8821AE_TABLE__H_
+
+#include <linux/types.h>
+#define RTL8821AEPHY_REG_1TARRAYLEN 344
+extern u32 RTL8821AE_PHY_REG_ARRAY[];
+#define RTL8812AEPHY_REG_1TARRAYLEN 490
+extern u32 RTL8812AE_PHY_REG_ARRAY[];
+#define RTL8821AEPHY_REG_ARRAY_PGLEN 90
+extern u32 RTL8821AE_PHY_REG_ARRAY_PG[];
+#define RTL8812AEPHY_REG_ARRAY_PGLEN 276
+extern u32 RTL8812AE_PHY_REG_ARRAY_PG[];
+/* #define RTL8723BE_RADIOA_1TARRAYLEN 206 */
+/* extern u8 *RTL8821AE_TXPWR_LMT_ARRAY[]; */
+#define RTL8812AE_RADIOA_1TARRAYLEN 1264
+extern u32 RTL8812AE_RADIOA_ARRAY[];
+#define RTL8812AE_RADIOB_1TARRAYLEN 1240
+extern u32 RTL8812AE_RADIOB_ARRAY[];
+#define RTL8821AE_RADIOA_1TARRAYLEN 1176
+extern u32 RTL8821AE_RADIOA_ARRAY[];
+#define RTL8821AEMAC_1T_ARRAYLEN 194
+extern u32 RTL8821AE_MAC_REG_ARRAY[];
+#define RTL8812AEMAC_1T_ARRAYLEN 214
+extern u32 RTL8812AE_MAC_REG_ARRAY[];
+#define RTL8821AEAGCTAB_1TARRAYLEN 382
+extern u32 RTL8821AE_AGC_TAB_ARRAY[];
+#define RTL8812AEAGCTAB_1TARRAYLEN 1312
+extern u32 RTL8812AE_AGC_TAB_ARRAY[];
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/trx.c b/drivers/staging/rtl8821ae/rtl8821ae/trx.c
new file mode 100644
index 000000000000..75ae4387fe19
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/trx.c
@@ -0,0 +1,1050 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "../stats.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "trx.h"
+#include "led.h"
+#include "dm.h"
+#include "phy.h"
+u8 _rtl8821ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
+{
+ u16 fc = rtl_get_fc(skb);
+
+ if (unlikely(ieee80211_is_beacon(fc)))
+ return QSLT_BEACON;
+ if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
+ return QSLT_MGNT;
+
+ return skb->priority;
+}
+
+/* mac80211's rate_idx is like this:
+ *
+ * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
+ *
+ * B/G rate:
+ * (rx_status->flag & RX_FLAG_HT) = 0,
+ * DESC_RATE1M-->DESC_RATE54M ==> idx is 0-->11,
+ *
+ * N rate:
+ * (rx_status->flag & RX_FLAG_HT) = 1,
+ * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
+ *
+ * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
+ * A rate:
+ * (rx_status->flag & RX_FLAG_HT) = 0,
+ * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
+ *
+ * N rate:
+ * (rx_status->flag & RX_FLAG_HT) = 1,
+ * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
+ */
+static int _rtl8821ae_rate_mapping(struct ieee80211_hw *hw,
+ bool isht, u8 desc_rate)
+{
+ int rate_idx;
+
+	if (!isht) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
+#else
+ if (IEEE80211_BAND_2GHZ == hw->conf.channel->band) {
+#endif
+ switch (desc_rate) {
+ case DESC_RATE1M:
+ rate_idx = 0;
+ break;
+ case DESC_RATE2M:
+ rate_idx = 1;
+ break;
+ case DESC_RATE5_5M:
+ rate_idx = 2;
+ break;
+ case DESC_RATE11M:
+ rate_idx = 3;
+ break;
+ case DESC_RATE6M:
+ rate_idx = 4;
+ break;
+ case DESC_RATE9M:
+ rate_idx = 5;
+ break;
+ case DESC_RATE12M:
+ rate_idx = 6;
+ break;
+ case DESC_RATE18M:
+ rate_idx = 7;
+ break;
+ case DESC_RATE24M:
+ rate_idx = 8;
+ break;
+ case DESC_RATE36M:
+ rate_idx = 9;
+ break;
+ case DESC_RATE48M:
+ rate_idx = 10;
+ break;
+ case DESC_RATE54M:
+ rate_idx = 11;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ } else {
+ switch (desc_rate) {
+ case DESC_RATE6M:
+ rate_idx = 0;
+ break;
+ case DESC_RATE9M:
+ rate_idx = 1;
+ break;
+ case DESC_RATE12M:
+ rate_idx = 2;
+ break;
+ case DESC_RATE18M:
+ rate_idx = 3;
+ break;
+ case DESC_RATE24M:
+ rate_idx = 4;
+ break;
+ case DESC_RATE36M:
+ rate_idx = 5;
+ break;
+ case DESC_RATE48M:
+ rate_idx = 6;
+ break;
+ case DESC_RATE54M:
+ rate_idx = 7;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ }
+ } else {
+		switch (desc_rate) {
+ case DESC_RATEMCS0:
+ rate_idx = 0;
+ break;
+ case DESC_RATEMCS1:
+ rate_idx = 1;
+ break;
+ case DESC_RATEMCS2:
+ rate_idx = 2;
+ break;
+ case DESC_RATEMCS3:
+ rate_idx = 3;
+ break;
+ case DESC_RATEMCS4:
+ rate_idx = 4;
+ break;
+ case DESC_RATEMCS5:
+ rate_idx = 5;
+ break;
+ case DESC_RATEMCS6:
+ rate_idx = 6;
+ break;
+ case DESC_RATEMCS7:
+ rate_idx = 7;
+ break;
+ case DESC_RATEMCS8:
+ rate_idx = 8;
+ break;
+ case DESC_RATEMCS9:
+ rate_idx = 9;
+ break;
+ case DESC_RATEMCS10:
+ rate_idx = 10;
+ break;
+ case DESC_RATEMCS11:
+ rate_idx = 11;
+ break;
+ case DESC_RATEMCS12:
+ rate_idx = 12;
+ break;
+ case DESC_RATEMCS13:
+ rate_idx = 13;
+ break;
+ case DESC_RATEMCS14:
+ rate_idx = 14;
+ break;
+ case DESC_RATEMCS15:
+ rate_idx = 15;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ }
+ return rate_idx;
+}
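+
+/*
+ * Worked example of the mapping above: a 2.4 GHz frame received at
+ * DESC_RATE24M with RX_FLAG_HT clear maps to rate_idx 8, while an HT
+ * frame at DESC_RATEMCS7 maps to rate_idx 7 in either band.
+ */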
+
+static void _rtl8821ae_query_rxphystatus(struct ieee80211_hw *hw,
+ struct rtl_stats *pstatus, u8 *pdesc,
+ struct rx_fwinfo_8821ae *p_drvinfo, bool bpacket_match_bssid,
+ bool bpacket_toself, bool b_packet_beacon)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ struct phy_sts_cck_8821ae_t *cck_buf;
+ struct phy_status_rpt *p_phystRpt = (struct phy_status_rpt *)p_drvinfo;
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ char rx_pwr_all = 0, rx_pwr[4];
+ u8 rf_rx_num = 0, evm, pwdb_all;
+ u8 i, max_spatial_stream;
+ u32 rssi, total_rssi = 0;
+ bool b_is_cck = pstatus->b_is_cck;
+	u8 lan_idx, vga_idx;
+
+ /* Record it for next packet processing */
+ pstatus->b_packet_matchbssid = bpacket_match_bssid;
+ pstatus->b_packet_toself = bpacket_toself;
+ pstatus->b_packet_beacon = b_packet_beacon;
+ pstatus->rx_mimo_signalquality[0] = -1;
+ pstatus->rx_mimo_signalquality[1] = -1;
+
+ if (b_is_cck) {
+ u8 cck_highpwr;
+ u8 cck_agc_rpt;
+ /* CCK Driver info Structure is not the same as OFDM packet. */
+ cck_buf = (struct phy_sts_cck_8821ae_t *)p_drvinfo;
+ cck_agc_rpt = cck_buf->cck_agc_rpt;
+
+ /* (1)Hardware does not provide RSSI for CCK */
+		/* (2)PWDB, Average PWDB calculated by
+		 * hardware (for rate adaptive) */
+ if (ppsc->rfpwr_state == ERFON)
+ cck_highpwr = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2,
+ BIT(9));
+ else
+ cck_highpwr = false;
+
+ lan_idx = ((cck_agc_rpt & 0xE0) >> 5);
+ vga_idx = (cck_agc_rpt & 0x1f);
+ switch (lan_idx) {
+		case 7:
+			if (vga_idx <= 27)
+				rx_pwr_all = -100 + 2 * (27 - vga_idx); /*VGA_idx = 27~2*/
+			else
+				rx_pwr_all = -100;
+			break;
+		case 6:
+			rx_pwr_all = -48 + 2 * (2 - vga_idx); /*VGA_idx = 2~0*/
+			break;
+		case 5:
+			rx_pwr_all = -42 + 2 * (7 - vga_idx); /*VGA_idx = 7~5*/
+			break;
+		case 4:
+			rx_pwr_all = -36 + 2 * (7 - vga_idx); /*VGA_idx = 7~4*/
+			break;
+		case 3:
+			rx_pwr_all = -24 + 2 * (7 - vga_idx); /*VGA_idx = 7~0*/
+			break;
+		case 2:
+			if (cck_highpwr)
+				rx_pwr_all = -12 + 2 * (5 - vga_idx); /*VGA_idx = 5~0*/
+			else
+				rx_pwr_all = -6 + 2 * (5 - vga_idx);
+			break;
+		case 1:
+			rx_pwr_all = 8 - 2 * vga_idx;
+			break;
+		case 0:
+			rx_pwr_all = 14 - 2 * vga_idx;
+			break;
+		default:
+			break;
+		}
+ rx_pwr_all += 6;
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+		/* CCK gain is smaller than OFDM/MCS gain, so we add a
+		 * gain difference of 6, a value found by experience. */
+ pwdb_all += 6;
+		if (pwdb_all > 100)
+			pwdb_all = 100;
+		/* modify the offset so that the gain index
+		 * matches the OFDM one. */
+		if (pwdb_all > 34 && pwdb_all <= 42)
+			pwdb_all -= 2;
+		else if (pwdb_all > 26 && pwdb_all <= 34)
+			pwdb_all -= 6;
+		else if (pwdb_all > 14 && pwdb_all <= 26)
+			pwdb_all -= 8;
+		else if (pwdb_all > 4 && pwdb_all <= 14)
+			pwdb_all -= 4;
+		if (!cck_highpwr) {
+			if (pwdb_all >= 80)
+				pwdb_all = ((pwdb_all - 80) << 1) +
+					   ((pwdb_all - 80) >> 1) + 80;
+			else if ((pwdb_all <= 78) && (pwdb_all >= 20))
+				pwdb_all += 3;
+			if (pwdb_all > 100)
+				pwdb_all = 100;
+		}
+
+ pstatus->rx_pwdb_all = pwdb_all;
+ pstatus->recvsignalpower = rx_pwr_all;
+
+ /* (3) Get Signal Quality (EVM) */
+ if (bpacket_match_bssid) {
+ u8 sq;
+
+ if (pstatus->rx_pwdb_all > 40)
+ sq = 100;
+ else {
+ sq = cck_buf->sq_rpt;
+ if (sq > 64)
+ sq = 0;
+ else if (sq < 20)
+ sq = 100;
+ else
+ sq = ((64 - sq) * 100) / 44;
+ }
+
+ pstatus->signalquality = sq;
+ pstatus->rx_mimo_signalquality[0] = sq;
+ pstatus->rx_mimo_signalquality[1] = -1;
+ }
+ } else {
+ rtlpriv->dm.brfpath_rxenable[0] =
+ rtlpriv->dm.brfpath_rxenable[1] = true;
+
+ /* (1)Get RSSI for HT rate */
+ for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
+
+			/* determine whether this RF RX path is enabled */
+ if (rtlpriv->dm.brfpath_rxenable[i])
+ rf_rx_num++;
+
+ rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
+
+			/* Translate dBm to percentage. */
+ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
+ total_rssi += rssi;
+
+			/* Get Rx SNR value in dB */
+ rtlpriv->stats.rx_snr_db[i] = (long)(p_drvinfo->rxsnr[i] / 2);
+
+ /* Record Signal Strength for next packet */
+ if (bpacket_match_bssid)
+ pstatus->rx_mimo_signalstrength[i] = (u8) rssi;
+ }
+
+		/* (2)PWDB, Average PWDB calculated by
+		 * hardware (for rate adaptive) */
+ rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
+
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+ pstatus->rx_pwdb_all = pwdb_all;
+ pstatus->rxpower = rx_pwr_all;
+ pstatus->recvsignalpower = rx_pwr_all;
+
+ /* (3)EVM of HT rate */
+ if (pstatus->b_is_ht && pstatus->rate >= DESC_RATEMCS8 &&
+ pstatus->rate <= DESC_RATEMCS15)
+ max_spatial_stream = 2;
+ else
+ max_spatial_stream = 1;
+
+ for (i = 0; i < max_spatial_stream; i++) {
+ evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);
+
+ if (bpacket_match_bssid) {
+ /* Fill value in RFD, Get the first
+ * spatial stream only */
+ if (i == 0)
+ pstatus->signalquality = (u8) (evm & 0xff);
+ pstatus->rx_mimo_signalquality[i] = (u8) (evm & 0xff);
+ }
+ }
+ }
+
+	/* UI BSS list signal strength (in percent),
+	 * scaled into the 0~100 range for display. */
+ if (b_is_cck)
+ pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+ pwdb_all));
+ else if (rf_rx_num != 0)
+ pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+ total_rssi /= rf_rx_num));
+ /*HW antenna diversity*/
+ rtldm->fat_table.antsel_rx_keep_0 = p_phystRpt->ant_sel;
+ rtldm->fat_table.antsel_rx_keep_1 = p_phystRpt->ant_sel_b;
+ rtldm->fat_table.antsel_rx_keep_2 = p_phystRpt->antsel_rx_keep_2;
+
+}
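+
+/*
+ * Illustrative trace of the CCK branch above (report value assumed for
+ * the example): cck_agc_rpt = 0x62 gives lan_idx = 3 and vga_idx = 2,
+ * so rx_pwr_all = -24 + 2 * (7 - 2) + 6 = -8 dBm, which
+ * rtl_query_rxpwrpercentage() turns into 92 percent before the +6 CCK
+ * gain offset and any further offset adjustment are applied.
+ */
+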
+#if 0
+static void _rtl8821ae_smart_antenna(struct ieee80211_hw *hw,
+ struct rtl_stats *pstatus)
+{
+ struct rtl_dm *rtldm= rtl_dm(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse =rtl_efuse(rtl_priv(hw));
+ u8 antsel_tr_mux;
+ struct fast_ant_trainning *pfat_table = &(rtldm->fat_table);
+
+ if (rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV) {
+ if (pfat_table->fat_state == FAT_TRAINING_STATE) {
+ if (pstatus->b_packet_toself) {
+ antsel_tr_mux = (pfat_table->antsel_rx_keep_2 << 2) |
+ (pfat_table->antsel_rx_keep_1 << 1) | pfat_table->antsel_rx_keep_0;
+ pfat_table->ant_sum_rssi[antsel_tr_mux] += pstatus->rx_pwdb_all;
+ pfat_table->ant_rssi_cnt[antsel_tr_mux]++;
+ }
+ }
+ } else if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) ||
+ (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)) {
+ if (pstatus->b_packet_toself || pstatus->b_packet_matchbssid) {
+ antsel_tr_mux = (pfat_table->antsel_rx_keep_2 << 2) |
+ (pfat_table->antsel_rx_keep_1 << 1) | pfat_table->antsel_rx_keep_0;
+ rtl8821ae_dm_ant_sel_statistics(hw, antsel_tr_mux, 0, pstatus->rx_pwdb_all);
+ }
+
+ }
+}
+#endif
+static void _rtl8821ae_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ struct sk_buff *skb, struct rtl_stats *pstatus,
+ u8 *pdesc, struct rx_fwinfo_8821ae *p_drvinfo)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct ieee80211_hdr *hdr;
+ u8 *tmp_buf;
+ u8 *praddr;
+ u8 *psaddr;
+ u16 fc, type;
+ bool b_packet_matchbssid, b_packet_toself, b_packet_beacon;
+
+ tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
+
+ hdr = (struct ieee80211_hdr *)tmp_buf;
+ fc = le16_to_cpu(hdr->frame_control);
+ type = WLAN_FC_GET_TYPE(fc);
+ praddr = hdr->addr1;
+ psaddr = ieee80211_get_SA(hdr);
+ memcpy(pstatus->psaddr, psaddr, ETH_ALEN);
+
+ b_packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
+ (!ether_addr_equal(mac->bssid, (fc & IEEE80211_FCTL_TODS) ?
+ hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ?
+ hdr->addr2 : hdr->addr3)) && (!pstatus->b_hwerror) &&
+ (!pstatus->b_crc) && (!pstatus->b_icv));
+
+ b_packet_toself = b_packet_matchbssid &&
+ (!ether_addr_equal(praddr, rtlefuse->dev_addr));
+
+ if (ieee80211_is_beacon(fc))
+ b_packet_beacon = true;
+ else
+ b_packet_beacon = false;
+
+ if (b_packet_beacon && b_packet_matchbssid)
+ rtl_priv(hw)->dm.dbginfo.num_qry_beacon_pkt++;
+
+ _rtl8821ae_query_rxphystatus(hw, pstatus, pdesc, p_drvinfo,
+ b_packet_matchbssid, b_packet_toself,
+ b_packet_beacon);
+ /*_rtl8821ae_smart_antenna(hw, pstatus); */
+ rtl_process_phyinfo(hw, tmp_buf, pstatus);
+}
+
+static void _rtl8821ae_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
+ u8 *virtualaddress)
+{
+ u32 dwtmp = 0;
+ memset(virtualaddress, 0, 8);
+
+ SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
+ if (ptcb_desc->empkt_num == 1)
+ dwtmp = ptcb_desc->empkt_len[0];
+ else {
+ dwtmp = ptcb_desc->empkt_len[0];
+ dwtmp += ((dwtmp%4)?(4-dwtmp%4):0)+4;
+ dwtmp += ptcb_desc->empkt_len[1];
+ }
+ SET_EARLYMODE_LEN0(virtualaddress, dwtmp);
+
+ if (ptcb_desc->empkt_num <= 3)
+ dwtmp = ptcb_desc->empkt_len[2];
+ else {
+ dwtmp = ptcb_desc->empkt_len[2];
+ dwtmp += ((dwtmp%4)?(4-dwtmp%4):0)+4;
+ dwtmp += ptcb_desc->empkt_len[3];
+ }
+ SET_EARLYMODE_LEN1(virtualaddress, dwtmp);
+ if (ptcb_desc->empkt_num <= 5)
+ dwtmp = ptcb_desc->empkt_len[4];
+ else {
+ dwtmp = ptcb_desc->empkt_len[4];
+ dwtmp += ((dwtmp%4)?(4-dwtmp%4):0)+4;
+ dwtmp += ptcb_desc->empkt_len[5];
+ }
+ SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF);
+ SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4);
+ if (ptcb_desc->empkt_num <= 7)
+ dwtmp = ptcb_desc->empkt_len[6];
+ else {
+ dwtmp = ptcb_desc->empkt_len[6];
+ dwtmp += ((dwtmp%4)?(4-dwtmp%4):0)+4;
+ dwtmp += ptcb_desc->empkt_len[7];
+ }
+ SET_EARLYMODE_LEN3(virtualaddress, dwtmp);
+ if (ptcb_desc->empkt_num <= 9)
+ dwtmp = ptcb_desc->empkt_len[8];
+ else {
+ dwtmp = ptcb_desc->empkt_len[8];
+ dwtmp += ((dwtmp%4)?(4-dwtmp%4):0)+4;
+ dwtmp += ptcb_desc->empkt_len[9];
+ }
+ SET_EARLYMODE_LEN4(virtualaddress, dwtmp);
+}
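+
+/*
+ * Illustrative trace of the helper above (lengths assumed for the
+ * example): with empkt_num >= 2, empkt_len[0] = 25 and empkt_len[1] = 60,
+ * the first length is rounded up to a 4-byte boundary and extended by 4
+ * (25 -> 32), so EARLYMODE_LEN0 is written as 32 + 60 = 92.
+ */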
+
+bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *status,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rx_fwinfo_8821ae *p_drvinfo;
+ struct ieee80211_hdr *hdr;
+
+ u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+
+ status->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
+ status->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+ RX_DRV_INFO_SIZE_UNIT;
+ status->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
+ status->b_icv = (u16) GET_RX_DESC_ICV(pdesc);
+ status->b_crc = (u16) GET_RX_DESC_CRC32(pdesc);
+ status->b_hwerror = (status->b_crc | status->b_icv);
+ status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
+ status->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
+ status->b_shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
+ status->b_isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+ status->b_isfirst_ampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+ status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+ status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+ status->macid = GET_RX_DESC_MACID(pdesc);
+ status->b_is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
+
+ status->b_is_cck = RX_HAL_IS_CCK_RATE(status->rate);
+
+ if (GET_RX_STATUS_DESC_RPT_SEL(pdesc))
+ status->packet_report_type = C2H_PACKET;
+ else
+ status->packet_report_type = NORMAL_RX;
+
+ if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+ status->wake_match = BIT(2);
+ else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
+ status->wake_match = BIT(1);
+ else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
+ status->wake_match = BIT(0);
+ else
+ status->wake_match = 0;
+
+ if (status->wake_match)
+		RT_TRACE(COMP_RXDESC, DBG_LOUD,
+			 ("Get Wakeup Packet!! WakeMatch=%d\n",
+			  status->wake_match));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = hw->conf.chandef.chan->band;
+#else
+ rx_status->freq = hw->conf.channel->center_freq;
+ rx_status->band = hw->conf.channel->band;
+#endif
+
+ hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size
+ + status->rx_bufshift);
+
+ if (status->b_crc)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (status->rx_is40Mhzpacket)
+ rx_status->flag |= RX_FLAG_40MHZ;
+
+ if (status->b_is_ht)
+ rx_status->flag |= RX_FLAG_HT;
+
+ rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+
+	/* hw will set status->decrypted true, if it finds the
+	 * frame is an open data frame or mgmt frame. */
+	/* So hw will not decrypt robust management frames
+	 * for IEEE 802.11w, but still sets status->decrypted
+	 * true, so here we set it back to undecrypted
+	 * for IEEE 802.11w frames, and the mac80211 sw will
+	 * help to decrypt them */
+ if (status->decrypted) {
+ if (!hdr) {
+ WARN_ON_ONCE(true);
+ pr_err("decrypted is true but hdr NULL, from skb %p\n",
+ rtl_get_hdr(skb));
+ return false;
+ }
+
+ if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ (ieee80211_has_protected(hdr->frame_control)))
+ rx_status->flag &= ~RX_FLAG_DECRYPTED;
+ else
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ }
+
+ /* rate_idx: index of data rate into band's
+ * supported rates or MCS index if HT rates
+	 * are used (RX_FLAG_HT) */
+ /* Notice: this is diff with windows define */
+ rx_status->rate_idx = _rtl8821ae_rate_mapping(hw,
+ status->b_is_ht, status->rate);
+
+ rx_status->mactime = status->timestamp_low;
+	if (phystatus) {
+ p_drvinfo = (struct rx_fwinfo_8821ae *)(skb->data +
+ status->rx_bufshift);
+
+ _rtl8821ae_translate_rx_signal_stuff(hw,
+ skb, status, pdesc,
+ p_drvinfo);
+ }
+
+ /*rx_status->qual = status->signal; */
+ rx_status->signal = status->recvsignalpower + 10;
+ /*rx_status->noise = -status->noise; */
+	if (status->packet_report_type == TX_REPORT2) {
+ status->macid_valid_entry[0] = GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
+ status->macid_valid_entry[1] = GET_RX_RPT2_DESC_MACID_VALID_2(pdesc);
+ }
+ return true;
+}
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd,
+ struct ieee80211_tx_info *info, struct sk_buff *skb,
+ u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
+#else
+/*<delete in kernel end>*/
+void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ struct ieee80211_sta *sta = info->control.sta;
+#endif
+/*<delete in kernel end>*/
+ u8 *pdesc = (u8 *) pdesc_tx;
+ u16 seq_number;
+ u16 fc = le16_to_cpu(hdr->frame_control);
+ unsigned int buf_len = 0;
+ unsigned int skb_len = skb->len;
+ u8 fw_qsel = _rtl8821ae_map_hwqueue_to_fwqueue(skb, hw_queue);
+ bool b_firstseg = ((hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
+ bool b_lastseg = ((hdr->frame_control &
+ cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
+ dma_addr_t mapping;
+ u8 bw_40 = 0;
+ u8 short_gi = 0;
+
+ if (mac->opmode == NL80211_IFTYPE_STATION) {
+ bw_40 = mac->bw_40;
+ } else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ if (sta)
+ bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ }
+ seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+ rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
+	/* reserve 8 bytes for AMPDU early mode */
+ if (rtlhal->b_earlymode_enable) {
+ skb_push(skb, EM_HDR_LEN);
+ memset(skb->data, 0, EM_HDR_LEN);
+ }
+ buf_len = skb->len;
+ mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+ RT_TRACE(COMP_SEND, DBG_TRACE,
+ ("DMA mapping error"));
+ return;
+ }
+ CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8821ae));
+ if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
+ b_firstseg = true;
+ b_lastseg = true;
+ }
+ if (b_firstseg) {
+ if (rtlhal->b_earlymode_enable) {
+ SET_TX_DESC_PKT_OFFSET(pdesc, 1);
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN + EM_HDR_LEN);
+ if (ptcb_desc->empkt_num) {
+ RT_TRACE(COMP_SEND, DBG_TRACE,
+					("Insert 8 bytes. pTcb->EMPktNum:%d\n",
+ ptcb_desc->empkt_num));
+ _rtl8821ae_insert_emcontent(ptcb_desc, (u8 *)(skb->data));
+ }
+ } else {
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ }
+
+ /* ptcb_desc->use_driver_rate = true; */
+ SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+ if (ptcb_desc->hw_rate > DESC_RATEMCS0) {
+ short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
+ } else {
+			short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
+ }
+ SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ SET_TX_DESC_AGG_ENABLE(pdesc, 1);
+ SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+ }
+ SET_TX_DESC_SEQ(pdesc, seq_number);
+ SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->b_rts_enable &&
+ !ptcb_desc->b_cts_enable) ? 1 : 0));
+		SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0);
+ SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->b_cts_enable) ? 1 : 0));
+ /* SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->b_rts_stbc) ? 1 : 0));*/
+
+ SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
+ /* SET_TX_DESC_RTS_BW(pdesc, 0);*/
+ SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
+ SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <= DESC_RATE54M) ?
+ (ptcb_desc->b_rts_use_shortpreamble ? 1 : 0) :
+ (ptcb_desc->b_rts_use_shortgi ? 1 : 0)));
+
+		if (ptcb_desc->btx_enable_sw_calc_duration)
+ SET_TX_DESC_NAV_USE_HDR(pdesc, 1);
+
+ if (bw_40) {
+ if (ptcb_desc->b_packet_bw) {
+ SET_TX_DESC_DATA_BW(pdesc, 1);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+ } else {
+ SET_TX_DESC_DATA_BW(pdesc, 0);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc, mac->cur_40_prime_sc);
+ }
+ } else {
+ SET_TX_DESC_DATA_BW(pdesc, 0);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ }
+
+ SET_TX_DESC_LINIP(pdesc, 0);
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb_len);
+ if (sta) {
+ u8 ampdu_density = sta->ht_cap.ampdu_density;
+ SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+ }
+ if (info->control.hw_key) {
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+/*<delete in kernel end>*/
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ break;
+ default:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ break;
+
+ }
+/*<delete in kernel start>*/
+#else
+ switch (keyconf->alg) {
+ case ALG_WEP:
+ case ALG_TKIP:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ break;
+ case ALG_CCMP:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ break;
+ default:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ break;
+
+ }
+#endif
+/*<delete in kernel end>*/
+ }
+
+ SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
+ SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
+ SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
+ SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ? 1 : 0);
+ SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
+
+#if 0
+ SET_TX_DESC_USE_RATE(pdesc, 1);
+ SET_TX_DESC_TX_RATE(pdesc, 0x04);
+
+ SET_TX_DESC_RETRY_LIMIT_ENABLE(pdesc, 1);
+ SET_TX_DESC_DATA_RETRY_LIMIT(pdesc, 0x3f);
+#endif
+
+ /*SET_TX_DESC_PWR_STATUS(pdesc, pwr_status);*/
+ /* Set TxRate and RTSRate in TxDesc */
+		/* This prevents the Tx initial rate of new-coming packets
+		 * from being overwritten by the retried packet rate. */
+ if (!ptcb_desc->use_driver_rate) {
+ /*SET_TX_DESC_RTS_RATE(pdesc, 0x08); */
+ /* SET_TX_DESC_TX_RATE(pdesc, 0x0b); */
+ }
+ if (ieee80211_is_data_qos(fc)) {
+ if (mac->rdg_en) {
+ RT_TRACE(COMP_SEND, DBG_TRACE,
+ ("Enable RDG function.\n"));
+ SET_TX_DESC_RDG_ENABLE(pdesc, 1);
+ SET_TX_DESC_HTC(pdesc, 1);
+ }
+ }
+ }
+
+ SET_TX_DESC_FIRST_SEG(pdesc, (b_firstseg ? 1 : 0));
+ SET_TX_DESC_LAST_SEG(pdesc, (b_lastseg ? 1 : 0));
+ SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len);
+ SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+	/* if (rtlpriv->dm.b_useramask) { */
+	if (1) {
+ SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
+ SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+ } else {
+ SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index);
+ SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+ }
+/* if (ieee80211_is_data_qos(fc))
+ SET_TX_DESC_QOS(pdesc, 1);
+*/
+ if (!ieee80211_is_data_qos(fc)) {
+ SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+ SET_TX_DESC_HWSEQ_SEL(pdesc, 0);
+ }
+ SET_TX_DESC_MORE_FRAG(pdesc, (b_lastseg ? 0 : 1));
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+ is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
+ SET_TX_DESC_BMC(pdesc, 1);
+ }
+
+	rtl8821ae_dm_set_tx_ant_by_tx_info(hw, pdesc, ptcb_desc->mac_id);
+ RT_TRACE(COMP_SEND, DBG_TRACE, ("\n"));
+}
+
+void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool b_firstseg,
+ bool b_lastseg, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 fw_queue = QSLT_BEACON;
+
+ dma_addr_t mapping = pci_map_single(rtlpci->pdev,
+ skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+
+ if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+ RT_TRACE(COMP_SEND, DBG_TRACE,
+ ("DMA mapping error"));
+ return;
+ }
+ CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+
+ SET_TX_DESC_FIRST_SEG(pdesc, 1);
+ SET_TX_DESC_LAST_SEG(pdesc, 1);
+
+ SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+
+ SET_TX_DESC_USE_RATE(pdesc, 1);
+ SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
+ SET_TX_DESC_DISABLE_FB(pdesc, 1);
+
+ SET_TX_DESC_DATA_BW(pdesc, 0);
+
+ SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+
+ SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+/*
+ if(IsCtrlNDPA(VirtualAddress) || IsMgntNDPA(VirtualAddress))
+ {
+ SET_TX_DESC_DATA_RETRY_LIMIT_8812(pDesc, 5);
+ SET_TX_DESC_RETRY_LIMIT_ENABLE_8812(pDesc, 1);
+
+ if(IsMgntNDPA(VirtualAddress))
+ {
+ SET_TX_DESC_NDPA_8812(pDesc, 1);
+ SET_TX_DESC_RTS_SC_8812(pDesc, SCMapping_8812(Adapter, pTcb));
+ }
+ else
+ {
+ SET_TX_DESC_NDPA_8812(pDesc, 2);
+ SET_TX_DESC_RTS_SC_8812(pDesc, SCMapping_8812(Adapter, pTcb));
+ }
+ }*/
+
+ SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len));
+
+ SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+
+ SET_TX_DESC_MACID(pdesc, 0);
+
+ SET_TX_DESC_OWN(pdesc, 1);
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C Tx Cmd Content\n",
+ pdesc, TX_DESC_SIZE);
+}
+
+void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+			u8 desc_name, u8 *val)
+{
+	if (istx) {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ SET_TX_DESC_OWN(pdesc, 1);
+ break;
+ case HW_DESC_TX_NEXTDESC_ADDR:
+ SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
+ break;
+ default:
+ RT_ASSERT(false, ("ERR txdesc :%d"
+ " not process\n", desc_name));
+ break;
+ }
+ } else {
+ switch (desc_name) {
+ case HW_DESC_RXOWN:
+ SET_RX_DESC_OWN(pdesc, 1);
+ break;
+ case HW_DESC_RXBUFF_ADDR:
+ SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *) val);
+ break;
+ case HW_DESC_RXPKT_LEN:
+ SET_RX_DESC_PKT_LEN(pdesc, *(u32 *) val);
+ break;
+ case HW_DESC_RXERO:
+ SET_RX_DESC_EOR(pdesc, 1);
+ break;
+ default:
+ RT_ASSERT(false, ("ERR rxdesc :%d "
+ "not process\n", desc_name));
+ break;
+ }
+ }
+}
+
+u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name)
+{
+ u32 ret = 0;
+
+	if (istx) {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ ret = GET_TX_DESC_OWN(pdesc);
+ break;
+ case HW_DESC_TXBUFF_ADDR:
+ ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
+ break;
+ default:
+ RT_ASSERT(false, ("ERR txdesc :%d "
+ "not process\n", desc_name));
+ break;
+ }
+ } else {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ ret = GET_RX_DESC_OWN(pdesc);
+ break;
+ case HW_DESC_RXPKT_LEN:
+ ret = GET_RX_DESC_PKT_LEN(pdesc);
+ break;
+ default:
+ RT_ASSERT(false, ("ERR rxdesc :%d "
+ "not process\n", desc_name));
+ break;
+ }
+ }
+ return ret;
+}
+
+bool rtl8821ae_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+ u8 *entry = (u8 *)(&ring->desc[ring->idx]);
+ u8 own = (u8) rtl8821ae_get_desc(entry, true, HW_DESC_OWN);
+
+	/*
+	 * beacon packet will only use the first
+	 * descriptor by default, and the own bit may not
+	 * be cleared by the hardware
+	 */
+	return !own;
+}
+
+void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (hw_queue == BEACON_QUEUE) {
+ rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
+ } else {
+ rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG,
+ BIT(0) << (hw_queue));
+ }
+}
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/trx.h b/drivers/staging/rtl8821ae/rtl8821ae/trx.h
new file mode 100644
index 000000000000..da93e5c7ece7
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/trx.h
@@ -0,0 +1,641 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_TRX_H__
+#define __RTL8821AE_TRX_H__
+
+#define TX_DESC_SIZE 40
+#define TX_DESC_AGGR_SUBFRAME_SIZE 32
+
+#define RX_DESC_SIZE 32
+#define RX_DRV_INFO_SIZE_UNIT 8
+
+#define TX_DESC_NEXT_DESC_OFFSET 40
+#define USB_HWDESC_HEADER_LEN 40
+#define CRCLENGTH 4
+
+#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
+#define SET_TX_DESC_OFFSET(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
+#define SET_TX_DESC_BMC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
+#define SET_TX_DESC_HTC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
+#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
+#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
+#define SET_TX_DESC_LINIP(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
+#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
+#define SET_TX_DESC_GF(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+#define SET_TX_DESC_OWN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+
+#define GET_TX_DESC_PKT_SIZE(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 0, 16)
+#define GET_TX_DESC_OFFSET(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 16, 8)
+#define GET_TX_DESC_BMC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 24, 1)
+#define GET_TX_DESC_HTC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 25, 1)
+#define GET_TX_DESC_LAST_SEG(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+#define GET_TX_DESC_FIRST_SEG(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+#define GET_TX_DESC_LINIP(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+#define GET_TX_DESC_NO_ACM(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+#define GET_TX_DESC_GF(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+#define GET_TX_DESC_OWN(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 31, 1)
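+
+/*
+ * The SET_/GET_ helpers in this file are thin wrappers around the
+ * generic bit-field macros (assumed here to follow the usual rtlwifi
+ * convention: SET_BITS_TO_LE_4BYTE(ptr, bit_offset, bit_len, val)
+ * writes val into that bit range of a little-endian dword).  For
+ * example, SET_TX_DESC_OWN(pdesc, 1) sets bit 31 of the first
+ * descriptor dword and GET_TX_DESC_PKT_SIZE(pdesc) reads back bits
+ * 0-15 of the same dword.
+ */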
+
+#define SET_TX_DESC_MACID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 7, __val)
+#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
+#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
+#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
+#define SET_TX_DESC_PIFS(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
+#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 5, __val)
+#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
+#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
+#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 5, __val)
+
+#define SET_TX_DESC_PAID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 9, __val)
+#define SET_TX_DESC_CCA_RTS(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 10, 2, __val)
+#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val)
+#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val)
+#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
+#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val)
+#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
+#define SET_TX_DESC_RAW(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
+#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
+#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
+#define SET_TX_DESC_BT_INT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val)
+#define SET_TX_DESC_GID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 6, __val)
+
+#define SET_TX_DESC_WHEADER_LEN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 4, __val)
+#define SET_TX_DESC_CHK_EN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 4, 1, __val)
+#define SET_TX_DESC_EARLY_MODE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 5, 1, __val)
+#define SET_TX_DESC_HWSEQ_SEL(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 6, 2, __val)
+#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 1, __val)
+#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 9, 1, __val)
+#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 10, 1, __val)
+#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 11, 1, __val)
+#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 12, 1, __val)
+#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 13, 1, __val)
+#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 15, 1, __val)
+#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 1, __val)
+#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 17, 5, __val)
+#define SET_TX_DESC_NDPA(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 22, 2, __val)
+#define SET_TX_DESC_AMPDU_MAX_TIME(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 24, 8, __val)
+#define SET_TX_DESC_TX_ANT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 4, __val)
+
+#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 7, __val)
+#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 5, __val)
+#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 4, __val)
+#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 17, 1, __val)
+#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 6, __val)
+#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 5, __val)
+
+#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 4, __val)
+#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
+ SET_BITS_TO_LE_1BYTE(__pdesc+20, 6, 1, __val)
+#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 5, 2, __val)
+#define SET_TX_DESC_DATA_LDPC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
+#define SET_TX_DESC_DATA_STBC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 2, __val)
+#define SET_TX_DESC_CTROL_STBC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 10, 2, __val)
+#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 12, 1, __val)
+#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
+
+#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
+
+#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
+
+#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+32, 15, 1, __val)
+
+#define SET_TX_DESC_SEQ(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+36, 12, 12, __val)
+
+#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
+
+#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
+
+#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+48, 0, 32, __val)
+
+#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+48, 0, 32)
+
+#define GET_RX_DESC_PKT_LEN(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 0, 14)
+#define GET_RX_DESC_CRC32(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 14, 1)
+#define GET_RX_DESC_ICV(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 15, 1)
+#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 16, 4)
+#define GET_RX_DESC_SECURITY(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 20, 3)
+#define GET_RX_DESC_QOS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 23, 1)
+#define GET_RX_DESC_SHIFT(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 24, 2)
+#define GET_RX_DESC_PHYST(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+#define GET_RX_DESC_SWDEC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+#define GET_RX_DESC_LS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+#define GET_RX_DESC_FS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+#define GET_RX_DESC_EOR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+#define GET_RX_DESC_OWN(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+
+#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
+#define SET_RX_DESC_EOR(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+#define SET_RX_DESC_OWN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+
+#define GET_RX_DESC_MACID(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 0, 7)
+#define GET_RX_DESC_TID(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 8, 4)
+#define GET_RX_DESC_AMSDU(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
+#define GET_RX_STATUS_DESC_RXID_MATCH(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
+#define GET_RX_DESC_PAGGR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
+#define GET_RX_DESC_A1_FIT(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
+#define GET_RX_DESC_CHKERR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
+#define GET_RX_DESC_IPVER(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
+#define GET_RX_STATUS_DESC_IS_TCPUDP(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 22, 1)
+#define GET_RX_STATUS_DESC_CHK_VLD(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 23, 1)
+#define GET_RX_DESC_PAM(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
+#define GET_RX_DESC_PWR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
+#define GET_RX_DESC_MD(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
+#define GET_RX_DESC_MF(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
+#define GET_RX_DESC_TYPE(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
+#define GET_RX_DESC_MC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
+#define GET_RX_DESC_BC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)
+
+#define GET_RX_DESC_SEQ(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
+#define GET_RX_DESC_FRAG(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
+#define GET_RX_STATUS_DESC_RX_IS_QOS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 16, 1)
+#define GET_RX_STATUS_DESC_WLANHD_IV_LEN(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 18, 6)
+#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 28, 1)
+
+#define GET_RX_DESC_RXMCS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 0, 7)
+#define GET_RX_DESC_RXHT(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 6, 1)
+#define GET_RX_STATUS_DESC_RX_GF(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 7, 1)
+#define GET_RX_DESC_HTC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
+#define GET_RX_STATUS_DESC_EOSP(__pdesc) \
+	LE_BITS_TO_4BYTE(__pdesc+12, 11, 1)
+#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc) \
+	LE_BITS_TO_4BYTE(__pdesc+12, 12, 2)
+
+#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc) \
+	LE_BITS_TO_4BYTE(__pdesc+12, 29, 1)
+#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc) \
+	LE_BITS_TO_4BYTE(__pdesc+12, 30, 1)
+#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc) \
+	LE_BITS_TO_4BYTE(__pdesc+12, 31, 1)
+
+#define GET_RX_DESC_SPLCP(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+16, 0, 1)
+#define GET_RX_STATUS_DESC_LDPC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+16, 1, 1)
+#define GET_RX_STATUS_DESC_STBC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+16, 2, 1)
+#define GET_RX_DESC_BW(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+16, 4, 2)
+
+#define GET_RX_DESC_TSFL(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)
+
+#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
+#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)
+
+#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
+#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
+
+/* TX report 2 format in Rx desc */
+
+#define GET_RX_RPT2_DESC_PKT_LEN(__pRxStatusDesc) \
+	LE_BITS_TO_4BYTE(__pRxStatusDesc, 0, 9)
+#define GET_RX_RPT2_DESC_MACID_VALID_1(__pRxStatusDesc) \
+	LE_BITS_TO_4BYTE(__pRxStatusDesc+16, 0, 32)
+#define GET_RX_RPT2_DESC_MACID_VALID_2(__pRxStatusDesc) \
+	LE_BITS_TO_4BYTE(__pRxStatusDesc+20, 0, 32)
+
+#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __value)
+#define SET_EARLYMODE_LEN0(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr, 4, 12, __value)
+#define SET_EARLYMODE_LEN1(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr, 16, 12, __value)
+#define SET_EARLYMODE_LEN2_1(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr, 28, 4, __value)
+#define SET_EARLYMODE_LEN2_2(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __value)
+#define SET_EARLYMODE_LEN3(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr+4, 8, 12, __value)
+#define SET_EARLYMODE_LEN4(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __value)
+
+#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size)		\
+do {								\
+	if (_size > TX_DESC_NEXT_DESC_OFFSET)			\
+		memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET);	\
+	else							\
+		memset(__pdesc, 0, _size);			\
+} while (0)
+
+#define RX_HAL_IS_CCK_RATE(rxmcs)\
+ (rxmcs == DESC_RATE1M ||\
+ rxmcs == DESC_RATE2M ||\
+ rxmcs == DESC_RATE5_5M ||\
+ rxmcs == DESC_RATE11M)
+
+#define IS_LITTLE_ENDIAN 1
+
+struct phy_rx_agc_info_t {
+#if IS_LITTLE_ENDIAN
+	u8 gain:7, trsw:1;
+#else
+	u8 trsw:1, gain:7;
+#endif
+};
+struct phy_status_rpt {
+ struct phy_rx_agc_info_t path_agc[2];
+ u8 ch_corr[2];
+ u8 cck_sig_qual_ofdm_pwdb_all;
+ u8 cck_agc_rpt_ofdm_cfosho_a;
+ u8 cck_rpt_b_ofdm_cfosho_b;
+	u8 rsvd_1;	/* ch_corr_msb */
+ u8 noise_power_db_msb;
+ u8 path_cfotail[2];
+ u8 pcts_mask[2];
+ u8 stream_rxevm[2];
+ u8 path_rxsnr[2];
+ u8 noise_power_db_lsb;
+ u8 rsvd_2[3];
+ u8 stream_csi[2];
+ u8 stream_target_csi[2];
+ u8 sig_evm;
+ u8 rsvd_3;
+#if IS_LITTLE_ENDIAN
+ u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 idle_long:1;
+ u8 r_ant_train_en:1;
+ u8 ant_sel_b:1;
+ u8 ant_sel:1;
+#else /* _BIG_ENDIAN_ */
+ u8 ant_sel:1;
+ u8 ant_sel_b:1;
+ u8 r_ant_train_en:1;
+ u8 idle_long:1;
+ u8 rxsc:2;
+ u8 sgi_en:1;
+ u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/
+#endif
+} __packed;
+
+struct rx_fwinfo_8821ae {
+ u8 gain_trsw[4];
+ u8 pwdb_all;
+ u8 cfosho[4];
+ u8 cfotail[4];
+ char rxevm[2];
+ char rxsnr[4];
+ u8 pdsnr[2];
+ u8 csi_current[2];
+ u8 csi_target[2];
+ u8 sigevm;
+ u8 max_ex_pwr;
+ u8 ex_intf_flag:1;
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 reserve:4;
+} __packed;
+
+struct tx_desc_8821ae {
+ u32 pktsize:16;
+ u32 offset:8;
+ u32 bmc:1;
+ u32 htc:1;
+ u32 lastseg:1;
+ u32 firstseg:1;
+ u32 linip:1;
+ u32 noacm:1;
+ u32 gf:1;
+ u32 own:1;
+
+ u32 macid:6;
+ u32 rsvd0:2;
+ u32 queuesel:5;
+ u32 rd_nav_ext:1;
+ u32 lsig_txop_en:1;
+ u32 pifs:1;
+ u32 rateid:4;
+ u32 nav_usehdr:1;
+ u32 en_descid:1;
+ u32 sectype:2;
+ u32 pktoffset:8;
+
+ u32 rts_rc:6;
+ u32 data_rc:6;
+ u32 agg_en:1;
+ u32 rdg_en:1;
+ u32 bar_retryht:2;
+ u32 agg_break:1;
+ u32 morefrag:1;
+ u32 raw:1;
+ u32 ccx:1;
+ u32 ampdudensity:3;
+ u32 bt_int:1;
+ u32 ant_sela:1;
+ u32 ant_selb:1;
+ u32 txant_cck:2;
+ u32 txant_l:2;
+ u32 txant_ht:2;
+
+ u32 nextheadpage:8;
+ u32 tailpage:8;
+ u32 seq:12;
+ u32 cpu_handle:1;
+ u32 tag1:1;
+ u32 trigger_int:1;
+ u32 hwseq_en:1;
+
+ u32 rtsrate:5;
+ u32 apdcfe:1;
+ u32 qos:1;
+ u32 hwseq_ssn:1;
+ u32 userrate:1;
+ u32 dis_rtsfb:1;
+ u32 dis_datafb:1;
+ u32 cts2self:1;
+ u32 rts_en:1;
+ u32 hwrts_en:1;
+ u32 portid:1;
+ u32 pwr_status:3;
+ u32 waitdcts:1;
+ u32 cts2ap_en:1;
+ u32 txsc:2;
+ u32 stbc:2;
+ u32 txshort:1;
+ u32 txbw:1;
+ u32 rtsshort:1;
+ u32 rtsbw:1;
+ u32 rtssc:2;
+ u32 rtsstbc:2;
+
+ u32 txrate:6;
+ u32 shortgi:1;
+ u32 ccxt:1;
+ u32 txrate_fb_lmt:5;
+ u32 rtsrate_fb_lmt:4;
+ u32 retrylmt_en:1;
+ u32 txretrylmt:6;
+ u32 usb_txaggnum:8;
+
+ u32 txagca:5;
+ u32 txagcb:5;
+ u32 usemaxlen:1;
+ u32 maxaggnum:5;
+ u32 mcsg1maxlen:4;
+ u32 mcsg2maxlen:4;
+ u32 mcsg3maxlen:4;
+ u32 mcs7sgimaxlen:4;
+
+ u32 txbuffersize:16;
+ u32 sw_offset30:8;
+ u32 sw_offset31:4;
+ u32 rsvd1:1;
+ u32 antsel_c:1;
+ u32 null_0:1;
+ u32 null_1:1;
+
+ u32 txbuffaddr;
+ u32 txbufferaddr64;
+ u32 nextdescaddress;
+ u32 nextdescaddress64;
+
+ u32 reserve_pass_pcie_mm_limit[4];
+} __packed;
+
+struct rx_desc_8821ae {
+ u32 length:14;
+ u32 crc32:1;
+ u32 icverror:1;
+ u32 drv_infosize:4;
+ u32 security:3;
+ u32 qos:1;
+ u32 shift:2;
+ u32 phystatus:1;
+ u32 swdec:1;
+ u32 lastseg:1;
+ u32 firstseg:1;
+ u32 eor:1;
+ u32 own:1;
+
+ u32 macid:6;
+ u32 tid:4;
+ u32 hwrsvd:5;
+ u32 paggr:1;
+ u32 faggr:1;
+ u32 a1_fit:4;
+ u32 a2_fit:4;
+ u32 pam:1;
+ u32 pwr:1;
+ u32 moredata:1;
+ u32 morefrag:1;
+ u32 type:2;
+ u32 mc:1;
+ u32 bc:1;
+
+ u32 seq:12;
+ u32 frag:4;
+ u32 nextpktlen:14;
+ u32 nextind:1;
+ u32 rsvd:1;
+
+ u32 rxmcs:6;
+ u32 rxht:1;
+ u32 amsdu:1;
+ u32 splcp:1;
+ u32 bandwidth:1;
+ u32 htc:1;
+ u32 tcpchk_rpt:1;
+ u32 ipcchk_rpt:1;
+ u32 tcpchk_valid:1;
+ u32 hwpcerr:1;
+ u32 hwpcind:1;
+ u32 iv0:16;
+
+ u32 iv1;
+
+ u32 tsfl;
+
+ u32 bufferaddress;
+ u32 bufferaddress64;
+
+} __packed;
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd,
+ struct ieee80211_tx_info *info, struct sk_buff *skb,
+ u8 hw_queue, struct rtl_tcb_desc *ptcb_desc);
+#else
+/*<delete in kernel end>*/
+void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ u8 hw_queue, struct rtl_tcb_desc *ptcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *status,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb);
+void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+			u8 desc_name, u8 *val);
+u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+bool rtl8821ae_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index);
+void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
+void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
+ bool b_firstseg, bool b_lastseg,
+ struct sk_buff *skb);
+#endif
diff --git a/drivers/staging/rtl8821ae/stats.c b/drivers/staging/rtl8821ae/stats.c
new file mode 100644
index 000000000000..a20c0f8f65ec
--- /dev/null
+++ b/drivers/staging/rtl8821ae/stats.c
@@ -0,0 +1,283 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include "wifi.h"
+#include "stats.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#include <linux/export.h>
+#endif
+
+u8 rtl_query_rxpwrpercentage(char antpower)
+{
+ if ((antpower <= -100) || (antpower >= 20))
+ return 0;
+ else if (antpower >= 0)
+ return 100;
+ else
+ return (100 + antpower);
+}
+/* EXPORT_SYMBOL(rtl_query_rxpwrpercentage); */
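+
+/*
+ * Example: readings outside the (-100, 20) dBm window are treated as
+ * invalid and return 0, values from 0 to 19 dBm saturate at 100, and
+ * -60 dBm maps to 100 - 60 = 40 percent.
+ */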
+
+u8 rtl_evm_db_to_percentage(char value)
+{
+ char ret_val;
+ ret_val = value;
+
+ if (ret_val >= 0)
+ ret_val = 0;
+ if (ret_val <= -33)
+ ret_val = -33;
+ ret_val = 0 - ret_val;
+ ret_val *= 3;
+ if (ret_val == 99)
+ ret_val = 100;
+
+ return ret_val;
+}
+/* EXPORT_SYMBOL(rtl_evm_db_to_percentage); */
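+
+/*
+ * Example: an EVM of -20 dB is clamped to the [-33, 0] range and scaled
+ * by 3, giving 60 percent; -33 dB yields 99, which is rounded up to 100.
+ */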
+
+long rtl_translate_todbm(struct ieee80211_hw *hw,
+ u8 signal_strength_index)
+{
+ long signal_power;
+
+ signal_power = (long)((signal_strength_index + 1) >> 1);
+ signal_power -= 95;
+ return signal_power;
+}
+
+long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig)
+{
+ long retsig;
+
+ if (currsig >= 61 && currsig <= 100)
+ retsig = 90 + ((currsig - 60) / 4);
+ else if (currsig >= 41 && currsig <= 60)
+ retsig = 78 + ((currsig - 40) / 2);
+ else if (currsig >= 31 && currsig <= 40)
+ retsig = 66 + (currsig - 30);
+ else if (currsig >= 21 && currsig <= 30)
+ retsig = 54 + (currsig - 20);
+ else if (currsig >= 5 && currsig <= 20)
+ retsig = 42 + (((currsig - 5) * 2) / 3);
+ else if (currsig == 4)
+ retsig = 36;
+ else if (currsig == 3)
+ retsig = 27;
+ else if (currsig == 2)
+ retsig = 18;
+ else if (currsig == 1)
+ retsig = 9;
+ else
+ retsig = currsig;
+
+ return retsig;
+}
+/* EXPORT_SYMBOL(rtl_signal_scale_mapping); */
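+
+/*
+ * Example of the piecewise scaling above: a raw signal of 50 maps to
+ * 78 + (50 - 40) / 2 = 83, while 100 maps to 90 + (100 - 60) / 4 = 100.
+ */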
+
+void rtl_process_ui_rssi(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 rfpath;
+ u32 last_rssi, tmpval;
+
+ if (!pstatus->b_packet_toself && !pstatus->b_packet_beacon)
+ return;
+
+ rtlpriv->stats.pwdb_all_cnt += pstatus->rx_pwdb_all;
+ rtlpriv->stats.rssi_calculate_cnt++;
+
+ if (rtlpriv->stats.ui_rssi.total_num++ >= PHY_RSSI_SLID_WIN_MAX) {
+ rtlpriv->stats.ui_rssi.total_num = PHY_RSSI_SLID_WIN_MAX;
+ last_rssi = rtlpriv->stats.ui_rssi.elements[
+ rtlpriv->stats.ui_rssi.index];
+ rtlpriv->stats.ui_rssi.total_val -= last_rssi;
+ }
+ rtlpriv->stats.ui_rssi.total_val += pstatus->signalstrength;
+ rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.index++] =
+ pstatus->signalstrength;
+ if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
+ rtlpriv->stats.ui_rssi.index = 0;
+ tmpval = rtlpriv->stats.ui_rssi.total_val /
+ rtlpriv->stats.ui_rssi.total_num;
+ rtlpriv->stats.signal_strength = rtl_translate_todbm(hw,
+ (u8) tmpval);
+ pstatus->rssi = rtlpriv->stats.signal_strength;
+
+ if (pstatus->b_is_cck)
+ return;
+
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+ if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ pstatus->rx_mimo_signalstrength[rfpath];
+
+ }
+ if (pstatus->rx_mimo_signalstrength[rfpath] >
+ rtlpriv->stats.rx_rssi_percentage[rfpath]) {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstatus->rx_mimo_signalstrength[rfpath])) /
+ (RX_SMOOTH_FACTOR);
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ rtlpriv->stats.rx_rssi_percentage[rfpath] + 1;
+ } else {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstatus->rx_mimo_signalstrength[rfpath])) /
+ (RX_SMOOTH_FACTOR);
+ }
+ rtlpriv->stats.rx_snr_db[rfpath] = pstatus->rx_snr[rfpath];
+ rtlpriv->stats.rx_evm_dbm[rfpath] =
+ pstatus->rx_mimo_evm_dbm[rfpath];
+ rtlpriv->stats.rx_cfo_short[rfpath] =
+ pstatus->cfo_short[rfpath];
+ rtlpriv->stats.rx_cfo_tail[rfpath] = pstatus->cfo_tail[rfpath];
+ }
+}
+
+static void rtl_update_rxsignalstatistics(struct ieee80211_hw *hw,
+ struct rtl_stats *pstatus)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int weighting = 0;
+
+ if (rtlpriv->stats.recv_signal_power == 0)
+ rtlpriv->stats.recv_signal_power = pstatus->recvsignalpower;
+ if (pstatus->recvsignalpower > rtlpriv->stats.recv_signal_power)
+ weighting = 5;
+ else if (pstatus->recvsignalpower < rtlpriv->stats.recv_signal_power)
+ weighting = (-5);
+ rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power *
+ 5 + pstatus->recvsignalpower + weighting) / 6;
+}
+
+static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_sta_info *drv_priv = NULL;
+ struct ieee80211_sta *sta = NULL;
+ long undecorated_smoothed_pwdb;
+
+ rcu_read_lock();
+ if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
+ sta = rtl_find_sta(hw, pstatus->psaddr);
+
+ /* adhoc or ap mode */
+ if (sta) {
+ drv_priv = (struct rtl_sta_info *) sta->drv_priv;
+ undecorated_smoothed_pwdb =
+ drv_priv->rssi_stat.undecorated_smoothed_pwdb;
+ } else {
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ }
+
+ if (undecorated_smoothed_pwdb < 0)
+ undecorated_smoothed_pwdb = pstatus->rx_pwdb_all;
+ if (pstatus->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
+ undecorated_smoothed_pwdb = (((undecorated_smoothed_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ undecorated_smoothed_pwdb = undecorated_smoothed_pwdb + 1;
+ } else {
+ undecorated_smoothed_pwdb = (((undecorated_smoothed_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ }
+
+	if (sta) {
+ drv_priv->rssi_stat.undecorated_smoothed_pwdb =
+ undecorated_smoothed_pwdb;
+ } else {
+ rtlpriv->dm.undecorated_smoothed_pwdb = undecorated_smoothed_pwdb;
+ }
+ rcu_read_unlock();
+
+ rtl_update_rxsignalstatistics(hw, pstatus);
+}
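+
+/*
+ * The update above is an exponential moving average with
+ * RX_SMOOTH_FACTOR = 20: e.g. an old PWDB of 60 and a new sample of 80
+ * give (60 * 19 + 80) / 20 = 61, bumped to 62 because the new sample
+ * is higher than the running value.
+ */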
+
+static void rtl_process_ui_link_quality(struct ieee80211_hw *hw,
+ struct rtl_stats *pstatus)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 last_evm, n_stream, tmpval;
+
+ if (pstatus->signalquality == 0)
+ return;
+
+ if (rtlpriv->stats.ui_link_quality.total_num++ >=
+ PHY_LINKQUALITY_SLID_WIN_MAX) {
+ rtlpriv->stats.ui_link_quality.total_num =
+ PHY_LINKQUALITY_SLID_WIN_MAX;
+ last_evm = rtlpriv->stats.ui_link_quality.elements[
+ rtlpriv->stats.ui_link_quality.index];
+ rtlpriv->stats.ui_link_quality.total_val -= last_evm;
+ }
+ rtlpriv->stats.ui_link_quality.total_val += pstatus->signalquality;
+ rtlpriv->stats.ui_link_quality.elements[
+ rtlpriv->stats.ui_link_quality.index++] =
+ pstatus->signalquality;
+ if (rtlpriv->stats.ui_link_quality.index >=
+ PHY_LINKQUALITY_SLID_WIN_MAX)
+ rtlpriv->stats.ui_link_quality.index = 0;
+ tmpval = rtlpriv->stats.ui_link_quality.total_val /
+ rtlpriv->stats.ui_link_quality.total_num;
+ rtlpriv->stats.signal_quality = tmpval;
+ rtlpriv->stats.last_sigstrength_inpercent = tmpval;
+ for (n_stream = 0; n_stream < 2; n_stream++) {
+ if (pstatus->rx_mimo_signalquality[n_stream] != -1) {
+ if (rtlpriv->stats.rx_evm_percentage[n_stream] == 0) {
+ rtlpriv->stats.rx_evm_percentage[n_stream] =
+ pstatus->rx_mimo_signalquality[n_stream];
+ }
+ rtlpriv->stats.rx_evm_percentage[n_stream] =
+ ((rtlpriv->stats.rx_evm_percentage[n_stream]
+ * (RX_SMOOTH_FACTOR - 1)) +
+ (pstatus->rx_mimo_signalquality[n_stream] * 1)) /
+ (RX_SMOOTH_FACTOR);
+ }
+ }
+}
+
+void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
+ struct rtl_stats *pstatus)
+{
+
+ if (!pstatus->b_packet_matchbssid)
+ return;
+
+ rtl_process_ui_rssi(hw, pstatus);
+ rtl_process_pwdb(hw, pstatus);
+ rtl_process_ui_link_quality(hw, pstatus);
+}
+/* EXPORT_SYMBOL(rtl_process_phyinfo); */
diff --git a/drivers/staging/rtl8821ae/stats.h b/drivers/staging/rtl8821ae/stats.h
new file mode 100644
index 000000000000..d69d0cfd7e14
--- /dev/null
+++ b/drivers/staging/rtl8821ae/stats.h
@@ -0,0 +1,46 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_STATS_H__
+#define __RTL_STATS_H__
+
+#define PHY_RSSI_SLID_WIN_MAX 100
+#define PHY_LINKQUALITY_SLID_WIN_MAX 20
+#define PHY_BEACON_RSSI_SLID_WIN_MAX 10
+
+/* Rx smooth factor */
+#define RX_SMOOTH_FACTOR 20
+
+u8 rtl_query_rxpwrpercentage(char antpower);
+u8 rtl_evm_db_to_percentage(char value);
+long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig);
+void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
+ struct rtl_stats *pstatus);
+
+#endif
diff --git a/drivers/staging/rtl8821ae/wifi.h b/drivers/staging/rtl8821ae/wifi.h
new file mode 100644
index 000000000000..76bef93ad70a
--- /dev/null
+++ b/drivers/staging/rtl8821ae/wifi.h
@@ -0,0 +1,2532 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_WIFI_H__
+#define __RTL_WIFI_H__
+
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+#include <linux/version.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+#include "debug.h"
+
+
+#define RF_CHANGE_BY_INIT 0
+#define RF_CHANGE_BY_IPS BIT(28)
+#define RF_CHANGE_BY_PS BIT(29)
+#define RF_CHANGE_BY_HW BIT(30)
+#define RF_CHANGE_BY_SW BIT(31)
+
+#define IQK_ADDA_REG_NUM 16
+#define IQK_MAC_REG_NUM 4
+#define IQK_THRESHOLD 8
+
+#define MAX_KEY_LEN 61
+#define KEY_BUF_SIZE 5
+
+/* QoS related. */
+/*aci: 0x00 Best Effort*/
+/*aci: 0x01 Background*/
+/*aci: 0x10 Video*/
+/*aci: 0x11 Voice*/
+/*Max: define total number.*/
+#define AC0_BE 0
+#define AC1_BK 1
+#define AC2_VI 2
+#define AC3_VO 3
+#define AC_MAX 4
+#define QOS_QUEUE_NUM 4
+#define RTL_MAC80211_NUM_QUEUE 5
+
+#define QBSS_LOAD_SIZE 5
+#define MAX_WMMELE_LENGTH 64
+
+#define TOTAL_CAM_ENTRY 32
+
+/*slot time for 11g. */
+#define RTL_SLOT_TIME_9 9
+#define RTL_SLOT_TIME_20 20
+
+/*related with tcp/ip. */
+/*if_ether.h*/
+#define ETH_P_PAE 0x888E /*Port Access Entity
+ *(IEEE 802.1X) */
+#define ETH_P_IP 0x0800 /*Internet Protocol packet */
+#define ETH_P_ARP 0x0806 /*Address Resolution packet */
+#define SNAP_SIZE 6
+#define PROTOC_TYPE_SIZE 2
+
+/*related with 802.11 frame*/
+#define MAC80211_3ADDR_LEN 24
+#define MAC80211_4ADDR_LEN 30
+
+#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max 2.4 GHz
+ * channel number */
+#define CHANNEL_MAX_NUMBER_2G 14
+#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to
+ *"phy_GetChnlGroup8812A" and
+ * "Hal_ReadTxPowerInfo8812A"*/
+#define CHANNEL_MAX_NUMBER_5G_80M 7
+#define CHANNEL_GROUP_MAX (3 + 9) /* 2.4G: ch1~3, ch4~9, ch10~14,
+ * three groups, plus nine 5G groups */
+#define MAX_PG_GROUP 13
+#define CHANNEL_GROUP_MAX_2G 3
+#define CHANNEL_GROUP_IDX_5GL 3
+#define CHANNEL_GROUP_IDX_5GM 6
+#define CHANNEL_GROUP_IDX_5GH 9
+#define CHANNEL_GROUP_MAX_5G 9
+#define CHANNEL_MAX_NUMBER_2G 14
+#define AVG_THERMAL_NUM 8
+#define AVG_THERMAL_NUM_92E 4
+#define AVG_THERMAL_NUM_88E 4
+#define AVG_THERMAL_NUM_8723BE 4
+#define MAX_TID_COUNT 9
+#define MAX_NUM_RATES 264
+
+/*for 88E use*/
+/*It must always be set to 4, otherwise the efuse table read sequence will be wrong.*/
+#define MAX_TX_COUNT 4
+#define MAX_RF_PATH 4
+#define MAX_CHNL_GROUP_24G 6
+#define MAX_CHNL_GROUP_5G 14
+
+/* BK, BE, VI, VO, HCCA, MANAGEMENT, COMMAND, HIGH, BEACON. */
+#define MAX_TX_QUEUE 9
+
+#define TX_PWR_BY_RATE_NUM_BAND 2
+#define TX_PWR_BY_RATE_NUM_RF 4
+#define TX_PWR_BY_RATE_NUM_SECTION 12
+#define MAX_BASE_NUM_IN_PHY_REG_PG_24G 6
+#define MAX_BASE_NUM_IN_PHY_REG_PG_5G 5
+
+#define DELTA_SWINGIDX_SIZE 30
+#define BAND_NUM 3
+/*For now, this is just for the 8192ee;
+ *not OK yet, keep it 0*/
+#define DMA_IS_64BIT 0
+#define RTL8192EE_SEG_NUM 1 /* 0:2 seg, 1: 4 seg, 2: 8 seg */
+
+struct txpower_info_2g {
+ u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
+ u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
+ /*If only one tx, only BW20 and OFDM are used.*/
+ u8 cck_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
+};
+
+struct txpower_info_5g {
+ u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_5G];
+ /*If only one tx, only BW20, OFDM, BW80 and BW160 are used.*/
+ u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw80_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw160_diff[MAX_RF_PATH][MAX_TX_COUNT];
+};
+
+
+/* for early mode */
+#define EM_HDR_LEN 8
+#define FCS_LEN 4
+
+#define MAX_VIRTUAL_MAC 1
+
+enum rf_tx_num {
+ RF_1TX = 0,
+ RF_2TX,
+ RF_MAX_TX_NUM,
+ RF_TX_NUM_NONIMPLEMENT,
+};
+
+enum rate_section {
+ CCK = 0,
+ OFDM,
+ HT_MCS0_MCS7,
+ HT_MCS8_MCS15,
+ VHT_1SSMCS0_1SSMCS9,
+ VHT_2SSMCS0_2SSMCS9,
+};
+
+enum intf_type {
+ INTF_PCI = 0,
+ INTF_USB = 1,
+};
+
+enum radio_path {
+ RF90_PATH_A = 0,
+ RF90_PATH_B = 1,
+ RF90_PATH_C = 2,
+ RF90_PATH_D = 3,
+};
+
+enum rt_eeprom_type {
+ EEPROM_93C46,
+ EEPROM_93C56,
+ EEPROM_BOOT_EFUSE,
+};
+
+enum rtl_status {
+ RTL_STATUS_INTERFACE_START = 0,
+};
+
+enum hardware_type {
+ HARDWARE_TYPE_RTL8192E,
+ HARDWARE_TYPE_RTL8192U,
+ HARDWARE_TYPE_RTL8192SE,
+ HARDWARE_TYPE_RTL8192SU,
+ HARDWARE_TYPE_RTL8192CE,
+ HARDWARE_TYPE_RTL8192CU,
+ HARDWARE_TYPE_RTL8192DE,
+ HARDWARE_TYPE_RTL8192DU,
+ HARDWARE_TYPE_RTL8723AE,
+ HARDWARE_TYPE_RTL8188EE,
+ HARDWARE_TYPE_RTL8723BE,
+ HARDWARE_TYPE_RTL8192EE,
+ HARDWARE_TYPE_RTL8821AE,
+ HARDWARE_TYPE_RTL8812AE,
+ /* keep it last */
+ HARDWARE_TYPE_NUM
+};
+
+enum scan_operation_backup_opt {
+ SCAN_OPT_BACKUP_BAND0 = 0,
+ SCAN_OPT_BACKUP_BAND1,
+ SCAN_OPT_RESTORE,
+ SCAN_OPT_MAX
+};
+
+/*RF state.*/
+enum rf_pwrstate {
+ ERFON,
+ ERFSLEEP,
+ ERFOFF
+};
+
+struct bb_reg_def {
+ u32 rfintfs;
+ u32 rfintfi;
+ u32 rfintfo;
+ u32 rfintfe;
+ u32 rf3wire_offset;
+ u32 rflssi_select;
+ u32 rftxgain_stage;
+ u32 rfhssi_para1;
+ u32 rfhssi_para2;
+ u32 rfswitch_control;
+ u32 rfagc_control1;
+ u32 rfagc_control2;
+ u32 rfrxiq_imbalance;
+ u32 rfrx_afe;
+ u32 rftxiq_imbalance;
+ u32 rftx_afe;
+ u32 rflssi_readback;
+ u32 rflssi_readbackpi;
+};
+
+enum io_type {
+ IO_CMD_PAUSE_BAND0_DM_BY_SCAN = 0,
+ IO_CMD_PAUSE_BAND1_DM_BY_SCAN = 1,
+ IO_CMD_RESUME_DM_BY_SCAN = 2,
+};
+
+enum hw_variables {
+ HW_VAR_ETHER_ADDR,
+ HW_VAR_MULTICAST_REG,
+ HW_VAR_BASIC_RATE,
+ HW_VAR_BSSID,
+ HW_VAR_MEDIA_STATUS,
+ HW_VAR_SECURITY_CONF,
+ HW_VAR_BEACON_INTERVAL,
+ HW_VAR_ATIM_WINDOW,
+ HW_VAR_LISTEN_INTERVAL,
+ HW_VAR_CS_COUNTER,
+ HW_VAR_DEFAULTKEY0,
+ HW_VAR_DEFAULTKEY1,
+ HW_VAR_DEFAULTKEY2,
+ HW_VAR_DEFAULTKEY3,
+ HW_VAR_SIFS,
+ HW_VAR_DIFS,
+ HW_VAR_EIFS,
+ HW_VAR_SLOT_TIME,
+ HW_VAR_ACK_PREAMBLE,
+ HW_VAR_CW_CONFIG,
+ HW_VAR_CW_VALUES,
+ HW_VAR_RATE_FALLBACK_CONTROL,
+ HW_VAR_CONTENTION_WINDOW,
+ HW_VAR_RETRY_COUNT,
+ HW_VAR_TR_SWITCH,
+ HW_VAR_COMMAND,
+ HW_VAR_WPA_CONFIG,
+ HW_VAR_AMPDU_MIN_SPACE,
+ HW_VAR_SHORTGI_DENSITY,
+ HW_VAR_AMPDU_FACTOR,
+ HW_VAR_MCS_RATE_AVAILABLE,
+ HW_VAR_AC_PARAM,
+ HW_VAR_ACM_CTRL,
+ HW_VAR_DIS_Req_Qsize,
+ HW_VAR_CCX_CHNL_LOAD,
+ HW_VAR_CCX_NOISE_HISTOGRAM,
+ HW_VAR_CCX_CLM_NHM,
+ HW_VAR_TxOPLimit,
+ HW_VAR_TURBO_MODE,
+ HW_VAR_RF_STATE,
+ HW_VAR_RF_OFF_BY_HW,
+ HW_VAR_BUS_SPEED,
+ HW_VAR_SET_DEV_POWER,
+
+ HW_VAR_RCR,
+ HW_VAR_RATR_0,
+ HW_VAR_RRSR,
+ HW_VAR_CPU_RST,
+ HW_VAR_CECHK_BSSID,
+ HW_VAR_LBK_MODE,
+ HW_VAR_AES_11N_FIX,
+ HW_VAR_USB_RX_AGGR,
+ HW_VAR_USER_CONTROL_TURBO_MODE,
+ HW_VAR_RETRY_LIMIT,
+ HW_VAR_INIT_TX_RATE,
+ HW_VAR_TX_RATE_REG,
+ HW_VAR_EFUSE_USAGE,
+ HW_VAR_EFUSE_BYTES,
+ HW_VAR_AUTOLOAD_STATUS,
+ HW_VAR_RF_2R_DISABLE,
+ HW_VAR_SET_RPWM,
+ HW_VAR_H2C_FW_PWRMODE,
+ HW_VAR_H2C_FW_JOINBSSRPT,
+ HW_VAR_H2C_FW_MEDIASTATUSRPT,
+ HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+ HW_VAR_FW_PSMODE_STATUS,
+ HW_VAR_RESUME_CLK_ON,
+ HW_VAR_FW_LPS_ACTION,
+ HW_VAR_1X1_RECV_COMBINE,
+ HW_VAR_STOP_SEND_BEACON,
+ HW_VAR_TSF_TIMER,
+ HW_VAR_IO_CMD,
+
+ HW_VAR_RF_RECOVERY,
+ HW_VAR_H2C_FW_UPDATE_GTK,
+ HW_VAR_WF_MASK,
+ HW_VAR_WF_CRC,
+ HW_VAR_WF_IS_MAC_ADDR,
+ HW_VAR_H2C_FW_OFFLOAD,
+ HW_VAR_RESET_WFCRC,
+
+ HW_VAR_HANDLE_FW_C2H,
+ HW_VAR_DL_FW_RSVD_PAGE,
+ HW_VAR_AID,
+ HW_VAR_HW_SEQ_ENABLE,
+ HW_VAR_CORRECT_TSF,
+ HW_VAR_BCN_VALID,
+ HW_VAR_FWLPS_RF_ON,
+ HW_VAR_DUAL_TSF_RST,
+ HW_VAR_SWITCH_EPHY_WoWLAN,
+ HW_VAR_INT_MIGRATION,
+ HW_VAR_INT_AC,
+ HW_VAR_RF_TIMING,
+
+ HAL_DEF_WOWLAN,
+ HW_VAR_MRC,
+ HW_VAR_KEEP_ALIVE,
+ HW_VAR_NAV_UPPER,
+};
+
+enum rt_media_status {
+ RT_MEDIA_DISCONNECT = 0,
+ RT_MEDIA_CONNECT = 1
+};
+
+enum rt_oem_id {
+ RT_CID_DEFAULT = 0,
+ RT_CID_8187_ALPHA0 = 1,
+ RT_CID_8187_SERCOMM_PS = 2,
+ RT_CID_8187_HW_LED = 3,
+ RT_CID_8187_NETGEAR = 4,
+ RT_CID_WHQL = 5,
+ RT_CID_819x_CAMEO = 6,
+ RT_CID_819x_RUNTOP = 7,
+ RT_CID_819x_Senao = 8,
+ RT_CID_TOSHIBA = 9,
+ RT_CID_819x_Netcore = 10,
+ RT_CID_Nettronix = 11,
+ RT_CID_DLINK = 12,
+ RT_CID_PRONET = 13,
+ RT_CID_COREGA = 14,
+ RT_CID_819x_ALPHA = 15,
+ RT_CID_819x_Sitecom = 16,
+ RT_CID_CCX = 17,
+ RT_CID_819x_Lenovo = 18,
+ RT_CID_819x_QMI = 19,
+ RT_CID_819x_Edimax_Belkin = 20,
+ RT_CID_819x_Sercomm_Belkin = 21,
+ RT_CID_819x_CAMEO1 = 22,
+ RT_CID_819x_MSI = 23,
+ RT_CID_819x_Acer = 24,
+ RT_CID_819x_HP = 27,
+ RT_CID_819x_CLEVO = 28,
+ RT_CID_819x_Arcadyan_Belkin = 29,
+ RT_CID_819x_SAMSUNG = 30,
+ RT_CID_819x_WNC_COREGA = 31,
+ RT_CID_819x_Foxcoon = 32,
+ RT_CID_819x_DELL = 33,
+ RT_CID_819x_PRONETS = 34,
+ RT_CID_819x_Edimax_ASUS = 35,
+ RT_CID_NETGEAR = 36,
+ RT_CID_PLANEX = 37,
+ RT_CID_CC_C = 38,
+};
+
+enum hw_descs {
+ HW_DESC_OWN,
+ HW_DESC_RXOWN,
+ HW_DESC_TX_NEXTDESC_ADDR,
+ HW_DESC_TXBUFF_ADDR,
+ HW_DESC_RXBUFF_ADDR,
+ HW_DESC_RXPKT_LEN,
+ HW_DESC_RXERO,
+ HW_DESC_RX_PREPARE,
+};
+
+enum prime_sc {
+ PRIME_CHNL_OFFSET_DONT_CARE = 0,
+ PRIME_CHNL_OFFSET_LOWER = 1,
+ PRIME_CHNL_OFFSET_UPPER = 2,
+};
+
+enum rf_type {
+ RF_1T1R = 0,
+ RF_1T2R = 1,
+ RF_2T2R = 2,
+ RF_2T2R_GREEN = 3,
+};
+
+enum ht_channel_width {
+ HT_CHANNEL_WIDTH_20 = 0,
+ HT_CHANNEL_WIDTH_20_40 = 1,
+ HT_CHANNEL_WIDTH_80 = 2,
+};
+
+/* Ref: 802.11i spec D10.0 7.3.2.25.1
+ * Cipher Suites Encryption Algorithms */
+enum rt_enc_alg {
+ NO_ENCRYPTION = 0,
+ WEP40_ENCRYPTION = 1,
+ TKIP_ENCRYPTION = 2,
+ RSERVED_ENCRYPTION = 3,
+ AESCCMP_ENCRYPTION = 4,
+ WEP104_ENCRYPTION = 5,
+ AESCMAC_ENCRYPTION = 6, /*IEEE802.11w */
+};
+
+enum rtl_hal_state {
+ _HAL_STATE_STOP = 0,
+ _HAL_STATE_START = 1,
+};
+
+enum rtl_var_map {
+ /*reg map */
+ SYS_ISO_CTRL = 0,
+ SYS_FUNC_EN,
+ SYS_CLK,
+ MAC_RCR_AM,
+ MAC_RCR_AB,
+ MAC_RCR_ACRC32,
+ MAC_RCR_ACF,
+ MAC_RCR_AAP,
+ MAC_HIMR,
+ MAC_HIMRE,
+ MAC_HSISR,
+
+ /*efuse map */
+ EFUSE_TEST,
+ EFUSE_CTRL,
+ EFUSE_CLK,
+ EFUSE_CLK_CTRL,
+ EFUSE_PWC_EV12V,
+ EFUSE_FEN_ELDR,
+ EFUSE_LOADER_CLK_EN,
+ EFUSE_ANA8M,
+ EFUSE_HWSET_MAX_SIZE,
+ EFUSE_MAX_SECTION_MAP,
+ EFUSE_REAL_CONTENT_SIZE,
+ EFUSE_OOB_PROTECT_BYTES_LEN,
+ EFUSE_ACCESS,
+ /*CAM map */
+ RWCAM,
+ WCAMI,
+ RCAMO,
+ CAMDBG,
+ SECR,
+ SEC_CAM_NONE,
+ SEC_CAM_WEP40,
+ SEC_CAM_TKIP,
+ SEC_CAM_AES,
+ SEC_CAM_WEP104,
+
+ /*IMR map */
+ RTL_IMR_BCNDMAINT6, /*Beacon DMA Interrupt 6 */
+ RTL_IMR_BCNDMAINT5, /*Beacon DMA Interrupt 5 */
+ RTL_IMR_BCNDMAINT4, /*Beacon DMA Interrupt 4 */
+ RTL_IMR_BCNDMAINT3, /*Beacon DMA Interrupt 3 */
+ RTL_IMR_BCNDMAINT2, /*Beacon DMA Interrupt 2 */
+ RTL_IMR_BCNDMAINT1, /*Beacon DMA Interrupt 1 */
+ RTL_IMR_BCNDOK8, /*Beacon Queue DMA OK Interrupt 8 */
+ RTL_IMR_BCNDOK7, /*Beacon Queue DMA OK Interrupt 7 */
+ RTL_IMR_BCNDOK6, /*Beacon Queue DMA OK Interrupt 6 */
+ RTL_IMR_BCNDOK5, /*Beacon Queue DMA OK Interrupt 5 */
+ RTL_IMR_BCNDOK4, /*Beacon Queue DMA OK Interrupt 4 */
+ RTL_IMR_BCNDOK3, /*Beacon Queue DMA OK Interrupt 3 */
+ RTL_IMR_BCNDOK2, /*Beacon Queue DMA OK Interrupt 2 */
+ RTL_IMR_BCNDOK1, /*Beacon Queue DMA OK Interrupt 1 */
+ RTL_IMR_TIMEOUT2, /*Timeout interrupt 2 */
+ RTL_IMR_TIMEOUT1, /*Timeout interrupt 1 */
+ RTL_IMR_TXFOVW, /*Transmit FIFO Overflow */
+ RTL_IMR_PSTIMEOUT, /*Power save time out interrupt */
+ RTL_IMR_BcnInt, /*Beacon DMA Interrupt 0 */
+ RTL_IMR_RXFOVW, /*Receive FIFO Overflow */
+ RTL_IMR_RDU, /*Receive Descriptor Unavailable */
+ RTL_IMR_ATIMEND, /*For 92C, ATIM Window End Interrupt */
+ RTL_IMR_BDOK, /*Beacon Queue DMA OK Interrupt */
+ RTL_IMR_HIGHDOK, /*High Queue DMA OK Interrupt */
+ RTL_IMR_COMDOK, /*Command Queue DMA OK Interrupt */
+ RTL_IMR_TBDOK, /*Transmit Beacon OK Interrupt */
+ RTL_IMR_MGNTDOK, /*Management Queue DMA OK Interrupt */
+ RTL_IMR_TBDER, /*For 92C, Transmit Beacon Error Interrupt */
+ RTL_IMR_BKDOK, /*AC_BK DMA OK Interrupt */
+ RTL_IMR_BEDOK, /*AC_BE DMA OK Interrupt */
+ RTL_IMR_VIDOK, /*AC_VI DMA OK Interrupt */
+ RTL_IMR_VODOK, /*AC_VO DMA Interrupt */
+ RTL_IMR_ROK, /*Receive DMA OK Interrupt */
+ RTL_IMR_HSISR_IND, /*HSISR Interrupt*/
+ RTL_IBSS_INT_MASKS, /*(RTL_IMR_BcnInt | RTL_IMR_TBDOK |
+ * RTL_IMR_TBDER) */
+ RTL_IMR_C2HCMD, /*fw interrupt*/
+
+ /*CCK Rates, TxHT = 0 */
+ RTL_RC_CCK_RATE1M,
+ RTL_RC_CCK_RATE2M,
+ RTL_RC_CCK_RATE5_5M,
+ RTL_RC_CCK_RATE11M,
+
+ /*OFDM Rates, TxHT = 0 */
+ RTL_RC_OFDM_RATE6M,
+ RTL_RC_OFDM_RATE9M,
+ RTL_RC_OFDM_RATE12M,
+ RTL_RC_OFDM_RATE18M,
+ RTL_RC_OFDM_RATE24M,
+ RTL_RC_OFDM_RATE36M,
+ RTL_RC_OFDM_RATE48M,
+ RTL_RC_OFDM_RATE54M,
+
+ RTL_RC_HT_RATEMCS7,
+ RTL_RC_HT_RATEMCS15,
+
+ /*keep it last */
+ RTL_VAR_MAP_MAX,
+};
+
+/*Firmware PS mode for control LPS.*/
+enum _fw_ps_mode {
+ FW_PS_ACTIVE_MODE = 0,
+ FW_PS_MIN_MODE = 1,
+ FW_PS_MAX_MODE = 2,
+ FW_PS_DTIM_MODE = 3,
+ FW_PS_VOIP_MODE = 4,
+ FW_PS_UAPSD_WMM_MODE = 5,
+ FW_PS_UAPSD_MODE = 6,
+ FW_PS_IBSS_MODE = 7,
+ FW_PS_WWLAN_MODE = 8,
+ FW_PS_PM_Radio_Off = 9,
+ FW_PS_PM_Card_Disable = 10,
+};
+
+enum rt_psmode {
+ EACTIVE, /*Active/Continuous access. */
+ EMAXPS, /*Max power save mode. */
+ EFASTPS, /*Fast power save mode. */
+ EAUTOPS, /*Auto power save mode. */
+};
+
+/*LED related.*/
+enum led_ctl_mode {
+ LED_CTL_POWER_ON = 1,
+ LED_CTL_LINK = 2,
+ LED_CTL_NO_LINK = 3,
+ LED_CTL_TX = 4,
+ LED_CTL_RX = 5,
+ LED_CTL_SITE_SURVEY = 6,
+ LED_CTL_POWER_OFF = 7,
+ LED_CTL_START_TO_LINK = 8,
+ LED_CTL_START_WPS = 9,
+ LED_CTL_STOP_WPS = 10,
+};
+
+enum rtl_led_pin {
+ LED_PIN_GPIO0,
+ LED_PIN_LED0,
+ LED_PIN_LED1,
+ LED_PIN_LED2
+};
+
+/*QoS related.*/
+/*acm implementation method.*/
+enum acm_method {
+ eAcmWay0_SwAndHw = 0,
+ eAcmWay1_HW = 1,
+ eAcmWay2_SW = 2,
+};
+
+enum macphy_mode {
+ SINGLEMAC_SINGLEPHY = 0,
+ DUALMAC_DUALPHY,
+ DUALMAC_SINGLEPHY,
+};
+
+enum band_type {
+ BAND_ON_2_4G = 0,
+ BAND_ON_5G,
+ BAND_ON_BOTH,
+ BANDMAX
+};
+
+/*aci/aifsn Field.
+Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/
+union aci_aifsn {
+ u8 char_data;
+
+ struct {
+ u8 aifsn:4;
+ u8 acm:1;
+ u8 aci:2;
+ u8 reserved:1;
+ } f; /* Field */
+};
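+
+/*Example (editor's illustration, assuming the usual little-endian,
+ LSB-first bit-field layout): for the Voice AC (aci = 3) with aifsn = 2
+ and acm = 0, char_data = 2 | (3 << 5) = 0x62.*/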
+
+/*mlme related.*/
+enum wireless_mode {
+ WIRELESS_MODE_UNKNOWN = 0x00,
+ WIRELESS_MODE_A = 0x01,
+ WIRELESS_MODE_B = 0x02,
+ WIRELESS_MODE_G = 0x04,
+ WIRELESS_MODE_AUTO = 0x08,
+ WIRELESS_MODE_N_24G = 0x10,
+ WIRELESS_MODE_N_5G = 0x20,
+ WIRELESS_MODE_AC_5G = 0x40,
+ WIRELESS_MODE_AC_24G = 0x80
+};
+
+enum ratr_table_mode {
+ RATR_INX_WIRELESS_NGB = 0, /* BGN 40 MHz 2SS 1SS */
+ RATR_INX_WIRELESS_NG = 1, /* GN or N */
+ RATR_INX_WIRELESS_NB = 2, /* BGN 20 MHz 2SS 1SS or BN */
+ RATR_INX_WIRELESS_N = 3,
+ RATR_INX_WIRELESS_GB = 4,
+ RATR_INX_WIRELESS_G = 5,
+ RATR_INX_WIRELESS_B = 6,
+ RATR_INX_WIRELESS_MC = 7,
+ RATR_INX_WIRELESS_AC_5N = 8,
+ RATR_INX_WIRELESS_AC_24N = 9,
+};
+
+enum rtl_link_state {
+ MAC80211_NOLINK = 0,
+ MAC80211_LINKING = 1,
+ MAC80211_LINKED = 2,
+ MAC80211_LINKED_SCANNING = 3,
+};
+
+enum act_category {
+ ACT_CAT_QOS = 1,
+ ACT_CAT_DLS = 2,
+ ACT_CAT_BA = 3,
+ ACT_CAT_HT = 7,
+ ACT_CAT_WMM = 17,
+};
+
+enum ba_action {
+ ACT_ADDBAREQ = 0,
+ ACT_ADDBARSP = 1,
+ ACT_DELBA = 2,
+};
+
+enum rt_polarity_ctl {
+ RT_POLARITY_LOW_ACT = 0,
+ RT_POLARITY_HIGH_ACT = 1,
+};
+
+
+struct octet_string {
+ u8 *octet;
+ u16 length;
+};
+
+struct rtl_hdr_3addr {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctl;
+ u8 payload[0];
+} __packed;
+
+struct rtl_info_element {
+ u8 id;
+ u8 len;
+ u8 data[0];
+} __packed;
+
+struct rtl_probe_rsp {
+ struct rtl_hdr_3addr header;
+ u32 time_stamp[2];
+ __le16 beacon_interval;
+ __le16 capability;
+ /*SSID, supported rates, FH params, DS params,
+ CF params, IBSS params, TIM (if beacon), RSN */
+ struct rtl_info_element info_element[0];
+} __packed;
+
+/*LED related.*/
+/*ledpin identifies how to implement this SW LED.*/
+struct rtl_led {
+ void *hw;
+ enum rtl_led_pin ledpin;
+ bool b_ledon;
+};
+
+struct rtl_led_ctl {
+ bool bled_opendrain;
+ struct rtl_led sw_led0;
+ struct rtl_led sw_led1;
+};
+
+struct rtl_qos_parameters {
+ __le16 cw_min;
+ __le16 cw_max;
+ u8 aifs;
+ u8 flag;
+ __le16 tx_op;
+} __packed;
+
+struct rt_smooth_data {
+ u32 elements[100]; /*array to store values */
+ u32 index; /*index to current array to store */
+ u32 total_num; /*num of valid elements */
+ u32 total_val; /*sum of valid elements */
+};
+
+struct rtl_ht_agg {
+ u16 txq_id;
+ u16 wait_for_ba;
+ u16 start_idx;
+ u64 bitmap;
+ u32 rate_n_flags;
+ u8 agg_state;
+ u8 rx_agg_state;
+};
+
+struct rtl_tid_data {
+ u16 seq_number;
+ struct rtl_ht_agg agg;
+};
+
+struct rssi_sta {
+ long undecorated_smoothed_pwdb;
+};
+
+struct rtl_sta_info {
+ struct list_head list;
+ u8 ratr_index;
+ u8 wireless_mode;
+ u8 mimo_ps;
+ u8 mac_addr[6];
+ struct rtl_tid_data tids[MAX_TID_COUNT];
+
+ /* just used for ap adhoc or mesh*/
+ struct rssi_sta rssi_stat;
+} __packed;
+
+#ifdef VIF_TODO
+struct rtl_vif {
+ unsigned int id;
+ /* struct ieee80211_vif __rcu *vif; */
+ struct ieee80211_vif *vif;
+};
+
+struct rtl_vif_info {
+ struct list_head list;
+ bool active;
+ unsigned int id;
+ struct sk_buff *beacon;
+ bool enable_beacon;
+};
+
+struct vif_priv {
+ struct list_head vif_list;
+
+ /* interface mode settings */
+ unsigned long vif_bitmap;
+ unsigned int vifs;
+ struct rtl_vif vif[MAX_VIRTUAL_MAC];
+
+ /* beaconing */
+ spinlock_t beacon_lock;
+ unsigned int global_pretbtt;
+ unsigned int global_beacon_int;
+ /* struct rtl_vif_info __rcu *beacon_iter; */
+ struct rtl_vif_info *beacon_iter;
+ unsigned int beacon_enabled;
+};
+#endif
+
+struct false_alarm_statistics {
+ u32 cnt_parity_fail;
+ u32 cnt_rate_illegal;
+ u32 cnt_crc8_fail;
+ u32 cnt_mcs_fail;
+ u32 cnt_fast_fsync_fail;
+ u32 cnt_sb_search_fail;
+ u32 cnt_ofdm_fail;
+ u32 cnt_cck_fail;
+ u32 cnt_all;
+ u32 cnt_ofdm_cca;
+ u32 cnt_cck_cca;
+ u32 cnt_cca_all;
+ u32 cnt_bw_usc;
+ u32 cnt_bw_lsc;
+};
+
+struct init_gain {
+ u8 xaagccore1;
+ u8 xbagccore1;
+ u8 xcagccore1;
+ u8 xdagccore1;
+ u8 cca;
+
+};
+
+struct wireless_stats {
+ unsigned long txbytesunicast;
+ unsigned long txbytesmulticast;
+ unsigned long txbytesbroadcast;
+ unsigned long rxbytesunicast;
+
+ long rx_snr_db[4];
+ /*Correct smoothed ss in dBm, only used
+ in the driver to report real power now. */
+ long recv_signal_power;
+ long signal_quality;
+ long last_sigstrength_inpercent;
+
+ u32 rssi_calculate_cnt;
+ u32 pwdb_all_cnt;
+
+ /*Transformed, in dBm. Beautified signal
+ strength for UI, not correct. */
+ long signal_strength;
+
+ u8 rx_rssi_percentage[4];
+ u8 rx_evm_dbm[4];
+ u8 rx_evm_percentage[2];
+
+ u16 rx_cfo_short[4];
+ u16 rx_cfo_tail[4];
+
+ struct rt_smooth_data ui_rssi;
+ struct rt_smooth_data ui_link_quality;
+};
+
+struct rate_adaptive {
+ u8 rate_adaptive_disabled;
+ u8 ratr_state;
+ u16 reserve;
+
+ u32 high_rssi_thresh_for_ra;
+ u32 high2low_rssi_thresh_for_ra;
+ u8 low2high_rssi_thresh_for_ra;
+ u32 low_rssi_thresh_for_ra;
+ u32 upper_rssi_threshold_ratr;
+ u32 middleupper_rssi_threshold_ratr;
+ u32 middle_rssi_threshold_ratr;
+ u32 middlelow_rssi_threshold_ratr;
+ u32 low_rssi_threshold_ratr;
+ u32 ultralow_rssi_threshold_ratr;
+ u32 low_rssi_threshold_ratr_40m;
+ u32 low_rssi_threshold_ratr_20m;
+ u8 ping_rssi_enable;
+ u32 ping_rssi_ratr;
+ u32 ping_rssi_thresh_for_ra;
+ u32 last_ratr;
+ u8 pre_ratr_state;
+ u8 ldpc_thres;
+ bool use_ldpc;
+ bool lower_rts_rate;
+ bool is_special_data;
+};
+
+struct regd_pair_mapping {
+ u16 reg_dmnenum;
+ u16 reg_5ghz_ctl;
+ u16 reg_2ghz_ctl;
+};
+
+struct dynamic_primary_cca {
+ u8 pricca_flag;
+ u8 intf_flag;
+ u8 intf_type;
+ u8 dup_rts_flag;
+ u8 monitor_flag;
+ u8 ch_offset;
+ u8 mf_state;
+};
+
+struct rtl_regulatory {
+ char alpha2[2];
+ u16 country_code;
+ u16 max_power_level;
+ u32 tp_scale;
+ u16 current_rd;
+ u16 current_rd_ext;
+ int16_t power_limit;
+ struct regd_pair_mapping *regpair;
+};
+
+struct rtl_rfkill {
+ bool rfkill_state; /*0 is off, 1 is on */
+};
+
+/*for P2P PS*/
+#define P2P_MAX_NOA_NUM 2
+
+enum p2p_role {
+ P2P_ROLE_DISABLE = 0,
+ P2P_ROLE_DEVICE = 1,
+ P2P_ROLE_CLIENT = 2,
+ P2P_ROLE_GO = 3
+};
+
+enum p2p_ps_state {
+ P2P_PS_DISABLE = 0,
+ P2P_PS_ENABLE = 1,
+ P2P_PS_SCAN = 2,
+ P2P_PS_SCAN_DONE = 3,
+ P2P_PS_ALLSTASLEEP = 4, /* for P2P GO */
+};
+
+enum p2p_ps_mode {
+ P2P_PS_NONE = 0,
+ P2P_PS_CTWINDOW = 1,
+ P2P_PS_NOA = 2,
+ P2P_PS_MIX = 3, /* CTWindow and NoA */
+};
+
+struct rtl_p2p_ps_info {
+ enum p2p_ps_mode p2p_ps_mode; /* indicate p2p ps mode */
+ enum p2p_ps_state p2p_ps_state; /* indicate p2p ps state */
+ u8 noa_index; /* Identifies an instance of Notice of Absence timing. */
+ /* Client traffic window. A period of time in TU after TBTT. */
+ u8 ctwindow;
+ u8 opp_ps; /* opportunistic power save. */
+ u8 noa_num; /* number of NoA descriptor in P2P IE. */
+ /* Count for owner, Type of client. */
+ u8 noa_count_type[P2P_MAX_NOA_NUM];
+ /* Max duration for owner, preferred or
+ * min acceptable duration for client. */
+ u32 noa_duration[P2P_MAX_NOA_NUM];
+ /* Length of interval for owner, preferred or
+ * max acceptable interval of client. */
+ u32 noa_interval[P2P_MAX_NOA_NUM];
+ /* schedule expressed in terms of the lower 4 bytes of the TSF timer. */
+ u32 noa_start_time[P2P_MAX_NOA_NUM];
+};
+
+struct p2p_ps_offload_t {
+ u8 Offload_En:1;
+ u8 role:1; /* 1: Owner, 0: Client */
+ u8 CTWindow_En:1;
+ u8 NoA0_En:1;
+ u8 NoA1_En:1;
+ u8 AllStaSleep:1;
+ u8 discovery:1;
+ u8 reserved:1;
+};
+
+#define IQK_MATRIX_REG_NUM 8
+#define IQK_MATRIX_SETTINGS_NUM (14+24+21) // Channels_2_4G_NUM + Channels_5G_20M_NUM + Channels_5G
+struct iqk_matrix_regs {
+ bool b_iqk_done;
+ long value[1][IQK_MATRIX_REG_NUM];
+};
+
+struct rtl_phy {
+ struct bb_reg_def phyreg_def[4]; /*Radio A/B/C/D */
+ struct init_gain initgain_backup;
+ enum io_type current_io_type;
+
+ u8 rf_mode;
+ u8 rf_type;
+ u8 current_chan_bw;
+ u8 set_bwmode_inprogress;
+ u8 sw_chnl_inprogress;
+ u8 sw_chnl_stage;
+ u8 sw_chnl_step;
+ u8 current_channel;
+ u8 h2c_box_num;
+ u8 set_io_inprogress;
+ u8 lck_inprogress;
+
+ /* record for power tracking */
+ s32 reg_e94;
+ s32 reg_e9c;
+ s32 reg_ea4;
+ s32 reg_eac;
+ s32 reg_eb4;
+ s32 reg_ebc;
+ s32 reg_ec4;
+ s32 reg_ecc;
+ u8 rfpienable;
+ u8 reserve_0;
+ u16 reserve_1;
+ u32 reg_c04, reg_c08, reg_874;
+ u32 adda_backup[16];
+ u32 iqk_mac_backup[IQK_MAC_REG_NUM];
+ u32 iqk_bb_backup[10];
+ bool iqk_initialized;
+
+ bool rfpath_rx_enable[MAX_RF_PATH];
+ /*Jaguar*/
+ u8 reg_837;
+ /* Dual mac */
+ bool b_need_iqk;
+ struct iqk_matrix_regs iqk_matrix_regsetting[IQK_MATRIX_SETTINGS_NUM];
+
+ bool b_rfpi_enable;
+
+ bool b_iqk_in_progress;
+
+ u8 pwrgroup_cnt;
+ u8 bcck_high_power;
+ /* this is for 88E & 8723A */
+ u32 mcs_txpwrlevel_origoffset[MAX_PG_GROUP][16];
+ /* this is for 92EE */
+ u32 tx_power_by_rate_offset[TX_PWR_BY_RATE_NUM_BAND]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_SECTION];
+ u8 txpwr_by_rate_base_24g[TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [MAX_BASE_NUM_IN_PHY_REG_PG_24G];
+
+ u8 txpwr_by_rate_base_5g[TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [MAX_BASE_NUM_IN_PHY_REG_PG_5G];
+ u8 default_initialgain[4];
+
+ /* the current Tx power level */
+ u8 cur_cck_txpwridx;
+ u8 cur_ofdm24g_txpwridx;
+ u8 cur_bw20_txpwridx;
+ u8 cur_bw40_txpwridx;
+
+ u32 rfreg_chnlval[2];
+ bool b_apk_done;
+ u32 reg_rf3c[2]; /* pathA / pathB */
+
+ u32 backup_rf_0x1a;/*92ee*/
+ /* bfsync */
+ u8 framesync;
+ u32 framesync_c34;
+
+ u8 num_total_rfpath;
+ u16 rf_pathmap;
+
+ u8 hw_rof_enable; /*Enable GPIO[9] as WL RF HW PDn source*/
+
+ enum rt_polarity_ctl polarity_ctl;
+};
+
+#define RTL_AGG_STOP 0
+#define RTL_AGG_PROGRESS 1
+#define RTL_AGG_START 2
+#define RTL_AGG_OPERATIONAL 3
+#define RTL_RX_AGG_START 1
+#define RTL_RX_AGG_STOP 0
+
+struct rtl_priv;
+struct rtl_io {
+ struct device *dev;
+
+ /*PCI MEM map */
+ unsigned long pci_mem_end; /*shared mem end */
+ unsigned long pci_mem_start; /*shared mem start */
+
+ /*PCI IO map */
+ unsigned long pci_base_addr; /*device I/O address */
+
+ void (*write8_async) (struct rtl_priv * rtlpriv, u32 addr, u8 val);
+ void (*write16_async) (struct rtl_priv * rtlpriv, u32 addr, u16 val);
+ void (*write32_async) (struct rtl_priv * rtlpriv, u32 addr, u32 val);
+
+ u8(*read8_sync) (struct rtl_priv * rtlpriv, u32 addr);
+ u16(*read16_sync) (struct rtl_priv * rtlpriv, u32 addr);
+ u32(*read32_sync) (struct rtl_priv * rtlpriv, u32 addr);
+
+};
+
+struct rtl_mac {
+ u8 mac_addr[ETH_ALEN];
+ u8 mac80211_registered;
+ u8 beacon_enabled;
+
+ u32 tx_ss_num;
+ u32 rx_ss_num;
+
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
+ enum nl80211_iftype opmode;
+
+ /*Probe Beacon management */
+ enum rtl_link_state link_state;
+
+ int n_channels;
+ int n_bitrates;
+
+ bool offchan_deley;
+ u8 p2p; /*using p2p role*/
+ bool p2p_in_use;
+
+ /*filters */
+ u32 rx_conf;
+
+ bool act_scanning;
+ u8 cnt_after_linked;
+ bool skip_scan;
+
+ /* early mode */
+ /* skb wait queue */
+ struct sk_buff_head skb_waitq[MAX_TID_COUNT];
+
+ /*RDG*/
+ bool rdg_en;
+
+ /*AP*/
+ u8 bssid[6];
+ u32 vendor;
+ u32 basic_rates; /* b/g rates */
+ u8 ht_enable;
+ u8 bw_40;
+ u8 mode; /* wireless mode */
+ u8 slot_time;
+ u8 short_preamble;
+ u8 use_cts_protect;
+ u8 cur_40_prime_sc;
+ u8 cur_40_prime_sc_bk;
+ u8 cur_80_prime_sc;
+ u64 tsf;
+ u8 retry_short;
+ u8 retry_long;
+ u16 assoc_id;
+ bool bhiddenssid;
+
+ /*IBSS*/
+ int beacon_interval;
+
+ /*AMPDU*/
+ u8 min_space_cfg; /*For Min spacing configurations */
+ u8 max_mss_density;
+ u8 current_ampdu_factor;
+ u8 current_ampdu_density;
+
+ /*QOS & EDCA */
+ struct ieee80211_tx_queue_params edca_param[RTL_MAC80211_NUM_QUEUE];
+ struct rtl_qos_parameters ac[AC_MAX];
+};
+
+struct rtl_hal {
+ struct ieee80211_hw *hw;
+
+ bool driver_is_goingto_unload;
+ bool up_first_time;
+ bool bfirst_init;
+ bool being_init_adapter;
+ bool b_bbrf_ready;
+ bool b_mac_func_enable;
+ bool b_pre_edcca_enable;
+
+ enum intf_type interface;
+ u16 hw_type; /*92c or 92d or 92s and so on */
+ u8 ic_class;
+ u8 oem_id;
+ u32 version; /*version of chip */
+ u8 state; /*stop 0, start 1 */
+ u8 boad_type;
+
+ /*firmware */
+ u32 fwsize;
+ u8 *pfirmware;
+ u16 fw_version;
+ u16 fw_subversion;
+ bool b_h2c_setinprogress;
+ u8 last_hmeboxnum;
+ bool bfw_ready;
+
+ /*Reserve page start offset except beacon in TxQ. */
+ u8 fw_rsvdpage_startoffset;
+ u8 h2c_txcmd_seq;
+ u8 current_ra_rate;
+
+ /* FW Cmd IO related */
+ u16 fwcmd_iomap;
+ u32 fwcmd_ioparam;
+ bool set_fwcmd_inprogress;
+ u8 current_fwcmd_io;
+
+ bool bfw_clk_change_in_progress;
+ bool ballow_sw_to_change_hwclc;
+ u8 fw_ps_state;
+ struct p2p_ps_offload_t p2p_ps_offload;
+ /**/
+ bool driver_going2unload;
+
+ /*AMPDU init min space*/
+ u8 minspace_cfg; /*For Min spacing configurations */
+
+ /* Dual mac */
+ enum macphy_mode macphymode;
+ enum band_type current_bandtype; /* 0:2.4G, 1:5G */
+ enum band_type current_bandtypebackup;
+ enum band_type bandset;
+ /* dual MAC 0--Mac0 1--Mac1 */
+ u32 interfaceindex;
+ /* just for Dual MAC S3S4 */
+ u8 macphyctl_reg;
+ bool b_earlymode_enable;
+ u8 max_earlymode_num;
+ /* Dual mac */
+ bool during_mac0init_radiob;
+ bool during_mac1init_radioa;
+ bool reloadtxpowerindex;
+ /* True if IMR or IQK have been done
+ for 2.4G during scan progress */
+ bool b_load_imrandiqk_setting_for2g;
+
+ bool disable_amsdu_8k;
+ bool bmaster_of_dmsp;
+ bool bslave_of_dmsp;
+
+ u16 rx_tag;/*for 92ee*/
+ u8 rts_en;
+};
+
+struct rtl_security {
+ /*default 0 */
+ bool use_sw_sec;
+
+ bool being_setkey;
+ bool use_defaultkey;
+ /*Encryption Algorithm for Unicast Packet */
+ enum rt_enc_alg pairwise_enc_algorithm;
+ /*Encryption Algorithm for Broadcast/Multicast */
+ enum rt_enc_alg group_enc_algorithm;
+ /*Cam Entry Bitmap */
+ u32 hwsec_cam_bitmap;
+ u8 hwsec_cam_sta_addr[TOTAL_CAM_ENTRY][ETH_ALEN];
+ /*local key buffer, index 0 is for the
+ pairwise key, 1-4 are for group keys. */
+ u8 key_buf[KEY_BUF_SIZE][MAX_KEY_LEN];
+ u8 key_len[KEY_BUF_SIZE];
+
+ /*The pointer of Pairwise Key,
+ it always points to KeyBuf[4] */
+ u8 *pairwise_key;
+};
+
+struct rtl_dig {
+ u8 dig_enable_flag;
+ u8 dig_ext_port_stage;
+
+ u32 rssi_lowthresh;
+ u32 rssi_highthresh;
+
+ u32 fa_lowthresh;
+ u32 fa_highthresh;
+
+ u8 cursta_connectstate;
+ u8 presta_connectstate;
+ u8 curmultista_connectstate;
+
+ u8 pre_igvalue;
+ u8 cur_igvalue;
+
+ char backoff_val;
+ char backoff_val_range_max;
+ char backoff_val_range_min;
+ u8 rx_gain_range_max;
+ u8 rx_gain_range_min;
+ u8 rssi_val_min;
+ u8 min_undecorated_pwdb_for_dm;
+ long last_min_undecorated_pwdb_for_dm;
+
+ u8 pre_cck_pd_state;
+ u8 cur_cck_pd_state;
+
+ u8 large_fa_hit;
+ u8 forbidden_igi;
+ u32 recover_cnt;
+
+};
+
+struct rtl_pstbl {
+ u8 pre_ccastate;
+ u8 cur_ccasate;
+
+ u8 pre_rfstate;
+ u8 cur_rfstate;
+
+ long rssi_val_min;
+
+};
+
+#define ASSOCIATE_ENTRY_NUM (32 + 1)
+
+struct fast_ant_trainning {
+ u8 bssid[6];
+ u8 antsel_rx_keep_0;
+ u8 antsel_rx_keep_1;
+ u8 antsel_rx_keep_2;
+ u32 ant_sum_rssi[7];
+ u32 ant_rssi_cnt[7];
+ u32 ant_ave_rssi[7];
+ u8 fat_state;
+ u32 train_idx;
+ u8 antsel_a[ASSOCIATE_ENTRY_NUM];
+ u8 antsel_b[ASSOCIATE_ENTRY_NUM];
+ u8 antsel_c[ASSOCIATE_ENTRY_NUM];
+ u32 main_ant_sum[ASSOCIATE_ENTRY_NUM];
+ u32 aux_ant_sum[ASSOCIATE_ENTRY_NUM];
+ u32 main_ant_cnt[ASSOCIATE_ENTRY_NUM];
+ u32 aux_ant_cnt[ASSOCIATE_ENTRY_NUM];
+ u8 rx_idle_ant;
+ bool b_becomelinked;
+};
+
+struct dm_phy_dbg_info {
+ char rx_snrdb[4];
+ u64 num_qry_phy_status;
+ u64 num_qry_phy_status_cck;
+ u64 num_qry_phy_status_ofdm;
+ u16 num_qry_beacon_pkt;
+ u16 num_non_be_pkt;
+ s32 rx_evm[4];
+};
+
+struct rtl_dm {
+ /*PHY status for DM */
+ long entry_min_undecoratedsmoothed_pwdb;
+ long undecorated_smoothed_pwdb; /*out dm */
+ long entry_max_undecoratedsmoothed_pwdb;
+ bool b_dm_initialgain_enable;
+ bool bdynamic_txpower_enable;
+ bool bcurrent_turbo_edca;
+ bool bis_any_nonbepkts; /*out dm */
+ bool bis_cur_rdlstate;
+ bool btxpower_trackinginit;
+ bool b_disable_framebursting;
+ bool b_cck_inch14;
+ bool btxpower_tracking;
+ bool b_useramask;
+ bool brfpath_rxenable[4];
+ bool binform_fw_driverctrldm;
+ bool bcurrent_mrc_switch;
+ u8 txpowercount;
+
+ u8 thermalvalue_rxgain;
+ u8 thermalvalue_iqk;
+ u8 thermalvalue_lck;
+ u8 thermalvalue;
+ u8 thermalvalue_avg[AVG_THERMAL_NUM];
+ u8 thermalvalue_avg_index;
+ bool bdone_txpower;
+ u8 last_dtp_lvl;
+ u8 dynamic_txhighpower_lvl; /*Tx high power level */
+ u8 dm_flag; /*Indicates each dynamic mechanism's status. */
+ u8 dm_type;
+ u8 txpower_track_control;
+ bool binterrupt_migration;
+ bool bdisable_tx_int;
+ char ofdm_index[MAX_RF_PATH];
+ u8 default_ofdm_index;
+ u8 default_cck_index;
+ char cck_index;
+ char delta_power_index[MAX_RF_PATH];
+ char delta_power_index_last[MAX_RF_PATH];
+ char power_index_offset[MAX_RF_PATH];
+ char aboslute_ofdm_swing_idx[MAX_RF_PATH];
+ char remnant_ofdm_swing_idx[MAX_RF_PATH];
+ char remnant_cck_idx;
+ bool modify_txagc_flag_path_a;
+ bool modify_txagc_flag_path_b;
+
+ bool b_one_entry_only;
+ struct dm_phy_dbg_info dbginfo;
+ /* Dynamic ATC switch */
+
+ bool atc_status;
+ bool large_cfo_hit;
+ bool is_freeze;
+ int cfo_tail[2];
+ int cfo_ave_pre;
+ int crystal_cap;
+ u8 cfo_threshold;
+ u32 packet_count;
+ u32 packet_count_pre;
+ u8 tx_rate;
+
+
+ /*88e tx power tracking*/
+ u8 bb_swing_idx_ofdm[MAX_RF_PATH];
+ u8 bb_swing_idx_ofdm_current;
+ u8 bb_swing_idx_ofdm_base[MAX_RF_PATH];
+ bool bb_swing_flag_Ofdm;
+ u8 bb_swing_idx_cck;
+ u8 bb_swing_idx_cck_current;
+ u8 bb_swing_idx_cck_base;
+ bool bb_swing_flag_cck;
+
+ char bb_swing_diff_2g;
+ char bb_swing_diff_5g;
+
+ u8 delta_swing_table_idx_24gccka_p[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24gccka_n[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24gcckb_p[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24gcckb_n[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24ga_p[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24ga_n[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24gb_p[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24gb_n[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_5ga_p[BAND_NUM][DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_5ga_n[BAND_NUM][DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_5gb_p[BAND_NUM][DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_5gb_n[BAND_NUM][DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24ga_p_8188e[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24ga_n_8188e[DELTA_SWINGIDX_SIZE];
+
+
+ /* DMSP */
+ bool supp_phymode_switch;
+
+ /* Dual MAC */
+ struct rtl_dig dm_digtable;
+ struct rtl_pstbl dm_pstable;
+ struct fast_ant_trainning fat_table;
+
+ u8 resp_tx_path;
+ u8 path_sel;
+ u32 patha_sum;
+ u32 pathb_sum;
+ u32 patha_cnt;
+ u32 pathb_cnt;
+
+ u8 pre_channel;
+ u8 *p_channel;
+ u8 linked_interval;
+
+ u64 last_tx_ok_cnt;
+ u64 last_rx_ok_cnt;
+};
+
+#define EFUSE_MAX_LOGICAL_SIZE 256
+
+struct rtl_efuse {
+ bool bautoLoad_ok;
+ bool bootfromefuse;
+ u16 max_physical_size;
+
+ u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE];
+ u16 efuse_usedbytes;
+ u8 efuse_usedpercentage;
+#ifdef EFUSE_REPG_WORKAROUND
+ bool efuse_re_pg_sec1flag;
+ u8 efuse_re_pg_data[8];
+#endif
+
+ u8 autoload_failflag;
+ u8 autoload_status;
+
+ short epromtype;
+ u16 eeprom_vid;
+ u16 eeprom_did;
+ u16 eeprom_svid;
+ u16 eeprom_smid;
+ u8 eeprom_oemid;
+ u16 eeprom_channelplan;
+ u8 eeprom_version;
+
+ u8 dev_addr[6];
+ u8 board_type;
+ u8 wowlan_enable;
+ u8 antenna_div_cfg;
+ u8 antenna_div_type;
+
+ bool b_txpwr_fromeprom;
+ u8 eeprom_crystalcap;
+ u8 eeprom_tssi[2];
+ u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */
+ u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX];
+ u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
+ u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G];
+ u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX];
+ u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][CHANNEL_GROUP_MAX];
+
+
+ u8 internal_pa_5g[2]; /* pathA / pathB */
+ u8 eeprom_c9;
+ u8 eeprom_cc;
+
+ /*For power group */
+ u8 eeprom_pwrgroup[2][3];
+ u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER];
+ u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER];
+
+ u8 txpwrlevel_cck[MAX_RF_PATH][CHANNEL_MAX_NUMBER_2G];
+ /*For HT 40MHZ pwr */
+ u8 txpwrlevel_ht40_1s[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ /*For HT 40MHZ pwr */
+ u8 txpwrlevel_ht40_2s[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ char txpwr_cckdiff[MAX_RF_PATH][MAX_TX_COUNT]; /*CCK_24G_Diff*/
+ /*HT 20<->40 Pwr diff */
+ char txpwr_ht20diff[MAX_RF_PATH][MAX_TX_COUNT]; /*BW20_24G_Diff*/
+ char txpwr_ht40diff[MAX_RF_PATH][MAX_TX_COUNT];/*BW40_24G_Diff*/
+ /*For HT<->legacy pwr diff */
+ char txpwr_legacyhtdiff[MAX_RF_PATH][MAX_TX_COUNT];/*OFDM_24G_Diff*/
+
+ u8 txpwr_5g_bw40base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ u8 txpwr_5g_bw80base[MAX_RF_PATH][CHANNEL_MAX_NUMBER_5G_80M];
+ char txpwr_5g_ofdmdiff[MAX_RF_PATH][MAX_TX_COUNT];
+ char txpwr_5g_bw20diff[MAX_RF_PATH][MAX_TX_COUNT];
+ char txpwr_5g_bw40diff[MAX_RF_PATH][MAX_TX_COUNT];
+ char txpwr_5g_bw80diff[MAX_RF_PATH][MAX_TX_COUNT];
+
+ u8 txpwr_safetyflag; /* Band edge enable flag */
+ u16 eeprom_txpowerdiff;
+ u8 legacy_httxpowerdiff; /* Legacy to HT rate power diff */
+ u8 antenna_txpwdiff[3];
+
+ u8 eeprom_regulatory;
+ u8 eeprom_thermalmeter;
+ u8 thermalmeter[2];/*ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */
+ u16 tssi_13dbm;
+ u8 crystalcap; /* CrystalCap. */
+ u8 delta_iqk;
+ u8 delta_lck;
+
+ u8 legacy_ht_txpowerdiff; /*Legacy to HT rate power diff */
+ bool b_apk_thermalmeterignore;
+
+ bool b1x1_recvcombine;
+ bool b1ss_support;
+
+ /*channel plan */
+ u8 channel_plan;
+};
+
+struct rtl_ps_ctl {
+ bool pwrdomain_protect;
+ bool b_in_powersavemode;
+ bool rfchange_inprogress;
+ bool b_swrf_processing;
+ bool b_hwradiooff;
+ /*
+ * just for PCIE ASPM
+ * If it supports ASPM, Offset[560h] = 0x40,
+ * otherwise Offset[560h] = 0x00.
+ */
+ bool b_support_aspm;
+ bool b_support_backdoor;
+
+ /*for LPS */
+ enum rt_psmode dot11_psmode; /*Power save mode configured. */
+ bool b_swctrl_lps;
+ bool b_fwctrl_lps;
+ u8 fwctrl_psmode;
+ /*For Fw control LPS mode */
+ u8 b_reg_fwctrl_lps;
+ /*Record Fw PS mode status. */
+ bool b_fw_current_inpsmode;
+ u8 reg_max_lps_awakeintvl;
+ bool report_linked;
+ bool b_low_power_enable;/*for 32k*/
+
+ /*for IPS */
+ bool b_inactiveps;
+
+ u32 rfoff_reason;
+
+ /*RF OFF Level */
+ u32 cur_ps_level;
+ u32 reg_rfps_level;
+
+ /*just for PCIE ASPM */
+ u8 const_amdpci_aspm;
+
+ enum rf_pwrstate inactive_pwrstate;
+ enum rf_pwrstate rfpwr_state; /*cur power state */
+
+ /* for SW LPS*/
+ bool sw_ps_enabled;
+ bool state;
+ bool state_inap;
+ bool multi_buffered;
+ u16 nullfunc_seq;
+ unsigned int dtim_counter;
+ unsigned int sleep_ms;
+ unsigned long last_sleep_jiffies;
+ unsigned long last_awake_jiffies;
+ unsigned long last_delaylps_stamp_jiffies;
+ unsigned long last_dtim;
+ unsigned long last_beacon;
+ unsigned long last_action;
+ unsigned long last_slept;
+
+ /*For P2P PS */
+ struct rtl_p2p_ps_info p2p_ps_info;
+ u8 pwr_mode;
+ u8 smart_ps;
+};
+
+struct rtl_stats {
+ u8 psaddr[ETH_ALEN];
+ u32 mac_time[2];
+ s8 rssi;
+ u8 signal;
+ u8 noise;
+ u8 rate; /* hw desc rate */
+ u8 rawdata;
+ u8 received_channel;
+ u8 control;
+ u8 mask;
+ u8 freq;
+ u16 len;
+ u64 tsf;
+ u32 beacon_time;
+ u8 nic_type;
+ u16 length;
+ u8 signalquality; /*in 0-100 index. */
+ /*
+ * Real power in dBm for this packet,
+ * no beautification and aggregation.
+ * */
+ s32 recvsignalpower;
+ s8 rxpower; /*in dBm Translate from PWdB */
+ u8 signalstrength; /*in 0-100 index. */
+ u16 b_hwerror:1;
+ u16 b_crc:1;
+ u16 b_icv:1;
+ u16 b_shortpreamble:1;
+ u16 antenna:1;
+ u16 decrypted:1;
+ u16 wakeup:1;
+ u32 timestamp_low;
+ u32 timestamp_high;
+ bool b_shift;
+
+ u8 rx_drvinfo_size;
+ u8 rx_bufshift;
+ bool b_isampdu;
+ bool b_isfirst_ampdu;
+ bool rx_is40Mhzpacket;
+ u32 rx_pwdb_all;
+ u8 rx_mimo_signalstrength[4]; /*in 0~100 index */
+ s8 rx_mimo_signalquality[4];
+ u8 rx_mimo_evm_dbm[4];
+ u16 cfo_short[4]; /* per-path's Cfo_short */
+ u16 cfo_tail[4];
+
+ u8 rx_pwr[4]; /* per-path's pwdb */
+ u8 rx_snr[4]; /* per-path's SNR */
+ u8 bandwidth;
+ u8 bt_coex_pwr_adjust;
+ bool b_packet_matchbssid;
+ bool b_is_cck;
+ bool b_is_ht;
+ bool b_packet_toself;
+ bool b_packet_beacon; /*for rssi */
+ char cck_adc_pwdb[4]; /*for rx path selection */
+
+ u8 packet_report_type;
+
+ u32 macid;
+ u8 wake_match;
+ u32 bt_rx_rssi_percentage;
+ u32 macid_valid_entry[2];
+};
+
+struct rt_link_detect {
+ /* count for roaming */
+ u32 bcn_rx_inperiod;
+ u32 roam_times;
+
+ u32 num_tx_in4period[4];
+ u32 num_rx_in4period[4];
+
+ u32 num_tx_inperiod;
+ u32 num_rx_inperiod;
+
+ bool b_busytraffic;
+ bool b_tx_busy_traffic;
+ bool b_rx_busy_traffic;
+ bool b_higher_busytraffic;
+ bool b_higher_busyrxtraffic;
+
+ u32 tidtx_in4period[MAX_TID_COUNT][4];
+ u32 tidtx_inperiod[MAX_TID_COUNT];
+ bool higher_busytxtraffic[MAX_TID_COUNT];
+};
+
+struct rtl_tcb_desc {
+ u8 b_packet_bw:1;
+ u8 b_multicast:1;
+ u8 b_broadcast:1;
+
+ u8 b_rts_stbc:1;
+ u8 b_rts_enable:1;
+ u8 b_cts_enable:1;
+ u8 b_rts_use_shortpreamble:1;
+ u8 b_rts_use_shortgi:1;
+ u8 rts_sc:1;
+ u8 b_rts_bw:1;
+ u8 rts_rate;
+
+ u8 use_shortgi:1;
+ u8 use_shortpreamble:1;
+ u8 use_driver_rate:1;
+ u8 disable_ratefallback:1;
+
+ u8 ratr_index;
+ u8 mac_id;
+ u8 hw_rate;
+
+ u8 b_last_inipkt:1;
+ u8 b_cmd_or_init:1;
+ u8 queue_index;
+
+ /* early mode */
+ u8 empkt_num;
+ /* The max value by HW */
+ u32 empkt_len[10];
+ bool btx_enable_sw_calc_duration;
+ /* used when the HAL constructs the pkt;
+ * we may set the desc at tx time */
+ u8 self_desc;
+};
+
+struct proxim {
+ bool proxim_on;
+
+ void *proximity_priv;
+ int (*proxim_rx)(struct ieee80211_hw *hw, struct rtl_stats *status,
+ struct sk_buff *skb);
+ u8 (*proxim_get_var)(struct ieee80211_hw *hw, u8 type);
+};
+
+struct rtl_hal_ops {
+ int (*init_sw_vars) (struct ieee80211_hw * hw);
+ void (*deinit_sw_vars) (struct ieee80211_hw * hw);
+ void (*read_eeprom_info) (struct ieee80211_hw * hw);
+ void (*interrupt_recognized) (struct ieee80211_hw * hw,
+ u32 * p_inta, u32 * p_intb);
+ int (*hw_init) (struct ieee80211_hw * hw);
+ void (*hw_disable) (struct ieee80211_hw * hw);
+ void (*hw_suspend) (struct ieee80211_hw * hw);
+ void (*hw_resume) (struct ieee80211_hw * hw);
+ void (*enable_interrupt) (struct ieee80211_hw * hw);
+ void (*disable_interrupt) (struct ieee80211_hw * hw);
+ int (*set_network_type) (struct ieee80211_hw * hw,
+ enum nl80211_iftype type);
+ void (*set_chk_bssid)(struct ieee80211_hw *hw,
+ bool check_bssid);
+ void (*set_bw_mode) (struct ieee80211_hw * hw,
+ enum nl80211_channel_type ch_type);
+ u8(*switch_channel) (struct ieee80211_hw * hw);
+ void (*set_qos) (struct ieee80211_hw * hw, int aci);
+ void (*set_bcn_reg) (struct ieee80211_hw * hw);
+ void (*set_bcn_intv) (struct ieee80211_hw * hw);
+ void (*update_interrupt_mask) (struct ieee80211_hw * hw,
+ u32 add_msr, u32 rm_msr);
+ void (*get_hw_reg) (struct ieee80211_hw * hw, u8 variable, u8 * val);
+ void (*set_hw_reg) (struct ieee80211_hw * hw, u8 variable, u8 * val);
+ void (*update_rate_tbl) (struct ieee80211_hw * hw,
+ struct ieee80211_sta *sta, u8 rssi_level);
+ void (*pre_fill_tx_bd_desc) (struct ieee80211_hw *hw, u8 *tx_bd_desc,
+ u8 *desc, u8 queue_index,
+ struct sk_buff *skb, dma_addr_t addr);
+ u16 (*rx_desc_buff_remained_cnt) (struct ieee80211_hw *hw,
+ u8 queue_index);
+ void (*rx_check_dma_ok) (struct ieee80211_hw *hw, u8 *header_desc,
+ u8 queue_index);
+ void (*fill_tx_desc) (struct ieee80211_hw * hw,
+ struct ieee80211_hdr * hdr,
+ u8 * pdesc_tx, u8 * pbd_desc,
+ struct ieee80211_tx_info * info,
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+/*<delete in kernel end>*/
+ struct ieee80211_sta *sta,
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+ struct sk_buff * skb, u8 hw_queue,
+ struct rtl_tcb_desc *ptcb_desc);
+ void (*fill_tx_cmddesc) (struct ieee80211_hw * hw, u8 * pdesc,
+ bool b_firstseg, bool b_lastseg,
+ struct sk_buff * skb);
+ bool(*query_rx_desc) (struct ieee80211_hw * hw,
+ struct rtl_stats * status,
+ struct ieee80211_rx_status * rx_status,
+ u8 * pdesc, struct sk_buff * skb);
+ void (*set_channel_access) (struct ieee80211_hw * hw);
+ bool(*radio_onoff_checking) (struct ieee80211_hw * hw, u8 * valid);
+ void (*dm_watchdog) (struct ieee80211_hw * hw);
+ void (*scan_operation_backup) (struct ieee80211_hw * hw, u8 operation);
+ bool(*set_rf_power_state) (struct ieee80211_hw * hw,
+ enum rf_pwrstate rfpwr_state);
+ void (*led_control) (struct ieee80211_hw * hw,
+ enum led_ctl_mode ledaction);
+ void (*set_desc) (struct ieee80211_hw *hw, u8 * pdesc, bool istx,
+ u8 desc_name, u8 * val);
+ u32(*get_desc) (u8 * pdesc, bool istx, u8 desc_name);
+ bool (*is_tx_desc_closed) (struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index);
+ void (*tx_polling) (struct ieee80211_hw * hw, u8 hw_queue);
+ void (*enable_hw_sec) (struct ieee80211_hw * hw);
+ void (*set_key) (struct ieee80211_hw * hw, u32 key_index,
+ u8 * p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all);
+ void (*init_sw_leds) (struct ieee80211_hw * hw);
+ u32(*get_bbreg) (struct ieee80211_hw * hw, u32 regaddr, u32 bitmask);
+ void (*set_bbreg) (struct ieee80211_hw * hw, u32 regaddr, u32 bitmask,
+ u32 data);
+ u32(*get_rfreg) (struct ieee80211_hw * hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask);
+ void (*set_rfreg) (struct ieee80211_hw * hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data);
+ void (*allow_all_destaddr)(struct ieee80211_hw *hw,
+ bool allow_all_da, bool write_into_reg);
+ void (*linked_set_reg) (struct ieee80211_hw * hw);
+ void (*check_switch_to_dmdp) (struct ieee80211_hw * hw);
+ void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw);
+ void (*dualmac_switch_to_dmdp) (struct ieee80211_hw *hw);
+ void (*c2h_command_handle) (struct ieee80211_hw *hw);
+ void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw, bool mstate);
+ void (*bt_turn_off_bt_coexist_before_enter_lps) (struct ieee80211_hw *hw);
+ void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer);
+ bool (*get_btc_status) (void);
+ u32 (*rx_command_packet_handler)(struct ieee80211_hw *hw,
+ struct rtl_stats status,
+ struct sk_buff *skb);
+};
+
+struct rtl_intf_ops {
+ /*com */
+ void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
+ int (*adapter_start) (struct ieee80211_hw * hw);
+ void (*adapter_stop) (struct ieee80211_hw * hw);
+ bool (*check_buddy_priv)(struct ieee80211_hw *hw,
+ struct rtl_priv **buddy_priv);
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ int (*adapter_tx) (struct ieee80211_hw * hw, struct sk_buff * skb,
+ struct rtl_tcb_desc *ptcb_desc);
+#else
+/*<delete in kernel end>*/
+ int (*adapter_tx) (struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct rtl_tcb_desc *ptcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop);
+#else
+ void (*flush)(struct ieee80211_hw *hw, bool drop);
+#endif
+ int (*reset_trx_ring) (struct ieee80211_hw * hw);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb);
+#else
+/*<delete in kernel end>*/
+ bool (*waitq_insert) (struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+
+ /*pci */
+ void (*disable_aspm) (struct ieee80211_hw * hw);
+ void (*enable_aspm) (struct ieee80211_hw * hw);
+
+ /*usb */
+};
+
+struct rtl_mod_params {
+ /* default: 0 = using hardware encryption */
+ bool sw_crypto;
+
+ /* default: 1 = using no linked power save */
+ bool b_inactiveps;
+
+ /* default: 1 = using linked sw power save */
+ bool b_swctrl_lps;
+
+ /* default: 1 = using linked fw power save */
+ bool b_fwctrl_lps;
+};
+
+struct rtl_hal_cfg {
+ u8 bar_id;
+ bool write_readback;
+ char *name;
+ char *fw_name;
+ struct rtl_hal_ops *ops;
+ struct rtl_mod_params *mod_params;
+
+ /*this map is used for some registers or vars
+ defined in HAL but used in MAIN */
+ u32 maps[RTL_VAR_MAP_MAX];
+
+};
+
+struct rtl_locks {
+ /* mutex */
+ struct mutex conf_mutex;
+
+ /*spin lock */
+ spinlock_t ips_lock;
+ spinlock_t irq_th_lock;
+ spinlock_t h2c_lock;
+ spinlock_t rf_ps_lock;
+ spinlock_t rf_lock;
+ spinlock_t lps_lock;
+ spinlock_t waitq_lock;
+ spinlock_t entry_list_lock;
+
+ /*FW clock change */
+ spinlock_t fw_ps_lock;
+
+ /*Dual mac*/
+ spinlock_t cck_and_rw_pagea_lock;
+
+ /*Easy concurrent*/
+ spinlock_t check_sendpkt_lock;
+
+ spinlock_t iqk_lock;
+};
+
+struct rtl_works {
+ struct ieee80211_hw *hw;
+
+ /*timer */
+ struct timer_list watchdog_timer;
+ struct timer_list dualmac_easyconcurrent_retrytimer;
+ struct timer_list fw_clockoff_timer;
+ struct timer_list fast_antenna_trainning_timer;
+ /*task */
+ struct tasklet_struct irq_tasklet;
+ struct tasklet_struct irq_prepare_bcn_tasklet;
+
+ /*work queue */
+ struct workqueue_struct *rtl_wq;
+ struct delayed_work watchdog_wq;
+ struct delayed_work ips_nic_off_wq;
+
+ /* For SW LPS */
+ struct delayed_work ps_work;
+ struct delayed_work ps_rfon_wq;
+ struct delayed_work fwevt_wq;
+};
+
+struct rtl_debug {
+ u32 dbgp_type[DBGP_TYPE_MAX];
+ u32 global_debuglevel;
+ u64 global_debugcomponents;
+
+ /* add for proc debug */
+ struct proc_dir_entry *proc_dir;
+ char proc_name[20];
+};
+
+#define MIMO_PS_STATIC 0
+#define MIMO_PS_DYNAMIC 1
+#define MIMO_PS_NOLIMIT 3
+
+struct rtl_dualmac_easy_concurrent_ctl {
+ enum band_type currentbandtype_backfordmdp;
+ bool bclose_bbandrf_for_dmsp;
+ bool bchange_to_dmdp;
+ bool bchange_to_dmsp;
+ bool bswitch_in_process;
+};
+
+struct rtl_dmsp_ctl {
+ bool bactivescan_for_slaveofdmsp;
+ bool bscan_for_anothermac_fordmsp;
+ bool bscan_for_itself_fordmsp;
+ bool bwritedig_for_anothermacofdmsp;
+ u32 curdigvalue_for_anothermacofdmsp;
+ bool bchangecckpdstate_for_anothermacofdmsp;
+ u8 curcckpdstate_for_anothermacofdmsp;
+ bool bchangetxhighpowerlvl_for_anothermacofdmsp;
+ u8 curtxhighlvl_for_anothermacofdmsp;
+ long rssivalmin_for_anothermacofdmsp;
+};
+
+struct rtl_global_var {
+ /* from this list we can get
+ * other adapter's rtl_priv */
+ struct list_head glb_priv_list;
+ spinlock_t glb_list_lock;
+};
+
+struct rtl_btc_info {
+ u8 bt_type;
+ u8 btcoexist;
+ u8 ant_num;
+};
+
+struct rtl_btc_ops {
+ void (*btc_init_variables) (struct rtl_priv *rtlpriv);
+ void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
+ void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
+ void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
+ void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype);
+ void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action);
+ void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv,
+ enum rt_media_status mstatus);
+ void (*btc_periodical) (struct rtl_priv *rtlpriv);
+ void (*btc_halt_notify) (void);
+ void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv,
+ u8 * tmp_buf, u8 length);
+ bool (*btc_is_limited_dig) (struct rtl_priv *rtlpriv);
+ bool (*btc_is_disable_edca_turbo) (struct rtl_priv *rtlpriv);
+ bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv);
+};
+
+struct rtl_bt_coexist {
+ struct rtl_btc_ops *btc_ops;
+ struct rtl_btc_info btc_info;
+};
+
+
+struct rtl_priv {
+ struct list_head list;
+#ifdef VIF_TODO
+ struct vif_priv vif_priv;
+#endif
+ struct rtl_priv *buddy_priv;
+ struct rtl_global_var *glb_var;
+ struct rtl_dualmac_easy_concurrent_ctl easy_concurrent_ctl;
+ struct rtl_dmsp_ctl dmsp_ctl;
+ struct rtl_locks locks;
+ struct rtl_works works;
+ struct rtl_mac mac80211;
+ struct rtl_hal rtlhal;
+ struct rtl_regulatory regd;
+ struct rtl_rfkill rfkill;
+ struct rtl_io io;
+ struct rtl_phy phy;
+ struct rtl_dm dm;
+ struct rtl_security sec;
+ struct rtl_efuse efuse;
+
+ struct rtl_ps_ctl psc;
+ struct rate_adaptive ra;
+ struct dynamic_primary_cca primarycca;
+ struct wireless_stats stats;
+ struct rt_link_detect link_info;
+ struct false_alarm_statistics falsealm_cnt;
+
+ struct rtl_rate_priv *rate_priv;
+
+ struct rtl_debug dbg;
+
+ /* sta entry list for ap adhoc or mesh */
+ struct list_head entry_list;
+
+ /*
+ *hal_cfg : for diff cards
+ *intf_ops : for diff interface usb/pcie
+ */
+ struct rtl_hal_cfg *cfg;
+ struct rtl_intf_ops *intf_ops;
+
+ /*this var will be set by set_bit,
+ and is used to indicate the status of
+ the interface or hardware */
+ unsigned long status;
+
+ /* Intel Proximity; memory should be allocated
+ * in the Intel Proximity module and can only
+ * be used in Intel Proximity mode */
+ struct proxim proximity;
+
+ /*for bt coexist use*/
+ struct rtl_bt_coexist btcoexist;
+
+ /* separate 92ee from other ICs,
+ * 92ee uses the new trx flow. */
+ bool use_new_trx_flow;
+ /*This must be the last item so
+ that it points to the data allocated
+ beyond this structure like:
+ rtl_pci_priv or rtl_usb_priv */
+ u8 priv[0];
+};
+
+#define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv))
+#define rtl_mac(rtlpriv) (&((rtlpriv)->mac80211))
+#define rtl_hal(rtlpriv) (&((rtlpriv)->rtlhal))
+#define rtl_efuse(rtlpriv) (&((rtlpriv)->efuse))
+#define rtl_psc(rtlpriv) (&((rtlpriv)->psc))
+#define rtl_sec(rtlpriv) (&((rtlpriv)->sec))
+#define rtl_dm(rtlpriv) (&((rtlpriv)->dm))
+/***************************************
+ Bluetooth Co-existence Related
+****************************************/
+
+enum bt_ant_num {
+ ANT_X2 = 0,
+ ANT_X1 = 1,
+};
+
+enum bt_co_type {
+ BT_2WIRE = 0,
+ BT_ISSC_3WIRE = 1,
+ BT_ACCEL = 2,
+ BT_CSR_BC4 = 3,
+ BT_CSR_BC8 = 4,
+ BT_RTL8756 = 5,
+ BT_RTL8723A = 6,
+ BT_RTL8821A = 7,
+ BT_RTL8723B = 8,
+ BT_RTL8192E = 9,
+ BT_RTL8812A = 11,
+};
+
+enum bt_total_ant_num {
+ ANT_TOTAL_X2 = 0,
+ ANT_TOTAL_X1 = 1
+};
+
+enum bt_cur_state {
+ BT_OFF = 0,
+ BT_ON = 1,
+};
+
+enum bt_service_type {
+ BT_SCO = 0,
+ BT_A2DP = 1,
+ BT_HID = 2,
+ BT_HID_IDLE = 3,
+ BT_SCAN = 4,
+ BT_IDLE = 5,
+ BT_OTHER_ACTION = 6,
+ BT_BUSY = 7,
+ BT_OTHERBUSY = 8,
+ BT_PAN = 9,
+};
+
+enum bt_radio_shared {
+ BT_RADIO_SHARED = 0,
+ BT_RADIO_INDIVIDUAL = 1,
+};
+
+struct bt_coexist_info {
+
+ /* EEPROM BT info. */
+ u8 eeprom_bt_coexist;
+ u8 eeprom_bt_type;
+ u8 eeprom_bt_ant_num;
+ u8 eeprom_bt_ant_isolation;
+ u8 eeprom_bt_radio_shared;
+
+ u8 bt_coexistence;
+ u8 bt_ant_num;
+ u8 bt_coexist_type;
+ u8 bt_state;
+ u8 bt_cur_state; /* 0:on, 1:off */
+ u8 bt_ant_isolation; /* 0:good, 1:bad */
+ u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */
+ u8 bt_service;
+ u8 bt_radio_shared_type;
+ u8 bt_rfreg_origin_1e;
+ u8 bt_rfreg_origin_1f;
+ u8 bt_rssi_state;
+ u32 ratio_tx;
+ u32 ratio_pri;
+ u32 bt_edca_ul;
+ u32 bt_edca_dl;
+
+ bool b_init_set;
+ bool b_bt_busy_traffic;
+ bool b_bt_traffic_mode_set;
+ bool b_bt_non_traffic_mode_set;
+
+ bool b_fw_coexist_all_off;
+ bool b_sw_coexist_all_off;
+ bool b_hw_coexist_all_off;
+ u32 current_state;
+ u32 previous_state;
+ u32 current_state_h;
+ u32 previous_state_h;
+
+ u8 bt_pre_rssi_state;
+ u8 bt_pre_rssi_state1;
+
+ u8 b_reg_bt_iso;
+ u8 b_reg_bt_sco;
+ bool b_balance_on;
+ u8 bt_active_zero_cnt;
+ bool b_cur_bt_disabled;
+ bool b_pre_bt_disabled;
+
+ u8 bt_profile_case;
+ u8 bt_profile_action;
+ bool b_bt_busy;
+ bool b_hold_for_bt_operation;
+ u8 lps_counter;
+};
+
+
+/****************************************
+ mem access macro define start
+ Call the endian-free macros when:
+ 1. Reading/writing packet content.
+ 2. Before writing an integer to IO.
+ 3. After reading an integer from IO.
+****************************************/
+/* Convert little-endian data to host byte order */
+#define EF1BYTE(_val) \
+ ((u8)(_val))
+#define EF2BYTE(_val) \
+ (le16_to_cpu(_val))
+#define EF4BYTE(_val) \
+ (le32_to_cpu(_val))
+
+/* Read data from memory */
+#define READEF1BYTE(_ptr) \
+ EF1BYTE(*((u8 *)(_ptr)))
+#define READEF2BYTE(_ptr) \
+ EF2BYTE(*((u16 *)(_ptr)))
+#define READEF4BYTE(_ptr) \
+ EF4BYTE(*((u32 *)(_ptr)))
+
+/* Write data to memory */
+#define WRITEEF1BYTE(_ptr, _val) \
+ (*((u8 *)(_ptr)))=EF1BYTE(_val)
+#define WRITEEF2BYTE(_ptr, _val) \
+ (*((u16 *)(_ptr)))=EF2BYTE(_val)
+#define WRITEEF4BYTE(_ptr, _val) \
+ (*((u32 *)(_ptr)))=EF4BYTE(_val)
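+
+/*Example (editor's illustration): for little-endian packet content,
+ the two bytes {0x3c, 0x00} read through READEF2BYTE() yield 0x003c on
+ both little- and big-endian hosts.*/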
+
+/*Example:
+BIT_LEN_MASK_32(0) => 0x00000000
+BIT_LEN_MASK_32(1) => 0x00000001
+BIT_LEN_MASK_32(2) => 0x00000003
+BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/
+#define BIT_LEN_MASK_32(__bitlen) \
+ (0xFFFFFFFF >> (32 - (__bitlen)))
+#define BIT_LEN_MASK_16(__bitlen) \
+ (0xFFFF >> (16 - (__bitlen)))
+#define BIT_LEN_MASK_8(__bitlen) \
+ (0xFF >> (8 - (__bitlen)))
+
+/*Example:
+BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
+BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/
+#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
+#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_16(__bitlen) << (__bitoffset))
+#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
+
+/*Description:
+Return 4-byte value in host byte ordering from
+4-byte pointer in little-endian system.*/
+#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
+ (EF4BYTE(*((u32 *)(__pstart))))
+#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
+ (EF2BYTE(*((u16 *)(__pstart))))
+#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
+ (EF1BYTE(*((u8 *)(__pstart))))
+
+/*Description:
+Translate subfield (continuous bits in little-endian) of 4-byte
+value to host byte ordering.*/
+#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ ( LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset) ) & \
+ BIT_LEN_MASK_32(__bitlen) \
+ )
+#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ ( LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset) ) & \
+ BIT_LEN_MASK_16(__bitlen) \
+ )
+#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ ( LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset) ) & \
+ BIT_LEN_MASK_8(__bitlen) \
+ )
+
+/*Description:
+Mask subfield (continuous bits in little-endian) of 4-byte value
+and return the result in 4-byte value in host byte ordering.*/
+#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \
+ ( ~BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) ) \
+ )
+#define LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ LE_P2BYTE_TO_HOST_2BYTE(__pstart) & \
+ ( ~BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) ) \
+ )
+#define LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ LE_P1BYTE_TO_HOST_1BYTE(__pstart) & \
+ ( ~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) ) \
+ )
+
+/*Description:
+Set subfield of little-endian 4-byte value to specified value. */
+#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
+ *((u32 *)(__pstart)) = EF4BYTE \
+ ( \
+ LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
+ ( (((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset) )\
+ );
+#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
+ *((u16 *)(__pstart)) = EF2BYTE \
+ ( \
+ LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
+ ( (((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset) )\
+ );
+#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
+ *((u8 *)(__pstart)) = EF1BYTE \
+ ( \
+ LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) | \
+ ( (((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset) ) \
+ );
+
+#define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \
+ (__value) : (((__value + __aligment - 1) / __aligment) * __aligment))
+
+/****************************************
+ mem access macro define end
+****************************************/
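/*
 * Illustrative sketch (not part of this patch): extracting and updating a
 * bitfield of a little-endian hardware descriptor with the macros above.
 * The descriptor layout assumed here (rate in bits [5:0] of the first
 * dword) is made up for the example.
 */
static inline void example_bump_desc_rate(u8 *pdesc)
{
	/* bits [5:0] of the first 32-bit word, converted to host order */
	u32 rate = LE_BITS_TO_4BYTE(pdesc, 0, 6);

	/* write the field back without touching the other bits */
	SET_BITS_TO_LE_4BYTE(pdesc, 0, 6, rate + 1);
}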
+
+#define byte(x,n) ((x >> (8 * n)) & 0xff)
+
+#define packet_get_type(_packet) (EF1BYTE((_packet).octet[0]) & 0xFC)
+#define RTL_WATCH_DOG_TIME 2000
+#define MSECS(t) msecs_to_jiffies(t)
+#define WLAN_FC_GET_VERS(fc) ((fc) & IEEE80211_FCTL_VERS)
+#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE)
+#define WLAN_FC_MORE_DATA(fc) ((fc) & IEEE80211_FCTL_MOREDATA)
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
+
+#define RT_RF_OFF_LEVL_ASPM BIT(0) /*PCI ASPM */
+#define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /*PCI clock request */
+#define RT_RF_OFF_LEVL_PCI_D3 BIT(2) /*PCI D3 mode */
+/*NIC halt, re-initialize hw parameters*/
+#define RT_RF_OFF_LEVL_HALT_NIC BIT(3)
+#define RT_RF_OFF_LEVL_FREE_FW BIT(4) /*FW free, re-download the FW */
+#define RT_RF_OFF_LEVL_FW_32K BIT(5) /*FW in 32k */
+/*Always enable ASPM and Clock Req in initialization.*/
+#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6)
+/* Set PS_ASPM_LEVL regardless of RFOFF or SLEEP. */
+#define RT_PS_LEVEL_ASPM BIT(7)
+/* When LPS is on, disable 2R if no packet is received or transmitted. */
+#define RT_RF_LPS_DISALBE_2R BIT(30)
+#define RT_RF_LPS_LEVEL_ASPM BIT(31) /*LPS with ASPM */
+#define RT_IN_PS_LEVEL(ppsc, _ps_flg) \
+ ((ppsc->cur_ps_level & _ps_flg) ? true : false)
+#define RT_CLEAR_PS_LEVEL(ppsc, _ps_flg) \
+ (ppsc->cur_ps_level &= (~(_ps_flg)))
+#define RT_SET_PS_LEVEL(ppsc, _ps_flg) \
+ (ppsc->cur_ps_level |= _ps_flg)
+
+#define container_of_dwork_rtl(x,y,z) \
+ container_of(container_of(x, struct delayed_work, work), y, z)
+
+#define FILL_OCTET_STRING(_os,_octet,_len) \
+ (_os).octet=(u8*)(_octet); \
+ (_os).length=(_len);
+
+#define CP_MACADDR(des,src) \
+ ((des)[0]=(src)[0],(des)[1]=(src)[1],\
+ (des)[2]=(src)[2],(des)[3]=(src)[3],\
+ (des)[4]=(src)[4],(des)[5]=(src)[5])
+
+static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return rtlpriv->io.read8_sync(rtlpriv, addr);
+}
+
+static inline u16 rtl_read_word(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return rtlpriv->io.read16_sync(rtlpriv, addr);
+}
+
+static inline u32 rtl_read_dword(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return rtlpriv->io.read32_sync(rtlpriv, addr);
+}
+
+static inline void rtl_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val8)
+{
+ rtlpriv->io.write8_async(rtlpriv, addr, val8);
+
+ if (rtlpriv->cfg->write_readback)
+ rtlpriv->io.read8_sync(rtlpriv, addr);
+}
+
+static inline void rtl_write_word(struct rtl_priv *rtlpriv, u32 addr, u16 val16)
+{
+ rtlpriv->io.write16_async(rtlpriv, addr, val16);
+
+ if (rtlpriv->cfg->write_readback)
+ rtlpriv->io.read16_sync(rtlpriv, addr);
+}
+
+static inline void rtl_write_dword(struct rtl_priv *rtlpriv,
+ u32 addr, u32 val32)
+{
+ rtlpriv->io.write32_async(rtlpriv, addr, val32);
+
+ if (rtlpriv->cfg->write_readback)
+ rtlpriv->io.read32_sync(rtlpriv, addr);
+}
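/*
 * Illustrative sketch (not part of this patch): a typical read-modify-write
 * through the I/O helpers above. The register offset 0x0522 and the bit
 * chosen here are only placeholders.
 */
static inline void example_set_reg_bit(struct rtl_priv *rtlpriv)
{
	u8 tmp = rtl_read_byte(rtlpriv, 0x0522);

	rtl_write_byte(rtlpriv, 0x0522, tmp | BIT(0));
}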
+
+static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask)
+{
+ return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_bbreg(hw,
+ regaddr,
+ bitmask);
+}
+
+static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr,
+ u32 bitmask, u32 data)
+{
+ ((struct rtl_priv *)(hw)->priv)->cfg->ops->set_bbreg(hw,
+ regaddr, bitmask,
+ data);
+
+}
+
+static inline u32 rtl_get_rfreg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask)
+{
+ return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_rfreg(hw,
+ rfpath,
+ regaddr,
+ bitmask);
+}
+
+static inline void rtl_set_rfreg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data)
+{
+ ((struct rtl_priv *)(hw)->priv)->cfg->ops->set_rfreg(hw,
+ rfpath, regaddr,
+ bitmask, data);
+}
+
+static inline bool is_hal_stop(struct rtl_hal *rtlhal)
+{
+ return (_HAL_STATE_STOP == rtlhal->state);
+}
+
+static inline void set_hal_start(struct rtl_hal *rtlhal)
+{
+ rtlhal->state = _HAL_STATE_START;
+}
+
+static inline void set_hal_stop(struct rtl_hal *rtlhal)
+{
+ rtlhal->state = _HAL_STATE_STOP;
+}
+
+static inline u8 get_rf_type(struct rtl_phy *rtlphy)
+{
+ return rtlphy->rf_type;
+}
+
+static inline struct ieee80211_hdr *rtl_get_hdr(struct sk_buff *skb)
+{
+ return (struct ieee80211_hdr *)(skb->data);
+}
+
+static inline u16 rtl_get_fc(struct sk_buff *skb)
+{
+ return le16_to_cpu(rtl_get_hdr(skb)->frame_control);
+}
+
+static inline u16 rtl_get_tid_h(struct ieee80211_hdr *hdr)
+{
+ return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+static inline u16 rtl_get_tid(struct sk_buff *skb)
+{
+ return rtl_get_tid_h(rtl_get_hdr(skb));
+}
+
+static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw,
+ u8 *mac_addr)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ return ieee80211_find_sta(mac->vif, mac_addr);
+}
+
+struct ieee80211_hw *rtl_pci_get_hw_pointer(void);
+#endif
diff --git a/drivers/staging/rts5139/ms.c b/drivers/staging/rts5139/ms.c
index a27f7e224e03..9253f6ab2e08 100644
--- a/drivers/staging/rts5139/ms.c
+++ b/drivers/staging/rts5139/ms.c
@@ -48,7 +48,7 @@ static inline int ms_check_err_code(struct rts51x_chip *chip, u8 err_code)
{
struct ms_info *ms_card = &(chip->ms_card);
- return (ms_card->err_code == err_code);
+ return ms_card->err_code == err_code;
}
static int ms_parse_err_code(struct rts51x_chip *chip)
diff --git a/drivers/staging/rts5139/rts51x.c b/drivers/staging/rts5139/rts51x.c
index 04213463123e..a8d2d046b44f 100644
--- a/drivers/staging/rts5139/rts51x.c
+++ b/drivers/staging/rts5139/rts51x.c
@@ -30,7 +30,6 @@
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/utsname.h>
diff --git a/drivers/staging/rts5139/rts51x_card.c b/drivers/staging/rts5139/rts51x_card.c
index 509d83e623a5..03456d9873e5 100644
--- a/drivers/staging/rts5139/rts51x_card.c
+++ b/drivers/staging/rts5139/rts51x_card.c
@@ -373,7 +373,7 @@ void rts51x_release_cards(struct rts51x_chip *chip)
static inline u8 double_depth(u8 depth)
{
- return ((depth > 1) ? (depth - 1) : depth);
+ return (depth > 1) ? (depth - 1) : depth;
}
int rts51x_switch_ssc_clock(struct rts51x_chip *chip, int clk)
@@ -653,8 +653,8 @@ int rts51x_switch_normal_clock(struct rts51x_chip *chip, int clk)
return STATUS_SUCCESS;
}
-int rts51x_card_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 sec_addr,
- u16 sec_cnt)
+int rts51x_card_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip,
+ u32 sec_addr, u16 sec_cnt)
{
int retval;
unsigned int lun = SCSI_LUN(srb);
@@ -770,8 +770,8 @@ void rts51x_eject_card(struct rts51x_chip *chip, unsigned int lun)
XD_INT | MS_INT | SD_INT);
}
-void rts51x_trans_dma_enable(enum dma_data_direction dir, struct rts51x_chip *chip,
- u32 byte_cnt, u8 pack_size)
+void rts51x_trans_dma_enable(enum dma_data_direction dir,
+ struct rts51x_chip *chip, u32 byte_cnt, u8 pack_size)
{
if (pack_size > DMA_1024)
pack_size = DMA_512;
diff --git a/drivers/staging/rts5139/rts51x_card.h b/drivers/staging/rts5139/rts51x_card.h
index e62b25c31413..df8816e0f840 100644
--- a/drivers/staging/rts5139/rts51x_card.h
+++ b/drivers/staging/rts5139/rts51x_card.h
@@ -743,13 +743,13 @@ void rts51x_init_cards(struct rts51x_chip *chip);
void rts51x_release_cards(struct rts51x_chip *chip);
int rts51x_switch_ssc_clock(struct rts51x_chip *chip, int clk);
int rts51x_switch_normal_clock(struct rts51x_chip *chip, int clk);
-int rts51x_card_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 sec_addr,
- u16 sec_cnt);
+int rts51x_card_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip,
+ u32 sec_addr, u16 sec_cnt);
u8 rts51x_get_lun_card(struct rts51x_chip *chip, unsigned int lun);
int rts51x_select_card(struct rts51x_chip *chip, int card);
void rts51x_eject_card(struct rts51x_chip *chip, unsigned int lun);
-void rts51x_trans_dma_enable(enum dma_data_direction dir, struct rts51x_chip *chip,
- u32 byte_cnt, u8 pack_size);
+void rts51x_trans_dma_enable(enum dma_data_direction dir,
+ struct rts51x_chip *chip, u32 byte_cnt, u8 pack_size);
int rts51x_enable_card_clock(struct rts51x_chip *chip, u8 card);
int rts51x_card_power_on(struct rts51x_chip *chip, u8 card);
int rts51x_toggle_gpio(struct rts51x_chip *chip, u8 gpio);
diff --git a/drivers/staging/rts5139/rts51x_scsi.c b/drivers/staging/rts5139/rts51x_scsi.c
index a474eede70a3..3a990253c780 100644
--- a/drivers/staging/rts5139/rts51x_scsi.c
+++ b/drivers/staging/rts5139/rts51x_scsi.c
@@ -1985,7 +1985,6 @@ static int show_info(struct seq_file *m, struct Scsi_Host *host)
SPRINTF(" Vendor: Realtek Corp.\n");
SPRINTF(" Product: RTS51xx USB Card Reader\n");
SPRINTF(" Version: %s\n", DRIVER_VERSION);
- SPRINTF(" Build: %s\n", __TIME__);
return 0;
}
diff --git a/drivers/staging/rts5208/Kconfig b/drivers/staging/rts5208/Kconfig
new file mode 100644
index 000000000000..055655cecaf7
--- /dev/null
+++ b/drivers/staging/rts5208/Kconfig
@@ -0,0 +1,15 @@
+config RTS5208
+ tristate "Realtek PCI-E Card Reader RTS5208/5288 support"
+ depends on PCI && SCSI
+ help
+ Say Y here to include driver code to support the Realtek
+ PCI-E card reader rts5208/rts5288.
+
+ If this driver is compiled as a module, it will be named rts5208.
+
+config RTS5208_DEBUG
+ bool "Realtek PCI-E Card Reader RTS5208/5288 verbose debug"
+ depends on RTS5208
+ help
+ Say Y here in order to have the rts5208 code generate
+ verbose debugging messages.
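For example, building the reader as a module with verbose debugging enabled
would typically correspond to a .config fragment like this (values are
illustrative):

    CONFIG_RTS5208=m
    CONFIG_RTS5208_DEBUG=y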
diff --git a/drivers/staging/rts5208/Makefile b/drivers/staging/rts5208/Makefile
new file mode 100644
index 000000000000..17b4471c4d6d
--- /dev/null
+++ b/drivers/staging/rts5208/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_RTS5208) := rts5208.o
+
+ccflags-y := -Idrivers/scsi
+
+rts5208-y := rtsx.o rtsx_chip.o rtsx_transport.o rtsx_scsi.o \
+ rtsx_card.o general.o sd.o xd.o ms.o spi.o
diff --git a/drivers/staging/rts5208/TODO b/drivers/staging/rts5208/TODO
new file mode 100644
index 000000000000..57bcf5834c0c
--- /dev/null
+++ b/drivers/staging/rts5208/TODO
@@ -0,0 +1,7 @@
+TODO:
+- use kernel coding style
+- checkpatch.pl fixes
+- We will use the stack in drivers/mmc to implement
+ rts5208/5288 in the future
+
+Micky Ching <micky_ching@realsil.com.cn> \ No newline at end of file
diff --git a/drivers/staging/rts5208/debug.h b/drivers/staging/rts5208/debug.h
new file mode 100644
index 000000000000..5ba8a3a0fbdc
--- /dev/null
+++ b/drivers/staging/rts5208/debug.h
@@ -0,0 +1,43 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_DEBUG_H
+#define __REALTEK_RTSX_DEBUG_H
+
+#include <linux/kernel.h>
+
+#define RTSX_STOR "rts5208: "
+
+#ifdef CONFIG_RTS5208_DEBUG
+#define RTSX_DEBUGP(x...) pr_debug(RTSX_STOR x)
+#define RTSX_DEBUGPN(x...) pr_debug(x)
+#define RTSX_DEBUGPX(x...) printk(x)
+#define RTSX_DEBUG(x) x
+#else
+#define RTSX_DEBUGP(x...)
+#define RTSX_DEBUGPN(x...)
+#define RTSX_DEBUGPX(x...)
+#define RTSX_DEBUG(x)
+#endif
+
+#endif /* __REALTEK_RTSX_DEBUG_H */
diff --git a/drivers/staging/rts5208/general.c b/drivers/staging/rts5208/general.c
new file mode 100644
index 000000000000..eada934288b2
--- /dev/null
+++ b/drivers/staging/rts5208/general.c
@@ -0,0 +1,35 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include "general.h"
+
+int bit1cnt_long(u32 data)
+{
+ int i, cnt = 0;
+ for (i = 0; i < 32; i++) {
+ if (data & 0x01)
+ cnt++;
+ data >>= 1;
+ }
+ return cnt;
+}
+
diff --git a/drivers/staging/rts5208/general.h b/drivers/staging/rts5208/general.h
new file mode 100644
index 000000000000..90a1f9297f5e
--- /dev/null
+++ b/drivers/staging/rts5208/general.h
@@ -0,0 +1,31 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __RTSX_GENERAL_H
+#define __RTSX_GENERAL_H
+
+#include "rtsx.h"
+
+int bit1cnt_long(u32 data);
+
+#endif /* __RTSX_GENERAL_H */
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
new file mode 100644
index 000000000000..edf979f18a6c
--- /dev/null
+++ b/drivers/staging/rts5208/ms.c
@@ -0,0 +1,4208 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include "rtsx.h"
+#include "rtsx_transport.h"
+#include "rtsx_scsi.h"
+#include "rtsx_card.h"
+#include "ms.h"
+
+static inline void ms_set_err_code(struct rtsx_chip *chip, u8 err_code)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ ms_card->err_code = err_code;
+}
+
+static inline int ms_check_err_code(struct rtsx_chip *chip, u8 err_code)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ return (ms_card->err_code == err_code);
+}
+
+static int ms_parse_err_code(struct rtsx_chip *chip)
+{
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+static int ms_transfer_tpc(struct rtsx_chip *chip, u8 trans_mode,
+ u8 tpc, u8 cnt, u8 cfg)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u8 *ptr;
+
+ RTSX_DEBUGP("ms_transfer_tpc: tpc = 0x%x\n", tpc);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER,
+ 0xFF, MS_TRANSFER_START | trans_mode);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+
+ rtsx_add_cmd(chip, READ_REG_CMD, MS_TRANS_CFG, 0, 0);
+
+ retval = rtsx_send_cmd(chip, MS_CARD, 5000);
+ if (retval < 0) {
+ rtsx_clear_ms_error(chip);
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+
+ if (!(tpc & 0x08)) { /* Read Packet */
+ if (*ptr & MS_CRC16_ERR) {
+ ms_set_err_code(chip, MS_CRC16_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+ } else { /* Write Packet */
+ if (CHK_MSPRO(ms_card) && !(*ptr & 0x80)) {
+ if (*ptr & (MS_INT_ERR | MS_INT_CMDNK)) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+ }
+ }
+
+ if (*ptr & MS_RDY_TIMEOUT) {
+ rtsx_clear_ms_error(chip);
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_transfer_data(struct rtsx_chip *chip, u8 trans_mode,
+ u8 tpc, u16 sec_cnt, u8 cfg, int mode_2k,
+ int use_sg, void *buf, int buf_len)
+{
+ int retval;
+ u8 val, err_code = 0;
+ enum dma_data_direction dir;
+
+ if (!buf || !buf_len)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (trans_mode == MS_TM_AUTO_READ) {
+ dir = DMA_FROM_DEVICE;
+ err_code = MS_FLASH_READ_ERROR;
+ } else if (trans_mode == MS_TM_AUTO_WRITE) {
+ dir = DMA_TO_DEVICE;
+ err_code = MS_FLASH_WRITE_ERROR;
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ MS_SECTOR_CNT_H, 0xFF, (u8)(sec_cnt >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_L, 0xFF, (u8)sec_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+
+ if (mode_2k) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ MS_CFG, MS_2K_SECTOR_MODE, MS_2K_SECTOR_MODE);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_CFG, MS_2K_SECTOR_MODE, 0);
+ }
+
+ trans_dma_enable(dir, chip, sec_cnt * 512, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ MS_TRANSFER, 0xFF, MS_TRANSFER_START | trans_mode);
+ rtsx_add_cmd(chip, CHECK_REG_CMD,
+ MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data(chip, MS_CARD, buf, buf_len,
+ use_sg, dir, chip->mspro_timeout);
+ if (retval < 0) {
+ ms_set_err_code(chip, err_code);
+ if (retval == -ETIMEDOUT)
+ retval = STATUS_TIMEDOUT;
+ else
+ retval = STATUS_FAIL;
+
+ TRACE_RET(chip, retval);
+ }
+
+ RTSX_READ_REG(chip, MS_TRANS_CFG, &val);
+ if (val & (MS_INT_CMDNK | MS_INT_ERR | MS_CRC16_ERR | MS_RDY_TIMEOUT))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_write_bytes(struct rtsx_chip *chip,
+ u8 tpc, u8 cnt, u8 cfg, u8 *data, int data_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+
+ if (!data || (data_len < cnt))
+ TRACE_RET(chip, STATUS_ERROR);
+
+ rtsx_init_cmd(chip);
+
+ for (i = 0; i < cnt; i++) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ PPBUF_BASE2 + i, 0xFF, data[i]);
+ }
+ if (cnt % 2)
+ rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2 + i, 0xFF, 0xFF);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ MS_TRANSFER, 0xFF, MS_TRANSFER_START | MS_TM_WRITE_BYTES);
+ rtsx_add_cmd(chip, CHECK_REG_CMD,
+ MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, MS_CARD, 5000);
+ if (retval < 0) {
+ u8 val = 0;
+
+ rtsx_read_register(chip, MS_TRANS_CFG, &val);
+ RTSX_DEBUGP("MS_TRANS_CFG: 0x%02x\n", val);
+
+ rtsx_clear_ms_error(chip);
+
+ if (!(tpc & 0x08)) {
+ if (val & MS_CRC16_ERR) {
+ ms_set_err_code(chip, MS_CRC16_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+ } else {
+ if (CHK_MSPRO(ms_card) && !(val & 0x80)) {
+ if (val & (MS_INT_ERR | MS_INT_CMDNK)) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip,
+ ms_parse_err_code(chip));
+ }
+ }
+ }
+
+ if (val & MS_RDY_TIMEOUT) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_read_bytes(struct rtsx_chip *chip,
+ u8 tpc, u8 cnt, u8 cfg, u8 *data, int data_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 *ptr;
+
+ if (!data)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_READ_BYTES);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+
+ for (i = 0; i < data_len - 1; i++)
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0);
+
+ if (data_len % 2)
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + data_len, 0, 0);
+ else
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + data_len - 1,
+ 0, 0);
+
+ retval = rtsx_send_cmd(chip, MS_CARD, 5000);
+ if (retval < 0) {
+ u8 val = 0;
+
+ rtsx_read_register(chip, MS_TRANS_CFG, &val);
+ rtsx_clear_ms_error(chip);
+
+ if (!(tpc & 0x08)) {
+ if (val & MS_CRC16_ERR) {
+ ms_set_err_code(chip, MS_CRC16_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+ } else {
+ if (CHK_MSPRO(ms_card) && !(val & 0x80)) {
+ if (val & (MS_INT_ERR | MS_INT_CMDNK)) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip,
+ ms_parse_err_code(chip));
+ }
+ }
+ }
+
+ if (val & MS_RDY_TIMEOUT) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ ms_set_err_code(chip, MS_TO_ERROR);
+ TRACE_RET(chip, ms_parse_err_code(chip));
+ }
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+
+ for (i = 0; i < data_len; i++)
+ data[i] = ptr[i];
+
+ if ((tpc == PRO_READ_SHORT_DATA) && (data_len == 8)) {
+ RTSX_DEBUGP("Read format progress:\n");
+ RTSX_DUMP(ptr, cnt);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_set_rw_reg_addr(struct rtsx_chip *chip,
+ u8 read_start, u8 read_cnt, u8 write_start, u8 write_cnt)
+{
+ int retval, i;
+ u8 data[4];
+
+ data[0] = read_start;
+ data[1] = read_cnt;
+ data[2] = write_start;
+ data[3] = write_cnt;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, SET_RW_REG_ADRS, 4,
+ NO_WAIT_INT, data, 4);
+ if (retval == STATUS_SUCCESS)
+ return STATUS_SUCCESS;
+ rtsx_clear_ms_error(chip);
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+static int ms_send_cmd(struct rtsx_chip *chip, u8 cmd, u8 cfg)
+{
+ u8 data[2];
+
+ data[0] = cmd;
+ data[1] = 0;
+
+ return ms_write_bytes(chip, PRO_SET_CMD, 1, cfg, data, 1);
+}
+
+static int ms_set_init_para(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ if (CHK_HG8BIT(ms_card)) {
+ if (chip->asic_code)
+ ms_card->ms_clock = chip->asic_ms_hg_clk;
+ else
+ ms_card->ms_clock = chip->fpga_ms_hg_clk;
+
+ } else if (CHK_MSPRO(ms_card) || CHK_MS4BIT(ms_card)) {
+ if (chip->asic_code)
+ ms_card->ms_clock = chip->asic_ms_4bit_clk;
+ else
+ ms_card->ms_clock = chip->fpga_ms_4bit_clk;
+
+ } else {
+ if (chip->asic_code)
+ ms_card->ms_clock = chip->asic_ms_1bit_clk;
+ else
+ ms_card->ms_clock = chip->fpga_ms_1bit_clk;
+ }
+
+ retval = switch_clock(chip, ms_card->ms_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = select_card(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_switch_clock(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ retval = select_card(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = switch_clock(chip, ms_card->ms_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_pull_ctl_disable(struct rtsx_chip *chip)
+{
+ if (CHECK_PID(chip, 0x5208)) {
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF,
+ MS_D1_PD | MS_D2_PD | MS_CLK_PD | MS_D6_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF,
+ MS_D3_PD | MS_D0_PD | MS_BS_PD | XD_D4_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF,
+ MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF,
+ XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL5, 0xFF,
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL6, 0xFF,
+ MS_D5_PD | MS_D4_PD);
+ } else if (CHECK_PID(chip, 0x5288)) {
+ if (CHECK_BARO_PKG(chip, QFN)) {
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF, 0x55);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF, 0x55);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF, 0x4B);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF, 0x69);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_pull_ctl_enable(struct rtsx_chip *chip)
+{
+ int retval;
+
+ rtsx_init_cmd(chip);
+
+ if (CHECK_PID(chip, 0x5208)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
+ MS_D1_PD | MS_D2_PD | MS_CLK_NP | MS_D6_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
+ MS_D3_PD | MS_D0_PD | MS_BS_NP | XD_D4_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
+ MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
+ XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
+ MS_D5_PD | MS_D4_PD);
+ } else if (CHECK_PID(chip, 0x5288)) {
+ if (CHECK_BARO_PKG(chip, QFN)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ CARD_PULL_CTL1, 0xFF, 0x55);
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ CARD_PULL_CTL2, 0xFF, 0x45);
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ CARD_PULL_CTL3, 0xFF, 0x4B);
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ CARD_PULL_CTL4, 0xFF, 0x29);
+ }
+ }
+
+ retval = rtsx_send_cmd(chip, MS_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_prepare_reset(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u8 oc_mask = 0;
+
+ ms_card->ms_type = 0;
+ ms_card->check_ms_flow = 0;
+ ms_card->switch_8bit_fail = 0;
+ ms_card->delay_write.delay_write_flag = 0;
+
+ ms_card->pro_under_formatting = 0;
+
+ retval = ms_power_off_card3v3(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!chip->ft2_fast_mode)
+ wait_timeout(250);
+
+ retval = enable_card_clock(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (chip->asic_code) {
+ retval = ms_pull_ctl_enable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ RTSX_WRITE_REG(chip, FPGA_PULL_CTL,
+ FPGA_MS_PULL_CTL_BIT | 0x20, 0);
+ }
+
+ if (!chip->ft2_fast_mode) {
+ retval = card_power_on(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(150);
+
+#ifdef SUPPORT_OCP
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
+ oc_mask = MS_OC_NOW | MS_OC_EVER;
+ else
+ oc_mask = SD_OC_NOW | SD_OC_EVER;
+
+ if (chip->ocp_stat & oc_mask) {
+ RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n",
+ chip->ocp_stat);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ }
+
+ RTSX_WRITE_REG(chip, CARD_OE, MS_OUTPUT_EN, MS_OUTPUT_EN);
+
+ if (chip->asic_code) {
+ RTSX_WRITE_REG(chip, MS_CFG, 0xFF,
+ SAMPLE_TIME_RISING | PUSH_TIME_DEFAULT |
+ NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1);
+ } else {
+ RTSX_WRITE_REG(chip, MS_CFG, 0xFF,
+ SAMPLE_TIME_FALLING | PUSH_TIME_DEFAULT |
+ NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1);
+ }
+ RTSX_WRITE_REG(chip, MS_TRANS_CFG,
+ 0xFF, NO_WAIT_INT | NO_AUTO_READ_INT_REG);
+ RTSX_WRITE_REG(chip, CARD_STOP,
+ MS_STOP | MS_CLR_ERR, MS_STOP | MS_CLR_ERR);
+
+ retval = ms_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 val;
+
+ retval = ms_set_rw_reg_addr(chip, Pro_StatusReg, 6, SystemParm, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, READ_REG,
+ 6, NO_WAIT_INT);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_READ_REG(chip, PPBUF_BASE2 + 2, &val);
+ RTSX_DEBUGP("Type register: 0x%x\n", val);
+ if (val != 0x01) {
+ if (val != 0x02)
+ ms_card->check_ms_flow = 1;
+
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_READ_REG(chip, PPBUF_BASE2 + 4, &val);
+ RTSX_DEBUGP("Category register: 0x%x\n", val);
+ if (val != 0) {
+ ms_card->check_ms_flow = 1;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_READ_REG(chip, PPBUF_BASE2 + 5, &val);
+ RTSX_DEBUGP("Class register: 0x%x\n", val);
+ if (val == 0) {
+ RTSX_READ_REG(chip, PPBUF_BASE2, &val);
+ if (val & WRT_PRTCT)
+ chip->card_wp |= MS_CARD;
+ else
+ chip->card_wp &= ~MS_CARD;
+
+ } else if ((val == 0x01) || (val == 0x02) || (val == 0x03)) {
+ chip->card_wp |= MS_CARD;
+ } else {
+ ms_card->check_ms_flow = 1;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ms_card->ms_type |= TYPE_MSPRO;
+
+ RTSX_READ_REG(chip, PPBUF_BASE2 + 3, &val);
+ RTSX_DEBUGP("IF Mode register: 0x%x\n", val);
+ if (val == 0) {
+ ms_card->ms_type &= 0x0F;
+ } else if (val == 7) {
+ if (switch_8bit_bus)
+ ms_card->ms_type |= MS_HG;
+ else
+ ms_card->ms_type &= 0x0F;
+
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_confirm_cpu_startup(struct rtsx_chip *chip)
+{
+ int retval, i, k;
+ u8 val;
+
+ /* Confirm CPU StartUp */
+ k = 0;
+ do {
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_read_bytes(chip, GET_INT, 1,
+ NO_WAIT_INT, &val, 1);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (k > 100)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ k++;
+ wait_timeout(100);
+ } while (!(val & INT_REG_CED));
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_ERR) {
+ if (val & INT_REG_CMDNK)
+ chip->card_wp |= (MS_CARD);
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ /* -- end confirm CPU startup */
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_switch_parallel_bus(struct rtsx_chip *chip)
+{
+ int retval, i;
+ u8 data[2];
+
+ data[0] = PARALLEL_4BIT_IF;
+ data[1] = 0;
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, WRITE_REG, 1, NO_WAIT_INT,
+ data, 2);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_switch_8bit_bus(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 data[2];
+
+ data[0] = PARALLEL_8BIT_IF;
+ data[1] = 0;
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, WRITE_REG, 1,
+ NO_WAIT_INT, data, 2);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, MS_CFG, 0x98,
+ MS_BUS_WIDTH_8 | SAMPLE_TIME_FALLING);
+ ms_card->ms_type |= MS_8BIT;
+ retval = ms_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT,
+ 1, NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_pro_reset_flow(struct rtsx_chip *chip, int switch_8bit_bus)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+
+ for (i = 0; i < 3; i++) {
+ retval = ms_prepare_reset(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_identify_media_type(chip, switch_8bit_bus);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_confirm_cpu_startup(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_switch_parallel_bus(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ /* Switch MS-PRO into Parallel mode */
+ RTSX_WRITE_REG(chip, MS_CFG, 0x18, MS_BUS_WIDTH_4);
+ RTSX_WRITE_REG(chip, MS_CFG, PUSH_TIME_ODD, PUSH_TIME_ODD);
+
+ retval = ms_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ /* If this is an MS-Pro HG card, try to switch to the 8-bit bus */
+ if (CHK_MSHG(ms_card) && chip->support_ms_8bit && switch_8bit_bus) {
+ retval = ms_switch_8bit_bus(chip);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->switch_8bit_fail = 1;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+#ifdef XC_POWERCLASS
+static int msxc_change_power(struct rtsx_chip *chip, u8 mode)
+{
+ int retval;
+ u8 buf[6];
+
+ ms_cleanup_work(chip);
+
+ retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ buf[0] = 0;
+ buf[1] = mode;
+ buf[2] = 0;
+ buf[3] = 0;
+ buf[4] = 0;
+ buf[5] = 0;
+
+ retval = ms_write_bytes(chip, PRO_WRITE_REG , 6, NO_WAIT_INT, buf, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_send_cmd(chip, XC_CHG_POWER, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_READ_REG(chip, MS_TRANS_CFG, buf);
+ if (buf[0] & (MS_INT_CMDNK | MS_INT_ERR))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+#endif
+
+static int ms_read_attribute_info(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 val, *buf, class_code, device_type, sub_class, data[16];
+ u16 total_blk = 0, blk_size = 0;
+#ifdef SUPPORT_MSXC
+ u32 xc_total_blk = 0, xc_blk_size = 0;
+#endif
+ u32 sys_info_addr = 0, sys_info_size;
+#ifdef SUPPORT_PCGL_1P18
+ u32 model_name_addr = 0, model_name_size;
+ int found_sys_info = 0, found_model_name = 0;
+#endif
+
+ retval = ms_set_rw_reg_addr(chip, Pro_IntReg, 2, Pro_SystemParm, 7);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_MS8BIT(ms_card))
+ data[0] = PARALLEL_8BIT_IF;
+ else
+ data[0] = PARALLEL_4BIT_IF;
+
+ data[1] = 0;
+
+ data[2] = 0x40;
+ data[3] = 0;
+ data[4] = 0;
+ data[5] = 0;
+ data[6] = 0;
+ data[7] = 0;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, PRO_WRITE_REG, 7, NO_WAIT_INT,
+ data, 8);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ buf = kmalloc(64 * 512, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_send_cmd(chip, PRO_READ_ATRB, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ continue;
+
+ retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (!(val & MS_INT_BREQ)) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ retval = ms_transfer_data(chip, MS_TM_AUTO_READ,
+ PRO_READ_LONG_DATA, 0x40, WAIT_INT,
+ 0, 0, buf, 64 * 512);
+ if (retval == STATUS_SUCCESS)
+ break;
+ else
+ rtsx_clear_ms_error(chip);
+ }
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ i = 0;
+ do {
+ retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if ((val & MS_INT_CED) || !(val & MS_INT_BREQ))
+ break;
+
+ retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ,
+ PRO_READ_LONG_DATA, 0, WAIT_INT);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ i++;
+ } while (i < 1024);
+
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if ((buf[0] != 0xa5) && (buf[1] != 0xc3)) {
+ /* Signature code is wrong */
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if ((buf[4] < 1) || (buf[4] > 12)) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ for (i = 0; i < buf[4]; i++) {
+ int cur_addr_off = 16 + i * 12;
+
+#ifdef SUPPORT_MSXC
+ if ((buf[cur_addr_off + 8] == 0x10) ||
+ (buf[cur_addr_off + 8] == 0x13))
+#else
+ if (buf[cur_addr_off + 8] == 0x10)
+#endif
+ {
+ sys_info_addr = ((u32)buf[cur_addr_off + 0] << 24) |
+ ((u32)buf[cur_addr_off + 1] << 16) |
+ ((u32)buf[cur_addr_off + 2] << 8) |
+ buf[cur_addr_off + 3];
+ sys_info_size = ((u32)buf[cur_addr_off + 4] << 24) |
+ ((u32)buf[cur_addr_off + 5] << 16) |
+ ((u32)buf[cur_addr_off + 6] << 8) |
+ buf[cur_addr_off + 7];
+ RTSX_DEBUGP("sys_info_addr = 0x%x, sys_info_size = 0x%x\n",
+ sys_info_addr, sys_info_size);
+ if (sys_info_size != 96) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (sys_info_addr < 0x1A0) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if ((sys_info_size + sys_info_addr) > 0x8000) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+#ifdef SUPPORT_MSXC
+ if (buf[cur_addr_off + 8] == 0x13)
+ ms_card->ms_type |= MS_XC;
+#endif
+#ifdef SUPPORT_PCGL_1P18
+ found_sys_info = 1;
+#else
+ break;
+#endif
+ }
+#ifdef SUPPORT_PCGL_1P18
+ if (buf[cur_addr_off + 8] == 0x15) {
+ model_name_addr = ((u32)buf[cur_addr_off + 0] << 24) |
+ ((u32)buf[cur_addr_off + 1] << 16) |
+ ((u32)buf[cur_addr_off + 2] << 8) |
+ buf[cur_addr_off + 3];
+ model_name_size = ((u32)buf[cur_addr_off + 4] << 24) |
+ ((u32)buf[cur_addr_off + 5] << 16) |
+ ((u32)buf[cur_addr_off + 6] << 8) |
+ buf[cur_addr_off + 7];
+ RTSX_DEBUGP("model_name_addr = 0x%x, model_name_size = 0x%x\n",
+ model_name_addr, model_name_size);
+ if (model_name_size != 48) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (model_name_addr < 0x1A0) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if ((model_name_size + model_name_addr) > 0x8000) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ found_model_name = 1;
+ }
+
+ if (found_sys_info && found_model_name)
+ break;
+#endif
+ }
+
+ if (i == buf[4]) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ class_code = buf[sys_info_addr + 0];
+ device_type = buf[sys_info_addr + 56];
+ sub_class = buf[sys_info_addr + 46];
+#ifdef SUPPORT_MSXC
+ if (CHK_MSXC(ms_card)) {
+ xc_total_blk = ((u32)buf[sys_info_addr + 6] << 24) |
+ ((u32)buf[sys_info_addr + 7] << 16) |
+ ((u32)buf[sys_info_addr + 8] << 8) |
+ buf[sys_info_addr + 9];
+ xc_blk_size = ((u32)buf[sys_info_addr + 32] << 24) |
+ ((u32)buf[sys_info_addr + 33] << 16) |
+ ((u32)buf[sys_info_addr + 34] << 8) |
+ buf[sys_info_addr + 35];
+ RTSX_DEBUGP("xc_total_blk = 0x%x, xc_blk_size = 0x%x\n",
+ xc_total_blk, xc_blk_size);
+ } else {
+ total_blk = ((u16)buf[sys_info_addr + 6] << 8) |
+ buf[sys_info_addr + 7];
+ blk_size = ((u16)buf[sys_info_addr + 2] << 8) |
+ buf[sys_info_addr + 3];
+ RTSX_DEBUGP("total_blk = 0x%x, blk_size = 0x%x\n",
+ total_blk, blk_size);
+ }
+#else
+ total_blk = ((u16)buf[sys_info_addr + 6] << 8) | buf[sys_info_addr + 7];
+ blk_size = ((u16)buf[sys_info_addr + 2] << 8) | buf[sys_info_addr + 3];
+ RTSX_DEBUGP("total_blk = 0x%x, blk_size = 0x%x\n", total_blk, blk_size);
+#endif
+
+ RTSX_DEBUGP("class_code = 0x%x, device_type = 0x%x, sub_class = 0x%x\n",
+ class_code, device_type, sub_class);
+
+ memcpy(ms_card->raw_sys_info, buf + sys_info_addr, 96);
+#ifdef SUPPORT_PCGL_1P18
+ memcpy(ms_card->raw_model_name, buf + model_name_addr, 48);
+#endif
+
+ kfree(buf);
+
+#ifdef SUPPORT_MSXC
+ if (CHK_MSXC(ms_card)) {
+ if (class_code != 0x03)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ if (class_code != 0x02)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#else
+ if (class_code != 0x02)
+ TRACE_RET(chip, STATUS_FAIL);
+#endif
+
+ if (device_type != 0x00) {
+ if ((device_type == 0x01) || (device_type == 0x02) ||
+ (device_type == 0x03)) {
+ chip->card_wp |= MS_CARD;
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (sub_class & 0xC0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_DEBUGP("class_code: 0x%x, device_type: 0x%x, sub_class: 0x%x\n",
+ class_code, device_type, sub_class);
+
+#ifdef SUPPORT_MSXC
+ if (CHK_MSXC(ms_card)) {
+ chip->capacity[chip->card2lun[MS_CARD]] =
+ ms_card->capacity = xc_total_blk * xc_blk_size;
+ } else {
+ chip->capacity[chip->card2lun[MS_CARD]] =
+ ms_card->capacity = total_blk * blk_size;
+ }
+#else
+ ms_card->capacity = total_blk * blk_size;
+ chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity;
+#endif
+
+ return STATUS_SUCCESS;
+}
+
+#ifdef SUPPORT_MAGIC_GATE
+static int mg_set_tpc_para_sub(struct rtsx_chip *chip,
+ int type, u8 mg_entry_num);
+#endif
+
+static int reset_ms_pro(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+#ifdef XC_POWERCLASS
+ u8 change_power_class;
+
+ if (chip->ms_power_class_en & 0x02)
+ change_power_class = 2;
+ else if (chip->ms_power_class_en & 0x01)
+ change_power_class = 1;
+ else
+ change_power_class = 0;
+#endif
+
+#ifdef XC_POWERCLASS
+Retry:
+#endif
+ retval = ms_pro_reset_flow(chip, 1);
+ if (retval != STATUS_SUCCESS) {
+ if (ms_card->switch_8bit_fail) {
+ retval = ms_pro_reset_flow(chip, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ retval = ms_read_attribute_info(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+#ifdef XC_POWERCLASS
+ if (CHK_HG8BIT(ms_card))
+ change_power_class = 0;
+
+ if (change_power_class && CHK_MSXC(ms_card)) {
+ u8 power_class_en = chip->ms_power_class_en;
+
+ RTSX_DEBUGP("power_class_en = 0x%x\n", power_class_en);
+ RTSX_DEBUGP("change_power_class = %d\n", change_power_class);
+
+ if (change_power_class)
+ power_class_en &= (1 << (change_power_class - 1));
+ else
+ power_class_en = 0;
+
+ if (power_class_en) {
+ u8 power_class_mode =
+ (ms_card->raw_sys_info[46] & 0x18) >> 3;
+ RTSX_DEBUGP("power_class_mode = 0x%x",
+ power_class_mode);
+ if (change_power_class > power_class_mode)
+ change_power_class = power_class_mode;
+ if (change_power_class) {
+ retval = msxc_change_power(chip,
+ change_power_class);
+ if (retval != STATUS_SUCCESS) {
+ change_power_class--;
+ goto Retry;
+ }
+ }
+ }
+ }
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+ retval = mg_set_tpc_para_sub(chip, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+#endif
+
+ if (CHK_HG8BIT(ms_card))
+ chip->card_bus_width[chip->card2lun[MS_CARD]] = 8;
+ else
+ chip->card_bus_width[chip->card2lun[MS_CARD]] = 4;
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_read_status_reg(struct rtsx_chip *chip)
+{
+ int retval;
+ u8 val[2];
+
+ retval = ms_set_rw_reg_addr(chip, StatusReg0, 2, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_read_bytes(chip, READ_REG, 2, NO_WAIT_INT, val, 2);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val[1] & (STS_UCDT | STS_UCEX | STS_UCFG)) {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+
+static int ms_read_extra_data(struct rtsx_chip *chip,
+ u16 block_addr, u8 page_num, u8 *buf, int buf_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 val, data[10];
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_MS4BIT(ms_card)) {
+ /* Parallel interface */
+ data[0] = 0x88;
+ } else {
+ /* Serial interface */
+ data[0] = 0x80;
+ }
+ data[1] = 0;
+ data[2] = (u8)(block_addr >> 8);
+ data[3] = (u8)block_addr;
+ data[4] = 0x40;
+ data[5] = page_num;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT,
+ data, 6);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
+ MS_EXTRA_SIZE, SystemParm, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ retval = ms_read_bytes(chip, READ_REG, MS_EXTRA_SIZE, NO_WAIT_INT,
+ data, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (buf && buf_len) {
+ if (buf_len > MS_EXTRA_SIZE)
+ buf_len = MS_EXTRA_SIZE;
+ memcpy(buf, data, buf_len);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_write_extra_data(struct rtsx_chip *chip,
+ u16 block_addr, u8 page_num, u8 *buf, int buf_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 val, data[16];
+
+ if (!buf || (buf_len < MS_EXTRA_SIZE))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 6 + MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(block_addr >> 8);
+ data[3] = (u8)block_addr;
+ data[4] = 0x40;
+ data[5] = page_num;
+
+ for (i = 6; i < MS_EXTRA_SIZE + 6; i++)
+ data[i] = buf[i - 6];
+
+ retval = ms_write_bytes(chip, WRITE_REG , (6+MS_EXTRA_SIZE),
+ NO_WAIT_INT, data, 16);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+
+static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u8 val, data[6];
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(block_addr >> 8);
+ data[3] = (u8)block_addr;
+ data[4] = 0x20;
+ data[5] = page_num;
+
+ retval = ms_write_bytes(chip, WRITE_REG , 6, NO_WAIT_INT, data, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ if (!(val & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS)
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+
+ } else {
+ if (!(val & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_BREQ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ, READ_PAGE_DATA,
+ 0, NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (ms_check_err_code(chip, MS_FLASH_WRITE_ERROR))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+
+static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u8 val, data[8], extra[MS_EXTRA_SIZE];
+
+ retval = ms_read_extra_data(chip, phy_blk, 0, extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 7);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(phy_blk >> 8);
+ data[3] = (u8)phy_blk;
+ data[4] = 0x80;
+ data[5] = 0;
+ data[6] = extra[0] & 0x7F;
+ data[7] = 0xFF;
+
+ retval = ms_write_bytes(chip, WRITE_REG, 7, NO_WAIT_INT, data, 7);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+
+static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i = 0;
+ u8 val, data[6];
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(phy_blk >> 8);
+ data[3] = (u8)phy_blk;
+ data[4] = 0;
+ data[5] = 0;
+
+ retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT, data, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ERASE_RTY:
+ retval = ms_send_cmd(chip, BLOCK_ERASE, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ if (i < 3) {
+ i++;
+ goto ERASE_RTY;
+ }
+
+ ms_set_err_code(chip, MS_CMD_NK);
+ ms_set_bad_block(chip, phy_blk);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+
+static void ms_set_page_status(u16 log_blk, u8 type, u8 *extra, int extra_len)
+{
+ if (!extra || (extra_len < MS_EXTRA_SIZE))
+ return;
+
+ memset(extra, 0xFF, MS_EXTRA_SIZE);
+
+ if (type == setPS_NG) {
+ /* set page status to 1 (NG); block status stays 1 (OK) */
+ extra[0] = 0xB8;
+ } else {
+ /* set page status to 0 (Data Error); block status stays 1 (OK) */
+ extra[0] = 0x98;
+ }
+
+ extra[2] = (u8)(log_blk >> 8);
+ extra[3] = (u8)log_blk;
+}
+
+static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk,
+ u8 start_page, u8 end_page)
+{
+ int retval;
+ u8 extra[MS_EXTRA_SIZE], i;
+
+ memset(extra, 0xff, MS_EXTRA_SIZE);
+
+ extra[0] = 0xf8; /* Block, page OK, data erased */
+ extra[1] = 0xff;
+ extra[2] = (u8)(log_blk >> 8);
+ extra[3] = (u8)log_blk;
+
+ for (i = start_page; i < end_page; i++) {
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_write_extra_data(chip, phy_blk, i,
+ extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
+ u16 log_blk, u8 start_page, u8 end_page)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, rty_cnt, uncorrect_flag = 0;
+ u8 extra[MS_EXTRA_SIZE], val, i, j, data[16];
+
+ RTSX_DEBUGP("Copy page from 0x%x to 0x%x, logical block is 0x%x\n",
+ old_blk, new_blk, log_blk);
+ RTSX_DEBUGP("start_page = %d, end_page = %d\n", start_page, end_page);
+
+ retval = ms_read_extra_data(chip, new_blk, 0, extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_READ_REG(chip, PPBUF_BASE2, &val);
+
+ if (val & BUF_FULL) {
+ retval = ms_send_cmd(chip, CLEAR_BUF, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!(val & INT_REG_CED)) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ for (i = start_page; i < end_page; i++) {
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ms_read_extra_data(chip, old_blk, i, extra, MS_EXTRA_SIZE);
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
+ MS_EXTRA_SIZE, SystemParm, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(old_blk >> 8);
+ data[3] = (u8)old_blk;
+ data[4] = 0x20;
+ data[5] = i;
+
+ retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT,
+ data, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS) {
+ uncorrect_flag = 1;
+ RTSX_DEBUGP("Uncorrectable error\n");
+ } else {
+ uncorrect_flag = 0;
+ }
+
+ retval = ms_transfer_tpc(chip,
+ MS_TM_NORMAL_READ,
+ READ_PAGE_DATA,
+ 0, NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (uncorrect_flag) {
+ ms_set_page_status(log_blk, setPS_NG,
+ extra, MS_EXTRA_SIZE);
+ if (i == 0)
+ extra[0] &= 0xEF;
+
+ ms_write_extra_data(chip, old_blk, i,
+ extra, MS_EXTRA_SIZE);
+ RTSX_DEBUGP("page %d : extra[0] = 0x%x\n", i, extra[0]);
+ MS_SET_BAD_BLOCK_FLG(ms_card);
+
+ ms_set_page_status(log_blk, setPS_Error,
+ extra, MS_EXTRA_SIZE);
+ ms_write_extra_data(chip, new_blk, i,
+ extra, MS_EXTRA_SIZE);
+ continue;
+ }
+
+ for (rty_cnt = 0; rty_cnt < MS_MAX_RETRY_COUNT;
+ rty_cnt++) {
+ retval = ms_transfer_tpc(
+ chip,
+ MS_TM_NORMAL_WRITE,
+ WRITE_PAGE_DATA,
+ 0, NO_WAIT_INT);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (rty_cnt == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (!(val & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_BREQ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
+ MS_EXTRA_SIZE, SystemParm, (6 + MS_EXTRA_SIZE));
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(new_blk >> 8);
+ data[3] = (u8)new_blk;
+ data[4] = 0x20;
+ data[5] = i;
+
+ if ((extra[0] & 0x60) != 0x60)
+ data[6] = extra[0];
+ else
+ data[6] = 0xF8;
+
+ data[6 + 1] = 0xFF;
+ data[6 + 2] = (u8)(log_blk >> 8);
+ data[6 + 3] = (u8)log_blk;
+
+ for (j = 4; j <= MS_EXTRA_SIZE; j++)
+ data[6 + j] = 0xFF;
+
+ retval = ms_write_bytes(chip, WRITE_REG, (6 + MS_EXTRA_SIZE),
+ NO_WAIT_INT, data, 16);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (i == 0) {
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
+ MS_EXTRA_SIZE, SystemParm, 7);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(old_blk >> 8);
+ data[3] = (u8)old_blk;
+ data[4] = 0x80;
+ data[5] = 0;
+ data[6] = 0xEF;
+ data[7] = 0xFF;
+
+ retval = ms_write_bytes(chip, WRITE_REG, 7,
+ NO_WAIT_INT, data, 8);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_read_bytes(chip, GET_INT, 1,
+ NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CED) {
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip,
+ MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+
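+/* Reset a legacy (non-PRO) Memory Stick: locate the boot block, parse the
+ * boot area (header ID, page size, block size, block count, effective block
+ * count, write protection) to derive the capacity, and switch the interface
+ * to 4-bit parallel mode when the card supports it.
+ */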
+static int reset_ms(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ u16 i, reg_addr, block_size;
+ u8 val, extra[MS_EXTRA_SIZE], j, *ptr;
+#ifndef SUPPORT_MAGIC_GATE
+ u16 eblock_cnt;
+#endif
+
+ retval = ms_prepare_reset(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_card->ms_type |= TYPE_MS;
+
+ retval = ms_send_cmd(chip, MS_RESET, NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_READ_REG(chip, PPBUF_BASE2, &val);
+ if (val & WRT_PRTCT)
+ chip->card_wp |= MS_CARD;
+ else
+ chip->card_wp &= ~MS_CARD;
+
+ i = 0;
+
+RE_SEARCH:
+ /* Search Boot Block */
+ while (i < (MAX_DEFECTIVE_BLOCK + 2)) {
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_read_extra_data(chip, i, 0, extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS) {
+ i++;
+ continue;
+ }
+
+ if (extra[0] & BLOCK_OK) {
+ if (!(extra[1] & NOT_BOOT_BLOCK)) {
+ ms_card->boot_block = i;
+ break;
+ }
+ }
+ i++;
+ }
+
+ if (i == (MAX_DEFECTIVE_BLOCK + 2)) {
+ RTSX_DEBUGP("No boot block found!");
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ for (j = 0; j < 3; j++) {
+ retval = ms_read_page(chip, ms_card->boot_block, j);
+ if (retval != STATUS_SUCCESS) {
+ if (ms_check_err_code(chip, MS_FLASH_WRITE_ERROR)) {
+ i = ms_card->boot_block + 1;
+ ms_set_err_code(chip, MS_NO_ERROR);
+ goto RE_SEARCH;
+ }
+ }
+ }
+
+ retval = ms_read_page(chip, ms_card->boot_block, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ /* Read MS system information as sys_info */
+ rtsx_init_cmd(chip);
+
+ for (i = 0; i < 96; i++)
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 0x1A0 + i, 0, 0);
+
+ retval = rtsx_send_cmd(chip, MS_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ptr = rtsx_get_cmd_data(chip);
+ memcpy(ms_card->raw_sys_info, ptr, 96);
+
+ /* Read useful block contents */
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, READ_REG_CMD, HEADER_ID0, 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, HEADER_ID1, 0, 0);
+
+ for (reg_addr = DISABLED_BLOCK0; reg_addr <= DISABLED_BLOCK3;
+ reg_addr++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
+
+ for (reg_addr = BLOCK_SIZE_0; reg_addr <= PAGE_SIZE_1; reg_addr++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
+
+ rtsx_add_cmd(chip, READ_REG_CMD, MS_Device_Type, 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, MS_4bit_Support, 0, 0);
+
+ retval = rtsx_send_cmd(chip, MS_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ptr = rtsx_get_cmd_data(chip);
+
+ RTSX_DEBUGP("Boot block data:\n");
+ RTSX_DUMP(ptr, 16);
+
+ /* Block ID error
+ * HEADER_ID0, HEADER_ID1
+ */
+ if (ptr[0] != 0x00 || ptr[1] != 0x01) {
+ i = ms_card->boot_block + 1;
+ goto RE_SEARCH;
+ }
+
+ /* Page size error
+ * PAGE_SIZE_0, PAGE_SIZE_1
+ */
+ if (ptr[12] != 0x02 || ptr[13] != 0x00) {
+ i = ms_card->boot_block + 1;
+ goto RE_SEARCH;
+ }
+
+ if ((ptr[14] == 1) || (ptr[14] == 3))
+ chip->card_wp |= MS_CARD;
+
+ /* BLOCK_SIZE_0, BLOCK_SIZE_1 */
+ block_size = ((u16)ptr[6] << 8) | ptr[7];
+ if (block_size == 0x0010) {
+ /* Block size 16KB */
+ ms_card->block_shift = 5;
+ ms_card->page_off = 0x1F;
+ } else if (block_size == 0x0008) {
+ /* Block size 8KB */
+ ms_card->block_shift = 4;
+ ms_card->page_off = 0x0F;
+ }
+
+ /* BLOCK_COUNT_0, BLOCK_COUNT_1 */
+ ms_card->total_block = ((u16)ptr[8] << 8) | ptr[9];
+
+#ifdef SUPPORT_MAGIC_GATE
+ j = ptr[10];
+
+ if (ms_card->block_shift == 4) { /* 4MB or 8MB */
+ if (j < 2) { /* Effective block for 4MB: 0x1F0 */
+ ms_card->capacity = 0x1EE0;
+ } else { /* Effective block for 8MB: 0x3E0 */
+ ms_card->capacity = 0x3DE0;
+ }
+ } else { /* 16MB, 32MB, 64MB or 128MB */
+ if (j < 5) { /* Effective block for 16MB: 0x3E0 */
+ ms_card->capacity = 0x7BC0;
+ } else if (j < 0xA) { /* Effective block for 32MB: 0x7C0 */
+ ms_card->capacity = 0xF7C0;
+ } else if (j < 0x11) { /* Effective block for 64MB: 0xF80 */
+ ms_card->capacity = 0x1EF80;
+ } else { /* Effective block for 128MB: 0x1F00 */
+ ms_card->capacity = 0x3DF00;
+ }
+ }
+#else
+ /* EBLOCK_COUNT_0, EBLOCK_COUNT_1 */
+ eblock_cnt = ((u16)ptr[10] << 8) | ptr[11];
+
+ ms_card->capacity = ((u32)eblock_cnt - 2) << ms_card->block_shift;
+#endif
+
+ chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity;
+
+ /* Switch I/F Mode */
+ if (ptr[15]) {
+ retval = ms_set_rw_reg_addr(chip, 0, 0, SystemParm, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, PPBUF_BASE2, 0xFF, 0x88);
+ RTSX_WRITE_REG(chip, PPBUF_BASE2 + 1, 0xFF, 0);
+
+ retval = ms_transfer_tpc(chip, MS_TM_WRITE_BYTES, WRITE_REG, 1,
+ NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, MS_CFG, 0x58 | MS_NO_CHECK_INT,
+ MS_BUS_WIDTH_4 | PUSH_TIME_ODD | MS_NO_CHECK_INT);
+
+ ms_card->ms_type |= MS_4BIT;
+ }
+
+ if (CHK_MS4BIT(ms_card))
+ chip->card_bus_width[chip->card2lun[MS_CARD]] = 4;
+ else
+ chip->card_bus_width[chip->card2lun[MS_CARD]] = 1;
+
+ return STATUS_SUCCESS;
+}
+
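+/* Allocate the per-segment zone table and read the defect list from page 1
+ * of the boot block, assigning each defective physical block to the defect
+ * list of its segment.
+ */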
+static int ms_init_l2p_tbl(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int size, i, seg_no, retval;
+ u16 defect_block, reg_addr;
+ u8 val1, val2;
+
+ ms_card->segment_cnt = ms_card->total_block >> 9;
+ RTSX_DEBUGP("ms_card->segment_cnt = %d\n", ms_card->segment_cnt);
+
+ size = ms_card->segment_cnt * sizeof(struct zone_entry);
+ ms_card->segment = vzalloc(size);
+ if (ms_card->segment == NULL)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_read_page(chip, ms_card->boot_block, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, INIT_FAIL);
+
+ reg_addr = PPBUF_BASE2;
+ for (i = 0; i < (((ms_card->total_block >> 9) * 10) + 1); i++) {
+ retval = rtsx_read_register(chip, reg_addr++, &val1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, INIT_FAIL);
+
+ retval = rtsx_read_register(chip, reg_addr++, &val2);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, INIT_FAIL);
+
+ defect_block = ((u16)val1 << 8) | val2;
+ if (defect_block == 0xFFFF)
+ break;
+
+ seg_no = defect_block / 512;
+ ms_card->segment[seg_no].defect_list[ms_card->segment[seg_no].disable_count++] = defect_block;
+ }
+
+ for (i = 0; i < ms_card->segment_cnt; i++) {
+ ms_card->segment[i].build_flag = 0;
+ ms_card->segment[i].l2p_table = NULL;
+ ms_card->segment[i].free_table = NULL;
+ ms_card->segment[i].get_index = 0;
+ ms_card->segment[i].set_index = 0;
+ ms_card->segment[i].unused_blk_cnt = 0;
+
+ RTSX_DEBUGP("defective block count of segment %d is %d\n",
+ i, ms_card->segment[i].disable_count);
+ }
+
+ return STATUS_SUCCESS;
+
+INIT_FAIL:
+ if (ms_card->segment) {
+ vfree(ms_card->segment);
+ ms_card->segment = NULL;
+ }
+
+ return STATUS_FAIL;
+}
+
+static u16 ms_get_l2p_tbl(struct rtsx_chip *chip, int seg_no, u16 log_off)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+
+ if (ms_card->segment == NULL)
+ return 0xFFFF;
+
+ segment = &(ms_card->segment[seg_no]);
+
+ if (segment->l2p_table)
+ return segment->l2p_table[log_off];
+
+ return 0xFFFF;
+}
+
+static void ms_set_l2p_tbl(struct rtsx_chip *chip,
+ int seg_no, u16 log_off, u16 phy_blk)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+
+ if (ms_card->segment == NULL)
+ return;
+
+ segment = &(ms_card->segment[seg_no]);
+ if (segment->l2p_table)
+ segment->l2p_table[log_off] = phy_blk;
+}
+
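+/* Each segment keeps its unused physical blocks in free_table, used as a
+ * ring buffer: set_index is where blocks are returned, get_index is where
+ * they are handed out, both wrapping at MS_FREE_TABLE_CNT.
+ */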
+static void ms_set_unused_block(struct rtsx_chip *chip, u16 phy_blk)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+ int seg_no;
+
+ seg_no = (int)phy_blk >> 9;
+ segment = &(ms_card->segment[seg_no]);
+
+ segment->free_table[segment->set_index++] = phy_blk;
+ if (segment->set_index >= MS_FREE_TABLE_CNT)
+ segment->set_index = 0;
+
+ segment->unused_blk_cnt++;
+}
+
+static u16 ms_get_unused_block(struct rtsx_chip *chip, int seg_no)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+ u16 phy_blk;
+
+ segment = &(ms_card->segment[seg_no]);
+
+ if (segment->unused_blk_cnt <= 0)
+ return 0xFFFF;
+
+ phy_blk = segment->free_table[segment->get_index];
+ segment->free_table[segment->get_index++] = 0xFFFF;
+ if (segment->get_index >= MS_FREE_TABLE_CNT)
+ segment->get_index = 0;
+
+ segment->unused_blk_cnt--;
+
+ return phy_blk;
+}
+
+static const unsigned short ms_start_idx[] = {0, 494, 990, 1486, 1982, 2478,
+ 2974, 3470, 3966, 4462, 4958,
+ 5454, 5950, 6446, 6942, 7438,
+ 7934};
+
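+/* Two physical blocks claim the same logical offset. Decide which one stays
+ * in the L2P table based on the update-status bits read from their extra
+ * data (us1 for phy_blk, us2 for the block already in the table); the loser
+ * is erased (unless the card is write protected) and returned to the free
+ * list.
+ */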
+static int ms_arbitrate_l2p(struct rtsx_chip *chip, u16 phy_blk,
+ u16 log_off, u8 us1, u8 us2)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+ int seg_no;
+ u16 tmp_blk;
+
+ seg_no = (int)phy_blk >> 9;
+ segment = &(ms_card->segment[seg_no]);
+ tmp_blk = segment->l2p_table[log_off];
+
+ if (us1 != us2) {
+ if (us1 == 0) {
+ if (!(chip->card_wp & MS_CARD))
+ ms_erase_block(chip, tmp_blk);
+
+ ms_set_unused_block(chip, tmp_blk);
+ segment->l2p_table[log_off] = phy_blk;
+ } else {
+ if (!(chip->card_wp & MS_CARD))
+ ms_erase_block(chip, phy_blk);
+
+ ms_set_unused_block(chip, phy_blk);
+ }
+ } else {
+ if (phy_blk < tmp_blk) {
+ if (!(chip->card_wp & MS_CARD))
+ ms_erase_block(chip, phy_blk);
+
+ ms_set_unused_block(chip, phy_blk);
+ } else {
+ if (!(chip->card_wp & MS_CARD))
+ ms_erase_block(chip, tmp_blk);
+
+ ms_set_unused_block(chip, tmp_blk);
+ segment->l2p_table[log_off] = phy_blk;
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
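+/* Build the logical-to-physical table for one 512-block segment: scan every
+ * physical block, skip listed defects, recycle erased or out-of-range blocks
+ * as unused, resolve duplicate logical addresses via ms_arbitrate_l2p(), and
+ * assign spare blocks to any logical address left unmapped. The card is
+ * treated as write protected if too few spare blocks remain.
+ */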
+static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct zone_entry *segment;
+ int retval, table_size, disable_cnt, defect_flag, i;
+ u16 start, end, phy_blk, log_blk, tmp_blk;
+ u8 extra[MS_EXTRA_SIZE], us1, us2;
+
+ RTSX_DEBUGP("ms_build_l2p_tbl: %d\n", seg_no);
+
+ if (ms_card->segment == NULL) {
+ retval = ms_init_l2p_tbl(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ }
+
+ if (ms_card->segment[seg_no].build_flag) {
+ RTSX_DEBUGP("l2p table of segment %d has been built\n", seg_no);
+ return STATUS_SUCCESS;
+ }
+
+ if (seg_no == 0)
+ table_size = 494;
+ else
+ table_size = 496;
+
+ segment = &(ms_card->segment[seg_no]);
+
+ if (segment->l2p_table == NULL) {
+ segment->l2p_table = vmalloc(table_size * 2);
+ if (segment->l2p_table == NULL)
+ TRACE_GOTO(chip, BUILD_FAIL);
+ }
+ memset((u8 *)(segment->l2p_table), 0xff, table_size * 2);
+
+ if (segment->free_table == NULL) {
+ segment->free_table = vmalloc(MS_FREE_TABLE_CNT * 2);
+ if (segment->free_table == NULL)
+ TRACE_GOTO(chip, BUILD_FAIL);
+ }
+ memset((u8 *)(segment->free_table), 0xff, MS_FREE_TABLE_CNT * 2);
+
+ start = (u16)seg_no << 9;
+ end = (u16)(seg_no + 1) << 9;
+
+ disable_cnt = segment->disable_count;
+
+ segment->get_index = segment->set_index = 0;
+ segment->unused_blk_cnt = 0;
+
+ for (phy_blk = start; phy_blk < end; phy_blk++) {
+ if (disable_cnt) {
+ defect_flag = 0;
+ for (i = 0; i < segment->disable_count; i++) {
+ if (phy_blk == segment->defect_list[i]) {
+ defect_flag = 1;
+ break;
+ }
+ }
+ if (defect_flag) {
+ disable_cnt--;
+ continue;
+ }
+ }
+
+ retval = ms_read_extra_data(chip, phy_blk, 0,
+ extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS) {
+ RTSX_DEBUGP("read extra data fail\n");
+ ms_set_bad_block(chip, phy_blk);
+ continue;
+ }
+
+ if (seg_no == ms_card->segment_cnt - 1) {
+ if (!(extra[1] & NOT_TRANSLATION_TABLE)) {
+ if (!(chip->card_wp & MS_CARD)) {
+ retval = ms_erase_block(chip, phy_blk);
+ if (retval != STATUS_SUCCESS)
+ continue;
+ extra[2] = 0xff;
+ extra[3] = 0xff;
+ }
+ }
+ }
+
+ if (!(extra[0] & BLOCK_OK))
+ continue;
+ if (!(extra[1] & NOT_BOOT_BLOCK))
+ continue;
+ if ((extra[0] & PAGE_OK) != PAGE_OK)
+ continue;
+
+ log_blk = ((u16)extra[2] << 8) | extra[3];
+
+ if (log_blk == 0xFFFF) {
+ if (!(chip->card_wp & MS_CARD)) {
+ retval = ms_erase_block(chip, phy_blk);
+ if (retval != STATUS_SUCCESS)
+ continue;
+ }
+ ms_set_unused_block(chip, phy_blk);
+ continue;
+ }
+
+ if ((log_blk < ms_start_idx[seg_no]) ||
+ (log_blk >= ms_start_idx[seg_no+1])) {
+ if (!(chip->card_wp & MS_CARD)) {
+ retval = ms_erase_block(chip, phy_blk);
+ if (retval != STATUS_SUCCESS)
+ continue;
+ }
+ ms_set_unused_block(chip, phy_blk);
+ continue;
+ }
+
+ if (segment->l2p_table[log_blk - ms_start_idx[seg_no]] == 0xFFFF) {
+ segment->l2p_table[log_blk - ms_start_idx[seg_no]] = phy_blk;
+ continue;
+ }
+
+ us1 = extra[0] & 0x10;
+ tmp_blk = segment->l2p_table[log_blk - ms_start_idx[seg_no]];
+ retval = ms_read_extra_data(chip, tmp_blk, 0,
+ extra, MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ continue;
+ us2 = extra[0] & 0x10;
+
+ (void)ms_arbitrate_l2p(chip, phy_blk,
+ log_blk-ms_start_idx[seg_no], us1, us2);
+ continue;
+ }
+
+ segment->build_flag = 1;
+
+ RTSX_DEBUGP("unused block count: %d\n", segment->unused_blk_cnt);
+
+ /* Logical Address Confirmation Process */
+ if (seg_no == ms_card->segment_cnt - 1) {
+ if (segment->unused_blk_cnt < 2)
+ chip->card_wp |= MS_CARD;
+ } else {
+ if (segment->unused_blk_cnt < 1)
+ chip->card_wp |= MS_CARD;
+ }
+
+ if (chip->card_wp & MS_CARD)
+ return STATUS_SUCCESS;
+
+ for (log_blk = ms_start_idx[seg_no];
+ log_blk < ms_start_idx[seg_no + 1]; log_blk++) {
+ if (segment->l2p_table[log_blk-ms_start_idx[seg_no]] == 0xFFFF) {
+ phy_blk = ms_get_unused_block(chip, seg_no);
+ if (phy_blk == 0xFFFF) {
+ chip->card_wp |= MS_CARD;
+ return STATUS_SUCCESS;
+ }
+ retval = ms_init_page(chip, phy_blk, log_blk, 0, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, BUILD_FAIL);
+
+ segment->l2p_table[log_blk-ms_start_idx[seg_no]] = phy_blk;
+ if (seg_no == ms_card->segment_cnt - 1) {
+ if (segment->unused_blk_cnt < 2) {
+ chip->card_wp |= MS_CARD;
+ return STATUS_SUCCESS;
+ }
+ } else {
+ if (segment->unused_blk_cnt < 1) {
+ chip->card_wp |= MS_CARD;
+ return STATUS_SUCCESS;
+ }
+ }
+ }
+ }
+
+ /* Make the boot block the first normal block */
+ if (seg_no == 0) {
+ for (log_blk = 0; log_blk < 494; log_blk++) {
+ tmp_blk = segment->l2p_table[log_blk];
+ if (tmp_blk < ms_card->boot_block) {
+ RTSX_DEBUGP("Boot block is not the first normal block.\n");
+
+ if (chip->card_wp & MS_CARD)
+ break;
+
+ phy_blk = ms_get_unused_block(chip, 0);
+ retval = ms_copy_page(chip, tmp_blk, phy_blk,
+ log_blk, 0, ms_card->page_off + 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ segment->l2p_table[log_blk] = phy_blk;
+
+ retval = ms_set_bad_block(chip, tmp_blk);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ return STATUS_SUCCESS;
+
+BUILD_FAIL:
+ segment->build_flag = 0;
+ if (segment->l2p_table) {
+ vfree(segment->l2p_table);
+ segment->l2p_table = NULL;
+ }
+ if (segment->free_table) {
+ vfree(segment->free_table);
+ segment->free_table = NULL;
+ }
+
+ return STATUS_FAIL;
+}
+
+
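+/* Top-level reset for the MS socket: try the MS PRO reset first and fall
+ * back to the legacy MS flow when the card asks for it, then set the
+ * initial clock parameters and, for legacy cards, build the L2P table of
+ * the last segment.
+ */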
+int reset_ms_card(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ memset(ms_card, 0, sizeof(struct ms_info));
+
+ retval = enable_card_clock(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = select_card(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_card->ms_type = 0;
+
+ retval = reset_ms_pro(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (ms_card->check_ms_flow) {
+ retval = reset_ms(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ retval = ms_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!CHK_MSPRO(ms_card)) {
+ /* Build the table for the last segment to check
+ * whether an L2P table block exists and, if so, erase it
+ */
+ retval = ms_build_l2p_tbl(chip, ms_card->total_block / 512 - 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_DEBUGP("ms_card->ms_type = 0x%x\n", ms_card->ms_type);
+
+ return STATUS_SUCCESS;
+}
+
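+/* Issue an MS PRO EX_SET_CMD TPC carrying the command code, sector count
+ * and 32-bit start sector, retrying up to MS_MAX_RETRY_COUNT times.
+ */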
+static int mspro_set_rw_cmd(struct rtsx_chip *chip,
+ u32 start_sec, u16 sec_cnt, u8 cmd)
+{
+ int retval, i;
+ u8 data[8];
+
+ data[0] = cmd;
+ data[1] = (u8)(sec_cnt >> 8);
+ data[2] = (u8)sec_cnt;
+ data[3] = (u8)(start_sec >> 24);
+ data[4] = (u8)(start_sec >> 16);
+ data[5] = (u8)(start_sec >> 8);
+ data[6] = (u8)start_sec;
+ data[7] = 0;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, PRO_EX_SET_CMD, 7,
+ WAIT_INT, data, 8);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+
+void mspro_stop_seq_mode(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ if (ms_card->seq_mode) {
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ return;
+
+ ms_card->seq_mode = 0;
+ ms_card->total_sec_cnt = 0;
+ ms_send_cmd(chip, PRO_STOP, WAIT_INT);
+
+ rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
+ }
+}
+
+static inline int ms_auto_tune_clock(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ if (chip->asic_code) {
+ if (ms_card->ms_clock > 30)
+ ms_card->ms_clock -= 20;
+ } else {
+ if (ms_card->ms_clock == CLK_80)
+ ms_card->ms_clock = CLK_60;
+ else if (ms_card->ms_clock == CLK_60)
+ ms_card->ms_clock = CLK_40;
+ }
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
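+/* MS PRO multi-sector read/write. HG cards use the 2K quad TPCs when the
+ * access is 4-sector aligned and the 512-byte long-data TPCs otherwise.
+ * Sequential-mode state lets consecutive accesses to adjacent sectors reuse
+ * the command already issued to the card instead of starting a new one.
+ */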
+static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
+ struct rtsx_chip *chip, u32 start_sector,
+ u16 sector_cnt)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, mode_2k = 0;
+ u16 count;
+ u8 val, trans_mode, rw_tpc, rw_cmd;
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ ms_card->cleanup_counter = 0;
+
+ if (CHK_MSHG(ms_card)) {
+ if ((start_sector % 4) || (sector_cnt % 4)) {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ rw_tpc = PRO_READ_LONG_DATA;
+ rw_cmd = PRO_READ_DATA;
+ } else {
+ rw_tpc = PRO_WRITE_LONG_DATA;
+ rw_cmd = PRO_WRITE_DATA;
+ }
+ } else {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ rw_tpc = PRO_READ_QUAD_DATA;
+ rw_cmd = PRO_READ_2K_DATA;
+ } else {
+ rw_tpc = PRO_WRITE_QUAD_DATA;
+ rw_cmd = PRO_WRITE_2K_DATA;
+ }
+ mode_2k = 1;
+ }
+ } else {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ rw_tpc = PRO_READ_LONG_DATA;
+ rw_cmd = PRO_READ_DATA;
+ } else {
+ rw_tpc = PRO_WRITE_LONG_DATA;
+ rw_cmd = PRO_WRITE_DATA;
+ }
+ }
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ trans_mode = MS_TM_AUTO_READ;
+ else
+ trans_mode = MS_TM_AUTO_WRITE;
+
+ RTSX_READ_REG(chip, MS_TRANS_CFG, &val);
+
+ if (ms_card->seq_mode) {
+ if ((ms_card->pre_dir != srb->sc_data_direction)
+ || ((ms_card->pre_sec_addr + ms_card->pre_sec_cnt) != start_sector)
+ || (mode_2k && (ms_card->seq_mode & MODE_512_SEQ))
+ || (!mode_2k && (ms_card->seq_mode & MODE_2K_SEQ))
+ || !(val & MS_INT_BREQ)
+ || ((ms_card->total_sec_cnt + sector_cnt) > 0xFE00)) {
+ ms_card->seq_mode = 0;
+ ms_card->total_sec_cnt = 0;
+ if (val & MS_INT_BREQ) {
+ retval = ms_send_cmd(chip, PRO_STOP, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
+ }
+ }
+ }
+
+ if (!ms_card->seq_mode) {
+ ms_card->total_sec_cnt = 0;
+ if (sector_cnt >= SEQ_START_CRITERIA) {
+ if ((ms_card->capacity - start_sector) > 0xFE00)
+ count = 0xFE00;
+ else
+ count = (u16)(ms_card->capacity - start_sector);
+
+ if (count > sector_cnt) {
+ if (mode_2k)
+ ms_card->seq_mode |= MODE_2K_SEQ;
+ else
+ ms_card->seq_mode |= MODE_512_SEQ;
+ }
+ } else {
+ count = sector_cnt;
+ }
+ retval = mspro_set_rw_cmd(chip, start_sector, count, rw_cmd);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->seq_mode = 0;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ retval = ms_transfer_data(chip, trans_mode, rw_tpc, sector_cnt,
+ WAIT_INT, mode_2k, scsi_sg_count(srb),
+ scsi_sglist(srb), scsi_bufflen(srb));
+ if (retval != STATUS_SUCCESS) {
+ ms_card->seq_mode = 0;
+ rtsx_read_register(chip, MS_TRANS_CFG, &val);
+ rtsx_clear_ms_error(chip);
+
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ chip->rw_need_retry = 0;
+ RTSX_DEBUGP("No card exist, exit mspro_rw_multi_sector\n");
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & MS_INT_BREQ)
+ ms_send_cmd(chip, PRO_STOP, WAIT_INT);
+
+ if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
+ RTSX_DEBUGP("MSPro CRC error, tune clock!\n");
+ chip->rw_need_retry = 1;
+ ms_auto_tune_clock(chip);
+ }
+
+ TRACE_RET(chip, retval);
+ }
+
+ if (ms_card->seq_mode) {
+ ms_card->pre_sec_addr = start_sector;
+ ms_card->pre_sec_cnt = sector_cnt;
+ ms_card->pre_dir = srb->sc_data_direction;
+ ms_card->total_sec_cnt += sector_cnt;
+ }
+
+ return STATUS_SUCCESS;
+}
+
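+/* Poll the format progress of an MS PRO card: read the total and current
+ * progress counters via PRO_READ_SHORT_DATA, scale them to a 16-bit value,
+ * then poll MS_TRANS_CFG (up to 5000 times) to decide whether the format
+ * finished, failed or is still in progress.
+ */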
+static int mspro_read_format_progress(struct rtsx_chip *chip,
+ const int short_data_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u32 total_progress, cur_progress;
+ u8 cnt, tmp;
+ u8 data[8];
+
+ RTSX_DEBUGP("mspro_read_format_progress, short_data_len = %d\n",
+ short_data_len);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = rtsx_read_register(chip, MS_TRANS_CFG, &tmp);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (!(tmp & MS_INT_BREQ)) {
+ if ((tmp & (MS_INT_CED | MS_INT_BREQ | MS_INT_CMDNK | MS_INT_ERR)) == MS_INT_CED) {
+ ms_card->format_status = FORMAT_SUCCESS;
+ return STATUS_SUCCESS;
+ }
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (short_data_len >= 256)
+ cnt = 0;
+ else
+ cnt = (u8)short_data_len;
+
+ retval = rtsx_write_register(chip, MS_CFG, MS_NO_CHECK_INT,
+ MS_NO_CHECK_INT);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, cnt, WAIT_INT,
+ data, 8);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ total_progress = (data[0] << 24) | (data[1] << 16) |
+ (data[2] << 8) | data[3];
+ cur_progress = (data[4] << 24) | (data[5] << 16) |
+ (data[6] << 8) | data[7];
+
+ RTSX_DEBUGP("total_progress = %d, cur_progress = %d\n",
+ total_progress, cur_progress);
+
+ if (total_progress == 0) {
+ ms_card->progress = 0;
+ } else {
+ u64 ulltmp = (u64)cur_progress * (u64)65535;
+ do_div(ulltmp, total_progress);
+ ms_card->progress = (u16)ulltmp;
+ }
+ RTSX_DEBUGP("progress = %d\n", ms_card->progress);
+
+ for (i = 0; i < 5000; i++) {
+ retval = rtsx_read_register(chip, MS_TRANS_CFG, &tmp);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (tmp & (MS_INT_CED | MS_INT_CMDNK |
+ MS_INT_BREQ | MS_INT_ERR))
+ break;
+
+ wait_timeout(1);
+ }
+
+ retval = rtsx_write_register(chip, MS_CFG, MS_NO_CHECK_INT, 0);
+ if (retval != STATUS_SUCCESS) {
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (i == 5000) {
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (tmp & (MS_INT_CMDNK | MS_INT_ERR)) {
+ ms_card->format_status = FORMAT_FAIL;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (tmp & MS_INT_CED) {
+ ms_card->format_status = FORMAT_SUCCESS;
+ ms_card->pro_under_formatting = 0;
+ } else if (tmp & MS_INT_BREQ) {
+ ms_card->format_status = FORMAT_IN_PROGRESS;
+ } else {
+ ms_card->format_status = FORMAT_FAIL;
+ ms_card->pro_under_formatting = 0;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+void mspro_polling_format_status(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int i;
+
+ if (ms_card->pro_under_formatting &&
+ (rtsx_get_stat(chip) != RTSX_STAT_SS)) {
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ for (i = 0; i < 65535; i++) {
+ mspro_read_format_progress(chip, MS_SHORT_DATA_LEN);
+ if (ms_card->format_status != FORMAT_IN_PROGRESS)
+ break;
+ }
+ }
+
+ return;
+}
+
+int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ int short_data_len, int quick_format)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 buf[8], tmp;
+ u16 para;
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_set_rw_reg_addr(chip, 0x00, 0x00, Pro_TPCParm, 0x01);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ memset(buf, 0, 2);
+ switch (short_data_len) {
+ case 32:
+ buf[0] = 0;
+ break;
+ case 64:
+ buf[0] = 1;
+ break;
+ case 128:
+ buf[0] = 2;
+ break;
+ case 256:
+ default:
+ buf[0] = 3;
+ break;
+ }
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, PRO_WRITE_REG, 1,
+ NO_WAIT_INT, buf, 2);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (quick_format)
+ para = 0x0000;
+ else
+ para = 0x0001;
+
+ retval = mspro_set_rw_cmd(chip, 0, para, PRO_FORMAT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_READ_REG(chip, MS_TRANS_CFG, &tmp);
+
+ if (tmp & (MS_INT_CMDNK | MS_INT_ERR))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if ((tmp & (MS_INT_BREQ | MS_INT_CED)) == MS_INT_BREQ) {
+ ms_card->pro_under_formatting = 1;
+ ms_card->progress = 0;
+ ms_card->format_status = FORMAT_IN_PROGRESS;
+ return STATUS_SUCCESS;
+ }
+
+ if (tmp & MS_INT_CED) {
+ ms_card->pro_under_formatting = 0;
+ ms_card->progress = 0;
+ ms_card->format_status = FORMAT_SUCCESS;
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_NO_SENSE);
+ return STATUS_SUCCESS;
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+
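+/* Read pages [start_page, end_page) of phy_blk into the SCSI transfer
+ * buffer, 512 bytes per page, via the ring buffer and DMA. On an
+ * uncorrectable flash read error the page is marked NG in the extra data
+ * (when the card is not write protected) before the call fails.
+ */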
+static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
+ u16 log_blk, u8 start_page, u8 end_page,
+ u8 *buf, unsigned int *index,
+ unsigned int *offset)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 extra[MS_EXTRA_SIZE], page_addr, val, trans_cfg, data[6];
+ u8 *ptr;
+
+ retval = ms_read_extra_data(chip, phy_blk, start_page,
+ extra, MS_EXTRA_SIZE);
+ if (retval == STATUS_SUCCESS) {
+ if ((extra[1] & 0x30) != 0x30) {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(phy_blk >> 8);
+ data[3] = (u8)phy_blk;
+ data[4] = 0;
+ data[5] = start_page;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT,
+ data, 6);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ptr = buf;
+
+ for (page_addr = start_page; page_addr < end_page; page_addr++) {
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (val & INT_REG_ERR) {
+ if (val & INT_REG_BREQ) {
+ retval = ms_read_status_reg(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (!(chip->card_wp & MS_CARD)) {
+ reset_ms(chip);
+ ms_set_page_status(log_blk, setPS_NG, extra, MS_EXTRA_SIZE);
+ ms_write_extra_data(chip, phy_blk,
+ page_addr, extra, MS_EXTRA_SIZE);
+ }
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ if (!(val & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_BREQ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (page_addr == (end_page - 1)) {
+ if (!(val & INT_REG_CED)) {
+ retval = ms_send_cmd(chip, BLOCK_END, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT,
+ &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!(val & INT_REG_CED)) {
+ ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ trans_cfg = NO_WAIT_INT;
+ } else {
+ trans_cfg = WAIT_INT;
+ }
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, READ_PAGE_DATA);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG,
+ 0xFF, trans_cfg);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, RING_BUFFER);
+
+ trans_dma_enable(DMA_FROM_DEVICE, chip, 512, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_NORMAL_READ);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr,
+ 512, scsi_sg_count(chip->srb),
+ index, offset, DMA_FROM_DEVICE,
+ chip->ms_timeout);
+ if (retval < 0) {
+ if (retval == -ETIMEDOUT) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_TIMEDOUT);
+ }
+
+ retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
+ if (retval != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_TIMEDOUT);
+ }
+ if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
+ ms_set_err_code(chip, MS_CRC16_ERROR);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (scsi_sg_count(chip->srb) == 0)
+ ptr += 512;
+ }
+
+ return STATUS_SUCCESS;
+}
+
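+/* Write pages [start_page, end_page) of a logical block into new_blk from
+ * the SCSI transfer buffer. When the write starts at page 0, the overwrite
+ * flag of the old block is rewritten first; each 512-byte page then goes
+ * out over DMA with the INT status checked between pages.
+ */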
+static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
+ u16 new_blk, u16 log_blk, u8 start_page,
+ u8 end_page, u8 *buf, unsigned int *index,
+ unsigned int *offset)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, i;
+ u8 page_addr, val, data[16];
+ u8 *ptr;
+
+ if (!start_page) {
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, 7);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(old_blk >> 8);
+ data[3] = (u8)old_blk;
+ data[4] = 0x80;
+ data[5] = 0;
+ data[6] = 0xEF;
+ data[7] = 0xFF;
+
+ retval = ms_write_bytes(chip, WRITE_REG, 7, NO_WAIT_INT,
+ data, 8);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+ retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT, 1,
+ NO_WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, (6 + MS_EXTRA_SIZE));
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (CHK_MS4BIT(ms_card))
+ data[0] = 0x88;
+ else
+ data[0] = 0x80;
+
+ data[1] = 0;
+ data[2] = (u8)(new_blk >> 8);
+ data[3] = (u8)new_blk;
+ if ((end_page - start_page) == 1)
+ data[4] = 0x20;
+ else
+ data[4] = 0;
+
+ data[5] = start_page;
+ data[6] = 0xF8;
+ data[7] = 0xFF;
+ data[8] = (u8)(log_blk >> 8);
+ data[9] = (u8)log_blk;
+
+ for (i = 0x0A; i < 0x10; i++)
+ data[i] = 0xFF;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, WRITE_REG, 6 + MS_EXTRA_SIZE,
+ NO_WAIT_INT, data, 16);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ptr = buf;
+ for (page_addr = start_page; page_addr < end_page; page_addr++) {
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ ms_set_err_code(chip, MS_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (val & INT_REG_CMDNK) {
+ ms_set_err_code(chip, MS_CMD_NK);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (val & INT_REG_ERR) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (!(val & INT_REG_BREQ)) {
+ ms_set_err_code(chip, MS_BREQ_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ udelay(30);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC,
+ 0xFF, WRITE_PAGE_DATA);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG,
+ 0xFF, WAIT_INT);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, RING_BUFFER);
+
+ trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr,
+ 512, scsi_sg_count(chip->srb),
+ index, offset, DMA_TO_DEVICE,
+ chip->ms_timeout);
+ if (retval < 0) {
+ ms_set_err_code(chip, MS_TO_ERROR);
+ rtsx_clear_ms_error(chip);
+
+ if (retval == -ETIMEDOUT)
+ TRACE_RET(chip, STATUS_TIMEDOUT);
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if ((end_page - start_page) == 1) {
+ if (!(val & INT_REG_CED)) {
+ ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ if (page_addr == (end_page - 1)) {
+ if (!(val & INT_REG_CED)) {
+ retval = ms_send_cmd(chip, BLOCK_END,
+ WAIT_INT);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_read_bytes(chip, GET_INT, 1,
+ NO_WAIT_INT, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if ((page_addr == (end_page - 1)) ||
+ (page_addr == ms_card->page_off)) {
+ if (!(val & INT_REG_CED)) {
+ ms_set_err_code(chip,
+ MS_FLASH_WRITE_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ if (scsi_sg_count(chip->srb) == 0)
+ ptr += 512;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+
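+/* Complete a partially written block: copy the remaining pages from old_blk
+ * to new_blk, retire old_blk (mark it bad or erase and recycle it) and
+ * point the L2P entry of log_blk at new_blk.
+ */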
+static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
+ u16 log_blk, u8 page_off)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval, seg_no;
+
+ retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
+ page_off, ms_card->page_off + 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ seg_no = old_blk >> 9;
+
+ if (MS_TST_BAD_BLOCK_FLG(ms_card)) {
+ MS_CLR_BAD_BLOCK_FLG(ms_card);
+ ms_set_bad_block(chip, old_blk);
+ } else {
+ retval = ms_erase_block(chip, old_blk);
+ if (retval == STATUS_SUCCESS)
+ ms_set_unused_block(chip, old_blk);
+ }
+
+ ms_set_l2p_tbl(chip, seg_no, log_blk - ms_start_idx[seg_no], new_blk);
+
+ return STATUS_SUCCESS;
+}
+
+static int ms_prepare_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
+ u16 log_blk, u8 start_page)
+{
+ int retval;
+
+ if (start_page) {
+ retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
+ 0, start_page);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+#ifdef MS_DELAY_WRITE
+int ms_delay_write(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_delay_write_tag *delay_write = &(ms_card->delay_write);
+ int retval;
+
+ if (delay_write->delay_write_flag) {
+ retval = ms_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ delay_write->delay_write_flag = 0;
+ retval = ms_finish_write(chip,
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ delay_write->logblock,
+ delay_write->pageoff);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+#endif
+
+static inline void ms_rw_fail(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ else
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
+}
+
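+/* Legacy MS multi-sector read/write: translate the start sector into a
+ * logical block plus page offset, map it through the per-segment L2P table
+ * (built on demand), and loop block by block. Writes go to a fresh physical
+ * block; when a block is written only partially, the finish step is
+ * deferred through the delay-write state if MS_DELAY_WRITE is enabled.
+ */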
+static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 start_sector, u16 sector_cnt)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval, seg_no;
+ unsigned int index = 0, offset = 0;
+ u16 old_blk = 0, new_blk = 0, log_blk, total_sec_cnt = sector_cnt;
+ u8 start_page, end_page = 0, page_cnt;
+ u8 *ptr;
+#ifdef MS_DELAY_WRITE
+ struct ms_delay_write_tag *delay_write = &(ms_card->delay_write);
+#endif
+
+ ms_set_err_code(chip, MS_NO_ERROR);
+
+ ms_card->cleanup_counter = 0;
+
+ ptr = (u8 *)scsi_sglist(srb);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS) {
+ ms_rw_fail(srb, chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ log_blk = (u16)(start_sector >> ms_card->block_shift);
+ start_page = (u8)(start_sector & ms_card->page_off);
+
+ for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1; seg_no++) {
+ if (log_blk < ms_start_idx[seg_no+1])
+ break;
+ }
+
+ if (ms_card->segment[seg_no].build_flag == 0) {
+ retval = ms_build_l2p_tbl(chip, seg_no);
+ if (retval != STATUS_SUCCESS) {
+ chip->card_fail |= MS_CARD;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+#ifdef MS_DELAY_WRITE
+ if (delay_write->delay_write_flag &&
+ (delay_write->logblock == log_blk) &&
+ (start_page > delay_write->pageoff)) {
+ delay_write->delay_write_flag = 0;
+ retval = ms_copy_page(chip,
+ delay_write->old_phyblock,
+ delay_write->new_phyblock, log_blk,
+ delay_write->pageoff, start_page);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ old_blk = delay_write->old_phyblock;
+ new_blk = delay_write->new_phyblock;
+ } else if (delay_write->delay_write_flag &&
+ (delay_write->logblock == log_blk) &&
+ (start_page == delay_write->pageoff)) {
+ delay_write->delay_write_flag = 0;
+ old_blk = delay_write->old_phyblock;
+ new_blk = delay_write->new_phyblock;
+ } else {
+ retval = ms_delay_write(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ old_blk = ms_get_l2p_tbl(chip, seg_no,
+ log_blk - ms_start_idx[seg_no]);
+ new_blk = ms_get_unused_block(chip, seg_no);
+ if ((old_blk == 0xFFFF) || (new_blk == 0xFFFF)) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_prepare_write(chip, old_blk, new_blk,
+ log_blk, start_page);
+ if (retval != STATUS_SUCCESS) {
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#ifdef MS_DELAY_WRITE
+ }
+#endif
+ } else {
+#ifdef MS_DELAY_WRITE
+ retval = ms_delay_write(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ old_blk = ms_get_l2p_tbl(chip, seg_no,
+ log_blk - ms_start_idx[seg_no]);
+ if (old_blk == 0xFFFF) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ RTSX_DEBUGP("seg_no = %d, old_blk = 0x%x, new_blk = 0x%x\n",
+ seg_no, old_blk, new_blk);
+
+ while (total_sec_cnt) {
+ if ((start_page + total_sec_cnt) > (ms_card->page_off + 1))
+ end_page = ms_card->page_off + 1;
+ else
+ end_page = start_page + (u8)total_sec_cnt;
+
+ page_cnt = end_page - start_page;
+
+ RTSX_DEBUGP("start_page = %d, end_page = %d, page_cnt = %d\n",
+ start_page, end_page, page_cnt);
+
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ retval = ms_read_multiple_pages(chip,
+ old_blk, log_blk, start_page, end_page,
+ ptr, &index, &offset);
+ } else {
+ retval = ms_write_multiple_pages(chip, old_blk,
+ new_blk, log_blk, start_page, end_page,
+ ptr, &index, &offset);
+ }
+
+ if (retval != STATUS_SUCCESS) {
+ toggle_gpio(chip, 1);
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ ms_rw_fail(srb, chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+ if (end_page == (ms_card->page_off + 1)) {
+ retval = ms_erase_block(chip, old_blk);
+ if (retval == STATUS_SUCCESS)
+ ms_set_unused_block(chip, old_blk);
+
+ ms_set_l2p_tbl(chip, seg_no,
+ log_blk - ms_start_idx[seg_no],
+ new_blk);
+ }
+ }
+
+ total_sec_cnt -= page_cnt;
+ if (scsi_sg_count(srb) == 0)
+ ptr += page_cnt * 512;
+
+ if (total_sec_cnt == 0)
+ break;
+
+ log_blk++;
+
+ for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1;
+ seg_no++) {
+ if (log_blk < ms_start_idx[seg_no+1])
+ break;
+ }
+
+ if (ms_card->segment[seg_no].build_flag == 0) {
+ retval = ms_build_l2p_tbl(chip, seg_no);
+ if (retval != STATUS_SUCCESS) {
+ chip->card_fail |= MS_CARD;
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ old_blk = ms_get_l2p_tbl(chip, seg_no,
+ log_blk - ms_start_idx[seg_no]);
+ if (old_blk == 0xFFFF) {
+ ms_rw_fail(srb, chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+ new_blk = ms_get_unused_block(chip, seg_no);
+ if (new_blk == 0xFFFF) {
+ ms_rw_fail(srb, chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ RTSX_DEBUGP("seg_no = %d, old_blk = 0x%x, new_blk = 0x%x\n",
+ seg_no, old_blk, new_blk);
+
+ start_page = 0;
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+ if (end_page < (ms_card->page_off + 1)) {
+#ifdef MS_DELAY_WRITE
+ delay_write->delay_write_flag = 1;
+ delay_write->old_phyblock = old_blk;
+ delay_write->new_phyblock = new_blk;
+ delay_write->logblock = log_blk;
+ delay_write->pageoff = end_page;
+#else
+ retval = ms_finish_write(chip, old_blk, new_blk,
+ log_blk, end_page);
+ if (retval != STATUS_SUCCESS) {
+ if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ms_rw_fail(srb, chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ }
+ }
+
+ scsi_set_resid(srb, 0);
+
+ return STATUS_SUCCESS;
+}
+
+int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 start_sector, u16 sector_cnt)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ if (CHK_MSPRO(ms_card))
+ retval = mspro_rw_multi_sector(srb, chip, start_sector,
+ sector_cnt);
+ else
+ retval = ms_rw_multi_sector(srb, chip, start_sector,
+ sector_cnt);
+
+ return retval;
+}
+
+
+void ms_free_l2p_tbl(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int i = 0;
+
+ if (ms_card->segment != NULL) {
+ for (i = 0; i < ms_card->segment_cnt; i++) {
+ if (ms_card->segment[i].l2p_table != NULL) {
+ vfree(ms_card->segment[i].l2p_table);
+ ms_card->segment[i].l2p_table = NULL;
+ }
+ if (ms_card->segment[i].free_table != NULL) {
+ vfree(ms_card->segment[i].free_table);
+ ms_card->segment[i].free_table = NULL;
+ }
+ }
+ vfree(ms_card->segment);
+ ms_card->segment = NULL;
+ }
+}
+
+#ifdef SUPPORT_MAGIC_GATE
+
+#ifdef READ_BYTES_WAIT_INT
+static int ms_poll_int(struct rtsx_chip *chip)
+{
+ int retval;
+ u8 val;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANS_CFG, MS_INT_CED, MS_INT_CED);
+
+ retval = rtsx_send_cmd(chip, MS_CARD, 5000);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ val = *rtsx_get_cmd_data(chip);
+ if (val & MS_INT_ERR)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+#endif
+
+#ifdef MS_SAMPLE_INT_ERR
+static int check_ms_err(struct rtsx_chip *chip)
+{
+ int retval;
+ u8 val;
+
+ retval = rtsx_read_register(chip, MS_TRANSFER, &val);
+ if (retval != STATUS_SUCCESS)
+ return 1;
+ if (val & MS_TRANSFER_ERR)
+ return 1;
+
+ retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
+ if (retval != STATUS_SUCCESS)
+ return 1;
+
+ if (val & (MS_INT_ERR | MS_INT_CMDNK))
+ return 1;
+
+ return 0;
+}
+#else
+static int check_ms_err(struct rtsx_chip *chip)
+{
+ int retval;
+ u8 val;
+
+ retval = rtsx_read_register(chip, MS_TRANSFER, &val);
+ if (retval != STATUS_SUCCESS)
+ return 1;
+ if (val & MS_TRANSFER_ERR)
+ return 1;
+
+ return 0;
+}
+#endif
+
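+/* Send a MagicGate extended command (PRO_EX_SET_CMD) with the given entry
+ * number, retrying on failure and checking the MS error status afterwards.
+ */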
+static int mg_send_ex_cmd(struct rtsx_chip *chip, u8 cmd, u8 entry_num)
+{
+ int retval, i;
+ u8 data[8];
+
+ data[0] = cmd;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+ data[4] = 0;
+ data[5] = 0;
+ data[6] = entry_num;
+ data[7] = 0;
+
+ for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
+ retval = ms_write_bytes(chip, PRO_EX_SET_CMD, 7, WAIT_INT,
+ data, 8);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i == MS_MAX_RETRY_COUNT)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (check_ms_err(chip)) {
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int mg_set_tpc_para_sub(struct rtsx_chip *chip, int type,
+ u8 mg_entry_num)
+{
+ int retval;
+ u8 buf[6];
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ if (type == 0)
+ retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_TPCParm, 1);
+ else
+ retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6);
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ buf[0] = 0;
+ buf[1] = 0;
+ if (type == 1) {
+ buf[2] = 0;
+ buf[3] = 0;
+ buf[4] = 0;
+ buf[5] = mg_entry_num;
+ }
+ retval = ms_write_bytes(chip, PRO_WRITE_REG, (type == 0) ? 1 : 6,
+ NO_WAIT_INT, buf, 6);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int mg_set_leaf_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ int i;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 buf1[32], buf2[12];
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ if (scsi_bufflen(srb) < 12) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = mg_send_ex_cmd(chip, MG_SET_LID, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ memset(buf1, 0, 32);
+ rtsx_stor_get_xfer_buf(buf2, min_t(int, 12, scsi_bufflen(srb)), srb);
+ for (i = 0; i < 8; i++)
+ buf1[8+i] = buf2[4+i];
+
+ retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT,
+ buf1, 32);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (check_ms_err(chip)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval = STATUS_FAIL;
+ int bufflen;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 *buf = NULL;
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ buf = kmalloc(1540, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ buf[0] = 0x04;
+ buf[1] = 0x1A;
+ buf[2] = 0x00;
+ buf[3] = 0x00;
+
+ retval = mg_send_ex_cmd(chip, MG_GET_LEKB, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_GOTO(chip, GetEKBFinish);
+ }
+
+ retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
+ 3, WAIT_INT, 0, 0, buf + 4, 1536);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ rtsx_clear_ms_error(chip);
+ TRACE_GOTO(chip, GetEKBFinish);
+ }
+ if (check_ms_err(chip)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ bufflen = min_t(int, 1052, scsi_bufflen(srb));
+ rtsx_stor_set_xfer_buf(buf, bufflen, srb);
+
+GetEKBFinish:
+ kfree(buf);
+ return retval;
+}
+
+int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ int bufflen;
+ int i;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 buf[32];
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = mg_send_ex_cmd(chip, MG_GET_ID, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT,
+ buf, 32);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (check_ms_err(chip)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ memcpy(ms_card->magic_gate_id, buf, 16);
+
+#ifdef READ_BYTES_WAIT_INT
+ retval = ms_poll_int(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+
+ retval = mg_send_ex_cmd(chip, MG_SET_RD, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ bufflen = min_t(int, 12, scsi_bufflen(srb));
+ rtsx_stor_get_xfer_buf(buf, bufflen, srb);
+
+ for (i = 0; i < 8; i++)
+ buf[i] = buf[4+i];
+
+ for (i = 0; i < 24; i++)
+ buf[8+i] = 0;
+
+ retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA,
+ 32, WAIT_INT, buf, 32);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (check_ms_err(chip)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ms_card->mg_auth = 0;
+
+ return STATUS_SUCCESS;
+}
+
+int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ int bufflen;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 buf1[32], buf2[36];
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = mg_send_ex_cmd(chip, MG_MAKE_RMS, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT,
+ buf1, 32);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (check_ms_err(chip)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ buf2[0] = 0x00;
+ buf2[1] = 0x22;
+ buf2[2] = 0x00;
+ buf2[3] = 0x00;
+
+ memcpy(buf2 + 4, ms_card->magic_gate_id, 16);
+ memcpy(buf2 + 20, buf1, 16);
+
+ bufflen = min_t(int, 36, scsi_bufflen(srb));
+ rtsx_stor_set_xfer_buf(buf2, bufflen, srb);
+
+#ifdef READ_BYTES_WAIT_INT
+ retval = ms_poll_int(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+
+ return STATUS_SUCCESS;
+}
+
+int mg_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ int i;
+ int bufflen;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 buf[32];
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = mg_send_ex_cmd(chip, MG_MAKE_KSE, 0);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ bufflen = min_t(int, 12, scsi_bufflen(srb));
+ rtsx_stor_get_xfer_buf(buf, bufflen, srb);
+
+ for (i = 0; i < 8; i++)
+ buf[i] = buf[4+i];
+
+ for (i = 0; i < 24; i++)
+ buf[8+i] = 0;
+
+ retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT,
+ buf, 32);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (check_ms_err(chip)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ms_card->mg_auth = 1;
+
+ return STATUS_SUCCESS;
+}
+
+int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ int bufflen;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 *buf = NULL;
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ buf = kmalloc(1028, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ buf[0] = 0x04;
+ buf[1] = 0x02;
+ buf[2] = 0x00;
+ buf[3] = 0x00;
+
+ retval = mg_send_ex_cmd(chip, MG_GET_IBD, ms_card->mg_entry_num);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_GOTO(chip, GetICVFinish);
+ }
+
+ retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
+ 2, WAIT_INT, 0, 0, buf + 4, 1024);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ rtsx_clear_ms_error(chip);
+ TRACE_GOTO(chip, GetICVFinish);
+ }
+ if (check_ms_err(chip)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ rtsx_clear_ms_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ bufflen = min_t(int, 1028, scsi_bufflen(srb));
+ rtsx_stor_set_xfer_buf(buf, bufflen, srb);
+
+GetICVFinish:
+ kfree(buf);
+ return retval;
+}
+
+int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ int bufflen;
+#ifdef MG_SET_ICV_SLOW
+ int i;
+#endif
+ unsigned int lun = SCSI_LUN(srb);
+ u8 *buf = NULL;
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ ms_cleanup_work(chip);
+
+ retval = ms_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ buf = kmalloc(1028, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ bufflen = min_t(int, 1028, scsi_bufflen(srb));
+ rtsx_stor_get_xfer_buf(buf, bufflen, srb);
+
+ retval = mg_send_ex_cmd(chip, MG_SET_IBD, ms_card->mg_entry_num);
+ if (retval != STATUS_SUCCESS) {
+ if (ms_card->mg_auth == 0) {
+ if ((buf[5] & 0xC0) != 0)
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ else
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_WRITE_ERR);
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
+ }
+ TRACE_GOTO(chip, SetICVFinish);
+ }
+
+#ifdef MG_SET_ICV_SLOW
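+	/*
+	 * Slow path: write the 1024-byte ICV as two 512-byte chunks, each
+	 * driven by an explicit register/DMA command sequence instead of a
+	 * single auto-write transfer.
+	 */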
+ for (i = 0; i < 2; i++) {
+ udelay(50);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC,
+ 0xFF, PRO_WRITE_LONG_DATA);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, WAIT_INT);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, RING_BUFFER);
+
+ trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
+ MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data(chip, MS_CARD, buf + 4 + i*512,
+ 512, 0, DMA_TO_DEVICE, 3000);
+ if ((retval < 0) || check_ms_err(chip)) {
+ rtsx_clear_ms_error(chip);
+ if (ms_card->mg_auth == 0) {
+ if ((buf[5] & 0xC0) != 0)
+					set_sense_type(chip, lun,
+						SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ else
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_WRITE_ERR);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_WRITE_ERR);
+ }
+ retval = STATUS_FAIL;
+ TRACE_GOTO(chip, SetICVFinish);
+ }
+ }
+#else
+ retval = ms_transfer_data(chip, MS_TM_AUTO_WRITE, PRO_WRITE_LONG_DATA,
+ 2, WAIT_INT, 0, 0, buf + 4, 1024);
+ if ((retval != STATUS_SUCCESS) || check_ms_err(chip)) {
+ rtsx_clear_ms_error(chip);
+ if (ms_card->mg_auth == 0) {
+ if ((buf[5] & 0xC0) != 0)
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ else
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MG_WRITE_ERR);
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
+ }
+ TRACE_GOTO(chip, SetICVFinish);
+ }
+#endif
+
+SetICVFinish:
+ kfree(buf);
+ return retval;
+}
+
+#endif /* SUPPORT_MAGIC_GATE */
+
+void ms_cleanup_work(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ if (CHK_MSPRO(ms_card)) {
+ if (ms_card->seq_mode) {
+ RTSX_DEBUGP("MS Pro: stop transmission\n");
+ mspro_stop_seq_mode(chip);
+ ms_card->cleanup_counter = 0;
+ }
+ if (CHK_MSHG(ms_card)) {
+ rtsx_write_register(chip, MS_CFG,
+ MS_2K_SECTOR_MODE, 0x00);
+ }
+ }
+#ifdef MS_DELAY_WRITE
+	else if ((!CHK_MSPRO(ms_card)) &&
+			ms_card->delay_write.delay_write_flag) {
+ RTSX_DEBUGP("MS: delay write\n");
+ ms_delay_write(chip);
+ ms_card->cleanup_counter = 0;
+ }
+#endif
+}
+
+int ms_power_off_card3v3(struct rtsx_chip *chip)
+{
+ int retval;
+
+ retval = disable_card_clock(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (chip->asic_code) {
+ retval = ms_pull_ctl_disable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ RTSX_WRITE_REG(chip, FPGA_PULL_CTL,
+ FPGA_MS_PULL_CTL_BIT | 0x20, FPGA_MS_PULL_CTL_BIT);
+ }
+ RTSX_WRITE_REG(chip, CARD_OE, MS_OUTPUT_EN, 0);
+ if (!chip->ft2_fast_mode) {
+ retval = card_power_off(chip, MS_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int release_ms_card(struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+
+ RTSX_DEBUGP("release_ms_card\n");
+
+#ifdef MS_DELAY_WRITE
+ ms_card->delay_write.delay_write_flag = 0;
+#endif
+ ms_card->pro_under_formatting = 0;
+
+ chip->card_ready &= ~MS_CARD;
+ chip->card_fail &= ~MS_CARD;
+ chip->card_wp &= ~MS_CARD;
+
+ ms_free_l2p_tbl(chip);
+
+ memset(ms_card->raw_sys_info, 0, 96);
+#ifdef SUPPORT_PCGL_1P18
+ memset(ms_card->raw_model_name, 0, 48);
+#endif
+
+ retval = ms_power_off_card3v3(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
diff --git a/drivers/staging/rts5208/ms.h b/drivers/staging/rts5208/ms.h
new file mode 100644
index 000000000000..26c5b03d535e
--- /dev/null
+++ b/drivers/staging/rts5208/ms.h
@@ -0,0 +1,227 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_MS_H
+#define __REALTEK_RTSX_MS_H
+
+#define MS_DELAY_WRITE
+
+#define MS_MAX_RETRY_COUNT 3
+
+#define MS_EXTRA_SIZE 0x9
+
+#define WRT_PRTCT 0x01
+
+/* Error Code */
+#define MS_NO_ERROR 0x00
+#define MS_CRC16_ERROR 0x80
+#define MS_TO_ERROR 0x40
+#define MS_NO_CARD 0x20
+#define MS_NO_MEMORY 0x10
+#define MS_CMD_NK 0x08
+#define MS_FLASH_READ_ERROR 0x04
+#define MS_FLASH_WRITE_ERROR 0x02
+#define MS_BREQ_ERROR 0x01
+#define MS_NOT_FOUND 0x03
+
+/* Transfer Protocol Command */
+#define READ_PAGE_DATA 0x02
+#define READ_REG 0x04
+#define GET_INT 0x07
+#define WRITE_PAGE_DATA 0x0D
+#define WRITE_REG 0x0B
+#define SET_RW_REG_ADRS 0x08
+#define SET_CMD 0x0E
+
+#define PRO_READ_LONG_DATA 0x02
+#define PRO_READ_SHORT_DATA 0x03
+#define PRO_READ_REG 0x04
+#define PRO_READ_QUAD_DATA 0x05
+#define PRO_GET_INT 0x07
+#define PRO_WRITE_LONG_DATA 0x0D
+#define PRO_WRITE_SHORT_DATA 0x0C
+#define PRO_WRITE_QUAD_DATA 0x0A
+#define PRO_WRITE_REG 0x0B
+#define PRO_SET_RW_REG_ADRS 0x08
+#define PRO_SET_CMD 0x0E
+#define PRO_EX_SET_CMD 0x09
+
+#ifdef SUPPORT_MAGIC_GATE
+
+#define MG_GET_ID 0x40
+#define MG_SET_LID 0x41
+#define MG_GET_LEKB 0x42
+#define MG_SET_RD 0x43
+#define MG_MAKE_RMS 0x44
+#define MG_MAKE_KSE 0x45
+#define MG_SET_IBD 0x46
+#define MG_GET_IBD 0x47
+
+#endif
+
+#ifdef XC_POWERCLASS
+#define XC_CHG_POWER 0x16
+#endif
+
+#define BLOCK_READ 0xAA
+#define BLOCK_WRITE 0x55
+#define BLOCK_END 0x33
+#define BLOCK_ERASE 0x99
+#define FLASH_STOP 0xCC
+
+#define SLEEP 0x5A
+#define CLEAR_BUF 0xC3
+#define MS_RESET 0x3C
+
+#define PRO_READ_DATA 0x20
+#define PRO_WRITE_DATA 0x21
+#define PRO_READ_ATRB 0x24
+#define PRO_STOP 0x25
+#define PRO_ERASE 0x26
+#define PRO_READ_2K_DATA 0x27
+#define PRO_WRITE_2K_DATA 0x28
+
+#define PRO_FORMAT 0x10
+#define PRO_SLEEP 0x11
+
+#define IntReg 0x01
+#define StatusReg0 0x02
+#define StatusReg1 0x03
+
+#define SystemParm 0x10
+#define BlockAdrs 0x11
+#define CMDParm 0x14
+#define PageAdrs 0x15
+
+#define OverwriteFlag 0x16
+#define ManagemenFlag 0x17
+#define LogicalAdrs 0x18
+#define ReserveArea 0x1A
+
+#define Pro_IntReg 0x01
+#define Pro_StatusReg 0x02
+#define Pro_TypeReg 0x04
+#define Pro_IFModeReg 0x05
+#define Pro_CatagoryReg 0x06
+#define Pro_ClassReg 0x07
+
+
+#define Pro_SystemParm 0x10
+#define Pro_DataCount1 0x11
+#define Pro_DataCount0 0x12
+#define Pro_DataAddr3 0x13
+#define Pro_DataAddr2 0x14
+#define Pro_DataAddr1 0x15
+#define Pro_DataAddr0 0x16
+
+#define Pro_TPCParm 0x17
+#define Pro_CMDParm 0x18
+
+#define INT_REG_CED 0x80
+#define INT_REG_ERR 0x40
+#define INT_REG_BREQ 0x20
+#define INT_REG_CMDNK 0x01
+
+#define BLOCK_BOOT 0xC0
+#define BLOCK_OK 0x80
+#define PAGE_OK 0x60
+#define DATA_COMPL 0x10
+
+#define NOT_BOOT_BLOCK 0x4
+#define NOT_TRANSLATION_TABLE 0x8
+
+#define HEADER_ID0 PPBUF_BASE2
+#define HEADER_ID1 (PPBUF_BASE2 + 1)
+#define DISABLED_BLOCK0 (PPBUF_BASE2 + 0x170 + 4)
+#define DISABLED_BLOCK1 (PPBUF_BASE2 + 0x170 + 5)
+#define DISABLED_BLOCK2 (PPBUF_BASE2 + 0x170 + 6)
+#define DISABLED_BLOCK3 (PPBUF_BASE2 + 0x170 + 7)
+#define BLOCK_SIZE_0 (PPBUF_BASE2 + 0x1a0 + 2)
+#define BLOCK_SIZE_1 (PPBUF_BASE2 + 0x1a0 + 3)
+#define BLOCK_COUNT_0 (PPBUF_BASE2 + 0x1a0 + 4)
+#define BLOCK_COUNT_1 (PPBUF_BASE2 + 0x1a0 + 5)
+#define EBLOCK_COUNT_0 (PPBUF_BASE2 + 0x1a0 + 6)
+#define EBLOCK_COUNT_1 (PPBUF_BASE2 + 0x1a0 + 7)
+#define PAGE_SIZE_0 (PPBUF_BASE2 + 0x1a0 + 8)
+#define PAGE_SIZE_1 (PPBUF_BASE2 + 0x1a0 + 9)
+
+#define MS_Device_Type (PPBUF_BASE2 + 0x1D8)
+
+#define MS_4bit_Support (PPBUF_BASE2 + 0x1D3)
+
+#define setPS_NG 1
+#define setPS_Error 0
+
+#define PARALLEL_8BIT_IF 0x40
+#define PARALLEL_4BIT_IF 0x00
+#define SERIAL_IF 0x80
+
+#define BUF_FULL 0x10
+#define BUF_EMPTY 0x20
+
+#define MEDIA_BUSY 0x80
+#define FLASH_BUSY 0x40
+#define DATA_ERROR 0x20
+#define STS_UCDT 0x10
+#define EXTRA_ERROR 0x08
+#define STS_UCEX 0x04
+#define FLAG_ERROR 0x02
+#define STS_UCFG 0x01
+
+#define MS_SHORT_DATA_LEN 32
+
+#define FORMAT_SUCCESS 0
+#define FORMAT_FAIL 1
+#define FORMAT_IN_PROGRESS 2
+
+#define MS_SET_BAD_BLOCK_FLG(ms_card) ((ms_card)->multi_flag |= 0x80)
+#define MS_CLR_BAD_BLOCK_FLG(ms_card) ((ms_card)->multi_flag &= 0x7F)
+#define MS_TST_BAD_BLOCK_FLG(ms_card) ((ms_card)->multi_flag & 0x80)
+
+void mspro_polling_format_status(struct rtsx_chip *chip);
+
+void mspro_stop_seq_mode(struct rtsx_chip *chip);
+int reset_ms_card(struct rtsx_chip *chip);
+int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 start_sector, u16 sector_cnt);
+int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ int short_data_len, int quick_format);
+void ms_free_l2p_tbl(struct rtsx_chip *chip);
+void ms_cleanup_work(struct rtsx_chip *chip);
+int ms_power_off_card3v3(struct rtsx_chip *chip);
+int release_ms_card(struct rtsx_chip *chip);
+#ifdef MS_DELAY_WRITE
+int ms_delay_write(struct rtsx_chip *chip);
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+int mg_set_leaf_id(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int mg_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+#endif
+
+#endif /* __REALTEK_RTSX_MS_H */
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
new file mode 100644
index 000000000000..8586ac5d2144
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx.c
@@ -0,0 +1,1071 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+
+#include "rtsx.h"
+#include "rtsx_chip.h"
+#include "rtsx_transport.h"
+#include "rtsx_scsi.h"
+#include "rtsx_card.h"
+#include "general.h"
+
+#include "ms.h"
+#include "sd.h"
+#include "xd.h"
+
+MODULE_DESCRIPTION("Realtek PCI-Express card reader rts5208/rts5288 driver");
+MODULE_LICENSE("GPL");
+
+static unsigned int delay_use = 1;
+module_param(delay_use, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device");
+
+static int ss_en;
+module_param(ss_en, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ss_en, "enable selective suspend");
+
+static int ss_interval = 50;
+module_param(ss_interval, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ss_interval, "Interval to enter ss state in seconds");
+
+static int auto_delink_en;
+module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
+
+static unsigned char aspm_l0s_l1_en;
+module_param(aspm_l0s_l1_en, byte, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(aspm_l0s_l1_en, "enable device aspm");
+
+static int msi_en;
+module_param(msi_en, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(msi_en, "enable msi");
+
+static irqreturn_t rtsx_interrupt(int irq, void *dev_id);
+
+/***********************************************************************
+ * Host functions
+ ***********************************************************************/
+
+static const char *host_info(struct Scsi_Host *host)
+{
+ return "SCSI emulation for PCI-Express Mass Storage devices";
+}
+
+static int slave_alloc(struct scsi_device *sdev)
+{
+ /*
+ * Set the INQUIRY transfer length to 36. We don't use any of
+ * the extra data and many devices choke if asked for more or
+ * less than 36 bytes.
+ */
+ sdev->inquiry_len = 36;
+ return 0;
+}
+
+static int slave_configure(struct scsi_device *sdev)
+{
+ /* Scatter-gather buffers (all but the last) must have a length
+ * divisible by the bulk maxpacket size. Otherwise a data packet
+ * would end up being short, causing a premature end to the data
+ * transfer. Since high-speed bulk pipes have a maxpacket size
+ * of 512, we'll use that as the scsi device queue's DMA alignment
+ * mask. Guaranteeing proper alignment of the first buffer will
+ * have the desired effect because, except at the beginning and
+ * the end, scatter-gather buffers follow page boundaries. */
+ blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
+
+ /* Set the SCSI level to at least 2. We'll leave it at 3 if that's
+ * what is originally reported. We need this to avoid confusing
+ * the SCSI layer with devices that report 0 or 1, but need 10-byte
+ * commands (ala ATAPI devices behind certain bridges, or devices
+ * which simply have broken INQUIRY data).
+ *
+ * NOTE: This means /dev/sg programs (ala cdrecord) will get the
+ * actual information. This seems to be the preference for
+ * programs like that.
+ *
+ * NOTE: This also means that /proc/scsi/scsi and sysfs may report
+ * the actual value or the modified one, depending on where the
+ * data comes from.
+ */
+ if (sdev->scsi_level < SCSI_2)
+ sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2;
+
+ return 0;
+}
+
+
+/***********************************************************************
+ * /proc/scsi/ functions
+ ***********************************************************************/
+
+/* we use this macro to help us write into the buffer */
+#undef SPRINTF
+#define SPRINTF(args...) \
+ do { if (pos < buffer+length) pos += sprintf(pos, ## args); } while (0)
+
+/* queue a command */
+/* This is always called with scsi_lock(host) held */
+static int queuecommand_lck(struct scsi_cmnd *srb,
+ void (*done)(struct scsi_cmnd *))
+{
+ struct rtsx_dev *dev = host_to_rtsx(srb->device->host);
+ struct rtsx_chip *chip = dev->chip;
+
+ /* check for state-transition errors */
+ if (chip->srb != NULL) {
+ dev_err(&dev->pci->dev, "Error in %s: chip->srb = %p\n",
+ __func__, chip->srb);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ /* fail the command if we are disconnecting */
+ if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
+ dev_info(&dev->pci->dev, "Fail command during disconnect\n");
+ srb->result = DID_NO_CONNECT << 16;
+ done(srb);
+ return 0;
+ }
+
+ /* enqueue the command and wake up the control thread */
+ srb->scsi_done = done;
+ chip->srb = srb;
+ complete(&dev->cmnd_ready);
+
+ return 0;
+}
+
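+/*
+ * DEF_SCSI_QCMD() generates the queuecommand() wrapper that takes the host
+ * lock around a call to queuecommand_lck().
+ */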
+static DEF_SCSI_QCMD(queuecommand)
+
+/***********************************************************************
+ * Error handling functions
+ ***********************************************************************/
+
+/* Command timeout and abort */
+static int command_abort(struct scsi_cmnd *srb)
+{
+ struct Scsi_Host *host = srb->device->host;
+ struct rtsx_dev *dev = host_to_rtsx(host);
+ struct rtsx_chip *chip = dev->chip;
+
+ dev_info(&dev->pci->dev, "%s called\n", __func__);
+
+ scsi_lock(host);
+
+ /* Is this command still active? */
+ if (chip->srb != srb) {
+ scsi_unlock(host);
+ dev_info(&dev->pci->dev, "-- nothing to abort\n");
+ return FAILED;
+ }
+
+ rtsx_set_stat(chip, RTSX_STAT_ABORT);
+
+ scsi_unlock(host);
+
+ /* Wait for the aborted command to finish */
+ wait_for_completion(&dev->notify);
+
+ return SUCCESS;
+}
+
+/* Reset the state of the device; there is nothing to do for this PCI
+ * reader, so simply report success */
+static int device_reset(struct scsi_cmnd *srb)
+{
+ int result = 0;
+ struct rtsx_dev *dev = host_to_rtsx(srb->device->host);
+
+ dev_info(&dev->pci->dev, "%s called\n", __func__);
+
+ return result < 0 ? FAILED : SUCCESS;
+}
+
+/* Simulate a SCSI bus reset; this PCI reader has no bus to reset, so
+ * simply report success. */
+static int bus_reset(struct scsi_cmnd *srb)
+{
+ int result = 0;
+ struct rtsx_dev *dev = host_to_rtsx(srb->device->host);
+
+ dev_info(&dev->pci->dev, "%s called\n", __func__);
+
+ return result < 0 ? FAILED : SUCCESS;
+}
+
+
+/*
+ * this defines our host template, with which we'll allocate hosts
+ */
+
+static struct scsi_host_template rtsx_host_template = {
+ /* basic userland interface stuff */
+ .name = CR_DRIVER_NAME,
+ .proc_name = CR_DRIVER_NAME,
+ .info = host_info,
+
+ /* command interface -- queued only */
+ .queuecommand = queuecommand,
+
+ /* error and abort handlers */
+ .eh_abort_handler = command_abort,
+ .eh_device_reset_handler = device_reset,
+ .eh_bus_reset_handler = bus_reset,
+
+ /* queue commands only, only one command per LUN */
+ .can_queue = 1,
+ .cmd_per_lun = 1,
+
+ /* unknown initiator id */
+ .this_id = -1,
+
+ .slave_alloc = slave_alloc,
+ .slave_configure = slave_configure,
+
+ /* lots of sg segments can be handled */
+ .sg_tablesize = SG_ALL,
+
+ /* limit the total size of a transfer to 120 KB */
+ .max_sectors = 240,
+
+ /* merge commands... this seems to help performance, but
+	 * periodically someone should re-test to see which setting
+	 * performs better.
+ */
+ .use_clustering = 1,
+
+ /* emulated HBA */
+ .emulated = 1,
+
+ /* we do our own delay after a device or bus reset */
+ .skip_settle_delay = 1,
+
+ /* module management */
+ .module = THIS_MODULE
+};
+
+
+static int rtsx_acquire_irq(struct rtsx_dev *dev)
+{
+ struct rtsx_chip *chip = dev->chip;
+
+ dev_info(&dev->pci->dev, "%s: chip->msi_en = %d, pci->irq = %d\n",
+ __func__, chip->msi_en, dev->pci->irq);
+
+ if (request_irq(dev->pci->irq, rtsx_interrupt,
+ chip->msi_en ? 0 : IRQF_SHARED,
+ CR_DRIVER_NAME, dev)) {
+ dev_err(&dev->pci->dev,
+ "rtsx: unable to grab IRQ %d, disabling device\n",
+ dev->pci->irq);
+ return -1;
+ }
+
+ dev->irq = dev->pci->irq;
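+	/* Enable legacy INTx only when MSI is not in use */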
+ pci_intx(dev->pci, !chip->msi_en);
+
+ return 0;
+}
+
+
+int rtsx_read_pci_cfg_byte(u8 bus, u8 dev, u8 func, u8 offset, u8 *val)
+{
+ struct pci_dev *pdev;
+ u8 data;
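+	/* devfn: device number in bits 7:3, function number in bits 2:0 */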
+ u8 devfn = (dev << 3) | func;
+
+ pdev = pci_get_bus_and_slot(bus, devfn);
+ if (!pdev)
+ return -1;
+
+ pci_read_config_byte(pdev, offset, &data);
+ if (val)
+ *val = data;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * power management
+ */
+static int rtsx_suspend(struct pci_dev *pci, pm_message_t state)
+{
+ struct rtsx_dev *dev = (struct rtsx_dev *)pci_get_drvdata(pci);
+ struct rtsx_chip *chip;
+
+ if (!dev)
+ return 0;
+
+ /* lock the device pointers */
+ mutex_lock(&(dev->dev_mutex));
+
+ chip = dev->chip;
+
+ rtsx_do_before_power_down(chip, PM_S3);
+
+ if (dev->irq >= 0) {
+ synchronize_irq(dev->irq);
+ free_irq(dev->irq, (void *)dev);
+ dev->irq = -1;
+ }
+
+ if (chip->msi_en)
+ pci_disable_msi(pci);
+
+ pci_save_state(pci);
+ pci_enable_wake(pci, pci_choose_state(pci, state), 1);
+ pci_disable_device(pci);
+ pci_set_power_state(pci, pci_choose_state(pci, state));
+
+ /* unlock the device pointers */
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+}
+
+static int rtsx_resume(struct pci_dev *pci)
+{
+ struct rtsx_dev *dev = (struct rtsx_dev *)pci_get_drvdata(pci);
+ struct rtsx_chip *chip;
+
+ if (!dev)
+ return 0;
+
+ chip = dev->chip;
+
+ /* lock the device pointers */
+ mutex_lock(&(dev->dev_mutex));
+
+ pci_set_power_state(pci, PCI_D0);
+ pci_restore_state(pci);
+ if (pci_enable_device(pci) < 0) {
+ dev_err(&dev->pci->dev,
+ "%s: pci_enable_device failed, disabling device\n",
+ CR_DRIVER_NAME);
+ /* unlock the device pointers */
+ mutex_unlock(&dev->dev_mutex);
+ return -EIO;
+ }
+ pci_set_master(pci);
+
+ if (chip->msi_en) {
+ if (pci_enable_msi(pci) < 0)
+ chip->msi_en = 0;
+ }
+
+ if (rtsx_acquire_irq(dev) < 0) {
+ /* unlock the device pointers */
+ mutex_unlock(&dev->dev_mutex);
+ return -EIO;
+ }
+
+ rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 0x00);
+ rtsx_init_chip(chip);
+
+ /* unlock the device pointers */
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static void rtsx_shutdown(struct pci_dev *pci)
+{
+ struct rtsx_dev *dev = (struct rtsx_dev *)pci_get_drvdata(pci);
+ struct rtsx_chip *chip;
+
+ if (!dev)
+ return;
+
+ chip = dev->chip;
+
+ rtsx_do_before_power_down(chip, PM_S1);
+
+ if (dev->irq >= 0) {
+ synchronize_irq(dev->irq);
+ free_irq(dev->irq, (void *)dev);
+ dev->irq = -1;
+ }
+
+ if (chip->msi_en)
+ pci_disable_msi(pci);
+
+ pci_disable_device(pci);
+
+ return;
+}
+
+static int rtsx_control_thread(void *__dev)
+{
+ struct rtsx_dev *dev = (struct rtsx_dev *)__dev;
+ struct rtsx_chip *chip = dev->chip;
+ struct Scsi_Host *host = rtsx_to_host(dev);
+
+ for (;;) {
+ if (wait_for_completion_interruptible(&dev->cmnd_ready))
+ break;
+
+ /* lock the device pointers */
+ mutex_lock(&(dev->dev_mutex));
+
+ /* if the device has disconnected, we are free to exit */
+ if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
+ dev_info(&dev->pci->dev, "-- rtsx-control exiting\n");
+ mutex_unlock(&dev->dev_mutex);
+ break;
+ }
+
+ /* lock access to the state */
+ scsi_lock(host);
+
+ /* has the command aborted ? */
+ if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
+ chip->srb->result = DID_ABORT << 16;
+ goto SkipForAbort;
+ }
+
+ scsi_unlock(host);
+
+ /* reject the command if the direction indicator
+ * is UNKNOWN
+ */
+ if (chip->srb->sc_data_direction == DMA_BIDIRECTIONAL) {
+ dev_err(&dev->pci->dev, "UNKNOWN data direction\n");
+ chip->srb->result = DID_ERROR << 16;
+ }
+
+ /* reject if target != 0 or if LUN is higher than
+ * the maximum known LUN
+ */
+ else if (chip->srb->device->id) {
+ dev_err(&dev->pci->dev, "Bad target number (%d:%d)\n",
+ chip->srb->device->id,
+ chip->srb->device->lun);
+ chip->srb->result = DID_BAD_TARGET << 16;
+ }
+
+ else if (chip->srb->device->lun > chip->max_lun) {
+ dev_err(&dev->pci->dev, "Bad LUN (%d:%d)\n",
+ chip->srb->device->id,
+ chip->srb->device->lun);
+ chip->srb->result = DID_BAD_TARGET << 16;
+ }
+
+ /* we've got a command, let's do it! */
+ else {
+ RTSX_DEBUG(scsi_show_command(chip->srb));
+ rtsx_invoke_transport(chip->srb, chip);
+ }
+
+ /* lock access to the state */
+ scsi_lock(host);
+
+ /* did the command already complete because of a disconnect? */
+ if (!chip->srb)
+ ; /* nothing to do */
+
+ /* indicate that the command is done */
+ else if (chip->srb->result != DID_ABORT << 16) {
+ chip->srb->scsi_done(chip->srb);
+ } else {
+SkipForAbort:
+ dev_err(&dev->pci->dev, "scsi command aborted\n");
+ }
+
+ if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
+ complete(&(dev->notify));
+
+ rtsx_set_stat(chip, RTSX_STAT_IDLE);
+ }
+
+ /* finished working on this command */
+ chip->srb = NULL;
+ scsi_unlock(host);
+
+ /* unlock the device pointers */
+ mutex_unlock(&dev->dev_mutex);
+ } /* for (;;) */
+
+ /* notify the exit routine that we're actually exiting now
+ *
+ * complete()/wait_for_completion() is similar to up()/down(),
+ * except that complete() is safe in the case where the structure
+ * is getting deleted in a parallel mode of execution (i.e. just
+ * after the down()) -- that's necessary for the thread-shutdown
+ * case.
+ *
+ * complete_and_exit() goes even further than this -- it is safe in
+ * the case that the thread of the caller is going away (not just
+ * the structure) -- this is necessary for the module-remove case.
+ * This is important in preemption kernels, which transfer the flow
+ * of execution immediately upon a complete().
+ */
+ complete_and_exit(&dev->control_exit, 0);
+}
+
+
+static int rtsx_polling_thread(void *__dev)
+{
+ struct rtsx_dev *dev = (struct rtsx_dev *)__dev;
+ struct rtsx_chip *chip = dev->chip;
+ struct sd_info *sd_card = &(chip->sd_card);
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ sd_card->cleanup_counter = 0;
+ xd_card->cleanup_counter = 0;
+ ms_card->cleanup_counter = 0;
+
+ /* Wait until SCSI scan finished */
+ wait_timeout((delay_use + 5) * 1000);
+
+ for (;;) {
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(POLLING_INTERVAL);
+
+ /* lock the device pointers */
+ mutex_lock(&(dev->dev_mutex));
+
+ /* if the device has disconnected, we are free to exit */
+ if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
+ dev_info(&dev->pci->dev, "-- rtsx-polling exiting\n");
+ mutex_unlock(&dev->dev_mutex);
+ break;
+ }
+
+ mutex_unlock(&dev->dev_mutex);
+
+ mspro_polling_format_status(chip);
+
+ /* lock the device pointers */
+ mutex_lock(&(dev->dev_mutex));
+
+ rtsx_polling_func(chip);
+
+ /* unlock the device pointers */
+ mutex_unlock(&dev->dev_mutex);
+ }
+
+ complete_and_exit(&dev->polling_exit, 0);
+}
+
+/*
+ * interrupt handler
+ */
+static irqreturn_t rtsx_interrupt(int irq, void *dev_id)
+{
+ struct rtsx_dev *dev = dev_id;
+ struct rtsx_chip *chip;
+ int retval;
+ u32 status;
+
+ if (dev)
+ chip = dev->chip;
+ else
+ return IRQ_NONE;
+
+ if (!chip)
+ return IRQ_NONE;
+
+ spin_lock(&dev->reg_lock);
+
+ retval = rtsx_pre_handle_interrupt(chip);
+ if (retval == STATUS_FAIL) {
+ spin_unlock(&dev->reg_lock);
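+		/*
+		 * An interrupt register reading back as all ones usually
+		 * means the device has been removed or powered off; claim
+		 * the interrupt in that case instead of reporting it as
+		 * spurious.
+		 */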
+ if (chip->int_reg == 0xFFFFFFFF)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+ }
+
+ status = chip->int_reg;
+
+ if (dev->check_card_cd) {
+ if (!(dev->check_card_cd & status)) {
+			/* card does not exist, return TRANS_RESULT_FAIL */
+ dev->trans_result = TRANS_RESULT_FAIL;
+ if (dev->done)
+ complete(dev->done);
+ goto Exit;
+ }
+ }
+
+ if (status & (NEED_COMPLETE_INT | DELINK_INT)) {
+ if (status & (TRANS_FAIL_INT | DELINK_INT)) {
+ if (status & DELINK_INT)
+ RTSX_SET_DELINK(chip);
+ dev->trans_result = TRANS_RESULT_FAIL;
+ if (dev->done)
+ complete(dev->done);
+ } else if (status & TRANS_OK_INT) {
+ dev->trans_result = TRANS_RESULT_OK;
+ if (dev->done)
+ complete(dev->done);
+ } else if (status & DATA_DONE_INT) {
+ dev->trans_result = TRANS_NOT_READY;
+ if (dev->done && (dev->trans_state == STATE_TRANS_SG))
+ complete(dev->done);
+ }
+ }
+
+Exit:
+ spin_unlock(&dev->reg_lock);
+ return IRQ_HANDLED;
+}
+
+
+/* Release all our dynamic resources */
+static void rtsx_release_resources(struct rtsx_dev *dev)
+{
+ dev_info(&dev->pci->dev, "-- %s\n", __func__);
+
+ /* Tell the control thread to exit. The SCSI host must
+ * already have been removed so it won't try to queue
+ * any more commands.
+ */
+ dev_info(&dev->pci->dev, "-- sending exit command to thread\n");
+ complete(&dev->cmnd_ready);
+ if (dev->ctl_thread)
+ wait_for_completion(&dev->control_exit);
+ if (dev->polling_thread)
+ wait_for_completion(&dev->polling_exit);
+
+ wait_timeout(200);
+
+ if (dev->rtsx_resv_buf) {
+ dma_free_coherent(&(dev->pci->dev), RTSX_RESV_BUF_LEN,
+ dev->rtsx_resv_buf, dev->rtsx_resv_buf_addr);
+ dev->chip->host_cmds_ptr = NULL;
+ dev->chip->host_sg_tbl_ptr = NULL;
+ }
+
+ if (dev->irq > 0)
+ free_irq(dev->irq, (void *)dev);
+ if (dev->chip->msi_en)
+ pci_disable_msi(dev->pci);
+ if (dev->remap_addr)
+ iounmap(dev->remap_addr);
+
+ pci_disable_device(dev->pci);
+ pci_release_regions(dev->pci);
+
+ rtsx_release_chip(dev->chip);
+ kfree(dev->chip);
+}
+
+/* First stage of disconnect processing: stop all commands and remove
+ * the host */
+static void quiesce_and_remove_host(struct rtsx_dev *dev)
+{
+ struct Scsi_Host *host = rtsx_to_host(dev);
+ struct rtsx_chip *chip = dev->chip;
+
+ /* Prevent new transfers, stop the current command, and
+ * interrupt a SCSI-scan or device-reset delay */
+ mutex_lock(&dev->dev_mutex);
+ scsi_lock(host);
+ rtsx_set_stat(chip, RTSX_STAT_DISCONNECT);
+ scsi_unlock(host);
+ mutex_unlock(&dev->dev_mutex);
+ wake_up(&dev->delay_wait);
+ wait_for_completion(&dev->scanning_done);
+
+	/* Wait some time to let other threads exit */
+ wait_timeout(100);
+
+ /* queuecommand won't accept any new commands and the control
+ * thread won't execute a previously-queued command. If there
+ * is such a command pending, complete it with an error. */
+ mutex_lock(&dev->dev_mutex);
+ if (chip->srb) {
+ chip->srb->result = DID_NO_CONNECT << 16;
+ scsi_lock(host);
+ chip->srb->scsi_done(dev->chip->srb);
+ chip->srb = NULL;
+ scsi_unlock(host);
+ }
+ mutex_unlock(&dev->dev_mutex);
+
+ /* Now we own no commands so it's safe to remove the SCSI host */
+ scsi_remove_host(host);
+}
+
+/* Second stage of disconnect processing: deallocate all resources */
+static void release_everything(struct rtsx_dev *dev)
+{
+ rtsx_release_resources(dev);
+
+ /* Drop our reference to the host; the SCSI core will free it
+ * when the refcount becomes 0. */
+ scsi_host_put(rtsx_to_host(dev));
+}
+
+/* Thread to carry out delayed SCSI-device scanning */
+static int rtsx_scan_thread(void *__dev)
+{
+ struct rtsx_dev *dev = (struct rtsx_dev *)__dev;
+ struct rtsx_chip *chip = dev->chip;
+
+ /* Wait for the timeout to expire or for a disconnect */
+ if (delay_use > 0) {
+ dev_info(&dev->pci->dev,
+ "%s: waiting for device to settle before scanning\n",
+ CR_DRIVER_NAME);
+ wait_event_interruptible_timeout(dev->delay_wait,
+ rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT),
+ delay_use * HZ);
+ }
+
+ /* If the device is still connected, perform the scanning */
+ if (!rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
+ scsi_scan_host(rtsx_to_host(dev));
+ dev_info(&dev->pci->dev, "%s: device scan complete\n",
+ CR_DRIVER_NAME);
+
+ /* Should we unbind if no devices were detected? */
+ }
+
+ complete_and_exit(&dev->scanning_done, 0);
+}
+
+static void rtsx_init_options(struct rtsx_chip *chip)
+{
+ chip->vendor_id = chip->rtsx->pci->vendor;
+ chip->product_id = chip->rtsx->pci->device;
+ chip->adma_mode = 1;
+ chip->lun_mc = 0;
+ chip->driver_first_load = 1;
+#ifdef HW_AUTO_SWITCH_SD_BUS
+ chip->sdio_in_charge = 0;
+#endif
+
+ chip->mspro_formatter_enable = 1;
+ chip->ignore_sd = 0;
+ chip->use_hw_setting = 0;
+ chip->lun_mode = DEFAULT_SINGLE;
+ chip->auto_delink_en = auto_delink_en;
+ chip->ss_en = ss_en;
+ chip->ss_idle_period = ss_interval * 1000;
+ chip->remote_wakeup_en = 0;
+ chip->aspm_l0s_l1_en = aspm_l0s_l1_en;
+ chip->dynamic_aspm = 1;
+ chip->fpga_sd_sdr104_clk = CLK_200;
+ chip->fpga_sd_ddr50_clk = CLK_100;
+ chip->fpga_sd_sdr50_clk = CLK_100;
+ chip->fpga_sd_hs_clk = CLK_100;
+ chip->fpga_mmc_52m_clk = CLK_80;
+ chip->fpga_ms_hg_clk = CLK_80;
+ chip->fpga_ms_4bit_clk = CLK_80;
+ chip->fpga_ms_1bit_clk = CLK_40;
+ chip->asic_sd_sdr104_clk = 203;
+ chip->asic_sd_sdr50_clk = 98;
+ chip->asic_sd_ddr50_clk = 98;
+ chip->asic_sd_hs_clk = 98;
+ chip->asic_mmc_52m_clk = 98;
+ chip->asic_ms_hg_clk = 117;
+ chip->asic_ms_4bit_clk = 78;
+ chip->asic_ms_1bit_clk = 39;
+ chip->ssc_depth_sd_sdr104 = SSC_DEPTH_2M;
+ chip->ssc_depth_sd_sdr50 = SSC_DEPTH_2M;
+ chip->ssc_depth_sd_ddr50 = SSC_DEPTH_1M;
+ chip->ssc_depth_sd_hs = SSC_DEPTH_1M;
+ chip->ssc_depth_mmc_52m = SSC_DEPTH_1M;
+ chip->ssc_depth_ms_hg = SSC_DEPTH_1M;
+ chip->ssc_depth_ms_4bit = SSC_DEPTH_512K;
+ chip->ssc_depth_low_speed = SSC_DEPTH_512K;
+ chip->ssc_en = 1;
+ chip->sd_speed_prior = 0x01040203;
+ chip->sd_current_prior = 0x00010203;
+ chip->sd_ctl = SD_PUSH_POINT_AUTO |
+ SD_SAMPLE_POINT_AUTO |
+ SUPPORT_MMC_DDR_MODE;
+ chip->sd_ddr_tx_phase = 0;
+ chip->mmc_ddr_tx_phase = 1;
+ chip->sd_default_tx_phase = 15;
+ chip->sd_default_rx_phase = 15;
+ chip->pmos_pwr_on_interval = 200;
+ chip->sd_voltage_switch_delay = 1000;
+ chip->ms_power_class_en = 3;
+
+ chip->sd_400mA_ocp_thd = 1;
+ chip->sd_800mA_ocp_thd = 5;
+ chip->ms_ocp_thd = 2;
+
+ chip->card_drive_sel = 0x55;
+ chip->sd30_drive_sel_1v8 = 0x03;
+ chip->sd30_drive_sel_3v3 = 0x01;
+
+ chip->do_delink_before_power_down = 1;
+ chip->auto_power_down = 1;
+ chip->polling_config = 0;
+
+ chip->force_clkreq_0 = 1;
+ chip->ft2_fast_mode = 0;
+
+ chip->sdio_retry_cnt = 1;
+
+ chip->xd_timeout = 2000;
+ chip->sd_timeout = 10000;
+ chip->ms_timeout = 2000;
+ chip->mspro_timeout = 15000;
+
+ chip->power_down_in_ss = 1;
+
+ chip->sdr104_en = 1;
+ chip->sdr50_en = 1;
+ chip->ddr50_en = 1;
+
+ chip->delink_stage1_step = 100;
+ chip->delink_stage2_step = 40;
+ chip->delink_stage3_step = 20;
+
+ chip->auto_delink_in_L1 = 1;
+ chip->blink_led = 1;
+ chip->msi_en = msi_en;
+ chip->hp_watch_bios_hotplug = 0;
+ chip->max_payload = 0;
+ chip->phy_voltage = 0;
+
+ chip->support_ms_8bit = 1;
+ chip->s3_pwr_off_delay = 1000;
+}
+
+static int rtsx_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ struct Scsi_Host *host;
+ struct rtsx_dev *dev;
+ int err = 0;
+ struct task_struct *th;
+
+ RTSX_DEBUGP("Realtek PCI-E card reader detected\n");
+
+ err = pci_enable_device(pci);
+ if (err < 0) {
+ dev_err(&pci->dev, "PCI enable device failed!\n");
+ return err;
+ }
+
+ err = pci_request_regions(pci, CR_DRIVER_NAME);
+ if (err < 0) {
+ dev_err(&pci->dev, "PCI request regions for %s failed!\n",
+ CR_DRIVER_NAME);
+ pci_disable_device(pci);
+ return err;
+ }
+
+ /*
+ * Ask the SCSI layer to allocate a host structure, with extra
+ * space at the end for our private rtsx_dev structure.
+ */
+ host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
+ if (!host) {
+ dev_err(&pci->dev, "Unable to allocate the scsi host\n");
+ pci_release_regions(pci);
+ pci_disable_device(pci);
+ return -ENOMEM;
+ }
+
+ dev = host_to_rtsx(host);
+ memset(dev, 0, sizeof(struct rtsx_dev));
+
+ dev->chip = kzalloc(sizeof(struct rtsx_chip), GFP_KERNEL);
+ if (dev->chip == NULL) {
+ err = -ENOMEM;
+ goto errout;
+ }
+
+ spin_lock_init(&dev->reg_lock);
+ mutex_init(&(dev->dev_mutex));
+ init_completion(&dev->cmnd_ready);
+ init_completion(&dev->control_exit);
+ init_completion(&dev->polling_exit);
+ init_completion(&(dev->notify));
+ init_completion(&dev->scanning_done);
+ init_waitqueue_head(&dev->delay_wait);
+
+ dev->pci = pci;
+ dev->irq = -1;
+
+ dev_info(&pci->dev, "Resource length: 0x%x\n",
+ (unsigned int)pci_resource_len(pci, 0));
+ dev->addr = pci_resource_start(pci, 0);
+ dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0));
+ if (dev->remap_addr == NULL) {
+ dev_err(&pci->dev, "ioremap error\n");
+ err = -ENXIO;
+ goto errout;
+ }
+
+ /*
+	 * Using an "unsigned long" cast here to eliminate a gcc warning on
+	 * 64-bit systems
+ */
+ dev_info(&pci->dev, "Original address: 0x%lx, remapped address: 0x%lx\n",
+ (unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));
+
+ dev->rtsx_resv_buf = dma_alloc_coherent(&(pci->dev), RTSX_RESV_BUF_LEN,
+ &(dev->rtsx_resv_buf_addr), GFP_KERNEL);
+ if (dev->rtsx_resv_buf == NULL) {
+ dev_err(&pci->dev, "alloc dma buffer fail\n");
+ err = -ENXIO;
+ goto errout;
+ }
+ dev->chip->host_cmds_ptr = dev->rtsx_resv_buf;
+ dev->chip->host_cmds_addr = dev->rtsx_resv_buf_addr;
+ dev->chip->host_sg_tbl_ptr = dev->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
+ dev->chip->host_sg_tbl_addr = dev->rtsx_resv_buf_addr +
+ HOST_CMDS_BUF_LEN;
+
+ dev->chip->rtsx = dev;
+
+ rtsx_init_options(dev->chip);
+
+ dev_info(&pci->dev, "pci->irq = %d\n", pci->irq);
+
+ if (dev->chip->msi_en) {
+ if (pci_enable_msi(pci) < 0)
+ dev->chip->msi_en = 0;
+ }
+
+ if (rtsx_acquire_irq(dev) < 0) {
+ err = -EBUSY;
+ goto errout;
+ }
+
+ pci_set_master(pci);
+ synchronize_irq(dev->irq);
+
+ rtsx_init_chip(dev->chip);
+
+ /* set the supported max_lun and max_id for the scsi host
+	 * NOTE: the minimum value of max_id is 1 */
+ host->max_id = 1;
+ host->max_lun = dev->chip->max_lun;
+
+ /* Start up our control thread */
+ th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME);
+ if (IS_ERR(th)) {
+ dev_err(&pci->dev, "Unable to start control thread\n");
+ err = PTR_ERR(th);
+ goto errout;
+ }
+ dev->ctl_thread = th;
+
+ err = scsi_add_host(host, &pci->dev);
+ if (err) {
+ dev_err(&pci->dev, "Unable to add the scsi host\n");
+ goto errout;
+ }
+
+ /* Start up the thread for delayed SCSI-device scanning */
+ th = kthread_run(rtsx_scan_thread, dev, "rtsx-scan");
+ if (IS_ERR(th)) {
+ dev_err(&pci->dev, "Unable to start the device-scanning thread\n");
+ complete(&dev->scanning_done);
+ quiesce_and_remove_host(dev);
+ err = PTR_ERR(th);
+ goto errout;
+ }
+
+	/* Start up the polling thread */
+ th = kthread_run(rtsx_polling_thread, dev, "rtsx-polling");
+ if (IS_ERR(th)) {
+ dev_err(&pci->dev, "Unable to start the device-polling thread\n");
+ quiesce_and_remove_host(dev);
+ err = PTR_ERR(th);
+ goto errout;
+ }
+ dev->polling_thread = th;
+
+ pci_set_drvdata(pci, dev);
+
+ return 0;
+
+ /* We come here if there are any problems */
+errout:
+ dev_err(&pci->dev, "rtsx_probe() failed\n");
+ release_everything(dev);
+
+ return err;
+}
+
+
+static void rtsx_remove(struct pci_dev *pci)
+{
+ struct rtsx_dev *dev = (struct rtsx_dev *)pci_get_drvdata(pci);
+
+ dev_info(&pci->dev, "rtsx_remove() called\n");
+
+ quiesce_and_remove_host(dev);
+ release_everything(dev);
+
+ pci_set_drvdata(pci, NULL);
+}
+
+/* PCI IDs */
+static DEFINE_PCI_DEVICE_TABLE(rtsx_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5208), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5288), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, rtsx_ids);
+
+/* pci_driver definition */
+static struct pci_driver driver = {
+ .name = CR_DRIVER_NAME,
+ .id_table = rtsx_ids,
+ .probe = rtsx_probe,
+ .remove = rtsx_remove,
+#ifdef CONFIG_PM
+ .suspend = rtsx_suspend,
+ .resume = rtsx_resume,
+#endif
+ .shutdown = rtsx_shutdown,
+};
+
+static int __init rtsx_init(void)
+{
+ pr_info("Initializing Realtek PCIE storage driver...\n");
+
+ return pci_register_driver(&driver);
+}
+
+static void __exit rtsx_exit(void)
+{
+ pr_info("rtsx_exit() called\n");
+
+ pci_unregister_driver(&driver);
+
+ pr_info("%s module exit\n", CR_DRIVER_NAME);
+}
+
+module_init(rtsx_init)
+module_exit(rtsx_exit)
diff --git a/drivers/staging/rts5208/rtsx.h b/drivers/staging/rts5208/rtsx.h
new file mode 100644
index 000000000000..37eab56ee02e
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx.h
@@ -0,0 +1,185 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_H
+#define __REALTEK_RTSX_H
+
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/mutex.h>
+#include <linux/cdrom.h>
+#include <linux/workqueue.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_devinfo.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+
+#include "debug.h"
+#include "trace.h"
+#include "general.h"
+
+#define CR_DRIVER_NAME "rts5208"
+
+#define pci_get_bus_and_slot(bus, devfn) \
+ pci_get_domain_bus_and_slot(0, (bus), (devfn))
+
+/*
+ * macros for easy use
+ */
+#define rtsx_writel(chip, reg, value) \
+ iowrite32(value, (chip)->rtsx->remap_addr + reg)
+#define rtsx_readl(chip, reg) \
+ ioread32((chip)->rtsx->remap_addr + reg)
+#define rtsx_writew(chip, reg, value) \
+ iowrite16(value, (chip)->rtsx->remap_addr + reg)
+#define rtsx_readw(chip, reg) \
+ ioread16((chip)->rtsx->remap_addr + reg)
+#define rtsx_writeb(chip, reg, value) \
+ iowrite8(value, (chip)->rtsx->remap_addr + reg)
+#define rtsx_readb(chip, reg) \
+ ioread8((chip)->rtsx->remap_addr + reg)
+
+#define rtsx_read_config_byte(chip, where, val) \
+ pci_read_config_byte((chip)->rtsx->pci, where, val)
+
+#define rtsx_write_config_byte(chip, where, val) \
+ pci_write_config_byte((chip)->rtsx->pci, where, val)
+
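+/*
+ * Sleep for roughly @msecs milliseconds in the given task state; the
+ * millisecond count is converted to jiffies with (msecs) * HZ / 1000.
+ */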
+#define wait_timeout_x(task_state, msecs) \
+do { \
+ set_current_state((task_state)); \
+ schedule_timeout((msecs) * HZ / 1000); \
+} while (0)
+#define wait_timeout(msecs) wait_timeout_x(TASK_INTERRUPTIBLE, (msecs))
+
+
+#define STATE_TRANS_NONE 0
+#define STATE_TRANS_CMD 1
+#define STATE_TRANS_BUF 2
+#define STATE_TRANS_SG 3
+
+#define TRANS_NOT_READY 0
+#define TRANS_RESULT_OK 1
+#define TRANS_RESULT_FAIL 2
+
+#define SCSI_LUN(srb) ((srb)->device->lun)
+
+typedef unsigned long DELAY_PARA_T;
+
+struct rtsx_chip;
+
+struct rtsx_dev {
+ struct pci_dev *pci;
+
+ /* pci resources */
+ unsigned long addr;
+ void __iomem *remap_addr;
+ int irq;
+
+ /* locks */
+ spinlock_t reg_lock;
+
+ struct task_struct *ctl_thread; /* the control thread */
+ struct task_struct *polling_thread; /* the polling thread */
+
+ /* mutual exclusion and synchronization structures */
+ struct completion cmnd_ready; /* to sleep thread on */
+ struct completion control_exit; /* control thread exit */
+ struct completion polling_exit; /* polling thread exit */
+ struct completion notify; /* thread begin/end */
+ struct completion scanning_done; /* wait for scan thread */
+
+ wait_queue_head_t delay_wait; /* wait during scan, reset */
+ struct mutex dev_mutex;
+
+ /* host reserved buffer */
+ void *rtsx_resv_buf;
+ dma_addr_t rtsx_resv_buf_addr;
+
+ char trans_result;
+ char trans_state;
+
+ struct completion *done;
+	/* Whether the interrupt handler should care about card detect (CD) info */
+ u32 check_card_cd;
+
+ struct rtsx_chip *chip;
+};
+
+typedef struct rtsx_dev rtsx_dev_t;
+
+/* Convert between rtsx_dev and the corresponding Scsi_Host */
+static inline struct Scsi_Host *rtsx_to_host(struct rtsx_dev *dev)
+{
+ return container_of((void *) dev, struct Scsi_Host, hostdata);
+}
+static inline struct rtsx_dev *host_to_rtsx(struct Scsi_Host *host)
+{
+ return (struct rtsx_dev *) host->hostdata;
+}
+
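+/*
+ * Pack the current time into an 8-byte buffer: tv_sec and tv_usec are each
+ * stored as a big-endian 32-bit value.
+ */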
+static inline void get_current_time(u8 *timeval_buf, int buf_len)
+{
+ struct timeval tv;
+
+ if (!timeval_buf || (buf_len < 8))
+ return;
+
+ do_gettimeofday(&tv);
+
+ timeval_buf[0] = (u8)(tv.tv_sec >> 24);
+ timeval_buf[1] = (u8)(tv.tv_sec >> 16);
+ timeval_buf[2] = (u8)(tv.tv_sec >> 8);
+ timeval_buf[3] = (u8)(tv.tv_sec);
+ timeval_buf[4] = (u8)(tv.tv_usec >> 24);
+ timeval_buf[5] = (u8)(tv.tv_usec >> 16);
+ timeval_buf[6] = (u8)(tv.tv_usec >> 8);
+ timeval_buf[7] = (u8)(tv.tv_usec);
+}
+
+/* The scsi_lock() and scsi_unlock() macros protect the single queue
+ * element srb for write access */
+#define scsi_unlock(host) spin_unlock_irq(host->host_lock)
+#define scsi_lock(host) spin_lock_irq(host->host_lock)
+
+#define lock_state(chip) spin_lock_irq(&((chip)->rtsx->reg_lock))
+#define unlock_state(chip) spin_unlock_irq(&((chip)->rtsx->reg_lock))
+
+/* struct scsi_cmnd transfer buffer access utilities */
+enum xfer_buf_dir {TO_XFER_BUF, FROM_XFER_BUF};
+
+int rtsx_read_pci_cfg_byte(u8 bus, u8 dev, u8 func, u8 offset, u8 *val);
+
+#endif /* __REALTEK_RTSX_H */
diff --git a/drivers/staging/rts5208/rtsx_card.c b/drivers/staging/rts5208/rtsx_card.c
new file mode 100644
index 000000000000..3055eb10c076
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_card.c
@@ -0,0 +1,1126 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+
+#include "rtsx.h"
+#include "rtsx_transport.h"
+#include "rtsx_scsi.h"
+#include "rtsx_card.h"
+
+#include "rtsx_sys.h"
+#include "general.h"
+
+#include "sd.h"
+#include "xd.h"
+#include "ms.h"
+
+void do_remaining_work(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+#ifdef XD_DELAY_WRITE
+ struct xd_info *xd_card = &(chip->xd_card);
+#endif
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ if (chip->card_ready & SD_CARD) {
+ if (sd_card->seq_mode) {
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ sd_card->cleanup_counter++;
+ } else {
+ sd_card->cleanup_counter = 0;
+ }
+ }
+
+#ifdef XD_DELAY_WRITE
+ if (chip->card_ready & XD_CARD) {
+ if (xd_card->delay_write.delay_write_flag) {
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ xd_card->cleanup_counter++;
+ } else {
+ xd_card->cleanup_counter = 0;
+ }
+ }
+#endif
+
+ if (chip->card_ready & MS_CARD) {
+ if (CHK_MSPRO(ms_card)) {
+ if (ms_card->seq_mode) {
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ ms_card->cleanup_counter++;
+ } else {
+ ms_card->cleanup_counter = 0;
+ }
+ } else {
+#ifdef MS_DELAY_WRITE
+ if (ms_card->delay_write.delay_write_flag) {
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ ms_card->cleanup_counter++;
+ } else {
+ ms_card->cleanup_counter = 0;
+ }
+#endif
+ }
+ }
+
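+	/*
+	 * Once a card has stayed in sequential or delayed-write mode for more
+	 * than POLLING_WAIT_CNT consecutive passes through this function,
+	 * flush its pending state.
+	 */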
+ if (sd_card->cleanup_counter > POLLING_WAIT_CNT)
+ sd_cleanup_work(chip);
+
+ if (xd_card->cleanup_counter > POLLING_WAIT_CNT)
+ xd_cleanup_work(chip);
+
+ if (ms_card->cleanup_counter > POLLING_WAIT_CNT)
+ ms_cleanup_work(chip);
+}
+
+void try_to_switch_sdio_ctrl(struct rtsx_chip *chip)
+{
+ u8 reg1 = 0, reg2 = 0;
+
+ rtsx_read_register(chip, 0xFF34, &reg1);
+ rtsx_read_register(chip, 0xFF38, &reg2);
+ RTSX_DEBUGP("reg 0xFF34: 0x%x, reg 0xFF38: 0x%x\n", reg1, reg2);
+ if ((reg1 & 0xC0) && (reg2 & 0xC0)) {
+ chip->sd_int = 1;
+ rtsx_write_register(chip, SDIO_CTRL, 0xFF, SDIO_BUS_CTRL | SDIO_CD_CTRL);
+ rtsx_write_register(chip, PWR_GATE_CTRL, LDO3318_PWR_MASK, LDO_ON);
+ }
+}
+
+#ifdef SUPPORT_SDIO_ASPM
+void dynamic_configure_sdio_aspm(struct rtsx_chip *chip)
+{
+ u8 buf[12], reg;
+ int i;
+
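+	/*
+	 * Snapshot the vendor-specific SDIO status registers (0xFF08-0xFF13
+	 * and 0xFF25, per the reads below); if they stop changing for
+	 * SDIO_IDLE_COUNT consecutive polls, treat the SDIO function as idle
+	 * and allow it to enter ASPM.
+	 */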
+ for (i = 0; i < 12; i++)
+ rtsx_read_register(chip, 0xFF08 + i, &buf[i]);
+ rtsx_read_register(chip, 0xFF25, &reg);
+ if ((memcmp(buf, chip->sdio_raw_data, 12) != 0) || (reg & 0x03)) {
+ chip->sdio_counter = 0;
+ chip->sdio_idle = 0;
+ } else {
+ if (!chip->sdio_idle) {
+ chip->sdio_counter++;
+ if (chip->sdio_counter >= SDIO_IDLE_COUNT) {
+ chip->sdio_counter = 0;
+ chip->sdio_idle = 1;
+ }
+ }
+ }
+ memcpy(chip->sdio_raw_data, buf, 12);
+
+ if (chip->sdio_idle) {
+ if (!chip->sdio_aspm) {
+ RTSX_DEBUGP("SDIO enter ASPM!\n");
+ rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFC,
+ 0x30 | (chip->aspm_level[1] << 2));
+ chip->sdio_aspm = 1;
+ }
+ } else {
+ if (chip->sdio_aspm) {
+ RTSX_DEBUGP("SDIO exit ASPM!\n");
+ rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFC, 0x30);
+ chip->sdio_aspm = 0;
+ }
+ }
+}
+#endif
+
+void do_reset_sd_card(struct rtsx_chip *chip)
+{
+ int retval;
+
+ RTSX_DEBUGP("%s: %d, card2lun = 0x%x\n", __func__,
+ chip->sd_reset_counter, chip->card2lun[SD_CARD]);
+
+ if (chip->card2lun[SD_CARD] >= MAX_ALLOWED_LUN_CNT) {
+ clear_bit(SD_NR, &(chip->need_reset));
+ chip->sd_reset_counter = 0;
+ chip->sd_show_cnt = 0;
+ return;
+ }
+
+ chip->rw_fail_cnt[chip->card2lun[SD_CARD]] = 0;
+
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ rtsx_write_register(chip, SDIO_CTRL, 0xFF, 0);
+
+ retval = reset_sd_card(chip);
+ if (chip->need_release & SD_CARD)
+ return;
+ if (retval == STATUS_SUCCESS) {
+ clear_bit(SD_NR, &(chip->need_reset));
+ chip->sd_reset_counter = 0;
+ chip->sd_show_cnt = 0;
+ chip->card_ready |= SD_CARD;
+ chip->card_fail &= ~SD_CARD;
+ chip->rw_card[chip->card2lun[SD_CARD]] = sd_rw;
+ } else {
+ if (chip->sd_io || (chip->sd_reset_counter >= MAX_RESET_CNT)) {
+ clear_bit(SD_NR, &(chip->need_reset));
+ chip->sd_reset_counter = 0;
+ chip->sd_show_cnt = 0;
+ } else {
+ chip->sd_reset_counter++;
+ }
+ chip->card_ready &= ~SD_CARD;
+ chip->card_fail |= SD_CARD;
+ chip->capacity[chip->card2lun[SD_CARD]] = 0;
+ chip->rw_card[chip->card2lun[SD_CARD]] = NULL;
+
+ rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN, 0);
+ if (!chip->ft2_fast_mode)
+ card_power_off(chip, SD_CARD);
+ if (chip->sd_io) {
+ chip->sd_int = 0;
+ try_to_switch_sdio_ctrl(chip);
+ } else {
+ disable_card_clock(chip, SD_CARD);
+ }
+ }
+}
+
+void do_reset_xd_card(struct rtsx_chip *chip)
+{
+ int retval;
+
+ RTSX_DEBUGP("%s: %d, card2lun = 0x%x\n", __func__,
+ chip->xd_reset_counter, chip->card2lun[XD_CARD]);
+
+ if (chip->card2lun[XD_CARD] >= MAX_ALLOWED_LUN_CNT) {
+ clear_bit(XD_NR, &(chip->need_reset));
+ chip->xd_reset_counter = 0;
+ chip->xd_show_cnt = 0;
+ return;
+ }
+
+ chip->rw_fail_cnt[chip->card2lun[XD_CARD]] = 0;
+
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ rtsx_write_register(chip, SDIO_CTRL, 0xFF, 0);
+
+ retval = reset_xd_card(chip);
+ if (chip->need_release & XD_CARD)
+ return;
+ if (retval == STATUS_SUCCESS) {
+ clear_bit(XD_NR, &(chip->need_reset));
+ chip->xd_reset_counter = 0;
+ chip->card_ready |= XD_CARD;
+ chip->card_fail &= ~XD_CARD;
+ chip->rw_card[chip->card2lun[XD_CARD]] = xd_rw;
+ } else {
+ if (chip->xd_reset_counter >= MAX_RESET_CNT) {
+ clear_bit(XD_NR, &(chip->need_reset));
+ chip->xd_reset_counter = 0;
+ chip->xd_show_cnt = 0;
+ } else {
+ chip->xd_reset_counter++;
+ }
+ chip->card_ready &= ~XD_CARD;
+ chip->card_fail |= XD_CARD;
+ chip->capacity[chip->card2lun[XD_CARD]] = 0;
+ chip->rw_card[chip->card2lun[XD_CARD]] = NULL;
+
+ rtsx_write_register(chip, CARD_OE, XD_OUTPUT_EN, 0);
+ if (!chip->ft2_fast_mode)
+ card_power_off(chip, XD_CARD);
+ disable_card_clock(chip, XD_CARD);
+ }
+}
+
+void do_reset_ms_card(struct rtsx_chip *chip)
+{
+ int retval;
+
+ RTSX_DEBUGP("%s: %d, card2lun = 0x%x\n", __func__,
+ chip->ms_reset_counter, chip->card2lun[MS_CARD]);
+
+ if (chip->card2lun[MS_CARD] >= MAX_ALLOWED_LUN_CNT) {
+ clear_bit(MS_NR, &(chip->need_reset));
+ chip->ms_reset_counter = 0;
+ chip->ms_show_cnt = 0;
+ return;
+ }
+
+ chip->rw_fail_cnt[chip->card2lun[MS_CARD]] = 0;
+
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ rtsx_write_register(chip, SDIO_CTRL, 0xFF, 0);
+
+ retval = reset_ms_card(chip);
+ if (chip->need_release & MS_CARD)
+ return;
+ if (retval == STATUS_SUCCESS) {
+ clear_bit(MS_NR, &(chip->need_reset));
+ chip->ms_reset_counter = 0;
+ chip->card_ready |= MS_CARD;
+ chip->card_fail &= ~MS_CARD;
+ chip->rw_card[chip->card2lun[MS_CARD]] = ms_rw;
+ } else {
+ if (chip->ms_reset_counter >= MAX_RESET_CNT) {
+ clear_bit(MS_NR, &(chip->need_reset));
+ chip->ms_reset_counter = 0;
+ chip->ms_show_cnt = 0;
+ } else {
+ chip->ms_reset_counter++;
+ }
+ chip->card_ready &= ~MS_CARD;
+ chip->card_fail |= MS_CARD;
+ chip->capacity[chip->card2lun[MS_CARD]] = 0;
+ chip->rw_card[chip->card2lun[MS_CARD]] = NULL;
+
+ rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN, 0);
+ if (!chip->ft2_fast_mode)
+ card_power_off(chip, MS_CARD);
+ disable_card_clock(chip, MS_CARD);
+ }
+}
+
+static void release_sdio(struct rtsx_chip *chip)
+{
+ if (chip->sd_io) {
+ rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ if (chip->chip_insert_with_sdio) {
+ chip->chip_insert_with_sdio = 0;
+
+ if (CHECK_PID(chip, 0x5288))
+ rtsx_write_register(chip, 0xFE5A, 0x08, 0x00);
+ else
+ rtsx_write_register(chip, 0xFE70, 0x80, 0x00);
+ }
+
+ rtsx_write_register(chip, SDIO_CTRL, SDIO_CD_CTRL, 0);
+ chip->sd_io = 0;
+ }
+}
+
+void rtsx_power_off_card(struct rtsx_chip *chip)
+{
+ if ((chip->card_ready & SD_CARD) || chip->sd_io) {
+ sd_cleanup_work(chip);
+ sd_power_off_card3v3(chip);
+ }
+
+ if (chip->card_ready & XD_CARD) {
+ xd_cleanup_work(chip);
+ xd_power_off_card3v3(chip);
+ }
+
+ if (chip->card_ready & MS_CARD) {
+ ms_cleanup_work(chip);
+ ms_power_off_card3v3(chip);
+ }
+}
+
+void rtsx_release_cards(struct rtsx_chip *chip)
+{
+ chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
+
+ if ((chip->card_ready & SD_CARD) || chip->sd_io) {
+ if (chip->int_reg & SD_EXIST)
+ sd_cleanup_work(chip);
+ release_sd_card(chip);
+ }
+
+ if (chip->card_ready & XD_CARD) {
+ if (chip->int_reg & XD_EXIST)
+ xd_cleanup_work(chip);
+ release_xd_card(chip);
+ }
+
+ if (chip->card_ready & MS_CARD) {
+ if (chip->int_reg & MS_EXIST)
+ ms_cleanup_work(chip);
+ release_ms_card(chip);
+ }
+}
+
+void rtsx_reset_cards(struct rtsx_chip *chip)
+{
+ if (!chip->need_reset)
+ return;
+
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ rtsx_force_power_on(chip, SSC_PDCTL | OC_PDCTL);
+
+ rtsx_disable_aspm(chip);
+
+ if ((chip->need_reset & SD_CARD) && chip->chip_insert_with_sdio)
+ clear_bit(SD_NR, &(chip->need_reset));
+
+ if (chip->need_reset & XD_CARD) {
+ chip->card_exist |= XD_CARD;
+
+ if (chip->xd_show_cnt >= MAX_SHOW_CNT)
+ do_reset_xd_card(chip);
+ else
+ chip->xd_show_cnt++;
+ }
+ if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) {
+ if (chip->card_exist & XD_CARD) {
+ clear_bit(SD_NR, &(chip->need_reset));
+ clear_bit(MS_NR, &(chip->need_reset));
+ }
+ }
+ if (chip->need_reset & SD_CARD) {
+ chip->card_exist |= SD_CARD;
+
+ if (chip->sd_show_cnt >= MAX_SHOW_CNT) {
+ rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
+ do_reset_sd_card(chip);
+ } else {
+ chip->sd_show_cnt++;
+ }
+ }
+ if (chip->need_reset & MS_CARD) {
+ chip->card_exist |= MS_CARD;
+
+ if (chip->ms_show_cnt >= MAX_SHOW_CNT)
+ do_reset_ms_card(chip);
+ else
+ chip->ms_show_cnt++;
+ }
+}
+
+void rtsx_reinit_cards(struct rtsx_chip *chip, int reset_chip)
+{
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ rtsx_force_power_on(chip, SSC_PDCTL | OC_PDCTL);
+
+ if (reset_chip)
+ rtsx_reset_chip(chip);
+
+ chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
+
+ if ((chip->int_reg & SD_EXIST) && (chip->need_reinit & SD_CARD)) {
+ release_sdio(chip);
+ release_sd_card(chip);
+
+ wait_timeout(100);
+
+ chip->card_exist |= SD_CARD;
+ do_reset_sd_card(chip);
+ }
+
+ if ((chip->int_reg & XD_EXIST) && (chip->need_reinit & XD_CARD)) {
+ release_xd_card(chip);
+
+ wait_timeout(100);
+
+ chip->card_exist |= XD_CARD;
+ do_reset_xd_card(chip);
+ }
+
+ if ((chip->int_reg & MS_EXIST) && (chip->need_reinit & MS_CARD)) {
+ release_ms_card(chip);
+
+ wait_timeout(100);
+
+ chip->card_exist |= MS_CARD;
+ do_reset_ms_card(chip);
+ }
+
+ chip->need_reinit = 0;
+}
+
+#ifdef DISABLE_CARD_INT
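+/*
+ * Debounce card insertion: sample RTSX_BIPR DEBOUNCE_CNT times with a short
+ * wait between samples, and only report a newly inserted card if it was seen
+ * in every sample.  Removal of a card that is already known to be present is
+ * taken from a single read.
+ */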
+void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset, unsigned long *need_release)
+{
+ u8 release_map = 0, reset_map = 0;
+
+ chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
+
+ if (chip->card_exist) {
+ if (chip->card_exist & XD_CARD) {
+ if (!(chip->int_reg & XD_EXIST))
+ release_map |= XD_CARD;
+ } else if (chip->card_exist & SD_CARD) {
+ if (!(chip->int_reg & SD_EXIST))
+ release_map |= SD_CARD;
+ } else if (chip->card_exist & MS_CARD) {
+ if (!(chip->int_reg & MS_EXIST))
+ release_map |= MS_CARD;
+ }
+ } else {
+ if (chip->int_reg & XD_EXIST)
+ reset_map |= XD_CARD;
+ else if (chip->int_reg & SD_EXIST)
+ reset_map |= SD_CARD;
+ else if (chip->int_reg & MS_EXIST)
+ reset_map |= MS_CARD;
+ }
+
+ if (reset_map) {
+ int xd_cnt = 0, sd_cnt = 0, ms_cnt = 0;
+ int i;
+
+ for (i = 0; i < (DEBOUNCE_CNT); i++) {
+ chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
+
+ if (chip->int_reg & XD_EXIST)
+ xd_cnt++;
+ else
+ xd_cnt = 0;
+
+ if (chip->int_reg & SD_EXIST)
+ sd_cnt++;
+ else
+ sd_cnt = 0;
+
+ if (chip->int_reg & MS_EXIST)
+ ms_cnt++;
+ else
+ ms_cnt = 0;
+
+ wait_timeout(30);
+ }
+
+ reset_map = 0;
+ if (!(chip->card_exist & XD_CARD) && (xd_cnt > (DEBOUNCE_CNT-1)))
+ reset_map |= XD_CARD;
+ if (!(chip->card_exist & SD_CARD) && (sd_cnt > (DEBOUNCE_CNT-1)))
+ reset_map |= SD_CARD;
+ if (!(chip->card_exist & MS_CARD) && (ms_cnt > (DEBOUNCE_CNT-1)))
+ reset_map |= MS_CARD;
+ }
+
+ if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN))
+ rtsx_write_register(chip, HOST_SLEEP_STATE, 0xC0, 0x00);
+
+ if (need_reset)
+ *need_reset = reset_map;
+ if (need_release)
+ *need_release = release_map;
+}
+#endif
+
+void rtsx_init_cards(struct rtsx_chip *chip)
+{
+ if (RTSX_TST_DELINK(chip) && (rtsx_get_stat(chip) != RTSX_STAT_SS)) {
+ RTSX_DEBUGP("Reset chip in polling thread!\n");
+ rtsx_reset_chip(chip);
+ RTSX_CLR_DELINK(chip);
+ }
+
+#ifdef DISABLE_CARD_INT
+ card_cd_debounce(chip, &(chip->need_reset), &(chip->need_release));
+#endif
+
+ if (chip->need_release) {
+ if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) {
+ if (chip->int_reg & XD_EXIST) {
+ clear_bit(SD_NR, &(chip->need_release));
+ clear_bit(MS_NR, &(chip->need_release));
+ }
+ }
+
+ if (!(chip->card_exist & SD_CARD) && !chip->sd_io)
+ clear_bit(SD_NR, &(chip->need_release));
+ if (!(chip->card_exist & XD_CARD))
+ clear_bit(XD_NR, &(chip->need_release));
+ if (!(chip->card_exist & MS_CARD))
+ clear_bit(MS_NR, &(chip->need_release));
+
+ RTSX_DEBUGP("chip->need_release = 0x%x\n", (unsigned int)(chip->need_release));
+
+#ifdef SUPPORT_OCP
+ if (chip->need_release) {
+ if (chip->ocp_stat & (CARD_OC_NOW | CARD_OC_EVER))
+ rtsx_write_register(chip, OCPCLR,
+ CARD_OC_INT_CLR | CARD_OC_CLR,
+ CARD_OC_INT_CLR | CARD_OC_CLR);
+ chip->ocp_stat = 0;
+ }
+#endif
+ if (chip->need_release) {
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ rtsx_force_power_on(chip, SSC_PDCTL | OC_PDCTL);
+ }
+
+ if (chip->need_release & SD_CARD) {
+ clear_bit(SD_NR, &(chip->need_release));
+ chip->card_exist &= ~SD_CARD;
+ chip->card_ejected &= ~SD_CARD;
+ chip->card_fail &= ~SD_CARD;
+ CLR_BIT(chip->lun_mc, chip->card2lun[SD_CARD]);
+ chip->rw_fail_cnt[chip->card2lun[SD_CARD]] = 0;
+ rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
+
+ release_sdio(chip);
+ release_sd_card(chip);
+ }
+
+ if (chip->need_release & XD_CARD) {
+ clear_bit(XD_NR, &(chip->need_release));
+ chip->card_exist &= ~XD_CARD;
+ chip->card_ejected &= ~XD_CARD;
+ chip->card_fail &= ~XD_CARD;
+ CLR_BIT(chip->lun_mc, chip->card2lun[XD_CARD]);
+ chip->rw_fail_cnt[chip->card2lun[XD_CARD]] = 0;
+
+ release_xd_card(chip);
+
+ if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN))
+ rtsx_write_register(chip, HOST_SLEEP_STATE, 0xC0, 0xC0);
+ }
+
+ if (chip->need_release & MS_CARD) {
+ clear_bit(MS_NR, &(chip->need_release));
+ chip->card_exist &= ~MS_CARD;
+ chip->card_ejected &= ~MS_CARD;
+ chip->card_fail &= ~MS_CARD;
+ CLR_BIT(chip->lun_mc, chip->card2lun[MS_CARD]);
+ chip->rw_fail_cnt[chip->card2lun[MS_CARD]] = 0;
+
+ release_ms_card(chip);
+ }
+
+ RTSX_DEBUGP("chip->card_exist = 0x%x\n", chip->card_exist);
+
+ if (!chip->card_exist)
+ turn_off_led(chip, LED_GPIO);
+ }
+
+ if (chip->need_reset) {
+ RTSX_DEBUGP("chip->need_reset = 0x%x\n", (unsigned int)(chip->need_reset));
+
+ rtsx_reset_cards(chip);
+ }
+
+ if (chip->need_reinit) {
+ RTSX_DEBUGP("chip->need_reinit = 0x%x\n", (unsigned int)(chip->need_reinit));
+
+ rtsx_reinit_cards(chip, 0);
+ }
+}
+
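+/*
+ * The SSC_DEPTH_* encoding in rtsx_card.h uses smaller register values for
+ * larger modulation depths (SSC_DEPTH_4M is 0x01, SSC_DEPTH_2M is 0x02, ...),
+ * so decrementing the encoded value doubles the effective depth.
+ */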
+static inline u8 double_depth(u8 depth)
+{
+ return ((depth > 1) ? (depth - 1) : depth);
+}
+
+int switch_ssc_clock(struct rtsx_chip *chip, int clk)
+{
+ int retval;
+ u8 N = (u8)(clk - 2), min_N, max_N;
+ u8 mcu_cnt, div, max_div, ssc_depth, ssc_depth_mask;
+ int sd_vpclk_phase_reset = 0;
+
+ if (chip->cur_clk == clk)
+ return STATUS_SUCCESS;
+
+ min_N = 60;
+ max_N = 120;
+ max_div = CLK_DIV_4;
+
+ RTSX_DEBUGP("Switch SSC clock to %dMHz (cur_clk = %d)\n", clk, chip->cur_clk);
+
+ if ((clk <= 2) || (N > max_N))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ mcu_cnt = (u8)(125/clk + 3);
+ if (mcu_cnt > 7)
+ mcu_cnt = 7;
+
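+	/*
+	 * The SSC output before the divider is roughly (N + 2) MHz, so the
+	 * loop below scales N up into [min_N, max_N] while the divider
+	 * compensates.  For example, clk = 30 gives N = 28; two passes yield
+	 * N = 118 and div = CLK_DIV_4, i.e. 120 MHz / 4 = 30 MHz.
+	 */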
+ div = CLK_DIV_1;
+ while ((N < min_N) && (div < max_div)) {
+ N = (N + 2) * 2 - 2;
+ div++;
+ }
+ RTSX_DEBUGP("N = %d, div = %d\n", N, div);
+
+ if (chip->ssc_en) {
+ ssc_depth = 0x01;
+ N -= 2;
+ } else {
+ ssc_depth = 0;
+ }
+
+ ssc_depth_mask = 0x03;
+
+ RTSX_DEBUGP("ssc_depth = %d\n", ssc_depth);
+
+ rtsx_init_cmd(chip);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CLK_DIV, 0xFF, (div << 4) | mcu_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL2, ssc_depth_mask, ssc_depth);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, N);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+ if (sd_vpclk_phase_reset) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET);
+ }
+
+ retval = rtsx_send_cmd(chip, 0, WAIT_TIME);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ udelay(10);
+ RTSX_WRITE_REG(chip, CLK_CTL, CLK_LOW_FREQ, 0);
+
+ chip->cur_clk = clk;
+
+ return STATUS_SUCCESS;
+}
+
+int switch_normal_clock(struct rtsx_chip *chip, int clk)
+{
+ u8 sel, div, mcu_cnt;
+ int sd_vpclk_phase_reset = 0;
+
+ if (chip->cur_clk == clk)
+ return STATUS_SUCCESS;
+
+ switch (clk) {
+ case CLK_20:
+ RTSX_DEBUGP("Switch clock to 20MHz\n");
+ sel = SSC_80;
+ div = CLK_DIV_4;
+ mcu_cnt = 7;
+ break;
+
+ case CLK_30:
+ RTSX_DEBUGP("Switch clock to 30MHz\n");
+ sel = SSC_120;
+ div = CLK_DIV_4;
+ mcu_cnt = 7;
+ break;
+
+ case CLK_40:
+ RTSX_DEBUGP("Switch clock to 40MHz\n");
+ sel = SSC_80;
+ div = CLK_DIV_2;
+ mcu_cnt = 7;
+ break;
+
+ case CLK_50:
+ RTSX_DEBUGP("Switch clock to 50MHz\n");
+ sel = SSC_100;
+ div = CLK_DIV_2;
+ mcu_cnt = 6;
+ break;
+
+ case CLK_60:
+ RTSX_DEBUGP("Switch clock to 60MHz\n");
+ sel = SSC_120;
+ div = CLK_DIV_2;
+ mcu_cnt = 6;
+ break;
+
+ case CLK_80:
+ RTSX_DEBUGP("Switch clock to 80MHz\n");
+ sel = SSC_80;
+ div = CLK_DIV_1;
+ mcu_cnt = 5;
+ break;
+
+ case CLK_100:
+ RTSX_DEBUGP("Switch clock to 100MHz\n");
+ sel = SSC_100;
+ div = CLK_DIV_1;
+ mcu_cnt = 5;
+ break;
+
+ case CLK_120:
+ RTSX_DEBUGP("Switch clock to 120MHz\n");
+ sel = SSC_120;
+ div = CLK_DIV_1;
+ mcu_cnt = 5;
+ break;
+
+ case CLK_150:
+ RTSX_DEBUGP("Switch clock to 150MHz\n");
+ sel = SSC_150;
+ div = CLK_DIV_1;
+ mcu_cnt = 4;
+ break;
+
+ case CLK_200:
+ RTSX_DEBUGP("Switch clock to 200MHz\n");
+ sel = SSC_200;
+ div = CLK_DIV_1;
+ mcu_cnt = 4;
+ break;
+
+ default:
+ RTSX_DEBUGP("Try to switch to an illegal clock (%d)\n", clk);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_WRITE_REG(chip, CLK_CTL, 0xFF, CLK_LOW_FREQ);
+ if (sd_vpclk_phase_reset) {
+ RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
+ RTSX_WRITE_REG(chip, SD_VPCLK1_CTL, PHASE_NOT_RESET, 0);
+ }
+ RTSX_WRITE_REG(chip, CLK_DIV, 0xFF, (div << 4) | mcu_cnt);
+ RTSX_WRITE_REG(chip, CLK_SEL, 0xFF, sel);
+
+ if (sd_vpclk_phase_reset) {
+ udelay(200);
+ RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET);
+ RTSX_WRITE_REG(chip, SD_VPCLK1_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET);
+ udelay(200);
+ }
+ RTSX_WRITE_REG(chip, CLK_CTL, 0xFF, 0);
+
+ chip->cur_clk = clk;
+
+ return STATUS_SUCCESS;
+}
+
+void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip, u32 byte_cnt, u8 pack_size)
+{
+ if (pack_size > DMA_1024)
+ pack_size = DMA_512;
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, IRQSTAT0, DMA_DONE_INT, DMA_DONE_INT);
+
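+	/* DMATC3..DMATC0 hold the transfer length, most significant byte first */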
+ rtsx_add_cmd(chip, WRITE_REG_CMD, DMATC3, 0xFF, (u8)(byte_cnt >> 24));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, DMATC2, 0xFF, (u8)(byte_cnt >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, DMATC1, 0xFF, (u8)(byte_cnt >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, DMATC0, 0xFF, (u8)byte_cnt);
+
+ if (dir == DMA_FROM_DEVICE) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL, 0x03 | DMA_PACK_SIZE_MASK,
+ DMA_DIR_FROM_CARD | DMA_EN | pack_size);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL, 0x03 | DMA_PACK_SIZE_MASK,
+ DMA_DIR_TO_CARD | DMA_EN | pack_size);
+ }
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
+}
+
+int enable_card_clock(struct rtsx_chip *chip, u8 card)
+{
+ u8 clk_en = 0;
+
+ if (card & XD_CARD)
+ clk_en |= XD_CLK_EN;
+ if (card & SD_CARD)
+ clk_en |= SD_CLK_EN;
+ if (card & MS_CARD)
+ clk_en |= MS_CLK_EN;
+
+ RTSX_WRITE_REG(chip, CARD_CLK_EN, clk_en, clk_en);
+
+ return STATUS_SUCCESS;
+}
+
+int disable_card_clock(struct rtsx_chip *chip, u8 card)
+{
+ u8 clk_en = 0;
+
+ if (card & XD_CARD)
+ clk_en |= XD_CLK_EN;
+ if (card & SD_CARD)
+ clk_en |= SD_CLK_EN;
+ if (card & MS_CARD)
+ clk_en |= MS_CLK_EN;
+
+ RTSX_WRITE_REG(chip, CARD_CLK_EN, clk_en, 0);
+
+ return STATUS_SUCCESS;
+}
+
+int card_power_on(struct rtsx_chip *chip, u8 card)
+{
+ int retval;
+ u8 mask, val1, val2;
+
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN) && (card == MS_CARD)) {
+ mask = MS_POWER_MASK;
+ val1 = MS_PARTIAL_POWER_ON;
+ val2 = MS_POWER_ON;
+ } else {
+ mask = SD_POWER_MASK;
+ val1 = SD_PARTIAL_POWER_ON;
+ val2 = SD_POWER_ON;
+ }
+
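+	/*
+	 * Power the socket up in two steps: partial power first, wait
+	 * pmos_pwr_on_interval microseconds, then full power.
+	 */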
+ rtsx_init_cmd(chip);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, mask, val1);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ udelay(chip->pmos_pwr_on_interval);
+
+ rtsx_init_cmd(chip);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, mask, val2);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int card_power_off(struct rtsx_chip *chip, u8 card)
+{
+ u8 mask, val;
+
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN) && (card == MS_CARD)) {
+ mask = MS_POWER_MASK;
+ val = MS_POWER_OFF;
+ } else {
+ mask = SD_POWER_MASK;
+ val = SD_POWER_OFF;
+ }
+
+ RTSX_WRITE_REG(chip, CARD_PWR_CTL, mask, val);
+
+ return STATUS_SUCCESS;
+}
+
+int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 sec_addr, u16 sec_cnt)
+{
+ int retval;
+ unsigned int lun = SCSI_LUN(srb);
+ int i;
+
+ if (chip->rw_card[lun] == NULL)
+ TRACE_RET(chip, STATUS_FAIL);
+
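+	/*
+	 * Retry a failed transfer up to three times, giving up early if the
+	 * chip or card has gone away or the card layer marks the error as
+	 * not worth retrying.
+	 */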
+ for (i = 0; i < 3; i++) {
+ chip->rw_need_retry = 0;
+
+ retval = chip->rw_card[lun](srb, chip, sec_addr, sec_cnt);
+ if (retval != STATUS_SUCCESS) {
+ if (rtsx_check_chip_exist(chip) != STATUS_SUCCESS) {
+ rtsx_release_chip(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (detect_card_cd(chip, chip->cur_card) != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!chip->rw_need_retry) {
+ RTSX_DEBUGP("RW fail, but no need to retry\n");
+ break;
+ }
+ } else {
+ chip->rw_need_retry = 0;
+ break;
+ }
+
+ RTSX_DEBUGP("Retry RW, (i = %d)\n", i);
+ }
+
+ return retval;
+}
+
+int card_share_mode(struct rtsx_chip *chip, int card)
+{
+ u8 mask, value;
+
+ if (CHECK_PID(chip, 0x5208)) {
+ mask = CARD_SHARE_MASK;
+ if (card == SD_CARD)
+ value = CARD_SHARE_48_SD;
+ else if (card == MS_CARD)
+ value = CARD_SHARE_48_MS;
+ else if (card == XD_CARD)
+ value = CARD_SHARE_48_XD;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+
+ } else if (CHECK_PID(chip, 0x5288)) {
+ mask = 0x03;
+ if (card == SD_CARD)
+ value = CARD_SHARE_BAROSSA_SD;
+ else if (card == MS_CARD)
+ value = CARD_SHARE_BAROSSA_MS;
+ else if (card == XD_CARD)
+ value = CARD_SHARE_BAROSSA_XD;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_WRITE_REG(chip, CARD_SHARE_MODE, mask, value);
+
+ return STATUS_SUCCESS;
+}
+
+int select_card(struct rtsx_chip *chip, int card)
+{
+ int retval;
+
+ if (chip->cur_card != card) {
+ u8 mod;
+
+ if (card == SD_CARD)
+ mod = SD_MOD_SEL;
+ else if (card == MS_CARD)
+ mod = MS_MOD_SEL;
+ else if (card == XD_CARD)
+ mod = XD_MOD_SEL;
+ else if (card == SPI_CARD)
+ mod = SPI_MOD_SEL;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, CARD_SELECT, 0x07, mod);
+ chip->cur_card = card;
+
+ retval = card_share_mode(chip, card);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+void toggle_gpio(struct rtsx_chip *chip, u8 gpio)
+{
+ u8 temp_reg;
+
+ rtsx_read_register(chip, CARD_GPIO, &temp_reg);
+ temp_reg ^= (0x01 << gpio);
+ rtsx_write_register(chip, CARD_GPIO, 0xFF, temp_reg);
+}
+
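+/* The LED GPIO is driven active-high on the RTS5288 and active-low otherwise */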
+void turn_on_led(struct rtsx_chip *chip, u8 gpio)
+{
+ if (CHECK_PID(chip, 0x5288))
+ rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), (u8)(1 << gpio));
+ else
+ rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0);
+}
+
+void turn_off_led(struct rtsx_chip *chip, u8 gpio)
+{
+ if (CHECK_PID(chip, 0x5288))
+ rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0);
+ else
+ rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), (u8)(1 << gpio));
+}
+
+int detect_card_cd(struct rtsx_chip *chip, int card)
+{
+ u32 card_cd, status;
+
+ if (card == SD_CARD) {
+ card_cd = SD_EXIST;
+ } else if (card == MS_CARD) {
+ card_cd = MS_EXIST;
+ } else if (card == XD_CARD) {
+ card_cd = XD_EXIST;
+ } else {
+ RTSX_DEBUGP("Wrong card type: 0x%x\n", card);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ status = rtsx_readl(chip, RTSX_BIPR);
+ if (!(status & card_cd))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int check_card_exist(struct rtsx_chip *chip, unsigned int lun)
+{
+ if (chip->card_exist & chip->lun2card[lun])
+ return 1;
+
+ return 0;
+}
+
+int check_card_ready(struct rtsx_chip *chip, unsigned int lun)
+{
+ if (chip->card_ready & chip->lun2card[lun])
+ return 1;
+
+ return 0;
+}
+
+int check_card_wp(struct rtsx_chip *chip, unsigned int lun)
+{
+ if (chip->card_wp & chip->lun2card[lun])
+ return 1;
+
+ return 0;
+}
+
+int check_card_fail(struct rtsx_chip *chip, unsigned int lun)
+{
+ if (chip->card_fail & chip->lun2card[lun])
+ return 1;
+
+ return 0;
+}
+
+int check_card_ejected(struct rtsx_chip *chip, unsigned int lun)
+{
+ if (chip->card_ejected & chip->lun2card[lun])
+ return 1;
+
+ return 0;
+}
+
+u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun)
+{
+ if ((chip->card_ready & chip->lun2card[lun]) == XD_CARD)
+ return (u8)XD_CARD;
+ else if ((chip->card_ready & chip->lun2card[lun]) == SD_CARD)
+ return (u8)SD_CARD;
+ else if ((chip->card_ready & chip->lun2card[lun]) == MS_CARD)
+ return (u8)MS_CARD;
+
+ return 0;
+}
+
+void eject_card(struct rtsx_chip *chip, unsigned int lun)
+{
+ do_remaining_work(chip);
+
+ if ((chip->card_ready & chip->lun2card[lun]) == SD_CARD) {
+ release_sd_card(chip);
+ chip->card_ejected |= SD_CARD;
+ chip->card_ready &= ~SD_CARD;
+ chip->capacity[lun] = 0;
+ } else if ((chip->card_ready & chip->lun2card[lun]) == XD_CARD) {
+ release_xd_card(chip);
+ chip->card_ejected |= XD_CARD;
+ chip->card_ready &= ~XD_CARD;
+ chip->capacity[lun] = 0;
+ } else if ((chip->card_ready & chip->lun2card[lun]) == MS_CARD) {
+ release_ms_card(chip);
+ chip->card_ejected |= MS_CARD;
+ chip->card_ready &= ~MS_CARD;
+ chip->capacity[lun] = 0;
+ }
+}
diff --git a/drivers/staging/rts5208/rtsx_card.h b/drivers/staging/rts5208/rtsx_card.h
new file mode 100644
index 000000000000..4528b619f6b3
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_card.h
@@ -0,0 +1,1098 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_CARD_H
+#define __REALTEK_RTSX_CARD_H
+
+#include "debug.h"
+#include "rtsx.h"
+#include "rtsx_chip.h"
+#include "rtsx_transport.h"
+#include "sd.h"
+
+#define SSC_POWER_DOWN 0x01
+#define SD_OC_POWER_DOWN 0x02
+#define MS_OC_POWER_DOWN 0x04
+#define ALL_POWER_DOWN 0x07
+#define OC_POWER_DOWN 0x06
+
+#define PMOS_STRG_MASK 0x10
+#define PMOS_STRG_800mA 0x10
+#define PMOS_STRG_400mA 0x00
+
+#define POWER_OFF 0x03
+#define PARTIAL_POWER_ON 0x01
+#define POWER_ON 0x00
+
+#define MS_POWER_OFF 0x0C
+#define MS_PARTIAL_POWER_ON 0x04
+#define MS_POWER_ON 0x00
+#define MS_POWER_MASK 0x0C
+
+#define SD_POWER_OFF 0x03
+#define SD_PARTIAL_POWER_ON 0x01
+#define SD_POWER_ON 0x00
+#define SD_POWER_MASK 0x03
+
+#define XD_OUTPUT_EN 0x02
+#define SD_OUTPUT_EN 0x04
+#define MS_OUTPUT_EN 0x08
+#define SPI_OUTPUT_EN 0x10
+
+#define CLK_LOW_FREQ 0x01
+
+#define CLK_DIV_1 0x01
+#define CLK_DIV_2 0x02
+#define CLK_DIV_4 0x03
+#define CLK_DIV_8 0x04
+
+#define SSC_80 0
+#define SSC_100 1
+#define SSC_120 2
+#define SSC_150 3
+#define SSC_200 4
+
+#define XD_CLK_EN 0x02
+#define SD_CLK_EN 0x04
+#define MS_CLK_EN 0x08
+#define SPI_CLK_EN 0x10
+
+#define XD_MOD_SEL 1
+#define SD_MOD_SEL 2
+#define MS_MOD_SEL 3
+#define SPI_MOD_SEL 4
+
+#define CHANGE_CLK 0x01
+
+#define SD_CRC7_ERR 0x80
+#define SD_CRC16_ERR 0x40
+#define SD_CRC_WRITE_ERR 0x20
+#define SD_CRC_WRITE_ERR_MASK 0x1C
+#define GET_CRC_TIME_OUT 0x02
+#define SD_TUNING_COMPARE_ERR 0x01
+
+#define SD_RSP_80CLK_TIMEOUT 0x01
+
+#define SD_CLK_TOGGLE_EN 0x80
+#define SD_CLK_FORCE_STOP 0x40
+#define SD_DAT3_STATUS 0x10
+#define SD_DAT2_STATUS 0x08
+#define SD_DAT1_STATUS 0x04
+#define SD_DAT0_STATUS 0x02
+#define SD_CMD_STATUS 0x01
+
+#define SD_IO_USING_1V8 0x80
+#define SD_IO_USING_3V3 0x7F
+#define TYPE_A_DRIVING 0x00
+#define TYPE_B_DRIVING 0x01
+#define TYPE_C_DRIVING 0x02
+#define TYPE_D_DRIVING 0x03
+
+#define DDR_FIX_RX_DAT 0x00
+#define DDR_VAR_RX_DAT 0x80
+#define DDR_FIX_RX_DAT_EDGE 0x00
+#define DDR_FIX_RX_DAT_14_DELAY 0x40
+#define DDR_FIX_RX_CMD 0x00
+#define DDR_VAR_RX_CMD 0x20
+#define DDR_FIX_RX_CMD_POS_EDGE 0x00
+#define DDR_FIX_RX_CMD_14_DELAY 0x10
+#define SD20_RX_POS_EDGE 0x00
+#define SD20_RX_14_DELAY 0x08
+#define SD20_RX_SEL_MASK 0x08
+
+#define DDR_FIX_TX_CMD_DAT 0x00
+#define DDR_VAR_TX_CMD_DAT 0x80
+#define DDR_FIX_TX_DAT_14_TSU 0x00
+#define DDR_FIX_TX_DAT_12_TSU 0x40
+#define DDR_FIX_TX_CMD_NEG_EDGE 0x00
+#define DDR_FIX_TX_CMD_14_AHEAD 0x20
+#define SD20_TX_NEG_EDGE 0x00
+#define SD20_TX_14_AHEAD 0x10
+#define SD20_TX_SEL_MASK 0x10
+#define DDR_VAR_SDCLK_POL_SWAP 0x01
+
+#define SD_TRANSFER_START 0x80
+#define SD_TRANSFER_END 0x40
+#define SD_STAT_IDLE 0x20
+#define SD_TRANSFER_ERR 0x10
+#define SD_TM_NORMAL_WRITE 0x00
+#define SD_TM_AUTO_WRITE_3 0x01
+#define SD_TM_AUTO_WRITE_4 0x02
+#define SD_TM_AUTO_READ_3 0x05
+#define SD_TM_AUTO_READ_4 0x06
+#define SD_TM_CMD_RSP 0x08
+#define SD_TM_AUTO_WRITE_1 0x09
+#define SD_TM_AUTO_WRITE_2 0x0A
+#define SD_TM_NORMAL_READ 0x0C
+#define SD_TM_AUTO_READ_1 0x0D
+#define SD_TM_AUTO_READ_2 0x0E
+#define SD_TM_AUTO_TUNING 0x0F
+
+#define PHASE_CHANGE 0x80
+#define PHASE_NOT_RESET 0x40
+
+#define DCMPS_CHANGE 0x80
+#define DCMPS_CHANGE_DONE 0x40
+#define DCMPS_ERROR 0x20
+#define DCMPS_CURRENT_PHASE 0x1F
+
+#define SD_CLK_DIVIDE_0 0x00
+#define SD_CLK_DIVIDE_256 0xC0
+#define SD_CLK_DIVIDE_128 0x80
+#define SD_BUS_WIDTH_1 0x00
+#define SD_BUS_WIDTH_4 0x01
+#define SD_BUS_WIDTH_8 0x02
+#define SD_ASYNC_FIFO_NOT_RST 0x10
+#define SD_20_MODE 0x00
+#define SD_DDR_MODE 0x04
+#define SD_30_MODE 0x08
+
+#define SD_CLK_DIVIDE_MASK 0xC0
+
+#define SD_CMD_IDLE 0x80
+
+#define SD_DATA_IDLE 0x80
+
+#define DCM_RESET 0x08
+#define DCM_LOCKED 0x04
+#define DCM_208M 0x00
+#define DCM_TX 0x01
+#define DCM_RX 0x02
+
+#define DRP_START 0x80
+#define DRP_DONE 0x40
+
+#define DRP_WRITE 0x80
+#define DRP_READ 0x00
+#define DCM_WRITE_ADDRESS_50 0x50
+#define DCM_WRITE_ADDRESS_51 0x51
+#define DCM_READ_ADDRESS_00 0x00
+#define DCM_READ_ADDRESS_51 0x51
+
+#define SD_CALCULATE_CRC7 0x00
+#define SD_NO_CALCULATE_CRC7 0x80
+#define SD_CHECK_CRC16 0x00
+#define SD_NO_CHECK_CRC16 0x40
+#define SD_NO_CHECK_WAIT_CRC_TO 0x20
+#define SD_WAIT_BUSY_END 0x08
+#define SD_NO_WAIT_BUSY_END 0x00
+#define SD_CHECK_CRC7 0x00
+#define SD_NO_CHECK_CRC7 0x04
+#define SD_RSP_LEN_0 0x00
+#define SD_RSP_LEN_6 0x01
+#define SD_RSP_LEN_17 0x02
+#define SD_RSP_TYPE_R0 0x04
+#define SD_RSP_TYPE_R1 0x01
+#define SD_RSP_TYPE_R1b 0x09
+#define SD_RSP_TYPE_R2 0x02
+#define SD_RSP_TYPE_R3 0x05
+#define SD_RSP_TYPE_R4 0x05
+#define SD_RSP_TYPE_R5 0x01
+#define SD_RSP_TYPE_R6 0x01
+#define SD_RSP_TYPE_R7 0x01
+
+#define SD_RSP_80CLK_TIMEOUT_EN 0x01
+
+#define SAMPLE_TIME_RISING 0x00
+#define SAMPLE_TIME_FALLING 0x80
+#define PUSH_TIME_DEFAULT 0x00
+#define PUSH_TIME_ODD 0x40
+#define NO_EXTEND_TOGGLE 0x00
+#define EXTEND_TOGGLE_CHK 0x20
+#define MS_BUS_WIDTH_1 0x00
+#define MS_BUS_WIDTH_4 0x10
+#define MS_BUS_WIDTH_8 0x18
+#define MS_2K_SECTOR_MODE 0x04
+#define MS_512_SECTOR_MODE 0x00
+#define MS_TOGGLE_TIMEOUT_EN 0x00
+#define MS_TOGGLE_TIMEOUT_DISEN 0x01
+#define MS_NO_CHECK_INT 0x02
+
+#define WAIT_INT 0x80
+#define NO_WAIT_INT 0x00
+#define NO_AUTO_READ_INT_REG 0x00
+#define AUTO_READ_INT_REG 0x40
+#define MS_CRC16_ERR 0x20
+#define MS_RDY_TIMEOUT 0x10
+#define MS_INT_CMDNK 0x08
+#define MS_INT_BREQ 0x04
+#define MS_INT_ERR 0x02
+#define MS_INT_CED 0x01
+
+#define MS_TRANSFER_START 0x80
+#define MS_TRANSFER_END 0x40
+#define MS_TRANSFER_ERR 0x20
+#define MS_BS_STATE 0x10
+#define MS_TM_READ_BYTES 0x00
+#define MS_TM_NORMAL_READ 0x01
+#define MS_TM_WRITE_BYTES 0x04
+#define MS_TM_NORMAL_WRITE 0x05
+#define MS_TM_AUTO_READ 0x08
+#define MS_TM_AUTO_WRITE 0x0C
+
+#define CARD_SHARE_MASK 0x0F
+#define CARD_SHARE_MULTI_LUN 0x00
+#define CARD_SHARE_NORMAL 0x00
+#define CARD_SHARE_48_XD 0x02
+#define CARD_SHARE_48_SD 0x04
+#define CARD_SHARE_48_MS 0x08
+#define CARD_SHARE_BAROSSA_XD 0x00
+#define CARD_SHARE_BAROSSA_SD 0x01
+#define CARD_SHARE_BAROSSA_MS 0x02
+
+#define MS_DRIVE_8 0x00
+#define MS_DRIVE_4 0x40
+#define MS_DRIVE_12 0x80
+#define SD_DRIVE_8 0x00
+#define SD_DRIVE_4 0x10
+#define SD_DRIVE_12 0x20
+#define XD_DRIVE_8 0x00
+#define XD_DRIVE_4 0x04
+#define XD_DRIVE_12 0x08
+
+#define SPI_STOP 0x01
+#define XD_STOP 0x02
+#define SD_STOP 0x04
+#define MS_STOP 0x08
+#define SPI_CLR_ERR 0x10
+#define XD_CLR_ERR 0x20
+#define SD_CLR_ERR 0x40
+#define MS_CLR_ERR 0x80
+
+#define CRC_FIX_CLK (0x00 << 0)
+#define CRC_VAR_CLK0 (0x01 << 0)
+#define CRC_VAR_CLK1 (0x02 << 0)
+#define SD30_FIX_CLK (0x00 << 2)
+#define SD30_VAR_CLK0 (0x01 << 2)
+#define SD30_VAR_CLK1 (0x02 << 2)
+#define SAMPLE_FIX_CLK (0x00 << 4)
+#define SAMPLE_VAR_CLK0 (0x01 << 4)
+#define SAMPLE_VAR_CLK1 (0x02 << 4)
+
+#define SDIO_VER_20 0x80
+#define SDIO_VER_10 0x00
+#define SDIO_VER_CHG 0x40
+#define SDIO_BUS_AUTO_SWITCH 0x10
+
+#define PINGPONG_BUFFER 0x01
+#define RING_BUFFER 0x00
+
+#define RB_FLUSH 0x80
+
+#define DMA_DONE_INT_EN 0x80
+#define SUSPEND_INT_EN 0x40
+#define LINK_RDY_INT_EN 0x20
+#define LINK_DOWN_INT_EN 0x10
+
+#define DMA_DONE_INT 0x80
+#define SUSPEND_INT 0x40
+#define LINK_RDY_INT 0x20
+#define LINK_DOWN_INT 0x10
+
+#define MRD_ERR_INT_EN 0x40
+#define MWR_ERR_INT_EN 0x20
+#define SCSI_CMD_INT_EN 0x10
+#define TLP_RCV_INT_EN 0x08
+#define TLP_TRSMT_INT_EN 0x04
+#define MRD_COMPLETE_INT_EN 0x02
+#define MWR_COMPLETE_INT_EN 0x01
+
+#define MRD_ERR_INT 0x40
+#define MWR_ERR_INT 0x20
+#define SCSI_CMD_INT 0x10
+#define TLP_RX_INT 0x08
+#define TLP_TX_INT 0x04
+#define MRD_COMPLETE_INT 0x02
+#define MWR_COMPLETE_INT 0x01
+
+#define MSG_RX_INT_EN 0x08
+#define MRD_RX_INT_EN 0x04
+#define MWR_RX_INT_EN 0x02
+#define CPLD_RX_INT_EN 0x01
+
+#define MSG_RX_INT 0x08
+#define MRD_RX_INT 0x04
+#define MWR_RX_INT 0x02
+#define CPLD_RX_INT 0x01
+
+#define MSG_TX_INT_EN 0x08
+#define MRD_TX_INT_EN 0x04
+#define MWR_TX_INT_EN 0x02
+#define CPLD_TX_INT_EN 0x01
+
+#define MSG_TX_INT 0x08
+#define MRD_TX_INT 0x04
+#define MWR_TX_INT 0x02
+#define CPLD_TX_INT 0x01
+
+#define DMA_RST 0x80
+#define DMA_BUSY 0x04
+#define DMA_DIR_TO_CARD 0x00
+#define DMA_DIR_FROM_CARD 0x02
+#define DMA_EN 0x01
+#define DMA_128 (0 << 4)
+#define DMA_256 (1 << 4)
+#define DMA_512 (2 << 4)
+#define DMA_1024 (3 << 4)
+#define DMA_PACK_SIZE_MASK 0x30
+
+#define XD_PWR_OFF_DELAY0 0x00
+#define XD_PWR_OFF_DELAY1 0x02
+#define XD_PWR_OFF_DELAY2 0x04
+#define XD_PWR_OFF_DELAY3 0x06
+#define XD_AUTO_PWR_OFF_EN 0xF7
+#define XD_NO_AUTO_PWR_OFF 0x08
+
+#define XD_TIME_RWN_1 0x00
+#define XD_TIME_RWN_STEP 0x20
+#define XD_TIME_RW_1 0x00
+#define XD_TIME_RW_STEP 0x04
+#define XD_TIME_SETUP_1 0x00
+#define XD_TIME_SETUP_STEP 0x01
+
+#define XD_ECC2_UNCORRECTABLE 0x80
+#define XD_ECC2_ERROR 0x40
+#define XD_ECC1_UNCORRECTABLE 0x20
+#define XD_ECC1_ERROR 0x10
+#define XD_RDY 0x04
+#define XD_CE_EN 0xFD
+#define XD_CE_DISEN 0x02
+#define XD_WP_EN 0xFE
+#define XD_WP_DISEN 0x01
+
+#define XD_TRANSFER_START 0x80
+#define XD_TRANSFER_END 0x40
+#define XD_PPB_EMPTY 0x20
+#define XD_RESET 0x00
+#define XD_ERASE 0x01
+#define XD_READ_STATUS 0x02
+#define XD_READ_ID 0x03
+#define XD_READ_REDUNDANT 0x04
+#define XD_READ_PAGES 0x05
+#define XD_SET_CMD 0x06
+#define XD_NORMAL_READ 0x07
+#define XD_WRITE_PAGES 0x08
+#define XD_NORMAL_WRITE 0x09
+#define XD_WRITE_REDUNDANT 0x0A
+#define XD_SET_ADDR 0x0B
+
+#define XD_PPB_TO_SIE 0x80
+#define XD_TO_PPB_ONLY 0x00
+#define XD_BA_TRANSFORM 0x40
+#define XD_BA_NO_TRANSFORM 0x00
+#define XD_NO_CALC_ECC 0x20
+#define XD_CALC_ECC 0x00
+#define XD_IGNORE_ECC 0x10
+#define XD_CHECK_ECC 0x00
+#define XD_DIRECT_TO_RB 0x08
+#define XD_ADDR_LENGTH_0 0x00
+#define XD_ADDR_LENGTH_1 0x01
+#define XD_ADDR_LENGTH_2 0x02
+#define XD_ADDR_LENGTH_3 0x03
+#define XD_ADDR_LENGTH_4 0x04
+
+#define XD_GPG 0xFF
+#define XD_BPG 0x00
+
+#define XD_GBLK 0xFF
+#define XD_LATER_BBLK 0xF0
+
+#define XD_ECC2_ALL1 0x80
+#define XD_ECC1_ALL1 0x40
+#define XD_BA2_ALL0 0x20
+#define XD_BA1_ALL0 0x10
+#define XD_BA1_BA2_EQL 0x04
+#define XD_BA2_VALID 0x02
+#define XD_BA1_VALID 0x01
+
+#define XD_PGSTS_ZEROBIT_OVER4 0x00
+#define XD_PGSTS_NOT_FF 0x02
+#define XD_AUTO_CHK_DATA_STATUS 0x01
+
+#define RSTB_MODE_DETECT 0x80
+#define MODE_OUT_VLD 0x40
+#define MODE_OUT_0_NONE 0x00
+#define MODE_OUT_10_NONE 0x04
+#define MODE_OUT_10_47 0x05
+#define MODE_OUT_10_180 0x06
+#define MODE_OUT_10_680 0x07
+#define MODE_OUT_16_NONE 0x08
+#define MODE_OUT_16_47 0x09
+#define MODE_OUT_16_180 0x0A
+#define MODE_OUT_16_680 0x0B
+#define MODE_OUT_NONE_NONE 0x0C
+#define MODE_OUT_NONE_47 0x0D
+#define MODE_OUT_NONE_180 0x0E
+#define MODE_OUT_NONE_680 0x0F
+
+#define CARD_OC_INT_EN 0x20
+#define CARD_DETECT_EN 0x08
+
+#define MS_DETECT_EN 0x80
+#define MS_OCP_INT_EN 0x40
+#define MS_OCP_INT_CLR 0x20
+#define MS_OC_CLR 0x10
+#define SD_DETECT_EN 0x08
+#define SD_OCP_INT_EN 0x04
+#define SD_OCP_INT_CLR 0x02
+#define SD_OC_CLR 0x01
+
+#define CARD_OCP_DETECT 0x80
+#define CARD_OC_NOW 0x08
+#define CARD_OC_EVER 0x04
+
+#define MS_OCP_DETECT 0x80
+#define MS_OC_NOW 0x40
+#define MS_OC_EVER 0x20
+#define SD_OCP_DETECT 0x08
+#define SD_OC_NOW 0x04
+#define SD_OC_EVER 0x02
+
+#define CARD_OC_INT_CLR 0x08
+#define CARD_OC_CLR 0x02
+
+#define SD_OCP_GLITCH_MASK 0x07
+#define SD_OCP_GLITCH_6_4 0x00
+#define SD_OCP_GLITCH_64 0x01
+#define SD_OCP_GLITCH_640 0x02
+#define SD_OCP_GLITCH_1000 0x03
+#define SD_OCP_GLITCH_2000 0x04
+#define SD_OCP_GLITCH_4000 0x05
+#define SD_OCP_GLITCH_8000 0x06
+#define SD_OCP_GLITCH_10000 0x07
+
+#define MS_OCP_GLITCH_MASK 0x70
+#define MS_OCP_GLITCH_6_4 (0x00 << 4)
+#define MS_OCP_GLITCH_64 (0x01 << 4)
+#define MS_OCP_GLITCH_640 (0x02 << 4)
+#define MS_OCP_GLITCH_1000 (0x03 << 4)
+#define MS_OCP_GLITCH_2000 (0x04 << 4)
+#define MS_OCP_GLITCH_4000 (0x05 << 4)
+#define MS_OCP_GLITCH_8000 (0x06 << 4)
+#define MS_OCP_GLITCH_10000 (0x07 << 4)
+
+#define OCP_TIME_60 0x00
+#define OCP_TIME_100 (0x01 << 3)
+#define OCP_TIME_200 (0x02 << 3)
+#define OCP_TIME_400 (0x03 << 3)
+#define OCP_TIME_600 (0x04 << 3)
+#define OCP_TIME_800 (0x05 << 3)
+#define OCP_TIME_1100 (0x06 << 3)
+#define OCP_TIME_MASK 0x38
+
+#define MS_OCP_TIME_60 0x00
+#define MS_OCP_TIME_100 (0x01 << 4)
+#define MS_OCP_TIME_200 (0x02 << 4)
+#define MS_OCP_TIME_400 (0x03 << 4)
+#define MS_OCP_TIME_600 (0x04 << 4)
+#define MS_OCP_TIME_800 (0x05 << 4)
+#define MS_OCP_TIME_1100 (0x06 << 4)
+#define MS_OCP_TIME_MASK 0x70
+
+#define SD_OCP_TIME_60 0x00
+#define SD_OCP_TIME_100 0x01
+#define SD_OCP_TIME_200 0x02
+#define SD_OCP_TIME_400 0x03
+#define SD_OCP_TIME_600 0x04
+#define SD_OCP_TIME_800 0x05
+#define SD_OCP_TIME_1100 0x06
+#define SD_OCP_TIME_MASK 0x07
+
+#define OCP_THD_315_417 0x00
+#define OCP_THD_283_783 (0x01 << 6)
+#define OCP_THD_244_946 (0x02 << 6)
+#define OCP_THD_191_1080 (0x03 << 6)
+#define OCP_THD_MASK 0xC0
+
+#define MS_OCP_THD_450 0x00
+#define MS_OCP_THD_550 (0x01 << 4)
+#define MS_OCP_THD_650 (0x02 << 4)
+#define MS_OCP_THD_750 (0x03 << 4)
+#define MS_OCP_THD_850 (0x04 << 4)
+#define MS_OCP_THD_950 (0x05 << 4)
+#define MS_OCP_THD_1050 (0x06 << 4)
+#define MS_OCP_THD_1150 (0x07 << 4)
+#define MS_OCP_THD_MASK 0x70
+
+#define SD_OCP_THD_450 0x00
+#define SD_OCP_THD_550 0x01
+#define SD_OCP_THD_650 0x02
+#define SD_OCP_THD_750 0x03
+#define SD_OCP_THD_850 0x04
+#define SD_OCP_THD_950 0x05
+#define SD_OCP_THD_1050 0x06
+#define SD_OCP_THD_1150 0x07
+#define SD_OCP_THD_MASK 0x07
+
+#define FPGA_MS_PULL_CTL_EN 0xEF
+#define FPGA_SD_PULL_CTL_EN 0xF7
+#define FPGA_XD_PULL_CTL_EN1 0xFE
+#define FPGA_XD_PULL_CTL_EN2 0xFD
+#define FPGA_XD_PULL_CTL_EN3 0xFB
+
+#define FPGA_MS_PULL_CTL_BIT 0x10
+#define FPGA_SD_PULL_CTL_BIT 0x08
+
+#define BLINK_EN 0x08
+#define LED_GPIO0 (0 << 4)
+#define LED_GPIO1 (1 << 4)
+#define LED_GPIO2 (2 << 4)
+
+#define SDIO_BUS_CTRL 0x01
+#define SDIO_CD_CTRL 0x02
+
+#define SSC_RSTB 0x80
+#define SSC_8X_EN 0x40
+#define SSC_FIX_FRAC 0x20
+#define SSC_SEL_1M 0x00
+#define SSC_SEL_2M 0x08
+#define SSC_SEL_4M 0x10
+#define SSC_SEL_8M 0x18
+
+#define SSC_DEPTH_MASK 0x07
+#define SSC_DEPTH_DISALBE 0x00
+#define SSC_DEPTH_4M 0x01
+#define SSC_DEPTH_2M 0x02
+#define SSC_DEPTH_1M 0x03
+#define SSC_DEPTH_512K 0x04
+#define SSC_DEPTH_256K 0x05
+#define SSC_DEPTH_128K 0x06
+#define SSC_DEPTH_64K 0x07
+
+#define XD_D3_NP 0x00
+#define XD_D3_PD (0x01 << 6)
+#define XD_D3_PU (0x02 << 6)
+#define XD_D2_NP 0x00
+#define XD_D2_PD (0x01 << 4)
+#define XD_D2_PU (0x02 << 4)
+#define XD_D1_NP 0x00
+#define XD_D1_PD (0x01 << 2)
+#define XD_D1_PU (0x02 << 2)
+#define XD_D0_NP 0x00
+#define XD_D0_PD 0x01
+#define XD_D0_PU 0x02
+
+#define SD_D7_NP 0x00
+#define SD_D7_PD (0x01 << 4)
+#define SD_DAT7_PU (0x02 << 4)
+#define SD_CLK_NP 0x00
+#define SD_CLK_PD (0x01 << 2)
+#define SD_CLK_PU (0x02 << 2)
+#define SD_D5_NP 0x00
+#define SD_D5_PD 0x01
+#define SD_D5_PU 0x02
+
+#define MS_D1_NP 0x00
+#define MS_D1_PD (0x01 << 6)
+#define MS_D1_PU (0x02 << 6)
+#define MS_D2_NP 0x00
+#define MS_D2_PD (0x01 << 4)
+#define MS_D2_PU (0x02 << 4)
+#define MS_CLK_NP 0x00
+#define MS_CLK_PD (0x01 << 2)
+#define MS_CLK_PU (0x02 << 2)
+#define MS_D6_NP 0x00
+#define MS_D6_PD 0x01
+#define MS_D6_PU 0x02
+
+#define XD_D7_NP 0x00
+#define XD_D7_PD (0x01 << 6)
+#define XD_D7_PU (0x02 << 6)
+#define XD_D6_NP 0x00
+#define XD_D6_PD (0x01 << 4)
+#define XD_D6_PU (0x02 << 4)
+#define XD_D5_NP 0x00
+#define XD_D5_PD (0x01 << 2)
+#define XD_D5_PU (0x02 << 2)
+#define XD_D4_NP 0x00
+#define XD_D4_PD 0x01
+#define XD_D4_PU 0x02
+
+#define SD_D6_NP 0x00
+#define SD_D6_PD (0x01 << 6)
+#define SD_D6_PU (0x02 << 6)
+#define SD_D0_NP 0x00
+#define SD_D0_PD (0x01 << 4)
+#define SD_D0_PU (0x02 << 4)
+#define SD_D1_NP 0x00
+#define SD_D1_PD 0x01
+#define SD_D1_PU 0x02
+
+#define MS_D3_NP 0x00
+#define MS_D3_PD (0x01 << 6)
+#define MS_D3_PU (0x02 << 6)
+#define MS_D0_NP 0x00
+#define MS_D0_PD (0x01 << 4)
+#define MS_D0_PU (0x02 << 4)
+#define MS_BS_NP 0x00
+#define MS_BS_PD (0x01 << 2)
+#define MS_BS_PU (0x02 << 2)
+
+#define XD_WP_NP 0x00
+#define XD_WP_PD (0x01 << 6)
+#define XD_WP_PU (0x02 << 6)
+#define XD_CE_NP 0x00
+#define XD_CE_PD (0x01 << 3)
+#define XD_CE_PU (0x02 << 3)
+#define XD_CLE_NP 0x00
+#define XD_CLE_PD (0x01 << 1)
+#define XD_CLE_PU (0x02 << 1)
+#define XD_CD_PD 0x00
+#define XD_CD_PU 0x01
+
+#define SD_D4_NP 0x00
+#define SD_D4_PD (0x01 << 6)
+#define SD_D4_PU (0x02 << 6)
+
+#define MS_D7_NP 0x00
+#define MS_D7_PD (0x01 << 6)
+#define MS_D7_PU (0x02 << 6)
+
+#define XD_RDY_NP 0x00
+#define XD_RDY_PD (0x01 << 6)
+#define XD_RDY_PU (0x02 << 6)
+#define XD_WE_NP 0x00
+#define XD_WE_PD (0x01 << 4)
+#define XD_WE_PU (0x02 << 4)
+#define XD_RE_NP 0x00
+#define XD_RE_PD (0x01 << 2)
+#define XD_RE_PU (0x02 << 2)
+#define XD_ALE_NP 0x00
+#define XD_ALE_PD 0x01
+#define XD_ALE_PU 0x02
+
+#define SD_D3_NP 0x00
+#define SD_D3_PD (0x01 << 4)
+#define SD_D3_PU (0x02 << 4)
+#define SD_D2_NP 0x00
+#define SD_D2_PD (0x01 << 2)
+#define SD_D2_PU (0x02 << 2)
+
+#define MS_INS_PD 0x00
+#define MS_INS_PU (0x01 << 7)
+#define SD_WP_NP 0x00
+#define SD_WP_PD (0x01 << 5)
+#define SD_WP_PU (0x02 << 5)
+#define SD_CD_PD 0x00
+#define SD_CD_PU (0x01 << 4)
+#define SD_CMD_NP 0x00
+#define SD_CMD_PD (0x01 << 2)
+#define SD_CMD_PU (0x02 << 2)
+
+#define MS_D5_NP 0x00
+#define MS_D5_PD (0x01 << 2)
+#define MS_D5_PU (0x02 << 2)
+#define MS_D4_NP 0x00
+#define MS_D4_PD 0x01
+#define MS_D4_PU 0x02
+
+#define FORCE_PM_CLOCK 0x10
+#define EN_CLOCK_PM 0x01
+
+#define HOST_ENTER_S3 0x02
+#define HOST_ENTER_S1 0x01
+
+#define AUX_PWR_DETECTED 0x01
+
+#define PHY_DEBUG_MODE 0x01
+
+#define SPI_COMMAND_BIT_8 0xE0
+#define SPI_ADDRESS_BIT_24 0x17
+#define SPI_ADDRESS_BIT_32 0x1F
+
+#define SPI_TRANSFER0_START 0x80
+#define SPI_TRANSFER0_END 0x40
+#define SPI_C_MODE0 0x00
+#define SPI_CA_MODE0 0x01
+#define SPI_CDO_MODE0 0x02
+#define SPI_CDI_MODE0 0x03
+#define SPI_CADO_MODE0 0x04
+#define SPI_CADI_MODE0 0x05
+#define SPI_POLLING_MODE0 0x06
+
+#define SPI_TRANSFER1_START 0x80
+#define SPI_TRANSFER1_END 0x40
+#define SPI_DO_MODE1 0x00
+#define SPI_DI_MODE1 0x01
+
+#define CS_POLARITY_HIGH 0x40
+#define CS_POLARITY_LOW 0x00
+#define DTO_MSB_FIRST 0x00
+#define DTO_LSB_FIRST 0x20
+#define SPI_MASTER 0x00
+#define SPI_SLAVE 0x10
+#define SPI_MODE0 0x00
+#define SPI_MODE1 0x04
+#define SPI_MODE2 0x08
+#define SPI_MODE3 0x0C
+#define SPI_MANUAL 0x00
+#define SPI_HALF_AUTO 0x01
+#define SPI_AUTO 0x02
+#define SPI_EEPROM_AUTO 0x03
+
+#define EDO_TIMING_MASK 0x03
+#define SAMPLE_RISING 0x00
+#define SAMPLE_DELAY_HALF 0x01
+#define SAMPLE_DELAY_ONE 0x02
+#define SAPMLE_DELAY_ONE_HALF 0x03
+#define TCS_MASK 0x0C
+
+#define NOT_BYPASS_SD 0x02
+#define DISABLE_SDIO_FUNC 0x04
+#define SELECT_1LUN 0x08
+
+#define PWR_GATE_EN 0x01
+#define LDO3318_PWR_MASK 0x06
+#define LDO_ON 0x00
+#define LDO_SUSPEND 0x04
+#define LDO_OFF 0x06
+
+#define SD_CFG1 0xFDA0
+#define SD_CFG2 0xFDA1
+#define SD_CFG3 0xFDA2
+#define SD_STAT1 0xFDA3
+#define SD_STAT2 0xFDA4
+#define SD_BUS_STAT 0xFDA5
+#define SD_PAD_CTL 0xFDA6
+#define SD_SAMPLE_POINT_CTL 0xFDA7
+#define SD_PUSH_POINT_CTL 0xFDA8
+#define SD_CMD0 0xFDA9
+#define SD_CMD1 0xFDAA
+#define SD_CMD2 0xFDAB
+#define SD_CMD3 0xFDAC
+#define SD_CMD4 0xFDAD
+#define SD_CMD5 0xFDAE
+#define SD_BYTE_CNT_L 0xFDAF
+#define SD_BYTE_CNT_H 0xFDB0
+#define SD_BLOCK_CNT_L 0xFDB1
+#define SD_BLOCK_CNT_H 0xFDB2
+#define SD_TRANSFER 0xFDB3
+#define SD_CMD_STATE 0xFDB5
+#define SD_DATA_STATE 0xFDB6
+
+#define DCM_DRP_CTL 0xFC23
+#define DCM_DRP_TRIG 0xFC24
+#define DCM_DRP_CFG 0xFC25
+#define DCM_DRP_WR_DATA_L 0xFC26
+#define DCM_DRP_WR_DATA_H 0xFC27
+#define DCM_DRP_RD_DATA_L 0xFC28
+#define DCM_DRP_RD_DATA_H 0xFC29
+#define SD_VPCLK0_CTL 0xFC2A
+#define SD_VPCLK1_CTL 0xFC2B
+#define SD_DCMPS0_CTL 0xFC2C
+#define SD_DCMPS1_CTL 0xFC2D
+#define SD_VPTX_CTL SD_VPCLK0_CTL
+#define SD_VPRX_CTL SD_VPCLK1_CTL
+#define SD_DCMPS_TX_CTL SD_DCMPS0_CTL
+#define SD_DCMPS_RX_CTL SD_DCMPS1_CTL
+
+#define CARD_CLK_SOURCE 0xFC2E
+
+#define CARD_PWR_CTL 0xFD50
+#define CARD_CLK_SWITCH 0xFD51
+#define CARD_SHARE_MODE 0xFD52
+#define CARD_DRIVE_SEL 0xFD53
+#define CARD_STOP 0xFD54
+#define CARD_OE 0xFD55
+#define CARD_AUTO_BLINK 0xFD56
+#define CARD_GPIO_DIR 0xFD57
+#define CARD_GPIO 0xFD58
+
+#define CARD_DATA_SOURCE 0xFD5B
+#define CARD_SELECT 0xFD5C
+#define SD30_DRIVE_SEL 0xFD5E
+
+#define CARD_CLK_EN 0xFD69
+
+#define SDIO_CTRL 0xFD6B
+
+#define FPDCTL 0xFC00
+#define PDINFO 0xFC01
+
+#define CLK_CTL 0xFC02
+#define CLK_DIV 0xFC03
+#define CLK_SEL 0xFC04
+
+#define SSC_DIV_N_0 0xFC0F
+#define SSC_DIV_N_1 0xFC10
+
+#define RCCTL 0xFC14
+
+#define FPGA_PULL_CTL 0xFC1D
+
+#define CARD_PULL_CTL1 0xFD60
+#define CARD_PULL_CTL2 0xFD61
+#define CARD_PULL_CTL3 0xFD62
+#define CARD_PULL_CTL4 0xFD63
+#define CARD_PULL_CTL5 0xFD64
+#define CARD_PULL_CTL6 0xFD65
+
+#define IRQEN0 0xFE20
+#define IRQSTAT0 0xFE21
+#define IRQEN1 0xFE22
+#define IRQSTAT1 0xFE23
+#define TLPRIEN 0xFE24
+#define TLPRISTAT 0xFE25
+#define TLPTIEN 0xFE26
+#define TLPTISTAT 0xFE27
+#define DMATC0 0xFE28
+#define DMATC1 0xFE29
+#define DMATC2 0xFE2A
+#define DMATC3 0xFE2B
+#define DMACTL 0xFE2C
+#define BCTL 0xFE2D
+#define RBBC0 0xFE2E
+#define RBBC1 0xFE2F
+#define RBDAT 0xFE30
+#define RBCTL 0xFE34
+#define CFGADDR0 0xFE35
+#define CFGADDR1 0xFE36
+#define CFGDATA0 0xFE37
+#define CFGDATA1 0xFE38
+#define CFGDATA2 0xFE39
+#define CFGDATA3 0xFE3A
+#define CFGRWCTL 0xFE3B
+#define PHYRWCTL 0xFE3C
+#define PHYDATA0 0xFE3D
+#define PHYDATA1 0xFE3E
+#define PHYADDR 0xFE3F
+#define MSGRXDATA0 0xFE40
+#define MSGRXDATA1 0xFE41
+#define MSGRXDATA2 0xFE42
+#define MSGRXDATA3 0xFE43
+#define MSGTXDATA0 0xFE44
+#define MSGTXDATA1 0xFE45
+#define MSGTXDATA2 0xFE46
+#define MSGTXDATA3 0xFE47
+#define MSGTXCTL 0xFE48
+#define PETXCFG 0xFE49
+
+#define CDRESUMECTL 0xFE52
+#define WAKE_SEL_CTL 0xFE54
+#define PME_FORCE_CTL 0xFE56
+#define ASPM_FORCE_CTL 0xFE57
+#define PM_CLK_FORCE_CTL 0xFE58
+#define PERST_GLITCH_WIDTH 0xFE5C
+#define CHANGE_LINK_STATE 0xFE5B
+#define RESET_LOAD_REG 0xFE5E
+#define HOST_SLEEP_STATE 0xFE60
+#define MAIN_PWR_OFF_CTL 0xFE70 /* RTS5208 */
+
+#define NFTS_TX_CTRL 0xFE72
+
+#define PWR_GATE_CTRL 0xFE75
+#define PWD_SUSPEND_EN 0xFE76
+
+#define EFUSE_CONTENT 0xFE5F
+
+#define XD_INIT 0xFD10
+#define XD_DTCTL 0xFD11
+#define XD_CTL 0xFD12
+#define XD_TRANSFER 0xFD13
+#define XD_CFG 0xFD14
+#define XD_ADDRESS0 0xFD15
+#define XD_ADDRESS1 0xFD16
+#define XD_ADDRESS2 0xFD17
+#define XD_ADDRESS3 0xFD18
+#define XD_ADDRESS4 0xFD19
+#define XD_DAT 0xFD1A
+#define XD_PAGE_CNT 0xFD1B
+#define XD_PAGE_STATUS 0xFD1C
+#define XD_BLOCK_STATUS 0xFD1D
+#define XD_BLOCK_ADDR1_L 0xFD1E
+#define XD_BLOCK_ADDR1_H 0xFD1F
+#define XD_BLOCK_ADDR2_L 0xFD20
+#define XD_BLOCK_ADDR2_H 0xFD21
+#define XD_BYTE_CNT_L 0xFD22
+#define XD_BYTE_CNT_H 0xFD23
+#define XD_PARITY 0xFD24
+#define XD_ECC_BIT1 0xFD25
+#define XD_ECC_BYTE1 0xFD26
+#define XD_ECC_BIT2 0xFD27
+#define XD_ECC_BYTE2 0xFD28
+#define XD_RESERVED0 0xFD29
+#define XD_RESERVED1 0xFD2A
+#define XD_RESERVED2 0xFD2B
+#define XD_RESERVED3 0xFD2C
+#define XD_CHK_DATA_STATUS 0xFD2D
+#define XD_CATCTL 0xFD2E
+
+#define MS_CFG 0xFD40
+#define MS_TPC 0xFD41
+#define MS_TRANS_CFG 0xFD42
+#define MS_TRANSFER 0xFD43
+#define MS_INT_REG 0xFD44
+#define MS_BYTE_CNT 0xFD45
+#define MS_SECTOR_CNT_L 0xFD46
+#define MS_SECTOR_CNT_H 0xFD47
+#define MS_DBUS_H 0xFD48
+
+#define SSC_CTL1 0xFC11
+#define SSC_CTL2 0xFC12
+
+#define OCPCTL 0xFC15
+#define OCPSTAT 0xFC16
+#define OCPCLR 0xFC17 /* 5208 */
+#define OCPPARA1 0xFC18
+#define OCPPARA2 0xFC19
+
+#define EFUSE_OP 0xFC20
+#define EFUSE_CTRL 0xFC21
+#define EFUSE_DATA 0xFC22
+
+#define SPI_COMMAND 0xFD80
+#define SPI_ADDR0 0xFD81
+#define SPI_ADDR1 0xFD82
+#define SPI_ADDR2 0xFD83
+#define SPI_ADDR3 0xFD84
+#define SPI_CA_NUMBER 0xFD85
+#define SPI_LENGTH0 0xFD86
+#define SPI_LENGTH1 0xFD87
+#define SPI_DATA 0xFD88
+#define SPI_DATA_NUMBER 0xFD89
+#define SPI_TRANSFER0 0xFD90
+#define SPI_TRANSFER1 0xFD91
+#define SPI_CONTROL 0xFD92
+#define SPI_SIG 0xFD93
+#define SPI_TCTL 0xFD94
+#define SPI_SLAVE_NUM 0xFD95
+#define SPI_CLK_DIVIDER0 0xFD96
+#define SPI_CLK_DIVIDER1 0xFD97
+
+#define SRAM_BASE 0xE600
+#define RBUF_BASE 0xF400
+#define PPBUF_BASE1 0xF800
+#define PPBUF_BASE2 0xFA00
+#define IMAGE_FLAG_ADDR0 0xCE80
+#define IMAGE_FLAG_ADDR1 0xCE81
+
+#define READ_OP 1
+#define WRITE_OP 2
+
+#define LCTLR 0x80
+
+#define POLLING_WAIT_CNT 1
+#define IDLE_MAX_COUNT 10
+#define SDIO_IDLE_COUNT 10
+
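+/* consecutive card-detect samples required by card_cd_debounce() */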
+#define DEBOUNCE_CNT 5
+
+void do_remaining_work(struct rtsx_chip *chip);
+void try_to_switch_sdio_ctrl(struct rtsx_chip *chip);
+void do_reset_sd_card(struct rtsx_chip *chip);
+void do_reset_xd_card(struct rtsx_chip *chip);
+void do_reset_ms_card(struct rtsx_chip *chip);
+void rtsx_power_off_card(struct rtsx_chip *chip);
+void rtsx_release_cards(struct rtsx_chip *chip);
+void rtsx_reset_cards(struct rtsx_chip *chip);
+void rtsx_reinit_cards(struct rtsx_chip *chip, int reset_chip);
+void rtsx_init_cards(struct rtsx_chip *chip);
+int switch_ssc_clock(struct rtsx_chip *chip, int clk);
+int switch_normal_clock(struct rtsx_chip *chip, int clk);
+int enable_card_clock(struct rtsx_chip *chip, u8 card);
+int disable_card_clock(struct rtsx_chip *chip, u8 card);
+int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 sec_addr, u16 sec_cnt);
+void trans_dma_enable(enum dma_data_direction dir,
+ struct rtsx_chip *chip, u32 byte_cnt, u8 pack_size);
+void toggle_gpio(struct rtsx_chip *chip, u8 gpio);
+void turn_on_led(struct rtsx_chip *chip, u8 gpio);
+void turn_off_led(struct rtsx_chip *chip, u8 gpio);
+
+int card_share_mode(struct rtsx_chip *chip, int card);
+int select_card(struct rtsx_chip *chip, int card);
+int detect_card_cd(struct rtsx_chip *chip, int card);
+int check_card_exist(struct rtsx_chip *chip, unsigned int lun);
+int check_card_ready(struct rtsx_chip *chip, unsigned int lun);
+int check_card_wp(struct rtsx_chip *chip, unsigned int lun);
+int check_card_fail(struct rtsx_chip *chip, unsigned int lun);
+int check_card_ejected(struct rtsx_chip *chip, unsigned int lun);
+void eject_card(struct rtsx_chip *chip, unsigned int lun);
+u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun);
+
+static inline u32 get_card_size(struct rtsx_chip *chip, unsigned int lun)
+{
+#ifdef SUPPORT_SD_LOCK
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ if ((get_lun_card(chip, lun) == SD_CARD) &&
+ (sd_card->sd_lock_status & SD_LOCKED))
+ return 0;
+ else
+ return chip->capacity[lun];
+#else
+ return chip->capacity[lun];
+#endif
+}
+
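+/*
+ * ASIC silicon derives the card clock from the SSC PLL; other builds
+ * (e.g. FPGA-based parts) fall back to the fixed selections in
+ * switch_normal_clock().
+ */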
+static inline int switch_clock(struct rtsx_chip *chip, int clk)
+{
+ int retval = 0;
+
+ if (chip->asic_code)
+ retval = switch_ssc_clock(chip, clk);
+ else
+ retval = switch_normal_clock(chip, clk);
+
+ return retval;
+}
+
+int card_power_on(struct rtsx_chip *chip, u8 card);
+int card_power_off(struct rtsx_chip *chip, u8 card);
+
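+/* 0x0F is SD_POWER_OFF | MS_POWER_OFF, i.e. both card power rails forced off */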
+static inline int card_power_off_all(struct rtsx_chip *chip)
+{
+ RTSX_WRITE_REG(chip, CARD_PWR_CTL, 0x0F, 0x0F);
+
+ return STATUS_SUCCESS;
+}
+
+static inline void rtsx_clear_xd_error(struct rtsx_chip *chip)
+{
+ rtsx_write_register(chip, CARD_STOP, XD_STOP | XD_CLR_ERR,
+ XD_STOP | XD_CLR_ERR);
+}
+
+static inline void rtsx_clear_sd_error(struct rtsx_chip *chip)
+{
+ rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+}
+
+static inline void rtsx_clear_ms_error(struct rtsx_chip *chip)
+{
+ rtsx_write_register(chip, CARD_STOP, MS_STOP | MS_CLR_ERR,
+ MS_STOP | MS_CLR_ERR);
+}
+
+static inline void rtsx_clear_spi_error(struct rtsx_chip *chip)
+{
+ rtsx_write_register(chip, CARD_STOP, SPI_STOP | SPI_CLR_ERR,
+ SPI_STOP | SPI_CLR_ERR);
+}
+
+#ifdef SUPPORT_SDIO_ASPM
+void dynamic_configure_sdio_aspm(struct rtsx_chip *chip);
+#endif
+
+#endif /* __REALTEK_RTSX_CARD_H */
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
new file mode 100644
index 000000000000..6426807a906f
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -0,0 +1,1979 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/vmalloc.h>
+
+#include "rtsx.h"
+#include "rtsx_transport.h"
+#include "rtsx_scsi.h"
+#include "rtsx_card.h"
+#include "rtsx_chip.h"
+#include "rtsx_sys.h"
+#include "general.h"
+
+#include "sd.h"
+#include "xd.h"
+#include "ms.h"
+
+static void rtsx_calibration(struct rtsx_chip *chip)
+{
+ rtsx_write_phy_register(chip, 0x1B, 0x135E);
+ wait_timeout(10);
+ rtsx_write_phy_register(chip, 0x00, 0x0280);
+ rtsx_write_phy_register(chip, 0x01, 0x7112);
+ rtsx_write_phy_register(chip, 0x01, 0x7110);
+ rtsx_write_phy_register(chip, 0x01, 0x7112);
+ rtsx_write_phy_register(chip, 0x01, 0x7113);
+ rtsx_write_phy_register(chip, 0x00, 0x0288);
+}
+
+void rtsx_disable_card_int(struct rtsx_chip *chip)
+{
+ u32 reg = rtsx_readl(chip, RTSX_BIER);
+
+ reg &= ~(XD_INT_EN | SD_INT_EN | MS_INT_EN);
+ rtsx_writel(chip, RTSX_BIER, reg);
+}
+
+void rtsx_enable_card_int(struct rtsx_chip *chip)
+{
+ u32 reg = rtsx_readl(chip, RTSX_BIER);
+ int i;
+
+ for (i = 0; i <= chip->max_lun; i++) {
+ if (chip->lun2card[i] & XD_CARD)
+ reg |= XD_INT_EN;
+ if (chip->lun2card[i] & SD_CARD)
+ reg |= SD_INT_EN;
+ if (chip->lun2card[i] & MS_CARD)
+ reg |= MS_INT_EN;
+ }
+ if (chip->hw_bypass_sd)
+ reg &= ~((u32)SD_INT_EN);
+
+ rtsx_writel(chip, RTSX_BIER, reg);
+}
+
+void rtsx_enable_bus_int(struct rtsx_chip *chip)
+{
+ u32 reg = 0;
+#ifndef DISABLE_CARD_INT
+ int i;
+#endif
+
+ reg = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN;
+
+#ifndef DISABLE_CARD_INT
+ for (i = 0; i <= chip->max_lun; i++) {
+ RTSX_DEBUGP("lun2card[%d] = 0x%02x\n", i, chip->lun2card[i]);
+
+ if (chip->lun2card[i] & XD_CARD)
+ reg |= XD_INT_EN;
+ if (chip->lun2card[i] & SD_CARD)
+ reg |= SD_INT_EN;
+ if (chip->lun2card[i] & MS_CARD)
+ reg |= MS_INT_EN;
+ }
+ if (chip->hw_bypass_sd)
+ reg &= ~((u32)SD_INT_EN);
+#endif
+
+ if (chip->ic_version >= IC_VER_C)
+ reg |= DELINK_INT_EN;
+#ifdef SUPPORT_OCP
+ reg |= OC_INT_EN;
+#endif
+ if (!chip->adma_mode)
+ reg |= DATA_DONE_INT_EN;
+
+ /* Enable Bus Interrupt */
+ rtsx_writel(chip, RTSX_BIER, reg);
+
+ RTSX_DEBUGP("RTSX_BIER: 0x%08x\n", reg);
+}
+
+void rtsx_disable_bus_int(struct rtsx_chip *chip)
+{
+ rtsx_writel(chip, RTSX_BIER, 0);
+}
+
+static int rtsx_pre_handle_sdio_old(struct rtsx_chip *chip)
+{
+ if (chip->ignore_sd && CHK_SDIO_EXIST(chip)) {
+ if (chip->asic_code) {
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL5, 0xFF,
+ MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
+ } else {
+ RTSX_WRITE_REG(chip, FPGA_PULL_CTL, 0xFF,
+ FPGA_SD_PULL_CTL_EN);
+ }
+ RTSX_WRITE_REG(chip, CARD_SHARE_MODE, 0xFF, CARD_SHARE_48_SD);
+
+ /* Enable SDIO internal clock */
+ RTSX_WRITE_REG(chip, 0xFF2C, 0x01, 0x01);
+
+ RTSX_WRITE_REG(chip, SDIO_CTRL, 0xFF,
+ SDIO_BUS_CTRL | SDIO_CD_CTRL);
+
+ chip->sd_int = 1;
+ chip->sd_io = 1;
+ } else {
+ chip->need_reset |= SD_CARD;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+#ifdef HW_AUTO_SWITCH_SD_BUS
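+/*
+ * Newer silicon can hand the shared SD bus over to an SDIO host automatically.
+ * If a previous session left SDIO in charge (or the hardware bypass bit is
+ * still set on first load) and the card-detect toggle bit shows the socket has
+ * not changed, keep the bus routed to SDIO; otherwise claim the slot back and
+ * schedule a normal SD reset.
+ */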
+static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
+{
+ u8 tmp;
+ int sw_bypass_sd = 0;
+ int retval;
+
+ if (chip->driver_first_load) {
+ if (CHECK_PID(chip, 0x5288)) {
+ RTSX_READ_REG(chip, 0xFE5A, &tmp);
+ if (tmp & 0x08)
+ sw_bypass_sd = 1;
+ } else if (CHECK_PID(chip, 0x5208)) {
+ RTSX_READ_REG(chip, 0xFE70, &tmp);
+ if (tmp & 0x80)
+ sw_bypass_sd = 1;
+ }
+ } else {
+ if (chip->sdio_in_charge)
+ sw_bypass_sd = 1;
+ }
+ RTSX_DEBUGP("chip->sdio_in_charge = %d\n", chip->sdio_in_charge);
+ RTSX_DEBUGP("chip->driver_first_load = %d\n", chip->driver_first_load);
+ RTSX_DEBUGP("sw_bypass_sd = %d\n", sw_bypass_sd);
+
+ if (sw_bypass_sd) {
+ u8 cd_toggle_mask = 0;
+
+ RTSX_READ_REG(chip, TLPTISTAT, &tmp);
+ cd_toggle_mask = 0x08;
+
+ if (tmp & cd_toggle_mask) {
+ /* Disable sdio_bus_auto_switch */
+ if (CHECK_PID(chip, 0x5288))
+ RTSX_WRITE_REG(chip, 0xFE5A, 0x08, 0x00);
+ else if (CHECK_PID(chip, 0x5208))
+ RTSX_WRITE_REG(chip, 0xFE70, 0x80, 0x00);
+
+ RTSX_WRITE_REG(chip, TLPTISTAT, 0xFF, tmp);
+
+ chip->need_reset |= SD_CARD;
+ } else {
+ RTSX_DEBUGP("Chip inserted with SDIO!\n");
+
+ if (chip->asic_code) {
+ retval = sd_pull_ctl_enable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ RTSX_WRITE_REG(chip, FPGA_PULL_CTL,
+ FPGA_SD_PULL_CTL_BIT | 0x20, 0);
+ }
+ retval = card_share_mode(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ /* Enable sdio_bus_auto_switch */
+ if (CHECK_PID(chip, 0x5288))
+ RTSX_WRITE_REG(chip, 0xFE5A, 0x08, 0x08);
+ else if (CHECK_PID(chip, 0x5208))
+ RTSX_WRITE_REG(chip, 0xFE70, 0x80, 0x80);
+
+ chip->chip_insert_with_sdio = 1;
+ chip->sd_io = 1;
+ }
+ } else {
+ RTSX_WRITE_REG(chip, TLPTISTAT, 0x08, 0x08);
+
+ chip->need_reset |= SD_CARD;
+ }
+
+ return STATUS_SUCCESS;
+}
+#endif
+
+int rtsx_reset_chip(struct rtsx_chip *chip)
+{
+ int retval;
+
+ rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);
+
+ rtsx_disable_aspm(chip);
+
+ RTSX_WRITE_REG(chip, HOST_SLEEP_STATE, 0x03, 0x00);
+
+ /* Disable card clock */
+ RTSX_WRITE_REG(chip, CARD_CLK_EN, 0x1E, 0);
+
+#ifdef SUPPORT_OCP
+ /* SSC power on, OCD power on */
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
+ RTSX_WRITE_REG(chip, FPDCTL, OC_POWER_DOWN, 0);
+ else
+ RTSX_WRITE_REG(chip, FPDCTL, OC_POWER_DOWN, MS_OC_POWER_DOWN);
+
+ RTSX_WRITE_REG(chip, OCPPARA1, OCP_TIME_MASK, OCP_TIME_800);
+ RTSX_WRITE_REG(chip, OCPPARA2, OCP_THD_MASK, OCP_THD_244_946);
+ RTSX_WRITE_REG(chip, OCPCTL, 0xFF, CARD_OC_INT_EN | CARD_DETECT_EN);
+#else
+ /* OC power down */
+ RTSX_WRITE_REG(chip, FPDCTL, OC_POWER_DOWN, OC_POWER_DOWN);
+#endif
+
+ if (!CHECK_PID(chip, 0x5288))
+ RTSX_WRITE_REG(chip, CARD_GPIO_DIR, 0xFF, 0x03);
+
+ /* Turn off LED */
+ RTSX_WRITE_REG(chip, CARD_GPIO, 0xFF, 0x03);
+
+ /* Reset delink mode */
+ RTSX_WRITE_REG(chip, CHANGE_LINK_STATE, 0x0A, 0);
+
+ /* Card driving select */
+ RTSX_WRITE_REG(chip, CARD_DRIVE_SEL, 0xFF, chip->card_drive_sel);
+
+#ifdef LED_AUTO_BLINK
+ RTSX_WRITE_REG(chip, CARD_AUTO_BLINK, 0xFF,
+ LED_BLINK_SPEED | BLINK_EN | LED_GPIO0);
+#endif
+
+ if (chip->asic_code) {
+ /* Enable SSC Clock */
+ RTSX_WRITE_REG(chip, SSC_CTL1, 0xFF, SSC_8X_EN | SSC_SEL_4M);
+ RTSX_WRITE_REG(chip, SSC_CTL2, 0xFF, 0x12);
+ }
+
+	/*
+	 * Disable cd_pwr_save (u_force_rst_core_en=0, u_cd_rst_core_en=0)
+	 * 0xFE5B
+	 *	bit[1]	u_cd_rst_core_en	rst_value = 0
+	 *	bit[2]	u_force_rst_core_en	rst_value = 0
+	 *	bit[5]	u_mac_phy_rst_n_dbg	rst_value = 1
+	 *	bit[4]	u_non_sticky_rst_n_dbg	rst_value = 0
+	 */
+ RTSX_WRITE_REG(chip, CHANGE_LINK_STATE, 0x16, 0x10);
+
+ /* Enable ASPM */
+ if (chip->aspm_l0s_l1_en) {
+ if (chip->dynamic_aspm) {
+ if (CHK_SDIO_EXIST(chip)) {
+ if (CHECK_PID(chip, 0x5288)) {
+ retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF, chip->aspm_l0s_l1_en);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ } else {
+ if (CHECK_PID(chip, 0x5208))
+ RTSX_WRITE_REG(chip, ASPM_FORCE_CTL,
+ 0xFF, 0x3F);
+
+ retval = rtsx_write_config_byte(chip, LCTLR,
+ chip->aspm_l0s_l1_en);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ chip->aspm_level[0] = chip->aspm_l0s_l1_en;
+ if (CHK_SDIO_EXIST(chip)) {
+ chip->aspm_level[1] = chip->aspm_l0s_l1_en;
+ if (CHECK_PID(chip, 0x5288))
+ retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF, chip->aspm_l0s_l1_en);
+ else
+ retval = rtsx_write_cfg_dw(chip, 1, 0xC0, 0xFF, chip->aspm_l0s_l1_en);
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ }
+
+ chip->aspm_enabled = 1;
+ }
+ } else {
+ if (chip->asic_code && CHECK_PID(chip, 0x5208)) {
+ retval = rtsx_write_phy_register(chip, 0x07, 0x0129);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ retval = rtsx_write_config_byte(chip, LCTLR,
+ chip->aspm_l0s_l1_en);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = rtsx_write_config_byte(chip, 0x81, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_SDIO_EXIST(chip)) {
+ if (CHECK_PID(chip, 0x5288))
+ retval = rtsx_write_cfg_dw(chip, 2, 0xC0,
+ 0xFF00, 0x0100);
+ else
+ retval = rtsx_write_cfg_dw(chip, 1, 0xC0,
+ 0xFF00, 0x0100);
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ }
+
+ if (CHECK_PID(chip, 0x5288)) {
+ if (!CHK_SDIO_EXIST(chip)) {
+ retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFFFF,
+ 0x0103);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = rtsx_write_cfg_dw(chip, 2, 0x84, 0xFF, 0x03);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ }
+ }
+
+ RTSX_WRITE_REG(chip, IRQSTAT0, LINK_RDY_INT, LINK_RDY_INT);
+
+ RTSX_WRITE_REG(chip, PERST_GLITCH_WIDTH, 0xFF, 0x80);
+
+ /* Enable PCIE interrupt */
+ if (chip->asic_code) {
+ if (CHECK_PID(chip, 0x5208)) {
+ if (chip->phy_debug_mode) {
+ RTSX_WRITE_REG(chip, CDRESUMECTL, 0x77, 0);
+ rtsx_disable_bus_int(chip);
+ } else {
+ rtsx_enable_bus_int(chip);
+ }
+
+ if (chip->ic_version >= IC_VER_D) {
+ u16 reg;
+ retval = rtsx_read_phy_register(chip, 0x00,
+ &reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ reg &= 0xFE7F;
+ reg |= 0x80;
+ retval = rtsx_write_phy_register(chip, 0x00,
+ reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = rtsx_read_phy_register(chip, 0x1C,
+ &reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ reg &= 0xFFF7;
+ retval = rtsx_write_phy_register(chip, 0x1C,
+ reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ }
+
+ if (chip->driver_first_load &&
+ (chip->ic_version < IC_VER_C))
+ rtsx_calibration(chip);
+
+ } else {
+ rtsx_enable_bus_int(chip);
+ }
+ } else {
+ rtsx_enable_bus_int(chip);
+ }
+
+ chip->need_reset = 0;
+
+ chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
+
+ if (chip->hw_bypass_sd)
+ goto NextCard;
+ RTSX_DEBUGP("In rtsx_reset_chip, chip->int_reg = 0x%x\n",
+ chip->int_reg);
+ if (chip->int_reg & SD_EXIST) {
+#ifdef HW_AUTO_SWITCH_SD_BUS
+ if (CHECK_PID(chip, 0x5208) && (chip->ic_version < IC_VER_C))
+ retval = rtsx_pre_handle_sdio_old(chip);
+ else
+ retval = rtsx_pre_handle_sdio_new(chip);
+
+ RTSX_DEBUGP("chip->need_reset = 0x%x (rtsx_reset_chip)\n",
+ (unsigned int)(chip->need_reset));
+#else /* HW_AUTO_SWITCH_SD_BUS */
+ retval = rtsx_pre_handle_sdio_old(chip);
+#endif /* HW_AUTO_SWITCH_SD_BUS */
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ } else {
+ chip->sd_io = 0;
+ RTSX_WRITE_REG(chip, SDIO_CTRL, SDIO_BUS_CTRL | SDIO_CD_CTRL,
+ 0);
+ }
+
+NextCard:
+ if (chip->int_reg & XD_EXIST)
+ chip->need_reset |= XD_CARD;
+ if (chip->int_reg & MS_EXIST)
+ chip->need_reset |= MS_CARD;
+ if (chip->int_reg & CARD_EXIST)
+ RTSX_WRITE_REG(chip, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+
+	RTSX_DEBUGP("In rtsx_reset_chip, chip->need_reset = 0x%x\n",
+		(unsigned int)(chip->need_reset));
+
+ RTSX_WRITE_REG(chip, RCCTL, 0x01, 0x00);
+
+ if (CHECK_PID(chip, 0x5208) || CHECK_PID(chip, 0x5288)) {
+ /* Turn off main power when entering S3/S4 state */
+ RTSX_WRITE_REG(chip, MAIN_PWR_OFF_CTL, 0x03, 0x03);
+ }
+
+ if (chip->remote_wakeup_en && !chip->auto_delink_en) {
+ RTSX_WRITE_REG(chip, WAKE_SEL_CTL, 0x07, 0x07);
+ if (chip->aux_pwr_exist)
+ RTSX_WRITE_REG(chip, PME_FORCE_CTL, 0xFF, 0x33);
+ } else {
+ RTSX_WRITE_REG(chip, WAKE_SEL_CTL, 0x07, 0x04);
+ RTSX_WRITE_REG(chip, PME_FORCE_CTL, 0xFF, 0x30);
+ }
+
+ if (CHECK_PID(chip, 0x5208) && (chip->ic_version >= IC_VER_D))
+ RTSX_WRITE_REG(chip, PETXCFG, 0x1C, 0x14);
+
+ if (chip->asic_code && CHECK_PID(chip, 0x5208)) {
+ retval = rtsx_clr_phy_reg_bit(chip, 0x1C, 2);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (chip->ft2_fast_mode) {
+ RTSX_WRITE_REG(chip, CARD_PWR_CTL, 0xFF,
+ MS_PARTIAL_POWER_ON | SD_PARTIAL_POWER_ON);
+ udelay(chip->pmos_pwr_on_interval);
+ RTSX_WRITE_REG(chip, CARD_PWR_CTL, 0xFF,
+ MS_POWER_ON | SD_POWER_ON);
+
+ wait_timeout(200);
+ }
+
+ /* Reset card */
+ rtsx_reset_detected_cards(chip, 0);
+
+ chip->driver_first_load = 0;
+
+ return STATUS_SUCCESS;
+}
+
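+/*
+ * sd_speed_prior packs four SD speed-priority slots, one per byte; each
+ * byte must hold a value between 0x01 and 0x04, otherwise the whole
+ * parameter is treated as bogus and rtsx_init_chip() falls back to the
+ * default ordering.
+ */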
+static inline int check_sd_speed_prior(u32 sd_speed_prior)
+{
+ int i, fake_para = 0;
+
+ for (i = 0; i < 4; i++) {
+ u8 tmp = (u8)(sd_speed_prior >> (i*8));
+ if ((tmp < 0x01) || (tmp > 0x04)) {
+ fake_para = 1;
+ break;
+ }
+ }
+
+ return !fake_para;
+}
+
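+/*
+ * sd_current_prior is validated the same way: four one-byte slots, each
+ * of which must not exceed 0x03, or the default ordering is restored.
+ */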
+static inline int check_sd_current_prior(u32 sd_current_prior)
+{
+ int i, fake_para = 0;
+
+ for (i = 0; i < 4; i++) {
+ u8 tmp = (u8)(sd_current_prior >> (i*8));
+ if (tmp > 0x03) {
+ fake_para = 1;
+ break;
+ }
+ }
+
+ return !fake_para;
+}
+
+static int rts5208_init(struct rtsx_chip *chip)
+{
+ int retval;
+ u16 reg = 0;
+ u8 val = 0;
+
+ RTSX_WRITE_REG(chip, CLK_SEL, 0x03, 0x03);
+ RTSX_READ_REG(chip, CLK_SEL, &val);
+ if (val == 0)
+ chip->asic_code = 1;
+ else
+ chip->asic_code = 0;
+
+ if (chip->asic_code) {
+ retval = rtsx_read_phy_register(chip, 0x1C, &reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_DEBUGP("Value of phy register 0x1C is 0x%x\n", reg);
+ chip->ic_version = (reg >> 4) & 0x07;
+ if (reg & PHY_DEBUG_MODE)
+ chip->phy_debug_mode = 1;
+ else
+ chip->phy_debug_mode = 0;
+
+ } else {
+ RTSX_READ_REG(chip, 0xFE80, &val);
+ chip->ic_version = val;
+ chip->phy_debug_mode = 0;
+ }
+
+ RTSX_READ_REG(chip, PDINFO, &val);
+ RTSX_DEBUGP("PDINFO: 0x%x\n", val);
+ if (val & AUX_PWR_DETECTED)
+ chip->aux_pwr_exist = 1;
+ else
+ chip->aux_pwr_exist = 0;
+
+ RTSX_READ_REG(chip, 0xFE50, &val);
+ if (val & 0x01)
+ chip->hw_bypass_sd = 1;
+ else
+ chip->hw_bypass_sd = 0;
+
+ rtsx_read_config_byte(chip, 0x0E, &val);
+ if (val & 0x80)
+ SET_SDIO_EXIST(chip);
+ else
+ CLR_SDIO_EXIST(chip);
+
+ if (chip->use_hw_setting) {
+ RTSX_READ_REG(chip, CHANGE_LINK_STATE, &val);
+ if (val & 0x80)
+ chip->auto_delink_en = 1;
+ else
+ chip->auto_delink_en = 0;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int rts5288_init(struct rtsx_chip *chip)
+{
+ int retval;
+ u8 val = 0, max_func;
+ u32 lval = 0;
+
+ RTSX_WRITE_REG(chip, CLK_SEL, 0x03, 0x03);
+ RTSX_READ_REG(chip, CLK_SEL, &val);
+ if (val == 0)
+ chip->asic_code = 1;
+ else
+ chip->asic_code = 0;
+
+ chip->ic_version = 0;
+ chip->phy_debug_mode = 0;
+
+ RTSX_READ_REG(chip, PDINFO, &val);
+ RTSX_DEBUGP("PDINFO: 0x%x\n", val);
+ if (val & AUX_PWR_DETECTED)
+ chip->aux_pwr_exist = 1;
+ else
+ chip->aux_pwr_exist = 0;
+
+ RTSX_READ_REG(chip, CARD_SHARE_MODE, &val);
+ RTSX_DEBUGP("CARD_SHARE_MODE: 0x%x\n", val);
+ if (val & 0x04)
+ chip->baro_pkg = QFN;
+ else
+ chip->baro_pkg = LQFP;
+
+ RTSX_READ_REG(chip, 0xFE5A, &val);
+ if (val & 0x10)
+ chip->hw_bypass_sd = 1;
+ else
+ chip->hw_bypass_sd = 0;
+
+ retval = rtsx_read_cfg_dw(chip, 0, 0x718, &lval);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ max_func = (u8)((lval >> 29) & 0x07);
+ RTSX_DEBUGP("Max function number: %d\n", max_func);
+ if (max_func == 0x02)
+ SET_SDIO_EXIST(chip);
+ else
+ CLR_SDIO_EXIST(chip);
+
+ if (chip->use_hw_setting) {
+ RTSX_READ_REG(chip, CHANGE_LINK_STATE, &val);
+ if (val & 0x80)
+ chip->auto_delink_en = 1;
+ else
+ chip->auto_delink_en = 0;
+
+ if (CHECK_BARO_PKG(chip, LQFP))
+ chip->lun_mode = SD_MS_1LUN;
+ else
+ chip->lun_mode = DEFAULT_SINGLE;
+
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_init_chip(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct ms_info *ms_card = &(chip->ms_card);
+ int retval;
+ unsigned int i;
+
+ RTSX_DEBUGP("Vendor ID: 0x%04x, Product ID: 0x%04x\n",
+ chip->vendor_id, chip->product_id);
+
+ chip->ic_version = 0;
+
+#ifdef _MSG_TRACE
+ chip->msg_idx = 0;
+#endif
+
+ memset(xd_card, 0, sizeof(struct xd_info));
+ memset(sd_card, 0, sizeof(struct sd_info));
+ memset(ms_card, 0, sizeof(struct ms_info));
+
+ chip->xd_reset_counter = 0;
+ chip->sd_reset_counter = 0;
+ chip->ms_reset_counter = 0;
+
+ chip->xd_show_cnt = MAX_SHOW_CNT;
+ chip->sd_show_cnt = MAX_SHOW_CNT;
+ chip->ms_show_cnt = MAX_SHOW_CNT;
+
+ chip->sd_io = 0;
+ chip->auto_delink_cnt = 0;
+ chip->auto_delink_allowed = 1;
+ rtsx_set_stat(chip, RTSX_STAT_INIT);
+
+ chip->aspm_enabled = 0;
+ chip->chip_insert_with_sdio = 0;
+ chip->sdio_aspm = 0;
+ chip->sdio_idle = 0;
+ chip->sdio_counter = 0;
+ chip->cur_card = 0;
+ chip->phy_debug_mode = 0;
+ chip->sdio_func_exist = 0;
+ memset(chip->sdio_raw_data, 0, 12);
+
+ for (i = 0; i < MAX_ALLOWED_LUN_CNT; i++) {
+ set_sense_type(chip, i, SENSE_TYPE_NO_SENSE);
+ chip->rw_fail_cnt[i] = 0;
+ }
+
+ if (!check_sd_speed_prior(chip->sd_speed_prior))
+ chip->sd_speed_prior = 0x01040203;
+
+ RTSX_DEBUGP("sd_speed_prior = 0x%08x\n", chip->sd_speed_prior);
+
+ if (!check_sd_current_prior(chip->sd_current_prior))
+ chip->sd_current_prior = 0x00010203;
+
+ RTSX_DEBUGP("sd_current_prior = 0x%08x\n", chip->sd_current_prior);
+
+ if ((chip->sd_ddr_tx_phase > 31) || (chip->sd_ddr_tx_phase < 0))
+ chip->sd_ddr_tx_phase = 0;
+
+ if ((chip->mmc_ddr_tx_phase > 31) || (chip->mmc_ddr_tx_phase < 0))
+ chip->mmc_ddr_tx_phase = 0;
+
+ RTSX_WRITE_REG(chip, FPDCTL, SSC_POWER_DOWN, 0);
+ wait_timeout(200);
+ RTSX_WRITE_REG(chip, CLK_DIV, 0x07, 0x07);
+ RTSX_DEBUGP("chip->use_hw_setting = %d\n", chip->use_hw_setting);
+
+ if (CHECK_PID(chip, 0x5208)) {
+ retval = rts5208_init(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ } else if (CHECK_PID(chip, 0x5288)) {
+ retval = rts5288_init(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ }
+
+ if (chip->ss_en == 2)
+ chip->ss_en = 0;
+
+ RTSX_DEBUGP("chip->asic_code = %d\n", chip->asic_code);
+ RTSX_DEBUGP("chip->ic_version = 0x%x\n", chip->ic_version);
+ RTSX_DEBUGP("chip->phy_debug_mode = %d\n", chip->phy_debug_mode);
+ RTSX_DEBUGP("chip->aux_pwr_exist = %d\n", chip->aux_pwr_exist);
+ RTSX_DEBUGP("chip->sdio_func_exist = %d\n", chip->sdio_func_exist);
+ RTSX_DEBUGP("chip->hw_bypass_sd = %d\n", chip->hw_bypass_sd);
+ RTSX_DEBUGP("chip->aspm_l0s_l1_en = %d\n", chip->aspm_l0s_l1_en);
+ RTSX_DEBUGP("chip->lun_mode = %d\n", chip->lun_mode);
+ RTSX_DEBUGP("chip->auto_delink_en = %d\n", chip->auto_delink_en);
+ RTSX_DEBUGP("chip->ss_en = %d\n", chip->ss_en);
+ RTSX_DEBUGP("chip->baro_pkg = %d\n", chip->baro_pkg);
+
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
+ chip->card2lun[SD_CARD] = 0;
+ chip->card2lun[MS_CARD] = 1;
+ chip->card2lun[XD_CARD] = 0xFF;
+ chip->lun2card[0] = SD_CARD;
+ chip->lun2card[1] = MS_CARD;
+ chip->max_lun = 1;
+ SET_SDIO_IGNORED(chip);
+ } else if (CHECK_LUN_MODE(chip, SD_MS_1LUN)) {
+ chip->card2lun[SD_CARD] = 0;
+ chip->card2lun[MS_CARD] = 0;
+ chip->card2lun[XD_CARD] = 0xFF;
+ chip->lun2card[0] = SD_CARD | MS_CARD;
+ chip->max_lun = 0;
+ } else {
+ chip->card2lun[XD_CARD] = 0;
+ chip->card2lun[SD_CARD] = 0;
+ chip->card2lun[MS_CARD] = 0;
+ chip->lun2card[0] = XD_CARD | SD_CARD | MS_CARD;
+ chip->max_lun = 0;
+ }
+
+ retval = rtsx_reset_chip(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+void rtsx_release_chip(struct rtsx_chip *chip)
+{
+ xd_free_l2p_tbl(chip);
+ ms_free_l2p_tbl(chip);
+ chip->card_exist = 0;
+ chip->card_ready = 0;
+}
+
+#if !defined(LED_AUTO_BLINK) && defined(REGULAR_BLINK)
+static inline void rtsx_blink_led(struct rtsx_chip *chip)
+{
+ if (chip->card_exist && chip->blink_led) {
+ if (chip->led_toggle_counter < LED_TOGGLE_INTERVAL) {
+ chip->led_toggle_counter++;
+ } else {
+ chip->led_toggle_counter = 0;
+ toggle_gpio(chip, LED_GPIO);
+ }
+ }
+}
+#endif
+
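+/*
+ * Re-sample the PCIe Link Control register (and, when an SDIO function
+ * exists and is not ignored, its copy at config offset 0xC0) and compare
+ * it with the cached aspm_level[] values; if either changed,
+ * ASPM_FORCE_CTL is rewritten so the forced ASPM setting follows what is
+ * currently configured on the link.
+ */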
+static void rtsx_monitor_aspm_config(struct rtsx_chip *chip)
+{
+ int maybe_support_aspm, reg_changed;
+ u32 tmp = 0;
+ u8 reg0 = 0, reg1 = 0;
+
+ maybe_support_aspm = 0;
+ reg_changed = 0;
+ rtsx_read_config_byte(chip, LCTLR, &reg0);
+ if (chip->aspm_level[0] != reg0) {
+ reg_changed = 1;
+ chip->aspm_level[0] = reg0;
+ }
+ if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip)) {
+ rtsx_read_cfg_dw(chip, 1, 0xC0, &tmp);
+ reg1 = (u8)tmp;
+ if (chip->aspm_level[1] != reg1) {
+ reg_changed = 1;
+ chip->aspm_level[1] = reg1;
+ }
+
+ if ((reg0 & 0x03) && (reg1 & 0x03))
+ maybe_support_aspm = 1;
+
+ } else {
+ if (reg0 & 0x03)
+ maybe_support_aspm = 1;
+
+ }
+
+ if (reg_changed) {
+ if (maybe_support_aspm)
+ chip->aspm_l0s_l1_en = 0x03;
+
+ RTSX_DEBUGP("aspm_level[0] = 0x%02x, aspm_level[1] = 0x%02x\n",
+ chip->aspm_level[0], chip->aspm_level[1]);
+
+ if (chip->aspm_l0s_l1_en) {
+ chip->aspm_enabled = 1;
+ } else {
+ chip->aspm_enabled = 0;
+ chip->sdio_aspm = 0;
+ }
+ rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFF,
+ 0x30 | chip->aspm_level[0] |
+ (chip->aspm_level[1] << 2));
+ }
+}
+
+void rtsx_polling_func(struct rtsx_chip *chip)
+{
+#ifdef SUPPORT_SD_LOCK
+ struct sd_info *sd_card = &(chip->sd_card);
+#endif
+ int ss_allowed;
+
+ if (rtsx_chk_stat(chip, RTSX_STAT_SUSPEND))
+ return;
+
+ if (rtsx_chk_stat(chip, RTSX_STAT_DELINK))
+ goto Delink_Stage;
+
+ if (chip->polling_config) {
+ u8 val;
+ rtsx_read_config_byte(chip, 0, &val);
+ }
+
+ if (rtsx_chk_stat(chip, RTSX_STAT_SS))
+ return;
+
+#ifdef SUPPORT_OCP
+ if (chip->ocp_int) {
+ rtsx_read_register(chip, OCPSTAT, &(chip->ocp_stat));
+
+ if (chip->card_exist & SD_CARD)
+ sd_power_off_card3v3(chip);
+ else if (chip->card_exist & MS_CARD)
+ ms_power_off_card3v3(chip);
+ else if (chip->card_exist & XD_CARD)
+ xd_power_off_card3v3(chip);
+
+ chip->ocp_int = 0;
+ }
+#endif
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_erase_status) {
+ if (chip->card_exist & SD_CARD) {
+ u8 val;
+ rtsx_read_register(chip, 0xFD30, &val);
+ if (val & 0x02) {
+ sd_card->sd_erase_status = SD_NOT_ERASE;
+ sd_card->sd_lock_notify = 1;
+ chip->need_reinit |= SD_CARD;
+ }
+ } else {
+ sd_card->sd_erase_status = SD_NOT_ERASE;
+ }
+ }
+#endif
+
+ rtsx_init_cards(chip);
+
+ if (chip->ss_en) {
+ ss_allowed = 1;
+
+ if (CHECK_PID(chip, 0x5288)) {
+ ss_allowed = 0;
+ } else {
+ if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip)) {
+ u32 val;
+ rtsx_read_cfg_dw(chip, 1, 0x04, &val);
+ if (val & 0x07)
+ ss_allowed = 0;
+
+ }
+ }
+ } else {
+ ss_allowed = 0;
+ }
+
+ if (ss_allowed && !chip->sd_io) {
+ if (rtsx_get_stat(chip) != RTSX_STAT_IDLE) {
+ chip->ss_counter = 0;
+ } else {
+ if (chip->ss_counter <
+ (chip->ss_idle_period / POLLING_INTERVAL)) {
+ chip->ss_counter++;
+ } else {
+ rtsx_exclusive_enter_ss(chip);
+ return;
+ }
+ }
+ }
+
+ if (CHECK_PID(chip, 0x5208)) {
+ rtsx_monitor_aspm_config(chip);
+
+#ifdef SUPPORT_SDIO_ASPM
+ if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) &&
+ chip->aspm_l0s_l1_en && chip->dynamic_aspm) {
+ if (chip->sd_io) {
+ dynamic_configure_sdio_aspm(chip);
+ } else {
+ if (!chip->sdio_aspm) {
+ RTSX_DEBUGP("SDIO enter ASPM!\n");
+ rtsx_write_register(chip,
+ ASPM_FORCE_CTL, 0xFC,
+ 0x30 | (chip->aspm_level[1] << 2));
+ chip->sdio_aspm = 1;
+ }
+ }
+ }
+#endif
+ }
+
+ if (chip->idle_counter < IDLE_MAX_COUNT) {
+ chip->idle_counter++;
+ } else {
+ if (rtsx_get_stat(chip) != RTSX_STAT_IDLE) {
+ RTSX_DEBUGP("Idle state!\n");
+ rtsx_set_stat(chip, RTSX_STAT_IDLE);
+
+#if !defined(LED_AUTO_BLINK) && defined(REGULAR_BLINK)
+ chip->led_toggle_counter = 0;
+#endif
+ rtsx_force_power_on(chip, SSC_PDCTL);
+
+ turn_off_led(chip, LED_GPIO);
+
+ if (chip->auto_power_down && !chip->card_ready && !chip->sd_io)
+ rtsx_force_power_down(chip, SSC_PDCTL | OC_PDCTL);
+
+ }
+ }
+
+ switch (rtsx_get_stat(chip)) {
+ case RTSX_STAT_RUN:
+#if !defined(LED_AUTO_BLINK) && defined(REGULAR_BLINK)
+ rtsx_blink_led(chip);
+#endif
+ do_remaining_work(chip);
+ break;
+
+ case RTSX_STAT_IDLE:
+ if (chip->sd_io && !chip->sd_int)
+ try_to_switch_sdio_ctrl(chip);
+
+ rtsx_enable_aspm(chip);
+ break;
+
+ default:
+ break;
+ }
+
+#ifdef SUPPORT_OCP
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
+#ifdef CONFIG_RTS5208_DEBUG
+ if (chip->ocp_stat &
+ (SD_OC_NOW | SD_OC_EVER | MS_OC_NOW | MS_OC_EVER))
+ RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n",
+ chip->ocp_stat);
+#endif
+
+ if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ if (chip->card_exist & SD_CARD) {
+ rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN,
+ 0);
+ card_power_off(chip, SD_CARD);
+ chip->card_fail |= SD_CARD;
+ }
+ }
+ if (chip->ocp_stat & (MS_OC_NOW | MS_OC_EVER)) {
+ if (chip->card_exist & MS_CARD) {
+ rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN,
+ 0);
+ card_power_off(chip, MS_CARD);
+ chip->card_fail |= MS_CARD;
+ }
+ }
+ } else {
+ if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n",
+ chip->ocp_stat);
+ if (chip->card_exist & SD_CARD) {
+ rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN,
+ 0);
+ chip->card_fail |= SD_CARD;
+ } else if (chip->card_exist & MS_CARD) {
+ rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN,
+ 0);
+ chip->card_fail |= MS_CARD;
+ } else if (chip->card_exist & XD_CARD) {
+ rtsx_write_register(chip, CARD_OE, XD_OUTPUT_EN,
+ 0);
+ chip->card_fail |= XD_CARD;
+ }
+ card_power_off(chip, SD_CARD);
+ }
+ }
+#endif
+
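+	/*
+	 * Auto-delink is staged: after delink_stage1_step polling passes a
+	 * normal delink is requested (or a forced delink right away if a
+	 * false card is still detected), after delink_stage2_step further
+	 * passes it escalates to a forced delink, and the counter stops
+	 * advancing once stage 3 has elapsed.
+	 */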
+Delink_Stage:
+ if (chip->auto_delink_en && chip->auto_delink_allowed &&
+ !chip->card_ready && !chip->card_ejected && !chip->sd_io) {
+ int enter_L1 = chip->auto_delink_in_L1 && (
+ chip->aspm_l0s_l1_en || chip->ss_en);
+ int delink_stage1_cnt = chip->delink_stage1_step;
+ int delink_stage2_cnt = delink_stage1_cnt +
+ chip->delink_stage2_step;
+ int delink_stage3_cnt = delink_stage2_cnt +
+ chip->delink_stage3_step;
+
+ if (chip->auto_delink_cnt <= delink_stage3_cnt) {
+ if (chip->auto_delink_cnt == delink_stage1_cnt) {
+ rtsx_set_stat(chip, RTSX_STAT_DELINK);
+
+ if (chip->asic_code && CHECK_PID(chip, 0x5208))
+ rtsx_set_phy_reg_bit(chip, 0x1C, 2);
+
+ if (chip->card_exist) {
+ RTSX_DEBUGP("False card inserted, do force delink\n");
+
+ if (enter_L1)
+ rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 1);
+
+ rtsx_write_register(chip,
+ CHANGE_LINK_STATE, 0x0A,
+ 0x0A);
+
+ if (enter_L1)
+ rtsx_enter_L1(chip);
+
+ chip->auto_delink_cnt = delink_stage3_cnt + 1;
+ } else {
+ RTSX_DEBUGP("No card inserted, do delink\n");
+
+ if (enter_L1)
+ rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 1);
+
+ rtsx_write_register(chip, CHANGE_LINK_STATE, 0x02, 0x02);
+
+ if (enter_L1)
+ rtsx_enter_L1(chip);
+
+ }
+ }
+
+ if (chip->auto_delink_cnt == delink_stage2_cnt) {
+ RTSX_DEBUGP("Try to do force delink\n");
+
+ if (enter_L1)
+ rtsx_exit_L1(chip);
+
+ if (chip->asic_code && CHECK_PID(chip, 0x5208))
+ rtsx_set_phy_reg_bit(chip, 0x1C, 2);
+
+ rtsx_write_register(chip, CHANGE_LINK_STATE,
+ 0x0A, 0x0A);
+ }
+
+ chip->auto_delink_cnt++;
+ }
+ } else {
+ chip->auto_delink_cnt = 0;
+ }
+}
+
+void rtsx_undo_delink(struct rtsx_chip *chip)
+{
+ chip->auto_delink_allowed = 0;
+ rtsx_write_register(chip, CHANGE_LINK_STATE, 0x0A, 0x00);
+}
+
+/**
+ * rtsx_stop_cmd - stop command transfer and DMA transfer
+ * @chip: Realtek's card reader chip
+ * @card: flash card type
+ *
+ * Stop command transfer and DMA transfer.
+ * This function is called in error handler.
+ */
+void rtsx_stop_cmd(struct rtsx_chip *chip, int card)
+{
+ int i;
+
+ for (i = 0; i <= 8; i++) {
+ int addr = RTSX_HCBAR + i * 4;
+ u32 reg;
+ reg = rtsx_readl(chip, addr);
+ RTSX_DEBUGP("BAR (0x%02x): 0x%08x\n", addr, reg);
+ }
+ rtsx_writel(chip, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_writel(chip, RTSX_HDBCTLR, STOP_DMA);
+
+ for (i = 0; i < 16; i++) {
+ u16 addr = 0xFE20 + (u16)i;
+ u8 val;
+ rtsx_read_register(chip, addr, &val);
+ RTSX_DEBUGP("0x%04X: 0x%02x\n", addr, val);
+ }
+
+ rtsx_write_register(chip, DMACTL, 0x80, 0x80);
+ rtsx_write_register(chip, RBCTL, 0x80, 0x80);
+}
+
+#define MAX_RW_REG_CNT 1024
+
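+/*
+ * rtsx_write_register()/rtsx_read_register() below reach internal registers
+ * through RTSX_HAIMR: bits 31:30 select the operation (3 = write, 2 = read),
+ * bits 29:16 carry the register address, bits 15:8 the write mask and
+ * bits 7:0 the data; the access is polled until bit 31 reads back as 0,
+ * at which point the low byte holds the register's current value.
+ */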
+int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data)
+{
+ int i;
+ u32 val = 3 << 30;
+
+ val |= (u32)(addr & 0x3FFF) << 16;
+ val |= (u32)mask << 8;
+ val |= (u32)data;
+
+ rtsx_writel(chip, RTSX_HAIMR, val);
+
+ for (i = 0; i < MAX_RW_REG_CNT; i++) {
+ val = rtsx_readl(chip, RTSX_HAIMR);
+ if ((val & (1 << 31)) == 0) {
+ if (data != (u8)val)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+ }
+ }
+
+ TRACE_RET(chip, STATUS_TIMEDOUT);
+}
+
+int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data)
+{
+ u32 val = 2 << 30;
+ int i;
+
+ if (data)
+ *data = 0;
+
+ val |= (u32)(addr & 0x3FFF) << 16;
+
+ rtsx_writel(chip, RTSX_HAIMR, val);
+
+ for (i = 0; i < MAX_RW_REG_CNT; i++) {
+ val = rtsx_readl(chip, RTSX_HAIMR);
+ if ((val & (1 << 31)) == 0)
+ break;
+ }
+
+ if (i >= MAX_RW_REG_CNT)
+ TRACE_RET(chip, STATUS_TIMEDOUT);
+
+ if (data)
+ *data = (u8)(val & 0xFF);
+
+ return STATUS_SUCCESS;
+}
+
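+/*
+ * rtsx_write_cfg_dw()/rtsx_read_cfg_dw() go through the CFGADDR/CFGDATA/
+ * CFGRWCTL window: data bytes are staged in CFGDATA0-3, the low nibble of
+ * CFGRWCTL selects which of the four bytes are valid, bits 5:4 select the
+ * PCI function and bit 7 triggers the access, which is then polled until
+ * that bit clears.
+ */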
+int rtsx_write_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 mask,
+ u32 val)
+{
+ u8 mode = 0, tmp;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (mask & 0xFF) {
+ RTSX_WRITE_REG(chip, CFGDATA0 + i,
+ 0xFF, (u8)(val & mask & 0xFF));
+ mode |= (1 << i);
+ }
+ mask >>= 8;
+ val >>= 8;
+ }
+
+ if (mode) {
+ RTSX_WRITE_REG(chip, CFGADDR0, 0xFF, (u8)addr);
+ RTSX_WRITE_REG(chip, CFGADDR1, 0xFF, (u8)(addr >> 8));
+
+ RTSX_WRITE_REG(chip, CFGRWCTL, 0xFF,
+ 0x80 | mode | ((func_no & 0x03) << 4));
+
+ for (i = 0; i < MAX_RW_REG_CNT; i++) {
+ RTSX_READ_REG(chip, CFGRWCTL, &tmp);
+ if ((tmp & 0x80) == 0)
+ break;
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_read_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 *val)
+{
+ int i;
+ u8 tmp;
+ u32 data = 0;
+
+ RTSX_WRITE_REG(chip, CFGADDR0, 0xFF, (u8)addr);
+ RTSX_WRITE_REG(chip, CFGADDR1, 0xFF, (u8)(addr >> 8));
+ RTSX_WRITE_REG(chip, CFGRWCTL, 0xFF, 0x80 | ((func_no & 0x03) << 4));
+
+ for (i = 0; i < MAX_RW_REG_CNT; i++) {
+ RTSX_READ_REG(chip, CFGRWCTL, &tmp);
+ if ((tmp & 0x80) == 0)
+ break;
+ }
+
+ for (i = 0; i < 4; i++) {
+ RTSX_READ_REG(chip, CFGDATA0 + i, &tmp);
+ data |= (u32)tmp << (i * 8);
+ }
+
+ if (val)
+ *val = data;
+
+ return STATUS_SUCCESS;
+}
+
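+/*
+ * The sequential config helpers below cope with unaligned buffers by
+ * rounding the start address down to a dword boundary and building one
+ * data/mask pair per dword on the write side, so only the bytes actually
+ * covered by the caller's buffer are modified; reads simply fetch whole
+ * dwords and extract the requested bytes.
+ */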
+int rtsx_write_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
+ int len)
+{
+ u32 *data, *mask;
+ u16 offset = addr % 4;
+ u16 aligned_addr = addr - offset;
+ int dw_len, i, j;
+ int retval;
+
+ RTSX_DEBUGP("%s\n", __func__);
+
+ if (!buf)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ if ((len + offset) % 4)
+ dw_len = (len + offset) / 4 + 1;
+ else
+ dw_len = (len + offset) / 4;
+
+ RTSX_DEBUGP("dw_len = %d\n", dw_len);
+
+ data = vzalloc(dw_len * 4);
+ if (!data)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ mask = vzalloc(dw_len * 4);
+ if (!mask) {
+ vfree(data);
+ TRACE_RET(chip, STATUS_NOMEM);
+ }
+
+ j = 0;
+ for (i = 0; i < len; i++) {
+ mask[j] |= 0xFF << (offset * 8);
+ data[j] |= buf[i] << (offset * 8);
+ if (++offset == 4) {
+ j++;
+ offset = 0;
+ }
+ }
+
+ RTSX_DUMP(mask, dw_len * 4);
+ RTSX_DUMP(data, dw_len * 4);
+
+ for (i = 0; i < dw_len; i++) {
+ retval = rtsx_write_cfg_dw(chip, func, aligned_addr + i * 4,
+ mask[i], data[i]);
+ if (retval != STATUS_SUCCESS) {
+ vfree(data);
+ vfree(mask);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ vfree(data);
+ vfree(mask);
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_read_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
+ int len)
+{
+ u32 *data;
+ u16 offset = addr % 4;
+ u16 aligned_addr = addr - offset;
+ int dw_len, i, j;
+ int retval;
+
+ RTSX_DEBUGP("%s\n", __func__);
+
+ if ((len + offset) % 4)
+ dw_len = (len + offset) / 4 + 1;
+ else
+ dw_len = (len + offset) / 4;
+
+ RTSX_DEBUGP("dw_len = %d\n", dw_len);
+
+ data = vmalloc(dw_len * 4);
+ if (!data)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ for (i = 0; i < dw_len; i++) {
+ retval = rtsx_read_cfg_dw(chip, func, aligned_addr + i * 4,
+ data + i);
+ if (retval != STATUS_SUCCESS) {
+ vfree(data);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (buf) {
+ j = 0;
+
+ for (i = 0; i < len; i++) {
+ buf[i] = (u8)(data[j] >> (offset * 8));
+ if (++offset == 4) {
+ j++;
+ offset = 0;
+ }
+ }
+ }
+
+ vfree(data);
+
+ return STATUS_SUCCESS;
+}
+
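+/*
+ * PHY registers are accessed indirectly: the 16-bit value is staged in
+ * PHYDATA0/1, the register number in PHYADDR, and PHYRWCTL (0x81 = write,
+ * 0x80 = read) kicks off the transfer, which is polled until the busy bit
+ * clears.
+ */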
+int rtsx_write_phy_register(struct rtsx_chip *chip, u8 addr, u16 val)
+{
+ int i, finished = 0;
+ u8 tmp;
+
+ RTSX_WRITE_REG(chip, PHYDATA0, 0xFF, (u8)val);
+ RTSX_WRITE_REG(chip, PHYDATA1, 0xFF, (u8)(val >> 8));
+ RTSX_WRITE_REG(chip, PHYADDR, 0xFF, addr);
+ RTSX_WRITE_REG(chip, PHYRWCTL, 0xFF, 0x81);
+
+ for (i = 0; i < 100000; i++) {
+ RTSX_READ_REG(chip, PHYRWCTL, &tmp);
+ if (!(tmp & 0x80)) {
+ finished = 1;
+ break;
+ }
+ }
+
+ if (!finished)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_read_phy_register(struct rtsx_chip *chip, u8 addr, u16 *val)
+{
+ int i, finished = 0;
+ u16 data = 0;
+ u8 tmp;
+
+ RTSX_WRITE_REG(chip, PHYADDR, 0xFF, addr);
+ RTSX_WRITE_REG(chip, PHYRWCTL, 0xFF, 0x80);
+
+ for (i = 0; i < 100000; i++) {
+ RTSX_READ_REG(chip, PHYRWCTL, &tmp);
+ if (!(tmp & 0x80)) {
+ finished = 1;
+ break;
+ }
+ }
+
+ if (!finished)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_READ_REG(chip, PHYDATA0, &tmp);
+ data = tmp;
+ RTSX_READ_REG(chip, PHYDATA1, &tmp);
+ data |= (u16)tmp << 8;
+
+ if (val)
+ *val = data;
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val)
+{
+ int i;
+ u8 data = 0;
+
+ RTSX_WRITE_REG(chip, EFUSE_CTRL, 0xFF, 0x80|addr);
+
+ for (i = 0; i < 100; i++) {
+ RTSX_READ_REG(chip, EFUSE_CTRL, &data);
+ if (!(data & 0x80))
+ break;
+ udelay(1);
+ }
+
+ if (data & 0x80)
+ TRACE_RET(chip, STATUS_TIMEDOUT);
+
+ RTSX_READ_REG(chip, EFUSE_DATA, &data);
+ if (val)
+ *val = data;
+
+ return STATUS_SUCCESS;
+}
+
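+/*
+ * rtsx_write_efuse() programs the requested value one bit at a time: for
+ * every bit of val that should end up as 0 it stages a pattern with only
+ * that bit cleared in EFUSE_DATA, starts a program cycle via EFUSE_CTRL,
+ * and polls for completion before moving to the next bit.
+ */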
+int rtsx_write_efuse(struct rtsx_chip *chip, u8 addr, u8 val)
+{
+ int i, j;
+ u8 data = 0, tmp = 0xFF;
+
+ for (i = 0; i < 8; i++) {
+ if (val & (u8)(1 << i))
+ continue;
+
+ tmp &= (~(u8)(1 << i));
+ RTSX_DEBUGP("Write 0x%x to 0x%x\n", tmp, addr);
+
+ RTSX_WRITE_REG(chip, EFUSE_DATA, 0xFF, tmp);
+ RTSX_WRITE_REG(chip, EFUSE_CTRL, 0xFF, 0xA0|addr);
+
+ for (j = 0; j < 100; j++) {
+ RTSX_READ_REG(chip, EFUSE_CTRL, &data);
+ if (!(data & 0x80))
+ break;
+ wait_timeout(3);
+ }
+
+ if (data & 0x80)
+ TRACE_RET(chip, STATUS_TIMEDOUT);
+
+ wait_timeout(5);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_clr_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
+{
+ int retval;
+ u16 value;
+
+ retval = rtsx_read_phy_register(chip, reg, &value);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (value & (1 << bit)) {
+ value &= ~(1 << bit);
+ retval = rtsx_write_phy_register(chip, reg, value);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
+{
+ int retval;
+ u16 value;
+
+ retval = rtsx_read_phy_register(chip, reg, &value);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+	if (!(value & (1 << bit))) {
+ value |= (1 << bit);
+ retval = rtsx_write_phy_register(chip, reg, value);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_check_link_ready(struct rtsx_chip *chip)
+{
+ u8 val;
+
+ RTSX_READ_REG(chip, IRQSTAT0, &val);
+
+ RTSX_DEBUGP("IRQSTAT0: 0x%x\n", val);
+ if (val & LINK_RDY_INT) {
+ RTSX_DEBUGP("Delinked!\n");
+ rtsx_write_register(chip, IRQSTAT0, LINK_RDY_INT, LINK_RDY_INT);
+ return STATUS_FAIL;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static void rtsx_handle_pm_dstate(struct rtsx_chip *chip, u8 dstate)
+{
+ u32 ultmp;
+
+ RTSX_DEBUGP("%04x set pm_dstate to %d\n", chip->product_id, dstate);
+
+ if (CHK_SDIO_EXIST(chip)) {
+ u8 func_no;
+
+ if (CHECK_PID(chip, 0x5288))
+ func_no = 2;
+ else
+ func_no = 1;
+
+ rtsx_read_cfg_dw(chip, func_no, 0x84, &ultmp);
+ RTSX_DEBUGP("pm_dstate of function %d: 0x%x\n", (int)func_no,
+ ultmp);
+ rtsx_write_cfg_dw(chip, func_no, 0x84, 0xFF, dstate);
+ }
+
+ rtsx_write_config_byte(chip, 0x44, dstate);
+ rtsx_write_config_byte(chip, 0x45, 0);
+}
+
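+/*
+ * rtsx_enter_L1() uses rtsx_handle_pm_dstate() to write power state 2 into
+ * config offset 0x44 (and into offset 0x84 of the SDIO function when one is
+ * present), while rtsx_exit_L1() clears config offsets 0x44/0x45 to leave
+ * that state again.
+ */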
+void rtsx_enter_L1(struct rtsx_chip *chip)
+{
+ rtsx_handle_pm_dstate(chip, 2);
+}
+
+void rtsx_exit_L1(struct rtsx_chip *chip)
+{
+ rtsx_write_config_byte(chip, 0x44, 0);
+ rtsx_write_config_byte(chip, 0x45, 0);
+}
+
+void rtsx_enter_ss(struct rtsx_chip *chip)
+{
+ RTSX_DEBUGP("Enter Selective Suspend State!\n");
+
+ rtsx_write_register(chip, IRQSTAT0, LINK_RDY_INT, LINK_RDY_INT);
+
+ if (chip->power_down_in_ss) {
+ rtsx_power_off_card(chip);
+ rtsx_force_power_down(chip, SSC_PDCTL | OC_PDCTL);
+ }
+
+ if (CHK_SDIO_EXIST(chip)) {
+ if (CHECK_PID(chip, 0x5288))
+ rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF00, 0x0100);
+ else
+ rtsx_write_cfg_dw(chip, 1, 0xC0, 0xFF00, 0x0100);
+ }
+
+ if (chip->auto_delink_en) {
+ rtsx_write_register(chip, HOST_SLEEP_STATE, 0x01, 0x01);
+ } else {
+ if (!chip->phy_debug_mode) {
+ u32 tmp;
+ tmp = rtsx_readl(chip, RTSX_BIER);
+ tmp |= CARD_INT;
+ rtsx_writel(chip, RTSX_BIER, tmp);
+ }
+
+ rtsx_write_register(chip, CHANGE_LINK_STATE, 0x02, 0);
+ }
+
+ rtsx_enter_L1(chip);
+
+ RTSX_CLR_DELINK(chip);
+ rtsx_set_stat(chip, RTSX_STAT_SS);
+}
+
+void rtsx_exit_ss(struct rtsx_chip *chip)
+{
+ RTSX_DEBUGP("Exit Selective Suspend State!\n");
+
+ rtsx_exit_L1(chip);
+
+ if (chip->power_down_in_ss) {
+ rtsx_force_power_on(chip, SSC_PDCTL | OC_PDCTL);
+ udelay(1000);
+ }
+
+ if (RTSX_TST_DELINK(chip)) {
+ chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
+ rtsx_reinit_cards(chip, 1);
+ RTSX_CLR_DELINK(chip);
+ } else if (chip->power_down_in_ss) {
+ chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
+ rtsx_reinit_cards(chip, 0);
+ }
+}
+
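+/*
+ * Latch the pending bits from RTSX_BIPR, mask them against the sources
+ * enabled in RTSX_BIER, and translate card insert/remove events into the
+ * need_reset/need_release/need_reinit bitmaps for later processing.
+ */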
+int rtsx_pre_handle_interrupt(struct rtsx_chip *chip)
+{
+ u32 status, int_enable;
+ int exit_ss = 0;
+#ifdef SUPPORT_OCP
+ u32 ocp_int = 0;
+
+ ocp_int = OC_INT;
+#endif
+
+ if (chip->ss_en) {
+ chip->ss_counter = 0;
+ if (rtsx_get_stat(chip) == RTSX_STAT_SS) {
+ exit_ss = 1;
+ rtsx_exit_L1(chip);
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ }
+ }
+
+ int_enable = rtsx_readl(chip, RTSX_BIER);
+ chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
+
+ if (((chip->int_reg & int_enable) == 0) ||
+ (chip->int_reg == 0xFFFFFFFF))
+ return STATUS_FAIL;
+
+	chip->int_reg &= (int_enable | 0x7FFFFF);
+	status = chip->int_reg;
+
+ if (status & CARD_INT) {
+ chip->auto_delink_cnt = 0;
+
+ if (status & SD_INT) {
+ if (status & SD_EXIST) {
+ set_bit(SD_NR, &(chip->need_reset));
+ } else {
+ set_bit(SD_NR, &(chip->need_release));
+ chip->sd_reset_counter = 0;
+ chip->sd_show_cnt = 0;
+ clear_bit(SD_NR, &(chip->need_reset));
+ }
+ } else {
+			/*
+			 * In multi-LUN mode another card may still be
+			 * present in the slot while one card is being
+			 * plugged or unplugged; in that case every card
+			 * that is still present should be reset.
+			 */
+ if (exit_ss && (status & SD_EXIST))
+ set_bit(SD_NR, &(chip->need_reinit));
+ }
+ if (!CHECK_PID(chip, 0x5288) || CHECK_BARO_PKG(chip, QFN)) {
+ if (status & XD_INT) {
+ if (status & XD_EXIST) {
+ set_bit(XD_NR, &(chip->need_reset));
+ } else {
+ set_bit(XD_NR, &(chip->need_release));
+ chip->xd_reset_counter = 0;
+ chip->xd_show_cnt = 0;
+ clear_bit(XD_NR, &(chip->need_reset));
+ }
+ } else {
+ if (exit_ss && (status & XD_EXIST))
+ set_bit(XD_NR, &(chip->need_reinit));
+ }
+ }
+ if (status & MS_INT) {
+ if (status & MS_EXIST) {
+ set_bit(MS_NR, &(chip->need_reset));
+ } else {
+ set_bit(MS_NR, &(chip->need_release));
+ chip->ms_reset_counter = 0;
+ chip->ms_show_cnt = 0;
+ clear_bit(MS_NR, &(chip->need_reset));
+ }
+ } else {
+ if (exit_ss && (status & MS_EXIST))
+ set_bit(MS_NR, &(chip->need_reinit));
+ }
+ }
+
+#ifdef SUPPORT_OCP
+ chip->ocp_int = ocp_int & status;
+#endif
+
+ if (chip->sd_io) {
+ if (chip->int_reg & DATA_DONE_INT)
+ chip->int_reg &= ~(u32)DATA_DONE_INT;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+void rtsx_do_before_power_down(struct rtsx_chip *chip, int pm_stat)
+{
+ int retval;
+
+ RTSX_DEBUGP("rtsx_do_before_power_down, pm_stat = %d\n", pm_stat);
+
+ rtsx_set_stat(chip, RTSX_STAT_SUSPEND);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS)
+ return;
+
+ rtsx_release_cards(chip);
+ rtsx_disable_bus_int(chip);
+ turn_off_led(chip, LED_GPIO);
+
+#ifdef HW_AUTO_SWITCH_SD_BUS
+ if (chip->sd_io) {
+ chip->sdio_in_charge = 1;
+ if (CHECK_PID(chip, 0x5208)) {
+ rtsx_write_register(chip, TLPTISTAT, 0x08, 0x08);
+ /* Enable sdio_bus_auto_switch */
+ rtsx_write_register(chip, 0xFE70, 0x80, 0x80);
+ } else if (CHECK_PID(chip, 0x5288)) {
+ rtsx_write_register(chip, TLPTISTAT, 0x08, 0x08);
+ /* Enable sdio_bus_auto_switch */
+ rtsx_write_register(chip, 0xFE5A, 0x08, 0x08);
+ }
+ }
+#endif
+
+ if (CHECK_PID(chip, 0x5208) && (chip->ic_version >= IC_VER_D)) {
+ /* u_force_clkreq_0 */
+ rtsx_write_register(chip, PETXCFG, 0x08, 0x08);
+ }
+
+ if (pm_stat == PM_S1) {
+ RTSX_DEBUGP("Host enter S1\n");
+ rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03,
+ HOST_ENTER_S1);
+ } else if (pm_stat == PM_S3) {
+ if (chip->s3_pwr_off_delay > 0)
+ wait_timeout(chip->s3_pwr_off_delay);
+
+ RTSX_DEBUGP("Host enter S3\n");
+ rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03,
+ HOST_ENTER_S3);
+ }
+
+ if (chip->do_delink_before_power_down && chip->auto_delink_en)
+ rtsx_write_register(chip, CHANGE_LINK_STATE, 0x02, 2);
+
+ rtsx_force_power_down(chip, SSC_PDCTL | OC_PDCTL);
+
+ chip->cur_clk = 0;
+ chip->cur_card = 0;
+ chip->card_exist = 0;
+}
+
+void rtsx_enable_aspm(struct rtsx_chip *chip)
+{
+ if (chip->aspm_l0s_l1_en && chip->dynamic_aspm) {
+ if (!chip->aspm_enabled) {
+ RTSX_DEBUGP("Try to enable ASPM\n");
+ chip->aspm_enabled = 1;
+
+ if (chip->asic_code && CHECK_PID(chip, 0x5208))
+ rtsx_write_phy_register(chip, 0x07, 0);
+ if (CHECK_PID(chip, 0x5208)) {
+ rtsx_write_register(chip, ASPM_FORCE_CTL, 0xF3,
+ 0x30 | chip->aspm_level[0]);
+ } else {
+ rtsx_write_config_byte(chip, LCTLR,
+ chip->aspm_l0s_l1_en);
+ }
+
+ if (CHK_SDIO_EXIST(chip)) {
+ u16 val = chip->aspm_l0s_l1_en | 0x0100;
+ if (CHECK_PID(chip, 0x5288))
+ rtsx_write_cfg_dw(chip, 2, 0xC0,
+ 0xFFFF, val);
+ else
+ rtsx_write_cfg_dw(chip, 1, 0xC0,
+ 0xFFFF, val);
+ }
+ }
+ }
+}
+
+void rtsx_disable_aspm(struct rtsx_chip *chip)
+{
+ if (CHECK_PID(chip, 0x5208))
+ rtsx_monitor_aspm_config(chip);
+
+ if (chip->aspm_l0s_l1_en && chip->dynamic_aspm) {
+ if (chip->aspm_enabled) {
+ RTSX_DEBUGP("Try to disable ASPM\n");
+ chip->aspm_enabled = 0;
+
+ if (chip->asic_code && CHECK_PID(chip, 0x5208))
+ rtsx_write_phy_register(chip, 0x07, 0x0129);
+ if (CHECK_PID(chip, 0x5208))
+ rtsx_write_register(chip, ASPM_FORCE_CTL,
+ 0xF3, 0x30);
+ else
+ rtsx_write_config_byte(chip, LCTLR, 0x00);
+
+ wait_timeout(1);
+ }
+ }
+}
+
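+/*
+ * The ping-pong buffer at PPBUF_BASE2 is transferred in chunks of at most
+ * 256 bytes: each chunk is built as a batch of single-register
+ * READ_REG_CMD/WRITE_REG_CMD operations and issued with one
+ * rtsx_send_cmd() call.
+ */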
+int rtsx_read_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
+{
+ int retval;
+ int i, j;
+ u16 reg_addr;
+ u8 *ptr;
+
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ ptr = buf;
+ reg_addr = PPBUF_BASE2;
+ for (i = 0; i < buf_len/256; i++) {
+ rtsx_init_cmd(chip);
+
+ for (j = 0; j < 256; j++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr++, 0, 0);
+
+ retval = rtsx_send_cmd(chip, 0, 250);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ memcpy(ptr, rtsx_get_cmd_data(chip), 256);
+ ptr += 256;
+ }
+
+ if (buf_len%256) {
+ rtsx_init_cmd(chip);
+
+ for (j = 0; j < buf_len%256; j++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr++, 0, 0);
+
+ retval = rtsx_send_cmd(chip, 0, 250);
+ if (retval < 0)
+			TRACE_RET(chip, STATUS_FAIL);
+
+		memcpy(ptr, rtsx_get_cmd_data(chip), buf_len%256);
+	}
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_write_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
+{
+ int retval;
+ int i, j;
+ u16 reg_addr;
+ u8 *ptr;
+
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ ptr = buf;
+ reg_addr = PPBUF_BASE2;
+ for (i = 0; i < buf_len/256; i++) {
+ rtsx_init_cmd(chip);
+
+ for (j = 0; j < 256; j++) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, reg_addr++, 0xFF,
+ *ptr);
+ ptr++;
+ }
+
+ retval = rtsx_send_cmd(chip, 0, 250);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (buf_len%256) {
+ rtsx_init_cmd(chip);
+
+ for (j = 0; j < buf_len%256; j++) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, reg_addr++, 0xFF,
+ *ptr);
+ ptr++;
+ }
+
+ retval = rtsx_send_cmd(chip, 0, 250);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_check_chip_exist(struct rtsx_chip *chip)
+{
+ if (rtsx_readl(chip, 0) == 0xFFFFFFFF)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
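+/*
+ * The two helpers below build an FPDCTL bit mask from the requested
+ * control flags (SSC and, when over-current protection is compiled in,
+ * the SD/MS OC blocks) and then clear that mask to power the blocks on or
+ * set it to power them down.
+ */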
+int rtsx_force_power_on(struct rtsx_chip *chip, u8 ctl)
+{
+ int retval;
+ u8 mask = 0;
+
+ if (ctl & SSC_PDCTL)
+ mask |= SSC_POWER_DOWN;
+
+#ifdef SUPPORT_OCP
+ if (ctl & OC_PDCTL) {
+ mask |= SD_OC_POWER_DOWN;
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
+ mask |= MS_OC_POWER_DOWN;
+ }
+#endif
+
+ if (mask) {
+ retval = rtsx_write_register(chip, FPDCTL, mask, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHECK_PID(chip, 0x5288))
+ wait_timeout(200);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int rtsx_force_power_down(struct rtsx_chip *chip, u8 ctl)
+{
+ int retval;
+ u8 mask = 0, val = 0;
+
+ if (ctl & SSC_PDCTL)
+ mask |= SSC_POWER_DOWN;
+
+#ifdef SUPPORT_OCP
+ if (ctl & OC_PDCTL) {
+ mask |= SD_OC_POWER_DOWN;
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
+ mask |= MS_OC_POWER_DOWN;
+ }
+#endif
+
+ if (mask) {
+ val = mask;
+ retval = rtsx_write_register(chip, FPDCTL, mask, val);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
diff --git a/drivers/staging/rts5208/rtsx_chip.h b/drivers/staging/rts5208/rtsx_chip.h
new file mode 100644
index 000000000000..c25efcc3f3aa
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_chip.h
@@ -0,0 +1,1002 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_CHIP_H
+#define __REALTEK_RTSX_CHIP_H
+
+#include "rtsx.h"
+
+#define SUPPORT_CPRM
+#define SUPPORT_OCP
+#define SUPPORT_SDIO_ASPM
+#define SUPPORT_MAGIC_GATE
+#define SUPPORT_MSXC
+#define SUPPORT_SD_LOCK
+/* Hardware switch bus_ctl and cd_ctl automatically */
+#define HW_AUTO_SWITCH_SD_BUS
+/* Enable hardware interrupt write clear */
+#define HW_INT_WRITE_CLR
+/* #define LED_AUTO_BLINK */
+/* #define DISABLE_CARD_INT */
+
+#ifdef SUPPORT_MAGIC_GATE
+ /* Using NORMAL_WRITE instead of AUTO_WRITE to set ICV */
+ #define MG_SET_ICV_SLOW
+ /* HW may miss ERR/CMDNK signal when sampling INT status. */
+ #define MS_SAMPLE_INT_ERR
+	/* HW does not support the Wait_INT function during
+	 * READ_BYTES transfer mode */
+ #define READ_BYTES_WAIT_INT
+#endif
+
+#ifdef SUPPORT_MSXC
+#define XC_POWERCLASS
+#define SUPPORT_PCGL_1P18
+#endif
+
+#ifndef LED_AUTO_BLINK
+#define REGULAR_BLINK
+#endif
+
+#define LED_BLINK_SPEED 5
+#define LED_TOGGLE_INTERVAL 6
+#define GPIO_TOGGLE_THRESHOLD 1024
+#define LED_GPIO 0
+
+#define POLLING_INTERVAL 30
+
+#define TRACE_ITEM_CNT 64
+
+#ifndef STATUS_SUCCESS
+#define STATUS_SUCCESS 0
+#endif
+#ifndef STATUS_FAIL
+#define STATUS_FAIL 1
+#endif
+#ifndef STATUS_TIMEDOUT
+#define STATUS_TIMEDOUT 2
+#endif
+#ifndef STATUS_NOMEM
+#define STATUS_NOMEM 3
+#endif
+#ifndef STATUS_READ_FAIL
+#define STATUS_READ_FAIL 4
+#endif
+#ifndef STATUS_WRITE_FAIL
+#define STATUS_WRITE_FAIL 5
+#endif
+#ifndef STATUS_ERROR
+#define STATUS_ERROR 10
+#endif
+
+#define PM_S1 1
+#define PM_S3 3
+
+/*
+ * Transport return codes
+ */
+
+#define TRANSPORT_GOOD 0 /* Transport good, command good */
+#define TRANSPORT_FAILED 1 /* Transport good, command failed */
+#define TRANSPORT_NO_SENSE 2 /* Command failed, no auto-sense */
+#define TRANSPORT_ERROR 3 /* Transport bad (i.e. device dead) */
+
+
+/*-----------------------------------
+ Start-Stop-Unit
+-----------------------------------*/
+#define STOP_MEDIUM 0x00 /* access disable */
+#define MAKE_MEDIUM_READY 0x01 /* access enable */
+#define UNLOAD_MEDIUM 0x02 /* unload */
+#define LOAD_MEDIUM 0x03 /* load */
+
+/*-----------------------------------
+ STANDARD_INQUIRY
+-----------------------------------*/
+#define QULIFIRE 0x00
+#define AENC_FNC 0x00
+#define TRML_IOP 0x00
+#define REL_ADR 0x00
+#define WBUS_32 0x00
+#define WBUS_16 0x00
+#define SYNC 0x00
+#define LINKED 0x00
+#define CMD_QUE 0x00
+#define SFT_RE 0x00
+
+#define VEN_ID_LEN 8 /* Vendor ID Length */
+#define PRDCT_ID_LEN 16 /* Product ID Length */
+#define PRDCT_REV_LEN		4	/* Product Revision Length */
+
+/* Dynamic flag definitions: used in set_bit() etc. */
+#define RTSX_FLIDX_TRANS_ACTIVE 18 /* 0x00040000 transfer is active */
+#define RTSX_FLIDX_ABORTING 20 /* 0x00100000 abort is in
+ * progress */
+#define RTSX_FLIDX_DISCONNECTING 21 /* 0x00200000 disconnect
+ * in progress */
+#define ABORTING_OR_DISCONNECTING	((1UL << RTSX_FLIDX_ABORTING) | \
+					 (1UL << RTSX_FLIDX_DISCONNECTING))
+#define RTSX_FLIDX_RESETTING 22 /* 0x00400000 device reset
+ * in progress */
+#define RTSX_FLIDX_TIMED_OUT 23 /* 0x00800000 SCSI
+ * midlayer timed out */
+
+#define DRCT_ACCESS_DEV 0x00 /* Direct Access Device */
+#define RMB_DISC 0x80 /* The Device is Removable */
+#define ANSI_SCSI2 0x02 /* Based on ANSI-SCSI2 */
+
+#define SCSI 0x00 /* Interface ID */
+
+#define WRITE_PROTECTED_MEDIA 0x07
+
+/*---- sense key ----*/
+#define ILI 0x20 /* ILI bit is on */
+
+#define NO_SENSE		0x00	/* no sense key */
+#define RECOVER_ERR		0x01	/* Target/Logical unit is recovered */
+#define NOT_READY		0x02	/* Logical unit is not ready */
+#define MEDIA_ERR		0x03	/* medium/data error */
+#define HARDWARE_ERR		0x04	/* hardware error */
+#define ILGAL_REQ		0x05	/* CDB/parameter/identify msg error */
+#define UNIT_ATTENTION		0x06	/* unit attention condition occurred */
+#define DAT_PRTCT		0x07	/* read/write is disabled */
+#define BLNC_CHK		0x08	/* blank/DOF found in read, or */
+					/* write to a non-blank area */
+#define CPY_ABRT		0x0a	/* Copy/Compare/Copy&Verify illegal */
+#define ABRT_CMD		0x0b	/* Target aborted the command */
+#define EQUAL			0x0c	/* Search Data ended with Equal */
+#define VLM_OVRFLW		0x0d	/* Some data are left in the buffer */
+#define MISCMP			0x0e	/* miscompare found */
+
+#define READ_ERR -1
+#define WRITE_ERR -2
+
+#define FIRST_RESET 0x01
+#define USED_EXIST 0x02
+
+/*-----------------------------------
+ SENSE_DATA
+-----------------------------------*/
+/*---- valid ----*/
+#define SENSE_VALID 0x80 /* Sense data is valid as SCSI2 */
+#define SENSE_INVALID 0x00 /* Sense data is invalid as SCSI2 */
+
+/*---- error code ----*/
+#define CUR_ERR 0x70 /* current error */
+#define DEF_ERR 0x71 /* specific command error */
+
+/*---- sense key Information ----*/
+#define SNSKEYINFO_LEN 3 /* length of sense key information */
+
+#define SKSV 0x80
+#define CDB_ILLEGAL 0x40
+#define DAT_ILLEGAL 0x00
+#define BPV 0x08
+#define BIT_ILLEGAL0 0 /* bit0 is illegal */
+#define BIT_ILLEGAL1 1 /* bit1 is illegal */
+#define BIT_ILLEGAL2 2 /* bit2 is illegal */
+#define BIT_ILLEGAL3 3 /* bit3 is illegal */
+#define BIT_ILLEGAL4 4 /* bit4 is illegal */
+#define BIT_ILLEGAL5 5 /* bit5 is illegal */
+#define BIT_ILLEGAL6 6 /* bit6 is illegal */
+#define BIT_ILLEGAL7 7 /* bit7 is illegal */
+
+/*---- ASC ----*/
+#define ASC_NO_INFO 0x00
+#define ASC_MISCMP 0x1d
+#define ASC_INVLD_CDB 0x24
+#define ASC_INVLD_PARA 0x26
+#define ASC_LU_NOT_READY 0x04
+#define ASC_WRITE_ERR 0x0c
+#define ASC_READ_ERR 0x11
+#define ASC_LOAD_EJCT_ERR 0x53
+#define ASC_MEDIA_NOT_PRESENT 0x3A
+#define ASC_MEDIA_CHANGED 0x28
+#define ASC_MEDIA_IN_PROCESS 0x04
+#define ASC_WRITE_PROTECT 0x27
+#define ASC_LUN_NOT_SUPPORTED 0x25
+
+/*---- ASQC ----*/
+#define ASCQ_NO_INFO 0x00
+#define ASCQ_MEDIA_IN_PROCESS 0x01
+#define ASCQ_MISCMP 0x00
+#define ASCQ_INVLD_CDB 0x00
+#define ASCQ_INVLD_PARA 0x02
+#define ASCQ_LU_NOT_READY 0x02
+#define ASCQ_WRITE_ERR 0x02
+#define ASCQ_READ_ERR 0x00
+#define ASCQ_LOAD_EJCT_ERR 0x00
+#define ASCQ_WRITE_PROTECT 0x00
+
+
+struct sense_data_t {
+	unsigned char err_code;		/* error code */
+					/* bit7 : valid */
+					/*   (1 : SCSI2) */
+					/*   (0 : Vendor specific) */
+					/* bit6-0 : error code */
+					/*   (0x70 : current error) */
+					/*   (0x71 : specific command error) */
+ unsigned char seg_no; /* segment No. */
+ unsigned char sense_key; /* byte5 : ILI */
+ /* bit3-0 : sense key */
+ unsigned char info[4]; /* information */
+ unsigned char ad_sense_len; /* additional sense data length */
+ unsigned char cmd_info[4]; /* command specific information */
+ unsigned char asc; /* ASC */
+ unsigned char ascq; /* ASCQ */
+ unsigned char rfu; /* FRU */
+ unsigned char sns_key_info[3];/* sense key specific information */
+};
+
+/* PCI Operation Register Address */
+#define RTSX_HCBAR 0x00
+#define RTSX_HCBCTLR 0x04
+#define RTSX_HDBAR 0x08
+#define RTSX_HDBCTLR 0x0C
+#define RTSX_HAIMR 0x10
+#define RTSX_BIPR 0x14
+#define RTSX_BIER 0x18
+
+/* Host command buffer control register */
+#define STOP_CMD (0x01 << 28)
+
+/* Host data buffer control register */
+#define SDMA_MODE 0x00
+#define ADMA_MODE (0x02 << 26)
+#define STOP_DMA (0x01 << 28)
+#define TRIG_DMA (0x01 << 31)
+
+/* Bus interrupt pending register */
+#define CMD_DONE_INT (1 << 31)
+#define DATA_DONE_INT (1 << 30)
+#define TRANS_OK_INT (1 << 29)
+#define TRANS_FAIL_INT (1 << 28)
+#define XD_INT (1 << 27)
+#define MS_INT (1 << 26)
+#define SD_INT (1 << 25)
+#define GPIO0_INT (1 << 24)
+#define OC_INT (1 << 23)
+#define SD_WRITE_PROTECT (1 << 19)
+#define XD_EXIST (1 << 18)
+#define MS_EXIST (1 << 17)
+#define SD_EXIST (1 << 16)
+#define DELINK_INT GPIO0_INT
+#define MS_OC_INT (1 << 23)
+#define SD_OC_INT (1 << 22)
+
+#define CARD_INT (XD_INT | MS_INT | SD_INT)
+#define NEED_COMPLETE_INT (DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT)
+#define RTSX_INT (CMD_DONE_INT | NEED_COMPLETE_INT | CARD_INT | GPIO0_INT | OC_INT)
+
+#define CARD_EXIST (XD_EXIST | MS_EXIST | SD_EXIST)
+
+/* Bus interrupt enable register */
+#define CMD_DONE_INT_EN (1 << 31)
+#define DATA_DONE_INT_EN (1 << 30)
+#define TRANS_OK_INT_EN (1 << 29)
+#define TRANS_FAIL_INT_EN (1 << 28)
+#define XD_INT_EN (1 << 27)
+#define MS_INT_EN (1 << 26)
+#define SD_INT_EN (1 << 25)
+#define GPIO0_INT_EN (1 << 24)
+#define OC_INT_EN (1 << 23)
+#define DELINK_INT_EN GPIO0_INT_EN
+#define MS_OC_INT_EN (1 << 23)
+#define SD_OC_INT_EN (1 << 22)
+
+
+#define READ_REG_CMD 0
+#define WRITE_REG_CMD 1
+#define CHECK_REG_CMD 2
+
+#define HOST_TO_DEVICE 0
+#define DEVICE_TO_HOST 1
+
+
+#define RTSX_RESV_BUF_LEN 4096
+#define HOST_CMDS_BUF_LEN 1024
+#define HOST_SG_TBL_BUF_LEN (RTSX_RESV_BUF_LEN - HOST_CMDS_BUF_LEN)
+
+#define SD_NR 2
+#define MS_NR 3
+#define XD_NR 4
+#define SPI_NR 7
+#define SD_CARD (1 << SD_NR)
+#define MS_CARD (1 << MS_NR)
+#define XD_CARD (1 << XD_NR)
+#define SPI_CARD (1 << SPI_NR)
+
+#define MAX_ALLOWED_LUN_CNT 8
+
+#define XD_FREE_TABLE_CNT 1200
+#define MS_FREE_TABLE_CNT 512
+
+
+/* Bit Operation */
+#define SET_BIT(data, idx) ((data) |= 1 << (idx))
+#define CLR_BIT(data, idx) ((data) &= ~(1 << (idx)))
+#define CHK_BIT(data, idx) ((data) & (1 << (idx)))
+
+/* SG descriptor */
+#define SG_INT 0x04
+#define SG_END 0x02
+#define SG_VALID 0x01
+
+#define SG_NO_OP 0x00
+#define SG_TRANS_DATA (0x02 << 4)
+#define SG_LINK_DESC (0x03 << 4)
+
+struct rtsx_chip;
+
+typedef int (*card_rw_func)(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 sec_addr, u16 sec_cnt);
+
+/* Supported Clock */
+enum card_clock {CLK_20 = 1, CLK_30, CLK_40, CLK_50, CLK_60,
+ CLK_80, CLK_100, CLK_120, CLK_150, CLK_200};
+
+enum RTSX_STAT {RTSX_STAT_INIT, RTSX_STAT_IDLE, RTSX_STAT_RUN, RTSX_STAT_SS,
+ RTSX_STAT_DELINK, RTSX_STAT_SUSPEND,
+ RTSX_STAT_ABORT, RTSX_STAT_DISCONNECT};
+enum IC_VER {IC_VER_AB, IC_VER_C = 2, IC_VER_D = 3};
+
+#define MAX_RESET_CNT 3
+
+/* For MS Card */
+#define MAX_DEFECTIVE_BLOCK 10
+
+struct zone_entry {
+ u16 *l2p_table;
+ u16 *free_table;
+ u16 defect_list[MAX_DEFECTIVE_BLOCK]; /* For MS card only */
+ int set_index;
+ int get_index;
+ int unused_blk_cnt;
+ int disable_count;
+ /* To indicate whether the L2P table of this zone has been built. */
+ int build_flag;
+};
+
+#define TYPE_SD 0x0000
+#define TYPE_MMC 0x0001
+
+/* TYPE_SD */
+#define SD_HS 0x0100
+#define SD_SDR50 0x0200
+#define SD_DDR50 0x0400
+#define SD_SDR104 0x0800
+#define SD_HCXC 0x1000
+
+/* TYPE_MMC */
+#define MMC_26M 0x0100
+#define MMC_52M 0x0200
+#define MMC_4BIT 0x0400
+#define MMC_8BIT 0x0800
+#define MMC_SECTOR_MODE 0x1000
+#define MMC_DDR52 0x2000
+
+/* SD card */
+#define CHK_SD(sd_card) (((sd_card)->sd_type & 0xFF) == TYPE_SD)
+#define CHK_SD_HS(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_HS))
+#define CHK_SD_SDR50(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_SDR50))
+#define CHK_SD_DDR50(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_DDR50))
+#define CHK_SD_SDR104(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_SDR104))
+#define CHK_SD_HCXC(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_HCXC))
+#define CHK_SD_HC(sd_card) (CHK_SD_HCXC(sd_card) && ((sd_card)->capacity <= 0x4000000))
+#define CHK_SD_XC(sd_card) (CHK_SD_HCXC(sd_card) && ((sd_card)->capacity > 0x4000000))
+#define CHK_SD30_SPEED(sd_card) (CHK_SD_SDR50(sd_card) || CHK_SD_DDR50(sd_card) || CHK_SD_SDR104(sd_card))
+
+#define SET_SD(sd_card) ((sd_card)->sd_type = TYPE_SD)
+#define SET_SD_HS(sd_card) ((sd_card)->sd_type |= SD_HS)
+#define SET_SD_SDR50(sd_card) ((sd_card)->sd_type |= SD_SDR50)
+#define SET_SD_DDR50(sd_card) ((sd_card)->sd_type |= SD_DDR50)
+#define SET_SD_SDR104(sd_card) ((sd_card)->sd_type |= SD_SDR104)
+#define SET_SD_HCXC(sd_card) ((sd_card)->sd_type |= SD_HCXC)
+
+#define CLR_SD_HS(sd_card) ((sd_card)->sd_type &= ~SD_HS)
+#define CLR_SD_SDR50(sd_card) ((sd_card)->sd_type &= ~SD_SDR50)
+#define CLR_SD_DDR50(sd_card) ((sd_card)->sd_type &= ~SD_DDR50)
+#define CLR_SD_SDR104(sd_card) ((sd_card)->sd_type &= ~SD_SDR104)
+#define CLR_SD_HCXC(sd_card) ((sd_card)->sd_type &= ~SD_HCXC)
+
+/* MMC card */
+#define CHK_MMC(sd_card) (((sd_card)->sd_type & 0xFF) == TYPE_MMC)
+#define CHK_MMC_26M(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_26M))
+#define CHK_MMC_52M(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_52M))
+#define CHK_MMC_4BIT(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_4BIT))
+#define CHK_MMC_8BIT(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_8BIT))
+#define CHK_MMC_SECTOR_MODE(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_SECTOR_MODE))
+#define CHK_MMC_DDR52(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_DDR52))
+
+#define SET_MMC(sd_card) ((sd_card)->sd_type = TYPE_MMC)
+#define SET_MMC_26M(sd_card) ((sd_card)->sd_type |= MMC_26M)
+#define SET_MMC_52M(sd_card) ((sd_card)->sd_type |= MMC_52M)
+#define SET_MMC_4BIT(sd_card) ((sd_card)->sd_type |= MMC_4BIT)
+#define SET_MMC_8BIT(sd_card) ((sd_card)->sd_type |= MMC_8BIT)
+#define SET_MMC_SECTOR_MODE(sd_card) ((sd_card)->sd_type |= MMC_SECTOR_MODE)
+#define SET_MMC_DDR52(sd_card) ((sd_card)->sd_type |= MMC_DDR52)
+
+#define CLR_MMC_26M(sd_card) ((sd_card)->sd_type &= ~MMC_26M)
+#define CLR_MMC_52M(sd_card) ((sd_card)->sd_type &= ~MMC_52M)
+#define CLR_MMC_4BIT(sd_card) ((sd_card)->sd_type &= ~MMC_4BIT)
+#define CLR_MMC_8BIT(sd_card) ((sd_card)->sd_type &= ~MMC_8BIT)
+#define CLR_MMC_SECTOR_MODE(sd_card) ((sd_card)->sd_type &= ~MMC_SECTOR_MODE)
+#define CLR_MMC_DDR52(sd_card) ((sd_card)->sd_type &= ~MMC_DDR52)
+
+#define CHK_MMC_HS(sd_card) (CHK_MMC_52M(sd_card) && CHK_MMC_26M(sd_card))
+#define CLR_MMC_HS(sd_card) \
+do { \
+ CLR_MMC_DDR52(sd_card); \
+ CLR_MMC_52M(sd_card); \
+ CLR_MMC_26M(sd_card); \
+} while (0)
+
+#define SD_SUPPORT_CLASS_TEN 0x01
+#define SD_SUPPORT_1V8 0x02
+
+#define SD_SET_CLASS_TEN(sd_card) ((sd_card)->sd_setting |= SD_SUPPORT_CLASS_TEN)
+#define SD_CHK_CLASS_TEN(sd_card) ((sd_card)->sd_setting & SD_SUPPORT_CLASS_TEN)
+#define SD_CLR_CLASS_TEN(sd_card) ((sd_card)->sd_setting &= ~SD_SUPPORT_CLASS_TEN)
+#define SD_SET_1V8(sd_card) ((sd_card)->sd_setting |= SD_SUPPORT_1V8)
+#define SD_CHK_1V8(sd_card) ((sd_card)->sd_setting & SD_SUPPORT_1V8)
+#define SD_CLR_1V8(sd_card) ((sd_card)->sd_setting &= ~SD_SUPPORT_1V8)
+
+struct sd_info {
+ u16 sd_type;
+ u8 err_code;
+ u8 sd_data_buf_ready;
+ u32 sd_addr;
+ u32 capacity;
+
+ u8 raw_csd[16];
+ u8 raw_scr[8];
+
+ /* Sequential RW */
+ int seq_mode;
+ enum dma_data_direction pre_dir;
+ u32 pre_sec_addr;
+ u16 pre_sec_cnt;
+
+ int cleanup_counter;
+
+ int sd_clock;
+
+ int mmc_dont_switch_bus;
+
+#ifdef SUPPORT_CPRM
+ int sd_pass_thru_en;
+ int pre_cmd_err;
+ u8 last_rsp_type;
+ u8 rsp[17];
+#endif
+
+ u8 func_group1_mask;
+ u8 func_group2_mask;
+ u8 func_group3_mask;
+ u8 func_group4_mask;
+
+ u8 sd_switch_fail;
+ u8 sd_read_phase;
+
+#ifdef SUPPORT_SD_LOCK
+ u8 sd_lock_status;
+ u8 sd_erase_status;
+ u8 sd_lock_notify;
+#endif
+ int need_retune;
+};
+
+struct xd_delay_write_tag {
+ u32 old_phyblock;
+ u32 new_phyblock;
+ u32 logblock;
+ u8 pageoff;
+ u8 delay_write_flag;
+};
+
+struct xd_info {
+ u8 maker_code;
+ u8 device_code;
+ u8 block_shift;
+ u8 page_off;
+ u8 addr_cycle;
+ u16 cis_block;
+ u8 multi_flag;
+ u8 err_code;
+ u32 capacity;
+
+ struct zone_entry *zone;
+ int zone_cnt;
+
+ struct xd_delay_write_tag delay_write;
+ int cleanup_counter;
+
+ int xd_clock;
+};
+
+#define MODE_512_SEQ 0x01
+#define MODE_2K_SEQ 0x02
+
+#define TYPE_MS 0x0000
+#define TYPE_MSPRO 0x0001
+
+#define MS_4BIT 0x0100
+#define MS_8BIT 0x0200
+#define MS_HG 0x0400
+#define MS_XC 0x0800
+
+#define HG8BIT (MS_HG | MS_8BIT)
+
+#define CHK_MSPRO(ms_card) (((ms_card)->ms_type & 0xFF) == TYPE_MSPRO)
+#define CHK_HG8BIT(ms_card) (CHK_MSPRO(ms_card) && (((ms_card)->ms_type & HG8BIT) == HG8BIT))
+#define CHK_MSXC(ms_card) (CHK_MSPRO(ms_card) && ((ms_card)->ms_type & MS_XC))
+#define CHK_MSHG(ms_card) (CHK_MSPRO(ms_card) && ((ms_card)->ms_type & MS_HG))
+
+#define CHK_MS8BIT(ms_card) (((ms_card)->ms_type & MS_8BIT))
+#define CHK_MS4BIT(ms_card) (((ms_card)->ms_type & MS_4BIT))
+
+struct ms_delay_write_tag {
+ u16 old_phyblock;
+ u16 new_phyblock;
+ u16 logblock;
+ u8 pageoff;
+ u8 delay_write_flag;
+};
+
+struct ms_info {
+ u16 ms_type;
+ u8 block_shift;
+ u8 page_off;
+ u16 total_block;
+ u16 boot_block;
+ u32 capacity;
+
+ u8 check_ms_flow;
+ u8 switch_8bit_fail;
+ u8 err_code;
+
+ struct zone_entry *segment;
+ int segment_cnt;
+
+ int pro_under_formatting;
+ int format_status;
+ u16 progress;
+ u8 raw_sys_info[96];
+#ifdef SUPPORT_PCGL_1P18
+ u8 raw_model_name[48];
+#endif
+
+ u8 multi_flag;
+
+ /* Sequential RW */
+ u8 seq_mode;
+ enum dma_data_direction pre_dir;
+ u32 pre_sec_addr;
+ u16 pre_sec_cnt;
+ u32 total_sec_cnt;
+
+ struct ms_delay_write_tag delay_write;
+
+ int cleanup_counter;
+
+ int ms_clock;
+
+#ifdef SUPPORT_MAGIC_GATE
+ u8 magic_gate_id[16];
+ u8 mg_entry_num;
+ int mg_auth; /* flag to indicate authentication process */
+#endif
+};
+
+struct spi_info {
+ u8 use_clk;
+ u8 write_en;
+ u16 clk_div;
+ u8 err_code;
+
+ int spi_clock;
+};
+
+
+#ifdef _MSG_TRACE
+struct trace_msg_t {
+ u16 line;
+#define MSG_FUNC_LEN 64
+ char func[MSG_FUNC_LEN];
+#define MSG_FILE_LEN 32
+ char file[MSG_FILE_LEN];
+#define TIME_VAL_LEN 16
+ u8 timeval_buf[TIME_VAL_LEN];
+ u8 valid;
+};
+#endif
+
+/************/
+/* LUN mode */
+/************/
+/* Single LUN, support xD/SD/MS */
+#define DEFAULT_SINGLE 0
+/* 2 LUN mode, support SD/MS */
+#define SD_MS_2LUN 1
+/* Single LUN, but only support SD/MS, for Barossa LQFP */
+#define SD_MS_1LUN 2
+
+#define LAST_LUN_MODE 2
+
+/* Barossa package */
+#define QFN 0
+#define LQFP 1
+
+/******************/
+/* sd_ctl bit map */
+/******************/
+/* SD push point control, bit 0, 1 */
+#define SD_PUSH_POINT_CTL_MASK 0x03
+#define SD_PUSH_POINT_DELAY 0x01
+#define SD_PUSH_POINT_AUTO 0x02
+/* SD sample point control, bit 2, 3 */
+#define SD_SAMPLE_POINT_CTL_MASK 0x0C
+#define SD_SAMPLE_POINT_DELAY 0x04
+#define SD_SAMPLE_POINT_AUTO 0x08
+/* SD DDR Tx phase set by user, bit 4 */
+#define SD_DDR_TX_PHASE_SET_BY_USER 0x10
+/* MMC DDR Tx phase set by user, bit 5 */
+#define MMC_DDR_TX_PHASE_SET_BY_USER 0x20
+/* Support MMC DDR mode, bit 6 */
+#define SUPPORT_MMC_DDR_MODE 0x40
+/* Reset MMC at first */
+#define RESET_MMC_FIRST 0x80
+
+#define SEQ_START_CRITERIA 0x20
+
+/* MS Power Class En */
+#define POWER_CLASS_2_EN 0x02
+#define POWER_CLASS_1_EN 0x01
+
+#define MAX_SHOW_CNT 10
+#define MAX_RESET_CNT 3
+
+#define SDIO_EXIST 0x01
+#define SDIO_IGNORED 0x02
+
+#define CHK_SDIO_EXIST(chip) ((chip)->sdio_func_exist & SDIO_EXIST)
+#define SET_SDIO_EXIST(chip) ((chip)->sdio_func_exist |= SDIO_EXIST)
+#define CLR_SDIO_EXIST(chip) ((chip)->sdio_func_exist &= ~SDIO_EXIST)
+
+#define CHK_SDIO_IGNORED(chip) ((chip)->sdio_func_exist & SDIO_IGNORED)
+#define SET_SDIO_IGNORED(chip) ((chip)->sdio_func_exist |= SDIO_IGNORED)
+#define CLR_SDIO_IGNORED(chip) ((chip)->sdio_func_exist &= ~SDIO_IGNORED)
+
+struct rtsx_chip {
+ rtsx_dev_t *rtsx;
+
+ u32 int_reg; /* Bus interrupt pending register */
+ char max_lun;
+ void *context;
+
+ void *host_cmds_ptr; /* host commands buffer pointer */
+ dma_addr_t host_cmds_addr;
+ int ci; /* Command Index */
+
+ void *host_sg_tbl_ptr; /* SG descriptor table */
+ dma_addr_t host_sg_tbl_addr;
+ int sgi; /* SG entry index */
+
+ struct scsi_cmnd *srb; /* current srb */
+ struct sense_data_t sense_buffer[MAX_ALLOWED_LUN_CNT];
+
+ int cur_clk; /* current card clock */
+
+ /* Current accessed card */
+ int cur_card;
+
+ unsigned long need_release; /* need release bit map */
+ unsigned long need_reset; /* need reset bit map */
+ /* Flag to indicate that this card has just resumed from SS state,
+ * and needs to be released before being reset
+ */
+ unsigned long need_reinit;
+
+ int rw_need_retry;
+
+#ifdef SUPPORT_OCP
+ u32 ocp_int;
+ u8 ocp_stat;
+#endif
+
+ u8 card_exist; /* card exist bit map (physical exist) */
+ u8 card_ready; /* card ready bit map (reset successfully) */
+ u8 card_fail; /* card reset fail bit map */
+ u8 card_ejected; /* card ejected bit map */
+ u8 card_wp; /* card write protected bit map */
+
+ u8 lun_mc; /* flag to indicate whether to answer
+ * MediaChange */
+
+#ifndef LED_AUTO_BLINK
+ int led_toggle_counter;
+#endif
+
+ int sd_reset_counter;
+ int xd_reset_counter;
+ int ms_reset_counter;
+
+ /* card bus width */
+ u8 card_bus_width[MAX_ALLOWED_LUN_CNT];
+ /* card capacity */
+ u32 capacity[MAX_ALLOWED_LUN_CNT];
+ /* read/write card function pointer */
+ card_rw_func rw_card[MAX_ALLOWED_LUN_CNT];
+ /* read/write capacity, used for GPIO Toggle */
+ u32 rw_cap[MAX_ALLOWED_LUN_CNT];
+ /* card to lun mapping table */
+ u8 card2lun[32];
+ /* lun to card mapping table */
+ u8 lun2card[MAX_ALLOWED_LUN_CNT];
+
+ int rw_fail_cnt[MAX_ALLOWED_LUN_CNT];
+
+ int sd_show_cnt;
+ int xd_show_cnt;
+ int ms_show_cnt;
+
+ /* card information */
+ struct sd_info sd_card;
+ struct xd_info xd_card;
+ struct ms_info ms_card;
+
+ struct spi_info spi;
+
+#ifdef _MSG_TRACE
+ struct trace_msg_t trace_msg[TRACE_ITEM_CNT];
+ int msg_idx;
+#endif
+
+ int auto_delink_cnt;
+ int auto_delink_allowed;
+
+ int aspm_enabled;
+
+ int sdio_aspm;
+ int sdio_idle;
+ int sdio_counter;
+ u8 sdio_raw_data[12];
+
+ u8 sd_io;
+ u8 sd_int;
+
+ u8 rtsx_flag;
+
+ int ss_counter;
+ int idle_counter;
+ enum RTSX_STAT rtsx_stat;
+
+ u16 vendor_id;
+ u16 product_id;
+ u8 ic_version;
+
+ int driver_first_load;
+
+#ifdef HW_AUTO_SWITCH_SD_BUS
+ int sdio_in_charge;
+#endif
+
+ u8 aspm_level[2];
+
+ int chip_insert_with_sdio;
+
+ /* Options */
+
+ int adma_mode;
+
+ int auto_delink_en;
+ int ss_en;
+ u8 lun_mode;
+ u8 aspm_l0s_l1_en;
+
+ int power_down_in_ss;
+
+ int sdr104_en;
+ int ddr50_en;
+ int sdr50_en;
+
+ int baro_pkg;
+
+ int asic_code;
+ int phy_debug_mode;
+ int hw_bypass_sd;
+ int sdio_func_exist;
+ int aux_pwr_exist;
+ u8 ms_power_class_en;
+
+ int mspro_formatter_enable;
+
+ int remote_wakeup_en;
+
+ int ignore_sd;
+ int use_hw_setting;
+
+ int ss_idle_period;
+
+ int dynamic_aspm;
+
+ int fpga_sd_sdr104_clk;
+ int fpga_sd_ddr50_clk;
+ int fpga_sd_sdr50_clk;
+ int fpga_sd_hs_clk;
+ int fpga_mmc_52m_clk;
+ int fpga_ms_hg_clk;
+ int fpga_ms_4bit_clk;
+ int fpga_ms_1bit_clk;
+
+ int asic_sd_sdr104_clk;
+ int asic_sd_ddr50_clk;
+ int asic_sd_sdr50_clk;
+ int asic_sd_hs_clk;
+ int asic_mmc_52m_clk;
+ int asic_ms_hg_clk;
+ int asic_ms_4bit_clk;
+ int asic_ms_1bit_clk;
+
+ u8 ssc_depth_sd_sdr104;
+ u8 ssc_depth_sd_ddr50;
+ u8 ssc_depth_sd_sdr50;
+ u8 ssc_depth_sd_hs;
+ u8 ssc_depth_mmc_52m;
+ u8 ssc_depth_ms_hg;
+ u8 ssc_depth_ms_4bit;
+ u8 ssc_depth_low_speed;
+
+ u8 card_drive_sel;
+ u8 sd30_drive_sel_1v8;
+ u8 sd30_drive_sel_3v3;
+
+ u8 sd_400mA_ocp_thd;
+ u8 sd_800mA_ocp_thd;
+ u8 ms_ocp_thd;
+
+ int ssc_en;
+ int msi_en;
+
+ int xd_timeout;
+ int sd_timeout;
+ int ms_timeout;
+ int mspro_timeout;
+
+ int auto_power_down;
+
+ int sd_ddr_tx_phase;
+ int mmc_ddr_tx_phase;
+ int sd_default_tx_phase;
+ int sd_default_rx_phase;
+
+ int pmos_pwr_on_interval;
+ int sd_voltage_switch_delay;
+ int s3_pwr_off_delay;
+
+ int force_clkreq_0;
+ int ft2_fast_mode;
+
+ int do_delink_before_power_down;
+ int polling_config;
+ int sdio_retry_cnt;
+
+ int delink_stage1_step;
+ int delink_stage2_step;
+ int delink_stage3_step;
+
+ int auto_delink_in_L1;
+ int hp_watch_bios_hotplug;
+ int support_ms_8bit;
+
+ u8 blink_led;
+ u8 phy_voltage;
+ u8 max_payload;
+
+ u32 sd_speed_prior;
+ u32 sd_current_prior;
+ u32 sd_ctl;
+};
+
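+/* Record the new chip state; any transition to a non-idle state clears the idle counter */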
+#define rtsx_set_stat(chip, stat) \
+do { \
+ if ((stat) != RTSX_STAT_IDLE) { \
+ (chip)->idle_counter = 0; \
+ } \
+ (chip)->rtsx_stat = (enum RTSX_STAT)(stat); \
+} while (0)
+#define rtsx_get_stat(chip) ((chip)->rtsx_stat)
+#define rtsx_chk_stat(chip, stat) ((chip)->rtsx_stat == (stat))
+
+#define RTSX_SET_DELINK(chip) ((chip)->rtsx_flag |= 0x01)
+#define RTSX_CLR_DELINK(chip) ((chip)->rtsx_flag &= 0xFE)
+#define RTSX_TST_DELINK(chip) ((chip)->rtsx_flag & 0x01)
+
+#define CHECK_PID(chip, pid) ((chip)->product_id == (pid))
+#define CHECK_BARO_PKG(chip, pkg) ((chip)->baro_pkg == (pkg))
+#define CHECK_LUN_MODE(chip, mode) ((chip)->lun_mode == (mode))
+
+/* Power down control */
+#define SSC_PDCTL 0x01
+#define OC_PDCTL 0x02
+
+int rtsx_force_power_on(struct rtsx_chip *chip, u8 ctl);
+int rtsx_force_power_down(struct rtsx_chip *chip, u8 ctl);
+
+void rtsx_disable_card_int(struct rtsx_chip *chip);
+void rtsx_enable_card_int(struct rtsx_chip *chip);
+void rtsx_enable_bus_int(struct rtsx_chip *chip);
+void rtsx_disable_bus_int(struct rtsx_chip *chip);
+int rtsx_reset_chip(struct rtsx_chip *chip);
+int rtsx_init_chip(struct rtsx_chip *chip);
+void rtsx_release_chip(struct rtsx_chip *chip);
+void rtsx_polling_func(struct rtsx_chip *chip);
+void rtsx_undo_delink(struct rtsx_chip *chip);
+void rtsx_stop_cmd(struct rtsx_chip *chip, int card);
+int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data);
+int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data);
+int rtsx_write_cfg_dw(struct rtsx_chip *chip,
+ u8 func_no, u16 addr, u32 mask, u32 val);
+int rtsx_read_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 *val);
+int rtsx_write_cfg_seq(struct rtsx_chip *chip,
+ u8 func, u16 addr, u8 *buf, int len);
+int rtsx_read_cfg_seq(struct rtsx_chip *chip,
+ u8 func, u16 addr, u8 *buf, int len);
+int rtsx_write_phy_register(struct rtsx_chip *chip, u8 addr, u16 val);
+int rtsx_read_phy_register(struct rtsx_chip *chip, u8 addr, u16 *val);
+int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val);
+int rtsx_write_efuse(struct rtsx_chip *chip, u8 addr, u8 val);
+int rtsx_clr_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit);
+int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit);
+int rtsx_check_link_ready(struct rtsx_chip *chip);
+void rtsx_enter_ss(struct rtsx_chip *chip);
+void rtsx_exit_ss(struct rtsx_chip *chip);
+int rtsx_pre_handle_interrupt(struct rtsx_chip *chip);
+void rtsx_enter_L1(struct rtsx_chip *chip);
+void rtsx_exit_L1(struct rtsx_chip *chip);
+void rtsx_do_before_power_down(struct rtsx_chip *chip, int pm_stat);
+void rtsx_enable_aspm(struct rtsx_chip *chip);
+void rtsx_disable_aspm(struct rtsx_chip *chip);
+int rtsx_read_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len);
+int rtsx_write_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len);
+int rtsx_check_chip_exist(struct rtsx_chip *chip);
+
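+/* Register access wrappers that return from the enclosing function via TRACE_RET() on failure */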
+#define RTSX_WRITE_REG(chip, addr, mask, data) \
+ do { \
+ int retval = rtsx_write_register((chip), (addr), (mask), (data)); \
+ if (retval != STATUS_SUCCESS) { \
+ TRACE_RET((chip), retval); \
+ } \
+ } while (0)
+
+#define RTSX_READ_REG(chip, addr, data) \
+ do { \
+ int retval = rtsx_read_register((chip), (addr), (data)); \
+ if (retval != STATUS_SUCCESS) { \
+ TRACE_RET((chip), retval); \
+ } \
+ } while (0)
+
+#endif /* __REALTEK_RTSX_CHIP_H */
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
new file mode 100644
index 000000000000..382e73a54f4d
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -0,0 +1,3370 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include "rtsx.h"
+#include "rtsx_transport.h"
+#include "rtsx_sys.h"
+#include "rtsx_card.h"
+#include "rtsx_chip.h"
+#include "rtsx_scsi.h"
+#include "sd.h"
+#include "ms.h"
+#include "spi.h"
+
+void scsi_show_command(struct scsi_cmnd *srb)
+{
+ char *what = NULL;
+ int i, unknown_cmd = 0;
+
+ switch (srb->cmnd[0]) {
+ case TEST_UNIT_READY:
+ what = "TEST_UNIT_READY";
+ break;
+ case REZERO_UNIT:
+ what = "REZERO_UNIT";
+ break;
+ case REQUEST_SENSE:
+ what = "REQUEST_SENSE";
+ break;
+ case FORMAT_UNIT:
+ what = "FORMAT_UNIT";
+ break;
+ case READ_BLOCK_LIMITS:
+ what = "READ_BLOCK_LIMITS";
+ break;
+ case REASSIGN_BLOCKS:
+ what = "REASSIGN_BLOCKS";
+ break;
+ case READ_6:
+ what = "READ_6";
+ break;
+ case WRITE_6:
+ what = "WRITE_6";
+ break;
+ case SEEK_6:
+ what = "SEEK_6";
+ break;
+ case READ_REVERSE:
+ what = "READ_REVERSE";
+ break;
+ case WRITE_FILEMARKS:
+ what = "WRITE_FILEMARKS";
+ break;
+ case SPACE:
+ what = "SPACE";
+ break;
+ case INQUIRY:
+ what = "INQUIRY";
+ break;
+ case RECOVER_BUFFERED_DATA:
+ what = "RECOVER_BUFFERED_DATA";
+ break;
+ case MODE_SELECT:
+ what = "MODE_SELECT";
+ break;
+ case RESERVE:
+ what = "RESERVE";
+ break;
+ case RELEASE:
+ what = "RELEASE";
+ break;
+ case COPY:
+ what = "COPY";
+ break;
+ case ERASE:
+ what = "ERASE";
+ break;
+ case MODE_SENSE:
+ what = "MODE_SENSE";
+ break;
+ case START_STOP:
+ what = "START_STOP";
+ break;
+ case RECEIVE_DIAGNOSTIC:
+ what = "RECEIVE_DIAGNOSTIC";
+ break;
+ case SEND_DIAGNOSTIC:
+ what = "SEND_DIAGNOSTIC";
+ break;
+ case ALLOW_MEDIUM_REMOVAL:
+ what = "ALLOW_MEDIUM_REMOVAL";
+ break;
+ case SET_WINDOW:
+ what = "SET_WINDOW";
+ break;
+ case READ_CAPACITY:
+ what = "READ_CAPACITY";
+ break;
+ case READ_10:
+ what = "READ_10";
+ break;
+ case WRITE_10:
+ what = "WRITE_10";
+ break;
+ case SEEK_10:
+ what = "SEEK_10";
+ break;
+ case WRITE_VERIFY:
+ what = "WRITE_VERIFY";
+ break;
+ case VERIFY:
+ what = "VERIFY";
+ break;
+ case SEARCH_HIGH:
+ what = "SEARCH_HIGH";
+ break;
+ case SEARCH_EQUAL:
+ what = "SEARCH_EQUAL";
+ break;
+ case SEARCH_LOW:
+ what = "SEARCH_LOW";
+ break;
+ case SET_LIMITS:
+ what = "SET_LIMITS";
+ break;
+ case READ_POSITION:
+ what = "READ_POSITION";
+ break;
+ case SYNCHRONIZE_CACHE:
+ what = "SYNCHRONIZE_CACHE";
+ break;
+ case LOCK_UNLOCK_CACHE:
+ what = "LOCK_UNLOCK_CACHE";
+ break;
+ case READ_DEFECT_DATA:
+ what = "READ_DEFECT_DATA";
+ break;
+ case MEDIUM_SCAN:
+ what = "MEDIUM_SCAN";
+ break;
+ case COMPARE:
+ what = "COMPARE";
+ break;
+ case COPY_VERIFY:
+ what = "COPY_VERIFY";
+ break;
+ case WRITE_BUFFER:
+ what = "WRITE_BUFFER";
+ break;
+ case READ_BUFFER:
+ what = "READ_BUFFER";
+ break;
+ case UPDATE_BLOCK:
+ what = "UPDATE_BLOCK";
+ break;
+ case READ_LONG:
+ what = "READ_LONG";
+ break;
+ case WRITE_LONG:
+ what = "WRITE_LONG";
+ break;
+ case CHANGE_DEFINITION:
+ what = "CHANGE_DEFINITION";
+ break;
+ case WRITE_SAME:
+ what = "WRITE_SAME";
+ break;
+ case GPCMD_READ_SUBCHANNEL:
+ what = "READ SUBCHANNEL";
+ break;
+ case READ_TOC:
+ what = "READ_TOC";
+ break;
+ case GPCMD_READ_HEADER:
+ what = "READ HEADER";
+ break;
+ case GPCMD_PLAY_AUDIO_10:
+ what = "PLAY AUDIO (10)";
+ break;
+ case GPCMD_PLAY_AUDIO_MSF:
+ what = "PLAY AUDIO MSF";
+ break;
+ case GPCMD_GET_EVENT_STATUS_NOTIFICATION:
+ what = "GET EVENT/STATUS NOTIFICATION";
+ break;
+ case GPCMD_PAUSE_RESUME:
+ what = "PAUSE/RESUME";
+ break;
+ case LOG_SELECT:
+ what = "LOG_SELECT";
+ break;
+ case LOG_SENSE:
+ what = "LOG_SENSE";
+ break;
+ case GPCMD_STOP_PLAY_SCAN:
+ what = "STOP PLAY/SCAN";
+ break;
+ case GPCMD_READ_DISC_INFO:
+ what = "READ DISC INFORMATION";
+ break;
+ case GPCMD_READ_TRACK_RZONE_INFO:
+ what = "READ TRACK INFORMATION";
+ break;
+ case GPCMD_RESERVE_RZONE_TRACK:
+ what = "RESERVE TRACK";
+ break;
+ case GPCMD_SEND_OPC:
+ what = "SEND OPC";
+ break;
+ case MODE_SELECT_10:
+ what = "MODE_SELECT_10";
+ break;
+ case GPCMD_REPAIR_RZONE_TRACK:
+ what = "REPAIR TRACK";
+ break;
+ case 0x59:
+ what = "READ MASTER CUE";
+ break;
+ case MODE_SENSE_10:
+ what = "MODE_SENSE_10";
+ break;
+ case GPCMD_CLOSE_TRACK:
+ what = "CLOSE TRACK/SESSION";
+ break;
+ case 0x5C:
+ what = "READ BUFFER CAPACITY";
+ break;
+ case 0x5D:
+ what = "SEND CUE SHEET";
+ break;
+ case GPCMD_BLANK:
+ what = "BLANK";
+ break;
+ case REPORT_LUNS:
+ what = "REPORT LUNS";
+ break;
+ case MOVE_MEDIUM:
+ what = "MOVE_MEDIUM or PLAY AUDIO (12)";
+ break;
+ case READ_12:
+ what = "READ_12";
+ break;
+ case WRITE_12:
+ what = "WRITE_12";
+ break;
+ case WRITE_VERIFY_12:
+ what = "WRITE_VERIFY_12";
+ break;
+ case SEARCH_HIGH_12:
+ what = "SEARCH_HIGH_12";
+ break;
+ case SEARCH_EQUAL_12:
+ what = "SEARCH_EQUAL_12";
+ break;
+ case SEARCH_LOW_12:
+ what = "SEARCH_LOW_12";
+ break;
+ case SEND_VOLUME_TAG:
+ what = "SEND_VOLUME_TAG";
+ break;
+ case READ_ELEMENT_STATUS:
+ what = "READ_ELEMENT_STATUS";
+ break;
+ case GPCMD_READ_CD_MSF:
+ what = "READ CD MSF";
+ break;
+ case GPCMD_SCAN:
+ what = "SCAN";
+ break;
+ case GPCMD_SET_SPEED:
+ what = "SET CD SPEED";
+ break;
+ case GPCMD_MECHANISM_STATUS:
+ what = "MECHANISM STATUS";
+ break;
+ case GPCMD_READ_CD:
+ what = "READ CD";
+ break;
+ case 0xE1:
+ what = "WRITE CONTINUE";
+ break;
+ case WRITE_LONG_2:
+ what = "WRITE_LONG_2";
+ break;
+ case VENDOR_CMND:
+ what = "Realtek's vendor command";
+ break;
+ default:
+ what = "(unknown command)"; unknown_cmd = 1;
+ break;
+ }
+
+ if (srb->cmnd[0] != TEST_UNIT_READY)
+ RTSX_DEBUGP("Command %s (%d bytes)\n", what, srb->cmd_len);
+
+ if (unknown_cmd) {
+ RTSX_DEBUGP("");
+ for (i = 0; i < srb->cmd_len && i < 16; i++)
+ RTSX_DEBUGPN(" %02x", srb->cmnd[i]);
+ RTSX_DEBUGPN("\n");
+ }
+}
+
+void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type)
+{
+ switch (sense_type) {
+ case SENSE_TYPE_MEDIA_CHANGE:
+ set_sense_data(chip, lun, CUR_ERR, 0x06, 0, 0x28, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_NOT_PRESENT:
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x3A, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_LBA_OVER_RANGE:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x21, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x25, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_WRITE_PROTECT:
+ set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x27, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x11, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_WRITE_ERR:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x02, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_INVALID_CMD_FIELD:
+ set_sense_data(chip, lun, CUR_ERR, ILGAL_REQ, 0,
+ ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1);
+ break;
+
+ case SENSE_TYPE_FORMAT_IN_PROGRESS:
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04, 0, 0);
+ break;
+
+ case SENSE_TYPE_FORMAT_CMD_FAILED:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x31, 0x01, 0, 0);
+ break;
+
+#ifdef SUPPORT_MAGIC_GATE
+ case SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x02, 0, 0);
+ break;
+
+ case SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x00, 0, 0);
+ break;
+
+ case SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM:
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x30, 0x00, 0, 0);
+ break;
+
+ case SENSE_TYPE_MG_WRITE_ERR:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x00, 0, 0);
+ break;
+#endif
+
+#ifdef SUPPORT_SD_LOCK
+ case SENSE_TYPE_MEDIA_READ_FORBIDDEN:
+ set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x11, 0x13, 0, 0);
+ break;
+#endif
+
+ case SENSE_TYPE_NO_SENSE:
+ default:
+ set_sense_data(chip, lun, CUR_ERR, 0, 0, 0, 0, 0, 0);
+ break;
+ }
+}
+
+void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
+ u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0,
+ u16 sns_key_info1)
+{
+ struct sense_data_t *sense = &(chip->sense_buffer[lun]);
+
+ sense->err_code = err_code;
+ sense->sense_key = sense_key;
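+ /* The INFORMATION field is stored MSB first */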
+ sense->info[0] = (u8)(info >> 24);
+ sense->info[1] = (u8)(info >> 16);
+ sense->info[2] = (u8)(info >> 8);
+ sense->info[3] = (u8)info;
+
+ sense->ad_sense_len = sizeof(struct sense_data_t) - 8;
+ sense->asc = asc;
+ sense->ascq = ascq;
+ if (sns_key_info0 != 0) {
+ sense->sns_key_info[0] = SKSV | sns_key_info0;
+ sense->sns_key_info[1] = (sns_key_info1 & 0xff00) >> 8;
+ sense->sns_key_info[2] = sns_key_info1 & 0x00ff;
+ }
+}
+
+static int test_unit_ready(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ return TRANSPORT_FAILED;
+ }
+
+ if (!(CHK_BIT(chip->lun_mc, lun))) {
+ SET_BIT(chip->lun_mc, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ }
+
+#ifdef SUPPORT_SD_LOCK
+ if (get_lun_card(chip, SCSI_LUN(srb)) == SD_CARD) {
+ struct sd_info *sd_card = &(chip->sd_card);
+ if (sd_card->sd_lock_notify) {
+ sd_card->sd_lock_notify = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ } else if (sd_card->sd_lock_status & SD_LOCKED) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ return TRANSPORT_FAILED;
+ }
+ }
+#endif
+
+ return TRANSPORT_GOOD;
+}
+
+static unsigned char formatter_inquiry_str[20] = {
+ 'M', 'E', 'M', 'O', 'R', 'Y', 'S', 'T', 'I', 'C', 'K',
+#ifdef SUPPORT_MAGIC_GATE
+ '-', 'M', 'G', /* Byte[47:49] */
+#else
+ 0x20, 0x20, 0x20, /* Byte[47:49] */
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+ 0x0B, /* Byte[50]: MG, MS, MSPro, MSXC */
+#else
+ 0x09, /* Byte[50]: MS, MSPro, MSXC */
+#endif
+ 0x00, /* Byte[51]: Category Specific Commands */
+ 0x00, /* Byte[52]: Access Control and feature */
+ 0x20, 0x20, 0x20, /* Byte[53:55] */
+};
+
+static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ char *inquiry_default = (char *)"Generic-xD/SD/M.S. 1.00 ";
+ char *inquiry_sdms = (char *)"Generic-SD/MemoryStick 1.00 ";
+ char *inquiry_sd = (char *)"Generic-SD/MMC 1.00 ";
+ char *inquiry_ms = (char *)"Generic-MemoryStick 1.00 ";
+ char *inquiry_string;
+ unsigned char sendbytes;
+ unsigned char *buf;
+ u8 card = get_lun_card(chip, lun);
+ int pro_formatter_flag = 0;
+ unsigned char inquiry_buf[] = {
+ QULIFIRE|DRCT_ACCESS_DEV,
+ RMB_DISC|0x0D,
+ 0x00,
+ 0x01,
+ 0x1f,
+ 0x02,
+ 0,
+ REL_ADR|WBUS_32|WBUS_16|SYNC|LINKED|CMD_QUE|SFT_RE,
+ };
+
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
+ if (chip->lun2card[lun] == SD_CARD)
+ inquiry_string = inquiry_sd;
+ else
+ inquiry_string = inquiry_ms;
+
+ } else if (CHECK_LUN_MODE(chip, SD_MS_1LUN)) {
+ inquiry_string = inquiry_sdms;
+ } else {
+ inquiry_string = inquiry_default;
+ }
+
+ buf = vmalloc(scsi_bufflen(srb));
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+#ifdef SUPPORT_MAGIC_GATE
+ if ((chip->mspro_formatter_enable) &&
+ (chip->lun2card[lun] & MS_CARD))
+#else
+ if (chip->mspro_formatter_enable)
+#endif
+ {
+ if (!card || (card == MS_CARD))
+ pro_formatter_flag = 1;
+ }
+
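+ /* Standard INQUIRY data is 36 bytes; with the MS formatter string appended it grows to 56 */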
+ if (pro_formatter_flag) {
+ if (scsi_bufflen(srb) < 56)
+ sendbytes = (unsigned char)(scsi_bufflen(srb));
+ else
+ sendbytes = 56;
+
+ } else {
+ if (scsi_bufflen(srb) < 36)
+ sendbytes = (unsigned char)(scsi_bufflen(srb));
+ else
+ sendbytes = 36;
+ }
+
+ if (sendbytes > 8) {
+ memcpy(buf, inquiry_buf, 8);
+ memcpy(buf + 8, inquiry_string, sendbytes - 8);
+ if (pro_formatter_flag) {
+ /* Additional Length */
+ buf[4] = 0x33;
+ }
+ } else {
+ memcpy(buf, inquiry_buf, sendbytes);
+ }
+
+ if (pro_formatter_flag) {
+ if (sendbytes > 36)
+ memcpy(buf + 36, formatter_inquiry_str, sendbytes - 36);
+ }
+
+ scsi_set_resid(srb, 0);
+
+ rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+
+static int start_stop_unit(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+
+ scsi_set_resid(srb, scsi_bufflen(srb));
+
+ if (srb->cmnd[1] == 1)
+ return TRANSPORT_GOOD;
+
+ switch (srb->cmnd[0x4]) {
+ case STOP_MEDIUM:
+ /* Media disabled */
+ return TRANSPORT_GOOD;
+
+ case UNLOAD_MEDIUM:
+ /* Media shall be unloaded */
+ if (check_card_ready(chip, lun))
+ eject_card(chip, lun);
+ return TRANSPORT_GOOD;
+
+ case MAKE_MEDIUM_READY:
+ case LOAD_MEDIUM:
+ if (check_card_ready(chip, lun)) {
+ return TRANSPORT_GOOD;
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ break;
+ }
+
+ TRACE_RET(chip, TRANSPORT_ERROR);
+}
+
+
+static int allow_medium_removal(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int prevent;
+
+ prevent = srb->cmnd[4] & 0x1;
+
+ scsi_set_resid(srb, 0);
+
+ if (prevent) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+
+static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sense_data_t *sense;
+ unsigned int lun = SCSI_LUN(srb);
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned char *tmp, *buf;
+
+ sense = &(chip->sense_buffer[lun]);
+
+ if ((get_lun_card(chip, lun) == MS_CARD) &&
+ ms_card->pro_under_formatting) {
+ if (ms_card->format_status == FORMAT_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ ms_card->pro_under_formatting = 0;
+ ms_card->progress = 0;
+ } else if (ms_card->format_status == FORMAT_IN_PROGRESS) {
+ /* Logical Unit Not Ready, Format in Progress */
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
+ 0, (u16)(ms_card->progress));
+ } else {
+ /* Format Command Failed */
+ set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
+ ms_card->pro_under_formatting = 0;
+ ms_card->progress = 0;
+ }
+
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+ }
+
+ buf = vmalloc(scsi_bufflen(srb));
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ tmp = (unsigned char *)sense;
+ memcpy(buf, tmp, scsi_bufflen(srb));
+
+ rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ vfree(buf);
+
+ scsi_set_resid(srb, 0);
+ /* Reset Sense Data */
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ return TRANSPORT_GOOD;
+}
+
+static void ms_mode_sense(struct rtsx_chip *chip, u8 cmd,
+ int lun, u8 *buf, int buf_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int sys_info_offset;
+ int data_size = buf_len;
+ int support_format = 0;
+ int i = 0;
+
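+ /* Build a MODE SENSE(6) header for MODE_SENSE, otherwise a MODE SENSE(10) header */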
+ if (cmd == MODE_SENSE) {
+ sys_info_offset = 8;
+ if (data_size > 0x68)
+ data_size = 0x68;
+
+ buf[i++] = 0x67; /* Mode Data Length */
+ } else {
+ sys_info_offset = 12;
+ if (data_size > 0x6C)
+ data_size = 0x6C;
+
+ buf[i++] = 0x00; /* Mode Data Length (MSB) */
+ buf[i++] = 0x6A; /* Mode Data Length (LSB) */
+ }
+
+ /* Medium Type Code */
+ if (check_card_ready(chip, lun)) {
+ if (CHK_MSXC(ms_card)) {
+ support_format = 1;
+ buf[i++] = 0x40;
+ } else if (CHK_MSPRO(ms_card)) {
+ support_format = 1;
+ buf[i++] = 0x20;
+ } else {
+ buf[i++] = 0x10;
+ }
+
+ /* WP */
+ if (check_card_wp(chip, lun))
+ buf[i++] = 0x80;
+ else
+ buf[i++] = 0x00;
+
+ } else {
+ buf[i++] = 0x00; /* MediaType */
+ buf[i++] = 0x00; /* WP */
+ }
+
+ buf[i++] = 0x00; /* Reserved */
+
+ if (cmd == MODE_SENSE_10) {
+ buf[i++] = 0x00; /* Reserved */
+ buf[i++] = 0x00; /* Block descriptor length(MSB) */
+ buf[i++] = 0x00; /* Block descriptor length(LSB) */
+
+ /* The Following Data is the content of "Page 0x20" */
+ if (data_size >= 9)
+ buf[i++] = 0x20; /* Page Code */
+ if (data_size >= 10)
+ buf[i++] = 0x62; /* Page Length */
+ if (data_size >= 11)
+ buf[i++] = 0x00; /* No Access Control */
+ if (data_size >= 12) {
+ if (support_format)
+ buf[i++] = 0xC0; /* SF, SGM */
+ else
+ buf[i++] = 0x00;
+ }
+ } else {
+ /* The Following Data is the content of "Page 0x20" */
+ if (data_size >= 5)
+ buf[i++] = 0x20; /* Page Code */
+ if (data_size >= 6)
+ buf[i++] = 0x62; /* Page Length */
+ if (data_size >= 7)
+ buf[i++] = 0x00; /* No Access Control */
+ if (data_size >= 8) {
+ if (support_format)
+ buf[i++] = 0xC0; /* SF, SGM */
+ else
+ buf[i++] = 0x00;
+ }
+ }
+
+ if (data_size > sys_info_offset) {
+ /* 96 Bytes Attribute Data */
+ int len = data_size - sys_info_offset;
+ len = (len < 96) ? len : 96;
+
+ memcpy(buf + sys_info_offset, ms_card->raw_sys_info, len);
+ }
+}
+
+static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned int dataSize;
+ int status;
+ int pro_formatter_flag;
+ unsigned char pageCode, *buf;
+ u8 card = get_lun_card(chip, lun);
+
+#ifndef SUPPORT_MAGIC_GATE
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ scsi_set_resid(srb, scsi_bufflen(srb));
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+#endif
+
+ pro_formatter_flag = 0;
+ dataSize = 8;
+#ifdef SUPPORT_MAGIC_GATE
+ if ((chip->lun2card[lun] & MS_CARD)) {
+ if (!card || (card == MS_CARD)) {
+ dataSize = 108;
+ if (chip->mspro_formatter_enable)
+ pro_formatter_flag = 1;
+ }
+ }
+#else
+ if (card == MS_CARD) {
+ if (chip->mspro_formatter_enable) {
+ pro_formatter_flag = 1;
+ dataSize = 108;
+ }
+ }
+#endif
+
+ buf = kmalloc(dataSize, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ pageCode = srb->cmnd[2] & 0x3f;
+
+ if ((pageCode == 0x3F) || (pageCode == 0x1C) ||
+ (pageCode == 0x00) ||
+ (pro_formatter_flag && (pageCode == 0x20))) {
+ if (srb->cmnd[0] == MODE_SENSE) {
+ if ((pageCode == 0x3F) || (pageCode == 0x20)) {
+ ms_mode_sense(chip, srb->cmnd[0],
+ lun, buf, dataSize);
+ } else {
+ dataSize = 4;
+ buf[0] = 0x03;
+ buf[1] = 0x00;
+ if (check_card_wp(chip, lun))
+ buf[2] = 0x80;
+ else
+ buf[2] = 0x00;
+
+ buf[3] = 0x00;
+ }
+ } else {
+ if ((pageCode == 0x3F) || (pageCode == 0x20)) {
+ ms_mode_sense(chip, srb->cmnd[0],
+ lun, buf, dataSize);
+ } else {
+ dataSize = 8;
+ buf[0] = 0x00;
+ buf[1] = 0x06;
+ buf[2] = 0x00;
+ if (check_card_wp(chip, lun))
+ buf[3] = 0x80;
+ else
+ buf[3] = 0x00;
+ buf[4] = 0x00;
+ buf[5] = 0x00;
+ buf[6] = 0x00;
+ buf[7] = 0x00;
+ }
+ }
+ status = TRANSPORT_GOOD;
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ scsi_set_resid(srb, scsi_bufflen(srb));
+ status = TRANSPORT_FAILED;
+ }
+
+ if (status == TRANSPORT_GOOD) {
+ unsigned int len = min_t(unsigned int, scsi_bufflen(srb),
+ dataSize);
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+ }
+ kfree(buf);
+
+ return status;
+}
+
+static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+#ifdef SUPPORT_SD_LOCK
+ struct sd_info *sd_card = &(chip->sd_card);
+#endif
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+ u32 start_sec;
+ u16 sec_cnt;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ if (!check_card_ready(chip, lun) || (get_card_size(chip, lun) == 0)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!(CHK_BIT(chip->lun_mc, lun))) {
+ SET_BIT(chip->lun_mc, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ }
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_erase_status) {
+ /* Access to any card is forbidden
+ * until the SD erase procedure is completed
+ */
+ RTSX_DEBUGP("SD card being erased!\n");
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (get_lun_card(chip, lun) == SD_CARD) {
+ if (sd_card->sd_lock_status & SD_LOCKED) {
+ RTSX_DEBUGP("SD card locked!\n");
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+#endif
+
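+ /* Extract the start sector and sector count from the CDB */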
+ if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) {
+ start_sec = ((u32)srb->cmnd[2] << 24) |
+ ((u32)srb->cmnd[3] << 16) |
+ ((u32)srb->cmnd[4] << 8) | ((u32)srb->cmnd[5]);
+ sec_cnt = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
+ } else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
+ start_sec = ((u32)(srb->cmnd[1] & 0x1F) << 16) |
+ ((u32)srb->cmnd[2] << 8) | ((u32)srb->cmnd[3]);
+ sec_cnt = srb->cmnd[4];
+ } else if ((srb->cmnd[0] == VENDOR_CMND) &&
+ (srb->cmnd[1] == SCSI_APP_CMD) &&
+ ((srb->cmnd[2] == PP_READ10) || (srb->cmnd[2] == PP_WRITE10))) {
+ start_sec = ((u32)srb->cmnd[4] << 24) |
+ ((u32)srb->cmnd[5] << 16) |
+ ((u32)srb->cmnd[6] << 8) | ((u32)srb->cmnd[7]);
+ sec_cnt = ((u16)(srb->cmnd[9]) << 8) | srb->cmnd[10];
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ /* In some tests, we may receive a start_sec like 0xFFFFFFFF.
+ * In that case, start_sec + sec_cnt would overflow, so we
+ * need to check start_sec first
+ */
+ if ((start_sec > get_card_size(chip, lun)) ||
+ ((start_sec + sec_cnt) > get_card_size(chip, lun))) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (sec_cnt == 0) {
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+ }
+
+ if (chip->rw_fail_cnt[lun] == 3) {
+ RTSX_DEBUGP("read/write fail three times in succession\n");
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ else
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+ if (check_card_wp(chip, lun)) {
+ RTSX_DEBUGP("Write protected card!\n");
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ retval = card_rw(srb, chip, start_sec, sec_cnt);
+ if (retval != STATUS_SUCCESS) {
+ if (chip->need_release & chip->lun2card[lun]) {
+ chip->rw_fail_cnt[lun] = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ } else {
+ chip->rw_fail_cnt[lun]++;
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ else
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ }
+ retval = TRANSPORT_FAILED;
+ TRACE_GOTO(chip, Exit);
+ } else {
+ chip->rw_fail_cnt[lun] = 0;
+ retval = TRANSPORT_GOOD;
+ }
+
+ scsi_set_resid(srb, 0);
+
+Exit:
+ return retval;
+}
+
+static int read_format_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned char *buf;
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned int buf_len;
+ u8 card = get_lun_card(chip, lun);
+ u32 card_size;
+ int desc_cnt;
+ int i = 0;
+
+ if (!check_card_ready(chip, lun)) {
+ if (!chip->mspro_formatter_enable) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ buf_len = (scsi_bufflen(srb) > 12) ? 0x14 : 12;
+
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ buf[i++] = 0;
+ buf[i++] = 0;
+ buf[i++] = 0;
+
+ /* Capacity List Length */
+ if ((buf_len > 12) && chip->mspro_formatter_enable &&
+ (chip->lun2card[lun] & MS_CARD) &&
+ (!card || (card == MS_CARD))) {
+ buf[i++] = 0x10;
+ desc_cnt = 2;
+ } else {
+ buf[i++] = 0x08;
+ desc_cnt = 1;
+ }
+
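+ /* Each capacity descriptor is 8 bytes: a 4-byte block count, a 1-byte descriptor code and a 3-byte block length (0x000200 = 512 bytes) */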
+ while (desc_cnt) {
+ if (check_card_ready(chip, lun)) {
+ card_size = get_card_size(chip, lun);
+ buf[i++] = (unsigned char)(card_size >> 24);
+ buf[i++] = (unsigned char)(card_size >> 16);
+ buf[i++] = (unsigned char)(card_size >> 8);
+ buf[i++] = (unsigned char)card_size;
+
+ if (desc_cnt == 2)
+ buf[i++] = 2;
+ else
+ buf[i++] = 0;
+ } else {
+ buf[i++] = 0xFF;
+ buf[i++] = 0xFF;
+ buf[i++] = 0xFF;
+ buf[i++] = 0xFF;
+
+ if (desc_cnt == 2)
+ buf[i++] = 3;
+ else
+ buf[i++] = 0;
+ }
+
+ buf[i++] = 0x00;
+ buf[i++] = 0x02;
+ buf[i++] = 0x00;
+
+ desc_cnt--;
+ }
+
+ buf_len = min_t(unsigned int, scsi_bufflen(srb), buf_len);
+ rtsx_stor_set_xfer_buf(buf, buf_len, srb);
+ kfree(buf);
+
+ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned char *buf;
+ unsigned int lun = SCSI_LUN(srb);
+ u32 card_size;
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!(CHK_BIT(chip->lun_mc, lun))) {
+ SET_BIT(chip->lun_mc, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ }
+
+ buf = kmalloc(8, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
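+ /* Report the last addressable LBA (card_size - 1) and a 512-byte block length */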
+ card_size = get_card_size(chip, lun);
+ buf[0] = (unsigned char)((card_size - 1) >> 24);
+ buf[1] = (unsigned char)((card_size - 1) >> 16);
+ buf[2] = (unsigned char)((card_size - 1) >> 8);
+ buf[3] = (unsigned char)(card_size - 1);
+
+ buf[4] = 0x00;
+ buf[5] = 0x00;
+ buf[6] = 0x02;
+ buf[7] = 0x00;
+
+ rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ kfree(buf);
+
+ scsi_set_resid(srb, 0);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short len, i;
+ int retval;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ len = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ for (i = 0; i < len; i++) {
+ retval = spi_read_eeprom(chip, i, buf + i);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short len, i;
+ int retval;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ len = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (len == 511) {
+ retval = spi_erase_eeprom_chip(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ } else {
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb),
+ len);
+ buf = vmalloc(len);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rtsx_stor_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ for (i = 0; i < len; i++) {
+ retval = spi_write_eeprom(chip, i, buf[i]);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ vfree(buf);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = ((u16)srb->cmnd[2] << 8) | srb->cmnd[3];
+ len = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+
+ if (addr < 0xFC00) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ for (i = 0; i < len; i++) {
+ retval = rtsx_read_register(chip, addr + i, buf + i);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = ((u16)srb->cmnd[2] << 8) | srb->cmnd[3];
+ len = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+
+ if (addr < 0xFC00) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
+ buf = vmalloc(len);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rtsx_stor_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ for (i = 0; i < len; i++) {
+ retval = rtsx_write_register(chip, addr + i, 0xFF, buf[i]);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int get_sd_csd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (get_lun_card(chip, lun) != SD_CARD) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ rtsx_stor_set_xfer_buf(sd_card->raw_csd, scsi_bufflen(srb), srb);
+
+ return TRANSPORT_GOOD;
+}
+
+static int toggle_gpio_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ u8 gpio = srb->cmnd[2];
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ if (gpio > 3)
+ gpio = 1;
+ toggle_gpio(chip, gpio);
+
+ return TRANSPORT_GOOD;
+}
+
+#ifdef _MSG_TRACE
+static int trace_msg_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned char *ptr, *buf = NULL;
+ int i, msg_cnt;
+ u8 clear;
+ unsigned int buf_len;
+
+ buf_len = 4 + ((2 + MSG_FUNC_LEN + MSG_FILE_LEN + TIME_VAL_LEN) *
+ TRACE_ITEM_CNT);
+
+ if ((scsi_bufflen(srb) < buf_len) || (scsi_sglist(srb) == NULL)) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ clear = srb->cmnd[2];
+
+ buf = vmalloc(scsi_bufflen(srb));
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+ ptr = buf;
+
+ if (chip->trace_msg[chip->msg_idx].valid)
+ msg_cnt = TRACE_ITEM_CNT;
+ else
+ msg_cnt = chip->msg_idx;
+
+ *(ptr++) = (u8)(msg_cnt >> 24);
+ *(ptr++) = (u8)(msg_cnt >> 16);
+ *(ptr++) = (u8)(msg_cnt >> 8);
+ *(ptr++) = (u8)msg_cnt;
+ RTSX_DEBUGP("Trace message count is %d\n", msg_cnt);
+
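+ /* Dump trace entries from newest to oldest, wrapping around the ring buffer */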
+ for (i = 1; i <= msg_cnt; i++) {
+ int j, idx;
+
+ idx = chip->msg_idx - i;
+ if (idx < 0)
+ idx += TRACE_ITEM_CNT;
+
+ *(ptr++) = (u8)(chip->trace_msg[idx].line >> 8);
+ *(ptr++) = (u8)(chip->trace_msg[idx].line);
+ for (j = 0; j < MSG_FUNC_LEN; j++)
+ *(ptr++) = chip->trace_msg[idx].func[j];
+
+ for (j = 0; j < MSG_FILE_LEN; j++)
+ *(ptr++) = chip->trace_msg[idx].file[j];
+
+ for (j = 0; j < TIME_VAL_LEN; j++)
+ *(ptr++) = chip->trace_msg[idx].timeval_buf[j];
+ }
+
+ rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ vfree(buf);
+
+ if (clear) {
+ chip->msg_idx = 0;
+ for (i = 0; i < TRACE_ITEM_CNT; i++)
+ chip->trace_msg[i].valid = 0;
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+#endif
+
+static int read_host_reg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ u8 addr, buf[4];
+ u32 val;
+ unsigned int len;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = srb->cmnd[4];
+
+ val = rtsx_readl(chip, addr);
+ RTSX_DEBUGP("Host register (0x%x): 0x%x\n", addr, val);
+
+ buf[0] = (u8)(val >> 24);
+ buf[1] = (u8)(val >> 16);
+ buf[2] = (u8)(val >> 8);
+ buf[3] = (u8)val;
+
+ len = min_t(unsigned int, scsi_bufflen(srb), 4);
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_host_reg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ u8 addr, buf[4];
+ u32 val;
+ unsigned int len;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = srb->cmnd[4];
+
+ len = min_t(unsigned int, scsi_bufflen(srb), 4);
+ rtsx_stor_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ val = ((u32)buf[0] << 24) | ((u32)buf[1] << 16) | ((u32)buf[2]
+ << 8) | buf[3];
+
+ rtsx_writel(chip, addr, val);
+
+ return TRANSPORT_GOOD;
+}
+
+static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned lun = SCSI_LUN(srb);
+
+ if (srb->cmnd[3] == 1) {
+ /* Variable Clock */
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct sd_info *sd_card = &(chip->sd_card);
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ switch (srb->cmnd[4]) {
+ case XD_CARD:
+ xd_card->xd_clock = srb->cmnd[5];
+ break;
+
+ case SD_CARD:
+ sd_card->sd_clock = srb->cmnd[5];
+ break;
+
+ case MS_CARD:
+ ms_card->ms_clock = srb->cmnd[5];
+ break;
+
+ default:
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ } else if (srb->cmnd[3] == 2) {
+ if (srb->cmnd[4]) {
+ chip->blink_led = 1;
+ } else {
+ int retval;
+
+ chip->blink_led = 0;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en &&
+ (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ turn_off_led(chip, LED_GPIO);
+ }
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int get_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+
+ if (srb->cmnd[3] == 1) {
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct sd_info *sd_card = &(chip->sd_card);
+ struct ms_info *ms_card = &(chip->ms_card);
+ u8 tmp;
+
+ switch (srb->cmnd[4]) {
+ case XD_CARD:
+ tmp = (u8)(xd_card->xd_clock);
+ break;
+
+ case SD_CARD:
+ tmp = (u8)(sd_card->sd_clock);
+ break;
+
+ case MS_CARD:
+ tmp = (u8)(ms_card->ms_clock);
+ break;
+
+ default:
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ rtsx_stor_set_xfer_buf(&tmp, 1, srb);
+ } else if (srb->cmnd[3] == 2) {
+ u8 tmp = chip->blink_led;
+ rtsx_stor_set_xfer_buf(&tmp, 1, srb);
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int dma_access_ring_buffer(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ unsigned int lun = SCSI_LUN(srb);
+ u16 len;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
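+ /* Transfer length comes from CDB bytes 4-5, clamped to the SCSI buffer length */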
+ len = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5];
+ len = min_t(u16, len, scsi_bufflen(srb));
+
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ RTSX_DEBUGP("Read from device\n");
+ else
+ RTSX_DEBUGP("Write to device\n");
+
+ retval = rtsx_transfer_data(chip, 0, scsi_sglist(srb), len,
+ scsi_sg_count(srb), srb->sc_data_direction, 1000);
+ if (retval < 0) {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ else
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ scsi_set_resid(srb, 0);
+
+ return TRANSPORT_GOOD;
+}
+
+static int get_dev_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ struct ms_info *ms_card = &(chip->ms_card);
+ int buf_len;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 card = get_lun_card(chip, lun);
+ u8 status[32];
+#ifdef SUPPORT_OCP
+ u8 oc_now_mask = 0, oc_ever_mask = 0;
+#endif
+
+ memset(status, 0, 32);
+
+ status[0] = (u8)(chip->product_id);
+ status[1] = chip->ic_version;
+
+ if (chip->auto_delink_en)
+ status[2] = 0x10;
+ else
+ status[2] = 0x00;
+
+ status[3] = 20;
+ status[4] = 10;
+ status[5] = 05;
+ status[6] = 21;
+
+ if (chip->card_wp)
+ status[7] = 0x20;
+ else
+ status[7] = 0x00;
+
+#ifdef SUPPORT_OCP
+ status[8] = 0;
+ if (CHECK_LUN_MODE(chip,
+ SD_MS_2LUN) && (chip->lun2card[lun] == MS_CARD)) {
+ oc_now_mask = MS_OC_NOW;
+ oc_ever_mask = MS_OC_EVER;
+ } else {
+ oc_now_mask = SD_OC_NOW;
+ oc_ever_mask = SD_OC_EVER;
+ }
+
+ if (chip->ocp_stat & oc_now_mask)
+ status[8] |= 0x02;
+
+ if (chip->ocp_stat & oc_ever_mask)
+ status[8] |= 0x01;
+#endif
+
+ if (card == SD_CARD) {
+ if (CHK_SD(sd_card)) {
+ if (CHK_SD_HCXC(sd_card)) {
+ if (sd_card->capacity > 0x4000000)
+ status[0x0E] = 0x02;
+ else
+ status[0x0E] = 0x01;
+ } else {
+ status[0x0E] = 0x00;
+ }
+
+ if (CHK_SD_SDR104(sd_card))
+ status[0x0F] = 0x03;
+ else if (CHK_SD_DDR50(sd_card))
+ status[0x0F] = 0x04;
+ else if (CHK_SD_SDR50(sd_card))
+ status[0x0F] = 0x02;
+ else if (CHK_SD_HS(sd_card))
+ status[0x0F] = 0x01;
+ else
+ status[0x0F] = 0x00;
+ } else {
+ if (CHK_MMC_SECTOR_MODE(sd_card))
+ status[0x0E] = 0x01;
+ else
+ status[0x0E] = 0x00;
+
+ if (CHK_MMC_DDR52(sd_card))
+ status[0x0F] = 0x03;
+ else if (CHK_MMC_52M(sd_card))
+ status[0x0F] = 0x02;
+ else if (CHK_MMC_26M(sd_card))
+ status[0x0F] = 0x01;
+ else
+ status[0x0F] = 0x00;
+ }
+ } else if (card == MS_CARD) {
+ if (CHK_MSPRO(ms_card)) {
+ if (CHK_MSXC(ms_card))
+ status[0x0E] = 0x01;
+ else
+ status[0x0E] = 0x00;
+
+ if (CHK_HG8BIT(ms_card))
+ status[0x0F] = 0x01;
+ else
+ status[0x0F] = 0x00;
+ }
+ }
+
+#ifdef SUPPORT_SD_LOCK
+ if (card == SD_CARD) {
+ status[0x17] = 0x80;
+ if (sd_card->sd_erase_status)
+ status[0x17] |= 0x01;
+ if (sd_card->sd_lock_status & SD_LOCKED) {
+ status[0x17] |= 0x02;
+ status[0x07] |= 0x40;
+ }
+ if (sd_card->sd_lock_status & SD_PWD_EXIST)
+ status[0x17] |= 0x04;
+ } else {
+ status[0x17] = 0x00;
+ }
+
+ RTSX_DEBUGP("status[0x17] = 0x%x\n", status[0x17]);
+#endif
+
+ status[0x18] = 0x8A;
+ status[0x1A] = 0x28;
+#ifdef SUPPORT_SD_LOCK
+ status[0x1F] = 0x01;
+#endif
+
+ buf_len = min_t(unsigned int, scsi_bufflen(srb), sizeof(status));
+ rtsx_stor_set_xfer_buf(status, buf_len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int set_chip_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int phy_debug_mode;
+ int retval;
+ u16 reg;
+
+ if (!CHECK_PID(chip, 0x5208)) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ phy_debug_mode = (int)(srb->cmnd[3]);
+
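+ /* Debug mode clears CDRESUMECTL bits, masks bus interrupts and sets bit 0 of PHY register 0x1C; normal mode restores them */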
+ if (phy_debug_mode) {
+ chip->phy_debug_mode = 1;
+ retval = rtsx_write_register(chip, CDRESUMECTL, 0x77, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ rtsx_disable_bus_int(chip);
+
+ retval = rtsx_read_phy_register(chip, 0x1C, &reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ reg |= 0x0001;
+ retval = rtsx_write_phy_register(chip, 0x1C, reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ chip->phy_debug_mode = 0;
+ retval = rtsx_write_register(chip, CDRESUMECTL, 0x77, 0x77);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ rtsx_enable_bus_int(chip);
+
+ retval = rtsx_read_phy_register(chip, 0x1C, &reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ reg &= 0xFFFE;
+ retval = rtsx_write_phy_register(chip, 0x1C, reg);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval = STATUS_SUCCESS;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 cmd_type, mask, value, idx;
+ u16 addr;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
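+ /* Batch command buffer access: build a command batch, send it, or read back a response byte */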
+ switch (srb->cmnd[3]) {
+ case INIT_BATCHCMD:
+ rtsx_init_cmd(chip);
+ break;
+
+ case ADD_BATCHCMD:
+ cmd_type = srb->cmnd[4];
+ if (cmd_type > 2) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ addr = (srb->cmnd[5] << 8) | srb->cmnd[6];
+ mask = srb->cmnd[7];
+ value = srb->cmnd[8];
+ rtsx_add_cmd(chip, cmd_type, addr, mask, value);
+ break;
+
+ case SEND_BATCHCMD:
+ retval = rtsx_send_cmd(chip, 0, 1000);
+ break;
+
+ case GET_BATCHRSP:
+ idx = srb->cmnd[4];
+ value = *(rtsx_get_cmd_data(chip) + idx);
+ if (scsi_bufflen(srb) < 1) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ rtsx_stor_set_xfer_buf(&value, 1, srb);
+ scsi_set_resid(srb, 0);
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int suit_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int result;
+
+ switch (srb->cmnd[3]) {
+ case INIT_BATCHCMD:
+ case ADD_BATCHCMD:
+ case SEND_BATCHCMD:
+ case GET_BATCHRSP:
+ result = rw_mem_cmd_buf(srb, chip);
+ break;
+ default:
+ result = TRANSPORT_ERROR;
+ }
+
+ return result;
+}
+
+static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+ u16 val;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+ len = ((u16)srb->cmnd[6] << 8) | srb->cmnd[7];
+
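+ /* PHY registers are 16 bits wide, so round the length down to an even byte count */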
+ if (len % 2)
+ len -= len % 2;
+
+ if (len) {
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ for (i = 0; i < len / 2; i++) {
+ retval = rtsx_read_phy_register(chip, addr + i, &val);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ buf[2*i] = (u8)(val >> 8);
+ buf[2*i+1] = (u8)val;
+ }
+
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb),
+ len);
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+ u16 val;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+ len = ((u16)srb->cmnd[6] << 8) | srb->cmnd[7];
+
+ if (len % 2)
+ len -= len % 2;
+
+ if (len) {
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb),
+ len);
+
+ buf = vmalloc(len);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rtsx_stor_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ for (i = 0; i < len / 2; i++) {
+ val = ((u16)buf[2*i] << 8) | buf[2*i+1];
+ retval = rtsx_write_phy_register(chip, addr + i, val);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ vfree(buf);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short addr;
+ int retval;
+ u8 mode;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ mode = srb->cmnd[3];
+ addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+
+ if (mode == 0) {
+ retval = spi_erase_eeprom_chip(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ } else if (mode == 1) {
+ retval = spi_erase_eeprom_byte(chip, addr);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ } else {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+ len = ((u16)srb->cmnd[6] << 8) | srb->cmnd[7];
+
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ for (i = 0; i < len; i++) {
+ retval = spi_read_eeprom(chip, addr + i, buf + i);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
+ len = ((u16)srb->cmnd[6] << 8) | srb->cmnd[7];
+
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
+ buf = vmalloc(len);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rtsx_stor_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ for (i = 0; i < len; i++) {
+ retval = spi_write_eeprom(chip, addr + i, buf[i]);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ u8 addr, len, i;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = srb->cmnd[4];
+ len = srb->cmnd[5];
+
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ for (i = 0; i < len; i++) {
+ retval = rtsx_read_efuse(chip, addr + i, buf + i);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ len = (u8)min_t(unsigned int, scsi_bufflen(srb), len);
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval, result = TRANSPORT_GOOD;
+ u16 val;
+ u8 addr, len, i;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ addr = srb->cmnd[4];
+ len = srb->cmnd[5];
+
+ len = (u8)min_t(unsigned int, scsi_bufflen(srb), len);
+ buf = vmalloc(len);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rtsx_stor_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ retval = rtsx_force_power_on(chip, SSC_PDCTL);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ TRACE_RET(chip, TRANSPORT_ERROR);
+ }
+
+ if (chip->asic_code) {
+ retval = rtsx_read_phy_register(chip, 0x08, &val);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ TRACE_RET(chip, TRANSPORT_ERROR);
+ }
+
+ retval = rtsx_write_register(chip, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, LDO_OFF);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ TRACE_RET(chip, TRANSPORT_ERROR);
+ }
+
+ wait_timeout(600);
+
+ retval = rtsx_write_phy_register(chip, 0x08,
+ 0x4C00 | chip->phy_voltage);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ TRACE_RET(chip, TRANSPORT_ERROR);
+ }
+
+ retval = rtsx_write_register(chip, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, LDO_ON);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ TRACE_RET(chip, TRANSPORT_ERROR);
+ }
+
+ wait_timeout(600);
+ }
+
+ retval = card_power_on(chip, SPI_CARD);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ TRACE_RET(chip, TRANSPORT_ERROR);
+ }
+
+ wait_timeout(50);
+
+ for (i = 0; i < len; i++) {
+ retval = rtsx_write_efuse(chip, addr + i, buf[i]);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ result = TRANSPORT_FAILED;
+ TRACE_GOTO(chip, Exit);
+ }
+ }
+
+Exit:
+ vfree(buf);
+
+ retval = card_power_off(chip, SPI_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ if (chip->asic_code) {
+ retval = rtsx_write_register(chip, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, LDO_OFF);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ wait_timeout(600);
+
+ retval = rtsx_write_phy_register(chip, 0x08, val);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = rtsx_write_register(chip, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, LDO_ON);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+ }
+
+ return result;
+}
+
+static int read_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ u8 func, func_max;
+ u16 addr, len;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ func = srb->cmnd[3];
+ addr = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5];
+ len = ((u16)(srb->cmnd[6]) << 8) | srb->cmnd[7];
+
+ RTSX_DEBUGP("%s: func = %d, addr = 0x%x, len = %d\n", __func__, func,
+ addr, len);
+
+ if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip))
+ func_max = 1;
+ else
+ func_max = 0;
+
+ if (func > func_max) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = rtsx_read_cfg_seq(chip, func, addr, buf, len);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ vfree(buf);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ len = (u16)min_t(unsigned int, scsi_bufflen(srb), len);
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ u8 func, func_max;
+ u16 addr, len;
+ u8 *buf;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ func = srb->cmnd[3];
+ addr = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5];
+ len = ((u16)(srb->cmnd[6]) << 8) | srb->cmnd[7];
+
+ RTSX_DEBUGP("%s: func = %d, addr = 0x%x\n", __func__, func, addr);
+
+ if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip))
+ func_max = 1;
+ else
+ func_max = 0;
+
+ if (func > func_max) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rtsx_stor_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ retval = rtsx_write_cfg_seq(chip, func, addr, buf, len);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
+ vfree(buf);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int app_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int result;
+
+ switch (srb->cmnd[2]) {
+ case PP_READ10:
+ case PP_WRITE10:
+ result = read_write(srb, chip);
+ break;
+
+ case READ_HOST_REG:
+ result = read_host_reg(srb, chip);
+ break;
+
+ case WRITE_HOST_REG:
+ result = write_host_reg(srb, chip);
+ break;
+
+ case GET_VAR:
+ result = get_variable(srb, chip);
+ break;
+
+ case SET_VAR:
+ result = set_variable(srb, chip);
+ break;
+
+ case DMA_READ:
+ case DMA_WRITE:
+ result = dma_access_ring_buffer(srb, chip);
+ break;
+
+ case READ_PHY:
+ result = read_phy_register(srb, chip);
+ break;
+
+ case WRITE_PHY:
+ result = write_phy_register(srb, chip);
+ break;
+
+ case ERASE_EEPROM2:
+ result = erase_eeprom2(srb, chip);
+ break;
+
+ case READ_EEPROM2:
+ result = read_eeprom2(srb, chip);
+ break;
+
+ case WRITE_EEPROM2:
+ result = write_eeprom2(srb, chip);
+ break;
+
+ case READ_EFUSE:
+ result = read_efuse(srb, chip);
+ break;
+
+ case WRITE_EFUSE:
+ result = write_efuse(srb, chip);
+ break;
+
+ case READ_CFG:
+ result = read_cfg_byte(srb, chip);
+ break;
+
+ case WRITE_CFG:
+ result = write_cfg_byte(srb, chip);
+ break;
+
+ case SET_CHIP_MODE:
+ result = set_chip_mode(srb, chip);
+ break;
+
+ case SUIT_CMD:
+ result = suit_cmd(srb, chip);
+ break;
+
+ case GET_DEV_STATUS:
+ result = get_dev_status(srb, chip);
+ break;
+
+ default:
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return result;
+}
+
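+/*
+ * Build the 16-byte vendor status block returned by the READ_STATUS vendor
+ * command: vendor/product IDs, the addressed LUN, card type, LUN count,
+ * card present/ready flags and the IC version.
+ * (Field meanings are inferred from the assignments below.)
+ */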
+static int read_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ u8 rtsx_status[16];
+ int buf_len;
+ unsigned int lun = SCSI_LUN(srb);
+
+ rtsx_status[0] = (u8)(chip->vendor_id >> 8);
+ rtsx_status[1] = (u8)(chip->vendor_id);
+
+ rtsx_status[2] = (u8)(chip->product_id >> 8);
+ rtsx_status[3] = (u8)(chip->product_id);
+
+ rtsx_status[4] = (u8)lun;
+
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
+ if (chip->lun2card[lun] == SD_CARD)
+ rtsx_status[5] = 2;
+ else
+ rtsx_status[5] = 3;
+ } else {
+ if (chip->card_exist) {
+ if (chip->card_exist & XD_CARD)
+ rtsx_status[5] = 4;
+ else if (chip->card_exist & SD_CARD)
+ rtsx_status[5] = 2;
+ else if (chip->card_exist & MS_CARD)
+ rtsx_status[5] = 3;
+ else
+ rtsx_status[5] = 7;
+ } else {
+ rtsx_status[5] = 7;
+ }
+ }
+
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
+ rtsx_status[6] = 2;
+ else
+ rtsx_status[6] = 1;
+
+ rtsx_status[7] = (u8)(chip->product_id);
+ rtsx_status[8] = chip->ic_version;
+
+ if (check_card_exist(chip, lun))
+ rtsx_status[9] = 1;
+ else
+ rtsx_status[9] = 0;
+
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN))
+ rtsx_status[10] = 0;
+ else
+ rtsx_status[10] = 1;
+
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
+ if (chip->lun2card[lun] == SD_CARD)
+ rtsx_status[11] = SD_CARD;
+ else
+ rtsx_status[11] = MS_CARD;
+ } else {
+ rtsx_status[11] = XD_CARD | SD_CARD | MS_CARD;
+ }
+
+ if (check_card_ready(chip, lun))
+ rtsx_status[12] = 1;
+ else
+ rtsx_status[12] = 0;
+
+ if (get_lun_card(chip, lun) == XD_CARD) {
+ rtsx_status[13] = 0x40;
+ } else if (get_lun_card(chip, lun) == SD_CARD) {
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ rtsx_status[13] = 0x20;
+ if (CHK_SD(sd_card)) {
+ if (CHK_SD_HCXC(sd_card))
+ rtsx_status[13] |= 0x04;
+ if (CHK_SD_HS(sd_card))
+ rtsx_status[13] |= 0x02;
+ } else {
+ rtsx_status[13] |= 0x08;
+ if (CHK_MMC_52M(sd_card))
+ rtsx_status[13] |= 0x02;
+ if (CHK_MMC_SECTOR_MODE(sd_card))
+ rtsx_status[13] |= 0x04;
+ }
+ } else if (get_lun_card(chip, lun) == MS_CARD) {
+ struct ms_info *ms_card = &(chip->ms_card);
+
+ if (CHK_MSPRO(ms_card)) {
+ rtsx_status[13] = 0x38;
+ if (CHK_HG8BIT(ms_card))
+ rtsx_status[13] |= 0x04;
+#ifdef SUPPORT_MSXC
+ if (CHK_MSXC(ms_card))
+ rtsx_status[13] |= 0x01;
+#endif
+ } else {
+ rtsx_status[13] = 0x30;
+ }
+ } else {
+ if (CHECK_LUN_MODE(chip, DEFAULT_SINGLE)) {
+#ifdef SUPPORT_SDIO
+ if (chip->sd_io && chip->sd_int)
+ rtsx_status[13] = 0x60;
+ else
+ rtsx_status[13] = 0x70;
+#else
+ rtsx_status[13] = 0x70;
+#endif
+ } else {
+ if (chip->lun2card[lun] == SD_CARD)
+ rtsx_status[13] = 0x20;
+ else
+ rtsx_status[13] = 0x30;
+ }
+ }
+
+ rtsx_status[14] = 0x78;
+ if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip))
+ rtsx_status[15] = 0x83;
+ else
+ rtsx_status[15] = 0x82;
+
+ buf_len = min_t(unsigned int, scsi_bufflen(srb), sizeof(rtsx_status));
+ rtsx_stor_set_xfer_buf(rtsx_status, buf_len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int get_card_bus_width(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ u8 card, bus_width;
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ card = get_lun_card(chip, lun);
+ if ((card == SD_CARD) || (card == MS_CARD)) {
+ bus_width = chip->card_bus_width[lun];
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ rtsx_stor_set_xfer_buf(&bus_width, scsi_bufflen(srb), srb);
+
+ return TRANSPORT_GOOD;
+}
+
+static int spi_vendor_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int result;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 gpio_dir;
+
+ if (CHECK_PID(chip, 0x5208) || CHECK_PID(chip, 0x5288)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ rtsx_force_power_on(chip, SSC_PDCTL);
+
+ rtsx_read_register(chip, CARD_GPIO_DIR, &gpio_dir);
+ rtsx_write_register(chip, CARD_GPIO_DIR, 0x07, gpio_dir & 0x06);
+
+ switch (srb->cmnd[2]) {
+ case SCSI_SPI_GETSTATUS:
+ result = spi_get_status(srb, chip);
+ break;
+
+ case SCSI_SPI_SETPARAMETER:
+ result = spi_set_parameter(srb, chip);
+ break;
+
+ case SCSI_SPI_READFALSHID:
+ result = spi_read_flash_id(srb, chip);
+ break;
+
+ case SCSI_SPI_READFLASH:
+ result = spi_read_flash(srb, chip);
+ break;
+
+ case SCSI_SPI_WRITEFLASH:
+ result = spi_write_flash(srb, chip);
+ break;
+
+ case SCSI_SPI_WRITEFLASHSTATUS:
+ result = spi_write_flash_status(srb, chip);
+ break;
+
+ case SCSI_SPI_ERASEFLASH:
+ result = spi_erase_flash(srb, chip);
+ break;
+
+ default:
+ rtsx_write_register(chip, CARD_GPIO_DIR, 0x07, gpio_dir);
+
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ rtsx_write_register(chip, CARD_GPIO_DIR, 0x07, gpio_dir);
+
+ if (result != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ return TRANSPORT_GOOD;
+}
+
+static int vendor_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int result;
+
+ switch (srb->cmnd[1]) {
+ case READ_STATUS:
+ result = read_status(srb, chip);
+ break;
+
+ case READ_MEM:
+ result = read_mem(srb, chip);
+ break;
+
+ case WRITE_MEM:
+ result = write_mem(srb, chip);
+ break;
+
+ case READ_EEPROM:
+ result = read_eeprom(srb, chip);
+ break;
+
+ case WRITE_EEPROM:
+ result = write_eeprom(srb, chip);
+ break;
+
+ case TOGGLE_GPIO:
+ result = toggle_gpio_cmd(srb, chip);
+ break;
+
+ case GET_SD_CSD:
+ result = get_sd_csd(srb, chip);
+ break;
+
+ case GET_BUS_WIDTH:
+ result = get_card_bus_width(srb, chip);
+ break;
+
+#ifdef _MSG_TRACE
+ case TRACE_MSG:
+ result = trace_msg_cmd(srb, chip);
+ break;
+#endif
+
+ case SCSI_APP_CMD:
+ result = app_cmd(srb, chip);
+ break;
+
+ case SPI_VENDOR_COMMAND:
+ result = spi_vendor_cmd(srb, chip);
+ break;
+
+ default:
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return result;
+}
+
+#if !defined(LED_AUTO_BLINK) && !defined(REGULAR_BLINK)
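+/* Software LED blink: toggle the LED GPIO roughly once every
+ * GPIO_TOGGLE_THRESHOLD sectors of READ/WRITE traffic on this LUN. */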
+void led_shine(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ u16 sec_cnt;
+
+ if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10))
+ sec_cnt = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
+ else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6))
+ sec_cnt = srb->cmnd[4];
+ else
+ return;
+
+ if (chip->rw_cap[lun] >= GPIO_TOGGLE_THRESHOLD) {
+ toggle_gpio(chip, LED_GPIO);
+ chip->rw_cap[lun] = 0;
+ } else {
+ chip->rw_cap[lun] += sec_cnt;
+ }
+}
+#endif
+
+static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval, quick_format;
+
+ if (get_lun_card(chip, lun) != MS_CARD) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
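+ /* CDB bytes 3-7 must carry the ASCII signature "MGfmt" */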
+ if ((srb->cmnd[3] != 0x4D) || (srb->cmnd[4] != 0x47) ||
+ (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) ||
+ (srb->cmnd[7] != 0x74)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+
+ if (!check_card_ready(chip, lun) ||
+ (get_card_size(chip, lun) == 0)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ if (srb->cmnd[8] & 0x01)
+ quick_format = 0;
+ else
+ quick_format = 1;
+
+ if (!(chip->card_ready & MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (chip->card_wp & MS_CARD) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ retval = mspro_format(srb, chip, MS_SHORT_DATA_LEN, quick_format);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+
+#ifdef SUPPORT_PCGL_1P18
+static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ u8 dev_info_id, data_len;
+ u8 *buf;
+ unsigned int buf_len;
+ int i;
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
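+ /* cmnd[2] must be 0xB0 and CDB bytes 4-7 the ASCII signature "MSID" */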
+ if ((srb->cmnd[2] != 0xB0) || (srb->cmnd[4] != 0x4D) ||
+ (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
+ (srb->cmnd[7] != 0x44)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ dev_info_id = srb->cmnd[3];
+ if ((CHK_MSXC(ms_card) && (dev_info_id == 0x10)) ||
+ (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
+ !CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ /* Leave room for the 12-byte response header plus the information body */
+ if (dev_info_id == 0x15) {
+ buf_len = 0x3C;
+ data_len = 0x3A;
+ } else {
+ buf_len = 0x6C;
+ data_len = 0x6A;
+ }
+
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ i = 0;
+ /* GET Memory Stick Media Information Response Header */
+ buf[i++] = 0x00; /* Data length MSB */
+ buf[i++] = data_len; /* Data length LSB */
+ /* Device Information Type Code */
+ if (CHK_MSXC(ms_card))
+ buf[i++] = 0x03;
+ else
+ buf[i++] = 0x02;
+
+ /* SGM bit */
+ buf[i++] = 0x01;
+ /* Reserved */
+ buf[i++] = 0x00;
+ buf[i++] = 0x00;
+ buf[i++] = 0x00;
+ /* Number of Device Information */
+ buf[i++] = 0x01;
+
+ /* Device Information Body */
+
+ /* Device Information ID Number */
+ buf[i++] = dev_info_id;
+ /* Device Information Length */
+ if (dev_info_id == 0x15)
+ data_len = 0x31;
+ else
+ data_len = 0x61;
+
+ buf[i++] = 0x00; /* Data length MSB */
+ buf[i++] = data_len; /* Data length LSB */
+ /* Valid Bit */
+ buf[i++] = 0x80;
+ if ((dev_info_id == 0x10) || (dev_info_id == 0x13)) {
+ /* System Information */
+ memcpy(buf+i, ms_card->raw_sys_info, 96);
+ } else {
+ /* Model Name */
+ memcpy(buf+i, ms_card->raw_model_name, 48);
+ }
+
+ rtsx_stor_set_xfer_buf(buf, buf_len, srb);
+
+ if (dev_info_id == 0x15)
+ scsi_set_resid(srb, scsi_bufflen(srb)-0x3C);
+ else
+ scsi_set_resid(srb, scsi_bufflen(srb)-0x6C);
+
+ kfree(buf);
+ return STATUS_SUCCESS;
+}
+#endif
+
+static int ms_sp_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval = TRANSPORT_ERROR;
+
+ if (srb->cmnd[2] == MS_FORMAT)
+ retval = ms_format_cmnd(srb, chip);
+#ifdef SUPPORT_PCGL_1P18
+ else if (srb->cmnd[2] == GET_MS_INFORMATION)
+ retval = get_ms_information(srb, chip);
+#endif
+
+ return retval;
+}
+
+#ifdef SUPPORT_CPRM
+static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ int result;
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ sd_cleanup_work(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != SD_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ switch (srb->cmnd[0]) {
+ case SD_PASS_THRU_MODE:
+ result = sd_pass_thru_mode(srb, chip);
+ break;
+
+ case SD_EXECUTE_NO_DATA:
+ result = sd_execute_no_data(srb, chip);
+ break;
+
+ case SD_EXECUTE_READ:
+ result = sd_execute_read_data(srb, chip);
+ break;
+
+ case SD_EXECUTE_WRITE:
+ result = sd_execute_write_data(srb, chip);
+ break;
+
+ case SD_GET_RSP:
+ result = sd_get_cmd_rsp(srb, chip);
+ break;
+
+ case SD_HW_RST:
+ result = sd_hw_rst(srb, chip);
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return result;
+}
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+ u8 key_format;
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ ms_cleanup_work(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (srb->cmnd[7] != KC_MG_R_PRO) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ key_format = srb->cmnd[10] & 0x3F;
+ RTSX_DEBUGP("key_format = 0x%x\n", key_format);
+
+ switch (key_format) {
+ case KF_GET_LOC_EKB:
+ if ((scsi_bufflen(srb) == 0x41C) &&
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x1C)) {
+ retval = mg_get_local_EKB(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_RSP_CHG:
+ if ((scsi_bufflen(srb) == 0x24) &&
+ (srb->cmnd[8] == 0x00) &&
+ (srb->cmnd[9] == 0x24)) {
+ retval = mg_get_rsp_chg(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_GET_ICV:
+ ms_card->mg_entry_num = srb->cmnd[5];
+ if ((scsi_bufflen(srb) == 0x404) &&
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x04) &&
+ (srb->cmnd[2] == 0x00) &&
+ (srb->cmnd[3] == 0x00) &&
+ (srb->cmnd[4] == 0x00) &&
+ (srb->cmnd[5] < 32)) {
+ retval = mg_get_ICV(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+
+static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+ u8 key_format;
+
+ RTSX_DEBUGP("--%s--\n", __func__);
+
+ rtsx_disable_aspm(chip);
+
+ if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ rtsx_exit_ss(chip);
+ wait_timeout(100);
+ }
+ rtsx_set_stat(chip, RTSX_STAT_RUN);
+
+ ms_cleanup_work(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if (check_card_wp(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (srb->cmnd[7] != KC_MG_R_PRO) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ key_format = srb->cmnd[10] & 0x3F;
+ RTSX_DEBUGP("key_format = 0x%x\n", key_format);
+
+ switch (key_format) {
+ case KF_SET_LEAF_ID:
+ if ((scsi_bufflen(srb) == 0x0C) &&
+ (srb->cmnd[8] == 0x00) &&
+ (srb->cmnd[9] == 0x0C)) {
+ retval = mg_set_leaf_id(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_CHG_HOST:
+ if ((scsi_bufflen(srb) == 0x0C) &&
+ (srb->cmnd[8] == 0x00) &&
+ (srb->cmnd[9] == 0x0C)) {
+ retval = mg_chg(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_RSP_HOST:
+ if ((scsi_bufflen(srb) == 0x0C) &&
+ (srb->cmnd[8] == 0x00) &&
+ (srb->cmnd[9] == 0x0C)) {
+ retval = mg_rsp(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_SET_ICV:
+ ms_card->mg_entry_num = srb->cmnd[5];
+ if ((scsi_bufflen(srb) == 0x404) &&
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x04) &&
+ (srb->cmnd[2] == 0x00) &&
+ (srb->cmnd[3] == 0x00) &&
+ (srb->cmnd[4] == 0x00) &&
+ (srb->cmnd[5] < 32)) {
+ retval = mg_set_ICV(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+#endif
+
+int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+#ifdef SUPPORT_SD_LOCK
+ struct sd_info *sd_card = &(chip->sd_card);
+#endif
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int result;
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_erase_status) {
+ /* Block all SCSI commands except for
+ * REQUEST_SENSE and rs_ppstatus
+ */
+ if (!((srb->cmnd[0] == VENDOR_CMND) &&
+ (srb->cmnd[1] == SCSI_APP_CMD) &&
+ (srb->cmnd[2] == GET_DEV_STATUS)) &&
+ (srb->cmnd[0] != REQUEST_SENSE)) {
+ /* Logical Unit Not Ready Format in Progress */
+ set_sense_data(chip, lun, CUR_ERR,
+ 0x02, 0, 0x04, 0x04, 0, 0);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+#endif
+
+ if ((get_lun_card(chip, lun) == MS_CARD) &&
+ (ms_card->format_status == FORMAT_IN_PROGRESS)) {
+ if ((srb->cmnd[0] != REQUEST_SENSE) &&
+ (srb->cmnd[0] != INQUIRY)) {
+ /* Logical Unit Not Ready Format in Progress */
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
+ 0, (u16)(ms_card->progress));
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ switch (srb->cmnd[0]) {
+ case READ_10:
+ case WRITE_10:
+ case READ_6:
+ case WRITE_6:
+ result = read_write(srb, chip);
+#if !defined(LED_AUTO_BLINK) && !defined(REGULAR_BLINK)
+ led_shine(srb, chip);
+#endif
+ break;
+
+ case TEST_UNIT_READY:
+ result = test_unit_ready(srb, chip);
+ break;
+
+ case INQUIRY:
+ result = inquiry(srb, chip);
+ break;
+
+ case READ_CAPACITY:
+ result = read_capacity(srb, chip);
+ break;
+
+ case START_STOP:
+ result = start_stop_unit(srb, chip);
+ break;
+
+ case ALLOW_MEDIUM_REMOVAL:
+ result = allow_medium_removal(srb, chip);
+ break;
+
+ case REQUEST_SENSE:
+ result = request_sense(srb, chip);
+ break;
+
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ result = mode_sense(srb, chip);
+ break;
+
+ case 0x23: /* READ FORMAT CAPACITIES */
+ result = read_format_capacity(srb, chip);
+ break;
+
+ case VENDOR_CMND:
+ result = vendor_cmnd(srb, chip);
+ break;
+
+ case MS_SP_CMND:
+ result = ms_sp_cmnd(srb, chip);
+ break;
+
+#ifdef SUPPORT_CPRM
+ case SD_PASS_THRU_MODE:
+ case SD_EXECUTE_NO_DATA:
+ case SD_EXECUTE_READ:
+ case SD_EXECUTE_WRITE:
+ case SD_GET_RSP:
+ case SD_HW_RST:
+ result = sd_extention_cmnd(srb, chip);
+ break;
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+ case CMD_MSPRO_MG_RKEY:
+ result = mg_report_key(srb, chip);
+ break;
+
+ case CMD_MSPRO_MG_SKEY:
+ result = mg_send_key(srb, chip);
+ break;
+#endif
+
+ case FORMAT_UNIT:
+ case MODE_SELECT:
+ case VERIFY:
+ result = TRANSPORT_GOOD;
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ result = TRANSPORT_FAILED;
+ }
+
+ return result;
+}
diff --git a/drivers/staging/rts5208/rtsx_scsi.h b/drivers/staging/rts5208/rtsx_scsi.h
new file mode 100644
index 000000000000..d1750570dd38
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_scsi.h
@@ -0,0 +1,143 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_SCSI_H
+#define __REALTEK_RTSX_SCSI_H
+
+#include "rtsx.h"
+#include "rtsx_chip.h"
+
+#define MS_SP_CMND 0xFA
+#define MS_FORMAT 0xA0
+#define GET_MS_INFORMATION 0xB0
+
+#define VENDOR_CMND 0xF0
+
+#define READ_STATUS 0x09
+
+#define READ_EEPROM 0x04
+#define WRITE_EEPROM 0x05
+#define READ_MEM 0x0D
+#define WRITE_MEM 0x0E
+#define GET_BUS_WIDTH 0x13
+#define GET_SD_CSD 0x14
+#define TOGGLE_GPIO 0x15
+#define TRACE_MSG 0x18
+
+#define SCSI_APP_CMD 0x10
+
+#define PP_READ10 0x1A
+#define PP_WRITE10 0x0A
+#define READ_HOST_REG 0x1D
+#define WRITE_HOST_REG 0x0D
+#define SET_VAR 0x05
+#define GET_VAR 0x15
+#define DMA_READ 0x16
+#define DMA_WRITE 0x06
+#define GET_DEV_STATUS 0x10
+#define SET_CHIP_MODE 0x27
+#define SUIT_CMD 0xE0
+#define WRITE_PHY 0x07
+#define READ_PHY 0x17
+#define WRITE_EEPROM2 0x03
+#define READ_EEPROM2 0x13
+#define ERASE_EEPROM2 0x23
+#define WRITE_EFUSE 0x04
+#define READ_EFUSE 0x14
+#define WRITE_CFG 0x0E
+#define READ_CFG 0x1E
+
+#define SPI_VENDOR_COMMAND 0x1C
+
+#define SCSI_SPI_GETSTATUS 0x00
+#define SCSI_SPI_SETPARAMETER 0x01
+#define SCSI_SPI_READFALSHID 0x02
+#define SCSI_SPI_READFLASH 0x03
+#define SCSI_SPI_WRITEFLASH 0x04
+#define SCSI_SPI_WRITEFLASHSTATUS 0x05
+#define SCSI_SPI_ERASEFLASH 0x06
+
+#define INIT_BATCHCMD 0x41
+#define ADD_BATCHCMD 0x42
+#define SEND_BATCHCMD 0x43
+#define GET_BATCHRSP 0x44
+
+#define CHIP_NORMALMODE 0x00
+#define CHIP_DEBUGMODE 0x01
+
+/* SD Pass Through Command Extension */
+#define SD_PASS_THRU_MODE 0xD0
+#define SD_EXECUTE_NO_DATA 0xD1
+#define SD_EXECUTE_READ 0xD2
+#define SD_EXECUTE_WRITE 0xD3
+#define SD_GET_RSP 0xD4
+#define SD_HW_RST 0xD6
+
+#ifdef SUPPORT_MAGIC_GATE
+#define CMD_MSPRO_MG_RKEY 0xA4 /* Report Key Command */
+#define CMD_MSPRO_MG_SKEY 0xA3 /* Send Key Command */
+
+/* CBWCB field: key class */
+#define KC_MG_R_PRO 0xBE /* MG-R PRO */
+
+/* CBWCB field: key format */
+#define KF_SET_LEAF_ID 0x31 /* Set Leaf ID */
+#define KF_GET_LOC_EKB 0x32 /* Get Local EKB */
+#define KF_CHG_HOST 0x33 /* Challenge (host) */
+#define KF_RSP_CHG 0x34 /* Response and Challenge (device) */
+#define KF_RSP_HOST 0x35 /* Response (host) */
+#define KF_GET_ICV 0x36 /* Get ICV */
+#define KF_SET_ICV 0x37 /* Set ICV */
+#endif
+
+/* Sense type */
+#define SENSE_TYPE_NO_SENSE 0
+#define SENSE_TYPE_MEDIA_CHANGE 1
+#define SENSE_TYPE_MEDIA_NOT_PRESENT 2
+#define SENSE_TYPE_MEDIA_LBA_OVER_RANGE 3
+#define SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT 4
+#define SENSE_TYPE_MEDIA_WRITE_PROTECT 5
+#define SENSE_TYPE_MEDIA_INVALID_CMD_FIELD 6
+#define SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR 7
+#define SENSE_TYPE_MEDIA_WRITE_ERR 8
+#define SENSE_TYPE_FORMAT_IN_PROGRESS 9
+#define SENSE_TYPE_FORMAT_CMD_FAILED 10
+#ifdef SUPPORT_MAGIC_GATE
+#define SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB 0x0b
+#define SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN 0x0c
+#define SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM 0x0d
+#define SENSE_TYPE_MG_WRITE_ERR 0x0e
+#endif
+#ifdef SUPPORT_SD_LOCK
+/* FOR Locked SD card*/
+#define SENSE_TYPE_MEDIA_READ_FORBIDDEN 0x10
+#endif
+
+void scsi_show_command(struct scsi_cmnd *srb);
+void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type);
+void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
+ u8 sense_key, u32 info, u8 asc, u8 ascq,
+ u8 sns_key_info0, u16 sns_key_info1);
+int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+
+#endif /* __REALTEK_RTSX_SCSI_H */
diff --git a/drivers/staging/rts5208/rtsx_sys.h b/drivers/staging/rts5208/rtsx_sys.h
new file mode 100644
index 000000000000..0b6b4d4f1fea
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_sys.h
@@ -0,0 +1,50 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __RTSX_SYS_H
+#define __RTSX_SYS_H
+
+#include "rtsx.h"
+#include "rtsx_chip.h"
+#include "rtsx_card.h"
+
+typedef dma_addr_t ULONG_PTR;
+
+static inline void rtsx_exclusive_enter_ss(struct rtsx_chip *chip)
+{
+ struct rtsx_dev *dev = chip->rtsx;
+
+ spin_lock(&(dev->reg_lock));
+ rtsx_enter_ss(chip);
+ spin_unlock(&(dev->reg_lock));
+}
+
+static inline void rtsx_reset_detected_cards(struct rtsx_chip *chip, int flag)
+{
+ rtsx_reset_cards(chip);
+}
+
+#define RTSX_MSG_IN_INT(x)
+
+#endif /* __RTSX_SYS_H */
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
new file mode 100644
index 000000000000..97b7b012983e
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -0,0 +1,769 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+
+#include "rtsx.h"
+#include "rtsx_scsi.h"
+#include "rtsx_transport.h"
+#include "rtsx_chip.h"
+#include "rtsx_card.h"
+#include "debug.h"
+
+/***********************************************************************
+ * Scatter-gather transfer buffer access routines
+ ***********************************************************************/
+
+/* Copy a buffer of length buflen to/from the srb's transfer buffer.
+ * (Note: for scatter-gather transfers (scsi_sg_count(srb) > 0),
+ * scsi_sglist(srb) points to a list of s-g entries and we ignore
+ * scsi_bufflen(srb).  For non-scatter-gather transfers, scsi_sglist(srb)
+ * points to the transfer buffer itself and scsi_bufflen(srb) is the
+ * buffer's length.)
+ * Update the *index and *offset variables so that the next copy will
+ * pick up from where this one left off. */
+
+unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
+ unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
+ unsigned int *offset, enum xfer_buf_dir dir)
+{
+ unsigned int cnt;
+
+ /* If not using scatter-gather, just transfer the data directly.
+ * Make certain it will fit in the available buffer space. */
+ if (scsi_sg_count(srb) == 0) {
+ if (*offset >= scsi_bufflen(srb))
+ return 0;
+ cnt = min(buflen, scsi_bufflen(srb) - *offset);
+ if (dir == TO_XFER_BUF)
+ memcpy((unsigned char *) scsi_sglist(srb) + *offset,
+ buffer, cnt);
+ else
+ memcpy(buffer, (unsigned char *) scsi_sglist(srb) +
+ *offset, cnt);
+ *offset += cnt;
+
+ /* Using scatter-gather. We have to go through the list one entry
+ * at a time. Each s-g entry contains some number of pages, and
+ * each page has to be kmap()'ed separately. If the page is already
+ * in kernel-addressable memory then kmap() will return its address.
+ * If the page is not directly accessible -- such as a user buffer
+ * located in high memory -- then kmap() will map it to a temporary
+ * position in the kernel's virtual address space. */
+ } else {
+ struct scatterlist *sg =
+ (struct scatterlist *) scsi_sglist(srb)
+ + *index;
+
+ /* This loop handles a single s-g list entry, which may
+ * include multiple pages. Find the initial page structure
+ * and the starting offset within the page, and update
+ * the *offset and *index values for the next loop. */
+ cnt = 0;
+ while (cnt < buflen && *index < scsi_sg_count(srb)) {
+ struct page *page = sg_page(sg) +
+ ((sg->offset + *offset) >> PAGE_SHIFT);
+ unsigned int poff =
+ (sg->offset + *offset) & (PAGE_SIZE-1);
+ unsigned int sglen = sg->length - *offset;
+
+ if (sglen > buflen - cnt) {
+
+ /* Transfer ends within this s-g entry */
+ sglen = buflen - cnt;
+ *offset += sglen;
+ } else {
+
+ /* Transfer continues to next s-g entry */
+ *offset = 0;
+ ++*index;
+ ++sg;
+ }
+
+ /* Transfer the data for all the pages in this
+ * s-g entry. For each page: call kmap(), do the
+ * transfer, and call kunmap() immediately after. */
+ while (sglen > 0) {
+ unsigned int plen = min(sglen, (unsigned int)
+ PAGE_SIZE - poff);
+ unsigned char *ptr = kmap(page);
+
+ if (dir == TO_XFER_BUF)
+ memcpy(ptr + poff, buffer + cnt, plen);
+ else
+ memcpy(buffer + cnt, ptr + poff, plen);
+ kunmap(page);
+
+ /* Start at the beginning of the next page */
+ poff = 0;
+ ++page;
+ cnt += plen;
+ sglen -= plen;
+ }
+ }
+ }
+
+ /* Return the amount actually transferred */
+ return cnt;
+}
+
+/* Store the contents of buffer into srb's transfer buffer and set the
+ * SCSI residue. */
+void rtsx_stor_set_xfer_buf(unsigned char *buffer,
+ unsigned int buflen, struct scsi_cmnd *srb)
+{
+ unsigned int index = 0, offset = 0;
+
+ rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
+ TO_XFER_BUF);
+ if (buflen < scsi_bufflen(srb))
+ scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
+}
+
+void rtsx_stor_get_xfer_buf(unsigned char *buffer,
+ unsigned int buflen, struct scsi_cmnd *srb)
+{
+ unsigned int index = 0, offset = 0;
+
+ rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
+ FROM_XFER_BUF);
+ if (buflen < scsi_bufflen(srb))
+ scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
+}
+
+/***********************************************************************
+ * Transport routines
+ ***********************************************************************/
+
+/* Invoke the transport and basic error-handling/recovery methods
+ *
+ * This is used to send the message to the device and receive the response.
+ */
+void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int result;
+
+ result = rtsx_scsi_handler(srb, chip);
+
+ /* if the command gets aborted by the higher layers, we need to
+ * short-circuit all other processing
+ */
+ if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
+ RTSX_DEBUGP("-- command was aborted\n");
+ srb->result = DID_ABORT << 16;
+ goto Handle_Errors;
+ }
+
+ /* if there is a transport error, reset and don't auto-sense */
+ if (result == TRANSPORT_ERROR) {
+ RTSX_DEBUGP("-- transport indicates error, resetting\n");
+ srb->result = DID_ERROR << 16;
+ goto Handle_Errors;
+ }
+
+ srb->result = SAM_STAT_GOOD;
+
+ /*
+ * If we have a failure, we're going to do a REQUEST_SENSE
+ * automatically. Note that we differentiate between a command
+ * "failure" and an "error" in the transport mechanism.
+ */
+ if (result == TRANSPORT_FAILED) {
+ /* set the result so the higher layers expect this data */
+ srb->result = SAM_STAT_CHECK_CONDITION;
+ memcpy(srb->sense_buffer,
+ (unsigned char *)&(chip->sense_buffer[SCSI_LUN(srb)]),
+ sizeof(struct sense_data_t));
+ }
+
+ return;
+
+ /* Error and abort processing: try to resynchronize with the device
+ * by issuing a port reset. If that fails, try a class-specific
+ * device reset. */
+Handle_Errors:
+ return;
+}
+
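+/*
+ * Queue one 32-bit host command word: bits 31:30 carry the command type,
+ * bits 29:16 the 14-bit register address, bits 15:8 the mask and bits 7:0
+ * the data byte.  Entries are stored little-endian in the host command
+ * buffer and are kicked off later by rtsx_send_cmd()/rtsx_send_cmd_no_wait().
+ */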
+void rtsx_add_cmd(struct rtsx_chip *chip,
+ u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
+{
+ u32 *cb = (u32 *)(chip->host_cmds_ptr);
+ u32 val = 0;
+
+ val |= (u32)(cmd_type & 0x03) << 30;
+ val |= (u32)(reg_addr & 0x3FFF) << 16;
+ val |= (u32)mask << 8;
+ val |= (u32)data;
+
+ spin_lock_irq(&chip->rtsx->reg_lock);
+ if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
+ cb[(chip->ci)++] = cpu_to_le32(val);
+
+ spin_unlock_irq(&chip->rtsx->reg_lock);
+}
+
+void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
+{
+ u32 val = 1U << 31;
+
+ rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);
+
+ val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
+ /* Hardware Auto Response */
+ val |= 0x40000000;
+ rtsx_writel(chip, RTSX_HCBCTLR, val);
+}
+
+int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
+{
+ struct rtsx_dev *rtsx = chip->rtsx;
+ struct completion trans_done;
+ u32 val = 1U << 31;
+ long timeleft;
+ int err = 0;
+
+ if (card == SD_CARD)
+ rtsx->check_card_cd = SD_EXIST;
+ else if (card == MS_CARD)
+ rtsx->check_card_cd = MS_EXIST;
+ else if (card == XD_CARD)
+ rtsx->check_card_cd = XD_EXIST;
+ else
+ rtsx->check_card_cd = 0;
+
+ spin_lock_irq(&rtsx->reg_lock);
+
+ /* set up data structures for the wakeup system */
+ rtsx->done = &trans_done;
+ rtsx->trans_result = TRANS_NOT_READY;
+ init_completion(&trans_done);
+ rtsx->trans_state = STATE_TRANS_CMD;
+
+ rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);
+
+ val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
+ /* Hardware Auto Response */
+ val |= 0x40000000;
+ rtsx_writel(chip, RTSX_HCBCTLR, val);
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+ /* Wait for TRANS_OK_INT */
+ timeleft = wait_for_completion_interruptible_timeout(
+ &trans_done, timeout * HZ / 1000);
+ if (timeleft <= 0) {
+ RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
+ err = -ETIMEDOUT;
+ TRACE_GOTO(chip, finish_send_cmd);
+ }
+
+ spin_lock_irq(&rtsx->reg_lock);
+ if (rtsx->trans_result == TRANS_RESULT_FAIL)
+ err = -EIO;
+ else if (rtsx->trans_result == TRANS_RESULT_OK)
+ err = 0;
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+finish_send_cmd:
+ rtsx->done = NULL;
+ rtsx->trans_state = STATE_TRANS_NONE;
+
+ if (err < 0)
+ rtsx_stop_cmd(chip, card);
+
+ return err;
+}
+
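+/*
+ * Append one 64-bit ADMA descriptor: the 32-bit DMA address goes in the
+ * high word, the transfer length and option flags in the low word.
+ * Segments larger than 0x80000 bytes are split, and only the final piece
+ * keeps the caller's SG_END flag.
+ */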
+static inline void rtsx_add_sg_tbl(
+ struct rtsx_chip *chip, u32 addr, u32 len, u8 option)
+{
+ u64 *sgb = (u64 *)(chip->host_sg_tbl_ptr);
+ u64 val = 0;
+ u32 temp_len = 0;
+ u8 temp_opt = 0;
+
+ do {
+ if (len > 0x80000) {
+ temp_len = 0x80000;
+ temp_opt = option & (~SG_END);
+ } else {
+ temp_len = len;
+ temp_opt = option;
+ }
+ val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;
+
+ if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
+ sgb[(chip->sgi)++] = cpu_to_le64(val);
+
+ len -= temp_len;
+ addr += temp_len;
+ } while (len);
+}
+
+static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
+ struct scatterlist *sg, int num_sg, unsigned int *index,
+ unsigned int *offset, int size,
+ enum dma_data_direction dma_dir, int timeout)
+{
+ struct rtsx_dev *rtsx = chip->rtsx;
+ struct completion trans_done;
+ u8 dir;
+ int sg_cnt, i, resid;
+ int err = 0;
+ long timeleft;
+ struct scatterlist *sg_ptr;
+ u32 val = TRIG_DMA;
+
+ if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
+ return -EIO;
+
+ if (dma_dir == DMA_TO_DEVICE)
+ dir = HOST_TO_DEVICE;
+ else if (dma_dir == DMA_FROM_DEVICE)
+ dir = DEVICE_TO_HOST;
+ else
+ return -ENXIO;
+
+ if (card == SD_CARD)
+ rtsx->check_card_cd = SD_EXIST;
+ else if (card == MS_CARD)
+ rtsx->check_card_cd = MS_EXIST;
+ else if (card == XD_CARD)
+ rtsx->check_card_cd = XD_EXIST;
+ else
+ rtsx->check_card_cd = 0;
+
+ spin_lock_irq(&rtsx->reg_lock);
+
+ /* set up data structures for the wakeup system */
+ rtsx->done = &trans_done;
+
+ rtsx->trans_state = STATE_TRANS_SG;
+ rtsx->trans_result = TRANS_NOT_READY;
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+ sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+
+ resid = size;
+ sg_ptr = sg;
+ chip->sgi = 0;
+ /* Usually the next entry will be @sg@ + 1, but if this sg element
+ * is part of a chained scatterlist, it could jump to the start of
+ * a new scatterlist array. So here we use sg_next to move to
+ * the proper sg
+ */
+ for (i = 0; i < *index; i++)
+ sg_ptr = sg_next(sg_ptr);
+ for (i = *index; i < sg_cnt; i++) {
+ dma_addr_t addr;
+ unsigned int len;
+ u8 option;
+
+ addr = sg_dma_address(sg_ptr);
+ len = sg_dma_len(sg_ptr);
+
+ RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
+ (unsigned int)addr, len);
+ RTSX_DEBUGP("*index = %d, *offset = %d\n", *index, *offset);
+
+ addr += *offset;
+
+ if ((len - *offset) > resid) {
+ *offset += resid;
+ len = resid;
+ resid = 0;
+ } else {
+ resid -= (len - *offset);
+ len -= *offset;
+ *offset = 0;
+ *index = *index + 1;
+ }
+ if ((i == (sg_cnt - 1)) || !resid)
+ option = SG_VALID | SG_END | SG_TRANS_DATA;
+ else
+ option = SG_VALID | SG_TRANS_DATA;
+
+ rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);
+
+ if (!resid)
+ break;
+
+ sg_ptr = sg_next(sg_ptr);
+ }
+
+ RTSX_DEBUGP("SG table count = %d\n", chip->sgi);
+
+ val |= (u32)(dir & 0x01) << 29;
+ val |= ADMA_MODE;
+
+ spin_lock_irq(&rtsx->reg_lock);
+
+ init_completion(&trans_done);
+
+ rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
+ rtsx_writel(chip, RTSX_HDBCTLR, val);
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+ timeleft = wait_for_completion_interruptible_timeout(
+ &trans_done, timeout * HZ / 1000);
+ if (timeleft <= 0) {
+ RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
+ RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ spin_lock_irq(&rtsx->reg_lock);
+ if (rtsx->trans_result == TRANS_RESULT_FAIL) {
+ err = -EIO;
+ spin_unlock_irq(&rtsx->reg_lock);
+ goto out;
+ }
+ spin_unlock_irq(&rtsx->reg_lock);
+
+ /* Wait for TRANS_OK_INT */
+ spin_lock_irq(&rtsx->reg_lock);
+ if (rtsx->trans_result == TRANS_NOT_READY) {
+ init_completion(&trans_done);
+ spin_unlock_irq(&rtsx->reg_lock);
+ timeleft = wait_for_completion_interruptible_timeout(
+ &trans_done, timeout * HZ / 1000);
+ if (timeleft <= 0) {
+ RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
+ RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+ } else {
+ spin_unlock_irq(&rtsx->reg_lock);
+ }
+
+ spin_lock_irq(&rtsx->reg_lock);
+ if (rtsx->trans_result == TRANS_RESULT_FAIL)
+ err = -EIO;
+ else if (rtsx->trans_result == TRANS_RESULT_OK)
+ err = 0;
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+out:
+ rtsx->done = NULL;
+ rtsx->trans_state = STATE_TRANS_NONE;
+ dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+
+ if (err < 0)
+ rtsx_stop_cmd(chip, card);
+
+ return err;
+}
+
+static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
+ struct scatterlist *sg, int num_sg,
+ enum dma_data_direction dma_dir, int timeout)
+{
+ struct rtsx_dev *rtsx = chip->rtsx;
+ struct completion trans_done;
+ u8 dir;
+ int buf_cnt, i;
+ int err = 0;
+ long timeleft;
+ struct scatterlist *sg_ptr;
+
+ if ((sg == NULL) || (num_sg <= 0))
+ return -EIO;
+
+ if (dma_dir == DMA_TO_DEVICE)
+ dir = HOST_TO_DEVICE;
+ else if (dma_dir == DMA_FROM_DEVICE)
+ dir = DEVICE_TO_HOST;
+ else
+ return -ENXIO;
+
+ if (card == SD_CARD)
+ rtsx->check_card_cd = SD_EXIST;
+ else if (card == MS_CARD)
+ rtsx->check_card_cd = MS_EXIST;
+ else if (card == XD_CARD)
+ rtsx->check_card_cd = XD_EXIST;
+ else
+ rtsx->check_card_cd = 0;
+
+ spin_lock_irq(&rtsx->reg_lock);
+
+ /* set up data structures for the wakeup system */
+ rtsx->done = &trans_done;
+
+ rtsx->trans_state = STATE_TRANS_SG;
+ rtsx->trans_result = TRANS_NOT_READY;
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+ buf_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+
+ sg_ptr = sg;
+
+ for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
+ u32 val = TRIG_DMA;
+ int sg_cnt, j;
+
+ if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8))
+ sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
+ else
+ sg_cnt = (HOST_SG_TBL_BUF_LEN / 8);
+
+ chip->sgi = 0;
+ for (j = 0; j < sg_cnt; j++) {
+ dma_addr_t addr = sg_dma_address(sg_ptr);
+ unsigned int len = sg_dma_len(sg_ptr);
+ u8 option;
+
+ RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
+ (unsigned int)addr, len);
+
+ if (j == (sg_cnt - 1))
+ option = SG_VALID | SG_END | SG_TRANS_DATA;
+ else
+ option = SG_VALID | SG_TRANS_DATA;
+
+ rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);
+
+ sg_ptr = sg_next(sg_ptr);
+ }
+
+ RTSX_DEBUGP("SG table count = %d\n", chip->sgi);
+
+ val |= (u32)(dir & 0x01) << 29;
+ val |= ADMA_MODE;
+
+ spin_lock_irq(&rtsx->reg_lock);
+
+ init_completion(&trans_done);
+
+ rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
+ rtsx_writel(chip, RTSX_HDBCTLR, val);
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+ timeleft = wait_for_completion_interruptible_timeout(
+ &trans_done, timeout * HZ / 1000);
+ if (timeleft <= 0) {
+ RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
+ RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ spin_lock_irq(&rtsx->reg_lock);
+ if (rtsx->trans_result == TRANS_RESULT_FAIL) {
+ err = -EIO;
+ spin_unlock_irq(&rtsx->reg_lock);
+ goto out;
+ }
+ spin_unlock_irq(&rtsx->reg_lock);
+
+ sg_ptr += sg_cnt;
+ }
+
+ /* Wait for TRANS_OK_INT */
+ spin_lock_irq(&rtsx->reg_lock);
+ if (rtsx->trans_result == TRANS_NOT_READY) {
+ init_completion(&trans_done);
+ spin_unlock_irq(&rtsx->reg_lock);
+ timeleft = wait_for_completion_interruptible_timeout(
+ &trans_done, timeout * HZ / 1000);
+ if (timeleft <= 0) {
+ RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
+ RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+ } else {
+ spin_unlock_irq(&rtsx->reg_lock);
+ }
+
+ spin_lock_irq(&rtsx->reg_lock);
+ if (rtsx->trans_result == TRANS_RESULT_FAIL)
+ err = -EIO;
+ else if (rtsx->trans_result == TRANS_RESULT_OK)
+ err = 0;
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+out:
+ rtsx->done = NULL;
+ rtsx->trans_state = STATE_TRANS_NONE;
+ dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+
+ if (err < 0)
+ rtsx_stop_cmd(chip, card);
+
+ return err;
+}
+
+static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
+ enum dma_data_direction dma_dir, int timeout)
+{
+ struct rtsx_dev *rtsx = chip->rtsx;
+ struct completion trans_done;
+ dma_addr_t addr;
+ u8 dir;
+ int err = 0;
+ u32 val = (1U << 31);
+ long timeleft;
+
+ if ((buf == NULL) || (len <= 0))
+ return -EIO;
+
+ if (dma_dir == DMA_TO_DEVICE)
+ dir = HOST_TO_DEVICE;
+ else if (dma_dir == DMA_FROM_DEVICE)
+ dir = DEVICE_TO_HOST;
+ else
+ return -ENXIO;
+
+ addr = dma_map_single(&(rtsx->pci->dev), buf, len, dma_dir);
+ if (!addr)
+ return -ENOMEM;
+
+ if (card == SD_CARD)
+ rtsx->check_card_cd = SD_EXIST;
+ else if (card == MS_CARD)
+ rtsx->check_card_cd = MS_EXIST;
+ else if (card == XD_CARD)
+ rtsx->check_card_cd = XD_EXIST;
+ else
+ rtsx->check_card_cd = 0;
+
+ val |= (u32)(dir & 0x01) << 29;
+ val |= (u32)(len & 0x00FFFFFF);
+
+ spin_lock_irq(&rtsx->reg_lock);
+
+ /* set up data structures for the wakeup system */
+ rtsx->done = &trans_done;
+
+ init_completion(&trans_done);
+
+ rtsx->trans_state = STATE_TRANS_BUF;
+ rtsx->trans_result = TRANS_NOT_READY;
+
+ rtsx_writel(chip, RTSX_HDBAR, addr);
+ rtsx_writel(chip, RTSX_HDBCTLR, val);
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+ /* Wait for TRANS_OK_INT */
+ timeleft = wait_for_completion_interruptible_timeout(
+ &trans_done, timeout * HZ / 1000);
+ if (timeleft <= 0) {
+ RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
+ RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ spin_lock_irq(&rtsx->reg_lock);
+ if (rtsx->trans_result == TRANS_RESULT_FAIL)
+ err = -EIO;
+ else if (rtsx->trans_result == TRANS_RESULT_OK)
+ err = 0;
+
+ spin_unlock_irq(&rtsx->reg_lock);
+
+out:
+ rtsx->done = NULL;
+ rtsx->trans_state = STATE_TRANS_NONE;
+ dma_unmap_single(&(rtsx->pci->dev), addr, len, dma_dir);
+
+ if (err < 0)
+ rtsx_stop_cmd(chip, card);
+
+ return err;
+}
+
+int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
+ void *buf, size_t len, int use_sg, unsigned int *index,
+ unsigned int *offset, enum dma_data_direction dma_dir,
+ int timeout)
+{
+ int err = 0;
+
+ /* don't transfer data during abort processing */
+ if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
+ return -EIO;
+
+ if (use_sg) {
+ err = rtsx_transfer_sglist_adma_partial(chip, card,
+ (struct scatterlist *)buf, use_sg,
+ index, offset, (int)len, dma_dir, timeout);
+ } else {
+ err = rtsx_transfer_buf(chip, card,
+ buf, len, dma_dir, timeout);
+ }
+
+ if (err < 0) {
+ if (RTSX_TST_DELINK(chip)) {
+ RTSX_CLR_DELINK(chip);
+ chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
+ rtsx_reinit_cards(chip, 1);
+ }
+ }
+
+ return err;
+}
+
+int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
+ int use_sg, enum dma_data_direction dma_dir, int timeout)
+{
+ int err = 0;
+
+ RTSX_DEBUGP("use_sg = %d\n", use_sg);
+
+ /* don't transfer data during abort processing */
+ if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
+ return -EIO;
+
+ if (use_sg) {
+ err = rtsx_transfer_sglist_adma(chip, card,
+ (struct scatterlist *)buf,
+ use_sg, dma_dir, timeout);
+ } else {
+ err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout);
+ }
+
+ if (err < 0) {
+ if (RTSX_TST_DELINK(chip)) {
+ RTSX_CLR_DELINK(chip);
+ chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
+ rtsx_reinit_cards(chip, 1);
+ }
+ }
+
+ return err;
+}
diff --git a/drivers/staging/rts5208/rtsx_transport.h b/drivers/staging/rts5208/rtsx_transport.h
new file mode 100644
index 000000000000..b4b112372776
--- /dev/null
+++ b/drivers/staging/rts5208/rtsx_transport.h
@@ -0,0 +1,66 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_TRANSPORT_H
+#define __REALTEK_RTSX_TRANSPORT_H
+
+#include "rtsx.h"
+#include "rtsx_chip.h"
+
+#define WAIT_TIME 2000
+
+unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
+ unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
+ unsigned int *offset, enum xfer_buf_dir dir);
+void rtsx_stor_set_xfer_buf(unsigned char *buffer,
+ unsigned int buflen, struct scsi_cmnd *srb);
+void rtsx_stor_get_xfer_buf(unsigned char *buffer,
+ unsigned int buflen, struct scsi_cmnd *srb);
+void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+
+#define rtsx_init_cmd(chip) ((chip)->ci = 0)
+
+void rtsx_add_cmd(struct rtsx_chip *chip,
+ u8 cmd_type, u16 reg_addr, u8 mask, u8 data);
+void rtsx_send_cmd_no_wait(struct rtsx_chip *chip);
+int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout);
+
+extern inline u8 *rtsx_get_cmd_data(struct rtsx_chip *chip)
+{
+#ifdef CMD_USING_SG
+ return (u8 *)(chip->host_sg_tbl_ptr);
+#else
+ return (u8 *)(chip->host_cmds_ptr);
+#endif
+}
+
+int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
+ int use_sg, enum dma_data_direction dma_dir, int timeout);
+
+int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
+ void *buf, size_t len,
+ int use_sg, unsigned int *index, unsigned int *offset,
+ enum dma_data_direction dma_dir, int timeout);
+
+#endif /* __REALTEK_RTSX_TRANSPORT_H */
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
new file mode 100644
index 000000000000..c7c1f5410430
--- /dev/null
+++ b/drivers/staging/rts5208/sd.c
@@ -0,0 +1,4525 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+
+#include "rtsx.h"
+#include "rtsx_transport.h"
+#include "rtsx_scsi.h"
+#include "rtsx_card.h"
+#include "sd.h"
+
+#define SD_MAX_RETRY_COUNT 3
+
+static u16 REG_SD_CFG1;
+static u16 REG_SD_CFG2;
+static u16 REG_SD_CFG3;
+static u16 REG_SD_STAT1;
+static u16 REG_SD_STAT2;
+static u16 REG_SD_BUS_STAT;
+static u16 REG_SD_PAD_CTL;
+static u16 REG_SD_SAMPLE_POINT_CTL;
+static u16 REG_SD_PUSH_POINT_CTL;
+static u16 REG_SD_CMD0;
+static u16 REG_SD_CMD1;
+static u16 REG_SD_CMD2;
+static u16 REG_SD_CMD3;
+static u16 REG_SD_CMD4;
+static u16 REG_SD_CMD5;
+static u16 REG_SD_BYTE_CNT_L;
+static u16 REG_SD_BYTE_CNT_H;
+static u16 REG_SD_BLOCK_CNT_L;
+static u16 REG_SD_BLOCK_CNT_H;
+static u16 REG_SD_TRANSFER;
+static u16 REG_SD_VPCLK0_CTL;
+static u16 REG_SD_VPCLK1_CTL;
+static u16 REG_SD_DCMPS0_CTL;
+static u16 REG_SD_DCMPS1_CTL;
+
+static inline void sd_set_err_code(struct rtsx_chip *chip, u8 err_code)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ sd_card->err_code |= err_code;
+}
+
+static inline void sd_clr_err_code(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ sd_card->err_code = 0;
+}
+
+static inline int sd_check_err_code(struct rtsx_chip *chip, u8 err_code)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ return sd_card->err_code & err_code;
+}
+
+static void sd_init_reg_addr(struct rtsx_chip *chip)
+{
+ REG_SD_CFG1 = 0xFD31;
+ REG_SD_CFG2 = 0xFD33;
+ REG_SD_CFG3 = 0xFD3E;
+ REG_SD_STAT1 = 0xFD30;
+ REG_SD_STAT2 = 0;
+ REG_SD_BUS_STAT = 0;
+ REG_SD_PAD_CTL = 0;
+ REG_SD_SAMPLE_POINT_CTL = 0;
+ REG_SD_PUSH_POINT_CTL = 0;
+ REG_SD_CMD0 = 0xFD34;
+ REG_SD_CMD1 = 0xFD35;
+ REG_SD_CMD2 = 0xFD36;
+ REG_SD_CMD3 = 0xFD37;
+ REG_SD_CMD4 = 0xFD38;
+ REG_SD_CMD5 = 0xFD5A;
+ REG_SD_BYTE_CNT_L = 0xFD39;
+ REG_SD_BYTE_CNT_H = 0xFD3A;
+ REG_SD_BLOCK_CNT_L = 0xFD3B;
+ REG_SD_BLOCK_CNT_H = 0xFD3C;
+ REG_SD_TRANSFER = 0xFD32;
+ REG_SD_VPCLK0_CTL = 0;
+ REG_SD_VPCLK1_CTL = 0;
+ REG_SD_DCMPS0_CTL = 0;
+ REG_SD_DCMPS1_CTL = 0;
+}
+
+static int sd_check_data0_status(struct rtsx_chip *chip)
+{
+ u8 stat;
+
+ RTSX_READ_REG(chip, REG_SD_STAT1, &stat);
+
+ if (!(stat & SD_DAT0_STATUS)) {
+ sd_set_err_code(chip, SD_BUSY);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
+ u32 arg, u8 rsp_type, u8 *rsp, int rsp_len)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int timeout = 100;
+ u16 reg_addr;
+ u8 *ptr;
+ int stat_idx = 0;
+ int rty_cnt = 0;
+
+ sd_clr_err_code(chip);
+
+ RTSX_DEBUGP("SD/MMC CMD %d, arg = 0x%08x\n", cmd_idx, arg);
+
+ if (rsp_type == SD_RSP_TYPE_R1b)
+ timeout = 3000;
+
+RTY_SEND_CMD:
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 0x40 | cmd_idx);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, (u8)(arg >> 24));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, (u8)(arg >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, (u8)(arg >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, (u8)arg);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
+ 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
+ SD_TRANSFER_END | SD_STAT_IDLE, SD_TRANSFER_END | SD_STAT_IDLE);
+
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
+ reg_addr++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
+
+ stat_idx = 16;
+ } else if (rsp_type != SD_RSP_TYPE_R0) {
+ for (reg_addr = REG_SD_CMD0; reg_addr <= REG_SD_CMD4;
+ reg_addr++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
+
+ stat_idx = 5;
+ }
+
+ rtsx_add_cmd(chip, READ_REG_CMD, REG_SD_STAT1, 0, 0);
+
+ retval = rtsx_send_cmd(chip, SD_CARD, timeout);
+ if (retval < 0) {
+ u8 val;
+
+ rtsx_read_register(chip, REG_SD_STAT1, &val);
+ RTSX_DEBUGP("SD_STAT1: 0x%x\n", val);
+
+ rtsx_read_register(chip, REG_SD_CFG3, &val);
+ RTSX_DEBUGP("SD_CFG3: 0x%x\n", val);
+
+ if (retval == -ETIMEDOUT) {
+ if (rsp_type & SD_WAIT_BUSY_END) {
+ retval = sd_check_data0_status(chip);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_clear_sd_error(chip);
+ TRACE_RET(chip, retval);
+ }
+ } else {
+ sd_set_err_code(chip, SD_TO_ERR);
+ }
+ retval = STATUS_TIMEDOUT;
+ } else {
+ retval = STATUS_FAIL;
+ }
+ rtsx_clear_sd_error(chip);
+
+ TRACE_RET(chip, retval);
+ }
+
+ if (rsp_type == SD_RSP_TYPE_R0)
+ return STATUS_SUCCESS;
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+
+ if ((ptr[0] & 0xC0) != 0) {
+ sd_set_err_code(chip, SD_STS_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (!(rsp_type & SD_NO_CHECK_CRC7)) {
+ if (ptr[stat_idx] & SD_CRC7_ERR) {
+ if (cmd_idx == WRITE_MULTIPLE_BLOCK) {
+ sd_set_err_code(chip, SD_CRC_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (rty_cnt < SD_MAX_RETRY_COUNT) {
+ wait_timeout(20);
+ rty_cnt++;
+ goto RTY_SEND_CMD;
+ } else {
+ sd_set_err_code(chip, SD_CRC_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ if ((rsp_type == SD_RSP_TYPE_R1) || (rsp_type == SD_RSP_TYPE_R1b)) {
+ if ((cmd_idx != SEND_RELATIVE_ADDR) &&
+ (cmd_idx != SEND_IF_COND)) {
+ if (cmd_idx != STOP_TRANSMISSION) {
+ if (ptr[1] & 0x80)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#ifdef SUPPORT_SD_LOCK
+ if (ptr[1] & 0x7D)
+#else
+ if (ptr[1] & 0x7F)
+#endif
+ {
+ RTSX_DEBUGP("ptr[1]: 0x%02x\n", ptr[1]);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (ptr[2] & 0xFF) {
+ RTSX_DEBUGP("ptr[2]: 0x%02x\n", ptr[2]);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (ptr[3] & 0x80) {
+ RTSX_DEBUGP("ptr[3]: 0x%02x\n", ptr[3]);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (ptr[3] & 0x01)
+ sd_card->sd_data_buf_ready = 1;
+ else
+ sd_card->sd_data_buf_ready = 0;
+ }
+ }
+
+ if (rsp && rsp_len)
+ memcpy(rsp, ptr, rsp_len);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_read_data(struct rtsx_chip *chip,
+ u8 trans_mode, u8 *cmd, int cmd_len, u16 byte_cnt,
+ u16 blk_cnt, u8 bus_width, u8 *buf, int buf_len,
+ int timeout)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i;
+
+ sd_clr_err_code(chip);
+
+ if (!buf)
+ buf_len = 0;
+
+ if (buf_len > 512)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ if (cmd_len) {
+ RTSX_DEBUGP("SD/MMC CMD %d\n", cmd[0] - 0x40);
+ for (i = 0; i < (cmd_len < 6 ? cmd_len : 6); i++)
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0 + i,
+ 0xFF, cmd[i]);
+ }
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
+ (u8)byte_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
+ (u8)(byte_cnt >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
+ (u8)blk_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
+ (u8)(blk_cnt >> 8));
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END|
+ SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ if (trans_mode != SD_TM_AUTO_TUNING)
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ trans_mode | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
+ SD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, SD_CARD, timeout);
+ if (retval < 0) {
+ if (retval == -ETIMEDOUT) {
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (buf && buf_len) {
+ retval = rtsx_read_ppbuf(chip, buf, buf_len);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
+ u8 *cmd, int cmd_len, u16 byte_cnt, u16 blk_cnt, u8 bus_width,
+ u8 *buf, int buf_len, int timeout)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i;
+
+ sd_clr_err_code(chip);
+
+ if (!buf)
+ buf_len = 0;
+
+ if (buf_len > 512) {
+ /* This function can't write more than one page of data */
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (buf && buf_len) {
+ retval = rtsx_write_ppbuf(chip, buf, buf_len);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rtsx_init_cmd(chip);
+
+ if (cmd_len) {
+ RTSX_DEBUGP("SD/MMC CMD %d\n", cmd[0] - 0x40);
+ for (i = 0; i < (cmd_len < 6 ? cmd_len : 6); i++) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ REG_SD_CMD0 + i, 0xFF, cmd[i]);
+ }
+ }
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
+ (u8)byte_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
+ (u8)(byte_cnt >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
+ (u8)blk_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
+ (u8)(blk_cnt >> 8));
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+ SD_CHECK_CRC7 | SD_RSP_LEN_6);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ trans_mode | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
+ SD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, SD_CARD, timeout);
+ if (retval < 0) {
+ if (retval == -ETIMEDOUT) {
+ sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i;
+ u8 csd_ver, trans_speed;
+ u8 rsp[16];
+
+ for (i = 0; i < 6; i++) {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, SEND_CSD, sd_card->sd_addr,
+ SD_RSP_TYPE_R2, rsp, 16);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+
+ if (i == 6)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ memcpy(sd_card->raw_csd, rsp + 1, 15);
+
+ RTSX_DEBUGP("CSD Response:\n");
+ RTSX_DUMP(sd_card->raw_csd, 16);
+
+ csd_ver = (rsp[1] & 0xc0) >> 6;
+ RTSX_DEBUGP("csd_ver = %d\n", csd_ver);
+
+ trans_speed = rsp[4];
+ if ((trans_speed & 0x07) == 0x02) {
+ if ((trans_speed & 0xf8) >= 0x30) {
+ if (chip->asic_code)
+ sd_card->sd_clock = 47;
+ else
+ sd_card->sd_clock = CLK_50;
+
+ } else if ((trans_speed & 0xf8) == 0x28) {
+ if (chip->asic_code)
+ sd_card->sd_clock = 39;
+ else
+ sd_card->sd_clock = CLK_40;
+
+ } else if ((trans_speed & 0xf8) == 0x20) {
+ if (chip->asic_code)
+ sd_card->sd_clock = 29;
+ else
+ sd_card->sd_clock = CLK_30;
+
+ } else if ((trans_speed & 0xf8) >= 0x10) {
+ if (chip->asic_code)
+ sd_card->sd_clock = 23;
+ else
+ sd_card->sd_clock = CLK_20;
+
+ } else if ((trans_speed & 0x08) >= 0x08) {
+ if (chip->asic_code)
+ sd_card->sd_clock = 19;
+ else
+ sd_card->sd_clock = CLK_20;
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (CHK_MMC_SECTOR_MODE(sd_card)) {
+ sd_card->capacity = 0;
+ } else {
+ if ((!CHK_SD_HCXC(sd_card)) || (csd_ver == 0)) {
+ u8 blk_size, c_size_mult;
+ u16 c_size;
+ blk_size = rsp[6] & 0x0F;
+ c_size = ((u16)(rsp[7] & 0x03) << 10)
+ + ((u16)rsp[8] << 2)
+ + ((u16)(rsp[9] & 0xC0) >> 6);
+ c_size_mult = (u8)((rsp[10] & 0x03) << 1);
+ c_size_mult += (rsp[11] & 0x80) >> 7;
+ sd_card->capacity = (((u32)(c_size + 1)) *
+ (1 << (c_size_mult + 2)))
+ << (blk_size - 9);
+ } else {
+ u32 total_sector = 0;
+ total_sector = (((u32)rsp[8] & 0x3f) << 16) |
+ ((u32)rsp[9] << 8) | (u32)rsp[10];
+ sd_card->capacity = (total_sector + 1) << 10;
+ }
+ }
+
+ if (check_wp) {
+ if (rsp[15] & 0x30)
+ chip->card_wp |= SD_CARD;
+
+ RTSX_DEBUGP("CSD WP Status: 0x%x\n", rsp[15]);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_set_sample_push_timing(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ u8 val = 0;
+
+ if ((chip->sd_ctl & SD_PUSH_POINT_CTL_MASK) == SD_PUSH_POINT_DELAY)
+ val |= 0x10;
+
+ if ((chip->sd_ctl & SD_SAMPLE_POINT_CTL_MASK) == SD_SAMPLE_POINT_AUTO) {
+ if (chip->asic_code) {
+ if (CHK_SD_HS(sd_card) || CHK_MMC_52M(sd_card)) {
+ if (val & 0x10)
+ val |= 0x04;
+ else
+ val |= 0x08;
+ }
+ } else {
+ if (val & 0x10)
+ val |= 0x04;
+ else
+ val |= 0x08;
+ }
+ } else if ((chip->sd_ctl & SD_SAMPLE_POINT_CTL_MASK) ==
+ SD_SAMPLE_POINT_DELAY) {
+ if (val & 0x10)
+ val |= 0x04;
+ else
+ val |= 0x08;
+ }
+
+ RTSX_WRITE_REG(chip, REG_SD_CFG1, 0x1C, val);
+
+ return STATUS_SUCCESS;
+}
+
+static void sd_choose_proper_clock(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ if (CHK_SD_SDR104(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = chip->asic_sd_sdr104_clk;
+ else
+ sd_card->sd_clock = chip->fpga_sd_sdr104_clk;
+
+ } else if (CHK_SD_DDR50(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = chip->asic_sd_ddr50_clk;
+ else
+ sd_card->sd_clock = chip->fpga_sd_ddr50_clk;
+
+ } else if (CHK_SD_SDR50(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = chip->asic_sd_sdr50_clk;
+ else
+ sd_card->sd_clock = chip->fpga_sd_sdr50_clk;
+
+ } else if (CHK_SD_HS(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = chip->asic_sd_hs_clk;
+ else
+ sd_card->sd_clock = chip->fpga_sd_hs_clk;
+
+ } else if (CHK_MMC_52M(sd_card) || CHK_MMC_DDR52(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = chip->asic_mmc_52m_clk;
+ else
+ sd_card->sd_clock = chip->fpga_mmc_52m_clk;
+
+ } else if (CHK_MMC_26M(sd_card)) {
+ if (chip->asic_code)
+ sd_card->sd_clock = 48;
+ else
+ sd_card->sd_clock = CLK_50;
+ }
+}
+
+static int sd_set_clock_divider(struct rtsx_chip *chip, u8 clk_div)
+{
+ u8 mask = 0, val = 0;
+
+ mask = 0x60;
+ if (clk_div == SD_CLK_DIVIDE_0)
+ val = 0x00;
+ else if (clk_div == SD_CLK_DIVIDE_128)
+ val = 0x40;
+ else if (clk_div == SD_CLK_DIVIDE_256)
+ val = 0x20;
+
+ RTSX_WRITE_REG(chip, REG_SD_CFG1, mask, val);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_set_init_para(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ retval = sd_set_sample_push_timing(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ sd_choose_proper_clock(chip);
+
+ retval = switch_clock(chip, sd_card->sd_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int sd_select_card(struct rtsx_chip *chip, int select)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd_idx, cmd_type;
+ u32 addr;
+
+ if (select) {
+ cmd_idx = SELECT_CARD;
+ cmd_type = SD_RSP_TYPE_R1;
+ addr = sd_card->sd_addr;
+ } else {
+ cmd_idx = DESELECT_CARD;
+ cmd_type = SD_RSP_TYPE_R0;
+ addr = 0;
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, cmd_idx, addr, cmd_type, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+#ifdef SUPPORT_SD_LOCK
+static int sd_update_lock_status(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 rsp[5];
+
+ retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, rsp, 5);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (rsp[1] & 0x02)
+ sd_card->sd_lock_status |= SD_LOCKED;
+ else
+ sd_card->sd_lock_status &= ~SD_LOCKED;
+
+ RTSX_DEBUGP("sd_card->sd_lock_status = 0x%x\n",
+ sd_card->sd_lock_status);
+
+ if (rsp[1] & 0x01)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+#endif
+
+static int sd_wait_state_data_ready(struct rtsx_chip *chip, u8 state,
+ u8 data_ready, int polling_cnt)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval, i;
+ u8 rsp[5];
+
+ for (i = 0; i < polling_cnt; i++) {
+ retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr, SD_RSP_TYPE_R1, rsp,
+ 5);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (((rsp[3] & 0x1E) == state) &&
+ ((rsp[3] & 0x01) == data_ready))
+ return STATUS_SUCCESS;
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage)
+{
+ int retval;
+
+ if (voltage == SD_IO_3V3) {
+ if (chip->asic_code) {
+ retval = rtsx_write_phy_register(chip, 0x08,
+ 0x4FC0 |
+ chip->phy_voltage);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ RTSX_WRITE_REG(chip, SD_PAD_CTL, SD_IO_USING_1V8, 0);
+ }
+ } else if (voltage == SD_IO_1V8) {
+ if (chip->asic_code) {
+ retval = rtsx_write_phy_register(chip, 0x08,
+ 0x4C40 |
+ chip->phy_voltage);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ RTSX_WRITE_REG(chip, SD_PAD_CTL, SD_IO_USING_1V8,
+ SD_IO_USING_1V8);
+ }
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_voltage_switch(struct rtsx_chip *chip)
+{
+ int retval;
+ u8 stat;
+
+ RTSX_WRITE_REG(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP,
+ SD_CLK_TOGGLE_EN);
+
+ retval = sd_send_cmd_get_rsp(chip, VOLTAGE_SWITCH, 0, SD_RSP_TYPE_R1,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ udelay(chip->sd_voltage_switch_delay);
+
+ RTSX_READ_REG(chip, SD_BUS_STAT, &stat);
+ if (stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
+ SD_DAT1_STATUS | SD_DAT0_STATUS)) {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_WRITE_REG(chip, SD_BUS_STAT, 0xFF, SD_CLK_FORCE_STOP);
+ retval = sd_change_bank_voltage(chip, SD_IO_1V8);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(50);
+
+ RTSX_WRITE_REG(chip, SD_BUS_STAT, 0xFF, SD_CLK_TOGGLE_EN);
+ wait_timeout(10);
+
+ RTSX_READ_REG(chip, SD_BUS_STAT, &stat);
+ if ((stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
+ SD_DAT1_STATUS | SD_DAT0_STATUS)) !=
+ (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
+ SD_DAT1_STATUS | SD_DAT0_STATUS)) {
+ RTSX_DEBUGP("SD_BUS_STAT: 0x%x\n", stat);
+ rtsx_write_register(chip, SD_BUS_STAT,
+ SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
+ rtsx_write_register(chip, CARD_CLK_EN, 0xFF, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_WRITE_REG(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP,
+ 0);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_reset_dcm(struct rtsx_chip *chip, u8 tune_dir)
+{
+ if (tune_dir == TUNE_RX) {
+ RTSX_WRITE_REG(chip, DCM_DRP_CTL, 0xFF, DCM_RESET | DCM_RX);
+ RTSX_WRITE_REG(chip, DCM_DRP_CTL, 0xFF, DCM_RX);
+ } else {
+ RTSX_WRITE_REG(chip, DCM_DRP_CTL, 0xFF, DCM_RESET | DCM_TX);
+ RTSX_WRITE_REG(chip, DCM_DRP_CTL, 0xFF, DCM_TX);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ u16 SD_VP_CTL, SD_DCMPS_CTL;
+ u8 val;
+ int retval;
+ int ddr_rx = 0;
+
+ RTSX_DEBUGP("sd_change_phase (sample_point = %d, tune_dir = %d)\n",
+ sample_point, tune_dir);
+
+ if (tune_dir == TUNE_RX) {
+ SD_VP_CTL = SD_VPRX_CTL;
+ SD_DCMPS_CTL = SD_DCMPS_RX_CTL;
+ if (CHK_SD_DDR50(sd_card))
+ ddr_rx = 1;
+ } else {
+ SD_VP_CTL = SD_VPTX_CTL;
+ SD_DCMPS_CTL = SD_DCMPS_TX_CTL;
+ }
+
+ if (chip->asic_code) {
+ RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
+ RTSX_WRITE_REG(chip, SD_VP_CTL, 0x1F, sample_point);
+ RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
+ RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET,
+ PHASE_NOT_RESET);
+ RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, 0);
+ } else {
+#ifdef CONFIG_RTS5208_DEBUG
+ rtsx_read_register(chip, SD_VP_CTL, &val);
+ RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val);
+ rtsx_read_register(chip, SD_DCMPS_CTL, &val);
+ RTSX_DEBUGP("SD_DCMPS_CTL: 0x%x\n", val);
+#endif
+
+ if (ddr_rx) {
+ RTSX_WRITE_REG(chip, SD_VP_CTL, PHASE_CHANGE,
+ PHASE_CHANGE);
+ udelay(50);
+ RTSX_WRITE_REG(chip, SD_VP_CTL, 0xFF,
+ PHASE_CHANGE | PHASE_NOT_RESET | sample_point);
+ } else {
+ RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
+ udelay(50);
+ RTSX_WRITE_REG(chip, SD_VP_CTL, 0xFF,
+ PHASE_NOT_RESET | sample_point);
+ }
+ udelay(100);
+
+ rtsx_init_cmd(chip);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SD_DCMPS_CTL, DCMPS_CHANGE,
+ DCMPS_CHANGE);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SD_DCMPS_CTL,
+ DCMPS_CHANGE_DONE, DCMPS_CHANGE_DONE);
+ retval = rtsx_send_cmd(chip, SD_CARD, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, Fail);
+
+ val = *rtsx_get_cmd_data(chip);
+ if (val & DCMPS_ERROR)
+ TRACE_GOTO(chip, Fail);
+
+ if ((val & DCMPS_CURRENT_PHASE) != sample_point)
+ TRACE_GOTO(chip, Fail);
+
+ RTSX_WRITE_REG(chip, SD_DCMPS_CTL, DCMPS_CHANGE, 0);
+ if (ddr_rx)
+ RTSX_WRITE_REG(chip, SD_VP_CTL, PHASE_CHANGE, 0);
+ else
+ RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, 0);
+
+ udelay(50);
+ }
+
+ RTSX_WRITE_REG(chip, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
+
+ return STATUS_SUCCESS;
+
+Fail:
+#ifdef CONFIG_RTS5208_DEBUG
+ rtsx_read_register(chip, SD_VP_CTL, &val);
+ RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val);
+ rtsx_read_register(chip, SD_DCMPS_CTL, &val);
+ RTSX_DEBUGP("SD_DCMPS_CTL: 0x%x\n", val);
+#endif
+
+ rtsx_write_register(chip, SD_DCMPS_CTL, DCMPS_CHANGE, 0);
+ rtsx_write_register(chip, SD_VP_CTL, PHASE_CHANGE, 0);
+ wait_timeout(10);
+ sd_reset_dcm(chip, tune_dir);
+ return STATUS_FAIL;
+}
+
+static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5], buf[8];
+
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ cmd[0] = 0x40 | SEND_SCR;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 8, 1, bus_width,
+ buf, 8, 250);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ memcpy(sd_card->raw_scr, buf, 8);
+
+ if ((buf[0] & 0x0F) == 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
+ u8 func_to_switch, u8 *buf, int buf_len)
+{
+ u8 support_mask = 0, query_switch = 0, switch_busy = 0;
+ int support_offset = 0, query_switch_offset = 0, check_busy_offset = 0;
+
+ if (func_group == SD_FUNC_GROUP_1) {
+ support_offset = FUNCTION_GROUP1_SUPPORT_OFFSET;
+ query_switch_offset = FUNCTION_GROUP1_QUERY_SWITCH_OFFSET;
+ check_busy_offset = FUNCTION_GROUP1_CHECK_BUSY_OFFSET;
+
+ switch (func_to_switch) {
+ case HS_SUPPORT:
+ support_mask = HS_SUPPORT_MASK;
+ query_switch = HS_QUERY_SWITCH_OK;
+ switch_busy = HS_SWITCH_BUSY;
+ break;
+
+ case SDR50_SUPPORT:
+ support_mask = SDR50_SUPPORT_MASK;
+ query_switch = SDR50_QUERY_SWITCH_OK;
+ switch_busy = SDR50_SWITCH_BUSY;
+ break;
+
+ case SDR104_SUPPORT:
+ support_mask = SDR104_SUPPORT_MASK;
+ query_switch = SDR104_QUERY_SWITCH_OK;
+ switch_busy = SDR104_SWITCH_BUSY;
+ break;
+
+ case DDR50_SUPPORT:
+ support_mask = DDR50_SUPPORT_MASK;
+ query_switch = DDR50_QUERY_SWITCH_OK;
+ switch_busy = DDR50_SWITCH_BUSY;
+ break;
+
+ default:
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else if (func_group == SD_FUNC_GROUP_3) {
+ support_offset = FUNCTION_GROUP3_SUPPORT_OFFSET;
+ query_switch_offset = FUNCTION_GROUP3_QUERY_SWITCH_OFFSET;
+ check_busy_offset = FUNCTION_GROUP3_CHECK_BUSY_OFFSET;
+
+ switch (func_to_switch) {
+ case DRIVING_TYPE_A:
+ support_mask = DRIVING_TYPE_A_MASK;
+ query_switch = TYPE_A_QUERY_SWITCH_OK;
+ switch_busy = TYPE_A_SWITCH_BUSY;
+ break;
+
+ case DRIVING_TYPE_C:
+ support_mask = DRIVING_TYPE_C_MASK;
+ query_switch = TYPE_C_QUERY_SWITCH_OK;
+ switch_busy = TYPE_C_SWITCH_BUSY;
+ break;
+
+ case DRIVING_TYPE_D:
+ support_mask = DRIVING_TYPE_D_MASK;
+ query_switch = TYPE_D_QUERY_SWITCH_OK;
+ switch_busy = TYPE_D_SWITCH_BUSY;
+ break;
+
+ default:
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else if (func_group == SD_FUNC_GROUP_4) {
+ support_offset = FUNCTION_GROUP4_SUPPORT_OFFSET;
+ query_switch_offset = FUNCTION_GROUP4_QUERY_SWITCH_OFFSET;
+ check_busy_offset = FUNCTION_GROUP4_CHECK_BUSY_OFFSET;
+
+ switch (func_to_switch) {
+ case CURRENT_LIMIT_400:
+ support_mask = CURRENT_LIMIT_400_MASK;
+ query_switch = CURRENT_LIMIT_400_QUERY_SWITCH_OK;
+ switch_busy = CURRENT_LIMIT_400_SWITCH_BUSY;
+ break;
+
+ case CURRENT_LIMIT_600:
+ support_mask = CURRENT_LIMIT_600_MASK;
+ query_switch = CURRENT_LIMIT_600_QUERY_SWITCH_OK;
+ switch_busy = CURRENT_LIMIT_600_SWITCH_BUSY;
+ break;
+
+ case CURRENT_LIMIT_800:
+ support_mask = CURRENT_LIMIT_800_MASK;
+ query_switch = CURRENT_LIMIT_800_QUERY_SWITCH_OK;
+ switch_busy = CURRENT_LIMIT_800_SWITCH_BUSY;
+ break;
+
+ default:
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (func_group == SD_FUNC_GROUP_1) {
+ if (!(buf[support_offset] & support_mask) ||
+ ((buf[query_switch_offset] & 0x0F) != query_switch)) {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ /* Check 'Busy Status' */
+ if ((buf[DATA_STRUCTURE_VER_OFFSET] == 0x01) &&
+ ((buf[check_busy_offset] & switch_busy) == switch_busy)) {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode,
+ u8 func_group, u8 func_to_switch, u8 bus_width)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5], buf[64];
+
+ RTSX_DEBUGP("sd_check_switch_mode (mode = %d, func_group = %d, func_to_switch = %d)\n",
+ mode, func_group, func_to_switch);
+
+ cmd[0] = 0x40 | SWITCH;
+ cmd[1] = mode;
+
+ if (func_group == SD_FUNC_GROUP_1) {
+ cmd[2] = 0xFF;
+ cmd[3] = 0xFF;
+ cmd[4] = 0xF0 + func_to_switch;
+ } else if (func_group == SD_FUNC_GROUP_3) {
+ cmd[2] = 0xFF;
+ cmd[3] = 0xF0 + func_to_switch;
+ cmd[4] = 0xFF;
+ } else if (func_group == SD_FUNC_GROUP_4) {
+ cmd[2] = 0xFF;
+ cmd[3] = 0x0F + (func_to_switch << 4);
+ cmd[4] = 0xFF;
+ } else {
+ cmd[1] = SD_CHECK_MODE;
+ cmd[2] = 0xFF;
+ cmd[3] = 0xFF;
+ cmd[4] = 0xFF;
+ }
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1, bus_width,
+ buf, 64, 250);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_DUMP(buf, 64);
+
+ if (func_group == NO_ARGUMENT) {
+ sd_card->func_group1_mask = buf[0x0D];
+ sd_card->func_group2_mask = buf[0x0B];
+ sd_card->func_group3_mask = buf[0x09];
+ sd_card->func_group4_mask = buf[0x07];
+
+ RTSX_DEBUGP("func_group1_mask = 0x%02x\n", buf[0x0D]);
+ RTSX_DEBUGP("func_group2_mask = 0x%02x\n", buf[0x0B]);
+ RTSX_DEBUGP("func_group3_mask = 0x%02x\n", buf[0x09]);
+ RTSX_DEBUGP("func_group4_mask = 0x%02x\n", buf[0x07]);
+ } else {
+ /* Check the maximum current consumption to make sure it is
+ * acceptable; bits [511:496] = 0x0000 indicate that an error occurred.
+ */
+ u16 cc = ((u16)buf[0] << 8) | buf[1];
+ RTSX_DEBUGP("Maximum current consumption: %dmA\n", cc);
+ if ((cc == 0) || (cc > 800))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_query_switch_result(chip, func_group,
+ func_to_switch, buf, 64);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if ((cc > 400) || (func_to_switch > CURRENT_LIMIT_400)) {
+ RTSX_WRITE_REG(chip, OCPPARA2, SD_OCP_THD_MASK,
+ chip->sd_800mA_ocp_thd);
+ RTSX_WRITE_REG(chip, CARD_PWR_CTL, PMOS_STRG_MASK,
+ PMOS_STRG_800mA);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static u8 downgrade_switch_mode(u8 func_group, u8 func_to_switch)
+{
+ if (func_group == SD_FUNC_GROUP_1) {
+ if (func_to_switch > HS_SUPPORT)
+ func_to_switch--;
+
+ } else if (func_group == SD_FUNC_GROUP_4) {
+ if (func_to_switch > CURRENT_LIMIT_200)
+ func_to_switch--;
+ }
+
+ return func_to_switch;
+}
+
+static int sd_check_switch(struct rtsx_chip *chip,
+ u8 func_group, u8 func_to_switch, u8 bus_width)
+{
+ int retval;
+ int i;
+ int switch_good = 0;
+
+ for (i = 0; i < 3; i++) {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_check_switch_mode(chip, SD_CHECK_MODE, func_group,
+ func_to_switch, bus_width);
+ if (retval == STATUS_SUCCESS) {
+ u8 stat;
+
+ retval = sd_check_switch_mode(chip, SD_SWITCH_MODE,
+ func_group, func_to_switch, bus_width);
+ if (retval == STATUS_SUCCESS) {
+ switch_good = 1;
+ break;
+ }
+
+ RTSX_READ_REG(chip, SD_STAT1, &stat);
+ if (stat & SD_CRC16_ERR) {
+ RTSX_DEBUGP("SD CRC16 error when switching mode\n");
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ func_to_switch = downgrade_switch_mode(func_group,
+ func_to_switch);
+
+ wait_timeout(20);
+ }
+
+ if (!switch_good)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i;
+ u8 func_to_switch = 0;
+
+ /* Get supported functions */
+ retval = sd_check_switch_mode(chip, SD_CHECK_MODE,
+ NO_ARGUMENT, NO_ARGUMENT, bus_width);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ sd_card->func_group1_mask &= ~(sd_card->sd_switch_fail);
+
+ /* Function Group 1: Access Mode */
+ for (i = 0; i < 4; i++) {
+ switch ((u8)(chip->sd_speed_prior >> (i*8))) {
+ case SDR104_SUPPORT:
+ if ((sd_card->func_group1_mask & SDR104_SUPPORT_MASK)
+ && chip->sdr104_en) {
+ func_to_switch = SDR104_SUPPORT;
+ }
+ break;
+
+ case DDR50_SUPPORT:
+ if ((sd_card->func_group1_mask & DDR50_SUPPORT_MASK)
+ && chip->ddr50_en) {
+ func_to_switch = DDR50_SUPPORT;
+ }
+ break;
+
+ case SDR50_SUPPORT:
+ if ((sd_card->func_group1_mask & SDR50_SUPPORT_MASK)
+ && chip->sdr50_en) {
+ func_to_switch = SDR50_SUPPORT;
+ }
+ break;
+
+ case HS_SUPPORT:
+ if (sd_card->func_group1_mask & HS_SUPPORT_MASK)
+ func_to_switch = HS_SUPPORT;
+
+ break;
+
+ default:
+ continue;
+ }
+
+
+ if (func_to_switch)
+ break;
+
+ }
+ RTSX_DEBUGP("SD_FUNC_GROUP_1: func_to_switch = 0x%02x", func_to_switch);
+
+#ifdef SUPPORT_SD_LOCK
+ if ((sd_card->sd_lock_status & SD_SDR_RST)
+ && (DDR50_SUPPORT == func_to_switch)
+ && (sd_card->func_group1_mask & SDR50_SUPPORT_MASK)) {
+ func_to_switch = SDR50_SUPPORT;
+ RTSX_DEBUGP("Using SDR50 instead of DDR50 for SD Lock\n");
+ }
+#endif
+
+ if (func_to_switch) {
+ retval = sd_check_switch(chip, SD_FUNC_GROUP_1, func_to_switch,
+ bus_width);
+ if (retval != STATUS_SUCCESS) {
+ if (func_to_switch == SDR104_SUPPORT) {
+ sd_card->sd_switch_fail = SDR104_SUPPORT_MASK;
+ } else if (func_to_switch == DDR50_SUPPORT) {
+ sd_card->sd_switch_fail = SDR104_SUPPORT_MASK |
+ DDR50_SUPPORT_MASK;
+ } else if (func_to_switch == SDR50_SUPPORT) {
+ sd_card->sd_switch_fail = SDR104_SUPPORT_MASK |
+ DDR50_SUPPORT_MASK | SDR50_SUPPORT_MASK;
+ }
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (func_to_switch == SDR104_SUPPORT)
+ SET_SD_SDR104(sd_card);
+ else if (func_to_switch == DDR50_SUPPORT)
+ SET_SD_DDR50(sd_card);
+ else if (func_to_switch == SDR50_SUPPORT)
+ SET_SD_SDR50(sd_card);
+ else
+ SET_SD_HS(sd_card);
+ }
+
+ if (CHK_SD_DDR50(sd_card)) {
+ RTSX_WRITE_REG(chip, SD_PUSH_POINT_CTL, 0x06, 0x04);
+ retval = sd_set_sample_push_timing(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (!func_to_switch || (func_to_switch == HS_SUPPORT)) {
+ /* Do not try to switch the current limit if the card doesn't
+ * support UHS mode or we don't want to use UHS mode
+ */
+ return STATUS_SUCCESS;
+ }
+
+ /* Function Group 4: Current Limit */
+ func_to_switch = 0xFF;
+
+ for (i = 0; i < 4; i++) {
+ switch ((u8)(chip->sd_current_prior >> (i*8))) {
+ case CURRENT_LIMIT_800:
+ if (sd_card->func_group4_mask & CURRENT_LIMIT_800_MASK)
+ func_to_switch = CURRENT_LIMIT_800;
+
+ break;
+
+ case CURRENT_LIMIT_600:
+ if (sd_card->func_group4_mask & CURRENT_LIMIT_600_MASK)
+ func_to_switch = CURRENT_LIMIT_600;
+
+ break;
+
+ case CURRENT_LIMIT_400:
+ if (sd_card->func_group4_mask & CURRENT_LIMIT_400_MASK)
+ func_to_switch = CURRENT_LIMIT_400;
+
+ break;
+
+ case CURRENT_LIMIT_200:
+ if (sd_card->func_group4_mask & CURRENT_LIMIT_200_MASK)
+ func_to_switch = CURRENT_LIMIT_200;
+
+ break;
+
+ default:
+ continue;
+ }
+
+ if (func_to_switch != 0xFF)
+ break;
+ }
+
+ RTSX_DEBUGP("SD_FUNC_GROUP_4: func_to_switch = 0x%02x", func_to_switch);
+
+ if (func_to_switch <= CURRENT_LIMIT_800) {
+ retval = sd_check_switch(chip, SD_FUNC_GROUP_4, func_to_switch,
+ bus_width);
+ if (retval != STATUS_SUCCESS) {
+ if (sd_check_err_code(chip, SD_NO_CARD))
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ RTSX_DEBUGP("Switch current limit finished! (%d)\n", retval);
+ }
+
+ if (CHK_SD_DDR50(sd_card))
+ RTSX_WRITE_REG(chip, SD_PUSH_POINT_CTL, 0x06, 0);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_wait_data_idle(struct rtsx_chip *chip)
+{
+ int retval = STATUS_TIMEDOUT;
+ int i;
+ u8 val = 0;
+
+ for (i = 0; i < 100; i++) {
+ RTSX_READ_REG(chip, SD_DATA_STATE, &val);
+ if (val & SD_DATA_IDLE) {
+ retval = STATUS_SUCCESS;
+ break;
+ }
+ udelay(100);
+ }
+ RTSX_DEBUGP("SD_DATA_STATE: 0x%02x\n", val);
+
+ return retval;
+}
+
+static int sd_sdr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
+{
+ int retval;
+ u8 cmd[5];
+
+ retval = sd_change_phase(chip, sample_point, TUNE_RX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ cmd[0] = 0x40 | SEND_TUNING_PATTERN;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval = sd_read_data(chip, SD_TM_AUTO_TUNING,
+ cmd, 5, 0x40, 1, SD_BUS_WIDTH_4, NULL, 0, 100);
+ if (retval != STATUS_SUCCESS) {
+ (void)sd_wait_data_idle(chip);
+
+ rtsx_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5];
+
+ retval = sd_change_phase(chip, sample_point, TUNE_RX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_DEBUGP("sd ddr tuning rx\n");
+
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ cmd[0] = 0x40 | SD_STATUS;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ,
+ cmd, 5, 64, 1, SD_BUS_WIDTH_4, NULL, 0, 100);
+ if (retval != STATUS_SUCCESS) {
+ (void)sd_wait_data_idle(chip);
+
+ rtsx_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5], bus_width;
+
+ if (CHK_MMC_8BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_8;
+ else if (CHK_MMC_4BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_4;
+ else
+ bus_width = SD_BUS_WIDTH_1;
+
+ retval = sd_change_phase(chip, sample_point, TUNE_RX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_DEBUGP("mmc ddr tuning rx\n");
+
+ cmd[0] = 0x40 | SEND_EXT_CSD;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ,
+ cmd, 5, 0x200, 1, bus_width, NULL, 0, 100);
+ if (retval != STATUS_SUCCESS) {
+ (void)sd_wait_data_idle(chip);
+
+ rtsx_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ retval = sd_change_phase(chip, sample_point, TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
+ SD_RSP_80CLK_TIMEOUT_EN);
+
+ retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS) {
+ if (sd_check_err_code(chip, SD_RSP_TIMEOUT)) {
+ rtsx_write_register(chip, SD_CFG3,
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5], bus_width;
+
+ retval = sd_change_phase(chip, sample_point, TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_SD(sd_card)) {
+ bus_width = SD_BUS_WIDTH_4;
+ } else {
+ if (CHK_MMC_8BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_8;
+ else if (CHK_MMC_4BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_4;
+ else
+ bus_width = SD_BUS_WIDTH_1;
+ }
+
+ retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
+ SD_RSP_80CLK_TIMEOUT_EN);
+
+ cmd[0] = 0x40 | PROGRAM_CSD;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval = sd_write_data(chip, SD_TM_AUTO_WRITE_2,
+ cmd, 5, 16, 1, bus_width, sd_card->raw_csd, 16, 100);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_clear_sd_error(chip);
+ rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
+
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
+
+ return STATUS_SUCCESS;
+}
+
+static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
+ u8 tune_dir)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ struct timing_phase_path path[MAX_PHASE + 1];
+ int i, j, cont_path_cnt;
+ int new_block, max_len, final_path_idx;
+ u8 final_phase = 0xFF;
+
+ if (phase_map == 0xFFFFFFFF) {
+ if (tune_dir == TUNE_RX)
+ final_phase = (u8)chip->sd_default_rx_phase;
+ else
+ final_phase = (u8)chip->sd_default_tx_phase;
+
+ goto Search_Finish;
+ }
+
+ cont_path_cnt = 0;
+ new_block = 1;
+ j = 0;
+ for (i = 0; i < MAX_PHASE + 1; i++) {
+ if (phase_map & (1 << i)) {
+ if (new_block) {
+ new_block = 0;
+ j = cont_path_cnt++;
+ path[j].start = i;
+ path[j].end = i;
+ } else {
+ path[j].end = i;
+ }
+ } else {
+ new_block = 1;
+ if (cont_path_cnt) {
+ int idx = cont_path_cnt - 1;
+ path[idx].len = path[idx].end -
+ path[idx].start + 1;
+ path[idx].mid = path[idx].start +
+ path[idx].len / 2;
+ }
+ }
+ }
+
+ if (cont_path_cnt == 0) {
+ RTSX_DEBUGP("No continuous phase path\n");
+ goto Search_Finish;
+ } else {
+ int idx = cont_path_cnt - 1;
+ path[idx].len = path[idx].end - path[idx].start + 1;
+ path[idx].mid = path[idx].start + path[idx].len / 2;
+ }
+
+ if ((path[0].start == 0) &&
+ (path[cont_path_cnt - 1].end == MAX_PHASE)) {
+ path[0].start = path[cont_path_cnt - 1].start - MAX_PHASE - 1;
+ path[0].len += path[cont_path_cnt - 1].len;
+ path[0].mid = path[0].start + path[0].len / 2;
+ if (path[0].mid < 0)
+ path[0].mid += MAX_PHASE + 1;
+
+ cont_path_cnt--;
+ }
+
+ max_len = 0;
+ final_phase = 0;
+ final_path_idx = 0;
+ for (i = 0; i < cont_path_cnt; i++) {
+ if (path[i].len > max_len) {
+ max_len = path[i].len;
+ final_phase = (u8)path[i].mid;
+ final_path_idx = i;
+ }
+
+ RTSX_DEBUGP("path[%d].start = %d\n", i, path[i].start);
+ RTSX_DEBUGP("path[%d].end = %d\n", i, path[i].end);
+ RTSX_DEBUGP("path[%d].len = %d\n", i, path[i].len);
+ RTSX_DEBUGP("path[%d].mid = %d\n", i, path[i].mid);
+ RTSX_DEBUGP("\n");
+ }
+
+ if (tune_dir == TUNE_TX) {
+ if (CHK_SD_SDR104(sd_card)) {
+ if (max_len > 15) {
+ int temp_mid = (max_len - 16) / 2;
+ int temp_final_phase =
+ path[final_path_idx].end -
+ (max_len - (6 + temp_mid));
+
+ if (temp_final_phase < 0)
+ final_phase = (u8)(temp_final_phase +
+ MAX_PHASE + 1);
+ else
+ final_phase = (u8)temp_final_phase;
+ }
+ } else if (CHK_SD_SDR50(sd_card)) {
+ if (max_len > 12) {
+ int temp_mid = (max_len - 13) / 2;
+ int temp_final_phase =
+ path[final_path_idx].end -
+ (max_len - (3 + temp_mid));
+
+ if (temp_final_phase < 0)
+ final_phase = (u8)(temp_final_phase +
+ MAX_PHASE + 1);
+ else
+ final_phase = (u8)temp_final_phase;
+ }
+ }
+ }
+
+Search_Finish:
+ RTSX_DEBUGP("Final chosen phase: %d\n", final_phase);
+ return final_phase;
+}
+
+static int sd_tuning_rx(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i, j;
+ u32 raw_phase_map[3], phase_map;
+ u8 final_phase;
+ int (*tuning_cmd)(struct rtsx_chip *chip, u8 sample_point);
+
+ if (CHK_SD(sd_card)) {
+ if (CHK_SD_DDR50(sd_card))
+ tuning_cmd = sd_ddr_tuning_rx_cmd;
+ else
+ tuning_cmd = sd_sdr_tuning_rx_cmd;
+
+ } else {
+ if (CHK_MMC_DDR52(sd_card))
+ tuning_cmd = mmc_ddr_tunning_rx_cmd;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ for (i = 0; i < 3; i++) {
+ raw_phase_map[i] = 0;
+ for (j = MAX_PHASE; j >= 0; j--) {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = tuning_cmd(chip, (u8)j);
+ if (retval == STATUS_SUCCESS)
+ raw_phase_map[i] |= 1 << j;
+ }
+ }
+
+ phase_map = raw_phase_map[0] & raw_phase_map[1] & raw_phase_map[2];
+ for (i = 0; i < 3; i++)
+ RTSX_DEBUGP("RX raw_phase_map[%d] = 0x%08x\n", i,
+ raw_phase_map[i]);
+
+ RTSX_DEBUGP("RX phase_map = 0x%08x\n", phase_map);
+
+ final_phase = sd_search_final_phase(chip, phase_map, TUNE_RX);
+ if (final_phase == 0xFF)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_change_phase(chip, final_phase, TUNE_RX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i;
+ u32 phase_map;
+ u8 final_phase;
+
+ RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
+ SD_RSP_80CLK_TIMEOUT_EN);
+
+ phase_map = 0;
+ for (i = MAX_PHASE; i >= 0; i--) {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ rtsx_write_register(chip, SD_CFG3,
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_change_phase(chip, (u8)i, TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ continue;
+
+ retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr, SD_RSP_TYPE_R1, NULL,
+ 0);
+ if ((retval == STATUS_SUCCESS) ||
+ !sd_check_err_code(chip, SD_RSP_TIMEOUT))
+ phase_map |= 1 << i;
+ }
+
+ RTSX_WRITE_REG(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
+
+ RTSX_DEBUGP("DDR TX pre tune phase_map = 0x%08x\n", phase_map);
+
+ final_phase = sd_search_final_phase(chip, phase_map, TUNE_TX);
+ if (final_phase == 0xFF)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_change_phase(chip, final_phase, TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_DEBUGP("DDR TX pre tune phase: %d\n", (int)final_phase);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_tuning_tx(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int i, j;
+ u32 raw_phase_map[3], phase_map;
+ u8 final_phase;
+ int (*tuning_cmd)(struct rtsx_chip *chip, u8 sample_point);
+
+ if (CHK_SD(sd_card)) {
+ if (CHK_SD_DDR50(sd_card))
+ tuning_cmd = sd_ddr_tuning_tx_cmd;
+ else
+ tuning_cmd = sd_sdr_tuning_tx_cmd;
+
+ } else {
+ if (CHK_MMC_DDR52(sd_card))
+ tuning_cmd = sd_ddr_tuning_tx_cmd;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ for (i = 0; i < 3; i++) {
+ raw_phase_map[i] = 0;
+ for (j = MAX_PHASE; j >= 0; j--) {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ rtsx_write_register(chip, SD_CFG3,
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = tuning_cmd(chip, (u8)j);
+ if (retval == STATUS_SUCCESS)
+ raw_phase_map[i] |= 1 << j;
+ }
+ }
+
+ phase_map = raw_phase_map[0] & raw_phase_map[1] & raw_phase_map[2];
+ for (i = 0; i < 3; i++)
+ RTSX_DEBUGP("TX raw_phase_map[%d] = 0x%08x\n",
+ i, raw_phase_map[i]);
+
+ RTSX_DEBUGP("TX phase_map = 0x%08x\n", phase_map);
+
+ final_phase = sd_search_final_phase(chip, phase_map, TUNE_TX);
+ if (final_phase == 0xFF)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_change_phase(chip, final_phase, TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_sdr_tuning(struct rtsx_chip *chip)
+{
+ int retval;
+
+ retval = sd_tuning_tx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_tuning_rx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_ddr_tuning(struct rtsx_chip *chip)
+{
+ int retval;
+
+ if (!(chip->sd_ctl & SD_DDR_TX_PHASE_SET_BY_USER)) {
+ retval = sd_ddr_pre_tuning_tx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ retval = sd_change_phase(chip, (u8)chip->sd_ddr_tx_phase,
+ TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_tuning_rx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!(chip->sd_ctl & SD_DDR_TX_PHASE_SET_BY_USER)) {
+ retval = sd_tuning_tx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int mmc_ddr_tuning(struct rtsx_chip *chip)
+{
+ int retval;
+
+ if (!(chip->sd_ctl & MMC_DDR_TX_PHASE_SET_BY_USER)) {
+ retval = sd_ddr_pre_tuning_tx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ retval = sd_change_phase(chip, (u8)chip->mmc_ddr_tx_phase,
+ TUNE_TX);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_tuning_rx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!(chip->sd_ctl & MMC_DDR_TX_PHASE_SET_BY_USER)) {
+ retval = sd_tuning_tx(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int sd_switch_clock(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ int re_tuning = 0;
+
+ retval = select_card(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = switch_clock(chip, sd_card->sd_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (re_tuning) {
+ if (CHK_SD(sd_card)) {
+ if (CHK_SD_DDR50(sd_card))
+ retval = sd_ddr_tuning(chip);
+ else
+ retval = sd_sdr_tuning(chip);
+ } else {
+ if (CHK_MMC_DDR52(sd_card))
+ retval = mmc_ddr_tuning(chip);
+ }
+
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_prepare_reset(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ if (chip->asic_code)
+ sd_card->sd_clock = 29;
+ else
+ sd_card->sd_clock = CLK_30;
+
+ sd_card->sd_type = 0;
+ sd_card->seq_mode = 0;
+ sd_card->sd_data_buf_ready = 0;
+ sd_card->capacity = 0;
+
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status = 0;
+ sd_card->sd_erase_status = 0;
+#endif
+
+ chip->capacity[chip->card2lun[SD_CARD]] = 0;
+ chip->sd_io = 0;
+
+ retval = sd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ RTSX_WRITE_REG(chip, REG_SD_CFG1, 0xFF, 0x40);
+
+ RTSX_WRITE_REG(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ retval = select_card(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_pull_ctl_disable(struct rtsx_chip *chip)
+{
+ if (CHECK_PID(chip, 0x5208)) {
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF,
+ XD_D3_PD | SD_D7_PD | SD_CLK_PD | SD_D5_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF,
+ SD_D6_PD | SD_D0_PD | SD_D1_PD | XD_D5_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF,
+ SD_D4_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF,
+ XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL5, 0xFF,
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL6, 0xFF, MS_D5_PD | MS_D4_PD);
+ } else if (CHECK_PID(chip, 0x5288)) {
+ if (CHECK_BARO_PKG(chip, QFN)) {
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF, 0x55);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF, 0x55);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF, 0x4B);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF, 0x69);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int sd_pull_ctl_enable(struct rtsx_chip *chip)
+{
+ int retval;
+
+ rtsx_init_cmd(chip);
+
+ if (CHECK_PID(chip, 0x5208)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
+ XD_D3_PD | SD_DAT7_PU | SD_CLK_NP | SD_D5_PU);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
+ SD_D6_PU | SD_D0_PU | SD_D1_PU | XD_D5_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
+ SD_D4_PU | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
+ XD_RDY_PD | SD_D3_PU | SD_D2_PU | XD_ALE_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
+ MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
+ MS_D5_PD | MS_D4_PD);
+ } else if (CHECK_PID(chip, 0x5288)) {
+ if (CHECK_BARO_PKG(chip, QFN)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
+ 0xA8);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
+ 0x5A);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
+ 0x95);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
+ 0xAA);
+ }
+ }
+
+ retval = rtsx_send_cmd(chip, SD_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_init_power(struct rtsx_chip *chip)
+{
+ int retval;
+
+ retval = sd_power_off_card3v3(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!chip->ft2_fast_mode)
+ wait_timeout(250);
+
+ retval = enable_card_clock(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (chip->asic_code) {
+ retval = sd_pull_ctl_enable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ RTSX_WRITE_REG(chip, FPGA_PULL_CTL, FPGA_SD_PULL_CTL_BIT | 0x20,
+ 0);
+ }
+
+ if (!chip->ft2_fast_mode) {
+ retval = card_power_on(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(260);
+
+#ifdef SUPPORT_OCP
+ if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n",
+ chip->ocp_stat);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ }
+
+ RTSX_WRITE_REG(chip, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_dummy_clock(struct rtsx_chip *chip)
+{
+ RTSX_WRITE_REG(chip, REG_SD_CFG3, 0x01, 0x01);
+ wait_timeout(5);
+ RTSX_WRITE_REG(chip, REG_SD_CFG3, 0x01, 0);
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_read_lba0(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 cmd[5], bus_width;
+
+ cmd[0] = 0x40 | READ_SINGLE_BLOCK;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ if (CHK_SD(sd_card)) {
+ bus_width = SD_BUS_WIDTH_4;
+ } else {
+ if (CHK_MMC_8BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_8;
+ else if (CHK_MMC_4BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_4;
+ else
+ bus_width = SD_BUS_WIDTH_1;
+ }
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd,
+ 5, 512, 1, bus_width, NULL, 0, 100);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_clear_sd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sd_check_wp_state(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u32 val;
+ u16 sd_card_type;
+ u8 cmd[5], buf[64];
+
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD,
+ sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ cmd[0] = 0x40 | SD_STATUS;
+ cmd[1] = 0;
+ cmd[2] = 0;
+ cmd[3] = 0;
+ cmd[4] = 0;
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1,
+ SD_BUS_WIDTH_4, buf, 64, 250);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_clear_sd_error(chip);
+
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_DEBUGP("ACMD13:\n");
+ RTSX_DUMP(buf, 64);
+
+ sd_card_type = ((u16)buf[2] << 8) | buf[3];
+ RTSX_DEBUGP("sd_card_type = 0x%04x\n", sd_card_type);
+ if ((sd_card_type == 0x0001) || (sd_card_type == 0x0002)) {
+ /* ROM card or OTP */
+ chip->card_wp |= SD_CARD;
+ }
+
+ /* Check SD Mechanical Write-Protect Switch */
+ val = rtsx_readl(chip, RTSX_BIPR);
+ if (val & SD_WRITE_PROTECT)
+ chip->card_wp |= SD_CARD;
+
+ return STATUS_SUCCESS;
+}
+
+static int reset_sd(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval, i = 0, j = 0, k = 0, hi_cap_flow = 0;
+ int sd_dont_switch = 0;
+ int support_1v8 = 0;
+ int try_sdio = 1;
+ u8 rsp[16];
+ u8 switch_bus_width;
+ u32 voltage = 0;
+ int sd20_mode = 0;
+
+ SET_SD(sd_card);
+
+Switch_Fail:
+
+ i = 0;
+ j = 0;
+ k = 0;
+ hi_cap_flow = 0;
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON)
+ goto SD_UNLOCK_ENTRY;
+#endif
+
+ retval = sd_prepare_reset(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_dummy_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) && try_sdio) {
+ int rty_cnt = 0;
+
+ for (; rty_cnt < chip->sdio_retry_cnt; rty_cnt++) {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0,
+ SD_RSP_TYPE_R4, rsp, 5);
+ if (retval == STATUS_SUCCESS) {
+ int func_num = (rsp[1] >> 4) & 0x07;
+ if (func_num) {
+ RTSX_DEBUGP("SD_IO card (Function number: %d)!\n", func_num);
+ chip->sd_io = 1;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ break;
+ }
+
+ sd_init_power(chip);
+
+ sd_dummy_clock(chip);
+ }
+
+ RTSX_DEBUGP("Normal card!\n");
+ }
+
+ /* Start Initialization Process of SD Card */
+RTY_SD_RST:
+ retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(20);
+
+ retval = sd_send_cmd_get_rsp(chip, SEND_IF_COND, 0x000001AA,
+ SD_RSP_TYPE_R7, rsp, 5);
+ if (retval == STATUS_SUCCESS) {
+ if ((rsp[4] == 0xAA) && ((rsp[3] & 0x0f) == 0x01)) {
+ hi_cap_flow = 1;
+ voltage = SUPPORT_VOLTAGE | 0x40000000;
+ }
+ }
+
+ if (!hi_cap_flow) {
+ voltage = SUPPORT_VOLTAGE;
+
+ retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0,
+ SD_RSP_TYPE_R0, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(20);
+ }
+
+ do {
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD, 0, SD_RSP_TYPE_R1,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS) {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ j++;
+ if (j < 3)
+ goto RTY_SD_RST;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, SD_APP_OP_COND, voltage,
+ SD_RSP_TYPE_R3, rsp, 5);
+ if (retval != STATUS_SUCCESS) {
+ k++;
+ if (k < 3)
+ goto RTY_SD_RST;
+ else
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ i++;
+ wait_timeout(20);
+ } while (!(rsp[1] & 0x80) && (i < 255));
+
+ if (i == 255)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (hi_cap_flow) {
+ if (rsp[1] & 0x40)
+ SET_SD_HCXC(sd_card);
+ else
+ CLR_SD_HCXC(sd_card);
+
+ support_1v8 = 0;
+ } else {
+ CLR_SD_HCXC(sd_card);
+ support_1v8 = 0;
+ }
+ RTSX_DEBUGP("support_1v8 = %d\n", support_1v8);
+
+ if (support_1v8) {
+ retval = sd_voltage_switch(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ for (i = 0; i < 3; i++) {
+ retval = sd_send_cmd_get_rsp(chip, SEND_RELATIVE_ADDR, 0,
+ SD_RSP_TYPE_R6, rsp, 5);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ sd_card->sd_addr = (u32)rsp[1] << 24;
+ sd_card->sd_addr += (u32)rsp[2] << 16;
+
+ if (sd_card->sd_addr)
+ break;
+ }
+
+ retval = sd_check_csd(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_select_card(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+#ifdef SUPPORT_SD_LOCK
+SD_UNLOCK_ENTRY:
+ retval = sd_update_lock_status(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (sd_card->sd_lock_status & SD_LOCKED) {
+ sd_card->sd_lock_status |= (SD_LOCK_1BIT_MODE | SD_PWD_EXIST);
+ return STATUS_SUCCESS;
+ } else if (!(sd_card->sd_lock_status & SD_UNLOCK_POW_ON)) {
+ sd_card->sd_lock_status &= ~SD_PWD_EXIST;
+ }
+#endif
+
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_send_cmd_get_rsp(chip, SET_CLR_CARD_DETECT, 0,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (support_1v8) {
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ switch_bus_width = SD_BUS_WIDTH_4;
+ } else {
+ switch_bus_width = SD_BUS_WIDTH_1;
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!(sd_card->raw_csd[4] & 0x40))
+ sd_dont_switch = 1;
+
+ if (!sd_dont_switch) {
+ if (sd20_mode) {
+ /*
+ * Set sd_switch_fail here, because we don't need to
+ * switch to UHS mode
+ */
+ sd_card->sd_switch_fail = SDR104_SUPPORT_MASK |
+ DDR50_SUPPORT_MASK | SDR50_SUPPORT_MASK;
+ }
+
+ /* Check whether the card follows the SD 1.1 spec or higher */
+ retval = sd_check_spec(chip, switch_bus_width);
+ if (retval == STATUS_SUCCESS) {
+ retval = sd_switch_function(chip, switch_bus_width);
+ if (retval != STATUS_SUCCESS) {
+ sd_init_power(chip);
+ sd_dont_switch = 1;
+ try_sdio = 0;
+
+ goto Switch_Fail;
+ }
+ } else {
+ if (support_1v8) {
+ sd_init_power(chip);
+ sd_dont_switch = 1;
+ try_sdio = 0;
+
+ goto Switch_Fail;
+ }
+ }
+ }
+
+ if (!support_1v8) {
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
+ SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
+#endif
+
+ if (!sd20_mode && CHK_SD30_SPEED(sd_card)) {
+ int read_lba0 = 1;
+
+ RTSX_WRITE_REG(chip, SD30_DRIVE_SEL, 0x07,
+ chip->sd30_drive_sel_1v8);
+
+ retval = sd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (CHK_SD_DDR50(sd_card))
+ retval = sd_ddr_tuning(chip);
+ else
+ retval = sd_sdr_tuning(chip);
+
+ if (retval != STATUS_SUCCESS) {
+ if (sd20_mode) {
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ try_sdio = 0;
+ sd20_mode = 1;
+ goto Switch_Fail;
+ }
+ }
+
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+
+ if (CHK_SD_DDR50(sd_card)) {
+ retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
+ if (retval != STATUS_SUCCESS)
+ read_lba0 = 0;
+ }
+
+ if (read_lba0) {
+ retval = sd_read_lba0(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (sd20_mode) {
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ try_sdio = 0;
+ sd20_mode = 1;
+ goto Switch_Fail;
+ }
+ }
+ }
+ }
+
+ retval = sd_check_wp_state(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON) {
+ RTSX_WRITE_REG(chip, REG_SD_BLOCK_CNT_H, 0xFF, 0x02);
+ RTSX_WRITE_REG(chip, REG_SD_BLOCK_CNT_L, 0xFF, 0x00);
+ }
+#endif
+
+ return STATUS_SUCCESS;
+}
+
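+/*
+ * Verify that an MMC bus width works by exchanging the BUSTEST_W/BUSTEST_R
+ * test pattern, then commit the width to EXT_CSD with a SWITCH (CMD6).
+ */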
+static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 buf[8] = {0}, bus_width, *ptr;
+ u16 byte_cnt;
+ int len;
+
+ retval = sd_send_cmd_get_rsp(chip, BUSTEST_W, 0, SD_RSP_TYPE_R1, NULL,
+ 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, SWITCH_FAIL);
+
+ if (width == MMC_8BIT_BUS) {
+ buf[0] = 0x55;
+ buf[1] = 0xAA;
+ len = 8;
+ byte_cnt = 8;
+ bus_width = SD_BUS_WIDTH_8;
+ } else {
+ buf[0] = 0x5A;
+ len = 4;
+ byte_cnt = 4;
+ bus_width = SD_BUS_WIDTH_4;
+ }
+
+ retval = rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0x02);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, SWITCH_ERR);
+
+ retval = sd_write_data(chip, SD_TM_AUTO_WRITE_3,
+ NULL, 0, byte_cnt, 1, bus_width, buf, len, 100);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_clear_sd_error(chip);
+ rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0);
+ TRACE_RET(chip, SWITCH_ERR);
+ }
+
+ retval = rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, SWITCH_ERR);
+
+ RTSX_DEBUGP("SD/MMC CMD %d\n", BUSTEST_R);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 0x40 | BUSTEST_R);
+
+ if (width == MMC_8BIT_BUS)
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L,
+ 0xFF, 0x08);
+ else
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L,
+ 0xFF, 0x04);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_NO_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+ SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ SD_TM_NORMAL_READ | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
+ SD_TRANSFER_END);
+
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2, 0, 0);
+ if (width == MMC_8BIT_BUS)
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 1, 0, 0);
+
+ retval = rtsx_send_cmd(chip, SD_CARD, 100);
+ if (retval < 0) {
+ rtsx_clear_sd_error(chip);
+ TRACE_RET(chip, SWITCH_ERR);
+ }
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+
+ if (width == MMC_8BIT_BUS) {
+ RTSX_DEBUGP("BUSTEST_R [8bits]: 0x%02x 0x%02x\n", ptr[0],
+ ptr[1]);
+ if ((ptr[0] == 0xAA) && (ptr[1] == 0x55)) {
+ u8 rsp[5];
+ u32 arg;
+
+ if (CHK_MMC_DDR52(sd_card))
+ arg = 0x03B70600;
+ else
+ arg = 0x03B70200;
+
+ retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
+ SD_RSP_TYPE_R1b, rsp, 5);
+ if ((retval == STATUS_SUCCESS) &&
+ !(rsp[4] & MMC_SWITCH_ERR))
+ return SWITCH_SUCCESS;
+ }
+ } else {
+ RTSX_DEBUGP("BUSTEST_R [4bits]: 0x%02x\n", ptr[0]);
+ if (ptr[0] == 0xA5) {
+ u8 rsp[5];
+ u32 arg;
+
+ if (CHK_MMC_DDR52(sd_card))
+ arg = 0x03B70500;
+ else
+ arg = 0x03B70100;
+
+ retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
+ SD_RSP_TYPE_R1b, rsp, 5);
+ if ((retval == STATUS_SUCCESS) &&
+ !(rsp[4] & MMC_SWITCH_ERR))
+ return SWITCH_SUCCESS;
+ }
+ }
+
+ TRACE_RET(chip, SWITCH_FAIL);
+}
+
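+/*
+ * Read EXT_CSD (CMD8) for the card type and sector count, switch the card
+ * to high-speed timing with CMD6 when it is supported, then probe for the
+ * widest usable bus (8-bit, falling back to 4-bit) via mmc_test_switch_bus.
+ */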
+static int mmc_switch_timing_bus(struct rtsx_chip *chip, int switch_ddr)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+ u8 *ptr, card_type, card_type_mask = 0;
+
+ CLR_MMC_HS(sd_card);
+
+ RTSX_DEBUGP("SD/MMC CMD %d\n", SEND_EXT_CSD);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
+ 0x40 | SEND_EXT_CSD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, 0);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 2);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+ SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ SD_TM_NORMAL_READ | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
+ SD_TRANSFER_END);
+
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 196, 0xFF, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 212, 0xFF, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 213, 0xFF, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 214, 0xFF, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 215, 0xFF, 0);
+
+ retval = rtsx_send_cmd(chip, SD_CARD, 1000);
+ if (retval < 0) {
+ if (retval == -ETIMEDOUT) {
+ rtsx_clear_sd_error(chip);
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ }
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ ptr = rtsx_get_cmd_data(chip);
+ if (ptr[0] & SD_TRANSFER_ERR) {
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (CHK_MMC_SECTOR_MODE(sd_card)) {
+ sd_card->capacity = ((u32)ptr[5] << 24) | ((u32)ptr[4] << 16) |
+ ((u32)ptr[3] << 8) | ((u32)ptr[2]);
+ }
+
+ /*
+ * EXT_CSD CARD_TYPE: bit 0 = HS 26 MHz, bit 1 = HS 52 MHz,
+ * bit 2 = DDR 52 MHz. Include the DDR bit so that the DDR52
+ * branch below is reachable.
+ */
+ card_type_mask = 0x07;
+ card_type = ptr[1] & card_type_mask;
+ if (card_type) {
+ u8 rsp[5];
+
+ if (card_type & 0x04) {
+ if (switch_ddr)
+ SET_MMC_DDR52(sd_card);
+ else
+ SET_MMC_52M(sd_card);
+ } else if (card_type & 0x02) {
+ SET_MMC_52M(sd_card);
+ } else {
+ SET_MMC_26M(sd_card);
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, SWITCH,
+ 0x03B90100, SD_RSP_TYPE_R1b, rsp, 5);
+ if ((retval != STATUS_SUCCESS) || (rsp[4] & MMC_SWITCH_ERR))
+ CLR_MMC_HS(sd_card);
+ }
+
+ sd_choose_proper_clock(chip);
+ retval = switch_clock(chip, sd_card->sd_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ /* Test Bus Procedure */
+ retval = mmc_test_switch_bus(chip, MMC_8BIT_BUS);
+ if (retval == SWITCH_SUCCESS) {
+ SET_MMC_8BIT(sd_card);
+ chip->card_bus_width[chip->card2lun[SD_CARD]] = 8;
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
+#endif
+ } else if (retval == SWITCH_FAIL) {
+ retval = mmc_test_switch_bus(chip, MMC_4BIT_BUS);
+ if (retval == SWITCH_SUCCESS) {
+ SET_MMC_4BIT(sd_card);
+ chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
+#endif
+ } else if (retval == SWITCH_FAIL) {
+ CLR_MMC_8BIT(sd_card);
+ CLR_MMC_4BIT(sd_card);
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
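+/*
+ * Bring up an MMC/eMMC card: CMD0, poll CMD1 (SEND_OP_COND) until ready,
+ * read the CID, assign an RCA with CMD3, read the CSD, select the card and,
+ * for MMC 4.x cards, switch timing/bus width and optionally DDR mode.
+ */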
+static int reset_mmc(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval, i = 0, j = 0, k = 0;
+ int switch_ddr = 1;
+ u8 rsp[16];
+ u8 spec_ver = 0;
+ u32 temp;
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON)
+ goto MMC_UNLOCK_ENTRY;
+#endif
+
+Switch_Fail:
+ retval = sd_prepare_reset(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+
+ SET_MMC(sd_card);
+
+RTY_MMC_RST:
+ retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ do {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, SEND_OP_COND,
+ (SUPPORT_VOLTAGE | 0x40000000),
+ SD_RSP_TYPE_R3, rsp, 5);
+ if (retval != STATUS_SUCCESS) {
+ if (sd_check_err_code(chip, SD_BUSY) ||
+ sd_check_err_code(chip, SD_TO_ERR)) {
+ k++;
+ if (k < 20) {
+ sd_clr_err_code(chip);
+ goto RTY_MMC_RST;
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ j++;
+ if (j < 100) {
+ sd_clr_err_code(chip);
+ goto RTY_MMC_RST;
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ wait_timeout(20);
+ i++;
+ } while (!(rsp[1] & 0x80) && (i < 255));
+
+ if (i == 255)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if ((rsp[1] & 0x60) == 0x40)
+ SET_MMC_SECTOR_MODE(sd_card);
+ else
+ CLR_MMC_SECTOR_MODE(sd_card);
+
+ retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ sd_card->sd_addr = 0x00100000;
+ retval = sd_send_cmd_get_rsp(chip, SET_RELATIVE_ADDR, sd_card->sd_addr,
+ SD_RSP_TYPE_R6, rsp, 5);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_check_csd(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ spec_ver = (sd_card->raw_csd[0] & 0x3C) >> 2;
+
+ retval = sd_select_card(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1,
+ NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+#ifdef SUPPORT_SD_LOCK
+MMC_UNLOCK_ENTRY:
+ retval = sd_update_lock_status(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+#endif
+
+ retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ chip->card_bus_width[chip->card2lun[SD_CARD]] = 1;
+
+ if (!sd_card->mmc_dont_switch_bus) {
+ if (spec_ver == 4) {
+ /* MMC 4.x Cards */
+ retval = mmc_switch_timing_bus(chip, switch_ddr);
+ if (retval != STATUS_SUCCESS) {
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ sd_card->mmc_dont_switch_bus = 1;
+ TRACE_GOTO(chip, Switch_Fail);
+ }
+ }
+
+ if (CHK_MMC_SECTOR_MODE(sd_card) && (sd_card->capacity == 0))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (switch_ddr && CHK_MMC_DDR52(sd_card)) {
+ retval = sd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = mmc_ddr_tuning(chip);
+ if (retval != STATUS_SUCCESS) {
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ switch_ddr = 0;
+ TRACE_GOTO(chip, Switch_Fail);
+ }
+
+ retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
+ if (retval == STATUS_SUCCESS) {
+ retval = sd_read_lba0(chip);
+ if (retval != STATUS_SUCCESS) {
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ switch_ddr = 0;
+ TRACE_GOTO(chip, Switch_Fail);
+ }
+ }
+ }
+ }
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_lock_status & SD_UNLOCK_POW_ON) {
+ RTSX_WRITE_REG(chip, REG_SD_BLOCK_CNT_H, 0xFF, 0x02);
+ RTSX_WRITE_REG(chip, REG_SD_BLOCK_CNT_L, 0xFF, 0x00);
+ }
+#endif
+
+ temp = rtsx_readl(chip, RTSX_BIPR);
+ if (temp & SD_WRITE_PROTECT)
+ chip->card_wp |= SD_CARD;
+
+ return STATUS_SUCCESS;
+}
+
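+/*
+ * Top-level card reset: power up the socket and try SD or MMC
+ * initialization first according to the RESET_MMC_FIRST policy, falling
+ * back to the other card type if the first attempt fails.
+ */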
+int reset_sd_card(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ sd_init_reg_addr(chip);
+
+ memset(sd_card, 0, sizeof(struct sd_info));
+ chip->capacity[chip->card2lun[SD_CARD]] = 0;
+
+ retval = enable_card_clock(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (chip->ignore_sd && CHK_SDIO_EXIST(chip) &&
+ !CHK_SDIO_IGNORED(chip)) {
+ if (chip->asic_code) {
+ retval = sd_pull_ctl_enable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ retval = rtsx_write_register(chip, FPGA_PULL_CTL,
+ FPGA_SD_PULL_CTL_BIT | 0x20, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ retval = card_share_mode(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ chip->sd_io = 1;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (chip->sd_ctl & RESET_MMC_FIRST) {
+ retval = reset_mmc(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (sd_check_err_code(chip, SD_NO_CARD))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = reset_sd(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ retval = reset_sd(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (sd_check_err_code(chip, SD_NO_CARD))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (chip->sd_io) {
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ retval = reset_mmc(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, REG_SD_BYTE_CNT_L, 0xFF, 0);
+ RTSX_WRITE_REG(chip, REG_SD_BYTE_CNT_H, 0xFF, 2);
+
+ chip->capacity[chip->card2lun[SD_CARD]] = sd_card->capacity;
+
+ retval = sd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_DEBUGP("sd_card->sd_type = 0x%x\n", sd_card->sd_type);
+
+ return STATUS_SUCCESS;
+}
+
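+/*
+ * Re-initialize the card strictly as MMC; used from the sd_rw error path
+ * after a CRC error on a wide MMC bus.
+ */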
+static int reset_mmc_only(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ sd_card->sd_type = 0;
+ sd_card->seq_mode = 0;
+ sd_card->sd_data_buf_ready = 0;
+ sd_card->capacity = 0;
+ sd_card->sd_switch_fail = 0;
+
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status = 0;
+ sd_card->sd_erase_status = 0;
+#endif
+
+ chip->capacity[chip->card2lun[SD_CARD]] = sd_card->capacity = 0;
+
+ retval = enable_card_clock(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = reset_mmc(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, REG_SD_BYTE_CNT_L, 0xFF, 0);
+ RTSX_WRITE_REG(chip, REG_SD_BYTE_CNT_H, 0xFF, 2);
+
+ chip->capacity[chip->card2lun[SD_CARD]] = sd_card->capacity;
+
+ retval = sd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_DEBUGP("In reset_mmc_only, sd_card->sd_type = 0x%x\n",
+ sd_card->sd_type);
+
+ return STATUS_SUCCESS;
+}
+
+#define WAIT_DATA_READY_RTY_CNT 255
+
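+/* Poll SEND_STATUS (CMD13) until the card reports its data buffer ready. */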
+static int wait_data_buf_ready(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int i, retval;
+
+ for (i = 0; i < WAIT_DATA_READY_RTY_CNT; i++) {
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ sd_card->sd_data_buf_ready = 0;
+
+ retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (sd_card->sd_data_buf_ready) {
+ return sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+ }
+ }
+
+ sd_set_err_code(chip, SD_TO_ERR);
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
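+/*
+ * Terminate an open-ended multi-block transfer with STOP_TRANSMISSION
+ * (CMD12) and flush the host ring buffer.
+ */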
+void sd_stop_seq_mode(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ if (sd_card->seq_mode) {
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ return;
+
+ retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
+ SD_RSP_TYPE_R1b, NULL, 0);
+ if (retval != STATUS_SUCCESS)
+ sd_set_err_code(chip, SD_STS_ERR);
+
+ retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
+ if (retval != STATUS_SUCCESS)
+ sd_set_err_code(chip, SD_STS_ERR);
+
+ sd_card->seq_mode = 0;
+
+ rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
+ }
+}
+
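+/* Step the SD clock down one notch (used after CRC errors) and re-apply it. */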
+static inline int sd_auto_tune_clock(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ if (chip->asic_code) {
+ if (sd_card->sd_clock > 30)
+ sd_card->sd_clock -= 20;
+ } else {
+ switch (sd_card->sd_clock) {
+ case CLK_200:
+ sd_card->sd_clock = CLK_150;
+ break;
+
+ case CLK_150:
+ sd_card->sd_clock = CLK_120;
+ break;
+
+ case CLK_120:
+ sd_card->sd_clock = CLK_100;
+ break;
+
+ case CLK_100:
+ sd_card->sd_clock = CLK_80;
+ break;
+
+ case CLK_80:
+ sd_card->sd_clock = CLK_60;
+ break;
+
+ case CLK_60:
+ sd_card->sd_clock = CLK_50;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
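+/*
+ * Main read/write path. Consecutive accesses in the same direction are kept
+ * inside one open-ended multi-block transfer (seq_mode); a change of
+ * direction or a non-contiguous address first stops the previous transfer
+ * with CMD12.
+ */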
+int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
+ u16 sector_cnt)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ u32 data_addr;
+ u8 cfg2;
+ int retval;
+
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ RTSX_DEBUGP("sd_rw: Read %d %s from 0x%x\n", sector_cnt,
+ (sector_cnt > 1) ? "sectors" : "sector", start_sector);
+ } else {
+ RTSX_DEBUGP("sd_rw: Write %d %s to 0x%x\n", sector_cnt,
+ (sector_cnt > 1) ? "sectors" : "sector", start_sector);
+ }
+
+ sd_card->cleanup_counter = 0;
+
+ if (!(chip->card_ready & SD_CARD)) {
+ sd_card->seq_mode = 0;
+
+ retval = reset_sd_card(chip);
+ if (retval == STATUS_SUCCESS) {
+ chip->card_ready |= SD_CARD;
+ chip->card_fail &= ~SD_CARD;
+ } else {
+ chip->card_ready &= ~SD_CARD;
+ chip->card_fail |= SD_CARD;
+ chip->capacity[chip->card2lun[SD_CARD]] = 0;
+ chip->rw_need_retry = 1;
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (!CHK_SD_HCXC(sd_card) && !CHK_MMC_SECTOR_MODE(sd_card))
+ data_addr = start_sector << 9;
+ else
+ data_addr = start_sector;
+
+ sd_clr_err_code(chip);
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_IO_ERR);
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ if (sd_card->seq_mode &&
+ ((sd_card->pre_dir != srb->sc_data_direction) ||
+ ((sd_card->pre_sec_addr + sd_card->pre_sec_cnt) !=
+ start_sector))) {
+ if ((sd_card->pre_sec_cnt < 0x80)
+ && (sd_card->pre_dir == DMA_FROM_DEVICE)
+ && !CHK_SD30_SPEED(sd_card)
+ && !CHK_SD_HS(sd_card)
+ && !CHK_MMC_HS(sd_card)) {
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
+ 0, SD_RSP_TYPE_R1b, NULL, 0);
+ if (retval != STATUS_SUCCESS) {
+ chip->rw_need_retry = 1;
+ sd_set_err_code(chip, SD_STS_ERR);
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ sd_card->seq_mode = 0;
+
+ retval = rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
+ if (retval != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_IO_ERR);
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ if ((sd_card->pre_sec_cnt < 0x80)
+ && !CHK_SD30_SPEED(sd_card)
+ && !CHK_SD_HS(sd_card)
+ && !CHK_MMC_HS(sd_card)) {
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
+ }
+ }
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 0x00);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 0x02);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
+ (u8)sector_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
+ (u8)(sector_cnt >> 8));
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
+
+ if (CHK_MMC_8BIT(sd_card))
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
+ 0x03, SD_BUS_WIDTH_8);
+ else if (CHK_MMC_4BIT(sd_card) || CHK_SD(sd_card))
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
+ 0x03, SD_BUS_WIDTH_4);
+ else
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
+ 0x03, SD_BUS_WIDTH_1);
+
+ if (sd_card->seq_mode) {
+ cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 |
+ SD_RSP_LEN_0;
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, cfg2);
+
+ trans_dma_enable(srb->sc_data_direction, chip, sector_cnt * 512,
+ DMA_512);
+
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_READ_3 | SD_TRANSFER_START);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
+ }
+
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+ } else {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ RTSX_DEBUGP("SD/MMC CMD %d\n", READ_MULTIPLE_BLOCK);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
+ 0x40 | READ_MULTIPLE_BLOCK);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF,
+ (u8)(data_addr >> 24));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF,
+ (u8)(data_addr >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF,
+ (u8)(data_addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF,
+ (u8)data_addr);
+
+ cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 |
+ SD_RSP_LEN_6;
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
+ cfg2);
+
+ trans_dma_enable(srb->sc_data_direction, chip,
+ sector_cnt * 512, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_READ_2 | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+ } else {
+ retval = rtsx_send_cmd(chip, SD_CARD, 50);
+ if (retval < 0) {
+ rtsx_clear_sd_error(chip);
+
+ chip->rw_need_retry = 1;
+ sd_set_err_code(chip, SD_TO_ERR);
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ retval = wait_data_buf_ready(chip);
+ if (retval != STATUS_SUCCESS) {
+ chip->rw_need_retry = 1;
+ sd_set_err_code(chip, SD_TO_ERR);
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ retval = sd_send_cmd_get_rsp(chip, WRITE_MULTIPLE_BLOCK,
+ data_addr, SD_RSP_TYPE_R1, NULL, 0);
+ if (retval != STATUS_SUCCESS) {
+ chip->rw_need_retry = 1;
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ rtsx_init_cmd(chip);
+
+ cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END |
+ SD_NO_CHECK_CRC7 | SD_RSP_LEN_0;
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
+ cfg2);
+
+ trans_dma_enable(srb->sc_data_direction, chip,
+ sector_cnt * 512, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+ }
+
+ sd_card->seq_mode = 1;
+ }
+
+ retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
+ scsi_bufflen(srb), scsi_sg_count(srb),
+ srb->sc_data_direction, chip->sd_timeout);
+ if (retval < 0) {
+ u8 stat = 0;
+ int err;
+
+ sd_card->seq_mode = 0;
+
+ if (retval == -ETIMEDOUT)
+ err = STATUS_TIMEDOUT;
+ else
+ err = STATUS_FAIL;
+
+ rtsx_read_register(chip, REG_SD_STAT1, &stat);
+ rtsx_clear_sd_error(chip);
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ chip->rw_need_retry = 0;
+ RTSX_DEBUGP("No card exist, exit sd_rw\n");
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ chip->rw_need_retry = 1;
+
+ retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
+ SD_RSP_TYPE_R1b, NULL, 0);
+ if (retval != STATUS_SUCCESS) {
+ sd_set_err_code(chip, SD_STS_ERR);
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ if (stat & (SD_CRC7_ERR | SD_CRC16_ERR | SD_CRC_WRITE_ERR)) {
+ RTSX_DEBUGP("SD CRC error, tune clock!\n");
+ sd_set_err_code(chip, SD_CRC_ERR);
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ if (err == STATUS_TIMEDOUT) {
+ sd_set_err_code(chip, SD_TO_ERR);
+ TRACE_GOTO(chip, RW_FAIL);
+ }
+
+ TRACE_RET(chip, err);
+ }
+
+ sd_card->pre_sec_addr = start_sector;
+ sd_card->pre_sec_cnt = sector_cnt;
+ sd_card->pre_dir = srb->sc_data_direction;
+
+ return STATUS_SUCCESS;
+
+RW_FAIL:
+ sd_card->seq_mode = 0;
+
+ if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
+ chip->rw_need_retry = 0;
+ RTSX_DEBUGP("No card exist, exit sd_rw\n");
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (sd_check_err_code(chip, SD_CRC_ERR)) {
+ if (CHK_MMC_4BIT(sd_card) || CHK_MMC_8BIT(sd_card)) {
+ sd_card->mmc_dont_switch_bus = 1;
+ reset_mmc_only(chip);
+ sd_card->mmc_dont_switch_bus = 0;
+ } else {
+ sd_card->need_retune = 1;
+ sd_auto_tune_clock(chip);
+ }
+ } else if (sd_check_err_code(chip, SD_TO_ERR | SD_STS_ERR)) {
+ retval = reset_sd_card(chip);
+ if (retval != STATUS_SUCCESS) {
+ chip->card_ready &= ~SD_CARD;
+ chip->card_fail |= SD_CARD;
+ chip->capacity[chip->card2lun[SD_CARD]] = 0;
+ }
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+#ifdef SUPPORT_CPRM
+int soft_reset_sd_card(struct rtsx_chip *chip)
+{
+ return reset_sd(chip);
+}
+
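+/*
+ * Send a raw SD/MMC command on behalf of the pass-through interface and
+ * collect its response from the host registers, retrying on CRC7 errors.
+ */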
+int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
+ u32 arg, u8 rsp_type, u8 *rsp, int rsp_len, int special_check)
+{
+ int retval;
+ int timeout = 100;
+ u16 reg_addr;
+ u8 *ptr;
+ int stat_idx = 0;
+ int rty_cnt = 0;
+
+ RTSX_DEBUGP("EXT SD/MMC CMD %d\n", cmd_idx);
+
+ if (rsp_type == SD_RSP_TYPE_R1b)
+ timeout = 3000;
+
+RTY_SEND_CMD:
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF, 0x40 | cmd_idx);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, (u8)(arg >> 24));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, (u8)(arg >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, (u8)(arg >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF, (u8)arg);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
+ 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
+ SD_TRANSFER_END);
+
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
+ reg_addr++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
+
+ stat_idx = 17;
+ } else if (rsp_type != SD_RSP_TYPE_R0) {
+ for (reg_addr = REG_SD_CMD0; reg_addr <= REG_SD_CMD4;
+ reg_addr++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
+
+ stat_idx = 6;
+ }
+ rtsx_add_cmd(chip, READ_REG_CMD, REG_SD_CMD5, 0, 0);
+
+ rtsx_add_cmd(chip, READ_REG_CMD, REG_SD_STAT1, 0, 0);
+
+ retval = rtsx_send_cmd(chip, SD_CARD, timeout);
+ if (retval < 0) {
+ if (retval == -ETIMEDOUT) {
+ rtsx_clear_sd_error(chip);
+
+ if (rsp_type & SD_WAIT_BUSY_END) {
+ retval = sd_check_data0_status(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, retval);
+ } else {
+ sd_set_err_code(chip, SD_TO_ERR);
+ }
+ }
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (rsp_type == SD_RSP_TYPE_R0)
+ return STATUS_SUCCESS;
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+
+ if ((ptr[0] & 0xC0) != 0) {
+ sd_set_err_code(chip, SD_STS_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (!(rsp_type & SD_NO_CHECK_CRC7)) {
+ if (ptr[stat_idx] & SD_CRC7_ERR) {
+ if (cmd_idx == WRITE_MULTIPLE_BLOCK) {
+ sd_set_err_code(chip, SD_CRC_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (rty_cnt < SD_MAX_RETRY_COUNT) {
+ wait_timeout(20);
+ rty_cnt++;
+ goto RTY_SEND_CMD;
+ } else {
+ sd_set_err_code(chip, SD_CRC_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ if ((cmd_idx == SELECT_CARD) || (cmd_idx == APP_CMD) ||
+ (cmd_idx == SEND_STATUS) || (cmd_idx == STOP_TRANSMISSION)) {
+ if ((cmd_idx != STOP_TRANSMISSION) && (special_check == 0)) {
+ if (ptr[1] & 0x80)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#ifdef SUPPORT_SD_LOCK
+ if (ptr[1] & 0x7D)
+#else
+ if (ptr[1] & 0x7F)
+#endif
+ {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ if (ptr[2] & 0xF8)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (cmd_idx == SELECT_CARD) {
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ if ((ptr[3] & 0x1E) != 0x04)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ } else if (rsp_type == SD_RSP_TYPE_R0) {
+ if ((ptr[3] & 0x1E) != 0x03)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ if (rsp && rsp_len)
+ memcpy(rsp, ptr, rsp_len);
+
+ return STATUS_SUCCESS;
+}
+
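+/* Read back the response of the previous pass-through command from the host. */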
+int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type)
+{
+ int retval, rsp_len;
+ u16 reg_addr;
+
+ if (rsp_type == SD_RSP_TYPE_R0)
+ return STATUS_SUCCESS;
+
+ rtsx_init_cmd(chip);
+
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
+ reg_addr++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0xFF, 0);
+
+ rsp_len = 17;
+ } else if (rsp_type != SD_RSP_TYPE_R0) {
+ for (reg_addr = REG_SD_CMD0; reg_addr <= REG_SD_CMD4;
+ reg_addr++)
+ rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0xFF, 0);
+
+ rsp_len = 6;
+ }
+ rtsx_add_cmd(chip, READ_REG_CMD, REG_SD_CMD5, 0xFF, 0);
+
+ retval = rtsx_send_cmd(chip, SD_CARD, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (rsp) {
+ int min_len = (rsp_len < len) ? rsp_len : len;
+
+ memcpy(rsp, rtsx_get_cmd_data(chip), min_len);
+
+ RTSX_DEBUGP("min_len = %d\n", min_len);
+ RTSX_DEBUGP("Response in cmd buf: 0x%x 0x%x 0x%x 0x%x\n",
+ rsp[0], rsp[1], rsp[2], rsp[3]);
+ }
+
+ return STATUS_SUCCESS;
+}
+
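+/*
+ * Vendor pass-through handshake: the CDB must carry the "SD Card"
+ * signature; it enables or disables raw command pass-through and returns
+ * basic card information (type, write protect, address, max LUN).
+ */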
+int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int len;
+ u8 buf[18] = {
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x0E,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x53,
+ 0x44,
+ 0x20,
+ 0x43,
+ 0x61,
+ 0x72,
+ 0x64,
+ 0x00,
+ 0x00,
+ 0x00,
+ };
+
+ sd_card->pre_cmd_err = 0;
+
+ if (!(CHK_BIT(chip->lun_mc, lun))) {
+ SET_BIT(chip->lun_mc, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if ((0x53 != srb->cmnd[2]) || (0x44 != srb->cmnd[3]) ||
+ (0x20 != srb->cmnd[4]) || (0x43 != srb->cmnd[5]) ||
+ (0x61 != srb->cmnd[6]) || (0x72 != srb->cmnd[7]) ||
+ (0x64 != srb->cmnd[8])) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ switch (srb->cmnd[1] & 0x0F) {
+ case 0:
+ sd_card->sd_pass_thru_en = 0;
+ break;
+
+ case 1:
+ sd_card->sd_pass_thru_en = 1;
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ buf[5] = (1 == CHK_SD(sd_card)) ? 0x01 : 0x02;
+ if (chip->card_wp & SD_CARD)
+ buf[5] |= 0x80;
+
+ buf[6] = (u8)(sd_card->sd_addr >> 16);
+ buf[7] = (u8)(sd_card->sd_addr >> 24);
+
+ buf[15] = chip->max_lun;
+
+ len = min_t(int, 18, scsi_bufflen(srb));
+ rtsx_stor_set_xfer_buf(buf, len, srb);
+
+ return TRANSPORT_GOOD;
+}
+
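+/*
+ * Map the response-type code in the pass-through CDB to an SD response
+ * type and its length in bytes.
+ */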
+static inline int get_rsp_type(struct scsi_cmnd *srb, u8 *rsp_type,
+ int *rsp_len)
+{
+ if (!rsp_type || !rsp_len)
+ return STATUS_FAIL;
+
+ switch (srb->cmnd[10]) {
+ case 0x03:
+ *rsp_type = SD_RSP_TYPE_R0;
+ *rsp_len = 0;
+ break;
+
+ case 0x04:
+ *rsp_type = SD_RSP_TYPE_R1;
+ *rsp_len = 6;
+ break;
+
+ case 0x05:
+ *rsp_type = SD_RSP_TYPE_R1b;
+ *rsp_len = 6;
+ break;
+
+ case 0x06:
+ *rsp_type = SD_RSP_TYPE_R2;
+ *rsp_len = 17;
+ break;
+
+ case 0x07:
+ *rsp_type = SD_RSP_TYPE_R3;
+ *rsp_len = 6;
+ break;
+
+ default:
+ return STATUS_FAIL;
+ }
+
+ return STATUS_SUCCESS;
+}
+
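+/* Execute a pass-through command that has no data phase. */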
+int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval, rsp_len;
+ u8 cmd_idx, rsp_type;
+ u8 standby = 0, acmd = 0;
+ u32 arg;
+
+ if (!sd_card->sd_pass_thru_en) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ if (sd_card->pre_cmd_err) {
+ sd_card->pre_cmd_err = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ cmd_idx = srb->cmnd[2] & 0x3F;
+ if (srb->cmnd[1] & 0x02)
+ standby = 1;
+
+ if (srb->cmnd[1] & 0x01)
+ acmd = 1;
+
+ arg = ((u32)srb->cmnd[3] << 24) | ((u32)srb->cmnd[4] << 16) |
+ ((u32)srb->cmnd[5] << 8) | srb->cmnd[6];
+
+ retval = get_rsp_type(srb, &rsp_type, &rsp_len);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ sd_card->last_rsp_type = rsp_type;
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+#ifdef SUPPORT_SD_LOCK
+ if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
+ if (CHK_MMC_8BIT(sd_card)) {
+ retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
+ SD_BUS_WIDTH_8);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) {
+ retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
+ SD_BUS_WIDTH_4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+#else
+ retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+#endif
+
+ if (standby) {
+ retval = sd_select_card(chip, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Cmd_Failed);
+ }
+
+ if (acmd) {
+ retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Cmd_Failed);
+ }
+
+ retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
+ sd_card->rsp, rsp_len, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Cmd_Failed);
+
+ if (standby) {
+ retval = sd_select_card(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Cmd_Failed);
+ }
+
+#ifdef SUPPORT_SD_LOCK
+ retval = sd_update_lock_status(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Cmd_Failed);
+#endif
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+
+SD_Execute_Cmd_Failed:
+ sd_card->pre_cmd_err = 1;
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ release_sd_card(chip);
+ do_reset_sd_card(chip);
+ if (!(chip->card_ready & SD_CARD))
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+
+ TRACE_RET(chip, TRANSPORT_FAILED);
+}
+
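+/*
+ * Execute a pass-through command with a data-in phase: transfers of up to
+ * 512 bytes go through the ping-pong buffer, 512-byte multiples use DMA.
+ */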
+int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval, rsp_len, i;
+ int cmd13_checkbit = 0, read_err = 0;
+ u8 cmd_idx, rsp_type, bus_width;
+ u8 send_cmd12 = 0, standby = 0, acmd = 0;
+ u32 data_len;
+
+ if (!sd_card->sd_pass_thru_en) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (sd_card->pre_cmd_err) {
+ sd_card->pre_cmd_err = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ cmd_idx = srb->cmnd[2] & 0x3F;
+ if (srb->cmnd[1] & 0x04)
+ send_cmd12 = 1;
+
+ if (srb->cmnd[1] & 0x02)
+ standby = 1;
+
+ if (srb->cmnd[1] & 0x01)
+ acmd = 1;
+
+ data_len = ((u32)srb->cmnd[7] << 16) | ((u32)srb->cmnd[8]
+ << 8) | srb->cmnd[9];
+
+ retval = get_rsp_type(srb, &rsp_type, &rsp_len);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ sd_card->last_rsp_type = rsp_type;
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+#ifdef SUPPORT_SD_LOCK
+ if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
+ if (CHK_MMC_8BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_8;
+ else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card))
+ bus_width = SD_BUS_WIDTH_4;
+ else
+ bus_width = SD_BUS_WIDTH_1;
+ } else {
+ bus_width = SD_BUS_WIDTH_4;
+ }
+ RTSX_DEBUGP("bus_width = %d\n", bus_width);
+#else
+ bus_width = SD_BUS_WIDTH_4;
+#endif
+
+ if (data_len < 512) {
+ retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if (standby) {
+ retval = sd_select_card(chip, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if (acmd) {
+ retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if (data_len <= 512) {
+ int min_len;
+ u8 *buf;
+ u16 byte_cnt, blk_cnt;
+ u8 cmd[5];
+
+ byte_cnt = ((u16)(srb->cmnd[8] & 0x03) << 8) | srb->cmnd[9];
+ blk_cnt = 1;
+
+ cmd[0] = 0x40 | cmd_idx;
+ cmd[1] = srb->cmnd[3];
+ cmd[2] = srb->cmnd[4];
+ cmd[3] = srb->cmnd[5];
+ cmd[4] = srb->cmnd[6];
+
+ buf = kmalloc(data_len, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, byte_cnt,
+ blk_cnt, bus_width, buf, data_len, 2000);
+ if (retval != STATUS_SUCCESS) {
+ read_err = 1;
+ kfree(buf);
+ rtsx_clear_sd_error(chip);
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ min_len = min(data_len, scsi_bufflen(srb));
+ rtsx_stor_set_xfer_buf(buf, min_len, srb);
+
+ kfree(buf);
+ } else if (!(data_len & 0x1FF)) {
+ rtsx_init_cmd(chip);
+
+ trans_dma_enable(DMA_FROM_DEVICE, chip, data_len, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
+ 0x02);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
+ 0x00);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H,
+ 0xFF, (srb->cmnd[7] & 0xFE) >> 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L,
+ 0xFF, (u8)((data_len & 0x0001FE00) >> 9));
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
+ 0x40 | cmd_idx);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF,
+ srb->cmnd[3]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF,
+ srb->cmnd[4]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF,
+ srb->cmnd[5]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF,
+ srb->cmnd[6]);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
+ 0xFF, SD_TM_AUTO_READ_2 | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
+ scsi_bufflen(srb), scsi_sg_count(srb),
+ DMA_FROM_DEVICE, 10000);
+ if (retval < 0) {
+ read_err = 1;
+ rtsx_clear_sd_error(chip);
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ } else {
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ retval = ext_sd_get_rsp(chip, rsp_len, sd_card->rsp, rsp_type);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+
+ if (standby) {
+ retval = sd_select_card(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if (send_cmd12) {
+ retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
+ 0, SD_RSP_TYPE_R1b, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if (data_len < 512) {
+ retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+
+ retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+
+ retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+ }
+
+ if ((srb->cmnd[1] & 0x02) || (srb->cmnd[1] & 0x04))
+ cmd13_checkbit = 1;
+
+ for (i = 0; i < 3; i++) {
+ retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0,
+ cmd13_checkbit);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Read_Cmd_Failed);
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+
+SD_Execute_Read_Cmd_Failed:
+ sd_card->pre_cmd_err = 1;
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ if (read_err)
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+
+ release_sd_card(chip);
+ do_reset_sd_card(chip);
+ if (!(chip->card_ready & SD_CARD))
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+
+ TRACE_RET(chip, TRANSPORT_FAILED);
+}
+
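+/*
+ * Execute a pass-through command with a data-out phase: small transfers are
+ * staged in the ping-pong buffer, 512-byte multiples use DMA; LOCK_UNLOCK
+ * commands additionally track the SD lock state when SUPPORT_SD_LOCK is set.
+ */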
+int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval, rsp_len, i;
+ int cmd13_checkbit = 0, write_err = 0;
+ u8 cmd_idx, rsp_type;
+ u8 send_cmd12 = 0, standby = 0, acmd = 0;
+ u32 data_len, arg;
+#ifdef SUPPORT_SD_LOCK
+ int lock_cmd_fail = 0;
+ u8 sd_lock_state = 0;
+ u8 lock_cmd_type = 0;
+#endif
+
+ if (!sd_card->sd_pass_thru_en) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (sd_card->pre_cmd_err) {
+ sd_card->pre_cmd_err = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ cmd_idx = srb->cmnd[2] & 0x3F;
+ if (srb->cmnd[1] & 0x04)
+ send_cmd12 = 1;
+
+ if (srb->cmnd[1] & 0x02)
+ standby = 1;
+
+ if (srb->cmnd[1] & 0x01)
+ acmd = 1;
+
+ data_len = ((u32)srb->cmnd[7] << 16) | ((u32)srb->cmnd[8]
+ << 8) | srb->cmnd[9];
+ arg = ((u32)srb->cmnd[3] << 24) | ((u32)srb->cmnd[4] << 16) |
+ ((u32)srb->cmnd[5] << 8) | srb->cmnd[6];
+
+#ifdef SUPPORT_SD_LOCK
+ if (cmd_idx == LOCK_UNLOCK) {
+ sd_lock_state = sd_card->sd_lock_status;
+ sd_lock_state &= SD_LOCKED;
+ }
+#endif
+
+ retval = get_rsp_type(srb, &rsp_type, &rsp_len);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ sd_card->last_rsp_type = rsp_type;
+
+ retval = sd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+#ifdef SUPPORT_SD_LOCK
+ if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
+ if (CHK_MMC_8BIT(sd_card)) {
+ retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
+ SD_BUS_WIDTH_8);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+
+ } else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) {
+ retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
+ SD_BUS_WIDTH_4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+#else
+ retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+#endif
+
+ if (data_len < 512) {
+ retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if (standby) {
+ retval = sd_select_card(chip, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if (acmd) {
+ retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
+ sd_card->rsp, rsp_len, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+
+ if (data_len <= 512) {
+ u16 i;
+ u8 *buf;
+
+ buf = kmalloc(data_len, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rtsx_stor_get_xfer_buf(buf, data_len, srb);
+
+#ifdef SUPPORT_SD_LOCK
+ if (cmd_idx == LOCK_UNLOCK)
+ lock_cmd_type = buf[0] & 0x0F;
+#endif
+
+ if (data_len > 256) {
+ rtsx_init_cmd(chip);
+ for (i = 0; i < 256; i++) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ PPBUF_BASE2 + i, 0xFF, buf[i]);
+ }
+ retval = rtsx_send_cmd(chip, 0, 250);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ rtsx_init_cmd(chip);
+ for (i = 256; i < data_len; i++) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ PPBUF_BASE2 + i, 0xFF, buf[i]);
+ }
+ retval = rtsx_send_cmd(chip, 0, 250);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+ } else {
+ rtsx_init_cmd(chip);
+ for (i = 0; i < data_len; i++) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD,
+ PPBUF_BASE2 + i, 0xFF, buf[i]);
+ }
+ retval = rtsx_send_cmd(chip, 0, 250);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+ }
+
+ kfree(buf);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
+ srb->cmnd[8] & 0x03);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
+ srb->cmnd[9]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
+ 0x00);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
+ 0x01);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, SD_CARD, 250);
+ } else if (!(data_len & 0x1FF)) {
+ rtsx_init_cmd(chip);
+
+ trans_dma_enable(DMA_TO_DEVICE, chip, data_len, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
+ 0x02);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
+ 0x00);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H,
+ 0xFF, (srb->cmnd[7] & 0xFE) >> 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L,
+ 0xFF, (u8)((data_len & 0x0001FE00) >> 9));
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
+ SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
+ scsi_bufflen(srb), scsi_sg_count(srb),
+ DMA_TO_DEVICE, 10000);
+
+ } else {
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if (retval < 0) {
+ write_err = 1;
+ rtsx_clear_sd_error(chip);
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+#ifdef SUPPORT_SD_LOCK
+ if (cmd_idx == LOCK_UNLOCK) {
+ if (lock_cmd_type == SD_ERASE) {
+ sd_card->sd_erase_status = SD_UNDER_ERASING;
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+ }
+
+ rtsx_init_cmd(chip);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, 0xFD30, 0x02, 0x02);
+
+ rtsx_send_cmd(chip, SD_CARD, 250);
+
+ retval = sd_update_lock_status(chip);
+ if (retval != STATUS_SUCCESS) {
+ RTSX_DEBUGP("Lock command fail!\n");
+ lock_cmd_fail = 1;
+ }
+ }
+#endif /* SUPPORT_SD_LOCK */
+
+ if (standby) {
+ retval = sd_select_card(chip, 1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if (send_cmd12) {
+ retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
+ 0, SD_RSP_TYPE_R1b, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if (data_len < 512) {
+ retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
+ SD_RSP_TYPE_R1, NULL, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+
+ retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+
+ retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+
+ if ((srb->cmnd[1] & 0x02) || (srb->cmnd[1] & 0x04))
+ cmd13_checkbit = 1;
+
+ for (i = 0; i < 3; i++) {
+ retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS,
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0,
+ cmd13_checkbit);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (retval != STATUS_SUCCESS)
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+
+#ifdef SUPPORT_SD_LOCK
+ if (cmd_idx == LOCK_UNLOCK) {
+ if (!lock_cmd_fail) {
+ RTSX_DEBUGP("lock_cmd_type = 0x%x\n", lock_cmd_type);
+ if (lock_cmd_type & SD_CLR_PWD)
+ sd_card->sd_lock_status &= ~SD_PWD_EXIST;
+
+ if (lock_cmd_type & SD_SET_PWD)
+ sd_card->sd_lock_status |= SD_PWD_EXIST;
+ }
+
+ RTSX_DEBUGP("sd_lock_state = 0x%x, sd_card->sd_lock_status = 0x%x\n",
+ sd_lock_state, sd_card->sd_lock_status);
+ if (sd_lock_state ^ (sd_card->sd_lock_status & SD_LOCKED)) {
+ sd_card->sd_lock_notify = 1;
+ if (sd_lock_state) {
+ if (sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) {
+ sd_card->sd_lock_status |= (
+ SD_UNLOCK_POW_ON | SD_SDR_RST);
+ if (CHK_SD(sd_card)) {
+ retval = reset_sd(chip);
+ if (retval != STATUS_SUCCESS) {
+ sd_card->sd_lock_status &= ~(SD_UNLOCK_POW_ON | SD_SDR_RST);
+ TRACE_GOTO(chip, SD_Execute_Write_Cmd_Failed);
+ }
+ }
+
+ sd_card->sd_lock_status &= ~(SD_UNLOCK_POW_ON | SD_SDR_RST);
+ }
+ }
+ }
+ }
+
+ if (lock_cmd_fail) {
+ scsi_set_resid(srb, 0);
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+#endif /* SUPPORT_SD_LOCK */
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+
+SD_Execute_Write_Cmd_Failed:
+ sd_card->pre_cmd_err = 1;
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ if (write_err)
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+
+ release_sd_card(chip);
+ do_reset_sd_card(chip);
+ if (!(chip->card_ready & SD_CARD))
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+
+ TRACE_RET(chip, TRANSPORT_FAILED);
+}
+
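+/* Return the response saved from the last pass-through command. */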
+int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int count;
+ u16 data_len;
+
+ if (!sd_card->sd_pass_thru_en) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (sd_card->pre_cmd_err) {
+ sd_card->pre_cmd_err = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ data_len = ((u16)srb->cmnd[7] << 8) | srb->cmnd[8];
+
+ if (sd_card->last_rsp_type == SD_RSP_TYPE_R0) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else if (sd_card->last_rsp_type == SD_RSP_TYPE_R2) {
+ count = (data_len < 17) ? data_len : 17;
+ } else {
+ count = (data_len < 6) ? data_len : 6;
+ }
+ rtsx_stor_set_xfer_buf(sd_card->rsp, count, srb);
+
+ RTSX_DEBUGP("Response length: %d\n", data_len);
+ RTSX_DEBUGP("Response: 0x%x 0x%x 0x%x 0x%x\n", sd_card->rsp[0],
+ sd_card->rsp[1], sd_card->rsp[2], sd_card->rsp[3]);
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+
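+/*
+ * Pass-through reset request: either a full re-initialization of the card
+ * or a soft reset (reset_sd only), selected by the CDB.
+ */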
+int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+
+ if (!sd_card->sd_pass_thru_en) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (sd_card->pre_cmd_err) {
+ sd_card->pre_cmd_err = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if ((0x53 != srb->cmnd[2]) || (0x44 != srb->cmnd[3]) ||
+ (0x20 != srb->cmnd[4]) || (0x43 != srb->cmnd[5]) ||
+ (0x61 != srb->cmnd[6]) || (0x72 != srb->cmnd[7]) ||
+ (0x64 != srb->cmnd[8])) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ switch (srb->cmnd[1] & 0x0F) {
+ case 0:
+#ifdef SUPPORT_SD_LOCK
+ if (0x64 == srb->cmnd[9])
+ sd_card->sd_lock_status |= SD_SDR_RST;
+#endif
+ retval = reset_sd_card(chip);
+ if (retval != STATUS_SUCCESS) {
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status &= ~SD_SDR_RST;
+#endif
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ sd_card->pre_cmd_err = 1;
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status &= ~SD_SDR_RST;
+#endif
+ break;
+
+ case 1:
+ retval = soft_reset_sd_card(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ sd_card->pre_cmd_err = 1;
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+#endif
+
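+/* Cleanup hook: close any open-ended transfer still left in seq_mode. */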
+void sd_cleanup_work(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+
+ if (sd_card->seq_mode) {
+ RTSX_DEBUGP("SD: stop transmission\n");
+ sd_stop_seq_mode(chip);
+ sd_card->cleanup_counter = 0;
+ }
+}
+
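+/* Gate the card clock, disable the output drivers and power off the socket. */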
+int sd_power_off_card3v3(struct rtsx_chip *chip)
+{
+ int retval;
+
+ retval = disable_card_clock(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, CARD_OE, SD_OUTPUT_EN, 0);
+
+ if (!chip->ft2_fast_mode) {
+ retval = card_power_off(chip, SD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(50);
+ }
+
+ if (chip->asic_code) {
+ retval = sd_pull_ctl_disable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ RTSX_WRITE_REG(chip, FPGA_PULL_CTL,
+ FPGA_SD_PULL_CTL_BIT | 0x20, FPGA_SD_PULL_CTL_BIT);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int release_sd_card(struct rtsx_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ int retval;
+
+ RTSX_DEBUGP("release_sd_card\n");
+
+ chip->card_ready &= ~SD_CARD;
+ chip->card_fail &= ~SD_CARD;
+ chip->card_wp &= ~SD_CARD;
+
+ chip->sd_io = 0;
+ chip->sd_int = 0;
+
+#ifdef SUPPORT_SD_LOCK
+ sd_card->sd_lock_status = 0;
+ sd_card->sd_erase_status = 0;
+#endif
+
+ memset(sd_card->raw_csd, 0, 16);
+ memset(sd_card->raw_scr, 0, 8);
+
+ retval = sd_power_off_card3v3(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
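The sd_get_cmd_rsp() handler near the top of sd.c sizes the pass-through response according to the last response type: R2 responses (CSD/CID) carry up to 17 bytes, every other type up to 6, and the copy is clamped to the length requested in the CDB. A minimal standalone sketch of that sizing rule, assuming a boolean is_r2_rsp flag in place of the driver's last_rsp_type comparison; the helper name is hypothetical.

static int sd_rsp_copy_len(int is_r2_rsp, unsigned short data_len)
{
	/* R2 responses are 17 bytes; all other response types are 6 */
	int max = is_r2_rsp ? 17 : 6;

	return (data_len < max) ? (int)data_len : max;
}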
diff --git a/drivers/staging/rts5208/sd.h b/drivers/staging/rts5208/sd.h
new file mode 100644
index 000000000000..735b2d0f5a78
--- /dev/null
+++ b/drivers/staging/rts5208/sd.h
@@ -0,0 +1,301 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_SD_H
+#define __REALTEK_RTSX_SD_H
+
+#include "rtsx_chip.h"
+
+#define SUPPORT_VOLTAGE 0x003C0000
+
+/* Error Code */
+#define SD_NO_ERROR 0x0
+#define SD_CRC_ERR 0x80
+#define SD_TO_ERR 0x40
+#define SD_NO_CARD 0x20
+#define SD_BUSY 0x10
+#define SD_STS_ERR 0x08
+#define SD_RSP_TIMEOUT 0x04
+#define SD_IO_ERR 0x02
+
+/* Return code for MMC switch bus */
+#define SWITCH_SUCCESS 0
+#define SWITCH_ERR 1
+#define SWITCH_FAIL 2
+
+/* MMC/SD Command Index */
+/* Basic command (class 0) */
+#define GO_IDLE_STATE 0
+#define SEND_OP_COND 1
+#define ALL_SEND_CID 2
+#define SET_RELATIVE_ADDR 3
+#define SEND_RELATIVE_ADDR 3
+#define SET_DSR 4
+#define IO_SEND_OP_COND 5
+#define SWITCH 6
+#define SELECT_CARD 7
+#define DESELECT_CARD 7
+/* CMD8 is "SEND_EXT_CSD" in the MMC 4.x spec,
+ * while it is "SEND_IF_COND" for SD 2.0
+ */
+#define SEND_EXT_CSD 8
+#define SEND_IF_COND 8
+
+#define SEND_CSD 9
+#define SEND_CID 10
+#define VOLTAGE_SWITCH 11
+#define READ_DAT_UTIL_STOP 11
+#define STOP_TRANSMISSION 12
+#define SEND_STATUS 13
+#define GO_INACTIVE_STATE 15
+
+#define SET_BLOCKLEN 16
+#define READ_SINGLE_BLOCK 17
+#define READ_MULTIPLE_BLOCK 18
+#define SEND_TUNING_PATTERN 19
+
+#define BUSTEST_R 14
+#define BUSTEST_W 19
+
+#define WRITE_BLOCK 24
+#define WRITE_MULTIPLE_BLOCK 25
+#define PROGRAM_CSD 27
+
+#define ERASE_WR_BLK_START 32
+#define ERASE_WR_BLK_END 33
+#define ERASE_CMD 38
+
+#define LOCK_UNLOCK 42
+#define IO_RW_DIRECT 52
+
+#define APP_CMD 55
+#define GEN_CMD 56
+
+#define SET_BUS_WIDTH 6
+#define SD_STATUS 13
+#define SEND_NUM_WR_BLOCKS 22
+#define SET_WR_BLK_ERASE_COUNT 23
+#define SD_APP_OP_COND 41
+#define SET_CLR_CARD_DETECT 42
+#define SEND_SCR 51
+
+#define SD_READ_COMPLETE 0x00
+#define SD_READ_TO 0x01
+#define SD_READ_ADVENCE 0x02
+
+#define SD_CHECK_MODE 0x00
+#define SD_SWITCH_MODE 0x80
+#define SD_FUNC_GROUP_1 0x01
+#define SD_FUNC_GROUP_2 0x02
+#define SD_FUNC_GROUP_3 0x03
+#define SD_FUNC_GROUP_4 0x04
+#define SD_CHECK_SPEC_V1_1 0xFF
+
+#define NO_ARGUMENT 0x00
+#define CHECK_PATTERN 0x000000AA
+#define VOLTAGE_SUPPLY_RANGE 0x00000100
+#define SUPPORT_HIGH_AND_EXTENDED_CAPACITY 0x40000000
+#define SUPPORT_MAX_POWER_PERMANCE 0x10000000
+#define SUPPORT_1V8 0x01000000
+
+#define SWTICH_NO_ERR 0x00
+#define CARD_NOT_EXIST 0x01
+#define SPEC_NOT_SUPPORT 0x02
+#define CHECK_MODE_ERR 0x03
+#define CHECK_NOT_READY 0x04
+#define SWITCH_CRC_ERR 0x05
+#define SWITCH_MODE_ERR 0x06
+#define SWITCH_PASS 0x07
+
+#ifdef SUPPORT_SD_LOCK
+#define SD_ERASE 0x08
+#define SD_LOCK 0x04
+#define SD_UNLOCK 0x00
+#define SD_CLR_PWD 0x02
+#define SD_SET_PWD 0x01
+
+#define SD_PWD_LEN 0x10
+
+#define SD_LOCKED 0x80
+#define SD_LOCK_1BIT_MODE 0x40
+#define SD_PWD_EXIST 0x20
+#define SD_UNLOCK_POW_ON 0x01
+#define SD_SDR_RST 0x02
+
+#define SD_NOT_ERASE 0x00
+#define SD_UNDER_ERASING 0x01
+#define SD_COMPLETE_ERASE 0x02
+
+#define SD_RW_FORBIDDEN 0x0F
+
+#endif
+
+#define HS_SUPPORT 0x01
+#define SDR50_SUPPORT 0x02
+#define SDR104_SUPPORT 0x03
+#define DDR50_SUPPORT 0x04
+
+#define HS_SUPPORT_MASK 0x02
+#define SDR50_SUPPORT_MASK 0x04
+#define SDR104_SUPPORT_MASK 0x08
+#define DDR50_SUPPORT_MASK 0x10
+
+#define HS_QUERY_SWITCH_OK 0x01
+#define SDR50_QUERY_SWITCH_OK 0x02
+#define SDR104_QUERY_SWITCH_OK 0x03
+#define DDR50_QUERY_SWITCH_OK 0x04
+
+#define HS_SWITCH_BUSY 0x02
+#define SDR50_SWITCH_BUSY 0x04
+#define SDR104_SWITCH_BUSY 0x08
+#define DDR50_SWITCH_BUSY 0x10
+
+#define FUNCTION_GROUP1_SUPPORT_OFFSET 0x0D
+#define FUNCTION_GROUP1_QUERY_SWITCH_OFFSET 0x10
+#define FUNCTION_GROUP1_CHECK_BUSY_OFFSET 0x1D
+
+#define DRIVING_TYPE_A 0x01
+#define DRIVING_TYPE_B 0x00
+#define DRIVING_TYPE_C 0x02
+#define DRIVING_TYPE_D 0x03
+
+#define DRIVING_TYPE_A_MASK 0x02
+#define DRIVING_TYPE_B_MASK 0x01
+#define DRIVING_TYPE_C_MASK 0x04
+#define DRIVING_TYPE_D_MASK 0x08
+
+#define TYPE_A_QUERY_SWITCH_OK 0x01
+#define TYPE_B_QUERY_SWITCH_OK 0x00
+#define TYPE_C_QUERY_SWITCH_OK 0x02
+#define TYPE_D_QUERY_SWITCH_OK 0x03
+
+#define TYPE_A_SWITCH_BUSY 0x02
+#define TYPE_B_SWITCH_BUSY 0x01
+#define TYPE_C_SWITCH_BUSY 0x04
+#define TYPE_D_SWITCH_BUSY 0x08
+
+#define FUNCTION_GROUP3_SUPPORT_OFFSET 0x09
+#define FUNCTION_GROUP3_QUERY_SWITCH_OFFSET 0x0F
+#define FUNCTION_GROUP3_CHECK_BUSY_OFFSET 0x19
+
+#define CURRENT_LIMIT_200 0x00
+#define CURRENT_LIMIT_400 0x01
+#define CURRENT_LIMIT_600 0x02
+#define CURRENT_LIMIT_800 0x03
+
+#define CURRENT_LIMIT_200_MASK 0x01
+#define CURRENT_LIMIT_400_MASK 0x02
+#define CURRENT_LIMIT_600_MASK 0x04
+#define CURRENT_LIMIT_800_MASK 0x08
+
+#define CURRENT_LIMIT_200_QUERY_SWITCH_OK 0x00
+#define CURRENT_LIMIT_400_QUERY_SWITCH_OK 0x01
+#define CURRENT_LIMIT_600_QUERY_SWITCH_OK 0x02
+#define CURRENT_LIMIT_800_QUERY_SWITCH_OK 0x03
+
+#define CURRENT_LIMIT_200_SWITCH_BUSY 0x01
+#define CURRENT_LIMIT_400_SWITCH_BUSY 0x02
+#define CURRENT_LIMIT_600_SWITCH_BUSY 0x04
+#define CURRENT_LIMIT_800_SWITCH_BUSY 0x08
+
+#define FUNCTION_GROUP4_SUPPORT_OFFSET 0x07
+#define FUNCTION_GROUP4_QUERY_SWITCH_OFFSET 0x0F
+#define FUNCTION_GROUP4_CHECK_BUSY_OFFSET 0x17
+
+#define DATA_STRUCTURE_VER_OFFSET 0x11
+
+#define MAX_PHASE 31
+
+#define MMC_8BIT_BUS 0x0010
+#define MMC_4BIT_BUS 0x0020
+
+#define MMC_SWITCH_ERR 0x80
+
+#define SD_IO_3V3 0
+#define SD_IO_1V8 1
+
+#define TUNE_TX 0x00
+#define TUNE_RX 0x01
+
+#define CHANGE_TX 0x00
+#define CHANGE_RX 0x01
+
+#define DCM_HIGH_FREQUENCY_MODE 0x00
+#define DCM_LOW_FREQUENCY_MODE 0x01
+
+#define DCM_HIGH_FREQUENCY_MODE_SET 0x0C
+#define DCM_Low_FREQUENCY_MODE_SET 0x00
+
+#define MULTIPLY_BY_1 0x00
+#define MULTIPLY_BY_2 0x01
+#define MULTIPLY_BY_3 0x02
+#define MULTIPLY_BY_4 0x03
+#define MULTIPLY_BY_5 0x04
+#define MULTIPLY_BY_6 0x05
+#define MULTIPLY_BY_7 0x06
+#define MULTIPLY_BY_8 0x07
+#define MULTIPLY_BY_9 0x08
+#define MULTIPLY_BY_10 0x09
+
+#define DIVIDE_BY_2 0x01
+#define DIVIDE_BY_3 0x02
+#define DIVIDE_BY_4 0x03
+#define DIVIDE_BY_5 0x04
+#define DIVIDE_BY_6 0x05
+#define DIVIDE_BY_7 0x06
+#define DIVIDE_BY_8 0x07
+#define DIVIDE_BY_9 0x08
+#define DIVIDE_BY_10 0x09
+
+struct timing_phase_path {
+ int start;
+ int end;
+ int mid;
+ int len;
+};
+
+int sd_select_card(struct rtsx_chip *chip, int select);
+int sd_pull_ctl_enable(struct rtsx_chip *chip);
+int reset_sd_card(struct rtsx_chip *chip);
+int sd_switch_clock(struct rtsx_chip *chip);
+void sd_stop_seq_mode(struct rtsx_chip *chip);
+int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 start_sector, u16 sector_cnt);
+void sd_cleanup_work(struct rtsx_chip *chip);
+int sd_power_off_card3v3(struct rtsx_chip *chip);
+int release_sd_card(struct rtsx_chip *chip);
+#ifdef SUPPORT_CPRM
+int soft_reset_sd_card(struct rtsx_chip *chip);
+int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
+ u32 arg, u8 rsp_type, u8 *rsp, int rsp_len, int special_check);
+int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type);
+
+int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+#endif
+
+#endif /* __REALTEK_RTSX_SD_H */
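The FUNCTION_GROUP*_SUPPORT/QUERY_SWITCH/CHECK_BUSY offsets defined above index into the 64-byte status block a card returns for the SWITCH command (CMD6). A minimal sketch of how those offsets can be combined with the *_SUPPORT_MASK, *_QUERY_SWITCH_OK and *_SWITCH_BUSY values to confirm an SDR50 switch; the helper name and the standalone macro copies are illustrative only, not part of the driver.

#define FUNCTION_GROUP1_SUPPORT_OFFSET		0x0D
#define FUNCTION_GROUP1_QUERY_SWITCH_OFFSET	0x10
#define FUNCTION_GROUP1_CHECK_BUSY_OFFSET	0x1D
#define SDR50_SUPPORT_MASK			0x04
#define SDR50_QUERY_SWITCH_OK			0x02
#define SDR50_SWITCH_BUSY			0x04

/* "status" is assumed to hold the raw 64-byte CMD6 switch status */
static int sdr50_switch_succeeded(const unsigned char *status)
{
	/* Byte 0x0D: support bits advertised for function group 1 */
	if (!(status[FUNCTION_GROUP1_SUPPORT_OFFSET] & SDR50_SUPPORT_MASK))
		return 0;

	/* Low nibble of byte 0x10: function the card actually selected */
	if ((status[FUNCTION_GROUP1_QUERY_SWITCH_OFFSET] & 0x0F) !=
	    SDR50_QUERY_SWITCH_OK)
		return 0;

	/* Byte 0x1D: functions still busy completing the switch */
	if (status[FUNCTION_GROUP1_CHECK_BUSY_OFFSET] & SDR50_SWITCH_BUSY)
		return 0;

	return 1;
}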
diff --git a/drivers/staging/rts5208/spi.c b/drivers/staging/rts5208/spi.c
new file mode 100644
index 000000000000..312b9f9c6456
--- /dev/null
+++ b/drivers/staging/rts5208/spi.c
@@ -0,0 +1,877 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+
+#include "rtsx.h"
+#include "rtsx_transport.h"
+#include "rtsx_scsi.h"
+#include "rtsx_card.h"
+#include "spi.h"
+
+static inline void spi_set_err_code(struct rtsx_chip *chip, u8 err_code)
+{
+ struct spi_info *spi = &(chip->spi);
+
+ spi->err_code = err_code;
+}
+
+static int spi_init(struct rtsx_chip *chip)
+{
+ RTSX_WRITE_REG(chip, SPI_CONTROL, 0xFF,
+ CS_POLARITY_LOW | DTO_MSB_FIRST | SPI_MASTER | SPI_MODE0 |
+ SPI_AUTO);
+ RTSX_WRITE_REG(chip, SPI_TCTL, EDO_TIMING_MASK, SAMPLE_DELAY_HALF);
+
+ return STATUS_SUCCESS;
+}
+
+static int spi_set_init_para(struct rtsx_chip *chip)
+{
+ struct spi_info *spi = &(chip->spi);
+ int retval;
+
+ RTSX_WRITE_REG(chip, SPI_CLK_DIVIDER1, 0xFF, (u8)(spi->clk_div >> 8));
+ RTSX_WRITE_REG(chip, SPI_CLK_DIVIDER0, 0xFF, (u8)(spi->clk_div));
+
+ retval = switch_clock(chip, spi->spi_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = select_card(chip, SPI_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, CARD_CLK_EN, SPI_CLK_EN, SPI_CLK_EN);
+ RTSX_WRITE_REG(chip, CARD_OE, SPI_OUTPUT_EN, SPI_OUTPUT_EN);
+
+ wait_timeout(10);
+
+ retval = spi_init(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int sf_polling_status(struct rtsx_chip *chip, int msec)
+{
+ int retval;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, SPI_RDSR);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_POLLING_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, msec);
+ if (retval < 0) {
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_BUSY_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sf_enable_write(struct rtsx_chip *chip, u8 ins)
+{
+ struct spi_info *spi = &(chip->spi);
+ int retval;
+
+ if (!spi->write_en)
+ return STATUS_SUCCESS;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_C_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0) {
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int sf_disable_write(struct rtsx_chip *chip, u8 ins)
+{
+ struct spi_info *spi = &(chip->spi);
+ int retval;
+
+ if (!spi->write_en)
+ return STATUS_SUCCESS;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_C_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0) {
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static void sf_program(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr,
+ u16 len)
+{
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, (u8)len);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, (u8)(len >> 8));
+ if (addr_mode) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
+ (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
+ (u8)(addr >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CADO_MODE0);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CDO_MODE0);
+ }
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+}
+
+static int sf_erase(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr)
+{
+ int retval;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ if (addr_mode) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
+ (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
+ (u8)(addr >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_C_MODE0);
+ }
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0) {
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int spi_init_eeprom(struct rtsx_chip *chip)
+{
+ int retval;
+ int clk;
+
+ if (chip->asic_code)
+ clk = 30;
+ else
+ clk = CLK_30;
+
+ RTSX_WRITE_REG(chip, SPI_CLK_DIVIDER1, 0xFF, 0x00);
+ RTSX_WRITE_REG(chip, SPI_CLK_DIVIDER0, 0xFF, 0x27);
+
+ retval = switch_clock(chip, clk);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = select_card(chip, SPI_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, CARD_CLK_EN, SPI_CLK_EN, SPI_CLK_EN);
+ RTSX_WRITE_REG(chip, CARD_OE, SPI_OUTPUT_EN, SPI_OUTPUT_EN);
+
+ wait_timeout(10);
+
+ RTSX_WRITE_REG(chip, SPI_CONTROL, 0xFF,
+ CS_POLARITY_HIGH | SPI_EEPROM_AUTO);
+ RTSX_WRITE_REG(chip, SPI_TCTL, EDO_TIMING_MASK, SAMPLE_DELAY_HALF);
+
+ return STATUS_SUCCESS;
+}
+
+static int spi_eeprom_program_enable(struct rtsx_chip *chip)
+{
+ int retval;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x86);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x13);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+int spi_erase_eeprom_chip(struct rtsx_chip *chip)
+{
+ int retval;
+
+ retval = spi_init_eeprom(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = spi_eeprom_program_enable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_GPIO_DIR, 0x01, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x12);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x84);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, CARD_GPIO_DIR, 0x01, 0x01);
+
+ return STATUS_SUCCESS;
+}
+
+int spi_erase_eeprom_byte(struct rtsx_chip *chip, u16 addr)
+{
+ int retval;
+
+ retval = spi_init_eeprom(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = spi_eeprom_program_enable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_GPIO_DIR, 0x01, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x07);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, CARD_GPIO_DIR, 0x01, 0x01);
+
+ return STATUS_SUCCESS;
+}
+
+int spi_read_eeprom(struct rtsx_chip *chip, u16 addr, u8 *val)
+{
+ int retval;
+ u8 data;
+
+ retval = spi_init_eeprom(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_GPIO_DIR, 0x01, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x06);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CADI_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(5);
+ RTSX_READ_REG(chip, SPI_DATA, &data);
+
+ if (val)
+ *val = data;
+
+ RTSX_WRITE_REG(chip, CARD_GPIO_DIR, 0x01, 0x01);
+
+ return STATUS_SUCCESS;
+}
+
+int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val)
+{
+ int retval;
+
+ retval = spi_init_eeprom(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = spi_eeprom_program_enable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_GPIO_DIR, 0x01, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x05);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, val);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x4E);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, CARD_GPIO_DIR, 0x01, 0x01);
+
+ return STATUS_SUCCESS;
+}
+
+int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct spi_info *spi = &(chip->spi);
+
+ RTSX_DEBUGP("spi_get_status: err_code = 0x%x\n", spi->err_code);
+ rtsx_stor_set_xfer_buf(&(spi->err_code),
+ min_t(int, scsi_bufflen(srb), 1), srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - 1);
+
+ return STATUS_SUCCESS;
+}
+
+int spi_set_parameter(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ struct spi_info *spi = &(chip->spi);
+
+ spi_set_err_code(chip, SPI_NO_ERR);
+
+ if (chip->asic_code)
+ spi->spi_clock = ((u16)(srb->cmnd[8]) << 8) | srb->cmnd[9];
+ else
+ spi->spi_clock = srb->cmnd[3];
+
+ spi->clk_div = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5];
+ spi->write_en = srb->cmnd[6];
+
+ RTSX_DEBUGP("spi_set_parameter: spi_clock = %d, clk_div = %d, write_en = %d\n",
+ spi->spi_clock, spi->clk_div, spi->write_en);
+
+ return STATUS_SUCCESS;
+}
+
+int spi_read_flash_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ u16 len;
+ u8 *buf;
+
+ spi_set_err_code(chip, SPI_NO_ERR);
+
+ len = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
+ if (len > 512) {
+ spi_set_err_code(chip, SPI_INVALID_COMMAND);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = spi_set_init_para(chip);
+ if (retval != STATUS_SUCCESS) {
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, srb->cmnd[3]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, srb->cmnd[4]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, srb->cmnd[5]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, srb->cmnd[6]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, srb->cmnd[7]);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, srb->cmnd[8]);
+
+ if (len == 0) {
+ if (srb->cmnd[9]) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0,
+ 0xFF, SPI_TRANSFER0_START | SPI_CA_MODE0);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0,
+ 0xFF, SPI_TRANSFER0_START | SPI_C_MODE0);
+ }
+ } else {
+ if (srb->cmnd[9]) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CADI_MODE0);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CDI_MODE0);
+ }
+ }
+
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0) {
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (len) {
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ retval = rtsx_read_ppbuf(chip, buf, len);
+ if (retval != STATUS_SUCCESS) {
+ spi_set_err_code(chip, SPI_READ_ERR);
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ scsi_set_resid(srb, 0);
+
+ kfree(buf);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ unsigned int index = 0, offset = 0;
+ u8 ins, slow_read;
+ u32 addr;
+ u16 len;
+ u8 *buf;
+
+ spi_set_err_code(chip, SPI_NO_ERR);
+
+ ins = srb->cmnd[3];
+ addr = ((u32)(srb->cmnd[4]) << 16) | ((u32)(srb->cmnd[5])
+ << 8) | srb->cmnd[6];
+ len = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
+ slow_read = srb->cmnd[9];
+
+ retval = spi_set_init_para(chip);
+ if (retval != STATUS_SUCCESS) {
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ buf = kmalloc(SF_PAGE_LEN, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ while (len) {
+ u16 pagelen = SF_PAGE_LEN - (u8)addr;
+
+ if (pagelen > len)
+ pagelen = len;
+
+ rtsx_init_cmd(chip);
+
+ trans_dma_enable(DMA_FROM_DEVICE, chip, 256, DMA_256);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
+
+ if (slow_read) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF,
+ (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
+ (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
+ (u8)(addr >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
+ (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
+ (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR3, 0xFF,
+ (u8)(addr >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_32);
+ }
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF,
+ (u8)(pagelen >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF,
+ (u8)pagelen);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CADI_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0,
+ SPI_TRANSFER0_END, SPI_TRANSFER0_END);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0,
+ DMA_FROM_DEVICE, 10000);
+ if (retval < 0) {
+ kfree(buf);
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index, &offset,
+ TO_XFER_BUF);
+
+ addr += pagelen;
+ len -= pagelen;
+ }
+
+ scsi_set_resid(srb, 0);
+ kfree(buf);
+
+ return STATUS_SUCCESS;
+}
+
+int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ u8 ins, program_mode;
+ u32 addr;
+ u16 len;
+ u8 *buf;
+ unsigned int index = 0, offset = 0;
+
+ spi_set_err_code(chip, SPI_NO_ERR);
+
+ ins = srb->cmnd[3];
+ addr = ((u32)(srb->cmnd[4]) << 16) | ((u32)(srb->cmnd[5])
+ << 8) | srb->cmnd[6];
+ len = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
+ program_mode = srb->cmnd[9];
+
+ retval = spi_set_init_para(chip);
+ if (retval != STATUS_SUCCESS) {
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (program_mode == BYTE_PROGRAM) {
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ while (len) {
+ retval = sf_enable_write(chip, SPI_WREN);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset,
+ FROM_XFER_BUF);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF,
+ buf[0]);
+ sf_program(chip, ins, 1, addr, 1);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0) {
+ kfree(buf);
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sf_polling_status(chip, 100);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ addr++;
+ len--;
+ }
+
+ kfree(buf);
+
+ } else if (program_mode == AAI_PROGRAM) {
+ int first_byte = 1;
+
+ retval = sf_enable_write(chip, SPI_WREN);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ while (len) {
+ rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset,
+ FROM_XFER_BUF);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF,
+ buf[0]);
+ if (first_byte) {
+ sf_program(chip, ins, 1, addr, 1);
+ first_byte = 0;
+ } else {
+ sf_program(chip, ins, 0, 0, 1);
+ }
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval < 0) {
+ kfree(buf);
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sf_polling_status(chip, 100);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ len--;
+ }
+
+ kfree(buf);
+
+ retval = sf_disable_write(chip, SPI_WRDI);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sf_polling_status(chip, 100);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else if (program_mode == PAGE_PROGRAM) {
+ buf = kmalloc(SF_PAGE_LEN, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, STATUS_NOMEM);
+
+ while (len) {
+ u16 pagelen = SF_PAGE_LEN - (u8)addr;
+
+ if (pagelen > len)
+ pagelen = len;
+
+ retval = sf_enable_write(chip, SPI_WREN);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rtsx_init_cmd(chip);
+
+ trans_dma_enable(DMA_TO_DEVICE, chip, 256, DMA_256);
+ sf_program(chip, ins, 1, addr, pagelen);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index,
+ &offset, FROM_XFER_BUF);
+
+ retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0,
+ DMA_TO_DEVICE, 100);
+ if (retval < 0) {
+ kfree(buf);
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sf_polling_status(chip, 100);
+ if (retval != STATUS_SUCCESS) {
+ kfree(buf);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ addr += pagelen;
+ len -= pagelen;
+ }
+
+ kfree(buf);
+ } else {
+ spi_set_err_code(chip, SPI_INVALID_COMMAND);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int spi_erase_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ u8 ins, erase_mode;
+ u32 addr;
+
+ spi_set_err_code(chip, SPI_NO_ERR);
+
+ ins = srb->cmnd[3];
+ addr = ((u32)(srb->cmnd[4]) << 16) | ((u32)(srb->cmnd[5])
+ << 8) | srb->cmnd[6];
+ erase_mode = srb->cmnd[9];
+
+ retval = spi_set_init_para(chip);
+ if (retval != STATUS_SUCCESS) {
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (erase_mode == PAGE_ERASE) {
+ retval = sf_enable_write(chip, SPI_WREN);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sf_erase(chip, ins, 1, addr);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else if (erase_mode == CHIP_ERASE) {
+ retval = sf_enable_write(chip, SPI_WREN);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = sf_erase(chip, ins, 0, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ spi_set_err_code(chip, SPI_INVALID_COMMAND);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int spi_write_flash_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+{
+ int retval;
+ u8 ins, status, ewsr;
+
+ ins = srb->cmnd[3];
+ status = srb->cmnd[4];
+ ewsr = srb->cmnd[5];
+
+ retval = spi_set_init_para(chip);
+ if (retval != STATUS_SUCCESS) {
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = sf_enable_write(chip, ewsr);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
+ PINGPONG_BUFFER);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF, status);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
+ SPI_TRANSFER0_START | SPI_CDO_MODE0);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
+ SPI_TRANSFER0_END);
+
+ retval = rtsx_send_cmd(chip, 0, 100);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_clear_spi_error(chip);
+ spi_set_err_code(chip, SPI_HW_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
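Both spi_read_flash() and the PAGE_PROGRAM path of spi_write_flash() clamp every transfer to the remainder of the current 256-byte serial-flash page with pagelen = SF_PAGE_LEN - (u8)addr, so no single hardware command crosses a page boundary. A standalone sketch of that chunking with a worked example; the function name and the printf output are illustrative, not part of the driver.

#include <stdio.h>

#define SF_PAGE_LEN 256

static void split_by_page(unsigned int addr, unsigned int len)
{
	while (len) {
		/* Bytes left in the current 256-byte page */
		unsigned int pagelen = SF_PAGE_LEN - (unsigned char)addr;

		if (pagelen > len)
			pagelen = len;

		printf("chunk: addr=0x%06X len=%u\n", addr, pagelen);

		addr += pagelen;
		len -= pagelen;
	}
}

int main(void)
{
	/* A 40-byte access starting at 0x0001F0 crosses a page boundary:
	 * it is issued as 16 bytes at 0x0001F0 and 24 bytes at 0x000200. */
	split_by_page(0x0001F0, 40);
	return 0;
}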
diff --git a/drivers/staging/rts5208/spi.h b/drivers/staging/rts5208/spi.h
new file mode 100644
index 000000000000..fc824b5d8d59
--- /dev/null
+++ b/drivers/staging/rts5208/spi.h
@@ -0,0 +1,65 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_SPI_H
+#define __REALTEK_RTSX_SPI_H
+
+/* SPI operation error */
+#define SPI_NO_ERR 0x00
+#define SPI_HW_ERR 0x01
+#define SPI_INVALID_COMMAND 0x02
+#define SPI_READ_ERR 0x03
+#define SPI_WRITE_ERR 0x04
+#define SPI_ERASE_ERR 0x05
+#define SPI_BUSY_ERR 0x06
+
+/* Serial flash instruction */
+#define SPI_READ 0x03
+#define SPI_FAST_READ 0x0B
+#define SPI_WREN 0x06
+#define SPI_WRDI 0x04
+#define SPI_RDSR 0x05
+
+#define SF_PAGE_LEN 256
+
+#define BYTE_PROGRAM 0
+#define AAI_PROGRAM 1
+#define PAGE_PROGRAM 2
+
+#define PAGE_ERASE 0
+#define CHIP_ERASE 1
+
+int spi_erase_eeprom_chip(struct rtsx_chip *chip);
+int spi_erase_eeprom_byte(struct rtsx_chip *chip, u16 addr);
+int spi_read_eeprom(struct rtsx_chip *chip, u16 addr, u8 *val);
+int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val);
+int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int spi_set_parameter(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int spi_read_flash_id(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int spi_erase_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+int spi_write_flash_status(struct scsi_cmnd *srb, struct rtsx_chip *chip);
+
+#endif /* __REALTEK_RTSX_SPI_H */
diff --git a/drivers/staging/rts5208/trace.h b/drivers/staging/rts5208/trace.h
new file mode 100644
index 000000000000..0f177fbaaf1f
--- /dev/null
+++ b/drivers/staging/rts5208/trace.h
@@ -0,0 +1,93 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_TRACE_H
+#define __REALTEK_RTSX_TRACE_H
+
+#define _MSG_TRACE
+
+#ifdef _MSG_TRACE
+static inline char *filename(char *path)
+{
+ char *ptr;
+
+ if (path == NULL)
+ return NULL;
+
+ ptr = path;
+
+ while (*ptr != '\0') {
+ if ((*ptr == '\\') || (*ptr == '/'))
+ path = ptr + 1;
+
+ ptr++;
+ }
+
+ return path;
+}
+
+#define TRACE_RET(chip, ret) \
+ do { \
+ char *_file = filename(__FILE__); \
+ RTSX_DEBUGP("[%s][%s]:[%d]\n", _file, __func__, __LINE__); \
+ (chip)->trace_msg[(chip)->msg_idx].line = (u16)(__LINE__); \
+ strncpy((chip)->trace_msg[(chip)->msg_idx].func, __func__, MSG_FUNC_LEN-1); \
+ strncpy((chip)->trace_msg[(chip)->msg_idx].file, _file, MSG_FILE_LEN-1); \
+ get_current_time((chip)->trace_msg[(chip)->msg_idx].timeval_buf, TIME_VAL_LEN); \
+ (chip)->trace_msg[(chip)->msg_idx].valid = 1; \
+ (chip)->msg_idx++; \
+ if ((chip)->msg_idx >= TRACE_ITEM_CNT) { \
+ (chip)->msg_idx = 0; \
+ } \
+ return ret; \
+ } while (0)
+
+#define TRACE_GOTO(chip, label) \
+ do { \
+ char *_file = filename(__FILE__); \
+ RTSX_DEBUGP("[%s][%s]:[%d]\n", _file, __func__, __LINE__); \
+ (chip)->trace_msg[(chip)->msg_idx].line = (u16)(__LINE__); \
+ strncpy((chip)->trace_msg[(chip)->msg_idx].func, __func__, MSG_FUNC_LEN-1); \
+ strncpy((chip)->trace_msg[(chip)->msg_idx].file, _file, MSG_FILE_LEN-1); \
+ get_current_time((chip)->trace_msg[(chip)->msg_idx].timeval_buf, TIME_VAL_LEN); \
+ (chip)->trace_msg[(chip)->msg_idx].valid = 1; \
+ (chip)->msg_idx++; \
+ if ((chip)->msg_idx >= TRACE_ITEM_CNT) { \
+ (chip)->msg_idx = 0; \
+ } \
+ goto label; \
+ } while (0)
+#else
+#define TRACE_RET(chip, ret) return ret
+#define TRACE_GOTO(chip, label) goto label
+#endif
+
+#ifdef CONFIG_RTS5208_DEBUG
+#define RTSX_DUMP(buf, buf_len) \
+ print_hex_dump(KERN_DEBUG, RTSX_STOR, DUMP_PREFIX_NONE, \
+ 16, 1, (buf), (buf_len), false)
+#else
+#define RTSX_DUMP(buf, buf_len)
+#endif
+
+#endif /* __REALTEK_RTSX_TRACE_H */
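TRACE_RET() and TRACE_GOTO() above append an entry (file, function, line, timestamp) to a fixed-size trace array in rtsx_chip and wrap msg_idx once TRACE_ITEM_CNT entries have been written, so the newest failures overwrite the oldest. A simplified standalone sketch of that ring buffer; the struct layout and the TRACE_ITEM_CNT/MSG_FUNC_LEN/MSG_FILE_LEN values are assumptions here, since the real definitions live in rtsx_chip.h and are not shown in this patch.

#include <string.h>

#define TRACE_ITEM_CNT	64	/* assumed capacity */
#define MSG_FUNC_LEN	64	/* assumed field sizes */
#define MSG_FILE_LEN	32

struct trace_msg {
	unsigned short line;
	char func[MSG_FUNC_LEN];
	char file[MSG_FILE_LEN];
	int valid;
};

static struct trace_msg trace_log[TRACE_ITEM_CNT];
static int msg_idx;

static void trace_record(const char *file, const char *func, int line)
{
	struct trace_msg *msg = &trace_log[msg_idx];

	msg->line = (unsigned short)line;
	strncpy(msg->func, func, MSG_FUNC_LEN - 1);
	strncpy(msg->file, file, MSG_FILE_LEN - 1);
	msg->valid = 1;

	/* Oldest entries are overwritten once the buffer wraps around */
	if (++msg_idx >= TRACE_ITEM_CNT)
		msg_idx = 0;
}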
diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c
new file mode 100644
index 000000000000..6aef53d14e31
--- /dev/null
+++ b/drivers/staging/rts5208/xd.c
@@ -0,0 +1,2088 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include "rtsx.h"
+#include "rtsx_transport.h"
+#include "rtsx_scsi.h"
+#include "rtsx_card.h"
+#include "xd.h"
+
+static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no);
+static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk, u16 logoff,
+ u8 start_page, u8 end_page);
+
+static inline void xd_set_err_code(struct rtsx_chip *chip, u8 err_code)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+
+ xd_card->err_code = err_code;
+}
+
+static inline int xd_check_err_code(struct rtsx_chip *chip, u8 err_code)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+
+ return (xd_card->err_code == err_code);
+}
+
+static int xd_set_init_para(struct rtsx_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+
+ if (chip->asic_code)
+ xd_card->xd_clock = 47;
+ else
+ xd_card->xd_clock = CLK_50;
+
+ retval = switch_clock(chip, xd_card->xd_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_switch_clock(struct rtsx_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+
+ retval = select_card(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = switch_clock(chip, xd_card->xd_clock);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_read_id(struct rtsx_chip *chip, u8 id_cmd, u8 *id_buf, u8 buf_len)
+{
+ int retval, i;
+ u8 *ptr;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, id_cmd);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_READ_ID);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
+ XD_TRANSFER_END);
+
+ for (i = 0; i < 4; i++)
+ rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_ADDRESS1 + i), 0, 0);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 20);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+ if (id_buf && buf_len) {
+ if (buf_len > 4)
+ buf_len = 4;
+ memcpy(id_buf, ptr, buf_len);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static void xd_assign_phy_addr(struct rtsx_chip *chip, u32 addr, u8 mode)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+
+ switch (mode) {
+ case XD_RW_ADDR:
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1, 0xFF, (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2,
+ 0xFF, (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS3,
+ 0xFF, (u8)(addr >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
+ xd_card->addr_cycle | XD_CALC_ECC | XD_BA_NO_TRANSFORM);
+ break;
+
+ case XD_ERASE_ADDR:
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, (u8)addr);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1,
+ 0xFF, (u8)(addr >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2,
+ 0xFF, (u8)(addr >> 16));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
+ (xd_card->addr_cycle - 1) | XD_CALC_ECC |
+ XD_BA_NO_TRANSFORM);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
+ u8 *buf, int buf_len)
+{
+ int retval, i;
+
+ rtsx_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
+ 0xFF, XD_TRANSFER_START | XD_READ_REDUNDANT);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ for (i = 0; i < 6; i++)
+ rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_PAGE_STATUS + i),
+ 0, 0);
+ for (i = 0; i < 4; i++)
+ rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_RESERVED0 + i),
+ 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, XD_PARITY, 0, 0);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 500);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (buf && buf_len) {
+ u8 *ptr = rtsx_get_cmd_data(chip) + 1;
+
+ if (buf_len > 11)
+ buf_len = 11;
+ memcpy(buf, ptr, buf_len);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
+ u8 *buf, int buf_len)
+{
+ int retval, i;
+
+ if (!buf || (buf_len < 0))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ for (i = 0; i < buf_len; i++)
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + offset + i,
+ 0, 0);
+
+ retval = rtsx_send_cmd(chip, 0, 250);
+ if (retval < 0) {
+ rtsx_clear_xd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ memcpy(buf, rtsx_get_cmd_data(chip), buf_len);
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
+ int buf_len)
+{
+ int retval;
+ u8 reg;
+
+ if (!buf || (buf_len < 10))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
+ XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_READ_PAGES);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
+ XD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 250);
+ if (retval == -ETIMEDOUT) {
+ rtsx_clear_xd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_READ_REG(chip, XD_PAGE_STATUS, &reg);
+ if (reg != XD_GPG) {
+ rtsx_clear_xd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ RTSX_READ_REG(chip, XD_CTL, &reg);
+ if (!(reg & XD_ECC1_ERROR) || !(reg & XD_ECC1_UNCORRECTABLE)) {
+ retval = xd_read_data_from_ppb(chip, 0, buf, buf_len);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ if (reg & XD_ECC1_ERROR) {
+ u8 ecc_bit, ecc_byte;
+
+ RTSX_READ_REG(chip, XD_ECC_BIT1, &ecc_bit);
+ RTSX_READ_REG(chip, XD_ECC_BYTE1, &ecc_byte);
+
+ RTSX_DEBUGP("ECC_BIT1 = 0x%x, ECC_BYTE1 = 0x%x\n",
+ ecc_bit, ecc_byte);
+ if (ecc_byte < buf_len) {
+ RTSX_DEBUGP("Before correct: 0x%x\n",
+ buf[ecc_byte]);
+ buf[ecc_byte] ^= (1 << ecc_bit);
+ RTSX_DEBUGP("After correct: 0x%x\n",
+ buf[ecc_byte]);
+ }
+ }
+ } else if (!(reg & XD_ECC2_ERROR) || !(reg & XD_ECC2_UNCORRECTABLE)) {
+ rtsx_clear_xd_error(chip);
+
+ retval = xd_read_data_from_ppb(chip, 256, buf, buf_len);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ if (reg & XD_ECC2_ERROR) {
+ u8 ecc_bit, ecc_byte;
+
+ RTSX_READ_REG(chip, XD_ECC_BIT2, &ecc_bit);
+ RTSX_READ_REG(chip, XD_ECC_BYTE2, &ecc_byte);
+
+ RTSX_DEBUGP("ECC_BIT2 = 0x%x, ECC_BYTE2 = 0x%x\n",
+ ecc_bit, ecc_byte);
+ if (ecc_byte < buf_len) {
+ RTSX_DEBUGP("Before correct: 0x%x\n",
+ buf[ecc_byte]);
+ buf[ecc_byte] ^= (1 << ecc_bit);
+ RTSX_DEBUGP("After correct: 0x%x\n",
+ buf[ecc_byte]);
+ }
+ }
+ } else {
+ rtsx_clear_xd_error(chip);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
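When xd_read_cis() above sees a correctable ECC error, the controller reports the failing byte and bit and the code flips that single bit (buf[ecc_byte] ^= (1 << ecc_bit)). A minimal sketch of that repair as a standalone helper; the function name is hypothetical and not part of this file.

/* Hypothetical helper: repair a single-bit ECC error reported as
 * (ecc_byte, ecc_bit), guarding against an out-of-range byte index. */
static inline void xd_fix_single_bit(unsigned char *buf, int buf_len,
				     unsigned char ecc_byte,
				     unsigned char ecc_bit)
{
	if (ecc_byte < buf_len)
		buf[ecc_byte] ^= (unsigned char)(1 << ecc_bit);
}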
+
+static void xd_fill_pull_ctl_disable(struct rtsx_chip *chip)
+{
+ if (CHECK_PID(chip, 0x5208)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
+ XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
+ XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
+ XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
+ XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
+ MS_D5_PD | MS_D4_PD);
+ } else if (CHECK_PID(chip, 0x5288)) {
+ if (CHECK_BARO_PKG(chip, QFN)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
+ 0xFF, 0x55);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
+ 0xFF, 0x55);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
+ 0xFF, 0x4B);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
+ 0xFF, 0x69);
+ }
+ }
+}
+
+static void xd_fill_pull_ctl_stage1_barossa(struct rtsx_chip *chip)
+{
+ if (CHECK_BARO_PKG(chip, QFN)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x4B);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ }
+}
+
+static void xd_fill_pull_ctl_enable(struct rtsx_chip *chip)
+{
+ if (CHECK_PID(chip, 0x5208)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
+ XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
+ XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
+ XD_WP_PD | XD_CE_PU | XD_CLE_PD | XD_CD_PU);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
+ XD_RDY_PU | XD_WE_PU | XD_RE_PU | XD_ALE_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
+ MS_D5_PD | MS_D4_PD);
+ } else if (CHECK_PID(chip, 0x5288)) {
+ if (CHECK_BARO_PKG(chip, QFN)) {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
+ 0xFF, 0x55);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
+ 0xFF, 0x55);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
+ 0xFF, 0x53);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
+ 0xFF, 0xA9);
+ }
+ }
+}
+
+static int xd_pull_ctl_disable(struct rtsx_chip *chip)
+{
+ if (CHECK_PID(chip, 0x5208)) {
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF,
+ XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF,
+ XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF,
+ XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF,
+ XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL5, 0xFF,
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL6, 0xFF, MS_D5_PD | MS_D4_PD);
+ } else if (CHECK_PID(chip, 0x5288)) {
+ if (CHECK_BARO_PKG(chip, QFN)) {
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF, 0x55);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF, 0x55);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF, 0x4B);
+ RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF, 0x69);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int reset_xd(struct rtsx_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval, i, j;
+ u8 *ptr, id_buf[4], redunt[11];
+
+ retval = select_card(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, 0xFF,
+ XD_PGSTS_NOT_FF);
+ if (chip->asic_code) {
+ if (!CHECK_PID(chip, 0x5288))
+ xd_fill_pull_ctl_disable(chip);
+ else
+ xd_fill_pull_ctl_stage1_barossa(chip);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
+ (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN3) | 0x20);
+ }
+
+ if (!chip->ft2_fast_mode)
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_INIT,
+ XD_NO_AUTO_PWR_OFF, 0);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, 0);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!chip->ft2_fast_mode) {
+ retval = card_power_off(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(250);
+
+ rtsx_init_cmd(chip);
+
+ if (chip->asic_code) {
+ xd_fill_pull_ctl_enable(chip);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
+ (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN2) |
+ 0x20);
+ }
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = card_power_on(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+#ifdef SUPPORT_OCP
+ wait_timeout(50);
+ if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n",
+ chip->ocp_stat);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ }
+
+ rtsx_init_cmd(chip);
+
+ if (chip->ft2_fast_mode) {
+ if (chip->asic_code) {
+ xd_fill_pull_ctl_enable(chip);
+ } else {
+ rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
+ (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN2) |
+ 0x20);
+ }
+ }
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, XD_OUTPUT_EN);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CTL, XD_CE_DISEN, XD_CE_DISEN);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (!chip->ft2_fast_mode)
+ wait_timeout(200);
+
+ retval = xd_set_init_para(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ /* Read ID to check if the timing setting is right */
+ for (i = 0; i < 4; i++) {
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DTCTL, 0xFF,
+ XD_TIME_SETUP_STEP * 3 +
+ XD_TIME_RW_STEP * (2 + i) + XD_TIME_RWN_STEP * i);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CATCTL, 0xFF,
+ XD_TIME_SETUP_STEP * 3 + XD_TIME_RW_STEP * (4 + i) +
+ XD_TIME_RWN_STEP * (3 + i));
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_RESET);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+
+ RTSX_DEBUGP("XD_DAT: 0x%x, XD_CTL: 0x%x\n", ptr[0], ptr[1]);
+
+ if (((ptr[0] & READY_FLAG) != READY_STATE) ||
+ !(ptr[1] & XD_RDY))
+ continue;
+
+ retval = xd_read_id(chip, READ_ID, id_buf, 4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_DEBUGP("READ_ID: 0x%x 0x%x 0x%x 0x%x\n",
+ id_buf[0], id_buf[1], id_buf[2], id_buf[3]);
+
+ xd_card->device_code = id_buf[1];
+
+ /* Check if the xD card is supported */
+ switch (xd_card->device_code) {
+ case XD_4M_X8_512_1:
+ case XD_4M_X8_512_2:
+ xd_card->block_shift = 4;
+ xd_card->page_off = 0x0F;
+ xd_card->addr_cycle = 3;
+ xd_card->zone_cnt = 1;
+ xd_card->capacity = 8000;
+ XD_SET_4MB(xd_card);
+ break;
+ case XD_8M_X8_512:
+ xd_card->block_shift = 4;
+ xd_card->page_off = 0x0F;
+ xd_card->addr_cycle = 3;
+ xd_card->zone_cnt = 1;
+ xd_card->capacity = 16000;
+ break;
+ case XD_16M_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 3;
+ xd_card->zone_cnt = 1;
+ xd_card->capacity = 32000;
+ break;
+ case XD_32M_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 3;
+ xd_card->zone_cnt = 2;
+ xd_card->capacity = 64000;
+ break;
+ case XD_64M_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 4;
+ xd_card->capacity = 128000;
+ break;
+ case XD_128M_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 8;
+ xd_card->capacity = 256000;
+ break;
+ case XD_256M_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 16;
+ xd_card->capacity = 512000;
+ break;
+ case XD_512M_X8:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 32;
+ xd_card->capacity = 1024000;
+ break;
+ case xD_1G_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 64;
+ xd_card->capacity = 2048000;
+ break;
+ case xD_2G_X8_512:
+ XD_PAGE_512(xd_card);
+ xd_card->addr_cycle = 4;
+ xd_card->zone_cnt = 128;
+ xd_card->capacity = 4096000;
+ break;
+ default:
+ continue;
+ }
+
+ /* Confirm timing setting */
+ for (j = 0; j < 10; j++) {
+ retval = xd_read_id(chip, READ_ID, id_buf, 4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if (id_buf[1] != xd_card->device_code)
+ break;
+ }
+
+ if (j == 10)
+ break;
+ }
+
+ if (i == 4) {
+ xd_card->block_shift = 0;
+ xd_card->page_off = 0;
+ xd_card->addr_cycle = 0;
+ xd_card->capacity = 0;
+
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = xd_read_id(chip, READ_xD_ID, id_buf, 4);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ RTSX_DEBUGP("READ_xD_ID: 0x%x 0x%x 0x%x 0x%x\n",
+ id_buf[0], id_buf[1], id_buf[2], id_buf[3]);
+ if (id_buf[2] != XD_ID_CODE)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ /* Search CIS block */
+ for (i = 0; i < 24; i++) {
+ u32 page_addr;
+
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ page_addr = (u32)i << xd_card->block_shift;
+
+ for (j = 0; j < 3; j++) {
+ retval = xd_read_redundant(chip, page_addr, redunt, 11);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (j == 3)
+ continue;
+
+ if (redunt[BLOCK_STATUS] != XD_GBLK)
+ continue;
+
+ j = 0;
+ if (redunt[PAGE_STATUS] != XD_GPG) {
+ for (j = 1; j <= 8; j++) {
+ retval = xd_read_redundant(chip, page_addr + j,
+ redunt, 11);
+ if (retval == STATUS_SUCCESS) {
+ if (redunt[PAGE_STATUS] == XD_GPG)
+ break;
+ }
+ }
+
+ if (j == 9)
+ break;
+ }
+
+ /* Check CIS data */
+ if ((redunt[BLOCK_STATUS] == XD_GBLK) &&
+ (redunt[PARITY] & XD_BA1_ALL0)) {
+ u8 buf[10];
+
+ page_addr += j;
+
+ retval = xd_read_cis(chip, page_addr, buf, 10);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if ((buf[0] == 0x01) && (buf[1] == 0x03) &&
+ (buf[2] == 0xD9)
+ && (buf[3] == 0x01) && (buf[4] == 0xFF)
+ && (buf[5] == 0x18) && (buf[6] == 0x02)
+ && (buf[7] == 0xDF) && (buf[8] == 0x01)
+ && (buf[9] == 0x20)) {
+ xd_card->cis_block = (u16)i;
+ }
+ }
+
+ break;
+ }
+
+ RTSX_DEBUGP("CIS block: 0x%x\n", xd_card->cis_block);
+ if (xd_card->cis_block == 0xFFFF)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ chip->capacity[chip->card2lun[XD_CARD]] = xd_card->capacity;
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_check_data_blank(u8 *redunt)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (redunt[PAGE_STATUS + i] != 0xFF)
+ return 0;
+ }
+
+ if ((redunt[PARITY] & (XD_ECC1_ALL1 | XD_ECC2_ALL1))
+ != (XD_ECC1_ALL1 | XD_ECC2_ALL1))
+ return 0;
+
+
+ for (i = 0; i < 4; i++) {
+ if (redunt[RESERVED0 + i] != 0xFF)
+ return 0;
+ }
+
+ return 1;
+}
+
+static u16 xd_load_log_block_addr(u8 *redunt)
+{
+ u16 addr = 0xFFFF;
+
+ if (redunt[PARITY] & XD_BA1_BA2_EQL)
+ addr = ((u16)redunt[BLOCK_ADDR1_H] << 8) |
+ redunt[BLOCK_ADDR1_L];
+ else if (redunt[PARITY] & XD_BA1_VALID)
+ addr = ((u16)redunt[BLOCK_ADDR1_H] << 8) |
+ redunt[BLOCK_ADDR1_L];
+ else if (redunt[PARITY] & XD_BA2_VALID)
+ addr = ((u16)redunt[BLOCK_ADDR2_H] << 8) |
+ redunt[BLOCK_ADDR2_L];
+
+ return addr;
+}
+
+static int xd_init_l2p_tbl(struct rtsx_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int size, i;
+
+ RTSX_DEBUGP("xd_init_l2p_tbl: zone_cnt = %d\n", xd_card->zone_cnt);
+
+ if (xd_card->zone_cnt < 1)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ size = xd_card->zone_cnt * sizeof(struct zone_entry);
+ RTSX_DEBUGP("Buffer size for l2p table is %d\n", size);
+
+ xd_card->zone = vmalloc(size);
+ if (!xd_card->zone)
+ TRACE_RET(chip, STATUS_ERROR);
+
+ for (i = 0; i < xd_card->zone_cnt; i++) {
+ xd_card->zone[i].build_flag = 0;
+ xd_card->zone[i].l2p_table = NULL;
+ xd_card->zone[i].free_table = NULL;
+ xd_card->zone[i].get_index = 0;
+ xd_card->zone[i].set_index = 0;
+ xd_card->zone[i].unused_blk_cnt = 0;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static inline void free_zone(struct zone_entry *zone)
+{
+ RTSX_DEBUGP("free_zone\n");
+
+ if (!zone)
+ return;
+
+ zone->build_flag = 0;
+ zone->set_index = 0;
+ zone->get_index = 0;
+ zone->unused_blk_cnt = 0;
+ if (zone->l2p_table) {
+ vfree(zone->l2p_table);
+ zone->l2p_table = NULL;
+ }
+ if (zone->free_table) {
+ vfree(zone->free_table);
+ zone->free_table = NULL;
+ }
+}
+
+static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct zone_entry *zone;
+ int zone_no;
+
+ zone_no = (int)phy_blk >> 10;
+ if (zone_no >= xd_card->zone_cnt) {
+ RTSX_DEBUGP("Set unused block to invalid zone (zone_no = %d, zone_cnt = %d)\n",
+ zone_no, xd_card->zone_cnt);
+ return;
+ }
+ zone = &(xd_card->zone[zone_no]);
+
+ if (zone->free_table == NULL) {
+ if (xd_build_l2p_tbl(chip, zone_no) != STATUS_SUCCESS)
+ return;
+ }
+
+ if ((zone->set_index >= XD_FREE_TABLE_CNT)
+ || (zone->set_index < 0)) {
+ free_zone(zone);
+ RTSX_DEBUGP("Set unused block fail, invalid set_index\n");
+ return;
+ }
+
+ RTSX_DEBUGP("Set unused block to index %d\n", zone->set_index);
+
+ zone->free_table[zone->set_index++] = (u16) (phy_blk & 0x3ff);
+ if (zone->set_index >= XD_FREE_TABLE_CNT)
+ zone->set_index = 0;
+ zone->unused_blk_cnt++;
+}
+
+static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct zone_entry *zone;
+ u32 phy_blk;
+
+ if (zone_no >= xd_card->zone_cnt) {
+ RTSX_DEBUGP("Get unused block from invalid zone (zone_no = %d, zone_cnt = %d)\n",
+ zone_no, xd_card->zone_cnt);
+ return BLK_NOT_FOUND;
+ }
+ zone = &(xd_card->zone[zone_no]);
+
+ if ((zone->unused_blk_cnt == 0) ||
+ (zone->set_index == zone->get_index)) {
+ free_zone(zone);
+ RTSX_DEBUGP("Get unused block fail, no unused block available\n");
+ return BLK_NOT_FOUND;
+ }
+ if ((zone->get_index >= XD_FREE_TABLE_CNT) || (zone->get_index < 0)) {
+ free_zone(zone);
+ RTSX_DEBUGP("Get unused block fail, invalid get_index\n");
+ return BLK_NOT_FOUND;
+ }
+
+ RTSX_DEBUGP("Get unused block from index %d\n", zone->get_index);
+
+ phy_blk = zone->free_table[zone->get_index];
+ zone->free_table[zone->get_index++] = 0xFFFF;
+ if (zone->get_index >= XD_FREE_TABLE_CNT)
+ zone->get_index = 0;
+ zone->unused_blk_cnt--;
+
+ phy_blk += ((u32)(zone_no) << 10);
+ return phy_blk;
+}
+
+static void xd_set_l2p_tbl(struct rtsx_chip *chip,
+ int zone_no, u16 log_off, u16 phy_off)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct zone_entry *zone;
+
+ zone = &(xd_card->zone[zone_no]);
+ zone->l2p_table[log_off] = phy_off;
+}
+
+static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct zone_entry *zone;
+ int retval;
+
+ zone = &(xd_card->zone[zone_no]);
+ if (zone->l2p_table[log_off] == 0xFFFF) {
+ u32 phy_blk = 0;
+ int i;
+
+#ifdef XD_DELAY_WRITE
+ retval = xd_delay_write(chip);
+ if (retval != STATUS_SUCCESS) {
+ RTSX_DEBUGP("In xd_get_l2p_tbl, delay write fail!\n");
+ return BLK_NOT_FOUND;
+ }
+#endif
+
+ if (zone->unused_blk_cnt <= 0) {
+ RTSX_DEBUGP("No unused block!\n");
+ return BLK_NOT_FOUND;
+ }
+
+ for (i = 0; i < zone->unused_blk_cnt; i++) {
+ phy_blk = xd_get_unused_block(chip, zone_no);
+ if (phy_blk == BLK_NOT_FOUND) {
+ RTSX_DEBUGP("No unused block available!\n");
+ return BLK_NOT_FOUND;
+ }
+
+ retval = xd_init_page(chip, phy_blk, log_off,
+ 0, xd_card->page_off + 1);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+ if (i >= zone->unused_blk_cnt) {
+ RTSX_DEBUGP("No good unused block available!\n");
+ return BLK_NOT_FOUND;
+ }
+
+ xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(phy_blk & 0x3FF));
+ return phy_blk;
+ }
+
+ return (u32)zone->l2p_table[log_off] + ((u32)(zone_no) << 10);
+}
+
+int reset_xd_card(struct rtsx_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+
+ memset(xd_card, 0, sizeof(struct xd_info));
+
+ xd_card->block_shift = 0;
+ xd_card->page_off = 0;
+ xd_card->addr_cycle = 0;
+ xd_card->capacity = 0;
+ xd_card->zone_cnt = 0;
+ xd_card->cis_block = 0xFFFF;
+ xd_card->delay_write.delay_write_flag = 0;
+
+ retval = enable_card_clock(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = reset_xd(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ retval = xd_init_l2p_tbl(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+ u32 page_addr;
+ u8 reg = 0;
+
+ RTSX_DEBUGP("mark block 0x%x as bad block\n", phy_blk);
+
+ if (phy_blk == BLK_NOT_FOUND)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, XD_LATER_BBLK);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR2_H, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR2_L, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED0, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED1, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED2, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED3, 0xFF, 0xFF);
+
+ page_addr = phy_blk << xd_card->block_shift;
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF,
+ xd_card->page_off + 1);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_WRITE_REDUNDANT);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 500);
+ if (retval < 0) {
+ rtsx_clear_xd_error(chip);
+ rtsx_read_register(chip, XD_DAT, &reg);
+ if (reg & PROGRAM_ERROR)
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ else
+ xd_set_err_code(chip, XD_TO_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
+ u16 logoff, u8 start_page, u8 end_page)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+ u32 page_addr;
+ u8 reg = 0;
+
+ RTSX_DEBUGP("Init block 0x%x\n", phy_blk);
+
+ if (start_page > end_page)
+ TRACE_RET(chip, STATUS_FAIL);
+ if (phy_blk == BLK_NOT_FOUND)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, 0xFF);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H,
+ 0xFF, (u8)(logoff >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)logoff);
+
+ page_addr = (phy_blk << xd_card->block_shift) + start_page;
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG,
+ XD_BA_TRANSFORM, XD_BA_TRANSFORM);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT,
+ 0xFF, (end_page - start_page));
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
+ 0xFF, XD_TRANSFER_START | XD_WRITE_REDUNDANT);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 500);
+ if (retval < 0) {
+ rtsx_clear_xd_error(chip);
+ rtsx_read_register(chip, XD_DAT, &reg);
+ if (reg & PROGRAM_ERROR) {
+ xd_mark_bad_block(chip, phy_blk);
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ } else {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ }
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
+ u8 start_page, u8 end_page)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ u32 old_page, new_page;
+ u8 i, reg = 0;
+ int retval;
+
+ RTSX_DEBUGP("Copy page from block 0x%x to block 0x%x\n",
+ old_blk, new_blk);
+
+ if (start_page > end_page)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ if ((old_blk == BLK_NOT_FOUND) || (new_blk == BLK_NOT_FOUND))
+ TRACE_RET(chip, STATUS_FAIL);
+
+ old_page = (old_blk << xd_card->block_shift) + start_page;
+ new_page = (new_blk << xd_card->block_shift) + start_page;
+
+ XD_CLR_BAD_NEWBLK(xd_card);
+
+ RTSX_WRITE_REG(chip, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
+
+ for (i = start_page; i < end_page; i++) {
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
+ rtsx_clear_xd_error(chip);
+ xd_set_err_code(chip, XD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ rtsx_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, old_page, XD_RW_ADDR);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
+ XD_AUTO_CHK_DATA_STATUS, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_READ_PAGES);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 500);
+ if (retval < 0) {
+ rtsx_clear_xd_error(chip);
+ reg = 0;
+ rtsx_read_register(chip, XD_CTL, &reg);
+ if (reg & (XD_ECC1_ERROR | XD_ECC2_ERROR)) {
+ wait_timeout(100);
+
+ if (detect_card_cd(chip,
+ XD_CARD) != STATUS_SUCCESS) {
+ xd_set_err_code(chip, XD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (((reg & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ==
+ (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
+ || ((reg & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) ==
+ (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
+ rtsx_write_register(chip,
+ XD_PAGE_STATUS, 0xFF,
+ XD_BPG);
+ rtsx_write_register(chip,
+ XD_BLOCK_STATUS, 0xFF,
+ XD_GBLK);
+ XD_SET_BAD_OLDBLK(xd_card);
+ RTSX_DEBUGP("old block 0x%x ecc error\n", old_blk);
+ }
+ } else {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (XD_CHK_BAD_OLDBLK(xd_card))
+ rtsx_clear_xd_error(chip);
+
+ rtsx_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, new_page, XD_RW_ADDR);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_WRITE_PAGES);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 300);
+ if (retval < 0) {
+ rtsx_clear_xd_error(chip);
+ reg = 0;
+ rtsx_read_register(chip, XD_DAT, &reg);
+ if (reg & PROGRAM_ERROR) {
+ xd_mark_bad_block(chip, new_blk);
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ XD_SET_BAD_NEWBLK(xd_card);
+ } else {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ }
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ old_page++;
+ new_page++;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_reset_cmd(struct rtsx_chip *chip)
+{
+ int retval;
+ u8 *ptr;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
+ 0xFF, XD_TRANSFER_START | XD_RESET);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+ rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 100);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+ if (((ptr[0] & READY_FLAG) == READY_STATE) && (ptr[1] & XD_RDY))
+ return STATUS_SUCCESS;
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ u32 page_addr;
+ u8 reg = 0, *ptr;
+ int i, retval;
+
+ if (phy_blk == BLK_NOT_FOUND)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ page_addr = phy_blk << xd_card->block_shift;
+
+ for (i = 0; i < 3; i++) {
+ rtsx_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, page_addr, XD_ERASE_ADDR);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_ERASE);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+ rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 250);
+ if (retval < 0) {
+ rtsx_clear_xd_error(chip);
+ rtsx_read_register(chip, XD_DAT, &reg);
+ if (reg & PROGRAM_ERROR) {
+ xd_mark_bad_block(chip, phy_blk);
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ xd_set_err_code(chip, XD_ERASE_FAIL);
+ }
+ retval = xd_reset_cmd(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ continue;
+ }
+
+ ptr = rtsx_get_cmd_data(chip) + 1;
+ if (*ptr & PROGRAM_ERROR) {
+ xd_mark_bad_block(chip, phy_blk);
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+ }
+
+ xd_mark_bad_block(chip, phy_blk);
+ xd_set_err_code(chip, XD_ERASE_FAIL);
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+
+static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct zone_entry *zone;
+ int retval;
+ u32 start, end, i;
+ u16 max_logoff, cur_fst_page_logoff;
+ u16 cur_lst_page_logoff, ent_lst_page_logoff;
+ u8 redunt[11];
+
+ RTSX_DEBUGP("xd_build_l2p_tbl: %d\n", zone_no);
+
+ if (xd_card->zone == NULL) {
+ retval = xd_init_l2p_tbl(chip);
+ if (retval != STATUS_SUCCESS)
+ return retval;
+ }
+
+ if (xd_card->zone[zone_no].build_flag) {
+ RTSX_DEBUGP("l2p table of zone %d has been built\n", zone_no);
+ return STATUS_SUCCESS;
+ }
+
+ zone = &(xd_card->zone[zone_no]);
+
+ if (zone->l2p_table == NULL) {
+ zone->l2p_table = vmalloc(2000);
+ if (zone->l2p_table == NULL)
+ TRACE_GOTO(chip, Build_Fail);
+ }
+ memset((u8 *)(zone->l2p_table), 0xff, 2000);
+
+ if (zone->free_table == NULL) {
+ zone->free_table = vmalloc(XD_FREE_TABLE_CNT * 2);
+ if (zone->free_table == NULL)
+ TRACE_GOTO(chip, Build_Fail);
+ }
+ memset((u8 *)(zone->free_table), 0xff, XD_FREE_TABLE_CNT * 2);
+
+ if (zone_no == 0) {
+ if (xd_card->cis_block == 0xFFFF)
+ start = 0;
+ else
+ start = xd_card->cis_block + 1;
+ if (XD_CHK_4MB(xd_card)) {
+ end = 0x200;
+ max_logoff = 499;
+ } else {
+ end = 0x400;
+ max_logoff = 999;
+ }
+ } else {
+ start = (u32)(zone_no) << 10;
+ end = (u32)(zone_no + 1) << 10;
+ max_logoff = 999;
+ }
+
+ RTSX_DEBUGP("start block 0x%x, end block 0x%x\n", start, end);
+
+ zone->set_index = zone->get_index = 0;
+ zone->unused_blk_cnt = 0;
+
+ for (i = start; i < end; i++) {
+ u32 page_addr = i << xd_card->block_shift;
+ u32 phy_block;
+
+ retval = xd_read_redundant(chip, page_addr, redunt, 11);
+ if (retval != STATUS_SUCCESS)
+ continue;
+
+ if (redunt[BLOCK_STATUS] != 0xFF) {
+ RTSX_DEBUGP("bad block\n");
+ continue;
+ }
+
+ if (xd_check_data_blank(redunt)) {
+ RTSX_DEBUGP("blank block\n");
+ xd_set_unused_block(chip, i);
+ continue;
+ }
+
+ cur_fst_page_logoff = xd_load_log_block_addr(redunt);
+ if ((cur_fst_page_logoff == 0xFFFF) ||
+ (cur_fst_page_logoff > max_logoff)) {
+ retval = xd_erase_block(chip, i);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, i);
+ continue;
+ }
+
+ if ((zone_no == 0) && (cur_fst_page_logoff == 0) &&
+ (redunt[PAGE_STATUS] != XD_GPG))
+ XD_SET_MBR_FAIL(xd_card);
+
+ if (zone->l2p_table[cur_fst_page_logoff] == 0xFFFF) {
+ zone->l2p_table[cur_fst_page_logoff] = (u16)(i & 0x3FF);
+ continue;
+ }
+
+ phy_block = zone->l2p_table[cur_fst_page_logoff] +
+ ((u32)((zone_no) << 10));
+
+ page_addr = ((i + 1) << xd_card->block_shift) - 1;
+
+ retval = xd_read_redundant(chip, page_addr, redunt, 11);
+ if (retval != STATUS_SUCCESS)
+ continue;
+
+ cur_lst_page_logoff = xd_load_log_block_addr(redunt);
+ if (cur_lst_page_logoff == cur_fst_page_logoff) {
+ int m;
+
+ page_addr = ((phy_block + 1) <<
+ xd_card->block_shift) - 1;
+
+ for (m = 0; m < 3; m++) {
+ retval = xd_read_redundant(chip, page_addr,
+ redunt, 11);
+ if (retval == STATUS_SUCCESS)
+ break;
+ }
+
+ if (m == 3) {
+ zone->l2p_table[cur_fst_page_logoff] =
+ (u16)(i & 0x3FF);
+ retval = xd_erase_block(chip, phy_block);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, phy_block);
+ continue;
+ }
+
+ ent_lst_page_logoff = xd_load_log_block_addr(redunt);
+ if (ent_lst_page_logoff != cur_fst_page_logoff) {
+ zone->l2p_table[cur_fst_page_logoff] =
+ (u16)(i & 0x3FF);
+ retval = xd_erase_block(chip, phy_block);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, phy_block);
+ continue;
+ } else {
+ retval = xd_erase_block(chip, i);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, i);
+ }
+ } else {
+ retval = xd_erase_block(chip, i);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, i);
+ }
+ }
+
+ if (XD_CHK_4MB(xd_card))
+ end = 500;
+ else
+ end = 1000;
+
+ i = 0;
+ for (start = 0; start < end; start++) {
+ if (zone->l2p_table[start] == 0xFFFF)
+ i++;
+ }
+
+ RTSX_DEBUGP("Block count %d, invalid L2P entry %d\n", end, i);
+ RTSX_DEBUGP("Total unused block: %d\n", zone->unused_blk_cnt);
+
+ if ((zone->unused_blk_cnt - i) < 1)
+ chip->card_wp |= XD_CARD;
+
+ zone->build_flag = 1;
+
+ return STATUS_SUCCESS;
+
+Build_Fail:
+ if (zone->l2p_table) {
+ vfree(zone->l2p_table);
+ zone->l2p_table = NULL;
+ }
+ if (zone->free_table) {
+ vfree(zone->free_table);
+ zone->free_table = NULL;
+ }
+
+ return STATUS_FAIL;
+}
+
+static int xd_send_cmd(struct rtsx_chip *chip, u8 cmd)
+{
+ int retval;
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, cmd);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_SET_CMD);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ retval = rtsx_send_cmd(chip, XD_CARD, 200);
+ if (retval < 0)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
+ u32 log_blk, u8 start_page, u8 end_page,
+ u8 *buf, unsigned int *index,
+ unsigned int *offset)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ u32 page_addr, new_blk;
+ u16 log_off;
+ u8 reg_val, page_cnt;
+ int zone_no, retval, i;
+
+ if (start_page > end_page)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ page_cnt = end_page - start_page;
+ zone_no = (int)(log_blk / 1000);
+ log_off = (u16)(log_blk % 1000);
+
+ if ((phy_blk & 0x3FF) == 0x3FF) {
+ for (i = 0; i < 256; i++) {
+ page_addr = ((u32)i) << xd_card->block_shift;
+
+ retval = xd_read_redundant(chip, page_addr, NULL, 0);
+ if (retval == STATUS_SUCCESS)
+ break;
+
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
+ xd_set_err_code(chip, XD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ }
+
+ page_addr = (phy_blk << xd_card->block_shift) + start_page;
+
+ rtsx_init_cmd(chip);
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_PPB_TO_SIE, XD_PPB_TO_SIE);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
+ XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
+
+ trans_dma_enable(chip->srb->sc_data_direction, chip,
+ page_cnt * 512, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
+ XD_TRANSFER_START | XD_READ_PAGES);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END | XD_PPB_EMPTY, XD_TRANSFER_END | XD_PPB_EMPTY);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,
+ scsi_sg_count(chip->srb),
+ index, offset, DMA_FROM_DEVICE,
+ chip->xd_timeout);
+ if (retval < 0) {
+ rtsx_clear_xd_error(chip);
+
+ if (retval == -ETIMEDOUT) {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ TRACE_GOTO(chip, Fail);
+ }
+ }
+
+ return STATUS_SUCCESS;
+
+Fail:
+ RTSX_READ_REG(chip, XD_PAGE_STATUS, &reg_val);
+
+ if (reg_val != XD_GPG)
+ xd_set_err_code(chip, XD_PRG_ERROR);
+
+ RTSX_READ_REG(chip, XD_CTL, &reg_val);
+
+ if (((reg_val & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
+ == (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
+ || ((reg_val & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))
+ == (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
+ wait_timeout(100);
+
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
+ xd_set_err_code(chip, XD_NO_CARD);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ xd_set_err_code(chip, XD_ECC_ERROR);
+
+ new_blk = xd_get_unused_block(chip, zone_no);
+ if (new_blk == NO_NEW_BLK) {
+ XD_CLR_BAD_OLDBLK(xd_card);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = xd_copy_page(chip, phy_blk, new_blk, 0,
+ xd_card->page_off + 1);
+ if (retval != STATUS_SUCCESS) {
+ if (!XD_CHK_BAD_NEWBLK(xd_card)) {
+ retval = xd_erase_block(chip, new_blk);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, new_blk);
+ } else {
+ XD_CLR_BAD_NEWBLK(xd_card);
+ }
+ XD_CLR_BAD_OLDBLK(xd_card);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));
+ xd_erase_block(chip, phy_blk);
+ xd_mark_bad_block(chip, phy_blk);
+ XD_CLR_BAD_OLDBLK(xd_card);
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+static int xd_finish_write(struct rtsx_chip *chip,
+ u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval, zone_no;
+ u16 log_off;
+
+ RTSX_DEBUGP("xd_finish_write, old_blk = 0x%x, new_blk = 0x%x, log_blk = 0x%x\n",
+ old_blk, new_blk, log_blk);
+
+ if (page_off > xd_card->page_off)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ zone_no = (int)(log_blk / 1000);
+ log_off = (u16)(log_blk % 1000);
+
+ if (old_blk == BLK_NOT_FOUND) {
+ retval = xd_init_page(chip, new_blk, log_off,
+ page_off, xd_card->page_off + 1);
+ if (retval != STATUS_SUCCESS) {
+ retval = xd_erase_block(chip, new_blk);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, new_blk);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ retval = xd_copy_page(chip, old_blk, new_blk,
+ page_off, xd_card->page_off + 1);
+ if (retval != STATUS_SUCCESS) {
+ if (!XD_CHK_BAD_NEWBLK(xd_card)) {
+ retval = xd_erase_block(chip, new_blk);
+ if (retval == STATUS_SUCCESS)
+ xd_set_unused_block(chip, new_blk);
+ }
+ XD_CLR_BAD_NEWBLK(xd_card);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = xd_erase_block(chip, old_blk);
+ if (retval == STATUS_SUCCESS) {
+ if (XD_CHK_BAD_OLDBLK(xd_card)) {
+ xd_mark_bad_block(chip, old_blk);
+ XD_CLR_BAD_OLDBLK(xd_card);
+ } else {
+ xd_set_unused_block(chip, old_blk);
+ }
+ } else {
+ xd_set_err_code(chip, XD_NO_ERROR);
+ XD_CLR_BAD_OLDBLK(xd_card);
+ }
+ }
+
+ xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));
+
+ return STATUS_SUCCESS;
+}
+
+static int xd_prepare_write(struct rtsx_chip *chip,
+ u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
+{
+ int retval;
+
+ RTSX_DEBUGP("%s, old_blk = 0x%x, new_blk = 0x%x, log_blk = 0x%x, page_off = %d\n",
+ __func__, old_blk, new_blk, log_blk, (int)page_off);
+
+ if (page_off) {
+ retval = xd_copy_page(chip, old_blk, new_blk, 0, page_off);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+
+static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
+ u32 new_blk, u32 log_blk, u8 start_page,
+ u8 end_page, u8 *buf, unsigned int *index,
+ unsigned int *offset)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ u32 page_addr;
+ int zone_no, retval;
+ u16 log_off;
+ u8 page_cnt, reg_val;
+
+ RTSX_DEBUGP("%s, old_blk = 0x%x, new_blk = 0x%x, log_blk = 0x%x\n",
+ __func__, old_blk, new_blk, log_blk);
+
+ if (start_page > end_page)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ page_cnt = end_page - start_page;
+ zone_no = (int)(log_blk / 1000);
+ log_off = (u16)(log_blk % 1000);
+
+ page_addr = (new_blk << xd_card->block_shift) + start_page;
+
+ retval = xd_send_cmd(chip, READ1_1);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ rtsx_init_cmd(chip);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H,
+ 0xFF, (u8)(log_off >> 8));
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)log_off);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, XD_GBLK);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG);
+
+ xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_BA_TRANSFORM,
+ XD_BA_TRANSFORM);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
+
+ trans_dma_enable(chip->srb->sc_data_direction, chip,
+ page_cnt * 512, DMA_512);
+
+ rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
+ 0xFF, XD_TRANSFER_START | XD_WRITE_PAGES);
+ rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
+ XD_TRANSFER_END, XD_TRANSFER_END);
+
+ rtsx_send_cmd_no_wait(chip);
+
+ retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,
+ scsi_sg_count(chip->srb),
+ index, offset, DMA_TO_DEVICE, chip->xd_timeout);
+ if (retval < 0) {
+ rtsx_clear_xd_error(chip);
+
+ if (retval == -ETIMEDOUT) {
+ xd_set_err_code(chip, XD_TO_ERROR);
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ TRACE_GOTO(chip, Fail);
+ }
+ }
+
+ if (end_page == (xd_card->page_off + 1)) {
+ xd_card->delay_write.delay_write_flag = 0;
+
+ if (old_blk != BLK_NOT_FOUND) {
+ retval = xd_erase_block(chip, old_blk);
+ if (retval == STATUS_SUCCESS) {
+ if (XD_CHK_BAD_OLDBLK(xd_card)) {
+ xd_mark_bad_block(chip, old_blk);
+ XD_CLR_BAD_OLDBLK(xd_card);
+ } else {
+ xd_set_unused_block(chip, old_blk);
+ }
+ } else {
+ xd_set_err_code(chip, XD_NO_ERROR);
+ XD_CLR_BAD_OLDBLK(xd_card);
+ }
+ }
+ xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));
+ }
+
+ return STATUS_SUCCESS;
+
+Fail:
+ RTSX_READ_REG(chip, XD_DAT, &reg_val);
+ if (reg_val & PROGRAM_ERROR) {
+ xd_set_err_code(chip, XD_PRG_ERROR);
+ xd_mark_bad_block(chip, new_blk);
+ }
+
+ TRACE_RET(chip, STATUS_FAIL);
+}
+
+#ifdef XD_DELAY_WRITE
+int xd_delay_write(struct rtsx_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_delay_write_tag *delay_write = &(xd_card->delay_write);
+ int retval;
+
+ if (delay_write->delay_write_flag) {
+ RTSX_DEBUGP("xd_delay_write\n");
+ retval = xd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ delay_write->delay_write_flag = 0;
+ retval = xd_finish_write(chip,
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ delay_write->logblock, delay_write->pageoff);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ return STATUS_SUCCESS;
+}
+#endif
+
+int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 start_sector, u16 sector_cnt)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ unsigned int lun = SCSI_LUN(srb);
+#ifdef XD_DELAY_WRITE
+ struct xd_delay_write_tag *delay_write = &(xd_card->delay_write);
+#endif
+ int retval, zone_no;
+ unsigned int index = 0, offset = 0;
+ u32 log_blk, old_blk = 0, new_blk = 0;
+ u16 log_off, total_sec_cnt = sector_cnt;
+ u8 start_page, end_page = 0, page_cnt;
+ u8 *ptr;
+
+ xd_set_err_code(chip, XD_NO_ERROR);
+
+ xd_card->cleanup_counter = 0;
+
+ RTSX_DEBUGP("xd_rw: scsi_sg_count = %d\n", scsi_sg_count(srb));
+
+ ptr = (u8 *)scsi_sglist(srb);
+
+ retval = xd_switch_clock(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
+ chip->card_fail |= XD_CARD;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ log_blk = start_sector >> xd_card->block_shift;
+ start_page = (u8)start_sector & xd_card->page_off;
+ zone_no = (int)(log_blk / 1000);
+ log_off = (u16)(log_blk % 1000);
+
+ if (xd_card->zone[zone_no].build_flag == 0) {
+ retval = xd_build_l2p_tbl(chip, zone_no);
+ if (retval != STATUS_SUCCESS) {
+ chip->card_fail |= XD_CARD;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+#ifdef XD_DELAY_WRITE
+ if (delay_write->delay_write_flag &&
+ (delay_write->logblock == log_blk) &&
+ (start_page > delay_write->pageoff)) {
+ delay_write->delay_write_flag = 0;
+ if (delay_write->old_phyblock != BLK_NOT_FOUND) {
+ retval = xd_copy_page(chip,
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ delay_write->pageoff, start_page);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+ old_blk = delay_write->old_phyblock;
+ new_blk = delay_write->new_phyblock;
+ } else if (delay_write->delay_write_flag &&
+ (delay_write->logblock == log_blk) &&
+ (start_page == delay_write->pageoff)) {
+ delay_write->delay_write_flag = 0;
+ old_blk = delay_write->old_phyblock;
+ new_blk = delay_write->new_phyblock;
+ } else {
+ retval = xd_delay_write(chip);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
+ new_blk = xd_get_unused_block(chip, zone_no);
+ if ((old_blk == BLK_NOT_FOUND) ||
+ (new_blk == BLK_NOT_FOUND)) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = xd_prepare_write(chip, old_blk, new_blk,
+ log_blk, start_page);
+ if (retval != STATUS_SUCCESS) {
+ if (detect_card_cd(chip, XD_CARD) !=
+ STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#ifdef XD_DELAY_WRITE
+ }
+#endif
+ } else {
+#ifdef XD_DELAY_WRITE
+ retval = xd_delay_write(chip);
+ if (retval != STATUS_SUCCESS) {
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+
+ old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
+ if (old_blk == BLK_NOT_FOUND) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ RTSX_DEBUGP("old_blk = 0x%x\n", old_blk);
+
+ while (total_sec_cnt) {
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
+ chip->card_fail |= XD_CARD;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if ((start_page + total_sec_cnt) > (xd_card->page_off + 1))
+ end_page = xd_card->page_off + 1;
+ else
+ end_page = start_page + (u8)total_sec_cnt;
+
+ page_cnt = end_page - start_page;
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ retval = xd_read_multiple_pages(chip, old_blk, log_blk,
+ start_page, end_page, ptr,
+ &index, &offset);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ } else {
+ retval = xd_write_multiple_pages(chip, old_blk,
+ new_blk, log_blk,
+ start_page, end_page, ptr,
+ &index, &offset);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ total_sec_cnt -= page_cnt;
+ if (scsi_sg_count(srb) == 0)
+ ptr += page_cnt * 512;
+
+ if (total_sec_cnt == 0)
+ break;
+
+ log_blk++;
+ zone_no = (int)(log_blk / 1000);
+ log_off = (u16)(log_blk % 1000);
+
+ if (xd_card->zone[zone_no].build_flag == 0) {
+ retval = xd_build_l2p_tbl(chip, zone_no);
+ if (retval != STATUS_SUCCESS) {
+ chip->card_fail |= XD_CARD;
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
+ if (old_blk == BLK_NOT_FOUND) {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE)
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ else
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (srb->sc_data_direction == DMA_TO_DEVICE) {
+ new_blk = xd_get_unused_block(chip, zone_no);
+ if (new_blk == BLK_NOT_FOUND) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ }
+
+ start_page = 0;
+ }
+
+ if ((srb->sc_data_direction == DMA_TO_DEVICE) &&
+ (end_page != (xd_card->page_off + 1))) {
+#ifdef XD_DELAY_WRITE
+ delay_write->delay_write_flag = 1;
+ delay_write->old_phyblock = old_blk;
+ delay_write->new_phyblock = new_blk;
+ delay_write->logblock = log_blk;
+ delay_write->pageoff = end_page;
+#else
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
+ chip->card_fail |= XD_CARD;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ retval = xd_finish_write(chip, old_blk, new_blk,
+ log_blk, end_page);
+ if (retval != STATUS_SUCCESS) {
+ if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+#endif
+ }
+
+ scsi_set_resid(srb, 0);
+
+ return STATUS_SUCCESS;
+}
+
+void xd_free_l2p_tbl(struct rtsx_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int i = 0;
+
+ if (xd_card->zone != NULL) {
+ for (i = 0; i < xd_card->zone_cnt; i++) {
+ if (xd_card->zone[i].l2p_table != NULL) {
+ vfree(xd_card->zone[i].l2p_table);
+ xd_card->zone[i].l2p_table = NULL;
+ }
+ if (xd_card->zone[i].free_table != NULL) {
+ vfree(xd_card->zone[i].free_table);
+ xd_card->zone[i].free_table = NULL;
+ }
+ }
+ vfree(xd_card->zone);
+ xd_card->zone = NULL;
+ }
+}
+
+void xd_cleanup_work(struct rtsx_chip *chip)
+{
+#ifdef XD_DELAY_WRITE
+ struct xd_info *xd_card = &(chip->xd_card);
+
+ if (xd_card->delay_write.delay_write_flag) {
+ RTSX_DEBUGP("xD: delay write\n");
+ xd_delay_write(chip);
+ xd_card->cleanup_counter = 0;
+ }
+#endif
+}
+
+int xd_power_off_card3v3(struct rtsx_chip *chip)
+{
+ int retval;
+
+ retval = disable_card_clock(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ RTSX_WRITE_REG(chip, CARD_OE, XD_OUTPUT_EN, 0);
+
+ if (!chip->ft2_fast_mode) {
+ retval = card_power_off(chip, XD_CARD);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ wait_timeout(50);
+ }
+
+ if (chip->asic_code) {
+ retval = xd_pull_ctl_disable(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ } else {
+ RTSX_WRITE_REG(chip, FPGA_PULL_CTL, 0xFF, 0xDF);
+ }
+
+ return STATUS_SUCCESS;
+}
+
+int release_xd_card(struct rtsx_chip *chip)
+{
+ struct xd_info *xd_card = &(chip->xd_card);
+ int retval;
+
+ RTSX_DEBUGP("release_xd_card\n");
+
+ chip->card_ready &= ~XD_CARD;
+ chip->card_fail &= ~XD_CARD;
+ chip->card_wp &= ~XD_CARD;
+
+ xd_card->delay_write.delay_write_flag = 0;
+
+ xd_free_l2p_tbl(chip);
+
+ retval = xd_power_off_card3v3(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+}
diff --git a/drivers/staging/rts5208/xd.h b/drivers/staging/rts5208/xd.h
new file mode 100644
index 000000000000..938138c50bb5
--- /dev/null
+++ b/drivers/staging/rts5208/xd.h
@@ -0,0 +1,188 @@
+/* Driver for Realtek PCI-Express card reader
+ * Header file
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG (wei_wang@realsil.com.cn)
+ * Micky Ching (micky_ching@realsil.com.cn)
+ */
+
+#ifndef __REALTEK_RTSX_XD_H
+#define __REALTEK_RTSX_XD_H
+
+#define XD_DELAY_WRITE
+
+/* Error Codes */
+#define XD_NO_ERROR 0x00
+#define XD_NO_MEMORY 0x80
+#define XD_PRG_ERROR 0x40
+#define XD_NO_CARD 0x20
+#define XD_READ_FAIL 0x10
+#define XD_ERASE_FAIL 0x08
+#define XD_WRITE_FAIL 0x04
+#define XD_ECC_ERROR 0x02
+#define XD_TO_ERROR 0x01
+
+/* XD Commands */
+#define READ1_1 0x00
+#define READ1_2 0x01
+#define READ2 0x50
+#define READ_ID 0x90
+#define RESET 0xff
+#define PAGE_PRG_1 0x80
+#define PAGE_PRG_2 0x10
+#define BLK_ERASE_1 0x60
+#define BLK_ERASE_2 0xD0
+#define READ_STS 0x70
+#define READ_xD_ID 0x9A
+#define COPY_BACK_512 0x8A
+#define COPY_BACK_2K 0x85
+#define READ1_1_2 0x30
+#define READ1_1_3 0x35
+#define CHG_DAT_OUT_1 0x05
+#define RDM_DAT_OUT_1 0x05
+#define CHG_DAT_OUT_2 0xE0
+#define RDM_DAT_OUT_2 0xE0
+#define CHG_DAT_OUT_2 0xE0
+#define CHG_DAT_IN_1 0x85
+#define CACHE_PRG 0x15
+
+/* Redundant Area Related */
+#define XD_EXTRA_SIZE 0x10
+#define XD_2K_EXTRA_SIZE 0x40
+
+#define NOT_WRITE_PROTECTED 0x80
+#define READY_STATE 0x40
+#define PROGRAM_ERROR 0x01
+#define PROGRAM_ERROR_N_1 0x02
+#define INTERNAL_READY 0x20
+#define READY_FLAG 0x5F
+
+#define XD_8M_X8_512 0xE6
+#define XD_16M_X8_512 0x73
+#define XD_32M_X8_512 0x75
+#define XD_64M_X8_512 0x76
+#define XD_128M_X8_512 0x79
+#define XD_256M_X8_512 0x71
+#define XD_128M_X8_2048 0xF1
+#define XD_256M_X8_2048 0xDA
+#define XD_512M_X8 0xDC
+#define XD_128M_X16_2048 0xC1
+#define XD_4M_X8_512_1 0xE3
+#define XD_4M_X8_512_2 0xE5
+#define xD_1G_X8_512 0xD3
+#define xD_2G_X8_512 0xD5
+
+#define XD_ID_CODE 0xB5
+
+#define VENDOR_BLOCK 0xEFFF
+#define CIS_BLOCK 0xDFFF
+
+#define BLK_NOT_FOUND 0xFFFFFFFF
+
+#define NO_NEW_BLK 0xFFFFFFFF
+
+#define PAGE_CORRECTABLE 0x0
+#define PAGE_NOTCORRECTABLE 0x1
+
+#define NO_OFFSET 0x0
+#define WITH_OFFSET 0x1
+
+#define Sect_Per_Page 4
+#define XD_ADDR_MODE_2C XD_ADDR_MODE_2A
+
+#define ZONE0_BAD_BLOCK 23
+#define NOT_ZONE0_BAD_BLOCK 24
+
+#define XD_RW_ADDR 0x01
+#define XD_ERASE_ADDR 0x02
+
+#define XD_PAGE_512(xd_card) \
+do { \
+ (xd_card)->block_shift = 5; \
+ (xd_card)->page_off = 0x1F; \
+} while (0)
+
+#define XD_SET_BAD_NEWBLK(xd_card) ((xd_card)->multi_flag |= 0x01)
+#define XD_CLR_BAD_NEWBLK(xd_card) ((xd_card)->multi_flag &= ~0x01)
+#define XD_CHK_BAD_NEWBLK(xd_card) ((xd_card)->multi_flag & 0x01)
+
+#define XD_SET_BAD_OLDBLK(xd_card) ((xd_card)->multi_flag |= 0x02)
+#define XD_CLR_BAD_OLDBLK(xd_card) ((xd_card)->multi_flag &= ~0x02)
+#define XD_CHK_BAD_OLDBLK(xd_card) ((xd_card)->multi_flag & 0x02)
+
+#define XD_SET_MBR_FAIL(xd_card) ((xd_card)->multi_flag |= 0x04)
+#define XD_CLR_MBR_FAIL(xd_card) ((xd_card)->multi_flag &= ~0x04)
+#define XD_CHK_MBR_FAIL(xd_card) ((xd_card)->multi_flag & 0x04)
+
+#define XD_SET_ECC_FLD_ERR(xd_card) ((xd_card)->multi_flag |= 0x08)
+#define XD_CLR_ECC_FLD_ERR(xd_card) ((xd_card)->multi_flag &= ~0x08)
+#define XD_CHK_ECC_FLD_ERR(xd_card) ((xd_card)->multi_flag & 0x08)
+
+#define XD_SET_4MB(xd_card) ((xd_card)->multi_flag |= 0x10)
+#define XD_CLR_4MB(xd_card) ((xd_card)->multi_flag &= ~0x10)
+#define XD_CHK_4MB(xd_card) ((xd_card)->multi_flag & 0x10)
+
+#define XD_SET_ECC_ERR(xd_card) ((xd_card)->multi_flag |= 0x40)
+#define XD_CLR_ECC_ERR(xd_card) ((xd_card)->multi_flag &= ~0x40)
+#define XD_CHK_ECC_ERR(xd_card) ((xd_card)->multi_flag & 0x40)
+
+#define PAGE_STATUS 0
+#define BLOCK_STATUS 1
+#define BLOCK_ADDR1_L 2
+#define BLOCK_ADDR1_H 3
+#define BLOCK_ADDR2_L 4
+#define BLOCK_ADDR2_H 5
+#define RESERVED0 6
+#define RESERVED1 7
+#define RESERVED2 8
+#define RESERVED3 9
+#define PARITY 10
+
+#define CIS0_0 0
+#define CIS0_1 1
+#define CIS0_2 2
+#define CIS0_3 3
+#define CIS0_4 4
+#define CIS0_5 5
+#define CIS0_6 6
+#define CIS0_7 7
+#define CIS0_8 8
+#define CIS0_9 9
+#define CIS1_0 256
+#define CIS1_1 (256 + 1)
+#define CIS1_2 (256 + 2)
+#define CIS1_3 (256 + 3)
+#define CIS1_4 (256 + 4)
+#define CIS1_5 (256 + 5)
+#define CIS1_6 (256 + 6)
+#define CIS1_7 (256 + 7)
+#define CIS1_8 (256 + 8)
+#define CIS1_9 (256 + 9)
+
+int reset_xd_card(struct rtsx_chip *chip);
+#ifdef XD_DELAY_WRITE
+int xd_delay_write(struct rtsx_chip *chip);
+#endif
+int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 start_sector, u16 sector_cnt);
+void xd_free_l2p_tbl(struct rtsx_chip *chip);
+void xd_cleanup_work(struct rtsx_chip *chip);
+int xd_power_off_card3v3(struct rtsx_chip *chip);
+int release_xd_card(struct rtsx_chip *chip);
+
+#endif /* __REALTEK_RTSX_XD_H */
diff --git a/drivers/staging/sb105x/sb_mp_register.h b/drivers/staging/sb105x/sb_mp_register.h
index 16de497415ee..276c1bbcc18d 100644
--- a/drivers/staging/sb105x/sb_mp_register.h
+++ b/drivers/staging/sb105x/sb_mp_register.h
@@ -116,10 +116,10 @@
#define SB105X_FCR_TXFR 0x04 /* TX FIFO Reset */
#define SB105X_FCR_DMS 0x08 /* DMA Mode Select */
-#define SB105X_FCR_RTR08 0x00 /* Receice Trigger Level set at 8 */
-#define SB105X_FCR_RTR16 0x40 /* Receice Trigger Level set at 16 */
-#define SB105X_FCR_RTR56 0x80 /* Receice Trigger Level set at 56 */
-#define SB105X_FCR_RTR60 0xc0 /* Receice Trigger Level set at 60 */
+#define SB105X_FCR_RTR08 0x00 /* Receive Trigger Level set at 8 */
+#define SB105X_FCR_RTR16 0x40 /* Receive Trigger Level set at 16 */
+#define SB105X_FCR_RTR56 0x80 /* Receive Trigger Level set at 56 */
+#define SB105X_FCR_RTR60 0xc0 /* Receive Trigger Level set at 60 */
#define SB105X_FCR_TTR08 0x00 /* Transmit Trigger Level set at 8 */
#define SB105X_FCR_TTR16 0x10 /* Transmit Trigger Level set at 16 */
#define SB105X_FCR_TTR32 0x20 /* Transmit Trigger Level set at 32 */
diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
index 5cd3efff97d3..c9d6ee3903ad 100644
--- a/drivers/staging/sb105x/sb_pci_mp.c
+++ b/drivers/staging/sb105x/sb_pci_mp.c
@@ -182,7 +182,7 @@ static int sb1054_get_register(struct sb_uart_port *port, int page, int reg)
if( page <= 0)
{
- printk(" page 0 can not use this fuction\n");
+ printk(" page 0 can not use this function\n");
return -1;
}
@@ -243,7 +243,7 @@ static int sb1054_set_register(struct sb_uart_port *port, int page, int reg, int
if( page <= 0)
{
- printk(" page 0 can not use this fuction\n");
+ printk(" page 0 can not use this function\n");
return -1;
}
switch(page)
diff --git a/drivers/staging/sb105x/sb_pci_mp.h b/drivers/staging/sb105x/sb_pci_mp.h
index 11d92992e925..80ae4ab04603 100644
--- a/drivers/staging/sb105x/sb_pci_mp.h
+++ b/drivers/staging/sb105x/sb_pci_mp.h
@@ -9,7 +9,6 @@
#include <linux/sched.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/tty_driver.h>
diff --git a/drivers/staging/sbe-2t3e3/ctrl.c b/drivers/staging/sbe-2t3e3/ctrl.c
index a5825d7f1bbf..d280bcfd660a 100644
--- a/drivers/staging/sbe-2t3e3/ctrl.c
+++ b/drivers/staging/sbe-2t3e3/ctrl.c
@@ -31,7 +31,7 @@ void t3e3_set_frame_type(struct channel *sc, u32 mode)
sc->p.frame_type = mode;
}
-void t3e3_set_loopback(struct channel *sc, u32 mode)
+static void t3e3_set_loopback(struct channel *sc, u32 mode)
{
u32 tx, rx;
@@ -95,7 +95,7 @@ void t3e3_set_loopback(struct channel *sc, u32 mode)
}
-void t3e3_reg_read(struct channel *sc, u32 *reg, u32 *val)
+static void t3e3_reg_read(struct channel *sc, u32 *reg, u32 *val)
{
u32 i;
@@ -132,7 +132,7 @@ void t3e3_reg_read(struct channel *sc, u32 *reg, u32 *val)
}
}
-void t3e3_reg_write(struct channel *sc, u32 *reg)
+static void t3e3_reg_write(struct channel *sc, u32 *reg)
{
u32 i;
@@ -164,12 +164,12 @@ void t3e3_reg_write(struct channel *sc, u32 *reg)
}
}
-void t3e3_port_get(struct channel *sc, t3e3_param_t *param)
+static void t3e3_port_get(struct channel *sc, t3e3_param_t *param)
{
memcpy(param, &(sc->p), sizeof(t3e3_param_t));
}
-void t3e3_port_set(struct channel *sc, t3e3_param_t *param)
+static void t3e3_port_set(struct channel *sc, t3e3_param_t *param)
{
if (param->frame_mode != 0xff)
cpld_set_frame_mode(sc, param->frame_mode);
@@ -216,7 +216,7 @@ void t3e3_port_set(struct channel *sc, t3e3_param_t *param)
cpld_set_scrambler(sc, param->scrambler);
}
-void t3e3_port_get_stats(struct channel *sc,
+static void t3e3_port_get_stats(struct channel *sc,
t3e3_stats_t *stats)
{
u32 result;
@@ -282,7 +282,7 @@ void t3e3_port_get_stats(struct channel *sc,
memcpy(stats, &(sc->s), sizeof(t3e3_stats_t));
}
-void t3e3_port_del_stats(struct channel *sc)
+static void t3e3_port_del_stats(struct channel *sc)
{
memset(&(sc->s), 0, sizeof(t3e3_stats_t));
}
diff --git a/drivers/staging/sep/sep_crypto.c b/drivers/staging/sep/sep_crypto.c
index b9262a78dd6e..7fc267550c65 100644
--- a/drivers/staging/sep/sep_crypto.c
+++ b/drivers/staging/sep/sep_crypto.c
@@ -32,7 +32,6 @@
*/
/* #define DEBUG */
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
@@ -3927,6 +3926,7 @@ int sep_crypto_setup(void)
err_algs:
for (k = 0; k < i; k++)
crypto_unregister_ahash(&hash_algs[k]);
+ destroy_workqueue(sep_dev->workqueue);
return err;
err_crypto_algs:
@@ -3945,6 +3945,7 @@ void sep_crypto_takedown(void)
for (i = 0; i < ARRAY_SIZE(crypto_algs); i++)
crypto_unregister_alg(&crypto_algs[i]);
+ destroy_workqueue(sep_dev->workqueue);
tasklet_kill(&sep_dev->finish_tasklet);
}
diff --git a/drivers/staging/sep/sep_main.c b/drivers/staging/sep/sep_main.c
index 1e80a4013b8c..122614c4092b 100644
--- a/drivers/staging/sep/sep_main.c
+++ b/drivers/staging/sep/sep_main.c
@@ -39,7 +39,6 @@
/* #define DEBUG */
/* #define SEP_PERF_DEBUG */
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
@@ -4292,7 +4291,7 @@ static void sep_remove(struct pci_dev *pdev)
}
/* Initialize struct pci_device_id for our driver */
-static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
+static const struct pci_device_id sep_pci_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
{0}
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index 73fc3cc19e33..f0fcbf7c7d7f 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -5,7 +5,6 @@
*/
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -970,17 +969,11 @@ static void qt_block_until_empty(struct tty_struct *tty,
{
int timeout = HZ / 10;
int wait = 30;
- int count;
-
- while (1) {
-
- count = qt_chars_in_buffer(tty);
-
- if (count <= 0)
- return;
-
- interruptible_sleep_on_timeout(&qt_port->wait, timeout);
+ /* returns if we get a signal, an error, or the buffer is empty */
+ while (wait_event_interruptible_timeout(qt_port->wait,
+ qt_chars_in_buffer(tty) <= 0,
+ timeout) == 0) {
wait--;
if (wait == 0) {
dev_dbg(&qt_port->port->dev, "%s - TIMEOUT", __func__);
@@ -1137,7 +1130,10 @@ static int qt_ioctl(struct tty_struct *tty,
if (cmd == TIOCMIWAIT) {
while (qt_port != NULL) {
+#if 0
+ /* this never wakes up */
interruptible_sleep_on(&qt_port->msr_wait);
+#endif
if (signal_pending(current))
return -ERESTARTSYS;
else {
diff --git a/drivers/staging/silicom/bpctl_mod.c b/drivers/staging/silicom/bpctl_mod.c
index 39dc92a271ab..20325f53328e 100644
--- a/drivers/staging/silicom/bpctl_mod.c
+++ b/drivers/staging/silicom/bpctl_mod.c
@@ -135,8 +135,6 @@ static int bp_get_dev_idx_bsf(struct net_device *dev, int *index)
else
return -EOPNOTSUPP;
- if (!drvinfo.bus_info)
- return -ENODATA;
if (!strcmp(drvinfo.bus_info, "N/A"))
return -ENODATA;
diff --git a/drivers/staging/silicom/bypasslib/bypass.c b/drivers/staging/silicom/bypasslib/bypass.c
index ba0d23a1cfbe..09e00dac04f3 100644
--- a/drivers/staging/silicom/bypasslib/bypass.c
+++ b/drivers/staging/silicom/bypasslib/bypass.c
@@ -7,11 +7,11 @@
/* the Free Software Foundation, located in the file LICENSE. */
/* */
/* */
-/* bypass.c */
+/* bypass.c */
/* */
/******************************************************************************/
-#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#if defined(CONFIG_SMP) && !defined(__SMP__)
#define __SMP__
#endif
@@ -22,7 +22,7 @@
#include <linux/sched.h>
#include <linux/wait.h>
-#include <linux/netdevice.h> // struct device, and other headers
+#include <linux/netdevice.h> /* struct device, and other headers */
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/rtnetlink.h>
@@ -40,20 +40,17 @@ MODULE_AUTHOR("www.silicom.co.il");
MODULE_LICENSE("GPL");
-int init_lib_module(void);
-void cleanup_lib_module(void);
-
static int do_cmd(struct net_device *dev, struct ifreq *ifr, int cmd, int *data)
{
int ret = -1;
struct if_bypass *bypass_cb;
- static int (*ioctl) (struct net_device *, struct ifreq *, int);
bypass_cb = (struct if_bypass *)ifr;
bypass_cb->cmd = cmd;
bypass_cb->data = *data;
- if ((dev->netdev_ops) && (ioctl = dev->netdev_ops->ndo_do_ioctl)) {
- ret = ioctl(dev, ifr, SIOCGIFBYPASS);
+
+ if (dev->netdev_ops && dev->netdev_ops->ndo_do_ioctl) {
+ ret = dev->netdev_ops->ndo_do_ioctl(dev, ifr, SIOCGIFBYPASS);
*data = bypass_cb->data;
}
@@ -66,13 +63,12 @@ static int doit(int cmd, int if_index, int *data)
int ret = -1;
struct net_device *dev;
struct net_device *n;
- for_each_netdev_safe(&init_net, dev, n) {
+ for_each_netdev_safe(&init_net, dev, n) {
if (dev->ifindex == if_index) {
ret = do_cmd(dev, &ifr, cmd, data);
if (ret < 0)
ret = -1;
-
}
}
@@ -82,56 +78,65 @@ static int doit(int cmd, int if_index, int *data)
#define bp_symbol_get(fn_name) symbol_get(fn_name)
#define bp_symbol_put(fn_name) symbol_put(fn_name)
-#define SET_BPLIB_INT_FN(fn_name, arg_type, arg, ret) \
- ({ int (* fn_ex)(arg_type)=NULL; \
- fn_ex=bp_symbol_get(fn_name##_sd); \
- if(fn_ex) { \
- ret= fn_ex(arg); \
- bp_symbol_put(fn_name##_sd); \
- } else ret=-1; \
- })
-
-#define SET_BPLIB_INT_FN2(fn_name, arg_type, arg, arg_type1, arg1, ret) \
- ({ int (* fn_ex)(arg_type,arg_type1)=NULL; \
- fn_ex=bp_symbol_get(fn_name##_sd); \
- if(fn_ex) { \
- ret= fn_ex(arg,arg1); \
- bp_symbol_put(fn_name##_sd); \
- } else ret=-1; \
- })
-#define SET_BPLIB_INT_FN3(fn_name, arg_type, arg, arg_type1, arg1,arg_type2, arg2, ret) \
- ({ int (* fn_ex)(arg_type,arg_type1, arg_type2)=NULL; \
- fn_ex=bp_symbol_get(fn_name##_sd); \
- if(fn_ex) { \
- ret= fn_ex(arg,arg1,arg2); \
- bp_symbol_put(fn_name##_sd); \
- } else ret=-1; \
- })
-
-#define DO_BPLIB_GET_ARG_FN(fn_name,ioctl_val, if_index) \
- ({ int data, ret=0; \
- if(is_dev_sd(if_index)){ \
- SET_BPLIB_INT_FN(fn_name, int, if_index, ret); \
- return ret; \
- } \
- return doit(ioctl_val,if_index, &data); \
- })
-
-#define DO_BPLIB_SET_ARG_FN(fn_name,ioctl_val,if_index,arg) \
- ({ int data, ret=0; \
- if(is_dev_sd(if_index)){ \
- SET_BPLIB_INT_FN2(fn_name, int, if_index, int, arg, ret); \
- return ret; \
- } \
- data=arg; \
- return doit(ioctl_val,if_index, &data); \
- })
+#define SET_BPLIB_INT_FN(fn_name, arg_type, arg, ret) \
+({ int (*fn_ex)(arg_type) = NULL; \
+ fn_ex = bp_symbol_get(fn_name##_sd); \
+ if (fn_ex) { \
+ ret = fn_ex(arg); \
+ bp_symbol_put(fn_name##_sd); \
+ } else { \
+ ret = -1; \
+ } \
+})
+
+#define SET_BPLIB_INT_FN2(fn_name, arg_type, arg, arg_type1, arg1, ret)\
+({ int (*fn_ex)(arg_type, arg_type1) = NULL; \
+ fn_ex = bp_symbol_get(fn_name##_sd); \
+ if (fn_ex) { \
+ ret = fn_ex(arg, arg1); \
+ bp_symbol_put(fn_name##_sd); \
+ } else { \
+ ret = -1; \
+ } \
+})
+
+#define SET_BPLIB_INT_FN3(fn_name, arg_type, arg, arg_type1, arg1, \
+ arg_type2, arg2, ret) \
+({ int (*fn_ex)(arg_type, arg_type1, arg_type2) = NULL; \
+ fn_ex = bp_symbol_get(fn_name##_sd); \
+ if (fn_ex) { \
+ ret = fn_ex(arg, arg1, arg2); \
+ bp_symbol_put(fn_name##_sd); \
+ } else { \
+ ret = -1; \
+ } \
+})
+
+#define DO_BPLIB_GET_ARG_FN(fn_name, ioctl_val, if_index) \
+({ int data, ret = 0; \
+ if (is_dev_sd(if_index)) { \
+ SET_BPLIB_INT_FN(fn_name, int, if_index, ret); \
+ return ret; \
+ } \
+ return doit(ioctl_val, if_index, &data); \
+})
+
+#define DO_BPLIB_SET_ARG_FN(fn_name, ioctl_val, if_index, arg) \
+({ int data, ret = 0; \
+ if (is_dev_sd(if_index)) { \
+ SET_BPLIB_INT_FN2(fn_name, int, if_index, int, \
+ arg, ret); \
+ return ret; \
+ } \
+ data = arg; \
+ return doit(ioctl_val, if_index, &data); \
+})
static int is_dev_sd(int if_index)
{
int ret = 0;
SET_BPLIB_INT_FN(is_bypass, int, if_index, ret);
- return (ret >= 0 ? 1 : 0);
+ return ret >= 0 ? 1 : 0;
}
static int is_bypass_dev(int if_index)
@@ -139,16 +144,19 @@ static int is_bypass_dev(int if_index)
struct pci_dev *pdev = NULL;
struct net_device *dev = NULL;
struct ifreq ifr;
- int ret = 0, data = 0;
+ int ret = 0;
+ int data = 0;
while ((pdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) {
- if ((dev = pci_get_drvdata(pdev)) != NULL)
- if (((dev = pci_get_drvdata(pdev)) != NULL) &&
- (dev->ifindex == if_index)) {
+ dev = pci_get_drvdata(pdev);
+ if (dev != NULL) {
+ dev = pci_get_drvdata(pdev);
+ if ((dev != NULL) && (dev->ifindex == if_index)) {
if ((pdev->vendor == SILICOM_VID) &&
(pdev->device >= SILICOM_BP_PID_MIN) &&
- (pdev->device <= SILICOM_BP_PID_MAX))
+ (pdev->device <= SILICOM_BP_PID_MAX)) {
goto send_cmd;
+ }
#if defined(BP_VENDOR_SUPPORT) && defined(ETHTOOL_GDRVINFO)
else {
struct ethtool_drvinfo info;
@@ -173,10 +181,11 @@ static int is_bypass_dev(int if_index)
#endif
return -1;
}
+ }
}
send_cmd:
ret = do_cmd(dev, &ifr, IS_BYPASS, &data);
- return (ret < 0 ? -1 : ret);
+ return ret < 0 ? -1 : ret;
}
static int is_bypass(int if_index)
@@ -267,11 +276,13 @@ EXPORT_SYMBOL(get_bypass_pwup);
static int set_bypass_wd(int if_index, int ms_timeout, int *ms_timeout_set)
{
- int data = ms_timeout, ret = 0;
- if (is_dev_sd(if_index))
+ int data = ms_timeout;
+ int ret = 0;
+
+ if (is_dev_sd(if_index)) {
SET_BPLIB_INT_FN3(set_bypass_wd, int, if_index, int, ms_timeout,
int *, ms_timeout_set, ret);
- else {
+ } else {
ret = doit(SET_BYPASS_WD, if_index, &data);
if (ret > 0) {
*ms_timeout_set = ret;
@@ -284,7 +295,9 @@ EXPORT_SYMBOL(set_bypass_wd);
static int get_bypass_wd(int if_index, int *ms_timeout_set)
{
- int *data = ms_timeout_set, ret = 0;
+ int *data = ms_timeout_set;
+ int ret = 0;
+
if (is_dev_sd(if_index))
SET_BPLIB_INT_FN2(get_bypass_wd, int, if_index, int *,
ms_timeout_set, ret);
@@ -297,10 +310,11 @@ EXPORT_SYMBOL(get_bypass_wd);
static int get_wd_expire_time(int if_index, int *ms_time_left)
{
int *data = ms_time_left, ret = 0;
- if (is_dev_sd(if_index))
+
+ if (is_dev_sd(if_index)) {
SET_BPLIB_INT_FN2(get_wd_expire_time, int, if_index, int *,
ms_time_left, ret);
- else {
+ } else {
ret = doit(GET_WD_EXPIRE_TIME, if_index, data);
if ((ret == 0) && (*data != 0))
ret = 1;
@@ -476,14 +490,14 @@ EXPORT_SYMBOL(get_bp_hw_reset);
static int get_bypass_info(int if_index, struct bp_info *bp_info)
{
int ret = 0;
+
if (is_dev_sd(if_index)) {
SET_BPLIB_INT_FN2(get_bypass_info, int, if_index,
struct bp_info *, bp_info, ret);
} else {
- static int (*ioctl) (struct net_device *, struct ifreq *, int);
struct net_device *dev;
-
struct net_device *n;
+
for_each_netdev_safe(&init_net, dev, n) {
if (dev->ifindex == if_index) {
struct if_bypass_info *bypass_cb;
@@ -493,17 +507,16 @@ static int get_bypass_info(int if_index, struct bp_info *bp_info)
bypass_cb = (struct if_bypass_info *)&ifr;
bypass_cb->cmd = GET_BYPASS_INFO;
- if ((dev->netdev_ops) &&
- (ioctl = dev->netdev_ops->ndo_do_ioctl)) {
- ret = ioctl(dev, &ifr, SIOCGIFBYPASS);
- }
-
+ if (dev->netdev_ops &&
+ dev->netdev_ops->ndo_do_ioctl)
+ ret = dev->netdev_ops->ndo_do_ioctl(dev,
+ &ifr, SIOCGIFBYPASS);
else
ret = -1;
if (ret == 0)
memcpy(bp_info, &bypass_cb->bp_info,
sizeof(struct bp_info));
- ret = (ret < 0 ? -1 : 0);
+ ret = ret < 0 ? -1 : 0;
break;
}
}
@@ -512,14 +525,13 @@ static int get_bypass_info(int if_index, struct bp_info *bp_info)
}
EXPORT_SYMBOL(get_bypass_info);
-int init_lib_module(void)
+static int __init init_lib_module(void)
{
-
printk(VERSION);
return 0;
}
-void cleanup_lib_module(void)
+static void __exit cleanup_lib_module(void)
{
}
diff --git a/drivers/staging/slicoss/README b/drivers/staging/slicoss/README
index cb04a87b2017..53052c4e78ae 100644
--- a/drivers/staging/slicoss/README
+++ b/drivers/staging/slicoss/README
@@ -14,7 +14,6 @@ TODO:
- use net_device_ops
- use dev->stats rather than adapter->stats
- don't cast netdev_priv it is already void
- - use compare_ether_addr
- GET RID OF MACROS
- work on all architectures
- without CONFIG_X86_64 confusion
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 652272b96a56..1426ca49bfe8 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -136,7 +136,7 @@ MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
module_param(intagg_delay, int, 0);
MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");
-static DEFINE_PCI_DEVICE_TABLE(slic_pci_tbl) = {
+static const struct pci_device_id slic_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_1GB_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_2GB_DEVICE_ID) },
{ 0 }
@@ -595,15 +595,12 @@ static void slic_adapter_set_hwaddr(struct adapter *adapter)
memcpy(adapter->macaddr,
card->config.MacInfo[adapter->functionnumber].macaddrA,
sizeof(struct slic_config_mac));
- if (!(adapter->currmacaddr[0] || adapter->currmacaddr[1] ||
- adapter->currmacaddr[2] || adapter->currmacaddr[3] ||
- adapter->currmacaddr[4] || adapter->currmacaddr[5])) {
- memcpy(adapter->currmacaddr, adapter->macaddr, 6);
- }
- if (adapter->netdev) {
+ if (is_zero_ether_addr(adapter->currmacaddr))
+ memcpy(adapter->currmacaddr, adapter->macaddr,
+ ETH_ALEN);
+ if (adapter->netdev)
memcpy(adapter->netdev->dev_addr, adapter->currmacaddr,
- 6);
- }
+ ETH_ALEN);
}
}
@@ -767,13 +764,11 @@ static bool slic_mac_filter(struct adapter *adapter,
{
struct net_device *netdev = adapter->netdev;
u32 opts = adapter->macopts;
- u32 *dhost4 = (u32 *)&ether_frame->ether_dhost[0];
- u16 *dhost2 = (u16 *)&ether_frame->ether_dhost[4];
if (opts & MAC_PROMISC)
return true;
- if ((*dhost4 == 0xFFFFFFFF) && (*dhost2 == 0xFFFF)) {
+ if (is_broadcast_ether_addr(ether_frame->ether_dhost)) {
if (opts & MAC_BCAST) {
adapter->rcv_broadcasts++;
return true;
@@ -782,7 +777,7 @@ static bool slic_mac_filter(struct adapter *adapter,
}
}
- if (ether_frame->ether_dhost[0] & 0x01) {
+ if (is_multicast_ether_addr(ether_frame->ether_dhost)) {
if (opts & MAC_ALLMCAST) {
adapter->rcv_multicasts++;
netdev->stats.multicast++;
@@ -2335,7 +2330,7 @@ static int slic_mcast_add_list(struct adapter *adapter, char *address)
if (mcaddr == NULL)
return 1;
- memcpy(mcaddr->address, address, 6);
+ memcpy(mcaddr->address, address, ETH_ALEN);
mcaddr->next = adapter->mcastaddrs;
adapter->mcastaddrs = mcaddr;
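The slicoss hunks above replace open-coded MAC address tests and the magic length 6 with the <linux/etherdevice.h> helpers and ETH_ALEN. A minimal sketch of those helpers, using a hypothetical adapter structure purely for illustration (not the slicoss one):

#include <linux/etherdevice.h>	/* is_*_ether_addr(), ETH_ALEN */
#include <linux/string.h>
#include <linux/types.h>

struct example_adapter {		/* hypothetical structure */
	u8 currmacaddr[ETH_ALEN];
	u8 macaddr[ETH_ALEN];
};

static void example_set_hwaddr(struct example_adapter *adapter)
{
	/* fall back to the permanent address when none has been set yet */
	if (is_zero_ether_addr(adapter->currmacaddr))
		memcpy(adapter->currmacaddr, adapter->macaddr, ETH_ALEN);
}

static bool example_accept_dest(const u8 *dhost)
{
	/* broadcast also satisfies is_multicast_ether_addr(), so test it first */
	if (is_broadcast_ether_addr(dhost))
		return true;
	return is_multicast_ether_addr(dhost);
}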
diff --git a/drivers/staging/sm7xxfb/sm7xxfb.c b/drivers/staging/sm7xxfb/sm7xxfb.c
index ba199ffff178..6176d98744cc 100644
--- a/drivers/staging/sm7xxfb/sm7xxfb.c
+++ b/drivers/staging/sm7xxfb/sm7xxfb.c
@@ -585,7 +585,7 @@ static void smtc_set_timing(struct smtcfb_info *sfb)
}
}
-void smtcfb_setmode(struct smtcfb_info *sfb)
+static void smtcfb_setmode(struct smtcfb_info *sfb)
{
switch (sfb->fb.var.bits_per_pixel) {
case 32:
@@ -920,7 +920,7 @@ failed_free:
* 0x712 (LynxEM+)
* 0x720 (Lynx3DM, Lynx3DM+)
*/
-static DEFINE_PCI_DEVICE_TABLE(smtcfb_pci_table) = {
+static const struct pci_device_id smtcfb_pci_table[] = {
{ PCI_DEVICE(0x126f, 0x710), },
{ PCI_DEVICE(0x126f, 0x712), },
{ PCI_DEVICE(0x126f, 0x720), },
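As in the slicoss hunk earlier, DEFINE_PCI_DEVICE_TABLE() is retired in favour of a plain const struct pci_device_id array. A minimal sketch of that pattern for a hypothetical driver (the IDs are placeholders, not real hardware):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_DEVICE(0x126f, 0x0710) },		/* placeholder vendor/device pair */
	{ }					/* all-zero terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);	/* exports the IDs for module autoloading */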
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 47502fa5f3f6..ef5933b93590 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -37,8 +37,6 @@
#include <linux/input.h>
#include <linux/kmod.h>
-#include <linux/bootmem.h> /* for alloc_bootmem */
-
/* speakup_*_selection */
#include <linux/module.h>
#include <linux/sched.h>
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index 135428856d47..4e18fb405344 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -6,6 +6,10 @@
#include "spk_priv.h"
#include "serialio.h"
+#ifndef SERIAL_PORT_DFNS
+#define SERIAL_PORT_DFNS
+#endif
+
static void start_serial_interrupt(int irq);
static const struct old_serial_port rs_table[] = {
diff --git a/drivers/staging/speakup/serialio.h b/drivers/staging/speakup/serialio.h
index 55d68b5ad165..0a937732a190 100644
--- a/drivers/staging/speakup/serialio.h
+++ b/drivers/staging/speakup/serialio.h
@@ -36,30 +36,4 @@ struct old_serial_port {
#define spk_serial_tx_busy() ((inb(speakup_info.port_tts + UART_LSR) & BOTH_EMPTY) != BOTH_EMPTY)
-/* 2.6.22 doesn't have them any more, hardcode it for now (these values should
- * be fine for 99% cases) */
-#ifndef BASE_BAUD
-#define BASE_BAUD (1843200 / 16)
-#endif
-#ifndef STD_COM_FLAGS
-#ifdef CONFIG_SERIAL_DETECT_IRQ
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
-#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
-#else
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
-#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
-#endif
-#endif
-#ifndef SERIAL_PORT_DFNS
-#define SERIAL_PORT_DFNS \
- /* UART CLK PORT IRQ FLAGS */ \
- { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
- { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
- { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
- { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
-#endif
-#ifndef IRQF_SHARED
-#define IRQF_SHARED SA_SHIRQ
-#endif
-
#endif
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile
index 8c8c92a9083f..adb21c53f747 100644
--- a/drivers/staging/tidspbridge/Makefile
+++ b/drivers/staging/tidspbridge/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_TIDSPBRIDGE) += tidspbridge.o
-libgen = gen/gh.o gen/uuidutil.o
+libgen = gen/gh.o
libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
core/tiomap3430_pwr.o core/tiomap_io.o \
core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o
diff --git a/drivers/staging/tidspbridge/gen/gh.c b/drivers/staging/tidspbridge/gen/gh.c
index 25eaef782aaa..936470cb608e 100644
--- a/drivers/staging/tidspbridge/gen/gh.c
+++ b/drivers/staging/tidspbridge/gen/gh.c
@@ -14,56 +14,45 @@
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
-#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/slab.h>
-#include <dspbridge/host_os.h>
-#include <dspbridge/gh.h>
-
-struct element {
- struct element *next;
- u8 data[1];
+struct gh_node {
+ struct hlist_node hl;
+ u8 data[0];
};
+#define GH_HASH_ORDER 8
+
struct gh_t_hash_tab {
- u16 max_bucket;
- u16 val_size;
- struct element **buckets;
- u16(*hash) (void *, u16);
- bool(*match) (void *, void *);
- void (*delete) (void *);
+ u32 val_size;
+ DECLARE_HASHTABLE(hash_table, GH_HASH_ORDER);
+ u32 (*hash)(const void *key);
+ bool (*match)(const void *key, const void *value);
+ void (*delete)(void *key);
};
-static void noop(void *p);
-
/*
* ======== gh_create ========
*/
-struct gh_t_hash_tab *gh_create(u16 max_bucket, u16 val_size,
- u16(*hash) (void *, u16), bool(*match) (void *,
- void *),
- void (*delete) (void *))
+struct gh_t_hash_tab *gh_create(u32 val_size, u32 (*hash)(const void *),
+ bool (*match)(const void *, const void *),
+ void (*delete)(void *))
{
struct gh_t_hash_tab *hash_tab;
- u16 i;
+
hash_tab = kzalloc(sizeof(struct gh_t_hash_tab), GFP_KERNEL);
- if (hash_tab == NULL)
- return NULL;
- hash_tab->max_bucket = max_bucket;
+ if (!hash_tab)
+ return ERR_PTR(-ENOMEM);
+
+ hash_init(hash_tab->hash_table);
+
hash_tab->val_size = val_size;
hash_tab->hash = hash;
hash_tab->match = match;
- hash_tab->delete = delete == NULL ? noop : delete;
-
- hash_tab->buckets =
- kzalloc(sizeof(struct element *) * max_bucket, GFP_KERNEL);
- if (hash_tab->buckets == NULL) {
- gh_delete(hash_tab);
- return NULL;
- }
-
- for (i = 0; i < max_bucket; i++)
- hash_tab->buckets[i] = NULL;
+ hash_tab->delete = delete;
return hash_tab;
}
@@ -73,21 +62,16 @@ struct gh_t_hash_tab *gh_create(u16 max_bucket, u16 val_size,
*/
void gh_delete(struct gh_t_hash_tab *hash_tab)
{
- struct element *elem, *next;
- u16 i;
-
- if (hash_tab != NULL) {
- if (hash_tab->buckets != NULL) {
- for (i = 0; i < hash_tab->max_bucket; i++) {
- for (elem = hash_tab->buckets[i]; elem != NULL;
- elem = next) {
- next = elem->next;
- (*hash_tab->delete) (elem->data);
- kfree(elem);
- }
- }
-
- kfree(hash_tab->buckets);
+ struct gh_node *n;
+ struct hlist_node *tmp;
+ u32 i;
+
+ if (hash_tab) {
+ hash_for_each_safe(hash_tab->hash_table, i, tmp, n, hl) {
+ hash_del(&n->hl);
+ if (hash_tab->delete)
+ hash_tab->delete(n->data);
+ kfree(n);
}
kfree(hash_tab);
@@ -98,56 +82,39 @@ void gh_delete(struct gh_t_hash_tab *hash_tab)
* ======== gh_find ========
*/
-void *gh_find(struct gh_t_hash_tab *hash_tab, void *key)
+void *gh_find(struct gh_t_hash_tab *hash_tab, const void *key)
{
- struct element *elem;
+ struct gh_node *n;
+ u32 key_hash = hash_tab->hash(key);
- elem = hash_tab->buckets[(*hash_tab->hash) (key, hash_tab->max_bucket)];
-
- for (; elem; elem = elem->next) {
- if ((*hash_tab->match) (key, elem->data))
- return elem->data;
+ hash_for_each_possible(hash_tab->hash_table, n, hl, key_hash) {
+ if (hash_tab->match(key, n->data))
+ return n->data;
}
- return NULL;
+ return ERR_PTR(-ENODATA);
}
/*
* ======== gh_insert ========
*/
-void *gh_insert(struct gh_t_hash_tab *hash_tab, void *key, void *value)
+void *gh_insert(struct gh_t_hash_tab *hash_tab, const void *key,
+ const void *value)
{
- struct element *elem;
- u16 i;
- char *src, *dst;
+ struct gh_node *n;
- elem = kzalloc(sizeof(struct element) - 1 + hash_tab->val_size,
+ n = kmalloc(sizeof(struct gh_node) + hash_tab->val_size,
GFP_KERNEL);
- if (elem != NULL) {
-
- dst = (char *)elem->data;
- src = (char *)value;
- for (i = 0; i < hash_tab->val_size; i++)
- *dst++ = *src++;
- i = (*hash_tab->hash) (key, hash_tab->max_bucket);
- elem->next = hash_tab->buckets[i];
- hash_tab->buckets[i] = elem;
+ if (!n)
+ return ERR_PTR(-ENOMEM);
- return elem->data;
- }
-
- return NULL;
-}
+ INIT_HLIST_NODE(&n->hl);
+ hash_add(hash_tab->hash_table, &n->hl, hash_tab->hash(key));
+ memcpy(n->data, value, hash_tab->val_size);
-/*
- * ======== noop ========
- */
-/* ARGSUSED */
-static void noop(void *p)
-{
- p = p; /* stifle compiler warning */
+ return n->data;
}
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
@@ -162,16 +129,13 @@ static void noop(void *p)
void gh_iterate(struct gh_t_hash_tab *hash_tab,
void (*callback)(void *, void *), void *user_data)
{
- struct element *elem;
+ struct gh_node *n;
u32 i;
- if (hash_tab && hash_tab->buckets)
- for (i = 0; i < hash_tab->max_bucket; i++) {
- elem = hash_tab->buckets[i];
- while (elem) {
- callback(&elem->data, user_data);
- elem = elem->next;
- }
- }
+ if (!hash_tab)
+ return;
+
+ hash_for_each(hash_tab->hash_table, i, n, hl)
+ callback(&n->data, user_data);
}
#endif
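The gh.c rewrite above drops the hand-rolled bucket array in favour of the generic <linux/hashtable.h> API. A minimal, self-contained sketch of that API (the names here are illustrative, not the driver's):

#include <linux/hashtable.h>
#include <linux/slab.h>

struct item {
	int key;
	struct hlist_node hl;
};

static DEFINE_HASHTABLE(items, 3);	/* 2^3 = 8 buckets */

static int add_item(int key)
{
	struct item *it = kmalloc(sizeof(*it), GFP_KERNEL);

	if (!it)
		return -ENOMEM;
	it->key = key;
	hash_add(items, &it->hl, key);	/* key is hashed into a bucket */
	return 0;
}

static struct item *find_item(int key)
{
	struct item *it;

	/* walk only the bucket that "key" hashes to */
	hash_for_each_possible(items, it, hl, key) {
		if (it->key == key)
			return it;
	}
	return NULL;
}

static void free_items(void)
{
	struct item *it;
	struct hlist_node *tmp;
	int bkt;

	/* the _safe variant allows deletion while iterating every bucket */
	hash_for_each_safe(items, bkt, tmp, it, hl) {
		hash_del(&it->hl);
		kfree(it);
	}
}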
diff --git a/drivers/staging/tidspbridge/gen/uuidutil.c b/drivers/staging/tidspbridge/gen/uuidutil.c
deleted file mode 100644
index b7d8313d1acb..000000000000
--- a/drivers/staging/tidspbridge/gen/uuidutil.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * uuidutil.c
- *
- * DSP-BIOS Bridge driver support functions for TI OMAP processors.
- *
- * This file contains the implementation of UUID helper functions.
- *
- * Copyright (C) 2005-2006 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-#include <linux/types.h>
-
-/* ----------------------------------- Host OS */
-#include <dspbridge/host_os.h>
-
-/* ----------------------------------- DSP/BIOS Bridge */
-#include <dspbridge/dbdefs.h>
-
-/* ----------------------------------- This */
-#include <dspbridge/uuidutil.h>
-
-static s32 uuid_hex_to_bin(char *buf, s32 len)
-{
- s32 i;
- s32 result = 0;
- int value;
-
- for (i = 0; i < len; i++) {
- value = hex_to_bin(*buf++);
- result *= 16;
- if (value > 0)
- result += value;
- }
-
- return result;
-}
-
-/*
- * ======== uuid_uuid_from_string ========
- * Purpose:
- * Converts a string to a struct dsp_uuid.
- */
-void uuid_uuid_from_string(char *sz_uuid, struct dsp_uuid *uuid_obj)
-{
- s32 j;
-
- uuid_obj->data1 = uuid_hex_to_bin(sz_uuid, 8);
- sz_uuid += 8;
-
- /* Step over underscore */
- sz_uuid++;
-
- uuid_obj->data2 = (u16) uuid_hex_to_bin(sz_uuid, 4);
- sz_uuid += 4;
-
- /* Step over underscore */
- sz_uuid++;
-
- uuid_obj->data3 = (u16) uuid_hex_to_bin(sz_uuid, 4);
- sz_uuid += 4;
-
- /* Step over underscore */
- sz_uuid++;
-
- uuid_obj->data4 = (u8) uuid_hex_to_bin(sz_uuid, 2);
- sz_uuid += 2;
-
- uuid_obj->data5 = (u8) uuid_hex_to_bin(sz_uuid, 2);
- sz_uuid += 2;
-
- /* Step over underscore */
- sz_uuid++;
-
- for (j = 0; j < 6; j++) {
- uuid_obj->data6[j] = (u8) uuid_hex_to_bin(sz_uuid, 2);
- sz_uuid += 2;
- }
-}
diff --git a/drivers/staging/tidspbridge/include/dspbridge/gh.h b/drivers/staging/tidspbridge/include/dspbridge/gh.h
index da85079dbfb6..e4303b4bf5fd 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/gh.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/gh.h
@@ -18,13 +18,13 @@
#define GH_
#include <dspbridge/host_os.h>
-extern struct gh_t_hash_tab *gh_create(u16 max_bucket, u16 val_size,
- u16(*hash) (void *, u16),
- bool(*match) (void *, void *),
- void (*delete) (void *));
+extern struct gh_t_hash_tab *gh_create(u32 val_size,
+ u32 (*hash)(const void *), bool (*match)(const void *,
+ const void *), void (*delete) (void *));
extern void gh_delete(struct gh_t_hash_tab *hash_tab);
-extern void *gh_find(struct gh_t_hash_tab *hash_tab, void *key);
-extern void *gh_insert(struct gh_t_hash_tab *hash_tab, void *key, void *value);
+extern void *gh_find(struct gh_t_hash_tab *hash_tab, const void *key);
+extern void *gh_insert(struct gh_t_hash_tab *hash_tab, const void *key,
+ const void *value);
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
void gh_iterate(struct gh_t_hash_tab *hash_tab,
void (*callback)(void *, void *), void *user_data);
diff --git a/drivers/staging/tidspbridge/include/dspbridge/uuidutil.h b/drivers/staging/tidspbridge/include/dspbridge/uuidutil.h
index 414bf71d652d..b4951a1381e7 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/uuidutil.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/uuidutil.h
@@ -21,22 +21,4 @@
#define MAXUUIDLEN 37
-/*
- * ======== uuid_uuid_from_string ========
- * Purpose:
- * Converts an ANSI string to a dsp_uuid.
- * Parameters:
- * sz_uuid: Pointer to a string that represents a dsp_uuid object.
- * uuid_obj: Pointer to a dsp_uuid object.
- * Returns:
- * Requires:
- * uuid_obj & sz_uuid are non-NULL values.
- * Ensures:
- * Details:
- * We assume the string representation of a UUID has the following format:
- * "12345678_1234_1234_1234_123456789abc".
- */
-extern void uuid_uuid_from_string(char *sz_uuid,
- struct dsp_uuid *uuid_obj);
-
#endif /* UUIDUTIL_ */
diff --git a/drivers/staging/tidspbridge/pmgr/cmm.c b/drivers/staging/tidspbridge/pmgr/cmm.c
index 4a800dadd703..f961e0ec9da8 100644
--- a/drivers/staging/tidspbridge/pmgr/cmm.c
+++ b/drivers/staging/tidspbridge/pmgr/cmm.c
@@ -359,7 +359,7 @@ int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
* Return the communication memory manager object for this device.
* This is typically called from the client process.
*/
-int cmm_get_handle(void *hprocessor, struct cmm_object ** ph_cmm_mgr)
+int cmm_get_handle(void *hprocessor, struct cmm_object **ph_cmm_mgr)
{
int status = 0;
struct dev_object *hdev_obj;
@@ -449,8 +449,7 @@ int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
struct cmm_mnode *new_node;
s32 slot_seg;
- dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
- "dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
+ dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
__func__, dw_gpp_base_pa, ul_size, dsp_addr_offset,
dw_dsp_base, ul_dsp_size, gpp_base_va);
@@ -828,7 +827,7 @@ int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
* Purpose:
* Set/Get translator info.
*/
-int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 ** paddr,
+int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 **paddr,
u32 ul_size, u32 segm_id, bool set_info)
{
struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c
index 41e88abe47af..8e21d1e47c9c 100644
--- a/drivers/staging/tidspbridge/pmgr/dbll.c
+++ b/drivers/staging/tidspbridge/pmgr/dbll.c
@@ -33,9 +33,6 @@
#include <dspbridge/dbll.h>
#include <dspbridge/rmm.h>
-/* Number of buckets for symbol hash table */
-#define MAXBUCKETS 211
-
/* Max buffer length */
#define MAXEXPR 128
@@ -183,8 +180,8 @@ static int execute(struct dynamic_loader_initialize *this, ldr_addr start);
static void release(struct dynamic_loader_initialize *this);
/* symbol table hash functions */
-static u16 name_hash(void *key, u16 max_bucket);
-static bool name_match(void *key, void *sp);
+static u32 name_hash(const void *key);
+static bool name_match(const void *key, const void *sp);
static void sym_delete(void *value);
/* Symbol Redefinition */
@@ -277,17 +274,16 @@ bool dbll_get_addr(struct dbll_library_obj *zl_lib, char *name,
struct dbll_sym_val **sym_val)
{
struct dbll_symbol *sym;
- bool status = false;
sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, name);
- if (sym != NULL) {
- *sym_val = &sym->value;
- status = true;
- }
+ if (IS_ERR(sym))
+ return false;
- dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p, status 0x%x\n",
- __func__, zl_lib, name, sym_val, status);
- return status;
+ *sym_val = &sym->value;
+
+ dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p\n",
+ __func__, zl_lib, name, sym_val);
+ return true;
}
/*
@@ -312,7 +308,6 @@ bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name,
{
struct dbll_symbol *sym;
char cname[MAXEXPR + 1];
- bool status = false;
cname[0] = '_';
@@ -321,13 +316,12 @@ bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name,
/* Check for C name, if not found */
sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, cname);
+ if (IS_ERR(sym))
+ return false;
- if (sym != NULL) {
- *sym_val = &sym->value;
- status = true;
- }
+ *sym_val = &sym->value;
- return status;
+ return true;
}
/*
@@ -378,8 +372,8 @@ int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr,
opened_doff = false;
}
- dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p psize: %p, "
- "status 0x%x\n", __func__, lib, name, paddr, psize, status);
+ dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p psize: %p, status 0x%x\n",
+ __func__, lib, name, paddr, psize, status);
return status;
}
@@ -416,12 +410,13 @@ int dbll_load(struct dbll_library_obj *lib, dbll_flags flags,
/* Create a hash table for symbols if not already created */
if (zl_lib->sym_tab == NULL) {
got_symbols = false;
- zl_lib->sym_tab = gh_create(MAXBUCKETS,
- sizeof(struct dbll_symbol),
+ zl_lib->sym_tab = gh_create(sizeof(struct dbll_symbol),
name_hash,
name_match, sym_delete);
- if (zl_lib->sym_tab == NULL)
- status = -ENOMEM;
+ if (IS_ERR(zl_lib->sym_tab)) {
+ status = PTR_ERR(zl_lib->sym_tab);
+ zl_lib->sym_tab = NULL;
+ }
}
/*
@@ -593,10 +588,11 @@ int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags,
goto func_cont;
zl_lib->sym_tab =
- gh_create(MAXBUCKETS, sizeof(struct dbll_symbol), name_hash,
- name_match, sym_delete);
- if (zl_lib->sym_tab == NULL) {
- status = -ENOMEM;
+ gh_create(sizeof(struct dbll_symbol), name_hash, name_match,
+ sym_delete);
+ if (IS_ERR(zl_lib->sym_tab)) {
+ status = PTR_ERR(zl_lib->sym_tab);
+ zl_lib->sym_tab = NULL;
} else {
/* Do a fake load to get symbols - set write func to no_op */
zl_lib->init.dl_init.writemem = no_op;
@@ -705,8 +701,8 @@ func_cont:
opened_doff = false;
}
- dev_dbg(bridge, "%s: lib: %p name: %s buf: %p size: 0x%x, "
- "status 0x%x\n", __func__, lib, name, buf, size, status);
+ dev_dbg(bridge, "%s: lib: %p name: %s buf: %p size: 0x%x, status 0x%x\n",
+ __func__, lib, name, buf, size, status);
return status;
}
@@ -793,11 +789,10 @@ static int dof_open(struct dbll_library_obj *zl_lib)
/*
* ======== name_hash ========
*/
-static u16 name_hash(void *key, u16 max_bucket)
+static u32 name_hash(const void *key)
{
- u16 ret;
- u16 hash;
- char *name = (char *)key;
+ u32 hash;
+ const char *name = key;
hash = 0;
@@ -806,19 +801,16 @@ static u16 name_hash(void *key, u16 max_bucket)
hash ^= *name++;
}
- ret = hash % max_bucket;
-
- return ret;
+ return hash;
}
/*
* ======== name_match ========
*/
-static bool name_match(void *key, void *sp)
+static bool name_match(const void *key, const void *sp)
{
if ((key != NULL) && (sp != NULL)) {
- if (strcmp((char *)key, ((struct dbll_symbol *)sp)->name) ==
- 0)
+ if (strcmp(key, ((struct dbll_symbol *)sp)->name) == 0)
return true;
}
return false;
@@ -915,10 +907,10 @@ static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
status = dbll_get_addr((struct dbll_library_obj *)lib,
(char *)name, &dbll_sym);
if (!status) {
- status =
- dbll_get_c_addr((struct dbll_library_obj *)
- lib, (char *)name,
- &dbll_sym);
+ status = dbll_get_c_addr(
+ (struct dbll_library_obj *)
+ lib, (char *)name,
+ &dbll_sym);
}
}
}
@@ -937,7 +929,6 @@ static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym
*this, const char *name,
unsigned moduleid)
{
- struct dynload_symbol *ret_sym;
struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
struct dbll_library_obj *lib;
struct dbll_symbol *sym;
@@ -945,8 +936,10 @@ static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym
lib = ldr_sym->lib;
sym = (struct dbll_symbol *)gh_find(lib->sym_tab, (char *)name);
- ret_sym = (struct dynload_symbol *)&sym->value;
- return ret_sym;
+ if (IS_ERR(sym))
+ return NULL;
+
+ return (struct dynload_symbol *)&sym->value;
}
/*
@@ -991,8 +984,10 @@ static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym
sym_ptr =
(struct dbll_symbol *)gh_insert(lib->sym_tab, (void *)name,
(void *)&symbol);
- if (sym_ptr == NULL)
+ if (IS_ERR(sym_ptr)) {
kfree(symbol.name);
+ sym_ptr = NULL;
+ }
}
if (sym_ptr != NULL)
@@ -1172,8 +1167,7 @@ func_cont:
if (!run_addr_flag)
info->run_addr = info->load_addr;
info->context = (u32) rmm_addr_obj.segid;
- dev_dbg(bridge, "%s: %s base = 0x%x len = 0x%x, "
- "info->run_addr 0x%x, info->load_addr 0x%x\n",
+ dev_dbg(bridge, "%s: %s base = 0x%x len = 0x%x, info->run_addr 0x%x, info->load_addr 0x%x\n",
__func__, info->name, info->load_addr / DSPWORDSIZE,
info->size / DSPWORDSIZE, info->run_addr,
info->load_addr);
@@ -1399,7 +1393,7 @@ void find_symbol_callback(void *elem, void *user_data)
* @sym_addr_output: Symbol Output address
* @name_output: String with the dsp symbol
*
- * This function retrieves the dsp symbol from the dsp binary.
+ * This function retrieves the dsp symbol from the dsp binary.
*/
bool dbll_find_dsp_symbol(struct dbll_library_obj *zl_lib, u32 address,
u32 offset_range, u32 *sym_addr_output,
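With gh_find() and gh_insert() now returning ERR_PTR() values instead of NULL, the dbll.c callers above switch to the usual <linux/err.h> convention. A minimal sketch of that pattern (lookup_thing() and its caller are hypothetical):

#include <linux/err.h>
#include <linux/slab.h>

/* hypothetical lookup that encodes the failure reason in the pointer */
static void *lookup_thing(bool found)
{
	void *obj;

	if (!found)
		return ERR_PTR(-ENODATA);	/* error value packed into a pointer */
	obj = kzalloc(16, GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);
	return obj;
}

static int example_caller(void)
{
	void *obj = lookup_thing(true);

	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* recover the errno value */
	kfree(obj);
	return 0;
}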
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c
index 6234ffb5e8a3..616dc1f63070 100644
--- a/drivers/staging/tidspbridge/pmgr/dev.c
+++ b/drivers/staging/tidspbridge/pmgr/dev.c
@@ -606,7 +606,7 @@ int dev_get_node_manager(struct dev_object *hdev_obj,
* ======== dev_get_symbol ========
*/
int dev_get_symbol(struct dev_object *hdev_obj,
- const char *str_sym, u32 * pul_value)
+ const char *str_sym, u32 *pul_value)
{
int status = 0;
struct cod_manager *cod_mgr;
@@ -916,8 +916,8 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
/* Local helper macro: */
#define STORE_FXN(cast, pfn) \
- (intf_fxns->pfn = ((drv_fxns->pfn != NULL) ? drv_fxns->pfn : \
- (cast)fxn_not_implemented))
+ (intf_fxns->pfn = ((drv_fxns->pfn != NULL) ? drv_fxns->pfn : \
+ (cast)fxn_not_implemented))
bridge_version = MAKEVERSION(drv_fxns->brd_api_major_version,
drv_fxns->brd_api_minor_version);
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c
index 7c9f83916068..fcf564aa566d 100644
--- a/drivers/staging/tidspbridge/pmgr/dmm.c
+++ b/drivers/staging/tidspbridge/pmgr/dmm.c
@@ -217,8 +217,8 @@ int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
status = -ENOENT;
spin_unlock(&dmm_obj->dmm_lock);
- dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
- "chunk %p", __func__, dmm_mgr, addr, size, status, chunk);
+ dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, chunk %p",
+ __func__, dmm_mgr, addr, size, status, chunk);
return status;
}
@@ -268,9 +268,9 @@ int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
spin_unlock(&dmm_obj->dmm_lock);
- dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
- "rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
- prsv_addr, status, rsv_addr, rsv_size);
+ dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, rsv_addr %x, rsv_size %x\n",
+ __func__, dmm_mgr, size,
+ prsv_addr, status, rsv_addr, rsv_size);
return status;
}
@@ -299,8 +299,8 @@ int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
}
spin_unlock(&dmm_obj->dmm_lock);
- dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
- "chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);
+ dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, chunk %p\n",
+ __func__, dmm_mgr, addr, psize, status, chunk);
return status;
}
@@ -475,11 +475,11 @@ u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
}
}
spin_unlock(&dmm_mgr->dmm_lock);
- printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
+ dev_info(bridge, "Total DSP VA FREE memory = %d Mbytes\n",
freemem / (1024 * 1024));
- printk(KERN_INFO "Total DSP VA USED memory= %d Mbytes \n",
+ dev_info(bridge, "Total DSP VA USED memory= %d Mbytes\n",
(((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
- printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes \n\n",
+ dev_info(bridge, "DSP VA - Biggest FREE block = %d Mbytes\n",
(bigsize * PG_SIZE4K / (1024 * 1024)));
return 0;
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
index 70db4ff99ec6..b7d5c8cbb2a1 100644
--- a/drivers/staging/tidspbridge/pmgr/dspapi.c
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -162,7 +162,7 @@ static u8 size_cmd[] = {
ARRAY_SIZE(cmm_cmd),
};
-static inline void _cp_fm_usr(void *to, const void __user * from,
+static inline void _cp_fm_usr(void *to, const void __user *from,
int *err, unsigned long bytes)
{
if (*err)
@@ -507,7 +507,7 @@ u32 mgrwrap_wait_for_bridge_events(union trapped_args *args, void *pr_ctxt)
/*
* ======== MGRWRAP_GetProcessResourceInfo ========
*/
-u32 __deprecated mgrwrap_get_process_resources_info(union trapped_args * args,
+u32 __deprecated mgrwrap_get_process_resources_info(union trapped_args *args,
void *pr_ctxt)
{
pr_err("%s: deprecated dspbridge ioctl\n", __func__);
@@ -581,7 +581,7 @@ func_end:
/*
* ======== procwrap_detach ========
*/
-u32 __deprecated procwrap_detach(union trapped_args * args, void *pr_ctxt)
+u32 __deprecated procwrap_detach(union trapped_args *args, void *pr_ctxt)
{
/* proc_detach called at bridge_release only */
pr_err("%s: deprecated dspbridge ioctl\n", __func__);
@@ -1564,7 +1564,7 @@ u32 strmwrap_free_buffer(union trapped_args *args, void *pr_ctxt)
/*
* ======== strmwrap_get_event_handle ========
*/
-u32 __deprecated strmwrap_get_event_handle(union trapped_args * args,
+u32 __deprecated strmwrap_get_event_handle(union trapped_args *args,
void *pr_ctxt)
{
pr_err("%s: deprecated dspbridge ioctl\n", __func__);
@@ -1793,7 +1793,7 @@ u32 strmwrap_select(union trapped_args *args, void *pr_ctxt)
/*
* ======== cmmwrap_calloc_buf ========
*/
-u32 __deprecated cmmwrap_calloc_buf(union trapped_args * args, void *pr_ctxt)
+u32 __deprecated cmmwrap_calloc_buf(union trapped_args *args, void *pr_ctxt)
{
/* This operation is done in kernel */
pr_err("%s: deprecated dspbridge ioctl\n", __func__);
@@ -1803,7 +1803,7 @@ u32 __deprecated cmmwrap_calloc_buf(union trapped_args * args, void *pr_ctxt)
/*
* ======== cmmwrap_free_buf ========
*/
-u32 __deprecated cmmwrap_free_buf(union trapped_args * args, void *pr_ctxt)
+u32 __deprecated cmmwrap_free_buf(union trapped_args *args, void *pr_ctxt)
{
/* This operation is done in kernel */
pr_err("%s: deprecated dspbridge ioctl\n", __func__);
diff --git a/drivers/staging/tidspbridge/rmgr/dbdcd.c b/drivers/staging/tidspbridge/rmgr/dbdcd.c
index 3d2a26f1efe5..190ca3fe7327 100644
--- a/drivers/staging/tidspbridge/rmgr/dbdcd.c
+++ b/drivers/staging/tidspbridge/rmgr/dbdcd.c
@@ -74,6 +74,47 @@ static int get_dep_lib_info(struct dcd_manager *hdcd_mgr,
enum nldr_phase phase);
/*
+ * ======== dcd_uuid_from_string ========
+ * Purpose:
+ * Converts an ANSI string to a dsp_uuid.
+ * Parameters:
+ * sz_uuid: Pointer to a string that represents a dsp_uuid object.
+ * uuid_obj: Pointer to a dsp_uuid object.
+ * Returns:
+ * 0: Success.
+ * -EINVAL: Conversion failed.
+ * Requires:
+ * uuid_obj & sz_uuid are non-NULL values.
+ * Ensures:
+ * Details:
+ * We assume the string representation of a UUID has the following format:
+ * "12345678_1234_1234_1234_123456789abc".
+ */
+static int dcd_uuid_from_string(char *sz_uuid, struct dsp_uuid *uuid_obj)
+{
+ char c;
+ u64 t;
+ struct dsp_uuid uuid_tmp;
+
+ /*
+ * sscanf implementation cannot deal with hh format modifier
+ * if the converted value doesn't fit in u32. So, convert the
+ * last six bytes to u64 and memcpy what is needed
+ */
+ if (sscanf(sz_uuid, "%8x%c%4hx%c%4hx%c%2hhx%2hhx%c%llx",
+ &uuid_tmp.data1, &c, &uuid_tmp.data2, &c,
+ &uuid_tmp.data3, &c, &uuid_tmp.data4,
+ &uuid_tmp.data5, &c, &t) != 10)
+ return -EINVAL;
+
+ t = cpu_to_be64(t);
+ memcpy(&uuid_tmp.data6[0], ((char *)&t) + 2, 6);
+ *uuid_obj = uuid_tmp;
+
+ return 0;
+}
+
+/*
* ======== dcd_auto_register ========
* Purpose:
* Parses the supplied image and registers with DCD.
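To see why the cpu_to_be64()/memcpy() pair in dcd_uuid_from_string() above recovers exactly the last six UUID bytes, consider a hypothetical trailing field of "123456789abc"; a minimal sketch:

#include <asm/byteorder.h>	/* cpu_to_be64() */
#include <linux/string.h>
#include <linux/types.h>

static void example_last_six_bytes(u8 out[6])
{
	u64 t = 0x123456789abcULL;	/* what the %llx conversion would yield */

	t = cpu_to_be64(t);		/* in-memory bytes: 00 00 12 34 56 78 9a bc */
	memcpy(out, (u8 *)&t + 2, 6);	/* skip the two zero bytes: 12 34 56 78 9a bc */
}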
@@ -253,14 +294,15 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
if (!status) {
/* Create UUID value using string retrieved from
* registry. */
- uuid_uuid_from_string(sz_value, &dsp_uuid_obj);
-
- *uuid_obj = dsp_uuid_obj;
+ status = dcd_uuid_from_string(sz_value, &dsp_uuid_obj);
- /* Increment enum_refs to update reference count. */
- enum_refs++;
+ if (!status) {
+ *uuid_obj = dsp_uuid_obj;
- status = 0;
+ /* Increment enum_refs to update reference
+ * count. */
+ enum_refs++;
+ }
} else if (status == -ENODATA) {
/* At the end of enumeration. Reset enum_refs. */
enum_refs = 0;
@@ -581,24 +623,28 @@ int dcd_get_objects(struct dcd_manager *hdcd_mgr,
psz_cur = psz_coff_buf;
while ((token = strsep(&psz_cur, seps)) && *token != '\0') {
/* Retrieve UUID string. */
- uuid_uuid_from_string(token, &dsp_uuid_obj);
-
- /* Retrieve object type */
- token = strsep(&psz_cur, seps);
+ status = dcd_uuid_from_string(token, &dsp_uuid_obj);
- /* Retrieve object type */
- object_type = atoi(token);
+ if (!status) {
+ /* Retrieve object type */
+ token = strsep(&psz_cur, seps);
- /*
- * Apply register_fxn to the found DCD object.
- * Possible actions include:
- *
- * 1) Register found DCD object.
- * 2) Unregister found DCD object (when handle == NULL)
- * 3) Add overlay node.
- */
- status =
- register_fxn(&dsp_uuid_obj, object_type, handle);
+ /* Retrieve object type */
+ object_type = atoi(token);
+
+ /*
+ * Apply register_fxn to the found DCD object.
+ * Possible actions include:
+ *
+ * 1) Register found DCD object.
+ * 2) Unregister found DCD object
+ * (when handle == NULL)
+ * 3) Add overlay node.
+ */
+ status =
+ register_fxn(&dsp_uuid_obj, object_type,
+ handle);
+ }
if (status) {
/* if error occurs, break from while loop. */
break;
@@ -1001,9 +1047,12 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
token = strsep(&psz_cur, seps);
/* dsp_uuid ui_node_id */
- uuid_uuid_from_string(token,
- &gen_obj->obj_data.node_obj.ndb_props.
- ui_node_id);
+ status = dcd_uuid_from_string(token,
+ &gen_obj->obj_data.node_obj.
+ ndb_props.ui_node_id);
+ if (status)
+ break;
+
token = strsep(&psz_cur, seps);
/* ac_name */
@@ -1400,9 +1449,12 @@ static int get_dep_lib_info(struct dcd_manager *hdcd_mgr,
break;
} else {
/* Retrieve UUID string. */
- uuid_uuid_from_string(token,
- &(dep_lib_uuids
- [dep_libs]));
+ status = dcd_uuid_from_string(token,
+ &(dep_lib_uuids
+ [dep_libs]));
+ if (status)
+ break;
+
/* Is this library persistent? */
token = strsep(&psz_cur, seps);
prstnt_dep_libs[dep_libs] = atoi(token);
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 56e355b3e7fa..74d31dabe832 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -23,7 +23,6 @@
#include <linux/pm.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
@@ -258,6 +257,8 @@ err:
/* This function maps kernel space memory to user space memory. */
static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
{
+ unsigned long base_pgoff;
+ int status;
struct omap_dsp_platform_data *pdata =
omap_dspbridge_dev->dev.platform_data;
@@ -269,9 +270,31 @@ static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_start, vma->vm_end, vma->vm_page_prot,
vma->vm_flags);
- return vm_iomap_memory(vma,
- pdata->phys_mempool_base,
- pdata->phys_mempool_size);
+ /*
+ * vm_iomap_memory() expects vma->vm_pgoff to be expressed as an offset
+ * from the start of the physical memory pool, but we're called with
+ * a pfn (physical page number) stored there instead.
+ *
+ * To avoid duplicating lots of tricky overflow checking logic,
+ * temporarily convert vma->vm_pgoff to the offset vm_iomap_memory()
+ * expects, but restore the original value once the mapping has been
+ * created.
+ */
+ base_pgoff = pdata->phys_mempool_base >> PAGE_SHIFT;
+
+ if (vma->vm_pgoff < base_pgoff)
+ return -EINVAL;
+
+ vma->vm_pgoff -= base_pgoff;
+
+ status = vm_iomap_memory(vma,
+ pdata->phys_mempool_base,
+ pdata->phys_mempool_size);
+
+ /* Restore the original value of vma->vm_pgoff */
+ vma->vm_pgoff += base_pgoff;
+
+ return status;
}
static const struct file_operations bridge_fops = {
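For contrast with the vm_pgoff juggling in bridge_mmap() above, here is roughly what an mmap handler looks like when user space passes an ordinary offset, so vma->vm_pgoff already counts pages from the start of the region; phys_base and phys_size are hypothetical platform values, a sketch only:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sizes.h>

static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
	phys_addr_t phys_base = 0x80000000;	/* hypothetical reserved region */
	unsigned long phys_size = SZ_16M;

	/*
	 * vm_iomap_memory() checks that vm_pgoff plus the mapping length
	 * stays inside the region and then remaps it; no manual pfn
	 * arithmetic is needed in this case.
	 */
	return vm_iomap_memory(vma, phys_base, phys_size);
}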
@@ -566,7 +589,7 @@ func_cont:
class_destroy(bridge_class);
}
- return 0;
+ return status;
}
#ifdef CONFIG_PM
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index db48a789d308..5d1d4a183300 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -102,11 +102,13 @@ static int tweak_clear_halt_cmd(struct urb *urb)
ret = usb_clear_halt(urb->dev, target_pipe);
if (ret < 0)
- dev_err(&urb->dev->dev, "usb_clear_halt error: devnum %d endp "
- "%d ret %d\n", urb->dev->devnum, target_endp, ret);
+ dev_err(&urb->dev->dev,
+ "usb_clear_halt error: devnum %d endp %d ret %d\n",
+ urb->dev->devnum, target_endp, ret);
else
- dev_info(&urb->dev->dev, "usb_clear_halt done: devnum %d endp "
- "%d\n", urb->dev->devnum, target_endp);
+ dev_info(&urb->dev->dev,
+ "usb_clear_halt done: devnum %d endp %d\n",
+ urb->dev->devnum, target_endp);
return ret;
}
@@ -127,11 +129,13 @@ static int tweak_set_interface_cmd(struct urb *urb)
ret = usb_set_interface(urb->dev, interface, alternate);
if (ret < 0)
- dev_err(&urb->dev->dev, "usb_set_interface error: inf %u alt "
- "%u ret %d\n", interface, alternate, ret);
+ dev_err(&urb->dev->dev,
+ "usb_set_interface error: inf %u alt %u ret %d\n",
+ interface, alternate, ret);
else
- dev_info(&urb->dev->dev, "usb_set_interface done: inf %u alt "
- "%u\n", interface, alternate);
+ dev_info(&urb->dev->dev,
+ "usb_set_interface done: inf %u alt %u\n",
+ interface, alternate);
return ret;
}
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index e2bffece81d7..96552e3a1bfb 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -155,8 +155,9 @@ static void usbip_dump_usb_device(struct usb_device *udev)
dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus);
- dev_dbg(dev, "descriptor %p, config %p, actconfig %p, "
- "rawdescriptors %p\n", &udev->descriptor, udev->config,
+ dev_dbg(dev,
+ "descriptor %p, config %p, actconfig %p, rawdescriptors %p\n",
+ &udev->descriptor, udev->config,
udev->actconfig, udev->rawdescriptors);
dev_dbg(dev, "have_langid %d, string_langid %d\n",
diff --git a/drivers/staging/usbip/userspace/libsrc/names.c b/drivers/staging/usbip/userspace/libsrc/names.c
index 3c8d28b771e0..81ff8522405c 100644
--- a/drivers/staging/usbip/userspace/libsrc/names.c
+++ b/drivers/staging/usbip/userspace/libsrc/names.c
@@ -169,14 +169,14 @@ static void *my_malloc(size_t size)
struct pool *p;
p = calloc(1, sizeof(struct pool));
- if (!p) {
- free(p);
+ if (!p)
return NULL;
- }
p->mem = calloc(1, size);
- if (!p->mem)
+ if (!p->mem) {
+ free(p);
return NULL;
+ }
p->next = pool_head;
pool_head = p;
diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_common.c b/drivers/staging/usbip/userspace/libsrc/usbip_common.c
index 17e08e022c00..66f03cc62ac6 100644
--- a/drivers/staging/usbip/userspace/libsrc/usbip_common.c
+++ b/drivers/staging/usbip/userspace/libsrc/usbip_common.c
@@ -165,7 +165,7 @@ int read_attr_speed(struct sysfs_device *dev)
goto err;
}
- ret = sscanf(attr->value, "%s\n", speed);
+ ret = sscanf(attr->value, "%99s\n", speed);
if (ret < 1) {
dbg("sscanf failed");
goto err;
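The "%99s" change above caps the conversion at the size of the destination buffer; without a field width, sscanf("%s") will happily overrun it. A minimal user-space sketch of the same idea (the buffer name and input string are made up):

#include <stdio.h>

int main(void)
{
	char speed[100];
	const char *attr_value = "5000\n";	/* example sysfs attribute contents */

	/* "%99s" leaves room for the terminating NUL in speed[100] */
	if (sscanf(attr_value, "%99s", speed) != 1)
		return 1;
	printf("speed: %s\n", speed);
	return 0;
}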
diff --git a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
index 1091bb20de11..209df9b37cb4 100644
--- a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
+++ b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
@@ -4,6 +4,8 @@
#include "usbip_common.h"
#include "vhci_driver.h"
+#include <limits.h>
+#include <netdb.h>
#undef PROGNAME
#define PROGNAME "libusbip"
@@ -72,7 +74,7 @@ static int parse_status(char *value)
unsigned long socket;
char lbusid[SYSFS_BUS_ID_SIZE];
- ret = sscanf(c, "%d %d %d %x %lx %s\n",
+ ret = sscanf(c, "%d %d %d %x %lx %31s\n",
&port, &status, &speed,
&devid, &socket, lbusid);
@@ -337,6 +339,29 @@ err:
return -1;
}
+static int read_record(int rhport, char *host, char *port, char *busid)
+{
+ FILE *file;
+ char path[PATH_MAX+1];
+
+ snprintf(path, PATH_MAX, VHCI_STATE_PATH"/port%d", rhport);
+
+ file = fopen(path, "r");
+ if (!file) {
+ err("fopen");
+ return -1;
+ }
+
+ if (fscanf(file, "%s %s %s\n", host, port, busid) != 3) {
+ err("fscanf");
+ fclose(file);
+ return -1;
+ }
+
+ fclose(file);
+
+ return 0;
+}
/* ---------------------------------------------------------------------- */
@@ -535,3 +560,45 @@ int usbip_vhci_detach_device(uint8_t port)
return 0;
}
+
+int usbip_vhci_imported_device_dump(struct usbip_imported_device *idev)
+{
+ char product_name[100];
+ char host[NI_MAXHOST] = "unknown host";
+ char serv[NI_MAXSERV] = "unknown port";
+ char remote_busid[SYSFS_BUS_ID_SIZE];
+ int ret;
+ int read_record_error = 0;
+
+ if (idev->status == VDEV_ST_NULL || idev->status == VDEV_ST_NOTASSIGNED)
+ return 0;
+
+ ret = read_record(idev->port, host, serv, remote_busid);
+ if (ret) {
+ err("read_record");
+ read_record_error = 1;
+ }
+
+ printf("Port %02d: <%s> at %s\n", idev->port,
+ usbip_status_string(idev->status),
+ usbip_speed_string(idev->udev.speed));
+
+ usbip_names_get_product(product_name, sizeof(product_name),
+ idev->udev.idVendor, idev->udev.idProduct);
+
+ printf(" %s\n", product_name);
+
+ if (!read_record_error) {
+ printf("%10s -> usbip://%s:%s/%s\n", idev->udev.busid,
+ host, serv, remote_busid);
+ printf("%10s -> remote bus/dev %03d/%03d\n", " ",
+ idev->busnum, idev->devnum);
+ } else {
+ printf("%10s -> unknown host, remote port and remote busid\n",
+ idev->udev.busid);
+ printf("%10s -> remote bus/dev %03d/%03d\n", " ",
+ idev->busnum, idev->devnum);
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/usbip/userspace/libsrc/vhci_driver.h b/drivers/staging/usbip/userspace/libsrc/vhci_driver.h
index 89949aa7c313..e071f8049c1f 100644
--- a/drivers/staging/usbip/userspace/libsrc/vhci_driver.h
+++ b/drivers/staging/usbip/userspace/libsrc/vhci_driver.h
@@ -64,4 +64,6 @@ int usbip_vhci_attach_device(uint8_t port, int sockfd, uint8_t busnum,
int usbip_vhci_detach_device(uint8_t port);
+int usbip_vhci_imported_device_dump(struct usbip_imported_device *idev);
+
#endif /* __VHCI_DRIVER_H */
diff --git a/drivers/staging/usbip/userspace/src/Makefile.am b/drivers/staging/usbip/userspace/src/Makefile.am
index a11300361392..b4f8c4b04b2f 100644
--- a/drivers/staging/usbip/userspace/src/Makefile.am
+++ b/drivers/staging/usbip/userspace/src/Makefile.am
@@ -6,7 +6,7 @@ sbin_PROGRAMS := usbip usbipd
usbip_SOURCES := usbip.h utils.h usbip.c utils.c usbip_network.c \
usbip_attach.c usbip_detach.c usbip_list.c \
- usbip_bind.c usbip_unbind.c
+ usbip_bind.c usbip_unbind.c usbip_port.c
usbipd_SOURCES := usbip_network.h usbipd.c usbip_network.c
diff --git a/drivers/staging/usbip/userspace/src/usbip.c b/drivers/staging/usbip/userspace/src/usbip.c
index 04a5f20bea65..d7599d943529 100644
--- a/drivers/staging/usbip/userspace/src/usbip.c
+++ b/drivers/staging/usbip/userspace/src/usbip.c
@@ -93,6 +93,12 @@ static const struct command cmds[] = {
.help = "Unbind device from " USBIP_HOST_DRV_NAME ".ko",
.usage = usbip_unbind_usage
},
+ {
+ .name = "port",
+ .fn = usbip_port_show,
+ .help = "Show imported USB devices",
+ .usage = NULL
+ },
{ NULL, NULL, NULL, NULL }
};
diff --git a/drivers/staging/usbip/userspace/src/usbip.h b/drivers/staging/usbip/userspace/src/usbip.h
index 14d4a475b683..84fe66a9d8ad 100644
--- a/drivers/staging/usbip/userspace/src/usbip.h
+++ b/drivers/staging/usbip/userspace/src/usbip.h
@@ -29,6 +29,7 @@ int usbip_detach(int argc, char *argv[]);
int usbip_list(int argc, char *argv[]);
int usbip_bind(int argc, char *argv[]);
int usbip_unbind(int argc, char *argv[]);
+int usbip_port_show(int argc, char *argv[]);
void usbip_attach_usage(void);
void usbip_detach_usage(void);
diff --git a/drivers/staging/usbip/userspace/src/usbip_port.c b/drivers/staging/usbip/userspace/src/usbip_port.c
new file mode 100644
index 000000000000..52aa168b46b3
--- /dev/null
+++ b/drivers/staging/usbip/userspace/src/usbip_port.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
+ * 2005-2007 Takahiro Hirofuchi
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "vhci_driver.h"
+#include "usbip_common.h"
+
+static int list_imported_devices(void)
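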
+{
+ int i;
+ struct usbip_imported_device *idev;
+ int ret;
+
+ ret = usbip_vhci_driver_open();
+ if (ret < 0) {
+ err("open vhci_driver");
+ return -1;
+ }
+
+ printf("Imported USB devices\n");
+ printf("====================\n");
+
+ for (i = 0; i < vhci_driver->nports; i++) {
+ idev = &vhci_driver->idev[i];
+
+ if (usbip_vhci_imported_device_dump(idev) < 0)
+ ret = -1;
+ }
+
+ usbip_vhci_driver_close();
+
+ return ret;
+
+}
+
+int usbip_port_show(__attribute__((unused)) int argc,
+ __attribute__((unused)) char *argv[])
+{
+ int ret;
+
+ ret = list_imported_devices();
+ if (ret < 0)
+ err("list imported devices");
+
+ return ret;
+}
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index e810ad53e2ac..72391ef87646 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -220,8 +220,7 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc)
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = 0x29;
desc->bDescLength = 9;
- desc->wHubCharacteristics = (__force __u16)
- (__constant_cpu_to_le16(0x0001));
+ desc->wHubCharacteristics = (__constant_cpu_to_le16(0x0001));
desc->bNbrPorts = VHCI_NPORTS;
desc->u.hs.DeviceRemovable[0] = 0xff;
desc->u.hs.DeviceRemovable[1] = 0xff;
@@ -348,8 +347,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
USB_PORT_STAT_ENABLE;
}
}
- ((u16 *) buf)[0] = cpu_to_le16(dum->port_status[rhport]);
- ((u16 *) buf)[1] = cpu_to_le16(dum->port_status[rhport] >> 16);
+ ((__le16 *) buf)[0] = cpu_to_le16(dum->port_status[rhport]);
+ ((__le16 *) buf)[1] = cpu_to_le16(dum->port_status[rhport] >> 16);
usbip_dbg_vhci_rh(" GetPortStatus bye %x %x\n", ((u16 *)buf)[0],
((u16 *)buf)[1]);
@@ -537,7 +536,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
goto no_need_xmit;
case USB_REQ_GET_DESCRIPTOR:
- if (ctrlreq->wValue == (USB_DT_DEVICE << 8))
+ if (ctrlreq->wValue == cpu_to_le16(USB_DT_DEVICE << 8))
usbip_dbg_vhci_hc("Not yet?: "
"Get_Descriptor to device 0 "
"(get max pipe size)\n");
@@ -918,7 +917,7 @@ static void vhci_stop(struct usb_hcd *hcd)
sysfs_remove_group(&vhci_dev(vhci)->kobj, &dev_attr_group);
/* 2. shutdown all the ports of vhci_hcd */
- for (rhport = 0 ; rhport < VHCI_NPORTS; rhport++) {
+ for (rhport = 0; rhport < VHCI_NPORTS; rhport++) {
struct vhci_device *vdev = &vhci->vdev[rhport];
usbip_event_add(&vdev->ud, VDEV_EVENT_REMOVED);
@@ -1108,7 +1107,7 @@ static struct platform_driver vhci_driver = {
.suspend = vhci_hcd_suspend,
.resume = vhci_hcd_resume,
.driver = {
- .name = (char *) driver_name,
+ .name = driver_name,
.owner = THIS_MODULE,
},
};
@@ -1125,7 +1124,7 @@ static void the_pdev_release(struct device *dev)
static struct platform_device the_pdev = {
/* should be the same name as driver_name */
- .name = (char *) driver_name,
+ .name = driver_name,
.id = -1,
.dev = {
.release = the_pdev_release,
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index 9b51586d11d9..0141bc34d5cc 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -149,7 +149,8 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed)
case USB_SPEED_WIRELESS:
break;
default:
- pr_err("speed %d\n", speed);
+ pr_err("Failed attach request for unsupported USB speed: %s\n",
+ usb_speed_string(speed));
return -EINVAL;
}
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index daec15565a43..792792715673 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -147,6 +147,7 @@ static const struct file_operations vme_user_fops = {
.write = vme_user_write,
.llseek = vme_user_llseek,
.unlocked_ioctl = vme_user_unlocked_ioctl,
+ .compat_ioctl = vme_user_unlocked_ioctl,
};
@@ -663,9 +664,16 @@ err_nocard:
static int vme_user_match(struct vme_dev *vdev)
{
- if (vdev->num >= VME_USER_BUS_MAX)
- return 0;
- return 1;
+ int i;
+
+ int cur_bus = vme_bus_num(vdev);
+ int cur_slot = vme_slot_num(vdev);
+
+ for (i = 0; i < bus_num; i++)
+ if ((cur_bus == bus[i]) && (cur_slot == vdev->num))
+ return 1;
+
+ return 0;
}
/*
diff --git a/drivers/staging/vme/devices/vme_user.h b/drivers/staging/vme/devices/vme_user.h
index 280ccc7f26bb..b8cc7bc78a73 100644
--- a/drivers/staging/vme/devices/vme_user.h
+++ b/drivers/staging/vme/devices/vme_user.h
@@ -7,18 +7,18 @@
* VMEbus Master Window Configuration Structure
*/
struct vme_master {
- int enable; /* State of Window */
- unsigned long long vme_addr; /* Starting Address on the VMEbus */
- unsigned long long size; /* Window Size */
- u32 aspace; /* Address Space */
- u32 cycle; /* Cycle properties */
- u32 dwidth; /* Maximum Data Width */
+ __u32 enable; /* State of Window */
+ __u64 vme_addr; /* Starting Address on the VMEbus */
+ __u64 size; /* Window Size */
+ __u32 aspace; /* Address Space */
+ __u32 cycle; /* Cycle properties */
+ __u32 dwidth; /* Maximum Data Width */
#if 0
char prefetchenable; /* Prefetch Read Enable State */
int prefetchsize; /* Prefetch Read Size (Cache Lines) */
char wrpostenable; /* Write Post State */
#endif
-};
+} __packed;
/*
@@ -31,17 +31,17 @@ struct vme_master {
/* VMEbus Slave Window Configuration Structure */
struct vme_slave {
- int enable; /* State of Window */
- unsigned long long vme_addr; /* Starting Address on the VMEbus */
- unsigned long long size; /* Window Size */
- u32 aspace; /* Address Space */
- u32 cycle; /* Cycle properties */
+ __u32 enable; /* State of Window */
+ __u64 vme_addr; /* Starting Address on the VMEbus */
+ __u64 size; /* Window Size */
+ __u32 aspace; /* Address Space */
+ __u32 cycle; /* Cycle properties */
#if 0
char wrpostenable; /* Write Post State */
char rmwlock; /* Lock PCI during RMW Cycles */
char data64bitcapable; /* non-VMEbus capable of 64-bit Data */
#endif
-};
+} __packed;
struct vme_irq_id {
__u8 level;
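The vme_user.h hunks above swap int and unsigned long long for __u32/__u64 and add __packed, giving the ioctl structures a single well-defined layout for 32-bit and 64-bit user space, which is presumably what lets vme_user.c above route compat_ioctl through the same handler. A minimal sketch of the same pattern for a hypothetical ioctl structure:

#include <linux/compiler.h>	/* __packed */
#include <linux/types.h>

/* same size and field offsets regardless of the user space ABI */
struct example_window {
	__u32 enable;		/* fixed width instead of plain int */
	__u64 vme_addr;		/* fixed width instead of unsigned long long */
	__u64 size;
	__u32 aspace;
	__u32 cycle;
} __packed;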
diff --git a/drivers/staging/vt6655/80211hdr.h b/drivers/staging/vt6655/80211hdr.h
index ba533402a9af..ba155cdded2f 100644
--- a/drivers/staging/vt6655/80211hdr.h
+++ b/drivers/staging/vt6655/80211hdr.h
@@ -155,7 +155,7 @@
#ifdef __BIG_ENDIAN
/* GET & SET Frame Control bit */
-#define WLAN_GET_FC_PRVER(n) ((((unsigned short)(n) >> 8) & (BIT0 | BIT1))
+#define WLAN_GET_FC_PRVER(n) (((unsigned short)(n) >> 8) & (BIT0 | BIT1))
#define WLAN_GET_FC_FTYPE(n) ((((unsigned short)(n) >> 8) & (BIT2 | BIT3)) >> 2)
#define WLAN_GET_FC_FSTYPE(n) ((((unsigned short)(n) >> 8) & (BIT4|BIT5|BIT6|BIT7)) >> 4)
#define WLAN_GET_FC_TODS(n) ((((unsigned short)(n) << 8) & (BIT8)) >> 8)
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 959568a1eb6a..fa14659ba43c 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -1865,7 +1865,7 @@ BBvCalculateParameter(
break;
case RATE_5M:
- if (bCCK == false)
+ if (!bCCK)
cbBitCount++;
cbUsCount = (cbBitCount * 10) / 55;
cbTmp = (cbUsCount * 55) / 10;
@@ -1879,7 +1879,7 @@ BBvCalculateParameter(
case RATE_11M:
- if (bCCK == false)
+ if (!bCCK)
cbBitCount++;
cbUsCount = cbBitCount / 11;
cbTmp = cbUsCount * 11;
diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
index a23b591eeac3..d7efd0173a9a 100644
--- a/drivers/staging/vt6655/bssdb.c
+++ b/drivers/staging/vt6655/bssdb.c
@@ -64,7 +64,6 @@
/*--------------------- Static Variables --------------------------*/
static int msglevel = MSG_LEVEL_INFO;
-//static int msglevel =MSG_LEVEL_DEBUG;
const unsigned short awHWRetry0[5][5] = {
{RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
@@ -131,27 +130,26 @@ BSSpSearchBSSList(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
"BSSpSearchBSSList BSSID[%pM]\n", pbyDesireBSSID);
if ((!is_broadcast_ether_addr(pbyDesireBSSID)) &&
- (memcmp(pbyDesireBSSID, ZeroBSSID, 6) != 0)) {
+ (memcmp(pbyDesireBSSID, ZeroBSSID, 6) != 0))
pbyBSSID = pbyDesireBSSID;
- }
}
if (pbyDesireSSID != NULL) {
- if (((PWLAN_IE_SSID)pbyDesireSSID)->len != 0) {
+ if (((PWLAN_IE_SSID)pbyDesireSSID)->len != 0)
pSSID = (PWLAN_IE_SSID) pbyDesireSSID;
- }
}
if (pbyBSSID != NULL) {
- // match BSSID first
+ /* match BSSID first */
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
pCurrBSS = &(pMgmt->sBSSList[ii]);
- if (pDevice->bLinkPass == false) pCurrBSS->bSelected = false;
+ if (!pDevice->bLinkPass)
+ pCurrBSS->bSelected = false;
if ((pCurrBSS->bActive) &&
- (pCurrBSS->bSelected == false)) {
+ (!pCurrBSS->bSelected)) {
if (ether_addr_equal(pCurrBSS->abyBSSID,
pbyBSSID)) {
if (pSSID != NULL) {
- // compare ssid
+ /* compare ssid */
if (!memcmp(pSSID->abySSID,
((PWLAN_IE_SSID)pCurrBSS->abySSID)->abySSID,
pSSID->len)) {
@@ -176,26 +174,26 @@ BSSpSearchBSSList(
}
}
} else {
- // ignore BSSID
+ /* ignore BSSID */
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
pCurrBSS = &(pMgmt->sBSSList[ii]);
- //2007-0721-01<Add>by MikeLiu
+ /* 2007-0721-01<Add>by MikeLiu */
pCurrBSS->bSelected = false;
if (pCurrBSS->bActive) {
if (pSSID != NULL) {
- // matched SSID
+ /* matched SSID */
if (!!memcmp(pSSID->abySSID,
((PWLAN_IE_SSID)pCurrBSS->abySSID)->abySSID,
pSSID->len) ||
(pSSID->len != ((PWLAN_IE_SSID)pCurrBSS->abySSID)->len)) {
- // SSID not match skip this BSS
+ /* SSID not match skip this BSS */
continue;
}
}
if (((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo)) ||
((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo))
) {
- // Type not match skip this BSS
+ /* Type not match skip this BSS */
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BSS type mismatch.... Config[%d] BSS[0x%04x]\n", pMgmt->eConfigMode, pCurrBSS->wCapInfo);
continue;
}
@@ -203,50 +201,23 @@ BSSpSearchBSSList(
if (ePhyType != PHY_TYPE_AUTO) {
if (((ePhyType == PHY_TYPE_11A) && (PHY_TYPE_11A != pCurrBSS->eNetworkTypeInUse)) ||
((ePhyType != PHY_TYPE_11A) && (PHY_TYPE_11A == pCurrBSS->eNetworkTypeInUse))) {
- // PhyType not match skip this BSS
+ /* PhyType not match skip this BSS */
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Physical type mismatch.... ePhyType[%d] BSS[%d]\n", ePhyType, pCurrBSS->eNetworkTypeInUse);
continue;
}
}
-/*
- if (pMgmt->eAuthenMode < WMAC_AUTH_WPA) {
- if (pCurrBSS->bWPAValid == true) {
- // WPA AP will reject connection of station without WPA enable.
- continue;
- }
- } else if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK)) {
- if (pCurrBSS->bWPAValid == false) {
- // station with WPA enable can't join NonWPA AP.
- continue;
- }
- } else if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {
- if (pCurrBSS->bWPA2Valid == false) {
- // station with WPA2 enable can't join NonWPA2 AP.
- continue;
- }
- }
-*/
+
if (pSelect == NULL) {
pSelect = pCurrBSS;
} else {
- // compare RSSI, select signal strong one
- if (pCurrBSS->uRSSI < pSelect->uRSSI) {
+ /* compare RSSI, select signal strong one */
+ if (pCurrBSS->uRSSI < pSelect->uRSSI)
pSelect = pCurrBSS;
- }
}
}
}
if (pSelect != NULL) {
pSelect->bSelected = true;
-/*
- if (pDevice->bRoaming == false) {
- // Einsn Add @20070907
- memset(pbyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memcpy(pbyDesireSSID,pCurrBSS->abySSID,WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- }*/
-
return pSelect;
}
}
@@ -278,7 +249,6 @@ BSSvClearBSSList(
if (pMgmt->sBSSList[ii].bActive &&
ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
pMgmt->abyCurrBSSID)) {
- // bKeepCurrBSSID = false;
continue;
}
}
@@ -385,7 +355,7 @@ BSSbInsertToBSSList(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Get free KnowBSS node failed.\n");
return false;
}
- // save the BSS info
+ /* save the BSS info */
pBSSList->bActive = true;
memcpy(pBSSList->abyBSSID, abyBSSIDAddr, WLAN_BSSID_LEN);
HIDWORD(pBSSList->qwBSSTimestamp) = cpu_to_le32(HIDWORD(qwTimestamp));
@@ -416,15 +386,14 @@ BSSbInsertToBSSList(
pBSSList->sERP.byERP = psERP->byERP;
pBSSList->sERP.bERPExist = psERP->bERPExist;
- // Check if BSS is 802.11a/b/g
+ /* check if BSS is 802.11a/b/g */
if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
} else {
- if (pBSSList->sERP.bERPExist == true) {
+ if (pBSSList->sERP.bERPExist)
pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
- } else {
+ else
pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
- }
}
pBSSList->byRxRate = pRxPacket->byRxRate;
@@ -434,10 +403,9 @@ BSSbInsertToBSSList(
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
(pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- // assoc with BSS
- if (pBSSList == pMgmt->pCurrBSS) {
+ /* assoc with BSS */
+ if (pBSSList == pMgmt->pCurrBSS)
bParsingQuiet = true;
- }
}
WPA_ClearRSN(pBSSList);
@@ -463,7 +431,7 @@ BSSbInsertToBSSList(
}
}
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || (pBSSList->bWPA2Valid == true)) {
+ if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || pBSSList->bWPA2Valid) {
PSKeyItem pTransmitKey = NULL;
bool bIs802_1x = false;
@@ -473,13 +441,13 @@ BSSbInsertToBSSList(
break;
}
}
- if ((bIs802_1x == true) && (pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len) &&
+ if (bIs802_1x && (pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len) &&
(!memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySSID, pSSID->len))) {
bAdd_PMKID_Candidate((void *)pDevice, pBSSList->abyBSSID, &pBSSList->sRSNCapObj);
- if ((pDevice->bLinkPass == true) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- if ((KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, PAIRWISE_KEY, &pTransmitKey) == true) ||
- (KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, GROUP_KEY, &pTransmitKey) == true)) {
+ if (pDevice->bLinkPass && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
+ if (KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, PAIRWISE_KEY, &pTransmitKey) ||
+ KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, GROUP_KEY, &pTransmitKey)) {
pDevice->gsPMKIDCandidate.StatusType = Ndis802_11StatusType_PMKID_CandidateList;
pDevice->gsPMKIDCandidate.Version = 1;
@@ -490,7 +458,7 @@ BSSbInsertToBSSList(
}
if (pDevice->bUpdateBBVGA) {
- // Moniter if RSSI is too strong.
+ /* monitor if RSSI is too strong */
pBSSList->byRSSIStatCnt = 0;
RFvRSSITodBm(pDevice, (unsigned char)(pRxPacket->uRSSI), &pBSSList->ldBmMAX);
pBSSList->ldBmAverage[0] = pBSSList->ldBmMAX;
@@ -498,16 +466,15 @@ BSSbInsertToBSSList(
pBSSList->ldBmAverage[ii] = 0;
}
- if ((pIE_Country != NULL) &&
- (pMgmt->b11hEnable == true)) {
+ if ((pIE_Country != NULL) && pMgmt->b11hEnable) {
set_country_info(pMgmt->pAdapter, pBSSList->eNetworkTypeInUse,
pIE_Country);
}
- if ((bParsingQuiet == true) && (pIE_Quiet != NULL)) {
+ if (bParsingQuiet && (pIE_Quiet != NULL)) {
if ((((PWLAN_IE_QUIET)pIE_Quiet)->len == 8) &&
(((PWLAN_IE_QUIET)pIE_Quiet)->byQuietCount != 0)) {
- // valid EID
+ /* valid EID */
if (pQuiet == NULL) {
pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
CARDbSetQuiet(pMgmt->pAdapter,
@@ -530,8 +497,7 @@ BSSbInsertToBSSList(
}
}
- if ((bParsingQuiet == true) &&
- (pQuiet != NULL)) {
+ if (bParsingQuiet && (pQuiet != NULL)) {
CARDbStartQuiet(pMgmt->pAdapter);
}
@@ -552,7 +518,7 @@ BSSbInsertToBSSList(
* true if success.
*
-*/
-// TODO: input structure modify
+/* TODO: modify the input structure */
bool
BSSbUpdateToBSSList(
@@ -593,7 +559,6 @@ BSSbUpdateToBSSList(
pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
pBSSList->uClearCount = 0;
pBSSList->uChannel = byCurrChannel;
-// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BSSbUpdateToBSSList: pBSSList->uChannel: %d\n", pBSSList->uChannel);
if (pSSID->len > WLAN_SSID_MAXLEN)
pSSID->len = WLAN_SSID_MAXLEN;
@@ -602,23 +567,21 @@ BSSbUpdateToBSSList(
memcpy(pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
memcpy(pBSSList->abySuppRates, pSuppRates, pSuppRates->len + WLAN_IEHDR_LEN);
- if (pExtSuppRates != NULL) {
+ if (pExtSuppRates != NULL)
memcpy(pBSSList->abyExtSuppRates, pExtSuppRates, pExtSuppRates->len + WLAN_IEHDR_LEN);
- } else {
+ else
memset(pBSSList->abyExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
- }
pBSSList->sERP.byERP = psERP->byERP;
pBSSList->sERP.bERPExist = psERP->bERPExist;
- // Check if BSS is 802.11a/b/g
+ /* check if BSS is 802.11a/b/g */
if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
} else {
- if (pBSSList->sERP.bERPExist == true) {
+ if (pBSSList->sERP.bERPExist)
pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
- } else {
+ else
pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
- }
}
pBSSList->byRxRate = pRxPacket->byRxRate;
@@ -629,13 +592,12 @@ BSSbUpdateToBSSList(
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
(pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- // assoc with BSS
- if (pBSSList == pMgmt->pCurrBSS) {
+ /* assoc with BSS */
+ if (pBSSList == pMgmt->pCurrBSS)
bParsingQuiet = true;
- }
}
- WPA_ClearRSN(pBSSList); //mike update
+ WPA_ClearRSN(pBSSList); /* mike update */
if (pRSNWPA != NULL) {
unsigned int uLen = pRSNWPA->len + 2;
@@ -646,7 +608,7 @@ BSSbUpdateToBSSList(
}
}
- WPA2_ClearRSN(pBSSList); //mike update
+ WPA2_ClearRSN(pBSSList); /* mike update */
if (pRSN != NULL) {
unsigned int uLen = pRSN->len + 2;
@@ -659,27 +621,25 @@ BSSbUpdateToBSSList(
if (pRxPacket->uRSSI != 0) {
RFvRSSITodBm(pDevice, (unsigned char)(pRxPacket->uRSSI), &ldBm);
- // Moniter if RSSI is too strong.
+ /* monitor if RSSI is too strong */
pBSSList->byRSSIStatCnt++;
pBSSList->byRSSIStatCnt %= RSSI_STAT_COUNT;
pBSSList->ldBmAverage[pBSSList->byRSSIStatCnt] = ldBm;
for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
- if (pBSSList->ldBmAverage[ii] != 0) {
+ if (pBSSList->ldBmAverage[ii] != 0)
pBSSList->ldBmMAX = max(pBSSList->ldBmAverage[ii], ldBm);
- }
}
}
- if ((pIE_Country != NULL) &&
- (pMgmt->b11hEnable == true)) {
+ if ((pIE_Country != NULL) && pMgmt->b11hEnable) {
set_country_info(pMgmt->pAdapter, pBSSList->eNetworkTypeInUse,
pIE_Country);
}
- if ((bParsingQuiet == true) && (pIE_Quiet != NULL)) {
+ if (bParsingQuiet && (pIE_Quiet != NULL)) {
if ((((PWLAN_IE_QUIET)pIE_Quiet)->len == 8) &&
(((PWLAN_IE_QUIET)pIE_Quiet)->byQuietCount != 0)) {
- // valid EID
+ /* valid EID */
if (pQuiet == NULL) {
pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
CARDbSetQuiet(pMgmt->pAdapter,
@@ -702,8 +662,7 @@ BSSbUpdateToBSSList(
}
}
- if ((bParsingQuiet == true) &&
- (pQuiet != NULL)) {
+ if (bParsingQuiet && (pQuiet != NULL)) {
CARDbStartQuiet(pMgmt->pAdapter);
}
@@ -732,7 +691,7 @@ BSSDBbIsSTAInNodeDB(void *pMgmtObject, unsigned char *abyDstAddr,
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
unsigned int ii;
- // Index = 0 reserved for AP Node
+ /* Index = 0 reserved for AP Node */
for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
if (pMgmt->sNodeDBTable[ii].bActive) {
if (ether_addr_equal(abyDstAddr,
@@ -765,8 +724,10 @@ BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex)
unsigned int BigestCount = 0;
unsigned int SelectIndex;
struct sk_buff *skb;
- // Index = 0 reserved for AP Node (In STA mode)
- // Index = 0 reserved for Broadcast/MultiCast (In AP mode)
+ /*
+ * Index = 0 reserved for AP Node (In STA mode)
+ * Index = 0 reserved for Broadcast/MultiCast (In AP mode)
+ */
SelectIndex = 1;
for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
if (pMgmt->sNodeDBTable[ii].bActive) {
@@ -779,11 +740,11 @@ BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex)
}
}
- // if not found replace uInActiveCount is largest one.
+ /* if not found, replace the entry with the largest uInActiveCount */
if (ii == (MAX_NODE_NUM + 1)) {
*puNodeIndex = SelectIndex;
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Replace inactive node = %d\n", SelectIndex);
- // clear ps buffer
+ /* clear ps buffer */
if (pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue.next != NULL) {
while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue)) != NULL)
dev_kfree_skb(skb);
@@ -795,7 +756,7 @@ BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex)
memset(&pMgmt->sNodeDBTable[*puNodeIndex], 0, sizeof(KnownNodeDB));
pMgmt->sNodeDBTable[*puNodeIndex].bActive = true;
pMgmt->sNodeDBTable[*puNodeIndex].uRatePollTimeout = FALLBACK_POLL_SECOND;
- // for AP mode PS queue
+ /* for AP mode PS queue */
skb_queue_head_init(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue);
pMgmt->sNodeDBTable[*puNodeIndex].byAuthSequence = 0;
pMgmt->sNodeDBTable[*puNodeIndex].wEnQueueCnt = 0;
@@ -826,9 +787,9 @@ BSSvRemoveOneNode(
while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue)) != NULL)
dev_kfree_skb(skb);
- // clear context
+ /* clear context */
memset(&pMgmt->sNodeDBTable[uNodeIndex], 0, sizeof(KnownNodeDB));
- // clear tx bit map
+ /* clear tx bit map */
pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[uNodeIndex].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[uNodeIndex].wAID & 7];
return;
@@ -859,9 +820,8 @@ BSSvUpdateAPNode(
memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
pMgmt->sNodeDBTable[0].bActive = true;
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
+ if (pDevice->eCurrentPHYType == PHY_TYPE_11B)
uRateLen = WLAN_RATES_MAXLEN_11B;
- }
pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
uRateLen);
@@ -882,12 +842,10 @@ BSSvUpdateAPNode(
pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxSuppRate;
pMgmt->sNodeDBTable[0].bShortPreamble = WLAN_GET_CAP_INFO_SHORTPREAMBLE(*pwCapInfo);
pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND;
-#ifdef PLICE_DEBUG
- printk("BSSvUpdateAPNode:MaxSuppRate is %d\n", pMgmt->sNodeDBTable[0].wMaxSuppRate);
-#endif
- // Auto rate fallback function initiation.
- // RATEbInit(pDevice);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->sNodeDBTable[0].wTxDataRate = %d \n", pMgmt->sNodeDBTable[0].wTxDataRate);
+ netdev_dbg(pDevice->dev, "BSSvUpdateAPNode:MaxSuppRate is %d\n",
+ pMgmt->sNodeDBTable[0].wMaxSuppRate);
+ /* auto rate fallback function initiation */
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pMgmt->sNodeDBTable[0].wTxDataRate = %d\n", pMgmt->sNodeDBTable[0].wTxDataRate);
};
/*+
@@ -926,9 +884,9 @@ BSSvAddMulticastNode(
&(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate)
);
pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxBasicRate;
-#ifdef PLICE_DEBUG
- printk("BSSvAddMultiCastNode:pMgmt->sNodeDBTable[0].wTxDataRate is %d\n", pMgmt->sNodeDBTable[0].wTxDataRate);
-#endif
+ netdev_dbg(pDevice->dev,
+ "BSSvAddMultiCastNode:pMgmt->sNodeDBTable[0].wTxDataRate is %d\n",
+ pMgmt->sNodeDBTable[0].wTxDataRate);
pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND;
};
@@ -944,7 +902,7 @@ BSSvAddMulticastNode(
* none.
*
-*/
-//2008-4-14 <add> by chester for led issue
+/* 2008-4-14 <add> by chester for led issue */
#ifdef FOR_LED_ON_NOTEBOOK
bool cc = false;
unsigned int status;
@@ -961,7 +919,7 @@ BSSvSecondCallBack(
unsigned int uSleepySTACnt = 0;
unsigned int uNonShortSlotSTACnt = 0;
unsigned int uLongPreambleSTACnt = 0;
- viawget_wpa_header *wpahdr; //DavidWang
+ viawget_wpa_header *wpahdr; /* DavidWang */
spin_lock_irq(&pDevice->lock);
@@ -969,51 +927,47 @@ BSSvSecondCallBack(
pDevice->byERPFlag &=
~(WLAN_SET_ERP_BARKER_MODE(1) | WLAN_SET_ERP_NONERP_PRESENT(1));
- //2008-4-14 <add> by chester for led issue
+ /* 2008-4-14 <add> by chester for led issue */
#ifdef FOR_LED_ON_NOTEBOOK
MACvGPIOIn(pDevice->PortOffset, &pDevice->byGPIO);
- if (((!(pDevice->byGPIO & GPIO0_DATA) && (pDevice->bHWRadioOff == false)) || ((pDevice->byGPIO & GPIO0_DATA) && (pDevice->bHWRadioOff == true))) && (cc == false)) {
+ if (((!(pDevice->byGPIO & GPIO0_DATA) && (!pDevice->bHWRadioOff)) ||
+ ((pDevice->byGPIO & GPIO0_DATA) && pDevice->bHWRadioOff)) &&
+ (!cc)) {
cc = true;
- } else if (cc == true) {
- if (pDevice->bHWRadioOff == true) {
- if (!(pDevice->byGPIO & GPIO0_DATA))
-//||(!(pDevice->byGPIO & GPIO0_DATA) && (pDevice->byRadioCtl & EEP_RADIOCTL_INV)))
- {
- if (status == 1) goto start;
+ } else if (cc) {
+ if (pDevice->bHWRadioOff) {
+ if (!(pDevice->byGPIO & GPIO0_DATA)) {
+ if (status == 1)
+ goto start;
status = 1;
CARDbRadioPowerOff(pDevice);
pMgmt->sNodeDBTable[0].bActive = false;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
pMgmt->eCurrState = WMAC_STATE_IDLE;
- //netif_stop_queue(pDevice->dev);
pDevice->bLinkPass = false;
}
- if (pDevice->byGPIO & GPIO0_DATA)
-//||(!(pDevice->byGPIO & GPIO0_DATA) && (pDevice->byRadioCtl & EEP_RADIOCTL_INV)))
- {
- if (status == 2) goto start;
+ if (pDevice->byGPIO & GPIO0_DATA) {
+ if (status == 2)
+ goto start;
status = 2;
CARDbRadioPowerOn(pDevice);
}
} else {
- if (pDevice->byGPIO & GPIO0_DATA)
-//||(!(pDevice->byGPIO & GPIO0_DATA) && (pDevice->byRadioCtl & EEP_RADIOCTL_INV)))
- {
- if (status == 3) goto start;
+ if (pDevice->byGPIO & GPIO0_DATA) {
+ if (status == 3)
+ goto start;
status = 3;
CARDbRadioPowerOff(pDevice);
pMgmt->sNodeDBTable[0].bActive = false;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
pMgmt->eCurrState = WMAC_STATE_IDLE;
- //netif_stop_queue(pDevice->dev);
pDevice->bLinkPass = false;
}
- if (!(pDevice->byGPIO & GPIO0_DATA))
-//||(!(pDevice->byGPIO & GPIO0_DATA) && (pDevice->byRadioCtl & EEP_RADIOCTL_INV)))
- {
- if (status == 4) goto start;
+ if (!(pDevice->byGPIO & GPIO0_DATA)) {
+ if (status == 4)
+ goto start;
status = 4;
CARDbRadioPowerOn(pDevice);
}
@@ -1025,14 +979,15 @@ start:
if (pDevice->wUseProtectCntDown > 0) {
pDevice->wUseProtectCntDown--;
} else {
- // disable protect mode
+ /* disable protect mode */
pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
}
{
pDevice->byReAssocCount++;
- if ((pDevice->byReAssocCount > 10) && (pDevice->bLinkPass != true)) { //10 sec timeout
- printk("Re-association timeout!!!\n");
+ /* 10 sec timeout */
+ if ((pDevice->byReAssocCount > 10) && (!pDevice->bLinkPass)) {
+ netdev_info(pDevice->dev, "Re-association timeout!!!\n");
pDevice->byReAssocCount = 0;
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
{
@@ -1043,7 +998,7 @@ start:
wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
}
#endif
- } else if (pDevice->bLinkPass == true)
+ } else if (pDevice->bLinkPass)
pDevice->byReAssocCount = 0;
}
@@ -1053,7 +1008,7 @@ start:
for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
if (pMgmt->sNodeDBTable[ii].bActive) {
- // Increase in-activity counter
+ /* increase inactivity counter */
pMgmt->sNodeDBTable[ii].uInActiveCount++;
if (ii > 0) {
@@ -1067,7 +1022,7 @@ start:
if (pMgmt->sNodeDBTable[ii].eNodeState >= NODE_ASSOC) {
pDevice->uAssocCount++;
- // check if Non ERP exist
+ /* check if non-ERP STAs exist */
if (pMgmt->sNodeDBTable[ii].uInActiveCount < ERP_RECOVER_COUNT) {
if (!pMgmt->sNodeDBTable[ii].bShortPreamble) {
pDevice->byERPFlag |= WLAN_SET_ERP_BARKER_MODE(1);
@@ -1082,43 +1037,39 @@ start:
}
}
- // check if any STA in PS mode
+ /* check if any STA in PS mode */
if (pMgmt->sNodeDBTable[ii].bPSEnable)
uSleepySTACnt++;
}
- // Rate fallback check
+ /* rate fallback check */
if (!pDevice->bFixRate) {
-/*
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (ii == 0))
- RATEvTxRateFallBack(pDevice, &(pMgmt->sNodeDBTable[ii]));
-*/
if (ii > 0) {
- // ii = 0 for multicast node (AP & Adhoc)
+ /* ii = 0 for multicast node (AP & Adhoc) */
RATEvTxRateFallBack((void *)pDevice, &(pMgmt->sNodeDBTable[ii]));
} else {
- // ii = 0 reserved for unicast AP node (Infra STA)
+ /* ii = 0 reserved for unicast AP node (Infra STA) */
if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)
-#ifdef PLICE_DEBUG
- printk("SecondCallback:Before:TxDataRate is %d\n", pMgmt->sNodeDBTable[0].wTxDataRate);
-#endif
+ netdev_dbg(pDevice->dev,
+ "SecondCallback:Before:TxDataRate is %d\n",
+ pMgmt->sNodeDBTable[0].wTxDataRate);
RATEvTxRateFallBack((void *)pDevice, &(pMgmt->sNodeDBTable[ii]));
-#ifdef PLICE_DEBUG
- printk("SecondCallback:After:TxDataRate is %d\n", pMgmt->sNodeDBTable[0].wTxDataRate);
-#endif
+ netdev_dbg(pDevice->dev,
+ "SecondCallback:After:TxDataRate is %d\n",
+ pMgmt->sNodeDBTable[0].wTxDataRate);
}
}
- // check if pending PS queue
+ /* check if pending PS queue */
if (pMgmt->sNodeDBTable[ii].wEnQueueCnt != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index= %d, Queue = %d pending \n",
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index= %d, Queue = %d pending\n",
ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt);
if ((ii > 0) && (pMgmt->sNodeDBTable[ii].wEnQueueCnt > 15)) {
BSSvRemoveOneNode(pDevice, ii);
- DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Pending many queues PS STA Index = %d remove \n", ii);
+ DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Pending many queues PS STA Index = %d remove\n", ii);
continue;
}
}
@@ -1127,7 +1078,7 @@ start:
}
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->eCurrentPHYType == PHY_TYPE_11G)) {
- // on/off protect mode
+ /* on/off protect mode */
if (WLAN_GET_ERP_USE_PROTECTION(pDevice->byERPFlag)) {
if (!pDevice->bProtectMode) {
MACvEnableProtectMD(pDevice->PortOffset);
@@ -1139,7 +1090,7 @@ start:
pDevice->bProtectMode = false;
}
}
- // on/off short slot time
+ /* on/off short slot time */
if (uNonShortSlotSTACnt > 0) {
if (pDevice->bShortSlotTime) {
@@ -1155,7 +1106,7 @@ start:
}
}
- // on/off barker long preamble mode
+ /* on/off barker long preamble mode */
if (uLongPreambleSTACnt > 0) {
if (!pDevice->bBarkerPreambleMd) {
@@ -1171,7 +1122,7 @@ start:
}
- // Check if any STA in PS mode, enable DTIM multicast deliver
+ /* check if any STA is in PS mode; enable DTIM multicast delivery */
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
if (uSleepySTACnt > 0)
pMgmt->sNodeDBTable[0].bPSEnable = true;
@@ -1184,11 +1135,10 @@ start:
if ((pMgmt->eCurrMode == WMAC_MODE_STANDBY) ||
(pMgmt->eCurrMode == WMAC_MODE_ESS_STA)) {
- if (pMgmt->sNodeDBTable[0].bActive) { // Assoc with BSS
- if (pDevice->bUpdateBBVGA) {
- // s_vCheckSensitivity((void *) pDevice);
+ /* assoc with BSS */
+ if (pMgmt->sNodeDBTable[0].bActive) {
+ if (pDevice->bUpdateBBVGA)
s_vCheckPreEDThreshold((void *)pDevice);
- }
if ((pMgmt->sNodeDBTable[0].uInActiveCount >= (LOST_BEACON_COUNT/2)) &&
(pDevice->byBBVGACurrent != pDevice->abyBBVGA[0])) {
@@ -1232,12 +1182,18 @@ start:
if (pDevice->uAutoReConnectTime < 10) {
pDevice->uAutoReConnectTime++;
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- //network manager support need not do Roaming scan???
- if (pDevice->bWPASuppWextEnabled == true)
+ /*
+ * with network manager support there is no
+ * need to do a roaming scan???
+ */
+ if (pDevice->bWPASuppWextEnabled)
pDevice->uAutoReConnectTime = 0;
#endif
} else {
- //mike use old encryption status for wpa reauthen
+ /*
+ * mike use old encryption status
+ * for wpa reauthentication
+ */
if (pDevice->bWPADEVUp)
pDevice->eEncryptionStatus = pDevice->eOldEncryptionStatus;
@@ -1252,7 +1208,7 @@ start:
}
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- // if adhoc started which essid is NULL string, rescanning.
+ /* if adhoc started with a NULL-string ESSID, rescan */
if ((pMgmt->eCurrState == WMAC_STATE_STARTED) && (pCurrSSID->len == 0)) {
if (pDevice->uAutoReConnectTime < 10) {
pDevice->uAutoReConnectTime++;
@@ -1262,13 +1218,11 @@ start:
bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
pDevice->uAutoReConnectTime = 0;
- };
+ }
}
if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
- if (pDevice->bUpdateBBVGA) {
- //s_vCheckSensitivity((void *) pDevice);
+ if (pDevice->bUpdateBBVGA)
s_vCheckPreEDThreshold((void *)pDevice);
- }
if (pMgmt->sNodeDBTable[0].uInActiveCount >= ADHOC_LOST_BEACON_COUNT) {
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Lost other STA beacon [%d] sec, started !\n", pMgmt->sNodeDBTable[0].uInActiveCount);
pMgmt->sNodeDBTable[0].uInActiveCount = 0;
@@ -1318,40 +1272,31 @@ BSSvUpdateNodeTxCounter(
unsigned short wFallBackRate = RATE_1M;
unsigned char byFallBack;
unsigned int ii;
-// unsigned int txRetryTemp;
-//PLICE_DEBUG->
- //txRetryTemp = byTxRetry;
-//PLICE_DEBUG <-
pTxBufHead = (PSTxBufHead) pbyBuffer;
- if (pTxBufHead->wFIFOCtl & FIFOCTL_AUTO_FB_0) {
+ if (pTxBufHead->wFIFOCtl & FIFOCTL_AUTO_FB_0)
byFallBack = AUTO_FB_0;
- } else if (pTxBufHead->wFIFOCtl & FIFOCTL_AUTO_FB_1) {
+ else if (pTxBufHead->wFIFOCtl & FIFOCTL_AUTO_FB_1)
byFallBack = AUTO_FB_1;
- } else {
+ else
byFallBack = AUTO_FB_NONE;
- }
- wRate = pTxBufHead->wReserved; //?wRate
+ wRate = pTxBufHead->wReserved;
- // Only Unicast using support rates
+ /* Only Unicast using support rates */
if (pTxBufHead->wFIFOCtl & FIFOCTL_NEEDACK) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wRate %04X, byTsr0 %02X, byTsr1 %02X\n", wRate, byTsr0, byTsr1);
if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
pMgmt->sNodeDBTable[0].uTxAttempts += 1;
if ((byTsr1 & TSR1_TERR) == 0) {
- // transmit success, TxAttempts at least plus one
+ /* transmit success, TxAttempts at least plus one */
pMgmt->sNodeDBTable[0].uTxOk[MAX_RATE]++;
if ((byFallBack == AUTO_FB_NONE) ||
(wRate < RATE_18M)) {
wFallBackRate = wRate;
} else if (byFallBack == AUTO_FB_0) {
-//PLICE_DEBUG
if (byTxRetry < 5)
wFallBackRate = awHWRetry0[wRate-RATE_18M][byTxRetry];
- //wFallBackRate = awHWRetry0[wRate-RATE_12M][byTxRetry];
- //wFallBackRate = awHWRetry0[wRate-RATE_18M][txRetryTemp] +1;
else
wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
- //wFallBackRate = awHWRetry0[wRate-RATE_12M][4];
} else if (byFallBack == AUTO_FB_1) {
if (byTxRetry < 5)
wFallBackRate = awHWRetry1[wRate-RATE_18M][byTxRetry];
@@ -1369,18 +1314,11 @@ BSSvUpdateNodeTxCounter(
(wRate < RATE_18M)) {
pMgmt->sNodeDBTable[0].uTxFail[wRate] += byTxRetry;
} else if (byFallBack == AUTO_FB_0) {
-//PLICE_DEBUG
- for (ii = 0; ii < byTxRetry; ii++)
- //for (ii=0;ii<txRetryTemp;ii++)
- {
- if (ii < 5) {
-//PLICE_DEBUG
+ for (ii = 0; ii < byTxRetry; ii++) {
+ if (ii < 5)
wFallBackRate = awHWRetry0[wRate-RATE_18M][ii];
- //wFallBackRate = awHWRetry0[wRate-RATE_12M][ii];
- } else {
+ else
wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
- //wFallBackRate = awHWRetry0[wRate-RATE_12M][4];
- }
pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
}
} else if (byFallBack == AUTO_FB_1) {
@@ -1402,7 +1340,7 @@ BSSvUpdateNodeTxCounter(
if (BSSDBbIsSTAInNodeDB((void *)pMgmt, &(pMACHeader->abyAddr1[0]), &uNodeIndex)) {
pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts += 1;
if ((byTsr1 & TSR1_TERR) == 0) {
- // transmit success, TxAttempts at least plus one
+ /* transmit success, TxAttempts at least plus one */
pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
if ((byFallBack == AUTO_FB_NONE) ||
(wRate < RATE_18M)) {
@@ -1485,7 +1423,7 @@ BSSvClearNodeDBTable(
for (ii = uStartIndex; ii < (MAX_NODE_NUM + 1); ii++) {
if (pMgmt->sNodeDBTable[ii].bActive) {
- // check if sTxPSQueue has been initial
+ /* check if sTxPSQueue has been initialized */
if (pMgmt->sNodeDBTable[ii].sTxPSQueue.next != NULL) {
while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "PS skb != NULL %d\n", ii);
@@ -1517,7 +1455,7 @@ void s_vCheckSensitivity(
((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
if (pBSSList != NULL) {
- // Updata BB Reg if RSSI is too strong.
+ /* Update BB Reg if RSSI is too strong */
long LocalldBmAverage = 0;
long uNumofdBm = 0;
for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
@@ -1556,9 +1494,8 @@ BSSvClearAnyBSSJoinRecord(
PSMgmtObject pMgmt = pDevice->pMgmt;
unsigned int ii;
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ for (ii = 0; ii < MAX_BSS_NUM; ii++)
pMgmt->sBSSList[ii].bSelected = false;
- }
return;
}
@@ -1580,19 +1517,18 @@ void s_uCalculateLinkQual(
pDevice->scStatistic.RxOkCnt;
TxOkRatio = (TxCnt < 6) ? 4000 : ((pDevice->scStatistic.TxNoRetryOkCount * 4000) / TxCnt);
RxOkRatio = (RxCnt < 6) ? 2000 : ((pDevice->scStatistic.RxOkCnt * 2000) / RxCnt);
-//decide link quality
- if (pDevice->bLinkPass != true) {
+ /* decide link quality */
+ if (!pDevice->bLinkPass) {
pDevice->scStatistic.LinkQuality = 0;
pDevice->scStatistic.SignalStren = 0;
} else {
RFvRSSITodBm(pDevice, (unsigned char)(pDevice->uCurrRSSI), &ldBm);
- if (-ldBm < 50) {
+ if (-ldBm < 50)
RssiRatio = 4000;
- } else if (-ldBm > 90) {
+ else if (-ldBm > 90)
RssiRatio = 0;
- } else {
+ else
RssiRatio = (40-(-ldBm-50))*4000/40;
- }
pDevice->scStatistic.SignalStren = RssiRatio/40;
pDevice->scStatistic.LinkQuality = (RssiRatio+TxOkRatio+RxOkRatio)/100;
}
@@ -1616,10 +1552,8 @@ void s_vCheckPreEDThreshold(
if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
- if (pBSSList != NULL) {
+ if (pBSSList != NULL)
pDevice->byBBPreEDRSSI = (unsigned char) (~(pBSSList->ldBmAverRange) + 1);
- //BBvUpdatePreEDThreshold(pDevice, false);
- }
}
return;
}
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index fbf18e23e78e..db38ca051130 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -1053,7 +1053,7 @@ CARDbAdd_PMKID_Candidate(
for (ii = 0; ii < pDevice->gsPMKIDCandidate.NumCandidates; ii++) {
pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii];
if (!memcmp(pCandidateList->BSSID, pbyBSSID, ETH_ALEN)) {
- if ((bRSNCapExist == true) && (wRSNCap & BIT0)) {
+ if (bRSNCapExist && (wRSNCap & BIT0)) {
pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
} else {
pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
@@ -1064,7 +1064,7 @@ CARDbAdd_PMKID_Candidate(
// New Candidate
pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[pDevice->gsPMKIDCandidate.NumCandidates];
- if ((bRSNCapExist == true) && (wRSNCap & BIT0)) {
+ if (bRSNCapExist && (wRSNCap & BIT0)) {
pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
} else {
pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
@@ -1190,7 +1190,7 @@ CARDbStartMeasure(
}
} while (pDevice->uNumOfMeasureEIDs != 0);
- if (bExpired == false) {
+ if (!bExpired) {
MACvSelectPage1(pDevice->PortOffset);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MSRSTART, LODWORD(qwStartTSF));
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MSRSTART + 4, HIDWORD(qwStartTSF));
@@ -1280,7 +1280,7 @@ CARDbSetQuiet(
PSDevice pDevice = (PSDevice) pDeviceHandler;
unsigned int ii = 0;
- if (bResetQuiet == true) {
+ if (bResetQuiet) {
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
for (ii = 0; ii < MAX_QUIET_COUNT; ii++) {
pDevice->sQuiet[ii].bEnable = false;
@@ -2013,7 +2013,7 @@ QWORD CARDqGetTSFOffset(unsigned char byRxRate, QWORD qwTSF1, QWORD qwTSF2)
HIDWORD(qwTSFOffset) = HIDWORD(qwTSF1) - HIDWORD(qwTSF2) - 1;
} else {
HIDWORD(qwTSFOffset) = HIDWORD(qwTSF1) - HIDWORD(qwTSF2);
- };
+ }
return qwTSFOffset;
}
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index ba9481fa654f..3198a31e2ed7 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -441,8 +441,8 @@ void init_channel_table(void *pDeviceHandler)
break;
}
- if ((pDevice->dwDiagRefCount != 0) || (pDevice->b11hEnable == true)) {
- if (bMultiBand == true) {
+ if ((pDevice->dwDiagRefCount != 0) || pDevice->b11hEnable) {
+ if (bMultiBand) {
for (ii = 0; ii < CARD_MAX_CHANNEL_TBL; ii++) {
sChannelTbl[ii + 1].bValid = true;
pDevice->abyRegPwr[ii + 1] = pDevice->abyOFDMDefaultPwr[ii + 1];
@@ -463,7 +463,7 @@ void init_channel_table(void *pDeviceHandler)
}
}
} else if (pDevice->byZoneType <= CCODE_MAX) {
- if (bMultiBand == true) {
+ if (bMultiBand) {
for (ii = 0; ii < CARD_MAX_CHANNEL_TBL; ii++) {
if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
sChannelTbl[ii + 1].bValid = true;
@@ -531,7 +531,7 @@ bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
return bResult;
}
- if (sChannelTbl[uConnectionChannel].bValid == false) {
+ if (!sChannelTbl[uConnectionChannel].bValid) {
return false;
}
@@ -557,7 +557,7 @@ bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
bResult &= RFbSelectChannel(pDevice->PortOffset, pDevice->byRFType, (unsigned char)uConnectionChannel);
// Init Synthesizer Table
- if (pDevice->bEnablePSMode == true)
+ if (pDevice->bEnablePSMode)
RFvWriteWakeProgSyn(pDevice->PortOffset, pDevice->byRFType, uConnectionChannel);
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "CARDbSetMediaChannel: %d\n", (unsigned char)uConnectionChannel);
@@ -766,7 +766,7 @@ unsigned char auto_channel_select(void *pDeviceHandler, CARD_PHY_TYPE ePHYType)
if (ePHYType == PHY_TYPE_11A) {
for (ii = CB_MAX_CHANNEL_24G + 1; ii <= CB_MAX_CHANNEL; ii++) {
- if (sChannelTbl[ii].bValid == true) {
+ if (sChannelTbl[ii].bValid) {
if (byOptionChannel == 0) {
byOptionChannel = (unsigned char) ii;
}
@@ -780,7 +780,7 @@ unsigned char auto_channel_select(void *pDeviceHandler, CARD_PHY_TYPE ePHYType)
} else {
byOptionChannel = 0;
for (ii = 1; ii <= CB_MAX_CHANNEL_24G; ii++) {
- if (sChannelTbl[ii].bValid == true) {
+ if (sChannelTbl[ii].bValid) {
if (sChannelTbl[ii].byMAP == 0) {
aiWeight[ii] += 100;
} else if (sChannelTbl[ii].byMAP & 0x01) {
@@ -807,7 +807,7 @@ unsigned char auto_channel_select(void *pDeviceHandler, CARD_PHY_TYPE ePHYType)
}
}
for (ii = 1; ii <= CB_MAX_CHANNEL_24G; ii++) {
- if ((sChannelTbl[ii].bValid == true) &&
+ if (sChannelTbl[ii].bValid &&
(aiWeight[ii] > aiWeight[byOptionChannel])) {
byOptionChannel = (unsigned char) ii;
}
diff --git a/drivers/staging/vt6655/datarate.c b/drivers/staging/vt6655/datarate.c
index e7b6bc7de4ac..c9a89cd7633c 100644
--- a/drivers/staging/vt6655/datarate.c
+++ b/drivers/staging/vt6655/datarate.c
@@ -218,8 +218,7 @@ RATEvParseMaxRate(
for (ii = 0; ii < uRateLen; ii++) {
byRate = (unsigned char)(pItemRates->abyRates[ii]);
- if (WLAN_MGMT_IS_BASICRATE(byRate) &&
- (bUpdateBasicRate == true)) {
+ if (WLAN_MGMT_IS_BASICRATE(byRate) && bUpdateBasicRate) {
// Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate
CARDbAddBasicRate((void *)pDevice, wGetRateIdx(byRate));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ParseMaxRate AddBasicRate: %d\n", wGetRateIdx(byRate));
@@ -329,7 +328,7 @@ RATEvTxRateFallBack(
for (ii = 0; ii < MAX_RATE; ii++) {
if (psNodeDBTable->wSuppRate & (0x0001<<ii)) {
- if (bAutoRate[ii] == true) {
+ if (bAutoRate[ii]) {
wIdxUpRate = (unsigned short) ii;
}
} else {
@@ -354,8 +353,7 @@ RATEvTxRateFallBack(
wIdxDownRate = psNodeDBTable->wTxDataRate;
for (ii = psNodeDBTable->wTxDataRate; ii > 0;) {
ii--;
- if ((dwThroughputTbl[ii] > dwThroughput) &&
- (bAutoRate[ii] == true)) {
+ if ((dwThroughputTbl[ii] > dwThroughput) && bAutoRate[ii]) {
dwThroughput = dwThroughputTbl[ii];
wIdxDownRate = (unsigned short) ii;
}
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index ca1b8578cf79..062c3a374b99 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -31,7 +31,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index e93fdc88d844..a952df1bf9d6 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -267,7 +267,7 @@ static CHIP_INFO chip_info_table[] = {
{0, NULL}
};
-DEFINE_PCI_DEVICE_TABLE(vt6655_pci_id_table) = {
+const struct pci_device_id vt6655_pci_id_table[] = {
{ PCI_VDEVICE(VIA, 0x3253), (kernel_ulong_t)chip_info_table},
{ 0, }
};
@@ -561,7 +561,7 @@ static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
pDevice->byTxAntennaMode = ANT_B;
pDevice->dwTxAntennaSel = 1;
pDevice->dwRxAntennaSel = 1;
- if (pDevice->bTxRxAntInv == true)
+ if (pDevice->bTxRxAntInv)
pDevice->byRxAntennaMode = ANT_A;
else
pDevice->byRxAntennaMode = ANT_B;
@@ -578,13 +578,13 @@ static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
pDevice->dwRxAntennaSel = 0;
if (byValue & EEP_ANTENNA_AUX) {
pDevice->byTxAntennaMode = ANT_A;
- if (pDevice->bTxRxAntInv == true)
+ if (pDevice->bTxRxAntInv)
pDevice->byRxAntennaMode = ANT_B;
else
pDevice->byRxAntennaMode = ANT_A;
} else {
pDevice->byTxAntennaMode = ANT_B;
- if (pDevice->bTxRxAntInv == true)
+ if (pDevice->bTxRxAntInv)
pDevice->byRxAntennaMode = ANT_A;
else
pDevice->byRxAntennaMode = ANT_B;
@@ -635,7 +635,7 @@ static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
pDevice->byRFType &= RF_MASK;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRFType = %x\n", pDevice->byRFType);
- if (pDevice->bZoneRegExist == false) {
+ if (!pDevice->bZoneRegExist) {
pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byZoneType = %x\n", pDevice->byZoneType);
@@ -742,7 +742,7 @@ static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
if (!(pDevice->byGPIO & GPIO0_DATA)) { pDevice->bHWRadioOff = false; }
}
- if ((pDevice->bRadioControlOff == true)) {
+ if (pDevice->bRadioControlOff) {
CARDbRadioPowerOff(pDevice);
} else CARDbRadioPowerOn(pDevice);
#else
@@ -751,7 +751,7 @@ static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
pDevice->bHWRadioOff = true;
}
}
- if ((pDevice->bHWRadioOff == true) || (pDevice->bRadioControlOff == true)) {
+ if (pDevice->bHWRadioOff || pDevice->bRadioControlOff) {
CARDbRadioPowerOff(pDevice);
}
@@ -809,7 +809,7 @@ static bool device_release_WPADEV(PSDevice pDevice)
int ii = 0;
// wait_queue_head_t Set_wait;
//send device close to wpa_supplicnat layer
- if (pDevice->bWPADEVUp == true) {
+ if (pDevice->bWPADEVUp) {
wpahdr = (viawget_wpa_header *)pDevice->skb->data;
wpahdr->type = VIAWGET_DEVICECLOSE_MSG;
wpahdr->resp_ie_len = 0;
@@ -826,7 +826,7 @@ static bool device_release_WPADEV(PSDevice pDevice)
//wait release WPADEV
// init_waitqueue_head(&Set_wait);
// wait_event_timeout(Set_wait, ((pDevice->wpadev==NULL)&&(pDevice->skb == NULL)),5*HZ); //1s wait
- while ((pDevice->bWPADEVUp == true)) {
+ while (pDevice->bWPADEVUp) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ / 20); //wait 50ms
ii++;
@@ -892,7 +892,7 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
#ifdef DEBUG
printk("Before get pci_info memaddr is %x\n", pDevice->memaddr);
#endif
- if (device_get_pci_info(pDevice, pcid) == false) {
+ if (!device_get_pci_info(pDevice, pcid)) {
printk(KERN_ERR DEVICE_NAME ": Failed to find PCI device.\n");
device_free_info(pDevice);
return -ENODEV;
@@ -1633,7 +1633,7 @@ static int device_tx_srv(PSDevice pDevice, unsigned int uIdx) {
bFull = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " AC0DMA is Full = %d\n", pDevice->iTDUsed[uIdx]);
}
- if (netif_queue_stopped(pDevice->dev) && (bFull == false)) {
+ if (netif_queue_stopped(pDevice->dev) && !bFull) {
netif_wake_queue(pDevice->dev);
}
}
@@ -1798,7 +1798,7 @@ static int device_open(struct net_device *dev) {
pDevice->byReAssocCount = 0;
pDevice->bWPADEVUp = false;
// Patch: if WEP key already set by iwconfig but device not yet open
- if ((pDevice->bEncryptionEnable == true) && (pDevice->bTransmitKey == true)) {
+ if (pDevice->bEncryptionEnable && pDevice->bTransmitKey) {
KeybSetDefaultKey(&(pDevice->sKey),
(unsigned long)(pDevice->byKeyIndex | (1 << 31)),
pDevice->uKeyLength,
@@ -1895,7 +1895,7 @@ static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev) {
return 0;
}
- if (pDevice->bStopTx0Pkt == true) {
+ if (pDevice->bStopTx0Pkt) {
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
@@ -1924,7 +1924,7 @@ bool device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, unsigned int uNodeI
SKeyItem STempKey;
// unsigned char byKeyIndex = 0;
- if (pDevice->bStopTx0Pkt == true) {
+ if (pDevice->bStopTx0Pkt) {
dev_kfree_skb_irq(skb);
return false;
}
@@ -1993,14 +1993,14 @@ bool device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, unsigned int uNodeI
} else if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
byPktType = PK_TYPE_11A;
} else {
- if (pDevice->bProtectMode == true) {
+ if (pDevice->bProtectMode) {
byPktType = PK_TYPE_11GB;
} else {
byPktType = PK_TYPE_11GA;
}
}
- if (pDevice->bEncryptionEnable == true)
+ if (pDevice->bEncryptionEnable)
bNeedEncryption = true;
if (pDevice->bEnableHostWEP) {
@@ -2076,7 +2076,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
bool bNodeExist = false;
spin_lock_irq(&pDevice->lock);
- if (pDevice->bLinkPass == false) {
+ if (!pDevice->bLinkPass) {
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
@@ -2130,7 +2130,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
}
}
- if (bNodeExist == false) {
+ if (!bNodeExist) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "Unknown STA not found in node DB \n");
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
@@ -2149,7 +2149,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
cbFrameBodySize += 8;
}
- if (pDevice->bEncryptionEnable == true) {
+ if (pDevice->bEncryptionEnable) {
bNeedEncryption = true;
// get Transmit key
do {
@@ -2196,7 +2196,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
if (pDevice->bEnableHostWEP) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "acdma0: STA index %d\n", uNodeIndex);
- if (pDevice->bEncryptionEnable == true) {
+ if (pDevice->bEncryptionEnable) {
pTransmitKey = &STempKey;
pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
@@ -2286,7 +2286,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
} else if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
byPktType = PK_TYPE_11A;
} else {
- if (pDevice->bProtectMode == true) {
+ if (pDevice->bProtectMode) {
byPktType = PK_TYPE_11GB;
} else {
byPktType = PK_TYPE_11GA;
@@ -2297,7 +2297,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
// printk("FIX RATE:CurrentRate is %d");
//#endif
- if (bNeedEncryption == true) {
+ if (bNeedEncryption) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ntohs Pkt Type=%04x\n", ntohs(pDevice->sTxEthHeader.wType));
if ((pDevice->sTxEthHeader.wType) == TYPE_PKT_802_1x) {
bNeedEncryption = false;
@@ -2306,7 +2306,7 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
if (pTransmitKey == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Don't Find TX KEY\n");
} else {
- if (bTKIP_UseGTK == true) {
+ if (bTKIP_UseGTK) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "error: KEY is GTK!!~~\n");
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
@@ -2493,7 +2493,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
MACvSelectPage0(pDevice->PortOffset);
//xxxx
// WCMDbFlushCommandQueue(pDevice->pMgmt, true);
- if (set_channel(pDevice, pDevice->pCurrMeasureEID->sReq.byChannel) == true) {
+ if (set_channel(pDevice, pDevice->pCurrMeasureEID->sReq.byChannel)) {
pDevice->bMeasureInProgress = true;
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_READY);
@@ -2544,12 +2544,12 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
if (pDevice->dwIsr & ISR_QUIETSTART) {
do {
;
- } while (CARDbStartQuiet(pDevice) == false);
+ } while (!CARDbStartQuiet(pDevice));
}
}
if (pDevice->dwIsr & ISR_TBTT) {
- if (pDevice->bEnableFirstQuiet == true) {
+ if (pDevice->bEnableFirstQuiet) {
pDevice->byQuietStartCount--;
if (pDevice->byQuietStartCount == 0) {
pDevice->bEnableFirstQuiet = false;
@@ -2558,7 +2558,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
MACvSelectPage0(pDevice->PortOffset);
}
}
- if ((pDevice->bChannelSwitch == true) &&
+ if (pDevice->bChannelSwitch &&
(pDevice->eOPMode == OP_MODE_INFRASTRUCTURE)) {
pDevice->byChannelSwitchCount--;
if (pDevice->byChannelSwitchCount == 0) {
@@ -2575,7 +2575,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
if (pDevice->eOPMode == OP_MODE_ADHOC) {
//pDevice->bBeaconSent = false;
} else {
- if ((pDevice->bUpdateBBVGA) && (pDevice->bLinkPass == true) && (pDevice->uCurrRSSI != 0)) {
+ if ((pDevice->bUpdateBBVGA) && pDevice->bLinkPass && (pDevice->uCurrRSSI != 0)) {
long ldBm;
RFvRSSITodBm(pDevice, (unsigned char) pDevice->uCurrRSSI, &ldBm);
@@ -2642,7 +2642,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
}
pDevice->bBeaconSent = true;
- if (pDevice->bChannelSwitch == true) {
+ if (pDevice->bChannelSwitch) {
pDevice->byChannelSwitchCount--;
if (pDevice->byChannelSwitchCount == 0) {
pDevice->bChannelSwitch = false;
@@ -3237,7 +3237,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
netif_stop_queue(pDevice->dev);
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- if (pDevice->bWPASuppWextEnabled != true)
+ if (!pDevice->bWPASuppWextEnabled)
#endif
bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
@@ -3373,7 +3373,7 @@ viawget_resume(struct pci_dev *pcid)
spin_lock_irq(&pDevice->lock);
MACvRestoreContext(pDevice->PortOffset, pDevice->abyMacContext);
device_init_registers(pDevice, DEVICE_INIT_DXPL);
- if (pMgmt->sNodeDBTable[0].bActive == true) { // Assoc with BSS
+ if (pMgmt->sNodeDBTable[0].bActive) { // Assoc with BSS
pMgmt->sNodeDBTable[0].bActive = false;
pDevice->bLinkPass = false;
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index 0ff51cb4a207..0a29c9015419 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -169,7 +169,7 @@ s_vProcessRxMACHeader(PSDevice pDevice, unsigned char *pbyRxBufferAddr,
}
} else {
cbHeaderSize += WLAN_HDR_ADDR3_LEN;
- };
+ }
pbyRxBuffer = (unsigned char *)(pbyRxBufferAddr + cbHeaderSize);
if (ether_addr_equal(pbyRxBuffer, pDevice->abySNAP_Bridgetunnel)) {
@@ -263,7 +263,7 @@ s_vGetDASA(unsigned char *pbyRxBufferAddr, unsigned int *pcbHeaderSize,
psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr2[ii];
}
}
- };
+ }
*pcbHeaderSize = cbHeaderSize;
}
@@ -379,7 +379,7 @@ device_receive_frame(
pMACHeader = (PS802_11Header)((unsigned char *)(skb->data) + 8);
//PLICE_DEBUG<-
- if (pDevice->bMeasureInProgress == true) {
+ if (pDevice->bMeasureInProgress) {
if ((*pbyRsr & RSR_CRCOK) != 0) {
pDevice->byBasicMap |= 0x01;
}
@@ -436,7 +436,7 @@ device_receive_frame(
}
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (s_bAPModeRxCtl(pDevice, pbyFrame, iSANodeIndex) == true) {
+ if (s_bAPModeRxCtl(pDevice, pbyFrame, iSANodeIndex)) {
return false;
}
}
@@ -592,7 +592,7 @@ device_receive_frame(
}
} else {
// Control Frame
- };
+ }
return false;
} else {
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
@@ -608,8 +608,7 @@ device_receive_frame(
}
} else {
// discard DATA packet while not associate || BSSID error
- if ((pDevice->bLinkPass == false) ||
- !(*pbyRsr & RSR_BSSIDOK)) {
+ if (!pDevice->bLinkPass || !(*pbyRsr & RSR_BSSIDOK)) {
if (bDeFragRx) {
if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
DBG_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: can not alloc more frag bufs\n",
@@ -658,7 +657,7 @@ device_receive_frame(
// Now it only supports 802.11g Infrastructure Mode, and support rate must up to 54 Mbps
if (pDevice->bDiversityEnable && (FrameSize > 50) &&
(pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) &&
- (pDevice->bLinkPass == true)) {
+ pDevice->bLinkPass) {
BBvAntennaDiversity(pDevice, s_byGetRateIdx(*pbyRxRate), 0);
}
@@ -683,7 +682,7 @@ device_receive_frame(
// -----------------------------------------------
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnable8021x == true)) {
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && pDevice->bEnable8021x) {
unsigned char abyMacHdr[24];
// Only 802.1x packet incoming allowed
@@ -698,7 +697,7 @@ device_receive_frame(
if (wEtherType == ETH_P_PAE) {
skb->dev = pDevice->apdev;
- if (bIsWEP == true) {
+ if (bIsWEP) {
// strip IV header(8)
memcpy(&abyMacHdr[0], (skb->data + 4), 24);
memcpy((skb->data + 4 + cbIVOffset), &abyMacHdr[0], 24);
@@ -770,8 +769,9 @@ device_receive_frame(
//DBG_PRN_GRP12(("LocalL: %lx, LocalR: %lx\n", dwLocalMIC_L, dwLocalMIC_R));
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dwMICKey0= %lx,dwMICKey1= %lx \n", dwMICKey0, dwMICKey1);
- if ((cpu_to_le32(*pdwMIC_L) != dwLocalMIC_L) || (cpu_to_le32(*pdwMIC_R) != dwLocalMIC_R) ||
- (pDevice->bRxMICFail == true)) {
+ if ((cpu_to_le32(*pdwMIC_L) != dwLocalMIC_L) ||
+ (cpu_to_le32(*pdwMIC_R) != dwLocalMIC_R) ||
+ pDevice->bRxMICFail) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC comparison is fail!\n");
pDevice->bRxMICFail = false;
//pDevice->s802_11Counter.TKIPLocalMICFailures.QuadPart++;
@@ -894,13 +894,13 @@ device_receive_frame(
return false;
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (s_bAPModeRxData(pDevice,
+ if (!s_bAPModeRxData(pDevice,
skb,
FrameSize,
cbHeaderOffset,
iSANodeIndex,
iDANodeIndex
-) == false) {
+)) {
if (bDeFragRx) {
if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
DBG_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: can not alloc more frag bufs\n",
@@ -1123,7 +1123,7 @@ static bool s_bHandleRxEncryption(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pKey == NULL\n");
if (byDecMode == KEY_CTL_WEP) {
// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == true) {
+ } else if (pDevice->bLinkPass) {
// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
}
return false;
@@ -1131,7 +1131,7 @@ static bool s_bHandleRxEncryption(
if (byDecMode != pKey->byCipherSuite) {
if (byDecMode == KEY_CTL_WEP) {
// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == true) {
+ } else if (pDevice->bLinkPass) {
// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
}
*pKeyOut = NULL;
@@ -1234,7 +1234,7 @@ static bool s_bHostWepRxEncryption(
if (byDecMode != pKey->byCipherSuite) {
if (byDecMode == KEY_CTL_WEP) {
// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == true) {
+ } else if (pDevice->bLinkPass) {
// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
}
return false;
@@ -1245,7 +1245,7 @@ static bool s_bHostWepRxEncryption(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "byDecMode == KEY_CTL_WEP \n");
if ((pDevice->byLocalID <= REV_ID_VT3253_A1) ||
(((PSKeyTable)(pKey->pvKeyTable))->bSoftWEP == true) ||
- (bOnFly == false)) {
+ !bOnFly) {
// Software WEP
// 1. 3253A
// 2. WEP 256
@@ -1277,7 +1277,7 @@ static bool s_bHostWepRxEncryption(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "TSC0_15: %x\n", *pwRxTSC15_0);
if (byDecMode == KEY_CTL_TKIP) {
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1) || (bOnFly == false)) {
+ if ((pDevice->byLocalID <= REV_ID_VT3253_A1) || !bOnFly) {
// Software TKIP
// 1. 3253 A
// 2. NotOnFly
@@ -1297,7 +1297,7 @@ static bool s_bHostWepRxEncryption(
}
if (byDecMode == KEY_CTL_CCMP) {
- if (bOnFly == false) {
+ if (!bOnFly) {
// Software CCMP
// NotOnFly
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "soft KEY_CTL_CCMP\n");
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
index ab8b2ba6eedd..6eecd5358916 100644
--- a/drivers/staging/vt6655/hostap.c
+++ b/drivers/staging/vt6655/hostap.c
@@ -454,7 +454,7 @@ static int hostap_set_encryption(PSDevice pDevice,
unsigned long dwKeyIndex = 0;
unsigned char abyKey[MAX_KEY_LEN];
unsigned char abySeq[MAX_KEY_LEN];
- NDIS_802_11_KEY_RSC KeyRSC;
+ unsigned long long KeyRSC;
unsigned char byKeyDecMode = KEY_CTL_WEP;
int ret = 0;
int iNodeIndex = -1;
@@ -495,11 +495,11 @@ static int hostap_set_encryption(PSDevice pDevice,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " hostap_set_encryption: alg %d \n", param->u.crypt.alg);
if (param->u.crypt.alg == WPA_ALG_NONE) {
- if (pMgmt->sNodeDBTable[iNodeIndex].bOnFly == true) {
- if (KeybRemoveKey(&(pDevice->sKey),
+ if (pMgmt->sNodeDBTable[iNodeIndex].bOnFly) {
+ if (!KeybRemoveKey(&(pDevice->sKey),
param->sta_addr,
pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex,
- pDevice->PortOffset) == false) {
+ pDevice->PortOffset)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "KeybRemoveKey fail \n");
}
pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false;
@@ -557,7 +557,7 @@ static int hostap_set_encryption(PSDevice pDevice,
(unsigned char *)abyKey,
KEY_CTL_WEP,
pDevice->PortOffset,
- pDevice->byLocalID) == true) {
+ pDevice->byLocalID)) {
pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true;
} else {
@@ -624,7 +624,7 @@ static int hostap_set_encryption(PSDevice pDevice,
(unsigned char *)abyKey,
byKeyDecMode,
pDevice->PortOffset,
- pDevice->byLocalID) == true) {
+ pDevice->byLocalID)) {
pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true;
} else {
@@ -636,7 +636,7 @@ static int hostap_set_encryption(PSDevice pDevice,
}
- if (bKeyTableFull == true) {
+ if (bKeyTableFull) {
wKeyCtl &= 0x7F00; // clear all key control filed
wKeyCtl |= (byKeyDecMode << 4);
wKeyCtl |= (byKeyDecMode);
diff --git a/drivers/staging/vt6655/iwctl.c b/drivers/staging/vt6655/iwctl.c
index 4bff8aa96be7..ac3fc16704c1 100644
--- a/drivers/staging/vt6655/iwctl.c
+++ b/drivers/staging/vt6655/iwctl.c
@@ -1632,7 +1632,7 @@ int iwctl_giwsens(struct net_device *dev,
wrq->value = ldBm;
} else {
wrq->value = 0;
- };
+ }
wrq->disabled = (wrq->value == 0);
wrq->fixed = 1;
@@ -1827,7 +1827,7 @@ int iwctl_siwencodeext(struct net_device *dev,
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
struct viawget_wpa_param *param = NULL;
//original member
- wpa_alg alg_name;
+ enum wpa_alg alg_name;
u8 addr[6];
int key_idx, set_tx = 0;
u8 seq[IW_ENCODE_SEQ_MAX_SIZE];
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index 04c1304d16e5..eab3b41f9e3c 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -64,13 +64,12 @@ s_vCheckKeyTableValid(PSKeyManagement pTable, unsigned long dwIoBase)
int i;
for (i = 0; i < MAX_KEY_TABLE; i++) {
- if ((pTable->KeyTable[i].bInUse == true) &&
- (pTable->KeyTable[i].PairwiseKey.bKeyValid == false) &&
- (pTable->KeyTable[i].GroupKey[0].bKeyValid == false) &&
- (pTable->KeyTable[i].GroupKey[1].bKeyValid == false) &&
- (pTable->KeyTable[i].GroupKey[2].bKeyValid == false) &&
- (pTable->KeyTable[i].GroupKey[3].bKeyValid == false)
-) {
+ if (pTable->KeyTable[i].bInUse &&
+ !pTable->KeyTable[i].PairwiseKey.bKeyValid &&
+ !pTable->KeyTable[i].GroupKey[0].bKeyValid &&
+ !pTable->KeyTable[i].GroupKey[1].bKeyValid &&
+ !pTable->KeyTable[i].GroupKey[2].bKeyValid &&
+ !pTable->KeyTable[i].GroupKey[3].bKeyValid) {
pTable->KeyTable[i].bInUse = false;
pTable->KeyTable[i].wKeyCtl = 0;
pTable->KeyTable[i].bSoftWEP = false;
@@ -140,17 +139,17 @@ bool KeybGetKey(
*pKey = NULL;
for (i = 0; i < MAX_KEY_TABLE; i++) {
- if ((pTable->KeyTable[i].bInUse == true) &&
+ if (pTable->KeyTable[i].bInUse &&
ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if (dwKeyIndex == 0xFFFFFFFF) {
- if (pTable->KeyTable[i].PairwiseKey.bKeyValid == true) {
+ if (pTable->KeyTable[i].PairwiseKey.bKeyValid) {
*pKey = &(pTable->KeyTable[i].PairwiseKey);
return true;
} else {
return false;
}
} else if (dwKeyIndex < MAX_GROUP_KEY) {
- if (pTable->KeyTable[i].GroupKey[dwKeyIndex].bKeyValid == true) {
+ if (pTable->KeyTable[i].GroupKey[dwKeyIndex].bKeyValid) {
*pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex]);
return true;
} else {
@@ -202,12 +201,11 @@ bool KeybSetKey(
j = (MAX_KEY_TABLE-1);
for (i = 0; i < (MAX_KEY_TABLE - 1); i++) {
- if ((pTable->KeyTable[i].bInUse == false) &&
- (j == (MAX_KEY_TABLE-1))) {
+ if (!pTable->KeyTable[i].bInUse && (j == (MAX_KEY_TABLE-1))) {
// found empty table
j = i;
}
- if ((pTable->KeyTable[i].bInUse == true) &&
+ if (pTable->KeyTable[i].bInUse &&
ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
// found table already exist
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
@@ -384,7 +382,7 @@ bool KeybRemoveKey(
}
for (i = 0; i < MAX_KEY_TABLE; i++) {
- if ((pTable->KeyTable[i].bInUse == true) &&
+ if (pTable->KeyTable[i].bInUse &&
ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
@@ -428,7 +426,7 @@ bool KeybRemoveAllKey(
int i, u;
for (i = 0; i < MAX_KEY_TABLE; i++) {
- if ((pTable->KeyTable[i].bInUse == true) &&
+ if (pTable->KeyTable[i].bInUse &&
ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
for (u = 0; u < MAX_GROUP_KEY; u++) {
@@ -461,7 +459,7 @@ void KeyvRemoveWEPKey(
)
{
if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
- if (pTable->KeyTable[MAX_KEY_TABLE-1].bInUse == true) {
+ if (pTable->KeyTable[MAX_KEY_TABLE-1].bInUse) {
if (pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].byCipherSuite == KEY_CTL_WEP) {
pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = false;
if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex & 0x7FFFFFFF)) {
@@ -511,10 +509,10 @@ bool KeybGetTransmitKey(
*pKey = NULL;
for (i = 0; i < MAX_KEY_TABLE; i++) {
- if ((pTable->KeyTable[i].bInUse == true) &&
+ if (pTable->KeyTable[i].bInUse &&
ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
if (dwKeyType == PAIRWISE_KEY) {
- if (pTable->KeyTable[i].PairwiseKey.bKeyValid == true) {
+ if (pTable->KeyTable[i].PairwiseKey.bKeyValid) {
*pKey = &(pTable->KeyTable[i].PairwiseKey);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "KeybGetTransmitKey:");
@@ -535,7 +533,7 @@ bool KeybGetTransmitKey(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ERROR: dwGTKeyIndex == 0 !!!\n");
return false;
}
- if (pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)].bKeyValid == true) {
+ if (pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)].bKeyValid) {
*pKey = &(pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)]);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "KeybGetTransmitKey:");
@@ -583,8 +581,8 @@ bool KeybCheckPairewiseKey(
*pKey = NULL;
for (i = 0; i < MAX_KEY_TABLE; i++) {
- if ((pTable->KeyTable[i].bInUse == true) &&
- (pTable->KeyTable[i].PairwiseKey.bKeyValid == true)) {
+ if (pTable->KeyTable[i].bInUse &&
+ pTable->KeyTable[i].PairwiseKey.bKeyValid) {
*pKey = &(pTable->KeyTable[i].PairwiseKey);
return true;
}
@@ -657,7 +655,7 @@ bool KeybSetDefaultKey(
pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0x4000; // disable on-fly disable address match
pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP = true;
} else {
- if (pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP == false)
+ if (!pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP)
pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0xC000; // enable on-fly disable address match
}
@@ -740,7 +738,7 @@ bool KeybSetAllGroupKey(
}
for (i = 0; i < MAX_KEY_TABLE - 1; i++) {
- if (pTable->KeyTable[i].bInUse == true) {
+ if (pTable->KeyTable[i].bInUse) {
// found table already exist
// Group key
pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF]);
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 001d15c0fa40..21bd8a1126d7 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -957,13 +957,13 @@ bool MACbSafeStop(unsigned long dwIoBase)
{
MACvRegBitsOff(dwIoBase, MAC_REG_TCR, TCR_AUTOBCNTX);
- if (MACbSafeRxOff(dwIoBase) == false) {
+ if (!MACbSafeRxOff(dwIoBase)) {
DBG_PORT80(0xA1);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " MACbSafeRxOff == false)\n");
MACbSafeSoftwareReset(dwIoBase);
return false;
}
- if (MACbSafeTxOff(dwIoBase) == false) {
+ if (!MACbSafeTxOff(dwIoBase)) {
DBG_PORT80(0xA2);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " MACbSafeTxOff == false)\n");
MACbSafeSoftwareReset(dwIoBase);
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 2340d2f0399b..4bd1ccb79515 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -260,7 +260,7 @@ PSvSendPSPOLL(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send PS-Poll packet failed..\n");
} else {
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send PS-Poll packet success..\n");
- };
+ }
return;
}
@@ -284,16 +284,15 @@ PSbSendNullPacket(
PSMgmtObject pMgmt = pDevice->pMgmt;
unsigned int uIdx;
- if (pDevice->bLinkPass == false) {
+ if (!pDevice->bLinkPass) {
return false;
}
#ifdef TxInSleep
- if ((pDevice->bEnablePSMode == false) &&
- (pDevice->fTxDataInSleep == false)) {
+ if (!pDevice->bEnablePSMode && !pDevice->fTxDataInSleep) {
return false;
}
#else
- if (pDevice->bEnablePSMode == false) {
+ if (!pDevice->bEnablePSMode) {
return false;
}
#endif
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index ce173cc16c19..edb1b2768b17 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -976,7 +976,7 @@ bool RFbSetPower(
}
bResult = RFbRawSetPower(pDevice, byPwr, uRATE);
- if (bResult == true) {
+ if (bResult) {
pDevice->byCurPwr = byPwr;
}
return bResult;
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 3a2661e2b27c..6affd6edac0d 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -435,7 +435,7 @@ s_uGetDataDuration(
switch (byDurType) {
case DATADUR_B: //DATADUR_B
- if (((uMACfragNum == 1)) || (bLastFrag == 1)) {//Non Frag or Last Frag
+ if (((uMACfragNum == 1)) || bLastFrag) {//Non Frag or Last Frag
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
return pDevice->uSIFS + uAckTime;
@@ -458,7 +458,7 @@ s_uGetDataDuration(
break;
case DATADUR_A: //DATADUR_A
- if (((uMACfragNum == 1)) || (bLastFrag == 1)) {//Non Frag or Last Frag
+ if (((uMACfragNum == 1)) || bLastFrag) {//Non Frag or Last Frag
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return pDevice->uSIFS + uAckTime;
@@ -481,7 +481,7 @@ s_uGetDataDuration(
break;
case DATADUR_A_F0: //DATADUR_A_F0
- if (((uMACfragNum == 1)) || (bLastFrag == 1)) {//Non Frag or Last Frag
+ if (((uMACfragNum == 1)) || bLastFrag) {//Non Frag or Last Frag
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return pDevice->uSIFS + uAckTime;
@@ -523,7 +523,7 @@ s_uGetDataDuration(
break;
case DATADUR_A_F1: //DATADUR_A_F1
- if (((uMACfragNum == 1)) || (bLastFrag == 1)) {//Non Frag or Last Frag
+ if (((uMACfragNum == 1)) || bLastFrag) {//Non Frag or Last Frag
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return pDevice->uSIFS + uAckTime;
@@ -2212,7 +2212,7 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
else {
bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
- };
+ }
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) ||
(pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
@@ -2686,7 +2686,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, unsigned char *pbMPDU, un
}
bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
- };
+ }
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) ||
(pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
diff --git a/drivers/staging/vt6655/vntwifi.c b/drivers/staging/vt6655/vntwifi.c
index d2bdb71fe62d..e78aedf99077 100644
--- a/drivers/staging/vt6655/vntwifi.c
+++ b/drivers/staging/vt6655/vntwifi.c
@@ -494,7 +494,7 @@ VNTWIFIvUpdateNodeTxCounter(
}
}
pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts++;
- if (bTxOk == true) {
+ if (bTxOk) {
// transmit success, TxAttempts at least plus one
pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
pMgmt->sNodeDBTable[uNodeIndex].uTxOk[wRate]++;
@@ -584,7 +584,7 @@ VNTWIFIbyGetKeyCypher(
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
- if (bGroupKey == true) {
+ if (bGroupKey) {
return pMgmt->byCSSGK;
} else {
return pMgmt->byCSSPK;
@@ -731,7 +731,7 @@ VNTWIFIbMeasureReport(
pMgmt->uLengthOfRepEIDs += (2 + pMgmt->pCurrMeasureEIDRep->len);
pMgmt->pCurrMeasureEIDRep = (PWLAN_IE_MEASURE_REP) pbyCurrentEID;
}
- if (bEndOfReport == true) {
+ if (bEndOfReport) {
IEEE11hbMSRRepTx(pMgmt);
}
//spin_unlock_irq(&pDevice->lock);
diff --git a/drivers/staging/vt6655/wcmd.c b/drivers/staging/vt6655/wcmd.c
index 9c57eefe78fb..72caaa203ddc 100644
--- a/drivers/staging/vt6655/wcmd.c
+++ b/drivers/staging/vt6655/wcmd.c
@@ -317,7 +317,7 @@ vCommandTimer(
if (pDevice->dwDiagRefCount != 0)
return;
- if (pDevice->bCmdRunning != true)
+ if (!pDevice->bCmdRunning)
return;
spin_lock_irq(&pDevice->lock);
@@ -326,7 +326,7 @@ vCommandTimer(
case WLAN_CMD_SCAN_START:
pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff == true) {
+ if (pDevice->bRadioOff) {
s_bCommandComplete(pDevice);
spin_unlock_irq(&pDevice->lock);
return;
@@ -393,7 +393,7 @@ vCommandTimer(
vAdHocBeaconStop(pDevice);
- if (set_channel(pMgmt->pAdapter, pMgmt->uScanChannel) == true) {
+ if (set_channel(pMgmt->pAdapter, pMgmt->uScanChannel)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "SCAN Channel: %d\n", pMgmt->uScanChannel);
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "SET SCAN Channel Fail: %d\n", pMgmt->uScanChannel);
@@ -408,7 +408,7 @@ vCommandTimer(
}
- if ((pMgmt->b11hEnable == false) ||
+ if (!pMgmt->b11hEnable ||
(pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) {
s_vProbeChannel(pDevice);
spin_unlock_irq(&pDevice->lock);
@@ -498,7 +498,7 @@ vCommandTimer(
case WLAN_CMD_SSID_START:
pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff == true) {
+ if (pDevice->bRadioOff) {
s_bCommandComplete(pDevice);
spin_unlock_irq(&pDevice->lock);
return;
@@ -659,7 +659,7 @@ vCommandTimer(
netif_wake_queue(pDevice->dev);
}
#ifdef TxInSleep
- if (pDevice->IsTxDataTrigger != false) { //TxDataTimer is not triggered at the first time
+ if (pDevice->IsTxDataTrigger) { //TxDataTimer is not triggered at the first time
del_timer(&pDevice->sTimerTxData);
init_timer(&pDevice->sTimerTxData);
pDevice->sTimerTxData.data = (unsigned long) pDevice;
@@ -694,7 +694,7 @@ vCommandTimer(
pMgmt->eCurrState = WMAC_STATE_IDLE;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
pDevice->bLinkPass = false;
- if (pDevice->bEnableHostWEP == true)
+ if (pDevice->bEnableHostWEP)
BSSvClearNodeDBTable(pDevice, 1);
else
BSSvClearNodeDBTable(pDevice, 0);
@@ -776,7 +776,7 @@ vCommandTimer(
case WLAN_CMD_RADIO_START:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "eCommandState == WLAN_CMD_RADIO_START\n");
- if (pDevice->bRadioCmd == true)
+ if (pDevice->bRadioCmd)
CARDbRadioPowerOn(pDevice);
else
CARDbRadioPowerOff(pDevice);
@@ -948,7 +948,7 @@ bool bScheduleCommand(
ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdEnqueueIdx, CMD_Q_SIZE);
pDevice->cbFreeCmdQueue--;
- if (pDevice->bCmdRunning == false) {
+ if (!pDevice->bCmdRunning) {
s_bCommandComplete(pDevice);
} else {
}
@@ -1031,8 +1031,8 @@ BSSvSecondTxData(
spin_lock_irq(&pDevice->lock);
#if 1
- if (((pDevice->bLinkPass == true) && (pMgmt->eAuthenMode < WMAC_AUTH_WPA)) || //open && sharekey linking
- (pDevice->fWPA_Authened == true)) { //wpa linking
+ if ((pDevice->bLinkPass && (pMgmt->eAuthenMode < WMAC_AUTH_WPA)) || //open && sharekey linking
+ pDevice->fWPA_Authened) { //wpa linking
#else
if (pDevice->bLinkPass == true) {
#endif
diff --git a/drivers/staging/vt6655/wctl.c b/drivers/staging/vt6655/wctl.c
index f05f9f55398b..950039f6d128 100644
--- a/drivers/staging/vt6655/wctl.c
+++ b/drivers/staging/vt6655/wctl.c
@@ -110,7 +110,7 @@ unsigned int WCTLuSearchDFCB(PSDevice pDevice, PS802_11Header pMACHeader)
unsigned int ii;
for (ii = 0; ii < pDevice->cbDFCB; ii++) {
- if ((pDevice->sRxDFCB[ii].bInUse == true) &&
+ if (pDevice->sRxDFCB[ii].bInUse &&
ether_addr_equal(pDevice->sRxDFCB[ii].abyAddr2,
pMACHeader->abyAddr2)) {
//
@@ -141,7 +141,7 @@ unsigned int WCTLuInsertDFCB(PSDevice pDevice, PS802_11Header pMACHeader)
if (pDevice->cbFreeDFCB == 0)
return pDevice->cbDFCB;
for (ii = 0; ii < pDevice->cbDFCB; ii++) {
- if (pDevice->sRxDFCB[ii].bInUse == false) {
+ if (!pDevice->sRxDFCB[ii].bInUse) {
pDevice->cbFreeDFCB--;
pDevice->sRxDFCB[ii].uLifetime = pDevice->dwMaxReceiveLifetime;
pDevice->sRxDFCB[ii].bInUse = true;
@@ -174,7 +174,7 @@ bool WCTLbHandleFragment(PSDevice pDevice, PS802_11Header pMACHeader, unsigned i
{
unsigned int uHeaderSize;
- if (bWEP == true) {
+ if (bWEP) {
uHeaderSize = 28;
if (bExtIV)
// ExtIV
diff --git a/drivers/staging/vt6655/wmgr.c b/drivers/staging/vt6655/wmgr.c
index ed4b32b6d9ce..5200a2a0ecca 100644
--- a/drivers/staging/vt6655/wmgr.c
+++ b/drivers/staging/vt6655/wmgr.c
@@ -468,15 +468,15 @@ vMgrAssocBeginSta(
// ERP Phy (802.11g) should support short preamble.
if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- if (CARDbIsShorSlotTime(pMgmt->pAdapter) == true) {
+ if (CARDbIsShorSlotTime(pMgmt->pAdapter)) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1);
}
} else if (pMgmt->eCurrentPHYMode == PHY_TYPE_11B) {
- if (CARDbIsShortPreamble(pMgmt->pAdapter) == true) {
+ if (CARDbIsShortPreamble(pMgmt->pAdapter)) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
}
}
- if (pMgmt->b11hEnable == true)
+ if (pMgmt->b11hEnable)
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
/* build an assocreq frame and send it */
@@ -539,15 +539,15 @@ vMgrReAssocBeginSta(
// ERP Phy (802.11g) should support short preamble.
if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- if (CARDbIsShorSlotTime(pMgmt->pAdapter) == true) {
+ if (CARDbIsShorSlotTime(pMgmt->pAdapter)) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1);
}
} else if (pMgmt->eCurrentPHYMode == PHY_TYPE_11B) {
- if (CARDbIsShortPreamble(pMgmt->pAdapter) == true) {
+ if (CARDbIsShortPreamble(pMgmt->pAdapter)) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
}
}
- if (pMgmt->b11hEnable == true)
+ if (pMgmt->b11hEnable)
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
pTxPacket = s_MgrMakeReAssocRequest
@@ -736,7 +736,7 @@ s_vMgrRxAssocRequest(
pDevice->bProtectMode = true;
pDevice->bNonERPPresent = true;
}
- if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble == false) {
+ if (!pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble) {
pDevice->bBarkerPreambleMd = true;
}
@@ -891,7 +891,7 @@ s_vMgrRxReAssocRequest(
pDevice->bProtectMode = true;
pDevice->bNonERPPresent = true;
}
- if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble == false) {
+ if (!pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble) {
pDevice->bBarkerPreambleMd = true;
}
@@ -1719,7 +1719,7 @@ s_vMgrRxDeauthentication(
}
/* else, ignore it. TODO: IBSS authentication service
would be implemented here */
- };
+ }
return;
}
@@ -1837,7 +1837,7 @@ s_vMgrRxBeacon(
bChannelHit = true;
}
//2008-0730-01<Add>by MikeLiu
- if (ChannelExceedZoneType(pDevice, byCurrChannel) == true)
+ if (ChannelExceedZoneType(pDevice, byCurrChannel))
return;
if (sFrame.pERP != NULL) {
@@ -1957,9 +1957,9 @@ s_vMgrRxBeacon(
}
}
- if ((WLAN_GET_CAP_INFO_ESS(*sFrame.pwCapInfo) == true) &&
- (bIsBSSIDEqual == true) &&
- (bIsSSIDEqual == true) &&
+ if (WLAN_GET_CAP_INFO_ESS(*sFrame.pwCapInfo) &&
+ bIsBSSIDEqual &&
+ bIsSSIDEqual &&
(pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
(pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
// add state check to prevent reconnect fail since we'll receive Beacon
@@ -2001,7 +2001,7 @@ s_vMgrRxBeacon(
&(pMgmt->sNodeDBTable[0].byTopCCKBasicRate),
&(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate)
);
- if (bUpdatePhyParameter == true) {
+ if (bUpdatePhyParameter) {
CARDbSetPhyParameter(pMgmt->pAdapter,
pMgmt->eCurrentPHYMode,
pMgmt->wCurrCapInfo,
@@ -2023,7 +2023,7 @@ s_vMgrRxBeacon(
sFrame.pIE_CHSW->byCount
);
- } else if (bIsChannelEqual == false) {
+ } else if (!bIsChannelEqual) {
set_channel(pMgmt->pAdapter, pBSSList->uChannel);
}
}
@@ -2067,12 +2067,12 @@ s_vMgrRxBeacon(
}
// if infra mode
- if (bIsAPBeacon == true) {
+ if (bIsAPBeacon) {
// Infra mode: Local TSF always follow AP's TSF if Difference huge.
if (bTSFLargeDiff)
bUpdateTSF = true;
- if ((pDevice->bEnablePSMode == true) && (sFrame.pTIM != 0)) {
+ if (pDevice->bEnablePSMode && (sFrame.pTIM != 0)) {
// deal with DTIM, analysis TIM
pMgmt->bMulticastTIM = WLAN_MGMT_IS_MULTICAST_TIM(sFrame.pTIM->byBitMapCtl) ? true : false;
pMgmt->byDTIMCount = sFrame.pTIM->byDTIMCount;
@@ -2092,10 +2092,10 @@ s_vMgrRxBeacon(
pMgmt->bInTIM = sFrame.pTIM->byVirtBitMap[uLocateByteIndex] & byTIMBitOn ? true : false;
} else {
pMgmt->bInTIM = false;
- };
+ }
} else {
pMgmt->bInTIM = false;
- };
+ }
if (pMgmt->bInTIM ||
(pMgmt->bMulticastTIM && (pMgmt->byDTIMCount == 0))) {
@@ -2110,7 +2110,7 @@ s_vMgrRxBeacon(
} else {
pMgmt->bInTIMWake = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN: Not In TIM..\n");
- if (pDevice->bPWBitOn == false) {
+ if (!pDevice->bPWBitOn) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN: Send Null Packet\n");
if (PSbSendNullPacket(pDevice))
pDevice->bPWBitOn = true;
@@ -2332,10 +2332,10 @@ vMgrCreateOwnIBSS(
}
// Disable Protect Mode
- pDevice->bProtectMode = 0;
+ pDevice->bProtectMode = false;
MACvDisableProtectMD(pDevice->PortOffset);
- pDevice->bBarkerPreambleMd = 0;
+ pDevice->bBarkerPreambleMd = false;
MACvDisableBarkerPreambleMd(pDevice->PortOffset);
// Kyle Test 2003.11.04
@@ -2480,7 +2480,7 @@ vMgrCreateOwnIBSS(
pMgmt->wCurrCapInfo &= (~WLAN_SET_CAP_INFO_SHORTPREAMBLE(1));
}
- if ((pMgmt->b11hEnable == true) &&
+ if (pMgmt->b11hEnable &&
(pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
} else {
@@ -2530,7 +2530,7 @@ vMgrJoinBSSBegin(
unsigned char byTopOFDMBasicRate = RATE_1M;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- if (pMgmt->sBSSList[ii].bActive == true)
+ if (pMgmt->sBSSList[ii].bActive)
break;
}
@@ -2656,7 +2656,7 @@ vMgrJoinBSSBegin(
if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) {
bool bResult = bAdd_PMKID_Candidate((void *)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bAdd_PMKID_Candidate: 1(%d)\n", bResult);
- if (bResult == false) {
+ if (!bResult) {
vFlush_PMKID_Candidate((void *)pDevice);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "vFlush_PMKID_Candidate: 4\n");
bAdd_PMKID_Candidate((void *)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
@@ -2671,19 +2671,19 @@ vMgrJoinBSSBegin(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "End of Join AP -- A/B/G Action\n");
} else {
pMgmt->eCurrState = WMAC_STATE_IDLE;
- };
+ }
} else {
// ad-hoc mode BSS
if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- if (WPA_SearchRSN(0, WPA_TKIP, pCurr) == false) {
+ if (!WPA_SearchRSN(0, WPA_TKIP, pCurr)) {
// encryption mode error
pMgmt->eCurrState = WMAC_STATE_IDLE;
return;
}
} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- if (WPA_SearchRSN(0, WPA_AESCCMP, pCurr) == false) {
+ if (!WPA_SearchRSN(0, WPA_AESCCMP, pCurr)) {
// encryption mode error
pMgmt->eCurrState = WMAC_STATE_IDLE;
return;
@@ -2740,8 +2740,8 @@ vMgrJoinBSSBegin(
bMgrPrepareBeaconToSend((void *)pDevice, pMgmt);
} else {
pMgmt->eCurrState = WMAC_STATE_IDLE;
- };
- };
+ }
+ }
return;
}
@@ -2776,10 +2776,10 @@ s_vMgrSynchBSS(
*pStatus = CMD_STATUS_FAILURE;
- if (s_bCipherMatch(pCurr,
+ if (!s_bCipherMatch(pCurr,
pDevice->eEncryptionStatus,
&(pMgmt->byCSSPK),
- &(pMgmt->byCSSGK)) == false) {
+ &(pMgmt->byCSSGK))) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "s_bCipherMatch Fail .......\n");
return;
}
@@ -2869,18 +2869,17 @@ s_vMgrSynchBSS(
CARDbSetBSSID(pMgmt->pAdapter, pCurr->abyBSSID, OP_MODE_ADHOC);
}
- if (CARDbSetPhyParameter(pMgmt->pAdapter,
+ if (!CARDbSetPhyParameter(pMgmt->pAdapter,
ePhyType,
pCurr->wCapInfo,
pCurr->sERP.byERP,
pMgmt->abyCurrSuppRates,
- pMgmt->abyCurrExtSuppRates
- ) != true) {
+ pMgmt->abyCurrExtSuppRates)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "<----s_bSynchBSS Set Phy Mode Fail [%d]\n", ePhyType);
return;
}
// set channel and clear NAV
- if (set_channel(pMgmt->pAdapter, pCurr->uChannel) == false) {
+ if (!set_channel(pMgmt->pAdapter, pCurr->uChannel)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "<----s_bSynchBSS Set Channel [%d]\n", pCurr->uChannel);
return;
}
@@ -2924,7 +2923,7 @@ static void Encyption_Rebuild(
if ((pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) || //networkmanager 0.7.0 does not give the pairwise-key selection,
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) { // so we need re-select it according to real pairwise-key info.
- if (pCurr->bWPAValid == true) { //WPA-PSK
+ if (pCurr->bWPAValid) { //WPA-PSK
pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
if (pCurr->abyPKType[0] == WPA_TKIP) {
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; //TKIP
@@ -2933,7 +2932,7 @@ static void Encyption_Rebuild(
pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; //AES
PRINT_K("Encyption_Rebuild--->ssid reset config to [WPAPSK-AES]\n");
}
- } else if (pCurr->bWPA2Valid == true) { //WPA2-PSK
+ } else if (pCurr->bWPA2Valid) { //WPA2-PSK
pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
if (pCurr->abyCSSPK[0] == WLAN_11i_CSS_TKIP) {
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; //TKIP
@@ -3150,8 +3149,7 @@ s_MgrMakeBeacon(
}
}
- if ((pMgmt->b11hEnable == true) &&
- (pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
+ if (pMgmt->b11hEnable && (pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
// Country IE
pbyBuffer = (unsigned char *)(sFrame.pBuf + sFrame.len);
set_country_IE(pMgmt->pAdapter, pbyBuffer);
@@ -3164,7 +3162,7 @@ s_MgrMakeBeacon(
((PWLAN_IE_PW_CONST) pbyBuffer)->byPower = 0;
pbyBuffer += (1) + WLAN_IEHDR_LEN;
uLength += (1) + WLAN_IEHDR_LEN;
- if (pMgmt->bSwitchChannel == true) {
+ if (pMgmt->bSwitchChannel) {
// Channel Switch IE
((PWLAN_IE_CH_SW) pbyBuffer)->byElementID = WLAN_EID_CH_SWITCH;
((PWLAN_IE_CH_SW) pbyBuffer)->len = 3;
@@ -3193,7 +3191,7 @@ s_MgrMakeBeacon(
pbyBuffer += (7) + WLAN_IEHDR_LEN;
uLength += (7) + WLAN_IEHDR_LEN;
for (ii = CB_MAX_CHANNEL_24G+1; ii <= CB_MAX_CHANNEL; ii++) {
- if (get_channel_map_info(pMgmt->pAdapter, ii, pbyBuffer, pbyBuffer+1) == true) {
+ if (get_channel_map_info(pMgmt->pAdapter, ii, pbyBuffer, pbyBuffer+1)) {
pbyBuffer += 2;
uLength += 2;
pIBSSDFS->len += 2;
@@ -3209,11 +3207,11 @@ s_MgrMakeBeacon(
sFrame.pERP->byElementID = WLAN_EID_ERP;
sFrame.pERP->len = 1;
sFrame.pERP->byContext = 0;
- if (pDevice->bProtectMode == true)
+ if (pDevice->bProtectMode)
sFrame.pERP->byContext |= WLAN_EID_ERP_USE_PROTECTION;
- if (pDevice->bNonERPPresent == true)
+ if (pDevice->bNonERPPresent)
sFrame.pERP->byContext |= WLAN_EID_ERP_NONERP_PRESENT;
- if (pDevice->bBarkerPreambleMd == true)
+ if (pDevice->bBarkerPreambleMd)
sFrame.pERP->byContext |= WLAN_EID_ERP_BARKER_MODE;
}
if (((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len != 0) {
@@ -3225,7 +3223,7 @@ s_MgrMakeBeacon(
);
}
// hostapd wpa/wpa2 IE
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnableHostapd == true)) {
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && pDevice->bEnableHostapd) {
if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
if (pMgmt->wWPAIELen != 0) {
sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
@@ -3338,16 +3336,15 @@ s_MgrMakeProbeResponse(
sFrame.pERP->byElementID = WLAN_EID_ERP;
sFrame.pERP->len = 1;
sFrame.pERP->byContext = 0;
- if (pDevice->bProtectMode == true)
+ if (pDevice->bProtectMode)
sFrame.pERP->byContext |= WLAN_EID_ERP_USE_PROTECTION;
- if (pDevice->bNonERPPresent == true)
+ if (pDevice->bNonERPPresent)
sFrame.pERP->byContext |= WLAN_EID_ERP_NONERP_PRESENT;
- if (pDevice->bBarkerPreambleMd == true)
+ if (pDevice->bBarkerPreambleMd)
sFrame.pERP->byContext |= WLAN_EID_ERP_BARKER_MODE;
}
- if ((pMgmt->b11hEnable == true) &&
- (pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
+ if (pMgmt->b11hEnable && (pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
// Country IE
pbyBuffer = (unsigned char *)(sFrame.pBuf + sFrame.len);
set_country_IE(pMgmt->pAdapter, pbyBuffer);
@@ -3360,7 +3357,7 @@ s_MgrMakeProbeResponse(
((PWLAN_IE_PW_CONST) pbyBuffer)->byPower = 0;
pbyBuffer += (1) + WLAN_IEHDR_LEN;
uLength += (1) + WLAN_IEHDR_LEN;
- if (pMgmt->bSwitchChannel == true) {
+ if (pMgmt->bSwitchChannel) {
// Channel Switch IE
((PWLAN_IE_CH_SW) pbyBuffer)->byElementID = WLAN_EID_CH_SWITCH;
((PWLAN_IE_CH_SW) pbyBuffer)->len = 3;
@@ -3389,7 +3386,7 @@ s_MgrMakeProbeResponse(
pbyBuffer += (7) + WLAN_IEHDR_LEN;
uLength += (7) + WLAN_IEHDR_LEN;
for (ii = CB_MAX_CHANNEL_24G + 1; ii <= CB_MAX_CHANNEL; ii++) {
- if (get_channel_map_info(pMgmt->pAdapter, ii, pbyBuffer, pbyBuffer+1) == true) {
+ if (get_channel_map_info(pMgmt->pAdapter, ii, pbyBuffer, pbyBuffer+1)) {
pbyBuffer += 2;
uLength += 2;
pIBSSDFS->len += 2;
@@ -3409,7 +3406,7 @@ s_MgrMakeProbeResponse(
}
// hostapd wpa/wpa2 IE
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnableHostapd == true)) {
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && pDevice->bEnableHostapd) {
if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
if (pMgmt->wWPAIELen != 0) {
sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
@@ -3507,7 +3504,7 @@ s_MgrMakeAssocRequest(
pbyIEs += pCurrRates->len + WLAN_IEHDR_LEN;
// for 802.11h
- if (pMgmt->b11hEnable == true) {
+ if (pMgmt->b11hEnable) {
if (sFrame.pCurrPowerCap == NULL) {
sFrame.pCurrPowerCap = (PWLAN_IE_PW_CAP)(sFrame.pBuf + sFrame.len);
sFrame.len += (2 + WLAN_IEHDR_LEN);
@@ -3650,7 +3647,7 @@ s_MgrMakeAssocRequest(
sFrame.pRSN->len += 6;
// RSN Capabilities
- if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == true) {
+ if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist) {
memcpy(&sFrame.pRSN->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
} else {
sFrame.pRSN->abyRSN[16] = 0;
@@ -3658,7 +3655,7 @@ s_MgrMakeAssocRequest(
}
sFrame.pRSN->len += 2;
- if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && (pDevice->bRoaming == true) && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
+ if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && pDevice->bRoaming && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
// RSN PMKID
pbyRSN = &sFrame.pRSN->abyRSN[18];
pwPMKID = (unsigned short *)pbyRSN; // Point to PMKID count
@@ -3896,7 +3893,7 @@ s_MgrMakeReAssocRequest(
sFrame.pRSN->len += 6;
// RSN Capabilities
- if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == true) {
+ if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist) {
memcpy(&sFrame.pRSN->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
} else {
sFrame.pRSN->abyRSN[16] = 0;
@@ -3904,7 +3901,7 @@ s_MgrMakeReAssocRequest(
}
sFrame.pRSN->len += 2;
- if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && (pDevice->bRoaming == true) && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
+ if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && pDevice->bRoaming && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
// RSN PMKID
pbyRSN = &sFrame.pRSN->abyRSN[18];
pwPMKID = (unsigned short *)pbyRSN; // Point to PMKID count
@@ -4141,7 +4138,7 @@ s_vMgrRxProbeResponse(
}
//2008-0730-01<Add>by MikeLiu
- if (ChannelExceedZoneType(pDevice, byCurrChannel) == true)
+ if (ChannelExceedZoneType(pDevice, byCurrChannel))
return;
if (sFrame.pERP != NULL) {
@@ -4578,7 +4575,7 @@ bAdd_PMKID_Candidate(
for (ii = 0; ii < pDevice->gsPMKIDCandidate.NumCandidates; ii++) {
pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii];
if (!memcmp(pCandidateList->BSSID, pbyBSSID, ETH_ALEN)) {
- if ((psRSNCapObj->bRSNCapExist == true) && (psRSNCapObj->wRSNCap & BIT0)) {
+ if (psRSNCapObj->bRSNCapExist && (psRSNCapObj->wRSNCap & BIT0)) {
pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
} else {
pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
@@ -4589,7 +4586,7 @@ bAdd_PMKID_Candidate(
// New Candidate
pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[pDevice->gsPMKIDCandidate.NumCandidates];
- if ((psRSNCapObj->bRSNCapExist == true) && (psRSNCapObj->wRSNCap & BIT0)) {
+ if (psRSNCapObj->bRSNCapExist && (psRSNCapObj->wRSNCap & BIT0)) {
pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
} else {
pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
@@ -4650,7 +4647,7 @@ s_bCipherMatch(
}
if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
- (pBSSNode->bWPA2Valid == true) &&
+ pBSSNode->bWPA2Valid &&
//20080123-01,<Add> by Einsn Liu
((EncStatus == Ndis802_11Encryption3Enabled) || (EncStatus == Ndis802_11Encryption2Enabled))) {
//WPA2
@@ -4684,7 +4681,7 @@ s_bCipherMatch(
}
} else if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
- (pBSSNode->bWPAValid == true) &&
+ pBSSNode->bWPAValid &&
((EncStatus == Ndis802_11Encryption3Enabled) || (EncStatus == Ndis802_11Encryption2Enabled))) {
//WPA
// check Group Key Cipher
diff --git a/drivers/staging/vt6655/wpa.c b/drivers/staging/vt6655/wpa.c
index b697fa6c3b16..990ea0f9e9fb 100644
--- a/drivers/staging/vt6655/wpa.c
+++ b/drivers/staging/vt6655/wpa.c
@@ -241,7 +241,7 @@ WPA_SearchRSN(
int ii;
unsigned char byPKType = WPA_NONE;
- if (pBSSList->bWPAValid == false)
+ if (!pBSSList->bWPAValid)
return false;
switch (byCmd) {
diff --git a/drivers/staging/vt6655/wpa2.c b/drivers/staging/vt6655/wpa2.c
index 089788dfba63..2013122e92b2 100644
--- a/drivers/staging/vt6655/wpa2.c
+++ b/drivers/staging/vt6655/wpa2.c
@@ -42,14 +42,14 @@ static int msglevel = MSG_LEVEL_INFO;
/*--------------------- Static Variables --------------------------*/
-const unsigned char abyOUIGK[4] = { 0x00, 0x0F, 0xAC, 0x00 };
-const unsigned char abyOUIWEP40[4] = { 0x00, 0x0F, 0xAC, 0x01 };
-const unsigned char abyOUIWEP104[4] = { 0x00, 0x0F, 0xAC, 0x05 };
-const unsigned char abyOUITKIP[4] = { 0x00, 0x0F, 0xAC, 0x02 };
-const unsigned char abyOUICCMP[4] = { 0x00, 0x0F, 0xAC, 0x04 };
+static const unsigned char abyOUIGK[4] = { 0x00, 0x0F, 0xAC, 0x00 };
+static const unsigned char abyOUIWEP40[4] = { 0x00, 0x0F, 0xAC, 0x01 };
+static const unsigned char abyOUIWEP104[4] = { 0x00, 0x0F, 0xAC, 0x05 };
+static const unsigned char abyOUITKIP[4] = { 0x00, 0x0F, 0xAC, 0x02 };
+static const unsigned char abyOUICCMP[4] = { 0x00, 0x0F, 0xAC, 0x04 };
-const unsigned char abyOUI8021X[4] = { 0x00, 0x0F, 0xAC, 0x01 };
-const unsigned char abyOUIPSK[4] = { 0x00, 0x0F, 0xAC, 0x02 };
+static const unsigned char abyOUI8021X[4] = { 0x00, 0x0F, 0xAC, 0x01 };
+static const unsigned char abyOUIPSK[4] = { 0x00, 0x0F, 0xAC, 0x02 };
/*--------------------- Static Functions --------------------------*/
@@ -192,7 +192,7 @@ WPA2vParseRSN(
break;
} //for
- if (bUseGK == true) {
+ if (bUseGK) {
if (j != 1) {
// invalid CSS, This should be only PK CSS.
return;
@@ -335,7 +335,7 @@ WPA2uSetIEs(
pRSNIEs->len += 2;
if ((pMgmt->gsPMKIDCache.BSSIDInfoCount > 0) &&
- (pMgmt->bRoaming == true) &&
+ pMgmt->bRoaming &&
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
// RSN PMKID
pwPMKID = (unsigned short *)(&pRSNIEs->abyRSN[18]); // Point to PMKID count
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
index 044368a46c53..d17224f39b4d 100644
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -202,11 +202,11 @@ int wpa_set_keys(PSDevice pDevice, void *ctx, bool fcpfkernel)
int uu, ii;
if (param->u.wpa_key.alg_name > WPA_ALG_CCMP ||
- param->u.wpa_key.key_len >= MAX_KEY_LEN ||
- param->u.wpa_key.seq_len >= MAX_KEY_LEN)
+ param->u.wpa_key.key_len > MAX_KEY_LEN ||
+ param->u.wpa_key.seq_len > MAX_KEY_LEN)
return -EINVAL;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d \n", param->u.wpa_key.alg_name);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d\n", param->u.wpa_key.alg_name);
if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
pDevice->bEncryptionEnable = false;
@@ -341,22 +341,22 @@ int wpa_set_keys(PSDevice pDevice, void *ctx, bool fcpfkernel)
// If is_broadcast_ether_addr, set the key as every key entry's group key.
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Groupe Key Assign.\n");
- if ((KeybSetAllGroupKey(&(pDevice->sKey),
+ if (KeybSetAllGroupKey(&(pDevice->sKey),
dwKeyIndex,
param->u.wpa_key.key_len,
(PQWORD) &(KeyRSC),
(unsigned char *)abyKey,
byKeyDecMode,
pDevice->PortOffset,
- pDevice->byLocalID) == true) &&
- (KeybSetDefaultKey(&(pDevice->sKey),
+ pDevice->byLocalID) &&
+ KeybSetDefaultKey(&(pDevice->sKey),
dwKeyIndex,
param->u.wpa_key.key_len,
(PQWORD) &(KeyRSC),
(unsigned char *)abyKey,
byKeyDecMode,
pDevice->PortOffset,
- pDevice->byLocalID) == true)) {
+ pDevice->byLocalID)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "GROUP Key Assign.\n");
} else {
@@ -389,7 +389,7 @@ int wpa_set_keys(PSDevice pDevice, void *ctx, bool fcpfkernel)
(unsigned char *)abyKey,
byKeyDecMode,
pDevice->PortOffset,
- pDevice->byLocalID) == true) {
+ pDevice->byLocalID)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n");
} else {
@@ -415,7 +415,7 @@ int wpa_set_keys(PSDevice pDevice, void *ctx, bool fcpfkernel)
//spin_unlock_irq(&pDevice->lock);
/*
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " key=%x-%x-%x-%x-%x-xxxxx \n",
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " key=%x-%x-%x-%x-%x-xxxxx\n",
pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][0],
pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][1],
pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[byKeyIndex][2],
@@ -596,7 +596,7 @@ static int wpa_get_scan(PSDevice pDevice,
ptempBSS = kmalloc(sizeof(KnownBSS), (int)GFP_ATOMIC);
if (ptempBSS == NULL) {
- printk("bubble sort kmalloc memory fail@@@\n");
+ printk(KERN_ERR "bubble sort kmalloc memory fail@@@\n");
ret = -ENOMEM;
@@ -804,7 +804,7 @@ static int wpa_set_associate(PSDevice pDevice,
else
pDevice->bEncryptionEnable = false;
if (!((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
- ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && (bWepEnabled == true)))) //DavidWang //20080717-06,<Modify> by chester//Not to initial WEP
+ ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && bWepEnabled))) //DavidWang //20080717-06,<Modify> by chester//Not to initial WEP
KeyvInitTable(&pDevice->sKey, pDevice->PortOffset);
spin_lock_irq(&pDevice->lock);
pDevice->bLinkPass = false;
@@ -869,18 +869,18 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
switch (param->cmd) {
case VIAWGET_SET_WPA:
ret = wpa_set_wpa(pDevice, param);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_WPA \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_WPA\n");
break;
case VIAWGET_SET_KEY:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY\n");
spin_lock_irq(&pDevice->lock);
ret = wpa_set_keys(pDevice, param, false);
spin_unlock_irq(&pDevice->lock);
break;
case VIAWGET_SET_SCAN:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_SCAN \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_SCAN\n");
ret = wpa_set_scan(pDevice, param);
break;
@@ -891,40 +891,40 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
break;
case VIAWGET_GET_SSID:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SSID \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SSID\n");
ret = wpa_get_ssid(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_GET_BSSID:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_BSSID \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_BSSID\n");
ret = wpa_get_bssid(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_SET_ASSOCIATE:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_ASSOCIATE \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_ASSOCIATE\n");
ret = wpa_set_associate(pDevice, param);
break;
case VIAWGET_SET_DISASSOCIATE:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DISASSOCIATE \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DISASSOCIATE\n");
ret = wpa_set_disassociate(pDevice, param);
break;
case VIAWGET_SET_DROP_UNENCRYPT:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DROP_UNENCRYPT \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DROP_UNENCRYPT\n");
break;
case VIAWGET_SET_DEAUTHENTICATE:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DEAUTHENTICATE \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DEAUTHENTICATE\n");
break;
default:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ioctl: unknown cmd=%d\n",
param->cmd);
- return -EOPNOTSUPP;
- break;
+ ret = -EOPNOTSUPP;
+ goto out;
}
if ((ret == 0) && wpa_ioctl) {
diff --git a/drivers/staging/vt6655/wpactl.h b/drivers/staging/vt6655/wpactl.h
index b9e2ab231f1b..f7638baf340d 100644
--- a/drivers/staging/vt6655/wpactl.h
+++ b/drivers/staging/vt6655/wpactl.h
@@ -38,11 +38,11 @@
//WPA related
-typedef enum { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP } wpa_alg;
-typedef enum { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP,
- CIPHER_WEP104 } wpa_cipher;
-typedef enum { KEY_MGMT_802_1X, KEY_MGMT_CCKM, KEY_MGMT_PSK, KEY_MGMT_NONE,
- KEY_MGMT_802_1X_NO_WPA, KEY_MGMT_WPA_NONE } wpa_key_mgmt;
+enum wpa_alg { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP };
+enum wpa_cipher { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP,
+ CIPHER_WEP104 };
+enum wpa_key_mgmt { KEY_MGMT_802_1X, KEY_MGMT_CCKM, KEY_MGMT_PSK, KEY_MGMT_NONE,
+ KEY_MGMT_802_1X_NO_WPA, KEY_MGMT_WPA_NONE };
#define AUTH_ALG_OPEN_SYSTEM 0x01
#define AUTH_ALG_SHARED_KEY 0x02
@@ -51,8 +51,6 @@ typedef enum { KEY_MGMT_802_1X, KEY_MGMT_CCKM, KEY_MGMT_PSK, KEY_MGMT_NONE,
#define GENERIC_INFO_ELEM 0xdd
#define RSN_INFO_ELEM 0x30
-typedef unsigned long long NDIS_802_11_KEY_RSC;
-
/*--------------------- Export Classes ----------------------------*/
/*--------------------- Export Variables --------------------------*/
diff --git a/drivers/staging/vt6655/wroute.c b/drivers/staging/vt6655/wroute.c
index 85302c5e2bac..c39d5ed59ded 100644
--- a/drivers/staging/vt6655/wroute.c
+++ b/drivers/staging/vt6655/wroute.c
@@ -63,7 +63,8 @@ static int msglevel = MSG_LEVEL_INFO;
* Return Value: true if packet duplicate; otherwise false
*
*/
-bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData, unsigned int uDataLen, unsigned int uNodeIndex)
+bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData,
+ unsigned int uDataLen, unsigned int uNodeIndex)
{
PSMgmtObject pMgmt = pDevice->pMgmt;
PSTxDesc pHeadTD, pLastTD;
@@ -78,7 +79,8 @@ bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData, unsigned int uData
unsigned char *pbyBSSID;
if (AVAIL_TD(pDevice, TYPE_AC0DMA) <= 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Relay can't allocate TD1..\n");
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Relay can't allocate TD1..\n");
return false;
}
@@ -86,22 +88,24 @@ bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData, unsigned int uData
pHeadTD->m_td1TD1.byTCR = (TCR_EDP | TCR_STP);
- memcpy(pDevice->sTxEthHeader.abyDstAddr, (unsigned char *)pbySkbData, ETH_HLEN);
+ memcpy(pDevice->sTxEthHeader.abyDstAddr, pbySkbData, ETH_HLEN);
cbFrameBodySize = uDataLen - ETH_HLEN;
- if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN) {
+ if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN)
cbFrameBodySize += 8;
- }
if (pDevice->bEncryptionEnable == true) {
bNeedEncryption = true;
// get group key
pbyBSSID = pDevice->abyBroadcastAddr;
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
+		if (!KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID,
+					GROUP_KEY, &pTransmitKey)) {
pTransmitKey = NULL;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "KEY is NULL. [%d]\n", pDevice->pMgmt->eCurrMode);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_DEBUG "KEY is NULL. [%d]\n",
+ pDevice->pMgmt->eCurrMode);
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "Get GTK.\n");
}
@@ -117,25 +121,24 @@ bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData, unsigned int uData
pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
memcpy(pTransmitKey->abyKey,
&pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
- pTransmitKey->uKeyLength
-);
+ pTransmitKey->uKeyLength);
}
}
- uMACfragNum = cbGetFragCount(pDevice, pTransmitKey, cbFrameBodySize, &pDevice->sTxEthHeader);
+ uMACfragNum = cbGetFragCount(pDevice, pTransmitKey,
+ cbFrameBodySize, &pDevice->sTxEthHeader);
- if (uMACfragNum > AVAIL_TD(pDevice, TYPE_AC0DMA)) {
+ if (uMACfragNum > AVAIL_TD(pDevice, TYPE_AC0DMA))
return false;
- }
- byPktType = (unsigned char)pDevice->byPacketType;
+
+ byPktType = pDevice->byPacketType;
if (pDevice->bFixRate) {
if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
- if (pDevice->uConnectionRate >= RATE_11M) {
+ if (pDevice->uConnectionRate >= RATE_11M)
pDevice->wCurrentRate = RATE_11M;
- } else {
- pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
- }
+ else
+ pDevice->wCurrentRate = pDevice->uConnectionRate;
} else {
if ((pDevice->eCurrentPHYType == PHY_TYPE_11A) &&
(pDevice->uConnectionRate <= RATE_6M)) {
@@ -144,7 +147,7 @@ bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData, unsigned int uData
if (pDevice->uConnectionRate >= RATE_54M)
pDevice->wCurrentRate = RATE_54M;
else
- pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
+ pDevice->wCurrentRate = pDevice->uConnectionRate;
}
}
} else {
@@ -154,12 +157,11 @@ bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData, unsigned int uData
if (pDevice->wCurrentRate <= RATE_11M)
byPktType = PK_TYPE_11B;
- vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff, bNeedEncryption,
- cbFrameBodySize, TYPE_AC0DMA, pHeadTD,
- &pDevice->sTxEthHeader, pbySkbData, pTransmitKey, uNodeIndex,
- &uMACfragNum,
- &cbHeaderSize
-);
+ vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff,
+ bNeedEncryption, cbFrameBodySize, TYPE_AC0DMA,
+ pHeadTD, &pDevice->sTxEthHeader, pbySkbData,
+ pTransmitKey, uNodeIndex, &uMACfragNum,
+ &cbHeaderSize);
if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
// Disable PS
diff --git a/drivers/staging/vt6656/Makefile b/drivers/staging/vt6656/Makefile
index c998547884c0..1d829b46c2e7 100644
--- a/drivers/staging/vt6656/Makefile
+++ b/drivers/staging/vt6656/Makefile
@@ -16,7 +16,6 @@ vt6656_stage-y += main_usb.o \
dpc.o \
power.o \
datarate.o \
- mib.o \
rc4.o \
tether.o \
tcrc.o \
diff --git a/drivers/staging/vt6656/aes_ccmp.c b/drivers/staging/vt6656/aes_ccmp.c
index 6c7693911cd6..61b9f7bdb858 100644
--- a/drivers/staging/vt6656/aes_ccmp.c
+++ b/drivers/staging/vt6656/aes_ccmp.c
@@ -37,7 +37,7 @@
* SBOX Table
*/
-u8 sbox_table[256] = {
+static u8 sbox_table[256] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
@@ -56,7 +56,7 @@ u8 sbox_table[256] = {
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};
-u8 dot2_table[256] = {
+static u8 dot2_table[256] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
@@ -75,7 +75,7 @@ u8 dot2_table[256] = {
0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5
};
-u8 dot3_table[256] = {
+static u8 dot3_table[256] = {
0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11,
0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21,
0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71,
@@ -115,7 +115,7 @@ static void xor_32(u8 *a, u8 *b, u8 *out)
(*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
}
-void AddRoundKey(u8 *key, int round)
+static void AddRoundKey(u8 *key, int round)
{
u8 sbox_key[4];
u8 rcon_table[10] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36};
@@ -133,7 +133,7 @@ void AddRoundKey(u8 *key, int round)
xor_32(&key[12], &key[8], &key[12]);
}
-void SubBytes(u8 *in, u8 *out)
+static void SubBytes(u8 *in, u8 *out)
{
int i;
@@ -141,7 +141,7 @@ void SubBytes(u8 *in, u8 *out)
out[i] = sbox_table[in[i]];
}
-void ShiftRows(u8 *in, u8 *out)
+static void ShiftRows(u8 *in, u8 *out)
{
out[0] = in[0];
out[1] = in[5];
@@ -161,7 +161,7 @@ void ShiftRows(u8 *in, u8 *out)
out[15] = in[11];
}
-void MixColumns(u8 *in, u8 *out)
+static void MixColumns(u8 *in, u8 *out)
{
out[0] = dot2_table[in[0]] ^ dot3_table[in[1]] ^ in[2] ^ in[3];
@@ -170,7 +170,7 @@ void MixColumns(u8 *in, u8 *out)
out[3] = dot3_table[in[0]] ^ in[1] ^ in[2] ^ dot2_table[in[3]];
}
-void AESv128(u8 *key, u8 *data, u8 *ciphertext)
+static void AESv128(u8 *key, u8 *data, u8 *ciphertext)
{
int i;
int round;
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 4aa5ef54b683..3d4610e25fca 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -48,7 +48,7 @@
static int msglevel =MSG_LEVEL_INFO;
//static int msglevel =MSG_LEVEL_DEBUG;
-u8 abyVT3184_AGC[] = {
+static u8 abyVT3184_AGC[] = {
0x00, //0
0x00, //1
0x02, //2
@@ -115,7 +115,7 @@ u8 abyVT3184_AGC[] = {
0x3E //3F
};
-u8 abyVT3184_AL2230[] = {
+static u8 abyVT3184_AL2230[] = {
0x31,//00
0x00,
0x00,
@@ -375,7 +375,7 @@ u8 abyVT3184_AL2230[] = {
};
//{{RobertYu:20060515, new BB setting for VT3226D0
-u8 abyVT3184_VT3226D0[] = {
+static u8 abyVT3184_VT3226D0[] = {
0x31,//00
0x00,
0x00,
@@ -634,7 +634,7 @@ u8 abyVT3184_VT3226D0[] = {
0x00,
};
-const u16 awcFrameTime[MAX_RATE] =
+static const u16 awcFrameTime[MAX_RATE] =
{10, 20, 55, 110, 24, 36, 48, 72, 96, 144, 192, 216};
/*
@@ -931,191 +931,177 @@ void BBvSetAntennaMode(struct vnt_private *pDevice, u8 byAntennaMode)
*
*/
-int BBbVT3184Init(struct vnt_private *pDevice)
+int BBbVT3184Init(struct vnt_private *priv)
{
- int ntStatus;
- u16 wLength;
- u8 * pbyAddr;
- u8 * pbyAgc;
- u16 wLengthAgc;
- u8 abyArray[256];
+ int status;
+	u16 length;
+	u8 *addr;
+	u8 *agc;
+	u16 length_agc;
+ u8 array[256];
u8 data;
- ntStatus = CONTROLnsRequestIn(pDevice,
- MESSAGE_TYPE_READ,
- 0,
- MESSAGE_REQUEST_EEPROM,
- EEP_MAX_CONTEXT_SIZE,
- pDevice->abyEEPROM);
- if (ntStatus != STATUS_SUCCESS) {
- return false;
- }
+ status = CONTROLnsRequestIn(priv, MESSAGE_TYPE_READ, 0,
+ MESSAGE_REQUEST_EEPROM, EEP_MAX_CONTEXT_SIZE,
+ priv->abyEEPROM);
+ if (status != STATUS_SUCCESS)
+ return false;
+
+ /* zonetype initial */
+ priv->byOriginalZonetype = priv->abyEEPROM[EEP_OFS_ZONETYPE];
+
+ if (priv->config_file.ZoneType >= 0) {
+ if ((priv->config_file.ZoneType == 0) &&
+ (priv->abyEEPROM[EEP_OFS_ZONETYPE] != 0x00)) {
+ priv->abyEEPROM[EEP_OFS_ZONETYPE] = 0;
+ priv->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0B;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Init Zone Type :USA\n");
+ } else if ((priv->config_file.ZoneType == 1) &&
+ (priv->abyEEPROM[EEP_OFS_ZONETYPE] != 0x01)) {
+ priv->abyEEPROM[EEP_OFS_ZONETYPE] = 0x01;
+ priv->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Init Zone Type :Japan\n");
+ } else if ((priv->config_file.ZoneType == 2) &&
+ (priv->abyEEPROM[EEP_OFS_ZONETYPE] != 0x02)) {
+ priv->abyEEPROM[EEP_OFS_ZONETYPE] = 0x02;
+ priv->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Init Zone Type :Europe\n");
+ } else {
+ if (priv->config_file.ZoneType !=
+ priv->abyEEPROM[EEP_OFS_ZONETYPE])
+				printk("zonetype in file[%02x] "
+					"mismatch with EEPROM[%02x]\n",
+					priv->config_file.ZoneType,
+					priv->abyEEPROM[EEP_OFS_ZONETYPE]);
+			else
+				printk("Read Zonetype file success, "
+					"use default zonetype setting[%02x]\n",
+					priv->config_file.ZoneType);
+ }
+ }
-// if ((pDevice->abyEEPROM[EEP_OFS_RADIOCTL]&0x06)==0x04)
-// return false;
-
-//zonetype initial
- pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
- if(pDevice->config_file.ZoneType >= 0) { //read zonetype file ok!
- if ((pDevice->config_file.ZoneType == 0)&&
- (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] !=0x00)){ //for USA
- pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0;
- pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0B;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Init Zone Type :USA\n");
- }
- else if((pDevice->config_file.ZoneType == 1)&&
- (pDevice->abyEEPROM[EEP_OFS_ZONETYPE]!=0x01)){ //for Japan
- pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0x01;
- pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Init Zone Type :Japan\n");
- }
- else if((pDevice->config_file.ZoneType == 2)&&
- (pDevice->abyEEPROM[EEP_OFS_ZONETYPE]!=0x02)){ //for Europe
- pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0x02;
- pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Init Zone Type :Europe\n");
- }
-else {
- if(pDevice->config_file.ZoneType !=pDevice->abyEEPROM[EEP_OFS_ZONETYPE])
- printk("zonetype in file[%02x] mismatch with in EEPROM[%02x]\n",pDevice->config_file.ZoneType,pDevice->abyEEPROM[EEP_OFS_ZONETYPE]);
- else
- printk("Read Zonetype file success,use default zonetype setting[%02x]\n",pDevice->config_file.ZoneType);
- }
-}
+ if (!priv->bZoneRegExist)
+ priv->byZoneType = priv->abyEEPROM[EEP_OFS_ZONETYPE];
+
+ priv->byRFType = priv->abyEEPROM[EEP_OFS_RFTYPE];
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Zone Type %x\n",
+ priv->byZoneType);
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"RF Type %d\n", priv->byRFType);
+
+ if ((priv->byRFType == RF_AL2230) ||
+ (priv->byRFType == RF_AL2230S)) {
+ priv->byBBRxConf = abyVT3184_AL2230[10];
+		length = sizeof(abyVT3184_AL2230);
+		addr = abyVT3184_AL2230;
+		agc = abyVT3184_AGC;
+		length_agc = sizeof(abyVT3184_AGC);
+
+ priv->abyBBVGA[0] = 0x1C;
+ priv->abyBBVGA[1] = 0x10;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -70;
+ priv->ldBmThreshold[1] = -48;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
+ } else if (priv->byRFType == RF_AIROHA7230) {
+ priv->byBBRxConf = abyVT3184_AL2230[10];
+		length = sizeof(abyVT3184_AL2230);
+		addr = abyVT3184_AL2230;
+		agc = abyVT3184_AGC;
+		length_agc = sizeof(abyVT3184_AGC);
+
+ addr[0xd7] = 0x06;
+
+ priv->abyBBVGA[0] = 0x1c;
+ priv->abyBBVGA[1] = 0x10;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -70;
+ priv->ldBmThreshold[1] = -48;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
+ } else if ((priv->byRFType == RF_VT3226) ||
+ (priv->byRFType == RF_VT3226D0)) {
+ priv->byBBRxConf = abyVT3184_VT3226D0[10];
+		length = sizeof(abyVT3184_VT3226D0);
+		addr = abyVT3184_VT3226D0;
+		agc = abyVT3184_AGC;
+		length_agc = sizeof(abyVT3184_AGC);
+
+ priv->abyBBVGA[0] = 0x20;
+ priv->abyBBVGA[1] = 0x10;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -70;
+ priv->ldBmThreshold[1] = -48;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
+ /* Fix VT3226 DFC system timing issue */
+ MACvRegBitsOn(priv, MAC_REG_SOFTPWRCTL2, SOFTPWRCTL_RFLEOPT);
+ } else if ((priv->byRFType == RF_VT3342A0)) {
+ priv->byBBRxConf = abyVT3184_VT3226D0[10];
+		length = sizeof(abyVT3184_VT3226D0);
+		addr = abyVT3184_VT3226D0;
+		agc = abyVT3184_AGC;
+		length_agc = sizeof(abyVT3184_AGC);
+
+ priv->abyBBVGA[0] = 0x20;
+ priv->abyBBVGA[1] = 0x10;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -70;
+ priv->ldBmThreshold[1] = -48;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
+ /* Fix VT3226 DFC system timing issue */
+ MACvRegBitsOn(priv, MAC_REG_SOFTPWRCTL2, SOFTPWRCTL_RFLEOPT);
+ } else {
+ return true;
+ }
- if ( !pDevice->bZoneRegExist ) {
- pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
- }
- pDevice->byRFType = pDevice->abyEEPROM[EEP_OFS_RFTYPE];
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Zone Type %x\n", pDevice->byZoneType);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"RF Type %d\n", pDevice->byRFType);
-
- if ((pDevice->byRFType == RF_AL2230) || (pDevice->byRFType == RF_AL2230S)) {
- pDevice->byBBRxConf = abyVT3184_AL2230[10];
- wLength = sizeof(abyVT3184_AL2230);
- pbyAddr = abyVT3184_AL2230;
- pbyAgc = abyVT3184_AGC;
- wLengthAgc = sizeof(abyVT3184_AGC);
-
- pDevice->abyBBVGA[0] = 0x1C;
- pDevice->abyBBVGA[1] = 0x10;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -70;
- pDevice->ldBmThreshold[1] = -48;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
- }
- else if (pDevice->byRFType == RF_AIROHA7230) {
- pDevice->byBBRxConf = abyVT3184_AL2230[10];
- wLength = sizeof(abyVT3184_AL2230);
- pbyAddr = abyVT3184_AL2230;
- pbyAgc = abyVT3184_AGC;
- wLengthAgc = sizeof(abyVT3184_AGC);
-
- // Init ANT B select,TX Config CR09 = 0x61->0x45, 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
- //pbyAddr[0x09] = 0x41;
- // Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted)
- //pbyAddr[0x0a] = 0x28;
- // Select VC1/VC2, CR215 = 0x02->0x06
- pbyAddr[0xd7] = 0x06;
-
- pDevice->abyBBVGA[0] = 0x1C;
- pDevice->abyBBVGA[1] = 0x10;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -70;
- pDevice->ldBmThreshold[1] = -48;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
- }
- else if ( (pDevice->byRFType == RF_VT3226) || (pDevice->byRFType == RF_VT3226D0) ) {
- pDevice->byBBRxConf = abyVT3184_VT3226D0[10]; //RobertYu:20060515
- wLength = sizeof(abyVT3184_VT3226D0); //RobertYu:20060515
- pbyAddr = abyVT3184_VT3226D0; //RobertYu:20060515
- pbyAgc = abyVT3184_AGC;
- wLengthAgc = sizeof(abyVT3184_AGC);
-
- pDevice->abyBBVGA[0] = 0x20; //RobertYu:20060104, reguest by Jack
- pDevice->abyBBVGA[1] = 0x10;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -70;
- pDevice->ldBmThreshold[1] = -48;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
- // Fix VT3226 DFC system timing issue
- MACvRegBitsOn(pDevice, MAC_REG_SOFTPWRCTL2, SOFTPWRCTL_RFLEOPT);
- //}}
- //{{RobertYu:20060609
- } else if ( (pDevice->byRFType == RF_VT3342A0) ) {
- pDevice->byBBRxConf = abyVT3184_VT3226D0[10];
- wLength = sizeof(abyVT3184_VT3226D0);
- pbyAddr = abyVT3184_VT3226D0;
- pbyAgc = abyVT3184_AGC;
- wLengthAgc = sizeof(abyVT3184_AGC);
-
- pDevice->abyBBVGA[0] = 0x20;
- pDevice->abyBBVGA[1] = 0x10;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -70;
- pDevice->ldBmThreshold[1] = -48;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
- // Fix VT3226 DFC system timing issue
- MACvRegBitsOn(pDevice, MAC_REG_SOFTPWRCTL2, SOFTPWRCTL_RFLEOPT);
- //}}
- } else {
- return true;
- }
+	memcpy(array, addr, length);
- memcpy(abyArray, pbyAddr, wLength);
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE,
- 0,
- MESSAGE_REQUEST_BBREG,
- wLength,
- abyArray
- );
-
- memcpy(abyArray, pbyAgc, wLengthAgc);
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE,
- 0,
- MESSAGE_REQUEST_BBAGC,
- wLengthAgc,
- abyArray
- );
-
- if ((pDevice->byRFType == RF_VT3226) || //RobertYu:20051116, 20060111 remove VT3226D0
- (pDevice->byRFType == RF_VT3342A0) //RobertYu:20060609
- ) {
- ControlvWriteByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_ITRTMSET,0x23);
- MACvRegBitsOn(pDevice,MAC_REG_PAPEDELAY,0x01);
- }
- else if (pDevice->byRFType == RF_VT3226D0)
- {
- ControlvWriteByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_ITRTMSET,0x11);
- MACvRegBitsOn(pDevice,MAC_REG_PAPEDELAY,0x01);
- }
+ CONTROLnsRequestOut(priv, MESSAGE_TYPE_WRITE, 0,
+			MESSAGE_REQUEST_BBREG, length, array);
- ControlvWriteByte(pDevice,MESSAGE_REQUEST_BBREG,0x04,0x7F);
- ControlvWriteByte(pDevice,MESSAGE_REQUEST_BBREG,0x0D,0x01);
+	memcpy(array, agc, length_agc);
+
+ CONTROLnsRequestOut(priv, MESSAGE_TYPE_WRITE, 0,
+			MESSAGE_REQUEST_BBAGC, length_agc, array);
+
+ if ((priv->byRFType == RF_VT3226) ||
+ (priv->byRFType == RF_VT3342A0)) {
+ ControlvWriteByte(priv, MESSAGE_REQUEST_MACREG,
+ MAC_REG_ITRTMSET, 0x23);
+ MACvRegBitsOn(priv, MAC_REG_PAPEDELAY, 0x01);
+ } else if (priv->byRFType == RF_VT3226D0) {
+ ControlvWriteByte(priv, MESSAGE_REQUEST_MACREG,
+ MAC_REG_ITRTMSET, 0x11);
+ MACvRegBitsOn(priv, MAC_REG_PAPEDELAY, 0x01);
+ }
+
+ ControlvWriteByte(priv, MESSAGE_REQUEST_BBREG, 0x04, 0x7f);
+ ControlvWriteByte(priv, MESSAGE_REQUEST_BBREG, 0x0d, 0x01);
+
+ RFbRFTableDownload(priv);
- RFbRFTableDownload(pDevice);
/* Fix for TX USB resets from vendors driver */
- CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ, USB_REG4,
+ CONTROLnsRequestIn(priv, MESSAGE_TYPE_READ, USB_REG4,
MESSAGE_REQUEST_MEM, sizeof(data), &data);
data |= 0x2;
- CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, USB_REG4,
+ CONTROLnsRequestOut(priv, MESSAGE_TYPE_WRITE, USB_REG4,
MESSAGE_REQUEST_MEM, sizeof(data), &data);
- return true;//ntStatus;
+ return true;
}
/*
@@ -1464,7 +1450,6 @@ void BBvUpdatePreEDThreshold(struct vnt_private *pDevice, int bScanning)
if( bScanning )
{ // need Max sensitivity //RSSI -69, -70,....
- if(pDevice->byBBPreEDIndex == 0) break;
pDevice->byBBPreEDIndex = 0;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x30); //CR206(0xCE)
@@ -1607,7 +1592,6 @@ void BBvUpdatePreEDThreshold(struct vnt_private *pDevice, int bScanning)
if( bScanning )
{ // need Max sensitivity //RSSI -69, -70, ...
- if(pDevice->byBBPreEDIndex == 0) break;
pDevice->byBBPreEDIndex = 0;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x24); //CR206(0xCE)
@@ -1759,7 +1743,6 @@ void BBvUpdatePreEDThreshold(struct vnt_private *pDevice, int bScanning)
case RF_VT3342A0: //RobertYu:20060627, testing table
if( bScanning )
{ // need Max sensitivity //RSSI -67, -68, ...
- if(pDevice->byBBPreEDIndex == 0) break;
pDevice->byBBPreEDIndex = 0;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x38); //CR206(0xCE)
diff --git a/drivers/staging/vt6656/bssdb.c b/drivers/staging/vt6656/bssdb.c
index dad3f8c78e21..9c78dab95d35 100644
--- a/drivers/staging/vt6656/bssdb.c
+++ b/drivers/staging/vt6656/bssdb.c
@@ -21,22 +21,21 @@
* Purpose: Handles the Basic Service Set & Node Database functions
*
* Functions:
- * BSSpSearchBSSList - Search known BSS list for Desire SSID or BSSID
- * BSSvClearBSSList - Clear BSS List
- * BSSbInsertToBSSList - Insert a BSS set into known BSS list
- * BSSbUpdateToBSSList - Update BSS set in known BSS list
- * BSSbIsSTAInNodeDB - Search Node DB table to find the index of matched DstAddr
- * BSSvCreateOneNode - Allocate an Node for Node DB
- * BSSvUpdateAPNode - Update AP Node content in Index 0 of KnownNodeDB
- * BSSvSecondCallBack - One second timer callback function to update Node DB info & AP link status
- * BSSvUpdateNodeTxCounter - Update Tx attemps, Tx failure counter in Node DB for auto-fall back rate control
+ *	BSSpSearchBSSList - Search known BSS list for Desired SSID or BSSID
+ *	BSSvClearBSSList - Clear BSS List
+ *	BSSbInsertToBSSList - Insert a BSS set into known BSS list
+ *	BSSbUpdateToBSSList - Update BSS set in known BSS list
+ *	BSSbIsSTAInNodeDB - Search Node DB table to find the index of matched DstAddr
+ *	BSSvCreateOneNode - Allocate a Node for Node DB
+ *	BSSvUpdateAPNode - Update AP Node content in Index 0 of KnownNodeDB
+ *	BSSvSecondCallBack - One second timer callback function to update Node DB info & AP link status
+ *	BSSvUpdateNodeTxCounter - Update Tx attempts, Tx failure counter in Node DB for auto-fallback rate control
*
* Revision History:
*
* Author: Lyndon Chen
*
* Date: July 17, 2002
- *
*/
#include "tmacro.h"
@@ -59,41 +58,38 @@
#include "iowpa.h"
#include "power.h"
-static int msglevel =MSG_LEVEL_INFO;
-//static int msglevel =MSG_LEVEL_DEBUG;
-
-const u16 awHWRetry0[5][5] = {
- {RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
- {RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
- {RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
- {RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
- {RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
- };
-const u16 awHWRetry1[5][5] = {
- {RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M},
- {RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M},
- {RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
- {RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M},
- {RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M}
- };
+static int msglevel = MSG_LEVEL_INFO;
+/* static int msglevel = MSG_LEVEL_DEBUG; */
+
+static const u16 awHWRetry0[5][5] = {
+ {RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
+ {RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
+ {RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
+ {RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
+ {RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
+ };
+static const u16 awHWRetry1[5][5] = {
+ {RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M},
+ {RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M},
+ {RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
+ {RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M},
+ {RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M}
+ };
static void s_vCheckSensitivity(struct vnt_private *pDevice);
static void s_vCheckPreEDThreshold(struct vnt_private *pDevice);
static void s_uCalculateLinkQual(struct vnt_private *pDevice);
-/*+
- *
+/*
* Routine Description:
- * Search known BSS list for Desire SSID or BSSID.
+ *	Search known BSS list for Desired SSID or BSSID.
*
* Return Value:
- * PTR to KnownBSS or NULL
- *
--*/
-
+ * PTR to KnownBSS or NULL
+ */
PKnownBSS BSSpSearchBSSList(struct vnt_private *pDevice,
- u8 *pbyDesireBSSID, u8 *pbyDesireSSID,
- CARD_PHY_TYPE ePhyType)
+ u8 *pbyDesireBSSID, u8 *pbyDesireSSID,
+ CARD_PHY_TYPE ePhyType)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
u8 *pbyBSSID = NULL;
@@ -104,204 +100,202 @@ PKnownBSS BSSpSearchBSSList(struct vnt_private *pDevice,
int ii = 0;
int jj = 0;
- if (pbyDesireBSSID != NULL) {
+ if (pbyDesireBSSID) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
"BSSpSearchBSSList BSSID[%pM]\n", pbyDesireBSSID);
- if ((!is_broadcast_ether_addr(pbyDesireBSSID)) &&
- (memcmp(pbyDesireBSSID, ZeroBSSID, 6)!= 0)){
- pbyBSSID = pbyDesireBSSID;
- }
- }
- if (pbyDesireSSID != NULL) {
- if (((PWLAN_IE_SSID)pbyDesireSSID)->len != 0) {
- pSSID = (PWLAN_IE_SSID) pbyDesireSSID;
- }
- }
-
- if ((pbyBSSID != NULL)&&(pDevice->bRoaming == false)) {
- // match BSSID first
- for (ii = 0; ii <MAX_BSS_NUM; ii++) {
- pCurrBSS = &(pMgmt->sBSSList[ii]);
-
- pCurrBSS->bSelected = false;
-
- if ((pCurrBSS->bActive) &&
- (pCurrBSS->bSelected == false)) {
- if (ether_addr_equal(pCurrBSS->abyBSSID, pbyBSSID)) {
- if (pSSID != NULL) {
- // compare ssid
- if ( !memcmp(pSSID->abySSID,
- ((PWLAN_IE_SSID)pCurrBSS->abySSID)->abySSID,
- pSSID->len)) {
- if ((pMgmt->eConfigMode == WMAC_CONFIG_AUTO) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo))
- ) {
- pCurrBSS->bSelected = true;
- return(pCurrBSS);
- }
- }
- } else {
- if ((pMgmt->eConfigMode == WMAC_CONFIG_AUTO) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo))
- ) {
- pCurrBSS->bSelected = true;
- return(pCurrBSS);
- }
- }
- }
- }
- }
- } else {
- // ignore BSSID
- for (ii = 0; ii <MAX_BSS_NUM; ii++) {
- pCurrBSS = &(pMgmt->sBSSList[ii]);
-
- //2007-0721-01<Mark>by MikeLiu
- // if ((pCurrBSS->bActive) &&
- // (pCurrBSS->bSelected == false)) {
-
- pCurrBSS->bSelected = false;
- if (pCurrBSS->bActive) {
-
- if (pSSID != NULL) {
- // matched SSID
- if (memcmp(pSSID->abySSID,
- ((PWLAN_IE_SSID)pCurrBSS->abySSID)->abySSID,
- pSSID->len) ||
- (pSSID->len != ((PWLAN_IE_SSID)pCurrBSS->abySSID)->len)) {
- // SSID not match skip this BSS
- continue;
- }
- }
- if (((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo)) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo))
- ){
- // Type not match skip this BSS
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSS type mismatch.... Config[%d] BSS[0x%04x]\n", pMgmt->eConfigMode, pCurrBSS->wCapInfo);
- continue;
- }
-
- if (ePhyType != PHY_TYPE_AUTO) {
- if (((ePhyType == PHY_TYPE_11A) && (PHY_TYPE_11A != pCurrBSS->eNetworkTypeInUse)) ||
- ((ePhyType != PHY_TYPE_11A) && (PHY_TYPE_11A == pCurrBSS->eNetworkTypeInUse))) {
- // PhyType not match skip this BSS
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Physical type mismatch.... ePhyType[%d] BSS[%d]\n", ePhyType, pCurrBSS->eNetworkTypeInUse);
- continue;
- }
- }
-
- pMgmt->pSameBSS[jj].uChannel = pCurrBSS->uChannel;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
- "BSSpSearchBSSList pSelect1[%pM]\n",
- pCurrBSS->abyBSSID);
- jj++;
-
- if (pSelect == NULL) {
- pSelect = pCurrBSS;
- } else {
- // compare RSSI, select the strongest signal
- if (pCurrBSS->uRSSI < pSelect->uRSSI) {
- pSelect = pCurrBSS;
- }
- }
- }
- }
-
-pDevice->bSameBSSMaxNum = jj;
-
- if (pSelect != NULL) {
- pSelect->bSelected = true;
- if (pDevice->bRoaming == false) {
- // Einsn Add @20070907
- memcpy(pbyDesireSSID,pCurrBSS->abySSID,WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1) ;
- }
-
- return(pSelect);
- }
- }
- return(NULL);
+ if (!is_broadcast_ether_addr(pbyDesireBSSID) &&
+ memcmp(pbyDesireBSSID, ZeroBSSID, 6) != 0)
+ pbyBSSID = pbyDesireBSSID;
+ }
+ if (pbyDesireSSID &&
+ ((PWLAN_IE_SSID) pbyDesireSSID)->len != 0)
+ pSSID = (PWLAN_IE_SSID) pbyDesireSSID;
+
+ if (pbyBSSID && pDevice->bRoaming == false) {
+ /* match BSSID first */
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ pCurrBSS = &(pMgmt->sBSSList[ii]);
+
+ pCurrBSS->bSelected = false;
+
+ if (pCurrBSS->bActive &&
+ pCurrBSS->bSelected == false &&
+ ether_addr_equal(pCurrBSS->abyBSSID, pbyBSSID)) {
+ if (pSSID) {
+ /* compare ssid */
+ if (!memcmp(pSSID->abySSID,
+ ((PWLAN_IE_SSID) pCurrBSS->abySSID)->abySSID,
+ pSSID->len) &&
+ (pMgmt->eConfigMode == WMAC_CONFIG_AUTO ||
+ (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA &&
+ WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
+ (pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA &&
+ WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo)))) {
+
+ pCurrBSS->bSelected = true;
+ return pCurrBSS;
+ }
+ } else if (pMgmt->eConfigMode == WMAC_CONFIG_AUTO ||
+ (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA &&
+ WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
+ (pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA &&
+ WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo))) {
+ pCurrBSS->bSelected = true;
+ return pCurrBSS;
+ }
+ }
+ }
+ } else {
+ /* ignore BSSID */
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ pCurrBSS = &(pMgmt->sBSSList[ii]);
+
+ /* 2007-0721-01<Mark>by MikeLiu
+ * if ((pCurrBSS->bActive) &&
+ * (pCurrBSS->bSelected == false)) { */
+
+ pCurrBSS->bSelected = false;
+ if (pCurrBSS->bActive) {
+
+ if (pSSID &&
+ /* matched SSID */
+ (memcmp(pSSID->abySSID,
+ ((PWLAN_IE_SSID) pCurrBSS->abySSID)->abySSID,
+ pSSID->len) ||
+ pSSID->len !=
+ ((PWLAN_IE_SSID) pCurrBSS->abySSID)->len)) {
+				/* SSID does not match, skip this BSS */
+ continue;
+ }
+
+ if ((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA &&
+ WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo)) ||
+ (pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA &&
+ WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo))) {
+				/* type does not match, skip this BSS */
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "BSS type mismatch.... Config[%d] BSS[0x%04x]\n",
+ pMgmt->eConfigMode,
+ pCurrBSS->wCapInfo);
+ continue;
+ }
+
+ if (ePhyType != PHY_TYPE_AUTO &&
+ ((ePhyType == PHY_TYPE_11A &&
+ PHY_TYPE_11A != pCurrBSS->eNetworkTypeInUse) ||
+ (ePhyType != PHY_TYPE_11A &&
+ PHY_TYPE_11A == pCurrBSS->eNetworkTypeInUse))) {
+				/* PhyType does not match, skip this BSS */
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Physical type mismatch.... ePhyType[%d] BSS[%d]\n",
+ ePhyType,
+ pCurrBSS->eNetworkTypeInUse);
+ continue;
+ }
+
+ pMgmt->pSameBSS[jj].uChannel = pCurrBSS->uChannel;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "BSSpSearchBSSList pSelect1[%pM]\n",
+ pCurrBSS->abyBSSID);
+ jj++;
+
+ if (!pSelect)
+ pSelect = pCurrBSS;
+ /* compare RSSI, select the strongest signal */
+ else if (pCurrBSS->uRSSI < pSelect->uRSSI)
+ pSelect = pCurrBSS;
+ }
+ }
+
+ pDevice->bSameBSSMaxNum = jj;
+
+ if (pSelect) {
+ pSelect->bSelected = true;
+ if (pDevice->bRoaming == false) {
+ /* Einsn Add @20070907 */
+ memcpy(pbyDesireSSID,
+ pCurrBSS->abySSID,
+ WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ }
+
+ return pSelect;
+ }
+ }
+ return NULL;
}
-/*+
- *
+/*
* Routine Description:
- * Clear BSS List
+ * Clear BSS List
*
* Return Value:
- * None.
- *
--*/
-
+ * None.
+ */
void BSSvClearBSSList(struct vnt_private *pDevice, int bKeepCurrBSSID)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
int ii;
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- if (bKeepCurrBSSID) {
- if (pMgmt->sBSSList[ii].bActive &&
- ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
- pMgmt->abyCurrBSSID)) {
- //mike mark: there are two BSSID's in list. If that AP is in hidden ssid mode, one SSID is null,
- // but other's might not be obvious, so if it associate's with your STA,
- // you must keep the two of them!!
- // bKeepCurrBSSID = false;
- continue;
- }
- }
-
- pMgmt->sBSSList[ii].bActive = false;
- memset(&pMgmt->sBSSList[ii], 0, sizeof(KnownBSS));
- }
- BSSvClearAnyBSSJoinRecord(pDevice);
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ if (bKeepCurrBSSID &&
+ pMgmt->sBSSList[ii].bActive &&
+ ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
+ pMgmt->abyCurrBSSID)) {
+
+ /* mike mark:
+			 * there are two BSSIDs in the list. If that AP
+			 * is in hidden SSID mode, one SSID is null but
+			 * the other might not be obvious, so if it
+			 * associates with your STA, you must keep both
+			 * of them!! bKeepCurrBSSID = false;
+ */
+
+ continue;
+ }
+
+ pMgmt->sBSSList[ii].bActive = false;
+ memset(&pMgmt->sBSSList[ii], 0, sizeof(KnownBSS));
+ }
+ BSSvClearAnyBSSJoinRecord(pDevice);
}
-/*+
- *
+/*
* Routine Description:
- * search BSS list by BSSID & SSID if matched
+ * search BSS list by BSSID & SSID if matched
*
* Return Value:
- * true if found.
- *
--*/
+ * true if found.
+ */
PKnownBSS BSSpAddrIsInBSSList(struct vnt_private *pDevice,
- u8 *abyBSSID, PWLAN_IE_SSID pSSID)
+ u8 *abyBSSID,
+ PWLAN_IE_SSID pSSID)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
PKnownBSS pBSSList = NULL;
int ii;
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSSList = &(pMgmt->sBSSList[ii]);
- if (pBSSList->bActive) {
- if (ether_addr_equal(pBSSList->abyBSSID, abyBSSID)) {
- if (pSSID->len == ((PWLAN_IE_SSID)pBSSList->abySSID)->len){
- if (memcmp(pSSID->abySSID,
- ((PWLAN_IE_SSID)pBSSList->abySSID)->abySSID,
- pSSID->len) == 0)
- return pBSSList;
- }
- }
- }
- }
-
- return NULL;
-};
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ pBSSList = &(pMgmt->sBSSList[ii]);
+ if (pBSSList->bActive &&
+ ether_addr_equal(pBSSList->abyBSSID, abyBSSID) &&
+ pSSID->len == ((PWLAN_IE_SSID) pBSSList->abySSID)->len &&
+ memcmp(pSSID->abySSID,
+ ((PWLAN_IE_SSID) pBSSList->abySSID)->abySSID,
+ pSSID->len) == 0)
+ return pBSSList;
+ }
-/*+
- *
+ return NULL;
+}
+
+/*
* Routine Description:
- * Insert a BSS set into known BSS list
+ * Insert a BSS set into known BSS list
*
* Return Value:
- * true if success.
- *
--*/
-
+ * true if success.
+ */
int BSSbInsertToBSSList(struct vnt_private *pDevice,
u8 *abyBSSIDAddr,
u64 qwTimestamp,
@@ -322,162 +316,173 @@ int BSSbInsertToBSSList(struct vnt_private *pDevice,
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct vnt_rx_mgmt *pRxPacket =
- (struct vnt_rx_mgmt *)pRxPacketContext;
+ (struct vnt_rx_mgmt *) pRxPacketContext;
PKnownBSS pBSSList = NULL;
unsigned int ii;
bool bParsingQuiet = false;
- pBSSList = (PKnownBSS)&(pMgmt->sBSSList[0]);
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSSList = (PKnownBSS)&(pMgmt->sBSSList[ii]);
- if (!pBSSList->bActive)
- break;
- }
-
- if (ii == MAX_BSS_NUM){
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Get free KnowBSS node failed.\n");
- return false;
- }
- // save the BSS info
- pBSSList->bActive = true;
- memcpy( pBSSList->abyBSSID, abyBSSIDAddr, WLAN_BSSID_LEN);
+ pBSSList = (PKnownBSS) &(pMgmt->sBSSList[0]);
+
+ for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ pBSSList = (PKnownBSS) &(pMgmt->sBSSList[ii]);
+ if (!pBSSList->bActive)
+ break;
+ }
+
+ if (ii == MAX_BSS_NUM) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+			KERN_INFO "Get free KnownBSS node failed.\n");
+ return false;
+ }
+ /* save the BSS info */
+ pBSSList->bActive = true;
+ memcpy(pBSSList->abyBSSID, abyBSSIDAddr, WLAN_BSSID_LEN);
pBSSList->qwBSSTimestamp = cpu_to_le64(qwTimestamp);
- pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval);
- pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
- pBSSList->uClearCount = 0;
-
- if (pSSID->len > WLAN_SSID_MAXLEN)
- pSSID->len = WLAN_SSID_MAXLEN;
- memcpy( pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
-
- pBSSList->uChannel = byCurrChannel;
-
- if (pSuppRates->len > WLAN_RATES_MAXLEN)
- pSuppRates->len = WLAN_RATES_MAXLEN;
- memcpy( pBSSList->abySuppRates, pSuppRates, pSuppRates->len + WLAN_IEHDR_LEN);
-
- if (pExtSuppRates != NULL) {
- if (pExtSuppRates->len > WLAN_RATES_MAXLEN)
- pExtSuppRates->len = WLAN_RATES_MAXLEN;
- memcpy(pBSSList->abyExtSuppRates, pExtSuppRates, pExtSuppRates->len + WLAN_IEHDR_LEN);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSbInsertToBSSList: pExtSuppRates->len = %d\n", pExtSuppRates->len);
-
- } else {
- memset(pBSSList->abyExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
- }
- pBSSList->sERP.byERP = psERP->byERP;
- pBSSList->sERP.bERPExist = psERP->bERPExist;
-
- // Check if BSS is 802.11a/b/g
- if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
- } else {
- if (pBSSList->sERP.bERPExist == true) {
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
- } else {
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
- }
- }
-
- pBSSList->byRxRate = pRxPacket->byRxRate;
- pBSSList->qwLocalTSF = pRxPacket->qwLocalTSF;
- pBSSList->uRSSI = pRxPacket->uRSSI;
- pBSSList->bySQ = pRxPacket->bySQ;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- // assoc with BSS
- if (pBSSList == pMgmt->pCurrBSS) {
- bParsingQuiet = true;
- }
- }
-
- WPA_ClearRSN(pBSSList);
-
- if (pRSNWPA != NULL) {
- unsigned int uLen = pRSNWPA->len + 2;
-
- if (uLen <= (uIELength -
- (unsigned int) (u32) ((u8 *) pRSNWPA - pbyIEs))) {
- pBSSList->wWPALen = uLen;
- memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
- WPA_ParseRSN(pBSSList, pRSNWPA);
+ pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval);
+ pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
+ pBSSList->uClearCount = 0;
+
+ if (pSSID->len > WLAN_SSID_MAXLEN)
+ pSSID->len = WLAN_SSID_MAXLEN;
+ memcpy(pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
+
+ pBSSList->uChannel = byCurrChannel;
+
+ if (pSuppRates->len > WLAN_RATES_MAXLEN)
+ pSuppRates->len = WLAN_RATES_MAXLEN;
+ memcpy(pBSSList->abySuppRates, pSuppRates,
+ pSuppRates->len + WLAN_IEHDR_LEN);
+
+ if (pExtSuppRates) {
+ if (pExtSuppRates->len > WLAN_RATES_MAXLEN)
+ pExtSuppRates->len = WLAN_RATES_MAXLEN;
+ memcpy(pBSSList->abyExtSuppRates, pExtSuppRates,
+ pExtSuppRates->len + WLAN_IEHDR_LEN);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "BSSbInsertToBSSList: pExtSuppRates->len = %d\n",
+ pExtSuppRates->len);
+
+ } else {
+ memset(pBSSList->abyExtSuppRates, 0,
+ WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
}
- }
+ pBSSList->sERP.byERP = psERP->byERP;
+ pBSSList->sERP.bERPExist = psERP->bERPExist;
+
+ /* Check if BSS is 802.11a/b/g */
+ if (pBSSList->uChannel > CB_MAX_CHANNEL_24G)
+ pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
+ else if (pBSSList->sERP.bERPExist == true)
+ pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
+ else
+ pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
+
+ pBSSList->byRxRate = pRxPacket->byRxRate;
+ pBSSList->qwLocalTSF = pRxPacket->qwLocalTSF;
+ pBSSList->uRSSI = pRxPacket->uRSSI;
+ pBSSList->bySQ = pRxPacket->bySQ;
+
+ if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA &&
+ pMgmt->eCurrState == WMAC_STATE_ASSOC &&
+ /* assoc with BSS */
+ pBSSList == pMgmt->pCurrBSS)
+ bParsingQuiet = true;
+
+ WPA_ClearRSN(pBSSList);
+
+ if (pRSNWPA) {
+ unsigned int uLen = pRSNWPA->len + 2;
+
+ if (uLen <= (uIELength -
+ (unsigned int) (u32) ((u8 *) pRSNWPA - pbyIEs))) {
+ pBSSList->wWPALen = uLen;
+ memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
+ WPA_ParseRSN(pBSSList, pRSNWPA);
+ }
+ }
+
+ WPA2_ClearRSN(pBSSList);
+
+ if (pRSN) {
+ unsigned int uLen = pRSN->len + 2;
+
+ if (uLen <= (uIELength -
+ (unsigned int) (u32) ((u8 *) pRSN - pbyIEs))) {
+ pBSSList->wRSNLen = uLen;
+ memcpy(pBSSList->byRSNIE, pRSN, uLen);
+ WPA2vParseRSN(pBSSList, pRSN);
+ }
+ }
+
+ if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2 ||
+ pBSSList->bWPA2Valid == true) {
+
+ PSKeyItem pTransmitKey = NULL;
+ bool bIs802_1x = false;
+
+ for (ii = 0; ii < pBSSList->wAKMSSAuthCount; ii++) {
+ if (pBSSList->abyAKMSSAuthType[ii] ==
+ WLAN_11i_AKMSS_802_1X) {
+ bIs802_1x = true;
+ break;
+ }
+ }
+ if (bIs802_1x == true &&
+ pSSID->len == ((PWLAN_IE_SSID) pMgmt->abyDesireSSID)->len &&
+ !memcmp(pSSID->abySSID,
+ ((PWLAN_IE_SSID) pMgmt->abyDesireSSID)->abySSID,
+ pSSID->len)) {
+
+ bAdd_PMKID_Candidate((void *) pDevice,
+ pBSSList->abyBSSID,
+ &pBSSList->sRSNCapObj);
+
+ if (pDevice->bLinkPass == true &&
+ pMgmt->eCurrState == WMAC_STATE_ASSOC &&
+ (KeybGetTransmitKey(&(pDevice->sKey),
+ pDevice->abyBSSID,
+ PAIRWISE_KEY,
+ &pTransmitKey) == true ||
+ KeybGetTransmitKey(&(pDevice->sKey),
+ pDevice->abyBSSID,
+ GROUP_KEY,
+ &pTransmitKey) == true)) {
+ pDevice->gsPMKIDCandidate.StatusType =
+ Ndis802_11StatusType_PMKID_CandidateList;
+ pDevice->gsPMKIDCandidate.Version = 1;
- WPA2_ClearRSN(pBSSList);
- if (pRSN != NULL) {
- unsigned int uLen = pRSN->len + 2;
+ }
+ }
+ }
- if (uLen <= (uIELength -
- (unsigned int) (u32) ((u8 *) pRSN - pbyIEs))) {
- pBSSList->wRSNLen = uLen;
- memcpy(pBSSList->byRSNIE, pRSN, uLen);
- WPA2vParseRSN(pBSSList, pRSN);
+ if (pDevice->bUpdateBBVGA) {
+ /* Monitor if RSSI is too strong. */
+ pBSSList->byRSSIStatCnt = 0;
+ RFvRSSITodBm(pDevice, (u8) (pRxPacket->uRSSI),
+ &pBSSList->ldBmMAX);
+ pBSSList->ldBmAverage[0] = pBSSList->ldBmMAX;
+ pBSSList->ldBmAverRange = pBSSList->ldBmMAX;
+ for (ii = 1; ii < RSSI_STAT_COUNT; ii++)
+ pBSSList->ldBmAverage[ii] = 0;
}
- }
-
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || (pBSSList->bWPA2Valid == true)) {
-
- PSKeyItem pTransmitKey = NULL;
- bool bIs802_1x = false;
-
- for (ii = 0; ii < pBSSList->wAKMSSAuthCount; ii ++) {
- if (pBSSList->abyAKMSSAuthType[ii] == WLAN_11i_AKMSS_802_1X) {
- bIs802_1x = true;
- break;
- }
- }
- if ((bIs802_1x == true) && (pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len) &&
- ( !memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySSID, pSSID->len))) {
-
- bAdd_PMKID_Candidate((void *) pDevice,
- pBSSList->abyBSSID,
- &pBSSList->sRSNCapObj);
-
- if ((pDevice->bLinkPass == true) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- if ((KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, PAIRWISE_KEY, &pTransmitKey) == true) ||
- (KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, GROUP_KEY, &pTransmitKey) == true)) {
- pDevice->gsPMKIDCandidate.StatusType = Ndis802_11StatusType_PMKID_CandidateList;
- pDevice->gsPMKIDCandidate.Version = 1;
-
- }
-
- }
- }
- }
-
- if (pDevice->bUpdateBBVGA) {
- // Monitor if RSSI is too strong.
- pBSSList->byRSSIStatCnt = 0;
- RFvRSSITodBm(pDevice, (u8)(pRxPacket->uRSSI), &pBSSList->ldBmMAX);
- pBSSList->ldBmAverage[0] = pBSSList->ldBmMAX;
- pBSSList->ldBmAverRange = pBSSList->ldBmMAX;
- for (ii = 1; ii < RSSI_STAT_COUNT; ii++)
- pBSSList->ldBmAverage[ii] = 0;
- }
-
- pBSSList->uIELength = uIELength;
- if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
- pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
- memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
-
- return true;
+
+ pBSSList->uIELength = uIELength;
+ if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
+ pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
+ memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
+
+ return true;
}
-/*+
- *
+/*
* Routine Description:
- * Update BSS set in known BSS list
+ * Update BSS set in known BSS list
*
* Return Value:
- * true if success.
- *
--*/
-// TODO: input structure modify
-
+ * true if success.
+ */
+/* TODO: input structure modify */
int BSSbUpdateToBSSList(struct vnt_private *pDevice,
u64 qwTimestamp,
u16 wBeaconInterval,
@@ -499,321 +504,306 @@ int BSSbUpdateToBSSList(struct vnt_private *pDevice,
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
struct vnt_rx_mgmt *pRxPacket =
- (struct vnt_rx_mgmt *)pRxPacketContext;
+ (struct vnt_rx_mgmt *) pRxPacketContext;
int ii, jj;
signed long ldBm, ldBmSum;
bool bParsingQuiet = false;
- if (pBSSList == NULL)
- return false;
+ if (!pBSSList)
+ return false;
pBSSList->qwBSSTimestamp = cpu_to_le64(qwTimestamp);
- pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval);
- pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
- pBSSList->uClearCount = 0;
- pBSSList->uChannel = byCurrChannel;
-
- if (pSSID->len > WLAN_SSID_MAXLEN)
- pSSID->len = WLAN_SSID_MAXLEN;
-
- if ((pSSID->len != 0) && (pSSID->abySSID[0] != 0))
- memcpy(pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
- memcpy(pBSSList->abySuppRates, pSuppRates,pSuppRates->len + WLAN_IEHDR_LEN);
-
- if (pExtSuppRates != NULL) {
- memcpy(pBSSList->abyExtSuppRates, pExtSuppRates,pExtSuppRates->len + WLAN_IEHDR_LEN);
- } else {
- memset(pBSSList->abyExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
- }
- pBSSList->sERP.byERP = psERP->byERP;
- pBSSList->sERP.bERPExist = psERP->bERPExist;
-
- // Check if BSS is 802.11a/b/g
- if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
- } else {
- if (pBSSList->sERP.bERPExist == true) {
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
- } else {
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
- }
- }
-
- pBSSList->byRxRate = pRxPacket->byRxRate;
- pBSSList->qwLocalTSF = pRxPacket->qwLocalTSF;
- if(bChannelHit)
- pBSSList->uRSSI = pRxPacket->uRSSI;
- pBSSList->bySQ = pRxPacket->bySQ;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- // assoc with BSS
- if (pBSSList == pMgmt->pCurrBSS) {
- bParsingQuiet = true;
- }
- }
-
- WPA_ClearRSN(pBSSList); //mike update
-
- if (pRSNWPA != NULL) {
- unsigned int uLen = pRSNWPA->len + 2;
- if (uLen <= (uIELength -
- (unsigned int) (u32) ((u8 *) pRSNWPA - pbyIEs))) {
- pBSSList->wWPALen = uLen;
- memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
- WPA_ParseRSN(pBSSList, pRSNWPA);
+ pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval);
+ pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
+ pBSSList->uClearCount = 0;
+ pBSSList->uChannel = byCurrChannel;
+
+ if (pSSID->len > WLAN_SSID_MAXLEN)
+ pSSID->len = WLAN_SSID_MAXLEN;
+
+ if (pSSID->len != 0 && pSSID->abySSID[0] != 0)
+ memcpy(pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
+ memcpy(pBSSList->abySuppRates, pSuppRates,
+ pSuppRates->len + WLAN_IEHDR_LEN);
+
+ if (pExtSuppRates)
+ memcpy(pBSSList->abyExtSuppRates, pExtSuppRates,
+ pExtSuppRates->len + WLAN_IEHDR_LEN);
+ else
+ memset(pBSSList->abyExtSuppRates, 0,
+ WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
+ pBSSList->sERP.byERP = psERP->byERP;
+ pBSSList->sERP.bERPExist = psERP->bERPExist;
+
+ /* Check if BSS is 802.11a/b/g */
+ if (pBSSList->uChannel > CB_MAX_CHANNEL_24G)
+ pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
+ else if (pBSSList->sERP.bERPExist == true)
+ pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
+ else
+ pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
+
+ pBSSList->byRxRate = pRxPacket->byRxRate;
+ pBSSList->qwLocalTSF = pRxPacket->qwLocalTSF;
+ if (bChannelHit)
+ pBSSList->uRSSI = pRxPacket->uRSSI;
+ pBSSList->bySQ = pRxPacket->bySQ;
+
+ if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA &&
+ pMgmt->eCurrState == WMAC_STATE_ASSOC &&
+ /* assoc with BSS */
+ pBSSList == pMgmt->pCurrBSS)
+ bParsingQuiet = true;
+
+ WPA_ClearRSN(pBSSList); /* mike update */
+
+ if (pRSNWPA) {
+ unsigned int uLen = pRSNWPA->len + 2;
+ if (uLen <= (uIELength -
+ (unsigned int) (u32) ((u8 *) pRSNWPA - pbyIEs))) {
+ pBSSList->wWPALen = uLen;
+ memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
+ WPA_ParseRSN(pBSSList, pRSNWPA);
+ }
}
- }
- WPA2_ClearRSN(pBSSList); //mike update
+ WPA2_ClearRSN(pBSSList); /* mike update */
- if (pRSN != NULL) {
- unsigned int uLen = pRSN->len + 2;
- if (uLen <= (uIELength -
- (unsigned int) (u32) ((u8 *) pRSN - pbyIEs))) {
- pBSSList->wRSNLen = uLen;
- memcpy(pBSSList->byRSNIE, pRSN, uLen);
- WPA2vParseRSN(pBSSList, pRSN);
+ if (pRSN) {
+ unsigned int uLen = pRSN->len + 2;
+ if (uLen <= (uIELength -
+ (unsigned int) (u32) ((u8 *) pRSN - pbyIEs))) {
+ pBSSList->wRSNLen = uLen;
+ memcpy(pBSSList->byRSNIE, pRSN, uLen);
+ WPA2vParseRSN(pBSSList, pRSN);
+ }
}
- }
-
- if (pRxPacket->uRSSI != 0) {
- RFvRSSITodBm(pDevice, (u8)(pRxPacket->uRSSI), &ldBm);
- // Monitor if RSSI is too strong.
- pBSSList->byRSSIStatCnt++;
- pBSSList->byRSSIStatCnt %= RSSI_STAT_COUNT;
- pBSSList->ldBmAverage[pBSSList->byRSSIStatCnt] = ldBm;
- ldBmSum = 0;
- for (ii = 0, jj = 0; ii < RSSI_STAT_COUNT; ii++) {
- if (pBSSList->ldBmAverage[ii] != 0) {
- pBSSList->ldBmMAX =
- max(pBSSList->ldBmAverage[ii], ldBm);
- ldBmSum +=
- pBSSList->ldBmAverage[ii];
- jj++;
+
+ if (pRxPacket->uRSSI != 0) {
+ RFvRSSITodBm(pDevice, (u8) (pRxPacket->uRSSI), &ldBm);
+ /* Monitor if RSSI is too strong. */
+ pBSSList->byRSSIStatCnt++;
+ pBSSList->byRSSIStatCnt %= RSSI_STAT_COUNT;
+ pBSSList->ldBmAverage[pBSSList->byRSSIStatCnt] = ldBm;
+ ldBmSum = 0;
+ for (ii = 0, jj = 0; ii < RSSI_STAT_COUNT; ii++) {
+ if (pBSSList->ldBmAverage[ii] != 0) {
+ pBSSList->ldBmMAX =
+ max(pBSSList->ldBmAverage[ii], ldBm);
+ ldBmSum +=
+ pBSSList->ldBmAverage[ii];
+ jj++;
+ }
}
- }
- pBSSList->ldBmAverRange = ldBmSum /jj;
- }
+ pBSSList->ldBmAverRange = ldBmSum / jj;
+ }
- pBSSList->uIELength = uIELength;
- if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
- pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
- memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
+ pBSSList->uIELength = uIELength;
+ if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
+ pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
+ memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
- return true;
+ return true;
}
-/*+
- *
+/*
* Routine Description:
- * Search Node DB table to find the index of matched DstAddr
+ * Search Node DB table to find the index of matched DstAddr
*
* Return Value:
- * None
- *
--*/
-
+ * None
+ */
int BSSbIsSTAInNodeDB(struct vnt_private *pDevice,
- u8 *abyDstAddr, u32 *puNodeIndex)
+ u8 *abyDstAddr,
+ u32 *puNodeIndex)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
unsigned int ii;
- // Index = 0 reserved for AP Node
- for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive) {
- if (ether_addr_equal(abyDstAddr,
+ /* Index = 0 reserved for AP Node */
+ for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
+ if (pMgmt->sNodeDBTable[ii].bActive &&
+ ether_addr_equal(abyDstAddr,
pMgmt->sNodeDBTable[ii].abyMACAddr)) {
- *puNodeIndex = ii;
- return true;
- }
- }
- }
+ *puNodeIndex = ii;
+ return true;
+ }
+ }
- return false;
+ return false;
};
-/*+
- *
+/*
* Routine Description:
- * Find an empty node and allocate it; if no empty node
- * is found, then use the most inactive one.
+ * Find an empty node and allocate it; if no empty node
+ * is found, then use the most inactive one.
*
* Return Value:
- * None
- *
--*/
+ * None
+ */
void BSSvCreateOneNode(struct vnt_private *pDevice, u32 *puNodeIndex)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
- int ii;
+ int ii;
u32 BigestCount = 0;
u32 SelectIndex;
- struct sk_buff *skb;
-
- // Index = 0 reserved for AP Node (In STA mode)
- // Index = 0 reserved for Broadcast/MultiCast (In AP mode)
- SelectIndex = 1;
- for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive) {
- if (pMgmt->sNodeDBTable[ii].uInActiveCount > BigestCount) {
- BigestCount = pMgmt->sNodeDBTable[ii].uInActiveCount;
- SelectIndex = ii;
- }
- }
- else {
- break;
- }
- }
-
- // if not found replace uInActiveCount with the largest one.
- if ( ii == (MAX_NODE_NUM + 1)) {
- *puNodeIndex = SelectIndex;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Replace inactive node = %d\n", SelectIndex);
- // clear ps buffer
- if (pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue.next != NULL) {
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue)) != NULL)
- dev_kfree_skb(skb);
- }
- }
- else {
- *puNodeIndex = ii;
- }
-
- memset(&pMgmt->sNodeDBTable[*puNodeIndex], 0, sizeof(KnownNodeDB));
- pMgmt->sNodeDBTable[*puNodeIndex].bActive = true;
- pMgmt->sNodeDBTable[*puNodeIndex].uRatePollTimeout = FALLBACK_POLL_SECOND;
- // for AP mode PS queue
- skb_queue_head_init(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue);
- pMgmt->sNodeDBTable[*puNodeIndex].byAuthSequence = 0;
- pMgmt->sNodeDBTable[*puNodeIndex].wEnQueueCnt = 0;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Create node index = %d\n", ii);
-};
+ struct sk_buff *skb;
+
+ /* Index = 0 reserved for AP Node (In STA mode)
+ Index = 0 reserved for Broadcast/MultiCast (In AP mode) */
+ SelectIndex = 1;
+ for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
+ if (pMgmt->sNodeDBTable[ii].bActive) {
+ if (pMgmt->sNodeDBTable[ii].uInActiveCount > BigestCount) {
+ BigestCount =
+ pMgmt->sNodeDBTable[ii].uInActiveCount;
+ SelectIndex = ii;
+ }
+ } else {
+ break;
+ }
+ }
-/*+
- *
+	/* if no free node was found, reuse the most inactive one */
+ if (ii == (MAX_NODE_NUM + 1)) {
+ *puNodeIndex = SelectIndex;
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Replace inactive node = %d\n", SelectIndex);
+ /* clear ps buffer */
+ if (pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue.next) {
+ while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue)))
+ dev_kfree_skb(skb);
+ }
+ } else {
+ *puNodeIndex = ii;
+ }
+
+ memset(&pMgmt->sNodeDBTable[*puNodeIndex], 0, sizeof(KnownNodeDB));
+ pMgmt->sNodeDBTable[*puNodeIndex].bActive = true;
+ pMgmt->sNodeDBTable[*puNodeIndex].uRatePollTimeout = FALLBACK_POLL_SECOND;
+ /* for AP mode PS queue */
+ skb_queue_head_init(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue);
+ pMgmt->sNodeDBTable[*puNodeIndex].byAuthSequence = 0;
+ pMgmt->sNodeDBTable[*puNodeIndex].wEnQueueCnt = 0;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Create node index = %d\n", ii);
+}
+
+/*
* Routine Description:
- * Remove Node by NodeIndex
+ * Remove Node by NodeIndex
*
*
* Return Value:
- * None
- *
--*/
-
+ * None
+ */
void BSSvRemoveOneNode(struct vnt_private *pDevice, u32 uNodeIndex)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
u8 byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue)) != NULL)
- dev_kfree_skb(skb);
- // clear context
- memset(&pMgmt->sNodeDBTable[uNodeIndex], 0, sizeof(KnownNodeDB));
- // clear tx bit map
- pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[uNodeIndex].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[uNodeIndex].wAID & 7];
-};
-/*+
- *
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue)))
+ dev_kfree_skb(skb);
+ /* clear context */
+ memset(&pMgmt->sNodeDBTable[uNodeIndex], 0, sizeof(KnownNodeDB));
+ /* clear tx bit map */
+ pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[uNodeIndex].wAID >> 3] &=
+ ~byMask[pMgmt->sNodeDBTable[uNodeIndex].wAID & 7];
+}
+
+/*
* Routine Description:
- * Update AP Node content in Index 0 of KnownNodeDB
+ * Update AP Node content in Index 0 of KnownNodeDB
*
*
* Return Value:
- * None
- *
--*/
-
-void BSSvUpdateAPNode(struct vnt_private *pDevice, u16 *pwCapInfo,
- PWLAN_IE_SUPP_RATES pSuppRates, PWLAN_IE_SUPP_RATES pExtSuppRates)
+ * None
+ */
+void BSSvUpdateAPNode(struct vnt_private *pDevice,
+ u16 *pwCapInfo,
+ PWLAN_IE_SUPP_RATES pSuppRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
u32 uRateLen = WLAN_RATES_MAXLEN;
- memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
-
- pMgmt->sNodeDBTable[0].bActive = true;
- if (pDevice->byBBType == BB_TYPE_11B) {
- uRateLen = WLAN_RATES_MAXLEN_11B;
- }
- pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- uRateLen);
- pMgmt->abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pExtSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- uRateLen);
- RATEvParseMaxRate((void *) pDevice,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- true,
- &(pMgmt->sNodeDBTable[0].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[0].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[0].wSuppRate),
- &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate)
- );
- memcpy(pMgmt->sNodeDBTable[0].abyMACAddr, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
- pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxSuppRate;
- pMgmt->sNodeDBTable[0].bShortPreamble = WLAN_GET_CAP_INFO_SHORTPREAMBLE(*pwCapInfo);
- pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND;
- // Auto rate fallback function initiation.
- // RATEbInit(pDevice);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pMgmt->sNodeDBTable[0].wTxDataRate = %d \n", pMgmt->sNodeDBTable[0].wTxDataRate);
+ memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
+
+ pMgmt->sNodeDBTable[0].bActive = true;
+ if (pDevice->byBBType == BB_TYPE_11B)
+ uRateLen = WLAN_RATES_MAXLEN_11B;
+ pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES) pSuppRates,
+ (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrSuppRates,
+ uRateLen);
+ pMgmt->abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES) pExtSuppRates,
+ (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrExtSuppRates,
+ uRateLen);
+ RATEvParseMaxRate((void *) pDevice,
+ (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrSuppRates,
+ (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrExtSuppRates,
+ true,
+ &(pMgmt->sNodeDBTable[0].wMaxBasicRate),
+ &(pMgmt->sNodeDBTable[0].wMaxSuppRate),
+ &(pMgmt->sNodeDBTable[0].wSuppRate),
+ &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate),
+ &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate));
+ memcpy(pMgmt->sNodeDBTable[0].abyMACAddr, pMgmt->abyCurrBSSID,
+ WLAN_ADDR_LEN);
+ pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxSuppRate;
+ pMgmt->sNodeDBTable[0].bShortPreamble =
+ WLAN_GET_CAP_INFO_SHORTPREAMBLE(*pwCapInfo);
+ pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND;
+ /* Auto rate fallback function initiation.
+ * RATEbInit(pDevice); */
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO"pMgmt->sNodeDBTable[0].wTxDataRate = %d\n",
+ pMgmt->sNodeDBTable[0].wTxDataRate);
-};
+}
-/*+
- *
+/*
* Routine Description:
- * Add Multicast Node content in Index 0 of KnownNodeDB
+ * Add Multicast Node content in Index 0 of KnownNodeDB
*
*
* Return Value:
- * None
- *
--*/
-
+ * None
+ */
void BSSvAddMulticastNode(struct vnt_private *pDevice)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
- if (!pDevice->bEnableHostWEP)
- memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
- memset(pMgmt->sNodeDBTable[0].abyMACAddr, 0xff, WLAN_ADDR_LEN);
- pMgmt->sNodeDBTable[0].bActive = true;
- pMgmt->sNodeDBTable[0].bPSEnable = false;
- skb_queue_head_init(&pMgmt->sNodeDBTable[0].sTxPSQueue);
- RATEvParseMaxRate((void *) pDevice,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- true,
- &(pMgmt->sNodeDBTable[0].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[0].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[0].wSuppRate),
- &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate)
- );
- pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxBasicRate;
- pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND;
+ if (!pDevice->bEnableHostWEP)
+ memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
+ memset(pMgmt->sNodeDBTable[0].abyMACAddr, 0xff, WLAN_ADDR_LEN);
+ pMgmt->sNodeDBTable[0].bActive = true;
+ pMgmt->sNodeDBTable[0].bPSEnable = false;
+ skb_queue_head_init(&pMgmt->sNodeDBTable[0].sTxPSQueue);
+ RATEvParseMaxRate((void *) pDevice,
+ (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrSuppRates,
+ (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrExtSuppRates,
+ true,
+ &(pMgmt->sNodeDBTable[0].wMaxBasicRate),
+ &(pMgmt->sNodeDBTable[0].wMaxSuppRate),
+ &(pMgmt->sNodeDBTable[0].wSuppRate),
+ &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate),
+ &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate));
+ pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxBasicRate;
+ pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND;
-};
+}
-/*+
- *
+/*
* Routine Description:
*
*
- * Second call back function to update Node DB info & AP link status
+ * Second call back function to update Node DB info & AP link status
*
*
* Return Value:
- * none.
- *
--*/
-
+ * none.
+ */
void BSSvSecondCallBack(struct work_struct *work)
{
struct vnt_private *pDevice = container_of(work,
@@ -828,342 +818,365 @@ void BSSvSecondCallBack(struct work_struct *work)
if (pDevice->Flags & fMP_DISCONNECTED)
return;
- spin_lock_irq(&pDevice->lock);
-
- pDevice->uAssocCount = 0;
-
- //Power Saving Mode Tx Burst
- if ( pDevice->bEnablePSMode == true ) {
- pDevice->ulPSModeWaitTx++;
- if ( pDevice->ulPSModeWaitTx >= 2 ) {
- pDevice->ulPSModeWaitTx = 0;
- pDevice->bPSModeTxBurst = false;
- }
- }
-
- pDevice->byERPFlag &=
- ~(WLAN_SET_ERP_BARKER_MODE(1) | WLAN_SET_ERP_NONERP_PRESENT(1));
-
- if (pDevice->wUseProtectCntDown > 0) {
- pDevice->wUseProtectCntDown --;
- }
- else {
- // disable protect mode
- pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
- }
-
-if(pDevice->byReAssocCount > 0) {
- pDevice->byReAssocCount++;
- if((pDevice->byReAssocCount > 10) && (pDevice->bLinkPass != true)) { //10 sec timeout
- printk("Re-association timeout!!!\n");
- pDevice->byReAssocCount = 0;
- // if(pDevice->bWPASuppWextEnabled == true)
- {
- union iwreq_data wrqu;
- memset(&wrqu, 0, sizeof (wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
- }
- else if(pDevice->bLinkPass == true)
- pDevice->byReAssocCount = 0;
-}
+ spin_lock_irq(&pDevice->lock);
- pMgmt->eLastState = pMgmt->eCurrState ;
+ pDevice->uAssocCount = 0;
+
+ /* Power Saving Mode Tx Burst */
+ if (pDevice->bEnablePSMode == true) {
+ pDevice->ulPSModeWaitTx++;
+ if (pDevice->ulPSModeWaitTx >= 2) {
+ pDevice->ulPSModeWaitTx = 0;
+ pDevice->bPSModeTxBurst = false;
+ }
+ }
+
+ pDevice->byERPFlag &=
+ ~(WLAN_SET_ERP_BARKER_MODE(1) | WLAN_SET_ERP_NONERP_PRESENT(1));
+
+ if (pDevice->wUseProtectCntDown > 0) {
+ pDevice->wUseProtectCntDown--;
+ } else {
+ /* disable protect mode */
+ pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
+ }
+
+ if (pDevice->byReAssocCount > 0) {
+ pDevice->byReAssocCount++;
+ if (pDevice->byReAssocCount > 10 &&
+ pDevice->bLinkPass != true) { /* 10 sec timeout */
+ printk("Re-association timeout!!!\n");
+ pDevice->byReAssocCount = 0;
+ /* if (pDevice->bWPASuppWextEnabled == true) */
+ {
+ union iwreq_data wrqu;
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
+ wireless_send_event(pDevice->dev, SIOCGIWAP,
+ &wrqu, NULL);
+ }
+ } else if (pDevice->bLinkPass == true) {
+ pDevice->byReAssocCount = 0;
+ }
+ }
+
+ pMgmt->eLastState = pMgmt->eCurrState;
s_uCalculateLinkQual(pDevice);
- for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
-
- if (pMgmt->sNodeDBTable[ii].bActive) {
- // Increase in-activity counter
- pMgmt->sNodeDBTable[ii].uInActiveCount++;
-
- if (ii > 0) {
- if (pMgmt->sNodeDBTable[ii].uInActiveCount > MAX_INACTIVE_COUNT) {
- BSSvRemoveOneNode(pDevice, ii);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
- "Inactive timeout [%d] sec, STA index = [%d] remove\n", MAX_INACTIVE_COUNT, ii);
- continue;
- }
-
- if (pMgmt->sNodeDBTable[ii].eNodeState >= NODE_ASSOC) {
-
- pDevice->uAssocCount++;
-
- // check if Non ERP exist
- if (pMgmt->sNodeDBTable[ii].uInActiveCount < ERP_RECOVER_COUNT) {
- if (!pMgmt->sNodeDBTable[ii].bShortPreamble) {
- pDevice->byERPFlag |= WLAN_SET_ERP_BARKER_MODE(1);
- uLongPreambleSTACnt ++;
- }
- if (!pMgmt->sNodeDBTable[ii].bERPExist) {
- pDevice->byERPFlag |= WLAN_SET_ERP_NONERP_PRESENT(1);
- pDevice->byERPFlag |= WLAN_SET_ERP_USE_PROTECTION(1);
- }
- if (!pMgmt->sNodeDBTable[ii].bShortSlotTime)
- uNonShortSlotSTACnt++;
- }
- }
-
- // check if any STA in PS mode
- if (pMgmt->sNodeDBTable[ii].bPSEnable)
- uSleepySTACnt++;
-
- }
-
- // Rate fallback check
- if (!pDevice->bFixRate) {
- if (ii > 0) {
- // ii = 0 for multicast node (AP & Adhoc)
- RATEvTxRateFallBack((void *)pDevice,
- &(pMgmt->sNodeDBTable[ii]));
- }
- else {
- // ii = 0 reserved for unicast AP node (Infra STA)
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)
- RATEvTxRateFallBack((void *)pDevice,
- &(pMgmt->sNodeDBTable[ii]));
- }
-
- }
-
- // check if pending PS queue
- if (pMgmt->sNodeDBTable[ii].wEnQueueCnt != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index= %d, Queue = %d pending \n",
- ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt);
- if ((ii >0) && (pMgmt->sNodeDBTable[ii].wEnQueueCnt > 15)) {
- BSSvRemoveOneNode(pDevice, ii);
- DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Pending many queues PS STA Index = %d remove \n", ii);
- continue;
- }
- }
- }
-
- }
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->byBBType == BB_TYPE_11G)) {
-
- // on/off protect mode
- if (WLAN_GET_ERP_USE_PROTECTION(pDevice->byERPFlag)) {
- if (!pDevice->bProtectMode) {
- MACvEnableProtectMD(pDevice);
- pDevice->bProtectMode = true;
- }
- }
- else {
- if (pDevice->bProtectMode) {
- MACvDisableProtectMD(pDevice);
- pDevice->bProtectMode = false;
- }
- }
- // on/off short slot time
-
- if (uNonShortSlotSTACnt > 0) {
- if (pDevice->bShortSlotTime) {
- pDevice->bShortSlotTime = false;
- BBvSetShortSlotTime(pDevice);
- vUpdateIFS((void *)pDevice);
- }
- }
- else {
- if (!pDevice->bShortSlotTime) {
- pDevice->bShortSlotTime = true;
- BBvSetShortSlotTime(pDevice);
- vUpdateIFS((void *)pDevice);
- }
- }
-
- // on/off barker long preamble mode
-
- if (uLongPreambleSTACnt > 0) {
- if (!pDevice->bBarkerPreambleMd) {
- MACvEnableBarkerPreambleMd(pDevice);
- pDevice->bBarkerPreambleMd = true;
- }
- }
- else {
- if (pDevice->bBarkerPreambleMd) {
- MACvDisableBarkerPreambleMd(pDevice);
- pDevice->bBarkerPreambleMd = false;
- }
- }
-
- }
-
- // Check if any STA in PS mode, enable DTIM multicast deliver
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (uSleepySTACnt > 0)
- pMgmt->sNodeDBTable[0].bPSEnable = true;
- else
- pMgmt->sNodeDBTable[0].bPSEnable = false;
- }
-
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pCurrSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_STANDBY) ||
- (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)) {
-
- if (pMgmt->sNodeDBTable[0].bActive) { // Assoc with BSS
-
- if (pDevice->bUpdateBBVGA) {
- s_vCheckSensitivity(pDevice);
- s_vCheckPreEDThreshold(pDevice);
- }
-
- if ((pMgmt->sNodeDBTable[0].uInActiveCount >= (LOST_BEACON_COUNT/2)) &&
- (pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) ) {
- pDevice->byBBVGANew = pDevice->abyBBVGA[0];
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_CHANGE_BBSENSITIVITY,
- NULL);
- }
-
- if (pMgmt->sNodeDBTable[0].uInActiveCount >= LOST_BEACON_COUNT) {
- pMgmt->sNodeDBTable[0].bActive = false;
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = false;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- pDevice->bRoaming = true;
- pDevice->bIsRoaming = false;
-
- DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Lost AP beacon [%d] sec, disconnected !\n", pMgmt->sNodeDBTable[0].uInActiveCount);
- /* let wpa supplicant know AP may disconnect */
- {
- union iwreq_data wrqu;
- memset(&wrqu, 0, sizeof (wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
- }
- }
- else if (pItemSSID->len != 0) {
-//Davidwang
- if ((pDevice->bEnableRoaming == true)&&(!(pMgmt->Cisco_cckm))) {
-DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bRoaming %d, !\n", pDevice->bRoaming );
-DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bIsRoaming %d, !\n", pDevice->bIsRoaming );
- if ((pDevice->bRoaming == true)&&(pDevice->bIsRoaming == true)){
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Fast Roaming ...\n");
- BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_BSSID_SCAN,
- pMgmt->abyDesireSSID);
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_SSID,
- pMgmt->abyDesireSSID);
- pDevice->uAutoReConnectTime = 0;
- pDevice->uIsroamingTime = 0;
- pDevice->bRoaming = false;
- }
- else if ((pDevice->bRoaming == false)&&(pDevice->bIsRoaming == true)) {
- pDevice->uIsroamingTime++;
- if (pDevice->uIsroamingTime >= 20)
- pDevice->bIsRoaming = false;
- }
-
- }
-else {
- if (pDevice->uAutoReConnectTime < 10) {
- pDevice->uAutoReConnectTime++;
- //network manager support need not do Roaming scan???
- if(pDevice->bWPASuppWextEnabled ==true)
- pDevice->uAutoReConnectTime = 0;
- }
- else {
- //mike use old encryption status for wpa reauthen
- if(pDevice->bWPADEVUp)
- pDevice->eEncryptionStatus = pDevice->eOldEncryptionStatus;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming ...\n");
- BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
- pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_BSSID_SCAN,
- pMgmt->abyDesireSSID);
- bScheduleCommand((void *) pDevice,
- WLAN_CMD_SSID,
- pMgmt->abyDesireSSID);
- pDevice->uAutoReConnectTime = 0;
- }
- }
- }
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- // if adhoc started which essid is NULL string, rescanning.
- if ((pMgmt->eCurrState == WMAC_STATE_STARTED) && (pCurrSSID->len == 0)) {
- if (pDevice->uAutoReConnectTime < 10) {
- pDevice->uAutoReConnectTime++;
- }
- else {
- DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scanning ...\n");
- pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
- pDevice->uAutoReConnectTime = 0;
- };
- }
- if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
-
- if (pDevice->bUpdateBBVGA) {
- s_vCheckSensitivity(pDevice);
- s_vCheckPreEDThreshold(pDevice);
+ for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
+
+ if (pMgmt->sNodeDBTable[ii].bActive) {
+ /* Increase in-activity counter */
+ pMgmt->sNodeDBTable[ii].uInActiveCount++;
+
+ if (ii > 0) {
+ if (pMgmt->sNodeDBTable[ii].uInActiveCount >
+ MAX_INACTIVE_COUNT) {
+ BSSvRemoveOneNode(pDevice, ii);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Inactive timeout [%d] sec, STA index = [%d] remove\n",
+ MAX_INACTIVE_COUNT, ii);
+ continue;
+ }
+
+ if (pMgmt->sNodeDBTable[ii].eNodeState >=
+ NODE_ASSOC) {
+
+ pDevice->uAssocCount++;
+
+ /* check if Non ERP exist */
+ if (pMgmt->sNodeDBTable[ii].uInActiveCount <
+ ERP_RECOVER_COUNT) {
+ if (!pMgmt->sNodeDBTable[ii].bShortPreamble) {
+ pDevice->byERPFlag |=
+ WLAN_SET_ERP_BARKER_MODE(1);
+ uLongPreambleSTACnt++;
+ }
+ if (!pMgmt->sNodeDBTable[ii].bERPExist) {
+ pDevice->byERPFlag |=
+ WLAN_SET_ERP_NONERP_PRESENT(1);
+ pDevice->byERPFlag |=
+ WLAN_SET_ERP_USE_PROTECTION(1);
+ }
+ if (!pMgmt->sNodeDBTable[ii].bShortSlotTime)
+ uNonShortSlotSTACnt++;
+ }
+ }
+
+ /* check if any STA in PS mode */
+ if (pMgmt->sNodeDBTable[ii].bPSEnable)
+ uSleepySTACnt++;
+
+ }
+
+ /* Rate fallback check */
+ if (!pDevice->bFixRate) {
+ if (ii > 0) {
+ /* ii = 0 for multicast node (AP & Adhoc) */
+ RATEvTxRateFallBack((void *) pDevice,
+ &(pMgmt->sNodeDBTable[ii]));
+ } else if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
+ /* ii = 0 reserved for unicast AP node (Infra STA) */
+ RATEvTxRateFallBack((void *) pDevice,
+ &(pMgmt->sNodeDBTable[ii]));
+ }
+
+ }
+
+ /* check if pending PS queue */
+ if (pMgmt->sNodeDBTable[ii].wEnQueueCnt != 0) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Index= %d, Queue = %d pending\n",
+ ii,
+ pMgmt->sNodeDBTable[ii].wEnQueueCnt);
+ if (ii > 0 &&
+ pMgmt->sNodeDBTable[ii].wEnQueueCnt > 15) {
+ BSSvRemoveOneNode(pDevice, ii);
+ DBG_PRT(MSG_LEVEL_NOTICE,
+ KERN_INFO "Pending many queues PS STA Index = %d remove\n",
+ ii);
+ continue;
+ }
+ }
}
- if (pMgmt->sNodeDBTable[0].uInActiveCount >=ADHOC_LOST_BEACON_COUNT) {
- DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Lost other STA beacon [%d] sec, started !\n", pMgmt->sNodeDBTable[0].uInActiveCount);
- pMgmt->sNodeDBTable[0].uInActiveCount = 0;
- pMgmt->eCurrState = WMAC_STATE_STARTED;
- netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = false;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- }
- }
- }
- if (pDevice->bLinkPass == true) {
- if (pMgmt->eAuthenMode < WMAC_AUTH_WPA ||
- pDevice->fWPA_Authened == true) {
- if (++pDevice->tx_data_time_out > 40) {
- pDevice->tx_trigger = true;
+ }
+
+ if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP &&
+ pDevice->byBBType == BB_TYPE_11G) {
+
+ /* on/off protect mode */
+ if (WLAN_GET_ERP_USE_PROTECTION(pDevice->byERPFlag)) {
+ if (!pDevice->bProtectMode) {
+ MACvEnableProtectMD(pDevice);
+ pDevice->bProtectMode = true;
+ }
+ } else if (pDevice->bProtectMode) {
+ MACvDisableProtectMD(pDevice);
+ pDevice->bProtectMode = false;
+ }
+ /* on/off short slot time */
+
+ if (uNonShortSlotSTACnt > 0) {
+ if (pDevice->bShortSlotTime) {
+ pDevice->bShortSlotTime = false;
+ BBvSetShortSlotTime(pDevice);
+ vUpdateIFS((void *) pDevice);
+ }
+ } else if (!pDevice->bShortSlotTime) {
+ pDevice->bShortSlotTime = true;
+ BBvSetShortSlotTime(pDevice);
+ vUpdateIFS((void *) pDevice);
+ }
+
+ /* on/off barker long preamble mode */
+
+ if (uLongPreambleSTACnt > 0) {
+ if (!pDevice->bBarkerPreambleMd) {
+ MACvEnableBarkerPreambleMd(pDevice);
+ pDevice->bBarkerPreambleMd = true;
+ }
+ } else if (pDevice->bBarkerPreambleMd) {
+ MACvDisableBarkerPreambleMd(pDevice);
+ pDevice->bBarkerPreambleMd = false;
+ }
+
+ }
+
+ /* Check if any STA in PS mode, enable DTIM multicast deliver */
+ if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
+ if (uSleepySTACnt > 0)
+ pMgmt->sNodeDBTable[0].bPSEnable = true;
+ else
+ pMgmt->sNodeDBTable[0].bPSEnable = false;
+ }
+
+ pItemSSID = (PWLAN_IE_SSID) pMgmt->abyDesireSSID;
+ pCurrSSID = (PWLAN_IE_SSID) pMgmt->abyCurrSSID;
+
+ if (pMgmt->eCurrMode == WMAC_MODE_STANDBY ||
+ pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
+
+ if (pMgmt->sNodeDBTable[0].bActive) { /* Assoc with BSS */
+
+ if (pDevice->bUpdateBBVGA) {
+ s_vCheckSensitivity(pDevice);
+ s_vCheckPreEDThreshold(pDevice);
+ }
- PSbSendNullPacket(pDevice);
+ if (pMgmt->sNodeDBTable[0].uInActiveCount >=
+ (LOST_BEACON_COUNT/2) &&
+ pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) {
+ pDevice->byBBVGANew = pDevice->abyBBVGA[0];
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_CHANGE_BBSENSITIVITY,
+ NULL);
+ }
+
+ if (pMgmt->sNodeDBTable[0].uInActiveCount >=
+ LOST_BEACON_COUNT) {
+ pMgmt->sNodeDBTable[0].bActive = false;
+ pMgmt->eCurrMode = WMAC_MODE_STANDBY;
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ netif_stop_queue(pDevice->dev);
+ pDevice->bLinkPass = false;
+ ControlvMaskByte(pDevice,
+ MESSAGE_REQUEST_MACREG,
+ MAC_REG_PAPEDELAY, LEDSTS_STS,
+ LEDSTS_SLOW);
+ pDevice->bRoaming = true;
+ pDevice->bIsRoaming = false;
+
+ DBG_PRT(MSG_LEVEL_NOTICE,
+ KERN_INFO "Lost AP beacon [%d] sec, disconnected !\n",
+ pMgmt->sNodeDBTable[0].uInActiveCount);
+ /* let wpa supplicant know AP may disconnect */
+ {
+ union iwreq_data wrqu;
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
+ wireless_send_event(pDevice->dev,
+ SIOCGIWAP,
+ &wrqu,
+ NULL);
+ }
+ }
+ } else if (pItemSSID->len != 0) {
+ /* Davidwang */
+ if ((pDevice->bEnableRoaming == true) &&
+ (!(pMgmt->Cisco_cckm))) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "bRoaming %d, !\n",
+ pDevice->bRoaming);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "bIsRoaming %d, !\n",
+ pDevice->bIsRoaming);
+ if ((pDevice->bRoaming == true) &&
+ (pDevice->bIsRoaming == true)) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Fast Roaming ...\n");
+ BSSvClearBSSList((void *) pDevice,
+ pDevice->bLinkPass);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_SSID,
+ pMgmt->abyDesireSSID);
+ pDevice->uAutoReConnectTime = 0;
+ pDevice->uIsroamingTime = 0;
+ pDevice->bRoaming = false;
+ } else if (pDevice->bRoaming == false &&
+ pDevice->bIsRoaming == true) {
+ pDevice->uIsroamingTime++;
+ if (pDevice->uIsroamingTime >= 20)
+ pDevice->bIsRoaming = false;
+ }
+ } else if (pDevice->uAutoReConnectTime < 10) {
+ pDevice->uAutoReConnectTime++;
+ /* network manager support need not do Roaming scan??? */
+ if (pDevice->bWPASuppWextEnabled == true)
+ pDevice->uAutoReConnectTime = 0;
+ } else {
+ /* mike use old encryption status for wpa reauthen */
+ if (pDevice->bWPADEVUp)
+ pDevice->eEncryptionStatus =
+ pDevice->eOldEncryptionStatus;
+
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Roaming ...\n");
+ BSSvClearBSSList((void *) pDevice,
+ pDevice->bLinkPass);
+ pMgmt->eScanType = WMAC_SCAN_ACTIVE;
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_SSID,
+ pMgmt->abyDesireSSID);
+ pDevice->uAutoReConnectTime = 0;
+ }
+ }
+ }
+
+ if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
+		/* if adhoc has started with a NULL-string ESSID, rescan */
+ if (pMgmt->eCurrState == WMAC_STATE_STARTED &&
+ pCurrSSID->len == 0) {
+ if (pDevice->uAutoReConnectTime < 10) {
+ pDevice->uAutoReConnectTime++;
+ } else {
+ DBG_PRT(MSG_LEVEL_NOTICE,
+ KERN_INFO "Adhoc re-scanning ...\n");
+ pMgmt->eScanType = WMAC_SCAN_ACTIVE;
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BSSID_SCAN, NULL);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_SSID, NULL);
+ pDevice->uAutoReConnectTime = 0;
+ }
+ }
+ if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
- pDevice->tx_trigger = false;
- pDevice->tx_data_time_out = 0;
+ if (pDevice->bUpdateBBVGA) {
+ s_vCheckSensitivity(pDevice);
+ s_vCheckPreEDThreshold(pDevice);
+ }
+ if (pMgmt->sNodeDBTable[0].uInActiveCount >=
+ ADHOC_LOST_BEACON_COUNT) {
+ DBG_PRT(MSG_LEVEL_NOTICE,
+ KERN_INFO "Lost other STA beacon [%d] sec, started !\n",
+ pMgmt->sNodeDBTable[0].uInActiveCount);
+ pMgmt->sNodeDBTable[0].uInActiveCount = 0;
+ pMgmt->eCurrState = WMAC_STATE_STARTED;
+ netif_stop_queue(pDevice->dev);
+ pDevice->bLinkPass = false;
+ ControlvMaskByte(pDevice,
+ MESSAGE_REQUEST_MACREG,
+ MAC_REG_PAPEDELAY, LEDSTS_STS,
+ LEDSTS_SLOW);
}
}
+ }
+
+ if (pDevice->bLinkPass == true) {
+ if ((pMgmt->eAuthenMode < WMAC_AUTH_WPA ||
+ pDevice->fWPA_Authened == true) &&
+ (++pDevice->tx_data_time_out > 40)) {
+ pDevice->tx_trigger = true;
+
+ PSbSendNullPacket(pDevice);
+
+ pDevice->tx_trigger = false;
+ pDevice->tx_data_time_out = 0;
+ }
if (netif_queue_stopped(pDevice->dev))
netif_wake_queue(pDevice->dev);
}
- spin_unlock_irq(&pDevice->lock);
+ spin_unlock_irq(&pDevice->lock);
schedule_delayed_work(&pDevice->second_callback_work, HZ);
}
-/*+
- *
+/*
* Routine Description:
*
*
- * Update Tx attemps, Tx failure counter in Node DB
+ *    Update Tx attempts, Tx failure counter in Node DB
*
*
* Return Value:
- * none.
- *
--*/
-
-void BSSvUpdateNodeTxCounter(struct vnt_private *pDevice,
- PSStatCounter pStatistic, u8 byTSR, u8 byPktNO)
+ * none.
+ */
+void BSSvUpdateNodeTxCounter(struct vnt_private *pDevice, u8 byTSR, u8 byPktNO)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_tx_pkt_info *pkt_info = pDevice->pkt_info;
u32 uNodeIndex = 0;
u8 byTxRetry;
u16 wRate;
@@ -1174,171 +1187,174 @@ void BSSvUpdateNodeTxCounter(struct vnt_private *pDevice,
u8 byPktNum;
u16 wFIFOCtl;
- byPktNum = (byPktNO & 0x0F) >> 4;
- byTxRetry = (byTSR & 0xF0) >> 4;
- wRate = (u16) (byPktNO & 0xF0) >> 4;
- wFIFOCtl = pStatistic->abyTxPktInfo[byPktNum].wFIFOCtl;
- pbyDestAddr = (u8 *) &( pStatistic->abyTxPktInfo[byPktNum].abyDestAddr[0]);
-
- if (wFIFOCtl & FIFOCTL_AUTO_FB_0) {
- byFallBack = AUTO_FB_0;
- } else if (wFIFOCtl & FIFOCTL_AUTO_FB_1) {
- byFallBack = AUTO_FB_1;
- } else {
- byFallBack = AUTO_FB_NONE;
- }
-
- // Only Unicast using support rates
- if (wFIFOCtl & FIFOCTL_NEEDACK) {
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
- pMgmt->sNodeDBTable[0].uTxAttempts += 1;
- if ( !(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
- // transmit success, TxAttempts at least plus one
- pMgmt->sNodeDBTable[0].uTxOk[MAX_RATE]++;
- if ( (byFallBack == AUTO_FB_NONE) ||
- (wRate < RATE_18M) ) {
- wFallBackRate = wRate;
- } else if (byFallBack == AUTO_FB_0) {
- if (byTxRetry < 5)
- wFallBackRate = awHWRetry0[wRate-RATE_18M][byTxRetry];
- else
- wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
- } else if (byFallBack == AUTO_FB_1) {
- if (byTxRetry < 5)
- wFallBackRate = awHWRetry1[wRate-RATE_18M][byTxRetry];
- else
- wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
- }
- pMgmt->sNodeDBTable[0].uTxOk[wFallBackRate]++;
- } else {
- pMgmt->sNodeDBTable[0].uTxFailures ++;
- }
- pMgmt->sNodeDBTable[0].uTxRetry += byTxRetry;
- if (byTxRetry != 0) {
- pMgmt->sNodeDBTable[0].uTxFail[MAX_RATE]+=byTxRetry;
- if ( (byFallBack == AUTO_FB_NONE) ||
- (wRate < RATE_18M) ) {
- pMgmt->sNodeDBTable[0].uTxFail[wRate]+=byTxRetry;
- } else if (byFallBack == AUTO_FB_0) {
- for (ii = 0; ii < byTxRetry; ii++) {
- if (ii < 5)
- wFallBackRate =
- awHWRetry0[wRate-RATE_18M][ii];
- else
- wFallBackRate =
- awHWRetry0[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
+ byPktNum = (byPktNO & 0x0F) >> 4;
+ byTxRetry = (byTSR & 0xF0) >> 4;
+ wRate = (u16) (byPktNO & 0xF0) >> 4;
+ wFIFOCtl = pkt_info[byPktNum].fifo_ctl;
+ pbyDestAddr = pkt_info[byPktNum].dest_addr;
+
+ if (wFIFOCtl & FIFOCTL_AUTO_FB_0)
+ byFallBack = AUTO_FB_0;
+ else if (wFIFOCtl & FIFOCTL_AUTO_FB_1)
+ byFallBack = AUTO_FB_1;
+ else
+ byFallBack = AUTO_FB_NONE;
+
+ /* Only Unicast using support rates */
+ if (wFIFOCtl & FIFOCTL_NEEDACK) {
+ if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
+ pMgmt->sNodeDBTable[0].uTxAttempts += 1;
+ if (!(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
+ /* transmit success, TxAttempts at least plus one */
+ pMgmt->sNodeDBTable[0].uTxOk[MAX_RATE]++;
+ if ((byFallBack == AUTO_FB_NONE) ||
+ (wRate < RATE_18M)) {
+ wFallBackRate = wRate;
+ } else if (byFallBack == AUTO_FB_0) {
+ if (byTxRetry < 5)
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][byTxRetry];
+ else
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][4];
+ } else if (byFallBack == AUTO_FB_1) {
+ if (byTxRetry < 5)
+ wFallBackRate =
+ awHWRetry1[wRate-RATE_18M][byTxRetry];
+ else
+ wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
+ }
+ pMgmt->sNodeDBTable[0].uTxOk[wFallBackRate]++;
+ } else {
+ pMgmt->sNodeDBTable[0].uTxFailures++;
}
- } else if (byFallBack == AUTO_FB_1) {
- for (ii = 0; ii < byTxRetry; ii++) {
- if (ii < 5)
- wFallBackRate =
- awHWRetry1[wRate-RATE_18M][ii];
- else
- wFallBackRate =
- awHWRetry1[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
+ pMgmt->sNodeDBTable[0].uTxRetry += byTxRetry;
+ if (byTxRetry != 0) {
+ pMgmt->sNodeDBTable[0].uTxFail[MAX_RATE] += byTxRetry;
+ if (byFallBack == AUTO_FB_NONE ||
+ wRate < RATE_18M) {
+ pMgmt->sNodeDBTable[0].uTxFail[wRate] += byTxRetry;
+ } else if (byFallBack == AUTO_FB_0) {
+ for (ii = 0; ii < byTxRetry; ii++) {
+ if (ii < 5)
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][ii];
+ else
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][4];
+ pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
+ }
+ } else if (byFallBack == AUTO_FB_1) {
+ for (ii = 0; ii < byTxRetry; ii++) {
+ if (ii < 5)
+ wFallBackRate =
+ awHWRetry1[wRate-RATE_18M][ii];
+ else
+ wFallBackRate =
+ awHWRetry1[wRate-RATE_18M][4];
+ pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
+ }
+ }
}
- }
- }
- }
-
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ||
- (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
+ }
- if (BSSbIsSTAInNodeDB((void *) pDevice,
- pbyDestAddr,
- &uNodeIndex)) {
+ if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA ||
+ pMgmt->eCurrMode == WMAC_MODE_ESS_AP) &&
+ BSSbIsSTAInNodeDB((void *) pDevice,
+ pbyDestAddr,
+ &uNodeIndex)) {
pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts += 1;
- if ( !(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
- // transmit success, TxAttempts at least plus one
- pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
- if ( (byFallBack == AUTO_FB_NONE) ||
- (wRate < RATE_18M) ) {
- wFallBackRate = wRate;
- } else if (byFallBack == AUTO_FB_0) {
- if (byTxRetry < 5)
- wFallBackRate = awHWRetry0[wRate-RATE_18M][byTxRetry];
- else
- wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
- } else if (byFallBack == AUTO_FB_1) {
- if (byTxRetry < 5)
- wFallBackRate = awHWRetry1[wRate-RATE_18M][byTxRetry];
- else
- wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
- }
- pMgmt->sNodeDBTable[uNodeIndex].uTxOk[wFallBackRate]++;
- } else {
- pMgmt->sNodeDBTable[uNodeIndex].uTxFailures ++;
- }
- pMgmt->sNodeDBTable[uNodeIndex].uTxRetry += byTxRetry;
- if (byTxRetry != 0) {
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[MAX_RATE]+=byTxRetry;
- if ( (byFallBack == AUTO_FB_NONE) ||
- (wRate < RATE_18M) ) {
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wRate]+=byTxRetry;
- } else if (byFallBack == AUTO_FB_0) {
- for (ii = 0; ii < byTxRetry; ii++) {
- if (ii < 5)
- wFallBackRate =
- awHWRetry0[wRate-RATE_18M][ii];
- else
- wFallBackRate =
- awHWRetry0[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
- }
- } else if (byFallBack == AUTO_FB_1) {
- for (ii = 0; ii < byTxRetry; ii++) {
- if (ii < 5)
- wFallBackRate = awHWRetry1[wRate-RATE_18M][ii];
- else
- wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
- }
- }
- }
- }
- }
- }
+ if (!(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
+ /* transmit success, TxAttempts at least plus one */
+ pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
+ if ((byFallBack == AUTO_FB_NONE) ||
+ (wRate < RATE_18M)) {
+ wFallBackRate = wRate;
+ } else if (byFallBack == AUTO_FB_0) {
+ if (byTxRetry < 5)
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][byTxRetry];
+ else
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][4];
+ } else if (byFallBack == AUTO_FB_1) {
+ if (byTxRetry < 5)
+ wFallBackRate =
+ awHWRetry1[wRate-RATE_18M][byTxRetry];
+ else
+ wFallBackRate =
+ awHWRetry1[wRate-RATE_18M][4];
+ }
+ pMgmt->sNodeDBTable[uNodeIndex].uTxOk[wFallBackRate]++;
+ } else {
+ pMgmt->sNodeDBTable[uNodeIndex].uTxFailures++;
+ }
+ pMgmt->sNodeDBTable[uNodeIndex].uTxRetry += byTxRetry;
+ if (byTxRetry != 0) {
+ pMgmt->sNodeDBTable[uNodeIndex].uTxFail[MAX_RATE] += byTxRetry;
+ if ((byFallBack == AUTO_FB_NONE) ||
+ (wRate < RATE_18M)) {
+ pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wRate] += byTxRetry;
+ } else if (byFallBack == AUTO_FB_0) {
+ for (ii = 0; ii < byTxRetry; ii++) {
+ if (ii < 5)
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][ii];
+ else
+ wFallBackRate =
+ awHWRetry0[wRate-RATE_18M][4];
+ pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
+ }
+ } else if (byFallBack == AUTO_FB_1) {
+ for (ii = 0; ii < byTxRetry; ii++) {
+ if (ii < 5)
+ wFallBackRate = awHWRetry1[wRate-RATE_18M][ii];
+ else
+ wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
+ pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
+ }
+ }
+ }
+ }
+ }
}
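
A minimal standalone sketch of the bookkeeping the hunk above performs for BSSvUpdateNodeTxCounter: a delivered frame credits uTxOk[] at the rate the hardware had fallen back to after byTxRetry attempts, while each earlier retry debits uTxFail[] at the rate tried on that attempt, both looked up through the auto-fallback table for the starting rate. The table contents below are invented for illustration, not the driver's actual awHWRetry0 values.

	#include <stdio.h>

	#define RATE_18M  7	/* from datarate.h: 18M is index 7, 54M is index 11 */
	#define MAX_RATE 12

	/* illustrative fallback table: row = start rate - RATE_18M,
	 * column = retry number, entry = rate index used on that attempt */
	static const unsigned awHWRetry0[MAX_RATE - RATE_18M][5] = {
		{  7,  7,  6,  6,  4 },	/* start 18M */
		{  8,  7,  7,  6,  6 },	/* start 24M */
		{  9,  8,  8,  7,  7 },	/* start 36M */
		{ 10,  9,  9,  8,  8 },	/* start 48M */
		{ 11, 10, 10,  9,  9 },	/* start 54M */
	};

	int main(void)
	{
		unsigned uTxOk[MAX_RATE + 1] = { 0 }, uTxFail[MAX_RATE + 1] = { 0 };
		unsigned wRate = 10, byTxRetry = 3, ii, wFallBackRate;

		/* success: credit the rate reached after byTxRetry retries */
		wFallBackRate = awHWRetry0[wRate - RATE_18M][byTxRetry < 5 ? byTxRetry : 4];
		uTxOk[MAX_RATE]++;
		uTxOk[wFallBackRate]++;

		/* each earlier retry is a failure at the rate tried on that attempt */
		for (ii = 0; ii < byTxRetry; ii++) {
			wFallBackRate = awHWRetry0[wRate - RATE_18M][ii < 5 ? ii : 4];
			uTxFail[MAX_RATE]++;
			uTxFail[wFallBackRate]++;
		}

		printf("ok[total]=%u fail[total]=%u\n", uTxOk[MAX_RATE], uTxFail[MAX_RATE]);
		return 0;
	}
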
-/*+
- *
+/*
* Routine Description:
- * Clear Nodes & skb in DB Table
+ * Clear Nodes & skb in DB Table
*
*
* Parameters:
- * In:
- * hDeviceContext - The adapter context.
- * uStartIndex - starting index
- * Out:
- * none
+ * In:
+ * hDeviceContext - The adapter context.
+ * uStartIndex - starting index
+ * Out:
+ * none
*
* Return Value:
- * None.
- *
--*/
-
+ * None.
+ */
void BSSvClearNodeDBTable(struct vnt_private *pDevice, u32 uStartIndex)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
- struct sk_buff *skb;
+ struct sk_buff *skb;
int ii;
- for (ii = uStartIndex; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive) {
- // check if sTxPSQueue has been initial
- if (pMgmt->sNodeDBTable[ii].sTxPSQueue.next != NULL) {
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL){
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "PS skb != NULL %d\n", ii);
- dev_kfree_skb(skb);
- }
- }
- memset(&pMgmt->sNodeDBTable[ii], 0, sizeof(KnownNodeDB));
- }
- }
-};
+ for (ii = uStartIndex; ii < (MAX_NODE_NUM + 1); ii++) {
+ if (pMgmt->sNodeDBTable[ii].bActive) {
+ /* check if sTxPSQueue has been initial */
+ if (pMgmt->sNodeDBTable[ii].sTxPSQueue.next) {
+ while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue))) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "PS skb != NULL %d\n",
+ ii);
+ dev_kfree_skb(skb);
+ }
+ }
+ memset(&pMgmt->sNodeDBTable[ii], 0, sizeof(KnownNodeDB));
+ }
+ }
+}
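
As the routine header above says, clearing a node entry means draining any frames still parked on its power-save queue before zeroing the slot. A hedged sketch of that drain-then-memset pattern using the real skb_dequeue()/dev_kfree_skb() helpers; the node structure here is a made-up stand-in for KnownNodeDB and assumes the queue has already been initialised.

	#include <linux/skbuff.h>
	#include <linux/string.h>

	struct example_node {			/* hypothetical stand-in */
		bool active;
		struct sk_buff_head ps_queue;
	};

	static void example_clear_nodes(struct example_node *tbl, int nr, int start)
	{
		struct sk_buff *skb;
		int ii;

		for (ii = start; ii < nr; ii++) {
			if (!tbl[ii].active)
				continue;
			/* free anything still queued for power save */
			while ((skb = skb_dequeue(&tbl[ii].ps_queue)) != NULL)
				dev_kfree_skb(skb);
			memset(&tbl[ii], 0, sizeof(tbl[ii]));
		}
	}
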
static void s_vCheckSensitivity(struct vnt_private *pDevice)
{
@@ -1346,82 +1362,87 @@ static void s_vCheckSensitivity(struct vnt_private *pDevice)
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
int ii;
- if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
- ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
- pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
- if (pBSSList != NULL) {
- /* Update BB register if RSSI is too strong */
- signed long LocalldBmAverage = 0;
- signed long uNumofdBm = 0;
- for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
- if (pBSSList->ldBmAverage[ii] != 0) {
- uNumofdBm ++;
- LocalldBmAverage += pBSSList->ldBmAverage[ii];
- }
- }
- if (uNumofdBm > 0) {
- LocalldBmAverage = LocalldBmAverage/uNumofdBm;
- for (ii=0;ii<BB_VGA_LEVEL;ii++) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"LocalldBmAverage:%ld, %ld %02x\n", LocalldBmAverage, pDevice->ldBmThreshold[ii], pDevice->abyBBVGA[ii]);
- if (LocalldBmAverage < pDevice->ldBmThreshold[ii]) {
- pDevice->byBBVGANew = pDevice->abyBBVGA[ii];
- break;
- }
- }
- if (pDevice->byBBVGANew != pDevice->byBBVGACurrent) {
- pDevice->uBBVGADiffCount++;
- if (pDevice->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD)
- bScheduleCommand(pDevice,
- WLAN_CMD_CHANGE_BBSENSITIVITY,
- NULL);
- } else {
- pDevice->uBBVGADiffCount = 0;
- }
- }
- }
- }
+ if (pMgmt->eCurrState == WMAC_STATE_ASSOC ||
+ (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA &&
+ pMgmt->eCurrState == WMAC_STATE_JOINTED)) {
+ pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID,
+ (PWLAN_IE_SSID) pMgmt->abyCurrSSID);
+ if (pBSSList) {
+ /* Update BB register if RSSI is too strong */
+ signed long LocalldBmAverage = 0;
+ signed long uNumofdBm = 0;
+ for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
+ if (pBSSList->ldBmAverage[ii] != 0) {
+ uNumofdBm++;
+ LocalldBmAverage += pBSSList->ldBmAverage[ii];
+ }
+ }
+ if (uNumofdBm > 0) {
+ LocalldBmAverage = LocalldBmAverage/uNumofdBm;
+ for (ii = 0; ii < BB_VGA_LEVEL; ii++) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO"LocalldBmAverage:%ld, %ld %02x\n",
+ LocalldBmAverage,
+ pDevice->ldBmThreshold[ii],
+ pDevice->abyBBVGA[ii]);
+ if (LocalldBmAverage < pDevice->ldBmThreshold[ii]) {
+ pDevice->byBBVGANew =
+ pDevice->abyBBVGA[ii];
+ break;
+ }
+ }
+ if (pDevice->byBBVGANew !=
+ pDevice->byBBVGACurrent) {
+ pDevice->uBBVGADiffCount++;
+ if (pDevice->uBBVGADiffCount >=
+ BB_VGA_CHANGE_THRESHOLD)
+ bScheduleCommand(pDevice,
+ WLAN_CMD_CHANGE_BBSENSITIVITY,
+ NULL);
+ } else {
+ pDevice->uBBVGADiffCount = 0;
+ }
+ }
+ }
+ }
}
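
A compact sketch of the gain-selection logic in s_vCheckSensitivity above, outside the driver: average the non-zero per-sample dBm readings, pick the first VGA entry whose threshold the average falls below, and only schedule a baseband change once the candidate has differed from the current setting for several consecutive polls. The threshold and gain tables are illustrative, not the driver's ldBmThreshold/abyBBVGA values.

	#include <stdio.h>

	#define RSSI_STAT_COUNT         10
	#define BB_VGA_LEVEL             4
	#define BB_VGA_CHANGE_THRESHOLD  3

	int main(void)
	{
		static const long threshold[BB_VGA_LEVEL] = { -80, -65, -50, 0 };
		static const unsigned char vga[BB_VGA_LEVEL] = { 0x28, 0x20, 0x10, 0x00 };
		long samples[RSSI_STAT_COUNT] = { -72, -74, 0, -71, -73, 0, 0, -70, -75, -72 };
		long sum = 0, avg;
		int n = 0, ii;
		unsigned char vga_cur = 0x10, vga_new = vga_cur;
		static int diff_count;		/* persists across polls in the driver */

		for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
			if (samples[ii] != 0) {	/* unused slots stay 0 */
				n++;
				sum += samples[ii];
			}
		}
		if (n == 0)
			return 0;
		avg = sum / n;

		for (ii = 0; ii < BB_VGA_LEVEL; ii++) {
			if (avg < threshold[ii]) {
				vga_new = vga[ii];
				break;
			}
		}

		if (vga_new != vga_cur) {
			/* only reprogram the baseband after the change persists */
			if (++diff_count >= BB_VGA_CHANGE_THRESHOLD)
				printf("schedule VGA change to 0x%02x (avg %ld dBm)\n",
				       vga_new, avg);
		} else {
			diff_count = 0;
		}
		return 0;
	}
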
static void s_uCalculateLinkQual(struct vnt_private *pDevice)
{
+ struct net_device_stats *stats = &pDevice->stats;
unsigned long TxOkRatio, TxCnt;
unsigned long RxOkRatio, RxCnt;
unsigned long RssiRatio;
+ unsigned long qual;
long ldBm;
-TxCnt = pDevice->scStatistic.TxNoRetryOkCount +
- pDevice->scStatistic.TxRetryOkCount +
- pDevice->scStatistic.TxFailCount;
-RxCnt = pDevice->scStatistic.RxFcsErrCnt +
- pDevice->scStatistic.RxOkCnt;
-TxOkRatio = (TxCnt < 6) ? 4000:((pDevice->scStatistic.TxNoRetryOkCount * 4000) / TxCnt);
-RxOkRatio = (RxCnt < 6) ? 2000:((pDevice->scStatistic.RxOkCnt * 2000) / RxCnt);
-//decide link quality
-if(pDevice->bLinkPass !=true)
-{
- pDevice->scStatistic.LinkQuality = 0;
- pDevice->scStatistic.SignalStren = 0;
-}
-else
-{
- RFvRSSITodBm(pDevice, (u8)(pDevice->uCurrRSSI), &ldBm);
- if(-ldBm < 50) {
- RssiRatio = 4000;
- }
- else if(-ldBm > 90) {
- RssiRatio = 0;
- }
- else {
- RssiRatio = (40-(-ldBm-50))*4000/40;
- }
- pDevice->scStatistic.SignalStren = RssiRatio/40;
- pDevice->scStatistic.LinkQuality = (RssiRatio+TxOkRatio+RxOkRatio)/100;
-}
- pDevice->scStatistic.RxFcsErrCnt = 0;
- pDevice->scStatistic.RxOkCnt = 0;
- pDevice->scStatistic.TxFailCount = 0;
- pDevice->scStatistic.TxNoRetryOkCount = 0;
- pDevice->scStatistic.TxRetryOkCount = 0;
+ TxCnt = stats->tx_packets + pDevice->wstats.discard.retries;
+
+ RxCnt = stats->rx_packets + stats->rx_frame_errors;
+
+ TxOkRatio = (TxCnt < 6) ? 4000:((stats->tx_packets * 4000) / TxCnt);
+
+ RxOkRatio = (RxCnt < 6) ? 2000 :
+ ((stats->rx_packets * 2000) / RxCnt);
+
+ /* decide link quality */
+ if (pDevice->bLinkPass != true) {
+ pDevice->wstats.qual.qual = 0;
+ } else {
+ RFvRSSITodBm(pDevice, (u8) (pDevice->uCurrRSSI), &ldBm);
+ if (-ldBm < 50)
+ RssiRatio = 4000;
+ else if (-ldBm > 90)
+ RssiRatio = 0;
+ else
+ RssiRatio = (40-(-ldBm-50)) * 4000 / 40;
+
+ qual = (RssiRatio + TxOkRatio + RxOkRatio) / 100;
+ if (qual < 100)
+ pDevice->wstats.qual.qual = (u8) qual;
+ else
+ pDevice->wstats.qual.qual = 100;
+ }
}
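
The rewritten s_uCalculateLinkQual above folds three ratios (RSSI, TX success, RX success) into one 0-100 figure. A worked standalone version of the same arithmetic; the counters are plain local variables here rather than the driver's net_device_stats/wstats fields.

	#include <stdio.h>
	#include <stdbool.h>

	int main(void)
	{
		unsigned long tx_packets = 180, retries = 20;		/* example counters */
		unsigned long rx_packets = 950, rx_frame_errors = 50;
		long ldBm = -62;					/* current RSSI */
		bool link_pass = true;
		unsigned long TxCnt, RxCnt, TxOkRatio, RxOkRatio, RssiRatio, qual;

		TxCnt = tx_packets + retries;
		RxCnt = rx_packets + rx_frame_errors;

		/* too few samples => assume the scale maxima (4000/2000) */
		TxOkRatio = (TxCnt < 6) ? 4000 : (tx_packets * 4000) / TxCnt;
		RxOkRatio = (RxCnt < 6) ? 2000 : (rx_packets * 2000) / RxCnt;

		if (!link_pass) {
			qual = 0;
		} else {
			if (-ldBm < 50)		/* stronger than -50 dBm */
				RssiRatio = 4000;
			else if (-ldBm > 90)	/* weaker than -90 dBm */
				RssiRatio = 0;
			else
				RssiRatio = (40 - (-ldBm - 50)) * 4000 / 40;

			qual = (RssiRatio + TxOkRatio + RxOkRatio) / 100;
			if (qual > 100)
				qual = 100;
		}

		printf("qual = %lu\n", qual);	/* 83 with the numbers above */
		return 0;
	}
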
void BSSvClearAnyBSSJoinRecord(struct vnt_private *pDevice)
@@ -1440,13 +1461,17 @@ static void s_vCheckPreEDThreshold(struct vnt_private *pDevice)
PKnownBSS pBSSList = NULL;
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
- if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
- ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
- pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
- if (pBSSList != NULL) {
- pDevice->byBBPreEDRSSI = (u8) (~(pBSSList->ldBmAverRange) + 1);
- BBvUpdatePreEDThreshold(pDevice, false);
- }
- }
+ if (pMgmt->eCurrState == WMAC_STATE_ASSOC ||
+ (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA &&
+ pMgmt->eCurrState == WMAC_STATE_JOINTED)) {
+ pBSSList = BSSpAddrIsInBSSList(pDevice,
+ pMgmt->abyCurrBSSID,
+ (PWLAN_IE_SSID) pMgmt->abyCurrSSID);
+ if (pBSSList) {
+ pDevice->byBBPreEDRSSI =
+ (u8) (~(pBSSList->ldBmAverRange) + 1);
+ BBvUpdatePreEDThreshold(pDevice, false);
+ }
+ }
}
diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h
index fc418555bc4d..8df3fb2a6199 100644
--- a/drivers/staging/vt6656/bssdb.h
+++ b/drivers/staging/vt6656/bssdb.h
@@ -34,7 +34,6 @@
#include "80211hdr.h"
#include "80211mgr.h"
#include "card.h"
-#include "mib.h"
#define MAX_NODE_NUM 64
#define MAX_BSS_NUM 42
@@ -264,8 +263,7 @@ void BSSvUpdateAPNode(struct vnt_private *, u16 *pwCapInfo,
void BSSvSecondCallBack(struct work_struct *work);
-void BSSvUpdateNodeTxCounter(struct vnt_private *, PSStatCounter pStatistic,
- u8 byTSR, u8 byPktNO);
+void BSSvUpdateNodeTxCounter(struct vnt_private *, u8 byTSR, u8 byPktNO);
void BSSvRemoveOneNode(struct vnt_private *, u32 uNodeIndex);
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 19d3cf451b88..0d8772858f09 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -66,7 +66,7 @@ static int msglevel =MSG_LEVEL_INFO;
//const u16 cwRXBCNTSFOff[MAX_RATE] =
//{17, 34, 96, 192, 34, 23, 17, 11, 8, 5, 4, 3};
-const u16 cwRXBCNTSFOff[MAX_RATE] =
+static const u16 cwRXBCNTSFOff[MAX_RATE] =
{192, 96, 34, 17, 34, 23, 17, 11, 8, 5, 4, 3};
/*
@@ -75,52 +75,48 @@ const u16 cwRXBCNTSFOff[MAX_RATE] =
* Parameters:
* In:
* pDevice - The adapter to be set
- * uConnectionChannel - Channel to be set
+ * connection_channel - Channel to be set
* Out:
* none
*/
-void CARDbSetMediaChannel(struct vnt_private *pDevice, u32 uConnectionChannel)
+void CARDbSetMediaChannel(struct vnt_private *priv, u32 connection_channel)
{
- if (pDevice->byBBType == BB_TYPE_11A) { // 15 ~ 38
- if ((uConnectionChannel < (CB_MAX_CHANNEL_24G+1)) || (uConnectionChannel > CB_MAX_CHANNEL))
- uConnectionChannel = (CB_MAX_CHANNEL_24G+1);
- } else {
- if ((uConnectionChannel > CB_MAX_CHANNEL_24G) || (uConnectionChannel == 0)) // 1 ~ 14
- uConnectionChannel = 1;
- }
-
- // clear NAV
- MACvRegBitsOn(pDevice, MAC_REG_MACCR, MACCR_CLRNAV);
-
- // Set Channel[7] = 0 to tell H/W channel is changing now.
- MACvRegBitsOff(pDevice, MAC_REG_CHANNEL, 0x80);
-
- //if (pMgmt->uCurrChannel == uConnectionChannel)
- // return bResult;
-
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_SELECT_CHANNLE,
- (u16) uConnectionChannel,
- 0,
- 0,
- NULL
- );
+ if (priv->byBBType == BB_TYPE_11A) {
+ if ((connection_channel < (CB_MAX_CHANNEL_24G + 1)) ||
+ (connection_channel > CB_MAX_CHANNEL))
+ connection_channel = (CB_MAX_CHANNEL_24G + 1);
+ } else {
+ if ((connection_channel > CB_MAX_CHANNEL_24G) ||
+ (connection_channel == 0))
+ connection_channel = 1;
+ }
- //{{ RobertYu: 20041202
- //// TX_PE will reserve 3 us for MAX2829 A mode only, it is for better TX throughput
+ /* clear NAV */
+ MACvRegBitsOn(priv, MAC_REG_MACCR, MACCR_CLRNAV);
+
+ /* Set Channel[7] = 0 to tell H/W channel is changing now. */
+ MACvRegBitsOff(priv, MAC_REG_CHANNEL, 0xb0);
+
+ CONTROLnsRequestOut(priv, MESSAGE_TYPE_SELECT_CHANNLE,
+ connection_channel, 0, 0, NULL);
+
+ if (priv->byBBType == BB_TYPE_11A) {
+ priv->byCurPwr = 0xff;
+ RFbRawSetPower(priv,
+ priv->abyOFDMAPwrTbl[connection_channel-15], RATE_54M);
+ } else if (priv->byBBType == BB_TYPE_11G) {
+ priv->byCurPwr = 0xff;
+ RFbRawSetPower(priv,
+ priv->abyOFDMPwrTbl[connection_channel-1], RATE_54M);
+ } else {
+ priv->byCurPwr = 0xff;
+ RFbRawSetPower(priv,
+ priv->abyCCKPwrTbl[connection_channel-1], RATE_1M);
+ }
- if (pDevice->byBBType == BB_TYPE_11A) {
- pDevice->byCurPwr = 0xFF;
- RFbRawSetPower(pDevice, pDevice->abyOFDMAPwrTbl[uConnectionChannel-15], RATE_54M);
- } else if (pDevice->byBBType == BB_TYPE_11G) {
- pDevice->byCurPwr = 0xFF;
- RFbRawSetPower(pDevice, pDevice->abyOFDMPwrTbl[uConnectionChannel-1], RATE_54M);
- } else {
- pDevice->byCurPwr = 0xFF;
- RFbRawSetPower(pDevice, pDevice->abyCCKPwrTbl[uConnectionChannel-1], RATE_1M);
- }
- ControlvWriteByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_CHANNEL,(u8)(uConnectionChannel|0x80));
+ ControlvWriteByte(priv, MESSAGE_REQUEST_MACREG, MAC_REG_CHANNEL,
+ (u8)(connection_channel|0x80));
}
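
The reworked CARDbSetMediaChannel() above first clamps the requested channel into the valid range for the current band, then programs the power table matching the baseband type. A small sketch of just that selection step; the channel-count constants are assumptions taken from the usual driver headers, not quoted from this patch.

	#include <stdio.h>

	#define CB_MAX_CHANNEL_24G 14	/* assumed values; see the driver's headers */
	#define CB_MAX_CHANNEL     56

	enum { BB_TYPE_11B, BB_TYPE_11G, BB_TYPE_11A };

	static unsigned clamp_channel(int bb_type, unsigned ch)
	{
		if (bb_type == BB_TYPE_11A) {
			/* 5 GHz band: only indices above the 2.4 GHz range */
			if (ch < CB_MAX_CHANNEL_24G + 1 || ch > CB_MAX_CHANNEL)
				ch = CB_MAX_CHANNEL_24G + 1;
		} else {
			/* 2.4 GHz band: channels 1..14 */
			if (ch == 0 || ch > CB_MAX_CHANNEL_24G)
				ch = 1;
		}
		return ch;
	}

	int main(void)
	{
		unsigned ch = clamp_channel(BB_TYPE_11G, 0);	/* -> 1 */

		/*
		 * After clamping, the function indexes one of three tables:
		 *   11A:  abyOFDMAPwrTbl[ch - 15] at RATE_54M
		 *   11G:  abyOFDMPwrTbl[ch - 1]  at RATE_54M
		 *   else: abyCCKPwrTbl[ch - 1]   at RATE_1M
		 */
		printf("channel after clamp: %u\n", ch);
		return 0;
	}
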
/*
@@ -205,7 +201,7 @@ static u16 swGetOFDMControlRate(struct vnt_private *pDevice, u16 wRateIdx)
* Return Value: none
*
*/
-void
+static void
CARDvCalculateOFDMRParameter (
u16 wRate,
u8 byBBType,
@@ -724,28 +720,20 @@ bool CARDbClearCurrentTSF(struct vnt_private *pDevice)
*/
u64 CARDqGetNextTBTT(u64 qwTSF, u16 wBeaconInterval)
{
+ u32 uBeaconInterval;
- unsigned int uLowNextTBTT;
- unsigned int uHighRemain, uLowRemain;
- unsigned int uBeaconInterval;
-
- uBeaconInterval = wBeaconInterval * 1024;
- // Next TBTT = ((local_current_TSF / beacon_interval) + 1 ) * beacon_interval
- uLowNextTBTT = ((qwTSF & 0xffffffffU) >> 10) << 10;
- uLowRemain = (uLowNextTBTT) % uBeaconInterval;
- uHighRemain = ((0x80000000 % uBeaconInterval) * 2 * (u32)(qwTSF >> 32))
- % uBeaconInterval;
- uLowRemain = (uHighRemain + uLowRemain) % uBeaconInterval;
- uLowRemain = uBeaconInterval - uLowRemain;
+ uBeaconInterval = wBeaconInterval * 1024;
- // check if carry when add one beacon interval
- if ((~uLowNextTBTT) < uLowRemain)
- qwTSF = ((qwTSF >> 32) + 1) << 32;
-
- qwTSF = (qwTSF & 0xffffffff00000000ULL) |
- (u64)(uLowNextTBTT + uLowRemain);
+ /* Next TBTT =
+ * ((local_current_TSF / beacon_interval) + 1) * beacon_interval
+ */
+ if (uBeaconInterval) {
+ do_div(qwTSF, uBeaconInterval);
+ qwTSF += 1;
+ qwTSF *= uBeaconInterval;
+ }
- return (qwTSF);
+ return qwTSF;
}
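
The comment in the hunk above spells out the formula: next_TBTT = (floor(TSF / beacon_interval) + 1) * beacon_interval, with the interval given in TUs (1 TU = 1024 us). The rewrite replaces the manual 32/32-bit carry handling with a single do_div() sequence. A userspace sketch of the same arithmetic:

	#include <stdio.h>
	#include <stdint.h>

	/* qwTSF in microseconds, wBeaconInterval in TUs (1 TU = 1024 us) */
	static uint64_t next_tbtt(uint64_t qwTSF, uint16_t wBeaconInterval)
	{
		uint32_t uBeaconInterval = (uint32_t)wBeaconInterval * 1024;

		/* next TBTT = ((TSF / beacon_interval) + 1) * beacon_interval */
		if (uBeaconInterval) {
			qwTSF /= uBeaconInterval;	/* do_div() in the kernel */
			qwTSF += 1;
			qwTSF *= uBeaconInterval;
		}
		return qwTSF;
	}

	int main(void)
	{
		/* 100 TU interval = 102400 us; TSF just past the 5th beacon */
		printf("%llu\n", (unsigned long long)next_tbtt(512345, 100)); /* 614400 */
		return 0;
	}
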
/*
diff --git a/drivers/staging/vt6656/channel.c b/drivers/staging/vt6656/channel.c
index e430b35463b6..5a4fa0e2581b 100644
--- a/drivers/staging/vt6656/channel.c
+++ b/drivers/staging/vt6656/channel.c
@@ -423,8 +423,7 @@ void CHvInitChannelTable(struct vnt_private *pDevice)
break;
}
- if ((pDevice->dwDiagRefCount != 0) ||
- (pDevice->b11hEable == true)) {
+ if (pDevice->b11hEable == true) {
if (bMultiBand == true) {
for (ii = 0; ii < CB_MAX_CHANNEL; ii++) {
sChannelTbl[ii+1].bValid = true;
diff --git a/drivers/staging/vt6656/datarate.c b/drivers/staging/vt6656/datarate.c
index af9eab0c00a3..547db6f0c53f 100644
--- a/drivers/staging/vt6656/datarate.c
+++ b/drivers/staging/vt6656/datarate.c
@@ -45,7 +45,7 @@
/* static int msglevel = MSG_LEVEL_DEBUG; */
static int msglevel = MSG_LEVEL_INFO;
-const u8 acbyIERate[MAX_RATE] = {0x02, 0x04, 0x0B, 0x16, 0x0C, 0x12, 0x18,
+static const u8 acbyIERate[MAX_RATE] = {0x02, 0x04, 0x0B, 0x16, 0x0C, 0x12, 0x18,
0x24, 0x30, 0x48, 0x60, 0x6C};
#define AUTORATE_TXOK_CNT 0x0400
diff --git a/drivers/staging/vt6656/datarate.h b/drivers/staging/vt6656/datarate.h
index 43cb77894b66..96252adf1ea6 100644
--- a/drivers/staging/vt6656/datarate.h
+++ b/drivers/staging/vt6656/datarate.h
@@ -52,7 +52,6 @@
#define RATE_48M 10
#define RATE_54M 11
#define RATE_AUTO 12
-#define MAX_RATE 12
void RATEvParseMaxRate(struct vnt_private *, PWLAN_IE_SUPP_RATES pItemRates,
PWLAN_IE_SUPP_RATES pItemExtRates, int bUpdateBasicRate,
diff --git a/drivers/staging/vt6656/desc.h b/drivers/staging/vt6656/desc.h
index afe7074c3037..7c6dd5f52295 100644
--- a/drivers/staging/vt6656/desc.h
+++ b/drivers/staging/vt6656/desc.h
@@ -146,14 +146,6 @@
/*
* TX FIFO header
*/
-
-typedef struct tagSTxShortBufHead {
- u16 wFIFOCtl;
- u16 wTimeStamp;
-} __attribute__ ((__packed__))
-STxShortBufHead, *PSTxShortBufHead;
-typedef const STxShortBufHead *PCSTxShortBufHead;
-
typedef struct tagSBEACONCtl {
u32 BufReady:1;
u32 TSF:15;
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 62b7de19b371..1f422574c3d8 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -32,7 +32,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
@@ -44,6 +43,7 @@
#include <net/cfg80211.h>
#include <linux/timer.h>
#include <linux/usb.h>
+#include <linux/crc32.h>
#ifdef SIOCETHTOOL
#define DEVICE_ETHTOOL_IOCTL_SUPPORT
@@ -69,12 +69,12 @@
#include "tether.h"
#include "wmgr.h"
#include "wcmd.h"
-#include "mib.h"
#include "srom.h"
#include "rc4.h"
#include "desc.h"
#include "key.h"
#include "card.h"
+#include "rndis.h"
#define VNT_USB_VENDOR_ID 0x160a
#define VNT_USB_PRODUCT_ID 0x3184
@@ -149,11 +149,9 @@ typedef enum __device_msg_level {
MSG_LEVEL_DEBUG = 4 /* Only for debug purpose. */
} DEVICE_MSG_LEVEL, *PDEVICE_MSG_LEVEL;
-typedef enum __device_init_type {
- DEVICE_INIT_COLD = 0, /* cold init */
- DEVICE_INIT_RESET, /* reset init or Dx to D0 power remain */
- DEVICE_INIT_DXPL /* Dx to D0 power lost init */
-} DEVICE_INIT_TYPE, *PDEVICE_INIT_TYPE;
+#define DEVICE_INIT_COLD 0x0 /* cold init */
+#define DEVICE_INIT_RESET 0x1 /* reset init or Dx to D0 power remain */
+#define DEVICE_INIT_DXPL 0x2 /* Dx to D0 power lost init */
/* USB */
@@ -189,6 +187,12 @@ struct vnt_usb_send_context {
unsigned char Data[MAX_TOTAL_SIZE_WITH_ALL_HEADERS];
};
+/* tx packet info for rxtx */
+struct vnt_tx_pkt_info {
+ u16 fifo_ctl;
+ u8 dest_addr[ETH_ALEN];
+};
+
/* structure got from configuration file as user-desired default settings */
typedef struct _DEFAULT_CONFIG {
signed int ZoneType;
@@ -430,6 +434,7 @@ struct vnt_private {
/* Variables to track resources for the BULK Out Pipe */
struct vnt_usb_send_context *apTD[CB_MAX_TX_DESC];
u32 cbTD;
+ struct vnt_tx_pkt_info pkt_info[16];
/* Variables to track resources for the Interrupt In Pipe */
INT_BUFFER intBuf;
@@ -467,16 +472,13 @@ struct vnt_private {
u8 byOriginalZonetype;
int bLinkPass; /* link status: OK or fail */
+ struct vnt_cmd_card_init init_command;
+ struct vnt_rsp_card_init init_response;
u8 abyCurrentNetAddr[ETH_ALEN];
u8 abyPermanentNetAddr[ETH_ALEN];
int bExistSWNetAddr;
- /* Adapter statistics */
- SStatCounter scStatistic;
- /* 802.11 counter */
- SDot11Counters s802_11Counter;
-
/* Maintain statistical debug info. */
unsigned long packetsReceived;
unsigned long packetsReceivedDropped;
@@ -596,7 +598,6 @@ struct vnt_private {
int bCCK;
int bEncryptionEnable;
- int bLongHeader;
int bShortSlotTime;
int bProtectMode;
int bNonERPPresent;
@@ -666,8 +667,6 @@ struct vnt_private {
u8 abyPRNG[WLAN_WEPMAX_KEYLEN+3];
u8 byKeyIndex;
- int bAES;
-
u32 uKeyLength;
u8 abyKey[WLAN_WEP232_KEYLEN];
@@ -695,7 +694,6 @@ struct vnt_private {
u8 byBBPreEDIndex;
int bRadioCmd;
- u32 dwDiagRefCount;
/* For FOE Tuning */
u8 byFOETuning;
diff --git a/drivers/staging/vt6656/device_cfg.h b/drivers/staging/vt6656/device_cfg.h
index a97f7bb13db8..0b9d8349c2e4 100644
--- a/drivers/staging/vt6656/device_cfg.h
+++ b/drivers/staging/vt6656/device_cfg.h
@@ -65,6 +65,8 @@ struct _version {
#define DEVICE_VERSION "1.19_12"
#endif
+#define MAX_RATE 12
+
/* config file */
#include <linux/fs.h>
#include <linux/fcntl.h>
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index 75dc92d64056..eca04c0c1d97 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -38,6 +38,7 @@
*
*/
+#include "dpc.h"
#include "device.h"
#include "rxtx.h"
#include "tether.h"
@@ -59,7 +60,7 @@
//static int msglevel =MSG_LEVEL_DEBUG;
static int msglevel =MSG_LEVEL_INFO;
-const u8 acbyRxRate[MAX_RATE] =
+static const u8 acbyRxRate[MAX_RATE] =
{2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108};
static u8 s_byGetRateIdx(u8 byRate);
@@ -291,12 +292,14 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
if (BytesToIndicate != FrameSize) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"------- WRONG Length 1\n");
+ pStats->rx_frame_errors++;
return false;
}
if ((BytesToIndicate > 2372) || (BytesToIndicate <= 40)) {
// Frame Size error drop this packet.
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---------- WRONG Length 2\n");
+ pStats->rx_frame_errors++;
return false;
}
@@ -314,6 +317,7 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
(BytesToIndicate < (*pwPLCP_Length)) ) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Wrong PLCP Length %x\n", (int) *pwPLCP_Length);
+ pStats->rx_frame_errors++;
return false;
}
for ( ii=RATE_1M;ii<MAX_RATE;ii++) {
@@ -344,16 +348,6 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
FrameSize = *pwPLCP_Length;
pbyFrame = pbyDAddress + 8;
- // update receive statistic counter
-
- STAvUpdateRDStatCounter(&pDevice->scStatistic,
- *pbyRsr,
- *pbyNewRsr,
- *pbyRxSts,
- *pbyRxRate,
- pbyFrame,
- FrameSize
- );
pMACHeader = (struct ieee80211_hdr *) pbyFrame;
@@ -370,7 +364,6 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
if (!is_multicast_ether_addr(pMACHeader->addr1)) {
if (WCTLbIsDuplicate(&(pDevice->sDupRxCache), (struct ieee80211_hdr *) pbyFrame)) {
- pDevice->s802_11Counter.FrameDuplicateCount++;
return false;
}
@@ -450,14 +443,6 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
(pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) ||
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {
-
- if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP)) {
- pDevice->s802_11Counter.TKIPICVErrors++;
- } else if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_CCMP)) {
- pDevice->s802_11Counter.CCMPDecryptErrors++;
- } else if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_WEP)) {
-// pDevice->s802_11Counter.WEPICVErrorCount.QuadPart++;
- }
}
return false;
}
@@ -482,7 +467,6 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
) {
// defragment
bDeFragRx = WCTLbHandleFragment(pDevice, (struct ieee80211_hdr *) (pbyFrame), FrameSize, bIsWEP, bExtIV);
- pDevice->s802_11Counter.ReceivedFragmentCount++;
if (bDeFragRx) {
// defrag complete
// TODO skb, pbyFrame
@@ -760,8 +744,6 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
(pDevice->bRxMICFail == true)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC comparison is fail!\n");
pDevice->bRxMICFail = false;
- //pDevice->s802_11Counter.TKIPLocalMICFailures.QuadPart++;
- pDevice->s802_11Counter.TKIPLocalMICFailures++;
if (bDeFragRx) {
if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s: can not alloc more frag bufs\n",
@@ -824,12 +806,6 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
(dwRxTSC47_16 <= dwLocalTSC47_16) &&
!((dwRxTSC47_16 == 0) && (dwLocalTSC47_16 == 0xFFFFFFFF))) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"TSC is illegal~~!\n ");
- if (pKey->byCipherSuite == KEY_CTL_TKIP)
- //pDevice->s802_11Counter.TKIPReplays.QuadPart++;
- pDevice->s802_11Counter.TKIPReplays++;
- else
- //pDevice->s802_11Counter.CCMPReplays.QuadPart++;
- pDevice->s802_11Counter.CCMPReplays++;
if (bDeFragRx) {
if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
@@ -1061,19 +1037,9 @@ static int s_bHandleRxEncryption(struct vnt_private *pDevice, u8 *pbyFrame,
if (pKey == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey == NULL\n");
- if (byDecMode == KEY_CTL_WEP) {
-// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == true) {
-// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
- }
return false;
}
if (byDecMode != pKey->byCipherSuite) {
- if (byDecMode == KEY_CTL_WEP) {
-// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == true) {
-// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
- }
*pKeyOut = NULL;
return false;
}
@@ -1164,11 +1130,6 @@ static int s_bHostWepRxEncryption(struct vnt_private *pDevice, u8 *pbyFrame,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"AES:%d %d %d\n", pMgmt->byCSSPK, pMgmt->byCSSGK, byDecMode);
if (byDecMode != pKey->byCipherSuite) {
- if (byDecMode == KEY_CTL_WEP) {
-// pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++;
- } else if (pDevice->bLinkPass == true) {
-// pDevice->s802_11Counter.DecryptFailureCount.QuadPart++;
- }
return false;
}
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index a2b4ba6d4f01..e0e93869a681 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -33,7 +33,6 @@
*/
#include "int.h"
-#include "mib.h"
#include "tmacro.h"
#include "mac.h"
#include "power.h"
@@ -86,45 +85,46 @@ void INTnsProcessData(struct vnt_private *pDevice)
pINTData = (PSINTData) pDevice->intBuf.pDataBuf;
if (pINTData->byTSR0 & TSR_VALID) {
- STAvUpdateTDStatCounter(&(pDevice->scStatistic),
- (u8)(pINTData->byPkt0 & 0x0F),
- (u8)(pINTData->byPkt0>>4),
- pINTData->byTSR0);
+ if (pINTData->byTSR0 & (TSR_TMO | TSR_RETRYTMO))
+ pDevice->wstats.discard.retries++;
+ else
+ pStats->tx_packets++;
+
BSSvUpdateNodeTxCounter(pDevice,
- &(pDevice->scStatistic),
pINTData->byTSR0,
pINTData->byPkt0);
/*DBG_PRN_GRP01(("TSR0 %02x\n", pINTData->byTSR0));*/
}
if (pINTData->byTSR1 & TSR_VALID) {
- STAvUpdateTDStatCounter(&(pDevice->scStatistic),
- (u8)(pINTData->byPkt1 & 0x0F),
- (u8)(pINTData->byPkt1>>4),
- pINTData->byTSR1);
+ if (pINTData->byTSR1 & (TSR_TMO | TSR_RETRYTMO))
+ pDevice->wstats.discard.retries++;
+ else
+ pStats->tx_packets++;
+
+
BSSvUpdateNodeTxCounter(pDevice,
- &(pDevice->scStatistic),
pINTData->byTSR1,
pINTData->byPkt1);
/*DBG_PRN_GRP01(("TSR1 %02x\n", pINTData->byTSR1));*/
}
if (pINTData->byTSR2 & TSR_VALID) {
- STAvUpdateTDStatCounter(&(pDevice->scStatistic),
- (u8)(pINTData->byPkt2 & 0x0F),
- (u8)(pINTData->byPkt2>>4),
- pINTData->byTSR2);
+ if (pINTData->byTSR2 & (TSR_TMO | TSR_RETRYTMO))
+ pDevice->wstats.discard.retries++;
+ else
+ pStats->tx_packets++;
+
BSSvUpdateNodeTxCounter(pDevice,
- &(pDevice->scStatistic),
pINTData->byTSR2,
pINTData->byPkt2);
/*DBG_PRN_GRP01(("TSR2 %02x\n", pINTData->byTSR2));*/
}
if (pINTData->byTSR3 & TSR_VALID) {
- STAvUpdateTDStatCounter(&(pDevice->scStatistic),
- (u8)(pINTData->byPkt3 & 0x0F),
- (u8)(pINTData->byPkt3>>4),
- pINTData->byTSR3);
+ if (pINTData->byTSR3 & (TSR_TMO | TSR_RETRYTMO))
+ pDevice->wstats.discard.retries++;
+ else
+ pStats->tx_packets++;
+
BSSvUpdateNodeTxCounter(pDevice,
- &(pDevice->scStatistic),
pINTData->byTSR3,
pINTData->byPkt3);
/*DBG_PRN_GRP01(("TSR3 %02x\n", pINTData->byTSR3));*/
@@ -174,16 +174,6 @@ void INTnsProcessData(struct vnt_private *pDevice)
pINTData->byISR0,
pINTData->dwLoTSF,
pINTData->dwHiTSF)); */
-
- STAvUpdate802_11Counter(&pDevice->s802_11Counter,
- &pDevice->scStatistic,
- pINTData->byRTSSuccess,
- pINTData->byRTSFail,
- pINTData->byACKFail,
- pINTData->byFCSErr);
- STAvUpdateIsrStatCounter(&pDevice->scStatistic,
- pINTData->byISR0,
- pINTData->byISR1);
}
if (pINTData->byISR1 != 0)
if (pINTData->byISR1 & ISR_GPIO3)
@@ -193,10 +183,6 @@ void INTnsProcessData(struct vnt_private *pDevice)
pDevice->intBuf.uDataLen = 0;
pDevice->intBuf.bInUse = false;
- pStats->tx_packets = pDevice->scStatistic.ullTsrOK;
- pStats->tx_bytes = pDevice->scStatistic.ullTxDirectedBytes +
- pDevice->scStatistic.ullTxMulticastBytes +
- pDevice->scStatistic.ullTxBroadcastBytes;
- pStats->tx_errors = pDevice->scStatistic.dwTsrErr;
- pStats->tx_dropped = pDevice->scStatistic.dwTsrErr;
+ pStats->tx_errors = pDevice->wstats.discard.retries;
+ pStats->tx_dropped = pDevice->wstats.discard.retries;
}
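
Each of the four TSR slots in the INTnsProcessData() hunks above is now classified the same way: a timeout bit means the frame counts as a discarded retry, otherwise it counts as a transmitted packet, and the per-node counters are updated either way. A hedged sketch of that per-slot pattern factored into a helper; the TSR bit values are assumptions made for the example, not quoted from the driver.

	#include <stdio.h>

	#define TSR_VALID    0x01	/* assumed bit layout for the example */
	#define TSR_RETRYTMO 0x08
	#define TSR_TMO      0x10

	struct counters {
		unsigned long tx_packets;
		unsigned long discard_retries;
	};

	/* one TSR byte per completed packet slot in the interrupt buffer */
	static void account_tsr(struct counters *c, unsigned char tsr)
	{
		if (!(tsr & TSR_VALID))
			return;
		if (tsr & (TSR_TMO | TSR_RETRYTMO))
			c->discard_retries++;	/* gave up: a discarded retry */
		else
			c->tx_packets++;	/* acknowledged: transmitted */
		/* the driver also calls BSSvUpdateNodeTxCounter() here */
	}

	int main(void)
	{
		struct counters c = { 0, 0 };
		unsigned char tsr_slots[4] = { 0x01, 0x01 | TSR_TMO, 0x00, 0x01 };
		int i;

		for (i = 0; i < 4; i++)
			account_tsr(&c, tsr_slots[i]);
		printf("tx=%lu retries=%lu\n", c.tx_packets, c.discard_retries);
		return 0;
	}
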
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index 63917abbbd00..3a68dfa23d84 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -58,9 +58,6 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
long ldBm;
pDevice->wstats.status = pDevice->eOPMode;
- if (pDevice->scStatistic.LinkQuality > 100)
- pDevice->scStatistic.LinkQuality = 100;
- pDevice->wstats.qual.qual = (u8)pDevice->scStatistic.LinkQuality;
RFvRSSITodBm(pDevice, (u8)(pDevice->uCurrRSSI), &ldBm);
pDevice->wstats.qual.level = ldBm;
pDevice->wstats.qual.noise = 0;
@@ -68,7 +65,6 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
pDevice->wstats.discard.nwid = 0;
pDevice->wstats.discard.code = 0;
pDevice->wstats.discard.fragment = 0;
- pDevice->wstats.discard.retries = pDevice->scStatistic.dwTsrErr;
pDevice->wstats.discard.misc = 0;
pDevice->wstats.miss.beacon = 0;
return &pDevice->wstats;
@@ -1568,10 +1564,8 @@ int iwctl_siwgenie(struct net_device *dev, struct iw_request_info *info,
goto out;
}
memset(pMgmt->abyWPAIE, 0, MAX_WPA_IE_LEN);
- if (copy_from_user(pMgmt->abyWPAIE, extra, wrq->length)) {
- ret = -EFAULT;
- goto out;
- }
+
+ memcpy(pMgmt->abyWPAIE, extra, wrq->length);
pMgmt->wWPAIELen = wrq->length;
} else {
memset(pMgmt->abyWPAIE, 0, MAX_WPA_IE_LEN);
@@ -1597,13 +1591,11 @@ int iwctl_giwgenie(struct net_device *dev, struct iw_request_info *info,
wrq->length = 0;
if (pMgmt->wWPAIELen > 0) {
wrq->length = pMgmt->wWPAIELen;
- if (pMgmt->wWPAIELen <= space) {
- if (copy_to_user(extra, pMgmt->abyWPAIE, pMgmt->wWPAIELen)) {
- ret = -EFAULT;
- }
- } else {
+
+ if (pMgmt->wWPAIELen <= space)
+ memcpy(extra, pMgmt->abyWPAIE, pMgmt->wWPAIELen);
+ else
ret = -E2BIG;
- }
}
return ret;
}
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index aae228c533ef..58edcae74efc 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -67,7 +67,6 @@
#include "datarate.h"
#include "rf.h"
#include "firmware.h"
-#include "rndis.h"
#include "control.h"
#include "channel.h"
#include "int.h"
@@ -215,13 +214,12 @@ static void device_set_multi(struct net_device *dev);
static int device_close(struct net_device *dev);
static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static int device_init_registers(struct vnt_private *pDevice,
- DEVICE_INIT_TYPE InitType);
+static int device_init_registers(struct vnt_private *pDevice);
static bool device_init_defrag_cb(struct vnt_private *pDevice);
static void device_init_diversity_timer(struct vnt_private *pDevice);
static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev);
-static int ethtool_ioctl(struct net_device *dev, void *useraddr);
+static int ethtool_ioctl(struct net_device *dev, struct ifreq *);
static void device_free_tx_bufs(struct vnt_private *pDevice);
static void device_free_rx_bufs(struct vnt_private *pDevice);
static void device_free_int_bufs(struct vnt_private *pDevice);
@@ -296,343 +294,352 @@ static void device_init_diversity_timer(struct vnt_private *pDevice)
/*
* initialization of MAC & BBP registers
*/
-
-static int device_init_registers(struct vnt_private *pDevice,
- DEVICE_INIT_TYPE InitType)
+static int device_init_registers(struct vnt_private *pDevice)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_cmd_card_init *init_cmd = &pDevice->init_command;
+ struct vnt_rsp_card_init *init_rsp = &pDevice->init_response;
u8 abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
u8 abySNAP_RFC1042[ETH_ALEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
u8 abySNAP_Bridgetunnel[ETH_ALEN]
= {0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8};
u8 byAntenna;
int ii;
- CMD_CARD_INIT sInitCmd;
int ntStatus = STATUS_SUCCESS;
- RSP_CARD_INIT sInitRsp;
u8 byTmp;
u8 byCalibTXIQ = 0, byCalibTXDC = 0, byCalibRXIQ = 0;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---->INIbInitAdapter. [%d][%d]\n", InitType, pDevice->byPacketType);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---->INIbInitAdapter. [%d][%d]\n",
+ DEVICE_INIT_COLD, pDevice->byPacketType);
+
spin_lock_irq(&pDevice->lock);
- if (InitType == DEVICE_INIT_COLD) {
- memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, ETH_ALEN);
- memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, ETH_ALEN);
- memcpy(pDevice->abySNAP_Bridgetunnel,
- abySNAP_Bridgetunnel,
- ETH_ALEN);
-
- if ( !FIRMWAREbCheckVersion(pDevice) ) {
- if (FIRMWAREbDownload(pDevice) == true) {
- if (FIRMWAREbBrach2Sram(pDevice) == false) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" FIRMWAREbBrach2Sram fail \n");
- spin_unlock_irq(&pDevice->lock);
- return false;
- }
- } else {
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" FIRMWAREbDownload fail \n");
- spin_unlock_irq(&pDevice->lock);
- return false;
- }
- }
- if ( !BBbVT3184Init(pDevice) ) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" BBbVT3184Init fail \n");
- spin_unlock_irq(&pDevice->lock);
- return false;
- }
- }
+ memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, ETH_ALEN);
+ memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, ETH_ALEN);
+ memcpy(pDevice->abySNAP_Bridgetunnel, abySNAP_Bridgetunnel, ETH_ALEN);
+
+ if (!FIRMWAREbCheckVersion(pDevice)) {
+ if (FIRMWAREbDownload(pDevice) == true) {
+ if (FIRMWAREbBrach2Sram(pDevice) == false) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ " FIRMWAREbBrach2Sram fail\n");
+ spin_unlock_irq(&pDevice->lock);
+ return false;
+ }
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ " FIRMWAREbDownload fail\n");
+ spin_unlock_irq(&pDevice->lock);
+ return false;
+ }
+ }
- sInitCmd.byInitClass = (u8)InitType;
- sInitCmd.bExistSWNetAddr = (u8) pDevice->bExistSWNetAddr;
- for (ii = 0; ii < 6; ii++)
- sInitCmd.bySWNetAddr[ii] = pDevice->abyCurrentNetAddr[ii];
- sInitCmd.byShortRetryLimit = pDevice->byShortRetryLimit;
- sInitCmd.byLongRetryLimit = pDevice->byLongRetryLimit;
-
- /* issue card_init command to device */
- ntStatus = CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_CARDINIT,
- 0,
- 0,
- sizeof(CMD_CARD_INIT),
- (u8 *) &(sInitCmd));
-
- if ( ntStatus != STATUS_SUCCESS ) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Issue Card init fail \n");
- spin_unlock_irq(&pDevice->lock);
- return false;
- }
- if (InitType == DEVICE_INIT_COLD) {
+ if (!BBbVT3184Init(pDevice)) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" BBbVT3184Init fail\n");
+ spin_unlock_irq(&pDevice->lock);
+ return false;
+ }
- ntStatus = CONTROLnsRequestIn(pDevice,MESSAGE_TYPE_INIT_RSP,0,0,sizeof(RSP_CARD_INIT), (u8 *) &(sInitRsp));
+ init_cmd->init_class = DEVICE_INIT_COLD;
+ init_cmd->exist_sw_net_addr = (u8) pDevice->bExistSWNetAddr;
+ for (ii = 0; ii < 6; ii++)
+ init_cmd->sw_net_addr[ii] = pDevice->abyCurrentNetAddr[ii];
+ init_cmd->short_retry_limit = pDevice->byShortRetryLimit;
+ init_cmd->long_retry_limit = pDevice->byLongRetryLimit;
+
+ /* issue card_init command to device */
+ ntStatus = CONTROLnsRequestOut(pDevice,
+ MESSAGE_TYPE_CARDINIT, 0, 0,
+ sizeof(struct vnt_cmd_card_init), (u8 *)init_cmd);
+ if (ntStatus != STATUS_SUCCESS) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Issue Card init fail\n");
+ spin_unlock_irq(&pDevice->lock);
+ return false;
+ }
- if (ntStatus != STATUS_SUCCESS) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Cardinit request in status fail!\n");
- spin_unlock_irq(&pDevice->lock);
- return false;
- }
+ ntStatus = CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_INIT_RSP, 0, 0,
+ sizeof(struct vnt_rsp_card_init), (u8 *)init_rsp);
+ if (ntStatus != STATUS_SUCCESS) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Cardinit request in status fail!\n");
+ spin_unlock_irq(&pDevice->lock);
+ return false;
+ }
/* local ID for AES functions */
- ntStatus = CONTROLnsRequestIn(pDevice,
- MESSAGE_TYPE_READ,
- MAC_REG_LOCALID,
- MESSAGE_REQUEST_MACREG,
- 1,
- &pDevice->byLocalID);
-
- if ( ntStatus != STATUS_SUCCESS ) {
- spin_unlock_irq(&pDevice->lock);
- return false;
- }
+ ntStatus = CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ,
+ MAC_REG_LOCALID, MESSAGE_REQUEST_MACREG, 1,
+ &pDevice->byLocalID);
+ if (ntStatus != STATUS_SUCCESS) {
+ spin_unlock_irq(&pDevice->lock);
+ return false;
+ }
/* do MACbSoftwareReset in MACvInitialize */
/* force CCK */
- pDevice->bCCK = true;
+ pDevice->bCCK = true;
pDevice->bProtectMode = false;
/* only used in 11g type, sync with ERP IE */
- pDevice->bNonERPPresent = false;
- pDevice->bBarkerPreambleMd = false;
- if ( pDevice->bFixRate ) {
- pDevice->wCurrentRate = (u16) pDevice->uConnectionRate;
- } else {
- if ( pDevice->byBBType == BB_TYPE_11B )
- pDevice->wCurrentRate = RATE_11M;
- else
- pDevice->wCurrentRate = RATE_54M;
- }
+ pDevice->bNonERPPresent = false;
+ pDevice->bBarkerPreambleMd = false;
+ if (pDevice->bFixRate) {
+ pDevice->wCurrentRate = (u16)pDevice->uConnectionRate;
+ } else {
+ if (pDevice->byBBType == BB_TYPE_11B)
+ pDevice->wCurrentRate = RATE_11M;
+ else
+ pDevice->wCurrentRate = RATE_54M;
+ }
- CHvInitChannelTable(pDevice);
+ CHvInitChannelTable(pDevice);
- pDevice->byTopOFDMBasicRate = RATE_24M;
- pDevice->byTopCCKBasicRate = RATE_1M;
+ pDevice->byTopOFDMBasicRate = RATE_24M;
+ pDevice->byTopCCKBasicRate = RATE_1M;
pDevice->byRevId = 0;
/* target to IF pin while programming to RF chip */
- pDevice->byCurPwr = 0xFF;
+ pDevice->byCurPwr = 0xFF;
- pDevice->byCCKPwr = pDevice->abyEEPROM[EEP_OFS_PWR_CCK];
- pDevice->byOFDMPwrG = pDevice->abyEEPROM[EEP_OFS_PWR_OFDMG];
+ pDevice->byCCKPwr = pDevice->abyEEPROM[EEP_OFS_PWR_CCK];
+ pDevice->byOFDMPwrG = pDevice->abyEEPROM[EEP_OFS_PWR_OFDMG];
/* load power table */
for (ii = 0; ii < 14; ii++) {
- pDevice->abyCCKPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_CCK_PWR_TBL];
- if (pDevice->abyCCKPwrTbl[ii] == 0)
- pDevice->abyCCKPwrTbl[ii] = pDevice->byCCKPwr;
- pDevice->abyOFDMPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_OFDM_PWR_TBL];
- if (pDevice->abyOFDMPwrTbl[ii] == 0)
- pDevice->abyOFDMPwrTbl[ii] = pDevice->byOFDMPwrG;
- }
+ pDevice->abyCCKPwrTbl[ii] =
+ pDevice->abyEEPROM[ii + EEP_OFS_CCK_PWR_TBL];
+
+ if (pDevice->abyCCKPwrTbl[ii] == 0)
+ pDevice->abyCCKPwrTbl[ii] = pDevice->byCCKPwr;
+ pDevice->abyOFDMPwrTbl[ii] =
+ pDevice->abyEEPROM[ii + EEP_OFS_OFDM_PWR_TBL];
+ if (pDevice->abyOFDMPwrTbl[ii] == 0)
+ pDevice->abyOFDMPwrTbl[ii] = pDevice->byOFDMPwrG;
+ }
/*
* original zonetype is USA, but custom zonetype is Europe,
* then need to recover 12, 13, 14 channels with 11 channel
*/
- if(((pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Japan) ||
- (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Europe))&&
- (pDevice->byOriginalZonetype == ZoneType_USA)) {
+ if (((pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Japan) ||
+ (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Europe)) &&
+ (pDevice->byOriginalZonetype == ZoneType_USA)) {
for (ii = 11; ii < 14; ii++) {
pDevice->abyCCKPwrTbl[ii] = pDevice->abyCCKPwrTbl[10];
pDevice->abyOFDMPwrTbl[ii] = pDevice->abyOFDMPwrTbl[10];
}
- }
+ }
- pDevice->byOFDMPwrA = 0x34; /* same as RFbMA2829SelectChannel */
+ pDevice->byOFDMPwrA = 0x34; /* same as RFbMA2829SelectChannel */
- /* load OFDM A power table */
- for (ii = 0; ii < CB_MAX_CHANNEL_5G; ii++) {
- pDevice->abyOFDMAPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_OFDMA_PWR_TBL];
- if (pDevice->abyOFDMAPwrTbl[ii] == 0)
- pDevice->abyOFDMAPwrTbl[ii] = pDevice->byOFDMPwrA;
- }
+ /* load OFDM A power table */
+ for (ii = 0; ii < CB_MAX_CHANNEL_5G; ii++) {
+ pDevice->abyOFDMAPwrTbl[ii] =
+ pDevice->abyEEPROM[ii + EEP_OFS_OFDMA_PWR_TBL];
+
+ if (pDevice->abyOFDMAPwrTbl[ii] == 0)
+ pDevice->abyOFDMAPwrTbl[ii] = pDevice->byOFDMPwrA;
+ }
- byAntenna = pDevice->abyEEPROM[EEP_OFS_ANTENNA];
- if (byAntenna & EEP_ANTINV)
- pDevice->bTxRxAntInv = true;
- else
- pDevice->bTxRxAntInv = false;
+ byAntenna = pDevice->abyEEPROM[EEP_OFS_ANTENNA];
- byAntenna &= (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
+ if (byAntenna & EEP_ANTINV)
+ pDevice->bTxRxAntInv = true;
+ else
+ pDevice->bTxRxAntInv = false;
+
+ byAntenna &= (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
if (byAntenna == 0) /* if not set default is both */
- byAntenna = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
-
- if (byAntenna == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) {
- pDevice->byAntennaCount = 2;
- pDevice->byTxAntennaMode = ANT_B;
- pDevice->dwTxAntennaSel = 1;
- pDevice->dwRxAntennaSel = 1;
- if (pDevice->bTxRxAntInv == true)
- pDevice->byRxAntennaMode = ANT_A;
- else
- pDevice->byRxAntennaMode = ANT_B;
-
- if (pDevice->bDiversityRegCtlON)
- pDevice->bDiversityEnable = true;
- else
- pDevice->bDiversityEnable = false;
- } else {
- pDevice->bDiversityEnable = false;
- pDevice->byAntennaCount = 1;
- pDevice->dwTxAntennaSel = 0;
- pDevice->dwRxAntennaSel = 0;
- if (byAntenna & EEP_ANTENNA_AUX) {
- pDevice->byTxAntennaMode = ANT_A;
- if (pDevice->bTxRxAntInv == true)
- pDevice->byRxAntennaMode = ANT_B;
- else
- pDevice->byRxAntennaMode = ANT_A;
- } else {
- pDevice->byTxAntennaMode = ANT_B;
- if (pDevice->bTxRxAntInv == true)
- pDevice->byRxAntennaMode = ANT_A;
- else
- pDevice->byRxAntennaMode = ANT_B;
- }
- }
- pDevice->ulDiversityNValue = 100*255;
- pDevice->ulDiversityMValue = 100*16;
- pDevice->byTMax = 1;
- pDevice->byTMax2 = 4;
- pDevice->ulSQ3TH = 0;
- pDevice->byTMax3 = 64;
+ byAntenna = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
+
+ if (byAntenna == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) {
+ pDevice->byAntennaCount = 2;
+ pDevice->byTxAntennaMode = ANT_B;
+ pDevice->dwTxAntennaSel = 1;
+ pDevice->dwRxAntennaSel = 1;
+
+ if (pDevice->bTxRxAntInv == true)
+ pDevice->byRxAntennaMode = ANT_A;
+ else
+ pDevice->byRxAntennaMode = ANT_B;
+
+ if (pDevice->bDiversityRegCtlON)
+ pDevice->bDiversityEnable = true;
+ else
+ pDevice->bDiversityEnable = false;
+ } else {
+ pDevice->bDiversityEnable = false;
+ pDevice->byAntennaCount = 1;
+ pDevice->dwTxAntennaSel = 0;
+ pDevice->dwRxAntennaSel = 0;
+
+ if (byAntenna & EEP_ANTENNA_AUX) {
+ pDevice->byTxAntennaMode = ANT_A;
+
+ if (pDevice->bTxRxAntInv == true)
+ pDevice->byRxAntennaMode = ANT_B;
+ else
+ pDevice->byRxAntennaMode = ANT_A;
+ } else {
+ pDevice->byTxAntennaMode = ANT_B;
+
+ if (pDevice->bTxRxAntInv == true)
+ pDevice->byRxAntennaMode = ANT_A;
+ else
+ pDevice->byRxAntennaMode = ANT_B;
+ }
+ }
+
+ pDevice->ulDiversityNValue = 100 * 255;
+ pDevice->ulDiversityMValue = 100 * 16;
+ pDevice->byTMax = 1;
+ pDevice->byTMax2 = 4;
+ pDevice->ulSQ3TH = 0;
+ pDevice->byTMax3 = 64;
/* get Auto Fall Back type */
- pDevice->byAutoFBCtrl = AUTO_FB_0;
+ pDevice->byAutoFBCtrl = AUTO_FB_0;
/* set SCAN Time */
- pDevice->uScanTime = WLAN_SCAN_MINITIME;
+ pDevice->uScanTime = WLAN_SCAN_MINITIME;
/* default Auto Mode */
/* pDevice->NetworkType = Ndis802_11Automode; */
- pDevice->eConfigPHYMode = PHY_TYPE_AUTO;
- pDevice->byBBType = BB_TYPE_11G;
+ pDevice->eConfigPHYMode = PHY_TYPE_AUTO;
+ pDevice->byBBType = BB_TYPE_11G;
/* initialize BBP registers */
- pDevice->ulTxPower = 25;
+ pDevice->ulTxPower = 25;
/* get channel range */
- pDevice->byMinChannel = 1;
- pDevice->byMaxChannel = CB_MAX_CHANNEL;
+ pDevice->byMinChannel = 1;
+ pDevice->byMaxChannel = CB_MAX_CHANNEL;
/* get RFType */
- pDevice->byRFType = sInitRsp.byRFType;
+ pDevice->byRFType = init_rsp->rf_type;
- if ((pDevice->byRFType & RF_EMU) != 0) {
+ if ((pDevice->byRFType & RF_EMU) != 0) {
/* force change RevID for VT3253 emu */
pDevice->byRevId = 0x80;
- }
+ }
/* load vt3266 calibration parameters in EEPROM */
- if (pDevice->byRFType == RF_VT3226D0) {
- if((pDevice->abyEEPROM[EEP_OFS_MAJOR_VER] == 0x1) &&
- (pDevice->abyEEPROM[EEP_OFS_MINOR_VER] >= 0x4)) {
- byCalibTXIQ = pDevice->abyEEPROM[EEP_OFS_CALIB_TX_IQ];
- byCalibTXDC = pDevice->abyEEPROM[EEP_OFS_CALIB_TX_DC];
- byCalibRXIQ = pDevice->abyEEPROM[EEP_OFS_CALIB_RX_IQ];
- if( (byCalibTXIQ || byCalibTXDC || byCalibRXIQ) ) {
+ if (pDevice->byRFType == RF_VT3226D0) {
+ if ((pDevice->abyEEPROM[EEP_OFS_MAJOR_VER] == 0x1) &&
+ (pDevice->abyEEPROM[EEP_OFS_MINOR_VER] >= 0x4)) {
+
+ byCalibTXIQ = pDevice->abyEEPROM[EEP_OFS_CALIB_TX_IQ];
+ byCalibTXDC = pDevice->abyEEPROM[EEP_OFS_CALIB_TX_DC];
+ byCalibRXIQ = pDevice->abyEEPROM[EEP_OFS_CALIB_RX_IQ];
+ if (byCalibTXIQ || byCalibTXDC || byCalibRXIQ) {
/* CR255, enable TX/RX IQ and DC compensation mode */
- ControlvWriteByte(pDevice,
- MESSAGE_REQUEST_BBREG,
- 0xFF,
- 0x03);
+ ControlvWriteByte(pDevice,
+ MESSAGE_REQUEST_BBREG,
+ 0xff,
+ 0x03);
/* CR251, TX I/Q Imbalance Calibration */
- ControlvWriteByte(pDevice,
- MESSAGE_REQUEST_BBREG,
- 0xFB,
- byCalibTXIQ);
+ ControlvWriteByte(pDevice,
+ MESSAGE_REQUEST_BBREG,
+ 0xfb,
+ byCalibTXIQ);
/* CR252, TX DC-Offset Calibration */
- ControlvWriteByte(pDevice,
- MESSAGE_REQUEST_BBREG,
- 0xFC,
- byCalibTXDC);
+ ControlvWriteByte(pDevice,
+ MESSAGE_REQUEST_BBREG,
+ 0xfC,
+ byCalibTXDC);
/* CR253, RX I/Q Imbalance Calibration */
- ControlvWriteByte(pDevice,
- MESSAGE_REQUEST_BBREG,
- 0xFD,
- byCalibRXIQ);
- } else {
+ ControlvWriteByte(pDevice,
+ MESSAGE_REQUEST_BBREG,
+ 0xfd,
+ byCalibRXIQ);
+ } else {
/* CR255, turn off BB Calibration compensation */
- ControlvWriteByte(pDevice,
- MESSAGE_REQUEST_BBREG,
- 0xFF,
- 0x0);
- }
- }
- }
- pMgmt->eScanType = WMAC_SCAN_PASSIVE;
- pMgmt->uCurrChannel = pDevice->uChannel;
- pMgmt->uIBSSChannel = pDevice->uChannel;
- CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel);
+ ControlvWriteByte(pDevice,
+ MESSAGE_REQUEST_BBREG,
+ 0xff,
+ 0x0);
+ }
+ }
+ }
+
+ pMgmt->eScanType = WMAC_SCAN_PASSIVE;
+ pMgmt->uCurrChannel = pDevice->uChannel;
+ pMgmt->uIBSSChannel = pDevice->uChannel;
+ CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel);
/* get permanent network address */
- memcpy(pDevice->abyPermanentNetAddr,&(sInitRsp.byNetAddr[0]),6);
+ memcpy(pDevice->abyPermanentNetAddr, init_rsp->net_addr, 6);
memcpy(pDevice->abyCurrentNetAddr,
- pDevice->abyPermanentNetAddr,
- ETH_ALEN);
+ pDevice->abyPermanentNetAddr, ETH_ALEN);
/* if exist SW network address, use it */
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Network address = %pM\n",
pDevice->abyCurrentNetAddr);
- }
- /*
- * set BB and packet type at the same time
- * set Short Slot Time, xIFS, and RSPINF
- */
- if (pDevice->byBBType == BB_TYPE_11A) {
- CARDbAddBasicRate(pDevice, RATE_6M);
- pDevice->bShortSlotTime = true;
- } else {
- CARDbAddBasicRate(pDevice, RATE_1M);
- pDevice->bShortSlotTime = false;
- }
- BBvSetShortSlotTime(pDevice);
- CARDvSetBSSMode(pDevice);
+ /*
+ * set BB and packet type at the same time
+ * set Short Slot Time, xIFS, and RSPINF
+ */
+ if (pDevice->byBBType == BB_TYPE_11A) {
+ CARDbAddBasicRate(pDevice, RATE_6M);
+ pDevice->bShortSlotTime = true;
+ } else {
+ CARDbAddBasicRate(pDevice, RATE_1M);
+ pDevice->bShortSlotTime = false;
+ }
- if (pDevice->bUpdateBBVGA) {
- pDevice->byBBVGACurrent = pDevice->abyBBVGA[0];
- pDevice->byBBVGANew = pDevice->byBBVGACurrent;
- BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
- }
+ BBvSetShortSlotTime(pDevice);
+ CARDvSetBSSMode(pDevice);
- pDevice->byRadioCtl = pDevice->abyEEPROM[EEP_OFS_RADIOCTL];
- pDevice->bHWRadioOff = false;
- if ( (pDevice->byRadioCtl & EEP_RADIOCTL_ENABLE) != 0 ) {
- ntStatus = CONTROLnsRequestIn(pDevice,
- MESSAGE_TYPE_READ,
- MAC_REG_GPIOCTL1,
- MESSAGE_REQUEST_MACREG,
- 1,
- &byTmp);
-
- if ( ntStatus != STATUS_SUCCESS ) {
- spin_unlock_irq(&pDevice->lock);
- return false;
- }
- if ( (byTmp & GPIO3_DATA) == 0 ) {
- pDevice->bHWRadioOff = true;
- MACvRegBitsOn(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD);
- } else {
- MACvRegBitsOff(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD);
- pDevice->bHWRadioOff = false;
- }
+ if (pDevice->bUpdateBBVGA) {
+ pDevice->byBBVGACurrent = pDevice->abyBBVGA[0];
+ pDevice->byBBVGANew = pDevice->byBBVGACurrent;
- }
+ BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
+ }
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_TMLEN,0x38);
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- MACvRegBitsOn(pDevice,MAC_REG_GPIOCTL0,0x01);
+ pDevice->byRadioCtl = pDevice->abyEEPROM[EEP_OFS_RADIOCTL];
+ pDevice->bHWRadioOff = false;
- if ((pDevice->bHWRadioOff == true) || (pDevice->bRadioControlOff == true)) {
- CARDbRadioPowerOff(pDevice);
- } else {
- CARDbRadioPowerOn(pDevice);
- }
+ if ((pDevice->byRadioCtl & EEP_RADIOCTL_ENABLE) != 0) {
+ ntStatus = CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ,
+ MAC_REG_GPIOCTL1, MESSAGE_REQUEST_MACREG, 1, &byTmp);
- spin_unlock_irq(&pDevice->lock);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----INIbInitAdapter Exit\n");
- return true;
+ if (ntStatus != STATUS_SUCCESS) {
+ spin_unlock_irq(&pDevice->lock);
+ return false;
+ }
+
+ if ((byTmp & GPIO3_DATA) == 0) {
+ pDevice->bHWRadioOff = true;
+ MACvRegBitsOn(pDevice, MAC_REG_GPIOCTL1, GPIO3_INTMD);
+ } else {
+ MACvRegBitsOff(pDevice, MAC_REG_GPIOCTL1, GPIO3_INTMD);
+ pDevice->bHWRadioOff = false;
+ }
+
+ }
+
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG,
+ MAC_REG_PAPEDELAY, LEDSTS_TMLEN, 0x38);
+
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG,
+ MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
+
+ MACvRegBitsOn(pDevice, MAC_REG_GPIOCTL0, 0x01);
+
+ if ((pDevice->bHWRadioOff == true) ||
+ (pDevice->bRadioControlOff == true)) {
+ CARDbRadioPowerOff(pDevice);
+ } else {
+ CARDbRadioPowerOn(pDevice);
+ }
+
+
+ spin_unlock_irq(&pDevice->lock);
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----INIbInitAdapter Exit\n");
+
+ return true;
}
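
One detail from the device_init_registers() rework above worth a worked example: when the EEPROM zonetype was calibrated for USA but the custom zonetype is Japan or Europe, channels 12-14 have no calibrated power entries, so the init path copies channel 11's values into them. A standalone sketch of that recovery step; the table contents are invented.

	#include <stdio.h>

	enum { ZoneType_USA, ZoneType_Japan, ZoneType_Europe };

	int main(void)
	{
		int eeprom_zone = ZoneType_Europe;	/* current (custom) zonetype */
		int original_zone = ZoneType_USA;	/* zonetype the EEPROM was calibrated for */
		unsigned char cck_pwr[14]  = { 40, 40, 40, 41, 41, 41, 42, 42, 42, 43, 43, 0, 0, 0 };
		unsigned char ofdm_pwr[14] = { 36, 36, 36, 37, 37, 37, 38, 38, 38, 39, 39, 0, 0, 0 };
		int ii;

		if ((eeprom_zone == ZoneType_Japan || eeprom_zone == ZoneType_Europe) &&
		    original_zone == ZoneType_USA) {
			/* channels 12..14 (indices 11..13) reuse channel 11's calibration */
			for (ii = 11; ii < 14; ii++) {
				cck_pwr[ii]  = cck_pwr[10];
				ofdm_pwr[ii] = ofdm_pwr[10];
			}
		}

		for (ii = 10; ii < 14; ii++)
			printf("ch%-2d cck=%u ofdm=%u\n", ii + 1, cck_pwr[ii], ofdm_pwr[ii]);
		return 0;
	}
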
#ifdef CONFIG_PM /* Minimal support for suspend and resume */
@@ -962,10 +969,10 @@ static int device_open(struct net_device *dev)
/* read config file */
Read_config_file(pDevice);
- if (device_init_registers(pDevice, DEVICE_INIT_COLD) == false) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " init register fail\n");
- goto free_all;
- }
+ if (device_init_registers(pDevice) == false) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " init register fail\n");
+ goto free_all;
+ }
device_set_multi(pDevice->dev);
@@ -1187,22 +1194,6 @@ out:
return NETDEV_TX_OK;
}
-static unsigned const ethernet_polynomial = 0x04c11db7U;
-static inline u32 ether_crc(int length, unsigned char *data)
-{
- int crc = -1;
-
- while(--length >= 0) {
- unsigned char current_octet = *data++;
- int bit;
- for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
- crc = (crc << 1) ^
- ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
- }
- }
- return crc;
-}
-
/* find out the start position of str2 from str1 */
static unsigned char *kstrstr(const unsigned char *str1,
const unsigned char *str2) {
@@ -1448,18 +1439,18 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
break;
case SIOCETHTOOL:
- return ethtool_ioctl(dev, (void *) rq->ifr_data);
+ return ethtool_ioctl(dev, rq);
}
return rc;
}
-static int ethtool_ioctl(struct net_device *dev, void *useraddr)
+static int ethtool_ioctl(struct net_device *dev, struct ifreq *rq)
{
u32 ethcmd;
- if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
+ if (copy_from_user(&ethcmd, rq->ifr_data, sizeof(ethcmd)))
return -EFAULT;
switch (ethcmd) {
@@ -1467,7 +1458,7 @@ static int ethtool_ioctl(struct net_device *dev, void *useraddr)
struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
strncpy(info.driver, DEVICE_NAME, sizeof(info.driver)-1);
strncpy(info.version, DEVICE_VERSION, sizeof(info.version)-1);
- if (copy_to_user(useraddr, &info, sizeof(info)))
+ if (copy_to_user(rq->ifr_data, &info, sizeof(info)))
return -EFAULT;
return 0;
}
diff --git a/drivers/staging/vt6656/mib.c b/drivers/staging/vt6656/mib.c
deleted file mode 100644
index 12333cdcbc6a..000000000000
--- a/drivers/staging/vt6656/mib.c
+++ /dev/null
@@ -1,489 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: mib.c
- *
- * Purpose: Implement MIB Data Structure
- *
- * Author: Tevin Chen
- *
- * Date: May 21, 1996
- *
- * Functions:
- * STAvUpdateIstStatCounter - Update ISR statistic counter
- * STAvUpdateRDStatCounter - Update Rx statistic counter
- * STAvUpdateTDStatCounter - Update Tx statistic counter
- * STAvUpdateTDStatCounterEx - Update Tx statistic counter and copy tx data
- * STAvUpdate802_11Counter - Update 802.11 mib counter
- *
- * Revision History:
- *
- */
-
-#include "mac.h"
-#include "tether.h"
-#include "mib.h"
-#include "wctl.h"
-#include "baseband.h"
-
-static int msglevel =MSG_LEVEL_INFO;
-
-/*
- * Description: Update Isr Statistic Counter
- *
- * Parameters:
- * In:
- * pStatistic - Pointer to Statistic Counter Data Structure
- * wisr - Interrupt status
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void STAvUpdateIsrStatCounter (PSStatCounter pStatistic, u8 byIsr0, u8 byIsr1)
-{
- /**********************/
- /* ABNORMAL interrupt */
- /**********************/
- // not any IMR bit invoke irq
- if (byIsr0 == 0) {
- pStatistic->ISRStat.dwIsrUnknown++;
- return;
- }
-
- if (byIsr0 & ISR_ACTX) // ISR, bit0
- pStatistic->ISRStat.dwIsrTx0OK++; // TXDMA0 successful
-
- if (byIsr0 & ISR_BNTX) // ISR, bit2
- pStatistic->ISRStat.dwIsrBeaconTxOK++; // BeaconTx successful
-
- if (byIsr0 & ISR_RXDMA0) // ISR, bit3
- pStatistic->ISRStat.dwIsrRx0OK++; // Rx0 successful
-
- if (byIsr0 & ISR_TBTT) // ISR, bit4
- pStatistic->ISRStat.dwIsrTBTTInt++; // TBTT successful
-
- if (byIsr0 & ISR_SOFTTIMER) // ISR, bit6
- pStatistic->ISRStat.dwIsrSTIMERInt++;
-
- if (byIsr0 & ISR_WATCHDOG) // ISR, bit7
- pStatistic->ISRStat.dwIsrWatchDog++;
-
- if (byIsr1 & ISR_FETALERR) // ISR, bit8
- pStatistic->ISRStat.dwIsrUnrecoverableError++;
-
- if (byIsr1 & ISR_SOFTINT) // ISR, bit9
- pStatistic->ISRStat.dwIsrSoftInterrupt++; // software interrupt
-
- if (byIsr1 & ISR_MIBNEARFULL) // ISR, bit10
- pStatistic->ISRStat.dwIsrMIBNearfull++;
-
- if (byIsr1 & ISR_RXNOBUF) // ISR, bit11
- pStatistic->ISRStat.dwIsrRxNoBuf++; // Rx No Buff
-
-}
-
-/*
- * Description: Update Rx Statistic Counter
- *
- * Parameters:
- * In:
- * pStatistic - Pointer to Statistic Counter Data Structure
- * byRSR - Rx Status
- * byNewRSR - Rx Status
- * pbyBuffer - Rx Buffer
- * cbFrameLength - Rx Length
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void STAvUpdateRDStatCounter(PSStatCounter pStatistic,
- u8 byRSR, u8 byNewRSR,
- u8 byRxSts, u8 byRxRate,
- u8 * pbyBuffer, unsigned int cbFrameLength)
-{
- /* need change */
- struct ieee80211_hdr *pHeader = (struct ieee80211_hdr *)pbyBuffer;
-
- if (byRSR & RSR_ADDROK)
- pStatistic->dwRsrADDROk++;
- if (byRSR & RSR_CRCOK) {
- pStatistic->dwRsrCRCOk++;
- pStatistic->ullRsrOK++;
-
- if (cbFrameLength >= ETH_ALEN) {
- /* update counters in case of successful transmission */
- if (byRSR & RSR_ADDRBROAD) {
- pStatistic->ullRxBroadcastFrames++;
- pStatistic->ullRxBroadcastBytes +=
- (unsigned long long) cbFrameLength;
- }
- else if (byRSR & RSR_ADDRMULTI) {
- pStatistic->ullRxMulticastFrames++;
- pStatistic->ullRxMulticastBytes +=
- (unsigned long long) cbFrameLength;
- }
- else {
- pStatistic->ullRxDirectedFrames++;
- pStatistic->ullRxDirectedBytes +=
- (unsigned long long) cbFrameLength;
- }
- }
- }
-
- if(byRxRate==22) {
- pStatistic->CustomStat.ullRsr11M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr11MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "11M: ALL[%d], OK[%d]:[%02x]\n",
- (signed int) pStatistic->CustomStat.ullRsr11M,
- (signed int) pStatistic->CustomStat.ullRsr11MCRCOk, byRSR);
- }
- else if(byRxRate==11) {
- pStatistic->CustomStat.ullRsr5M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr5MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 5M: ALL[%d], OK[%d]:[%02x]\n",
- (signed int) pStatistic->CustomStat.ullRsr5M,
- (signed int) pStatistic->CustomStat.ullRsr5MCRCOk, byRSR);
- }
- else if(byRxRate==4) {
- pStatistic->CustomStat.ullRsr2M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr2MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 2M: ALL[%d], OK[%d]:[%02x]\n",
- (signed int) pStatistic->CustomStat.ullRsr2M,
- (signed int) pStatistic->CustomStat.ullRsr2MCRCOk, byRSR);
- }
- else if(byRxRate==2){
- pStatistic->CustomStat.ullRsr1M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr1MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 1M: ALL[%d], OK[%d]:[%02x]\n",
- (signed int) pStatistic->CustomStat.ullRsr1M,
- (signed int) pStatistic->CustomStat.ullRsr1MCRCOk, byRSR);
- }
- else if(byRxRate==12){
- pStatistic->CustomStat.ullRsr6M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr6MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 6M: ALL[%d], OK[%d]\n",
- (signed int) pStatistic->CustomStat.ullRsr6M,
- (signed int) pStatistic->CustomStat.ullRsr6MCRCOk);
- }
- else if(byRxRate==18){
- pStatistic->CustomStat.ullRsr9M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr9MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 9M: ALL[%d], OK[%d]\n",
- (signed int) pStatistic->CustomStat.ullRsr9M,
- (signed int) pStatistic->CustomStat.ullRsr9MCRCOk);
- }
- else if(byRxRate==24){
- pStatistic->CustomStat.ullRsr12M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr12MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "12M: ALL[%d], OK[%d]\n",
- (signed int) pStatistic->CustomStat.ullRsr12M,
- (signed int) pStatistic->CustomStat.ullRsr12MCRCOk);
- }
- else if(byRxRate==36){
- pStatistic->CustomStat.ullRsr18M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr18MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "18M: ALL[%d], OK[%d]\n",
- (signed int) pStatistic->CustomStat.ullRsr18M,
- (signed int) pStatistic->CustomStat.ullRsr18MCRCOk);
- }
- else if(byRxRate==48){
- pStatistic->CustomStat.ullRsr24M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr24MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "24M: ALL[%d], OK[%d]\n",
- (signed int) pStatistic->CustomStat.ullRsr24M,
- (signed int) pStatistic->CustomStat.ullRsr24MCRCOk);
- }
- else if(byRxRate==72){
- pStatistic->CustomStat.ullRsr36M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr36MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "36M: ALL[%d], OK[%d]\n",
- (signed int) pStatistic->CustomStat.ullRsr36M,
- (signed int) pStatistic->CustomStat.ullRsr36MCRCOk);
- }
- else if(byRxRate==96){
- pStatistic->CustomStat.ullRsr48M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr48MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "48M: ALL[%d], OK[%d]\n",
- (signed int) pStatistic->CustomStat.ullRsr48M,
- (signed int) pStatistic->CustomStat.ullRsr48MCRCOk);
- }
- else if(byRxRate==108){
- pStatistic->CustomStat.ullRsr54M++;
- if(byRSR & RSR_CRCOK) {
- pStatistic->CustomStat.ullRsr54MCRCOk++;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "54M: ALL[%d], OK[%d]\n",
- (signed int) pStatistic->CustomStat.ullRsr54M,
- (signed int) pStatistic->CustomStat.ullRsr54MCRCOk);
- }
- else {
- DBG_PRT(MSG_LEVEL_DEBUG,
- KERN_INFO "Unknown: Total[%d], CRCOK[%d]\n",
- (signed int) pStatistic->dwRsrRxPacket+1,
- (signed int)pStatistic->dwRsrCRCOk);
- }
-
- if (byRSR & RSR_BSSIDOK)
- pStatistic->dwRsrBSSIDOk++;
-
- if (byRSR & RSR_BCNSSIDOK)
- pStatistic->dwRsrBCNSSIDOk++;
- if (byRSR & RSR_IVLDLEN) //invalid len (> 2312 byte)
- pStatistic->dwRsrLENErr++;
- if (byRSR & RSR_IVLDTYP) //invalid packet type
- pStatistic->dwRsrTYPErr++;
- if ((byRSR & (RSR_IVLDTYP | RSR_IVLDLEN)) || !(byRSR & RSR_CRCOK))
- pStatistic->dwRsrErr++;
-
- if (byNewRSR & NEWRSR_DECRYPTOK)
- pStatistic->dwNewRsrDECRYPTOK++;
- if (byNewRSR & NEWRSR_CFPIND)
- pStatistic->dwNewRsrCFP++;
- if (byNewRSR & NEWRSR_HWUTSF)
- pStatistic->dwNewRsrUTSF++;
- if (byNewRSR & NEWRSR_BCNHITAID)
- pStatistic->dwNewRsrHITAID++;
- if (byNewRSR & NEWRSR_BCNHITAID0)
- pStatistic->dwNewRsrHITAID0++;
-
- // increase rx packet count
- pStatistic->dwRsrRxPacket++;
- pStatistic->dwRsrRxOctet += cbFrameLength;
-
- if (IS_TYPE_DATA(pbyBuffer)) {
- pStatistic->dwRsrRxData++;
- } else if (IS_TYPE_MGMT(pbyBuffer)){
- pStatistic->dwRsrRxManage++;
- } else if (IS_TYPE_CONTROL(pbyBuffer)){
- pStatistic->dwRsrRxControl++;
- }
-
- if (byRSR & RSR_ADDRBROAD)
- pStatistic->dwRsrBroadcast++;
- else if (byRSR & RSR_ADDRMULTI)
- pStatistic->dwRsrMulticast++;
- else
- pStatistic->dwRsrDirected++;
-
- if (WLAN_GET_FC_MOREFRAG(pHeader->frame_control))
- pStatistic->dwRsrRxFragment++;
-
- if (cbFrameLength < ETH_ZLEN + 4) {
- pStatistic->dwRsrRunt++;
- } else if (cbFrameLength == ETH_ZLEN + 4) {
- pStatistic->dwRsrRxFrmLen64++;
- }
- else if ((65 <= cbFrameLength) && (cbFrameLength <= 127)) {
- pStatistic->dwRsrRxFrmLen65_127++;
- }
- else if ((128 <= cbFrameLength) && (cbFrameLength <= 255)) {
- pStatistic->dwRsrRxFrmLen128_255++;
- }
- else if ((256 <= cbFrameLength) && (cbFrameLength <= 511)) {
- pStatistic->dwRsrRxFrmLen256_511++;
- }
- else if ((512 <= cbFrameLength) && (cbFrameLength <= 1023)) {
- pStatistic->dwRsrRxFrmLen512_1023++;
- } else if ((1024 <= cbFrameLength) &&
- (cbFrameLength <= ETH_FRAME_LEN + 4)) {
- pStatistic->dwRsrRxFrmLen1024_1518++;
- } else if (cbFrameLength > ETH_FRAME_LEN + 4) {
- pStatistic->dwRsrLong++;
- }
-}
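The byRxRate values tested in the chain above are the PHY rate in 500 kbit/s units (2 = 1 Mb/s, 4 = 2 Mb/s, 11 = 5.5 Mb/s, 22 = 11 Mb/s, 12 = 6 Mb/s, ..., 108 = 54 Mb/s; the ullRsr5M counters actually track 5.5 Mb/s). A minimal table-driven sketch of the same per-rate bookkeeping; rate_slot() and the two counter arrays are illustrative names, not part of the driver:

        static const u8 rate_500kbps[] = {
                2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108
        };

        static int rate_slot(u8 rx_rate)
        {
                int i;

                for (i = 0; i < (int)ARRAY_SIZE(rate_500kbps); i++)
                        if (rate_500kbps[i] == rx_rate)
                                return i;
                return -1;      /* unknown rate, counted separately */
        }

        /* usage sketch:
         *      slot = rate_slot(byRxRate);
         *      if (slot >= 0) {
         *              all[slot]++;
         *              if (byRSR & RSR_CRCOK)
         *                      crc_ok[slot]++;
         *      }
         */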
-
-/*
- * Description: Update Tx Statistic Counter
- *
- * Parameters:
- * In:
- * pStatistic - Pointer to Statistic Counter Data Structure
- * byTSR0 - Tx Status
- * byTSR1 - Tx Status
- * pbyBuffer - Tx Buffer
- * cbFrameLength - Tx Length
- * uIdx - Index of Tx DMA
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void
-STAvUpdateTDStatCounter (
- PSStatCounter pStatistic,
- u8 byPktNum,
- u8 byRate,
- u8 byTSR
- )
-{
- u8 byRetyCnt;
- // increase tx packet count
- pStatistic->dwTsrTxPacket++;
-
- byRetyCnt = (byTSR & 0xF0) >> 4;
- if (byRetyCnt != 0) {
- pStatistic->dwTsrRetry++;
- pStatistic->dwTsrTotalRetry += byRetyCnt;
- pStatistic->dwTxFail[byRate]+= byRetyCnt;
- pStatistic->dwTxFail[MAX_RATE] += byRetyCnt;
-
- if ( byRetyCnt == 0x1)
- pStatistic->dwTsrOnceRetry++;
- else
- pStatistic->dwTsrMoreThanOnceRetry++;
-
- if (byRetyCnt <= 8)
- pStatistic->dwTxRetryCount[byRetyCnt-1]++;
-
- }
- if ( !(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
-
- if (byRetyCnt < 2)
- pStatistic->TxNoRetryOkCount ++;
- else
- pStatistic->TxRetryOkCount ++;
-
- pStatistic->ullTsrOK++;
- pStatistic->CustomStat.ullTsrAllOK++;
- // update counters in case that successful transmit
- pStatistic->dwTxOk[byRate]++;
- pStatistic->dwTxOk[MAX_RATE]++;
-
- if ( pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni == TX_PKT_BROAD ) {
- pStatistic->ullTxBroadcastFrames++;
- pStatistic->ullTxBroadcastBytes += pStatistic->abyTxPktInfo[byPktNum].wLength;
- } else if ( pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni == TX_PKT_MULTI ) {
- pStatistic->ullTxMulticastFrames++;
- pStatistic->ullTxMulticastBytes += pStatistic->abyTxPktInfo[byPktNum].wLength;
- } else if ( pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni == TX_PKT_UNI ) {
- pStatistic->ullTxDirectedFrames++;
- pStatistic->ullTxDirectedBytes += pStatistic->abyTxPktInfo[byPktNum].wLength;
- }
- }
- else {
-
- pStatistic->TxFailCount ++;
-
- pStatistic->dwTsrErr++;
- if (byTSR & TSR_RETRYTMO)
- pStatistic->dwTsrRetryTimeout++;
- if (byTSR & TSR_TMO)
- pStatistic->dwTsrTransmitTimeout++;
- }
-
- if ( pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni == TX_PKT_BROAD ) {
- pStatistic->dwTsrBroadcast++;
- } else if ( pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni == TX_PKT_MULTI ) {
- pStatistic->dwTsrMulticast++;
- } else if ( pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni == TX_PKT_UNI ) {
- pStatistic->dwTsrDirected++;
- }
-}
-
-/*
- * Description: Update 802.11 mib counter
- *
- * Parameters:
- * In:
- * p802_11Counter - Pointer to 802.11 mib counter
- * pStatistic - Pointer to Statistic Counter Data Structure
- * dwCounter - hardware counter for 802.11 mib
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void
-STAvUpdate802_11Counter(
- PSDot11Counters p802_11Counter,
- PSStatCounter pStatistic,
- u8 byRTSSuccess,
- u8 byRTSFail,
- u8 byACKFail,
- u8 byFCSErr
- )
-{
- //p802_11Counter->TransmittedFragmentCount
- p802_11Counter->MulticastTransmittedFrameCount =
- (unsigned long long) (pStatistic->dwTsrBroadcast +
- pStatistic->dwTsrMulticast);
- p802_11Counter->FailedCount = (unsigned long long) (pStatistic->dwTsrErr);
- p802_11Counter->RetryCount = (unsigned long long) (pStatistic->dwTsrRetry);
- p802_11Counter->MultipleRetryCount =
- (unsigned long long) (pStatistic->dwTsrMoreThanOnceRetry);
- //p802_11Counter->FrameDuplicateCount
- p802_11Counter->RTSSuccessCount += (unsigned long long) byRTSSuccess;
- p802_11Counter->RTSFailureCount += (unsigned long long) byRTSFail;
- p802_11Counter->ACKFailureCount += (unsigned long long) byACKFail;
- p802_11Counter->FCSErrorCount += (unsigned long long) byFCSErr;
- //p802_11Counter->ReceivedFragmentCount
- p802_11Counter->MulticastReceivedFrameCount =
- (unsigned long long) (pStatistic->dwRsrBroadcast +
- pStatistic->dwRsrMulticast);
-}
-
-/*
- * Description: Clear 802.11 mib counter
- *
- * Parameters:
- * In:
- * pUsbCounter - Pointer to USB mib counter
- * ntStatus - URB status
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-
-void STAvUpdateUSBCounter(PSUSBCounter pUsbCounter, int ntStatus)
-{
-
-// if ( ntStatus == USBD_STATUS_CRC ) {
- pUsbCounter->dwCrc++;
-// }
-
-}
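With mib.c gone, the only transmit accounting this patch keeps is folded into the standard struct net_device_stats (see the s_vSaveTxPktInfo hunk in rxtx.c below). A minimal sketch of that pattern; vnt_count_tx() is an illustrative helper, not a function added by the patch:

        #include <linux/netdevice.h>

        /* standard per-device accounting replacing the private SStatCounter */
        static void vnt_count_tx(struct net_device *dev, unsigned int len)
        {
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += len;
        }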
diff --git a/drivers/staging/vt6656/mib.h b/drivers/staging/vt6656/mib.h
deleted file mode 100644
index 35375325a777..000000000000
--- a/drivers/staging/vt6656/mib.h
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: mib.h
- *
- * Purpose: Implement MIB Data Structure
- *
- * Author: Tevin Chen
- *
- * Date: May 21, 1996
- *
- */
-
-#ifndef __MIB_H__
-#define __MIB_H__
-
-#include "tether.h"
-#include "desc.h"
-
-//
-// USB counter
-//
-typedef struct tagSUSBCounter {
- u32 dwCrc;
-
-} SUSBCounter, *PSUSBCounter;
-
-//
-// 802.11 counter
-//
-
-typedef struct tagSDot11Counters {
- /* unsigned long Length; // Length of structure */
- unsigned long long TransmittedFragmentCount;
- unsigned long long MulticastTransmittedFrameCount;
- unsigned long long FailedCount;
- unsigned long long RetryCount;
- unsigned long long MultipleRetryCount;
- unsigned long long RTSSuccessCount;
- unsigned long long RTSFailureCount;
- unsigned long long ACKFailureCount;
- unsigned long long FrameDuplicateCount;
- unsigned long long ReceivedFragmentCount;
- unsigned long long MulticastReceivedFrameCount;
- unsigned long long FCSErrorCount;
- unsigned long long TKIPLocalMICFailures;
- unsigned long long TKIPRemoteMICFailures;
- unsigned long long TKIPICVErrors;
- unsigned long long TKIPReplays;
- unsigned long long CCMPFormatErrors;
- unsigned long long CCMPReplays;
- unsigned long long CCMPDecryptErrors;
- unsigned long long FourWayHandshakeFailures;
- /*
- * unsigned long long WEPUndecryptableCount;
- * unsigned long long WEPICVErrorCount;
- * unsigned long long DecryptSuccessCount;
- * unsigned long long DecryptFailureCount;
- */
-} SDot11Counters, *PSDot11Counters;
-
-//
-// MIB2 counter
-//
-typedef struct tagSMib2Counter {
- signed long ifIndex;
- char ifDescr[256]; // max size 255 plus zero ending
- // e.g. "interface 1"
- signed long ifType;
- signed long ifMtu;
- u32 ifSpeed;
- u8 ifPhysAddress[ETH_ALEN];
- signed long ifAdminStatus;
- signed long ifOperStatus;
- u32 ifLastChange;
- u32 ifInOctets;
- u32 ifInUcastPkts;
- u32 ifInNUcastPkts;
- u32 ifInDiscards;
- u32 ifInErrors;
- u32 ifInUnknownProtos;
- u32 ifOutOctets;
- u32 ifOutUcastPkts;
- u32 ifOutNUcastPkts;
- u32 ifOutDiscards;
- u32 ifOutErrors;
- u32 ifOutQLen;
- u32 ifSpecific;
-} SMib2Counter, *PSMib2Counter;
-
-// Value in the ifType entry
-#define WIRELESSLANIEEE80211b 6 //
-
-// Value in the ifAdminStatus/ifOperStatus entry
-#define UP 1 //
-#define DOWN 2 //
-#define TESTING 3 //
-
-//
-// RMON counter
-//
-typedef struct tagSRmonCounter {
- signed long etherStatsIndex;
- u32 etherStatsDataSource;
- u32 etherStatsDropEvents;
- u32 etherStatsOctets;
- u32 etherStatsPkts;
- u32 etherStatsBroadcastPkts;
- u32 etherStatsMulticastPkts;
- u32 etherStatsCRCAlignErrors;
- u32 etherStatsUndersizePkts;
- u32 etherStatsOversizePkts;
- u32 etherStatsFragments;
- u32 etherStatsJabbers;
- u32 etherStatsCollisions;
- u32 etherStatsPkt64Octets;
- u32 etherStatsPkt65to127Octets;
- u32 etherStatsPkt128to255Octets;
- u32 etherStatsPkt256to511Octets;
- u32 etherStatsPkt512to1023Octets;
- u32 etherStatsPkt1024to1518Octets;
- u32 etherStatsOwners;
- u32 etherStatsStatus;
-} SRmonCounter, *PSRmonCounter;
-
-//
-// Custom counter
-//
-typedef struct tagSCustomCounters {
- unsigned long Length;
-
- unsigned long long ullTsrAllOK;
-
- unsigned long long ullRsr11M;
- unsigned long long ullRsr5M;
- unsigned long long ullRsr2M;
- unsigned long long ullRsr1M;
-
- unsigned long long ullRsr11MCRCOk;
- unsigned long long ullRsr5MCRCOk;
- unsigned long long ullRsr2MCRCOk;
- unsigned long long ullRsr1MCRCOk;
-
- unsigned long long ullRsr54M;
- unsigned long long ullRsr48M;
- unsigned long long ullRsr36M;
- unsigned long long ullRsr24M;
- unsigned long long ullRsr18M;
- unsigned long long ullRsr12M;
- unsigned long long ullRsr9M;
- unsigned long long ullRsr6M;
-
- unsigned long long ullRsr54MCRCOk;
- unsigned long long ullRsr48MCRCOk;
- unsigned long long ullRsr36MCRCOk;
- unsigned long long ullRsr24MCRCOk;
- unsigned long long ullRsr18MCRCOk;
- unsigned long long ullRsr12MCRCOk;
- unsigned long long ullRsr9MCRCOk;
- unsigned long long ullRsr6MCRCOk;
-
-} SCustomCounters, *PSCustomCounters;
-
-//
-// Custom counter
-//
-typedef struct tagSISRCounters {
- unsigned long Length;
-
- u32 dwIsrTx0OK;
- u32 dwIsrAC0TxOK;
- u32 dwIsrBeaconTxOK;
- u32 dwIsrRx0OK;
- u32 dwIsrTBTTInt;
- u32 dwIsrSTIMERInt;
- u32 dwIsrWatchDog;
- u32 dwIsrUnrecoverableError;
- u32 dwIsrSoftInterrupt;
- u32 dwIsrMIBNearfull;
- u32 dwIsrRxNoBuf;
-
- u32 dwIsrUnknown; // unknown interrupt count
-
- u32 dwIsrRx1OK;
- u32 dwIsrATIMTxOK;
- u32 dwIsrSYNCTxOK;
- u32 dwIsrCFPEnd;
- u32 dwIsrATIMEnd;
- u32 dwIsrSYNCFlushOK;
- u32 dwIsrSTIMER1Int;
- /////////////////////////////////////
-} SISRCounters, *PSISRCounters;
-
-// Value in the etherStatsStatus entry
-#define VALID 1 //
-#define CREATE_REQUEST 2 //
-#define UNDER_CREATION 3 //
-#define INVALID 4 //
-
-//
-// Tx packet information
-//
-typedef struct tagSTxPktInfo {
- u8 byBroadMultiUni;
- u16 wLength;
- u16 wFIFOCtl;
- u8 abyDestAddr[ETH_ALEN];
-} STxPktInfo, *PSTxPktInfo;
-
-#define MAX_RATE 12
-//
-// statistic counter
-//
-typedef struct tagSStatCounter {
- //
- // ISR status count
- //
-
- SISRCounters ISRStat;
-
- // RSR status count
- //
- u32 dwRsrFrmAlgnErr;
- u32 dwRsrErr;
- u32 dwRsrCRCErr;
- u32 dwRsrCRCOk;
- u32 dwRsrBSSIDOk;
- u32 dwRsrADDROk;
- u32 dwRsrBCNSSIDOk;
- u32 dwRsrLENErr;
- u32 dwRsrTYPErr;
-
- u32 dwNewRsrDECRYPTOK;
- u32 dwNewRsrCFP;
- u32 dwNewRsrUTSF;
- u32 dwNewRsrHITAID;
- u32 dwNewRsrHITAID0;
-
- u32 dwRsrLong;
- u32 dwRsrRunt;
-
- u32 dwRsrRxControl;
- u32 dwRsrRxData;
- u32 dwRsrRxManage;
-
- u32 dwRsrRxPacket;
- u32 dwRsrRxOctet;
- u32 dwRsrBroadcast;
- u32 dwRsrMulticast;
- u32 dwRsrDirected;
- // 64-bit OID
- unsigned long long ullRsrOK;
-
- // for some optional OIDs (64 bits) and DMI support
- unsigned long long ullRxBroadcastBytes;
- unsigned long long ullRxMulticastBytes;
- unsigned long long ullRxDirectedBytes;
- unsigned long long ullRxBroadcastFrames;
- unsigned long long ullRxMulticastFrames;
- unsigned long long ullRxDirectedFrames;
-
- u32 dwRsrRxFragment;
- u32 dwRsrRxFrmLen64;
- u32 dwRsrRxFrmLen65_127;
- u32 dwRsrRxFrmLen128_255;
- u32 dwRsrRxFrmLen256_511;
- u32 dwRsrRxFrmLen512_1023;
- u32 dwRsrRxFrmLen1024_1518;
-
- // TSR status count
- //
- u32 dwTsrTotalRetry; // total collision retry count
- u32 dwTsrOnceRetry; // this packet only occur one collision
- u32 dwTsrMoreThanOnceRetry; // this packet occur more than one collision
- u32 dwTsrRetry; // this packet has ever occur collision,
- // that is (dwTsrOnceCollision0 + dwTsrMoreThanOnceCollision0)
- u32 dwTsrACKData;
- u32 dwTsrErr;
- u32 dwAllTsrOK;
- u32 dwTsrRetryTimeout;
- u32 dwTsrTransmitTimeout;
-
- u32 dwTsrTxPacket;
- u32 dwTsrTxOctet;
- u32 dwTsrBroadcast;
- u32 dwTsrMulticast;
- u32 dwTsrDirected;
-
- // RD/TD count
- u32 dwCntRxFrmLength;
- u32 dwCntTxBufLength;
-
- u8 abyCntRxPattern[16];
- u8 abyCntTxPattern[16];
-
- // Software check....
- u32 dwCntRxDataErr; // rx buffer data software compare CRC err count
- u32 dwCntDecryptErr; // rx buffer data software compare CRC err count
- u32 dwCntRxICVErr; // rx buffer data software compare CRC err count
-
- // 64-bit OID
- unsigned long long ullTsrOK;
-
- // for some optional OIDs (64 bits) and DMI support
- unsigned long long ullTxBroadcastFrames;
- unsigned long long ullTxMulticastFrames;
- unsigned long long ullTxDirectedFrames;
- unsigned long long ullTxBroadcastBytes;
- unsigned long long ullTxMulticastBytes;
- unsigned long long ullTxDirectedBytes;
-
- // for autorate
- u32 dwTxOk[MAX_RATE+1];
- u32 dwTxFail[MAX_RATE+1];
- u32 dwTxRetryCount[8];
-
- STxPktInfo abyTxPktInfo[16];
-
- SUSBCounter USB_EP0Stat;
- SUSBCounter USB_BulkInStat;
- SUSBCounter USB_BulkOutStat;
- SUSBCounter USB_InterruptStat;
-
- SCustomCounters CustomStat;
-
- //Tx count:
- unsigned long TxNoRetryOkCount; /* success tx no retry ! */
- unsigned long TxRetryOkCount; /* success tx but retry ! */
- unsigned long TxFailCount; /* fail tx ? */
- //Rx count:
- unsigned long RxOkCnt; /* success rx ! */
- unsigned long RxFcsErrCnt; /* fail rx ? */
- //statistic
- unsigned long SignalStren;
- unsigned long LinkQuality;
-
-} SStatCounter, *PSStatCounter;
-
-void STAvUpdateIsrStatCounter(PSStatCounter pStatistic,
- u8 byIsr0,
- u8 byIsr1);
-
-void STAvUpdateRDStatCounter(PSStatCounter pStatistic,
- u8 byRSR, u8 byNewRSR, u8 byRxSts,
- u8 byRxRate, u8 * pbyBuffer,
- unsigned int cbFrameLength);
-
-void STAvUpdateTDStatCounter(PSStatCounter pStatistic, u8 byPktNum,
- u8 byRate, u8 byTSR);
-
-void
-STAvUpdate802_11Counter(
- PSDot11Counters p802_11Counter,
- PSStatCounter pStatistic,
- u8 byRTSSuccess,
- u8 byRTSFail,
- u8 byACKFail,
- u8 byFCSErr
- );
-
-void STAvUpdateUSBCounter(PSUSBCounter pUsbCounter, int ntStatus);
-
-#endif /* __MIB_H__ */
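The dwRsrRxFrmLen* counters removed above bucket received frames the same way the RMON etherStatsPkt*Octets entries do, with 64 = ETH_ZLEN + 4 (minimum frame plus FCS) and 1518 = ETH_FRAME_LEN + 4 as the boundaries. A small sketch of that bucketing; rmon_bucket() is an illustrative helper only:

        static int rmon_bucket(unsigned int len)
        {
                if (len < 64)
                        return -1;      /* runt */
                if (len == 64)
                        return 0;
                if (len <= 127)
                        return 1;
                if (len <= 255)
                        return 2;
                if (len <= 511)
                        return 3;
                if (len <= 1023)
                        return 4;
                if (len <= 1518)
                        return 5;
                return -2;              /* oversize */
        }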
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index d27fa434550d..1e8f64bff03c 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -419,7 +419,7 @@ static u8 vt3226_channel_table1[CB_MAX_CHANNEL_24G][3] = {
///}}RobertYu
//{{RobertYu:20060502, TWIF 1.14, LO Current for 11b mode
-const u32 vt3226d0_lo_current_table[CB_MAX_CHANNEL_24G] = {
+static const u32 vt3226d0_lo_current_table[CB_MAX_CHANNEL_24G] = {
0x0135C600+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
0x0135C600+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
0x0235C600+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
@@ -597,7 +597,7 @@ static u8 vt3342_channel_table1[CB_MAX_CHANNEL][3] = {
*
-*/
-const u32 al2230_power_table[AL2230_PWR_IDX_LEN] = {
+static const u32 al2230_power_table[AL2230_PWR_IDX_LEN] = {
0x04040900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
0x04041900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
0x04042900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
@@ -740,9 +740,6 @@ int RFbSetPower(struct vnt_private *priv, u32 rate, u32 channel)
int ret = true;
u8 power = priv->byCCKPwr;
- if (priv->dwDiagRefCount)
- return true;
-
if (channel == 0)
return -EINVAL;
diff --git a/drivers/staging/vt6656/rndis.h b/drivers/staging/vt6656/rndis.h
index 5cf5e732a36f..3661f82766e0 100644
--- a/drivers/staging/vt6656/rndis.h
+++ b/drivers/staging/vt6656/rndis.h
@@ -79,23 +79,23 @@ typedef struct _CMD_WRITE_MASK
u8 byMask;
} CMD_WRITE_MASK, *PCMD_WRITE_MASK;
-typedef struct _CMD_CARD_INIT
+struct vnt_cmd_card_init
{
- u8 byInitClass;
- u8 bExistSWNetAddr;
- u8 bySWNetAddr[6];
- u8 byShortRetryLimit;
- u8 byLongRetryLimit;
-} CMD_CARD_INIT, *PCMD_CARD_INIT;
-
-typedef struct _RSP_CARD_INIT
+ u8 init_class;
+ u8 exist_sw_net_addr;
+ u8 sw_net_addr[6];
+ u8 short_retry_limit;
+ u8 long_retry_limit;
+};
+
+struct vnt_rsp_card_init
{
- u8 byStatus;
- u8 byNetAddr[6];
- u8 byRFType;
- u8 byMinChannel;
- u8 byMaxChannel;
-} RSP_CARD_INIT, *PRSP_CARD_INIT;
+ u8 status;
+ u8 net_addr[6];
+ u8 rf_type;
+ u8 min_channel;
+ u8 max_channel;
+};
typedef struct _CMD_SET_KEY
{
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 35a3ddb41a6a..51fff896fcb5 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -64,16 +64,16 @@
static int msglevel = MSG_LEVEL_INFO;
-const u16 wTimeStampOff[2][MAX_RATE] = {
+static const u16 wTimeStampOff[2][MAX_RATE] = {
{384, 288, 226, 209, 54, 43, 37, 31, 28, 25, 24, 23}, // Long Preamble
{384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23}, // Short Preamble
};
-const u16 wFB_Opt0[2][5] = {
+static const u16 wFB_Opt0[2][5] = {
{RATE_12M, RATE_18M, RATE_24M, RATE_36M, RATE_48M}, // fallback_rate0
{RATE_12M, RATE_12M, RATE_18M, RATE_24M, RATE_36M}, // fallback_rate1
};
-const u16 wFB_Opt1[2][5] = {
+static const u16 wFB_Opt1[2][5] = {
{RATE_12M, RATE_18M, RATE_24M, RATE_24M, RATE_36M}, // fallback_rate0
{RATE_6M , RATE_6M, RATE_12M, RATE_12M, RATE_18M}, // fallback_rate1
};
@@ -96,7 +96,7 @@ const u16 wFB_Opt1[2][5] = {
static void s_vSaveTxPktInfo(struct vnt_private *pDevice, u8 byPktNum,
u8 *pbyDestAddr, u16 wPktLength, u16 wFIFOCtl);
-static void *s_vGetFreeContext(struct vnt_private *pDevice);
+static struct vnt_usb_send_context *s_vGetFreeContext(struct vnt_private *);
static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
u8 byPktType, u16 wCurrentRate, struct vnt_tx_buffer *tx_buffer,
@@ -118,8 +118,8 @@ static void s_vSWencryption(struct vnt_private *pDevice,
static unsigned int s_uGetTxRsvTime(struct vnt_private *pDevice, u8 byPktType,
u32 cbFrameLength, u16 wRate, int bNeedAck);
-static u16 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice, u8 byRTSRsvType,
- u8 byPktType, u32 cbFrameLength, u16 wCurrentRate);
+static u16 s_uGetRTSCTSRsvTime(struct vnt_private *priv,
+ u8 rsv_type, u8 pkt_type, u32 frame_length, u16 current_rate);
static u16 s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
u8 byPktType, union vnt_tx_data_head *head, u32 cbFrameLength,
@@ -136,48 +136,43 @@ static u16 s_uGetRTSCTSDuration(struct vnt_private *pDevice,
u8 byDurType, u32 cbFrameLength, u8 byPktType, u16 wRate,
int bNeedAck, u8 byFBOption);
-static void *s_vGetFreeContext(struct vnt_private *pDevice)
+static struct vnt_usb_send_context
+ *s_vGetFreeContext(struct vnt_private *priv)
{
- struct vnt_usb_send_context *pContext = NULL;
- struct vnt_usb_send_context *pReturnContext = NULL;
+ struct vnt_usb_send_context *context = NULL;
int ii;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n");
-
- for (ii = 0; ii < pDevice->cbTD; ii++) {
- if (!pDevice->apTD[ii])
- return NULL;
- pContext = pDevice->apTD[ii];
- if (pContext->bBoolInUse == false) {
- pContext->bBoolInUse = true;
- memset(pContext->Data, 0, MAX_TOTAL_SIZE_WITH_ALL_HEADERS);
- pReturnContext = pContext;
- break;
- }
- }
- if ( ii == pDevice->cbTD ) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"No Free Tx Context\n");
- }
- return (void *) pReturnContext;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n");
+
+ for (ii = 0; ii < priv->cbTD; ii++) {
+ if (!priv->apTD[ii])
+ return NULL;
+
+ context = priv->apTD[ii];
+ if (context->bBoolInUse == false) {
+ context->bBoolInUse = true;
+ memset(context->Data, 0,
+ MAX_TOTAL_SIZE_WITH_ALL_HEADERS);
+ return context;
+ }
+ }
+
+ if (ii == priv->cbTD)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"No Free Tx Context\n");
+
+ return NULL;
}
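Returning struct vnt_usb_send_context * instead of void * lets the compiler type-check the callers, which the later hunks simplify to plain assignments. A sketch of the resulting call pattern (error value taken from the existing callers):

        struct vnt_usb_send_context *context;

        context = s_vGetFreeContext(priv);
        if (!context)
                return CMD_STATUS_RESOURCES;    /* no free Tx slot */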
static void s_vSaveTxPktInfo(struct vnt_private *pDevice, u8 byPktNum,
u8 *pbyDestAddr, u16 wPktLength, u16 wFIFOCtl)
{
- PSStatCounter pStatistic = &pDevice->scStatistic;
-
- if (is_broadcast_ether_addr(pbyDestAddr))
- pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni = TX_PKT_BROAD;
- else if (is_multicast_ether_addr(pbyDestAddr))
- pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni = TX_PKT_MULTI;
- else
- pStatistic->abyTxPktInfo[byPktNum].byBroadMultiUni = TX_PKT_UNI;
-
- pStatistic->abyTxPktInfo[byPktNum].wLength = wPktLength;
- pStatistic->abyTxPktInfo[byPktNum].wFIFOCtl = wFIFOCtl;
- memcpy(pStatistic->abyTxPktInfo[byPktNum].abyDestAddr,
- pbyDestAddr,
- ETH_ALEN);
+ struct net_device_stats *stats = &pDevice->stats;
+ struct vnt_tx_pkt_info *pkt_info = pDevice->pkt_info;
+
+ pkt_info[byPktNum].fifo_ctl = wFIFOCtl;
+ memcpy(pkt_info[byPktNum].dest_addr, pbyDestAddr, ETH_ALEN);
+
+ stats->tx_bytes += wPktLength;
}
static void s_vFillTxKey(struct vnt_private *pDevice,
@@ -278,7 +273,7 @@ static void s_vFillTxKey(struct vnt_private *pDevice,
mic_hdr->tsc_15_0 = cpu_to_be16(pTransmitKey->wTSC15_0);
/* MICHDR1 */
- if (pDevice->bLongHeader)
+ if (ieee80211_has_a4(pMACHeader->frame_control))
mic_hdr->hlen = cpu_to_be16(28);
else
mic_hdr->hlen = cpu_to_be16(22);
@@ -292,7 +287,7 @@ static void s_vFillTxKey(struct vnt_private *pDevice,
& 0xc78f);
mic_hdr->seq_ctrl = cpu_to_le16(pMACHeader->seq_ctrl & 0xf);
- if (pDevice->bLongHeader)
+ if (ieee80211_has_a4(pMACHeader->frame_control))
memcpy(mic_hdr->addr4, pMACHeader->addr4, ETH_ALEN);
}
}
@@ -343,24 +338,25 @@ static u16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
PK_TYPE_11GB 2
PK_TYPE_11GA 3
*/
-static u32 s_uGetTxRsvTime(struct vnt_private *pDevice, u8 byPktType,
- u32 cbFrameLength, u16 wRate, int bNeedAck)
+static u32 s_uGetTxRsvTime(struct vnt_private *priv, u8 pkt_type,
+ u32 frame_length, u16 rate, int need_ack)
{
- u32 uDataTime, uAckTime;
+ u32 data_time, ack_time;
- uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wRate);
- if (byPktType == PK_TYPE_11B) {//llb,CCK mode
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (u16)pDevice->byTopCCKBasicRate);
- } else {//11g 2.4G OFDM mode & 11a 5G OFDM mode
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (u16)pDevice->byTopOFDMBasicRate);
- }
+ data_time = BBuGetFrameTime(priv->byPreambleType, pkt_type,
+ frame_length, rate);
- if (bNeedAck) {
- return (uDataTime + pDevice->uSIFS + uAckTime);
- }
- else {
- return uDataTime;
- }
+ if (pkt_type == PK_TYPE_11B)
+ ack_time = BBuGetFrameTime(priv->byPreambleType, pkt_type, 14,
+ (u16)priv->byTopCCKBasicRate);
+ else
+ ack_time = BBuGetFrameTime(priv->byPreambleType, pkt_type, 14,
+ (u16)priv->byTopOFDMBasicRate);
+
+ if (need_ack)
+ return data_time + priv->uSIFS + ack_time;
+
+ return data_time;
}
static u16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
@@ -371,37 +367,47 @@ static u16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
}
//byFreqType: 0=>5GHZ 1=>2.4GHZ
-static u16 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice,
- u8 byRTSRsvType, u8 byPktType, u32 cbFrameLength, u16 wCurrentRate)
+static u16 s_uGetRTSCTSRsvTime(struct vnt_private *priv,
+ u8 rsv_type, u8 pkt_type, u32 frame_length, u16 current_rate)
{
- u32 uRrvTime, uRTSTime, uCTSTime, uAckTime, uDataTime;
+ u32 rrv_time, rts_time, cts_time, ack_time, data_time;
+
+ rrv_time = rts_time = cts_time = ack_time = data_time = 0;
+
+ data_time = BBuGetFrameTime(priv->byPreambleType, pkt_type,
+ frame_length, current_rate);
+
+ if (rsv_type == 0) {
+ rts_time = BBuGetFrameTime(priv->byPreambleType,
+ pkt_type, 20, priv->byTopCCKBasicRate);
+ cts_time = ack_time = BBuGetFrameTime(priv->byPreambleType,
+ pkt_type, 14, priv->byTopCCKBasicRate);
+ } else if (rsv_type == 1) {
+ rts_time = BBuGetFrameTime(priv->byPreambleType,
+ pkt_type, 20, priv->byTopCCKBasicRate);
+ cts_time = BBuGetFrameTime(priv->byPreambleType, pkt_type,
+ 14, priv->byTopCCKBasicRate);
+ ack_time = BBuGetFrameTime(priv->byPreambleType, pkt_type,
+ 14, priv->byTopOFDMBasicRate);
+ } else if (rsv_type == 2) {
+ rts_time = BBuGetFrameTime(priv->byPreambleType, pkt_type,
+ 20, priv->byTopOFDMBasicRate);
+ cts_time = ack_time = BBuGetFrameTime(priv->byPreambleType,
+ pkt_type, 14, priv->byTopOFDMBasicRate);
+ } else if (rsv_type == 3) {
+ cts_time = BBuGetFrameTime(priv->byPreambleType, pkt_type,
+ 14, priv->byTopCCKBasicRate);
+ ack_time = BBuGetFrameTime(priv->byPreambleType, pkt_type,
+ 14, priv->byTopOFDMBasicRate);
+
+ rrv_time = cts_time + ack_time + data_time + 2 * priv->uSIFS;
+
+ return rrv_time;
+ }
- uRrvTime = uRTSTime = uCTSTime = uAckTime = uDataTime = 0;
+ rrv_time = rts_time + cts_time + ack_time + data_time + 3 * priv->uSIFS;
- uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wCurrentRate);
- if (byRTSRsvType == 0) { //RTSTxRrvTime_bb
- uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate);
- uCTSTime = uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- }
- else if (byRTSRsvType == 1){ //RTSTxRrvTime_ba, only in 2.4GHZ
- uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate);
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- }
- else if (byRTSRsvType == 2) { //RTSTxRrvTime_aa
- uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopOFDMBasicRate);
- uCTSTime = uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- }
- else if (byRTSRsvType == 3) { //CTSTxRrvTime_ba, only in 2.4GHZ
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- uRrvTime = uCTSTime + uAckTime + uDataTime + 2*pDevice->uSIFS;
- return uRrvTime;
- }
-
- //RTSRrvTime
- uRrvTime = uRTSTime + uCTSTime + uAckTime + uDataTime + 3*pDevice->uSIFS;
- return cpu_to_le16((u16)uRrvTime);
+ return cpu_to_le16((u16)rrv_time);
}
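The refactor keeps the original timing model: the reserved time covers every frame in the protection exchange plus the inter-frame gaps. Sketched as a timeline, with the fixed frame sizes taken from the calls above:

        /*
         * rsv_type 0-2 (RTS/CTS protection):
         *
         *   RTS(20) -SIFS- CTS(14) -SIFS- DATA(frame_length) -SIFS- ACK(14)
         *   rrv_time = rts_time + cts_time + data_time + ack_time + 3 * uSIFS
         *
         * rsv_type 3 (CTS-to-self, 2.4 GHz only):
         *
         *   CTS(14) -SIFS- DATA(frame_length) -SIFS- ACK(14)
         *   rrv_time = cts_time + data_time + ack_time + 2 * uSIFS
         */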
//byFreqType 0: 5GHz, 1:2.4Ghz
@@ -790,7 +796,6 @@ static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
{
struct vnt_tx_fifo_head *pFifoHead = &tx_buffer->fifo_head;
union vnt_tx_data_head *head = NULL;
- u32 cbMACHdLen = WLAN_HDR_ADDR3_LEN; /* 24 */
u16 wFifoCtl;
u8 byFBOption = AUTO_FB_NONE;
@@ -805,9 +810,6 @@ static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
if (!pFifoHead)
return 0;
- if (pDevice->bLongHeader)
- cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
-
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
if (need_rts) {
struct vnt_rrv_time_rts *pBuf =
@@ -978,28 +980,19 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
bSoftWEP = true; /* WEP 256 */
}
- // Get pkt type
- if (ntohs(psEthHeader->h_proto) > ETH_DATA_LEN) {
- if (pDevice->dwDiagRefCount == 0) {
- cb802_1_H_len = 8;
- } else {
- cb802_1_H_len = 2;
- }
- } else {
- cb802_1_H_len = 0;
- }
+ /* Get pkt type */
+ if (ntohs(psEthHeader->h_proto) > ETH_DATA_LEN)
+ cb802_1_H_len = 8;
+ else
+ cb802_1_H_len = 0;
cbFrameBodySize = uSkbPacketLen - ETH_HLEN + cb802_1_H_len;
//Set packet type
pTxBufHead->wFIFOCtl |= (u16)(byPktType<<8);
- if (pDevice->dwDiagRefCount != 0) {
- bNeedACK = false;
- pTxBufHead->wFIFOCtl = pTxBufHead->wFIFOCtl & (~FIFOCTL_NEEDACK);
- } else { //if (pDevice->dwDiagRefCount != 0) {
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
+ (pDevice->eOPMode == OP_MODE_AP)) {
if (is_multicast_ether_addr(psEthHeader->h_dest)) {
bNeedACK = false;
pTxBufHead->wFIFOCtl =
@@ -1008,26 +1001,17 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
}
- }
- else {
- // MSDUs in Infra mode always need ACK
- bNeedACK = true;
- pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
- }
- } //if (pDevice->dwDiagRefCount != 0) {
+ } else {
+ /* MSDUs in Infra mode always need ACK */
+ bNeedACK = true;
+ pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
+ }
pTxBufHead->wTimeStamp = DEFAULT_MSDU_LIFETIME_RES_64us;
- //Set FIFOCTL_LHEAD
- if (pDevice->bLongHeader)
- pTxBufHead->wFIFOCtl |= FIFOCTL_LHEAD;
-
//Set FRAGCTL_MACHDCNT
- if (pDevice->bLongHeader) {
- cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
- } else {
- cbMACHdLen = WLAN_HDR_ADDR3_LEN;
- }
+ cbMACHdLen = WLAN_HDR_ADDR3_LEN;
+
pTxBufHead->wFragCtl |= (u16)(cbMACHdLen << 10);
//Set FIFOCTL_GrpAckPolicy
@@ -1183,24 +1167,19 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
}
}
- // 802.1H
- if (ntohs(psEthHeader->h_proto) > ETH_DATA_LEN) {
- if (pDevice->dwDiagRefCount == 0) {
+ /* 802.1H */
+ if (ntohs(psEthHeader->h_proto) > ETH_DATA_LEN) {
if ((psEthHeader->h_proto == cpu_to_be16(ETH_P_IPX)) ||
- (psEthHeader->h_proto == cpu_to_le16(0xF380))) {
+ (psEthHeader->h_proto == cpu_to_le16(0xF380)))
memcpy((u8 *) (pbyPayloadHead),
- abySNAP_Bridgetunnel, 6);
- } else {
- memcpy((u8 *) (pbyPayloadHead), &abySNAP_RFC1042[0], 6);
- }
- pbyType = (u8 *) (pbyPayloadHead + 6);
- memcpy(pbyType, &(psEthHeader->h_proto), sizeof(u16));
- } else {
- memcpy((u8 *) (pbyPayloadHead), &(psEthHeader->h_proto), sizeof(u16));
+ abySNAP_Bridgetunnel, 6);
+ else
+ memcpy((u8 *) (pbyPayloadHead), &abySNAP_RFC1042[0], 6);
- }
+ pbyType = (u8 *) (pbyPayloadHead + 6);
- }
+ memcpy(pbyType, &(psEthHeader->h_proto), sizeof(u16));
+ }
if (pPacket != NULL) {
// Copy the Packet into a tx Buffer
@@ -1352,11 +1331,6 @@ static void s_vGenerateMACHeader(struct vnt_private *pDevice,
pMACHeader->duration_id = cpu_to_le16(wDuration);
- if (pDevice->bLongHeader) {
- PWLAN_80211HDR_A4 pMACA4Header = (PWLAN_80211HDR_A4) pbyBufferAddr;
- pMACHeader->frame_control |= (FC_TODS | FC_FROMDS);
- memcpy(pMACA4Header->abyAddr4, pDevice->abyBSSID, WLAN_ADDR_LEN);
- }
pMACHeader->seq_ctrl = cpu_to_le16(pDevice->wSeqCounter << 4);
//Set FragNumber in Sequence Control
@@ -1409,7 +1383,7 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
u32 cbMacHdLen;
u16 wCurrentRate = RATE_1M;
- pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
+ pContext = s_vGetFreeContext(pDevice);
if (NULL == pContext) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ManagementSend TX...NO CONTEXT!\n");
@@ -1494,7 +1468,6 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
// Notes:
// Although spec says MMPDU can be fragmented; In most case,
// no one will send a MMPDU under fragmentation. With RTS may occur.
- pDevice->bAES = false; //Set FRAGCTL_WEPTYP
if (WLAN_GET_FC_ISWEP(pPacket->p80211Header->sA4.wFrameCtl) != 0) {
if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) {
@@ -1515,7 +1488,6 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
pTxBufHead->wFragCtl |= FRAGCTL_AES;
- pDevice->bAES = true;
}
//MAC Header should be padding 0 to DW alignment.
uPadding = 4 - (cbMacHdLen%4);
@@ -1659,20 +1631,17 @@ CMD_STATUS csBeacon_xmit(struct vnt_private *pDevice,
struct vnt_tx_mgmt *pPacket)
{
struct vnt_beacon_buffer *pTX_Buffer;
+ struct vnt_tx_short_buf_head *short_head;
u32 cbFrameSize = pPacket->cbMPDULen + WLAN_FCS_LEN;
u32 cbHeaderSize = 0;
- u16 wTxBufSize = sizeof(STxShortBufHead);
- PSTxShortBufHead pTxBufHead;
struct ieee80211_hdr *pMACHeader;
- struct vnt_tx_datahead_ab *pTxDataHead;
u16 wCurrentRate;
u32 cbFrameBodySize;
u32 cbReqCount;
- u8 *pbyTxBufferAddr;
struct vnt_usb_send_context *pContext;
CMD_STATUS status;
- pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
+ pContext = s_vGetFreeContext(pDevice);
if (NULL == pContext) {
status = CMD_STATUS_RESOURCES;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ManagementSend TX...NO CONTEXT!\n");
@@ -1680,49 +1649,50 @@ CMD_STATUS csBeacon_xmit(struct vnt_private *pDevice,
}
pTX_Buffer = (struct vnt_beacon_buffer *)&pContext->Data[0];
- pbyTxBufferAddr = (u8 *)&(pTX_Buffer->wFIFOCtl);
+ short_head = &pTX_Buffer->short_head;
cbFrameBodySize = pPacket->cbPayloadLen;
- pTxBufHead = (PSTxShortBufHead) pbyTxBufferAddr;
- wTxBufSize = sizeof(STxShortBufHead);
+ cbHeaderSize = sizeof(struct vnt_tx_short_buf_head);
- if (pDevice->byBBType == BB_TYPE_11A) {
- wCurrentRate = RATE_6M;
- pTxDataHead = (struct vnt_tx_datahead_ab *)
- (pbyTxBufferAddr + wTxBufSize);
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate, PK_TYPE_11A,
- &pTxDataHead->ab);
- //Get Duration and TimeStampOff
- pTxDataHead->wDuration = s_uGetDataDuration(pDevice,
- PK_TYPE_11A, false);
- pTxDataHead->wTimeStampOff = vnt_time_stamp_off(pDevice, wCurrentRate);
- cbHeaderSize = wTxBufSize + sizeof(struct vnt_tx_datahead_ab);
- } else {
- wCurrentRate = RATE_1M;
- pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
- pTxDataHead = (struct vnt_tx_datahead_ab *)
- (pbyTxBufferAddr + wTxBufSize);
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate, PK_TYPE_11B,
- &pTxDataHead->ab);
- //Get Duration and TimeStampOff
- pTxDataHead->wDuration = s_uGetDataDuration(pDevice,
+ if (pDevice->byBBType == BB_TYPE_11A) {
+ wCurrentRate = RATE_6M;
+
+ /* Get SignalField,ServiceField,Length */
+ BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate,
+ PK_TYPE_11A, &short_head->ab);
+
+ /* Get Duration and TimeStampOff */
+ short_head->duration = s_uGetDataDuration(pDevice,
+ PK_TYPE_11A, false);
+ short_head->time_stamp_off =
+ vnt_time_stamp_off(pDevice, wCurrentRate);
+ } else {
+ wCurrentRate = RATE_1M;
+ short_head->fifo_ctl |= FIFOCTL_11B;
+
+ /* Get SignalField,ServiceField,Length */
+ BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate,
+ PK_TYPE_11B, &short_head->ab);
+
+ /* Get Duration and TimeStampOff */
+ short_head->duration = s_uGetDataDuration(pDevice,
PK_TYPE_11B, false);
- pTxDataHead->wTimeStampOff = vnt_time_stamp_off(pDevice, wCurrentRate);
- cbHeaderSize = wTxBufSize + sizeof(struct vnt_tx_datahead_ab);
- }
+ short_head->time_stamp_off =
+ vnt_time_stamp_off(pDevice, wCurrentRate);
+ }
- //Generate Beacon Header
- pMACHeader = (struct ieee80211_hdr *)(pbyTxBufferAddr + cbHeaderSize);
- memcpy(pMACHeader, pPacket->p80211Header, pPacket->cbMPDULen);
- pMACHeader->duration_id = 0;
- pMACHeader->seq_ctrl = cpu_to_le16(pDevice->wSeqCounter << 4);
- pDevice->wSeqCounter++ ;
- if (pDevice->wSeqCounter > 0x0fff)
- pDevice->wSeqCounter = 0;
+ /* Generate Beacon Header */
+ pMACHeader = &pTX_Buffer->hdr;
+
+ memcpy(pMACHeader, pPacket->p80211Header, pPacket->cbMPDULen);
+
+ pMACHeader->duration_id = 0;
+ pMACHeader->seq_ctrl = cpu_to_le16(pDevice->wSeqCounter << 4);
+ pDevice->wSeqCounter++;
+ if (pDevice->wSeqCounter > 0x0fff)
+ pDevice->wSeqCounter = 0;
cbReqCount = cbHeaderSize + WLAN_HDR_ADDR3_LEN + cbFrameBodySize;
@@ -1781,7 +1751,7 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
}
p80211Header = (PUWLAN_80211HDR)skb->data;
- pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
+ pContext = s_vGetFreeContext(pDevice);
if (NULL == pContext) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0 TX...NO CONTEXT!\n");
@@ -1892,7 +1862,6 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
// Notes:
// Although spec says MMPDU can be fragmented; In most case,
// no one will send a MMPDU under fragmentation. With RTS may occur.
- pDevice->bAES = false; //Set FRAGCTL_WEPTYP
if (WLAN_GET_FC_ISWEP(p80211Header->sA4.wFrameCtl) != 0) {
if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) {
@@ -1914,7 +1883,6 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
cbICVlen = 8;//MIC
cbMICHDR = sizeof(struct vnt_mic_hdr);
pTxBufHead->wFragCtl |= FRAGCTL_AES;
- pDevice->bAES = true;
}
//MAC Header should be padding 0 to DW alignment.
uPadding = 4 - (cbMacHdLen%4);
@@ -2204,7 +2172,7 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
}
}
- pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
+ pContext = s_vGetFreeContext(pDevice);
if (pContext == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG" pContext == NULL\n");
@@ -2529,7 +2497,7 @@ int bRelayPacketSend(struct vnt_private *pDevice, u8 *pbySkbData, u32 uDataLen,
u32 status;
u16 wKeepRate = pDevice->wCurrentRate;
- pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
+ pContext = s_vGetFreeContext(pDevice);
if (NULL == pContext) {
return false;
diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h
index eecbe890027e..b3ee6d01aa88 100644
--- a/drivers/staging/vt6656/rxtx.h
+++ b/drivers/staging/vt6656/rxtx.h
@@ -230,12 +230,20 @@ struct vnt_tx_buffer {
union vnt_tx_head tx_head;
} __packed;
+struct vnt_tx_short_buf_head {
+ u16 fifo_ctl;
+ u16 time_stamp;
+ struct vnt_phy_field ab;
+ u16 duration;
+ u16 time_stamp_off;
+} __packed;
+
struct vnt_beacon_buffer {
u8 byType;
u8 byPKTNO;
u16 wTxByteCount;
- u16 wFIFOCtl;
- u16 wTimeStamp;
+ struct vnt_tx_short_buf_head short_head;
+ struct ieee80211_hdr hdr;
} __packed;
void vDMA0_tx_80211(struct vnt_private *, struct sk_buff *skb);
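For orientation, the beacon buffer after this change is a small transfer header followed by the typed PHY header and the 802.11 header, rather than bare wFIFOCtl/wTimeStamp words. A sketch of the layout implied by the two structures above (field widths in bytes; the size of struct vnt_phy_field is left unspecified here):

        /*
         * struct vnt_beacon_buffer, __packed:
         *   byType(1) byPKTNO(1) wTxByteCount(2)
         *   short_head: fifo_ctl(2) time_stamp(2) ab(struct vnt_phy_field)
         *               duration(2) time_stamp_off(2)
         *   hdr:        struct ieee80211_hdr (the beacon's 802.11 header);
         *               the frame body is copied immediately after hdr
         *               (see csBeacon_xmit above)
         */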
diff --git a/drivers/staging/vt6656/tkip.c b/drivers/staging/vt6656/tkip.c
index 9d643e449ac3..28282f345901 100644
--- a/drivers/staging/vt6656/tkip.c
+++ b/drivers/staging/vt6656/tkip.c
@@ -39,7 +39,7 @@
/* The 2nd table is the same as the 1st but with the upper and lower */
/* bytes swapped. To allow an endian tolerant implementation, the byte */
/* halves have been expressed independently here. */
-const u8 TKIP_Sbox_Lower[256] = {
+static const u8 TKIP_Sbox_Lower[256] = {
0xA5,0x84,0x99,0x8D,0x0D,0xBD,0xB1,0x54,
0x50,0x03,0xA9,0x7D,0x19,0x62,0xE6,0x9A,
0x45,0x9D,0x40,0x87,0x15,0xEB,0xC9,0x0B,
@@ -74,7 +74,7 @@ const u8 TKIP_Sbox_Lower[256] = {
0xC3,0xB0,0x77,0x11,0xCB,0xFC,0xD6,0x3A
};
-const u8 TKIP_Sbox_Upper[256] = {
+static const u8 TKIP_Sbox_Upper[256] = {
0xC6,0xF8,0xEE,0xF6,0xFF,0xD6,0xDE,0x91,
0x60,0x02,0xCE,0x56,0xE7,0xB5,0x4D,0xEC,
0x8F,0x1F,0x89,0xFA,0xEF,0xB2,0x8E,0xFB,
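TKIP_Sbox_Lower/Upper are the byte halves of the 16-bit TKIP S-box. The usual combination, sketched below, indexes both tables with each byte of the 16-bit input and XORs the two byte-swapped lookups; tkip_sbox16() is an illustrative name, not necessarily the driver's own helper:

        static u16 tkip_sbox16(u16 v)
        {
                u8 lo = v & 0xff;
                u8 hi = v >> 8;

                return (TKIP_Sbox_Lower[lo] | (TKIP_Sbox_Upper[lo] << 8)) ^
                       (TKIP_Sbox_Upper[hi] | (TKIP_Sbox_Lower[hi] << 8));
        }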
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index 5fc18ad822d3..01cf09999b6d 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -390,8 +390,6 @@ static void s_nsInterruptUsbIoCompleteRead(struct urb *urb)
INTnsProcessData(pDevice);
}
- STAvUpdateUSBCounter(&pDevice->scStatistic.USB_InterruptStat, ntStatus);
-
if (pDevice->fKillEventPollingThread != true) {
usb_fill_bulk_urb(pDevice->pInterruptURB,
pDevice->usb,
@@ -499,8 +497,6 @@ static void s_nsBulkInUsbIoCompleteRead(struct urb *urb)
if (status) {
pDevice->ulBulkInError++;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BULK In failed %d\n", status);
-
- pDevice->scStatistic.RxFcsErrCnt ++;
//todo...xxxxxx
// if (status == USBD_STATUS_CRC) {
// pDevice->ulBulkInContCRCError++;
@@ -514,12 +510,8 @@ static void s_nsBulkInUsbIoCompleteRead(struct urb *urb)
bIndicateReceive = true;
pDevice->ulBulkInContCRCError = 0;
pDevice->ulBulkInBytesRead += bytesRead;
-
- pDevice->scStatistic.RxOkCnt ++;
}
- STAvUpdateUSBCounter(&pDevice->scStatistic.USB_BulkInStat, status);
-
if (bIndicateReceive) {
spin_lock(&pDevice->lock);
if (RXbBulkInProcessData(pDevice, pRCB, bytesRead) == true)
@@ -655,8 +647,6 @@ static void s_nsBulkOutIoCompleteWrite(struct urb *urb)
//
status = urb->status;
- //we should have failed, succeeded, or cancelled, but NOT be pending
- STAvUpdateUSBCounter(&pDevice->scStatistic.USB_BulkOutStat, status);
if(status == STATUS_SUCCESS) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Write %d bytes\n",(int)ulBufLen);
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 2f8e2a875331..6b9522914634 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -55,8 +55,8 @@
#include "channel.h"
#include "iowpa.h"
-static int msglevel =MSG_LEVEL_INFO;
-//static int msglevel =MSG_LEVEL_DEBUG;
+static int msglevel = MSG_LEVEL_INFO;
+//static int msglevel = MSG_LEVEL_DEBUG;
static void s_vProbeChannel(struct vnt_private *);
@@ -87,38 +87,33 @@ static void vAdHocBeaconStop(struct vnt_private *pDevice)
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
int bStop;
- /*
- * temporarily stop Beacon packet for AdHoc Server
- * if all of the following coditions are met:
- * (1) STA is in AdHoc mode
- * (2) VT3253 is programmed as automatic Beacon Transmitting
- * (3) One of the following conditions is met
- * (3.1) AdHoc channel is in B/G band and the
- * current scan channel is in A band
- * or
- * (3.2) AdHoc channel is in A mode
- */
- bStop = false;
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
- (pMgmt->eCurrState >= WMAC_STATE_STARTED))
- {
- if ((pMgmt->uIBSSChannel <= CB_MAX_CHANNEL_24G) &&
- (pMgmt->uScanChannel > CB_MAX_CHANNEL_24G))
- {
- bStop = true;
- }
- if (pMgmt->uIBSSChannel > CB_MAX_CHANNEL_24G)
- {
- bStop = true;
- }
- }
-
- if (bStop)
- {
- //PMESG(("STOP_BEACON: IBSSChannel = %u, ScanChannel = %u\n",
- // pMgmt->uIBSSChannel, pMgmt->uScanChannel));
- MACvRegBitsOff(pDevice, MAC_REG_TCR, TCR_AUTOBCNTX);
- }
+ /*
+ * temporarily stop Beacon packet for AdHoc Server
+ * if all of the following conditions are met:
+ * (1) STA is in AdHoc mode
+ * (2) VT3253 is programmed as automatic Beacon Transmitting
+ * (3) One of the following conditions is met
+ * (3.1) AdHoc channel is in B/G band and the
+ * current scan channel is in A band
+ * or
+ * (3.2) AdHoc channel is in A mode
+ */
+ bStop = false;
+ if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
+ (pMgmt->eCurrState >= WMAC_STATE_STARTED)) {
+ if ((pMgmt->uIBSSChannel <= CB_MAX_CHANNEL_24G) &&
+ (pMgmt->uScanChannel > CB_MAX_CHANNEL_24G)) {
+ bStop = true;
+ }
+ if (pMgmt->uIBSSChannel > CB_MAX_CHANNEL_24G)
+ bStop = true;
+ }
+
+ if (bStop) {
+ //PMESG(("STOP_BEACON: IBSSChannel = %u, ScanChannel = %u\n",
+ // pMgmt->uIBSSChannel, pMgmt->uScanChannel));
+ MACvRegBitsOff(pDevice, MAC_REG_TCR, TCR_AUTOBCNTX);
+ }
} /* vAdHocBeaconStop */
@@ -145,12 +140,11 @@ static void vAdHocBeaconRestart(struct vnt_private *pDevice)
* (1) STA is in AdHoc mode
* (2) VT3253 is programmed as automatic Beacon Transmitting
*/
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
- (pMgmt->eCurrState >= WMAC_STATE_STARTED))
- {
- //PMESG(("RESTART_BEACON\n"));
- MACvRegBitsOn(pDevice, MAC_REG_TCR, TCR_AUTOBCNTX);
- }
+ if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
+ (pMgmt->eCurrState >= WMAC_STATE_STARTED)) {
+ //PMESG(("RESTART_BEACON\n"));
+ MACvRegBitsOn(pDevice, MAC_REG_TCR, TCR_AUTOBCNTX);
+ }
}
@@ -182,34 +176,33 @@ static void s_vProbeChannel(struct vnt_private *pDevice)
u8 *pbyRate;
int ii;
- if (pDevice->byBBType == BB_TYPE_11A) {
- pbyRate = &abyCurrSuppRatesA[0];
- } else if (pDevice->byBBType == BB_TYPE_11B) {
- pbyRate = &abyCurrSuppRatesB[0];
- } else {
- pbyRate = &abyCurrSuppRatesG[0];
- }
- // build an assocreq frame and send it
- pTxPacket = s_MgrMakeProbeRequest
- (
- pDevice,
- pMgmt,
- pMgmt->abyScanBSSID,
- (PWLAN_IE_SSID)pMgmt->abyScanSSID,
- (PWLAN_IE_SUPP_RATES)pbyRate,
- (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRatesG
- );
-
- if (pTxPacket != NULL ){
- for (ii = 0; ii < 1 ; ii++) {
- if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe request sending fail.. \n");
- }
- else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe request is sending.. \n");
- }
- }
- }
+ if (pDevice->byBBType == BB_TYPE_11A)
+ pbyRate = &abyCurrSuppRatesA[0];
+ else if (pDevice->byBBType == BB_TYPE_11B)
+ pbyRate = &abyCurrSuppRatesB[0];
+ else
+ pbyRate = &abyCurrSuppRatesG[0];
+
+ // build an assocreq frame and send it
+ pTxPacket = s_MgrMakeProbeRequest
+ (
+ pDevice,
+ pMgmt,
+ pMgmt->abyScanBSSID,
+ (PWLAN_IE_SSID)pMgmt->abyScanSSID,
+ (PWLAN_IE_SUPP_RATES)pbyRate,
+ (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRatesG
+ );
+
+ if (pTxPacket != NULL) {
+ for (ii = 0; ii < 1; ii++) {
+ if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe request sending failed..\n");
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe request is sending..\n");
+ }
+ }
+ }
}
@@ -224,7 +217,7 @@ static void s_vProbeChannel(struct vnt_private *pDevice)
*
-*/
-struct vnt_tx_mgmt *s_MgrMakeProbeRequest(struct vnt_private *pDevice,
+static struct vnt_tx_mgmt *s_MgrMakeProbeRequest(struct vnt_private *pDevice,
struct vnt_manager *pMgmt, u8 *pScanBSSID, PWLAN_IE_SSID pSSID,
PWLAN_IE_SUPP_RATES pCurrRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates)
{
@@ -236,37 +229,38 @@ struct vnt_tx_mgmt *s_MgrMakeProbeRequest(struct vnt_private *pDevice,
+ WLAN_PROBEREQ_FR_MAXLEN);
pTxPacket->p80211Header = (PUWLAN_80211HDR)((u8 *)pTxPacket
+ sizeof(struct vnt_tx_mgmt));
- sFrame.pBuf = (u8 *)pTxPacket->p80211Header;
- sFrame.len = WLAN_PROBEREQ_FR_MAXLEN;
- vMgrEncodeProbeRequest(&sFrame);
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_PROBEREQ)
- ));
- memcpy( sFrame.pHdr->sA3.abyAddr1, pScanBSSID, WLAN_ADDR_LEN);
- memcpy( sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy( sFrame.pHdr->sA3.abyAddr3, pScanBSSID, WLAN_BSSID_LEN);
- // Copy the SSID, pSSID->len=0 indicate broadcast SSID
- sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len);
- sFrame.len += pSSID->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
- sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrRates->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSuppRates, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN);
- // Copy the extension rate set
- if (pDevice->byBBType == BB_TYPE_11G) {
- sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrExtSuppRates->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pExtSuppRates, pCurrExtSuppRates, pCurrExtSuppRates->len + WLAN_IEHDR_LEN);
- }
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- return pTxPacket;
+ sFrame.pBuf = (u8 *)pTxPacket->p80211Header;
+ sFrame.len = WLAN_PROBEREQ_FR_MAXLEN;
+ vMgrEncodeProbeRequest(&sFrame);
+ sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
+ (
+ WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
+ WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_PROBEREQ)
+ ));
+ memcpy(sFrame.pHdr->sA3.abyAddr1, pScanBSSID, WLAN_ADDR_LEN);
+ memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
+ memcpy(sFrame.pHdr->sA3.abyAddr3, pScanBSSID, WLAN_BSSID_LEN);
+ // Copy the SSID, pSSID->len=0 indicate broadcast SSID
+ sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len);
+ sFrame.len += pSSID->len + WLAN_IEHDR_LEN;
+ memcpy(sFrame.pSSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
+ sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
+ sFrame.len += pCurrRates->len + WLAN_IEHDR_LEN;
+ memcpy(sFrame.pSuppRates, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN);
+ // Copy the extension rate set
+ if (pDevice->byBBType == BB_TYPE_11G) {
+ sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
+ sFrame.len += pCurrExtSuppRates->len + WLAN_IEHDR_LEN;
+ memcpy(sFrame.pExtSuppRates, pCurrExtSuppRates, pCurrExtSuppRates->len + WLAN_IEHDR_LEN);
+ }
+ pTxPacket->cbMPDULen = sFrame.len;
+ pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
+
+ return pTxPacket;
}
-void vCommandTimerWait(struct vnt_private *pDevice, unsigned long MSecond)
+static void
+vCommandTimerWait(struct vnt_private *pDevice, unsigned long MSecond)
{
schedule_delayed_work(&pDevice->run_command_work,
msecs_to_jiffies(MSecond));
@@ -289,661 +283,639 @@ void vRunCommand(struct work_struct *work)
if (pDevice->Flags & fMP_DISCONNECTED)
return;
- if (pDevice->dwDiagRefCount != 0)
- return;
- if (pDevice->bCmdRunning != true)
- return;
+ if (pDevice->bCmdRunning != true)
+ return;
- spin_lock_irq(&pDevice->lock);
+ spin_lock_irq(&pDevice->lock);
- switch ( pDevice->eCommandState ) {
+ switch (pDevice->eCommandState) {
- case WLAN_CMD_SCAN_START:
+ case WLAN_CMD_SCAN_START:
pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff == true) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
-
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyScanSSID;
-
- if (pMgmt->uScanChannel == 0 ) {
- pMgmt->uScanChannel = pDevice->byMinChannel;
- }
- if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
- pDevice->eCommandState = WLAN_CMD_SCAN_END;
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
-
- } else {
- if (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel)) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Invalid channel pMgmt->uScanChannel = %d \n",pMgmt->uScanChannel);
+ if (pDevice->bRadioOff == true) {
+ s_bCommandComplete(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ }
+
+ if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
+ s_bCommandComplete(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ }
+
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyScanSSID;
+
+ if (pMgmt->uScanChannel == 0)
+ pMgmt->uScanChannel = pDevice->byMinChannel;
+ if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
+ pDevice->eCommandState = WLAN_CMD_SCAN_END;
+ s_bCommandComplete(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ } else {
+ if (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel)) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Invalid channel pMgmt->uScanChannel = %d\n", pMgmt->uScanChannel);
+ pMgmt->uScanChannel++;
+ s_bCommandComplete(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ }
+ if (pMgmt->uScanChannel == pDevice->byMinChannel) {
+ // pMgmt->eScanType = WMAC_SCAN_ACTIVE; //mike mark
+ pMgmt->abyScanBSSID[0] = 0xFF;
+ pMgmt->abyScanBSSID[1] = 0xFF;
+ pMgmt->abyScanBSSID[2] = 0xFF;
+ pMgmt->abyScanBSSID[3] = 0xFF;
+ pMgmt->abyScanBSSID[4] = 0xFF;
+ pMgmt->abyScanBSSID[5] = 0xFF;
+ pItemSSID->byElementID = WLAN_EID_SSID;
+ // clear bssid list
+ /* BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass); */
+ pMgmt->eScanState = WMAC_IS_SCANNING;
+ pDevice->byScanBBType = pDevice->byBBType; //lucas
+ pDevice->bStopDataPkt = true;
+ // Turn off RCR_BSSID filter every time
+ MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_BSSID);
+ pDevice->byRxMode &= ~RCR_BSSID;
+ }
+ //lucas
+ vAdHocBeaconStop(pDevice);
+ if ((pDevice->byBBType != BB_TYPE_11A) &&
+ (pMgmt->uScanChannel > CB_MAX_CHANNEL_24G)) {
+ pDevice->byBBType = BB_TYPE_11A;
+ CARDvSetBSSMode(pDevice);
+ } else if ((pDevice->byBBType == BB_TYPE_11A) &&
+ (pMgmt->uScanChannel <= CB_MAX_CHANNEL_24G)) {
+ pDevice->byBBType = BB_TYPE_11G;
+ CARDvSetBSSMode(pDevice);
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning.... channel: [%d]\n", pMgmt->uScanChannel);
+ // Set channel
+ CARDbSetMediaChannel(pDevice, pMgmt->uScanChannel);
+ // Set Baseband to be more sensitive.
+
+ if (pDevice->bUpdateBBVGA) {
+ BBvSetShortSlotTime(pDevice);
+ BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
+ BBvUpdatePreEDThreshold(pDevice, true);
+ }
pMgmt->uScanChannel++;
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- if (pMgmt->uScanChannel == pDevice->byMinChannel) {
- // pMgmt->eScanType = WMAC_SCAN_ACTIVE; //mike mark
- pMgmt->abyScanBSSID[0] = 0xFF;
- pMgmt->abyScanBSSID[1] = 0xFF;
- pMgmt->abyScanBSSID[2] = 0xFF;
- pMgmt->abyScanBSSID[3] = 0xFF;
- pMgmt->abyScanBSSID[4] = 0xFF;
- pMgmt->abyScanBSSID[5] = 0xFF;
- pItemSSID->byElementID = WLAN_EID_SSID;
- // clear bssid list
- /* BSSvClearBSSList((void *) pDevice,
- pDevice->bLinkPass); */
- pMgmt->eScanState = WMAC_IS_SCANNING;
- pDevice->byScanBBType = pDevice->byBBType; //lucas
- pDevice->bStopDataPkt = true;
- // Turn off RCR_BSSID filter every time
- MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_BSSID);
- pDevice->byRxMode &= ~RCR_BSSID;
-
- }
- //lucas
- vAdHocBeaconStop(pDevice);
- if ((pDevice->byBBType != BB_TYPE_11A) && (pMgmt->uScanChannel > CB_MAX_CHANNEL_24G)) {
- pDevice->byBBType = BB_TYPE_11A;
- CARDvSetBSSMode(pDevice);
- }
- else if ((pDevice->byBBType == BB_TYPE_11A) && (pMgmt->uScanChannel <= CB_MAX_CHANNEL_24G)) {
- pDevice->byBBType = BB_TYPE_11G;
- CARDvSetBSSMode(pDevice);
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning.... channel: [%d]\n", pMgmt->uScanChannel);
- // Set channel
- CARDbSetMediaChannel(pDevice, pMgmt->uScanChannel);
- // Set Baseband to be more sensitive.
-
- if (pDevice->bUpdateBBVGA) {
- BBvSetShortSlotTime(pDevice);
- BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
- BBvUpdatePreEDThreshold(pDevice, true);
- }
- pMgmt->uScanChannel++;
-
- while (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel) &&
- pMgmt->uScanChannel <= pDevice->byMaxChannel ){
- pMgmt->uScanChannel++;
- }
-
- if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
- // Set Baseband to be not sensitive and rescan
- pDevice->eCommandState = WLAN_CMD_SCAN_END;
-
- }
- if ((pMgmt->b11hEnable == false) ||
- (pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) {
- s_vProbeChannel(pDevice);
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *) pDevice, 100);
- return;
- } else {
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *) pDevice, WCMD_PASSIVE_SCAN_TIME);
- return;
- }
-
- }
-
- break;
-
- case WLAN_CMD_SCAN_END:
-
- // Set Baseband's sensitivity back.
- if (pDevice->byBBType != pDevice->byScanBBType) {
- pDevice->byBBType = pDevice->byScanBBType;
- CARDvSetBSSMode(pDevice);
- }
-
- if (pDevice->bUpdateBBVGA) {
- BBvSetShortSlotTime(pDevice);
- BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
- BBvUpdatePreEDThreshold(pDevice, false);
- }
-
- // Set channel back
- vAdHocBeaconRestart(pDevice);
- // Set channel back
- CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel);
- // Set Filter
- if (pMgmt->bCurrBSSIDFilterOn) {
- MACvRegBitsOn(pDevice, MAC_REG_RCR, RCR_BSSID);
- pDevice->byRxMode |= RCR_BSSID;
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel);
+
+ while (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel) &&
+	       pMgmt->uScanChannel <= pDevice->byMaxChannel) {
+ pMgmt->uScanChannel++;
+ }
+
+ if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
+ // Set Baseband to be not sensitive and rescan
+ pDevice->eCommandState = WLAN_CMD_SCAN_END;
+ }
+ if ((pMgmt->b11hEnable == false) ||
+ (pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) {
+ s_vProbeChannel(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ vCommandTimerWait((void *) pDevice, 100);
+ return;
+ } else {
+ spin_unlock_irq(&pDevice->lock);
+ vCommandTimerWait((void *) pDevice, WCMD_PASSIVE_SCAN_TIME);
+ return;
+ }
+ }
+
+ break;
+
+ case WLAN_CMD_SCAN_END:
+
+ // Set Baseband's sensitivity back.
+ if (pDevice->byBBType != pDevice->byScanBBType) {
+ pDevice->byBBType = pDevice->byScanBBType;
+ CARDvSetBSSMode(pDevice);
+ }
+
+ if (pDevice->bUpdateBBVGA) {
+ BBvSetShortSlotTime(pDevice);
+ BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
+ BBvUpdatePreEDThreshold(pDevice, false);
+ }
+
+ // Set channel back
+ vAdHocBeaconRestart(pDevice);
+ // Set channel back
+ CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel);
+ // Set Filter
+ if (pMgmt->bCurrBSSIDFilterOn) {
+ MACvRegBitsOn(pDevice, MAC_REG_RCR, RCR_BSSID);
+ pDevice->byRxMode |= RCR_BSSID;
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel);
pMgmt->uScanChannel = 0;
- pMgmt->eScanState = WMAC_NO_SCANNING;
- pDevice->bStopDataPkt = false;
+ pMgmt->eScanState = WMAC_NO_SCANNING;
+ pDevice->bStopDataPkt = false;
/*send scan event to wpa_Supplicant*/
PRINT_K("wireless_send_event--->SIOCGIWSCAN(scan done)\n");
memset(&wrqu, 0, sizeof(wrqu));
wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL);
- s_bCommandComplete(pDevice);
- break;
+ s_bCommandComplete(pDevice);
+ break;
- case WLAN_CMD_DISASSOCIATE_START :
+ case WLAN_CMD_DISASSOCIATE_START:
pDevice->byReAssocCount = 0;
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pMgmt->eCurrState != WMAC_STATE_ASSOC)) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- } else {
-
- pDevice->bwextstep0 = false;
- pDevice->bwextstep1 = false;
- pDevice->bwextstep2 = false;
- pDevice->bwextstep3 = false;
- pDevice->bWPASuppWextEnabled = false;
- pDevice->fWPA_Authened = false;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send Disassociation Packet..\n");
- // reason = 8 : disassoc because sta has left
- vMgrDisassocBeginSta((void *) pDevice,
- pMgmt,
- pMgmt->abyCurrBSSID,
- (8),
- &Status);
- pDevice->bLinkPass = false;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- // unlock command busy
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- pItemSSID->len = 0;
- memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->sNodeDBTable[0].bActive = false;
-// pDevice->bBeaconBufReady = false;
- }
- netif_stop_queue(pDevice->dev);
- if (pDevice->bNeedRadioOFF == true)
- CARDbRadioPowerOff(pDevice);
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_SSID_START:
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
+ (pMgmt->eCurrState != WMAC_STATE_ASSOC)) {
+ s_bCommandComplete(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ } else {
+ pDevice->bwextstep0 = false;
+ pDevice->bwextstep1 = false;
+ pDevice->bwextstep2 = false;
+ pDevice->bwextstep3 = false;
+ pDevice->bWPASuppWextEnabled = false;
+ pDevice->fWPA_Authened = false;
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send Disassociation Packet..\n");
+ // reason = 8 : disassoc because sta has left
+ vMgrDisassocBeginSta((void *) pDevice,
+ pMgmt,
+ pMgmt->abyCurrBSSID,
+ (8),
+ &Status);
+ pDevice->bLinkPass = false;
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
+ // unlock command busy
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
+ pItemSSID->len = 0;
+ memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ pMgmt->sNodeDBTable[0].bActive = false;
+// pDevice->bBeaconBufReady = false;
+ }
+ netif_stop_queue(pDevice->dev);
+ if (pDevice->bNeedRadioOFF == true)
+ CARDbRadioPowerOff(pDevice);
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_SSID_START:
pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff == true) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
-
- memcpy(pMgmt->abyAdHocSSID,pMgmt->abyDesireSSID,
- ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len + WLAN_IEHDR_LEN);
-
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pItemSSIDCurr = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: desire ssid = %s\n", pItemSSID->abySSID);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: curr ssid = %s\n", pItemSSIDCurr->abySSID);
-
- if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Cmd pMgmt->eCurrState == WMAC_STATE_ASSOC\n");
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSID->len =%d\n",pItemSSID->len);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSIDCurr->len = %d\n",pItemSSIDCurr->len);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" desire ssid = %s\n", pItemSSID->abySSID);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" curr ssid = %s\n", pItemSSIDCurr->abySSID);
- }
-
- if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
- ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)&& (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
-
- if (pItemSSID->len == pItemSSIDCurr->len) {
- if (memcmp(pItemSSID->abySSID, pItemSSIDCurr->abySSID, pItemSSID->len) == 0) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- }
- netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = false;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- }
- // set initial state
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- PSvDisablePowerSaving((void *) pDevice);
- BSSvClearNodeDBTable(pDevice, 0);
- vMgrJoinBSSBegin((void *) pDevice, &Status);
- // if Infra mode
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED)) {
- // Call mgr to begin the deauthentication
- // reason = (3) because sta has left ESS
- if (pMgmt->eCurrState >= WMAC_STATE_AUTH) {
- vMgrDeAuthenBeginSta((void *)pDevice,
- pMgmt,
- pMgmt->abyCurrBSSID,
- (3),
- &Status);
- }
- // Call mgr to begin the authentication
- vMgrAuthenBeginSta((void *) pDevice, pMgmt, &Status);
- if (Status == CMD_STATUS_SUCCESS) {
- pDevice->byLinkWaitCount = 0;
- pDevice->eCommandState = WLAN_AUTHENTICATE_WAIT;
- vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT);
- spin_unlock_irq(&pDevice->lock);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Set eCommandState = WLAN_AUTHENTICATE_WAIT\n");
- return;
- }
- }
- // if Adhoc mode
- else if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
- if (netif_queue_stopped(pDevice->dev)){
- netif_wake_queue(pDevice->dev);
- }
- pDevice->bLinkPass = true;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
- pMgmt->sNodeDBTable[0].bActive = true;
- pMgmt->sNodeDBTable[0].uInActiveCount = 0;
- }
- else {
- // start own IBSS
- DBG_PRT(MSG_LEVEL_DEBUG,
- KERN_INFO "CreateOwn IBSS by CurrMode = IBSS_STA\n");
- vMgrCreateOwnIBSS((void *) pDevice, &Status);
- if (Status != CMD_STATUS_SUCCESS){
- DBG_PRT(MSG_LEVEL_DEBUG,
- KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n");
- }
- BSSvAddMulticastNode(pDevice);
- }
- s_bClearBSSID_SCAN(pDevice);
- }
- // if SSID not found
- else if (pMgmt->eCurrMode == WMAC_MODE_STANDBY) {
- if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA ||
- pMgmt->eConfigMode == WMAC_CONFIG_AUTO) {
- // start own IBSS
- DBG_PRT(MSG_LEVEL_DEBUG,
- KERN_INFO "CreateOwn IBSS by CurrMode = STANDBY\n");
- vMgrCreateOwnIBSS((void *) pDevice, &Status);
- if (Status != CMD_STATUS_SUCCESS){
- DBG_PRT(MSG_LEVEL_DEBUG,
- KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n");
- }
- BSSvAddMulticastNode(pDevice);
- s_bClearBSSID_SCAN(pDevice);
+ if (pDevice->bRadioOff == true) {
+ s_bCommandComplete(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ }
+
+ memcpy(pMgmt->abyAdHocSSID, pMgmt->abyDesireSSID,
+ ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len + WLAN_IEHDR_LEN);
+
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
+ pItemSSIDCurr = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: desire ssid = %s\n", pItemSSID->abySSID);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: curr ssid = %s\n", pItemSSIDCurr->abySSID);
+
+ if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Cmd pMgmt->eCurrState == WMAC_STATE_ASSOC\n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSID->len =%d\n", pItemSSID->len);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSIDCurr->len = %d\n", pItemSSIDCurr->len);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" desire ssid = %s\n", pItemSSID->abySSID);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" curr ssid = %s\n", pItemSSIDCurr->abySSID);
+ }
+
+ if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
+ ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
+ if (pItemSSID->len == pItemSSIDCurr->len) {
+ if (memcmp(pItemSSID->abySSID, pItemSSIDCurr->abySSID, pItemSSID->len) == 0) {
+ s_bCommandComplete(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ }
+ }
+ netif_stop_queue(pDevice->dev);
+ pDevice->bLinkPass = false;
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
+ }
+ // set initial state
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ pMgmt->eCurrMode = WMAC_MODE_STANDBY;
+ PSvDisablePowerSaving((void *) pDevice);
+ BSSvClearNodeDBTable(pDevice, 0);
+ vMgrJoinBSSBegin((void *) pDevice, &Status);
+ // if Infra mode
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED)) {
+ // Call mgr to begin the deauthentication
+ // reason = (3) because sta has left ESS
+ if (pMgmt->eCurrState >= WMAC_STATE_AUTH) {
+ vMgrDeAuthenBeginSta((void *)pDevice,
+ pMgmt,
+ pMgmt->abyCurrBSSID,
+ (3),
+ &Status);
+ }
+ // Call mgr to begin the authentication
+ vMgrAuthenBeginSta((void *) pDevice, pMgmt, &Status);
+ if (Status == CMD_STATUS_SUCCESS) {
+ pDevice->byLinkWaitCount = 0;
+ pDevice->eCommandState = WLAN_AUTHENTICATE_WAIT;
+ vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT);
+ spin_unlock_irq(&pDevice->lock);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Set eCommandState = WLAN_AUTHENTICATE_WAIT\n");
+ return;
+ }
+ }
+ // if Adhoc mode
+ else if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
+ if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
+ if (netif_queue_stopped(pDevice->dev))
+ netif_wake_queue(pDevice->dev);
+ pDevice->bLinkPass = true;
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_INTER);
+ pMgmt->sNodeDBTable[0].bActive = true;
+ pMgmt->sNodeDBTable[0].uInActiveCount = 0;
+ } else {
+ // start own IBSS
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "CreateOwn IBSS by CurrMode = IBSS_STA\n");
+ vMgrCreateOwnIBSS((void *) pDevice, &Status);
+ if (Status != CMD_STATUS_SUCCESS) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n");
+ }
+ BSSvAddMulticastNode(pDevice);
+ }
+ s_bClearBSSID_SCAN(pDevice);
+ }
+ // if SSID not found
+ else if (pMgmt->eCurrMode == WMAC_MODE_STANDBY) {
+ if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA ||
+ pMgmt->eConfigMode == WMAC_CONFIG_AUTO) {
+ // start own IBSS
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "CreateOwn IBSS by CurrMode = STANDBY\n");
+ vMgrCreateOwnIBSS((void *) pDevice, &Status);
+ if (Status != CMD_STATUS_SUCCESS) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n");
+ }
+ BSSvAddMulticastNode(pDevice);
+ s_bClearBSSID_SCAN(pDevice);
/*
- pDevice->bLinkPass = true;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
- if (netif_queue_stopped(pDevice->dev)){
- netif_wake_queue(pDevice->dev);
- }
- s_bClearBSSID_SCAN(pDevice);
+ pDevice->bLinkPass = true;
+ ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
+ if (netif_queue_stopped(pDevice->dev)){
+ netif_wake_queue(pDevice->dev);
+ }
+ s_bClearBSSID_SCAN(pDevice);
*/
- }
- else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disconnect SSID none\n");
- // if(pDevice->bWPASuppWextEnabled == true)
- {
- union iwreq_data wrqu;
- memset(&wrqu, 0, sizeof (wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated:vMgrJoinBSSBegin Fail !!)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
- }
- }
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_AUTHENTICATE_WAIT :
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_AUTHENTICATE_WAIT\n");
- if (pMgmt->eCurrState == WMAC_STATE_AUTH) {
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disconnect SSID none\n");
+ // if(pDevice->bWPASuppWextEnabled == true)
+ {
+ union iwreq_data wrqu;
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated:vMgrJoinBSSBegin Fail !!)\n");
+ wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
+ }
+ }
+ }
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_AUTHENTICATE_WAIT:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_AUTHENTICATE_WAIT\n");
+ if (pMgmt->eCurrState == WMAC_STATE_AUTH) {
+ pDevice->byLinkWaitCount = 0;
+ // Call mgr to begin the association
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_AUTH\n");
+ vMgrAssocBeginSta((void *) pDevice, pMgmt, &Status);
+ if (Status == CMD_STATUS_SUCCESS) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState = WLAN_ASSOCIATE_WAIT\n");
+ pDevice->byLinkWaitCount = 0;
+ pDevice->eCommandState = WLAN_ASSOCIATE_WAIT;
+ vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ }
+ } else if (pMgmt->eCurrState < WMAC_STATE_AUTHPENDING) {
+ printk("WLAN_AUTHENTICATE_WAIT:Authen Fail???\n");
+ } else if (pDevice->byLinkWaitCount <= 4) {
+ //mike add:wait another 2 sec if authenticated_frame delay!
+ pDevice->byLinkWaitCount++;
+ printk("WLAN_AUTHENTICATE_WAIT:wait %d times!!\n", pDevice->byLinkWaitCount);
+ spin_unlock_irq(&pDevice->lock);
+ vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT/2);
+ return;
+ }
pDevice->byLinkWaitCount = 0;
- // Call mgr to begin the association
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_AUTH\n");
- vMgrAssocBeginSta((void *) pDevice, pMgmt, &Status);
- if (Status == CMD_STATUS_SUCCESS) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState = WLAN_ASSOCIATE_WAIT\n");
- pDevice->byLinkWaitCount = 0;
- pDevice->eCommandState = WLAN_ASSOCIATE_WAIT;
- vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- }
- else if(pMgmt->eCurrState < WMAC_STATE_AUTHPENDING) {
- printk("WLAN_AUTHENTICATE_WAIT:Authen Fail???\n");
- }
- else if(pDevice->byLinkWaitCount <= 4){ //mike add:wait another 2 sec if authenticated_frame delay!
- pDevice->byLinkWaitCount ++;
- printk("WLAN_AUTHENTICATE_WAIT:wait %d times!!\n",pDevice->byLinkWaitCount);
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT/2);
- return;
- }
- pDevice->byLinkWaitCount = 0;
-
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_ASSOCIATE_WAIT :
- if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_ASSOC\n");
- if (pDevice->ePSMode != WMAC_POWER_CAM) {
- PSvEnablePowerSaving((void *) pDevice,
- pMgmt->wListenInterval);
- }
+
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_ASSOCIATE_WAIT:
+ if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_ASSOC\n");
+ if (pDevice->ePSMode != WMAC_POWER_CAM) {
+ PSvEnablePowerSaving((void *) pDevice,
+ pMgmt->wListenInterval);
+ }
/*
- if (pMgmt->eAuthenMode >= WMAC_AUTH_WPA) {
- KeybRemoveAllKey(pDevice, &(pDevice->sKey), pDevice->abyBSSID);
- }
+ if (pMgmt->eAuthenMode >= WMAC_AUTH_WPA) {
+ KeybRemoveAllKey(pDevice, &(pDevice->sKey), pDevice->abyBSSID);
+ }
*/
- pDevice->byLinkWaitCount = 0;
- pDevice->byReAssocCount = 0;
- pDevice->bLinkPass = true;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
- s_bClearBSSID_SCAN(pDevice);
-
- if (netif_queue_stopped(pDevice->dev)){
- netif_wake_queue(pDevice->dev);
- }
-
- }
- else if(pMgmt->eCurrState < WMAC_STATE_ASSOCPENDING) {
- printk("WLAN_ASSOCIATE_WAIT:Association Fail???\n");
- }
- else if(pDevice->byLinkWaitCount <= 4){ //mike add:wait another 2 sec if associated_frame delay!
- pDevice->byLinkWaitCount ++;
- printk("WLAN_ASSOCIATE_WAIT:wait %d times!!\n",pDevice->byLinkWaitCount);
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT/2);
- return;
- }
-
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_AP_MODE_START :
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_AP_MODE_START\n");
-
- if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- cancel_delayed_work_sync(&pDevice->second_callback_work);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- pDevice->bLinkPass = false;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
- if (pDevice->bEnableHostWEP == true)
- BSSvClearNodeDBTable(pDevice, 1);
- else
- BSSvClearNodeDBTable(pDevice, 0);
- pDevice->uAssocCount = 0;
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pDevice->bFixRate = false;
-
- vMgrCreateOwnIBSS((void *) pDevice, &Status);
- if (Status != CMD_STATUS_SUCCESS) {
- DBG_PRT(MSG_LEVEL_DEBUG,
- KERN_INFO "vMgrCreateOwnIBSS fail!\n");
- }
- // always turn off unicast bit
- MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_UNICAST);
- pDevice->byRxMode &= ~RCR_UNICAST;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wcmd: rx_mode = %x\n", pDevice->byRxMode );
- BSSvAddMulticastNode(pDevice);
- if (netif_queue_stopped(pDevice->dev)){
- netif_wake_queue(pDevice->dev);
- }
- pDevice->bLinkPass = true;
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
- schedule_delayed_work(&pDevice->second_callback_work, HZ);
- }
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_TX_PSPACKET_START :
- // DTIM Multicast tx
- if (pMgmt->sNodeDBTable[0].bRxPSPoll) {
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[0].sTxPSQueue)) != NULL) {
- if (skb_queue_empty(&pMgmt->sNodeDBTable[0].sTxPSQueue)) {
- pMgmt->abyPSTxMap[0] &= ~byMask[0];
- pDevice->bMoreData = false;
- }
- else {
- pDevice->bMoreData = true;
- }
-
- if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Multicast ps tx fail \n");
- }
-
- pMgmt->sNodeDBTable[0].wEnQueueCnt--;
- }
- }
-
- // PS nodes tx
- for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive &&
- pMgmt->sNodeDBTable[ii].bRxPSPoll) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d Enqueu Cnt= %d\n",
- ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt);
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) {
- if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
- // clear tx map
- pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
- ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
- pDevice->bMoreData = false;
- }
- else {
- pDevice->bMoreData = true;
- }
-
- if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "sta ps tx fail \n");
- }
-
- pMgmt->sNodeDBTable[ii].wEnQueueCnt--;
- // check if sta ps enable, wait next pspoll
- // if sta ps disable, send all pending buffers.
- if (pMgmt->sNodeDBTable[ii].bPSEnable)
- break;
- }
- if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
- // clear tx map
- pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
- ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d PS queue clear \n", ii);
- }
- pMgmt->sNodeDBTable[ii].bRxPSPoll = false;
- }
- }
-
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_RADIO_START:
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_RADIO_START\n");
- // if (pDevice->bRadioCmd == true)
- // CARDbRadioPowerOn(pDevice);
- // else
- // CARDbRadioPowerOff(pDevice);
-
- {
- int ntStatus = STATUS_SUCCESS;
- u8 byTmp;
-
- ntStatus = CONTROLnsRequestIn(pDevice,
- MESSAGE_TYPE_READ,
- MAC_REG_GPIOCTL1,
- MESSAGE_REQUEST_MACREG,
- 1,
- &byTmp);
-
- if ( ntStatus != STATUS_SUCCESS ) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- if ( (byTmp & GPIO3_DATA) == 0 ) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" WLAN_CMD_RADIO_START_OFF........................\n");
- // Old commands are useless.
- // empty command Q
- pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
- pDevice->uCmdDequeueIdx = 0;
- pDevice->uCmdEnqueueIdx = 0;
- //0415pDevice->bCmdRunning = false;
- pDevice->bCmdClear = true;
- pDevice->bStopTx0Pkt = false;
- pDevice->bStopDataPkt = true;
-
- pDevice->byKeyIndex = 0;
- pDevice->bTransmitKey = false;
- spin_unlock_irq(&pDevice->lock);
- KeyvInitTable(pDevice,&pDevice->sKey);
- spin_lock_irq(&pDevice->lock);
- pMgmt->byCSSPK = KEY_CTL_NONE;
- pMgmt->byCSSGK = KEY_CTL_NONE;
-
- if (pDevice->bLinkPass == true) {
- // reason = 8 : disassoc because sta has left
- vMgrDisassocBeginSta((void *) pDevice,
- pMgmt,
- pMgmt->abyCurrBSSID,
- (8),
- &Status);
- pDevice->bLinkPass = false;
- // unlock command busy
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->sNodeDBTable[0].bActive = false;
- // if(pDevice->bWPASuppWextEnabled == true)
- {
- union iwreq_data wrqu;
- memset(&wrqu, 0, sizeof (wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
- }
- pDevice->bwextstep0 = false;
- pDevice->bwextstep1 = false;
- pDevice->bwextstep2 = false;
- pDevice->bwextstep3 = false;
- pDevice->bWPASuppWextEnabled = false;
- //clear current SSID
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- pItemSSID->len = 0;
- memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
- //clear desired SSID
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pItemSSID->len = 0;
- memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
-
- netif_stop_queue(pDevice->dev);
- CARDbRadioPowerOff(pDevice);
- MACvRegBitsOn(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD);
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_OFF);
- pDevice->bHWRadioOff = true;
- } else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" WLAN_CMD_RADIO_START_ON........................\n");
- pDevice->bHWRadioOff = false;
- CARDbRadioPowerOn(pDevice);
- MACvRegBitsOff(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD);
- ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_ON);
- }
- }
-
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_CHANGE_BBSENSITIVITY_START:
-
- pDevice->bStopDataPkt = true;
- pDevice->byBBVGACurrent = pDevice->byBBVGANew;
- BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Change sensitivity pDevice->byBBVGACurrent = %x\n", pDevice->byBBVGACurrent);
- pDevice->bStopDataPkt = false;
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_TBTT_WAKEUP_START:
- PSbIsNextTBTTWakeUp(pDevice);
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_BECON_SEND_START:
- bMgrPrepareBeaconToSend(pDevice, pMgmt);
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_SETPOWER_START:
-
- RFbSetPower(pDevice, pDevice->wCurrentRate, pMgmt->uCurrChannel);
-
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_CHANGE_ANTENNA_START:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Change from Antenna%d to", (int)pDevice->dwRxAntennaSel);
- if ( pDevice->dwRxAntennaSel == 0) {
- pDevice->dwRxAntennaSel=1;
- if (pDevice->bTxRxAntInv == true)
- BBvSetAntennaMode(pDevice, ANT_RXA);
- else
- BBvSetAntennaMode(pDevice, ANT_RXB);
- } else {
- pDevice->dwRxAntennaSel=0;
- if (pDevice->bTxRxAntInv == true)
- BBvSetAntennaMode(pDevice, ANT_RXB);
- else
- BBvSetAntennaMode(pDevice, ANT_RXA);
- }
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_REMOVE_ALLKEY_START:
- KeybRemoveAllKey(pDevice, &(pDevice->sKey), pDevice->abyBSSID);
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_MAC_DISPOWERSAVING_START:
- ControlvReadByte (pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PSCTL, &byData);
- if ( (byData & PSCTL_PS) != 0 ) {
- // disable power saving hw function
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_DISABLE_PS,
- 0,
- 0,
- 0,
- NULL
- );
- }
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_11H_CHSW_START:
- CARDbSetMediaChannel(pDevice, pDevice->byNewChannel);
- pDevice->bChannelSwitch = false;
- pMgmt->uCurrChannel = pDevice->byNewChannel;
- pDevice->bStopDataPkt = false;
- s_bCommandComplete(pDevice);
- break;
-
- default:
- s_bCommandComplete(pDevice);
- break;
- } //switch
-
- spin_unlock_irq(&pDevice->lock);
- return;
+ pDevice->byLinkWaitCount = 0;
+ pDevice->byReAssocCount = 0;
+ pDevice->bLinkPass = true;
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_INTER);
+ s_bClearBSSID_SCAN(pDevice);
+
+ if (netif_queue_stopped(pDevice->dev))
+ netif_wake_queue(pDevice->dev);
+
+ } else if (pMgmt->eCurrState < WMAC_STATE_ASSOCPENDING) {
+ printk("WLAN_ASSOCIATE_WAIT:Association Fail???\n");
+ } else if (pDevice->byLinkWaitCount <= 4) {
+ //mike add:wait another 2 sec if associated_frame delay!
+ pDevice->byLinkWaitCount++;
+ printk("WLAN_ASSOCIATE_WAIT:wait %d times!!\n", pDevice->byLinkWaitCount);
+ spin_unlock_irq(&pDevice->lock);
+ vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT/2);
+ return;
+ }
+
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_AP_MODE_START:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_AP_MODE_START\n");
+
+ if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
+ cancel_delayed_work_sync(&pDevice->second_callback_work);
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ pMgmt->eCurrMode = WMAC_MODE_STANDBY;
+ pDevice->bLinkPass = false;
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
+ if (pDevice->bEnableHostWEP == true)
+ BSSvClearNodeDBTable(pDevice, 1);
+ else
+ BSSvClearNodeDBTable(pDevice, 0);
+ pDevice->uAssocCount = 0;
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ pDevice->bFixRate = false;
+
+ vMgrCreateOwnIBSS((void *) pDevice, &Status);
+ if (Status != CMD_STATUS_SUCCESS) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "vMgrCreateOwnIBSS fail!\n");
+ }
+ // always turn off unicast bit
+ MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_UNICAST);
+ pDevice->byRxMode &= ~RCR_UNICAST;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wcmd: rx_mode = %x\n", pDevice->byRxMode);
+ BSSvAddMulticastNode(pDevice);
+ if (netif_queue_stopped(pDevice->dev))
+ netif_wake_queue(pDevice->dev);
+ pDevice->bLinkPass = true;
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_INTER);
+ schedule_delayed_work(&pDevice->second_callback_work, HZ);
+ }
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_TX_PSPACKET_START:
+ // DTIM Multicast tx
+ if (pMgmt->sNodeDBTable[0].bRxPSPoll) {
+ while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[0].sTxPSQueue)) != NULL) {
+ if (skb_queue_empty(&pMgmt->sNodeDBTable[0].sTxPSQueue)) {
+ pMgmt->abyPSTxMap[0] &= ~byMask[0];
+ pDevice->bMoreData = false;
+ } else {
+ pDevice->bMoreData = true;
+ }
+
+ if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Multicast ps tx fail\n");
+
+ pMgmt->sNodeDBTable[0].wEnQueueCnt--;
+ }
+ }
+
+ // PS nodes tx
+ for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
+ if (pMgmt->sNodeDBTable[ii].bActive &&
+ pMgmt->sNodeDBTable[ii].bRxPSPoll) {
+				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d Enqueue Cnt= %d\n",
+ ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt);
+ while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) {
+ if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
+ // clear tx map
+ pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
+ ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
+ pDevice->bMoreData = false;
+ } else {
+ pDevice->bMoreData = true;
+ }
+
+ if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "sta ps tx fail\n");
+
+ pMgmt->sNodeDBTable[ii].wEnQueueCnt--;
+ // check if sta ps enable, wait next pspoll
+ // if sta ps disable, send all pending buffers.
+ if (pMgmt->sNodeDBTable[ii].bPSEnable)
+ break;
+ }
+ if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
+ // clear tx map
+ pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
+ ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d PS queue clear\n", ii);
+ }
+ pMgmt->sNodeDBTable[ii].bRxPSPoll = false;
+ }
+ }
+
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_RADIO_START:
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_RADIO_START\n");
+// if (pDevice->bRadioCmd == true)
+// CARDbRadioPowerOn(pDevice);
+// else
+// CARDbRadioPowerOff(pDevice);
+ {
+ int ntStatus = STATUS_SUCCESS;
+ u8 byTmp;
+
+ ntStatus = CONTROLnsRequestIn(pDevice,
+ MESSAGE_TYPE_READ,
+ MAC_REG_GPIOCTL1,
+ MESSAGE_REQUEST_MACREG,
+ 1,
+ &byTmp);
+
+ if (ntStatus != STATUS_SUCCESS) {
+ s_bCommandComplete(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+ return;
+ }
+ if ((byTmp & GPIO3_DATA) == 0) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" WLAN_CMD_RADIO_START_OFF........................\n");
+ // Old commands are useless.
+ // empty command Q
+ pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
+ pDevice->uCmdDequeueIdx = 0;
+ pDevice->uCmdEnqueueIdx = 0;
+ //0415pDevice->bCmdRunning = false;
+ pDevice->bCmdClear = true;
+ pDevice->bStopTx0Pkt = false;
+ pDevice->bStopDataPkt = true;
+
+ pDevice->byKeyIndex = 0;
+ pDevice->bTransmitKey = false;
+ spin_unlock_irq(&pDevice->lock);
+ KeyvInitTable(pDevice, &pDevice->sKey);
+ spin_lock_irq(&pDevice->lock);
+ pMgmt->byCSSPK = KEY_CTL_NONE;
+ pMgmt->byCSSGK = KEY_CTL_NONE;
+
+ if (pDevice->bLinkPass == true) {
+ // reason = 8 : disassoc because sta has left
+ vMgrDisassocBeginSta((void *) pDevice,
+ pMgmt,
+ pMgmt->abyCurrBSSID,
+ (8),
+ &Status);
+ pDevice->bLinkPass = false;
+ // unlock command busy
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+ pMgmt->sNodeDBTable[0].bActive = false;
+ // if(pDevice->bWPASuppWextEnabled == true)
+ {
+ union iwreq_data wrqu;
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
+ wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
+ }
+ }
+ pDevice->bwextstep0 = false;
+ pDevice->bwextstep1 = false;
+ pDevice->bwextstep2 = false;
+ pDevice->bwextstep3 = false;
+ pDevice->bWPASuppWextEnabled = false;
+ //clear current SSID
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
+ pItemSSID->len = 0;
+ memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
+ //clear desired SSID
+ pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
+ pItemSSID->len = 0;
+ memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
+
+ netif_stop_queue(pDevice->dev);
+ CARDbRadioPowerOff(pDevice);
+ MACvRegBitsOn(pDevice, MAC_REG_GPIOCTL1, GPIO3_INTMD);
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_OFF);
+ pDevice->bHWRadioOff = true;
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" WLAN_CMD_RADIO_START_ON........................\n");
+ pDevice->bHWRadioOff = false;
+ CARDbRadioPowerOn(pDevice);
+ MACvRegBitsOff(pDevice, MAC_REG_GPIOCTL1, GPIO3_INTMD);
+ ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_ON);
+ }
+ }
+
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_CHANGE_BBSENSITIVITY_START:
+
+ pDevice->bStopDataPkt = true;
+ pDevice->byBBVGACurrent = pDevice->byBBVGANew;
+ BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Change sensitivity pDevice->byBBVGACurrent = %x\n", pDevice->byBBVGACurrent);
+ pDevice->bStopDataPkt = false;
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_TBTT_WAKEUP_START:
+ PSbIsNextTBTTWakeUp(pDevice);
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_BECON_SEND_START:
+ bMgrPrepareBeaconToSend(pDevice, pMgmt);
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_SETPOWER_START:
+
+ RFbSetPower(pDevice, pDevice->wCurrentRate, pMgmt->uCurrChannel);
+
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_CHANGE_ANTENNA_START:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Change from Antenna%d to", (int)pDevice->dwRxAntennaSel);
+ if (pDevice->dwRxAntennaSel == 0) {
+ pDevice->dwRxAntennaSel = 1;
+ if (pDevice->bTxRxAntInv == true)
+ BBvSetAntennaMode(pDevice, ANT_RXA);
+ else
+ BBvSetAntennaMode(pDevice, ANT_RXB);
+ } else {
+ pDevice->dwRxAntennaSel = 0;
+ if (pDevice->bTxRxAntInv == true)
+ BBvSetAntennaMode(pDevice, ANT_RXB);
+ else
+ BBvSetAntennaMode(pDevice, ANT_RXA);
+ }
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_REMOVE_ALLKEY_START:
+ KeybRemoveAllKey(pDevice, &(pDevice->sKey), pDevice->abyBSSID);
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_MAC_DISPOWERSAVING_START:
+ ControlvReadByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PSCTL, &byData);
+ if ((byData & PSCTL_PS) != 0) {
+ // disable power saving hw function
+ CONTROLnsRequestOut(pDevice,
+ MESSAGE_TYPE_DISABLE_PS,
+ 0,
+ 0,
+ 0,
+ NULL
+ );
+ }
+ s_bCommandComplete(pDevice);
+ break;
+
+ case WLAN_CMD_11H_CHSW_START:
+ CARDbSetMediaChannel(pDevice, pDevice->byNewChannel);
+ pDevice->bChannelSwitch = false;
+ pMgmt->uCurrChannel = pDevice->byNewChannel;
+ pDevice->bStopDataPkt = false;
+ s_bCommandComplete(pDevice);
+ break;
+
+ default:
+ s_bCommandComplete(pDevice);
+ break;
+ } //switch
+
+ spin_unlock_irq(&pDevice->lock);
+ return;
}
static int s_bCommandComplete(struct vnt_private *pDevice)
@@ -953,152 +925,146 @@ static int s_bCommandComplete(struct vnt_private *pDevice)
int bRadioCmd = false;
int bForceSCAN = true;
- pDevice->eCommandState = WLAN_CMD_IDLE;
- if (pDevice->cbFreeCmdQueue == CMD_Q_SIZE) {
- //Command Queue Empty
- pDevice->bCmdRunning = false;
- return true;
- }
- else {
- pDevice->eCommand = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].eCmd;
- pSSID = (PWLAN_IE_SSID)pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].abyCmdDesireSSID;
- bRadioCmd = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].bRadioCmd;
- bForceSCAN = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].bForceSCAN;
- ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdDequeueIdx, CMD_Q_SIZE);
- pDevice->cbFreeCmdQueue++;
- pDevice->bCmdRunning = true;
- switch ( pDevice->eCommand ) {
- case WLAN_CMD_BSSID_SCAN:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState= WLAN_CMD_BSSID_SCAN\n");
- pDevice->eCommandState = WLAN_CMD_SCAN_START;
- pMgmt->uScanChannel = 0;
- if (pSSID->len != 0) {
- memcpy(pMgmt->abyScanSSID, pSSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- } else {
- memset(pMgmt->abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- }
+ pDevice->eCommandState = WLAN_CMD_IDLE;
+ if (pDevice->cbFreeCmdQueue == CMD_Q_SIZE) {
+ //Command Queue Empty
+ pDevice->bCmdRunning = false;
+ return true;
+ } else {
+ pDevice->eCommand = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].eCmd;
+ pSSID = (PWLAN_IE_SSID)pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].abyCmdDesireSSID;
+ bRadioCmd = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].bRadioCmd;
+ bForceSCAN = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].bForceSCAN;
+ ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdDequeueIdx, CMD_Q_SIZE);
+ pDevice->cbFreeCmdQueue++;
+ pDevice->bCmdRunning = true;
+ switch (pDevice->eCommand) {
+ case WLAN_CMD_BSSID_SCAN:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState= WLAN_CMD_BSSID_SCAN\n");
+ pDevice->eCommandState = WLAN_CMD_SCAN_START;
+ pMgmt->uScanChannel = 0;
+ if (pSSID->len != 0)
+ memcpy(pMgmt->abyScanSSID, pSSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ else
+ memset(pMgmt->abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
/*
- if ((bForceSCAN == false) && (pDevice->bLinkPass == true)) {
- if ((pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len) &&
- ( !memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID, pSSID->len))) {
- pDevice->eCommandState = WLAN_CMD_IDLE;
- }
- }
+ if ((bForceSCAN == false) && (pDevice->bLinkPass == true)) {
+ if ((pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len) &&
+ ( !memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID, pSSID->len))) {
+ pDevice->eCommandState = WLAN_CMD_IDLE;
+ }
+ }
*/
- break;
- case WLAN_CMD_SSID:
- pDevice->eCommandState = WLAN_CMD_SSID_START;
- if (pSSID->len > WLAN_SSID_MAXLEN)
- pSSID->len = WLAN_SSID_MAXLEN;
- if (pSSID->len != 0)
- memcpy(pMgmt->abyDesireSSID, pSSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState= WLAN_CMD_SSID_START\n");
- break;
- case WLAN_CMD_DISASSOCIATE:
- pDevice->eCommandState = WLAN_CMD_DISASSOCIATE_START;
- break;
- case WLAN_CMD_RX_PSPOLL:
- pDevice->eCommandState = WLAN_CMD_TX_PSPACKET_START;
- break;
- case WLAN_CMD_RUN_AP:
- pDevice->eCommandState = WLAN_CMD_AP_MODE_START;
- break;
- case WLAN_CMD_RADIO:
- pDevice->eCommandState = WLAN_CMD_RADIO_START;
- pDevice->bRadioCmd = bRadioCmd;
- break;
- case WLAN_CMD_CHANGE_BBSENSITIVITY:
- pDevice->eCommandState = WLAN_CMD_CHANGE_BBSENSITIVITY_START;
- break;
-
- case WLAN_CMD_TBTT_WAKEUP:
- pDevice->eCommandState = WLAN_CMD_TBTT_WAKEUP_START;
- break;
-
- case WLAN_CMD_BECON_SEND:
- pDevice->eCommandState = WLAN_CMD_BECON_SEND_START;
- break;
-
- case WLAN_CMD_SETPOWER:
- pDevice->eCommandState = WLAN_CMD_SETPOWER_START;
- break;
-
- case WLAN_CMD_CHANGE_ANTENNA:
- pDevice->eCommandState = WLAN_CMD_CHANGE_ANTENNA_START;
- break;
-
- case WLAN_CMD_REMOVE_ALLKEY:
- pDevice->eCommandState = WLAN_CMD_REMOVE_ALLKEY_START;
- break;
-
- case WLAN_CMD_MAC_DISPOWERSAVING:
- pDevice->eCommandState = WLAN_CMD_MAC_DISPOWERSAVING_START;
- break;
-
- case WLAN_CMD_11H_CHSW:
- pDevice->eCommandState = WLAN_CMD_11H_CHSW_START;
- break;
-
- default:
- break;
-
- }
- vCommandTimerWait(pDevice, 0);
- }
-
- return true;
+ break;
+ case WLAN_CMD_SSID:
+ pDevice->eCommandState = WLAN_CMD_SSID_START;
+ if (pSSID->len > WLAN_SSID_MAXLEN)
+ pSSID->len = WLAN_SSID_MAXLEN;
+ if (pSSID->len != 0)
+ memcpy(pMgmt->abyDesireSSID, pSSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState= WLAN_CMD_SSID_START\n");
+ break;
+ case WLAN_CMD_DISASSOCIATE:
+ pDevice->eCommandState = WLAN_CMD_DISASSOCIATE_START;
+ break;
+ case WLAN_CMD_RX_PSPOLL:
+ pDevice->eCommandState = WLAN_CMD_TX_PSPACKET_START;
+ break;
+ case WLAN_CMD_RUN_AP:
+ pDevice->eCommandState = WLAN_CMD_AP_MODE_START;
+ break;
+ case WLAN_CMD_RADIO:
+ pDevice->eCommandState = WLAN_CMD_RADIO_START;
+ pDevice->bRadioCmd = bRadioCmd;
+ break;
+ case WLAN_CMD_CHANGE_BBSENSITIVITY:
+ pDevice->eCommandState = WLAN_CMD_CHANGE_BBSENSITIVITY_START;
+ break;
+
+ case WLAN_CMD_TBTT_WAKEUP:
+ pDevice->eCommandState = WLAN_CMD_TBTT_WAKEUP_START;
+ break;
+
+ case WLAN_CMD_BECON_SEND:
+ pDevice->eCommandState = WLAN_CMD_BECON_SEND_START;
+ break;
+
+ case WLAN_CMD_SETPOWER:
+ pDevice->eCommandState = WLAN_CMD_SETPOWER_START;
+ break;
+
+ case WLAN_CMD_CHANGE_ANTENNA:
+ pDevice->eCommandState = WLAN_CMD_CHANGE_ANTENNA_START;
+ break;
+
+ case WLAN_CMD_REMOVE_ALLKEY:
+ pDevice->eCommandState = WLAN_CMD_REMOVE_ALLKEY_START;
+ break;
+
+ case WLAN_CMD_MAC_DISPOWERSAVING:
+ pDevice->eCommandState = WLAN_CMD_MAC_DISPOWERSAVING_START;
+ break;
+
+ case WLAN_CMD_11H_CHSW:
+ pDevice->eCommandState = WLAN_CMD_11H_CHSW_START;
+ break;
+
+ default:
+ break;
+ }
+ vCommandTimerWait(pDevice, 0);
+ }
+
+ return true;
}
int bScheduleCommand(struct vnt_private *pDevice,
CMD_CODE eCommand, u8 *pbyItem0)
{
- if (pDevice->cbFreeCmdQueue == 0) {
- return (false);
- }
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].eCmd = eCommand;
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = true;
- memset(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID, 0 , WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- if (pbyItem0 != NULL) {
- switch (eCommand) {
- case WLAN_CMD_BSSID_SCAN:
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = false;
- memcpy(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID,
- pbyItem0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- break;
-
- case WLAN_CMD_SSID:
- memcpy(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID,
- pbyItem0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- break;
-
- case WLAN_CMD_DISASSOCIATE:
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bNeedRadioOFF = *((int *)pbyItem0);
- break;
+ if (pDevice->cbFreeCmdQueue == 0)
+ return false;
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].eCmd = eCommand;
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = true;
+	memset(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ if (pbyItem0 != NULL) {
+ switch (eCommand) {
+ case WLAN_CMD_BSSID_SCAN:
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = false;
+ memcpy(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID,
+ pbyItem0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ break;
+
+ case WLAN_CMD_SSID:
+ memcpy(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID,
+ pbyItem0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
+ break;
+
+ case WLAN_CMD_DISASSOCIATE:
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bNeedRadioOFF = *((int *)pbyItem0);
+ break;
/*
- case WLAN_CMD_DEAUTH:
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].wDeAuthenReason = *((u16 *)pbyItem0);
- break;
+ case WLAN_CMD_DEAUTH:
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].wDeAuthenReason = *((u16 *)pbyItem0);
+ break;
*/
- case WLAN_CMD_RADIO:
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bRadioCmd = *((int *)pbyItem0);
- break;
+ case WLAN_CMD_RADIO:
+ pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bRadioCmd = *((int *)pbyItem0);
+ break;
+
+ default:
+ break;
+ }
+ }
- default:
- break;
- }
- }
+ ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdEnqueueIdx, CMD_Q_SIZE);
+ pDevice->cbFreeCmdQueue--;
- ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdEnqueueIdx, CMD_Q_SIZE);
- pDevice->cbFreeCmdQueue--;
+ if (pDevice->bCmdRunning == false)
+ s_bCommandComplete(pDevice);
- if (pDevice->bCmdRunning == false) {
- s_bCommandComplete(pDevice);
- }
- else {
- }
- return (true);
+ return true;
}
@@ -1121,16 +1087,16 @@ static int s_bClearBSSID_SCAN(struct vnt_private *pDevice)
unsigned int uCmdDequeueIdx = pDevice->uCmdDequeueIdx;
unsigned int ii;
- if ((pDevice->cbFreeCmdQueue < CMD_Q_SIZE) && (uCmdDequeueIdx != pDevice->uCmdEnqueueIdx)) {
- for (ii = 0; ii < (CMD_Q_SIZE - pDevice->cbFreeCmdQueue); ii ++) {
- if (pDevice->eCmdQueue[uCmdDequeueIdx].eCmd == WLAN_CMD_BSSID_SCAN)
- pDevice->eCmdQueue[uCmdDequeueIdx].eCmd = WLAN_CMD_IDLE;
- ADD_ONE_WITH_WRAP_AROUND(uCmdDequeueIdx, CMD_Q_SIZE);
- if (uCmdDequeueIdx == pDevice->uCmdEnqueueIdx)
- break;
- }
- }
- return true;
+ if ((pDevice->cbFreeCmdQueue < CMD_Q_SIZE) && (uCmdDequeueIdx != pDevice->uCmdEnqueueIdx)) {
+ for (ii = 0; ii < (CMD_Q_SIZE - pDevice->cbFreeCmdQueue); ii++) {
+ if (pDevice->eCmdQueue[uCmdDequeueIdx].eCmd == WLAN_CMD_BSSID_SCAN)
+ pDevice->eCmdQueue[uCmdDequeueIdx].eCmd = WLAN_CMD_IDLE;
+ ADD_ONE_WITH_WRAP_AROUND(uCmdDequeueIdx, CMD_Q_SIZE);
+ if (uCmdDequeueIdx == pDevice->uCmdEnqueueIdx)
+ break;
+ }
+ }
+ return true;
}
//mike add:reset command timer
diff --git a/drivers/staging/vt6656/wmgr.c b/drivers/staging/vt6656/wmgr.c
index e26c41519b15..d74b0e7cb171 100644
--- a/drivers/staging/vt6656/wmgr.c
+++ b/drivers/staging/vt6656/wmgr.c
@@ -2961,7 +2961,7 @@ static struct vnt_tx_mgmt *s_MgrMakeBeacon(struct vnt_private *pDevice,
*
-*/
-struct vnt_tx_mgmt *s_MgrMakeProbeResponse(struct vnt_private *pDevice,
+static struct vnt_tx_mgmt *s_MgrMakeProbeResponse(struct vnt_private *pDevice,
struct vnt_manager *pMgmt, u16 wCurrCapInfo, u16 wCurrBeaconPeriod,
u32 uCurrChannel, u16 wCurrATIMWinodw, u8 *pDstAddr,
PWLAN_IE_SSID pCurrSSID, u8 *pCurrBSSID,
@@ -3081,7 +3081,7 @@ struct vnt_tx_mgmt *s_MgrMakeProbeResponse(struct vnt_private *pDevice,
*
-*/
-struct vnt_tx_mgmt *s_MgrMakeAssocRequest(struct vnt_private *pDevice,
+static struct vnt_tx_mgmt *s_MgrMakeAssocRequest(struct vnt_private *pDevice,
struct vnt_manager *pMgmt, u8 *pDAddr, u16 wCurrCapInfo,
u16 wListenInterval,
PWLAN_IE_SSID pCurrSSID,
@@ -3329,7 +3329,7 @@ struct vnt_tx_mgmt *s_MgrMakeAssocRequest(struct vnt_private *pDevice,
*
-*/
-struct vnt_tx_mgmt *s_MgrMakeReAssocRequest(struct vnt_private *pDevice,
+static struct vnt_tx_mgmt *s_MgrMakeReAssocRequest(struct vnt_private *pDevice,
struct vnt_manager *pMgmt, u8 *pDAddr, u16 wCurrCapInfo,
u16 wListenInterval, PWLAN_IE_SSID pCurrSSID,
PWLAN_IE_SUPP_RATES pCurrRates,
@@ -3576,7 +3576,7 @@ struct vnt_tx_mgmt *s_MgrMakeReAssocRequest(struct vnt_private *pDevice,
*
-*/
-struct vnt_tx_mgmt *s_MgrMakeAssocResponse(struct vnt_private *pDevice,
+static struct vnt_tx_mgmt *s_MgrMakeAssocResponse(struct vnt_private *pDevice,
struct vnt_manager *pMgmt, u16 wCurrCapInfo, u16 wAssocStatus,
u16 wAssocAID, u8 *pDstAddr, PWLAN_IE_SUPP_RATES pCurrSuppRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates)
@@ -3642,7 +3642,7 @@ struct vnt_tx_mgmt *s_MgrMakeAssocResponse(struct vnt_private *pDevice,
*
-*/
-struct vnt_tx_mgmt *s_MgrMakeReAssocResponse(struct vnt_private *pDevice,
+static struct vnt_tx_mgmt *s_MgrMakeReAssocResponse(struct vnt_private *pDevice,
struct vnt_manager *pMgmt, u16 wCurrCapInfo, u16 wAssocStatus,
u16 wAssocAID, u8 *pDstAddr, PWLAN_IE_SUPP_RATES pCurrSuppRates,
PWLAN_IE_SUPP_RATES pCurrExtSuppRates)
diff --git a/drivers/staging/vt6656/wpa.c b/drivers/staging/vt6656/wpa.c
index 01db4e7154da..403c295cc02c 100644
--- a/drivers/staging/vt6656/wpa.c
+++ b/drivers/staging/vt6656/wpa.c
@@ -43,12 +43,12 @@
static int msglevel =MSG_LEVEL_INFO;
-const u8 abyOUI00[4] = { 0x00, 0x50, 0xf2, 0x00 };
-const u8 abyOUI01[4] = { 0x00, 0x50, 0xf2, 0x01 };
-const u8 abyOUI02[4] = { 0x00, 0x50, 0xf2, 0x02 };
-const u8 abyOUI03[4] = { 0x00, 0x50, 0xf2, 0x03 };
-const u8 abyOUI04[4] = { 0x00, 0x50, 0xf2, 0x04 };
-const u8 abyOUI05[4] = { 0x00, 0x50, 0xf2, 0x05 };
+static const u8 abyOUI00[4] = { 0x00, 0x50, 0xf2, 0x00 };
+static const u8 abyOUI01[4] = { 0x00, 0x50, 0xf2, 0x01 };
+static const u8 abyOUI02[4] = { 0x00, 0x50, 0xf2, 0x02 };
+static const u8 abyOUI03[4] = { 0x00, 0x50, 0xf2, 0x03 };
+static const u8 abyOUI04[4] = { 0x00, 0x50, 0xf2, 0x04 };
+static const u8 abyOUI05[4] = { 0x00, 0x50, 0xf2, 0x05 };
/*+
*
diff --git a/drivers/staging/vt6656/wpa2.c b/drivers/staging/vt6656/wpa2.c
index aa2216184345..df5541794e0f 100644
--- a/drivers/staging/vt6656/wpa2.c
+++ b/drivers/staging/vt6656/wpa2.c
@@ -37,14 +37,14 @@
static int msglevel =MSG_LEVEL_INFO;
//static int msglevel =MSG_LEVEL_DEBUG;
-const u8 abyOUIGK[4] = { 0x00, 0x0F, 0xAC, 0x00 };
-const u8 abyOUIWEP40[4] = { 0x00, 0x0F, 0xAC, 0x01 };
-const u8 abyOUIWEP104[4] = { 0x00, 0x0F, 0xAC, 0x05 };
-const u8 abyOUITKIP[4] = { 0x00, 0x0F, 0xAC, 0x02 };
-const u8 abyOUICCMP[4] = { 0x00, 0x0F, 0xAC, 0x04 };
-
-const u8 abyOUI8021X[4] = { 0x00, 0x0F, 0xAC, 0x01 };
-const u8 abyOUIPSK[4] = { 0x00, 0x0F, 0xAC, 0x02 };
+static const u8 abyOUIGK[4] = { 0x00, 0x0F, 0xAC, 0x00 };
+static const u8 abyOUIWEP40[4] = { 0x00, 0x0F, 0xAC, 0x01 };
+static const u8 abyOUIWEP104[4] = { 0x00, 0x0F, 0xAC, 0x05 };
+static const u8 abyOUITKIP[4] = { 0x00, 0x0F, 0xAC, 0x02 };
+static const u8 abyOUICCMP[4] = { 0x00, 0x0F, 0xAC, 0x04 };
+
+static const u8 abyOUI8021X[4] = { 0x00, 0x0F, 0xAC, 0x01 };
+static const u8 abyOUIPSK[4] = { 0x00, 0x0F, 0xAC, 0x02 };
/*+
*
diff --git a/drivers/staging/vt6656/wpactl.c b/drivers/staging/vt6656/wpactl.c
index 003bd7c614e5..f4a8a5cb9feb 100644
--- a/drivers/staging/vt6656/wpactl.c
+++ b/drivers/staging/vt6656/wpactl.c
@@ -86,7 +86,7 @@ int wpa_set_keys(struct vnt_private *pDevice, void *ctx)
return ret;
}
- if (param->u.wpa_key.key && param->u.wpa_key.key_len > sizeof(abyKey))
+ if (param->u.wpa_key.key_len > sizeof(abyKey))
return -EINVAL;
memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len);
diff --git a/drivers/staging/winbond/mds.c b/drivers/staging/winbond/mds.c
index cac7720bef2b..aef0855f4c68 100644
--- a/drivers/staging/winbond/mds.c
+++ b/drivers/staging/winbond/mds.c
@@ -56,7 +56,8 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter,
RTS_on = true; /* Using RTS */
else {
if (pT01->T01_modulation_type) { /* Is using OFDM */
- if (CURRENT_PROTECT_MECHANISM) /* Is using protect */
+ /* Is using protect */
+ if (CURRENT_PROTECT_MECHANISM)
CTS_on = true; /* Using CTS */
}
}
@@ -69,9 +70,9 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter,
* ACK Rate : 24 Mega bps
* ACK frame length = 14 bytes */
Duration = 2*DEFAULT_SIFSTIME +
- 2*PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
- ((BodyLen*8 + 22 + Rate*4 - 1)/(Rate*4))*Tsym +
- ((112 + 22 + 95)/96)*Tsym;
+ 2*PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
+ ((BodyLen*8 + 22 + Rate*4 - 1)/(Rate*4))*Tsym +
+ ((112 + 22 + 95)/96)*Tsym;
} else { /* DSSS */
/* CTS duration
* 2 SIFS + DATA transmit time + 1 ACK
@@ -92,13 +93,15 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter,
* CTS Rate : 24 Mega bps
* CTS frame length = 14 bytes */
Duration += (DEFAULT_SIFSTIME +
- PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
- ((112 + 22 + 95)/96)*Tsym);
+ PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
+ ((112 + 22 + 95)/96)*Tsym);
} else {
/* CTS + 1 SIFS + CTS duration
* CTS Rate : ?? Mega bps
- * CTS frame length = 14 bytes */
- if (pT01->T01_plcp_header_length) /* long preamble */
+ * CTS frame length = 14 bytes
+ */
+ /* long preamble */
+ if (pT01->T01_plcp_header_length)
Duration += LONG_PREAMBLE_PLUS_PLCPHEADER_TIME;
else
Duration += SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME;
@@ -149,8 +152,8 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter,
+ Rate-1) / Rate +
DEFAULT_SIFSTIME*3);
}
-
- ((u16 *)buffer)[5] = cpu_to_le16(Duration); /* 4 USHOR for skip 8B USB, 2USHORT=FC + Duration */
+	/* 4 USHORT to skip 8B USB, 2 USHORT = FC + Duration */
+ ((u16 *)buffer)[5] = cpu_to_le16(Duration);
/* ----20061009 add by anson's endian */
pNextT00->value = cpu_to_le32(pNextT00->value);
@@ -159,7 +162,8 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter,
buffer += OffsetSize;
pT01 = (struct T01_descriptor *)(buffer+4);
- if (i != 1) /* The last fragment will not have the next fragment */
+ /* The last fragment will not have the next fragment */
+ if (i != 1)
pNextT00 = (struct T00_descriptor *)(buffer+OffsetSize);
}
@@ -189,7 +193,8 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter,
}
}
- ((u16 *)buffer)[5] = cpu_to_le16(Duration); /* 4 USHOR for skip 8B USB, 2USHORT=FC + Duration */
+	/* 4 USHORT to skip 8B USB, 2 USHORT = FC + Duration */
+ ((u16 *)buffer)[5] = cpu_to_le16(Duration);
pT00->value = cpu_to_le32(pT00->value);
pT01->value = cpu_to_le32(pT01->value);
/* --end 20061009 add */
@@ -221,9 +226,10 @@ static u16 Mds_BodyCopy(struct wbsoft_priv *adapter,
CopySize = SizeLeft;
if (SizeLeft > pDes->FragmentThreshold) {
CopySize = pDes->FragmentThreshold;
- pT00->T00_frame_length = 24 + CopySize; /* Set USB length */
- } else
- pT00->T00_frame_length = 24 + SizeLeft; /* Set USB length */
+ /* Set USB length */
+ pT00->T00_frame_length = 24 + CopySize;
+ } else /* Set USB length */
+ pT00->T00_frame_length = 24 + SizeLeft;
SizeLeft -= CopySize;
@@ -267,21 +273,27 @@ static u16 Mds_BodyCopy(struct wbsoft_priv *adapter,
/* 931130.5.n */
if (pMds->MicAdd) {
if (!SizeLeft) {
- pMds->MicWriteAddress[pMds->MicWriteIndex] = buffer - pMds->MicAdd;
- pMds->MicWriteSize[pMds->MicWriteIndex] = pMds->MicAdd;
+ pMds->MicWriteAddress[pMds->MicWriteIndex] =
+ buffer - pMds->MicAdd;
+ pMds->MicWriteSize[pMds->MicWriteIndex] =
+ pMds->MicAdd;
pMds->MicAdd = 0;
} else if (SizeLeft < 8) { /* 931130.5.p */
pMds->MicAdd = SizeLeft;
- pMds->MicWriteAddress[pMds->MicWriteIndex] = buffer - (8 - SizeLeft);
- pMds->MicWriteSize[pMds->MicWriteIndex] = 8 - SizeLeft;
+ pMds->MicWriteAddress[pMds->MicWriteIndex] =
+ buffer - (8 - SizeLeft);
+ pMds->MicWriteSize[pMds->MicWriteIndex] =
+ 8 - SizeLeft;
pMds->MicWriteIndex++;
}
}
/* Does it need to generate the new header for next mpdu? */
if (SizeLeft) {
- buffer = TargetBuffer + Size; /* Get the next 4n start address */
- memcpy(buffer, TargetBuffer, 32); /* Copy 8B USB +24B 802.11 */
+ /* Get the next 4n start address */
+ buffer = TargetBuffer + Size;
+ /* Copy 8B USB +24B 802.11 */
+ memcpy(buffer, TargetBuffer, 32);
pT00 = (struct T00_descriptor *)buffer;
pT00->T00_first_mpdu = 0;
}
@@ -293,7 +305,8 @@ static u16 Mds_BodyCopy(struct wbsoft_priv *adapter,
pT00->T00_IsLastMpdu = 1;
buffer = (u8 *)pT00 + 8; /* +8 for USB hdr */
buffer[1] &= ~0x04; /* Clear more frag bit of 802.11 frame control */
- pDes->FragmentCount = FragmentCount; /* Update the correct fragment number */
+ /* Update the correct fragment number */
+ pDes->FragmentCount = FragmentCount;
return Size;
}
@@ -330,7 +343,8 @@ static void Mds_HeaderCopy(struct wbsoft_priv *adapter,
FragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD; /* Do not fragment */
/* Copy full data, the 1'st buffer contain all the data 931130.5.j */
- memcpy(TargetBuffer, src_buffer, DOT_11_MAC_HEADER_SIZE); /* Copy header */
+ /* Copy header */
+ memcpy(TargetBuffer, src_buffer, DOT_11_MAC_HEADER_SIZE);
pDes->buffer_address[0] = src_buffer + DOT_11_MAC_HEADER_SIZE;
pDes->buffer_total_size -= DOT_11_MAC_HEADER_SIZE;
pDes->buffer_size[0] = pDes->buffer_total_size;
@@ -358,8 +372,8 @@ static void Mds_HeaderCopy(struct wbsoft_priv *adapter,
for (i = 0; i < 2; i++) {
if (i == 1)
ctmp1 = ctmpf;
-
- pMds->TxRate[pDes->Descriptor_ID][i] = ctmp1; /* backup the ta rate and fall back rate */
+		/* backup the tx rate and fall back rate */
+ pMds->TxRate[pDes->Descriptor_ID][i] = ctmp1;
if (ctmp1 == 108)
ctmp2 = 7;
@@ -395,7 +409,8 @@ static void Mds_HeaderCopy(struct wbsoft_priv *adapter,
/*
* Set preamble type
*/
- if ((pT01->T01_modulation_type == 0) && (pT01->T01_transmit_rate == 0)) /* RATE_1M */
+ /* RATE_1M */
+ if ((pT01->T01_modulation_type == 0) && (pT01->T01_transmit_rate == 0))
pDes->PreambleMode = WLAN_PREAMBLE_TYPE_LONG;
else
pDes->PreambleMode = CURRENT_PREAMBLE_MODE;
@@ -468,12 +483,14 @@ Mds_Tx(struct wbsoft_priv *adapter)
/* Start to fill the data */
do {
FillIndex = pMds->TxFillIndex;
- if (pMds->TxOwner[FillIndex]) { /* Is owned by software 0:Yes 1:No */
+ /* Is owned by software 0:Yes 1:No */
+ if (pMds->TxOwner[FillIndex]) {
pr_debug("[Mds_Tx] Tx Owner is H/W.\n");
break;
}
- XmitBufAddress = pMds->pTxBuffer + (MAX_USB_TX_BUFFER * FillIndex); /* Get buffer */
+ /* Get buffer */
+ XmitBufAddress = pMds->pTxBuffer + (MAX_USB_TX_BUFFER * FillIndex);
XmitBufSize = 0;
FillCount = 0;
do {
@@ -485,7 +502,8 @@ Mds_Tx(struct wbsoft_priv *adapter)
FragmentThreshold = CURRENT_FRAGMENT_THRESHOLD;
/* 931130.5.b */
FragmentCount = PacketSize/FragmentThreshold + 1;
- stmp = PacketSize + FragmentCount*32 + 8; /* 931130.5.c 8:MIC */
+ /* 931130.5.c 8:MIC */
+ stmp = PacketSize + FragmentCount*32 + 8;
if ((XmitBufSize + stmp) >= MAX_USB_TX_BUFFER)
break; /* buffer is not enough */
@@ -499,18 +517,23 @@ Mds_Tx(struct wbsoft_priv *adapter)
TxDesIndex = pMds->TxDesIndex; /* Get the current ID */
pTxDes->Descriptor_ID = TxDesIndex;
- pMds->TxDesFrom[TxDesIndex] = 2; /* Storing the information of source coming from */
+ /* Storing the information of source coming from */
+ pMds->TxDesFrom[TxDesIndex] = 2;
pMds->TxDesIndex++;
pMds->TxDesIndex %= MAX_USB_TX_DESCRIPTOR;
MLME_GetNextPacket(adapter, pTxDes);
- /* Copy header. 8byte USB + 24byte 802.11Hdr. Set TxRate, Preamble type */
+ /*
+ * Copy header. 8byte USB + 24byte 802.11Hdr.
+ * Set TxRate, Preamble type
+ */
Mds_HeaderCopy(adapter, pTxDes, XmitBufAddress);
/* For speed up Key setting */
if (pTxDes->EapFix) {
- pr_debug("35: EPA 4th frame detected. Size = %d\n", PacketSize);
+ pr_debug("35: EPA 4th frame detected. Size = %d\n",
+ PacketSize);
pHwData->IsKeyPreSet = 1;
}
@@ -524,7 +547,9 @@ Mds_Tx(struct wbsoft_priv *adapter)
XmitBufSize += CurrentSize;
XmitBufAddress += CurrentSize;
- /* Get packet to transmit completed, 1:TESTSTA 2:MLME 3: Ndis data */
+ /* Get packet to transmit completed,
+ * 1:TESTSTA 2:MLME 3: Ndis data
+ */
MLME_SendComplete(adapter, 0, true);
/* Software TSC count 20060214 */
@@ -533,7 +558,12 @@ Mds_Tx(struct wbsoft_priv *adapter)
pMds->TxTsc_2++;
FillCount++; /* 20060928 */
- } while (HAL_USB_MODE_BURST(pHwData)); /* End of multiple MSDU copy loop. false = single true = multiple sending */
+ /*
+ * End of multiple MSDU copy loop.
+ * false = single
+ * true = multiple sending
+ */
+ } while (HAL_USB_MODE_BURST(pHwData));
/* Move to the next one, if necessary */
if (BufferFilled) {
@@ -594,7 +624,8 @@ Mds_SendComplete(struct wbsoft_priv *adapter, struct T02_descriptor *pT02)
pHwData->tx_retry_count[RetryCount] += RetryCount;
else
pHwData->tx_retry_count[7] += RetryCount;
- pr_debug("dto_tx_retry_count =%d\n", pHwData->dto_tx_retry_count);
+ pr_debug("dto_tx_retry_count =%d\n",
+ pHwData->dto_tx_retry_count);
MTO_SetTxCount(adapter, TxRate, RetryCount);
}
pHwData->dto_tx_frag_count += (RetryCount+1);
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 07891a3e316e..0d29624416c3 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -788,7 +788,6 @@ static int wb35_probe(struct usb_interface *intf,
dev->flags = IEEE80211_HW_SIGNAL_UNSPEC;
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
- dev->channel_change_time = 1000;
dev->max_signal = 100;
dev->queues = 1;
diff --git a/drivers/staging/wlags49_h2/debug.h b/drivers/staging/wlags49_h2/debug.h
index 811698f1070c..40f6a3ee7408 100644
--- a/drivers/staging/wlags49_h2/debug.h
+++ b/drivers/staging/wlags49_h2/debug.h
@@ -83,7 +83,8 @@
the types of messages displayed */
#ifndef DBG_LVL
#define DBG_LVL 5 /* yields nothing via init_module,
- original value of 5 yields DBG_TRACE_ON and DBG_VERBOSE_ON */
+ original value of 5 yields
+ DBG_TRACE_ON and DBG_VERBOSE_ON */
#endif /* DBG_LVL*/
@@ -105,46 +106,16 @@
#define DBG_LEVEL(A) ((A)->dbgLevel)
-#ifndef PRINTK
-# define PRINTK(S...) printk(S)
-#endif /* PRINTK */
-
-
#ifndef DBG_PRINT
-# define DBG_PRINT(S...) PRINTK(KERN_DEBUG S)
+# define DBG_PRINT(S...) printk(KERN_DEBUG S)
#endif /* DBG_PRINT */
#ifndef DBG_PRINTC
-# define DBG_PRINTC(S...) PRINTK(S)
+# define DBG_PRINTC(S...) printk(S)
#endif /* DBG_PRINTC */
-#ifndef DBG_TRAP
-# define DBG_TRAP {}
-#endif /* DBG_TRAP */
-
-
-#define _ENTER_STR ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
-#define _LEAVE_STR "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
-
-
-#define _DBG_ENTER(A) \
- DBG_PRINT("%s:%.*s:%s\n", DBG_NAME(A), ++DBG_LEVEL(A), \
- _ENTER_STR, __func__)
-#define _DBG_LEAVE(A) \
- DBG_PRINT("%s:%.*s:%s\n", DBG_NAME(A), DBG_LEVEL(A)--, \
- _LEAVE_STR, __func__)
-
-
-#define DBG_FUNC(F)
-
-#define DBG_ENTER(A) {if (DBG_FLAGS(A) & DBG_TRACE_ON) \
- _DBG_ENTER(A); }
-
-#define DBG_LEAVE(A) {if (DBG_FLAGS(A) & DBG_TRACE_ON) \
- _DBG_LEAVE(A); }
-
#define DBG_PARAM(A, N, F, S...) {if (DBG_FLAGS(A) & DBG_PARAM_ON) \
DBG_PRINT(" %s -- "F"\n", N, S); }
@@ -153,7 +124,6 @@
if (DBG_FLAGS(A) & DBG_ERROR_ON) { \
DBG_PRINT("%s:ERROR:%s ", DBG_NAME(A), __func__); \
DBG_PRINTC(S); \
- DBG_TRAP; \
} } while (0)
@@ -193,26 +163,22 @@
if (!(C)) { \
DBG_PRINT("ASSERT(%s) -- %s#%d (%s)\n", \
#C, __FILE__, __LINE__, __func__); \
- DBG_TRAP; \
} } while (0)
-typedef struct {
- char *dbgName;
- int dbgLevel;
- unsigned long DebugFlag;
-} dbg_info_t;
+struct dbg_info {
+ char *dbgName;
+ int dbgLevel;
+ unsigned long DebugFlag;
+};
+
+extern struct dbg_info *DbgInfo;
/****************************************************************************/
#else /* DBG */
/****************************************************************************/
-#define DBG_DEFN
-#define DBG_TRAP
-#define DBG_FUNC(F)
#define DBG_PRINT(S...)
-#define DBG_ENTER(A)
-#define DBG_LEAVE(A)
#define DBG_PARAM(A, N, F, S...)
#define DBG_ERROR(A, S...)
#define DBG_WARNING(A, S...)
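The debug.h hunk above drops the dbg_info_t typedef in favour of a plain struct dbg_info and moves the extern declaration of DbgInfo into the header, so the per-file "extern dbg_info_t *DbgInfo;" blocks elsewhere in the driver can go away. A minimal sketch of what a caller sees after this change, assuming debug.h is included and DBG is set (the field names come from the struct above; the call site itself is illustrative):

	/* provided by debug.h under #if DBG after this patch */
	extern struct dbg_info *DbgInfo;

	static void sketch(void)
	{
		/* DBG_PRINT now expands straight to printk(KERN_DEBUG ...) */
		DBG_PRINT("%s: level %d\n", DbgInfo->dbgName, DbgInfo->dbgLevel);
	}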
diff --git a/drivers/staging/wlags49_h2/sta_h25.c b/drivers/staging/wlags49_h2/sta_h25.c
index 5b6f670d8ef2..eccd780ef135 100644
--- a/drivers/staging/wlags49_h2/sta_h25.c
+++ b/drivers/staging/wlags49_h2/sta_h25.c
@@ -5211,7 +5211,7 @@ static const CFG_PROG_STRCT fw_image_code[] = {
0000,
0x000F429B, // Start execution address
},
- { 0000, 0000, 0000, 0000, 00000000, 0000, 00000000}
+ { 0000, 0000, 0000, 0000, 00000000, 0000, NULL}
};
static const CFG_RANGE20_STRCT fw_image_infocompat[] = {
@@ -5247,8 +5247,8 @@ memimage fw_image = {
"FUPU7D37dhfwci\001C", //signature, <format number>, C/Bin type
(CFG_PROG_STRCT *) fw_image_code,
0x000F429B,
- 00000000, //(dummy) pdaplug
- 00000000, //(dummy) priplug
+ NULL, //(dummy) pdaplug
+ NULL, //(dummy) priplug
(CFG_RANGE20_STRCT *) fw_image_infocompat,
(CFG_IDENTITY_STRCT *) fw_image_infoidentity,
};
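In the sta_h25.c hunks above, the zero literals that sit in pointer positions (the terminating CFG_PROG_STRCT entry and the dummy pdaplug/priplug members of fw_image) become NULL, which is what sparse expects for pointer initializers. A generic sketch of the idiom, with hypothetical names:

	struct fw_seg {
		unsigned short len;
		void *host_addr;	/* pointer member */
	};

	/* plain 0 compiles but draws a sparse warning; NULL states the intent */
	static const struct fw_seg term_seg = { 0, NULL };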
diff --git a/drivers/staging/wlags49_h2/wl_cs.c b/drivers/staging/wlags49_h2/wl_cs.c
index a458705a379c..3f7cf41a0e34 100644
--- a/drivers/staging/wlags49_h2/wl_cs.c
+++ b/drivers/staging/wlags49_h2/wl_cs.c
@@ -100,15 +100,6 @@
#include <wl_netdev.h>
#include <wl_cs.h>
-
-/*******************************************************************************
- * global definitions
- ******************************************************************************/
-#if DBG
-extern dbg_info_t *DbgInfo;
-#endif /* DBG */
-
-
/*******************************************************************************
* wl_adapter_attach()
*******************************************************************************
@@ -133,10 +124,6 @@ static int wl_adapter_attach(struct pcmcia_device *link)
struct net_device *dev;
struct wl_private *lp;
int ret;
- /*--------------------------------------------------------------------*/
-
- DBG_FUNC("wl_adapter_attach");
- DBG_ENTER(DbgInfo);
dev = wl_device_alloc();
if (dev == NULL) {
@@ -158,7 +145,6 @@ static int wl_adapter_attach(struct pcmcia_device *link)
if (ret != 0)
wl_device_dealloc(dev);
- DBG_LEAVE(DbgInfo);
return ret;
} /* wl_adapter_attach */
/*============================================================================*/
@@ -168,10 +154,7 @@ static int wl_adapter_attach(struct pcmcia_device *link)
static void wl_adapter_detach(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- /*--------------------------------------------------------------------*/
- DBG_FUNC("wl_adapter_detach");
- DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "link", "0x%p", link);
wl_adapter_release(link);
@@ -180,24 +163,18 @@ static void wl_adapter_detach(struct pcmcia_device *link)
unregister_netdev(dev);
wl_device_dealloc(dev);
}
-
- DBG_LEAVE(DbgInfo);
} /* wl_adapter_detach */
/*============================================================================*/
void wl_adapter_release(struct pcmcia_device *link)
{
- DBG_FUNC("wl_adapter_release");
- DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "link", "0x%p", link);
/* Stop hardware */
wl_remove(link->priv);
pcmcia_disable_device(link);
-
- DBG_LEAVE(DbgInfo);
} /* wl_adapter_release */
/*============================================================================*/
@@ -229,10 +206,7 @@ int wl_adapter_insert(struct pcmcia_device *link)
{
struct net_device *dev;
int ret;
- /*--------------------------------------------------------------------*/
- DBG_FUNC("wl_adapter_insert");
- DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "link", "0x%p", link);
dev = link->priv;
@@ -259,20 +233,17 @@ int wl_adapter_insert(struct pcmcia_device *link)
SET_NETDEV_DEV(dev, &link->dev);
ret = register_netdev(dev);
if (ret != 0) {
- printk("%s: register_netdev() failed\n", MODULE_NAME);
+ printk("%s: register_netdev() failed\n", KBUILD_MODNAME);
goto failed;
}
printk(KERN_INFO "%s: Wireless, io_addr %#03lx, irq %d, mac_address"
" %pM\n", dev->name, dev->base_addr, dev->irq, dev->dev_addr);
- DBG_LEAVE(DbgInfo);
return 0;
failed:
wl_adapter_release(link);
-
- DBG_LEAVE(DbgInfo);
return ret;
} /* wl_adapter_insert */
/*============================================================================*/
@@ -303,17 +274,12 @@ int wl_adapter_open(struct net_device *dev)
struct pcmcia_device *link = lp->link;
int result = 0;
int hcf_status = HCF_SUCCESS;
- /*--------------------------------------------------------------------*/
- DBG_FUNC("wl_adapter_open");
- DBG_ENTER(DbgInfo);
DBG_PRINT("%s\n", VERSION_INFO);
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
- if (!pcmcia_dev_present(link)) {
- DBG_LEAVE(DbgInfo);
+ if (!pcmcia_dev_present(link))
return -ENODEV;
- }
link->open++;
@@ -324,7 +290,6 @@ int wl_adapter_open(struct net_device *dev)
result = -ENODEV;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wl_adapter_open */
/*============================================================================*/
@@ -353,23 +318,17 @@ int wl_adapter_close(struct net_device *dev)
{
struct wl_private *lp = wl_priv(dev);
struct pcmcia_device *link = lp->link;
- /*--------------------------------------------------------------------*/
- DBG_FUNC("wl_adapter_close");
- DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
- if (link == NULL) {
- DBG_LEAVE(DbgInfo);
+ if (link == NULL)
return -ENODEV;
- }
DBG_TRACE(DbgInfo, "%s: Shutting down adapter.\n", dev->name);
wl_close(dev);
link->open--;
- DBG_LEAVE(DbgInfo);
return 0;
} /* wl_adapter_close */
/*============================================================================*/
@@ -420,17 +379,7 @@ static struct pcmcia_driver wlags49_driver = {
******************************************************************************/
int wl_adapter_init_module(void)
{
- int ret;
- /*--------------------------------------------------------------------*/
-
- DBG_FUNC("wl_adapter_init_module");
- DBG_ENTER(DbgInfo);
- DBG_TRACE(DbgInfo, "wl_adapter_init_module() -- PCMCIA\n");
-
- ret = pcmcia_register_driver(&wlags49_driver);
-
- DBG_LEAVE(DbgInfo);
- return ret;
+ return pcmcia_register_driver(&wlags49_driver);
} /* wl_adapter_init_module */
/*============================================================================*/
@@ -454,15 +403,7 @@ int wl_adapter_init_module(void)
******************************************************************************/
void wl_adapter_cleanup_module(void)
{
- DBG_FUNC("wl_adapter_cleanup_module");
- DBG_ENTER(DbgInfo);
- DBG_TRACE(DbgInfo, "wl_adapter_cleanup_module() -- PCMCIA\n");
-
-
pcmcia_unregister_driver(&wlags49_driver);
-
- DBG_LEAVE(DbgInfo);
- return;
} /* wl_adapter_cleanup_module */
/*============================================================================*/
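The wl_cs.c changes above are mostly mechanical: the DBG_FUNC/DBG_ENTER/DBG_LEAVE scaffolding goes away, the init/exit helpers that only wrapped the pcmcia driver registration lose their temporary variable, and the driver-private MODULE_NAME define gives way to the kbuild-provided KBUILD_MODNAME. A sketch of the resulting shape, assuming wlags49_driver is the struct pcmcia_driver already defined in that file:

	int wl_adapter_init_module(void)
	{
		/* nothing left to trace; just hand the result back */
		return pcmcia_register_driver(&wlags49_driver);
	}

	void wl_adapter_cleanup_module(void)
	{
		pcmcia_unregister_driver(&wlags49_driver);
	}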
diff --git a/drivers/staging/wlags49_h2/wl_cs.h b/drivers/staging/wlags49_h2/wl_cs.h
index 081cc6f28d1f..9a597a9f145f 100644
--- a/drivers/staging/wlags49_h2/wl_cs.h
+++ b/drivers/staging/wlags49_h2/wl_cs.h
@@ -86,4 +86,4 @@ const char *DbgEvent( int mask );
-#endif // __WL_CS_H__
+#endif /* __WL_CS_H__ */
diff --git a/drivers/staging/wlags49_h2/wl_enc.c b/drivers/staging/wlags49_h2/wl_enc.c
index 51293d9f2be9..389c23bdc28f 100644
--- a/drivers/staging/wlags49_h2/wl_enc.c
+++ b/drivers/staging/wlags49_h2/wl_enc.c
@@ -70,21 +70,6 @@
#include <wl_enc.h>
-
-
-
-/*******************************************************************************
- * global definitions
- ******************************************************************************/
-#if DBG
-
-extern dbg_info_t *DbgInfo;
-
-#endif /* DBG */
-
-
-
-
/*******************************************************************************
* wl_wep_code()
*******************************************************************************
diff --git a/drivers/staging/wlags49_h2/wl_enc.h b/drivers/staging/wlags49_h2/wl_enc.h
index 1804611276b8..03a52fbd3c09 100644
--- a/drivers/staging/wlags49_h2/wl_enc.h
+++ b/drivers/staging/wlags49_h2/wl_enc.h
@@ -69,7 +69,7 @@
******************************************************************************/
#define CRYPT_CODE "57617665A5D6"
#define ENCRYPTION_LEN 102
-#define ENCRYPTION_MAGIC 0x48576877L // HWhw
+#define ENCRYPTION_MAGIC 0x48576877L /* HWhw */
#define DEF_CRYPT_STR "G?TIUEA]d5MAdZV'eUb&&6.)'&:,'VF/(FR2)6^5*'*8*W6;+GB>,7NA-'ZD-X&G.H2J/8>M0(JP0XVS1HbV29.Y3):\\3YF_4IRb56"
#define DEFAULT_CRYPT_MAC "W\x01\x6B\x66\xA5\x5A"
@@ -115,4 +115,4 @@ int wl_wep_decode( char *szCrypt, void *Dest, char *szData );
-#endif // __WAVELAN2_ENCRYPTION_H__
+#endif /* __WAVELAN2_ENCRYPTION_H__ */
diff --git a/drivers/staging/wlags49_h2/wl_main.c b/drivers/staging/wlags49_h2/wl_main.c
index 43535610acc4..650def88e5c2 100644
--- a/drivers/staging/wlags49_h2/wl_main.c
+++ b/drivers/staging/wlags49_h2/wl_main.c
@@ -400,8 +400,8 @@ static p_u32 pc_debug = DBG_LVL;
*/ p_u32 DebugFlag = ~0; //recognizable "undefined value" rather then DBG_DEFAULTS;
//MODULE_PARM(DebugFlag, "l");
-dbg_info_t wl_info = { DBG_MOD_NAME, 0, 0 };
-dbg_info_t *DbgInfo = &wl_info;
+static struct dbg_info wl_info = { KBUILD_MODNAME, 0, 0 };
+struct dbg_info *DbgInfo = &wl_info;
#endif /* DBG */
#ifdef USE_RTS
@@ -434,9 +434,6 @@ int wl_insert( struct net_device *dev )
int i;
unsigned long flags = 0;
struct wl_private *lp = wl_priv(dev);
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_insert" );
- DBG_ENTER( DbgInfo );
/* Initialize the adapter hardware. */
memset( &( lp->hcfCtx ), 0, sizeof( IFB_STRCT ));
@@ -926,7 +923,6 @@ int wl_insert( struct net_device *dev )
proc_mkdir("driver/wlags49", 0);
#endif /* SCULL_USE_PROC */
- DBG_LEAVE( DbgInfo );
return result;
hcf_failed:
@@ -944,8 +940,6 @@ failed:
result = -EFAULT;
-
- DBG_LEAVE( DbgInfo );
return result;
} // wl_insert
/*============================================================================*/
@@ -972,9 +966,7 @@ int wl_reset(struct net_device *dev)
{
struct wl_private *lp = wl_priv(dev);
int hcf_status = HCF_SUCCESS;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_reset" );
- DBG_ENTER( DbgInfo );
+
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
DBG_PARAM( DbgInfo, "dev->base_addr", "(%#03lx)", dev->base_addr );
@@ -1021,7 +1013,6 @@ int wl_reset(struct net_device *dev)
}
out:
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_reset
/*============================================================================*/
@@ -1049,9 +1040,6 @@ int wl_go( struct wl_private *lp )
int hcf_status = HCF_SUCCESS;
char *cp = NULL; //fw_image
int retries = 0;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_go" );
- DBG_ENTER( DbgInfo );
hcf_status = wl_disable( lp );
if ( hcf_status != HCF_SUCCESS ) {
@@ -1148,7 +1136,6 @@ int rc;
}
if ( hcf_status != HCF_SUCCESS ) {
DBG_ERROR( DbgInfo, "Firmware Download failed\n" );
- DBG_LEAVE( DbgInfo );
return hcf_status;
}
}
@@ -1187,7 +1174,6 @@ int rc;
hcf_status = hcf_get_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
if ( hcf_status != HCF_SUCCESS ) {
DBG_ERROR( DbgInfo, "Could not retrieve MAC address\n" );
- DBG_LEAVE( DbgInfo );
return hcf_status;
}
memcpy( lp->MACAddress, &lp->ltvRecord.u.u8[0], ETH_ALEN );
@@ -1206,7 +1192,6 @@ int rc;
#endif // USE_WDS
hcf_status = wl_connect( lp );
}
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_go
/*============================================================================*/
@@ -1234,9 +1219,7 @@ int rc;
void wl_set_wep_keys( struct wl_private *lp )
{
int count = 0;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_set_wep_keys" );
- DBG_ENTER( DbgInfo );
+
DBG_PARAM( DbgInfo, "lp", "%s (0x%p)", lp->dev->name, lp );
if ( lp->EnableEncryption ) {
/* NOTE: CFG_CNF_ENCRYPTION is set in wl_put_ltv() as it's a static
@@ -1274,8 +1257,6 @@ void wl_set_wep_keys( struct wl_private *lp )
DBG_NOTICE( DbgInfo, "encrypt: %d, ID: %d\n", lp->EnableEncryption, lp->TransmitKeyID );
DBG_NOTICE( DbgInfo, "set key: %s(%d) [%d]\n", lp->DefaultKeys.key[lp->TransmitKeyID-1].key, lp->DefaultKeys.key[lp->TransmitKeyID-1].len, lp->TransmitKeyID-1 );
}
-
- DBG_LEAVE( DbgInfo );
} // wl_set_wep_keys
/*============================================================================*/
@@ -1301,9 +1282,7 @@ void wl_set_wep_keys( struct wl_private *lp )
int wl_apply(struct wl_private *lp)
{
int hcf_status = HCF_SUCCESS;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_apply" );
- DBG_ENTER( DbgInfo );
+
DBG_ASSERT( lp != NULL);
DBG_PARAM( DbgInfo, "lp", "%s (0x%p)", lp->dev->name, lp );
@@ -1319,13 +1298,11 @@ int wl_apply(struct wl_private *lp)
hcf_status = wl_disconnect( lp );
if ( hcf_status != HCF_SUCCESS ) {
DBG_ERROR( DbgInfo, "Disconnect failed\n" );
- DBG_LEAVE( DbgInfo );
return -1;
}
hcf_status = wl_disable( lp );
if ( hcf_status != HCF_SUCCESS ) {
DBG_ERROR( DbgInfo, "Disable failed\n" );
- DBG_LEAVE( DbgInfo );
return -1;
} else {
/* Write out configuration to the device, enable, and reconnect.
@@ -1347,7 +1324,6 @@ int wl_apply(struct wl_private *lp)
}
}
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_apply
/*============================================================================*/
@@ -1375,12 +1351,9 @@ int wl_put_ltv_init( struct wl_private *lp )
int i;
int hcf_status;
CFG_RID_LOG_STRCT *RidLog;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_put_ltv_init" );
- DBG_ENTER( DbgInfo );
+
if ( lp == NULL ) {
DBG_ERROR( DbgInfo, "lp pointer is NULL\n" );
- DBG_LEAVE( DbgInfo );
return -1;
}
/* DMA/IO */
@@ -1446,7 +1419,6 @@ int wl_put_ltv_init( struct wl_private *lp )
DBG_TRACE( DbgInfo, "CFG_REG_INFO_LOG\n" );
DBG_TRACE( DbgInfo, "CFG_REG_INFO_LOG result : 0x%04x\n",
hcf_status );
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_put_ltv_init
/*============================================================================*/
@@ -1473,9 +1445,6 @@ int wl_put_ltv( struct wl_private *lp )
{
int len;
int hcf_status;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_put_ltv" );
- DBG_ENTER( DbgInfo );
if ( lp == NULL ) {
DBG_ERROR( DbgInfo, "lp pointer is NULL\n" );
@@ -2013,7 +1982,6 @@ int wl_put_ltv( struct wl_private *lp )
/* Country Code */
/* countryInfo, ltvCountryInfo, CFG_CNF_COUNTRY_INFO */
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_put_ltv
/*============================================================================*/
@@ -2042,7 +2010,6 @@ static int __init wl_module_init( void )
int result;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_module_init" );
#if DBG
/* Convert "standard" PCMCIA parameter pc_debug to a reasonable DebugFlag value.
@@ -2067,7 +2034,6 @@ static int __init wl_module_init( void )
}
#endif /* DBG */
- DBG_ENTER( DbgInfo );
printk(KERN_INFO "%s\n", VERSION_INFO);
printk(KERN_INFO "*** Modified for kernel 2.6 by Henk de Groot <pe1dnn@amsat.org>\n");
printk(KERN_INFO "*** Based on 7.18 version by Andrey Borzenkov <arvidjaar@mail.ru> $Revision: 39 $\n");
@@ -2080,7 +2046,6 @@ static int __init wl_module_init( void )
// #endif /* (HCF_TYPE) & HCF_TYPE_AP */
result = wl_adapter_init_module( );
- DBG_LEAVE( DbgInfo );
return result;
} // init_module
/*============================================================================*/
@@ -2105,16 +2070,10 @@ static int __init wl_module_init( void )
******************************************************************************/
static void __exit wl_module_exit( void )
{
- DBG_FUNC( "wl_module_exit" );
- DBG_ENTER(DbgInfo);
-
wl_adapter_cleanup_module( );
#if 0 //SCULL_USE_PROC /* don't waste space if unused */
remove_proc_entry( "wlags", NULL ); //;?why so a-symmetric compared to location of proc_create_data
#endif
-
- DBG_LEAVE( DbgInfo );
- return;
} // cleanup_module
/*============================================================================*/
@@ -2322,9 +2281,6 @@ void wl_remove( struct net_device *dev )
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_remove" );
- DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
@@ -2356,8 +2312,6 @@ void wl_remove( struct net_device *dev )
#ifdef USE_RTS
if ( lp->useRTS == 1 ) {
wl_unlock( lp, &flags );
-
- DBG_LEAVE( DbgInfo );
return;
}
#endif /* USE_RTS */
@@ -2366,9 +2320,6 @@ void wl_remove( struct net_device *dev )
hcf_connect( &lp->hcfCtx, HCF_DISCONNECT );
wl_unlock( lp, &flags );
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_remove
/*============================================================================*/
@@ -2394,9 +2345,6 @@ void wl_suspend( struct net_device *dev )
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_suspend" );
- DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
@@ -2422,9 +2370,6 @@ void wl_suspend( struct net_device *dev )
lp->portState = WVLAN_PORT_STATE_DISABLED;
wl_unlock( lp, &flags );
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_suspend
/*============================================================================*/
@@ -2450,9 +2395,6 @@ void wl_resume(struct net_device *dev)
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_resume" );
- DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
@@ -2474,9 +2416,6 @@ void wl_resume(struct net_device *dev)
wl_act_int_on( lp );
wl_unlock( lp, &flags );
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_resume
/*============================================================================*/
@@ -2504,9 +2443,6 @@ void wl_resume(struct net_device *dev)
void wl_release( struct net_device *dev )
{
struct wl_private *lp = wl_priv(dev);
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_release" );
- DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
/* If wl_remove() hasn't been called (i.e. when Card Services is shut
@@ -2517,9 +2453,6 @@ void wl_release( struct net_device *dev )
lp->is_registered = FALSE;
}
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_release
/*============================================================================*/
@@ -2593,9 +2526,6 @@ p_s8 * wl_get_irq_list( void )
int wl_enable( struct wl_private *lp )
{
int hcf_status = HCF_SUCCESS;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_enable" );
- DBG_ENTER( DbgInfo );
if ( lp->portState == WVLAN_PORT_STATE_ENABLED ) {
DBG_TRACE( DbgInfo, "No action: Card already enabled\n" );
@@ -2617,7 +2547,6 @@ int wl_enable( struct wl_private *lp )
if ( hcf_status != HCF_SUCCESS ) { //;?make this an assert
DBG_TRACE( DbgInfo, "failed: 0x%x\n", hcf_status );
}
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_enable
/*============================================================================*/
@@ -2643,14 +2572,9 @@ int wl_enable( struct wl_private *lp )
******************************************************************************/
void wl_enable_wds_ports( struct wl_private * lp )
{
-
- DBG_FUNC( "wl_enable_wds_ports" );
- DBG_ENTER( DbgInfo );
if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ){
DBG_ERROR( DbgInfo, "!!!!;? someone misunderstood something !!!!!\n" );
}
- DBG_LEAVE( DbgInfo );
- return;
} // wl_enable_wds_ports
#endif /* USE_WDS */
/*============================================================================*/
@@ -2676,21 +2600,15 @@ void wl_enable_wds_ports( struct wl_private * lp )
int wl_connect( struct wl_private *lp )
{
int hcf_status;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_connect" );
- DBG_ENTER( DbgInfo );
if ( lp->portState != WVLAN_PORT_STATE_ENABLED ) {
DBG_TRACE( DbgInfo, "No action: Not in enabled state\n" );
- DBG_LEAVE( DbgInfo );
return HCF_SUCCESS;
}
hcf_status = hcf_cntl( &lp->hcfCtx, HCF_CNTL_CONNECT );
if ( hcf_status == HCF_SUCCESS ) {
lp->portState = WVLAN_PORT_STATE_CONNECTED;
}
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_connect
/*============================================================================*/
@@ -2716,21 +2634,15 @@ int wl_connect( struct wl_private *lp )
int wl_disconnect( struct wl_private *lp )
{
int hcf_status;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_disconnect" );
- DBG_ENTER( DbgInfo );
if ( lp->portState != WVLAN_PORT_STATE_CONNECTED ) {
DBG_TRACE( DbgInfo, "No action: Not in connected state\n" );
- DBG_LEAVE( DbgInfo );
return HCF_SUCCESS;
}
hcf_status = hcf_cntl( &lp->hcfCtx, HCF_CNTL_DISCONNECT );
if ( hcf_status == HCF_SUCCESS ) {
lp->portState = WVLAN_PORT_STATE_ENABLED;
}
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_disconnect
/*============================================================================*/
@@ -2757,9 +2669,6 @@ int wl_disconnect( struct wl_private *lp )
int wl_disable( struct wl_private *lp )
{
int hcf_status = HCF_SUCCESS;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_disable" );
- DBG_ENTER( DbgInfo );
if ( lp->portState == WVLAN_PORT_STATE_DISABLED ) {
DBG_TRACE( DbgInfo, "No action: Port state is disabled\n" );
@@ -2779,7 +2688,6 @@ int wl_disable( struct wl_private *lp )
if ( hcf_status != HCF_SUCCESS ) {
DBG_TRACE( DbgInfo, "failed: 0x%x\n", hcf_status );
}
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_disable
/*============================================================================*/
@@ -2805,10 +2713,6 @@ int wl_disable( struct wl_private *lp )
******************************************************************************/
void wl_disable_wds_ports( struct wl_private * lp )
{
-
- DBG_FUNC( "wl_disable_wds_ports" );
- DBG_ENTER( DbgInfo );
-
if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ){
DBG_ERROR( DbgInfo, "!!!!;? someone misunderstood something !!!!!\n" );
}
@@ -2820,7 +2724,6 @@ void wl_disable_wds_ports( struct wl_private * lp )
// wl_disable( lp, HCF_PORT_5 );
// wl_disable( lp, HCF_PORT_6 );
// }
- DBG_LEAVE( DbgInfo );
return;
} // wl_disable_wds_ports
#endif // USE_WDS
@@ -2848,9 +2751,7 @@ void wl_disable_wds_ports( struct wl_private * lp )
int wl_mbx( struct wl_private *lp )
{
int hcf_status = HCF_SUCCESS;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_mbx" );
- DBG_ENTER( DbgInfo );
+
DBG_TRACE( DbgInfo, "Mailbox Info: IFB_MBInfoLen: %d\n",
lp->hcfCtx.IFB_MBInfoLen );
@@ -2862,19 +2763,15 @@ int wl_mbx( struct wl_private *lp )
if ( hcf_status != HCF_SUCCESS ) {
DBG_ERROR( DbgInfo, "hcf_get_info returned 0x%x\n", hcf_status );
-
- DBG_LEAVE( DbgInfo );
return hcf_status;
}
- if ( lp->ltvRecord.typ == CFG_MB_INFO ) {
- DBG_LEAVE( DbgInfo );
+ if ( lp->ltvRecord.typ == CFG_MB_INFO )
return hcf_status;
- }
+
/* Endian translate the mailbox data, then process the message */
wl_endian_translate_mailbox( &( lp->ltvRecord ));
wl_process_mailbox( lp );
- DBG_LEAVE( DbgInfo );
return hcf_status;
} // wl_mbx
/*============================================================================*/
@@ -2900,9 +2797,6 @@ int wl_mbx( struct wl_private *lp )
******************************************************************************/
void wl_endian_translate_mailbox( ltv_t *ltv )
{
-
- DBG_FUNC( "wl_endian_translate_mailbox" );
- DBG_ENTER( DbgInfo );
switch( ltv->typ ) {
case CFG_TALLIES:
break;
@@ -2990,9 +2884,6 @@ void wl_endian_translate_mailbox( ltv_t *ltv )
default:
break;
}
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_endian_translate_mailbox
/*============================================================================*/
@@ -3017,9 +2908,7 @@ void wl_process_mailbox( struct wl_private *lp )
{
ltv_t *ltv;
hcf_16 ltv_val = 0xFFFF;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_process_mailbox" );
- DBG_ENTER( DbgInfo );
+
ltv = &( lp->ltvRecord );
switch( ltv->typ ) {
@@ -3448,8 +3337,6 @@ void wl_process_mailbox( struct wl_private *lp )
DBG_TRACE( DbgInfo, "UNKNOWN MESSAGE: 0x%04x\n", ltv->typ );
break;
}
- DBG_LEAVE( DbgInfo );
- return;
} // wl_process_mailbox
/*============================================================================*/
#endif /* ifndef USE_MBOX_SYNC */
@@ -3477,9 +3364,7 @@ void wl_process_mailbox( struct wl_private *lp )
void wl_wds_netdev_register( struct wl_private *lp )
{
int count;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_wds_netdev_register" );
- DBG_ENTER( DbgInfo );
+
//;?why is there no USE_WDS clause like in wl_enable_wds_ports
if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ) {
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
@@ -3496,8 +3381,6 @@ void wl_wds_netdev_register( struct wl_private *lp )
}
}
}
- DBG_LEAVE( DbgInfo );
- return;
} // wl_wds_netdev_register
/*============================================================================*/
@@ -3524,9 +3407,7 @@ void wl_wds_netdev_register( struct wl_private *lp )
void wl_wds_netdev_deregister( struct wl_private *lp )
{
int count;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_wds_netdev_deregister" );
- DBG_ENTER( DbgInfo );
+
if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ) {
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
if ( WVLAN_VALID_MAC_ADDRESS( lp->wds_port[count].wdsAddress )) {
@@ -3535,8 +3416,6 @@ void wl_wds_netdev_deregister( struct wl_private *lp )
lp->wds_port[count].is_registered = FALSE;
}
}
- DBG_LEAVE( DbgInfo );
- return;
} // wl_wds_netdev_deregister
/*============================================================================*/
#endif /* USE_WDS */
@@ -3780,9 +3659,6 @@ static int write_int(struct file *file, const char *buffer, unsigned long count,
static char proc_number[11];
unsigned int nr = 0;
- DBG_FUNC( "write_int" );
- DBG_ENTER( DbgInfo );
-
if (count > 9) {
count = -EINVAL;
} else if ( copy_from_user(proc_number, buffer, count) ) {
@@ -3799,7 +3675,6 @@ static int write_int(struct file *file, const char *buffer, unsigned long count,
}
}
DBG_PRINT( "value: %08X\n", nr );
- DBG_LEAVE( DbgInfo );
return count;
} // write_int
@@ -3839,10 +3714,6 @@ void timer_oor( u_long arg )
{
struct wl_private *lp = (struct wl_private *)arg;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "timer_oor" );
- DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "arg", "0x%08lx", arg );
printk(KERN_NOTICE "timer_oor: %ld 0x%04X\n", jiffies, lp->timer_oor_cnt ); //;?remove me 1 day
@@ -3856,8 +3727,6 @@ void timer_oor( u_long arg )
lp->timer_oor.data = (unsigned long)lp;
lp->timer_oor.expires = RUN_AT( (lp->timer_oor_cnt & ~DS_OOR) * HZ );
add_timer( &lp->timer_oor );
-
- DBG_LEAVE( DbgInfo );
} // timer_oor
#endif //DN554
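On the wl_main.c side, the debug state is now defined once as a struct dbg_info named after KBUILD_MODNAME and reached through the DbgInfo pointer that debug.h declares; the entry/exit tracing is gone while the parameter and trace macros remain. A sketch, assuming DBG is set (the helper function is hypothetical):

	#if DBG
	static struct dbg_info wl_info = { KBUILD_MODNAME, 0, 0 };
	struct dbg_info *DbgInfo = &wl_info;	/* matches the extern in debug.h */
	#endif /* DBG */

	static void sketch(struct net_device *dev)
	{
		/* DBG_PARAM and friends still work against the same DbgInfo */
		DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
	}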
diff --git a/drivers/staging/wlags49_h2/wl_main.h b/drivers/staging/wlags49_h2/wl_main.h
index 3b5acdf4e329..3806e744d7f6 100644
--- a/drivers/staging/wlags49_h2/wl_main.h
+++ b/drivers/staging/wlags49_h2/wl_main.h
@@ -135,4 +135,4 @@ void wl_wds_netdev_deregister( struct wl_private *lp );
#define WL_WDS_NETDEV_DEREGISTER( ARG )
#endif /* USE_WDS */
-#endif // __WL_MAIN_H__
+#endif /* __WL_MAIN_H__ */
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index 235cc2a7ffe6..965b1c0a4753 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -115,14 +115,6 @@
#endif /* BUS_PCI */
-/*******************************************************************************
- * global variables
- ******************************************************************************/
-#if DBG
-extern dbg_info_t *DbgInfo;
-#endif /* DBG */
-
-
#if HCF_ENCAP
#define MTU_MAX (HCF_MAX_MSG - ETH_HLEN - 8)
#else
@@ -170,10 +162,6 @@ int wl_init( struct net_device *dev )
{
// unsigned long flags;
// struct wl_private *lp = wl_priv(dev);
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_init" );
- DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
@@ -182,7 +170,6 @@ int wl_init( struct net_device *dev )
// wl_lock( lp, &flags );
// wl_unlock( lp, &flags );
- DBG_LEAVE( DbgInfo );
return 0;
} // wl_init
/*============================================================================*/
@@ -208,9 +195,6 @@ int wl_init( struct net_device *dev )
******************************************************************************/
int wl_config( struct net_device *dev, struct ifmap *map )
{
- DBG_FUNC( "wl_config" );
- DBG_ENTER( DbgInfo );
-
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
DBG_PARAM( DbgInfo, "map", "0x%p", map );
@@ -218,7 +202,6 @@ int wl_config( struct net_device *dev, struct ifmap *map )
ignore the request. */
DBG_TRACE(DbgInfo, "%s: %s called.\n", dev->name, __func__);
- DBG_LEAVE( DbgInfo );
return 0;
} // wl_config
/*============================================================================*/
@@ -249,10 +232,7 @@ struct net_device_stats *wl_stats( struct net_device *dev )
unsigned long flags;
struct net_device_stats *pStats;
struct wl_private *lp = wl_priv(dev);
- /*------------------------------------------------------------------------*/
- //DBG_FUNC( "wl_stats" );
- //DBG_ENTER( DbgInfo );
//DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
pStats = NULL;
@@ -262,8 +242,6 @@ struct net_device_stats *wl_stats( struct net_device *dev )
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
wl_unlock( lp, &flags );
-
- //DBG_LEAVE( DbgInfo );
return NULL;
}
#endif /* USE_RTS */
@@ -286,8 +264,6 @@ struct net_device_stats *wl_stats( struct net_device *dev )
wl_unlock( lp, &flags );
- //DBG_LEAVE( DbgInfo );
-
return pStats;
} // wl_stats
/*============================================================================*/
@@ -315,10 +291,6 @@ int wl_open(struct net_device *dev)
int status = HCF_SUCCESS;
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_open" );
- DBG_ENTER( DbgInfo );
wl_lock( lp, &flags );
@@ -326,7 +298,6 @@ int wl_open(struct net_device *dev)
if( lp->useRTS == 1 ) {
DBG_TRACE( DbgInfo, "Skipping device open, in RTS mode\n" );
wl_unlock( lp, &flags );
- DBG_LEAVE( DbgInfo );
return -EIO;
}
#endif /* USE_RTS */
@@ -384,7 +355,6 @@ int wl_open(struct net_device *dev)
wl_unlock( lp, &flags );
- DBG_LEAVE( DbgInfo );
return status;
} // wl_open
/*============================================================================*/
@@ -411,10 +381,7 @@ int wl_close( struct net_device *dev )
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
- /*------------------------------------------------------------------------*/
- DBG_FUNC("wl_close");
- DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
/* Mark the adapter as busy */
@@ -440,7 +407,6 @@ int wl_close( struct net_device *dev )
if( lp->useRTS == 1 ) {
DBG_TRACE( DbgInfo, "Skipping device close, in RTS mode\n" );
wl_unlock( lp, &flags );
- DBG_LEAVE( DbgInfo );
return -EIO;
}
#endif /* USE_RTS */
@@ -450,7 +416,6 @@ int wl_close( struct net_device *dev )
wl_unlock( lp, &flags );
- DBG_LEAVE( DbgInfo );
return 0;
} // wl_close
/*============================================================================*/
@@ -504,10 +469,7 @@ int wl_ioctl( struct net_device *dev, struct ifreq *rq, int cmd )
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_ioctl" );
- DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
DBG_PARAM(DbgInfo, "rq", "0x%p", rq);
DBG_PARAM(DbgInfo, "cmd", "0x%04x", cmd);
@@ -563,7 +525,6 @@ out_act_int_on_unlock:
wl_unlock( lp, &flags );
- DBG_LEAVE( DbgInfo );
return ret;
} // wl_ioctl
/*============================================================================*/
@@ -606,10 +567,6 @@ void wl_tx_timeout( struct net_device *dev )
unsigned long flags;
struct wl_private *lp = wl_priv(dev);
struct net_device_stats *pStats = NULL;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_tx_timeout" );
- DBG_ENTER( DbgInfo );
DBG_WARNING( DbgInfo, "%s: Transmit timeout.\n", dev->name );
@@ -619,8 +576,6 @@ void wl_tx_timeout( struct net_device *dev )
if( lp->useRTS == 1 ) {
DBG_TRACE( DbgInfo, "Skipping tx_timeout handler, in RTS mode\n" );
wl_unlock( lp, &flags );
-
- DBG_LEAVE( DbgInfo );
return;
}
#endif /* USE_RTS */
@@ -650,8 +605,6 @@ void wl_tx_timeout( struct net_device *dev )
pStats->tx_errors++;
wl_unlock( lp, &flags );
-
- DBG_LEAVE( DbgInfo );
} // wl_tx_timeout
/*============================================================================*/
@@ -683,8 +636,6 @@ int wl_send( struct wl_private *lp )
int len;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_send" );
-
if( lp == NULL ) {
DBG_ERROR( DbgInfo, "Private adapter struct is NULL\n" );
return FALSE;
@@ -801,8 +752,6 @@ int wl_tx( struct sk_buff *skb, struct net_device *dev, int port )
struct list_head *element;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_tx" );
-
/* Grab the spinlock */
wl_lock( lp, &flags );
@@ -895,7 +844,6 @@ int wl_rx(struct net_device *dev)
DESC_STRCT *desc;
/*------------------------------------------------------------------------*/
- DBG_FUNC("wl_rx")
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
if(!( lp->flags & WVLAN2_UIL_BUSY )) {
@@ -1047,16 +995,11 @@ void wl_multicast( struct net_device *dev )
struct netdev_hw_addr *ha;
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_multicast" );
- DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
- if( !wl_adapter_is_open( dev )) {
- DBG_LEAVE( DbgInfo );
+ if( !wl_adapter_is_open( dev ))
return;
- }
#if DBG
if( DBG_FLAGS( DbgInfo ) & DBG_PARAM_ON ) {
@@ -1077,8 +1020,6 @@ void wl_multicast( struct net_device *dev )
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
DBG_TRACE( DbgInfo, "Skipping multicast, in RTS mode\n" );
-
- DBG_LEAVE( DbgInfo );
return;
}
#endif /* USE_RTS */
@@ -1146,7 +1087,6 @@ void wl_multicast( struct net_device *dev )
wl_act_int_on( lp );
wl_unlock( lp, &flags );
}
- DBG_LEAVE( DbgInfo );
#endif /* HCF_STA */
} // wl_multicast
/*============================================================================*/
@@ -1155,16 +1095,11 @@ void wl_multicast( struct net_device *dev )
void wl_multicast( struct net_device *dev, int num_addrs, void *addrs )
{
- DBG_FUNC( "wl_multicast");
- DBG_ENTER(DbgInfo);
-
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
DBG_PARAM( DbgInfo, "num_addrs", "%d", num_addrs );
DBG_PARAM( DbgInfo, "addrs", "0x%p", addrs );
#error Obsolete set multicast interface!
-
- DBG_LEAVE( DbgInfo );
} // wl_multicast
/*============================================================================*/
@@ -1213,10 +1148,6 @@ struct net_device * wl_device_alloc( void )
{
struct net_device *dev = NULL;
struct wl_private *lp = NULL;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_device_alloc" );
- DBG_ENTER( DbgInfo );
/* Alloc a net_device struct */
dev = alloc_etherdev(sizeof(struct wl_private));
@@ -1253,7 +1184,6 @@ struct net_device * wl_device_alloc( void )
/* Allocate virtual devices for WDS support if needed */
WL_WDS_DEVICE_ALLOC( lp );
- DBG_LEAVE( DbgInfo );
return dev;
} // wl_device_alloc
/*============================================================================*/
@@ -1279,17 +1209,11 @@ struct net_device * wl_device_alloc( void )
void wl_device_dealloc( struct net_device *dev )
{
// struct wl_private *lp = wl_priv(dev);
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_device_dealloc" );
- DBG_ENTER( DbgInfo );
/* Dealloc the WDS ports */
WL_WDS_DEVICE_DEALLOC( lp );
free_netdev( dev );
-
- DBG_LEAVE( DbgInfo );
} // wl_device_dealloc
/*============================================================================*/
@@ -1496,10 +1420,6 @@ int wl_tx_port6( struct sk_buff *skb, struct net_device *dev )
void wl_wds_device_alloc( struct wl_private *lp )
{
int count;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_wds_device_alloc" );
- DBG_ENTER( DbgInfo );
/* WDS support requires additional net_device structs to be allocated,
so that user space apps can use these virtual devices to specify the
@@ -1508,10 +1428,8 @@ void wl_wds_device_alloc( struct wl_private *lp )
struct net_device *dev_wds = NULL;
dev_wds = kzalloc(sizeof(struct net_device), GFP_KERNEL);
- if (!dev_wds) {
- DBG_LEAVE(DbgInfo);
+ if (!dev_wds)
return;
- }
ether_setup( dev_wds );
@@ -1542,8 +1460,6 @@ void wl_wds_device_alloc( struct wl_private *lp )
lp->wds_port[5].dev->hard_start_xmit = &wl_tx_port6;
WL_WDS_NETIF_STOP_QUEUE( lp );
-
- DBG_LEAVE( DbgInfo );
} // wl_wds_device_alloc
/*============================================================================*/
@@ -1567,10 +1483,6 @@ void wl_wds_device_alloc( struct wl_private *lp )
void wl_wds_device_dealloc( struct wl_private *lp )
{
int count;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_wds_device_dealloc" );
- DBG_ENTER( DbgInfo );
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
struct net_device *dev_wds = NULL;
@@ -1587,8 +1499,6 @@ void wl_wds_device_dealloc( struct wl_private *lp )
lp->wds_port[count].dev = NULL;
}
}
-
- DBG_LEAVE( DbgInfo );
} // wl_wds_device_dealloc
/*============================================================================*/
@@ -1792,8 +1702,6 @@ int wl_send_dma( struct wl_private *lp, struct sk_buff *skb, int port )
DESC_STRCT *desc_next = NULL;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "wl_send_dma" );
-
if( lp == NULL ) {
DBG_ERROR( DbgInfo, "Private adapter struct is NULL\n" );
return FALSE;
@@ -1882,7 +1790,6 @@ int wl_rx_dma( struct net_device *dev )
//CFG_MB_INFO_RANGE2_STRCT x;
/*------------------------------------------------------------------------*/
- DBG_FUNC("wl_rx")
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
if((( lp = dev->priv ) != NULL ) &&
diff --git a/drivers/staging/wlags49_h2/wl_pci.c b/drivers/staging/wlags49_h2/wl_pci.c
deleted file mode 100644
index 6226e5eebf3a..000000000000
--- a/drivers/staging/wlags49_h2/wl_pci.c
+++ /dev/null
@@ -1,1578 +0,0 @@
-/*******************************************************************************
- * Agere Systems Inc.
- * Wireless device driver for Linux (wlags49).
- *
- * Copyright (c) 1998-2003 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- * Initially developed by TriplePoint, Inc.
- * http://www.triplepoint.com
- *
- *------------------------------------------------------------------------------
- *
- * This file contains processing and initialization specific to PCI/miniPCI
- * devices.
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2003 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- ******************************************************************************/
-
-/*******************************************************************************
- * include files
- ******************************************************************************/
-#include <wireless/wl_version.h>
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-//#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/bitops.h>
-#include <asm/uaccess.h>
-
-#include <linux/ethtool.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-
-#include <hcf/debug.h>
-
-#include <hcf.h>
-#include <dhf.h>
-#include <hcfdef.h>
-
-#include <wireless/wl_if.h>
-#include <wireless/wl_internal.h>
-#include <wireless/wl_util.h>
-#include <wireless/wl_main.h>
-#include <wireless/wl_netdev.h>
-#include <wireless/wl_pci.h>
-
-
-/*******************************************************************************
- * global variables
- ******************************************************************************/
-#if DBG
-extern dbg_info_t *DbgInfo;
-#endif // DBG
-
-/* define the PCI device Table Cardname and id tables */
-static struct pci_device_id wl_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_0), },
- { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_1), },
- { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_2), },
-
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(pci, wl_pci_tbl);
-
-/*******************************************************************************
- * function prototypes
- ******************************************************************************/
-int wl_pci_probe( struct pci_dev *pdev,
- const struct pci_device_id *ent );
-void wl_pci_remove(struct pci_dev *pdev);
-int wl_pci_setup( struct pci_dev *pdev );
-void wl_pci_enable_cardbus_interrupts( struct pci_dev *pdev );
-
-#ifdef ENABLE_DMA
-int wl_pci_dma_alloc( struct pci_dev *pdev, struct wl_private *lp );
-int wl_pci_dma_free( struct pci_dev *pdev, struct wl_private *lp );
-int wl_pci_dma_alloc_tx_packet( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc );
-int wl_pci_dma_free_tx_packet( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc );
-int wl_pci_dma_alloc_rx_packet( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc );
-int wl_pci_dma_free_rx_packet( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc );
-int wl_pci_dma_alloc_desc_and_buf( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc, int size );
-int wl_pci_dma_free_desc_and_buf( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc );
-int wl_pci_dma_alloc_desc( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc );
-int wl_pci_dma_free_desc( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc );
-int wl_pci_dma_alloc_buf( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT *desc, int size );
-int wl_pci_dma_free_buf( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT *desc );
-
-void wl_pci_dma_hcf_reclaim_rx( struct wl_private *lp );
-#endif // ENABLE_DMA
-
-/*******************************************************************************
- * PCI module function registration
- ******************************************************************************/
-static struct pci_driver wl_driver = {
- .name = MODULE_NAME,
- .id_table = wl_pci_tbl,
- .probe = wl_pci_probe,
- .remove = wl_pci_remove,
- .suspend = NULL,
- .resume = NULL
-};
-
-/*******************************************************************************
- * wl_adapter_init_module()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Called by init_module() to perform PCI-specific driver initialization.
- *
- * PARAMETERS:
- *
- * N/A
- *
- * RETURNS:
- *
- * 0
- *
- ******************************************************************************/
-int wl_adapter_init_module( void )
-{
- int result;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_adapter_init_module()" );
- DBG_ENTER( DbgInfo );
- DBG_TRACE( DbgInfo, "wl_adapter_init_module() -- PCI\n" );
-
- result = pci_register_driver( &wl_driver ); //;?replace with pci_module_init, Rubini pg 490
- //;? why not do something with the result
-
- DBG_LEAVE( DbgInfo );
- return 0;
-} // wl_adapter_init_module
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_adapter_cleanup_module()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Called by cleanup_module() to perform PCI-specific driver cleanup.
- *
- * PARAMETERS:
- *
- * N/A
- *
- * RETURNS:
- *
- * N/A
- *
- ******************************************************************************/
-void wl_adapter_cleanup_module( void )
-{
- //;?how come wl_adapter_cleanup_module is located in a seemingly pci specific module
- DBG_FUNC( "wl_adapter_cleanup_module" );
- DBG_ENTER( DbgInfo );
-
- //;?DBG_TRACE below feels like nearly redundant in the light of DBG_ENTER above
- DBG_TRACE( DbgInfo, "wl_adapter_cleanup_module() -- PCI\n" );
-
- pci_unregister_driver( &wl_driver );
-
- DBG_LEAVE( DbgInfo );
- return;
-} // wl_adapter_cleanup_module
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_adapter_insert()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Called by wl_pci_probe() to continue the process of device insertion.
- *
- * PARAMETERS:
- *
- * dev - a pointer to the device's net_device structure
- *
- * RETURNS:
- *
- * TRUE or FALSE
- *
- ******************************************************************************/
-int wl_adapter_insert( struct net_device *dev )
-{
- int result = FALSE;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_adapter_insert" );
- DBG_ENTER( DbgInfo );
-
- DBG_TRACE( DbgInfo, "wl_adapter_insert() -- PCI\n" );
-
- if( dev == NULL ) {
- DBG_ERROR( DbgInfo, "net_device pointer is NULL!!!\n" );
- } else if( dev->priv == NULL ) {
- DBG_ERROR( DbgInfo, "wl_private pointer is NULL!!!\n" );
- } else if( wl_insert( dev ) ) { /* Perform remaining device initialization */
- result = TRUE;
- } else {
- DBG_TRACE( DbgInfo, "wl_insert() FAILED\n" );
- }
- DBG_LEAVE( DbgInfo );
- return result;
-} // wl_adapter_insert
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_adapter_open()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Open the device.
- *
- * PARAMETERS:
- *
- * dev - a pointer to the device's net_device structure
- *
- * RETURNS:
- *
- * an HCF status code
- *
- ******************************************************************************/
-int wl_adapter_open( struct net_device *dev )
-{
- int result = 0;
- int hcf_status = HCF_SUCCESS;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_adapter_open" );
- DBG_ENTER( DbgInfo );
-
- DBG_TRACE( DbgInfo, "wl_adapter_open() -- PCI\n" );
-
- hcf_status = wl_open( dev );
-
- if( hcf_status != HCF_SUCCESS ) {
- result = -ENODEV;
- }
-
- DBG_LEAVE( DbgInfo );
- return result;
-} // wl_adapter_open
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_adapter_close()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Close the device
- *
- * PARAMETERS:
- *
- * dev - a pointer to the device's net_device structure
- *
- * RETURNS:
- *
- * 0
- *
- ******************************************************************************/
-int wl_adapter_close( struct net_device *dev )
-{
- DBG_FUNC( "wl_adapter_close" );
- DBG_ENTER( DbgInfo );
-
- DBG_TRACE( DbgInfo, "wl_adapter_close() -- PCI\n" );
- DBG_TRACE( DbgInfo, "%s: Shutting down adapter.\n", dev->name );
-
- wl_close( dev );
-
- DBG_LEAVE( DbgInfo );
- return 0;
-} // wl_adapter_close
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_adapter_is_open()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Check whether this device is open. Returns
- *
- * PARAMETERS:
- *
- * dev - a pointer to the device's net_device structure
- *
- * RETURNS:
- *
- * nonzero if device is open.
- *
- ******************************************************************************/
-int wl_adapter_is_open( struct net_device *dev )
-{
- /* This function is used in PCMCIA to check the status of the 'open' field
- in the dev_link_t structure associated with a network device. There
- doesn't seem to be an analog to this for PCI, and checking the status
- contained in the net_device structure doesn't have the same effect.
- For now, return TRUE, but find out if this is necessary for PCI. */
-
- return TRUE;
-} // wl_adapter_is_open
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_probe()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Registered in the pci_driver structure, this function is called when the
- * PCI subsystem finds a new PCI device which matches the information contained
- * in the pci_device_id table.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * ent - this device's entry in the pci_device_id table
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_probe( struct pci_dev *pdev,
- const struct pci_device_id *ent )
-{
- int result;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_probe" );
- DBG_ENTER( DbgInfo );
- DBG_PRINT( "%s\n", VERSION_INFO );
-
- result = wl_pci_setup( pdev );
-
- DBG_LEAVE( DbgInfo );
-
- return result;
-} // wl_pci_probe
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_remove()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Registered in the pci_driver structure, this function is called when the
- * PCI subsystem detects that a PCI device which matches the information
- * contained in the pci_device_id table has been removed.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- *
- * RETURNS:
- *
- * N/A
- *
- ******************************************************************************/
-void wl_pci_remove(struct pci_dev *pdev)
-{
- struct net_device *dev = NULL;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_remove" );
- DBG_ENTER( DbgInfo );
-
- /* Make sure the pci_dev pointer passed in is valid */
- if( pdev == NULL ) {
- DBG_ERROR( DbgInfo, "PCI subsys passed in an invalid pci_dev pointer\n" );
- return;
- }
-
- dev = pci_get_drvdata( pdev );
- if( dev == NULL ) {
- DBG_ERROR( DbgInfo, "Could not retrieve net_device structure\n" );
- return;
- }
-
- /* Perform device cleanup */
- wl_remove( dev );
- free_irq( dev->irq, dev );
-
-#ifdef ENABLE_DMA
- wl_pci_dma_free( pdev, dev->priv );
-#endif
-
- wl_device_dealloc( dev );
-
- DBG_LEAVE( DbgInfo );
- return;
-} // wl_pci_remove
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_setup()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Called by wl_pci_probe() to begin a device's initialization process.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_setup( struct pci_dev *pdev )
-{
- int result = 0;
- struct net_device *dev = NULL;
- struct wl_private *lp = NULL;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_setup" );
- DBG_ENTER( DbgInfo );
-
- /* Make sure the pci_dev pointer passed in is valid */
- if( pdev == NULL ) {
- DBG_ERROR( DbgInfo, "PCI subsys passed in an invalid pci_dev pointer\n" );
- return -ENODEV;
- }
-
- result = pci_enable_device( pdev );
- if( result != 0 ) {
- DBG_ERROR( DbgInfo, "pci_enable_device() failed\n" );
- DBG_LEAVE( DbgInfo );
- return result;
- }
-
- /* We found our device! Let's register it with the system */
- DBG_TRACE( DbgInfo, "Found our device, now registering\n" );
- dev = wl_device_alloc( );
- if( dev == NULL ) {
- DBG_ERROR( DbgInfo, "Could not register device!!!\n" );
- DBG_LEAVE( DbgInfo );
- return -ENOMEM;
- }
-
- /* Make sure that space was allocated for our private adapter struct */
- if( dev->priv == NULL ) {
- DBG_ERROR( DbgInfo, "Private adapter struct was not allocated!!!\n" );
- wl_device_dealloc(dev);
- DBG_LEAVE( DbgInfo );
- return -ENOMEM;
- }
-
-#ifdef ENABLE_DMA
- /* Allocate DMA Descriptors */
- if( wl_pci_dma_alloc( pdev, dev->priv ) < 0 ) {
- DBG_ERROR( DbgInfo, "Could not allocate DMA descriptor memory!!!\n" );
- wl_device_dealloc(dev);
- DBG_LEAVE( DbgInfo );
- return -ENOMEM;
- }
-#endif
-
- /* Register our private adapter structure with PCI */
- pci_set_drvdata( pdev, dev );
-
- /* Fill out bus specific information in the net_device struct */
- dev->irq = pdev->irq;
- SET_MODULE_OWNER( dev );
-
- DBG_TRACE( DbgInfo, "Device Base Address: %#03lx\n", pdev->resource[0].start );
- dev->base_addr = pdev->resource[0].start;
-
- /* Initialize our device here */
- if( !wl_adapter_insert( dev )) {
- DBG_ERROR( DbgInfo, "wl_adapter_insert() FAILED!!!\n" );
- wl_device_dealloc( dev );
- DBG_LEAVE( DbgInfo );
- return -EINVAL;
- }
-
- /* Register our ISR */
- DBG_TRACE( DbgInfo, "Registering ISR...\n" );
-
- result = request_irq(dev->irq, wl_isr, SA_SHIRQ, dev->name, dev);
- if( result ) {
- DBG_WARNING( DbgInfo, "Could not register ISR!!!\n" );
- wl_remove(dev);
- wl_device_dealloc(dev);
- DBG_LEAVE( DbgInfo );
- return result;
- }
-
- /* Make sure interrupts are enabled properly for CardBus */
- lp = dev->priv;
-
- if( lp->hcfCtx.IFB_BusType == CFG_NIC_BUS_TYPE_CARDBUS ||
- lp->hcfCtx.IFB_BusType == CFG_NIC_BUS_TYPE_PCI ) {
- DBG_TRACE( DbgInfo, "This is a PCI/CardBus card, enable interrupts\n" );
- wl_pci_enable_cardbus_interrupts( pdev );
- }
-
- /* Enable bus mastering */
- pci_set_master( pdev );
-
- DBG_LEAVE( DbgInfo );
- return 0;
-} // wl_pci_setup
-/*============================================================================*/
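Note that the error paths above free the net_device but never call pci_disable_device(), so a failed setup leaves the device enabled. A minimal sketch of the same flow with the usual goto-style unwinding follows; wl_device_alloc(), wl_device_dealloc() and wl_isr are the driver helpers used above, the function name is illustrative, and the DMA/CardBus steps are omitted for brevity.

static int wl_pci_setup_sketch(struct pci_dev *pdev)
{
	struct net_device *dev;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	dev = wl_device_alloc();
	if (!dev) {
		ret = -ENOMEM;
		goto err_disable;
	}

	pci_set_drvdata(pdev, dev);
	dev->irq = pdev->irq;
	dev->base_addr = pci_resource_start(pdev, 0);

	ret = request_irq(dev->irq, wl_isr, IRQF_SHARED, dev->name, dev);
	if (ret)
		goto err_dealloc;

	pci_set_master(pdev);
	return 0;

err_dealloc:
	wl_device_dealloc(dev);
err_disable:
	pci_disable_device(pdev);
	return ret;
}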
-
-/*******************************************************************************
- * wl_pci_enable_cardbus_interrupts()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Called by wl_pci_setup() to enable interrupts on a CardBus device. This
- *      is done by setting bit 15 in the function event mask register. This
- * CardBus-specific register is located in BAR2 (counting from BAR0), in memory
- *      space at byte offset 0x1F4 (0x7F4 for WARP).
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- *
- * RETURNS:
- *
- * N/A
- *
- ******************************************************************************/
-void wl_pci_enable_cardbus_interrupts( struct pci_dev *pdev )
-{
- u32 bar2_reg;
- u32 mem_addr_bus;
- u32 func_evt_mask_reg;
- void *mem_addr_kern = NULL;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_enable_cardbus_interrupts" );
- DBG_ENTER( DbgInfo );
-
- /* Initialize to known bad values */
- bar2_reg = 0xdeadbeef;
- mem_addr_bus = 0xdeadbeef;
-
- /* Read the BAR2 register; this register contains the base address of the
- memory region where the function event mask register lives */
- pci_read_config_dword( pdev, PCI_BASE_ADDRESS_2, &bar2_reg );
- mem_addr_bus = bar2_reg & PCI_BASE_ADDRESS_MEM_MASK;
-
- /* Once the base address is obtained, remap the memory region to kernel
- space so we can retrieve the register */
- mem_addr_kern = ioremap( mem_addr_bus, 0x200 );
-
-#ifdef HERMES25
-#define REG_OFFSET 0x07F4
-#else
-#define REG_OFFSET 0x01F4
-#endif // HERMES25
-
-#define BIT15 0x8000
-
- /* Retrieve the functional event mask register, enable interrupts by
- setting Bit 15, and write back the value */
- func_evt_mask_reg = *(u32 *)( mem_addr_kern + REG_OFFSET );
- func_evt_mask_reg |= BIT15;
- *(u32 *)( mem_addr_kern + REG_OFFSET ) = func_evt_mask_reg;
-
- /* Once complete, unmap the region and exit */
- iounmap( mem_addr_kern );
-
- DBG_LEAVE( DbgInfo );
- return;
-} // wl_pci_enable_cardbus_interrupts
-/*============================================================================*/
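The read-modify-write above dereferences the ioremap()'d pointer directly and never checks it for NULL. A short sketch of the same operation using the standard MMIO accessors, with REG_OFFSET as defined above and BIT(15) standing in for the BIT15 define:

	void __iomem *regs = ioremap(mem_addr_bus, 0x200);

	if (regs != NULL) {
		u32 mask = readl(regs + REG_OFFSET);

		/* Set bit 15 of the function event mask register */
		writel(mask | BIT(15), regs + REG_OFFSET);
		iounmap(regs);
	}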
-
-#ifdef ENABLE_DMA
-/*******************************************************************************
- * wl_pci_dma_alloc()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Allocates all resources needed for PCI/CardBus DMA operation
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_alloc( struct pci_dev *pdev, struct wl_private *lp )
-{
- int i;
- int status = 0;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_dma_alloc" );
- DBG_ENTER( DbgInfo );
-
-// lp->dma.tx_rsc_ind = lp->dma.rx_rsc_ind = 0;
-//
-// /* Alloc for the Tx chain and its reclaim descriptor */
-// for( i = 0; i < NUM_TX_DESC; i++ ) {
-// status = wl_pci_dma_alloc_tx_packet( pdev, lp, &lp->dma.tx_packet[i] );
-// if( status == 0 ) {
-// DBG_PRINT( "lp->dma.tx_packet[%d] : 0x%p\n", i, lp->dma.tx_packet[i] );
-// DBG_PRINT( "lp->dma.tx_packet[%d]->next_desc_addr : 0x%p\n", i, lp->dma.tx_packet[i]->next_desc_addr );
-// lp->dma.tx_rsc_ind++;
-// } else {
-// DBG_ERROR( DbgInfo, "Could not alloc DMA Tx Packet\n" );
-// break;
-// }
-// }
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_desc( pdev, lp, &lp->dma.tx_reclaim_desc );
-// DBG_PRINT( "lp->dma.tx_reclaim_desc: 0x%p\n", lp->dma.tx_reclaim_desc );
-// }
-// /* Alloc for the Rx chain and its reclaim descriptor */
-// if( status == 0 ) {
-// for( i = 0; i < NUM_RX_DESC; i++ ) {
-// status = wl_pci_dma_alloc_rx_packet( pdev, lp, &lp->dma.rx_packet[i] );
-// if( status == 0 ) {
-// DBG_PRINT( "lp->dma.rx_packet[%d] : 0x%p\n", i, lp->dma.rx_packet[i] );
-// DBG_PRINT( "lp->dma.rx_packet[%d]->next_desc_addr : 0x%p\n", i, lp->dma.rx_packet[i]->next_desc_addr );
-// lp->dma.rx_rsc_ind++;
-// } else {
-// DBG_ERROR( DbgInfo, "Could not alloc DMA Rx Packet\n" );
-// break;
-// }
-// }
-// }
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_desc( pdev, lp, &lp->dma.rx_reclaim_desc );
-// DBG_PRINT( "lp->dma.rx_reclaim_desc: 0x%p\n", lp->dma.rx_reclaim_desc );
-// }
-// /* Store status, as host should not call HCF functions if this fails */
-//	lp->dma.status = status; //;?all usages of dma.status have been commented out
-// DBG_LEAVE( DbgInfo );
- return status;
-} // wl_pci_dma_alloc
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_free()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- *      Deallocates all resources needed for PCI/CardBus DMA operation
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_free( struct pci_dev *pdev, struct wl_private *lp )
-{
- int i;
- int status = 0;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_dma_free" );
- DBG_ENTER( DbgInfo );
-
- /* Reclaim all Rx packets that were handed over to the HCF */
- /* Do I need to do this? Before this free is called, I've already disabled
- the port which will call wl_pci_dma_hcf_reclaim */
- //if( lp->dma.status == 0 )
- //{
- // wl_pci_dma_hcf_reclaim( lp );
- //}
-
- /* Free everything needed for DMA Rx */
- for( i = 0; i < NUM_RX_DESC; i++ ) {
- if( lp->dma.rx_packet[i] ) {
- status = wl_pci_dma_free_rx_packet( pdev, lp, &lp->dma.rx_packet[i] );
- if( status != 0 ) {
- DBG_WARNING( DbgInfo, "Problem freeing Rx packet\n" );
- }
- }
- }
- lp->dma.rx_rsc_ind = 0;
-
- if( lp->dma.rx_reclaim_desc ) {
- status = wl_pci_dma_free_desc( pdev, lp, &lp->dma.rx_reclaim_desc );
- if( status != 0 ) {
- DBG_WARNING( DbgInfo, "Problem freeing Rx reclaim descriptor\n" );
- }
- }
-
- /* Free everything needed for DMA Tx */
- for( i = 0; i < NUM_TX_DESC; i++ ) {
- if( lp->dma.tx_packet[i] ) {
- status = wl_pci_dma_free_tx_packet( pdev, lp, &lp->dma.tx_packet[i] );
- if( status != 0 ) {
- DBG_WARNING( DbgInfo, "Problem freeing Tx packet\n" );
- }
- }
- }
- lp->dma.tx_rsc_ind = 0;
-
- if( lp->dma.tx_reclaim_desc ) {
- status = wl_pci_dma_free_desc( pdev, lp, &lp->dma.tx_reclaim_desc );
- if( status != 0 ) {
- DBG_WARNING( DbgInfo, "Problem freeing Tx reclaim descriptor\n" );
- }
- }
-
- DBG_LEAVE( DbgInfo );
- return status;
-} // wl_pci_dma_free
-
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_alloc_tx_packet()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- *      Allocates a single Tx packet, consisting of two chained descriptors and
- *      their buffers. Data to transmit is first copied into the 'payload' buffer
- * before being transmitted.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- * desc - a pointer which will reference the descriptor to be alloc'd.
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_alloc_tx_packet( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc )
-{
-// int status = 0;
-// /*------------------------------------------------------------------------*/
-//
-// if( desc == NULL ) {
-// status = -EFAULT;
-// }
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_desc_and_buf( pdev, lp, desc,
-// HCF_DMA_TX_BUF1_SIZE );
-//
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_desc_and_buf( pdev, lp,
-// &( (*desc)->next_desc_addr ),
-// HCF_MAX_PACKET_SIZE );
-// }
-// }
-// if( status == 0 ) {
-// (*desc)->next_desc_phys_addr = (*desc)->next_desc_addr->desc_phys_addr;
-// }
-// return status;
-} // wl_pci_dma_alloc_tx_packet
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_free_tx_packet()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Frees a single Tx packet, described in the corresponding alloc function.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- *      desc  - a pointer referencing the descriptor to be freed.
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_free_tx_packet( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc )
-{
- int status = 0;
- /*------------------------------------------------------------------------*/
-
- if( *desc == NULL ) {
- DBG_PRINT( "Null descriptor\n" );
- status = -EFAULT;
- }
-	//;?the "limited" NDIS strategy assumes a frame ALWAYS consists of 2
-	//descriptors; make this robust
- if( status == 0 && (*desc)->next_desc_addr ) {
- status = wl_pci_dma_free_desc_and_buf( pdev, lp, &(*desc)->next_desc_addr );
- }
- if( status == 0 ) {
- status = wl_pci_dma_free_desc_and_buf( pdev, lp, desc );
- }
- return status;
-} // wl_pci_dma_free_tx_packet
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_alloc_rx_packet()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Allocates a single Rx packet, consisting of two descriptors and one
- * contiguous buffer. The buffer starts with the hermes-specific header.
- * One descriptor points at the start, the other at offset 0x3a of the
- * buffer.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- * desc - a pointer which will reference the descriptor to be alloc'd.
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_alloc_rx_packet( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc )
-{
- int status = 0;
- DESC_STRCT *p;
- /*------------------------------------------------------------------------*/
-
-// if( desc == NULL ) {
-// status = -EFAULT;
-// }
-// //;?the "limited" NDIS strategy, assuming a frame consists ALWAYS out of 2
-// //descriptors, make this robust
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_desc( pdev, lp, desc );
-// }
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_buf( pdev, lp, *desc, HCF_MAX_PACKET_SIZE );
-// }
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_desc( pdev, lp, &p );
-// }
-// if( status == 0 ) {
-// /* Size of 1st descriptor becomes 0x3a bytes */
-// SET_BUF_SIZE( *desc, HCF_DMA_RX_BUF1_SIZE );
-//
-// /* Make 2nd descriptor point at offset 0x3a of the buffer */
-// SET_BUF_SIZE( p, ( HCF_MAX_PACKET_SIZE - HCF_DMA_RX_BUF1_SIZE ));
-// p->buf_addr = (*desc)->buf_addr + HCF_DMA_RX_BUF1_SIZE;
-// p->buf_phys_addr = (*desc)->buf_phys_addr + HCF_DMA_RX_BUF1_SIZE;
-// p->next_desc_addr = NULL;
-//
-// /* Chain 2nd descriptor to 1st descriptor */
-// (*desc)->next_desc_addr = p;
-// (*desc)->next_desc_phys_addr = p->desc_phys_addr;
-// }
-
- return status;
-} // wl_pci_dma_alloc_rx_packet
-/*============================================================================*/
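The two-descriptor split described in the header above only survives here as commented-out code. A sketch of the intended chaining, assuming *desc already owns a HCF_MAX_PACKET_SIZE coherent buffer and p is a second, freshly allocated descriptor:

	/* First descriptor covers the 0x3a-byte Hermes-specific header */
	SET_BUF_SIZE(*desc, HCF_DMA_RX_BUF1_SIZE);

	/* Second descriptor covers the remainder of the same buffer */
	SET_BUF_SIZE(p, HCF_MAX_PACKET_SIZE - HCF_DMA_RX_BUF1_SIZE);
	p->buf_addr       = (*desc)->buf_addr + HCF_DMA_RX_BUF1_SIZE;
	p->buf_phys_addr  = (*desc)->buf_phys_addr + HCF_DMA_RX_BUF1_SIZE;
	p->next_desc_addr = NULL;

	/* Chain the second descriptor behind the first */
	(*desc)->next_desc_addr      = p;
	(*desc)->next_desc_phys_addr = p->desc_phys_addr;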
-
-/*******************************************************************************
- * wl_pci_dma_free_rx_packet()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Frees a single Rx packet, described in the corresponding alloc function.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- *      desc  - a pointer referencing the descriptor to be freed.
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_free_rx_packet( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc )
-{
- int status = 0;
- DESC_STRCT *p;
- /*------------------------------------------------------------------------*/
-
- if( *desc == NULL ) {
- status = -EFAULT;
- }
- if( status == 0 ) {
- p = (*desc)->next_desc_addr;
-
- /* Free the 2nd descriptor */
- if( p != NULL ) {
- p->buf_addr = NULL;
- p->buf_phys_addr = 0;
-
- status = wl_pci_dma_free_desc( pdev, lp, &p );
- }
- }
-
- /* Free the buffer and 1st descriptor */
- if( status == 0 ) {
- SET_BUF_SIZE( *desc, HCF_MAX_PACKET_SIZE );
- status = wl_pci_dma_free_desc_and_buf( pdev, lp, desc );
- }
- return status;
-} // wl_pci_dma_free_rx_packet
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_alloc_desc_and_buf()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Allocates a DMA descriptor and buffer, and associates them with one
- * another.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- * desc - a pointer which will reference the descriptor to be alloc'd
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_alloc_desc_and_buf( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc, int size )
-{
- int status = 0;
- /*------------------------------------------------------------------------*/
-
-// if( desc == NULL ) {
-// status = -EFAULT;
-// }
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_desc( pdev, lp, desc );
-//
-// if( status == 0 ) {
-// status = wl_pci_dma_alloc_buf( pdev, lp, *desc, size );
-// }
-// }
- return status;
-} // wl_pci_dma_alloc_desc_and_buf
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_free_desc_and_buf()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Frees a DMA descriptor and associated buffer.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- *      desc  - a pointer referencing the descriptor to be freed
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_free_desc_and_buf( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc )
-{
- int status = 0;
- /*------------------------------------------------------------------------*/
-
- if( desc == NULL ) {
- status = -EFAULT;
- }
- if( status == 0 && *desc == NULL ) {
- status = -EFAULT;
- }
- if( status == 0 ) {
- status = wl_pci_dma_free_buf( pdev, lp, *desc );
-
- if( status == 0 ) {
- status = wl_pci_dma_free_desc( pdev, lp, desc );
- }
- }
- return status;
-} // wl_pci_dma_free_desc_and_buf
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_alloc_desc()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Allocates one DMA descriptor in cache coherent memory.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_alloc_desc( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc )
-{
-// int status = 0;
-// dma_addr_t pa;
-// /*------------------------------------------------------------------------*/
-//
-// DBG_FUNC( "wl_pci_dma_alloc_desc" );
-// DBG_ENTER( DbgInfo );
-//
-// if( desc == NULL ) {
-// status = -EFAULT;
-// }
-// if( status == 0 ) {
-// *desc = pci_alloc_consistent( pdev, sizeof( DESC_STRCT ), &pa );
-// }
-// if( *desc == NULL ) {
-// DBG_ERROR( DbgInfo, "pci_alloc_consistent() failed\n" );
-// status = -ENOMEM;
-// } else {
-// memset( *desc, 0, sizeof( DESC_STRCT ));
-// (*desc)->desc_phys_addr = cpu_to_le32( pa );
-// }
-// DBG_LEAVE( DbgInfo );
-// return status;
-} // wl_pci_dma_alloc_desc
-/*============================================================================*/
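The body above is entirely commented out and, as written, falls off the end of a non-void function. A sketch of the allocation it was performing, expressed with the generic DMA API rather than pci_alloc_consistent(); the function name is illustrative:

static int wl_pci_dma_alloc_desc_sketch(struct pci_dev *pdev, DESC_STRCT **desc)
{
	dma_addr_t pa;

	/* One coherent descriptor, zeroed before handing it to the HCF */
	*desc = dma_alloc_coherent(&pdev->dev, sizeof(DESC_STRCT), &pa,
				   GFP_KERNEL);
	if (*desc == NULL)
		return -ENOMEM;

	memset(*desc, 0, sizeof(DESC_STRCT));
	(*desc)->desc_phys_addr = cpu_to_le32(pa);
	return 0;
}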
-
-/*******************************************************************************
- * wl_pci_dma_free_desc()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Frees one DMA descriptor in cache coherent memory.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_free_desc( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT **desc )
-{
- int status = 0;
- /*------------------------------------------------------------------------*/
-
- if( *desc == NULL ) {
- status = -EFAULT;
- }
- if( status == 0 ) {
- pci_free_consistent( pdev, sizeof( DESC_STRCT ), *desc,
- (*desc)->desc_phys_addr );
- }
- *desc = NULL;
- return status;
-} // wl_pci_dma_free_desc
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_alloc_buf()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Allocates one DMA buffer in cache coherent memory, and associates a DMA
- * descriptor with this buffer.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_alloc_buf( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT *desc, int size )
-{
- int status = 0;
- dma_addr_t pa;
- /*------------------------------------------------------------------------*/
-
-// DBG_FUNC( "wl_pci_dma_alloc_buf" );
-// DBG_ENTER( DbgInfo );
-//
-// if( desc == NULL ) {
-// status = -EFAULT;
-// }
-// if( status == 0 && desc->buf_addr != NULL ) {
-// status = -EFAULT;
-// }
-// if( status == 0 ) {
-// desc->buf_addr = pci_alloc_consistent( pdev, size, &pa );
-// }
-// if( desc->buf_addr == NULL ) {
-// DBG_ERROR( DbgInfo, "pci_alloc_consistent() failed\n" );
-// status = -ENOMEM;
-// } else {
-// desc->buf_phys_addr = cpu_to_le32( pa );
-// SET_BUF_SIZE( desc, size );
-// }
-// DBG_LEAVE( DbgInfo );
- return status;
-} // wl_pci_dma_alloc_buf
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_free_buf()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- *      Frees one DMA buffer in cache coherent memory and clears the DMA
- *      descriptor's reference to it.
- *
- * PARAMETERS:
- *
- * pdev - a pointer to the device's pci_dev structure
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- * 0 on success
- * errno value otherwise
- *
- ******************************************************************************/
-int wl_pci_dma_free_buf( struct pci_dev *pdev, struct wl_private *lp,
- DESC_STRCT *desc )
-{
- int status = 0;
- /*------------------------------------------------------------------------*/
-
- if( desc == NULL ) {
- status = -EFAULT;
- }
- if( status == 0 && desc->buf_addr == NULL ) {
- status = -EFAULT;
- }
- if( status == 0 ) {
- pci_free_consistent( pdev, GET_BUF_SIZE( desc ), desc->buf_addr,
- desc->buf_phys_addr );
-
- desc->buf_addr = 0;
- desc->buf_phys_addr = 0;
- SET_BUF_SIZE( desc, 0 );
- }
- return status;
-} // wl_pci_dma_free_buf
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_hcf_supply()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Supply HCF with DMA-related resources. These consist of:
- * - buffers and descriptors for receive purposes
- * - one 'reclaim' descriptor for the transmit path, used to fulfill a
- * certain H25 DMA engine requirement
- * - one 'reclaim' descriptor for the receive path, used to fulfill a
- * certain H25 DMA engine requirement
- *
- * This function is called at start-of-day or at re-initialization.
- *
- * PARAMETERS:
- *
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- *      N/A
- *
- ******************************************************************************/
-void wl_pci_dma_hcf_supply( struct wl_private *lp )
-{
- int i;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_dma_hcf_supply" );
- DBG_ENTER( DbgInfo );
-
- //if( lp->dma.status == 0 );
- //{
- /* Hand over the Rx/Tx reclaim descriptors to the HCF */
- if( lp->dma.tx_reclaim_desc ) {
- DBG_PRINT( "lp->dma.tx_reclaim_desc: 0x%p\n", lp->dma.tx_reclaim_desc );
- hcf_dma_tx_put( &lp->hcfCtx, lp->dma.tx_reclaim_desc, 0 );
- lp->dma.tx_reclaim_desc = NULL;
- DBG_PRINT( "lp->dma.tx_reclaim_desc: 0x%p\n", lp->dma.tx_reclaim_desc );
- }
- if( lp->dma.rx_reclaim_desc ) {
- DBG_PRINT( "lp->dma.rx_reclaim_desc: 0x%p\n", lp->dma.rx_reclaim_desc );
- hcf_dma_rx_put( &lp->hcfCtx, lp->dma.rx_reclaim_desc );
- lp->dma.rx_reclaim_desc = NULL;
- DBG_PRINT( "lp->dma.rx_reclaim_desc: 0x%p\n", lp->dma.rx_reclaim_desc );
- }
- /* Hand over the Rx descriptor chain to the HCF */
- for( i = 0; i < NUM_RX_DESC; i++ ) {
- DBG_PRINT( "lp->dma.rx_packet[%d]: 0x%p\n", i, lp->dma.rx_packet[i] );
- hcf_dma_rx_put( &lp->hcfCtx, lp->dma.rx_packet[i] );
- lp->dma.rx_packet[i] = NULL;
- DBG_PRINT( "lp->dma.rx_packet[%d]: 0x%p\n", i, lp->dma.rx_packet[i] );
- }
- //}
-
- DBG_LEAVE( DbgInfo );
- return;
-} // wl_pci_dma_hcf_supply
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_hcf_reclaim()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Return DMA-related resources from the HCF. These consist of:
- * - buffers and descriptors for receive purposes
- * - buffers and descriptors for transmit purposes
- * - one 'reclaim' descriptor for the transmit path, used to fulfill a
- * certain H25 DMA engine requirement
- * - one 'reclaim' descriptor for the receive path, used to fulfill a
- * certain H25 DMA engine requirement
- *
- * This function is called at end-of-day or at re-initialization.
- *
- * PARAMETERS:
- *
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- *      N/A
- *
- ******************************************************************************/
-void wl_pci_dma_hcf_reclaim( struct wl_private *lp )
-{
- int i;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_dma_hcf_reclaim" );
- DBG_ENTER( DbgInfo );
-
- wl_pci_dma_hcf_reclaim_rx( lp );
- for( i = 0; i < NUM_RX_DESC; i++ ) {
- DBG_PRINT( "rx_packet[%d] 0x%p\n", i, lp->dma.rx_packet[i] );
-// if( lp->dma.rx_packet[i] == NULL ) {
-// DBG_PRINT( "wl_pci_dma_hcf_reclaim: rx_packet[%d] NULL\n", i );
-// }
- }
-
- wl_pci_dma_hcf_reclaim_tx( lp );
- for( i = 0; i < NUM_TX_DESC; i++ ) {
- DBG_PRINT( "tx_packet[%d] 0x%p\n", i, lp->dma.tx_packet[i] );
-// if( lp->dma.tx_packet[i] == NULL ) {
-// DBG_PRINT( "wl_pci_dma_hcf_reclaim: tx_packet[%d] NULL\n", i );
-// }
- }
-
- DBG_LEAVE( DbgInfo );
- return;
-} // wl_pci_dma_hcf_reclaim
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_hcf_reclaim_rx()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Reclaim Rx packets that have already been processed by the HCF.
- *
- * PARAMETERS:
- *
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- *      N/A
- *
- ******************************************************************************/
-void wl_pci_dma_hcf_reclaim_rx( struct wl_private *lp )
-{
- int i;
- DESC_STRCT *p;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_dma_hcf_reclaim_rx" );
- DBG_ENTER( DbgInfo );
-
- //if( lp->dma.status == 0 )
- //{
- while ( ( p = hcf_dma_rx_get( &lp->hcfCtx ) ) != NULL ) {
- if( p && p->buf_addr == NULL ) {
- /* A reclaim descriptor is being given back by the HCF. Reclaim
- descriptors have a NULL buf_addr */
- lp->dma.rx_reclaim_desc = p;
- DBG_PRINT( "reclaim_descriptor: 0x%p\n", p );
- continue;
- }
- for( i = 0; i < NUM_RX_DESC; i++ ) {
- if( lp->dma.rx_packet[i] == NULL ) {
- break;
- }
- }
- /* An Rx buffer descriptor is being given back by the HCF */
- lp->dma.rx_packet[i] = p;
- lp->dma.rx_rsc_ind++;
- DBG_PRINT( "rx_packet[%d] 0x%p\n", i, lp->dma.rx_packet[i] );
- }
- //}
- DBG_LEAVE( DbgInfo );
-} // wl_pci_dma_hcf_reclaim_rx
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_get_tx_packet()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Obtains a Tx descriptor from the chain to use for Tx.
- *
- * PARAMETERS:
- *
- * lp - a pointer to the device's wl_private structure.
- *
- * RETURNS:
- *
- * A pointer to the retrieved descriptor
- *
- ******************************************************************************/
-DESC_STRCT * wl_pci_dma_get_tx_packet( struct wl_private *lp )
-{
- int i;
- DESC_STRCT *desc = NULL;
- /*------------------------------------------------------------------------*/
-
- for( i = 0; i < NUM_TX_DESC; i++ ) {
- if( lp->dma.tx_packet[i] ) {
- break;
- }
- }
-
- if( i != NUM_TX_DESC ) {
- desc = lp->dma.tx_packet[i];
-
- lp->dma.tx_packet[i] = NULL;
- lp->dma.tx_rsc_ind--;
-
- memset( desc->buf_addr, 0, HCF_DMA_TX_BUF1_SIZE );
- }
-
- return desc;
-} // wl_pci_dma_get_tx_packet
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_put_tx_packet()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Returns a Tx descriptor to the chain.
- *
- * PARAMETERS:
- *
- * lp - a pointer to the device's wl_private structure.
- * desc - a pointer to the descriptor to return.
- *
- * RETURNS:
- *
- * N/A
- *
- ******************************************************************************/
-void wl_pci_dma_put_tx_packet( struct wl_private *lp, DESC_STRCT *desc )
-{
- int i;
- /*------------------------------------------------------------------------*/
-
- for( i = 0; i < NUM_TX_DESC; i++ ) {
- if( lp->dma.tx_packet[i] == NULL ) {
- break;
- }
- }
-
- if( i != NUM_TX_DESC ) {
- lp->dma.tx_packet[i] = desc;
- lp->dma.tx_rsc_ind++;
- }
-} // wl_pci_dma_put_tx_packet
-/*============================================================================*/
-
-/*******************************************************************************
- * wl_pci_dma_hcf_reclaim_tx()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- *      Reclaim Tx packets that have been processed by the HCF, either due to a
- *      port disable or a Tx completion.
- *
- * PARAMETERS:
- *
- * lp - the device's private adapter structure
- *
- * RETURNS:
- *
- *      N/A
- *
- ******************************************************************************/
-void wl_pci_dma_hcf_reclaim_tx( struct wl_private *lp )
-{
- int i;
- DESC_STRCT *p;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_pci_dma_hcf_reclaim_tx" );
- DBG_ENTER( DbgInfo );
-
- //if( lp->dma.status == 0 )
- //{
- while ( ( p = hcf_dma_tx_get( &lp->hcfCtx ) ) != NULL ) {
-
- if( p != NULL && p->buf_addr == NULL ) {
- /* A Reclaim descriptor is being given back by the HCF. Reclaim
- descriptors have a NULL buf_addr */
- lp->dma.tx_reclaim_desc = p;
- DBG_PRINT( "reclaim_descriptor: 0x%p\n", p );
- continue;
- }
- for( i = 0; i < NUM_TX_DESC; i++ ) {
- if( lp->dma.tx_packet[i] == NULL ) {
- break;
- }
- }
-		/* A Tx buffer descriptor is being given back by the HCF */
- lp->dma.tx_packet[i] = p;
- lp->dma.tx_rsc_ind++;
- DBG_PRINT( "tx_packet[%d] 0x%p\n", i, lp->dma.tx_packet[i] );
- }
- //}
-
- if( lp->netif_queue_on == FALSE ) {
- netif_wake_queue( lp->dev );
- WL_WDS_NETIF_WAKE_QUEUE( lp );
- lp->netif_queue_on = TRUE;
- }
- DBG_LEAVE( DbgInfo );
- return;
-} // wl_pci_dma_hcf_reclaim_tx
-/*============================================================================*/
-#endif // ENABLE_DMA
diff --git a/drivers/staging/wlags49_h2/wl_pci.h b/drivers/staging/wlags49_h2/wl_pci.h
deleted file mode 100644
index 86831f1b4de2..000000000000
--- a/drivers/staging/wlags49_h2/wl_pci.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*******************************************************************************
- * Agere Systems Inc.
- * Wireless device driver for Linux (wlags49).
- *
- * Copyright (c) 1998-2003 Agere Systems Inc.
- * All rights reserved.
- * http://www.agere.com
- *
- * Initially developed by TriplePoint, Inc.
- * http://www.triplepoint.com
- *
- *------------------------------------------------------------------------------
- *
- * Header describing information required for the driver to support PCI.
- *
- *------------------------------------------------------------------------------
- *
- * SOFTWARE LICENSE
- *
- * This software is provided subject to the following terms and conditions,
- * which you should read carefully before using the software. Using this
- * software indicates your acceptance of these terms and conditions. If you do
- * not agree with these terms and conditions, do not use the software.
- *
- * Copyright © 2003 Agere Systems Inc.
- * All rights reserved.
- *
- * Redistribution and use in source or binary forms, with or without
- * modifications, are permitted provided that the following conditions are met:
- *
- * . Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following Disclaimer as comments in the code as
- * well as in the documentation and/or other materials provided with the
- * distribution.
- *
- * . Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following Disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * . Neither the name of Agere Systems Inc. nor the names of the contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * Disclaimer
- *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
- * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
- * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- ******************************************************************************/
-
-#ifndef __WL_PCI_H__
-#define __WL_PCI_H__
-
-
-
-
-/*******************************************************************************
- * constant definitions
- ******************************************************************************/
-#define PCI_VENDOR_ID_WL_LKM		0x11C1		/* Lucent Microelectronics */
-#define PCI_DEVICE_ID_WL_LKM_0 0xAB30 /* Mini PCI */
-#define PCI_DEVICE_ID_WL_LKM_1 0xAB34 /* Mini PCI */
-#define PCI_DEVICE_ID_WL_LKM_2 0xAB11 /* WARP CardBus */
-
-
-
-
-/*******************************************************************************
- * function prototypes
- ******************************************************************************/
-int wl_adapter_init_module( void );
-
-void wl_adapter_cleanup_module( void );
-
-int wl_adapter_insert( struct net_device *dev );
-
-int wl_adapter_open( struct net_device *dev );
-
-int wl_adapter_close( struct net_device *dev );
-
-int wl_adapter_is_open( struct net_device *dev );
-
-
-#ifdef ENABLE_DMA
-
-void wl_pci_dma_hcf_supply( struct wl_private *lp );
-
-void wl_pci_dma_hcf_reclaim( struct wl_private *lp );
-
-DESC_STRCT * wl_pci_dma_get_tx_packet( struct wl_private *lp );
-
-void wl_pci_dma_put_tx_packet( struct wl_private *lp, DESC_STRCT *desc );
-
-void wl_pci_dma_hcf_reclaim_tx( struct wl_private *lp );
-
-#endif // ENABLE_DMA
-
-
-#endif // __WL_PCI_H__
diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
index 7e10dcdc3090..41f332499d42 100644
--- a/drivers/staging/wlags49_h2/wl_priv.c
+++ b/drivers/staging/wlags49_h2/wl_priv.c
@@ -94,16 +94,6 @@ int cfg_driver_info(struct uilreq *urq, struct wl_private *lp);
int cfg_driver_identity(struct uilreq *urq, struct wl_private *lp);
-/*******************************************************************************
- * global variables
- ******************************************************************************/
-#if DBG
-extern dbg_info_t *DbgInfo;
-#endif /* DBG */
-
-
-
-
/* If USE_UIL is not defined, then none of the UIL Interface code below will
be included in the build */
#ifdef USE_UIL
@@ -130,10 +120,6 @@ extern dbg_info_t *DbgInfo;
int wvlan_uil(struct uilreq *urq, struct wl_private *lp)
{
int ioctl_ret = 0;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC("wvlan_uil");
- DBG_ENTER(DbgInfo);
switch (urq->command) {
case UIL_FUN_CONNECT:
@@ -165,7 +151,6 @@ int wvlan_uil(struct uilreq *urq, struct wl_private *lp)
ioctl_ret = -EOPNOTSUPP;
break;
}
- DBG_LEAVE(DbgInfo);
return ioctl_ret;
} /* wvlan_uil */
/*============================================================================*/
@@ -195,12 +180,6 @@ int wvlan_uil(struct uilreq *urq, struct wl_private *lp)
int wvlan_uil_connect(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_uil_connect");
- DBG_ENTER(DbgInfo);
-
if (!(lp->flags & WVLAN2_UIL_CONNECTED)) {
lp->flags |= WVLAN2_UIL_CONNECTED;
@@ -211,7 +190,6 @@ int wvlan_uil_connect(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_IN_USE;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_connect */
/*============================================================================*/
@@ -241,12 +219,6 @@ int wvlan_uil_connect(struct uilreq *urq, struct wl_private *lp)
int wvlan_uil_disconnect(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_uil_disconnect");
- DBG_ENTER(DbgInfo);
-
if (urq->hcfCtx == &(lp->hcfCtx)) {
if (lp->flags & WVLAN2_UIL_CONNECTED) {
@@ -266,7 +238,6 @@ int wvlan_uil_disconnect(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_disconnect */
/*============================================================================*/
@@ -297,12 +268,6 @@ int wvlan_uil_action(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
ltv_t *ltv;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_uil_action");
- DBG_ENTER(DbgInfo);
-
if (urq->hcfCtx == &(lp->hcfCtx)) {
/* Make sure there's an LTV in the request buffer */
@@ -344,7 +309,6 @@ int wvlan_uil_action(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_action */
/*============================================================================*/
@@ -376,11 +340,6 @@ int wvlan_uil_action(struct uilreq *urq, struct wl_private *lp)
int wvlan_uil_block(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_uil_block");
- DBG_ENTER(DbgInfo);
if (urq->hcfCtx == &(lp->hcfCtx)) {
if (capable(CAP_NET_ADMIN)) {
@@ -398,7 +357,6 @@ int wvlan_uil_block(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_block */
/*============================================================================*/
@@ -428,11 +386,6 @@ int wvlan_uil_block(struct uilreq *urq, struct wl_private *lp)
int wvlan_uil_unblock(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_uil_unblock");
- DBG_ENTER(DbgInfo);
if (urq->hcfCtx == &(lp->hcfCtx)) {
if (capable(CAP_NET_ADMIN)) {
@@ -451,7 +404,6 @@ int wvlan_uil_unblock(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_unblock */
/*============================================================================*/
@@ -482,11 +434,6 @@ int wvlan_uil_send_diag_msg(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
DESC_STRCT Descp[1];
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_uil_send_diag_msg");
- DBG_ENTER(DbgInfo);
if (urq->hcfCtx == &(lp->hcfCtx)) {
if (capable(CAP_NET_ADMIN)) {
@@ -499,7 +446,6 @@ int wvlan_uil_send_diag_msg(struct uilreq *urq, struct wl_private *lp)
if (result != 0) {
DBG_ERROR(DbgInfo, "verify_area failed, result: %d\n", result);
urq->result = UIL_FAILURE;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -518,7 +464,6 @@ int wvlan_uil_send_diag_msg(struct uilreq *urq, struct wl_private *lp)
DBG_ERROR(DbgInfo, "ENOMEM\n");
urq->result = UIL_FAILURE;
result = -ENOMEM;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -539,7 +484,6 @@ int wvlan_uil_send_diag_msg(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_send_diag_msg */
/*============================================================================*/
@@ -575,10 +519,6 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
#ifdef USE_WDS
hcf_16 hcfPort = HCF_PORT_0;
#endif /* USE_WDS */
- /*------------------------------------------------------------------------*/
- DBG_FUNC("wvlan_uil_put_info");
- DBG_ENTER(DbgInfo);
-
if (urq->hcfCtx == &(lp->hcfCtx)) {
if (capable(CAP_NET_ADMIN)) {
@@ -589,7 +529,6 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_LEN;
DBG_ERROR(DbgInfo, "No Length/Type in LTV!!!\n");
DBG_ERROR(DbgInfo, "UIL_ERR_LEN\n");
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -598,7 +537,6 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
if (result != 0) {
urq->result = UIL_FAILURE;
DBG_ERROR(DbgInfo, "verify_area(), VERIFY_READ FAILED\n");
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -611,7 +549,6 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
urq->len = sizeof(lp->ltvRecord);
urq->result = UIL_ERR_LEN;
DBG_ERROR(DbgInfo, "UIL_ERR_LEN\n");
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -627,7 +564,6 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
urq->len = sizeof(lp->ltvRecord);
urq->result = UIL_ERR_LEN;
result = -ENOMEM;
- DBG_LEAVE(DbgInfo);
return result;
}
} else {
@@ -1161,7 +1097,6 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_put_info */
@@ -1191,10 +1126,6 @@ int wvlan_uil_get_info(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
int i;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC("wvlan_uil_get_info");
- DBG_ENTER(DbgInfo);
if (urq->hcfCtx == &(lp->hcfCtx)) {
if ((urq->data != NULL) && (urq->len != 0)) {
@@ -1207,7 +1138,6 @@ int wvlan_uil_get_info(struct uilreq *urq, struct wl_private *lp)
DBG_ERROR(DbgInfo, "No Length/Type in LTV!!!\n");
DBG_ERROR(DbgInfo, "UIL_ERR_LEN\n");
urq->result = UIL_ERR_LEN;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -1216,7 +1146,6 @@ int wvlan_uil_get_info(struct uilreq *urq, struct wl_private *lp)
if (result != 0) {
DBG_ERROR(DbgInfo, "verify_area(), VERIFY_READ FAILED\n");
urq->result = UIL_FAILURE;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -1229,7 +1158,6 @@ int wvlan_uil_get_info(struct uilreq *urq, struct wl_private *lp)
DBG_ERROR(DbgInfo, "Incoming LTV too big\n");
urq->len = sizeof(lp->ltvRecord);
urq->result = UIL_ERR_LEN;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -1513,7 +1441,6 @@ int wvlan_uil_get_info(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_ERR_WRONG_IFB;
}
- DBG_LEAVE(DbgInfo);
return result;
} /* wvlan_uil_get_info */
/*============================================================================*/
@@ -1544,18 +1471,11 @@ int wvlan_uil_get_info(struct uilreq *urq, struct wl_private *lp)
int cfg_driver_info(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("cfg_driver_info");
- DBG_ENTER(DbgInfo);
-
/* Make sure that user buffer can handle the driver information buffer */
if (urq->len < sizeof(lp->driverInfo)) {
urq->len = sizeof(lp->driverInfo);
urq->result = UIL_ERR_LEN;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -1563,7 +1483,6 @@ int cfg_driver_info(struct uilreq *urq, struct wl_private *lp)
result = verify_area(VERIFY_WRITE, urq->data, sizeof(lp->driverInfo));
if (result != 0) {
urq->result = UIL_FAILURE;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -1573,7 +1492,6 @@ int cfg_driver_info(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_SUCCESS;
copy_to_user(urq->data, &(lp->driverInfo), sizeof(lp->driverInfo));
- DBG_LEAVE(DbgInfo);
return result;
} /* cfg_driver_info */
/*============================================================================*/
@@ -1603,18 +1521,11 @@ int cfg_driver_info(struct uilreq *urq, struct wl_private *lp)
int cfg_driver_identity(struct uilreq *urq, struct wl_private *lp)
{
int result = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_driver_identity");
- DBG_ENTER(DbgInfo);
-
/* Make sure that user buffer can handle the driver identity structure. */
if (urq->len < sizeof(lp->driverIdentity)) {
urq->len = sizeof(lp->driverIdentity);
urq->result = UIL_ERR_LEN;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -1622,7 +1533,6 @@ int cfg_driver_identity(struct uilreq *urq, struct wl_private *lp)
result = verify_area(VERIFY_WRITE, urq->data, sizeof(lp->driverIdentity));
if (result != 0) {
urq->result = UIL_FAILURE;
- DBG_LEAVE(DbgInfo);
return result;
}
@@ -1630,7 +1540,6 @@ int cfg_driver_identity(struct uilreq *urq, struct wl_private *lp)
urq->result = UIL_SUCCESS;
copy_to_user(urq->data, &(lp->driverIdentity), sizeof(lp->driverIdentity));
- DBG_LEAVE(DbgInfo);
return result;
} /* cfg_driver_identity */
/*============================================================================*/
@@ -1672,11 +1581,6 @@ int wvlan_set_netname(struct net_device *dev,
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_set_netname");
- DBG_ENTER(DbgInfo);
wl_lock(lp, &flags);
@@ -1687,7 +1591,6 @@ int wvlan_set_netname(struct net_device *dev,
wl_apply(lp);
wl_unlock(lp, &flags);
- DBG_LEAVE(DbgInfo);
return ret;
} /* wvlan_set_netname */
/*============================================================================*/
@@ -1724,11 +1627,6 @@ int wvlan_get_netname(struct net_device *dev,
int ret = 0;
int status = -1;
wvName_t *pName;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_get_netname");
- DBG_ENTER(DbgInfo);
wl_lock(lp, &flags);
@@ -1751,7 +1649,6 @@ int wvlan_get_netname(struct net_device *dev,
wl_unlock(lp, &flags);
- DBG_LEAVE(DbgInfo);
return ret;
} /* wvlan_get_netname */
/*============================================================================*/
@@ -1787,11 +1684,6 @@ int wvlan_set_station_nickname(struct net_device *dev,
unsigned long flags;
size_t len;
int ret = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_set_station_nickname");
- DBG_ENTER(DbgInfo);
wl_lock(lp, &flags);
@@ -1803,7 +1695,6 @@ int wvlan_set_station_nickname(struct net_device *dev,
wl_apply(lp);
wl_unlock(lp, &flags);
- DBG_LEAVE(DbgInfo);
return ret;
} /* wvlan_set_station_nickname */
/*============================================================================*/
@@ -1840,11 +1731,6 @@ int wvlan_get_station_nickname(struct net_device *dev,
int ret = 0;
int status = -1;
wvName_t *pName;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_get_station_nickname");
- DBG_ENTER(DbgInfo);
wl_lock(lp, &flags);
@@ -1867,7 +1753,6 @@ int wvlan_get_station_nickname(struct net_device *dev,
wl_unlock(lp, &flags);
/* out: */
- DBG_LEAVE(DbgInfo);
return ret;
} /* wvlan_get_station_nickname */
/*============================================================================*/
@@ -1903,11 +1788,6 @@ int wvlan_set_porttype(struct net_device *dev,
unsigned long flags;
int ret = 0;
hcf_16 portType;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_set_porttype");
- DBG_ENTER(DbgInfo);
wl_lock(lp, &flags);
@@ -1928,7 +1808,6 @@ out_unlock:
wl_unlock(lp, &flags);
/* out: */
- DBG_LEAVE(DbgInfo);
return ret;
}
@@ -1965,11 +1844,6 @@ int wvlan_get_porttype(struct net_device *dev,
int status = -1;
hcf_16 *pPortType;
__u32 *pData = (__u32 *)extra;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_get_porttype");
- DBG_ENTER(DbgInfo);
wl_lock(lp, &flags);
@@ -1990,7 +1864,6 @@ int wvlan_get_porttype(struct net_device *dev,
wl_unlock(lp, &flags);
/* out: */
- DBG_LEAVE(DbgInfo);
return ret;
} /* wvlan_get_porttype */
/*============================================================================*/
@@ -2023,12 +1896,6 @@ int wvlan_get_porttype(struct net_device *dev,
int wvlan_rts(struct rtsreq *rrq, __u32 io_base)
{
int ioctl_ret = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC("wvlan_rts");
- DBG_ENTER(DbgInfo);
-
DBG_PRINT("io_base: 0x%08x\n", io_base);
@@ -2060,7 +1927,6 @@ int wvlan_rts(struct rtsreq *rrq, __u32 io_base)
break;
}
- DBG_LEAVE(DbgInfo);
return ioctl_ret;
} /* wvlan_rts */
/*============================================================================*/
diff --git a/drivers/staging/wlags49_h2/wl_profile.c b/drivers/staging/wlags49_h2/wl_profile.c
index beabf5916df7..28cc5765e5c1 100644
--- a/drivers/staging/wlags49_h2/wl_profile.c
+++ b/drivers/staging/wlags49_h2/wl_profile.c
@@ -101,16 +101,11 @@
#include <wl_profile.h>
-/*******************************************************************************
- * global variables
- ******************************************************************************/
-
/* Definition needed to prevent unresolved external in unistd.h */
static int errno;
#if DBG
extern p_u32 DebugFlag;
-extern dbg_info_t *DbgInfo;
#endif
int parse_yes_no(char *value);
@@ -163,10 +158,6 @@ void parse_config(struct net_device *dev)
mm_segment_t fs;
struct wl_private *wvlan_config = NULL;
ENCSTRCT sEncryption;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC("parse_config");
- DBG_ENTER(DbgInfo);
/* Get the wavelan specific info for this device */
wvlan_config = dev->priv;
@@ -272,7 +263,6 @@ void parse_config(struct net_device *dev)
set_fs(fs); /* Return to the original context */
#endif /* BIN_DL */
- DBG_LEAVE(DbgInfo);
return;
} /* parse_config */
@@ -354,8 +344,6 @@ void translate_option(char *buffer, struct wl_private *lp)
u_char mac_value[ETH_ALEN];
/*------------------------------------------------------------------------*/
- DBG_FUNC("translate_option");
-
if (buffer == NULL || lp == NULL) {
DBG_ERROR(DbgInfo, "Config file buffer and/or wavelan buffer ptr NULL\n");
return;
@@ -959,10 +947,6 @@ void ParseConfigLine(char *pszLine, char **ppszLVal, char **ppszRVal)
{
int i;
int size;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC("ParseConfigLine");
- DBG_ENTER(DbgInfo);
/* get a snapshot of our string size */
size = strlen(pszLine);
@@ -1005,7 +989,6 @@ void ParseConfigLine(char *pszLine, char **ppszLVal, char **ppszRVal)
pszLine[i] = '\0';
}
}
- DBG_LEAVE(DbgInfo);
} /* ParseConfigLine */
/*============================================================================*/
diff --git a/drivers/staging/wlags49_h2/wl_util.c b/drivers/staging/wlags49_h2/wl_util.c
index 404ec7da0348..4ca6e42ecd7e 100644
--- a/drivers/staging/wlags49_h2/wl_util.c
+++ b/drivers/staging/wlags49_h2/wl_util.c
@@ -128,13 +128,6 @@ static const long chan_freq_list[][2] =
{161,5805}
};
-#if DBG
-extern dbg_info_t *DbgInfo;
-#endif /* DBG */
-
-
-
-
/*******************************************************************************
* dbm()
*******************************************************************************
@@ -481,10 +474,6 @@ void wl_hcf_error( struct net_device *dev, int hcfStatus )
******************************************************************************/
void wl_endian_translate_event( ltv_t *pLtv )
{
- DBG_FUNC( "wl_endian_translate_event" );
- DBG_ENTER( DbgInfo );
-
-
switch( pLtv->typ ) {
case CFG_TALLIES:
break;
@@ -582,9 +571,6 @@ void wl_endian_translate_event( ltv_t *pLtv )
default:
break;
}
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_endian_translate_event
/*============================================================================*/
@@ -997,10 +983,6 @@ int wl_get_chan_from_freq( long frequency )
void wl_process_link_status( struct wl_private *lp )
{
hcf_16 link_stat;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_process_link_status" );
- DBG_ENTER( DbgInfo );
if( lp != NULL ) {
//link_stat = lp->hcfCtx.IFB_DSLinkStat & CFG_LINK_STAT_FW;
@@ -1027,8 +1009,6 @@ void wl_process_link_status( struct wl_private *lp )
break;
}
}
- DBG_LEAVE( DbgInfo );
- return;
} // wl_process_link_status
/*============================================================================*/
@@ -1058,12 +1038,6 @@ void wl_process_probe_response( struct wl_private *lp )
PROBE_RESP *probe_rsp;
hcf_8 *wpa_ie = NULL;
hcf_16 wpa_ie_len = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wl_process_probe_response" );
- DBG_ENTER( DbgInfo );
-
if( lp != NULL ) {
probe_rsp = (PROBE_RESP *)&lp->ProbeResp;
@@ -1235,9 +1209,6 @@ void wl_process_probe_response( struct wl_private *lp )
}
}
}
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_process_probe_response
/*============================================================================*/
@@ -1263,10 +1234,6 @@ void wl_process_probe_response( struct wl_private *lp )
******************************************************************************/
void wl_process_updated_record( struct wl_private *lp )
{
- DBG_FUNC( "wl_process_updated_record" );
- DBG_ENTER( DbgInfo );
-
-
if( lp != NULL ) {
lp->updatedRecord.u.u16[0] = CNV_LITTLE_TO_INT( lp->updatedRecord.u.u16[0] );
@@ -1286,9 +1253,6 @@ void wl_process_updated_record( struct wl_private *lp )
lp->updatedRecord.u.u16[0] );
}
}
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_process_updated_record
/*============================================================================*/
@@ -1315,12 +1279,6 @@ void wl_process_updated_record( struct wl_private *lp )
void wl_process_assoc_status( struct wl_private *lp )
{
ASSOC_STATUS_STRCT *assoc_stat;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wl_process_assoc_status" );
- DBG_ENTER( DbgInfo );
-
if( lp != NULL ) {
assoc_stat = (ASSOC_STATUS_STRCT *)&lp->assoc_stat;
@@ -1353,9 +1311,6 @@ void wl_process_assoc_status( struct wl_private *lp )
assoc_stat->oldApAddr);
}
}
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_process_assoc_status
/*============================================================================*/
@@ -1382,12 +1337,6 @@ void wl_process_assoc_status( struct wl_private *lp )
void wl_process_security_status( struct wl_private *lp )
{
SECURITY_STATUS_STRCT *sec_stat;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wl_process_security_status" );
- DBG_ENTER( DbgInfo );
-
if( lp != NULL ) {
sec_stat = (SECURITY_STATUS_STRCT *)&lp->sec_stat;
@@ -1425,9 +1374,6 @@ void wl_process_security_status( struct wl_private *lp )
DBG_TRACE(DbgInfo, "Reason : 0x%04x\n", sec_stat->reason);
}
-
- DBG_LEAVE( DbgInfo );
- return;
} // wl_process_security_status
/*============================================================================*/
@@ -1438,9 +1384,6 @@ int wl_get_tallies(struct wl_private *lp,
int status;
CFG_HERMES_TALLIES_STRCT *pTallies;
- DBG_FUNC( "wl_get_tallies" );
- DBG_ENTER(DbgInfo);
-
/* Get the current tallies from the adapter */
lp->ltvRecord.len = 1 + HCF_TOT_TAL_CNT * sizeof(hcf_16);
lp->ltvRecord.typ = CFG_TALLIES;
@@ -1456,8 +1399,6 @@ int wl_get_tallies(struct wl_private *lp,
ret = -EFAULT;
}
- DBG_LEAVE( DbgInfo );
-
return ret;
}
diff --git a/drivers/staging/wlags49_h2/wl_version.h b/drivers/staging/wlags49_h2/wl_version.h
index 037b5266428c..bbc484a6b80f 100644
--- a/drivers/staging/wlags49_h2/wl_version.h
+++ b/drivers/staging/wlags49_h2/wl_version.h
@@ -115,42 +115,12 @@ err: define bus type;
#define DRV_VARIANT 2
#endif // HERMES25
-#ifdef BUS_PCMCIA
-#if defined HERMES25
-#define MODULE_NAME DRIVER_NAME "_h25_cs"
-#else
-#define MODULE_NAME DRIVER_NAME "_h2_cs"
-#endif /* HERMES25 */
-#elif defined BUS_PCI
-#if defined HERMES25
-#define MODULE_NAME DRIVER_NAME "_h25"
-#else
-#define MODULE_NAME DRIVER_NAME "_h2"
-#endif /* HERMES25 */
-#endif /* BUS_XXX */
-
-#ifdef DBG
-#define MODULE_DATE __DATE__ " " __TIME__
-#else
-#define MODULE_DATE "07/18/2004 13:30:00"
-#endif // DBG
-
-//#define STR2(m) #m
-//#define STR1(m) STR2(m)
-//#define MODULE_NAME STR1( MOD_NAME )
-
-#define VERSION_INFO MODULE_NAME " v" DRV_VERSION_STR \
- " for " BUS_TYPE ", " \
- MODULE_DATE " by " VENDOR_NAME
+#define VERSION_INFO KBUILD_MODNAME " v" DRV_VERSION_STR \
+ " for " BUS_TYPE ", by " VENDOR_NAME
/* The version of wireless extensions we support */
#define WIRELESS_SUPPORT 21
-//#define DBG_MOD_NAME DRIVER_NAME ":" BUS_TYPE ":" HW_TYPE ":" FW_TYPE
-#define DBG_MOD_NAME MODULE_NAME
-
-
-
/*******************************************************************************
* bus architecture specific defines, includes, etc.
******************************************************************************/
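For context on the wl_version.h hunk: KBUILD_MODNAME is a string macro that kbuild supplies on the compiler command line for every object of a module, so the per-bus MODULE_NAME #ifdef ladder and the MODULE_DATE stamp can go away and VERSION_INFO can be built by plain string-literal concatenation. A standalone sketch, with KBUILD_MODNAME defined by hand and placeholder values for the other macros (they are not the driver's real ones):

#include <stdio.h>

/* Normally injected by kbuild; defined here only so the sketch compiles. */
#define KBUILD_MODNAME	"wlags49_h2_cs"
#define DRV_VERSION_STR	"1.0"			/* placeholder */
#define BUS_TYPE	"PCMCIA"		/* placeholder */
#define VENDOR_NAME	"Example Vendor"	/* placeholder */

/* Adjacent string literals are concatenated by the compiler. */
#define VERSION_INFO KBUILD_MODNAME " v" DRV_VERSION_STR \
	" for " BUS_TYPE ", by " VENDOR_NAME

int main(void)
{
	puts(VERSION_INFO);
	return 0;
}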
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index c731ff2a6aa1..187fc060de26 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -76,14 +76,6 @@
#include <wl_wext.h>
#include <wl_priv.h>
-/*******************************************************************************
- * global definitions
- ******************************************************************************/
-#if DBG
-extern dbg_info_t *DbgInfo;
-#endif // DBG
-
-
/* Set up the LTV to program the appropriate key */
static int hermes_set_tkip_keys(ltv_t *ltv, u16 key_idx, u8 *addr,
int set_tx, u8 *seq, u8 *key, size_t key_len)
@@ -93,8 +85,6 @@ static int hermes_set_tkip_keys(ltv_t *ltv, u16 key_idx, u8 *addr,
hcf_8 tsc[IW_ENCODE_SEQ_MAX_SIZE] =
{ 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00 };
- DBG_ENTER(DbgInfo);
-
/*
* Check the key index here; if 0, load as Pairwise Key, otherwise,
* load as a group key. Note that for the Hermes, the RIDs for
@@ -163,7 +153,6 @@ static int hermes_set_tkip_keys(ltv_t *ltv, u16 key_idx, u8 *addr,
break;
}
- DBG_LEAVE(DbgInfo);
return ret;
}
@@ -327,10 +316,6 @@ static int wireless_commit(struct net_device *dev,
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wireless_commit" );
- DBG_ENTER(DbgInfo);
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -348,7 +333,6 @@ static int wireless_commit(struct net_device *dev,
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_commit
/*============================================================================*/
@@ -376,16 +360,12 @@ out:
******************************************************************************/
static int wireless_get_protocol(struct net_device *dev, struct iw_request_info *info, char *name, char *extra)
{
- DBG_FUNC( "wireless_get_protocol" );
- DBG_ENTER( DbgInfo );
-
/* Originally, the driver was placing the string "Wireless" here. However,
the wireless extensions (/linux/wireless.h) indicate this string should
describe the wireless protocol. */
strcpy(name, "IEEE 802.11b");
- DBG_LEAVE(DbgInfo);
return 0;
} // wireless_get_protocol
/*============================================================================*/
@@ -418,11 +398,6 @@ static int wireless_set_frequency(struct net_device *dev, struct iw_request_info
unsigned long flags;
int channel = 0;
int ret = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_set_frequency" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -431,7 +406,6 @@ static int wireless_set_frequency(struct net_device *dev, struct iw_request_info
if( !capable( CAP_NET_ADMIN )) {
ret = -EPERM;
- DBG_LEAVE( DbgInfo );
return ret;
}
@@ -473,7 +447,6 @@ static int wireless_set_frequency(struct net_device *dev, struct iw_request_info
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_frequency
/*============================================================================*/
@@ -505,11 +478,6 @@ static int wireless_get_frequency(struct net_device *dev, struct iw_request_info
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = -1;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_frequency" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -538,7 +506,6 @@ static int wireless_get_frequency(struct net_device *dev, struct iw_request_info
ret = (ret == HCF_SUCCESS ? 0 : -EFAULT);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_frequency
/*============================================================================*/
@@ -576,11 +543,6 @@ static int wireless_get_range(struct net_device *dev, struct iw_request_info *in
int count;
__u16 *pTxRate;
int retries = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_range" );
- DBG_ENTER( DbgInfo );
/* Set range information */
data->length = sizeof(struct iw_range);
@@ -748,7 +710,6 @@ out_unlock:
wl_unlock(lp, &flags);
- DBG_LEAVE(DbgInfo);
return ret;
} // wireless_get_range
/*============================================================================*/
@@ -781,11 +742,6 @@ static int wireless_get_bssid(struct net_device *dev, struct iw_request_info *in
#if 1 //;? (HCF_TYPE) & HCF_TYPE_STA
int status = -1;
#endif /* (HCF_TYPE) & HCF_TYPE_STA */
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_bssid" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -828,7 +784,6 @@ static int wireless_get_bssid(struct net_device *dev, struct iw_request_info *in
wl_unlock(lp, &flags);
out:
- DBG_LEAVE(DbgInfo);
return ret;
} // wireless_get_bssid
/*============================================================================*/
@@ -874,10 +829,6 @@ static int wireless_get_ap_list (struct net_device *dev, struct iw_request_info
#else
ProbeResult *p = &lp->probe_results;
#endif // WARP
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wireless_get_ap_list" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -977,7 +928,6 @@ static int wireless_get_ap_list (struct net_device *dev, struct iw_request_info
}
}
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_ap_list
/*============================================================================*/
@@ -1010,11 +960,6 @@ static int wireless_set_sensitivity(struct net_device *dev, struct iw_request_in
unsigned long flags;
int ret = 0;
int dens = sens->value;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_set_sensitivity" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1038,7 +983,6 @@ static int wireless_set_sensitivity(struct net_device *dev, struct iw_request_in
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_sensitivity
/*============================================================================*/
@@ -1069,12 +1013,6 @@ static int wireless_get_sensitivity(struct net_device *dev, struct iw_request_in
{
struct wl_private *lp = wl_priv(dev);
int ret = 0;
- /*------------------------------------------------------------------------*/
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_sensitivity" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1085,7 +1023,6 @@ static int wireless_get_sensitivity(struct net_device *dev, struct iw_request_in
sens->value = lp->DistanceBetweenAPs;
sens->fixed = 0; /* auto */
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_sensitivity
/*============================================================================*/
@@ -1119,15 +1056,12 @@ static int wireless_set_essid(struct net_device *dev, struct iw_request_info *in
unsigned long flags;
int ret = 0;
- DBG_FUNC( "wireless_set_essid" );
- DBG_ENTER( DbgInfo );
-
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
goto out;
}
- if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN + 1) {
+ if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN) {
ret = -EINVAL;
goto out;
}
@@ -1165,7 +1099,6 @@ static int wireless_set_essid(struct net_device *dev, struct iw_request_info *in
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_essid
/*============================================================================*/
@@ -1201,11 +1134,6 @@ static int wireless_get_essid(struct net_device *dev, struct iw_request_info *in
int ret = 0;
int status = -1;
wvName_t *pName;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_essid" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1300,7 +1228,6 @@ out_unlock:
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_essid
/*============================================================================*/
@@ -1335,8 +1262,6 @@ static int wireless_set_encode(struct net_device *dev, struct iw_request_info *i
int ret = 0;
bool enable = true;
- DBG_ENTER(DbgInfo);
-
if (lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
goto out;
@@ -1361,7 +1286,6 @@ static int wireless_set_encode(struct net_device *dev, struct iw_request_info *i
wl_unlock(lp, &flags);
out:
- DBG_LEAVE(DbgInfo);
return ret;
}
@@ -1391,11 +1315,7 @@ static int wireless_get_encode(struct net_device *dev, struct iw_request_info *i
unsigned long flags;
int ret = 0;
int index;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wireless_get_encode" );
- DBG_ENTER( DbgInfo );
DBG_NOTICE(DbgInfo, "GIWENCODE: encrypt: %d, ID: %d\n", lp->EnableEncryption, lp->TransmitKeyID);
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
@@ -1406,7 +1326,6 @@ static int wireless_get_encode(struct net_device *dev, struct iw_request_info *i
/* Only super-user can see WEP key */
if( !capable( CAP_NET_ADMIN )) {
ret = -EPERM;
- DBG_LEAVE( DbgInfo );
return ret;
}
@@ -1450,7 +1369,6 @@ out_unlock:
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_encode
/*============================================================================*/
@@ -1482,11 +1400,6 @@ static int wireless_set_nickname(struct net_device *dev, struct iw_request_info
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_set_nickname" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1496,7 +1409,6 @@ static int wireless_set_nickname(struct net_device *dev, struct iw_request_info
#if 0 //;? Needed, was present in original code but not in 7.18 Linux 2.6 kernel version
if( !capable(CAP_NET_ADMIN )) {
ret = -EPERM;
- DBG_LEAVE( DbgInfo );
return ret;
}
#endif
@@ -1523,7 +1435,6 @@ static int wireless_set_nickname(struct net_device *dev, struct iw_request_info
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_nickname
/*============================================================================*/
@@ -1557,11 +1468,6 @@ static int wireless_get_nickname(struct net_device *dev, struct iw_request_info
int ret = 0;
int status = -1;
wvName_t *pName;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_nickname" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1600,7 +1506,6 @@ static int wireless_get_nickname(struct net_device *dev, struct iw_request_info
wl_unlock(lp, &flags);
out:
- DBG_LEAVE(DbgInfo);
return ret;
} // wireless_get_nickname
/*============================================================================*/
@@ -1634,10 +1539,6 @@ static int wireless_set_porttype(struct net_device *dev, struct iw_request_info
int ret = 0;
hcf_16 portType;
hcf_16 createIBSS;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wireless_set_porttype" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1714,7 +1615,6 @@ static int wireless_set_porttype(struct net_device *dev, struct iw_request_info
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_porttype
/*============================================================================*/
@@ -1749,11 +1649,6 @@ static int wireless_get_porttype(struct net_device *dev, struct iw_request_info
int ret = 0;
int status = -1;
hcf_16 *pPortType;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_porttype" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1824,7 +1719,6 @@ static int wireless_get_porttype(struct net_device *dev, struct iw_request_info
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_porttype
/*============================================================================*/
@@ -1856,11 +1750,6 @@ static int wireless_set_power(struct net_device *dev, struct iw_request_info *in
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_set_power" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1872,8 +1761,6 @@ static int wireless_set_power(struct net_device *dev, struct iw_request_info *in
#if 0 //;? Needed, was present in original code but not in 7.18 Linux 2.6 kernel version
if( !capable( CAP_NET_ADMIN )) {
ret = -EPERM;
-
- DBG_LEAVE( DbgInfo );
return ret;
}
#endif
@@ -1897,7 +1784,6 @@ static int wireless_set_power(struct net_device *dev, struct iw_request_info *in
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_power
/*============================================================================*/
@@ -1930,9 +1816,6 @@ static int wireless_get_power(struct net_device *dev, struct iw_request_info *in
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wireless_get_power" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -1959,7 +1842,6 @@ static int wireless_get_power(struct net_device *dev, struct iw_request_info *in
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_power
/*============================================================================*/
@@ -1991,9 +1873,6 @@ static int wireless_get_tx_power(struct net_device *dev, struct iw_request_info
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
- /*------------------------------------------------------------------------*/
- DBG_FUNC( "wireless_get_tx_power" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -2019,7 +1898,6 @@ static int wireless_get_tx_power(struct net_device *dev, struct iw_request_info
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_tx_power
/*============================================================================*/
@@ -2052,11 +1930,6 @@ static int wireless_set_rts_threshold (struct net_device *dev, struct iw_request
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int rthr = rts->value;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_set_rts_threshold" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -2090,7 +1963,6 @@ static int wireless_set_rts_threshold (struct net_device *dev, struct iw_request
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_rts_threshold
/*============================================================================*/
@@ -2122,10 +1994,6 @@ static int wireless_get_rts_threshold (struct net_device *dev, struct iw_request
int ret = 0;
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wireless_get_rts_threshold" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -2147,7 +2015,6 @@ static int wireless_get_rts_threshold (struct net_device *dev, struct iw_request
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_rts_threshold
/*============================================================================*/
@@ -2184,11 +2051,6 @@ static int wireless_set_rate(struct net_device *dev, struct iw_request_info *inf
int status = -1;
int index = 0;
#endif // WARP
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_set_rate" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -2214,7 +2076,6 @@ static int wireless_set_rate(struct net_device *dev, struct iw_request_info *inf
DBG_PRINT( "Index: %d\n", index );
} else {
DBG_ERROR( DbgInfo, "Could not determine radio frequency\n" );
- DBG_LEAVE( DbgInfo );
ret = -EINVAL;
goto out_unlock;
}
@@ -2375,7 +2236,6 @@ out_unlock:
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_rate
/*============================================================================*/
@@ -2410,11 +2270,6 @@ static int wireless_get_rate(struct net_device *dev, struct iw_request_info *inf
int ret = 0;
int status = -1;
hcf_16 txRate;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_rate" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -2490,7 +2345,6 @@ static int wireless_get_rate(struct net_device *dev, struct iw_request_info *inf
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_rate
/*============================================================================*/
@@ -2522,11 +2376,6 @@ out:
int wireless_get_private_interface( struct iwreq *wrq, struct wl_private *lp )
{
int ret = 0;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_private_interface" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -2547,10 +2396,8 @@ int wireless_get_private_interface( struct iwreq *wrq, struct wl_private *lp )
/* Verify the user buffer */
ret = verify_area( VERIFY_WRITE, wrq->u.data.pointer, sizeof( priv ));
- if( ret != 0 ) {
- DBG_LEAVE( DbgInfo );
+ if( ret != 0 )
return ret;
- }
/* Copy the data into the user's buffer */
wrq->u.data.length = NELEM( priv );
@@ -2558,7 +2405,6 @@ int wireless_get_private_interface( struct iwreq *wrq, struct wl_private *lp )
}
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_private_interface
/*============================================================================*/
@@ -2592,13 +2438,9 @@ static int wireless_set_scan(struct net_device *dev, struct iw_request_info *inf
int ret = 0;
int status = -1;
int retries = 0;
- /*------------------------------------------------------------------------*/
//;? Note: shows results as trace, returns always 0 unless BUSY
- DBG_FUNC( "wireless_set_scan" );
- DBG_ENTER( DbgInfo );
-
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
goto out;
@@ -2694,7 +2536,6 @@ retry:
wl_unlock(lp, &flags);
out:
- DBG_LEAVE(DbgInfo);
return ret;
} // wireless_set_scan
/*============================================================================*/
@@ -2734,11 +2575,6 @@ static int wireless_get_scan(struct net_device *dev, struct iw_request_info *inf
hcf_8 msg[512];
hcf_8 *wpa_ie;
hcf_16 wpa_ie_len;
- /*------------------------------------------------------------------------*/
-
-
- DBG_FUNC( "wireless_get_scan" );
- DBG_ENTER( DbgInfo );
if(lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
@@ -2888,7 +2724,6 @@ out_unlock:
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_get_scan
/*============================================================================*/
@@ -2923,9 +2758,6 @@ static int wireless_set_auth(struct net_device *dev,
int iwa_idx = data->flags & IW_AUTH_INDEX;
int iwa_val = data->value;
- DBG_FUNC( "wireless_set_auth" );
- DBG_ENTER( DbgInfo );
-
if (lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
goto out;
@@ -3038,7 +2870,6 @@ static int wireless_set_auth(struct net_device *dev,
wl_unlock(lp, &flags);
out:
- DBG_LEAVE( DbgInfo );
return ret;
} // wireless_set_auth
/*============================================================================*/
@@ -3087,8 +2918,6 @@ static int wireless_set_encodeext(struct net_device *dev,
bool enable = true;
bool set_tx = false;
- DBG_ENTER(DbgInfo);
-
if (lp->portState == WVLAN_PORT_STATE_DISABLED) {
ret = -EBUSY;
goto out;
@@ -3114,7 +2943,6 @@ static int wireless_set_encodeext(struct net_device *dev,
if (sizeof(ext->rx_seq) != 8) {
DBG_TRACE(DbgInfo, "rx_seq size mismatch\n");
- DBG_LEAVE(DbgInfo);
ret = -EINVAL;
goto out_unlock;
}
@@ -3188,7 +3016,6 @@ out_unlock:
wl_unlock(lp, &flags);
out:
- DBG_LEAVE(DbgInfo);
return ret;
}
/*============================================================================*/
@@ -3202,13 +3029,10 @@ static int wireless_set_genie(struct net_device *dev,
{
int ret = 0;
- DBG_ENTER(DbgInfo);
-
/* We can't write this to the card, but apparently this
* operation needs to succeed */
ret = 0;
- DBG_LEAVE(DbgInfo);
return ret;
}
/*============================================================================*/
@@ -3237,11 +3061,7 @@ struct iw_statistics * wl_wireless_stats( struct net_device *dev )
{
struct iw_statistics *pStats;
struct wl_private *lp = wl_priv(dev);
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_wireless_stats" );
- DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
pStats = NULL;
@@ -3302,7 +3122,6 @@ struct iw_statistics * wl_wireless_stats( struct net_device *dev )
}
}
- DBG_LEAVE( DbgInfo );
return pStats;
} // wl_wireless_stats
/*============================================================================*/
@@ -3336,10 +3155,6 @@ struct iw_statistics * wl_get_wireless_stats( struct net_device *dev )
unsigned long flags;
struct wl_private *lp = wl_priv(dev);
struct iw_statistics *pStats = NULL;
- /*------------------------------------------------------------------------*/
-
- DBG_FUNC( "wl_get_wireless_stats" );
- DBG_ENTER(DbgInfo);
wl_lock( lp, &flags );
@@ -3357,7 +3172,6 @@ struct iw_statistics * wl_get_wireless_stats( struct net_device *dev )
wl_unlock(lp, &flags);
- DBG_LEAVE( DbgInfo );
return pStats;
} // wl_get_wireless_stats
diff --git a/drivers/staging/wlags49_h2/wl_wext.h b/drivers/staging/wlags49_h2/wl_wext.h
index 029da52c4c49..4a85dc889a12 100644
--- a/drivers/staging/wlags49_h2/wl_wext.h
+++ b/drivers/staging/wlags49_h2/wl_wext.h
@@ -85,4 +85,4 @@ void wl_wext_event_assoc_ie( struct net_device *dev );
extern const struct iw_handler_def wl_iw_handler_def;
-#endif // __WL_WEXT_H__
+#endif /* __WL_WEXT_H__ */
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index a4fd5c4717a8..a7d24c95191d 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -73,7 +73,8 @@ static int prism2_result2err(int prism2_result)
static int prism2_domibset_uint32(wlandevice_t *wlandev, u32 did, u32 data)
{
struct p80211msg_dot11req_mibset msg;
- p80211item_uint32_t *mibitem = (p80211item_uint32_t *) &msg.mibattribute.data;
+ p80211item_uint32_t *mibitem =
+ (p80211item_uint32_t *) &msg.mibattribute.data;
msg.msgcode = DIDmsg_dot11req_mibset;
mibitem->did = did;
@@ -86,7 +87,8 @@ static int prism2_domibset_pstr32(wlandevice_t *wlandev,
u32 did, u8 len, u8 *data)
{
struct p80211msg_dot11req_mibset msg;
- p80211item_pstr32_t *mibitem = (p80211item_pstr32_t *) &msg.mibattribute.data;
+ p80211item_pstr32_t *mibitem =
+ (p80211item_pstr32_t *) &msg.mibattribute.data;
msg.msgcode = DIDmsg_dot11req_mibset;
mibitem->did = did;
@@ -182,7 +184,8 @@ static int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
goto exit;
}
- result = prism2_domibset_pstr32(wlandev, did, params->key_len, params->key);
+ result = prism2_domibset_pstr32(wlandev, did,
+ params->key_len, params->key);
if (result)
goto exit;
break;
@@ -328,7 +331,8 @@ static int prism2_get_station(struct wiphy *wiphy, struct net_device *dev,
return result;
}
-static int prism2_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
+static int prism2_scan(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request)
{
struct net_device *dev;
struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
@@ -380,7 +384,8 @@ static int prism2_scan(struct wiphy *wiphy, struct cfg80211_scan_request *reques
(i < request->n_channels) && i < ARRAY_SIZE(prism2_channels);
i++)
msg1.channellist.data.data[i] =
- ieee80211_frequency_to_channel(request->channels[i]->center_freq);
+ ieee80211_frequency_to_channel(
+ request->channels[i]->center_freq);
msg1.channellist.data.len = request->n_channels;
msg1.maxchanneltime.data = 250;
@@ -410,7 +415,8 @@ static int prism2_scan(struct wiphy *wiphy, struct cfg80211_scan_request *reques
ie_len = ie_buf[1] + 2;
memcpy(&ie_buf[2], &(msg2.ssid.data.data), msg2.ssid.data.len);
bss = cfg80211_inform_bss(wiphy,
- ieee80211_get_channel(wiphy, ieee80211_dsss_chan_to_freq(msg2.dschannel.data)),
+ ieee80211_get_channel(wiphy,
+ ieee80211_dsss_chan_to_freq(msg2.dschannel.data)),
(const u8 *) &(msg2.bssid.data.data),
msg2.timestamp.data, msg2.capinfo.data,
msg2.beaconperiod.data,
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 3dfa85ccc504..333a2f693e49 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -350,10 +350,10 @@ PD Record codes
/*-------------------------------------------------------------*/
/* Commonly used basic types */
-typedef struct hfa384x_bytestr {
+struct hfa384x_bytestr {
u16 len;
u8 data[0];
-} __packed hfa384x_bytestr_t;
+} __packed;
typedef struct hfa384x_bytestr32 {
u16 len;
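The hfa384x.h hunk drops the typedef in favour of plain struct hfa384x_bytestr, matching kernel style, while keeping the same length-prefixed layout: a 16-bit count followed by the bytes themselves. A userspace sketch of that layout and how a caller might build one (types and allocation simplified; this is not the driver's code):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Length-prefixed byte string, as modelled by struct hfa384x_bytestr. */
struct bytestr {
	uint16_t len;
	uint8_t data[];		/* trailing payload, 'data[0]' in the old header */
};

static struct bytestr *bytestr_from(const void *src, uint16_t len)
{
	struct bytestr *bs = malloc(sizeof(*bs) + len);

	if (!bs)
		return NULL;
	bs->len = len;
	memcpy(bs->data, src, len);
	return bs;
}

int main(void)
{
	struct bytestr *bs = bytestr_from("ssid-example", 12);

	free(bs);
	return 0;
}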
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index d22db43e8031..a9909f6b0001 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -525,7 +525,7 @@ int prism2mgmt_start(wlandevice_t *wlandev, void *msgp)
p80211pstrd_t *pstr;
u8 bytebuf[80];
- hfa384x_bytestr_t *p2bytestr = (hfa384x_bytestr_t *) bytebuf;
+ struct hfa384x_bytestr *p2bytestr = (struct hfa384x_bytestr *) bytebuf;
u16 word;
wlandev->macmode = WLAN_MACMODE_NONE;
@@ -1019,7 +1019,7 @@ int prism2mgmt_autojoin(wlandevice_t *wlandev, void *msgp)
struct p80211msg_lnxreq_autojoin *msg = msgp;
p80211pstrd_t *pstr;
u8 bytebuf[256];
- hfa384x_bytestr_t *p2bytestr = (hfa384x_bytestr_t *) bytebuf;
+ struct hfa384x_bytestr *p2bytestr = (struct hfa384x_bytestr *) bytebuf;
wlandev->macmode = WLAN_MACMODE_NONE;
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index 07eecebeb6cc..190d390c8490 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -92,8 +92,10 @@ void prism2mgmt_pstr2bytearea(u8 *bytearea, p80211pstrd_t *pstr);
void prism2mgmt_bytearea2pstr(u8 *bytearea, p80211pstrd_t *pstr, int len);
/* byte string conversion functions*/
-void prism2mgmt_pstr2bytestr(hfa384x_bytestr_t *bytestr, p80211pstrd_t *pstr);
-void prism2mgmt_bytestr2pstr(hfa384x_bytestr_t *bytestr, p80211pstrd_t *pstr);
+void prism2mgmt_pstr2bytestr(struct hfa384x_bytestr *bytestr,
+ p80211pstrd_t *pstr);
+void prism2mgmt_bytestr2pstr(struct hfa384x_bytestr *bytestr,
+ p80211pstrd_t *pstr);
/* functions to convert Group Addresses */
void prism2mgmt_get_grpaddr(u32 did, p80211pstrd_t *pstr, hfa384x_t *priv);
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index d3a06fa0b4f6..9b5f3b72d3ca 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -763,7 +763,8 @@ static int prism2mib_priv(struct mibrec *mib,
*
----------------------------------------------------------------*/
-void prism2mgmt_pstr2bytestr(hfa384x_bytestr_t *bytestr, p80211pstrd_t *pstr)
+void prism2mgmt_pstr2bytestr(struct hfa384x_bytestr *bytestr,
+ p80211pstrd_t *pstr)
{
bytestr->len = cpu_to_le16((u16) (pstr->len));
memcpy(bytestr->data, pstr->data, pstr->len);
@@ -804,7 +805,8 @@ void prism2mgmt_pstr2bytearea(u8 *bytearea, p80211pstrd_t *pstr)
*
----------------------------------------------------------------*/
-void prism2mgmt_bytestr2pstr(hfa384x_bytestr_t *bytestr, p80211pstrd_t *pstr)
+void prism2mgmt_bytestr2pstr(struct hfa384x_bytestr *bytestr,
+ p80211pstrd_t *pstr)
{
pstr->len = (u8) (le16_to_cpu((u16) (bytestr->len)));
memcpy(pstr->data, bytestr->data, pstr->len);
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index 76374b220228..2199f5afbf90 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -55,7 +55,6 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/wireless.h>
#include <linux/netdevice.h>
@@ -1279,7 +1278,7 @@ void prism2sta_processing_defer(struct work_struct *data)
HFA384x_RID_CURRENTSSID, result);
return;
}
- prism2mgmt_bytestr2pstr((hfa384x_bytestr_t *) &ssid,
+ prism2mgmt_bytestr2pstr((struct hfa384x_bytestr *) &ssid,
(p80211pstrd_t *) &
wlandev->ssid);
@@ -1361,7 +1360,7 @@ void prism2sta_processing_defer(struct work_struct *data)
HFA384x_RID_CURRENTSSID, result);
return;
}
- prism2mgmt_bytestr2pstr((hfa384x_bytestr_t *) &ssid,
+ prism2mgmt_bytestr2pstr((struct hfa384x_bytestr *) &ssid,
(p80211pstrd_t *) &wlandev->ssid);
hw->link_status = HFA384x_LINK_CONNECTED;
@@ -2037,7 +2036,7 @@ void prism2sta_commsqual_defer(struct work_struct *data)
HFA384x_RID_CURRENTSSID, result);
return;
}
- prism2mgmt_bytestr2pstr((hfa384x_bytestr_t *) &ssid,
+ prism2mgmt_bytestr2pstr((struct hfa384x_bytestr *) &ssid,
(p80211pstrd_t *) &wlandev->ssid);
/* Reschedule timer */
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index b401974fb282..4739c14d8359 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -140,11 +140,9 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
prism2_reset_holdtime,
prism2_reset_settletime, 0);
if (result != 0) {
- unregister_wlandev(wlandev);
- hfa384x_destroy(hw);
result = -EIO;
dev_err(&interface->dev, "hfa384x_corereset() failed.\n");
- goto failed;
+ goto failed_reset;
}
}
@@ -159,11 +157,15 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
if (register_wlandev(wlandev) != 0) {
dev_err(&interface->dev, "register_wlandev() failed.\n");
result = -EIO;
- goto failed;
+ goto failed_register;
}
goto done;
+failed_register:
+ usb_put_dev(dev);
+failed_reset:
+ wlan_unsetup(wlandev);
failed:
kfree(wlandev);
kfree(hw);
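The prism2usb probe fix above is an instance of the kernel's goto-unwind idiom: each failure label releases only what had already been set up, in reverse order, so a failed reset no longer tears down a wlandev that was never registered. A generic, self-contained sketch of the pattern (resource names are placeholders, not the prism2usb ones):

#include <errno.h>
#include <stdlib.h>

struct ctx { void *probe_buf; void *reg_state; };

static int probe(struct ctx *c)
{
	int err;

	c->probe_buf = malloc(64);	/* step 1 */
	if (!c->probe_buf) {
		err = -ENOMEM;
		goto failed;
	}

	c->reg_state = malloc(16);	/* step 2: "register" */
	if (!c->reg_state) {
		err = -ENOMEM;
		goto failed_register;
	}

	return 0;

failed_register:			/* undo step 1 only */
	free(c->probe_buf);
failed:
	return err;
}

int main(void)
{
	struct ctx c = { 0 };
	int rc = probe(&c);

	if (!rc) {			/* normal teardown mirrors the unwind order */
		free(c.reg_state);
		free(c.probe_buf);
	}
	return rc ? 1 : 0;
}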
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index c033da408a58..95ce9708cec9 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -11,7 +11,7 @@
#define PCI_DEVICE_ID_XGI_27 0x027
#endif
-static DEFINE_PCI_DEVICE_TABLE(xgifb_pci_table) = {
+static const struct pci_device_id xgifb_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_20)},
{PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_27)},
{PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_40)},
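Both ID-table hunks (xgifb here and xillybus_pcie below) replace the deprecated DEFINE_PCI_DEVICE_TABLE macro with a plain const struct pci_device_id array. Such a table is simply a sentinel-terminated array that the PCI core walks when matching devices; a simplified userspace sketch of that structure and the matching walk (field set reduced to vendor/device, and the numeric IDs are placeholders):

#include <stdint.h>
#include <stdio.h>

struct pci_id {
	uint16_t vendor;
	uint16_t device;
};

/* Sentinel-terminated match table, analogous to xgifb_pci_table[]. */
static const struct pci_id id_table[] = {
	{ 0x18ca, 0x0020 },	/* placeholder vendor/device values */
	{ 0x18ca, 0x0040 },
	{ 0, 0 }		/* all-zero entry terminates the table */
};

static int table_matches(uint16_t vendor, uint16_t device)
{
	const struct pci_id *id;

	for (id = id_table; id->vendor || id->device; id++)
		if (id->vendor == vendor && id->device == device)
			return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", table_matches(0x18ca, 0x0040));
	return 0;
}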
diff --git a/drivers/staging/xillybus/Kconfig b/drivers/staging/xillybus/Kconfig
index b15f778b4c68..75c38c8c26eb 100644
--- a/drivers/staging/xillybus/Kconfig
+++ b/drivers/staging/xillybus/Kconfig
@@ -4,7 +4,7 @@
config XILLYBUS
tristate "Xillybus generic FPGA interface"
- depends on PCI || (OF_ADDRESS && OF_IRQ) && m
+ depends on PCI || (OF_ADDRESS && OF_IRQ)
help
Xillybus is a generic interface for peripherals designed on
programmable logic (FPGA). The driver probes the hardware for
diff --git a/drivers/staging/xillybus/xillybus_of.c b/drivers/staging/xillybus/xillybus_of.c
index 394bfea1af6e..23a609b0ab1d 100644
--- a/drivers/staging/xillybus/xillybus_of.c
+++ b/drivers/staging/xillybus/xillybus_of.c
@@ -31,7 +31,8 @@ static const char xillyname[] = "xillybus_of";
/* Match table for of_platform binding */
static struct of_device_id xillybus_of_match[] = {
- { .compatible = "xlnx,xillybus-1.00.a", },
+ { .compatible = "xillybus,xillybus-1.00.a", },
+ { .compatible = "xlnx,xillybus-1.00.a", }, /* Deprecated */
{}
};
@@ -53,6 +54,13 @@ static void xilly_dma_sync_single_for_device_of(struct xilly_endpoint *ep,
dma_sync_single_for_device(ep->dev, dma_handle, size, direction);
}
+static void xilly_dma_sync_single_nop(struct xilly_endpoint *ep,
+ dma_addr_t dma_handle,
+ size_t size,
+ int direction)
+{
+}
+
static dma_addr_t xilly_map_single_of(struct xilly_cleanup *mem,
struct xilly_endpoint *ep,
void *ptr,
@@ -101,14 +109,26 @@ static struct xilly_endpoint_hardware of_hw = {
.unmap_single = xilly_unmap_single_of
};
+static struct xilly_endpoint_hardware of_hw_coherent = {
+ .owner = THIS_MODULE,
+ .hw_sync_sgl_for_cpu = xilly_dma_sync_single_nop,
+ .hw_sync_sgl_for_device = xilly_dma_sync_single_nop,
+ .map_single = xilly_map_single_of,
+ .unmap_single = xilly_unmap_single_of
+};
+
static int xilly_drv_probe(struct platform_device *op)
{
struct device *dev = &op->dev;
struct xilly_endpoint *endpoint;
int rc = 0;
int irq;
+ struct xilly_endpoint_hardware *ephw = &of_hw;
- endpoint = xillybus_init_endpoint(NULL, dev, &of_hw);
+ if (of_property_read_bool(dev->of_node, "dma-coherent"))
+ ephw = &of_hw_coherent;
+
+ endpoint = xillybus_init_endpoint(NULL, dev, ephw);
if (!endpoint)
return -ENOMEM;
@@ -131,10 +151,10 @@ static int xilly_drv_probe(struct platform_device *op)
}
endpoint->registers = of_iomap(dev->of_node, 0);
-
if (!endpoint->registers) {
dev_err(endpoint->dev,
"Failed to map I/O memory. Aborting.\n");
+ rc = -EIO;
goto failed_iomap0;
}
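The xillybus_of.c change selects a different set of endpoint callbacks when the device tree marks the device as dma-coherent: the streaming-DMA sync hooks become no-ops because the interconnect keeps caches consistent. A minimal sketch of that ops-table selection pattern, with dt_is_coherent() standing in for the of_property_read_bool() test (names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sync_ops {
	void (*sync_for_cpu)(void *buf, size_t len);
	void (*sync_for_device)(void *buf, size_t len);
};

static void sync_real(void *buf, size_t len)
{
	printf("cache maintenance on %zu bytes\n", len);
}

static void sync_nop(void *buf, size_t len)
{
	/* dma-coherent: hardware keeps caches consistent, nothing to do */
}

static const struct sync_ops noncoherent_ops = { sync_real, sync_real };
static const struct sync_ops coherent_ops    = { sync_nop,  sync_nop  };

/* Stand-in for of_property_read_bool(dev->of_node, "dma-coherent"). */
static bool dt_is_coherent(void) { return true; }

int main(void)
{
	const struct sync_ops *ops =
		dt_is_coherent() ? &coherent_ops : &noncoherent_ops;
	char buf[32];

	ops->sync_for_device(buf, sizeof(buf));
	return 0;
}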
diff --git a/drivers/staging/xillybus/xillybus_pcie.c b/drivers/staging/xillybus/xillybus_pcie.c
index 1811aa764213..51426d80ca7b 100644
--- a/drivers/staging/xillybus/xillybus_pcie.c
+++ b/drivers/staging/xillybus/xillybus_pcie.c
@@ -30,7 +30,7 @@ MODULE_LICENSE("GPL v2");
static const char xillyname[] = "xillybus_pcie";
-static DEFINE_PCI_DEVICE_TABLE(xillyids) = {
+static const struct pci_device_id xillyids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_XILLYBUS)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTERA, PCI_DEVICE_ID_XILLYBUS)},
{PCI_DEVICE(PCI_VENDOR_ID_ACTEL, PCI_DEVICE_ID_XILLYBUS)},
@@ -168,9 +168,9 @@ static int xilly_probe(struct pci_dev *pdev,
}
endpoint->registers = pci_iomap(pdev, 0, 128);
-
if (!endpoint->registers) {
dev_err(endpoint->dev, "Failed to map BAR 0. Aborting.\n");
+ rc = -EIO;
goto failed_iomap0;
}
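Both iomap hunks (xillybus_of.c above and xillybus_pcie.c here) fix the same class of bug: the error path was taken without updating rc, so a mapping failure could be reported as success. A self-contained sketch of the before/after behaviour, using an assumed map_registers() helper rather than a real xillybus function:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Assumed helper that simulates a failed I/O mapping. */
static void *map_registers(void)
{
	return NULL;
}

static int probe(void)
{
	int rc = 0;		/* still 0 from the successful steps before */
	void *regs = map_registers();

	if (!regs) {
		rc = -EIO;	/* the fix: record the failure before bailing out */
		goto failed_iomap;
	}

	/* ... rest of probe ... */
	return 0;

failed_iomap:
	return rc;		/* without the assignment above this returned 0 */
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}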
diff --git a/drivers/staging/zram/zram.txt b/drivers/staging/zram/zram.txt
deleted file mode 100644
index 765d790ae831..000000000000
--- a/drivers/staging/zram/zram.txt
+++ /dev/null
@@ -1,77 +0,0 @@
-zram: Compressed RAM based block devices
-----------------------------------------
-
-Project home: http://compcache.googlecode.com/
-
-* Introduction
-
-The zram module creates RAM based block devices named /dev/zram<id>
-(<id> = 0, 1, ...). Pages written to these disks are compressed and stored
-in memory itself. These disks allow very fast I/O and compression provides
-good amounts of memory savings. Some of the usecases include /tmp storage,
-use as swap disks, various caches under /var and maybe many more :)
-
-Statistics for individual zram devices are exported through sysfs nodes at
-/sys/block/zram<id>/
-
-* Usage
-
-Following shows a typical sequence of steps for using zram.
-
-1) Load Module:
- modprobe zram num_devices=4
- This creates 4 devices: /dev/zram{0,1,2,3}
- (num_devices parameter is optional. Default: 1)
-
-2) Set Disksize
- Set disk size by writing the value to sysfs node 'disksize'.
- The value can be either in bytes or you can use mem suffixes.
- Examples:
- # Initialize /dev/zram0 with 50MB disksize
- echo $((50*1024*1024)) > /sys/block/zram0/disksize
-
- # Using mem suffixes
- echo 256K > /sys/block/zram0/disksize
- echo 512M > /sys/block/zram0/disksize
- echo 1G > /sys/block/zram0/disksize
-
-3) Activate:
- mkswap /dev/zram0
- swapon /dev/zram0
-
- mkfs.ext4 /dev/zram1
- mount /dev/zram1 /tmp
-
-4) Stats:
- Per-device statistics are exported as various nodes under
- /sys/block/zram<id>/
- disksize
- num_reads
- num_writes
- invalid_io
- notify_free
- discard
- zero_pages
- orig_data_size
- compr_data_size
- mem_used_total
-
-5) Deactivate:
- swapoff /dev/zram0
- umount /dev/zram1
-
-6) Reset:
- Write any positive value to 'reset' sysfs node
- echo 1 > /sys/block/zram0/reset
- echo 1 > /sys/block/zram1/reset
-
- This frees all the memory allocated for the given device and
- resets the disksize to zero. You must set the disksize again
- before reusing the device.
-
-Please report any problems at:
- - Mailing list: linux-mm-cc at laptop dot org
- - Issue tracker: http://code.google.com/p/compcache/issues/list
-
-Nitin Gupta
-ngupta@vflare.org
diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig
deleted file mode 100644
index 0ae13cd0908e..000000000000
--- a/drivers/staging/zsmalloc/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-config ZSMALLOC
- bool "Memory allocator for compressed pages"
- depends on MMU
- default n
- help
- zsmalloc is a slab-based memory allocator designed to store
- compressed RAM pages. zsmalloc uses virtual memory mapping
- in order to reduce fragmentation. However, this results in a
- non-standard allocator interface where a handle, not a pointer, is
- returned by an alloc(). This handle must be mapped in order to
- access the allocated space.
diff --git a/drivers/staging/zsmalloc/Makefile b/drivers/staging/zsmalloc/Makefile
deleted file mode 100644
index b134848a590d..000000000000
--- a/drivers/staging/zsmalloc/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-zsmalloc-y := zsmalloc-main.o
-
-obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
deleted file mode 100644
index 3b950e5a918f..000000000000
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ /dev/null
@@ -1,1072 +0,0 @@
-/*
- * zsmalloc memory allocator
- *
- * Copyright (C) 2011 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the license that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- */
-
-
-/*
- * This allocator is designed for use with zcache and zram. Thus, the
- * allocator is supposed to work well under low memory conditions. In
- * particular, it never attempts higher order page allocation which is
- * very likely to fail under memory pressure. On the other hand, if we
- * just use single (0-order) pages, it would suffer from very high
- * fragmentation -- any object of size PAGE_SIZE/2 or larger would occupy
- * an entire page. This was one of the major issues with its predecessor
- * (xvmalloc).
- *
- * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
- * and links them together using various 'struct page' fields. These linked
- * pages act as a single higher-order page i.e. an object can span 0-order
- * page boundaries. The code refers to these linked pages as a single entity
- * called zspage.
- *
- * Following is how we use various fields and flags of underlying
- * struct page(s) to form a zspage.
- *
- * Usage of struct page fields:
- * page->first_page: points to the first component (0-order) page
- * page->index (union with page->freelist): offset of the first object
- * starting in this page. For the first page, this is
- * always 0, so we use this field (aka freelist) to point
- * to the first free object in zspage.
- * page->lru: links together all component pages (except the first page)
- * of a zspage
- *
- * For _first_ page only:
- *
- * page->private (union with page->first_page): refers to the
- * component page after the first page
- * page->freelist: points to the first free object in zspage.
- * Free objects are linked together using in-place
- * metadata.
- * page->objects: maximum number of objects we can store in this
- * zspage (class->zspage_order * PAGE_SIZE / class->size)
- * page->lru: links together first pages of various zspages.
- * Basically forming list of zspages in a fullness group.
- * page->mapping: class index and fullness group of the zspage
- *
- * Usage of struct page flags:
- * PG_private: identifies the first component page
- * PG_private2: identifies the last component page
- *
- */
-
-#ifdef CONFIG_ZSMALLOC_DEBUG
-#define DEBUG
-#endif
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/errno.h>
-#include <linux/highmem.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
-#include <linux/cpumask.h>
-#include <linux/cpu.h>
-#include <linux/vmalloc.h>
-#include <linux/hardirq.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include "zsmalloc.h"
-
-/*
- * This must be power of 2 and greater than or equal to sizeof(link_free).
- * These two conditions ensure that any 'struct link_free' itself doesn't
- * span more than 1 page which avoids complex case of mapping 2 pages simply
- * to restore link_free pointer values.
- */
-#define ZS_ALIGN 8
-
-/*
- * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
- * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
- */
-#define ZS_MAX_ZSPAGE_ORDER 2
-#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
-
-/*
- * Object location (<PFN>, <obj_idx>) is encoded as
- * a single (void *) handle value.
- *
- * Note that object index <obj_idx> is relative to system
- * page <PFN> it is stored in, so for each sub-page belonging
- * to a zspage, obj_idx starts with 0.
- *
- * This is made more complicated by various memory models and PAE.
- */
-
-#ifndef MAX_PHYSMEM_BITS
-#ifdef CONFIG_HIGHMEM64G
-#define MAX_PHYSMEM_BITS 36
-#else /* !CONFIG_HIGHMEM64G */
-/*
- * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
- * be PAGE_SHIFT
- */
-#define MAX_PHYSMEM_BITS BITS_PER_LONG
-#endif
-#endif
-#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
-#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS)
-#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
-
-#define MAX(a, b) ((a) >= (b) ? (a) : (b))
-/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
-#define ZS_MIN_ALLOC_SIZE \
- MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
-#define ZS_MAX_ALLOC_SIZE PAGE_SIZE
-
-/*
- * On systems with 4K page size, this gives 254 size classes! There is a
- * trade-off here:
- * - Large number of size classes is potentially wasteful as free pages are
- * spread across these classes
- * - Small number of size classes causes large internal fragmentation
- * - Probably it's better to use specific size classes (empirically
- * determined). NOTE: all those class sizes must be set as multiple of
- * ZS_ALIGN to make sure link_free itself never has to span 2 pages.
- *
- * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
- * (reason above)
- */
-#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> 8)
-#define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
- ZS_SIZE_CLASS_DELTA + 1)
-
-/*
- * We do not maintain any list for completely empty or full pages
- */
-enum fullness_group {
- ZS_ALMOST_FULL,
- ZS_ALMOST_EMPTY,
- _ZS_NR_FULLNESS_GROUPS,
-
- ZS_EMPTY,
- ZS_FULL
-};
-
-/*
- * We assign a page to ZS_ALMOST_EMPTY fullness group when:
- * n <= N / f, where
- * n = number of allocated objects
- * N = total number of objects zspage can store
- * f = 1/fullness_threshold_frac
- *
- * Similarly, we assign zspage to:
- * ZS_ALMOST_FULL when n > N / f
- * ZS_EMPTY when n == 0
- * ZS_FULL when n == N
- *
- * (see: fix_fullness_group())
- */
-static const int fullness_threshold_frac = 4;
-
-struct size_class {
- /*
- * Size of objects stored in this class. Must be multiple
- * of ZS_ALIGN.
- */
- int size;
- unsigned int index;
-
- /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
- int pages_per_zspage;
-
- spinlock_t lock;
-
- /* stats */
- u64 pages_allocated;
-
- struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
-};
-
-/*
- * Placed within free objects to form a singly linked list.
- * For every zspage, first_page->freelist gives head of this list.
- *
- * This must be power of 2 and less than or equal to ZS_ALIGN
- */
-struct link_free {
- /* Handle of next free chunk (encodes <PFN, obj_idx>) */
- void *next;
-};
-
-struct zs_pool {
- struct size_class size_class[ZS_SIZE_CLASSES];
-
- gfp_t flags; /* allocation flags used when growing pool */
-};
-
-/*
- * A zspage's class index and fullness group
- * are encoded in its (first)page->mapping
- */
-#define CLASS_IDX_BITS 28
-#define FULLNESS_BITS 4
-#define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1)
-#define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1)
-
-/*
- * By default, zsmalloc uses a copy-based object mapping method to access
- * allocations that span two pages. However, if a particular architecture
- * performs VM mapping faster than copying, then it should be added here
- * so that USE_PGTABLE_MAPPING is defined. This causes zsmalloc to use
- * page table mapping rather than copying for object mapping.
- */
-#if defined(CONFIG_ARM) && !defined(MODULE)
-#define USE_PGTABLE_MAPPING
-#endif
-
-struct mapping_area {
-#ifdef USE_PGTABLE_MAPPING
- struct vm_struct *vm; /* vm area for mapping object that span pages */
-#else
- char *vm_buf; /* copy buffer for objects that span pages */
-#endif
- char *vm_addr; /* address of kmap_atomic()'ed pages */
- enum zs_mapmode vm_mm; /* mapping mode */
-};
-
-
-/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
-static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
-
-static int is_first_page(struct page *page)
-{
- return PagePrivate(page);
-}
-
-static int is_last_page(struct page *page)
-{
- return PagePrivate2(page);
-}
-
-static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
- enum fullness_group *fullness)
-{
- unsigned long m;
- BUG_ON(!is_first_page(page));
-
- m = (unsigned long)page->mapping;
- *fullness = m & FULLNESS_MASK;
- *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
-}
-
-static void set_zspage_mapping(struct page *page, unsigned int class_idx,
- enum fullness_group fullness)
-{
- unsigned long m;
- BUG_ON(!is_first_page(page));
-
- m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
- (fullness & FULLNESS_MASK);
- page->mapping = (struct address_space *)m;
-}
-
-static int get_size_class_index(int size)
-{
- int idx = 0;
-
- if (likely(size > ZS_MIN_ALLOC_SIZE))
- idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
- ZS_SIZE_CLASS_DELTA);
-
- return idx;
-}
-
-static enum fullness_group get_fullness_group(struct page *page)
-{
- int inuse, max_objects;
- enum fullness_group fg;
- BUG_ON(!is_first_page(page));
-
- inuse = page->inuse;
- max_objects = page->objects;
-
- if (inuse == 0)
- fg = ZS_EMPTY;
- else if (inuse == max_objects)
- fg = ZS_FULL;
- else if (inuse <= max_objects / fullness_threshold_frac)
- fg = ZS_ALMOST_EMPTY;
- else
- fg = ZS_ALMOST_FULL;
-
- return fg;
-}
-
-static void insert_zspage(struct page *page, struct size_class *class,
- enum fullness_group fullness)
-{
- struct page **head;
-
- BUG_ON(!is_first_page(page));
-
- if (fullness >= _ZS_NR_FULLNESS_GROUPS)
- return;
-
- head = &class->fullness_list[fullness];
- if (*head)
- list_add_tail(&page->lru, &(*head)->lru);
-
- *head = page;
-}
-
-static void remove_zspage(struct page *page, struct size_class *class,
- enum fullness_group fullness)
-{
- struct page **head;
-
- BUG_ON(!is_first_page(page));
-
- if (fullness >= _ZS_NR_FULLNESS_GROUPS)
- return;
-
- head = &class->fullness_list[fullness];
- BUG_ON(!*head);
- if (list_empty(&(*head)->lru))
- *head = NULL;
- else if (*head == page)
- *head = (struct page *)list_entry((*head)->lru.next,
- struct page, lru);
-
- list_del_init(&page->lru);
-}
-
-static enum fullness_group fix_fullness_group(struct zs_pool *pool,
- struct page *page)
-{
- int class_idx;
- struct size_class *class;
- enum fullness_group currfg, newfg;
-
- BUG_ON(!is_first_page(page));
-
- get_zspage_mapping(page, &class_idx, &currfg);
- newfg = get_fullness_group(page);
- if (newfg == currfg)
- goto out;
-
- class = &pool->size_class[class_idx];
- remove_zspage(page, class, currfg);
- insert_zspage(page, class, newfg);
- set_zspage_mapping(page, class_idx, newfg);
-
-out:
- return newfg;
-}
-
-/*
- * We have to decide on how many pages to link together
- * to form a zspage for each size class. This is important
- * to reduce wastage due to unusable space left at end of
- * each zspage which is given as:
- * wastage = Zp % size_class
- * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
- *
- * For example, for size class of 3/8 * PAGE_SIZE, we should
- * link together 3 PAGE_SIZE sized pages to form a zspage
- * since then we can perfectly fit in 8 such objects.
- */
-static int get_pages_per_zspage(int class_size)
-{
- int i, max_usedpc = 0;
- /* zspage order which gives maximum used size per KB */
- int max_usedpc_order = 1;
-
- for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
- int zspage_size;
- int waste, usedpc;
-
- zspage_size = i * PAGE_SIZE;
- waste = zspage_size % class_size;
- usedpc = (zspage_size - waste) * 100 / zspage_size;
-
- if (usedpc > max_usedpc) {
- max_usedpc = usedpc;
- max_usedpc_order = i;
- }
- }
-
- return max_usedpc_order;
-}
-
-/*
- * A single 'zspage' is composed of many system pages which are
- * linked together using fields in struct page. This function finds
- * the first/head page, given any component page of a zspage.
- */
-static struct page *get_first_page(struct page *page)
-{
- if (is_first_page(page))
- return page;
- else
- return page->first_page;
-}
-
-static struct page *get_next_page(struct page *page)
-{
- struct page *next;
-
- if (is_last_page(page))
- next = NULL;
- else if (is_first_page(page))
- next = (struct page *)page_private(page);
- else
- next = list_entry(page->lru.next, struct page, lru);
-
- return next;
-}
-
-/*
- * Encode <page, obj_idx> as a single handle value.
- * On hardware platforms with physical memory starting at 0x0 the pfn
- * could be 0 so we ensure that the handle will never be 0 by adjusting the
- * encoded obj_idx value before encoding.
- */
-static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
-{
- unsigned long handle;
-
- if (!page) {
- BUG_ON(obj_idx);
- return NULL;
- }
-
- handle = page_to_pfn(page) << OBJ_INDEX_BITS;
- handle |= ((obj_idx + 1) & OBJ_INDEX_MASK);
-
- return (void *)handle;
-}
-
-/*
- * Decode <page, obj_idx> pair from the given object handle. We adjust the
- * decoded obj_idx back to its original value since it was adjusted in
- * obj_location_to_handle().
- */
-static void obj_handle_to_location(unsigned long handle, struct page **page,
- unsigned long *obj_idx)
-{
- *page = pfn_to_page(handle >> OBJ_INDEX_BITS);
- *obj_idx = (handle & OBJ_INDEX_MASK) - 1;
-}
-
-static unsigned long obj_idx_to_offset(struct page *page,
- unsigned long obj_idx, int class_size)
-{
- unsigned long off = 0;
-
- if (!is_first_page(page))
- off = page->index;
-
- return off + obj_idx * class_size;
-}
-
-static void reset_page(struct page *page)
-{
- clear_bit(PG_private, &page->flags);
- clear_bit(PG_private_2, &page->flags);
- set_page_private(page, 0);
- page->mapping = NULL;
- page->freelist = NULL;
- page_mapcount_reset(page);
-}
-
-static void free_zspage(struct page *first_page)
-{
- struct page *nextp, *tmp, *head_extra;
-
- BUG_ON(!is_first_page(first_page));
- BUG_ON(first_page->inuse);
-
- head_extra = (struct page *)page_private(first_page);
-
- reset_page(first_page);
- __free_page(first_page);
-
- /* zspage with only 1 system page */
- if (!head_extra)
- return;
-
- list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
- list_del(&nextp->lru);
- reset_page(nextp);
- __free_page(nextp);
- }
- reset_page(head_extra);
- __free_page(head_extra);
-}
-
-/* Initialize a newly allocated zspage */
-static void init_zspage(struct page *first_page, struct size_class *class)
-{
- unsigned long off = 0;
- struct page *page = first_page;
-
- BUG_ON(!is_first_page(first_page));
- while (page) {
- struct page *next_page;
- struct link_free *link;
- unsigned int i, objs_on_page;
-
- /*
- * page->index stores offset of first object starting
- * in the page. For the first page, this is always 0,
- * so we use first_page->index (aka ->freelist) to store
- * head of corresponding zspage's freelist.
- */
- if (page != first_page)
- page->index = off;
-
- link = (struct link_free *)kmap_atomic(page) +
- off / sizeof(*link);
- objs_on_page = (PAGE_SIZE - off) / class->size;
-
- for (i = 1; i <= objs_on_page; i++) {
- off += class->size;
- if (off < PAGE_SIZE) {
- link->next = obj_location_to_handle(page, i);
- link += class->size / sizeof(*link);
- }
- }
-
- /*
- * We now come to the last (full or partial) object on this
- * page, which must point to the first object on the next
- * page (if present)
- */
- next_page = get_next_page(page);
- link->next = obj_location_to_handle(next_page, 0);
- kunmap_atomic(link);
- page = next_page;
- off = (off + class->size) % PAGE_SIZE;
- }
-}
-
-/*
- * Allocate a zspage for the given size class
- */
-static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
-{
- int i, error;
- struct page *first_page = NULL, *uninitialized_var(prev_page);
-
- /*
- * Allocate individual pages and link them together as:
- * 1. first page->private = first sub-page
- * 2. all sub-pages are linked together using page->lru
- * 3. each sub-page is linked to the first page using page->first_page
- *
- * For each size class, First/Head pages are linked together using
- * page->lru. Also, we set PG_private to identify the first page
- * (i.e. no other sub-page has this flag set) and PG_private_2 to
- * identify the last page.
- */
- error = -ENOMEM;
- for (i = 0; i < class->pages_per_zspage; i++) {
- struct page *page;
-
- page = alloc_page(flags);
- if (!page)
- goto cleanup;
-
- INIT_LIST_HEAD(&page->lru);
- if (i == 0) { /* first page */
- SetPagePrivate(page);
- set_page_private(page, 0);
- first_page = page;
- first_page->inuse = 0;
- }
- if (i == 1)
- set_page_private(first_page, (unsigned long)page);
- if (i >= 1)
- page->first_page = first_page;
- if (i >= 2)
- list_add(&page->lru, &prev_page->lru);
- if (i == class->pages_per_zspage - 1) /* last page */
- SetPagePrivate2(page);
- prev_page = page;
- }
-
- init_zspage(first_page, class);
-
- first_page->freelist = obj_location_to_handle(first_page, 0);
- /* Maximum number of objects we can store in this zspage */
- first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;
-
- error = 0; /* Success */
-
-cleanup:
- if (unlikely(error) && first_page) {
- free_zspage(first_page);
- first_page = NULL;
- }
-
- return first_page;
-}
-
-static struct page *find_get_zspage(struct size_class *class)
-{
- int i;
- struct page *page;
-
- for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
- page = class->fullness_list[i];
- if (page)
- break;
- }
-
- return page;
-}
-
-#ifdef USE_PGTABLE_MAPPING
-static inline int __zs_cpu_up(struct mapping_area *area)
-{
- /*
- * Make sure we don't leak memory if a cpu UP notification
- * and zs_init() race and both call zs_cpu_up() on the same cpu
- */
- if (area->vm)
- return 0;
- area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
- if (!area->vm)
- return -ENOMEM;
- return 0;
-}
-
-static inline void __zs_cpu_down(struct mapping_area *area)
-{
- if (area->vm)
- free_vm_area(area->vm);
- area->vm = NULL;
-}
-
-static inline void *__zs_map_object(struct mapping_area *area,
- struct page *pages[2], int off, int size)
-{
- BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages));
- area->vm_addr = area->vm->addr;
- return area->vm_addr + off;
-}
-
-static inline void __zs_unmap_object(struct mapping_area *area,
- struct page *pages[2], int off, int size)
-{
- unsigned long addr = (unsigned long)area->vm_addr;
-
- unmap_kernel_range(addr, PAGE_SIZE * 2);
-}
-
-#else /* USE_PGTABLE_MAPPING */
-
-static inline int __zs_cpu_up(struct mapping_area *area)
-{
- /*
- * Make sure we don't leak memory if a cpu UP notification
- * and zs_init() race and both call zs_cpu_up() on the same cpu
- */
- if (area->vm_buf)
- return 0;
- area->vm_buf = (char *)__get_free_page(GFP_KERNEL);
- if (!area->vm_buf)
- return -ENOMEM;
- return 0;
-}
-
-static inline void __zs_cpu_down(struct mapping_area *area)
-{
- if (area->vm_buf)
- free_page((unsigned long)area->vm_buf);
- area->vm_buf = NULL;
-}
-
-static void *__zs_map_object(struct mapping_area *area,
- struct page *pages[2], int off, int size)
-{
- int sizes[2];
- void *addr;
- char *buf = area->vm_buf;
-
- /* disable page faults to match kmap_atomic() return conditions */
- pagefault_disable();
-
- /* no read fastpath */
- if (area->vm_mm == ZS_MM_WO)
- goto out;
-
- sizes[0] = PAGE_SIZE - off;
- sizes[1] = size - sizes[0];
-
- /* copy object to per-cpu buffer */
- addr = kmap_atomic(pages[0]);
- memcpy(buf, addr + off, sizes[0]);
- kunmap_atomic(addr);
- addr = kmap_atomic(pages[1]);
- memcpy(buf + sizes[0], addr, sizes[1]);
- kunmap_atomic(addr);
-out:
- return area->vm_buf;
-}
-
-static void __zs_unmap_object(struct mapping_area *area,
- struct page *pages[2], int off, int size)
-{
- int sizes[2];
- void *addr;
- char *buf = area->vm_buf;
-
- /* no write fastpath */
- if (area->vm_mm == ZS_MM_RO)
- goto out;
-
- sizes[0] = PAGE_SIZE - off;
- sizes[1] = size - sizes[0];
-
- /* copy per-cpu buffer to object */
- addr = kmap_atomic(pages[0]);
- memcpy(addr + off, buf, sizes[0]);
- kunmap_atomic(addr);
- addr = kmap_atomic(pages[1]);
- memcpy(addr, buf + sizes[0], sizes[1]);
- kunmap_atomic(addr);
-
-out:
- /* enable page faults to match kunmap_atomic() return conditions */
- pagefault_enable();
-}
-
-#endif /* USE_PGTABLE_MAPPING */
-
-static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
- void *pcpu)
-{
- int ret, cpu = (long)pcpu;
- struct mapping_area *area;
-
- switch (action) {
- case CPU_UP_PREPARE:
- area = &per_cpu(zs_map_area, cpu);
- ret = __zs_cpu_up(area);
- if (ret)
- return notifier_from_errno(ret);
- break;
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- area = &per_cpu(zs_map_area, cpu);
- __zs_cpu_down(area);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block zs_cpu_nb = {
- .notifier_call = zs_cpu_notifier
-};
-
-static void zs_exit(void)
-{
- int cpu;
-
- for_each_online_cpu(cpu)
- zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
- unregister_cpu_notifier(&zs_cpu_nb);
-}
-
-static int zs_init(void)
-{
- int cpu, ret;
-
- register_cpu_notifier(&zs_cpu_nb);
- for_each_online_cpu(cpu) {
- ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
- if (notifier_to_errno(ret))
- goto fail;
- }
- return 0;
-fail:
- zs_exit();
- return notifier_to_errno(ret);
-}
-
-/**
- * zs_create_pool - Creates an allocation pool to work from.
- * @flags: allocation flags used to allocate pool metadata
- *
- * This function must be called before anything when using
- * the zsmalloc allocator.
- *
- * On success, a pointer to the newly created pool is returned,
- * otherwise NULL.
- */
-struct zs_pool *zs_create_pool(gfp_t flags)
-{
- int i, ovhd_size;
- struct zs_pool *pool;
-
- ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
- pool = kzalloc(ovhd_size, GFP_KERNEL);
- if (!pool)
- return NULL;
-
- for (i = 0; i < ZS_SIZE_CLASSES; i++) {
- int size;
- struct size_class *class;
-
- size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
- if (size > ZS_MAX_ALLOC_SIZE)
- size = ZS_MAX_ALLOC_SIZE;
-
- class = &pool->size_class[i];
- class->size = size;
- class->index = i;
- spin_lock_init(&class->lock);
- class->pages_per_zspage = get_pages_per_zspage(size);
-
- }
-
- pool->flags = flags;
-
- return pool;
-}
-EXPORT_SYMBOL_GPL(zs_create_pool);
-
-void zs_destroy_pool(struct zs_pool *pool)
-{
- int i;
-
- for (i = 0; i < ZS_SIZE_CLASSES; i++) {
- int fg;
- struct size_class *class = &pool->size_class[i];
-
- for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
- if (class->fullness_list[fg]) {
- pr_info("Freeing non-empty class with size %db, fullness group %d\n",
- class->size, fg);
- }
- }
- }
- kfree(pool);
-}
-EXPORT_SYMBOL_GPL(zs_destroy_pool);
-
-/**
- * zs_malloc - Allocate block of given size from pool.
- * @pool: pool to allocate from
- * @size: size of block to allocate
- *
- * On success, handle to the allocated object is returned,
- * otherwise 0.
- * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
- */
-unsigned long zs_malloc(struct zs_pool *pool, size_t size)
-{
- unsigned long obj;
- struct link_free *link;
- int class_idx;
- struct size_class *class;
-
- struct page *first_page, *m_page;
- unsigned long m_objidx, m_offset;
-
- if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
- return 0;
-
- class_idx = get_size_class_index(size);
- class = &pool->size_class[class_idx];
- BUG_ON(class_idx != class->index);
-
- spin_lock(&class->lock);
- first_page = find_get_zspage(class);
-
- if (!first_page) {
- spin_unlock(&class->lock);
- first_page = alloc_zspage(class, pool->flags);
- if (unlikely(!first_page))
- return 0;
-
- set_zspage_mapping(first_page, class->index, ZS_EMPTY);
- spin_lock(&class->lock);
- class->pages_allocated += class->pages_per_zspage;
- }
-
- obj = (unsigned long)first_page->freelist;
- obj_handle_to_location(obj, &m_page, &m_objidx);
- m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
-
- link = (struct link_free *)kmap_atomic(m_page) +
- m_offset / sizeof(*link);
- first_page->freelist = link->next;
- memset(link, POISON_INUSE, sizeof(*link));
- kunmap_atomic(link);
-
- first_page->inuse++;
- /* Now move the zspage to another fullness group, if required */
- fix_fullness_group(pool, first_page);
- spin_unlock(&class->lock);
-
- return obj;
-}
-EXPORT_SYMBOL_GPL(zs_malloc);
-
-void zs_free(struct zs_pool *pool, unsigned long obj)
-{
- struct link_free *link;
- struct page *first_page, *f_page;
- unsigned long f_objidx, f_offset;
-
- int class_idx;
- struct size_class *class;
- enum fullness_group fullness;
-
- if (unlikely(!obj))
- return;
-
- obj_handle_to_location(obj, &f_page, &f_objidx);
- first_page = get_first_page(f_page);
-
- get_zspage_mapping(first_page, &class_idx, &fullness);
- class = &pool->size_class[class_idx];
- f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
-
- spin_lock(&class->lock);
-
- /* Insert this object in containing zspage's freelist */
- link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
- + f_offset);
- link->next = first_page->freelist;
- kunmap_atomic(link);
- first_page->freelist = (void *)obj;
-
- first_page->inuse--;
- fullness = fix_fullness_group(pool, first_page);
-
- if (fullness == ZS_EMPTY)
- class->pages_allocated -= class->pages_per_zspage;
-
- spin_unlock(&class->lock);
-
- if (fullness == ZS_EMPTY)
- free_zspage(first_page);
-}
-EXPORT_SYMBOL_GPL(zs_free);
-
-/**
- * zs_map_object - get address of allocated object from handle.
- * @pool: pool from which the object was allocated
- * @handle: handle returned from zs_malloc
- *
- * Before using an object allocated from zs_malloc, it must be mapped using
- * this function. When done with the object, it must be unmapped using
- * zs_unmap_object.
- *
- * Only one object can be mapped per cpu at a time. There is no protection
- * against nested mappings.
- *
- * This function returns with preemption and page faults disabled.
- */
-void *zs_map_object(struct zs_pool *pool, unsigned long handle,
- enum zs_mapmode mm)
-{
- struct page *page;
- unsigned long obj_idx, off;
-
- unsigned int class_idx;
- enum fullness_group fg;
- struct size_class *class;
- struct mapping_area *area;
- struct page *pages[2];
-
- BUG_ON(!handle);
-
- /*
- * Because we use per-cpu mapping areas shared among the
- * pools/users, we can't allow mapping in interrupt context
- * because it can corrupt another users mappings.
- */
- BUG_ON(in_interrupt());
-
- obj_handle_to_location(handle, &page, &obj_idx);
- get_zspage_mapping(get_first_page(page), &class_idx, &fg);
- class = &pool->size_class[class_idx];
- off = obj_idx_to_offset(page, obj_idx, class->size);
-
- area = &get_cpu_var(zs_map_area);
- area->vm_mm = mm;
- if (off + class->size <= PAGE_SIZE) {
- /* this object is contained entirely within a page */
- area->vm_addr = kmap_atomic(page);
- return area->vm_addr + off;
- }
-
- /* this object spans two pages */
- pages[0] = page;
- pages[1] = get_next_page(page);
- BUG_ON(!pages[1]);
-
- return __zs_map_object(area, pages, off, class->size);
-}
-EXPORT_SYMBOL_GPL(zs_map_object);
-
-void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
-{
- struct page *page;
- unsigned long obj_idx, off;
-
- unsigned int class_idx;
- enum fullness_group fg;
- struct size_class *class;
- struct mapping_area *area;
-
- BUG_ON(!handle);
-
- obj_handle_to_location(handle, &page, &obj_idx);
- get_zspage_mapping(get_first_page(page), &class_idx, &fg);
- class = &pool->size_class[class_idx];
- off = obj_idx_to_offset(page, obj_idx, class->size);
-
- area = &__get_cpu_var(zs_map_area);
- if (off + class->size <= PAGE_SIZE)
- kunmap_atomic(area->vm_addr);
- else {
- struct page *pages[2];
-
- pages[0] = page;
- pages[1] = get_next_page(page);
- BUG_ON(!pages[1]);
-
- __zs_unmap_object(area, pages, off, class->size);
- }
- put_cpu_var(zs_map_area);
-}
-EXPORT_SYMBOL_GPL(zs_unmap_object);
-
-u64 zs_get_total_size_bytes(struct zs_pool *pool)
-{
- int i;
- u64 npages = 0;
-
- for (i = 0; i < ZS_SIZE_CLASSES; i++)
- npages += pool->size_class[i].pages_allocated;
-
- return npages << PAGE_SHIFT;
-}
-EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
-
-module_init(zs_init);
-module_exit(zs_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/drivers/staging/zsmalloc/zsmalloc.h
deleted file mode 100644
index fbe6bec421aa..000000000000
--- a/drivers/staging/zsmalloc/zsmalloc.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * zsmalloc memory allocator
- *
- * Copyright (C) 2011 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the license that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- */
-
-#ifndef _ZS_MALLOC_H_
-#define _ZS_MALLOC_H_
-
-#include <linux/types.h>
-
-/*
- * zsmalloc mapping modes
- *
- * NOTE: These only make a difference when a mapped object spans pages
- */
-enum zs_mapmode {
- ZS_MM_RW, /* normal read-write mapping */
- ZS_MM_RO, /* read-only (no copy-out at unmap time) */
- ZS_MM_WO /* write-only (no copy-in at map time) */
-};
-
-struct zs_pool;
-
-struct zs_pool *zs_create_pool(gfp_t flags);
-void zs_destroy_pool(struct zs_pool *pool);
-
-unsigned long zs_malloc(struct zs_pool *pool, size_t size);
-void zs_free(struct zs_pool *pool, unsigned long obj);
-
-void *zs_map_object(struct zs_pool *pool, unsigned long handle,
- enum zs_mapmode mm);
-void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
-
-u64 zs_get_total_size_bytes(struct zs_pool *pool);
-
-#endif
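
The obj_location_to_handle()/obj_handle_to_location() pair removed above packs a page frame number and an in-page object index into a single word, biasing the index by one so that a valid handle can never be 0 even when the pfn is 0. A minimal userspace sketch of that encoding, assuming a stand-in OBJ_INDEX_BITS of 8 (the real value is derived from the zspage geometry in the removed file and is not part of this excerpt):

/* Illustrative sketch of the removed zsmalloc handle encoding; the constants
 * below are stand-ins, not the kernel's actual values. */
#include <assert.h>
#include <stdio.h>

#define OBJ_INDEX_BITS  8UL                              /* assumed for the sketch */
#define OBJ_INDEX_MASK  ((1UL << OBJ_INDEX_BITS) - 1)

static unsigned long encode(unsigned long pfn, unsigned long obj_idx)
{
	/* obj_idx is stored as obj_idx + 1 so the handle is never 0,
	 * even on platforms where physical memory starts at pfn 0. */
	return (pfn << OBJ_INDEX_BITS) | ((obj_idx + 1) & OBJ_INDEX_MASK);
}

static void decode(unsigned long handle, unsigned long *pfn, unsigned long *obj_idx)
{
	*pfn = handle >> OBJ_INDEX_BITS;
	*obj_idx = (handle & OBJ_INDEX_MASK) - 1;         /* undo the +1 adjustment */
}

int main(void)
{
	unsigned long pfn, idx;
	unsigned long h = encode(0, 0);

	assert(h != 0);                 /* pfn 0, obj_idx 0 still yields a non-zero handle */
	decode(h, &pfn, &idx);
	assert(pfn == 0 && idx == 0);
	printf("handle=%#lx -> pfn=%lu obj_idx=%lu\n", h, pfn, idx);
	return 0;
}
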
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 18303686eb58..dc2d84ac5a0e 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -3,6 +3,7 @@ menuconfig TARGET_CORE
tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
depends on SCSI && BLOCK
select CONFIGFS_FS
+ select CRC_T10DIF
default n
help
Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
@@ -13,6 +14,7 @@ if TARGET_CORE
config TCM_IBLOCK
tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
+ select BLK_DEV_INTEGRITY
help
Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
access to Linux/Block devices using BIO
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d70e9119e906..7f1a7ce4b771 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -52,7 +52,7 @@
static LIST_HEAD(g_tiqn_list);
static LIST_HEAD(g_np_list);
static DEFINE_SPINLOCK(tiqn_lock);
-static DEFINE_SPINLOCK(np_lock);
+static DEFINE_MUTEX(np_lock);
static struct idr tiqn_idr;
struct idr sess_idr;
@@ -307,6 +307,9 @@ bool iscsit_check_np_match(
return false;
}
+/*
+ * Called with mutex np_lock held
+ */
static struct iscsi_np *iscsit_get_np(
struct __kernel_sockaddr_storage *sockaddr,
int network_transport)
@@ -314,11 +317,10 @@ static struct iscsi_np *iscsit_get_np(
struct iscsi_np *np;
bool match;
- spin_lock_bh(&np_lock);
list_for_each_entry(np, &g_np_list, np_list) {
- spin_lock(&np->np_thread_lock);
+ spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
- spin_unlock(&np->np_thread_lock);
+ spin_unlock_bh(&np->np_thread_lock);
continue;
}
@@ -330,13 +332,11 @@ static struct iscsi_np *iscsit_get_np(
* while iscsi_tpg_add_network_portal() is called.
*/
np->np_exports++;
- spin_unlock(&np->np_thread_lock);
- spin_unlock_bh(&np_lock);
+ spin_unlock_bh(&np->np_thread_lock);
return np;
}
- spin_unlock(&np->np_thread_lock);
+ spin_unlock_bh(&np->np_thread_lock);
}
- spin_unlock_bh(&np_lock);
return NULL;
}
@@ -350,16 +350,22 @@ struct iscsi_np *iscsit_add_np(
struct sockaddr_in6 *sock_in6;
struct iscsi_np *np;
int ret;
+
+ mutex_lock(&np_lock);
+
/*
* Locate the existing struct iscsi_np if already active..
*/
np = iscsit_get_np(sockaddr, network_transport);
- if (np)
+ if (np) {
+ mutex_unlock(&np_lock);
return np;
+ }
np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
if (!np) {
pr_err("Unable to allocate memory for struct iscsi_np\n");
+ mutex_unlock(&np_lock);
return ERR_PTR(-ENOMEM);
}
@@ -382,6 +388,7 @@ struct iscsi_np *iscsit_add_np(
ret = iscsi_target_setup_login_socket(np, sockaddr);
if (ret != 0) {
kfree(np);
+ mutex_unlock(&np_lock);
return ERR_PTR(ret);
}
@@ -390,6 +397,7 @@ struct iscsi_np *iscsit_add_np(
pr_err("Unable to create kthread: iscsi_np\n");
ret = PTR_ERR(np->np_thread);
kfree(np);
+ mutex_unlock(&np_lock);
return ERR_PTR(ret);
}
/*
@@ -400,10 +408,10 @@ struct iscsi_np *iscsit_add_np(
* point because iscsi_np has not been added to g_np_list yet.
*/
np->np_exports = 1;
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
- spin_lock_bh(&np_lock);
list_add_tail(&np->np_list, &g_np_list);
- spin_unlock_bh(&np_lock);
+ mutex_unlock(&np_lock);
pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
np->np_ip, np->np_port, np->np_transport->name);
@@ -465,13 +473,14 @@ int iscsit_del_np(struct iscsi_np *np)
*/
send_sig(SIGINT, np->np_thread, 1);
kthread_stop(np->np_thread);
+ np->np_thread = NULL;
}
np->np_transport->iscsit_free_np(np);
- spin_lock_bh(&np_lock);
+ mutex_lock(&np_lock);
list_del(&np->np_list);
- spin_unlock_bh(&np_lock);
+ mutex_unlock(&np_lock);
pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
np->np_ip, np->np_port, np->np_transport->name);
@@ -621,7 +630,7 @@ static int iscsit_add_reject(
{
struct iscsi_cmd *cmd;
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
return -1;
@@ -823,24 +832,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
(hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
/*
- * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
- * that adds support for RESERVE/RELEASE. There is a bug
- * add with this new functionality that sets R/W bits when
- * neither CDB carries any READ or WRITE datapayloads.
+ * From RFC-3720 Section 10.3.1:
+ *
+ * "Either or both of R and W MAY be 1 when either the
+ * Expected Data Transfer Length and/or Bidirectional Read
+ * Expected Data Transfer Length are 0"
+ *
+ * For this case, go ahead and clear the unnecessary bits
+ * to avoid any confusion with ->data_direction.
*/
- if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
- hdr->flags &= ~ISCSI_FLAG_CMD_READ;
- hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
- goto done;
- }
+ hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+ hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
- pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+ pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
" set when Expected Data Transfer Length is 0 for"
- " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
- return iscsit_add_reject_cmd(cmd,
- ISCSI_REASON_BOOKMARK_INVALID, buf);
+ " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
}
-done:
if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
!(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
@@ -2476,7 +2483,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
if (!conn_p)
return;
- cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
+ cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
if (!cmd) {
iscsit_dec_conn_usage_count(conn_p);
return;
@@ -3952,7 +3959,7 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
switch (hdr->opcode & ISCSI_OPCODE_MASK) {
case ISCSI_OP_SCSI_CMD:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
@@ -3964,28 +3971,28 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
case ISCSI_OP_NOOP_OUT:
cmd = NULL;
if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
}
ret = iscsit_handle_nop_out(conn, cmd, buf);
break;
case ISCSI_OP_SCSI_TMFUNC:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
break;
case ISCSI_OP_TEXT:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
ret = iscsit_handle_text_cmd(conn, cmd, buf);
break;
case ISCSI_OP_LOGOUT:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
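
The hunks above switch iscsit_allocate_cmd() callers from passing a gfp_t to passing a task state (TASK_INTERRUPTIBLE in process context, TASK_RUNNING from timer/atomic context), and the allocation can now fail when the per-cpu tag pool is empty, so every caller checks for NULL. A minimal userspace sketch of that caller-side pattern; tag_pool, tag_alloc() and allocate_cmd() are stand-ins for illustration only, since percpu_ida is a kernel-only facility:

/* Illustrative sketch only: stand-ins for percpu_ida_alloc() and
 * iscsit_allocate_cmd(), showing the allocate-and-check pattern. */
#include <stdio.h>

#define TASK_RUNNING		0	/* atomic/timer context: must not sleep */
#define TASK_INTERRUPTIBLE	1	/* process context: may wait for a tag  */

struct tag_pool { int next, max; };
static char cmd_map[4][64];		/* stand-in for the preallocated command map */

static int tag_alloc(struct tag_pool *p, int state)
{
	/* The real percpu_ida_alloc() may sleep for a free tag when passed
	 * TASK_INTERRUPTIBLE; this sketch simply fails when exhausted. */
	(void)state;
	if (p->next >= p->max)
		return -1;
	return p->next++;
}

static void *allocate_cmd(struct tag_pool *p, int state)
{
	int tag = tag_alloc(p, state);

	if (tag < 0)			/* pool exhausted: caller must handle NULL */
		return NULL;
	return cmd_map[tag];
}

int main(void)
{
	struct tag_pool pool = { .next = 0, .max = 1 };

	printf("%p\n", allocate_cmd(&pool, TASK_INTERRUPTIBLE));
	printf("%p\n", allocate_cmd(&pool, TASK_RUNNING));	/* (nil): exhausted */
	return 0;
}
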
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index e3318edb233d..1c0088fe9e99 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -474,7 +474,8 @@ static ssize_t __iscsi_##prefix##_store_##name( \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
- \
+ if (count >= sizeof(auth->name)) \
+ return -EINVAL; \
snprintf(auth->name, sizeof(auth->name), "%s", page); \
if (!strncmp("NULL", auth->name, 4)) \
auth->naf_flags &= ~flags; \
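
The hunk above adds an explicit length check so an oversized attribute write now fails with -EINVAL instead of being silently truncated by the following snprintf(). A minimal userspace sketch of the same reject-instead-of-truncate pattern; NAME_LEN and store_name() are illustrative stand-ins:

/* Sketch of rejecting input that would not fit the fixed-size destination,
 * rather than letting snprintf() silently truncate it. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define NAME_LEN 16   /* stand-in for the fixed-size auth field */

static int store_name(char dst[NAME_LEN], const char *page, size_t count)
{
	if (count >= NAME_LEN)		/* would truncate: reject up front */
		return -EINVAL;
	snprintf(dst, NAME_LEN, "%s", page);
	return 0;
}

int main(void)
{
	char name[NAME_LEN];
	const char *ok = "shortuser";
	const char *bad = "a-very-long-user-name";

	printf("%d\n", store_name(name, ok, strlen(ok)));	/* 0        */
	printf("%d\n", store_name(name, bad, strlen(bad)));	/* -EINVAL  */
	return 0;
}
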
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index e048d6439f4a..cda4d80cfaef 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -507,7 +507,9 @@ int iscsit_handle_status_snack(
u32 last_statsn;
int found_cmd;
- if (conn->exp_statsn > begrun) {
+ if (!begrun) {
+ begrun = conn->exp_statsn;
+ } else if (conn->exp_statsn > begrun) {
pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
" 0x%08x but already got ExpStatSN: 0x%08x on CID:"
" %hu.\n", begrun, runlength, conn->exp_statsn,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 4eb93b2b6473..e29279e6b577 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1403,11 +1403,6 @@ old_sess_out:
out:
stop = kthread_should_stop();
- if (!stop && signal_pending(current)) {
- spin_lock_bh(&np->np_thread_lock);
- stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
- spin_unlock_bh(&np->np_thread_lock);
- }
/* Wait for another socket.. */
if (!stop)
return 1;
@@ -1415,7 +1410,6 @@ exit:
iscsi_stop_login_thread_timer(np);
spin_lock_bh(&np->np_thread_lock);
np->np_thread_state = ISCSI_NP_THREAD_EXIT;
- np->np_thread = NULL;
spin_unlock_bh(&np->np_thread_lock);
return 0;
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 83c965c65386..582ba84075ec 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1192,7 +1192,7 @@ get_target:
*/
alloc_tags:
tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
- tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS;
+ tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 0819e688a398..e655b042ed18 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -152,13 +152,16 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
* May be called from software interrupt (timer) context for allocating
* iSCSI NopINs.
*/
-struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
+struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
{
struct iscsi_cmd *cmd;
struct se_session *se_sess = conn->sess->se_sess;
int size, tag;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask);
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
+ if (tag < 0)
+ return NULL;
+
size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
memset(cmd, 0, size);
@@ -926,7 +929,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
u8 state;
struct iscsi_cmd *cmd;
- cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
+ cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
if (!cmd)
return -1;
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index e4fc34a02f57..561a424d1980 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -9,7 +9,7 @@ extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t);
-extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
+extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 1b41e6776152..fadad7c5f635 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -217,7 +217,8 @@ static void tcm_loop_submission_work(struct work_struct *work)
scsi_bufflen(sc), tcm_loop_sam_attr(sc),
sc->sc_data_direction, 0,
scsi_sglist(sc), scsi_sg_count(sc),
- sgl_bidi, sgl_bidi_count);
+ sgl_bidi, sgl_bidi_count,
+ scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
if (rc < 0) {
set_host_byte(sc, DID_NO_CONNECT);
goto out_done;
@@ -462,7 +463,7 @@ static int tcm_loop_driver_probe(struct device *dev)
{
struct tcm_loop_hba *tl_hba;
struct Scsi_Host *sh;
- int error;
+ int error, host_prot;
tl_hba = to_tcm_loop_hba(dev);
@@ -486,6 +487,13 @@ static int tcm_loop_driver_probe(struct device *dev)
sh->max_channel = 0;
sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
+ host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
+
+ scsi_host_set_prot(sh, host_prot);
+ scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
+
error = scsi_add_host(sh, &tl_hba->dev);
if (error) {
pr_err("%s: scsi_add_host failed\n", __func__);
@@ -1228,7 +1236,7 @@ static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
/* Start items for tcm_loop_naa_cit */
-struct se_portal_group *tcm_loop_make_naa_tpg(
+static struct se_portal_group *tcm_loop_make_naa_tpg(
struct se_wwn *wwn,
struct config_group *group,
const char *name)
@@ -1273,7 +1281,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
return &tl_tpg->tl_se_tpg;
}
-void tcm_loop_drop_naa_tpg(
+static void tcm_loop_drop_naa_tpg(
struct se_portal_group *se_tpg)
{
struct se_wwn *wwn = se_tpg->se_tpg_wwn;
@@ -1305,7 +1313,7 @@ void tcm_loop_drop_naa_tpg(
/* Start items for tcm_loop_cit */
-struct se_wwn *tcm_loop_make_scsi_hba(
+static struct se_wwn *tcm_loop_make_scsi_hba(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
@@ -1375,7 +1383,7 @@ out:
return ERR_PTR(ret);
}
-void tcm_loop_drop_scsi_hba(
+static void tcm_loop_drop_scsi_hba(
struct se_wwn *wwn)
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fdcee326bfbc..c3d9df6aaf5f 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -41,11 +41,14 @@
#include "target_core_alua.h"
#include "target_core_ua.h"
-static sense_reason_t core_alua_check_transition(int state, int *primary);
+static sense_reason_t core_alua_check_transition(int state, int valid,
+ int *primary);
static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port, int explicit, int offline);
+static char *core_alua_dump_state(int state);
+
static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;
@@ -55,6 +58,86 @@ static LIST_HEAD(lu_gps_list);
struct t10_alua_lu_gp *default_lu_gp;
/*
+ * REPORT REFERRALS
+ *
+ * See sbc3r35 section 5.23
+ */
+sense_reason_t
+target_emulate_report_referrals(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct t10_alua_lba_map *map;
+ struct t10_alua_lba_map_member *map_mem;
+ unsigned char *buf;
+ u32 rd_len = 0, off;
+
+ if (cmd->data_length < 4) {
+ pr_warn("REPORT REFERRALS allocation length %u too"
+ " small\n", cmd->data_length);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ off = 4;
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ if (list_empty(&dev->t10_alua.lba_map_list)) {
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ transport_kunmap_data_sg(cmd);
+
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+ lba_map_list) {
+ int desc_num = off + 3;
+ int pg_num;
+
+ off += 4;
+ if (cmd->data_length > off)
+ put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
+ off += 8;
+ if (cmd->data_length > off)
+ put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
+ off += 8;
+ rd_len += 20;
+ pg_num = 0;
+ list_for_each_entry(map_mem, &map->lba_map_mem_list,
+ lba_map_mem_list) {
+ int alua_state = map_mem->lba_map_mem_alua_state;
+ int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;
+
+ if (cmd->data_length > off)
+ buf[off] = alua_state & 0x0f;
+ off += 2;
+ if (cmd->data_length > off)
+ buf[off] = (alua_pg_id >> 8) & 0xff;
+ off++;
+ if (cmd->data_length > off)
+ buf[off] = (alua_pg_id & 0xff);
+ off++;
+ rd_len += 4;
+ pg_num++;
+ }
+ if (cmd->data_length > desc_num)
+ buf[desc_num] = pg_num;
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+
+ /*
+ * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
+ */
+ put_unaligned_be16(rd_len, &buf[2]);
+
+ transport_kunmap_data_sg(cmd);
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+/*
* REPORT_TARGET_PORT_GROUPS
*
* See spc4r17 section 6.27
@@ -210,7 +293,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
unsigned char *ptr;
sense_reason_t rc = TCM_NO_SENSE;
u32 len = 4; /* Skip over RESERVED area in header */
- int alua_access_state, primary = 0;
+ int alua_access_state, primary = 0, valid_states;
u16 tg_pt_id, rtpi;
if (!l_port)
@@ -252,6 +335,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
+ valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
ptr = &buf[4]; /* Skip over RESERVED area in header */
@@ -263,7 +347,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
* the state is a primary or secondary target port asymmetric
* access state.
*/
- rc = core_alua_check_transition(alua_access_state, &primary);
+ rc = core_alua_check_transition(alua_access_state,
+ valid_states, &primary);
if (rc) {
/*
* If the SET TARGET PORT GROUPS attempts to establish
@@ -386,6 +471,81 @@ static inline int core_alua_state_nonoptimized(
return 0;
}
+static inline int core_alua_state_lba_dependent(
+ struct se_cmd *cmd,
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ u8 *alua_ascq)
+{
+ struct se_device *dev = cmd->se_dev;
+ u64 segment_size, segment_mult, sectors, lba;
+
+ /* Only need to check for cdb actually containing LBAs */
+ if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
+ return 0;
+
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ segment_size = dev->t10_alua.lba_map_segment_size;
+ segment_mult = dev->t10_alua.lba_map_segment_multiplier;
+ sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+ lba = cmd->t_task_lba;
+ while (lba < cmd->t_task_lba + sectors) {
+ struct t10_alua_lba_map *cur_map = NULL, *map;
+ struct t10_alua_lba_map_member *map_mem;
+
+ list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+ lba_map_list) {
+ u64 start_lba, last_lba;
+ u64 first_lba = map->lba_map_first_lba;
+
+ if (segment_mult) {
+ u64 tmp = lba;
+ start_lba = do_div(tmp, segment_size * segment_mult);
+
+ last_lba = first_lba + segment_size - 1;
+ if (start_lba >= first_lba &&
+ start_lba <= last_lba) {
+ lba += segment_size;
+ cur_map = map;
+ break;
+ }
+ } else {
+ last_lba = map->lba_map_last_lba;
+ if (lba >= first_lba && lba <= last_lba) {
+ lba = last_lba + 1;
+ cur_map = map;
+ break;
+ }
+ }
+ }
+ if (!cur_map) {
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ return 1;
+ }
+ list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
+ lba_map_mem_list) {
+ if (map_mem->lba_map_mem_alua_pg_id !=
+ tg_pt_gp->tg_pt_gp_id)
+ continue;
+ switch(map_mem->lba_map_mem_alua_state) {
+ case ALUA_ACCESS_STATE_STANDBY:
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+ return 1;
+ case ALUA_ACCESS_STATE_UNAVAILABLE:
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ return 1;
+ default:
+ break;
+ }
+ }
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ return 0;
+}
+
static inline int core_alua_state_standby(
struct se_cmd *cmd,
unsigned char *cdb,
@@ -583,6 +743,9 @@ target_alua_state_check(struct se_cmd *cmd)
case ALUA_ACCESS_STATE_TRANSITION:
ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
break;
+ case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+ ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
+ break;
/*
* OFFLINE is a secondary ALUA target port group access state, that is
* handled above with struct se_port->sep_tg_pt_secondary_offline=1
@@ -618,17 +781,36 @@ out:
* Check implicit and explicit ALUA state change request.
*/
static sense_reason_t
-core_alua_check_transition(int state, int *primary)
+core_alua_check_transition(int state, int valid, int *primary)
{
+ /*
+ * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
+ * defined as primary target port asymmetric access states.
+ */
switch (state) {
case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+ if (!(valid & ALUA_AO_SUP))
+ goto not_supported;
+ *primary = 1;
+ break;
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+ if (!(valid & ALUA_AN_SUP))
+ goto not_supported;
+ *primary = 1;
+ break;
case ALUA_ACCESS_STATE_STANDBY:
+ if (!(valid & ALUA_S_SUP))
+ goto not_supported;
+ *primary = 1;
+ break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
- /*
- * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
- * defined as primary target port asymmetric access states.
- */
+ if (!(valid & ALUA_U_SUP))
+ goto not_supported;
+ *primary = 1;
+ break;
+ case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+ if (!(valid & ALUA_LBD_SUP))
+ goto not_supported;
*primary = 1;
break;
case ALUA_ACCESS_STATE_OFFLINE:
@@ -636,14 +818,27 @@ core_alua_check_transition(int state, int *primary)
* OFFLINE state is defined as a secondary target port
* asymmetric access state.
*/
+ if (!(valid & ALUA_O_SUP))
+ goto not_supported;
*primary = 0;
break;
+ case ALUA_ACCESS_STATE_TRANSITION:
+ /*
+ * Transitioning is set internally, and
+ * cannot be selected manually.
+ */
+ goto not_supported;
default:
pr_err("Unknown ALUA access state: 0x%02x\n", state);
return TCM_INVALID_PARAMETER_LIST;
}
return 0;
+
+not_supported:
+ pr_err("ALUA access state %s not supported",
+ core_alua_dump_state(state));
+ return TCM_INVALID_PARAMETER_LIST;
}
static char *core_alua_dump_state(int state)
@@ -653,12 +848,16 @@ static char *core_alua_dump_state(int state)
return "Active/Optimized";
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
return "Active/NonOptimized";
+ case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+ return "LBA Dependent";
case ALUA_ACCESS_STATE_STANDBY:
return "Standby";
case ALUA_ACCESS_STATE_UNAVAILABLE:
return "Unavailable";
case ALUA_ACCESS_STATE_OFFLINE:
return "Offline";
+ case ALUA_ACCESS_STATE_TRANSITION:
+ return "Transitioning";
default:
return "Unknown";
}
@@ -735,58 +934,49 @@ static int core_alua_write_tpg_metadata(
* Called with tg_pt_gp->tg_pt_gp_md_mutex held
*/
static int core_alua_update_tpg_primary_metadata(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- int primary_state,
- unsigned char *md_buf)
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
{
+ unsigned char *md_buf;
struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
char path[ALUA_METADATA_PATH_LEN];
- int len;
+ int len, rc;
+
+ md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
+ if (!md_buf) {
+ pr_err("Unable to allocate buf for ALUA metadata\n");
+ return -ENOMEM;
+ }
memset(path, 0, ALUA_METADATA_PATH_LEN);
- len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
+ len = snprintf(md_buf, ALUA_MD_BUF_LEN,
"tg_pt_gp_id=%hu\n"
"alua_access_state=0x%02x\n"
"alua_access_status=0x%02x\n",
- tg_pt_gp->tg_pt_gp_id, primary_state,
+ tg_pt_gp->tg_pt_gp_id,
+ tg_pt_gp->tg_pt_gp_alua_pending_state,
tg_pt_gp->tg_pt_gp_alua_access_status);
snprintf(path, ALUA_METADATA_PATH_LEN,
"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
- return core_alua_write_tpg_metadata(path, md_buf, len);
+ rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(md_buf);
+ return rc;
}
-static int core_alua_do_transition_tg_pt(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- struct se_port *l_port,
- struct se_node_acl *nacl,
- unsigned char *md_buf,
- int new_state,
- int explicit)
+static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
{
+ struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
+ struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
struct se_dev_entry *se_deve;
struct se_lun_acl *lacl;
struct se_port *port;
struct t10_alua_tg_pt_gp_member *mem;
- int old_state = 0;
- /*
- * Save the old primary ALUA access state, and set the current state
- * to ALUA_ACCESS_STATE_TRANSITION.
- */
- old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
- atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
- ALUA_ACCESS_STATE_TRANSITION);
- tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
- ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
- ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
- /*
- * Check for the optional ALUA primary state transition delay
- */
- if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
- msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+ bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
@@ -821,9 +1011,12 @@ static int core_alua_do_transition_tg_pt(
if (!lacl)
continue;
- if (explicit &&
- (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
- (l_port != NULL) && (l_port == port))
+ if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+ (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
+ (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
+ (tg_pt_gp->tg_pt_gp_alua_port != NULL) &&
+ (tg_pt_gp->tg_pt_gp_alua_port == port))
continue;
core_scsi3_ua_allocate(lacl->se_lun_nacl,
@@ -851,20 +1044,102 @@ static int core_alua_do_transition_tg_pt(
*/
if (tg_pt_gp->tg_pt_gp_write_metadata) {
mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
- core_alua_update_tpg_primary_metadata(tg_pt_gp,
- new_state, md_buf);
+ core_alua_update_tpg_primary_metadata(tg_pt_gp);
mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
}
/*
* Set the current primary ALUA access state to the requested new state
*/
- atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
+ atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+ tg_pt_gp->tg_pt_gp_alua_pending_state);
pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
" from primary access state %s to %s\n", (explicit) ? "explicit" :
"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
- tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
- core_alua_dump_state(new_state));
+ tg_pt_gp->tg_pt_gp_id,
+ core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
+ core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ smp_mb__after_atomic_dec();
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+ if (tg_pt_gp->tg_pt_gp_transition_complete)
+ complete(tg_pt_gp->tg_pt_gp_transition_complete);
+}
+
+static int core_alua_do_transition_tg_pt(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ int new_state,
+ int explicit)
+{
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ /* Nothing to be done here */
+ if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
+ return 0;
+
+ if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+ return -EAGAIN;
+
+ /*
+ * Flush any pending transitions
+ */
+ if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
+ atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
+ ALUA_ACCESS_STATE_TRANSITION) {
+ /* Just in case */
+ tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+ tg_pt_gp->tg_pt_gp_transition_complete = &wait;
+ flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+ wait_for_completion(&wait);
+ tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+ return 0;
+ }
+
+ /*
+ * Save the old primary ALUA access state, and set the current state
+ * to ALUA_ACCESS_STATE_TRANSITION.
+ */
+ tg_pt_gp->tg_pt_gp_alua_previous_state =
+ atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+ tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+
+ atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+ ALUA_ACCESS_STATE_TRANSITION);
+ tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+ ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
+
+ /*
+ * Check for the optional ALUA primary state transition delay
+ */
+ if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
+ msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+
+ /*
+ * Take a reference for workqueue item
+ */
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+ if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
+ unsigned long transition_tmo;
+
+ transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
+ queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
+ &tg_pt_gp->tg_pt_gp_transition_work,
+ transition_tmo);
+ } else {
+ tg_pt_gp->tg_pt_gp_transition_complete = &wait;
+ queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
+ &tg_pt_gp->tg_pt_gp_transition_work, 0);
+ wait_for_completion(&wait);
+ tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+ }
return 0;
}
@@ -878,23 +1153,15 @@ int core_alua_do_port_transition(
int explicit)
{
struct se_device *dev;
- struct se_port *port;
- struct se_node_acl *nacl;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
- unsigned char *md_buf;
- int primary;
+ int primary, valid_states, rc = 0;
- if (core_alua_check_transition(new_state, &primary) != 0)
+ valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
+ if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
return -EINVAL;
- md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
- if (!md_buf) {
- pr_err("Unable to allocate buf for ALUA metadata\n");
- return -ENOMEM;
- }
-
local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
lu_gp = local_lu_gp_mem->lu_gp;
@@ -911,12 +1178,13 @@ int core_alua_do_port_transition(
* core_alua_do_transition_tg_pt() will always return
* success.
*/
- core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
- md_buf, new_state, explicit);
+ l_tg_pt_gp->tg_pt_gp_alua_port = l_port;
+ l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
+ rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
+ new_state, explicit);
atomic_dec(&lu_gp->lu_gp_ref_cnt);
smp_mb__after_atomic_dec();
- kfree(md_buf);
- return 0;
+ return rc;
}
/*
* For all other LU groups aside from 'default_lu_gp', walk all of
@@ -951,11 +1219,11 @@ int core_alua_do_port_transition(
continue;
if (l_tg_pt_gp == tg_pt_gp) {
- port = l_port;
- nacl = l_nacl;
+ tg_pt_gp->tg_pt_gp_alua_port = l_port;
+ tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
} else {
- port = NULL;
- nacl = NULL;
+ tg_pt_gp->tg_pt_gp_alua_port = NULL;
+ tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
}
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();
@@ -964,12 +1232,14 @@ int core_alua_do_port_transition(
* core_alua_do_transition_tg_pt() will always return
* success.
*/
- core_alua_do_transition_tg_pt(tg_pt_gp, port,
- nacl, md_buf, new_state, explicit);
+ rc = core_alua_do_transition_tg_pt(tg_pt_gp,
+ new_state, explicit);
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_dec();
+ if (rc)
+ break;
}
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
@@ -979,16 +1249,18 @@ int core_alua_do_port_transition(
}
spin_unlock(&lu_gp->lu_gp_lock);
- pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
- " Group IDs: %hu %s transition to primary state: %s\n",
- config_item_name(&lu_gp->lu_gp_group.cg_item),
- l_tg_pt_gp->tg_pt_gp_id, (explicit) ? "explicit" : "implicit",
- core_alua_dump_state(new_state));
+ if (!rc) {
+ pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
+ " Group IDs: %hu %s transition to primary state: %s\n",
+ config_item_name(&lu_gp->lu_gp_group.cg_item),
+ l_tg_pt_gp->tg_pt_gp_id,
+ (explicit) ? "explicit" : "implicit",
+ core_alua_dump_state(new_state));
+ }
atomic_dec(&lu_gp->lu_gp_ref_cnt);
smp_mb__after_atomic_dec();
- kfree(md_buf);
- return 0;
+ return rc;
}
/*
@@ -996,13 +1268,18 @@ int core_alua_do_port_transition(
*/
static int core_alua_update_tpg_secondary_metadata(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
- struct se_port *port,
- unsigned char *md_buf,
- u32 md_buf_len)
+ struct se_port *port)
{
+ unsigned char *md_buf;
struct se_portal_group *se_tpg = port->sep_tpg;
char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
- int len;
+ int len, rc;
+
+ md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
+ if (!md_buf) {
+ pr_err("Unable to allocate buf for ALUA metadata\n");
+ return -ENOMEM;
+ }
memset(path, 0, ALUA_METADATA_PATH_LEN);
memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
@@ -1014,7 +1291,7 @@ static int core_alua_update_tpg_secondary_metadata(
snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
- len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
+ len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
"alua_tg_pt_status=0x%02x\n",
atomic_read(&port->sep_tg_pt_secondary_offline),
port->sep_tg_pt_secondary_stat);
@@ -1023,7 +1300,10 @@ static int core_alua_update_tpg_secondary_metadata(
se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
port->sep_lun->unpacked_lun);
- return core_alua_write_tpg_metadata(path, md_buf, len);
+ rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(md_buf);
+
+ return rc;
}
static int core_alua_set_tg_pt_secondary_state(
@@ -1033,8 +1313,6 @@ static int core_alua_set_tg_pt_secondary_state(
int offline)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
- unsigned char *md_buf;
- u32 md_buf_len;
int trans_delay_msecs;
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1055,7 +1333,6 @@ static int core_alua_set_tg_pt_secondary_state(
else
atomic_set(&port->sep_tg_pt_secondary_offline, 0);
- md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
port->sep_tg_pt_secondary_stat = (explicit) ?
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
@@ -1077,23 +1354,115 @@ static int core_alua_set_tg_pt_secondary_state(
* secondary state and status
*/
if (port->sep_tg_pt_secondary_write_md) {
- md_buf = kzalloc(md_buf_len, GFP_KERNEL);
- if (!md_buf) {
- pr_err("Unable to allocate md_buf for"
- " secondary ALUA access metadata\n");
- return -ENOMEM;
- }
mutex_lock(&port->sep_tg_pt_md_mutex);
- core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
- md_buf, md_buf_len);
+ core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port);
mutex_unlock(&port->sep_tg_pt_md_mutex);
+ }
+
+ return 0;
+}
+
+struct t10_alua_lba_map *
+core_alua_allocate_lba_map(struct list_head *list,
+ u64 first_lba, u64 last_lba)
+{
+ struct t10_alua_lba_map *lba_map;
+
+ lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
+ if (!lba_map) {
+ pr_err("Unable to allocate struct t10_alua_lba_map\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
+ lba_map->lba_map_first_lba = first_lba;
+ lba_map->lba_map_last_lba = last_lba;
- kfree(md_buf);
+ list_add_tail(&lba_map->lba_map_list, list);
+ return lba_map;
+}
+
+int
+core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
+ int pg_id, int state)
+{
+ struct t10_alua_lba_map_member *lba_map_mem;
+
+ list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
+ lba_map_mem_list) {
+ if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
+ pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
+ return -EINVAL;
+ }
+ }
+
+ lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
+ if (!lba_map_mem) {
+ pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
+ return -ENOMEM;
}
+ lba_map_mem->lba_map_mem_alua_state = state;
+ lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
+ list_add_tail(&lba_map_mem->lba_map_mem_list,
+ &lba_map->lba_map_mem_list);
return 0;
}
+void
+core_alua_free_lba_map(struct list_head *lba_list)
+{
+ struct t10_alua_lba_map *lba_map, *lba_map_tmp;
+ struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
+
+ list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
+ lba_map_list) {
+ list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
+ &lba_map->lba_map_mem_list,
+ lba_map_mem_list) {
+ list_del(&lba_map_mem->lba_map_mem_list);
+ kmem_cache_free(t10_alua_lba_map_mem_cache,
+ lba_map_mem);
+ }
+ list_del(&lba_map->lba_map_list);
+ kmem_cache_free(t10_alua_lba_map_cache, lba_map);
+ }
+}
+
+void
+core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
+ int segment_size, int segment_mult)
+{
+ struct list_head old_lba_map_list;
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ int activate = 0, supported;
+
+ INIT_LIST_HEAD(&old_lba_map_list);
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ dev->t10_alua.lba_map_segment_size = segment_size;
+ dev->t10_alua.lba_map_segment_multiplier = segment_mult;
+ list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
+ if (lba_map_list) {
+ list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
+ activate = 1;
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
+ tg_pt_gp_list) {
+
+ if (!tg_pt_gp->tg_pt_gp_valid_id)
+ continue;
+ supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
+ if (activate)
+ supported |= ALUA_LBD_SUP;
+ else
+ supported &= ~ALUA_LBD_SUP;
+ tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
+ }
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+ core_alua_free_lba_map(&old_lba_map_list);
+}
+
struct t10_alua_lu_gp *
core_alua_allocate_lu_gp(const char *name, int def_group)
{
@@ -1346,8 +1715,9 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
+ INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+ core_alua_do_transition_tg_pt_work);
tg_pt_gp->tg_pt_gp_dev = dev;
- tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
/*
@@ -1475,6 +1845,8 @@ void core_alua_free_tg_pt_gp(
dev->t10_alua.alua_tg_pt_gps_counter--;
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+ flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+
/*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by
* core_alua_get_tg_pt_gp_by_name() in
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index 88e2e835f14a..0a7d65e80404 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -13,12 +13,13 @@
/*
* ASYMMETRIC ACCESS STATE field
*
- * from spc4r17 section 6.27 Table 245
+ * from spc4r36j section 6.37 Table 307
*/
#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0
#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
#define ALUA_ACCESS_STATE_STANDBY 0x2
#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
+#define ALUA_ACCESS_STATE_LBA_DEPENDENT 0x4
#define ALUA_ACCESS_STATE_OFFLINE 0xe
#define ALUA_ACCESS_STATE_TRANSITION 0xf
@@ -78,18 +79,30 @@
*/
#define ALUA_SECONDARY_METADATA_WWN_LEN 256
+/* Used by core_alua_update_tpg_(primary,secondary)_metadata */
+#define ALUA_MD_BUF_LEN 1024
+
extern struct kmem_cache *t10_alua_lu_gp_cache;
extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+extern struct kmem_cache *t10_alua_lba_map_cache;
+extern struct kmem_cache *t10_alua_lba_map_mem_cache;
extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
struct se_device *, struct se_port *,
struct se_node_acl *, int, int);
extern char *core_alua_dump_status(int);
+extern struct t10_alua_lba_map *core_alua_allocate_lba_map(
+ struct list_head *, u64, u64);
+extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int);
+extern void core_alua_free_lba_map(struct list_head *);
+extern void core_alua_set_lba_map(struct se_device *, struct list_head *,
+ int, int);
extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
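
core_alua_check_transition() in the target_core_alua.c hunks above now validates the requested ALUA state against the target port group's supported-states mask (ALUA_AO_SUP, ALUA_AN_SUP, ALUA_S_SUP, ...) before permitting a transition, and Transitioning can no longer be selected explicitly. A minimal sketch of that mask check; the bit values below are placeholders, since the real definitions live in target_core_alua.h outside this excerpt:

/* Illustrative sketch of the supported-states check; the *_SUP values are
 * assumed, not the kernel's actual definitions. */
#include <stdio.h>

#define ALUA_AO_SUP	0x01	/* active/optimized supported (assumed value)     */
#define ALUA_AN_SUP	0x02	/* active/non-optimized supported (assumed value) */
#define ALUA_S_SUP	0x04	/* standby supported (assumed value)              */

enum alua_state { ACTIVE_OPTIMIZED, ACTIVE_NON_OPTIMIZED, STANDBY };

static int check_transition(enum alua_state new_state, int valid)
{
	switch (new_state) {
	case ACTIVE_OPTIMIZED:
		return (valid & ALUA_AO_SUP) ? 0 : -1;
	case ACTIVE_NON_OPTIMIZED:
		return (valid & ALUA_AN_SUP) ? 0 : -1;
	case STANDBY:
		return (valid & ALUA_S_SUP) ? 0 : -1;
	}
	return -1;
}

int main(void)
{
	int valid = ALUA_AO_SUP | ALUA_S_SUP;	/* group supports AO and standby only */

	printf("%d\n", check_transition(ACTIVE_OPTIMIZED, valid));	/*  0: allowed  */
	printf("%d\n", check_transition(ACTIVE_NON_OPTIMIZED, valid));	/* -1: rejected */
	return 0;
}
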
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 272755d03e5a..f0e85b119692 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -643,6 +643,15 @@ SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(emulate_3pc);
SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR);
+DEF_DEV_ATTRIB(pi_prot_type);
+SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_pi_prot_type);
+SE_DEV_ATTR_RO(hw_pi_prot_type);
+
+DEF_DEV_ATTRIB(pi_prot_format);
+SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR);
+
DEF_DEV_ATTRIB(enforce_pr_isids);
SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
@@ -702,6 +711,9 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_emulate_tpws.attr,
&target_core_dev_attrib_emulate_caw.attr,
&target_core_dev_attrib_emulate_3pc.attr,
+ &target_core_dev_attrib_pi_prot_type.attr,
+ &target_core_dev_attrib_hw_pi_prot_type.attr,
+ &target_core_dev_attrib_pi_prot_format.attr,
&target_core_dev_attrib_enforce_pr_isids.attr,
&target_core_dev_attrib_is_nonrot.attr,
&target_core_dev_attrib_emulate_rest_reord.attr,
@@ -1741,6 +1753,176 @@ static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
.store = target_core_store_alua_lu_gp,
};
+static ssize_t target_core_show_dev_lba_map(void *p, char *page)
+{
+ struct se_device *dev = p;
+ struct t10_alua_lba_map *map;
+ struct t10_alua_lba_map_member *mem;
+ char *b = page;
+ int bl = 0;
+ char state;
+
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ if (!list_empty(&dev->t10_alua.lba_map_list))
+ bl += sprintf(b + bl, "%u %u\n",
+ dev->t10_alua.lba_map_segment_size,
+ dev->t10_alua.lba_map_segment_multiplier);
+ list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
+ bl += sprintf(b + bl, "%llu %llu",
+ map->lba_map_first_lba, map->lba_map_last_lba);
+ list_for_each_entry(mem, &map->lba_map_mem_list,
+ lba_map_mem_list) {
+ switch (mem->lba_map_mem_alua_state) {
+ case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+ state = 'O';
+ break;
+ case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+ state = 'A';
+ break;
+ case ALUA_ACCESS_STATE_STANDBY:
+ state = 'S';
+ break;
+ case ALUA_ACCESS_STATE_UNAVAILABLE:
+ state = 'U';
+ break;
+ default:
+ state = '.';
+ break;
+ }
+ bl += sprintf(b + bl, " %d:%c",
+ mem->lba_map_mem_alua_pg_id, state);
+ }
+ bl += sprintf(b + bl, "\n");
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ return bl;
+}
+
+static ssize_t target_core_store_dev_lba_map(
+ void *p,
+ const char *page,
+ size_t count)
+{
+ struct se_device *dev = p;
+ struct t10_alua_lba_map *lba_map = NULL;
+ struct list_head lba_list;
+ char *map_entries, *ptr;
+ char state;
+ int pg_num = -1, pg;
+ int ret = 0, num = 0, pg_id, alua_state;
+ unsigned long start_lba = -1, end_lba = -1;
+ unsigned long segment_size = -1, segment_mult = -1;
+
+ map_entries = kstrdup(page, GFP_KERNEL);
+ if (!map_entries)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&lba_list);
+ while ((ptr = strsep(&map_entries, "\n")) != NULL) {
+ if (!*ptr)
+ continue;
+
+ if (num == 0) {
+ if (sscanf(ptr, "%lu %lu\n",
+ &segment_size, &segment_mult) != 2) {
+ pr_err("Invalid line %d\n", num);
+ ret = -EINVAL;
+ break;
+ }
+ num++;
+ continue;
+ }
+ if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
+ pr_err("Invalid line %d\n", num);
+ ret = -EINVAL;
+ break;
+ }
+ ptr = strchr(ptr, ' ');
+ if (!ptr) {
+ pr_err("Invalid line %d, missing end lba\n", num);
+ ret = -EINVAL;
+ break;
+ }
+ ptr++;
+ ptr = strchr(ptr, ' ');
+ if (!ptr) {
+ pr_err("Invalid line %d, missing state definitions\n",
+ num);
+ ret = -EINVAL;
+ break;
+ }
+ ptr++;
+ lba_map = core_alua_allocate_lba_map(&lba_list,
+ start_lba, end_lba);
+ if (IS_ERR(lba_map)) {
+ ret = PTR_ERR(lba_map);
+ break;
+ }
+ pg = 0;
+ while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
+ switch (state) {
+ case 'O':
+ alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
+ break;
+ case 'A':
+ alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
+ break;
+ case 'S':
+ alua_state = ALUA_ACCESS_STATE_STANDBY;
+ break;
+ case 'U':
+ alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
+ break;
+ default:
+ pr_err("Invalid ALUA state '%c'\n", state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = core_alua_allocate_lba_map_mem(lba_map,
+ pg_id, alua_state);
+ if (ret) {
+ pr_err("Invalid target descriptor %d:%c "
+ "at line %d\n",
+ pg_id, state, num);
+ break;
+ }
+ pg++;
+ ptr = strchr(ptr, ' ');
+ if (ptr)
+ ptr++;
+ else
+ break;
+ }
+ if (pg_num == -1)
+ pg_num = pg;
+ else if (pg != pg_num) {
+ pr_err("Only %d from %d port groups definitions "
+ "at line %d\n", pg, pg_num, num);
+ ret = -EINVAL;
+ break;
+ }
+ num++;
+ }
+out:
+ if (ret) {
+ core_alua_free_lba_map(&lba_list);
+ count = ret;
+ } else
+ core_alua_set_lba_map(dev, &lba_list,
+ segment_size, segment_mult);
+ kfree(map_entries);
+ return count;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "lba_map",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = target_core_show_dev_lba_map,
+ .store = target_core_store_dev_lba_map,
+};
+
static struct configfs_attribute *lio_core_dev_attrs[] = {
&target_core_attr_dev_info.attr,
&target_core_attr_dev_control.attr,
@@ -1748,6 +1930,7 @@ static struct configfs_attribute *lio_core_dev_attrs[] = {
&target_core_attr_dev_udev_path.attr,
&target_core_attr_dev_enable.attr,
&target_core_attr_dev_alua_lu_gp.attr,
+ &target_core_attr_dev_lba_map.attr,
NULL,
};
@@ -2054,6 +2237,13 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
" transition while TPGS_IMPLICIT_ALUA is disabled\n");
return -EINVAL;
}
+ if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
+ new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
+ /* LBA DEPENDENT is only allowed with implicit ALUA */
+ pr_err("Unable to process implicit configfs ALUA transition"
+ " while explicit ALUA management is enabled\n");
+ return -EINVAL;
+ }
ret = core_alua_do_port_transition(tg_pt_gp, dev,
NULL, NULL, new_state, 0);
@@ -2188,7 +2378,7 @@ SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent,
tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent,
tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
-SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO | S_IWUSR);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO);
SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable,
tg_pt_gp_alua_supported_states, ALUA_U_SUP);
@@ -2937,7 +3127,7 @@ static int __init target_core_init_configfs(void)
* and ALUA Logical Unit Group and Target Port Group infrastructure.
*/
target_cg = &subsys->su_group;
- target_cg->default_groups = kmalloc(sizeof(struct config_group) * 2,
+ target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!target_cg->default_groups) {
pr_err("Unable to allocate target_cg->default_groups\n");
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 207b340498a3..65001e133670 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -918,6 +918,90 @@ int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
return 0;
}
+int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
+{
+ int rc, old_prot = dev->dev_attrib.pi_prot_type;
+
+ if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
+ pr_err("Illegal value %d for pi_prot_type\n", flag);
+ return -EINVAL;
+ }
+ if (flag == 2) {
+ pr_err("DIF TYPE2 protection currently not supported\n");
+ return -ENOSYS;
+ }
+ if (dev->dev_attrib.hw_pi_prot_type) {
+ pr_warn("DIF protection enabled on underlying hardware,"
+ " ignoring\n");
+ return 0;
+ }
+ if (!dev->transport->init_prot || !dev->transport->free_prot) {
+ pr_err("DIF protection not supported by backend: %s\n",
+ dev->transport->name);
+ return -ENOSYS;
+ }
+ if (!(dev->dev_flags & DF_CONFIGURED)) {
+ pr_err("DIF protection requires device to be configured\n");
+ return -ENODEV;
+ }
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to change SE Device PROT type while"
+ " export_count is %d\n", dev, dev->export_count);
+ return -EINVAL;
+ }
+
+ dev->dev_attrib.pi_prot_type = flag;
+
+ if (flag && !old_prot) {
+ rc = dev->transport->init_prot(dev);
+ if (rc) {
+ dev->dev_attrib.pi_prot_type = old_prot;
+ return rc;
+ }
+
+ } else if (!flag && old_prot) {
+ dev->transport->free_prot(dev);
+ }
+ pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
+
+ return 0;
+}
+
+int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
+{
+ int rc;
+
+ if (!flag)
+ return 0;
+
+ if (flag != 1) {
+ pr_err("Illegal value %d for pi_prot_format\n", flag);
+ return -EINVAL;
+ }
+ if (!dev->transport->format_prot) {
+ pr_err("DIF protection format not supported by backend %s\n",
+ dev->transport->name);
+ return -ENOSYS;
+ }
+ if (!(dev->dev_flags & DF_CONFIGURED)) {
+ pr_err("DIF protection format requires device to be configured\n");
+ return -ENODEV;
+ }
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to format SE Device PROT type while"
+ " export_count is %d\n", dev, dev->export_count);
+ return -EINVAL;
+ }
+
+ rc = dev->transport->format_prot(dev);
+ if (rc)
+ return rc;
+
+ pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
+
+ return 0;
+}
+
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
@@ -1106,29 +1190,34 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
dev->dev_attrib.block_size = block_size;
pr_debug("dev[%p]: SE Device block_size changed to %u\n",
dev, block_size);
+
+ if (dev->dev_attrib.max_bytes_per_io)
+ dev->dev_attrib.hw_max_sectors =
+ dev->dev_attrib.max_bytes_per_io / block_size;
+
return 0;
}
struct se_lun *core_dev_add_lun(
struct se_portal_group *tpg,
struct se_device *dev,
- u32 lun)
+ u32 unpacked_lun)
{
- struct se_lun *lun_p;
+ struct se_lun *lun;
int rc;
- lun_p = core_tpg_pre_addlun(tpg, lun);
- if (IS_ERR(lun_p))
- return lun_p;
+ lun = core_tpg_alloc_lun(tpg, unpacked_lun);
+ if (IS_ERR(lun))
+ return lun;
- rc = core_tpg_post_addlun(tpg, lun_p,
+ rc = core_tpg_add_lun(tpg, lun,
TRANSPORT_LUNFLAGS_READ_WRITE, dev);
if (rc < 0)
return ERR_PTR(rc);
pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
- tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
/*
* Update LUN maps for dynamically added initiators when
@@ -1149,7 +1238,7 @@ struct se_lun *core_dev_add_lun(
spin_unlock_irq(&tpg->acl_node_lock);
}
- return lun_p;
+ return lun;
}
/* core_dev_del_lun():
@@ -1415,6 +1504,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_link_magic = SE_DEV_LINK_MAGIC;
dev->se_hba = hba;
dev->transport = hba->transport;
+ dev->prot_length = sizeof(struct se_dif_v1_tuple);
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
@@ -1439,6 +1529,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
+ INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
+ spin_lock_init(&dev->t10_alua.lba_map_lock);
dev->t10_wwn.t10_dev = dev;
dev->t10_alua.t10_dev = dev;
@@ -1455,6 +1547,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
+ dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
dev->dev_attrib.is_nonrot = DA_IS_NONROT;
dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
@@ -1583,9 +1676,13 @@ void target_free_device(struct se_device *dev)
}
core_alua_free_lu_gp_mem(dev);
+ core_alua_set_lba_map(dev, NULL, 0, 0);
core_scsi3_free_all_registrations(dev);
se_release_vpd_for_dev(dev);
+ if (dev->transport->free_prot)
+ dev->transport->free_prot(dev);
+
dev->transport->free_device(dev);
}
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index dae2ad6a669e..7de9f0475d05 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -906,7 +906,7 @@ static struct config_group *target_fabric_make_lun(
lun_cg->default_groups[1] = NULL;
port_stat_grp = &lun->port_stat_grps.stat_group;
- port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
+ port_stat_grp->default_groups = kzalloc(sizeof(struct config_group *) * 4,
GFP_KERNEL);
if (!port_stat_grp->default_groups) {
pr_err("Unable to allocate port_stat_grp->default_groups\n");
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 0e34cda3271e..cf991a91a8a9 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
TARGET_CORE_MOD_VERSION);
- pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
- " MaxSectors: %u\n",
- hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
+ pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
+ hba->hba_id, fd_host->fd_host_id);
return 0;
}
@@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev)
}
dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
- dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
+ dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
+ dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
@@ -257,6 +257,72 @@ static void fd_free_device(struct se_device *dev)
kfree(fd_dev);
}
+static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
+ int is_write)
+{
+ struct se_device *se_dev = cmd->se_dev;
+ struct fd_dev *dev = FD_DEV(se_dev);
+ struct file *prot_fd = dev->fd_prot_file;
+ struct scatterlist *sg;
+ loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
+ unsigned char *buf;
+ u32 prot_size, len, size;
+ int rc, ret = 1, i;
+
+ prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
+ se_dev->prot_length;
+
+ if (!is_write) {
+ fd_prot->prot_buf = vzalloc(prot_size);
+ if (!fd_prot->prot_buf) {
+ pr_err("Unable to allocate fd_prot->prot_buf\n");
+ return -ENOMEM;
+ }
+ buf = fd_prot->prot_buf;
+
+ fd_prot->prot_sg_nents = cmd->t_prot_nents;
+ fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
+ fd_prot->prot_sg_nents, GFP_KERNEL);
+ if (!fd_prot->prot_sg) {
+ pr_err("Unable to allocate fd_prot->prot_sg\n");
+ vfree(fd_prot->prot_buf);
+ return -ENOMEM;
+ }
+ size = prot_size;
+
+ for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
+
+ len = min_t(u32, PAGE_SIZE, size);
+ sg_set_buf(sg, buf, len);
+ size -= len;
+ buf += len;
+ }
+ }
+
+ if (is_write) {
+ rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos);
+ if (rc < 0 || prot_size != rc) {
+ pr_err("kernel_write() for fd_do_prot_rw failed:"
+ " %d\n", rc);
+ ret = -EINVAL;
+ }
+ } else {
+ rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size);
+ if (rc < 0) {
+ pr_err("kernel_read() for fd_do_prot_rw failed:"
+ " %d\n", rc);
+ ret = -EINVAL;
+ }
+ }
+
+ if (is_write || ret < 0) {
+ kfree(fd_prot->prot_sg);
+ vfree(fd_prot->prot_buf);
+ }
+
+ return ret;
+}
+
static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents, int is_write)
{
@@ -551,6 +617,8 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
{
struct se_device *dev = cmd->se_dev;
+ struct fd_prot fd_prot;
+ sense_reason_t rc;
int ret = 0;
/*
@@ -558,8 +626,48 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
* physical memory addresses to struct iovec virtual memory.
*/
if (data_direction == DMA_FROM_DEVICE) {
+ memset(&fd_prot, 0, sizeof(struct fd_prot));
+
+ if (cmd->prot_type) {
+ ret = fd_do_prot_rw(cmd, &fd_prot, false);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
+
+ if (ret > 0 && cmd->prot_type) {
+ u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+ rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors,
+ 0, fd_prot.prot_sg, 0);
+ if (rc) {
+ kfree(fd_prot.prot_sg);
+ vfree(fd_prot.prot_buf);
+ return rc;
+ }
+ kfree(fd_prot.prot_sg);
+ vfree(fd_prot.prot_buf);
+ }
} else {
+ memset(&fd_prot, 0, sizeof(struct fd_prot));
+
+ if (cmd->prot_type) {
+ u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+ ret = fd_do_prot_rw(cmd, &fd_prot, false);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors,
+ 0, fd_prot.prot_sg, 0);
+ if (rc) {
+ kfree(fd_prot.prot_sg);
+ vfree(fd_prot.prot_buf);
+ return rc;
+ }
+ }
+
ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
/*
* Perform implicit vfs_fsync_range() for fd_do_writev() ops
@@ -576,10 +684,19 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
vfs_fsync_range(fd_dev->fd_file, start, end, 1);
}
+
+ if (ret > 0 && cmd->prot_type) {
+ ret = fd_do_prot_rw(cmd, &fd_prot, true);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
}
- if (ret < 0)
+ if (ret < 0) {
+ kfree(fd_prot.prot_sg);
+ vfree(fd_prot.prot_buf);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
if (ret)
target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -700,6 +817,140 @@ static sector_t fd_get_blocks(struct se_device *dev)
dev->dev_attrib.block_size);
}
+static int fd_init_prot(struct se_device *dev)
+{
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ struct file *prot_file, *file = fd_dev->fd_file;
+ struct inode *inode;
+ int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+ char buf[FD_MAX_DEV_PROT_NAME];
+
+ if (!file) {
+ pr_err("Unable to locate fd_dev->fd_file\n");
+ return -ENODEV;
+ }
+
+ inode = file->f_mapping->host;
+ if (S_ISBLK(inode->i_mode)) {
+ pr_err("FILEIO Protection emulation only supported on"
+ " !S_ISBLK\n");
+ return -ENOSYS;
+ }
+
+ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
+ flags &= ~O_DSYNC;
+
+ snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
+ fd_dev->fd_dev_name);
+
+ prot_file = filp_open(buf, flags, 0600);
+ if (IS_ERR(prot_file)) {
+ pr_err("filp_open(%s) failed\n", buf);
+ ret = PTR_ERR(prot_file);
+ return ret;
+ }
+ fd_dev->fd_prot_file = prot_file;
+
+ return 0;
+}
+
+static void fd_init_format_buf(struct se_device *dev, unsigned char *buf,
+ u32 unit_size, u32 *ref_tag, u16 app_tag,
+ bool inc_reftag)
+{
+ unsigned char *p = buf;
+ int i;
+
+ for (i = 0; i < unit_size; i += dev->prot_length) {
+ *((u16 *)&p[0]) = 0xffff;
+ *((__be16 *)&p[2]) = cpu_to_be16(app_tag);
+ *((__be32 *)&p[4]) = cpu_to_be32(*ref_tag);
+
+ if (inc_reftag)
+ (*ref_tag)++;
+
+ p += dev->prot_length;
+ }
+}
+
+static int fd_format_prot(struct se_device *dev)
+{
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ struct file *prot_fd = fd_dev->fd_prot_file;
+ sector_t prot_length, prot;
+ unsigned char *buf;
+ loff_t pos = 0;
+ u32 ref_tag = 0;
+ int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
+ int rc, ret = 0, size, len;
+ bool inc_reftag = false;
+
+ if (!dev->dev_attrib.pi_prot_type) {
+ pr_err("Unable to format_prot while pi_prot_type == 0\n");
+ return -ENODEV;
+ }
+ if (!prot_fd) {
+ pr_err("Unable to locate fd_dev->fd_prot_file\n");
+ return -ENODEV;
+ }
+
+ switch (dev->dev_attrib.pi_prot_type) {
+ case TARGET_DIF_TYPE3_PROT:
+ ref_tag = 0xffffffff;
+ break;
+ case TARGET_DIF_TYPE2_PROT:
+ case TARGET_DIF_TYPE1_PROT:
+ inc_reftag = true;
+ break;
+ default:
+ break;
+ }
+
+ buf = vzalloc(unit_size);
+ if (!buf) {
+ pr_err("Unable to allocate FILEIO prot buf\n");
+ return -ENOMEM;
+ }
+
+ prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
+ size = prot_length;
+
+ pr_debug("Using FILEIO prot_length: %llu\n",
+ (unsigned long long)prot_length);
+
+ for (prot = 0; prot < prot_length; prot += unit_size) {
+
+ fd_init_format_buf(dev, buf, unit_size, &ref_tag, 0xffff,
+ inc_reftag);
+
+ len = min(unit_size, size);
+
+ rc = kernel_write(prot_fd, buf, len, pos);
+ if (rc != len) {
+ pr_err("vfs_write to prot file failed: %d\n", rc);
+ ret = -ENODEV;
+ goto out;
+ }
+ pos += len;
+ size -= len;
+ }
+
+out:
+ vfree(buf);
+ return ret;
+}
+
+static void fd_free_prot(struct se_device *dev)
+{
+ struct fd_dev *fd_dev = FD_DEV(dev);
+
+ if (!fd_dev->fd_prot_file)
+ return;
+
+ filp_close(fd_dev->fd_prot_file, NULL);
+ fd_dev->fd_prot_file = NULL;
+}
+
static struct sbc_ops fd_sbc_ops = {
.execute_rw = fd_execute_rw,
.execute_sync_cache = fd_execute_sync_cache,
@@ -730,6 +981,9 @@ static struct se_subsystem_api fileio_template = {
.show_configfs_dev_params = fd_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = fd_get_blocks,
+ .init_prot = fd_init_prot,
+ .format_prot = fd_format_prot,
+ .free_prot = fd_free_prot,
};
static int __init fileio_module_init(void)
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 37ffc5bd2399..182cbb295039 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -4,10 +4,14 @@
#define FD_VERSION "4.0"
#define FD_MAX_DEV_NAME 256
+#define FD_MAX_DEV_PROT_NAME FD_MAX_DEV_NAME + 16
#define FD_DEVICE_QUEUE_DEPTH 32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE 512
-#define FD_MAX_SECTORS 2048
+/*
+ * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
+ */
+#define FD_MAX_BYTES 8388608
#define RRF_EMULATE_CDB 0x01
#define RRF_GOT_LBA 0x02
@@ -15,6 +19,13 @@
#define FBDF_HAS_PATH 0x01
#define FBDF_HAS_SIZE 0x02
#define FDBD_HAS_BUFFERED_IO_WCE 0x04
+#define FDBD_FORMAT_UNIT_SIZE 2048
+
+struct fd_prot {
+ unsigned char *prot_buf;
+ struct scatterlist *prot_sg;
+ u32 prot_sg_nents;
+};
struct fd_dev {
struct se_device dev;
@@ -29,6 +40,7 @@ struct fd_dev {
u32 fd_block_size;
unsigned long long fd_dev_size;
struct file *fd_file;
+ struct file *fd_prot_file;
/* FILEIO HBA device is connected to */
struct fd_host *fd_host;
} ____cacheline_aligned;
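The FD_MAX_BYTES value above follows from the 2048-iovec limit per vfs_writev()/vfs_readv() call at one page per iovec: 2048 * 4096 = 8388608 bytes, assuming 4 KiB pages. A small sketch of the hw_max_sectors values this yields for common block sizes:

/*
 * Worked example (sketch): hw_max_sectors = max_bytes_per_io / block_size,
 * as set up in fd_configure_device() and se_dev_set_block_size() above.
 * The 4 KiB page size is an assumption for the arithmetic.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int iovecs = 2048;     /* per vfs_writev/readv call */
	const unsigned int page_size = 4096;  /* assumed PAGE_SIZE */
	const unsigned int max_bytes = iovecs * page_size;  /* 8388608 */

	for (unsigned int bs = 512; bs <= 4096; bs *= 2)
		printf("block_size %4u -> hw_max_sectors %u\n",
		       bs, max_bytes / bs);
	return 0;
}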
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c87959f12760..554d4f75a75a 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -91,6 +91,7 @@ static int iblock_configure_device(struct se_device *dev)
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct request_queue *q;
struct block_device *bd = NULL;
+ struct blk_integrity *bi;
fmode_t mode;
int ret = -ENOMEM;
@@ -155,8 +156,40 @@ static int iblock_configure_device(struct se_device *dev)
if (blk_queue_nonrot(q))
dev->dev_attrib.is_nonrot = 1;
+ bi = bdev_get_integrity(bd);
+ if (bi) {
+ struct bio_set *bs = ib_dev->ibd_bio_set;
+
+ if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
+ !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
+ pr_err("IBLOCK export of blk_integrity: %s not"
+ " supported\n", bi->name);
+ ret = -ENOSYS;
+ goto out_blkdev_put;
+ }
+
+ if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
+ dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
+ } else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
+ dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
+ }
+
+ if (dev->dev_attrib.pi_prot_type) {
+ if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
+ pr_err("Unable to allocate bioset for PI\n");
+ ret = -ENOMEM;
+ goto out_blkdev_put;
+ }
+ pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
+ bs->bio_integrity_pool);
+ }
+ dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
+ }
+
return 0;
+out_blkdev_put:
+ blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
bioset_free(ib_dev->ibd_bio_set);
ib_dev->ibd_bio_set = NULL;
@@ -170,8 +203,10 @@ static void iblock_free_device(struct se_device *dev)
if (ib_dev->ibd_bd != NULL)
blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
- if (ib_dev->ibd_bio_set != NULL)
+ if (ib_dev->ibd_bio_set != NULL) {
+ bioset_integrity_free(ib_dev->ibd_bio_set);
bioset_free(ib_dev->ibd_bio_set);
+ }
kfree(ib_dev);
}
@@ -319,7 +354,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
bio->bi_bdev = ib_dev->ibd_bd;
bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done;
- bio->bi_sector = lba;
+ bio->bi_iter.bi_sector = lba;
return bio;
}
@@ -586,13 +621,58 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
return bl;
}
+static int
+iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct blk_integrity *bi;
+ struct bio_integrity_payload *bip;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct scatterlist *sg;
+ int i, rc;
+
+ bi = bdev_get_integrity(ib_dev->ibd_bd);
+ if (!bi) {
+ pr_err("Unable to locate bio_integrity\n");
+ return -ENODEV;
+ }
+
+ bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
+ if (!bip) {
+ pr_err("Unable to allocate bio_integrity_payload\n");
+ return -ENOMEM;
+ }
+
+ bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
+ dev->prot_length;
+ bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
+
+ pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
+ (unsigned long long)bip->bip_iter.bi_sector);
+
+ for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {
+
+ rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
+ sg->offset);
+ if (rc != sg->length) {
+ pr_err("bio_integrity_add_page() failed; %d\n", rc);
+ return -ENOMEM;
+ }
+
+ pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
+ sg_page(sg), sg->length, sg->offset);
+ }
+
+ return 0;
+}
+
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
{
struct se_device *dev = cmd->se_dev;
struct iblock_req *ibr;
- struct bio *bio;
+ struct bio *bio, *bio_start;
struct bio_list list;
struct scatterlist *sg;
u32 sg_num = sgl_nents;
@@ -655,6 +735,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (!bio)
goto fail_free_ibr;
+ bio_start = bio;
bio_list_init(&list);
bio_list_add(&list, bio);
@@ -688,6 +769,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
sg_num--;
}
+ if (cmd->prot_type) {
+ int rc = iblock_alloc_bip(cmd, bio_start);
+ if (rc)
+ goto fail_put_bios;
+ }
+
iblock_submit_bios(&list, rw);
iblock_complete_cmd(cmd);
return 0;
@@ -763,7 +850,7 @@ iblock_parse_cdb(struct se_cmd *cmd)
return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}
-bool iblock_get_write_cache(struct se_device *dev)
+static bool iblock_get_write_cache(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct block_device *bd = ib_dev->ibd_bd;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 47b63b094cdc..de9cab708f45 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -35,6 +35,8 @@ int se_dev_set_emulate_tpu(struct se_device *, int);
int se_dev_set_emulate_tpws(struct se_device *, int);
int se_dev_set_emulate_caw(struct se_device *, int);
int se_dev_set_emulate_3pc(struct se_device *, int);
+int se_dev_set_pi_prot_type(struct se_device *, int);
+int se_dev_set_pi_prot_format(struct se_device *, int);
int se_dev_set_enforce_pr_isids(struct se_device *, int);
int se_dev_set_is_nonrot(struct se_device *, int);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
@@ -77,9 +79,9 @@ struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tp
const char *);
void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
-struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
-int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
- u32, void *);
+struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32);
+int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
+ u32, struct se_device *);
struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun);
int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 2f5d77932c80..3013287a2aaa 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -2009,7 +2009,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
sense_reason_t ret = TCM_NO_SENSE;
- int pr_holder = 0;
+ int pr_holder = 0, type;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
@@ -2131,6 +2131,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
ret = TCM_RESERVATION_CONFLICT;
goto out;
}
+ type = pr_reg->pr_res_type;
spin_lock(&pr_tmpl->registration_lock);
/*
@@ -2161,6 +2162,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
* Release the calling I_T Nexus registration now..
*/
__core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1);
+ pr_reg = NULL;
/*
* From spc4r17, section 5.7.11.3 Unregistering
@@ -2174,8 +2176,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
* RESERVATIONS RELEASED.
*/
if (pr_holder &&
- (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
- pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
+ (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
+ type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
list_for_each_entry(pr_reg_p,
&pr_tmpl->registration_list,
pr_reg_list) {
@@ -2194,7 +2196,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
ret = core_scsi3_update_and_write_aptpl(dev, aptpl);
out:
- core_scsi3_put_pr_reg(pr_reg);
+ if (pr_reg)
+ core_scsi3_put_pr_reg(pr_reg);
return ret;
}
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index ed75cdd32cb0..2ee2936fa0bd 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -43,6 +43,11 @@
#define PR_APTPL_MAX_IPORT_LEN 256
#define PR_APTPL_MAX_TPORT_LEN 256
+/*
+ * Function defined in target_core_spc.c
+ */
+void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
+
extern struct kmem_cache *t10_pr_reg_cache;
extern void core_pr_dump_initiator_port(struct t10_pr_registration *,
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4ffe5f2ec0e9..66a5aba5a0d9 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -78,23 +78,14 @@ static void rd_detach_hba(struct se_hba *hba)
hba->hba_ptr = NULL;
}
-/* rd_release_device_space():
- *
- *
- */
-static void rd_release_device_space(struct rd_dev *rd_dev)
+static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
+ u32 sg_table_count)
{
- u32 i, j, page_count = 0, sg_per_table;
- struct rd_dev_sg_table *sg_table;
struct page *pg;
struct scatterlist *sg;
+ u32 i, j, page_count = 0, sg_per_table;
- if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
- return;
-
- sg_table = rd_dev->sg_table_array;
-
- for (i = 0; i < rd_dev->sg_table_count; i++) {
+ for (i = 0; i < sg_table_count; i++) {
sg = sg_table[i].sg_table;
sg_per_table = sg_table[i].rd_sg_count;
@@ -105,16 +96,28 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
page_count++;
}
}
-
kfree(sg);
}
+ kfree(sg_table);
+ return page_count;
+}
+
+static void rd_release_device_space(struct rd_dev *rd_dev)
+{
+ u32 page_count;
+
+ if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
+ return;
+
+ page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
+ rd_dev->sg_table_count);
+
pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
" Device ID: %u, pages %u in %u tables total bytes %lu\n",
rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
- kfree(sg_table);
rd_dev->sg_table_array = NULL;
rd_dev->sg_table_count = 0;
}
@@ -124,38 +127,15 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
*
*
*/
-static int rd_build_device_space(struct rd_dev *rd_dev)
+static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
+ u32 total_sg_needed, unsigned char init_payload)
{
- u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
+ u32 i = 0, j, page_offset = 0, sg_per_table;
u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
- struct rd_dev_sg_table *sg_table;
struct page *pg;
struct scatterlist *sg;
-
- if (rd_dev->rd_page_count <= 0) {
- pr_err("Illegal page count: %u for Ramdisk device\n",
- rd_dev->rd_page_count);
- return -EINVAL;
- }
-
- /* Don't need backing pages for NULLIO */
- if (rd_dev->rd_flags & RDF_NULLIO)
- return 0;
-
- total_sg_needed = rd_dev->rd_page_count;
-
- sg_tables = (total_sg_needed / max_sg_per_table) + 1;
-
- sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
- if (!sg_table) {
- pr_err("Unable to allocate memory for Ramdisk"
- " scatterlist tables\n");
- return -ENOMEM;
- }
-
- rd_dev->sg_table_array = sg_table;
- rd_dev->sg_table_count = sg_tables;
+ unsigned char *p;
while (total_sg_needed) {
sg_per_table = (total_sg_needed > max_sg_per_table) ?
@@ -186,16 +166,114 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
}
sg_assign_page(&sg[j], pg);
sg[j].length = PAGE_SIZE;
+
+ p = kmap(pg);
+ memset(p, init_payload, PAGE_SIZE);
+ kunmap(pg);
}
page_offset += sg_per_table;
total_sg_needed -= sg_per_table;
}
+ return 0;
+}
+
+static int rd_build_device_space(struct rd_dev *rd_dev)
+{
+ struct rd_dev_sg_table *sg_table;
+ u32 sg_tables, total_sg_needed;
+ u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
+ int rc;
+
+ if (rd_dev->rd_page_count <= 0) {
+ pr_err("Illegal page count: %u for Ramdisk device\n",
+ rd_dev->rd_page_count);
+ return -EINVAL;
+ }
+
+ /* Don't need backing pages for NULLIO */
+ if (rd_dev->rd_flags & RDF_NULLIO)
+ return 0;
+
+ total_sg_needed = rd_dev->rd_page_count;
+
+ sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+ sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+ if (!sg_table) {
+ pr_err("Unable to allocate memory for Ramdisk"
+ " scatterlist tables\n");
+ return -ENOMEM;
+ }
+
+ rd_dev->sg_table_array = sg_table;
+ rd_dev->sg_table_count = sg_tables;
+
+ rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
+ if (rc)
+ return rc;
+
pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
- " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
- rd_dev->rd_dev_id, rd_dev->rd_page_count,
- rd_dev->sg_table_count);
+ " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+ rd_dev->rd_dev_id, rd_dev->rd_page_count,
+ rd_dev->sg_table_count);
+
+ return 0;
+}
+
+static void rd_release_prot_space(struct rd_dev *rd_dev)
+{
+ u32 page_count;
+
+ if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
+ return;
+
+ page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
+ rd_dev->sg_prot_count);
+
+ pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
+ " Device ID: %u, pages %u in %u tables total bytes %lu\n",
+ rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
+ rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
+
+ rd_dev->sg_prot_array = NULL;
+ rd_dev->sg_prot_count = 0;
+}
+
+static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
+{
+ struct rd_dev_sg_table *sg_table;
+ u32 total_sg_needed, sg_tables;
+ u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
+ int rc;
+
+ if (rd_dev->rd_flags & RDF_NULLIO)
+ return 0;
+
+ total_sg_needed = rd_dev->rd_page_count / prot_length;
+
+ sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+ sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+ if (!sg_table) {
+ pr_err("Unable to allocate memory for Ramdisk protection"
+ " scatterlist tables\n");
+ return -ENOMEM;
+ }
+
+ rd_dev->sg_prot_array = sg_table;
+ rd_dev->sg_prot_count = sg_tables;
+
+ rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
+ if (rc)
+ return rc;
+
+ pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
+ " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+ rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);
return 0;
}
@@ -278,6 +356,26 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
+static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
+{
+ struct rd_dev_sg_table *sg_table;
+ u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
+
+ i = page / sg_per_table;
+ if (i < rd_dev->sg_prot_count) {
+ sg_table = &rd_dev->sg_prot_array[i];
+ if ((sg_table->page_start_offset <= page) &&
+ (sg_table->page_end_offset >= page))
+ return sg_table;
+ }
+
+ pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
+ page);
+
+ return NULL;
+}
+
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
@@ -292,6 +390,7 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
u32 rd_page;
u32 src_len;
u64 tmp;
+ sense_reason_t rc;
if (dev->rd_flags & RDF_NULLIO) {
target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -314,6 +413,28 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
cmd->t_task_lba, rd_size, rd_page, rd_offset);
+ if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
+ struct rd_dev_sg_table *prot_table;
+ struct scatterlist *prot_sg;
+ u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
+ u32 prot_offset, prot_page;
+
+ tmp = cmd->t_task_lba * se_dev->prot_length;
+ prot_offset = do_div(tmp, PAGE_SIZE);
+ prot_page = tmp;
+
+ prot_table = rd_get_prot_table(dev, prot_page);
+ if (!prot_table)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
+
+ rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
+ prot_sg, prot_offset);
+ if (rc)
+ return rc;
+ }
+
src_len = PAGE_SIZE - rd_offset;
sg_miter_start(&m, sgl, sgl_nents,
data_direction == DMA_FROM_DEVICE ?
@@ -375,6 +496,28 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
sg_miter_stop(&m);
+ if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
+ struct rd_dev_sg_table *prot_table;
+ struct scatterlist *prot_sg;
+ u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
+ u32 prot_offset, prot_page;
+
+ tmp = cmd->t_task_lba * se_dev->prot_length;
+ prot_offset = do_div(tmp, PAGE_SIZE);
+ prot_page = tmp;
+
+ prot_table = rd_get_prot_table(dev, prot_page);
+ if (!prot_table)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
+
+ rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
+ prot_sg, prot_offset);
+ if (rc)
+ return rc;
+ }
+
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
@@ -456,6 +599,23 @@ static sector_t rd_get_blocks(struct se_device *dev)
return blocks_long;
}
+static int rd_init_prot(struct se_device *dev)
+{
+ struct rd_dev *rd_dev = RD_DEV(dev);
+
+ if (!dev->dev_attrib.pi_prot_type)
+ return 0;
+
+ return rd_build_prot_space(rd_dev, dev->prot_length);
+}
+
+static void rd_free_prot(struct se_device *dev)
+{
+ struct rd_dev *rd_dev = RD_DEV(dev);
+
+ rd_release_prot_space(rd_dev);
+}
+
static struct sbc_ops rd_sbc_ops = {
.execute_rw = rd_execute_rw,
};
@@ -481,6 +641,8 @@ static struct se_subsystem_api rd_mcp_template = {
.show_configfs_dev_params = rd_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = rd_get_blocks,
+ .init_prot = rd_init_prot,
+ .free_prot = rd_free_prot,
};
int __init rd_module_init(void)
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 1789d1e14395..cc46a6a89b38 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -33,8 +33,12 @@ struct rd_dev {
u32 rd_page_count;
/* Number of SG tables in sg_table_array */
u32 sg_table_count;
+ /* Number of SG tables in sg_prot_array */
+ u32 sg_prot_count;
/* Array of rd_dev_sg_table_t containing scatterlists */
struct rd_dev_sg_table *sg_table_array;
+ /* Array of rd_dev_sg_table containing protection scatterlists */
+ struct rd_dev_sg_table *sg_prot_array;
/* Ramdisk HBA device is connected to */
struct rd_host *rd_host;
} ____cacheline_aligned;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 52ae54e60105..a4489444ffbc 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
+#include <linux/crc-t10dif.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
@@ -33,7 +34,7 @@
#include "target_core_internal.h"
#include "target_core_ua.h"
-
+#include "target_core_alua.h"
static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
@@ -105,6 +106,11 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
buf[11] = dev->dev_attrib.block_size & 0xff;
+ /*
+ * Set P_TYPE and PROT_EN bits for DIF support
+ */
+ if (dev->dev_attrib.pi_prot_type)
+ buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
if (dev->transport->get_lbppbe)
buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
@@ -563,6 +569,44 @@ sbc_compare_and_write(struct se_cmd *cmd)
return TCM_NO_SENSE;
}
+static bool
+sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
+ u32 sectors)
+{
+ if (!cmd->t_prot_sg || !cmd->t_prot_nents)
+ return true;
+
+ switch (dev->dev_attrib.pi_prot_type) {
+ case TARGET_DIF_TYPE3_PROT:
+ if (!(cdb[1] & 0xe0))
+ return true;
+
+ cmd->reftag_seed = 0xffffffff;
+ break;
+ case TARGET_DIF_TYPE2_PROT:
+ if (cdb[1] & 0xe0)
+ return false;
+
+ cmd->reftag_seed = cmd->t_task_lba;
+ break;
+ case TARGET_DIF_TYPE1_PROT:
+ if (!(cdb[1] & 0xe0))
+ return true;
+
+ cmd->reftag_seed = cmd->t_task_lba;
+ break;
+ case TARGET_DIF_TYPE0_PROT:
+ default:
+ return true;
+ }
+
+ cmd->prot_type = dev->dev_attrib.pi_prot_type;
+ cmd->prot_length = dev->prot_length * sectors;
+ cmd->prot_handover = PROT_SEPERATED;
+
+ return true;
+}
+
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
@@ -583,6 +627,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
case READ_10:
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_execute_rw;
@@ -590,6 +638,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
case READ_12:
sectors = transport_get_sectors_12(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_execute_rw;
@@ -597,6 +649,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
case READ_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_execute_rw;
@@ -612,6 +668,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
case WRITE_VERIFY:
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -621,6 +681,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
case WRITE_12:
sectors = transport_get_sectors_12(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -630,6 +694,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
case WRITE_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -731,6 +799,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
case SAI_READ_CAPACITY_16:
cmd->execute_cmd = sbc_emulate_readcapacity_16;
break;
+ case SAI_REPORT_REFERRALS:
+ cmd->execute_cmd = target_emulate_report_referrals;
+ break;
default:
pr_err("Unsupported SA: 0x%02x\n",
cmd->t_task_cdb[1] & 0x1f);
@@ -959,3 +1030,190 @@ err:
return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);
+
+static sense_reason_t
+sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
+ const void *p, sector_t sector, unsigned int ei_lba)
+{
+ int block_size = dev->dev_attrib.block_size;
+ __be16 csum;
+
+ csum = cpu_to_be16(crc_t10dif(p, block_size));
+
+ if (sdt->guard_tag != csum) {
+ pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
+ " csum 0x%04x\n", (unsigned long long)sector,
+ be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
+ return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+ }
+
+ if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
+ be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
+ pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
+ " sector MSB: 0x%08x\n", (unsigned long long)sector,
+ be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
+ return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+ }
+
+ if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
+ be32_to_cpu(sdt->ref_tag) != ei_lba) {
+ pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
+ " ei_lba: 0x%08x\n", (unsigned long long)sector,
+ be32_to_cpu(sdt->ref_tag), ei_lba);
+ return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+ }
+
+ return 0;
+}
+
+static void
+sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
+ struct scatterlist *sg, int sg_off)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct scatterlist *psg;
+ void *paddr, *addr;
+ unsigned int i, len, left;
+ unsigned int offset = 0;
+
+ left = sectors * dev->prot_length;
+
+ for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
+
+ len = min(psg->length, left);
+ if (offset >= sg->length) {
+ sg = sg_next(sg);
+ offset = 0;
+ sg_off = sg->offset;
+ }
+
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+ addr = kmap_atomic(sg_page(sg)) + sg_off;
+
+ if (read)
+ memcpy(paddr, addr, len);
+ else
+ memcpy(addr, paddr, len);
+
+ left -= len;
+ offset += len;
+ kunmap_atomic(paddr);
+ kunmap_atomic(addr);
+ }
+}
+
+sense_reason_t
+sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+ unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_dif_v1_tuple *sdt;
+ struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+ sector_t sector = start;
+ void *daddr, *paddr;
+ int i, j, offset = 0;
+ sense_reason_t rc;
+
+ for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+
+ for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+ if (offset >= psg->length) {
+ kunmap_atomic(paddr);
+ psg = sg_next(psg);
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+ offset = 0;
+ }
+
+ sdt = paddr + offset;
+
+ pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
+ " app_tag: 0x%04x ref_tag: %u\n",
+ (unsigned long long)sector, sdt->guard_tag,
+ sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+ rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
+ ei_lba);
+ if (rc) {
+ kunmap_atomic(paddr);
+ kunmap_atomic(daddr);
+ cmd->bad_sector = sector;
+ return rc;
+ }
+
+ sector++;
+ ei_lba++;
+ offset += sizeof(struct se_dif_v1_tuple);
+ }
+
+ kunmap_atomic(paddr);
+ kunmap_atomic(daddr);
+ }
+ sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);
+
+ return 0;
+}
+EXPORT_SYMBOL(sbc_dif_verify_write);
+
+sense_reason_t
+sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+ unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_dif_v1_tuple *sdt;
+ struct scatterlist *dsg;
+ sector_t sector = start;
+ void *daddr, *paddr;
+ int i, j, offset = sg_off;
+ sense_reason_t rc;
+
+ for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ paddr = kmap_atomic(sg_page(sg)) + sg->offset;
+
+ for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+ if (offset >= sg->length) {
+ kunmap_atomic(paddr);
+ sg = sg_next(sg);
+ paddr = kmap_atomic(sg_page(sg)) + sg->offset;
+ offset = 0;
+ }
+
+ sdt = paddr + offset;
+
+ pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
+ " app_tag: 0x%04x ref_tag: %u\n",
+ (unsigned long long)sector, sdt->guard_tag,
+ sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+ if (sdt->app_tag == cpu_to_be16(0xffff)) {
+ sector++;
+ offset += sizeof(struct se_dif_v1_tuple);
+ continue;
+ }
+
+ rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
+ ei_lba);
+ if (rc) {
+ kunmap_atomic(paddr);
+ kunmap_atomic(daddr);
+ cmd->bad_sector = sector;
+ return rc;
+ }
+
+ sector++;
+ ei_lba++;
+ offset += sizeof(struct se_dif_v1_tuple);
+ }
+
+ kunmap_atomic(paddr);
+ kunmap_atomic(daddr);
+ }
+ sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
+
+ return 0;
+}
+EXPORT_SYMBOL(sbc_dif_verify_read);
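A self-contained sketch of the 8-byte DIF v1 tuple that sbc_dif_verify_write()/sbc_dif_verify_read() above check once per logical block: a big-endian CRC16 guard over the block, a 16-bit application tag, and a 32-bit reference tag (the low 32 bits of the LBA for Type 1). The software CRC (polynomial 0x8bb7) is an assumption standing in for the kernel's crc_t10dif():

/*
 * Minimal userspace sketch of DIF v1 tuple generation and verification.
 * Mirrors the Type 1 checks in sbc_dif_v1_verify(); not kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* htons/htonl for the big-endian tags */

struct dif_v1_tuple {
	uint16_t guard_tag;	/* big-endian CRC16 of the data block */
	uint16_t app_tag;
	uint32_t ref_tag;	/* big-endian; LBA & 0xffffffff for Type 1 */
};

/* CRC-16/T10-DIF: poly 0x8bb7, init 0, no reflection (assumed equivalent) */
static uint16_t crc_t10dif_sw(const uint8_t *buf, size_t len)
{
	uint16_t crc = 0;

	while (len--) {
		crc ^= (uint16_t)*buf++ << 8;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7 : crc << 1;
	}
	return crc;
}

int main(void)
{
	uint8_t block[512];
	struct dif_v1_tuple sdt;
	uint64_t lba = 42;

	memset(block, 0xab, sizeof(block));

	/* Protect: what the writer side generates per sector */
	sdt.guard_tag = htons(crc_t10dif_sw(block, sizeof(block)));
	sdt.app_tag = 0;
	sdt.ref_tag = htonl((uint32_t)(lba & 0xffffffff));

	/* Verify: same guard and Type 1 reference tag checks as above */
	if (sdt.guard_tag != htons(crc_t10dif_sw(block, sizeof(block))))
		puts("guard check failed");
	else if (ntohl(sdt.ref_tag) != (uint32_t)(lba & 0xffffffff))
		puts("ref tag check failed");
	else
		puts("DIF v1 tuple verified");
	return 0;
}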
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 021c3f4a4f00..3bebc71ea033 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -100,6 +100,11 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
*/
if (dev->dev_attrib.emulate_3pc)
buf[5] |= 0x8;
+ /*
+ * Set Protection (PROTECT) bit when DIF has been enabled.
+ */
+ if (dev->dev_attrib.pi_prot_type)
+ buf[5] |= 0x1;
buf[7] = 0x2; /* CmdQue=1 */
@@ -267,7 +272,7 @@ check_t10_vend_desc:
port = lun->lun_sep;
if (port) {
struct t10_alua_lu_gp *lu_gp;
- u32 padding, scsi_name_len;
+ u32 padding, scsi_name_len, scsi_target_len;
u16 lu_gp_id = 0;
u16 tg_pt_gp_id = 0;
u16 tpgt;
@@ -365,16 +370,6 @@ check_lu_gp:
* section 7.5.1 Table 362
*/
check_scsi_name:
- scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
- /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
- scsi_name_len += 10;
- /* Check for 4-byte padding */
- padding = ((-scsi_name_len) & 3);
- if (padding != 0)
- scsi_name_len += padding;
- /* Header size + Designation descriptor */
- scsi_name_len += 4;
-
buf[off] =
(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x3; /* CODE SET == UTF-8 */
@@ -402,13 +397,57 @@ check_scsi_name:
* shall be no larger than 256 and shall be a multiple
* of four.
*/
+ padding = ((-scsi_name_len) & 3);
if (padding)
scsi_name_len += padding;
+ if (scsi_name_len > 256)
+ scsi_name_len = 256;
buf[off-1] = scsi_name_len;
off += scsi_name_len;
/* Header size + Designation descriptor */
len += (scsi_name_len + 4);
+
+ /*
+ * Target device designator
+ */
+ buf[off] =
+ (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+ buf[off++] |= 0x3; /* CODE SET == UTF-8 */
+ buf[off] = 0x80; /* Set PIV=1 */
+ /* Set ASSOCIATION == target device: 10b */
+ buf[off] |= 0x20;
+ /* DESIGNATOR TYPE == SCSI name string */
+ buf[off++] |= 0x8;
+ off += 2; /* Skip over Reserved and length */
+ /*
+ * SCSI name string identifier containing $FABRIC_MOD
+ * dependent information. For LIO-Target and iSCSI
+ * Target Port, this means "<iSCSI name>" in
+ * UTF-8 encoding.
+ */
+ scsi_target_len = sprintf(&buf[off], "%s",
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg));
+ scsi_target_len += 1 /* Include NULL terminator */;
+ /*
+ * The null-terminated, null-padded (see 4.4.2) SCSI
+ * NAME STRING field contains a UTF-8 format string.
+ * The number of bytes in the SCSI NAME STRING field
+ * (i.e., the value in the DESIGNATOR LENGTH field)
+ * shall be no larger than 256 and shall be a multiple
+ * of four.
+ */
+ padding = ((-scsi_target_len) & 3);
+ if (padding)
+ scsi_target_len += padding;
+ if (scsi_target_len > 256)
+ scsi_target_len = 256;
+
+ buf[off-1] = scsi_target_len;
+ off += scsi_target_len;
+
+ /* Header size + Designation descriptor */
+ len += (scsi_target_len + 4);
}
buf[2] = ((len >> 8) & 0xff);
buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
@@ -436,12 +475,26 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
struct se_device *dev = cmd->se_dev;
buf[3] = 0x3c;
+ /*
+ * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
+ * only for TYPE3 protection.
+ */
+ if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+ buf[4] = 0x5;
+ else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
+ buf[4] = 0x4;
+
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
/* If WriteCache emulation is enabled, set V_SUP */
if (spc_check_dev_wce(dev))
buf[6] = 0x01;
+ /* If an LBA map is present set R_SUP */
+ spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
+ if (!list_empty(&dev->t10_alua.lba_map_list))
+ buf[8] = 0x10;
+ spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
return 0;
}
@@ -600,6 +653,20 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
+/* Referrals VPD page */
+static sense_reason_t
+spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ buf[0] = dev->transport->get_device_type(dev);
+ buf[3] = 0x0c;
+ put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
+ put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]);
+
+ return 0;
+}
+
static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
@@ -614,6 +681,7 @@ static struct {
{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
+ { .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
};
/* supported vital product data pages */
@@ -643,11 +711,15 @@ spc_emulate_inquiry(struct se_cmd *cmd)
struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
unsigned char *rbuf;
unsigned char *cdb = cmd->t_task_cdb;
- unsigned char buf[SE_INQUIRY_BUF];
+ unsigned char *buf;
sense_reason_t ret;
int p;
- memset(buf, 0, SE_INQUIRY_BUF);
+ buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
+ if (!buf) {
+ pr_err("Unable to allocate response buffer for INQUIRY\n");
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
if (dev == tpg->tpg_virt_lun0.lun_se_dev)
buf[0] = 0x3f; /* Not connected */
@@ -680,9 +752,10 @@ spc_emulate_inquiry(struct se_cmd *cmd)
out:
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
- memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+ memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
transport_kunmap_data_sg(cmd);
}
+ kfree(buf);
if (!ret)
target_complete_cmd(cmd, GOOD);
@@ -785,6 +858,19 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
* status (see SAM-4).
*/
p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
+ /*
+ * From spc4r30, section 7.5.7 Control mode page
+ *
+ * Application Tag Owner (ATO) bit set to one.
+ *
+ * If the ATO bit is set to one the device server shall not modify the
+ * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
+ * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
+ * TAG field.
+ */
+ if (dev->dev_attrib.pi_prot_type)
+ p[5] |= 0x80;
+
p[8] = 0xff;
p[9] = 0xff;
p[11] = 30;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index f697f8baec54..c036595b17cf 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -278,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
- spin_lock_init(&acl->stats_lock);
acl->dynamic_node_acl = 1;
tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -406,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
- spin_lock_init(&acl->stats_lock);
tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -658,16 +656,10 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
spin_lock_init(&lun->lun_sep_lock);
init_completion(&lun->lun_ref_comp);
- ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
+ ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
if (ret < 0)
return ret;
- ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
- if (ret < 0) {
- percpu_ref_cancel_init(&lun->lun_ref);
- return ret;
- }
-
return 0;
}
@@ -789,7 +781,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
}
EXPORT_SYMBOL(core_tpg_deregister);
-struct se_lun *core_tpg_pre_addlun(
+struct se_lun *core_tpg_alloc_lun(
struct se_portal_group *tpg,
u32 unpacked_lun)
{
@@ -819,11 +811,11 @@ struct se_lun *core_tpg_pre_addlun(
return lun;
}
-int core_tpg_post_addlun(
+int core_tpg_add_lun(
struct se_portal_group *tpg,
struct se_lun *lun,
u32 lun_access,
- void *lun_ptr)
+ struct se_device *dev)
{
int ret;
@@ -831,7 +823,7 @@ int core_tpg_post_addlun(
if (ret < 0)
return ret;
- ret = core_dev_export(lun_ptr, tpg, lun);
+ ret = core_dev_export(dev, tpg, lun);
if (ret < 0) {
percpu_ref_cancel_init(&lun->lun_ref);
return ret;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 91953da0f623..24b4f65d8777 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -62,6 +62,8 @@ struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+struct kmem_cache *t10_alua_lba_map_cache;
+struct kmem_cache *t10_alua_lba_map_mem_cache;
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
@@ -128,14 +130,36 @@ int init_se_kmem_caches(void)
"mem_t failed\n");
goto out_free_tg_pt_gp_cache;
}
+ t10_alua_lba_map_cache = kmem_cache_create(
+ "t10_alua_lba_map_cache",
+ sizeof(struct t10_alua_lba_map),
+ __alignof__(struct t10_alua_lba_map), 0, NULL);
+ if (!t10_alua_lba_map_cache) {
+ pr_err("kmem_cache_create() for t10_alua_lba_map_"
+ "cache failed\n");
+ goto out_free_tg_pt_gp_mem_cache;
+ }
+ t10_alua_lba_map_mem_cache = kmem_cache_create(
+ "t10_alua_lba_map_mem_cache",
+ sizeof(struct t10_alua_lba_map_member),
+ __alignof__(struct t10_alua_lba_map_member), 0, NULL);
+ if (!t10_alua_lba_map_mem_cache) {
+ pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
+ "cache failed\n");
+ goto out_free_lba_map_cache;
+ }
target_completion_wq = alloc_workqueue("target_completion",
WQ_MEM_RECLAIM, 0);
if (!target_completion_wq)
- goto out_free_tg_pt_gp_mem_cache;
+ goto out_free_lba_map_mem_cache;
return 0;
+out_free_lba_map_mem_cache:
+ kmem_cache_destroy(t10_alua_lba_map_mem_cache);
+out_free_lba_map_cache:
+ kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_mem_cache:
kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
@@ -164,6 +188,8 @@ void release_se_kmem_caches(void)
kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+ kmem_cache_destroy(t10_alua_lba_map_cache);
+ kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}
/* This code ensures unique mib indexes are handed out. */
@@ -568,10 +594,11 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
struct se_lun *lun = cmd->se_lun;
- if (!lun || !cmd->lun_ref_active)
+ if (!lun)
return;
- percpu_ref_put(&lun->lun_ref);
+ if (cmpxchg(&cmd->lun_ref_active, true, false))
+ percpu_ref_put(&lun->lun_ref);
}
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
@@ -642,9 +669,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
return;
}
- if (!success)
- cmd->transport_state |= CMD_T_FAILED;
-
/*
* Check for case where an explicit ABORT_TASK has been received
* and transport_wait_for_tasks() will be waiting for completion..
@@ -654,7 +678,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&cmd->t_transport_stop_comp);
return;
- } else if (cmd->transport_state & CMD_T_FAILED) {
+ } else if (!success) {
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
INIT_WORK(&cmd->work, target_complete_ok_work);
@@ -1284,6 +1308,8 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
* @sgl_count: scatterlist count for unidirectional mapping
* @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
* @sgl_bidi_count: scatterlist count for bidirectional READ mapping
+ * @sgl_prot: struct scatterlist memory protection information
+ * @sgl_prot_count: scatterlist count for protection information
*
* Returns non zero to signal active I/O shutdown failure. All other
* setup exceptions will be returned as a SCSI CHECK_CONDITION response,
@@ -1296,7 +1322,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
u32 data_length, int task_attr, int data_dir, int flags,
struct scatterlist *sgl, u32 sgl_count,
- struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
+ struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
+ struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
struct se_portal_group *se_tpg;
sense_reason_t rc;
@@ -1338,6 +1365,14 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
target_put_sess_cmd(se_sess, se_cmd);
return 0;
}
+ /*
+ * Save pointers for SGLs containing protection information,
+ * if present.
+ */
+ if (sgl_prot_count) {
+ se_cmd->t_prot_sg = sgl_prot;
+ se_cmd->t_prot_nents = sgl_prot_count;
+ }
rc = target_setup_cmd_from_cdb(se_cmd, cdb);
if (rc != 0) {
@@ -1380,6 +1415,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
return 0;
}
}
+
/*
* Check if we need to delay processing because of ALUA
* Active/NonOptimized primary access state..
@@ -1419,7 +1455,7 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
{
return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
unpacked_lun, data_length, task_attr, data_dir,
- flags, NULL, 0, NULL, 0);
+ flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);
@@ -2455,6 +2491,19 @@ static int transport_get_sense_codes(
return 0;
}
+static
+void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector)
+{
+ /* Place failed LBA in sense data information descriptor 0. */
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc;
+ buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */
+ buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa;
+ buffer[SPC_VALIDITY_OFFSET] = 0x80;
+
+ /* Descriptor Information: failing sector */
+ put_unaligned_be64(bad_sector, &buffer[12]);
+}
+
int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
sense_reason_t reason, int from_transport)
@@ -2648,6 +2697,39 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
break;
+ case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL BLOCK GUARD CHECK FAILED */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x01;
+ transport_err_sector_info(buffer, cmd->bad_sector);
+ break;
+ case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x02;
+ transport_err_sector_info(buffer, cmd->bad_sector);
+ break;
+ case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
+ transport_err_sector_info(buffer, cmd->bad_sector);
+ break;
case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
default:
/* CURRENT ERROR */
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index b04467e7547c..505519b10cb7 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -98,7 +98,6 @@ int core_scsi3_ua_allocate(
pr_err("Unable to allocate struct se_ua\n");
return -ENOMEM;
}
- INIT_LIST_HEAD(&ua->ua_dev_list);
INIT_LIST_HEAD(&ua->ua_nacl_list);
ua->ua_nacl = nacl;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 6b88a9958f61..669c536fd959 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -40,10 +40,6 @@
static struct workqueue_struct *xcopy_wq = NULL;
/*
- * From target_core_spc.c
- */
-extern void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
-/*
* From target_core_device.c
*/
extern struct mutex g_device_mutex;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 479ec5621a4e..8b2c1aaf81de 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -438,7 +438,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
struct se_session *se_sess = sess->se_sess;
int tag;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0)
goto busy;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index c6932fb53a8d..e879da81ad93 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -267,7 +267,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
return found;
}
-struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
+static struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
struct ft_node_acl *acl;
@@ -552,7 +552,7 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.fabric_drop_nodeacl = &ft_del_acl,
};
-int ft_register_configfs(void)
+static int ft_register_configfs(void)
{
struct target_fabric_configfs *fabric;
int ret;
@@ -599,7 +599,7 @@ int ft_register_configfs(void)
return 0;
}
-void ft_deregister_configfs(void)
+static void ft_deregister_configfs(void)
{
if (!ft_configfs)
return;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f35a1f75b15b..35c066489a19 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -29,6 +29,19 @@ config THERMAL_HWMON
Say 'Y' here if you want all thermal sensors to
have hwmon sysfs interface too.
+config THERMAL_OF
+ bool
+ prompt "APIs to parse thermal data out of device tree"
+ depends on OF
+ default y
+ help
+ This option provides helpers to read and parse thermal data
+ definitions out of the device tree blob.
+
+ Say 'Y' here if you need to build thermal infrastructure
+ based on device tree.
+
choice
prompt "Default Thermal governor"
default THERMAL_DEFAULT_GOV_STEP_WISE
@@ -79,6 +92,7 @@ config THERMAL_GOV_USER_SPACE
config CPU_THERMAL
bool "generic cpu cooling support"
depends on CPU_FREQ
+ depends on THERMAL_OF
help
This implements the generic cpu cooling mechanism through frequency
reduction. An ACPI version of this already exists
@@ -121,7 +135,7 @@ config SPEAR_THERMAL
config RCAR_THERMAL
tristate "Renesas R-Car thermal driver"
- depends on ARCH_SHMOBILE
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
Enable this to plug the R-Car thermal sensor driver into the Linux
thermal framework.
@@ -192,6 +206,13 @@ config X86_PKG_TEMP_THERMAL
two trip points which can be set by user to get notifications via thermal
notification methods.
+config ACPI_INT3403_THERMAL
+ tristate "ACPI INT3403 thermal driver"
+ depends on X86 && ACPI
+ help
+ This driver uses ACPI INT3403 device objects. If present, it will
+ register each INT3403 thermal sensor as a thermal zone.
+
menu "Texas Instruments thermal drivers"
source "drivers/thermal/ti-soc-thermal/Kconfig"
endmenu
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 584b36319d51..54e4ec9eb5df 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -7,6 +7,7 @@ thermal_sys-y += thermal_core.o
# interface to/from other layers providing sensors
thermal_sys-$(CONFIG_THERMAL_HWMON) += thermal_hwmon.o
+thermal_sys-$(CONFIG_THERMAL_OF) += of-thermal.o
# governors
thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
@@ -29,3 +30,4 @@ obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o
obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
+obj-$(CONFIG_ACPI_INT3403_THERMAL) += int3403_thermal.o
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 02a46f23d14c..4246262c4bd2 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -174,6 +174,13 @@ static int get_property(unsigned int cpu, unsigned long input,
max_level++;
}
+ /* No valid cpu frequency entry */
+ if (max_level == 0)
+ return -EINVAL;
+
+ /* max_level is an index, not a counter */
+ max_level--;
+
/* get max level */
if (property == GET_MAXL) {
*output = (unsigned int)max_level;
@@ -181,7 +188,7 @@ static int get_property(unsigned int cpu, unsigned long input,
}
if (property == GET_FREQ)
- level = descend ? input : (max_level - input - 1);
+ level = descend ? input : (max_level - input);
for (i = 0, j = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
/* ignore invalid entry */
@@ -197,7 +204,7 @@ static int get_property(unsigned int cpu, unsigned long input,
if (property == GET_LEVEL && (unsigned int)input == freq) {
/* get level by frequency */
- *output = descend ? j : (max_level - j - 1);
+ *output = descend ? j : (max_level - j);
return 0;
}
if (property == GET_FREQ && level == j) {
@@ -417,18 +424,21 @@ static struct notifier_block thermal_cpufreq_notifier_block = {
};
/**
- * cpufreq_cooling_register - function to create cpufreq cooling device.
+ * __cpufreq_cooling_register - helper function to create cpufreq cooling device
+ * @np: a valid struct device_node to the cooling device device tree node
* @clip_cpus: cpumask of cpus where the frequency constraints will happen.
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
- * cooling devices.
+ * cooling devices. It also gives the opportunity to link the cooling device
+ * with a device tree node, in order to bind it via the thermal DT code.
*
* Return: a valid struct thermal_cooling_device pointer on success,
* on failure, it returns a corresponding ERR_PTR().
*/
-struct thermal_cooling_device *
-cpufreq_cooling_register(const struct cpumask *clip_cpus)
+static struct thermal_cooling_device *
+__cpufreq_cooling_register(struct device_node *np,
+ const struct cpumask *clip_cpus)
{
struct thermal_cooling_device *cool_dev;
struct cpufreq_cooling_device *cpufreq_dev = NULL;
@@ -467,8 +477,8 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus)
snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
cpufreq_dev->id);
- cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev,
- &cpufreq_cooling_ops);
+ cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
+ &cpufreq_cooling_ops);
if (IS_ERR(cool_dev)) {
release_idr(&cpufreq_idr, cpufreq_dev->id);
kfree(cpufreq_dev);
@@ -488,9 +498,50 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus)
return cool_dev;
}
+
+/**
+ * cpufreq_cooling_register - function to create cpufreq cooling device.
+ * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ *
+ * This interface function registers the cpufreq cooling device with the name
+ * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
+ * cooling devices.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
+ */
+struct thermal_cooling_device *
+cpufreq_cooling_register(const struct cpumask *clip_cpus)
+{
+ return __cpufreq_cooling_register(NULL, clip_cpus);
+}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
/**
+ * of_cpufreq_cooling_register - function to create cpufreq cooling device.
+ * @np: a valid struct device_node to the cooling device device tree node
+ * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ *
+ * This interface function registers the cpufreq cooling device with the name
+ * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
+ * cooling devices. Using this API, the cpufreq cooling device will be
+ * linked to the device tree node provided.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
+ */
+struct thermal_cooling_device *
+of_cpufreq_cooling_register(struct device_node *np,
+ const struct cpumask *clip_cpus)
+{
+ if (!np)
+ return ERR_PTR(-EINVAL);
+
+ return __cpufreq_cooling_register(np, clip_cpus);
+}
+EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
+
+/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
* @cdev: thermal cooling device pointer.
*
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 1d6c801c1eb9..45af765a3198 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -7,6 +7,7 @@
*
*/
+#include <linux/clk.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
@@ -73,6 +74,7 @@ struct imx_thermal_data {
unsigned long last_temp;
bool irq_enabled;
int irq;
+ struct clk *thermal_clk;
};
static void imx_set_alarm_temp(struct imx_thermal_data *data,
@@ -284,7 +286,7 @@ static int imx_unbind(struct thermal_zone_device *tz,
return 0;
}
-static const struct thermal_zone_device_ops imx_tz_ops = {
+static struct thermal_zone_device_ops imx_tz_ops = {
.bind = imx_bind,
.unbind = imx_unbind,
.get_temp = imx_get_temp,
@@ -457,6 +459,22 @@ static int imx_thermal_probe(struct platform_device *pdev)
return ret;
}
+ data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(data->thermal_clk)) {
+ dev_warn(&pdev->dev, "failed to get thermal clk!\n");
+ } else {
+ /*
+ * The thermal sensor needs its clock enabled to return correct values.
+ * Normally the clock would only be enabled around each measurement, but
+ * when the alarm function is enabled the hardware measures the
+ * temperature periodically on its own, so keep the clock always on.
+ */
+ ret = clk_prepare_enable(data->thermal_clk);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
+ }
+
/* Enable measurements at ~ 10 Hz */
regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
@@ -478,6 +496,8 @@ static int imx_thermal_remove(struct platform_device *pdev)
/* Disable measurements */
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
+ if (!IS_ERR(data->thermal_clk))
+ clk_disable_unprepare(data->thermal_clk);
thermal_zone_device_unregister(data->tz);
cpufreq_cooling_unregister(data->cdev);
@@ -490,27 +510,30 @@ static int imx_thermal_suspend(struct device *dev)
{
struct imx_thermal_data *data = dev_get_drvdata(dev);
struct regmap *map = data->tempmon;
- u32 val;
- regmap_read(map, TEMPSENSE0, &val);
- if ((val & TEMPSENSE0_POWER_DOWN) == 0) {
- /*
- * If a measurement is taking place, wait for a long enough
- * time for it to finish, and then check again. If it still
- * does not finish, something must go wrong.
- */
- udelay(50);
- regmap_read(map, TEMPSENSE0, &val);
- if ((val & TEMPSENSE0_POWER_DOWN) == 0)
- return -ETIMEDOUT;
- }
+ /*
+ * Disable the thermal sensor; otherwise, if the thermal core tries to
+ * read the temperature before the sensor has resumed, a wrong value
+ * will be returned because the sensor is still powered down.
+ */
+ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
+ regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
+ data->mode = THERMAL_DEVICE_DISABLED;
return 0;
}
static int imx_thermal_resume(struct device *dev)
{
- /* Nothing to do for now */
+ struct imx_thermal_data *data = dev_get_drvdata(dev);
+ struct regmap *map = data->tempmon;
+
+ /* Re-enable the thermal sensor after resume */
+ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
+ regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+ data->mode = THERMAL_DEVICE_ENABLED;
+
return 0;
}
#endif
@@ -522,6 +545,7 @@ static const struct of_device_id of_imx_thermal_match[] = {
{ .compatible = "fsl,imx6q-tempmon", },
{ /* end */ }
};
+MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
static struct platform_driver imx_thermal = {
.driver = {
diff --git a/drivers/thermal/int3403_thermal.c b/drivers/thermal/int3403_thermal.c
new file mode 100644
index 000000000000..1301681d9a77
--- /dev/null
+++ b/drivers/thermal/int3403_thermal.c
@@ -0,0 +1,237 @@
+/*
+ * ACPI INT3403 thermal driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/thermal.h>
+
+#define INT3403_TYPE_SENSOR 0x03
+#define INT3403_PERF_CHANGED_EVENT 0x80
+#define INT3403_THERMAL_EVENT 0x90
+
+#define DECI_KELVIN_TO_MILLI_CELSIUS(t, off) (((t) - (off)) * 100)
+#define KELVIN_OFFSET 2732
+#define MILLI_CELSIUS_TO_DECI_KELVIN(t, off) (((t) / 100) + (off))
+
+#define ACPI_INT3403_CLASS "int3403"
+#define ACPI_INT3403_FILE_STATE "state"
+
+struct int3403_sensor {
+ struct thermal_zone_device *tzone;
+ unsigned long *thresholds;
+};
+
+static int sys_get_curr_temp(struct thermal_zone_device *tzone,
+ unsigned long *temp)
+{
+ struct acpi_device *device = tzone->devdata;
+ unsigned long long tmp;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(device->handle, "_TMP", NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ *temp = DECI_KELVIN_TO_MILLI_CELSIUS(tmp, KELVIN_OFFSET);
+
+ return 0;
+}
+
+static int sys_get_trip_hyst(struct thermal_zone_device *tzone,
+ int trip, unsigned long *temp)
+{
+ struct acpi_device *device = tzone->devdata;
+ unsigned long long hyst;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(device->handle, "GTSH", NULL, &hyst);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ *temp = DECI_KELVIN_TO_MILLI_CELSIUS(hyst, KELVIN_OFFSET);
+
+ return 0;
+}
+
+static int sys_get_trip_temp(struct thermal_zone_device *tzone,
+ int trip, unsigned long *temp)
+{
+ struct acpi_device *device = tzone->devdata;
+ struct int3403_sensor *obj = acpi_driver_data(device);
+
+ /*
+ * get_trip_temp is a mandatory callback, but the PATx method doesn't
+ * return any value, so return the cached value that was last set from
+ * user space.
+ */
+ *temp = obj->thresholds[trip];
+
+ return 0;
+}
+
+static int sys_get_trip_type(struct thermal_zone_device *thermal,
+ int trip, enum thermal_trip_type *type)
+{
+ /* Mandatory callback, may not mean much here */
+ *type = THERMAL_TRIP_PASSIVE;
+
+ return 0;
+}
+
+int sys_set_trip_temp(struct thermal_zone_device *tzone, int trip,
+ unsigned long temp)
+{
+ struct acpi_device *device = tzone->devdata;
+ acpi_status status;
+ char name[10];
+ int ret = 0;
+ struct int3403_sensor *obj = acpi_driver_data(device);
+
+ snprintf(name, sizeof(name), "PAT%d", trip);
+ if (acpi_has_method(device->handle, name)) {
+ status = acpi_execute_simple_method(device->handle, name,
+ MILLI_CELSIUS_TO_DECI_KELVIN(temp,
+ KELVIN_OFFSET));
+ if (ACPI_FAILURE(status))
+ ret = -EIO;
+ else
+ obj->thresholds[trip] = temp;
+ } else {
+ ret = -EIO;
+ dev_err(&device->dev, "sys_set_trip_temp: method not found\n");
+ }
+
+ return ret;
+}
+
+static struct thermal_zone_device_ops tzone_ops = {
+ .get_temp = sys_get_curr_temp,
+ .get_trip_temp = sys_get_trip_temp,
+ .get_trip_type = sys_get_trip_type,
+ .set_trip_temp = sys_set_trip_temp,
+ .get_trip_hyst = sys_get_trip_hyst,
+};
+
+static void acpi_thermal_notify(struct acpi_device *device, u32 event)
+{
+ struct int3403_sensor *obj;
+
+ if (!device)
+ return;
+
+ obj = acpi_driver_data(device);
+ if (!obj)
+ return;
+
+ switch (event) {
+ case INT3403_PERF_CHANGED_EVENT:
+ break;
+ case INT3403_THERMAL_EVENT:
+ thermal_zone_device_update(obj->tzone);
+ break;
+ default:
+ dev_err(&device->dev, "Unsupported event [0x%x]\n", event);
+ break;
+ }
+}
+
+static int acpi_int3403_add(struct acpi_device *device)
+{
+ int result = 0;
+ unsigned long long ptyp;
+ acpi_status status;
+ struct int3403_sensor *obj;
+ unsigned long long trip_cnt;
+ int trip_mask = 0;
+
+ if (!device)
+ return -EINVAL;
+
+ status = acpi_evaluate_integer(device->handle, "PTYP", NULL, &ptyp);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ if (ptyp != INT3403_TYPE_SENSOR)
+ return -EINVAL;
+
+ obj = devm_kzalloc(&device->dev, sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ device->driver_data = obj;
+
+ status = acpi_evaluate_integer(device->handle, "PATC", NULL,
+ &trip_cnt);
+ if (ACPI_FAILURE(status))
+ trip_cnt = 0;
+
+ if (trip_cnt) {
+ /* We have to cache the thresholds; they can't be read back */
+ obj->thresholds = devm_kzalloc(&device->dev,
+ sizeof(*obj->thresholds) * trip_cnt,
+ GFP_KERNEL);
+ if (!obj->thresholds)
+ return -ENOMEM;
+ trip_mask = BIT(trip_cnt) - 1;
+ }
+ obj->tzone = thermal_zone_device_register(acpi_device_bid(device),
+ trip_cnt, trip_mask, device, &tzone_ops,
+ NULL, 0, 0);
+ if (IS_ERR(obj->tzone)) {
+ result = PTR_ERR(obj->tzone);
+ return result;
+ }
+
+ strcpy(acpi_device_name(device), "INT3403");
+ strcpy(acpi_device_class(device), ACPI_INT3403_CLASS);
+
+ return 0;
+}
+
+static int acpi_int3403_remove(struct acpi_device *device)
+{
+ struct int3403_sensor *obj;
+
+ obj = acpi_driver_data(device);
+ thermal_zone_device_unregister(obj->tzone);
+
+ return 0;
+}
+
+ACPI_MODULE_NAME("int3403");
+static const struct acpi_device_id int3403_device_ids[] = {
+ {"INT3403", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, int3403_device_ids);
+
+static struct acpi_driver acpi_int3403_driver = {
+ .name = "INT3403",
+ .class = ACPI_INT3403_CLASS,
+ .ids = int3403_device_ids,
+ .ops = {
+ .add = acpi_int3403_add,
+ .remove = acpi_int3403_remove,
+ .notify = acpi_thermal_notify,
+ },
+};
+
+module_acpi_driver(acpi_int3403_driver);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ACPI INT3403 thermal driver");
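A worked example of the temperature conversion macros above (illustrative
values only): if _TMP evaluates to 3032 deci-Kelvin, then

	DECI_KELVIN_TO_MILLI_CELSIUS(3032, KELVIN_OFFSET)
		== (3032 - 2732) * 100 == 30000	/* 30.0 degrees C */
	MILLI_CELSIUS_TO_DECI_KELVIN(30000, KELVIN_OFFSET)
		== (30000 / 100) + 2732 == 3032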
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 8f181b3f842b..a084325f1386 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -206,6 +206,15 @@ static void find_target_mwait(void)
}
+static bool has_pkg_state_counter(void)
+{
+ u64 tmp;
+ return !rdmsrl_safe(MSR_PKG_C2_RESIDENCY, &tmp) ||
+ !rdmsrl_safe(MSR_PKG_C3_RESIDENCY, &tmp) ||
+ !rdmsrl_safe(MSR_PKG_C6_RESIDENCY, &tmp) ||
+ !rdmsrl_safe(MSR_PKG_C7_RESIDENCY, &tmp);
+}
+
static u64 pkg_state_counter(void)
{
u64 val;
@@ -438,14 +447,12 @@ static int clamp_thread(void *arg)
*/
local_touch_nmi();
stop_critical_timings();
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- cpu_relax(); /* allow HT sibling to run */
- __mwait(eax, ecx);
+ mwait_idle_with_hints(eax, ecx);
start_critical_timings();
atomic_inc(&idle_wakeup_counter);
}
tick_nohz_idle_exit();
- preempt_enable_no_resched();
+ preempt_enable();
}
del_timer_sync(&wakeup_timer);
clear_bit(cpunr, cpu_clamping_mask);
@@ -500,7 +507,7 @@ static int start_power_clamp(void)
struct task_struct *thread;
/* check if pkg cstate counter is completely 0, abort in this case */
- if (!pkg_state_counter()) {
+ if (!has_pkg_state_counter()) {
pr_err("pkg cstate counter not functional, abort\n");
return -EINVAL;
}
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
new file mode 100644
index 000000000000..04b1be7fa018
--- /dev/null
+++ b/drivers/thermal/of-thermal.c
@@ -0,0 +1,849 @@
+/*
+ * of-thermal.c - Generic Thermal Management device tree support.
+ *
+ * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Eduardo Valentin <eduardo.valentin@ti.com>
+ *
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <linux/thermal.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/string.h>
+
+#include "thermal_core.h"
+
+/*** Private data structures to represent thermal device tree data ***/
+
+/**
+ * struct __thermal_trip - representation of a point in temperature domain
+ * @np: pointer to struct device_node that this trip point was created from
+ * @temperature: temperature value in millicelsius
+ * @hysteresis: relative hysteresis in millicelsius
+ * @type: trip point type
+ */
+
+struct __thermal_trip {
+ struct device_node *np;
+ unsigned long int temperature;
+ unsigned long int hysteresis;
+ enum thermal_trip_type type;
+};
+
+/**
+ * struct __thermal_bind_params - a match between trip and cooling device
+ * @cooling_device: a pointer to identify the referred cooling device
+ * @trip_id: the trip point index
+ * @usage: the percentage (from 0 to 100) of cooling contribution
+ * @min: minimum cooling state used at this trip point
+ * @max: maximum cooling state used at this trip point
+ */
+
+struct __thermal_bind_params {
+ struct device_node *cooling_device;
+ unsigned int trip_id;
+ unsigned int usage;
+ unsigned long min;
+ unsigned long max;
+};
+
+/**
+ * struct __thermal_zone - internal representation of a thermal zone
+ * @mode: current thermal zone device mode (enabled/disabled)
+ * @passive_delay: polling interval while passive cooling is activated
+ * @polling_delay: zone polling interval
+ * @ntrips: number of trip points
+ * @trips: an array of trip points (0..ntrips - 1)
+ * @num_tbps: number of thermal bind params
+ * @tbps: an array of thermal bind params (0..num_tbps - 1)
+ * @sensor_data: sensor private data used while reading temperature and trend
+ * @get_temp: sensor callback to read temperature
+ * @get_trend: sensor callback to read temperature trend
+ */
+
+struct __thermal_zone {
+ enum thermal_device_mode mode;
+ int passive_delay;
+ int polling_delay;
+
+ /* trip data */
+ int ntrips;
+ struct __thermal_trip *trips;
+
+ /* cooling binding data */
+ int num_tbps;
+ struct __thermal_bind_params *tbps;
+
+ /* sensor interface */
+ void *sensor_data;
+ int (*get_temp)(void *, long *);
+ int (*get_trend)(void *, long *);
+};
+
+/*** DT thermal zone device callbacks ***/
+
+static int of_thermal_get_temp(struct thermal_zone_device *tz,
+ unsigned long *temp)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (!data->get_temp)
+ return -EINVAL;
+
+ return data->get_temp(data->sensor_data, temp);
+}
+
+static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
+ enum thermal_trend *trend)
+{
+ struct __thermal_zone *data = tz->devdata;
+ long dev_trend;
+ int r;
+
+ if (!data->get_trend)
+ return -EINVAL;
+
+ r = data->get_trend(data->sensor_data, &dev_trend);
+ if (r)
+ return r;
+
+ /* TODO: These intervals might have some thresholds, but in core code */
+ if (dev_trend > 0)
+ *trend = THERMAL_TREND_RAISING;
+ else if (dev_trend < 0)
+ *trend = THERMAL_TREND_DROPPING;
+ else
+ *trend = THERMAL_TREND_STABLE;
+
+ return 0;
+}
+
+static int of_thermal_bind(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ struct __thermal_zone *data = thermal->devdata;
+ int i;
+
+ if (!data || IS_ERR(data))
+ return -ENODEV;
+
+ /* find where to bind */
+ for (i = 0; i < data->num_tbps; i++) {
+ struct __thermal_bind_params *tbp = data->tbps + i;
+
+ if (tbp->cooling_device == cdev->np) {
+ int ret;
+
+ ret = thermal_zone_bind_cooling_device(thermal,
+ tbp->trip_id, cdev,
+ tbp->min,
+ tbp->max);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int of_thermal_unbind(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ struct __thermal_zone *data = thermal->devdata;
+ int i;
+
+ if (!data || IS_ERR(data))
+ return -ENODEV;
+
+ /* find where to unbind */
+ for (i = 0; i < data->num_tbps; i++) {
+ struct __thermal_bind_params *tbp = data->tbps + i;
+
+ if (tbp->cooling_device == cdev->np) {
+ int ret;
+
+ ret = thermal_zone_unbind_cooling_device(thermal,
+ tbp->trip_id, cdev);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int of_thermal_get_mode(struct thermal_zone_device *tz,
+ enum thermal_device_mode *mode)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ *mode = data->mode;
+
+ return 0;
+}
+
+static int of_thermal_set_mode(struct thermal_zone_device *tz,
+ enum thermal_device_mode mode)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ mutex_lock(&tz->lock);
+
+ if (mode == THERMAL_DEVICE_ENABLED)
+ tz->polling_delay = data->polling_delay;
+ else
+ tz->polling_delay = 0;
+
+ mutex_unlock(&tz->lock);
+
+ data->mode = mode;
+ thermal_zone_device_update(tz);
+
+ return 0;
+}
+
+static int of_thermal_get_trip_type(struct thermal_zone_device *tz, int trip,
+ enum thermal_trip_type *type)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (trip >= data->ntrips || trip < 0)
+ return -EDOM;
+
+ *type = data->trips[trip].type;
+
+ return 0;
+}
+
+static int of_thermal_get_trip_temp(struct thermal_zone_device *tz, int trip,
+ unsigned long *temp)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (trip >= data->ntrips || trip < 0)
+ return -EDOM;
+
+ *temp = data->trips[trip].temperature;
+
+ return 0;
+}
+
+static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
+ unsigned long temp)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (trip >= data->ntrips || trip < 0)
+ return -EDOM;
+
+ /* thermal framework should take care of data->mask & (1 << trip) */
+ data->trips[trip].temperature = temp;
+
+ return 0;
+}
+
+static int of_thermal_get_trip_hyst(struct thermal_zone_device *tz, int trip,
+ unsigned long *hyst)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (trip >= data->ntrips || trip < 0)
+ return -EDOM;
+
+ *hyst = data->trips[trip].hysteresis;
+
+ return 0;
+}
+
+static int of_thermal_set_trip_hyst(struct thermal_zone_device *tz, int trip,
+ unsigned long hyst)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (trip >= data->ntrips || trip < 0)
+ return -EDOM;
+
+ /* thermal framework should take care of data->mask & (1 << trip) */
+ data->trips[trip].hysteresis = hyst;
+
+ return 0;
+}
+
+static int of_thermal_get_crit_temp(struct thermal_zone_device *tz,
+ unsigned long *temp)
+{
+ struct __thermal_zone *data = tz->devdata;
+ int i;
+
+ for (i = 0; i < data->ntrips; i++)
+ if (data->trips[i].type == THERMAL_TRIP_CRITICAL) {
+ *temp = data->trips[i].temperature;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct thermal_zone_device_ops of_thermal_ops = {
+ .get_mode = of_thermal_get_mode,
+ .set_mode = of_thermal_set_mode,
+
+ .get_trip_type = of_thermal_get_trip_type,
+ .get_trip_temp = of_thermal_get_trip_temp,
+ .set_trip_temp = of_thermal_set_trip_temp,
+ .get_trip_hyst = of_thermal_get_trip_hyst,
+ .set_trip_hyst = of_thermal_set_trip_hyst,
+ .get_crit_temp = of_thermal_get_crit_temp,
+
+ .bind = of_thermal_bind,
+ .unbind = of_thermal_unbind,
+};
+
+/*** sensor API ***/
+
+static struct thermal_zone_device *
+thermal_zone_of_add_sensor(struct device_node *zone,
+ struct device_node *sensor, void *data,
+ int (*get_temp)(void *, long *),
+ int (*get_trend)(void *, long *))
+{
+ struct thermal_zone_device *tzd;
+ struct __thermal_zone *tz;
+
+ tzd = thermal_zone_get_zone_by_name(zone->name);
+ if (IS_ERR(tzd))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ tz = tzd->devdata;
+
+ mutex_lock(&tzd->lock);
+ tz->get_temp = get_temp;
+ tz->get_trend = get_trend;
+ tz->sensor_data = data;
+
+ tzd->ops->get_temp = of_thermal_get_temp;
+ tzd->ops->get_trend = of_thermal_get_trend;
+ mutex_unlock(&tzd->lock);
+
+ return tzd;
+}
+
+/**
+ * thermal_zone_of_sensor_register - registers a sensor to a DT thermal zone
+ * @dev: a valid struct device pointer of a sensor device. Must contain
+ * a valid .of_node, for the sensor node.
+ * @sensor_id: a sensor identifier, in case the sensor IP has more
+ * than one sensor
+ * @data: a private pointer (owned by the caller) that will be passed
+ * back, when a temperature reading is needed.
+ * @get_temp: a pointer to a function that reads the sensor temperature.
+ * @get_trend: a pointer to a function that reads the sensor temperature trend.
+ *
+ * This function will search the list of thermal zones described in the
+ * device tree and look for the zone that refers to the sensor device
+ * pointed to by @dev->of_node as its temperature provider. For the zone
+ * pointing to the sensor node, the sensor will be added to the DT thermal
+ * zone device.
+ *
+ * The thermal zone temperature is provided by the @get_temp function
+ * pointer. When called, it will be passed the private pointer @data.
+ *
+ * The thermal zone temperature trend is provided by the @get_trend function
+ * pointer. When called, it will be passed the private pointer @data.
+ *
+ * TODO:
+ * 01 - This function must enqueue the new sensor instead of using
+ * it as the only source of temperature values.
+ *
+ * 02 - There must be a way to match the sensor with all thermal zones
+ * that refer to it.
+ *
+ * Return: On success returns a valid struct thermal_zone_device,
+ * otherwise, it returns a corresponding ERR_PTR(). Caller must
+ * check the return value with help of IS_ERR() helper.
+ */
+struct thermal_zone_device *
+thermal_zone_of_sensor_register(struct device *dev, int sensor_id,
+ void *data, int (*get_temp)(void *, long *),
+ int (*get_trend)(void *, long *))
+{
+ struct device_node *np, *child, *sensor_np;
+
+ np = of_find_node_by_name(NULL, "thermal-zones");
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-EINVAL);
+
+ sensor_np = dev->of_node;
+
+ for_each_child_of_node(np, child) {
+ struct of_phandle_args sensor_specs;
+ int ret, id;
+
+ /* For now, thermal framework supports only 1 sensor per zone */
+ ret = of_parse_phandle_with_args(child, "thermal-sensors",
+ "#thermal-sensor-cells",
+ 0, &sensor_specs);
+ if (ret)
+ continue;
+
+ if (sensor_specs.args_count >= 1) {
+ id = sensor_specs.args[0];
+ WARN(sensor_specs.args_count > 1,
+ "%s: too many cells in sensor specifier %d\n",
+ sensor_specs.np->name, sensor_specs.args_count);
+ } else {
+ id = 0;
+ }
+
+ if (sensor_specs.np == sensor_np && id == sensor_id) {
+ of_node_put(np);
+ return thermal_zone_of_add_sensor(child, sensor_np,
+ data,
+ get_temp,
+ get_trend);
+ }
+ }
+ of_node_put(np);
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_register);
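A minimal sketch of how a sensor driver could hook into this API from its
probe routine (hypothetical driver; bang_get_temp, bang_read_millicelsius
and the private data pointer are illustrative only):

	static int bang_get_temp(void *data, long *temp)
	{
		struct bang_sensor *s = data;

		*temp = bang_read_millicelsius(s);	/* assumed HW read helper */
		return 0;
	}

	/* in probe(): sensor_id 0, no trend callback */
	tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, s,
					      bang_get_temp, NULL);
	if (IS_ERR(tzd))
		return PTR_ERR(tzd);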
+
+/**
+ * thermal_zone_of_sensor_unregister - unregisters a sensor from a DT thermal zone
+ * @dev: a valid struct device pointer of a sensor device. Must contain
+ * a valid .of_node, for the sensor node.
+ * @tzd: a pointer to struct thermal_zone_device where the sensor is registered.
+ *
+ * This function removes the sensor callbacks and private data from the
+ * thermal zone device registered with thermal_zone_of_sensor_register()
+ * API. It will also silence the zone by removing the .get_temp() and .get_trend()
+ * thermal zone device callbacks.
+ *
+ * TODO: When the support to several sensors per zone is added, this
+ * function must search the sensor list based on @dev parameter.
+ *
+ */
+void thermal_zone_of_sensor_unregister(struct device *dev,
+ struct thermal_zone_device *tzd)
+{
+ struct __thermal_zone *tz;
+
+ if (!dev || !tzd || !tzd->devdata)
+ return;
+
+ tz = tzd->devdata;
+
+ /* no __thermal_zone, nothing to be done */
+ if (!tz)
+ return;
+
+ mutex_lock(&tzd->lock);
+ tzd->ops->get_temp = NULL;
+ tzd->ops->get_trend = NULL;
+
+ tz->get_temp = NULL;
+ tz->get_trend = NULL;
+ tz->sensor_data = NULL;
+ mutex_unlock(&tzd->lock);
+}
+EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_unregister);
+
+/*** functions parsing device tree nodes ***/
+
+/**
+ * thermal_of_populate_bind_params - parse and fill cooling map data
+ * @np: DT node containing a cooling-map node
+ * @__tbp: data structure to be filled with cooling map info
+ * @trips: array of thermal zone trip points
+ * @ntrips: number of trip points inside trips.
+ *
+ * This function parses a cooling-map type of node represented by
+ * @np parameter and fills the read data into @__tbp data structure.
+ * It needs the already parsed array of trip points of the thermal zone
+ * in consideration.
+ *
+ * Return: 0 on success, proper error code otherwise
+ */
+static int thermal_of_populate_bind_params(struct device_node *np,
+ struct __thermal_bind_params *__tbp,
+ struct __thermal_trip *trips,
+ int ntrips)
+{
+ struct of_phandle_args cooling_spec;
+ struct device_node *trip;
+ int ret, i;
+ u32 prop;
+
+ /* Default weight. Usage is optional */
+ __tbp->usage = 0;
+ ret = of_property_read_u32(np, "contribution", &prop);
+ if (ret == 0)
+ __tbp->usage = prop;
+
+ trip = of_parse_phandle(np, "trip", 0);
+ if (!trip) {
+ pr_err("missing trip property\n");
+ return -ENODEV;
+ }
+
+ /* match using device_node */
+ for (i = 0; i < ntrips; i++)
+ if (trip == trips[i].np) {
+ __tbp->trip_id = i;
+ break;
+ }
+
+ if (i == ntrips) {
+ ret = -ENODEV;
+ goto end;
+ }
+
+ ret = of_parse_phandle_with_args(np, "cooling-device", "#cooling-cells",
+ 0, &cooling_spec);
+ if (ret < 0) {
+ pr_err("missing cooling_device property\n");
+ goto end;
+ }
+ __tbp->cooling_device = cooling_spec.np;
+ if (cooling_spec.args_count >= 2) { /* at least min and max */
+ __tbp->min = cooling_spec.args[0];
+ __tbp->max = cooling_spec.args[1];
+ } else {
+ pr_err("wrong reference to cooling device, missing limits\n");
+ }
+
+end:
+ of_node_put(trip);
+
+ return ret;
+}
+
+/**
+ * It maps 'enum thermal_trip_type' found in include/linux/thermal.h
+ * into the device tree binding of 'trip', property type.
+ */
+static const char * const trip_types[] = {
+ [THERMAL_TRIP_ACTIVE] = "active",
+ [THERMAL_TRIP_PASSIVE] = "passive",
+ [THERMAL_TRIP_HOT] = "hot",
+ [THERMAL_TRIP_CRITICAL] = "critical",
+};
+
+/**
+ * thermal_of_get_trip_type - Get trip type for the given device_node
+ * @np: Pointer to the given device_node
+ * @type: Pointer to resulting trip type
+ *
+ * The function reads the trip type string from the 'type' property
+ * and stores its index into the trip_types table in @type.
+ *
+ * Return: 0 on success, or errno in error case.
+ */
+static int thermal_of_get_trip_type(struct device_node *np,
+ enum thermal_trip_type *type)
+{
+ const char *t;
+ int err, i;
+
+ err = of_property_read_string(np, "type", &t);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(trip_types); i++)
+ if (!strcasecmp(t, trip_types[i])) {
+ *type = i;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+/**
+ * thermal_of_populate_trip - parse and fill one trip point data
+ * @np: DT node containing a trip point node
+ * @trip: trip point data structure to be filled up
+ *
+ * This function parses a trip point type of node represented by
+ * @np parameter and fills the read data into @trip data structure.
+ *
+ * Return: 0 on success, proper error code otherwise
+ */
+static int thermal_of_populate_trip(struct device_node *np,
+ struct __thermal_trip *trip)
+{
+ int prop;
+ int ret;
+
+ ret = of_property_read_u32(np, "temperature", &prop);
+ if (ret < 0) {
+ pr_err("missing temperature property\n");
+ return ret;
+ }
+ trip->temperature = prop;
+
+ ret = of_property_read_u32(np, "hysteresis", &prop);
+ if (ret < 0) {
+ pr_err("missing hysteresis property\n");
+ return ret;
+ }
+ trip->hysteresis = prop;
+
+ ret = thermal_of_get_trip_type(np, &trip->type);
+ if (ret < 0) {
+ pr_err("wrong trip type property\n");
+ return ret;
+ }
+
+ /* Required for cooling map matching */
+ trip->np = np;
+
+ return 0;
+}
+
+/**
+ * thermal_of_build_thermal_zone - parse and fill one thermal zone data
+ * @np: DT node containing a thermal zone node
+ *
+ * This function parses a thermal zone type of node represented by
+ * @np parameter and fills the read data into a __thermal_zone data structure
+ * and return this pointer.
+ *
+ * TODO: Missing properties to parse: thermal-sensor-names and coefficients
+ *
+ * Return: On success returns a valid struct __thermal_zone,
+ * otherwise, it returns a corresponding ERR_PTR(). Caller must
+ * check the return value with help of IS_ERR() helper.
+ */
+static struct __thermal_zone *
+thermal_of_build_thermal_zone(struct device_node *np)
+{
+ struct device_node *child = NULL, *gchild;
+ struct __thermal_zone *tz;
+ int ret, i;
+ u32 prop;
+
+ if (!np) {
+ pr_err("no thermal zone np\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ tz = kzalloc(sizeof(*tz), GFP_KERNEL);
+ if (!tz)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_property_read_u32(np, "polling-delay-passive", &prop);
+ if (ret < 0) {
+ pr_err("missing polling-delay-passive property\n");
+ goto free_tz;
+ }
+ tz->passive_delay = prop;
+
+ ret = of_property_read_u32(np, "polling-delay", &prop);
+ if (ret < 0) {
+ pr_err("missing polling-delay property\n");
+ goto free_tz;
+ }
+ tz->polling_delay = prop;
+
+ /* trips */
+ child = of_get_child_by_name(np, "trips");
+
+ /* No trips provided */
+ if (!child)
+ goto finish;
+
+ tz->ntrips = of_get_child_count(child);
+ if (tz->ntrips == 0) /* must have at least one child */
+ goto finish;
+
+ tz->trips = kzalloc(tz->ntrips * sizeof(*tz->trips), GFP_KERNEL);
+ if (!tz->trips) {
+ ret = -ENOMEM;
+ goto free_tz;
+ }
+
+ i = 0;
+ for_each_child_of_node(child, gchild) {
+ ret = thermal_of_populate_trip(gchild, &tz->trips[i++]);
+ if (ret)
+ goto free_trips;
+ }
+
+ of_node_put(child);
+
+ /* cooling-maps */
+ child = of_get_child_by_name(np, "cooling-maps");
+
+ /* cooling-maps not provided */
+ if (!child)
+ goto finish;
+
+ tz->num_tbps = of_get_child_count(child);
+ if (tz->num_tbps == 0)
+ goto finish;
+
+ tz->tbps = kzalloc(tz->num_tbps * sizeof(*tz->tbps), GFP_KERNEL);
+ if (!tz->tbps) {
+ ret = -ENOMEM;
+ goto free_trips;
+ }
+
+ i = 0;
+ for_each_child_of_node(child, gchild)
+ ret = thermal_of_populate_bind_params(gchild, &tz->tbps[i++],
+ tz->trips, tz->ntrips);
+ if (ret)
+ goto free_tbps;
+
+finish:
+ of_node_put(child);
+ tz->mode = THERMAL_DEVICE_DISABLED;
+
+ return tz;
+
+free_tbps:
+ kfree(tz->tbps);
+free_trips:
+ kfree(tz->trips);
+free_tz:
+ kfree(tz);
+ of_node_put(child);
+
+ return ERR_PTR(ret);
+}
+
+static inline void of_thermal_free_zone(struct __thermal_zone *tz)
+{
+ kfree(tz->tbps);
+ kfree(tz->trips);
+ kfree(tz);
+}
+
+/**
+ * of_parse_thermal_zones - parse device tree thermal data
+ *
+ * Initialization function that can be called by machine initialization
+ * code to parse thermal data and populate the thermal framework
+ * with hardware thermal zones info. This function only parses thermal zones.
+ * Cooling devices and sensor devices nodes are supposed to be parsed
+ * by their respective drivers.
+ *
+ * Return: 0 on success, proper error code otherwise
+ *
+ */
+int __init of_parse_thermal_zones(void)
+{
+ struct device_node *np, *child;
+ struct __thermal_zone *tz;
+ struct thermal_zone_device_ops *ops;
+
+ np = of_find_node_by_name(NULL, "thermal-zones");
+ if (!np) {
+ pr_debug("unable to find thermal zones\n");
+ return 0; /* Run successfully on systems without thermal DT */
+ }
+
+ for_each_child_of_node(np, child) {
+ struct thermal_zone_device *zone;
+ struct thermal_zone_params *tzp;
+
+ tz = thermal_of_build_thermal_zone(child);
+ if (IS_ERR(tz)) {
+ pr_err("failed to build thermal zone %s: %ld\n",
+ child->name,
+ PTR_ERR(tz));
+ continue;
+ }
+
+ ops = kmemdup(&of_thermal_ops, sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ goto exit_free;
+
+ tzp = kzalloc(sizeof(*tzp), GFP_KERNEL);
+ if (!tzp) {
+ kfree(ops);
+ goto exit_free;
+ }
+
+ /* No hwmon because there might be hwmon drivers registering */
+ tzp->no_hwmon = true;
+
+ zone = thermal_zone_device_register(child->name, tz->ntrips,
+ 0, tz,
+ ops, tzp,
+ tz->passive_delay,
+ tz->polling_delay);
+ if (IS_ERR(zone)) {
+ pr_err("Failed to build %s zone %ld\n", child->name,
+ PTR_ERR(zone));
+ kfree(tzp);
+ kfree(ops);
+ of_thermal_free_zone(tz);
+ /* still attempt to build the remaining zones */
+ }
+ }
+
+ return 0;
+
+exit_free:
+ of_thermal_free_zone(tz);
+
+ /* no memory available, so free what we have built */
+ of_thermal_destroy_zones();
+
+ return -ENOMEM;
+}
+
+/**
+ * of_thermal_destroy_zones - remove all zones parsed and allocated resources
+ *
+ * Finds all zones parsed and added to the thermal framework and remove them
+ * from the system, together with their resources.
+ *
+ */
+void of_thermal_destroy_zones(void)
+{
+ struct device_node *np, *child;
+
+ np = of_find_node_by_name(NULL, "thermal-zones");
+ if (!np) {
+ pr_err("unable to find thermal zones\n");
+ return;
+ }
+
+ for_each_child_of_node(np, child) {
+ struct thermal_zone_device *zone;
+
+ zone = thermal_zone_get_zone_by_name(child->name);
+ if (IS_ERR(zone))
+ continue;
+
+ thermal_zone_device_unregister(zone);
+ kfree(zone->tzp);
+ kfree(zone->ops);
+ of_thermal_free_zone(zone->devdata);
+ }
+}
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
index c2301da08ac7..3f5ad25ddca8 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.c
+++ b/drivers/thermal/samsung/exynos_thermal_common.c
@@ -280,7 +280,7 @@ static int exynos_get_trend(struct thermal_zone_device *thermal,
return 0;
}
/* Operation callback functions for thermal zone */
-static struct thermal_zone_device_ops const exynos_dev_ops = {
+static struct thermal_zone_device_ops exynos_dev_ops = {
.bind = exynos_bind,
.unbind = exynos_unbind,
.get_temp = exynos_get_temp,
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 32f38b90c4f6..0d96a510389f 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -205,6 +205,7 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
skip_calib_data:
if (pdata->max_trigger_level > MAX_THRESHOLD_LEVS) {
dev_err(&pdev->dev, "Invalid max trigger level\n");
+ ret = -EINVAL;
goto out;
}
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
index 073c292baa53..476b768c633e 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.c
+++ b/drivers/thermal/samsung/exynos_tmu_data.c
@@ -131,8 +131,8 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = {
#define EXYNOS4412_TMU_DATA \
.threshold_falling = 10, \
- .trigger_levels[0] = 85, \
- .trigger_levels[1] = 103, \
+ .trigger_levels[0] = 70, \
+ .trigger_levels[1] = 95, \
.trigger_levels[2] = 110, \
.trigger_levels[3] = 120, \
.trigger_enable[0] = true, \
@@ -155,12 +155,12 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = {
.second_point_trim = 85, \
.default_temp_offset = 50, \
.freq_tab[0] = { \
- .freq_clip_max = 800 * 1000, \
- .temp_level = 85, \
+ .freq_clip_max = 1400 * 1000, \
+ .temp_level = 70, \
}, \
.freq_tab[1] = { \
- .freq_clip_max = 200 * 1000, \
- .temp_level = 103, \
+ .freq_clip_max = 400 * 1000, \
+ .temp_level = 95, \
}, \
.freq_tab_count = 2, \
.registers = &exynos4412_tmu_registers, \
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index d89e781b0a18..f251521baaa2 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -60,6 +60,7 @@ static unsigned long get_target_state(struct thermal_instance *instance,
*/
cdev->ops->get_cur_state(cdev, &cur_state);
next_target = instance->target;
+ dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state);
switch (trend) {
case THERMAL_TREND_RAISING:
@@ -131,6 +132,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
if (tz->temperature >= trip_temp)
throttle = true;
+ dev_dbg(&tz->device, "Trip%d[type=%d,temp=%ld]:trend=%d,throttle=%d\n",
+ trip, trip_type, trip_temp, trend, throttle);
+
mutex_lock(&tz->lock);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
@@ -139,6 +143,8 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
old_target = instance->target;
instance->target = get_target_state(instance, trend, throttle);
+ dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
+ old_target, (int)instance->target);
if (old_target == instance->target)
continue;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index f1d511a9475b..338a88bf6662 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -34,6 +34,7 @@
#include <linux/thermal.h>
#include <linux/reboot.h>
#include <linux/string.h>
+#include <linux/of.h>
#include <net/netlink.h>
#include <net/genetlink.h>
@@ -403,7 +404,7 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
enum thermal_trip_type type;
#endif
- if (!tz || IS_ERR(tz))
+ if (!tz || IS_ERR(tz) || !tz->ops->get_temp)
goto exit;
mutex_lock(&tz->lock);
@@ -450,12 +451,18 @@ static void update_temperature(struct thermal_zone_device *tz)
tz->last_temperature = tz->temperature;
tz->temperature = temp;
mutex_unlock(&tz->lock);
+
+ dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
+ tz->last_temperature, tz->temperature);
}
void thermal_zone_device_update(struct thermal_zone_device *tz)
{
int count;
+ if (!tz->ops->get_temp)
+ return;
+
update_temperature(tz);
for (count = 0; count < tz->trips; count++)
@@ -774,6 +781,9 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
ret = tz->ops->set_emul_temp(tz, temperature);
}
+ if (!ret)
+ thermal_zone_device_update(tz);
+
return ret ? ret : count;
}
static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
@@ -1052,7 +1062,8 @@ static struct class thermal_class = {
};
/**
- * thermal_cooling_device_register() - register a new thermal cooling device
+ * __thermal_cooling_device_register() - register a new thermal cooling device
+ * @np: a pointer to a device tree node.
* @type: the thermal cooling device type.
* @devdata: device private data.
* @ops: standard thermal cooling devices callbacks.
@@ -1060,13 +1071,16 @@ static struct class thermal_class = {
* This interface function adds a new thermal cooling device (fan/processor/...)
* to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
* to all the thermal zone devices registered at the same time.
+ * It also gives the opportunity to link the cooling device to a device tree
+ * node, so that it can be bound to a thermal zone created out of device tree.
*
* Return: a pointer to the created struct thermal_cooling_device or an
* ERR_PTR. Caller must check return value with IS_ERR*() helpers.
*/
-struct thermal_cooling_device *
-thermal_cooling_device_register(char *type, void *devdata,
- const struct thermal_cooling_device_ops *ops)
+static struct thermal_cooling_device *
+__thermal_cooling_device_register(struct device_node *np,
+ char *type, void *devdata,
+ const struct thermal_cooling_device_ops *ops)
{
struct thermal_cooling_device *cdev;
int result;
@@ -1091,6 +1105,7 @@ thermal_cooling_device_register(char *type, void *devdata,
strlcpy(cdev->type, type ? : "", sizeof(cdev->type));
mutex_init(&cdev->lock);
INIT_LIST_HEAD(&cdev->thermal_instances);
+ cdev->np = np;
cdev->ops = ops;
cdev->updated = true;
cdev->device.class = &thermal_class;
@@ -1133,9 +1148,53 @@ unregister:
device_unregister(&cdev->device);
return ERR_PTR(result);
}
+
+/**
+ * thermal_cooling_device_register() - register a new thermal cooling device
+ * @type: the thermal cooling device type.
+ * @devdata: device private data.
+ * @ops: standard thermal cooling devices callbacks.
+ *
+ * This interface function adds a new thermal cooling device (fan/processor/...)
+ * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
+ * to all the thermal zone devices registered at the same time.
+ *
+ * Return: a pointer to the created struct thermal_cooling_device or an
+ * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
+ */
+struct thermal_cooling_device *
+thermal_cooling_device_register(char *type, void *devdata,
+ const struct thermal_cooling_device_ops *ops)
+{
+ return __thermal_cooling_device_register(NULL, type, devdata, ops);
+}
EXPORT_SYMBOL_GPL(thermal_cooling_device_register);
/**
+ * thermal_of_cooling_device_register() - register an OF thermal cooling device
+ * @np: a pointer to a device tree node.
+ * @type: the thermal cooling device type.
+ * @devdata: device private data.
+ * @ops: standard thermal cooling devices callbacks.
+ *
+ * This function will register a cooling device with a device tree node reference.
+ * This interface function adds a new thermal cooling device (fan/processor/...)
+ * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
+ * to all the thermal zone devices registered at the same time.
+ *
+ * Return: a pointer to the created struct thermal_cooling_device or an
+ * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
+ */
+struct thermal_cooling_device *
+thermal_of_cooling_device_register(struct device_node *np,
+ char *type, void *devdata,
+ const struct thermal_cooling_device_ops *ops)
+{
+ return __thermal_cooling_device_register(np, type, devdata, ops);
+}
+EXPORT_SYMBOL_GPL(thermal_of_cooling_device_register);
+
+/**
* thermal_cooling_device_unregister - removes the registered thermal cooling device
* @cdev: the thermal cooling device to remove.
*
@@ -1207,6 +1266,8 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
mutex_lock(&cdev->lock);
/* Make sure cdev enters the deepest cooling state */
list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
+ dev_dbg(&cdev->device, "zone%d->target=%lu\n",
+ instance->tz->id, instance->target);
if (instance->target == THERMAL_NO_TARGET)
continue;
if (instance->target > target)
@@ -1215,6 +1276,7 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
mutex_unlock(&cdev->lock);
cdev->ops->set_cur_state(cdev, target);
cdev->updated = true;
+ dev_dbg(&cdev->device, "set to state %lu\n", target);
}
EXPORT_SYMBOL(thermal_cdev_update);
@@ -1370,7 +1432,7 @@ static void remove_trip_attrs(struct thermal_zone_device *tz)
*/
struct thermal_zone_device *thermal_zone_device_register(const char *type,
int trips, int mask, void *devdata,
- const struct thermal_zone_device_ops *ops,
+ struct thermal_zone_device_ops *ops,
const struct thermal_zone_params *tzp,
int passive_delay, int polling_delay)
{
@@ -1386,7 +1448,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips)
return ERR_PTR(-EINVAL);
- if (!ops || !ops->get_temp)
+ if (!ops)
return ERR_PTR(-EINVAL);
if (trips > 0 && (!ops->get_trip_type || !ops->get_trip_temp))
@@ -1490,6 +1552,9 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
+ if (!tz->ops->get_temp)
+ thermal_zone_device_set_polling(tz, 0);
+
thermal_zone_device_update(tz);
if (!result)
@@ -1740,8 +1805,14 @@ static int __init thermal_init(void)
if (result)
goto unregister_class;
+ result = of_parse_thermal_zones();
+ if (result)
+ goto exit_netlink;
+
return 0;
+exit_netlink:
+ genetlink_exit();
unregister_governors:
thermal_unregister_governors();
unregister_class:
@@ -1757,6 +1828,7 @@ error:
static void __exit thermal_exit(void)
{
+ of_thermal_destroy_zones();
genetlink_exit();
class_unregister(&thermal_class);
thermal_unregister_governors();
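
Note: the new thermal_of_cooling_device_register() exported above takes the same arguments as thermal_cooling_device_register() plus a device_node, so a driver can tie its cooling device to a DT node. A minimal caller sketch; the my_fan_* names are illustrative stubs, not part of this patch:

    /* sketch only: my_fan_* are hypothetical */
    static int my_fan_get_max_state(struct thermal_cooling_device *c, unsigned long *s)
    { *s = 3; return 0; }
    static int my_fan_get_cur_state(struct thermal_cooling_device *c, unsigned long *s)
    { *s = 0; return 0; }
    static int my_fan_set_cur_state(struct thermal_cooling_device *c, unsigned long s)
    { return 0; }

    static const struct thermal_cooling_device_ops my_fan_ops = {
        .get_max_state = my_fan_get_max_state,
        .get_cur_state = my_fan_get_cur_state,
        .set_cur_state = my_fan_set_cur_state,
    };

    /* in probe(): */
    cdev = thermal_of_cooling_device_register(pdev->dev.of_node, "my-fan",
                                              NULL, &my_fan_ops);
    if (IS_ERR(cdev))
        return PTR_ERR(cdev);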
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 7cf2f6626251..3db339fb636f 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -77,4 +77,13 @@ static inline int thermal_gov_user_space_register(void) { return 0; }
static inline void thermal_gov_user_space_unregister(void) {}
#endif /* CONFIG_THERMAL_GOV_USER_SPACE */
+/* device tree support */
+#ifdef CONFIG_THERMAL_OF
+int of_parse_thermal_zones(void);
+void of_thermal_destroy_zones(void);
+#else
+static inline int of_parse_thermal_zones(void) { return 0; }
+static inline void of_thermal_destroy_zones(void) { }
+#endif
+
#endif /* __THERMAL_CORE_H__ */
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 5a47cc8c8f85..9eec26dc0448 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -31,6 +31,7 @@
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
+#include <linux/of.h>
#include "ti-thermal.h"
#include "ti-bandgap.h"
@@ -44,6 +45,7 @@ struct ti_thermal_data {
enum thermal_device_mode mode;
struct work_struct thermal_wq;
int sensor_id;
+ bool our_zone;
};
static void ti_thermal_work(struct work_struct *work)
@@ -75,11 +77,10 @@ static inline int ti_thermal_hotspot_temperature(int t, int s, int c)
/* thermal zone ops */
/* Get temperature callback function for thermal zone*/
-static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
- unsigned long *temp)
+static inline int __ti_thermal_get_temp(void *devdata, long *temp)
{
struct thermal_zone_device *pcb_tz = NULL;
- struct ti_thermal_data *data = thermal->devdata;
+ struct ti_thermal_data *data = devdata;
struct ti_bandgap *bgp;
const struct ti_temp_sensor *s;
int ret, tmp, slope, constant;
@@ -118,6 +119,14 @@ static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
return ret;
}
+static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
+ unsigned long *temp)
+{
+ struct ti_thermal_data *data = thermal->devdata;
+
+ return __ti_thermal_get_temp(data, temp);
+}
+
/* Bind callback functions for thermal zone */
static int ti_thermal_bind(struct thermal_zone_device *thermal,
struct thermal_cooling_device *cdev)
@@ -230,11 +239,9 @@ static int ti_thermal_get_trip_temp(struct thermal_zone_device *thermal,
return 0;
}
-/* Get the temperature trend callback functions for thermal zone */
-static int ti_thermal_get_trend(struct thermal_zone_device *thermal,
- int trip, enum thermal_trend *trend)
+static int __ti_thermal_get_trend(void *p, long *trend)
{
- struct ti_thermal_data *data = thermal->devdata;
+ struct ti_thermal_data *data = p;
struct ti_bandgap *bgp;
int id, tr, ret = 0;
@@ -245,6 +252,22 @@ static int ti_thermal_get_trend(struct thermal_zone_device *thermal,
if (ret)
return ret;
+ *trend = tr;
+
+ return 0;
+}
+
+/* Get the temperature trend callback functions for thermal zone */
+static int ti_thermal_get_trend(struct thermal_zone_device *thermal,
+ int trip, enum thermal_trend *trend)
+{
+ int ret;
+ long tr;
+
+ ret = __ti_thermal_get_trend(thermal->devdata, &tr);
+ if (ret)
+ return ret;
+
if (tr > 0)
*trend = THERMAL_TREND_RAISING;
else if (tr < 0)
@@ -308,16 +331,23 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
if (!data)
return -EINVAL;
- /* Create thermal zone */
- data->ti_thermal = thermal_zone_device_register(domain,
+ /* in case this is specified by DT */
+ data->ti_thermal = thermal_zone_of_sensor_register(bgp->dev, id,
+ data, __ti_thermal_get_temp,
+ __ti_thermal_get_trend);
+ if (IS_ERR(data->ti_thermal)) {
+ /* Create thermal zone */
+ data->ti_thermal = thermal_zone_device_register(domain,
OMAP_TRIP_NUMBER, 0, data, &ti_thermal_ops,
NULL, FAST_TEMP_MONITORING_RATE,
FAST_TEMP_MONITORING_RATE);
- if (IS_ERR(data->ti_thermal)) {
- dev_err(bgp->dev, "thermal zone device is NULL\n");
- return PTR_ERR(data->ti_thermal);
+ if (IS_ERR(data->ti_thermal)) {
+ dev_err(bgp->dev, "thermal zone device is NULL\n");
+ return PTR_ERR(data->ti_thermal);
+ }
+ data->ti_thermal->polling_delay = FAST_TEMP_MONITORING_RATE;
+ data->our_zone = true;
}
- data->ti_thermal->polling_delay = FAST_TEMP_MONITORING_RATE;
ti_bandgap_set_sensor_data(bgp, id, data);
ti_bandgap_write_update_interval(bgp, data->sensor_id,
data->ti_thermal->polling_delay);
@@ -331,7 +361,13 @@ int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id)
data = ti_bandgap_get_sensor_data(bgp, id);
- thermal_zone_device_unregister(data->ti_thermal);
+ if (data && data->ti_thermal) {
+ if (data->our_zone)
+ thermal_zone_device_unregister(data->ti_thermal);
+ else
+ thermal_zone_of_sensor_unregister(bgp->dev,
+ data->ti_thermal);
+ }
return 0;
}
@@ -350,6 +386,15 @@ int ti_thermal_report_sensor_temperature(struct ti_bandgap *bgp, int id)
int ti_thermal_register_cpu_cooling(struct ti_bandgap *bgp, int id)
{
struct ti_thermal_data *data;
+ struct device_node *np = bgp->dev->of_node;
+
+ /*
+ * We are assuming here that if one deploys the zone
+ * using DT, then it must be aware that the cooling device
+ * loading has to happen via cpufreq driver.
+ */
+ if (of_find_property(np, "#thermal-sensor-cells", NULL))
+ return 0;
data = ti_bandgap_get_sensor_data(bgp, id);
if (!data || IS_ERR(data))
@@ -380,7 +425,9 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id)
struct ti_thermal_data *data;
data = ti_bandgap_get_sensor_data(bgp, id);
- cpufreq_cooling_unregister(data->cool_dev);
+
+ if (data && data->cool_dev)
+ cpufreq_cooling_unregister(data->cool_dev);
return 0;
}
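
Note: the ti-thermal change above illustrates the intended usage pattern: try thermal_zone_of_sensor_register() first and fall back to thermal_zone_device_register() when no DT zone references the sensor. A minimal sketch, assuming hypothetical callbacks with the int (*)(void *, long *) form used in this driver:

    /* sketch; my_get_temp/my_get_trend are illustrative */
    static int my_get_temp(void *data, long *temp)   { *temp = 45000; return 0; }
    static int my_get_trend(void *data, long *trend) { *trend = 0; return 0; }

    tz = thermal_zone_of_sensor_register(dev, 0, drvdata,
                                         my_get_temp, my_get_trend);
    if (IS_ERR(tz))     /* no DT zone for this sensor: register a legacy zone */
        tz = thermal_zone_device_register("my-zone", 0, 0, drvdata,
                                          &my_zone_ops, NULL, 1000, 1000);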
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 7722cb9d5a80..972e1c73722a 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -215,7 +215,7 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd,
return 0;
}
-int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
+static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
unsigned long temp)
{
u32 l, h;
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 978db344bda0..b24aa010f68c 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -366,7 +366,7 @@ config TRACE_SINK
"Trace data router for MIPI P1149.7 cJTAG standard".
config PPC_EPAPR_HV_BYTECHAN
- tristate "ePAPR hypervisor byte channel driver"
+ bool "ePAPR hypervisor byte channel driver"
depends on PPC
select EPAPR_PARAVIRT
help
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 71630a2af42c..979e7c3ea2cb 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1248,6 +1248,8 @@ static int rs_ioctl(struct tty_struct *tty,
struct async_icount cprev, cnow; /* kernel counter temps */
void __user *argp = (void __user *)arg;
unsigned long flags;
+ DEFINE_WAIT(wait);
+ int ret;
if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
return -ENODEV;
@@ -1288,25 +1290,33 @@ static int rs_ioctl(struct tty_struct *tty,
cprev = info->icount;
local_irq_restore(flags);
while (1) {
- interruptible_sleep_on(&info->tport.delta_msr_wait);
- /* see if a signal did it */
- if (signal_pending(current))
- return -ERESTARTSYS;
+ prepare_to_wait(&info->tport.delta_msr_wait,
+ &wait, TASK_INTERRUPTIBLE);
local_irq_save(flags);
cnow = info->icount; /* atomic copy */
local_irq_restore(flags);
if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
- cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
- return -EIO; /* no change => error */
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
+ ret = -EIO; /* no change => error */
+ break;
+ }
if ( ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
((arg & TIOCM_CTS) && (cnow.cts != cprev.cts)) ) {
- return 0;
+ ret = 0;
+ break;
+ }
+ schedule();
+ /* see if a signal did it */
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
}
cprev = cnow;
}
- /* NOTREACHED */
+ finish_wait(&info->tport.delta_msr_wait, &wait);
+ return ret;
case TIOCSERGWILD:
case TIOCSERSWILD:
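
Note: the amiserial hunk above replaces the racy interruptible_sleep_on() with the standard prepare_to_wait()/finish_wait() idiom, which re-checks the condition after queueing so a wakeup arriving between the check and the sleep is not lost. The general shape (a sketch, not specific to this driver):

    DEFINE_WAIT(wait);

    for (;;) {
        prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
        if (condition)
            break;
        if (signal_pending(current)) {
            ret = -ERESTARTSYS;
            break;
        }
        schedule();
    }
    finish_wait(&wq, &wait);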
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 33f83fee9fae..a57bb5ab761c 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -2709,6 +2709,8 @@ cy_ioctl(struct tty_struct *tty,
break;
#ifndef CONFIG_CYZ_INTR
case CYZSETPOLLCYCLE:
+ if (arg > LONG_MAX / HZ)
+ return -ENODEV;
cyz_polling_cycle = (arg * HZ) / 1000;
break;
case CYZGETPOLLCYCLE:
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index f17d2e4ee2ca..75dc9d25f326 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -14,7 +14,6 @@
*/
#include <linux/console.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 9eba119bcdd3..50b46881b6ca 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -788,7 +788,7 @@ static int hvc_tiocmset(struct tty_struct *tty,
}
#ifdef CONFIG_CONSOLE_POLL
-int hvc_poll_init(struct tty_driver *driver, int line, char *options)
+static int hvc_poll_init(struct tty_driver *driver, int line, char *options)
{
return 0;
}
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index db19a38c8c69..ea74460f3638 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -77,6 +77,7 @@ struct hvc_iucv_private {
struct list_head tty_outqueue; /* outgoing IUCV messages */
struct list_head tty_inqueue; /* incoming IUCV messages */
struct device *dev; /* device structure */
+ u8 info_path[16]; /* IUCV path info (dev attr) */
};
struct iucv_tty_buffer {
@@ -126,7 +127,7 @@ static struct iucv_handler hvc_iucv_handler = {
* This function returns the struct hvc_iucv_private instance that corresponds
* to the HVC virtual terminal number specified as parameter @num.
*/
-struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
+static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
{
if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
return NULL;
@@ -772,18 +773,37 @@ static int hvc_iucv_filter_connreq(u8 ipvmid[8])
static int hvc_iucv_path_pending(struct iucv_path *path,
u8 ipvmid[8], u8 ipuser[16])
{
- struct hvc_iucv_private *priv;
+ struct hvc_iucv_private *priv, *tmp;
+ u8 wildcard[9] = "lnxhvc  ";
+ int i, rc, find_unused;
u8 nuser_data[16];
u8 vm_user_id[9];
- int i, rc;
+ ASCEBC(wildcard, sizeof(wildcard));
+ find_unused = !memcmp(wildcard, ipuser, 8);
+
+ /* First, check if the pending path request is managed by this
+ * IUCV handler:
+ * - find a disconnected device if ipuser contains the wildcard
+ * - find the device that matches the terminal ID in ipuser
+ */
priv = NULL;
- for (i = 0; i < hvc_iucv_devices; i++)
- if (hvc_iucv_table[i] &&
- (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
- priv = hvc_iucv_table[i];
+ for (i = 0; i < hvc_iucv_devices; i++) {
+ tmp = hvc_iucv_table[i];
+ if (!tmp)
+ continue;
+
+ if (find_unused) {
+ spin_lock(&tmp->lock);
+ if (tmp->iucv_state == IUCV_DISCONN)
+ priv = tmp;
+ spin_unlock(&tmp->lock);
+
+ } else if (!memcmp(tmp->srv_name, ipuser, 8))
+ priv = tmp;
+ if (priv)
break;
- }
+ }
if (!priv)
return -ENODEV;
@@ -826,6 +846,10 @@ static int hvc_iucv_path_pending(struct iucv_path *path,
priv->path = path;
priv->iucv_state = IUCV_CONNECTED;
+ /* store path information */
+ memcpy(priv->info_path, ipvmid, 8);
+ memcpy(priv->info_path + 8, ipuser + 8, 8);
+
/* flush buffered output data... */
schedule_delayed_work(&priv->sndbuf_work, 5);
@@ -960,6 +984,49 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
return 0;
}
+static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hvc_iucv_private *priv = dev_get_drvdata(dev);
+ size_t len;
+
+ len = sizeof(priv->srv_name);
+ memcpy(buf, priv->srv_name, len);
+ EBCASC(buf, len);
+ buf[len++] = '\n';
+ return len;
+}
+
+static ssize_t hvc_iucv_dev_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hvc_iucv_private *priv = dev_get_drvdata(dev);
+ return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state);
+}
+
+static ssize_t hvc_iucv_dev_peer_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hvc_iucv_private *priv = dev_get_drvdata(dev);
+ char vmid[9], ipuser[9];
+
+ memset(vmid, 0, sizeof(vmid));
+ memset(ipuser, 0, sizeof(ipuser));
+
+ spin_lock_bh(&priv->lock);
+ if (priv->iucv_state == IUCV_CONNECTED) {
+ memcpy(vmid, priv->info_path, 8);
+ memcpy(ipuser, priv->info_path + 8, 8);
+ }
+ spin_unlock_bh(&priv->lock);
+ EBCASC(ipuser, 8);
+
+ return sprintf(buf, "%s:%s\n", vmid, ipuser);
+}
+
/* HVC operations */
static const struct hv_ops hvc_iucv_ops = {
@@ -985,6 +1052,25 @@ static struct device_driver hvc_iucv_driver = {
.pm = &hvc_iucv_pm_ops,
};
+/* IUCV HVC device attributes */
+static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
+static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
+static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);
+static struct attribute *hvc_iucv_dev_attrs[] = {
+ &dev_attr_termid.attr,
+ &dev_attr_state.attr,
+ &dev_attr_peer.attr,
+ NULL,
+};
+static struct attribute_group hvc_iucv_dev_attr_group = {
+ .attrs = hvc_iucv_dev_attrs,
+};
+static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
+ &hvc_iucv_dev_attr_group,
+ NULL,
+};
+
+
/**
* hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
* @id: hvc_iucv_table index
@@ -1046,6 +1132,7 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console)
priv->dev->bus = &iucv_bus;
priv->dev->parent = iucv_root;
priv->dev->driver = &hvc_iucv_driver;
+ priv->dev->groups = hvc_iucv_dev_attr_groups;
priv->dev->release = (void (*)(struct device *)) kfree;
rc = device_register(priv->dev);
if (rc) {
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 6496872e2e47..b01659bd4f7c 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -255,13 +255,7 @@ static int __init hvc_opal_init(void)
/* Register as a vio device to receive callbacks */
return platform_driver_register(&hvc_opal_driver);
}
-module_init(hvc_opal_init);
-
-static void __exit hvc_opal_exit(void)
-{
- platform_driver_unregister(&hvc_opal_driver);
-}
-module_exit(hvc_opal_exit);
+device_initcall(hvc_opal_init);
static void udbg_opal_putc(char c)
{
diff --git a/drivers/tty/hvc/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
index 0069bb86ba49..08c87920b74a 100644
--- a/drivers/tty/hvc/hvc_rtas.c
+++ b/drivers/tty/hvc/hvc_rtas.c
@@ -102,17 +102,7 @@ static int __init hvc_rtas_init(void)
return 0;
}
-module_init(hvc_rtas_init);
-
-/* This will tear down the tty portion of the driver */
-static void __exit hvc_rtas_exit(void)
-{
- /* Really the fun isn't over until the worker thread breaks down and
- * the tty cleans up */
- if (hvc_rtas_dev)
- hvc_remove(hvc_rtas_dev);
-}
-module_exit(hvc_rtas_exit);
+device_initcall(hvc_rtas_init);
/* This will happen prior to module init. There is no tty at this time? */
static int __init hvc_rtas_console_init(void)
diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
index 72228276fe31..9cf573d06a29 100644
--- a/drivers/tty/hvc/hvc_udbg.c
+++ b/drivers/tty/hvc/hvc_udbg.c
@@ -80,14 +80,7 @@ static int __init hvc_udbg_init(void)
return 0;
}
-module_init(hvc_udbg_init);
-
-static void __exit hvc_udbg_exit(void)
-{
- if (hvc_udbg_dev)
- hvc_remove(hvc_udbg_dev);
-}
-module_exit(hvc_udbg_exit);
+device_initcall(hvc_udbg_init);
static int __init hvc_udbg_console_init(void)
{
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 636c9baad7a5..2dc2831840ca 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -561,18 +561,7 @@ static int __init xen_hvc_init(void)
#endif
return r;
}
-
-static void __exit xen_hvc_fini(void)
-{
- struct xencons_info *entry, *next;
-
- if (list_empty(&xenconsoles))
- return;
-
- list_for_each_entry_safe(entry, next, &xenconsoles, list) {
- xen_console_remove(entry);
- }
-}
+device_initcall(xen_hvc_init);
static int xen_cons_init(void)
{
@@ -598,10 +587,6 @@ static int xen_cons_init(void)
hvc_instantiate(HVC_COOKIE, 0, ops);
return 0;
}
-
-
-module_init(xen_hvc_init);
-module_exit(xen_hvc_fini);
console_initcall(xen_cons_init);
#ifdef CONFIG_EARLY_PRINTK
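
Note: the hvc_opal/hvc_rtas/hvc_udbg/hvc_xen hunks all drop module_init()/module_exit() in favour of device_initcall(); these console drivers are built-in only, so their exit paths were dead code. The resulting shape is simply (sketch):

    static int __init my_hvc_init(void)
    {
        /* probe and register the console transport */
        return 0;
    }
    device_initcall(my_hvc_init);   /* no __exit counterpart needed for built-in code */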
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
index 347050ea414a..7ae6c293e518 100644
--- a/drivers/tty/hvc/hvsi_lib.c
+++ b/drivers/tty/hvc/hvsi_lib.c
@@ -1,5 +1,4 @@
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/console.h>
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index 8fd72ff9436e..ebd5bff0f5c1 100644
--- a/drivers/tty/ipwireless/tty.c
+++ b/drivers/tty/ipwireless/tty.c
@@ -15,7 +15,6 @@
* Copyright (C) 2007 David Sterba
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index c0f76da55304..2ebe47b78a3e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -194,6 +194,7 @@ struct gsm_control {
struct gsm_mux {
struct tty_struct *tty; /* The tty our ldisc is bound to */
spinlock_t lock;
+ struct mutex mutex;
unsigned int num;
struct kref ref;
@@ -1089,6 +1090,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
{
unsigned int addr = 0;
unsigned int modem = 0;
+ unsigned int brk = 0;
struct gsm_dlci *dlci;
int len = clen;
u8 *dp = data;
@@ -1115,6 +1117,16 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
if (len == 0)
return;
}
+ len--;
+ if (len > 0) {
+ while (gsm_read_ea(&brk, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ modem <<= 7;
+ modem |= (brk & 0x7f);
+ }
tty = tty_port_tty_get(&dlci->port);
gsm_process_modem(tty, dlci, modem, clen);
if (tty) {
@@ -1704,11 +1716,8 @@ static void gsm_dlci_release(struct gsm_dlci *dlci)
gsm_destroy_network(dlci);
mutex_unlock(&dlci->mutex);
- /* tty_vhangup needs the tty_lock, so unlock and
- relock after doing the hangup. */
- tty_unlock(tty);
tty_vhangup(tty);
- tty_lock(tty);
+
tty_port_tty_set(&dlci->port, NULL);
tty_kref_put(tty);
}
@@ -2019,7 +2028,7 @@ static void gsm_error(struct gsm_mux *gsm,
* and then shut down each device hanging up the channels as we go.
*/
-void gsm_cleanup_mux(struct gsm_mux *gsm)
+static void gsm_cleanup_mux(struct gsm_mux *gsm)
{
int i;
struct gsm_dlci *dlci = gsm->dlci[0];
@@ -2054,15 +2063,16 @@ void gsm_cleanup_mux(struct gsm_mux *gsm)
dlci->state == DLCI_CLOSED);
}
/* Free up any link layer users */
+ mutex_lock(&gsm->mutex);
for (i = 0; i < NUM_DLCI; i++)
if (gsm->dlci[i])
gsm_dlci_release(gsm->dlci[i]);
+ mutex_unlock(&gsm->mutex);
/* Now wipe the queues */
list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list)
kfree(txq);
INIT_LIST_HEAD(&gsm->tx_list);
}
-EXPORT_SYMBOL_GPL(gsm_cleanup_mux);
/**
* gsm_activate_mux - generic GSM setup
@@ -2073,7 +2083,7 @@ EXPORT_SYMBOL_GPL(gsm_cleanup_mux);
* finally kick off connecting to DLCI 0 on the modem.
*/
-int gsm_activate_mux(struct gsm_mux *gsm)
+static int gsm_activate_mux(struct gsm_mux *gsm)
{
struct gsm_dlci *dlci;
int i = 0;
@@ -2109,7 +2119,6 @@ int gsm_activate_mux(struct gsm_mux *gsm)
gsm->dead = 0; /* Tty opens are now permissible */
return 0;
}
-EXPORT_SYMBOL_GPL(gsm_activate_mux);
/**
* gsm_free_mux - free up a mux
@@ -2117,13 +2126,12 @@ EXPORT_SYMBOL_GPL(gsm_activate_mux);
*
* Dispose of allocated resources for a dead mux
*/
-void gsm_free_mux(struct gsm_mux *gsm)
+static void gsm_free_mux(struct gsm_mux *gsm)
{
kfree(gsm->txframe);
kfree(gsm->buf);
kfree(gsm);
}
-EXPORT_SYMBOL_GPL(gsm_free_mux);
/**
* gsm_free_muxr - free up a mux
@@ -2153,7 +2161,7 @@ static inline void mux_put(struct gsm_mux *gsm)
* Creates a new mux ready for activation.
*/
-struct gsm_mux *gsm_alloc_mux(void)
+static struct gsm_mux *gsm_alloc_mux(void)
{
struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL);
if (gsm == NULL)
@@ -2170,6 +2178,7 @@ struct gsm_mux *gsm_alloc_mux(void)
return NULL;
}
spin_lock_init(&gsm->lock);
+ mutex_init(&gsm->mutex);
kref_init(&gsm->ref);
INIT_LIST_HEAD(&gsm->tx_list);
@@ -2185,7 +2194,6 @@ struct gsm_mux *gsm_alloc_mux(void)
return gsm;
}
-EXPORT_SYMBOL_GPL(gsm_alloc_mux);
/**
* gsmld_output - write to link
@@ -2269,14 +2277,15 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char *f;
int i;
char buf[64];
- char flags;
+ char flags = TTY_NORMAL;
if (debug & 4)
print_hex_dump_bytes("gsmld_receive: ", DUMP_PREFIX_OFFSET,
cp, count);
for (i = count, dp = cp, f = fp; i; i--, dp++) {
- flags = *f++;
+ if (f)
+ flags = *f++;
switch (flags) {
case TTY_NORMAL:
gsm->receive(gsm, *dp);
@@ -2711,7 +2720,7 @@ static void gsm_mux_rx_netchar(struct gsm_dlci *dlci,
return;
}
-int gsm_change_mtu(struct net_device *net, int new_mtu)
+static int gsm_change_mtu(struct net_device *net, int new_mtu)
{
struct gsm_mux_net *mux_net = (struct gsm_mux_net *)netdev_priv(net);
if ((new_mtu < 8) || (new_mtu > mux_net->dlci->gsm->mtu))
@@ -2909,23 +2918,33 @@ static int gsmtty_install(struct tty_driver *driver, struct tty_struct *tty)
This is ok from a locking
perspective as we don't have to worry about this
if DLCI0 is lost */
- if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN)
+ mutex_lock(&gsm->mutex);
+ if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN) {
+ mutex_unlock(&gsm->mutex);
return -EL2NSYNC;
+ }
dlci = gsm->dlci[line];
if (dlci == NULL) {
alloc = true;
dlci = gsm_dlci_alloc(gsm, line);
}
- if (dlci == NULL)
+ if (dlci == NULL) {
+ mutex_unlock(&gsm->mutex);
return -ENOMEM;
+ }
ret = tty_port_install(&dlci->port, driver, tty);
if (ret) {
if (alloc)
dlci_put(dlci);
+ mutex_unlock(&gsm->mutex);
return ret;
}
+ dlci_get(dlci);
+ dlci_get(gsm->dlci[0]);
+ mux_get(gsm);
tty->driver_data = dlci;
+ mutex_unlock(&gsm->mutex);
return 0;
}
@@ -2936,9 +2955,6 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
struct tty_port *port = &dlci->port;
port->count++;
- dlci_get(dlci);
- dlci_get(dlci->gsm->dlci[0]);
- mux_get(dlci->gsm);
tty_port_tty_set(port, tty);
dlci->modem_rx = 0;
@@ -2965,7 +2981,7 @@ static void gsmtty_close(struct tty_struct *tty, struct file *filp)
mutex_unlock(&dlci->mutex);
gsm = dlci->gsm;
if (tty_port_close_start(&dlci->port, tty, filp) == 0)
- goto out;
+ return;
gsm_dlci_begin_close(dlci);
if (test_bit(ASYNCB_INITIALIZED, &dlci->port.flags)) {
if (C_HUPCL(tty))
@@ -2973,10 +2989,7 @@ static void gsmtty_close(struct tty_struct *tty, struct file *filp)
}
tty_port_close_end(&dlci->port, tty);
tty_port_tty_set(&dlci->port, NULL);
-out:
- dlci_put(dlci);
- dlci_put(gsm->dlci[0]);
- mux_put(gsm);
+ return;
}
static void gsmtty_hangup(struct tty_struct *tty)
@@ -3153,6 +3166,16 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state)
return gsmtty_modem_update(dlci, encode);
}
+static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ struct gsm_mux *gsm = dlci->gsm;
+
+ dlci_put(dlci);
+ dlci_put(gsm->dlci[0]);
+ mux_put(gsm);
+ driver->ttys[tty->index] = NULL;
+}
/* Virtual ttys for the demux */
static const struct tty_operations gsmtty_ops = {
@@ -3172,6 +3195,7 @@ static const struct tty_operations gsmtty_ops = {
.tiocmget = gsmtty_tiocmget,
.tiocmset = gsmtty_tiocmset,
.break_ctl = gsmtty_break_ctl,
+ .remove = gsmtty_remove,
};
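
Note: moving the dlci/mux reference gets into gsmtty_install() and the puts into the new gsmtty_remove() ties the reference counts to the tty/driver binding rather than to open()/close(), which pair less reliably (close is skipped when open fails). A sketch of the install/remove hooks in general, with illustrative names:

    static int my_install(struct tty_driver *drv, struct tty_struct *tty)
    {
        /* take refs, set tty->driver_data, then do the standard install */
        return tty_standard_install(drv, tty);
    }

    static void my_remove(struct tty_driver *drv, struct tty_struct *tty)
    {
        /* drop the refs taken in .install */
        drv->ttys[tty->index] = NULL;
    }

    static const struct tty_operations my_ops = {
        .install = my_install,
        .remove  = my_remove,
        /* .open/.close/.write/... as usual */
    };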
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 1e6405070ce6..8b157d68a03e 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -1244,7 +1244,7 @@ static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
{
struct r3964_info *pInfo = tty->disc_data;
const unsigned char *p;
- char *f, flags = 0;
+ char *f, flags = TTY_NORMAL;
int i;
for (i = count, p = cp, f = fp; i; i--, p++) {
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 268b62768f2b..d15624c1b751 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -93,6 +93,7 @@ struct n_tty_data {
size_t canon_head;
size_t echo_head;
size_t echo_commit;
+ size_t echo_mark;
DECLARE_BITMAP(char_map, 256);
/* private to n_tty_receive_overrun (single-threaded) */
@@ -104,6 +105,7 @@ struct n_tty_data {
/* must hold exclusive termios_rwsem to reset these */
unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1;
+ unsigned char push:1;
/* shared by producer and consumer */
char read_buf[N_TTY_BUF_SIZE];
@@ -274,7 +276,8 @@ static void n_tty_check_unthrottle(struct tty_struct *tty)
return;
n_tty_set_room(tty);
n_tty_write_wakeup(tty->link);
- wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT);
+ if (waitqueue_active(&tty->link->write_wait))
+ wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT);
return;
}
@@ -336,10 +339,12 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
{
ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
+ ldata->echo_mark = 0;
ldata->line_start = 0;
ldata->erasing = 0;
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
+ ldata->push = 0;
}
static void n_tty_packet_mode_flush(struct tty_struct *tty)
@@ -349,7 +354,8 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty)
spin_lock_irqsave(&tty->ctrl_lock, flags);
if (tty->link->packet) {
tty->ctrl_status |= TIOCPKT_FLUSHREAD;
- wake_up_interruptible(&tty->link->read_wait);
+ if (waitqueue_active(&tty->link->read_wait))
+ wake_up_interruptible(&tty->link->read_wait);
}
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
}
@@ -787,6 +793,7 @@ static void commit_echoes(struct tty_struct *tty)
size_t head;
head = ldata->echo_head;
+ ldata->echo_mark = head;
old = ldata->echo_commit - ldata->echo_tail;
/* Process committed echoes if the accumulated # of bytes
@@ -810,11 +817,11 @@ static void process_echoes(struct tty_struct *tty)
struct n_tty_data *ldata = tty->disc_data;
size_t echoed;
- if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
- ldata->echo_commit == ldata->echo_tail)
+ if (ldata->echo_mark == ldata->echo_tail)
return;
mutex_lock(&ldata->output_lock);
+ ldata->echo_commit = ldata->echo_mark;
echoed = __process_echoes(tty);
mutex_unlock(&ldata->output_lock);
@@ -822,6 +829,7 @@ static void process_echoes(struct tty_struct *tty)
tty->ops->flush_chars(tty);
}
+/* NB: echo_mark and echo_head should be equivalent here */
static void flush_echoes(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1157,7 +1165,8 @@ static void n_tty_receive_break(struct tty_struct *tty)
put_tty_queue('\0', ldata);
}
put_tty_queue('\0', ldata);
- wake_up_interruptible(&tty->read_wait);
+ if (waitqueue_active(&tty->read_wait))
+ wake_up_interruptible(&tty->read_wait);
}
/**
@@ -1215,7 +1224,8 @@ static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
put_tty_queue('\0', ldata);
else
put_tty_queue(c, ldata);
- wake_up_interruptible(&tty->read_wait);
+ if (waitqueue_active(&tty->read_wait))
+ wake_up_interruptible(&tty->read_wait);
}
static void
@@ -1233,7 +1243,8 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
if (L_ECHO(tty)) {
echo_char(c, tty);
commit_echoes(tty);
- }
+ } else
+ process_echoes(tty);
isig(signal, tty);
return;
}
@@ -1259,12 +1270,11 @@ static int
n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
{
struct n_tty_data *ldata = tty->disc_data;
- int parmrk;
if (I_IXON(tty)) {
if (c == START_CHAR(tty)) {
start_tty(tty);
- commit_echoes(tty);
+ process_echoes(tty);
return 0;
}
if (c == STOP_CHAR(tty)) {
@@ -1344,8 +1354,6 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
}
if ((c == EOL_CHAR(tty)) ||
(c == EOL2_CHAR(tty) && L_IEXTEN(tty))) {
- parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty))
- ? 1 : 0;
/*
* XXX are EOL_CHAR and EOL2_CHAR echoed?!?
*/
@@ -1360,7 +1368,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
* XXX does PARMRK doubling happen for
* EOL_CHAR and EOL2_CHAR?
*/
- if (parmrk)
+ if (c == (unsigned char) '\377' && I_PARMRK(tty))
put_tty_queue(c, ldata);
handle_newline:
@@ -1374,7 +1382,6 @@ handle_newline:
}
}
- parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty)) ? 1 : 0;
if (L_ECHO(tty)) {
finish_erasing(ldata);
if (c == '\n')
@@ -1388,7 +1395,8 @@ handle_newline:
commit_echoes(tty);
}
- if (parmrk)
+ /* PARMRK doubling check */
+ if (c == (unsigned char) '\377' && I_PARMRK(tty))
put_tty_queue(c, ldata);
put_tty_queue(c, ldata);
@@ -1399,7 +1407,6 @@ static inline void
n_tty_receive_char_inline(struct tty_struct *tty, unsigned char c)
{
struct n_tty_data *ldata = tty->disc_data;
- int parmrk;
if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && I_IXANY(tty)) {
start_tty(tty);
@@ -1413,13 +1420,13 @@ n_tty_receive_char_inline(struct tty_struct *tty, unsigned char c)
echo_char(c, tty);
commit_echoes(tty);
}
- parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty)) ? 1 : 0;
- if (parmrk)
+ /* PARMRK doubling check */
+ if (c == (unsigned char) '\377' && I_PARMRK(tty))
put_tty_queue(c, ldata);
put_tty_queue(c, ldata);
}
-static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
+static void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
{
n_tty_receive_char_inline(tty, c);
}
@@ -1444,8 +1451,7 @@ n_tty_receive_char_fast(struct tty_struct *tty, unsigned char c)
put_tty_queue(c, ldata);
}
-static inline void
-n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c)
+static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c)
{
if (I_ISTRIP(tty))
c &= 0x7f;
@@ -1676,32 +1682,9 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
}
}
-static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
-{
- int room, n;
-
- down_read(&tty->termios_rwsem);
-
- while (1) {
- room = receive_room(tty);
- n = min(count, room);
- if (!n)
- break;
- __receive_buf(tty, cp, fp, n);
- cp += n;
- if (fp)
- fp += n;
- count -= n;
- }
-
- tty->receive_room = room;
- n_tty_check_throttle(tty);
- up_read(&tty->termios_rwsem);
-}
-
-static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
+static int
+n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count, int flow)
{
struct n_tty_data *ldata = tty->disc_data;
int room, n, rcvd = 0;
@@ -1712,7 +1695,7 @@ static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
room = receive_room(tty);
n = min(count, room);
if (!n) {
- if (!room)
+ if (flow && !room)
ldata->no_room = 1;
break;
}
@@ -1731,6 +1714,18 @@ static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
return rcvd;
}
+static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ n_tty_receive_buf_common(tty, cp, fp, count, 0);
+}
+
+static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ return n_tty_receive_buf_common(tty, cp, fp, count, 1);
+}
+
int is_ignored(int sig)
{
return (sigismember(&current->blocked, sig) ||
@@ -1757,7 +1752,16 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) {
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
- ldata->line_start = ldata->canon_head = ldata->read_tail;
+ ldata->line_start = ldata->read_tail;
+ if (!L_ICANON(tty) || !read_cnt(ldata)) {
+ ldata->canon_head = ldata->read_tail;
+ ldata->push = 0;
+ } else {
+ set_bit((ldata->read_head - 1) & (N_TTY_BUF_SIZE - 1),
+ ldata->read_flags);
+ ldata->canon_head = ldata->read_head;
+ ldata->push = 1;
+ }
ldata->erasing = 0;
ldata->lnext = 0;
}
@@ -1816,12 +1820,16 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
* Fix tty hang when I_IXON(tty) is cleared, but the tty
* been stopped by STOP_CHAR(tty) before it.
*/
- if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped)
+ if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
start_tty(tty);
+ process_echoes(tty);
+ }
/* The termios change make the tty ready for I/O */
- wake_up_interruptible(&tty->write_wait);
- wake_up_interruptible(&tty->read_wait);
+ if (waitqueue_active(&tty->write_wait))
+ wake_up_interruptible(&tty->write_wait);
+ if (waitqueue_active(&tty->read_wait))
+ wake_up_interruptible(&tty->read_wait);
}
/**
@@ -1887,14 +1895,15 @@ err:
return -ENOMEM;
}
-static inline int input_available_p(struct tty_struct *tty, int amt)
+static inline int input_available_p(struct tty_struct *tty, int poll)
{
struct n_tty_data *ldata = tty->disc_data;
+ int amt = poll && !TIME_CHAR(tty) && MIN_CHAR(tty) ? MIN_CHAR(tty) : 1;
if (ldata->icanon && !L_EXTPROC(tty)) {
if (ldata->canon_head != ldata->read_tail)
return 1;
- } else if (read_cnt(ldata) >= (amt ? amt : 1))
+ } else if (read_cnt(ldata) >= amt)
return 1;
return 0;
@@ -1960,6 +1969,12 @@ static int copy_from_read_buf(struct tty_struct *tty,
* it copies one line of input up to and including the line-delimiting
* character into the user-space buffer.
*
+ * NB: When termios is changed from non-canonical to canonical mode and
+ * the read buffer contains data, n_tty_set_termios() simulates an EOF
+ * push (as if C-d were input) _without_ the DISABLED_CHAR in the buffer.
+ * This causes data already processed as input to be immediately available
+ * as input although a newline has not been received.
+ *
* Called under the atomic_read_lock mutex
*
* n_tty_read()/consumer path:
@@ -2006,7 +2021,7 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
n += found;
c = n;
- if (found && read_buf(ldata, eol) == __DISABLED_CHAR) {
+ if (found && !ldata->push && read_buf(ldata, eol) == __DISABLED_CHAR) {
n--;
eof_push = !n && ldata->read_tail != ldata->line_start;
}
@@ -2033,7 +2048,10 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
ldata->read_tail += c;
if (found) {
- ldata->line_start = ldata->read_tail;
+ if (!ldata->push)
+ ldata->line_start = ldata->read_tail;
+ else
+ ldata->push = 0;
tty_audit_push(tty);
}
return eof_push ? -EAGAIN : 0;
@@ -2393,7 +2411,7 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
- if (input_available_p(tty, TIME_CHAR(tty) ? 0 : MIN_CHAR(tty)))
+ if (input_available_p(tty, 1))
mask |= POLLIN | POLLRDNORM;
if (tty->packet && tty->link->ctrl_status)
mask |= POLLPRI | POLLIN | POLLRDNORM;
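
Note: with the input_available_p() change above, poll() in non-canonical mode reports readable only once VMIN bytes are buffered when VMIN > 0 and VTIME == 0, matching termios read semantics, while the read path keeps a threshold of one byte. A small sketch of the threshold rule (names illustrative):

    /* sketch of the poll-side threshold introduced above */
    static int bytes_needed_for_poll(int vmin, int vtime)
    {
        return (vtime == 0 && vmin > 0) ? vmin : 1;
    }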
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 354564ea47c5..383c4c796637 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1744,7 +1744,7 @@ static void rp_flush_buffer(struct tty_struct *tty)
#ifdef CONFIG_PCI
-static DEFINE_PCI_DEVICE_TABLE(rocket_pci_ids) = {
+static const struct pci_device_id rocket_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP4QUAD) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_RP8OCTA) },
{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_DEVICE_ID_URP8OCTA) },
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index e33d38cb170f..69932b7556cf 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -2433,6 +2433,24 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
serial_dl_write(up, quot);
/*
+ * XR17V35x UARTs have an extra fractional divisor register (DLD)
+ *
+ * We need to recalculate all of the registers, because DLM and DLL
+ * are already rounded to a whole integer.
+ *
+ * When recalculating we use a 32x clock instead of a 16x clock to
+ * allow 1-bit for rounding in the fractional part.
+ */
+ if (up->port.type == PORT_XR17V35X) {
+ unsigned int baud_x32 = (port->uartclk * 2) / baud;
+ u16 quot = baud_x32 / 32;
+ u8 quot_frac = DIV_ROUND_CLOSEST(baud_x32 % 32, 2);
+
+ serial_dl_write(up, quot);
+ serial_port_out(port, 0x2, quot_frac & 0xf);
+ }
+
+ /*
* LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
* is written without DLAB set, this mode will be disabled.
*/
@@ -2670,6 +2688,10 @@ static void serial8250_config_port(struct uart_port *port, int flags)
if (port->type == PORT_16550A && port->iotype == UPIO_AU)
up->bugs |= UART_BUG_NOMSR;
+ /* HW bugs may trigger IRQ while IIR == NO_INT */
+ if (port->type == PORT_TEGRA)
+ up->bugs |= UART_BUG_NOMSR;
+
if (port->type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
autoconfig_irq(up);
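
Note: a worked example of the XR17V35x fractional-divisor math added above, using an illustrative uartclk of 14.7456 MHz and a 250000 baud target (numbers are not from the patch):

    unsigned int uartclk = 14745600, baud = 250000;
    unsigned int baud_x32 = (uartclk * 2) / baud;                 /* 117 (117.96 truncated) */
    unsigned int quot = baud_x32 / 32;                            /* 3  -> DLL/DLM */
    unsigned int quot_frac = DIV_ROUND_CLOSEST(baud_x32 % 32, 2); /* 11 -> DLD, i.e. 11/16 */
    /* effective divisor 3 + 11/16 = 3.6875: 14745600 / (16 * 3.6875) ~= 249925 baud */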
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 4658e3e0ec42..ed3113576740 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -14,7 +14,6 @@
* raised, the LCR needs to be rewritten and the uart status register read.
*/
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/serial_8250.h>
@@ -96,7 +95,8 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
if (offset == UART_LCR) {
int tries = 1000;
while (tries--) {
- if (value == p->serial_in(p, UART_LCR))
+ unsigned int lcr = p->serial_in(p, UART_LCR);
+ if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
return;
dw8250_force_idle(p);
writeb(value, p->membase + (UART_LCR << p->regshift));
@@ -132,7 +132,8 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
if (offset == UART_LCR) {
int tries = 1000;
while (tries--) {
- if (value == p->serial_in(p, UART_LCR))
+ unsigned int lcr = p->serial_in(p, UART_LCR);
+ if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
return;
dw8250_force_idle(p);
writel(value, p->membase + (UART_LCR << p->regshift));
@@ -272,7 +273,6 @@ static int dw8250_probe_of(struct uart_port *p,
return 0;
}
-#ifdef CONFIG_ACPI
static int dw8250_probe_acpi(struct uart_8250_port *up,
struct dw8250_data *data)
{
@@ -300,13 +300,6 @@ static int dw8250_probe_acpi(struct uart_8250_port *up,
return 0;
}
-#else
-static inline int dw8250_probe_acpi(struct uart_8250_port *up,
- struct dw8250_data *data)
-{
- return -ENODEV;
-}
-#endif /* CONFIG_ACPI */
static int dw8250_probe(struct platform_device *pdev)
{
@@ -398,7 +391,7 @@ static int dw8250_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int dw8250_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
@@ -416,7 +409,7 @@ static int dw8250_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_RUNTIME
static int dw8250_runtime_suspend(struct device *dev)
@@ -455,6 +448,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match);
static const struct acpi_device_id dw8250_acpi_match[] = {
{ "INT33C4", 0 },
{ "INT33C5", 0 },
+ { "INT3434", 0 },
+ { "INT3435", 0 },
{ "80860F0A", 0 },
{ },
};
diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
index d1a9078003bd..56c87232b6a0 100644
--- a/drivers/tty/serial/8250/8250_em.c
+++ b/drivers/tty/serial/8250/8250_em.c
@@ -18,7 +18,6 @@
*/
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/serial_8250.h>
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 4697a514b80a..0ff3e3624d4c 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -11,7 +11,6 @@
*/
#undef DEBUG
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
@@ -784,7 +783,8 @@ static int pci_netmos_9900_setup(struct serial_private *priv,
{
unsigned int bar;
- if ((priv->dev->subsystem_device & 0xff00) == 0x3000) {
+ if ((priv->dev->device != PCI_DEVICE_ID_NETMOS_9865) &&
+ (priv->dev->subsystem_device & 0xff00) == 0x3000) {
/* netmos apparently orders BARs by datasheet layout, so serial
* ports get BARs 0 and 3 (or 1 and 4 for memmapped)
*/
@@ -1259,10 +1259,10 @@ static int pci_quatech_init(struct pci_dev *dev)
unsigned long base = pci_resource_start(dev, 0);
if (base) {
u32 tmp;
- outl(inl(base + 0x38), base + 0x38);
+ outl(inl(base + 0x38) | 0x00002000, base + 0x38);
tmp = inl(base + 0x3c);
outl(tmp | 0x01000000, base + 0x3c);
- outl(tmp, base + 0x3c);
+ outl(tmp &= ~0x01000000, base + 0x3c);
}
}
return 0;
@@ -1744,6 +1744,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_TITAN_800E 0xA014
#define PCI_DEVICE_ID_TITAN_200EI 0xA016
#define PCI_DEVICE_ID_TITAN_200EISI 0xA017
+#define PCI_DEVICE_ID_TITAN_200V3 0xA306
#define PCI_DEVICE_ID_TITAN_400V3 0xA310
#define PCI_DEVICE_ID_TITAN_410V3 0xA312
#define PCI_DEVICE_ID_TITAN_800V3 0xA314
@@ -4427,6 +4428,9 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EISI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_oxsemi_2_4000000 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200V3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_921600 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400V3,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_4_921600 },
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
index 35d9ab95c5cb..682a2fbe5c06 100644
--- a/drivers/tty/serial/8250/8250_pnp.c
+++ b/drivers/tty/serial/8250/8250_pnp.c
@@ -12,7 +12,6 @@
* the Free Software Foundation; either version 2 of the License.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pnp.h>
#include <linux/string.h>
diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
index 1b74b88e1e1e..4d180c9423ef 100644
--- a/drivers/tty/serial/8250/serial_cs.c
+++ b/drivers/tty/serial/8250/serial_cs.c
@@ -34,7 +34,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index a3817ab8602f..a3815eaed421 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -181,9 +181,8 @@ config SERIAL_KS8695_CONSOLE
config SERIAL_CLPS711X
tristate "CLPS711X serial port support"
- depends on ARCH_CLPS711X
+ depends on ARCH_CLPS711X || COMPILE_TEST
select SERIAL_CORE
- default y
help
This enables the driver for the on-chip UARTs of the Cirrus
Logic EP711x/EP721x/EP731x processors.
@@ -1035,7 +1034,7 @@ config SERIAL_MSM_CONSOLE
config SERIAL_MSM_HS
tristate "MSM UART High Speed: Serial Driver"
- depends on ARCH_MSM
+ depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
select SERIAL_CORE
help
If you have a machine based on MSM family of SoCs, you
@@ -1146,31 +1145,13 @@ config SERIAL_QE
This driver supports the QE serial ports on Freescale embedded
PowerPC that contain a QUICC Engine.
-config SERIAL_SC26XX
- tristate "SC2681/SC2692 serial port support"
- depends on SNI_RM
- select SERIAL_CORE
- help
- This is a driver for the onboard serial ports of
- older RM400 machines.
-
-config SERIAL_SC26XX_CONSOLE
- bool "Console on SC2681/SC2692 serial port"
- depends on SERIAL_SC26XX=y
- select SERIAL_CORE_CONSOLE
- help
- Support for Console on SC2681/SC2692 serial ports.
-
config SERIAL_SCCNXP
tristate "SCCNXP serial port support"
- depends on !SERIAL_SC26XX
select SERIAL_CORE
- default n
help
This selects support for an advanced UART from NXP (Philips).
Supported ICs are SCC2681, SCC2691, SCC2692, SC28L91, SC28L92,
SC28L202, SCC68681 and SCC68692.
- Positioned as a replacement for the driver SC26XX.
config SERIAL_SCCNXP_CONSOLE
bool "Console on SCCNXP serial port"
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 3068c7722087..3680854fef41 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_SERIAL_M32R_SIO) += m32r_sio.o
obj-$(CONFIG_SERIAL_MPSC) += mpsc.o
obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
obj-$(CONFIG_ETRAX_SERIAL) += crisv10.o
-obj-$(CONFIG_SERIAL_SC26XX) += sc26xx.o
obj-$(CONFIG_SERIAL_SCCNXP) += sccnxp.o
obj-$(CONFIG_SERIAL_JSM) += jsm/
obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 33bd8606be62..01c9e72433e1 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -756,9 +756,10 @@ static int pl010_remove(struct amba_device *dev)
return 0;
}
-static int pl010_suspend(struct amba_device *dev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int pl010_suspend(struct device *dev)
{
- struct uart_amba_port *uap = amba_get_drvdata(dev);
+ struct uart_amba_port *uap = dev_get_drvdata(dev);
if (uap)
uart_suspend_port(&amba_reg, &uap->port);
@@ -766,15 +767,18 @@ static int pl010_suspend(struct amba_device *dev, pm_message_t state)
return 0;
}
-static int pl010_resume(struct amba_device *dev)
+static int pl010_resume(struct device *dev)
{
- struct uart_amba_port *uap = amba_get_drvdata(dev);
+ struct uart_amba_port *uap = dev_get_drvdata(dev);
if (uap)
uart_resume_port(&amba_reg, &uap->port);
return 0;
}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pl010_dev_pm_ops, pl010_suspend, pl010_resume);
static struct amba_id pl010_ids[] = {
{
@@ -789,12 +793,11 @@ MODULE_DEVICE_TABLE(amba, pl010_ids);
static struct amba_driver pl010_driver = {
.drv = {
.name = "uart-pl010",
+ .pm = &pl010_dev_pm_ops,
},
.id_table = pl010_ids,
.probe = pl010_probe,
.remove = pl010_remove,
- .suspend = pl010_suspend,
- .resume = pl010_resume,
};
static int __init pl010_init(void)
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 7203864992a5..d58783d364e3 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -112,8 +112,6 @@ static struct vendor_data vendor_st = {
.get_fifosize = get_fifosize_st,
};
-static struct uart_amba_port *amba_ports[UART_NR];
-
/* Deals with DMA transactions */
struct pl011_sgbuf {
@@ -969,6 +967,8 @@ static void pl011_dma_rx_poll(unsigned long args)
spin_lock_irqsave(&uap->port.lock, flags);
pl011_dma_rx_stop(uap);
+ uap->im |= UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
spin_unlock_irqrestore(&uap->port.lock, flags);
uap->dmarx.running = false;
@@ -1216,8 +1216,8 @@ __acquires(&uap->port.lock)
dev_dbg(uap->port.dev, "could not trigger RX DMA job "
"fall back to interrupt mode again\n");
uap->im |= UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
} else {
- uap->im &= ~UART011_RXIM;
#ifdef CONFIG_DMA_ENGINE
/* Start Rx DMA poll */
if (uap->dmarx.poll_rate) {
@@ -1229,8 +1229,6 @@ __acquires(&uap->port.lock)
}
#endif
}
-
- writew(uap->im, uap->port.membase + UART011_IMSC);
}
spin_lock(&uap->port.lock);
}
@@ -1513,10 +1511,25 @@ static int pl011_hwinit(struct uart_port *port)
return retval;
}
+static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
+{
+ writew(lcr_h, uap->port.membase + uap->lcrh_rx);
+ if (uap->lcrh_rx != uap->lcrh_tx) {
+ int i;
+ /*
+ * Wait 10 PCLKs before writing LCRH_TX register,
+ * to get this delay write read only register 10 times
+ */
+ for (i = 0; i < 10; ++i)
+ writew(0xff, uap->port.membase + UART011_MIS);
+ writew(lcr_h, uap->port.membase + uap->lcrh_tx);
+ }
+}
+
static int pl011_startup(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
- unsigned int cr;
+ unsigned int cr, lcr_h, fbrd, ibrd;
int retval;
retval = pl011_hwinit(port);
@@ -1535,32 +1548,36 @@ static int pl011_startup(struct uart_port *port)
writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
/*
- * Provoke TX FIFO interrupt into asserting.
+ * Provoke TX FIFO interrupt into asserting. Taking care to preserve
+ * baud rate and data format specified by FBRD, IBRD and LCRH as the
+ * UART may already be in use as a console.
*/
+ spin_lock_irq(&uap->port.lock);
+
+ fbrd = readw(uap->port.membase + UART011_FBRD);
+ ibrd = readw(uap->port.membase + UART011_IBRD);
+ lcr_h = readw(uap->port.membase + uap->lcrh_rx);
+
cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
writew(cr, uap->port.membase + UART011_CR);
writew(0, uap->port.membase + UART011_FBRD);
writew(1, uap->port.membase + UART011_IBRD);
- writew(0, uap->port.membase + uap->lcrh_rx);
- if (uap->lcrh_tx != uap->lcrh_rx) {
- int i;
- /*
- * Wait 10 PCLKs before writing LCRH_TX register,
- * to get this delay write read only register 10 times
- */
- for (i = 0; i < 10; ++i)
- writew(0xff, uap->port.membase + UART011_MIS);
- writew(0, uap->port.membase + uap->lcrh_tx);
- }
+ pl011_write_lcr_h(uap, 0);
writew(0, uap->port.membase + UART01x_DR);
while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
barrier();
+ writew(fbrd, uap->port.membase + UART011_FBRD);
+ writew(ibrd, uap->port.membase + UART011_IBRD);
+ pl011_write_lcr_h(uap, lcr_h);
+
/* restore RTS and DTR */
cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
writew(cr, uap->port.membase + UART011_CR);
+ spin_unlock_irq(&uap->port.lock);
+
/*
* initialise the old status of the modem signals
*/
@@ -1629,11 +1646,13 @@ static void pl011_shutdown(struct uart_port *port)
* it during startup().
*/
uap->autorts = false;
+ spin_lock_irq(&uap->port.lock);
cr = readw(uap->port.membase + UART011_CR);
uap->old_cr = cr;
cr &= UART011_CR_RTS | UART011_CR_DTR;
cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
writew(cr, uap->port.membase + UART011_CR);
+ spin_unlock_irq(&uap->port.lock);
/*
* disable break condition and fifos
@@ -1797,17 +1816,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
* UART011_FBRD & UART011_IBRD.
* ----------^----------^----------^----------^-----
*/
- writew(lcr_h, port->membase + uap->lcrh_rx);
- if (uap->lcrh_rx != uap->lcrh_tx) {
- int i;
- /*
- * Wait 10 PCLKs before writing LCRH_TX register,
- * to get this delay write read only register 10 times
- */
- for (i = 0; i < 10; ++i)
- writew(0xff, uap->port.membase + UART011_MIS);
- writew(lcr_h, port->membase + uap->lcrh_tx);
- }
+ pl011_write_lcr_h(uap, lcr_h);
writew(old_cr, port->membase + UART011_CR);
spin_unlock_irqrestore(&port->lock, flags);
@@ -2169,10 +2178,10 @@ static int pl011_remove(struct amba_device *dev)
return 0;
}
-#ifdef CONFIG_PM
-static int pl011_suspend(struct amba_device *dev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int pl011_suspend(struct device *dev)
{
- struct uart_amba_port *uap = amba_get_drvdata(dev);
+ struct uart_amba_port *uap = dev_get_drvdata(dev);
if (!uap)
return -EINVAL;
@@ -2180,9 +2189,9 @@ static int pl011_suspend(struct amba_device *dev, pm_message_t state)
return uart_suspend_port(&amba_reg, &uap->port);
}
-static int pl011_resume(struct amba_device *dev)
+static int pl011_resume(struct device *dev)
{
- struct uart_amba_port *uap = amba_get_drvdata(dev);
+ struct uart_amba_port *uap = dev_get_drvdata(dev);
if (!uap)
return -EINVAL;
@@ -2191,6 +2200,8 @@ static int pl011_resume(struct amba_device *dev)
}
#endif
+static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
+
static struct amba_id pl011_ids[] = {
{
.id = 0x00041011,
@@ -2210,14 +2221,11 @@ MODULE_DEVICE_TABLE(amba, pl011_ids);
static struct amba_driver pl011_driver = {
.drv = {
.name = "uart-pl011",
+ .pm = &pl011_dev_pm_ops,
},
.id_table = pl011_ids,
.probe = pl011_probe,
.remove = pl011_remove,
-#ifdef CONFIG_PM
- .suspend = pl011_suspend,
- .resume = pl011_resume,
-#endif
};
static int __init pl011_init(void)
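
Note: the pl010/pl011 hunks convert bus-specific suspend/resume callbacks to dev_pm_ops via SIMPLE_DEV_PM_OPS(), which wires the two functions into system sleep and leaves the ops empty when CONFIG_PM_SLEEP is off (hence the #ifdef around the functions). A condensed sketch with other driver fields omitted:

    #ifdef CONFIG_PM_SLEEP
    static int my_suspend(struct device *dev) { return 0; }
    static int my_resume(struct device *dev)  { return 0; }
    #endif

    static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

    static struct amba_driver my_driver = {
        .drv = { .name = "my-uart", .pm = &my_pm_ops },
        /* .probe, .remove, .id_table omitted in this sketch */
    };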
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index c7d99af46a96..a49f10d269b2 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -825,9 +825,6 @@ static void atmel_release_rx_dma(struct uart_port *port)
atmel_port->desc_rx = NULL;
atmel_port->chan_rx = NULL;
atmel_port->cookie_rx = -EINVAL;
-
- if (!atmel_port->is_usart)
- del_timer_sync(&atmel_port->uart_timer);
}
static void atmel_rx_from_dma(struct uart_port *port)
@@ -1229,9 +1226,6 @@ static void atmel_release_rx_pdc(struct uart_port *port)
DMA_FROM_DEVICE);
kfree(pdc->buf);
}
-
- if (!atmel_port->is_usart)
- del_timer_sync(&atmel_port->uart_timer);
}
static void atmel_rx_from_pdc(struct uart_port *port)
@@ -1604,12 +1598,13 @@ static int atmel_startup(struct uart_port *port)
/* enable xmit & rcvr */
UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
+ setup_timer(&atmel_port->uart_timer,
+ atmel_uart_timer_callback,
+ (unsigned long)port);
+
if (atmel_use_pdc_rx(port)) {
/* set UART timeout */
if (!atmel_port->is_usart) {
- setup_timer(&atmel_port->uart_timer,
- atmel_uart_timer_callback,
- (unsigned long)port);
mod_timer(&atmel_port->uart_timer,
jiffies + uart_poll_timeout(port));
/* set USART timeout */
@@ -1624,9 +1619,6 @@ static int atmel_startup(struct uart_port *port)
} else if (atmel_use_dma_rx(port)) {
/* set UART timeout */
if (!atmel_port->is_usart) {
- setup_timer(&atmel_port->uart_timer,
- atmel_uart_timer_callback,
- (unsigned long)port);
mod_timer(&atmel_port->uart_timer,
jiffies + uart_poll_timeout(port));
/* set USART timeout */
@@ -1650,12 +1642,30 @@ static int atmel_startup(struct uart_port *port)
static void atmel_shutdown(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
/*
- * Ensure everything is stopped.
+ * Prevent any tasklets being scheduled during
+ * cleanup
+ */
+ del_timer_sync(&atmel_port->uart_timer);
+
+ /*
+ * Clear out any scheduled tasklets before
+ * we destroy the buffers
+ */
+ tasklet_kill(&atmel_port->tasklet);
+
+ /*
+ * Ensure everything is stopped and
+ * disable all interrupts, port and break condition.
*/
atmel_stop_rx(port);
atmel_stop_tx(port);
+ UART_PUT_CR(port, ATMEL_US_RSTSTA);
+ UART_PUT_IDR(port, -1);
+
+
/*
* Shut-down the DMA.
*/
@@ -1665,10 +1675,10 @@ static void atmel_shutdown(struct uart_port *port)
atmel_port->release_tx(port);
/*
- * Disable all interrupts, port and break condition.
+ * Reset ring buffer pointers
*/
- UART_PUT_CR(port, ATMEL_US_RSTSTA);
- UART_PUT_IDR(port, -1);
+ atmel_port->rx_ring.head = 0;
+ atmel_port->rx_ring.tail = 0;
/*
* Free the interrupt
@@ -2441,11 +2451,12 @@ static int atmel_serial_remove(struct platform_device *pdev)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int ret = 0;
+ tasklet_kill(&atmel_port->tasklet);
+
device_init_wakeup(&pdev->dev, 0);
ret = uart_remove_one_port(&atmel_uart, port);
- tasklet_kill(&atmel_port->tasklet);
kfree(atmel_port->rx_ring.buf);
/* "port" is allocated statically, so we shouldn't free it */
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 649d5129c4b4..78e82b017b92 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -29,10 +29,7 @@
#include <linux/sysrq.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
-
-#include <bcm63xx_irq.h>
-#include <bcm63xx_regs.h>
-#include <bcm63xx_io.h>
+#include <linux/serial_bcm63xx.h>
#define BCM63XX_NR_UARTS 2
@@ -81,13 +78,13 @@ static struct uart_port ports[BCM63XX_NR_UARTS];
static inline unsigned int bcm_uart_readl(struct uart_port *port,
unsigned int offset)
{
- return bcm_readl(port->membase + offset);
+ return __raw_readl(port->membase + offset);
}
static inline void bcm_uart_writel(struct uart_port *port,
unsigned int value, unsigned int offset)
{
- bcm_writel(value, port->membase + offset);
+ __raw_writel(value, port->membase + offset);
}
/*
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index 8d0b994357c8..b0eacb83f831 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -21,44 +21,66 @@
#include <linux/console.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
-#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/io.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/ioport.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
-#include <mach/hardware.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/clps711x.h>
-#define UART_CLPS711X_NAME "uart-clps711x"
+#define UART_CLPS711X_DEVNAME "ttyCL"
#define UART_CLPS711X_NR 2
#define UART_CLPS711X_MAJOR 204
#define UART_CLPS711X_MINOR 40
-#define UBRLCR(port) ((port)->line ? UBRLCR2 : UBRLCR1)
-#define UARTDR(port) ((port)->line ? UARTDR2 : UARTDR1)
-#define SYSFLG(port) ((port)->line ? SYSFLG2 : SYSFLG1)
-#define SYSCON(port) ((port)->line ? SYSCON2 : SYSCON1)
-#define TX_IRQ(port) ((port)->line ? IRQ_UTXINT2 : IRQ_UTXINT1)
-#define RX_IRQ(port) ((port)->line ? IRQ_URXINT2 : IRQ_URXINT1)
+#define UARTDR_OFFSET (0x00)
+#define UBRLCR_OFFSET (0x40)
+
+#define UARTDR_FRMERR (1 << 8)
+#define UARTDR_PARERR (1 << 9)
+#define UARTDR_OVERR (1 << 10)
+
+#define UBRLCR_BAUD_MASK ((1 << 12) - 1)
+#define UBRLCR_BREAK (1 << 12)
+#define UBRLCR_PRTEN (1 << 13)
+#define UBRLCR_EVENPRT (1 << 14)
+#define UBRLCR_XSTOP (1 << 15)
+#define UBRLCR_FIFOEN (1 << 16)
+#define UBRLCR_WRDLEN5 (0 << 17)
+#define UBRLCR_WRDLEN6 (1 << 17)
+#define UBRLCR_WRDLEN7 (2 << 17)
+#define UBRLCR_WRDLEN8 (3 << 17)
+#define UBRLCR_WRDLEN_MASK (3 << 17)
struct clps711x_port {
- struct uart_driver uart;
- struct clk *uart_clk;
- struct uart_port port[UART_CLPS711X_NR];
- int tx_enabled[UART_CLPS711X_NR];
-#ifdef CONFIG_SERIAL_CLPS711X_CONSOLE
- struct console console;
-#endif
+ struct uart_port port;
+ unsigned int tx_enabled;
+ int rx_irq;
+ struct regmap *syscon;
+ bool use_ms;
+};
+
+static struct uart_driver clps711x_uart = {
+ .owner = THIS_MODULE,
+ .driver_name = UART_CLPS711X_DEVNAME,
+ .dev_name = UART_CLPS711X_DEVNAME,
+ .major = UART_CLPS711X_MAJOR,
+ .minor = UART_CLPS711X_MINOR,
+ .nr = UART_CLPS711X_NR,
};
static void uart_clps711x_stop_tx(struct uart_port *port)
{
struct clps711x_port *s = dev_get_drvdata(port->dev);
- if (s->tx_enabled[port->line]) {
- disable_irq(TX_IRQ(port));
- s->tx_enabled[port->line] = 0;
+ if (s->tx_enabled) {
+ disable_irq(port->irq);
+ s->tx_enabled = 0;
}
}
@@ -66,33 +88,27 @@ static void uart_clps711x_start_tx(struct uart_port *port)
{
struct clps711x_port *s = dev_get_drvdata(port->dev);
- if (!s->tx_enabled[port->line]) {
- enable_irq(TX_IRQ(port));
- s->tx_enabled[port->line] = 1;
+ if (!s->tx_enabled) {
+ s->tx_enabled = 1;
+ enable_irq(port->irq);
}
}
-static void uart_clps711x_stop_rx(struct uart_port *port)
-{
- disable_irq(RX_IRQ(port));
-}
-
-static void uart_clps711x_enable_ms(struct uart_port *port)
-{
- /* Do nothing */
-}
-
static irqreturn_t uart_clps711x_int_rx(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- unsigned int status, ch, flg;
+ struct clps711x_port *s = dev_get_drvdata(port->dev);
+ unsigned int status, flg;
+ u16 ch;
for (;;) {
- status = clps_readl(SYSFLG(port));
- if (status & SYSFLG_URXFE)
+ u32 sysflg = 0;
+
+ regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
+ if (sysflg & SYSFLG_URXFE)
break;
- ch = clps_readw(UARTDR(port));
+ ch = readw(port->membase + UARTDR_OFFSET);
status = ch & (UARTDR_FRMERR | UARTDR_PARERR | UARTDR_OVERR);
ch &= 0xff;
@@ -138,23 +154,29 @@ static irqreturn_t uart_clps711x_int_tx(int irq, void *dev_id)
struct circ_buf *xmit = &port->state->xmit;
if (port->x_char) {
- clps_writew(port->x_char, UARTDR(port));
+ writew(port->x_char, port->membase + UARTDR_OFFSET);
port->icount.tx++;
port->x_char = 0;
return IRQ_HANDLED;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- disable_irq_nosync(TX_IRQ(port));
- s->tx_enabled[port->line] = 0;
+ if (s->tx_enabled) {
+ disable_irq_nosync(port->irq);
+ s->tx_enabled = 0;
+ }
return IRQ_HANDLED;
}
while (!uart_circ_empty(xmit)) {
- clps_writew(xmit->buf[xmit->tail], UARTDR(port));
+ u32 sysflg = 0;
+
+ writew(xmit->buf[xmit->tail], port->membase + UARTDR_OFFSET);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
- if (clps_readl(SYSFLG(port) & SYSFLG_UTXFF))
+
+ regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
+ if (sysflg & SYSFLG_UTXFF)
break;
}
@@ -166,20 +188,28 @@ static irqreturn_t uart_clps711x_int_tx(int irq, void *dev_id)
static unsigned int uart_clps711x_tx_empty(struct uart_port *port)
{
- return (clps_readl(SYSFLG(port) & SYSFLG_UBUSY)) ? 0 : TIOCSER_TEMT;
+ struct clps711x_port *s = dev_get_drvdata(port->dev);
+ u32 sysflg = 0;
+
+ regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
+
+ return (sysflg & SYSFLG_UBUSY) ? 0 : TIOCSER_TEMT;
}
static unsigned int uart_clps711x_get_mctrl(struct uart_port *port)
{
- unsigned int status, result = 0;
+ struct clps711x_port *s = dev_get_drvdata(port->dev);
+ unsigned int result = 0;
+
+ if (s->use_ms) {
+ u32 sysflg = 0;
- if (port->line == 0) {
- status = clps_readl(SYSFLG1);
- if (status & SYSFLG1_DCD)
+ regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
+ if (sysflg & SYSFLG1_DCD)
result |= TIOCM_CAR;
- if (status & SYSFLG1_DSR)
+ if (sysflg & SYSFLG1_DSR)
result |= TIOCM_DSR;
- if (status & SYSFLG1_CTS)
+ if (sysflg & SYSFLG1_CTS)
result |= TIOCM_CTS;
} else
result = TIOCM_DSR | TIOCM_CTS | TIOCM_CAR;
@@ -194,65 +224,53 @@ static void uart_clps711x_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void uart_clps711x_break_ctl(struct uart_port *port, int break_state)
{
- unsigned long flags;
unsigned int ubrlcr;
- spin_lock_irqsave(&port->lock, flags);
-
- ubrlcr = clps_readl(UBRLCR(port));
+ ubrlcr = readl(port->membase + UBRLCR_OFFSET);
if (break_state)
ubrlcr |= UBRLCR_BREAK;
else
ubrlcr &= ~UBRLCR_BREAK;
- clps_writel(ubrlcr, UBRLCR(port));
+ writel(ubrlcr, port->membase + UBRLCR_OFFSET);
+}
+
+static void uart_clps711x_set_ldisc(struct uart_port *port, int ld)
+{
+ if (!port->line) {
+ struct clps711x_port *s = dev_get_drvdata(port->dev);
- spin_unlock_irqrestore(&port->lock, flags);
+ regmap_update_bits(s->syscon, SYSCON_OFFSET, SYSCON1_SIREN,
+ (ld == N_IRDA) ? SYSCON1_SIREN : 0);
+ }
}
static int uart_clps711x_startup(struct uart_port *port)
{
struct clps711x_port *s = dev_get_drvdata(port->dev);
- int ret;
-
- s->tx_enabled[port->line] = 1;
- /* Allocate the IRQs */
- ret = devm_request_irq(port->dev, TX_IRQ(port), uart_clps711x_int_tx,
- 0, UART_CLPS711X_NAME " TX", port);
- if (ret)
- return ret;
-
- ret = devm_request_irq(port->dev, RX_IRQ(port), uart_clps711x_int_rx,
- 0, UART_CLPS711X_NAME " RX", port);
- if (ret) {
- devm_free_irq(port->dev, TX_IRQ(port), port);
- return ret;
- }
/* Disable break */
- clps_writel(clps_readl(UBRLCR(port)) & ~UBRLCR_BREAK, UBRLCR(port));
+ writel(readl(port->membase + UBRLCR_OFFSET) & ~UBRLCR_BREAK,
+ port->membase + UBRLCR_OFFSET);
/* Enable the port */
- clps_writel(clps_readl(SYSCON(port)) | SYSCON_UARTEN, SYSCON(port));
-
- return 0;
+ return regmap_update_bits(s->syscon, SYSCON_OFFSET,
+ SYSCON_UARTEN, SYSCON_UARTEN);
}
static void uart_clps711x_shutdown(struct uart_port *port)
{
- /* Free the interrupts */
- devm_free_irq(port->dev, TX_IRQ(port), port);
- devm_free_irq(port->dev, RX_IRQ(port), port);
+ struct clps711x_port *s = dev_get_drvdata(port->dev);
/* Disable the port */
- clps_writel(clps_readl(SYSCON(port)) & ~SYSCON_UARTEN, SYSCON(port));
+ regmap_update_bits(s->syscon, SYSCON_OFFSET, SYSCON_UARTEN, 0);
}
static void uart_clps711x_set_termios(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old)
{
- unsigned int ubrlcr, baud, quot;
- unsigned long flags;
+ u32 ubrlcr;
+ unsigned int baud, quot;
/* Mask termios capabilities we don't support */
termios->c_cflag &= ~CMSPAR;
@@ -291,8 +309,6 @@ static void uart_clps711x_set_termios(struct uart_port *port,
/* Enable FIFO */
ubrlcr |= UBRLCR_FIFOEN;
- spin_lock_irqsave(&port->lock, flags);
-
/* Set read status mask */
port->read_status_mask = UARTDR_OVERR;
if (termios->c_iflag & INPCK)
@@ -306,9 +322,7 @@ static void uart_clps711x_set_termios(struct uart_port *port,
uart_update_timeout(port, termios->c_cflag, baud);
- clps_writel(ubrlcr | (quot - 1), UBRLCR(port));
-
- spin_unlock_irqrestore(&port->lock, flags);
+ writel(ubrlcr | (quot - 1), port->membase + UBRLCR_OFFSET);
}
static const char *uart_clps711x_type(struct uart_port *port)
@@ -322,14 +336,12 @@ static void uart_clps711x_config_port(struct uart_port *port, int flags)
port->type = PORT_CLPS711X;
}
-static void uart_clps711x_release_port(struct uart_port *port)
+static void uart_clps711x_nop_void(struct uart_port *port)
{
- /* Do nothing */
}
-static int uart_clps711x_request_port(struct uart_port *port)
+static int uart_clps711x_nop_int(struct uart_port *port)
{
- /* Do nothing */
return 0;
}
@@ -339,181 +351,237 @@ static const struct uart_ops uart_clps711x_ops = {
.get_mctrl = uart_clps711x_get_mctrl,
.stop_tx = uart_clps711x_stop_tx,
.start_tx = uart_clps711x_start_tx,
- .stop_rx = uart_clps711x_stop_rx,
- .enable_ms = uart_clps711x_enable_ms,
+ .stop_rx = uart_clps711x_nop_void,
+ .enable_ms = uart_clps711x_nop_void,
.break_ctl = uart_clps711x_break_ctl,
+ .set_ldisc = uart_clps711x_set_ldisc,
.startup = uart_clps711x_startup,
.shutdown = uart_clps711x_shutdown,
.set_termios = uart_clps711x_set_termios,
.type = uart_clps711x_type,
.config_port = uart_clps711x_config_port,
- .release_port = uart_clps711x_release_port,
- .request_port = uart_clps711x_request_port,
+ .release_port = uart_clps711x_nop_void,
+ .request_port = uart_clps711x_nop_int,
};
#ifdef CONFIG_SERIAL_CLPS711X_CONSOLE
static void uart_clps711x_console_putchar(struct uart_port *port, int ch)
{
- while (clps_readl(SYSFLG(port)) & SYSFLG_UTXFF)
- barrier();
+ struct clps711x_port *s = dev_get_drvdata(port->dev);
+ u32 sysflg = 0;
+
+ do {
+ regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
+ } while (sysflg & SYSFLG_UTXFF);
- clps_writew(ch, UARTDR(port));
+ writew(ch, port->membase + UARTDR_OFFSET);
}
static void uart_clps711x_console_write(struct console *co, const char *c,
unsigned n)
{
- struct clps711x_port *s = (struct clps711x_port *)co->data;
- struct uart_port *port = &s->port[co->index];
- u32 syscon;
-
- /* Ensure that the port is enabled */
- syscon = clps_readl(SYSCON(port));
- clps_writel(syscon | SYSCON_UARTEN, SYSCON(port));
+ struct uart_port *port = clps711x_uart.state[co->index].uart_port;
+ struct clps711x_port *s = dev_get_drvdata(port->dev);
+ u32 sysflg = 0;
uart_console_write(port, c, n, uart_clps711x_console_putchar);
/* Wait for transmitter to become empty */
- while (clps_readl(SYSFLG(port)) & SYSFLG_UBUSY)
- barrier();
-
- /* Restore the uart state */
- clps_writel(syscon, SYSCON(port));
+ do {
+ regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
+ } while (sysflg & SYSFLG_UBUSY);
}
-static void uart_clps711x_console_get_options(struct uart_port *port,
- int *baud, int *parity,
- int *bits)
+static int uart_clps711x_console_setup(struct console *co, char *options)
{
- if (clps_readl(SYSCON(port)) & SYSCON_UARTEN) {
- unsigned int ubrlcr, quot;
+ int baud = 38400, bits = 8, parity = 'n', flow = 'n';
+ int ret, index = co->index;
+ struct clps711x_port *s;
+ struct uart_port *port;
+ unsigned int quot;
+ u32 ubrlcr;
- ubrlcr = clps_readl(UBRLCR(port));
+ if (index < 0 || index >= UART_CLPS711X_NR)
+ return -EINVAL;
- *parity = 'n';
- if (ubrlcr & UBRLCR_PRTEN) {
- if (ubrlcr & UBRLCR_EVENPRT)
- *parity = 'e';
- else
- *parity = 'o';
- }
+ port = clps711x_uart.state[index].uart_port;
+ if (!port)
+ return -ENODEV;
- if ((ubrlcr & UBRLCR_WRDLEN_MASK) == UBRLCR_WRDLEN7)
- *bits = 7;
- else
- *bits = 8;
+ s = dev_get_drvdata(port->dev);
- quot = ubrlcr & UBRLCR_BAUD_MASK;
- *baud = port->uartclk / (16 * (quot + 1));
- }
-}
+ if (!options) {
+ u32 syscon = 0;
-static int uart_clps711x_console_setup(struct console *co, char *options)
-{
- int baud = 38400, bits = 8, parity = 'n', flow = 'n';
- struct clps711x_port *s = (struct clps711x_port *)co->data;
- struct uart_port *port = &s->port[(co->index > 0) ? co->index : 0];
+ regmap_read(s->syscon, SYSCON_OFFSET, &syscon);
+ if (syscon & SYSCON_UARTEN) {
+ ubrlcr = readl(port->membase + UBRLCR_OFFSET);
+
+ if (ubrlcr & UBRLCR_PRTEN) {
+ if (ubrlcr & UBRLCR_EVENPRT)
+ parity = 'e';
+ else
+ parity = 'o';
+ }
+
+ if ((ubrlcr & UBRLCR_WRDLEN_MASK) == UBRLCR_WRDLEN7)
+ bits = 7;
- if (options)
+ quot = ubrlcr & UBRLCR_BAUD_MASK;
+ baud = port->uartclk / (16 * (quot + 1));
+ }
+ } else
uart_parse_options(options, &baud, &parity, &bits, &flow);
- else
- uart_clps711x_console_get_options(port, &baud, &parity, &bits);
- return uart_set_options(port, co, baud, parity, bits, flow);
+ ret = uart_set_options(port, co, baud, parity, bits, flow);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(s->syscon, SYSCON_OFFSET,
+ SYSCON_UARTEN, SYSCON_UARTEN);
}
+
+static struct console clps711x_console = {
+ .name = UART_CLPS711X_DEVNAME,
+ .device = uart_console_device,
+ .write = uart_clps711x_console_write,
+ .setup = uart_clps711x_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
#endif
static int uart_clps711x_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
+ int ret, index = np ? of_alias_get_id(np, "serial") : pdev->id;
struct clps711x_port *s;
- int ret, i;
+ struct resource *res;
+ struct clk *uart_clk;
- s = devm_kzalloc(&pdev->dev, sizeof(struct clps711x_port), GFP_KERNEL);
- if (!s) {
- dev_err(&pdev->dev, "Error allocating port structure\n");
+ if (index < 0 || index >= UART_CLPS711X_NR)
+ return -EINVAL;
+
+ s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
+ if (!s)
return -ENOMEM;
+
+ uart_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(uart_clk))
+ return PTR_ERR(uart_clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ s->port.membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(s->port.membase))
+ return PTR_ERR(s->port.membase);
+
+ s->port.irq = platform_get_irq(pdev, 0);
+ if (IS_ERR_VALUE(s->port.irq))
+ return s->port.irq;
+
+ s->rx_irq = platform_get_irq(pdev, 1);
+ if (IS_ERR_VALUE(s->rx_irq))
+ return s->rx_irq;
+
+ if (!np) {
+ char syscon_name[9];
+
+ sprintf(syscon_name, "syscon.%i", index + 1);
+ s->syscon = syscon_regmap_lookup_by_pdevname(syscon_name);
+ if (IS_ERR(s->syscon))
+ return PTR_ERR(s->syscon);
+
+ s->use_ms = !index;
+ } else {
+ s->syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
+ if (IS_ERR(s->syscon))
+ return PTR_ERR(s->syscon);
+
+ if (!index)
+ s->use_ms = of_property_read_bool(np, "uart-use-ms");
}
+
+ s->port.line = index;
+ s->port.dev = &pdev->dev;
+ s->port.iotype = UPIO_MEM32;
+ s->port.mapbase = res->start;
+ s->port.type = PORT_CLPS711X;
+ s->port.fifosize = 16;
+ s->port.flags = UPF_SKIP_TEST | UPF_FIXED_TYPE;
+ s->port.uartclk = clk_get_rate(uart_clk);
+ s->port.ops = &uart_clps711x_ops;
+
platform_set_drvdata(pdev, s);
- s->uart_clk = devm_clk_get(&pdev->dev, "uart");
- if (IS_ERR(s->uart_clk)) {
- dev_err(&pdev->dev, "Can't get UART clocks\n");
- return PTR_ERR(s->uart_clk);
- }
+ ret = uart_add_one_port(&clps711x_uart, &s->port);
+ if (ret)
+ return ret;
- s->uart.owner = THIS_MODULE;
- s->uart.dev_name = "ttyCL";
- s->uart.major = UART_CLPS711X_MAJOR;
- s->uart.minor = UART_CLPS711X_MINOR;
- s->uart.nr = UART_CLPS711X_NR;
-#ifdef CONFIG_SERIAL_CLPS711X_CONSOLE
- s->uart.cons = &s->console;
- s->uart.cons->device = uart_console_device;
- s->uart.cons->write = uart_clps711x_console_write;
- s->uart.cons->setup = uart_clps711x_console_setup;
- s->uart.cons->flags = CON_PRINTBUFFER;
- s->uart.cons->index = -1;
- s->uart.cons->data = s;
- strcpy(s->uart.cons->name, "ttyCL");
-#endif
- ret = uart_register_driver(&s->uart);
+ /* Disable port */
+ if (!uart_console(&s->port))
+ regmap_update_bits(s->syscon, SYSCON_OFFSET, SYSCON_UARTEN, 0);
+
+ s->tx_enabled = 1;
+
+ ret = devm_request_irq(&pdev->dev, s->port.irq, uart_clps711x_int_tx, 0,
+ dev_name(&pdev->dev), &s->port);
if (ret) {
- dev_err(&pdev->dev, "Registering UART driver failed\n");
+ uart_remove_one_port(&clps711x_uart, &s->port);
return ret;
}
- for (i = 0; i < UART_CLPS711X_NR; i++) {
- s->port[i].line = i;
- s->port[i].dev = &pdev->dev;
- s->port[i].irq = TX_IRQ(&s->port[i]);
- s->port[i].iobase = SYSCON(&s->port[i]);
- s->port[i].type = PORT_CLPS711X;
- s->port[i].fifosize = 16;
- s->port[i].flags = UPF_SKIP_TEST | UPF_FIXED_TYPE;
- s->port[i].uartclk = clk_get_rate(s->uart_clk);
- s->port[i].ops = &uart_clps711x_ops;
- WARN_ON(uart_add_one_port(&s->uart, &s->port[i]));
- }
+ ret = devm_request_irq(&pdev->dev, s->rx_irq, uart_clps711x_int_rx, 0,
+ dev_name(&pdev->dev), &s->port);
+ if (ret)
+ uart_remove_one_port(&clps711x_uart, &s->port);
- return 0;
+ return ret;
}
static int uart_clps711x_remove(struct platform_device *pdev)
{
struct clps711x_port *s = platform_get_drvdata(pdev);
- int i;
- for (i = 0; i < UART_CLPS711X_NR; i++)
- uart_remove_one_port(&s->uart, &s->port[i]);
-
- uart_unregister_driver(&s->uart);
-
- return 0;
+ return uart_remove_one_port(&clps711x_uart, &s->port);
}
-static struct platform_driver clps711x_uart_driver = {
+static const struct of_device_id __maybe_unused clps711x_uart_dt_ids[] = {
+ { .compatible = "cirrus,clps711x-uart", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, clps711x_uart_dt_ids);
+
+static struct platform_driver clps711x_uart_platform = {
.driver = {
- .name = UART_CLPS711X_NAME,
- .owner = THIS_MODULE,
+ .name = "clps711x-uart",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(clps711x_uart_dt_ids),
},
.probe = uart_clps711x_probe,
.remove = uart_clps711x_remove,
};
-module_platform_driver(clps711x_uart_driver);
-
-static struct platform_device clps711x_uart_device = {
- .name = UART_CLPS711X_NAME,
-};
static int __init uart_clps711x_init(void)
{
- return platform_device_register(&clps711x_uart_device);
+ int ret;
+
+#ifdef CONFIG_SERIAL_CLPS711X_CONSOLE
+ clps711x_uart.cons = &clps711x_console;
+ clps711x_console.data = &clps711x_uart;
+#endif
+
+ ret = uart_register_driver(&clps711x_uart);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&clps711x_uart_platform);
}
module_init(uart_clps711x_init);
static void __exit uart_clps711x_exit(void)
{
- platform_device_unregister(&clps711x_uart_device);
+ platform_driver_unregister(&clps711x_uart_platform);
+ uart_unregister_driver(&clps711x_uart);
}
module_exit(uart_clps711x_exit);
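Note on the clps711x rewrite above: the driver stops poking global registers through clps_readl()/clps_writel() and instead obtains a syscon regmap from the device tree, keeping only the per-port UARTDR/UBRLCR registers as plain MMIO. A minimal sketch of the syscon pattern, with hypothetical qux_* names and assumed register offsets/bits (the real ones come from the syscon header):

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

#define QUX_SYSCON_REG		0x00		/* assumed offset */
#define QUX_UART_ENABLE		BIT(8)		/* assumed bit */

static int qux_enable_uart(struct device_node *np)
{
	struct regmap *syscon;
	unsigned int val;

	/* "syscon" is the DT phandle property naming the system controller */
	syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
	if (IS_ERR(syscon))
		return PTR_ERR(syscon);

	/* read-modify-write through the shared regmap instead of raw MMIO */
	regmap_read(syscon, QUX_SYSCON_REG, &val);
	if (val & QUX_UART_ENABLE)
		return 0;

	return regmap_update_bits(syscon, QUX_SYSCON_REG,
				  QUX_UART_ENABLE, QUX_UART_ENABLE);
}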
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
index 527a969b0952..6d3b22e93246 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
@@ -29,7 +29,6 @@
#include <linux/tty.h>
#include <linux/gfp.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/serial.h>
#include <linux/console.h>
#include <linux/sysrq.h>
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
index a4927e66e741..f46d2ca87209 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
@@ -29,7 +29,6 @@
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/serial.h>
#include <linux/console.h>
#include <linux/sysrq.h>
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index d98e43348970..67423805e6d9 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -455,11 +455,11 @@ static void load_code(struct icom_port *icom_port)
for (index = 0; index < fw->size; index++)
new_page[index] = fw->data[index];
- release_firmware(fw);
-
writeb((char) ((fw->size + 16)/16), &icom_port->dram->mac_length);
writel(temp_pci, &icom_port->dram->mac_load_addr);
+ release_firmware(fw);
+
/*Setting the syncReg to 0x80 causes adapter to start downloading
the personality code into adapter instruction RAM.
Once code is loaded, it will begin executing and, based on
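Note on the icom hunk above: release_firmware() is moved after the last use of fw->size, closing a use-after-free. The general rule is that the firmware buffer must stay alive until every access to fw->data and fw->size is done; a small sketch with a hypothetical baz_load_fw() helper and an assumed firmware file name:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/io.h>

static int baz_load_fw(struct device *dev, void __iomem *dst)
{
	const struct firmware *fw;
	size_t i;
	int ret;

	ret = request_firmware(&fw, "baz/firmware.bin", dev);	/* assumed name */
	if (ret)
		return ret;

	for (i = 0; i < fw->size; i++)
		writeb(fw->data[i], dst + i);

	dev_info(dev, "loaded %zu bytes of firmware\n", fw->size);

	/* release only after the last access to fw->data / fw->size */
	release_firmware(fw);
	return 0;
}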
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index b2cfdb661947..d799140e53b6 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -806,6 +806,9 @@ static unsigned int imx_get_mctrl(struct uart_port *port)
if (readl(sport->port.membase + UCR2) & UCR2_CTS)
tmp |= TIOCM_RTS;
+ if (readl(sport->port.membase + uts_reg(sport)) & UTS_LOOP)
+ tmp |= TIOCM_LOOP;
+
return tmp;
}
@@ -821,6 +824,11 @@ static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl)
temp |= UCR2_CTS;
writel(temp, sport->port.membase + UCR2);
+
+ temp = readl(sport->port.membase + uts_reg(sport)) & ~UTS_LOOP;
+ if (mctrl & TIOCM_LOOP)
+ temp |= UTS_LOOP;
+ writel(temp, sport->port.membase + uts_reg(sport));
}
/*
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
index 5dafcf1c227b..5f673b7ca50e 100644
--- a/drivers/tty/serial/kgdb_nmi.c
+++ b/drivers/tty/serial/kgdb_nmi.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/compiler.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/atomic.h>
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index ec06505e3ae6..97888f4900ec 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -421,6 +421,7 @@ struct psc_fifoc {
static struct psc_fifoc __iomem *psc_fifoc;
static unsigned int psc_fifoc_irq;
+static struct clk *psc_fifoc_clk;
static void mpc512x_psc_fifo_init(struct uart_port *port)
{
@@ -568,36 +569,73 @@ static unsigned int mpc512x_psc_set_baudrate(struct uart_port *port,
/* Init PSC FIFO Controller */
static int __init mpc512x_psc_fifoc_init(void)
{
+ int err;
struct device_node *np;
+ struct clk *clk;
+
+ /* default error code, potentially overwritten by clock calls */
+ err = -ENODEV;
np = of_find_compatible_node(NULL, NULL,
"fsl,mpc5121-psc-fifo");
if (!np) {
pr_err("%s: Can't find FIFOC node\n", __func__);
- return -ENODEV;
+ goto out_err;
}
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ /* backwards compat with device trees that lack clock specs */
+ clk = clk_get_sys(np->name, "ipg");
+ }
+ if (IS_ERR(clk)) {
+ pr_err("%s: Can't lookup FIFO clock\n", __func__);
+ err = PTR_ERR(clk);
+ goto out_ofnode_put;
+ }
+ if (clk_prepare_enable(clk)) {
+ pr_err("%s: Can't enable FIFO clock\n", __func__);
+ clk_put(clk);
+ goto out_ofnode_put;
+ }
+ psc_fifoc_clk = clk;
+
psc_fifoc = of_iomap(np, 0);
if (!psc_fifoc) {
pr_err("%s: Can't map FIFOC\n", __func__);
- of_node_put(np);
- return -ENODEV;
+ goto out_clk_disable;
}
psc_fifoc_irq = irq_of_parse_and_map(np, 0);
- of_node_put(np);
if (psc_fifoc_irq == 0) {
pr_err("%s: Can't get FIFOC irq\n", __func__);
- iounmap(psc_fifoc);
- return -ENODEV;
+ goto out_unmap;
}
+ of_node_put(np);
return 0;
+
+out_unmap:
+ iounmap(psc_fifoc);
+out_clk_disable:
+ clk_disable_unprepare(psc_fifoc_clk);
+ clk_put(psc_fifoc_clk);
+out_ofnode_put:
+ of_node_put(np);
+out_err:
+ return err;
}
static void __exit mpc512x_psc_fifoc_uninit(void)
{
iounmap(psc_fifoc);
+
+ /* disable the clock, errors are not fatal */
+ if (psc_fifoc_clk) {
+ clk_disable_unprepare(psc_fifoc_clk);
+ clk_put(psc_fifoc_clk);
+ psc_fifoc_clk = NULL;
+ }
}
/* 512x specific interrupt handler. The caller holds the port lock */
@@ -619,29 +657,55 @@ static irqreturn_t mpc512x_psc_handle_irq(struct uart_port *port)
}
static struct clk *psc_mclk_clk[MPC52xx_PSC_MAXNUM];
+static struct clk *psc_ipg_clk[MPC52xx_PSC_MAXNUM];
/* called from within the .request_port() callback (allocation) */
static int mpc512x_psc_alloc_clock(struct uart_port *port)
{
int psc_num;
- char clk_name[16];
struct clk *clk;
int err;
psc_num = (port->mapbase & 0xf00) >> 8;
- snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num);
- clk = devm_clk_get(port->dev, clk_name);
+
+ clk = devm_clk_get(port->dev, "mclk");
if (IS_ERR(clk)) {
dev_err(port->dev, "Failed to get MCLK!\n");
- return PTR_ERR(clk);
+ err = PTR_ERR(clk);
+ goto out_err;
}
err = clk_prepare_enable(clk);
if (err) {
dev_err(port->dev, "Failed to enable MCLK!\n");
- return err;
+ goto out_err;
}
psc_mclk_clk[psc_num] = clk;
+
+ clk = devm_clk_get(port->dev, "ipg");
+ if (IS_ERR(clk)) {
+ dev_err(port->dev, "Failed to get IPG clock!\n");
+ err = PTR_ERR(clk);
+ goto out_err;
+ }
+ err = clk_prepare_enable(clk);
+ if (err) {
+ dev_err(port->dev, "Failed to enable IPG clock!\n");
+ goto out_err;
+ }
+ psc_ipg_clk[psc_num] = clk;
+
return 0;
+
+out_err:
+ if (psc_mclk_clk[psc_num]) {
+ clk_disable_unprepare(psc_mclk_clk[psc_num]);
+ psc_mclk_clk[psc_num] = NULL;
+ }
+ if (psc_ipg_clk[psc_num]) {
+ clk_disable_unprepare(psc_ipg_clk[psc_num]);
+ psc_ipg_clk[psc_num] = NULL;
+ }
+ return err;
}
/* called from within the .release_port() callback (release) */
@@ -656,6 +720,10 @@ static void mpc512x_psc_relse_clock(struct uart_port *port)
clk_disable_unprepare(clk);
psc_mclk_clk[psc_num] = NULL;
}
+ if (psc_ipg_clk[psc_num]) {
+ clk_disable_unprepare(psc_ipg_clk[psc_num]);
+ psc_ipg_clk[psc_num] = NULL;
+ }
}
/* implementation of the .clock() callback (enable/disable) */
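Note on the mpc512x hunks above: both the FIFO-controller init and the per-PSC clock allocation gain goto-based error unwinding, so every failure point releases exactly what was acquired before it. A condensed sketch of that shape with a hypothetical quux_init():

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

static int quux_init(struct device_node *np)
{
	void __iomem *base;
	struct clk *clk;
	int err;

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	err = clk_prepare_enable(clk);
	if (err)
		goto out_put_clk;

	base = of_iomap(np, 0);
	if (!base) {
		err = -ENOMEM;
		goto out_disable_clk;
	}

	return 0;

out_disable_clk:
	clk_disable_unprepare(clk);
out_put_clk:
	clk_put(clk);
	return err;
}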
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index d8b6fee77a03..aa97fd845b4d 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -734,9 +734,12 @@ static void mxs_auart_reset(struct uart_port *u)
static int mxs_auart_startup(struct uart_port *u)
{
+ int ret;
struct mxs_auart_port *s = to_auart_port(u);
- clk_prepare_enable(s->clk);
+ ret = clk_prepare_enable(s->clk);
+ if (ret)
+ return ret;
writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
@@ -957,7 +960,9 @@ auart_console_setup(struct console *co, char *options)
if (!s)
return -ENODEV;
- clk_prepare_enable(s->clk);
+ ret = clk_prepare_enable(s->clk);
+ if (ret)
+ return ret;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 2caf9c6f6149..99246606a256 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -9,7 +9,6 @@
* 2 of the License, or (at your option) any later version.
*
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index fa511ebab67c..77f035158d6c 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -738,9 +738,6 @@ static int serial_omap_startup(struct uart_port *port)
return retval;
}
disable_irq(up->wakeirq);
- } else {
- dev_info(up->port.dev, "no wakeirq for uart%d\n",
- up->port.line);
}
dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
@@ -1604,8 +1601,11 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
flags & SER_RS485_RTS_AFTER_SEND);
if (ret < 0)
return ret;
- } else
+ } else if (up->rts_gpio == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else {
up->rts_gpio = -EINVAL;
+ }
if (of_property_read_u32_array(np, "rs485-rts-delay",
rs485_delay, 2) == 0) {
@@ -1687,6 +1687,9 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.iotype = UPIO_MEM;
up->port.irq = uartirq;
up->wakeirq = wakeirq;
+ if (!up->wakeirq)
+ dev_info(up->port.dev, "no wakeirq for uart%d\n",
+ up->port.line);
up->port.regshift = 2;
up->port.fifosize = 64;
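Note on the omap-serial hunks above: a missing RTS GPIO is now distinguished from a GPIO whose provider simply has not probed yet, so -EPROBE_DEFER is propagated instead of being silently turned into "no GPIO". A sketch of that distinction with a hypothetical helper and an assumed property name:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

static int corge_get_rts_gpio(struct device_node *np)
{
	int gpio = of_get_named_gpio(np, "rts-gpio", 0);	/* assumed property */

	if (gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* retry once the GPIO provider is up */
	if (gpio < 0)
		return -EINVAL;		/* genuinely absent: run without the GPIO */
	return gpio;
}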
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 0aa2b528ef3d..8fa1134e0051 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1508,10 +1508,14 @@ static int pch_uart_verify_port(struct uart_port *port,
__func__);
return -EOPNOTSUPP;
#endif
- dev_info(priv->port.dev, "PCH UART : Use DMA Mode\n");
- if (!priv->use_dma)
+ if (!priv->use_dma) {
pch_request_dma(port);
- priv->use_dma = 1;
+ if (priv->chan_rx)
+ priv->use_dma = 1;
+ }
+ dev_info(priv->port.dev, "PCH UART: %s\n",
+ priv->use_dma ?
+ "Use DMA Mode" : "No DMA");
}
return 0;
@@ -1853,7 +1857,6 @@ static void pch_uart_exit_port(struct eg20t_port *priv)
debugfs_remove(priv->debugfs);
#endif
uart_remove_one_port(&pch_uart_driver, &priv->port);
- pci_set_drvdata(priv->pdev, NULL);
free_page((unsigned long)priv->rxbuf.buf);
}
@@ -1907,7 +1910,7 @@ static int pch_uart_pci_resume(struct pci_dev *pdev)
#define pch_uart_pci_resume NULL
#endif
-static DEFINE_PCI_DEVICE_TABLE(pch_uart_pci_id) = {
+static const struct pci_device_id pch_uart_pci_id[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8811),
.driver_data = pch_et20t_uart0},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8812),
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
index 328d6deb6b08..056f91b3a4ca 100644
--- a/drivers/tty/serial/rp2.c
+++ b/drivers/tty/serial/rp2.c
@@ -810,7 +810,7 @@ static void rp2_remove(struct pci_dev *pdev)
rp2_remove_ports(card);
}
-static DEFINE_PCI_DEVICE_TABLE(rp2_pci_tbl) = {
+static const struct pci_device_id rp2_pci_tbl[] = {
/* RocketPort INFINITY cards */
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index c1af04d46682..9cd706df3b33 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1209,7 +1209,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
/* reset the fifos (and setup the uart) */
s3c24xx_serial_resetport(port, cfg);
- clk_disable_unprepare(ourport->clk);
return 0;
}
@@ -1287,6 +1286,13 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
platform_set_drvdata(pdev, &ourport->port);
+ /*
+ * Deactivate the clock enabled in s3c24xx_serial_init_port here,
+ * so that a potential re-enablement through the pm-callback overlaps
+ * and keeps the clock enabled in this case.
+ */
+ clk_disable_unprepare(ourport->clk);
+
#ifdef CONFIG_SAMSUNG_CLOCK
ret = device_create_file(&pdev->dev, &dev_attr_clock_source);
if (ret < 0)
diff --git a/drivers/tty/serial/sc26xx.c b/drivers/tty/serial/sc26xx.c
deleted file mode 100644
index 887b4f770749..000000000000
--- a/drivers/tty/serial/sc26xx.c
+++ /dev/null
@@ -1,749 +0,0 @@
-/*
- * SC268xx.c: Serial driver for Philiphs SC2681/SC2692 devices.
- *
- * Copyright (C) 2006,2007 Thomas Bogendörfer (tsbogend@alpha.franken.de)
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/major.h>
-#include <linux/circ_buf.h>
-#include <linux/serial.h>
-#include <linux/sysrq.h>
-#include <linux/console.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-
-#warning "Please try migrate to use new driver SCCNXP and report the status" \
- "in the linux-serial mailing list."
-
-#if defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
-#include <linux/serial_core.h>
-
-#define SC26XX_MAJOR 204
-#define SC26XX_MINOR_START 205
-#define SC26XX_NR 2
-
-struct uart_sc26xx_port {
- struct uart_port port[2];
- u8 dsr_mask[2];
- u8 cts_mask[2];
- u8 dcd_mask[2];
- u8 ri_mask[2];
- u8 dtr_mask[2];
- u8 rts_mask[2];
- u8 imr;
-};
-
-/* register common to both ports */
-#define RD_ISR 0x14
-#define RD_IPR 0x34
-
-#define WR_ACR 0x10
-#define WR_IMR 0x14
-#define WR_OPCR 0x34
-#define WR_OPR_SET 0x38
-#define WR_OPR_CLR 0x3C
-
-/* access common register */
-#define READ_SC(p, r) readb((p)->membase + RD_##r)
-#define WRITE_SC(p, r, v) writeb((v), (p)->membase + WR_##r)
-
-/* register per port */
-#define RD_PORT_MRx 0x00
-#define RD_PORT_SR 0x04
-#define RD_PORT_RHR 0x0c
-
-#define WR_PORT_MRx 0x00
-#define WR_PORT_CSR 0x04
-#define WR_PORT_CR 0x08
-#define WR_PORT_THR 0x0c
-
-/* SR bits */
-#define SR_BREAK (1 << 7)
-#define SR_FRAME (1 << 6)
-#define SR_PARITY (1 << 5)
-#define SR_OVERRUN (1 << 4)
-#define SR_TXRDY (1 << 2)
-#define SR_RXRDY (1 << 0)
-
-#define CR_RES_MR (1 << 4)
-#define CR_RES_RX (2 << 4)
-#define CR_RES_TX (3 << 4)
-#define CR_STRT_BRK (6 << 4)
-#define CR_STOP_BRK (7 << 4)
-#define CR_DIS_TX (1 << 3)
-#define CR_ENA_TX (1 << 2)
-#define CR_DIS_RX (1 << 1)
-#define CR_ENA_RX (1 << 0)
-
-/* ISR bits */
-#define ISR_RXRDYB (1 << 5)
-#define ISR_TXRDYB (1 << 4)
-#define ISR_RXRDYA (1 << 1)
-#define ISR_TXRDYA (1 << 0)
-
-/* IMR bits */
-#define IMR_RXRDY (1 << 1)
-#define IMR_TXRDY (1 << 0)
-
-/* access port register */
-static inline u8 read_sc_port(struct uart_port *p, u8 reg)
-{
- return readb(p->membase + p->line * 0x20 + reg);
-}
-
-static inline void write_sc_port(struct uart_port *p, u8 reg, u8 val)
-{
- writeb(val, p->membase + p->line * 0x20 + reg);
-}
-
-#define READ_SC_PORT(p, r) read_sc_port(p, RD_PORT_##r)
-#define WRITE_SC_PORT(p, r, v) write_sc_port(p, WR_PORT_##r, v)
-
-static void sc26xx_enable_irq(struct uart_port *port, int mask)
-{
- struct uart_sc26xx_port *up;
- int line = port->line;
-
- port -= line;
- up = container_of(port, struct uart_sc26xx_port, port[0]);
-
- up->imr |= mask << (line * 4);
- WRITE_SC(port, IMR, up->imr);
-}
-
-static void sc26xx_disable_irq(struct uart_port *port, int mask)
-{
- struct uart_sc26xx_port *up;
- int line = port->line;
-
- port -= line;
- up = container_of(port, struct uart_sc26xx_port, port[0]);
-
- up->imr &= ~(mask << (line * 4));
- WRITE_SC(port, IMR, up->imr);
-}
-
-static bool receive_chars(struct uart_port *port)
-{
- struct tty_port *tport = NULL;
- int limit = 10000;
- unsigned char ch;
- char flag;
- u8 status;
-
- /* FIXME what is this trying to achieve? */
- if (port->state != NULL) /* Unopened serial console */
- tport = &port->state->port;
-
- while (limit-- > 0) {
- status = READ_SC_PORT(port, SR);
- if (!(status & SR_RXRDY))
- break;
- ch = READ_SC_PORT(port, RHR);
-
- flag = TTY_NORMAL;
- port->icount.rx++;
-
- if (unlikely(status & (SR_BREAK | SR_FRAME |
- SR_PARITY | SR_OVERRUN))) {
- if (status & SR_BREAK) {
- status &= ~(SR_PARITY | SR_FRAME);
- port->icount.brk++;
- if (uart_handle_break(port))
- continue;
- } else if (status & SR_PARITY)
- port->icount.parity++;
- else if (status & SR_FRAME)
- port->icount.frame++;
- if (status & SR_OVERRUN)
- port->icount.overrun++;
-
- status &= port->read_status_mask;
- if (status & SR_BREAK)
- flag = TTY_BREAK;
- else if (status & SR_PARITY)
- flag = TTY_PARITY;
- else if (status & SR_FRAME)
- flag = TTY_FRAME;
- }
-
- if (uart_handle_sysrq_char(port, ch))
- continue;
-
- if (status & port->ignore_status_mask)
- continue;
-
- tty_insert_flip_char(tport, ch, flag);
- }
- return !!tport;
-}
-
-static void transmit_chars(struct uart_port *port)
-{
- struct circ_buf *xmit;
-
- if (!port->state)
- return;
-
- xmit = &port->state->xmit;
- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- sc26xx_disable_irq(port, IMR_TXRDY);
- return;
- }
- while (!uart_circ_empty(xmit)) {
- if (!(READ_SC_PORT(port, SR) & SR_TXRDY))
- break;
-
- WRITE_SC_PORT(port, THR, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- }
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
-}
-
-static irqreturn_t sc26xx_interrupt(int irq, void *dev_id)
-{
- struct uart_sc26xx_port *up = dev_id;
- unsigned long flags;
- bool push;
- u8 isr;
-
- spin_lock_irqsave(&up->port[0].lock, flags);
-
- push = false;
- isr = READ_SC(&up->port[0], ISR);
- if (isr & ISR_TXRDYA)
- transmit_chars(&up->port[0]);
- if (isr & ISR_RXRDYA)
- push = receive_chars(&up->port[0]);
-
- spin_unlock(&up->port[0].lock);
-
- if (push)
- tty_flip_buffer_push(&up->port[0].state->port);
-
- spin_lock(&up->port[1].lock);
-
- push = false;
- if (isr & ISR_TXRDYB)
- transmit_chars(&up->port[1]);
- if (isr & ISR_RXRDYB)
- push = receive_chars(&up->port[1]);
-
- spin_unlock_irqrestore(&up->port[1].lock, flags);
-
- if (push)
- tty_flip_buffer_push(&up->port[1].state->port);
-
- return IRQ_HANDLED;
-}
-
-/* port->lock is not held. */
-static unsigned int sc26xx_tx_empty(struct uart_port *port)
-{
- return (READ_SC_PORT(port, SR) & SR_TXRDY) ? TIOCSER_TEMT : 0;
-}
-
-/* port->lock held by caller. */
-static void sc26xx_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- struct uart_sc26xx_port *up;
- int line = port->line;
-
- port -= line;
- up = container_of(port, struct uart_sc26xx_port, port[0]);
-
- if (up->dtr_mask[line]) {
- if (mctrl & TIOCM_DTR)
- WRITE_SC(port, OPR_SET, up->dtr_mask[line]);
- else
- WRITE_SC(port, OPR_CLR, up->dtr_mask[line]);
- }
- if (up->rts_mask[line]) {
- if (mctrl & TIOCM_RTS)
- WRITE_SC(port, OPR_SET, up->rts_mask[line]);
- else
- WRITE_SC(port, OPR_CLR, up->rts_mask[line]);
- }
-}
-
-/* port->lock is held by caller and interrupts are disabled. */
-static unsigned int sc26xx_get_mctrl(struct uart_port *port)
-{
- struct uart_sc26xx_port *up;
- int line = port->line;
- unsigned int mctrl = TIOCM_DSR | TIOCM_CTS | TIOCM_CAR;
- u8 ipr;
-
- port -= line;
- up = container_of(port, struct uart_sc26xx_port, port[0]);
- ipr = READ_SC(port, IPR) ^ 0xff;
-
- if (up->dsr_mask[line]) {
- mctrl &= ~TIOCM_DSR;
- mctrl |= ipr & up->dsr_mask[line] ? TIOCM_DSR : 0;
- }
- if (up->cts_mask[line]) {
- mctrl &= ~TIOCM_CTS;
- mctrl |= ipr & up->cts_mask[line] ? TIOCM_CTS : 0;
- }
- if (up->dcd_mask[line]) {
- mctrl &= ~TIOCM_CAR;
- mctrl |= ipr & up->dcd_mask[line] ? TIOCM_CAR : 0;
- }
- if (up->ri_mask[line]) {
- mctrl &= ~TIOCM_RNG;
- mctrl |= ipr & up->ri_mask[line] ? TIOCM_RNG : 0;
- }
- return mctrl;
-}
-
-/* port->lock held by caller. */
-static void sc26xx_stop_tx(struct uart_port *port)
-{
- return;
-}
-
-/* port->lock held by caller. */
-static void sc26xx_start_tx(struct uart_port *port)
-{
- struct circ_buf *xmit = &port->state->xmit;
-
- while (!uart_circ_empty(xmit)) {
- if (!(READ_SC_PORT(port, SR) & SR_TXRDY)) {
- sc26xx_enable_irq(port, IMR_TXRDY);
- break;
- }
- WRITE_SC_PORT(port, THR, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- }
-}
-
-/* port->lock held by caller. */
-static void sc26xx_stop_rx(struct uart_port *port)
-{
-}
-
-/* port->lock held by caller. */
-static void sc26xx_enable_ms(struct uart_port *port)
-{
-}
-
-/* port->lock is not held. */
-static void sc26xx_break_ctl(struct uart_port *port, int break_state)
-{
- if (break_state == -1)
- WRITE_SC_PORT(port, CR, CR_STRT_BRK);
- else
- WRITE_SC_PORT(port, CR, CR_STOP_BRK);
-}
-
-/* port->lock is not held. */
-static int sc26xx_startup(struct uart_port *port)
-{
- sc26xx_disable_irq(port, IMR_TXRDY | IMR_RXRDY);
- WRITE_SC(port, OPCR, 0);
-
- /* reset tx and rx */
- WRITE_SC_PORT(port, CR, CR_RES_RX);
- WRITE_SC_PORT(port, CR, CR_RES_TX);
-
- /* start rx/tx */
- WRITE_SC_PORT(port, CR, CR_ENA_TX | CR_ENA_RX);
-
- /* enable irqs */
- sc26xx_enable_irq(port, IMR_RXRDY);
- return 0;
-}
-
-/* port->lock is not held. */
-static void sc26xx_shutdown(struct uart_port *port)
-{
- /* disable interrupst */
- sc26xx_disable_irq(port, IMR_TXRDY | IMR_RXRDY);
-
- /* stop tx/rx */
- WRITE_SC_PORT(port, CR, CR_DIS_TX | CR_DIS_RX);
-}
-
-/* port->lock is not held. */
-static void sc26xx_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
-{
- unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
- unsigned int quot = uart_get_divisor(port, baud);
- unsigned int iflag, cflag;
- unsigned long flags;
- u8 mr1, mr2, csr;
-
- spin_lock_irqsave(&port->lock, flags);
-
- while ((READ_SC_PORT(port, SR) & ((1 << 3) | (1 << 2))) != 0xc)
- udelay(2);
-
- WRITE_SC_PORT(port, CR, CR_DIS_TX | CR_DIS_RX);
-
- iflag = termios->c_iflag;
- cflag = termios->c_cflag;
-
- port->read_status_mask = SR_OVERRUN;
- if (iflag & INPCK)
- port->read_status_mask |= SR_PARITY | SR_FRAME;
- if (iflag & (BRKINT | PARMRK))
- port->read_status_mask |= SR_BREAK;
-
- port->ignore_status_mask = 0;
- if (iflag & IGNBRK)
- port->ignore_status_mask |= SR_BREAK;
- if ((cflag & CREAD) == 0)
- port->ignore_status_mask |= SR_BREAK | SR_FRAME |
- SR_PARITY | SR_OVERRUN;
-
- switch (cflag & CSIZE) {
- case CS5:
- mr1 = 0x00;
- break;
- case CS6:
- mr1 = 0x01;
- break;
- case CS7:
- mr1 = 0x02;
- break;
- default:
- case CS8:
- mr1 = 0x03;
- break;
- }
- mr2 = 0x07;
- if (cflag & CSTOPB)
- mr2 = 0x0f;
- if (cflag & PARENB) {
- if (cflag & PARODD)
- mr1 |= (1 << 2);
- } else
- mr1 |= (2 << 3);
-
- switch (baud) {
- case 50:
- csr = 0x00;
- break;
- case 110:
- csr = 0x11;
- break;
- case 134:
- csr = 0x22;
- break;
- case 200:
- csr = 0x33;
- break;
- case 300:
- csr = 0x44;
- break;
- case 600:
- csr = 0x55;
- break;
- case 1200:
- csr = 0x66;
- break;
- case 2400:
- csr = 0x88;
- break;
- case 4800:
- csr = 0x99;
- break;
- default:
- case 9600:
- csr = 0xbb;
- break;
- case 19200:
- csr = 0xcc;
- break;
- }
-
- WRITE_SC_PORT(port, CR, CR_RES_MR);
- WRITE_SC_PORT(port, MRx, mr1);
- WRITE_SC_PORT(port, MRx, mr2);
-
- WRITE_SC(port, ACR, 0x80);
- WRITE_SC_PORT(port, CSR, csr);
-
- /* reset tx and rx */
- WRITE_SC_PORT(port, CR, CR_RES_RX);
- WRITE_SC_PORT(port, CR, CR_RES_TX);
-
- WRITE_SC_PORT(port, CR, CR_ENA_TX | CR_ENA_RX);
- while ((READ_SC_PORT(port, SR) & ((1 << 3) | (1 << 2))) != 0xc)
- udelay(2);
-
- /* XXX */
- uart_update_timeout(port, cflag,
- (port->uartclk / (16 * quot)));
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static const char *sc26xx_type(struct uart_port *port)
-{
- return "SC26XX";
-}
-
-static void sc26xx_release_port(struct uart_port *port)
-{
-}
-
-static int sc26xx_request_port(struct uart_port *port)
-{
- return 0;
-}
-
-static void sc26xx_config_port(struct uart_port *port, int flags)
-{
-}
-
-static int sc26xx_verify_port(struct uart_port *port, struct serial_struct *ser)
-{
- return -EINVAL;
-}
-
-static struct uart_ops sc26xx_ops = {
- .tx_empty = sc26xx_tx_empty,
- .set_mctrl = sc26xx_set_mctrl,
- .get_mctrl = sc26xx_get_mctrl,
- .stop_tx = sc26xx_stop_tx,
- .start_tx = sc26xx_start_tx,
- .stop_rx = sc26xx_stop_rx,
- .enable_ms = sc26xx_enable_ms,
- .break_ctl = sc26xx_break_ctl,
- .startup = sc26xx_startup,
- .shutdown = sc26xx_shutdown,
- .set_termios = sc26xx_set_termios,
- .type = sc26xx_type,
- .release_port = sc26xx_release_port,
- .request_port = sc26xx_request_port,
- .config_port = sc26xx_config_port,
- .verify_port = sc26xx_verify_port,
-};
-
-static struct uart_port *sc26xx_port;
-
-#ifdef CONFIG_SERIAL_SC26XX_CONSOLE
-static void sc26xx_console_putchar(struct uart_port *port, char c)
-{
- unsigned long flags;
- int limit = 1000000;
-
- spin_lock_irqsave(&port->lock, flags);
-
- while (limit-- > 0) {
- if (READ_SC_PORT(port, SR) & SR_TXRDY) {
- WRITE_SC_PORT(port, THR, c);
- break;
- }
- udelay(2);
- }
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void sc26xx_console_write(struct console *con, const char *s, unsigned n)
-{
- struct uart_port *port = sc26xx_port;
- int i;
-
- for (i = 0; i < n; i++) {
- if (*s == '\n')
- sc26xx_console_putchar(port, '\r');
- sc26xx_console_putchar(port, *s++);
- }
-}
-
-static int __init sc26xx_console_setup(struct console *con, char *options)
-{
- struct uart_port *port = sc26xx_port;
- int baud = 9600;
- int bits = 8;
- int parity = 'n';
- int flow = 'n';
-
- if (port->type != PORT_SC26XX)
- return -1;
-
- printk(KERN_INFO "Console: ttySC%d (SC26XX)\n", con->index);
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
-
- return uart_set_options(port, con, baud, parity, bits, flow);
-}
-
-static struct uart_driver sc26xx_reg;
-static struct console sc26xx_console = {
- .name = "ttySC",
- .write = sc26xx_console_write,
- .device = uart_console_device,
- .setup = sc26xx_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &sc26xx_reg,
-};
-#define SC26XX_CONSOLE &sc26xx_console
-#else
-#define SC26XX_CONSOLE NULL
-#endif
-
-static struct uart_driver sc26xx_reg = {
- .owner = THIS_MODULE,
- .driver_name = "SC26xx",
- .dev_name = "ttySC",
- .major = SC26XX_MAJOR,
- .minor = SC26XX_MINOR_START,
- .nr = SC26XX_NR,
- .cons = SC26XX_CONSOLE,
-};
-
-static u8 sc26xx_flags2mask(unsigned int flags, unsigned int bitpos)
-{
- unsigned int bit = (flags >> bitpos) & 15;
-
- return bit ? (1 << (bit - 1)) : 0;
-}
-
-static void sc26xx_init_masks(struct uart_sc26xx_port *up,
- int line, unsigned int data)
-{
- up->dtr_mask[line] = sc26xx_flags2mask(data, 0);
- up->rts_mask[line] = sc26xx_flags2mask(data, 4);
- up->dsr_mask[line] = sc26xx_flags2mask(data, 8);
- up->cts_mask[line] = sc26xx_flags2mask(data, 12);
- up->dcd_mask[line] = sc26xx_flags2mask(data, 16);
- up->ri_mask[line] = sc26xx_flags2mask(data, 20);
-}
-
-static int sc26xx_probe(struct platform_device *dev)
-{
- struct resource *res;
- struct uart_sc26xx_port *up;
- unsigned int *sc26xx_data = dev_get_platdata(&dev->dev);
- int err;
-
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- up = kzalloc(sizeof *up, GFP_KERNEL);
- if (unlikely(!up))
- return -ENOMEM;
-
- up->port[0].line = 0;
- up->port[0].ops = &sc26xx_ops;
- up->port[0].type = PORT_SC26XX;
- up->port[0].uartclk = (29491200 / 16); /* arbitrary */
-
- up->port[0].mapbase = res->start;
- up->port[0].membase = ioremap_nocache(up->port[0].mapbase, 0x40);
- up->port[0].iotype = UPIO_MEM;
- up->port[0].irq = platform_get_irq(dev, 0);
-
- up->port[0].dev = &dev->dev;
-
- sc26xx_init_masks(up, 0, sc26xx_data[0]);
-
- sc26xx_port = &up->port[0];
-
- up->port[1].line = 1;
- up->port[1].ops = &sc26xx_ops;
- up->port[1].type = PORT_SC26XX;
- up->port[1].uartclk = (29491200 / 16); /* arbitrary */
-
- up->port[1].mapbase = up->port[0].mapbase;
- up->port[1].membase = up->port[0].membase;
- up->port[1].iotype = UPIO_MEM;
- up->port[1].irq = up->port[0].irq;
-
- up->port[1].dev = &dev->dev;
-
- sc26xx_init_masks(up, 1, sc26xx_data[1]);
-
- err = uart_register_driver(&sc26xx_reg);
- if (err)
- goto out_free_port;
-
- sc26xx_reg.tty_driver->name_base = sc26xx_reg.minor;
-
- err = uart_add_one_port(&sc26xx_reg, &up->port[0]);
- if (err)
- goto out_unregister_driver;
-
- err = uart_add_one_port(&sc26xx_reg, &up->port[1]);
- if (err)
- goto out_remove_port0;
-
- err = request_irq(up->port[0].irq, sc26xx_interrupt, 0, "sc26xx", up);
- if (err)
- goto out_remove_ports;
-
- platform_set_drvdata(dev, up);
- return 0;
-
-out_remove_ports:
- uart_remove_one_port(&sc26xx_reg, &up->port[1]);
-out_remove_port0:
- uart_remove_one_port(&sc26xx_reg, &up->port[0]);
-
-out_unregister_driver:
- uart_unregister_driver(&sc26xx_reg);
-
-out_free_port:
- kfree(up);
- sc26xx_port = NULL;
- return err;
-}
-
-
-static int __exit sc26xx_driver_remove(struct platform_device *dev)
-{
- struct uart_sc26xx_port *up = platform_get_drvdata(dev);
-
- free_irq(up->port[0].irq, up);
-
- uart_remove_one_port(&sc26xx_reg, &up->port[0]);
- uart_remove_one_port(&sc26xx_reg, &up->port[1]);
-
- uart_unregister_driver(&sc26xx_reg);
-
- kfree(up);
- sc26xx_port = NULL;
-
- return 0;
-}
-
-static struct platform_driver sc26xx_driver = {
- .probe = sc26xx_probe,
- .remove = sc26xx_driver_remove,
- .driver = {
- .name = "SC26xx",
- .owner = THIS_MODULE,
- },
-};
-
-module_platform_driver(sc26xx_driver);
-
-MODULE_AUTHOR("Thomas Bogendörfer");
-MODULE_DESCRIPTION("SC681/SC2692 serial driver");
-MODULE_VERSION("1.0");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:SC26xx");
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index dfe79ccc4fb3..d5c2a287b7e7 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -34,6 +34,7 @@
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
@@ -44,8 +45,6 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
-#include <linux/clk/tegra.h>
-
#define TEGRA_UART_TYPE "TEGRA_UART"
#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
#define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)
@@ -103,6 +102,7 @@ struct tegra_uart_port {
const struct tegra_uart_chip_data *cdata;
struct clk *uart_clk;
+ struct reset_control *rst;
unsigned int current_baud;
/* Register shadow */
@@ -120,7 +120,6 @@ struct tegra_uart_port {
bool rx_timeout;
int rx_in_progress;
int symb_bit;
- int dma_req_sel;
struct dma_chan *rx_dma_chan;
struct dma_chan *tx_dma_chan;
@@ -832,9 +831,9 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
clk_prepare_enable(tup->uart_clk);
/* Reset the UART controller to clear all previous status.*/
- tegra_periph_reset_assert(tup->uart_clk);
+ reset_control_assert(tup->rst);
udelay(10);
- tegra_periph_reset_deassert(tup->uart_clk);
+ reset_control_deassert(tup->rst);
tup->rx_in_progress = 0;
tup->tx_in_progress = 0;
@@ -910,15 +909,14 @@ static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
dma_addr_t dma_phys;
int ret;
struct dma_slave_config dma_sconfig;
- dma_cap_mask_t mask;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- dma_chan = dma_request_channel(mask, NULL, NULL);
- if (!dma_chan) {
+ dma_chan = dma_request_slave_channel_reason(tup->uport.dev,
+ dma_to_memory ? "rx" : "tx");
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
dev_err(tup->uport.dev,
- "Dma channel is not available, will try later\n");
- return -EPROBE_DEFER;
+ "DMA channel alloc failed: %d\n", ret);
+ return ret;
}
if (dma_to_memory) {
@@ -938,7 +936,6 @@ static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
dma_buf = tup->uport.state->xmit.buf;
}
- dma_sconfig.slave_id = tup->dma_req_sel;
if (dma_to_memory) {
dma_sconfig.src_addr = tup->uport.mapbase;
dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
@@ -1222,17 +1219,8 @@ static int tegra_uart_parse_dt(struct platform_device *pdev,
struct tegra_uart_port *tup)
{
struct device_node *np = pdev->dev.of_node;
- u32 of_dma[2];
int port;
- if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
- of_dma, 2) >= 0) {
- tup->dma_req_sel = of_dma[1];
- } else {
- dev_err(&pdev->dev, "missing dma requestor in device tree\n");
- return -EINVAL;
- }
-
port = of_alias_get_id(np, "serial");
if (port < 0) {
dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
@@ -1320,6 +1308,12 @@ static int tegra_uart_probe(struct platform_device *pdev)
return PTR_ERR(tup->uart_clk);
}
+ tup->rst = devm_reset_control_get(&pdev->dev, "serial");
+ if (IS_ERR(tup->rst)) {
+ dev_err(&pdev->dev, "Couldn't get the reset\n");
+ return PTR_ERR(tup->rst);
+ }
+
u->iotype = UPIO_MEM32;
u->irq = platform_get_irq(pdev, 0);
u->regshift = 2;
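Note on the serial-tegra hunks above: the Tegra-specific tegra_periph_reset_*() calls are replaced by the generic reset controller framework, and the DMA channel is looked up from the device tree instead of a raw request selector. A minimal sketch of the reset part, assuming a "serial" reset name in the DT node and a hypothetical grault_* helper:

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int grault_pulse_reset(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get(dev, "serial");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* hold the block in reset briefly, then let it run again */
	reset_control_assert(rst);
	udelay(10);
	reset_control_deassert(rst);

	return 0;
}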
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 0f02351c9239..ece2049bd270 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1830,9 +1830,13 @@ uart_set_options(struct uart_port *port, struct console *co,
/*
* Ensure that the serial console lock is initialised
* early.
+ * If this port is a console, then the spinlock is already
+ * initialised.
*/
- spin_lock_init(&port->lock);
- lockdep_set_class(&port->lock, &port_lock_key);
+ if (!(uart_console(port) && (port->cons->flags & CON_ENABLED))) {
+ spin_lock_init(&port->lock);
+ lockdep_set_class(&port->lock, &port_lock_key);
+ }
memset(&termios, 0, sizeof(struct ktermios));
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 7d8103cd3e2e..be33d2b0613b 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -23,35 +23,35 @@
#undef DEBUG
-#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/ctype.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/errno.h>
-#include <linux/sh_dma.h>
-#include <linux/timer.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial.h>
-#include <linux/major.h>
-#include <linux/string.h>
-#include <linux/sysrq.h>
#include <linux/ioport.h>
+#include <linux/major.h>
+#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/console.h>
-#include <linux/platform_device.h>
-#include <linux/serial_sci.h>
#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/cpufreq.h>
-#include <linux/clk.h>
-#include <linux/ctype.h>
-#include <linux/err.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_dma.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/string.h>
+#include <linux/sysrq.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
#ifdef CONFIG_SUPERH
#include <asm/sh_bios.h>
@@ -59,11 +59,32 @@
#include "sh-sci.h"
+/* Offsets into the sci_port->irqs array */
+enum {
+ SCIx_ERI_IRQ,
+ SCIx_RXI_IRQ,
+ SCIx_TXI_IRQ,
+ SCIx_BRI_IRQ,
+ SCIx_NR_IRQS,
+
+ SCIx_MUX_IRQ = SCIx_NR_IRQS, /* special case */
+};
+
+#define SCIx_IRQ_IS_MUXED(port) \
+ ((port)->irqs[SCIx_ERI_IRQ] == \
+ (port)->irqs[SCIx_RXI_IRQ]) || \
+ ((port)->irqs[SCIx_ERI_IRQ] && \
+ ((port)->irqs[SCIx_RXI_IRQ] < 0))
+
struct sci_port {
struct uart_port port;
/* Platform configuration */
struct plat_sci_port *cfg;
+ int overrun_bit;
+ unsigned int error_mask;
+ unsigned int sampling_rate;
+
/* Break timer */
struct timer_list break_timer;
@@ -74,8 +95,8 @@ struct sci_port {
/* Function clock */
struct clk *fclk;
+ int irqs[SCIx_NR_IRQS];
char *irqstr[SCIx_NR_IRQS];
- char *gpiostr[SCIx_NR_FNS];
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
@@ -421,9 +442,9 @@ static void sci_port_enable(struct sci_port *sci_port)
pm_runtime_get_sync(sci_port->port.dev);
- clk_enable(sci_port->iclk);
+ clk_prepare_enable(sci_port->iclk);
sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
- clk_enable(sci_port->fclk);
+ clk_prepare_enable(sci_port->fclk);
}
static void sci_port_disable(struct sci_port *sci_port)
@@ -431,8 +452,16 @@ static void sci_port_disable(struct sci_port *sci_port)
if (!sci_port->port.dev)
return;
- clk_disable(sci_port->fclk);
- clk_disable(sci_port->iclk);
+ /* Cancel the break timer to ensure that the timer handler will not try
+ * to access the hardware with clocks and power disabled. Reset the
+ * break flag to make the break debouncing state machine ready for the
+ * next break.
+ */
+ del_timer_sync(&sci_port->break_timer);
+ sci_port->break_flag = 0;
+
+ clk_disable_unprepare(sci_port->fclk);
+ clk_disable_unprepare(sci_port->iclk);
pm_runtime_put_sync(sci_port->port.dev);
}
@@ -557,7 +586,7 @@ static inline int sci_rxd_in(struct uart_port *port)
return 1;
/* Cast for ARM damage */
- return !!__raw_readb((void __iomem *)s->cfg->port_reg);
+ return !!__raw_readb((void __iomem *)(uintptr_t)s->cfg->port_reg);
}
/* ********************************************************************** *
@@ -733,8 +762,6 @@ static void sci_break_timer(unsigned long data)
{
struct sci_port *port = (struct sci_port *)data;
- sci_port_enable(port);
-
if (sci_rxd_in(&port->port) == 0) {
port->break_flag = 1;
sci_schedule_break_timer(port);
@@ -744,8 +771,6 @@ static void sci_break_timer(unsigned long data)
sci_schedule_break_timer(port);
} else
port->break_flag = 0;
-
- sci_port_disable(port);
}
static int sci_handle_errors(struct uart_port *port)
@@ -755,19 +780,15 @@ static int sci_handle_errors(struct uart_port *port)
struct tty_port *tport = &port->state->port;
struct sci_port *s = to_sci_port(port);
- /*
- * Handle overruns, if supported.
- */
- if (s->cfg->overrun_bit != SCIx_NOT_SUPPORTED) {
- if (status & (1 << s->cfg->overrun_bit)) {
- port->icount.overrun++;
+ /* Handle overruns */
+ if (status & (1 << s->overrun_bit)) {
+ port->icount.overrun++;
- /* overrun error */
- if (tty_insert_flip_char(tport, 0, TTY_OVERRUN))
- copied++;
+ /* overrun error */
+ if (tty_insert_flip_char(tport, 0, TTY_OVERRUN))
+ copied++;
- dev_notice(port->dev, "overrun error");
- }
+ dev_notice(port->dev, "overrun error");
}
if (status & SCxSR_FER(port)) {
@@ -829,7 +850,7 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
if (!reg->size)
return 0;
- if ((serial_port_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) {
+ if ((serial_port_in(port, SCLSR) & (1 << s->overrun_bit))) {
serial_port_out(port, SCLSR, 0);
port->icount.overrun++;
@@ -1075,19 +1096,19 @@ static int sci_request_irq(struct sci_port *port)
for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
struct sci_irq_desc *desc;
- unsigned int irq;
+ int irq;
if (SCIx_IRQ_IS_MUXED(port)) {
i = SCIx_MUX_IRQ;
irq = up->irq;
} else {
- irq = port->cfg->irqs[i];
+ irq = port->irqs[i];
/*
* Certain port types won't support all of the
* available interrupt sources.
*/
- if (unlikely(!irq))
+ if (unlikely(irq < 0))
continue;
}
@@ -1112,7 +1133,7 @@ static int sci_request_irq(struct sci_port *port)
out_noirq:
while (--i >= 0)
- free_irq(port->cfg->irqs[i], port);
+ free_irq(port->irqs[i], port);
out_nomem:
while (--j >= 0)
@@ -1130,16 +1151,16 @@ static void sci_free_irq(struct sci_port *port)
* IRQ first.
*/
for (i = 0; i < SCIx_NR_IRQS; i++) {
- unsigned int irq = port->cfg->irqs[i];
+ int irq = port->irqs[i];
/*
* Certain port types won't support all of the available
* interrupt sources.
*/
- if (unlikely(!irq))
+ if (unlikely(irq < 0))
continue;
- free_irq(port->cfg->irqs[i], port);
+ free_irq(port->irqs[i], port);
kfree(port->irqstr[i]);
if (SCIx_IRQ_IS_MUXED(port)) {
@@ -1149,67 +1170,6 @@ static void sci_free_irq(struct sci_port *port)
}
}
-static const char *sci_gpio_names[SCIx_NR_FNS] = {
- "sck", "rxd", "txd", "cts", "rts",
-};
-
-static const char *sci_gpio_str(unsigned int index)
-{
- return sci_gpio_names[index];
-}
-
-static void sci_init_gpios(struct sci_port *port)
-{
- struct uart_port *up = &port->port;
- int i;
-
- if (!port->cfg)
- return;
-
- for (i = 0; i < SCIx_NR_FNS; i++) {
- const char *desc;
- int ret;
-
- if (!port->cfg->gpios[i])
- continue;
-
- desc = sci_gpio_str(i);
-
- port->gpiostr[i] = kasprintf(GFP_KERNEL, "%s:%s",
- dev_name(up->dev), desc);
-
- /*
- * If we've failed the allocation, we can still continue
- * on with a NULL string.
- */
- if (!port->gpiostr[i])
- dev_notice(up->dev, "%s string allocation failure\n",
- desc);
-
- ret = gpio_request(port->cfg->gpios[i], port->gpiostr[i]);
- if (unlikely(ret != 0)) {
- dev_notice(up->dev, "failed %s gpio request\n", desc);
-
- /*
- * If we can't get the GPIO for whatever reason,
- * no point in keeping the verbose string around.
- */
- kfree(port->gpiostr[i]);
- }
- }
-}
-
-static void sci_free_gpios(struct sci_port *port)
-{
- int i;
-
- for (i = 0; i < SCIx_NR_FNS; i++)
- if (port->cfg->gpios[i]) {
- gpio_free(port->cfg->gpios[i]);
- kfree(port->gpiostr[i]);
- }
-}
-
static unsigned int sci_tx_empty(struct uart_port *port)
{
unsigned short status = serial_port_in(port, SCxSR);
@@ -1309,7 +1269,7 @@ static int sci_dma_rx_push(struct sci_port *s, size_t count)
}
if (room < count)
- dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
+ dev_warn(port->dev, "Rx overrun: dropping %zu bytes\n",
count - room);
if (!room)
return room;
@@ -1442,7 +1402,7 @@ static void work_fn_rx(struct work_struct *work)
int count;
chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
- dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
+ dev_dbg(port->dev, "Read %zu bytes with cookie %d\n",
sh_desc->partial, sh_desc->cookie);
spin_lock_irqsave(&port->lock, flags);
@@ -1655,7 +1615,7 @@ static void rx_timer_fn(unsigned long arg)
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
scr &= ~0x4000;
- enable_irq(s->cfg->irqs[1]);
+ enable_irq(s->irqs[SCIx_RXI_IRQ]);
}
serial_port_out(port, SCSCR, scr | SCSCR_RIE);
dev_dbg(port->dev, "DMA Rx timed out\n");
@@ -1691,16 +1651,17 @@ static void sci_request_dma(struct uart_port *port)
s->chan_tx = chan;
sg_init_table(&s->sg_tx, 1);
/* UART circular tx buffer is an aligned page. */
- BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
+ BUG_ON((uintptr_t)port->state->xmit.buf & ~PAGE_MASK);
sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
- UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK);
+ UART_XMIT_SIZE,
+ (uintptr_t)port->state->xmit.buf & ~PAGE_MASK);
nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
if (!nent)
sci_tx_dma_release(s, false);
else
- dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
- sg_dma_len(&s->sg_tx),
- port->state->xmit.buf, sg_dma_address(&s->sg_tx));
+ dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
+ sg_dma_len(&s->sg_tx), port->state->xmit.buf,
+ &sg_dma_address(&s->sg_tx));
s->sg_len_tx = nent;
@@ -1740,7 +1701,7 @@ static void sci_request_dma(struct uart_port *port)
sg_init_table(sg, 1);
sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
- (int)buf[i] & ~PAGE_MASK);
+ (uintptr_t)buf[i] & ~PAGE_MASK);
sg_dma_address(sg) = dma[i];
}
@@ -1808,21 +1769,11 @@ static void sci_shutdown(struct uart_port *port)
sci_free_irq(s);
}
-static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
+static unsigned int sci_scbrr_calc(struct sci_port *s, unsigned int bps,
unsigned long freq)
{
- switch (algo_id) {
- case SCBRR_ALGO_1:
- return ((freq + 16 * bps) / (16 * bps) - 1);
- case SCBRR_ALGO_2:
- return ((freq + 16 * bps) / (32 * bps) - 1);
- case SCBRR_ALGO_3:
- return (((freq * 2) + 16 * bps) / (16 * bps) - 1);
- case SCBRR_ALGO_4:
- return (((freq * 2) + 16 * bps) / (32 * bps) - 1);
- case SCBRR_ALGO_5:
- return (((freq * 1000 / 32) / bps) - 1);
- }
+ if (s->sampling_rate)
+ return DIV_ROUND_CLOSEST(freq, s->sampling_rate * bps) - 1;
/* Warn, but use a safe default */
WARN_ON(1);
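
As a worked example of the unified divisor above (the clock and baud figures are assumptions, not taken from this patch): with a 14.7456 MHz functional clock, 16x oversampling and 115200 bps the register value comes out to 7.

#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST() */

static unsigned int example_scbrr(void)
{
	unsigned long freq = 14745600;		/* 14.7456 MHz, assumed */
	unsigned int sampling_rate = 16, bps = 115200;

	/* 14745600 / (16 * 115200) = 8, minus 1 => SCBRR = 7 */
	return DIV_ROUND_CLOSEST(freq, sampling_rate * bps) - 1;
}
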
@@ -1903,12 +1854,11 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
if (likely(baud && port->uartclk)) {
- if (s->cfg->scbrr_algo_id == SCBRR_ALGO_6) {
+ if (s->cfg->type == PORT_HSCIF) {
sci_baud_calc_hscif(baud, port->uartclk, &t, &srr,
&cks);
} else {
- t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud,
- port->uartclk);
+ t = sci_scbrr_calc(s, baud, port->uartclk);
for (cks = 0; t >= 256 && cks <= 3; cks++)
t >>= 2;
}
@@ -2115,10 +2065,6 @@ static void sci_config_port(struct uart_port *port, int flags)
static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
{
- struct sci_port *s = to_sci_port(port);
-
- if (ser->irq != s->cfg->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
- return -EINVAL;
if (ser->baud_base < 2400)
/* No paper tape reader for Mitch.. */
return -EINVAL;
@@ -2151,11 +2097,13 @@ static struct uart_ops sci_uart_ops = {
};
static int sci_init_single(struct platform_device *dev,
- struct sci_port *sci_port,
- unsigned int index,
- struct plat_sci_port *p)
+ struct sci_port *sci_port, unsigned int index,
+ struct plat_sci_port *p, bool early)
{
struct uart_port *port = &sci_port->port;
+ const struct resource *res;
+ unsigned int sampling_rate;
+ unsigned int i;
int ret;
sci_port->cfg = p;
@@ -2164,31 +2112,76 @@ static int sci_init_single(struct platform_device *dev,
port->iotype = UPIO_MEM;
port->line = index;
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -ENOMEM;
+
+ port->mapbase = res->start;
+
+ for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i)
+ sci_port->irqs[i] = platform_get_irq(dev, i);
+
+ /* The SCI generates several interrupts. They can be muxed together or
+ * connected to different interrupt lines. In the muxed case only one
+ * interrupt resource is specified. In the non-muxed case three or four
+ * interrupt resources are specified, as the BRI interrupt is optional.
+ */
+ if (sci_port->irqs[0] < 0)
+ return -ENXIO;
+
+ if (sci_port->irqs[1] < 0) {
+ sci_port->irqs[1] = sci_port->irqs[0];
+ sci_port->irqs[2] = sci_port->irqs[0];
+ sci_port->irqs[3] = sci_port->irqs[0];
+ }
+
+ if (p->regtype == SCIx_PROBE_REGTYPE) {
+ ret = sci_probe_regmap(p);
+ if (unlikely(ret))
+ return ret;
+ }
+
switch (p->type) {
case PORT_SCIFB:
port->fifosize = 256;
+ sci_port->overrun_bit = 9;
+ sampling_rate = 16;
break;
case PORT_HSCIF:
port->fifosize = 128;
+ sampling_rate = 0;
+ sci_port->overrun_bit = 0;
break;
case PORT_SCIFA:
port->fifosize = 64;
+ sci_port->overrun_bit = 9;
+ sampling_rate = 16;
break;
case PORT_SCIF:
port->fifosize = 16;
+ if (p->regtype == SCIx_SH7705_SCIF_REGTYPE) {
+ sci_port->overrun_bit = 9;
+ sampling_rate = 16;
+ } else {
+ sci_port->overrun_bit = 0;
+ sampling_rate = 32;
+ }
break;
default:
port->fifosize = 1;
+ sci_port->overrun_bit = 5;
+ sampling_rate = 32;
break;
}
- if (p->regtype == SCIx_PROBE_REGTYPE) {
- ret = sci_probe_regmap(p);
- if (unlikely(ret))
- return ret;
- }
+ /* The SCIFA ports on sh7723 and sh7724 need a custom sampling rate that
+ * doesn't match the SoC datasheet; this should be investigated. Let
+ * platform data override the sampling rate for now.
+ */
+ sci_port->sampling_rate = p->sampling_rate ? p->sampling_rate
+ : sampling_rate;
- if (dev) {
+ if (!early) {
sci_port->iclk = clk_get(&dev->dev, "sci_ick");
if (IS_ERR(sci_port->iclk)) {
sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
@@ -2208,8 +2201,6 @@ static int sci_init_single(struct platform_device *dev,
port->dev = &dev->dev;
- sci_init_gpios(sci_port);
-
pm_runtime_enable(&dev->dev);
}
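
To illustrate the IRQ resource convention handled in sci_init_single() above (the address and IRQ number are hypothetical): a port whose ERI/RXI/TXI/BRI sources share one line lists only that single interrupt, and the driver copies it into all four slots so SCIx_IRQ_IS_MUXED() evaluates true.

#include <linux/ioport.h>

static struct resource example_scif_resources[] = {
	DEFINE_RES_MEM(0xe6c40000, 0x100),	/* register window, hypothetical */
	DEFINE_RES_IRQ(102),			/* single muxed ERI/RXI/TXI/BRI line */
};
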
@@ -2220,32 +2211,22 @@ static int sci_init_single(struct platform_device *dev,
/*
* Establish some sensible defaults for the error detection.
*/
- if (!p->error_mask)
- p->error_mask = (p->type == PORT_SCI) ?
+ sci_port->error_mask = (p->type == PORT_SCI) ?
SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK;
/*
* Establish sensible defaults for the overrun detection, unless
* the part has explicitly disabled support for it.
*/
- if (p->overrun_bit != SCIx_NOT_SUPPORTED) {
- if (p->type == PORT_SCI)
- p->overrun_bit = 5;
- else if (p->scbrr_algo_id == SCBRR_ALGO_4)
- p->overrun_bit = 9;
- else
- p->overrun_bit = 0;
- /*
- * Make the error mask inclusive of overrun detection, if
- * supported.
- */
- p->error_mask |= (1 << p->overrun_bit);
- }
+ /*
+ * Make the error mask inclusive of overrun detection, if
+ * supported.
+ */
+ sci_port->error_mask |= 1 << sci_port->overrun_bit;
- port->mapbase = p->mapbase;
port->type = p->type;
- port->flags = p->flags;
+ port->flags = UPF_FIXED_PORT | p->flags;
port->regshift = p->regshift;
/*
@@ -2255,7 +2236,7 @@ static int sci_init_single(struct platform_device *dev,
*
* For the muxed case there's nothing more to do.
*/
- port->irq = p->irqs[SCIx_RXI_IRQ];
+ port->irq = sci_port->irqs[SCIx_RXI_IRQ];
port->irqflags = 0;
port->serial_in = sci_serial_in;
@@ -2270,8 +2251,6 @@ static int sci_init_single(struct platform_device *dev,
static void sci_cleanup_single(struct sci_port *port)
{
- sci_free_gpios(port);
-
clk_put(port->iclk);
clk_put(port->fclk);
@@ -2387,7 +2366,7 @@ static int sci_probe_earlyprintk(struct platform_device *pdev)
early_serial_console.index = pdev->id;
- sci_init_single(NULL, &sci_ports[pdev->id], pdev->id, cfg);
+ sci_init_single(pdev, &sci_ports[pdev->id], pdev->id, cfg, true);
serial_console_setup(&early_serial_console, early_serial_buf);
@@ -2437,6 +2416,83 @@ static int sci_remove(struct platform_device *dev)
return 0;
}
+struct sci_port_info {
+ unsigned int type;
+ unsigned int regtype;
+};
+
+static const struct of_device_id of_sci_match[] = {
+ {
+ .compatible = "renesas,scif",
+ .data = (void *)&(const struct sci_port_info) {
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_REGTYPE,
+ },
+ }, {
+ .compatible = "renesas,scifa",
+ .data = (void *)&(const struct sci_port_info) {
+ .type = PORT_SCIFA,
+ .regtype = SCIx_SCIFA_REGTYPE,
+ },
+ }, {
+ .compatible = "renesas,scifb",
+ .data = (void *)&(const struct sci_port_info) {
+ .type = PORT_SCIFB,
+ .regtype = SCIx_SCIFB_REGTYPE,
+ },
+ }, {
+ .compatible = "renesas,hscif",
+ .data = (void *)&(const struct sci_port_info) {
+ .type = PORT_HSCIF,
+ .regtype = SCIx_HSCIF_REGTYPE,
+ },
+ }, {
+ /* Terminator */
+ },
+};
+MODULE_DEVICE_TABLE(of, of_sci_match);
+
+static struct plat_sci_port *
+sci_parse_dt(struct platform_device *pdev, unsigned int *dev_id)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
+ const struct sci_port_info *info;
+ struct plat_sci_port *p;
+ int id;
+
+ if (!IS_ENABLED(CONFIG_OF) || !np)
+ return NULL;
+
+ match = of_match_node(of_sci_match, pdev->dev.of_node);
+ if (!match)
+ return NULL;
+
+ info = match->data;
+
+ p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL);
+ if (!p) {
+ dev_err(&pdev->dev, "failed to allocate DT config data\n");
+ return NULL;
+ }
+
+ /* Get the line number for the aliases node. */
+ id = of_alias_get_id(np, "serial");
+ if (id < 0) {
+ dev_err(&pdev->dev, "failed to get alias id (%d)\n", id);
+ return NULL;
+ }
+
+ *dev_id = id;
+
+ p->flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
+ p->type = info->type;
+ p->regtype = info->regtype;
+ p->scscr = SCSCR_RE | SCSCR_TE;
+
+ return p;
+}
+
static int sci_probe_single(struct platform_device *dev,
unsigned int index,
struct plat_sci_port *p,
@@ -2454,7 +2510,7 @@ static int sci_probe_single(struct platform_device *dev,
return -EINVAL;
}
- ret = sci_init_single(dev, sciport, index, p);
+ ret = sci_init_single(dev, sciport, index, p, false);
if (ret)
return ret;
@@ -2469,8 +2525,9 @@ static int sci_probe_single(struct platform_device *dev,
static int sci_probe(struct platform_device *dev)
{
- struct plat_sci_port *p = dev_get_platdata(&dev->dev);
- struct sci_port *sp = &sci_ports[dev->id];
+ struct plat_sci_port *p;
+ struct sci_port *sp;
+ unsigned int dev_id;
int ret;
/*
@@ -2481,9 +2538,24 @@ static int sci_probe(struct platform_device *dev)
if (is_early_platform_device(dev))
return sci_probe_earlyprintk(dev);
+ if (dev->dev.of_node) {
+ p = sci_parse_dt(dev, &dev_id);
+ if (p == NULL)
+ return -EINVAL;
+ } else {
+ p = dev->dev.platform_data;
+ if (p == NULL) {
+ dev_err(&dev->dev, "no platform data supplied\n");
+ return -EINVAL;
+ }
+
+ dev_id = dev->id;
+ }
+
+ sp = &sci_ports[dev_id];
platform_set_drvdata(dev, sp);
- ret = sci_probe_single(dev, dev->id, p, sp);
+ ret = sci_probe_single(dev, dev_id, p, sp);
if (ret)
return ret;
@@ -2535,6 +2607,7 @@ static struct platform_driver sci_driver = {
.name = "sh-sci",
.owner = THIS_MODULE,
.pm = &sci_dev_pm_ops,
+ .of_match_table = of_match_ptr(of_sci_match),
},
};
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index 5aca7364634c..d5db81a0a430 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -9,7 +9,7 @@
#define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER)
#define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK)
-#define SCxSR_ERRORS(port) (to_sci_port(port)->cfg->error_mask)
+#define SCxSR_ERRORS(port) (to_sci_port(port)->error_mask)
#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
defined(CONFIG_CPU_SUBTYPE_SH7720) || \
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index f186a8fb8887..b7bfe24d4ebc 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -524,9 +524,11 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
unsigned int count;
unsigned long flags;
+ struct dma_tx_state tx_state;
spin_lock_irqsave(&sirfport->rx_lock, flags);
- while (sirfport->rx_completed != sirfport->rx_issued) {
+ while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
+ sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
SIRFSOC_RX_DMA_BUF_SIZE);
sirfport->rx_completed++;
@@ -540,8 +542,10 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
SIRFUART_IO_MODE);
- sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+ spin_lock(&port->lock);
+ sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
+ spin_unlock(&port->lock);
if (sirfport->rx_io_count == 4) {
spin_lock_irqsave(&sirfport->rx_lock, flags);
sirfport->rx_io_count = 0;
@@ -709,8 +713,10 @@ static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
unsigned long flags;
+ struct dma_tx_state tx_state;
spin_lock_irqsave(&sirfport->rx_lock, flags);
- while (sirfport->rx_completed != sirfport->rx_issued) {
+ while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
+ sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
SIRFSOC_RX_DMA_BUF_SIZE);
if (rd_regl(port, ureg->sirfsoc_int_en_reg) &
@@ -1033,6 +1039,16 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
spin_unlock_irqrestore(&port->lock, flags);
}
+static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ if (!state)
+ clk_prepare_enable(sirfport->clk);
+ else
+ clk_disable_unprepare(sirfport->clk);
+}
+
static unsigned int sirfsoc_uart_init_tx_dma(struct uart_port *port)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
@@ -1264,6 +1280,7 @@ static struct uart_ops sirfsoc_uart_ops = {
.startup = sirfsoc_uart_startup,
.shutdown = sirfsoc_uart_shutdown,
.set_termios = sirfsoc_uart_set_termios,
+ .pm = sirfsoc_uart_pm,
.type = sirfsoc_uart_type,
.release_port = sirfsoc_uart_release_port,
.request_port = sirfsoc_uart_request_port,
@@ -1486,7 +1503,6 @@ usp_no_flow_control:
ret = PTR_ERR(sirfport->clk);
goto err;
}
- clk_prepare_enable(sirfport->clk);
port->uartclk = clk_get_rate(sirfport->clk);
port->ops = &sirfsoc_uart_ops;
@@ -1502,7 +1518,6 @@ usp_no_flow_control:
return 0;
port_err:
- clk_disable_unprepare(sirfport->clk);
clk_put(sirfport->clk);
err:
return ret;
@@ -1512,38 +1527,42 @@ static int sirfsoc_uart_remove(struct platform_device *pdev)
{
struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
struct uart_port *port = &sirfport->port;
- clk_disable_unprepare(sirfport->clk);
clk_put(sirfport->clk);
uart_remove_one_port(&sirfsoc_uart_drv, port);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int
-sirfsoc_uart_suspend(struct platform_device *pdev, pm_message_t state)
+sirfsoc_uart_suspend(struct device *pdev)
{
- struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
+ struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
struct uart_port *port = &sirfport->port;
uart_suspend_port(&sirfsoc_uart_drv, port);
return 0;
}
-static int sirfsoc_uart_resume(struct platform_device *pdev)
+static int sirfsoc_uart_resume(struct device *pdev)
{
- struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
+ struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
struct uart_port *port = &sirfport->port;
uart_resume_port(&sirfsoc_uart_drv, port);
return 0;
}
+#endif
+
+static const struct dev_pm_ops sirfsoc_uart_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume)
+};
static struct platform_driver sirfsoc_uart_driver = {
.probe = sirfsoc_uart_probe,
.remove = sirfsoc_uart_remove,
- .suspend = sirfsoc_uart_suspend,
- .resume = sirfsoc_uart_resume,
.driver = {
.name = SIRFUART_PORT_NAME,
.owner = THIS_MODULE,
.of_match_table = sirfsoc_uart_ids,
+ .pm = &sirfsoc_uart_pm_ops,
},
};
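
A rough sketch of the dev_pm_ops pattern adopted above (the function names are placeholders): the sleep callbacks take a struct device, and SET_SYSTEM_SLEEP_PM_OPS() drops them entirely when CONFIG_PM_SLEEP is disabled, which is why the #ifdef around the handlers is sufficient.

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* bring the hardware back */
	return 0;
}
#endif

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
};
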
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index e46e9f3f19b9..f619ad5b5eae 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -240,6 +240,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
continue;
}
+#ifdef SUPPORT_SYSRQ
/*
* uart_handle_sysrq_char() doesn't work if
* spinlocked, for some reason
@@ -253,6 +254,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
}
spin_lock(&port->lock);
}
+#endif
port->icount.rx++;
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index e1ce141bad5e..5ae14b46cce0 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -3404,8 +3404,8 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
/* If port is closing, signal caller to try again */
if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
- if (info->port.flags & ASYNC_CLOSING)
- interruptible_sleep_on(&info->port.close_wait);
+ wait_event_interruptible_tty(tty, info->port.close_wait,
+ !(info->port.flags & ASYNC_CLOSING));
retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS);
goto cleanup;
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 1abf946463f6..c359a91f7346 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -674,8 +674,8 @@ static int open(struct tty_struct *tty, struct file *filp)
/* If port is closing, signal caller to try again */
if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
- if (info->port.flags & ASYNC_CLOSING)
- interruptible_sleep_on(&info->port.close_wait);
+ wait_event_interruptible_tty(tty, info->port.close_wait,
+ !(info->port.flags & ASYNC_CLOSING));
retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS);
goto cleanup;
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index dc6e96996ead..144202eef6fe 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -754,8 +754,8 @@ static int open(struct tty_struct *tty, struct file *filp)
/* If port is closing, signal caller to try again */
if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
- if (info->port.flags & ASYNC_CLOSING)
- interruptible_sleep_on(&info->port.close_wait);
+ wait_event_interruptible_tty(tty, info->port.close_wait,
+ !(info->port.flags & ASYNC_CLOSING));
retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS);
goto cleanup;
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index a4fdce74f883..b0e540137e39 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -67,7 +67,7 @@ static void tty_audit_log(const char *description, int major, int minor,
struct task_struct *tsk = current;
uid_t uid = from_kuid(&init_user_ns, task_uid(tsk));
uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk));
- u32 sessionid = audit_get_sessionid(tsk);
+ unsigned int sessionid = audit_get_sessionid(tsk);
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY);
if (ab) {
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index c043136fbe51..765125dff20e 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -11,7 +11,6 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
@@ -26,7 +25,7 @@
* Byte threshold to limit memory consumption for flip buffers.
* The actual memory limit is > 2x this amount.
*/
-#define TTYB_MEM_LIMIT 65536
+#define TTYB_DEFAULT_MEM_LIMIT 65536
/*
* We default to dicing tty buffer allocations to this many characters
@@ -89,9 +88,10 @@ void tty_buffer_unlock_exclusive(struct tty_port *port)
int tty_buffer_space_avail(struct tty_port *port)
{
- int space = TTYB_MEM_LIMIT - atomic_read(&port->buf.memory_used);
+ int space = port->buf.mem_limit - atomic_read(&port->buf.mem_used);
return max(space, 0);
}
+EXPORT_SYMBOL_GPL(tty_buffer_space_avail);
static void tty_buffer_reset(struct tty_buffer *p, size_t size)
{
@@ -100,6 +100,7 @@ static void tty_buffer_reset(struct tty_buffer *p, size_t size)
p->next = NULL;
p->commit = 0;
p->read = 0;
+ p->flags = 0;
}
/**
@@ -129,7 +130,7 @@ void tty_buffer_free_all(struct tty_port *port)
buf->head = &buf->sentinel;
buf->tail = &buf->sentinel;
- atomic_set(&buf->memory_used, 0);
+ atomic_set(&buf->mem_used, 0);
}
/**
@@ -162,7 +163,7 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
/* Should possibly check if this fails for the largest buffer we
have queued and recycle that ? */
- if (atomic_read(&port->buf.memory_used) > TTYB_MEM_LIMIT)
+ if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
return NULL;
p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
if (p == NULL)
@@ -170,7 +171,7 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
found:
tty_buffer_reset(p, size);
- atomic_add(size, &port->buf.memory_used);
+ atomic_add(size, &port->buf.mem_used);
return p;
}
@@ -188,7 +189,7 @@ static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
struct tty_bufhead *buf = &port->buf;
/* Dumb strategy for now - should keep some stats */
- WARN_ON(atomic_sub_return(b->size, &buf->memory_used) < 0);
+ WARN_ON(atomic_sub_return(b->size, &buf->mem_used) < 0);
if (b->size > MIN_TTYB_SIZE)
kfree(b);
@@ -200,9 +201,7 @@ static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
* tty_buffer_flush - flush full tty buffers
* @tty: tty to flush
*
- * flush all the buffers containing receive data. If the buffer is
- * being processed by flush_to_ldisc then we defer the processing
- * to that function
+ * flush all the buffers containing receive data.
*
* Locking: takes buffer lock to ensure single-threaded flip buffer
* 'consumer'
@@ -230,31 +229,49 @@ void tty_buffer_flush(struct tty_struct *tty)
* tty_buffer_request_room - grow tty buffer if needed
* @tty: tty structure
* @size: size desired
+ * @flags: buffer flags if new buffer allocated (default = 0)
*
* Make at least size bytes of linear space available for the tty
* buffer. If we fail return the size we managed to find.
+ *
+ * Will change over to a new buffer if the current buffer is encoded as
+ * TTY_NORMAL (so has no flags buffer) and the new buffer requires
+ * a flags buffer.
*/
-int tty_buffer_request_room(struct tty_port *port, size_t size)
+static int __tty_buffer_request_room(struct tty_port *port, size_t size,
+ int flags)
{
struct tty_bufhead *buf = &port->buf;
struct tty_buffer *b, *n;
- int left;
+ int left, change;
b = buf->tail;
- left = b->size - b->used;
+ if (b->flags & TTYB_NORMAL)
+ left = 2 * b->size - b->used;
+ else
+ left = b->size - b->used;
- if (left < size) {
+ change = (b->flags & TTYB_NORMAL) && (~flags & TTYB_NORMAL);
+ if (change || left < size) {
/* This is the slow path - looking for new buffers to use */
if ((n = tty_buffer_alloc(port, size)) != NULL) {
+ n->flags = flags;
buf->tail = n;
b->commit = b->used;
smp_mb();
b->next = n;
- } else
+ } else if (change)
+ size = 0;
+ else
size = left;
}
return size;
}
+
+int tty_buffer_request_room(struct tty_port *port, size_t size)
+{
+ return __tty_buffer_request_room(port, size, 0);
+}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
/**
@@ -274,12 +291,14 @@ int tty_insert_flip_string_fixed_flag(struct tty_port *port,
int copied = 0;
do {
int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
- int space = tty_buffer_request_room(port, goal);
+ int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
+ int space = __tty_buffer_request_room(port, goal, flags);
struct tty_buffer *tb = port->buf.tail;
if (unlikely(space == 0))
break;
memcpy(char_buf_ptr(tb, tb->used), chars, space);
- memset(flag_buf_ptr(tb, tb->used), flag, space);
+ if (~tb->flags & TTYB_NORMAL)
+ memset(flag_buf_ptr(tb, tb->used), flag, space);
tb->used += space;
copied += space;
chars += space;
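
A sketch of how a driver benefits from the TTYB_NORMAL path added above (the names are hypothetical): when every received byte is TTY_NORMAL, the plain string helper is enough, the flag array is never written, and its space can hold data instead.

#include <linux/tty_flip.h>

static void example_rx(struct tty_port *port, const unsigned char *data,
		       size_t len)
{
	/* tty_insert_flip_string() implies TTY_NORMAL for every character */
	size_t copied = tty_insert_flip_string(port, data, len);

	if (copied != len)
		pr_warn("flip buffer full, dropped %zu bytes\n", len - copied);

	tty_flip_buffer_push(port);
}
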
@@ -362,52 +381,28 @@ EXPORT_SYMBOL(tty_schedule_flip);
int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
size_t size)
{
- int space = tty_buffer_request_room(port, size);
+ int space = __tty_buffer_request_room(port, size, TTYB_NORMAL);
if (likely(space)) {
struct tty_buffer *tb = port->buf.tail;
*chars = char_buf_ptr(tb, tb->used);
- memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space);
+ if (~tb->flags & TTYB_NORMAL)
+ memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space);
tb->used += space;
}
return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
-/**
- * tty_prepare_flip_string_flags - make room for characters
- * @port: tty port
- * @chars: return pointer for character write area
- * @flags: return pointer for status flag write area
- * @size: desired size
- *
- * Prepare a block of space in the buffer for data. Returns the length
- * available and buffer pointer to the space which is now allocated and
- * accounted for as ready for characters. This is used for drivers
- * that need their own block copy routines into the buffer. There is no
- * guarantee the buffer is a DMA target!
- */
-
-int tty_prepare_flip_string_flags(struct tty_port *port,
- unsigned char **chars, char **flags, size_t size)
-{
- int space = tty_buffer_request_room(port, size);
- if (likely(space)) {
- struct tty_buffer *tb = port->buf.tail;
- *chars = char_buf_ptr(tb, tb->used);
- *flags = flag_buf_ptr(tb, tb->used);
- tb->used += space;
- }
- return space;
-}
-EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
-
static int
receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
{
struct tty_ldisc *disc = tty->ldisc;
unsigned char *p = char_buf_ptr(head, head->read);
- char *f = flag_buf_ptr(head, head->read);
+ char *f = NULL;
+
+ if (~head->flags & TTYB_NORMAL)
+ f = flag_buf_ptr(head, head->read);
if (disc->ops->receive_buf2)
count = disc->ops->receive_buf2(tty, p, f, count);
@@ -533,7 +528,25 @@ void tty_buffer_init(struct tty_port *port)
buf->head = &buf->sentinel;
buf->tail = &buf->sentinel;
init_llist_head(&buf->free);
- atomic_set(&buf->memory_used, 0);
+ atomic_set(&buf->mem_used, 0);
atomic_set(&buf->priority, 0);
INIT_WORK(&buf->work, flush_to_ldisc);
+ buf->mem_limit = TTYB_DEFAULT_MEM_LIMIT;
+}
+
+/**
+ * tty_buffer_set_limit - change the tty buffer memory limit
+ * @port: tty port to change
+ * @limit: new memory limit in bytes
+ *
+ * Change the tty buffer memory limit.
+ * Must be called before the other tty buffer functions are used.
+ */
+
+int tty_buffer_set_limit(struct tty_port *port, int limit)
+{
+ if (limit < MIN_TTYB_SIZE)
+ return -EINVAL;
+ port->buf.mem_limit = limit;
+ return 0;
}
+EXPORT_SYMBOL_GPL(tty_buffer_set_limit);
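
A sketch of the intended use of the new export (the 256 KiB figure is illustrative only): a driver that expects large receive bursts raises the per-port budget right after initialising the port, before any data is queued.

#include <linux/tty.h>

static void example_port_setup(struct tty_port *port)
{
	tty_port_init(port);

	/* allow up to 256 KiB of buffered receive data on this port */
	if (tty_buffer_set_limit(port, 256 * 1024))
		pr_warn("could not raise the tty buffer limit\n");
}
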
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index c74a00ad7add..bd2715a9d8e5 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1267,16 +1267,17 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p)
* @p: output buffer of at least 7 bytes
*
* Generate a name from a driver reference and write it to the output
- * buffer.
+ * buffer. Return the number of bytes written.
*
* Locking: None
*/
-static void tty_line_name(struct tty_driver *driver, int index, char *p)
+static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
{
if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE)
- strcpy(p, driver->name);
+ return sprintf(p, "%s", driver->name);
else
- sprintf(p, "%s%d", driver->name, index + driver->name_base);
+ return sprintf(p, "%s%d", driver->name,
+ index + driver->name_base);
}
/**
@@ -3545,9 +3546,19 @@ static ssize_t show_cons_active(struct device *dev,
if (i >= ARRAY_SIZE(cs))
break;
}
- while (i--)
- count += sprintf(buf + count, "%s%d%c",
- cs[i]->name, cs[i]->index, i ? ' ':'\n');
+ while (i--) {
+ struct tty_driver *driver;
+ const char *name = cs[i]->name;
+ int index = cs[i]->index;
+
+ driver = cs[i]->device(cs[i], &index);
+ if (driver) {
+ count += tty_line_name(driver, index, buf + count);
+ count += sprintf(buf + count, "%c", i ? ' ':'\n');
+ } else
+ count += sprintf(buf + count, "%s%d%c",
+ name, index, i ? ' ':'\n');
+ }
console_unlock();
return count;
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 6458e11e8e9d..2d822aa259b2 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -11,7 +11,6 @@
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/wait.h>
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 22fad8ad5ac2..d8a55e87877f 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -86,11 +86,21 @@ static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}
+/*
+ * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
+ * Returns 1 if count was successfully changed; @*old will have @new value.
+ * Returns 0 if count was not changed; @*old will have most recent sem->count
+ */
static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
{
- long tmp = *old;
- *old = atomic_long_cmpxchg(&sem->count, *old, new);
- return *old == tmp;
+ long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
+ if (tmp == *old) {
+ *old = new;
+ return 1;
+ } else {
+ *old = tmp;
+ return 0;
+ }
}
/*
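
A sketch of the caller pattern the reworked helper is written for (the delta is a hypothetical adjustment): because a failed exchange already refreshes the local copy, the retry loop needs no separate re-read.

static void example_adjust_count(struct ld_semaphore *sem, long delta)
{
	long count = ldsem_atomic_update(0, sem);	/* snapshot the count */

	/* on failure 'count' already holds the newest value, so just retry */
	while (!ldsem_cmpxchg(&count, count + delta, sem))
		;
}
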
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index c94d2349dd06..3f746c8eb0dd 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -12,7 +12,6 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 61b1137d7e56..23b5d32954bf 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1164,6 +1164,8 @@ static void csi_J(struct vc_data *vc, int vpar)
scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
vc->vc_screenbuf_size >> 1);
set_origin(vc);
+ if (CON_IS_VISIBLE(vc))
+ update_screen(vc);
/* fall through */
case 2: /* erase whole display */
count = vc->vc_cols * vc->vc_rows;
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index f7beb6eb40c7..a673e5b6a2e0 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -847,7 +847,7 @@ int __uio_register_device(struct module *owner,
info->uio_dev = idev;
if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
- ret = devm_request_irq(parent, info->irq, uio_interrupt,
+ ret = devm_request_irq(idev->dev, info->irq, uio_interrupt,
info->irq_flags, info->name, idev);
if (ret)
goto err_request_irq;
diff --git a/drivers/uio/uio_mf624.c b/drivers/uio/uio_mf624.c
index f764adbfe036..d1f95a1567bb 100644
--- a/drivers/uio/uio_mf624.c
+++ b/drivers/uio/uio_mf624.c
@@ -228,7 +228,7 @@ static void mf624_pci_remove(struct pci_dev *dev)
kfree(info);
}
-static DEFINE_PCI_DEVICE_TABLE(mf624_pci_id) = {
+static const struct pci_device_id mf624_pci_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUMUSOFT, PCI_DEVICE_ID_MF624) },
{ 0, }
};
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 2642b8a11e05..2e6b832e004b 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -94,8 +94,6 @@ source "drivers/usb/wusbcore/Kconfig"
source "drivers/usb/host/Kconfig"
-source "drivers/usb/musb/Kconfig"
-
source "drivers/usb/renesas_usbhs/Kconfig"
source "drivers/usb/class/Kconfig"
@@ -106,8 +104,12 @@ source "drivers/usb/image/Kconfig"
endif
+source "drivers/usb/musb/Kconfig"
+
source "drivers/usb/dwc3/Kconfig"
+source "drivers/usb/dwc2/Kconfig"
+
source "drivers/usb/chipidea/Kconfig"
comment "USB port drivers"
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 70d7c5b92c3c..1ae2bf39d84b 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -7,6 +7,7 @@
obj-$(CONFIG_USB) += core/
obj-$(CONFIG_USB_DWC3) += dwc3/
+obj-$(CONFIG_USB_DWC2) += dwc2/
obj-$(CONFIG_USB_MON) += mon/
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 8a7eb77233b4..813d4d3a51c6 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -35,7 +35,6 @@
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/mutex.h>
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 69461d653972..0dc8c06a7b5f 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -27,7 +27,6 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index defff43950bc..5a459377574b 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -57,7 +57,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/usb.h>
#include <linux/firmware.h>
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 25a7bfcf666c..dada0146cd7f 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -170,9 +170,9 @@ struct usbatm_control {
static void usbatm_atm_dev_close(struct atm_dev *atm_dev);
static int usbatm_atm_open(struct atm_vcc *vcc);
static void usbatm_atm_close(struct atm_vcc *vcc);
-static int usbatm_atm_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user * arg);
+static int usbatm_atm_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg);
static int usbatm_atm_send(struct atm_vcc *vcc, struct sk_buff *skb);
-static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *page);
+static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page);
static struct atmdev_ops usbatm_atm_devops = {
.dev_close = usbatm_atm_dev_close,
@@ -739,7 +739,7 @@ static void usbatm_atm_dev_close(struct atm_dev *atm_dev)
usbatm_put_instance(instance); /* taken in usbatm_atm_init */
}
-static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *page)
+static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page)
{
struct usbatm_data *instance = atm_dev->dev_data;
int left = *pos;
@@ -895,7 +895,7 @@ static void usbatm_atm_close(struct atm_vcc *vcc)
}
static int usbatm_atm_ioctl(struct atm_dev *atm_dev, unsigned int cmd,
- void __user * arg)
+ void __user *arg)
{
struct usbatm_data *instance = atm_dev->dev_data;
diff --git a/drivers/usb/c67x00/Makefile b/drivers/usb/c67x00/Makefile
index b1218683c8ec..da5f314a5de0 100644
--- a/drivers/usb/c67x00/Makefile
+++ b/drivers/usb/c67x00/Makefile
@@ -2,8 +2,6 @@
# Makefile for Cypress C67X00 USB Controller
#
-ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
-
obj-$(CONFIG_USB_C67X00_HCD) += c67x00.o
c67x00-y := c67x00-drv.o c67x00-ll-hpi.o c67x00-hcd.o c67x00-sched.o
diff --git a/drivers/usb/c67x00/c67x00-hcd.c b/drivers/usb/c67x00/c67x00-hcd.c
index 75e47b860a53..20ec4eee1ac8 100644
--- a/drivers/usb/c67x00/c67x00-hcd.c
+++ b/drivers/usb/c67x00/c67x00-hcd.c
@@ -384,6 +384,8 @@ int c67x00_hcd_probe(struct c67x00_sie *sie)
goto err2;
}
+ device_wakeup_enable(hcd->self.controller);
+
spin_lock_irqsave(&sie->lock, flags);
sie->private_data = c67x00;
sie->irq = c67x00_hcd_irq;
diff --git a/drivers/usb/c67x00/c67x00-hcd.h b/drivers/usb/c67x00/c67x00-hcd.h
index e3d493d4d61a..cf8a455a6403 100644
--- a/drivers/usb/c67x00/c67x00-hcd.h
+++ b/drivers/usb/c67x00/c67x00-hcd.h
@@ -45,7 +45,7 @@
/*
* The current implementation switches between _STD (default) and _ISO (when
* isochronous transfers are scheduled), in order to optimize the throughput
- * in normal cicrumstances, but also provide good isochronous behaviour.
+ * in normal circumstances, but also provide good isochronous behaviour.
*
* Bandwidth is described in bit time so with a 12MHz USB clock and 1ms
* frames; there are 12000 bit times per frame.
diff --git a/drivers/usb/c67x00/c67x00-ll-hpi.c b/drivers/usb/c67x00/c67x00-ll-hpi.c
index 3a1ca4dfc83a..b58151841e10 100644
--- a/drivers/usb/c67x00/c67x00-ll-hpi.c
+++ b/drivers/usb/c67x00/c67x00-ll-hpi.c
@@ -22,6 +22,7 @@
*/
#include <asm/byteorder.h>
+#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/usb/c67x00.h>
@@ -62,8 +63,8 @@ struct c67x00_lcp_int_data {
* HPI implementation
*
* The c67x00 chip also supports control via SPI or HSS serial
- * interfaces. However, this driver assumes that register access can
- * be performed from IRQ context. While this is a safe assuption with
+ * interfaces. However, this driver assumes that register access can
+ * be performed from IRQ context. While this is a safe assumption with
* the HPI interface, it is not true for the serial interfaces.
*/
@@ -73,13 +74,22 @@ struct c67x00_lcp_int_data {
#define HPI_ADDR 2
#define HPI_STATUS 3
+/*
+ * According to CY7C67300 specification (tables 140 and 141) HPI read and
+ * write cycle duration Tcyc must be at least 6T long, where T is 1/48MHz;
+ * 6T works out to 125ns.
+ */
+#define HPI_T_CYC_NS 125
+
static inline u16 hpi_read_reg(struct c67x00_device *dev, int reg)
{
+ ndelay(HPI_T_CYC_NS);
return __raw_readw(dev->hpi.base + reg * dev->hpi.regstep);
}
static inline void hpi_write_reg(struct c67x00_device *dev, int reg, u16 value)
{
+ ndelay(HPI_T_CYC_NS);
__raw_writew(value, dev->hpi.base + reg * dev->hpi.regstep);
}
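
Spelling out the arithmetic behind HPI_T_CYC_NS, for clarity:

/* illustration only:
 *   T    = 1 / 48 MHz ~= 20.83 ns
 *   Tcyc = 6 * T      =  125 ns   ==>  HPI_T_CYC_NS = 125
 * so the ndelay() before each access keeps consecutive HPI cycles at least
 * one Tcyc apart.
 */
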
diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c
index 892cc96466eb..7311ed61e99a 100644
--- a/drivers/usb/c67x00/c67x00-sched.c
+++ b/drivers/usb/c67x00/c67x00-sched.c
@@ -144,8 +144,6 @@ struct c67x00_urb_priv {
/* -------------------------------------------------------------------------- */
-#ifdef DEBUG
-
/**
* dbg_td - Dump the contents of the TD
*/
@@ -166,16 +164,8 @@ static void dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg)
dev_dbg(dev, "retry_cnt: 0x%02x\n", td->retry_cnt);
dev_dbg(dev, "residue: 0x%02x\n", td->residue);
dev_dbg(dev, "next_td_addr: 0x%04x\n", td_next_td_addr(td));
- dev_dbg(dev, "data:");
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
- td->data, td_length(td), 1);
+ dev_dbg(dev, "data: %*ph\n", td_length(td), td->data);
}
-#else /* DEBUG */
-
-static inline void
-dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg) { }
-
-#endif /* DEBUG */
/* -------------------------------------------------------------------------- */
/* Helper functions */
@@ -372,6 +362,13 @@ int c67x00_urb_enqueue(struct usb_hcd *hcd,
struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
int port = get_root_port(urb->dev)-1;
+ /* Allocate and initialize urb private data */
+ urbp = kzalloc(sizeof(*urbp), mem_flags);
+ if (!urbp) {
+ ret = -ENOMEM;
+ goto err_urbp;
+ }
+
spin_lock_irqsave(&c67x00->lock, flags);
/* Make sure host controller is running */
@@ -384,13 +381,6 @@ int c67x00_urb_enqueue(struct usb_hcd *hcd,
if (ret)
goto err_not_linked;
- /* Allocate and initialize urb private data */
- urbp = kzalloc(sizeof(*urbp), mem_flags);
- if (!urbp) {
- ret = -ENOMEM;
- goto err_urbp;
- }
-
INIT_LIST_HEAD(&urbp->hep_node);
urbp->urb = urb;
urbp->port = port;
@@ -453,11 +443,11 @@ int c67x00_urb_enqueue(struct usb_hcd *hcd,
return 0;
err_epdata:
- kfree(urbp);
-err_urbp:
usb_hcd_unlink_urb_from_ep(hcd, urb);
err_not_linked:
spin_unlock_irqrestore(&c67x00->lock, flags);
+ kfree(urbp);
+err_urbp:
return ret;
}
@@ -780,7 +770,8 @@ static int c67x00_add_iso_urb(struct c67x00_hcd *c67x00, struct urb *urb)
ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, 0,
urbp->cnt);
if (ret) {
- printk(KERN_DEBUG "create failed: %d\n", ret);
+ dev_dbg(c67x00_hcd_dev(c67x00), "create failed: %d\n",
+ ret);
urb->iso_frame_desc[urbp->cnt].actual_length = 0;
urb->iso_frame_desc[urbp->cnt].status = ret;
if (urbp->cnt + 1 == urb->number_of_packets)
diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile
index a99d980454a6..7345d2115af2 100644
--- a/drivers/usb/chipidea/Makefile
+++ b/drivers/usb/chipidea/Makefile
@@ -17,5 +17,5 @@ ifneq ($(CONFIG_PCI),)
endif
ifneq ($(CONFIG_OF),)
- obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc_imx.o usbmisc_imx.o
+ obj-$(CONFIG_USB_CHIPIDEA) += usbmisc_imx.o ci_hdrc_imx.o
endif
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 1c94fc5257f4..88b80f7728e4 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -26,6 +26,35 @@
#define ENDPT_MAX 32
/******************************************************************************
+ * REGISTERS
+ *****************************************************************************/
+/* register indices */
+enum ci_hw_regs {
+ CAP_CAPLENGTH,
+ CAP_HCCPARAMS,
+ CAP_DCCPARAMS,
+ CAP_TESTMODE,
+ CAP_LAST = CAP_TESTMODE,
+ OP_USBCMD,
+ OP_USBSTS,
+ OP_USBINTR,
+ OP_DEVICEADDR,
+ OP_ENDPTLISTADDR,
+ OP_PORTSC,
+ OP_DEVLC,
+ OP_OTGSC,
+ OP_USBMODE,
+ OP_ENDPTSETUPSTAT,
+ OP_ENDPTPRIME,
+ OP_ENDPTFLUSH,
+ OP_ENDPTSTAT,
+ OP_ENDPTCOMPLETE,
+ OP_ENDPTCTRL,
+ /* endptctrl1..15 follow */
+ OP_LAST = OP_ENDPTCTRL + ENDPT_MAX / 2,
+};
+
+/******************************************************************************
* STRUCTURES
*****************************************************************************/
/**
@@ -98,7 +127,7 @@ struct hw_bank {
void __iomem *cap;
void __iomem *op;
size_t size;
- void __iomem **regmap;
+ void __iomem *regmap[OP_LAST + 1];
};
/**
@@ -135,6 +164,7 @@ struct hw_bank {
* @id_event: indicates there is an id event, and handled at ci_otg_work
* @b_sess_valid_event: indicates there is a vbus event, and handled
* at ci_otg_work
+ * @imx28_write_fix: Freescale imx28 needs swp instruction for writing
*/
struct ci_hdrc {
struct device *dev;
@@ -173,6 +203,7 @@ struct ci_hdrc {
struct dentry *debugfs;
bool id_event;
bool b_sess_valid_event;
+ bool imx28_write_fix;
};
static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci)
@@ -209,38 +240,6 @@ static inline void ci_role_stop(struct ci_hdrc *ci)
ci->roles[role]->stop(ci);
}
-/******************************************************************************
- * REGISTERS
- *****************************************************************************/
-/* register size */
-#define REG_BITS (32)
-
-/* register indices */
-enum ci_hw_regs {
- CAP_CAPLENGTH,
- CAP_HCCPARAMS,
- CAP_DCCPARAMS,
- CAP_TESTMODE,
- CAP_LAST = CAP_TESTMODE,
- OP_USBCMD,
- OP_USBSTS,
- OP_USBINTR,
- OP_DEVICEADDR,
- OP_ENDPTLISTADDR,
- OP_PORTSC,
- OP_DEVLC,
- OP_OTGSC,
- OP_USBMODE,
- OP_ENDPTSETUPSTAT,
- OP_ENDPTPRIME,
- OP_ENDPTFLUSH,
- OP_ENDPTSTAT,
- OP_ENDPTCOMPLETE,
- OP_ENDPTCTRL,
- /* endptctrl1..15 follow */
- OP_LAST = OP_ENDPTCTRL + ENDPT_MAX / 2,
-};
-
/**
* hw_read: reads from a hw register
* @reg: register index
@@ -253,6 +252,26 @@ static inline u32 hw_read(struct ci_hdrc *ci, enum ci_hw_regs reg, u32 mask)
return ioread32(ci->hw_bank.regmap[reg]) & mask;
}
+#ifdef CONFIG_SOC_IMX28
+static inline void imx28_ci_writel(u32 val, volatile void __iomem *addr)
+{
+ __asm__ ("swp %0, %0, [%1]" : : "r"(val), "r"(addr));
+}
+#else
+static inline void imx28_ci_writel(u32 val, volatile void __iomem *addr)
+{
+}
+#endif
+
+static inline void __hw_write(struct ci_hdrc *ci, u32 val,
+ void __iomem *addr)
+{
+ if (ci->imx28_write_fix)
+ imx28_ci_writel(val, addr);
+ else
+ iowrite32(val, addr);
+}
+
/**
* hw_write: writes to a hw register
* @reg: register index
@@ -266,7 +285,7 @@ static inline void hw_write(struct ci_hdrc *ci, enum ci_hw_regs reg,
data = (ioread32(ci->hw_bank.regmap[reg]) & ~mask)
| (data & mask);
- iowrite32(data, ci->hw_bank.regmap[reg]);
+ __hw_write(ci, data, ci->hw_bank.regmap[reg]);
}
/**
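
To make the masking semantics concrete (the bit position is assumed for illustration, not taken from this patch): hw_write(ci, reg, mask, data) performs a read-modify-write and only touches the bits selected by mask, with __hw_write() choosing between iowrite32() and the imx28 swp workaround.

/* illustration only, bit position assumed:
 *   hw_write(ci, OP_USBCMD, BIT(0), BIT(0));
 * behaves roughly like:
 *   old = ioread32(ci->hw_bank.regmap[OP_USBCMD]);
 *   __hw_write(ci, (old & ~BIT(0)) | BIT(0), ci->hw_bank.regmap[OP_USBCMD]);
 */
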
@@ -281,7 +300,7 @@ static inline u32 hw_test_and_clear(struct ci_hdrc *ci, enum ci_hw_regs reg,
{
u32 val = ioread32(ci->hw_bank.regmap[reg]) & mask;
- iowrite32(val, ci->hw_bank.regmap[reg]);
+ __hw_write(ci, val, ci->hw_bank.regmap[reg]);
return val;
}
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index bb5d976e5b81..c00f77257d36 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -23,6 +23,26 @@
#include "ci.h"
#include "ci_hdrc_imx.h"
+#define CI_HDRC_IMX_IMX28_WRITE_FIX BIT(0)
+
+struct ci_hdrc_imx_platform_flag {
+ unsigned int flags;
+};
+
+static const struct ci_hdrc_imx_platform_flag imx27_usb_data = {
+};
+
+static const struct ci_hdrc_imx_platform_flag imx28_usb_data = {
+ .flags = CI_HDRC_IMX_IMX28_WRITE_FIX,
+};
+
+static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+ { .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
+ { .compatible = "fsl,imx27-usb", .data = &imx27_usb_data},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
+
struct ci_hdrc_imx_data {
struct usb_phy *phy;
struct platform_device *ci_pdev;
@@ -82,6 +102,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
CI_HDRC_DISABLE_STREAMING,
};
int ret;
+ const struct of_device_id *of_id =
+ of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev);
+ const struct ci_hdrc_imx_platform_flag *imx_platform_flag = of_id->data;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data) {
@@ -115,6 +138,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
pdata.phy = data->phy;
+ if (imx_platform_flag->flags & CI_HDRC_IMX_IMX28_WRITE_FIX)
+ pdata.flags |= CI_HDRC_IMX28_WRITE_FIX;
+
ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
goto err_clk;
@@ -173,12 +199,6 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
- { .compatible = "fsl,imx27-usb", },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
-
static struct platform_driver ci_hdrc_imx_driver = {
.probe = ci_hdrc_imx_probe,
.remove = ci_hdrc_imx_remove,
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index c7271590dd0a..996ec93467b2 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -9,6 +9,9 @@
* http://www.gnu.org/copyleft/gpl.html
*/
+#ifndef __DRIVER_USB_CHIPIDEA_CI_HDRC_IMX_H
+#define __DRIVER_USB_CHIPIDEA_CI_HDRC_IMX_H
+
struct imx_usbmisc_data {
int index;
@@ -18,3 +21,5 @@ struct imx_usbmisc_data {
int imx_usbmisc_init(struct imx_usbmisc_data *);
int imx_usbmisc_init_post(struct imx_usbmisc_data *);
+
+#endif /* __DRIVER_USB_CHIPIDEA_CI_HDRC_IMX_H */
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
index d514332ac081..241ae3444fde 100644
--- a/drivers/usb/chipidea/ci_hdrc_pci.c
+++ b/drivers/usb/chipidea/ci_hdrc_pci.c
@@ -112,7 +112,7 @@ static void ci_hdrc_pci_remove(struct pci_dev *pdev)
*
* Check "pci.h" for details
*/
-static DEFINE_PCI_DEVICE_TABLE(ci_hdrc_pci_id_table) = {
+static const struct pci_device_id ci_hdrc_pci_id_table[] = {
{
PCI_DEVICE(0x153F, 0x1004),
.driver_data = (kernel_ulong_t)&pci_platdata,
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 5d8981c5235e..33f22bc6ad7f 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -75,61 +75,54 @@
#include "otg.h"
/* Controller register map */
-static uintptr_t ci_regs_nolpm[] = {
- [CAP_CAPLENGTH] = 0x000UL,
- [CAP_HCCPARAMS] = 0x008UL,
- [CAP_DCCPARAMS] = 0x024UL,
- [CAP_TESTMODE] = 0x038UL,
- [OP_USBCMD] = 0x000UL,
- [OP_USBSTS] = 0x004UL,
- [OP_USBINTR] = 0x008UL,
- [OP_DEVICEADDR] = 0x014UL,
- [OP_ENDPTLISTADDR] = 0x018UL,
- [OP_PORTSC] = 0x044UL,
- [OP_DEVLC] = 0x084UL,
- [OP_OTGSC] = 0x064UL,
- [OP_USBMODE] = 0x068UL,
- [OP_ENDPTSETUPSTAT] = 0x06CUL,
- [OP_ENDPTPRIME] = 0x070UL,
- [OP_ENDPTFLUSH] = 0x074UL,
- [OP_ENDPTSTAT] = 0x078UL,
- [OP_ENDPTCOMPLETE] = 0x07CUL,
- [OP_ENDPTCTRL] = 0x080UL,
+static const u8 ci_regs_nolpm[] = {
+ [CAP_CAPLENGTH] = 0x00U,
+ [CAP_HCCPARAMS] = 0x08U,
+ [CAP_DCCPARAMS] = 0x24U,
+ [CAP_TESTMODE] = 0x38U,
+ [OP_USBCMD] = 0x00U,
+ [OP_USBSTS] = 0x04U,
+ [OP_USBINTR] = 0x08U,
+ [OP_DEVICEADDR] = 0x14U,
+ [OP_ENDPTLISTADDR] = 0x18U,
+ [OP_PORTSC] = 0x44U,
+ [OP_DEVLC] = 0x84U,
+ [OP_OTGSC] = 0x64U,
+ [OP_USBMODE] = 0x68U,
+ [OP_ENDPTSETUPSTAT] = 0x6CU,
+ [OP_ENDPTPRIME] = 0x70U,
+ [OP_ENDPTFLUSH] = 0x74U,
+ [OP_ENDPTSTAT] = 0x78U,
+ [OP_ENDPTCOMPLETE] = 0x7CU,
+ [OP_ENDPTCTRL] = 0x80U,
};
-static uintptr_t ci_regs_lpm[] = {
- [CAP_CAPLENGTH] = 0x000UL,
- [CAP_HCCPARAMS] = 0x008UL,
- [CAP_DCCPARAMS] = 0x024UL,
- [CAP_TESTMODE] = 0x0FCUL,
- [OP_USBCMD] = 0x000UL,
- [OP_USBSTS] = 0x004UL,
- [OP_USBINTR] = 0x008UL,
- [OP_DEVICEADDR] = 0x014UL,
- [OP_ENDPTLISTADDR] = 0x018UL,
- [OP_PORTSC] = 0x044UL,
- [OP_DEVLC] = 0x084UL,
- [OP_OTGSC] = 0x0C4UL,
- [OP_USBMODE] = 0x0C8UL,
- [OP_ENDPTSETUPSTAT] = 0x0D8UL,
- [OP_ENDPTPRIME] = 0x0DCUL,
- [OP_ENDPTFLUSH] = 0x0E0UL,
- [OP_ENDPTSTAT] = 0x0E4UL,
- [OP_ENDPTCOMPLETE] = 0x0E8UL,
- [OP_ENDPTCTRL] = 0x0ECUL,
+static const u8 ci_regs_lpm[] = {
+ [CAP_CAPLENGTH] = 0x00U,
+ [CAP_HCCPARAMS] = 0x08U,
+ [CAP_DCCPARAMS] = 0x24U,
+ [CAP_TESTMODE] = 0xFCU,
+ [OP_USBCMD] = 0x00U,
+ [OP_USBSTS] = 0x04U,
+ [OP_USBINTR] = 0x08U,
+ [OP_DEVICEADDR] = 0x14U,
+ [OP_ENDPTLISTADDR] = 0x18U,
+ [OP_PORTSC] = 0x44U,
+ [OP_DEVLC] = 0x84U,
+ [OP_OTGSC] = 0xC4U,
+ [OP_USBMODE] = 0xC8U,
+ [OP_ENDPTSETUPSTAT] = 0xD8U,
+ [OP_ENDPTPRIME] = 0xDCU,
+ [OP_ENDPTFLUSH] = 0xE0U,
+ [OP_ENDPTSTAT] = 0xE4U,
+ [OP_ENDPTCOMPLETE] = 0xE8U,
+ [OP_ENDPTCTRL] = 0xECU,
};
static int hw_alloc_regmap(struct ci_hdrc *ci, bool is_lpm)
{
int i;
- kfree(ci->hw_bank.regmap);
-
- ci->hw_bank.regmap = kzalloc((OP_LAST + 1) * sizeof(void *),
- GFP_KERNEL);
- if (!ci->hw_bank.regmap)
- return -ENOMEM;
-
for (i = 0; i < OP_ENDPTCTRL; i++)
ci->hw_bank.regmap[i] =
(i <= CAP_LAST ? ci->hw_bank.cap : ci->hw_bank.op) +
@@ -208,7 +201,8 @@ static int hw_device_init(struct ci_hdrc *ci, void __iomem *base)
reg = hw_read(ci, CAP_HCCPARAMS, HCCPARAMS_LEN) >>
__ffs(HCCPARAMS_LEN);
ci->hw_bank.lpm = reg;
- hw_alloc_regmap(ci, !!reg);
+ if (reg)
+ hw_alloc_regmap(ci, !!reg);
ci->hw_bank.size = ci->hw_bank.op - ci->hw_bank.abs;
ci->hw_bank.size += OP_LAST;
ci->hw_bank.size /= sizeof(u32);
@@ -242,7 +236,7 @@ static int hw_device_init(struct ci_hdrc *ci, void __iomem *base)
static void hw_phymode_configure(struct ci_hdrc *ci)
{
- u32 portsc, lpm, sts;
+ u32 portsc, lpm, sts = 0;
switch (ci->platdata->phy_mode) {
case USBPHY_INTERFACE_MODE_UTMI:
@@ -272,10 +266,12 @@ static void hw_phymode_configure(struct ci_hdrc *ci)
if (ci->hw_bank.lpm) {
hw_write(ci, OP_DEVLC, DEVLC_PTS(7) | DEVLC_PTW, lpm);
- hw_write(ci, OP_DEVLC, DEVLC_STS, sts);
+ if (sts)
+ hw_write(ci, OP_DEVLC, DEVLC_STS, DEVLC_STS);
} else {
hw_write(ci, OP_PORTSC, PORTSC_PTS(7) | PORTSC_PTW, portsc);
- hw_write(ci, OP_PORTSC, PORTSC_STS, sts);
+ if (sts)
+ hw_write(ci, OP_PORTSC, PORTSC_STS, PORTSC_STS);
}
}
@@ -554,6 +550,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ci->dev = dev;
ci->platdata = dev->platform_data;
+ ci->imx28_write_fix = !!(ci->platdata->flags &
+ CI_HDRC_IMX28_WRITE_FIX);
ret = hw_device_init(ci, base);
if (ret < 0) {
@@ -561,6 +559,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
return -ENODEV;
}
+ hw_phymode_configure(ci);
+
ret = ci_usb_phy_init(ci);
if (ret) {
dev_err(dev, "unable to init phy: %d\n", ret);
@@ -578,8 +578,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ci_get_otg_capable(ci);
- hw_phymode_configure(ci);
-
dr_mode = ci->platdata->dr_mode;
/* initialize role(s) before the interrupt is requested */
if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
@@ -642,6 +640,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
: CI_ROLE_GADGET;
}
+ /* only update vbus status for peripheral */
+ if (ci->role == CI_ROLE_GADGET)
+ ci_handle_vbus_change(ci);
+
ret = ci_role_start(ci, ci->role);
if (ret) {
dev_err(dev, "can't start %s role\n", ci_role(ci)->name);
@@ -676,7 +678,6 @@ static int ci_hdrc_remove(struct platform_device *pdev)
ci_role_destroy(ci);
ci_hdrc_enter_lpm(ci, true);
ci_usb_phy_destroy(ci);
- kfree(ci->hw_bank.regmap);
return 0;
}
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 59e6020ea753..a8ac6c16dac9 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -65,6 +65,7 @@ static int host_start(struct ci_hdrc *ci)
ehci->caps = ci->hw_bank.cap;
ehci->has_hostpc = ci->hw_bank.lpm;
ehci->has_tdi_phy_lpm = ci->hw_bank.lpm;
+ ehci->imx28_write_fix = ci->imx28_write_fix;
if (ci->platdata->reg_vbus) {
ret = regulator_enable(ci->platdata->reg_vbus);
@@ -88,7 +89,8 @@ static int host_start(struct ci_hdrc *ci)
return ret;
disable_reg:
- regulator_disable(ci->platdata->reg_vbus);
+ if (ci->platdata->reg_vbus)
+ regulator_disable(ci->platdata->reg_vbus);
put_hcd:
usb_put_hcd(hcd);
diff --git a/drivers/usb/chipidea/otg.h b/drivers/usb/chipidea/otg.h
index 2d9f090733bc..449bee07f4fe 100644
--- a/drivers/usb/chipidea/otg.h
+++ b/drivers/usb/chipidea/otg.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2013-2014 Freescale Semiconductor, Inc.
*
* Author: Peter Chen
*
@@ -19,12 +19,12 @@ static inline void ci_clear_otg_interrupt(struct ci_hdrc *ci, u32 bits)
static inline void ci_enable_otg_interrupt(struct ci_hdrc *ci, u32 bits)
{
- hw_write(ci, OP_OTGSC, bits, bits);
+ hw_write(ci, OP_OTGSC, bits | OTGSC_INT_STATUS_BITS, bits);
}
static inline void ci_disable_otg_interrupt(struct ci_hdrc *ci, u32 bits)
{
- hw_write(ci, OP_OTGSC, bits, 0);
+ hw_write(ci, OP_OTGSC, bits | OTGSC_INT_STATUS_BITS, 0);
}
int ci_hdrc_otg_init(struct ci_hdrc *ci);
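
The otg.h hunk above widens the hw_write() mask to include OTGSC_INT_STATUS_BITS so the write-one-to-clear status bits are forced to zero on every read-modify-write of the enable bits; writing them back as just read would acknowledge any event that became pending in between. A minimal sketch of that pattern, with a hypothetical register layout rather than the real OTGSC bit definitions:

/* Hypothetical layout: a register mixing enable bits and W1C status bits. */
#define REG_ENABLE_BITS		0x00ff0000u	/* interrupt enable bits */
#define REG_STATUS_BITS		0xff000000u	/* write 1 to clear */

/* Value to write back when changing only the enable bits. */
static unsigned int update_enable_bits(unsigned int old, unsigned int enable)
{
	unsigned int val = old;

	val &= ~(REG_ENABLE_BITS | REG_STATUS_BITS);	/* force W1C bits to 0 */
	val |= enable & REG_ENABLE_BITS;		/* apply the new enables */
	return val;
}
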
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index b34c81969cba..80de2f88ed2c 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -393,6 +393,14 @@ static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
+ if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) {
+ u32 mul = hwreq->req.length / hwep->ep.maxpacket;
+
+ if (hwreq->req.length == 0
+ || hwreq->req.length % hwep->ep.maxpacket)
+ mul++;
+ node->ptr->token |= mul << __ffs(TD_MULTO);
+ }
temp = (u32) (hwreq->req.dma + hwreq->req.actual);
if (length) {
@@ -515,10 +523,11 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
hwep->qh.ptr->td.token &=
cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
- if (hwep->type == USB_ENDPOINT_XFER_ISOC) {
+ if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == RX) {
u32 mul = hwreq->req.length / hwep->ep.maxpacket;
- if (hwreq->req.length % hwep->ep.maxpacket)
+ if (hwreq->req.length == 0
+ || hwreq->req.length % hwep->ep.maxpacket)
mul++;
hwep->qh.ptr->cap |= mul << __ffs(QH_MULT);
}
@@ -1173,6 +1182,12 @@ static int ep_enable(struct usb_ep *ep,
if (hwep->num)
cap |= QH_ZLT;
cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
+ /*
+ * For ISO-TX, we set mult at QH as the largest value, and use
+ * MultO at TD as real mult value.
+ */
+ if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX)
+ cap |= 3 << __ffs(QH_MULT);
hwep->qh.ptr->cap = cpu_to_le32(cap);
@@ -1566,7 +1581,7 @@ static int init_eps(struct ci_hdrc *ci)
* eps, maxP is set by epautoconfig() called
* by gadget layer
*/
- hwep->ep.maxpacket = (unsigned short)~0;
+ usb_ep_set_maxpacket_limit(&hwep->ep, (unsigned short)~0);
INIT_LIST_HEAD(&hwep->qh.queue);
hwep->qh.ptr = dma_pool_alloc(ci->qh_pool, GFP_KERNEL,
@@ -1586,7 +1601,7 @@ static int init_eps(struct ci_hdrc *ci)
else
ci->ep0in = hwep;
- hwep->ep.maxpacket = CTRL_PAYLOAD_MAX;
+ usb_ep_set_maxpacket_limit(&hwep->ep, CTRL_PAYLOAD_MAX);
continue;
}
@@ -1795,9 +1810,6 @@ static int udc_start(struct ci_hdrc *ci)
pm_runtime_no_callbacks(&ci->gadget.dev);
pm_runtime_enable(&ci->gadget.dev);
- /* Update ci->vbus_active */
- ci_handle_vbus_change(ci);
-
return retval;
destroy_eps:
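
The udc.c hunks above program the isochronous multiplier in two places: the per-TD MultO field for ISO-TX and the QH mult field for ISO-RX, in both cases rounding up and counting a zero-length request as one packet. A short illustrative helper (not driver code) for that calculation:

/* Packets of size maxpacket needed to carry len bytes, minimum one. */
static unsigned int iso_mult(unsigned int len, unsigned int maxpacket)
{
	unsigned int mul = len / maxpacket;

	if (len == 0 || len % maxpacket)
		mul++;	/* a partial or zero-length transfer still takes a packet */
	return mul;	/* e.g. len=0 -> 1; len=513, maxpacket=512 -> 2 */
}
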
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 8a1094b1182f..cd061abe3507 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -21,6 +21,10 @@
#define MX25_USB_PHY_CTRL_OFFSET 0x08
#define MX25_BM_EXTERNAL_VBUS_DIVIDER BIT(23)
+#define MX27_H1_PM_BIT BIT(8)
+#define MX27_H2_PM_BIT BIT(16)
+#define MX27_OTG_PM_BIT BIT(24)
+
#define MX53_USB_OTG_PHY_CTRL_0_OFFSET 0x08
#define MX53_USB_UH2_CTRL_OFFSET 0x14
#define MX53_USB_UH3_CTRL_OFFSET 0x18
@@ -68,6 +72,36 @@ static int usbmisc_imx25_post(struct imx_usbmisc_data *data)
return 0;
}
+static int usbmisc_imx27_init(struct imx_usbmisc_data *data)
+{
+ unsigned long flags;
+ u32 val;
+
+ switch (data->index) {
+ case 0:
+ val = MX27_OTG_PM_BIT;
+ break;
+ case 1:
+ val = MX27_H1_PM_BIT;
+ break;
+ case 2:
+ val = MX27_H2_PM_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ if (data->disable_oc)
+ val = readl(usbmisc->base) | val;
+ else
+ val = readl(usbmisc->base) & ~val;
+ writel(val, usbmisc->base);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ return 0;
+}
+
static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
{
void __iomem *reg = NULL;
@@ -128,6 +162,10 @@ static const struct usbmisc_ops imx25_usbmisc_ops = {
.post = usbmisc_imx25_post,
};
+static const struct usbmisc_ops imx27_usbmisc_ops = {
+ .init = usbmisc_imx27_init,
+};
+
static const struct usbmisc_ops imx53_usbmisc_ops = {
.init = usbmisc_imx53_init,
};
@@ -162,6 +200,14 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = {
.data = &imx25_usbmisc_ops,
},
{
+ .compatible = "fsl,imx27-usbmisc",
+ .data = &imx27_usbmisc_ops,
+ },
+ {
+ .compatible = "fsl,imx51-usbmisc",
+ .data = &imx53_usbmisc_ops,
+ },
+ {
.compatible = "fsl,imx53-usbmisc",
.data = &imx53_usbmisc_ops,
},
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e8404319ca68..900f7ff805ee 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -262,6 +262,7 @@ static void acm_ctrl_irq(struct urb *urb)
struct usb_cdc_notification *dr = urb->transfer_buffer;
unsigned char *data;
int newctrl;
+ int difference;
int retval;
int status = urb->status;
@@ -302,20 +303,31 @@ static void acm_ctrl_irq(struct urb *urb)
tty_port_tty_hangup(&acm->port, false);
}
+ difference = acm->ctrlin ^ newctrl;
+ spin_lock(&acm->read_lock);
acm->ctrlin = newctrl;
+ acm->oldcount = acm->iocount;
+
+ if (difference & ACM_CTRL_DSR)
+ acm->iocount.dsr++;
+ if (difference & ACM_CTRL_BRK)
+ acm->iocount.brk++;
+ if (difference & ACM_CTRL_RI)
+ acm->iocount.rng++;
+ if (difference & ACM_CTRL_DCD)
+ acm->iocount.dcd++;
+ if (difference & ACM_CTRL_FRAMING)
+ acm->iocount.frame++;
+ if (difference & ACM_CTRL_PARITY)
+ acm->iocount.parity++;
+ if (difference & ACM_CTRL_OVERRUN)
+ acm->iocount.overrun++;
+ spin_unlock(&acm->read_lock);
+
+ if (difference)
+ wake_up_all(&acm->wioctl);
- dev_dbg(&acm->control->dev,
- "%s - input control lines: dcd%c dsr%c break%c "
- "ring%c framing%c parity%c overrun%c\n",
- __func__,
- acm->ctrlin & ACM_CTRL_DCD ? '+' : '-',
- acm->ctrlin & ACM_CTRL_DSR ? '+' : '-',
- acm->ctrlin & ACM_CTRL_BRK ? '+' : '-',
- acm->ctrlin & ACM_CTRL_RI ? '+' : '-',
- acm->ctrlin & ACM_CTRL_FRAMING ? '+' : '-',
- acm->ctrlin & ACM_CTRL_PARITY ? '+' : '-',
- acm->ctrlin & ACM_CTRL_OVERRUN ? '+' : '-');
- break;
+ break;
default:
dev_dbg(&acm->control->dev,
@@ -796,6 +808,72 @@ static int set_serial_info(struct acm *acm,
return retval;
}
+static int wait_serial_change(struct acm *acm, unsigned long arg)
+{
+ int rv = 0;
+ DECLARE_WAITQUEUE(wait, current);
+ struct async_icount old, new;
+
+ if (!(arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD)))
+ return -EINVAL;
+ do {
+ spin_lock_irq(&acm->read_lock);
+ old = acm->oldcount;
+ new = acm->iocount;
+ acm->oldcount = new;
+ spin_unlock_irq(&acm->read_lock);
+
+ if ((arg & TIOCM_DSR) &&
+ old.dsr != new.dsr)
+ break;
+ if ((arg & TIOCM_CD) &&
+ old.dcd != new.dcd)
+ break;
+ if ((arg & TIOCM_RI) &&
+ old.rng != new.rng)
+ break;
+
+ add_wait_queue(&acm->wioctl, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ remove_wait_queue(&acm->wioctl, &wait);
+ if (acm->disconnected) {
+ if (arg & TIOCM_CD)
+ break;
+ else
+ rv = -ENODEV;
+ } else {
+ if (signal_pending(current))
+ rv = -ERESTARTSYS;
+ }
+ } while (!rv);
+
+ return rv;
+}
+
+static int get_serial_usage(struct acm *acm,
+ struct serial_icounter_struct __user *count)
+{
+ struct serial_icounter_struct icount;
+ int rv = 0;
+
+ memset(&icount, 0, sizeof(icount));
+ icount.dsr = acm->iocount.dsr;
+ icount.rng = acm->iocount.rng;
+ icount.dcd = acm->iocount.dcd;
+ icount.frame = acm->iocount.frame;
+ icount.overrun = acm->iocount.overrun;
+ icount.parity = acm->iocount.parity;
+ icount.brk = acm->iocount.brk;
+
+ if (copy_to_user(count, &icount, sizeof(icount)) > 0)
+ rv = -EFAULT;
+
+ return rv;
+}
+
static int acm_tty_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
@@ -809,6 +887,18 @@ static int acm_tty_ioctl(struct tty_struct *tty,
case TIOCSSERIAL:
rv = set_serial_info(acm, (struct serial_struct __user *) arg);
break;
+ case TIOCMIWAIT:
+ rv = usb_autopm_get_interface(acm->control);
+ if (rv < 0) {
+ rv = -EIO;
+ break;
+ }
+ rv = wait_serial_change(acm, arg);
+ usb_autopm_put_interface(acm->control);
+ break;
+ case TIOCGICOUNT:
+ rv = get_serial_usage(acm, (struct serial_icounter_struct __user *) arg);
+ break;
}
return rv;
@@ -1167,6 +1257,7 @@ made_compressed_probe:
acm->readsize = readsize;
acm->rx_buflimit = num_rx_buf;
INIT_WORK(&acm->work, acm_softint);
+ init_waitqueue_head(&acm->wioctl);
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
mutex_init(&acm->mutex);
@@ -1383,6 +1474,7 @@ static void acm_disconnect(struct usb_interface *intf)
device_remove_file(&acm->control->dev,
&dev_attr_iCountryCodeRelDate);
}
+ wake_up_all(&acm->wioctl);
device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
usb_set_intfdata(acm->control, NULL);
usb_set_intfdata(acm->data, NULL);
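
With the counters, the wioctl wait queue (declared in the cdc-acm.h hunk that follows) and the two new ioctl cases in place, cdc-acm supports the TIOCMIWAIT/TIOCGICOUNT interface familiar from the classic serial drivers. A hypothetical userspace sketch of the intended usage; the device node is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/serial.h>		/* struct serial_icounter_struct */

int main(void)
{
	struct serial_icounter_struct ic;
	int fd = open("/dev/ttyACM0", O_RDWR | O_NOCTTY);	/* assumed node */

	if (fd < 0)
		return 1;
	/* Block until a carrier-detect transition is counted... */
	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD) == 0 &&
	    /* ...then read the accumulated line-change counters. */
	    ioctl(fd, TIOCGICOUNT, &ic) == 0)
		printf("dcd=%d dsr=%d ring=%d\n", ic.dcd, ic.dsr, ic.rng);
	return 0;
}
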
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 0f76e4af600e..e38dc785808f 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -106,6 +106,9 @@ struct acm {
struct work_struct work; /* work queue entry for line discipline waking up */
unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
unsigned int ctrlout; /* output control lines (DTR, RTS) */
+ struct async_icount iocount; /* counters for control line changes */
+ struct async_icount oldcount; /* for comparison of counter */
+ wait_queue_head_t wioctl; /* for ioctl */
unsigned int writesize; /* max packet size for the output bulk endpoint */
unsigned int readsize,ctrlsize; /* buffer sizes for freeing */
unsigned int minor; /* acm minor number */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 4d387596f3f0..a051a7a2b1bd 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -432,6 +432,38 @@ outnl:
return rv < 0 ? rv : count;
}
+/*
+ * clear the WDM_READ flag and, if resp_count is non-zero, resubmit
+ * the response urb.
+ *
+ * Called with desc->iuspin locked
+ */
+static int clear_wdm_read_flag(struct wdm_device *desc)
+{
+ int rv = 0;
+
+ clear_bit(WDM_READ, &desc->flags);
+
+ /* submit read urb only if the device is waiting for it */
+ if (!desc->resp_count || !--desc->resp_count)
+ goto out;
+
+ set_bit(WDM_RESPONDING, &desc->flags);
+ spin_unlock_irq(&desc->iuspin);
+ rv = usb_submit_urb(desc->response, GFP_KERNEL);
+ spin_lock_irq(&desc->iuspin);
+ if (rv) {
+ dev_err(&desc->intf->dev,
+ "usb_submit_urb failed with result %d\n", rv);
+
+ /* make sure the next notification triggers a submit */
+ clear_bit(WDM_RESPONDING, &desc->flags);
+ desc->resp_count = 0;
+ }
+out:
+ return rv;
+}
+
static ssize_t wdm_read
(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
@@ -503,8 +535,10 @@ retry:
if (!desc->reslength) { /* zero length read */
dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
- clear_bit(WDM_READ, &desc->flags);
+ rv = clear_wdm_read_flag(desc);
spin_unlock_irq(&desc->iuspin);
+ if (rv < 0)
+ goto err;
goto retry;
}
cntr = desc->length;
@@ -526,37 +560,9 @@ retry:
desc->length -= cntr;
/* in case we had outstanding data */
- if (!desc->length) {
- clear_bit(WDM_READ, &desc->flags);
-
- if (--desc->resp_count) {
- set_bit(WDM_RESPONDING, &desc->flags);
- spin_unlock_irq(&desc->iuspin);
-
- rv = usb_submit_urb(desc->response, GFP_KERNEL);
- if (rv) {
- dev_err(&desc->intf->dev,
- "%s: usb_submit_urb failed with result %d\n",
- __func__, rv);
- spin_lock_irq(&desc->iuspin);
- clear_bit(WDM_RESPONDING, &desc->flags);
- spin_unlock_irq(&desc->iuspin);
-
- if (rv == -ENOMEM) {
- rv = schedule_work(&desc->rxwork);
- if (rv)
- dev_err(&desc->intf->dev, "Cannot schedule work\n");
- } else {
- spin_lock_irq(&desc->iuspin);
- desc->resp_count = 0;
- spin_unlock_irq(&desc->iuspin);
- }
- }
- } else
- spin_unlock_irq(&desc->iuspin);
- } else
- spin_unlock_irq(&desc->iuspin);
-
+ if (!desc->length)
+ clear_wdm_read_flag(desc);
+ spin_unlock_irq(&desc->iuspin);
rv = cntr;
err:
@@ -854,13 +860,11 @@ static int wdm_manage_power(struct usb_interface *intf, int on)
{
/* need autopm_get/put here to ensure the usbcore sees the new value */
int rv = usb_autopm_get_interface(intf);
- if (rv < 0)
- goto err;
intf->needs_remote_wakeup = on;
- usb_autopm_put_interface(intf);
-err:
- return rv;
+ if (!rv)
+ usb_autopm_put_interface(intf);
+ return 0;
}
static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
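
The new clear_wdm_read_flag() helper above is entered with desc->iuspin held, yet it submits the response urb with GFP_KERNEL, so the lock is dropped around the submission and retaken before returning. A stripped-down sketch of that pattern (hypothetical helper, kernel context assumed):

#include <linux/spinlock.h>
#include <linux/usb.h>

/* Caller holds *lock on entry and still holds it on return. */
static int submit_with_lock_dropped(spinlock_t *lock, struct urb *urb)
{
	int rv;

	spin_unlock_irq(lock);			/* GFP_KERNEL may sleep */
	rv = usb_submit_urb(urb, GFP_KERNEL);
	spin_lock_irq(lock);
	return rv;
}
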
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index d4c47d5d7625..0924ee40a966 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -52,7 +52,6 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/poll.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/lp.h>
#include <linux/mutex.h>
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 09de131ee0cb..cfbec9c7e09e 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -21,7 +21,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile
index 5e847ad2f58a..2f6f93220046 100644
--- a/drivers/usb/core/Makefile
+++ b/drivers/usb/core/Makefile
@@ -2,8 +2,6 @@
# Makefile for USB Core files and filesystem
#
-ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
-
usbcore-y := usb.o hub.o hcd.o urb.o message.o driver.o
usbcore-y += config.o file.o buffer.o sysfs.o endpoint.o
usbcore-y += devio.o notify.o generic.o quirks.o devices.o
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 23559746be92..684ef70dc09d 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -2,7 +2,7 @@
* DMA memory management for framework level HCD code (hc_driver)
*
* This implementation plugs in through generic "usb_bus" level methods,
- * and should work with all USB controllers, regardles of bus type.
+ * and should work with all USB controllers, regardless of bus type.
*/
#include <linux/module.h>
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index a6b2cabe7930..8d72f0c65937 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -3,7 +3,6 @@
#include <linux/usb/hcd.h>
#include <linux/usb/quirks.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <asm/byteorder.h>
@@ -651,10 +650,6 @@ void usb_destroy_configuration(struct usb_device *dev)
*
* hub-only!! ... and only in reset path, or usb_new_device()
* (used by real hubs and virtual root hubs)
- *
- * NOTE: if this is a WUSB device and is not authorized, we skip the
- * whole thing. A non-authorized USB device has no
- * configurations.
*/
int usb_get_configuration(struct usb_device *dev)
{
@@ -666,8 +661,6 @@ int usb_get_configuration(struct usb_device *dev)
struct usb_config_descriptor *desc;
cfgno = 0;
- if (dev->authorized == 0) /* Not really an error */
- goto out_not_authorized;
result = -ENOMEM;
if (ncfg > USB_MAXCONFIG) {
dev_warn(ddev, "too many configurations: %d, "
@@ -751,7 +744,6 @@ int usb_get_configuration(struct usb_device *dev)
err:
kfree(desc);
-out_not_authorized:
dev->descriptor.bNumConfigurations = cfgno;
err2:
if (result == -ENOMEM)
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 967152a63bd3..90e18f6fa2bb 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -118,7 +118,7 @@ module_param(usbfs_memory_mb, uint, 0644);
MODULE_PARM_DESC(usbfs_memory_mb,
"maximum MB allowed for usbfs buffers (0 = no limit)");
-/* Hard limit, necessary to avoid aithmetic overflow */
+/* Hard limit, necessary to avoid arithmetic overflow */
#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
static atomic_t usbfs_memory_usage; /* Total memory currently allocated */
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 47aade2a5e74..ab90a0156828 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -37,6 +37,7 @@
* and cause the driver to probe for all devices again.
*/
ssize_t usb_store_new_id(struct usb_dynids *dynids,
+ const struct usb_device_id *id_table,
struct device_driver *driver,
const char *buf, size_t count)
{
@@ -44,11 +45,12 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
u32 idVendor = 0;
u32 idProduct = 0;
unsigned int bInterfaceClass = 0;
+ u32 refVendor, refProduct;
int fields = 0;
int retval = 0;
- fields = sscanf(buf, "%x %x %x", &idVendor, &idProduct,
- &bInterfaceClass);
+ fields = sscanf(buf, "%x %x %x %x %x", &idVendor, &idProduct,
+ &bInterfaceClass, &refVendor, &refProduct);
if (fields < 2)
return -EINVAL;
@@ -60,11 +62,36 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
dynid->id.idVendor = idVendor;
dynid->id.idProduct = idProduct;
dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE;
- if (fields == 3) {
+ if (fields > 2 && bInterfaceClass) {
+ if (bInterfaceClass > 255) {
+ retval = -EINVAL;
+ goto fail;
+ }
+
dynid->id.bInterfaceClass = (u8)bInterfaceClass;
dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS;
}
+ if (fields > 4) {
+ const struct usb_device_id *id = id_table;
+
+ if (!id) {
+ retval = -ENODEV;
+ goto fail;
+ }
+
+ for (; id->match_flags; id++)
+ if (id->idVendor == refVendor && id->idProduct == refProduct)
+ break;
+
+ if (id->match_flags) {
+ dynid->id.driver_info = id->driver_info;
+ } else {
+ retval = -ENODEV;
+ goto fail;
+ }
+ }
+
spin_lock(&dynids->lock);
list_add_tail(&dynid->node, &dynids->list);
spin_unlock(&dynids->lock);
@@ -74,6 +101,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
if (retval)
return retval;
return count;
+
+fail:
+ kfree(dynid);
+ return retval;
}
EXPORT_SYMBOL_GPL(usb_store_new_id);
@@ -106,7 +137,7 @@ static ssize_t new_id_store(struct device_driver *driver,
{
struct usb_driver *usb_drv = to_usb_driver(driver);
- return usb_store_new_id(&usb_drv->dynids, driver, buf, count);
+ return usb_store_new_id(&usb_drv->dynids, usb_drv->id_table, driver, buf, count);
}
static DRIVER_ATTR_RW(new_id);
@@ -839,7 +870,7 @@ int usb_register_device_driver(struct usb_device_driver *new_udriver,
return -ENODEV;
new_udriver->drvwrap.for_devices = 1;
- new_udriver->drvwrap.driver.name = (char *) new_udriver->name;
+ new_udriver->drvwrap.driver.name = new_udriver->name;
new_udriver->drvwrap.driver.bus = &usb_bus_type;
new_udriver->drvwrap.driver.probe = usb_probe_device;
new_udriver->drvwrap.driver.remove = usb_unbind_device;
@@ -900,7 +931,7 @@ int usb_register_driver(struct usb_driver *new_driver, struct module *owner,
return -ENODEV;
new_driver->drvwrap.for_devices = 0;
- new_driver->drvwrap.driver.name = (char *) new_driver->name;
+ new_driver->drvwrap.driver.name = new_driver->name;
new_driver->drvwrap.driver.bus = &usb_bus_type;
new_driver->drvwrap.driver.probe = usb_probe_interface;
new_driver->drvwrap.driver.remove = usb_unbind_interface;
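
usb_store_new_id() now receives the driver's id_table and accepts up to five hex fields: vendor, product, an optional interface class, and an optional reference vendor/product pair whose driver_info is inherited from the matching id_table entry. A standalone sketch of the accepted field layout; the IDs below are made up:

#include <stdio.h>

int main(void)
{
	unsigned int vid = 0, pid = 0, cls = 0, ref_vid = 0, ref_pid = 0;
	/* "vendor product [class [ref_vendor ref_product]]", all hex */
	const char *buf = "1234 5678 ff 1234 0001";	/* made-up IDs */
	int fields = sscanf(buf, "%x %x %x %x %x",
			    &vid, &pid, &cls, &ref_vid, &ref_pid);

	/* fields < 2         : rejected with -EINVAL
	 * fields >= 3, cls>0 : adds an interface-class match (cls <= 255)
	 * fields == 5        : driver_info is copied from the id_table entry
	 *                      matching ref_vid:ref_pid, else -ENODEV       */
	printf("%d fields: %04x:%04x class %02x ref %04x:%04x\n",
	       fields, vid, pid, cls, ref_vid, ref_pid);
	return 0;
}
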
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index dfe9d0f22978..d59d99347d54 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -282,6 +282,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (retval != 0)
goto unmap_registers;
+ device_wakeup_enable(hcd->self.controller);
if (pci_dev_run_wake(dev))
pm_runtime_put_noidle(&dev->dev);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 6bffb8c87bc9..2518c3250750 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -44,6 +44,7 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/usb/phy.h>
#include "usb.h"
@@ -1031,7 +1032,6 @@ static int register_root_hub(struct usb_hcd *hcd)
dev_name(&usb_dev->dev), retval);
return retval;
}
- usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
}
retval = usb_new_device (usb_dev);
@@ -1297,7 +1297,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_unlink_urb_from_ep);
* DMA framework is dma_declare_coherent_memory()
*
* - So we use that, even though the primary requirement
- * is that the memory be "local" (hence addressible
+ * is that the memory be "local" (hence addressable
* by that device), not "coherent".
*
*/
@@ -2588,6 +2588,24 @@ int usb_add_hcd(struct usb_hcd *hcd,
int retval;
struct usb_device *rhdev;
+ if (IS_ENABLED(CONFIG_USB_PHY) && !hcd->phy) {
+ struct usb_phy *phy = usb_get_phy_dev(hcd->self.controller, 0);
+
+ if (IS_ERR(phy)) {
+ retval = PTR_ERR(phy);
+ if (retval == -EPROBE_DEFER)
+ return retval;
+ } else {
+ retval = usb_phy_init(phy);
+ if (retval) {
+ usb_put_phy(phy);
+ return retval;
+ }
+ hcd->phy = phy;
+ hcd->remove_phy = 1;
+ }
+ }
+
dev_info(hcd->self.controller, "%s\n", hcd->product_desc);
/* Keep old behaviour if authorized_default is not in [0, 1]. */
@@ -2603,7 +2621,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
*/
if ((retval = hcd_buffer_create(hcd)) != 0) {
dev_dbg(hcd->self.controller, "pool alloc failed\n");
- return retval;
+ goto err_remove_phy;
}
if ((retval = usb_register_bus(&hcd->self)) < 0)
@@ -2693,12 +2711,6 @@ int usb_add_hcd(struct usb_hcd *hcd,
if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
usb_hcd_poll_rh_status(hcd);
- /*
- * Host controllers don't generate their own wakeup requests;
- * they only forward requests from the root hub. Therefore
- * controllers should always be enabled for remote wakeup.
- */
- device_wakeup_enable(hcd->self.controller);
return retval;
error_create_attr_group:
@@ -2734,6 +2746,12 @@ err_allocate_root_hub:
usb_deregister_bus(&hcd->self);
err_register_bus:
hcd_buffer_destroy(hcd);
+err_remove_phy:
+ if (hcd->remove_phy && hcd->phy) {
+ usb_phy_shutdown(hcd->phy);
+ usb_put_phy(hcd->phy);
+ hcd->phy = NULL;
+ }
return retval;
}
EXPORT_SYMBOL_GPL(usb_add_hcd);
@@ -2806,6 +2824,11 @@ void usb_remove_hcd(struct usb_hcd *hcd)
usb_put_dev(hcd->self.root_hub);
usb_deregister_bus(&hcd->self);
hcd_buffer_destroy(hcd);
+ if (hcd->remove_phy && hcd->phy) {
+ usb_phy_shutdown(hcd->phy);
+ usb_put_phy(hcd->phy);
+ hcd->phy = NULL;
+ }
}
EXPORT_SYMBOL_GPL(usb_remove_hcd);
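
usb_add_hcd() above gains optional generic PHY handling: a missing PHY is tolerated, -EPROBE_DEFER is propagated so the probe is retried, and a PHY that is present but fails to initialize is fatal; both the error path and usb_remove_hcd() release it again. A condensed sketch of that decision tree; the helper name is illustrative:

#include <linux/err.h>
#include <linux/usb/hcd.h>
#include <linux/usb/phy.h>

static int hcd_attach_optional_phy(struct usb_hcd *hcd)
{
	struct usb_phy *phy = usb_get_phy_dev(hcd->self.controller, 0);
	int ret;

	if (IS_ERR(phy)) {
		if (PTR_ERR(phy) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* PHY driver not bound yet */
		return 0;			/* no PHY registered: carry on */
	}

	ret = usb_phy_init(phy);
	if (ret) {
		usb_put_phy(phy);
		return ret;			/* a present PHY must initialize */
	}
	hcd->phy = phy;
	hcd->remove_phy = 1;			/* hcd core now owns the reference */
	return 0;
}
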
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index bd9dc3504b51..64ea21971be2 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -33,13 +33,6 @@
#include "hub.h"
-/* if we are in debug mode, always announce new devices */
-#ifdef DEBUG
-#ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES
-#define CONFIG_USB_ANNOUNCE_NEW_DEVICES
-#endif
-#endif
-
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
@@ -135,7 +128,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
return usb_get_intfdata(hdev->actconfig->interface[0]);
}
-int usb_device_supports_lpm(struct usb_device *udev)
+static int usb_device_supports_lpm(struct usb_device *udev)
{
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
@@ -156,11 +149,6 @@ int usb_device_supports_lpm(struct usb_device *udev)
"Power management will be impacted.\n");
return 0;
}
-
- /* udev is root hub */
- if (!udev->parent)
- return 1;
-
if (udev->parent->lpm_capable)
return 1;
@@ -1154,7 +1142,8 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Tell khubd to disconnect the device or
* check for a new connection
*/
- if (udev || (portstatus & USB_PORT_STAT_CONNECTION))
+ if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
+ (portstatus & USB_PORT_STAT_OVERCURRENT))
set_bit(port1, hub->change_bits);
} else if (portstatus & USB_PORT_STAT_ENABLE) {
@@ -1607,7 +1596,7 @@ static void hub_disconnect(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata(intf);
struct usb_device *hdev = interface_to_usbdev(intf);
- int i;
+ int port1;
/* Take the hub off the event list and don't let it be added again */
spin_lock_irq(&hub_event_lock);
@@ -1622,11 +1611,15 @@ static void hub_disconnect(struct usb_interface *intf)
hub->error = 0;
hub_quiesce(hub, HUB_DISCONNECT);
- usb_set_intfdata (intf, NULL);
+ /* Avoid races with recursively_mark_NOTATTACHED() */
+ spin_lock_irq(&device_state_lock);
+ port1 = hdev->maxchild;
+ hdev->maxchild = 0;
+ usb_set_intfdata(intf, NULL);
+ spin_unlock_irq(&device_state_lock);
- for (i = 0; i < hdev->maxchild; i++)
- usb_hub_remove_port_device(hub, i + 1);
- hub->hdev->maxchild = 0;
+ for (; port1 > 0; --port1)
+ usb_hub_remove_port_device(hub, port1);
if (hub->hdev->speed == USB_SPEED_HIGH)
highspeed_hubs--;
@@ -2235,17 +2228,13 @@ static int usb_enumerate_device(struct usb_device *udev)
return err;
}
}
- if (udev->wusb == 1 && udev->authorized == 0) {
- udev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL);
- udev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL);
- udev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL);
- } else {
- /* read the standard strings and cache them if present */
- udev->product = usb_cache_string(udev, udev->descriptor.iProduct);
- udev->manufacturer = usb_cache_string(udev,
- udev->descriptor.iManufacturer);
- udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
- }
+
+ /* read the standard strings and cache them if present */
+ udev->product = usb_cache_string(udev, udev->descriptor.iProduct);
+ udev->manufacturer = usb_cache_string(udev,
+ udev->descriptor.iManufacturer);
+ udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
+
err = usb_enumerate_device_otg(udev);
if (err < 0)
return err;
@@ -2427,16 +2416,6 @@ int usb_deauthorize_device(struct usb_device *usb_dev)
usb_dev->authorized = 0;
usb_set_configuration(usb_dev, -1);
- kfree(usb_dev->product);
- usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL);
- kfree(usb_dev->manufacturer);
- usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL);
- kfree(usb_dev->serial);
- usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL);
-
- usb_destroy_configuration(usb_dev);
- usb_dev->descriptor.bNumConfigurations = 0;
-
out_unauthorized:
usb_unlock_device(usb_dev);
return 0;
@@ -2464,17 +2443,7 @@ int usb_authorize_device(struct usb_device *usb_dev)
goto error_device_descriptor;
}
- kfree(usb_dev->product);
- usb_dev->product = NULL;
- kfree(usb_dev->manufacturer);
- usb_dev->manufacturer = NULL;
- kfree(usb_dev->serial);
- usb_dev->serial = NULL;
-
usb_dev->authorized = 1;
- result = usb_enumerate_device(usb_dev);
- if (result < 0)
- goto error_enumerate;
/* Choose and set the configuration. This registers the interfaces
* with the driver core and lets interface drivers bind to them.
*/
@@ -2490,7 +2459,6 @@ int usb_authorize_device(struct usb_device *usb_dev)
}
dev_info(&usb_dev->dev, "authorized to connect\n");
-error_enumerate:
error_device_descriptor:
usb_autosuspend_device(usb_dev);
error_autoresume:
@@ -2523,10 +2491,25 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
#define HUB_LONG_RESET_TIME 200
#define HUB_RESET_TIMEOUT 800
+/*
+ * "New scheme" enumeration causes an extra state transition to be
+ * exposed to an xhci host and causes USB3 devices to receive control
+ * commands in the default state. This has been seen to cause
+ * enumeration failures, so disable this enumeration scheme for USB3
+ * devices.
+ */
+static bool use_new_scheme(struct usb_device *udev, int retry)
+{
+ if (udev->speed == USB_SPEED_SUPER)
+ return false;
+
+ return USE_NEW_SCHEME(retry);
+}
+
static int hub_port_reset(struct usb_hub *hub, int port1,
struct usb_device *udev, unsigned int delay, bool warm);
-/* Is a USB 3.0 port in the Inactive or Complinance Mode state?
+/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
* Port warm reset is required to recover
*/
static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus)
@@ -3334,7 +3317,8 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
udev = hub->ports[port1 - 1]->child;
if (udev && udev->can_submit) {
- dev_warn(&intf->dev, "port %d nyet suspended\n", port1);
+ dev_warn(&intf->dev, "port %d not suspended yet\n",
+ port1);
if (PMSG_IS_AUTO(msg))
return -EBUSY;
}
@@ -3981,6 +3965,20 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
}
}
+static int hub_enable_device(struct usb_device *udev)
+{
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+
+ if (!hcd->driver->enable_device)
+ return 0;
+ if (udev->state == USB_STATE_ADDRESS)
+ return 0;
+ if (udev->state != USB_STATE_DEFAULT)
+ return -EINVAL;
+
+ return hcd->driver->enable_device(hcd, udev);
+}
+
/* Reset device, (re)assign address, get device descriptor.
* Device connection must be stable, no more debouncing needed.
* Returns device in USB_STATE_ADDRESS, except on error.
@@ -4093,7 +4091,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
* this area, and this is how Linux has done it for ages.
* Change it cautiously.
*
- * NOTE: If USE_NEW_SCHEME() is true we will start by issuing
+ * NOTE: If use_new_scheme() is true we will start by issuing
* a 64-byte GET_DESCRIPTOR request. This is what Windows does,
* so it may help with some non-standards-compliant devices.
* Otherwise we start with SET_ADDRESS and then try to read the
@@ -4101,10 +4099,17 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
* value.
*/
for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
- if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) {
+ bool did_new_scheme = false;
+
+ if (use_new_scheme(udev, retry_counter)) {
struct usb_device_descriptor *buf;
int r = 0;
+ did_new_scheme = true;
+ retval = hub_enable_device(udev);
+ if (retval < 0)
+ goto fail;
+
#define GET_DESCRIPTOR_BUFSIZE 64
buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO);
if (!buf) {
@@ -4193,7 +4198,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
* - read ep0 maxpacket even for high and low speed,
*/
msleep(10);
- if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3))
+ /* use_new_scheme() checks the speed, which may have
+ * changed since the initial look, so we cache the result
+ * in did_new_scheme
+ */
+ if (did_new_scheme)
break;
}
@@ -4900,7 +4909,7 @@ static void hub_events(void)
static int hub_thread(void *__unused)
{
- /* khubd needs to be freezable to avoid intefering with USB-PERSIST
+ /* khubd needs to be freezable to avoid interfering with USB-PERSIST
* port handover. Otherwise it might see that a full-speed device
* was gone before the EHCI controller had handed its port over to
* the companion full-speed controller.
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 4e4790dea343..df629a310e44 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -78,7 +78,7 @@ struct usb_hub {
/**
* struct usb port - kernel's representation of a usb port
- * @child: usb device attatched to the port
+ * @child: usb device attached to the port
* @dev: generic device interface
* @port_owner: port's owner
* @connect_type: port's connect type
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 874d1a406ebc..5239e5163074 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -6,7 +6,6 @@
#include <linux/usb.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/timer.h>
#include <linux/ctype.h>
@@ -218,7 +217,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
*
* Return:
* If successful, 0. Otherwise a negative error number. The number of actual
- * bytes transferred will be stored in the @actual_length paramater.
+ * bytes transferred will be stored in the @actual_length parameter.
*
*/
int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
@@ -518,7 +517,7 @@ void usb_sg_wait(struct usb_sg_request *io)
io->urbs[i]->dev = io->dev;
retval = usb_submit_urb(io->urbs[i], GFP_ATOMIC);
- /* after we submit, let completions or cancelations fire;
+ /* after we submit, let completions or cancellations fire;
* we handshake using io->status.
*/
spin_unlock_irq(&io->lock);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 12924dbfdc2c..8f37063c0a49 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -98,9 +98,6 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Alcor Micro Corp. Hub */
{ USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
- /* MicroTouch Systems touchscreen */
- { USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
-
/* appletouch */
{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 52a97adf02a0..1236c6011c70 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -837,7 +837,7 @@ void usb_remove_sysfs_dev_files(struct usb_device *udev)
device_remove_bin_file(dev, &dev_bin_attr_descriptors);
}
-/* Interface Accociation Descriptor fields */
+/* Interface Association Descriptor fields */
#define usb_intf_assoc_attr(field, format_string) \
static ssize_t \
iad_##field##_show(struct device *dev, struct device_attribute *attr, \
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index e726f5e80448..991386ceb4ec 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -2,7 +2,6 @@
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
@@ -53,7 +52,7 @@ EXPORT_SYMBOL_GPL(usb_init_urb);
* valid options for this.
*
* Creates an urb for the USB driver to use, initializes a few internal
- * structures, incrementes the usage counter, and returns a pointer to it.
+ * structures, increments the usage counter, and returns a pointer to it.
*
* If the driver want to use this urb for interrupt, control, or bulk
* endpoints, pass '0' as the number of iso packets.
@@ -281,7 +280,7 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
*
* Device drivers must explicitly request that repetition, by ensuring that
* some URB is always on the endpoint's queue (except possibly for short
- * periods during completion callacks). When there is no longer an urb
+ * periods during completion callbacks). When there is no longer an urb
* queued, the endpoint's bandwidth reservation is canceled. This means
* drivers can use their completion handlers to ensure they keep bandwidth
* they need, by reinitializing and resubmitting the just-completed urb
@@ -325,10 +324,14 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
*/
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
+ static int pipetypes[4] = {
+ PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
+ };
int xfertype, max;
struct usb_device *dev;
struct usb_host_endpoint *ep;
int is_out;
+ unsigned int allowed;
if (!urb || !urb->complete)
return -EINVAL;
@@ -436,15 +439,10 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
if (urb->transfer_buffer_length > INT_MAX)
return -EMSGSIZE;
-#ifdef DEBUG
- /* stuff that drivers shouldn't do, but which shouldn't
+ /*
+ * stuff that drivers shouldn't do, but which shouldn't
* cause problems in HCDs if they get it wrong.
*/
- {
- unsigned int allowed;
- static int pipetypes[4] = {
- PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
- };
/* Check that the pipe's type matches the endpoint's type */
if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
@@ -476,8 +474,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
if (allowed != urb->transfer_flags)
dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
urb->transfer_flags, allowed);
- }
-#endif
+
/*
* Force periodic transfer intervals to be legal values that are
* a power of two (so HCDs don't need to).
@@ -492,9 +489,9 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
/* too small? */
switch (dev->speed) {
case USB_SPEED_WIRELESS:
- if (urb->interval < 6)
+ if ((urb->interval < 6)
+ && (xfertype == USB_ENDPOINT_XFER_INT))
return -EINVAL;
- break;
default:
if (urb->interval <= 0)
return -EINVAL;
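
The urb.c changes above hoist the pipe-type sanity check out of the old #ifdef DEBUG block so it always runs, and limit the wireless-USB minimum-interval check to interrupt transfers. The essence of the unconditional check, simplified and assuming kernel context:

#include <linux/usb.h>

static const int pipetypes[4] = {
	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

/* A mismatch indicates a driver bug; here it only triggers a warning. */
static bool urb_matches_endpoint(const struct urb *urb, int ep_xfertype)
{
	/* ep_xfertype is usb_endpoint_type(): 0..3 per the USB spec */
	return usb_pipetype(urb->pipe) == pipetypes[ep_xfertype];
}
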
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 4e243c37f17f..5ca4070b1f38 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -16,7 +16,6 @@
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/usb/hcd.h>
-#include <acpi/acpi_bus.h>
#include "usb.h"
@@ -92,7 +91,7 @@ static int usb_acpi_check_port_connect_type(struct usb_device *hdev,
int ret = 0;
/*
- * Accoding to ACPI Spec 9.13. PLD indicates whether usb port is
+ * According to ACPI Spec 9.13. PLD indicates whether usb port is
* user visible and _UPC indicates whether it is connectable. If
* the port was visible and connectable, it could be freely connected
* and disconnected with USB devices. If no visible and connectable,
@@ -127,7 +126,7 @@ out:
return ret;
}
-static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
+static struct acpi_device *usb_acpi_find_companion(struct device *dev)
{
struct usb_device *udev;
acpi_handle *parent_handle;
@@ -169,16 +168,15 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
break;
}
- return -ENODEV;
+ return NULL;
}
/* root hub's parent is the usb hcd. */
- parent_handle = ACPI_HANDLE(dev->parent);
- *handle = acpi_get_child(parent_handle, udev->portnum);
- if (!*handle)
- return -ENODEV;
- return 0;
+ return acpi_find_child_device(ACPI_COMPANION(dev->parent),
+ udev->portnum, false);
} else if (is_usb_port(dev)) {
+ struct acpi_device *adev = NULL;
+
sscanf(dev_name(dev), "port%d", &port_num);
/* Get the struct usb_device point of port's hub */
udev = to_usb_device(dev->parent->parent);
@@ -194,26 +192,27 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
raw_port_num = usb_hcd_find_raw_port_number(hcd,
port_num);
- *handle = acpi_get_child(ACPI_HANDLE(&udev->dev),
- raw_port_num);
- if (!*handle)
- return -ENODEV;
+ adev = acpi_find_child_device(ACPI_COMPANION(&udev->dev),
+ raw_port_num, false);
+ if (!adev)
+ return NULL;
} else {
parent_handle =
usb_get_hub_port_acpi_handle(udev->parent,
udev->portnum);
if (!parent_handle)
- return -ENODEV;
+ return NULL;
- *handle = acpi_get_child(parent_handle, port_num);
- if (!*handle)
- return -ENODEV;
+ acpi_bus_get_device(parent_handle, &adev);
+ adev = acpi_find_child_device(adev, port_num, false);
+ if (!adev)
+ return NULL;
}
- usb_acpi_check_port_connect_type(udev, *handle, port_num);
- } else
- return -ENODEV;
+ usb_acpi_check_port_connect_type(udev, adev->handle, port_num);
+ return adev;
+ }
- return 0;
+ return NULL;
}
static bool usb_acpi_bus_match(struct device *dev)
@@ -224,7 +223,7 @@ static bool usb_acpi_bus_match(struct device *dev)
static struct acpi_bus_type usb_acpi_bus = {
.name = "USB",
.match = usb_acpi_bus_match,
- .find_device = usb_acpi_find_device,
+ .find_companion = usb_acpi_find_companion,
};
int usb_acpi_register(void)
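
usb-acpi.c is converted from the acpi_handle-based .find_device callback to .find_companion returning a struct acpi_device, built on ACPI_COMPANION() and acpi_find_child_device(). A minimal sketch of the new-style lookup of a port by its address; the helper name is illustrative:

#include <linux/acpi.h>

static struct acpi_device *port_companion(struct device *dev, u64 port_num)
{
	struct acpi_device *parent = ACPI_COMPANION(dev->parent);

	if (!parent)
		return NULL;
	/* false: match direct children of the parent only, no recursion */
	return acpi_find_child_device(parent, port_num, false);
}
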
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index c49383669cd8..823857767a16 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -35,7 +35,6 @@ extern int usb_get_device_descriptor(struct usb_device *dev,
unsigned int size);
extern int usb_get_bos_descriptor(struct usb_device *dev);
extern void usb_release_bos_descriptor(struct usb_device *dev);
-extern int usb_device_supports_lpm(struct usb_device *udev);
extern char *usb_cache_string(struct usb_device *udev, int index);
extern int usb_set_configuration(struct usb_device *dev, int configuration);
extern int usb_choose_configuration(struct usb_device *udev);
diff --git a/drivers/staging/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index be947d673844..be947d673844 100644
--- a/drivers/staging/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
diff --git a/drivers/staging/dwc2/Makefile b/drivers/usb/dwc2/Makefile
index 11529d3439b0..11529d3439b0 100644
--- a/drivers/staging/dwc2/Makefile
+++ b/drivers/usb/dwc2/Makefile
diff --git a/drivers/staging/dwc2/core.c b/drivers/usb/dwc2/core.c
index 6d001b52f652..1d129884cc39 100644
--- a/drivers/staging/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -114,7 +114,7 @@ static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
* Do a soft reset of the core. Be careful with this because it
* resets all the internal state machines of the core.
*/
-static void dwc2_core_reset(struct dwc2_hsotg *hsotg)
+static int dwc2_core_reset(struct dwc2_hsotg *hsotg)
{
u32 greset;
int count = 0;
@@ -129,7 +129,7 @@ static void dwc2_core_reset(struct dwc2_hsotg *hsotg)
dev_warn(hsotg->dev,
"%s() HANG! AHB Idle GRSTCTL=%0x\n",
__func__, greset);
- return;
+ return -EBUSY;
}
} while (!(greset & GRSTCTL_AHBIDLE));
@@ -144,7 +144,7 @@ static void dwc2_core_reset(struct dwc2_hsotg *hsotg)
dev_warn(hsotg->dev,
"%s() HANG! Soft Reset GRSTCTL=%0x\n",
__func__, greset);
- break;
+ return -EBUSY;
}
} while (greset & GRSTCTL_CSFTRST);
@@ -153,11 +153,14 @@ static void dwc2_core_reset(struct dwc2_hsotg *hsotg)
* not stay in host mode after a connector ID change!
*/
usleep_range(150000, 200000);
+
+ return 0;
}
-static void dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
u32 usbcfg, i2cctl;
+ int retval = 0;
/*
* core_init() is now called on every switch so only call the
@@ -170,7 +173,12 @@ static void dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
writel(usbcfg, hsotg->regs + GUSBCFG);
/* Reset after a PHY select */
- dwc2_core_reset(hsotg);
+ retval = dwc2_core_reset(hsotg);
+ if (retval) {
+ dev_err(hsotg->dev, "%s() Reset failed, aborting",
+ __func__);
+ return retval;
+ }
}
/*
@@ -198,14 +206,17 @@ static void dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
i2cctl |= GI2CCTL_I2CEN;
writel(i2cctl, hsotg->regs + GI2CCTL);
}
+
+ return retval;
}
-static void dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
u32 usbcfg;
+ int retval = 0;
if (!select_phy)
- return;
+ return 0;
usbcfg = readl(hsotg->regs + GUSBCFG);
@@ -238,20 +249,32 @@ static void dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
writel(usbcfg, hsotg->regs + GUSBCFG);
/* Reset after setting the PHY parameters */
- dwc2_core_reset(hsotg);
+ retval = dwc2_core_reset(hsotg);
+ if (retval) {
+ dev_err(hsotg->dev, "%s() Reset failed, aborting",
+ __func__);
+ return retval;
+ }
+
+ return retval;
}
-static void dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
u32 usbcfg;
+ int retval = 0;
if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
/* If FS mode with FS PHY */
- dwc2_fs_phy_init(hsotg, select_phy);
+ retval = dwc2_fs_phy_init(hsotg, select_phy);
+ if (retval)
+ return retval;
} else {
/* High speed PHY */
- dwc2_hs_phy_init(hsotg, select_phy);
+ retval = dwc2_hs_phy_init(hsotg, select_phy);
+ if (retval)
+ return retval;
}
if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
@@ -268,6 +291,8 @@ static void dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
writel(usbcfg, hsotg->regs + GUSBCFG);
}
+
+ return retval;
}
static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
@@ -382,12 +407,19 @@ int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
writel(usbcfg, hsotg->regs + GUSBCFG);
/* Reset the Controller */
- dwc2_core_reset(hsotg);
+ retval = dwc2_core_reset(hsotg);
+ if (retval) {
+ dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
+ __func__);
+ return retval;
+ }
/*
* This needs to happen in FS mode before any other programming occurs
*/
- dwc2_phy_init(hsotg, select_phy);
+ retval = dwc2_phy_init(hsotg, select_phy);
+ if (retval)
+ return retval;
/* Program the GAHBCFG Register */
retval = dwc2_gahbcfg_init(hsotg);
@@ -451,9 +483,6 @@ void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
writel(0, hsotg->regs + GINTMSK);
writel(0, hsotg->regs + HAINTMSK);
- /* Clear any pending interrupts */
- writel(0xffffffff, hsotg->regs + GINTSTS);
-
/* Enable the common interrupts */
dwc2_enable_common_interrupts(hsotg);
@@ -1912,13 +1941,12 @@ void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
udelay(1);
}
-#define DWC2_PARAM_TEST(a, b, c) ((a) < (b) || (a) > (c))
+#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c))
/* Parameter access functions */
-int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
switch (val) {
case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
@@ -1964,17 +1992,14 @@ int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
break;
}
dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->otg_cap = val;
- return retval;
}
-int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
valid = 0;
@@ -1988,17 +2013,14 @@ int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->dma_enable = val;
- return retval;
}
-int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
!hsotg->hw_params.dma_desc_enable))
@@ -2014,19 +2036,15 @@ int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
val = (hsotg->core_params->dma_enable > 0 &&
hsotg->hw_params.dma_desc_enable);
dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->dma_desc_enable = val;
- return retval;
}
-int dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
- int val)
+void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
+ int val)
{
- int retval = 0;
-
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev,
"Wrong value for host_support_fs_low_power\n");
@@ -2036,17 +2054,14 @@ int dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
val = 0;
dev_dbg(hsotg->dev,
"Setting host_support_fs_low_power to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->host_support_fs_ls_low_power = val;
- return retval;
}
-int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
valid = 0;
@@ -2060,17 +2075,14 @@ int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.enable_dynamic_fifo;
dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->enable_dynamic_fifo = val;
- return retval;
}
-int dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
valid = 0;
@@ -2082,17 +2094,14 @@ int dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.host_rx_fifo_size;
dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->host_rx_fifo_size = val;
- return retval;
}
-int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
valid = 0;
@@ -2105,17 +2114,14 @@ int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
val = hsotg->hw_params.host_nperio_tx_fifo_size;
dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
val);
- retval = -EINVAL;
}
hsotg->core_params->host_nperio_tx_fifo_size = val;
- return retval;
}
-int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
valid = 0;
@@ -2128,17 +2134,14 @@ int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
val = hsotg->hw_params.host_perio_tx_fifo_size;
dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
val);
- retval = -EINVAL;
}
hsotg->core_params->host_perio_tx_fifo_size = val;
- return retval;
}
-int dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
valid = 0;
@@ -2150,17 +2153,14 @@ int dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.max_transfer_size;
dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->max_transfer_size = val;
- return retval;
}
-int dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val < 15 || val > hsotg->hw_params.max_packet_count)
valid = 0;
@@ -2172,17 +2172,14 @@ int dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.max_packet_count;
dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->max_packet_count = val;
- return retval;
}
-int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
if (val < 1 || val > hsotg->hw_params.host_channels)
valid = 0;
@@ -2194,38 +2191,26 @@ int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.host_channels;
dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->host_channels = val;
- return retval;
}
-int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
{
-#ifndef NO_FS_PHY_HW_CHECKS
int valid = 0;
u32 hs_phy_type, fs_phy_type;
-#endif
- int retval = 0;
- if (DWC2_PARAM_TEST(val, DWC2_PHY_TYPE_PARAM_FS,
- DWC2_PHY_TYPE_PARAM_ULPI)) {
+ if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
+ DWC2_PHY_TYPE_PARAM_ULPI)) {
if (val >= 0) {
dev_err(hsotg->dev, "Wrong value for phy_type\n");
dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
}
-#ifndef NO_FS_PHY_HW_CHECKS
valid = 0;
-#else
- val = DWC2_PHY_TYPE_PARAM_FS;
- dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
- retval = -EINVAL;
-#endif
}
-#ifndef NO_FS_PHY_HW_CHECKS
hs_phy_type = hsotg->hw_params.hs_phy_type;
fs_phy_type = hsotg->hw_params.fs_phy_type;
if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
@@ -2254,12 +2239,9 @@ int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
val = DWC2_PHY_TYPE_PARAM_ULPI;
}
dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
- retval = -EINVAL;
}
-#endif
hsotg->core_params->phy_type = val;
- return retval;
}
static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
@@ -2267,12 +2249,11 @@ static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
return hsotg->core_params->phy_type;
}
-int dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev, "Wrong value for speed parameter\n");
dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n");
@@ -2292,20 +2273,17 @@ int dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->speed = val;
- return retval;
}
-int dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
- if (DWC2_PARAM_TEST(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
- DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
+ if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
+ DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
if (val >= 0) {
dev_err(hsotg->dev,
"Wrong value for host_ls_low_power_phy_clk parameter\n");
@@ -2329,36 +2307,28 @@ int dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
: DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
val);
- retval = -EINVAL;
}
hsotg->core_params->host_ls_low_power_phy_clk = val;
- return retval;
}
-int dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
{
- int retval = 0;
-
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n");
}
val = 0;
dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->phy_ulpi_ddr = val;
- return retval;
}
-int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
{
- int retval = 0;
-
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev,
"Wrong value for phy_ulpi_ext_vbus\n");
@@ -2367,17 +2337,14 @@ int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
}
val = 0;
dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->phy_ulpi_ext_vbus = val;
- return retval;
}
-int dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
{
int valid = 0;
- int retval = 0;
switch (hsotg->hw_params.utmi_phy_data_width) {
case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
@@ -2400,72 +2367,52 @@ int dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
val = (hsotg->hw_params.utmi_phy_data_width ==
GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->phy_utmi_width = val;
- return retval;
}
-int dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
{
- int retval = 0;
-
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
}
val = 0;
dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->ulpi_fs_ls = val;
- return retval;
}
-int dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
{
- int retval = 0;
-
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev, "Wrong value for ts_dline\n");
dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
}
val = 0;
dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->ts_dline = val;
- return retval;
}
-int dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
{
-#ifndef NO_FS_PHY_HW_CHECKS
int valid = 1;
-#endif
- int retval = 0;
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
}
-#ifndef NO_FS_PHY_HW_CHECKS
valid = 0;
-#else
- val = 0;
- dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
- retval = -EINVAL;
-#endif
}
-#ifndef NO_FS_PHY_HW_CHECKS
if (val == 1 && !(hsotg->hw_params.i2c_enable))
valid = 0;
@@ -2476,20 +2423,16 @@ int dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.i2c_enable;
dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
- retval = -EINVAL;
}
-#endif
hsotg->core_params->i2c_enable = val;
- return retval;
}
-int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev,
"Wrong value for en_multiple_tx_fifo,\n");
@@ -2509,19 +2452,16 @@ int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.en_multiple_tx_fifo;
dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->en_multiple_tx_fifo = val;
- return retval;
}
-int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- int retval = 0;
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev,
"'%d' invalid for parameter reload_ctl\n", val);
@@ -2540,28 +2480,23 @@ int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
val);
val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->reload_ctl = val;
- return retval;
}
-int dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
{
if (val != -1)
hsotg->core_params->ahbcfg = val;
else
hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
GAHBCFG_HBSTLEN_SHIFT;
- return 0;
}
-int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
+void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
{
- int retval = 0;
-
- if (DWC2_PARAM_TEST(val, 0, 1)) {
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
if (val >= 0) {
dev_err(hsotg->dev,
"'%d' invalid for parameter otg_ver\n", val);
@@ -2570,11 +2505,71 @@ int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
}
val = 0;
dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
- retval = -EINVAL;
}
hsotg->core_params->otg_ver = val;
- return retval;
+}
+
+static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter uframe_sched\n",
+ val);
+ dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
+ }
+ val = 1;
+ dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
+ }
+
+ hsotg->core_params->uframe_sched = val;
+}
+
+/*
+ * This function is called during module initialization to pass module parameters
+ * for the DWC_otg core.
+ */
+void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
+ const struct dwc2_core_params *params)
+{
+ dev_dbg(hsotg->dev, "%s()\n", __func__);
+
+ dwc2_set_param_otg_cap(hsotg, params->otg_cap);
+ dwc2_set_param_dma_enable(hsotg, params->dma_enable);
+ dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
+ dwc2_set_param_host_support_fs_ls_low_power(hsotg,
+ params->host_support_fs_ls_low_power);
+ dwc2_set_param_enable_dynamic_fifo(hsotg,
+ params->enable_dynamic_fifo);
+ dwc2_set_param_host_rx_fifo_size(hsotg,
+ params->host_rx_fifo_size);
+ dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
+ params->host_nperio_tx_fifo_size);
+ dwc2_set_param_host_perio_tx_fifo_size(hsotg,
+ params->host_perio_tx_fifo_size);
+ dwc2_set_param_max_transfer_size(hsotg,
+ params->max_transfer_size);
+ dwc2_set_param_max_packet_count(hsotg,
+ params->max_packet_count);
+ dwc2_set_param_host_channels(hsotg, params->host_channels);
+ dwc2_set_param_phy_type(hsotg, params->phy_type);
+ dwc2_set_param_speed(hsotg, params->speed);
+ dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
+ params->host_ls_low_power_phy_clk);
+ dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
+ dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
+ params->phy_ulpi_ext_vbus);
+ dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
+ dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
+ dwc2_set_param_ts_dline(hsotg, params->ts_dline);
+ dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
+ dwc2_set_param_en_multiple_tx_fifo(hsotg,
+ params->en_multiple_tx_fifo);
+ dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
+ dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
+ dwc2_set_param_otg_ver(hsotg, params->otg_ver);
+ dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
}
/**
@@ -2736,88 +2731,17 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
return 0;
}
-int dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
-{
- int retval = 0;
-
- if (DWC2_PARAM_TEST(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter uframe_sched\n",
- val);
- dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
- }
- val = 1;
- dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
- retval = -EINVAL;
- }
-
- hsotg->core_params->uframe_sched = val;
- return retval;
-}
-
-/*
- * This function is called during module intialization to pass module parameters
- * for the DWC_otg core. It returns non-0 if any parameters are invalid.
- */
-int dwc2_set_parameters(struct dwc2_hsotg *hsotg,
- const struct dwc2_core_params *params)
-{
- int retval = 0;
-
- dev_dbg(hsotg->dev, "%s()\n", __func__);
-
- retval |= dwc2_set_param_otg_cap(hsotg, params->otg_cap);
- retval |= dwc2_set_param_dma_enable(hsotg, params->dma_enable);
- retval |= dwc2_set_param_dma_desc_enable(hsotg,
- params->dma_desc_enable);
- retval |= dwc2_set_param_host_support_fs_ls_low_power(hsotg,
- params->host_support_fs_ls_low_power);
- retval |= dwc2_set_param_enable_dynamic_fifo(hsotg,
- params->enable_dynamic_fifo);
- retval |= dwc2_set_param_host_rx_fifo_size(hsotg,
- params->host_rx_fifo_size);
- retval |= dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
- params->host_nperio_tx_fifo_size);
- retval |= dwc2_set_param_host_perio_tx_fifo_size(hsotg,
- params->host_perio_tx_fifo_size);
- retval |= dwc2_set_param_max_transfer_size(hsotg,
- params->max_transfer_size);
- retval |= dwc2_set_param_max_packet_count(hsotg,
- params->max_packet_count);
- retval |= dwc2_set_param_host_channels(hsotg, params->host_channels);
- retval |= dwc2_set_param_phy_type(hsotg, params->phy_type);
- retval |= dwc2_set_param_speed(hsotg, params->speed);
- retval |= dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
- params->host_ls_low_power_phy_clk);
- retval |= dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
- retval |= dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
- params->phy_ulpi_ext_vbus);
- retval |= dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
- retval |= dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
- retval |= dwc2_set_param_ts_dline(hsotg, params->ts_dline);
- retval |= dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
- retval |= dwc2_set_param_en_multiple_tx_fifo(hsotg,
- params->en_multiple_tx_fifo);
- retval |= dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
- retval |= dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
- retval |= dwc2_set_param_otg_ver(hsotg, params->otg_ver);
- retval |= dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
-
- return retval;
-}
-
u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
{
- return (u16)(hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103);
+ return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
}
-int dwc2_check_core_status(struct dwc2_hsotg *hsotg)
+bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
{
if (readl(hsotg->regs + GSNPSID) == 0xffffffff)
- return -1;
+ return false;
else
- return 0;
+ return true;
}
/**
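The run of hunks above converts the dwc2 parameter setters from int to void: the DWC2_PARAM_TEST() range check becomes DWC2_OUT_OF_BOUNDS(), and an out-of-range value is no longer reported back as -EINVAL but logged with dev_err()/dev_dbg() and then replaced with a default or with whatever the hardware (GHWCFG-derived hw_params) actually supports, so dwc2_set_parameters() can no longer fail. A minimal sketch of how a caller uses the API after this change (values are hypothetical, error handling elided):

	static struct dwc2_core_params my_params;

	static void my_setup(struct dwc2_hsotg *hsotg)
	{
		/* -1 everywhere means "autodetect from hardware" */
		dwc2_set_all_params(&my_params, -1);
		my_params.host_rx_fifo_size = 774;	/* hypothetical override */

		/* Always "succeeds" now; bogus values are clamped and logged. */
		dwc2_set_parameters(hsotg, &my_params);
	}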
diff --git a/drivers/staging/dwc2/core.h b/drivers/usb/dwc2/core.h
index fab718d9b326..648519c024b5 100644
--- a/drivers/staging/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -544,7 +544,7 @@ extern void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg);
extern void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg);
extern u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg);
-extern int dwc2_check_core_status(struct dwc2_hsotg *hsotg);
+extern bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg);
/*
* Common core Functions.
@@ -571,7 +571,7 @@ extern irqreturn_t dwc2_handle_common_intr(int irq, void *dev);
* 1 - SRP Only capable
* 2 - No HNP/SRP capable
*/
-extern int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val);
#define DWC2_CAP_PARAM_HNP_SRP_CAPABLE 0
#define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE 1
#define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE 2
@@ -583,7 +583,7 @@ extern int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val);
* 0 - Slave
* 1 - DMA (default, if available)
*/
-extern int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val);
/*
* When DMA mode is enabled specifies whether to use
@@ -593,7 +593,7 @@ extern int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val);
* 0 - address DMA
* 1 - DMA Descriptor(default, if available)
*/
-extern int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val);
/*
* Specifies the maximum speed of operation in host and device mode.
@@ -603,7 +603,7 @@ extern int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val);
* 0 - High Speed (default)
* 1 - Full Speed
*/
-extern int dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val);
#define DWC2_SPEED_PARAM_HIGH 0
#define DWC2_SPEED_PARAM_FULL 1
@@ -614,8 +614,8 @@ extern int dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val);
* 0 - Don't support low power mode (default)
* 1 - Support low power mode
*/
-extern int dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
- int val);
+extern void dwc2_set_param_host_support_fs_ls_low_power(
+ struct dwc2_hsotg *hsotg, int val);
/*
* Specifies the PHY clock rate in low power mode when connected to a
@@ -626,8 +626,8 @@ extern int dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
* 0 - 48 MHz
* 1 - 6 MHz
*/
-extern int dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg,
- int val);
+extern void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg,
+ int val);
#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0
#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1
@@ -635,50 +635,50 @@ extern int dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg,
* 0 - Use cC FIFO size parameters
* 1 - Allow dynamic FIFO sizing (default)
*/
-extern int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg,
- int val);
+extern void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg,
+ int val);
/*
* Number of 4-byte words in the Rx FIFO in host mode when dynamic
* FIFO sizing is enabled.
* 16 to 32768 (default 1024)
*/
-extern int dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val);
/*
* Number of 4-byte words in the non-periodic Tx FIFO in host mode
* when Dynamic FIFO sizing is enabled in the core.
* 16 to 32768 (default 256)
*/
-extern int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg,
- int val);
+extern void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg,
+ int val);
/*
* Number of 4-byte words in the host periodic Tx FIFO when dynamic
* FIFO sizing is enabled.
* 16 to 32768 (default 256)
*/
-extern int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg,
- int val);
+extern void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg,
+ int val);
/*
* The maximum transfer size supported in bytes.
* 2047 to 65,535 (default 65,535)
*/
-extern int dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val);
/*
* The maximum number of packets in a transfer.
* 15 to 511 (default 511)
*/
-extern int dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val);
/*
* The number of host channel registers to use.
* 1 to 16 (default 11)
* Note: The FPGA configuration supports a maximum of 11 host channels.
*/
-extern int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val);
/*
* Specifies the type of PHY interface to use. By default, the driver
@@ -688,7 +688,7 @@ extern int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val);
* 1 - UTMI+ (default)
* 2 - ULPI
*/
-extern int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val);
#define DWC2_PHY_TYPE_PARAM_FS 0
#define DWC2_PHY_TYPE_PARAM_UTMI 1
#define DWC2_PHY_TYPE_PARAM_ULPI 2
@@ -704,7 +704,7 @@ extern int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val);
*
* 8 or 16 bits (default 16)
*/
-extern int dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val);
/*
* Specifies whether the ULPI operates at double or single
@@ -716,13 +716,13 @@ extern int dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val);
* 1 - double data rate ULPI interface with 4 bit wide data
* bus
*/
-extern int dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val);
/*
* Specifies whether to use the internal or external supply to
* drive the vbus with a ULPI phy.
*/
-extern int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val);
#define DWC2_PHY_ULPI_INTERNAL_VBUS 0
#define DWC2_PHY_ULPI_EXTERNAL_VBUS 1
@@ -732,11 +732,11 @@ extern int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val);
* 0 - No (default)
* 1 - Yes
*/
-extern int dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val);
-extern int dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val);
-extern int dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val);
/*
* Specifies whether dedicated transmit FIFOs are
@@ -744,14 +744,14 @@ extern int dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val);
* 0 - No
* 1 - Yes
*/
-extern int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg,
- int val);
+extern void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg,
+ int val);
-extern int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val);
-extern int dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val);
-extern int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val);
+extern void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val);
/*
* Dump core registers and SPRAM
diff --git a/drivers/staging/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 07cfa2f6aa2b..8205799e6db3 100644
--- a/drivers/staging/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -55,7 +55,6 @@
static const char *dwc2_op_state_str(struct dwc2_hsotg *hsotg)
{
-#ifdef DEBUG
switch (hsotg->op_state) {
case OTG_STATE_A_HOST:
return "a_host";
@@ -70,9 +69,6 @@ static const char *dwc2_op_state_str(struct dwc2_hsotg *hsotg)
default:
return "unknown";
}
-#else
- return "";
-#endif
}
/**
@@ -419,12 +415,10 @@ static u32 dwc2_read_common_intr(struct dwc2_hsotg *hsotg)
gintmsk = readl(hsotg->regs + GINTMSK);
gahbcfg = readl(hsotg->regs + GAHBCFG);
-#ifdef DEBUG
/* If any common interrupts set */
if (gintsts & gintmsk_common)
dev_dbg(hsotg->dev, "gintsts=%08x gintmsk=%08x\n",
gintsts, gintmsk);
-#endif
if (gahbcfg & GAHBCFG_GLBL_INTR_EN)
return gintsts & gintmsk & gintmsk_common;
@@ -451,8 +445,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
u32 gintsts;
irqreturn_t retval = IRQ_NONE;
- if (dwc2_check_core_status(hsotg) < 0) {
- dev_warn(hsotg->dev, "Controller is disconnected\n");
+ if (!dwc2_is_controller_alive(hsotg)) {
+ dev_warn(hsotg->dev, "Controller is dead\n");
goto out;
}
diff --git a/drivers/staging/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 3cfd2d5152c9..4d918ed8d343 100644
--- a/drivers/staging/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -355,6 +355,7 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
unsigned long flags;
u32 intr_mask;
int retval;
+ int dev_speed;
if (!hsotg->flags.b.port_connect_status) {
/* No longer connected */
@@ -362,6 +363,19 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
return -ENODEV;
}
+ dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
+
+ /* Some configurations cannot support LS traffic on a FS root port */
+ if ((dev_speed == USB_SPEED_LOW) &&
+ (hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) &&
+ (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI)) {
+ u32 hprt0 = readl(hsotg->regs + HPRT0);
+ u32 prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
+
+ if (prtspd == HPRT0_SPD_FULL_SPEED)
+ return -ENODEV;
+ }
+
qtd = kzalloc(sizeof(*qtd), mem_flags);
if (!qtd)
return -ENOMEM;
@@ -369,7 +383,7 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
dwc2_hcd_qtd_init(qtd, urb);
retval = dwc2_hcd_qtd_add(hsotg, qtd, (struct dwc2_qh **)ep_handle,
mem_flags);
- if (retval < 0) {
+ if (retval) {
dev_err(hsotg->dev,
"DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
retval);
@@ -378,7 +392,7 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
}
intr_mask = readl(hsotg->regs + GINTMSK);
- if (!(intr_mask & GINTSTS_SOF) && retval == 0) {
+ if (!(intr_mask & GINTSTS_SOF)) {
enum dwc2_transaction_type tr_type;
if (qtd->qh->ep_type == USB_ENDPOINT_XFER_BULK &&
@@ -396,7 +410,7 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
spin_unlock_irqrestore(&hsotg->lock, flags);
}
- return retval;
+ return 0;
}
/* Must be called with interrupt disabled and spinlock held */
@@ -1795,7 +1809,7 @@ int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg)
{
- return (hsotg->op_state == OTG_STATE_B_HOST);
+ return hsotg->op_state == OTG_STATE_B_HOST;
}
static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,
@@ -2551,25 +2565,14 @@ static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
- int is_control = usb_endpoint_xfer_control(&ep->desc);
- int is_out = usb_endpoint_dir_out(&ep->desc);
- int epnum = usb_endpoint_num(&ep->desc);
- struct usb_device *udev;
unsigned long flags;
dev_dbg(hsotg->dev,
"DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n",
ep->desc.bEndpointAddress);
- udev = to_usb_device(hsotg->dev);
-
spin_lock_irqsave(&hsotg->lock, flags);
-
- usb_settoggle(udev, epnum, is_out, 0);
- if (is_control)
- usb_settoggle(udev, epnum, !is_out, 0);
dwc2_hcd_endpoint_reset(hsotg, ep);
-
spin_unlock_irqrestore(&hsotg->lock, flags);
}
@@ -2921,6 +2924,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
if (retval < 0)
goto error3;
+ device_wakeup_enable(hcd->self.controller);
+
dwc2_hcd_dump_state(hsotg);
dwc2_enable_global_interrupts(hsotg);
diff --git a/drivers/staging/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 89a5484f5b74..fdc6d489084a 100644
--- a/drivers/staging/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -452,8 +452,8 @@ static inline u8 dwc2_hcd_is_pipe_out(struct dwc2_hcd_pipe_info *pipe)
extern int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
const struct dwc2_core_params *params);
extern void dwc2_hcd_remove(struct dwc2_hsotg *hsotg);
-extern int dwc2_set_parameters(struct dwc2_hsotg *hsotg,
- const struct dwc2_core_params *params);
+extern void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
+ const struct dwc2_core_params *params);
extern void dwc2_set_all_params(struct dwc2_core_params *params, int value);
extern int dwc2_get_hwparams(struct dwc2_hsotg *hsotg);
diff --git a/drivers/staging/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index c7d434519776..3376177e4d3c 100644
--- a/drivers/staging/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -621,8 +621,8 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
int len = chan->xfer_len;
- if (len > MAX_DMA_DESC_SIZE)
- len = MAX_DMA_DESC_SIZE - chan->max_packet + 1;
+ if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
+ len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);
if (chan->ep_is_in) {
int num_packets;
@@ -1103,8 +1103,10 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
for (i = 0; i < qtd->n_desc; i++) {
if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
desc_num, halt_status,
- &xfer_done))
+ &xfer_done)) {
+ qtd = NULL;
break;
+ }
desc_num++;
}
}
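A worked example for the first hunk above, taking MAX_DMA_DESC_SIZE as 131071 bytes and chan->max_packet as 512: the clamp value is 131071 - (512 - 1) = 130560, exactly 255 max-size packets. The old test only fired for len > 131071, so a 130800-byte request went through unclamped; once the IN path rounds that up to a whole number of packets it becomes 131072, which no longer fits in the descriptor's size field. The new test (len > 130560) clamps such lengths to 130560 up front. The second hunk clears qtd when dwc2_process_non_isoc_desc() fails, presumably because the QTD may already have been freed at that point and must not be touched after the loop breaks out.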
diff --git a/drivers/staging/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index dda18540f5a7..012f17ec1a37 100644
--- a/drivers/staging/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -935,7 +935,7 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
frame_desc->actual_length += len;
- if (chan->align_buf && len) {
+ if (chan->align_buf) {
dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
dma_sync_single_for_cpu(hsotg->dev, qtd->urb->dma,
qtd->urb->length, DMA_FROM_DEVICE);
@@ -2059,8 +2059,8 @@ irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
u32 gintsts, dbg_gintsts;
irqreturn_t retval = IRQ_NONE;
- if (dwc2_check_core_status(hsotg) < 0) {
- dev_warn(hsotg->dev, "Controller is disconnected\n");
+ if (!dwc2_is_controller_alive(hsotg)) {
+ dev_warn(hsotg->dev, "Controller is dead\n");
return retval;
}
diff --git a/drivers/staging/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index f200f1f6e1c6..9540f7e1e20e 100644
--- a/drivers/staging/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -344,25 +344,17 @@ void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
unsigned short utime = qh->usecs;
- int done = 0;
- int i = 0;
- int ret = -1;
+ int i;
- while (!done) {
+ for (i = 0; i < 8; i++) {
/* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
if (utime <= hsotg->frame_usecs[i]) {
hsotg->frame_usecs[i] -= utime;
qh->frame_usecs[i] += utime;
- ret = i;
- done = 1;
- } else {
- i++;
- if (i == 8)
- done = 1;
+ return i;
}
}
-
- return ret;
+ return -ENOSPC;
}
/*
@@ -372,21 +364,14 @@ static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
unsigned short utime = qh->usecs;
unsigned short xtime;
- int t_left = utime;
- int done = 0;
- int i = 0;
+ int t_left;
+ int i;
int j;
- int ret = -1;
-
- while (!done) {
- if (hsotg->frame_usecs[i] <= 0) {
- i++;
- if (i == 8) {
- ret = -1;
- done = 1;
- }
+ int k;
+
+ for (i = 0; i < 8; i++) {
+ if (hsotg->frame_usecs[i] <= 0)
continue;
- }
/*
* we need n consecutive slots so use j as a start slot
@@ -400,50 +385,35 @@ static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
*/
if (xtime + hsotg->frame_usecs[j] < utime) {
if (hsotg->frame_usecs[j] <
- max_uframe_usecs[j]) {
- ret = -1;
- break;
- }
+ max_uframe_usecs[j])
+ continue;
}
if (xtime >= utime) {
- ret = i;
- break;
+ t_left = utime;
+ for (k = i; k < 8; k++) {
+ t_left -= hsotg->frame_usecs[k];
+ if (t_left <= 0) {
+ qh->frame_usecs[k] +=
+ hsotg->frame_usecs[k]
+ + t_left;
+ hsotg->frame_usecs[k] = -t_left;
+ return i;
+ } else {
+ qh->frame_usecs[k] +=
+ hsotg->frame_usecs[k];
+ hsotg->frame_usecs[k] = 0;
+ }
+ }
}
/* add the frame time to x time */
xtime += hsotg->frame_usecs[j];
/* we must have a fully available next frame or break */
if (xtime < utime &&
- hsotg->frame_usecs[j] == max_uframe_usecs[j]) {
- ret = -1;
- break;
- }
- }
- if (ret >= 0) {
- t_left = utime;
- for (j = i; t_left > 0 && j < 8; j++) {
- t_left -= hsotg->frame_usecs[j];
- if (t_left <= 0) {
- qh->frame_usecs[j] +=
- hsotg->frame_usecs[j] + t_left;
- hsotg->frame_usecs[j] = -t_left;
- ret = i;
- done = 1;
- } else {
- qh->frame_usecs[j] +=
- hsotg->frame_usecs[j];
- hsotg->frame_usecs[j] = 0;
- }
- }
- } else {
- i++;
- if (i == 8) {
- ret = -1;
- done = 1;
- }
+ hsotg->frame_usecs[j] == max_uframe_usecs[j])
+ continue;
}
}
-
- return ret;
+ return -ENOSPC;
}
static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
@@ -517,12 +487,12 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
frame = status - 1;
/* Set the new frame up */
- if (frame > -1) {
+ if (frame >= 0) {
qh->sched_frame &= ~0x7;
qh->sched_frame |= (frame & 7);
}
- if (status != -1)
+ if (status > 0)
status = 0;
} else {
status = dwc2_periodic_channel_available(hsotg);
@@ -609,7 +579,7 @@ static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
*/
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
- int status = 0;
+ int status;
u32 intr_mask;
if (dbg_qh(qh))
@@ -617,26 +587,27 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
if (!list_empty(&qh->qh_list_entry))
/* QH already in a schedule */
- return status;
+ return 0;
/* Add the new QH to the appropriate schedule */
if (dwc2_qh_is_non_per(qh)) {
/* Always start in inactive schedule */
list_add_tail(&qh->qh_list_entry,
&hsotg->non_periodic_sched_inactive);
- } else {
- status = dwc2_schedule_periodic(hsotg, qh);
- if (status == 0) {
- if (!hsotg->periodic_qh_count) {
- intr_mask = readl(hsotg->regs + GINTMSK);
- intr_mask |= GINTSTS_SOF;
- writel(intr_mask, hsotg->regs + GINTMSK);
- }
- hsotg->periodic_qh_count++;
- }
+ return 0;
}
- return status;
+ status = dwc2_schedule_periodic(hsotg, qh);
+ if (status)
+ return status;
+ if (!hsotg->periodic_qh_count) {
+ intr_mask = readl(hsotg->regs + GINTMSK);
+ intr_mask |= GINTSTS_SOF;
+ writel(intr_mask, hsotg->regs + GINTMSK);
+ }
+ hsotg->periodic_qh_count++;
+
+ return 0;
}
/**
@@ -661,14 +632,15 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
hsotg->non_periodic_qh_ptr =
hsotg->non_periodic_qh_ptr->next;
list_del_init(&qh->qh_list_entry);
- } else {
- dwc2_deschedule_periodic(hsotg, qh);
- hsotg->periodic_qh_count--;
- if (!hsotg->periodic_qh_count) {
- intr_mask = readl(hsotg->regs + GINTMSK);
- intr_mask &= ~GINTSTS_SOF;
- writel(intr_mask, hsotg->regs + GINTMSK);
- }
+ return;
+ }
+
+ dwc2_deschedule_periodic(hsotg, qh);
+ hsotg->periodic_qh_count--;
+ if (!hsotg->periodic_qh_count) {
+ intr_mask = readl(hsotg->regs + GINTMSK);
+ intr_mask &= ~GINTSTS_SOF;
+ writel(intr_mask, hsotg->regs + GINTMSK);
}
}
@@ -723,6 +695,8 @@ static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
int sched_next_periodic_split)
{
+ u16 frame_number;
+
if (dbg_qh(qh))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
@@ -731,37 +705,36 @@ void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
if (!list_empty(&qh->qtd_list))
/* Add back to inactive non-periodic schedule */
dwc2_hcd_qh_add(hsotg, qh);
+ return;
+ }
+
+ frame_number = dwc2_hcd_get_frame_number(hsotg);
+
+ if (qh->do_split) {
+ dwc2_sched_periodic_split(hsotg, qh, frame_number,
+ sched_next_periodic_split);
} else {
- u16 frame_number = dwc2_hcd_get_frame_number(hsotg);
-
- if (qh->do_split) {
- dwc2_sched_periodic_split(hsotg, qh, frame_number,
- sched_next_periodic_split);
- } else {
- qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
- qh->interval);
- if (dwc2_frame_num_le(qh->sched_frame, frame_number))
- qh->sched_frame = frame_number;
- }
+ qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
+ qh->interval);
+ if (dwc2_frame_num_le(qh->sched_frame, frame_number))
+ qh->sched_frame = frame_number;
+ }
- if (list_empty(&qh->qtd_list)) {
- dwc2_hcd_qh_unlink(hsotg, qh);
- } else {
- /*
- * Remove from periodic_sched_queued and move to
- * appropriate queue
- */
- if ((hsotg->core_params->uframe_sched > 0 &&
- dwc2_frame_num_le(qh->sched_frame, frame_number))
- || (hsotg->core_params->uframe_sched <= 0 &&
- qh->sched_frame == frame_number))
- list_move(&qh->qh_list_entry,
- &hsotg->periodic_sched_ready);
- else
- list_move(&qh->qh_list_entry,
- &hsotg->periodic_sched_inactive);
- }
+ if (list_empty(&qh->qtd_list)) {
+ dwc2_hcd_qh_unlink(hsotg, qh);
+ return;
}
+ /*
+ * Remove from periodic_sched_queued and move to
+ * appropriate queue
+ */
+ if ((hsotg->core_params->uframe_sched > 0 &&
+ dwc2_frame_num_le(qh->sched_frame, frame_number)) ||
+ (hsotg->core_params->uframe_sched <= 0 &&
+ qh->sched_frame == frame_number))
+ list_move(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
+ else
+ list_move(&qh->qh_list_entry, &hsotg->periodic_sched_inactive);
}
/**
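The hcd_queue.c rewrite above replaces the while/done/ret bookkeeping in the microframe scheduler with plain for loops over the eight microframes of a frame, and the "no room" result becomes -ENOSPC instead of -1. Conceptually, hsotg->frame_usecs[] is a per-microframe time budget that starts out at max_uframe_usecs[i]; dwc2_find_single_uframe() takes the first slot with enough headroom for the QH, while dwc2_find_multi_uframe() drains consecutive slots until the QH's usecs are covered. A stripped-down sketch of the single-slot case (names shortened, not the driver's exact code):

	/* Reserve 'usecs' in the first microframe with enough budget left. */
	static int find_single_slot(unsigned short budget[8], unsigned short usecs)
	{
		int i;

		for (i = 0; i < 8; i++) {
			if (usecs <= budget[i]) {
				budget[i] -= usecs;	/* charge this microframe */
				return i;		/* index of the scheduled uframe */
			}
		}
		return -ENOSPC;		/* no single microframe can hold the transfer */
	}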
diff --git a/drivers/staging/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 9c92a3c7588a..9c92a3c7588a 100644
--- a/drivers/staging/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
diff --git a/drivers/staging/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index 3d14c8870fca..c291fca5d21f 100644
--- a/drivers/staging/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -152,7 +152,7 @@ static int dwc2_driver_probe(struct pci_dev *dev,
return retval;
}
-static DEFINE_PCI_DEVICE_TABLE(dwc2_pci_ids) = {
+static const struct pci_device_id dwc2_pci_ids[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, PCI_PRODUCT_ID_HAPS_HSOTG),
},
diff --git a/drivers/staging/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 83ca1053bb1d..eaba547ce26b 100644
--- a/drivers/staging/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "core.h"
@@ -46,6 +47,34 @@
static const char dwc2_driver_name[] = "dwc2";
+static const struct dwc2_core_params params_bcm2835 = {
+ .otg_cap = 0, /* HNP/SRP capable */
+ .otg_ver = 0, /* 1.3 */
+ .dma_enable = 1,
+ .dma_desc_enable = 0,
+ .speed = 0, /* High Speed */
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = 1,
+ .host_rx_fifo_size = 774, /* 774 DWORDs */
+ .host_nperio_tx_fifo_size = 256, /* 256 DWORDs */
+ .host_perio_tx_fifo_size = 512, /* 512 DWORDs */
+ .max_transfer_size = 65535,
+ .max_packet_count = 511,
+ .host_channels = 8,
+ .phy_type = 1, /* UTMI */
+ .phy_utmi_width = 8, /* 8 bits */
+ .phy_ulpi_ddr = 0, /* Single */
+ .phy_ulpi_ext_vbus = 0,
+ .i2c_enable = 0,
+ .ulpi_fs_ls = 0,
+ .host_support_fs_ls_low_power = 0,
+ .host_ls_low_power_phy_clk = 0, /* 48 MHz */
+ .ts_dline = 0,
+ .reload_ctl = 0,
+ .ahbcfg = 0x10,
+ .uframe_sched = 0,
+};
+
/**
* dwc2_driver_remove() - Called when the DWC_otg core is unregistered with the
* DWC_otg driver
@@ -66,6 +95,13 @@ static int dwc2_driver_remove(struct platform_device *dev)
return 0;
}
+static const struct of_device_id dwc2_of_match_table[] = {
+ { .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
+ { .compatible = "snps,dwc2", .data = NULL },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
+
/**
* dwc2_driver_probe() - Called when the DWC_otg core is bound to the DWC_otg
* driver
@@ -80,14 +116,25 @@ static int dwc2_driver_remove(struct platform_device *dev)
*/
static int dwc2_driver_probe(struct platform_device *dev)
{
+ const struct of_device_id *match;
+ const struct dwc2_core_params *params;
+ struct dwc2_core_params defparams;
struct dwc2_hsotg *hsotg;
struct resource *res;
int retval;
int irq;
- struct dwc2_core_params params;
- /* Default all params to autodetect */
- dwc2_set_all_params(&params, -1);
+ if (usb_disabled())
+ return -ENODEV;
+
+ match = of_match_device(dwc2_of_match_table, &dev->dev);
+ if (match && match->data) {
+ params = match->data;
+ } else {
+ /* Default all params to autodetect */
+ dwc2_set_all_params(&defparams, -1);
+ params = &defparams;
+ }
hsotg = devm_kzalloc(&dev->dev, sizeof(*hsotg), GFP_KERNEL);
if (!hsotg)
@@ -118,7 +165,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n",
(unsigned long)res->start, hsotg->regs);
- retval = dwc2_hcd_init(hsotg, irq, &params);
+ retval = dwc2_hcd_init(hsotg, irq, params);
if (retval)
return retval;
@@ -127,15 +174,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
return retval;
}
-static const struct of_device_id dwc2_of_match_table[] = {
- { .compatible = "snps,dwc2" },
- {},
-};
-MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
-
static struct platform_driver dwc2_platform_driver = {
.driver = {
- .name = (char *)dwc2_driver_name,
+ .name = dwc2_driver_name,
.of_match_table = dwc2_of_match_table,
},
.probe = dwc2_driver_probe,
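With the platform glue changes above, dwc2 can now carry per-compatible core parameters: of_match_device() selects the dwc2_core_params pointed to by .data (params_bcm2835 for "brcm,bcm2835-usb"), and a missing match or a NULL .data (the generic "snps,dwc2" entry) falls back to dwc2_set_all_params(&defparams, -1), i.e. full autodetection. A hypothetical sketch of how a further SoC would plug in its own tuning (compatible string and values invented for illustration; a real entry fills every field, as params_bcm2835 does):

	static const struct dwc2_core_params params_mysoc = {
		.otg_cap		= 2,	/* no HNP/SRP */
		.dma_enable		= 1,
		.host_rx_fifo_size	= 525,	/* hypothetical */
		/* remaining fields omitted here only for brevity */
	};

	static const struct of_device_id dwc2_of_match_table[] = {
		{ .compatible = "vendor,mysoc-usb", .data = &params_mysoc },
		{ .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
		{ .compatible = "snps,dwc2", .data = NULL },
		{},
	};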
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 70fc43027a5c..e2c730fc9a90 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -70,6 +70,13 @@ config USB_DWC3_PCI
One such PCIe-based platform is Synopsys' PCIe HAPS model of
this IP.
+config USB_DWC3_KEYSTONE
+ tristate "Texas Instruments Keystone2 Platforms"
+ default USB_DWC3
+ help
+ Support for USB2/3 functionality in TI Keystone2 platforms.
+ Say 'Y' or 'M' here if you have one such device.

+
comment "Debugging features"
config USB_DWC3_DEBUG
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index dd1760145c46..10ac3e72482e 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -32,3 +32,4 @@ endif
obj-$(CONFIG_USB_DWC3_OMAP) += dwc3-omap.o
obj-$(CONFIG_USB_DWC3_EXYNOS) += dwc3-exynos.o
obj-$(CONFIG_USB_DWC3_PCI) += dwc3-pci.o
+obj-$(CONFIG_USB_DWC3_KEYSTONE) += dwc3-keystone.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 74f9cf02da07..a49217ae3533 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -455,9 +455,6 @@ static int dwc3_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
- usb_phy_set_suspend(dwc->usb2_phy, 0);
- usb_phy_set_suspend(dwc->usb3_phy, 0);
-
spin_lock_init(&dwc->lock);
platform_set_drvdata(pdev, dwc);
@@ -488,6 +485,9 @@ static int dwc3_probe(struct platform_device *pdev)
goto err0;
}
+ usb_phy_set_suspend(dwc->usb2_phy, 0);
+ usb_phy_set_suspend(dwc->usb3_phy, 0);
+
ret = dwc3_event_buffers_setup(dwc);
if (ret) {
dev_err(dwc->dev, "failed to setup event buffers\n");
@@ -569,6 +569,8 @@ err2:
dwc3_event_buffers_cleanup(dwc);
err1:
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
dwc3_core_exit(dwc);
err0:
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 8b20c70d91e7..28c8ad79f5e6 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -50,6 +50,7 @@ static int dwc3_exynos_register_phys(struct dwc3_exynos *exynos)
exynos->usb2_phy = pdev;
pdata.type = USB_PHY_TYPE_USB2;
+ pdata.gpio_reset = -1;
ret = platform_device_add_data(exynos->usb2_phy, &pdata, sizeof(pdata));
if (ret)
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
new file mode 100644
index 000000000000..1fad1618df6e
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -0,0 +1,202 @@
+/**
+ * dwc3-keystone.c - Keystone Specific Glue layer
+ *
+ * Copyright (C) 2010-2013 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: WingMan Kwok <w-kwok2@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+
+/* USBSS register offsets */
+#define USBSS_REVISION 0x0000
+#define USBSS_SYSCONFIG 0x0010
+#define USBSS_IRQ_EOI 0x0018
+#define USBSS_IRQSTATUS_RAW_0 0x0020
+#define USBSS_IRQSTATUS_0 0x0024
+#define USBSS_IRQENABLE_SET_0 0x0028
+#define USBSS_IRQENABLE_CLR_0 0x002c
+
+/* IRQ register bits */
+#define USBSS_IRQ_EOI_LINE(n) BIT(n)
+#define USBSS_IRQ_EVENT_ST BIT(0)
+#define USBSS_IRQ_COREIRQ_EN BIT(0)
+#define USBSS_IRQ_COREIRQ_CLR BIT(0)
+
+static u64 kdwc3_dma_mask;
+
+struct dwc3_keystone {
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *usbss;
+};
+
+static inline u32 kdwc3_readl(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+static inline void kdwc3_writel(void __iomem *base, u32 offset, u32 value)
+{
+ writel(value, base + offset);
+}
+
+static void kdwc3_enable_irqs(struct dwc3_keystone *kdwc)
+{
+ u32 val;
+
+ val = kdwc3_readl(kdwc->usbss, USBSS_IRQENABLE_SET_0);
+ val |= USBSS_IRQ_COREIRQ_EN;
+ kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_SET_0, val);
+}
+
+static void kdwc3_disable_irqs(struct dwc3_keystone *kdwc)
+{
+ u32 val;
+
+ val = kdwc3_readl(kdwc->usbss, USBSS_IRQENABLE_SET_0);
+ val &= ~USBSS_IRQ_COREIRQ_EN;
+ kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_SET_0, val);
+}
+
+static irqreturn_t dwc3_keystone_interrupt(int irq, void *_kdwc)
+{
+ struct dwc3_keystone *kdwc = _kdwc;
+
+ kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_CLR_0, USBSS_IRQ_COREIRQ_CLR);
+ kdwc3_writel(kdwc->usbss, USBSS_IRQSTATUS_0, USBSS_IRQ_EVENT_ST);
+ kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_SET_0, USBSS_IRQ_COREIRQ_EN);
+ kdwc3_writel(kdwc->usbss, USBSS_IRQ_EOI, USBSS_IRQ_EOI_LINE(0));
+
+ return IRQ_HANDLED;
+}
+
+static int kdwc3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct dwc3_keystone *kdwc;
+ struct resource *res;
+ int error, irq;
+
+ kdwc = devm_kzalloc(dev, sizeof(*kdwc), GFP_KERNEL);
+ if (!kdwc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, kdwc);
+
+ kdwc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "missing usbss resource\n");
+ return -EINVAL;
+ }
+
+ kdwc->usbss = devm_ioremap_resource(dev, res);
+ if (IS_ERR(kdwc->usbss))
+ return PTR_ERR(kdwc->usbss);
+
+ kdwc3_dma_mask = dma_get_mask(dev);
+ dev->dma_mask = &kdwc3_dma_mask;
+
+ kdwc->clk = devm_clk_get(kdwc->dev, "usb");
+
+ error = clk_prepare_enable(kdwc->clk);
+ if (error < 0) {
+ dev_dbg(kdwc->dev, "unable to enable usb clock, err %d\n",
+ error);
+ return error;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "missing irq\n");
+ goto err_irq;
+ }
+
+ error = devm_request_irq(dev, irq, dwc3_keystone_interrupt, IRQF_SHARED,
+ dev_name(dev), kdwc);
+ if (error) {
+ dev_err(dev, "failed to request IRQ #%d --> %d\n",
+ irq, error);
+ goto err_irq;
+ }
+
+ kdwc3_enable_irqs(kdwc);
+
+ error = of_platform_populate(node, NULL, NULL, dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to create dwc3 core\n");
+ goto err_core;
+ }
+
+ return 0;
+
+err_core:
+ kdwc3_disable_irqs(kdwc);
+err_irq:
+ clk_disable_unprepare(kdwc->clk);
+
+ return error;
+}
+
+static int kdwc3_remove_core(struct device *dev, void *c)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static int kdwc3_remove(struct platform_device *pdev)
+{
+ struct dwc3_keystone *kdwc = platform_get_drvdata(pdev);
+
+ kdwc3_disable_irqs(kdwc);
+ device_for_each_child(&pdev->dev, NULL, kdwc3_remove_core);
+ clk_disable_unprepare(kdwc->clk);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id kdwc3_of_match[] = {
+ { .compatible = "ti,keystone-dwc3", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, kdwc3_of_match);
+
+static struct platform_driver kdwc3_driver = {
+ .probe = kdwc3_probe,
+ .remove = kdwc3_remove,
+ .driver = {
+ .name = "keystone-dwc3",
+ .owner = THIS_MODULE,
+ .of_match_table = kdwc3_of_match,
+ },
+};
+
+module_platform_driver(kdwc3_driver);
+
+MODULE_ALIAS("platform:keystone-dwc3");
+MODULE_AUTHOR("WingMan Kwok <w-kwok2@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare USB3 KEYSTONE Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 7f7ea62e961b..b269dbd47fc4 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dwc3-omap.h>
#include <linux/pm_runtime.h>
@@ -120,9 +119,6 @@
#define USBOTGSS_UTMI_OTG_STATUS_VBUSVALID (1 << 1)
struct dwc3_omap {
- /* device lock */
- spinlock_t lock;
-
struct device *dev;
int irq;
@@ -280,8 +276,6 @@ static irqreturn_t dwc3_omap_interrupt(int irq, void *_omap)
struct dwc3_omap *omap = _omap;
u32 reg;
- spin_lock(&omap->lock);
-
reg = dwc3_omap_read_irqmisc_status(omap);
if (reg & USBOTGSS_IRQMISC_DMADISABLECLR) {
@@ -322,8 +316,6 @@ static irqreturn_t dwc3_omap_interrupt(int irq, void *_omap)
dwc3_omap_write_irq0_status(omap, reg);
- spin_unlock(&omap->lock);
-
return IRQ_HANDLED;
}
@@ -449,8 +441,6 @@ static int dwc3_omap_probe(struct platform_device *pdev)
}
}
- spin_lock_init(&omap->lock);
-
omap->dev = dev;
omap->irq = irq;
omap->base = base;
@@ -535,7 +525,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
edev = of_extcon_get_extcon_dev(dev, 0);
if (IS_ERR(edev)) {
dev_vdbg(dev, "couldn't get extcon device\n");
- ret = PTR_ERR(edev);
+ ret = -EPROBE_DEFER;
goto err2;
}
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 31443aeedcdb..f393c183cc69 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -52,6 +52,7 @@ static int dwc3_pci_register_phys(struct dwc3_pci *glue)
glue->usb2_phy = pdev;
pdata.type = USB_PHY_TYPE_USB2;
+ pdata.gpio_reset = -1;
ret = platform_device_add_data(glue->usb2_phy, &pdata, sizeof(pdata));
if (ret)
@@ -182,7 +183,7 @@ static void dwc3_pci_remove(struct pci_dev *pci)
pci_disable_device(pci);
}
-static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
+static const struct pci_device_id dwc3_pci_id_table[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 02e44fcaf205..2da0a5a2803a 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1650,7 +1650,7 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
dev_vdbg(dwc->dev, "initializing %s\n", dep->name);
if (epnum == 0 || epnum == 1) {
- dep->endpoint.maxpacket = 512;
+ usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
dep->endpoint.maxburst = 1;
dep->endpoint.ops = &dwc3_gadget_ep0_ops;
if (!epnum)
@@ -1658,7 +1658,7 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
} else {
int ret;
- dep->endpoint.maxpacket = 1024;
+ usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
dep->endpoint.max_streams = 15;
dep->endpoint.ops = &dwc3_gadget_ep_ops;
list_add_tail(&dep->endpoint.ep_list,
@@ -2597,6 +2597,12 @@ int dwc3_gadget_init(struct dwc3 *dwc)
dwc->gadget.name = "dwc3-gadget";
/*
+ * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
+ * on ep out.
+ */
+ dwc->gadget.quirk_ep_out_aligned_size = true;
+
+ /*
* REVISIT: Here we should clear all pending IRQs to be
* sure we're starting from a well known location.
*/
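Several of the gadget/UDC hunks below (and the dwc3 gadget hunk above) stop assigning ep.maxpacket directly and go through usb_ep_set_maxpacket_limit() instead, so the hardware ceiling is recorded separately from the currently configured maxpacket. A sketch of what the helper amounts to (based on the gadget core of this kernel generation; treat as illustrative rather than the exact implementation):

	void usb_ep_set_maxpacket_limit(struct usb_ep *ep, unsigned maxpacket_limit)
	{
		ep->maxpacket_limit = maxpacket_limit;	/* hard limit from the UDC */
		ep->maxpacket = maxpacket_limit;	/* current value starts at the limit */
	}

	/* UDC drivers then call it where they used to write ep.maxpacket: */
	usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);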
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index f66d96ad1f51..8154165aa601 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -216,6 +216,13 @@ config USB_FOTG210_UDC
Say "y" to link the driver statically, or "m" to build a
dynamically linked module called "fotg210_udc".
+config USB_GR_UDC
+ tristate "Aeroflex Gaisler GRUSBDC USB Peripheral Controller Driver"
+ depends on HAS_DMA
+ help
+ Select this to support Aeroflex Gaisler GRUSBDC cores from the GRLIB
+ VHDL IP core library.
+
config USB_OMAP
tristate "OMAP USB Device Controller"
depends on ARCH_OMAP1
@@ -294,11 +301,11 @@ config USB_PXA27X
gadget drivers to also be dynamically linked.
config USB_S3C_HSOTG
- tristate "S3C HS/OtG USB Device controller"
- depends on S3C_DEV_USB_HSOTG
+ depends on ARM
+ tristate "Designware/S3C HS/OtG USB Device controller"
help
- The Samsung S3C64XX USB2.0 high-speed gadget controller
- integrated into the S3C64XX series SoC.
+ The Designware USB2.0 high-speed gadget controller
+ integrated into many SoCs.
config USB_S3C2410
tristate "S3C2410 USB Device Controller"
@@ -512,9 +519,6 @@ config USB_U_SERIAL
config USB_U_ETHER
tristate
-config USB_U_RNDIS
- tristate
-
config USB_F_SERIAL
tristate
@@ -542,6 +546,9 @@ config USB_F_RNDIS
config USB_F_MASS_STORAGE
tristate
+config USB_F_FS
+ tristate
+
choice
tristate "USB Gadget Drivers"
default USB_ETH
@@ -642,7 +649,6 @@ config USB_CONFIGFS_RNDIS
depends on USB_CONFIGFS
depends on NET
select USB_U_ETHER
- select USB_U_RNDIS
select USB_F_RNDIS
help
Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol,
@@ -690,6 +696,31 @@ config USB_CONFIGFS_MASS_STORAGE
device (in much the same way as the "loop" device driver),
specified as a module parameter or sysfs option.
+config USB_CONFIGFS_F_LB_SS
+ boolean "Loopback and sourcesink function (for testing)"
+ depends on USB_CONFIGFS
+ select USB_F_SS_LB
+ help
+ Loopback function loops back a configurable number of transfers.
+ Sourcesink function sinks and sources bulk data.
+ It also implements control requests, for "chapter 9" conformance.
+ Make this be the first driver you try using on top of any new
+ USB peripheral controller driver. Then you can use host-side
+ test software, like the "usbtest" driver, to put your hardware
+ and its driver through a basic set of functional tests.
+
+config USB_CONFIGFS_F_FS
+ boolean "Function filesystem (FunctionFS)"
+ depends on USB_CONFIGFS
+ select USB_F_FS
+ help
+ The Function Filesystem (FunctionFS) lets one create USB
+ composite functions in user space in the same way GadgetFS
+ lets one create USB gadgets in user space. This allows creation
+ of composite gadgets such that some of the functions are
+ implemented in kernel space (for instance Ethernet, serial or
+ mass storage) and others are implemented in user space.
+
config USB_ZERO
tristate "Gadget Zero (DEVELOPMENT)"
select USB_LIBCOMPOSITE
@@ -760,7 +791,6 @@ config USB_ETH
depends on NET
select USB_LIBCOMPOSITE
select USB_U_ETHER
- select USB_U_RNDIS
select USB_F_ECM
select USB_F_SUBSET
select CRC32
@@ -864,6 +894,7 @@ config USB_GADGETFS
config USB_FUNCTIONFS
tristate "Function Filesystem"
select USB_LIBCOMPOSITE
+ select USB_F_FS
select USB_FUNCTIONFS_GENERIC if !(USB_FUNCTIONFS_ETH || USB_FUNCTIONFS_RNDIS)
help
The Function Filesystem (FunctionFS) lets one create USB
@@ -883,6 +914,8 @@ config USB_FUNCTIONFS_ETH
bool "Include configuration with CDC ECM (Ethernet)"
depends on USB_FUNCTIONFS && NET
select USB_U_ETHER
+ select USB_F_ECM
+ select USB_F_SUBSET
help
Include a configuration with CDC ECM function (Ethernet) and the
Function Filesystem.
@@ -891,7 +924,7 @@ config USB_FUNCTIONFS_RNDIS
bool "Include configuration with RNDIS (Ethernet)"
depends on USB_FUNCTIONFS && NET
select USB_U_ETHER
- select USB_U_RNDIS
+ select USB_F_RNDIS
help
Include a configuration with RNDIS function (Ethernet) and the Filesystem.
@@ -1065,7 +1098,6 @@ config USB_G_MULTI
config USB_G_MULTI_RNDIS
bool "RNDIS + CDC Serial + Storage configuration"
depends on USB_G_MULTI
- select USB_U_RNDIS
select USB_F_RNDIS
default y
help
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index f1af39603d4d..5f150bc1b4bc 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -7,7 +7,7 @@ ccflags-$(CONFIG_USB_GADGET_VERBOSE) += -DVERBOSE_DEBUG
obj-$(CONFIG_USB_GADGET) += udc-core.o
obj-$(CONFIG_USB_LIBCOMPOSITE) += libcomposite.o
libcomposite-y := usbstring.o config.o epautoconf.o
-libcomposite-y += composite.o functions.o configfs.o
+libcomposite-y += composite.o functions.o configfs.o u_f.o
obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o
obj-$(CONFIG_USB_NET2272) += net2272.o
obj-$(CONFIG_USB_NET2280) += net2280.o
@@ -35,6 +35,7 @@ mv_udc-y := mv_udc_core.o
obj-$(CONFIG_USB_FUSB300) += fusb300_udc.o
obj-$(CONFIG_USB_FOTG210_UDC) += fotg210-udc.o
obj-$(CONFIG_USB_MV_U3D) += mv_u3d_core.o
+obj-$(CONFIG_USB_GR_UDC) += gr_udc.o
# USB Functions
usb_f_acm-y := f_acm.o
@@ -47,8 +48,6 @@ obj-$(CONFIG_USB_F_SERIAL) += usb_f_serial.o
usb_f_obex-y := f_obex.o
obj-$(CONFIG_USB_F_OBEX) += usb_f_obex.o
obj-$(CONFIG_USB_U_ETHER) += u_ether.o
-u_rndis-y := rndis.o
-obj-$(CONFIG_USB_U_RNDIS) += u_rndis.o
usb_f_ncm-y := f_ncm.o
obj-$(CONFIG_USB_F_NCM) += usb_f_ncm.o
usb_f_ecm-y := f_ecm.o
@@ -59,10 +58,12 @@ usb_f_eem-y := f_eem.o
obj-$(CONFIG_USB_F_EEM) += usb_f_eem.o
usb_f_ecm_subset-y := f_subset.o
obj-$(CONFIG_USB_F_SUBSET) += usb_f_ecm_subset.o
-usb_f_rndis-y := f_rndis.o
+usb_f_rndis-y := f_rndis.o rndis.o
obj-$(CONFIG_USB_F_RNDIS) += usb_f_rndis.o
usb_f_mass_storage-y := f_mass_storage.o storage_common.o
obj-$(CONFIG_USB_F_MASS_STORAGE)+= usb_f_mass_storage.o
+usb_f_fs-y := f_fs.o
+obj-$(CONFIG_USB_F_FS) += usb_f_fs.o
#
# USB gadget drivers
diff --git a/drivers/usb/gadget/acm_ms.c b/drivers/usb/gadget/acm_ms.c
index 7bfa134fe0e3..a252444cc0a7 100644
--- a/drivers/usb/gadget/acm_ms.c
+++ b/drivers/usb/gadget/acm_ms.c
@@ -107,7 +107,7 @@ static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
*/
#define fsg_num_buffers CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS
-#endif /* CONFIG_USB_DEBUG */
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 54a1e2954cea..41b062eb4de0 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -40,7 +40,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
@@ -446,7 +445,7 @@ static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
ep->ep.ops = &udc_ep_ops;
INIT_LIST_HEAD(&ep->queue);
- ep->ep.maxpacket = (u16) ~0;
+ usb_ep_set_maxpacket_limit(&ep->ep,(u16) ~0);
/* set NAK */
tmp = readl(&ep->regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_SNAK);
@@ -1564,12 +1563,15 @@ static void udc_setup_endpoints(struct udc *dev)
}
/* EP0 max packet */
if (dev->gadget.speed == USB_SPEED_FULL) {
- dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE;
- dev->ep[UDC_EP0OUT_IX].ep.maxpacket =
- UDC_FS_EP0OUT_MAX_PKT_SIZE;
+ usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
+ UDC_FS_EP0IN_MAX_PKT_SIZE);
+ usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
+ UDC_FS_EP0OUT_MAX_PKT_SIZE);
} else if (dev->gadget.speed == USB_SPEED_HIGH) {
- dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
- dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
+ usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
+ UDC_EP0IN_MAX_PKT_SIZE);
+ usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
+ UDC_EP0OUT_MAX_PKT_SIZE);
}
/*
@@ -3338,7 +3340,7 @@ static int udc_remote_wakeup(struct udc *dev)
}
/* PCI device parameters */
-static DEFINE_PCI_DEVICE_TABLE(pci_id) = {
+static const struct pci_device_id pci_id[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 4cc4fd6d1473..cea8c20a1425 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -21,7 +21,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
@@ -834,7 +833,7 @@ static void udc_reinit(struct at91_udc *udc)
ep->ep.desc = NULL;
ep->stopped = 0;
ep->fifo_bank = 0;
- ep->ep.maxpacket = ep->maxpacket;
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->maxpacket);
ep->creg = (void __iomem *) udc->udp_baseaddr + AT91_UDP_CSR(i);
/* initialize one queue per endpoint */
INIT_LIST_HEAD(&ep->queue);
@@ -1759,15 +1758,15 @@ static int at91udc_probe(struct platform_device *pdev)
/* newer chips have more FIFO memory than rm9200 */
if (cpu_is_at91sam9260() || cpu_is_at91sam9g20()) {
- udc->ep[0].maxpacket = 64;
- udc->ep[3].maxpacket = 64;
- udc->ep[4].maxpacket = 512;
- udc->ep[5].maxpacket = 512;
+ usb_ep_set_maxpacket_limit(&udc->ep[0].ep, 64);
+ usb_ep_set_maxpacket_limit(&udc->ep[3].ep, 64);
+ usb_ep_set_maxpacket_limit(&udc->ep[4].ep, 512);
+ usb_ep_set_maxpacket_limit(&udc->ep[5].ep, 512);
} else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
- udc->ep[3].maxpacket = 64;
+ usb_ep_set_maxpacket_limit(&udc->ep[3].ep, 64);
} else if (cpu_is_at91sam9263()) {
- udc->ep[0].maxpacket = 64;
- udc->ep[3].maxpacket = 64;
+ usb_ep_set_maxpacket_limit(&udc->ep[0].ep, 64);
+ usb_ep_set_maxpacket_limit(&udc->ep[3].ep, 64);
}
udc->udp_baseaddr = ioremap(res->start, resource_size(res));
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 2cb52e0438df..52771d4c44bc 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -326,7 +326,7 @@ static int vbus_is_present(struct usba_udc *udc)
#if defined(CONFIG_ARCH_AT91SAM9RL)
-#include <mach/at91_pmc.h>
+#include <linux/clk/at91_pmc.h>
static void toggle_bias(int is_on)
{
@@ -1012,7 +1012,7 @@ static void nop_release(struct device *dev)
}
-struct usb_gadget usba_gadget_template = {
+static struct usb_gadget usba_gadget_template = {
.ops = &usba_udc_ops,
.max_speed = USB_SPEED_HIGH,
.name = "atmel_usba_udc",
@@ -1904,7 +1904,7 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
ep->ep.ops = &usba_ep_ops;
- ep->ep.maxpacket = ep->fifo_size;
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size);
ep->udc = udc;
INIT_LIST_HEAD(&ep->queue);
@@ -1957,7 +1957,8 @@ static struct usba_ep * usba_udc_pdata(struct platform_device *pdev,
ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
ep->ep.ops = &usba_ep_ops;
ep->ep.name = pdata->ep[i].name;
- ep->fifo_size = ep->ep.maxpacket = pdata->ep[i].fifo_size;
+ ep->fifo_size = pdata->ep[i].fifo_size;
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size);
ep->udc = udc;
INIT_LIST_HEAD(&ep->queue);
ep->nr_banks = pdata->ep[i].nr_banks;
@@ -1995,14 +1996,12 @@ static int __init usba_udc_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- pclk = clk_get(&pdev->dev, "pclk");
+ pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(pclk))
return PTR_ERR(pclk);
- hclk = clk_get(&pdev->dev, "hclk");
- if (IS_ERR(hclk)) {
- ret = PTR_ERR(hclk);
- goto err_get_hclk;
- }
+ hclk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(hclk))
+ return PTR_ERR(hclk);
spin_lock_init(&udc->lock);
udc->pdev = pdev;
@@ -2011,17 +2010,17 @@ static int __init usba_udc_probe(struct platform_device *pdev)
udc->vbus_pin = -ENODEV;
ret = -ENOMEM;
- udc->regs = ioremap(regs->start, resource_size(regs));
+ udc->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
if (!udc->regs) {
dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n");
- goto err_map_regs;
+ return ret;
}
dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n",
(unsigned long)regs->start, udc->regs);
- udc->fifo = ioremap(fifo->start, resource_size(fifo));
+ udc->fifo = devm_ioremap(&pdev->dev, fifo->start, resource_size(fifo));
if (!udc->fifo) {
dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n");
- goto err_map_fifo;
+ return ret;
}
dev_info(&pdev->dev, "FIFO at 0x%08lx mapped at %p\n",
(unsigned long)fifo->start, udc->fifo);
@@ -2032,7 +2031,7 @@ static int __init usba_udc_probe(struct platform_device *pdev)
ret = clk_prepare_enable(pclk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable pclk, aborting.\n");
- goto err_clk_enable;
+ return ret;
}
toggle_bias(0);
usba_writel(udc, CTRL, USBA_DISABLE_MASK);
@@ -2043,22 +2042,22 @@ static int __init usba_udc_probe(struct platform_device *pdev)
else
udc->usba_ep = usba_udc_pdata(pdev, udc);
- if (IS_ERR(udc->usba_ep)) {
- ret = PTR_ERR(udc->usba_ep);
- goto err_alloc_ep;
- }
+ if (IS_ERR(udc->usba_ep))
+ return PTR_ERR(udc->usba_ep);
- ret = request_irq(irq, usba_udc_irq, 0, "atmel_usba_udc", udc);
+ ret = devm_request_irq(&pdev->dev, irq, usba_udc_irq, 0,
+ "atmel_usba_udc", udc);
if (ret) {
dev_err(&pdev->dev, "Cannot request irq %d (error %d)\n",
irq, ret);
- goto err_request_irq;
+ return ret;
}
udc->irq = irq;
if (gpio_is_valid(udc->vbus_pin)) {
if (!devm_gpio_request(&pdev->dev, udc->vbus_pin, "atmel_usba_udc")) {
- ret = request_irq(gpio_to_irq(udc->vbus_pin),
+ ret = devm_request_irq(&pdev->dev,
+ gpio_to_irq(udc->vbus_pin),
usba_vbus_irq, 0,
"atmel_usba_udc", udc);
if (ret) {
@@ -2077,31 +2076,13 @@ static int __init usba_udc_probe(struct platform_device *pdev)
ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
if (ret)
- goto err_add_udc;
+ return ret;
usba_init_debugfs(udc);
for (i = 1; i < udc->num_ep; i++)
usba_ep_init_debugfs(udc, &udc->usba_ep[i]);
return 0;
-
-err_add_udc:
- if (gpio_is_valid(udc->vbus_pin))
- free_irq(gpio_to_irq(udc->vbus_pin), udc);
-
- free_irq(irq, udc);
-err_request_irq:
-err_alloc_ep:
-err_clk_enable:
- iounmap(udc->fifo);
-err_map_fifo:
- iounmap(udc->regs);
-err_map_regs:
- clk_put(hclk);
-err_get_hclk:
- clk_put(pclk);
-
- return ret;
}
static int __exit usba_udc_remove(struct platform_device *pdev)
@@ -2117,16 +2098,6 @@ static int __exit usba_udc_remove(struct platform_device *pdev)
usba_ep_cleanup_debugfs(&udc->usba_ep[i]);
usba_cleanup_debugfs(udc);
- if (gpio_is_valid(udc->vbus_pin)) {
- free_irq(gpio_to_irq(udc->vbus_pin), udc);
- }
-
- free_irq(udc->irq, udc);
- iounmap(udc->fifo);
- iounmap(udc->regs);
- clk_put(udc->hclk);
- clk_put(udc->pclk);
-
return 0;
}
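The usba_udc_probe()/usba_udc_remove() changes above lean on device-managed (devm_*) resources: clocks, MMIO mappings and IRQs obtained through devm_* are released by the driver core when probe fails or the device is unbound, which is why the goto-label unwind ladder and most of the remove() teardown can simply be deleted. A minimal sketch of the pattern, with a hypothetical driver and invented names:

	#include <linux/clk.h>
	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static irqreturn_t example_irq(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *regs;
		struct clk *pclk;
		int irq, ret;

		pclk = devm_clk_get(&pdev->dev, "pclk");
		if (IS_ERR(pclk))
			return PTR_ERR(pclk);		/* nothing to undo by hand */

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		regs = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(regs))
			return PTR_ERR(regs);		/* clock is released for us */

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, example_irq, 0,
				       dev_name(&pdev->dev), pdev);
		if (ret)
			return ret;			/* mapping and clock, too */

		return 0;
	}

Note the patch itself keeps devm_ioremap() rather than devm_ioremap_resource(); the sketch above only shows the general devm idiom.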
diff --git a/drivers/usb/gadget/bcm63xx_udc.c b/drivers/usb/gadget/bcm63xx_udc.c
index c58fcf1ebe41..888fbb43b338 100644
--- a/drivers/usb/gadget/bcm63xx_udc.c
+++ b/drivers/usb/gadget/bcm63xx_udc.c
@@ -19,7 +19,6 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kconfig.h>
@@ -549,7 +548,7 @@ static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
if (idx < 0)
continue;
- udc->bep[idx].ep.maxpacket = max_pkt;
+ usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);
val = (idx << USBD_CSR_EP_LOG_SHIFT) |
(cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
@@ -943,7 +942,7 @@ static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
bep->ep.ops = &bcm63xx_udc_ep_ops;
list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
bep->halted = 0;
- bep->ep.maxpacket = BCM63XX_MAX_CTRL_PKT;
+ usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
bep->udc = udc;
bep->ep.desc = NULL;
INIT_LIST_HEAD(&bep->queue);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 2018ba1a2172..d742bed7a5fa 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1452,8 +1452,22 @@ unknown:
struct usb_configuration *c;
c = cdev->config;
- if (c && c->setup)
+ if (!c)
+ goto done;
+
+ /* try current config's setup */
+ if (c->setup) {
value = c->setup(c, ctrl);
+ goto done;
+ }
+
+ /* try the only function in the current config */
+ if (!list_is_singular(&c->functions))
+ goto done;
+ f = list_first_entry(&c->functions, struct usb_function,
+ list);
+ if (f->setup)
+ value = f->setup(f, ctrl);
}
goto done;
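With this change an otherwise unhandled control request is delegated to the single function of the active configuration, using the plain list helpers from <linux/list.h>: list_is_singular() guarantees exactly one entry, so list_first_entry() is safe. A tiny illustration of that idiom (hypothetical structure, for illustration only):

	#include <linux/list.h>

	struct item {
		struct list_head list;
		int id;
	};

	static struct item *sole_member_or_null(struct list_head *head)
	{
		/* true only when exactly one entry is on the list, so the
		 * list_first_entry() below can never touch an empty list */
		if (!list_is_singular(head))
			return NULL;
		return list_first_entry(head, struct item, list);
	}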
@@ -1714,7 +1728,7 @@ composite_resume(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
struct usb_function *f;
- u8 maxpower;
+ u16 maxpower;
/* REVISIT: should we have config level
* suspend/resume callbacks?
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 25885112fa35..7d1cc01796b6 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -4,6 +4,7 @@
#include <linux/device.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget_configfs.h>
+#include "configfs.h"
int check_user_usb_string(const char *name,
struct usb_gadget_strings *stringtab_dev)
@@ -564,6 +565,13 @@ static struct config_group *function_make(
usb_put_function_instance(fi);
return ERR_PTR(ret);
}
+ if (fi->set_inst_name) {
+ ret = fi->set_inst_name(fi, instance_name);
+ if (ret) {
+ usb_put_function_instance(fi);
+ return ERR_PTR(ret);
+ }
+ }
gi = container_of(group, struct gadget_info, functions_group);
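The new set_inst_name hook lets a function driver learn the instance name user space chose when it created functions/<func>.<instance> in configfs; f_fs uses it later in this patch (ffs_set_inst_name) to name its ffs_dev. A hypothetical minimal implementation, shown only to illustrate the callback's contract (names invented; real users also validate length and take their own locks):

	#include <linux/slab.h>
	#include <linux/usb/composite.h>

	struct example_opts {
		struct usb_function_instance func_inst;
		char *name;
	};

	static int example_set_inst_name(struct usb_function_instance *fi,
					 const char *name)
	{
		struct example_opts *opts =
			container_of(fi, struct example_opts, func_inst);

		/* remember the configfs instance name for later use */
		opts->name = kstrdup(name, GFP_KERNEL);
		return opts->name ? 0 : -ENOMEM;
	}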
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 8f4dae310923..8c06430dcc47 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -951,7 +951,7 @@ static void init_dummy_udc_hw(struct dummy *dum)
list_add_tail(&ep->ep.ep_list, &dum->gadget.ep_list);
ep->halted = ep->wedged = ep->already_seen =
ep->setup_stage = 0;
- ep->ep.maxpacket = ~0;
+ usb_ep_set_maxpacket_limit(&ep->ep, ~0);
ep->ep.max_streams = 16;
ep->last_io = jiffies;
ep->gadget = &dum->gadget;
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index a777f7bd11b4..0567cca1465e 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
@@ -58,7 +57,7 @@ ep_matches (
return 0;
/* only support ep0 for portable CONTROL traffic */
- type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ type = usb_endpoint_type(desc);
if (USB_ENDPOINT_XFER_CONTROL == type)
return 0;
@@ -129,7 +128,7 @@ ep_matches (
* and wants to know the maximum possible, provide the info.
*/
if (desc->wMaxPacketSize == 0)
- desc->wMaxPacketSize = cpu_to_le16(ep->maxpacket);
+ desc->wMaxPacketSize = cpu_to_le16(ep->maxpacket_limit);
/* endpoint maxpacket size is an input parameter, except for bulk
* where it's an output parameter representing the full speed limit.
@@ -145,7 +144,7 @@ ep_matches (
case USB_ENDPOINT_XFER_ISOC:
/* ISO: limit 1023 bytes full speed, 1024 high/super speed */
- if (ep->maxpacket < max)
+ if (ep->maxpacket_limit < max)
return 0;
if (!gadget_is_dualspeed(gadget) && max > 1023)
return 0;
@@ -178,7 +177,7 @@ ep_matches (
/* report (variable) full speed bulk maxpacket */
if ((USB_ENDPOINT_XFER_BULK == type) && !ep_comp) {
- int size = ep->maxpacket;
+ int size = ep->maxpacket_limit;
/* min() doesn't work on bitfields with gcc-3.5 */
if (size > 64)
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 8d9e6f7e8f1a..798760fa7e70 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -691,7 +691,6 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
int status;
struct usb_ep *ep;
-#ifndef USBF_ECM_INCLUDED
struct f_ecm_opts *ecm_opts;
if (!can_support_ecm(cdev->gadget))
@@ -715,7 +714,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
return status;
ecm_opts->bound = true;
}
-#endif
+
us = usb_gstrings_attach(cdev, ecm_strings,
ARRAY_SIZE(ecm_string_defs));
if (IS_ERR(us))
@@ -834,74 +833,6 @@ fail:
return status;
}
-#ifdef USBF_ECM_INCLUDED
-
-static void
-ecm_old_unbind(struct usb_configuration *c, struct usb_function *f)
-{
- struct f_ecm *ecm = func_to_ecm(f);
-
- DBG(c->cdev, "ecm unbind\n");
-
- usb_free_all_descriptors(f);
-
- kfree(ecm->notify_req->buf);
- usb_ep_free_request(ecm->notify, ecm->notify_req);
- kfree(ecm);
-}
-
-/**
- * ecm_bind_config - add CDC Ethernet network link to a configuration
- * @c: the configuration to support the network link
- * @ethaddr: a buffer in which the ethernet address of the host side
- * side of the link was recorded
- * @dev: eth_dev structure
- * Context: single threaded during gadget setup
- *
- * Returns zero on success, else negative errno.
- *
- * Caller must have called @gether_setup(). Caller is also responsible
- * for calling @gether_cleanup() before module unload.
- */
-int
-ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
- struct eth_dev *dev)
-{
- struct f_ecm *ecm;
- int status;
-
- if (!can_support_ecm(c->cdev->gadget) || !ethaddr)
- return -EINVAL;
-
- /* allocate and initialize one new instance */
- ecm = kzalloc(sizeof *ecm, GFP_KERNEL);
- if (!ecm)
- return -ENOMEM;
-
- /* export host's Ethernet address in CDC format */
- snprintf(ecm->ethaddr, sizeof ecm->ethaddr, "%pm", ethaddr);
- ecm_string_defs[1].s = ecm->ethaddr;
-
- ecm->port.ioport = dev;
- ecm->port.cdc_filter = DEFAULT_FILTER;
-
- ecm->port.func.name = "cdc_ethernet";
- /* descriptors are per-instance copies */
- ecm->port.func.bind = ecm_bind;
- ecm->port.func.unbind = ecm_old_unbind;
- ecm->port.func.set_alt = ecm_set_alt;
- ecm->port.func.get_alt = ecm_get_alt;
- ecm->port.func.setup = ecm_setup;
- ecm->port.func.disable = ecm_disable;
-
- status = usb_add_function(c, &ecm->port.func);
- if (status)
- kfree(ecm);
- return status;
-}
-
-#else
-
static inline struct f_ecm_opts *to_f_ecm_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_ecm_opts,
@@ -1040,5 +971,3 @@ static struct usb_function *ecm_alloc(struct usb_function_instance *fi)
DECLARE_USB_FUNCTION_INIT(ecm, ecm_alloc_inst, ecm_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");
-
-#endif
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 241fc873ffa4..306a2b52125c 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -22,218 +22,42 @@
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/hid.h>
+#include <linux/module.h>
#include <asm/unaligned.h>
#include <linux/usb/composite.h>
#include <linux/usb/functionfs.h>
+#include "u_fs.h"
+#include "configfs.h"
#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */
-
-/* Debugging ****************************************************************/
-
-#ifdef VERBOSE_DEBUG
-#ifndef pr_vdebug
-# define pr_vdebug pr_debug
-#endif /* pr_vdebug */
-# define ffs_dump_mem(prefix, ptr, len) \
- print_hex_dump_bytes(pr_fmt(prefix ": "), DUMP_PREFIX_NONE, ptr, len)
-#else
-#ifndef pr_vdebug
-# define pr_vdebug(...) do { } while (0)
-#endif /* pr_vdebug */
-# define ffs_dump_mem(prefix, ptr, len) do { } while (0)
-#endif /* VERBOSE_DEBUG */
-
-#define ENTER() pr_vdebug("%s()\n", __func__)
-
-
-/* The data structure and setup file ****************************************/
-
-enum ffs_state {
- /*
- * Waiting for descriptors and strings.
- *
- * In this state no open(2), read(2) or write(2) on epfiles
- * may succeed (which should not be the problem as there
- * should be no such files opened in the first place).
- */
- FFS_READ_DESCRIPTORS,
- FFS_READ_STRINGS,
-
- /*
- * We've got descriptors and strings. We are or have called
- * functionfs_ready_callback(). functionfs_bind() may have
- * been called but we don't know.
- *
- * This is the only state in which operations on epfiles may
- * succeed.
- */
- FFS_ACTIVE,
-
- /*
- * All endpoints have been closed. This state is also set if
- * we encounter an unrecoverable error. The only
- * unrecoverable error is situation when after reading strings
- * from user space we fail to initialise epfiles or
- * functionfs_ready_callback() returns with error (<0).
- *
- * In this state no open(2), read(2) or write(2) (both on ep0
- * as well as epfile) may succeed (at this point epfiles are
- * unlinked and all closed so this is not a problem; ep0 is
- * also closed but ep0 file exists and so open(2) on ep0 must
- * fail).
- */
- FFS_CLOSING
-};
-
-
-enum ffs_setup_state {
- /* There is no setup request pending. */
- FFS_NO_SETUP,
- /*
- * User has read events and there was a setup request event
- * there. The next read/write on ep0 will handle the
- * request.
- */
- FFS_SETUP_PENDING,
- /*
- * There was event pending but before user space handled it
- * some other event was introduced which canceled existing
- * setup. If this state is set read/write on ep0 return
- * -EIDRM. This state is only set when adding event.
- */
- FFS_SETUP_CANCELED
-};
-
-
-
-struct ffs_epfile;
-struct ffs_function;
-
-struct ffs_data {
- struct usb_gadget *gadget;
-
- /*
- * Protect access read/write operations, only one read/write
- * at a time. As a consequence protects ep0req and company.
- * While setup request is being processed (queued) this is
- * held.
- */
- struct mutex mutex;
-
- /*
- * Protect access to endpoint related structures (basically
- * usb_ep_queue(), usb_ep_dequeue(), etc. calls) except for
- * endpoint zero.
- */
- spinlock_t eps_lock;
-
- /*
- * XXX REVISIT do we need our own request? Since we are not
- * handling setup requests immediately user space may be so
- * slow that another setup will be sent to the gadget but this
- * time not to us but another function and then there could be
- * a race. Is that the case? Or maybe we can use cdev->req
- * after all, maybe we just need some spinlock for that?
- */
- struct usb_request *ep0req; /* P: mutex */
- struct completion ep0req_completion; /* P: mutex */
- int ep0req_status; /* P: mutex */
-
- /* reference counter */
- atomic_t ref;
- /* how many files are opened (EP0 and others) */
- atomic_t opened;
-
- /* EP0 state */
- enum ffs_state state;
-
- /*
- * Possible transitions:
- * + FFS_NO_SETUP -> FFS_SETUP_PENDING -- P: ev.waitq.lock
- * happens only in ep0 read which is P: mutex
- * + FFS_SETUP_PENDING -> FFS_NO_SETUP -- P: ev.waitq.lock
- * happens only in ep0 i/o which is P: mutex
- * + FFS_SETUP_PENDING -> FFS_SETUP_CANCELED -- P: ev.waitq.lock
- * + FFS_SETUP_CANCELED -> FFS_NO_SETUP -- cmpxchg
- */
- enum ffs_setup_state setup_state;
-
-#define FFS_SETUP_STATE(ffs) \
- ((enum ffs_setup_state)cmpxchg(&(ffs)->setup_state, \
- FFS_SETUP_CANCELED, FFS_NO_SETUP))
-
- /* Events & such. */
- struct {
- u8 types[4];
- unsigned short count;
- /* XXX REVISIT need to update it in some places, or do we? */
- unsigned short can_stall;
- struct usb_ctrlrequest setup;
-
- wait_queue_head_t waitq;
- } ev; /* the whole structure, P: ev.waitq.lock */
-
- /* Flags */
- unsigned long flags;
-#define FFS_FL_CALL_CLOSED_CALLBACK 0
-#define FFS_FL_BOUND 1
-
- /* Active function */
- struct ffs_function *func;
-
- /*
- * Device name, write once when file system is mounted.
- * Intended for user to read if she wants.
- */
- const char *dev_name;
- /* Private data for our user (ie. gadget). Managed by user. */
- void *private_data;
-
- /* filled by __ffs_data_got_descs() */
- /*
- * Real descriptors are 16 bytes after raw_descs (so you need
- * to skip 16 bytes (ie. ffs->raw_descs + 16) to get to the
- * first full speed descriptor). raw_descs_length and
- * raw_fs_descs_length do not have those 16 bytes added.
- */
- const void *raw_descs;
- unsigned raw_descs_length;
- unsigned raw_fs_descs_length;
- unsigned fs_descs_count;
- unsigned hs_descs_count;
-
- unsigned short strings_count;
- unsigned short interfaces_count;
- unsigned short eps_count;
- unsigned short _pad1;
-
- /* filled by __ffs_data_got_strings() */
- /* ids in stringtabs are set in functionfs_bind() */
- const void *raw_strings;
- struct usb_gadget_strings **stringtabs;
-
- /*
- * File system's super block, write once when file system is
- * mounted.
- */
- struct super_block *sb;
-
- /* File permissions, written once when fs is mounted */
- struct ffs_file_perms {
- umode_t mode;
- kuid_t uid;
- kgid_t gid;
- } file_perms;
-
- /*
- * The endpoint files, filled by ffs_epfiles_create(),
- * destroyed by ffs_epfiles_destroy().
- */
- struct ffs_epfile *epfiles;
-};
+/* Variable Length Array Macros **********************************************/
+#define vla_group(groupname) size_t groupname##__next = 0
+#define vla_group_size(groupname) groupname##__next
+
+#define vla_item(groupname, type, name, n) \
+ size_t groupname##_##name##__offset = ({ \
+ size_t align_mask = __alignof__(type) - 1; \
+ size_t offset = (groupname##__next + align_mask) & ~align_mask;\
+ size_t size = (n) * sizeof(type); \
+ groupname##__next = offset + size; \
+ offset; \
+ })
+
+#define vla_item_with_sz(groupname, type, name, n) \
+ size_t groupname##_##name##__sz = (n) * sizeof(type); \
+ size_t groupname##_##name##__offset = ({ \
+ size_t align_mask = __alignof__(type) - 1; \
+ size_t offset = (groupname##__next + align_mask) & ~align_mask;\
+ size_t size = groupname##_##name##__sz; \
+ groupname##__next = offset + size; \
+ offset; \
+ })
+
+#define vla_ptr(ptr, groupname, name) \
+ ((void *) ((char *)ptr + groupname##_##name##__offset))
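These vla_* macros replace the gcc variable-length struct members removed further down (in __ffs_data_got_strings() and the function-bind path): each vla_item() records an aligned offset into one growing group, and vla_ptr() turns that offset back into a typed pointer inside a single kmalloc()'d chunk. A short usage sketch mirroring the later callers in this file (illustrative function, not part of the patch):

	/* Illustration only: carve one allocation into typed sub-arrays. */
	static int example_layout(unsigned n, unsigned raw_len)
	{
		vla_group(d);			/* d__next tracks the total size */
		vla_item(d, struct usb_string, strings, n);
		vla_item(d, char, raw, raw_len);

		char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
		struct usb_string *strings;
		char *raw;

		if (!vlabuf)
			return -ENOMEM;

		strings = vla_ptr(vlabuf, d, strings);	/* typed views into the chunk */
		raw = vla_ptr(vlabuf, d, raw);

		/* ... fill strings[] and raw[]; one kfree() releases everything ... */
		kfree(vlabuf);
		return 0;
	}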
/* Reference counter handling */
static void ffs_data_get(struct ffs_data *ffs);
@@ -274,15 +98,12 @@ static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
return container_of(f, struct ffs_function, function);
}
-static void ffs_func_free(struct ffs_function *func);
static void ffs_func_eps_disable(struct ffs_function *func);
static int __must_check ffs_func_eps_enable(struct ffs_function *func);
static int ffs_func_bind(struct usb_configuration *,
struct usb_function *);
-static void ffs_func_unbind(struct usb_configuration *,
- struct usb_function *);
static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
static void ffs_func_disable(struct usb_function *);
static int ffs_func_setup(struct usb_function *,
@@ -335,6 +156,17 @@ ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
const struct file_operations *fops,
struct dentry **dentry_p);
+/* Devices management *******************************************************/
+
+DEFINE_MUTEX(ffs_lock);
+EXPORT_SYMBOL(ffs_lock);
+
+static struct ffs_dev *ffs_find_dev(const char *name);
+static int _ffs_name_dev(struct ffs_dev *dev, const char *name);
+static void *ffs_acquire_dev(const char *dev_name);
+static void ffs_release_dev(struct ffs_data *ffs_data);
+static int ffs_ready(struct ffs_data *ffs);
+static void ffs_closed(struct ffs_data *ffs);
/* Misc helper functions ****************************************************/
@@ -460,7 +292,7 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
ffs->state = FFS_ACTIVE;
mutex_unlock(&ffs->mutex);
- ret = functionfs_ready_callback(ffs);
+ ret = ffs_ready(ffs);
if (unlikely(ret < 0)) {
ffs->state = FFS_CLOSING;
return ret;
@@ -753,78 +585,71 @@ static ssize_t ffs_epfile_io(struct file *file,
char __user *buf, size_t len, int read)
{
struct ffs_epfile *epfile = file->private_data;
+ struct usb_gadget *gadget = epfile->ffs->gadget;
struct ffs_ep *ep;
char *data = NULL;
- ssize_t ret;
+ ssize_t ret, data_len;
int halt;
- goto first_try;
- do {
- spin_unlock_irq(&epfile->ffs->eps_lock);
- mutex_unlock(&epfile->mutex);
+ /* Are we still active? */
+ if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
+ ret = -ENODEV;
+ goto error;
+ }
-first_try:
- /* Are we still active? */
- if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
- ret = -ENODEV;
+ /* Wait for endpoint to be enabled */
+ ep = epfile->ep;
+ if (!ep) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
goto error;
}
- /* Wait for endpoint to be enabled */
- ep = epfile->ep;
- if (!ep) {
- if (file->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- goto error;
- }
-
- if (wait_event_interruptible(epfile->wait,
- (ep = epfile->ep))) {
- ret = -EINTR;
- goto error;
- }
- }
-
- /* Do we halt? */
- halt = !read == !epfile->in;
- if (halt && epfile->isoc) {
- ret = -EINVAL;
+ ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));
+ if (ret) {
+ ret = -EINTR;
goto error;
}
+ }
- /* Allocate & copy */
- if (!halt && !data) {
- data = kzalloc(len, GFP_KERNEL);
- if (unlikely(!data))
- return -ENOMEM;
+ /* Do we halt? */
+ halt = !read == !epfile->in;
+ if (halt && epfile->isoc) {
+ ret = -EINVAL;
+ goto error;
+ }
- if (!read &&
- unlikely(__copy_from_user(data, buf, len))) {
- ret = -EFAULT;
- goto error;
- }
- }
+ /* Allocate & copy */
+ if (!halt) {
+ /*
+ * Controller may require buffer size to be aligned to
+ * maxpacketsize of an out endpoint.
+ */
+ data_len = read ? usb_ep_align_maybe(gadget, ep->ep, len) : len;
+
+ data = kmalloc(data_len, GFP_KERNEL);
+ if (unlikely(!data))
+ return -ENOMEM;
- /* We will be using request */
- ret = ffs_mutex_lock(&epfile->mutex,
- file->f_flags & O_NONBLOCK);
- if (unlikely(ret))
+ if (!read && unlikely(copy_from_user(data, buf, len))) {
+ ret = -EFAULT;
goto error;
+ }
+ }
- /*
- * We're called from user space, we can use _irq rather then
- * _irqsave
- */
- spin_lock_irq(&epfile->ffs->eps_lock);
+ /* We will be using request */
+ ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
+ if (unlikely(ret))
+ goto error;
- /*
- * While we were acquiring mutex endpoint got disabled
- * or changed?
- */
- } while (unlikely(epfile->ep != ep));
+ spin_lock_irq(&epfile->ffs->eps_lock);
- /* Halt */
- if (unlikely(halt)) {
+ if (epfile->ep != ep) {
+ /* In the meantime, endpoint got disabled or changed. */
+ ret = -ESHUTDOWN;
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ } else if (halt) {
+ /* Halt */
if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
usb_ep_set_halt(ep->ep);
spin_unlock_irq(&epfile->ffs->eps_lock);
@@ -837,7 +662,7 @@ first_try:
req->context = &done;
req->complete = ffs_epfile_io_complete;
req->buf = data;
- req->length = len;
+ req->length = data_len;
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
@@ -849,9 +674,17 @@ first_try:
ret = -EINTR;
usb_ep_dequeue(ep->ep, req);
} else {
+ /*
+ * XXX We may end up silently dropping data here.
+ * Since data_len (i.e. req->length) may be bigger
+ * than len (after being rounded up to maxpacketsize),
+ * we may end up with more data than user space has
+ * space for.
+ */
ret = ep->status;
if (read && ret > 0 &&
- unlikely(copy_to_user(buf, data, ret)))
+ unlikely(copy_to_user(buf, data,
+ min_t(size_t, ret, len))))
ret = -EFAULT;
}
}
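The rewritten ffs_epfile_io() above sizes its bounce buffer with usb_ep_align_maybe() because some UDC controllers can only DMA OUT transfers into buffers that are a whole multiple of the endpoint's maxpacket size; on completion the copy back to user space is clamped with min_t() so the caller never receives more than it asked for. A rough sketch of what the alignment helper amounts to, assuming the quirk_ep_out_aligned_size gadget flag introduced alongside it (the real helper lives in include/linux/usb/gadget.h):

	/* Sketch: round an OUT request up to whole packets when the UDC needs it. */
	static inline size_t example_align_maybe(struct usb_gadget *g,
						 struct usb_ep *ep, size_t len)
	{
		if (!g->quirk_ep_out_aligned_size)
			return len;	/* this controller has no such constraint */
		return round_up(len, (size_t)usb_endpoint_maxp(ep->desc));
	}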
@@ -1191,7 +1024,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
return ERR_PTR(-ENOMEM);
}
- ffs_dev = functionfs_acquire_dev_callback(dev_name);
+ ffs_dev = ffs_acquire_dev(dev_name);
if (IS_ERR(ffs_dev)) {
ffs_data_put(ffs);
return ERR_CAST(ffs_dev);
@@ -1201,7 +1034,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
rv = mount_nodev(t, flags, &data, ffs_sb_fill);
if (IS_ERR(rv) && data.ffs_data) {
- functionfs_release_dev_callback(data.ffs_data);
+ ffs_release_dev(data.ffs_data);
ffs_data_put(data.ffs_data);
}
return rv;
@@ -1214,7 +1047,7 @@ ffs_fs_kill_sb(struct super_block *sb)
kill_litter_super(sb);
if (sb->s_fs_info) {
- functionfs_release_dev_callback(sb->s_fs_info);
+ ffs_release_dev(sb->s_fs_info);
ffs_data_put(sb->s_fs_info);
}
}
@@ -1327,7 +1160,7 @@ static void ffs_data_clear(struct ffs_data *ffs)
ENTER();
if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
- functionfs_closed_callback(ffs);
+ ffs_closed(ffs);
BUG_ON(ffs->gadget);
@@ -1463,71 +1296,6 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
kfree(epfiles);
}
-static int functionfs_bind_config(struct usb_composite_dev *cdev,
- struct usb_configuration *c,
- struct ffs_data *ffs)
-{
- struct ffs_function *func;
- int ret;
-
- ENTER();
-
- func = kzalloc(sizeof *func, GFP_KERNEL);
- if (unlikely(!func))
- return -ENOMEM;
-
- func->function.name = "Function FS Gadget";
- func->function.strings = ffs->stringtabs;
-
- func->function.bind = ffs_func_bind;
- func->function.unbind = ffs_func_unbind;
- func->function.set_alt = ffs_func_set_alt;
- func->function.disable = ffs_func_disable;
- func->function.setup = ffs_func_setup;
- func->function.suspend = ffs_func_suspend;
- func->function.resume = ffs_func_resume;
-
- func->conf = c;
- func->gadget = cdev->gadget;
- func->ffs = ffs;
- ffs_data_get(ffs);
-
- ret = usb_add_function(c, &func->function);
- if (unlikely(ret))
- ffs_func_free(func);
-
- return ret;
-}
-
-static void ffs_func_free(struct ffs_function *func)
-{
- struct ffs_ep *ep = func->eps;
- unsigned count = func->ffs->eps_count;
- unsigned long flags;
-
- ENTER();
-
- /* cleanup after autoconfig */
- spin_lock_irqsave(&func->ffs->eps_lock, flags);
- do {
- if (ep->ep && ep->req)
- usb_ep_free_request(ep->ep, ep->req);
- ep->req = NULL;
- ++ep;
- } while (--count);
- spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
-
- ffs_data_put(func->ffs);
-
- kfree(func->eps);
- /*
- * eps and interfaces_nums are allocated in the same chunk so
- * only one free is required. Descriptors are also allocated
- * in the same chunk.
- */
-
- kfree(func);
-}
static void ffs_func_eps_disable(struct ffs_function *func)
{
@@ -1901,30 +1669,34 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
/* Allocate everything in one chunk so there's less maintenance. */
{
- struct {
- struct usb_gadget_strings *stringtabs[lang_count + 1];
- struct usb_gadget_strings stringtab[lang_count];
- struct usb_string strings[lang_count*(needed_count+1)];
- } *d;
unsigned i = 0;
+ vla_group(d);
+ vla_item(d, struct usb_gadget_strings *, stringtabs,
+ lang_count + 1);
+ vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
+ vla_item(d, struct usb_string, strings,
+ lang_count*(needed_count+1));
+
+ char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
- d = kmalloc(sizeof *d, GFP_KERNEL);
- if (unlikely(!d)) {
+ if (unlikely(!vlabuf)) {
kfree(_data);
return -ENOMEM;
}
- stringtabs = d->stringtabs;
- t = d->stringtab;
+ /* Initialize the VLA pointers */
+ stringtabs = vla_ptr(vlabuf, d, stringtabs);
+ t = vla_ptr(vlabuf, d, stringtab);
i = lang_count;
do {
*stringtabs++ = t++;
} while (--i);
*stringtabs = NULL;
- stringtabs = d->stringtabs;
- t = d->stringtab;
- s = d->strings;
+ /* stringtabs = vlabuf = d_stringtabs for later kfree */
+ stringtabs = vla_ptr(vlabuf, d, stringtabs);
+ t = vla_ptr(vlabuf, d, stringtab);
+ s = vla_ptr(vlabuf, d, strings);
strings = s;
}
@@ -2187,8 +1959,57 @@ static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
return 0;
}
-static int ffs_func_bind(struct usb_configuration *c,
- struct usb_function *f)
+static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
+ struct usb_configuration *c)
+{
+ struct ffs_function *func = ffs_func_from_usb(f);
+ struct f_fs_opts *ffs_opts =
+ container_of(f->fi, struct f_fs_opts, func_inst);
+ int ret;
+
+ ENTER();
+
+ /*
+ * Legacy gadget triggers binding in functionfs_ready_callback,
+ * which already uses locking; taking the same lock here would
+ * cause a deadlock.
+ *
+ * Configfs-enabled gadgets however do need ffs_dev_lock.
+ */
+ if (!ffs_opts->no_configfs)
+ ffs_dev_lock();
+ ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
+ func->ffs = ffs_opts->dev->ffs_data;
+ if (!ffs_opts->no_configfs)
+ ffs_dev_unlock();
+ if (ret)
+ return ERR_PTR(ret);
+
+ func->conf = c;
+ func->gadget = c->cdev->gadget;
+
+ ffs_data_get(func->ffs);
+
+ /*
+ * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
+ * configurations are bound in sequence with list_for_each_entry,
+ * in each configuration its functions are bound in sequence
+ * with list_for_each_entry, so we assume no race condition
+ * with regard to ffs_opts->bound access
+ */
+ if (!ffs_opts->refcnt) {
+ ret = functionfs_bind(func->ffs, c->cdev);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ ffs_opts->refcnt++;
+ func->function.strings = func->ffs->stringtabs;
+
+ return ffs_opts;
+}
+
+static int _ffs_func_bind(struct usb_configuration *c,
+ struct usb_function *f)
{
struct ffs_function *func = ffs_func_from_usb(f);
struct ffs_data *ffs = func->ffs;
@@ -2200,16 +2021,16 @@ static int ffs_func_bind(struct usb_configuration *c,
int ret;
/* Make it a single chunk, less management later on */
- struct {
- struct ffs_ep eps[ffs->eps_count];
- struct usb_descriptor_header
- *fs_descs[full ? ffs->fs_descs_count + 1 : 0];
- struct usb_descriptor_header
- *hs_descs[high ? ffs->hs_descs_count + 1 : 0];
- short inums[ffs->interfaces_count];
- char raw_descs[high ? ffs->raw_descs_length
- : ffs->raw_fs_descs_length];
- } *data;
+ vla_group(d);
+ vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
+ vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
+ full ? ffs->fs_descs_count + 1 : 0);
+ vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
+ high ? ffs->hs_descs_count + 1 : 0);
+ vla_item_with_sz(d, short, inums, ffs->interfaces_count);
+ vla_item_with_sz(d, char, raw_descs,
+ high ? ffs->raw_descs_length : ffs->raw_fs_descs_length);
+ char *vlabuf;
ENTER();
@@ -2217,21 +2038,28 @@ static int ffs_func_bind(struct usb_configuration *c,
if (unlikely(!(full | high)))
return -ENOTSUPP;
- /* Allocate */
- data = kmalloc(sizeof *data, GFP_KERNEL);
- if (unlikely(!data))
+ /* Allocate a single chunk, less management later on */
+ vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
+ if (unlikely(!vlabuf))
return -ENOMEM;
/* Zero */
- memset(data->eps, 0, sizeof data->eps);
- memcpy(data->raw_descs, ffs->raw_descs + 16, sizeof data->raw_descs);
- memset(data->inums, 0xff, sizeof data->inums);
- for (ret = ffs->eps_count; ret; --ret)
- data->eps[ret].num = -1;
+ memset(vla_ptr(vlabuf, d, eps), 0, d_eps__sz);
+ memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs + 16,
+ d_raw_descs__sz);
+ memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
+ for (ret = ffs->eps_count; ret; --ret) {
+ struct ffs_ep *ptr;
+
+ ptr = vla_ptr(vlabuf, d, eps);
+ ptr[ret].num = -1;
+ }
- /* Save pointers */
- func->eps = data->eps;
- func->interfaces_nums = data->inums;
+ /* Save pointers
+ * d_eps == vlabuf, func->eps used to kfree vlabuf later
+ */
+ func->eps = vla_ptr(vlabuf, d, eps);
+ func->interfaces_nums = vla_ptr(vlabuf, d, inums);
/*
* Go through all the endpoint descriptors and allocate
@@ -2239,10 +2067,10 @@ static int ffs_func_bind(struct usb_configuration *c,
* numbers without worrying that it may be described later on.
*/
if (likely(full)) {
- func->function.fs_descriptors = data->fs_descs;
+ func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
ret = ffs_do_descs(ffs->fs_descs_count,
- data->raw_descs,
- sizeof data->raw_descs,
+ vla_ptr(vlabuf, d, raw_descs),
+ d_raw_descs__sz,
__ffs_func_bind_do_descs, func);
if (unlikely(ret < 0))
goto error;
@@ -2251,10 +2079,10 @@ static int ffs_func_bind(struct usb_configuration *c,
}
if (likely(high)) {
- func->function.hs_descriptors = data->hs_descs;
+ func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
ret = ffs_do_descs(ffs->hs_descs_count,
- data->raw_descs + ret,
- (sizeof data->raw_descs) - ret,
+ vla_ptr(vlabuf, d, raw_descs) + ret,
+ d_raw_descs__sz - ret,
__ffs_func_bind_do_descs, func);
if (unlikely(ret < 0))
goto error;
@@ -2267,7 +2095,7 @@ static int ffs_func_bind(struct usb_configuration *c,
*/
ret = ffs_do_descs(ffs->fs_descs_count +
(high ? ffs->hs_descs_count : 0),
- data->raw_descs, sizeof data->raw_descs,
+ vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
__ffs_func_bind_do_nums, func);
if (unlikely(ret < 0))
goto error;
@@ -2281,26 +2109,19 @@ error:
return ret;
}
-
-/* Other USB function hooks *************************************************/
-
-static void ffs_func_unbind(struct usb_configuration *c,
- struct usb_function *f)
+static int ffs_func_bind(struct usb_configuration *c,
+ struct usb_function *f)
{
- struct ffs_function *func = ffs_func_from_usb(f);
- struct ffs_data *ffs = func->ffs;
+ struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
- ENTER();
+ if (IS_ERR(ffs_opts))
+ return PTR_ERR(ffs_opts);
- if (ffs->func == func) {
- ffs_func_eps_disable(func);
- ffs->func = NULL;
- }
+ return _ffs_func_bind(c, f);
+}
- ffs_event_add(ffs, FUNCTIONFS_UNBIND);
- ffs_func_free(func);
-}
+/* Other USB function hooks *************************************************/
static int ffs_func_set_alt(struct usb_function *f,
unsigned interface, unsigned alt)
@@ -2428,6 +2249,411 @@ static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
}
+/* Devices management *******************************************************/
+
+static LIST_HEAD(ffs_devices);
+
+static struct ffs_dev *_ffs_find_dev(const char *name)
+{
+ struct ffs_dev *dev;
+
+ list_for_each_entry(dev, &ffs_devices, entry) {
+ if (!dev->name || !name)
+ continue;
+ if (strcmp(dev->name, name) == 0)
+ return dev;
+ }
+
+ return NULL;
+}
+
+/*
+ * ffs_lock must be taken by the caller of this function
+ */
+static struct ffs_dev *ffs_get_single_dev(void)
+{
+ struct ffs_dev *dev;
+
+ if (list_is_singular(&ffs_devices)) {
+ dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
+ if (dev->single)
+ return dev;
+ }
+
+ return NULL;
+}
+
+/*
+ * ffs_lock must be taken by the caller of this function
+ */
+static struct ffs_dev *ffs_find_dev(const char *name)
+{
+ struct ffs_dev *dev;
+
+ dev = ffs_get_single_dev();
+ if (dev)
+ return dev;
+
+ return _ffs_find_dev(name);
+}
+
+/* Configfs support *********************************************************/
+
+static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_fs_opts,
+ func_inst.group);
+}
+
+static void ffs_attr_release(struct config_item *item)
+{
+ struct f_fs_opts *opts = to_ffs_opts(item);
+
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations ffs_item_ops = {
+ .release = ffs_attr_release,
+};
+
+static struct config_item_type ffs_func_type = {
+ .ct_item_ops = &ffs_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+
+/* Function registration interface ******************************************/
+
+static void ffs_free_inst(struct usb_function_instance *f)
+{
+ struct f_fs_opts *opts;
+
+ opts = to_f_fs_opts(f);
+ ffs_dev_lock();
+ ffs_free_dev(opts->dev);
+ ffs_dev_unlock();
+ kfree(opts);
+}
+
+#define MAX_INST_NAME_LEN 40
+
+static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
+{
+ struct f_fs_opts *opts;
+ char *ptr;
+ const char *tmp;
+ int name_len, ret;
+
+ name_len = strlen(name) + 1;
+ if (name_len > MAX_INST_NAME_LEN)
+ return -ENAMETOOLONG;
+
+ ptr = kstrndup(name, name_len, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ opts = to_f_fs_opts(fi);
+ tmp = NULL;
+
+ ffs_dev_lock();
+
+ tmp = opts->dev->name_allocated ? opts->dev->name : NULL;
+ ret = _ffs_name_dev(opts->dev, ptr);
+ if (ret) {
+ kfree(ptr);
+ ffs_dev_unlock();
+ return ret;
+ }
+ opts->dev->name_allocated = true;
+
+ ffs_dev_unlock();
+
+ kfree(tmp);
+
+ return 0;
+}
+
+static struct usb_function_instance *ffs_alloc_inst(void)
+{
+ struct f_fs_opts *opts;
+ struct ffs_dev *dev;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ opts->func_inst.set_inst_name = ffs_set_inst_name;
+ opts->func_inst.free_func_inst = ffs_free_inst;
+ ffs_dev_lock();
+ dev = ffs_alloc_dev();
+ ffs_dev_unlock();
+ if (IS_ERR(dev)) {
+ kfree(opts);
+ return ERR_CAST(dev);
+ }
+ opts->dev = dev;
+ dev->opts = opts;
+
+ config_group_init_type_name(&opts->func_inst.group, "",
+ &ffs_func_type);
+ return &opts->func_inst;
+}
+
+static void ffs_free(struct usb_function *f)
+{
+ kfree(ffs_func_from_usb(f));
+}
+
+static void ffs_func_unbind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ struct ffs_function *func = ffs_func_from_usb(f);
+ struct ffs_data *ffs = func->ffs;
+ struct f_fs_opts *opts =
+ container_of(f->fi, struct f_fs_opts, func_inst);
+ struct ffs_ep *ep = func->eps;
+ unsigned count = ffs->eps_count;
+ unsigned long flags;
+
+ ENTER();
+ if (ffs->func == func) {
+ ffs_func_eps_disable(func);
+ ffs->func = NULL;
+ }
+
+ if (!--opts->refcnt)
+ functionfs_unbind(ffs);
+
+ /* cleanup after autoconfig */
+ spin_lock_irqsave(&func->ffs->eps_lock, flags);
+ do {
+ if (ep->ep && ep->req)
+ usb_ep_free_request(ep->ep, ep->req);
+ ep->req = NULL;
+ ++ep;
+ } while (--count);
+ spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+ kfree(func->eps);
+ func->eps = NULL;
+ /*
+ * eps, descriptors and interfaces_nums are allocated in the
+ * same chunk so only one free is required.
+ */
+ func->function.fs_descriptors = NULL;
+ func->function.hs_descriptors = NULL;
+ func->interfaces_nums = NULL;
+
+ ffs_event_add(ffs, FUNCTIONFS_UNBIND);
+}
+
+static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
+{
+ struct ffs_function *func;
+
+ ENTER();
+
+ func = kzalloc(sizeof(*func), GFP_KERNEL);
+ if (unlikely(!func))
+ return ERR_PTR(-ENOMEM);
+
+ func->function.name = "Function FS Gadget";
+
+ func->function.bind = ffs_func_bind;
+ func->function.unbind = ffs_func_unbind;
+ func->function.set_alt = ffs_func_set_alt;
+ func->function.disable = ffs_func_disable;
+ func->function.setup = ffs_func_setup;
+ func->function.suspend = ffs_func_suspend;
+ func->function.resume = ffs_func_resume;
+ func->function.free_func = ffs_free;
+
+ return &func->function;
+}
+
+/*
+ * ffs_lock must be taken by the caller of this function
+ */
+struct ffs_dev *ffs_alloc_dev(void)
+{
+ struct ffs_dev *dev;
+ int ret;
+
+ if (ffs_get_single_dev())
+ return ERR_PTR(-EBUSY);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (list_empty(&ffs_devices)) {
+ ret = functionfs_init();
+ if (ret) {
+ kfree(dev);
+ return ERR_PTR(ret);
+ }
+ }
+
+ list_add(&dev->entry, &ffs_devices);
+
+ return dev;
+}
+
+/*
+ * ffs_lock must be taken by the caller of this function
+ * The caller is responsible for "name" being available whenever f_fs needs it
+ */
+static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
+{
+ struct ffs_dev *existing;
+
+ existing = _ffs_find_dev(name);
+ if (existing)
+ return -EBUSY;
+
+ dev->name = name;
+
+ return 0;
+}
+
+/*
+ * The caller is responsible for "name" being available whenever f_fs needs it
+ */
+int ffs_name_dev(struct ffs_dev *dev, const char *name)
+{
+ int ret;
+
+ ffs_dev_lock();
+ ret = _ffs_name_dev(dev, name);
+ ffs_dev_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(ffs_name_dev);
+
+int ffs_single_dev(struct ffs_dev *dev)
+{
+ int ret;
+
+ ret = 0;
+ ffs_dev_lock();
+
+ if (!list_is_singular(&ffs_devices))
+ ret = -EBUSY;
+ else
+ dev->single = true;
+
+ ffs_dev_unlock();
+ return ret;
+}
+EXPORT_SYMBOL(ffs_single_dev);
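ffs_name_dev() and ffs_single_dev() are the exported half of the new device registry: a legacy gadget that links f_fs in directly can either name the ffs_dev it expects user space to mount, or declare that a single anonymous instance is enough. A hedged usage sketch with a hypothetical caller (the real consumer is the g_ffs legacy gadget, which also wires up the ready/closed callbacks):

	/* Hypothetical init fragment of a legacy gadget using the new API. */
	static struct ffs_dev *example_dev;

	static int example_ffs_setup(void)
	{
		ffs_dev_lock();		/* ffs_alloc_dev() requires ffs_lock */
		example_dev = ffs_alloc_dev();
		ffs_dev_unlock();
		if (IS_ERR(example_dev))
			return PTR_ERR(example_dev);

		/* give the instance a fixed name, visible as a mount source ... */
		return ffs_name_dev(example_dev, "example");
		/* ... or allow exactly one unnamed mount instead:
		 *	return ffs_single_dev(example_dev);
		 */
	}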
+
+/*
+ * ffs_lock must be taken by the caller of this function
+ */
+void ffs_free_dev(struct ffs_dev *dev)
+{
+ list_del(&dev->entry);
+ if (dev->name_allocated)
+ kfree(dev->name);
+ kfree(dev);
+ if (list_empty(&ffs_devices))
+ functionfs_cleanup();
+}
+
+static void *ffs_acquire_dev(const char *dev_name)
+{
+ struct ffs_dev *ffs_dev;
+
+ ENTER();
+ ffs_dev_lock();
+
+ ffs_dev = ffs_find_dev(dev_name);
+ if (!ffs_dev)
+ ffs_dev = ERR_PTR(-ENODEV);
+ else if (ffs_dev->mounted)
+ ffs_dev = ERR_PTR(-EBUSY);
+ else if (ffs_dev->ffs_acquire_dev_callback &&
+ ffs_dev->ffs_acquire_dev_callback(ffs_dev))
+ ffs_dev = ERR_PTR(-ENODEV);
+ else
+ ffs_dev->mounted = true;
+
+ ffs_dev_unlock();
+ return ffs_dev;
+}
+
+static void ffs_release_dev(struct ffs_data *ffs_data)
+{
+ struct ffs_dev *ffs_dev;
+
+ ENTER();
+ ffs_dev_lock();
+
+ ffs_dev = ffs_data->private_data;
+ if (ffs_dev)
+ ffs_dev->mounted = false;
+
+ if (ffs_dev->ffs_release_dev_callback)
+ ffs_dev->ffs_release_dev_callback(ffs_dev);
+
+ ffs_dev_unlock();
+}
+
+static int ffs_ready(struct ffs_data *ffs)
+{
+ struct ffs_dev *ffs_obj;
+ int ret = 0;
+
+ ENTER();
+ ffs_dev_lock();
+
+ ffs_obj = ffs->private_data;
+ if (!ffs_obj) {
+ ret = -EINVAL;
+ goto done;
+ }
+ if (WARN_ON(ffs_obj->desc_ready)) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ ffs_obj->desc_ready = true;
+ ffs_obj->ffs_data = ffs;
+
+ if (ffs_obj->ffs_ready_callback)
+ ret = ffs_obj->ffs_ready_callback(ffs);
+
+done:
+ ffs_dev_unlock();
+ return ret;
+}
+
+static void ffs_closed(struct ffs_data *ffs)
+{
+ struct ffs_dev *ffs_obj;
+
+ ENTER();
+ ffs_dev_lock();
+
+ ffs_obj = ffs->private_data;
+ if (!ffs_obj)
+ goto done;
+
+ ffs_obj->desc_ready = false;
+
+ if (ffs_obj->ffs_closed_callback)
+ ffs_obj->ffs_closed_callback(ffs);
+
+ if (!ffs_obj->opts || ffs_obj->opts->no_configfs
+ || !ffs_obj->opts->func_inst.group.cg_item.ci_parent)
+ goto done;
+
+ unregister_gadget_item(ffs_obj->opts->
+ func_inst.group.cg_item.ci_parent->ci_parent);
+done:
+ ffs_dev_unlock();
+}
+
/* Misc helper functions ****************************************************/
static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
@@ -2458,3 +2684,7 @@ static char *ffs_prepare_buffer(const char __user *buf, size_t len)
return data;
}
+
+DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Nazarewicz");
diff --git a/drivers/usb/gadget/f_hid.c b/drivers/usb/gadget/f_hid.c
index 6e69a8e8d22a..a95290a1289f 100644
--- a/drivers/usb/gadget/f_hid.c
+++ b/drivers/usb/gadget/f_hid.c
@@ -20,6 +20,8 @@
#include <linux/sched.h>
#include <linux/usb/g_hid.h>
+#include "u_f.h"
+
static int major, minors;
static struct class *hidg_class;
@@ -334,20 +336,10 @@ static int f_hidg_open(struct inode *inode, struct file *fd)
/*-------------------------------------------------------------------------*/
/* usb_function */
-static struct usb_request *hidg_alloc_ep_req(struct usb_ep *ep, unsigned length)
+static inline struct usb_request *hidg_alloc_ep_req(struct usb_ep *ep,
+ unsigned length)
{
- struct usb_request *req;
-
- req = usb_ep_alloc_request(ep, GFP_ATOMIC);
- if (req) {
- req->length = length;
- req->buf = kmalloc(length, GFP_ATOMIC);
- if (!req->buf) {
- usb_ep_free_request(ep, req);
- req = NULL;
- }
- }
- return req;
+ return alloc_ep_req(ep, length, length);
}
static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req)
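f_hid (and f_midi, f_loopback and f_sourcesink further down) now delegates request allocation to a shared alloc_ep_req() helper declared in the new u_f.h header. Judging from the open-coded copies being removed in this patch, the shared helper looks roughly like this (a sketch, not a quote of u_f.c; 'default_len' is used when the caller passes len == 0):

	struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len)
	{
		struct usb_request *req;

		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (req) {
			req->length = len ?: default_len;
			req->buf = kmalloc(req->length, GFP_ATOMIC);
			if (!req->buf) {
				usb_ep_free_request(ep, req);
				req = NULL;
			}
		}
		return req;
	}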
diff --git a/drivers/usb/gadget/f_loopback.c b/drivers/usb/gadget/f_loopback.c
index 4a3873a0f2d0..4557cd03f0b1 100644
--- a/drivers/usb/gadget/f_loopback.c
+++ b/drivers/usb/gadget/f_loopback.c
@@ -20,6 +20,7 @@
#include <linux/usb/composite.h>
#include "g_zero.h"
+#include "u_f.h"
/*
* LOOPBACK FUNCTION ... a testing vehicle for USB peripherals,
@@ -119,7 +120,7 @@ static struct usb_endpoint_descriptor ss_loop_source_desc = {
.wMaxPacketSize = cpu_to_le16(1024),
};
-struct usb_ss_ep_comp_descriptor ss_loop_source_comp_desc = {
+static struct usb_ss_ep_comp_descriptor ss_loop_source_comp_desc = {
.bLength = USB_DT_SS_EP_COMP_SIZE,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bMaxBurst = 0,
@@ -135,7 +136,7 @@ static struct usb_endpoint_descriptor ss_loop_sink_desc = {
.wMaxPacketSize = cpu_to_le16(1024),
};
-struct usb_ss_ep_comp_descriptor ss_loop_sink_comp_desc = {
+static struct usb_ss_ep_comp_descriptor ss_loop_sink_comp_desc = {
.bLength = USB_DT_SS_EP_COMP_SIZE,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
.bMaxBurst = 0,
@@ -230,6 +231,14 @@ autoconf_fail:
static void lb_free_func(struct usb_function *f)
{
+ struct f_lb_opts *opts;
+
+ opts = container_of(f->fi, struct f_lb_opts, func_inst);
+
+ mutex_lock(&opts->lock);
+ opts->refcnt--;
+ mutex_unlock(&opts->lock);
+
usb_free_all_descriptors(f);
kfree(func_to_loop(f));
}
@@ -293,6 +302,11 @@ static void disable_loopback(struct f_loopback *loop)
VDBG(cdev, "%s disabled\n", loop->function.name);
}
+static inline struct usb_request *lb_alloc_ep_req(struct usb_ep *ep, int len)
+{
+ return alloc_ep_req(ep, len, buflen);
+}
+
static int
enable_loopback(struct usb_composite_dev *cdev, struct f_loopback *loop)
{
@@ -332,7 +346,7 @@ fail0:
* than 'buflen' bytes each.
*/
for (i = 0; i < qlen && result == 0; i++) {
- req = alloc_ep_req(ep, 0);
+ req = lb_alloc_ep_req(ep, 0);
if (req) {
req->complete = loopback_complete;
result = usb_ep_queue(ep, req, GFP_ATOMIC);
@@ -380,6 +394,11 @@ static struct usb_function *loopback_alloc(struct usb_function_instance *fi)
return ERR_PTR(-ENOMEM);
lb_opts = container_of(fi, struct f_lb_opts, func_inst);
+
+ mutex_lock(&lb_opts->lock);
+ lb_opts->refcnt++;
+ mutex_unlock(&lb_opts->lock);
+
buflen = lb_opts->bulk_buflen;
qlen = lb_opts->qlen;
if (!qlen)
@@ -396,6 +415,118 @@ static struct usb_function *loopback_alloc(struct usb_function_instance *fi)
return &loop->function;
}
+static inline struct f_lb_opts *to_f_lb_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_lb_opts,
+ func_inst.group);
+}
+
+CONFIGFS_ATTR_STRUCT(f_lb_opts);
+CONFIGFS_ATTR_OPS(f_lb_opts);
+
+static void lb_attr_release(struct config_item *item)
+{
+ struct f_lb_opts *lb_opts = to_f_lb_opts(item);
+
+ usb_put_function_instance(&lb_opts->func_inst);
+}
+
+static struct configfs_item_operations lb_item_ops = {
+ .release = lb_attr_release,
+ .show_attribute = f_lb_opts_attr_show,
+ .store_attribute = f_lb_opts_attr_store,
+};
+
+static ssize_t f_lb_opts_qlen_show(struct f_lb_opts *opts, char *page)
+{
+ int result;
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, "%d", opts->qlen);
+ mutex_unlock(&opts->lock);
+
+ return result;
+}
+
+static ssize_t f_lb_opts_qlen_store(struct f_lb_opts *opts,
+ const char *page, size_t len)
+{
+ int ret;
+ u32 num;
+
+ mutex_lock(&opts->lock);
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ ret = kstrtou32(page, 0, &num);
+ if (ret)
+ goto end;
+
+ opts->qlen = num;
+ ret = len;
+end:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+static struct f_lb_opts_attribute f_lb_opts_qlen =
+ __CONFIGFS_ATTR(qlen, S_IRUGO | S_IWUSR,
+ f_lb_opts_qlen_show,
+ f_lb_opts_qlen_store);
+
+static ssize_t f_lb_opts_bulk_buflen_show(struct f_lb_opts *opts, char *page)
+{
+ int result;
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, "%d", opts->bulk_buflen);
+ mutex_unlock(&opts->lock);
+
+ return result;
+}
+
+static ssize_t f_lb_opts_bulk_buflen_store(struct f_lb_opts *opts,
+ const char *page, size_t len)
+{
+ int ret;
+ u32 num;
+
+ mutex_lock(&opts->lock);
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ ret = kstrtou32(page, 0, &num);
+ if (ret)
+ goto end;
+
+ opts->bulk_buflen = num;
+ ret = len;
+end:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+static struct f_lb_opts_attribute f_lb_opts_bulk_buflen =
+ __CONFIGFS_ATTR(buflen, S_IRUGO | S_IWUSR,
+ f_lb_opts_bulk_buflen_show,
+ f_lb_opts_bulk_buflen_store);
+
+static struct configfs_attribute *lb_attrs[] = {
+ &f_lb_opts_qlen.attr,
+ &f_lb_opts_bulk_buflen.attr,
+ NULL,
+};
+
+static struct config_item_type lb_func_type = {
+ .ct_item_ops = &lb_item_ops,
+ .ct_attrs = lb_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
static void lb_free_instance(struct usb_function_instance *fi)
{
struct f_lb_opts *lb_opts;
@@ -411,7 +542,14 @@ static struct usb_function_instance *loopback_alloc_instance(void)
lb_opts = kzalloc(sizeof(*lb_opts), GFP_KERNEL);
if (!lb_opts)
return ERR_PTR(-ENOMEM);
+ mutex_init(&lb_opts->lock);
lb_opts->func_inst.free_func_inst = lb_free_instance;
+ lb_opts->bulk_buflen = GZERO_BULK_BUFLEN;
+ lb_opts->qlen = GZERO_QLEN;
+
+ config_group_init_type_name(&lb_opts->func_inst.group, "",
+ &lb_func_type);
+
return &lb_opts->func_inst;
}
DECLARE_USB_FUNCTION(Loopback, loopback_alloc_instance, loopback_alloc);
diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c
index 263e721c2694..36d4bb23087f 100644
--- a/drivers/usb/gadget/f_midi.c
+++ b/drivers/usb/gadget/f_midi.c
@@ -32,6 +32,8 @@
#include <linux/usb/audio.h>
#include <linux/usb/midi.h>
+#include "u_f.h"
+
MODULE_AUTHOR("Ben Williamson");
MODULE_LICENSE("GPL v2");
@@ -191,20 +193,10 @@ static struct usb_gadget_strings *midi_strings[] = {
NULL,
};
-static struct usb_request *alloc_ep_req(struct usb_ep *ep, unsigned length)
+static inline struct usb_request *midi_alloc_ep_req(struct usb_ep *ep,
+ unsigned length)
{
- struct usb_request *req;
-
- req = usb_ep_alloc_request(ep, GFP_ATOMIC);
- if (req) {
- req->length = length;
- req->buf = kmalloc(length, GFP_ATOMIC);
- if (!req->buf) {
- usb_ep_free_request(ep, req);
- req = NULL;
- }
- }
- return req;
+ return alloc_ep_req(ep, length, length);
}
static void free_ep_req(struct usb_ep *ep, struct usb_request *req)
@@ -365,7 +357,7 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
/* allocate a bunch of read buffers and queue them all at once. */
for (i = 0; i < midi->qlen && err == 0; i++) {
struct usb_request *req =
- alloc_ep_req(midi->out_ep, midi->buflen);
+ midi_alloc_ep_req(midi->out_ep, midi->buflen);
if (req == NULL)
return -ENOMEM;
@@ -546,7 +538,7 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req)
return;
if (!req)
- req = alloc_ep_req(ep, midi->buflen);
+ req = midi_alloc_ep_req(ep, midi->buflen);
if (!req) {
ERROR(midi, "gmidi_transmit: alloc_ep_request failed\n");
diff --git a/drivers/usb/gadget/f_ncm.c b/drivers/usb/gadget/f_ncm.c
index 1c28fe13328a..a9499fd30792 100644
--- a/drivers/usb/gadget/f_ncm.c
+++ b/drivers/usb/gadget/f_ncm.c
@@ -1386,7 +1386,7 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
usb_ep_free_request(ncm->notify, ncm->notify_req);
}
-struct usb_function *ncm_alloc(struct usb_function_instance *fi)
+static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
{
struct f_ncm *ncm;
struct f_ncm_opts *opts;
diff --git a/drivers/usb/gadget/f_obex.c b/drivers/usb/gadget/f_obex.c
index ad39f1dacba3..aebae1853bce 100644
--- a/drivers/usb/gadget/f_obex.c
+++ b/drivers/usb/gadget/f_obex.c
@@ -499,7 +499,7 @@ static void obex_unbind(struct usb_configuration *c, struct usb_function *f)
usb_free_all_descriptors(f);
}
-struct usb_function *obex_alloc(struct usb_function_instance *fi)
+static struct usb_function *obex_alloc(struct usb_function_instance *fi)
{
struct f_obex *obex;
struct f_serial_opts *opts;
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index eb3aa817a662..f2b781773eed 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -689,7 +689,7 @@ static void pn_unbind(struct usb_configuration *c, struct usb_function *f)
usb_free_all_descriptors(f);
}
-struct usb_function *phonet_alloc(struct usb_function_instance *fi)
+static struct usb_function *phonet_alloc(struct usb_function_instance *fi)
{
struct f_phonet *fp;
struct f_phonet_opts *opts;
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 717ed7f95639..c11761ce5113 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -675,7 +675,6 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
int status;
struct usb_ep *ep;
-#ifndef USB_FRNDIS_INCLUDED
struct f_rndis_opts *rndis_opts;
if (!can_support_rndis(c))
@@ -697,7 +696,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
return status;
rndis_opts->bound = true;
}
-#endif
+
us = usb_gstrings_attach(cdev, rndis_strings,
ARRAY_SIZE(rndis_string_defs));
if (IS_ERR(us))
@@ -782,13 +781,6 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
rndis->port.open = rndis_open;
rndis->port.close = rndis_close;
-#ifdef USB_FRNDIS_INCLUDED
- status = rndis_register(rndis_response_available, rndis);
- if (status < 0)
- goto fail;
- rndis->config = status;
-#endif
-
rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
rndis_set_host_mac(rndis->config, rndis->ethaddr);
@@ -830,66 +822,6 @@ fail:
return status;
}
-#ifdef USB_FRNDIS_INCLUDED
-
-static void
-rndis_old_unbind(struct usb_configuration *c, struct usb_function *f)
-{
- struct f_rndis *rndis = func_to_rndis(f);
-
- rndis_deregister(rndis->config);
-
- usb_free_all_descriptors(f);
-
- kfree(rndis->notify_req->buf);
- usb_ep_free_request(rndis->notify, rndis->notify_req);
-
- kfree(rndis);
-}
-
-int
-rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
- u32 vendorID, const char *manufacturer, struct eth_dev *dev)
-{
- struct f_rndis *rndis;
- int status;
-
- /* allocate and initialize one new instance */
- status = -ENOMEM;
- rndis = kzalloc(sizeof *rndis, GFP_KERNEL);
- if (!rndis)
- goto fail;
-
- memcpy(rndis->ethaddr, ethaddr, ETH_ALEN);
- rndis->vendorID = vendorID;
- rndis->manufacturer = manufacturer;
-
- rndis->port.ioport = dev;
- /* RNDIS activates when the host changes this filter */
- rndis->port.cdc_filter = 0;
-
- /* RNDIS has special (and complex) framing */
- rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
- rndis->port.wrap = rndis_add_header;
- rndis->port.unwrap = rndis_rm_hdr;
-
- rndis->port.func.name = "rndis";
- /* descriptors are per-instance copies */
- rndis->port.func.bind = rndis_bind;
- rndis->port.func.unbind = rndis_old_unbind;
- rndis->port.func.set_alt = rndis_set_alt;
- rndis->port.func.setup = rndis_setup;
- rndis->port.func.disable = rndis_disable;
-
- status = usb_add_function(c, &rndis->port.func);
- if (status)
- kfree(rndis);
-fail:
- return status;
-}
-
-#else
-
void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net)
{
struct f_rndis_opts *opts;
@@ -1047,8 +979,26 @@ static struct usb_function *rndis_alloc(struct usb_function_instance *fi)
return &rndis->port.func;
}
-DECLARE_USB_FUNCTION_INIT(rndis, rndis_alloc_inst, rndis_alloc);
+DECLARE_USB_FUNCTION(rndis, rndis_alloc_inst, rndis_alloc);
+
+static int __init rndis_mod_init(void)
+{
+ int ret;
+
+ ret = rndis_init();
+ if (ret)
+ return ret;
+
+ return usb_function_register(&rndisusb_func);
+}
+module_init(rndis_mod_init);
+
+static void __exit rndis_mod_exit(void)
+{
+ usb_function_unregister(&rndisusb_func);
+ rndis_exit();
+}
+module_exit(rndis_mod_exit);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");
-
-#endif
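f_rndis switches from DECLARE_USB_FUNCTION_INIT to the plain DECLARE_USB_FUNCTION macro so the module can run rndis_init()/rndis_exit() next to usb_function_register()/unregister(). For context, the _INIT variant generates the trivial module_init/module_exit pair itself; a sketch of the two macros from include/linux/usb/composite.h (reconstructed here only to explain why explicit rndis_mod_init()/rndis_mod_exit() became necessary, and why the driver struct is named rndisusb_func):

	#define DECLARE_USB_FUNCTION(_name, _inst_alloc, _func_alloc)		\
		static struct usb_function_driver _name ## usb_func = {	\
			.name = __stringify(_name),				\
			.mod  = THIS_MODULE,					\
			.alloc_inst = _inst_alloc,				\
			.alloc_func = _func_alloc,				\
		};								\
		MODULE_ALIAS("usbfunc:"__stringify(_name));

	#define DECLARE_USB_FUNCTION_INIT(_name, _inst_alloc, _func_alloc)	\
		DECLARE_USB_FUNCTION(_name, _inst_alloc, _func_alloc)		\
		static int __init _name ## mod_init(void)			\
		{								\
			return usb_function_register(&_name ## usb_func);	\
		}								\
		module_init(_name ## mod_init);					\
		static void __exit _name ## mod_exit(void)			\
		{								\
			usb_function_unregister(&_name ## usb_func);		\
		}								\
		module_exit(_name ## mod_exit);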
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 981113c9924d..9ecbcbf36a45 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -354,7 +354,7 @@ static void gser_unbind(struct usb_configuration *c, struct usb_function *f)
usb_free_all_descriptors(f);
}
-struct usb_function *gser_alloc(struct usb_function_instance *fi)
+static struct usb_function *gser_alloc(struct usb_function_instance *fi)
{
struct f_gser *gser;
struct f_serial_opts *opts;
diff --git a/drivers/usb/gadget/f_sourcesink.c b/drivers/usb/gadget/f_sourcesink.c
index a8895859a221..d3cd52db78fe 100644
--- a/drivers/usb/gadget/f_sourcesink.c
+++ b/drivers/usb/gadget/f_sourcesink.c
@@ -21,6 +21,7 @@
#include "g_zero.h"
#include "gadget_chips.h"
+#include "u_f.h"
/*
* SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral
@@ -201,7 +202,7 @@ static struct usb_endpoint_descriptor ss_source_desc = {
.wMaxPacketSize = cpu_to_le16(1024),
};
-struct usb_ss_ep_comp_descriptor ss_source_comp_desc = {
+static struct usb_ss_ep_comp_descriptor ss_source_comp_desc = {
.bLength = USB_DT_SS_EP_COMP_SIZE,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
@@ -218,7 +219,7 @@ static struct usb_endpoint_descriptor ss_sink_desc = {
.wMaxPacketSize = cpu_to_le16(1024),
};
-struct usb_ss_ep_comp_descriptor ss_sink_comp_desc = {
+static struct usb_ss_ep_comp_descriptor ss_sink_comp_desc = {
.bLength = USB_DT_SS_EP_COMP_SIZE,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
@@ -236,7 +237,7 @@ static struct usb_endpoint_descriptor ss_iso_source_desc = {
.bInterval = 4,
};
-struct usb_ss_ep_comp_descriptor ss_iso_source_comp_desc = {
+static struct usb_ss_ep_comp_descriptor ss_iso_source_comp_desc = {
.bLength = USB_DT_SS_EP_COMP_SIZE,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
@@ -254,7 +255,7 @@ static struct usb_endpoint_descriptor ss_iso_sink_desc = {
.bInterval = 4,
};
-struct usb_ss_ep_comp_descriptor ss_iso_sink_comp_desc = {
+static struct usb_ss_ep_comp_descriptor ss_iso_sink_comp_desc = {
.bLength = USB_DT_SS_EP_COMP_SIZE,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
@@ -301,23 +302,9 @@ static struct usb_gadget_strings *sourcesink_strings[] = {
/*-------------------------------------------------------------------------*/
-struct usb_request *alloc_ep_req(struct usb_ep *ep, int len)
+static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len)
{
- struct usb_request *req;
-
- req = usb_ep_alloc_request(ep, GFP_ATOMIC);
- if (req) {
- if (len)
- req->length = len;
- else
- req->length = buflen;
- req->buf = kmalloc(req->length, GFP_ATOMIC);
- if (!req->buf) {
- usb_ep_free_request(ep, req);
- req = NULL;
- }
- }
- return req;
+ return alloc_ep_req(ep, len, buflen);
}
void free_ep_req(struct usb_ep *ep, struct usb_request *req)
@@ -490,6 +477,14 @@ no_iso:
static void
sourcesink_free_func(struct usb_function *f)
{
+ struct f_ss_opts *opts;
+
+ opts = container_of(f->fi, struct f_ss_opts, func_inst);
+
+ mutex_lock(&opts->lock);
+ opts->refcnt--;
+ mutex_unlock(&opts->lock);
+
usb_free_all_descriptors(f);
kfree(func_to_ss(f));
}
@@ -628,10 +623,10 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
break;
}
ep = is_in ? ss->iso_in_ep : ss->iso_out_ep;
- req = alloc_ep_req(ep, size);
+ req = ss_alloc_ep_req(ep, size);
} else {
ep = is_in ? ss->in_ep : ss->out_ep;
- req = alloc_ep_req(ep, 0);
+ req = ss_alloc_ep_req(ep, 0);
}
if (!req)
@@ -878,6 +873,11 @@ static struct usb_function *source_sink_alloc_func(
return NULL;
ss_opts = container_of(fi, struct f_ss_opts, func_inst);
+
+ mutex_lock(&ss_opts->lock);
+ ss_opts->refcnt++;
+ mutex_unlock(&ss_opts->lock);
+
pattern = ss_opts->pattern;
isoc_interval = ss_opts->isoc_interval;
isoc_maxpacket = ss_opts->isoc_maxpacket;
@@ -898,6 +898,303 @@ static struct usb_function *source_sink_alloc_func(
return &ss->function;
}
+static inline struct f_ss_opts *to_f_ss_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_ss_opts,
+ func_inst.group);
+}
+
+CONFIGFS_ATTR_STRUCT(f_ss_opts);
+CONFIGFS_ATTR_OPS(f_ss_opts);
+
+static void ss_attr_release(struct config_item *item)
+{
+ struct f_ss_opts *ss_opts = to_f_ss_opts(item);
+
+ usb_put_function_instance(&ss_opts->func_inst);
+}
+
+static struct configfs_item_operations ss_item_ops = {
+ .release = ss_attr_release,
+ .show_attribute = f_ss_opts_attr_show,
+ .store_attribute = f_ss_opts_attr_store,
+};
+
+static ssize_t f_ss_opts_pattern_show(struct f_ss_opts *opts, char *page)
+{
+ int result;
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, "%d", opts->pattern);
+ mutex_unlock(&opts->lock);
+
+ return result;
+}
+
+static ssize_t f_ss_opts_pattern_store(struct f_ss_opts *opts,
+ const char *page, size_t len)
+{
+ int ret;
+ u8 num;
+
+ mutex_lock(&opts->lock);
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ ret = kstrtou8(page, 0, &num);
+ if (ret)
+ goto end;
+
+ if (num != 0 && num != 1 && num != 2) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ opts->pattern = num;
+ ret = len;
+end:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+static struct f_ss_opts_attribute f_ss_opts_pattern =
+ __CONFIGFS_ATTR(pattern, S_IRUGO | S_IWUSR,
+ f_ss_opts_pattern_show,
+ f_ss_opts_pattern_store);
+
+static ssize_t f_ss_opts_isoc_interval_show(struct f_ss_opts *opts, char *page)
+{
+ int result;
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, "%d", opts->isoc_interval);
+ mutex_unlock(&opts->lock);
+
+ return result;
+}
+
+static ssize_t f_ss_opts_isoc_interval_store(struct f_ss_opts *opts,
+ const char *page, size_t len)
+{
+ int ret;
+ u8 num;
+
+ mutex_lock(&opts->lock);
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ ret = kstrtou8(page, 0, &num);
+ if (ret)
+ goto end;
+
+ if (num > 16) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ opts->isoc_interval = num;
+ ret = len;
+end:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+static struct f_ss_opts_attribute f_ss_opts_isoc_interval =
+ __CONFIGFS_ATTR(isoc_interval, S_IRUGO | S_IWUSR,
+ f_ss_opts_isoc_interval_show,
+ f_ss_opts_isoc_interval_store);
+
+static ssize_t f_ss_opts_isoc_maxpacket_show(struct f_ss_opts *opts, char *page)
+{
+ int result;
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, "%d", opts->isoc_maxpacket);
+ mutex_unlock(&opts->lock);
+
+ return result;
+}
+
+static ssize_t f_ss_opts_isoc_maxpacket_store(struct f_ss_opts *opts,
+ const char *page, size_t len)
+{
+ int ret;
+ u16 num;
+
+ mutex_lock(&opts->lock);
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ ret = kstrtou16(page, 0, &num);
+ if (ret)
+ goto end;
+
+ if (num > 1024) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ opts->isoc_maxpacket = num;
+ ret = len;
+end:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+static struct f_ss_opts_attribute f_ss_opts_isoc_maxpacket =
+ __CONFIGFS_ATTR(isoc_maxpacket, S_IRUGO | S_IWUSR,
+ f_ss_opts_isoc_maxpacket_show,
+ f_ss_opts_isoc_maxpacket_store);
+
+static ssize_t f_ss_opts_isoc_mult_show(struct f_ss_opts *opts, char *page)
+{
+ int result;
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, "%d", opts->isoc_mult);
+ mutex_unlock(&opts->lock);
+
+ return result;
+}
+
+static ssize_t f_ss_opts_isoc_mult_store(struct f_ss_opts *opts,
+ const char *page, size_t len)
+{
+ int ret;
+ u8 num;
+
+ mutex_lock(&opts->lock);
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ ret = kstrtou8(page, 0, &num);
+ if (ret)
+ goto end;
+
+ if (num > 2) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ opts->isoc_mult = num;
+ ret = len;
+end:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+static struct f_ss_opts_attribute f_ss_opts_isoc_mult =
+ __CONFIGFS_ATTR(isoc_mult, S_IRUGO | S_IWUSR,
+ f_ss_opts_isoc_mult_show,
+ f_ss_opts_isoc_mult_store);
+
+static ssize_t f_ss_opts_isoc_maxburst_show(struct f_ss_opts *opts, char *page)
+{
+ int result;
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, "%d", opts->isoc_maxburst);
+ mutex_unlock(&opts->lock);
+
+ return result;
+}
+
+static ssize_t f_ss_opts_isoc_maxburst_store(struct f_ss_opts *opts,
+ const char *page, size_t len)
+{
+ int ret;
+ u8 num;
+
+ mutex_lock(&opts->lock);
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ ret = kstrtou8(page, 0, &num);
+ if (ret)
+ goto end;
+
+ if (num > 15) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ opts->isoc_maxburst = num;
+ ret = len;
+end:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+static struct f_ss_opts_attribute f_ss_opts_isoc_maxburst =
+ __CONFIGFS_ATTR(isoc_maxburst, S_IRUGO | S_IWUSR,
+ f_ss_opts_isoc_maxburst_show,
+ f_ss_opts_isoc_maxburst_store);
+
+static ssize_t f_ss_opts_bulk_buflen_show(struct f_ss_opts *opts, char *page)
+{
+ int result;
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, "%d", opts->bulk_buflen);
+ mutex_unlock(&opts->lock);
+
+ return result;
+}
+
+static ssize_t f_ss_opts_bulk_buflen_store(struct f_ss_opts *opts,
+ const char *page, size_t len)
+{
+ int ret;
+ u32 num;
+
+ mutex_lock(&opts->lock);
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ ret = kstrtou32(page, 0, &num);
+ if (ret)
+ goto end;
+
+ opts->bulk_buflen = num;
+ ret = len;
+end:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+static struct f_ss_opts_attribute f_ss_opts_bulk_buflen =
+ __CONFIGFS_ATTR(buflen, S_IRUGO | S_IWUSR,
+ f_ss_opts_bulk_buflen_show,
+ f_ss_opts_bulk_buflen_store);
+
+static struct configfs_attribute *ss_attrs[] = {
+ &f_ss_opts_pattern.attr,
+ &f_ss_opts_isoc_interval.attr,
+ &f_ss_opts_isoc_maxpacket.attr,
+ &f_ss_opts_isoc_mult.attr,
+ &f_ss_opts_isoc_maxburst.attr,
+ &f_ss_opts_bulk_buflen.attr,
+ NULL,
+};
+
+static struct config_item_type ss_func_type = {
+ .ct_item_ops = &ss_item_ops,
+ .ct_attrs = ss_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
static void source_sink_free_instance(struct usb_function_instance *fi)
{
struct f_ss_opts *ss_opts;
@@ -913,7 +1210,15 @@ static struct usb_function_instance *source_sink_alloc_inst(void)
ss_opts = kzalloc(sizeof(*ss_opts), GFP_KERNEL);
if (!ss_opts)
return ERR_PTR(-ENOMEM);
+ mutex_init(&ss_opts->lock);
ss_opts->func_inst.free_func_inst = source_sink_free_instance;
+ ss_opts->isoc_interval = GZERO_ISOC_INTERVAL;
+ ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET;
+ ss_opts->bulk_buflen = GZERO_BULK_BUFLEN;
+
+ config_group_init_type_name(&ss_opts->func_inst.group, "",
+ &ss_func_type);
+
return &ss_opts->func_inst;
}
DECLARE_USB_FUNCTION(SourceSink, source_sink_alloc_inst,
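All six store callbacks added above share one refcount-guarded shape: take opts->lock, refuse the write with -EBUSY while the instance is in use (opts->refcnt non-zero), parse with kstrtou8/kstrtou16/kstrtou32, apply the per-attribute range check, then commit and return the consumed length. A condensed sketch of that shape follows; the attribute name "example" and the field it stores to are placeholders, not part of the patch:

static ssize_t f_ss_opts_example_store(struct f_ss_opts *opts,
				       const char *page, size_t len)
{
	int ret;
	u8 num;

	mutex_lock(&opts->lock);
	if (opts->refcnt) {
		/* a live function instance must not be reconfigured */
		ret = -EBUSY;
		goto end;
	}

	ret = kstrtou8(page, 0, &num);
	if (ret)
		goto end;

	/* per-attribute range check goes here */

	opts->pattern = num;	/* placeholder target field */
	ret = len;
end:
	mutex_unlock(&opts->lock);
	return ret;
}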
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
index 7c8674fa7e80..f1a59190ac9a 100644
--- a/drivers/usb/gadget/f_subset.c
+++ b/drivers/usb/gadget/f_subset.c
@@ -301,7 +301,6 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
int status;
struct usb_ep *ep;
-#ifndef USB_FSUBSET_INCLUDED
struct f_gether_opts *gether_opts;
gether_opts = container_of(f->fi, struct f_gether_opts, func_inst);
@@ -322,7 +321,7 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
return status;
gether_opts->bound = true;
}
-#endif
+
us = usb_gstrings_attach(cdev, geth_strings,
ARRAY_SIZE(geth_string_defs));
if (IS_ERR(us))
@@ -393,61 +392,6 @@ fail:
return status;
}
-#ifdef USB_FSUBSET_INCLUDED
-
-static void
-geth_old_unbind(struct usb_configuration *c, struct usb_function *f)
-{
- geth_string_defs[0].id = 0;
- usb_free_all_descriptors(f);
- kfree(func_to_geth(f));
-}
-
-/**
- * geth_bind_config - add CDC Subset network link to a configuration
- * @c: the configuration to support the network link
- * @ethaddr: a buffer in which the ethernet address of the host side
- * side of the link was recorded
- * @dev: eth_dev structure
- * Context: single threaded during gadget setup
- *
- * Returns zero on success, else negative errno.
- *
- * Caller must have called @gether_setup(). Caller is also responsible
- * for calling @gether_cleanup() before module unload.
- */
-int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
- struct eth_dev *dev)
-{
- struct f_gether *geth;
- int status;
-
- /* allocate and initialize one new instance */
- geth = kzalloc(sizeof *geth, GFP_KERNEL);
- if (!geth)
- return -ENOMEM;
-
- /* export host's Ethernet address in CDC format */
- snprintf(geth->ethaddr, sizeof geth->ethaddr, "%pm", ethaddr);
- geth_string_defs[1].s = geth->ethaddr;
-
- geth->port.ioport = dev;
- geth->port.cdc_filter = DEFAULT_FILTER;
-
- geth->port.func.name = "cdc_subset";
- geth->port.func.bind = geth_bind;
- geth->port.func.unbind = geth_old_unbind;
- geth->port.func.set_alt = geth_set_alt;
- geth->port.func.disable = geth_disable;
-
- status = usb_add_function(c, &geth->port.func);
- if (status)
- kfree(geth);
- return status;
-}
-
-#else
-
static inline struct f_gether_opts *to_f_gether_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_gether_opts,
@@ -573,5 +517,3 @@ static struct usb_function *geth_alloc(struct usb_function_instance *fi)
DECLARE_USB_FUNCTION_INIT(geth, geth_alloc_inst, geth_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");
-
-#endif
diff --git a/drivers/usb/gadget/fotg210-udc.c b/drivers/usb/gadget/fotg210-udc.c
index bbbfd1948778..2d0305280e8c 100644
--- a/drivers/usb/gadget/fotg210-udc.c
+++ b/drivers/usb/gadget/fotg210-udc.c
@@ -1157,8 +1157,9 @@ static int fotg210_udc_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&ep->queue);
ep->ep.name = fotg210_ep_name[i];
ep->ep.ops = &fotg210_ep_ops;
+ usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
}
- fotg210->ep[0]->ep.maxpacket = 0x40;
+ usb_ep_set_maxpacket_limit(&fotg210->ep[0]->ep, 0x40);
fotg210->gadget.ep0 = &fotg210->ep[0]->ep;
INIT_LIST_HEAD(&fotg210->gadget.ep0->ep_list);
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 807127d56fa3..ad5483335167 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -2429,7 +2428,7 @@ static int qe_ep_config(struct qe_udc *udc, unsigned char pipe_num)
ep->ep.ops = &qe_ep_ops;
ep->stopped = 1;
- ep->ep.maxpacket = (unsigned short) ~0;
+ usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
ep->ep.desc = NULL;
ep->dir = 0xff;
ep->epnum = (u8)pipe_num;
@@ -2717,7 +2716,7 @@ MODULE_DEVICE_TABLE(of, qe_udc_match);
static struct platform_driver udc_driver = {
.driver = {
- .name = (char *)driver_name,
+ .name = driver_name,
.owner = THIS_MODULE,
.of_match_table = qe_udc_match,
},
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 36ac7cfba91d..15960af0f67e 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -2311,7 +2311,7 @@ static int __init struct_ep_setup(struct fsl_udc *udc, unsigned char index,
/* for ep0: maxP defined in desc
* for other eps, maxP is set by epautoconfig() called by gadget layer
*/
- ep->ep.maxpacket = (unsigned short) ~0;
+ usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
/* the queue lists any req for this ep */
INIT_LIST_HEAD(&ep->queue);
@@ -2469,7 +2469,8 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
* for other eps, gadget layer called ep_enable with defined desc
*/
udc_controller->eps[0].ep.desc = &fsl_ep0_desc;
- udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD;
+ usb_ep_set_maxpacket_limit(&udc_controller->eps[0].ep,
+ USB_MAX_CTRL_PAYLOAD);
/* setup the udc->eps[] for non-control endpoints and link
* to gadget.ep_list */
@@ -2666,7 +2667,7 @@ static struct platform_driver udc_driver = {
.suspend = fsl_udc_suspend,
.resume = fsl_udc_resume,
.driver = {
- .name = (char *)driver_name,
+ .name = driver_name,
.owner = THIS_MODULE,
/* udc suspend/resume called from OTG driver */
.suspend = fsl_udc_otg_suspend,
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
index b278abe52453..6423f1840ed9 100644
--- a/drivers/usb/gadget/fusb300_udc.c
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -1452,9 +1452,9 @@ static int __init fusb300_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&ep->queue);
ep->ep.name = fusb300_ep_name[i];
ep->ep.ops = &fusb300_ep_ops;
- ep->ep.maxpacket = HS_BULK_MAX_PACKET_SIZE;
+ usb_ep_set_maxpacket_limit(&ep->ep, HS_BULK_MAX_PACKET_SIZE);
}
- fusb300->ep[0]->ep.maxpacket = HS_CTL_MAX_PACKET_SIZE;
+ usb_ep_set_maxpacket_limit(&fusb300->ep[0]->ep, HS_CTL_MAX_PACKET_SIZE);
fusb300->ep[0]->epnum = 0;
fusb300->gadget.ep0 = &fusb300->ep[0]->ep;
INIT_LIST_HEAD(&fusb300->gadget.ep0->ep_list);
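The fotg210, fsl_qe, fsl_udc, fusb300 and goku hunks in this patch all make the same substitution: direct assignments to ep.maxpacket become calls to usb_ep_set_maxpacket_limit(). The helper itself is introduced elsewhere in this series; the sketch below shows the behaviour these call sites appear to rely on, recording the hardware ceiling and seeding the working maxpacket from it. The field name maxpacket_limit is an assumption and is not visible in this diff:

/* Sketch only: assumed behaviour of the helper used by the hunks above. */
static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep,
					      unsigned maxpacket_limit)
{
	ep->maxpacket_limit = maxpacket_limit;	/* hardware ceiling for this ep */
	ep->maxpacket = maxpacket_limit;	/* default until a descriptor is chosen */
}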
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
index 2344efe4f4ce..fe12e6a27448 100644
--- a/drivers/usb/gadget/g_ffs.c
+++ b/drivers/usb/gadget/g_ffs.c
@@ -13,14 +13,10 @@
#define pr_fmt(fmt) "g_ffs: " fmt
#include <linux/module.h>
-/*
- * kbuild is not very cooperative with respect to linking separately
- * compiled library objects into one module. So for now we won't use
- * separate compilation ... ensuring init/exit sections work to shrink
- * the runtime footprint, and giving us at least some parts of what
- * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
- */
+
#if defined CONFIG_USB_FUNCTIONFS_ETH || defined CONFIG_USB_FUNCTIONFS_RNDIS
+#include <linux/netdevice.h>
+
# if defined USB_ETH_RNDIS
# undef USB_ETH_RNDIS
# endif
@@ -28,31 +24,31 @@
# define USB_ETH_RNDIS y
# endif
-#define USBF_ECM_INCLUDED
-# include "f_ecm.c"
-#define USB_FSUBSET_INCLUDED
-# include "f_subset.c"
+# include "u_ecm.h"
+# include "u_gether.h"
# ifdef USB_ETH_RNDIS
-# define USB_FRNDIS_INCLUDED
-# include "f_rndis.c"
+# include "u_rndis.h"
# include "rndis.h"
# endif
# include "u_ether.h"
-static u8 gfs_host_mac[ETH_ALEN];
-static struct eth_dev *the_dev;
+USB_ETHERNET_MODULE_PARAMETERS();
+
# ifdef CONFIG_USB_FUNCTIONFS_ETH
-static int eth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
- struct eth_dev *dev);
+static int eth_bind_config(struct usb_configuration *c);
+static struct usb_function_instance *fi_ecm;
+static struct usb_function *f_ecm;
+static struct usb_function_instance *fi_geth;
+static struct usb_function *f_geth;
+# endif
+# ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+static int bind_rndis_config(struct usb_configuration *c);
+static struct usb_function_instance *fi_rndis;
+static struct usb_function *f_rndis;
# endif
-#else
-# define the_dev NULL
-# define gether_cleanup(dev) do { } while (0)
-# define gfs_host_mac NULL
-struct eth_dev;
#endif
-#include "f_fs.c"
+#include "u_fs.h"
#define DRIVER_NAME "g_ffs"
#define DRIVER_DESC "USB Function Filesystem"
@@ -67,19 +63,8 @@ MODULE_LICENSE("GPL");
#define GFS_MAX_DEVS 10
-struct gfs_ffs_obj {
- const char *name;
- bool mounted;
- bool desc_ready;
- struct ffs_data *ffs_data;
-};
-
USB_GADGET_COMPOSITE_OPTIONS();
-#if defined CONFIG_USB_FUNCTIONFS_ETH || defined CONFIG_USB_FUNCTIONFS_RNDIS
-USB_ETHERNET_MODULE_PARAMETERS();
-#endif
-
static struct usb_device_descriptor gfs_dev_desc = {
.bLength = sizeof gfs_dev_desc,
.bDescriptorType = USB_DT_DEVICE,
@@ -146,12 +131,12 @@ static struct usb_gadget_strings *gfs_dev_strings[] = {
struct gfs_configuration {
struct usb_configuration c;
- int (*eth)(struct usb_configuration *c, u8 *ethaddr,
- struct eth_dev *dev);
+ int (*eth)(struct usb_configuration *c);
+ int num;
} gfs_configurations[] = {
#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
{
- .eth = rndis_bind_config,
+ .eth = bind_rndis_config,
},
#endif
@@ -167,10 +152,15 @@ struct gfs_configuration {
#endif
};
+static void *functionfs_acquire_dev(struct ffs_dev *dev);
+static void functionfs_release_dev(struct ffs_dev *dev);
+static int functionfs_ready_callback(struct ffs_data *ffs);
+static void functionfs_closed_callback(struct ffs_data *ffs);
static int gfs_bind(struct usb_composite_dev *cdev);
static int gfs_unbind(struct usb_composite_dev *cdev);
static int gfs_do_config(struct usb_configuration *c);
+
static __refdata struct usb_composite_driver gfs_driver = {
.name = DRIVER_NAME,
.dev = &gfs_dev_desc,
@@ -180,206 +170,244 @@ static __refdata struct usb_composite_driver gfs_driver = {
.unbind = gfs_unbind,
};
-static DEFINE_MUTEX(gfs_lock);
static unsigned int missing_funcs;
-static bool gfs_ether_setup;
static bool gfs_registered;
static bool gfs_single_func;
-static struct gfs_ffs_obj *ffs_tab;
+static struct usb_function_instance **fi_ffs;
+static struct usb_function **f_ffs[] = {
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+ NULL,
+#endif
+
+#ifdef CONFIG_USB_FUNCTIONFS_ETH
+ NULL,
+#endif
+
+#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
+ NULL,
+#endif
+};
+
+#define N_CONF ARRAY_SIZE(f_ffs)
static int __init gfs_init(void)
{
+ struct f_fs_opts *opts;
int i;
+ int ret = 0;
ENTER();
- if (!func_num) {
+ if (func_num < 2) {
gfs_single_func = true;
func_num = 1;
}
- ffs_tab = kcalloc(func_num, sizeof *ffs_tab, GFP_KERNEL);
- if (!ffs_tab)
- return -ENOMEM;
+ /*
+ * Allocate in one chunk for easier maintenance
+ */
+ f_ffs[0] = kcalloc(func_num * N_CONF, sizeof(*f_ffs), GFP_KERNEL);
+ if (!f_ffs[0]) {
+ ret = -ENOMEM;
+ goto no_func;
+ }
+ for (i = 1; i < N_CONF; ++i)
+ f_ffs[i] = f_ffs[0] + i * func_num;
- if (!gfs_single_func)
- for (i = 0; i < func_num; i++)
- ffs_tab[i].name = func_names[i];
+ fi_ffs = kcalloc(func_num, sizeof(*fi_ffs), GFP_KERNEL);
+ if (!fi_ffs) {
+ ret = -ENOMEM;
+ goto no_func;
+ }
+
+ for (i = 0; i < func_num; i++) {
+ fi_ffs[i] = usb_get_function_instance("ffs");
+ if (IS_ERR(fi_ffs[i])) {
+ ret = PTR_ERR(fi_ffs[i]);
+ --i;
+ goto no_dev;
+ }
+ opts = to_f_fs_opts(fi_ffs[i]);
+ if (gfs_single_func)
+ ret = ffs_single_dev(opts->dev);
+ else
+ ret = ffs_name_dev(opts->dev, func_names[i]);
+ if (ret)
+ goto no_dev;
+ opts->dev->ffs_ready_callback = functionfs_ready_callback;
+ opts->dev->ffs_closed_callback = functionfs_closed_callback;
+ opts->dev->ffs_acquire_dev_callback = functionfs_acquire_dev;
+ opts->dev->ffs_release_dev_callback = functionfs_release_dev;
+ opts->no_configfs = true;
+ }
missing_funcs = func_num;
- return functionfs_init();
+ return 0;
+no_dev:
+ while (i >= 0)
+ usb_put_function_instance(fi_ffs[i--]);
+ kfree(fi_ffs);
+no_func:
+ kfree(f_ffs[0]);
+ return ret;
}
module_init(gfs_init);
static void __exit gfs_exit(void)
{
+ int i;
+
ENTER();
- mutex_lock(&gfs_lock);
if (gfs_registered)
usb_composite_unregister(&gfs_driver);
gfs_registered = false;
- functionfs_cleanup();
+ kfree(f_ffs[0]);
+
+ for (i = 0; i < func_num; i++)
+ usb_put_function_instance(fi_ffs[i]);
- mutex_unlock(&gfs_lock);
- kfree(ffs_tab);
+ kfree(fi_ffs);
}
module_exit(gfs_exit);
-static struct gfs_ffs_obj *gfs_find_dev(const char *dev_name)
+static void *functionfs_acquire_dev(struct ffs_dev *dev)
{
- int i;
-
- ENTER();
-
- if (gfs_single_func)
- return &ffs_tab[0];
-
- for (i = 0; i < func_num; i++)
- if (strcmp(ffs_tab[i].name, dev_name) == 0)
- return &ffs_tab[i];
+ if (!try_module_get(THIS_MODULE))
+ return ERR_PTR(-ENODEV);
+
+ return 0;
+}
- return NULL;
+static void functionfs_release_dev(struct ffs_dev *dev)
+{
+ module_put(THIS_MODULE);
}
+/*
+ * The caller of this function takes ffs_lock
+ */
static int functionfs_ready_callback(struct ffs_data *ffs)
{
- struct gfs_ffs_obj *ffs_obj;
- int ret;
-
- ENTER();
- mutex_lock(&gfs_lock);
+ int ret = 0;
- ffs_obj = ffs->private_data;
- if (!ffs_obj) {
- ret = -EINVAL;
- goto done;
- }
+ if (--missing_funcs)
+ return 0;
- if (WARN_ON(ffs_obj->desc_ready)) {
- ret = -EBUSY;
- goto done;
- }
- ffs_obj->desc_ready = true;
- ffs_obj->ffs_data = ffs;
-
- if (--missing_funcs) {
- ret = 0;
- goto done;
- }
+ if (gfs_registered)
+ return -EBUSY;
- if (gfs_registered) {
- ret = -EBUSY;
- goto done;
- }
gfs_registered = true;
ret = usb_composite_probe(&gfs_driver);
if (unlikely(ret < 0))
gfs_registered = false;
-
-done:
- mutex_unlock(&gfs_lock);
+
return ret;
}
+/*
+ * The caller of this function takes ffs_lock
+ */
static void functionfs_closed_callback(struct ffs_data *ffs)
{
- struct gfs_ffs_obj *ffs_obj;
-
- ENTER();
- mutex_lock(&gfs_lock);
-
- ffs_obj = ffs->private_data;
- if (!ffs_obj)
- goto done;
-
- ffs_obj->desc_ready = false;
missing_funcs++;
if (gfs_registered)
usb_composite_unregister(&gfs_driver);
gfs_registered = false;
-
-done:
- mutex_unlock(&gfs_lock);
}
-static void *functionfs_acquire_dev_callback(const char *dev_name)
+/*
+ * It is assumed that gfs_bind is called from a context where ffs_lock is held
+ */
+static int gfs_bind(struct usb_composite_dev *cdev)
{
- struct gfs_ffs_obj *ffs_dev;
+#if defined CONFIG_USB_FUNCTIONFS_ETH || defined CONFIG_USB_FUNCTIONFS_RNDIS
+ struct net_device *net;
+#endif
+ int ret, i;
ENTER();
- mutex_lock(&gfs_lock);
-
- ffs_dev = gfs_find_dev(dev_name);
- if (!ffs_dev) {
- ffs_dev = ERR_PTR(-ENODEV);
- goto done;
- }
- if (ffs_dev->mounted) {
- ffs_dev = ERR_PTR(-EBUSY);
- goto done;
+ if (missing_funcs)
+ return -ENODEV;
+#if defined CONFIG_USB_FUNCTIONFS_ETH
+ if (can_support_ecm(cdev->gadget)) {
+ struct f_ecm_opts *ecm_opts;
+
+ fi_ecm = usb_get_function_instance("ecm");
+ if (IS_ERR(fi_ecm))
+ return PTR_ERR(fi_ecm);
+ ecm_opts = container_of(fi_ecm, struct f_ecm_opts, func_inst);
+ net = ecm_opts->net;
+ } else {
+ struct f_gether_opts *geth_opts;
+
+ fi_geth = usb_get_function_instance("geth");
+ if (IS_ERR(fi_geth))
+ return PTR_ERR(fi_geth);
+ geth_opts = container_of(fi_geth, struct f_gether_opts,
+ func_inst);
+ net = geth_opts->net;
}
- ffs_dev->mounted = true;
+#endif
-done:
- mutex_unlock(&gfs_lock);
- return ffs_dev;
-}
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+ {
+ struct f_rndis_opts *rndis_opts;
-static void functionfs_release_dev_callback(struct ffs_data *ffs_data)
-{
- struct gfs_ffs_obj *ffs_dev;
+ fi_rndis = usb_get_function_instance("rndis");
+ if (IS_ERR(fi_rndis)) {
+ ret = PTR_ERR(fi_rndis);
+ goto error;
+ }
+ rndis_opts = container_of(fi_rndis, struct f_rndis_opts,
+ func_inst);
+#ifndef CONFIG_USB_FUNCTIONFS_ETH
+ net = rndis_opts->net;
+#endif
+ }
+#endif
- ENTER();
- mutex_lock(&gfs_lock);
+#if defined CONFIG_USB_FUNCTIONFS_ETH || defined CONFIG_USB_FUNCTIONFS_RNDIS
+ gether_set_qmult(net, qmult);
+ if (!gether_set_host_addr(net, host_addr))
+ pr_info("using host ethernet address: %s", host_addr);
+ if (!gether_set_dev_addr(net, dev_addr))
+ pr_info("using self ethernet address: %s", dev_addr);
+#endif
- ffs_dev = ffs_data->private_data;
- if (ffs_dev)
- ffs_dev->mounted = false;
+#if defined CONFIG_USB_FUNCTIONFS_RNDIS && defined CONFIG_USB_FUNCTIONFS_ETH
+ gether_set_gadget(net, cdev->gadget);
+ ret = gether_register_netdev(net);
+ if (ret)
+ goto error_rndis;
- mutex_unlock(&gfs_lock);
-}
+ if (can_support_ecm(cdev->gadget)) {
+ struct f_ecm_opts *ecm_opts;
-/*
- * It is assumed that gfs_bind is called from a context where gfs_lock is held
- */
-static int gfs_bind(struct usb_composite_dev *cdev)
-{
- int ret, i;
+ ecm_opts = container_of(fi_ecm, struct f_ecm_opts, func_inst);
+ ecm_opts->bound = true;
+ } else {
+ struct f_gether_opts *geth_opts;
- ENTER();
+ geth_opts = container_of(fi_geth, struct f_gether_opts,
+ func_inst);
+ geth_opts->bound = true;
+ }
- if (missing_funcs)
- return -ENODEV;
-#if defined CONFIG_USB_FUNCTIONFS_ETH || defined CONFIG_USB_FUNCTIONFS_RNDIS
- the_dev = gether_setup(cdev->gadget, dev_addr, host_addr, gfs_host_mac,
- qmult);
+ rndis_borrow_net(fi_rndis, net);
#endif
- if (IS_ERR(the_dev)) {
- ret = PTR_ERR(the_dev);
- goto error_quick;
- }
- gfs_ether_setup = true;
+ /* TODO: gstrings_attach? */
ret = usb_string_ids_tab(cdev, gfs_strings);
if (unlikely(ret < 0))
- goto error;
+ goto error_rndis;
gfs_dev_desc.iProduct = gfs_strings[USB_GADGET_PRODUCT_IDX].id;
- for (i = func_num; i--; ) {
- ret = functionfs_bind(ffs_tab[i].ffs_data, cdev);
- if (unlikely(ret < 0)) {
- while (++i < func_num)
- functionfs_unbind(ffs_tab[i].ffs_data);
- goto error;
- }
- }
-
for (i = 0; i < ARRAY_SIZE(gfs_configurations); ++i) {
struct gfs_configuration *c = gfs_configurations + i;
int sid = USB_GADGET_FIRST_AVAIL_IDX + i;
@@ -389,6 +417,8 @@ static int gfs_bind(struct usb_composite_dev *cdev)
c->c.bConfigurationValue = 1 + i;
c->c.bmAttributes = USB_CONFIG_ATT_SELFPOWER;
+ c->num = i;
+
ret = usb_add_config(cdev, &c->c, gfs_do_config);
if (unlikely(ret < 0))
goto error_unbind;
@@ -396,18 +426,24 @@ static int gfs_bind(struct usb_composite_dev *cdev)
usb_composite_overwrite_options(cdev, &coverwrite);
return 0;
+/* TODO */
error_unbind:
- for (i = 0; i < func_num; i++)
- functionfs_unbind(ffs_tab[i].ffs_data);
+error_rndis:
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+ usb_put_function_instance(fi_rndis);
error:
- gether_cleanup(the_dev);
-error_quick:
- gfs_ether_setup = false;
+#endif
+#if defined CONFIG_USB_FUNCTIONFS_ETH
+ if (can_support_ecm(cdev->gadget))
+ usb_put_function_instance(fi_ecm);
+ else
+ usb_put_function_instance(fi_geth);
+#endif
return ret;
}
/*
- * It is assumed that gfs_unbind is called from a context where gfs_lock is held
+ * It is assumed that gfs_unbind is called from a context where ffs_lock is held
*/
static int gfs_unbind(struct usb_composite_dev *cdev)
{
@@ -415,28 +451,30 @@ static int gfs_unbind(struct usb_composite_dev *cdev)
ENTER();
- /*
- * We may have been called in an error recovery from
- * composite_bind() after gfs_unbind() failure so we need to
- * check if gfs_ffs_data is not NULL since gfs_bind() handles
- * all error recovery itself. I'd rather we werent called
- * from composite on orror recovery, but what you're gonna
- * do...?
- */
- if (gfs_ether_setup)
- gether_cleanup(the_dev);
- gfs_ether_setup = false;
- for (i = func_num; i--; )
- if (ffs_tab[i].ffs_data)
- functionfs_unbind(ffs_tab[i].ffs_data);
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+ usb_put_function(f_rndis);
+ usb_put_function_instance(fi_rndis);
+#endif
+
+#if defined CONFIG_USB_FUNCTIONFS_ETH
+ if (can_support_ecm(cdev->gadget)) {
+ usb_put_function(f_ecm);
+ usb_put_function_instance(fi_ecm);
+ } else {
+ usb_put_function(f_geth);
+ usb_put_function_instance(fi_geth);
+ }
+#endif
+ for (i = 0; i < N_CONF * func_num; ++i)
+ usb_put_function(*(f_ffs[0] + i));
return 0;
}
/*
* It is assumed that gfs_do_config is called from a context where
- * gfs_lock is held
+ * ffs_lock is held
*/
static int gfs_do_config(struct usb_configuration *c)
{
@@ -454,15 +492,22 @@ static int gfs_do_config(struct usb_configuration *c)
}
if (gc->eth) {
- ret = gc->eth(c, gfs_host_mac, the_dev);
+ ret = gc->eth(c);
if (unlikely(ret < 0))
return ret;
}
for (i = 0; i < func_num; i++) {
- ret = functionfs_bind_config(c->cdev, c, ffs_tab[i].ffs_data);
- if (unlikely(ret < 0))
- return ret;
+ f_ffs[gc->num][i] = usb_get_function(fi_ffs[i]);
+ if (IS_ERR(f_ffs[gc->num][i])) {
+ ret = PTR_ERR(f_ffs[gc->num][i]);
+ goto error;
+ }
+ ret = usb_add_function(c, f_ffs[gc->num][i]);
+ if (ret < 0) {
+ usb_put_function(f_ffs[gc->num][i]);
+ goto error;
+ }
}
/*
@@ -479,16 +524,59 @@ static int gfs_do_config(struct usb_configuration *c)
c->interface[c->next_interface_id] = NULL;
return 0;
+error:
+ while (--i >= 0) {
+ if (!IS_ERR(f_ffs[gc->num][i]))
+ usb_remove_function(c, f_ffs[gc->num][i]);
+ usb_put_function(f_ffs[gc->num][i]);
+ }
+ return ret;
}
#ifdef CONFIG_USB_FUNCTIONFS_ETH
-static int eth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
- struct eth_dev *dev)
+static int eth_bind_config(struct usb_configuration *c)
+{
+ int status = 0;
+
+ if (can_support_ecm(c->cdev->gadget)) {
+ f_ecm = usb_get_function(fi_ecm);
+ if (IS_ERR(f_ecm))
+ return PTR_ERR(f_ecm);
+
+ status = usb_add_function(c, f_ecm);
+ if (status < 0)
+ usb_put_function(f_ecm);
+
+ } else {
+ f_geth = usb_get_function(fi_geth);
+ if (IS_ERR(f_geth))
+ return PTR_ERR(f_geth);
+
+ status = usb_add_function(c, f_geth);
+ if (status < 0)
+ usb_put_function(f_geth);
+ }
+ return status;
+}
+
+#endif
+
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+
+static int bind_rndis_config(struct usb_configuration *c)
{
- return can_support_ecm(c->cdev->gadget)
- ? ecm_bind_config(c, ethaddr, dev)
- : geth_bind_config(c, ethaddr, dev);
+ int status = 0;
+
+ f_rndis = usb_get_function(fi_rndis);
+ if (IS_ERR(f_rndis))
+ return PTR_ERR(f_rndis);
+
+ status = usb_add_function(c, f_rndis);
+ if (status < 0)
+ usb_put_function(f_rndis);
+
+ return status;
}
#endif
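The rewritten g_ffs no longer calls functionfs_bind()/functionfs_bind_config() directly; every function, the FunctionFS instances as well as the ecm/geth/rndis helpers, goes through the same get-instance, get-function, add-function lifecycle and is released with the matching put calls in gfs_unbind(). A minimal sketch of the per-configuration step, mirroring eth_bind_config() and bind_rndis_config() above (the helper name is illustrative):

/* Illustrative only: the generic bind step used once per configuration. */
static int example_bind_one(struct usb_configuration *c,
			    struct usb_function_instance *fi,
			    struct usb_function **f)
{
	int ret;

	*f = usb_get_function(fi);	/* one struct usb_function per config */
	if (IS_ERR(*f))
		return PTR_ERR(*f);

	ret = usb_add_function(c, *f);
	if (ret < 0)
		usb_put_function(*f);	/* drop it again if binding failed */

	return ret;
}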
diff --git a/drivers/usb/gadget/g_zero.h b/drivers/usb/gadget/g_zero.h
index ef3e8515272b..15f180904f8a 100644
--- a/drivers/usb/gadget/g_zero.h
+++ b/drivers/usb/gadget/g_zero.h
@@ -6,6 +6,11 @@
#ifndef __G_ZERO_H
#define __G_ZERO_H
+#define GZERO_BULK_BUFLEN 4096
+#define GZERO_QLEN 32
+#define GZERO_ISOC_INTERVAL 4
+#define GZERO_ISOC_MAXPACKET 1024
+
struct usb_zero_options {
unsigned pattern;
unsigned isoc_interval;
@@ -24,19 +29,36 @@ struct f_ss_opts {
unsigned isoc_mult;
unsigned isoc_maxburst;
unsigned bulk_buflen;
+
+ /*
+ * Read/write access to configfs attributes is handled by configfs.
+ *
+ * This is to protect the data from concurrent access by attribute
+ * read/write and by symlink creation/removal.
+ */
+ struct mutex lock;
+ int refcnt;
};
struct f_lb_opts {
struct usb_function_instance func_inst;
unsigned bulk_buflen;
unsigned qlen;
+
+ /*
+ * Read/write access to configfs attributes is handled by configfs.
+ *
+ * This is to protect the data from concurrent access by attribute
+ * read/write and by symlink creation/removal.
+ */
+ struct mutex lock;
+ int refcnt;
};
void lb_modexit(void);
int lb_modinit(void);
/* common utilities */
-struct usb_request *alloc_ep_req(struct usb_ep *ep, int len);
void free_ep_req(struct usb_ep *ep, struct usb_request *req);
void disable_endpoints(struct usb_composite_dev *cdev,
struct usb_ep *in, struct usb_ep *out,
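The alloc_ep_req() prototype disappears from g_zero.h because the helper now lives in the shared u_f.h used by f_sourcesink.c above; callers pass a default length as a third argument, as ss_alloc_ep_req() does with buflen. The per-function copy removed earlier in this patch suggests the shared version behaves roughly as sketched below; this is an assumption about u_f.h, not code from this patch:

/* Assumed shape of the shared helper; compare the body removed from
 * f_sourcesink.c earlier in this patch. */
struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (req) {
		req->length = len ?: default_len;	/* 0 means "use the default" */
		req->buf = kmalloc(req->length, GFP_ATOMIC);
		if (!req->buf) {
			usb_ep_free_request(ep, req);
			req = NULL;
		}
	}
	return req;
}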
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index f82768015715..6c85839e15ad 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -30,7 +30,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
@@ -231,7 +230,7 @@ static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
}
}
- ep->ep.maxpacket = MAX_FIFO_SIZE;
+ usb_ep_set_maxpacket_limit(&ep->ep, MAX_FIFO_SIZE);
ep->ep.desc = NULL;
ep->stopped = 1;
ep->irqs = 0;
@@ -1251,7 +1250,7 @@ static void udc_reinit (struct goku_udc *dev)
}
dev->ep[0].reg_mode = NULL;
- dev->ep[0].ep.maxpacket = MAX_EP0_SIZE;
+ usb_ep_set_maxpacket_limit(&dev->ep[0].ep, MAX_EP0_SIZE);
list_del_init (&dev->ep[0].ep.ep_list);
}
@@ -1350,16 +1349,12 @@ static int goku_udc_start(struct usb_gadget *g,
return 0;
}
-static void
-stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver)
+static void stop_activity(struct goku_udc *dev)
{
unsigned i;
DBG (dev, "%s\n", __func__);
- if (dev->gadget.speed == USB_SPEED_UNKNOWN)
- driver = NULL;
-
/* disconnect gadget driver after quiesceing hw and the driver */
udc_reset (dev);
for (i = 0; i < 4; i++)
@@ -1377,7 +1372,7 @@ static int goku_udc_stop(struct usb_gadget *g,
spin_lock_irqsave(&dev->lock, flags);
dev->driver = NULL;
- stop_activity(dev, driver);
+ stop_activity(dev);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
@@ -1521,7 +1516,7 @@ rescan:
if (unlikely(stat & INT_DEVWIDE)) {
if (stat & INT_SYSERROR) {
ERROR(dev, "system error\n");
- stop_activity(dev, dev->driver);
+ stop_activity(dev);
stat = 0;
handled = 1;
// FIXME have a neater way to prevent re-enumeration
@@ -1536,7 +1531,7 @@ rescan:
} else {
DBG(dev, "disconnect\n");
if (dev->gadget.speed == USB_SPEED_FULL)
- stop_activity(dev, dev->driver);
+ stop_activity(dev);
dev->ep0state = EP0_DISCONNECT;
dev->int_enable = INT_DEVWIDE;
writel(dev->int_enable, &dev->regs->int_enable);
diff --git a/drivers/usb/gadget/gr_udc.c b/drivers/usb/gadget/gr_udc.c
new file mode 100644
index 000000000000..914cbd84ee40
--- /dev/null
+++ b/drivers/usb/gadget/gr_udc.c
@@ -0,0 +1,2238 @@
+/*
+ * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
+ *
+ * 2013 (c) Aeroflex Gaisler AB
+ *
+ * This driver supports GRUSBDC USB Device Controller cores available in the
+ * GRLIB VHDL IP core library.
+ *
+ * Full documentation of the GRUSBDC core can be found here:
+ * http://www.gaisler.com/products/grlib/grip.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Contributors:
+ * - Andreas Larsson <andreas@gaisler.com>
+ * - Marko Isomaki
+ */
+
+/*
+ * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints each
+ * individually configurable to any of the four USB transfer types. This driver
+ * only supports cores in DMA mode.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include <asm/byteorder.h>
+
+#include "gr_udc.h"
+
+#define DRIVER_NAME "gr_udc"
+#define DRIVER_DESC "Aeroflex Gaisler GRUSBDC USB Peripheral Controller"
+
+static const char driver_name[] = DRIVER_NAME;
+static const char driver_desc[] = DRIVER_DESC;
+
+#define gr_read32(x) (ioread32be((x)))
+#define gr_write32(x, v) (iowrite32be((v), (x)))
+
+/* USB speed and corresponding string calculated from status register value */
+#define GR_SPEED(status) \
+ ((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
+#define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))
+
+/* Size of hardware buffer calculated from epctrl register value */
+#define GR_BUFFER_SIZE(epctrl) \
+ ((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
+ GR_EPCTRL_BUFSZ_SCALER)
+
+/* ---------------------------------------------------------------------- */
+/* Debug printout functionality */
+
+static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};
+
+static const char *gr_ep0state_string(enum gr_ep0state state)
+{
+ static const char *const names[] = {
+ [GR_EP0_DISCONNECT] = "disconnect",
+ [GR_EP0_SETUP] = "setup",
+ [GR_EP0_IDATA] = "idata",
+ [GR_EP0_ODATA] = "odata",
+ [GR_EP0_ISTATUS] = "istatus",
+ [GR_EP0_OSTATUS] = "ostatus",
+ [GR_EP0_STALL] = "stall",
+ [GR_EP0_SUSPEND] = "suspend",
+ };
+
+ if (state < 0 || state >= ARRAY_SIZE(names))
+ return "UNKNOWN";
+
+ return names[state];
+}
+
+#ifdef VERBOSE_DEBUG
+
+static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
+ struct gr_request *req)
+{
+ int buflen = ep->is_in ? req->req.length : req->req.actual;
+ int rowlen = 32;
+ int plen = min(rowlen, buflen);
+
+ dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
+ (buflen > plen ? " (truncated)" : ""));
+ print_hex_dump_debug(" ", DUMP_PREFIX_NONE,
+ rowlen, 4, req->req.buf, plen, false);
+}
+
+static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
+ u16 value, u16 index, u16 length)
+{
+ dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
+ type, request, value, index, length);
+}
+#else /* !VERBOSE_DEBUG */
+
+static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
+ struct gr_request *req) {}
+
+static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
+ u16 value, u16 index, u16 length) {}
+
+#endif /* VERBOSE_DEBUG */
+
+/* ---------------------------------------------------------------------- */
+/* Debugfs functionality */
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+
+static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
+{
+ u32 epctrl = gr_read32(&ep->regs->epctrl);
+ u32 epstat = gr_read32(&ep->regs->epstat);
+ int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
+ struct gr_request *req;
+
+ seq_printf(seq, "%s:\n", ep->ep.name);
+ seq_printf(seq, " mode = %s\n", gr_modestring[mode]);
+ seq_printf(seq, " halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
+ seq_printf(seq, " disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
+ seq_printf(seq, " valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
+ seq_printf(seq, " dma_start = %d\n", ep->dma_start);
+ seq_printf(seq, " stopped = %d\n", ep->stopped);
+ seq_printf(seq, " wedged = %d\n", ep->wedged);
+ seq_printf(seq, " callback = %d\n", ep->callback);
+ seq_printf(seq, " maxpacket = %d\n", ep->ep.maxpacket);
+ seq_printf(seq, " bytes_per_buffer = %d\n", ep->bytes_per_buffer);
+ if (mode == 1 || mode == 3)
+ seq_printf(seq, " nt = %d\n",
+ (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);
+
+ seq_printf(seq, " Buffer 0: %s %s%d\n",
+ epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
+ epstat & GR_EPSTAT_BS ? " " : "selected ",
+ (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
+ seq_printf(seq, " Buffer 1: %s %s%d\n",
+ epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
+ epstat & GR_EPSTAT_BS ? "selected " : " ",
+ (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);
+
+ if (list_empty(&ep->queue)) {
+ seq_puts(seq, " Queue: empty\n\n");
+ return;
+ }
+
+ seq_puts(seq, " Queue:\n");
+ list_for_each_entry(req, &ep->queue, queue) {
+ struct gr_dma_desc *desc;
+ struct gr_dma_desc *next;
+
+ seq_printf(seq, " 0x%p: 0x%p %d %d\n", req,
+ &req->req.buf, req->req.actual, req->req.length);
+
+ next = req->first_desc;
+ do {
+ desc = next;
+ next = desc->next_desc;
+ seq_printf(seq, " %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
+ desc == req->curr_desc ? 'c' : ' ',
+ desc, desc->paddr, desc->ctrl, desc->data);
+ } while (desc != req->last_desc);
+ }
+ seq_puts(seq, "\n");
+}
+
+
+static int gr_seq_show(struct seq_file *seq, void *v)
+{
+ struct gr_udc *dev = seq->private;
+ u32 control = gr_read32(&dev->regs->control);
+ u32 status = gr_read32(&dev->regs->status);
+ struct gr_ep *ep;
+
+ seq_printf(seq, "usb state = %s\n",
+ usb_state_string(dev->gadget.state));
+ seq_printf(seq, "address = %d\n",
+ (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
+ seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
+ seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
+ seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
+ seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
+ seq_printf(seq, "test_mode = %d\n", dev->test_mode);
+ seq_puts(seq, "\n");
+
+ list_for_each_entry(ep, &dev->ep_list, ep_list)
+ gr_seq_ep_show(seq, ep);
+
+ return 0;
+}
+
+static int gr_dfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, gr_seq_show, inode->i_private);
+}
+
+static const struct file_operations gr_dfs_fops = {
+ .owner = THIS_MODULE,
+ .open = gr_dfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void gr_dfs_create(struct gr_udc *dev)
+{
+ const char *name = "gr_udc_state";
+
+ dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
+ if (IS_ERR(dev->dfs_root)) {
+ dev_err(dev->dev, "Failed to create debugfs directory\n");
+ return;
+ }
+ dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root,
+ dev, &gr_dfs_fops);
+ if (IS_ERR(dev->dfs_state))
+ dev_err(dev->dev, "Failed to create debugfs file %s\n", name);
+}
+
+static void gr_dfs_delete(struct gr_udc *dev)
+{
+ /* Handles NULL and ERR pointers internally */
+ debugfs_remove(dev->dfs_state);
+ debugfs_remove(dev->dfs_root);
+}
+
+#else /* !CONFIG_USB_GADGET_DEBUG_FS */
+
+static void gr_dfs_create(struct gr_udc *dev) {}
+static void gr_dfs_delete(struct gr_udc *dev) {}
+
+#endif /* CONFIG_USB_GADGET_DEBUG_FS */
+
+/* ---------------------------------------------------------------------- */
+/* DMA and request handling */
+
+/* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
+static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
+{
+ dma_addr_t paddr;
+ struct gr_dma_desc *dma_desc;
+
+ dma_desc = dma_pool_alloc(ep->dev->desc_pool, gfp_flags, &paddr);
+ if (!dma_desc) {
+ dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
+ return NULL;
+ }
+
+ memset(dma_desc, 0, sizeof(*dma_desc));
+ dma_desc->paddr = paddr;
+
+ return dma_desc;
+}
+
+static inline void gr_free_dma_desc(struct gr_udc *dev,
+ struct gr_dma_desc *desc)
+{
+ dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
+}
+
+/* Frees the chain of struct gr_dma_desc for the given request */
+static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
+{
+ struct gr_dma_desc *desc;
+ struct gr_dma_desc *next;
+
+ next = req->first_desc;
+ if (!next)
+ return;
+
+ do {
+ desc = next;
+ next = desc->next_desc;
+ gr_free_dma_desc(dev, desc);
+ } while (desc != req->last_desc);
+
+ req->first_desc = NULL;
+ req->curr_desc = NULL;
+ req->last_desc = NULL;
+}
+
+static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);
+
+/*
+ * Frees allocated resources and calls the appropriate completion function or
+ * setup packet handler for a finished request.
+ *
+ * Must be called with dev->lock held and irqs disabled.
+ */
+static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
+ int status)
+ __releases(&dev->lock)
+ __acquires(&dev->lock)
+{
+ struct gr_udc *dev;
+
+ list_del_init(&req->queue);
+
+ if (likely(req->req.status == -EINPROGRESS))
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ dev = ep->dev;
+ usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
+ gr_free_dma_desc_chain(dev, req);
+
+ if (ep->is_in) /* For OUT, actual gets updated bit by bit */
+ req->req.actual = req->req.length;
+
+ if (!status) {
+ if (ep->is_in)
+ gr_dbgprint_request("SENT", ep, req);
+ else
+ gr_dbgprint_request("RECV", ep, req);
+ }
+
+ /* Prevent changes to ep->queue during callback */
+ ep->callback = 1;
+ if (req == dev->ep0reqo && !status) {
+ if (req->setup)
+ gr_ep0_setup(dev, req);
+ else
+ dev_err(dev->dev,
+ "Unexpected non setup packet on ep0in\n");
+ } else if (req->req.complete) {
+ spin_unlock(&dev->lock);
+
+ req->req.complete(&ep->ep, &req->req);
+
+ spin_lock(&dev->lock);
+ }
+ ep->callback = 0;
+}
+
+static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+ struct gr_request *req;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+/*
+ * Starts DMA for endpoint ep if there are requests in the queue.
+ *
+ * Must be called with dev->lock held and with !ep->stopped.
+ */
+static void gr_start_dma(struct gr_ep *ep)
+{
+ struct gr_request *req;
+ u32 dmactrl;
+
+ if (list_empty(&ep->queue)) {
+ ep->dma_start = 0;
+ return;
+ }
+
+ req = list_first_entry(&ep->queue, struct gr_request, queue);
+
+ /* A descriptor should already have been allocated */
+ BUG_ON(!req->curr_desc);
+
+ wmb(); /* Make sure all is settled before handing it over to DMA */
+
+ /* Set the descriptor pointer in the hardware */
+ gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);
+
+ /* Announce available descriptors */
+ dmactrl = gr_read32(&ep->regs->dmactrl);
+ gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);
+
+ ep->dma_start = 1;
+}
+
+/*
+ * Finishes the first request in the ep's queue and, if available, starts the
+ * next request in queue.
+ *
+ * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
+ */
+static void gr_dma_advance(struct gr_ep *ep, int status)
+{
+ struct gr_request *req;
+
+ req = list_first_entry(&ep->queue, struct gr_request, queue);
+ gr_finish_request(ep, req, status);
+ gr_start_dma(ep); /* Regardless of ep->dma_start */
+}
+
+/*
+ * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
+ * transfer to be canceled and clears GR_DMACTRL_DA.
+ *
+ * Must be called with dev->lock held.
+ */
+static void gr_abort_dma(struct gr_ep *ep)
+{
+ u32 dmactrl;
+
+ dmactrl = gr_read32(&ep->regs->dmactrl);
+ gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
+}
+
+/*
+ * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
+ * chain.
+ *
+ * Size is not used for OUT endpoints. Hardware cannot be instructed to handle
+ * a smaller buffer than MAXPL in the OUT direction.
+ */
+static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
+ dma_addr_t data, unsigned size, gfp_t gfp_flags)
+{
+ struct gr_dma_desc *desc;
+
+ desc = gr_alloc_dma_desc(ep, gfp_flags);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->data = data;
+ if (ep->is_in)
+ desc->ctrl =
+ (GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
+ else
+ desc->ctrl = GR_DESC_OUT_CTRL_IE;
+
+ if (!req->first_desc) {
+ req->first_desc = desc;
+ req->curr_desc = desc;
+ } else {
+ req->last_desc->next_desc = desc;
+ req->last_desc->next = desc->paddr;
+ req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
+ }
+ req->last_desc = desc;
+
+ return 0;
+}
+
+/*
+ * Sets up a chain of struct gr_dma_descriptors pointing to buffers that
+ * together cover req->req.length bytes of the buffer at DMA address
+ * req->req.dma for the OUT direction.
+ *
+ * The first descriptor in the chain is enabled, the rest disabled. The
+ * interrupt handler will later enable them one by one when needed so we can
+ * find out when the transfer is finished. For OUT endpoints, all descriptors
+ * therefore generate interrupts.
+ */
+static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
+ gfp_t gfp_flags)
+{
+ u16 bytes_left; /* Bytes left to provide descriptors for */
+ u16 bytes_used; /* Bytes accommodated for */
+ int ret = 0;
+
+ req->first_desc = NULL; /* Signals that no allocation is done yet */
+ bytes_left = req->req.length;
+ bytes_used = 0;
+ while (bytes_left > 0) {
+ dma_addr_t start = req->req.dma + bytes_used;
+ u16 size = min(bytes_left, ep->bytes_per_buffer);
+
+ /* Should not happen however - gr_queue stops such lengths */
+ if (size < ep->bytes_per_buffer)
+ dev_warn(ep->dev->dev,
+ "Buffer overrun risk: %u < %u bytes/buffer\n",
+ size, ep->bytes_per_buffer);
+
+ ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
+ if (ret)
+ goto alloc_err;
+
+ bytes_left -= size;
+ bytes_used += size;
+ }
+
+ req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;
+
+ return 0;
+
+alloc_err:
+ gr_free_dma_desc_chain(ep->dev, req);
+
+ return ret;
+}
+
+/*
+ * Sets up a chain of struct gr_dma_descriptors pointing to buffers that
+ * together cover req->req.length bytes of the buffer at DMA address
+ * req->req.dma for the IN direction.
+ *
+ * When more data is provided than the maximum payload size, the hardware splits
+ * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
+ * is always set to a multiple of the maximum payload (restricted to the valid
+ * number of maximum payloads during high bandwidth isochronous or interrupt
+ * transfers).
+ *
+ * All descriptors are enabled from the beginning and we only generate an
+ * interrupt for the last one indicating that the entire request has been pushed
+ * to hardware.
+ */
+static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
+ gfp_t gfp_flags)
+{
+ u16 bytes_left; /* Bytes left in req to provide descriptors for */
+ u16 bytes_used; /* Bytes in req accommodated for */
+ int ret = 0;
+
+ req->first_desc = NULL; /* Signals that no allocation is done yet */
+ bytes_left = req->req.length;
+ bytes_used = 0;
+ do { /* Allow for zero length packets */
+ dma_addr_t start = req->req.dma + bytes_used;
+ u16 size = min(bytes_left, ep->bytes_per_buffer);
+
+ ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
+ if (ret)
+ goto alloc_err;
+
+ bytes_left -= size;
+ bytes_used += size;
+ } while (bytes_left > 0);
+
+ /*
+ * Send an extra zero length packet to indicate that no more data is
+ * available when req->req.zero is set and the data length is an even
+ * multiple of ep->ep.maxpacket.
+ */
+ if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
+ ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
+ if (ret)
+ goto alloc_err;
+ }
+
+ /*
+ * For IN packets we only want to know when the last packet has been
+ * transmitted (not just put into internal buffers).
+ */
+ req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;
+
+ return 0;
+
+alloc_err:
+ gr_free_dma_desc_chain(ep->dev, req);
+
+ return ret;
+}
+
+/* Must be called with dev->lock held */
+static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
+{
+ struct gr_udc *dev = ep->dev;
+ int ret;
+
+ if (unlikely(!ep->ep.desc && ep->num != 0)) {
+ dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
+ return -EINVAL;
+ }
+
+ if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
+ dev_err(dev->dev,
+ "Invalid request for %s: buf=%p list_empty=%d\n",
+ ep->ep.name, req->req.buf, list_empty(&req->queue));
+ return -EINVAL;
+ }
+
+ /*
+ * The DMA controller cannot handle OUT buffers smaller than
+ * maxpacket. This could lead to buffer overruns if an unexpectedly long
+ * packet is received.
+ */
+ if (!ep->is_in && (req->req.length % ep->ep.maxpacket) != 0) {
+ dev_err(dev->dev,
+ "OUT request length %d is not multiple of maxpacket\n",
+ req->req.length);
+ return -EMSGSIZE;
+ }
+
+ if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
+ dev_err(dev->dev, "-ESHUTDOWN");
+ return -ESHUTDOWN;
+ }
+
+ /* Can't touch registers when suspended */
+ if (dev->ep0state == GR_EP0_SUSPEND) {
+ dev_err(dev->dev, "-EBUSY");
+ return -EBUSY;
+ }
+
+ /* Set up DMA mapping in case the caller didn't */
+ ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
+ if (ret) {
+ dev_err(dev->dev, "usb_gadget_map_request");
+ return ret;
+ }
+
+ if (ep->is_in)
+ ret = gr_setup_in_desc_list(ep, req, gfp_flags);
+ else
+ ret = gr_setup_out_desc_list(ep, req, gfp_flags);
+ if (ret)
+ return ret;
+
+ req->req.status = -EINPROGRESS;
+ req->req.actual = 0;
+ list_add_tail(&req->queue, &ep->queue);
+
+ /* Start DMA if not started, otherwise interrupt handler handles it */
+ if (!ep->dma_start && likely(!ep->stopped))
+ gr_start_dma(ep);
+
+ return 0;
+}
+
+/*
+ * Queue a request from within the driver.
+ *
+ * Must be called with dev->lock held.
+ */
+static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
+ gfp_t gfp_flags)
+{
+ if (ep->is_in)
+ gr_dbgprint_request("RESP", ep, req);
+
+ return gr_queue(ep, req, gfp_flags);
+}
+
+/* ---------------------------------------------------------------------- */
+/* General helper functions */
+
+/*
+ * Dequeue ALL requests.
+ *
+ * Must be called with dev->lock held and irqs disabled.
+ */
+static void gr_ep_nuke(struct gr_ep *ep)
+{
+ struct gr_request *req;
+
+ ep->stopped = 1;
+ ep->dma_start = 0;
+ gr_abort_dma(ep);
+
+ while (!list_empty(&ep->queue)) {
+ req = list_first_entry(&ep->queue, struct gr_request, queue);
+ gr_finish_request(ep, req, -ESHUTDOWN);
+ }
+}
+
+/*
+ * Reset the hardware state of this endpoint.
+ *
+ * Must be called with dev->lock held.
+ */
+static void gr_ep_reset(struct gr_ep *ep)
+{
+ gr_write32(&ep->regs->epctrl, 0);
+ gr_write32(&ep->regs->dmactrl, 0);
+
+ ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
+ ep->ep.desc = NULL;
+ ep->stopped = 1;
+ ep->dma_start = 0;
+}
+
+/*
+ * Generate STALL on ep0in/out.
+ *
+ * Must be called with dev->lock held.
+ */
+static void gr_control_stall(struct gr_udc *dev)
+{
+ u32 epctrl;
+
+ epctrl = gr_read32(&dev->epo[0].regs->epctrl);
+ gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
+ epctrl = gr_read32(&dev->epi[0].regs->epctrl);
+ gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
+
+ dev->ep0state = GR_EP0_STALL;
+}
+
+/*
+ * Halts, halts and wedges, or clears halt for an endpoint.
+ *
+ * Must be called with dev->lock held.
+ */
+static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
+{
+ u32 epctrl;
+ int retval = 0;
+
+ if (ep->num && !ep->ep.desc)
+ return -EINVAL;
+
+ if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
+ return -EOPNOTSUPP;
+
+ /* Never actually halt ep0, and therefore never clear halt for ep0 */
+ if (!ep->num) {
+ if (halt && !fromhost) {
+ /* ep0 halt from gadget - generate protocol stall */
+ gr_control_stall(ep->dev);
+ dev_dbg(ep->dev->dev, "EP: stall ep0\n");
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
+ (halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);
+
+ epctrl = gr_read32(&ep->regs->epctrl);
+ if (halt) {
+ /* Set HALT */
+ gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
+ ep->stopped = 1;
+ if (wedge)
+ ep->wedged = 1;
+ } else {
+ gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
+ ep->stopped = 0;
+ ep->wedged = 0;
+
+ /* Things might have been queued up in the meantime */
+ if (!ep->dma_start)
+ gr_start_dma(ep);
+ }
+
+ return retval;
+}
+
+/* Must be called with dev->lock held */
+static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
+{
+ if (dev->ep0state != value)
+ dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
+ gr_ep0state_string(value));
+ dev->ep0state = value;
+}
+
+/*
+ * Should only be called when endpoints cannot generate interrupts.
+ *
+ * Must be called with dev->lock held.
+ */
+static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
+{
+ gr_write32(&dev->regs->control, 0);
+ wmb(); /* Make sure that we do not deny one of our interrupts */
+ dev->irq_enabled = 0;
+}
+
+/*
+ * Stop all device activity and disable data line pullup.
+ *
+ * Must be called with dev->lock held and irqs disabled.
+ */
+static void gr_stop_activity(struct gr_udc *dev)
+{
+ struct gr_ep *ep;
+
+ list_for_each_entry(ep, &dev->ep_list, ep_list)
+ gr_ep_nuke(ep);
+
+ gr_disable_interrupts_and_pullup(dev);
+
+ gr_set_ep0state(dev, GR_EP0_DISCONNECT);
+ usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
+}
+
+/* ---------------------------------------------------------------------- */
+/* ep0 setup packet handling */
+
+static void gr_ep0_testmode_complete(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ struct gr_ep *ep;
+ struct gr_udc *dev;
+ u32 control;
+
+ ep = container_of(_ep, struct gr_ep, ep);
+ dev = ep->dev;
+
+ spin_lock(&dev->lock);
+
+ control = gr_read32(&dev->regs->control);
+ control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
+ gr_write32(&dev->regs->control, control);
+
+ spin_unlock(&dev->lock);
+}
+
+static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
+{
+ /* Nothing needs to be done here */
+}
+
+/*
+ * Queue a response on ep0in.
+ *
+ * Must be called with dev->lock held.
+ */
+static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
+ void (*complete)(struct usb_ep *ep,
+ struct usb_request *req))
+{
+ u8 *reqbuf = dev->ep0reqi->req.buf;
+ int status;
+ int i;
+
+ for (i = 0; i < length; i++)
+ reqbuf[i] = buf[i];
+ dev->ep0reqi->req.length = length;
+ dev->ep0reqi->req.complete = complete;
+
+ status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
+ if (status < 0)
+ dev_err(dev->dev,
+ "Could not queue ep0in setup response: %d\n", status);
+
+ return status;
+}
+
+/*
+ * Queue a 2 byte response on ep0in.
+ *
+ * Must be called with dev->lock held.
+ */
+static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
+{
+ __le16 le_response = cpu_to_le16(response);
+
+ return gr_ep0_respond(dev, (u8 *)&le_response, 2,
+ gr_ep0_dummy_complete);
+}
+
+/*
+ * Queue a ZLP response on ep0in.
+ *
+ * Must be called with dev->lock held.
+ */
+static inline int gr_ep0_respond_empty(struct gr_udc *dev)
+{
+ return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
+}
+
+/*
+ * This is run when a SET_ADDRESS request is received. It first writes
+ * the new address to the control register, which is updated internally
+ * when the next IN packet is ACKed.
+ *
+ * Must be called with dev->lock held.
+ */
+static void gr_set_address(struct gr_udc *dev, u8 address)
+{
+ u32 control;
+
+ control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
+ control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
+ control |= GR_CONTROL_SU;
+ gr_write32(&dev->regs->control, control);
+}
+
+/*
+ * Returns negative for STALL, 0 for successful handling and positive for
+ * delegation.
+ *
+ * Must be called with dev->lock held.
+ */
+static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
+ u16 value, u16 index)
+{
+ u16 response;
+ u8 test;
+
+ switch (request) {
+ case USB_REQ_SET_ADDRESS:
+ dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
+ gr_set_address(dev, value & 0xff);
+ if (value)
+ usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
+ else
+ usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
+ return gr_ep0_respond_empty(dev);
+
+ case USB_REQ_GET_STATUS:
+ /* Self powered | remote wakeup */
+ response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
+ return gr_ep0_respond_u16(dev, response);
+
+ case USB_REQ_SET_FEATURE:
+ switch (value) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ /* Allow remote wakeup */
+ dev->remote_wakeup = 1;
+ return gr_ep0_respond_empty(dev);
+
+ case USB_DEVICE_TEST_MODE:
+ /* The hardware does not support TEST_FORCE_EN */
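+ /*
+ * The test mode itself is entered from gr_ep0_testmode_complete()
+ * once the status stage response has been sent to the host.
+ */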
+ test = index >> 8;
+ if (test >= TEST_J && test <= TEST_PACKET) {
+ dev->test_mode = test;
+ return gr_ep0_respond(dev, NULL, 0,
+ gr_ep0_testmode_complete);
+ }
+ }
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ switch (value) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ /* Disallow remote wakeup */
+ dev->remote_wakeup = 0;
+ return gr_ep0_respond_empty(dev);
+ }
+ break;
+ }
+
+ return 1; /* Delegate the rest */
+}
+
+/*
+ * Returns negative for STALL, 0 for successful handling and positive for
+ * delegation.
+ *
+ * Must be called with dev->lock held.
+ */
+static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
+ u16 value, u16 index)
+{
+ if (dev->gadget.state != USB_STATE_CONFIGURED)
+ return -1;
+
+ /*
+ * Should return STALL for invalid interfaces, but the UDC driver does
+ * not know anything about interface validity. However, many gadget
+ * drivers do not handle GET_STATUS, so we need to take care of that.
+ */
+
+ switch (request) {
+ case USB_REQ_GET_STATUS:
+ return gr_ep0_respond_u16(dev, 0x0000);
+
+ case USB_REQ_SET_FEATURE:
+ case USB_REQ_CLEAR_FEATURE:
+ /*
+ * No possible valid standard requests. Still let gadget drivers
+ * have a go at it.
+ */
+ break;
+ }
+
+ return 1; /* Delegate the rest */
+}
+
+/*
+ * Returns negative for STALL, 0 for successful handling and positive for
+ * delegation.
+ *
+ * Must be called with dev->lock held.
+ */
+static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
+ u16 value, u16 index)
+{
+ struct gr_ep *ep;
+ int status;
+ int halted;
+ u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
+ u8 is_in = index & USB_ENDPOINT_DIR_MASK;
+
+ if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
+ return -1;
+
+ if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
+ return -1;
+
+ ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);
+
+ switch (request) {
+ case USB_REQ_GET_STATUS:
+ halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
+ return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);
+
+ case USB_REQ_SET_FEATURE:
+ switch (value) {
+ case USB_ENDPOINT_HALT:
+ status = gr_ep_halt_wedge(ep, 1, 0, 1);
+ if (status >= 0)
+ status = gr_ep0_respond_empty(dev);
+ return status;
+ }
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ switch (value) {
+ case USB_ENDPOINT_HALT:
+ if (ep->wedged)
+ return -1;
+ status = gr_ep_halt_wedge(ep, 0, 0, 1);
+ if (status >= 0)
+ status = gr_ep0_respond_empty(dev);
+ return status;
+ }
+ break;
+ }
+
+ return 1; /* Delegate the rest */
+}
+
+/* Must be called with dev->lock held */
+static void gr_ep0out_requeue(struct gr_udc *dev)
+{
+ int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);
+
+ if (ret)
+ dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
+ ret);
+}
+
+/*
+ * The main function dealing with setup requests on ep0.
+ *
+ * Must be called with dev->lock held and irqs disabled
+ */
+static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
+ __releases(&dev->lock)
+ __acquires(&dev->lock)
+{
+ union {
+ struct usb_ctrlrequest ctrl;
+ u8 raw[8];
+ u32 word[2];
+ } u;
+ u8 type;
+ u8 request;
+ u16 value;
+ u16 index;
+ u16 length;
+ int i;
+ int status;
+
+ /* Restore from ep0 halt */
+ if (dev->ep0state == GR_EP0_STALL) {
+ gr_set_ep0state(dev, GR_EP0_SETUP);
+ if (!req->req.actual)
+ goto out;
+ }
+
+ if (dev->ep0state == GR_EP0_ISTATUS) {
+ gr_set_ep0state(dev, GR_EP0_SETUP);
+ if (req->req.actual > 0)
+ dev_dbg(dev->dev,
+ "Unexpected setup packet at state %s\n",
+ gr_ep0state_string(GR_EP0_ISTATUS));
+ else
+ goto out; /* Got expected ZLP */
+ } else if (dev->ep0state != GR_EP0_SETUP) {
+ dev_info(dev->dev,
+ "Unexpected ep0out request at state %s - stalling\n",
+ gr_ep0state_string(dev->ep0state));
+ gr_control_stall(dev);
+ gr_set_ep0state(dev, GR_EP0_SETUP);
+ goto out;
+ } else if (!req->req.actual) {
+ dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
+ gr_ep0state_string(dev->ep0state));
+ goto out;
+ }
+
+ /* Handle SETUP packet */
+ for (i = 0; i < req->req.actual; i++)
+ u.raw[i] = ((u8 *)req->req.buf)[i];
+
+ type = u.ctrl.bRequestType;
+ request = u.ctrl.bRequest;
+ value = le16_to_cpu(u.ctrl.wValue);
+ index = le16_to_cpu(u.ctrl.wIndex);
+ length = le16_to_cpu(u.ctrl.wLength);
+
+ gr_dbgprint_devreq(dev, type, request, value, index, length);
+
+ /* Check for data stage */
+ if (length) {
+ if (type & USB_DIR_IN)
+ gr_set_ep0state(dev, GR_EP0_IDATA);
+ else
+ gr_set_ep0state(dev, GR_EP0_ODATA);
+ }
+
+ status = 1; /* Positive status flags delegation */
+ if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (type & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ status = gr_device_request(dev, type, request,
+ value, index);
+ break;
+ case USB_RECIP_ENDPOINT:
+ status = gr_endpoint_request(dev, type, request,
+ value, index);
+ break;
+ case USB_RECIP_INTERFACE:
+ status = gr_interface_request(dev, type, request,
+ value, index);
+ break;
+ }
+ }
+
+ if (status > 0) {
+ spin_unlock(&dev->lock);
+
+ dev_vdbg(dev->dev, "DELEGATE\n");
+ status = dev->driver->setup(&dev->gadget, &u.ctrl);
+
+ spin_lock(&dev->lock);
+ }
+
+ /* Generate STALL on both ep0out and ep0in if requested */
+ if (unlikely(status < 0)) {
+ dev_vdbg(dev->dev, "STALL\n");
+ gr_control_stall(dev);
+ }
+
+ if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
+ request == USB_REQ_SET_CONFIGURATION) {
+ if (!value) {
+ dev_dbg(dev->dev, "STATUS: deconfigured\n");
+ usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
+ } else if (status >= 0) {
+ /* Not configured unless the gadget OKs it */
+ dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
+ usb_gadget_set_state(&dev->gadget,
+ USB_STATE_CONFIGURED);
+ }
+ }
+
+ /* Get ready for next stage */
+ if (dev->ep0state == GR_EP0_ODATA)
+ gr_set_ep0state(dev, GR_EP0_OSTATUS);
+ else if (dev->ep0state == GR_EP0_IDATA)
+ gr_set_ep0state(dev, GR_EP0_ISTATUS);
+ else
+ gr_set_ep0state(dev, GR_EP0_SETUP);
+
+out:
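+ /* Always keep a request queued on ep0out to catch the next SETUP packet */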
+ gr_ep0out_requeue(dev);
+}
+
+/* ---------------------------------------------------------------------- */
+/* VBUS and USB reset handling */
+
+/* Must be called with dev->lock held and irqs disabled */
+static void gr_vbus_connected(struct gr_udc *dev, u32 status)
+{
+ u32 control;
+
+ dev->gadget.speed = GR_SPEED(status);
+ usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);
+
+ /* Turn on full interrupts and pullup */
+ control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
+ GR_CONTROL_SP | GR_CONTROL_EP);
+ gr_write32(&dev->regs->control, control);
+}
+
+/* Must be called with dev->lock held */
+static void gr_enable_vbus_detect(struct gr_udc *dev)
+{
+ u32 status;
+
+ dev->irq_enabled = 1;
+ wmb(); /* Make sure we do not ignore an interrupt */
+ gr_write32(&dev->regs->control, GR_CONTROL_VI);
+
+ /* Take care of the case we are already plugged in at this point */
+ status = gr_read32(&dev->regs->status);
+ if (status & GR_STATUS_VB)
+ gr_vbus_connected(dev, status);
+}
+
+/* Must be called with dev->lock held and irqs disabled */
+static void gr_vbus_disconnected(struct gr_udc *dev)
+{
+ gr_stop_activity(dev);
+
+ /* Report disconnect */
+ if (dev->driver && dev->driver->disconnect) {
+ spin_unlock(&dev->lock);
+
+ dev->driver->disconnect(&dev->gadget);
+
+ spin_lock(&dev->lock);
+ }
+
+ gr_enable_vbus_detect(dev);
+}
+
+/* Must be called with dev->lock held and irqs disabled */
+static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
+{
+ gr_set_address(dev, 0);
+ gr_set_ep0state(dev, GR_EP0_SETUP);
+ usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
+ dev->gadget.speed = GR_SPEED(status);
+
+ gr_ep_nuke(&dev->epo[0]);
+ gr_ep_nuke(&dev->epi[0]);
+ dev->epo[0].stopped = 0;
+ dev->epi[0].stopped = 0;
+ gr_ep0out_requeue(dev);
+}
+
+/* ---------------------------------------------------------------------- */
+/* Irq handling */
+
+/*
+ * Handles interrupts from in endpoints. Returns whether something was handled.
+ *
+ * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
+ */
+static int gr_handle_in_ep(struct gr_ep *ep)
+{
+ struct gr_request *req;
+
+ req = list_first_entry(&ep->queue, struct gr_request, queue);
+ if (!req->last_desc)
+ return 0;
+
+ if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
+ return 0; /* Not put in hardware buffers yet */
+
+ if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
+ return 0; /* Not transmitted yet, still in hardware buffers */
+
+ /* Write complete */
+ gr_dma_advance(ep, 0);
+
+ return 1;
+}
+
+/*
+ * Handles interrupts from out endpoints. Returns whether something was handled.
+ *
+ * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
+ */
+static int gr_handle_out_ep(struct gr_ep *ep)
+{
+ u32 ep_dmactrl;
+ u32 ctrl;
+ u16 len;
+ struct gr_request *req;
+ struct gr_udc *dev = ep->dev;
+
+ req = list_first_entry(&ep->queue, struct gr_request, queue);
+ if (!req->curr_desc)
+ return 0;
+
+ ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
+ if (ctrl & GR_DESC_OUT_CTRL_EN)
+ return 0; /* Not received yet */
+
+ /* Read complete */
+ len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
+ req->req.actual += len;
+ if (ctrl & GR_DESC_OUT_CTRL_SE)
+ req->setup = 1;
+
+ if (len < ep->ep.maxpacket || req->req.actual == req->req.length) {
+ /* Short packet or the expected size - we are done */
+
+ if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
+ /*
+ * Send a status stage ZLP to ack the DATA stage in the
+ * OUT direction. This needs to be done before
+ * gr_dma_advance as that can lead to a call to
+ * ep0_setup that can change dev->ep0state.
+ */
+ gr_ep0_respond_empty(dev);
+ gr_set_ep0state(dev, GR_EP0_SETUP);
+ }
+
+ gr_dma_advance(ep, 0);
+ } else {
+ /* Not done yet. Enable the next descriptor to receive more. */
+ req->curr_desc = req->curr_desc->next_desc;
+ req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;
+
+ ep_dmactrl = gr_read32(&ep->regs->dmactrl);
+ gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
+ }
+
+ return 1;
+}
+
+/*
+ * Handle state changes. Returns whether something was handled.
+ *
+ * Must be called with dev->lock held and irqs disabled.
+ */
+static int gr_handle_state_changes(struct gr_udc *dev)
+{
+ u32 status = gr_read32(&dev->regs->status);
+ int handled = 0;
+ int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
+ dev->gadget.state == USB_STATE_ATTACHED);
+
+ /* VBUS valid detected */
+ if (!powstate && (status & GR_STATUS_VB)) {
+ dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
+ gr_vbus_connected(dev, status);
+ handled = 1;
+ }
+
+ /* Disconnect */
+ if (powstate && !(status & GR_STATUS_VB)) {
+ dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
+ gr_vbus_disconnected(dev);
+ handled = 1;
+ }
+
+ /* USB reset detected */
+ if (status & GR_STATUS_UR) {
+ dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
+ GR_SPEED_STR(status));
+ gr_write32(&dev->regs->status, GR_STATUS_UR);
+ gr_udc_usbreset(dev, status);
+ handled = 1;
+ }
+
+ /* Speed change */
+ if (dev->gadget.speed != GR_SPEED(status)) {
+ dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
+ GR_SPEED_STR(status));
+ dev->gadget.speed = GR_SPEED(status);
+ handled = 1;
+ }
+
+ /* Going into suspend */
+ if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
+ dev_dbg(dev->dev, "STATUS: USB suspend\n");
+ gr_set_ep0state(dev, GR_EP0_SUSPEND);
+ dev->suspended_from = dev->gadget.state;
+ usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);
+
+ if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
+ dev->driver && dev->driver->suspend) {
+ spin_unlock(&dev->lock);
+
+ dev->driver->suspend(&dev->gadget);
+
+ spin_lock(&dev->lock);
+ }
+ handled = 1;
+ }
+
+ /* Coming out of suspend */
+ if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
+ dev_dbg(dev->dev, "STATUS: USB resume\n");
+ if (dev->suspended_from == USB_STATE_POWERED)
+ gr_set_ep0state(dev, GR_EP0_DISCONNECT);
+ else
+ gr_set_ep0state(dev, GR_EP0_SETUP);
+ usb_gadget_set_state(&dev->gadget, dev->suspended_from);
+
+ if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
+ dev->driver && dev->driver->resume) {
+ spin_unlock(&dev->lock);
+
+ dev->driver->resume(&dev->gadget);
+
+ spin_lock(&dev->lock);
+ }
+ handled = 1;
+ }
+
+ return handled;
+}
+
+/* Non-interrupt context irq handler */
+static irqreturn_t gr_irq_handler(int irq, void *_dev)
+{
+ struct gr_udc *dev = _dev;
+ struct gr_ep *ep;
+ int handled = 0;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (!dev->irq_enabled)
+ goto out;
+
+ /*
+ * Check IN ep interrupts. We check these before the OUT eps because
+ * some gadgets reuse the request that might already be outstanding and
+ * needs to be completed first (mainly setup requests).
+ */
+ for (i = 0; i < dev->nepi; i++) {
+ ep = &dev->epi[i];
+ if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
+ handled = gr_handle_in_ep(ep) || handled;
+ }
+
+ /* Check OUT ep interrupts */
+ for (i = 0; i < dev->nepo; i++) {
+ ep = &dev->epo[i];
+ if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
+ handled = gr_handle_out_ep(ep) || handled;
+ }
+
+ /* Check status interrupts */
+ handled = gr_handle_state_changes(dev) || handled;
+
+ /*
+ * Check AMBA DMA errors. Only check if we didn't find anything else to
+ * handle because this shouldn't happen if we did everything right.
+ */
+ if (!handled) {
+ list_for_each_entry(ep, &dev->ep_list, ep_list) {
+ if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
+ dev_err(dev->dev,
+ "AMBA Error occurred for %s\n",
+ ep->ep.name);
+ handled = 1;
+ }
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/* Interrupt context irq handler */
+static irqreturn_t gr_irq(int irq, void *_dev)
+{
+ struct gr_udc *dev = _dev;
+
+ if (!dev->irq_enabled)
+ return IRQ_NONE;
+
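+ /* Defer the actual event handling to the threaded handler gr_irq_handler() */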
+ return IRQ_WAKE_THREAD;
+}
+
+/* ---------------------------------------------------------------------- */
+/* USB ep ops */
+
+/* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
+static int gr_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct gr_udc *dev;
+ struct gr_ep *ep;
+ u8 mode;
+ u8 nt;
+ u16 max;
+ u16 buffer_size = 0;
+ u32 epctrl;
+
+ ep = container_of(_ep, struct gr_ep, ep);
+ if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+
+ dev = ep->dev;
+
+ /* 'ep0' IN and OUT are reserved */
+ if (ep == &dev->epo[0] || ep == &dev->epi[0])
+ return -EINVAL;
+
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ /* Make sure we are clear for enabling */
+ epctrl = gr_read32(&ep->regs->epctrl);
+ if (epctrl & GR_EPCTRL_EV)
+ return -EBUSY;
+
+ /* Check that directions match */
+ if (!ep->is_in != !usb_endpoint_dir_in(desc))
+ return -EINVAL;
+
+ /* Check ep num */
+ if ((!ep->is_in && ep->num >= dev->nepo) ||
+ (ep->is_in && ep->num >= dev->nepi))
+ return -EINVAL;
+
+ if (usb_endpoint_xfer_control(desc)) {
+ mode = 0;
+ } else if (usb_endpoint_xfer_isoc(desc)) {
+ mode = 1;
+ } else if (usb_endpoint_xfer_bulk(desc)) {
+ mode = 2;
+ } else if (usb_endpoint_xfer_int(desc)) {
+ mode = 3;
+ } else {
+ dev_err(dev->dev, "Unknown transfer type for %s\n",
+ ep->ep.name);
+ return -EINVAL;
+ }
+
+ /*
+ * Bits 10-0 set the max payload. 12-11 set the number of
+ * additional transactions.
+ */
+ max = 0x7ff & usb_endpoint_maxp(desc);
+ nt = 0x3 & (usb_endpoint_maxp(desc) >> 11);
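+ /* Hardware buffer size for this endpoint, as configured in the core */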
+ buffer_size = GR_BUFFER_SIZE(epctrl);
+ if (nt && (mode == 0 || mode == 2)) {
+ dev_err(dev->dev,
+ "%s mode: multiple trans./microframe not valid\n",
+ (mode == 2 ? "Bulk" : "Control"));
+ return -EINVAL;
+ } else if (nt == 0x3) {
+ dev_err(dev->dev, "Invalid value for trans./microframe\n");
+ return -EINVAL;
+ } else if ((nt + 1) * max > buffer_size) {
+ dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
+ buffer_size, (nt + 1), max);
+ return -EINVAL;
+ } else if (max == 0) {
+ dev_err(dev->dev, "Max payload cannot be set to 0\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&ep->dev->lock);
+
+ if (!ep->stopped) {
+ spin_unlock(&ep->dev->lock);
+ return -EBUSY;
+ }
+
+ ep->stopped = 0;
+ ep->wedged = 0;
+ ep->ep.desc = desc;
+ ep->ep.maxpacket = max;
+ ep->dma_start = 0;
+
+ if (nt) {
+ /*
+ * Maximum possible size of all payloads in one microframe
+ * regardless of direction when using high-bandwidth mode.
+ */
+ ep->bytes_per_buffer = (nt + 1) * max;
+ } else if (ep->is_in) {
+ /*
+ * The biggest multiple of maximum packet size that fits into
+ * the buffer. The hardware will split up into many packets in
+ * the IN direction.
+ */
+ ep->bytes_per_buffer = (buffer_size / max) * max;
+ } else {
+ /*
+ * Only single packets will be placed in the buffers in the OUT
+ * direction.
+ */
+ ep->bytes_per_buffer = max;
+ }
+
+ epctrl = (max << GR_EPCTRL_MAXPL_POS)
+ | (nt << GR_EPCTRL_NT_POS)
+ | (mode << GR_EPCTRL_TT_POS)
+ | GR_EPCTRL_EV;
+ if (ep->is_in)
+ epctrl |= GR_EPCTRL_PI;
+ gr_write32(&ep->regs->epctrl, epctrl);
+
+ gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);
+
+ spin_unlock(&ep->dev->lock);
+
+ dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
+ ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
+ return 0;
+}
+
+/* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
+static int gr_ep_disable(struct usb_ep *_ep)
+{
+ struct gr_ep *ep;
+ struct gr_udc *dev;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct gr_ep, ep);
+ if (!_ep || !ep->ep.desc)
+ return -ENODEV;
+
+ dev = ep->dev;
+
+ /* 'ep0' IN and OUT are reserved */
+ if (ep == &dev->epo[0] || ep == &dev->epi[0])
+ return -EINVAL;
+
+ if (dev->ep0state == GR_EP0_SUSPEND)
+ return -EBUSY;
+
+ dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ gr_ep_nuke(ep);
+ gr_ep_reset(ep);
+ ep->ep.desc = NULL;
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+/*
+ * Frees a request, but not any DMA buffers associated with it
+ * (gr_finish_request should already have taken care of that).
+ */
+static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct gr_request *req;
+
+ if (!_ep || !_req)
+ return;
+ req = container_of(_req, struct gr_request, req);
+
+ /* Leads to memory leak */
+ WARN(!list_empty(&req->queue),
+ "request not dequeued properly before freeing\n");
+
+ kfree(req);
+}
+
+/* Queue a request from the gadget */
+static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct gr_ep *ep;
+ struct gr_request *req;
+ struct gr_udc *dev;
+ int ret;
+
+ if (unlikely(!_ep || !_req))
+ return -EINVAL;
+
+ ep = container_of(_ep, struct gr_ep, ep);
+ req = container_of(_req, struct gr_request, req);
+ dev = ep->dev;
+
+ spin_lock(&ep->dev->lock);
+
+ /*
+ * The ep0 pointer in the gadget struct is used both for ep0in and
+ * ep0out. In a data stage in the out direction ep0out needs to be used
+ * instead of the default ep0in. Completion functions might use
+ * driver_data, so that needs to be copied as well.
+ */
+ if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
+ ep = &dev->epo[0];
+ ep->ep.driver_data = dev->epi[0].ep.driver_data;
+ }
+
+ if (ep->is_in)
+ gr_dbgprint_request("EXTERN", ep, req);
+
+ ret = gr_queue(ep, req, gfp_flags);
+
+ spin_unlock(&ep->dev->lock);
+
+ return ret;
+}
+
+/* Dequeue JUST ONE request */
+static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct gr_request *req;
+ struct gr_ep *ep;
+ struct gr_udc *dev;
+ int ret = 0;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct gr_ep, ep);
+ if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
+ return -EINVAL;
+ dev = ep->dev;
+ if (!dev->driver)
+ return -ESHUTDOWN;
+
+ /* We can't touch (DMA) registers when suspended */
+ if (dev->ep0state == GR_EP0_SUSPEND)
+ return -EBUSY;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* Make sure it's actually queued on this endpoint */
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (&req->req == _req)
+ break;
+ }
+ if (&req->req != _req) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
+ /* This request is currently being processed */
+ gr_abort_dma(ep);
+ if (ep->stopped)
+ gr_finish_request(ep, req, -ECONNRESET);
+ else
+ gr_dma_advance(ep, -ECONNRESET);
+ } else if (!list_empty(&req->queue)) {
+ /* Not being processed - gr_finish_request dequeues it */
+ gr_finish_request(ep, req, -ECONNRESET);
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+
+out:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return ret;
+}
+
+/* Helper for gr_set_halt and gr_set_wedge */
+static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
+{
+ int ret;
+ struct gr_ep *ep;
+
+ if (!_ep)
+ return -ENODEV;
+ ep = container_of(_ep, struct gr_ep, ep);
+
+ spin_lock(&ep->dev->lock);
+
+ /* Halting an IN endpoint should fail if queue is not empty */
+ if (halt && ep->is_in && !list_empty(&ep->queue)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = gr_ep_halt_wedge(ep, halt, wedge, 0);
+
+out:
+ spin_unlock(&ep->dev->lock);
+
+ return ret;
+}
+
+/* Halt endpoint */
+static int gr_set_halt(struct usb_ep *_ep, int halt)
+{
+ return gr_set_halt_wedge(_ep, halt, 0);
+}
+
+/* Halt and wedge endpoint */
+static int gr_set_wedge(struct usb_ep *_ep)
+{
+ return gr_set_halt_wedge(_ep, 1, 1);
+}
+
+/*
+ * Return the total number of bytes currently stored in the internal buffers of
+ * the endpoint.
+ */
+static int gr_fifo_status(struct usb_ep *_ep)
+{
+ struct gr_ep *ep;
+ u32 epstat;
+ u32 bytes = 0;
+
+ if (!_ep)
+ return -ENODEV;
+ ep = container_of(_ep, struct gr_ep, ep);
+
+ epstat = gr_read32(&ep->regs->epstat);
+
+ if (epstat & GR_EPSTAT_B0)
+ bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
+ if (epstat & GR_EPSTAT_B1)
+ bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;
+
+ return bytes;
+}
+
+/* Empty data from internal buffers of an endpoint. */
+static void gr_fifo_flush(struct usb_ep *_ep)
+{
+ struct gr_ep *ep;
+ u32 epctrl;
+
+ if (!_ep)
+ return;
+ ep = container_of(_ep, struct gr_ep, ep);
+ dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);
+
+ spin_lock(&ep->dev->lock);
+
+ epctrl = gr_read32(&ep->regs->epctrl);
+ epctrl |= GR_EPCTRL_CB;
+ gr_write32(&ep->regs->epctrl, epctrl);
+
+ spin_unlock(&ep->dev->lock);
+}
+
+static struct usb_ep_ops gr_ep_ops = {
+ .enable = gr_ep_enable,
+ .disable = gr_ep_disable,
+
+ .alloc_request = gr_alloc_request,
+ .free_request = gr_free_request,
+
+ .queue = gr_queue_ext,
+ .dequeue = gr_dequeue,
+
+ .set_halt = gr_set_halt,
+ .set_wedge = gr_set_wedge,
+ .fifo_status = gr_fifo_status,
+ .fifo_flush = gr_fifo_flush,
+};
+
+/* ---------------------------------------------------------------------- */
+/* USB Gadget ops */
+
+static int gr_get_frame(struct usb_gadget *_gadget)
+{
+ struct gr_udc *dev;
+
+ if (!_gadget)
+ return -ENODEV;
+ dev = container_of(_gadget, struct gr_udc, gadget);
+ return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
+}
+
+static int gr_wakeup(struct usb_gadget *_gadget)
+{
+ struct gr_udc *dev;
+
+ if (!_gadget)
+ return -ENODEV;
+ dev = container_of(_gadget, struct gr_udc, gadget);
+
+ /* Remote wakeup feature not enabled by host */
+ if (!dev->remote_wakeup)
+ return -EINVAL;
+
+ spin_lock(&dev->lock);
+
+ gr_write32(&dev->regs->control,
+ gr_read32(&dev->regs->control) | GR_CONTROL_RW);
+
+ spin_unlock(&dev->lock);
+
+ return 0;
+}
+
+static int gr_pullup(struct usb_gadget *_gadget, int is_on)
+{
+ struct gr_udc *dev;
+ u32 control;
+
+ if (!_gadget)
+ return -ENODEV;
+ dev = container_of(_gadget, struct gr_udc, gadget);
+
+ spin_lock(&dev->lock);
+
+ control = gr_read32(&dev->regs->control);
+ if (is_on)
+ control |= GR_CONTROL_EP;
+ else
+ control &= ~GR_CONTROL_EP;
+ gr_write32(&dev->regs->control, control);
+
+ spin_unlock(&dev->lock);
+
+ return 0;
+}
+
+static int gr_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct gr_udc *dev = to_gr_udc(gadget);
+
+ spin_lock(&dev->lock);
+
+ /* Hook up the driver */
+ driver->driver.bus = NULL;
+ dev->driver = driver;
+
+ /* Get ready for host detection */
+ gr_enable_vbus_detect(dev);
+
+ spin_unlock(&dev->lock);
+
+ dev_info(dev->dev, "Started with gadget driver '%s'\n",
+ driver->driver.name);
+
+ return 0;
+}
+
+static int gr_udc_stop(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct gr_udc *dev = to_gr_udc(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ dev->driver = NULL;
+ gr_stop_activity(dev);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ dev_info(dev->dev, "Stopped\n");
+
+ return 0;
+}
+
+static const struct usb_gadget_ops gr_ops = {
+ .get_frame = gr_get_frame,
+ .wakeup = gr_wakeup,
+ .pullup = gr_pullup,
+ .udc_start = gr_udc_start,
+ .udc_stop = gr_udc_stop,
+ /* Other operations not supported */
+};
+
+/* ---------------------------------------------------------------------- */
+/* Module probe, removal and of-matching */
+
+static const char * const onames[] = {
+ "ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
+ "ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
+ "ep12out", "ep13out", "ep14out", "ep15out"
+};
+
+static const char * const inames[] = {
+ "ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
+ "ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
+ "ep12in", "ep13in", "ep14in", "ep15in"
+};
+
+/* Must be called with dev->lock held */
+static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
+{
+ struct gr_ep *ep;
+ struct gr_request *req;
+ struct usb_request *_req;
+ void *buf;
+
+ if (is_in) {
+ ep = &dev->epi[num];
+ ep->ep.name = inames[num];
+ ep->regs = &dev->regs->epi[num];
+ } else {
+ ep = &dev->epo[num];
+ ep->ep.name = onames[num];
+ ep->regs = &dev->regs->epo[num];
+ }
+
+ gr_ep_reset(ep);
+ ep->num = num;
+ ep->is_in = is_in;
+ ep->dev = dev;
+ ep->ep.ops = &gr_ep_ops;
+ INIT_LIST_HEAD(&ep->queue);
+
+ if (num == 0) {
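+ /* Preallocate a request and a DMA-capable buffer for ep0 traffic */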
+ _req = gr_alloc_request(&ep->ep, GFP_KERNEL);
+ buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_KERNEL);
+ if (!_req || !buf) {
+ /* possible _req freed by gr_probe via gr_remove */
+ return -ENOMEM;
+ }
+
+ req = container_of(_req, struct gr_request, req);
+ req->req.buf = buf;
+ req->req.length = MAX_CTRL_PL_SIZE;
+
+ if (is_in)
+ dev->ep0reqi = req; /* Completion callback is set when used */
+ else
+ dev->ep0reqo = req; /* Completion treated separately */
+
+ usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
+ ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;
+ } else {
+ usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
+ list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
+ }
+ list_add_tail(&ep->ep_list, &dev->ep_list);
+
+ return 0;
+}
+
+/* Must be called with dev->lock held */
+static int gr_udc_init(struct gr_udc *dev)
+{
+ struct device_node *np = dev->dev->of_node;
+ u32 epctrl_val;
+ u32 dmactrl_val;
+ int i;
+ int ret = 0;
+ u32 *bufsizes;
+ u32 bufsize;
+ int len;
+
+ gr_set_address(dev, 0);
+
+ INIT_LIST_HEAD(&dev->gadget.ep_list);
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+ dev->gadget.ep0 = &dev->epi[0].ep;
+
+ INIT_LIST_HEAD(&dev->ep_list);
+ gr_set_ep0state(dev, GR_EP0_DISCONNECT);
+
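+ /*
+ * Per-endpoint buffer sizes can be given in the "epobufsizes" and
+ * "epibufsizes" device tree properties; endpoints without an entry
+ * default to 1024 bytes.
+ */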
+ bufsizes = (u32 *)of_get_property(np, "epobufsizes", &len);
+ len /= sizeof(u32);
+ for (i = 0; i < dev->nepo; i++) {
+ bufsize = (bufsizes && i < len) ? bufsizes[i] : 1024;
+ ret = gr_ep_init(dev, i, 0, bufsize);
+ if (ret)
+ return ret;
+ }
+
+ bufsizes = (u32 *)of_get_property(np, "epibufsizes", &len);
+ len /= sizeof(u32);
+ for (i = 0; i < dev->nepi; i++) {
+ bufsize = (bufsizes && i < len) ? bufsizes[i] : 1024;
+ ret = gr_ep_init(dev, i, 1, bufsize);
+ if (ret)
+ return ret;
+ }
+
+ /* Must be disabled by default */
+ dev->remote_wakeup = 0;
+
+ /* Enable ep0out and ep0in */
+ epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
+ dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
+ gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
+ gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
+ gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
+ gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);
+
+ return 0;
+}
+
+static int gr_remove(struct platform_device *ofdev)
+{
+ struct gr_udc *dev = dev_get_drvdata(&ofdev->dev);
+
+ if (dev->added)
+ usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
+ if (dev->driver)
+ return -EBUSY;
+
+ gr_dfs_delete(dev);
+ if (dev->desc_pool)
+ dma_pool_destroy(dev->desc_pool);
+ dev_set_drvdata(&ofdev->dev, NULL);
+
+ gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
+ gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);
+
+ return 0;
+}
+
+static int gr_request_irq(struct gr_udc *dev, int irq)
+{
+ return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
+ IRQF_SHARED, driver_name, dev);
+}
+
+static int gr_probe(struct platform_device *ofdev)
+{
+ struct gr_udc *dev;
+ struct resource *res;
+ struct gr_regs __iomem *regs;
+ int retval;
+ u32 status;
+
+ dev = devm_kzalloc(&ofdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->dev = &ofdev->dev;
+
+ res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ dev->irq = irq_of_parse_and_map(dev->dev->of_node, 0);
+ if (!dev->irq) {
+ dev_err(dev->dev, "No irq found\n");
+ return -ENODEV;
+ }
+
+ /* Some core configurations have separate irqs for IN and OUT events */
+ dev->irqi = irq_of_parse_and_map(dev->dev->of_node, 1);
+ if (dev->irqi) {
+ dev->irqo = irq_of_parse_and_map(dev->dev->of_node, 2);
+ if (!dev->irqo) {
+ dev_err(dev->dev, "Found irqi but not irqo\n");
+ return -ENODEV;
+ }
+ }
+
+ dev->gadget.name = driver_name;
+ dev->gadget.max_speed = USB_SPEED_HIGH;
+ dev->gadget.ops = &gr_ops;
+ dev->gadget.quirk_ep_out_aligned_size = true;
+
+ spin_lock_init(&dev->lock);
+ dev->regs = regs;
+
+ dev_set_drvdata(&ofdev->dev, dev);
+
+ /* Determine number of endpoints and data interface mode */
+ status = gr_read32(&dev->regs->status);
+ dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
+ dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;
+
+ if (!(status & GR_STATUS_DM)) {
+ dev_err(dev->dev, "Slave mode cores are not supported\n");
+ return -ENODEV;
+ }
+
+ /* --- Effects of the following calls might need explicit cleanup --- */
+
+ /* Create DMA pool for descriptors */
+ dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
+ sizeof(struct gr_dma_desc), 4, 0);
+ if (!dev->desc_pool) {
+ dev_err(dev->dev, "Could not allocate DMA pool");
+ return -ENOMEM;
+ }
+
+ spin_lock(&dev->lock);
+
+ /* Inside lock so that no gadget can use this udc until probe is done */
+ retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
+ if (retval) {
+ dev_err(dev->dev, "Could not add gadget udc");
+ goto out;
+ }
+ dev->added = 1;
+
+ retval = gr_udc_init(dev);
+ if (retval)
+ goto out;
+
+ gr_dfs_create(dev);
+
+ /* Clear all interrupt enables that might be left on since last boot */
+ gr_disable_interrupts_and_pullup(dev);
+
+ retval = gr_request_irq(dev, dev->irq);
+ if (retval) {
+ dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
+ goto out;
+ }
+
+ if (dev->irqi) {
+ retval = gr_request_irq(dev, dev->irqi);
+ if (retval) {
+ dev_err(dev->dev, "Failed to request irqi %d\n",
+ dev->irqi);
+ goto out;
+ }
+ retval = gr_request_irq(dev, dev->irqo);
+ if (retval) {
+ dev_err(dev->dev, "Failed to request irqo %d\n",
+ dev->irqo);
+ goto out;
+ }
+ }
+
+ if (dev->irqi)
+ dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
+ dev->irq, dev->irqi, dev->irqo);
+ else
+ dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);
+
+out:
+ spin_unlock(&dev->lock);
+
+ if (retval)
+ gr_remove(ofdev);
+
+ return retval;
+}
+
+static struct of_device_id gr_match[] = {
+ {.name = "GAISLER_USBDC"},
+ {.name = "01_021"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, gr_match);
+
+static struct platform_driver gr_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = gr_match,
+ },
+ .probe = gr_probe,
+ .remove = gr_remove,
+};
+module_platform_driver(gr_driver);
+
+MODULE_AUTHOR("Aeroflex Gaisler AB.");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/gr_udc.h b/drivers/usb/gadget/gr_udc.h
new file mode 100644
index 000000000000..8388897d9ec3
--- /dev/null
+++ b/drivers/usb/gadget/gr_udc.h
@@ -0,0 +1,220 @@
+/*
+ * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
+ *
+ * 2013 (c) Aeroflex Gaisler AB
+ *
+ * This driver supports GRUSBDC USB Device Controller cores available in the
+ * GRLIB VHDL IP core library.
+ *
+ * Full documentation of the GRUSBDC core can be found here:
+ * http://www.gaisler.com/products/grlib/grip.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Contributors:
+ * - Andreas Larsson <andreas@gaisler.com>
+ * - Marko Isomaki
+ */
+
+/* Control registers on the AMBA bus */
+
+#define GR_MAXEP 16 /* Max # endpoints for *each* direction */
+
+struct gr_epregs {
+ u32 epctrl;
+ union {
+ struct { /* Slave mode */
+ u32 slvctrl;
+ u32 slvdata;
+ };
+ struct { /* DMA mode */
+ u32 dmactrl;
+ u32 dmaaddr;
+ };
+ };
+ u32 epstat;
+};
+
+struct gr_regs {
+ struct gr_epregs epo[GR_MAXEP]; /* 0x000 - 0x0fc */
+ struct gr_epregs epi[GR_MAXEP]; /* 0x100 - 0x1fc */
+ u32 control; /* 0x200 */
+ u32 status; /* 0x204 */
+};
+
+#define GR_EPCTRL_BUFSZ_SCALER 8
+#define GR_EPCTRL_BUFSZ_MASK 0xffe00000
+#define GR_EPCTRL_BUFSZ_POS 21
+#define GR_EPCTRL_PI BIT(20)
+#define GR_EPCTRL_CB BIT(19)
+#define GR_EPCTRL_CS BIT(18)
+#define GR_EPCTRL_MAXPL_MASK 0x0003ff80
+#define GR_EPCTRL_MAXPL_POS 7
+#define GR_EPCTRL_NT_MASK 0x00000060
+#define GR_EPCTRL_NT_POS 5
+#define GR_EPCTRL_TT_MASK 0x00000018
+#define GR_EPCTRL_TT_POS 3
+#define GR_EPCTRL_EH BIT(2)
+#define GR_EPCTRL_ED BIT(1)
+#define GR_EPCTRL_EV BIT(0)
+
+#define GR_DMACTRL_AE BIT(10)
+#define GR_DMACTRL_AD BIT(3)
+#define GR_DMACTRL_AI BIT(2)
+#define GR_DMACTRL_IE BIT(1)
+#define GR_DMACTRL_DA BIT(0)
+
+#define GR_EPSTAT_PT BIT(29)
+#define GR_EPSTAT_PR BIT(29)
+#define GR_EPSTAT_B1CNT_MASK 0x1fff0000
+#define GR_EPSTAT_B1CNT_POS 16
+#define GR_EPSTAT_B0CNT_MASK 0x0000fff8
+#define GR_EPSTAT_B0CNT_POS 3
+#define GR_EPSTAT_B1 BIT(2)
+#define GR_EPSTAT_B0 BIT(1)
+#define GR_EPSTAT_BS BIT(0)
+
+#define GR_CONTROL_SI BIT(31)
+#define GR_CONTROL_UI BIT(30)
+#define GR_CONTROL_VI BIT(29)
+#define GR_CONTROL_SP BIT(28)
+#define GR_CONTROL_FI BIT(27)
+#define GR_CONTROL_EP BIT(14)
+#define GR_CONTROL_DH BIT(13)
+#define GR_CONTROL_RW BIT(12)
+#define GR_CONTROL_TS_MASK 0x00000e00
+#define GR_CONTROL_TS_POS 9
+#define GR_CONTROL_TM BIT(8)
+#define GR_CONTROL_UA_MASK 0x000000fe
+#define GR_CONTROL_UA_POS 1
+#define GR_CONTROL_SU BIT(0)
+
+#define GR_STATUS_NEPI_MASK 0xf0000000
+#define GR_STATUS_NEPI_POS 28
+#define GR_STATUS_NEPO_MASK 0x0f000000
+#define GR_STATUS_NEPO_POS 24
+#define GR_STATUS_DM BIT(23)
+#define GR_STATUS_SU BIT(17)
+#define GR_STATUS_UR BIT(16)
+#define GR_STATUS_VB BIT(15)
+#define GR_STATUS_SP BIT(14)
+#define GR_STATUS_AF_MASK 0x00003800
+#define GR_STATUS_AF_POS 11
+#define GR_STATUS_FN_MASK 0x000007ff
+#define GR_STATUS_FN_POS 0
+
+
+#define MAX_CTRL_PL_SIZE 64 /* As per USB standard for full and high speed */
+
+/*-------------------------------------------------------------------------*/
+
+/* Driver data structures and utilities */
+
+struct gr_dma_desc {
+ u32 ctrl;
+ u32 data;
+ u32 next;
+
+ /* These must be last because hw uses the previous three */
+ u32 paddr;
+ struct gr_dma_desc *next_desc;
+};
+
+#define GR_DESC_OUT_CTRL_SE BIT(17)
+#define GR_DESC_OUT_CTRL_IE BIT(15)
+#define GR_DESC_OUT_CTRL_NX BIT(14)
+#define GR_DESC_OUT_CTRL_EN BIT(13)
+#define GR_DESC_OUT_CTRL_LEN_MASK 0x00001fff
+
+#define GR_DESC_IN_CTRL_MO BIT(18)
+#define GR_DESC_IN_CTRL_PI BIT(17)
+#define GR_DESC_IN_CTRL_ML BIT(16)
+#define GR_DESC_IN_CTRL_IE BIT(15)
+#define GR_DESC_IN_CTRL_NX BIT(14)
+#define GR_DESC_IN_CTRL_EN BIT(13)
+#define GR_DESC_IN_CTRL_LEN_MASK 0x00001fff
+
+#define GR_DESC_DMAADDR_MASK 0xfffffffc
+
+struct gr_ep {
+ struct usb_ep ep;
+ struct gr_udc *dev;
+ u16 bytes_per_buffer;
+ unsigned int dma_start;
+ struct gr_epregs __iomem *regs;
+
+ unsigned num:8;
+ unsigned is_in:1;
+ unsigned stopped:1;
+ unsigned wedged:1;
+ unsigned callback:1;
+
+ /* analogous to a host-side qh */
+ struct list_head queue;
+
+ struct list_head ep_list;
+};
+
+struct gr_request {
+ struct usb_request req;
+ struct list_head queue;
+
+ /* Chain of dma descriptors */
+ struct gr_dma_desc *first_desc; /* First in the chain */
+ struct gr_dma_desc *curr_desc; /* Current descriptor */
+ struct gr_dma_desc *last_desc; /* Last in the chain */
+
+ u8 setup; /* Setup packet */
+};
+
+enum gr_ep0state {
+ GR_EP0_DISCONNECT = 0, /* No host */
+ GR_EP0_SETUP, /* Between STATUS ack and SETUP report */
+ GR_EP0_IDATA, /* IN data stage */
+ GR_EP0_ODATA, /* OUT data stage */
+ GR_EP0_ISTATUS, /* Status stage after IN data stage */
+ GR_EP0_OSTATUS, /* Status stage after OUT data stage */
+ GR_EP0_STALL, /* Data or status stages */
+ GR_EP0_SUSPEND, /* USB suspend */
+};
+
+struct gr_udc {
+ struct usb_gadget gadget;
+ struct gr_ep epi[GR_MAXEP];
+ struct gr_ep epo[GR_MAXEP];
+ struct usb_gadget_driver *driver;
+ struct dma_pool *desc_pool;
+ struct device *dev;
+
+ enum gr_ep0state ep0state;
+ struct gr_request *ep0reqo;
+ struct gr_request *ep0reqi;
+
+ struct gr_regs __iomem *regs;
+ int irq;
+ int irqi;
+ int irqo;
+
+ unsigned added:1;
+ unsigned irq_enabled:1;
+ unsigned remote_wakeup:1;
+
+ u8 test_mode;
+
+ enum usb_device_state suspended_from;
+
+ unsigned int nepi;
+ unsigned int nepo;
+
+ struct list_head ep_list;
+
+ spinlock_t lock; /* General lock, a.k.a. "dev->lock" in comments */
+
+ struct dentry *dfs_root;
+ struct dentry *dfs_state;
+};
+
+#define to_gr_udc(gadget) (container_of((gadget), struct gr_udc, gadget))
diff --git a/drivers/usb/gadget/lpc32xx_udc.c b/drivers/usb/gadget/lpc32xx_udc.c
index 6a2a65aa0057..049ebab0d360 100644
--- a/drivers/usb/gadget/lpc32xx_udc.c
+++ b/drivers/usb/gadget/lpc32xx_udc.c
@@ -1449,7 +1449,7 @@ static void udc_reinit(struct lpc32xx_udc *udc)
if (i != 0)
list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
- ep->ep.maxpacket = ep->maxpacket;
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->maxpacket);
INIT_LIST_HEAD(&ep->queue);
ep->req_pending = 0;
}
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index d5f050d30edf..8cae01d88597 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1647,9 +1647,9 @@ static int __init m66592_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&ep->queue);
ep->ep.name = m66592_ep_name[i];
ep->ep.ops = &m66592_ep_ops;
- ep->ep.maxpacket = 512;
+ usb_ep_set_maxpacket_limit(&ep->ep, 512);
}
- m66592->ep[0].ep.maxpacket = 64;
+ usb_ep_set_maxpacket_limit(&m66592->ep[0].ep, 64);
m66592->ep[0].pipenum = 0;
m66592->ep[0].fifoaddr = M66592_CFIFO;
m66592->ep[0].fifosel = M66592_CFIFOSEL;
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 4fdaa54a2a2a..940f6cde8e89 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -134,7 +134,7 @@ static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
*/
#define fsg_num_buffers CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS
-#endif /* CONFIG_USB_DEBUG */
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);
diff --git a/drivers/usb/gadget/mv_u3d_core.c b/drivers/usb/gadget/mv_u3d_core.c
index 234711eabea1..d2ca59e7b477 100644
--- a/drivers/usb/gadget/mv_u3d_core.c
+++ b/drivers/usb/gadget/mv_u3d_core.c
@@ -15,7 +15,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/notifier.h>
@@ -1336,7 +1335,7 @@ static int mv_u3d_eps_init(struct mv_u3d *u3d)
ep->ep.name = ep->name;
ep->ep.ops = &mv_u3d_ep_ops;
ep->wedge = 0;
- ep->ep.maxpacket = MV_U3D_EP0_MAX_PKT_SIZE;
+ usb_ep_set_maxpacket_limit(&ep->ep, MV_U3D_EP0_MAX_PKT_SIZE);
ep->ep_num = 0;
ep->ep.desc = &mv_u3d_ep0_desc;
INIT_LIST_HEAD(&ep->queue);
@@ -1361,7 +1360,7 @@ static int mv_u3d_eps_init(struct mv_u3d *u3d)
ep->ep.name = ep->name;
ep->ep.ops = &mv_u3d_ep_ops;
- ep->ep.maxpacket = (unsigned short) ~0;
+ usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
ep->ep_num = i / 2;
INIT_LIST_HEAD(&ep->queue);
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index 104cdbea635a..fcff3a571b45 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -20,7 +20,6 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
@@ -1261,7 +1260,7 @@ static int eps_init(struct mv_udc *udc)
ep->ep.ops = &mv_ep_ops;
ep->wedge = 0;
ep->stopped = 0;
- ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
+ usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
ep->ep_num = 0;
ep->ep.desc = &mv_ep0_desc;
INIT_LIST_HEAD(&ep->queue);
@@ -1284,7 +1283,7 @@ static int eps_init(struct mv_udc *udc)
ep->ep.ops = &mv_ep_ops;
ep->stopped = 0;
- ep->ep.maxpacket = (unsigned short) ~0;
+ usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
ep->ep_num = i / 2;
INIT_LIST_HEAD(&ep->queue);
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c
index bf2bb39f35a2..ca15405583e2 100644
--- a/drivers/usb/gadget/net2272.c
+++ b/drivers/usb/gadget/net2272.c
@@ -266,7 +266,7 @@ static void net2272_ep_reset(struct net2272_ep *ep)
ep->desc = NULL;
INIT_LIST_HEAD(&ep->queue);
- ep->ep.maxpacket = ~0;
+ usb_ep_set_maxpacket_limit(&ep->ep, ~0);
ep->ep.ops = &net2272_ep_ops;
/* disable irqs, endpoint */
@@ -1409,7 +1409,7 @@ net2272_usb_reinit(struct net2272 *dev)
ep->fifo_size = 64;
net2272_ep_reset(ep);
}
- dev->ep[0].ep.maxpacket = 64;
+ usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
dev->gadget.ep0 = &dev->ep[0].ep;
dev->ep[0].stopped = 0;
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index fc852177c087..43e5e2f9888f 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -293,7 +293,7 @@ static void ep_reset (struct net2280_regs __iomem *regs, struct net2280_ep *ep)
ep->desc = NULL;
INIT_LIST_HEAD (&ep->queue);
- ep->ep.maxpacket = ~0;
+ usb_ep_set_maxpacket_limit(&ep->ep, ~0);
ep->ep.ops = &net2280_ep_ops;
/* disable the dma, irqs, endpoint... */
@@ -1805,9 +1805,9 @@ static void usb_reinit (struct net2280 *dev)
ep->regs = &dev->epregs [tmp];
ep_reset (dev->regs, ep);
}
- dev->ep [0].ep.maxpacket = 64;
- dev->ep [5].ep.maxpacket = 64;
- dev->ep [6].ep.maxpacket = 64;
+ usb_ep_set_maxpacket_limit(&dev->ep [0].ep, 64);
+ usb_ep_set_maxpacket_limit(&dev->ep [5].ep, 64);
+ usb_ep_set_maxpacket_limit(&dev->ep [6].ep, 64);
dev->gadget.ep0 = &dev->ep [0].ep;
dev->ep [0].stopped = 0;
diff --git a/drivers/usb/gadget/nokia.c b/drivers/usb/gadget/nokia.c
index 0a8099a488c4..3ab386167519 100644
--- a/drivers/usb/gadget/nokia.c
+++ b/drivers/usb/gadget/nokia.c
@@ -126,9 +126,9 @@ static int __init nokia_bind_config(struct usb_configuration *c)
struct usb_function *f_ecm;
struct usb_function *f_obex2 = NULL;
int status = 0;
- int obex1_stat = 0;
- int obex2_stat = 0;
- int phonet_stat = 0;
+ int obex1_stat = -1;
+ int obex2_stat = -1;
+ int phonet_stat = -1;
if (!IS_ERR(fi_phonet)) {
f_phonet = usb_get_function(fi_phonet);
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 83957cc225d9..2ae4f6d69f74 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -22,7 +22,6 @@
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
@@ -2586,7 +2585,8 @@ omap_ep_setup(char *name, u8 addr, u8 type,
ep->ep.name = ep->name;
ep->ep.ops = &omap_ep_ops;
- ep->ep.maxpacket = ep->maxpacket = maxp;
+ ep->maxpacket = maxp;
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->maxpacket);
list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
return buf;
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 32d5e923750b..eb8c3bedb57a 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -2896,12 +2896,12 @@ static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
UDC_EP_REG_SHIFT;
/* need to set ep->ep.maxpacket and set Default Configuration?*/
- ep->ep.maxpacket = UDC_BULK_MAX_PKT_SIZE;
+ usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
INIT_LIST_HEAD(&ep->queue);
}
- dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
- dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
+ usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
+ usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);
/* remove ep0 in and out from the list. They have own pointer */
list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
@@ -3210,7 +3210,7 @@ finished:
return retval;
}
-static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
+static const struct pci_device_id pch_udc_pcidev_id[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 409a3c45a36a..9984437d2952 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -24,7 +24,6 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
@@ -1194,6 +1193,7 @@ static void udc_reinit(struct pxa25x_udc *dev)
ep->stopped = 0;
INIT_LIST_HEAD (&ep->queue);
ep->pio_irqs = 0;
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket);
}
/* the rest was statically initialized, and is read-only */
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 3c97da7760da..cdf4d678be96 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -1737,9 +1737,12 @@ static void udc_init_data(struct pxa_udc *dev)
}
/* USB endpoints init */
- for (i = 1; i < NR_USB_ENDPOINTS; i++)
+ for (i = 1; i < NR_USB_ENDPOINTS; i++) {
list_add_tail(&dev->udc_usb_ep[i].usb_ep.ep_list,
&dev->gadget.ep_list);
+ usb_ep_set_maxpacket_limit(&dev->udc_usb_ep[i].usb_ep,
+ dev->udc_usb_ep[i].usb_ep.maxpacket);
+ }
}
/**
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 68be48d33404..aff0a6718bc6 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1833,7 +1833,7 @@ static int __exit r8a66597_remove(struct platform_device *pdev)
r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
if (r8a66597->pdata->on_chip) {
- clk_disable(r8a66597->clk);
+ clk_disable_unprepare(r8a66597->clk);
clk_put(r8a66597->clk);
}
@@ -1931,7 +1931,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
ret = PTR_ERR(r8a66597->clk);
goto clean_up;
}
- clk_enable(r8a66597->clk);
+ clk_prepare_enable(r8a66597->clk);
}
if (r8a66597->pdata->sudmac) {
@@ -1964,9 +1964,9 @@ static int __init r8a66597_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&ep->queue);
ep->ep.name = r8a66597_ep_name[i];
ep->ep.ops = &r8a66597_ep_ops;
- ep->ep.maxpacket = 512;
+ usb_ep_set_maxpacket_limit(&ep->ep, 512);
}
- r8a66597->ep[0].ep.maxpacket = 64;
+ usb_ep_set_maxpacket_limit(&r8a66597->ep[0].ep, 64);
r8a66597->ep[0].pipenum = 0;
r8a66597->ep[0].fifoaddr = CFIFO;
r8a66597->ep[0].fifosel = CFIFOSEL;
@@ -1996,7 +1996,7 @@ clean_up3:
free_irq(irq, r8a66597);
clean_up2:
if (r8a66597->pdata->on_chip) {
- clk_disable(r8a66597->clk);
+ clk_disable_unprepare(r8a66597->clk);
clk_put(r8a66597->clk);
}
clean_up:
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index a3ad732bc812..d822d822efb3 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -25,7 +25,6 @@
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
@@ -1142,7 +1141,7 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
-static int rndis_init(void)
+int rndis_init(void)
{
u8 i;
@@ -1174,9 +1173,8 @@ static int rndis_init(void)
return 0;
}
-module_init(rndis_init);
-static void rndis_exit(void)
+void rndis_exit(void)
{
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
u8 i;
@@ -1188,6 +1186,4 @@ static void rndis_exit(void)
}
#endif
}
-module_exit(rndis_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index e20bc109fdd7..1172eaeddd85 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -30,14 +30,13 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/phy.h>
#include <linux/platform_data/s3c-hsotg.h>
-#include <mach/map.h>
-
#include "s3c-hsotg.h"
static const char * const s3c_hsotg_supply_names[] = {
@@ -140,11 +139,13 @@ struct s3c_hsotg_ep {
* @dev: The parent device supplied to the probe function
* @driver: USB gadget driver
* @phy: The otg phy transceiver structure for phy control.
+ * @uphy: The otg phy transceiver structure for old USB phy control.
* @plat: The platform specific configuration data. This can be removed once
* all SoCs support usb transceiver.
* @regs: The memory area mapped for accessing registers.
* @irq: The IRQ number we are using
* @supplies: Definition of USB power supplies
+ * @phyif: PHY interface width
* @dedicated_fifos: Set if the hardware has dedicated IN-EP fifos.
* @num_of_eps: Number of available EPs (excluding EP0)
* @debug_root: root directrory for debugfs.
@@ -161,7 +162,8 @@ struct s3c_hsotg_ep {
struct s3c_hsotg {
struct device *dev;
struct usb_gadget_driver *driver;
- struct usb_phy *phy;
+ struct phy *phy;
+ struct usb_phy *uphy;
struct s3c_hsotg_plat *plat;
spinlock_t lock;
@@ -172,6 +174,7 @@ struct s3c_hsotg {
struct regulator_bulk_data supplies[ARRAY_SIZE(s3c_hsotg_supply_names)];
+ u32 phyif;
unsigned int dedicated_fifos:1;
unsigned char num_of_eps;
@@ -2086,13 +2089,13 @@ static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
case DSTS_EnumSpd_FS48:
hsotg->gadget.speed = USB_SPEED_FULL;
ep0_mps = EP0_MPS_LIMIT;
- ep_mps = 64;
+ ep_mps = 1023;
break;
case DSTS_EnumSpd_HS:
hsotg->gadget.speed = USB_SPEED_HIGH;
ep0_mps = EP0_MPS_LIMIT;
- ep_mps = 512;
+ ep_mps = 1024;
break;
case DSTS_EnumSpd_LS:
@@ -2156,6 +2159,9 @@ static void kill_all_requests(struct s3c_hsotg *hsotg,
s3c_hsotg_complete_request(hsotg, ep, req,
result);
}
+ if (hsotg->dedicated_fifos)
+ if ((readl(hsotg->regs + DTXFSTS(ep->index)) & 0xffff) * 4 < 3072)
+ s3c_hsotg_txfifo_flush(hsotg, ep->index);
}
#define call_gadget(_hs, _entry) \
@@ -2283,7 +2289,7 @@ static void s3c_hsotg_core_init(struct s3c_hsotg *hsotg)
*/
/* set the PLL on, remove the HNP/SRP and set the PHY */
- writel(GUSBCFG_PHYIf16 | GUSBCFG_TOutCal(7) |
+ writel(hsotg->phyif | GUSBCFG_TOutCal(7) |
(0x5 << 10), hsotg->regs + GUSBCFG);
s3c_hsotg_init_fifo(hsotg);
@@ -2908,8 +2914,11 @@ static void s3c_hsotg_phy_enable(struct s3c_hsotg *hsotg)
dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev);
- if (hsotg->phy)
- usb_phy_init(hsotg->phy);
+ if (hsotg->phy) {
+ phy_init(hsotg->phy);
+ phy_power_on(hsotg->phy);
+ } else if (hsotg->uphy)
+ usb_phy_init(hsotg->uphy);
else if (hsotg->plat->phy_init)
hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
}
@@ -2925,8 +2934,11 @@ static void s3c_hsotg_phy_disable(struct s3c_hsotg *hsotg)
{
struct platform_device *pdev = to_platform_device(hsotg->dev);
- if (hsotg->phy)
- usb_phy_shutdown(hsotg->phy);
+ if (hsotg->phy) {
+ phy_power_off(hsotg->phy);
+ phy_exit(hsotg->phy);
+ } else if (hsotg->uphy)
+ usb_phy_shutdown(hsotg->uphy);
else if (hsotg->plat->phy_exit)
hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
}
@@ -3152,7 +3164,7 @@ static void s3c_hsotg_initep(struct s3c_hsotg *hsotg,
hs_ep->parent = hsotg;
hs_ep->ep.name = hs_ep->name;
- hs_ep->ep.maxpacket = epnum ? 1024 : EP0_MPS_LIMIT;
+ usb_ep_set_maxpacket_limit(&hs_ep->ep, epnum ? 1024 : EP0_MPS_LIMIT);
hs_ep->ep.ops = &s3c_hsotg_ep_ops;
/*
@@ -3533,7 +3545,8 @@ static void s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
static int s3c_hsotg_probe(struct platform_device *pdev)
{
struct s3c_hsotg_plat *plat = dev_get_platdata(&pdev->dev);
- struct usb_phy *phy;
+ struct phy *phy;
+ struct usb_phy *uphy;
struct device *dev = &pdev->dev;
struct s3c_hsotg_ep *eps;
struct s3c_hsotg *hsotg;
@@ -3548,19 +3561,26 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
return -ENOMEM;
}
- phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ /*
+ * Attempt to find a generic PHY, then look for an old style
+ * USB PHY, finally fall back to pdata
+ */
+ phy = devm_phy_get(&pdev->dev, "usb2-phy");
if (IS_ERR(phy)) {
- /* Fallback for pdata */
- plat = dev_get_platdata(&pdev->dev);
- if (!plat) {
- dev_err(&pdev->dev, "no platform data or transceiver defined\n");
- return -EPROBE_DEFER;
- } else {
+ uphy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(uphy)) {
+ /* Fallback for pdata */
+ plat = dev_get_platdata(&pdev->dev);
+ if (!plat) {
+ dev_err(&pdev->dev,
+ "no platform data or transceiver defined\n");
+ return -EPROBE_DEFER;
+ }
hsotg->plat = plat;
- }
- } else {
+ } else
+ hsotg->uphy = uphy;
+ } else
hsotg->phy = phy;
- }
hsotg->dev = dev;
@@ -3627,6 +3647,19 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
goto err_supplies;
}
+ /* Set default UTMI width */
+ hsotg->phyif = GUSBCFG_PHYIf16;
+
+ /*
+ * If using the generic PHY framework, check if the PHY bus
+ * width is 8-bit and set the phyif appropriately.
+ */
+ if (hsotg->phy && (phy_get_bus_width(phy) == 8))
+ hsotg->phyif = GUSBCFG_PHYIf8;
+
+ if (hsotg->phy)
+ phy_init(hsotg->phy);
+
/* usb phy enable */
s3c_hsotg_phy_enable(hsotg);
@@ -3720,6 +3753,8 @@ static int s3c_hsotg_remove(struct platform_device *pdev)
}
s3c_hsotg_phy_disable(hsotg);
+ if (hsotg->phy)
+ phy_exit(hsotg->phy);
clk_disable_unprepare(hsotg->clk);
return 0;
@@ -3733,6 +3768,7 @@ static int s3c_hsotg_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id s3c_hsotg_of_ids[] = {
{ .compatible = "samsung,s3c6400-hsotg", },
+ { .compatible = "snps,dwc2", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, s3c_hsotg_of_ids);
diff --git a/drivers/usb/gadget/s3c-hsotg.h b/drivers/usb/gadget/s3c-hsotg.h
index d650b1295831..85f549ff8c1f 100644
--- a/drivers/usb/gadget/s3c-hsotg.h
+++ b/drivers/usb/gadget/s3c-hsotg.h
@@ -55,6 +55,7 @@
#define GUSBCFG_HNPCap (1 << 9)
#define GUSBCFG_SRPCap (1 << 8)
#define GUSBCFG_PHYIf16 (1 << 3)
+#define GUSBCFG_PHYIf8 (0 << 3)
#define GUSBCFG_TOutCal_MASK (0x7 << 0)
#define GUSBCFG_TOutCal_SHIFT (0)
#define GUSBCFG_TOutCal_LIMIT (0x7)
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 1a1a41498db2..ea4bbfe72ec0 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -999,7 +999,7 @@ static void s3c_hsudc_initep(struct s3c_hsudc *hsudc,
hsep->dev = hsudc;
hsep->ep.name = hsep->name;
- hsep->ep.maxpacket = epnum ? 512 : 64;
+ usb_ep_set_maxpacket_limit(&hsep->ep, epnum ? 512 : 64);
hsep->ep.ops = &s3c_hsudc_ep_ops;
hsep->fifo = hsudc->regs + S3C_BR(epnum);
hsep->ep.desc = NULL;
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index c72d810e6b36..f04b2c3154de 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1629,6 +1629,7 @@ static void s3c2410_udc_reinit(struct s3c2410_udc *dev)
ep->ep.desc = NULL;
ep->halted = 0;
INIT_LIST_HEAD(&ep->queue);
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket);
}
}
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 2aae0d61bb19..b7d4f82872b7 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -753,7 +753,7 @@ static struct device_type gadget_type = {
* gadget driver using this framework. The link layer addresses are
* set up using module parameters.
*
- * Returns negative errno, or zero on success
+ * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
*/
struct eth_dev *gether_setup_name(struct usb_gadget *g,
const char *dev_addr, const char *host_addr,
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index fb23d1fde8eb..0f0290acea7e 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -106,7 +106,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
* gadget driver using this framework. The link layer addresses are
* set up using module parameters.
*
- * Returns negative errno, or zero on success
+ * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
*/
static inline struct eth_dev *gether_setup(struct usb_gadget *g,
const char *dev_addr, const char *host_addr,
@@ -267,45 +267,4 @@ static inline bool can_support_ecm(struct usb_gadget *gadget)
return true;
}
-/* each configuration may bind one instance of an ethernet link */
-int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
- struct eth_dev *dev);
-int ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
- struct eth_dev *dev);
-
-#ifdef USB_ETH_RNDIS
-
-int rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
- u32 vendorID, const char *manufacturer, struct eth_dev *dev);
-
-#else
-
-static inline int
-rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
- u32 vendorID, const char *manufacturer, struct eth_dev *dev)
-{
- return 0;
-}
-
-#endif
-
-/**
- * rndis_bind_config - add RNDIS network link to a configuration
- * @c: the configuration to support the network link
- * @ethaddr: a buffer in which the ethernet address of the host side
- * side of the link was recorded
- * Context: single threaded during gadget setup
- *
- * Returns zero on success, else negative errno.
- *
- * Caller must have called @gether_setup(). Caller is also responsible
- * for calling @gether_cleanup() before module unload.
- */
-static inline int rndis_bind_config(struct usb_configuration *c,
- u8 ethaddr[ETH_ALEN], struct eth_dev *dev)
-{
- return rndis_bind_config_vendor(c, ethaddr, 0, NULL, dev);
-}
-
-
#endif /* __U_ETHER_H */
diff --git a/drivers/usb/gadget/u_f.c b/drivers/usb/gadget/u_f.c
new file mode 100644
index 000000000000..63b6642c162b
--- /dev/null
+++ b/drivers/usb/gadget/u_f.c
@@ -0,0 +1,32 @@
+/*
+ * u_f.c -- USB function utilities for Gadget stack
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/usb/gadget.h>
+#include "u_f.h"
+
+struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ if (req) {
+ req->length = len ?: default_len;
+ req->buf = kmalloc(req->length, GFP_ATOMIC);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ req = NULL;
+ }
+ }
+ return req;
+}
+EXPORT_SYMBOL(alloc_ep_req);
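A usage sketch for the new helper: queue_default_req is a hypothetical caller, while alloc_ep_req(), usb_ep_queue() and usb_ep_free_request() are the gadget APIs it pairs with; note the caller owns the kmalloc'ed buffer and must free it alongside the request.

#include <linux/slab.h>
#include <linux/usb/gadget.h>
#include "u_f.h"

/* Hypothetical caller: allocate a request sized to the endpoint's
 * maxpacket (len == 0 selects default_len) and queue it. */
static int queue_default_req(struct usb_ep *ep,
			     void (*done)(struct usb_ep *, struct usb_request *))
{
	struct usb_request *req = alloc_ep_req(ep, 0, ep->maxpacket);
	int ret;

	if (!req)
		return -ENOMEM;
	req->complete = done;

	ret = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (ret) {
		kfree(req->buf);	/* buffer came from kmalloc() in alloc_ep_req() */
		usb_ep_free_request(ep, req);
	}
	return ret;
}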
diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
new file mode 100644
index 000000000000..71034c061fca
--- /dev/null
+++ b/drivers/usb/gadget/u_f.h
@@ -0,0 +1,26 @@
+/*
+ * u_f.h
+ *
+ * Utility definitions for USB functions
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __U_F_H__
+#define __U_F_H__
+
+struct usb_ep;
+struct usb_request;
+
+struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len);
+
+#endif /* __U_F_H__ */
+
+
diff --git a/drivers/usb/gadget/u_fs.h b/drivers/usb/gadget/u_fs.h
new file mode 100644
index 000000000000..bc2d3718219b
--- /dev/null
+++ b/drivers/usb/gadget/u_fs.h
@@ -0,0 +1,267 @@
+/*
+ * u_fs.h
+ *
+ * Utility definitions for the FunctionFS
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef U_FFS_H
+#define U_FFS_H
+
+#include <linux/usb/composite.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#ifdef VERBOSE_DEBUG
+#ifndef pr_vdebug
+# define pr_vdebug pr_debug
+#endif /* pr_vdebug */
+# define ffs_dump_mem(prefix, ptr, len) \
+ print_hex_dump_bytes(pr_fmt(prefix ": "), DUMP_PREFIX_NONE, ptr, len)
+#else
+#ifndef pr_vdebug
+# define pr_vdebug(...) do { } while (0)
+#endif /* pr_vdebug */
+# define ffs_dump_mem(prefix, ptr, len) do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define ENTER() pr_vdebug("%s()\n", __func__)
+
+struct f_fs_opts;
+
+struct ffs_dev {
+ const char *name;
+ bool name_allocated;
+ bool mounted;
+ bool desc_ready;
+ bool single;
+ struct ffs_data *ffs_data;
+ struct f_fs_opts *opts;
+ struct list_head entry;
+
+ int (*ffs_ready_callback)(struct ffs_data *ffs);
+ void (*ffs_closed_callback)(struct ffs_data *ffs);
+ void *(*ffs_acquire_dev_callback)(struct ffs_dev *dev);
+ void (*ffs_release_dev_callback)(struct ffs_dev *dev);
+};
+
+extern struct mutex ffs_lock;
+
+static inline void ffs_dev_lock(void)
+{
+ mutex_lock(&ffs_lock);
+}
+
+static inline void ffs_dev_unlock(void)
+{
+ mutex_unlock(&ffs_lock);
+}
+
+struct ffs_dev *ffs_alloc_dev(void);
+int ffs_name_dev(struct ffs_dev *dev, const char *name);
+int ffs_single_dev(struct ffs_dev *dev);
+void ffs_free_dev(struct ffs_dev *dev);
+
+struct ffs_epfile;
+struct ffs_function;
+
+enum ffs_state {
+ /*
+ * Waiting for descriptors and strings.
+ *
+ * In this state no open(2), read(2) or write(2) on epfiles
+ * may succeed (which should not be a problem as there
+ * should be no such files opened in the first place).
+ */
+ FFS_READ_DESCRIPTORS,
+ FFS_READ_STRINGS,
+
+ /*
+ * We've got descriptors and strings. We are or have called
+ * functionfs_ready_callback(). functionfs_bind() may have
+ * been called but we don't know.
+ *
+ * This is the only state in which operations on epfiles may
+ * succeed.
+ */
+ FFS_ACTIVE,
+
+ /*
+ * All endpoints have been closed. This state is also set if
+ * we encounter an unrecoverable error. The only
+ * unrecoverable error is the situation in which, after reading
+ * strings from user space, we fail to initialise epfiles or
+ * functionfs_ready_callback() returns an error (<0).
+ *
+ * In this state no open(2), read(2) or write(2) (both on ep0
+ * as well as epfile) may succeed (at this point epfiles are
+ * unlinked and all closed so this is not a problem; ep0 is
+ * also closed but ep0 file exists and so open(2) on ep0 must
+ * fail).
+ */
+ FFS_CLOSING
+};
+
+enum ffs_setup_state {
+ /* There is no setup request pending. */
+ FFS_NO_SETUP,
+ /*
+ * User has read events and there was a setup request event
+ * there. The next read/write on ep0 will handle the
+ * request.
+ */
+ FFS_SETUP_PENDING,
+ /*
+ * There was an event pending, but before user space handled
+ * it some other event was introduced which canceled the
+ * existing setup. If this state is set, read/write on ep0
+ * return -EIDRM. This state is only set when adding an event.
+ */
+ FFS_SETUP_CANCELED
+};
+
+struct ffs_data {
+ struct usb_gadget *gadget;
+
+ /*
+ * Protect access to read/write operations, only one read/write
+ * at a time. As a consequence, this protects ep0req and company.
+ * While setup request is being processed (queued) this is
+ * held.
+ */
+ struct mutex mutex;
+
+ /*
+ * Protect access to endpoint related structures (basically
+ * usb_ep_queue(), usb_ep_dequeue(), etc. calls) except for
+ * endpoint zero.
+ */
+ spinlock_t eps_lock;
+
+ /*
+ * XXX REVISIT do we need our own request? Since we are not
+ * handling setup requests immediately user space may be so
+ * slow that another setup will be sent to the gadget but this
+ * time not to us but another function and then there could be
+ * a race. Is that the case? Or maybe we can use cdev->req
+ * after all, maybe we just need some spinlock for that?
+ */
+ struct usb_request *ep0req; /* P: mutex */
+ struct completion ep0req_completion; /* P: mutex */
+ int ep0req_status; /* P: mutex */
+
+ /* reference counter */
+ atomic_t ref;
+ /* how many files are opened (EP0 and others) */
+ atomic_t opened;
+
+ /* EP0 state */
+ enum ffs_state state;
+
+ /*
+ * Possible transitions:
+ * + FFS_NO_SETUP -> FFS_SETUP_PENDING -- P: ev.waitq.lock
+ * happens only in ep0 read which is P: mutex
+ * + FFS_SETUP_PENDING -> FFS_NO_SETUP -- P: ev.waitq.lock
+ * happens only in ep0 i/o which is P: mutex
+ * + FFS_SETUP_PENDING -> FFS_SETUP_CANCELED -- P: ev.waitq.lock
+ * + FFS_SETUP_CANCELED -> FFS_NO_SETUP -- cmpxchg
+ */
+ enum ffs_setup_state setup_state;
+
+#define FFS_SETUP_STATE(ffs) \
+ ((enum ffs_setup_state)cmpxchg(&(ffs)->setup_state, \
+ FFS_SETUP_CANCELED, FFS_NO_SETUP))
+
+ /* Events & such. */
+ struct {
+ u8 types[4];
+ unsigned short count;
+ /* XXX REVISIT need to update it in some places, or do we? */
+ unsigned short can_stall;
+ struct usb_ctrlrequest setup;
+
+ wait_queue_head_t waitq;
+ } ev; /* the whole structure, P: ev.waitq.lock */
+
+ /* Flags */
+ unsigned long flags;
+#define FFS_FL_CALL_CLOSED_CALLBACK 0
+#define FFS_FL_BOUND 1
+
+ /* Active function */
+ struct ffs_function *func;
+
+ /*
+ * Device name, write once when file system is mounted.
+ * Intended for user to read if she wants.
+ */
+ const char *dev_name;
+ /* Private data for our user (ie. gadget). Managed by user. */
+ void *private_data;
+
+ /* filled by __ffs_data_got_descs() */
+ /*
+ * Real descriptors are 16 bytes after raw_descs (so you need
+ * to skip 16 bytes (ie. ffs->raw_descs + 16) to get to the
+ * first full speed descriptor). raw_descs_length and
+ * raw_fs_descs_length do not have those 16 bytes added.
+ */
+ const void *raw_descs;
+ unsigned raw_descs_length;
+ unsigned raw_fs_descs_length;
+ unsigned fs_descs_count;
+ unsigned hs_descs_count;
+
+ unsigned short strings_count;
+ unsigned short interfaces_count;
+ unsigned short eps_count;
+ unsigned short _pad1;
+
+ /* filled by __ffs_data_got_strings() */
+ /* ids in stringtabs are set in functionfs_bind() */
+ const void *raw_strings;
+ struct usb_gadget_strings **stringtabs;
+
+ /*
+ * File system's super block, write once when file system is
+ * mounted.
+ */
+ struct super_block *sb;
+
+ /* File permissions, written once when fs is mounted */
+ struct ffs_file_perms {
+ umode_t mode;
+ kuid_t uid;
+ kgid_t gid;
+ } file_perms;
+
+ /*
+ * The endpoint files, filled by ffs_epfiles_create(),
+ * destroyed by ffs_epfiles_destroy().
+ */
+ struct ffs_epfile *epfiles;
+};
+
+
+struct f_fs_opts {
+ struct usb_function_instance func_inst;
+ struct ffs_dev *dev;
+ unsigned refcnt;
+ bool no_configfs;
+};
+
+static inline struct f_fs_opts *to_f_fs_opts(struct usb_function_instance *fi)
+{
+ return container_of(fi, struct f_fs_opts, func_inst);
+}
+
+#endif /* U_FFS_H */
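A minimal sketch of how the ep0 path is meant to consume setup_state via the FFS_SETUP_STATE() helper above. ffs_ep0_check_setup and the return values for the non-canceled cases are illustrative rather than f_fs.c verbatim; only the -EIDRM behaviour is stated in the comments above.

#include <linux/errno.h>
#include "u_fs.h"

/* Illustrative only: fold a canceled setup back to FFS_NO_SETUP exactly
 * once (the cmpxchg in FFS_SETUP_STATE) and report it as -EIDRM. */
static int ffs_ep0_check_setup(struct ffs_data *ffs)
{
	switch (FFS_SETUP_STATE(ffs)) {
	case FFS_SETUP_CANCELED:
		return -EIDRM;	/* event superseded before user space handled it */
	case FFS_NO_SETUP:
		return -EAGAIN;	/* nothing pending; go back to waiting (illustrative) */
	case FFS_SETUP_PENDING:
		return 0;	/* go service ffs->ev.setup */
	}
	return -EINVAL;
}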
diff --git a/drivers/usb/gadget/u_rndis.h b/drivers/usb/gadget/u_rndis.h
index c62ba82e9600..7291b15c9dce 100644
--- a/drivers/usb/gadget/u_rndis.h
+++ b/drivers/usb/gadget/u_rndis.h
@@ -36,6 +36,8 @@ struct f_rndis_opts {
int refcnt;
};
+int rndis_init(void);
+void rndis_exit(void);
void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net);
#endif /* U_RNDIS_H */
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
index 1f49fce0f0b7..73a4dfba0edb 100644
--- a/drivers/usb/gadget/usbstring.c
+++ b/drivers/usb/gadget/usbstring.c
@@ -13,7 +13,6 @@
#include <linux/list.h>
#include <linux/string.h>
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/nls.h>
#include <linux/usb/ch9.h>
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index f49b0b61ecc8..9f170c53e3d9 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -64,10 +64,10 @@ static bool loopdefault = 0;
module_param(loopdefault, bool, S_IRUGO|S_IWUSR);
static struct usb_zero_options gzero_options = {
- .isoc_interval = 4,
- .isoc_maxpacket = 1024,
- .bulk_buflen = 4096,
- .qlen = 32,
+ .isoc_interval = GZERO_ISOC_INTERVAL,
+ .isoc_maxpacket = GZERO_ISOC_MAXPACKET,
+ .bulk_buflen = GZERO_BULK_BUFLEN,
+ .qlen = GZERO_QLEN,
};
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 01e879ef3654..7530468c9a4f 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -2,8 +2,6 @@
# Makefile for USB Host Controller Drivers
#
-ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
-
# tell define_trace.h where to find the xhci trace header
CFLAGS_xhci-trace.o := -I$(src)
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 284f8417eae5..ec9f7b75d497 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -153,6 +153,7 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval)
goto fail_add_hcd;
+ device_wakeup_enable(hcd->self.controller);
return retval;
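The same two-line tail recurs in nearly every host-probe hunk in this series: add the HCD, then flag its controller as wakeup capable. A generic sketch of that pattern is below; ehci_probe_tail is a made-up name, while usb_add_hcd() and device_wakeup_enable() are the real APIs.

#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>
#include <linux/usb/hcd.h>

/* Made-up helper showing the recurring probe tail. */
static int ehci_probe_tail(struct usb_hcd *hcd, unsigned int irq)
{
	int retval = usb_add_hcd(hcd, irq, IRQF_SHARED);

	if (retval)
		return retval;
	device_wakeup_enable(hcd->self.controller);
	return 0;
}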
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 4a9c2edbcb2b..524cbf26d992 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -18,7 +18,7 @@
/* this file is part of ehci-hcd.c */
-#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+#ifdef CONFIG_DYNAMIC_DEBUG
/* check the values in the HCSPARAMS register
* (host controller _Structural_ parameters)
@@ -62,7 +62,7 @@ static inline void dbg_hcs_params (struct ehci_hcd *ehci, char *label) {}
#endif
-#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+#ifdef CONFIG_DYNAMIC_DEBUG
/* check the values in the HCCPARAMS register
* (host controller _Capability_ parameters)
@@ -101,7 +101,7 @@ static inline void dbg_hcc_params (struct ehci_hcd *ehci, char *label) {}
#endif
-#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+#ifdef CONFIG_DYNAMIC_DEBUG
static void __maybe_unused
dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
@@ -301,7 +301,7 @@ static inline int __maybe_unused
dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
{ return 0; }
-#endif /* DEBUG || CONFIG_DYNAMIC_DEBUG */
+#endif /* CONFIG_DYNAMIC_DEBUG */
/* functions have the "wrong" filename when they're output... */
#define dbg_status(ehci, label, status) { \
@@ -818,7 +818,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
#ifdef CONFIG_PCI
/* EHCI 0.96 and later may have "extended capabilities" */
- if (hcd->self.controller->bus == &pci_bus_type) {
+ if (dev_is_pci(hcd->self.controller)) {
struct pci_dev *pdev;
u32 offset, cap, cap2;
unsigned count = 256/4;
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index e97c198e052f..d1d8c47777c5 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -166,6 +166,7 @@ skip_phy:
dev_err(&pdev->dev, "Failed to add USB HCD\n");
goto fail_add_hcd;
}
+ device_wakeup_enable(hcd->self.controller);
platform_set_drvdata(pdev, hcd);
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index a06d5012201f..6f2c8d3899d2 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -102,19 +102,11 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
- driver->description)) {
- dev_dbg(&pdev->dev, "controller already in use\n");
- retval = -EBUSY;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
goto err2;
}
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
-
- if (hcd->regs == NULL) {
- dev_dbg(&pdev->dev, "error mapping memory\n");
- retval = -EFAULT;
- goto err3;
- }
pdata->regs = hcd->regs;
@@ -126,7 +118,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
*/
if (pdata->init && pdata->init(pdev)) {
retval = -ENODEV;
- goto err4;
+ goto err2;
}
/* Enable USB controller, 83xx or 8536 */
@@ -137,7 +129,8 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval != 0)
- goto err4;
+ goto err2;
+ device_wakeup_enable(hcd->self.controller);
#ifdef CONFIG_USB_OTG
if (pdata->operating_mode == FSL_USB2_DR_OTG) {
@@ -152,21 +145,17 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
&ehci_to_hcd(ehci)->self);
if (retval) {
usb_put_phy(hcd->phy);
- goto err4;
+ goto err2;
}
} else {
dev_err(&pdev->dev, "can't find phy\n");
retval = -ENODEV;
- goto err4;
+ goto err2;
}
}
#endif
return retval;
- err4:
- iounmap(hcd->regs);
- err3:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err2:
usb_put_hcd(hcd);
err1:
@@ -205,8 +194,6 @@ static void usb_hcd_fsl_remove(struct usb_hcd *hcd,
*/
if (pdata->exit)
pdata->exit(pdev);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
}
@@ -267,7 +254,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) ||
in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) {
- printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n");
+ dev_warn(hcd->self.controller, "USB PHY clock invalid\n");
return -EINVAL;
}
}
@@ -413,7 +400,7 @@ static int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
u32 tmp;
-#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+#ifdef CONFIG_DYNAMIC_DEBUG
u32 mode = ehci_readl(ehci, hcd->regs + FSL_SOC_USB_USBMODE);
mode &= USBMODE_CM_MASK;
tmp = ehci_readl(ehci, hcd->regs + 0x140); /* usbcmd */
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
index b52a66ce92e8..495b6fbcbcd9 100644
--- a/drivers/usb/host/ehci-grlib.c
+++ b/drivers/usb/host/ehci-grlib.c
@@ -113,7 +113,8 @@ static int ehci_hcd_grlib_probe(struct platform_device *op)
irq = irq_of_parse_and_map(dn, 0);
if (irq == NO_IRQ) {
- printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
+ dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
+ __FILE__);
rv = -EBUSY;
goto err_irq;
}
@@ -140,6 +141,7 @@ static int ehci_hcd_grlib_probe(struct platform_device *op)
if (rv)
goto err_ioremap;
+ device_wakeup_enable(hcd->self.controller);
return 0;
err_ioremap:
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index e8ba4c44223a..471142725ffe 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -71,7 +71,6 @@
static const char hcd_name [] = "ehci_hcd";
-#undef VERBOSE_DEBUG
#undef EHCI_URB_TRACE
/* magic numbers that can affect system performance */
@@ -714,13 +713,6 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
cmd = ehci_readl(ehci, &ehci->regs->command);
bh = 0;
-#ifdef VERBOSE_DEBUG
- /* unrequested/ignored: Frame List Rollover */
- dbg_status (ehci, "irq", status);
-#endif
-
- /* INT, ERR, and IAA interrupt rates can be throttled */
-
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
if (likely ((status & STS_ERR) == 0))
@@ -1320,7 +1312,7 @@ static int __init ehci_hcd_init(void)
sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
-#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+#ifdef CONFIG_DYNAMIC_DEBUG
ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
if (!ehci_debug_root) {
retval = -ENOENT;
@@ -1369,7 +1361,7 @@ clean2:
platform_driver_unregister(&PLATFORM_DRIVER);
clean0:
#endif
-#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+#ifdef CONFIG_DYNAMIC_DEBUG
debugfs_remove(ehci_debug_root);
ehci_debug_root = NULL;
err_debug:
@@ -1393,7 +1385,7 @@ static void __exit ehci_hcd_cleanup(void)
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
#endif
-#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+#ifdef CONFIG_DYNAMIC_DEBUG
debugfs_remove(ehci_debug_root);
#endif
clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 835fc0844a66..47b858fc50b2 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -1114,10 +1114,8 @@ static int ehci_hub_control (
if (test_bit(wIndex, &ehci->port_c_suspend))
status |= USB_PORT_STAT_C_SUSPEND << 16;
-#ifndef VERBOSE_DEBUG
- if (status & ~0xffff) /* only if wPortChange is interesting */
-#endif
- dbg_port (ehci, "GetStatus", wIndex + 1, temp);
+ if (status & ~0xffff) /* only if wPortChange is interesting */
+ dbg_port(ehci, "GetStatus", wIndex + 1, temp);
put_unaligned_le32(status, buf);
break;
case SetHubFeature:
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 417c10da9450..bd61612a7251 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -178,7 +178,7 @@ static int mv_ehci_probe(struct platform_device *pdev)
ehci_mv->phy_regs = devm_ioremap(&pdev->dev, r->start,
resource_size(r));
- if (ehci_mv->phy_regs == 0) {
+ if (!ehci_mv->phy_regs) {
dev_err(&pdev->dev, "failed to map phy I/O memory\n");
retval = -EFAULT;
goto err_put_hcd;
@@ -257,6 +257,7 @@ static int mv_ehci_probe(struct platform_device *pdev)
"failed to add hcd with err %d\n", retval);
goto err_set_vbus;
}
+ device_wakeup_enable(hcd->self.controller);
}
if (pdata->private_init)
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index 0528dc4526c8..dbe5e4eea08d 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -155,6 +155,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
if (ret)
goto err_add;
+ device_wakeup_enable(hcd->self.controller);
return 0;
err_add:
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index 4c528b2c033a..9051439039a7 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -128,20 +128,12 @@ static int ehci_octeon_drv_probe(struct platform_device *pdev)
hcd->rsrc_start = res_mem->start;
hcd->rsrc_len = resource_size(res_mem);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
- OCTEON_EHCI_HCD_NAME)) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- ret = -EBUSY;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
goto err1;
}
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto err2;
- }
-
ehci_octeon_start();
ehci = hcd_to_ehci(hcd);
@@ -156,18 +148,16 @@ static int ehci_octeon_drv_probe(struct platform_device *pdev)
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
- goto err3;
+ goto err2;
}
+ device_wakeup_enable(hcd->self.controller);
platform_set_drvdata(pdev, hcd);
return 0;
-err3:
+err2:
ehci_octeon_stop();
- iounmap(hcd->regs);
-err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
usb_put_hcd(hcd);
return ret;
@@ -180,8 +170,6 @@ static int ehci_octeon_drv_remove(struct platform_device *pdev)
usb_remove_hcd(hcd);
ehci_octeon_stop();
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
return 0;
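Several of the conversions in this series (fsl, octeon, orion, w90x900) follow the same managed-mapping idiom, sketched below: devm_ioremap_resource() stands in for request_mem_region() plus ioremap(), and the explicit iounmap()/release_mem_region() calls in the error and remove paths disappear because devres undoes the mapping automatically. map_hcd_regs is an illustrative name, not code from any of these drivers.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Illustrative wrapper for the devm-based mapping idiom. */
static void __iomem *map_hcd_regs(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* Returns an ERR_PTR() on failure, including a missing resource. */
	return devm_ioremap_resource(&pdev->dev, res);
}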
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 6fa82d6b7661..a24720beb39d 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -215,6 +215,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
dev_err(dev, "failed to add hcd with err %d\n", ret);
goto err_pm_runtime;
}
+ device_wakeup_enable(hcd->self.controller);
/*
* Bring PHYs out of reset for non PHY modes.
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 2ba76730e650..30d35e5e503a 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -184,33 +184,23 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
if (err)
goto err1;
- if (!request_mem_region(res->start, resource_size(res),
- ehci_orion_hc_driver.description)) {
- dev_dbg(&pdev->dev, "controller already in use\n");
- err = -EBUSY;
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs)) {
+ err = PTR_ERR(regs);
goto err1;
}
- regs = ioremap(res->start, resource_size(res));
- if (regs == NULL) {
- dev_dbg(&pdev->dev, "error mapping memory\n");
- err = -EFAULT;
- goto err2;
- }
-
/* Not all platforms can gate the clock, so it is not
an error if the clock does not exist. */
- clk = clk_get(&pdev->dev, NULL);
- if (!IS_ERR(clk)) {
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk))
clk_prepare_enable(clk);
- clk_put(clk);
- }
hcd = usb_create_hcd(&ehci_orion_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
err = -ENOMEM;
- goto err3;
+ goto err2;
}
hcd->rsrc_start = res->start;
@@ -245,25 +235,21 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
case EHCI_PHY_DD:
case EHCI_PHY_KW:
default:
- printk(KERN_WARNING "Orion ehci -USB phy version isn't supported.\n");
+ dev_warn(&pdev->dev, "USB phy version isn't supported.\n");
}
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
- goto err4;
+ goto err3;
+ device_wakeup_enable(hcd->self.controller);
return 0;
-err4:
- usb_put_hcd(hcd);
err3:
- if (!IS_ERR(clk)) {
- clk_disable_unprepare(clk);
- clk_put(clk);
- }
- iounmap(regs);
+ usb_put_hcd(hcd);
err2:
- release_mem_region(res->start, resource_size(res));
+ if (!IS_ERR(clk))
+ clk_disable_unprepare(clk);
err1:
dev_err(&pdev->dev, "init %s fail, %d\n",
dev_name(&pdev->dev), err);
@@ -277,15 +263,11 @@ static int ehci_orion_drv_remove(struct platform_device *pdev)
struct clk *clk;
usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
- clk = clk_get(&pdev->dev, NULL);
- if (!IS_ERR(clk)) {
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk))
clk_disable_unprepare(clk);
- clk_put(clk);
- }
return 0;
}
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 7f30b7168d5a..01536cfd361d 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -132,6 +132,7 @@ static int ehci_platform_probe(struct platform_device *dev)
if (err)
goto err_put_hcd;
+ device_wakeup_enable(hcd->self.controller);
platform_set_drvdata(dev, hcd);
return err;
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
index 893b707f0000..af3974a5e7c2 100644
--- a/drivers/usb/host/ehci-pmcmsp.c
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -210,8 +210,10 @@ int usb_hcd_msp_probe(const struct hc_driver *driver,
retval = usb_add_hcd(hcd, res->start, IRQF_SHARED);
- if (retval == 0)
+ if (retval == 0) {
+ device_wakeup_enable(hcd->self.controller);
return 0;
+ }
usb_remove_hcd(hcd);
err3:
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 875d2fcc9e0e..547924796d29 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -119,7 +119,8 @@ static int ehci_hcd_ppc_of_probe(struct platform_device *op)
irq = irq_of_parse_and_map(dn, 0);
if (irq == NO_IRQ) {
- printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
+ dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
+ __FILE__);
rv = -EBUSY;
goto err_irq;
}
@@ -169,6 +170,7 @@ static int ehci_hcd_ppc_of_probe(struct platform_device *op)
if (rv)
goto err_ioremap;
+ device_wakeup_enable(hcd->self.controller);
return 0;
err_ioremap:
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 8188542ba17e..7934ff9b35e1 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -189,6 +189,7 @@ static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
goto fail_add_hcd;
}
+ device_wakeup_enable(hcd->self.controller);
return result;
fail_add_hcd:
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index db05bd8ee9d5..54f5332f814d 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -168,13 +168,13 @@ static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
* Note: this routine is never called for Isochronous transfers.
*/
if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
-#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+#ifdef CONFIG_DYNAMIC_DEBUG
struct usb_device *tt = urb->dev->tt->hub;
dev_dbg(&tt->dev,
"clear tt buffer port %d, a%d ep%d t%08x\n",
urb->dev->ttport, urb->dev->devnum,
usb_pipeendpoint(urb->pipe), token);
-#endif /* DEBUG || CONFIG_DYNAMIC_DEBUG */
+#endif /* CONFIG_DYNAMIC_DEBUG */
if (!ehci_is_TDI(ehci)
|| urb->dev->tt->hub !=
ehci_to_hcd(ehci)->self.root_hub) {
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
index 8a734498079b..cf1267673868 100644
--- a/drivers/usb/host/ehci-sead3.c
+++ b/drivers/usb/host/ehci-sead3.c
@@ -126,6 +126,7 @@ static int ehci_hcd_sead3_drv_probe(struct platform_device *pdev)
IRQF_SHARED);
if (ret == 0) {
platform_set_drvdata(pdev, hcd);
+ device_wakeup_enable(hcd->self.controller);
return ret;
}
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index dc899eb2b861..9b9b9f5b016e 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -151,6 +151,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to add hcd");
goto fail_add_hcd;
}
+ device_wakeup_enable(hcd->self.controller);
priv->hcd = hcd;
platform_set_drvdata(pdev, priv);
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index ee6f9ffaa0e7..8bd915b2ae8c 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -130,6 +130,7 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
if (retval)
goto err_stop_ehci;
+ device_wakeup_enable(hcd->self.controller);
return retval;
err_stop_ehci:
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index b9fd0396011e..af28b748e87a 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -17,7 +17,6 @@
*/
#include <linux/clk.h>
-#include <linux/clk/tegra.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
@@ -29,6 +28,7 @@
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/usb/ehci_def.h>
#include <linux/usb/tegra_usb_phy.h>
@@ -62,6 +62,7 @@ static int (*orig_hub_control)(struct usb_hcd *hcd,
struct tegra_ehci_hcd {
struct tegra_usb_phy *phy;
struct clk *clk;
+ struct reset_control *rst;
int port_resuming;
bool needs_double_reset;
enum tegra_usb_phy_port_speed port_speed;
@@ -385,13 +386,20 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto cleanup_hcd_create;
}
+ tegra->rst = devm_reset_control_get(&pdev->dev, "usb");
+ if (IS_ERR(tegra->rst)) {
+ dev_err(&pdev->dev, "Can't get ehci reset\n");
+ err = PTR_ERR(tegra->rst);
+ goto cleanup_hcd_create;
+ }
+
err = clk_prepare_enable(tegra->clk);
if (err)
goto cleanup_hcd_create;
- tegra_periph_reset_assert(tegra->clk);
+ reset_control_assert(tegra->rst);
udelay(1);
- tegra_periph_reset_deassert(tegra->clk);
+ reset_control_deassert(tegra->rst);
u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
if (IS_ERR(u_phy)) {
@@ -455,6 +463,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to add USB HCD\n");
goto cleanup_otg_set_host;
}
+ device_wakeup_enable(hcd->self.controller);
return err;
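The Tegra hunk above swaps the clk-based tegra_periph_reset_{assert,deassert}() calls for the common reset framework. A minimal sketch of the new sequence, assuming the "usb" reset line named in the probe path; pulse_usb_reset is an illustrative helper, not the driver's code.

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Illustrative: obtain the named reset line and pulse it for 1 us,
 * mirroring the probe sequence above. */
static int pulse_usb_reset(struct device *dev)
{
	struct reset_control *rst = devm_reset_control_get(dev, "usb");

	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);
	udelay(1);
	reset_control_deassert(rst);

	return 0;
}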
diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c
index 67026ffbf9a8..f3713d32c9a1 100644
--- a/drivers/usb/host/ehci-tilegx.c
+++ b/drivers/usb/host/ehci-tilegx.c
@@ -170,6 +170,7 @@ static int ehci_hcd_tilegx_drv_probe(struct platform_device *pdev)
ret = usb_add_hcd(hcd, pdata->irq, IRQF_SHARED);
if (ret == 0) {
platform_set_drvdata(pdev, hcd);
+ device_wakeup_enable(hcd->self.controller);
return ret;
}
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index cdad8438c02b..a9303aff125e 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -58,17 +58,12 @@ static int usb_w90x900_probe(const struct hc_driver *driver,
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- retval = -EBUSY;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
goto err2;
}
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (hcd->regs == NULL) {
- retval = -EFAULT;
- goto err3;
- }
-
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs;
ehci->regs = hcd->regs +
@@ -88,17 +83,14 @@ static int usb_w90x900_probe(const struct hc_driver *driver,
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- goto err4;
+ goto err2;
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval != 0)
- goto err4;
+ goto err2;
+ device_wakeup_enable(hcd->self.controller);
return retval;
-err4:
- iounmap(hcd->regs);
-err3:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err2:
usb_put_hcd(hcd);
err1:
@@ -109,8 +101,6 @@ static void usb_w90x900_remove(struct usb_hcd *hcd,
struct platform_device *pdev)
{
usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
}
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 95979f9f4381..fe57710753e8 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -155,7 +155,8 @@ static int ehci_hcd_xilinx_of_probe(struct platform_device *op)
irq = irq_of_parse_and_map(dn, 0);
if (!irq) {
- printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
+ dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
+ __FILE__);
rv = -EBUSY;
goto err_irq;
}
@@ -191,8 +192,10 @@ static int ehci_hcd_xilinx_of_probe(struct platform_device *op)
ehci->caps = hcd->regs + 0x100;
rv = usb_add_hcd(hcd, irq, 0);
- if (rv == 0)
+ if (rv == 0) {
+ device_wakeup_enable(hcd->self.controller);
return 0;
+ }
err_irq:
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index e8f41c5e771b..9dfc6c1394d6 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -38,7 +38,7 @@ typedef __u16 __bitwise __hc16;
#endif
/* statistics can be kept for tuning/monitoring */
-#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+#ifdef CONFIG_DYNAMIC_DEBUG
#define EHCI_STATS
#endif
@@ -225,6 +225,7 @@ struct ehci_hcd { /* one per controller */
unsigned has_synopsys_hc_bug:1; /* Synopsys HC */
unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
unsigned need_oc_pp_cycle:1; /* MPC834X port power */
+ unsigned imx28_write_fix:1; /* For Freescale i.MX28 */
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
@@ -248,7 +249,7 @@ struct ehci_hcd { /* one per controller */
#endif
/* debug files */
-#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+#ifdef CONFIG_DYNAMIC_DEBUG
struct dentry *debug_dir;
#endif
@@ -728,6 +729,18 @@ static inline unsigned int ehci_readl(const struct ehci_hcd *ehci,
#endif
}
+#ifdef CONFIG_SOC_IMX28
+static inline void imx28_ehci_writel(const unsigned int val,
+ volatile __u32 __iomem *addr)
+{
+ __asm__ ("swp %0, %0, [%1]" : : "r"(val), "r"(addr));
+}
+#else
+static inline void imx28_ehci_writel(const unsigned int val,
+ volatile __u32 __iomem *addr)
+{
+}
+#endif
static inline void ehci_writel(const struct ehci_hcd *ehci,
const unsigned int val, __u32 __iomem *regs)
{
@@ -736,7 +749,10 @@ static inline void ehci_writel(const struct ehci_hcd *ehci,
writel_be(val, regs) :
writel(val, regs);
#else
- writel(val, regs);
+ if (ehci->imx28_write_fix)
+ imx28_ehci_writel(val, regs);
+ else
+ writel(val, regs);
#endif
}
@@ -832,9 +848,9 @@ static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
dev_warn(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
-#if !defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
+#ifndef CONFIG_DYNAMIC_DEBUG
#define STUB_DEBUG_FILES
-#endif /* !DEBUG && !CONFIG_DYNAMIC_DEBUG */
+#endif
/*-------------------------------------------------------------------------*/
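The one behavioural addition in the ehci.h hunk is the imx28_write_fix quirk: when set, ehci_writel() routes MMIO writes through an ARM swp instruction instead of writel(), which addresses an i.MX28 USB register-write erratum. A hypothetical glue-driver fragment that opts in is sketched below; enable_imx28_quirk is invented, while the flag and hcd_to_ehci() come from the header above.

#include <linux/usb/hcd.h>
#include "ehci.h"

/* Hypothetical opt-in: set the quirk flag before usb_add_hcd() so every
 * ehci_writel() takes the imx28_ehci_writel() swp path. */
static void enable_imx28_quirk(struct usb_hcd *hcd)
{
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);

	ehci->imx28_write_fix = 1;
}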
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 0551c0af0fd1..1cf68eaf2ed8 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -754,6 +754,8 @@ static int of_fhci_probe(struct platform_device *ofdev)
if (ret < 0)
goto err_add_hcd;
+ device_wakeup_enable(hcd->self.controller);
+
fhci_dfs_create(fhci);
return 0;
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 55486bd23cf1..98a89d16cc3e 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -56,12 +56,9 @@
static const char hcd_name[] = "fotg210_hcd";
-#undef VERBOSE_DEBUG
#undef FOTG210_URB_TRACE
-#ifdef DEBUG
#define FOTG210_STATS
-#endif
/* magic numbers that can affect system performance */
#define FOTG210_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
@@ -107,14 +104,6 @@ MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
#define fotg210_warn(fotg210, fmt, args...) \
dev_warn(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
-#ifdef VERBOSE_DEBUG
-# define fotg210_vdbg fotg210_dbg
-#else
- static inline void fotg210_vdbg(struct fotg210_hcd *fotg210, ...) {}
-#endif
-
-#ifdef DEBUG
-
/* check the values in the HCSPARAMS register
* (host controller _Structural_ parameters)
* see EHCI spec, Table 2-4 for each value
@@ -129,13 +118,6 @@ static void dbg_hcs_params(struct fotg210_hcd *fotg210, char *label)
HCS_N_PORTS(params)
);
}
-#else
-
-static inline void dbg_hcs_params(struct fotg210_hcd *fotg210, char *label) {}
-
-#endif
-
-#ifdef DEBUG
/* check the values in the HCCPARAMS register
* (host controller _Capability_ parameters)
@@ -152,13 +134,6 @@ static void dbg_hcc_params(struct fotg210_hcd *fotg210, char *label)
HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
HCC_CANPARK(params) ? " park" : "");
}
-#else
-
-static inline void dbg_hcc_params(struct fotg210_hcd *fotg210, char *label) {}
-
-#endif
-
-#ifdef DEBUG
static void __maybe_unused
dbg_qtd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd)
@@ -272,8 +247,8 @@ dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
);
}
-static int
-dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
+static char *
+dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
{
char *sig;
@@ -293,7 +268,7 @@ dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
break;
}
- return scnprintf(buf, len,
+ scnprintf(buf, len,
"%s%sport:%d status %06x %d "
"sig=%s%s%s%s%s%s%s%s",
label, label[0] ? " " : "", port, status,
@@ -306,31 +281,9 @@ dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
(status & PORT_PE) ? " PE" : "",
(status & PORT_CSC) ? " CSC" : "",
(status & PORT_CONNECT) ? " CONNECT" : "");
+ return buf;
}
-#else
-static inline void __maybe_unused
-dbg_qh(char *label, struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
-{}
-
-static inline int __maybe_unused
-dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
-{ return 0; }
-
-static inline int __maybe_unused
-dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
-{ return 0; }
-
-static inline int __maybe_unused
-dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
-{ return 0; }
-
-static inline int __maybe_unused
-dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
-{ return 0; }
-
-#endif /* DEBUG */
-
/* functions have the "wrong" filename when they're output... */
#define dbg_status(fotg210, label, status) { \
char _buf[80]; \
@@ -346,19 +299,11 @@ dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
#define dbg_port(fotg210, label, port, status) { \
char _buf[80]; \
- dbg_port_buf(_buf, sizeof(_buf), label, port, status); \
- fotg210_dbg(fotg210, "%s\n", _buf); \
+ fotg210_dbg(fotg210, "%s\n", dbg_port_buf(_buf, sizeof(_buf), label, port, status)); \
}
/*-------------------------------------------------------------------------*/
-#ifdef STUB_DEBUG_FILES
-
-static inline void create_debug_files(struct fotg210_hcd *bus) { }
-static inline void remove_debug_files(struct fotg210_hcd *bus) { }
-
-#else
-
/* troubleshooting help: expose state in debugfs */
static int debug_async_open(struct inode *, struct file *);
@@ -954,7 +899,6 @@ static inline void remove_debug_files(struct fotg210_hcd *fotg210)
debugfs_remove_recursive(fotg210->debug_dir);
}
-#endif /* STUB_DEBUG_FILES */
/*-------------------------------------------------------------------------*/
/*
@@ -1398,7 +1342,7 @@ static void fotg210_iaa_watchdog(struct fotg210_hcd *fotg210)
&fotg210->regs->status);
}
- fotg210_vdbg(fotg210, "IAA watchdog: status %x cmd %x\n",
+ fotg210_dbg(fotg210, "IAA watchdog: status %x cmd %x\n",
status, cmd);
end_unlink_async(fotg210);
}
@@ -1810,10 +1754,8 @@ static int fotg210_hub_control(
if (test_bit(wIndex, &fotg210->port_c_suspend))
status |= USB_PORT_STAT_C_SUSPEND << 16;
-#ifndef VERBOSE_DEBUG
- if (status & ~0xffff) /* only if wPortChange is interesting */
-#endif
- dbg_port(fotg210, "GetStatus", wIndex + 1, temp);
+ if (status & ~0xffff) /* only if wPortChange is interesting */
+ dbg_port(fotg210, "GetStatus", wIndex + 1, temp);
put_unaligned_le32(status, buf);
break;
case SetHubFeature:
@@ -1856,7 +1798,7 @@ static int fotg210_hub_control(
* which can be fine if this root hub has a
* transaction translator built in.
*/
- fotg210_vdbg(fotg210, "port %d reset\n", wIndex + 1);
+ fotg210_dbg(fotg210, "port %d reset\n", wIndex + 1);
temp |= PORT_RESET;
temp &= ~PORT_PE;
@@ -2274,13 +2216,12 @@ static void fotg210_clear_tt_buffer(struct fotg210_hcd *fotg210,
* Note: this routine is never called for Isochronous transfers.
*/
if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
-#ifdef DEBUG
struct usb_device *tt = urb->dev->tt->hub;
dev_dbg(&tt->dev,
"clear tt buffer port %d, a%d ep%d t%08x\n",
urb->dev->ttport, urb->dev->devnum,
usb_pipeendpoint(urb->pipe), token);
-#endif /* DEBUG */
+
if (urb->dev->tt->hub !=
fotg210_to_hcd(fotg210)->self.root_hub) {
if (usb_hub_clear_tt_buffer(urb) == 0)
@@ -2341,7 +2282,7 @@ static int qtd_copy_status(
status = -EPROTO;
}
- fotg210_vdbg(fotg210,
+ fotg210_dbg(fotg210,
"dev%d ep%d%s qtd token %08x --> status %d\n",
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe),
@@ -3583,11 +3524,9 @@ periodic_usecs(struct fotg210_hcd *fotg210, unsigned frame, unsigned uframe)
break;
}
}
-#ifdef DEBUG
if (usecs > fotg210->uframe_periodic_max)
fotg210_err(fotg210, "uframe %d sched overrun: %d usecs\n",
frame * 8 + uframe, usecs);
-#endif
return usecs;
}
@@ -4646,7 +4585,7 @@ static void itd_link_urb(
if (unlikely(list_empty(&stream->td_list))) {
fotg210_to_hcd(fotg210)->self.bandwidth_allocated
+= stream->bandwidth;
- fotg210_vdbg(fotg210,
+ fotg210_dbg(fotg210,
"schedule devp %s ep%d%s-iso period %d start %d.%d\n",
urb->dev->devpath, stream->bEndpointAddress & 0x0f,
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
@@ -4779,7 +4718,7 @@ static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
if (unlikely(list_is_singular(&stream->td_list))) {
fotg210_to_hcd(fotg210)->self.bandwidth_allocated
-= stream->bandwidth;
- fotg210_vdbg(fotg210,
+ fotg210_dbg(fotg210,
"deschedule devp %s ep%d%s-iso\n",
dev->devpath, stream->bEndpointAddress & 0x0f,
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
@@ -5444,10 +5383,8 @@ static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
cmd = fotg210_readl(fotg210, &fotg210->regs->command);
bh = 0;
-#ifdef VERBOSE_DEBUG
/* unrequested/ignored: Frame List Rollover */
dbg_status(fotg210, "irq", status);
-#endif
/* INT, ERR, and IAA interrupt rates can be throttled */
@@ -5952,6 +5889,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
dev_err(dev, "failed to add hcd with err %d\n", retval);
goto fail_add_hcd;
}
+ device_wakeup_enable(hcd->self.controller);
return retval;
@@ -6013,13 +5951,11 @@ static int __init fotg210_hcd_init(void)
sizeof(struct fotg210_qh), sizeof(struct fotg210_qtd),
sizeof(struct fotg210_itd));
-#ifdef DEBUG
fotg210_debug_root = debugfs_create_dir("fotg210", usb_debug_root);
if (!fotg210_debug_root) {
retval = -ENOENT;
goto err_debug;
}
-#endif
retval = platform_driver_register(&fotg210_hcd_driver);
if (retval < 0)
@@ -6028,11 +5964,9 @@ static int __init fotg210_hcd_init(void)
platform_driver_unregister(&fotg210_hcd_driver);
clean:
-#ifdef DEBUG
debugfs_remove(fotg210_debug_root);
fotg210_debug_root = NULL;
err_debug:
-#endif
clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
return retval;
}
@@ -6041,9 +5975,7 @@ module_init(fotg210_hcd_init);
static void __exit fotg210_hcd_cleanup(void)
{
platform_driver_unregister(&fotg210_hcd_driver);
-#ifdef DEBUG
debugfs_remove(fotg210_debug_root);
-#endif
clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
}
module_exit(fotg210_hcd_cleanup);
diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
index 8920f9d32564..ac6cd1bfd208 100644
--- a/drivers/usb/host/fotg210.h
+++ b/drivers/usb/host/fotg210.h
@@ -174,9 +174,7 @@ struct fotg210_hcd { /* one per controller */
#endif
/* debug files */
-#ifdef DEBUG
struct dentry *debug_dir;
-#endif
};
/* convert between an HCD pointer and the corresponding FOTG210_HCD */
@@ -741,10 +739,4 @@ static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210)
})
/*-------------------------------------------------------------------------*/
-#ifndef DEBUG
-#define STUB_DEBUG_FILES
-#endif /* DEBUG */
-
-/*-------------------------------------------------------------------------*/
-
#endif /* __LINUX_FOTG210_H */
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index abd5050a4899..9162d1b6c0a3 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -261,19 +261,8 @@ int fsl_usb2_mpc5121_init(struct platform_device *pdev)
struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct clk *clk;
int err;
- char clk_name[10];
- int base, clk_num;
-
- base = pdev->resource->start & 0xf000;
- if (base == 0x3000)
- clk_num = 1;
- else if (base == 0x4000)
- clk_num = 2;
- else
- return -ENODEV;
- snprintf(clk_name, sizeof(clk_name), "usb%d_clk", clk_num);
- clk = devm_clk_get(pdev->dev.parent, clk_name);
+ clk = devm_clk_get(pdev->dev.parent, "ipg");
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clk\n");
return PTR_ERR(clk);
diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
index e1c6d850a7e1..ba9499060f63 100644
--- a/drivers/usb/host/fusbh200-hcd.c
+++ b/drivers/usb/host/fusbh200-hcd.c
@@ -57,13 +57,8 @@
static const char hcd_name [] = "fusbh200_hcd";
-#undef VERBOSE_DEBUG
#undef FUSBH200_URB_TRACE
-#ifdef DEBUG
-#define FUSBH200_STATS
-#endif
-
/* magic numbers that can affect system performance */
#define FUSBH200_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
#define FUSBH200_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
@@ -108,14 +103,6 @@ MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
#define fusbh200_warn(fusbh200, fmt, args...) \
dev_warn (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
-#ifdef VERBOSE_DEBUG
-# define fusbh200_vdbg fusbh200_dbg
-#else
- static inline void fusbh200_vdbg(struct fusbh200_hcd *fusbh200, ...) {}
-#endif
-
-#ifdef DEBUG
-
/* check the values in the HCSPARAMS register
* (host controller _Structural_ parameters)
* see EHCI spec, Table 2-4 for each value
@@ -130,13 +117,6 @@ static void dbg_hcs_params (struct fusbh200_hcd *fusbh200, char *label)
HCS_N_PORTS (params)
);
}
-#else
-
-static inline void dbg_hcs_params (struct fusbh200_hcd *fusbh200, char *label) {}
-
-#endif
-
-#ifdef DEBUG
/* check the values in the HCCPARAMS register
* (host controller _Capability_ parameters)
@@ -153,13 +133,6 @@ static void dbg_hcc_params (struct fusbh200_hcd *fusbh200, char *label)
HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
HCC_CANPARK(params) ? " park" : "");
}
-#else
-
-static inline void dbg_hcc_params (struct fusbh200_hcd *fusbh200, char *label) {}
-
-#endif
-
-#ifdef DEBUG
static void __maybe_unused
dbg_qtd (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd)
@@ -302,29 +275,6 @@ dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
(status & PORT_CONNECT) ? " CONNECT" : "");
}
-#else
-static inline void __maybe_unused
-dbg_qh (char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
-{}
-
-static inline int __maybe_unused
-dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
-{ return 0; }
-
-static inline int __maybe_unused
-dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
-{ return 0; }
-
-static inline int __maybe_unused
-dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
-{ return 0; }
-
-static inline int __maybe_unused
-dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
-{ return 0; }
-
-#endif /* DEBUG */
-
/* functions have the "wrong" filename when they're output... */
#define dbg_status(fusbh200, label, status) { \
char _buf [80]; \
@@ -346,13 +296,6 @@ dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
/*-------------------------------------------------------------------------*/
-#ifdef STUB_DEBUG_FILES
-
-static inline void create_debug_files (struct fusbh200_hcd *bus) { }
-static inline void remove_debug_files (struct fusbh200_hcd *bus) { }
-
-#else
-
/* troubleshooting help: expose state in debugfs */
static int debug_async_open(struct inode *, struct file *);
@@ -775,7 +718,6 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
next += temp;
}
-#ifdef FUSBH200_STATS
temp = scnprintf (next, size,
"irq normal %ld err %ld iaa %ld (lost %ld)\n",
fusbh200->stats.normal, fusbh200->stats.error, fusbh200->stats.iaa,
@@ -787,7 +729,6 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
fusbh200->stats.complete, fusbh200->stats.unlink);
size -= temp;
next += temp;
-#endif
done:
spin_unlock_irqrestore (&fusbh200->lock, flags);
@@ -928,7 +869,6 @@ static inline void remove_debug_files (struct fusbh200_hcd *fusbh200)
debugfs_remove_recursive(fusbh200->debug_dir);
}
-#endif /* STUB_DEBUG_FILES */
/*-------------------------------------------------------------------------*/
/*
@@ -1362,7 +1302,7 @@ static void fusbh200_iaa_watchdog(struct fusbh200_hcd *fusbh200)
fusbh200_writel(fusbh200, STS_IAA, &fusbh200->regs->status);
}
- fusbh200_vdbg(fusbh200, "IAA watchdog: status %x cmd %x\n",
+ fusbh200_dbg(fusbh200, "IAA watchdog: status %x cmd %x\n",
status, cmd);
end_unlink_async(fusbh200);
}
@@ -1769,10 +1709,8 @@ static int fusbh200_hub_control (
if (test_bit(wIndex, &fusbh200->port_c_suspend))
status |= USB_PORT_STAT_C_SUSPEND << 16;
-#ifndef VERBOSE_DEBUG
- if (status & ~0xffff) /* only if wPortChange is interesting */
-#endif
- dbg_port (fusbh200, "GetStatus", wIndex + 1, temp);
+ if (status & ~0xffff) /* only if wPortChange is interesting */
+ dbg_port(fusbh200, "GetStatus", wIndex + 1, temp);
put_unaligned_le32(status, buf);
break;
case SetHubFeature:
@@ -1814,7 +1752,7 @@ static int fusbh200_hub_control (
* which can be fine if this root hub has a
* transaction translator built in.
*/
- fusbh200_vdbg (fusbh200, "port %d reset\n", wIndex + 1);
+ fusbh200_dbg(fusbh200, "port %d reset\n", wIndex + 1);
temp |= PORT_RESET;
temp &= ~PORT_PE;
@@ -2230,13 +2168,13 @@ static void fusbh200_clear_tt_buffer(struct fusbh200_hcd *fusbh200, struct fusbh
* Note: this routine is never called for Isochronous transfers.
*/
if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
-#ifdef DEBUG
struct usb_device *tt = urb->dev->tt->hub;
+
dev_dbg(&tt->dev,
"clear tt buffer port %d, a%d ep%d t%08x\n",
urb->dev->ttport, urb->dev->devnum,
usb_pipeendpoint(urb->pipe), token);
-#endif /* DEBUG */
+
if (urb->dev->tt->hub !=
fusbh200_to_hcd(fusbh200)->self.root_hub) {
if (usb_hub_clear_tt_buffer(urb) == 0)
@@ -2297,7 +2235,7 @@ static int qtd_copy_status (
status = -EPROTO;
}
- fusbh200_vdbg (fusbh200,
+ fusbh200_dbg(fusbh200,
"dev%d ep%d%s qtd token %08x --> status %d\n",
usb_pipedevice (urb->pipe),
usb_pipeendpoint (urb->pipe),
@@ -3529,11 +3467,9 @@ periodic_usecs (struct fusbh200_hcd *fusbh200, unsigned frame, unsigned uframe)
break;
}
}
-#ifdef DEBUG
if (usecs > fusbh200->uframe_periodic_max)
fusbh200_err (fusbh200, "uframe %d sched overrun: %d usecs\n",
frame * 8 + uframe, usecs);
-#endif
return usecs;
}
@@ -4586,7 +4522,7 @@ static void itd_link_urb(
if (unlikely (list_empty(&stream->td_list))) {
fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated
+= stream->bandwidth;
- fusbh200_vdbg (fusbh200,
+ fusbh200_dbg(fusbh200,
"schedule devp %s ep%d%s-iso period %d start %d.%d\n",
urb->dev->devpath, stream->bEndpointAddress & 0x0f,
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
@@ -4717,7 +4653,7 @@ static bool itd_complete(struct fusbh200_hcd *fusbh200, struct fusbh200_itd *itd
if (unlikely(list_is_singular(&stream->td_list))) {
fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated
-= stream->bandwidth;
- fusbh200_vdbg (fusbh200,
+ fusbh200_dbg(fusbh200,
"deschedule devp %s ep%d%s-iso\n",
dev->devpath, stream->bEndpointAddress & 0x0f,
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
@@ -5115,13 +5051,11 @@ static void fusbh200_stop (struct usb_hcd *hcd)
spin_unlock_irq (&fusbh200->lock);
fusbh200_mem_cleanup (fusbh200);
-#ifdef FUSBH200_STATS
fusbh200_dbg(fusbh200, "irq normal %ld err %ld iaa %ld (lost %ld)\n",
fusbh200->stats.normal, fusbh200->stats.error, fusbh200->stats.iaa,
fusbh200->stats.lost_iaa);
fusbh200_dbg (fusbh200, "complete %ld unlink %ld\n",
fusbh200->stats.complete, fusbh200->stats.unlink);
-#endif
dbg_status (fusbh200, "fusbh200_stop completed",
fusbh200_readl(fusbh200, &fusbh200->regs->status));
@@ -5365,13 +5299,6 @@ static irqreturn_t fusbh200_irq (struct usb_hcd *hcd)
cmd = fusbh200_readl(fusbh200, &fusbh200->regs->command);
bh = 0;
-#ifdef VERBOSE_DEBUG
- /* unrequested/ignored: Frame List Rollover */
- dbg_status (fusbh200, "irq", status);
-#endif
-
- /* INT, ERR, and IAA interrupt rates can be throttled */
-
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
if (likely ((status & STS_ERR) == 0))
@@ -5871,6 +5798,7 @@ static int fusbh200_hcd_probe(struct platform_device *pdev)
dev_err(dev, "failed to add hcd with err %d\n", retval);
goto fail_add_hcd;
}
+ device_wakeup_enable(hcd->self.controller);
return retval;
@@ -5936,13 +5864,11 @@ static int __init fusbh200_hcd_init(void)
sizeof(struct fusbh200_qh), sizeof(struct fusbh200_qtd),
sizeof(struct fusbh200_itd));
-#ifdef DEBUG
fusbh200_debug_root = debugfs_create_dir("fusbh200", usb_debug_root);
if (!fusbh200_debug_root) {
retval = -ENOENT;
goto err_debug;
}
-#endif
retval = platform_driver_register(&fusbh200_hcd_fusbh200_driver);
if (retval < 0)
@@ -5951,11 +5877,9 @@ static int __init fusbh200_hcd_init(void)
platform_driver_unregister(&fusbh200_hcd_fusbh200_driver);
clean:
-#ifdef DEBUG
debugfs_remove(fusbh200_debug_root);
fusbh200_debug_root = NULL;
err_debug:
-#endif
clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
return retval;
}
@@ -5964,9 +5888,7 @@ module_init(fusbh200_hcd_init);
static void __exit fusbh200_hcd_cleanup(void)
{
platform_driver_unregister(&fusbh200_hcd_fusbh200_driver);
-#ifdef DEBUG
debugfs_remove(fusbh200_debug_root);
-#endif
clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
}
module_exit(fusbh200_hcd_cleanup);
diff --git a/drivers/usb/host/fusbh200.h b/drivers/usb/host/fusbh200.h
index 797c9e855270..6b719e066c3f 100644
--- a/drivers/usb/host/fusbh200.h
+++ b/drivers/usb/host/fusbh200.h
@@ -165,17 +165,11 @@ struct fusbh200_hcd { /* one per controller */
u8 sbrn; /* packed release number */
/* irq statistics */
-#ifdef FUSBH200_STATS
struct fusbh200_stats stats;
# define COUNT(x) do { (x)++; } while (0)
-#else
-# define COUNT(x) do {} while (0)
-#endif
/* debug files */
-#ifdef DEBUG
struct dentry *debug_dir;
-#endif
};
/* convert between an HCD pointer and the corresponding FUSBH200_HCD */
@@ -734,10 +728,4 @@ static inline unsigned fusbh200_read_frame_index(struct fusbh200_hcd *fusbh200)
})
/*-------------------------------------------------------------------------*/
-#ifndef DEBUG
-#define STUB_DEBUG_FILES
-#endif /* DEBUG */
-
-/*-------------------------------------------------------------------------*/
-
#endif /* __LINUX_FUSBH200_H */
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index ada0a52797b1..e07669993f58 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -54,7 +54,6 @@
* DWA).
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>
@@ -86,7 +85,7 @@ static int __hwahc_set_cluster_id(struct hwahc *hwahc, u8 cluster_id)
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
cluster_id,
wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(dev, "Cannot set WUSB Cluster ID to 0x%02x: %d\n",
cluster_id, result);
@@ -106,7 +105,7 @@ static int __hwahc_op_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
interval << 8 | slots,
wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
}
/*
@@ -224,7 +223,7 @@ static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb,
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- return wa_urb_dequeue(&hwahc->wa, urb);
+ return wa_urb_dequeue(&hwahc->wa, urb, status);
}
/*
@@ -281,7 +280,7 @@ static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc, int delay)
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
delay * 1000,
iface_no,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
if (ret == 0)
msleep(delay);
@@ -310,7 +309,7 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
stream_index,
wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Cannot set WUSB stream index: %d\n", result);
goto out;
@@ -321,7 +320,7 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
WUSB_REQ_SET_WUSB_MAS,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- mas_le, 32, 1000 /* FIXME: arbitrary */);
+ mas_le, 32, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
out:
@@ -355,7 +354,7 @@ static int __hwahc_op_mmcie_add(struct wusbhc *wusbhc, u8 interval,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
interval << 8 | repeat_cnt,
handle << 8 | iface_no,
- wuie, wuie->bLength, 1000 /* FIXME: arbitrary */);
+ wuie, wuie->bLength, USB_CTRL_SET_TIMEOUT);
}
/*
@@ -372,7 +371,7 @@ static int __hwahc_op_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
WUSB_REQ_REMOVE_MMC_IE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, handle << 8 | iface_no,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
}
/*
@@ -415,7 +414,7 @@ static int __hwahc_op_dev_info_set(struct wusbhc *wusbhc,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, wusb_dev->port_idx << 8 | iface_no,
dev_info, sizeof(struct hwa_dev_info),
- 1000 /* FIXME: arbitrary */);
+ USB_CTRL_SET_TIMEOUT);
kfree(dev_info);
return ret;
}
@@ -455,7 +454,7 @@ static int __hwahc_dev_set_key(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
USB_DT_KEY << 8 | key_idx,
port_idx << 8 | iface_no,
- keyd, keyd_len, 1000 /* FIXME: arbitrary */);
+ keyd, keyd_len, USB_CTRL_SET_TIMEOUT);
kzfree(keyd); /* clear keys etc. */
return result;
@@ -497,7 +496,7 @@ static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
USB_REQ_SET_ENCRYPTION,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
encryption_value, port_idx << 8 | iface_no,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(wusbhc->dev, "Can't set host's WUSB encryption for "
"port index %u to %s (value %d): %d\n", port_idx,
@@ -791,6 +790,7 @@ static int hwahc_probe(struct usb_interface *usb_iface,
dev_err(dev, "Cannot add HCD: %d\n", result);
goto error_add_hcd;
}
+ device_wakeup_enable(usb_hcd->self.controller);
result = wusbhc_b_create(&hwahc->wusbhc);
if (result < 0) {
dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result);
diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c
index ec98ecee3517..4f320d050da7 100644
--- a/drivers/usb/host/imx21-dbg.c
+++ b/drivers/usb/host/imx21-dbg.c
@@ -18,6 +18,10 @@
/* this file is part of imx21-hcd.c */
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define DEBUG
+#endif
+
#ifndef DEBUG
static inline void create_debug_files(struct imx21 *imx21) { }
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index adb01d950a16..207bad99301f 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -62,6 +62,10 @@
#include "imx21-hcd.h"
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define DEBUG
+#endif
+
#ifdef DEBUG
#define DEBUG_LOG_FRAME(imx21, etd, event) \
(etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
@@ -1906,6 +1910,7 @@ static int imx21_probe(struct platform_device *pdev)
dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
goto failed_add_hcd;
}
+ device_wakeup_enable(hcd->self.controller);
return 0;
@@ -1926,7 +1931,7 @@ failed_request_mem:
static struct platform_driver imx21_hcd_driver = {
.driver = {
- .name = (char *)hcd_name,
+ .name = hcd_name,
},
.probe = imx21_probe,
.remove = imx21_remove,
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
index c005770a73e9..05122f8a6983 100644
--- a/drivers/usb/host/imx21-hcd.h
+++ b/drivers/usb/host/imx21-hcd.h
@@ -24,6 +24,10 @@
#ifndef __LINUX_IMX21_HCD_H__
#define __LINUX_IMX21_HCD_H__
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define DEBUG
+#endif
+
#include <linux/platform_data/usb-mx2.h>
#define NUM_ISO_ETDS 2
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index c7d0f8f231be..240e792c81a7 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -60,7 +60,6 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/usb.h>
@@ -1645,6 +1644,8 @@ static int isp116x_probe(struct platform_device *pdev)
if (ret)
goto err6;
+ device_wakeup_enable(hcd->self.controller);
+
ret = create_debug_file(isp116x);
if (ret) {
ERR("Couldn't create debugfs entry\n");
@@ -1705,7 +1706,7 @@ static struct platform_driver isp116x_driver = {
.suspend = isp116x_suspend,
.resume = isp116x_resume,
.driver = {
- .name = (char *)hcd_name,
+ .name = hcd_name,
.owner = THIS_MODULE,
},
};
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 935a2dd367a8..875bcfd3ec1a 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -67,7 +67,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
@@ -2746,6 +2745,8 @@ static int isp1362_probe(struct platform_device *pdev)
retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
if (retval != 0)
goto err6;
+ device_wakeup_enable(hcd->self.controller);
+
pr_info("%s, irq %d\n", hcd->product_desc, irq);
create_debug_file(isp1362_hcd);
@@ -2829,7 +2830,7 @@ static struct platform_driver isp1362_driver = {
.suspend = isp1362_suspend,
.resume = isp1362_resume,
.driver = {
- .name = (char *)hcd_name,
+ .name = hcd_name,
.owner = THIS_MODULE,
},
};
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 2facee53eab1..51a0ae9cdd1d 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -2250,6 +2250,7 @@ struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
ret = usb_add_hcd(hcd, irq, irqflags);
if (ret)
goto err_unmap;
+ device_wakeup_enable(hcd->self.controller);
return hcd;
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 418444ebb1b8..091ae4905cfc 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -136,61 +136,58 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
struct ohci_hcd *ohci;
int retval;
struct usb_hcd *hcd = NULL;
-
- if (pdev->num_resources != 2) {
- pr_debug("hcd probe: invalid num_resources");
- return -ENODEV;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_dbg(dev, "hcd probe: missing memory resource\n");
+ return -ENXIO;
}
- if ((pdev->resource[0].flags != IORESOURCE_MEM)
- || (pdev->resource[1].flags != IORESOURCE_IRQ)) {
- pr_debug("hcd probe: invalid resource type\n");
- return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_dbg(dev, "hcd probe: missing irq resource\n");
+ return irq;
}
- hcd = usb_create_hcd(driver, &pdev->dev, "at91");
+ hcd = usb_create_hcd(driver, dev, "at91");
if (!hcd)
return -ENOMEM;
- hcd->rsrc_start = pdev->resource[0].start;
- hcd->rsrc_len = resource_size(&pdev->resource[0]);
-
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- pr_debug("request_mem_region failed\n");
- retval = -EBUSY;
- goto err1;
- }
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- pr_debug("ioremap failed\n");
- retval = -EIO;
- goto err2;
+ hcd->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
+ goto err;
}
- iclk = clk_get(&pdev->dev, "ohci_clk");
+ iclk = devm_clk_get(dev, "ohci_clk");
if (IS_ERR(iclk)) {
- dev_err(&pdev->dev, "failed to get ohci_clk\n");
+ dev_err(dev, "failed to get ohci_clk\n");
retval = PTR_ERR(iclk);
- goto err3;
+ goto err;
}
- fclk = clk_get(&pdev->dev, "uhpck");
+ fclk = devm_clk_get(dev, "uhpck");
if (IS_ERR(fclk)) {
- dev_err(&pdev->dev, "failed to get uhpck\n");
+ dev_err(dev, "failed to get uhpck\n");
retval = PTR_ERR(fclk);
- goto err4;
+ goto err;
}
- hclk = clk_get(&pdev->dev, "hclk");
+ hclk = devm_clk_get(dev, "hclk");
if (IS_ERR(hclk)) {
- dev_err(&pdev->dev, "failed to get hclk\n");
+ dev_err(dev, "failed to get hclk\n");
retval = PTR_ERR(hclk);
- goto err5;
+ goto err;
}
if (IS_ENABLED(CONFIG_COMMON_CLK)) {
- uclk = clk_get(&pdev->dev, "usb_clk");
+ uclk = devm_clk_get(dev, "usb_clk");
if (IS_ERR(uclk)) {
- dev_err(&pdev->dev, "failed to get uclk\n");
+ dev_err(dev, "failed to get uclk\n");
retval = PTR_ERR(uclk);
- goto err6;
+ goto err;
}
}
@@ -199,29 +196,16 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
ohci->num_ports = board->ports;
at91_start_hc(pdev);
- retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
- if (retval == 0)
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (retval == 0) {
+ device_wakeup_enable(hcd->self.controller);
return retval;
+ }
/* Error handling */
at91_stop_hc(pdev);
- if (IS_ENABLED(CONFIG_COMMON_CLK))
- clk_put(uclk);
- err6:
- clk_put(hclk);
- err5:
- clk_put(fclk);
- err4:
- clk_put(iclk);
-
- err3:
- iounmap(hcd->regs);
-
- err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-
- err1:
+ err:
usb_put_hcd(hcd);
return retval;
}
@@ -244,16 +228,7 @@ static void usb_hcd_at91_remove(struct usb_hcd *hcd,
{
usb_remove_hcd(hcd);
at91_stop_hc(pdev);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
-
- if (IS_ENABLED(CONFIG_COMMON_CLK))
- clk_put(uclk);
- clk_put(hclk);
- clk_put(fclk);
- clk_put(iclk);
- fclk = iclk = hclk = NULL;
}
/*-------------------------------------------------------------------------*/
@@ -635,10 +610,17 @@ ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ bool do_wakeup = device_may_wakeup(&pdev->dev);
+ int ret;
- if (device_may_wakeup(&pdev->dev))
+ if (do_wakeup)
enable_irq_wake(hcd->irq);
+ ret = ohci_suspend(hcd, do_wakeup);
+ if (ret) {
+ disable_irq_wake(hcd->irq);
+ return ret;
+ }
/*
* The integrated transceivers seem unable to notice disconnect,
* reconnect, or wakeup without the 48 MHz clock active. so for
@@ -657,7 +639,7 @@ ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg)
at91_stop_clock();
}
- return 0;
+ return ret;
}
static int ohci_hcd_at91_drv_resume(struct platform_device *pdev)
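The ohci-at91 conversion above moves the probe path to managed (devm_*) resources, so the hand-rolled iounmap/release_mem_region/clk_put unwinding collapses into a single error label. Below is a minimal sketch of that probe shape, not the driver itself; my_ohci_probe, my_ohci_hc_driver and the "hclk" clock name are illustrative assumptions.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

/* Assumed to be filled in elsewhere, e.g. via ohci_init_driver(). */
static struct hc_driver my_ohci_hc_driver;

static int my_ohci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct usb_hcd *hcd;
	struct resource *res;
	struct clk *clk;
	int irq, ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	hcd = usb_create_hcd(&my_ohci_hc_driver, dev, dev_name(dev));
	if (!hcd)
		return -ENOMEM;

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	/*
	 * devm_ioremap_resource() requests and maps the region in one step and
	 * is released automatically on detach, so the error path needs no
	 * iounmap()/release_mem_region().
	 */
	hcd->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hcd->regs)) {
		ret = PTR_ERR(hcd->regs);
		goto err_put;
	}

	clk = devm_clk_get(dev, "hclk");	/* clock name is an assumption */
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto err_put;
	}

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		goto err_put;

	/* Let the root hub act as a wakeup source by default. */
	device_wakeup_enable(hcd->self.controller);
	return 0;

err_put:
	usb_put_hcd(hcd);
	return ret;
}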
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index 9be59f11e051..df06be6b47f5 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -300,41 +300,28 @@ static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
if (hub == NULL)
return -ENODEV;
- usb11_clk = clk_get(&pdev->dev, "usb11");
+ usb11_clk = devm_clk_get(&pdev->dev, "usb11");
if (IS_ERR(usb11_clk))
return PTR_ERR(usb11_clk);
- usb20_clk = clk_get(&pdev->dev, "usb20");
- if (IS_ERR(usb20_clk)) {
- error = PTR_ERR(usb20_clk);
- goto err0;
- }
+ usb20_clk = devm_clk_get(&pdev->dev, "usb20");
+ if (IS_ERR(usb20_clk))
+ return PTR_ERR(usb20_clk);
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
- if (!hcd) {
- error = -ENOMEM;
- goto err1;
- }
+ if (!hcd)
+ return -ENOMEM;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- error = -ENODEV;
- goto err2;
- }
+ if (!mem)
+ return -ENODEV;
hcd->rsrc_start = mem->start;
hcd->rsrc_len = resource_size(mem);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- dev_dbg(&pdev->dev, "request_mem_region failed\n");
- error = -EBUSY;
- goto err2;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_err(&pdev->dev, "ioremap failed\n");
- error = -ENOMEM;
- goto err3;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(hcd->regs)) {
+ error = PTR_ERR(hcd->regs);
+ goto err;
}
ohci_hcd_init(hcd_to_ohci(hcd));
@@ -342,11 +329,13 @@ static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
error = -ENODEV;
- goto err4;
+ goto err;
}
error = usb_add_hcd(hcd, irq, 0);
if (error)
- goto err4;
+ goto err;
+
+ device_wakeup_enable(hcd->self.controller);
if (hub->ocic_notify) {
error = hub->ocic_notify(ohci_da8xx_ocic_handler);
@@ -355,16 +344,8 @@ static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
}
usb_remove_hcd(hcd);
-err4:
- iounmap(hcd->regs);
-err3:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-err2:
+err:
usb_put_hcd(hcd);
-err1:
- clk_put(usb20_clk);
-err0:
- clk_put(usb11_clk);
return error;
}
@@ -384,11 +365,7 @@ usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev)
hub->ocic_notify(NULL);
usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
- clk_put(usb20_clk);
- clk_put(usb11_clk);
}
static int ohci_hcd_da8xx_drv_probe(struct platform_device *dev)
@@ -406,19 +383,27 @@ static int ohci_hcd_da8xx_drv_remove(struct platform_device *dev)
}
#ifdef CONFIG_PM
-static int ohci_da8xx_suspend(struct platform_device *dev, pm_message_t message)
+static int ohci_da8xx_suspend(struct platform_device *pdev,
+ pm_message_t message)
{
- struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ bool do_wakeup = device_may_wakeup(&pdev->dev);
+ int ret;
+
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
+ ret = ohci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
+
ohci_da8xx_clock(0);
hcd->state = HC_STATE_SUSPENDED;
- dev->dev.power.power_state = PMSG_SUSPEND;
- return 0;
+
+ return ret;
}
static int ohci_da8xx_resume(struct platform_device *dev)
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index 3fca52ec02ac..45032e933e18 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -9,8 +9,6 @@
/*-------------------------------------------------------------------------*/
-#ifdef DEBUG
-
#define edstring(ed_type) ({ char *temp; \
switch (ed_type) { \
case PIPE_CONTROL: temp = "ctrl"; break; \
@@ -20,57 +18,6 @@
} temp;})
#define pipestring(pipe) edstring(usb_pipetype(pipe))
-/* debug| print the main components of an URB
- * small: 0) header + data packets 1) just header
- */
-static void __maybe_unused
-urb_print(struct urb * urb, char * str, int small, int status)
-{
- unsigned int pipe= urb->pipe;
-
- if (!urb->dev || !urb->dev->bus) {
- printk(KERN_DEBUG "%s URB: no dev\n", str);
- return;
- }
-
-#ifndef OHCI_VERBOSE_DEBUG
- if (status != 0)
-#endif
- printk(KERN_DEBUG "%s %p dev=%d ep=%d%s-%s flags=%x len=%d/%d stat=%d\n",
- str,
- urb,
- usb_pipedevice (pipe),
- usb_pipeendpoint (pipe),
- usb_pipeout (pipe)? "out" : "in",
- pipestring (pipe),
- urb->transfer_flags,
- urb->actual_length,
- urb->transfer_buffer_length,
- status);
-
-#ifdef OHCI_VERBOSE_DEBUG
- if (!small) {
- int i, len;
-
- if (usb_pipecontrol (pipe)) {
- printk (KERN_DEBUG "%s: setup(8):", __FILE__);
- for (i = 0; i < 8 ; i++)
- printk (" %02x", ((__u8 *) urb->setup_packet) [i]);
- printk ("\n");
- }
- if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) {
- printk (KERN_DEBUG "%s: data(%d/%d):", __FILE__,
- urb->actual_length,
- urb->transfer_buffer_length);
- len = usb_pipeout (pipe)?
- urb->transfer_buffer_length: urb->actual_length;
- for (i = 0; i < 16 && i < len; i++)
- printk (" %02x", ((__u8 *) urb->transfer_buffer) [i]);
- printk ("%s stat:%d\n", i < len? "...": "", status);
- }
- }
-#endif
-}
#define ohci_dbg_sw(ohci, next, size, format, arg...) \
do { \
@@ -407,22 +354,8 @@ ohci_dump_ed (const struct ohci_hcd *ohci, const char *label,
}
}
-#else
-static inline void ohci_dump (struct ohci_hcd *controller, int verbose) {}
-
-#undef OHCI_VERBOSE_DEBUG
-
-#endif /* DEBUG */
-
/*-------------------------------------------------------------------------*/
-#ifdef STUB_DEBUG_FILES
-
-static inline void create_debug_files (struct ohci_hcd *bus) { }
-static inline void remove_debug_files (struct ohci_hcd *bus) { }
-
-#else
-
static int debug_async_open(struct inode *, struct file *);
static int debug_periodic_open(struct inode *, struct file *);
static int debug_registers_open(struct inode *, struct file *);
@@ -871,7 +804,5 @@ static inline void remove_debug_files (struct ohci_hcd *ohci)
debugfs_remove(ohci->debug_dir);
}
-#endif
-
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index 91ec9b2cd378..68588d8a09bb 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -146,6 +146,7 @@ skip_phy:
dev_err(&pdev->dev, "Failed to add USB HCD\n");
goto fail_add_hcd;
}
+ device_wakeup_enable(hcd->self.controller);
return 0;
fail_add_hcd:
@@ -191,23 +192,14 @@ static int exynos_ohci_suspend(struct device *dev)
struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct platform_device *pdev = to_platform_device(dev);
+ bool do_wakeup = device_may_wakeup(dev);
unsigned long flags;
- int rc = 0;
+ int rc = ohci_suspend(hcd, do_wakeup);
- /*
- * Root hub was already suspended. Disable irq emission and
- * mark HW unaccessible, bail out if RH has been resumed. Use
- * the spinlock to properly synchronize with possible pending
- * RH suspend or resume activity.
- */
- spin_lock_irqsave(&ohci->lock, flags);
- if (ohci->rh_state != OHCI_RH_SUSPENDED &&
- ohci->rh_state != OHCI_RH_HALTED) {
- rc = -EINVAL;
- goto fail;
- }
+ if (rc)
+ return rc;
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ spin_lock_irqsave(&ohci->lock, flags);
if (exynos_ohci->otg)
exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
@@ -216,10 +208,9 @@ static int exynos_ohci_suspend(struct device *dev)
clk_disable_unprepare(exynos_ohci->clk);
-fail:
spin_unlock_irqrestore(&ohci->lock, flags);
- return rc;
+ return 0;
}
static int exynos_ohci_resume(struct device *dev)
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 8ada13f8dde2..3586460fb2a1 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -51,8 +51,6 @@
/*-------------------------------------------------------------------------*/
-#undef OHCI_VERBOSE_DEBUG /* not always helpful */
-
/* For initializing controller (mask in an HCFS mode too) */
#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
#define OHCI_INTR_INIT \
@@ -127,10 +125,6 @@ static int ohci_urb_enqueue (
unsigned long flags;
int retval = 0;
-#ifdef OHCI_VERBOSE_DEBUG
- urb_print(urb, "SUB", usb_pipein(pipe), -EINPROGRESS);
-#endif
-
/* every endpoint has a ed, locate and maybe (re)initialize it */
if (! (ed = ed_get (ohci, urb->ep, urb->dev, pipe, urb->interval)))
return -ENOMEM;
@@ -284,10 +278,6 @@ static int ohci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
unsigned long flags;
int rc;
-#ifdef OHCI_VERBOSE_DEBUG
- urb_print(urb, "UNLINK", 1, status);
-#endif
-
spin_lock_irqsave (&ohci->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc) {
@@ -840,7 +830,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
}
if (ints & OHCI_INTR_RHSC) {
- ohci_vdbg(ohci, "rhsc\n");
+ ohci_dbg(ohci, "rhsc\n");
ohci->next_statechange = jiffies + STATECHANGE_DELAY;
ohci_writel(ohci, OHCI_INTR_RD | OHCI_INTR_RHSC,
&regs->intrstatus);
@@ -862,7 +852,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
* this might not happen.
*/
else if (ints & OHCI_INTR_RD) {
- ohci_vdbg(ohci, "resume detect\n");
+ ohci_dbg(ohci, "resume detect\n");
ohci_writel(ohci, OHCI_INTR_RD, &regs->intrstatus);
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
if (ohci->autostop) {
@@ -1036,6 +1026,7 @@ int ohci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
unsigned long flags;
+ int rc = 0;
/* Disable irq emission and mark HW unaccessible. Use
* the spinlock to properly synchronize with possible pending
@@ -1048,7 +1039,13 @@ int ohci_suspend(struct usb_hcd *hcd, bool do_wakeup)
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
spin_unlock_irqrestore (&ohci->lock, flags);
- return 0;
+ synchronize_irq(hcd->irq);
+
+ if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
+ ohci_resume(hcd, false);
+ rc = -EBUSY;
+ }
+ return rc;
}
EXPORT_SYMBOL_GPL(ohci_suspend);
@@ -1233,13 +1230,11 @@ static int __init ohci_hcd_mod_init(void)
sizeof (struct ed), sizeof (struct td));
set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
-#ifdef DEBUG
ohci_debug_root = debugfs_create_dir("ohci", usb_debug_root);
if (!ohci_debug_root) {
retval = -ENOENT;
goto error_debug;
}
-#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
retval = ps3_ohci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
@@ -1314,11 +1309,9 @@ static int __init ohci_hcd_mod_init(void)
ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
error_ps3:
#endif
-#ifdef DEBUG
debugfs_remove(ohci_debug_root);
ohci_debug_root = NULL;
error_debug:
-#endif
clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
return retval;
@@ -1348,9 +1341,7 @@ static void __exit ohci_hcd_mod_exit(void)
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
#endif
-#ifdef DEBUG
debugfs_remove(ohci_debug_root);
-#endif
clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
}
module_exit(ohci_hcd_mod_exit);
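The ohci-hcd.c hunk above makes ohci_suspend() synchronize with the interrupt handler and return -EBUSY when a wakeup is already pending on a wakeup-enabled controller. A minimal sketch of how a platform bus glue is expected to call it (the at91, da8xx, s3c2410 and spear hunks in this series follow this shape); the my_* naming and the trailing clock comment are illustrative assumptions.

#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

/* Exported by ohci-hcd.c; normally picked up through the driver's ohci.h. */
extern int ohci_suspend(struct usb_hcd *hcd, bool do_wakeup);

static int my_ohci_drv_suspend(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	bool do_wakeup = device_may_wakeup(dev);
	int ret;

	if (do_wakeup)
		enable_irq_wake(hcd->irq);

	/*
	 * ohci_suspend() quiesces the controller; if a wakeup-capable port
	 * already has a wakeup pending it resumes the root hub and returns
	 * -EBUSY, in which case the glue must back out instead of gating
	 * clocks.
	 */
	ret = ohci_suspend(hcd, do_wakeup);
	if (ret) {
		if (do_wakeup)
			disable_irq_wake(hcd->irq);
		return ret;
	}

	/* platform-specific clock/transceiver shutdown would go here */
	return 0;
}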
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 61705a760e7d..c81c8721cc5a 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -725,10 +725,8 @@ static int ohci_hub_control (
temp = roothub_portstatus (ohci, wIndex);
put_unaligned_le32(temp, buf);
-#ifndef OHCI_VERBOSE_DEBUG
- if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
-#endif
- dbg_port (ohci, "GetStatus", wIndex, temp);
+ if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
+ dbg_port(ohci, "GetStatus", wIndex, temp);
break;
case SetHubFeature:
switch (wValue) {
diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c
index d4ef53990d73..af8dc1b92d75 100644
--- a/drivers/usb/host/ohci-jz4740.c
+++ b/drivers/usb/host/ohci-jz4740.c
@@ -174,31 +174,23 @@ static int jz4740_ohci_probe(struct platform_device *pdev)
jz4740_ohci = hcd_to_jz4740_hcd(hcd);
- res = request_mem_region(res->start, resource_size(res), hcd_name);
- if (!res) {
- dev_err(&pdev->dev, "Failed to request mem region.\n");
- ret = -EBUSY;
- goto err_free;
- }
-
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- hcd->regs = ioremap(res->start, resource_size(res));
- if (!hcd->regs) {
- dev_err(&pdev->dev, "Failed to ioremap registers.\n");
- ret = -EBUSY;
- goto err_release_mem;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
+ goto err_free;
}
- jz4740_ohci->clk = clk_get(&pdev->dev, "uhc");
+ jz4740_ohci->clk = devm_clk_get(&pdev->dev, "uhc");
if (IS_ERR(jz4740_ohci->clk)) {
ret = PTR_ERR(jz4740_ohci->clk);
dev_err(&pdev->dev, "Failed to get clock: %d\n", ret);
- goto err_iounmap;
+ goto err_free;
}
- jz4740_ohci->vbus = regulator_get(&pdev->dev, "vbus");
+ jz4740_ohci->vbus = devm_regulator_get(&pdev->dev, "vbus");
if (IS_ERR(jz4740_ohci->vbus))
jz4740_ohci->vbus = NULL;
@@ -217,21 +209,15 @@ static int jz4740_ohci_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to add hcd: %d\n", ret);
goto err_disable;
}
+ device_wakeup_enable(hcd->self.controller);
return 0;
err_disable:
- if (jz4740_ohci->vbus) {
+ if (jz4740_ohci->vbus)
regulator_disable(jz4740_ohci->vbus);
- regulator_put(jz4740_ohci->vbus);
- }
clk_disable(jz4740_ohci->clk);
- clk_put(jz4740_ohci->clk);
-err_iounmap:
- iounmap(hcd->regs);
-err_release_mem:
- release_mem_region(res->start, resource_size(res));
err_free:
usb_put_hcd(hcd);
@@ -245,16 +231,10 @@ static int jz4740_ohci_remove(struct platform_device *pdev)
usb_remove_hcd(hcd);
- if (jz4740_ohci->vbus) {
+ if (jz4740_ohci->vbus)
regulator_disable(jz4740_ohci->vbus);
- regulator_put(jz4740_ohci->vbus);
- }
clk_disable(jz4740_ohci->clk);
- clk_put(jz4740_ohci->clk);
-
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index e99db8a6d55f..ba180ed0f81c 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -196,17 +196,17 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
__raw_writel(USB_SLAVE_HCLK_EN | PAD_CONTROL_LAST_DRIVEN, USB_CTRL);
/* Enable USB PLL */
- usb_pll_clk = clk_get(&pdev->dev, "ck_pll5");
+ usb_pll_clk = devm_clk_get(&pdev->dev, "ck_pll5");
if (IS_ERR(usb_pll_clk)) {
dev_err(&pdev->dev, "failed to acquire USB PLL\n");
ret = PTR_ERR(usb_pll_clk);
- goto fail_pll;
+ goto fail_disable;
}
ret = clk_enable(usb_pll_clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to start USB PLL\n");
- goto fail_pllen;
+ goto fail_disable;
}
ret = clk_set_rate(usb_pll_clk, 48000);
@@ -216,21 +216,21 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
}
/* Enable USB device clock */
- usb_dev_clk = clk_get(&pdev->dev, "ck_usbd");
+ usb_dev_clk = devm_clk_get(&pdev->dev, "ck_usbd");
if (IS_ERR(usb_dev_clk)) {
dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n");
ret = PTR_ERR(usb_dev_clk);
- goto fail_dev;
+ goto fail_rate;
}
ret = clk_enable(usb_dev_clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to start USB DEV Clock\n");
- goto fail_deven;
+ goto fail_rate;
}
/* Enable USB otg clocks */
- usb_otg_clk = clk_get(&pdev->dev, "ck_usb_otg");
+ usb_otg_clk = devm_clk_get(&pdev->dev, "ck_usb_otg");
if (IS_ERR(usb_otg_clk)) {
dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n");
ret = PTR_ERR(usb_otg_clk);
@@ -242,7 +242,7 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
ret = clk_enable(usb_otg_clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to start USB DEV Clock\n");
- goto fail_otgen;
+ goto fail_otg;
}
isp1301_configure();
@@ -274,26 +274,20 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "at 0x%p, irq %d\n", hcd->regs, hcd->irq);
ret = usb_add_hcd(hcd, irq, 0);
- if (ret == 0)
+ if (ret == 0) {
+ device_wakeup_enable(hcd->self.controller);
return ret;
+ }
ohci_nxp_stop_hc();
fail_resource:
usb_put_hcd(hcd);
fail_hcd:
clk_disable(usb_otg_clk);
-fail_otgen:
- clk_put(usb_otg_clk);
fail_otg:
clk_disable(usb_dev_clk);
-fail_deven:
- clk_put(usb_dev_clk);
-fail_dev:
fail_rate:
clk_disable(usb_pll_clk);
-fail_pllen:
- clk_put(usb_pll_clk);
-fail_pll:
fail_disable:
isp1301_i2c_client = NULL;
return ret;
@@ -307,9 +301,7 @@ static int ohci_hcd_nxp_remove(struct platform_device *pdev)
ohci_nxp_stop_hc();
usb_put_hcd(hcd);
clk_disable(usb_pll_clk);
- clk_put(usb_pll_clk);
clk_disable(usb_dev_clk);
- clk_put(usb_dev_clk);
i2c_unregister_device(isp1301_i2c_client);
isp1301_i2c_client = NULL;
diff --git a/drivers/usb/host/ohci-octeon.c b/drivers/usb/host/ohci-octeon.c
index 6c16dcef15c6..15af8954085e 100644
--- a/drivers/usb/host/ohci-octeon.c
+++ b/drivers/usb/host/ohci-octeon.c
@@ -138,20 +138,12 @@ static int ohci_octeon_drv_probe(struct platform_device *pdev)
hcd->rsrc_start = res_mem->start;
hcd->rsrc_len = resource_size(res_mem);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
- OCTEON_OHCI_HCD_NAME)) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- ret = -EBUSY;
+ reg_base = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(reg_base)) {
+ ret = PTR_ERR(reg_base);
goto err1;
}
- reg_base = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!reg_base) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto err2;
- }
-
ohci_octeon_hw_start();
hcd->regs = reg_base;
@@ -168,19 +160,18 @@ static int ohci_octeon_drv_probe(struct platform_device *pdev)
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
- goto err3;
+ goto err2;
}
+ device_wakeup_enable(hcd->self.controller);
+
platform_set_drvdata(pdev, hcd);
return 0;
-err3:
+err2:
ohci_octeon_hw_stop();
- iounmap(hcd->regs);
-err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
usb_put_hcd(hcd);
return ret;
@@ -193,8 +184,6 @@ static int ohci_octeon_drv_remove(struct platform_device *pdev)
usb_remove_hcd(hcd);
ohci_octeon_hw_stop();
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
return 0;
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index f253214741ba..c923cafcaca7 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -311,14 +311,14 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver,
struct usb_hcd *hcd = 0;
if (pdev->num_resources != 2) {
- printk(KERN_ERR "hcd probe: invalid num_resources: %i\n",
+ dev_err(&pdev->dev, "invalid num_resources: %i\n",
pdev->num_resources);
return -ENODEV;
}
if (pdev->resource[0].flags != IORESOURCE_MEM
|| pdev->resource[1].flags != IORESOURCE_IRQ) {
- printk(KERN_ERR "hcd probe: invalid resource type\n");
+ dev_err(&pdev->dev, "invalid resource type\n");
return -ENODEV;
}
@@ -367,6 +367,7 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver,
if (retval)
goto err3;
+ device_wakeup_enable(hcd->self.controller);
return 0;
err3:
iounmap(hcd->regs);
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index 21457417a856..ec15aebe8786 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -130,6 +130,7 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev)
dev_dbg(dev, "failed to add hcd with err %d\n", ret);
goto err_add_hcd;
}
+ device_wakeup_enable(hcd->self.controller);
return 0;
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index f351ff5b171f..68f674cd095f 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -108,6 +108,8 @@ static int ohci_platform_probe(struct platform_device *dev)
if (err)
goto err_put_hcd;
+ device_wakeup_enable(hcd->self.controller);
+
platform_set_drvdata(dev, hcd);
return err;
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 81f3eba215c1..965e3e9e688a 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -115,24 +115,18 @@ static int ohci_hcd_ppc_of_probe(struct platform_device *op)
hcd->rsrc_start = res.start;
hcd->rsrc_len = resource_size(&res);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
- rv = -EBUSY;
+ hcd->regs = devm_ioremap_resource(&op->dev, &res);
+ if (IS_ERR(hcd->regs)) {
+ rv = PTR_ERR(hcd->regs);
goto err_rmr;
}
irq = irq_of_parse_and_map(dn, 0);
if (irq == NO_IRQ) {
- printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
+ dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
+ __FILE__);
rv = -EBUSY;
- goto err_irq;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
- rv = -ENOMEM;
- goto err_ioremap;
+ goto err_rmr;
}
ohci = hcd_to_ohci(hcd);
@@ -147,8 +141,10 @@ static int ohci_hcd_ppc_of_probe(struct platform_device *op)
ohci_hcd_init(ohci);
rv = usb_add_hcd(hcd, irq, 0);
- if (rv == 0)
+ if (rv == 0) {
+ device_wakeup_enable(hcd->self.controller);
return 0;
+ }
/* by now, 440epx is known to show usb_23 erratum */
np = of_find_compatible_node(NULL, NULL, "ibm,usb-ehci-440epx");
@@ -174,11 +170,7 @@ static int ohci_hcd_ppc_of_probe(struct platform_device *op)
pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__);
}
- iounmap(hcd->regs);
-err_ioremap:
irq_dispose_mapping(irq);
-err_irq:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err_rmr:
usb_put_hcd(hcd);
@@ -193,9 +185,7 @@ static int ohci_hcd_ppc_of_remove(struct platform_device *op)
usb_remove_hcd(hcd);
- iounmap(hcd->regs);
irq_dispose_mapping(hcd->irq);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
index 7d35cd9e2862..71d8bc4c27f6 100644
--- a/drivers/usb/host/ohci-ps3.c
+++ b/drivers/usb/host/ohci-ps3.c
@@ -173,6 +173,7 @@ static int ps3_ohci_probe(struct ps3_system_bus_device *dev)
goto fail_add_hcd;
}
+ device_wakeup_enable(hcd->self.controller);
return result;
fail_add_hcd:
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 9b7435f0dcd6..d21d5fefa76c 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -388,37 +388,28 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
return -ENXIO;
}
- usb_clk = clk_get(&pdev->dev, NULL);
+ usb_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usb_clk))
return PTR_ERR(usb_clk);
hcd = usb_create_hcd (driver, &pdev->dev, "pxa27x");
- if (!hcd) {
- retval = -ENOMEM;
- goto err0;
- }
+ if (!hcd)
+ return -ENOMEM;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
pr_err("no resource of IORESOURCE_MEM");
retval = -ENXIO;
- goto err1;
+ goto err;
}
hcd->rsrc_start = r->start;
hcd->rsrc_len = resource_size(r);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- pr_debug("request_mem_region failed");
- retval = -EBUSY;
- goto err1;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- pr_debug("ioremap failed");
- retval = -ENOMEM;
- goto err2;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
+ goto err;
}
/* initialize "struct pxa27x_ohci" */
@@ -429,7 +420,7 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
retval = pxa27x_start_hc(pxa_ohci, &pdev->dev);
if (retval < 0) {
pr_debug("pxa27x_start_hc failed");
- goto err3;
+ goto err;
}
/* Select Power Management Mode */
@@ -443,18 +434,14 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
ohci->num_ports = 3;
retval = usb_add_hcd(hcd, irq, 0);
- if (retval == 0)
+ if (retval == 0) {
+ device_wakeup_enable(hcd->self.controller);
return retval;
+ }
pxa27x_stop_hc(pxa_ohci, &pdev->dev);
- err3:
- iounmap(hcd->regs);
- err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- err1:
+ err:
usb_put_hcd(hcd);
- err0:
- clk_put(usb_clk);
return retval;
}
@@ -478,9 +465,6 @@ void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *pdev)
usb_remove_hcd(hcd);
pxa27x_stop_hc(pxa_ohci, &pdev->dev);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- clk_put(pxa_ohci->clk);
usb_put_hcd(hcd);
}
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index e7f577e63624..d4253e319428 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -68,10 +68,6 @@ __acquires(ohci->lock)
break;
}
-#ifdef OHCI_VERBOSE_DEBUG
- urb_print(urb, "RET", usb_pipeout (urb->pipe), status);
-#endif
-
/* urb->complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
spin_unlock (&ohci->lock);
@@ -147,7 +143,7 @@ static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
unsigned i;
- ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
+ ohci_dbg(ohci, "link %sed %p branch %d [%dus.], interval %d\n",
(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
ed, ed->branch, ed->load, ed->interval);
@@ -294,7 +290,7 @@ static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
}
ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;
- ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
+ ohci_dbg(ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
ed, ed->branch, ed->load, ed->interval);
}
@@ -765,7 +761,7 @@ static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
urb->iso_frame_desc [td->index].status = cc_to_error [cc];
if (cc != TD_CC_NOERROR)
- ohci_vdbg (ohci,
+ ohci_dbg(ohci,
"urb %p iso td %p (%d) len %d cc %d\n",
urb, td, 1 + td->index, dlen, cc);
@@ -797,7 +793,7 @@ static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
}
if (cc != TD_CC_NOERROR && cc < 0x0E)
- ohci_vdbg (ohci,
+ ohci_dbg(ohci,
"urb %p td %p (%d) cc %d, len=%d/%d\n",
urb, td, 1 + td->index, cc,
urb->actual_length,
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index f90101b9cdb9..ff7c8f1c48fb 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -395,6 +395,7 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
if (retval != 0)
goto err_ioremap;
+ device_wakeup_enable(hcd->self.controller);
return 0;
err_ioremap:
@@ -426,28 +427,15 @@ static int ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev)
static int ohci_hcd_s3c2410_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct platform_device *pdev = to_platform_device(dev);
- unsigned long flags;
+ bool do_wakeup = device_may_wakeup(dev);
int rc = 0;
- /*
- * Root hub was already suspended. Disable irq emission and
- * mark HW unaccessible, bail out if RH has been resumed. Use
- * the spinlock to properly synchronize with possible pending
- * RH suspend or resume activity.
- */
- spin_lock_irqsave(&ohci->lock, flags);
- if (ohci->rh_state != OHCI_RH_SUSPENDED) {
- rc = -EINVAL;
- goto bail;
- }
-
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ rc = ohci_suspend(hcd, do_wakeup);
+ if (rc)
+ return rc;
s3c2410_stop_hc(pdev);
-bail:
- spin_unlock_irqrestore(&ohci->lock, flags);
return rc;
}
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index aa9e127bbe71..2ac266d692a2 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -211,8 +211,10 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
goto err2;
ret = usb_add_hcd(hcd, dev->irq[1], 0);
- if (ret == 0)
+ if (ret == 0) {
+ device_wakeup_enable(hcd->self.controller);
return ret;
+ }
sa1111_stop_hc(dev);
err2:
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index 2a5de5fecd8f..4e81c804c73e 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -168,6 +168,7 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval)
goto err5;
+ device_wakeup_enable(hcd->self.controller);
/* enable power and unmask interrupts */
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 6b02107d281d..8b29a0c04c23 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -81,17 +81,10 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
hcd->rsrc_start = pdev->resource[0].start;
hcd->rsrc_len = resource_size(res);
- if (!devm_request_mem_region(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len,
- hcd_name)) {
- dev_dbg(&pdev->dev, "request_mem_region failed\n");
- retval = -EBUSY;
- goto err_put_hcd;
- }
- hcd->regs = devm_ioremap(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_dbg(&pdev->dev, "ioremap failed\n");
- retval = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
goto err_put_hcd;
}
@@ -103,8 +96,10 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
ohci = hcd_to_ohci(hcd);
retval = usb_add_hcd(hcd, platform_get_irq(pdev, 0), 0);
- if (retval == 0)
+ if (retval == 0) {
+ device_wakeup_enable(hcd->self.controller);
return retval;
+ }
clk_disable_unprepare(sohci_p->clk);
err_put_hcd:
@@ -129,20 +124,26 @@ static int spear_ohci_hcd_drv_remove(struct platform_device *pdev)
}
#if defined(CONFIG_PM)
-static int spear_ohci_hcd_drv_suspend(struct platform_device *dev,
+static int spear_ohci_hcd_drv_suspend(struct platform_device *pdev,
pm_message_t message)
{
- struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct spear_ohci *sohci_p = to_spear_ohci(hcd);
+ bool do_wakeup = device_may_wakeup(&pdev->dev);
+ int ret;
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
+ ret = ohci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
+
clk_disable_unprepare(sohci_p->clk);
- return 0;
+ return ret;
}
static int spear_ohci_hcd_drv_resume(struct platform_device *dev)
diff --git a/drivers/usb/host/ohci-tilegx.c b/drivers/usb/host/ohci-tilegx.c
index 22540ab71f55..0b183e0b0a8a 100644
--- a/drivers/usb/host/ohci-tilegx.c
+++ b/drivers/usb/host/ohci-tilegx.c
@@ -159,6 +159,7 @@ static int ohci_hcd_tilegx_drv_probe(struct platform_device *pdev)
ret = usb_add_hcd(hcd, pdata->irq, IRQF_SHARED);
if (ret == 0) {
platform_set_drvdata(pdev, hcd);
+ device_wakeup_enable(hcd->self.controller);
return ret;
}
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index ecb09a5ada9c..bb409588d39c 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -27,7 +27,6 @@
/*#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
-#include <linux/init.h>
#include <linux/namei.h>
#include <linux/sched.h>*/
#include <linux/platform_device.h>
@@ -250,6 +249,7 @@ static int ohci_hcd_tmio_drv_probe(struct platform_device *dev)
if (ret)
goto err_add_hcd;
+ device_wakeup_enable(hcd->self.controller);
if (ret == 0)
return ret;
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index e2e5faa5a402..9250cada13f0 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -415,12 +415,11 @@ struct ohci_hcd {
struct ed *ed_to_check;
unsigned zf_delay;
-#ifdef DEBUG
struct dentry *debug_dir;
struct dentry *debug_async;
struct dentry *debug_periodic;
struct dentry *debug_registers;
-#endif
+
/* platform-specific data -- must come last */
unsigned long priv[0] __aligned(sizeof(s64));
@@ -474,10 +473,6 @@ static inline struct usb_hcd *ohci_to_hcd (const struct ohci_hcd *ohci)
/*-------------------------------------------------------------------------*/
-#ifndef DEBUG
-#define STUB_DEBUG_FILES
-#endif /* DEBUG */
-
#define ohci_dbg(ohci, fmt, args...) \
dev_dbg (ohci_to_hcd(ohci)->self.controller , fmt , ## args )
#define ohci_err(ohci, fmt, args...) \
@@ -487,12 +482,6 @@ static inline struct usb_hcd *ohci_to_hcd (const struct ohci_hcd *ohci)
#define ohci_warn(ohci, fmt, args...) \
dev_warn (ohci_to_hcd(ohci)->self.controller , fmt , ## args )
-#ifdef OHCI_VERBOSE_DEBUG
-# define ohci_vdbg ohci_dbg
-#else
-# define ohci_vdbg(ohci, fmt, args...) do { } while (0)
-#endif
-
/*-------------------------------------------------------------------------*/
/*
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 4a6df2d8f902..e07248b6ab67 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -29,7 +29,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
@@ -60,6 +59,10 @@
#define oxu_info(oxu, fmt, args...) \
dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define DEBUG
+#endif
+
static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
{
return container_of((void *) oxu, struct usb_hcd, hcd_priv);
@@ -3747,6 +3750,7 @@ static struct usb_hcd *oxu_create(struct platform_device *pdev,
if (ret < 0)
return ERR_PTR(ret);
+ device_wakeup_enable(hcd->self.controller);
return hcd;
}
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index dfbdd3aefe98..00661d305143 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -12,7 +12,6 @@
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/acpi.h>
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 2ad004ae747c..110b4b9ebeaa 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/list.h>
@@ -95,7 +94,7 @@ static int r8a66597_clock_enable(struct r8a66597 *r8a66597)
int i = 0;
if (r8a66597->pdata->on_chip) {
- clk_enable(r8a66597->clk);
+ clk_prepare_enable(r8a66597->clk);
do {
r8a66597_write(r8a66597, SCKE, SYSCFG0);
tmp = r8a66597_read(r8a66597, SYSCFG0);
@@ -139,7 +138,7 @@ static void r8a66597_clock_disable(struct r8a66597 *r8a66597)
udelay(1);
if (r8a66597->pdata->on_chip) {
- clk_disable(r8a66597->clk);
+ clk_disable_unprepare(r8a66597->clk);
} else {
r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
@@ -2514,6 +2513,7 @@ static int r8a66597_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to add hcd\n");
goto clean_up3;
}
+ device_wakeup_enable(hcd->self.controller);
return 0;
@@ -2534,7 +2534,7 @@ static struct platform_driver r8a66597_driver = {
.probe = r8a66597_probe,
.remove = r8a66597_remove,
.driver = {
- .name = (char *) hcd_name,
+ .name = hcd_name,
.owner = THIS_MODULE,
.pm = R8A66597_DEV_PM_OPS,
},
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 79620c39217e..a517151867af 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -39,7 +39,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
@@ -1732,6 +1731,8 @@ sl811h_probe(struct platform_device *dev)
if (retval != 0)
goto err6;
+ device_wakeup_enable(hcd->self.controller);
+
create_debug_file(sl811);
return retval;
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 469564e57a52..88a9bffe93df 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index e402beb5a069..c0671750671f 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3133,6 +3133,7 @@ static int u132_probe(struct platform_device *pdev)
u132_u132_put_kref(u132);
return retval;
} else {
+ device_wakeup_enable(hcd->self.controller);
u132_monitor_queue_work(u132, 100);
return 0;
}
@@ -3217,7 +3218,7 @@ static struct platform_driver u132_platform_driver = {
.suspend = u132_suspend,
.resume = u132_resume,
.driver = {
- .name = (char *)hcd_name,
+ .name = hcd_name,
.owner = THIS_MODULE,
},
};
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c
index 8e239cdd95d5..1b28a000d5c6 100644
--- a/drivers/usb/host/uhci-debug.c
+++ b/drivers/usb/host/uhci-debug.c
@@ -20,7 +20,7 @@
static struct dentry *uhci_debugfs_root;
-#ifdef DEBUG
+#ifdef CONFIG_DYNAMIC_DEBUG
/* Handle REALLY large printks so we don't overflow buffers */
static void lprintk(char *buf)
@@ -635,7 +635,7 @@ static const struct file_operations uhci_debug_operations = {
#endif /* CONFIG_DEBUG_FS */
-#else /* DEBUG */
+#else /* CONFIG_DYNAMIC_DEBUG */
static inline void lprintk(char *buf)
{}
diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c
index 53c23ff7d685..ab25dc397e8b 100644
--- a/drivers/usb/host/uhci-grlib.c
+++ b/drivers/usb/host/uhci-grlib.c
@@ -141,6 +141,7 @@ static int uhci_hcd_grlib_probe(struct platform_device *op)
if (rv)
goto err_uhci;
+ device_wakeup_enable(hcd->self.controller);
return 0;
err_uhci:
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 4a86b63745b8..27f35e8f161b 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -69,18 +69,21 @@ MODULE_PARM_DESC(ignore_oc, "ignore hardware overcurrent indications");
* show all queues in /sys/kernel/debug/uhci/[pci_addr]
* debug = 3, show all TDs in URBs when dumping
*/
-#ifdef DEBUG
-#define DEBUG_CONFIGURED 1
+#ifdef CONFIG_DYNAMIC_DEBUG
+
static int debug = 1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level");
+static char *errbuf;
#else
-#define DEBUG_CONFIGURED 0
-#define debug 0
+
+#define debug 0
+#define errbuf NULL
+
#endif
-static char *errbuf;
+
#define ERRBUF_LEN (32 * 1024)
static struct kmem_cache *uhci_up_cachep; /* urb_priv */
@@ -516,13 +519,12 @@ static void release_uhci(struct uhci_hcd *uhci)
{
int i;
- if (DEBUG_CONFIGURED) {
- spin_lock_irq(&uhci->lock);
- uhci->is_initialized = 0;
- spin_unlock_irq(&uhci->lock);
- debugfs_remove(uhci->dentry);
- }
+ spin_lock_irq(&uhci->lock);
+ uhci->is_initialized = 0;
+ spin_unlock_irq(&uhci->lock);
+
+ debugfs_remove(uhci->dentry);
for (i = 0; i < UHCI_NUM_SKELQH; i++)
uhci_free_qh(uhci, uhci->skelqh[i]);
@@ -868,14 +870,14 @@ static int __init uhci_hcd_init(void)
ignore_oc ? ", overcurrent ignored" : "");
set_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
- if (DEBUG_CONFIGURED) {
- errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
- if (!errbuf)
- goto errbuf_failed;
- uhci_debugfs_root = debugfs_create_dir("uhci", usb_debug_root);
- if (!uhci_debugfs_root)
- goto debug_failed;
- }
+#ifdef CONFIG_DYNAMIC_DEBUG
+ errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
+ if (!errbuf)
+ goto errbuf_failed;
+ uhci_debugfs_root = debugfs_create_dir("uhci", usb_debug_root);
+ if (!uhci_debugfs_root)
+ goto debug_failed;
+#endif
uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
sizeof(struct urb_priv), 0, 0, NULL);
@@ -906,12 +908,14 @@ clean0:
kmem_cache_destroy(uhci_up_cachep);
up_failed:
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
debugfs_remove(uhci_debugfs_root);
debug_failed:
kfree(errbuf);
errbuf_failed:
+#endif
clear_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
return retval;
@@ -927,7 +931,9 @@ static void __exit uhci_hcd_cleanup(void)
#endif
kmem_cache_destroy(uhci_up_cachep);
debugfs_remove(uhci_debugfs_root);
+#ifdef CONFIG_DYNAMIC_DEBUG
kfree(errbuf);
+#endif
clear_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
}
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 4cd79888804b..940304c33224 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -279,7 +279,7 @@ static const struct hc_driver uhci_driver = {
.hub_control = uhci_hub_control,
};
-static DEFINE_PCI_DEVICE_TABLE(uhci_pci_ids) = { {
+static const struct pci_device_id uhci_pci_ids[] = { {
/* handle any USB UHCI controller */
PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0),
.driver_data = (unsigned long) &uhci_driver,
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index 3003fefaa964..44e6c9da8892 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -108,6 +108,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
if (ret)
goto err_uhci;
+ device_wakeup_enable(hcd->self.controller);
return 0;
err_uhci:
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
index 1b0888f8da9a..d7b363a418de 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/usb/host/whci/hcd.c
@@ -293,6 +293,7 @@ static int whc_probe(struct umc_dev *umc)
dev_err(dev, "cannot add HCD: %d\n", ret);
goto error_usb_add_hcd;
}
+ device_wakeup_enable(usb_hcd->self.controller);
ret = wusbhc_b_create(wusbhc);
if (ret) {
diff --git a/drivers/usb/host/whci/int.c b/drivers/usb/host/whci/int.c
index 6aae70028101..0c086b2790d1 100644
--- a/drivers/usb/host/whci/int.c
+++ b/drivers/usb/host/whci/int.c
@@ -16,7 +16,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/uwb/umc.h>
#include "../../wusbcore/wusbhc.h"
diff --git a/drivers/usb/host/whci/wusb.c b/drivers/usb/host/whci/wusb.c
index f24efdebad17..8d2762682869 100644
--- a/drivers/usb/host/whci/wusb.c
+++ b/drivers/usb/host/whci/wusb.c
@@ -16,7 +16,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/uwb/umc.h>
#include "../../wusbcore/wusbhc.h"
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 73503a81ee81..eb009a457fb5 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -32,7 +32,7 @@ void xhci_dbg_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
xhci->cap_regs);
- temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ temp = readl(&xhci->cap_regs->hc_capbase);
xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
&xhci->cap_regs->hc_capbase, temp);
xhci_dbg(xhci, "// CAPLENGTH: 0x%x\n",
@@ -44,13 +44,13 @@ void xhci_dbg_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
- temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
+ temp = readl(&xhci->cap_regs->run_regs_off);
xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
&xhci->cap_regs->run_regs_off,
(unsigned int) temp & RTSOFF_MASK);
xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
- temp = xhci_readl(xhci, &xhci->cap_regs->db_off);
+ temp = readl(&xhci->cap_regs->db_off);
xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
}
@@ -61,7 +61,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
- temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ temp = readl(&xhci->cap_regs->hc_capbase);
xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
(unsigned int) temp);
xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
@@ -69,7 +69,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, "HCIVERSION: 0x%x\n",
(unsigned int) HC_VERSION(temp));
- temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
+ temp = readl(&xhci->cap_regs->hcs_params1);
xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
(unsigned int) temp);
xhci_dbg(xhci, " Max device slots: %u\n",
@@ -79,7 +79,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, " Max ports: %u\n",
(unsigned int) HCS_MAX_PORTS(temp));
- temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
+ temp = readl(&xhci->cap_regs->hcs_params2);
xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
(unsigned int) temp);
xhci_dbg(xhci, " Isoc scheduling threshold: %u\n",
@@ -87,7 +87,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, " Maximum allowed segments in event ring: %u\n",
(unsigned int) HCS_ERST_MAX(temp));
- temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+ temp = readl(&xhci->cap_regs->hcs_params3);
xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
(unsigned int) temp);
xhci_dbg(xhci, " Worst case U1 device exit latency: %u\n",
@@ -95,14 +95,14 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, " Worst case U2 device exit latency: %u\n",
(unsigned int) HCS_U2_LATENCY(temp));
- temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ temp = readl(&xhci->cap_regs->hcc_params);
xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
xhci_dbg(xhci, " HC generates %s bit addresses\n",
HCC_64BIT_ADDR(temp) ? "64" : "32");
/* FIXME */
xhci_dbg(xhci, " FIXME: more HCCPARAMS debugging\n");
- temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
+ temp = readl(&xhci->cap_regs->run_regs_off);
xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
}
@@ -110,7 +110,7 @@ static void xhci_print_command_reg(struct xhci_hcd *xhci)
{
u32 temp;
- temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp = readl(&xhci->op_regs->command);
xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
xhci_dbg(xhci, " HC is %s\n",
(temp & CMD_RUN) ? "running" : "being stopped");
@@ -128,7 +128,7 @@ static void xhci_print_status(struct xhci_hcd *xhci)
{
u32 temp;
- temp = xhci_readl(xhci, &xhci->op_regs->status);
+ temp = readl(&xhci->op_regs->status);
xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
xhci_dbg(xhci, " Event ring is %sempty\n",
(temp & STS_EINT) ? "not " : "");
@@ -163,7 +163,7 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
for (j = 0; j < NUM_PORT_REGS; ++j) {
xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
addr, names[j],
- (unsigned int) xhci_readl(xhci, addr));
+ (unsigned int) readl(addr));
addr++;
}
}
@@ -177,7 +177,7 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
u64 temp_64;
addr = &ir_set->irq_pending;
- temp = xhci_readl(xhci, addr);
+ temp = readl(addr);
if (temp == XHCI_INIT_VALUE)
return;
@@ -187,17 +187,17 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
(unsigned int)temp);
addr = &ir_set->irq_control;
- temp = xhci_readl(xhci, addr);
+ temp = readl(addr);
xhci_dbg(xhci, " %p: ir_set.control = 0x%x\n", addr,
(unsigned int)temp);
addr = &ir_set->erst_size;
- temp = xhci_readl(xhci, addr);
+ temp = readl(addr);
xhci_dbg(xhci, " %p: ir_set.erst_size = 0x%x\n", addr,
(unsigned int)temp);
addr = &ir_set->rsvd;
- temp = xhci_readl(xhci, addr);
+ temp = readl(addr);
if (temp != XHCI_INIT_VALUE)
xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n",
addr, (unsigned int)temp);
@@ -219,12 +219,12 @@ void xhci_print_run_regs(struct xhci_hcd *xhci)
int i;
xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
- temp = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+ temp = readl(&xhci->run_regs->microframe_index);
xhci_dbg(xhci, " %p: Microframe index = 0x%x\n",
&xhci->run_regs->microframe_index,
(unsigned int) temp);
for (i = 0; i < 7; ++i) {
- temp = xhci_readl(xhci, &xhci->run_regs->rsvd[i]);
+ temp = readl(&xhci->run_regs->rsvd[i]);
if (temp != XHCI_INIT_VALUE)
xhci_dbg(xhci, " WARN: %p: Rsvd[%i] = 0x%x\n",
&xhci->run_regs->rsvd[i],
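The xhci_readl() calls converted to plain readl() above (and throughout the xHCI files that follow) were thin wrappers; the wrapper definitions are deleted from xhci.h later in this diff and never used their xhci argument. A minimal sketch of the equivalent access, assuming the usual xhci.h definitions are in scope:

	/* Sketch only: both forms perform the same little-endian MMIO read;
	 * the struct xhci_hcd argument of the removed wrapper was unused.
	 */
	static u32 read_hc_capbase(struct xhci_hcd *xhci)
	{
		/* old: temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase); */
		return readl(&xhci->cap_regs->hc_capbase);
	}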
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 805f2348eeba..9992fbfec85f 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -94,7 +94,7 @@ static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
*/
memset(port_removable, 0, sizeof(port_removable));
for (i = 0; i < ports; i++) {
- portsc = xhci_readl(xhci, xhci->usb2_ports[i]);
+ portsc = readl(xhci->usb2_ports[i]);
/* If a device is removable, PORTSC reports a 0, same as in the
* hub descriptor DeviceRemovable bits.
*/
@@ -148,7 +148,7 @@ static void xhci_usb3_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
port_removable = 0;
/* bit 0 is reserved, bit 1 is for port 1, etc. */
for (i = 0; i < ports; i++) {
- portsc = xhci_readl(xhci, xhci->usb3_ports[i]);
+ portsc = readl(xhci->usb3_ports[i]);
if (portsc & PORT_DEV_REMOVE)
port_removable |= 1 << (i + 1);
}
@@ -342,8 +342,8 @@ static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
}
/* Write 1 to disable the port */
- xhci_writel(xhci, port_status | PORT_PE, addr);
- port_status = xhci_readl(xhci, addr);
+ writel(port_status | PORT_PE, addr);
+ port_status = readl(addr);
xhci_dbg(xhci, "disable port, actual port %d status = 0x%x\n",
wIndex, port_status);
}
@@ -388,8 +388,8 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
return;
}
/* Change bits are all write 1 to clear */
- xhci_writel(xhci, port_status | status, addr);
- port_status = xhci_readl(xhci, addr);
+ writel(port_status | status, addr);
+ port_status = readl(addr);
xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
port_change_bit, wIndex, port_status);
}
@@ -415,11 +415,11 @@ void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
{
u32 temp;
- temp = xhci_readl(xhci, port_array[port_id]);
+ temp = readl(port_array[port_id]);
temp = xhci_port_state_to_neutral(temp);
temp &= ~PORT_PLS_MASK;
temp |= PORT_LINK_STROBE | link_state;
- xhci_writel(xhci, temp, port_array[port_id]);
+ writel(temp, port_array[port_id]);
}
static void xhci_set_remote_wake_mask(struct xhci_hcd *xhci,
@@ -427,7 +427,7 @@ static void xhci_set_remote_wake_mask(struct xhci_hcd *xhci,
{
u32 temp;
- temp = xhci_readl(xhci, port_array[port_id]);
+ temp = readl(port_array[port_id]);
temp = xhci_port_state_to_neutral(temp);
if (wake_mask & USB_PORT_FEAT_REMOTE_WAKE_CONNECT)
@@ -445,7 +445,7 @@ static void xhci_set_remote_wake_mask(struct xhci_hcd *xhci,
else
temp &= ~PORT_WKOC_E;
- xhci_writel(xhci, temp, port_array[port_id]);
+ writel(temp, port_array[port_id]);
}
/* Test and clear port RWC bit */
@@ -454,11 +454,11 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
{
u32 temp;
- temp = xhci_readl(xhci, port_array[port_id]);
+ temp = readl(port_array[port_id]);
if (temp & port_bit) {
temp = xhci_port_state_to_neutral(temp);
temp |= port_bit;
- xhci_writel(xhci, temp, port_array[port_id]);
+ writel(temp, port_array[port_id]);
}
}
@@ -623,8 +623,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
}
xhci_ring_device(xhci, slot_id);
} else {
- int port_status = xhci_readl(xhci,
- port_array[wIndex]);
+ int port_status = readl(port_array[wIndex]);
xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
XHCI_MAX_REXIT_TIMEOUT,
port_status);
@@ -733,12 +732,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* Set the U1 and U2 exit latencies. */
memcpy(buf, &usb_bos_descriptor,
USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE);
- temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+ temp = readl(&xhci->cap_regs->hcs_params3);
buf[12] = HCS_U1_LATENCY(temp);
put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);
/* Indicate whether the host has LTM support. */
- temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ temp = readl(&xhci->cap_regs->hcc_params);
if (HCC_LTC(temp))
buf[8] |= USB_LTM_SUPPORT;
@@ -748,7 +747,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if (!wIndex || wIndex > max_ports)
goto error;
wIndex--;
- temp = xhci_readl(xhci, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
if (temp == 0xffffffff) {
retval = -ENODEV;
break;
@@ -775,7 +774,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if (!wIndex || wIndex > max_ports)
goto error;
wIndex--;
- temp = xhci_readl(xhci, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
if (temp == 0xffffffff) {
retval = -ENODEV;
break;
@@ -784,7 +783,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* FIXME: What new port features do we need to support? */
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- temp = xhci_readl(xhci, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
if ((temp & PORT_PLS_MASK) != XDEV_U0) {
/* Resume the port to U0 first */
xhci_set_link_state(xhci, port_array, wIndex,
@@ -797,7 +796,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* a port unless the port reports that it is in the
* enabled (PED = ‘1’,PLS < ‘3’) state.
*/
- temp = xhci_readl(xhci, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
if ((temp & PORT_PE) == 0 || (temp & PORT_RESET)
|| (temp & PORT_PLS_MASK) >= XDEV_U3) {
xhci_warn(xhci, "USB core suspending device "
@@ -822,11 +821,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
msleep(10); /* wait device to enter */
spin_lock_irqsave(&xhci->lock, flags);
- temp = xhci_readl(xhci, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
bus_state->suspended_ports |= 1 << wIndex;
break;
case USB_PORT_FEAT_LINK_STATE:
- temp = xhci_readl(xhci, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
/* Disable port */
if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
@@ -839,9 +838,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp |= PORT_CSC | PORT_PEC | PORT_WRC |
PORT_OCC | PORT_RC | PORT_PLC |
PORT_CEC;
- xhci_writel(xhci, temp | PORT_PE,
- port_array[wIndex]);
- temp = xhci_readl(xhci, port_array[wIndex]);
+ writel(temp | PORT_PE, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
break;
}
@@ -850,7 +848,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_dbg(xhci, "Enable port %d\n", wIndex);
xhci_set_link_state(xhci, port_array, wIndex,
link_state);
- temp = xhci_readl(xhci, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
break;
}
@@ -884,7 +882,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
msleep(20); /* wait device to enter */
spin_lock_irqsave(&xhci->lock, flags);
- temp = xhci_readl(xhci, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
if (link_state == USB_SS_PORT_LS_U3)
bus_state->suspended_ports |= 1 << wIndex;
break;
@@ -895,10 +893,9 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* However, khubd will ignore the roothub events until
* the roothub is registered.
*/
- xhci_writel(xhci, temp | PORT_POWER,
- port_array[wIndex]);
+ writel(temp | PORT_POWER, port_array[wIndex]);
- temp = xhci_readl(xhci, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
xhci_dbg(xhci, "set port power, actual port %d status = 0x%x\n", wIndex, temp);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -911,52 +908,52 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case USB_PORT_FEAT_RESET:
temp = (temp | PORT_RESET);
- xhci_writel(xhci, temp, port_array[wIndex]);
+ writel(temp, port_array[wIndex]);
- temp = xhci_readl(xhci, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
xhci_dbg(xhci, "set port reset, actual port %d status = 0x%x\n", wIndex, temp);
break;
case USB_PORT_FEAT_REMOTE_WAKE_MASK:
xhci_set_remote_wake_mask(xhci, port_array,
wIndex, wake_mask);
- temp = xhci_readl(xhci, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
xhci_dbg(xhci, "set port remote wake mask, "
"actual port %d status = 0x%x\n",
wIndex, temp);
break;
case USB_PORT_FEAT_BH_PORT_RESET:
temp |= PORT_WR;
- xhci_writel(xhci, temp, port_array[wIndex]);
+ writel(temp, port_array[wIndex]);
- temp = xhci_readl(xhci, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
break;
case USB_PORT_FEAT_U1_TIMEOUT:
if (hcd->speed != HCD_USB3)
goto error;
- temp = xhci_readl(xhci, port_array[wIndex] + PORTPMSC);
+ temp = readl(port_array[wIndex] + PORTPMSC);
temp &= ~PORT_U1_TIMEOUT_MASK;
temp |= PORT_U1_TIMEOUT(timeout);
- xhci_writel(xhci, temp, port_array[wIndex] + PORTPMSC);
+ writel(temp, port_array[wIndex] + PORTPMSC);
break;
case USB_PORT_FEAT_U2_TIMEOUT:
if (hcd->speed != HCD_USB3)
goto error;
- temp = xhci_readl(xhci, port_array[wIndex] + PORTPMSC);
+ temp = readl(port_array[wIndex] + PORTPMSC);
temp &= ~PORT_U2_TIMEOUT_MASK;
temp |= PORT_U2_TIMEOUT(timeout);
- xhci_writel(xhci, temp, port_array[wIndex] + PORTPMSC);
+ writel(temp, port_array[wIndex] + PORTPMSC);
break;
default:
goto error;
}
/* unblock any posted writes */
- temp = xhci_readl(xhci, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
break;
case ClearPortFeature:
if (!wIndex || wIndex > max_ports)
goto error;
wIndex--;
- temp = xhci_readl(xhci, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
if (temp == 0xffffffff) {
retval = -ENODEV;
break;
@@ -965,7 +962,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = xhci_port_state_to_neutral(temp);
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- temp = xhci_readl(xhci, port_array[wIndex]);
+ temp = readl(port_array[wIndex]);
xhci_dbg(xhci, "clear USB_PORT_FEAT_SUSPEND\n");
xhci_dbg(xhci, "PORTSC %04x\n", temp);
if (temp & PORT_RESET)
@@ -1008,8 +1005,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
port_array[wIndex], temp);
break;
case USB_PORT_FEAT_POWER:
- xhci_writel(xhci, temp & ~PORT_POWER,
- port_array[wIndex]);
+ writel(temp & ~PORT_POWER, port_array[wIndex]);
spin_unlock_irqrestore(&xhci->lock, flags);
temp = usb_acpi_power_manageable(hcd->self.root_hub,
@@ -1070,7 +1066,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
spin_lock_irqsave(&xhci->lock, flags);
/* For each port, did anything change? If so, set that bit in buf. */
for (i = 0; i < max_ports; i++) {
- temp = xhci_readl(xhci, port_array[i]);
+ temp = readl(port_array[i]);
if (temp == 0xffffffff) {
retval = -ENODEV;
break;
@@ -1124,7 +1120,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
u32 t1, t2;
int slot_id;
- t1 = xhci_readl(xhci, port_array[port_index]);
+ t1 = readl(port_array[port_index]);
t2 = xhci_port_state_to_neutral(t1);
if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
@@ -1157,7 +1153,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
t1 = xhci_port_state_to_neutral(t1);
if (t1 != t2)
- xhci_writel(xhci, t2, port_array[port_index]);
+ writel(t2, port_array[port_index]);
}
hcd->state = HC_STATE_SUSPENDED;
bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
@@ -1187,9 +1183,9 @@ int xhci_bus_resume(struct usb_hcd *hcd)
}
/* delay the irqs */
- temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp = readl(&xhci->op_regs->command);
temp &= ~CMD_EIE;
- xhci_writel(xhci, temp, &xhci->op_regs->command);
+ writel(temp, &xhci->op_regs->command);
port_index = max_ports;
while (port_index--) {
@@ -1198,7 +1194,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
u32 temp;
int slot_id;
- temp = xhci_readl(xhci, port_array[port_index]);
+ temp = readl(port_array[port_index]);
if (DEV_SUPERSPEED(temp))
temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
else
@@ -1235,17 +1231,17 @@ int xhci_bus_resume(struct usb_hcd *hcd)
if (slot_id)
xhci_ring_device(xhci, slot_id);
} else
- xhci_writel(xhci, temp, port_array[port_index]);
+ writel(temp, port_array[port_index]);
}
- (void) xhci_readl(xhci, &xhci->op_regs->command);
+ (void) readl(&xhci->op_regs->command);
bus_state->next_statechange = jiffies + msecs_to_jiffies(5);
/* re-enable irqs */
- temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp = readl(&xhci->op_regs->command);
temp |= CMD_EIE;
- xhci_writel(xhci, temp, &xhci->op_regs->command);
- temp = xhci_readl(xhci, &xhci->op_regs->command);
+ writel(temp, &xhci->op_regs->command);
+ temp = readl(&xhci->op_regs->command);
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
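The bus suspend/resume hunks above repeat the same read-modify-write pattern on the USBCMD register, with a trailing read to flush posted MMIO writes before the lock is released. A small sketch of that pattern, assuming the usual xhci.h definitions (the helper name is illustrative, not from the driver):

	/* Mask or re-enable controller interrupts around the port walk. */
	static void toggle_cmd_eie(struct xhci_hcd *xhci, bool enable)
	{
		u32 cmd = readl(&xhci->op_regs->command);

		if (enable)
			cmd |= CMD_EIE;
		else
			cmd &= ~CMD_EIE;
		writel(cmd, &xhci->op_regs->command);
		readl(&xhci->op_regs->command);	/* flush posted write */
	}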
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 49b8bd063fab..bce4391a0e7d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -57,7 +57,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
if (cycle_state == 0) {
for (i = 0; i < TRBS_PER_SEGMENT; i++)
- seg->trbs[i].link.control |= TRB_CYCLE;
+ seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
}
seg->dma = dma;
seg->next = NULL;
@@ -308,7 +308,8 @@ static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
if (cycle_state == 0) {
for (i = 0; i < TRBS_PER_SEGMENT; i++)
- seg->trbs[i].link.control |= TRB_CYCLE;
+ seg->trbs[i].link.control |=
+ cpu_to_le32(TRB_CYCLE);
}
/* All endpoint rings have link TRBs */
xhci_link_segments(xhci, seg, seg->next, type);
@@ -432,10 +433,10 @@ static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
- dma_free_coherent(&pdev->dev,
+ dma_free_coherent(dev,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
stream_ctx, dma);
else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
@@ -460,10 +461,10 @@ static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs, dma_addr_t *dma,
gfp_t mem_flags)
{
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
- return dma_alloc_coherent(&pdev->dev,
+ return dma_alloc_coherent(dev,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
dma, mem_flags);
else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
@@ -721,8 +722,7 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
stream_info->stream_ctx_array,
stream_info->ctx_array_dma);
- if (stream_info)
- kfree(stream_info->stream_rings);
+ kfree(stream_info->stream_rings);
kfree(stream_info);
}
@@ -1616,7 +1616,7 @@ static void scratchpad_free(struct xhci_hcd *xhci)
{
int num_sp;
int i;
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
if (!xhci->scratchpad)
return;
@@ -1624,13 +1624,13 @@ static void scratchpad_free(struct xhci_hcd *xhci)
num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
for (i = 0; i < num_sp; i++) {
- dma_free_coherent(&pdev->dev, xhci->page_size,
+ dma_free_coherent(dev, xhci->page_size,
xhci->scratchpad->sp_buffers[i],
xhci->scratchpad->sp_dma_buffers[i]);
}
kfree(xhci->scratchpad->sp_dma_buffers);
kfree(xhci->scratchpad->sp_buffers);
- dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
+ dma_free_coherent(dev, num_sp * sizeof(u64),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
kfree(xhci->scratchpad);
@@ -1692,7 +1692,7 @@ void xhci_free_command(struct xhci_hcd *xhci,
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
struct xhci_cd *cur_cd, *next_cd;
int size;
int i, j, num_ports;
@@ -1700,7 +1700,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
/* Free the Event Ring Segment Table and the actual Event Ring */
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
if (xhci->erst.entries)
- dma_free_coherent(&pdev->dev, size,
+ dma_free_coherent(dev, size,
xhci->erst.entries, xhci->erst.erst_dma_addr);
xhci->erst.entries = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
@@ -1748,7 +1748,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
"Freed medium stream array pool");
if (xhci->dcbaa)
- dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
+ dma_free_coherent(dev, sizeof(*xhci->dcbaa),
xhci->dcbaa, xhci->dcbaa->dma);
xhci->dcbaa = NULL;
@@ -1986,7 +1986,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
}
/* Port offset and count in the third dword, see section 7.2 */
- temp = xhci_readl(xhci, addr + 2);
+ temp = readl(addr + 2);
port_offset = XHCI_EXT_PORT_OFF(temp);
port_count = XHCI_EXT_PORT_COUNT(temp);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -2069,7 +2069,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
int cap_count = 0;
addr = &xhci->cap_regs->hcc_params;
- offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
+ offset = XHCI_HCC_EXT_CAPS(readl(addr));
if (offset == 0) {
xhci_err(xhci, "No Extended Capability registers, "
"unable to set up roothub.\n");
@@ -2106,7 +2106,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
/* count extended protocol capability entries for later caching */
do {
u32 cap_id;
- cap_id = xhci_readl(xhci, tmp_addr);
+ cap_id = readl(tmp_addr);
if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
cap_count++;
tmp_offset = XHCI_EXT_CAPS_NEXT(cap_id);
@@ -2120,7 +2120,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
while (1) {
u32 cap_id;
- cap_id = xhci_readl(xhci, addr);
+ cap_id = readl(addr);
if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
xhci_add_in_port(xhci, num_ports, addr,
(u8) XHCI_EXT_PORT_MAJOR(cap_id),
@@ -2224,7 +2224,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
INIT_LIST_HEAD(&xhci->cancel_cmd_list);
- page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
+ page_size = readl(&xhci->op_regs->page_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Supported page size register = 0x%x", page_size);
for (i = 0; i < 16; i++) {
@@ -2247,14 +2247,14 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* Program the Number of Device Slots Enabled field in the CONFIG
* register with the max value of slots the HC can handle.
*/
- val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
+ val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// xHC can handle at most %d device slots.", val);
- val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
+ val2 = readl(&xhci->op_regs->config_reg);
val |= (val2 & ~HCS_SLOTS_MASK);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting Max device slots reg = 0x%x.", val);
- xhci_writel(xhci, val, &xhci->op_regs->config_reg);
+ writel(val, &xhci->op_regs->config_reg);
/*
* Section 5.4.8 - doorbell array must be
@@ -2331,7 +2331,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
*/
xhci->cmd_ring_reserved_trbs++;
- val = xhci_readl(xhci, &xhci->cap_regs->db_off);
+ val = readl(&xhci->cap_regs->db_off);
val &= DBOFF_MASK;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Doorbell array is located at offset 0x%x"
@@ -2382,13 +2382,13 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
}
/* set ERST count with the number of entries in the segment table */
- val = xhci_readl(xhci, &xhci->ir_set->erst_size);
+ val = readl(&xhci->ir_set->erst_size);
val &= ERST_SIZE_MASK;
val |= ERST_NUM_SEGS;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Write ERST size = %i to ir_set 0 (some bits preserved)",
val);
- xhci_writel(xhci, val, &xhci->ir_set->erst_size);
+ writel(val, &xhci->ir_set->erst_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Set ERST entries to point to event ring.");
@@ -2431,10 +2431,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* is necessary for allowing USB 3.0 devices to do remote wakeup from
* U3 (device suspend).
*/
- temp = xhci_readl(xhci, &xhci->op_regs->dev_notification);
+ temp = readl(&xhci->op_regs->dev_notification);
temp &= ~DEV_NOTE_MASK;
temp |= DEV_NOTE_FWAKE;
- xhci_writel(xhci, temp, &xhci->op_regs->dev_notification);
+ writel(temp, &xhci->op_regs->dev_notification);
return 0;
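The xhci-mem.c changes above drop the to_pci_dev() casts and hand the generic hcd->self.controller device straight to the DMA API, which also covers non-PCI (platform) xHCI controllers. A minimal sketch of the pattern, using a hypothetical helper name:

	/* Allocate a coherent buffer against whatever bus the HCD sits on. */
	static void *xhci_alloc_coherent_example(struct xhci_hcd *xhci, size_t size,
						 dma_addr_t *dma, gfp_t flags)
	{
		struct device *dev = xhci_to_hcd(xhci)->self.controller;

		return dma_alloc_coherent(dev, size, dma, flags);
	}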
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index b8dffd59eb25..04f986d9234f 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -128,7 +128,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
* any other sleep) on Haswell machines with LPT and LPT-LP
* with the new Intel BIOS
*/
- xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+ /* Limit the quirk to only known vendors, as this triggers
+ * yet another BIOS bug on some other machines
+ * https://bugzilla.kernel.org/show_bug.cgi?id=66171
+ */
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
+ xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
}
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
@@ -137,6 +142,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
"QUIRK: Resetting on resume");
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ pdev->device == 0x0015 &&
+ pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
+ pdev->subsystem_device == 0xc0cd)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
}
@@ -331,6 +341,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
.check_bandwidth = xhci_check_bandwidth,
.reset_bandwidth = xhci_reset_bandwidth,
.address_device = xhci_address_device,
+ .enable_device = xhci_enable_device,
.update_hub_device = xhci_update_hub_device,
.reset_device = xhci_discover_or_reset_device,
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index d9c169f470d3..8abda5c73ca1 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -69,6 +69,7 @@ static const struct hc_driver xhci_plat_xhci_driver = {
.check_bandwidth = xhci_check_bandwidth,
.reset_bandwidth = xhci_reset_bandwidth,
.address_device = xhci_address_device,
+ .enable_device = xhci_enable_device,
.update_hub_device = xhci_update_hub_device,
.reset_device = xhci_discover_or_reset_device,
@@ -139,6 +140,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto unmap_registers;
+ device_wakeup_enable(hcd->self.controller);
/* USB 2.0 roothub is stored in the platform_device now. */
hcd = platform_get_drvdata(pdev);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 53c2e296467f..0ed64eb68e48 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -156,8 +156,6 @@ static void next_trb(struct xhci_hcd *xhci,
*/
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
- unsigned long long addr;
-
ring->deq_updates++;
/*
@@ -186,8 +184,6 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
ring->dequeue++;
}
} while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
-
- addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
}
/*
@@ -212,7 +208,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
{
u32 chain;
union xhci_trb *next;
- unsigned long long addr;
chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
/* If this is not event ring, there is one less usable TRB */
@@ -264,7 +259,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring->enqueue = ring->enq_seg->trbs;
next = ring->enqueue;
}
- addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
}
/*
@@ -295,9 +289,9 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
return;
xhci_dbg(xhci, "// Ding dong!\n");
- xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
+ writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
/* Flush PCI posted writes */
- xhci_readl(xhci, &xhci->dba->doorbell[0]);
+ readl(&xhci->dba->doorbell[0]);
}
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
@@ -427,7 +421,7 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
(ep_state & EP_HALTED))
return;
- xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
+ writel(DB_VALUE(ep_index, stream_id), db_addr);
/* The CPU has better things to do at this point than wait for a
* write-posting flush. It'll get there soon enough.
*/
@@ -1655,7 +1649,7 @@ static void handle_device_notification(struct xhci_hcd *xhci,
u32 slot_id;
struct usb_device *udev;
- slot_id = TRB_TO_SLOT_ID(event->generic.field[3]);
+ slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
if (!xhci->devs[slot_id]) {
xhci_warn(xhci, "Device Notification event for "
"unused slot %u\n", slot_id);
@@ -1739,7 +1733,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
port_id);
- temp = xhci_readl(xhci, port_array[faked_port_index]);
+ temp = readl(port_array[faked_port_index]);
if (hcd->state == HC_STATE_SUSPENDED) {
xhci_dbg(xhci, "resume root hub\n");
usb_hcd_resume_root_hub(hcd);
@@ -1748,7 +1742,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
xhci_dbg(xhci, "port resume event for port %d\n", port_id);
- temp1 = xhci_readl(xhci, &xhci->op_regs->command);
+ temp1 = readl(&xhci->op_regs->command);
if (!(temp1 & CMD_RUN)) {
xhci_warn(xhci, "xHC is not running.\n");
goto cleanup;
@@ -2831,7 +2825,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
spin_lock(&xhci->lock);
/* Check if the xHC generated the interrupt, or the irq is shared */
- status = xhci_readl(xhci, &xhci->op_regs->status);
+ status = readl(&xhci->op_regs->status);
if (status == 0xffffffff)
goto hw_died;
@@ -2853,16 +2847,16 @@ hw_died:
* Write 1 to clear the interrupt status.
*/
status |= STS_EINT;
- xhci_writel(xhci, status, &xhci->op_regs->status);
+ writel(status, &xhci->op_regs->status);
/* FIXME when MSI-X is supported and there are multiple vectors */
/* Clear the MSI-X event interrupt status */
if (hcd->irq) {
u32 irq_pending;
/* Acknowledge the PCI interrupt */
- irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ irq_pending = readl(&xhci->ir_set->irq_pending);
irq_pending |= IMAN_IP;
- xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
+ writel(irq_pending, &xhci->ir_set->irq_pending);
}
if (xhci->xhc_state & XHCI_STATE_DYING) {
@@ -2973,58 +2967,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
while (1) {
- if (room_on_ring(xhci, ep_ring, num_trbs)) {
- union xhci_trb *trb = ep_ring->enqueue;
- unsigned int usable = ep_ring->enq_seg->trbs +
- TRBS_PER_SEGMENT - 1 - trb;
- u32 nop_cmd;
-
- /*
- * Section 4.11.7.1 TD Fragments states that a link
- * TRB must only occur at the boundary between
- * data bursts (eg 512 bytes for 480M).
- * While it is possible to split a large fragment
- * we don't know the size yet.
- * Simplest solution is to fill the trb before the
- * LINK with nop commands.
- */
- if (num_trbs == 1 || num_trbs <= usable || usable == 0)
- break;
-
- if (ep_ring->type != TYPE_BULK)
- /*
- * While isoc transfers might have a buffer that
- * crosses a 64k boundary it is unlikely.
- * Since we can't add NOPs without generating
- * gaps in the traffic just hope it never
- * happens at the end of the ring.
- * This could be fixed by writing a LINK TRB
- * instead of the first NOP - however the
- * TRB_TYPE_LINK_LE32() calls would all need
- * changing to check the ring length.
- */
- break;
-
- if (num_trbs >= TRBS_PER_SEGMENT) {
- xhci_err(xhci, "Too many fragments %d, max %d\n",
- num_trbs, TRBS_PER_SEGMENT - 1);
- return -ENOMEM;
- }
-
- nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
- ep_ring->cycle_state);
- ep_ring->num_trbs_free -= usable;
- do {
- trb->generic.field[0] = 0;
- trb->generic.field[1] = 0;
- trb->generic.field[2] = 0;
- trb->generic.field[3] = nop_cmd;
- trb++;
- } while (--usable);
- ep_ring->enqueue = trb;
- if (room_on_ring(xhci, ep_ring, num_trbs))
- break;
- }
+ if (room_on_ring(xhci, ep_ring, num_trbs))
+ break;
if (ep_ring == xhci->cmd_ring) {
xhci_err(xhci, "Do not support expand command ring\n");
@@ -3981,7 +3925,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
if (ret)
return ret;
- start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+ start_frame = readl(&xhci->run_regs->microframe_index);
start_frame &= 0x3fff;
urb->start_frame = start_frame;
@@ -4056,12 +4000,12 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
- u32 slot_id)
+ u32 slot_id, enum xhci_setup_dev setup)
{
return queue_command(xhci, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
- TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
- false);
+ TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
+ | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}
int xhci_queue_vendor_command(struct xhci_hcd *xhci,
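xhci_queue_address_device() now takes an enum xhci_setup_dev argument: SETUP_CONTEXT_ONLY sets the TRB_BSR (Block Set Address Request) bit so the controller programs the slot context without sending SET_ADDRESS to the device, while SETUP_CONTEXT_ADDRESS keeps the old behaviour. A caller-side sketch (the helper name is hypothetical):

	static int queue_setup_example(struct xhci_hcd *xhci, dma_addr_t in_ctx,
				       u32 slot_id, bool do_set_address)
	{
		enum xhci_setup_dev setup = do_set_address ? SETUP_CONTEXT_ADDRESS
							   : SETUP_CONTEXT_ONLY;

		return xhci_queue_address_device(xhci, in_ctx, slot_id, setup);
	}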
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 20364cc8d2fb..dde3959b7a33 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -116,12 +116,12 @@ DECLARE_EVENT_CLASS(xhci_log_event,
__field(u64, dma)
__field(u32, status)
__field(u32, flags)
- __dynamic_array(__le32, trb, 4)
+ __dynamic_array(u8, trb, sizeof(struct xhci_generic_trb))
),
TP_fast_assign(
__entry->va = trb_va;
- __entry->dma = le64_to_cpu(((u64)ev->field[1]) << 32 |
- ev->field[0]);
+ __entry->dma = ((u64)le32_to_cpu(ev->field[1])) << 32 |
+ le32_to_cpu(ev->field[0]);
__entry->status = le32_to_cpu(ev->field[2]);
__entry->flags = le32_to_cpu(ev->field[3]);
memcpy(__get_dynamic_array(trb), trb_va,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 4265b48856f6..6fe577d46fa2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -40,6 +40,10 @@ static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
+static unsigned int quirks;
+module_param(quirks, uint, S_IRUGO);
+MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
+
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
* xhci_handshake - spin reading hc until handshake completes or fails
@@ -60,7 +64,7 @@ int xhci_handshake(struct xhci_hcd *xhci, void __iomem *ptr,
u32 result;
do {
- result = xhci_readl(xhci, ptr);
+ result = readl(ptr);
if (result == ~(u32)0) /* card removed */
return -ENODEV;
result &= mask;
@@ -82,13 +86,13 @@ void xhci_quiesce(struct xhci_hcd *xhci)
u32 mask;
mask = ~(XHCI_IRQS);
- halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
+ halted = readl(&xhci->op_regs->status) & STS_HALT;
if (!halted)
mask &= ~CMD_RUN;
- cmd = xhci_readl(xhci, &xhci->op_regs->command);
+ cmd = readl(&xhci->op_regs->command);
cmd &= mask;
- xhci_writel(xhci, cmd, &xhci->op_regs->command);
+ writel(cmd, &xhci->op_regs->command);
}
/*
@@ -124,11 +128,11 @@ static int xhci_start(struct xhci_hcd *xhci)
u32 temp;
int ret;
- temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp = readl(&xhci->op_regs->command);
temp |= (CMD_RUN);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
temp);
- xhci_writel(xhci, temp, &xhci->op_regs->command);
+ writel(temp, &xhci->op_regs->command);
/*
* Wait for the HCHalted Status bit to be 0 to indicate the host is
@@ -158,16 +162,16 @@ int xhci_reset(struct xhci_hcd *xhci)
u32 state;
int ret, i;
- state = xhci_readl(xhci, &xhci->op_regs->status);
+ state = readl(&xhci->op_regs->status);
if ((state & STS_HALT) == 0) {
xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
return 0;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
- command = xhci_readl(xhci, &xhci->op_regs->command);
+ command = readl(&xhci->op_regs->command);
command |= CMD_RESET;
- xhci_writel(xhci, command, &xhci->op_regs->command);
+ writel(command, &xhci->op_regs->command);
ret = xhci_handshake(xhci, &xhci->op_regs->command,
CMD_RESET, 0, 10 * 1000 * 1000);
@@ -321,6 +325,9 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ if (xhci->quirks & XHCI_PLAT)
+ return;
+
xhci_free_irq(xhci);
if (xhci->msix_entries) {
@@ -422,7 +429,7 @@ static void compliance_mode_recovery(unsigned long arg)
xhci = (struct xhci_hcd *)arg;
for (i = 0; i < xhci->num_usb3_ports; i++) {
- temp = xhci_readl(xhci, xhci->usb3_ports[i]);
+ temp = readl(xhci->usb3_ports[i]);
if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
/*
* Compliance Mode Detected. Letting USB Core
@@ -611,24 +618,23 @@ int xhci_run(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Set the interrupt modulation register");
- temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
+ temp = readl(&xhci->ir_set->irq_control);
temp &= ~ER_IRQ_INTERVAL_MASK;
temp |= (u32) 160;
- xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
+ writel(temp, &xhci->ir_set->irq_control);
/* Set the HCD state before we enable the irqs */
- temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp = readl(&xhci->op_regs->command);
temp |= (CMD_EIE);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Enable interrupts, cmd = 0x%x.", temp);
- xhci_writel(xhci, temp, &xhci->op_regs->command);
+ writel(temp, &xhci->op_regs->command);
- temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ temp = readl(&xhci->ir_set->irq_pending);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
- xhci_writel(xhci, ER_IRQ_ENABLE(temp),
- &xhci->ir_set->irq_pending);
+ writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
xhci_print_ir_set(xhci, 0);
if (xhci->quirks & XHCI_NEC_HOST)
@@ -698,18 +704,17 @@ void xhci_stop(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Disabling event ring interrupts");
- temp = xhci_readl(xhci, &xhci->op_regs->status);
- xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
- temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- xhci_writel(xhci, ER_IRQ_DISABLE(temp),
- &xhci->ir_set->irq_pending);
+ temp = readl(&xhci->op_regs->status);
+ writel(temp & ~STS_EINT, &xhci->op_regs->status);
+ temp = readl(&xhci->ir_set->irq_pending);
+ writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
xhci_print_ir_set(xhci, 0);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
xhci_mem_cleanup(xhci);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xhci_stop completed - status = %x",
- xhci_readl(xhci, &xhci->op_regs->status));
+ readl(&xhci->op_regs->status));
}
/*
@@ -739,7 +744,7 @@ void xhci_shutdown(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xhci_shutdown completed - status = %x",
- xhci_readl(xhci, &xhci->op_regs->status));
+ readl(&xhci->op_regs->status));
/* Yet another workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
@@ -749,28 +754,28 @@ void xhci_shutdown(struct usb_hcd *hcd)
#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
- xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
- xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
+ xhci->s3.command = readl(&xhci->op_regs->command);
+ xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
- xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
- xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
+ xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
+ xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
- xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
+ xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
+ xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}
static void xhci_restore_registers(struct xhci_hcd *xhci)
{
- xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
- xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
+ writel(xhci->s3.command, &xhci->op_regs->command);
+ writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
- xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
- xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
+ writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
+ writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
- xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
- xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
+ writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
+ writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
@@ -866,9 +871,9 @@ int xhci_suspend(struct xhci_hcd *xhci)
/* skipped assuming that port suspend has done */
/* step 2: clear Run/Stop bit */
- command = xhci_readl(xhci, &xhci->op_regs->command);
+ command = readl(&xhci->op_regs->command);
command &= ~CMD_RUN;
- xhci_writel(xhci, command, &xhci->op_regs->command);
+ writel(command, &xhci->op_regs->command);
/* Some chips from Fresco Logic need an extraordinary delay */
delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
@@ -885,9 +890,9 @@ int xhci_suspend(struct xhci_hcd *xhci)
xhci_save_registers(xhci);
/* step 4: set CSS flag */
- command = xhci_readl(xhci, &xhci->op_regs->command);
+ command = readl(&xhci->op_regs->command);
command |= CMD_CSS;
- xhci_writel(xhci, command, &xhci->op_regs->command);
+ writel(command, &xhci->op_regs->command);
if (xhci_handshake(xhci, &xhci->op_regs->status,
STS_SAVE, 0, 10 * 1000)) {
xhci_warn(xhci, "WARN: xHC save state timeout\n");
@@ -951,16 +956,16 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
xhci_set_cmd_ring_deq(xhci);
/* step 3: restore state and start state*/
/* step 3: set CRS flag */
- command = xhci_readl(xhci, &xhci->op_regs->command);
+ command = readl(&xhci->op_regs->command);
command |= CMD_CRS;
- xhci_writel(xhci, command, &xhci->op_regs->command);
+ writel(command, &xhci->op_regs->command);
if (xhci_handshake(xhci, &xhci->op_regs->status,
STS_RESTORE, 0, 10 * 1000)) {
xhci_warn(xhci, "WARN: xHC restore state timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
- temp = xhci_readl(xhci, &xhci->op_regs->status);
+ temp = readl(&xhci->op_regs->status);
}
/* If restore operation fails, re-initialize the HC during resume */
@@ -984,17 +989,16 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
xhci_cleanup_msix(xhci);
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
- temp = xhci_readl(xhci, &xhci->op_regs->status);
- xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
- temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- xhci_writel(xhci, ER_IRQ_DISABLE(temp),
- &xhci->ir_set->irq_pending);
+ temp = readl(&xhci->op_regs->status);
+ writel(temp & ~STS_EINT, &xhci->op_regs->status);
+ temp = readl(&xhci->ir_set->irq_pending);
+ writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
xhci_print_ir_set(xhci, 0);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
- xhci_readl(xhci, &xhci->op_regs->status));
+ readl(&xhci->op_regs->status));
/* USB core calls the PCI reinit and start functions twice:
* first with the primary HCD, and then with the secondary HCD.
@@ -1023,9 +1027,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
}
/* step 4: set Run/Stop bit */
- command = xhci_readl(xhci, &xhci->op_regs->command);
+ command = readl(&xhci->op_regs->command);
command |= CMD_RUN;
- xhci_writel(xhci, command, &xhci->op_regs->command);
+ writel(command, &xhci->op_regs->command);
xhci_handshake(xhci, &xhci->op_regs->status, STS_HALT,
0, 250 * 1000);
@@ -1464,7 +1468,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
if (ret || !urb->hcpriv)
goto done;
- temp = xhci_readl(xhci, &xhci->op_regs->status);
+ temp = readl(&xhci->op_regs->status);
if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"HW died, freeing TD.");
@@ -1892,8 +1896,8 @@ static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
* (bit 1). The default control endpoint is added during the Address
* Device command and is never removed until the slot is disabled.
*/
- valid_add_flags = ctrl_ctx->add_flags >> 2;
- valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+ valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
+ valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
/* Use hweight32 to count the number of ones in the add flags, or
* number of endpoints added. Don't count endpoints that are changed
@@ -1909,8 +1913,8 @@ static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
u32 valid_add_flags;
u32 valid_drop_flags;
- valid_add_flags = ctrl_ctx->add_flags >> 2;
- valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+ valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
+ valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
return hweight32(valid_drop_flags) -
hweight32(valid_add_flags & valid_drop_flags);
@@ -3585,7 +3589,7 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
spin_lock_irqsave(&xhci->lock, flags);
/* Don't disable the slot if the host controller is dead. */
- state = xhci_readl(xhci, &xhci->op_regs->status);
+ state = readl(&xhci->op_regs->status);
if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
(xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_free_virt_device(xhci, udev->slot_id);
@@ -3712,13 +3716,15 @@ disable_slot:
}
/*
- * Issue an Address Device command (which will issue a SetAddress request to
- * the device).
+ * Issue an Address Device command and optionally send a corresponding
+ * SetAddress request to the device.
* We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
* we should only issue and wait on one address command at the same time.
*/
-int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ enum xhci_setup_dev setup)
{
+ const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
unsigned long flags;
int timeleft;
struct xhci_virt_device *virt_dev;
@@ -3771,12 +3777,12 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
- slot_ctx->dev_info >> 27);
+ le32_to_cpu(slot_ctx->dev_info) >> 27);
spin_lock_irqsave(&xhci->lock, flags);
cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
- udev->slot_id);
+ udev->slot_id, setup);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
@@ -3794,8 +3800,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
* command on a timeout.
*/
if (timeleft <= 0) {
- xhci_warn(xhci, "%s while waiting for address device command\n",
- timeleft == 0 ? "Timeout" : "Signal");
+ xhci_warn(xhci, "%s while waiting for setup %s command\n",
+ timeleft == 0 ? "Timeout" : "Signal", act);
/* cancel the address device command */
ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
if (ret < 0)
@@ -3806,26 +3812,27 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
switch (virt_dev->cmd_status) {
case COMP_CTX_STATE:
case COMP_EBADSLT:
- xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
- udev->slot_id);
+ xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
+ act, udev->slot_id);
ret = -EINVAL;
break;
case COMP_TX_ERR:
- dev_warn(&udev->dev, "Device not responding to set address.\n");
+ dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
ret = -EPROTO;
break;
case COMP_DEV_ERR:
- dev_warn(&udev->dev, "ERROR: Incompatible device for address "
- "device command.\n");
+ dev_warn(&udev->dev,
+ "ERROR: Incompatible device for setup %s command\n", act);
ret = -ENODEV;
break;
case COMP_SUCCESS:
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
- "Successful Address Device command");
+ "Successful setup %s command", act);
break;
default:
- xhci_err(xhci, "ERROR: unexpected command completion "
- "code 0x%x.\n", virt_dev->cmd_status);
+ xhci_err(xhci,
+ "ERROR: unexpected setup %s command completion code 0x%x.\n",
+ act, virt_dev->cmd_status);
xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
@@ -3850,7 +3857,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
- slot_ctx->dev_info >> 27);
+ le32_to_cpu(slot_ctx->dev_info) >> 27);
xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
/*
@@ -3859,7 +3866,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
*/
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
- slot_ctx->dev_info >> 27);
+ le32_to_cpu(slot_ctx->dev_info) >> 27);
/* Zero the input context control for later use */
ctrl_ctx->add_flags = 0;
ctrl_ctx->drop_flags = 0;
@@ -3871,6 +3878,16 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
return 0;
}
+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
+}
+
+int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
+}
+
/*
* Transfer the port index into real index in the HW port status
 * registers. Calculate offset between the port's PORTSC register
@@ -4042,7 +4059,7 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
port_array = xhci->usb2_ports;
port_num = udev->portnum - 1;
pm_addr = port_array[port_num] + PORTPMSC;
- pm_val = xhci_readl(xhci, pm_addr);
+ pm_val = readl(pm_addr);
hlpm_addr = port_array[port_num] + PORTHLPMC;
field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
@@ -4082,26 +4099,26 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
spin_lock_irqsave(&xhci->lock, flags);
hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
- xhci_writel(xhci, hlpm_val, hlpm_addr);
+ writel(hlpm_val, hlpm_addr);
/* flush write */
- xhci_readl(xhci, hlpm_addr);
+ readl(hlpm_addr);
} else {
hird = xhci_calculate_hird_besl(xhci, udev);
}
pm_val &= ~PORT_HIRD_MASK;
pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
- xhci_writel(xhci, pm_val, pm_addr);
- pm_val = xhci_readl(xhci, pm_addr);
+ writel(pm_val, pm_addr);
+ pm_val = readl(pm_addr);
pm_val |= PORT_HLE;
- xhci_writel(xhci, pm_val, pm_addr);
+ writel(pm_val, pm_addr);
/* flush write */
- xhci_readl(xhci, pm_addr);
+ readl(pm_addr);
} else {
pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
- xhci_writel(xhci, pm_val, pm_addr);
+ writel(pm_val, pm_addr);
/* flush write */
- xhci_readl(xhci, pm_addr);
+ readl(pm_addr);
if (udev->usb2_hw_lpm_besl_capable) {
spin_unlock_irqrestore(&xhci->lock, flags);
mutex_lock(hcd->bandwidth_mutex);
@@ -4455,7 +4472,7 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
if (!config)
return timeout;
- for (i = 0; i < USB_MAXINTERFACES; i++) {
+ for (i = 0; i < config->desc.bNumInterfaces; i++) {
struct usb_driver *driver;
struct usb_interface *intf = config->interface[i];
@@ -4704,7 +4721,7 @@ int xhci_get_frame(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
/* EHCI mods by the periodic size. Why? */
- return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
+ return readl(&xhci->run_regs->microframe_index) >> 3;
}
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
@@ -4716,9 +4733,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
/* Accept arbitrarily long scatter-gather lists */
hcd->self.sg_tablesize = ~0;
- /* support to build packet from discontinuous buffers */
- hcd->self.no_sg_constraint = 1;
-
/* XHCI controllers don't stop the ep queue on short packets :| */
hcd->self.no_stop_on_short = 1;
@@ -4743,23 +4757,33 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
/* xHCI private pointer was set in xhci_pci_probe for the second
* registered roothub.
*/
+ xhci = hcd_to_xhci(hcd);
+ /*
+ * Support arbitrarily aligned sg-list entries on hosts without
+ * TD fragment rules (which are currently unsupported).
+ */
+ if (xhci->hci_version < 0x100)
+ hcd->self.no_sg_constraint = 1;
+
return 0;
}
xhci->cap_regs = hcd->regs;
xhci->op_regs = hcd->regs +
- HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
+ HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
xhci->run_regs = hcd->regs +
- (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
+ (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
/* Cache read-only capability registers */
- xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
- xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
- xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
- xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
+ xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
+ xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
+ xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
xhci->hci_version = HC_VERSION(xhci->hcc_params);
- xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
xhci_print_registers(xhci);
+ xhci->quirks = quirks;
+
get_quirks(dev, xhci);
/* In xhci controllers which follow xhci 1.0 spec gives a spurious
@@ -4769,6 +4793,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
if (xhci->hci_version > 0x96)
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ if (xhci->hci_version < 0x100)
+ hcd->self.no_sg_constraint = 1;
+
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)
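xhci_gen_setup() now seeds xhci->quirks from the new module parameter before calling the bus-specific get_quirks() callback, so bits passed at load time (for example via a quirks= module option; the exact module name depends on how the driver is built) act as a baseline that probing may extend. A sketch of a callback that preserves that baseline; the function name and the chosen quirk bit are illustrative:

	static void example_get_quirks(struct device *dev, struct xhci_hcd *xhci)
	{
		/* OR in detected quirks instead of overwriting the baseline
		 * that xhci_gen_setup() copied from the module parameter. */
		xhci->quirks |= XHCI_RESET_ON_RESUME;
	}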
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 03c74b7965f8..58ed9d088e63 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -752,7 +752,7 @@ struct xhci_stream_ctx {
};
/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */
-#define SCT_FOR_CTX(p) (((p) << 1) & 0x7)
+#define SCT_FOR_CTX(p) (((p) & 0x7) << 1)
/* Secondary stream array type, dequeue pointer is to a transfer ring */
#define SCT_SEC_TR 0
/* Primary stream array type, dequeue pointer is to a transfer ring */
@@ -1097,6 +1097,14 @@ struct xhci_event_cmd {
};
/* flags bitmasks */
+
+/* Address device - disable SetAddress */
+#define TRB_BSR (1<<9)
+enum xhci_setup_dev {
+ SETUP_CONTEXT_ONLY,
+ SETUP_CONTEXT_ADDRESS,
+};
+
/* bits 16:23 are the virtual function ID */
/* bits 24:31 are the slot ID */
#define TRB_TO_SLOT_ID(p) (((p) & (0xff<<24)) >> 24)
@@ -1595,19 +1603,6 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
#define xhci_warn_ratelimited(xhci, fmt, args...) \
dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
-/* TODO: copied from ehci.h - can be refactored? */
-/* xHCI spec says all registers are little endian */
-static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
- __le32 __iomem *regs)
-{
- return readl(regs);
-}
-static inline void xhci_writel(struct xhci_hcd *xhci,
- const unsigned int val, __le32 __iomem *regs)
-{
- writel(val, regs);
-}
-
/*
* Registers should always be accessed with double word or quad word accesses.
*
@@ -1790,6 +1785,7 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint **eps, unsigned int num_eps,
gfp_t mem_flags);
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
struct usb_device *udev, int enable);
@@ -1813,7 +1809,7 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
void xhci_ring_cmd_db(struct xhci_hcd *xhci);
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
- u32 slot_id);
+ u32 slot_id, enum xhci_setup_dev);
int xhci_queue_vendor_command(struct xhci_hcd *xhci,
u32 field1, u32 field2, u32 field3, u32 field4);
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
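The SCT_FOR_CTX() fix above matters because the stream context type is a 3-bit field at bits 3:1 of the dequeue pointer word, so the mask must be applied before the shift. A small worked comparison (sketch only; the macro names are chosen just to show both variants side by side):

	#define OLD_SCT_FOR_CTX(p)	(((p) << 1) & 0x7)	/* removed form   */
	#define NEW_SCT_FOR_CTX(p)	(((p) & 0x7) << 1)	/* corrected form */

	/* p = 1: OLD = 2, NEW = 2  -> identical for small type values        */
	/* p = 7: OLD = 6, NEW = 14 -> the old macro dropped bit 3 entirely   */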
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index 7121b50098d3..a62865af53cc 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -51,7 +51,7 @@
*
* version 0.7.3
* bugfix : The mdc800->state field gets set to READY after the
- * the diconnect function sets it to NOT_CONNECTED. This makes the
+ * the disconnect function sets it to NOT_CONNECTED. This makes the
* driver running like the camera is connected and causes some
* hang ups.
*
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 9c0f8caba3be..37b44b04a701 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -125,7 +125,6 @@
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/poll.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/usb.h>
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 3eaa83f05086..493c7f268b6f 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index 3f7c1a92579f..402b94dd2531 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -29,7 +29,6 @@
* published by the Free Software Foundation, version 2.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c
index 5b9831b95d97..9bab1a33bc16 100644
--- a/drivers/usb/misc/cytherm.c
+++ b/drivers/usb/misc/cytherm.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c
index d65984dee751..8950fa5e973d 100644
--- a/drivers/usb/misc/emi26.c
+++ b/drivers/usb/misc/emi26.c
@@ -13,7 +13,6 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/usb.h>
#include <linux/delay.h>
#include <linux/firmware.h>
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index ae794b90766b..1d9be4431b72 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/delay.h>
diff --git a/drivers/usb/misc/ezusb.c b/drivers/usb/misc/ezusb.c
index e712afed947c..947811bc8126 100644
--- a/drivers/usb/misc/ezusb.c
+++ b/drivers/usb/misc/ezusb.c
@@ -9,7 +9,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index ce978384fda1..4e38683c653c 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/completion.h>
@@ -386,7 +385,7 @@ static int idmouse_probe(struct usb_interface *interface,
result = usb_register_dev(interface, &idmouse_class);
if (result) {
/* something prevented us from registering this device */
- dev_err(&interface->dev, "Unble to allocate minor number.\n");
+ dev_err(&interface->dev, "Unable to allocate minor number.\n");
usb_set_intfdata(interface, NULL);
idmouse_delete(dev);
return result;
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index d36f34e25bed..20bcfdd7eace 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/usb.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
@@ -300,7 +299,7 @@ static ssize_t iowarrior_read(struct file *file, char __user *buffer,
do {
atomic_set(&dev->overflow_flag, 0);
if ((read_idx = read_index(dev)) == -1) {
- /* queue emty */
+ /* queue empty */
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
else {
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index b1d59532ac22..82503a7ff6c8 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -24,7 +24,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index eb37c9542052..97cd9e24bd25 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -79,7 +79,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/completion.h>
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index b9b356a9dd11..13731d512624 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -36,7 +36,6 @@
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/poll.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/usb.h>
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.c b/drivers/usb/misc/sisusbvga/sisusb_init.c
index cb8a3d91f970..bf0032ca35ed 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.c
@@ -40,7 +40,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/poll.h>
-#include <linux/init.h>
#include <linux/spinlock.h>
#include "sisusb.h"
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index 741efed4a236..4145314a515b 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -21,7 +21,6 @@
/* Standard include files */
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 89927bcff974..1184390508e9 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -14,7 +14,6 @@
*****************************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mutex.h>
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 12d03e7ad636..78eb4ff33269 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index b2d82b937392..1fe6b73c22f3 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
@@ -57,7 +56,7 @@ struct usb_sevsegdev {
* if str commands are used, we would assume the end of string
* so mem commands are used.
*/
-inline size_t my_memlen(const char *buf, size_t count)
+static inline size_t my_memlen(const char *buf, size_t count)
{
if (count > 0 && buf[count-1] == '\n')
return count - 1;
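
Besides gaining the missing static qualifier, my_memlen() is worth a second look because it is what lets the sysfs store routines use memory-length copies instead of string functions: it simply drops a single trailing newline from the written buffer. A stand-alone illustration with hypothetical buffer contents:

#include <assert.h>
#include <stddef.h>

static inline size_t my_memlen(const char *buf, size_t count)
{
	if (count > 0 && buf[count - 1] == '\n')
		return count - 1;
	else
		return count;
}

int main(void)
{
	assert(my_memlen("1234\n", 5) == 4);	/* trailing newline stripped */
	assert(my_memlen("1234", 4) == 4);	/* anything else left untouched */
	return 0;
}
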
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index b4152820d655..f6568b5e9b06 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -10,6 +10,7 @@
#include <linux/usb.h>
+#define SIMPLE_IO_TIMEOUT 10000 /* in milliseconds */
/*-------------------------------------------------------------------------*/
@@ -366,6 +367,7 @@ static int simple_io(
int max = urb->transfer_buffer_length;
struct completion completion;
int retval = 0;
+ unsigned long expire;
urb->context = &completion;
while (retval == 0 && iterations-- > 0) {
@@ -378,9 +380,15 @@ static int simple_io(
if (retval != 0)
break;
- /* NOTE: no timeouts; can't be broken out of by interrupt */
- wait_for_completion(&completion);
- retval = urb->status;
+ expire = msecs_to_jiffies(SIMPLE_IO_TIMEOUT);
+ if (!wait_for_completion_timeout(&completion, expire)) {
+ usb_kill_urb(urb);
+ retval = (urb->status == -ENOENT ?
+ -ETIMEDOUT : urb->status);
+ } else {
+ retval = urb->status;
+ }
+
urb->dev = udev;
if (retval == 0 && usb_pipein(urb->pipe))
retval = simple_check_buf(tdev, urb);
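
The unbounded wait_for_completion() could hang the test forever against a misbehaving device. The bounded variant converts a millisecond budget into jiffies and, on expiry, kills the URB; usb_kill_urb() completes the URB with -ENOENT, which is remapped to -ETIMEDOUT so callers see a timeout rather than a cancellation. Reduced to its shape (kernel context assumed, a sketch rather than additional patch code):

	unsigned long expire = msecs_to_jiffies(SIMPLE_IO_TIMEOUT);	/* 10 s budget */

	if (!wait_for_completion_timeout(&completion, expire)) {
		usb_kill_urb(urb);	/* forces completion, urb->status becomes -ENOENT */
		retval = (urb->status == -ENOENT) ? -ETIMEDOUT : urb->status;
	} else {
		retval = urb->status;	/* completed (or failed) within the budget */
	}
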
@@ -619,8 +627,8 @@ static int is_good_ext(struct usbtest_dev *tdev, u8 *buf)
}
attr = le32_to_cpu(ext->bmAttributes);
- /* bits[1:4] is used and others are reserved */
- if (attr & ~0x1e) { /* reserved == 0 */
+ /* bits[1:15] is used and others are reserved */
+ if (attr & ~0xfffe) { /* reserved == 0 */
ERROR(tdev, "reserved bits set\n");
return 0;
}
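
The USB 2.0 Extension capability gained BESL fields in its upper bits with the LPM errata, so only bit 0 and bits 16:31 remain reserved; the widened mask reflects that. A quick stand-alone comparison of what each mask rejects, using a hypothetical but plausible attribute word (LPM capable plus a baseline BESL value):

#include <stdio.h>

int main(void)
{
	unsigned int attr = (0x5u << 8) | (1u << 1);	/* illustration only */

	printf("old reserved check: %s\n", (attr & ~0x1eu)   ? "rejected" : "ok");
	printf("new reserved check: %s\n", (attr & ~0xfffeu) ? "rejected" : "ok");
	return 0;
}
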
@@ -763,7 +771,7 @@ static int ch9_postconfig(struct usbtest_dev *dev)
* there's always [9.4.3] a bos device descriptor [9.6.2] in USB
* 3.0 spec
*/
- if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0300) {
+ if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0210) {
struct usb_bos_descriptor *bos = NULL;
struct usb_dev_cap_header *header = NULL;
unsigned total, num, length;
@@ -944,7 +952,7 @@ struct ctrl_ctx {
int last;
};
-#define NUM_SUBCASES 15 /* how many test subcases here? */
+#define NUM_SUBCASES 16 /* how many test subcases here? */
struct subcase {
struct usb_ctrlrequest setup;
@@ -1218,6 +1226,15 @@ test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
}
expected = -EREMOTEIO;
break;
+ case 15:
+ req.wValue = cpu_to_le16(USB_DT_BOS << 8);
+ if (udev->bos)
+ len = le16_to_cpu(udev->bos->desc->wTotalLength);
+ else
+ len = sizeof(struct usb_bos_descriptor);
+ if (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0201)
+ expected = -EPIPE;
+ break;
default:
ERROR(dev, "bogus number of ctrl queue testcases!\n");
context.status = -EINVAL;
@@ -1537,8 +1554,17 @@ static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
return retval;
}
retval = verify_halted(tdev, ep, urb);
- if (retval < 0)
+ if (retval < 0) {
+ int ret;
+
+ /* clear halt anyways, else further tests will fail */
+ ret = usb_clear_halt(urb->dev, urb->pipe);
+ if (ret)
+ ERROR(tdev, "ep %02x couldn't clear halt, %d\n",
+ ep, ret);
+
return retval;
+ }
/* clear halt (tests API + protocol), verify it worked */
retval = usb_clear_halt(urb->dev, urb->pipe);
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index b6ab515bfc6c..24278208bf74 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
@@ -464,7 +463,7 @@ static ssize_t yurex_write(struct file *file, const char *user_buffer, size_t co
goto error;
mutex_lock(&dev->io_mutex);
- if (!dev->interface) { /* alreaday disconnected */
+ if (!dev->interface) { /* already disconnected */
mutex_unlock(&dev->io_mutex);
retval = -ENODEV;
goto error;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 57dfc0cedb00..688dc8bb192d 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -6,7 +6,7 @@
# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
config USB_MUSB_HDRC
tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
- depends on USB_GADGET
+ depends on (USB || USB_GADGET)
help
Say Y here if your system has a dual role high speed USB
controller based on the Mentor Graphics silicon IP. Then
@@ -35,21 +35,21 @@ choice
config USB_MUSB_HOST
bool "Host only mode"
- depends on USB
+ depends on USB=y || USB=USB_MUSB_HDRC
help
Select this when you want to use MUSB in host mode only,
thereby the gadget feature will be regressed.
config USB_MUSB_GADGET
bool "Gadget only mode"
- depends on USB_GADGET
+ depends on USB_GADGET=y || USB_GADGET=USB_MUSB_HDRC
help
Select this when you want to use MUSB in gadget mode only,
thereby the host feature will be regressed.
config USB_MUSB_DUAL_ROLE
bool "Dual Role mode"
- depends on (USB && USB_GADGET)
+ depends on ((USB=y || USB=USB_MUSB_HDRC) && (USB_GADGET=y || USB_GADGET=USB_MUSB_HDRC))
help
This is the default mode of working of MUSB controller where
both host and gadget features are enabled.
@@ -93,6 +93,12 @@ config USB_MUSB_BLACKFIN
config USB_MUSB_UX500
tristate "Ux500 platforms"
+config USB_MUSB_JZ4740
+ tristate "JZ4740"
+ depends on MACH_JZ4740 || COMPILE_TEST
+ depends on USB_MUSB_GADGET
+ depends on USB_OTG_BLACKLIST_HUB
+
endchoice
config USB_MUSB_AM335X_CHILD
@@ -100,7 +106,7 @@ config USB_MUSB_AM335X_CHILD
choice
prompt 'MUSB DMA mode'
- default MUSB_PIO_ONLY if ARCH_MULTIPLATFORM
+ default MUSB_PIO_ONLY if ARCH_MULTIPLATFORM || USB_MUSB_JZ4740
default USB_UX500_DMA if USB_MUSB_UX500
default USB_INVENTRA_DMA if USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
default USB_TI_CPPI_DMA if USB_MUSB_DAVINCI
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index c5ea5c6dc169..ba495018b416 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_USB_MUSB_DAVINCI) += davinci.o
obj-$(CONFIG_USB_MUSB_DA8XX) += da8xx.o
obj-$(CONFIG_USB_MUSB_BLACKFIN) += blackfin.o
obj-$(CONFIG_USB_MUSB_UX500) += ux500.o
+obj-$(CONFIG_USB_MUSB_JZ4740) += jz4740.o
obj-$(CONFIG_USB_MUSB_AM335X_CHILD) += musb_am335x.o
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index ca45b39db5b9..b3aa0184af9a 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -26,7 +26,6 @@
*
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index d9692f78e227..796677fa9a15 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/gpio.h>
#include <linux/io.h>
@@ -77,7 +76,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg);
SSYNC();
- /* Wait for compelete */
+ /* Wait for complete */
while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum)))
cpu_relax();
@@ -131,7 +130,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg);
SSYNC();
- /* Wait for compelete */
+ /* Wait for complete */
while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum)))
cpu_relax();
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 2f2c1cb36421..e3486de71995 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -26,7 +26,6 @@
*
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 1121fd741bf8..c259dac9d056 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/clk.h>
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
new file mode 100644
index 000000000000..5f30537f1927
--- /dev/null
+++ b/drivers/usb/musb/jz4740.c
@@ -0,0 +1,201 @@
+/*
+ * Ingenic JZ4740 "glue layer"
+ *
+ * Copyright (C) 2013, Apelete Seketeli <apelete@seketeli.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "musb_core.h"
+
+struct jz4740_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ struct clk *clk;
+};
+
+static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+ musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+ musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+ /*
+ * The controller is gadget only, the state of the host mode IRQ bits is
+ * undefined. Mask them to make sure that the musb driver core will
+ * never see them set
+ */
+ musb->int_usb &= MUSB_INTR_SUSPEND | MUSB_INTR_RESUME |
+ MUSB_INTR_RESET | MUSB_INTR_SOF;
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx)
+ retval = musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return retval;
+}
+
+static struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, },
+};
+
+static struct musb_hdrc_config jz4740_musb_config = {
+ /* Silicon does not implement USB OTG. */
+ .multipoint = 0,
+ /* Max EPs scanned, driver will decide which EP can be used. */
+ .num_eps = 4,
+ /* RAMbits needed to configure EPs from table */
+ .ram_bits = 9,
+ .fifo_cfg = jz4740_musb_fifo_cfg,
+ .fifo_cfg_size = ARRAY_SIZE(jz4740_musb_fifo_cfg),
+};
+
+static struct musb_hdrc_platform_data jz4740_musb_platform_data = {
+ .mode = MUSB_PERIPHERAL,
+ .config = &jz4740_musb_config,
+};
+
+static int jz4740_musb_init(struct musb *musb)
+{
+ musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (!musb->xceiv) {
+ pr_err("HS UDC: no transceiver configured\n");
+ return -ENODEV;
+ }
+
+ /* Silicon does not implement ConfigData register.
+ * Set dyn_fifo to avoid reading EP config from hardware.
+ */
+ musb->dyn_fifo = true;
+
+ musb->isr = jz4740_musb_interrupt;
+
+ return 0;
+}
+
+static int jz4740_musb_exit(struct musb *musb)
+{
+ usb_put_phy(musb->xceiv);
+
+ return 0;
+}
+
+static const struct musb_platform_ops jz4740_musb_ops = {
+ .init = jz4740_musb_init,
+ .exit = jz4740_musb_exit,
+};
+
+static int jz4740_probe(struct platform_device *pdev)
+{
+ struct musb_hdrc_platform_data *pdata = &jz4740_musb_platform_data;
+ struct platform_device *musb;
+ struct jz4740_glue *glue;
+ struct clk *clk;
+ int ret;
+
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
+ musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
+ if (!musb) {
+ dev_err(&pdev->dev, "failed to allocate musb device\n");
+ return -ENOMEM;
+ }
+
+ clk = devm_clk_get(&pdev->dev, "udc");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(clk);
+ goto err_platform_device_put;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ goto err_platform_device_put;
+ }
+
+ musb->dev.parent = &pdev->dev;
+
+ glue->dev = &pdev->dev;
+ glue->musb = musb;
+ glue->clk = clk;
+
+ pdata->platform_ops = &jz4740_musb_ops;
+
+ platform_set_drvdata(pdev, glue);
+
+ ret = platform_device_add_resources(musb, pdev->resource,
+ pdev->num_resources);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add resources\n");
+ goto err_clk_disable;
+ }
+
+ ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add platform_data\n");
+ goto err_clk_disable;
+ }
+
+ ret = platform_device_add(musb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register musb device\n");
+ goto err_clk_disable;
+ }
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(clk);
+err_platform_device_put:
+ platform_device_put(musb);
+ return ret;
+}
+
+static int jz4740_remove(struct platform_device *pdev)
+{
+ struct jz4740_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->musb);
+ clk_disable_unprepare(glue->clk);
+
+ return 0;
+}
+
+static struct platform_driver jz4740_driver = {
+ .probe = jz4740_probe,
+ .remove = jz4740_remove,
+ .driver = {
+ .name = "musb-jz4740",
+ },
+};
+
+MODULE_DESCRIPTION("JZ4740 MUSB Glue Layer");
+MODULE_AUTHOR("Apelete Seketeli <apelete@seketeli.net>");
+MODULE_LICENSE("GPL v2");
+module_platform_driver(jz4740_driver);
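
The new glue driver closes with module_platform_driver(), the same helper the musb core switches to later in this series; it generates the module init/exit boilerplate that registers and unregisters the platform driver. Roughly what the macro expands to, shown only for illustration (the generated symbol names follow the driver variable name):

static int __init jz4740_driver_init(void)
{
	return platform_driver_register(&jz4740_driver);
}
module_init(jz4740_driver_init);

static void __exit jz4740_driver_exit(void)
{
	platform_driver_unregister(&jz4740_driver);
}
module_exit(jz4740_driver_exit);
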
diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c
index 8be9b02c3cc2..d2353781bd2d 100644
--- a/drivers/usb/musb/musb_am335x.c
+++ b/drivers/usb/musb/musb_am335x.c
@@ -1,4 +1,3 @@
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 4d4499b80449..fc192ad9cc6a 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -83,7 +83,7 @@
* This gets many kinds of configuration information:
* - Kconfig for everything user-configurable
* - platform_device for addressing, irq, and platform_data
- * - platform_data is mostly for board-specific informarion
+ * - platform_data is mostly for board-specific information
* (plus recentrly, SOC or family details)
*
* Most of the conditional compilation will (someday) vanish.
@@ -93,7 +93,6 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/prefetch.h>
@@ -478,8 +477,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb->port1_status |=
(USB_PORT_STAT_C_SUSPEND << 16)
| MUSB_PORT_STAT_RESUME;
- musb->rh_timer = jiffies
- + msecs_to_jiffies(20);
+ schedule_delayed_work(
+ &musb->finish_resume_work, 20);
musb->xceiv->state = OTG_STATE_A_HOST;
musb->is_active = 1;
@@ -1187,7 +1186,7 @@ fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
/* EP0 reserved endpoint for control, bidirectional;
- * EP1 reserved for bulk, two unidirection halves.
+ * EP1 reserved for bulk, two unidirectional halves.
*/
if (hw_ep->epnum == 1)
musb->bulk_ep = hw_ep;
@@ -1813,6 +1812,21 @@ static void musb_free(struct musb *musb)
musb_host_free(musb);
}
+static void musb_deassert_reset(struct work_struct *work)
+{
+ struct musb *musb;
+ unsigned long flags;
+
+ musb = container_of(work, struct musb, deassert_reset_work.work);
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb->port1_status & USB_PORT_STAT_RESET)
+ musb_port_reset(musb, false);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
/*
* Perform generic per-controller initialization.
*
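
musb_deassert_reset() recovers the enclosing struct musb from the embedded delayed work and only drops the reset if the port still reports USB_PORT_STAT_RESET, since the work can race with a disconnect. The recovery step is the usual container_of() idiom, which depends on the work item being a member of struct musb. A minimal user-space model of that pattern, with hypothetical stand-in types rather than the real kernel structures:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct delayed_work { int dummy; };	/* stand-in for the kernel type */

struct musb_model {
	unsigned int port1_status;
	struct delayed_work deassert_reset_work;
};

static void deassert_reset(struct delayed_work *work)
{
	struct musb_model *musb =
		container_of(work, struct musb_model, deassert_reset_work);

	printf("port1_status = %#x\n", musb->port1_status);
}

int main(void)
{
	struct musb_model m = { .port1_status = 0x10 };

	deassert_reset(&m.deassert_reset_work);	/* recovers &m from its member */
	return 0;
}
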
@@ -1857,7 +1871,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
/* The musb_platform_init() call:
* - adjusts musb->mregs
* - sets the musb->isr
- * - may initialize an integrated tranceiver
+ * - may initialize an integrated transceiver
* - initializes musb->xceiv, usually by otg_get_phy()
* - stops powering VBUS
*
@@ -1897,6 +1911,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
/* Init IRQ workqueue before request_irq */
INIT_WORK(&musb->irq_work, musb_irq_work);
+ INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
+ INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
/* setup musb parts of the core (especially endpoints) */
status = musb_core_init(plat->config->multipoint
@@ -1940,17 +1956,26 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
switch (musb->port_mode) {
case MUSB_PORT_MODE_HOST:
status = musb_host_setup(musb, plat->power);
+ if (status < 0)
+ goto fail3;
+ status = musb_platform_set_mode(musb, MUSB_HOST);
break;
case MUSB_PORT_MODE_GADGET:
status = musb_gadget_setup(musb);
+ if (status < 0)
+ goto fail3;
+ status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
break;
case MUSB_PORT_MODE_DUAL_ROLE:
status = musb_host_setup(musb, plat->power);
if (status < 0)
goto fail3;
status = musb_gadget_setup(musb);
- if (status)
+ if (status) {
musb_host_cleanup(musb);
+ goto fail3;
+ }
+ status = musb_platform_set_mode(musb, MUSB_OTG);
break;
default:
dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
@@ -1981,6 +2006,8 @@ fail4:
fail3:
cancel_work_sync(&musb->irq_work);
+ cancel_delayed_work_sync(&musb->finish_resume_work);
+ cancel_delayed_work_sync(&musb->deassert_reset_work);
if (musb->dma_controller)
dma_controller_destroy(musb->dma_controller);
fail2_5:
@@ -2044,6 +2071,8 @@ static int musb_remove(struct platform_device *pdev)
dma_controller_destroy(musb->dma_controller);
cancel_work_sync(&musb->irq_work);
+ cancel_delayed_work_sync(&musb->finish_resume_work);
+ cancel_delayed_work_sync(&musb->deassert_reset_work);
musb_free(musb);
device_init_wakeup(dev, 0);
return 0;
@@ -2216,16 +2245,28 @@ static int musb_suspend(struct device *dev)
*/
}
+ musb_save_context(musb);
+
spin_unlock_irqrestore(&musb->lock, flags);
return 0;
}
static int musb_resume_noirq(struct device *dev)
{
- /* for static cmos like DaVinci, register values were preserved
+ struct musb *musb = dev_to_musb(dev);
+
+ /*
+ * For static cmos like DaVinci, register values were preserved
* unless for some reason the whole soc powered down or the USB
* module got reset through the PSC (vs just being disabled).
+ *
+ * For the DSPS glue layer though, a full register restore has to
+ * be done. As it shouldn't harm other platforms, we do it
+ * unconditionally.
*/
+
+ musb_restore_context(musb);
+
return 0;
}
@@ -2283,19 +2324,4 @@ static struct platform_driver musb_driver = {
.shutdown = musb_shutdown,
};
-/*-------------------------------------------------------------------------*/
-
-static int __init musb_init(void)
-{
- if (usb_disabled())
- return 0;
-
- return platform_driver_register(&musb_driver);
-}
-module_init(musb_init);
-
-static void __exit musb_cleanup(void)
-{
- platform_driver_unregister(&musb_driver);
-}
-module_exit(musb_cleanup);
+module_platform_driver(musb_driver);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 29f7cd7c7964..7083e82776ff 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -47,6 +47,7 @@
#include <linux/usb/otg.h>
#include <linux/usb/musb.h>
#include <linux/phy/phy.h>
+#include <linux/workqueue.h>
struct musb;
struct musb_hw_ep;
@@ -295,6 +296,8 @@ struct musb {
irqreturn_t (*isr)(int, void *);
struct work_struct irq_work;
+ struct delayed_work deassert_reset_work;
+ struct delayed_work finish_resume_work;
u16 hwvers;
u16 intrrxe;
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index a12bd30401e0..f88929609bac 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -615,7 +615,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
dc = dma_request_slave_channel(dev, str);
if (!dc) {
- dev_err(dev, "Falied to request %s.\n", str);
+ dev_err(dev, "Failed to request %s.\n", str);
ret = -EPROBE_DEFER;
goto err;
}
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 1901f6fe5807..7a109eae9b9a 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -29,7 +29,6 @@
* da8xx.c would be merged to this file after testing.
*/
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/platform_device.h>
@@ -83,6 +82,8 @@ struct dsps_musb_wrapper {
u16 coreintr_status;
u16 phy_utmi;
u16 mode;
+ u16 tx_mode;
+ u16 rx_mode;
/* bit positions for control */
unsigned reset:5;
@@ -106,10 +107,24 @@ struct dsps_musb_wrapper {
/* bit positions for mode */
unsigned iddig:5;
+ unsigned iddig_mux:5;
/* miscellaneous stuff */
u8 poll_seconds;
};
+/*
+ * register shadow for suspend
+ */
+struct dsps_context {
+ u32 control;
+ u32 epintr;
+ u32 coreintr;
+ u32 phy_utmi;
+ u32 mode;
+ u32 tx_mode;
+ u32 rx_mode;
+};
+
/**
* DSPS glue structure.
*/
@@ -119,6 +134,8 @@ struct dsps_glue {
const struct dsps_musb_wrapper *wrp; /* wrapper register offsets */
struct timer_list timer; /* otg_workaround timer */
unsigned long last_timer; /* last timer data for each instance */
+
+ struct dsps_context context;
};
static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout)
@@ -341,8 +358,9 @@ static irqreturn_t dsps_interrupt(int irq, void *hci)
if (musb->int_tx || musb->int_rx || musb->int_usb)
ret |= musb_interrupt(musb);
- /* Poll for ID change */
- if (musb->xceiv->state == OTG_STATE_B_IDLE)
+ /* Poll for ID change in OTG port mode */
+ if (musb->xceiv->state == OTG_STATE_B_IDLE &&
+ musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ);
out:
spin_unlock_irqrestore(&musb->lock, flags);
@@ -406,6 +424,54 @@ static int dsps_musb_exit(struct musb *musb)
return 0;
}
+static int dsps_musb_set_mode(struct musb *musb, u8 mode)
+{
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ void __iomem *ctrl_base = musb->ctrl_base;
+ void __iomem *base = musb->mregs;
+ u32 reg;
+
+ reg = dsps_readl(base, wrp->mode);
+
+ switch (mode) {
+ case MUSB_HOST:
+ reg &= ~(1 << wrp->iddig);
+
+ /*
+ * if we're setting mode to host-only or device-only, we're
+ * going to ignore whatever the PHY sends us and just force
+ * ID pin status by SW
+ */
+ reg |= (1 << wrp->iddig_mux);
+
+ dsps_writel(base, wrp->mode, reg);
+ dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
+ break;
+ case MUSB_PERIPHERAL:
+ reg |= (1 << wrp->iddig);
+
+ /*
+ * if we're setting mode to host-only or device-only, we're
+ * going to ignore whatever the PHY sends us and just force
+ * ID pin status by SW
+ */
+ reg |= (1 << wrp->iddig_mux);
+
+ dsps_writel(base, wrp->mode, reg);
+ break;
+ case MUSB_OTG:
+ dsps_writel(base, wrp->phy_utmi, 0x02);
+ break;
+ default:
+ dev_err(glue->dev, "unsupported mode %d\n", mode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static struct musb_platform_ops dsps_ops = {
.init = dsps_musb_init,
.exit = dsps_musb_exit,
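
For the single-role configurations the glue forces the ID pin state in software: iddig selects host (0) or peripheral (1), and iddig_mux tells the wrapper to use that forced value instead of the PHY's ID pin, while OTG mode leaves the mux alone so the real pin keeps deciding the role. A compact kernel-context sketch of the host case, drawn from the hunk above rather than adding anything new to the patch:

	/* Sketch: force ID = 0 (host) via the wrapper MODE register */
	u32 reg = dsps_readl(base, wrp->mode);

	reg &= ~(1 << wrp->iddig);	/* ID = 0: host; set the bit instead for peripheral */
	reg |=  (1 << wrp->iddig_mux);	/* take ID from SW, ignore the PHY's ID pin */
	dsps_writel(base, wrp->mode, reg);
	/* MUSB_OTG leaves iddig_mux clear so the hardware ID pin stays in control */
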
@@ -414,6 +480,7 @@ static struct musb_platform_ops dsps_ops = {
.disable = dsps_musb_disable,
.try_idle = dsps_musb_try_idle,
+ .set_mode = dsps_musb_set_mode,
};
static u64 musb_dmamask = DMA_BIT_MASK(32);
@@ -507,6 +574,7 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue,
config->num_eps = get_int_prop(dn, "mentor,num-eps");
config->ram_bits = get_int_prop(dn, "mentor,ram-bits");
+ config->host_port_deassert_reset_at_resume = 1;
pdata.mode = get_musb_port_mode(dev);
/* DT keeps this entry in mA, musb expects it as per USB spec */
pdata.power = get_int_prop(dn, "mentor,power") / 2;
@@ -605,9 +673,12 @@ static const struct dsps_musb_wrapper am33xx_driver_data = {
.coreintr_status = 0x34,
.phy_utmi = 0xe0,
.mode = 0xe8,
+ .tx_mode = 0x70,
+ .rx_mode = 0x74,
.reset = 0,
.otg_disable = 21,
.iddig = 8,
+ .iddig_mux = 7,
.usb_shift = 0,
.usb_mask = 0x1ff,
.usb_bitmap = (0x1ff << 0),
@@ -628,11 +699,52 @@ static const struct of_device_id musb_dsps_of_match[] = {
};
MODULE_DEVICE_TABLE(of, musb_dsps_of_match);
+#ifdef CONFIG_PM
+static int dsps_suspend(struct device *dev)
+{
+ struct dsps_glue *glue = dev_get_drvdata(dev);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ struct musb *musb = platform_get_drvdata(glue->musb);
+ void __iomem *mbase = musb->ctrl_base;
+
+ glue->context.control = dsps_readl(mbase, wrp->control);
+ glue->context.epintr = dsps_readl(mbase, wrp->epintr_set);
+ glue->context.coreintr = dsps_readl(mbase, wrp->coreintr_set);
+ glue->context.phy_utmi = dsps_readl(mbase, wrp->phy_utmi);
+ glue->context.mode = dsps_readl(mbase, wrp->mode);
+ glue->context.tx_mode = dsps_readl(mbase, wrp->tx_mode);
+ glue->context.rx_mode = dsps_readl(mbase, wrp->rx_mode);
+
+ return 0;
+}
+
+static int dsps_resume(struct device *dev)
+{
+ struct dsps_glue *glue = dev_get_drvdata(dev);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ struct musb *musb = platform_get_drvdata(glue->musb);
+ void __iomem *mbase = musb->ctrl_base;
+
+ dsps_writel(mbase, wrp->control, glue->context.control);
+ dsps_writel(mbase, wrp->epintr_set, glue->context.epintr);
+ dsps_writel(mbase, wrp->coreintr_set, glue->context.coreintr);
+ dsps_writel(mbase, wrp->phy_utmi, glue->context.phy_utmi);
+ dsps_writel(mbase, wrp->mode, glue->context.mode);
+ dsps_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
+ dsps_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dsps_pm_ops, dsps_suspend, dsps_resume);
+
static struct platform_driver dsps_usbss_driver = {
.probe = dsps_probe,
.remove = dsps_remove,
.driver = {
.name = "musb-dsps",
+ .pm = &dsps_pm_ops,
.of_match_table = musb_dsps_of_match,
},
};
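
The new PM callbacks shadow the handful of wrapper registers (control, interrupt enables, PHY_UTMI, mode, tx/rx mode) in struct dsps_context on suspend and write them back on resume, because the AM33xx USB subsystem loses this context when the SoC powers down. SIMPLE_DEV_PM_OPS() only wires the two callbacks into the system-sleep slots; its open-coded equivalent, shown here as an illustration of the macro and not as patch code, would be:

static const struct dev_pm_ops dsps_pm_ops = {
	.suspend  = dsps_suspend,	/* system sleep entry */
	.resume   = dsps_resume,	/* system sleep exit  */
	.freeze   = dsps_suspend,	/* hibernation reuses the same pair */
	.thaw     = dsps_resume,
	.poweroff = dsps_suspend,
	.restore  = dsps_resume,
};
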
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 32fb057c03f5..d4aa779339f1 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1727,14 +1727,14 @@ init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
ep->end_point.name = ep->name;
INIT_LIST_HEAD(&ep->end_point.ep_list);
if (!epnum) {
- ep->end_point.maxpacket = 64;
+ usb_ep_set_maxpacket_limit(&ep->end_point, 64);
ep->end_point.ops = &musb_g_ep0_ops;
musb->g.ep0 = &ep->end_point;
} else {
if (is_in)
- ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
+ usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx);
else
- ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
+ usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx);
ep->end_point.ops = &musb_ep_ops;
list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
}
@@ -2119,7 +2119,15 @@ __acquires(musb->lock)
/* Normal reset, as B-Device;
* or else after HNP, as A-Device
*/
- if (devctl & MUSB_DEVCTL_BDEVICE) {
+ if (!musb->g.is_otg) {
+ /* USB device controllers that are not OTG compatible
+ * may not have DEVCTL register in silicon.
+ * In that case, do not rely on devctl for setting
+ * peripheral mode.
+ */
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ musb->g.is_a_peripheral = 0;
+ } else if (devctl & MUSB_DEVCTL_BDEVICE) {
musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
musb->g.is_a_peripheral = 0;
} else {
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 6582a20bec05..ed455724017b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -39,7 +39,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
@@ -2013,7 +2012,7 @@ static int musb_schedule(
head = &musb->out_bulk;
/* Enable bulk RX/TX NAK timeout scheme when bulk requests are
- * multiplexed. This scheme doen't work in high speed to full
+ * multiplexed. This scheme does not work in high speed to full
* speed scenario as NAK interrupts are not coming from a
* full speed device connected to a high speed device.
* NAK timeout interval is 8 (128 uframe or 16ms) for HS and
@@ -2433,6 +2432,8 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
struct musb *musb = hcd_to_musb(hcd);
u8 devctl;
+ musb_port_suspend(musb, true);
+
if (!is_host_active(musb))
return 0;
@@ -2462,7 +2463,12 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
static int musb_bus_resume(struct usb_hcd *hcd)
{
- /* resuming child port does the work */
+ struct musb *musb = hcd_to_musb(hcd);
+
+ if (musb->config &&
+ musb->config->host_port_deassert_reset_at_resume)
+ musb_port_reset(musb, false);
+
return 0;
}
@@ -2657,6 +2663,7 @@ int musb_host_setup(struct musb *musb, int power_budget)
if (ret < 0)
return ret;
+ device_wakeup_enable(hcd->self.controller);
return 0;
}
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 960d73570b2f..7bbf01bf4bb0 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -92,6 +92,9 @@ extern void musb_host_rx(struct musb *, u8);
extern void musb_root_disconnect(struct musb *musb);
extern void musb_host_resume_root_hub(struct musb *musb);
extern void musb_host_poke_root_hub(struct musb *musb);
+extern void musb_port_suspend(struct musb *musb, bool do_suspend);
+extern void musb_port_reset(struct musb *musb, bool do_reset);
+extern void musb_host_finish_resume(struct work_struct *work);
#else
static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
{
@@ -121,6 +124,9 @@ static inline void musb_root_disconnect(struct musb *musb) {}
static inline void musb_host_resume_root_hub(struct musb *musb) {}
static inline void musb_host_poll_rh_status(struct musb *musb) {}
static inline void musb_host_poke_root_hub(struct musb *musb) {}
+static inline void musb_port_suspend(struct musb *musb, bool do_suspend) {}
+static inline void musb_port_reset(struct musb *musb, bool do_reset) {}
+static inline void musb_host_finish_resume(struct work_struct *work) {}
#endif
struct usb_hcd;
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 9af6bba5eac9..eb634433ef09 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -36,7 +36,6 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/time.h>
#include <linux/timer.h>
@@ -44,7 +43,38 @@
#include "musb_core.h"
-static void musb_port_suspend(struct musb *musb, bool do_suspend)
+void musb_host_finish_resume(struct work_struct *work)
+{
+ struct musb *musb;
+ unsigned long flags;
+ u8 power;
+
+ musb = container_of(work, struct musb, finish_resume_work.work);
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ power = musb_readb(musb->mregs, MUSB_POWER);
+ power &= ~MUSB_POWER_RESUME;
+ dev_dbg(musb->controller, "root port resume stopped, power %02x\n",
+ power);
+ musb_writeb(musb->mregs, MUSB_POWER, power);
+
+ /*
+ * ISSUE: DaVinci (RTL 1.300) disconnects after
+ * resume of high speed peripherals (but not full
+ * speed ones).
+ */
+ musb->is_active = 1;
+ musb->port1_status &= ~(USB_PORT_STAT_SUSPEND | MUSB_PORT_STAT_RESUME);
+ musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
+ usb_hcd_poll_rh_status(musb->hcd);
+ /* NOTE: it might really be A_WAIT_BCON ... */
+ musb->xceiv->state = OTG_STATE_A_HOST;
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+void musb_port_suspend(struct musb *musb, bool do_suspend)
{
struct usb_otg *otg = musb->xceiv->otg;
u8 power;
@@ -105,11 +135,11 @@ static void musb_port_suspend(struct musb *musb, bool do_suspend)
/* later, GetPortStatus will stop RESUME signaling */
musb->port1_status |= MUSB_PORT_STAT_RESUME;
- musb->rh_timer = jiffies + msecs_to_jiffies(20);
+ schedule_delayed_work(&musb->finish_resume_work, 20);
}
}
-static void musb_port_reset(struct musb *musb, bool do_reset)
+void musb_port_reset(struct musb *musb, bool do_reset)
{
u8 power;
void __iomem *mbase = musb->mregs;
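
Moving the RESUME and RESET completion out of GetPortStatus polling and into delayed work means the signaling now finishes even when no hub status request arrives. One detail worth flagging: schedule_delayed_work() takes its delay in jiffies, so the literal 20 and 50 used here are jiffies rather than milliseconds. If the 20 ms / 50 ms timing of the old rh_timer code is the intent (an assumption about intent, not something the patch states), the conversion would look like:

	/* later, GetPortStatus will stop RESUME signaling */
	musb->port1_status |= MUSB_PORT_STAT_RESUME;
	schedule_delayed_work(&musb->finish_resume_work,
			      msecs_to_jiffies(20));	/* 20 ms, not 20 jiffies */
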
@@ -150,7 +180,7 @@ static void musb_port_reset(struct musb *musb, bool do_reset)
musb->port1_status |= USB_PORT_STAT_RESET;
musb->port1_status &= ~USB_PORT_STAT_ENABLE;
- musb->rh_timer = jiffies + msecs_to_jiffies(50);
+ schedule_delayed_work(&musb->deassert_reset_work, 50);
} else {
dev_dbg(musb->controller, "root port reset stopped\n");
musb_writeb(mbase, MUSB_POWER,
@@ -325,36 +355,6 @@ int musb_hub_control(
if (wIndex != 1)
goto error;
- /* finish RESET signaling? */
- if ((musb->port1_status & USB_PORT_STAT_RESET)
- && time_after_eq(jiffies, musb->rh_timer))
- musb_port_reset(musb, false);
-
- /* finish RESUME signaling? */
- if ((musb->port1_status & MUSB_PORT_STAT_RESUME)
- && time_after_eq(jiffies, musb->rh_timer)) {
- u8 power;
-
- power = musb_readb(musb->mregs, MUSB_POWER);
- power &= ~MUSB_POWER_RESUME;
- dev_dbg(musb->controller, "root port resume stopped, power %02x\n",
- power);
- musb_writeb(musb->mregs, MUSB_POWER, power);
-
- /* ISSUE: DaVinci (RTL 1.300) disconnects after
- * resume of high speed peripherals (but not full
- * speed ones).
- */
-
- musb->is_active = 1;
- musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
- | MUSB_PORT_STAT_RESUME);
- musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
- usb_hcd_poll_rh_status(musb->hcd);
- /* NOTE: it might really be A_WAIT_BCON ... */
- musb->xceiv->state = OTG_STATE_A_HOST;
- }
-
put_unaligned(cpu_to_le32(musb->port1_status
& ~MUSB_PORT_STAT_RESUME),
(__le32 *) buf);
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 4432314d70ee..4e9fb1d08698 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/prefetch.h>
#include <linux/usb.h>
#include <linux/irq.h>
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index b8794eb81e9c..e33b6b2c44c2 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index 122446bf1664..c2e45e632723 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -21,7 +21,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index 3700e9713258..9aad00f11bd5 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -336,7 +336,9 @@ static int ux500_dma_controller_start(struct ux500_dma_controller *controller)
data ?
data->dma_filter :
NULL,
- param_array[ch_num]);
+ param_array ?
+ param_array[ch_num] :
+ NULL);
if (!ux500_channel->dma_chan) {
ERR("Dma pipe allocation error dir=%d ch=%d\n",
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 08e2f39027ec..7d1451d5bbea 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -6,6 +6,15 @@ menu "USB Physical Layer drivers"
config USB_PHY
def_bool n
+config USB_OTG_FSM
+ tristate "USB 2.0 OTG FSM implementation"
+ depends on USB
+ select USB_OTG
+ select USB_PHY
+ help
+ Implements OTG Final State Machine as specified in On-The-Go
+ and Embedded Host Supplement to the USB Revision 2.0 Specification.
+
#
# USB Transceiver Drivers
#
@@ -20,7 +29,7 @@ config AB8500_USB
config FSL_USB2_OTG
bool "Freescale USB OTG Transceiver Driver"
- depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME
+ depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM_RUNTIME
select USB_OTG
select USB_PHY
help
@@ -29,6 +38,7 @@ config FSL_USB2_OTG
config ISP1301_OMAP
tristate "Philips ISP1301 with OMAP OTG"
depends on I2C && ARCH_OMAP_OTG
+ depends on USB
select USB_PHY
help
If you say yes here you get support for the Philips ISP1301
@@ -38,7 +48,16 @@ config ISP1301_OMAP
Instruments OMAP processors.
This driver can also be built as a module. If so, the module
- will be called isp1301_omap.
+ will be called phy-isp1301-omap.
+
+config KEYSTONE_USB_PHY
+ tristate "Keystone USB PHY Driver"
+ depends on ARCH_KEYSTONE || COMPILE_TEST
+ select NOP_USB_XCEIV
+ help
+ Enable this to support Keystone USB phy. This driver provides
+ interface to interact with USB 2.0 and USB 3.0 PHY that is part
+ of the Keystone SOC.
config MV_U3D_PHY
bool "Marvell USB 3.0 PHY controller Driver"
@@ -134,6 +153,31 @@ config USB_GPIO_VBUS
optionally control of a D+ pullup GPIO as well as a VBUS
current limit regulator.
+config OMAP_OTG
+ tristate "OMAP USB OTG controller driver"
+ depends on ARCH_OMAP_OTG && EXTCON
+ help
+ Enable this to support some transceivers on OMAP1 platforms. OTG
+ controller is needed to switch between host and peripheral modes.
+
+ This driver can also be built as a module. If so, the module
+ will be called phy-omap-otg.
+
+config TAHVO_USB
+ tristate "Tahvo USB transceiver driver"
+ depends on MFD_RETU && EXTCON
+ select USB_PHY
+ help
+ Enable this to support USB transceiver on Tahvo. This is used
+ at least on Nokia 770.
+
+config TAHVO_USB_HOST_BY_DEFAULT
+ depends on TAHVO_USB
+ boolean "Device in USB host mode by default"
+ help
+ Say Y here, if you want the device to enter USB host mode
+ by default on bootup.
+
config USB_ISP1301
tristate "NXP ISP1301 USB transceiver support"
depends on USB || USB_GADGET
@@ -145,7 +189,7 @@ config USB_ISP1301
and OTG drivers (to be selected separately).
To compile this driver as a module, choose M here: the
- module will be called isp1301.
+ module will be called phy-isp1301.
config USB_MSM_OTG
tristate "OTG support for Qualcomm on-chip USB controller"
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index 022c1da7fb78..be58adae3496 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -3,18 +3,20 @@
#
obj-$(CONFIG_USB_PHY) += phy.o
obj-$(CONFIG_OF) += of.o
+obj-$(CONFIG_USB_OTG_FSM) += phy-fsm-usb.o
# transceiver drivers, keep the list sorted
obj-$(CONFIG_AB8500_USB) += phy-ab8500-usb.o
-phy-fsl-usb2-objs := phy-fsl-usb.o phy-fsm-usb.o
-obj-$(CONFIG_FSL_USB2_OTG) += phy-fsl-usb2.o
+obj-$(CONFIG_FSL_USB2_OTG) += phy-fsl-usb.o
obj-$(CONFIG_ISP1301_OMAP) += phy-isp1301-omap.o
obj-$(CONFIG_MV_U3D_PHY) += phy-mv-u3d-usb.o
obj-$(CONFIG_NOP_USB_XCEIV) += phy-generic.o
+obj-$(CONFIG_TAHVO_USB) += phy-tahvo.o
obj-$(CONFIG_OMAP_CONTROL_USB) += phy-omap-control.o
obj-$(CONFIG_AM335X_CONTROL_USB) += phy-am335x-control.o
obj-$(CONFIG_AM335X_PHY_USB) += phy-am335x.o
+obj-$(CONFIG_OMAP_OTG) += phy-omap-otg.o
obj-$(CONFIG_OMAP_USB3) += phy-omap-usb3.o
obj-$(CONFIG_SAMSUNG_USBPHY) += phy-samsung-usb.o
obj-$(CONFIG_SAMSUNG_USB2PHY) += phy-samsung-usb2.o
@@ -30,3 +32,4 @@ obj-$(CONFIG_USB_RCAR_PHY) += phy-rcar-usb.o
obj-$(CONFIG_USB_RCAR_GEN2_PHY) += phy-rcar-gen2-usb.o
obj-$(CONFIG_USB_ULPI) += phy-ulpi.o
obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o
+obj-$(CONFIG_KEYSTONE_USB_PHY) += phy-keystone.o
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index 087402350b6d..11ab2c45e462 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -1415,8 +1415,6 @@ static int ab8500_usb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ab);
- ATOMIC_INIT_NOTIFIER_HEAD(&ab->phy.notifier);
-
/* all: Disable phy when called from set_host and set_peripheral */
INIT_WORK(&ab->phy_dis_work, ab8500_usb_phy_disable_work);
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
index 634f49acd20e..d75196ad5f2f 100644
--- a/drivers/usb/phy/phy-am335x-control.c
+++ b/drivers/usb/phy/phy-am335x-control.c
@@ -3,11 +3,7 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/io.h>
-
-struct phy_control {
- void (*phy_power)(struct phy_control *phy_ctrl, u32 id, bool on);
- void (*phy_wkup)(struct phy_control *phy_ctrl, u32 id, bool on);
-};
+#include "am35x-phy-control.h"
struct am335x_control_usb {
struct device *dev;
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index 0e3c60cb669a..12fc3468a01e 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -63,6 +63,19 @@ static int am335x_phy_probe(struct platform_device *pdev)
am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown;
platform_set_drvdata(pdev, am_phy);
+ device_init_wakeup(dev, true);
+
+ /*
+ * If we leave PHY wakeup enabled then AM33XX wakes up
+ * immediately from DS0. To avoid this we mark dev->power.can_wakeup
+ * to false. The same is checked in suspend routine to decide
+ * on whether to enable PHY wakeup or not.
+ * PHY wakeup works fine in standby mode, there by allowing us to
+ * handle remote wakeup, wakeup on disconnect and connect.
+ */
+
+ device_set_wakeup_enable(dev, false);
+ phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, false);
return 0;
}
@@ -75,38 +88,48 @@ static int am335x_phy_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_RUNTIME
-
-static int am335x_phy_runtime_suspend(struct device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int am335x_phy_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct am335x_phy *am_phy = platform_get_drvdata(pdev);
+ /*
+ * Enable phy wakeup only if dev->power.can_wakeup is true.
+ * Make sure to enable wakeup to support remote wakeup in
+ * standby mode ( same is not supported in OFF(DS0) mode).
+ * Enable it by doing
+ * echo enabled > /sys/bus/platform/devices/<usb-phy-id>/power/wakeup
+ */
+
if (device_may_wakeup(dev))
phy_ctrl_wkup(am_phy->phy_ctrl, am_phy->id, true);
+
phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, false);
+
return 0;
}
-static int am335x_phy_runtime_resume(struct device *dev)
+static int am335x_phy_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct am335x_phy *am_phy = platform_get_drvdata(pdev);
phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, true);
+
if (device_may_wakeup(dev))
phy_ctrl_wkup(am_phy->phy_ctrl, am_phy->id, false);
+
return 0;
}
static const struct dev_pm_ops am335x_pm_ops = {
- SET_RUNTIME_PM_OPS(am335x_phy_runtime_suspend,
- am335x_phy_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(am335x_phy_suspend, am335x_phy_resume)
};
-#define DEV_PM_OPS (&am335x_pm_ops)
+#define DEV_PM_OPS (&am335x_pm_ops)
#else
-#define DEV_PM_OPS NULL
+#define DEV_PM_OPS NULL
#endif
static const struct of_device_id am335x_phy_ids[] = {
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index 7f3c73b967ce..2b0f968d9325 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/timer.h>
@@ -848,7 +847,7 @@ static int fsl_otg_conf(struct platform_device *pdev)
pr_info("Couldn't init OTG timers\n");
goto err;
}
- spin_lock_init(&fsl_otg_tc->fsm.lock);
+ mutex_init(&fsl_otg_tc->fsm.lock);
/* Set OTG state machine operations */
fsl_otg_tc->fsm.ops = &fsl_otg_ops;
@@ -1017,10 +1016,9 @@ static int show_fsl_usb2_otg_state(struct device *dev,
struct otg_fsm *fsm = &fsl_otg_dev->fsm;
char *next = buf;
unsigned size = PAGE_SIZE;
- unsigned long flags;
int t;
- spin_lock_irqsave(&fsm->lock, flags);
+ mutex_lock(&fsm->lock);
/* basic driver infomation */
t = scnprintf(next, size,
@@ -1088,7 +1086,7 @@ static int show_fsl_usb2_otg_state(struct device *dev,
size -= t;
next += t;
- spin_unlock_irqrestore(&fsm->lock, flags);
+ mutex_unlock(&fsm->lock);
return PAGE_SIZE - size;
}
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
index 7365170a2f23..5986c96354df 100644
--- a/drivers/usb/phy/phy-fsl-usb.h
+++ b/drivers/usb/phy/phy-fsl-usb.h
@@ -15,7 +15,7 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include "phy-fsm-usb.h"
+#include <linux/usb/otg-fsm.h>
#include <linux/usb/otg.h>
#include <linux/ioctl.h>
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
index 329c2d2f8595..7aa314ef4a8a 100644
--- a/drivers/usb/phy/phy-fsm-usb.c
+++ b/drivers/usb/phy/phy-fsm-usb.c
@@ -23,13 +23,12 @@
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/usb.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
-
-#include "phy-fsm-usb.h"
+#include <linux/usb/otg-fsm.h>
/* Change USB protocol when there is a protocol change */
static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
@@ -65,7 +64,7 @@ static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
static int state_changed;
/* Called when leaving a state. Do state clean up jobs here */
-void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state)
+static void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state)
{
switch (old_state) {
case OTG_STATE_B_IDLE:
@@ -122,7 +121,7 @@ void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state)
}
/* Called when entering a state */
-int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
+static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
{
state_changed = 1;
if (fsm->otg->phy->state == new_state)
@@ -245,9 +244,8 @@ int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
int otg_statemachine(struct otg_fsm *fsm)
{
enum usb_otg_state state;
- unsigned long flags;
- spin_lock_irqsave(&fsm->lock, flags);
+ mutex_lock(&fsm->lock);
state = fsm->otg->phy->state;
state_changed = 0;
@@ -359,7 +357,7 @@ int otg_statemachine(struct otg_fsm *fsm)
default:
break;
}
- spin_unlock_irqrestore(&fsm->lock, flags);
+ mutex_unlock(&fsm->lock);
VDBG("quit statemachine, changed = %d\n", state_changed);
return state_changed;
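
With the OTG state machine now driven entirely from process context, the spinlock can become a mutex: otg_statemachine() may end up in start_host()/start_gadget() paths that sleep, which is not allowed under a spinlock with interrupts disabled. Callers such as phy-fsl-usb.c switch their initialization to mutex_init() accordingly. The locking pattern after the change, reduced to its shape as a sketch:

	mutex_lock(&fsm->lock);		/* may sleep: process context only */
	state = fsm->otg->phy->state;
	/* ... evaluate FSM inputs, possibly call otg_set_state() ... */
	mutex_unlock(&fsm->lock);
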
diff --git a/drivers/usb/phy/phy-fsm-usb.h b/drivers/usb/phy/phy-fsm-usb.h
deleted file mode 100644
index 7441b46a27f1..000000000000
--- a/drivers/usb/phy/phy-fsm-usb.h
+++ /dev/null
@@ -1,236 +0,0 @@
-/* Copyright (C) 2007,2008 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#undef VERBOSE
-
-#ifdef VERBOSE
-#define VDBG(fmt, args...) pr_debug("[%s] " fmt , \
- __func__, ## args)
-#else
-#define VDBG(stuff...) do {} while (0)
-#endif
-
-#ifdef VERBOSE
-#define MPC_LOC printk("Current Location [%s]:[%d]\n", __FILE__, __LINE__)
-#else
-#define MPC_LOC do {} while (0)
-#endif
-
-#define PROTO_UNDEF (0)
-#define PROTO_HOST (1)
-#define PROTO_GADGET (2)
-
-enum otg_fsm_timer {
- /* Standard OTG timers */
- A_WAIT_VRISE,
- A_WAIT_VFALL,
- A_WAIT_BCON,
- A_AIDL_BDIS,
- B_ASE0_BRST,
- A_BIDL_ADIS,
-
- /* Auxiliary timers */
- B_SE0_SRP,
- B_SRP_FAIL,
- A_WAIT_ENUM,
-
- NUM_OTG_FSM_TIMERS,
-};
-
-/* OTG state machine according to the OTG spec */
-struct otg_fsm {
- /* Input */
- int id;
- int adp_change;
- int power_up;
- int test_device;
- int a_bus_drop;
- int a_bus_req;
- int a_srp_det;
- int a_vbus_vld;
- int b_conn;
- int a_bus_resume;
- int a_bus_suspend;
- int a_conn;
- int b_bus_req;
- int b_se0_srp;
- int b_ssend_srp;
- int b_sess_vld;
- /* Auxilary inputs */
- int a_sess_vld;
- int b_bus_resume;
- int b_bus_suspend;
-
- /* Output */
- int data_pulse;
- int drv_vbus;
- int loc_conn;
- int loc_sof;
- int adp_prb;
- int adp_sns;
-
- /* Internal variables */
- int a_set_b_hnp_en;
- int b_srp_done;
- int b_hnp_enable;
- int a_clr_err;
-
- /* Informative variables */
- int a_bus_drop_inf;
- int a_bus_req_inf;
- int a_clr_err_inf;
- int b_bus_req_inf;
- /* Auxilary informative variables */
- int a_suspend_req_inf;
-
- /* Timeout indicator for timers */
- int a_wait_vrise_tmout;
- int a_wait_vfall_tmout;
- int a_wait_bcon_tmout;
- int a_aidl_bdis_tmout;
- int b_ase0_brst_tmout;
- int a_bidl_adis_tmout;
-
- struct otg_fsm_ops *ops;
- struct usb_otg *otg;
-
- /* Current usb protocol used: 0:undefine; 1:host; 2:client */
- int protocol;
- spinlock_t lock;
-};
-
-struct otg_fsm_ops {
- void (*chrg_vbus)(struct otg_fsm *fsm, int on);
- void (*drv_vbus)(struct otg_fsm *fsm, int on);
- void (*loc_conn)(struct otg_fsm *fsm, int on);
- void (*loc_sof)(struct otg_fsm *fsm, int on);
- void (*start_pulse)(struct otg_fsm *fsm);
- void (*start_adp_prb)(struct otg_fsm *fsm);
- void (*start_adp_sns)(struct otg_fsm *fsm);
- void (*add_timer)(struct otg_fsm *fsm, enum otg_fsm_timer timer);
- void (*del_timer)(struct otg_fsm *fsm, enum otg_fsm_timer timer);
- int (*start_host)(struct otg_fsm *fsm, int on);
- int (*start_gadget)(struct otg_fsm *fsm, int on);
-};
-
-
-static inline int otg_chrg_vbus(struct otg_fsm *fsm, int on)
-{
- if (!fsm->ops->chrg_vbus)
- return -EOPNOTSUPP;
- fsm->ops->chrg_vbus(fsm, on);
- return 0;
-}
-
-static inline int otg_drv_vbus(struct otg_fsm *fsm, int on)
-{
- if (!fsm->ops->drv_vbus)
- return -EOPNOTSUPP;
- if (fsm->drv_vbus != on) {
- fsm->drv_vbus = on;
- fsm->ops->drv_vbus(fsm, on);
- }
- return 0;
-}
-
-static inline int otg_loc_conn(struct otg_fsm *fsm, int on)
-{
- if (!fsm->ops->loc_conn)
- return -EOPNOTSUPP;
- if (fsm->loc_conn != on) {
- fsm->loc_conn = on;
- fsm->ops->loc_conn(fsm, on);
- }
- return 0;
-}
-
-static inline int otg_loc_sof(struct otg_fsm *fsm, int on)
-{
- if (!fsm->ops->loc_sof)
- return -EOPNOTSUPP;
- if (fsm->loc_sof != on) {
- fsm->loc_sof = on;
- fsm->ops->loc_sof(fsm, on);
- }
- return 0;
-}
-
-static inline int otg_start_pulse(struct otg_fsm *fsm)
-{
- if (!fsm->ops->start_pulse)
- return -EOPNOTSUPP;
- if (!fsm->data_pulse) {
- fsm->data_pulse = 1;
- fsm->ops->start_pulse(fsm);
- }
- return 0;
-}
-
-static inline int otg_start_adp_prb(struct otg_fsm *fsm)
-{
- if (!fsm->ops->start_adp_prb)
- return -EOPNOTSUPP;
- if (!fsm->adp_prb) {
- fsm->adp_sns = 0;
- fsm->adp_prb = 1;
- fsm->ops->start_adp_prb(fsm);
- }
- return 0;
-}
-
-static inline int otg_start_adp_sns(struct otg_fsm *fsm)
-{
- if (!fsm->ops->start_adp_sns)
- return -EOPNOTSUPP;
- if (!fsm->adp_sns) {
- fsm->adp_sns = 1;
- fsm->ops->start_adp_sns(fsm);
- }
- return 0;
-}
-
-static inline int otg_add_timer(struct otg_fsm *fsm, enum otg_fsm_timer timer)
-{
- if (!fsm->ops->add_timer)
- return -EOPNOTSUPP;
- fsm->ops->add_timer(fsm, timer);
- return 0;
-}
-
-static inline int otg_del_timer(struct otg_fsm *fsm, enum otg_fsm_timer timer)
-{
- if (!fsm->ops->del_timer)
- return -EOPNOTSUPP;
- fsm->ops->del_timer(fsm, timer);
- return 0;
-}
-
-static inline int otg_start_host(struct otg_fsm *fsm, int on)
-{
- if (!fsm->ops->start_host)
- return -EOPNOTSUPP;
- return fsm->ops->start_host(fsm, on);
-}
-
-static inline int otg_start_gadget(struct otg_fsm *fsm, int on)
-{
- if (!fsm->ops->start_gadget)
- return -EOPNOTSUPP;
- return fsm->ops->start_gadget(fsm, on);
-}
-
-int otg_statemachine(struct otg_fsm *fsm);
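
The otg_* inline wrappers in the block removed above all follow one pattern: return -EOPNOTSUPP when the controller glue did not supply the callback, skip the call when the cached output already matches the requested value, and only then dispatch through otg_fsm_ops. The following is a minimal standalone C sketch of that guard/dedupe/dispatch idiom with hypothetical demo_* names; it illustrates the pattern, it is not kernel code.

#include <stdio.h>
#include <errno.h>

struct demo_fsm;

struct demo_ops {
        void (*drv_vbus)(struct demo_fsm *fsm, int on);   /* may be NULL */
};

struct demo_fsm {
        int drv_vbus;              /* cached output state */
        struct demo_ops *ops;
};

/* Same shape as otg_drv_vbus(): guard, dedupe, dispatch. */
static int demo_drv_vbus(struct demo_fsm *fsm, int on)
{
        if (!fsm->ops->drv_vbus)
                return -EOPNOTSUPP;
        if (fsm->drv_vbus != on) {
                fsm->drv_vbus = on;
                fsm->ops->drv_vbus(fsm, on);
        }
        return 0;
}

static void demo_hw_drv_vbus(struct demo_fsm *fsm, int on)
{
        printf("VBUS driver turned %s\n", on ? "on" : "off");
}

int main(void)
{
        struct demo_ops ops = { .drv_vbus = demo_hw_drv_vbus };
        struct demo_fsm fsm = { .drv_vbus = 0, .ops = &ops };

        demo_drv_vbus(&fsm, 1);   /* state change: callback runs    */
        demo_drv_vbus(&fsm, 1);   /* no change: callback is skipped */
        return 0;
}

The dedupe step is what keeps the state machine from banging on the hardware every time otg_statemachine() revisits a state.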
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index aa6d37b3378a..bb394980532b 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -241,7 +241,6 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop,
nop->phy.otg->set_host = nop_set_host;
nop->phy.otg->set_peripheral = nop_set_peripheral;
- ATOMIC_INIT_NOTIFIER_HEAD(&nop->phy.notifier);
return 0;
}
EXPORT_SYMBOL_GPL(usb_phy_gen_create_phy);
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index 02799a5efcd4..69462e09d014 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -314,8 +314,6 @@ static int gpio_vbus_probe(struct platform_device *pdev)
goto err_irq;
}
- ATOMIC_INIT_NOTIFIER_HEAD(&gpio_vbus->phy.notifier);
-
INIT_DELAYED_WORK(&gpio_vbus->work, gpio_vbus_work);
gpio_vbus->vbus_draw = regulator_get(&pdev->dev, "vbus_draw");
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index d3a5160e4cc7..6e146d723b37 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -1277,7 +1277,7 @@ isp1301_set_host(struct usb_otg *otg, struct usb_bus *host)
{
struct isp1301 *isp = container_of(otg->phy, struct isp1301, phy);
- if (!otg || isp != the_transceiver)
+ if (isp != the_transceiver)
return -ENODEV;
if (!host) {
@@ -1333,7 +1333,7 @@ isp1301_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget)
{
struct isp1301 *isp = container_of(otg->phy, struct isp1301, phy);
- if (!otg || isp != the_transceiver)
+ if (isp != the_transceiver)
return -ENODEV;
if (!gadget) {
@@ -1414,8 +1414,7 @@ isp1301_start_srp(struct usb_otg *otg)
struct isp1301 *isp = container_of(otg->phy, struct isp1301, phy);
u32 otg_ctrl;
- if (!otg || isp != the_transceiver
- || isp->phy.state != OTG_STATE_B_IDLE)
+ if (isp != the_transceiver || isp->phy.state != OTG_STATE_B_IDLE)
return -ENODEV;
otg_ctrl = omap_readl(OTG_CTRL);
@@ -1442,7 +1441,7 @@ isp1301_start_hnp(struct usb_otg *otg)
struct isp1301 *isp = container_of(otg->phy, struct isp1301, phy);
u32 l;
- if (!otg || isp != the_transceiver)
+ if (isp != the_transceiver)
return -ENODEV;
if (otg->default_a && (otg->host == NULL || !otg->host->b_hnp_enable))
return -ENOTCONN;
diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c
new file mode 100644
index 000000000000..d762003896c0
--- /dev/null
+++ b/drivers/usb/phy/phy-keystone.c
@@ -0,0 +1,136 @@
+/*
+ * phy-keystone - USB PHY, talking to dwc3 controller in Keystone.
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Author: WingMan Kwok <w-kwok2@ti.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/usb/usb_phy_gen_xceiv.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include "phy-generic.h"
+
+/* USB PHY control register offsets */
+#define USB_PHY_CTL_UTMI 0x0000
+#define USB_PHY_CTL_PIPE 0x0004
+#define USB_PHY_CTL_PARAM_1 0x0008
+#define USB_PHY_CTL_PARAM_2 0x000c
+#define USB_PHY_CTL_CLOCK 0x0010
+#define USB_PHY_CTL_PLL 0x0014
+
+#define PHY_REF_SSP_EN BIT(29)
+
+struct keystone_usbphy {
+ struct usb_phy_gen_xceiv usb_phy_gen;
+ void __iomem *phy_ctrl;
+};
+
+static inline u32 keystone_usbphy_readl(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+static inline void keystone_usbphy_writel(void __iomem *base,
+ u32 offset, u32 value)
+{
+ writel(value, base + offset);
+}
+
+static int keystone_usbphy_init(struct usb_phy *phy)
+{
+ struct keystone_usbphy *k_phy = dev_get_drvdata(phy->dev);
+ u32 val;
+
+ val = keystone_usbphy_readl(k_phy->phy_ctrl, USB_PHY_CTL_CLOCK);
+ keystone_usbphy_writel(k_phy->phy_ctrl, USB_PHY_CTL_CLOCK,
+ val | PHY_REF_SSP_EN);
+ return 0;
+}
+
+static void keystone_usbphy_shutdown(struct usb_phy *phy)
+{
+ struct keystone_usbphy *k_phy = dev_get_drvdata(phy->dev);
+ u32 val;
+
+ val = keystone_usbphy_readl(k_phy->phy_ctrl, USB_PHY_CTL_CLOCK);
+ keystone_usbphy_writel(k_phy->phy_ctrl, USB_PHY_CTL_CLOCK,
+ val &= ~PHY_REF_SSP_EN);
+}
+
+static int keystone_usbphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct keystone_usbphy *k_phy;
+ struct resource *res;
+ int ret;
+
+ k_phy = devm_kzalloc(dev, sizeof(*k_phy), GFP_KERNEL);
+ if (!k_phy)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ k_phy->phy_ctrl = devm_ioremap_resource(dev, res);
+ if (IS_ERR(k_phy->phy_ctrl))
+ return PTR_ERR(k_phy->phy_ctrl);
+
+ ret = usb_phy_gen_create_phy(dev, &k_phy->usb_phy_gen, NULL);
+ if (ret)
+ return ret;
+
+ k_phy->usb_phy_gen.phy.init = keystone_usbphy_init;
+ k_phy->usb_phy_gen.phy.shutdown = keystone_usbphy_shutdown;
+
+ platform_set_drvdata(pdev, k_phy);
+
+ ret = usb_add_phy_dev(&k_phy->usb_phy_gen.phy);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int keystone_usbphy_remove(struct platform_device *pdev)
+{
+ struct keystone_usbphy *k_phy = platform_get_drvdata(pdev);
+
+ usb_remove_phy(&k_phy->usb_phy_gen.phy);
+
+ return 0;
+}
+
+static const struct of_device_id keystone_usbphy_ids[] = {
+ { .compatible = "ti,keystone-usbphy" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, keystone_usbphy_ids);
+
+static struct platform_driver keystone_usbphy_driver = {
+ .probe = keystone_usbphy_probe,
+ .remove = keystone_usbphy_remove,
+ .driver = {
+ .name = "keystone-usbphy",
+ .owner = THIS_MODULE,
+ .of_match_table = keystone_usbphy_ids,
+ },
+};
+
+module_platform_driver(keystone_usbphy_driver);
+
+MODULE_ALIAS("platform:keystone-usbphy");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("Keystone USB phy driver");
+MODULE_LICENSE("GPL v2");
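
keystone_usbphy_init() and keystone_usbphy_shutdown() in the new driver above are a plain read-modify-write of the clock control register: read USB_PHY_CTL_CLOCK, set or clear PHY_REF_SSP_EN, write it back. The standalone sketch below shows only that bit manipulation on an ordinary variable standing in for the register; the bit value is taken from the driver, the MMIO accessors and register offsets are left out.

#include <stdio.h>
#include <stdint.h>

#define PHY_REF_SSP_EN (1u << 29)    /* same bit the driver toggles */

int main(void)
{
        uint32_t clock_reg = 0x00000010;  /* stand-in for USB_PHY_CTL_CLOCK */

        clock_reg |= PHY_REF_SSP_EN;      /* init: enable the SSP reference */
        printf("after init:     0x%08x\n", clock_reg);

        clock_reg &= ~PHY_REF_SSP_EN;     /* shutdown: clear it again */
        printf("after shutdown: 0x%08x\n", clock_reg);
        return 0;
}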
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index e9d4cd960ecd..8546c8dccd51 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -40,8 +40,6 @@
#include <linux/usb/msm_hsusb_hw.h>
#include <linux/regulator/consumer.h>
-#include <mach/clk.h>
-
#define MSM_USB_BASE (motg->regs)
#define DRIVER_NAME "msm_otg"
@@ -308,33 +306,30 @@ static void ulpi_init(struct msm_otg *motg)
static int msm_otg_link_clk_reset(struct msm_otg *motg, bool assert)
{
- int ret;
+ int ret = 0;
+
+ if (!motg->pdata->link_clk_reset)
+ return ret;
+
+ ret = motg->pdata->link_clk_reset(motg->clk, assert);
+ if (ret)
+ dev_err(motg->phy.dev, "usb link clk reset %s failed\n",
+ assert ? "assert" : "deassert");
- if (assert) {
- ret = clk_reset(motg->clk, CLK_RESET_ASSERT);
- if (ret)
- dev_err(motg->phy.dev, "usb hs_clk assert failed\n");
- } else {
- ret = clk_reset(motg->clk, CLK_RESET_DEASSERT);
- if (ret)
- dev_err(motg->phy.dev, "usb hs_clk deassert failed\n");
- }
return ret;
}
static int msm_otg_phy_clk_reset(struct msm_otg *motg)
{
- int ret;
+ int ret = 0;
- ret = clk_reset(motg->phy_reset_clk, CLK_RESET_ASSERT);
- if (ret) {
- dev_err(motg->phy.dev, "usb phy clk assert failed\n");
+ if (!motg->pdata->phy_clk_reset)
return ret;
- }
- usleep_range(10000, 12000);
- ret = clk_reset(motg->phy_reset_clk, CLK_RESET_DEASSERT);
+
+ ret = motg->pdata->phy_clk_reset(motg->phy_reset_clk);
if (ret)
- dev_err(motg->phy.dev, "usb phy clk deassert failed\n");
+ dev_err(motg->phy.dev, "usb phy clk reset failed\n");
+
return ret;
}
@@ -669,6 +664,7 @@ static void msm_otg_start_host(struct usb_phy *phy, int on)
pdata->setup_gpio(OTG_STATE_A_HOST);
#ifdef CONFIG_USB
usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+ device_wakeup_enable(hcd->self.controller);
#endif
} else {
dev_dbg(phy->dev, "host off\n");
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index 98f6ac6a78ea..7d80c54f0ac6 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/device.h>
@@ -213,10 +212,12 @@ static void mv_otg_start_host(struct mv_otg *mvotg, int on)
hcd = bus_to_hcd(otg->host);
- if (on)
+ if (on) {
usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
- else
+ device_wakeup_enable(hcd->self.controller);
+ } else {
usb_remove_hcd(hcd);
+ }
#endif /* CONFIG_USB */
}
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 545844b7e796..b42897b6474c 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -63,9 +63,13 @@ static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
static int mxs_phy_init(struct usb_phy *phy)
{
+ int ret;
struct mxs_phy *mxs_phy = to_mxs_phy(phy);
- clk_prepare_enable(mxs_phy->clk);
+ ret = clk_prepare_enable(mxs_phy->clk);
+ if (ret)
+ return ret;
+
return mxs_phy_hw_init(mxs_phy);
}
@@ -81,6 +85,7 @@ static void mxs_phy_shutdown(struct usb_phy *phy)
static int mxs_phy_suspend(struct usb_phy *x, int suspend)
{
+ int ret;
struct mxs_phy *mxs_phy = to_mxs_phy(x);
if (suspend) {
@@ -89,7 +94,9 @@ static int mxs_phy_suspend(struct usb_phy *x, int suspend)
x->io_priv + HW_USBPHY_CTRL_SET);
clk_disable_unprepare(mxs_phy->clk);
} else {
- clk_prepare_enable(mxs_phy->clk);
+ ret = clk_prepare_enable(mxs_phy->clk);
+ if (ret)
+ return ret;
writel(BM_USBPHY_CTRL_CLKGATE,
x->io_priv + HW_USBPHY_CTRL_CLR);
writel(0, x->io_priv + HW_USBPHY_PWD);
@@ -160,8 +167,6 @@ static int mxs_phy_probe(struct platform_device *pdev)
mxs_phy->phy.notify_disconnect = mxs_phy_on_disconnect;
mxs_phy->phy.type = USB_PHY_TYPE_USB2;
- ATOMIC_INIT_NOTIFIER_HEAD(&mxs_phy->phy.notifier);
-
mxs_phy->clk = clk;
platform_set_drvdata(pdev, mxs_phy);
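
The mxs_phy hunks stop ignoring the return value of clk_prepare_enable(): the clock can legitimately fail to come up, and continuing with PHY register writes afterwards would touch a dead block. A minimal sketch of the same "enable, check, only then proceed" ordering, with a made-up enable_clock() helper standing in for the clock framework:

#include <stdio.h>
#include <errno.h>

/* Hypothetical helper: pretend the clock can fail to start. */
static int enable_clock(int should_fail)
{
        return should_fail ? -EIO : 0;
}

static int phy_init(int clock_broken)
{
        int ret;

        ret = enable_clock(clock_broken);
        if (ret)
                return ret;          /* propagate the error, skip hw init */

        printf("clock up, programming PHY registers\n");
        return 0;
}

int main(void)
{
        printf("good clock -> %d\n", phy_init(0));
        printf("bad clock  -> %d\n", phy_init(1));
        return 0;
}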
diff --git a/drivers/usb/phy/phy-omap-control.c b/drivers/usb/phy/phy-omap-control.c
index 09c5ace1edd8..e7253182e47d 100644
--- a/drivers/usb/phy/phy-omap-control.c
+++ b/drivers/usb/phy/phy-omap-control.c
@@ -84,6 +84,20 @@ void omap_control_usb_phy_power(struct device *dev, int on)
else
val |= OMAP_CTRL_USB2_PHY_PD;
break;
+
+ case OMAP_CTRL_TYPE_AM437USB2:
+ if (on) {
+ val &= ~(AM437X_CTRL_USB2_PHY_PD |
+ AM437X_CTRL_USB2_OTG_PD);
+ val |= (AM437X_CTRL_USB2_OTGVDET_EN |
+ AM437X_CTRL_USB2_OTGSESSEND_EN);
+ } else {
+ val &= ~(AM437X_CTRL_USB2_OTGVDET_EN |
+ AM437X_CTRL_USB2_OTGSESSEND_EN);
+ val |= (AM437X_CTRL_USB2_PHY_PD |
+ AM437X_CTRL_USB2_OTG_PD);
+ }
+ break;
default:
dev_err(dev, "%s: type %d not recognized\n",
__func__, control_usb->type);
@@ -197,6 +211,7 @@ static const enum omap_control_usb_type otghs_data = OMAP_CTRL_TYPE_OTGHS;
static const enum omap_control_usb_type usb2_data = OMAP_CTRL_TYPE_USB2;
static const enum omap_control_usb_type pipe3_data = OMAP_CTRL_TYPE_PIPE3;
static const enum omap_control_usb_type dra7usb2_data = OMAP_CTRL_TYPE_DRA7USB2;
+static const enum omap_control_usb_type am437usb2_data = OMAP_CTRL_TYPE_AM437USB2;
static const struct of_device_id omap_control_usb_id_table[] = {
{
@@ -215,6 +230,10 @@ static const struct of_device_id omap_control_usb_id_table[] = {
.compatible = "ti,control-phy-dra7usb2",
.data = &dra7usb2_data,
},
+ {
+ .compatible = "ti,control-phy-am437usb2",
+ .data = &am437usb2_data,
+ },
{},
};
MODULE_DEVICE_TABLE(of, omap_control_usb_id_table);
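
The new OMAP_CTRL_TYPE_AM437USB2 case follows the same shape as the other control types: power-up clears the PHY/OTG power-down bits and sets the session and VBUS detect enables, power-down does the exact reverse. A small standalone sketch of that symmetric mask handling; the bit positions below are arbitrary placeholders, not the real AM437X_CTRL_* values.

#include <stdio.h>
#include <stdint.h>

/* Placeholder bit assignments, not the real AM437x register layout. */
#define CTRL_PHY_PD        (1u << 0)
#define CTRL_OTG_PD        (1u << 1)
#define CTRL_OTGVDET_EN    (1u << 2)
#define CTRL_OTGSESSEND_EN (1u << 3)

static uint32_t usb2_phy_power(uint32_t val, int on)
{
        if (on) {
                val &= ~(CTRL_PHY_PD | CTRL_OTG_PD);
                val |= CTRL_OTGVDET_EN | CTRL_OTGSESSEND_EN;
        } else {
                val &= ~(CTRL_OTGVDET_EN | CTRL_OTGSESSEND_EN);
                val |= CTRL_PHY_PD | CTRL_OTG_PD;
        }
        return val;
}

int main(void)
{
        uint32_t reg = CTRL_PHY_PD | CTRL_OTG_PD;   /* powered down */

        reg = usb2_phy_power(reg, 1);
        printf("on : 0x%08x\n", reg);
        reg = usb2_phy_power(reg, 0);
        printf("off: 0x%08x\n", reg);
        return 0;
}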
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
new file mode 100644
index 000000000000..11598cdb3189
--- /dev/null
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -0,0 +1,169 @@
+/*
+ * OMAP OTG controller driver
+ *
+ * Based on code from tahvo-usb.c and isp1301_omap.c drivers.
+ *
+ * Copyright (C) 2005-2006 Nokia Corporation
+ * Copyright (C) 2004 Texas Instruments
+ * Copyright (C) 2004 David Brownell
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/extcon.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/usb-omap1.h>
+
+struct otg_device {
+ void __iomem *base;
+ bool id;
+ bool vbus;
+ struct extcon_specific_cable_nb vbus_dev;
+ struct extcon_specific_cable_nb id_dev;
+ struct notifier_block vbus_nb;
+ struct notifier_block id_nb;
+};
+
+#define OMAP_OTG_CTRL 0x0c
+#define OMAP_OTG_ASESSVLD (1 << 20)
+#define OMAP_OTG_BSESSEND (1 << 19)
+#define OMAP_OTG_BSESSVLD (1 << 18)
+#define OMAP_OTG_VBUSVLD (1 << 17)
+#define OMAP_OTG_ID (1 << 16)
+#define OMAP_OTG_XCEIV_OUTPUTS \
+ (OMAP_OTG_ASESSVLD | OMAP_OTG_BSESSEND | OMAP_OTG_BSESSVLD | \
+ OMAP_OTG_VBUSVLD | OMAP_OTG_ID)
+
+static void omap_otg_ctrl(struct otg_device *otg_dev, u32 outputs)
+{
+ u32 l;
+
+ l = readl(otg_dev->base + OMAP_OTG_CTRL);
+ l &= ~OMAP_OTG_XCEIV_OUTPUTS;
+ l |= outputs;
+ writel(l, otg_dev->base + OMAP_OTG_CTRL);
+}
+
+static void omap_otg_set_mode(struct otg_device *otg_dev)
+{
+ if (!otg_dev->id && otg_dev->vbus)
+ /* Set B-session valid. */
+ omap_otg_ctrl(otg_dev, OMAP_OTG_ID | OMAP_OTG_BSESSVLD);
+ else if (otg_dev->vbus)
+ /* Set A-session valid. */
+ omap_otg_ctrl(otg_dev, OMAP_OTG_ASESSVLD);
+ else if (!otg_dev->id)
+ /* Set B-session end to indicate no VBUS. */
+ omap_otg_ctrl(otg_dev, OMAP_OTG_ID | OMAP_OTG_BSESSEND);
+}
+
+static int omap_otg_id_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct otg_device *otg_dev = container_of(nb, struct otg_device, id_nb);
+
+ otg_dev->id = event;
+ omap_otg_set_mode(otg_dev);
+
+ return NOTIFY_DONE;
+}
+
+static int omap_otg_vbus_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct otg_device *otg_dev = container_of(nb, struct otg_device,
+ vbus_nb);
+
+ otg_dev->vbus = event;
+ omap_otg_set_mode(otg_dev);
+
+ return NOTIFY_DONE;
+}
+
+static int omap_otg_probe(struct platform_device *pdev)
+{
+ const struct omap_usb_config *config = pdev->dev.platform_data;
+ struct otg_device *otg_dev;
+ struct extcon_dev *extcon;
+ int ret;
+ u32 rev;
+
+ if (!config || !config->extcon)
+ return -ENODEV;
+
+ extcon = extcon_get_extcon_dev(config->extcon);
+ if (!extcon)
+ return -EPROBE_DEFER;
+
+ otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL);
+ if (!otg_dev)
+ return -ENOMEM;
+
+ otg_dev->base = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
+ if (IS_ERR(otg_dev->base))
+ return PTR_ERR(otg_dev->base);
+
+ otg_dev->id_nb.notifier_call = omap_otg_id_notifier;
+ otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier;
+
+ ret = extcon_register_interest(&otg_dev->id_dev, config->extcon,
+ "USB-HOST", &otg_dev->id_nb);
+ if (ret)
+ return ret;
+
+ ret = extcon_register_interest(&otg_dev->vbus_dev, config->extcon,
+ "USB", &otg_dev->vbus_nb);
+ if (ret) {
+ extcon_unregister_interest(&otg_dev->id_dev);
+ return ret;
+ }
+
+ otg_dev->id = extcon_get_cable_state(extcon, "USB-HOST");
+ otg_dev->vbus = extcon_get_cable_state(extcon, "USB");
+ omap_otg_set_mode(otg_dev);
+
+ rev = readl(otg_dev->base);
+
+ dev_info(&pdev->dev,
+ "OMAP USB OTG controller rev %d.%d (%s, id=%d, vbus=%d)\n",
+ (rev >> 4) & 0xf, rev & 0xf, config->extcon, otg_dev->id,
+ otg_dev->vbus);
+
+ return 0;
+}
+
+static int omap_otg_remove(struct platform_device *pdev)
+{
+ struct otg_device *otg_dev = platform_get_drvdata(pdev);
+
+ extcon_unregister_interest(&otg_dev->id_dev);
+ extcon_unregister_interest(&otg_dev->vbus_dev);
+
+ return 0;
+}
+
+static struct platform_driver omap_otg_driver = {
+ .probe = omap_otg_probe,
+ .remove = omap_otg_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "omap_otg",
+ },
+};
+module_platform_driver(omap_otg_driver);
+
+MODULE_DESCRIPTION("OMAP USB OTG controller driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
diff --git a/drivers/usb/phy/phy-rcar-gen2-usb.c b/drivers/usb/phy/phy-rcar-gen2-usb.c
index db3ab34cddb4..551e0a6c0e22 100644
--- a/drivers/usb/phy/phy-rcar-gen2-usb.c
+++ b/drivers/usb/phy/phy-rcar-gen2-usb.c
@@ -213,7 +213,7 @@ static int rcar_gen2_usb_phy_probe(struct platform_device *pdev)
priv->phy.shutdown = rcar_gen2_usb_phy_shutdown;
priv->phy.set_suspend = rcar_gen2_usb_phy_set_suspend;
- retval = usb_add_phy(&priv->phy, USB_PHY_TYPE_USB2);
+ retval = usb_add_phy_dev(&priv->phy);
if (retval < 0) {
dev_err(dev, "Failed to add USB phy\n");
return retval;
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
new file mode 100644
index 000000000000..cc61ee44b911
--- /dev/null
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -0,0 +1,457 @@
+/*
+ * Tahvo USB transceiver driver
+ *
+ * Copyright (C) 2005-2006 Nokia Corporation
+ *
+ * Parts copied from isp1301_omap.c.
+ * Copyright (C) 2004 Texas Instruments
+ * Copyright (C) 2004 David Brownell
+ *
+ * Original driver written by Juha Yrjölä, Tony Lindgren and Timo Teräs.
+ * Modified for Retu/Tahvo MFD by Aaro Koskinen.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/usb.h>
+#include <linux/extcon.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb/otg.h>
+#include <linux/mfd/retu.h>
+#include <linux/usb/gadget.h>
+#include <linux/platform_device.h>
+
+#define DRIVER_NAME "tahvo-usb"
+
+#define TAHVO_REG_IDSR 0x02
+#define TAHVO_REG_USBR 0x06
+
+#define USBR_SLAVE_CONTROL (1 << 8)
+#define USBR_VPPVIO_SW (1 << 7)
+#define USBR_SPEED (1 << 6)
+#define USBR_REGOUT (1 << 5)
+#define USBR_MASTER_SW2 (1 << 4)
+#define USBR_MASTER_SW1 (1 << 3)
+#define USBR_SLAVE_SW (1 << 2)
+#define USBR_NSUSPEND (1 << 1)
+#define USBR_SEMODE (1 << 0)
+
+#define TAHVO_MODE_HOST 0
+#define TAHVO_MODE_PERIPHERAL 1
+
+struct tahvo_usb {
+ struct platform_device *pt_dev;
+ struct usb_phy phy;
+ int vbus_state;
+ struct mutex serialize;
+ struct clk *ick;
+ int irq;
+ int tahvo_mode;
+ struct extcon_dev extcon;
+};
+
+static const char *tahvo_cable[] = {
+ "USB-HOST",
+ "USB",
+ NULL,
+};
+
+static ssize_t vbus_state_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct tahvo_usb *tu = dev_get_drvdata(device);
+ return sprintf(buf, "%s\n", tu->vbus_state ? "on" : "off");
+}
+static DEVICE_ATTR(vbus, 0444, vbus_state_show, NULL);
+
+static void check_vbus_state(struct tahvo_usb *tu)
+{
+ struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
+ int reg, prev_state;
+
+ reg = retu_read(rdev, TAHVO_REG_IDSR);
+ if (reg & TAHVO_STAT_VBUS) {
+ switch (tu->phy.state) {
+ case OTG_STATE_B_IDLE:
+ /* Enable the gadget driver */
+ if (tu->phy.otg->gadget)
+ usb_gadget_vbus_connect(tu->phy.otg->gadget);
+ tu->phy.state = OTG_STATE_B_PERIPHERAL;
+ break;
+ case OTG_STATE_A_IDLE:
+ /*
+ * Session is now valid assuming the USB hub is driving
+ * Vbus.
+ */
+ tu->phy.state = OTG_STATE_A_HOST;
+ break;
+ default:
+ break;
+ }
+ dev_info(&tu->pt_dev->dev, "USB cable connected\n");
+ } else {
+ switch (tu->phy.state) {
+ case OTG_STATE_B_PERIPHERAL:
+ if (tu->phy.otg->gadget)
+ usb_gadget_vbus_disconnect(tu->phy.otg->gadget);
+ tu->phy.state = OTG_STATE_B_IDLE;
+ break;
+ case OTG_STATE_A_HOST:
+ tu->phy.state = OTG_STATE_A_IDLE;
+ break;
+ default:
+ break;
+ }
+ dev_info(&tu->pt_dev->dev, "USB cable disconnected\n");
+ }
+
+ prev_state = tu->vbus_state;
+ tu->vbus_state = reg & TAHVO_STAT_VBUS;
+ if (prev_state != tu->vbus_state) {
+ extcon_set_cable_state(&tu->extcon, "USB", tu->vbus_state);
+ sysfs_notify(&tu->pt_dev->dev.kobj, NULL, "vbus_state");
+ }
+}
+
+static void tahvo_usb_become_host(struct tahvo_usb *tu)
+{
+ struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
+
+ extcon_set_cable_state(&tu->extcon, "USB-HOST", true);
+
+ /* Power up the transceiver in USB host mode */
+ retu_write(rdev, TAHVO_REG_USBR, USBR_REGOUT | USBR_NSUSPEND |
+ USBR_MASTER_SW2 | USBR_MASTER_SW1);
+ tu->phy.state = OTG_STATE_A_IDLE;
+
+ check_vbus_state(tu);
+}
+
+static void tahvo_usb_stop_host(struct tahvo_usb *tu)
+{
+ tu->phy.state = OTG_STATE_A_IDLE;
+}
+
+static void tahvo_usb_become_peripheral(struct tahvo_usb *tu)
+{
+ struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
+
+ extcon_set_cable_state(&tu->extcon, "USB-HOST", false);
+
+ /* Power up transceiver and set it in USB peripheral mode */
+ retu_write(rdev, TAHVO_REG_USBR, USBR_SLAVE_CONTROL | USBR_REGOUT |
+ USBR_NSUSPEND | USBR_SLAVE_SW);
+ tu->phy.state = OTG_STATE_B_IDLE;
+
+ check_vbus_state(tu);
+}
+
+static void tahvo_usb_stop_peripheral(struct tahvo_usb *tu)
+{
+ if (tu->phy.otg->gadget)
+ usb_gadget_vbus_disconnect(tu->phy.otg->gadget);
+ tu->phy.state = OTG_STATE_B_IDLE;
+}
+
+static void tahvo_usb_power_off(struct tahvo_usb *tu)
+{
+ struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
+
+ /* Disable gadget controller if any */
+ if (tu->phy.otg->gadget)
+ usb_gadget_vbus_disconnect(tu->phy.otg->gadget);
+
+ /* Power off transceiver */
+ retu_write(rdev, TAHVO_REG_USBR, 0);
+ tu->phy.state = OTG_STATE_UNDEFINED;
+}
+
+static int tahvo_usb_set_suspend(struct usb_phy *dev, int suspend)
+{
+ struct tahvo_usb *tu = container_of(dev, struct tahvo_usb, phy);
+ struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
+ u16 w;
+
+ dev_dbg(&tu->pt_dev->dev, "%s\n", __func__);
+
+ w = retu_read(rdev, TAHVO_REG_USBR);
+ if (suspend)
+ w &= ~USBR_NSUSPEND;
+ else
+ w |= USBR_NSUSPEND;
+ retu_write(rdev, TAHVO_REG_USBR, w);
+
+ return 0;
+}
+
+static int tahvo_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
+{
+ struct tahvo_usb *tu = container_of(otg->phy, struct tahvo_usb, phy);
+
+ dev_dbg(&tu->pt_dev->dev, "%s %p\n", __func__, host);
+
+ mutex_lock(&tu->serialize);
+
+ if (host == NULL) {
+ if (tu->tahvo_mode == TAHVO_MODE_HOST)
+ tahvo_usb_power_off(tu);
+ otg->host = NULL;
+ mutex_unlock(&tu->serialize);
+ return 0;
+ }
+
+ if (tu->tahvo_mode == TAHVO_MODE_HOST) {
+ otg->host = NULL;
+ tahvo_usb_become_host(tu);
+ }
+
+ otg->host = host;
+
+ mutex_unlock(&tu->serialize);
+
+ return 0;
+}
+
+static int tahvo_usb_set_peripheral(struct usb_otg *otg,
+ struct usb_gadget *gadget)
+{
+ struct tahvo_usb *tu = container_of(otg->phy, struct tahvo_usb, phy);
+
+ dev_dbg(&tu->pt_dev->dev, "%s %p\n", __func__, gadget);
+
+ mutex_lock(&tu->serialize);
+
+ if (!gadget) {
+ if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL)
+ tahvo_usb_power_off(tu);
+ tu->phy.otg->gadget = NULL;
+ mutex_unlock(&tu->serialize);
+ return 0;
+ }
+
+ tu->phy.otg->gadget = gadget;
+ if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL)
+ tahvo_usb_become_peripheral(tu);
+
+ mutex_unlock(&tu->serialize);
+
+ return 0;
+}
+
+static irqreturn_t tahvo_usb_vbus_interrupt(int irq, void *_tu)
+{
+ struct tahvo_usb *tu = _tu;
+
+ mutex_lock(&tu->serialize);
+ check_vbus_state(tu);
+ mutex_unlock(&tu->serialize);
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t otg_mode_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct tahvo_usb *tu = dev_get_drvdata(device);
+
+ switch (tu->tahvo_mode) {
+ case TAHVO_MODE_HOST:
+ return sprintf(buf, "host\n");
+ case TAHVO_MODE_PERIPHERAL:
+ return sprintf(buf, "peripheral\n");
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t otg_mode_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tahvo_usb *tu = dev_get_drvdata(device);
+ int r;
+
+ mutex_lock(&tu->serialize);
+ if (count >= 4 && strncmp(buf, "host", 4) == 0) {
+ if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL)
+ tahvo_usb_stop_peripheral(tu);
+ tu->tahvo_mode = TAHVO_MODE_HOST;
+ if (tu->phy.otg->host) {
+ dev_info(device, "HOST mode: host controller present\n");
+ tahvo_usb_become_host(tu);
+ } else {
+ dev_info(device, "HOST mode: no host controller, powering off\n");
+ tahvo_usb_power_off(tu);
+ }
+ r = strlen(buf);
+ } else if (count >= 10 && strncmp(buf, "peripheral", 10) == 0) {
+ if (tu->tahvo_mode == TAHVO_MODE_HOST)
+ tahvo_usb_stop_host(tu);
+ tu->tahvo_mode = TAHVO_MODE_PERIPHERAL;
+ if (tu->phy.otg->gadget) {
+ dev_info(device, "PERIPHERAL mode: gadget driver present\n");
+ tahvo_usb_become_peripheral(tu);
+ } else {
+ dev_info(device, "PERIPHERAL mode: no gadget driver, powering off\n");
+ tahvo_usb_power_off(tu);
+ }
+ r = strlen(buf);
+ } else {
+ r = -EINVAL;
+ }
+ mutex_unlock(&tu->serialize);
+
+ return r;
+}
+static DEVICE_ATTR(otg_mode, 0644, otg_mode_show, otg_mode_store);
+
+static struct attribute *tahvo_attributes[] = {
+ &dev_attr_vbus.attr,
+ &dev_attr_otg_mode.attr,
+ NULL
+};
+
+static struct attribute_group tahvo_attr_group = {
+ .attrs = tahvo_attributes,
+};
+
+static int tahvo_usb_probe(struct platform_device *pdev)
+{
+ struct retu_dev *rdev = dev_get_drvdata(pdev->dev.parent);
+ struct tahvo_usb *tu;
+ int ret;
+
+ tu = devm_kzalloc(&pdev->dev, sizeof(*tu), GFP_KERNEL);
+ if (!tu)
+ return -ENOMEM;
+
+ tu->phy.otg = devm_kzalloc(&pdev->dev, sizeof(*tu->phy.otg),
+ GFP_KERNEL);
+ if (!tu->phy.otg)
+ return -ENOMEM;
+
+ tu->pt_dev = pdev;
+
+ /* Default mode */
+#ifdef CONFIG_TAHVO_USB_HOST_BY_DEFAULT
+ tu->tahvo_mode = TAHVO_MODE_HOST;
+#else
+ tu->tahvo_mode = TAHVO_MODE_PERIPHERAL;
+#endif
+
+ mutex_init(&tu->serialize);
+
+ tu->ick = devm_clk_get(&pdev->dev, "usb_l4_ick");
+ if (!IS_ERR(tu->ick))
+ clk_enable(tu->ick);
+
+ /*
+ * Set initial state, so that we generate kevents only on state changes.
+ */
+ tu->vbus_state = retu_read(rdev, TAHVO_REG_IDSR) & TAHVO_STAT_VBUS;
+
+ tu->extcon.name = DRIVER_NAME;
+ tu->extcon.supported_cable = tahvo_cable;
+ tu->extcon.dev.parent = &pdev->dev;
+
+ ret = extcon_dev_register(&tu->extcon);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register extcon device: %d\n",
+ ret);
+ goto err_disable_clk;
+ }
+
+ /* Set the initial cable state. */
+ extcon_set_cable_state(&tu->extcon, "USB-HOST",
+ tu->tahvo_mode == TAHVO_MODE_HOST);
+ extcon_set_cable_state(&tu->extcon, "USB", tu->vbus_state);
+
+ /* Create OTG interface */
+ tahvo_usb_power_off(tu);
+ tu->phy.dev = &pdev->dev;
+ tu->phy.state = OTG_STATE_UNDEFINED;
+ tu->phy.label = DRIVER_NAME;
+ tu->phy.set_suspend = tahvo_usb_set_suspend;
+
+ tu->phy.otg->phy = &tu->phy;
+ tu->phy.otg->set_host = tahvo_usb_set_host;
+ tu->phy.otg->set_peripheral = tahvo_usb_set_peripheral;
+
+ ret = usb_add_phy(&tu->phy, USB_PHY_TYPE_USB2);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot register USB transceiver: %d\n",
+ ret);
+ goto err_extcon_unreg;
+ }
+
+ dev_set_drvdata(&pdev->dev, tu);
+
+ tu->irq = platform_get_irq(pdev, 0);
+ ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, 0,
+ "tahvo-vbus", tu);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register tahvo-vbus irq: %d\n",
+ ret);
+ goto err_remove_phy;
+ }
+
+ /* Attributes */
+ ret = sysfs_create_group(&pdev->dev.kobj, &tahvo_attr_group);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot create sysfs group: %d\n", ret);
+ goto err_free_irq;
+ }
+
+ return 0;
+
+err_free_irq:
+ free_irq(tu->irq, tu);
+err_remove_phy:
+ usb_remove_phy(&tu->phy);
+err_extcon_unreg:
+ extcon_dev_unregister(&tu->extcon);
+err_disable_clk:
+ if (!IS_ERR(tu->ick))
+ clk_disable(tu->ick);
+
+ return ret;
+}
+
+static int tahvo_usb_remove(struct platform_device *pdev)
+{
+ struct tahvo_usb *tu = platform_get_drvdata(pdev);
+
+ sysfs_remove_group(&pdev->dev.kobj, &tahvo_attr_group);
+ free_irq(tu->irq, tu);
+ usb_remove_phy(&tu->phy);
+ extcon_dev_unregister(&tu->extcon);
+ if (!IS_ERR(tu->ick))
+ clk_disable(tu->ick);
+
+ return 0;
+}
+
+static struct platform_driver tahvo_usb_driver = {
+ .probe = tahvo_usb_probe,
+ .remove = tahvo_usb_remove,
+ .driver = {
+ .name = "tahvo-usb",
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(tahvo_usb_driver);
+
+MODULE_DESCRIPTION("Tahvo USB transceiver driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Juha Yrjölä, Tony Lindgren, and Timo Teräs");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 82232acf1ab6..bbe4f8e6e8d7 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -876,7 +876,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
- if (!tegra_phy->regs) {
+ if (!tegra_phy->pad_regs) {
dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n");
return -ENOMEM;
}
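
The tegra fix is a classic copy-and-paste slip: the second mapping was stored in tegra_phy->pad_regs but the error check still looked at tegra_phy->regs, so a failed remap of the UTMI pad registers went unnoticed. The rule the fix restores is simply "check the pointer you just assigned"; a compact standalone illustration with stand-in names:

#include <stdio.h>

struct phy {
        void *regs;
        void *pad_regs;
};

/* Stand-in for devm_ioremap(); returns NULL on failure. */
static void *map_region(int fail)
{
        static char region[16];
        return fail ? NULL : region;
}

static int phy_probe(struct phy *phy, int pad_map_fails)
{
        phy->regs = map_region(0);
        if (!phy->regs)
                return -1;

        phy->pad_regs = map_region(pad_map_fails);
        if (!phy->pad_regs)          /* check the field just assigned ... */
                return -1;           /* ... not phy->regs again           */

        return 0;
}

int main(void)
{
        struct phy phy = { 0 };

        printf("pad map ok   -> %d\n", phy_probe(&phy, 0));
        printf("pad map fail -> %d\n", phy_probe(&phy, 1));
        return 0;
}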
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 30e8a61552d4..214172b68d5d 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -127,7 +127,8 @@ static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module,
static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address)
{
- u8 data, ret = 0;
+ u8 data;
+ int ret;
ret = twl_i2c_read_u8(module, &data, address);
if (ret >= 0)
@@ -327,7 +328,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct twl4030_usb_data *pdata = dev_get_platdata(dev);
- twl = devm_kzalloc(dev, sizeof *twl, GFP_KERNEL);
+ twl = devm_kzalloc(dev, sizeof(*twl), GFP_KERNEL);
if (!twl)
return -ENOMEM;
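
The twl6030 change matters because twl_i2c_read_u8() can return a negative errno on failure, which is exactly what the "if (ret >= 0)" test is meant to catch: stored into a u8, -EIO becomes a large positive value and the check can never fire. This is easy to demonstrate in isolation:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Pretend bus read that always fails. */
static int bus_read(uint8_t *data)
{
        (void)data;
        return -EIO;
}

int main(void)
{
        uint8_t data = 0;

        uint8_t bad_ret  = bus_read(&data);  /* -EIO truncated to 251   */
        int     good_ret = bus_read(&data);  /* stays negative          */

        /* The first comparison is always true, so the error is masked. */
        printf("u8  ret = %u -> error %s\n", bad_ret,
               bad_ret >= 0 ? "missed" : "caught");
        printf("int ret = %d -> error %s\n", good_ret,
               good_ret >= 0 ? "missed" : "caught");
        return 0;
}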
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 1b74523e1fee..8afa813d690b 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -130,7 +130,7 @@ struct usb_phy *usb_get_phy(enum usb_phy_type type)
phy = __usb_find_phy(&phy_list, type);
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
- pr_err("unable to find transceiver of type %s\n",
+ pr_debug("PHY: unable to find transceiver of type %s\n",
usb_phy_type_string(type));
goto err0;
}
@@ -228,7 +228,7 @@ struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index)
phy = __usb_find_phy_dev(dev, &phy_bind_list, index);
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
- pr_err("unable to find transceiver\n");
+ dev_dbg(dev, "unable to find transceiver\n");
goto err0;
}
@@ -329,6 +329,8 @@ int usb_add_phy(struct usb_phy *x, enum usb_phy_type type)
return -EINVAL;
}
+ ATOMIC_INIT_NOTIFIER_HEAD(&x->notifier);
+
spin_lock_irqsave(&phy_lock, flags);
list_for_each_entry(phy, &phy_list, head) {
@@ -367,6 +369,8 @@ int usb_add_phy_dev(struct usb_phy *x)
return -EINVAL;
}
+ ATOMIC_INIT_NOTIFIER_HEAD(&x->notifier);
+
spin_lock_irqsave(&phy_lock, flags);
list_for_each_entry(phy_bind, &phy_bind_list, list)
if (!(strcmp(phy_bind->phy_dev_name, dev_name(x->dev))))
@@ -420,10 +424,8 @@ int usb_bind_phy(const char *dev_name, u8 index,
unsigned long flags;
phy_bind = kzalloc(sizeof(*phy_bind), GFP_KERNEL);
- if (!phy_bind) {
- pr_err("phy_bind(): No memory for phy_bind");
+ if (!phy_bind)
return -ENOMEM;
- }
phy_bind->dev_name = dev_name;
phy_bind->phy_dev_name = phy_dev_name;
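
These phy.c hunks are the other half of the ATOMIC_INIT_NOTIFIER_HEAD removals earlier in this diff (phy-generic, phy-gpio-vbus, phy-mxs): the notifier head is now initialized once inside usb_add_phy()/usb_add_phy_dev(), so individual PHY drivers can no longer forget it. The toy sketch below shows the same "do the one-time setup inside the registration call" idea with a single callback slot; the kernel's atomic notifier chain API is deliberately not reproduced here.

#include <stdio.h>

struct toy_phy {
        const char *label;
        void (*on_event)(int event);   /* "notifier": set up at registration */
        int registered;
};

/* Registration owns the one-time setup, mirroring usb_add_phy_dev(). */
static int toy_add_phy(struct toy_phy *phy)
{
        phy->on_event = NULL;
        phy->registered = 1;
        printf("registered %s, event hook ready\n", phy->label);
        return 0;
}

static int toy_subscribe(struct toy_phy *phy, void (*fn)(int))
{
        if (!phy->registered)
                return -1;             /* nothing to attach to yet */
        phy->on_event = fn;
        return 0;
}

static void on_event(int event)
{
        printf("got event %d\n", event);
}

int main(void)
{
        struct toy_phy phy = { .label = "toy-phy" };

        toy_add_phy(&phy);              /* driver only registers       */
        toy_subscribe(&phy, on_event);  /* consumers attach afterwards */
        if (phy.on_event)
                phy.on_event(42);
        return 0;
}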
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 45b94019aec8..d49f9c326035 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -1124,19 +1124,8 @@ void usbhs_fifo_init(struct usbhs_priv *priv)
mod->irq_brdysts = 0;
cfifo->pipe = NULL;
- cfifo->tx_chan = NULL;
- cfifo->rx_chan = NULL;
-
d0fifo->pipe = NULL;
- d0fifo->tx_chan = NULL;
- d0fifo->rx_chan = NULL;
-
d1fifo->pipe = NULL;
- d1fifo->tx_chan = NULL;
- d1fifo->rx_chan = NULL;
-
- usbhsf_dma_init(priv, usbhsf_get_d0fifo(priv));
- usbhsf_dma_init(priv, usbhsf_get_d1fifo(priv));
}
void usbhs_fifo_quit(struct usbhs_priv *priv)
@@ -1147,9 +1136,6 @@ void usbhs_fifo_quit(struct usbhs_priv *priv)
mod->irq_ready = NULL;
mod->irq_bempsts = 0;
mod->irq_brdysts = 0;
-
- usbhsf_dma_quit(priv, usbhsf_get_d0fifo(priv));
- usbhsf_dma_quit(priv, usbhsf_get_d1fifo(priv));
}
int usbhs_fifo_probe(struct usbhs_priv *priv)
@@ -1171,6 +1157,7 @@ int usbhs_fifo_probe(struct usbhs_priv *priv)
fifo->ctr = D0FIFOCTR;
fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_tx_id);
fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_rx_id);
+ usbhsf_dma_init(priv, fifo);
/* D1FIFO */
fifo = usbhsf_get_d1fifo(priv);
@@ -1180,10 +1167,13 @@ int usbhs_fifo_probe(struct usbhs_priv *priv)
fifo->ctr = D1FIFOCTR;
fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id);
fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id);
+ usbhsf_dma_init(priv, fifo);
return 0;
}
void usbhs_fifo_remove(struct usbhs_priv *priv)
{
+ usbhsf_dma_quit(priv, usbhsf_get_d0fifo(priv));
+ usbhsf_dma_quit(priv, usbhsf_get_d1fifo(priv));
}
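
The renesas_usbhs change moves DMA channel setup out of usbhs_fifo_init(), which runs on every start, into usbhs_fifo_probe()/usbhs_fifo_remove(), so acquisition and release happen exactly once over the driver's lifetime while per-session state is still reset in init. The skeleton below shows that pairing in the abstract with invented field names:

#include <stdio.h>

struct fifo {
        int dma_channel;    /* one-shot resource, lives from probe to remove */
        int active_pipe;    /* per-session state, reset on every init        */
};

static int fifo_probe(struct fifo *f)
{
        f->dma_channel = 3;            /* acquire once */
        printf("probe: dma channel %d acquired\n", f->dma_channel);
        return 0;
}

static void fifo_init(struct fifo *f)
{
        f->active_pipe = -1;           /* only per-session reset here */
        printf("init: pipe state cleared\n");
}

static void fifo_remove(struct fifo *f)
{
        printf("remove: dma channel %d released\n", f->dma_channel);
        f->dma_channel = -1;
}

int main(void)
{
        struct fifo f;

        fifo_probe(&f);
        fifo_init(&f);     /* may run many times ...               */
        fifo_init(&f);     /* ... without touching the DMA channel */
        fifo_remove(&f);
        return 0;
}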
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 3385aeb5a364..458f3766bef1 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -987,11 +987,11 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
/* init DCP */
if (usbhsg_is_dcp(uep)) {
gpriv->gadget.ep0 = &uep->ep;
- uep->ep.maxpacket = 64;
+ usb_ep_set_maxpacket_limit(&uep->ep, 64);
}
/* init normal pipe */
else {
- uep->ep.maxpacket = 512;
+ usb_ep_set_maxpacket_limit(&uep->ep, 512);
list_add_tail(&uep->ep.ep_list, &gpriv->gadget.ep_list);
}
}
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index e40f565004d0..10e1ded9c9cc 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -1469,6 +1469,7 @@ static int usbhsh_start(struct usbhs_priv *priv)
ret = usb_add_hcd(hcd, 0, 0);
if (ret < 0)
return 0;
+ device_wakeup_enable(hcd->self.controller);
/*
* pipe initialize and enable DCP
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index ddb9c51f2c99..3ce5c74b29e4 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -472,6 +472,35 @@ config USB_SERIAL_MOS7840
To compile this driver as a module, choose M here: the
module will be called mos7840. If unsure, choose N.
+config USB_SERIAL_MXUPORT
+ tristate "USB Moxa UPORT Serial Driver"
+ ---help---
+ Say Y here if you want to use a MOXA UPort Serial hub.
+
+ This driver supports:
+
+ [2 Port]
+ - UPort 1250 : 2 Port RS-232/422/485 USB to Serial Hub
+ - UPort 1250I : 2 Port RS-232/422/485 USB to Serial Hub with
+ Isolation
+
+ [4 Port]
+ - UPort 1410 : 4 Port RS-232 USB to Serial Hub
+ - UPort 1450 : 4 Port RS-232/422/485 USB to Serial Hub
+ - UPort 1450I : 4 Port RS-232/422/485 USB to Serial Hub with
+ Isolation
+
+ [8 Port]
+ - UPort 1610-8 : 8 Port RS-232 USB to Serial Hub
+ - UPort 1650-8 : 8 Port RS-232/422/485 USB to Serial Hub
+
+ [16 Port]
+ - UPort 1610-16 : 16 Port RS-232 USB to Serial Hub
+ - UPort 1650-16 : 16 Port RS-232/422/485 USB to Serial Hub
+
+ To compile this driver as a module, choose M here: the
+ module will be called mxuport.
+
config USB_SERIAL_NAVMAN
tristate "USB Navman GPS device"
help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 42670f0b5bc0..bfdafd349441 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_USB_SERIAL_MCT_U232) += mct_u232.o
obj-$(CONFIG_USB_SERIAL_METRO) += metro-usb.o
obj-$(CONFIG_USB_SERIAL_MOS7720) += mos7720.o
obj-$(CONFIG_USB_SERIAL_MOS7840) += mos7840.o
+obj-$(CONFIG_USB_SERIAL_MXUPORT) += mxuport.o
obj-$(CONFIG_USB_SERIAL_NAVMAN) += navman.o
obj-$(CONFIG_USB_SERIAL_OMNINET) += omninet.o
obj-$(CONFIG_USB_SERIAL_OPTICON) += opticon.o
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 6e320cec397d..80a9845cd93f 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -10,9 +10,9 @@
*
 * The device works as a standard CDC device, it has 2 interfaces, the first
* one is for firmware access and the second is the serial one.
- * The protocol is very simply, there are two posibilities reading or writing.
+ * The protocol is very simple; there are two possibilities: reading or writing.
* When writing the first urb must have a Header that starts with 0x20 0x29 the
- * next two bytes must say how much data will be sended.
+ * next two bytes must say how much data will be sent.
* When reading the process is almost equal except that the header starts with
* 0x00 0x20.
*
@@ -31,15 +31,15 @@
*
* The driver registers himself with the USB-serial core and the USB Core. I had
* to implement a probe function against USB-serial, because other way, the
- * driver was attaching himself to both interfaces. I have tryed with different
+ * driver was attaching itself to both interfaces. I have tried with different
* configurations of usb_serial_driver with out exit, only the probe function
* could handle this correctly.
*
* I have taken some info from a Greg Kroah-Hartman article:
* http://www.linuxjournal.com/article/6573
* And from Linux Device Driver Kit CD, which is a great work, the authors taken
- * the work to recompile lots of information an knowladge in drivers development
- * and made it all avaible inside a cd.
+ * the work to recompile lots of information and knowledge in drivers development
+ * and made it all available inside a CD.
* URL: http://kernel.org/pub/linux/kernel/people/gregkh/ddk/
*
*/
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index bc77e955cbef..1532cde8a437 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -23,7 +23,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/tty.h>
#include <linux/slab.h>
@@ -71,7 +70,7 @@ struct ark3116_private {
__u32 lcr; /* line control register value */
__u32 hcr; /* handshake control register (0x8)
* value */
- __u32 mcr; /* modem contol register value */
+ __u32 mcr; /* modem control register value */
/* protects the status values below */
spinlock_t status_lock;
@@ -609,7 +608,7 @@ static void ark3116_read_int_callback(struct urb *urb)
}
-/* Data comes in via the bulk (data) URB, erors/interrupts via the int URB.
+/* Data comes in via the bulk (data) URB, errors/interrupts via the int URB.
* This means that we cannot be sure which data byte has an associated error
* condition, so we report an error for all data in the next bulk read.
*
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 84217e78ded4..15bc71853db5 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -18,14 +18,13 @@
* driver
*
* TODO:
- * -- Add true modem contol line query capability. Currently we track the
+ * -- Add true modem control line query capability. Currently we track the
* states reported by the interrupt and the states we request.
* -- Add support for flush commands
*/
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index 6335490d5760..35a2373cde67 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -125,10 +125,12 @@ static ssize_t new_id_store(struct device_driver *driver,
const char *buf, size_t count)
{
struct usb_serial_driver *usb_drv = to_usb_serial_driver(driver);
- ssize_t retval = usb_store_new_id(&usb_drv->dynids, driver, buf, count);
+ ssize_t retval = usb_store_new_id(&usb_drv->dynids, usb_drv->id_table,
+ driver, buf, count);
if (retval >= 0 && usb_drv->usb_driver != NULL)
retval = usb_store_new_id(&usb_drv->usb_driver->dynids,
+ usb_drv->usb_driver->id_table,
&usb_drv->usb_driver->drvwrap.driver,
buf, count);
return retval;
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index c2a4171ab9cb..82371f61f23d 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -16,7 +16,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -83,7 +82,6 @@ struct ch341_private {
unsigned baud_rate; /* set baud rate */
u8 line_control; /* set line control value RTS/DTR */
u8 line_status; /* active status of modem control inputs */
- u8 multi_status_change; /* status changed multiple since last call */
};
static int ch341_control_out(struct usb_device *dev, u8 request,
@@ -174,7 +172,6 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
r = 0;
spin_lock_irqsave(&priv->lock, flags);
priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
- priv->multi_status_change = 0;
spin_unlock_irqrestore(&priv->lock, flags);
} else
r = -EPROTO;
@@ -384,10 +381,8 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
uint8_t *break_reg;
break_reg = kmalloc(2, GFP_KERNEL);
- if (!break_reg) {
- dev_err(&port->dev, "%s - kmalloc failed\n", __func__);
+ if (!break_reg)
return;
- }
r = ch341_control_in(port->serial->dev, CH341_REQ_READ_REG,
ch341_break_reg, 0, break_reg, 2);
@@ -442,11 +437,55 @@ static int ch341_tiocmset(struct tty_struct *tty,
return ch341_set_handshake(port->serial->dev, control);
}
+static void ch341_update_line_status(struct usb_serial_port *port,
+ unsigned char *data, size_t len)
+{
+ struct ch341_private *priv = usb_get_serial_port_data(port);
+ struct tty_struct *tty;
+ unsigned long flags;
+ u8 status;
+ u8 delta;
+
+ if (len < 4)
+ return;
+
+ status = ~data[2] & CH341_BITS_MODEM_STAT;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ delta = status ^ priv->line_status;
+ priv->line_status = status;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (data[1] & CH341_MULT_STAT)
+ dev_dbg(&port->dev, "%s - multiple status change\n", __func__);
+
+ if (!delta)
+ return;
+
+ if (delta & CH341_BIT_CTS)
+ port->icount.cts++;
+ if (delta & CH341_BIT_DSR)
+ port->icount.dsr++;
+ if (delta & CH341_BIT_RI)
+ port->icount.rng++;
+ if (delta & CH341_BIT_DCD) {
+ port->icount.dcd++;
+ tty = tty_port_tty_get(&port->port);
+ if (tty) {
+ usb_serial_handle_dcd_change(port, tty,
+ status & CH341_BIT_DCD);
+ tty_kref_put(tty);
+ }
+ }
+
+ wake_up_interruptible(&port->port.delta_msr_wait);
+}
+
static void ch341_read_int_callback(struct urb *urb)
{
- struct usb_serial_port *port = (struct usb_serial_port *) urb->context;
+ struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
- unsigned int actual_length = urb->actual_length;
+ unsigned int len = urb->actual_length;
int status;
switch (urb->status) {
@@ -457,89 +496,23 @@ static void ch341_read_int_callback(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
+ dev_dbg(&urb->dev->dev, "%s - urb shutting down: %d\n",
__func__, urb->status);
return;
default:
- dev_dbg(&urb->dev->dev, "%s - nonzero urb status received: %d\n",
+ dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n",
__func__, urb->status);
goto exit;
}
- usb_serial_debug_data(&port->dev, __func__,
- urb->actual_length, urb->transfer_buffer);
-
- if (actual_length >= 4) {
- struct ch341_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
- u8 prev_line_status = priv->line_status;
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_status = (~(data[2])) & CH341_BITS_MODEM_STAT;
- if ((data[1] & CH341_MULT_STAT))
- priv->multi_status_change = 1;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if ((priv->line_status ^ prev_line_status) & CH341_BIT_DCD) {
- struct tty_struct *tty = tty_port_tty_get(&port->port);
- if (tty)
- usb_serial_handle_dcd_change(port, tty,
- priv->line_status & CH341_BIT_DCD);
- tty_kref_put(tty);
- }
-
- wake_up_interruptible(&port->port.delta_msr_wait);
- }
-
+ usb_serial_debug_data(&port->dev, __func__, len, data);
+ ch341_update_line_status(port, data, len);
exit:
status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status)
- dev_err(&urb->dev->dev,
- "%s - usb_submit_urb failed with result %d\n",
+ if (status) {
+ dev_err(&urb->dev->dev, "%s - usb_submit_urb failed: %d\n",
__func__, status);
-}
-
-static int ch341_tiocmiwait(struct tty_struct *tty, unsigned long arg)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct ch341_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
- u8 prevstatus;
- u8 status;
- u8 changed;
- u8 multi_change = 0;
-
- spin_lock_irqsave(&priv->lock, flags);
- prevstatus = priv->line_status;
- priv->multi_status_change = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- while (!multi_change) {
- interruptible_sleep_on(&port->port.delta_msr_wait);
- /* see if a signal did it */
- if (signal_pending(current))
- return -ERESTARTSYS;
-
- if (port->serial->disconnected)
- return -EIO;
-
- spin_lock_irqsave(&priv->lock, flags);
- status = priv->line_status;
- multi_change = priv->multi_status_change;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- changed = prevstatus ^ status;
-
- if (((arg & TIOCM_RNG) && (changed & CH341_BIT_RI)) ||
- ((arg & TIOCM_DSR) && (changed & CH341_BIT_DSR)) ||
- ((arg & TIOCM_CD) && (changed & CH341_BIT_DCD)) ||
- ((arg & TIOCM_CTS) && (changed & CH341_BIT_CTS))) {
- return 0;
- }
- prevstatus = status;
}
-
- return 0;
}
static int ch341_tiocmget(struct tty_struct *tty)
@@ -595,7 +568,7 @@ static struct usb_serial_driver ch341_device = {
.break_ctl = ch341_break_ctl,
.tiocmget = ch341_tiocmget,
.tiocmset = ch341_tiocmset,
- .tiocmiwait = ch341_tiocmiwait,
+ .tiocmiwait = usb_serial_generic_tiocmiwait,
.read_int_callback = ch341_read_int_callback,
.port_probe = ch341_port_probe,
.port_remove = ch341_port_remove,
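
ch341_update_line_status() is what lets the driver drop its private tiocmiwait loop: on every interrupt URB it XORs the new modem-status byte against the cached one, bumps the per-signal counters for the bits that changed, and wakes delta_msr_wait so usb_serial_generic_tiocmiwait() can do the waiting. The delta bookkeeping on its own looks like the standalone sketch below; the bit positions are placeholders, the real CH341_BIT_* values live in the driver and are not shown in this hunk.

#include <stdio.h>
#include <stdint.h>

/* Placeholder bit layout, not the real CH341 status byte. */
#define BIT_CTS 0x01
#define BIT_DSR 0x02
#define BIT_RI  0x04
#define BIT_DCD 0x08

struct icount {
        unsigned cts, dsr, rng, dcd;
};

static void update_line_status(uint8_t *cached, uint8_t status,
                               struct icount *ic)
{
        uint8_t delta = status ^ *cached;

        *cached = status;
        if (!delta)
                return;             /* nothing changed, nobody to wake */

        if (delta & BIT_CTS)
                ic->cts++;
        if (delta & BIT_DSR)
                ic->dsr++;
        if (delta & BIT_RI)
                ic->rng++;
        if (delta & BIT_DCD)
                ic->dcd++;
        /* in the driver this is where delta_msr_wait gets woken */
}

int main(void)
{
        struct icount ic = { 0 };
        uint8_t cached = 0;

        update_line_status(&cached, BIT_DCD | BIT_CTS, &ic);  /* both rise */
        update_line_status(&cached, BIT_DCD, &ic);            /* CTS drops */
        printf("cts=%u dsr=%u rng=%u dcd=%u\n", ic.cts, ic.dsr, ic.rng, ic.dcd);
        return 0;
}

The cypress_m8 and f81232 hunks later in this diff follow the same recipe so they too can switch to the generic tiocmiwait helper.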
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index c69bb50d4663..8d7fc48b1f30 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -14,7 +14,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/console.h>
@@ -135,7 +134,6 @@ static int usb_console_setup(struct console *co, char *options)
tty = kzalloc(sizeof(*tty), GFP_KERNEL);
if (!tty) {
retval = -ENOMEM;
- dev_err(&port->dev, "no more memory\n");
goto reset_open_count;
}
kref_init(&tty->kref);
@@ -144,7 +142,6 @@ static int usb_console_setup(struct console *co, char *options)
tty->index = co->index;
if (tty_init_termios(tty)) {
retval = -ENOMEM;
- dev_err(&port->dev, "no more memory\n");
goto free_tty;
}
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 6987b535aa98..95fa1217afdd 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -305,10 +305,8 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
length = (((size - 1) | 3) + 1) / 4;
buf = kcalloc(length, sizeof(__le32), GFP_KERNEL);
- if (!buf) {
- dev_err(&port->dev, "%s - out of memory.\n", __func__);
+ if (!buf)
return -ENOMEM;
- }
/* Issue the request, attempting to read 'size' bytes */
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
@@ -352,10 +350,8 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
length = (((size - 1) | 3) + 1) / 4;
buf = kmalloc(length * sizeof(__le32), GFP_KERNEL);
- if (!buf) {
- dev_err(&port->dev, "%s - out of memory.\n", __func__);
+ if (!buf)
return -ENOMEM;
- }
/* Array of integers into bytes */
for (i = 0; i < length; i++)
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 6e1b69d0f5f5..0ac3b3b3236c 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -285,7 +284,7 @@ static void cyberjack_read_int_callback(struct urb *urb)
goto resubmit;
}
- /* "+=" is probably more fault tollerant than "=" */
+ /* "+=" is probably more fault tolerant than "=" */
priv->rdtodo += size;
dev_dbg(dev, "%s - rdtodo: %d\n", __func__, priv->rdtodo);
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 558605d646f3..bccb1223143a 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -113,7 +112,7 @@ struct cypress_private {
int baud_rate; /* stores current baud rate in
integer form */
int isthrottled; /* if throttled, discard reads */
- char prev_status, diff_status; /* used for TIOCMIWAIT */
+ char prev_status; /* used for TIOCMIWAIT */
/* we pass a pointer to this as the argument sent to
cypress_set_termios old_termios */
struct ktermios tmp_termios; /* stores the old termios settings */
@@ -136,7 +135,6 @@ static void cypress_set_termios(struct tty_struct *tty,
static int cypress_tiocmget(struct tty_struct *tty);
static int cypress_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
-static int cypress_tiocmiwait(struct tty_struct *tty, unsigned long arg);
static int cypress_chars_in_buffer(struct tty_struct *tty);
static void cypress_throttle(struct tty_struct *tty);
static void cypress_unthrottle(struct tty_struct *tty);
@@ -162,7 +160,7 @@ static struct usb_serial_driver cypress_earthmate_device = {
.set_termios = cypress_set_termios,
.tiocmget = cypress_tiocmget,
.tiocmset = cypress_tiocmset,
- .tiocmiwait = cypress_tiocmiwait,
+ .tiocmiwait = usb_serial_generic_tiocmiwait,
.chars_in_buffer = cypress_chars_in_buffer,
.throttle = cypress_throttle,
.unthrottle = cypress_unthrottle,
@@ -188,7 +186,7 @@ static struct usb_serial_driver cypress_hidcom_device = {
.set_termios = cypress_set_termios,
.tiocmget = cypress_tiocmget,
.tiocmset = cypress_tiocmset,
- .tiocmiwait = cypress_tiocmiwait,
+ .tiocmiwait = usb_serial_generic_tiocmiwait,
.chars_in_buffer = cypress_chars_in_buffer,
.throttle = cypress_throttle,
.unthrottle = cypress_unthrottle,
@@ -214,7 +212,7 @@ static struct usb_serial_driver cypress_ca42v2_device = {
.set_termios = cypress_set_termios,
.tiocmget = cypress_tiocmget,
.tiocmset = cypress_tiocmset,
- .tiocmiwait = cypress_tiocmiwait,
+ .tiocmiwait = usb_serial_generic_tiocmiwait,
.chars_in_buffer = cypress_chars_in_buffer,
.throttle = cypress_throttle,
.unthrottle = cypress_unthrottle,
@@ -864,45 +862,6 @@ static int cypress_tiocmset(struct tty_struct *tty,
return cypress_write(tty, port, NULL, 0);
}
-
-static int cypress_tiocmiwait(struct tty_struct *tty, unsigned long arg)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct cypress_private *priv = usb_get_serial_port_data(port);
- char diff;
-
- for (;;) {
- interruptible_sleep_on(&port->port.delta_msr_wait);
- /* see if a signal did it */
- if (signal_pending(current))
- return -ERESTARTSYS;
-
- if (port->serial->disconnected)
- return -EIO;
-
- diff = priv->diff_status;
- if (diff == 0)
- return -EIO; /* no change => error */
-
- /* consume all events */
- priv->diff_status = 0;
-
- /* return 0 if caller wanted to know about
- these bits */
- if (((arg & TIOCM_RNG) && (diff & UART_RI)) ||
- ((arg & TIOCM_DSR) && (diff & UART_DSR)) ||
- ((arg & TIOCM_CD) && (diff & UART_CD)) ||
- ((arg & TIOCM_CTS) && (diff & UART_CTS)))
- return 0;
- /* otherwise caller can't care less about what
- * happened, and so we continue to wait for
- * more events.
- */
- }
-
- return 0;
-}
-
static void cypress_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
@@ -1185,9 +1144,21 @@ static void cypress_read_int_callback(struct urb *urb)
spin_lock_irqsave(&priv->lock, flags);
/* check to see if status has changed */
if (priv->current_status != priv->prev_status) {
- priv->diff_status |= priv->current_status ^
- priv->prev_status;
- wake_up_interruptible(&port->port.delta_msr_wait);
+ u8 delta = priv->current_status ^ priv->prev_status;
+
+ if (delta & UART_MSR_MASK) {
+ if (delta & UART_CTS)
+ port->icount.cts++;
+ if (delta & UART_DSR)
+ port->icount.dsr++;
+ if (delta & UART_RI)
+ port->icount.rng++;
+ if (delta & UART_CD)
+ port->icount.dcd++;
+
+ wake_up_interruptible(&port->port.delta_msr_wait);
+ }
+
priv->prev_status = priv->current_status;
}
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h
index b461311a2ae7..119d2e17077b 100644
--- a/drivers/usb/serial/cypress_m8.h
+++ b/drivers/usb/serial/cypress_m8.h
@@ -55,19 +55,23 @@
#define CT_GENERIC 0x0F
/* End of chiptype definitions */
-/* RS-232 serial data communication protocol definitions */
-/* these are sent / read at byte 0 of the input/output hid reports */
-/* You can find these values defined in the CY4601 USB to Serial design notes */
-
-#define CONTROL_DTR 0x20 /* data terminal ready - flow control - host to device */
-#define UART_DSR 0x20 /* data set ready - flow control - device to host */
-#define CONTROL_RTS 0x10 /* request to send - flow control - host to device */
-#define UART_CTS 0x10 /* clear to send - flow control - device to host */
-#define UART_RI 0x10 /* ring indicator - modem - device to host */
-#define UART_CD 0x40 /* carrier detect - modem - device to host */
-#define CYP_ERROR 0x08 /* received from input report - device to host */
-/* Note - the below has nothing to do with the "feature report" reset */
-#define CONTROL_RESET 0x08 /* sent with output report - host to device */
+/*
+ * RS-232 serial data communication protocol definitions.
+ *
+ * These are sent / read at byte 0 of the input/output hid reports.
+ * You can find these values defined in the CY4601 USB to Serial design notes.
+ */
+
+#define CONTROL_DTR 0x20 /* data terminal ready */
+#define CONTROL_RTS 0x10 /* request to send */
+#define CONTROL_RESET 0x08 /* sent with output report */
+
+#define UART_MSR_MASK 0xf0
+#define UART_RI 0x80 /* ring indicator */
+#define UART_CD 0x40 /* carrier detect */
+#define UART_DSR 0x20 /* data set ready */
+#define UART_CTS 0x10 /* clear to send */
+#define CYP_ERROR 0x08 /* received from input report */
/* End of RS-232 protocol definitions */
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 19b467fe0388..8a23c53b946e 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 0f658618db13..90e603d5f660 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 639a18fb67e6..c5dc233db2d9 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -55,6 +54,13 @@ static void f81232_update_line_status(struct usb_serial_port *port,
unsigned char *data,
unsigned int actual_length)
{
+ /*
+ * FIXME: Update port->icount, and call
+ *
+ * wake_up_interruptible(&port->port.delta_msr_wait);
+ *
+ * on MSR changes.
+ */
}
static void f81232_read_int_callback(struct urb *urb)
@@ -110,7 +116,6 @@ static void f81232_process_read_urb(struct urb *urb)
line_status = priv->line_status;
priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
spin_unlock_irqrestore(&priv->lock, flags);
- wake_up_interruptible(&port->port.delta_msr_wait);
if (!urb->actual_length)
return;
@@ -241,54 +246,12 @@ static int f81232_carrier_raised(struct usb_serial_port *port)
return 0;
}
-static int f81232_tiocmiwait(struct tty_struct *tty, unsigned long arg)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct f81232_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
- unsigned int prevstatus;
- unsigned int status;
- unsigned int changed;
-
- spin_lock_irqsave(&priv->lock, flags);
- prevstatus = priv->line_status;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- while (1) {
- interruptible_sleep_on(&port->port.delta_msr_wait);
- /* see if a signal did it */
- if (signal_pending(current))
- return -ERESTARTSYS;
-
- if (port->serial->disconnected)
- return -EIO;
-
- spin_lock_irqsave(&priv->lock, flags);
- status = priv->line_status;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- changed = prevstatus ^ status;
-
- if (((arg & TIOCM_RNG) && (changed & UART_RING)) ||
- ((arg & TIOCM_DSR) && (changed & UART_DSR)) ||
- ((arg & TIOCM_CD) && (changed & UART_DCD)) ||
- ((arg & TIOCM_CTS) && (changed & UART_CTS))) {
- return 0;
- }
- prevstatus = status;
- }
- /* NOTREACHED */
- return 0;
-}
-
static int f81232_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct serial_struct ser;
struct usb_serial_port *port = tty->driver_data;
- dev_dbg(&port->dev, "%s cmd = 0x%04x\n", __func__, cmd);
-
switch (cmd) {
case TIOCGSERIAL:
memset(&ser, 0, sizeof ser);
@@ -302,8 +265,6 @@ static int f81232_ioctl(struct tty_struct *tty,
return 0;
default:
- dev_dbg(&port->dev, "%s not supported = 0x%04x\n",
- __func__, cmd);
break;
}
return -ENOIOCTLCMD;
@@ -354,7 +315,7 @@ static struct usb_serial_driver f81232_device = {
.set_termios = f81232_set_termios,
.tiocmget = f81232_tiocmget,
.tiocmset = f81232_tiocmset,
- .tiocmiwait = f81232_tiocmiwait,
+ .tiocmiwait = usb_serial_generic_tiocmiwait,
.process_read_urb = f81232_process_read_urb,
.read_int_callback = f81232_read_int_callback,
.port_probe = f81232_port_probe,
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index fb0d537435eb..ee1f00f03c43 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -33,7 +33,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -145,7 +144,7 @@ static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
* Device ID not listed? Test it using
* /sys/bus/usb-serial/drivers/ftdi_sio/new_id and send a patch or report.
*/
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
@@ -153,6 +152,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -192,6 +192,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_LP101_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_P200X_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
@@ -1695,11 +1697,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
priv = kzalloc(sizeof(struct ftdi_private), GFP_KERNEL);
- if (!priv) {
- dev_err(&port->dev, "%s- kmalloc(%Zd) failed.\n", __func__,
- sizeof(struct ftdi_private));
+ if (!priv)
return -ENOMEM;
- }
mutex_init(&priv->cfg_lock);
@@ -2124,10 +2123,20 @@ static void ftdi_set_termios(struct tty_struct *tty,
}
/*
- * All FTDI UART chips are limited to CS7/8. We won't pretend to
+ * All FTDI UART chips are limited to CS7/8. We shouldn't pretend to
* support CS5/6 and revert the CSIZE setting instead.
+ *
+ * CS5 however is used to control some smartcard readers which abuse
+ * this limitation to switch modes. Original FTDI chips fall back to
+ * eight data bits.
+ *
+ * TODO: Implement a quirk to only allow this with mentioned
+ * readers. One I know of (Argolis Smartreader V1)
+ * returns "USB smartcard server" as iInterface string.
+ * The vendor didn't bother with a custom VID/PID of
+ * course.
*/
- if ((C_CSIZE(tty) != CS8) && (C_CSIZE(tty) != CS7)) {
+ if (C_CSIZE(tty) == CS6) {
dev_warn(ddev, "requested CSIZE setting not supported\n");
termios->c_cflag &= ~CSIZE;
@@ -2174,6 +2183,9 @@ no_skip:
urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE;
}
switch (cflag & CSIZE) {
+ case CS5:
+ dev_dbg(ddev, "Setting CS5 quirk\n");
+ break;
case CS7:
urb_value |= 7;
dev_dbg(ddev, "Setting CS7\n");
@@ -2383,8 +2395,6 @@ static int ftdi_ioctl(struct tty_struct *tty,
{
struct usb_serial_port *port = tty->driver_data;
- dev_dbg(&port->dev, "%s cmd 0x%04x\n", __func__, cmd);
-
/* Based on code from acm.c and others */
switch (cmd) {
@@ -2401,11 +2411,7 @@ static int ftdi_ioctl(struct tty_struct *tty,
default:
break;
}
- /* This is not necessarily an error - turns out the higher layers
- * will do some ioctls themselves (see comment above)
- */
- dev_dbg(&port->dev, "%s arg not supported - it was 0x%04x - check /usr/include/asm/ioctls.h\n",
- __func__, cmd);
+
return -ENOIOCTLCMD;
}
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a7019d1e3058..1e2d369df86e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -50,6 +50,7 @@
#define TI_XDS100V2_PID 0xa6d0
#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
+#define FTDI_EV3CON_PID 0xABB9 /* Mindstorms EV3 Console Adapter */
/* US Interface Navigator (http://www.usinterface.com/) */
#define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */
@@ -363,6 +364,12 @@
/* Sprog II (Andrew Crosland's SprogII DCC interface) */
#define FTDI_SPROG_II 0xF0C8
+/*
+ * Two of the Tagsys RFID Readers
+ */
+#define FTDI_TAGSYS_LP101_PID 0xF0E9 /* Tagsys L-P101 RFID*/
+#define FTDI_TAGSYS_P200X_PID 0xF0EE /* Tagsys Medio P200x RFID*/
+
/* an infrared receiver for user access control with IR tags */
#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 04b5ed90ffb2..db591d19d416 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -25,7 +25,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/tty.h>
@@ -275,14 +274,13 @@ static int pkt_add(struct garmin_data *garmin_data_p,
unsigned long flags;
struct garmin_packet *pkt;
- /* process only packets containg data ... */
+ /* process only packets containing data ... */
if (data_length) {
pkt = kmalloc(sizeof(struct garmin_packet)+data_length,
GFP_ATOMIC);
- if (pkt == NULL) {
- dev_err(&garmin_data_p->port->dev, "out of memory\n");
+ if (!pkt)
return 0;
- }
+
pkt->size = data_length;
memcpy(pkt->data, data, data_length);
@@ -1006,14 +1004,11 @@ static int garmin_write_bulk(struct usb_serial_port *port,
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
buffer = kmalloc(count, GFP_ATOMIC);
- if (!buffer) {
- dev_err(&port->dev, "out of memory\n");
+ if (!buffer)
return -ENOMEM;
- }
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
- dev_err(&port->dev, "no more free urbs\n");
kfree(buffer);
return -ENOMEM;
}
@@ -1148,7 +1143,7 @@ static void garmin_read_process(struct garmin_data *garmin_data_p,
unsigned long flags;
if (garmin_data_p->flags & FLAGS_DROP_DATA) {
- /* abort-transfer cmd is actice */
+ /* abort-transfer cmd is active */
dev_dbg(&garmin_data_p->port->dev, "%s - pkt dropped\n", __func__);
} else if (garmin_data_p->state != STATE_DISCONNECTED &&
garmin_data_p->state != STATE_RESET) {
@@ -1393,10 +1388,9 @@ static int garmin_port_probe(struct usb_serial_port *port)
struct garmin_data *garmin_data_p;
garmin_data_p = kzalloc(sizeof(struct garmin_data), GFP_KERNEL);
- if (garmin_data_p == NULL) {
- dev_err(&port->dev, "%s - Out of memory\n", __func__);
+ if (!garmin_data_p)
return -ENOMEM;
- }
+
init_timer(&garmin_data_p->timer);
spin_lock_init(&garmin_data_p->lock);
INIT_LIST_HEAD(&garmin_data_p->pktlist);
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index c91481d74a14..c0866971db2b 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -32,7 +32,6 @@
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -898,7 +897,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
edge_port->txfifo.fifo = kmalloc(edge_port->maxTxCredits, GFP_KERNEL);
if (!edge_port->txfifo.fifo) {
- dev_dbg(dev, "%s - no memory\n", __func__);
edge_close(port);
return -ENOMEM;
}
@@ -908,7 +906,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
edge_port->write_in_progress = false;
if (!edge_port->write_urb) {
- dev_dbg(dev, "%s - no memory\n", __func__);
edge_close(port);
return -ENOMEM;
}
@@ -1245,9 +1242,7 @@ static void send_more_port_data(struct edgeport_serial *edge_serial,
to send out */
count = fifo->count;
buffer = kmalloc(count+2, GFP_ATOMIC);
- if (buffer == NULL) {
- dev_err_console(edge_port->port,
- "%s - no more kernel memory...\n", __func__);
+ if (!buffer) {
edge_port->write_in_progress = false;
goto exit_send;
}
@@ -1593,8 +1588,6 @@ static int edge_ioctl(struct tty_struct *tty,
DEFINE_WAIT(wait);
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
- dev_dbg(&port->dev, "%s - cmd = 0x%x\n", __func__, cmd);
-
switch (cmd) {
case TIOCSERGETLSR:
dev_dbg(&port->dev, "%s TIOCSERGETLSR\n", __func__);
@@ -2027,11 +2020,8 @@ static int sram_write(struct usb_serial *serial, __u16 extAddr, __u16 addr,
dev_dbg(&serial->dev->dev, "%s - %x, %x, %d\n", __func__, extAddr, addr, length);
transfer_buffer = kmalloc(64, GFP_KERNEL);
- if (!transfer_buffer) {
- dev_err(&serial->dev->dev, "%s - kmalloc(%d) failed.\n",
- __func__, 64);
+ if (!transfer_buffer)
return -ENOMEM;
- }
/* need to split these writes up into 64 byte chunks */
result = 0;
@@ -2075,11 +2065,8 @@ static int rom_write(struct usb_serial *serial, __u16 extAddr, __u16 addr,
unsigned char *transfer_buffer;
transfer_buffer = kmalloc(64, GFP_KERNEL);
- if (!transfer_buffer) {
- dev_err(&serial->dev->dev, "%s - kmalloc(%d) failed.\n",
- __func__, 64);
+ if (!transfer_buffer)
return -ENOMEM;
- }
/* need to split these writes up into 64 byte chunks */
result = 0;
@@ -2121,11 +2108,8 @@ static int rom_read(struct usb_serial *serial, __u16 extAddr,
unsigned char *transfer_buffer;
transfer_buffer = kmalloc(64, GFP_KERNEL);
- if (!transfer_buffer) {
- dev_err(&serial->dev->dev,
- "%s - kmalloc(%d) failed.\n", __func__, 64);
+ if (!transfer_buffer)
return -ENOMEM;
- }
/* need to split these reads up into 64 byte chunks */
result = 0;
@@ -2165,11 +2149,8 @@ static int send_iosp_ext_cmd(struct edgeport_port *edge_port,
int status = 0;
buffer = kmalloc(10, GFP_ATOMIC);
- if (!buffer) {
- dev_err(&edge_port->port->dev,
- "%s - kmalloc(%d) failed.\n", __func__, 10);
+ if (!buffer)
return -ENOMEM;
- }
currentCommand = buffer;
@@ -2276,10 +2257,9 @@ static int send_cmd_write_baud_rate(struct edgeport_port *edge_port,
/* Alloc memory for the string of commands. */
cmdBuffer = kmalloc(0x100, GFP_ATOMIC);
- if (!cmdBuffer) {
- dev_err(dev, "%s - kmalloc(%d) failed.\n", __func__, 0x100);
+ if (!cmdBuffer)
return -ENOMEM;
- }
+
currCmd = cmdBuffer;
/* Enable access to divisor latch */
@@ -2785,10 +2765,9 @@ static int edge_startup(struct usb_serial *serial)
/* create our private serial structure */
edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
- if (edge_serial == NULL) {
- dev_err(&serial->dev->dev, "%s - Out of memory\n", __func__);
+ if (!edge_serial)
return -ENOMEM;
- }
+
spin_lock_init(&edge_serial->es_lock);
edge_serial->serial = serial;
usb_set_serial_data(serial, edge_serial);
@@ -2877,14 +2856,12 @@ static int edge_startup(struct usb_serial *serial)
/* not set up yet, so do it now */
edge_serial->interrupt_read_urb =
usb_alloc_urb(0, GFP_KERNEL);
- if (!edge_serial->interrupt_read_urb) {
- dev_err(ddev, "out of memory\n");
+ if (!edge_serial->interrupt_read_urb)
return -ENOMEM;
- }
+
edge_serial->interrupt_in_buffer =
kmalloc(buffer_size, GFP_KERNEL);
if (!edge_serial->interrupt_in_buffer) {
- dev_err(ddev, "out of memory\n");
usb_free_urb(edge_serial->interrupt_read_urb);
return -ENOMEM;
}
@@ -2914,14 +2891,12 @@ static int edge_startup(struct usb_serial *serial)
/* not set up yet, so do it now */
edge_serial->read_urb =
usb_alloc_urb(0, GFP_KERNEL);
- if (!edge_serial->read_urb) {
- dev_err(ddev, "out of memory\n");
+ if (!edge_serial->read_urb)
return -ENOMEM;
- }
+
edge_serial->bulk_in_buffer =
kmalloc(buffer_size, GFP_KERNEL);
if (!edge_serial->bulk_in_buffer) {
- dev_err(&dev->dev, "out of memory\n");
usb_free_urb(edge_serial->read_urb);
return -ENOMEM;
}
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index b7187bf32469..a2db5be9c305 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -364,11 +363,9 @@ static int write_boot_mem(struct edgeport_serial *serial,
/* Must do a read before write */
if (!serial->TiReadI2C) {
temp = kmalloc(1, GFP_KERNEL);
- if (!temp) {
- dev_err(&serial->serial->dev->dev,
- "%s - out of memory\n", __func__);
+ if (!temp)
return -ENOMEM;
- }
+
status = read_boot_mem(serial, 0, 1, temp);
kfree(temp);
if (status)
@@ -471,10 +468,8 @@ static int tx_active(struct edgeport_port *port)
int bytes_left = 0;
oedb = kmalloc(sizeof(*oedb), GFP_KERNEL);
- if (!oedb) {
- dev_err(&port->port->dev, "%s - out of memory\n", __func__);
+ if (!oedb)
return -ENOMEM;
- }
lsr = kmalloc(1, GFP_KERNEL); /* Sigh, that's right, just one byte,
as not all platforms can do DMA
@@ -625,14 +620,11 @@ static int check_i2c_image(struct edgeport_serial *serial)
__u16 ttype;
rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
- if (!rom_desc) {
- dev_err(dev, "%s - out of memory\n", __func__);
+ if (!rom_desc)
return -ENOMEM;
- }
+
buffer = kmalloc(TI_MAX_I2C_SIZE, GFP_KERNEL);
if (!buffer) {
- dev_err(dev, "%s - out of memory when allocating buffer\n",
- __func__);
kfree(rom_desc);
return -ENOMEM;
}
@@ -706,10 +698,9 @@ static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer)
struct device *dev = &serial->serial->dev->dev;
rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
- if (!rom_desc) {
- dev_err(dev, "%s - out of memory\n", __func__);
+ if (!rom_desc)
return -ENOMEM;
- }
+
start_address = get_descriptor_addr(serial, I2C_DESC_TYPE_ION,
rom_desc);
@@ -769,10 +760,8 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
sizeof(struct ti_i2c_firmware_rec));
buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!buffer) {
- dev_err(dev, "%s - out of memory\n", __func__);
+ if (!buffer)
return -ENOMEM;
- }
// Set entire image of 0xffs
memset(buffer, 0xff, buffer_size);
@@ -832,10 +821,8 @@ static int i2c_type_bootmode(struct edgeport_serial *serial)
u8 *data;
data = kmalloc(1, GFP_KERNEL);
- if (!data) {
- dev_err(dev, "%s - out of memory\n", __func__);
+ if (!data)
return -ENOMEM;
- }
/* Try to read type 2 */
status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
@@ -986,10 +973,9 @@ static int download_fw(struct edgeport_serial *serial)
* Read Manufacturing Descriptor from TI Based Edgeport
*/
ti_manuf_desc = kmalloc(sizeof(*ti_manuf_desc), GFP_KERNEL);
- if (!ti_manuf_desc) {
- dev_err(dev, "%s - out of memory.\n", __func__);
+ if (!ti_manuf_desc)
return -ENOMEM;
- }
+
status = get_manuf_info(serial, (__u8 *)ti_manuf_desc);
if (status) {
kfree(ti_manuf_desc);
@@ -1006,7 +992,6 @@ static int download_fw(struct edgeport_serial *serial)
rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
if (!rom_desc) {
- dev_err(dev, "%s - out of memory.\n", __func__);
kfree(ti_manuf_desc);
return -ENOMEM;
}
@@ -1023,7 +1008,6 @@ static int download_fw(struct edgeport_serial *serial)
firmware_version = kmalloc(sizeof(*firmware_version),
GFP_KERNEL);
if (!firmware_version) {
- dev_err(dev, "%s - out of memory.\n", __func__);
kfree(rom_desc);
kfree(ti_manuf_desc);
return -ENOMEM;
@@ -1068,8 +1052,6 @@ static int download_fw(struct edgeport_serial *serial)
record = kmalloc(1, GFP_KERNEL);
if (!record) {
- dev_err(dev, "%s - out of memory.\n",
- __func__);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
@@ -1153,7 +1135,6 @@ static int download_fw(struct edgeport_serial *serial)
header = kmalloc(HEADER_SIZE, GFP_KERNEL);
if (!header) {
- dev_err(dev, "%s - out of memory.\n", __func__);
kfree(rom_desc);
kfree(ti_manuf_desc);
return -ENOMEM;
@@ -1161,7 +1142,6 @@ static int download_fw(struct edgeport_serial *serial)
vheader = kmalloc(HEADER_SIZE, GFP_KERNEL);
if (!vheader) {
- dev_err(dev, "%s - out of memory.\n", __func__);
kfree(header);
kfree(rom_desc);
kfree(ti_manuf_desc);
@@ -1290,10 +1270,9 @@ static int download_fw(struct edgeport_serial *serial)
* Read Manufacturing Descriptor from TI Based Edgeport
*/
ti_manuf_desc = kmalloc(sizeof(*ti_manuf_desc), GFP_KERNEL);
- if (!ti_manuf_desc) {
- dev_err(dev, "%s - out of memory.\n", __func__);
+ if (!ti_manuf_desc)
return -ENOMEM;
- }
+
status = get_manuf_info(serial, (__u8 *)ti_manuf_desc);
if (status) {
kfree(ti_manuf_desc);
@@ -1328,10 +1307,8 @@ static int download_fw(struct edgeport_serial *serial)
buffer_size = (((1024 * 16) - 512) +
sizeof(struct ti_i2c_image_header));
buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!buffer) {
- dev_err(dev, "%s - out of memory\n", __func__);
+ if (!buffer)
return -ENOMEM;
- }
/* Initialize the buffer to 0xff (pad the buffer) */
memset(buffer, 0xff, buffer_size);
@@ -2122,7 +2099,6 @@ static void change_port_settings(struct tty_struct *tty,
config = kmalloc (sizeof (*config), GFP_KERNEL);
if (!config) {
tty->termios = *old_termios;
- dev_err(dev, "%s - out of memory\n", __func__);
return;
}
@@ -2362,8 +2338,6 @@ static int edge_ioctl(struct tty_struct *tty,
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
- dev_dbg(&port->dev, "%s - cmd = 0x%x\n", __func__, cmd);
-
switch (cmd) {
case TIOCGSERIAL:
dev_dbg(&port->dev, "%s - TIOCGSERIAL\n", __func__);
@@ -2395,10 +2369,9 @@ static int edge_startup(struct usb_serial *serial)
/* create our private serial structure */
edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
- if (edge_serial == NULL) {
- dev_err(&serial->dev->dev, "%s - Out of memory\n", __func__);
+ if (!edge_serial)
return -ENOMEM;
- }
+
mutex_init(&edge_serial->es_lock);
edge_serial->serial = serial;
usb_set_serial_data(serial, edge_serial);
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 76c9a847da5d..f51a5d52c0ed 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -37,7 +36,7 @@ static int ipaq_open(struct tty_struct *tty,
static int ipaq_calc_num_ports(struct usb_serial *serial);
static int ipaq_startup(struct usb_serial *serial);
-static struct usb_device_id ipaq_id_table [] = {
+static const struct usb_device_id ipaq_id_table[] = {
{ USB_DEVICE(0x0104, 0x00BE) }, /* Socket USB Sync */
{ USB_DEVICE(0x03F0, 0x1016) }, /* HP USB Sync */
{ USB_DEVICE(0x03F0, 0x1116) }, /* HP USB Sync 1611 */
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 155eab14b30e..8b1cf18a668b 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -38,7 +38,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 716930ab1bb1..73956d48a0c5 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -377,15 +377,12 @@ static void ir_set_termios(struct tty_struct *tty,
* send the baud change out on an "empty" data packet
*/
urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- dev_err(&port->dev, "%s - no more urbs\n", __func__);
+ if (!urb)
return;
- }
+
transfer_buffer = kmalloc(1, GFP_KERNEL);
- if (!transfer_buffer) {
- dev_err(&port->dev, "%s - out of memory\n", __func__);
+ if (!transfer_buffer)
goto err_buf;
- }
*transfer_buffer = ir_xbof | ir_baud;
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 57c439a24b5a..d00dae17d520 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -17,7 +17,6 @@
*/
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -770,7 +769,7 @@ uart_enable_failed:
return status;
}
-/* Diables the IUU UART (a.k.a. the Phoenix voiderface) */
+/* Disables the IUU UART (a.k.a. the Phoenix voiderface) */
static int iuu_uart_off(struct usb_serial_port *port)
{
int status;
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index d6960aebe246..265c6776b081 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -31,7 +31,6 @@
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -165,7 +164,7 @@ static void keyspan_set_termios(struct tty_struct *tty,
if (d_details->calculate_baud_rate(port, baud_rate, d_details->baudclk,
NULL, NULL, NULL, device_port) == KEYSPAN_BAUD_RATE_OK) {
/* FIXME - more to do here to ensure rate changes cleanly */
- /* FIXME - calcuate exact rate from divisor ? */
+ /* FIXME - calculate exact rate from divisor ? */
p_priv->baud = baud_rate;
} else
baud_rate = tty_termios_baud_rate(old_termios);
@@ -1226,10 +1225,8 @@ static struct urb *keyspan_setup_urb(struct usb_serial *serial, int endpoint,
dev_dbg(&serial->interface->dev, "%s - alloc for endpoint %d.\n", __func__, endpoint);
urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
- if (urb == NULL) {
- dev_dbg(&serial->interface->dev, "%s - alloc for endpoint %d failed.\n", __func__, endpoint);
+ if (!urb)
return NULL;
- }
if (endpoint == 0) {
/* control EP filled in when used */
@@ -2312,10 +2309,8 @@ static int keyspan_startup(struct usb_serial *serial)
/* Setup private data for serial driver */
s_priv = kzalloc(sizeof(struct keyspan_serial_private), GFP_KERNEL);
- if (!s_priv) {
- dev_dbg(&serial->dev->dev, "%s - kmalloc for keyspan_serial_private failed.\n", __func__);
+ if (!s_priv)
return -ENOMEM;
- }
s_priv->instat_buf = kzalloc(INSTAT_BUFLEN, GFP_KERNEL);
if (!s_priv->instat_buf)
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 5f1d382e55cf..e972412b614b 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
diff --git a/drivers/usb/serial/keyspan_usa26msg.h b/drivers/usb/serial/keyspan_usa26msg.h
index 3808727db65a..09e21e84fc4e 100644
--- a/drivers/usb/serial/keyspan_usa26msg.h
+++ b/drivers/usb/serial/keyspan_usa26msg.h
@@ -62,7 +62,7 @@
or:
(b) 0x80 bit set
- indiates that the bytes following alternate data and
+ indicates that the bytes following alternate data and
status bytes:
STAT DATA STAT DATA STAT DATA STAT DATA ...
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 1b4054fe52a5..c88cc4966b23 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -37,7 +37,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -182,11 +181,9 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
dev_info(&port->serial->dev->dev, "sending SIO Poll request\n");
status_buf = kmalloc(KLSI_STATUSBUF_LEN, GFP_KERNEL);
- if (!status_buf) {
- dev_err(&port->dev, "%s - out of memory for status buffer.\n",
- __func__);
+ if (!status_buf)
return -ENOMEM;
- }
+
status_buf[0] = 0xff;
status_buf[1] = 0xff;
rc = usb_control_msg(port->serial->dev,
@@ -273,11 +270,9 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
* priv->line_state.
*/
cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg) {
- dev_err(&port->dev, "%s - out of memory for config buffer.\n",
- __func__);
+ if (!cfg)
return -ENOMEM;
- }
+
cfg->pktlen = 5;
cfg->baudrate = kl5kusb105a_sio_b9600;
cfg->databits = kl5kusb105a_dtb_8;
@@ -417,10 +412,8 @@ static void klsi_105_set_termios(struct tty_struct *tty,
speed_t baud;
cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg) {
- dev_err(dev, "%s - out of memory for config buffer.\n", __func__);
+ if (!cfg)
return;
- }
/* lock while we are modifying the settings */
spin_lock_irqsave(&priv->lock, flags);
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 4c94ffcb66cd..618c1c1f227e 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -25,7 +25,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 6a15adf53360..fd707d6a10e2 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -23,7 +23,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 40ccf6e5e318..39e683096e94 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -7,7 +7,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/usb.h>
@@ -43,7 +42,7 @@ struct metrousb_private {
};
/* Device table list. */
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
{ }, /* Terminating entry. */
@@ -54,7 +53,7 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define UNI_CMD_OPEN 0x80
#define UNI_CMD_CLOSE 0xFF
-inline int metrousb_is_unidirectional_mode(struct usb_serial_port *port)
+static inline int metrousb_is_unidirectional_mode(struct usb_serial_port *port)
{
__u16 product_id = le16_to_cpu(
port->serial->dev->descriptor.idProduct);
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 439c951f261b..4eb277225a77 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1,6 +1,6 @@
/*
* mos7720.c
- * Controls the Moschip 7720 usb to dual port serial convertor
+ * Controls the Moschip 7720 usb to dual port serial converter
*
* Copyright 2006 Moschip Semiconductor Tech. Ltd.
*
@@ -22,7 +22,6 @@
*/
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -46,7 +45,7 @@
#define MOS_WRITE 0x0E
#define MOS_READ 0x0D
-/* Interrupt Rotinue Defines */
+/* Interrupt Routines Defines */
#define SERIAL_IIR_RLS 0x06
#define SERIAL_IIR_RDA 0x04
#define SERIAL_IIR_CTI 0x0c
@@ -362,15 +361,13 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
/* create and initialize the control urb and containing urbtracker */
urbtrack = kmalloc(sizeof(struct urbtracker), GFP_ATOMIC);
- if (urbtrack == NULL) {
- dev_err(&usbdev->dev, "out of memory");
+ if (!urbtrack)
return -ENOMEM;
- }
+
kref_get(&mos_parport->ref_count);
urbtrack->mos_parport = mos_parport;
urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (urbtrack->urb == NULL) {
- dev_err(&usbdev->dev, "out of urbs");
+ if (!urbtrack->urb) {
kfree(urbtrack);
return -ENOMEM;
}
@@ -440,7 +437,7 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
* not called the release function yet because someone has a serial port open.
* The shared release_lock prevents the first, and the mutex and disconnected
* flag maintained by usbserial covers the second. We also use the msg_pending
- * flag to ensure that all synchronous usb messgage calls have completed before
+ * flag to ensure that all synchronous usb message calls have completed before
* our release function can return.
*/
static int parport_prologue(struct parport *pp)
@@ -471,7 +468,7 @@ static int parport_prologue(struct parport *pp)
}
/*
- * This is the the common bottom part of all parallel port functions that send
+ * This is the common bottom part of all parallel port functions that send
* synchronous messages to the device.
*/
static inline void parport_epilogue(struct parport *pp)
@@ -702,10 +699,9 @@ static int mos7715_parport_init(struct usb_serial *serial)
/* allocate and initialize parallel port control struct */
mos_parport = kzalloc(sizeof(struct mos7715_parport), GFP_KERNEL);
- if (mos_parport == NULL) {
- dev_dbg(&serial->dev->dev, "%s: kzalloc failed\n", __func__);
+ if (!mos_parport)
return -ENOMEM;
- }
+
mos_parport->msg_pending = false;
kref_init(&mos_parport->ref_count);
spin_lock_init(&mos_parport->listlock);
@@ -1018,18 +1014,12 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
for (j = 0; j < NUM_URBS; ++j) {
urb = usb_alloc_urb(0, GFP_KERNEL);
mos7720_port->write_urb_pool[j] = urb;
-
- if (urb == NULL) {
- dev_err(&port->dev, "No more urbs???\n");
+ if (!urb)
continue;
- }
urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
GFP_KERNEL);
if (!urb->transfer_buffer) {
- dev_err(&port->dev,
- "%s-out of memory for urb buffers.\n",
- __func__);
usb_free_urb(mos7720_port->write_urb_pool[j]);
mos7720_port->write_urb_pool[j] = NULL;
continue;
@@ -1250,11 +1240,8 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
if (urb->transfer_buffer == NULL) {
urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
GFP_KERNEL);
- if (urb->transfer_buffer == NULL) {
- dev_err_console(port, "%s no more kernel memory...\n",
- __func__);
+ if (!urb->transfer_buffer)
goto exit;
- }
}
transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
@@ -1885,8 +1872,6 @@ static int mos7720_ioctl(struct tty_struct *tty,
if (mos7720_port == NULL)
return -ENODEV;
- dev_dbg(&port->dev, "%s - cmd = 0x%x", __func__, cmd);
-
switch (cmd) {
case TIOCSERGETLSR:
dev_dbg(&port->dev, "%s TIOCSERGETLSR\n", __func__);
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index a69da83604c0..e9d967ff521b 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -24,7 +24,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -876,20 +875,14 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
for (j = 0; j < NUM_URBS; ++j) {
urb = usb_alloc_urb(0, GFP_KERNEL);
mos7840_port->write_urb_pool[j] = urb;
-
- if (urb == NULL) {
- dev_err(&port->dev, "No more urbs???\n");
+ if (!urb)
continue;
- }
urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
GFP_KERNEL);
if (!urb->transfer_buffer) {
usb_free_urb(urb);
mos7840_port->write_urb_pool[j] = NULL;
- dev_err(&port->dev,
- "%s-out of memory for urb buffers.\n",
- __func__);
continue;
}
}
@@ -1381,12 +1374,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
if (urb->transfer_buffer == NULL) {
urb->transfer_buffer =
kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
-
- if (urb->transfer_buffer == NULL) {
- dev_err_console(port, "%s no more kernel memory...\n",
- __func__);
+ if (!urb->transfer_buffer)
goto exit;
- }
}
transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
@@ -2070,8 +2059,6 @@ static int mos7840_ioctl(struct tty_struct *tty,
if (mos7840_port == NULL)
return -1;
- dev_dbg(&port->dev, "%s - cmd = 0x%x\n", __func__, cmd);
-
switch (cmd) {
/* return number of bytes available */
@@ -2208,10 +2195,8 @@ static int mos7840_port_probe(struct usb_serial_port *port)
dev_dbg(&port->dev, "mos7840_startup: configuring port %d\n", pnum);
mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
- if (mos7840_port == NULL) {
- dev_err(&port->dev, "%s - Out of memory\n", __func__);
+ if (!mos7840_port)
return -ENOMEM;
- }
/* Initialize all port interrupt end point to port 0 int
* endpoint. Our device has only one interrupt end point
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
new file mode 100644
index 000000000000..ab1d690274ae
--- /dev/null
+++ b/drivers/usb/serial/mxuport.c
@@ -0,0 +1,1393 @@
+/*
+ * mxuport.c - MOXA UPort series driver
+ *
+ * Copyright (c) 2006 Moxa Technologies Co., Ltd.
+ * Copyright (c) 2013 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Supports the following Moxa USB to serial converters:
+ * 2 ports : UPort 1250, UPort 1250I
+ * 4 ports : UPort 1410, UPort 1450, UPort 1450I
+ * 8 ports : UPort 1610-8, UPort 1650-8
+ * 16 ports : UPort 1610-16, UPort 1650-16
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/jiffies.h>
+#include <linux/serial.h>
+#include <linux/serial_reg.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/uaccess.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+#include <asm/unaligned.h>
+
+/* Definitions for the vendor ID and device ID */
+#define MX_USBSERIAL_VID 0x110A
+#define MX_UPORT1250_PID 0x1250
+#define MX_UPORT1251_PID 0x1251
+#define MX_UPORT1410_PID 0x1410
+#define MX_UPORT1450_PID 0x1450
+#define MX_UPORT1451_PID 0x1451
+#define MX_UPORT1618_PID 0x1618
+#define MX_UPORT1658_PID 0x1658
+#define MX_UPORT1613_PID 0x1613
+#define MX_UPORT1653_PID 0x1653
+
+/* Definitions for USB info */
+#define HEADER_SIZE 4
+#define EVENT_LENGTH 8
+#define DOWN_BLOCK_SIZE 64
+
+/* Definitions for firmware info */
+#define VER_ADDR_1 0x20
+#define VER_ADDR_2 0x24
+#define VER_ADDR_3 0x28
+
+/* Definitions for USB vendor request */
+#define RQ_VENDOR_NONE 0x00
+#define RQ_VENDOR_SET_BAUD 0x01 /* Set baud rate */
+#define RQ_VENDOR_SET_LINE 0x02 /* Set line status */
+#define RQ_VENDOR_SET_CHARS 0x03 /* Set Xon/Xoff chars */
+#define RQ_VENDOR_SET_RTS 0x04 /* Set RTS */
+#define RQ_VENDOR_SET_DTR 0x05 /* Set DTR */
+#define RQ_VENDOR_SET_XONXOFF 0x06 /* Set auto Xon/Xoff */
+#define RQ_VENDOR_SET_RX_HOST_EN 0x07 /* Set RX host enable */
+#define RQ_VENDOR_SET_OPEN 0x08 /* Set open/close port */
+#define RQ_VENDOR_PURGE 0x09 /* Purge Rx/Tx buffer */
+#define RQ_VENDOR_SET_MCR 0x0A /* Set MCR register */
+#define RQ_VENDOR_SET_BREAK 0x0B /* Set Break signal */
+
+#define RQ_VENDOR_START_FW_DOWN 0x0C /* Start firmware download */
+#define RQ_VENDOR_STOP_FW_DOWN 0x0D /* Stop firmware download */
+#define RQ_VENDOR_QUERY_FW_READY 0x0E /* Query if new firmware ready */
+
+#define RQ_VENDOR_SET_FIFO_DISABLE 0x0F /* Set fifo disable */
+#define RQ_VENDOR_SET_INTERFACE 0x10 /* Set interface */
+#define RQ_VENDOR_SET_HIGH_PERFOR 0x11 /* Set hi-performance */
+
+#define RQ_VENDOR_ERASE_BLOCK 0x12 /* Erase flash block */
+#define RQ_VENDOR_WRITE_PAGE 0x13 /* Write flash page */
+#define RQ_VENDOR_PREPARE_WRITE 0x14 /* Prepare write flash */
+#define RQ_VENDOR_CONFIRM_WRITE 0x15 /* Confirm write flash */
+#define RQ_VENDOR_LOCATE 0x16 /* Locate the device */
+
+#define RQ_VENDOR_START_ROM_DOWN 0x17 /* Start firmware download */
+#define RQ_VENDOR_ROM_DATA 0x18 /* Rom file data */
+#define RQ_VENDOR_STOP_ROM_DOWN 0x19 /* Stop firmware download */
+#define RQ_VENDOR_FW_DATA 0x20 /* Firmware data */
+
+#define RQ_VENDOR_RESET_DEVICE 0x23 /* Try to reset the device */
+#define RQ_VENDOR_QUERY_FW_CONFIG 0x24
+
+#define RQ_VENDOR_GET_VERSION 0x81 /* Get firmware version */
+#define RQ_VENDOR_GET_PAGE 0x82 /* Read flash page */
+#define RQ_VENDOR_GET_ROM_PROC 0x83 /* Get ROM process state */
+
+#define RQ_VENDOR_GET_INQUEUE 0x84 /* Data in input buffer */
+#define RQ_VENDOR_GET_OUTQUEUE 0x85 /* Data in output buffer */
+
+#define RQ_VENDOR_GET_MSR 0x86 /* Get modem status register */
+
+/* Definitions for UPort event type */
+#define UPORT_EVENT_NONE 0 /* None */
+#define UPORT_EVENT_TXBUF_THRESHOLD 1 /* Tx buffer threshold */
+#define UPORT_EVENT_SEND_NEXT 2 /* Send next */
+#define UPORT_EVENT_MSR 3 /* Modem status */
+#define UPORT_EVENT_LSR 4 /* Line status */
+#define UPORT_EVENT_MCR 5 /* Modem control */
+
+/* Definitions for serial event type */
+#define SERIAL_EV_CTS 0x0008 /* CTS changed state */
+#define SERIAL_EV_DSR 0x0010 /* DSR changed state */
+#define SERIAL_EV_RLSD 0x0020 /* RLSD changed state */
+
+/* Definitions for modem control event type */
+#define SERIAL_EV_XOFF 0x40 /* XOFF received */
+
+/* Definitions for line control of communication */
+#define MX_WORDLENGTH_5 5
+#define MX_WORDLENGTH_6 6
+#define MX_WORDLENGTH_7 7
+#define MX_WORDLENGTH_8 8
+
+#define MX_PARITY_NONE 0
+#define MX_PARITY_ODD 1
+#define MX_PARITY_EVEN 2
+#define MX_PARITY_MARK 3
+#define MX_PARITY_SPACE 4
+
+#define MX_STOP_BITS_1 0
+#define MX_STOP_BITS_1_5 1
+#define MX_STOP_BITS_2 2
+
+#define MX_RTS_DISABLE 0x0
+#define MX_RTS_ENABLE 0x1
+#define MX_RTS_HW 0x2
+#define MX_RTS_NO_CHANGE 0x3 /* Flag, not valid register value*/
+
+#define MX_INT_RS232 0
+#define MX_INT_2W_RS485 1
+#define MX_INT_RS422 2
+#define MX_INT_4W_RS485 3
+
+/* Definitions for holding reason */
+#define MX_WAIT_FOR_CTS 0x0001
+#define MX_WAIT_FOR_DSR 0x0002
+#define MX_WAIT_FOR_DCD 0x0004
+#define MX_WAIT_FOR_XON 0x0008
+#define MX_WAIT_FOR_START_TX 0x0010
+#define MX_WAIT_FOR_UNTHROTTLE 0x0020
+#define MX_WAIT_FOR_LOW_WATER 0x0040
+#define MX_WAIT_FOR_SEND_NEXT 0x0080
+
+#define MX_UPORT_2_PORT BIT(0)
+#define MX_UPORT_4_PORT BIT(1)
+#define MX_UPORT_8_PORT BIT(2)
+#define MX_UPORT_16_PORT BIT(3)
+
+/* This structure holds all of the local port information */
+struct mxuport_port {
+ u8 mcr_state; /* Last MCR state */
+ u8 msr_state; /* Last MSR state */
+ struct mutex mutex; /* Protects mcr_state */
+ spinlock_t spinlock; /* Protects msr_state */
+};
+
+/* Table of devices that work with this driver */
+static const struct usb_device_id mxuport_idtable[] = {
+ { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1250_PID),
+ .driver_info = MX_UPORT_2_PORT },
+ { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1251_PID),
+ .driver_info = MX_UPORT_2_PORT },
+ { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1410_PID),
+ .driver_info = MX_UPORT_4_PORT },
+ { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1450_PID),
+ .driver_info = MX_UPORT_4_PORT },
+ { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1451_PID),
+ .driver_info = MX_UPORT_4_PORT },
+ { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1618_PID),
+ .driver_info = MX_UPORT_8_PORT },
+ { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1658_PID),
+ .driver_info = MX_UPORT_8_PORT },
+ { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1613_PID),
+ .driver_info = MX_UPORT_16_PORT },
+ { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1653_PID),
+ .driver_info = MX_UPORT_16_PORT },
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, mxuport_idtable);
+
+/*
+ * Add a four byte header containing the port number and the number of
+ * bytes of data in the message. Return the number of bytes in the
+ * buffer.
+ */
+static int mxuport_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size)
+{
+ u8 *buf = dest;
+ int count;
+
+ count = kfifo_out_locked(&port->write_fifo, buf + HEADER_SIZE,
+ size - HEADER_SIZE,
+ &port->lock);
+
+ put_unaligned_be16(port->port_number, buf);
+ put_unaligned_be16(count, buf + 2);
+
+ dev_dbg(&port->dev, "%s - size %zd count %d\n", __func__,
+ size, count);
+
+ return count + HEADER_SIZE;
+}
+
+/* Read the given buffer in from the control pipe. */
+static int mxuport_recv_ctrl_urb(struct usb_serial *serial,
+ u8 request, u16 value, u16 index,
+ u8 *data, size_t size)
+{
+ int status;
+
+ status = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ request,
+ (USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE), value, index,
+ data, size,
+ USB_CTRL_GET_TIMEOUT);
+ if (status < 0) {
+ dev_err(&serial->interface->dev,
+ "%s - usb_control_msg failed (%d)\n",
+ __func__, status);
+ return status;
+ }
+
+ if (status != size) {
+ dev_err(&serial->interface->dev,
+ "%s - short read (%d / %zd)\n",
+ __func__, status, size);
+ return -EIO;
+ }
+
+ return status;
+}
+
+/* Write the given buffer out to the control pipe. */
+static int mxuport_send_ctrl_data_urb(struct usb_serial *serial,
+ u8 request,
+ u16 value, u16 index,
+ u8 *data, size_t size)
+{
+ int status;
+
+ status = usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0),
+ request,
+ (USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE), value, index,
+ data, size,
+ USB_CTRL_SET_TIMEOUT);
+ if (status < 0) {
+ dev_err(&serial->interface->dev,
+ "%s - usb_control_msg failed (%d)\n",
+ __func__, status);
+ return status;
+ }
+
+ if (status != size) {
+ dev_err(&serial->interface->dev,
+ "%s - short write (%d / %zd)\n",
+ __func__, status, size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Send a vendor request without any data */
+static int mxuport_send_ctrl_urb(struct usb_serial *serial,
+ u8 request, u16 value, u16 index)
+{
+ return mxuport_send_ctrl_data_urb(serial, request, value, index,
+ NULL, 0);
+}
+
+/*
+ * mxuport_throttle - throttle function of driver
+ *
+ * This function is called by the tty driver when it wants to stop the
+ * data being read from the port. Since all the data comes over one
+ * bulk in endpoint, we cannot stop submitting urbs by setting
+ * port->throttle. Instead tell the device to stop sending us data for
+ * the port.
+ */
+static void mxuport_throttle(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct usb_serial *serial = port->serial;
+
+ dev_dbg(&port->dev, "%s\n", __func__);
+
+ mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RX_HOST_EN,
+ 0, port->port_number);
+}
+
+/*
+ * mxuport_unthrottle - unthrottle function of driver
+ *
+ * This function is called by the tty driver when it wants to resume
+ * the data being read from the port. Tell the device it can resume
+ * sending us received data from the port.
+ */
+static void mxuport_unthrottle(struct tty_struct *tty)
+{
+
+ struct usb_serial_port *port = tty->driver_data;
+ struct usb_serial *serial = port->serial;
+
+ dev_dbg(&port->dev, "%s\n", __func__);
+
+ mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RX_HOST_EN,
+ 1, port->port_number);
+}
+
+/*
+ * Processes one chunk of data received for a port. Mostly a copy of
+ * usb_serial_generic_process_read_urb().
+ */
+static void mxuport_process_read_urb_data(struct usb_serial_port *port,
+ char *data, int size)
+{
+ int i;
+
+ if (!port->port.console || !port->sysrq) {
+ tty_insert_flip_string(&port->port, data, size);
+ } else {
+ for (i = 0; i < size; i++, data++) {
+ if (!usb_serial_handle_sysrq_char(port, *data))
+ tty_insert_flip_char(&port->port, *data,
+ TTY_NORMAL);
+ }
+ }
+ tty_flip_buffer_push(&port->port);
+}
+
+static void mxuport_msr_event(struct usb_serial_port *port, u8 buf[4])
+{
+ struct mxuport_port *mxport = usb_get_serial_port_data(port);
+ u8 rcv_msr_hold = buf[2] & 0xF0;
+ u16 rcv_msr_event = get_unaligned_be16(buf);
+ unsigned long flags;
+
+ if (rcv_msr_event == 0)
+ return;
+
+ /* Update MSR status */
+ spin_lock_irqsave(&mxport->spinlock, flags);
+
+ dev_dbg(&port->dev, "%s - current MSR status = 0x%x\n",
+ __func__, mxport->msr_state);
+
+ if (rcv_msr_hold & UART_MSR_CTS) {
+ mxport->msr_state |= UART_MSR_CTS;
+ dev_dbg(&port->dev, "%s - CTS high\n", __func__);
+ } else {
+ mxport->msr_state &= ~UART_MSR_CTS;
+ dev_dbg(&port->dev, "%s - CTS low\n", __func__);
+ }
+
+ if (rcv_msr_hold & UART_MSR_DSR) {
+ mxport->msr_state |= UART_MSR_DSR;
+ dev_dbg(&port->dev, "%s - DSR high\n", __func__);
+ } else {
+ mxport->msr_state &= ~UART_MSR_DSR;
+ dev_dbg(&port->dev, "%s - DSR low\n", __func__);
+ }
+
+ if (rcv_msr_hold & UART_MSR_DCD) {
+ mxport->msr_state |= UART_MSR_DCD;
+ dev_dbg(&port->dev, "%s - DCD high\n", __func__);
+ } else {
+ mxport->msr_state &= ~UART_MSR_DCD;
+ dev_dbg(&port->dev, "%s - DCD low\n", __func__);
+ }
+ spin_unlock_irqrestore(&mxport->spinlock, flags);
+
+ if (rcv_msr_event &
+ (SERIAL_EV_CTS | SERIAL_EV_DSR | SERIAL_EV_RLSD)) {
+
+ if (rcv_msr_event & SERIAL_EV_CTS) {
+ port->icount.cts++;
+ dev_dbg(&port->dev, "%s - CTS change\n", __func__);
+ }
+
+ if (rcv_msr_event & SERIAL_EV_DSR) {
+ port->icount.dsr++;
+ dev_dbg(&port->dev, "%s - DSR change\n", __func__);
+ }
+
+ if (rcv_msr_event & SERIAL_EV_RLSD) {
+ port->icount.dcd++;
+ dev_dbg(&port->dev, "%s - DCD change\n", __func__);
+ }
+ wake_up_interruptible(&port->port.delta_msr_wait);
+ }
+}
+
+static void mxuport_lsr_event(struct usb_serial_port *port, u8 buf[4])
+{
+ u8 lsr_event = buf[2];
+
+ if (lsr_event & UART_LSR_BI) {
+ port->icount.brk++;
+ dev_dbg(&port->dev, "%s - break error\n", __func__);
+ }
+
+ if (lsr_event & UART_LSR_FE) {
+ port->icount.frame++;
+ dev_dbg(&port->dev, "%s - frame error\n", __func__);
+ }
+
+ if (lsr_event & UART_LSR_PE) {
+ port->icount.parity++;
+ dev_dbg(&port->dev, "%s - parity error\n", __func__);
+ }
+
+ if (lsr_event & UART_LSR_OE) {
+ port->icount.overrun++;
+ dev_dbg(&port->dev, "%s - overrun error\n", __func__);
+ }
+}
+
+/*
+ * When something interesting happens, modem control lines XON/XOFF
+ * etc, the device sends an event. Process these events.
+ */
+static void mxuport_process_read_urb_event(struct usb_serial_port *port,
+ u8 buf[4], u32 event)
+{
+ dev_dbg(&port->dev, "%s - receive event : %04x\n", __func__, event);
+
+ switch (event) {
+ case UPORT_EVENT_SEND_NEXT:
+ /*
+ * Sent as part of the flow control on device buffers.
+ * Not currently used.
+ */
+ break;
+ case UPORT_EVENT_MSR:
+ mxuport_msr_event(port, buf);
+ break;
+ case UPORT_EVENT_LSR:
+ mxuport_lsr_event(port, buf);
+ break;
+ case UPORT_EVENT_MCR:
+ /*
+ * Event to indicate a change in XON/XOFF from the
+ * peer. Currently not used. We just continue
+ * sending the device data and it will buffer it if
+ * needed. This event could be used for flow control
+ * between the host and the device.
+ */
+ break;
+ default:
+ dev_dbg(&port->dev, "Unexpected event\n");
+ break;
+ }
+}
+
+/*
+ * One URB can contain data for multiple ports. Demultiplex the data,
+ * checking the port exists, is opened and the message is valid.
+ */
+static void mxuport_process_read_urb_demux_data(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+ struct usb_serial *serial = port->serial;
+ u8 *data = urb->transfer_buffer;
+ u8 *end = data + urb->actual_length;
+ struct usb_serial_port *demux_port;
+ u8 *ch;
+ u16 rcv_port;
+ u16 rcv_len;
+
+ while (data < end) {
+ if (data + HEADER_SIZE > end) {
+ dev_warn(&port->dev, "%s - message with short header\n",
+ __func__);
+ return;
+ }
+
+ rcv_port = get_unaligned_be16(data);
+ if (rcv_port >= serial->num_ports) {
+ dev_warn(&port->dev, "%s - message for invalid port\n",
+ __func__);
+ return;
+ }
+
+ demux_port = serial->port[rcv_port];
+ rcv_len = get_unaligned_be16(data + 2);
+ if (!rcv_len || data + HEADER_SIZE + rcv_len > end) {
+ dev_warn(&port->dev, "%s - short data\n", __func__);
+ return;
+ }
+
+ if (test_bit(ASYNCB_INITIALIZED, &demux_port->port.flags)) {
+ ch = data + HEADER_SIZE;
+ mxuport_process_read_urb_data(demux_port, ch, rcv_len);
+ } else {
+ dev_dbg(&demux_port->dev, "%s - data for closed port\n",
+ __func__);
+ }
+ data += HEADER_SIZE + rcv_len;
+ }
+}
+
+/*
+ * One URB can contain events for multiple ports. Demultiplex the event,
+ * checking the port exists, and is opened.
+ */
+static void mxuport_process_read_urb_demux_event(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+ struct usb_serial *serial = port->serial;
+ u8 *data = urb->transfer_buffer;
+ u8 *end = data + urb->actual_length;
+ struct usb_serial_port *demux_port;
+ u8 *ch;
+ u16 rcv_port;
+ u16 rcv_event;
+
+ while (data < end) {
+ if (data + EVENT_LENGTH > end) {
+ dev_warn(&port->dev, "%s - message with short event\n",
+ __func__);
+ return;
+ }
+
+ rcv_port = get_unaligned_be16(data);
+ if (rcv_port >= serial->num_ports) {
+ dev_warn(&port->dev, "%s - message for invalid port\n",
+ __func__);
+ return;
+ }
+
+ demux_port = serial->port[rcv_port];
+ if (test_bit(ASYNCB_INITIALIZED, &demux_port->port.flags)) {
+ ch = data + HEADER_SIZE;
+ rcv_event = get_unaligned_be16(data + 2);
+ mxuport_process_read_urb_event(demux_port, ch,
+ rcv_event);
+ } else {
+ dev_dbg(&demux_port->dev,
+ "%s - event for closed port\n", __func__);
+ }
+ data += EVENT_LENGTH;
+ }
+}
+
+/*
+ * This is called when we have received data on the bulk in
+ * endpoint. Depending on which port it was received on, it can
+ * contain serial data or events.
+ */
+static void mxuport_process_read_urb(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+ struct usb_serial *serial = port->serial;
+
+ if (port == serial->port[0])
+ mxuport_process_read_urb_demux_data(urb);
+
+ if (port == serial->port[1])
+ mxuport_process_read_urb_demux_event(urb);
+}
+
+/*
+ * Ask the device how many bytes it has queued to be sent out. If
+ * there are none, return true.
+ */
+static bool mxuport_tx_empty(struct usb_serial_port *port)
+{
+ struct usb_serial *serial = port->serial;
+ bool is_empty = true;
+ u32 txlen;
+ u8 *len_buf;
+ int err;
+
+ len_buf = kzalloc(4, GFP_KERNEL);
+ if (!len_buf)
+ goto out;
+
+ err = mxuport_recv_ctrl_urb(serial, RQ_VENDOR_GET_OUTQUEUE, 0,
+ port->port_number, len_buf, 4);
+ if (err < 0)
+ goto out;
+
+ txlen = get_unaligned_be32(len_buf);
+ dev_dbg(&port->dev, "%s - tx len = %u\n", __func__, txlen);
+
+ if (txlen != 0)
+ is_empty = false;
+
+out:
+ kfree(len_buf);
+ return is_empty;
+}
+
+static int mxuport_set_mcr(struct usb_serial_port *port, u8 mcr_state)
+{
+ struct usb_serial *serial = port->serial;
+ int err;
+
+ dev_dbg(&port->dev, "%s - %02x\n", __func__, mcr_state);
+
+ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_MCR,
+ mcr_state, port->port_number);
+ if (err)
+ dev_err(&port->dev, "%s - failed to change MCR\n", __func__);
+
+ return err;
+}
+
+static int mxuport_set_dtr(struct usb_serial_port *port, int on)
+{
+ struct mxuport_port *mxport = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ int err;
+
+ mutex_lock(&mxport->mutex);
+
+ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_DTR,
+ !!on, port->port_number);
+ if (!err) {
+ if (on)
+ mxport->mcr_state |= UART_MCR_DTR;
+ else
+ mxport->mcr_state &= ~UART_MCR_DTR;
+ }
+
+ mutex_unlock(&mxport->mutex);
+
+ return err;
+}
+
+static int mxuport_set_rts(struct usb_serial_port *port, u8 state)
+{
+ struct mxuport_port *mxport = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ int err;
+ u8 mcr_state;
+
+ mutex_lock(&mxport->mutex);
+ mcr_state = mxport->mcr_state;
+
+ switch (state) {
+ case MX_RTS_DISABLE:
+ mcr_state &= ~UART_MCR_RTS;
+ break;
+ case MX_RTS_ENABLE:
+ mcr_state |= UART_MCR_RTS;
+ break;
+ case MX_RTS_HW:
+ /*
+ * Do not update mxport->mcr_state when doing hardware
+ * flow control.
+ */
+ break;
+ default:
+ /*
+ * Should not happen, but somebody might try passing
+ * MX_RTS_NO_CHANGE, which is not valid.
+ */
+ err = -EINVAL;
+ goto out;
+ }
+ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RTS,
+ state, port->port_number);
+ if (!err)
+ mxport->mcr_state = mcr_state;
+
+out:
+ mutex_unlock(&mxport->mutex);
+
+ return err;
+}
+
+static void mxuport_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct mxuport_port *mxport = usb_get_serial_port_data(port);
+ u8 mcr_state;
+ int err;
+
+ mutex_lock(&mxport->mutex);
+ mcr_state = mxport->mcr_state;
+
+ if (on)
+ mcr_state |= (UART_MCR_RTS | UART_MCR_DTR);
+ else
+ mcr_state &= ~(UART_MCR_RTS | UART_MCR_DTR);
+
+ err = mxuport_set_mcr(port, mcr_state);
+ if (!err)
+ mxport->mcr_state = mcr_state;
+
+ mutex_unlock(&mxport->mutex);
+}
+
+static int mxuport_tiocmset(struct tty_struct *tty, unsigned int set,
+ unsigned int clear)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct mxuport_port *mxport = usb_get_serial_port_data(port);
+ int err;
+ u8 mcr_state;
+
+ mutex_lock(&mxport->mutex);
+ mcr_state = mxport->mcr_state;
+
+ if (set & TIOCM_RTS)
+ mcr_state |= UART_MCR_RTS;
+
+ if (set & TIOCM_DTR)
+ mcr_state |= UART_MCR_DTR;
+
+ if (clear & TIOCM_RTS)
+ mcr_state &= ~UART_MCR_RTS;
+
+ if (clear & TIOCM_DTR)
+ mcr_state &= ~UART_MCR_DTR;
+
+ err = mxuport_set_mcr(port, mcr_state);
+ if (!err)
+ mxport->mcr_state = mcr_state;
+
+ mutex_unlock(&mxport->mutex);
+
+ return err;
+}
+
+static int mxuport_tiocmget(struct tty_struct *tty)
+{
+ struct mxuport_port *mxport;
+ struct usb_serial_port *port = tty->driver_data;
+ unsigned int result;
+ unsigned long flags;
+ unsigned int msr;
+ unsigned int mcr;
+
+ mxport = usb_get_serial_port_data(port);
+
+ mutex_lock(&mxport->mutex);
+ spin_lock_irqsave(&mxport->spinlock, flags);
+
+ msr = mxport->msr_state;
+ mcr = mxport->mcr_state;
+
+ spin_unlock_irqrestore(&mxport->spinlock, flags);
+ mutex_unlock(&mxport->mutex);
+
+ result = (((mcr & UART_MCR_DTR) ? TIOCM_DTR : 0) | /* 0x002 */
+ ((mcr & UART_MCR_RTS) ? TIOCM_RTS : 0) | /* 0x004 */
+ ((msr & UART_MSR_CTS) ? TIOCM_CTS : 0) | /* 0x020 */
+ ((msr & UART_MSR_DCD) ? TIOCM_CAR : 0) | /* 0x040 */
+ ((msr & UART_MSR_RI) ? TIOCM_RI : 0) | /* 0x080 */
+ ((msr & UART_MSR_DSR) ? TIOCM_DSR : 0)); /* 0x100 */
+
+ dev_dbg(&port->dev, "%s - 0x%04x\n", __func__, result);
+
+ return result;
+}
+
+static int mxuport_set_termios_flow(struct tty_struct *tty,
+ struct ktermios *old_termios,
+ struct usb_serial_port *port,
+ struct usb_serial *serial)
+{
+ u8 xon = START_CHAR(tty);
+ u8 xoff = STOP_CHAR(tty);
+ int enable;
+ int err;
+ u8 *buf;
+ u8 rts;
+
+ buf = kmalloc(2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* S/W flow control settings */
+ if (I_IXOFF(tty) || I_IXON(tty)) {
+ enable = 1;
+ buf[0] = xon;
+ buf[1] = xoff;
+
+ err = mxuport_send_ctrl_data_urb(serial, RQ_VENDOR_SET_CHARS,
+ 0, port->port_number,
+ buf, 2);
+ if (err)
+ goto out;
+
+ dev_dbg(&port->dev, "%s - XON = 0x%02x, XOFF = 0x%02x\n",
+ __func__, xon, xoff);
+ } else {
+ enable = 0;
+ }
+
+ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_XONXOFF,
+ enable, port->port_number);
+ if (err)
+ goto out;
+
+ rts = MX_RTS_NO_CHANGE;
+
+ /* H/W flow control settings */
+ if (!old_termios ||
+ C_CRTSCTS(tty) != (old_termios->c_cflag & CRTSCTS)) {
+ if (C_CRTSCTS(tty))
+ rts = MX_RTS_HW;
+ else
+ rts = MX_RTS_ENABLE;
+ }
+
+ if (C_BAUD(tty)) {
+ if (old_termios && (old_termios->c_cflag & CBAUD) == B0) {
+ /* Raise DTR and RTS */
+ if (C_CRTSCTS(tty))
+ rts = MX_RTS_HW;
+ else
+ rts = MX_RTS_ENABLE;
+ mxuport_set_dtr(port, 1);
+ }
+ } else {
+ /* Drop DTR and RTS */
+ rts = MX_RTS_DISABLE;
+ mxuport_set_dtr(port, 0);
+ }
+
+ if (rts != MX_RTS_NO_CHANGE)
+ err = mxuport_set_rts(port, rts);
+
+out:
+ kfree(buf);
+ return err;
+}
+
+static void mxuport_set_termios(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ struct ktermios *old_termios)
+{
+ struct usb_serial *serial = port->serial;
+ u8 *buf;
+ u8 data_bits;
+ u8 stop_bits;
+ u8 parity;
+ int baud;
+ int err;
+
+ if (old_termios &&
+ !tty_termios_hw_change(&tty->termios, old_termios) &&
+ tty->termios.c_iflag == old_termios->c_iflag) {
+ dev_dbg(&port->dev, "%s - nothing to change\n", __func__);
+ return;
+ }
+
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ /* Set data bit of termios */
+ switch (C_CSIZE(tty)) {
+ case CS5:
+ data_bits = MX_WORDLENGTH_5;
+ break;
+ case CS6:
+ data_bits = MX_WORDLENGTH_6;
+ break;
+ case CS7:
+ data_bits = MX_WORDLENGTH_7;
+ break;
+ case CS8:
+ default:
+ data_bits = MX_WORDLENGTH_8;
+ break;
+ }
+
+ /* Set parity of termios */
+ if (C_PARENB(tty)) {
+ if (C_CMSPAR(tty)) {
+ if (C_PARODD(tty))
+ parity = MX_PARITY_MARK;
+ else
+ parity = MX_PARITY_SPACE;
+ } else {
+ if (C_PARODD(tty))
+ parity = MX_PARITY_ODD;
+ else
+ parity = MX_PARITY_EVEN;
+ }
+ } else {
+ parity = MX_PARITY_NONE;
+ }
+
+ /* Set stop bit of termios */
+ if (C_CSTOPB(tty))
+ stop_bits = MX_STOP_BITS_2;
+ else
+ stop_bits = MX_STOP_BITS_1;
+
+ buf[0] = data_bits;
+ buf[1] = parity;
+ buf[2] = stop_bits;
+ buf[3] = 0;
+
+ err = mxuport_send_ctrl_data_urb(serial, RQ_VENDOR_SET_LINE,
+ 0, port->port_number, buf, 4);
+ if (err)
+ goto out;
+
+ err = mxuport_set_termios_flow(tty, old_termios, port, serial);
+ if (err)
+ goto out;
+
+ baud = tty_get_baud_rate(tty);
+ if (!baud)
+ baud = 9600;
+
+ /* Note: Little Endian */
+ put_unaligned_le32(baud, buf);
+
+ err = mxuport_send_ctrl_data_urb(serial, RQ_VENDOR_SET_BAUD,
+ 0, port->port_number,
+ buf, 4);
+ if (err)
+ goto out;
+
+ dev_dbg(&port->dev, "baud_rate : %d\n", baud);
+ dev_dbg(&port->dev, "data_bits : %d\n", data_bits);
+ dev_dbg(&port->dev, "parity : %d\n", parity);
+ dev_dbg(&port->dev, "stop_bits : %d\n", stop_bits);
+
+out:
+ kfree(buf);
+}
+
+/*
+ * Determine dynamically how many ports this device has. This is called
+ * after the probe() callback, but before attach().
+ */
+static int mxuport_calc_num_ports(struct usb_serial *serial)
+{
+ unsigned long features = (unsigned long)usb_get_serial_data(serial);
+
+ if (features & MX_UPORT_2_PORT)
+ return 2;
+ if (features & MX_UPORT_4_PORT)
+ return 4;
+ if (features & MX_UPORT_8_PORT)
+ return 8;
+ if (features & MX_UPORT_16_PORT)
+ return 16;
+
+ return 0;
+}
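
For reference, the feature flags tested above are the driver_info bits that mxuport_probe() stashes with usb_set_serial_data(); a hypothetical id-table entry wiring a device up as a 4-port model could look like the sketch below (the vendor and product IDs are placeholders, only the MX_UPORT_4_PORT flag name comes from this driver):

	/* requires <linux/usb.h>; illustrative IDs only, the real entries live in mxuport_idtable */
	static const struct usb_device_id example_idtable[] = {
		{ USB_DEVICE(0x1234, 0x5678),
		  .driver_info = MX_UPORT_4_PORT },
		{ }	/* terminator */
	};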
+
+/* Get the version of the firmware currently running. */
+static int mxuport_get_fw_version(struct usb_serial *serial, u32 *version)
+{
+ u8 *ver_buf;
+ int err;
+
+ ver_buf = kzalloc(4, GFP_KERNEL);
+ if (!ver_buf)
+ return -ENOMEM;
+
+ /* Get firmware version from SDRAM */
+ err = mxuport_recv_ctrl_urb(serial, RQ_VENDOR_GET_VERSION, 0, 0,
+ ver_buf, 4);
+ if (err != 4) {
+ err = -EIO;
+ goto out;
+ }
+
+ *version = (ver_buf[0] << 16) | (ver_buf[1] << 8) | ver_buf[2];
+ err = 0;
+out:
+ kfree(ver_buf);
+ return err;
+}
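
A quick worked example of the byte packing above, with made-up values:

	/* If the device returned ver_buf = { 0x01, 0x06, 0x07, 0x00 }:	*/
	/*   version = (0x01 << 16) | (0x06 << 8) | 0x07 = 0x010607		*/
	/*   which mxuport_probe() below reports as "v1.6.7"			*/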
+
+/* Given a firmware blob, download it to the device. */
+static int mxuport_download_fw(struct usb_serial *serial,
+ const struct firmware *fw_p)
+{
+ u8 *fw_buf;
+ size_t txlen;
+ size_t fwidx;
+ int err;
+
+ fw_buf = kmalloc(DOWN_BLOCK_SIZE, GFP_KERNEL);
+ if (!fw_buf)
+ return -ENOMEM;
+
+ dev_dbg(&serial->interface->dev, "Starting firmware download...\n");
+ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_START_FW_DOWN, 0, 0);
+ if (err)
+ goto out;
+
+ fwidx = 0;
+ do {
+ txlen = min_t(size_t, (fw_p->size - fwidx), DOWN_BLOCK_SIZE);
+
+ memcpy(fw_buf, &fw_p->data[fwidx], txlen);
+ err = mxuport_send_ctrl_data_urb(serial, RQ_VENDOR_FW_DATA,
+ 0, 0, fw_buf, txlen);
+ if (err) {
+ mxuport_send_ctrl_urb(serial, RQ_VENDOR_STOP_FW_DOWN,
+ 0, 0);
+ goto out;
+ }
+
+ fwidx += txlen;
+ usleep_range(1000, 2000);
+
+ } while (fwidx < fw_p->size);
+
+ msleep(1000);
+ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_STOP_FW_DOWN, 0, 0);
+ if (err)
+ goto out;
+
+ msleep(1000);
+ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_QUERY_FW_READY, 0, 0);
+
+out:
+ kfree(fw_buf);
+ return err;
+}
+
+static int mxuport_probe(struct usb_serial *serial,
+ const struct usb_device_id *id)
+{
+ u16 productid = le16_to_cpu(serial->dev->descriptor.idProduct);
+ const struct firmware *fw_p = NULL;
+ u32 version;
+ int local_ver;
+ char buf[32];
+ int err;
+
+ /* Load our firmware */
+ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_QUERY_FW_CONFIG, 0, 0);
+ if (err) {
+ mxuport_send_ctrl_urb(serial, RQ_VENDOR_RESET_DEVICE, 0, 0);
+ return err;
+ }
+
+ err = mxuport_get_fw_version(serial, &version);
+ if (err < 0)
+ return err;
+
+ dev_dbg(&serial->interface->dev, "Device firmware version v%x.%x.%x\n",
+ (version & 0xff0000) >> 16,
+ (version & 0xff00) >> 8,
+ (version & 0xff));
+
+ snprintf(buf, sizeof(buf) - 1, "moxa/moxa-%04x.fw", productid);
+
+ err = request_firmware(&fw_p, buf, &serial->interface->dev);
+ if (err) {
+ dev_warn(&serial->interface->dev, "Firmware %s not found\n",
+ buf);
+
+ /* Use the firmware already in the device */
+ err = 0;
+ } else {
+ local_ver = ((fw_p->data[VER_ADDR_1] << 16) |
+ (fw_p->data[VER_ADDR_2] << 8) |
+ fw_p->data[VER_ADDR_3]);
+ dev_dbg(&serial->interface->dev,
+ "Available firmware version v%x.%x.%x\n",
+ fw_p->data[VER_ADDR_1], fw_p->data[VER_ADDR_2],
+ fw_p->data[VER_ADDR_3]);
+ if (local_ver > version) {
+ err = mxuport_download_fw(serial, fw_p);
+ if (err)
+ goto out;
+ err = mxuport_get_fw_version(serial, &version);
+ if (err < 0)
+ goto out;
+ }
+ }
+
+ dev_info(&serial->interface->dev,
+ "Using device firmware version v%x.%x.%x\n",
+ (version & 0xff0000) >> 16,
+ (version & 0xff00) >> 8,
+ (version & 0xff));
+
+ /*
+ * Contains the features of this hardware. Store it away for
+ * later use, e.g. the number of ports.
+ */
+ usb_set_serial_data(serial, (void *)id->driver_info);
+out:
+ if (fw_p)
+ release_firmware(fw_p);
+ return err;
+}
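
As a hypothetical illustration of the firmware lookup above: a device reporting idProduct 0xabcd would make the snprintf() produce "moxa/moxa-abcd.fw", which request_firmware() then looks for in the usual firmware search paths (typically under /lib/firmware); if the file is absent, the fallback branch above simply keeps the firmware already running on the device.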
+
+
+static int mxuport_port_probe(struct usb_serial_port *port)
+{
+ struct usb_serial *serial = port->serial;
+ struct mxuport_port *mxport;
+ int err;
+
+ mxport = devm_kzalloc(&port->dev, sizeof(struct mxuport_port),
+ GFP_KERNEL);
+ if (!mxport)
+ return -ENOMEM;
+
+ mutex_init(&mxport->mutex);
+ spin_lock_init(&mxport->spinlock);
+
+ /* Set the port private data */
+ usb_set_serial_port_data(port, mxport);
+
+ /* Set FIFO (Enable) */
+ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_FIFO_DISABLE,
+ 0, port->port_number);
+ if (err)
+ return err;
+
+ /* Set transmission mode (Hi-Performance) */
+ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_HIGH_PERFOR,
+ 0, port->port_number);
+ if (err)
+ return err;
+
+ /* Set interface (RS-232) */
+ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_INTERFACE,
+ MX_INT_RS232,
+ port->port_number);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mxuport_alloc_write_urb(struct usb_serial *serial,
+ struct usb_serial_port *port,
+ struct usb_serial_port *port0,
+ int j)
+{
+ struct usb_device *dev = interface_to_usbdev(serial->interface);
+
+ set_bit(j, &port->write_urbs_free);
+ port->write_urbs[j] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!port->write_urbs[j])
+ return -ENOMEM;
+
+ port->bulk_out_buffers[j] = kmalloc(port0->bulk_out_size, GFP_KERNEL);
+ if (!port->bulk_out_buffers[j])
+ return -ENOMEM;
+
+ usb_fill_bulk_urb(port->write_urbs[j], dev,
+ usb_sndbulkpipe(dev, port->bulk_out_endpointAddress),
+ port->bulk_out_buffers[j],
+ port->bulk_out_size,
+ serial->type->write_bulk_callback,
+ port);
+ return 0;
+}
+
+
+static int mxuport_alloc_write_urbs(struct usb_serial *serial,
+ struct usb_serial_port *port,
+ struct usb_serial_port *port0)
+{
+ int j;
+ int ret;
+
+ for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j) {
+ ret = mxuport_alloc_write_urb(serial, port, port0, j);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+
+static int mxuport_attach(struct usb_serial *serial)
+{
+ struct usb_serial_port *port0 = serial->port[0];
+ struct usb_serial_port *port1 = serial->port[1];
+ struct usb_serial_port *port;
+ int err;
+ int i;
+ int j;
+
+ /*
+ * Throw away the write URBs allocated for all ports but the
+ * first, so we can set them up again to fit the multiplexing
+ * scheme.
+ */
+ for (i = 1; i < serial->num_bulk_out; ++i) {
+ port = serial->port[i];
+ for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j) {
+ usb_free_urb(port->write_urbs[j]);
+ kfree(port->bulk_out_buffers[j]);
+ port->write_urbs[j] = NULL;
+ port->bulk_out_buffers[j] = NULL;
+ }
+ port->write_urbs_free = 0;
+ }
+
+ /*
+ * All write data is sent over the first bulk out endpoint,
+ * with an added header to indicate the port. Allocate URBs
+ * for each port to the first bulk out endpoint.
+ */
+ for (i = 1; i < serial->num_ports; ++i) {
+ port = serial->port[i];
+ port->bulk_out_size = port0->bulk_out_size;
+ port->bulk_out_endpointAddress =
+ port0->bulk_out_endpointAddress;
+
+ err = mxuport_alloc_write_urbs(serial, port, port0);
+ if (err)
+ return err;
+
+ port->write_urb = port->write_urbs[0];
+ port->bulk_out_buffer = port->bulk_out_buffers[0];
+
+ /*
+ * Ensure each port has a fifo. The framework only
+ * allocates a fifo to ports with a bulk out endpoint,
+ * whereas we need one for every port.
+ */
+ if (!kfifo_initialized(&port->write_fifo)) {
+ err = kfifo_alloc(&port->write_fifo, PAGE_SIZE,
+ GFP_KERNEL);
+ if (err)
+ return err;
+ }
+ }
+
+ /*
+ * All data from the ports is received on the first bulk in
+ * endpoint, with a multiplex header. The second bulk in is
+ * used for events.
+ *
+ * Start to read from the device.
+ */
+ err = usb_serial_generic_submit_read_urbs(port0, GFP_KERNEL);
+ if (err)
+ return err;
+
+ err = usb_serial_generic_submit_read_urbs(port1, GFP_KERNEL);
+ if (err) {
+ usb_serial_generic_close(port0);
+ return err;
+ }
+
+ return 0;
+}
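
The loop above gives every port that lacks its own bulk-out endpoint a PAGE_SIZE write fifo. For readers unfamiliar with the kfifo calls being used, here is a minimal, self-contained sketch of the same API outside this driver (the buffer contents and sizes are illustrative only):

	/* requires <linux/kfifo.h> */
	struct kfifo fifo;
	unsigned char out[3];

	if (!kfifo_alloc(&fifo, PAGE_SIZE, GFP_KERNEL)) {
		kfifo_in(&fifo, "abc", 3);	/* queue three bytes */
		kfifo_out(&fifo, out, 3);	/* and drain them again */
		kfifo_free(&fifo);
	}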
+
+static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
+{
+ struct mxuport_port *mxport = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ int err;
+
+ /* Set receive host (enable) */
+ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RX_HOST_EN,
+ 1, port->port_number);
+ if (err)
+ return err;
+
+ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_OPEN,
+ 1, port->port_number);
+ if (err) {
+ mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RX_HOST_EN,
+ 0, port->port_number);
+ return err;
+ }
+
+ /* Initial port termios */
+ mxuport_set_termios(tty, port, NULL);
+
+ /*
+ * TODO: use RQ_VENDOR_GET_MSR, once we know what it
+ * returns.
+ */
+ mxport->msr_state = 0;
+
+ return err;
+}
+
+static void mxuport_close(struct usb_serial_port *port)
+{
+ struct usb_serial *serial = port->serial;
+
+ mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_OPEN, 0,
+ port->port_number);
+
+ mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RX_HOST_EN, 0,
+ port->port_number);
+}
+
+/* Send a break to the port. */
+static void mxuport_break_ctl(struct tty_struct *tty, int break_state)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct usb_serial *serial = port->serial;
+ int enable;
+
+ if (break_state == -1) {
+ enable = 1;
+ dev_dbg(&port->dev, "%s - sending break\n", __func__);
+ } else {
+ enable = 0;
+ dev_dbg(&port->dev, "%s - clearing break\n", __func__);
+ }
+
+ mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_BREAK,
+ enable, port->port_number);
+}
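
For context: the tty core normally drives this callback with -1 to start a break and 0 to end it, which is why only those two cases are handled above. A minimal userspace sketch that exercises it, assuming a port bound to this driver shows up as /dev/ttyUSB0:

	#include <fcntl.h>
	#include <termios.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

		if (fd < 0)
			return 1;
		tcsendbreak(fd, 0);	/* driver sees break_ctl(-1), then break_ctl(0) */
		close(fd);
		return 0;
	}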
+
+static int mxuport_resume(struct usb_serial *serial)
+{
+ struct usb_serial_port *port;
+ int c = 0;
+ int i;
+ int r;
+
+ for (i = 0; i < 2; i++) {
+ port = serial->port[i];
+
+ r = usb_serial_generic_submit_read_urbs(port, GFP_NOIO);
+ if (r < 0)
+ c++;
+ }
+
+ for (i = 0; i < serial->num_ports; i++) {
+ port = serial->port[i];
+ if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
+ continue;
+
+ r = usb_serial_generic_write_start(port, GFP_NOIO);
+ if (r < 0)
+ c++;
+ }
+
+ return c ? -EIO : 0;
+}
+
+static struct usb_serial_driver mxuport_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mxuport",
+ },
+ .description = "MOXA UPort",
+ .id_table = mxuport_idtable,
+ .num_ports = 0,
+ .probe = mxuport_probe,
+ .port_probe = mxuport_port_probe,
+ .attach = mxuport_attach,
+ .calc_num_ports = mxuport_calc_num_ports,
+ .open = mxuport_open,
+ .close = mxuport_close,
+ .set_termios = mxuport_set_termios,
+ .break_ctl = mxuport_break_ctl,
+ .tx_empty = mxuport_tx_empty,
+ .tiocmiwait = usb_serial_generic_tiocmiwait,
+ .get_icount = usb_serial_generic_get_icount,
+ .throttle = mxuport_throttle,
+ .unthrottle = mxuport_unthrottle,
+ .tiocmget = mxuport_tiocmget,
+ .tiocmset = mxuport_tiocmset,
+ .dtr_rts = mxuport_dtr_rts,
+ .process_read_urb = mxuport_process_read_urb,
+ .prepare_write_buffer = mxuport_prepare_write_buffer,
+ .resume = mxuport_resume,
+};
+
+static struct usb_serial_driver *const serial_drivers[] = {
+ &mxuport_device, NULL
+};
+
+module_usb_serial_driver(serial_drivers, mxuport_idtable);
+
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
+MODULE_AUTHOR("<support@moxa.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 38725fc8c2c8..2a97cdc078d5 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -14,7 +14,6 @@
#include <linux/gfp.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 5739bf6f7200..f6c6900bccf0 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index cbe779f578f9..4856fb7e637e 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -12,7 +12,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/slab.h>
@@ -139,7 +138,7 @@ static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port)
/* Clear RTS line */
send_control_msg(port, CONTROL_RTS, 0);
- /* clear the halt status of the enpoint */
+ /* clear the halt status of the endpoint */
usb_clear_halt(port->serial->dev, port->read_urb->pipe);
res = usb_serial_generic_open(tty, port);
@@ -200,15 +199,12 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
buffer = kmalloc(count, GFP_ATOMIC);
if (!buffer) {
- dev_err(&port->dev, "out of memory\n");
count = -ENOMEM;
-
goto error_no_buffer;
}
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
- dev_err(&port->dev, "no more free urbs\n");
count = -ENOMEM;
goto error_no_urb;
}
@@ -217,11 +213,10 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
usb_serial_debug_data(&port->dev, __func__, count, buffer);
- /* The conncected devices do not have a bulk write endpoint,
+ /* The connected devices do not have a bulk write endpoint,
 * to transmit data to the barcode device the control endpoint is used */
dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
if (!dr) {
- dev_err(&port->dev, "out of memory\n");
count = -ENOMEM;
goto error_no_dr;
}
@@ -367,8 +362,6 @@ static int opticon_ioctl(struct tty_struct *tty,
{
struct usb_serial_port *port = tty->driver_data;
- dev_dbg(&port->dev, "%s - cmd = 0x%x\n", __func__, cmd);
-
switch (cmd) {
case TIOCGSERIAL:
return get_serial_info(port,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 496b7e39d5be..216d20affba8 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb);
#define ZTE_PRODUCT_MF628 0x0015
#define ZTE_PRODUCT_MF626 0x0031
#define ZTE_PRODUCT_MC2718 0xffe8
+#define ZTE_PRODUCT_AC2726 0xfff1
#define BENQ_VENDOR_ID 0x04a5
#define BENQ_PRODUCT_H10 0x4068
@@ -319,6 +320,9 @@ static void option_instat_callback(struct urb *urb);
* It seems to contain a Qualcomm QSC6240/6290 chipset */
#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
+/* iBall 3.5G connect wireless modem */
+#define IBALL_3_5G_CONNECT 0x9605
+
/* Zoom */
#define ZOOM_PRODUCT_4597 0x9607
@@ -1358,7 +1362,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
@@ -1446,6 +1451,17 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8e, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8f, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff90, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff91, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
/* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
@@ -1453,6 +1469,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
@@ -1487,6 +1504,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&four_g_w14_blacklist
},
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
/* Pirelli */
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1, 0xff) },
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index a2080ac7b7e5..a4b88bc038b6 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -39,7 +39,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -103,6 +102,7 @@ struct oti6858_control_pkt {
#define TX_BUFFER_EMPTIED 0x09
u8 pin_state;
#define PIN_MASK 0x3f
+#define PIN_MSR_MASK 0x1b
#define PIN_RTS 0x20 /* output pin */
#define PIN_CTS 0x10 /* input pin, active low */
#define PIN_DSR 0x08 /* input pin, active low */
@@ -134,7 +134,6 @@ static int oti6858_chars_in_buffer(struct tty_struct *tty);
static int oti6858_tiocmget(struct tty_struct *tty);
static int oti6858_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
-static int oti6858_tiocmiwait(struct tty_struct *tty, unsigned long arg);
static int oti6858_port_probe(struct usb_serial_port *port);
static int oti6858_port_remove(struct usb_serial_port *port);
@@ -153,7 +152,7 @@ static struct usb_serial_driver oti6858_device = {
.init_termios = oti6858_init_termios,
.tiocmget = oti6858_tiocmget,
.tiocmset = oti6858_tiocmset,
- .tiocmiwait = oti6858_tiocmiwait,
+ .tiocmiwait = usb_serial_generic_tiocmiwait,
.read_bulk_callback = oti6858_read_bulk_callback,
.read_int_callback = oti6858_read_int_callback,
.write_bulk_callback = oti6858_write_bulk_callback,
@@ -200,8 +199,7 @@ static void setup_line(struct work_struct *work)
int result;
new_setup = kmalloc(OTI6858_CTRL_PKT_SIZE, GFP_KERNEL);
- if (new_setup == NULL) {
- dev_err(&port->dev, "%s(): out of memory!\n", __func__);
+ if (!new_setup) {
/* we will try again */
schedule_delayed_work(&priv->delayed_setup_work,
msecs_to_jiffies(2));
@@ -287,11 +285,9 @@ static void send_data(struct work_struct *work)
if (count != 0) {
allow = kmalloc(1, GFP_KERNEL);
- if (!allow) {
- dev_err_console(port, "%s(): kmalloc failed\n",
- __func__);
+ if (!allow)
return;
- }
+
result = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
OTI6858_REQ_T_CHECK_TXBUFF,
@@ -517,10 +513,8 @@ static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port)
usb_clear_halt(serial->dev, port->read_urb->pipe);
buf = kmalloc(OTI6858_CTRL_PKT_SIZE, GFP_KERNEL);
- if (buf == NULL) {
- dev_err(&port->dev, "%s(): out of memory!\n", __func__);
+ if (!buf)
return -ENOMEM;
- }
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
OTI6858_REQ_T_GET_STATUS,
@@ -647,46 +641,6 @@ static int oti6858_tiocmget(struct tty_struct *tty)
return result;
}
-static int oti6858_tiocmiwait(struct tty_struct *tty, unsigned long arg)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct oti6858_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
- unsigned int prev, status;
- unsigned int changed;
-
- spin_lock_irqsave(&priv->lock, flags);
- prev = priv->status.pin_state;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- while (1) {
- wait_event_interruptible(port->port.delta_msr_wait,
- port->serial->disconnected ||
- priv->status.pin_state != prev);
- if (signal_pending(current))
- return -ERESTARTSYS;
-
- if (port->serial->disconnected)
- return -EIO;
-
- spin_lock_irqsave(&priv->lock, flags);
- status = priv->status.pin_state & PIN_MASK;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- changed = prev ^ status;
- /* FIXME: check if this is correct (active high/low) */
- if (((arg & TIOCM_RNG) && (changed & PIN_RI)) ||
- ((arg & TIOCM_DSR) && (changed & PIN_DSR)) ||
- ((arg & TIOCM_CD) && (changed & PIN_DCD)) ||
- ((arg & TIOCM_CTS) && (changed & PIN_CTS)))
- return 0;
- prev = status;
- }
-
- /* NOTREACHED */
- return 0;
-}
-
static void oti6858_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
@@ -744,8 +698,21 @@ static void oti6858_read_int_callback(struct urb *urb)
}
if (!priv->transient) {
- if (xs->pin_state != priv->status.pin_state)
+ u8 delta = xs->pin_state ^ priv->status.pin_state;
+
+ if (delta & PIN_MSR_MASK) {
+ if (delta & PIN_CTS)
+ port->icount.cts++;
+ if (delta & PIN_DSR)
+ port->icount.dsr++;
+ if (delta & PIN_RI)
+ port->icount.rng++;
+ if (delta & PIN_DCD)
+ port->icount.dcd++;
+
wake_up_interruptible(&port->port.delta_msr_wait);
+ }
+
memcpy(&priv->status, xs, OTI6858_CTRL_PKT_SIZE);
}
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 1e3318dfa1cb..2e22fc22c382 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -12,12 +12,10 @@
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
- *
*/
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -32,10 +30,9 @@
#include <asm/unaligned.h>
#include "pl2303.h"
-/*
- * Version Information
- */
-#define DRIVER_DESC "Prolific PL2303 USB to serial adaptor driver"
+
+#define PL2303_QUIRK_UART_STATE_IDX0 BIT(0)
+#define PL2303_QUIRK_LEGACY BIT(1)
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID) },
@@ -64,9 +61,12 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SITECOM_VENDOR_ID, SITECOM_PRODUCT_ID) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_ID) },
{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_ID) },
- { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1) },
- { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65) },
- { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X75) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1),
+ .driver_info = PL2303_QUIRK_UART_STATE_IDX0 },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65),
+ .driver_info = PL2303_QUIRK_UART_STATE_IDX0 },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X75),
+ .driver_info = PL2303_QUIRK_UART_STATE_IDX0 },
{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_EF81) },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_ID_S81) }, /* Benq/Siemens S81 */
{ USB_DEVICE(SYNTECH_VENDOR_ID, SYNTECH_PRODUCT_ID) },
@@ -116,7 +116,8 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define VENDOR_READ_REQUEST_TYPE 0xc0
#define VENDOR_READ_REQUEST 0x01
-#define UART_STATE 0x08
+#define UART_STATE_INDEX 8
+#define UART_STATE_MSR_MASK 0x8b
#define UART_STATE_TRANSIENT_MASK 0x74
#define UART_DCD 0x01
#define UART_DSR 0x02
@@ -129,98 +130,142 @@ MODULE_DEVICE_TABLE(usb, id_table);
enum pl2303_type {
- type_0, /* don't know the difference between type 0 and */
- type_1, /* type 1, until someone from prolific tells us... */
- HX, /* HX version of the pl2303 chip */
+ TYPE_01, /* Type 0 and 1 (difference unknown) */
+ TYPE_HX, /* HX version of the pl2303 chip */
+ TYPE_COUNT
+};
+
+struct pl2303_type_data {
+ speed_t max_baud_rate;
+ unsigned long quirks;
};
struct pl2303_serial_private {
- enum pl2303_type type;
+ const struct pl2303_type_data *type;
+ unsigned long quirks;
};
struct pl2303_private {
spinlock_t lock;
u8 line_control;
u8 line_status;
+
+ u8 line_settings[7];
+};
+
+static const struct pl2303_type_data pl2303_type_data[TYPE_COUNT] = {
+ [TYPE_01] = {
+ .max_baud_rate = 1228800,
+ .quirks = PL2303_QUIRK_LEGACY,
+ },
};
-static int pl2303_vendor_read(__u16 value, __u16 index,
- struct usb_serial *serial, unsigned char *buf)
+static int pl2303_vendor_read(struct usb_serial *serial, u16 value,
+ unsigned char buf[1])
{
- int res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ struct device *dev = &serial->interface->dev;
+ int res;
+
+ res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
VENDOR_READ_REQUEST, VENDOR_READ_REQUEST_TYPE,
- value, index, buf, 1, 100);
- dev_dbg(&serial->interface->dev, "0x%x:0x%x:0x%x:0x%x %d - %x\n",
- VENDOR_READ_REQUEST_TYPE, VENDOR_READ_REQUEST, value, index,
- res, buf[0]);
- return res;
+ value, 0, buf, 1, 100);
+ if (res != 1) {
+ dev_err(dev, "%s - failed to read [%04x]: %d\n", __func__,
+ value, res);
+ if (res >= 0)
+ res = -EIO;
+
+ return res;
+ }
+
+ dev_dbg(dev, "%s - [%04x] = %02x\n", __func__, value, buf[0]);
+
+ return 0;
}
-static int pl2303_vendor_write(__u16 value, __u16 index,
- struct usb_serial *serial)
+static int pl2303_vendor_write(struct usb_serial *serial, u16 value, u16 index)
{
- int res = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
+ struct device *dev = &serial->interface->dev;
+ int res;
+
+ dev_dbg(dev, "%s - [%04x] = %02x\n", __func__, value, index);
+
+ res = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
VENDOR_WRITE_REQUEST, VENDOR_WRITE_REQUEST_TYPE,
value, index, NULL, 0, 100);
- dev_dbg(&serial->interface->dev, "0x%x:0x%x:0x%x:0x%x %d\n",
- VENDOR_WRITE_REQUEST_TYPE, VENDOR_WRITE_REQUEST, value, index,
- res);
- return res;
+ if (res) {
+ dev_err(dev, "%s - failed to write [%04x]: %d\n", __func__,
+ value, res);
+ return res;
+ }
+
+ return 0;
+}
+
+static int pl2303_probe(struct usb_serial *serial,
+ const struct usb_device_id *id)
+{
+ usb_set_serial_data(serial, (void *)id->driver_info);
+
+ return 0;
}
static int pl2303_startup(struct usb_serial *serial)
{
struct pl2303_serial_private *spriv;
- enum pl2303_type type = type_0;
+ enum pl2303_type type = TYPE_01;
unsigned char *buf;
spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
if (!spriv)
return -ENOMEM;
- buf = kmalloc(10, GFP_KERNEL);
+ buf = kmalloc(1, GFP_KERNEL);
if (!buf) {
kfree(spriv);
return -ENOMEM;
}
if (serial->dev->descriptor.bDeviceClass == 0x02)
- type = type_0;
+ type = TYPE_01; /* type 0 */
else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40)
- type = HX;
+ type = TYPE_HX;
else if (serial->dev->descriptor.bDeviceClass == 0x00)
- type = type_1;
+ type = TYPE_01; /* type 1 */
else if (serial->dev->descriptor.bDeviceClass == 0xFF)
- type = type_1;
+ type = TYPE_01; /* type 1 */
dev_dbg(&serial->interface->dev, "device type: %d\n", type);
- spriv->type = type;
+ spriv->type = &pl2303_type_data[type];
+ spriv->quirks = (unsigned long)usb_get_serial_data(serial);
+ spriv->quirks |= spriv->type->quirks;
+
usb_set_serial_data(serial, spriv);
- pl2303_vendor_read(0x8484, 0, serial, buf);
- pl2303_vendor_write(0x0404, 0, serial);
- pl2303_vendor_read(0x8484, 0, serial, buf);
- pl2303_vendor_read(0x8383, 0, serial, buf);
- pl2303_vendor_read(0x8484, 0, serial, buf);
- pl2303_vendor_write(0x0404, 1, serial);
- pl2303_vendor_read(0x8484, 0, serial, buf);
- pl2303_vendor_read(0x8383, 0, serial, buf);
- pl2303_vendor_write(0, 1, serial);
- pl2303_vendor_write(1, 0, serial);
- if (type == HX)
- pl2303_vendor_write(2, 0x44, serial);
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_write(serial, 0x0404, 0);
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_read(serial, 0x8383, buf);
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_write(serial, 0x0404, 1);
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_read(serial, 0x8383, buf);
+ pl2303_vendor_write(serial, 0, 1);
+ pl2303_vendor_write(serial, 1, 0);
+ if (spriv->quirks & PL2303_QUIRK_LEGACY)
+ pl2303_vendor_write(serial, 2, 0x24);
else
- pl2303_vendor_write(2, 0x24, serial);
+ pl2303_vendor_write(serial, 2, 0x44);
kfree(buf);
+
return 0;
}
static void pl2303_release(struct usb_serial *serial)
{
- struct pl2303_serial_private *spriv;
+ struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
- spriv = usb_get_serial_data(serial);
kfree(spriv);
}
@@ -243,9 +288,8 @@ static int pl2303_port_probe(struct usb_serial_port *port)
static int pl2303_port_remove(struct usb_serial_port *port)
{
- struct pl2303_private *priv;
+ struct pl2303_private *priv = usb_get_serial_port_data(port);
- priv = usb_get_serial_port_data(port);
kfree(priv);
return 0;
@@ -256,39 +300,31 @@ static int pl2303_set_control_lines(struct usb_serial_port *port, u8 value)
struct usb_device *dev = port->serial->dev;
int retval;
+ dev_dbg(&port->dev, "%s - %02x\n", __func__, value);
+
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
SET_CONTROL_REQUEST, SET_CONTROL_REQUEST_TYPE,
value, 0, NULL, 0, 100);
- dev_dbg(&port->dev, "%s - value = %d, retval = %d\n", __func__,
- value, retval);
+ if (retval)
+ dev_err(&port->dev, "%s - failed: %d\n", __func__, retval);
+
return retval;
}
-static void pl2303_encode_baudrate(struct tty_struct *tty,
- struct usb_serial_port *port,
- u8 buf[4])
+/*
+ * Returns the nearest supported baud rate that can be set directly without
+ * using divisors.
+ */
+static speed_t pl2303_get_supported_baud_rate(speed_t baud)
{
- const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
- 4800, 7200, 9600, 14400, 19200, 28800, 38400,
- 57600, 115200, 230400, 460800, 500000, 614400,
- 921600, 1228800, 2457600, 3000000, 6000000 };
-
- struct usb_serial *serial = port->serial;
- struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
- int baud;
- int i;
+ static const speed_t baud_sup[] = {
+ 75, 150, 300, 600, 1200, 1800, 2400, 3600, 4800, 7200, 9600,
+ 14400, 19200, 28800, 38400, 57600, 115200, 230400, 460800,
+ 614400, 921600, 1228800, 2457600, 3000000, 6000000
+ };
- /*
- * NOTE: Only the values defined in baud_sup are supported!
- * => if unsupported values are set, the PL2303 seems to use
- * 9600 baud (at least my PL2303X always does)
- */
- baud = tty_get_baud_rate(tty);
- dev_dbg(&port->dev, "baud requested = %d\n", baud);
- if (!baud)
- return;
+ unsigned i;
- /* Set baudrate to nearest supported value */
for (i = 0; i < ARRAY_SIZE(baud_sup); ++i) {
if (baud_sup[i] > baud)
break;
@@ -301,31 +337,120 @@ static void pl2303_encode_baudrate(struct tty_struct *tty,
else
baud = baud_sup[i];
- /* type_0, type_1 only support up to 1228800 baud */
- if (spriv->type != HX)
- baud = min_t(int, baud, 1228800);
+ return baud;
+}
- if (baud <= 115200) {
- put_unaligned_le32(baud, buf);
- } else {
- /*
- * Apparently the formula for higher speeds is:
- * baudrate = 12M * 32 / (2^buf[1]) / buf[0]
- */
- unsigned tmp = 12000000 * 32 / baud;
- buf[3] = 0x80;
- buf[2] = 0;
- buf[1] = (tmp >= 256);
- while (tmp >= 256) {
- tmp >>= 2;
- buf[1] <<= 1;
- }
- buf[0] = tmp;
+/*
+ * NOTE: If unsupported baud rates are set directly, the PL2303 seems to
+ * use 9600 baud.
+ */
+static speed_t pl2303_encode_baud_rate_direct(unsigned char buf[4],
+ speed_t baud)
+{
+ put_unaligned_le32(baud, buf);
+
+ return baud;
+}
+
+static speed_t pl2303_encode_baud_rate_divisor(unsigned char buf[4],
+ speed_t baud)
+{
+ unsigned int tmp;
+
+ /*
+ * Apparently the formula is:
+ * baudrate = 12M * 32 / (2^buf[1]) / buf[0]
+ */
+ tmp = 12000000 * 32 / baud;
+ buf[3] = 0x80;
+ buf[2] = 0;
+ buf[1] = (tmp >= 256);
+ while (tmp >= 256) {
+ tmp >>= 2;
+ buf[1] <<= 1;
}
+ buf[0] = tmp;
+
+ return baud;
+}
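
A worked example of the divisor encoding above (my own arithmetic, not from the driver):

	/* Requesting 500000 baud:					*/
	/*   tmp    = 12000000 * 32 / 500000 = 768			*/
	/*   buf[1] = 1, one loop pass: tmp = 192, buf[1] = 2		*/
	/*   buf    = { 192, 2, 0x00, 0x80 }				*/
	/*   check  : 12000000 * 32 / (1 << 2) / 192 = 500000		*/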
+
+static void pl2303_encode_baud_rate(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ u8 buf[4])
+{
+ struct usb_serial *serial = port->serial;
+ struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
+ speed_t baud_sup;
+ speed_t baud;
+
+ baud = tty_get_baud_rate(tty);
+ dev_dbg(&port->dev, "baud requested = %u\n", baud);
+ if (!baud)
+ return;
+
+ if (spriv->type->max_baud_rate)
+ baud = min_t(speed_t, baud, spriv->type->max_baud_rate);
+ /*
+ * Set baud rate to nearest supported value.
+ *
+ * NOTE: Baud rate 500k can only be set using divisors.
+ */
+ baud_sup = pl2303_get_supported_baud_rate(baud);
+
+ if (baud == 500000)
+ baud = pl2303_encode_baud_rate_divisor(buf, baud);
+ else
+ baud = pl2303_encode_baud_rate_direct(buf, baud_sup);
/* Save resulting baud rate */
tty_encode_baud_rate(tty, baud, baud);
- dev_dbg(&port->dev, "baud set = %d\n", baud);
+ dev_dbg(&port->dev, "baud set = %u\n", baud);
+}
+
+static int pl2303_get_line_request(struct usb_serial_port *port,
+ unsigned char buf[7])
+{
+ struct usb_device *udev = port->serial->dev;
+ int ret;
+
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ GET_LINE_REQUEST, GET_LINE_REQUEST_TYPE,
+ 0, 0, buf, 7, 100);
+ if (ret != 7) {
+ dev_err(&port->dev, "%s - failed: %d\n", __func__, ret);
+
+ if (ret > 0)
+ ret = -EIO;
+
+ return ret;
+ }
+
+ dev_dbg(&port->dev, "%s - %7ph\n", __func__, buf);
+
+ return 0;
+}
+
+static int pl2303_set_line_request(struct usb_serial_port *port,
+ unsigned char buf[7])
+{
+ struct usb_device *udev = port->serial->dev;
+ int ret;
+
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ SET_LINE_REQUEST, SET_LINE_REQUEST_TYPE,
+ 0, 0, buf, 7, 100);
+ if (ret != 7) {
+ dev_err(&port->dev, "%s - failed: %d\n", __func__, ret);
+
+ if (ret > 0)
+ ret = -EIO;
+
+ return ret;
+ }
+
+ dev_dbg(&port->dev, "%s - %7ph\n", __func__, buf);
+
+ return 0;
}
static void pl2303_set_termios(struct tty_struct *tty,
@@ -336,30 +461,21 @@ static void pl2303_set_termios(struct tty_struct *tty,
struct pl2303_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned char *buf;
- int i;
+ int ret;
u8 control;
- /*
- * The PL2303 is reported to lose bytes if you change serial settings
- * even to the same values as before. Thus we actually need to filter
- * in this specific case.
- */
if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
return;
buf = kzalloc(7, GFP_KERNEL);
if (!buf) {
- dev_err(&port->dev, "%s - out of memory.\n", __func__);
/* Report back no change occurred */
if (old_termios)
tty->termios = *old_termios;
return;
}
- i = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- GET_LINE_REQUEST, GET_LINE_REQUEST_TYPE,
- 0, 0, buf, 7, 100);
- dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf);
+ pl2303_get_line_request(port, buf);
switch (C_CSIZE(tty)) {
case CS5:
@@ -378,7 +494,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
/* For reference buf[0]:buf[3] baud rate value */
- pl2303_encode_baudrate(tty, port, &buf[0]);
+ pl2303_encode_baud_rate(tty, port, &buf[0]);
/* For reference buf[4]=0 is 1 stop bits */
/* For reference buf[4]=1 is 1.5 stop bits */
@@ -407,7 +523,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
/* For reference buf[5]=3 is mark parity */
/* For reference buf[5]=4 is space parity */
if (C_PARODD(tty)) {
- if (tty->termios.c_cflag & CMSPAR) {
+ if (C_CMSPAR(tty)) {
buf[5] = 3;
dev_dbg(&port->dev, "parity = mark\n");
} else {
@@ -415,7 +531,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
dev_dbg(&port->dev, "parity = odd\n");
}
} else {
- if (tty->termios.c_cflag & CMSPAR) {
+ if (C_CMSPAR(tty)) {
buf[5] = 4;
dev_dbg(&port->dev, "parity = space\n");
} else {
@@ -428,10 +544,23 @@ static void pl2303_set_termios(struct tty_struct *tty,
dev_dbg(&port->dev, "parity = none\n");
}
- i = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- SET_LINE_REQUEST, SET_LINE_REQUEST_TYPE,
- 0, 0, buf, 7, 100);
- dev_dbg(&port->dev, "0x21:0x20:0:0 %d\n", i);
+ /*
+ * Some PL2303 are known to lose bytes if you change serial settings
+ * even to the same values as before. Thus we actually need to filter
+ * in this specific case.
+ *
+ * Note that the tty_termios_hw_change check above is not sufficient
+ * as a previously requested baud rate may differ from the one
+ * actually used (and stored in old_termios).
+ *
+ * NOTE: No additional locking needed for line_settings as it is
+ * only used in set_termios, which is serialised against itself.
+ */
+ if (!old_termios || memcmp(buf, priv->line_settings, 7)) {
+ ret = pl2303_set_line_request(port, buf);
+ if (!ret)
+ memcpy(priv->line_settings, buf, 7);
+ }
/* change control lines if we are switching to or from B0 */
spin_lock_irqsave(&priv->lock, flags);
@@ -448,19 +577,13 @@ static void pl2303_set_termios(struct tty_struct *tty,
spin_unlock_irqrestore(&priv->lock, flags);
}
- memset(buf, 0, 7);
- i = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- GET_LINE_REQUEST, GET_LINE_REQUEST_TYPE,
- 0, 0, buf, 7, 100);
- dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf);
-
if (C_CRTSCTS(tty)) {
- if (spriv->type == HX)
- pl2303_vendor_write(0x0, 0x61, serial);
+ if (spriv->quirks & PL2303_QUIRK_LEGACY)
+ pl2303_vendor_write(serial, 0x0, 0x41);
else
- pl2303_vendor_write(0x0, 0x41, serial);
+ pl2303_vendor_write(serial, 0x0, 0x61);
} else {
- pl2303_vendor_write(0x0, 0x0, serial);
+ pl2303_vendor_write(serial, 0x0, 0x0);
}
kfree(buf);
@@ -473,13 +596,13 @@ static void pl2303_dtr_rts(struct usb_serial_port *port, int on)
u8 control;
spin_lock_irqsave(&priv->lock, flags);
- /* Change DTR and RTS */
if (on)
priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
else
priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
control = priv->line_control;
spin_unlock_irqrestore(&priv->lock, flags);
+
pl2303_set_control_lines(port, control);
}
@@ -495,13 +618,13 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
int result;
- if (spriv->type != HX) {
+ if (spriv->quirks & PL2303_QUIRK_LEGACY) {
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
} else {
/* reset upstream data pipes */
- pl2303_vendor_write(8, 0, serial);
- pl2303_vendor_write(9, 0, serial);
+ pl2303_vendor_write(serial, 8, 0);
+ pl2303_vendor_write(serial, 9, 0);
}
/* Setup termios */
@@ -510,8 +633,8 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
- dev_err(&port->dev, "%s - failed submitting interrupt urb,"
- " error %d\n", __func__, result);
+ dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
+ result);
return result;
}
@@ -581,48 +704,10 @@ static int pl2303_tiocmget(struct tty_struct *tty)
static int pl2303_carrier_raised(struct usb_serial_port *port)
{
struct pl2303_private *priv = usb_get_serial_port_data(port);
+
if (priv->line_status & UART_DCD)
return 1;
- return 0;
-}
-static int pl2303_tiocmiwait(struct tty_struct *tty, unsigned long arg)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct pl2303_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
- unsigned int prevstatus;
- unsigned int status;
- unsigned int changed;
-
- spin_lock_irqsave(&priv->lock, flags);
- prevstatus = priv->line_status;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- while (1) {
- interruptible_sleep_on(&port->port.delta_msr_wait);
- /* see if a signal did it */
- if (signal_pending(current))
- return -ERESTARTSYS;
-
- if (port->serial->disconnected)
- return -EIO;
-
- spin_lock_irqsave(&priv->lock, flags);
- status = priv->line_status;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- changed = prevstatus ^ status;
-
- if (((arg & TIOCM_RNG) && (changed & UART_RING)) ||
- ((arg & TIOCM_DSR) && (changed & UART_DSR)) ||
- ((arg & TIOCM_CD) && (changed & UART_DCD)) ||
- ((arg & TIOCM_CTS) && (changed & UART_CTS))) {
- return 0;
- }
- prevstatus = status;
- }
- /* NOTREACHED */
return 0;
}
@@ -632,8 +717,6 @@ static int pl2303_ioctl(struct tty_struct *tty,
struct serial_struct ser;
struct usb_serial_port *port = tty->driver_data;
- dev_dbg(&port->dev, "%s cmd = 0x%04x\n", __func__, cmd);
-
switch (cmd) {
case TIOCGSERIAL:
memset(&ser, 0, sizeof ser);
@@ -647,9 +730,9 @@ static int pl2303_ioctl(struct tty_struct *tty,
return 0;
default:
- dev_dbg(&port->dev, "%s not supported = 0x%04x\n", __func__, cmd);
break;
}
+
return -ENOIOCTLCMD;
}
@@ -664,6 +747,7 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
state = BREAK_OFF;
else
state = BREAK_ON;
+
dev_dbg(&port->dev, "%s - turning break %s\n", __func__,
state == BREAK_OFF ? "off" : "on");
@@ -678,48 +762,51 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
unsigned char *data,
unsigned int actual_length)
{
-
+ struct usb_serial *serial = port->serial;
+ struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
struct pl2303_private *priv = usb_get_serial_port_data(port);
struct tty_struct *tty;
unsigned long flags;
- u8 status_idx = UART_STATE;
- u8 length = UART_STATE + 1;
- u8 prev_line_status;
- u16 idv, idp;
-
- idv = le16_to_cpu(port->serial->dev->descriptor.idVendor);
- idp = le16_to_cpu(port->serial->dev->descriptor.idProduct);
-
+ unsigned int status_idx = UART_STATE_INDEX;
+ u8 status;
+ u8 delta;
- if (idv == SIEMENS_VENDOR_ID) {
- if (idp == SIEMENS_PRODUCT_ID_X65 ||
- idp == SIEMENS_PRODUCT_ID_SX1 ||
- idp == SIEMENS_PRODUCT_ID_X75) {
+ if (spriv->quirks & PL2303_QUIRK_UART_STATE_IDX0)
+ status_idx = 0;
- length = 1;
- status_idx = 0;
- }
- }
-
- if (actual_length < length)
+ if (actual_length < status_idx + 1)
return;
+ status = data[status_idx];
+
/* Save off the uart status for others to look at */
spin_lock_irqsave(&priv->lock, flags);
- prev_line_status = priv->line_status;
- priv->line_status = data[status_idx];
+ delta = priv->line_status ^ status;
+ priv->line_status = status;
spin_unlock_irqrestore(&priv->lock, flags);
- if (priv->line_status & UART_BREAK_ERROR)
+
+ if (status & UART_BREAK_ERROR)
usb_serial_handle_break(port);
- wake_up_interruptible(&port->port.delta_msr_wait);
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
- if ((priv->line_status ^ prev_line_status) & UART_DCD)
- usb_serial_handle_dcd_change(port, tty,
- priv->line_status & UART_DCD);
- tty_kref_put(tty);
+ if (delta & UART_STATE_MSR_MASK) {
+ if (delta & UART_CTS)
+ port->icount.cts++;
+ if (delta & UART_DSR)
+ port->icount.dsr++;
+ if (delta & UART_RING)
+ port->icount.rng++;
+ if (delta & UART_DCD) {
+ port->icount.dcd++;
+ tty = tty_port_tty_get(&port->port);
+ if (tty) {
+ usb_serial_handle_dcd_change(port, tty,
+ status & UART_DCD);
+ tty_kref_put(tty);
+ }
+ }
+
+ wake_up_interruptible(&port->port.delta_msr_wait);
+ }
}
static void pl2303_read_int_callback(struct urb *urb)
@@ -754,10 +841,11 @@ static void pl2303_read_int_callback(struct urb *urb)
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval)
+ if (retval) {
dev_err(&port->dev,
"%s - usb_submit_urb failed with result %d\n",
__func__, retval);
+ }
}
static void pl2303_process_read_urb(struct urb *urb)
@@ -775,13 +863,14 @@ static void pl2303_process_read_urb(struct urb *urb)
line_status = priv->line_status;
priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
spin_unlock_irqrestore(&priv->lock, flags);
- wake_up_interruptible(&port->port.delta_msr_wait);
if (!urb->actual_length)
return;
- /* break takes precedence over parity, */
- /* which takes precedence over framing errors */
+ /*
+ * Break takes precedence over parity, which takes precedence over
+ * framing errors.
+ */
if (line_status & UART_BREAK_ERROR)
tty_flag = TTY_BREAK;
else if (line_status & UART_PARITY_ERROR)
@@ -809,7 +898,6 @@ static void pl2303_process_read_urb(struct urb *urb)
tty_flip_buffer_push(&port->port);
}
-/* All of the device info needed for the PL2303 SIO serial converter */
static struct usb_serial_driver pl2303_device = {
.driver = {
.owner = THIS_MODULE,
@@ -821,16 +909,17 @@ static struct usb_serial_driver pl2303_device = {
.bulk_out_size = 256,
.open = pl2303_open,
.close = pl2303_close,
- .dtr_rts = pl2303_dtr_rts,
+ .dtr_rts = pl2303_dtr_rts,
.carrier_raised = pl2303_carrier_raised,
.ioctl = pl2303_ioctl,
.break_ctl = pl2303_break_ctl,
.set_termios = pl2303_set_termios,
.tiocmget = pl2303_tiocmget,
.tiocmset = pl2303_tiocmset,
- .tiocmiwait = pl2303_tiocmiwait,
+ .tiocmiwait = usb_serial_generic_tiocmiwait,
.process_read_urb = pl2303_process_read_urb,
.read_int_callback = pl2303_read_int_callback,
+ .probe = pl2303_probe,
.attach = pl2303_startup,
.release = pl2303_release,
.port_probe = pl2303_port_probe,
@@ -843,5 +932,5 @@ static struct usb_serial_driver * const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
-MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_DESCRIPTION("Prolific PL2303 USB to serial adaptor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
index 31f81c3c15eb..6e9f8af96959 100644
--- a/drivers/usb/serial/qcaux.c
+++ b/drivers/usb/serial/qcaux.c
@@ -16,7 +16,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/usb.h>
@@ -54,7 +53,7 @@
#define SAMSUNG_VENDOR_ID 0x04e8
#define SAMSUNG_PRODUCT_U520 0x6640 /* SCH-U520 */
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM150, 0xff, 0x00, 0x00) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index c65437cfd4a2..968a40201e5f 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -139,6 +139,9 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index a24d59ae4032..7725ed261ed6 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -15,7 +15,6 @@
#include <asm/unaligned.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -676,10 +675,8 @@ static int qt2_setup_urbs(struct usb_serial *serial)
serial_priv = usb_get_serial_data(serial);
serial_priv->read_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!serial_priv->read_urb) {
- dev_err(&serial->dev->dev, "No free urbs available\n");
+ if (!serial_priv->read_urb)
return -ENOMEM;
- }
usb_fill_bulk_urb(serial_priv->read_urb, serial->dev,
usb_rcvbulkpipe(serial->dev,
@@ -715,10 +712,8 @@ static int qt2_attach(struct usb_serial *serial)
}
serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
- if (!serial_priv) {
- dev_err(&serial->dev->dev, "%s - Out of memory\n", __func__);
+ if (!serial_priv)
return -ENOMEM;
- }
serial_priv->read_buffer = kmalloc(QT2_READ_BUFFER_SIZE, GFP_KERNEL);
if (!serial_priv->read_buffer) {
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index ba895989d8c4..b2dff0f14743 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -67,7 +67,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
-#include <linux/init.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
@@ -125,7 +124,7 @@ MODULE_PARM_DESC(padded, "Pad to full wMaxPacketSize On/Off");
.bInterfaceClass = (ic), \
.bInterfaceSubClass = (isc),
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{MY_USB_DEVICE(0x49f, 0xffff, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Itsy */
{MY_USB_DEVICE(0x3f0, 0x2101, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Calypso */
{MY_USB_DEVICE(0x4dd, 0x8001, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Iris */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index de958c5b52e3..a9eb6221a815 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -497,14 +497,12 @@ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
buffer = kmalloc(writesize, GFP_ATOMIC);
if (!buffer) {
- dev_err(&port->dev, "out of memory\n");
retval = -ENOMEM;
goto error_no_buffer;
}
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
- dev_err(&port->dev, "no more free urbs\n");
retval = -ENOMEM;
goto error_no_urb;
}
@@ -736,11 +734,8 @@ static struct urb *sierra_setup_urb(struct usb_serial *serial, int endpoint,
return NULL;
urb = usb_alloc_urb(0, mem_flags);
- if (urb == NULL) {
- dev_dbg(&serial->dev->dev, "%s: alloc for endpoint %d failed\n",
- __func__, endpoint);
+ if (!urb)
return NULL;
- }
buf = kmalloc(len, mem_flags);
if (buf) {
@@ -752,9 +747,6 @@ static struct urb *sierra_setup_urb(struct usb_serial *serial, int endpoint,
dev_dbg(&serial->dev->dev, "%s %c u : %p d:%p\n", __func__,
dir == USB_DIR_IN ? 'i' : 'o', urb, buf);
} else {
- dev_dbg(&serial->dev->dev, "%s %c u:%p d:%p\n", __func__,
- dir == USB_DIR_IN ? 'i' : 'o', urb, buf);
-
sierra_release_urb(urb);
urb = NULL;
}
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 5b793c352267..4ec04f73c800 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -16,7 +16,6 @@
*/
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index e5750be49054..a7fe664b6b7d 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -6,7 +6,6 @@
*/
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -342,8 +341,6 @@ static int ssu100_ioctl(struct tty_struct *tty,
{
struct usb_serial_port *port = tty->driver_data;
- dev_dbg(&port->dev, "%s cmd 0x%04x\n", __func__, cmd);
-
switch (cmd) {
case TIOCGSERIAL:
return get_serial_info(port,
@@ -352,8 +349,6 @@ static int ssu100_ioctl(struct tty_struct *tty,
break;
}
- dev_dbg(&port->dev, "%s arg not supported\n", __func__);
-
return -ENOIOCTLCMD;
}
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 9b1648945e7a..9fa7dd413e83 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -11,7 +11,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/tty_driver.h>
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index c9a35697ebe9..ec7cea585663 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -21,7 +21,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/firmware.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -143,7 +142,7 @@ static int ti_download_firmware(struct ti_device *tdev);
static int closing_wait = TI_DEFAULT_CLOSING_WAIT;
/* supported devices */
-static struct usb_device_id ti_id_table_3410[] = {
+static const struct usb_device_id ti_id_table_3410[] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -163,7 +162,7 @@ static struct usb_device_id ti_id_table_3410[] = {
{ } /* terminator */
};
-static struct usb_device_id ti_id_table_5052[] = {
+static const struct usb_device_id ti_id_table_5052[] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
@@ -171,7 +170,7 @@ static struct usb_device_id ti_id_table_5052[] = {
{ } /* terminator */
};
-static struct usb_device_id ti_id_table_combined[] = {
+static const struct usb_device_id ti_id_table_combined[] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -301,10 +300,9 @@ static int ti_startup(struct usb_serial *serial)
/* create device structure */
tdev = kzalloc(sizeof(struct ti_device), GFP_KERNEL);
- if (tdev == NULL) {
- dev_err(&dev->dev, "%s - out of memory\n", __func__);
+ if (!tdev)
return -ENOMEM;
- }
+
mutex_init(&tdev->td_open_close_lock);
tdev->td_serial = serial;
usb_set_serial_data(serial, tdev);
@@ -683,8 +681,6 @@ static int ti_ioctl(struct tty_struct *tty,
struct usb_serial_port *port = tty->driver_data;
struct ti_port *tport = usb_get_serial_port_data(port);
- dev_dbg(&port->dev, "%s - cmd = 0x%04X\n", __func__, cmd);
-
if (tport == NULL)
return -ENODEV;
@@ -724,10 +720,8 @@ static void ti_set_termios(struct tty_struct *tty,
return;
config = kmalloc(sizeof(*config), GFP_KERNEL);
- if (!config) {
- dev_err(&port->dev, "%s - out of memory\n", __func__);
+ if (!config)
return;
- }
config->wFlags = 0;
@@ -1196,10 +1190,8 @@ static int ti_get_lsr(struct ti_port *tport, u8 *lsr)
size = sizeof(struct ti_port_status);
data = kmalloc(size, GFP_KERNEL);
- if (!data) {
- dev_err(&port->dev, "%s - out of memory\n", __func__);
+ if (!data)
return -ENOMEM;
- }
status = ti_command_in_sync(tdev, TI_GET_PORT_STATUS,
(__u8)(TI_UART1_PORT+port_number), 0, (__u8 *)data, size);
@@ -1399,10 +1391,8 @@ static int ti_write_byte(struct usb_serial_port *port,
size = sizeof(struct ti_write_data_bytes) + 2;
data = kmalloc(size, GFP_KERNEL);
- if (!data) {
- dev_err(&port->dev, "%s - out of memory\n", __func__);
+ if (!data)
return -ENOMEM;
- }
data->bAddrType = TI_RW_DATA_ADDR_XDATA;
data->bDataType = TI_RW_DATA_BYTE;
@@ -1518,7 +1508,6 @@ static int ti_download_firmware(struct ti_device *tdev)
status = ti_do_download(dev, pipe, buffer, fw_p->size);
kfree(buffer);
} else {
- dev_dbg(&dev->dev, "%s ENOMEM\n", __func__);
status = -ENOMEM;
}
release_firmware(fw_p);
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 52eb91f2eb2a..fb79775447b0 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -15,7 +15,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/usb.h>
@@ -72,7 +71,8 @@ DEVICE(hp4x, HP4X_IDS);
/* Suunto ANT+ USB Driver */
#define SUUNTO_IDS() \
- { USB_DEVICE(0x0fcf, 0x1008) }
+ { USB_DEVICE(0x0fcf, 0x1008) }, \
+ { USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */
DEVICE(suunto, SUUNTO_IDS);
/* Siemens USB/MPI adapter */
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 6091bd5a1f4f..7c9dc28640bb 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -405,7 +405,7 @@ static int serial_ioctl(struct tty_struct *tty,
struct usb_serial_port *port = tty->driver_data;
int retval = -ENOIOCTLCMD;
- dev_dbg(tty->dev, "%s - cmd 0x%.4x\n", __func__, cmd);
+ dev_dbg(tty->dev, "%s - cmd 0x%04x\n", __func__, cmd);
switch (cmd) {
case TIOCMIWAIT:
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 5760f97ee508..ca2fa5bbe17e 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -10,7 +10,6 @@
#include <linux/gfp.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/usb.h>
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 85365784040b..640fe0173236 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -447,12 +447,8 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
struct urb *urb;
urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
- if (urb == NULL) {
- dev_dbg(&serial->interface->dev,
- "%s: alloc for endpoint %d failed.\n", __func__,
- endpoint);
+ if (!urb)
return NULL;
- }
/* Fill URB using supplied data. */
usb_fill_bulk_urb(urb, serial->dev,
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 9910aa2edf4b..bf2bd40e5f2a 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -51,7 +50,7 @@ static int palm_os_3_probe(struct usb_serial *serial,
static int palm_os_4_probe(struct usb_serial *serial,
const struct usb_device_id *id);
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_VISOR_ID),
.driver_info = (kernel_ulong_t)&palm_os_3_probe },
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO_ID),
@@ -113,18 +112,18 @@ static struct usb_device_id id_table [] = {
{ } /* Terminating entry */
};
-static struct usb_device_id clie_id_5_table [] = {
+static const struct usb_device_id clie_id_5_table[] = {
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_UX50_ID),
.driver_info = (kernel_ulong_t)&palm_os_4_probe },
{ } /* Terminating entry */
};
-static struct usb_device_id clie_id_3_5_table [] = {
+static const struct usb_device_id clie_id_3_5_table[] = {
{ USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_3_5_ID) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_VISOR_ID) },
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO_ID) },
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO600_ID) },
@@ -324,11 +323,8 @@ static int palm_os_3_probe(struct usb_serial *serial,
int num_ports = 0;
transfer_buffer = kmalloc(sizeof(*connection_info), GFP_KERNEL);
- if (!transfer_buffer) {
- dev_err(dev, "%s - kmalloc(%Zd) failed.\n", __func__,
- sizeof(*connection_info));
+ if (!transfer_buffer)
return -ENOMEM;
- }
/* send a get connection info request */
retval = usb_control_msg(serial->dev,
@@ -419,11 +415,8 @@ static int palm_os_4_probe(struct usb_serial *serial,
int retval;
transfer_buffer = kmalloc(sizeof(*connection_info), GFP_KERNEL);
- if (!transfer_buffer) {
- dev_err(dev, "%s - kmalloc(%Zd) failed.\n", __func__,
- sizeof(*connection_info));
+ if (!transfer_buffer)
return -ENOMEM;
- }
retval = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
diff --git a/drivers/usb/serial/visor.h b/drivers/usb/serial/visor.h
index 88db4d06aefb..4c456dd69ce5 100644
--- a/drivers/usb/serial/visor.h
+++ b/drivers/usb/serial/visor.h
@@ -136,7 +136,7 @@ struct visor_connection_info {
* connections.end_point_info is non-zero. If value is 0, then
* connections.port contains the endpoint number, which is the same for in
* and out.
- * @port_function_id: contains the creator id of the applicaton that opened
+ * @port_function_id: contains the creator id of the application that opened
* this connection.
* @port: contains the in/out endpoint number. Is 0 if in and out endpoint
* numbers are different.
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 36a7740e827c..e62f2dff8b7d 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -288,12 +287,8 @@ static int whiteheat_attach(struct usb_serial *serial)
command_info = kmalloc(sizeof(struct whiteheat_command_private),
GFP_KERNEL);
- if (command_info == NULL) {
- dev_err(&serial->dev->dev,
- "%s: Out of memory for port structures\n",
- serial->type->description);
+ if (!command_info)
goto no_command_private;
- }
mutex_init(&command_info->mutex);
command_info->port_running = 0;
@@ -455,8 +450,6 @@ static int whiteheat_ioctl(struct tty_struct *tty,
struct serial_struct serstruct;
void __user *user_arg = (void __user *)arg;
- dev_dbg(&port->dev, "%s - cmd 0x%.4x\n", __func__, cmd);
-
switch (cmd) {
case TIOCGSERIAL:
memset(&serstruct, 0, sizeof(serstruct));
diff --git a/drivers/usb/serial/wishbone-serial.c b/drivers/usb/serial/wishbone-serial.c
index 100573c6f19e..4fed4a0bd702 100644
--- a/drivers/usb/serial/wishbone-serial.c
+++ b/drivers/usb/serial/wishbone-serial.c
@@ -11,7 +11,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/usb.h>
diff --git a/drivers/usb/serial/xsens_mt.c b/drivers/usb/serial/xsens_mt.c
index 1d5798d891bc..4841fb57400c 100644
--- a/drivers/usb/serial/xsens_mt.c
+++ b/drivers/usb/serial/xsens_mt.c
@@ -9,7 +9,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/usb.h>
diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
index fca4c752a4ed..e40ab739c4a6 100644
--- a/drivers/usb/serial/zte_ev.c
+++ b/drivers/usb/serial/zte_ev.c
@@ -13,7 +13,6 @@
* show the commands used to talk to the device, but I am not sure.
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -53,7 +52,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
- /* send 2st cmd and recieve data */
+ /* send 2nd cmd and receive data */
/*
* 16.0 CTL a1 21 00 00 00 00 07 00 CLASS 25.1.0(5)
* 16.0 DI 00 96 00 00 00 00 08
@@ -65,7 +64,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
- /* send 3 cmd */
+ /* send 3rd cmd */
/*
* 16.0 CTL 21 20 00 00 00 00 07 00 CLASS 30.1.0
* 16.0 DO 80 25 00 00 00 00 08 .%..... 30.2.0
@@ -84,7 +83,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
- /* send 4 cmd */
+ /* send 4th cmd */
/*
* 16.0 CTL 21 22 03 00 00 00 00 00
*/
@@ -95,7 +94,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
- /* send 5 cmd */
+ /* send 5th cmd */
/*
* 16.0 CTL a1 21 00 00 00 00 07 00 CLASS 33.1.0
* 16.0 DI 80 25 00 00 00 00 08
@@ -107,7 +106,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
- /* send 6 cmd */
+ /* send 6th cmd */
/*
* 16.0 CTL 21 20 00 00 00 00 07 00 CLASS 34.1.0
* 16.0 DO 80 25 00 00 00 00 08
@@ -195,7 +194,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
- /* send 4 cmd */
+ /* send 4th cmd */
/*
* 16.0 CTL 21 20 00 00 00 00 07 00 CLASS 30.1.0
* 16.0 DO 00 c2 01 00 00 00 08 .%..... 30.2.0
@@ -214,7 +213,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
- /* send 5 cmd */
+ /* send 5th cmd */
/*
* 16.0 CTL 21 22 03 00 00 00 00 00
*/
@@ -225,7 +224,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
- /* send 6 cmd */
+ /* send 6th cmd */
/*
* 16.0 CTL a1 21 00 00 00 00 07 00 CLASS 33.1.0
* 16.0 DI 00 c2 01 00 00 00 08
@@ -237,7 +236,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
- /* send 7 cmd */
+ /* send 7th cmd */
/*
* 16.0 CTL 21 20 00 00 00 00 07 00 CLASS 354.1.0
* 16.0 DO 00 c2 01 00 00 00 08 ....... 354.2.0
@@ -256,7 +255,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
- /* send 8 cmd */
+ /* send 8th cmd */
/*
* 16.0 CTL 21 22 03 00 00 00 00 00
*/
@@ -281,8 +280,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x19d2, 0xfffd) },
{ USB_DEVICE(0x19d2, 0xfffc) },
{ USB_DEVICE(0x19d2, 0xfffb) },
- /* AC2726, AC8710_V3 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
+ /* AC8710_V3 */
{ USB_DEVICE(0x19d2, 0xfff6) },
{ USB_DEVICE(0x19d2, 0xfff7) },
{ USB_DEVICE(0x19d2, 0xfff8) },
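The entry removed above matched on interface class/subclass/protocol as well as VID:PID, unlike the plain USB_DEVICE() entries around it. A short sketch contrasting the two match styles, using placeholder product IDs; the semantics are those of the macros in <linux/usb.h>:

#include <linux/usb.h>

/* USB_DEVICE() claims every interface of the given VID:PID, while
 * USB_DEVICE_AND_INTERFACE_INFO() additionally requires the interface
 * class/subclass/protocol to match (0xff/0xff/0xff = vendor specific),
 * letting two drivers share one device.  IDs below are placeholders. */
static const struct usb_device_id example_ids[] = {
	{ USB_DEVICE(0x19d2, 0x0001) },
	{ USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0x0002, 0xff, 0xff, 0xff) },
	{ }	/* terminator */
};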
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 8470e1b114f2..1dd0604d1911 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -18,7 +18,9 @@ config USB_STORAGE
This option depends on 'SCSI' support being enabled, but you
probably also need 'SCSI device support: SCSI disk support'
- (BLK_DEV_SD) for most USB storage devices.
+ (BLK_DEV_SD) for most USB storage devices. Some devices will
+ also require 'Probe all LUNs on each SCSI device'
+ (SCSI_MULTI_LUN).
To compile this driver as a module, choose M here: the
module will be called usb-storage.
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 26964895c88b..74e2aa23b045 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/input.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb/input.h>
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index 5dfb4c36a1b0..12e3c2fac642 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -135,69 +135,42 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
unsigned int buflen, struct scsi_cmnd *srb, struct scatterlist **sgptr,
unsigned int *offset, enum xfer_buf_dir dir)
{
- unsigned int cnt;
+ unsigned int cnt = 0;
struct scatterlist *sg = *sgptr;
+ struct sg_mapping_iter miter;
+ unsigned int nents = scsi_sg_count(srb);
- /* We have to go through the list one entry
- * at a time. Each s-g entry contains some number of pages, and
- * each page has to be kmap()'ed separately. If the page is already
- * in kernel-addressable memory then kmap() will return its address.
- * If the page is not directly accessible -- such as a user buffer
- * located in high memory -- then kmap() will map it to a temporary
- * position in the kernel's virtual address space.
- */
-
- if (!sg)
+ if (sg)
+ nents = sg_nents(sg);
+ else
sg = scsi_sglist(srb);
- /* This loop handles a single s-g list entry, which may
- * include multiple pages. Find the initial page structure
- * and the starting offset within the page, and update
- * the *offset and **sgptr values for the next loop.
- */
- cnt = 0;
- while (cnt < buflen && sg) {
- struct page *page = sg_page(sg) +
- ((sg->offset + *offset) >> PAGE_SHIFT);
- unsigned int poff = (sg->offset + *offset) & (PAGE_SIZE-1);
- unsigned int sglen = sg->length - *offset;
-
- if (sglen > buflen - cnt) {
-
- /* Transfer ends within this s-g entry */
- sglen = buflen - cnt;
- *offset += sglen;
- } else {
+ sg_miter_start(&miter, sg, nents, dir == FROM_XFER_BUF ?
+ SG_MITER_FROM_SG : SG_MITER_TO_SG);
- /* Transfer continues to next s-g entry */
- *offset = 0;
- sg = sg_next(sg);
- }
+ if (!sg_miter_skip(&miter, *offset))
+ return cnt;
+
+ while (sg_miter_next(&miter) && cnt < buflen) {
+ unsigned int len = min_t(unsigned int, miter.length,
+ buflen - cnt);
+
+ if (dir == FROM_XFER_BUF)
+ memcpy(buffer + cnt, miter.addr, len);
+ else
+ memcpy(miter.addr, buffer + cnt, len);
- /* Transfer the data for all the pages in this
- * s-g entry. For each page: call kmap(), do the
- * transfer, and call kunmap() immediately after. */
- while (sglen > 0) {
- unsigned int plen = min(sglen, (unsigned int)
- PAGE_SIZE - poff);
- unsigned char *ptr = kmap(page);
-
- if (dir == TO_XFER_BUF)
- memcpy(ptr + poff, buffer + cnt, plen);
- else
- memcpy(buffer + cnt, ptr + poff, plen);
- kunmap(page);
-
- /* Start at the beginning of the next page */
- poff = 0;
- ++page;
- cnt += plen;
- sglen -= plen;
+ if (*offset + len < miter.piter.sg->length) {
+ *offset += len;
+ *sgptr = miter.piter.sg;
+ } else {
+ *offset = 0;
+ *sgptr = sg_next(miter.piter.sg);
}
+ cnt += len;
}
- *sgptr = sg;
+ sg_miter_stop(&miter);
- /* Return the amount actually transferred */
return cnt;
}
EXPORT_SYMBOL_GPL(usb_stor_access_xfer_buf);
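The rewritten usb_stor_access_xfer_buf() above replaces the open-coded page walk and kmap()/kunmap() calls with the sg_mapping_iter helpers, which handle highmem mapping internally. A stand-alone sketch of the same pattern, copying a linear kernel buffer into a scatterlist; the function and variable names are illustrative:

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Copy "buflen" bytes from "buffer" into the scatterlist "sgl" with
 * "nents" entries, using the sg_miter API as the code above now does. */
static unsigned int example_copy_to_sg(const unsigned char *buffer,
				       unsigned int buflen,
				       struct scatterlist *sgl,
				       unsigned int nents)
{
	struct sg_mapping_iter miter;
	unsigned int copied = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
	while (sg_miter_next(&miter) && copied < buflen) {
		unsigned int len = min_t(unsigned int, miter.length,
					 buflen - copied);

		memcpy(miter.addr, buffer + copied, len);
		copied += len;
	}
	sg_miter_stop(&miter);

	return copied;	/* bytes actually transferred */
}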
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 18509e6c21ab..9d38ddc8da49 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -78,6 +78,8 @@ static const char* host_info(struct Scsi_Host *host)
static int slave_alloc (struct scsi_device *sdev)
{
+ struct us_data *us = host_to_us(sdev->host);
+
/*
* Set the INQUIRY transfer length to 36. We don't use any of
* the extra data and many devices choke if asked for more or
@@ -102,6 +104,10 @@ static int slave_alloc (struct scsi_device *sdev)
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
+ /* Tell the SCSI layer if we know there is more than one LUN */
+ if (us->protocol == USB_PR_BULK && us->max_lun > 0)
+ sdev->sdev_bflags |= BLIST_FORCELUN;
+
return 0;
}
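Together with the Kconfig note above, this hunk makes usb-storage ask the SCSI midlayer to scan every LUN of a bulk-only device that reported max_lun > 0, instead of relying on the INQUIRY-based heuristics. A reduced sketch of the callback in isolation; host_to_us(), struct us_data, USB_PR_BULK and BLIST_FORCELUN are the usb-storage/SCSI symbols used in the hunk, the function name is illustrative:

#include <scsi/scsi_device.h>
#include <scsi/scsi_devinfo.h>	/* BLIST_FORCELUN */
#include "usb.h"		/* usb-storage: struct us_data, host_to_us() */

static int example_slave_alloc(struct scsi_device *sdev)
{
	struct us_data *us = host_to_us(sdev->host);

	/* Bulk-only devices report their highest LUN via GET MAX LUN;
	 * force the midlayer to probe all of them. */
	if (us->protocol == USB_PR_BULK && us->max_lun > 0)
		sdev->sdev_bflags |= BLIST_FORCELUN;

	return 0;
}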
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index 65a6a75066a8..82e8ed0324e3 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
"Cypress ISD-300LP",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219,
+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
"Super Top",
"USB 2.0 SATA BRIDGE",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index de32cfa5bfa6..adbeb255616a 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -234,6 +234,13 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64 ),
+/* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */
+UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110,
+ "Nokia",
+ "502",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
#ifdef NO_SDDR09
UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100,
"Microtech",
@@ -1448,6 +1455,13 @@ UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
+/* Reported by Moritz Moeller-Herrmann <moritz-kernel@moeller-herrmann.de> */
+UNUSUAL_DEV( 0x0fca, 0x8004, 0x0201, 0x0201,
+ "Research In Motion",
+ "BlackBerry Bold 9000",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
/* Reported by Michael Stattmann <michael@stattmann.com> */
UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
"Sony Ericsson",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 5c4fe0749af1..1c0b89f2a138 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -53,7 +53,6 @@
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index ff97652343a3..545d09b8081d 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index f06ed82e63d1..da1b872918b5 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -144,7 +144,7 @@ static int cbaf_check(struct cbaf *cbaf)
CBAF_REQ_GET_ASSOCIATION_INFORMATION,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- cbaf->buffer, cbaf->buffer_size, 1000 /* FIXME: arbitrary */);
+ cbaf->buffer, cbaf->buffer_size, USB_CTRL_GET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Cannot get available association types: %d\n",
result);
@@ -184,7 +184,7 @@ static int cbaf_check(struct cbaf *cbaf)
assoc_request = itr;
if (top - itr < sizeof(*assoc_request)) {
- dev_err(dev, "Not enough data to decode associaton "
+ dev_err(dev, "Not enough data to decode association "
"request (%zu vs %zu bytes needed)\n",
top - itr, sizeof(*assoc_request));
break;
@@ -235,7 +235,7 @@ static int cbaf_check(struct cbaf *cbaf)
static const struct wusb_cbaf_host_info cbaf_host_info_defaults = {
.AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
- .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
+ .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
.AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_RETRIEVE_HOST_INFO),
.CHID_hdr = WUSB_AR_CHID,
@@ -260,12 +260,13 @@ static int cbaf_send_host_info(struct cbaf *cbaf)
hi->HostFriendlyName_hdr.len = cpu_to_le16(name_len);
hi_size = sizeof(*hi) + name_len;
- return usb_control_msg(cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0),
+ return usb_control_msg(cbaf->usb_dev,
+ usb_sndctrlpipe(cbaf->usb_dev, 0),
CBAF_REQ_SET_ASSOCIATION_RESPONSE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0x0101,
cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- hi, hi_size, 1000 /* FIXME: arbitrary */);
+ hi, hi_size, USB_CTRL_SET_TIMEOUT);
}
/*
@@ -288,9 +289,10 @@ static int cbaf_cdid_get(struct cbaf *cbaf)
CBAF_REQ_GET_ASSOCIATION_REQUEST,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0x0200, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- di, cbaf->buffer_size, 1000 /* FIXME: arbitrary */);
+ di, cbaf->buffer_size, USB_CTRL_GET_TIMEOUT);
if (result < 0) {
- dev_err(dev, "Cannot request device information: %d\n", result);
+ dev_err(dev, "Cannot request device information: %d\n",
+ result);
return result;
}
@@ -491,11 +493,11 @@ static DEVICE_ATTR(wusb_device_name, 0600, cbaf_wusb_device_name_show, NULL);
static const struct wusb_cbaf_cc_data cbaf_cc_data_defaults = {
.AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
- .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
+ .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
.AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_ASSOCIATE),
.Length_hdr = WUSB_AR_Length,
- .Length = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)),
+ .Length = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)),
.ConnectionContext_hdr = WUSB_AR_ConnectionContext,
.BandGroups_hdr = WUSB_AR_BandGroups,
};
@@ -536,7 +538,7 @@ static int cbaf_cc_upload(struct cbaf *cbaf)
CBAF_REQ_SET_ASSOCIATION_RESPONSE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0x0201, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- ccd, sizeof(*ccd), 1000 /* FIXME: arbitrary */);
+ ccd, sizeof(*ccd), USB_CTRL_SET_TIMEOUT);
return result;
}
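Throughout the wusbcore changes in this patch, hard-coded 1000 ms (and HZ/10) control-transfer timeouts are replaced by USB_CTRL_GET_TIMEOUT and USB_CTRL_SET_TIMEOUT from <linux/usb.h> (both 5000 ms), removing the FIXME-marked magic numbers. A minimal sketch of an interface-directed control read using the constant; the request value is a placeholder:

#include <linux/usb.h>

#define EXAMPLE_REQ_GET_STATUS	0x01	/* hypothetical class request */

static int example_get_status(struct usb_device *udev, u8 iface_num,
			      void *buf, u16 len)
{
	/* Class-specific IN transfer with the standard control timeout
	 * instead of an arbitrary per-driver value. */
	return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			       EXAMPLE_REQ_GET_STATUS,
			       USB_DIR_IN | USB_TYPE_CLASS |
					USB_RECIP_INTERFACE,
			       0, iface_num, buf, len,
			       USB_CTRL_GET_TIMEOUT);
}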
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 7e4bf95f8f7b..9a95b2dc6d1b 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -87,7 +87,7 @@ struct aes_ccm_block {
* B1 contains l(a), the MAC header, the encryption offset and padding.
*
* If EO is nonzero, additional blocks are built from payload bytes
- * until EO is exahusted (FIXME: padding to 16 bytes, I guess). The
+ * until EO is exhausted (FIXME: padding to 16 bytes, I guess). The
* padding is not xmitted.
*/
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index f14e7929ba22..3b959e83b28e 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -265,9 +265,9 @@ static void wusbhc_devconnect_acked_work(struct work_struct *work)
* Addresses: because WUSB hosts have no downstream hubs, we can do a
* 1:1 mapping between 'port number' and device
* address. This simplifies many things, as during this
- * initial connect phase the USB stack has no knoledge of
+ * initial connect phase the USB stack has no knowledge of
* the device and hasn't assigned an address yet--we know
- * USB's choose_address() will use the same euristics we
+ * USB's choose_address() will use the same heuristics we
* use here, so we can assume which address will be assigned.
*
* USB stack always assigns address 1 to the root hub, so
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
index b71760c8d3ad..44741267c917 100644
--- a/drivers/usb/wusbcore/mmc.c
+++ b/drivers/usb/wusbcore/mmc.c
@@ -206,13 +206,15 @@ int wusbhc_start(struct wusbhc *wusbhc)
result = wusbhc_devconnect_start(wusbhc);
if (result < 0) {
- dev_err(dev, "error enabling device connections: %d\n", result);
+ dev_err(dev, "error enabling device connections: %d\n",
+ result);
goto error_devconnect_start;
}
result = wusbhc_sec_start(wusbhc);
if (result < 0) {
- dev_err(dev, "error starting security in the HC: %d\n", result);
+ dev_err(dev, "error starting security in the HC: %d\n",
+ result);
goto error_sec_start;
}
@@ -284,7 +286,8 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
wusbhc->uwb_rc = uwb_rc_get_by_grandpa(wusbhc->dev->parent);
if (wusbhc->uwb_rc == NULL) {
result = -ENODEV;
- dev_err(wusbhc->dev, "Cannot get associated UWB Host Controller\n");
+ dev_err(wusbhc->dev,
+ "Cannot get associated UWB Host Controller\n");
goto error_rc_get;
}
diff --git a/drivers/usb/wusbcore/pal.c b/drivers/usb/wusbcore/pal.c
index 59e100c2eb50..090f27371a8f 100644
--- a/drivers/usb/wusbcore/pal.c
+++ b/drivers/usb/wusbcore/pal.c
@@ -22,6 +22,7 @@ static void wusbhc_channel_changed(struct uwb_pal *pal, int channel)
{
struct wusbhc *wusbhc = container_of(pal, struct wusbhc, pal);
+ dev_dbg(wusbhc->dev, "%s: channel = %d\n", __func__, channel);
if (channel < 0)
wusbhc_stop(wusbhc);
else
diff --git a/drivers/usb/wusbcore/reservation.c b/drivers/usb/wusbcore/reservation.c
index ead79f793927..d5efd0f07d2b 100644
--- a/drivers/usb/wusbcore/reservation.c
+++ b/drivers/usb/wusbcore/reservation.c
@@ -51,6 +51,7 @@ static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv)
struct uwb_mas_bm mas;
char buf[72];
+ dev_dbg(dev, "%s: state = %d\n", __func__, rsv->state);
switch (rsv->state) {
case UWB_RSV_STATE_O_ESTABLISHED:
uwb_rsv_get_usable_mas(rsv, &mas);
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 4c40d0dbf53d..95be9953cd47 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -33,7 +33,8 @@ static void wusbhc_gtk_rekey_work(struct work_struct *work);
int wusbhc_sec_create(struct wusbhc *wusbhc)
{
- wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data);
+ wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) +
+ sizeof(wusbhc->gtk.data);
wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY;
wusbhc->gtk.descr.bReserved = 0;
wusbhc->gtk_index = 0;
@@ -56,7 +57,7 @@ void wusbhc_sec_destroy(struct wusbhc *wusbhc)
* @wusb_dev: the device whose PTK the TKID is for
* (or NULL for a TKID for a GTK)
*
- * The generated TKID consist of two parts: the device's authenicated
+ * The generated TKID consists of two parts: the device's authenticated
* address (or 0 or a GTK); and an incrementing number. This ensures
* that TKIDs cannot be shared between devices and by the time the
* incrementing number wraps around the older TKIDs will no longer be
@@ -138,7 +139,7 @@ const char *wusb_et_name(u8 x)
case USB_ENC_TYPE_WIRED: return "wired";
case USB_ENC_TYPE_CCM_1: return "CCM-1";
case USB_ENC_TYPE_RSA_1: return "RSA-1";
- default: return "unknown";
+ default: return "unknown";
}
}
EXPORT_SYMBOL_GPL(wusb_et_name);
@@ -165,7 +166,7 @@ static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value)
result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_ENCRYPTION,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- value, 0, NULL, 0, 1000 /* FIXME: arbitrary */);
+ value, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(dev, "Can't set device's WUSB encryption to "
"%s (value %d): %d\n",
@@ -191,7 +192,7 @@ static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
USB_DT_KEY << 8 | key_index, 0,
&wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
- 1000);
+ USB_CTRL_SET_TIMEOUT);
}
@@ -222,7 +223,8 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc,
secd_size = le16_to_cpu(secd->wTotalLength);
new_secd = krealloc(secd, secd_size, GFP_KERNEL);
if (new_secd == NULL) {
- dev_err(dev, "Can't allocate space for security descriptors\n");
+ dev_err(dev,
+ "Can't allocate space for security descriptors\n");
goto out;
}
secd = new_secd;
@@ -301,8 +303,9 @@ int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
/* Set address 0 */
result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_ADDRESS, 0,
- 0, 0, NULL, 0, 1000 /* FIXME: arbitrary */);
+ USB_REQ_SET_ADDRESS,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "auth failed: can't set address 0: %d\n",
result);
@@ -316,9 +319,10 @@ int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
/* Set new (authenticated) address. */
result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_ADDRESS, 0,
- new_address, 0, NULL, 0,
- 1000 /* FIXME: arbitrary */);
+ USB_REQ_SET_ADDRESS,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ new_address, 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "auth failed: can't set address %u: %d\n",
new_address, result);
@@ -375,13 +379,13 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
hs[0].bReserved = 0;
memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID));
get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce));
- memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */
+ memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */
result = usb_control_msg(
usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_HANDSHAKE,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- 1, 0, &hs[0], sizeof(hs[0]), 1000 /* FIXME: arbitrary */);
+ 1, 0, &hs[0], sizeof(hs[0]), USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Handshake1: request failed: %d\n", result);
goto error_hs1;
@@ -392,7 +396,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
usb_dev, usb_rcvctrlpipe(usb_dev, 0),
USB_REQ_GET_HANDSHAKE,
USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- 2, 0, &hs[1], sizeof(hs[1]), 1000 /* FIXME: arbitrary */);
+ 2, 0, &hs[1], sizeof(hs[1]), USB_CTRL_GET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Handshake2: request failed: %d\n", result);
goto error_hs2;
@@ -422,7 +426,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
}
/* Setup the CCM nonce */
- memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */
+ memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */
memcpy(ccm_n.tkid, &tkid_le, sizeof(ccm_n.tkid));
ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr;
ccm_n.dest_addr.data[0] = wusb_dev->addr;
@@ -469,7 +473,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_HANDSHAKE,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- 3, 0, &hs[2], sizeof(hs[2]), 1000 /* FIXME: arbitrary */);
+ 3, 0, &hs[2], sizeof(hs[2]), USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Handshake3: request failed: %d\n", result);
goto error_hs3;
@@ -553,11 +557,13 @@ static void wusbhc_gtk_rekey_work(struct work_struct *work)
list_for_each_entry_safe(wusb_dev, wusb_dev_next, &rekey_list,
rekey_node) {
list_del_init(&wusb_dev->rekey_node);
- dev_dbg(&wusb_dev->usb_dev->dev, "%s: rekey device at port %d\n",
+ dev_dbg(&wusb_dev->usb_dev->dev,
+ "%s: rekey device at port %d\n",
__func__, wusb_dev->port_idx);
if (wusb_dev_set_gtk(wusbhc, wusb_dev) < 0) {
- dev_err(&wusb_dev->usb_dev->dev, "%s: rekey device at port %d failed\n",
+ dev_err(&wusb_dev->usb_dev->dev,
+ "%s: rekey device at port %d failed\n",
__func__, wusb_dev->port_idx);
}
wusb_dev_put(wusb_dev);
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
index e614f02f0cf2..a2ef84b8397e 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -36,7 +36,7 @@
*
* hcd glue with the USB API Host Controller Interface API.
*
- * nep Notification EndPoint managent: collect notifications
+ * nep Notification EndPoint management: collect notifications
* and queue them with the workqueue daemon.
*
* Handle notifications as coming from the NEP. Sends them
@@ -144,7 +144,7 @@ enum wa_quirks {
*
* @wa_descr Can be accessed without locking because it is in
* the same area where the device descriptors were
- * read, so it is guaranteed to exist umodified while
+ * read, so it is guaranteed to exist unmodified while
* the device exists.
*
* Endianess has been converted to CPU's.
@@ -167,8 +167,8 @@ enum wa_quirks {
* submitted from an atomic context).
*
* FIXME: this needs to be layered up: a wusbhc layer (for sharing
- * comonalities with WHCI), a wa layer (for sharing
- * comonalities with DWA-RC).
+ * commonalities with WHCI), a wa layer (for sharing
+ * commonalities with DWA-RC).
*/
struct wahc {
struct usb_device *usb_dev;
@@ -197,10 +197,10 @@ struct wahc {
struct mutex rpipe_mutex; /* assigning resources to endpoints */
/*
- * dti_state is used to track the state of the dti_urb. When dti_state
+ * dti_state is used to track the state of the dti_urb. When dti_state
* is WA_DTI_ISOC_PACKET_STATUS_PENDING, dti_isoc_xfer_in_progress and
- * dti_isoc_xfer_seg identify which xfer the incoming isoc packet status
- * refers to.
+ * dti_isoc_xfer_seg identify which xfer the incoming isoc packet
+ * status refers to.
*/
enum wa_dti_state dti_state;
u32 dti_isoc_xfer_in_progress;
@@ -211,7 +211,7 @@ struct wahc {
void *dti_buf;
size_t dti_buf_size;
- unsigned long dto_in_use; /* protect dto endoint serialization. */
+ unsigned long dto_in_use; /* protect dto endpoint serialization */
s32 status; /* For reading status */
@@ -332,7 +332,7 @@ static inline int rpipe_avail_inc(struct wa_rpipe *rpipe)
/* Transferring data */
extern int wa_urb_enqueue(struct wahc *, struct usb_host_endpoint *,
struct urb *, gfp_t);
-extern int wa_urb_dequeue(struct wahc *, struct urb *);
+extern int wa_urb_dequeue(struct wahc *, struct urb *, int);
extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *);
@@ -345,7 +345,7 @@ extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *);
* it...no RC specific function is called...unless I miss
* something.
*
- * FIXME: has to go away in favour of an 'struct' hcd based sollution
+ * FIXME: has to go away in favour of a 'struct' hcd based solution
*/
static inline struct wahc *wa_get(struct wahc *wa)
{
@@ -366,7 +366,7 @@ static inline int __wa_feature(struct wahc *wa, unsigned op, u16 feature)
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
feature,
wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
}
@@ -400,8 +400,7 @@ s32 __wa_get_status(struct wahc *wa)
USB_REQ_GET_STATUS,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- &wa->status, sizeof(wa->status),
- 1000 /* FIXME: arbitrary */);
+ &wa->status, sizeof(wa->status), USB_CTRL_GET_TIMEOUT);
if (result >= 0)
result = wa->status;
return result;
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c
index ada4e0870623..60a10d21947d 100644
--- a/drivers/usb/wusbcore/wa-nep.c
+++ b/drivers/usb/wusbcore/wa-nep.c
@@ -69,8 +69,8 @@ struct wa_notif_work {
* [the wuswad daemon, basically]
*
* @_nw: Pointer to a descriptor which has the pointer to the
- * @wa, the size of the buffer and the work queue
- * structure (so we can free all when done).
+ * @wa, the size of the buffer and the work queue
+ * structure (so we can free all when done).
* @returns 0 if ok, < 0 errno code on error.
*
* All notifications follow the same format; they need to start with a
@@ -93,7 +93,8 @@ static void wa_notif_dispatch(struct work_struct *ws)
{
void *itr;
u8 missing = 0;
- struct wa_notif_work *nw = container_of(ws, struct wa_notif_work, work);
+ struct wa_notif_work *nw = container_of(ws, struct wa_notif_work,
+ work);
struct wahc *wa = nw->wa;
struct wa_notif_hdr *notif_hdr;
size_t size;
@@ -271,7 +272,8 @@ int wa_nep_create(struct wahc *wa, struct usb_interface *iface)
wa->nep_buffer_size = 1024;
wa->nep_buffer = kmalloc(wa->nep_buffer_size, GFP_KERNEL);
if (wa->nep_buffer == NULL) {
- dev_err(dev, "Unable to allocate notification's read buffer\n");
+ dev_err(dev,
+ "Unable to allocate notification's read buffer\n");
goto error_nep_buffer;
}
wa->nep_urb = usb_alloc_urb(0, GFP_KERNEL);
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index b48e74cc54d7..6ca80a4efc1b 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -57,7 +57,6 @@
* urb->dev->devnum, to make sure that we always have the right
* destination address.
*/
-#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
@@ -80,7 +79,7 @@ static int __rpipe_get_descr(struct wahc *wa,
USB_REQ_GET_DESCRIPTOR,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE,
USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
- 1000 /* FIXME: arbitrary */);
+ USB_CTRL_GET_TIMEOUT);
if (result < 0) {
dev_err(dev, "rpipe %u: get descriptor failed: %d\n",
index, (int)result);
@@ -118,7 +117,7 @@ static int __rpipe_set_descr(struct wahc *wa,
USB_REQ_SET_DESCRIPTOR,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
- HZ / 10);
+ USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "rpipe %u: set descriptor failed: %d\n",
index, (int)result);
@@ -184,7 +183,7 @@ EXPORT_SYMBOL_GPL(rpipe_destroy);
/*
* Locate an idle rpipe, create an structure for it and return it
*
- * @wa is referenced and unlocked
+ * @wa is referenced and unlocked
* @crs enum rpipe_attr, required endpoint characteristics
*
* The rpipe can be used only sequentially (not in parallel).
@@ -237,7 +236,7 @@ static int __rpipe_reset(struct wahc *wa, unsigned index)
wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_RPIPE_RESET,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
- 0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
+ 0, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(dev, "rpipe %u: reset failed: %d\n",
index, result);
@@ -308,7 +307,7 @@ out:
/*
* Aim an rpipe to its device & endpoint destination
*
- * Make sure we change the address to unauthenticathed if the device
+ * Make sure we change the address to unauthenticated if the device
* is WUSB and it is not authenticated.
*/
static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
@@ -329,7 +328,8 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
}
unauth = usb_dev->wusb && !usb_dev->authenticated ? 0x80 : 0;
__rpipe_reset(wa, le16_to_cpu(rpipe->descr.wRPipeIndex));
- atomic_set(&rpipe->segs_available, le16_to_cpu(rpipe->descr.wRequests));
+ atomic_set(&rpipe->segs_available,
+ le16_to_cpu(rpipe->descr.wRequests));
/* FIXME: block allocation system; request with queuing and timeout */
/* FIXME: compute so seg_size > ep->maxpktsize */
rpipe->descr.wBlocks = cpu_to_le16(16); /* given */
@@ -527,7 +527,7 @@ void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
USB_REQ_RPIPE_ABORT,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
- 0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
+ 0, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
rpipe_put(rpipe);
}
mutex_unlock(&wa->rpipe_mutex);
@@ -548,9 +548,8 @@ void rpipe_clear_feature_stalled(struct wahc *wa, struct usb_host_endpoint *ep)
wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
USB_REQ_CLEAR_FEATURE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
- RPIPE_STALL, index, NULL, 0, 1000);
+ RPIPE_STALL, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
}
mutex_unlock(&wa->rpipe_mutex);
}
EXPORT_SYMBOL_GPL(rpipe_clear_feature_stalled);
-
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index ed5abe87b049..3cd96e936d77 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -79,7 +79,6 @@
* availability of the different required components (blocks,
* rpipes, segment slots, etc), we go scheduling them. Painful.
*/
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
@@ -124,6 +123,8 @@ struct wa_seg {
u8 index; /* which segment we are */
int isoc_frame_count; /* number of isoc frames in this segment. */
int isoc_frame_offset; /* starting frame offset in the xfer URB. */
+ /* Isoc frame that the current transfer buffer corresponds to. */
+ int isoc_frame_index;
int isoc_size; /* size of all isoc frames sent by this seg. */
enum wa_seg_status status;
ssize_t result; /* bytes xfered or error */
@@ -158,8 +159,6 @@ struct wa_xfer {
unsigned is_dma:1;
size_t seg_size;
int result;
- /* Isoc frame that the current transfer buffer corresponds to. */
- int dto_isoc_frame_index;
gfp_t gfp; /* allocation mask */
@@ -282,6 +281,7 @@ static void wa_xfer_giveback(struct wa_xfer *xfer)
spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
list_del_init(&xfer->list_node);
+ usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);
spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
/* FIXME: segmentation broken -- kills DWA */
wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
@@ -372,10 +372,10 @@ static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
seg->result);
goto out;
case WA_SEG_ABORTED:
- dev_dbg(dev, "xfer %p ID %08X#%u ABORTED: result %d\n",
- xfer, wa_xfer_id(xfer), seg->index,
- urb->status);
- xfer->result = urb->status;
+ xfer->result = seg->result;
+ dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zu(0x%08zX)\n",
+ xfer, wa_xfer_id(xfer), seg->index, seg->result,
+ seg->result);
goto out;
default:
dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
@@ -487,13 +487,14 @@ static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
&& ((segment_size + iso_frame_desc[index].length)
<= xfer->seg_size)) {
/*
- * For Alereon HWA devices, only include an isoc frame in a
- * segment if it is physically contiguous with the previous
+ * For Alereon HWA devices, only include an isoc frame in an
+ * out segment if it is physically contiguous with the previous
* frame. This is required because those devices expect
* the isoc frames to be sent as a single USB transaction as
* opposed to one transaction per frame with standard HWA.
*/
if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
+ && (xfer->is_inbound == 0)
&& (index > isoc_frame_offset)
&& ((iso_frame_desc[index - 1].offset +
iso_frame_desc[index - 1].length) !=
@@ -536,14 +537,8 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
result = sizeof(struct wa_xfer_bi);
break;
case USB_ENDPOINT_XFER_ISOC:
- if (usb_pipeout(urb->pipe)) {
- *pxfer_type = WA_XFER_TYPE_ISO;
- result = sizeof(struct wa_xfer_hwaiso);
- } else {
- dev_err(dev, "FIXME: ISOC IN not implemented\n");
- result = -ENOSYS;
- goto error;
- }
+ *pxfer_type = WA_XFER_TYPE_ISO;
+ result = sizeof(struct wa_xfer_hwaiso);
break;
default:
/* never happens */
@@ -554,10 +549,22 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
+ xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
+ * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
+ /* Compute the segment size and make sure it is a multiple of
+ * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
+ * a check (FIXME) */
+ if (xfer->seg_size < maxpktsize) {
+ dev_err(dev,
+ "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
+ xfer->seg_size, maxpktsize);
+ result = -EINVAL;
+ goto error;
+ }
+ xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
int index = 0;
- xfer->seg_size = maxpktsize;
xfer->segs = 0;
/*
* loop over urb->number_of_packets to determine how many
@@ -570,19 +577,6 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
++xfer->segs;
}
} else {
- xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
- * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
- /* Compute the segment size and make sure it is a multiple of
- * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
- * a check (FIXME) */
- if (xfer->seg_size < maxpktsize) {
- dev_err(dev,
- "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
- xfer->seg_size, maxpktsize);
- result = -EINVAL;
- goto error;
- }
- xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
xfer->seg_size);
if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
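The block moved above computes the segment size from the RPipe descriptor (wBlocks times 2^(bRPipeBlockSize - 1) bytes) and rounds it down to a multiple of the endpoint's maximum packet size per WUSB 1.0 sec. 8.3.3.1; it now runs for isochronous pipes as well. A small worked sketch with made-up descriptor values:

#include <linux/types.h>

/* Example: wblocks = 16, rpipe_block_size = 10 (block = 512 bytes),
 * maxpktsize = 3584  =>  raw size 8192, rounded down to 7168. */
static size_t example_seg_size(u16 wblocks, u8 rpipe_block_size,
			       size_t maxpktsize)
{
	size_t seg_size = wblocks * (1 << (rpipe_block_size - 1));

	if (seg_size < maxpktsize)
		return 0;	/* caller treats this as -EINVAL */

	return (seg_size / maxpktsize) * maxpktsize;
}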
@@ -700,23 +694,23 @@ static void wa_seg_dto_cb(struct urb *urb)
if (usb_pipeisoc(xfer->urb->pipe)) {
/* Alereon HWA sends all isoc frames in a single transfer. */
if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
- xfer->dto_isoc_frame_index += seg->isoc_frame_count;
+ seg->isoc_frame_index += seg->isoc_frame_count;
else
- xfer->dto_isoc_frame_index += 1;
- if (xfer->dto_isoc_frame_index < seg->isoc_frame_count) {
+ seg->isoc_frame_index += 1;
+ if (seg->isoc_frame_index < seg->isoc_frame_count) {
data_send_done = 0;
holding_dto = 1; /* checked in error cases. */
/*
* if this is the last isoc frame of the segment, we
* can release DTO after sending this frame.
*/
- if ((xfer->dto_isoc_frame_index + 1) >=
+ if ((seg->isoc_frame_index + 1) >=
seg->isoc_frame_count)
release_dto = 1;
}
dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
- wa_xfer_id(xfer), seg->index,
- xfer->dto_isoc_frame_index, holding_dto, release_dto);
+ wa_xfer_id(xfer), seg->index, seg->isoc_frame_index,
+ holding_dto, release_dto);
}
spin_unlock_irqrestore(&xfer->lock, flags);
@@ -736,8 +730,7 @@ static void wa_seg_dto_cb(struct urb *urb)
* send the URB and release DTO if we no longer need it.
*/
__wa_populate_dto_urb_isoc(xfer, seg,
- seg->isoc_frame_offset +
- xfer->dto_isoc_frame_index);
+ seg->isoc_frame_offset + seg->isoc_frame_index);
/* resubmit the URB with the next isoc frame. */
result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
@@ -844,7 +837,7 @@ static void wa_seg_iso_pack_desc_cb(struct urb *urb)
wa_xfer_id(xfer), seg->index, urb->status);
if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
EDC_ERROR_TIMEFRAME)){
- dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
+ dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
wa_reset_all(wa);
}
if (seg->status != WA_SEG_ERROR) {
@@ -1108,7 +1101,7 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
struct wa_seg *seg;
size_t buf_itr, buf_size, buf_itr_size;
- int xfer_isoc_frame_offset = 0;
+ int isoc_frame_offset = 0;
result = -ENOMEM;
xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
@@ -1121,10 +1114,14 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
size_t iso_pkt_descr_size = 0;
int seg_isoc_frame_count = 0, seg_isoc_size = 0;
+ /*
+ * Adjust the size of the segment object to contain space for
+ * the isoc packet descriptor buffer.
+ */
if (usb_pipeisoc(xfer->urb->pipe)) {
seg_isoc_frame_count =
__wa_seg_calculate_isoc_frame_count(xfer,
- xfer_isoc_frame_offset, &seg_isoc_size);
+ isoc_frame_offset, &seg_isoc_size);
iso_pkt_descr_size =
sizeof(struct wa_xfer_packet_info_hwaiso) +
@@ -1137,15 +1134,40 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
wa_seg_init(seg);
seg->xfer = xfer;
seg->index = cnt;
- seg->isoc_frame_count = seg_isoc_frame_count;
- seg->isoc_frame_offset = xfer_isoc_frame_offset;
- seg->isoc_size = seg_isoc_size;
usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
usb_sndbulkpipe(usb_dev,
dto_epd->bEndpointAddress),
&seg->xfer_hdr, xfer_hdr_size,
wa_seg_tr_cb, seg);
buf_itr_size = min(buf_size, xfer->seg_size);
+
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ seg->isoc_frame_count = seg_isoc_frame_count;
+ seg->isoc_frame_offset = isoc_frame_offset;
+ seg->isoc_size = seg_isoc_size;
+ /* iso packet descriptor. */
+ seg->isoc_pack_desc_urb =
+ usb_alloc_urb(0, GFP_ATOMIC);
+ if (seg->isoc_pack_desc_urb == NULL)
+ goto error_iso_pack_desc_alloc;
+ /*
+ * The buffer for the isoc packet descriptor starts
+ * after the transfer request header in the
+ * segment object memory buffer.
+ */
+ usb_fill_bulk_urb(
+ seg->isoc_pack_desc_urb, usb_dev,
+ usb_sndbulkpipe(usb_dev,
+ dto_epd->bEndpointAddress),
+ (void *)(&seg->xfer_hdr) +
+ xfer_hdr_size,
+ iso_pkt_descr_size,
+ wa_seg_iso_pack_desc_cb, seg);
+
+ /* adjust starting frame offset for next seg. */
+ isoc_frame_offset += seg_isoc_frame_count;
+ }
+
if (xfer->is_inbound == 0 && buf_size > 0) {
/* outbound data. */
seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -1158,25 +1180,6 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
NULL, 0, wa_seg_dto_cb, seg);
if (usb_pipeisoc(xfer->urb->pipe)) {
- /* iso packet descriptor. */
- seg->isoc_pack_desc_urb =
- usb_alloc_urb(0, GFP_ATOMIC);
- if (seg->isoc_pack_desc_urb == NULL)
- goto error_iso_pack_desc_alloc;
- /*
- * The buffer for the isoc packet descriptor
- * after the transfer request header in the
- * segment object memory buffer.
- */
- usb_fill_bulk_urb(
- seg->isoc_pack_desc_urb, usb_dev,
- usb_sndbulkpipe(usb_dev,
- dto_epd->bEndpointAddress),
- (void *)(&seg->xfer_hdr) +
- xfer_hdr_size,
- iso_pkt_descr_size,
- wa_seg_iso_pack_desc_cb, seg);
-
/*
* Fill in the xfer buffer information for the
* first isoc frame. Subsequent frames in this
@@ -1184,9 +1187,7 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
* DTO completion routine, if needed.
*/
__wa_populate_dto_urb_isoc(xfer, seg,
- xfer_isoc_frame_offset);
- /* adjust starting frame offset for next seg. */
- xfer_isoc_frame_offset += seg_isoc_frame_count;
+ seg->isoc_frame_offset);
} else {
/* fill in the xfer buffer information. */
result = __wa_populate_dto_urb(xfer, seg,
@@ -1207,10 +1208,11 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
* Use the fact that cnt is left at were it failed. The remaining
* segments will be cleaned up by wa_xfer_destroy.
*/
-error_iso_pack_desc_alloc:
error_seg_outbound_populate:
usb_free_urb(xfer->seg[cnt]->dto_urb);
error_dto_alloc:
+ usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
+error_iso_pack_desc_alloc:
kfree(xfer->seg[cnt]);
xfer->seg[cnt] = NULL;
error_seg_kmalloc:
@@ -1259,8 +1261,11 @@ static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
for (cnt = 1; cnt < xfer->segs; cnt++) {
struct wa_xfer_packet_info_hwaiso *packet_desc;
struct wa_seg *seg = xfer->seg[cnt];
+ struct wa_xfer_hwaiso *xfer_iso;
xfer_hdr = &seg->xfer_hdr;
+ xfer_iso = container_of(xfer_hdr,
+ struct wa_xfer_hwaiso, hdr);
packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
/*
* Copy values from the 0th header. Segment specific
@@ -1270,6 +1275,8 @@ static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
xfer_hdr->bTransferSegment = cnt;
xfer_hdr->dwTransferLength =
cpu_to_le32(seg->isoc_size);
+ xfer_iso->dwNumOfPackets =
+ cpu_to_le32(seg->isoc_frame_count);
__wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
seg->status = WA_SEG_READY;
}
@@ -1320,32 +1327,31 @@ static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
}
/* submit the isoc packet descriptor if present. */
if (seg->isoc_pack_desc_urb) {
- struct wahc *wa = xfer->wa;
-
result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
+ seg->isoc_frame_index = 0;
if (result < 0) {
pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
__func__, xfer, seg->index, result);
goto error_iso_pack_desc_submit;
}
- xfer->dto_isoc_frame_index = 0;
- /*
- * If this segment contains more than one isoc frame, hold
- * onto the dto resource until we send all frames.
- * Only applies to non-Alereon devices.
- */
- if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
- && (seg->isoc_frame_count > 1))
- *dto_done = 0;
}
/* submit the out data if this is an out request. */
if (seg->dto_urb) {
+ struct wahc *wa = xfer->wa;
result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
if (result < 0) {
pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
__func__, xfer, seg->index, result);
goto error_dto_submit;
}
+ /*
+ * If this segment contains more than one isoc frame, hold
+ * onto the dto resource until we send all frames.
+ * Only applies to non-Alereon devices.
+ */
+ if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
+ && (seg->isoc_frame_count > 1))
+ *dto_done = 0;
}
seg->status = WA_SEG_SUBMITTED;
rpipe_avail_dec(rpipe);
@@ -1567,7 +1573,8 @@ static int wa_urb_enqueue_b(struct wa_xfer *xfer)
wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
if (wusb_dev == NULL) {
mutex_unlock(&wusbhc->mutex);
- pr_err("%s: error wusb dev gone\n", __func__);
+ dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n",
+ __func__);
goto error_dev_gone;
}
mutex_unlock(&wusbhc->mutex);
@@ -1576,18 +1583,18 @@ static int wa_urb_enqueue_b(struct wa_xfer *xfer)
xfer->wusb_dev = wusb_dev;
result = urb->status;
if (urb->status != -EINPROGRESS) {
- pr_err("%s: error_dequeued\n", __func__);
+ dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);
goto error_dequeued;
}
result = __wa_xfer_setup(xfer, urb);
if (result < 0) {
- pr_err("%s: error_xfer_setup\n", __func__);
+ dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
goto error_xfer_setup;
}
result = __wa_xfer_submit(xfer);
if (result < 0) {
- pr_err("%s: error_xfer_submit\n", __func__);
+ dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
goto error_xfer_submit;
}
spin_unlock_irqrestore(&xfer->lock, flags);
@@ -1730,6 +1737,12 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
dump_stack();
}
+ spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
+ result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
+ if (result < 0)
+ goto error_link_urb;
+
result = -ENOMEM;
xfer = kzalloc(sizeof(*xfer), gfp);
if (xfer == NULL)
@@ -1769,6 +1782,9 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
__func__, result);
wa_put(xfer->wa);
wa_xfer_put(xfer);
+ spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
+ usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
return result;
}
}
@@ -1777,6 +1793,10 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
error_dequeued:
kfree(xfer);
error_kmalloc:
+ spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
+ usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
+error_link_urb:
return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);
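wa_urb_enqueue() now registers each URB with HCD core bookkeeping before queueing it, and unlinks it again on the error paths; wa_urb_dequeue() (below) gains a status argument so it can call usb_hcd_check_unlink_urb() before cancelling anything. A minimal sketch of that pairing, assuming a driver-private spinlock and usb_hcd pointer; all names are illustrative:

#include <linux/usb/hcd.h>
#include <linux/spinlock.h>

static int example_enqueue(struct usb_hcd *hcd, struct urb *urb,
			   spinlock_t *priv_lock)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(priv_lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	spin_unlock_irqrestore(priv_lock, flags);
	if (ret)
		return ret;

	/* ... driver-specific submission; any failure from here on must
	 * call usb_hcd_unlink_urb_from_ep() before returning. */
	return 0;
}

static int example_dequeue(struct usb_hcd *hcd, struct urb *urb, int status,
			   spinlock_t *priv_lock)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(priv_lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	spin_unlock_irqrestore(priv_lock, flags);
	if (ret)
		return ret;	/* e.g. -EIDRM: the core no longer owns it */

	/* ... cancel the transfer; the completion path calls
	 * usb_hcd_unlink_urb_from_ep() before giving the URB back. */
	return 0;
}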
@@ -1799,7 +1819,7 @@ EXPORT_SYMBOL_GPL(wa_urb_enqueue);
* asynch request] and then make sure we cancel each segment.
*
*/
-int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
+int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
{
unsigned long flags, flags2;
struct wa_xfer *xfer;
@@ -1807,6 +1827,14 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
struct wa_rpipe *rpipe;
unsigned cnt, done = 0, xfer_abort_pending;
unsigned rpipe_ready = 0;
+ int result;
+
+ /* check if it is safe to unlink. */
+ spin_lock_irqsave(&wa->xfer_list_lock, flags);
+ result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
+ if (result)
+ return result;
xfer = urb->hcpriv;
if (xfer == NULL) {
@@ -1822,9 +1850,10 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
rpipe = xfer->ep->hcpriv;
if (rpipe == NULL) {
- pr_debug("%s: xfer id 0x%08X has no RPIPE. %s",
- __func__, wa_xfer_id(xfer),
+ pr_debug("%s: xfer %p id 0x%08X has no RPIPE. %s",
+ __func__, xfer, wa_xfer_id(xfer),
"Probably already aborted.\n" );
+ result = -ENOENT;
goto out_unlock;
}
/* Check the delayed list -> if there, release and complete */
@@ -1855,6 +1884,7 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
* segments will be completed in the DTI interrupt.
*/
seg->status = WA_SEG_ABORTED;
+ seg->result = -ENOENT;
spin_lock_irqsave(&rpipe->seg_lock, flags2);
list_del(&seg->list_node);
xfer->segs_done++;
@@ -1894,12 +1924,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
- return 0;
+ return result;
out_unlock:
spin_unlock_irqrestore(&xfer->lock, flags);
out:
- return 0;
+ return result;
dequeue_delayed:
list_del_init(&xfer->list_node);
@@ -1935,7 +1965,7 @@ static int wa_xfer_status_to_errno(u8 status)
[WA_XFER_STATUS_NOT_FOUND] = 0,
[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
[WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
- [WA_XFER_STATUS_ABORTED] = -EINTR,
+ [WA_XFER_STATUS_ABORTED] = -ENOENT,
[WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
[WA_XFER_INVALID_FORMAT] = EINVAL,
[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
@@ -1968,7 +1998,7 @@ static int wa_xfer_status_to_errno(u8 status)
* the xfer will complete cleanly.
*/
static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
- struct wa_seg *incoming_seg)
+ struct wa_seg *incoming_seg, enum wa_seg_status status)
{
int index;
struct wa_rpipe *rpipe = xfer->ep->hcpriv;
@@ -1990,7 +2020,7 @@ static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
*/
case WA_SEG_DELAYED:
xfer->segs_done++;
- current_seg->status = incoming_seg->status;
+ current_seg->status = status;
break;
case WA_SEG_ABORTED:
break;
@@ -2003,6 +2033,77 @@ static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
}
}
+/* Populate the wa->buf_in_urb based on the current isoc transfer state. */
+static void __wa_populate_buf_in_urb_isoc(struct wahc *wa, struct wa_xfer *xfer,
+ struct wa_seg *seg, int curr_iso_frame)
+{
+ BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
+
+ /* this should always be 0 before a resubmit. */
+ wa->buf_in_urb->num_mapped_sgs = 0;
+ wa->buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
+ xfer->urb->iso_frame_desc[curr_iso_frame].offset;
+ wa->buf_in_urb->transfer_buffer_length =
+ xfer->urb->iso_frame_desc[curr_iso_frame].length;
+ wa->buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ wa->buf_in_urb->transfer_buffer = NULL;
+ wa->buf_in_urb->sg = NULL;
+ wa->buf_in_urb->num_sgs = 0;
+ wa->buf_in_urb->context = seg;
+}
+
+/* Populate the wa->buf_in_urb based on the current transfer state. */
+static int wa_populate_buf_in_urb(struct wahc *wa, struct wa_xfer *xfer,
+ unsigned int seg_idx, unsigned int bytes_transferred)
+{
+ int result = 0;
+ struct wa_seg *seg = xfer->seg[seg_idx];
+
+ BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
+ /* this should always be 0 before a resubmit. */
+ wa->buf_in_urb->num_mapped_sgs = 0;
+
+ if (xfer->is_dma) {
+ wa->buf_in_urb->transfer_dma = xfer->urb->transfer_dma
+ + (seg_idx * xfer->seg_size);
+ wa->buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ wa->buf_in_urb->transfer_buffer = NULL;
+ wa->buf_in_urb->sg = NULL;
+ wa->buf_in_urb->num_sgs = 0;
+ } else {
+ /* do buffer or SG processing. */
+ wa->buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
+
+ if (xfer->urb->transfer_buffer) {
+ wa->buf_in_urb->transfer_buffer =
+ xfer->urb->transfer_buffer
+ + (seg_idx * xfer->seg_size);
+ wa->buf_in_urb->sg = NULL;
+ wa->buf_in_urb->num_sgs = 0;
+ } else {
+ /* allocate an SG list to store seg_size bytes
+ and copy the subset of the xfer->urb->sg
+ that matches the buffer subset we are
+ about to read. */
+ wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
+ xfer->urb->sg,
+ seg_idx * xfer->seg_size,
+ bytes_transferred,
+ &(wa->buf_in_urb->num_sgs));
+
+ if (!(wa->buf_in_urb->sg)) {
+ wa->buf_in_urb->num_sgs = 0;
+ result = -ENOMEM;
+ }
+ wa->buf_in_urb->transfer_buffer = NULL;
+ }
+ }
+ wa->buf_in_urb->transfer_buffer_length = bytes_transferred;
+ wa->buf_in_urb->context = seg;
+
+ return result;
+}
+
/*
* Process a xfer result completion message
*
@@ -2016,12 +2117,13 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
int result;
struct device *dev = &wa->usb_iface->dev;
unsigned long flags;
- u8 seg_idx;
+ unsigned int seg_idx;
struct wa_seg *seg;
struct wa_rpipe *rpipe;
unsigned done = 0;
u8 usb_status;
unsigned rpipe_ready = 0;
+ unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
spin_lock_irqsave(&xfer->lock, flags);
seg_idx = xfer_result->bTransferSegment & 0x7f;
@@ -2054,66 +2156,34 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
/* FIXME: we ignore warnings, tally them for stats */
if (usb_status & 0x40) /* Warning?... */
usb_status = 0; /* ... pass */
- if (usb_pipeisoc(xfer->urb->pipe)) {
+ /*
+ * If the last segment bit is set, complete the remaining segments.
+ * When the current segment is completed, either in wa_buf_in_cb for
+ * transfers with data or below for no data, the xfer will complete.
+ */
+ if (xfer_result->bTransferSegment & 0x80)
+ wa_complete_remaining_xfer_segs(xfer, seg, WA_SEG_DONE);
+ if (usb_pipeisoc(xfer->urb->pipe)
+ && (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
/* set up WA state to read the isoc packet status next. */
wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
wa->dti_isoc_xfer_seg = seg_idx;
wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
- } else if (xfer->is_inbound) { /* IN data phase: read to buffer */
+ } else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
+ && (bytes_transferred > 0)) {
+ /* IN data phase: read to buffer */
seg->status = WA_SEG_DTI_PENDING;
- BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
- /* this should always be 0 before a resubmit. */
- wa->buf_in_urb->num_mapped_sgs = 0;
-
- if (xfer->is_dma) {
- wa->buf_in_urb->transfer_dma =
- xfer->urb->transfer_dma
- + (seg_idx * xfer->seg_size);
- wa->buf_in_urb->transfer_flags
- |= URB_NO_TRANSFER_DMA_MAP;
- wa->buf_in_urb->transfer_buffer = NULL;
- wa->buf_in_urb->sg = NULL;
- wa->buf_in_urb->num_sgs = 0;
- } else {
- /* do buffer or SG processing. */
- wa->buf_in_urb->transfer_flags
- &= ~URB_NO_TRANSFER_DMA_MAP;
-
- if (xfer->urb->transfer_buffer) {
- wa->buf_in_urb->transfer_buffer =
- xfer->urb->transfer_buffer
- + (seg_idx * xfer->seg_size);
- wa->buf_in_urb->sg = NULL;
- wa->buf_in_urb->num_sgs = 0;
- } else {
- /* allocate an SG list to store seg_size bytes
- and copy the subset of the xfer->urb->sg
- that matches the buffer subset we are
- about to read. */
- wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
- xfer->urb->sg,
- seg_idx * xfer->seg_size,
- le32_to_cpu(
- xfer_result->dwTransferLength),
- &(wa->buf_in_urb->num_sgs));
-
- if (!(wa->buf_in_urb->sg)) {
- wa->buf_in_urb->num_sgs = 0;
- goto error_sg_alloc;
- }
- wa->buf_in_urb->transfer_buffer = NULL;
- }
- }
- wa->buf_in_urb->transfer_buffer_length =
- le32_to_cpu(xfer_result->dwTransferLength);
- wa->buf_in_urb->context = seg;
+ result = wa_populate_buf_in_urb(wa, xfer, seg_idx,
+ bytes_transferred);
+ if (result < 0)
+ goto error_buf_in_populate;
result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
if (result < 0)
goto error_submit_buf_in;
} else {
- /* OUT data phase, complete it -- */
+ /* OUT data phase or no data, complete it -- */
seg->status = WA_SEG_DONE;
- seg->result = le32_to_cpu(xfer_result->dwTransferLength);
+ seg->result = bytes_transferred;
xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
done = __wa_xfer_is_done(xfer);
@@ -2137,13 +2207,13 @@ error_submit_buf_in:
seg->result = result;
kfree(wa->buf_in_urb->sg);
wa->buf_in_urb->sg = NULL;
-error_sg_alloc:
+error_buf_in_populate:
__wa_xfer_abort(xfer);
seg->status = WA_SEG_ERROR;
error_complete:
xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
- wa_complete_remaining_xfer_segs(xfer, seg);
+ wa_complete_remaining_xfer_segs(xfer, seg, seg->status);
done = __wa_xfer_is_done(xfer);
/*
* queue work item to clear STALL for control endpoints.
@@ -2172,7 +2242,7 @@ error_complete:
error_bad_seg:
spin_unlock_irqrestore(&xfer->lock, flags);
- wa_urb_dequeue(wa, xfer->urb);
+ wa_urb_dequeue(wa, xfer->urb, -ENOENT);
if (printk_ratelimit())
dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
@@ -2192,7 +2262,7 @@ segment_aborted:
*
* inbound transfers: need to schedule a buf_in_urb read
*/
-static void wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
+static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
{
struct device *dev = &wa->usb_iface->dev;
struct wa_xfer_packet_status_hwaiso *packet_status;
@@ -2201,8 +2271,8 @@ static void wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
unsigned long flags;
struct wa_seg *seg;
struct wa_rpipe *rpipe;
- unsigned done = 0;
- unsigned rpipe_ready = 0, seg_index;
+ unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
+ unsigned first_frame_index = 0, rpipe_ready = 0;
int expected_size;
/* We have a xfer result buffer; check it */
@@ -2238,18 +2308,48 @@ static void wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
le16_to_cpu(packet_status->wLength));
goto error_bad_seg;
}
- /* isoc packet status and lengths back xfer urb. */
+ /* write isoc packet status and lengths back to the xfer urb. */
status_array = packet_status->PacketStatus;
+ xfer->urb->start_frame =
+ wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
- xfer->urb->iso_frame_desc[seg->index].status =
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ const int urb_frame_index =
+ seg->isoc_frame_offset + seg_index;
+
+ iso_frame_desc[urb_frame_index].status =
wa_xfer_status_to_errno(
le16_to_cpu(status_array[seg_index].PacketStatus));
- xfer->urb->iso_frame_desc[seg->index].actual_length =
+ iso_frame_desc[urb_frame_index].actual_length =
le16_to_cpu(status_array[seg_index].PacketLength);
+ /* track the number of frames successfully transferred. */
+ if (iso_frame_desc[urb_frame_index].actual_length > 0) {
+ /* save the starting frame index for buf_in_urb. */
+ if (!data_frame_count)
+ first_frame_index = seg_index;
+ ++data_frame_count;
+ }
}
- if (!xfer->is_inbound) {
- /* OUT transfer, complete it -- */
+ if (xfer->is_inbound && data_frame_count) {
+ int result;
+
+ seg->isoc_frame_index = first_frame_index;
+ /* submit a read URB for the first frame with data. */
+ __wa_populate_buf_in_urb_isoc(wa, xfer, seg,
+ seg->isoc_frame_index + seg->isoc_frame_offset);
+
+ result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
+ result);
+ wa_reset_all(wa);
+ } else if (data_frame_count > 1)
+ /* If we need to read multiple frames, set DTI busy. */
+ dti_busy = 1;
+ } else {
+ /* OUT transfer or no more IN data, complete it -- */
seg->status = WA_SEG_DONE;
xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
@@ -2262,13 +2362,13 @@ static void wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
wa_xfer_put(xfer);
- return;
+ return dti_busy;
error_bad_seg:
spin_unlock_irqrestore(&xfer->lock, flags);
wa_xfer_put(xfer);
error_parse_buffer:
- return;
+ return dti_busy;
}
/*
@@ -2288,7 +2388,7 @@ static void wa_buf_in_cb(struct urb *urb)
struct wahc *wa;
struct device *dev;
struct wa_rpipe *rpipe;
- unsigned rpipe_ready;
+ unsigned rpipe_ready = 0, seg_index, isoc_data_frame_count = 0;
unsigned long flags;
u8 done = 0;
@@ -2296,19 +2396,61 @@ static void wa_buf_in_cb(struct urb *urb)
kfree(urb->sg);
urb->sg = NULL;
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ /*
+ * Find the next isoc frame with data. Bail out after
+ * isoc_data_frame_count > 1 since there is no need to walk
+ * the entire frame array. We just need to know if
+ * isoc_data_frame_count is 0, 1, or >1.
+ */
+ seg_index = seg->isoc_frame_index + 1;
+ while ((seg_index < seg->isoc_frame_count)
+ && (isoc_data_frame_count <= 1)) {
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ const int urb_frame_index =
+ seg->isoc_frame_offset + seg_index;
+
+ if (iso_frame_desc[urb_frame_index].actual_length > 0) {
+ /* save the index of the next frame with data */
+ if (!isoc_data_frame_count)
+ seg->isoc_frame_index = seg_index;
+ ++isoc_data_frame_count;
+ }
+ ++seg_index;
+ }
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+
switch (urb->status) {
case 0:
spin_lock_irqsave(&xfer->lock, flags);
- wa = xfer->wa;
- dev = &wa->usb_iface->dev;
- rpipe = xfer->ep->hcpriv;
- dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
- xfer, seg->index, (size_t)urb->actual_length);
- seg->status = WA_SEG_DONE;
- seg->result = urb->actual_length;
- xfer->segs_done++;
- rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+
+ seg->result += urb->actual_length;
+ if (isoc_data_frame_count > 0) {
+ int result;
+ /* submit a read URB for the first frame with data. */
+ __wa_populate_buf_in_urb_isoc(wa, xfer, seg,
+ seg->isoc_frame_index + seg->isoc_frame_offset);
+ result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
+ result);
+ wa_reset_all(wa);
+ }
+ } else {
+ rpipe = xfer->ep->hcpriv;
+ seg->status = WA_SEG_DONE;
+ dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
+ xfer, seg->index, seg->result);
+ xfer->segs_done++;
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_is_done(xfer);
+ }
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
wa_xfer_completion(xfer);
@@ -2320,8 +2462,6 @@ static void wa_buf_in_cb(struct urb *urb)
break;
default: /* Other errors ... */
spin_lock_irqsave(&xfer->lock, flags);
- wa = xfer->wa;
- dev = &wa->usb_iface->dev;
rpipe = xfer->ep->hcpriv;
if (printk_ratelimit())
dev_err(dev, "xfer %p#%u: data in error %d\n",
@@ -2344,6 +2484,20 @@ static void wa_buf_in_cb(struct urb *urb)
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
}
+ /*
+ * If we are in this callback and isoc_data_frame_count > 0, it means
+ * that the dti_urb submission was delayed in wa_dti_cb. Once
+ * isoc_data_frame_count gets to 1, we can submit the deferred URB
+ * since the last buf_in_urb was just submitted.
+ */
+ if (isoc_data_frame_count == 1) {
+ int result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
+ result);
+ wa_reset_all(wa);
+ }
+ }
}
/*
@@ -2374,7 +2528,7 @@ static void wa_buf_in_cb(struct urb *urb)
*/
static void wa_dti_cb(struct urb *urb)
{
- int result;
+ int result, dti_busy = 0;
struct wahc *wa = urb->context;
struct device *dev = &wa->usb_iface->dev;
u32 xfer_id;
@@ -2422,7 +2576,7 @@ static void wa_dti_cb(struct urb *urb)
wa_xfer_result_chew(wa, xfer, xfer_result);
wa_xfer_put(xfer);
} else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
- wa_process_iso_packet_status(wa, urb);
+ dti_busy = wa_process_iso_packet_status(wa, urb);
} else {
dev_err(dev, "DTI Error: unexpected EP state = %d\n",
wa->dti_state);
@@ -2445,12 +2599,15 @@ static void wa_dti_cb(struct urb *urb)
dev_err(dev, "DTI: URB error %d\n", urb->status);
break;
}
- /* Resubmit the DTI URB */
- result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
- if (result < 0) {
- dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
- "resetting\n", result);
- wa_reset_all(wa);
+
+ /* Resubmit the DTI URB if we are not busy processing isoc in frames. */
+ if (!dti_busy) {
+ result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
+ result);
+ wa_reset_all(wa);
+ }
}
out:
return;
@@ -2517,8 +2674,8 @@ void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
NULL, 0, wa_buf_in_cb, wa);
result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
if (result < 0) {
- dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
- "resetting\n", result);
+ dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
+ result);
goto error_dti_urb_submit;
}
out:
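The wa_urb_dequeue() changes above follow the usual HCD dequeue contract: take the driver's own lock, ask usbcore whether the unlink is valid with usb_hcd_check_unlink_urb(), and only then tear the transfer down and give the URB back with the caller-supplied status. The following is a minimal sketch of that contract for a generic host controller driver; it is illustrative only, and the my_hcd_* names are hypothetical rather than taken from the wusbcore code.

#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

struct my_hcd_priv {
	spinlock_t lock;	/* protects this driver's URB bookkeeping */
};

static inline struct my_hcd_priv *my_hcd_priv(struct usb_hcd *hcd)
{
	return (struct my_hcd_priv *)hcd->hcd_priv;
}

/* Sketch of a .urb_dequeue implementation (hypothetical driver). */
static int my_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct my_hcd_priv *priv = my_hcd_priv(hcd);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	/* Refuse the unlink if the URB is not currently queued on this HCD. */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}
	/* ... stop any hardware activity associated with urb here ... */
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Hand the URB back with the status usbcore asked for. */
	usb_hcd_giveback_urb(hcd, urb, status);
	return 0;
}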
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
index 742c607d1fa3..3e1ba51d1a43 100644
--- a/drivers/usb/wusbcore/wusbhc.c
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -55,7 +55,8 @@ static struct wusbhc *usbhc_dev_to_wusbhc(struct device *dev)
* value of trust_timeout is jiffies.
*/
static ssize_t wusb_trust_timeout_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
@@ -173,7 +174,8 @@ static ssize_t wusb_phy_rate_store(struct device *dev,
wusbhc->phy_rate = phy_rate;
return size;
}
-static DEVICE_ATTR(wusb_phy_rate, 0644, wusb_phy_rate_show, wusb_phy_rate_store);
+static DEVICE_ATTR(wusb_phy_rate, 0644, wusb_phy_rate_show,
+ wusb_phy_rate_store);
static ssize_t wusb_dnts_show(struct device *dev,
struct device_attribute *attr,
@@ -227,7 +229,8 @@ static ssize_t wusb_retry_count_store(struct device *dev,
if (result != 1)
return -EINVAL;
- wusbhc->retry_count = max_t(uint8_t, retry_count, WUSB_RETRY_COUNT_MAX);
+ wusbhc->retry_count = max_t(uint8_t, retry_count,
+ WUSB_RETRY_COUNT_MAX);
return size;
}
@@ -321,7 +324,8 @@ int wusbhc_b_create(struct wusbhc *wusbhc)
result = sysfs_create_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
if (result < 0) {
- dev_err(dev, "Cannot register WUSBHC attributes: %d\n", result);
+ dev_err(dev, "Cannot register WUSBHC attributes: %d\n",
+ result);
goto error_create_attr_group;
}
@@ -419,13 +423,14 @@ EXPORT_SYMBOL_GPL(wusb_cluster_id_put);
* - After a successful transfer, update the trust timeout timestamp
* for the WUSB device.
*
- * - [WUSB] sections 4.13 and 7.5.1 specifies the stop retrasmittion
+ * - [WUSB] sections 4.13 and 7.5.1 specify the stop retransmission
* condition for the WCONNECTACK_IE is that the host has observed
* the associated device responding to a control transfer.
*/
void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status)
{
- struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
+ struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc,
+ urb->dev);
if (status == 0 && wusb_dev) {
wusb_dev->entry_ts = jiffies;
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
index 6bd3b819a6b5..2384add45371 100644
--- a/drivers/usb/wusbcore/wusbhc.h
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -164,7 +164,7 @@ struct wusb_port {
* functions/operations that only deal with general Wireless USB HC
* issues use this data type to refer to the host.
*
- * @usb_hcd Instantiation of a USB host controller
+ * @usb_hcd Instantiation of a USB host controller
* (initialized by upper layer [HWA=HC or WHCI].
*
* @dev Device that implements this; initialized by the
@@ -196,7 +196,7 @@ struct wusb_port {
* @ports_max Number of simultaneous device connections (fake
* ports) this HC will take. Read-only.
*
- * @port Array of port status for each fake root port. Guaranteed to
+ * @port Array of port status for each fake root port. Guaranteed to
* always be the same length during device existence
* [this allows for some unlocked but referenced reading].
*
@@ -329,7 +329,8 @@ void wusbhc_pal_unregister(struct wusbhc *wusbhc);
* This is a safe assumption as @usb_dev->bus is referenced all the
* time during the @usb_dev life cycle.
*/
-static inline struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev)
+static inline
+struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev)
{
struct usb_hcd *usb_hcd;
usb_hcd = container_of(usb_dev->bus, struct usb_hcd, self);
diff --git a/drivers/uwb/beacon.c b/drivers/uwb/beacon.c
index dcdd59bfcd09..57b5ff61020c 100644
--- a/drivers/uwb/beacon.c
+++ b/drivers/uwb/beacon.c
@@ -117,6 +117,7 @@ int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset)
int result;
struct device *dev = &rc->uwb_dev.dev;
+ dev_dbg(dev, "%s: channel = %d\n", __func__, channel);
if (channel < 0)
channel = -1;
if (channel == -1)
@@ -184,7 +185,7 @@ out:
/* Find a beacon by dev addr in the cache */
static
-struct uwb_beca_e *__uwb_beca_find_bymac(struct uwb_rc *rc,
+struct uwb_beca_e *__uwb_beca_find_bymac(struct uwb_rc *rc,
const struct uwb_mac_addr *mac_addr)
{
struct uwb_beca_e *bce, *next;
@@ -515,13 +516,13 @@ int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *evt)
}
bpsc = container_of(evt->notif.rceb, struct uwb_rc_evt_bp_slot_change, rceb);
- mutex_lock(&rc->uwb_dev.mutex);
if (uwb_rc_evt_bp_slot_change_no_slot(bpsc)) {
- dev_info(dev, "stopped beaconing: No free slots in BP\n");
+ dev_err(dev, "stopped beaconing: No free slots in BP\n");
+ mutex_lock(&rc->uwb_dev.mutex);
rc->beaconing = -1;
+ mutex_unlock(&rc->uwb_dev.mutex);
} else
rc->uwb_dev.beacon_slot = uwb_rc_evt_bp_slot_change_slot_num(bpsc);
- mutex_unlock(&rc->uwb_dev.mutex);
return 0;
}
diff --git a/drivers/uwb/radio.c b/drivers/uwb/radio.c
index d58dfecf9a79..fd23d985ea71 100644
--- a/drivers/uwb/radio.c
+++ b/drivers/uwb/radio.c
@@ -62,6 +62,10 @@ static void uwb_radio_channel_changed(struct uwb_rc *rc, int channel)
static int uwb_radio_change_channel(struct uwb_rc *rc, int channel)
{
int ret = 0;
+ struct device *dev = &rc->uwb_dev.dev;
+
+ dev_dbg(dev, "%s: channel = %d, rc->beaconing = %d\n", __func__,
+ channel, rc->beaconing);
if (channel == -1)
uwb_radio_channel_changed(rc, channel);
@@ -89,7 +93,7 @@ static int uwb_radio_change_channel(struct uwb_rc *rc, int channel)
* uwb_radio_start - request that the radio be started
* @pal: the PAL making the request.
*
- * If the radio is not already active, aa suitable channel is selected
+ * If the radio is not already active, a suitable channel is selected
* and beacons are started.
*/
int uwb_radio_start(struct uwb_pal *pal)
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c
index 738e8a8cb811..3fe611941046 100644
--- a/drivers/uwb/rsv.c
+++ b/drivers/uwb/rsv.c
@@ -237,7 +237,7 @@ void uwb_rsv_backoff_win_increment(struct uwb_rc *rc)
/* reset the timer associated variables */
timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US;
bow->total_expired = 0;
- mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us));
+ mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us));
}
static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
@@ -257,7 +257,7 @@ static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
sframes = 1;
if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
sframes = 0;
-
+
}
if (sframes > 0) {
@@ -611,7 +611,7 @@ int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available)
struct device *dev = &rc->uwb_dev.dev;
struct uwb_rsv_move *mv;
int ret = 0;
-
+
if (bow->can_reserve_extra_mases == false)
return -EBUSY;
@@ -628,7 +628,7 @@ int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available)
} else {
dev_dbg(dev, "new allocation not found\n");
}
-
+
return ret;
}
@@ -640,7 +640,7 @@ void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc)
struct uwb_drp_backoff_win *bow = &rc->bow;
struct uwb_rsv *rsv;
struct uwb_mas_bm mas;
-
+
if (bow->can_reserve_extra_mases == false)
return;
@@ -652,7 +652,7 @@ void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc)
uwb_rsv_try_move(rsv, &mas);
}
}
-
+
}
/**
@@ -916,10 +916,10 @@ static void uwb_rsv_alien_bp_work(struct work_struct *work)
struct uwb_rsv *rsv;
mutex_lock(&rc->rsvs_mutex);
-
+
list_for_each_entry(rsv, &rc->reservations, rc_node) {
if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) {
- rsv->callback(rsv);
+ uwb_rsv_callback(rsv);
}
}
diff --git a/drivers/uwb/umc-bus.c b/drivers/uwb/umc-bus.c
index e3ed6ff6a481..88a290f57ea0 100644
--- a/drivers/uwb/umc-bus.c
+++ b/drivers/uwb/umc-bus.c
@@ -85,7 +85,7 @@ int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc)
const struct pci_device_id *id_table = umc_drv->match_data;
struct pci_dev *pci;
- if (umc->dev.parent->bus != &pci_bus_type)
+ if (!dev_is_pci(umc->dev.parent))
return 0;
pci = to_pci_dev(umc->dev.parent);
diff --git a/drivers/uwb/umc-dev.c b/drivers/uwb/umc-dev.c
index 4613c13cd851..7b0b268e0c8e 100644
--- a/drivers/uwb/umc-dev.c
+++ b/drivers/uwb/umc-dev.c
@@ -66,6 +66,7 @@ int umc_device_register(struct umc_dev *umc)
return 0;
error_device_register:
+ put_device(&umc->dev);
release_resource(&umc->resource);
error_request_resource:
return err;
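The umc-dev.c hunk above is the standard device_register() error idiom: once registration has been attempted, the embedded struct device holds a reference and must be dropped with put_device() so its release() callback frees the object; a direct kfree() would corrupt the refcounting. A minimal sketch of the idiom, with a hypothetical my_obj object and release callback not taken from this patch:

#include <linux/device.h>
#include <linux/slab.h>

struct my_obj {
	struct device dev;
};

static void my_obj_release(struct device *dev)
{
	kfree(container_of(dev, struct my_obj, dev));
}

static int my_obj_add(struct device *parent)
{
	struct my_obj *obj;
	int err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->dev.parent = parent;
	obj->dev.release = my_obj_release;
	dev_set_name(&obj->dev, "my_obj");

	err = device_register(&obj->dev);
	if (err) {
		/* Never kfree() here: drop the reference so release() runs. */
		put_device(&obj->dev);
		return err;
	}
	return 0;
}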
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 6ab71b9fcf8d..7ba042498857 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -139,25 +139,14 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
/*
- * Careful, device_lock may already be held. This is the case if
- * a driver unbind is blocked. Try to get the locks ourselves to
- * prevent a deadlock.
+ * Try to reset the device. The success of this is dependent on
+ * being able to lock the device, which is not always possible.
*/
if (vdev->reset_works) {
- bool reset_done = false;
-
- if (pci_cfg_access_trylock(pdev)) {
- if (device_trylock(&pdev->dev)) {
- __pci_reset_function_locked(pdev);
- reset_done = true;
- device_unlock(&pdev->dev);
- }
- pci_cfg_access_unlock(pdev);
- }
-
- if (!reset_done)
- pr_warn("%s: Unable to acquire locks for reset of %s\n",
- __func__, dev_name(&pdev->dev));
+ int ret = pci_try_reset_function(pdev);
+ if (ret)
+ pr_warn("%s: Failed to reset device %s (%d)\n",
+ __func__, dev_name(&pdev->dev), ret);
}
pci_restore_state(pdev);
@@ -514,7 +503,7 @@ static long vfio_pci_ioctl(void *device_data,
} else if (cmd == VFIO_DEVICE_RESET) {
return vdev->reset_works ?
- pci_reset_function(vdev->pdev) : -EINVAL;
+ pci_try_reset_function(vdev->pdev) : -EINVAL;
} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
struct vfio_pci_hot_reset_info hdr;
@@ -684,8 +673,8 @@ reset_info_exit:
&info, slot);
if (!ret)
/* User has access, do the reset */
- ret = slot ? pci_reset_slot(vdev->pdev->slot) :
- pci_reset_bus(vdev->pdev->bus);
+ ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
+ pci_try_reset_bus(vdev->pdev->bus);
hot_reset_release:
for (i--; i >= 0; i--)
@@ -883,9 +872,13 @@ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_DISCONNECT;
}
+ mutex_lock(&vdev->igate);
+
if (vdev->err_trigger)
eventfd_signal(vdev->err_trigger, 1);
+ mutex_unlock(&vdev->igate);
+
vfio_device_put(device);
return PCI_ERS_RESULT_CAN_RECOVER;
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index ffd0632c3cbc..83cd1574c810 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -975,20 +975,20 @@ static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos)
int ret, evcc, phases, vc_arb;
int len = PCI_CAP_VC_BASE_SIZEOF;
- ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_REG1, &tmp);
+ ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP1, &tmp);
if (ret)
return pcibios_err_to_errno(ret);
- evcc = tmp & PCI_VC_REG1_EVCC; /* extended vc count */
- ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_REG2, &tmp);
+ evcc = tmp & PCI_VC_CAP1_EVCC; /* extended vc count */
+ ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP2, &tmp);
if (ret)
return pcibios_err_to_errno(ret);
- if (tmp & PCI_VC_REG2_128_PHASE)
+ if (tmp & PCI_VC_CAP2_128_PHASE)
phases = 128;
- else if (tmp & PCI_VC_REG2_64_PHASE)
+ else if (tmp & PCI_VC_CAP2_64_PHASE)
phases = 64;
- else if (tmp & PCI_VC_REG2_32_PHASE)
+ else if (tmp & PCI_VC_CAP2_32_PHASE)
phases = 32;
else
phases = 0;
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 641bc87bdb96..210357691dc0 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -749,54 +749,37 @@ static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
unsigned count, uint32_t flags, void *data)
{
int32_t fd = *(int32_t *)data;
- struct pci_dev *pdev = vdev->pdev;
if ((index != VFIO_PCI_ERR_IRQ_INDEX) ||
!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
return -EINVAL;
- /*
- * device_lock synchronizes setting and checking of
- * err_trigger. The vfio_pci_aer_err_detected() is also
- * called with device_lock held.
- */
-
/* DATA_NONE/DATA_BOOL enables loopback testing */
-
if (flags & VFIO_IRQ_SET_DATA_NONE) {
- device_lock(&pdev->dev);
if (vdev->err_trigger)
eventfd_signal(vdev->err_trigger, 1);
- device_unlock(&pdev->dev);
return 0;
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
uint8_t trigger = *(uint8_t *)data;
- device_lock(&pdev->dev);
if (trigger && vdev->err_trigger)
eventfd_signal(vdev->err_trigger, 1);
- device_unlock(&pdev->dev);
return 0;
}
/* Handle SET_DATA_EVENTFD */
-
if (fd == -1) {
- device_lock(&pdev->dev);
if (vdev->err_trigger)
eventfd_ctx_put(vdev->err_trigger);
vdev->err_trigger = NULL;
- device_unlock(&pdev->dev);
return 0;
} else if (fd >= 0) {
struct eventfd_ctx *efdctx;
efdctx = eventfd_ctx_fdget(fd);
if (IS_ERR(efdctx))
return PTR_ERR(efdctx);
- device_lock(&pdev->dev);
if (vdev->err_trigger)
eventfd_ctx_put(vdev->err_trigger);
vdev->err_trigger = efdctx;
- device_unlock(&pdev->dev);
return 0;
} else
return -EINVAL;
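With the device_lock() calls removed here, the err_trigger state is instead meant to be serialized by a driver-private mutex, which the vfio_pci.c hunk further up now takes in the AER handler. The sketch below shows one way to keep the signalling and update paths consistent with a single mutex; the my_vdev names are hypothetical and this is not the exact structure of the vfio code.

#include <linux/err.h>
#include <linux/eventfd.h>
#include <linux/mutex.h>

struct my_vdev {
	struct mutex igate;		/* serializes eventfd trigger state */
	struct eventfd_ctx *err_trigger;
};

/* Signal the error eventfd, if userspace registered one. */
static void my_vdev_signal_err(struct my_vdev *vdev)
{
	mutex_lock(&vdev->igate);
	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);
	mutex_unlock(&vdev->igate);
}

/* Replace the registered eventfd (fd < 0 handling elided for brevity). */
static int my_vdev_set_err_trigger(struct my_vdev *vdev, int fd)
{
	struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&vdev->igate);
	if (vdev->err_trigger)
		eventfd_ctx_put(vdev->err_trigger);
	vdev->err_trigger = ctx;
	mutex_unlock(&vdev->igate);
	return 0;
}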
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 1eab4ace0671..21271d8df023 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -22,6 +22,7 @@
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
+#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
@@ -45,9 +46,7 @@ static struct vfio {
struct idr group_idr;
struct mutex group_lock;
struct cdev group_cdev;
- struct device *dev;
- dev_t devt;
- struct cdev cdev;
+ dev_t group_devt;
wait_queue_head_t release_q;
} vfio;
@@ -142,8 +141,7 @@ EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
*/
static int vfio_alloc_group_minor(struct vfio_group *group)
{
- /* index 0 is used by /dev/vfio/vfio */
- return idr_alloc(&vfio.group_idr, group, 1, MINORMASK + 1, GFP_KERNEL);
+ return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
}
static void vfio_free_group_minor(int minor)
@@ -243,7 +241,8 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
}
}
- dev = device_create(vfio.class, NULL, MKDEV(MAJOR(vfio.devt), minor),
+ dev = device_create(vfio.class, NULL,
+ MKDEV(MAJOR(vfio.group_devt), minor),
group, "%d", iommu_group_id(iommu_group));
if (IS_ERR(dev)) {
vfio_free_group_minor(minor);
@@ -268,7 +267,7 @@ static void vfio_group_release(struct kref *kref)
WARN_ON(!list_empty(&group->device_list));
- device_destroy(vfio.class, MKDEV(MAJOR(vfio.devt), group->minor));
+ device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
list_del(&group->vfio_next);
vfio_free_group_minor(group->minor);
vfio_group_unlock_and_free(group);
@@ -1419,12 +1418,17 @@ EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);
*/
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
- if (mode && (MINOR(dev->devt) == 0))
- *mode = S_IRUGO | S_IWUGO;
-
return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}
+static struct miscdevice vfio_dev = {
+ .minor = VFIO_MINOR,
+ .name = "vfio",
+ .fops = &vfio_fops,
+ .nodename = "vfio/vfio",
+ .mode = S_IRUGO | S_IWUGO,
+};
+
static int __init vfio_init(void)
{
int ret;
@@ -1436,6 +1440,13 @@ static int __init vfio_init(void)
INIT_LIST_HEAD(&vfio.iommu_drivers_list);
init_waitqueue_head(&vfio.release_q);
+ ret = misc_register(&vfio_dev);
+ if (ret) {
+ pr_err("vfio: misc device register failed\n");
+ return ret;
+ }
+
+ /* /dev/vfio/$GROUP */
vfio.class = class_create(THIS_MODULE, "vfio");
if (IS_ERR(vfio.class)) {
ret = PTR_ERR(vfio.class);
@@ -1444,27 +1455,14 @@ static int __init vfio_init(void)
vfio.class->devnode = vfio_devnode;
- ret = alloc_chrdev_region(&vfio.devt, 0, MINORMASK, "vfio");
- if (ret)
- goto err_base_chrdev;
-
- cdev_init(&vfio.cdev, &vfio_fops);
- ret = cdev_add(&vfio.cdev, vfio.devt, 1);
+ ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK, "vfio");
if (ret)
- goto err_base_cdev;
+ goto err_alloc_chrdev;
- vfio.dev = device_create(vfio.class, NULL, vfio.devt, NULL, "vfio");
- if (IS_ERR(vfio.dev)) {
- ret = PTR_ERR(vfio.dev);
- goto err_base_dev;
- }
-
- /* /dev/vfio/$GROUP */
cdev_init(&vfio.group_cdev, &vfio_group_fops);
- ret = cdev_add(&vfio.group_cdev,
- MKDEV(MAJOR(vfio.devt), 1), MINORMASK - 1);
+ ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK);
if (ret)
- goto err_groups_cdev;
+ goto err_cdev_add;
pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
@@ -1478,16 +1476,13 @@ static int __init vfio_init(void)
return 0;
-err_groups_cdev:
- device_destroy(vfio.class, vfio.devt);
-err_base_dev:
- cdev_del(&vfio.cdev);
-err_base_cdev:
- unregister_chrdev_region(vfio.devt, MINORMASK);
-err_base_chrdev:
+err_cdev_add:
+ unregister_chrdev_region(vfio.group_devt, MINORMASK);
+err_alloc_chrdev:
class_destroy(vfio.class);
vfio.class = NULL;
err_class:
+ misc_deregister(&vfio_dev);
return ret;
}
@@ -1497,11 +1492,10 @@ static void __exit vfio_cleanup(void)
idr_destroy(&vfio.group_idr);
cdev_del(&vfio.group_cdev);
- device_destroy(vfio.class, vfio.devt);
- cdev_del(&vfio.cdev);
- unregister_chrdev_region(vfio.devt, MINORMASK);
+ unregister_chrdev_region(vfio.group_devt, MINORMASK);
class_destroy(vfio.class);
vfio.class = NULL;
+ misc_deregister(&vfio_dev);
}
module_init(vfio_init);
@@ -1511,3 +1505,5 @@ MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS_MISCDEV(VFIO_MINOR);
+MODULE_ALIAS("devname:vfio/vfio");
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index bdae7a04af75..a84788ba662c 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -81,7 +81,7 @@ static int tce_iommu_enable(struct tce_container *container)
* enforcing the limit based on the max that the guest can map.
*/
down_write(&current->mm->mmap_sem);
- npages = (tbl->it_size << IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+ npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
locked = current->mm->locked_vm + npages;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
@@ -110,7 +110,7 @@ static void tce_iommu_disable(struct tce_container *container)
down_write(&current->mm->mmap_sem);
current->mm->locked_vm -= (container->tbl->it_size <<
- IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+ IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
up_write(&current->mm->mmap_sem);
}
@@ -174,8 +174,8 @@ static long tce_iommu_ioctl(void *iommu_data,
if (info.argsz < minsz)
return -EINVAL;
- info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT;
- info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT;
+ info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT_4K;
+ info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT_4K;
info.flags = 0;
if (copy_to_user((void __user *)arg, &info, minsz))
@@ -205,8 +205,8 @@ static long tce_iommu_ioctl(void *iommu_data,
VFIO_DMA_MAP_FLAG_WRITE))
return -EINVAL;
- if ((param.size & ~IOMMU_PAGE_MASK) ||
- (param.vaddr & ~IOMMU_PAGE_MASK))
+ if ((param.size & ~IOMMU_PAGE_MASK_4K) ||
+ (param.vaddr & ~IOMMU_PAGE_MASK_4K))
return -EINVAL;
/* iova is checked by the IOMMU API */
@@ -220,17 +220,17 @@ static long tce_iommu_ioctl(void *iommu_data,
if (ret)
return ret;
- for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT); ++i) {
+ for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT_4K); ++i) {
ret = iommu_put_tce_user_mode(tbl,
- (param.iova >> IOMMU_PAGE_SHIFT) + i,
+ (param.iova >> IOMMU_PAGE_SHIFT_4K) + i,
tce);
if (ret)
break;
- tce += IOMMU_PAGE_SIZE;
+ tce += IOMMU_PAGE_SIZE_4K;
}
if (ret)
iommu_clear_tces_and_put_pages(tbl,
- param.iova >> IOMMU_PAGE_SHIFT, i);
+ param.iova >> IOMMU_PAGE_SHIFT_4K, i);
iommu_flush_tce(tbl);
@@ -256,17 +256,17 @@ static long tce_iommu_ioctl(void *iommu_data,
if (param.flags)
return -EINVAL;
- if (param.size & ~IOMMU_PAGE_MASK)
+ if (param.size & ~IOMMU_PAGE_MASK_4K)
return -EINVAL;
ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
- param.size >> IOMMU_PAGE_SHIFT);
+ param.size >> IOMMU_PAGE_SHIFT_4K);
if (ret)
return ret;
ret = iommu_clear_tces_and_put_pages(tbl,
- param.iova >> IOMMU_PAGE_SHIFT,
- param.size >> IOMMU_PAGE_SHIFT);
+ param.iova >> IOMMU_PAGE_SHIFT_4K,
+ param.size >> IOMMU_PAGE_SHIFT_4K);
iommu_flush_tce(tbl);
return ret;
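The _4K conversion above makes the locked-memory accounting explicit about the IOMMU page size: a table of it_size entries maps it_size 4K IOMMU pages, and that total is charged against RLIMIT_MEMLOCK in system pages. A one-line helper expressing the same arithmetic, added here purely for illustration (IOMMU_PAGE_SHIFT_4K is the powerpc definition used by this code):

#include <linux/mm.h>		/* PAGE_SHIFT */
#include <asm/iommu.h>		/* IOMMU_PAGE_SHIFT_4K on powerpc */

/*
 * System pages charged for a TCE table of it_size 4K entries.
 * E.g. with 64K system pages (PAGE_SHIFT = 16), a 2 GB window of
 * 512K TCEs charges 2^31 >> 16 = 32768 pages.
 */
static unsigned long tce_table_locked_pages(unsigned long it_size)
{
	return (it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
}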
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 831eb4fd197d..9a68409580d5 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -683,7 +683,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
- int r, i;
+ int i;
if (!n)
return -ENOMEM;
@@ -706,12 +706,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
}
- r = vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
- if (r < 0) {
- kfree(n);
- kfree(vqs);
- return r;
- }
+ vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index f175629513ed..0a025b8e2a12 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -728,7 +728,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq,
}
se_sess = tv_nexus->tvn_se_sess;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0) {
pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
return ERR_PTR(-ENOMEM);
@@ -889,7 +889,7 @@ static void tcm_vhost_submission_work(struct work_struct *work)
cmd->tvc_lun, cmd->tvc_exp_data_len,
cmd->tvc_task_attr, cmd->tvc_data_direction,
TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
- sg_bidi_ptr, sg_no_bidi);
+ sg_bidi_ptr, sg_no_bidi, NULL, 0);
if (rc < 0) {
transport_send_check_condition_and_sense(se_cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
@@ -1417,18 +1417,13 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vqs[i] = &vs->vqs[i].vq;
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
- r = vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
+ vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
tcm_vhost_init_inflight(vs, NULL);
- if (r < 0)
- goto err_init;
-
f->private_data = vs;
return 0;
-err_init:
- kfree(vqs);
err_vqs:
vhost_scsi_free(vs);
err_vs:
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 339eae85859a..c2a54fbf7f99 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -104,7 +104,6 @@ static int vhost_test_open(struct inode *inode, struct file *f)
struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
- int r;
if (!n)
return -ENOMEM;
@@ -117,12 +116,7 @@ static int vhost_test_open(struct inode *inode, struct file *f)
dev = &n->dev;
vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
- r = vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
- if (r < 0) {
- kfree(vqs);
- kfree(n);
- return r;
- }
+ vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
f->private_data = n;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 69068e0d8f31..78987e481bc6 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -290,7 +290,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
vhost_vq_free_iovecs(dev->vqs[i]);
}
-long vhost_dev_init(struct vhost_dev *dev,
+void vhost_dev_init(struct vhost_dev *dev,
struct vhost_virtqueue **vqs, int nvqs)
{
struct vhost_virtqueue *vq;
@@ -319,8 +319,6 @@ long vhost_dev_init(struct vhost_dev *dev,
vhost_poll_init(&vq->poll, vq->handle_kick,
POLLIN, dev);
}
-
- return 0;
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 4465ed5f316d..35eeb2a1bada 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -127,7 +127,7 @@ struct vhost_dev {
struct task_struct *worker;
};
-long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
+void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 4f2e1b35eb38..dade5b7699bc 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -312,7 +312,8 @@ config FB_PM2_FIFO_DISCONNECT
config FB_ARMCLCD
tristate "ARM PrimeCell PL110 support"
- depends on FB && ARM && ARM_AMBA
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on FB && ARM_AMBA
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -363,7 +364,7 @@ config FB_SA1100
config FB_IMX
tristate "Freescale i.MX1/21/25/27 LCD support"
- depends on FB && IMX_HAVE_PLATFORM_IMX_FB
+ depends on FB && ARCH_MXC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -979,6 +980,22 @@ config FB_PVR2
(<file:drivers/video/pvr2fb.c>). Please see the file
<file:Documentation/fb/pvr2fb.txt>.
+config FB_OPENCORES
+ tristate "OpenCores VGA/LCD core 2.0 framebuffer support"
+ depends on FB
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ This enables support for the OpenCores VGA/LCD core.
+
+ The OpenCores VGA/LCD core is typically used together with
+ softcore CPUs (e.g. OpenRISC or Microblaze) or hard processor
+ systems (e.g. Altera socfpga or Xilinx Zynq) on FPGAs.
+
+ The source code and specification for the core are available at
+ <http://opencores.org/project,vga_lcd>
+
config FB_S1D13XXX
tristate "Epson S1D13XXX framebuffer support"
depends on FB
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index e8bae8dd4804..ae17ddf49a00 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -150,6 +150,7 @@ obj-$(CONFIG_FB_NUC900) += nuc900fb.o
obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o
obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o
obj-$(CONFIG_FB_HYPERV) += hyperv_fb.o
+obj-$(CONFIG_FB_OPENCORES) += ocfb.o
# Platform or fallback drivers go here
obj-$(CONFIG_FB_UVESA) += uvesafb.o
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index 0dac36ce09d6..518f790ef88a 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -3710,7 +3710,7 @@ default_chipset:
if (!videomemory) {
dev_warn(&pdev->dev,
"Unable to map videomem cached writethrough\n");
- info->screen_base = (char *)ZTWO_VADDR(info->fix.smem_start);
+ info->screen_base = ZTWO_VADDR(info->fix.smem_start);
} else
info->screen_base = (char *)videomemory;
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
index a6b29bd4a12a..adc4ea2cc5a0 100644
--- a/drivers/video/arkfb.c
+++ b/drivers/video/arkfb.c
@@ -1014,7 +1014,7 @@ static int ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
vga_res.flags = IORESOURCE_IO;
- pcibios_bus_to_resource(dev, &vga_res, &bus_reg);
+ pcibios_bus_to_resource(dev->bus, &vga_res, &bus_reg);
par->state.vgabase = (void __iomem *) vga_res.start;
diff --git a/drivers/video/asiliantfb.c b/drivers/video/asiliantfb.c
index d611f1a1ac53..7e8ddf00ccc2 100644
--- a/drivers/video/asiliantfb.c
+++ b/drivers/video/asiliantfb.c
@@ -589,7 +589,6 @@ static void asiliantfb_remove(struct pci_dev *dp)
fb_dealloc_cmap(&p->cmap);
iounmap(p->screen_base);
release_mem_region(pci_resource_start(dp, 0), pci_resource_len(dp, 0));
- pci_set_drvdata(dp, NULL);
framebuffer_release(p);
}
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 12ca031877d4..52108be69e77 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -357,11 +357,13 @@ static int default_lcd_on = 1;
static bool mtrr = true;
#endif
+#ifdef CONFIG_FB_ATY128_BACKLIGHT
#ifdef CONFIG_PMAC_BACKLIGHT
static int backlight = 1;
#else
static int backlight = 0;
#endif
+#endif
/* PLL constants */
struct aty128_constants {
@@ -1671,7 +1673,9 @@ static int aty128fb_setup(char *options)
default_crt_on = simple_strtoul(this_opt+4, NULL, 0);
continue;
} else if (!strncmp(this_opt, "backlight:", 10)) {
+#ifdef CONFIG_FB_ATY128_BACKLIGHT
backlight = simple_strtoul(this_opt+10, NULL, 0);
+#endif
continue;
}
#ifdef CONFIG_MTRR
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 00076ecfe9b8..8ea42b8d9bc8 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -110,8 +110,8 @@ static int hp680bl_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = HP680_MAX_INTENSITY;
- bd = backlight_device_register("hp680-bl", &pdev->dev, NULL,
- &hp680bl_ops, &props);
+ bd = devm_backlight_device_register(&pdev->dev, "hp680-bl", &pdev->dev,
+ NULL, &hp680bl_ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
@@ -131,8 +131,6 @@ static int hp680bl_remove(struct platform_device *pdev)
bd->props.power = 0;
hp680bl_send_intensity(bd);
- backlight_device_unregister(bd);
-
return 0;
}
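This hp680 conversion, and the jornada720, omap1, ot200, and tosa conversions below, switch to the device-managed registration helpers so the explicit unregister in .remove can be dropped. A minimal sketch of the devm pattern follows; the example_bl_* names are hypothetical and the brightness handling is a stub.

#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_bl_update_status(struct backlight_device *bd)
{
	/* push bd->props.brightness to the hardware here */
	return 0;
}

static const struct backlight_ops example_bl_ops = {
	.update_status = example_bl_update_status,
};

static int example_bl_probe(struct platform_device *pdev)
{
	struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.max_brightness = 255,
	};
	struct backlight_device *bd;

	bd = devm_backlight_device_register(&pdev->dev, "example-bl",
					    &pdev->dev, NULL,
					    &example_bl_ops, &props);
	if (IS_ERR(bd))
		return PTR_ERR(bd);

	platform_set_drvdata(pdev, bd);
	/* no .remove needed: devres unregisters the device automatically */
	return 0;
}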
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 3ccb89340f22..6ce96b4a8796 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -115,9 +115,10 @@ static int jornada_bl_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = BL_MAX_BRIGHT;
- bd = backlight_device_register(S1D_DEVICENAME, &pdev->dev, NULL,
- &jornada_bl_ops, &props);
+ bd = devm_backlight_device_register(&pdev->dev, S1D_DEVICENAME,
+ &pdev->dev, NULL, &jornada_bl_ops,
+ &props);
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
dev_err(&pdev->dev, "failed to register device, err=%x\n", ret);
@@ -139,18 +140,8 @@ static int jornada_bl_probe(struct platform_device *pdev)
return 0;
}
-static int jornada_bl_remove(struct platform_device *pdev)
-{
- struct backlight_device *bd = platform_get_drvdata(pdev);
-
- backlight_device_unregister(bd);
-
- return 0;
-}
-
static struct platform_driver jornada_bl_driver = {
.probe = jornada_bl_probe,
- .remove = jornada_bl_remove,
.driver = {
.name = "jornada_bl",
},
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c
index b061413f1a65..da3876c9b3ae 100644
--- a/drivers/video/backlight/jornada720_lcd.c
+++ b/drivers/video/backlight/jornada720_lcd.c
@@ -100,7 +100,8 @@ static int jornada_lcd_probe(struct platform_device *pdev)
struct lcd_device *lcd_device;
int ret;
- lcd_device = lcd_device_register(S1D_DEVICENAME, &pdev->dev, NULL, &jornada_lcd_props);
+ lcd_device = devm_lcd_device_register(&pdev->dev, S1D_DEVICENAME,
+ &pdev->dev, NULL, &jornada_lcd_props);
if (IS_ERR(lcd_device)) {
ret = PTR_ERR(lcd_device);
@@ -119,18 +120,8 @@ static int jornada_lcd_probe(struct platform_device *pdev)
return 0;
}
-static int jornada_lcd_remove(struct platform_device *pdev)
-{
- struct lcd_device *lcd_device = platform_get_drvdata(pdev);
-
- lcd_device_unregister(lcd_device);
-
- return 0;
-}
-
static struct platform_driver jornada_lcd_driver = {
.probe = jornada_lcd_probe,
- .remove = jornada_lcd_remove,
.driver = {
.name = "jornada_lcd",
},
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index 7592cc25c963..84a110a719cb 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
static unsigned long kb3886bl_flags;
#define KB3886BL_SUSPENDED 0x01
-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
+static struct dmi_system_id kb3886bl_device_table[] __initdata = {
{
.ident = "Sahara Touch-iT",
.matches = {
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index b5fc13bc24e7..63e763828e0e 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -223,8 +223,8 @@ static int l4f00242t03_probe(struct spi_device *spi)
return PTR_ERR(priv->core_reg);
}
- priv->ld = lcd_device_register("l4f00242t03",
- &spi->dev, priv, &l4f_ops);
+ priv->ld = devm_lcd_device_register(&spi->dev, "l4f00242t03", &spi->dev,
+ priv, &l4f_ops);
if (IS_ERR(priv->ld))
return PTR_ERR(priv->ld);
@@ -243,8 +243,6 @@ static int l4f00242t03_remove(struct spi_device *spi)
struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
- lcd_device_unregister(priv->ld);
-
return 0;
}
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 93cf15efc717..7de847df224f 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -228,7 +228,7 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
rc = device_register(&new_ld->dev);
if (rc) {
- kfree(new_ld);
+ put_device(&new_ld->dev);
return ERR_PTR(rc);
}
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index cae80d555e84..2ca3a040007b 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -125,7 +125,7 @@ static bool lp855x_is_valid_rom_area(struct lp855x *lp, u8 addr)
return false;
}
- return (addr >= start && addr <= end);
+ return addr >= start && addr <= end;
}
static int lp8557_bl_off(struct lp855x *lp)
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
index e49905d495dc..daba34dc46d4 100644
--- a/drivers/video/backlight/lp8788_bl.c
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -63,13 +63,13 @@ static struct lp8788_bl_config default_bl_config = {
static inline bool is_brightness_ctrl_by_pwm(enum lp8788_bl_ctrl_mode mode)
{
- return (mode == LP8788_BL_COMB_PWM_BASED);
+ return mode == LP8788_BL_COMB_PWM_BASED;
}
static inline bool is_brightness_ctrl_by_register(enum lp8788_bl_ctrl_mode mode)
{
- return (mode == LP8788_BL_REGISTER_ONLY ||
- mode == LP8788_BL_COMB_REGISTER_BASED);
+ return mode == LP8788_BL_REGISTER_ONLY ||
+ mode == LP8788_BL_COMB_REGISTER_BASED;
}
static int lp8788_backlight_configure(struct lp8788_bl *bl)
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index ac11a4650c19..a0dcd88ac74f 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -146,8 +146,8 @@ static int omapbl_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = OMAPBL_MAX_INTENSITY;
- dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops,
- &props);
+ dev = devm_backlight_device_register(&pdev->dev, "omap-bl", &pdev->dev,
+ bl, &omapbl_ops, &props);
if (IS_ERR(dev))
return PTR_ERR(dev);
@@ -170,20 +170,10 @@ static int omapbl_probe(struct platform_device *pdev)
return 0;
}
-static int omapbl_remove(struct platform_device *pdev)
-{
- struct backlight_device *dev = platform_get_drvdata(pdev);
-
- backlight_device_unregister(dev);
-
- return 0;
-}
-
static SIMPLE_DEV_PM_OPS(omapbl_pm_ops, omapbl_suspend, omapbl_resume);
static struct platform_driver omapbl_driver = {
.probe = omapbl_probe,
- .remove = omapbl_remove,
.driver = {
.name = "omap-bl",
.pm = &omapbl_pm_ops,
diff --git a/drivers/video/backlight/ot200_bl.c b/drivers/video/backlight/ot200_bl.c
index fdbb6ee5027c..f5a5202dd79d 100644
--- a/drivers/video/backlight/ot200_bl.c
+++ b/drivers/video/backlight/ot200_bl.c
@@ -118,8 +118,9 @@ static int ot200_backlight_probe(struct platform_device *pdev)
props.brightness = 100;
props.type = BACKLIGHT_RAW;
- bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, data,
- &ot200_backlight_ops, &props);
+ bl = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev),
+ &pdev->dev, data, &ot200_backlight_ops,
+ &props);
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
retval = PTR_ERR(bl);
@@ -137,10 +138,6 @@ error_devm_kzalloc:
static int ot200_backlight_remove(struct platform_device *pdev)
{
- struct backlight_device *bl = platform_get_drvdata(pdev);
-
- backlight_device_unregister(bl);
-
/* on module unload set brightness to 100% */
cs5535_mfgpt_write(pwm_timer, MFGPT_REG_COUNTER, 0);
cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index fb80d68f4d33..b75201ff46f6 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -241,7 +241,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb = devm_kzalloc(&pdev->dev, sizeof(*pb), GFP_KERNEL);
if (!pb) {
- dev_err(&pdev->dev, "no memory for state\n");
ret = -ENOMEM;
goto err_alloc;
}
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index b8db9338cacd..3ad676558c80 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -105,8 +105,9 @@ static int tosa_bl_probe(struct i2c_client *client,
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = 512 - 1;
- data->bl = backlight_device_register("tosa-bl", &client->dev, data,
- &bl_ops, &props);
+ data->bl = devm_backlight_device_register(&client->dev, "tosa-bl",
+ &client->dev, data, &bl_ops,
+ &props);
if (IS_ERR(data->bl)) {
ret = PTR_ERR(data->bl);
goto err_reg;
@@ -128,9 +129,7 @@ static int tosa_bl_remove(struct i2c_client *client)
{
struct tosa_bl_data *data = i2c_get_clientdata(client);
- backlight_device_unregister(data->bl);
data->bl = NULL;
-
return 0;
}
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index be5d636764bf..f08d641ccd01 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -206,8 +206,8 @@ static int tosa_lcd_probe(struct spi_device *spi)
tosa_lcd_tg_on(data);
- data->lcd = lcd_device_register("tosa-lcd", &spi->dev, data,
- &tosa_lcd_ops);
+ data->lcd = devm_lcd_device_register(&spi->dev, "tosa-lcd", &spi->dev,
+ data, &tosa_lcd_ops);
if (IS_ERR(data->lcd)) {
ret = PTR_ERR(data->lcd);
@@ -226,8 +226,6 @@ static int tosa_lcd_remove(struct spi_device *spi)
{
struct tosa_lcd_data *data = spi_get_drvdata(spi);
- lcd_device_unregister(data->lcd);
-
if (data->i2c)
i2c_unregister_device(data->i2c);
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index 5aab9b9dc210..d992aa5eb3f0 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -2256,7 +2256,7 @@ static int cirrusfb_zorro_register(struct zorro_dev *z,
info->fix.mmio_start = regbase;
cinfo->regbase = regbase > 16 * MB_ ? ioremap(regbase, 64 * 1024)
- : (caddr_t)ZTWO_VADDR(regbase);
+ : ZTWO_VADDR(regbase);
if (!cinfo->regbase) {
dev_err(info->device, "Cannot map registers\n");
error = -EIO;
@@ -2266,7 +2266,7 @@ static int cirrusfb_zorro_register(struct zorro_dev *z,
info->fix.smem_start = rambase;
info->screen_size = ramsize;
info->screen_base = rambase > 16 * MB_ ? ioremap(rambase, ramsize)
- : (caddr_t)ZTWO_VADDR(rambase);
+ : ZTWO_VADDR(rambase);
if (!info->screen_base) {
dev_err(info->device, "Cannot map video RAM\n");
error = -EIO;
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 846caab75a46..fe1cd0148e13 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -8,7 +8,8 @@ config VGA_CONSOLE
bool "VGA text console" if EXPERT || !X86
depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && \
!SUPERH && !BLACKFIN && !AVR32 && !MN10300 && !CRIS && \
- (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER)
+ (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \
+ !ARM64
default y
help
Saying Y here will allow you to use Linux in text mode through a
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index cd8a8027f8ae..4e39291ac8b4 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3547,8 +3547,10 @@ static void fbcon_exit(void)
"no"));
for (j = first_fb_vc; j <= last_fb_vc; j++) {
- if (con2fb_map[j] == i)
+ if (con2fb_map[j] == i) {
mapped = 1;
+ break;
+ }
}
if (mapped) {
@@ -3561,6 +3563,7 @@ static void fbcon_exit(void)
fbcon_del_cursor_timer(info);
kfree(ops->cursor_src);
+ kfree(ops->cursor_state.mask);
kfree(info->fbcon_par);
info->fbcon_par = NULL;
}
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index 4ad24f2c6472..cecd3de01c24 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -488,7 +488,7 @@ static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
return 0;
}
-#ifdef CONFIG_FONTS
+#ifdef CONFIG_FONT_SUPPORT
static struct sti_cooked_font *
sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
{
diff --git a/drivers/video/exynos/Kconfig b/drivers/video/exynos/Kconfig
index 1129d0e9e640..75c8a8e7efc0 100644
--- a/drivers/video/exynos/Kconfig
+++ b/drivers/video/exynos/Kconfig
@@ -22,7 +22,8 @@ config EXYNOS_MIPI_DSI
config EXYNOS_LCD_S6E8AX0
bool "S6E8AX0 MIPI AMOLED LCD Driver"
- depends on (EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE && LCD_CLASS_DEVICE)
+ depends on EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE
+ depends on (LCD_CLASS_DEVICE = y)
default n
help
If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 010d19105ebc..7309ac704e26 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1577,10 +1577,10 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena,
static int do_unregister_framebuffer(struct fb_info *fb_info);
#define VGA_FB_PHYS 0xA0000
-static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
+static int do_remove_conflicting_framebuffers(struct apertures_struct *a,
+ const char *name, bool primary)
{
- int i;
+ int i, ret;
/* check all firmware fbs and kick off if the base addr overlaps */
for (i = 0 ; i < FB_MAX; i++) {
@@ -1599,22 +1599,29 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
printk(KERN_INFO "fb: conflicting fb hw usage "
"%s vs %s - removing generic driver\n",
name, registered_fb[i]->fix.id);
- do_unregister_framebuffer(registered_fb[i]);
+ ret = do_unregister_framebuffer(registered_fb[i]);
+ if (ret)
+ return ret;
}
}
+
+ return 0;
}
static int do_register_framebuffer(struct fb_info *fb_info)
{
- int i;
+ int i, ret;
struct fb_event event;
struct fb_videomode mode;
if (fb_check_foreignness(fb_info))
return -ENOSYS;
- do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
- fb_is_primary_device(fb_info));
+ ret = do_remove_conflicting_framebuffers(fb_info->apertures,
+ fb_info->fix.id,
+ fb_is_primary_device(fb_info));
+ if (ret)
+ return ret;
if (num_registered_fb == FB_MAX)
return -ENXIO;
@@ -1739,12 +1746,16 @@ int unlink_framebuffer(struct fb_info *fb_info)
}
EXPORT_SYMBOL(unlink_framebuffer);
-void remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
+int remove_conflicting_framebuffers(struct apertures_struct *a,
+ const char *name, bool primary)
{
+ int ret;
+
mutex_lock(&registration_lock);
- do_remove_conflicting_framebuffers(a, name, primary);
+ ret = do_remove_conflicting_framebuffers(a, name, primary);
mutex_unlock(&registration_lock);
+
+ return ret;
}
EXPORT_SYMBOL(remove_conflicting_framebuffers);
@@ -1930,6 +1941,9 @@ int fb_get_options(const char *name, char **option)
options = opt + name_len + 1;
}
}
+ /* No match, pass global option */
+ if (!options && option && fb_mode_option)
+ options = kstrdup(fb_mode_option, GFP_KERNEL);
if (options && !strncmp(options, "off", 3))
retval = 1;
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 038192ac7369..bb674e431741 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -2011,9 +2011,7 @@ static int i810fb_init_pci(struct pci_dev *dev,
struct fb_info *info;
struct i810fb_par *par = NULL;
struct fb_videomode mode;
- int i, err = -1, vfreq, hfreq, pixclock;
-
- i = 0;
+ int err = -1, vfreq, hfreq, pixclock;
info = framebuffer_alloc(sizeof(struct i810fb_par), &dev->dev);
if (!info)
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index 080c35b34bbb..b670cbda38e3 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -17,10 +17,6 @@
#include <asm/setup.h>
#endif
-#ifdef CONFIG_MIPS
-#include <asm/bootinfo.h>
-#endif
-
static bool nologo;
module_param(nologo, bool, 0);
MODULE_PARM_DESC(nologo, "Disables startup logo");
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index 5bd2eb8d4f39..cda7587cbc86 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -34,7 +34,6 @@
#include <linux/fb.h>
#include <asm/setup.h>
-#include <asm/bootinfo.h>
#include <asm/macintosh.h>
#include <asm/io.h>
diff --git a/drivers/video/mmp/core.c b/drivers/video/mmp/core.c
index 84de2632857a..b563b920f159 100644
--- a/drivers/video/mmp/core.c
+++ b/drivers/video/mmp/core.c
@@ -30,7 +30,7 @@ static struct mmp_overlay *path_get_overlay(struct mmp_path *path,
{
if (path && overlay_id < path->overlay_num)
return &path->overlays[overlay_id];
- return 0;
+ return NULL;
}
static int path_check_status(struct mmp_path *path)
@@ -173,7 +173,7 @@ struct mmp_path *mmp_register_path(struct mmp_path_info *info)
+ sizeof(struct mmp_overlay) * info->overlay_num;
path = kzalloc(size, GFP_KERNEL);
if (!path)
- goto failed;
+ return NULL;
/* path set */
mutex_init(&path->access_ok);
@@ -219,11 +219,6 @@ struct mmp_path *mmp_register_path(struct mmp_path_info *info)
mutex_unlock(&disp_lock);
return path;
-
-failed:
- kfree(path);
- mutex_unlock(&disp_lock);
- return NULL;
}
EXPORT_SYMBOL_GPL(mmp_register_path);
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 804f874d32d3..142e860fb527 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -1263,7 +1263,7 @@ static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len,
fbi->screen_base = dma_alloc_writecombine(fbi->device,
mem_len,
- &addr, GFP_DMA);
+ &addr, GFP_DMA | GFP_KERNEL);
if (!fbi->screen_base) {
dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n",
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 27197a8048c0..accf48a2cce4 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -49,6 +49,7 @@
#include <linux/fb.h>
#include <linux/regulator/consumer.h>
#include <video/of_display_timing.h>
+#include <video/of_videomode.h>
#include <video/videomode.h>
#define REG_SET 4
@@ -297,7 +298,7 @@ static int mxsfb_check_var(struct fb_var_screeninfo *var,
}
break;
default:
- pr_debug("Unsupported colour depth: %u\n", var->bits_per_pixel);
+ pr_err("Unsupported colour depth: %u\n", var->bits_per_pixel);
return -EINVAL;
}
@@ -426,7 +427,7 @@ static int mxsfb_set_par(struct fb_info *fb_info)
ctrl |= CTRL_SET_WORD_LENGTH(3);
switch (host->ld_intf_width) {
case STMLCDIF_8BIT:
- dev_dbg(&host->pdev->dev,
+ dev_err(&host->pdev->dev,
"Unsupported LCD bus width mapping\n");
return -EINVAL;
case STMLCDIF_16BIT:
@@ -439,7 +440,7 @@ static int mxsfb_set_par(struct fb_info *fb_info)
writel(CTRL1_SET_BYTE_PACKAGING(0x7), host->base + LCDC_CTRL1);
break;
default:
- dev_dbg(&host->pdev->dev, "Unhandled color depth of %u\n",
+ dev_err(&host->pdev->dev, "Unhandled color depth of %u\n",
fb_info->var.bits_per_pixel);
return -EINVAL;
}
@@ -589,7 +590,8 @@ static struct fb_ops mxsfb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static int mxsfb_restore_mode(struct mxsfb_info *host)
+static int mxsfb_restore_mode(struct mxsfb_info *host,
+ struct fb_videomode *vmode)
{
struct fb_info *fb_info = &host->fb_info;
unsigned line_count;
@@ -597,7 +599,6 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
unsigned long pa, fbsize;
int bits_per_pixel, ofs;
u32 transfer_count, vdctrl0, vdctrl2, vdctrl3, vdctrl4, ctrl;
- struct fb_videomode vmode;
/* Only restore the mode when the controller is running */
ctrl = readl(host->base + LCDC_CTRL);
@@ -611,8 +612,8 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
transfer_count = readl(host->base + host->devdata->transfer_count);
- vmode.xres = TRANSFER_COUNT_GET_HCOUNT(transfer_count);
- vmode.yres = TRANSFER_COUNT_GET_VCOUNT(transfer_count);
+ vmode->xres = TRANSFER_COUNT_GET_HCOUNT(transfer_count);
+ vmode->yres = TRANSFER_COUNT_GET_VCOUNT(transfer_count);
switch (CTRL_GET_WORD_LENGTH(ctrl)) {
case 0:
@@ -628,40 +629,39 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
fb_info->var.bits_per_pixel = bits_per_pixel;
- vmode.pixclock = KHZ2PICOS(clk_get_rate(host->clk) / 1000U);
- vmode.hsync_len = get_hsync_pulse_width(host, vdctrl2);
- vmode.left_margin = GET_HOR_WAIT_CNT(vdctrl3) - vmode.hsync_len;
- vmode.right_margin = VDCTRL2_GET_HSYNC_PERIOD(vdctrl2) - vmode.hsync_len -
- vmode.left_margin - vmode.xres;
- vmode.vsync_len = VDCTRL0_GET_VSYNC_PULSE_WIDTH(vdctrl0);
+ vmode->pixclock = KHZ2PICOS(clk_get_rate(host->clk) / 1000U);
+ vmode->hsync_len = get_hsync_pulse_width(host, vdctrl2);
+ vmode->left_margin = GET_HOR_WAIT_CNT(vdctrl3) - vmode->hsync_len;
+ vmode->right_margin = VDCTRL2_GET_HSYNC_PERIOD(vdctrl2) -
+ vmode->hsync_len - vmode->left_margin - vmode->xres;
+ vmode->vsync_len = VDCTRL0_GET_VSYNC_PULSE_WIDTH(vdctrl0);
period = readl(host->base + LCDC_VDCTRL1);
- vmode.upper_margin = GET_VERT_WAIT_CNT(vdctrl3) - vmode.vsync_len;
- vmode.lower_margin = period - vmode.vsync_len - vmode.upper_margin - vmode.yres;
+ vmode->upper_margin = GET_VERT_WAIT_CNT(vdctrl3) - vmode->vsync_len;
+ vmode->lower_margin = period - vmode->vsync_len -
+ vmode->upper_margin - vmode->yres;
- vmode.vmode = FB_VMODE_NONINTERLACED;
+ vmode->vmode = FB_VMODE_NONINTERLACED;
- vmode.sync = 0;
+ vmode->sync = 0;
if (vdctrl0 & VDCTRL0_HSYNC_ACT_HIGH)
- vmode.sync |= FB_SYNC_HOR_HIGH_ACT;
+ vmode->sync |= FB_SYNC_HOR_HIGH_ACT;
if (vdctrl0 & VDCTRL0_VSYNC_ACT_HIGH)
- vmode.sync |= FB_SYNC_VERT_HIGH_ACT;
+ vmode->sync |= FB_SYNC_VERT_HIGH_ACT;
pr_debug("Reconstructed video mode:\n");
pr_debug("%dx%d, hsync: %u left: %u, right: %u, vsync: %u, upper: %u, lower: %u\n",
- vmode.xres, vmode.yres,
- vmode.hsync_len, vmode.left_margin, vmode.right_margin,
- vmode.vsync_len, vmode.upper_margin, vmode.lower_margin);
- pr_debug("pixclk: %ldkHz\n", PICOS2KHZ(vmode.pixclock));
-
- fb_add_videomode(&vmode, &fb_info->modelist);
+ vmode->xres, vmode->yres, vmode->hsync_len, vmode->left_margin,
+ vmode->right_margin, vmode->vsync_len, vmode->upper_margin,
+ vmode->lower_margin);
+ pr_debug("pixclk: %ldkHz\n", PICOS2KHZ(vmode->pixclock));
host->ld_intf_width = CTRL_GET_BUS_WIDTH(ctrl);
host->dotclk_delay = VDCTRL4_GET_DOTCLK_DLY(vdctrl4);
- fb_info->fix.line_length = vmode.xres * (bits_per_pixel >> 3);
+ fb_info->fix.line_length = vmode->xres * (bits_per_pixel >> 3);
pa = readl(host->base + host->devdata->cur_buf);
- fbsize = fb_info->fix.line_length * vmode.yres;
+ fbsize = fb_info->fix.line_length * vmode->yres;
if (pa < fb_info->fix.smem_start)
return -EINVAL;
if (pa + fbsize > fb_info->fix.smem_start + fb_info->fix.smem_len)
@@ -681,18 +681,17 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
return 0;
}
-static int mxsfb_init_fbinfo_dt(struct mxsfb_info *host)
+static int mxsfb_init_fbinfo_dt(struct mxsfb_info *host,
+ struct fb_videomode *vmode)
{
struct fb_info *fb_info = &host->fb_info;
struct fb_var_screeninfo *var = &fb_info->var;
struct device *dev = &host->pdev->dev;
struct device_node *np = host->pdev->dev.of_node;
struct device_node *display_np;
- struct device_node *timings_np;
- struct display_timings *timings;
+ struct videomode vm;
u32 width;
- int i;
- int ret = 0;
+ int ret;
display_np = of_parse_phandle(np, "display", 0);
if (!display_np) {
@@ -732,54 +731,35 @@ static int mxsfb_init_fbinfo_dt(struct mxsfb_info *host)
goto put_display_node;
}
- timings = of_get_display_timings(display_np);
- if (!timings) {
- dev_err(dev, "failed to get display timings\n");
- ret = -ENOENT;
+ ret = of_get_videomode(display_np, &vm, OF_USE_NATIVE_MODE);
+ if (ret) {
+ dev_err(dev, "failed to get videomode from DT\n");
goto put_display_node;
}
- timings_np = of_find_node_by_name(display_np,
- "display-timings");
- if (!timings_np) {
- dev_err(dev, "failed to find display-timings node\n");
- ret = -ENOENT;
+ ret = fb_videomode_from_videomode(&vm, vmode);
+ if (ret < 0)
goto put_display_node;
- }
- for (i = 0; i < of_get_child_count(timings_np); i++) {
- struct videomode vm;
- struct fb_videomode fb_vm;
-
- ret = videomode_from_timings(timings, &vm, i);
- if (ret < 0)
- goto put_timings_node;
- ret = fb_videomode_from_videomode(&vm, &fb_vm);
- if (ret < 0)
- goto put_timings_node;
-
- if (vm.flags & DISPLAY_FLAGS_DE_HIGH)
- host->sync |= MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
- if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
- host->sync |= MXSFB_SYNC_DOTCLK_FALLING_ACT;
- fb_add_videomode(&fb_vm, &fb_info->modelist);
- }
+ if (vm.flags & DISPLAY_FLAGS_DE_HIGH)
+ host->sync |= MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
+ if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+ host->sync |= MXSFB_SYNC_DOTCLK_FALLING_ACT;
-put_timings_node:
- of_node_put(timings_np);
put_display_node:
of_node_put(display_np);
return ret;
}
-static int mxsfb_init_fbinfo(struct mxsfb_info *host)
+static int mxsfb_init_fbinfo(struct mxsfb_info *host,
+ struct fb_videomode *vmode)
{
+ int ret;
struct fb_info *fb_info = &host->fb_info;
struct fb_var_screeninfo *var = &fb_info->var;
dma_addr_t fb_phys;
void *fb_virt;
unsigned fb_size;
- int ret;
fb_info->fbops = &mxsfb_ops;
fb_info->flags = FBINFO_FLAG_DEFAULT | FBINFO_READS_FAST;
@@ -789,7 +769,7 @@ static int mxsfb_init_fbinfo(struct mxsfb_info *host)
fb_info->fix.visual = FB_VISUAL_TRUECOLOR,
fb_info->fix.accel = FB_ACCEL_NONE;
- ret = mxsfb_init_fbinfo_dt(host);
+ ret = mxsfb_init_fbinfo_dt(host, vmode);
if (ret)
return ret;
@@ -810,7 +790,7 @@ static int mxsfb_init_fbinfo(struct mxsfb_info *host)
fb_info->screen_base = fb_virt;
fb_info->screen_size = fb_info->fix.smem_len = fb_size;
- if (mxsfb_restore_mode(host))
+ if (mxsfb_restore_mode(host, vmode))
memset(fb_virt, 0, fb_size);
return 0;
@@ -850,7 +830,7 @@ static int mxsfb_probe(struct platform_device *pdev)
struct resource *res;
struct mxsfb_info *host;
struct fb_info *fb_info;
- struct fb_modelist *modelist;
+ struct fb_videomode *mode;
int ret;
if (of_id)
@@ -862,6 +842,11 @@ static int mxsfb_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ mode = devm_kzalloc(&pdev->dev, sizeof(struct fb_videomode),
+ GFP_KERNEL);
+ if (mode == NULL)
+ return -ENOMEM;
+
host = to_imxfb_host(fb_info);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -893,15 +878,11 @@ static int mxsfb_probe(struct platform_device *pdev)
goto fb_release;
}
- INIT_LIST_HEAD(&fb_info->modelist);
-
- ret = mxsfb_init_fbinfo(host);
+ ret = mxsfb_init_fbinfo(host, mode);
if (ret != 0)
goto fb_release;
- modelist = list_first_entry(&fb_info->modelist,
- struct fb_modelist, list);
- fb_videomode_to_var(&fb_info->var, &modelist->mode);
+ fb_videomode_to_var(&fb_info->var, mode);
/* init the color fields */
mxsfb_check_var(&fb_info->var, fb_info);
@@ -927,7 +908,6 @@ static int mxsfb_probe(struct platform_device *pdev)
fb_destroy:
if (host->enabled)
clk_disable_unprepare(host->clk);
- fb_destroy_modelist(&fb_info->modelist);
fb_release:
framebuffer_release(fb_info);
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index ff228713425e..def041204676 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -1461,7 +1461,6 @@ static void nvidiafb_remove(struct pci_dev *pd)
pci_release_regions(pd);
kfree(info->pixmap.addr);
framebuffer_release(info);
- pci_set_drvdata(pd, NULL);
NVTRACE_LEAVE();
}
diff --git a/drivers/video/ocfb.c b/drivers/video/ocfb.c
new file mode 100644
index 000000000000..7f9dc9bec309
--- /dev/null
+++ b/drivers/video/ocfb.c
@@ -0,0 +1,440 @@
+/*
+ * OpenCores VGA/LCD 2.0 core frame buffer driver
+ *
+ * Copyright (C) 2013 Stefan Kristiansson, stefan.kristiansson@saunalahti.fi
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+/* OCFB register defines */
+#define OCFB_CTRL 0x000
+#define OCFB_STAT 0x004
+#define OCFB_HTIM 0x008
+#define OCFB_VTIM 0x00c
+#define OCFB_HVLEN 0x010
+#define OCFB_VBARA 0x014
+#define OCFB_PALETTE 0x800
+
+#define OCFB_CTRL_VEN 0x00000001 /* Video Enable */
+#define OCFB_CTRL_HIE 0x00000002 /* HSync Interrupt Enable */
+#define OCFB_CTRL_PC 0x00000800 /* 8-bit Pseudo Color Enable */
+#define OCFB_CTRL_CD8 0x00000000 /* Color Depth 8 */
+#define OCFB_CTRL_CD16 0x00000200 /* Color Depth 16 */
+#define OCFB_CTRL_CD24 0x00000400 /* Color Depth 24 */
+#define OCFB_CTRL_CD32 0x00000600 /* Color Depth 32 */
+#define OCFB_CTRL_VBL1 0x00000000 /* Burst Length 1 */
+#define OCFB_CTRL_VBL2 0x00000080 /* Burst Length 2 */
+#define OCFB_CTRL_VBL4 0x00000100 /* Burst Length 4 */
+#define OCFB_CTRL_VBL8 0x00000180 /* Burst Length 8 */
+
+#define PALETTE_SIZE 256
+
+#define OCFB_NAME "OC VGA/LCD"
+
+static char *mode_option;
+
+static const struct fb_videomode default_mode = {
+ /* 640x480 @ 60 Hz, 31.5 kHz hsync */
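+	/* name, refresh, xres, yres, pixclock, left/right/upper/lower margin,
+	 * hsync_len, vsync_len, sync, vmode */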
+ NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2,
+ 0, FB_VMODE_NONINTERLACED
+};
+
+struct ocfb_dev {
+ struct fb_info info;
+ void __iomem *regs;
+	/* flag indicating whether the regs are accessed as little-endian */
+ int little_endian;
+ /* Physical and virtual addresses of framebuffer */
+ phys_addr_t fb_phys;
+ void __iomem *fb_virt;
+ u32 pseudo_palette[PALETTE_SIZE];
+};
+
+#ifndef MODULE
+static int __init ocfb_setup(char *options)
+{
+ char *curr_opt;
+
+ if (!options || !*options)
+ return 0;
+
+ while ((curr_opt = strsep(&options, ",")) != NULL) {
+ if (!*curr_opt)
+ continue;
+ mode_option = curr_opt;
+ }
+
+ return 0;
+}
+#endif
+
+static inline u32 ocfb_readreg(struct ocfb_dev *fbdev, loff_t offset)
+{
+ if (fbdev->little_endian)
+ return ioread32(fbdev->regs + offset);
+ else
+ return ioread32be(fbdev->regs + offset);
+}
+
+static void ocfb_writereg(struct ocfb_dev *fbdev, loff_t offset, u32 data)
+{
+ if (fbdev->little_endian)
+ iowrite32(data, fbdev->regs + offset);
+ else
+ iowrite32be(data, fbdev->regs + offset);
+}
+
+static int ocfb_setupfb(struct ocfb_dev *fbdev)
+{
+ unsigned long bpp_config;
+ struct fb_var_screeninfo *var = &fbdev->info.var;
+ struct device *dev = fbdev->info.device;
+ u32 hlen;
+ u32 vlen;
+
+ /* Disable display */
+ ocfb_writereg(fbdev, OCFB_CTRL, 0);
+
+ /* Register framebuffer address */
+ fbdev->little_endian = 0;
+ ocfb_writereg(fbdev, OCFB_VBARA, fbdev->fb_phys);
+
+	/* Detect endianness */
+ if (ocfb_readreg(fbdev, OCFB_VBARA) != fbdev->fb_phys) {
+ fbdev->little_endian = 1;
+ ocfb_writereg(fbdev, OCFB_VBARA, fbdev->fb_phys);
+ }
+
+ /* Horizontal timings */
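+	/* hsync_len-1, right_margin-1 (front porch) and xres-1 are packed at
+	 * bit offsets 24, 16 and 0 respectively */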
+ ocfb_writereg(fbdev, OCFB_HTIM, (var->hsync_len - 1) << 24 |
+ (var->right_margin - 1) << 16 | (var->xres - 1));
+
+ /* Vertical timings */
+ ocfb_writereg(fbdev, OCFB_VTIM, (var->vsync_len - 1) << 24 |
+ (var->lower_margin - 1) << 16 | (var->yres - 1));
+
+ /* Total length of frame */
+ hlen = var->left_margin + var->right_margin + var->hsync_len +
+ var->xres;
+
+ vlen = var->upper_margin + var->lower_margin + var->vsync_len +
+ var->yres;
+
+ ocfb_writereg(fbdev, OCFB_HVLEN, (hlen - 1) << 16 | (vlen - 1));
+
+ bpp_config = OCFB_CTRL_CD8;
+ switch (var->bits_per_pixel) {
+ case 8:
+ if (!var->grayscale)
+ bpp_config |= OCFB_CTRL_PC; /* enable palette */
+ break;
+
+ case 16:
+ bpp_config |= OCFB_CTRL_CD16;
+ break;
+
+ case 24:
+ bpp_config |= OCFB_CTRL_CD24;
+ break;
+
+ case 32:
+ bpp_config |= OCFB_CTRL_CD32;
+ break;
+
+ default:
+ dev_err(dev, "no bpp specified\n");
+ break;
+ }
+
+ /* maximum (8) VBL (video memory burst length) */
+ bpp_config |= OCFB_CTRL_VBL8;
+
+ /* Enable output */
+ ocfb_writereg(fbdev, OCFB_CTRL, (OCFB_CTRL_VEN | bpp_config));
+
+ return 0;
+}
+
+static int ocfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp,
+ struct fb_info *info)
+{
+ struct ocfb_dev *fbdev = (struct ocfb_dev *)info->par;
+ u32 color;
+
+ if (regno >= info->cmap.len) {
+ dev_err(info->device, "regno >= cmap.len\n");
+ return 1;
+ }
+
+ if (info->var.grayscale) {
+ /* grayscale = 0.30*R + 0.59*G + 0.11*B */
+ red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
+ }
+
+ red >>= (16 - info->var.red.length);
+ green >>= (16 - info->var.green.length);
+ blue >>= (16 - info->var.blue.length);
+ transp >>= (16 - info->var.transp.length);
+
+ if (info->var.bits_per_pixel == 8 && !info->var.grayscale) {
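+		/* each palette entry is a 32-bit register, so convert the
+		 * index into a byte offset */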
+ regno <<= 2;
+ color = (red << 16) | (green << 8) | blue;
+ ocfb_writereg(fbdev, OCFB_PALETTE + regno, color);
+ } else {
+ ((u32 *)(info->pseudo_palette))[regno] =
+ (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset) |
+ (transp << info->var.transp.offset);
+ }
+
+ return 0;
+}
+
+static int ocfb_init_fix(struct ocfb_dev *fbdev)
+{
+ struct fb_var_screeninfo *var = &fbdev->info.var;
+ struct fb_fix_screeninfo *fix = &fbdev->info.fix;
+
+ strcpy(fix->id, OCFB_NAME);
+
+ fix->line_length = var->xres * var->bits_per_pixel/8;
+ fix->smem_len = fix->line_length * var->yres;
+ fix->type = FB_TYPE_PACKED_PIXELS;
+
+ if (var->bits_per_pixel == 8 && !var->grayscale)
+ fix->visual = FB_VISUAL_PSEUDOCOLOR;
+ else
+ fix->visual = FB_VISUAL_TRUECOLOR;
+
+ return 0;
+}
+
+static int ocfb_init_var(struct ocfb_dev *fbdev)
+{
+ struct fb_var_screeninfo *var = &fbdev->info.var;
+
+ var->accel_flags = FB_ACCEL_NONE;
+ var->activate = FB_ACTIVATE_NOW;
+ var->xres_virtual = var->xres;
+ var->yres_virtual = var->yres;
+
+ switch (var->bits_per_pixel) {
+ case 8:
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ var->red.offset = 0;
+ var->red.length = 8;
+ var->green.offset = 0;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ break;
+
+ case 16:
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ break;
+
+ case 24:
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ break;
+
+ case 32:
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ break;
+ }
+
+ return 0;
+}
+
+static struct fb_ops ocfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_setcolreg = ocfb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
+static int ocfb_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct ocfb_dev *fbdev;
+ struct resource *res;
+ int fbsize;
+
+ fbdev = devm_kzalloc(&pdev->dev, sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, fbdev);
+
+ fbdev->info.fbops = &ocfb_ops;
+ fbdev->info.device = &pdev->dev;
+ fbdev->info.par = fbdev;
+
+ /* Video mode setup */
+ if (!fb_find_mode(&fbdev->info.var, &fbdev->info, mode_option,
+ NULL, 0, &default_mode, 16)) {
+ dev_err(&pdev->dev, "No valid video modes found\n");
+ return -EINVAL;
+ }
+ ocfb_init_var(fbdev);
+ ocfb_init_fix(fbdev);
+
+ /* Request I/O resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "I/O resource request failed\n");
+ return -ENXIO;
+ }
+ res->flags &= ~IORESOURCE_CACHEABLE;
+ fbdev->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fbdev->regs))
+ return PTR_ERR(fbdev->regs);
+
+ /* Allocate framebuffer memory */
+ fbsize = fbdev->info.fix.smem_len;
+ fbdev->fb_virt = dma_alloc_coherent(&pdev->dev, PAGE_ALIGN(fbsize),
+ &fbdev->fb_phys, GFP_KERNEL);
+ if (!fbdev->fb_virt) {
+ dev_err(&pdev->dev,
+ "Frame buffer memory allocation failed\n");
+ return -ENOMEM;
+ }
+ fbdev->info.fix.smem_start = fbdev->fb_phys;
+ fbdev->info.screen_base = fbdev->fb_virt;
+ fbdev->info.pseudo_palette = fbdev->pseudo_palette;
+
+ /* Clear framebuffer */
+ memset_io(fbdev->fb_virt, 0, fbsize);
+
+ /* Setup and enable the framebuffer */
+ ocfb_setupfb(fbdev);
+
+ if (fbdev->little_endian)
+ fbdev->info.flags |= FBINFO_FOREIGN_ENDIAN;
+
+ /* Allocate color map */
+ ret = fb_alloc_cmap(&fbdev->info.cmap, PALETTE_SIZE, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Color map allocation failed\n");
+ goto err_dma_free;
+ }
+
+ /* Register framebuffer */
+ ret = register_framebuffer(&fbdev->info);
+ if (ret) {
+ dev_err(&pdev->dev, "Framebuffer registration failed\n");
+ goto err_dealloc_cmap;
+ }
+
+ return 0;
+
+err_dealloc_cmap:
+ fb_dealloc_cmap(&fbdev->info.cmap);
+
+err_dma_free:
+ dma_free_coherent(&pdev->dev, PAGE_ALIGN(fbsize), fbdev->fb_virt,
+ fbdev->fb_phys);
+
+ return ret;
+}
+
+static int ocfb_remove(struct platform_device *pdev)
+{
+ struct ocfb_dev *fbdev = platform_get_drvdata(pdev);
+
+ unregister_framebuffer(&fbdev->info);
+ fb_dealloc_cmap(&fbdev->info.cmap);
+ dma_free_coherent(&pdev->dev, PAGE_ALIGN(fbdev->info.fix.smem_len),
+ fbdev->fb_virt, fbdev->fb_phys);
+
+ /* Disable display */
+ ocfb_writereg(fbdev, OCFB_CTRL, 0);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct of_device_id ocfb_match[] = {
+ { .compatible = "opencores,ocfb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ocfb_match);
+
+static struct platform_driver ocfb_driver = {
+ .probe = ocfb_probe,
+ .remove = ocfb_remove,
+ .driver = {
+ .name = "ocfb_fb",
+ .of_match_table = ocfb_match,
+ }
+};
+
+/*
+ * Init and exit routines
+ */
+static int __init ocfb_init(void)
+{
+#ifndef MODULE
+ char *option = NULL;
+
+ if (fb_get_options("ocfb", &option))
+ return -ENODEV;
+ ocfb_setup(option);
+#endif
+ return platform_driver_register(&ocfb_driver);
+}
+
+static void __exit ocfb_exit(void)
+{
+ platform_driver_unregister(&ocfb_driver);
+}
+
+module_init(ocfb_init);
+module_exit(ocfb_exit);
+
+MODULE_AUTHOR("Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>");
+MODULE_DESCRIPTION("OpenCores VGA/LCD 2.0 frame buffer driver");
+MODULE_LICENSE("GPL v2");
+module_param(mode_option, charp, 0);
+MODULE_PARM_DESC(mode_option, "Video mode ('<xres>x<yres>[-<bpp>][@refresh]')");
diff --git a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
index d94f35dbd536..8e97d06921ff 100644
--- a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
+++ b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
@@ -346,28 +346,22 @@ static int acx565akm_get_actual_brightness(struct panel_drv_data *ddata)
static int acx565akm_bl_update_status(struct backlight_device *dev)
{
struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
- int r;
int level;
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
- mutex_lock(&ddata->mutex);
-
if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
dev->props.power == FB_BLANK_UNBLANK)
level = dev->props.brightness;
else
level = 0;
- r = 0;
if (ddata->has_bc)
acx565akm_set_brightness(ddata, level);
else
- r = -ENODEV;
-
- mutex_unlock(&ddata->mutex);
+ return -ENODEV;
- return r;
+ return 0;
}
static int acx565akm_bl_get_intensity(struct backlight_device *dev)
@@ -390,9 +384,33 @@ static int acx565akm_bl_get_intensity(struct backlight_device *dev)
return 0;
}
+static int acx565akm_bl_update_status_locked(struct backlight_device *dev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
+ int r;
+
+ mutex_lock(&ddata->mutex);
+ r = acx565akm_bl_update_status(dev);
+ mutex_unlock(&ddata->mutex);
+
+ return r;
+}
+
+static int acx565akm_bl_get_intensity_locked(struct backlight_device *dev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
+ int r;
+
+ mutex_lock(&ddata->mutex);
+ r = acx565akm_bl_get_intensity(dev);
+ mutex_unlock(&ddata->mutex);
+
+ return r;
+}
+
static const struct backlight_ops acx565akm_bl_ops = {
- .get_brightness = acx565akm_bl_get_intensity,
- .update_status = acx565akm_bl_update_status,
+ .get_brightness = acx565akm_bl_get_intensity_locked,
+ .update_status = acx565akm_bl_update_status_locked,
};
/*--------------------Auto Brightness control via Sysfs---------------------*/
@@ -526,8 +544,6 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
struct omap_dss_device *in = ddata->in;
int r;
- mutex_lock(&ddata->mutex);
-
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
in->ops.sdi->set_timings(in, &ddata->videomode);
@@ -568,8 +584,6 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
set_display_state(ddata, 1);
set_cabc_mode(ddata, ddata->cabc_mode);
- mutex_unlock(&ddata->mutex);
-
return acx565akm_bl_update_status(ddata->bl_dev);
}
@@ -616,7 +630,9 @@ static int acx565akm_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
+ mutex_lock(&ddata->mutex);
r = acx565akm_panel_power_on(dssdev);
+ mutex_unlock(&ddata->mutex);
if (r)
return r;
diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c
index 60758dbefd79..0a0b084ce65d 100644
--- a/drivers/video/omap2/dss/apply.c
+++ b/drivers/video/omap2/dss/apply.c
@@ -149,6 +149,9 @@ static void apply_init_priv(void)
op = &dss_data.ovl_priv_data_array[i];
+ op->info.color_mode = OMAP_DSS_COLOR_RGB16;
+ op->info.rotation_type = OMAP_DSS_ROT_DMA;
+
op->info.global_alpha = 255;
switch (i) {
@@ -629,7 +632,7 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
struct mgr_priv_data *mp;
int r;
- DSSDBG("writing ovl %d regs", ovl->id);
+ DSSDBG("writing ovl %d regs\n", ovl->id);
if (!op->enabled || !op->info_dirty)
return;
@@ -664,7 +667,7 @@ static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
struct ovl_priv_data *op = get_ovl_priv(ovl);
struct mgr_priv_data *mp;
- DSSDBG("writing ovl %d regs extra", ovl->id);
+ DSSDBG("writing ovl %d regs extra\n", ovl->id);
if (!op->extra_info_dirty)
return;
@@ -687,7 +690,7 @@ static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
struct mgr_priv_data *mp = get_mgr_priv(mgr);
struct omap_overlay *ovl;
- DSSDBG("writing mgr %d regs", mgr->id);
+ DSSDBG("writing mgr %d regs\n", mgr->id);
if (!mp->enabled)
return;
@@ -713,7 +716,7 @@ static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
- DSSDBG("writing mgr %d regs extra", mgr->id);
+ DSSDBG("writing mgr %d regs extra\n", mgr->id);
if (!mp->extra_info_dirty)
return;
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 4ec59ca72e5d..77d6221618f4 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -90,6 +90,8 @@ struct dispc_features {
/* revert to the OMAP4 mechanism of DISPC Smart Standby operation */
bool mstandby_workaround:1;
+
+ bool set_max_preload:1;
};
#define DISPC_MAX_NR_FIFOS 5
@@ -1200,7 +1202,17 @@ void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high)
dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane),
FLD_VAL(high, hi_start, hi_end) |
FLD_VAL(low, lo_start, lo_end));
+
+	/*
+	 * Configure the preload to the pipeline's high threshold; if the high
+	 * threshold is too large for the preload field, set the preload to the
+	 * maximum value that the preload register can hold.
+	 */
+ if (dss_has_feature(FEAT_PRELOAD) && dispc.feat->set_max_preload &&
+ plane != OMAP_DSS_WB)
+ dispc_write_reg(DISPC_OVL_PRELOAD(plane), min(high, 0xfffu));
}
+EXPORT_SYMBOL(dispc_ovl_set_fifo_threshold);
void dispc_enable_fifomerge(bool enable)
{
@@ -1259,6 +1271,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
*fifo_high = total_fifo_size - buf_unit;
}
}
+EXPORT_SYMBOL(dispc_ovl_compute_fifo_thresholds);
static void dispc_ovl_set_fir(enum omap_plane plane,
int hinc, int vinc,
@@ -1988,7 +2001,8 @@ static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
*/
static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
const struct omap_video_timings *t, u16 pos_x,
- u16 width, u16 height, u16 out_width, u16 out_height)
+ u16 width, u16 height, u16 out_width, u16 out_height,
+ bool five_taps)
{
const int ds = DIV_ROUND_UP(height, out_height);
unsigned long nonactive;
@@ -2008,6 +2022,10 @@ static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
if (blank <= limits[i])
return -EINVAL;
+ /* FIXME add checks for 3-tap filter once the limitations are known */
+ if (!five_taps)
+ return 0;
+
/*
* Pixel data should be prepared before visible display point starts.
 * So, at least DS-2 lines must have already been fetched by DISPC
@@ -2142,8 +2160,8 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
*five_taps = false;
do {
- in_height = DIV_ROUND_UP(height, *decim_y);
- in_width = DIV_ROUND_UP(width, *decim_x);
+ in_height = height / *decim_y;
+ in_width = width / *decim_x;
*core_clk = dispc.feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height, mem_to_mem);
error = (in_width > maxsinglelinewidth || !*core_clk ||
@@ -2181,24 +2199,32 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
do {
- in_height = DIV_ROUND_UP(height, *decim_y);
- in_width = DIV_ROUND_UP(width, *decim_x);
- *core_clk = calc_core_clk_five_taps(pclk, mgr_timings,
- in_width, in_height, out_width, out_height, color_mode);
-
- error = check_horiz_timing_omap3(pclk, lclk, mgr_timings,
- pos_x, in_width, in_height, out_width,
- out_height);
+ in_height = height / *decim_y;
+ in_width = width / *decim_x;
+ *five_taps = in_height > out_height;
if (in_width > maxsinglelinewidth)
if (in_height > out_height &&
in_height < out_height * 2)
*five_taps = false;
- if (!*five_taps)
+again:
+ if (*five_taps)
+ *core_clk = calc_core_clk_five_taps(pclk, mgr_timings,
+ in_width, in_height, out_width,
+ out_height, color_mode);
+ else
*core_clk = dispc.feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height,
mem_to_mem);
+ error = check_horiz_timing_omap3(pclk, lclk, mgr_timings,
+ pos_x, in_width, in_height, out_width,
+ out_height, *five_taps);
+ if (error && *five_taps) {
+ *five_taps = false;
+ goto again;
+ }
+
error = (error || in_width > maxsinglelinewidth * 2 ||
(in_width > maxsinglelinewidth && *five_taps) ||
!*core_clk || *core_clk > dispc_core_clk_rate());
@@ -2215,7 +2241,7 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
} while (*decim_x <= *x_predecim && *decim_y <= *y_predecim && error);
if (check_horiz_timing_omap3(pclk, lclk, mgr_timings, pos_x, width,
- height, out_width, out_height)){
+ height, out_width, out_height, *five_taps)) {
DSSERR("horizontal timing too tight\n");
return -EINVAL;
}
@@ -2242,7 +2268,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
{
u16 in_width, in_width_max;
int decim_x_min = *decim_x;
- u16 in_height = DIV_ROUND_UP(height, *decim_y);
+ u16 in_height = height / *decim_y;
const int maxsinglelinewidth =
dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
@@ -2261,7 +2287,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
return -EINVAL;
do {
- in_width = DIV_ROUND_UP(width, *decim_x);
+ in_width = width / *decim_x;
} while (*decim_x <= *x_predecim &&
in_width > maxsinglelinewidth && ++*decim_x);
@@ -2440,8 +2466,8 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
if (r)
return r;
- in_width = DIV_ROUND_UP(in_width, x_predecim);
- in_height = DIV_ROUND_UP(in_height, y_predecim);
+ in_width = in_width / x_predecim;
+ in_height = in_height / y_predecim;
if (color_mode == OMAP_DSS_COLOR_YUV2 ||
color_mode == OMAP_DSS_COLOR_UYVY ||
@@ -3211,6 +3237,8 @@ static void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_CONTROL3);
DUMPREG(DISPC_CONFIG3);
}
+ if (dss_has_feature(FEAT_MFLAG))
+ DUMPREG(DISPC_GLOBAL_MFLAG_ATTRIBUTE);
#undef DUMPREG
@@ -3285,6 +3313,8 @@ static void dispc_dump_regs(struct seq_file *s)
DUMPREG(i, DISPC_OVL_ATTRIBUTES2);
if (dss_has_feature(FEAT_PRELOAD))
DUMPREG(i, DISPC_OVL_PRELOAD);
+ if (dss_has_feature(FEAT_MFLAG))
+ DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
}
#undef DISPC_REG
@@ -3520,6 +3550,7 @@ static const struct dispc_features omap24xx_dispc_feats __initconst = {
.calc_core_clk = calc_core_clk_24xx,
.num_fifos = 3,
.no_framedone_tv = true,
+ .set_max_preload = false,
};
static const struct dispc_features omap34xx_rev1_0_dispc_feats __initconst = {
@@ -3539,6 +3570,7 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats __initconst = {
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
.no_framedone_tv = true,
+ .set_max_preload = false,
};
static const struct dispc_features omap34xx_rev3_0_dispc_feats __initconst = {
@@ -3558,6 +3590,7 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats __initconst = {
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
.no_framedone_tv = true,
+ .set_max_preload = false,
};
static const struct dispc_features omap44xx_dispc_feats __initconst = {
@@ -3577,6 +3610,7 @@ static const struct dispc_features omap44xx_dispc_feats __initconst = {
.calc_core_clk = calc_core_clk_44xx,
.num_fifos = 5,
.gfx_fifo_workaround = true,
+ .set_max_preload = true,
};
static const struct dispc_features omap54xx_dispc_feats __initconst = {
@@ -3597,6 +3631,7 @@ static const struct dispc_features omap54xx_dispc_feats __initconst = {
.num_fifos = 5,
.gfx_fifo_workaround = true,
.mstandby_workaround = true,
+ .set_max_preload = true,
};
static int __init dispc_init_features(struct platform_device *pdev)
@@ -3691,7 +3726,6 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- pm_runtime_irq_safe(&pdev->dev);
r = dispc_runtime_get();
if (r)
@@ -3734,6 +3768,8 @@ static int dispc_runtime_suspend(struct device *dev)
static int dispc_runtime_resume(struct device *dev)
{
+ _omap_dispc_initial_config();
+
dispc_restore_context();
return 0;
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h
index de4863d21ab7..78edb449c763 100644
--- a/drivers/video/omap2/dss/dispc.h
+++ b/drivers/video/omap2/dss/dispc.h
@@ -40,6 +40,7 @@
#define DISPC_CONTROL3 0x0848
#define DISPC_CONFIG3 0x084C
#define DISPC_MSTANDBY_CTRL 0x0858
+#define DISPC_GLOBAL_MFLAG_ATTRIBUTE 0x085C
/* DISPC overlay registers */
#define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \
@@ -100,6 +101,8 @@
DISPC_FIR_COEF_V2_OFFSET(n, i))
#define DISPC_OVL_PRELOAD(n) (DISPC_OVL_BASE(n) + \
DISPC_PRELOAD_OFFSET(n))
+#define DISPC_OVL_MFLAG_THRESHOLD(n) (DISPC_OVL_BASE(n) + \
+ DISPC_MFLAG_THRESHOLD_OFFSET(n))
/* DISPC up/downsampling FIR filter coefficient structure */
struct dispc_coef {
@@ -894,4 +897,21 @@ static inline u16 DISPC_PRELOAD_OFFSET(enum omap_plane plane)
return 0;
}
}
+
+static inline u16 DISPC_MFLAG_THRESHOLD_OFFSET(enum omap_plane plane)
+{
+ switch (plane) {
+ case OMAP_DSS_GFX:
+ return 0x0860;
+ case OMAP_DSS_VIDEO1:
+ return 0x0864;
+ case OMAP_DSS_VIDEO2:
+ return 0x0868;
+ case OMAP_DSS_VIDEO3:
+ return 0x086c;
+ default:
+ BUG();
+ return 0;
+ }
+}
#endif
diff --git a/drivers/video/omap2/dss/display-sysfs.c b/drivers/video/omap2/dss/display-sysfs.c
index 21d7f77df702..f7b5f9561041 100644
--- a/drivers/video/omap2/dss/display-sysfs.c
+++ b/drivers/video/omap2/dss/display-sysfs.c
@@ -277,7 +277,7 @@ static ssize_t display_wss_store(struct device *dev,
return size;
}
-static DEVICE_ATTR(name, S_IRUGO, display_name_show, NULL);
+static DEVICE_ATTR(display_name, S_IRUGO, display_name_show, NULL);
static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
display_enabled_show, display_enabled_store);
static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
@@ -292,7 +292,7 @@ static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
display_wss_show, display_wss_store);
static const struct attribute *display_sysfs_attrs[] = {
- &dev_attr_name.attr,
+ &dev_attr_display_name.attr,
&dev_attr_enabled.attr,
&dev_attr_tear_elim.attr,
&dev_attr_timings.attr,
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index bd48cde53561..23ef21ffc2c4 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -117,7 +117,7 @@ struct dpi_clk_calc_ctx {
/* outputs */
struct dsi_clock_info dsi_cinfo;
- struct dss_clock_info dss_cinfo;
+ unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
@@ -184,12 +184,11 @@ static bool dpi_calc_pll_cb(int regn, int regm, unsigned long fint,
dpi_calc_hsdiv_cb, ctx);
}
-static bool dpi_calc_dss_cb(int fckd, unsigned long fck, void *data)
+static bool dpi_calc_dss_cb(unsigned long fck, void *data)
{
struct dpi_clk_calc_ctx *ctx = data;
- ctx->dss_cinfo.fck = fck;
- ctx->dss_cinfo.fck_div = fckd;
+ ctx->fck = fck;
return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max,
dpi_calc_dispc_cb, ctx);
@@ -237,7 +236,7 @@ static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
ctx->pck_min = 0;
ctx->pck_max = pck + 1000 * i * i * i;
- ok = dss_div_calc(ctx->pck_min, dpi_calc_dss_cb, ctx);
+ ok = dss_div_calc(pck, ctx->pck_min, dpi_calc_dss_cb, ctx);
if (ok)
return ok;
}
@@ -286,13 +285,13 @@ static int dpi_set_dispc_clk(unsigned long pck_req, unsigned long *fck,
if (!ok)
return -EINVAL;
- r = dss_set_clock_div(&ctx.dss_cinfo);
+ r = dss_set_fck_rate(ctx.fck);
if (r)
return r;
dpi.mgr_config.clock_info = ctx.dispc_cinfo;
- *fck = ctx.dss_cinfo.fck;
+ *fck = ctx.fck;
*lck_div = ctx.dispc_cinfo.lck_div;
*pck_div = ctx.dispc_cinfo.pck_div;
@@ -495,7 +494,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
if (!ok)
return -EINVAL;
- fck = ctx.dss_cinfo.fck;
+ fck = ctx.fck;
}
lck_div = ctx.dispc_cinfo.lck_div;
@@ -551,7 +550,8 @@ static int dpi_init_regulator(void)
vdds_dsi = devm_regulator_get(&dpi.pdev->dev, "vdds_dsi");
if (IS_ERR(vdds_dsi)) {
- DSSERR("can't get VDDS_DSI regulator\n");
+ if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
+ DSSERR("can't get VDDS_DSI regulator\n");
return PTR_ERR(vdds_dsi);
}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 6056b27cf73c..a820c37e323e 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -47,63 +47,73 @@
#define DSI_CATCH_MISSING_TE
-struct dsi_reg { u16 idx; };
+struct dsi_reg { u16 module; u16 idx; };
-#define DSI_REG(idx) ((const struct dsi_reg) { idx })
+#define DSI_REG(mod, idx) ((const struct dsi_reg) { mod, idx })
-#define DSI_SZ_REGS SZ_1K
/* DSI Protocol Engine */
-#define DSI_REVISION DSI_REG(0x0000)
-#define DSI_SYSCONFIG DSI_REG(0x0010)
-#define DSI_SYSSTATUS DSI_REG(0x0014)
-#define DSI_IRQSTATUS DSI_REG(0x0018)
-#define DSI_IRQENABLE DSI_REG(0x001C)
-#define DSI_CTRL DSI_REG(0x0040)
-#define DSI_GNQ DSI_REG(0x0044)
-#define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048)
-#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C)
-#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050)
-#define DSI_CLK_CTRL DSI_REG(0x0054)
-#define DSI_TIMING1 DSI_REG(0x0058)
-#define DSI_TIMING2 DSI_REG(0x005C)
-#define DSI_VM_TIMING1 DSI_REG(0x0060)
-#define DSI_VM_TIMING2 DSI_REG(0x0064)
-#define DSI_VM_TIMING3 DSI_REG(0x0068)
-#define DSI_CLK_TIMING DSI_REG(0x006C)
-#define DSI_TX_FIFO_VC_SIZE DSI_REG(0x0070)
-#define DSI_RX_FIFO_VC_SIZE DSI_REG(0x0074)
-#define DSI_COMPLEXIO_CFG2 DSI_REG(0x0078)
-#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(0x007C)
-#define DSI_VM_TIMING4 DSI_REG(0x0080)
-#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(0x0084)
-#define DSI_VM_TIMING5 DSI_REG(0x0088)
-#define DSI_VM_TIMING6 DSI_REG(0x008C)
-#define DSI_VM_TIMING7 DSI_REG(0x0090)
-#define DSI_STOPCLK_TIMING DSI_REG(0x0094)
-#define DSI_VC_CTRL(n) DSI_REG(0x0100 + (n * 0x20))
-#define DSI_VC_TE(n) DSI_REG(0x0104 + (n * 0x20))
-#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(0x0108 + (n * 0x20))
-#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(0x010C + (n * 0x20))
-#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(0x0110 + (n * 0x20))
-#define DSI_VC_IRQSTATUS(n) DSI_REG(0x0118 + (n * 0x20))
-#define DSI_VC_IRQENABLE(n) DSI_REG(0x011C + (n * 0x20))
+#define DSI_PROTO 0
+#define DSI_PROTO_SZ 0x200
+
+#define DSI_REVISION DSI_REG(DSI_PROTO, 0x0000)
+#define DSI_SYSCONFIG DSI_REG(DSI_PROTO, 0x0010)
+#define DSI_SYSSTATUS DSI_REG(DSI_PROTO, 0x0014)
+#define DSI_IRQSTATUS DSI_REG(DSI_PROTO, 0x0018)
+#define DSI_IRQENABLE DSI_REG(DSI_PROTO, 0x001C)
+#define DSI_CTRL DSI_REG(DSI_PROTO, 0x0040)
+#define DSI_GNQ DSI_REG(DSI_PROTO, 0x0044)
+#define DSI_COMPLEXIO_CFG1 DSI_REG(DSI_PROTO, 0x0048)
+#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(DSI_PROTO, 0x004C)
+#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(DSI_PROTO, 0x0050)
+#define DSI_CLK_CTRL DSI_REG(DSI_PROTO, 0x0054)
+#define DSI_TIMING1 DSI_REG(DSI_PROTO, 0x0058)
+#define DSI_TIMING2 DSI_REG(DSI_PROTO, 0x005C)
+#define DSI_VM_TIMING1 DSI_REG(DSI_PROTO, 0x0060)
+#define DSI_VM_TIMING2 DSI_REG(DSI_PROTO, 0x0064)
+#define DSI_VM_TIMING3 DSI_REG(DSI_PROTO, 0x0068)
+#define DSI_CLK_TIMING DSI_REG(DSI_PROTO, 0x006C)
+#define DSI_TX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0070)
+#define DSI_RX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0074)
+#define DSI_COMPLEXIO_CFG2 DSI_REG(DSI_PROTO, 0x0078)
+#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(DSI_PROTO, 0x007C)
+#define DSI_VM_TIMING4 DSI_REG(DSI_PROTO, 0x0080)
+#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(DSI_PROTO, 0x0084)
+#define DSI_VM_TIMING5 DSI_REG(DSI_PROTO, 0x0088)
+#define DSI_VM_TIMING6 DSI_REG(DSI_PROTO, 0x008C)
+#define DSI_VM_TIMING7 DSI_REG(DSI_PROTO, 0x0090)
+#define DSI_STOPCLK_TIMING DSI_REG(DSI_PROTO, 0x0094)
+#define DSI_VC_CTRL(n) DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20))
+#define DSI_VC_TE(n) DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20))
+#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20))
+#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(DSI_PROTO, 0x010C + (n * 0x20))
+#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20))
+#define DSI_VC_IRQSTATUS(n) DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20))
+#define DSI_VC_IRQENABLE(n) DSI_REG(DSI_PROTO, 0x011C + (n * 0x20))
/* DSIPHY_SCP */
-#define DSI_DSIPHY_CFG0 DSI_REG(0x200 + 0x0000)
-#define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004)
-#define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008)
-#define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014)
-#define DSI_DSIPHY_CFG10 DSI_REG(0x200 + 0x0028)
+#define DSI_PHY 1
+#define DSI_PHY_OFFSET 0x200
+#define DSI_PHY_SZ 0x40
+
+#define DSI_DSIPHY_CFG0 DSI_REG(DSI_PHY, 0x0000)
+#define DSI_DSIPHY_CFG1 DSI_REG(DSI_PHY, 0x0004)
+#define DSI_DSIPHY_CFG2 DSI_REG(DSI_PHY, 0x0008)
+#define DSI_DSIPHY_CFG5 DSI_REG(DSI_PHY, 0x0014)
+#define DSI_DSIPHY_CFG10 DSI_REG(DSI_PHY, 0x0028)
/* DSI_PLL_CTRL_SCP */
-#define DSI_PLL_CONTROL DSI_REG(0x300 + 0x0000)
-#define DSI_PLL_STATUS DSI_REG(0x300 + 0x0004)
-#define DSI_PLL_GO DSI_REG(0x300 + 0x0008)
-#define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C)
-#define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010)
+#define DSI_PLL 2
+#define DSI_PLL_OFFSET 0x300
+#define DSI_PLL_SZ 0x20
+
+#define DSI_PLL_CONTROL DSI_REG(DSI_PLL, 0x0000)
+#define DSI_PLL_STATUS DSI_REG(DSI_PLL, 0x0004)
+#define DSI_PLL_GO DSI_REG(DSI_PLL, 0x0008)
+#define DSI_PLL_CONFIGURATION1 DSI_REG(DSI_PLL, 0x000C)
+#define DSI_PLL_CONFIGURATION2 DSI_REG(DSI_PLL, 0x0010)
#define REG_GET(dsidev, idx, start, end) \
FLD_GET(dsi_read_reg(dsidev, idx), start, end)
@@ -277,7 +287,9 @@ struct dsi_clk_calc_ctx {
struct dsi_data {
struct platform_device *pdev;
- void __iomem *base;
+ void __iomem *proto_base;
+ void __iomem *phy_base;
+ void __iomem *pll_base;
int module_id;
@@ -297,7 +309,8 @@ struct dsi_data {
struct {
enum dsi_vc_source source;
struct omap_dss_device *dssdev;
- enum fifo_size fifo_size;
+ enum fifo_size tx_fifo_size;
+ enum fifo_size rx_fifo_size;
int vc_id;
} vc[4];
@@ -413,16 +426,32 @@ static inline void dsi_write_reg(struct platform_device *dsidev,
const struct dsi_reg idx, u32 val)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ void __iomem *base;
+
+	switch (idx.module) {
+ case DSI_PROTO: base = dsi->proto_base; break;
+ case DSI_PHY: base = dsi->phy_base; break;
+ case DSI_PLL: base = dsi->pll_base; break;
+ default: return;
+ }
- __raw_writel(val, dsi->base + idx.idx);
+ __raw_writel(val, base + idx.idx);
}
static inline u32 dsi_read_reg(struct platform_device *dsidev,
const struct dsi_reg idx)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ void __iomem *base;
- return __raw_readl(dsi->base + idx.idx);
+	switch (idx.module) {
+ case DSI_PROTO: base = dsi->proto_base; break;
+ case DSI_PHY: base = dsi->phy_base; break;
+ case DSI_PLL: base = dsi->pll_base; break;
+ default: return 0;
+ }
+
+ return __raw_readl(base + idx.idx);
}
static void dsi_bus_lock(struct omap_dss_device *dssdev)
@@ -1129,7 +1158,8 @@ static int dsi_regulator_init(struct platform_device *dsidev)
vdds_dsi = devm_regulator_get(&dsi->pdev->dev, "VCXIO");
if (IS_ERR(vdds_dsi)) {
- DSSERR("can't get VDDS_DSI regulator\n");
+ if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
+ DSSERR("can't get VDDS_DSI regulator\n");
return PTR_ERR(vdds_dsi);
}
@@ -2427,14 +2457,14 @@ static void dsi_config_tx_fifo(struct platform_device *dsidev,
int add = 0;
int i;
- dsi->vc[0].fifo_size = size1;
- dsi->vc[1].fifo_size = size2;
- dsi->vc[2].fifo_size = size3;
- dsi->vc[3].fifo_size = size4;
+ dsi->vc[0].tx_fifo_size = size1;
+ dsi->vc[1].tx_fifo_size = size2;
+ dsi->vc[2].tx_fifo_size = size3;
+ dsi->vc[3].tx_fifo_size = size4;
for (i = 0; i < 4; i++) {
u8 v;
- int size = dsi->vc[i].fifo_size;
+ int size = dsi->vc[i].tx_fifo_size;
if (add + size > 4) {
DSSERR("Illegal FIFO configuration\n");
@@ -2460,14 +2490,14 @@ static void dsi_config_rx_fifo(struct platform_device *dsidev,
int add = 0;
int i;
- dsi->vc[0].fifo_size = size1;
- dsi->vc[1].fifo_size = size2;
- dsi->vc[2].fifo_size = size3;
- dsi->vc[3].fifo_size = size4;
+ dsi->vc[0].rx_fifo_size = size1;
+ dsi->vc[1].rx_fifo_size = size2;
+ dsi->vc[2].rx_fifo_size = size3;
+ dsi->vc[3].rx_fifo_size = size4;
for (i = 0; i < 4; i++) {
u8 v;
- int size = dsi->vc[i].fifo_size;
+ int size = dsi->vc[i].rx_fifo_size;
if (add + size > 4) {
DSSERR("Illegal FIFO configuration\n");
@@ -2920,7 +2950,7 @@ static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
DSSDBG("dsi_vc_send_long, %d bytes\n", len);
/* len + header */
- if (dsi->vc[channel].fifo_size * 32 * 4 < len + 4) {
+ if (dsi->vc[channel].tx_fifo_size * 32 * 4 < len + 4) {
DSSERR("unable to send long packet: packet too long.\n");
return -EINVAL;
}
@@ -5345,8 +5375,9 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
{
u32 rev;
int r, i;
- struct resource *dsi_mem;
struct dsi_data *dsi;
+ struct resource *res;
+ struct resource temp_res;
dsi = devm_kzalloc(&dsidev->dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
@@ -5376,16 +5407,64 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
dsi->te_timer.function = dsi_te_timeout;
dsi->te_timer.data = 0;
#endif
- dsi_mem = platform_get_resource(dsi->pdev, IORESOURCE_MEM, 0);
- if (!dsi_mem) {
- DSSERR("can't get IORESOURCE_MEM DSI\n");
- return -EINVAL;
+
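+	/*
+	 * If the platform does not provide named "proto"/"phy"/"pll" resources,
+	 * fall back to the legacy single DSI resource and carve the three
+	 * windows out of it using the fixed offsets defined above.
+	 */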
+ res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
+ if (!res) {
+ res = platform_get_resource(dsidev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DSSERR("can't get IORESOURCE_MEM DSI\n");
+ return -EINVAL;
+ }
+
+ temp_res.start = res->start;
+ temp_res.end = temp_res.start + DSI_PROTO_SZ - 1;
+ res = &temp_res;
+ }
+
+ dsi->proto_base = devm_ioremap(&dsidev->dev, res->start,
+ resource_size(res));
+ if (!dsi->proto_base) {
+ DSSERR("can't ioremap DSI protocol engine\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "phy");
+ if (!res) {
+ res = platform_get_resource(dsidev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DSSERR("can't get IORESOURCE_MEM DSI\n");
+ return -EINVAL;
+ }
+
+ temp_res.start = res->start + DSI_PHY_OFFSET;
+ temp_res.end = temp_res.start + DSI_PHY_SZ - 1;
+ res = &temp_res;
+ }
+
+ dsi->phy_base = devm_ioremap(&dsidev->dev, res->start,
+ resource_size(res));
+	if (!dsi->phy_base) {
+ DSSERR("can't ioremap DSI PHY\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "pll");
+ if (!res) {
+ res = platform_get_resource(dsidev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DSSERR("can't get IORESOURCE_MEM DSI\n");
+ return -EINVAL;
+ }
+
+ temp_res.start = res->start + DSI_PLL_OFFSET;
+ temp_res.end = temp_res.start + DSI_PLL_SZ - 1;
+ res = &temp_res;
}
- dsi->base = devm_ioremap(&dsidev->dev, dsi_mem->start,
- resource_size(dsi_mem));
- if (!dsi->base) {
- DSSERR("can't ioremap DSI\n");
+ dsi->pll_base = devm_ioremap(&dsidev->dev, res->start,
+ resource_size(res));
+	if (!dsi->pll_base) {
+ DSSERR("can't ioremap DSI PLL\n");
return -ENOMEM;
}
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index bd01608e67e2..9a145da35ad3 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -67,7 +67,7 @@ static void dss_runtime_put(void);
struct dss_features {
u8 fck_div_max;
u8 dss_fck_multiplier;
- const char *clk_name;
+ const char *parent_clk_name;
int (*dpi_select_source)(enum omap_channel channel);
};
@@ -75,13 +75,12 @@ static struct {
struct platform_device *pdev;
void __iomem *base;
- struct clk *dpll4_m4_ck;
+ struct clk *parent_clk;
struct clk *dss_clk;
unsigned long dss_clk_rate;
unsigned long cache_req_pck;
unsigned long cache_prate;
- struct dss_clock_info cache_dss_cinfo;
struct dispc_clock_info cache_dispc_cinfo;
enum omap_dss_clk_source dsi_clk_source[MAX_NUM_DSI];
@@ -265,8 +264,6 @@ const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src)
void dss_dump_clocks(struct seq_file *s)
{
- unsigned long dpll4_ck_rate;
- unsigned long dpll4_m4_ck_rate;
const char *fclk_name, *fclk_real_name;
unsigned long fclk_rate;
@@ -279,21 +276,9 @@ void dss_dump_clocks(struct seq_file *s)
fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
fclk_rate = clk_get_rate(dss.dss_clk);
- if (dss.dpll4_m4_ck) {
- dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
- dpll4_m4_ck_rate = clk_get_rate(dss.dpll4_m4_ck);
-
- seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);
-
- seq_printf(s, "%s (%s) = %lu / %lu * %d = %lu\n",
- fclk_name, fclk_real_name, dpll4_ck_rate,
- dpll4_ck_rate / dpll4_m4_ck_rate,
- dss.feat->dss_fck_multiplier, fclk_rate);
- } else {
- seq_printf(s, "%s (%s) = %lu\n",
- fclk_name, fclk_real_name,
- fclk_rate);
- }
+ seq_printf(s, "%s (%s) = %lu\n",
+ fclk_name, fclk_real_name,
+ fclk_rate);
dss_runtime_put();
}
@@ -451,30 +436,8 @@ enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
}
}
-/* calculate clock rates using dividers in cinfo */
-int dss_calc_clock_rates(struct dss_clock_info *cinfo)
-{
- if (dss.dpll4_m4_ck) {
- unsigned long prate;
-
- if (cinfo->fck_div > dss.feat->fck_div_max ||
- cinfo->fck_div == 0)
- return -EINVAL;
-
- prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
-
- cinfo->fck = prate / cinfo->fck_div *
- dss.feat->dss_fck_multiplier;
- } else {
- if (cinfo->fck_div != 0)
- return -EINVAL;
- cinfo->fck = clk_get_rate(dss.dss_clk);
- }
-
- return 0;
-}
-
-bool dss_div_calc(unsigned long fck_min, dss_div_calc_func func, void *data)
+bool dss_div_calc(unsigned long pck, unsigned long fck_min,
+ dss_div_calc_func func, void *data)
{
int fckd, fckd_start, fckd_stop;
unsigned long fck;
@@ -483,22 +446,24 @@ bool dss_div_calc(unsigned long fck_min, dss_div_calc_func func, void *data)
unsigned long prate;
unsigned m;
- if (dss.dpll4_m4_ck == NULL) {
- /*
- * TODO: dss1_fclk can be changed on OMAP2, but the available
- * dividers are not continuous. We just use the pre-set rate for
- * now.
- */
- fck = clk_get_rate(dss.dss_clk);
- fckd = 1;
- return func(fckd, fck, data);
+ fck_hw_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+
+ if (dss.parent_clk == NULL) {
+ unsigned pckd;
+
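+		/*
+		 * Without a parent clock divider, pick the largest integer
+		 * multiple of pck that stays below the hardware fck maximum
+		 * and let the clock framework round it to a supported rate.
+		 */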
+ pckd = fck_hw_max / pck;
+
+ fck = pck * pckd;
+
+ fck = clk_round_rate(dss.dss_clk, fck);
+
+ return func(fck, data);
}
- fck_hw_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
fckd_hw_max = dss.feat->fck_div_max;
m = dss.feat->dss_fck_multiplier;
- prate = dss_get_dpll4_rate();
+ prate = clk_get_rate(dss.parent_clk);
fck_min = fck_min ? fck_min : 1;
@@ -508,50 +473,32 @@ bool dss_div_calc(unsigned long fck_min, dss_div_calc_func func, void *data)
for (fckd = fckd_start; fckd >= fckd_stop; --fckd) {
fck = prate / fckd * m;
- if (func(fckd, fck, data))
+ if (func(fck, data))
return true;
}
return false;
}
-int dss_set_clock_div(struct dss_clock_info *cinfo)
+int dss_set_fck_rate(unsigned long rate)
{
- if (dss.dpll4_m4_ck) {
- unsigned long prate;
- int r;
+ int r;
- prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
- DSSDBG("dpll4_m4 = %ld\n", prate);
+ DSSDBG("set fck to %lu\n", rate);
- r = clk_set_rate(dss.dpll4_m4_ck,
- DIV_ROUND_UP(prate, cinfo->fck_div));
- if (r)
- return r;
- } else {
- if (cinfo->fck_div != 0)
- return -EINVAL;
- }
+ r = clk_set_rate(dss.dss_clk, rate);
+ if (r)
+ return r;
dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
- WARN_ONCE(dss.dss_clk_rate != cinfo->fck,
+ WARN_ONCE(dss.dss_clk_rate != rate,
"clk rate mismatch: %lu != %lu", dss.dss_clk_rate,
- cinfo->fck);
-
- DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div);
+ rate);
return 0;
}
-unsigned long dss_get_dpll4_rate(void)
-{
- if (dss.dpll4_m4_ck)
- return clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
- else
- return 0;
-}
-
unsigned long dss_get_dispc_clk_rate(void)
{
return dss.dss_clk_rate;
@@ -560,27 +507,23 @@ unsigned long dss_get_dispc_clk_rate(void)
static int dss_setup_default_clock(void)
{
unsigned long max_dss_fck, prate;
+ unsigned long fck;
unsigned fck_div;
- struct dss_clock_info dss_cinfo = { 0 };
int r;
- if (dss.dpll4_m4_ck == NULL)
- return 0;
-
max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
- prate = dss_get_dpll4_rate();
-
- fck_div = DIV_ROUND_UP(prate * dss.feat->dss_fck_multiplier,
- max_dss_fck);
-
- dss_cinfo.fck_div = fck_div;
+ if (dss.parent_clk == NULL) {
+ fck = clk_round_rate(dss.dss_clk, max_dss_fck);
+ } else {
+ prate = clk_get_rate(dss.parent_clk);
- r = dss_calc_clock_rates(&dss_cinfo);
- if (r)
- return r;
+ fck_div = DIV_ROUND_UP(prate * dss.feat->dss_fck_multiplier,
+ max_dss_fck);
+ fck = prate / fck_div * dss.feat->dss_fck_multiplier;
+ }
- r = dss_set_clock_div(&dss_cinfo);
+ r = dss_set_fck_rate(fck);
if (r)
return r;
@@ -706,25 +649,25 @@ static int dss_get_clocks(void)
dss.dss_clk = clk;
- if (dss.feat->clk_name) {
- clk = clk_get(NULL, dss.feat->clk_name);
+ if (dss.feat->parent_clk_name) {
+ clk = clk_get(NULL, dss.feat->parent_clk_name);
if (IS_ERR(clk)) {
- DSSERR("Failed to get %s\n", dss.feat->clk_name);
+ DSSERR("Failed to get %s\n", dss.feat->parent_clk_name);
return PTR_ERR(clk);
}
} else {
clk = NULL;
}
- dss.dpll4_m4_ck = clk;
+ dss.parent_clk = clk;
return 0;
}
static void dss_put_clocks(void)
{
- if (dss.dpll4_m4_ck)
- clk_put(dss.dpll4_m4_ck);
+ if (dss.parent_clk)
+ clk_put(dss.parent_clk);
}
static int dss_runtime_get(void)
@@ -761,37 +704,41 @@ void dss_debug_dump_clocks(struct seq_file *s)
#endif
static const struct dss_features omap24xx_dss_feats __initconst = {
- .fck_div_max = 16,
+ /*
+ * fck div max is really 16, but the divider range has gaps. The range
+ * from 1 to 6 has no gaps, so let's use that as a max.
+ */
+ .fck_div_max = 6,
.dss_fck_multiplier = 2,
- .clk_name = NULL,
+ .parent_clk_name = "core_ck",
.dpi_select_source = &dss_dpi_select_source_omap2_omap3,
};
static const struct dss_features omap34xx_dss_feats __initconst = {
.fck_div_max = 16,
.dss_fck_multiplier = 2,
- .clk_name = "dpll4_m4_ck",
+ .parent_clk_name = "dpll4_ck",
.dpi_select_source = &dss_dpi_select_source_omap2_omap3,
};
static const struct dss_features omap3630_dss_feats __initconst = {
.fck_div_max = 32,
.dss_fck_multiplier = 1,
- .clk_name = "dpll4_m4_ck",
+ .parent_clk_name = "dpll4_ck",
.dpi_select_source = &dss_dpi_select_source_omap2_omap3,
};
static const struct dss_features omap44xx_dss_feats __initconst = {
.fck_div_max = 32,
.dss_fck_multiplier = 1,
- .clk_name = "dpll_per_m5x2_ck",
+ .parent_clk_name = "dpll_per_x2_ck",
.dpi_select_source = &dss_dpi_select_source_omap4,
};
static const struct dss_features omap54xx_dss_feats __initconst = {
.fck_div_max = 64,
.dss_fck_multiplier = 1,
- .clk_name = "dpll_per_h12x2_ck",
+ .parent_clk_name = "dpll_per_x2_ck",
.dpi_select_source = &dss_dpi_select_source_omap5,
};
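
Editor's note: the divider math in the reworked dss_setup_default_clock() picks the largest fck that stays at or under the platform cap by rounding the divider up. A minimal standalone sketch of that arithmetic; the 864 MHz parent rate, 173 MHz cap and x2 multiplier are example values, not taken from the patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long prate = 864000000UL;	/* hypothetical parent (dpll4-style) rate */
	unsigned long max_dss_fck = 173000000UL;	/* hypothetical platform cap */
	unsigned long mult = 2;			/* dss_fck_multiplier */
	unsigned long fck_div, fck;

	fck_div = DIV_ROUND_UP(prate * mult, max_dss_fck);
	fck = prate / fck_div * mult;

	/* prints: fck_div=10 fck=172800000, the highest rate not above the cap */
	printf("fck_div=%lu fck=%lu\n", fck_div, fck);
	return 0;
}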
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index f538e867c0f8..057f24c8a332 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -100,14 +100,6 @@ enum dss_writeback_channel {
DSS_WB_LCD3_MGR = 7,
};
-struct dss_clock_info {
- /* rates that we get with dividers below */
- unsigned long fck;
-
- /* dividers */
- u16 fck_div;
-};
-
struct dispc_clock_info {
/* rates that we get with dividers below */
unsigned long lck;
@@ -250,12 +242,11 @@ enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
void dss_set_venc_output(enum omap_dss_venc_type type);
void dss_set_dac_pwrdn_bgz(bool enable);
-unsigned long dss_get_dpll4_rate(void);
-int dss_calc_clock_rates(struct dss_clock_info *cinfo);
-int dss_set_clock_div(struct dss_clock_info *cinfo);
+int dss_set_fck_rate(unsigned long rate);
-typedef bool (*dss_div_calc_func)(int fckd, unsigned long fck, void *data);
-bool dss_div_calc(unsigned long fck_min, dss_div_calc_func func, void *data);
+typedef bool (*dss_div_calc_func)(unsigned long fck, void *data);
+bool dss_div_calc(unsigned long pck, unsigned long fck_min,
+ dss_div_calc_func func, void *data);
/* SDI */
int sdi_init_platform_driver(void) __init;
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index f8fd6dbacabc..7f8969191dc6 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -613,6 +613,7 @@ static const enum dss_feat_id omap5_dss_feat_list[] = {
FEAT_DSI_PLL_SELFREQDCO,
FEAT_DSI_PLL_REFSEL,
FEAT_DSI_PHY_DCC,
+ FEAT_MFLAG,
};
/* OMAP2 DSS Features */
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index 10b0556e1352..e3ef3b714896 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -64,6 +64,7 @@ enum dss_feat_id {
FEAT_DSI_PLL_SELFREQDCO,
FEAT_DSI_PLL_REFSEL,
FEAT_DSI_PHY_DCC,
+ FEAT_MFLAG,
};
/* DSS register field id */
diff --git a/drivers/video/omap2/dss/hdmi.h b/drivers/video/omap2/dss/hdmi.h
index b0493768a5d7..e25681ff5a70 100644
--- a/drivers/video/omap2/dss/hdmi.h
+++ b/drivers/video/omap2/dss/hdmi.h
@@ -41,14 +41,14 @@
#define HDMI_WP_VIDEO_SIZE 0x60
#define HDMI_WP_VIDEO_TIMING_H 0x68
#define HDMI_WP_VIDEO_TIMING_V 0x6C
-#define HDMI_WP_WP_CLK 0x70
+#define HDMI_WP_CLK 0x70
#define HDMI_WP_AUDIO_CFG 0x80
#define HDMI_WP_AUDIO_CFG2 0x84
#define HDMI_WP_AUDIO_CTRL 0x88
#define HDMI_WP_AUDIO_DATA 0x8C
/* HDMI WP IRQ flags */
-
+#define HDMI_IRQ_CORE (1 << 0)
#define HDMI_IRQ_OCP_TIMEOUT (1 << 4)
#define HDMI_IRQ_AUDIO_FIFO_UNDERFLOW (1 << 8)
#define HDMI_IRQ_AUDIO_FIFO_OVERFLOW (1 << 9)
@@ -378,15 +378,15 @@ static inline u32 hdmi_read_reg(void __iomem *base_addr, const u16 idx)
FLD_GET(hdmi_read_reg(base, idx), start, end)
static inline int hdmi_wait_for_bit_change(void __iomem *base_addr,
- const u16 idx, int b2, int b1, u32 val)
+ const u32 idx, int b2, int b1, u32 val)
{
- u32 t = 0;
- while (val != REG_GET(base_addr, idx, b2, b1)) {
- udelay(1);
+ u32 t = 0, v;
+ while (val != (v = REG_GET(base_addr, idx, b2, b1))) {
if (t++ > 10000)
- return !val;
+ return v;
+ udelay(1);
}
- return val;
+ return v;
}
/* HDMI wrapper funcs */
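
Editor's note: the hdmi_wait_for_bit_change() rework tests the field before delaying and returns the last value actually read on timeout, so callers comparing the result against the expected value see the real register state. A minimal userspace sketch of that control flow; fake_field() is a stand-in for REG_GET() and the 10000-iteration limit mirrors the driver:

#include <stdio.h>

/* stand-in for REG_GET(): a fake field that becomes 1 after a few reads */
static unsigned int fake_field(void)
{
	static int calls;

	return ++calls > 3 ? 1 : 0;
}

static unsigned int wait_for_bit(unsigned int want)
{
	unsigned int t = 0, v;

	/* check first; only delay (udelay(1) in the driver) while still wrong */
	while (want != (v = fake_field())) {
		if (t++ > 10000)
			return v;	/* on timeout, report what was last read */
	}
	return v;
}

int main(void)
{
	printf("got %u\n", wait_for_bit(1));
	return 0;
}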
diff --git a/drivers/video/omap2/dss/hdmi4.c b/drivers/video/omap2/dss/hdmi4.c
index e14009614338..4a74538f9ea5 100644
--- a/drivers/video/omap2/dss/hdmi4.c
+++ b/drivers/video/omap2/dss/hdmi4.c
@@ -95,7 +95,8 @@ static int hdmi_init_regulator(void)
reg = devm_regulator_get(&hdmi.pdev->dev, "VDAC");
if (IS_ERR(reg)) {
- DSSERR("can't get VDDA_HDMI_DAC regulator\n");
+ if (PTR_ERR(reg) != -EPROBE_DEFER)
+ DSSERR("can't get VDDA_HDMI_DAC regulator\n");
return PTR_ERR(reg);
}
@@ -148,8 +149,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
if (r)
return r;
- dss_mgr_disable(mgr);
-
p = &hdmi.cfg.timings;
DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
@@ -158,8 +157,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi_pll_compute(&hdmi.pll, clk_get_rate(hdmi.sys_clk), phy);
- hdmi_wp_video_stop(&hdmi.wp);
-
/* config the PLL and PHY hdmi_set_pll_pwrfirst */
r = hdmi_pll_enable(&hdmi.pll, &hdmi.wp);
if (r) {
@@ -218,14 +215,12 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- struct hdmi_cm cm;
+ struct omap_dss_device *out = &hdmi.output;
- cm = hdmi_get_code(timings);
- if (cm.code == -1)
+ if (!dispc_mgr_timings_ok(out->dispc_channel, timings))
return -EINVAL;
return 0;
-
}
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
@@ -244,8 +239,17 @@ static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
hdmi.cfg = *t;
dispc_set_tv_pclk(t->timings.pixel_clock * 1000);
+ } else {
+ hdmi.cfg.timings = *timings;
+ hdmi.cfg.cm.code = 0;
+ hdmi.cfg.cm.mode = HDMI_DVI;
+
+ dispc_set_tv_pclk(timings->pixel_clock * 1000);
}
+ DSSDBG("using mode: %s, code %d\n", hdmi.cfg.cm.mode == HDMI_DVI ?
+ "DVI" : "HDMI", hdmi.cfg.cm.code);
+
mutex_unlock(&hdmi.lock);
}
diff --git a/drivers/video/omap2/dss/hdmi4_core.c b/drivers/video/omap2/dss/hdmi4_core.c
index 5dd5e5489b41..2eb04dcf807c 100644
--- a/drivers/video/omap2/dss/hdmi4_core.c
+++ b/drivers/video/omap2/dss/hdmi4_core.c
@@ -19,6 +19,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#define DSS_SUBSYS_NAME "HDMICORE"
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
@@ -125,12 +127,12 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
/* HDMI_CORE_DDC_STATUS_BUS_LOW */
if (REG_GET(base, HDMI_CORE_DDC_STATUS, 6, 6) == 1) {
- pr_err("I2C Bus Low?\n");
+ DSSERR("I2C Bus Low?\n");
return -EIO;
}
/* HDMI_CORE_DDC_STATUS_NO_ACK */
if (REG_GET(base, HDMI_CORE_DDC_STATUS, 5, 5) == 1) {
- pr_err("I2C No Ack\n");
+ DSSERR("I2C No Ack\n");
return -EIO;
}
@@ -161,7 +163,7 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
checksum += pedid[i];
if (checksum != 0) {
- pr_err("E-EDID checksum failed!!\n");
+ DSSERR("E-EDID checksum failed!!\n");
return -EIO;
}
@@ -199,7 +201,7 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
struct hdmi_core_infoframe_avi *avi_cfg,
struct hdmi_core_packet_enable_repeat *repeat_cfg)
{
- pr_debug("Enter hdmi_core_init\n");
+ DSSDBG("Enter hdmi_core_init\n");
/* video core */
video_cfg->ip_bus_width = HDMI_INPUT_8BIT;
@@ -241,19 +243,19 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
static void hdmi_core_powerdown_disable(struct hdmi_core_data *core)
{
- pr_debug("Enter hdmi_core_powerdown_disable\n");
+ DSSDBG("Enter hdmi_core_powerdown_disable\n");
REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0);
}
static void hdmi_core_swreset_release(struct hdmi_core_data *core)
{
- pr_debug("Enter hdmi_core_swreset_release\n");
+ DSSDBG("Enter hdmi_core_swreset_release\n");
REG_FLD_MOD(core->base, HDMI_CORE_SYS_SRST, 0x0, 0, 0);
}
static void hdmi_core_swreset_assert(struct hdmi_core_data *core)
{
- pr_debug("Enter hdmi_core_swreset_assert\n");
+ DSSDBG("Enter hdmi_core_swreset_assert\n");
REG_FLD_MOD(core->base, HDMI_CORE_SYS_SRST, 0x1, 0, 0);
}
@@ -1004,7 +1006,7 @@ int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
struct resource *res;
struct resource temp_res;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_core");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
if (!res) {
DSSDBG("can't get CORE mem resource by name\n");
/*
diff --git a/drivers/video/omap2/dss/hdmi_common.c b/drivers/video/omap2/dss/hdmi_common.c
index 5586aaad9d63..0614922902dd 100644
--- a/drivers/video/omap2/dss/hdmi_common.c
+++ b/drivers/video/omap2/dss/hdmi_common.c
@@ -13,6 +13,8 @@
* map it to corresponding CEA or VESA index.
*/
+#define DSS_SUBSYS_NAME "HDMI"
+
#include <linux/kernel.h>
#include <linux/err.h>
#include <video/omapdss.h>
diff --git a/drivers/video/omap2/dss/hdmi_phy.c b/drivers/video/omap2/dss/hdmi_phy.c
index 45acb997ac00..dd376ce8da01 100644
--- a/drivers/video/omap2/dss/hdmi_phy.c
+++ b/drivers/video/omap2/dss/hdmi_phy.c
@@ -124,7 +124,7 @@ int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy)
struct resource *res;
struct resource temp_res;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_txphy");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
if (!res) {
DSSDBG("can't get PHY mem resource by name\n");
/*
diff --git a/drivers/video/omap2/dss/hdmi_pll.c b/drivers/video/omap2/dss/hdmi_pll.c
index d3e6e78c0082..5fc71215c303 100644
--- a/drivers/video/omap2/dss/hdmi_pll.c
+++ b/drivers/video/omap2/dss/hdmi_pll.c
@@ -8,6 +8,8 @@
* the Free Software Foundation.
*/
+#define DSS_SUBSYS_NAME "HDMIPLL"
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
@@ -127,24 +129,24 @@ static int hdmi_pll_config(struct hdmi_pll_data *pll)
/* wait for bit change */
if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_GO,
0, 0, 1) != 1) {
- pr_err("PLL GO bit not set\n");
+ DSSERR("PLL GO bit not set\n");
return -ETIMEDOUT;
}
/* Wait till the lock bit is set in PLL status */
if (hdmi_wait_for_bit_change(pll->base,
PLLCTRL_PLL_STATUS, 1, 1, 1) != 1) {
- pr_err("cannot lock PLL\n");
- pr_err("CFG1 0x%x\n",
+ DSSERR("cannot lock PLL\n");
+ DSSERR("CFG1 0x%x\n",
hdmi_read_reg(pll->base, PLLCTRL_CFG1));
- pr_err("CFG2 0x%x\n",
+ DSSERR("CFG2 0x%x\n",
hdmi_read_reg(pll->base, PLLCTRL_CFG2));
- pr_err("CFG4 0x%x\n",
+ DSSERR("CFG4 0x%x\n",
hdmi_read_reg(pll->base, PLLCTRL_CFG4));
return -ETIMEDOUT;
}
- pr_debug("PLL locked!\n");
+ DSSDBG("PLL locked!\n");
return 0;
}
@@ -157,7 +159,7 @@ static int hdmi_pll_reset(struct hdmi_pll_data *pll)
/* READ 0x0 reset is in progress */
if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_STATUS, 0, 0, 1)
!= 1) {
- pr_err("Failed to sysreset PLL\n");
+ DSSERR("Failed to sysreset PLL\n");
return -ETIMEDOUT;
}
@@ -200,7 +202,7 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll)
struct resource *res;
struct resource temp_res;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_pllctrl");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
if (!res) {
DSSDBG("can't get PLL mem resource by name\n");
/*
diff --git a/drivers/video/omap2/dss/hdmi_wp.c b/drivers/video/omap2/dss/hdmi_wp.c
index 8151d8969a6e..cd620c6e43a0 100644
--- a/drivers/video/omap2/dss/hdmi_wp.c
+++ b/drivers/video/omap2/dss/hdmi_wp.c
@@ -8,6 +8,8 @@
* the Free Software Foundation.
*/
+#define DSS_SUBSYS_NAME "HDMIWP"
+
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -34,7 +36,7 @@ void hdmi_wp_dump(struct hdmi_wp_data *wp, struct seq_file *s)
DUMPREG(HDMI_WP_VIDEO_SIZE);
DUMPREG(HDMI_WP_VIDEO_TIMING_H);
DUMPREG(HDMI_WP_VIDEO_TIMING_V);
- DUMPREG(HDMI_WP_WP_CLK);
+ DUMPREG(HDMI_WP_CLK);
DUMPREG(HDMI_WP_AUDIO_CFG);
DUMPREG(HDMI_WP_AUDIO_CFG2);
DUMPREG(HDMI_WP_AUDIO_CTRL);
@@ -76,7 +78,7 @@ int hdmi_wp_set_phy_pwr(struct hdmi_wp_data *wp, enum hdmi_phy_pwr val)
/* Status of the power control of HDMI PHY */
if (hdmi_wait_for_bit_change(wp->base, HDMI_WP_PWR_CTRL, 5, 4, val)
!= val) {
- pr_err("Failed to set PHY power mode to %d\n", val);
+ DSSERR("Failed to set PHY power mode to %d\n", val);
return -ETIMEDOUT;
}
@@ -92,7 +94,7 @@ int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val)
/* wait till PHY_PWR_STATUS is set */
if (hdmi_wait_for_bit_change(wp->base, HDMI_WP_PWR_CTRL, 1, 0, val)
!= val) {
- pr_err("Failed to set PLL_PWR_STATUS\n");
+ DSSERR("Failed to set PLL_PWR_STATUS\n");
return -ETIMEDOUT;
}
@@ -129,7 +131,7 @@ void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
{
u32 r;
bool vsync_pol, hsync_pol;
- pr_debug("Enter hdmi_wp_video_config_interface\n");
+ DSSDBG("Enter hdmi_wp_video_config_interface\n");
vsync_pol = timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
hsync_pol = timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
@@ -148,7 +150,7 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
u32 timing_h = 0;
u32 timing_v = 0;
- pr_debug("Enter hdmi_wp_video_config_timing\n");
+ DSSDBG("Enter hdmi_wp_video_config_timing\n");
timing_h |= FLD_VAL(timings->hbp, 31, 20);
timing_h |= FLD_VAL(timings->hfp, 19, 8);
@@ -164,7 +166,7 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
struct omap_video_timings *timings, struct hdmi_config *param)
{
- pr_debug("Enter hdmi_wp_video_init_format\n");
+ DSSDBG("Enter hdmi_wp_video_init_format\n");
video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
video_fmt->y_res = param->timings.y_res;
@@ -241,7 +243,7 @@ int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp)
struct resource *res;
struct resource temp_res;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_wp");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wp");
if (!res) {
DSSDBG("can't get WP mem resource by name\n");
/*
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index eccde322c28a..2f7cee985cdd 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -113,11 +113,6 @@ void dss_uninit_overlays(struct platform_device *pdev)
int dss_ovl_simple_check(struct omap_overlay *ovl,
const struct omap_overlay_info *info)
{
- if (info->paddr == 0) {
- DSSERR("check_overlay: paddr cannot be 0\n");
- return -EINVAL;
- }
-
if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
if (info->out_width != 0 && info->width != info->out_width) {
DSSERR("check_overlay: overlay %d doesn't support "
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index ccc569ae7cca..ba806c9e7f54 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -46,7 +46,7 @@ static struct {
struct sdi_clk_calc_ctx {
unsigned long pck_min, pck_max;
- struct dss_clock_info dss_cinfo;
+ unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
@@ -63,19 +63,18 @@ static bool dpi_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
return true;
}
-static bool dpi_calc_dss_cb(int fckd, unsigned long fck, void *data)
+static bool dpi_calc_dss_cb(unsigned long fck, void *data)
{
struct sdi_clk_calc_ctx *ctx = data;
- ctx->dss_cinfo.fck = fck;
- ctx->dss_cinfo.fck_div = fckd;
+ ctx->fck = fck;
return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max,
dpi_calc_dispc_cb, ctx);
}
static int sdi_calc_clock_div(unsigned long pclk,
- struct dss_clock_info *dss_cinfo,
+ unsigned long *fck,
struct dispc_clock_info *dispc_cinfo)
{
int i;
@@ -98,9 +97,9 @@ static int sdi_calc_clock_div(unsigned long pclk,
ctx.pck_min = 0;
ctx.pck_max = pclk + 1000 * i * i * i;
- ok = dss_div_calc(ctx.pck_min, dpi_calc_dss_cb, &ctx);
+ ok = dss_div_calc(pclk, ctx.pck_min, dpi_calc_dss_cb, &ctx);
if (ok) {
- *dss_cinfo = ctx.dss_cinfo;
+ *fck = ctx.fck;
*dispc_cinfo = ctx.dispc_cinfo;
return 0;
}
@@ -128,7 +127,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_dss_device *out = &sdi.output;
struct omap_video_timings *t = &sdi.timings;
- struct dss_clock_info dss_cinfo;
+ unsigned long fck;
struct dispc_clock_info dispc_cinfo;
unsigned long pck;
int r;
@@ -150,13 +149,13 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- r = sdi_calc_clock_div(t->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo);
+ r = sdi_calc_clock_div(t->pixel_clock * 1000, &fck, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
sdi.mgr_config.clock_info = dispc_cinfo;
- pck = dss_cinfo.fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div / 1000;
+ pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div / 1000;
if (pck != t->pixel_clock) {
DSSWARN("Could not find exact pixel clock. Requested %d kHz, "
@@ -169,7 +168,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
dss_mgr_set_timings(out->manager, t);
- r = dss_set_clock_div(&dss_cinfo);
+ r = dss_set_fck_rate(fck);
if (r)
goto err_set_dss_clock_div;
@@ -265,7 +264,8 @@ static int sdi_init_regulator(void)
vdds_sdi = devm_regulator_get(&sdi.pdev->dev, "vdds_sdi");
if (IS_ERR(vdds_sdi)) {
- DSSERR("can't get VDDS_SDI regulator\n");
+ if (PTR_ERR(vdds_sdi) != -EPROBE_DEFER)
+ DSSERR("can't get VDDS_SDI regulator\n");
return PTR_ERR(vdds_sdi);
}
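
Editor's note: the sdi_calc_clock_div() retry loop keeps pck_min at zero and lets pck_max overshoot the requested pixel clock by 1000 * i^3 Hz on each attempt, so an inexact rate is accepted only after tighter searches fail. A standalone sketch of how that window grows; the 65 MHz pixel clock and the loop bounds are illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned long pclk = 65000000;	/* example pixel clock, 65 MHz */
	int i;

	/* each retry allows the achieved clock to overshoot a bit more */
	for (i = 1; i <= 5; i++)
		printf("try %d: pck_max = %lu Hz (+%d kHz)\n",
		       i, pclk + 1000UL * i * i * i, i * i * i);
	return 0;
}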
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 5f88ac47b7fa..2cd7f7e42105 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -639,7 +639,8 @@ static int venc_init_regulator(void)
vdda_dac = devm_regulator_get(&venc.pdev->dev, "vdda_dac");
if (IS_ERR(vdda_dac)) {
- DSSERR("can't get VDDA_DAC regulator\n");
+ if (PTR_ERR(vdda_dac) != -EPROBE_DEFER)
+ DSSERR("can't get VDDA_DAC regulator\n");
return PTR_ERR(vdda_dac);
}
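
Editor's note: the hdmi, sdi and venc hunks all apply the same deferred-probe idiom: a regulator that is not ready yet comes back as -EPROBE_DEFER, which is not an error worth logging. A hedged sketch of the pattern; example_get_supply() and the "vdda" supply name are made up for illustration:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* hypothetical helper, not part of the patch */
static int example_get_supply(struct device *dev)
{
	struct regulator *reg;

	reg = devm_regulator_get(dev, "vdda");
	if (IS_ERR(reg)) {
		/* -EPROBE_DEFER only means "try again later", so stay quiet */
		if (PTR_ERR(reg) != -EPROBE_DEFER)
			dev_err(dev, "can't get vdda regulator\n");
		return PTR_ERR(reg);
	}

	return 0;
}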
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 27d6905683f3..fcb9e932d00c 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -1833,6 +1833,16 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev)
if (fbdev == NULL)
return;
+ for (i = 0; i < fbdev->num_fbs; i++) {
+ struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
+ int j;
+
+ for (j = 0; j < ofbi->num_overlays; j++) {
+ struct omap_overlay *ovl = ofbi->overlays[j];
+ ovl->disable(ovl);
+ }
+ }
+
for (i = 0; i < fbdev->num_fbs; i++)
unregister_framebuffer(fbdev->fbs[i]);
@@ -2557,6 +2567,15 @@ static int omapfb_probe(struct platform_device *pdev)
goto cleanup;
}
+ if (def_display) {
+ u16 w, h;
+
+ def_display->driver->get_resolution(def_display, &w, &h);
+
+ dev_info(fbdev->dev, "using display '%s' mode %dx%d\n",
+ def_display->name, w, h);
+ }
+
return 0;
cleanup:
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index a5514acd2ac6..8a8d7f060784 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -2128,7 +2128,6 @@ static void rivafb_remove(struct pci_dev *pd)
pci_release_regions(pd);
kfree(info->pixmap.addr);
framebuffer_release(info);
- pci_set_drvdata(pd, NULL);
NVTRACE_LEAVE();
}
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 968b2997175a..9a3f8f1c6aab 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -1180,7 +1180,7 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
vga_res.flags = IORESOURCE_IO;
- pcibios_bus_to_resource(dev, &vga_res, &bus_reg);
+ pcibios_bus_to_resource(dev->bus, &vga_res, &bus_reg);
par->state.vgabase = (void __iomem *) vga_res.start;
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index ab85ad6c25ec..2bcc84ac18c7 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -1227,7 +1227,7 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
/* Free the MERAM cache. */
if (ch->cache) {
sh_mobile_meram_cache_free(priv->meram_dev, ch->cache);
- ch->cache = 0;
+ ch->cache = NULL;
}
}
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index f28674fea909..07c7df9ee77b 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -32,12 +32,6 @@
#include <video/tgafb.h>
-#ifdef CONFIG_PCI
-#define TGA_BUS_PCI(dev) (dev->bus == &pci_bus_type)
-#else
-#define TGA_BUS_PCI(dev) 0
-#endif
-
#ifdef CONFIG_TC
#define TGA_BUS_TC(dev) (dev->bus == &tc_bus_type)
#else
@@ -236,7 +230,7 @@ tgafb_set_par(struct fb_info *info)
};
struct tga_par *par = (struct tga_par *) info->par;
- int tga_bus_pci = TGA_BUS_PCI(par->dev);
+ int tga_bus_pci = dev_is_pci(par->dev);
int tga_bus_tc = TGA_BUS_TC(par->dev);
u32 htimings, vtimings, pll_freq;
u8 tga_type;
@@ -519,7 +513,7 @@ tgafb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue,
unsigned transp, struct fb_info *info)
{
struct tga_par *par = (struct tga_par *) info->par;
- int tga_bus_pci = TGA_BUS_PCI(par->dev);
+ int tga_bus_pci = dev_is_pci(par->dev);
int tga_bus_tc = TGA_BUS_TC(par->dev);
if (regno > 255)
@@ -1472,7 +1466,7 @@ static void
tgafb_init_fix(struct fb_info *info)
{
struct tga_par *par = (struct tga_par *)info->par;
- int tga_bus_pci = TGA_BUS_PCI(par->dev);
+ int tga_bus_pci = dev_is_pci(par->dev);
int tga_bus_tc = TGA_BUS_TC(par->dev);
u8 tga_type = par->tga_type;
const char *tga_type_name = NULL;
@@ -1496,10 +1490,9 @@ tgafb_init_fix(struct fb_info *info)
if (tga_bus_tc)
tga_type_name = "Digital ZLX-E3";
break;
- default:
- tga_type_name = "Unknown";
- break;
}
+ if (!tga_type_name)
+ tga_type_name = "Unknown";
strlcpy(info->fix.id, tga_type_name, sizeof(info->fix.id));
@@ -1560,7 +1553,7 @@ static int tgafb_register(struct device *dev)
const struct fb_videomode *modedb_tga = NULL;
resource_size_t bar0_start = 0, bar0_len = 0;
const char *mode_option_tga = NULL;
- int tga_bus_pci = TGA_BUS_PCI(dev);
+ int tga_bus_pci = dev_is_pci(dev);
int tga_bus_tc = TGA_BUS_TC(dev);
unsigned int modedbsize_tga = 0;
void __iomem *mem_base;
@@ -1690,7 +1683,7 @@ static int tgafb_register(struct device *dev)
static void tgafb_unregister(struct device *dev)
{
resource_size_t bar0_start = 0, bar0_len = 0;
- int tga_bus_pci = TGA_BUS_PCI(dev);
+ int tga_bus_pci = dev_is_pci(dev);
int tga_bus_tc = TGA_BUS_TC(dev);
struct fb_info *info = NULL;
struct tga_par *par;
diff --git a/drivers/video/valkyriefb.c b/drivers/video/valkyriefb.c
index e287ebc47817..97cb9bd1d1dd 100644
--- a/drivers/video/valkyriefb.c
+++ b/drivers/video/valkyriefb.c
@@ -56,7 +56,6 @@
#include <linux/cuda.h>
#include <asm/io.h>
#ifdef CONFIG_MAC
-#include <asm/bootinfo.h>
#include <asm/macintosh.h>
#else
#include <asm/prom.h>
diff --git a/drivers/video/vermilion/vermilion.c b/drivers/video/vermilion/vermilion.c
index 09a136633f35..048a66640b03 100644
--- a/drivers/video/vermilion/vermilion.c
+++ b/drivers/video/vermilion/vermilion.c
@@ -383,7 +383,6 @@ static void vmlfb_disable_mmio(struct vml_par *par)
static void vmlfb_release_devices(struct vml_par *par)
{
if (atomic_dec_and_test(&par->refcount)) {
- pci_set_drvdata(par->vdc, NULL);
pci_disable_device(par->gpu);
pci_disable_device(par->vdc);
}
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index 8bc6e0958a09..5c7cbc6c6236 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -729,7 +729,7 @@ static int vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
vga_res.flags = IORESOURCE_IO;
- pcibios_bus_to_resource(dev, &vga_res, &bus_reg);
+ pcibios_bus_to_resource(dev->bus, &vga_res, &bus_reg);
par->state.vgabase = (void __iomem *) vga_res.start;
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index cd005c227a23..901014bbc821 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -35,6 +35,7 @@
#include <xen/interface/io/fbif.h>
#include <xen/interface/io/protocols.h>
#include <xen/xenbus.h>
+#include <xen/platform_pci.h>
struct xenfb_info {
unsigned char *fb;
@@ -692,13 +693,16 @@ static DEFINE_XENBUS_DRIVER(xenfb, ,
static int __init xenfb_init(void)
{
- if (!xen_pv_domain())
+ if (!xen_domain())
return -ENODEV;
/* Nothing to do if running in dom0. */
if (xen_initial_domain())
return -ENODEV;
+ if (!xen_has_pv_devices())
+ return -ENODEV;
+
return xenbus_register_frontend(&xenfb_driver);
}
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c444654fc33f..34bdabaecbd6 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -285,7 +285,7 @@ static void update_balloon_size(struct virtio_balloon *vb)
{
__le32 actual = cpu_to_le32(vb->num_pages);
- virtio_cwrite(vb->vdev, struct virtio_balloon_config, num_pages,
+ virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
&actual);
}
@@ -369,7 +369,7 @@ static const struct address_space_operations virtio_balloon_aops;
* This function performs the balloon page migration task.
* Called through balloon_mapping->a_ops->migratepage
*/
-int virtballoon_migratepage(struct address_space *mapping,
+static int virtballoon_migratepage(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
struct balloon_dev_info *vb_dev_info = balloon_page_device(page);
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index a37c69941d30..a416f9b2a7f6 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -742,7 +742,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
return 0;
out_set_drvdata:
- pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, vp_dev->ioaddr);
out_req_regions:
pci_release_regions(pci_dev);
@@ -760,7 +759,6 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
unregister_virtio_device(&vp_dev->vdev);
vp_del_vqs(&vp_dev->vdev);
- pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, vp_dev->ioaddr);
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c
index 7b07135ab26e..c0227f9418eb 100644
--- a/drivers/vlynq/vlynq.c
+++ b/drivers/vlynq/vlynq.c
@@ -762,7 +762,8 @@ static int vlynq_remove(struct platform_device *pdev)
device_unregister(&dev->dev);
iounmap(dev->local);
- release_mem_region(dev->regs_start, dev->regs_end - dev->regs_start);
+ release_mem_region(dev->regs_start,
+ dev->regs_end - dev->regs_start + 1);
kfree(dev);
diff --git a/drivers/vme/Kconfig b/drivers/vme/Kconfig
index c5c22465a805..a6a6f9559119 100644
--- a/drivers/vme/Kconfig
+++ b/drivers/vme/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig VME_BUS
- tristate "VME bridge support"
+ bool "VME bridge support"
depends on PCI
---help---
If you say Y here you get support for the VME bridge Framework.
diff --git a/drivers/vme/boards/vme_vmivme7805.c b/drivers/vme/boards/vme_vmivme7805.c
index cf74aee2cef0..ac422121f9bb 100644
--- a/drivers/vme/boards/vme_vmivme7805.c
+++ b/drivers/vme/boards/vme_vmivme7805.c
@@ -27,7 +27,7 @@ static void __iomem *vmic_base;
static const char driver_name[] = "vmivme_7805";
-static DEFINE_PCI_DEVICE_TABLE(vmic_ids) = {
+static const struct pci_device_id vmic_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VMIC, PCI_DEVICE_ID_VTIMR) },
{ },
};
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index f8448573d030..1b5d48c578e1 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -42,7 +42,7 @@ static int geoid;
static const char driver_name[] = "vme_ca91cx42";
-static DEFINE_PCI_DEVICE_TABLE(ca91cx42_ids) = {
+static const struct pci_device_id ca91cx42_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
{ },
};
@@ -884,7 +884,7 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
@@ -938,7 +938,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 9cf88337e4e9..9911cd5fddb5 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -45,7 +45,7 @@ static int geoid;
static const char driver_name[] = "vme_tsi148";
-static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
+static const struct pci_device_id tsi148_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
{ },
};
@@ -1289,7 +1289,7 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
@@ -1371,7 +1371,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
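
Editor's note: the ca91cx42 and tsi148 fixes change the 16-bit alignment test to look at the current position (addr + done) rather than the buffer start, since the leading single-byte access may already have advanced the stream. A standalone illustration with a made-up odd start address:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t addr = 0x1001;	/* made-up buffer start on an odd address */
	size_t done = 0;

	if (addr & 0x1)
		done += 1;		/* the leading byte access, as in the drivers */

	/*
	 * Old test (addr & 0x2) is 0, so the realigning 16-bit access was
	 * skipped; new test ((addr + done) & 0x2) is 2, so it is taken and
	 * the following 32-bit copies start on a 4-byte boundary.
	 */
	printf("old=%lu new=%lu\n",
	       (unsigned long)(addr & 0x2),
	       (unsigned long)((addr + done) & 0x2));
	return 0;
}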
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index f6856b427496..7516030037a1 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -1274,7 +1274,7 @@ void vme_lm_free(struct vme_resource *resource)
}
EXPORT_SYMBOL(vme_lm_free);
-int vme_slot_get(struct vme_dev *vdev)
+int vme_slot_num(struct vme_dev *vdev)
{
struct vme_bridge *bridge;
@@ -1285,14 +1285,27 @@ int vme_slot_get(struct vme_dev *vdev)
}
if (bridge->slot_get == NULL) {
- printk(KERN_WARNING "vme_slot_get not supported\n");
+ printk(KERN_WARNING "vme_slot_num not supported\n");
return -EINVAL;
}
return bridge->slot_get(bridge);
}
-EXPORT_SYMBOL(vme_slot_get);
+EXPORT_SYMBOL(vme_slot_num);
+int vme_bus_num(struct vme_dev *vdev)
+{
+ struct vme_bridge *bridge;
+
+ bridge = vdev->bridge;
+ if (bridge == NULL) {
+ pr_err("Can't find VME bus\n");
+ return -EINVAL;
+ }
+
+ return bridge->num;
+}
+EXPORT_SYMBOL(vme_bus_num);
/* - Bridge Registration --------------------------------------------------- */
@@ -1512,9 +1525,5 @@ static void __exit vme_exit(void)
bus_unregister(&vme_bus_type);
}
-MODULE_DESCRIPTION("VME bridge driver framework");
-MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
-MODULE_LICENSE("GPL");
-
-module_init(vme_init);
+subsys_initcall(vme_init);
module_exit(vme_exit);
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 15c7251b0556..1e5d94c5afc9 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -46,7 +46,6 @@
struct mxc_w1_device {
void __iomem *regs;
- unsigned int clkdiv;
struct clk *clk;
struct w1_bus_master bus_master;
};
@@ -106,8 +105,10 @@ static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
static int mxc_w1_probe(struct platform_device *pdev)
{
struct mxc_w1_device *mdev;
+ unsigned long clkrate;
struct resource *res;
- int err = 0;
+ unsigned int clkdiv;
+ int err;
mdev = devm_kzalloc(&pdev->dev, sizeof(struct mxc_w1_device),
GFP_KERNEL);
@@ -118,27 +119,39 @@ static int mxc_w1_probe(struct platform_device *pdev)
if (IS_ERR(mdev->clk))
return PTR_ERR(mdev->clk);
- mdev->clkdiv = (clk_get_rate(mdev->clk) / 1000000) - 1;
+ clkrate = clk_get_rate(mdev->clk);
+ if (clkrate < 10000000)
+ dev_warn(&pdev->dev,
+ "Low clock frequency causes improper function\n");
+
+ clkdiv = DIV_ROUND_CLOSEST(clkrate, 1000000);
+ clkrate /= clkdiv;
+ if ((clkrate < 980000) || (clkrate > 1020000))
+ dev_warn(&pdev->dev,
+ "Incorrect time base frequency %lu Hz\n", clkrate);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdev->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mdev->regs))
return PTR_ERR(mdev->regs);
- clk_prepare_enable(mdev->clk);
- __raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER);
+ err = clk_prepare_enable(mdev->clk);
+ if (err)
+ return err;
+
+ __raw_writeb(clkdiv - 1, mdev->regs + MXC_W1_TIME_DIVIDER);
mdev->bus_master.data = mdev;
mdev->bus_master.reset_bus = mxc_w1_ds2_reset_bus;
mdev->bus_master.touch_bit = mxc_w1_ds2_touch_bit;
- err = w1_add_master_device(&mdev->bus_master);
+ platform_set_drvdata(pdev, mdev);
+ err = w1_add_master_device(&mdev->bus_master);
if (err)
- return err;
+ clk_disable_unprepare(mdev->clk);
- platform_set_drvdata(pdev, mdev);
- return 0;
+ return err;
}
/*
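
Editor's note: the mxc_w1 probe now derives the divider with DIV_ROUND_CLOSEST() and warns when the resulting time base drifts outside 980 kHz..1020 kHz. A standalone sketch of that check; both input clock rates below are made-up examples:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

static void check(unsigned long clkrate)
{
	unsigned int clkdiv = DIV_ROUND_CLOSEST(clkrate, 1000000);
	unsigned long base = clkrate / clkdiv;

	printf("%lu Hz -> div %u -> %lu Hz%s\n", clkrate, clkdiv, base,
	       (base < 980000 || base > 1020000) ? " (out of spec)" : "");
}

int main(void)
{
	check(66000000);	/* divides to exactly 1 MHz */
	check(10500000);	/* the best divider still misses the 2% window */
	return 0;
}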
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index e36b18b2817b..9709b8b484ba 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -18,10 +18,31 @@
#include <linux/of_gpio.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/delay.h>
#include "../w1.h"
#include "../w1_int.h"
+static u8 w1_gpio_set_pullup(void *data, int delay)
+{
+ struct w1_gpio_platform_data *pdata = data;
+
+ if (delay) {
+ pdata->pullup_duration = delay;
+ } else {
+ if (pdata->pullup_duration) {
+ gpio_direction_output(pdata->pin, 1);
+
+ msleep(pdata->pullup_duration);
+
+ gpio_direction_input(pdata->pin);
+ }
+ pdata->pullup_duration = 0;
+ }
+
+ return 0;
+}
+
static void w1_gpio_write_bit_dir(void *data, u8 bit)
{
struct w1_gpio_platform_data *pdata = data;
@@ -132,6 +153,7 @@ static int w1_gpio_probe(struct platform_device *pdev)
} else {
gpio_direction_input(pdata->pin);
master->write_bit = w1_gpio_write_bit_dir;
+ master->set_pullup = w1_gpio_set_pullup;
}
err = w1_add_master_device(master);
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 5a98649f6abc..590bd8a7cd1b 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -117,18 +117,6 @@ int w1_add_master_device(struct w1_bus_master *master)
printk(KERN_ERR "w1_add_master_device: invalid function set\n");
return(-EINVAL);
}
- /* While it would be electrically possible to make a device that
- * generated a strong pullup in bit bang mode, only hardware that
- * controls 1-wire time frames are even expected to support a strong
- * pullup. w1_io.c would need to support calling set_pullup before
- * the last write_bit operation of a w1_write_8 which it currently
- * doesn't.
- */
- if (!master->write_byte && !master->touch_bit && master->set_pullup) {
- printk(KERN_ERR "w1_add_master_device: set_pullup requires "
- "write_byte or touch_bit, disabling\n");
- master->set_pullup = NULL;
- }
/* Lock until the device is added (or not) to w1_masters. */
mutex_lock(&w1_mlock);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 5be6e919f785..79d25894343a 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -87,6 +87,14 @@ config DA9055_WATCHDOG
This driver can also be built as a module. If so, the module
will be called da9055_wdt.
+config GPIO_WATCHDOG
+ tristate "Watchdog device controlled through GPIO-line"
+ depends on OF_GPIO
+ select WATCHDOG_CORE
+ help
+	  If you say yes here you get support for a watchdog device
+	  controlled through a GPIO line.
+
config WM831X_WATCHDOG
tristate "WM831x watchdog"
depends on MFD_WM831X
@@ -109,7 +117,7 @@ config WM8350_WATCHDOG
config ARM_SP805_WATCHDOG
tristate "ARM SP805 Watchdog"
- depends on ARM && ARM_AMBA
+ depends on (ARM || ARM64) && ARM_AMBA
select WATCHDOG_CORE
help
ARM Primecell SP805 Watchdog timer. This will reboot your system when
@@ -188,6 +196,7 @@ config S3C2410_WATCHDOG
tristate "S3C2410 Watchdog"
depends on HAVE_S3C2410_WATCHDOG
select WATCHDOG_CORE
+ select MFD_SYSCON if ARCH_EXYNOS5
help
Watchdog timer block in the Samsung SoCs. This will reboot
the system when the timer expires with the watchdog enabled.
@@ -214,10 +223,10 @@ config SA1100_WATCHDOG
config DW_WATCHDOG
tristate "Synopsys DesignWare watchdog"
- depends on ARM && HAVE_CLK
+ depends on HAS_IOMEM
help
Say Y here if to include support for the Synopsys DesignWare
- watchdog timer found in many ARM chips.
+ watchdog timer found in many chips.
To compile this driver as a module, choose M here: the
module will be called dw_wdt.
@@ -270,10 +279,11 @@ config IOP_WATCHDOG
config DAVINCI_WATCHDOG
tristate "DaVinci watchdog"
- depends on ARCH_DAVINCI
+ depends on ARCH_DAVINCI || ARCH_KEYSTONE
+ select WATCHDOG_CORE
help
Say Y here if to include support for the watchdog timer
- in the DaVinci DM644x/DM646x processors.
+ in the DaVinci DM644x/DM646x or Keystone processors.
To compile this driver as a module, choose M here: the
module will be called davinci_wdt.
@@ -883,13 +893,22 @@ config VIA_WDT
Most people will say N.
config W83627HF_WDT
- tristate "W83627HF/W83627DHG Watchdog Timer"
+ tristate "Watchdog timer for W83627HF/W83627DHG and compatibles"
depends on X86
select WATCHDOG_CORE
---help---
- This is the driver for the hardware watchdog on the W83627HF chipset
- as used in Advantech PC-9578 and Tyan S2721-533 motherboards
- (and likely others). The driver also supports the W83627DHG chip.
+ This is the driver for the hardware watchdog on the following
+ Super I/O chips.
+ W83627DHG/DHG-P/EHF/EHG/F/G/HF/S/SF/THF/UHG/UG
+ W83637HF
+ W83667HG/HG-B
+ W83687THF
+ W83697HF
+ W83697UG
+ NCT6775
+ NCT6776
+ NCT6779
+
This watchdog simply watches your kernel to make sure it doesn't
freeze, and if it does, it reboots your computer after a certain
amount of time.
@@ -1139,6 +1158,28 @@ config BCM2835_WDT
To compile this driver as a loadable module, choose M here.
The module will be called bcm2835_wdt.
+config BCM_KONA_WDT
+ tristate "BCM Kona Watchdog"
+ depends on ARCH_BCM
+ select WATCHDOG_CORE
+ help
+	  Support for the watchdog timer on the Broadcom BCM281xx family,
+	  which includes the BCM11130, BCM11140, BCM11351, BCM28145 and
+	  BCM28155 variants.
+
+ Say 'Y' or 'M' here to enable the driver. The module will be called
+ bcm_kona_wdt.
+
+config BCM_KONA_WDT_DEBUG
+ bool "DEBUGFS support for BCM Kona Watchdog"
+ depends on BCM_KONA_WDT
+ help
+ If enabled, adds /sys/kernel/debug/bcm_kona_wdt/info which provides
+ access to the driver's internal data structures as well as watchdog
+ timer hardware registres.
+
+ If in doubt, say 'N'.
+
config LANTIQ_WDT
tristate "Lantiq SoC watchdog"
depends on LANTIQ
@@ -1171,6 +1212,7 @@ config MPC5200_WDT
config 8xxx_WDT
tristate "MPC8xxx Platform Watchdog Timer"
depends on PPC_8xx || PPC_83xx || PPC_86xx
+ select WATCHDOG_CORE
help
This driver is for a SoC level watchdog that exists on some
Freescale PowerPC processors. So far this driver supports:
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 91bd95a64baf..985a66cda76f 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o
obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o
obj-$(CONFIG_MOXART_WDT) += moxart_wdt.o
obj-$(CONFIG_SIRFSOC_WATCHDOG) += sirfsoc_wdt.o
+obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -171,6 +172,7 @@ obj-$(CONFIG_XEN_WDT) += xen_wdt.o
# Architecture Independent
obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o
+obj-$(CONFIG_GPIO_WATCHDOG) += gpio_wdt.o
obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index fbb7b94cabfd..3a17fbd39f8a 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -301,7 +301,7 @@ static int ali_notify_sys(struct notifier_block *this,
* want to register another driver on the same PCI id.
*/
-static DEFINE_PCI_DEVICE_TABLE(ali_pci_tbl) __used = {
+static const struct pci_device_id ali_pci_tbl[] __used = {
{ PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,},
{ PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,},
{ 0, },
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 12f0b762b528..996b2f7d330e 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -414,7 +414,7 @@ err_out:
module_init(alim7101_wdt_init);
module_exit(alim7101_wdt_unload);
-static DEFINE_PCI_DEVICE_TABLE(alim7101_pci_tbl) __used = {
+static const struct pci_device_id alim7101_pci_tbl[] __used = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533) },
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ }
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index be37dde4f864..489729b26298 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -19,11 +19,13 @@
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/jiffies.h>
@@ -31,22 +33,33 @@
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include "at91sam9_wdt.h"
#define DRV_NAME "AT91SAM9 Watchdog"
-#define wdt_read(field) \
- __raw_readl(at91wdt_private.base + field)
-#define wdt_write(field, val) \
- __raw_writel((val), at91wdt_private.base + field)
+#define wdt_read(wdt, field) \
+ __raw_readl((wdt)->base + (field))
+#define wdt_write(wdt, field, val) \
+ __raw_writel((val), (wdt)->base + (field))
/* AT91SAM9 watchdog runs a 12bit counter @ 256Hz,
* use this to convert a watchdog
* value from/to milliseconds.
*/
-#define ms_to_ticks(t) (((t << 8) / 1000) - 1)
-#define ticks_to_ms(t) (((t + 1) * 1000) >> 8)
+#define ticks_to_hz_rounddown(t) ((((t) + 1) * HZ) >> 8)
+#define ticks_to_hz_roundup(t) (((((t) + 1) * HZ) + 255) >> 8)
+#define ticks_to_secs(t) (((t) + 1) >> 8)
+#define secs_to_ticks(s) ((s) ? (((s) << 8) - 1) : 0)
+
+#define WDT_MR_RESET 0x3FFF2FFF
+
+/* Watchdog max counter value in ticks */
+#define WDT_COUNTER_MAX_TICKS 0xFFF
+
+/* Watchdog max delta/value in secs */
+#define WDT_COUNTER_MAX_SECS ticks_to_secs(WDT_COUNTER_MAX_TICKS)
/* Hardware timeout in seconds */
#define WDT_HW_TIMEOUT 2
@@ -66,23 +79,40 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static struct watchdog_device at91_wdt_dev;
-static void at91_ping(unsigned long data);
-
-static struct {
+#define to_wdt(wdd) container_of(wdd, struct at91wdt, wdd)
+struct at91wdt {
+ struct watchdog_device wdd;
void __iomem *base;
unsigned long next_heartbeat; /* the next_heartbeat for the timer */
struct timer_list timer; /* The timer that pings the watchdog */
-} at91wdt_private;
+ u32 mr;
+ u32 mr_mask;
+ unsigned long heartbeat; /* WDT heartbeat in jiffies */
+ bool nowayout;
+ unsigned int irq;
+};
/* ......................................................................... */
+static irqreturn_t wdt_interrupt(int irq, void *dev_id)
+{
+ struct at91wdt *wdt = (struct at91wdt *)dev_id;
+
+ if (wdt_read(wdt, AT91_WDT_SR)) {
+ pr_crit("at91sam9 WDT software reset\n");
+ emergency_restart();
+ pr_crit("Reboot didn't ?????\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
/*
* Reload the watchdog timer. (ie, pat the watchdog)
*/
-static inline void at91_wdt_reset(void)
+static inline void at91_wdt_reset(struct at91wdt *wdt)
{
- wdt_write(AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
+ wdt_write(wdt, AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
}
/*
@@ -90,26 +120,21 @@ static inline void at91_wdt_reset(void)
*/
static void at91_ping(unsigned long data)
{
- if (time_before(jiffies, at91wdt_private.next_heartbeat) ||
- (!watchdog_active(&at91_wdt_dev))) {
- at91_wdt_reset();
- mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
- } else
+ struct at91wdt *wdt = (struct at91wdt *)data;
+ if (time_before(jiffies, wdt->next_heartbeat) ||
+ !watchdog_active(&wdt->wdd)) {
+ at91_wdt_reset(wdt);
+ mod_timer(&wdt->timer, jiffies + wdt->heartbeat);
+ } else {
pr_crit("I will reset your machine !\n");
-}
-
-static int at91_wdt_ping(struct watchdog_device *wdd)
-{
- /* calculate when the next userspace timeout will be */
- at91wdt_private.next_heartbeat = jiffies + wdd->timeout * HZ;
- return 0;
+ }
}
static int at91_wdt_start(struct watchdog_device *wdd)
{
- /* calculate the next userspace timeout and modify the timer */
- at91_wdt_ping(wdd);
- mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
+ struct at91wdt *wdt = to_wdt(wdd);
+ /* calculate when the next userspace timeout will be */
+ wdt->next_heartbeat = jiffies + wdd->timeout * HZ;
return 0;
}
@@ -122,39 +147,104 @@ static int at91_wdt_stop(struct watchdog_device *wdd)
static int at91_wdt_set_timeout(struct watchdog_device *wdd, unsigned int new_timeout)
{
wdd->timeout = new_timeout;
- return 0;
+ return at91_wdt_start(wdd);
}
-/*
- * Set the watchdog time interval in 1/256Hz (write-once)
- * Counter is 12 bit.
- */
-static int at91_wdt_settimeout(unsigned int timeout)
+static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
{
- unsigned int reg;
- unsigned int mr;
-
- /* Check if disabled */
- mr = wdt_read(AT91_WDT_MR);
- if (mr & AT91_WDT_WDDIS) {
- pr_err("sorry, watchdog is disabled\n");
- return -EIO;
+ u32 tmp;
+ u32 delta;
+ u32 value;
+ int err;
+ u32 mask = wdt->mr_mask;
+ unsigned long min_heartbeat = 1;
+ unsigned long max_heartbeat;
+ struct device *dev = &pdev->dev;
+
+ tmp = wdt_read(wdt, AT91_WDT_MR);
+ if ((tmp & mask) != (wdt->mr & mask)) {
+ if (tmp == WDT_MR_RESET) {
+ wdt_write(wdt, AT91_WDT_MR, wdt->mr);
+ tmp = wdt_read(wdt, AT91_WDT_MR);
+ }
+ }
+
+ if (tmp & AT91_WDT_WDDIS) {
+ if (wdt->mr & AT91_WDT_WDDIS)
+ return 0;
+ dev_err(dev, "watchdog is disabled\n");
+ return -EINVAL;
+ }
+
+ value = tmp & AT91_WDT_WDV;
+ delta = (tmp & AT91_WDT_WDD) >> 16;
+
+ if (delta < value)
+ min_heartbeat = ticks_to_hz_roundup(value - delta);
+
+ max_heartbeat = ticks_to_hz_rounddown(value);
+ if (!max_heartbeat) {
+ dev_err(dev,
+ "heartbeat is too small for the system to handle it correctly\n");
+ return -EINVAL;
}
/*
- * All counting occurs at SLOW_CLOCK / 128 = 256 Hz
- *
- * Since WDV is a 12-bit counter, the maximum period is
- * 4096 / 256 = 16 seconds.
+ * Try to reset the watchdog counter 4 or 2 times more often than
+ * actually requested, to avoid spurious watchdog reset.
+ * If this is not possible because of the min_heartbeat value, reset
+ * it at the min_heartbeat period.
*/
- reg = AT91_WDT_WDRSTEN /* causes watchdog reset */
- /* | AT91_WDT_WDRPROC causes processor reset only */
- | AT91_WDT_WDDBGHLT /* disabled in debug mode */
- | AT91_WDT_WDD /* restart at any time */
- | (timeout & AT91_WDT_WDV); /* timer value */
- wdt_write(AT91_WDT_MR, reg);
+ if ((max_heartbeat / 4) >= min_heartbeat)
+ wdt->heartbeat = max_heartbeat / 4;
+ else if ((max_heartbeat / 2) >= min_heartbeat)
+ wdt->heartbeat = max_heartbeat / 2;
+ else
+ wdt->heartbeat = min_heartbeat;
+
+ if (max_heartbeat < min_heartbeat + 4)
+ dev_warn(dev,
+ "min heartbeat and max heartbeat might be too close for the system to handle it correctly\n");
+
+ if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) {
+ err = request_irq(wdt->irq, wdt_interrupt,
+ IRQF_SHARED | IRQF_IRQPOLL,
+ pdev->name, wdt);
+ if (err)
+ return err;
+ }
+
+ if ((tmp & wdt->mr_mask) != (wdt->mr & wdt->mr_mask))
+ dev_warn(dev,
+ "watchdog already configured differently (mr = %x expecting %x)\n",
+ tmp & wdt->mr_mask, wdt->mr & wdt->mr_mask);
+
+ setup_timer(&wdt->timer, at91_ping, (unsigned long)wdt);
+
+ /*
+ * Use min_heartbeat the first time to avoid spurious watchdog reset:
+ * we don't know for how long the watchdog counter is running, and
+ * - resetting it right now might trigger a watchdog fault reset
+ * - waiting for heartbeat time might lead to a watchdog timeout
+ * reset
+ */
+ mod_timer(&wdt->timer, jiffies + min_heartbeat);
+
+ /* Try to set timeout from device tree first */
+ if (watchdog_init_timeout(&wdt->wdd, 0, dev))
+ watchdog_init_timeout(&wdt->wdd, heartbeat, dev);
+ watchdog_set_nowayout(&wdt->wdd, wdt->nowayout);
+ err = watchdog_register_device(&wdt->wdd);
+ if (err)
+ goto out_stop_timer;
+
+ wdt->next_heartbeat = jiffies + wdt->wdd.timeout * HZ;
return 0;
+
+out_stop_timer:
+ del_timer(&wdt->timer);
+ return err;
}
/* ......................................................................... */
@@ -169,61 +259,123 @@ static const struct watchdog_ops at91_wdt_ops = {
.owner = THIS_MODULE,
.start = at91_wdt_start,
.stop = at91_wdt_stop,
- .ping = at91_wdt_ping,
.set_timeout = at91_wdt_set_timeout,
};
-static struct watchdog_device at91_wdt_dev = {
- .info = &at91_wdt_info,
- .ops = &at91_wdt_ops,
- .timeout = WDT_HEARTBEAT,
- .min_timeout = 1,
- .max_timeout = 0xFFFF,
-};
+#if defined(CONFIG_OF)
+static int of_at91wdt_init(struct device_node *np, struct at91wdt *wdt)
+{
+ u32 min = 0;
+ u32 max = WDT_COUNTER_MAX_SECS;
+ const char *tmp;
+
+ /* Get the interrupts property */
+ wdt->irq = irq_of_parse_and_map(np, 0);
+ if (!wdt->irq)
+ dev_warn(wdt->wdd.parent, "failed to get IRQ from DT\n");
+
+ if (!of_property_read_u32_index(np, "atmel,max-heartbeat-sec", 0,
+ &max)) {
+ if (!max || max > WDT_COUNTER_MAX_SECS)
+ max = WDT_COUNTER_MAX_SECS;
+
+ if (!of_property_read_u32_index(np, "atmel,min-heartbeat-sec",
+ 0, &min)) {
+ if (min >= max)
+ min = max - 1;
+ }
+ }
+
+ min = secs_to_ticks(min);
+ max = secs_to_ticks(max);
+
+ wdt->mr_mask = 0x3FFFFFFF;
+ wdt->mr = 0;
+ if (!of_property_read_string(np, "atmel,watchdog-type", &tmp) &&
+ !strcmp(tmp, "software")) {
+ wdt->mr |= AT91_WDT_WDFIEN;
+ wdt->mr_mask &= ~AT91_WDT_WDRPROC;
+ } else {
+ wdt->mr |= AT91_WDT_WDRSTEN;
+ }
+
+ if (!of_property_read_string(np, "atmel,reset-type", &tmp) &&
+ !strcmp(tmp, "proc"))
+ wdt->mr |= AT91_WDT_WDRPROC;
+
+ if (of_property_read_bool(np, "atmel,disable")) {
+ wdt->mr |= AT91_WDT_WDDIS;
+ wdt->mr_mask &= AT91_WDT_WDDIS;
+ }
+
+ if (of_property_read_bool(np, "atmel,idle-halt"))
+ wdt->mr |= AT91_WDT_WDIDLEHLT;
+
+ if (of_property_read_bool(np, "atmel,dbg-halt"))
+ wdt->mr |= AT91_WDT_WDDBGHLT;
+
+ wdt->mr |= max | ((max - min) << 16);
+
+ return 0;
+}
+#else
+static inline int of_at91wdt_init(struct device_node *np, struct at91wdt *wdt)
+{
+ return 0;
+}
+#endif
static int __init at91wdt_probe(struct platform_device *pdev)
{
struct resource *r;
- int res;
+ int err;
+ struct at91wdt *wdt;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENODEV;
- at91wdt_private.base = ioremap(r->start, resource_size(r));
- if (!at91wdt_private.base) {
- dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
return -ENOMEM;
- }
- at91_wdt_dev.parent = &pdev->dev;
- watchdog_init_timeout(&at91_wdt_dev, heartbeat, &pdev->dev);
- watchdog_set_nowayout(&at91_wdt_dev, nowayout);
+ wdt->mr = (WDT_HW_TIMEOUT * 256) | AT91_WDT_WDRSTEN | AT91_WDT_WDD |
+ AT91_WDT_WDDBGHLT | AT91_WDT_WDIDLEHLT;
+ wdt->mr_mask = 0x3FFFFFFF;
+ wdt->nowayout = nowayout;
+ wdt->wdd.parent = &pdev->dev;
+ wdt->wdd.info = &at91_wdt_info;
+ wdt->wdd.ops = &at91_wdt_ops;
+ wdt->wdd.timeout = WDT_HEARTBEAT;
+ wdt->wdd.min_timeout = 1;
+ wdt->wdd.max_timeout = 0xFFFF;
- /* Set watchdog */
- res = at91_wdt_settimeout(ms_to_ticks(WDT_HW_TIMEOUT * 1000));
- if (res)
- return res;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdt->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(wdt->base))
+ return PTR_ERR(wdt->base);
+
+ if (pdev->dev.of_node) {
+ err = of_at91wdt_init(pdev->dev.of_node, wdt);
+ if (err)
+ return err;
+ }
- res = watchdog_register_device(&at91_wdt_dev);
- if (res)
- return res;
+ err = at91_wdt_init(pdev, wdt);
+ if (err)
+ return err;
- at91wdt_private.next_heartbeat = jiffies + at91_wdt_dev.timeout * HZ;
- setup_timer(&at91wdt_private.timer, at91_ping, 0);
- mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
+ platform_set_drvdata(pdev, wdt);
pr_info("enabled (heartbeat=%d sec, nowayout=%d)\n",
- at91_wdt_dev.timeout, nowayout);
+ wdt->wdd.timeout, wdt->nowayout);
return 0;
}
static int __exit at91wdt_remove(struct platform_device *pdev)
{
- watchdog_unregister_device(&at91_wdt_dev);
+ struct at91wdt *wdt = platform_get_drvdata(pdev);
+ watchdog_unregister_device(&wdt->wdd);
pr_warn("I quit now, hardware will probably reboot!\n");
- del_timer(&at91wdt_private.timer);
+ del_timer(&wdt->timer);
return 0;
}
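
Editor's note: the at91sam9 conversion macros assume the 12-bit watchdog counter ticks at 256 Hz, so the raw maximum of 0xFFF corresponds to a 16 s window and the 2 s hardware timeout becomes 511 ticks. A standalone check of those two conversions:

#include <stdio.h>

#define ticks_to_secs(t)	(((t) + 1) >> 8)
#define secs_to_ticks(s)	((s) ? (((s) << 8) - 1) : 0)

int main(void)
{
	/* the 12-bit counter at 256 Hz gives a 16 s maximum window */
	printf("max = %d s\n", ticks_to_secs(0xFFF));	/* prints 16 */
	/* and a 2 s hardware timeout is programmed as 511 ticks */
	printf("2 s = %d ticks\n", secs_to_ticks(2));	/* prints 511 */
	return 0;
}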
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
new file mode 100644
index 000000000000..9c248099f4a2
--- /dev/null
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define SECWDOG_CTRL_REG 0x00000000
+#define SECWDOG_COUNT_REG 0x00000004
+
+#define SECWDOG_RESERVED_MASK 0x1dffffff
+#define SECWDOG_WD_LOAD_FLAG 0x10000000
+#define SECWDOG_EN_MASK 0x08000000
+#define SECWDOG_SRSTEN_MASK 0x04000000
+#define SECWDOG_RES_MASK 0x00f00000
+#define SECWDOG_COUNT_MASK 0x000fffff
+
+#define SECWDOG_MAX_COUNT SECWDOG_COUNT_MASK
+#define SECWDOG_CLKS_SHIFT 20
+#define SECWDOG_MAX_RES 15
+#define SECWDOG_DEFAULT_RESOLUTION 4
+#define SECWDOG_MAX_TRY 1000
+
+#define SECS_TO_TICKS(x, w) ((x) << (w)->resolution)
+#define TICKS_TO_SECS(x, w) ((x) >> (w)->resolution)
+
+#define BCM_KONA_WDT_NAME "bcm_kona_wdt"
+
+struct bcm_kona_wdt {
+ void __iomem *base;
+ /*
+ * One watchdog tick is 1/(2^resolution) seconds. Resolution can take
+ * the values 0-15, meaning one tick can be 1s to 30.52us. Our default
+ * resolution of 4 means one tick is 62.5ms.
+ *
+ * The watchdog counter is 20 bits. Depending on resolution, the maximum
+ * counter value of 0xfffff expires after about 12 days (resolution 0)
+ * down to only 32s (resolution 15). The default resolution of 4 gives
+ * us a maximum of about 18 hours and 12 minutes before the watchdog
+ * times out.
+ */
+ int resolution;
+ spinlock_t lock;
+#ifdef CONFIG_BCM_KONA_WDT_DEBUG
+ unsigned long busy_count;
+ struct dentry *debugfs;
+#endif
+};
+
+static int secure_register_read(struct bcm_kona_wdt *wdt, uint32_t offset)
+{
+ uint32_t val;
+ unsigned count = 0;
+
+ /*
+ * If the WD_LOAD_FLAG is set, the watchdog counter field is being
+ * updated in hardware. Once the WD timer is updated in hardware, it
+ * gets cleared.
+ */
+ do {
+ if (unlikely(count > 1))
+ udelay(5);
+ val = readl_relaxed(wdt->base + offset);
+ count++;
+ } while ((val & SECWDOG_WD_LOAD_FLAG) && count < SECWDOG_MAX_TRY);
+
+#ifdef CONFIG_BCM_KONA_WDT_DEBUG
+ /* Remember the maximum number iterations due to WD_LOAD_FLAG */
+ if (count > wdt->busy_count)
+ wdt->busy_count = count;
+#endif
+
+ /* This is the only place we return a negative value. */
+ if (val & SECWDOG_WD_LOAD_FLAG)
+ return -ETIMEDOUT;
+
+ /* We always mask out reserved bits. */
+ val &= SECWDOG_RESERVED_MASK;
+
+ return val;
+}
+
+#ifdef CONFIG_BCM_KONA_WDT_DEBUG
+
+static int bcm_kona_wdt_dbg_show(struct seq_file *s, void *data)
+{
+ int ctl_val, cur_val, ret;
+ unsigned long flags;
+ struct bcm_kona_wdt *wdt = s->private;
+
+ if (!wdt)
+ return seq_puts(s, "No device pointer\n");
+
+ spin_lock_irqsave(&wdt->lock, flags);
+ ctl_val = secure_register_read(wdt, SECWDOG_CTRL_REG);
+ cur_val = secure_register_read(wdt, SECWDOG_COUNT_REG);
+ spin_unlock_irqrestore(&wdt->lock, flags);
+
+ if (ctl_val < 0 || cur_val < 0) {
+ ret = seq_puts(s, "Error accessing hardware\n");
+ } else {
+ int ctl, cur, ctl_sec, cur_sec, res;
+
+ ctl = ctl_val & SECWDOG_COUNT_MASK;
+ res = (ctl_val & SECWDOG_RES_MASK) >> SECWDOG_CLKS_SHIFT;
+ cur = cur_val & SECWDOG_COUNT_MASK;
+ ctl_sec = TICKS_TO_SECS(ctl, wdt);
+ cur_sec = TICKS_TO_SECS(cur, wdt);
+ ret = seq_printf(s, "Resolution: %d / %d\n"
+ "Control: %d s / %d (%#x) ticks\n"
+ "Current: %d s / %d (%#x) ticks\n"
+ "Busy count: %lu\n", res,
+ wdt->resolution, ctl_sec, ctl, ctl, cur_sec,
+ cur, cur, wdt->busy_count);
+ }
+
+ return ret;
+}
+
+static int bcm_kona_dbg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, bcm_kona_wdt_dbg_show, inode->i_private);
+}
+
+static const struct file_operations bcm_kona_dbg_operations = {
+ .open = bcm_kona_dbg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void bcm_kona_wdt_debug_init(struct platform_device *pdev)
+{
+ struct dentry *dir;
+ struct bcm_kona_wdt *wdt = platform_get_drvdata(pdev);
+
+ if (!wdt)
+ return;
+
+ wdt->debugfs = NULL;
+
+ dir = debugfs_create_dir(BCM_KONA_WDT_NAME, NULL);
+ if (IS_ERR_OR_NULL(dir))
+ return;
+
+ if (debugfs_create_file("info", S_IFREG | S_IRUGO, dir, wdt,
+ &bcm_kona_dbg_operations))
+ wdt->debugfs = dir;
+ else
+ debugfs_remove_recursive(dir);
+}
+
+static void bcm_kona_wdt_debug_exit(struct platform_device *pdev)
+{
+ struct bcm_kona_wdt *wdt = platform_get_drvdata(pdev);
+
+ if (wdt && wdt->debugfs) {
+ debugfs_remove_recursive(wdt->debugfs);
+ wdt->debugfs = NULL;
+ }
+}
+
+#else
+
+static void bcm_kona_wdt_debug_init(struct platform_device *pdev) {}
+static void bcm_kona_wdt_debug_exit(struct platform_device *pdev) {}
+
+#endif /* CONFIG_BCM_KONA_WDT_DEBUG */
+
+static int bcm_kona_wdt_ctrl_reg_modify(struct bcm_kona_wdt *wdt,
+ unsigned mask, unsigned newval)
+{
+ int val;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&wdt->lock, flags);
+
+ val = secure_register_read(wdt, SECWDOG_CTRL_REG);
+ if (val < 0) {
+ ret = val;
+ } else {
+ val &= ~mask;
+ val |= newval;
+ writel_relaxed(val, wdt->base + SECWDOG_CTRL_REG);
+ }
+
+ spin_unlock_irqrestore(&wdt->lock, flags);
+
+ return ret;
+}
+
+static int bcm_kona_wdt_set_resolution_reg(struct bcm_kona_wdt *wdt)
+{
+ if (wdt->resolution > SECWDOG_MAX_RES)
+ return -EINVAL;
+
+ return bcm_kona_wdt_ctrl_reg_modify(wdt, SECWDOG_RES_MASK,
+ wdt->resolution << SECWDOG_CLKS_SHIFT);
+}
+
+static int bcm_kona_wdt_set_timeout_reg(struct watchdog_device *wdog,
+ unsigned watchdog_flags)
+{
+ struct bcm_kona_wdt *wdt = watchdog_get_drvdata(wdog);
+
+ return bcm_kona_wdt_ctrl_reg_modify(wdt, SECWDOG_COUNT_MASK,
+ SECS_TO_TICKS(wdog->timeout, wdt) |
+ watchdog_flags);
+}
+
+static int bcm_kona_wdt_set_timeout(struct watchdog_device *wdog,
+ unsigned int t)
+{
+ wdog->timeout = t;
+ return 0;
+}
+
+static unsigned int bcm_kona_wdt_get_timeleft(struct watchdog_device *wdog)
+{
+ struct bcm_kona_wdt *wdt = watchdog_get_drvdata(wdog);
+ int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdt->lock, flags);
+ val = secure_register_read(wdt, SECWDOG_COUNT_REG);
+ spin_unlock_irqrestore(&wdt->lock, flags);
+
+ if (val < 0)
+ return val;
+
+ return TICKS_TO_SECS(val & SECWDOG_COUNT_MASK, wdt);
+}
+
+static int bcm_kona_wdt_start(struct watchdog_device *wdog)
+{
+ return bcm_kona_wdt_set_timeout_reg(wdog,
+ SECWDOG_EN_MASK | SECWDOG_SRSTEN_MASK);
+}
+
+static int bcm_kona_wdt_stop(struct watchdog_device *wdog)
+{
+ struct bcm_kona_wdt *wdt = watchdog_get_drvdata(wdog);
+
+ return bcm_kona_wdt_ctrl_reg_modify(wdt, SECWDOG_EN_MASK |
+ SECWDOG_SRSTEN_MASK, 0);
+}
+
+static struct watchdog_ops bcm_kona_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = bcm_kona_wdt_start,
+ .stop = bcm_kona_wdt_stop,
+ .set_timeout = bcm_kona_wdt_set_timeout,
+ .get_timeleft = bcm_kona_wdt_get_timeleft,
+};
+
+static struct watchdog_info bcm_kona_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING,
+ .identity = "Broadcom Kona Watchdog Timer",
+};
+
+static struct watchdog_device bcm_kona_wdt_wdd = {
+ .info = &bcm_kona_wdt_info,
+ .ops = &bcm_kona_wdt_ops,
+ .min_timeout = 1,
+ .max_timeout = SECWDOG_MAX_COUNT >> SECWDOG_DEFAULT_RESOLUTION,
+ .timeout = SECWDOG_MAX_COUNT >> SECWDOG_DEFAULT_RESOLUTION,
+};
+
+static void bcm_kona_wdt_shutdown(struct platform_device *pdev)
+{
+ bcm_kona_wdt_stop(&bcm_kona_wdt_wdd);
+}
+
+static int bcm_kona_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm_kona_wdt *wdt;
+ struct resource *res;
+ int ret;
+
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdt->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(wdt->base))
+ return -ENODEV;
+
+ wdt->resolution = SECWDOG_DEFAULT_RESOLUTION;
+ ret = bcm_kona_wdt_set_resolution_reg(wdt);
+ if (ret) {
+ dev_err(dev, "Failed to set resolution (error: %d)", ret);
+ return ret;
+ }
+
+ spin_lock_init(&wdt->lock);
+ platform_set_drvdata(pdev, wdt);
+ watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
+
+ ret = bcm_kona_wdt_set_timeout_reg(&bcm_kona_wdt_wdd, 0);
+ if (ret) {
+ dev_err(dev, "Failed set watchdog timeout");
+ return ret;
+ }
+
+ ret = watchdog_register_device(&bcm_kona_wdt_wdd);
+ if (ret) {
+ dev_err(dev, "Failed to register watchdog device");
+ return ret;
+ }
+
+ bcm_kona_wdt_debug_init(pdev);
+ dev_dbg(dev, "Broadcom Kona Watchdog Timer");
+
+ return 0;
+}
+
+static int bcm_kona_wdt_remove(struct platform_device *pdev)
+{
+ bcm_kona_wdt_debug_exit(pdev);
+ bcm_kona_wdt_shutdown(pdev);
+ watchdog_unregister_device(&bcm_kona_wdt_wdd);
+ dev_dbg(&pdev->dev, "Watchdog driver disabled");
+
+ return 0;
+}
+
+static const struct of_device_id bcm_kona_wdt_of_match[] = {
+ { .compatible = "brcm,kona-wdt", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm_kona_wdt_of_match);
+
+static struct platform_driver bcm_kona_wdt_driver = {
+ .driver = {
+ .name = BCM_KONA_WDT_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = bcm_kona_wdt_of_match,
+ },
+ .probe = bcm_kona_wdt_probe,
+ .remove = bcm_kona_wdt_remove,
+ .shutdown = bcm_kona_wdt_shutdown,
+};
+
+module_platform_driver(bcm_kona_wdt_driver);
+
+MODULE_ALIAS("platform:" BCM_KONA_WDT_NAME);
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom Kona Watchdog Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index dd625cca1ae5..b1bae03742a9 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -3,7 +3,7 @@
*
* Watchdog driver for DaVinci DM644x/DM646x processors
*
- * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2006-2013 Texas Instruments.
*
* 2007 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
@@ -15,18 +15,12 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
-#include <linux/bitops.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/clk.h>
-#include <linux/slab.h>
#include <linux/err.h>
#define MODULE_NAME "DAVINCI-WDT: "
@@ -61,142 +55,103 @@
#define WDKEY_SEQ0 (0xa5c6 << 16)
#define WDKEY_SEQ1 (0xda7e << 16)
-static int heartbeat = DEFAULT_HEARTBEAT;
+static int heartbeat;
-static DEFINE_SPINLOCK(io_lock);
-static unsigned long wdt_status;
-#define WDT_IN_USE 0
-#define WDT_OK_TO_CLOSE 1
-#define WDT_REGION_INITED 2
-#define WDT_DEVICE_INITED 3
-
-static void __iomem *wdt_base;
-struct clk *wdt_clk;
-
-static void wdt_service(void)
-{
- spin_lock(&io_lock);
-
- /* put watchdog in service state */
- iowrite32(WDKEY_SEQ0, wdt_base + WDTCR);
- /* put watchdog in active state */
- iowrite32(WDKEY_SEQ1, wdt_base + WDTCR);
-
- spin_unlock(&io_lock);
-}
+/*
+ * struct to hold data for each WDT device
+ * @base - base io address of WD device
+ * @clk - source clock of WDT
+ * @wdd - hold watchdog device as is in WDT core
+ */
+struct davinci_wdt_device {
+ void __iomem *base;
+ struct clk *clk;
+ struct watchdog_device wdd;
+};
-static void wdt_enable(void)
+static int davinci_wdt_start(struct watchdog_device *wdd)
{
u32 tgcr;
u32 timer_margin;
unsigned long wdt_freq;
+ struct davinci_wdt_device *davinci_wdt = watchdog_get_drvdata(wdd);
- wdt_freq = clk_get_rate(wdt_clk);
-
- spin_lock(&io_lock);
+ wdt_freq = clk_get_rate(davinci_wdt->clk);
/* disable, internal clock source */
- iowrite32(0, wdt_base + TCR);
+ iowrite32(0, davinci_wdt->base + TCR);
/* reset timer, set mode to 64-bit watchdog, and unreset */
- iowrite32(0, wdt_base + TGCR);
+ iowrite32(0, davinci_wdt->base + TGCR);
tgcr = TIMMODE_64BIT_WDOG | TIM12RS_UNRESET | TIM34RS_UNRESET;
- iowrite32(tgcr, wdt_base + TGCR);
+ iowrite32(tgcr, davinci_wdt->base + TGCR);
/* clear counter regs */
- iowrite32(0, wdt_base + TIM12);
- iowrite32(0, wdt_base + TIM34);
+ iowrite32(0, davinci_wdt->base + TIM12);
+ iowrite32(0, davinci_wdt->base + TIM34);
/* set timeout period */
- timer_margin = (((u64)heartbeat * wdt_freq) & 0xffffffff);
- iowrite32(timer_margin, wdt_base + PRD12);
- timer_margin = (((u64)heartbeat * wdt_freq) >> 32);
- iowrite32(timer_margin, wdt_base + PRD34);
+ timer_margin = (((u64)wdd->timeout * wdt_freq) & 0xffffffff);
+ iowrite32(timer_margin, davinci_wdt->base + PRD12);
+ timer_margin = (((u64)wdd->timeout * wdt_freq) >> 32);
+ iowrite32(timer_margin, davinci_wdt->base + PRD34);
/* enable run continuously */
- iowrite32(ENAMODE12_PERIODIC, wdt_base + TCR);
+ iowrite32(ENAMODE12_PERIODIC, davinci_wdt->base + TCR);
/* Once the WDT is in the pre-active state, writes to
* TIM12, TIM34, PRD12, PRD34, TCR, TGCR and WDTCR are
* write protected (except for the WDKEY field)
*/
/* put watchdog in pre-active state */
- iowrite32(WDKEY_SEQ0 | WDEN, wdt_base + WDTCR);
+ iowrite32(WDKEY_SEQ0 | WDEN, davinci_wdt->base + WDTCR);
/* put watchdog in active state */
- iowrite32(WDKEY_SEQ1 | WDEN, wdt_base + WDTCR);
-
- spin_unlock(&io_lock);
+ iowrite32(WDKEY_SEQ1 | WDEN, davinci_wdt->base + WDTCR);
+ return 0;
}
-static int davinci_wdt_open(struct inode *inode, struct file *file)
+static int davinci_wdt_ping(struct watchdog_device *wdd)
{
- if (test_and_set_bit(WDT_IN_USE, &wdt_status))
- return -EBUSY;
-
- wdt_enable();
+ struct davinci_wdt_device *davinci_wdt = watchdog_get_drvdata(wdd);
- return nonseekable_open(inode, file);
+ /* put watchdog in service state */
+ iowrite32(WDKEY_SEQ0, davinci_wdt->base + WDTCR);
+ /* put watchdog in active state */
+ iowrite32(WDKEY_SEQ1, davinci_wdt->base + WDTCR);
+ return 0;
}
-static ssize_t
-davinci_wdt_write(struct file *file, const char *data, size_t len,
- loff_t *ppos)
+static unsigned int davinci_wdt_get_timeleft(struct watchdog_device *wdd)
{
- if (len)
- wdt_service();
+ u64 timer_counter;
+ unsigned long freq;
+ u32 val;
+ struct davinci_wdt_device *davinci_wdt = watchdog_get_drvdata(wdd);
- return len;
-}
+ /* if timeout has occurred then return 0 */
+ val = ioread32(davinci_wdt->base + WDTCR);
+ if (val & WDFLAG)
+ return 0;
-static const struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING,
- .identity = "DaVinci Watchdog",
-};
+ freq = clk_get_rate(davinci_wdt->clk);
-static long davinci_wdt_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- int ret = -ENOTTY;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ret = copy_to_user((struct watchdog_info *)arg, &ident,
- sizeof(ident)) ? -EFAULT : 0;
- break;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, (int *)arg);
- break;
-
- case WDIOC_KEEPALIVE:
- wdt_service();
- ret = 0;
- break;
-
- case WDIOC_GETTIMEOUT:
- ret = put_user(heartbeat, (int *)arg);
- break;
- }
- return ret;
-}
+ if (!freq)
+ return 0;
-static int davinci_wdt_release(struct inode *inode, struct file *file)
-{
- wdt_service();
- clear_bit(WDT_IN_USE, &wdt_status);
+ timer_counter = ioread32(davinci_wdt->base + TIM12);
+ timer_counter |= ((u64)ioread32(davinci_wdt->base + TIM34) << 32);
- return 0;
+ do_div(timer_counter, freq);
+
+ return wdd->timeout - timer_counter;
}
-static const struct file_operations davinci_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = davinci_wdt_write,
- .unlocked_ioctl = davinci_wdt_ioctl,
- .open = davinci_wdt_open,
- .release = davinci_wdt_release,
+static const struct watchdog_info davinci_wdt_info = {
+ .options = WDIOF_KEEPALIVEPING,
+ .identity = "DaVinci/Keystone Watchdog",
};
-static struct miscdevice davinci_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &davinci_wdt_fops,
+static const struct watchdog_ops davinci_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = davinci_wdt_start,
+ .stop = davinci_wdt_ping,
+ .ping = davinci_wdt_ping,
+ .get_timeleft = davinci_wdt_get_timeleft,
};
static int davinci_wdt_probe(struct platform_device *pdev)
@@ -204,37 +159,53 @@ static int davinci_wdt_probe(struct platform_device *pdev)
int ret = 0;
struct device *dev = &pdev->dev;
struct resource *wdt_mem;
+ struct watchdog_device *wdd;
+ struct davinci_wdt_device *davinci_wdt;
+
+ davinci_wdt = devm_kzalloc(dev, sizeof(*davinci_wdt), GFP_KERNEL);
+ if (!davinci_wdt)
+ return -ENOMEM;
- wdt_clk = devm_clk_get(dev, NULL);
- if (WARN_ON(IS_ERR(wdt_clk)))
- return PTR_ERR(wdt_clk);
+ davinci_wdt->clk = devm_clk_get(dev, NULL);
+ if (WARN_ON(IS_ERR(davinci_wdt->clk)))
+ return PTR_ERR(davinci_wdt->clk);
- clk_prepare_enable(wdt_clk);
+ clk_prepare_enable(davinci_wdt->clk);
- if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
- heartbeat = DEFAULT_HEARTBEAT;
+ platform_set_drvdata(pdev, davinci_wdt);
- dev_info(dev, "heartbeat %d sec\n", heartbeat);
+ wdd = &davinci_wdt->wdd;
+ wdd->info = &davinci_wdt_info;
+ wdd->ops = &davinci_wdt_ops;
+ wdd->min_timeout = 1;
+ wdd->max_timeout = MAX_HEARTBEAT;
+ wdd->timeout = DEFAULT_HEARTBEAT;
+
+ watchdog_init_timeout(wdd, heartbeat, dev);
+
+ dev_info(dev, "heartbeat %d sec\n", wdd->timeout);
+
+ watchdog_set_drvdata(wdd, davinci_wdt);
+ watchdog_set_nowayout(wdd, 1);
wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt_base = devm_ioremap_resource(dev, wdt_mem);
- if (IS_ERR(wdt_base))
- return PTR_ERR(wdt_base);
+ davinci_wdt->base = devm_ioremap_resource(dev, wdt_mem);
+ if (IS_ERR(davinci_wdt->base))
+ return PTR_ERR(davinci_wdt->base);
- ret = misc_register(&davinci_wdt_miscdev);
- if (ret < 0) {
- dev_err(dev, "cannot register misc device\n");
- } else {
- set_bit(WDT_DEVICE_INITED, &wdt_status);
- }
+ ret = watchdog_register_device(wdd);
+ if (ret < 0)
+ dev_err(dev, "cannot register watchdog device\n");
return ret;
}
static int davinci_wdt_remove(struct platform_device *pdev)
{
- misc_deregister(&davinci_wdt_miscdev);
- clk_disable_unprepare(wdt_clk);
+ struct davinci_wdt_device *davinci_wdt = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(&davinci_wdt->wdd);
+ clk_disable_unprepare(davinci_wdt->clk);
return 0;
}
@@ -247,7 +218,7 @@ MODULE_DEVICE_TABLE(of, davinci_wdt_of_match);
static struct platform_driver platform_wdt_driver = {
.driver = {
- .name = "watchdog",
+ .name = "davinci-wdt",
.owner = THIS_MODULE,
.of_match_table = davinci_wdt_of_match,
},
@@ -267,4 +238,4 @@ MODULE_PARM_DESC(heartbeat,
__MODULE_STRING(DEFAULT_HEARTBEAT));
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:watchdog");
+MODULE_ALIAS("platform:davinci-wdt");
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index a46f5c7ee7ff..ee4f86ba83ec 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -8,7 +8,7 @@
* 2 of the License, or (at your option) any later version.
*
* This file implements a driver for the Synopsys DesignWare watchdog device
- * in the many ARM subsystems. The watchdog has 16 different timeout periods
+ * in many subsystems. The watchdog has 16 different timeout periods
* and these are a function of the input clock frequency.
*
* The DesignWare watchdog cannot be stopped once it has been started so we
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
new file mode 100644
index 000000000000..220a9e07cfd5
--- /dev/null
+++ b/drivers/watchdog/gpio_wdt.c
@@ -0,0 +1,254 @@
+/*
+ * Driver for watchdog device controlled through GPIO-line
+ *
+ * Author: 2013, Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/watchdog.h>
+
+#define SOFT_TIMEOUT_MIN 1
+#define SOFT_TIMEOUT_DEF 60
+#define SOFT_TIMEOUT_MAX 0xffff
+
+enum {
+ HW_ALGO_TOGGLE,
+ HW_ALGO_LEVEL,
+};
+
+struct gpio_wdt_priv {
+ int gpio;
+ bool active_low;
+ bool state;
+ unsigned int hw_algo;
+ unsigned int hw_margin;
+ unsigned long last_jiffies;
+ struct notifier_block notifier;
+ struct timer_list timer;
+ struct watchdog_device wdd;
+};
+
+static void gpio_wdt_disable(struct gpio_wdt_priv *priv)
+{
+ gpio_set_value_cansleep(priv->gpio, !priv->active_low);
+
+ /* Put GPIO back to tristate */
+ if (priv->hw_algo == HW_ALGO_TOGGLE)
+ gpio_direction_input(priv->gpio);
+}
+
+static int gpio_wdt_start(struct watchdog_device *wdd)
+{
+ struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+ priv->state = priv->active_low;
+ gpio_direction_output(priv->gpio, priv->state);
+ priv->last_jiffies = jiffies;
+ mod_timer(&priv->timer, priv->last_jiffies + priv->hw_margin);
+
+ return 0;
+}
+
+static int gpio_wdt_stop(struct watchdog_device *wdd)
+{
+ struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+ mod_timer(&priv->timer, 0);
+ gpio_wdt_disable(priv);
+
+ return 0;
+}
+
+static int gpio_wdt_ping(struct watchdog_device *wdd)
+{
+ struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+ priv->last_jiffies = jiffies;
+
+ return 0;
+}
+
+static int gpio_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
+{
+ wdd->timeout = t;
+
+ return gpio_wdt_ping(wdd);
+}
+
+static void gpio_wdt_hwping(unsigned long data)
+{
+ struct watchdog_device *wdd = (struct watchdog_device *)data;
+ struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+ if (time_after(jiffies, priv->last_jiffies +
+ msecs_to_jiffies(wdd->timeout * 1000))) {
+ dev_crit(wdd->dev, "Timer expired. System will reboot soon!\n");
+ return;
+ }
+
+ /* Restart timer */
+ mod_timer(&priv->timer, jiffies + priv->hw_margin);
+
+ switch (priv->hw_algo) {
+ case HW_ALGO_TOGGLE:
+ /* Toggle output pin */
+ priv->state = !priv->state;
+ gpio_set_value_cansleep(priv->gpio, priv->state);
+ break;
+ case HW_ALGO_LEVEL:
+ /* Pulse */
+ gpio_set_value_cansleep(priv->gpio, !priv->active_low);
+ udelay(1);
+ gpio_set_value_cansleep(priv->gpio, priv->active_low);
+ break;
+ }
+}
+
+static int gpio_wdt_notify_sys(struct notifier_block *nb, unsigned long code,
+ void *unused)
+{
+ struct gpio_wdt_priv *priv = container_of(nb, struct gpio_wdt_priv,
+ notifier);
+
+ mod_timer(&priv->timer, 0);
+
+ switch (code) {
+ case SYS_HALT:
+ case SYS_POWER_OFF:
+ gpio_wdt_disable(priv);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static const struct watchdog_info gpio_wdt_ident = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING |
+ WDIOF_SETTIMEOUT,
+ .identity = "GPIO Watchdog",
+};
+
+static const struct watchdog_ops gpio_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = gpio_wdt_start,
+ .stop = gpio_wdt_stop,
+ .ping = gpio_wdt_ping,
+ .set_timeout = gpio_wdt_set_timeout,
+};
+
+static int gpio_wdt_probe(struct platform_device *pdev)
+{
+ struct gpio_wdt_priv *priv;
+ enum of_gpio_flags flags;
+ unsigned int hw_margin;
+ unsigned long f = 0;
+ const char *algo;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->gpio = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
+ if (!gpio_is_valid(priv->gpio))
+ return priv->gpio;
+
+ priv->active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+ ret = of_property_read_string(pdev->dev.of_node, "hw_algo", &algo);
+ if (ret)
+ return ret;
+ if (!strncmp(algo, "toggle", 6)) {
+ priv->hw_algo = HW_ALGO_TOGGLE;
+ f = GPIOF_IN;
+ } else if (!strncmp(algo, "level", 5)) {
+ priv->hw_algo = HW_ALGO_LEVEL;
+ f = priv->active_low ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+ } else {
+ return -EINVAL;
+ }
+
+ ret = devm_gpio_request_one(&pdev->dev, priv->gpio, f,
+ dev_name(&pdev->dev));
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "hw_margin_ms", &hw_margin);
+ if (ret)
+ return ret;
+ /* Disallow values lower than 2 and higher than 65535 ms */
+ if (hw_margin < 2 || hw_margin > 65535)
+ return -EINVAL;
+
+ /* Use safe value (1/2 of real timeout) */
+ priv->hw_margin = msecs_to_jiffies(hw_margin / 2);
+
+ watchdog_set_drvdata(&priv->wdd, priv);
+
+ priv->wdd.info = &gpio_wdt_ident;
+ priv->wdd.ops = &gpio_wdt_ops;
+ priv->wdd.min_timeout = SOFT_TIMEOUT_MIN;
+ priv->wdd.max_timeout = SOFT_TIMEOUT_MAX;
+
+ if (watchdog_init_timeout(&priv->wdd, 0, &pdev->dev) < 0)
+ priv->wdd.timeout = SOFT_TIMEOUT_DEF;
+
+ setup_timer(&priv->timer, gpio_wdt_hwping, (unsigned long)&priv->wdd);
+
+ ret = watchdog_register_device(&priv->wdd);
+ if (ret)
+ return ret;
+
+ priv->notifier.notifier_call = gpio_wdt_notify_sys;
+ ret = register_reboot_notifier(&priv->notifier);
+ if (ret)
+ watchdog_unregister_device(&priv->wdd);
+
+ return ret;
+}
+
+static int gpio_wdt_remove(struct platform_device *pdev)
+{
+ struct gpio_wdt_priv *priv = platform_get_drvdata(pdev);
+
+ del_timer_sync(&priv->timer);
+ unregister_reboot_notifier(&priv->notifier);
+ watchdog_unregister_device(&priv->wdd);
+
+ return 0;
+}
+
+static const struct of_device_id gpio_wdt_dt_ids[] = {
+ { .compatible = "linux,wdt-gpio", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpio_wdt_dt_ids);
+
+static struct platform_driver gpio_wdt_driver = {
+ .driver = {
+ .name = "gpio-wdt",
+ .owner = THIS_MODULE,
+ .of_match_table = gpio_wdt_dt_ids,
+ },
+ .probe = gpio_wdt_probe,
+ .remove = gpio_wdt_remove,
+};
+module_platform_driver(gpio_wdt_driver);
+
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("GPIO Watchdog");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 45b979d9dd13..2b75e8b47279 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -39,7 +39,7 @@
#endif /* CONFIG_HPWDT_NMI_DECODING */
#include <asm/nmi.h>
-#define HPWDT_VERSION "1.3.2"
+#define HPWDT_VERSION "1.3.3"
#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
#define TICKS_TO_SECS(ticks) ((ticks) * 128 / 1000)
#define HPWDT_MAX_TIMER TICKS_TO_SECS(65535)
@@ -55,7 +55,7 @@ static void __iomem *pci_mem_addr; /* the PCI-memory address */
static unsigned long __iomem *hpwdt_timer_reg;
static unsigned long __iomem *hpwdt_timer_con;
-static DEFINE_PCI_DEVICE_TABLE(hpwdt_devices) = {
+static const struct pci_device_id hpwdt_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB203) }, /* iLO2 */
{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3306) }, /* iLO3 */
{0}, /* terminate list */
@@ -501,8 +501,13 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
"but unable to determine source.\n");
}
}
- panic("An NMI occurred, please see the Integrated "
- "Management Log for details.\n");
+ panic("An NMI occurred. Depending on your system the reason "
+ "for the NMI is logged in any one of the following "
+ "resources:\n"
+ "1. Integrated Management Log (IML)\n"
+ "2. OA Syslog\n"
+ "3. OA Forward Progress Log\n"
+ "4. iLO Event Log");
out:
return NMI_DONE;
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c
index a72fe9361ddf..25a2bfdb4e9d 100644
--- a/drivers/watchdog/i6300esb.c
+++ b/drivers/watchdog/i6300esb.c
@@ -334,7 +334,7 @@ static struct miscdevice esb_miscdev = {
/*
* Data for PCI driver interface
*/
-static DEFINE_PCI_DEVICE_TABLE(esb_pci_tbl) = {
+static const struct pci_device_id esb_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_9), },
{ 0, }, /* End of list */
};
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index b4786bccc42c..dd51d9539b33 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -2,6 +2,7 @@
* Watchdog driver for IMX2 and later processors
*
* Copyright (C) 2010 Wolfram Sang, Pengutronix e.K. <w.sang@pengutronix.de>
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
*
* some parts adapted by similar drivers from Darius Augulis and Vladimir
* Zapolskiy, additional improvements by Wim Van Sebroeck.
@@ -40,6 +41,7 @@
#define IMX2_WDT_WCR_WT (0xFF << 8) /* -> Watchdog Timeout Field */
#define IMX2_WDT_WCR_WRE (1 << 3) /* -> WDOG Reset Enable */
#define IMX2_WDT_WCR_WDE (1 << 2) /* -> Watchdog Enable */
+#define IMX2_WDT_WCR_WDZST (1 << 0) /* -> Watchdog timer Suspend */
#define IMX2_WDT_WSR 0x02 /* Service Register */
#define IMX2_WDT_SEQ1 0x5555 /* -> service sequence 1 */
@@ -87,6 +89,8 @@ static inline void imx2_wdt_setup(void)
{
u16 val = __raw_readw(imx2_wdt.base + IMX2_WDT_WCR);
+ /* Suspend timer in low power mode, write once-only */
+ val |= IMX2_WDT_WCR_WDZST;
/* Strip the old watchdog Time-Out value */
val &= ~IMX2_WDT_WCR_WT;
/* Generate reset if WDOG times out */
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index 4166e4d116a8..4aa3a8a876fe 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -19,6 +19,8 @@
#include <linux/watchdog.h>
#include <linux/moduleparam.h>
+#include <asm/system_misc.h>
+
#define REG_COUNT 0x4
#define REG_MODE 0x8
#define REG_ENABLE 0xC
@@ -29,8 +31,17 @@ struct moxart_wdt_dev {
unsigned int clock_frequency;
};
+static struct moxart_wdt_dev *moxart_restart_ctx;
+
static int heartbeat;
+static void moxart_wdt_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
+ writel(1, moxart_restart_ctx->base + REG_COUNT);
+ writel(0x5ab9, moxart_restart_ctx->base + REG_MODE);
+ writel(0x03, moxart_restart_ctx->base + REG_ENABLE);
+}
+
static int moxart_wdt_stop(struct watchdog_device *wdt_dev)
{
struct moxart_wdt_dev *moxart_wdt = watchdog_get_drvdata(wdt_dev);
@@ -125,6 +136,9 @@ static int moxart_wdt_probe(struct platform_device *pdev)
if (err)
return err;
+ moxart_restart_ctx = moxart_wdt;
+ arm_pm_restart = moxart_wdt_restart;
+
dev_dbg(dev, "Watchdog enabled (heartbeat=%d sec, nowayout=%d)\n",
moxart_wdt->dev.timeout, nowayout);
@@ -135,6 +149,7 @@ static int moxart_wdt_remove(struct platform_device *pdev)
{
struct moxart_wdt_dev *moxart_wdt = platform_get_drvdata(pdev);
+ arm_pm_restart = NULL;
moxart_wdt_stop(&moxart_wdt->dev);
watchdog_unregister_device(&moxart_wdt->dev);
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index d82152077fd9..c1f65b4c0aa4 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -73,9 +73,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
* to 0
*/
static int prescale = 1;
-static unsigned int timeout_sec;
-static unsigned long wdt_is_open;
static DEFINE_SPINLOCK(wdt_spinlock);
static void mpc8xxx_wdt_keepalive(void)
@@ -87,39 +85,23 @@ static void mpc8xxx_wdt_keepalive(void)
spin_unlock(&wdt_spinlock);
}
+static struct watchdog_device mpc8xxx_wdt_dev;
static void mpc8xxx_wdt_timer_ping(unsigned long arg);
-static DEFINE_TIMER(wdt_timer, mpc8xxx_wdt_timer_ping, 0, 0);
+static DEFINE_TIMER(wdt_timer, mpc8xxx_wdt_timer_ping, 0,
+ (unsigned long)&mpc8xxx_wdt_dev);
static void mpc8xxx_wdt_timer_ping(unsigned long arg)
{
+ struct watchdog_device *w = (struct watchdog_device *)arg;
+
mpc8xxx_wdt_keepalive();
/* We're pinging it twice faster than needed, just to be sure. */
- mod_timer(&wdt_timer, jiffies + HZ * timeout_sec / 2);
-}
-
-static void mpc8xxx_wdt_pr_warn(const char *msg)
-{
- pr_crit("%s, expect the %s soon!\n", msg,
- reset ? "reset" : "machine check exception");
+ mod_timer(&wdt_timer, jiffies + HZ * w->timeout / 2);
}
-static ssize_t mpc8xxx_wdt_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- if (count)
- mpc8xxx_wdt_keepalive();
- return count;
-}
-
-static int mpc8xxx_wdt_open(struct inode *inode, struct file *file)
+static int mpc8xxx_wdt_start(struct watchdog_device *w)
{
u32 tmp = SWCRR_SWEN;
- if (test_and_set_bit(0, &wdt_is_open))
- return -EBUSY;
-
- /* Once we start the watchdog we can't stop it */
- if (nowayout)
- __module_get(THIS_MODULE);
/* Good, fire up the show */
if (prescale)
@@ -133,59 +115,37 @@ static int mpc8xxx_wdt_open(struct inode *inode, struct file *file)
del_timer_sync(&wdt_timer);
- return nonseekable_open(inode, file);
+ return 0;
}
-static int mpc8xxx_wdt_release(struct inode *inode, struct file *file)
+static int mpc8xxx_wdt_ping(struct watchdog_device *w)
{
- if (!nowayout)
- mpc8xxx_wdt_timer_ping(0);
- else
- mpc8xxx_wdt_pr_warn("watchdog closed");
- clear_bit(0, &wdt_is_open);
+ mpc8xxx_wdt_keepalive();
return 0;
}
-static long mpc8xxx_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int mpc8xxx_wdt_stop(struct watchdog_device *w)
{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- static const struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING,
- .firmware_version = 1,
- .identity = "MPC8xxx",
- };
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
- case WDIOC_KEEPALIVE:
- mpc8xxx_wdt_keepalive();
- return 0;
- case WDIOC_GETTIMEOUT:
- return put_user(timeout_sec, p);
- default:
- return -ENOTTY;
- }
+ mod_timer(&wdt_timer, jiffies);
+ return 0;
}
-static const struct file_operations mpc8xxx_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = mpc8xxx_wdt_write,
- .unlocked_ioctl = mpc8xxx_wdt_ioctl,
- .open = mpc8xxx_wdt_open,
- .release = mpc8xxx_wdt_release,
+static struct watchdog_info mpc8xxx_wdt_info = {
+ .options = WDIOF_KEEPALIVEPING,
+ .firmware_version = 1,
+ .identity = "MPC8xxx",
};
-static struct miscdevice mpc8xxx_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &mpc8xxx_wdt_fops,
+static struct watchdog_ops mpc8xxx_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = mpc8xxx_wdt_start,
+ .ping = mpc8xxx_wdt_ping,
+ .stop = mpc8xxx_wdt_stop,
+};
+
+static struct watchdog_device mpc8xxx_wdt_dev = {
+ .info = &mpc8xxx_wdt_info,
+ .ops = &mpc8xxx_wdt_ops,
};
static const struct of_device_id mpc8xxx_wdt_match[];
@@ -197,6 +157,7 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
const struct mpc8xxx_wdt_type *wdt_type;
u32 freq = fsl_get_sys_freq();
bool enabled;
+ unsigned int timeout_sec;
match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev);
if (!match)
@@ -223,6 +184,7 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
else
timeout_sec = timeout / freq;
+ mpc8xxx_wdt_dev.timeout = timeout_sec;
#ifdef MODULE
ret = mpc8xxx_wdt_init_late();
if (ret)
@@ -238,7 +200,7 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
* userspace handles it.
*/
if (enabled)
- mpc8xxx_wdt_timer_ping(0);
+ mod_timer(&wdt_timer, jiffies);
return 0;
err_unmap:
iounmap(wd_base);
@@ -248,9 +210,10 @@ err_unmap:
static int mpc8xxx_wdt_remove(struct platform_device *ofdev)
{
- mpc8xxx_wdt_pr_warn("watchdog removed");
+ pr_crit("Watchdog removed, expect the %s soon!\n",
+ reset ? "reset" : "machine check exception");
del_timer_sync(&wdt_timer);
- misc_deregister(&mpc8xxx_wdt_miscdev);
+ watchdog_unregister_device(&mpc8xxx_wdt_dev);
iounmap(wd_base);
return 0;
@@ -302,10 +265,11 @@ static int mpc8xxx_wdt_init_late(void)
if (!wd_base)
return -ENODEV;
- ret = misc_register(&mpc8xxx_wdt_miscdev);
+ watchdog_set_nowayout(&mpc8xxx_wdt_dev, nowayout);
+
+ ret = watchdog_register_device(&mpc8xxx_wdt_dev);
if (ret) {
- pr_err("cannot register miscdev on minor=%d (err=%d)\n",
- WATCHDOG_MINOR, ret);
+ pr_err("cannot register watchdog device (err=%d)\n", ret);
return ret;
}
return 0;
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index 231e5b9d5c8e..0b9ec61e1313 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -289,7 +289,7 @@ static struct miscdevice nv_tco_miscdev = {
* register a pci_driver, because someone else might one day
* want to register another driver on the same PCI id.
*/
-static DEFINE_PCI_DEVICE_TABLE(tco_pci_tbl) = {
+static const struct pci_device_id tco_pci_tbl[] = {
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index b4864f254b48..c0d07eef2640 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -801,7 +801,7 @@ static void pcipcwd_card_exit(struct pci_dev *pdev)
cards_found--;
}
-static DEFINE_PCI_DEVICE_TABLE(pcipcwd_pci_tbl) = {
+static const struct pci_device_id pcipcwd_pci_tbl[] = {
{ PCI_VENDOR_ID_QUICKLOGIC, PCI_DEVICE_ID_WATCHDOG_PCIPCWD,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }, /* End of list */
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index b731b5d129be..e562e0476016 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -44,23 +44,6 @@
#include <linux/hid.h> /* For HID_REQ_SET_REPORT & HID_DT_REPORT */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
-#ifdef CONFIG_USB_DEBUG
-static int debug = 1;
-#else
-static int debug;
-#endif
-
-/* Use our own dbg macro */
-
-#undef dbg
-#ifndef DEBUG
-#define DEBUG
-#endif
-#define dbg(format, ...) \
-do { \
- if (debug) \
- pr_debug(format "\n", ##__VA_ARGS__); \
-} while (0)
/* Module and Version Information */
#define DRIVER_VERSION "1.02"
@@ -73,10 +56,6 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE(DRIVER_LICENSE);
-/* Module Parameters */
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
-
#define WATCHDOG_HEARTBEAT 0 /* default heartbeat =
delay-time from dip-switches */
static int heartbeat = WATCHDOG_HEARTBEAT;
@@ -193,6 +172,7 @@ static void usb_pcwd_intr_done(struct urb *urb)
struct usb_pcwd_private *usb_pcwd =
(struct usb_pcwd_private *)urb->context;
unsigned char *data = usb_pcwd->intr_buffer;
+ struct device *dev = &usb_pcwd->interface->dev;
int retval;
switch (urb->status) {
@@ -202,17 +182,17 @@ static void usb_pcwd_intr_done(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __func__,
- urb->status);
+ dev_dbg(dev, "%s - urb shutting down with status: %d",
+ __func__, urb->status);
return;
/* -EPIPE: should clear the halt */
default: /* error */
- dbg("%s - nonzero urb status received: %d", __func__,
- urb->status);
+ dev_dbg(dev, "%s - nonzero urb status received: %d",
+ __func__, urb->status);
goto resubmit;
}
- dbg("received following data cmd=0x%02x msb=0x%02x lsb=0x%02x",
+ dev_dbg(dev, "received following data cmd=0x%02x msb=0x%02x lsb=0x%02x",
data[0], data[1], data[2]);
usb_pcwd->cmd_command = data[0];
@@ -251,7 +231,8 @@ static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd,
buf[2] = *lsb; /* Byte 2 = Data LSB */
buf[3] = buf[4] = buf[5] = 0; /* All other bytes not used */
- dbg("sending following data cmd=0x%02x msb=0x%02x lsb=0x%02x",
+ dev_dbg(&usb_pcwd->interface->dev,
+ "sending following data cmd=0x%02x msb=0x%02x lsb=0x%02x",
buf[0], buf[1], buf[2]);
atomic_set(&usb_pcwd->cmd_received, 0);
@@ -260,8 +241,9 @@ static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd,
HID_REQ_SET_REPORT, HID_DT_REPORT,
0x0200, usb_pcwd->interface_number, buf, 6,
USB_COMMAND_TIMEOUT) != 6) {
- dbg("usb_pcwd_send_command: error in usb_control_msg for "
- "cmd 0x%x 0x%x 0x%x\n", cmd, *msb, *lsb);
+ dev_dbg(&usb_pcwd->interface->dev,
+ "usb_pcwd_send_command: error in usb_control_msg for cmd 0x%x 0x%x 0x%x\n",
+ cmd, *msb, *lsb);
}
/* wait till the usb card processed the command,
* with a max. timeout of USB_COMMAND_TIMEOUT */
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 7d8fd041ee25..aec946df6ed9 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -40,6 +40,8 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#define S3C2410_WTCON 0x00
#define S3C2410_WTDAT 0x04
@@ -60,6 +62,16 @@
#define CONFIG_S3C2410_WATCHDOG_ATBOOT (0)
#define CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME (15)
+#define EXYNOS5_RST_STAT_REG_OFFSET 0x0404
+#define EXYNOS5_WDT_DISABLE_REG_OFFSET 0x0408
+#define EXYNOS5_WDT_MASK_RESET_REG_OFFSET 0x040c
+#define QUIRK_HAS_PMU_CONFIG (1 << 0)
+#define QUIRK_HAS_RST_STAT (1 << 1)
+
+/* These quirks require that we have a PMU register map */
+#define QUIRKS_HAVE_PMUREG (QUIRK_HAS_PMU_CONFIG | \
+ QUIRK_HAS_RST_STAT)
+
static bool nowayout = WATCHDOG_NOWAYOUT;
static int tmr_margin;
static int tmr_atboot = CONFIG_S3C2410_WATCHDOG_ATBOOT;
@@ -83,6 +95,30 @@ MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, "
"0 to reboot (default 0)");
MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug (default 0)");
+/**
+ * struct s3c2410_wdt_variant - Per-variant config data
+ *
+ * @disable_reg: Offset in pmureg for the register that disables the watchdog
+ * timer reset functionality.
+ * @mask_reset_reg: Offset in pmureg for the register that masks the watchdog
+ * timer reset functionality.
+ * @mask_bit: Bit number for the watchdog timer in the disable register and the
+ * mask reset register.
+ * @rst_stat_reg: Offset in pmureg for the register that has the reset status.
+ * @rst_stat_bit: Bit number in the rst_stat register indicating a watchdog
+ * reset.
+ * @quirks: A bitfield of quirks.
+ */
+
+struct s3c2410_wdt_variant {
+ int disable_reg;
+ int mask_reset_reg;
+ int mask_bit;
+ int rst_stat_reg;
+ int rst_stat_bit;
+ u32 quirks;
+};
+
struct s3c2410_wdt {
struct device *dev;
struct clk *clock;
@@ -93,8 +129,54 @@ struct s3c2410_wdt {
unsigned long wtdat_save;
struct watchdog_device wdt_device;
struct notifier_block freq_transition;
+ struct s3c2410_wdt_variant *drv_data;
+ struct regmap *pmureg;
};
+static const struct s3c2410_wdt_variant drv_data_s3c2410 = {
+ .quirks = 0
+};
+
+#ifdef CONFIG_OF
+static const struct s3c2410_wdt_variant drv_data_exynos5250 = {
+ .disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
+ .mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
+ .mask_bit = 20,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = 20,
+ .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
+};
+
+static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
+ .disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
+ .mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
+ .mask_bit = 0,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = 9,
+ .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
+};
+
+static const struct of_device_id s3c2410_wdt_match[] = {
+ { .compatible = "samsung,s3c2410-wdt",
+ .data = &drv_data_s3c2410 },
+ { .compatible = "samsung,exynos5250-wdt",
+ .data = &drv_data_exynos5250 },
+ { .compatible = "samsung,exynos5420-wdt",
+ .data = &drv_data_exynos5420 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
+#endif
+
+static const struct platform_device_id s3c2410_wdt_ids[] = {
+ {
+ .name = "s3c2410-wdt",
+ .driver_data = (unsigned long)&drv_data_s3c2410,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, s3c2410_wdt_ids);
+
/* watchdog control routines */
#define DBG(fmt, ...) \
@@ -110,6 +192,35 @@ static inline struct s3c2410_wdt *freq_to_wdt(struct notifier_block *nb)
return container_of(nb, struct s3c2410_wdt, freq_transition);
}
+static int s3c2410wdt_mask_and_disable_reset(struct s3c2410_wdt *wdt, bool mask)
+{
+ int ret;
+ u32 mask_val = 1 << wdt->drv_data->mask_bit;
+ u32 val = 0;
+
+ /* No need to do anything if no PMU CONFIG needed */
+ if (!(wdt->drv_data->quirks & QUIRK_HAS_PMU_CONFIG))
+ return 0;
+
+ if (mask)
+ val = mask_val;
+
+ ret = regmap_update_bits(wdt->pmureg,
+ wdt->drv_data->disable_reg,
+ mask_val, val);
+ if (ret < 0)
+ goto error;
+
+ ret = regmap_update_bits(wdt->pmureg,
+ wdt->drv_data->mask_reset_reg,
+ mask_val, val);
+ error:
+ if (ret < 0)
+ dev_err(wdt->dev, "failed to update reg(%d)\n", ret);
+
+ return ret;
+}
+
static int s3c2410wdt_keepalive(struct watchdog_device *wdd)
{
struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
@@ -188,7 +299,7 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeou
if (timeout < 1)
return -EINVAL;
- freq /= 128;
+ freq = DIV_ROUND_UP(freq, 128);
count = timeout * freq;
DBG("%s: count=%d, timeout=%d, freq=%lu\n",
@@ -200,21 +311,18 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeou
*/
if (count >= 0x10000) {
- for (divisor = 1; divisor <= 0x100; divisor++) {
- if ((count / divisor) < 0x10000)
- break;
- }
+ divisor = DIV_ROUND_UP(count, 0xffff);
- if ((count / divisor) >= 0x10000) {
+ if (divisor > 0x100) {
dev_err(wdt->dev, "timeout %d too big\n", timeout);
return -EINVAL;
}
}
DBG("%s: timeout=%d, divisor=%d, count=%d (%08x)\n",
- __func__, timeout, divisor, count, count/divisor);
+ __func__, timeout, divisor, count, DIV_ROUND_UP(count, divisor));
- count /= divisor;
+ count = DIV_ROUND_UP(count, divisor);
wdt->count = count;
/* update the pre-scaler */
@@ -264,7 +372,7 @@ static irqreturn_t s3c2410wdt_irq(int irqno, void *param)
return IRQ_HANDLED;
}
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
static int s3c2410wdt_cpufreq_transition(struct notifier_block *nb,
unsigned long val, void *data)
@@ -331,6 +439,37 @@ static inline void s3c2410wdt_cpufreq_deregister(struct s3c2410_wdt *wdt)
}
#endif
+static inline unsigned int s3c2410wdt_get_bootstatus(struct s3c2410_wdt *wdt)
+{
+ unsigned int rst_stat;
+ int ret;
+
+ if (!(wdt->drv_data->quirks & QUIRK_HAS_RST_STAT))
+ return 0;
+
+ ret = regmap_read(wdt->pmureg, wdt->drv_data->rst_stat_reg, &rst_stat);
+ if (ret)
+ dev_warn(wdt->dev, "Couldn't get RST_STAT register\n");
+ else if (rst_stat & BIT(wdt->drv_data->rst_stat_bit))
+ return WDIOF_CARDRESET;
+
+ return 0;
+}
+
+/* s3c2410_get_wdt_driver_data */
+static inline struct s3c2410_wdt_variant *
+get_wdt_drv_data(struct platform_device *pdev)
+{
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(s3c2410_wdt_match, pdev->dev.of_node);
+ return (struct s3c2410_wdt_variant *)match->data;
+ } else {
+ return (struct s3c2410_wdt_variant *)
+ platform_get_device_id(pdev)->driver_data;
+ }
+}
+
static int s3c2410wdt_probe(struct platform_device *pdev)
{
struct device *dev;
@@ -353,6 +492,16 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
spin_lock_init(&wdt->lock);
wdt->wdt_device = s3c2410_wdd;
+ wdt->drv_data = get_wdt_drv_data(pdev);
+ if (wdt->drv_data->quirks & QUIRKS_HAVE_PMUREG) {
+ wdt->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,syscon-phandle");
+ if (IS_ERR(wdt->pmureg)) {
+ dev_err(dev, "syscon regmap lookup failed.\n");
+ return PTR_ERR(wdt->pmureg);
+ }
+ }
+
wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (wdt_irq == NULL) {
dev_err(dev, "no irq resource specified\n");
@@ -415,12 +564,18 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(&wdt->wdt_device, nowayout);
+ wdt->wdt_device.bootstatus = s3c2410wdt_get_bootstatus(wdt);
+
ret = watchdog_register_device(&wdt->wdt_device);
if (ret) {
dev_err(dev, "cannot register watchdog (%d)\n", ret);
goto err_cpufreq;
}
+ ret = s3c2410wdt_mask_and_disable_reset(wdt, false);
+ if (ret < 0)
+ goto err_unregister;
+
if (tmr_atboot && started == 0) {
dev_info(dev, "starting watchdog timer\n");
s3c2410wdt_start(&wdt->wdt_device);
@@ -445,6 +600,9 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
return 0;
+ err_unregister:
+ watchdog_unregister_device(&wdt->wdt_device);
+
err_cpufreq:
s3c2410wdt_cpufreq_deregister(wdt);
@@ -458,8 +616,13 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
static int s3c2410wdt_remove(struct platform_device *dev)
{
+ int ret;
struct s3c2410_wdt *wdt = platform_get_drvdata(dev);
+ ret = s3c2410wdt_mask_and_disable_reset(wdt, true);
+ if (ret < 0)
+ return ret;
+
watchdog_unregister_device(&wdt->wdt_device);
s3c2410wdt_cpufreq_deregister(wdt);
@@ -474,6 +637,8 @@ static void s3c2410wdt_shutdown(struct platform_device *dev)
{
struct s3c2410_wdt *wdt = platform_get_drvdata(dev);
+ s3c2410wdt_mask_and_disable_reset(wdt, true);
+
s3c2410wdt_stop(&wdt->wdt_device);
}
@@ -481,12 +646,17 @@ static void s3c2410wdt_shutdown(struct platform_device *dev)
static int s3c2410wdt_suspend(struct device *dev)
{
+ int ret;
struct s3c2410_wdt *wdt = dev_get_drvdata(dev);
/* Save watchdog state, and turn it off. */
wdt->wtcon_save = readl(wdt->reg_base + S3C2410_WTCON);
wdt->wtdat_save = readl(wdt->reg_base + S3C2410_WTDAT);
+ ret = s3c2410wdt_mask_and_disable_reset(wdt, true);
+ if (ret < 0)
+ return ret;
+
/* Note that WTCNT doesn't need to be saved. */
s3c2410wdt_stop(&wdt->wdt_device);
@@ -495,6 +665,7 @@ static int s3c2410wdt_suspend(struct device *dev)
static int s3c2410wdt_resume(struct device *dev)
{
+ int ret;
struct s3c2410_wdt *wdt = dev_get_drvdata(dev);
/* Restore watchdog state. */
@@ -502,6 +673,10 @@ static int s3c2410wdt_resume(struct device *dev)
writel(wdt->wtdat_save, wdt->reg_base + S3C2410_WTCNT);/* Reset count */
writel(wdt->wtcon_save, wdt->reg_base + S3C2410_WTCON);
+ ret = s3c2410wdt_mask_and_disable_reset(wdt, false);
+ if (ret < 0)
+ return ret;
+
dev_info(dev, "watchdog %sabled\n",
(wdt->wtcon_save & S3C2410_WTCON_ENABLE) ? "en" : "dis");
@@ -512,18 +687,11 @@ static int s3c2410wdt_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(s3c2410wdt_pm_ops, s3c2410wdt_suspend,
s3c2410wdt_resume);
-#ifdef CONFIG_OF
-static const struct of_device_id s3c2410_wdt_match[] = {
- { .compatible = "samsung,s3c2410-wdt" },
- {},
-};
-MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
-#endif
-
static struct platform_driver s3c2410wdt_driver = {
.probe = s3c2410wdt_probe,
.remove = s3c2410wdt_remove,
.shutdown = s3c2410wdt_shutdown,
+ .id_table = s3c2410_wdt_ids,
.driver = {
.owner = THIS_MODULE,
.name = "s3c2410-wdt",
@@ -538,4 +706,3 @@ MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, "
"Dimitry Andric <dimitry.andric@tomtom.com>");
MODULE_DESCRIPTION("S3C2410 Watchdog Device Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c2410-wdt");
diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c
index ced3edc95957..702d07870808 100644
--- a/drivers/watchdog/sirfsoc_wdt.c
+++ b/drivers/watchdog/sirfsoc_wdt.c
@@ -212,7 +212,7 @@ static struct platform_driver sirfsoc_wdt_driver = {
.name = "sirfsoc-wdt",
.owner = THIS_MODULE,
.pm = &sirfsoc_wdt_pm_ops,
- .of_match_table = of_match_ptr(sirfsoc_wdt_of_match),
+ .of_match_table = sirfsoc_wdt_of_match,
},
.probe = sirfsoc_wdt_probe,
.remove = sirfsoc_wdt_remove,
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index ce63a1bbf395..5cca9cddb87d 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -303,7 +303,7 @@ static struct miscdevice sp5100_tco_miscdev = {
* register a pci_driver, because someone else might
* want to register another driver on the same PCI id.
*/
-static DEFINE_PCI_DEVICE_TABLE(sp5100_tco_pci_tbl) = {
+static const struct pci_device_id sp5100_tco_pci_tbl[] = {
{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_ANY_ID,
PCI_ANY_ID, },
{ 0, }, /* End of list */
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index 1a68f760cf86..d2cd9f0bcb9a 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -239,7 +239,7 @@ static void wdt_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static DEFINE_PCI_DEVICE_TABLE(wdt_pci_table) = {
+static const struct pci_device_id wdt_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700) },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX800) },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) },
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index e24b21082874..b1da0c18fd1a 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -44,10 +44,13 @@
#define WATCHDOG_NAME "w83627hf/thf/hg/dhg WDT"
#define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */
-/* You must set this - there is no sane way to probe for this board. */
-static int wdt_io = 0x2E;
-module_param(wdt_io, int, 0);
-MODULE_PARM_DESC(wdt_io, "w83627hf/thf WDT io port (default 0x2E)");
+static int wdt_io;
+static int cr_wdt_timeout; /* WDT timeout register */
+static int cr_wdt_control; /* WDT control register */
+
+enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf,
+ w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p,
+ w83667hg_b, nct6775, nct6776, nct6779 };
static int timeout; /* in seconds */
module_param(timeout, int, 0);
@@ -72,6 +75,29 @@ MODULE_PARM_DESC(nowayout,
#define W83627HF_LD_WDT 0x08
+#define W83627HF_ID 0x52
+#define W83627S_ID 0x59
+#define W83697HF_ID 0x60
+#define W83697UG_ID 0x68
+#define W83637HF_ID 0x70
+#define W83627THF_ID 0x82
+#define W83687THF_ID 0x85
+#define W83627EHF_ID 0x88
+#define W83627DHG_ID 0xa0
+#define W83627UHG_ID 0xa2
+#define W83667HG_ID 0xa5
+#define W83627DHG_P_ID 0xb0
+#define W83667HG_B_ID 0xb3
+#define NCT6775_ID 0xb4
+#define NCT6776_ID 0xc3
+#define NCT6779_ID 0xc5
+
+#define W83627HF_WDT_TIMEOUT 0xf6
+#define W83697HF_WDT_TIMEOUT 0xf4
+
+#define W83627HF_WDT_CONTROL 0xf5
+#define W83697HF_WDT_CONTROL 0xf3
+
static void superio_outb(int reg, int val)
{
outb(reg, WDT_EFER);
@@ -106,10 +132,7 @@ static void superio_exit(void)
release_region(wdt_io, 2);
}
-/* tyan motherboards seem to set F5 to 0x4C ?
- * So explicitly init to appropriate value. */
-
-static int w83627hf_init(struct watchdog_device *wdog)
+static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
{
int ret;
unsigned char t;
@@ -119,35 +142,83 @@ static int w83627hf_init(struct watchdog_device *wdog)
return ret;
superio_select(W83627HF_LD_WDT);
- t = superio_inb(0x20); /* check chip version */
- if (t == 0x82) { /* W83627THF */
- t = (superio_inb(0x2b) & 0xf7);
- superio_outb(0x2b, t | 0x04); /* set GPIO3 to WDT0 */
- } else if (t == 0x88 || t == 0xa0) { /* W83627EHF / W83627DHG */
- t = superio_inb(0x2d);
- superio_outb(0x2d, t & ~0x01); /* set GPIO5 to WDT0 */
- }
/* set CR30 bit 0 to activate GPIO2 */
t = superio_inb(0x30);
if (!(t & 0x01))
superio_outb(0x30, t | 0x01);
- t = superio_inb(0xF6);
+ switch (chip) {
+ case w83627hf:
+ case w83627s:
+ t = superio_inb(0x2B) & ~0x10;
+ superio_outb(0x2B, t); /* set GPIO24 to WDT0 */
+ break;
+ case w83697hf:
+ /* Set pin 119 to WDTO# mode (= CR29, WDT0) */
+ t = superio_inb(0x29) & ~0x60;
+ t |= 0x20;
+ superio_outb(0x29, t);
+ break;
+ case w83697ug:
+ /* Set pin 118 to WDTO# mode */
+ t = superio_inb(0x2b) & ~0x04;
+ superio_outb(0x2b, t);
+ break;
+ case w83627thf:
+ t = (superio_inb(0x2B) & ~0x08) | 0x04;
+ superio_outb(0x2B, t); /* set GPIO3 to WDT0 */
+ break;
+ case w83627dhg:
+ case w83627dhg_p:
+ t = superio_inb(0x2D) & ~0x01; /* PIN77 -> WDT0# */
+ superio_outb(0x2D, t); /* set GPIO5 to WDT0 */
+ t = superio_inb(cr_wdt_control);
+ t |= 0x02; /* enable the WDTO# output low pulse
+ * to the KBRST# pin */
+ superio_outb(cr_wdt_control, t);
+ break;
+ case w83637hf:
+ break;
+ case w83687thf:
+ t = superio_inb(0x2C) & ~0x80; /* PIN47 -> WDT0# */
+ superio_outb(0x2C, t);
+ break;
+ case w83627ehf:
+ case w83627uhg:
+ case w83667hg:
+ case w83667hg_b:
+ case nct6775:
+ case nct6776:
+ case nct6779:
+ /*
+ * These chips either have a fixed WDTO# output pin (W83627UHG)
+ * or support more than one WDTO# output pin.
+ * Don't touch the pin configuration, and hope the BIOS
+ * does the right thing.
+ */
+ t = superio_inb(cr_wdt_control);
+ t |= 0x02; /* enable the WDTO# output low pulse
+ * to the KBRST# pin */
+ superio_outb(cr_wdt_control, t);
+ break;
+ default:
+ break;
+ }
+
+ t = superio_inb(cr_wdt_timeout);
if (t != 0) {
pr_info("Watchdog already running. Resetting timeout to %d sec\n",
wdog->timeout);
- superio_outb(0xF6, wdog->timeout);
+ superio_outb(cr_wdt_timeout, wdog->timeout);
}
/* set second mode & disable keyboard turning off watchdog */
- t = superio_inb(0xF5) & ~0x0C;
- /* enable the WDTO# output low pulse to the KBRST# pin */
- t |= 0x02;
- superio_outb(0xF5, t);
+ t = superio_inb(cr_wdt_control) & ~0x0C;
+ superio_outb(cr_wdt_control, t);
- /* disable keyboard & mouse turning off watchdog */
- t = superio_inb(0xF7) & ~0xC0;
+ /* reset trigger, disable keyboard & mouse turning off watchdog */
+ t = superio_inb(0xF7) & ~0xD0;
superio_outb(0xF7, t);
superio_exit();
@@ -164,7 +235,7 @@ static int wdt_set_time(unsigned int timeout)
return ret;
superio_select(W83627HF_LD_WDT);
- superio_outb(0xF6, timeout);
+ superio_outb(cr_wdt_timeout, timeout);
superio_exit();
return 0;
@@ -197,7 +268,7 @@ static unsigned int wdt_get_time(struct watchdog_device *wdog)
return 0;
superio_select(W83627HF_LD_WDT);
- timeleft = superio_inb(0xF6);
+ timeleft = superio_inb(cr_wdt_timeout);
superio_exit();
return timeleft;
@@ -249,16 +320,123 @@ static struct notifier_block wdt_notifier = {
.notifier_call = wdt_notify_sys,
};
+static int wdt_find(int addr)
+{
+ u8 val;
+ int ret;
+
+ cr_wdt_timeout = W83627HF_WDT_TIMEOUT;
+ cr_wdt_control = W83627HF_WDT_CONTROL;
+
+ ret = superio_enter();
+ if (ret)
+ return ret;
+ superio_select(W83627HF_LD_WDT);
+ val = superio_inb(0x20);
+ switch (val) {
+ case W83627HF_ID:
+ ret = w83627hf;
+ break;
+ case W83627S_ID:
+ ret = w83627s;
+ break;
+ case W83697HF_ID:
+ ret = w83697hf;
+ cr_wdt_timeout = W83697HF_WDT_TIMEOUT;
+ cr_wdt_control = W83697HF_WDT_CONTROL;
+ break;
+ case W83697UG_ID:
+ ret = w83697ug;
+ cr_wdt_timeout = W83697HF_WDT_TIMEOUT;
+ cr_wdt_control = W83697HF_WDT_CONTROL;
+ break;
+ case W83637HF_ID:
+ ret = w83637hf;
+ break;
+ case W83627THF_ID:
+ ret = w83627thf;
+ break;
+ case W83687THF_ID:
+ ret = w83687thf;
+ break;
+ case W83627EHF_ID:
+ ret = w83627ehf;
+ break;
+ case W83627DHG_ID:
+ ret = w83627dhg;
+ break;
+ case W83627DHG_P_ID:
+ ret = w83627dhg_p;
+ break;
+ case W83627UHG_ID:
+ ret = w83627uhg;
+ break;
+ case W83667HG_ID:
+ ret = w83667hg;
+ break;
+ case W83667HG_B_ID:
+ ret = w83667hg_b;
+ break;
+ case NCT6775_ID:
+ ret = nct6775;
+ break;
+ case NCT6776_ID:
+ ret = nct6776;
+ break;
+ case NCT6779_ID:
+ ret = nct6779;
+ break;
+ case 0xff:
+ ret = -ENODEV;
+ break;
+ default:
+ ret = -ENODEV;
+ pr_err("Unsupported chip ID: 0x%02x\n", val);
+ break;
+ }
+ superio_exit();
+ return ret;
+}
+
static int __init wdt_init(void)
{
int ret;
+ int chip;
+ const char * const chip_name[] = {
+ "W83627HF",
+ "W83627S",
+ "W83697HF",
+ "W83697UG",
+ "W83637HF",
+ "W83627THF",
+ "W83687THF",
+ "W83627EHF",
+ "W83627DHG",
+ "W83627UHG",
+ "W83667HG",
+ "W83667DHG-P",
+ "W83667HG-B",
+ "NCT6775",
+ "NCT6776",
+ "NCT6779",
+ };
+
+ wdt_io = 0x2e;
+ chip = wdt_find(0x2e);
+ if (chip < 0) {
+ wdt_io = 0x4e;
+ chip = wdt_find(0x4e);
+ if (chip < 0)
+ return chip;
+ }
- pr_info("WDT driver for the Winbond(TM) W83627HF/THF/HG/DHG Super I/O chip initialising\n");
+ pr_info("WDT driver for %s Super I/O chip initialising\n",
+ chip_name[chip]);
watchdog_init_timeout(&wdt_dev, timeout, NULL);
watchdog_set_nowayout(&wdt_dev, nowayout);
- ret = w83627hf_init(&wdt_dev);
+ ret = w83627hf_init(&wdt_dev, chip);
if (ret) {
pr_err("failed to initialize watchdog (err=%d)\n", ret);
return ret;
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 461336c4519f..cec9b559647d 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -78,7 +78,7 @@ int watchdog_init_timeout(struct watchdog_device *wdd,
watchdog_check_min_max_timeout(wdd);
/* try to get the timeout module parameter first */
- if (!watchdog_timeout_invalid(wdd, timeout_parm)) {
+ if (!watchdog_timeout_invalid(wdd, timeout_parm) && timeout_parm) {
wdd->timeout = timeout_parm;
return ret;
}
@@ -89,7 +89,7 @@ int watchdog_init_timeout(struct watchdog_device *wdd,
if (dev == NULL || dev->of_node == NULL)
return ret;
of_property_read_u32(dev->of_node, "timeout-sec", &t);
- if (!watchdog_timeout_invalid(wdd, t))
+ if (!watchdog_timeout_invalid(wdd, t) && t)
wdd->timeout = t;
else
ret = -EINVAL;
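With the two added checks above, watchdog_init_timeout() treats a zero module parameter and a zero "timeout-sec" property as "not specified" rather than as a value to install, so a driver's preloaded default in wdd->timeout survives. The sketch below (userspace, with a range check simplified from the real watchdog_timeout_invalid()) walks through that selection order.

/*
 * Userspace sketch of the timeout selection order after this change: a
 * zero module parameter or zero "timeout-sec" falls through instead of
 * clobbering the driver's preset default. min_timeout is left at 0 here
 * so that the range check alone would have accepted 0.
 */
#include <stdio.h>

struct wdd_sim {
	unsigned int min_timeout, max_timeout, timeout;
};

static int timeout_invalid(const struct wdd_sim *w, unsigned int t)
{
	return w->max_timeout && (t < w->min_timeout || t > w->max_timeout);
}

static void init_timeout(struct wdd_sim *w, unsigned int parm, unsigned int dt)
{
	if (!timeout_invalid(w, parm) && parm)		/* module parameter first */
		w->timeout = parm;
	else if (!timeout_invalid(w, dt) && dt)		/* then DT "timeout-sec" */
		w->timeout = dt;
	/* otherwise keep the driver's preset default */
}

int main(void)
{
	struct wdd_sim w = { .min_timeout = 0, .max_timeout = 600, .timeout = 60 };

	init_timeout(&w, 0, 0);		/* neither source set */
	printf("both zero -> timeout %u (driver default kept)\n", w.timeout);

	init_timeout(&w, 0, 120);	/* only DT set */
	printf("DT only   -> timeout %u\n", w.timeout);
	return 0;
}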
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index ee89ba4dea63..3dc578e71211 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -720,7 +720,7 @@ static void wdtpci_remove_one(struct pci_dev *pdev)
}
-static DEFINE_PCI_DEVICE_TABLE(wdtpci_pci_tbl) = {
+static const struct pci_device_id wdtpci_pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_ACCESSIO,
.device = PCI_DEVICE_ID_ACCESSIO_WDG_CSM,
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 12ba6db65142..38fb36e1c592 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -3,7 +3,6 @@ menu "Xen driver support"
config XEN_BALLOON
bool "Xen memory balloon driver"
- depends on !ARM
default y
help
The balloon driver allows the Xen domain to request more memory from
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 14fe79d8634a..45e00afa7f2d 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -2,7 +2,8 @@ ifeq ($(filter y, $(CONFIG_ARM) $(CONFIG_ARM64)),)
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
endif
obj-$(CONFIG_X86) += fallback.o
-obj-y += grant-table.o features.o events.o balloon.o manage.o
+obj-y += grant-table.o features.o balloon.o manage.o
+obj-y += events/
obj-y += xenbus/
nostackp := $(call cc-option, -fno-stack-protector)
@@ -15,7 +16,6 @@ xen-pad-$(CONFIG_X86) += xen-acpi-pad.o
dom0-$(CONFIG_X86) += pcpu.o
obj-$(CONFIG_XEN_DOM0) += $(dom0-y)
obj-$(CONFIG_BLOCK) += biomerge.o
-obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o
obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o
obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 55ea73f7c70b..37d06ea624aa 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -157,13 +157,6 @@ static struct page *balloon_retrieve(bool prefer_highmem)
return page;
}
-static struct page *balloon_first_page(void)
-{
- if (list_empty(&ballooned_pages))
- return NULL;
- return list_entry(ballooned_pages.next, struct page, lru);
-}
-
static struct page *balloon_next_page(struct page *page)
{
struct list_head *next = page->lru.next;
@@ -328,7 +321,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
if (nr_pages > ARRAY_SIZE(frame_list))
nr_pages = ARRAY_SIZE(frame_list);
- page = balloon_first_page();
+ page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
for (i = 0; i < nr_pages; i++) {
if (!page) {
nr_pages = i;
@@ -350,17 +343,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
pfn = page_to_pfn(page);
- set_phys_to_machine(pfn, frame_list[i]);
-
#ifdef CONFIG_XEN_HAVE_PVMMU
- /* Link back into the page tables if not highmem. */
- if (xen_pv_domain() && !PageHighMem(page)) {
- int ret;
- ret = HYPERVISOR_update_va_mapping(
- (unsigned long)__va(pfn << PAGE_SHIFT),
- mfn_pte(frame_list[i], PAGE_KERNEL),
- 0);
- BUG_ON(ret);
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ set_phys_to_machine(pfn, frame_list[i]);
+
+ /* Link back into the page tables if not highmem. */
+ if (!PageHighMem(page)) {
+ int ret;
+ ret = HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ mfn_pte(frame_list[i], PAGE_KERNEL),
+ 0);
+ BUG_ON(ret);
+ }
}
#endif
@@ -378,7 +373,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
enum bp_state state = BP_DONE;
unsigned long pfn, i;
struct page *page;
- struct page *scratch_page;
int ret;
struct xen_memory_reservation reservation = {
.address_bits = 0,
@@ -411,27 +405,29 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
scrub_page(page);
+#ifdef CONFIG_XEN_HAVE_PVMMU
/*
* Ballooned out frames are effectively replaced with
* a scratch frame. Ensure direct mappings and the
* p2m are consistent.
*/
- scratch_page = get_balloon_scratch_page();
-#ifdef CONFIG_XEN_HAVE_PVMMU
- if (xen_pv_domain() && !PageHighMem(page)) {
- ret = HYPERVISOR_update_va_mapping(
- (unsigned long)__va(pfn << PAGE_SHIFT),
- pfn_pte(page_to_pfn(scratch_page),
- PAGE_KERNEL_RO), 0);
- BUG_ON(ret);
- }
-#endif
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned long p;
+ struct page *scratch_page = get_balloon_scratch_page();
+
+ if (!PageHighMem(page)) {
+ ret = HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ pfn_pte(page_to_pfn(scratch_page),
+ PAGE_KERNEL_RO), 0);
+ BUG_ON(ret);
+ }
p = page_to_pfn(scratch_page);
__set_phys_to_machine(pfn, pfn_to_mfn(p));
+
+ put_balloon_scratch_page();
}
- put_balloon_scratch_page();
+#endif
balloon_append(pfn_to_page(pfn));
}
@@ -627,15 +623,17 @@ static int __init balloon_init(void)
if (!xen_domain())
return -ENODEV;
- for_each_online_cpu(cpu)
- {
- per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
- if (per_cpu(balloon_scratch_page, cpu) == NULL) {
- pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
- return -ENOMEM;
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ for_each_online_cpu(cpu)
+ {
+ per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+ if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+ pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+ return -ENOMEM;
+ }
}
+ register_cpu_notifier(&balloon_cpu_notifier);
}
- register_cpu_notifier(&balloon_cpu_notifier);
pr_info("Initialising balloon driver\n");
diff --git a/drivers/xen/dbgp.c b/drivers/xen/dbgp.c
index f3ccc80a455f..8145a59fd9f6 100644
--- a/drivers/xen/dbgp.c
+++ b/drivers/xen/dbgp.c
@@ -19,7 +19,7 @@ static int xen_dbgp_op(struct usb_hcd *hcd, int op)
dbgp.op = op;
#ifdef CONFIG_PCI
- if (ctrlr->bus == &pci_bus_type) {
+ if (dev_is_pci(ctrlr)) {
const struct pci_dev *pdev = to_pci_dev(ctrlr);
dbgp.u.pci.seg = pci_domain_nr(pdev->bus);
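
dev_is_pci() is the generic helper for the open-coded bus comparison being removed here; it boils down to the same test, roughly:

	/* Roughly what the helper does; illustrative re-spelling only. */
	static inline bool my_dev_is_pci(const struct device *dev)
	{
		return dev->bus == &pci_bus_type;
	}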
diff --git a/drivers/xen/events/Makefile b/drivers/xen/events/Makefile
new file mode 100644
index 000000000000..62be55cd981d
--- /dev/null
+++ b/drivers/xen/events/Makefile
@@ -0,0 +1,5 @@
+obj-y += events.o
+
+events-y += events_base.o
+events-y += events_2l.o
+events-y += events_fifo.o
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
new file mode 100644
index 000000000000..d7ff91757307
--- /dev/null
+++ b/drivers/xen/events/events_2l.c
@@ -0,0 +1,372 @@
+/*
+ * Xen event channels (2-level ABI)
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
+
+#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+
+#include <linux/linkage.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include <asm/sync_bitops.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+#include <xen/xen.h>
+#include <xen/xen-ops.h>
+#include <xen/events.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/event_channel.h>
+
+#include "events_internal.h"
+
+/*
+ * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
+ * careful to only use bitops which allow for this (e.g
+ * test_bit/find_first_bit and friends but not __ffs) and to pass
+ * BITS_PER_EVTCHN_WORD as the bitmask length.
+ */
+#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
+/*
+ * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
+ * array. Primarily to avoid long lines (hence the terse name).
+ */
+#define BM(x) (unsigned long *)(x)
+/* Find the first set bit in a evtchn mask */
+#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)
+
+static DEFINE_PER_CPU(xen_ulong_t [EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD],
+ cpu_evtchn_mask);
+
+static unsigned evtchn_2l_max_channels(void)
+{
+ return EVTCHN_2L_NR_CHANNELS;
+}
+
+static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
+{
+ clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
+ set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
+}
+
+static void evtchn_2l_clear_pending(unsigned port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ sync_clear_bit(port, BM(&s->evtchn_pending[0]));
+}
+
+static void evtchn_2l_set_pending(unsigned port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ sync_set_bit(port, BM(&s->evtchn_pending[0]));
+}
+
+static bool evtchn_2l_is_pending(unsigned port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ return sync_test_bit(port, BM(&s->evtchn_pending[0]));
+}
+
+static bool evtchn_2l_test_and_set_mask(unsigned port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
+}
+
+static void evtchn_2l_mask(unsigned port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ sync_set_bit(port, BM(&s->evtchn_mask[0]));
+}
+
+static void evtchn_2l_unmask(unsigned port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ unsigned int cpu = get_cpu();
+ int do_hypercall = 0, evtchn_pending = 0;
+
+ BUG_ON(!irqs_disabled());
+
+ if (unlikely((cpu != cpu_from_evtchn(port))))
+ do_hypercall = 1;
+ else {
+ /*
+ * Need to clear the mask before checking pending to
+ * avoid a race with an event becoming pending.
+ *
+ * EVTCHNOP_unmask will only trigger an upcall if the
+ * mask bit was set, so if a hypercall is needed
+ * remask the event.
+ */
+ sync_clear_bit(port, BM(&s->evtchn_mask[0]));
+ evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));
+
+ if (unlikely(evtchn_pending && xen_hvm_domain())) {
+ sync_set_bit(port, BM(&s->evtchn_mask[0]));
+ do_hypercall = 1;
+ }
+ }
+
+ /* Slow path (hypercall) if this is a non-local port or if this is
+ * an hvm domain and an event is pending (hvm domains don't have
+ * their own implementation of irq_enable). */
+ if (do_hypercall) {
+ struct evtchn_unmask unmask = { .port = port };
+ (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
+ } else {
+ struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
+
+ /*
+ * The following is basically the equivalent of
+ * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
+ * the interrupt edge' if the channel is masked.
+ */
+ if (evtchn_pending &&
+ !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
+ BM(&vcpu_info->evtchn_pending_sel)))
+ vcpu_info->evtchn_upcall_pending = 1;
+ }
+
+ put_cpu();
+}
+
+static DEFINE_PER_CPU(unsigned int, current_word_idx);
+static DEFINE_PER_CPU(unsigned int, current_bit_idx);
+
+/*
+ * Mask out the i least significant bits of w
+ */
+#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))
+
+static inline xen_ulong_t active_evtchns(unsigned int cpu,
+ struct shared_info *sh,
+ unsigned int idx)
+{
+ return sh->evtchn_pending[idx] &
+ per_cpu(cpu_evtchn_mask, cpu)[idx] &
+ ~sh->evtchn_mask[idx];
+}
+
+/*
+ * Search the CPU's pending events bitmasks. For each one found, map
+ * the event number to an irq, and feed it into do_IRQ() for handling.
+ *
+ * Xen uses a two-level bitmap to speed searching. The first level is
+ * a bitset of words which contain pending event bits. The second
+ * level is a bitset of pending events themselves.
+ */
+static void evtchn_2l_handle_events(unsigned cpu)
+{
+ int irq;
+ xen_ulong_t pending_words;
+ xen_ulong_t pending_bits;
+ int start_word_idx, start_bit_idx;
+ int word_idx, bit_idx;
+ int i;
+ struct irq_desc *desc;
+ struct shared_info *s = HYPERVISOR_shared_info;
+ struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
+
+ /* Timer interrupt has highest priority. */
+ irq = irq_from_virq(cpu, VIRQ_TIMER);
+ if (irq != -1) {
+ unsigned int evtchn = evtchn_from_irq(irq);
+ word_idx = evtchn / BITS_PER_LONG;
+ bit_idx = evtchn % BITS_PER_LONG;
+ if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx)) {
+ desc = irq_to_desc(irq);
+ if (desc)
+ generic_handle_irq_desc(irq, desc);
+ }
+ }
+
+ /*
+ * Master flag must be cleared /before/ clearing
+ * selector flag. xchg_xen_ulong must contain an
+ * appropriate barrier.
+ */
+ pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
+
+ start_word_idx = __this_cpu_read(current_word_idx);
+ start_bit_idx = __this_cpu_read(current_bit_idx);
+
+ word_idx = start_word_idx;
+
+ for (i = 0; pending_words != 0; i++) {
+ xen_ulong_t words;
+
+ words = MASK_LSBS(pending_words, word_idx);
+
+ /*
+ * If we masked out all events, wrap to beginning.
+ */
+ if (words == 0) {
+ word_idx = 0;
+ bit_idx = 0;
+ continue;
+ }
+ word_idx = EVTCHN_FIRST_BIT(words);
+
+ pending_bits = active_evtchns(cpu, s, word_idx);
+ bit_idx = 0; /* usually scan entire word from start */
+ /*
+ * We scan the starting word in two parts.
+ *
+ * 1st time: start in the middle, scanning the
+ * upper bits.
+ *
+ * 2nd time: scan the whole word (not just the
+ * parts skipped in the first pass) -- if an
+ * event in the previously scanned bits is
+ * pending again it would just be scanned on
+ * the next loop anyway.
+ */
+ if (word_idx == start_word_idx) {
+ if (i == 0)
+ bit_idx = start_bit_idx;
+ }
+
+ do {
+ xen_ulong_t bits;
+ int port;
+
+ bits = MASK_LSBS(pending_bits, bit_idx);
+
+ /* If we masked out all events, move on. */
+ if (bits == 0)
+ break;
+
+ bit_idx = EVTCHN_FIRST_BIT(bits);
+
+ /* Process port. */
+ port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
+ irq = get_evtchn_to_irq(port);
+
+ if (irq != -1) {
+ desc = irq_to_desc(irq);
+ if (desc)
+ generic_handle_irq_desc(irq, desc);
+ }
+
+ bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
+
+ /* Next caller starts at last processed + 1 */
+ __this_cpu_write(current_word_idx,
+ bit_idx ? word_idx :
+ (word_idx+1) % BITS_PER_EVTCHN_WORD);
+ __this_cpu_write(current_bit_idx, bit_idx);
+ } while (bit_idx != 0);
+
+ /* Scan start_l1i twice; all others once. */
+ if ((word_idx != start_word_idx) || (i != 0))
+ pending_words &= ~(1UL << word_idx);
+
+ word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
+ }
+}
+
+irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
+{
+ struct shared_info *sh = HYPERVISOR_shared_info;
+ int cpu = smp_processor_id();
+ xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
+ int i;
+ unsigned long flags;
+ static DEFINE_SPINLOCK(debug_lock);
+ struct vcpu_info *v;
+
+ spin_lock_irqsave(&debug_lock, flags);
+
+ printk("\nvcpu %d\n ", cpu);
+
+ for_each_online_cpu(i) {
+ int pending;
+ v = per_cpu(xen_vcpu, i);
+ pending = (get_irq_regs() && i == cpu)
+ ? xen_irqs_disabled(get_irq_regs())
+ : v->evtchn_upcall_mask;
+ printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n ", i,
+ pending, v->evtchn_upcall_pending,
+ (int)(sizeof(v->evtchn_pending_sel)*2),
+ v->evtchn_pending_sel);
+ }
+ v = per_cpu(xen_vcpu, cpu);
+
+ printk("\npending:\n ");
+ for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
+ printk("%0*"PRI_xen_ulong"%s",
+ (int)sizeof(sh->evtchn_pending[0])*2,
+ sh->evtchn_pending[i],
+ i % 8 == 0 ? "\n " : " ");
+ printk("\nglobal mask:\n ");
+ for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+ printk("%0*"PRI_xen_ulong"%s",
+ (int)(sizeof(sh->evtchn_mask[0])*2),
+ sh->evtchn_mask[i],
+ i % 8 == 0 ? "\n " : " ");
+
+ printk("\nglobally unmasked:\n ");
+ for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+ printk("%0*"PRI_xen_ulong"%s",
+ (int)(sizeof(sh->evtchn_mask[0])*2),
+ sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
+ i % 8 == 0 ? "\n " : " ");
+
+ printk("\nlocal cpu%d mask:\n ", cpu);
+ for (i = (EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
+ printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
+ cpu_evtchn[i],
+ i % 8 == 0 ? "\n " : " ");
+
+ printk("\nlocally unmasked:\n ");
+ for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
+ xen_ulong_t pending = sh->evtchn_pending[i]
+ & ~sh->evtchn_mask[i]
+ & cpu_evtchn[i];
+ printk("%0*"PRI_xen_ulong"%s",
+ (int)(sizeof(sh->evtchn_mask[0])*2),
+ pending, i % 8 == 0 ? "\n " : " ");
+ }
+
+ printk("\npending list:\n");
+ for (i = 0; i < EVTCHN_2L_NR_CHANNELS; i++) {
+ if (sync_test_bit(i, BM(sh->evtchn_pending))) {
+ int word_idx = i / BITS_PER_EVTCHN_WORD;
+ printk(" %d: event %d -> irq %d%s%s%s\n",
+ cpu_from_evtchn(i), i,
+ get_evtchn_to_irq(i),
+ sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
+ ? "" : " l2-clear",
+ !sync_test_bit(i, BM(sh->evtchn_mask))
+ ? "" : " globally-masked",
+ sync_test_bit(i, BM(cpu_evtchn))
+ ? "" : " locally-masked");
+ }
+ }
+
+ spin_unlock_irqrestore(&debug_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static const struct evtchn_ops evtchn_ops_2l = {
+ .max_channels = evtchn_2l_max_channels,
+ .nr_channels = evtchn_2l_max_channels,
+ .bind_to_cpu = evtchn_2l_bind_to_cpu,
+ .clear_pending = evtchn_2l_clear_pending,
+ .set_pending = evtchn_2l_set_pending,
+ .is_pending = evtchn_2l_is_pending,
+ .test_and_set_mask = evtchn_2l_test_and_set_mask,
+ .mask = evtchn_2l_mask,
+ .unmask = evtchn_2l_unmask,
+ .handle_events = evtchn_2l_handle_events,
+};
+
+void __init xen_evtchn_2l_init(void)
+{
+ pr_info("Using 2-level ABI\n");
+ evtchn_ops = &evtchn_ops_2l;
+}
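
Everything in events_2l.c is reached through the evtchn_ops table registered by xen_evtchn_2l_init(); the core never calls the 2-level (or FIFO) code directly. A sketch of the dispatch pattern, assuming small inline wrappers of this shape in events_internal.h:

	/* Sketch only: what helpers such as clear_evtchn()/mask_evtchn()
	 * resolve to once a backend has installed evtchn_ops. */
	static inline void clear_evtchn(unsigned port)
	{
		evtchn_ops->clear_pending(port);
	}

	static inline void mask_evtchn(unsigned port)
	{
		evtchn_ops->mask(port);
	}

	static inline int xen_evtchn_port_setup(struct irq_info *info)
	{
		if (evtchn_ops->setup)		/* optional hook; 2-level has none */
			return evtchn_ops->setup(info);
		return 0;
	}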
diff --git a/drivers/xen/events.c b/drivers/xen/events/events_base.c
index 4035e833ea26..f4a9e3311297 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events/events_base.c
@@ -59,6 +59,10 @@
#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>
+#include "events_internal.h"
+
+const struct evtchn_ops *evtchn_ops;
+
/*
* This lock protects updates to the following mapping and reference-count
* arrays. The lock does not need to be acquired to read the mapping tables.
@@ -73,71 +77,15 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
-/* Interrupt types. */
-enum xen_irq_type {
- IRQT_UNBOUND = 0,
- IRQT_PIRQ,
- IRQT_VIRQ,
- IRQT_IPI,
- IRQT_EVTCHN
-};
-
-/*
- * Packed IRQ information:
- * type - enum xen_irq_type
- * event channel - irq->event channel mapping
- * cpu - cpu this event channel is bound to
- * index - type-specific information:
- * PIRQ - physical IRQ, GSI, flags, and owner domain
- * VIRQ - virq number
- * IPI - IPI vector
- * EVTCHN -
- */
-struct irq_info {
- struct list_head list;
- int refcnt;
- enum xen_irq_type type; /* type */
- unsigned irq;
- unsigned short evtchn; /* event channel */
- unsigned short cpu; /* cpu bound */
-
- union {
- unsigned short virq;
- enum ipi_vector ipi;
- struct {
- unsigned short pirq;
- unsigned short gsi;
- unsigned char flags;
- uint16_t domid;
- } pirq;
- } u;
-};
-#define PIRQ_NEEDS_EOI (1 << 0)
-#define PIRQ_SHAREABLE (1 << 1)
-
-static int *evtchn_to_irq;
+int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);
-/*
- * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
- * careful to only use bitops which allow for this (e.g
- * test_bit/find_first_bit and friends but not __ffs) and to pass
- * BITS_PER_EVTCHN_WORD as the bitmask length.
- */
-#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
-/*
- * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
- * array. Primarily to avoid long lines (hence the terse name).
- */
-#define BM(x) (unsigned long *)(x)
-/* Find the first set bit in a evtchn mask */
-#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)
-
-static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD],
- cpu_evtchn_mask);
+#define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
+#define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
+#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)
@@ -148,19 +96,75 @@ static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);
+static void clear_evtchn_to_irq_row(unsigned row)
+{
+ unsigned col;
+
+ for (col = 0; col < EVTCHN_PER_ROW; col++)
+ evtchn_to_irq[row][col] = -1;
+}
+
+static void clear_evtchn_to_irq_all(void)
+{
+ unsigned row;
+
+ for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
+ if (evtchn_to_irq[row] == NULL)
+ continue;
+ clear_evtchn_to_irq_row(row);
+ }
+}
+
+static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
+{
+ unsigned row;
+ unsigned col;
+
+ if (evtchn >= xen_evtchn_max_channels())
+ return -EINVAL;
+
+ row = EVTCHN_ROW(evtchn);
+ col = EVTCHN_COL(evtchn);
+
+ if (evtchn_to_irq[row] == NULL) {
+ /* Unallocated irq entries return -1 anyway */
+ if (irq == -1)
+ return 0;
+
+ evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
+ if (evtchn_to_irq[row] == NULL)
+ return -ENOMEM;
+
+ clear_evtchn_to_irq_row(row);
+ }
+
+ evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+ return 0;
+}
+
+int get_evtchn_to_irq(unsigned evtchn)
+{
+ if (evtchn >= xen_evtchn_max_channels())
+ return -1;
+ if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
+ return -1;
+ return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
+}
+
/* Get info for IRQ */
-static struct irq_info *info_for_irq(unsigned irq)
+struct irq_info *info_for_irq(unsigned irq)
{
return irq_get_handler_data(irq);
}
/* Constructors for packed IRQ information. */
-static void xen_irq_info_common_init(struct irq_info *info,
+static int xen_irq_info_common_setup(struct irq_info *info,
unsigned irq,
enum xen_irq_type type,
- unsigned short evtchn,
+ unsigned evtchn,
unsigned short cpu)
{
+ int ret;
BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
@@ -169,68 +173,78 @@ static void xen_irq_info_common_init(struct irq_info *info,
info->evtchn = evtchn;
info->cpu = cpu;
- evtchn_to_irq[evtchn] = irq;
+ ret = set_evtchn_to_irq(evtchn, irq);
+ if (ret < 0)
+ return ret;
irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
+
+ return xen_evtchn_port_setup(info);
}
-static void xen_irq_info_evtchn_init(unsigned irq,
- unsigned short evtchn)
+static int xen_irq_info_evtchn_setup(unsigned irq,
+ unsigned evtchn)
{
struct irq_info *info = info_for_irq(irq);
- xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
+ return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
}
-static void xen_irq_info_ipi_init(unsigned cpu,
+static int xen_irq_info_ipi_setup(unsigned cpu,
unsigned irq,
- unsigned short evtchn,
+ unsigned evtchn,
enum ipi_vector ipi)
{
struct irq_info *info = info_for_irq(irq);
- xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);
-
info->u.ipi = ipi;
per_cpu(ipi_to_irq, cpu)[ipi] = irq;
+
+ return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
}
-static void xen_irq_info_virq_init(unsigned cpu,
+static int xen_irq_info_virq_setup(unsigned cpu,
unsigned irq,
- unsigned short evtchn,
- unsigned short virq)
+ unsigned evtchn,
+ unsigned virq)
{
struct irq_info *info = info_for_irq(irq);
- xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);
-
info->u.virq = virq;
per_cpu(virq_to_irq, cpu)[virq] = irq;
+
+ return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
}
-static void xen_irq_info_pirq_init(unsigned irq,
- unsigned short evtchn,
- unsigned short pirq,
- unsigned short gsi,
+static int xen_irq_info_pirq_setup(unsigned irq,
+ unsigned evtchn,
+ unsigned pirq,
+ unsigned gsi,
uint16_t domid,
unsigned char flags)
{
struct irq_info *info = info_for_irq(irq);
- xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);
-
info->u.pirq.pirq = pirq;
info->u.pirq.gsi = gsi;
info->u.pirq.domid = domid;
info->u.pirq.flags = flags;
+
+ return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
+}
+
+static void xen_irq_info_cleanup(struct irq_info *info)
+{
+ set_evtchn_to_irq(info->evtchn, -1);
+ info->evtchn = 0;
}
/*
* Accessors for packed IRQ information.
*/
-static unsigned int evtchn_from_irq(unsigned irq)
+unsigned int evtchn_from_irq(unsigned irq)
{
if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
return 0;
@@ -240,10 +254,15 @@ static unsigned int evtchn_from_irq(unsigned irq)
unsigned irq_from_evtchn(unsigned int evtchn)
{
- return evtchn_to_irq[evtchn];
+ return get_evtchn_to_irq(evtchn);
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);
+int irq_from_virq(unsigned int cpu, unsigned int virq)
+{
+ return per_cpu(virq_to_irq, cpu)[virq];
+}
+
static enum ipi_vector ipi_from_irq(unsigned irq)
{
struct irq_info *info = info_for_irq(irq);
@@ -279,14 +298,14 @@ static enum xen_irq_type type_from_irq(unsigned irq)
return info_for_irq(irq)->type;
}
-static unsigned cpu_from_irq(unsigned irq)
+unsigned cpu_from_irq(unsigned irq)
{
return info_for_irq(irq)->cpu;
}
-static unsigned int cpu_from_evtchn(unsigned int evtchn)
+unsigned int cpu_from_evtchn(unsigned int evtchn)
{
- int irq = evtchn_to_irq[evtchn];
+ int irq = get_evtchn_to_irq(evtchn);
unsigned ret = 0;
if (irq != -1)
@@ -310,67 +329,29 @@ static bool pirq_needs_eoi_flag(unsigned irq)
return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
-static inline xen_ulong_t active_evtchns(unsigned int cpu,
- struct shared_info *sh,
- unsigned int idx)
-{
- return sh->evtchn_pending[idx] &
- per_cpu(cpu_evtchn_mask, cpu)[idx] &
- ~sh->evtchn_mask[idx];
-}
-
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
- int irq = evtchn_to_irq[chn];
+ int irq = get_evtchn_to_irq(chn);
+ struct irq_info *info = info_for_irq(irq);
BUG_ON(irq == -1);
#ifdef CONFIG_SMP
cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif
- clear_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu_from_irq(irq))));
- set_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu)));
+ xen_evtchn_port_bind_to_cpu(info, cpu);
- info_for_irq(irq)->cpu = cpu;
-}
-
-static void init_evtchn_cpu_bindings(void)
-{
- int i;
-#ifdef CONFIG_SMP
- struct irq_info *info;
-
- /* By default all event channels notify CPU#0. */
- list_for_each_entry(info, &xen_irq_list_head, list) {
- struct irq_desc *desc = irq_to_desc(info->irq);
- cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
- }
-#endif
-
- for_each_possible_cpu(i)
- memset(per_cpu(cpu_evtchn_mask, i),
- (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
-}
-
-static inline void clear_evtchn(int port)
-{
- struct shared_info *s = HYPERVISOR_shared_info;
- sync_clear_bit(port, BM(&s->evtchn_pending[0]));
+ info->cpu = cpu;
}
-static inline void set_evtchn(int port)
+static void xen_evtchn_mask_all(void)
{
- struct shared_info *s = HYPERVISOR_shared_info;
- sync_set_bit(port, BM(&s->evtchn_pending[0]));
-}
+ unsigned int evtchn;
-static inline int test_evtchn(int port)
-{
- struct shared_info *s = HYPERVISOR_shared_info;
- return sync_test_bit(port, BM(&s->evtchn_pending[0]));
+ for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
+ mask_evtchn(evtchn);
}
-
/**
* notify_remote_via_irq - send event to remote end of event channel via irq
* @irq: irq of event channel to send event to
@@ -388,63 +369,6 @@ void notify_remote_via_irq(int irq)
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
-static void mask_evtchn(int port)
-{
- struct shared_info *s = HYPERVISOR_shared_info;
- sync_set_bit(port, BM(&s->evtchn_mask[0]));
-}
-
-static void unmask_evtchn(int port)
-{
- struct shared_info *s = HYPERVISOR_shared_info;
- unsigned int cpu = get_cpu();
- int do_hypercall = 0, evtchn_pending = 0;
-
- BUG_ON(!irqs_disabled());
-
- if (unlikely((cpu != cpu_from_evtchn(port))))
- do_hypercall = 1;
- else {
- /*
- * Need to clear the mask before checking pending to
- * avoid a race with an event becoming pending.
- *
- * EVTCHNOP_unmask will only trigger an upcall if the
- * mask bit was set, so if a hypercall is needed
- * remask the event.
- */
- sync_clear_bit(port, BM(&s->evtchn_mask[0]));
- evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));
-
- if (unlikely(evtchn_pending && xen_hvm_domain())) {
- sync_set_bit(port, BM(&s->evtchn_mask[0]));
- do_hypercall = 1;
- }
- }
-
- /* Slow path (hypercall) if this is a non-local port or if this is
- * an hvm domain and an event is pending (hvm domains don't have
- * their own implementation of irq_enable). */
- if (do_hypercall) {
- struct evtchn_unmask unmask = { .port = port };
- (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
- } else {
- struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
-
- /*
- * The following is basically the equivalent of
- * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
- * the interrupt edge' if the channel is masked.
- */
- if (evtchn_pending &&
- !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
- BM(&vcpu_info->evtchn_pending_sel)))
- vcpu_info->evtchn_upcall_pending = 1;
- }
-
- put_cpu();
-}
-
static void xen_irq_init(unsigned irq)
{
struct irq_info *info;
@@ -538,6 +462,18 @@ static void xen_free_irq(unsigned irq)
irq_free_desc(irq);
}
+static void xen_evtchn_close(unsigned int port)
+{
+ struct evtchn_close close;
+
+ close.port = port;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+ BUG();
+
+ /* Closed ports are implicitly re-bound to VCPU0. */
+ bind_evtchn_to_cpu(port, 0);
+}
+
static void pirq_query_unmask(int irq)
{
struct physdev_irq_status_query irq_status;
@@ -610,7 +546,13 @@ static unsigned int __startup_pirq(unsigned int irq)
pirq_query_unmask(irq);
- evtchn_to_irq[evtchn] = irq;
+ rc = set_evtchn_to_irq(evtchn, irq);
+ if (rc != 0) {
+ pr_err("irq%d: Failed to set port to irq mapping (%d)\n",
+ irq, rc);
+ xen_evtchn_close(evtchn);
+ return 0;
+ }
bind_evtchn_to_cpu(evtchn, 0);
info->evtchn = evtchn;
@@ -628,10 +570,9 @@ static unsigned int startup_pirq(struct irq_data *data)
static void shutdown_pirq(struct irq_data *data)
{
- struct evtchn_close close;
unsigned int irq = data->irq;
struct irq_info *info = info_for_irq(irq);
- int evtchn = evtchn_from_irq(irq);
+ unsigned evtchn = evtchn_from_irq(irq);
BUG_ON(info->type != IRQT_PIRQ);
@@ -639,14 +580,8 @@ static void shutdown_pirq(struct irq_data *data)
return;
mask_evtchn(evtchn);
-
- close.port = evtchn;
- if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
- BUG();
-
- bind_evtchn_to_cpu(evtchn, 0);
- evtchn_to_irq[evtchn] = -1;
- info->evtchn = 0;
+ xen_evtchn_close(evtchn);
+ xen_irq_info_cleanup(info);
}
static void enable_pirq(struct irq_data *data)
@@ -675,6 +610,41 @@ int xen_irq_from_gsi(unsigned gsi)
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
+static void __unbind_from_irq(unsigned int irq)
+{
+ int evtchn = evtchn_from_irq(irq);
+ struct irq_info *info = irq_get_handler_data(irq);
+
+ if (info->refcnt > 0) {
+ info->refcnt--;
+ if (info->refcnt != 0)
+ return;
+ }
+
+ if (VALID_EVTCHN(evtchn)) {
+ unsigned int cpu = cpu_from_irq(irq);
+
+ xen_evtchn_close(evtchn);
+
+ switch (type_from_irq(irq)) {
+ case IRQT_VIRQ:
+ per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
+ break;
+ case IRQT_IPI:
+ per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
+ break;
+ default:
+ break;
+ }
+
+ xen_irq_info_cleanup(info);
+ }
+
+ BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
+
+ xen_free_irq(irq);
+}
+
/*
* Do not make any assumptions regarding the relationship between the
* IRQ number returned here and the Xen pirq argument.
@@ -690,6 +660,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
{
int irq = -1;
struct physdev_irq irq_op;
+ int ret;
mutex_lock(&irq_mapping_update_lock);
@@ -717,8 +688,13 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
goto out;
}
- xen_irq_info_pirq_init(irq, 0, pirq, gsi, DOMID_SELF,
+ ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
shareable ? PIRQ_SHAREABLE : 0);
+ if (ret < 0) {
+ __unbind_from_irq(irq);
+ irq = ret;
+ goto out;
+ }
pirq_query_unmask(irq);
/* We try to use the handler with the appropriate semantic for the
@@ -778,7 +754,9 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
name);
- xen_irq_info_pirq_init(irq, 0, pirq, 0, domid, 0);
+ ret = xen_irq_info_pirq_setup(irq, 0, pirq, 0, domid, 0);
+ if (ret < 0)
+ goto error_irq;
ret = irq_set_msi_desc(irq, msidesc);
if (ret < 0)
goto error_irq;
@@ -786,8 +764,8 @@ out:
mutex_unlock(&irq_mapping_update_lock);
return irq;
error_irq:
+ __unbind_from_irq(irq);
mutex_unlock(&irq_mapping_update_lock);
- xen_free_irq(irq);
return ret;
}
#endif
@@ -857,13 +835,18 @@ int xen_pirq_from_irq(unsigned irq)
return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
+
int bind_evtchn_to_irq(unsigned int evtchn)
{
int irq;
+ int ret;
+
+ if (evtchn >= xen_evtchn_max_channels())
+ return -ENOMEM;
mutex_lock(&irq_mapping_update_lock);
- irq = evtchn_to_irq[evtchn];
+ irq = get_evtchn_to_irq(evtchn);
if (irq == -1) {
irq = xen_allocate_irq_dynamic();
@@ -873,7 +856,14 @@ int bind_evtchn_to_irq(unsigned int evtchn)
irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
handle_edge_irq, "event");
- xen_irq_info_evtchn_init(irq, evtchn);
+ ret = xen_irq_info_evtchn_setup(irq, evtchn);
+ if (ret < 0) {
+ __unbind_from_irq(irq);
+ irq = ret;
+ goto out;
+ }
+ /* New interdomain events are bound to VCPU 0. */
+ bind_evtchn_to_cpu(evtchn, 0);
} else {
struct irq_info *info = info_for_irq(irq);
WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
@@ -890,6 +880,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
int evtchn, irq;
+ int ret;
mutex_lock(&irq_mapping_update_lock);
@@ -909,8 +900,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
BUG();
evtchn = bind_ipi.port;
- xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
-
+ ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
+ if (ret < 0) {
+ __unbind_from_irq(irq);
+ irq = ret;
+ goto out;
+ }
bind_evtchn_to_cpu(evtchn, cpu);
} else {
struct irq_info *info = info_for_irq(irq);
@@ -943,7 +938,7 @@ static int find_virq(unsigned int virq, unsigned int cpu)
int port, rc = -ENOENT;
memset(&status, 0, sizeof(status));
- for (port = 0; port <= NR_EVENT_CHANNELS; port++) {
+ for (port = 0; port < xen_evtchn_max_channels(); port++) {
status.dom = DOMID_SELF;
status.port = port;
rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
@@ -959,6 +954,19 @@ static int find_virq(unsigned int virq, unsigned int cpu)
return rc;
}
+/**
+ * xen_evtchn_nr_channels - number of usable event channel ports
+ *
+ * This may be less than the maximum supported by the current
+ * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
+ * supported.
+ */
+unsigned xen_evtchn_nr_channels(void)
+{
+ return evtchn_ops->nr_channels();
+}
+EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
+
int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
struct evtchn_bind_virq bind_virq;
@@ -989,7 +997,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
evtchn = ret;
}
- xen_irq_info_virq_init(cpu, irq, evtchn, virq);
+ ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
+ if (ret < 0) {
+ __unbind_from_irq(irq);
+ irq = ret;
+ goto out;
+ }
bind_evtchn_to_cpu(evtchn, cpu);
} else {
@@ -1005,50 +1018,8 @@ out:
static void unbind_from_irq(unsigned int irq)
{
- struct evtchn_close close;
- int evtchn = evtchn_from_irq(irq);
- struct irq_info *info = irq_get_handler_data(irq);
-
- if (WARN_ON(!info))
- return;
-
mutex_lock(&irq_mapping_update_lock);
-
- if (info->refcnt > 0) {
- info->refcnt--;
- if (info->refcnt != 0)
- goto done;
- }
-
- if (VALID_EVTCHN(evtchn)) {
- close.port = evtchn;
- if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
- BUG();
-
- switch (type_from_irq(irq)) {
- case IRQT_VIRQ:
- per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
- [virq_from_irq(irq)] = -1;
- break;
- case IRQT_IPI:
- per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
- [ipi_from_irq(irq)] = -1;
- break;
- default:
- break;
- }
-
- /* Closed ports are implicitly re-bound to VCPU0. */
- bind_evtchn_to_cpu(evtchn, 0);
-
- evtchn_to_irq[evtchn] = -1;
- }
-
- BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
-
- xen_free_irq(irq);
-
- done:
+ __unbind_from_irq(irq);
mutex_unlock(&irq_mapping_update_lock);
}
@@ -1148,9 +1119,26 @@ void unbind_from_irqhandler(unsigned int irq, void *dev_id)
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
+/**
+ * xen_set_irq_priority() - set an event channel priority.
+ * @irq:irq bound to an event channel.
+ * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
+ */
+int xen_set_irq_priority(unsigned irq, unsigned priority)
+{
+ struct evtchn_set_priority set_priority;
+
+ set_priority.port = evtchn_from_irq(irq);
+ set_priority.priority = priority;
+
+ return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
+ &set_priority);
+}
+EXPORT_SYMBOL_GPL(xen_set_irq_priority);
+
int evtchn_make_refcounted(unsigned int evtchn)
{
- int irq = evtchn_to_irq[evtchn];
+ int irq = get_evtchn_to_irq(evtchn);
struct irq_info *info;
if (irq == -1)
@@ -1175,12 +1163,12 @@ int evtchn_get(unsigned int evtchn)
struct irq_info *info;
int err = -ENOENT;
- if (evtchn >= NR_EVENT_CHANNELS)
+ if (evtchn >= xen_evtchn_max_channels())
return -EINVAL;
mutex_lock(&irq_mapping_update_lock);
- irq = evtchn_to_irq[evtchn];
+ irq = get_evtchn_to_irq(evtchn);
if (irq == -1)
goto done;
@@ -1204,7 +1192,7 @@ EXPORT_SYMBOL_GPL(evtchn_get);
void evtchn_put(unsigned int evtchn)
{
- int irq = evtchn_to_irq[evtchn];
+ int irq = get_evtchn_to_irq(evtchn);
if (WARN_ON(irq == -1))
return;
unbind_from_irq(irq);
@@ -1228,222 +1216,21 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
notify_remote_via_irq(irq);
}
-irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
-{
- struct shared_info *sh = HYPERVISOR_shared_info;
- int cpu = smp_processor_id();
- xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
- int i;
- unsigned long flags;
- static DEFINE_SPINLOCK(debug_lock);
- struct vcpu_info *v;
-
- spin_lock_irqsave(&debug_lock, flags);
-
- printk("\nvcpu %d\n ", cpu);
-
- for_each_online_cpu(i) {
- int pending;
- v = per_cpu(xen_vcpu, i);
- pending = (get_irq_regs() && i == cpu)
- ? xen_irqs_disabled(get_irq_regs())
- : v->evtchn_upcall_mask;
- printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n ", i,
- pending, v->evtchn_upcall_pending,
- (int)(sizeof(v->evtchn_pending_sel)*2),
- v->evtchn_pending_sel);
- }
- v = per_cpu(xen_vcpu, cpu);
-
- printk("\npending:\n ");
- for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
- printk("%0*"PRI_xen_ulong"%s",
- (int)sizeof(sh->evtchn_pending[0])*2,
- sh->evtchn_pending[i],
- i % 8 == 0 ? "\n " : " ");
- printk("\nglobal mask:\n ");
- for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
- printk("%0*"PRI_xen_ulong"%s",
- (int)(sizeof(sh->evtchn_mask[0])*2),
- sh->evtchn_mask[i],
- i % 8 == 0 ? "\n " : " ");
-
- printk("\nglobally unmasked:\n ");
- for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
- printk("%0*"PRI_xen_ulong"%s",
- (int)(sizeof(sh->evtchn_mask[0])*2),
- sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
- i % 8 == 0 ? "\n " : " ");
-
- printk("\nlocal cpu%d mask:\n ", cpu);
- for (i = (NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
- printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
- cpu_evtchn[i],
- i % 8 == 0 ? "\n " : " ");
-
- printk("\nlocally unmasked:\n ");
- for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
- xen_ulong_t pending = sh->evtchn_pending[i]
- & ~sh->evtchn_mask[i]
- & cpu_evtchn[i];
- printk("%0*"PRI_xen_ulong"%s",
- (int)(sizeof(sh->evtchn_mask[0])*2),
- pending, i % 8 == 0 ? "\n " : " ");
- }
-
- printk("\npending list:\n");
- for (i = 0; i < NR_EVENT_CHANNELS; i++) {
- if (sync_test_bit(i, BM(sh->evtchn_pending))) {
- int word_idx = i / BITS_PER_EVTCHN_WORD;
- printk(" %d: event %d -> irq %d%s%s%s\n",
- cpu_from_evtchn(i), i,
- evtchn_to_irq[i],
- sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
- ? "" : " l2-clear",
- !sync_test_bit(i, BM(sh->evtchn_mask))
- ? "" : " globally-masked",
- sync_test_bit(i, BM(cpu_evtchn))
- ? "" : " locally-masked");
- }
- }
-
- spin_unlock_irqrestore(&debug_lock, flags);
-
- return IRQ_HANDLED;
-}
-
static DEFINE_PER_CPU(unsigned, xed_nesting_count);
-static DEFINE_PER_CPU(unsigned int, current_word_idx);
-static DEFINE_PER_CPU(unsigned int, current_bit_idx);
-/*
- * Mask out the i least significant bits of w
- */
-#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))
-
-/*
- * Search the CPUs pending events bitmasks. For each one found, map
- * the event number to an irq, and feed it into do_IRQ() for
- * handling.
- *
- * Xen uses a two-level bitmap to speed searching. The first level is
- * a bitset of words which contain pending event bits. The second
- * level is a bitset of pending events themselves.
- */
static void __xen_evtchn_do_upcall(void)
{
- int start_word_idx, start_bit_idx;
- int word_idx, bit_idx;
- int i, irq;
- int cpu = get_cpu();
- struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
+ int cpu = get_cpu();
unsigned count;
do {
- xen_ulong_t pending_words;
- xen_ulong_t pending_bits;
- struct irq_desc *desc;
-
vcpu_info->evtchn_upcall_pending = 0;
if (__this_cpu_inc_return(xed_nesting_count) - 1)
goto out;
- /*
- * Master flag must be cleared /before/ clearing
- * selector flag. xchg_xen_ulong must contain an
- * appropriate barrier.
- */
- if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
- int evtchn = evtchn_from_irq(irq);
- word_idx = evtchn / BITS_PER_LONG;
- pending_bits = evtchn % BITS_PER_LONG;
- if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
- desc = irq_to_desc(irq);
- if (desc)
- generic_handle_irq_desc(irq, desc);
- }
- }
-
- pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
-
- start_word_idx = __this_cpu_read(current_word_idx);
- start_bit_idx = __this_cpu_read(current_bit_idx);
-
- word_idx = start_word_idx;
-
- for (i = 0; pending_words != 0; i++) {
- xen_ulong_t words;
-
- words = MASK_LSBS(pending_words, word_idx);
-
- /*
- * If we masked out all events, wrap to beginning.
- */
- if (words == 0) {
- word_idx = 0;
- bit_idx = 0;
- continue;
- }
- word_idx = EVTCHN_FIRST_BIT(words);
-
- pending_bits = active_evtchns(cpu, s, word_idx);
- bit_idx = 0; /* usually scan entire word from start */
- /*
- * We scan the starting word in two parts.
- *
- * 1st time: start in the middle, scanning the
- * upper bits.
- *
- * 2nd time: scan the whole word (not just the
- * parts skipped in the first pass) -- if an
- * event in the previously scanned bits is
- * pending again it would just be scanned on
- * the next loop anyway.
- */
- if (word_idx == start_word_idx) {
- if (i == 0)
- bit_idx = start_bit_idx;
- }
-
- do {
- xen_ulong_t bits;
- int port;
-
- bits = MASK_LSBS(pending_bits, bit_idx);
-
- /* If we masked out all events, move on. */
- if (bits == 0)
- break;
-
- bit_idx = EVTCHN_FIRST_BIT(bits);
-
- /* Process port. */
- port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
- irq = evtchn_to_irq[port];
-
- if (irq != -1) {
- desc = irq_to_desc(irq);
- if (desc)
- generic_handle_irq_desc(irq, desc);
- }
-
- bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
-
- /* Next caller starts at last processed + 1 */
- __this_cpu_write(current_word_idx,
- bit_idx ? word_idx :
- (word_idx+1) % BITS_PER_EVTCHN_WORD);
- __this_cpu_write(current_bit_idx, bit_idx);
- } while (bit_idx != 0);
-
- /* Scan start_l1i twice; all others once. */
- if ((word_idx != start_word_idx) || (i != 0))
- pending_words &= ~(1UL << word_idx);
-
- word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
- }
+ xen_evtchn_handle_events(cpu);
BUG_ON(!irqs_disabled());
@@ -1492,12 +1279,12 @@ void rebind_evtchn_irq(int evtchn, int irq)
mutex_lock(&irq_mapping_update_lock);
/* After resume the irq<->evtchn mappings are all cleared out */
- BUG_ON(evtchn_to_irq[evtchn] != -1);
+ BUG_ON(get_evtchn_to_irq(evtchn) != -1);
/* Expect irq to have been bound before,
so there should be a proper type */
BUG_ON(info->type == IRQT_UNBOUND);
- xen_irq_info_evtchn_init(irq, evtchn);
+ (void)xen_irq_info_evtchn_setup(irq, evtchn);
mutex_unlock(&irq_mapping_update_lock);
@@ -1511,7 +1298,6 @@ void rebind_evtchn_irq(int evtchn, int irq)
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
- struct shared_info *s = HYPERVISOR_shared_info;
struct evtchn_bind_vcpu bind_vcpu;
int evtchn = evtchn_from_irq(irq);
int masked;
@@ -1534,7 +1320,7 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
* Mask the event while changing the VCPU binding to prevent
* it being delivered on an unexpected VCPU.
*/
- masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
+ masked = test_and_set_mask(evtchn);
/*
* If this fails, it usually just indicates that we're dealing with a
@@ -1558,22 +1344,26 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
return rebind_irq_to_cpu(data->irq, tcpu);
}
-int resend_irq_on_evtchn(unsigned int irq)
+static int retrigger_evtchn(int evtchn)
{
- int masked, evtchn = evtchn_from_irq(irq);
- struct shared_info *s = HYPERVISOR_shared_info;
+ int masked;
if (!VALID_EVTCHN(evtchn))
- return 1;
+ return 0;
- masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
- sync_set_bit(evtchn, BM(s->evtchn_pending));
+ masked = test_and_set_mask(evtchn);
+ set_evtchn(evtchn);
if (!masked)
unmask_evtchn(evtchn);
return 1;
}
+int resend_irq_on_evtchn(unsigned int irq)
+{
+ return retrigger_evtchn(evtchn_from_irq(irq));
+}
+
static void enable_dynirq(struct irq_data *data)
{
int evtchn = evtchn_from_irq(data->irq);
@@ -1608,21 +1398,7 @@ static void mask_ack_dynirq(struct irq_data *data)
static int retrigger_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(data->irq);
- struct shared_info *sh = HYPERVISOR_shared_info;
- int ret = 0;
-
- if (VALID_EVTCHN(evtchn)) {
- int masked;
-
- masked = sync_test_and_set_bit(evtchn, BM(sh->evtchn_mask));
- sync_set_bit(evtchn, BM(sh->evtchn_pending));
- if (!masked)
- unmask_evtchn(evtchn);
- ret = 1;
- }
-
- return ret;
+ return retrigger_evtchn(evtchn_from_irq(data->irq));
}
static void restore_pirqs(void)
@@ -1683,7 +1459,7 @@ static void restore_cpu_virqs(unsigned int cpu)
evtchn = bind_virq.port;
/* Record the new mapping. */
- xen_irq_info_virq_init(cpu, irq, evtchn, virq);
+ (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
bind_evtchn_to_cpu(evtchn, cpu);
}
}
@@ -1707,7 +1483,7 @@ static void restore_cpu_ipis(unsigned int cpu)
evtchn = bind_ipi.port;
/* Record the new mapping. */
- xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
+ (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
bind_evtchn_to_cpu(evtchn, cpu);
}
}
@@ -1784,21 +1560,18 @@ EXPORT_SYMBOL_GPL(xen_test_irq_shared);
void xen_irq_resume(void)
{
- unsigned int cpu, evtchn;
+ unsigned int cpu;
struct irq_info *info;
- init_evtchn_cpu_bindings();
-
/* New event-channel space is not 'live' yet. */
- for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
- mask_evtchn(evtchn);
+ xen_evtchn_mask_all();
+ xen_evtchn_resume();
/* No IRQ <-> event-channel mappings. */
list_for_each_entry(info, &xen_irq_list_head, list)
info->evtchn = 0; /* zap event-channel binding */
- for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
- evtchn_to_irq[evtchn] = -1;
+ clear_evtchn_to_irq_all();
for_each_possible_cpu(cpu) {
restore_cpu_virqs(cpu);
@@ -1889,27 +1662,40 @@ void xen_callback_vector(void)
void xen_callback_vector(void) {}
#endif
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "xen."
+
+static bool fifo_events = true;
+module_param(fifo_events, bool, 0);
+
void __init xen_init_IRQ(void)
{
- int i;
+ int ret = -EINVAL;
- evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
- GFP_KERNEL);
- BUG_ON(!evtchn_to_irq);
- for (i = 0; i < NR_EVENT_CHANNELS; i++)
- evtchn_to_irq[i] = -1;
+ if (fifo_events)
+ ret = xen_evtchn_fifo_init();
+ if (ret < 0)
+ xen_evtchn_2l_init();
- init_evtchn_cpu_bindings();
+ evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
+ sizeof(*evtchn_to_irq), GFP_KERNEL);
+ BUG_ON(!evtchn_to_irq);
/* No event channels are 'live' right now. */
- for (i = 0; i < NR_EVENT_CHANNELS; i++)
- mask_evtchn(i);
+ xen_evtchn_mask_all();
pirq_needs_eoi = pirq_needs_eoi_flag;
#ifdef CONFIG_X86
- if (xen_hvm_domain()) {
+ if (xen_pv_domain()) {
+ irq_ctx_init(smp_processor_id());
+ if (xen_initial_domain())
+ pci_xen_initial_domain();
+ }
+ if (xen_feature(XENFEAT_hvm_callback_vector))
xen_callback_vector();
+
+ if (xen_hvm_domain()) {
native_init_IRQ();
/* pci_xen_hvm_init must be called after native_init_IRQ so that
* __acpi_register_gsi can point at the right function */
@@ -1918,13 +1704,10 @@ void __init xen_init_IRQ(void)
int rc;
struct physdev_pirq_eoi_gmfn eoi_gmfn;
- irq_ctx_init(smp_processor_id());
- if (xen_initial_domain())
- pci_xen_initial_domain();
-
pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
+ /* TODO: No PVH support for PIRQ EOI */
if (rc != 0) {
free_page((unsigned long) pirq_eoi_map);
pirq_eoi_map = NULL;
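
The flat, NR_EVENT_CHANNELS-sized evtchn_to_irq[] array becomes a two-level, demand-allocated table so that the much larger FIFO port space costs no memory until ports are actually bound: each row is one zeroed page of ints, allocated in set_evtchn_to_irq() and pre-filled with -1. A worked example of the EVTCHN_ROW()/EVTCHN_COL() arithmetic, assuming 4 KiB pages and 4-byte int (EVTCHN_PER_ROW == 1024):

	static void example_row_col_split(void)		/* illustrative only */
	{
		unsigned evtchn = 1500;
		unsigned row = evtchn / 1024;	/* EVTCHN_ROW(1500) == 1   */
		unsigned col = evtchn % 1024;	/* EVTCHN_COL(1500) == 476 */

		/* get_evtchn_to_irq() returns -1 both for a row that was never
		 * allocated and for an allocated but unbound port. */
		pr_debug("port %u -> row %u, col %u\n", evtchn, row, col);
	}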
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
new file mode 100644
index 000000000000..1de2a191b395
--- /dev/null
+++ b/drivers/xen/events/events_fifo.c
@@ -0,0 +1,428 @@
+/*
+ * Xen event channels (FIFO-based ABI)
+ *
+ * Copyright (C) 2013 Citrix Systems R&D ltd.
+ *
+ * This source code is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * Or, when distributed separately from the Linux kernel or
+ * incorporated into other software packages, subject to the following
+ * license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+
+#include <linux/linkage.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
+#include <linux/cpu.h>
+
+#include <asm/sync_bitops.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/page.h>
+
+#include <xen/xen.h>
+#include <xen/xen-ops.h>
+#include <xen/events.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/event_channel.h>
+
+#include "events_internal.h"
+
+#define EVENT_WORDS_PER_PAGE (PAGE_SIZE / sizeof(event_word_t))
+#define MAX_EVENT_ARRAY_PAGES (EVTCHN_FIFO_NR_CHANNELS / EVENT_WORDS_PER_PAGE)
+
+struct evtchn_fifo_queue {
+ uint32_t head[EVTCHN_FIFO_MAX_QUEUES];
+};
+
+static DEFINE_PER_CPU(struct evtchn_fifo_control_block *, cpu_control_block);
+static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue);
+static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
+static unsigned event_array_pages __read_mostly;
+
+#define BM(w) ((unsigned long *)(w))
+
+static inline event_word_t *event_word_from_port(unsigned port)
+{
+ unsigned i = port / EVENT_WORDS_PER_PAGE;
+
+ return event_array[i] + port % EVENT_WORDS_PER_PAGE;
+}
+
+static unsigned evtchn_fifo_max_channels(void)
+{
+ return EVTCHN_FIFO_NR_CHANNELS;
+}
+
+static unsigned evtchn_fifo_nr_channels(void)
+{
+ return event_array_pages * EVENT_WORDS_PER_PAGE;
+}
+
+static void free_unused_array_pages(void)
+{
+ unsigned i;
+
+ for (i = event_array_pages; i < MAX_EVENT_ARRAY_PAGES; i++) {
+ if (!event_array[i])
+ break;
+ free_page((unsigned long)event_array[i]);
+ event_array[i] = NULL;
+ }
+}
+
+static void init_array_page(event_word_t *array_page)
+{
+ unsigned i;
+
+ for (i = 0; i < EVENT_WORDS_PER_PAGE; i++)
+ array_page[i] = 1 << EVTCHN_FIFO_MASKED;
+}
+
+static int evtchn_fifo_setup(struct irq_info *info)
+{
+ unsigned port = info->evtchn;
+ unsigned new_array_pages;
+ int ret;
+
+ new_array_pages = port / EVENT_WORDS_PER_PAGE + 1;
+
+ if (new_array_pages > MAX_EVENT_ARRAY_PAGES)
+ return -EINVAL;
+
+ while (event_array_pages < new_array_pages) {
+ void *array_page;
+ struct evtchn_expand_array expand_array;
+
+ /* Might already have a page if we've resumed. */
+ array_page = event_array[event_array_pages];
+ if (!array_page) {
+ array_page = (void *)__get_free_page(GFP_KERNEL);
+ if (array_page == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ event_array[event_array_pages] = array_page;
+ }
+
+ /* Mask all events in this page before adding it. */
+ init_array_page(array_page);
+
+ expand_array.array_gfn = virt_to_mfn(array_page);
+
+ ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array, &expand_array);
+ if (ret < 0)
+ goto error;
+
+ event_array_pages++;
+ }
+ return 0;
+
+ error:
+ if (event_array_pages == 0)
+ panic("xen: unable to expand event array with initial page (%d)\n", ret);
+ else
+ pr_err("unable to expand event array (%d)\n", ret);
+ free_unused_array_pages();
+ return ret;
+}
+
+static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
+{
+ /* no-op */
+}
+
+static void evtchn_fifo_clear_pending(unsigned port)
+{
+ event_word_t *word = event_word_from_port(port);
+ sync_clear_bit(EVTCHN_FIFO_PENDING, BM(word));
+}
+
+static void evtchn_fifo_set_pending(unsigned port)
+{
+ event_word_t *word = event_word_from_port(port);
+ sync_set_bit(EVTCHN_FIFO_PENDING, BM(word));
+}
+
+static bool evtchn_fifo_is_pending(unsigned port)
+{
+ event_word_t *word = event_word_from_port(port);
+ return sync_test_bit(EVTCHN_FIFO_PENDING, BM(word));
+}
+
+static bool evtchn_fifo_test_and_set_mask(unsigned port)
+{
+ event_word_t *word = event_word_from_port(port);
+ return sync_test_and_set_bit(EVTCHN_FIFO_MASKED, BM(word));
+}
+
+static void evtchn_fifo_mask(unsigned port)
+{
+ event_word_t *word = event_word_from_port(port);
+ sync_set_bit(EVTCHN_FIFO_MASKED, BM(word));
+}
+
+/*
+ * Clear MASKED, spinning if BUSY is set.
+ */
+static void clear_masked(volatile event_word_t *word)
+{
+ event_word_t new, old, w;
+
+ w = *word;
+
+ do {
+ old = w & ~(1 << EVTCHN_FIFO_BUSY);
+ new = old & ~(1 << EVTCHN_FIFO_MASKED);
+ w = sync_cmpxchg(word, old, new);
+ } while (w != old);
+}
+
+static void evtchn_fifo_unmask(unsigned port)
+{
+ event_word_t *word = event_word_from_port(port);
+
+ BUG_ON(!irqs_disabled());
+
+ clear_masked(word);
+ if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))) {
+ struct evtchn_unmask unmask = { .port = port };
+ (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
+ }
+}
+
+static uint32_t clear_linked(volatile event_word_t *word)
+{
+ event_word_t new, old, w;
+
+ w = *word;
+
+ do {
+ old = w;
+ new = (w & ~((1 << EVTCHN_FIFO_LINKED)
+ | EVTCHN_FIFO_LINK_MASK));
+ } while ((w = sync_cmpxchg(word, old, new)) != old);
+
+ return w & EVTCHN_FIFO_LINK_MASK;
+}
+
+static void handle_irq_for_port(unsigned port)
+{
+ int irq;
+ struct irq_desc *desc;
+
+ irq = get_evtchn_to_irq(port);
+ if (irq != -1) {
+ desc = irq_to_desc(irq);
+ if (desc)
+ generic_handle_irq_desc(irq, desc);
+ }
+}
+
+static void consume_one_event(unsigned cpu,
+ struct evtchn_fifo_control_block *control_block,
+ unsigned priority, uint32_t *ready)
+{
+ struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
+ uint32_t head;
+ unsigned port;
+ event_word_t *word;
+
+ head = q->head[priority];
+
+ /*
+ * Reached the tail last time? Read the new HEAD from the
+ * control block.
+ */
+ if (head == 0) {
+ rmb(); /* Ensure word is up-to-date before reading head. */
+ head = control_block->head[priority];
+ }
+
+ port = head;
+ word = event_word_from_port(port);
+ head = clear_linked(word);
+
+ /*
+ * If the link is non-zero, there are more events in the
+ * queue, otherwise the queue is empty.
+ *
+ * If the queue is empty, clear this priority from our local
+ * copy of the ready word.
+ */
+ if (head == 0)
+ clear_bit(priority, BM(ready));
+
+ if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))
+ && !sync_test_bit(EVTCHN_FIFO_MASKED, BM(word)))
+ handle_irq_for_port(port);
+
+ q->head[priority] = head;
+}
+
+static void evtchn_fifo_handle_events(unsigned cpu)
+{
+ struct evtchn_fifo_control_block *control_block;
+ uint32_t ready;
+ unsigned q;
+
+ control_block = per_cpu(cpu_control_block, cpu);
+
+ ready = xchg(&control_block->ready, 0);
+
+ while (ready) {
+ q = find_first_bit(BM(&ready), EVTCHN_FIFO_MAX_QUEUES);
+ consume_one_event(cpu, control_block, q, &ready);
+ ready |= xchg(&control_block->ready, 0);
+ }
+}
+
+static void evtchn_fifo_resume(void)
+{
+ unsigned cpu;
+
+ for_each_possible_cpu(cpu) {
+ void *control_block = per_cpu(cpu_control_block, cpu);
+ struct evtchn_init_control init_control;
+ int ret;
+
+ if (!control_block)
+ continue;
+
+ /*
+ * If this CPU is offline, take the opportunity to
+ * free the control block while it is not being
+ * used.
+ */
+ if (!cpu_online(cpu)) {
+ free_page((unsigned long)control_block);
+ per_cpu(cpu_control_block, cpu) = NULL;
+ continue;
+ }
+
+ init_control.control_gfn = virt_to_mfn(control_block);
+ init_control.offset = 0;
+ init_control.vcpu = cpu;
+
+ ret = HYPERVISOR_event_channel_op(EVTCHNOP_init_control,
+ &init_control);
+ if (ret < 0)
+ BUG();
+ }
+
+ /*
+ * The event array starts out as empty again and is extended
+ * as normal when events are bound. The existing pages will
+ * be reused.
+ */
+ event_array_pages = 0;
+}
+
+static const struct evtchn_ops evtchn_ops_fifo = {
+ .max_channels = evtchn_fifo_max_channels,
+ .nr_channels = evtchn_fifo_nr_channels,
+ .setup = evtchn_fifo_setup,
+ .bind_to_cpu = evtchn_fifo_bind_to_cpu,
+ .clear_pending = evtchn_fifo_clear_pending,
+ .set_pending = evtchn_fifo_set_pending,
+ .is_pending = evtchn_fifo_is_pending,
+ .test_and_set_mask = evtchn_fifo_test_and_set_mask,
+ .mask = evtchn_fifo_mask,
+ .unmask = evtchn_fifo_unmask,
+ .handle_events = evtchn_fifo_handle_events,
+ .resume = evtchn_fifo_resume,
+};
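evtchn_ops is a plain function-pointer table: each ABI backend (2-level or FIFO) fills one in and the core dispatches every operation through a single evtchn_ops pointer, as the static inline wrappers in events_internal.h show. A self-contained sketch of the pattern (the backend and its names are invented for illustration):

#include <stdio.h>

struct backend_ops {
	unsigned (*max_channels)(void);
	void (*handle)(unsigned port);
};

static unsigned fifo_max_channels(void) { return 1u << 17; }
static void fifo_handle(unsigned port) { printf("fifo: port %u\n", port); }

static const struct backend_ops fifo_ops = {
	.max_channels = fifo_max_channels,
	.handle       = fifo_handle,
};

/* The core keeps one pointer and never cares which backend is active. */
static const struct backend_ops *ops = &fifo_ops;

int main(void)
{
	printf("max channels: %u\n", ops->max_channels());
	ops->handle(3);
	return 0;
}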
+
+static int evtchn_fifo_init_control_block(unsigned cpu)
+{
+ struct page *control_block = NULL;
+ struct evtchn_init_control init_control;
+ int ret = -ENOMEM;
+
+ control_block = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ if (control_block == NULL)
+ return -ENOMEM;
+
+ init_control.control_gfn = virt_to_mfn(page_address(control_block));
+ init_control.offset = 0;
+ init_control.vcpu = cpu;
+
+ ret = HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control);
+ if (ret < 0)
+ goto error;
+
+ per_cpu(cpu_control_block, cpu) = page_address(control_block);
+
+ return 0;
+
+ error:
+ __free_page(control_block);
+ return ret;
+}
+
+static int evtchn_fifo_cpu_notification(struct notifier_block *self,
+ unsigned long action,
+ void *hcpu)
+{
+ int cpu = (long)hcpu;
+ int ret = 0;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ if (!per_cpu(cpu_control_block, cpu))
+ ret = evtchn_fifo_init_control_block(cpu);
+ break;
+ default:
+ break;
+ }
+ return ret < 0 ? NOTIFY_BAD : NOTIFY_OK;
+}
+
+static struct notifier_block evtchn_fifo_cpu_notifier = {
+ .notifier_call = evtchn_fifo_cpu_notification,
+};
+
+int __init xen_evtchn_fifo_init(void)
+{
+ int cpu = get_cpu();
+ int ret;
+
+ ret = evtchn_fifo_init_control_block(cpu);
+ if (ret < 0)
+ goto out;
+
+ pr_info("Using FIFO-based ABI\n");
+
+ evtchn_ops = &evtchn_ops_fifo;
+
+ register_cpu_notifier(&evtchn_fifo_cpu_notifier);
+out:
+ put_cpu();
+ return ret;
+}
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
new file mode 100644
index 000000000000..677f41a0fff9
--- /dev/null
+++ b/drivers/xen/events/events_internal.h
@@ -0,0 +1,150 @@
+/*
+ * Xen Event Channels (internal header)
+ *
+ * Copyright (C) 2013 Citrix Systems R&D Ltd.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2 or later. See the file COPYING for more details.
+ */
+#ifndef __EVENTS_INTERNAL_H__
+#define __EVENTS_INTERNAL_H__
+
+/* Interrupt types. */
+enum xen_irq_type {
+ IRQT_UNBOUND = 0,
+ IRQT_PIRQ,
+ IRQT_VIRQ,
+ IRQT_IPI,
+ IRQT_EVTCHN
+};
+
+/*
+ * Packed IRQ information:
+ * type - enum xen_irq_type
+ * event channel - irq->event channel mapping
+ * cpu - cpu this event channel is bound to
+ * index - type-specific information:
+ * PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM

+ * guest, or GSI (real passthrough IRQ) of the device.
+ * VIRQ - virq number
+ * IPI - IPI vector
+ * EVTCHN -
+ */
+struct irq_info {
+ struct list_head list;
+ int refcnt;
+ enum xen_irq_type type; /* type */
+ unsigned irq;
+ unsigned int evtchn; /* event channel */
+ unsigned short cpu; /* cpu bound */
+
+ union {
+ unsigned short virq;
+ enum ipi_vector ipi;
+ struct {
+ unsigned short pirq;
+ unsigned short gsi;
+ unsigned char vector;
+ unsigned char flags;
+ uint16_t domid;
+ } pirq;
+ } u;
+};
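struct irq_info is a tagged union: the type field says which member of u is meaningful for a given IRQ. A compact illustration of the same idea (the fields are made up for the demo, not the kernel's):

#include <stdio.h>

enum src_type { SRC_VIRQ, SRC_IPI };

struct info {
	enum src_type type;		/* selects the valid union member */
	unsigned evtchn;
	union {
		unsigned short virq;
		unsigned char ipi;
	} u;
};

static void print_info(const struct info *i)
{
	if (i->type == SRC_VIRQ)
		printf("evtchn %u carries VIRQ %u\n", i->evtchn, i->u.virq);
	else
		printf("evtchn %u carries IPI %u\n", i->evtchn, i->u.ipi);
}

int main(void)
{
	struct info timer = { .type = SRC_VIRQ, .evtchn = 12, .u.virq = 0 };

	print_info(&timer);
	return 0;
}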
+
+#define PIRQ_NEEDS_EOI (1 << 0)
+#define PIRQ_SHAREABLE (1 << 1)
+
+struct evtchn_ops {
+ unsigned (*max_channels)(void);
+ unsigned (*nr_channels)(void);
+
+ int (*setup)(struct irq_info *info);
+ void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);
+
+ void (*clear_pending)(unsigned port);
+ void (*set_pending)(unsigned port);
+ bool (*is_pending)(unsigned port);
+ bool (*test_and_set_mask)(unsigned port);
+ void (*mask)(unsigned port);
+ void (*unmask)(unsigned port);
+
+ void (*handle_events)(unsigned cpu);
+ void (*resume)(void);
+};
+
+extern const struct evtchn_ops *evtchn_ops;
+
+extern int **evtchn_to_irq;
+int get_evtchn_to_irq(unsigned int evtchn);
+
+struct irq_info *info_for_irq(unsigned irq);
+unsigned cpu_from_irq(unsigned irq);
+unsigned cpu_from_evtchn(unsigned int evtchn);
+
+static inline unsigned xen_evtchn_max_channels(void)
+{
+ return evtchn_ops->max_channels();
+}
+
+/*
+ * Do any ABI specific setup for a bound event channel before it can
+ * be unmasked and used.
+ */
+static inline int xen_evtchn_port_setup(struct irq_info *info)
+{
+ if (evtchn_ops->setup)
+ return evtchn_ops->setup(info);
+ return 0;
+}
+
+static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
+ unsigned cpu)
+{
+ evtchn_ops->bind_to_cpu(info, cpu);
+}
+
+static inline void clear_evtchn(unsigned port)
+{
+ evtchn_ops->clear_pending(port);
+}
+
+static inline void set_evtchn(unsigned port)
+{
+ evtchn_ops->set_pending(port);
+}
+
+static inline bool test_evtchn(unsigned port)
+{
+ return evtchn_ops->is_pending(port);
+}
+
+static inline bool test_and_set_mask(unsigned port)
+{
+ return evtchn_ops->test_and_set_mask(port);
+}
+
+static inline void mask_evtchn(unsigned port)
+{
+ return evtchn_ops->mask(port);
+}
+
+static inline void unmask_evtchn(unsigned port)
+{
+ return evtchn_ops->unmask(port);
+}
+
+static inline void xen_evtchn_handle_events(unsigned cpu)
+{
+ return evtchn_ops->handle_events(cpu);
+}
+
+static inline void xen_evtchn_resume(void)
+{
+ if (evtchn_ops->resume)
+ evtchn_ops->resume();
+}
+
+void xen_evtchn_2l_init(void);
+int xen_evtchn_fifo_init(void);
+
+#endif /* #ifndef __EVENTS_INTERNAL_H__ */
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 5de2063e16d3..00f40f051d95 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -417,7 +417,7 @@ static long evtchn_ioctl(struct file *file,
break;
rc = -EINVAL;
- if (unbind.port >= NR_EVENT_CHANNELS)
+ if (unbind.port >= xen_evtchn_nr_channels())
break;
rc = -ENOTCONN;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index e41c79c986ea..073b4a19a8b0 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -846,7 +846,7 @@ static int __init gntdev_init(void)
if (!xen_domain())
return -ENODEV;
- use_ptemod = xen_pv_domain();
+ use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);
err = misc_register(&gntdev_miscdev);
if (err != 0) {
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 028387192b60..b84e3ab839aa 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -62,12 +62,10 @@
static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
-static unsigned int boot_max_nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
-unsigned long xen_hvm_resume_frames;
-EXPORT_SYMBOL_GPL(xen_hvm_resume_frames);
+struct grant_frames xen_auto_xlat_grant_frames;
static union {
struct grant_entry_v1 *v1;
@@ -827,6 +825,11 @@ static unsigned int __max_nr_grant_frames(void)
unsigned int gnttab_max_grant_frames(void)
{
unsigned int xen_max = __max_nr_grant_frames();
+ static unsigned int boot_max_nr_grant_frames;
+
+ /* First time, initialize it properly. */
+ if (!boot_max_nr_grant_frames)
+ boot_max_nr_grant_frames = __max_nr_grant_frames();
if (xen_max > boot_max_nr_grant_frames)
return boot_max_nr_grant_frames;
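Making boot_max_nr_grant_frames a function-local static turns it into lazily captured state: the first caller snapshots the boot-time maximum and later callers are clamped to it. A trivial sketch of that idiom (probe() is a made-up stand-in for __max_nr_grant_frames()):

#include <stdio.h>

/* Stands in for __max_nr_grant_frames(); pretend the value can change later. */
static unsigned probe(void)
{
	static unsigned calls;
	return 32 + 8 * calls++;
}

/* Clamp to the value seen on the first call, as gnttab_max_grant_frames() does. */
static unsigned max_frames(void)
{
	static unsigned boot_max;	/* captured once, on first use */
	unsigned now = probe();

	if (!boot_max)
		boot_max = now;
	return now > boot_max ? boot_max : now;
}

int main(void)
{
	printf("%u %u %u\n", max_frames(), max_frames(), max_frames());	/* 32 32 32 */
	return 0;
}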
@@ -834,6 +837,51 @@ unsigned int gnttab_max_grant_frames(void)
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
+int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
+{
+ xen_pfn_t *pfn;
+ unsigned int max_nr_gframes = __max_nr_grant_frames();
+ unsigned int i;
+ void *vaddr;
+
+ if (xen_auto_xlat_grant_frames.count)
+ return -EINVAL;
+
+ vaddr = xen_remap(addr, PAGE_SIZE * max_nr_gframes);
+ if (vaddr == NULL) {
+ pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
+ &addr);
+ return -ENOMEM;
+ }
+ pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
+ if (!pfn) {
+ xen_unmap(vaddr);
+ return -ENOMEM;
+ }
+ for (i = 0; i < max_nr_gframes; i++)
+ pfn[i] = PFN_DOWN(addr) + i;
+
+ xen_auto_xlat_grant_frames.vaddr = vaddr;
+ xen_auto_xlat_grant_frames.pfn = pfn;
+ xen_auto_xlat_grant_frames.count = max_nr_gframes;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
+
+void gnttab_free_auto_xlat_frames(void)
+{
+ if (!xen_auto_xlat_grant_frames.count)
+ return;
+ kfree(xen_auto_xlat_grant_frames.pfn);
+ xen_unmap(xen_auto_xlat_grant_frames.vaddr);
+
+ xen_auto_xlat_grant_frames.pfn = NULL;
+ xen_auto_xlat_grant_frames.count = 0;
+ xen_auto_xlat_grant_frames.vaddr = NULL;
+}
+EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
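gnttab_setup_auto_xlat_frames() records one PFN per grant frame, which is simply the PFN of the base address plus the frame index. A small user-space sketch of that arithmetic, with the base address, page size and frame count assumed purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	uint64_t base = 0xf2000000;	/* assumed MMIO region handed to the grant table */
	unsigned frames = 4;		/* stands in for __max_nr_grant_frames()         */
	uint64_t pfn[4];

	for (unsigned i = 0; i < frames; i++)
		pfn[i] = PFN_DOWN(base) + i;	/* consecutive frames after the base */

	for (unsigned i = 0; i < frames; i++)
		printf("grant frame %u -> pfn %#llx\n", i, (unsigned long long)pfn[i]);
	return 0;
}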
+
/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
@@ -1060,10 +1108,11 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
unsigned int nr_gframes = end_idx + 1;
int rc;
- if (xen_hvm_domain()) {
+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
struct xen_add_to_physmap xatp;
unsigned int i = end_idx;
rc = 0;
+ BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
/*
* Loop backwards, so that the first hypercall has the largest
* index, ensuring that the table will grow only once.
@@ -1072,7 +1121,7 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
xatp.domid = DOMID_SELF;
xatp.idx = i;
xatp.space = XENMAPSPACE_grant_table;
- xatp.gpfn = (xen_hvm_resume_frames >> PAGE_SHIFT) + i;
+ xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
if (rc != 0) {
pr_warn("grant table add_to_physmap failed, err=%d\n",
@@ -1135,10 +1184,8 @@ static void gnttab_request_version(void)
int rc;
struct gnttab_set_version gsv;
- if (xen_hvm_domain())
- gsv.version = 1;
- else
- gsv.version = 2;
+ gsv.version = 1;
+
rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
if (rc == 0 && gsv.version == 2) {
grant_table_version = 2;
@@ -1169,21 +1216,15 @@ static int gnttab_setup(void)
if (max_nr_gframes < nr_grant_frames)
return -ENOSYS;
- if (xen_pv_domain())
- return gnttab_map(0, nr_grant_frames - 1);
-
- if (gnttab_shared.addr == NULL) {
- gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
- PAGE_SIZE * max_nr_gframes);
+ if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
+ gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
if (gnttab_shared.addr == NULL) {
- pr_warn("Failed to ioremap gnttab share frames!\n");
+ pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
+ (unsigned long)xen_auto_xlat_grant_frames.vaddr);
return -ENOMEM;
}
}
-
- gnttab_map(0, nr_grant_frames - 1);
-
- return 0;
+ return gnttab_map(0, nr_grant_frames - 1);
}
int gnttab_resume(void)
@@ -1226,13 +1267,12 @@ int gnttab_init(void)
gnttab_request_version();
nr_grant_frames = 1;
- boot_max_nr_grant_frames = __max_nr_grant_frames();
/* Determine the maximum number of frames required for the
* grant reference free list on the current hypervisor.
*/
BUG_ON(grefs_per_grant_frame == 0);
- max_nr_glist_frames = (boot_max_nr_grant_frames *
+ max_nr_glist_frames = (gnttab_max_grant_frames() *
grefs_per_grant_frame / RPP);
gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
@@ -1285,5 +1325,6 @@ static int __gnttab_init(void)
return gnttab_init();
}
-
-core_initcall(__gnttab_init);
+/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
+ * beforehand to initialize xen_auto_xlat_grant_frames. */
+core_initcall_sync(__gnttab_init);
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 188825122aae..dd9c249ea311 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -26,7 +26,9 @@
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "../pci/pci.h"
+#ifdef CONFIG_PCI_MMCONFIG
#include <asm/pci_x86.h>
+#endif
static bool __read_mostly pci_seg_supported = true;
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 2f3528e93cb9..a1361c312c06 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -108,6 +108,7 @@ static int platform_pci_init(struct pci_dev *pdev,
long ioaddr;
long mmio_addr, mmio_len;
unsigned int max_nr_gframes;
+ unsigned long grant_frames;
if (!xen_domain())
return -ENODEV;
@@ -154,13 +155,17 @@ static int platform_pci_init(struct pci_dev *pdev,
}
max_nr_gframes = gnttab_max_grant_frames();
- xen_hvm_resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
- ret = gnttab_init();
+ grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
+ ret = gnttab_setup_auto_xlat_frames(grant_frames);
if (ret)
goto out;
+ ret = gnttab_init();
+ if (ret)
+ goto grant_out;
xenbus_probe(NULL);
return 0;
-
+grant_out:
+ gnttab_free_auto_xlat_frames();
out:
pci_release_region(pdev, 0);
mem_out:
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 8e74590fa1bb..569a13b9e856 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -533,12 +533,17 @@ static void privcmd_close(struct vm_area_struct *vma)
{
struct page **pages = vma->vm_private_data;
int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int rc;
if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
return;
- xen_unmap_domain_mfn_range(vma, numpgs, pages);
- free_xenballooned_pages(numpgs, pages);
+ rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+ if (rc == 0)
+ free_xenballooned_pages(numpgs, pages);
+ else
+ pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
+ numpgs, rc);
kfree(pages);
}
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 1eac0731c349..ebd8f218a788 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -75,14 +75,32 @@ static unsigned long xen_io_tlb_nslabs;
static u64 start_dma_addr;
+/*
+ * Both of these functions should avoid PFN_PHYS because phys_addr_t
+ * can be 32bit when dma_addr_t is 64bit, leading to a loss of
+ * information if the shift is done before casting to 64bit.
+ */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
- return phys_to_machine(XPADDR(paddr)).maddr;
+ unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr));
+ dma_addr_t dma = (dma_addr_t)mfn << PAGE_SHIFT;
+
+ dma |= paddr & ~PAGE_MASK;
+
+ return dma;
}
static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
- return machine_to_phys(XMADDR(baddr)).paddr;
+ unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr));
+ dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
+ phys_addr_t paddr = dma;
+
+ BUG_ON(paddr != dma); /* truncation has occurred, should never happen */
+
+ paddr |= baddr & ~PAGE_MASK;
+
+ return paddr;
}
static inline dma_addr_t xen_virt_to_bus(void *address)
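The comment in the new helpers is about operator order: PFN_PHYS() casts to phys_addr_t before shifting, so if phys_addr_t is 32-bit and dma_addr_t is 64-bit the top bits of a high machine frame are lost. A standalone sketch of the pitfall and of the fix used above (the typedefs and the MFN value are assumptions made for the demo):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

typedef uint32_t phys_addr_t;	/* assume a 32-bit phys_addr_t ...     */
typedef uint64_t dma_addr_t;	/* ... paired with a 64-bit dma_addr_t */

int main(void)
{
	uint64_t mfn = 0x123456;	/* a machine frame that sits above 4 GiB */

	/* PFN_PHYS-style: cast to the narrow type first, then shift -> truncated. */
	dma_addr_t wrong = (dma_addr_t)((phys_addr_t)mfn << PAGE_SHIFT);

	/* xen_phys_to_bus() style: widen to dma_addr_t first, then shift. */
	dma_addr_t right = (dma_addr_t)mfn << PAGE_SHIFT;

	printf("truncated: %#llx\n", (unsigned long long)wrong);	/* 0x23456000  */
	printf("correct:   %#llx\n", (unsigned long long)right);	/* 0x123456000 */
	return 0;
}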
diff --git a/drivers/xen/xen-acpi-cpuhotplug.c b/drivers/xen/xen-acpi-cpuhotplug.c
index 8dae6c13063a..80875fb770ed 100644
--- a/drivers/xen/xen-acpi-cpuhotplug.c
+++ b/drivers/xen/xen-acpi-cpuhotplug.c
@@ -24,10 +24,7 @@
#include <linux/cpu.h>
#include <linux/acpi.h>
#include <linux/uaccess.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
-
#include <xen/acpi.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
@@ -269,7 +266,8 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
if (!is_processor_present(handle))
break;
- if (!acpi_bus_get_device(handle, &device))
+ acpi_bus_get_device(handle, &device);
+ if (acpi_device_enumerated(device))
break;
result = acpi_bus_scan(handle);
@@ -277,8 +275,9 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
pr_err(PREFIX "Unable to add the device\n");
break;
}
- result = acpi_bus_get_device(handle, &device);
- if (result) {
+ device = NULL;
+ acpi_bus_get_device(handle, &device);
+ if (!acpi_device_enumerated(device)) {
pr_err(PREFIX "Missing device object\n");
break;
}
diff --git a/drivers/xen/xen-acpi-memhotplug.c b/drivers/xen/xen-acpi-memhotplug.c
index 9083f1e474f8..f8d18626969a 100644
--- a/drivers/xen/xen-acpi-memhotplug.c
+++ b/drivers/xen/xen-acpi-memhotplug.c
@@ -22,7 +22,6 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/acpi.h>
-#include <acpi/acpi_drivers.h>
#include <xen/acpi.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
@@ -169,7 +168,7 @@ static int acpi_memory_get_device(acpi_handle handle,
acpi_scan_lock_acquire();
acpi_bus_get_device(handle, &device);
- if (device)
+ if (acpi_device_enumerated(device))
goto end;
/*
@@ -182,8 +181,9 @@ static int acpi_memory_get_device(acpi_handle handle,
result = -EINVAL;
goto out;
}
- result = acpi_bus_get_device(handle, &device);
- if (result) {
+ device = NULL;
+ acpi_bus_get_device(handle, &device);
+ if (!acpi_device_enumerated(device)) {
pr_warn(PREFIX "Missing device object\n");
result = -EINVAL;
goto out;
diff --git a/drivers/xen/xen-acpi-pad.c b/drivers/xen/xen-acpi-pad.c
index 59708fdd068b..40c4bc06b5fa 100644
--- a/drivers/xen/xen-acpi-pad.c
+++ b/drivers/xen/xen-acpi-pad.c
@@ -18,11 +18,10 @@
#include <linux/kernel.h>
#include <linux/types.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-#include <asm/xen/hypercall.h>
+#include <linux/acpi.h>
#include <xen/interface/version.h>
#include <xen/xen-ops.h>
+#include <asm/xen/hypercall.h>
#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 13bc6c31c060..7231859119f1 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -28,10 +28,8 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/syscore_ops.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <acpi/processor.h>
-
#include <xen/xen.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 21e18c18c7a1..745ad79c1d8e 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -175,6 +175,7 @@ static void frontswap_selfshrink(void)
#endif /* CONFIG_FRONTSWAP */
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+#define PAGES2MB(pages) ((pages) >> (20 - PAGE_SHIFT))
/*
* Use current balloon size, the goal (vm_committed_as), and hysteresis
@@ -525,6 +526,7 @@ EXPORT_SYMBOL(register_xen_selfballooning);
int xen_selfballoon_init(bool use_selfballooning, bool use_frontswap_selfshrink)
{
bool enable = false;
+ unsigned long reserve_pages;
if (!xen_domain())
return -ENODEV;
@@ -549,6 +551,26 @@ int xen_selfballoon_init(bool use_selfballooning, bool use_frontswap_selfshrink)
if (!enable)
return -ENODEV;
+ /*
+ * Give selfballoon_reserved_mb a default value (10% of total ram pages)
+ * to make selfballoon not so aggressive.
+ *
+ * There are two main reasons:
+ * 1) The original goal_page didn't consider some pages used by kernel
+ * space, like slab pages and memory used by device drivers.
+ *
+ * 2) The balloon driver may not give back memory to the guest OS fast
+ * enough when the workload suddenly acquires a lot of physical memory.
+ *
+ * In both cases, the guest OS will suffer from memory pressure and the
+ * OOM killer may be triggered.
+ * By reserving an extra 10% of total ram pages, we can keep the system
+ * much more stable and make it respond faster in some cases.
+ */
+ if (!selfballoon_reserved_mb) {
+ reserve_pages = totalram_pages / 10;
+ selfballoon_reserved_mb = PAGES2MB(reserve_pages);
+ }
schedule_delayed_work(&selfballoon_worker, selfballoon_interval * HZ);
return 0;
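With 4 KiB pages, MB2PAGES()/PAGES2MB() are just shifts by 20 - PAGE_SHIFT = 8 bits. A tiny sketch of the default-reservation arithmetic the comment describes (the totalram_pages value is assumed for the example):

#include <stdio.h>

#define PAGE_SHIFT 12
#define MB2PAGES(mb)	((mb) << (20 - PAGE_SHIFT))
#define PAGES2MB(pages)	((pages) >> (20 - PAGE_SHIFT))

int main(void)
{
	unsigned long totalram_pages = 2097152;	/* assume an 8 GiB guest */
	unsigned long reserve_pages = totalram_pages / 10;

	printf("reserve %lu pages = %lu MB\n",
	       reserve_pages, PAGES2MB(reserve_pages));	/* 209715 pages = 819 MB */
	return 0;
}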
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index ec097d6f964d..01d59e66565d 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -45,6 +45,7 @@
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
+#include <xen/features.h>
#include "xenbus_probe.h"
@@ -743,7 +744,7 @@ static const struct xenbus_ring_ops ring_ops_hvm = {
void __init xenbus_ring_ops_init(void)
{
- if (xen_pv_domain())
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
ring_ops = &ring_ops_pv;
else
ring_ops = &ring_ops_hvm;
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 129bf84c19ec..cb385c10d2b1 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -496,7 +496,7 @@ subsys_initcall(xenbus_probe_frontend_init);
#ifndef MODULE
static int __init boot_wait_for_devices(void)
{
- if (xen_hvm_domain() && !xen_platform_pci_unplug)
+ if (!xen_has_pv_devices())
return -ENODEV;
ready_to_wait_for_devices = 1;
diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c
deleted file mode 100644
index 4793fc594549..000000000000
--- a/drivers/xen/xencomm.c
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) IBM Corp. 2006
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <asm/page.h>
-#include <xen/xencomm.h>
-#include <xen/interface/xen.h>
-#include <asm/xen/xencomm.h> /* for xencomm_is_phys_contiguous() */
-
-static int xencomm_init(struct xencomm_desc *desc,
- void *buffer, unsigned long bytes)
-{
- unsigned long recorded = 0;
- int i = 0;
-
- while ((recorded < bytes) && (i < desc->nr_addrs)) {
- unsigned long vaddr = (unsigned long)buffer + recorded;
- unsigned long paddr;
- int offset;
- int chunksz;
-
- offset = vaddr % PAGE_SIZE; /* handle partial pages */
- chunksz = min(PAGE_SIZE - offset, bytes - recorded);
-
- paddr = xencomm_vtop(vaddr);
- if (paddr == ~0UL) {
- printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n",
- __func__, vaddr);
- return -EINVAL;
- }
-
- desc->address[i++] = paddr;
- recorded += chunksz;
- }
-
- if (recorded < bytes) {
- printk(KERN_DEBUG
- "%s: could only translate %ld of %ld bytes\n",
- __func__, recorded, bytes);
- return -ENOSPC;
- }
-
- /* mark remaining addresses invalid (just for safety) */
- while (i < desc->nr_addrs)
- desc->address[i++] = XENCOMM_INVALID;
-
- desc->magic = XENCOMM_MAGIC;
-
- return 0;
-}
-
-static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
- void *buffer, unsigned long bytes)
-{
- struct xencomm_desc *desc;
- unsigned long buffer_ulong = (unsigned long)buffer;
- unsigned long start = buffer_ulong & PAGE_MASK;
- unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
- unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
- unsigned long size = sizeof(*desc) +
- sizeof(desc->address[0]) * nr_addrs;
-
- /*
- * slab allocator returns at least sizeof(void*) aligned pointer.
- * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might
- * cross page boundary.
- */
- if (sizeof(*desc) > sizeof(void *)) {
- unsigned long order = get_order(size);
- desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
- order);
- if (desc == NULL)
- return NULL;
-
- desc->nr_addrs =
- ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
- sizeof(*desc->address);
- } else {
- desc = kmalloc(size, gfp_mask);
- if (desc == NULL)
- return NULL;
-
- desc->nr_addrs = nr_addrs;
- }
- return desc;
-}
-
-void xencomm_free(struct xencomm_handle *desc)
-{
- if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
- struct xencomm_desc *desc__ = (struct xencomm_desc *)desc;
- if (sizeof(*desc__) > sizeof(void *)) {
- unsigned long size = sizeof(*desc__) +
- sizeof(desc__->address[0]) * desc__->nr_addrs;
- unsigned long order = get_order(size);
- free_pages((unsigned long)__va(desc), order);
- } else
- kfree(__va(desc));
- }
-}
-
-static int xencomm_create(void *buffer, unsigned long bytes,
- struct xencomm_desc **ret, gfp_t gfp_mask)
-{
- struct xencomm_desc *desc;
- int rc;
-
- pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);
-
- if (bytes == 0) {
- /* don't create a descriptor; Xen recognizes NULL. */
- BUG_ON(buffer != NULL);
- *ret = NULL;
- return 0;
- }
-
- BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
-
- desc = xencomm_alloc(gfp_mask, buffer, bytes);
- if (!desc) {
- printk(KERN_DEBUG "%s failure\n", "xencomm_alloc");
- return -ENOMEM;
- }
-
- rc = xencomm_init(desc, buffer, bytes);
- if (rc) {
- printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc);
- xencomm_free((struct xencomm_handle *)__pa(desc));
- return rc;
- }
-
- *ret = desc;
- return 0;
-}
-
-static struct xencomm_handle *xencomm_create_inline(void *ptr)
-{
- unsigned long paddr;
-
- BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr));
-
- paddr = (unsigned long)xencomm_pa(ptr);
- BUG_ON(paddr & XENCOMM_INLINE_FLAG);
- return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
-}
-
-/* "mini" routine, for stack-based communications: */
-static int xencomm_create_mini(void *buffer,
- unsigned long bytes, struct xencomm_mini *xc_desc,
- struct xencomm_desc **ret)
-{
- int rc = 0;
- struct xencomm_desc *desc;
- BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);
-
- desc = (void *)xc_desc;
-
- desc->nr_addrs = XENCOMM_MINI_ADDRS;
-
- rc = xencomm_init(desc, buffer, bytes);
- if (!rc)
- *ret = desc;
-
- return rc;
-}
-
-struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
-{
- int rc;
- struct xencomm_desc *desc;
-
- if (xencomm_is_phys_contiguous((unsigned long)ptr))
- return xencomm_create_inline(ptr);
-
- rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
-
- if (rc || desc == NULL)
- return NULL;
-
- return xencomm_pa(desc);
-}
-
-struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
- struct xencomm_mini *xc_desc)
-{
- int rc;
- struct xencomm_desc *desc = NULL;
-
- if (xencomm_is_phys_contiguous((unsigned long)ptr))
- return xencomm_create_inline(ptr);
-
- rc = xencomm_create_mini(ptr, bytes, xc_desc,
- &desc);
-
- if (rc)
- return NULL;
-
- return xencomm_pa(desc);
-}
diff --git a/drivers/zorro/Makefile b/drivers/zorro/Makefile
index f62172603215..7dc5332ff984 100644
--- a/drivers/zorro/Makefile
+++ b/drivers/zorro/Makefile
@@ -2,8 +2,9 @@
# Makefile for the Zorro bus specific drivers.
#
-obj-$(CONFIG_ZORRO) += zorro.o zorro-driver.o zorro-sysfs.o names.o
+obj-$(CONFIG_ZORRO) += zorro.o zorro-driver.o zorro-sysfs.o
obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_ZORRO_NAMES) += names.o
hostprogs-y := gen-devlist
diff --git a/drivers/zorro/names.c b/drivers/zorro/names.c
index e8517c3d8e82..6f3fd9903ac3 100644
--- a/drivers/zorro/names.c
+++ b/drivers/zorro/names.c
@@ -15,8 +15,6 @@
#include <linux/zorro.h>
-#ifdef CONFIG_ZORRO_NAMES
-
struct zorro_prod_info {
__u16 prod;
unsigned short seen;
@@ -69,7 +67,6 @@ void __init zorro_name_device(struct zorro_dev *dev)
} while (--i);
/* Couldn't find either the manufacturer or the product */
- sprintf(name, "Zorro device %08x", dev->id);
return;
match_manuf: {
@@ -98,11 +95,3 @@ void __init zorro_name_device(struct zorro_dev *dev)
}
}
}
-
-#else
-
-void __init zorro_name_device(struct zorro_dev *dev)
-{
-}
-
-#endif
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index ea1ce822a8e0..6ac2579da0eb 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -14,6 +14,8 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/export.h>
+
+#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/amigahw.h>
#include <asm/setup.h>
@@ -41,10 +43,10 @@ proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *
/* Construct a ConfigDev */
memset(&cd, 0, sizeof(cd));
cd.cd_Rom = z->rom;
- cd.cd_SlotAddr = z->slotaddr;
- cd.cd_SlotSize = z->slotsize;
- cd.cd_BoardAddr = (void *)zorro_resource_start(z);
- cd.cd_BoardSize = zorro_resource_len(z);
+ cd.cd_SlotAddr = cpu_to_be16(z->slotaddr);
+ cd.cd_SlotSize = cpu_to_be16(z->slotsize);
+ cd.cd_BoardAddr = cpu_to_be32(zorro_resource_start(z));
+ cd.cd_BoardSize = cpu_to_be32(zorro_resource_len(z));
if (copy_to_user(buf, (void *)&cd + pos, nbytes))
return -EFAULT;
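The ConfigDev image handed to user space follows the native Amiga (big-endian) layout, so the host-endian fields are now converted with cpu_to_be16()/cpu_to_be32(). In user space the analogous conversions are htons()/htonl(); a small sketch with the slot values invented for the demo:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t slotaddr = 0x00e9;		/* assumed host-endian slot address */
	uint32_t boardsize = 0x00010000;	/* assumed 64 KiB board size        */

	uint16_t be_slot = htons(slotaddr);	/* like cpu_to_be16()               */
	uint32_t be_size = htonl(boardsize);	/* like cpu_to_be32()               */

	/* Byte order in memory is now big-endian regardless of the host CPU. */
	printf("raw bytes: %02x %02x / %02x %02x %02x %02x\n",
	       ((uint8_t *)&be_slot)[0], ((uint8_t *)&be_slot)[1],
	       ((uint8_t *)&be_size)[0], ((uint8_t *)&be_size)[1],
	       ((uint8_t *)&be_size)[2], ((uint8_t *)&be_size)[3]);
	return 0;
}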
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c
index ac1db7f1bcab..eacae1434b73 100644
--- a/drivers/zorro/zorro-driver.c
+++ b/drivers/zorro/zorro-driver.c
@@ -161,11 +161,12 @@ static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env)
}
struct bus_type zorro_bus_type = {
- .name = "zorro",
- .match = zorro_bus_match,
- .uevent = zorro_uevent,
- .probe = zorro_device_probe,
- .remove = zorro_device_remove,
+ .name = "zorro",
+ .dev_name = "zorro",
+ .match = zorro_bus_match,
+ .uevent = zorro_uevent,
+ .probe = zorro_device_probe,
+ .remove = zorro_device_remove,
};
EXPORT_SYMBOL(zorro_bus_type);
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index 26f7184ef9e1..36b210f9b6b2 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -16,6 +16,8 @@
#include <linux/stat.h>
#include <linux/string.h>
+#include <asm/byteorder.h>
+
#include "zorro.h"
@@ -33,10 +35,20 @@ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
zorro_config_attr(id, id, "0x%08x\n");
zorro_config_attr(type, rom.er_Type, "0x%02x\n");
-zorro_config_attr(serial, rom.er_SerialNumber, "0x%08x\n");
zorro_config_attr(slotaddr, slotaddr, "0x%04x\n");
zorro_config_attr(slotsize, slotsize, "0x%04x\n");
+static ssize_t
+show_serial(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct zorro_dev *z;
+
+ z = to_zorro_dev(dev);
+ return sprintf(buf, "0x%08x\n", be32_to_cpu(z->rom.er_SerialNumber));
+}
+
+static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
+
static ssize_t zorro_show_resource(struct device *dev, struct device_attribute *attr, char *buf)
{
struct zorro_dev *z = to_zorro_dev(dev);
@@ -60,10 +72,10 @@ static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
/* Construct a ConfigDev */
memset(&cd, 0, sizeof(cd));
cd.cd_Rom = z->rom;
- cd.cd_SlotAddr = z->slotaddr;
- cd.cd_SlotSize = z->slotsize;
- cd.cd_BoardAddr = (void *)zorro_resource_start(z);
- cd.cd_BoardSize = zorro_resource_len(z);
+ cd.cd_SlotAddr = cpu_to_be16(z->slotaddr);
+ cd.cd_SlotSize = cpu_to_be16(z->slotsize);
+ cd.cd_BoardAddr = cpu_to_be32(zorro_resource_start(z));
+ cd.cd_BoardSize = cpu_to_be32(zorro_resource_len(z));
return memory_read_from_buffer(buf, count, &off, &cd, sizeof(cd));
}
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index 858c9714b2f3..707c1a5a0317 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <asm/byteorder.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
@@ -29,7 +30,8 @@
*/
unsigned int zorro_num_autocon;
-struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
+struct zorro_dev_init zorro_autocon_init[ZORRO_NUM_AUTO] __initdata;
+struct zorro_dev *zorro_autocon;
/*
@@ -38,6 +40,7 @@ struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
struct zorro_bus {
struct device dev;
+ struct zorro_dev devices[0];
};
@@ -125,18 +128,22 @@ static struct resource __init *zorro_find_parent_resource(
static int __init amiga_zorro_probe(struct platform_device *pdev)
{
struct zorro_bus *bus;
+ struct zorro_dev_init *zi;
struct zorro_dev *z;
struct resource *r;
unsigned int i;
int error;
/* Initialize the Zorro bus */
- bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+ bus = kzalloc(sizeof(*bus) +
+ zorro_num_autocon * sizeof(bus->devices[0]),
+ GFP_KERNEL);
if (!bus)
return -ENOMEM;
+ zorro_autocon = bus->devices;
bus->dev.parent = &pdev->dev;
- dev_set_name(&bus->dev, "zorro");
+ dev_set_name(&bus->dev, zorro_bus_type.name);
error = device_register(&bus->dev);
if (error) {
pr_err("Zorro: Error registering zorro_bus\n");
@@ -151,15 +158,23 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
/* First identify all devices ... */
for (i = 0; i < zorro_num_autocon; i++) {
+ zi = &zorro_autocon_init[i];
z = &zorro_autocon[i];
- z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
+
+ z->rom = zi->rom;
+ z->id = (be16_to_cpu(z->rom.er_Manufacturer) << 16) |
+ (z->rom.er_Product << 8);
if (z->id == ZORRO_PROD_GVP_EPC_BASE) {
/* GVP quirk */
- unsigned long magic = zorro_resource_start(z)+0x8000;
+ unsigned long magic = zi->boardaddr + 0x8000;
z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK;
}
+ z->slotaddr = zi->slotaddr;
+ z->slotsize = zi->slotsize;
sprintf(z->name, "Zorro device %08x", z->id);
zorro_name_device(z);
+ z->resource.start = zi->boardaddr;
+ z->resource.end = zi->boardaddr + zi->boardsize - 1;
z->resource.name = z->name;
r = zorro_find_parent_resource(pdev, z);
error = request_resource(r, &z->resource);
@@ -167,9 +182,9 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
dev_err(&bus->dev,
"Address space collision on device %s %pR\n",
z->name, &z->resource);
- dev_set_name(&z->dev, "%02x", i);
z->dev.parent = &bus->dev;
z->dev.bus = &zorro_bus_type;
+ z->dev.id = i;
}
/* ... then register them */
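Each autoconfig device's id packs the 16-bit manufacturer into the top half and the 8-bit product into bits 8-15; er_Manufacturer lives in the expansion ROM in big-endian form, hence the new be16_to_cpu(). A quick sketch of the packing, with the values invented for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t manuf = 0x07db;	/* assumed er_Manufacturer, already cpu-endian */
	uint8_t prod = 0x01;		/* assumed er_Product                          */

	uint32_t id = ((uint32_t)manuf << 16) | ((uint32_t)prod << 8);

	printf("Zorro device %08x\n", id);	/* Zorro device 07db0100 */
	return 0;
}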
diff --git a/drivers/zorro/zorro.h b/drivers/zorro/zorro.h
index b682d5ccd63f..34119fb4e560 100644
--- a/drivers/zorro/zorro.h
+++ b/drivers/zorro/zorro.h
@@ -1,4 +1,9 @@
+#ifdef CONFIG_ZORRO_NAMES
extern void zorro_name_device(struct zorro_dev *z);
+#else
+static inline void zorro_name_device(struct zorro_dev *dev) { }
+#endif
+
extern int zorro_create_sysfs_dev_files(struct zorro_dev *z);